diff --git a/.gitignore b/.gitignore
index 0c39aa20b6ba..868cdd2594e0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,38 +7,40 @@
# command after changing this file, to see if there are
# any tracked files which get ignored after the change.
#
-# Normal rules
+# Normal rules (sorted alphabetically)
#
.*
+*.a
+*.bin
+*.bz2
+*.c.[012]*.*
+*.dtb
+*.dtb.S
+*.dwo
+*.elf
+*.gcno
+*.gz
+*.i
+*.ko
+*.ll
+*.lst
+*.lz4
+*.lzma
+*.lzo
+*.mod.c
*.o
*.o.*
-*.a
+*.order
+*.patch
*.s
-*.ko
*.so
*.so.dbg
-*.mod.c
-*.i
-*.lst
+*.su
*.symtypes
-*.order
-*.elf
-*.bin
*.tar
-*.gz
-*.bz2
-*.lzma
*.xz
-*.lz4
-*.lzo
-*.patch
-*.gcno
-*.ll
-modules.builtin
Module.symvers
-*.dwo
-*.su
-*.c.[012]*.*
+modules.builtin
#
# Top-level generic files
@@ -53,6 +55,11 @@ Module.symvers
/System.map
/Module.markers
+#
+# RPM spec file (make rpm-pkg)
+#
+/*.spec
+
#
# Debian directory (make deb-pkg)
#
@@ -115,3 +122,7 @@ all.config
# Kdevelop4
*.kdev4
+
+out/
+# fetched Android config fragments
+kernel/configs/android-*.cfg
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-dvctrace b/Documentation/ABI/testing/configfs-usb-gadget-dvctrace
new file mode 100644
index 000000000000..6391096ac151
--- /dev/null
+++ b/Documentation/ABI/testing/configfs-usb-gadget-dvctrace
@@ -0,0 +1,9 @@
+What:		/config/usb-gadget//functions/dvctrace./source_dev
+Date:		Mar 2015
+KernelVersion:	4.0
+Contact:	Traian Schiau
+Description:	(R) The name of the source device paired with this function
+		instance. If, upon creation of the instance, a source device
+		named exists and is free, that source device will be
+		associated with the current instance; otherwise the first free
+		source device will be used.
diff --git a/Documentation/ABI/testing/sysfs-bus-dvctrace b/Documentation/ABI/testing/sysfs-bus-dvctrace
new file mode 100644
index 000000000000..bf2b5bb2144a
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-dvctrace
@@ -0,0 +1,68 @@
+What:		/sys/bus/dvctrace
+Date:		May 2015
+KernelVersion:	4.0
+Contact:	Traian Schiau
+Description:	Groups the devices and drivers registered to
+		the dvc-trace bus.
+
+What:		/sys/bus/dvctrace/devices//status
+Date:		May 2015
+KernelVersion:	4.0
+Contact:	Traian Schiau
+Description:	(R) The status of a dvc-trace source device with
+		respect to a USB function driver.
+		Free - The device is free
+		Reserved - The device is reserved by a USB
+			function but not in use.
+		In use - The device is used by a USB function.
+
+What:		/sys/bus/dvctrace/devices//protocol
+Date:		May 2015
+KernelVersion:	4.0
+Contact:	Traian Schiau
+Description:	(RW) The protocol id of a dvc-trace source device;
+		this will be used in the function driver interface
+		descriptors (u8). According to the USB debug class
+		specification the protocol id is vendor specific.
+
+What:		/sys/bus/dvctrace/devices//descriptors
+Date:		May 2015
+KernelVersion:	4.0
+Contact:	Traian Schiau
+Description:	(RW) Hex-dump of the descriptors provided by the
+		source device.
+		e.g. A debug class output connection descriptor
+		09 24 02 04 03 00 00 00 00
+		ll tt ss xx xx xx xx xx ii
+		|  |  |			 +- iConnection string id.
+		|  |  +- Descriptor sub-type DC_OUTPUT_CONNECTION
+		|  +- Descriptor type (USB_DT_CS_INTERFACE)
+		+- Descriptor length
+		Writing:
+		- is not allowed while the device is Reserved or In Use.
+		- will replace all the descriptors currently present.
+		- will remove any strings previously provided.
+		- should use the same format.
+		- accepts multiple descriptors separated by space or '\n'.
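+		As an illustrative, hedged example (the device name "dvc0"
+		below is an assumption; the actual name depends on the
+		registered source device), the descriptor above could be
+		written from the shell with:
+		  echo "09 24 02 04 03 00 00 00 00" > \
+		    /sys/bus/dvctrace/devices/dvc0/descriptors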
+
+What:		/sys/bus/dvctrace/devices//strings
+Date:		May 2015
+KernelVersion:	4.0
+Contact:	Traian Schiau
+Description:	(RW) Currently set USB descriptor strings in
+		.: string format.
+		. identifies the location where
+		the string id is needed.
+		e.g. Having the same debug class output connection descriptor
+		as the first descriptor.
+		09 24 02 04 03 00 00 00 00
+		ll tt ss xx xx xx xx xx ii
+					 +- iConnection string id.
+		0.8: My output connection - will identify the string associated
+		with this descriptor.
+		Writing:
+		- is not allowed while the device is Reserved or In Use.
+		- will replace all the strings currently present.
+		- should use the same format.
+		- accepts multiple strings separated by ";" or '\n'.
+		e.g. "0.4: first string; 1.4: second string"
diff --git a/Documentation/ABI/testing/sysfs-bus-dvctrace-devices-dvcith b/Documentation/ABI/testing/sysfs-bus-dvctrace-devices-dvcith
new file mode 100644
index 000000000000..0dda3aefb89c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-dvctrace-devices-dvcith
@@ -0,0 +1,68 @@
+What:		/sys/bus/dvctrace/devices/dvcith-/msc
+Date:		Aug 2015
+KernelVersion:	4.0
+Contact:	Traian Schiau
+Description:	(R) Symbolic link to the Intel Trace Hub MSC
+		(Memory Storage Controller) sub-device used to get tracing data.
+
+What:		/sys/bus/dvctrace/devices/dvcith-/mdd_min_transfer
+Date:		Aug 2015
+KernelVersion:	4.0
+Contact:	Traian Schiau
+Description:	(RW) Window transfer watermark; the driver will queue a
+		new transfer only if at least bytes
+		of trace data are available. Since about 48 bytes of trace
+		data are generated on every switch, this should not be set
+		below that threshold.
+		Default: 2048
+
+What:		/sys/bus/dvctrace/devices/dvcith-/mdd_retry_timeout
+Date:		Aug 2015
+KernelVersion:	4.0
+Contact:	Traian Schiau
+Description:	(RW) Read retry interval. If there is no new data to be sent
+		by the time the last USB transfer completes, the driver will
+		sleep ms before checking again.
+		Default: 2 ms
+
+What:		/sys/bus/dvctrace/devices/dvcith-/mdd_max_retry
+Date:		Aug 2015
+KernelVersion:	4.0
+Contact:	Traian Schiau
+Description:	(RW) The maximum number of retries to be done before
+		triggering a switch and sending the currently available data
+		regardless of its size.
+		Default: 150
+
+What:		/sys/bus/dvctrace/devices/dvcith-/mdd_proc_type
+Date:		Aug 2015
+KernelVersion:	4.0
+Contact:	Traian Schiau
+Description:	(RW) Data process type. During DvC tracing the MSC is set up in
+		Multi Window mode (check the Intel Trace Hub Developer's Manual
+		for details). This attribute specifies what the dvc-trace data
+		stream should contain.
+		Available values are:
+		- 1 - Full blocks,
+		- 2 - Trimmed blocks (Block header + STP data)
+		- 3 - STP data only.
+		Default: 3.
+
+What:		/sys/bus/dvctrace/devices/dvcith-/mdd_transfer_type
+Date:		Aug 2015
+KernelVersion:	4.0
+Contact:	Traian Schiau
+Description:	(RW) Data transfer type. This attribute specifies how the
+		trace data is queued in the USB requests.
+		Available values are:
+		- 1 - Auto,
+		- 2 - SG-List,
+		- 3 - Linear buffer.
+		Default: 1.
+
+What:		/sys/bus/dvctrace/devices/dvcith-/mdd_stats
+Date:		Aug 2015
+KernelVersion:	4.0
+Contact:	Traian Schiau
+Description:	(R) Provides statistical information regarding the latest
+		trace session. Available if CONFIG_INTEL_TH_MSU_DVC_DEBUG is set.
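+
+		A hedged usage sketch (the instance number 0 below is an
+		assumption; the actual name depends on device enumeration):
+		  cat /sys/bus/dvctrace/devices/dvcith-0/mdd_proc_type
+		  echo 4096 > /sys/bus/dvctrace/devices/dvcith-0/mdd_min_transfer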
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 7eead5f97e02..64e65450f483 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -32,7 +32,7 @@ Description:
Description of the physical chip / device for device X.
Typically a part number.
-What:		/sys/bus/iio/devices/iio:deviceX/timestamp_clock
+What:		/sys/bus/iio/devices/iio:deviceX/current_timestamp_clock
KernelVersion:	4.5
Contact:	linux-iio@vger.kernel.org
Description:
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
index 640f65e79ef1..267920a1874b 100644
--- a/Documentation/ABI/testing/sysfs-class-cxl
+++ b/Documentation/ABI/testing/sysfs-class-cxl
@@ -69,7 +69,9 @@ Date: September 2014
Contact: linuxppc-dev@lists.ozlabs.org
Description: read/write
Set the mode for prefaulting in segments into the segment table
-	when performing the START_WORK ioctl. Possible values:
+	when performing the START_WORK ioctl. Only applicable when
+	running under hashed page table mmu.
+	Possible values:
none: No prefaulting (default)
work_element_descriptor: Treat the work element
descriptor as an effective address and
diff --git a/Documentation/ABI/testing/sysfs-class-dual-role-usb b/Documentation/ABI/testing/sysfs-class-dual-role-usb
new file mode 100644
index 000000000000..a900fd75430c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-dual-role-usb
@@ -0,0 +1,71 @@
+What:		/sys/class/dual_role_usb/.../
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan
+Description:
+		Provide a generic interface to monitor and change
+		the state of dual role USB ports. The name here
+		refers to the name mentioned in the
+		dual_role_phy_desc that is passed while registering
+		the dual_role_phy_instance through
+		devm_dual_role_instance_register.
+
+What:		/sys/class/dual_role_usb/.../supported_modes
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan
+Description:
+		This is a static node; once initialized it is not
+		expected to change at runtime. "dfp"
+		refers to "downstream facing port" i.e. the port can
+		only act as host. "ufp" refers to "upstream
+		facing port" i.e. the port can only act as device.
+		"dfp ufp" refers to "dual role port" i.e. the port
+		can either be a host port or a device port.
+
+What:		/sys/class/dual_role_usb/.../mode
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan
+Description:
+		The mode node refers to the current mode in which the
+		port is operating: "dfp" for host ports, "ufp" for device
+		ports, and "none" when no cable is connected.
+
+		On devices where the USB mode is software-controllable,
+		userspace can change the mode by writing "dfp" or "ufp".
+		On devices where the USB mode is fixed in hardware,
+		this attribute is read-only.
+
+What:		/sys/class/dual_role_usb/.../power_role
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan
+Description:
+		The power_role node mentions whether the port
+		is "sink"ing or "source"ing power; "none" if
+		the port is not connected.
+
+		On devices implementing USB Power Delivery,
+		userspace can control the power role by writing "sink" or
+		"source". On devices without USB-PD, this attribute is
+		read-only.
+
+What:		/sys/class/dual_role_usb/.../data_role
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan
+Description:
+		The data_role node mentions whether the port
+		is acting as "host" or "device" for the USB data connection,
+		"none" if there is no active data link.
+
+		On devices implementing USB Power Delivery, userspace
+		can control the data role by writing "host" or "device".
+		On devices without USB-PD, this attribute is read-only.
+
+What:		/sys/class/dual_role_usb/.../powers_vconn
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan
+Description:
+		The powers_vconn node mentions whether the port
+		is supplying power to the VCONN pin.
+
+		On devices with software control of VCONN,
+		userspace can disable the power supply to VCONN by writing "n",
+		or enable the power supply by writing "y".
diff --git a/Documentation/ABI/testing/sysfs-class-rpmb b/Documentation/ABI/testing/sysfs-class-rpmb
new file mode 100644
index 000000000000..44db3bae695f
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-rpmb
@@ -0,0 +1,57 @@
+What:		/sys/class/rpmb/
+Date:		Jun 2018
+KernelVersion:	4.17
+Contact:	Tomas Winkler
+Description:
+		The rpmb/ class sub-directory belongs to the RPMB device class.
+
+		A few storage technologies, such as eMMC, UFS, and NVMe,
+		support a Replay Protected Memory Block (RPMB) hardware
+		partition with a common protocol and similar frame layout.
+		Such a partition provides authenticated and replay protected
+		access, and is hence suitable as secure storage.
+
+What:		/sys/class/rpmb/rpmbN/
+Date:		Jun 2018
+KernelVersion:	4.17
+Contact:	Tomas Winkler
+Description:
+		The /sys/class/rpmb/rpmbN directory is created for
+		each registered RPMB device.
+
+What:		/sys/class/rpmb/rpmbN/type
+Date:		Jun 2018
+KernelVersion:	4.17
+Contact:	Tomas Winkler
+Description:
+		The /sys/class/rpmb/rpmbN/type file contains the underlying
+		storage technology of the device: EMMC, UFS, or NVMe.
+		In case of a simulated device it will have a :SIM suffix,
+		i.e. EMMC:SIM.
+
+What:		/sys/class/rpmb/rpmbN/id
+Date:		Jun 2018
+KernelVersion:	4.17
+Contact:	Tomas Winkler
+Description:
+		The /sys/class/rpmb/rpmbN/id file contains the unique device
+		id in binary form, as defined by the underlying storage
+		device. In case of multiple RPMB devices, a user can use it
+		to determine the correct device.
+		The content can be parsed according to the storage device type.
+
+What:		/sys/class/rpmb/rpmbN/wr_cnt_max
+Date:		Jun 2018
+KernelVersion:	4.17
+Contact:	Tomas Winkler
+Description:
+		The /sys/class/rpmb/rpmbN/wr_cnt_max file contains the
+		number of blocks that can be reliably written in a single
+		request.
+
+What:		/sys/class/rpmb/rpmbN/rd_cnt_max
+Date:		Jun 2018
+KernelVersion:	4.17
+Contact:	Tomas Winkler
+Description:
+		The /sys/class/rpmb/rpmbN/rd_cnt_max file contains the
+		number of blocks that can be read in a single request.
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index f3d5817c4ef0..6cae60929cb6 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -373,3 +373,44 @@ Contact: Linux kernel mailing list
Description: information about CPUs heterogeneity.
cpu_capacity: capacity of cpu#.
+
+What:		/sys/devices/system/cpu/vulnerabilities
+		/sys/devices/system/cpu/vulnerabilities/meltdown
+		/sys/devices/system/cpu/vulnerabilities/spectre_v1
+		/sys/devices/system/cpu/vulnerabilities/spectre_v2
+		/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+		/sys/devices/system/cpu/vulnerabilities/l1tf
+Date:		January 2018
+Contact:	Linux kernel mailing list
+Description:	Information about CPU vulnerabilities
+
+		The files are named after the code names of CPU
+		vulnerabilities. The output of those files reflects the
+		state of the CPUs in the system.
Possible output values:
+
+		"Not affected"	  CPU is not affected by the vulnerability
+		"Vulnerable"	  CPU is affected and no mitigation in effect
+		"Mitigation: $M"  CPU is affected and mitigation $M is in effect
+
+		Details about the l1tf file can be found in
+		Documentation/admin-guide/l1tf.rst
+
+What:		/sys/devices/system/cpu/smt
+		/sys/devices/system/cpu/smt/active
+		/sys/devices/system/cpu/smt/control
+Date:		June 2018
+Contact:	Linux kernel mailing list
+Description:	Control Symmetric Multi-Threading (SMT)
+
+		active:  Tells whether SMT is active (enabled and siblings online)
+
+		control: Read/write interface to control SMT. Possible
+			 values:
+
+			 "on"		SMT is enabled
+			 "off"		SMT is disabled
+			 "forceoff"	SMT is force disabled. Cannot be changed.
+			 "notsupported" SMT is not supported by the CPU
+
+			 If control status is "forceoff" or "notsupported" writes
+			 are rejected.
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 11b7f4ebea7c..372b88f4e706 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -51,6 +51,18 @@ Description:
Controls the dirty page count condition for the in-place-update
policies.
+What:		/sys/fs/f2fs//min_hot_blocks
+Date:		March 2017
+Contact:	"Jaegeuk Kim"
+Description:
+		Controls the dirty page count condition for redefining hot data.
+
+What:		/sys/fs/f2fs//min_ssr_sections
+Date:		October 2017
+Contact:	"Chao Yu"
+Description:
+		Controls the free section threshold to trigger SSR allocation.
+
What:		/sys/fs/f2fs//max_small_discards
Date:		November 2013
Contact:	"Jaegeuk Kim"
@@ -89,6 +101,7 @@ Date: February 2015
Contact:	"Jaegeuk Kim"
Description:
Controls the trimming rate in batch mode.
+
What:		/sys/fs/f2fs//cp_interval
Date:		October 2015
@@ -102,6 +115,12 @@ Contact: "Jaegeuk Kim"
Description:
Controls the idle timing.
+What:		/sys/fs/f2fs//iostat_enable
+Date:		August 2017
+Contact:	"Chao Yu"
+Description:
+		Controls enabling/disabling of IO statistics.
+
What:		/sys/fs/f2fs//ra_nid_pages
Date:		October 2015
Contact:	"Chao Yu"
@@ -122,6 +141,12 @@ Contact: "Shuoran Liu"
Description:
Shows total written kbytes issued to disk.
+What:		/sys/fs/f2fs//feature
+Date:		July 2017
+Contact:	"Jaegeuk Kim"
+Description:
+		Shows all enabled features in the current device.
+
What:		/sys/fs/f2fs//inject_rate
Date:		May 2016
Contact:	"Sheng Yong"
@@ -138,7 +163,18 @@ What: /sys/fs/f2fs//reserved_blocks
Date:		June 2017
Contact:	"Chao Yu"
Description:
-		Controls current reserved blocks in system.
+		Controls the target count of reserved blocks in the system.
+		The threshold is soft; it could exceed the currently
+		available user space.
+
+What:		/sys/fs/f2fs//current_reserved_blocks
+Date:		October 2017
+Contact:	"Yunlong Song"
+Contact:	"Chao Yu"
+Description:
+		Shows the current count of reserved blocks in the system. It
+		may be temporarily smaller than target_reserved_blocks, but
+		will gradually increase to target_reserved_blocks when more
+		free blocks are freed by the user later.
What:		/sys/fs/f2fs//gc_urgent
Date:		August 2017
@@ -151,3 +187,20 @@ Date: August 2017
Contact:	"Jaegeuk Kim"
Description:
Controls sleep time of GC urgent mode
+
+What:		/sys/fs/f2fs//readdir_ra
+Date:		November 2017
+Contact:	"Sheng Yong"
+Description:
+		Controls readahead of inode blocks in readdir.
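+		e.g. a hedged toggle (the mount name "sda" below is an
+		assumption; use the device backing the f2fs mount):
+		  echo 1 > /sys/fs/f2fs/sda/readdir_ra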
+
+What:		/sys/fs/f2fs//extension_list
+Date:		February 2018
+Contact:	"Chao Yu"
+Description:
+		Used to configure the extension list:
+		- Query: cat /sys/fs/f2fs//extension_list
+		- Add: echo '[h/c]extension' > /sys/fs/f2fs//extension_list
+		- Del: echo '[h/c]!extension' > /sys/fs/f2fs//extension_list
+		- [h] means add/del hot file extension
+		- [c] means add/del cold file extension
diff --git a/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons
new file mode 100644
index 000000000000..acb19b91c192
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons
@@ -0,0 +1,16 @@
+What:		/sys/kernel/wakeup_reasons/last_resume_reason
+Date:		February 2014
+Contact:	Ruchi Kandoi
+Description:
+		The /sys/kernel/wakeup_reasons/last_resume_reason is
+		used to report wakeup reasons after the system exits suspend.
+
+What:		/sys/kernel/wakeup_reasons/last_suspend_time
+Date:		March 2015
+Contact:	jinqian
+Description:
+		The /sys/kernel/wakeup_reasons/last_suspend_time is
+		used to report the time spent in the last suspend cycle. It
+		contains two numbers (in seconds) separated by a space. The
+		first number is the time spent in the suspend and resume
+		processes. The second number is the time spent in sleep state.
\ No newline at end of file
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
index 5bb9161dbe6a..78f8f00c369f 100644
--- a/Documentation/admin-guide/index.rst
+++ b/Documentation/admin-guide/index.rst
@@ -17,6 +17,15 @@ etc.
kernel-parameters
devices
+This section describes CPU vulnerabilities and provides an overview of the
+possible mitigations along with guidance for selecting mitigations if they
+are configurable at compile, boot or run time.
+
+.. toctree::
+   :maxdepth: 1
+
+   l1tf
+
Here is a set of documents aimed at users who are trying to track down
problems and bugs in particular.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 05496622b4ef..8c0502f8f3d5 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -837,6 +837,9 @@
dis_ucode_ldr	[X86] Disable the microcode loader.
+	dm=		[DM] Allows early creation of a device-mapper device.
+			See Documentation/device-mapper/boot.txt.
+
dma_debug=off	If the kernel is compiled with DMA_API_DEBUG support,
this option disables the debugging code at boot.
@@ -1841,13 +1844,6 @@
Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y,
the default is off.
-	kmemcheck=	[X86] Boot-time kmemcheck enable/disable/one-shot mode
-			Valid arguments: 0, 1, 2
-			kmemcheck=0 (disabled)
-			kmemcheck=1 (enabled)
-			kmemcheck=2 (one-shot mode)
-			Default: 2 (one-shot mode)
-
kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
Default is 0 (don't ignore, but inject #GP)
@@ -1895,10 +1891,84 @@
(virtualized real and unpaged mode) on capable
Intel chips. Default is 1 (enabled)
+	kvm-intel.vmentry_l1d_flush=[KVM,Intel] Mitigation for L1 Terminal Fault
+			CVE-2018-3620.
+
+			Valid arguments: never, cond, always
+
+			always: L1D cache flush on every VMENTER.
+			cond:	Flush L1D on VMENTER only when the code between
+				VMEXIT and VMENTER can leak host memory.
+			never:	Disables the mitigation
+
+			Default is cond (do L1 cache flush in specific instances)
+
kvm-intel.vpid=	[KVM,Intel] Disable Virtual Processor Identification
feature (tagged TLBs) on capable Intel chips.
Default is 1 (enabled) + l1tf= [X86] Control mitigation of the L1TF vulnerability on + affected CPUs + + The kernel PTE inversion protection is unconditionally + enabled and cannot be disabled. + + full + Provides all available mitigations for the + L1TF vulnerability. Disables SMT and + enables all mitigations in the + hypervisors, i.e. unconditional L1D flush. + + SMT control and L1D flush control via the + sysfs interface is still possible after + boot. Hypervisors will issue a warning + when the first VM is started in a + potentially insecure configuration, + i.e. SMT enabled or L1D flush disabled. + + full,force + Same as 'full', but disables SMT and L1D + flush runtime control. Implies the + 'nosmt=force' command line option. + (i.e. sysfs control of SMT is disabled.) + + flush + Leaves SMT enabled and enables the default + hypervisor mitigation, i.e. conditional + L1D flush. + + SMT control and L1D flush control via the + sysfs interface is still possible after + boot. Hypervisors will issue a warning + when the first VM is started in a + potentially insecure configuration, + i.e. SMT enabled or L1D flush disabled. + + flush,nosmt + + Disables SMT and enables the default + hypervisor mitigation. + + SMT control and L1D flush control via the + sysfs interface is still possible after + boot. Hypervisors will issue a warning + when the first VM is started in a + potentially insecure configuration, + i.e. SMT enabled or L1D flush disabled. + + flush,nowarn + Same as 'flush', but hypervisors will not + warn when a VM is started in a potentially + insecure configuration. + + off + Disables hypervisor mitigations and doesn't + emit any warnings. + + Default is 'flush'. + + For details see: Documentation/admin-guide/l1tf.rst + l2cr= [PPC] l3cr= [PPC] @@ -2548,6 +2618,9 @@ noalign [KNL,ARM] + noaltinstr [S390] Disables alternative instructions patching + (CPU alternatives feature). + noapic [SMP,APIC] Tells the kernel to not make use of any IOAPICs that may be present in the system. @@ -2599,6 +2672,18 @@ nosmt [KNL,S390] Disable symmetric multithreading (SMT). Equivalent to smt=1. + [KNL,x86] Disable symmetric multithreading (SMT). + nosmt=force: Force disable SMT, cannot be undone + via the sysfs control file. + + nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2 + (indirect branch prediction) vulnerability. System may + allow data leaks with this option, which is equivalent + to spectre_v2=off. + + nospec_store_bypass_disable + [HW] Disable all mitigations for the Speculative Store Bypass vulnerability + noxsave [BUGS=X86] Disables x86 extended register state save and restore using xsave. The kernel will fallback to enabling legacy floating-point and sse state. @@ -2713,8 +2798,6 @@ norandmaps Don't use address space randomization. Equivalent to echo 0 > /proc/sys/kernel/randomize_va_space - noreplace-paravirt [X86,IA-64,PV_OPS] Don't patch paravirt_ops - noreplace-smp [X86-32,SMP] Don't replace SMP instructions with UP alternatives @@ -3253,6 +3336,21 @@ pt. [PARIDE] See Documentation/blockdev/paride.txt. + pti= [X86_64] Control Page Table Isolation of user and + kernel address spaces. Disabling this feature + removes hardening, but improves performance of + system calls and interrupts. + + on - unconditionally enable + off - unconditionally disable + auto - kernel detects whether your CPU model is + vulnerable to issues that PTI mitigates + + Not specifying this option is equivalent to pti=auto. 
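+
+			The resulting state can be inspected after boot, e.g.
+			(a hedged example; the exact output text depends on
+			the CPU and kernel version):
+			  cat /sys/devices/system/cpu/vulnerabilities/meltdown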
+
+	nopti		[X86_64]
+			Equivalent to pti=off
+
pty.legacy_count=
[KNL] Number of legacy pty's. Overwrites compiled-in
default number.
@@ -3639,6 +3737,9 @@
reboot_cpu is s[mp]#### with #### being the processor
to be used for rebooting.
+	reboot_panic=	[KNL]
+			Same as reboot parameter above but only in case of panic.
+
relax_domain_level=
[KNL, SMP] Set scheduler's default relax_domain_level.
See Documentation/cgroup-v1/cpusets.txt.
@@ -3893,6 +3994,71 @@
sonypi.*=	[HW] Sony Programmable I/O Control Device driver
See Documentation/laptops/sonypi.txt
+	spectre_v2=	[X86] Control mitigation of Spectre variant 2
+			(indirect branch speculation) vulnerability.
+
+			on   - unconditionally enable
+			off  - unconditionally disable
+			auto - kernel detects whether your CPU model is
+			       vulnerable
+
+			Selecting 'on' will, and 'auto' may, choose a
+			mitigation method at run time according to the
+			CPU, the available microcode, the setting of the
+			CONFIG_RETPOLINE configuration option, and the
+			compiler with which the kernel was built.
+
+			Specific mitigations can also be selected manually:
+
+			retpoline	  - replace indirect branches
+			retpoline,generic - Google's original retpoline
+			retpoline,amd     - AMD-specific minimal thunk
+
+			Not specifying this option is equivalent to
+			spectre_v2=auto.
+
+	spec_store_bypass_disable=
+			[HW] Control Speculative Store Bypass (SSB) Disable mitigation
+			(Speculative Store Bypass vulnerability)
+
+			Certain CPUs are vulnerable to an exploit against
+			a common industry-wide performance optimization known
+			as "Speculative Store Bypass" in which recent stores
+			to the same memory location may not be observed by
+			later loads during speculative execution. The idea
+			is that such stores are unlikely and that they can
+			be detected prior to instruction retirement at the
+			end of a particular speculative execution window.
+
+			In vulnerable processors, the speculatively forwarded
+			store can be used in a cache side channel attack, for
+			example to read memory to which the attacker does not
+			directly have access (e.g. inside sandboxed code).
+
+			This parameter controls whether the Speculative Store
+			Bypass optimization is used.
+
+			on      - Unconditionally disable Speculative Store Bypass
+			off     - Unconditionally enable Speculative Store Bypass
+			auto    - Kernel detects whether the CPU model contains an
+				  implementation of Speculative Store Bypass and
+				  picks the most appropriate mitigation. If the
+				  CPU is not vulnerable, "off" is selected. If the
+				  CPU is vulnerable the default mitigation is
+				  architecture and Kconfig dependent. See below.
+			prctl   - Control Speculative Store Bypass per thread
+				  via prctl. Speculative Store Bypass is enabled
+				  for a process by default. The state of the control
+				  is inherited on fork.
+			seccomp - Same as "prctl" above, but all seccomp threads
+				  will disable SSB unless they explicitly opt out.
+
+			Not specifying this option is equivalent to
+			spec_store_bypass_disable=auto.
+
+			Default mitigations:
+			X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
+
spia_io_base=	[HW,MTD]
spia_fio_base=
spia_pedr=
@@ -3915,6 +4081,23 @@
expediting.  Set to zero to disable automatic
expediting.
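+
+	Whichever store bypass control is in effect (see
+	spec_store_bypass_disable above and ssbd below), the resulting
+	mitigation state can be read back via sysfs, e.g. (a hedged
+	example; the output varies by CPU and selected mitigation):
+	  cat /sys/devices/system/cpu/vulnerabilities/spec_store_bypass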
+	ssbd=		[ARM64,HW]
+			Speculative Store Bypass Disable control
+
+			On CPUs that are vulnerable to the Speculative
+			Store Bypass vulnerability and offer a
+			firmware-based mitigation, this parameter
+			indicates how the mitigation should be used:
+
+			force-on:  Unconditionally enable mitigation
+				   for both kernel and userspace
+			force-off: Unconditionally disable mitigation
+				   for both kernel and userspace
+			kernel:    Always enable mitigation in the
+				   kernel, and offer a prctl interface
+				   to allow userspace to register its
+				   interest in being mitigated too.
+
stack_guard_gap=	[MM]
override the default stack gap protection. The value
is in page units and it defines how many pages prior
diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/l1tf.rst
new file mode 100644
index 000000000000..bae52b845de0
--- /dev/null
+++ b/Documentation/admin-guide/l1tf.rst
@@ -0,0 +1,610 @@
+L1TF - L1 Terminal Fault
+========================
+
+L1 Terminal Fault is a hardware vulnerability which allows unprivileged
+speculative access to data which is available in the Level 1 Data Cache
+when the page table entry controlling the virtual address, which is used
+for the access, has the Present bit cleared or other reserved bits set.
+
+Affected processors
+-------------------
+
+This vulnerability affects a wide range of Intel processors. The
+vulnerability is not present on:
+
+   - Processors from AMD, Centaur and other non-Intel vendors
+
+   - Older processor models, where the CPU family is < 6
+
+   - A range of Intel ATOM processors (Cedarview, Cloverview, Lincroft,
+     Penwell, Pineview, Silvermont, Airmont, Merrifield)
+
+   - The Intel XEON PHI family
+
+   - Intel processors which have the ARCH_CAP_RDCL_NO bit set in the
+     IA32_ARCH_CAPABILITIES MSR. If the bit is set the CPU is not affected
+     by the Meltdown vulnerability either. These CPUs should become
+     available by the end of 2018.
+
+Whether a processor is affected or not can be read out from the L1TF
+vulnerability file in sysfs. See :ref:`l1tf_sys_info`.
+
+Related CVEs
+------------
+
+The following CVE entries are related to the L1TF vulnerability:
+
+   =============  =================  ==============================
+   CVE-2018-3615  L1 Terminal Fault  SGX related aspects
+   CVE-2018-3620  L1 Terminal Fault  OS, SMM related aspects
+   CVE-2018-3646  L1 Terminal Fault  Virtualization related aspects
+   =============  =================  ==============================
+
+Problem
+-------
+
+If an instruction accesses a virtual address for which the relevant page
+table entry (PTE) has the Present bit cleared or other reserved bits set,
+then speculative execution ignores the invalid PTE and loads the referenced
+data if it is present in the Level 1 Data Cache, as if the page referenced
+by the address bits in the PTE was still present and accessible.
+
+While this is a purely speculative mechanism and the instruction will raise
+a page fault when it is retired eventually, the pure act of loading the
+data and making it available to other speculative instructions opens up the
+opportunity for side channel attacks to unprivileged malicious code,
+similar to the Meltdown attack.
+
+While Meltdown breaks the user space to kernel space protection, L1TF
+allows attacks on any physical memory address in the system, and the attack
+works across all protection domains. It allows attacks on SGX and also
+works from inside virtual machines because the speculation bypasses the
+extended page table (EPT) protection mechanism.
+ + +Attack scenarios +---------------- + +1. Malicious user space +^^^^^^^^^^^^^^^^^^^^^^^ + + Operating Systems store arbitrary information in the address bits of a + PTE which is marked non present. This allows a malicious user space + application to attack the physical memory to which these PTEs resolve. + In some cases user-space can maliciously influence the information + encoded in the address bits of the PTE, thus making attacks more + deterministic and more practical. + + The Linux kernel contains a mitigation for this attack vector, PTE + inversion, which is permanently enabled and has no performance + impact. The kernel ensures that the address bits of PTEs, which are not + marked present, never point to cacheable physical memory space. + + A system with an up to date kernel is protected against attacks from + malicious user space applications. + +2. Malicious guest in a virtual machine +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The fact that L1TF breaks all domain protections allows malicious guest + OSes, which can control the PTEs directly, and malicious guest user + space applications, which run on an unprotected guest kernel lacking the + PTE inversion mitigation for L1TF, to attack physical host memory. + + A special aspect of L1TF in the context of virtualization is symmetric + multi threading (SMT). The Intel implementation of SMT is called + HyperThreading. The fact that Hyperthreads on the affected processors + share the L1 Data Cache (L1D) is important for this. As the flaw allows + only to attack data which is present in L1D, a malicious guest running + on one Hyperthread can attack the data which is brought into the L1D by + the context which runs on the sibling Hyperthread of the same physical + core. This context can be host OS, host user space or a different guest. + + If the processor does not support Extended Page Tables, the attack is + only possible, when the hypervisor does not sanitize the content of the + effective (shadow) page tables. + + While solutions exist to mitigate these attack vectors fully, these + mitigations are not enabled by default in the Linux kernel because they + can affect performance significantly. The kernel provides several + mechanisms which can be utilized to address the problem depending on the + deployment scenario. The mitigations, their protection scope and impact + are described in the next sections. + + The default mitigations and the rationale for choosing them are explained + at the end of this document. See :ref:`default_mitigations`. + +.. _l1tf_sys_info: + +L1TF system information +----------------------- + +The Linux kernel provides a sysfs interface to enumerate the current L1TF +status of the system: whether the system is vulnerable, and which +mitigations are active. 
The relevant sysfs file is: + +/sys/devices/system/cpu/vulnerabilities/l1tf + +The possible values in this file are: + + =========================== =============================== + 'Not affected' The processor is not vulnerable + 'Mitigation: PTE Inversion' The host protection is active + =========================== =============================== + +If KVM/VMX is enabled and the processor is vulnerable then the following +information is appended to the 'Mitigation: PTE Inversion' part: + + - SMT status: + + ===================== ================ + 'VMX: SMT vulnerable' SMT is enabled + 'VMX: SMT disabled' SMT is disabled + ===================== ================ + + - L1D Flush mode: + + ================================ ==================================== + 'L1D vulnerable' L1D flushing is disabled + + 'L1D conditional cache flushes' L1D flush is conditionally enabled + + 'L1D cache flushes' L1D flush is unconditionally enabled + ================================ ==================================== + +The resulting grade of protection is discussed in the following sections. + + +Host mitigation mechanism +------------------------- + +The kernel is unconditionally protected against L1TF attacks from malicious +user space running on the host. + + +Guest mitigation mechanisms +--------------------------- + +.. _l1d_flush: + +1. L1D flush on VMENTER +^^^^^^^^^^^^^^^^^^^^^^^ + + To make sure that a guest cannot attack data which is present in the L1D + the hypervisor flushes the L1D before entering the guest. + + Flushing the L1D evicts not only the data which should not be accessed + by a potentially malicious guest, it also flushes the guest + data. Flushing the L1D has a performance impact as the processor has to + bring the flushed guest data back into the L1D. Depending on the + frequency of VMEXIT/VMENTER and the type of computations in the guest + performance degradation in the range of 1% to 50% has been observed. For + scenarios where guest VMEXIT/VMENTER are rare the performance impact is + minimal. Virtio and mechanisms like posted interrupts are designed to + confine the VMEXITs to a bare minimum, but specific configurations and + application scenarios might still suffer from a high VMEXIT rate. + + The kernel provides two L1D flush modes: + - conditional ('cond') + - unconditional ('always') + + The conditional mode avoids L1D flushing after VMEXITs which execute + only audited code paths before the corresponding VMENTER. These code + paths have been verified that they cannot expose secrets or other + interesting data to an attacker, but they can leak information about the + address space layout of the hypervisor. + + Unconditional mode flushes L1D on all VMENTER invocations and provides + maximum protection. It has a higher overhead than the conditional + mode. The overhead cannot be quantified correctly as it depends on the + workload scenario and the resulting number of VMEXITs. + + The general recommendation is to enable L1D flush on VMENTER. The kernel + defaults to conditional mode on affected processors. + + **Note**, that L1D flush does not prevent the SMT problem because the + sibling thread will also bring back its data into the L1D which makes it + attackable again. + + L1D flush can be controlled by the administrator via the kernel command + line and sysfs control files. See :ref:`mitigation_control_command_line` + and :ref:`mitigation_control_kvm`. + +.. _guest_confinement: + +2. 
Guest VCPU confinement to dedicated physical cores
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   To address the SMT problem, it is possible to make a guest or a group of
+   guests affine to one or more physical cores. The proper mechanism for
+   that is to utilize exclusive cpusets to ensure that no other guest or
+   host tasks can run on these cores.
+
+   If only a single guest or related guests run on sibling SMT threads on
+   the same physical core then they can only attack their own memory and
+   restricted parts of the host memory.
+
+   Host memory is attackable when one of the sibling SMT threads runs in
+   host OS (hypervisor) context and the other in guest context. The amount
+   of valuable information from the host OS context depends on what the
+   host OS executes, i.e. interrupts, soft interrupts and kernel
+   threads. The amount of valuable data from these contexts cannot be
+   declared as non-interesting for an attacker without deep inspection of
+   the code.
+
+   **Note** that assigning guests to a fixed set of physical cores affects
+   the ability of the scheduler to do load balancing and might have
+   negative effects on CPU utilization depending on the hosting
+   scenario. Disabling SMT might be a viable alternative for particular
+   scenarios.
+
+   For further information about confining guests to a single or to a group
+   of cores consult the cpusets documentation:
+
+   https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt
+
+.. _interrupt_isolation:
+
+3. Interrupt affinity
+^^^^^^^^^^^^^^^^^^^^^
+
+   Interrupts can be made affine to logical CPUs. This is not universally
+   true because there are types of interrupts which are truly per CPU
+   interrupts, e.g. the local timer interrupt. Aside from that, multi-queue
+   devices affine their interrupts to single CPUs or groups of CPUs per
+   queue without allowing the administrator to control the affinities.
+
+   Moving the interrupts which can be affinity controlled away from CPUs
+   which run untrusted guests reduces the attack vector space.
+
+   Whether interrupts that are affine to CPUs running untrusted
+   guests provide interesting data for an attacker depends on the system
+   configuration and the scenarios which run on the system. While for some
+   of the interrupts it can be assumed that they won't expose interesting
+   information beyond exposing hints about the host OS memory layout, there
+   is no way to make general assumptions.
+
+   Interrupt affinity can be controlled by the administrator via the
+   /proc/irq/$NR/smp_affinity[_list] files. Limited documentation is
+   available at:
+
+   https://www.kernel.org/doc/Documentation/IRQ-affinity.txt
+
+.. _smt_control:
+
+4. SMT control
+^^^^^^^^^^^^^^
+
+   To prevent the SMT issues of L1TF it might be necessary to disable SMT
+   completely. Disabling SMT can have a significant performance impact, but
+   the impact depends on the hosting scenario and the type of workloads.
+   The impact of disabling SMT also needs to be weighed against the impact
+   of other mitigation solutions like confining guests to dedicated cores.
+
+   The kernel provides a sysfs interface to retrieve the status of SMT and
+   to control it. It also provides a kernel command line interface to
+   control SMT.
+
+   The kernel command line interface consists of the following options:
+
+     =========== ==========================================================
+     nosmt	 Affects the bring up of the secondary CPUs during boot. The
+		 kernel tries to bring all present CPUs online during the
+		 boot process.
"nosmt" makes sure that from each physical + core only one - the so called primary (hyper) thread is + activated. Due to a design flaw of Intel processors related + to Machine Check Exceptions the non primary siblings have + to be brought up at least partially and are then shut down + again. "nosmt" can be undone via the sysfs interface. + + nosmt=force Has the same effect as "nosmt" but it does not allow to + undo the SMT disable via the sysfs interface. + =========== ========================================================== + + The sysfs interface provides two files: + + - /sys/devices/system/cpu/smt/control + - /sys/devices/system/cpu/smt/active + + /sys/devices/system/cpu/smt/control: + + This file allows to read out the SMT control state and provides the + ability to disable or (re)enable SMT. The possible states are: + + ============== =================================================== + on SMT is supported by the CPU and enabled. All + logical CPUs can be onlined and offlined without + restrictions. + + off SMT is supported by the CPU and disabled. Only + the so called primary SMT threads can be onlined + and offlined without restrictions. An attempt to + online a non-primary sibling is rejected + + forceoff Same as 'off' but the state cannot be controlled. + Attempts to write to the control file are rejected. + + notsupported The processor does not support SMT. It's therefore + not affected by the SMT implications of L1TF. + Attempts to write to the control file are rejected. + ============== =================================================== + + The possible states which can be written into this file to control SMT + state are: + + - on + - off + - forceoff + + /sys/devices/system/cpu/smt/active: + + This file reports whether SMT is enabled and active, i.e. if on any + physical core two or more sibling threads are online. + + SMT control is also possible at boot time via the l1tf kernel command + line parameter in combination with L1D flush control. See + :ref:`mitigation_control_command_line`. + +5. Disabling EPT +^^^^^^^^^^^^^^^^ + + Disabling EPT for virtual machines provides full mitigation for L1TF even + with SMT enabled, because the effective page tables for guests are + managed and sanitized by the hypervisor. Though disabling EPT has a + significant performance impact especially when the Meltdown mitigation + KPTI is enabled. + + EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter. + +There is ongoing research and development for new mitigation mechanisms to +address the performance impact of disabling SMT or EPT. + +.. _mitigation_control_command_line: + +Mitigation control on the kernel command line +--------------------------------------------- + +The kernel command line allows to control the L1TF mitigations at boot +time with the option "l1tf=". The valid arguments for this option are: + + ============ ============================================================= + full Provides all available mitigations for the L1TF + vulnerability. Disables SMT and enables all mitigations in + the hypervisors, i.e. unconditional L1D flushing + + SMT control and L1D flush control via the sysfs interface + is still possible after boot. Hypervisors will issue a + warning when the first VM is started in a potentially + insecure configuration, i.e. SMT enabled or L1D flush + disabled. + + full,force Same as 'full', but disables SMT and L1D flush runtime + control. Implies the 'nosmt=force' command line option. + (i.e. sysfs control of SMT is disabled.) 
+ + flush Leaves SMT enabled and enables the default hypervisor + mitigation, i.e. conditional L1D flushing + + SMT control and L1D flush control via the sysfs interface + is still possible after boot. Hypervisors will issue a + warning when the first VM is started in a potentially + insecure configuration, i.e. SMT enabled or L1D flush + disabled. + + flush,nosmt Disables SMT and enables the default hypervisor mitigation, + i.e. conditional L1D flushing. + + SMT control and L1D flush control via the sysfs interface + is still possible after boot. Hypervisors will issue a + warning when the first VM is started in a potentially + insecure configuration, i.e. SMT enabled or L1D flush + disabled. + + flush,nowarn Same as 'flush', but hypervisors will not warn when a VM is + started in a potentially insecure configuration. + + off Disables hypervisor mitigations and doesn't emit any + warnings. + ============ ============================================================= + +The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`. + + +.. _mitigation_control_kvm: + +Mitigation control for KVM - module parameter +------------------------------------------------------------- + +The KVM hypervisor mitigation mechanism, flushing the L1D cache when +entering a guest, can be controlled with a module parameter. + +The option/parameter is "kvm-intel.vmentry_l1d_flush=". It takes the +following arguments: + + ============ ============================================================== + always L1D cache flush on every VMENTER. + + cond Flush L1D on VMENTER only when the code between VMEXIT and + VMENTER can leak host memory which is considered + interesting for an attacker. This still can leak host memory + which allows e.g. to determine the hosts address space layout. + + never Disables the mitigation + ============ ============================================================== + +The parameter can be provided on the kernel command line, as a module +parameter when loading the modules and at runtime modified via the sysfs +file: + +/sys/module/kvm_intel/parameters/vmentry_l1d_flush + +The default is 'cond'. If 'l1tf=full,force' is given on the kernel command +line, then 'always' is enforced and the kvm-intel.vmentry_l1d_flush +module parameter is ignored and writes to the sysfs file are rejected. + + +Mitigation selection guide +-------------------------- + +1. No virtualization in use +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The system is protected by the kernel unconditionally and no further + action is required. + +2. Virtualization with trusted guests +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + If the guest comes from a trusted source and the guest OS kernel is + guaranteed to have the L1TF mitigations in place the system is fully + protected against L1TF and no further action is required. + + To avoid the overhead of the default L1D flushing on VMENTER the + administrator can disable the flushing via the kernel command line and + sysfs control files. See :ref:`mitigation_control_command_line` and + :ref:`mitigation_control_kvm`. + + +3. Virtualization with untrusted guests +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +3.1. SMT not supported or disabled +"""""""""""""""""""""""""""""""""" + + If SMT is not supported by the processor or disabled in the BIOS or by + the kernel, it's only required to enforce L1D flushing on VMENTER. + + Conditional L1D flushing is the default behaviour and can be tuned. See + :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`. + +3.2. 
EPT not supported or disabled +"""""""""""""""""""""""""""""""""" + + If EPT is not supported by the processor or disabled in the hypervisor, + the system is fully protected. SMT can stay enabled and L1D flushing on + VMENTER is not required. + + EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter. + +3.3. SMT and EPT supported and active +""""""""""""""""""""""""""""""""""""" + + If SMT and EPT are supported and active then various degrees of + mitigations can be employed: + + - L1D flushing on VMENTER: + + L1D flushing on VMENTER is the minimal protection requirement, but it + is only potent in combination with other mitigation methods. + + Conditional L1D flushing is the default behaviour and can be tuned. See + :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`. + + - Guest confinement: + + Confinement of guests to a single or a group of physical cores which + are not running any other processes, can reduce the attack surface + significantly, but interrupts, soft interrupts and kernel threads can + still expose valuable data to a potential attacker. See + :ref:`guest_confinement`. + + - Interrupt isolation: + + Isolating the guest CPUs from interrupts can reduce the attack surface + further, but still allows a malicious guest to explore a limited amount + of host physical memory. This can at least be used to gain knowledge + about the host address space layout. The interrupts which have a fixed + affinity to the CPUs which run the untrusted guests can depending on + the scenario still trigger soft interrupts and schedule kernel threads + which might expose valuable information. See + :ref:`interrupt_isolation`. + +The above three mitigation methods combined can provide protection to a +certain degree, but the risk of the remaining attack surface has to be +carefully analyzed. For full protection the following methods are +available: + + - Disabling SMT: + + Disabling SMT and enforcing the L1D flushing provides the maximum + amount of protection. This mitigation is not depending on any of the + above mitigation methods. + + SMT control and L1D flushing can be tuned by the command line + parameters 'nosmt', 'l1tf', 'kvm-intel.vmentry_l1d_flush' and at run + time with the matching sysfs control files. See :ref:`smt_control`, + :ref:`mitigation_control_command_line` and + :ref:`mitigation_control_kvm`. + + - Disabling EPT: + + Disabling EPT provides the maximum amount of protection as well. It is + not depending on any of the above mitigation methods. SMT can stay + enabled and L1D flushing is not required, but the performance impact is + significant. + + EPT can be disabled in the hypervisor via the 'kvm-intel.ept' + parameter. + +3.4. Nested virtual machines +"""""""""""""""""""""""""""" + +When nested virtualization is in use, three operating systems are involved: +the bare metal hypervisor, the nested hypervisor and the nested virtual +machine. VMENTER operations from the nested hypervisor into the nested +guest will always be processed by the bare metal hypervisor. 
If KVM is the
+bare metal hypervisor it will:
+
+ - Flush the L1D cache on every switch from the nested hypervisor to the
+   nested virtual machine, so that the nested hypervisor's secrets are not
+   exposed to the nested virtual machine;
+
+ - Flush the L1D cache on every switch from the nested virtual machine to
+   the nested hypervisor; this is a complex operation, and flushing the L1D
+   cache prevents the bare metal hypervisor's secrets from being exposed to
+   the nested virtual machine;
+
+ - Instruct the nested hypervisor to not perform any L1D cache flush. This
+   is an optimization to avoid double L1D flushing.
+
+
+.. _default_mitigations:
+
+Default mitigations
+-------------------
+
+  The kernel default mitigations for vulnerable processors are:
+
+  - PTE inversion to protect against malicious user space. This is done
+    unconditionally and cannot be controlled.
+
+  - L1D conditional flushing on VMENTER when EPT is enabled for
+    a guest.
+
+  The kernel does not by default enforce the disabling of SMT, which leaves
+  SMT systems vulnerable when running untrusted guests with EPT enabled.
+
+  The rationale for this choice is:
+
+  - Force disabling SMT can break existing setups, especially with
+    unattended updates.
+
+  - If regular users run untrusted guests on their machine, then L1TF is
+    just an add-on to other malware which might be embedded in an untrusted
+    guest, e.g. spam-bots or attacks on the local network.
+
+    There is no technical way to prevent a user from running untrusted code
+    on their machines blindly.
+
+  - It's technically extremely unlikely and from today's knowledge even
+    impossible that L1TF can be exploited via the most popular attack
+    mechanisms like JavaScript because these mechanisms have no way to
+    control PTEs. If this were possible, and no other mitigation were
+    available, then the default might be different.
+
+  - The administrators of cloud and hosting setups have to carefully
+    analyze the risk for their scenarios and make the appropriate
+    mitigation choices, which might even vary across their deployed
+    machines and also result in other changes of their overall setup.
+    There is no way for the kernel to provide a sensible default for these
+    kinds of scenarios.
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 66e8ce14d23d..e4fe6adc372b 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -55,6 +55,7 @@ stable kernels.
| ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220        |
| ARM            | Cortex-A72      | #853709         | N/A                         |
| ARM            | Cortex-A73      | #858921         | ARM64_ERRATUM_858921        |
+| ARM            | Cortex-A55      | #1024718        | ARM64_ERRATUM_1024718       |
| ARM            | MMU-500         | #841119,#826419 | N/A                         |
|                |                 |                 |                             |
| Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375        |
@@ -71,6 +72,7 @@ stable kernels.
| Hisilicon      | Hip0{5,6,7}     | #161010101      | HISILICON_ERRATUM_161010101 |
| Hisilicon      | Hip0{6,7}       | #161010701      | N/A                         |
|                |                 |                 |                             |
-| Qualcomm Tech. | Falkor v1       | E1003           | QCOM_FALKOR_ERRATUM_1003    |
+| Qualcomm Tech. | Kryo/Falkor v1  | E1003           | QCOM_FALKOR_ERRATUM_1003    |
| Qualcomm Tech. | Falkor v1       | E1009           | QCOM_FALKOR_ERRATUM_1009    |
| Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
+| Qualcomm Tech.
| Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 | diff --git a/Documentation/conf.py b/Documentation/conf.py index 63857d33778c..b9cadc9817f7 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -404,6 +404,8 @@ 'The kernel development community', 'manual'), ('userspace-api/index', 'userspace-api.tex', 'The Linux kernel user-space API guide', 'The kernel development community', 'manual'), + ('rpmb/index', 'rpmb.tex', 'Linux RPMB Subsystem Documentation', + 'The kernel development community', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of diff --git a/Documentation/dev-tools/index.rst b/Documentation/dev-tools/index.rst index a81787cd47d7..e313925fb0fa 100644 --- a/Documentation/dev-tools/index.rst +++ b/Documentation/dev-tools/index.rst @@ -21,7 +21,6 @@ whole; patches welcome! kasan ubsan kmemleak - kmemcheck gdb-kernel-debugging kgdb kselftest diff --git a/Documentation/dev-tools/kmemcheck.rst b/Documentation/dev-tools/kmemcheck.rst deleted file mode 100644 index 7f3d1985de74..000000000000 --- a/Documentation/dev-tools/kmemcheck.rst +++ /dev/null @@ -1,733 +0,0 @@ -Getting started with kmemcheck -============================== - -Vegard Nossum - - -Introduction ------------- - -kmemcheck is a debugging feature for the Linux Kernel. More specifically, it -is a dynamic checker that detects and warns about some uses of uninitialized -memory. - -Userspace programmers might be familiar with Valgrind's memcheck. The main -difference between memcheck and kmemcheck is that memcheck works for userspace -programs only, and kmemcheck works for the kernel only. The implementations -are of course vastly different. Because of this, kmemcheck is not as accurate -as memcheck, but it turns out to be good enough in practice to discover real -programmer errors that the compiler is not able to find through static -analysis. - -Enabling kmemcheck on a kernel will probably slow it down to the extent that -the machine will not be usable for normal workloads such as e.g. an -interactive desktop. kmemcheck will also cause the kernel to use about twice -as much memory as normal. For this reason, kmemcheck is strictly a debugging -feature. - - -Downloading ------------ - -As of version 2.6.31-rc1, kmemcheck is included in the mainline kernel. - - -Configuring and compiling -------------------------- - -kmemcheck only works for the x86 (both 32- and 64-bit) platform. A number of -configuration variables must have specific settings in order for the kmemcheck -menu to even appear in "menuconfig". These are: - -- ``CONFIG_CC_OPTIMIZE_FOR_SIZE=n`` - This option is located under "General setup" / "Optimize for size". - - Without this, gcc will use certain optimizations that usually lead to - false positive warnings from kmemcheck. An example of this is a 16-bit - field in a struct, where gcc may load 32 bits, then discard the upper - 16 bits. kmemcheck sees only the 32-bit load, and may trigger a - warning for the upper 16 bits (if they're uninitialized). - -- ``CONFIG_SLAB=y`` or ``CONFIG_SLUB=y`` - This option is located under "General setup" / "Choose SLAB - allocator". - -- ``CONFIG_FUNCTION_TRACER=n`` - This option is located under "Kernel hacking" / "Tracers" / "Kernel - Function Tracer" - - When function tracing is compiled in, gcc emits a call to another - function at the beginning of every function. This means that when the - page fault handler is called, the ftrace framework will be called - before kmemcheck has had a chance to handle the fault. 
If ftrace then - modifies memory that was tracked by kmemcheck, the result is an - endless recursive page fault. - -- ``CONFIG_DEBUG_PAGEALLOC=n`` - This option is located under "Kernel hacking" / "Memory Debugging" - / "Debug page memory allocations". - -In addition, I highly recommend turning on ``CONFIG_DEBUG_INFO=y``. This is also -located under "Kernel hacking". With this, you will be able to get line number -information from the kmemcheck warnings, which is extremely valuable in -debugging a problem. This option is not mandatory, however, because it slows -down the compilation process and produces a much bigger kernel image. - -Now the kmemcheck menu should be visible (under "Kernel hacking" / "Memory -Debugging" / "kmemcheck: trap use of uninitialized memory"). Here follows -a description of the kmemcheck configuration variables: - -- ``CONFIG_KMEMCHECK`` - This must be enabled in order to use kmemcheck at all... - -- ``CONFIG_KMEMCHECK_``[``DISABLED`` | ``ENABLED`` | ``ONESHOT``]``_BY_DEFAULT`` - This option controls the status of kmemcheck at boot-time. "Enabled" - will enable kmemcheck right from the start, "disabled" will boot the - kernel as normal (but with the kmemcheck code compiled in, so it can - be enabled at run-time after the kernel has booted), and "one-shot" is - a special mode which will turn kmemcheck off automatically after - detecting the first use of uninitialized memory. - - If you are using kmemcheck to actively debug a problem, then you - probably want to choose "enabled" here. - - The one-shot mode is mostly useful in automated test setups because it - can prevent floods of warnings and increase the chances of the machine - surviving in case something is really wrong. In other cases, the one- - shot mode could actually be counter-productive because it would turn - itself off at the very first error -- in the case of a false positive - too -- and this would come in the way of debugging the specific - problem you were interested in. - - If you would like to use your kernel as normal, but with a chance to - enable kmemcheck in case of some problem, it might be a good idea to - choose "disabled" here. When kmemcheck is disabled, most of the run- - time overhead is not incurred, and the kernel will be almost as fast - as normal. - -- ``CONFIG_KMEMCHECK_QUEUE_SIZE`` - Select the maximum number of error reports to store in an internal - (fixed-size) buffer. Since errors can occur virtually anywhere and in - any context, we need a temporary storage area which is guaranteed not - to generate any other page faults when accessed. The queue will be - emptied as soon as a tasklet may be scheduled. If the queue is full, - new error reports will be lost. - - The default value of 64 is probably fine. If some code produces more - than 64 errors within an irqs-off section, then the code is likely to - produce many, many more, too, and these additional reports seldom give - any more information (the first report is usually the most valuable - anyway). - - This number might have to be adjusted if you are not using serial - console or similar to capture the kernel log. If you are using the - "dmesg" command to save the log, then getting a lot of kmemcheck - warnings might overflow the kernel log itself, and the earlier reports - will get lost in that way instead. Try setting this to 10 or so on - such a setup. - -- ``CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT`` - Select the number of shadow bytes to save along with each entry of the - error-report queue. 
These bytes indicate what parts of an allocation - are initialized, uninitialized, etc. and will be displayed when an - error is detected to help the debugging of a particular problem. - - The number entered here is actually the logarithm of the number of - bytes that will be saved. So if you pick for example 5 here, kmemcheck - will save 2^5 = 32 bytes. - - The default value should be fine for debugging most problems. It also - fits nicely within 80 columns. - -- ``CONFIG_KMEMCHECK_PARTIAL_OK`` - This option (when enabled) works around certain GCC optimizations that - produce 32-bit reads from 16-bit variables where the upper 16 bits are - thrown away afterwards. - - The default value (enabled) is recommended. This may of course hide - some real errors, but disabling it would probably produce a lot of - false positives. - -- ``CONFIG_KMEMCHECK_BITOPS_OK`` - This option silences warnings that would be generated for bit-field - accesses where not all the bits are initialized at the same time. This - may also hide some real bugs. - - This option is probably obsolete, or it should be replaced with - the kmemcheck-/bitfield-annotations for the code in question. The - default value is therefore fine. - -Now compile the kernel as usual. - - -How to use ----------- - -Booting -~~~~~~~ - -First some information about the command-line options. There is only one -option specific to kmemcheck, and this is called "kmemcheck". It can be used -to override the default mode as chosen by the ``CONFIG_KMEMCHECK_*_BY_DEFAULT`` -option. Its possible settings are: - -- ``kmemcheck=0`` (disabled) -- ``kmemcheck=1`` (enabled) -- ``kmemcheck=2`` (one-shot mode) - -If SLUB debugging has been enabled in the kernel, it may take precedence over -kmemcheck in such a way that the slab caches which are under SLUB debugging -will not be tracked by kmemcheck. In order to ensure that this doesn't happen -(even though it shouldn't by default), use SLUB's boot option ``slub_debug``, -like this: ``slub_debug=-`` - -In fact, this option may also be used for fine-grained control over SLUB vs. -kmemcheck. For example, if the command line includes -``kmemcheck=1 slub_debug=,dentry``, then SLUB debugging will be used only -for the "dentry" slab cache, and with kmemcheck tracking all the other -caches. This is advanced usage, however, and is not generally recommended. - - -Run-time enable/disable -~~~~~~~~~~~~~~~~~~~~~~~ - -When the kernel has booted, it is possible to enable or disable kmemcheck at -run-time. WARNING: This feature is still experimental and may cause false -positive warnings to appear. Therefore, try not to use this. If you find that -it doesn't work properly (e.g. you see an unreasonable amount of warnings), I -will be happy to take bug reports. - -Use the file ``/proc/sys/kernel/kmemcheck`` for this purpose, e.g.:: - - $ echo 0 > /proc/sys/kernel/kmemcheck # disables kmemcheck - -The numbers are the same as for the ``kmemcheck=`` command-line option. 
- - -Debugging -~~~~~~~~~ - -A typical report will look something like this:: - - WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024) - 80000000000000000000000000000000000000000088ffff0000000000000000 - i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u - ^ - - Pid: 1856, comm: ntpdate Not tainted 2.6.29-rc5 #264 945P-A - RIP: 0010:[] [] __dequeue_signal+0xc8/0x190 - RSP: 0018:ffff88003cdf7d98 EFLAGS: 00210002 - RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009 - RDX: ffff88003e5d6018 RSI: ffff88003e5d6024 RDI: ffff88003cdf7e84 - RBP: ffff88003cdf7db8 R08: ffff88003e5d6000 R09: 0000000000000000 - R10: 0000000000000080 R11: 0000000000000000 R12: 000000000000000e - R13: ffff88003cdf7e78 R14: ffff88003d530710 R15: ffff88003d5a98c8 - FS: 0000000000000000(0000) GS:ffff880001982000(0063) knlGS:00000 - CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033 - CR2: ffff88003f806ea0 CR3: 000000003c036000 CR4: 00000000000006a0 - DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 - DR3: 0000000000000000 DR6: 00000000ffff4ff0 DR7: 0000000000000400 - [] dequeue_signal+0x8e/0x170 - [] get_signal_to_deliver+0x98/0x390 - [] do_notify_resume+0xad/0x7d0 - [] int_signal+0x12/0x17 - [] 0xffffffffffffffff - -The single most valuable information in this report is the RIP (or EIP on 32- -bit) value. This will help us pinpoint exactly which instruction that caused -the warning. - -If your kernel was compiled with ``CONFIG_DEBUG_INFO=y``, then all we have to do -is give this address to the addr2line program, like this:: - - $ addr2line -e vmlinux -i ffffffff8104ede8 - arch/x86/include/asm/string_64.h:12 - include/asm-generic/siginfo.h:287 - kernel/signal.c:380 - kernel/signal.c:410 - -The "``-e vmlinux``" tells addr2line which file to look in. **IMPORTANT:** -This must be the vmlinux of the kernel that produced the warning in the -first place! If not, the line number information will almost certainly be -wrong. - -The "``-i``" tells addr2line to also print the line numbers of inlined -functions. In this case, the flag was very important, because otherwise, -it would only have printed the first line, which is just a call to -``memcpy()``, which could be called from a thousand places in the kernel, and -is therefore not very useful. These inlined functions would not show up in -the stack trace above, simply because the kernel doesn't load the extra -debugging information. This technique can of course be used with ordinary -kernel oopses as well. - -In this case, it's the caller of ``memcpy()`` that is interesting, and it can be -found in ``include/asm-generic/siginfo.h``, line 287:: - - 281 static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) - 282 { - 283 if (from->si_code < 0) - 284 memcpy(to, from, sizeof(*to)); - 285 else - 286 /* _sigchld is currently the largest know union member */ - 287 memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld)); - 288 } - -Since this was a read (kmemcheck usually warns about reads only, though it can -warn about writes to unallocated or freed memory as well), it was probably the -"from" argument which contained some uninitialized bytes. Following the chain -of calls, we move upwards to see where "from" was allocated or initialized, -``kernel/signal.c``, line 380:: - - 359 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) - 360 { - ... 
- 367 list_for_each_entry(q, &list->list, list) { - 368 if (q->info.si_signo == sig) { - 369 if (first) - 370 goto still_pending; - 371 first = q; - ... - 377 if (first) { - 378 still_pending: - 379 list_del_init(&first->list); - 380 copy_siginfo(info, &first->info); - 381 __sigqueue_free(first); - ... - 392 } - 393 } - -Here, it is ``&first->info`` that is being passed on to ``copy_siginfo()``. The -variable ``first`` was found on a list -- passed in as the second argument to -``collect_signal()``. We continue our journey through the stack, to figure out -where the item on "list" was allocated or initialized. We move to line 410:: - - 395 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, - 396 siginfo_t *info) - 397 { - ... - 410 collect_signal(sig, pending, info); - ... - 414 } - -Now we need to follow the ``pending`` pointer, since that is being passed on to -``collect_signal()`` as ``list``. At this point, we've run out of lines from the -"addr2line" output. Not to worry, we just paste the next addresses from the -kmemcheck stack dump, i.e.:: - - [] dequeue_signal+0x8e/0x170 - [] get_signal_to_deliver+0x98/0x390 - [] do_notify_resume+0xad/0x7d0 - [] int_signal+0x12/0x17 - - $ addr2line -e vmlinux -i ffffffff8104f04e ffffffff81050bd8 \ - ffffffff8100b87d ffffffff8100c7b5 - kernel/signal.c:446 - kernel/signal.c:1806 - arch/x86/kernel/signal.c:805 - arch/x86/kernel/signal.c:871 - arch/x86/kernel/entry_64.S:694 - -Remember that since these addresses were found on the stack and not as the -RIP value, they actually point to the _next_ instruction (they are return -addresses). This becomes obvious when we look at the code for line 446:: - - 422 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) - 423 { - ... - 431 signr = __dequeue_signal(&tsk->signal->shared_pending, - 432 mask, info); - 433 /* - 434 * itimer signal ? - 435 * - 436 * itimers are process shared and we restart periodic - 437 * itimers in the signal delivery path to prevent DoS - 438 * attacks in the high resolution timer case. This is - 439 * compliant with the old way of self restarting - 440 * itimers, as the SIGALRM is a legacy signal and only - 441 * queued once. Changing the restart behaviour to - 442 * restart the timer in the signal dequeue path is - 443 * reducing the timer noise on heavy loaded !highres - 444 * systems too. - 445 */ - 446 if (unlikely(signr == SIGALRM)) { - ... - 489 } - -So instead of looking at 446, we should be looking at 431, which is the line -that executes just before 446. Here we see that what we are looking for is -``&tsk->signal->shared_pending``. - -Our next task is now to figure out which function that puts items on this -``shared_pending`` list. A crude, but efficient tool, is ``git grep``:: - - $ git grep -n 'shared_pending' kernel/ - ... - kernel/signal.c:828: pending = group ? &t->signal->shared_pending : &t->pending; - kernel/signal.c:1339: pending = group ? &t->signal->shared_pending : &t->pending; - ... - -There were more results, but none of them were related to list operations, -and these were the only assignments. We inspect the line numbers more closely -and find that this is indeed where items are being added to the list:: - - 816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t, - 817 int group) - 818 { - ... - 828 pending = group ? &t->signal->shared_pending : &t->pending; - ... 
- 851 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN && - 852 (is_si_special(info) || - 853 info->si_code >= 0))); - 854 if (q) { - 855 list_add_tail(&q->list, &pending->list); - ... - 890 } - -and:: - - 1309 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) - 1310 { - .... - 1339 pending = group ? &t->signal->shared_pending : &t->pending; - 1340 list_add_tail(&q->list, &pending->list); - .... - 1347 } - -In the first case, the list element we are looking for, ``q``, is being -returned from the function ``__sigqueue_alloc()``, which looks like an -allocation function. Let's take a look at it:: - - 187 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, - 188 int override_rlimit) - 189 { - 190 struct sigqueue *q = NULL; - 191 struct user_struct *user; - 192 - 193 /* - 194 * We won't get problems with the target's UID changing under us - 195 * because changing it requires RCU be used, and if t != current, the - 196 * caller must be holding the RCU readlock (by way of a spinlock) and - 197 * we use RCU protection here - 198 */ - 199 user = get_uid(__task_cred(t)->user); - 200 atomic_inc(&user->sigpending); - 201 if (override_rlimit || - 202 atomic_read(&user->sigpending) <= - 203 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) - 204 q = kmem_cache_alloc(sigqueue_cachep, flags); - 205 if (unlikely(q == NULL)) { - 206 atomic_dec(&user->sigpending); - 207 free_uid(user); - 208 } else { - 209 INIT_LIST_HEAD(&q->list); - 210 q->flags = 0; - 211 q->user = user; - 212 } - 213 - 214 return q; - 215 } - -We see that this function initializes ``q->list``, ``q->flags``, and -``q->user``. It seems that now is the time to look at the definition of -``struct sigqueue``, e.g.:: - - 14 struct sigqueue { - 15 struct list_head list; - 16 int flags; - 17 siginfo_t info; - 18 struct user_struct *user; - 19 }; - -And, you might remember, it was a ``memcpy()`` on ``&first->info`` that -caused the warning, so this makes perfect sense. It also seems reasonable -to assume that it is the caller of ``__sigqueue_alloc()`` that has the -responsibility of filling out (initializing) this member. - -But just which fields of the struct were uninitialized? Let's look at -kmemcheck's report again:: - - WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024) - 80000000000000000000000000000000000000000088ffff0000000000000000 - i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u - ^ - -These first two lines are the memory dump of the memory object itself, and -the shadow bytemap, respectively. The memory object itself is in this case -``&first->info``. Just beware that the start of this dump is NOT the start -of the object itself! The position of the caret (^) corresponds with the -address of the read (ffff88003e4a2024). - -The shadow bytemap dump legend is as follows: - -- i: initialized -- u: uninitialized -- a: unallocated (memory has been allocated by the slab layer, but has not - yet been handed off to anybody) -- f: freed (memory has been allocated by the slab layer, but has been freed - by the previous owner) - -In order to figure out where (relative to the start of the object) the -uninitialized memory was located, we have to look at the disassembly. 
For -that, we'll need the RIP address again:: - - RIP: 0010:[] [] __dequeue_signal+0xc8/0x190 - - $ objdump -d --no-show-raw-insn vmlinux | grep -C 8 ffffffff8104ede8: - ffffffff8104edc8: mov %r8,0x8(%r8) - ffffffff8104edcc: test %r10d,%r10d - ffffffff8104edcf: js ffffffff8104ee88 <__dequeue_signal+0x168> - ffffffff8104edd5: mov %rax,%rdx - ffffffff8104edd8: mov $0xc,%ecx - ffffffff8104eddd: mov %r13,%rdi - ffffffff8104ede0: mov $0x30,%eax - ffffffff8104ede5: mov %rdx,%rsi - ffffffff8104ede8: rep movsl %ds:(%rsi),%es:(%rdi) - ffffffff8104edea: test $0x2,%al - ffffffff8104edec: je ffffffff8104edf0 <__dequeue_signal+0xd0> - ffffffff8104edee: movsw %ds:(%rsi),%es:(%rdi) - ffffffff8104edf0: test $0x1,%al - ffffffff8104edf2: je ffffffff8104edf5 <__dequeue_signal+0xd5> - ffffffff8104edf4: movsb %ds:(%rsi),%es:(%rdi) - ffffffff8104edf5: mov %r8,%rdi - ffffffff8104edf8: callq ffffffff8104de60 <__sigqueue_free> - -As expected, it's the "``rep movsl``" instruction from the ``memcpy()`` -that causes the warning. We know about ``REP MOVSL`` that it uses the register -``RCX`` to count the number of remaining iterations. By taking a look at the -register dump again (from the kmemcheck report), we can figure out how many -bytes were left to copy:: - - RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009 - -By looking at the disassembly, we also see that ``%ecx`` is being loaded -with the value ``$0xc`` just before (ffffffff8104edd8), so we are very -lucky. Keep in mind that this is the number of iterations, not bytes. And -since this is a "long" operation, we need to multiply by 4 to get the -number of bytes. So this means that the uninitialized value was encountered -at 4 * (0xc - 0x9) = 12 bytes from the start of the object. - -We can now try to figure out which field of the "``struct siginfo``" that -was not initialized. This is the beginning of the struct:: - - 40 typedef struct siginfo { - 41 int si_signo; - 42 int si_errno; - 43 int si_code; - 44 - 45 union { - .. - 92 } _sifields; - 93 } siginfo_t; - -On 64-bit, the int is 4 bytes long, so it must the union member that has -not been initialized. We can verify this using gdb:: - - $ gdb vmlinux - ... - (gdb) p &((struct siginfo *) 0)->_sifields - $1 = (union {...} *) 0x10 - -Actually, it seems that the union member is located at offset 0x10 -- which -means that gcc has inserted 4 bytes of padding between the members ``si_code`` -and ``_sifields``. We can now get a fuller picture of the memory dump:: - - _----------------------------=> si_code - / _--------------------=> (padding) - | / _------------=> _sifields(._kill._pid) - | | / _----=> _sifields(._kill._uid) - | | | / - -------|-------|-------|-------| - 80000000000000000000000000000000000000000088ffff0000000000000000 - i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u - -This allows us to realize another important fact: ``si_code`` contains the -value 0x80. Remember that x86 is little endian, so the first 4 bytes -"80000000" are really the number 0x00000080. With a bit of research, we -find that this is actually the constant ``SI_KERNEL`` defined in -``include/asm-generic/siginfo.h``:: - - 144 #define SI_KERNEL 0x80 /* sent by the kernel from somewhere */ - -This macro is used in exactly one place in the x86 kernel: In ``send_signal()`` -in ``kernel/signal.c``:: - - 816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t, - 817 int group) - 818 { - ... - 828 pending = group ? &t->signal->shared_pending : &t->pending; - ... 
- 851 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN && - 852 (is_si_special(info) || - 853 info->si_code >= 0))); - 854 if (q) { - 855 list_add_tail(&q->list, &pending->list); - 856 switch ((unsigned long) info) { - ... - 865 case (unsigned long) SEND_SIG_PRIV: - 866 q->info.si_signo = sig; - 867 q->info.si_errno = 0; - 868 q->info.si_code = SI_KERNEL; - 869 q->info.si_pid = 0; - 870 q->info.si_uid = 0; - 871 break; - ... - 890 } - -Not only does this match with the ``.si_code`` member, it also matches the place -we found earlier when looking for where siginfo_t objects are enqueued on the -``shared_pending`` list. - -So to sum up: It seems that it is the padding introduced by the compiler -between two struct fields that is uninitialized, and this gets reported when -we do a ``memcpy()`` on the struct. This means that we have identified a false -positive warning. - -Normally, kmemcheck will not report uninitialized accesses in ``memcpy()`` calls -when both the source and destination addresses are tracked. (Instead, we copy -the shadow bytemap as well). In this case, the destination address clearly -was not tracked. We can dig a little deeper into the stack trace from above:: - - arch/x86/kernel/signal.c:805 - arch/x86/kernel/signal.c:871 - arch/x86/kernel/entry_64.S:694 - -And we clearly see that the destination siginfo object is located on the -stack:: - - 782 static void do_signal(struct pt_regs *regs) - 783 { - 784 struct k_sigaction ka; - 785 siginfo_t info; - ... - 804 signr = get_signal_to_deliver(&info, &ka, regs, NULL); - ... - 854 } - -And this ``&info`` is what eventually gets passed to ``copy_siginfo()`` as the -destination argument. - -Now, even though we didn't find an actual error here, the example is still a -good one, because it shows how one would go about to find out what the report -was all about. - - -Annotating false positives -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are a few different ways to make annotations in the source code that -will keep kmemcheck from checking and reporting certain allocations. Here -they are: - -- ``__GFP_NOTRACK_FALSE_POSITIVE`` - This flag can be passed to ``kmalloc()`` or ``kmem_cache_alloc()`` - (therefore also to other functions that end up calling one of - these) to indicate that the allocation should not be tracked - because it would lead to a false positive report. This is a "big - hammer" way of silencing kmemcheck; after all, even if the false - positive pertains to particular field in a struct, for example, we - will now lose the ability to find (real) errors in other parts of - the same struct. - - Example:: - - /* No warnings will ever trigger on accessing any part of x */ - x = kmalloc(sizeof *x, GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE); - -- ``kmemcheck_bitfield_begin(name)``/``kmemcheck_bitfield_end(name)`` and - ``kmemcheck_annotate_bitfield(ptr, name)`` - The first two of these three macros can be used inside struct - definitions to signal, respectively, the beginning and end of a - bitfield. Additionally, this will assign the bitfield a name, which - is given as an argument to the macros. - - Having used these markers, one can later use - kmemcheck_annotate_bitfield() at the point of allocation, to indicate - which parts of the allocation is part of a bitfield. 
- - Example:: - - struct foo { - int x; - - kmemcheck_bitfield_begin(flags); - int flag_a:1; - int flag_b:1; - kmemcheck_bitfield_end(flags); - - int y; - }; - - struct foo *x = kmalloc(sizeof *x); - - /* No warnings will trigger on accessing the bitfield of x */ - kmemcheck_annotate_bitfield(x, flags); - - Note that ``kmemcheck_annotate_bitfield()`` can be used even before the - return value of ``kmalloc()`` is checked -- in other words, passing NULL - as the first argument is legal (and will do nothing). - - -Reporting errors ----------------- - -As we have seen, kmemcheck will produce false positive reports. Therefore, it -is not very wise to blindly post kmemcheck warnings to mailing lists and -maintainers. Instead, I encourage maintainers and developers to find errors -in their own code. If you get a warning, you can try to work around it, try -to figure out if it's a real error or not, or simply ignore it. Most -developers know their own code and will quickly and efficiently determine the -root cause of a kmemcheck report. This is therefore also the most efficient -way to work with kmemcheck. - -That said, we (the kmemcheck maintainers) will always be on the lookout for -false positives that we can annotate and silence. So whatever you find, -please drop us a note privately! Kernel configs and steps to reproduce (if -available) are of course a great help too. - -Happy hacking! - - -Technical description ---------------------- - -kmemcheck works by marking memory pages non-present. This means that whenever -somebody attempts to access the page, a page fault is generated. The page -fault handler notices that the page was in fact only hidden, and so it calls -on the kmemcheck code to make further investigations. - -When the investigations are completed, kmemcheck "shows" the page by marking -it present (as it would be under normal circumstances). This way, the -interrupted code can continue as usual. - -But after the instruction has been executed, we should hide the page again, so -that we can catch the next access too! Now kmemcheck makes use of a debugging -feature of the processor, namely single-stepping. When the processor has -finished the one instruction that generated the memory access, a debug -exception is raised. From here, we simply hide the page again and continue -execution, this time with the single-stepping feature turned off. - -kmemcheck requires some assistance from the memory allocator in order to work. -The memory allocator needs to - - 1. Tell kmemcheck about newly allocated pages and pages that are about to - be freed. This allows kmemcheck to set up and tear down the shadow memory - for the pages in question. The shadow memory stores the status of each - byte in the allocation proper, e.g. whether it is initialized or - uninitialized. - - 2. Tell kmemcheck which parts of memory should be marked uninitialized. - There are actually a few more states, such as "not yet allocated" and - "recently freed". - -If a slab cache is set up using the SLAB_NOTRACK flag, it will never return -memory that can take page faults because of kmemcheck. - -If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still -request memory with the __GFP_NOTRACK or __GFP_NOTRACK_FALSE_POSITIVE flags. -This does not prevent the page faults from occurring, however, but marks the -object in question as being initialized so that no warnings will ever be -produced for this object. - -Currently, the SLAB and SLUB allocators are supported by kmemcheck. 
diff --git a/Documentation/device-mapper/boot.txt b/Documentation/device-mapper/boot.txt new file mode 100644 index 000000000000..adcaad5e5e32 --- /dev/null +++ b/Documentation/device-mapper/boot.txt @@ -0,0 +1,42 @@ +Boot time creation of mapped devices +==================================== + +It is possible to configure a device mapper device to act as the root +device for your system in two ways. + +The first is to build an initial ramdisk which boots to a minimal +userspace which configures the device, then pivot_root(8) in to it. + +For simple device mapper configurations, it is possible to boot directly +using the following kernel command line: + +dm="<name> <uuid> <ro>,table line 1,...,table line n" + +name = the name to associate with the device + after boot, udev, if used, will use that name to label + the device node. +uuid = may be 'none' or the UUID desired for the device. +ro = may be "ro" or "rw". If "ro", the device and device table will be + marked read-only. + +Each table line may be as normal when using the dmsetup tool except for +two variations: +1. Any use of commas will be interpreted as a newline. +2. Quotation marks cannot be escaped and cannot be used without + terminating the dm= argument. + +Unless renamed by udev, the device node created will be dm-0, as the +first minor number for the device-mapper is used during early creation. + +Example +======= + +- Booting to a linear array made up of user-mode linux block devices: + + dm="lroot none 0, 0 4096 linear 98:16 0, 4096 4096 linear 98:32 0" \ root=/dev/dm-0 + +This will boot to a rw dm-linear target of 8192 sectors split across two +block devices identified by their major:minor numbers. After boot, udev +will rename this target to /dev/mapper/lroot (depending on the rules). +No uuid was assigned. diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt index 1699a55b7b70..ef639960b272 100644 --- a/Documentation/device-mapper/thin-provisioning.txt +++ b/Documentation/device-mapper/thin-provisioning.txt @@ -112,9 +112,11 @@ $low_water_mark is expressed in blocks of size $data_block_size. If free space on the data device drops below this level then a dm event will be triggered which a userspace daemon should catch allowing it to extend the pool device. Only one such event will be sent. -Resuming a device with a new table itself triggers an event so the -userspace daemon can use this to detect a situation where a new table -already exceeds the threshold. + +No special event is triggered if a just resumed device's free space is below +the low water mark. However, resuming a device always triggers an +event; a userspace daemon should verify that free space exceeds the low +water mark when handling this event. A low water mark for the metadata device is maintained in the kernel and will trigger a dm event if free space on the metadata device drops below diff --git a/Documentation/device-mapper/verity.txt b/Documentation/device-mapper/verity.txt index 89fd8f9a259f..b3d2e4a42255 100644 --- a/Documentation/device-mapper/verity.txt +++ b/Documentation/device-mapper/verity.txt @@ -109,6 +109,17 @@ fec_start This is the offset, in blocks, from the start of the FEC device to the beginning of the encoding data. +check_at_most_once + Verify data blocks only the first time they are read from the data device, + rather than every time. This reduces the overhead of dm-verity so that it + can be used on systems that are memory and/or CPU constrained.
However, it + provides a reduced level of security because only offline tampering of the + data device's content will be detected, not online tampering. + + Hash blocks are still verified each time they are read from the hash device, + since verification of hash blocks is less performance critical than data + blocks, and a hash block will not be verified any more after all the data + blocks it covers have been verified anyway. Theory of operation =================== diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt index 7eda08eb8a1e..a2b6a8a565a7 100644 --- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt +++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt @@ -20,6 +20,7 @@ Required properties : - "allwinner,sun50i-a64-ccu" - "allwinner,sun50i-a64-r-ccu" - "allwinner,sun50i-h5-ccu" + - "allwinner,sun50i-h6-ccu" - "nextthing,gr8-ccu" - reg: Must contain the registers base address and length @@ -31,6 +32,9 @@ Required properties : - #clock-cells : must contain 1 - #reset-cells : must contain 1 +For the main CCU on H6, one more clock is needed: +- "iosc": the SoC's internal frequency oscillator + For the PRCM CCUs on A83T/H3/A64, two more clocks are needed: - "pll-periph": the SoC's peripheral PLL from the main CCU - "iosc": the SoC's internal frequency oscillator diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.txt b/Documentation/devicetree/bindings/display/panel/panel-common.txt index ec52c472c845..0603af877155 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-common.txt +++ b/Documentation/devicetree/bindings/display/panel/panel-common.txt @@ -38,7 +38,7 @@ Display Timings require specific display timings. The panel-timing subnode expresses those timings as specified in the timing subnode section of the display timing bindings defined in - Documentation/devicetree/bindings/display/display-timing.txt. + Documentation/devicetree/bindings/display/panel/display-timing.txt. Connectivity diff --git a/Documentation/devicetree/bindings/display/panel/toppoly,td028ttec1.txt b/Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt similarity index 84% rename from Documentation/devicetree/bindings/display/panel/toppoly,td028ttec1.txt rename to Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt index 7175dc3740ac..ed34253d9fb1 100644 --- a/Documentation/devicetree/bindings/display/panel/toppoly,td028ttec1.txt +++ b/Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt @@ -2,7 +2,7 @@ Toppoly TD028TTEC1 Panel ======================== Required properties: -- compatible: "toppoly,td028ttec1" +- compatible: "tpo,td028ttec1" Optional properties: - label: a symbolic name for the panel @@ -14,7 +14,7 @@ Example ------- lcd-panel: td028ttec1@0 { - compatible = "toppoly,td028ttec1"; + compatible = "tpo,td028ttec1"; reg = <0>; spi-max-frequency = <100000>; spi-cpol; diff --git a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt index 217a90eaabe7..9c38bbe7e6d7 100644 --- a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt +++ b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt @@ -11,7 +11,11 @@ Required properties: interrupts. Optional properties: -- clocks: Optional reference to the clock used by the XOR engine. +- clocks: Optional reference to the clocks used by the XOR engine. 
+- clock-names: mandatory if there is a second clock, in this case the + name must be "core" for the first clock and "reg" for the second + one + Example: diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt index 891db41e9420..98d7898fcd78 100644 --- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt +++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt @@ -25,6 +25,7 @@ Required Properties: - "renesas,dmac-r8a7794" (R-Car E2) - "renesas,dmac-r8a7795" (R-Car H3) - "renesas,dmac-r8a7796" (R-Car M3-W) + - "renesas,dmac-r8a77965" (R-Car M3-N) - "renesas,dmac-r8a77970" (R-Car V3M) - reg: base address and length of the registers block for the DMAC diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt index a122723907ac..99acc712f83a 100644 --- a/Documentation/devicetree/bindings/dma/snps-dma.txt +++ b/Documentation/devicetree/bindings/dma/snps-dma.txt @@ -64,6 +64,6 @@ Example: reg = <0xe0000000 0x1000>; interrupts = <0 35 0x4>; dmas = <&dmahost 12 0 1>, - <&dmahost 13 0 1 0>; + <&dmahost 13 1 0>; dma-names = "rx", "rx"; }; diff --git a/Documentation/devicetree/bindings/hwmon/jc42.txt b/Documentation/devicetree/bindings/hwmon/jc42.txt index 07a250498fbb..f569db58f64a 100644 --- a/Documentation/devicetree/bindings/hwmon/jc42.txt +++ b/Documentation/devicetree/bindings/hwmon/jc42.txt @@ -34,6 +34,10 @@ Required properties: - reg: I2C address +Optional properties: +- smbus-timeout-disable: When set, the smbus timeout function will be disabled. + This is not supported on all chips. + Example: temp-sensor@1a { diff --git a/Documentation/devicetree/bindings/misc/memory-state-time.txt b/Documentation/devicetree/bindings/misc/memory-state-time.txt new file mode 100644 index 000000000000..c99a506c030d --- /dev/null +++ b/Documentation/devicetree/bindings/misc/memory-state-time.txt @@ -0,0 +1,8 @@ +Memory bandwidth and frequency state tracking + +Required properties: +- compatible : should be: + "memory-state-time" +- freq-tbl: Should contain entries with each frequency in Hz. +- bw-buckets: Should contain upper-bound limits for each bandwidth bucket in Mbps. + Must match the framework power_profile.xml for the device. diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt index 8acf51a4dfa8..47a6a7fe0b86 100644 --- a/Documentation/devicetree/bindings/net/dsa/b53.txt +++ b/Documentation/devicetree/bindings/net/dsa/b53.txt @@ -10,6 +10,7 @@ Required properties: "brcm,bcm53128" "brcm,bcm5365" "brcm,bcm5395" + "brcm,bcm5389" "brcm,bcm5397" "brcm,bcm5398" diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.txt b/Documentation/devicetree/bindings/net/dsa/qca8k.txt index 9c67ee4890d7..bbcb255c3150 100644 --- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt +++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt @@ -2,7 +2,10 @@ Required properties: -- compatible: should be "qca,qca8337" +- compatible: should be one of: + "qca,qca8334" + "qca,qca8337" + - #size-cells: must be 0 - #address-cells: must be 1 @@ -14,6 +17,20 @@ port and PHY id, each subnode describing a port needs to have a valid phandle referencing the internal PHY connected to it. The CPU port of this switch is always port 0. +A CPU port node has the following optional node: + +- fixed-link : Fixed-link subnode describing a link to a non-MDIO + managed entity. 
See + Documentation/devicetree/bindings/net/fixed-link.txt + for details. + +For QCA8K the 'fixed-link' sub-node supports only the following properties: + +- 'speed' (integer, mandatory), to indicate the link speed. Accepted + values are 10, 100 and 1000. +- 'full-duplex' (boolean, optional), to indicate that full duplex is + used. When absent, half duplex is assumed. + Example: @@ -53,6 +70,10 @@ Example: label = "cpu"; ethernet = <&gmac1>; phy-mode = "rgmii"; + fixed-link { + speed = <1000>; + full-duplex; + }; }; port@1 { diff --git a/Documentation/devicetree/bindings/net/meson-dwmac.txt b/Documentation/devicetree/bindings/net/meson-dwmac.txt index 354dd9896bb5..910187ebf1ce 100644 --- a/Documentation/devicetree/bindings/net/meson-dwmac.txt +++ b/Documentation/devicetree/bindings/net/meson-dwmac.txt @@ -10,6 +10,7 @@ Required properties on all platforms: - "amlogic,meson6-dwmac" - "amlogic,meson8b-dwmac" - "amlogic,meson-gxbb-dwmac" + - "amlogic,meson-axg-dwmac" Additionally "snps,dwmac" and any applicable more detailed version number described in net/stmmac.txt should be used. diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt index 6f2ec9af0de2..dee9520224a9 100644 --- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt @@ -55,9 +55,9 @@ pins it needs, and how they should be configured, with regard to muxer configuration, drive strength and pullups. If one of these options is not set, its actual value will be unspecified. -This driver supports the generic pin multiplexing and configuration -bindings. For details on each properties, you can refer to -./pinctrl-bindings.txt. +Allwinner A1X Pin Controller supports the generic pin multiplexing and +configuration bindings. For details on each property, you can refer to +./pinctrl-bindings.txt.
Required sub-node properties: - pins diff --git a/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt index 47284f85ec80..c3f9826692bc 100644 --- a/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt @@ -20,7 +20,8 @@ Required subnode-properties: gpio: cpuclkoutgrp0, udlclkoutgrp0, i2c1grp0, i2c2grp0, i2c3grp0, i2s0grp0, i2s1grp0, i2srefclkgrp0, spi0grp0, spi1grp0, pciedebuggrp0, uart0grp0, uart0grp1, uart1grp0, - uart2grp0, uart2grp1, uart3grp0, uart4grp0, uart5grp0 + uart2grp0, uart2grp1, uart3grp0, uart4grp0, uart5grp0, + uart5nocts cpuclkout: cpuclkoutgrp0 udlclkout: udlclkoutgrp0 i2c1: i2c1grp0 @@ -37,7 +38,7 @@ Required subnode-properties: uart2: uart2grp0, uart2grp1 uart3: uart3grp0 uart4: uart4grp0 - uart5: uart5grp0 + uart5: uart5grp0, uart5nocts nand: nandgrp0 sdio0: sdio0grp0 sdio1: sdio1grp0 diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt index 2392557ede27..df77d394edc0 100644 --- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt @@ -3,8 +3,10 @@ Required properties for the root node: - compatible: one of "amlogic,meson8-cbus-pinctrl" "amlogic,meson8b-cbus-pinctrl" + "amlogic,meson8m2-cbus-pinctrl" "amlogic,meson8-aobus-pinctrl" "amlogic,meson8b-aobus-pinctrl" + "amlogic,meson8m2-aobus-pinctrl" "amlogic,meson-gxbb-periphs-pinctrl" "amlogic,meson-gxbb-aobus-pinctrl" "amlogic,meson-gxl-periphs-pinctrl" diff --git a/Documentation/devicetree/bindings/power/mti,mips-cpc.txt b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt new file mode 100644 index 000000000000..c6b82511ae8a --- /dev/null +++ b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt @@ -0,0 +1,8 @@ +Binding for MIPS Cluster Power Controller (CPC). + +This binding allows a system to specify where the CPC registers are +located. + +Required properties: +compatible : Should be "mti,mips-cpc". +reg: Should describe the address & size of the CPC register region. diff --git a/Documentation/devicetree/bindings/scheduler/sched-energy-costs.txt b/Documentation/devicetree/bindings/scheduler/sched-energy-costs.txt new file mode 100644 index 000000000000..2ceb202462cb --- /dev/null +++ b/Documentation/devicetree/bindings/scheduler/sched-energy-costs.txt @@ -0,0 +1,378 @@ +=========================================================== +Energy cost bindings for Energy Aware Scheduling +=========================================================== + +=========================================================== +1 - Introduction +=========================================================== + +This note specifies bindings required for energy-aware scheduling +(EAS)[1]. Historically, the scheduler's primary objective has been +performance. EAS aims to provide an alternative objective - energy +efficiency. EAS relies on a simple platform energy cost model to +guide scheduling decisions. The model only considers the CPU +subsystem. + +This note is aligned with the definition of the layout of physical +CPUs in the system as described in the ARM topology binding +description [2]. The concept is applicable to any system so long as +the cost model data is provided for those processing elements in +that system's topology that EAS is required to service.
+ +Processing elements refer to hardware threads, CPUs and clusters of +related CPUs in increasing order of hierarchy. + +EAS requires two key cost metrics - busy costs and idle costs. Busy +costs comprise a list of compute capacities for the processing +element in question and the corresponding power consumption at that +capacity. Idle costs comprise a list of power consumption values +for each idle state [C-state] that the processing element supports. +For a detailed description of these metrics, their derivation and +their use, see [3]. + +These cost metrics are required for processing elements in all +scheduling domain levels that EAS is required to service. + +=========================================================== +2 - energy-costs node +=========================================================== + +Energy costs for the processing elements in scheduling domains that +EAS is required to service are defined in the energy-costs node, +which acts as a container for the actual per processing element cost +nodes. A single energy-costs node is required for a given system. + +- energy-costs node + + Usage: Required + + Description: The energy-costs node is a container node and + its sub-nodes describe costs for each processing element at + all scheduling domain levels that EAS is required to + service. + + Node name must be "energy-costs". + + The energy-costs node's parent node must be the cpus node. + + The energy-costs node's child nodes can be: + + - one or more cost nodes. + + Any other configuration is considered invalid. + +The energy-costs node can only contain a single type of child node +whose bindings are described in paragraph 4. + +=========================================================== +3 - energy-costs node child nodes naming convention +=========================================================== + +energy-costs child nodes must follow a naming convention where the +node name must be "thread-costN", "core-costN", "cluster-costN" +depending on whether the costs in the node are for a thread, core or +cluster. N (where N = {0, 1, ...}) is the node number and has no +bearing on the OS' logical thread, core or cluster index. + +=========================================================== +4 - cost node bindings +=========================================================== + +Bindings for cost nodes are defined as follows: + +- system-cost node + + Description: Optional. Must be declared within an energy-costs + node. A system should contain no more than one system-cost node. + + Systems with no modelled system cost should not provide this + node. + + The system-cost node name must be "system-costN" as + described in 3 above. + + A system-cost node must be a leaf node with no children. + + Properties for system-cost nodes are described in paragraph + 5 below. + + Any other configuration is considered invalid. + +- cluster-cost node + + Description: must be declared within an energy-costs node. A + system can contain multiple clusters and each cluster + serviced by EAS must have a corresponding cluster-cost + node. + + The cluster-cost node name must be "cluster-costN" as + described in 3 above. + + A cluster-cost node must be a leaf node with no children. + + Properties for cluster-cost nodes are described in paragraph + 5 below. + + Any other configuration is considered invalid. + +- core-cost node + + Description: must be declared within an energy-costs node. A + system can contain multiple cores and each core serviced by + EAS must have a corresponding core-cost node.
+ + The core-cost node name must be "core-costN" as described in + 3 above. + + A core-cost node must be a leaf node with no children. + + Properties for core-cost nodes are described in paragraph + 5 below. + + Any other configuration is considered invalid. + +- thread-cost node + + Description: must be declared within an energy-costs node. A + system can contain cores with multiple hardware threads and + each thread serviced by EAS must have a corresponding + thread-cost node. + + The thread-cost node name must be "thread-costN" as described in + 3 above. + + A thread-cost node must be a leaf node with no children. + + Properties for thread-cost nodes are described in paragraph + 5 below. + + Any other configuration is considered invalid. + +=========================================================== +5 - Cost node properties +=========================================================== + +All cost node types must have only the following properties: + +- busy-cost-data + + Usage: required + Value type: An array of 2-item tuples. Each item is of type + u32. + Definition: The first item in the tuple is the capacity + value as described in [3]. The second item in the tuple is + the energy cost value as described in [3]. + +- idle-cost-data + + Usage: required + Value type: An array of 1-item tuples. The item is of type + u32. + Definition: The item in the tuple is the energy cost value + as described in [3]. + +=========================================================== +6 - Extensions to the cpu node +=========================================================== + +The cpu node is extended with a property that establishes the +connection between the processing element represented by the cpu +node and the cost-nodes associated with this processing element. + +The connection is expressed in line with the topological hierarchy +that this processing element belongs to, starting with the level in +the hierarchy that this processing element itself belongs to through +to the highest level that EAS is required to service. The +connection cannot be sparse and must be contiguous from the +processing element's level through to the highest desired level. The +highest desired level must be the same for all processing elements. + +Example: Given that a cpu node may represent a thread that is a part +of a core, this property may contain multiple elements which +associate the thread with cost nodes describing the costs for the +thread itself, the core the thread belongs to, the cluster the core +belongs to and so on. The elements must be ordered from the lowest +level nodes to the highest desired level that EAS must service. The +highest desired level must be the same for all cpu nodes. The +elements must not be sparse: there must be elements for the current +thread, the next level of hierarchy (core) and so on without any +'holes'. + +Example: Given that a cpu node may represent a core that is a part +of a cluster of related cpus, this property may contain multiple +elements which associate the core with cost nodes describing the +costs for the core itself, the cluster the core belongs to and so +on. The elements must be ordered from the lowest level nodes to the +highest desired level that EAS must service. The highest desired +level must be the same for all cpu nodes. The elements must not be +sparse: there must be elements for the current core, the next +level of hierarchy (cluster) and so on without any 'holes'.
+ +If the system comprises hierarchical clusters of clusters, this +property will contain multiple associations with the relevant number +of cluster elements in hierarchical order. + +Property added to the cpu node: + +- sched-energy-costs + + Usage: required + Value type: List of phandles + Definition: a list of phandles to specific cost nodes in the + energy-costs parent node that correspond to the processing + element represented by this cpu node in hierarchical order + of topology. + + The order of phandles in the list is significant. The first + phandle is to the current processing element's own cost + node. Subsequent phandles are to higher hierarchical level + cost nodes up until the maximum level that EAS is to + service. + + All cpu nodes must have the same highest level cost node. + + The phandle list must not be sparsely populated with handles + to non-contiguous hierarchical levels. See commentary above + for clarity. + + Any other configuration is invalid. + +=========================================================== +7 - Example dts +=========================================================== + +Example 1 (ARM 64-bit, 6-cpu system, two clusters of cpus, one +cluster of 2 Cortex-A57 cpus, one cluster of 4 Cortex-A53 cpus): + +cpus { + #address-cells = <2>; + #size-cells = <0>; + . + . + . + A57_0: cpu@0 { + compatible = "arm,cortex-a57","arm,armv8"; + reg = <0x0 0x0>; + device_type = "cpu"; + enable-method = "psci"; + next-level-cache = <&A57_L2>; + clocks = <&scpi_dvfs 0>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; + }; + + A57_1: cpu@1 { + compatible = "arm,cortex-a57","arm,armv8"; + reg = <0x0 0x1>; + device_type = "cpu"; + enable-method = "psci"; + next-level-cache = <&A57_L2>; + clocks = <&scpi_dvfs 0>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; + }; + + A53_0: cpu@100 { + compatible = "arm,cortex-a53","arm,armv8"; + reg = <0x0 0x100>; + device_type = "cpu"; + enable-method = "psci"; + next-level-cache = <&A53_L2>; + clocks = <&scpi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>; + }; + + A53_1: cpu@101 { + compatible = "arm,cortex-a53","arm,armv8"; + reg = <0x0 0x101>; + device_type = "cpu"; + enable-method = "psci"; + next-level-cache = <&A53_L2>; + clocks = <&scpi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>; + }; + + A53_2: cpu@102 { + compatible = "arm,cortex-a53","arm,armv8"; + reg = <0x0 0x102>; + device_type = "cpu"; + enable-method = "psci"; + next-level-cache = <&A53_L2>; + clocks = <&scpi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>; + }; + + A53_3: cpu@103 { + compatible = "arm,cortex-a53","arm,armv8"; + reg = <0x0 0x103>; + device_type = "cpu"; + enable-method = "psci"; + next-level-cache = <&A53_L2>; + clocks = <&scpi_dvfs 1>; + cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>; + }; + + energy-costs { + CPU_COST_0: core-cost0 { + busy-cost-data = < + 417 168 + 579 251 + 744 359 + 883 479 + 1024 616 + >; + idle-cost-data = < + 15 + 0 + >; + }; + CPU_COST_1: core-cost1 { + busy-cost-data = < + 235 33 + 302 46 + 368 61 + 406 76 + 447 93 + >; + idle-cost-data = < + 6 + 0 + >; + }; + CLUSTER_COST_0: cluster-cost0 { + busy-cost-data = < + 417 24 + 579 32 + 744 43 + 883 49 +
1024 64 + >; + idle-cost-data = < + 65 + 24 + >; + }; + CLUSTER_COST_1: cluster-cost1 { + busy-cost-data = < + 235 26 + 303 30 + 368 39 + 406 47 + 447 57 + >; + idle-cost-data = < + 56 + 17 + >; + }; + }; +}; + +=============================================================================== +[1] https://lkml.org/lkml/2015/5/12/728 +[2] Documentation/devicetree/bindings/topology.txt +[3] Documentation/scheduler/sched-energy.txt diff --git a/Documentation/devicetree/bindings/serial/8250.txt b/Documentation/devicetree/bindings/serial/8250.txt index dad3b2ec66d4..aeb6db4e35c3 100644 --- a/Documentation/devicetree/bindings/serial/8250.txt +++ b/Documentation/devicetree/bindings/serial/8250.txt @@ -24,6 +24,7 @@ Required properties: - "ti,da830-uart" - "aspeed,ast2400-vuart" - "aspeed,ast2500-vuart" + - "nuvoton,npcm750-uart" - "serial" if the port type is unknown. - reg : offset and length of the register set for the device. - interrupts : should contain uart interrupt. diff --git a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt index 8ff65fa632fd..c06c045126fc 100644 --- a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt +++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt @@ -21,7 +21,7 @@ Required properties: - interrupts : identifier to the device interrupt - clocks : a list of phandle + clock-specifier pairs, one for each entry in clock names. -- clocks-names : +- clock-names : * "xtal" for external xtal clock identifier * "pclk" for the bus core clock, either the clk81 clock or the gate clock * "baud" for the source of the baudrate generator, can be either the xtal diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt index cf504d0380ae..88f947c47adc 100644 --- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt +++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt @@ -41,6 +41,8 @@ Required properties: - "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART. - "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART. - "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART. + - "renesas,scif-r8a77965" for R8A77965 (R-Car M3-N) SCIF compatible UART. + - "renesas,hscif-r8a77965" for R8A77965 (R-Car M3-N) HSCIF compatible UART. - "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART. - "renesas,hscif-r8a77970" for R8A77970 (R-Car V3M) HSCIF compatible UART. - "renesas,scif-r8a77995" for R8A77995 (R-Car D3) SCIF compatible UART. diff --git a/Documentation/devicetree/bindings/timer/renesas,cmt.txt b/Documentation/devicetree/bindings/timer/renesas,cmt.txt index 6ca6b9e582a0..d740989eb569 100644 --- a/Documentation/devicetree/bindings/timer/renesas,cmt.txt +++ b/Documentation/devicetree/bindings/timer/renesas,cmt.txt @@ -20,16 +20,16 @@ Required Properties: (CMT1 on sh73a0 and r8a7740) This is a fallback for the above renesas,cmt-48-* entries. - - "renesas,cmt0-r8a73a4" for the 32-bit CMT0 device included in r8a73a4. - - "renesas,cmt1-r8a73a4" for the 48-bit CMT1 device included in r8a73a4. - - "renesas,cmt0-r8a7790" for the 32-bit CMT0 device included in r8a7790. - - "renesas,cmt1-r8a7790" for the 48-bit CMT1 device included in r8a7790. - - "renesas,cmt0-r8a7791" for the 32-bit CMT0 device included in r8a7791. - - "renesas,cmt1-r8a7791" for the 48-bit CMT1 device included in r8a7791. 
- - "renesas,cmt0-r8a7793" for the 32-bit CMT0 device included in r8a7793. - - "renesas,cmt1-r8a7793" for the 48-bit CMT1 device included in r8a7793. - - "renesas,cmt0-r8a7794" for the 32-bit CMT0 device included in r8a7794. - - "renesas,cmt1-r8a7794" for the 48-bit CMT1 device included in r8a7794. + - "renesas,r8a73a4-cmt0" for the 32-bit CMT0 device included in r8a73a4. + - "renesas,r8a73a4-cmt1" for the 48-bit CMT1 device included in r8a73a4. + - "renesas,r8a7790-cmt0" for the 32-bit CMT0 device included in r8a7790. + - "renesas,r8a7790-cmt1" for the 48-bit CMT1 device included in r8a7790. + - "renesas,r8a7791-cmt0" for the 32-bit CMT0 device included in r8a7791. + - "renesas,r8a7791-cmt1" for the 48-bit CMT1 device included in r8a7791. + - "renesas,r8a7793-cmt0" for the 32-bit CMT0 device included in r8a7793. + - "renesas,r8a7793-cmt1" for the 48-bit CMT1 device included in r8a7793. + - "renesas,r8a7794-cmt0" for the 32-bit CMT0 device included in r8a7794. + - "renesas,r8a7794-cmt1" for the 48-bit CMT1 device included in r8a7794. - "renesas,rcar-gen2-cmt0" for 32-bit CMT0 devices included in R-Car Gen2. - "renesas,rcar-gen2-cmt1" for 48-bit CMT1 devices included in R-Car Gen2. @@ -46,7 +46,7 @@ Required Properties: Example: R8A7790 (R-Car H2) CMT0 and CMT1 nodes cmt0: timer@ffca0000 { - compatible = "renesas,cmt0-r8a7790", "renesas,rcar-gen2-cmt0"; + compatible = "renesas,r8a7790-cmt0", "renesas,rcar-gen2-cmt0"; reg = <0 0xffca0000 0 0x1004>; interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>, <0 142 IRQ_TYPE_LEVEL_HIGH>; @@ -55,7 +55,7 @@ Example: R8A7790 (R-Car H2) CMT0 and CMT1 nodes }; cmt1: timer@e6130000 { - compatible = "renesas,cmt1-r8a7790", "renesas,rcar-gen2-cmt1"; + compatible = "renesas,r8a7790-cmt1", "renesas,rcar-gen2-cmt1"; reg = <0 0xe6130000 0 0x1004>; interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>, <0 121 IRQ_TYPE_LEVEL_HIGH>, diff --git a/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt b/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt new file mode 100644 index 000000000000..18329d39487e --- /dev/null +++ b/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt @@ -0,0 +1,8 @@ +Trusty fiq debugger interface + +Provides a single fiq for the fiq debugger. + +Required properties: +- compatible: compatible = "android,trusty-fiq-v1-*"; where * is a serial port. + +Must be a child of the node that provides fiq support ("android,trusty-fiq-v1"). diff --git a/Documentation/devicetree/bindings/trusty/trusty-fiq.txt b/Documentation/devicetree/bindings/trusty/trusty-fiq.txt new file mode 100644 index 000000000000..de810b955bc9 --- /dev/null +++ b/Documentation/devicetree/bindings/trusty/trusty-fiq.txt @@ -0,0 +1,8 @@ +Trusty fiq interface + +Trusty provides fiq emulation. + +Required properties: +- compatible: "android,trusty-fiq-v1" + +Must be a child of the node that provides the trusty std/fast call interface. diff --git a/Documentation/devicetree/bindings/trusty/trusty-irq.txt b/Documentation/devicetree/bindings/trusty/trusty-irq.txt new file mode 100644 index 000000000000..5aefeb8e536f --- /dev/null +++ b/Documentation/devicetree/bindings/trusty/trusty-irq.txt @@ -0,0 +1,67 @@ +Trusty irq interface + +Trusty requires non-secure irqs to be forwarded to the secure OS. + +Required properties: +- compatible: "android,trusty-irq-v1" + +Optional properties: + +- interrupt-templates: is an optional property that works together + with "interrupt-ranges" to specify secure side to kernel IRQs mapping. 
+ + It is a list of entries, each one of which defines a group of interrupts + having common properties, and has the following format: + < phandle irq_id_pos [templ_data]> + phandle - phandle of interrupt controller this template is for + irq_id_pos - the position of irq id in interrupt specifier array + for interrupt controller referenced by phandle. + templ_data - is an array of u32 values (could be empty) in the same + format as interrupt specifier for interrupt controller + referenced by phandle but with omitted irq id field. + +- interrupt-ranges: list of entries that specifies secure side to kernel + IRQs mapping. + + Each entry in the "interrupt-ranges" list has the following format: + + beg - first entry in this range + end - last entry in this range + templ_idx - index of entry in "interrupt-templates" property + that must be used as a template for all interrupts + in this range + +Example: +{ + gic: interrupt-controller@50041000 { + compatible = "arm,gic-400"; + #interrupt-cells = <3>; + interrupt-controller; + ... + }; + ... + IPI: interrupt-controller { + compatible = "android,CustomIPI"; + #interrupt-cells = <1>; + interrupt-controller; + }; + ... + trusty { + compatible = "android,trusty-smc-v1"; + ranges; + #address-cells = <2>; + #size-cells = <2>; + + irq { + compatible = "android,trusty-irq-v1"; + interrupt-templates = <&IPI 0>, + <&gic 1 GIC_PPI 0>, + <&gic 1 GIC_SPI 0>; + interrupt-ranges = < 0 15 0>, + <16 31 1>, + <32 223 2>; + }; + } +} + +Must be a child of the node that provides the trusty std/fast call interface. diff --git a/Documentation/devicetree/bindings/trusty/trusty-smc.txt b/Documentation/devicetree/bindings/trusty/trusty-smc.txt new file mode 100644 index 000000000000..1b39ad317c67 --- /dev/null +++ b/Documentation/devicetree/bindings/trusty/trusty-smc.txt @@ -0,0 +1,6 @@ +Trusty smc interface + +Trusty is running in secure mode on the same (arm) cpu(s) as the current os. + +Required properties: +- compatible: "android,trusty-smc-v1" diff --git a/Documentation/devicetree/bindings/usb/usb-device.txt b/Documentation/devicetree/bindings/usb/usb-device.txt index ce02cebac26a..464ddf7b509a 100644 --- a/Documentation/devicetree/bindings/usb/usb-device.txt +++ b/Documentation/devicetree/bindings/usb/usb-device.txt @@ -11,7 +11,7 @@ Required properties: be used, but a device adhering to this binding may leave out all except for usbVID,PID. - reg: the port number which this device is connecting to, the range - is 1-31. + is 1-255. 
Example: diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt index 2d80b60eeabe..7a69b8b47b97 100644 --- a/Documentation/devicetree/bindings/usb/usb-xhci.txt +++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt @@ -12,6 +12,7 @@ Required properties: - "renesas,xhci-r8a7793" for r8a7793 SoC - "renesas,xhci-r8a7795" for r8a7795 SoC - "renesas,xhci-r8a7796" for r8a7796 SoC + - "renesas,xhci-r8a77965" for r8a77965 SoC - "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 compatible device - "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device - "xhci-platform" (deprecated) diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 1afd298eddd7..f4a98c85340a 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -172,6 +172,7 @@ karo Ka-Ro electronics GmbH keithkoep Keith & Koep GmbH keymile Keymile GmbH khadas Khadas +kiebackpeter Kieback & Peter GmbH kinetic Kinetic Technologies kingnovel Kingnovel Technology Co., Ltd. kosagi Sutajio Ko-Usagi PTE Ltd. diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst index a0dc2879a152..4a18ef9997c0 100644 --- a/Documentation/driver-api/pm/devices.rst +++ b/Documentation/driver-api/pm/devices.rst @@ -328,7 +328,10 @@ the phases are: ``prepare``, ``suspend``, ``suspend_late``, ``suspend_noirq``. After the ``->prepare`` callback method returns, no new children may be registered below the device. The method may also prepare the device or driver in some way for the upcoming system power transition, but it - should not put the device into a low-power state. + should not put the device into a low-power state. Moreover, if the + device supports runtime power management, the ``->prepare`` callback + method must not update its state in case it is necessary to resume it + from runtime suspend later on. For devices supporting runtime power management, the return value of the prepare callback can be used to indicate to the PM core that it may @@ -356,6 +359,16 @@ the phases are: ``prepare``, ``suspend``, ``suspend_late``, ``suspend_noirq``. the appropriate low-power state, depending on the bus type the device is on, and they may enable wakeup events. + However, for devices supporting runtime power management, the + ``->suspend`` methods provided by subsystems (bus types and PM domains + in particular) must follow an additional rule regarding what can be done + to the devices before their drivers' ``->suspend`` methods are called. + Namely, they can only resume the devices from runtime suspend by + calling :c:func:`pm_runtime_resume` for them, if that is necessary, and + they must not update the state of the devices in any other way at that + time (in case the drivers need to resume the devices from runtime + suspend in their ``->suspend`` methods). + 3. For a number of devices it is convenient to split suspend into the "quiesce device" and "save device state" phases, in which cases ``suspend_late`` is meant to do the latter. It is always executed after @@ -729,6 +742,16 @@ state temporarily, for example so that its system wakeup capability can be disabled. This all depends on the hardware and the design of the subsystem and device driver in question. 
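+As an illustration of the approach described in the next paragraph, a driver
+might force its device out of runtime suspend from its ``->suspend`` callback
+before touching the hardware (a sketch only; the ``foo_*`` helpers are
+hypothetical):
+
+.. code-block:: c
+
+	static int foo_suspend(struct device *dev)
+	{
+		struct foo *foo = dev_get_drvdata(dev);
+
+		/* The device may be runtime-suspended: resume it first. */
+		pm_runtime_resume(dev);
+
+		foo_save_state(foo);	/* hypothetical helpers */
+		foo_disable(foo);
+		return 0;
+	}
+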
+If it is necessary to resume a device from runtime suspend during a system-wide
+transition into a sleep state, that can be done by calling
+:c:func:`pm_runtime_resume` for it from the ``->suspend`` callback (or its
+counterpart for transitions related to hibernation) of either the device's
+driver or a subsystem responsible for it (for example, a bus type or a PM
+domain).  That is guaranteed to work by the requirement that subsystems must
+not change the state of devices (possibly except for resuming them from
+runtime suspend) from their ``->prepare`` and ``->suspend`` callbacks (or
+equivalent) *before* invoking device drivers' ``->suspend`` callbacks (or
+equivalent).
+
 During system-wide resume from a sleep state it's easiest to put devices into
 the full-power state, as explained in :file:`Documentation/power/runtime_pm.txt`.
 Refer to that document for more information regarding this particular issue as
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 5a8f7f4d2bca..7449893dc039 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -233,7 +233,7 @@ data_err=ignore(*)	Just print an error message if an error occurs
 data_err=abort		Abort the journal if an error occurs in a file
			data buffer in ordered mode.
-grpid			Give objects the same group ID as their creator.
+grpid			New objects have the group ID of their parent.
 bsdgroups
 
 nogrpid	(*)		New objects have the group ID of their creator.
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index 13c2ff034348..37d8698ca2d6 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -174,6 +174,25 @@ offgrpjquota		 Turn off group journelled quota.
 offprjjquota		 Turn off project journelled quota.
 quota			 Enable plain user disk quota accounting.
 noquota		 Disable all plain disk quota option.
+whint_mode=%s		 Control which write hints are passed down to block
+			 layer. This supports "off", "user-based", and
+			 "fs-based". In "off" mode (default), f2fs does not pass
+			 down hints. In "user-based" mode, f2fs tries to pass
+			 down hints given by users. And in "fs-based" mode, f2fs
+			 passes down hints with its policy.
+alloc_mode=%s		 Adjust block allocation policy, which supports "reuse"
+			 and "default".
+fsync_mode=%s		 Control the policy of fsync. Currently supports "posix",
+			 "strict", and "nobarrier". In "posix" mode, which is
+			 default, fsync will follow POSIX semantics and does a
+			 light operation to improve the filesystem performance.
+			 In "strict" mode, fsync will be heavy and behaves in line
+			 with xfs, ext4 and btrfs, where xfstest generic/342 will
+			 pass, but the performance will regress. "nobarrier" is
+			 based on "posix", but doesn't issue a flush command for
+			 non-atomic files, like the "nobarrier" mount option.
+test_dummy_encryption	 Enable dummy encryption, which provides a fake fscrypt
+			 context. The fake fscrypt context is used by xfstests.
 ================================================================================
 DEBUGFS ENTRIES
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index adba21b5ada7..99ca8e30a4ca 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -396,6 +396,8 @@ is not associated with a file:
 [stack]                  = the stack of the main process
 [vdso]                   = the "virtual dynamic shared object",
                            the kernel system call handler
+[anon:]                  = an anonymous mapping that has been
+                           named by userspace
 or if empty, the mapping is anonymous.
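+
+As an illustration, the name is set from userspace with prctl(); the
+constants below are those used by the patchset carrying this feature and
+are shown here as an assumption, not mainline API:
+
+  #include <sys/mman.h>
+  #include <sys/prctl.h>
+
+  #ifndef PR_SET_VMA
+  #define PR_SET_VMA		0x53564d41
+  #define PR_SET_VMA_ANON_NAME	0
+  #endif
+
+  int main(void)
+  {
+	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+	/* the mapping then appears as [anon:demo] in maps/smaps */
+	prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
+	      (unsigned long)p, 4096, (unsigned long)"demo");
+	return 0;
+  }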
@@ -424,6 +426,7 @@ KernelPageSize:        4 kB
 MMUPageSize:           4 kB
 Locked:                0 kB
 VmFlags: rd ex mr mw me dw
+Name:           name from userspace
 
 the first of these lines shows the same information as is displayed for the
 mapping in /proc/PID/maps. The remaining lines show the size of the mapping
@@ -496,6 +499,9 @@ Note that there is no guarantee that every flag and associated mnemonic will
 be present in all further kernel releases. Things get changed, the flags may
 be vanished or the reverse -- new added.
 
+The "Name" field will only be present on a mapping that has been named by
+userspace, and will show the name passed in by userspace.
+
 This file is only present if the CONFIG_MMU kernel configuration option is
 enabled.
diff --git a/Documentation/index.rst b/Documentation/index.rst
index cb7f1ba5b3b1..380868d12704 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -75,6 +75,7 @@ needed).
    sound/index
    crypto/index
    filesystems/index
+   rpmb/index
 
 Architecture-specific documentation
 -----------------------------------
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 3e3fdae5f3ed..c195b51d9ab0 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -324,6 +324,7 @@ Code  Seq#(hex)	Include File		Comments
 0xB3	00	linux/mmc/ioctl.h
 0xB4	00-0F	linux/gpio.h
 0xB5	00-0F	uapi/linux/rpmsg.h
+0xB5	80-8F	linux/uapi/linux/rpmb.h
 0xC0	00-0F	linux/usb/iowarrior.h
 0xCA	00-0F	uapi/misc/cxl.h
 0xCA	80-BF	uapi/scsi/cxlflash_ioctl.h
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index ac2363ea05c5..82afdb7f0816 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -152,15 +152,6 @@ stripped after they are installed.  If INSTALL_MOD_STRIP is '1', then the
 default option --strip-debug will be used.  Otherwise, INSTALL_MOD_STRIP
 value will be used as the options to the strip command.
 
-INSTALL_FW_PATH
---------------------------------------------------
-INSTALL_FW_PATH specifies where to install the firmware blobs.
-The default value is:
-
-    $(INSTALL_MOD_PATH)/lib/firmware
-
-The value can be overridden in which case the default value is ignored.
-
 INSTALL_HDR_PATH
 --------------------------------------------------
 INSTALL_HDR_PATH specifies where to install user space headers when
diff --git a/Documentation/misc-devices/mei/dal/dal.txt b/Documentation/misc-devices/mei/dal/dal.txt
new file mode 100644
index 000000000000..09801e725a0b
--- /dev/null
+++ b/Documentation/misc-devices/mei/dal/dal.txt
@@ -0,0 +1,126 @@
+Intel(R) Dynamic Application Loader (Intel(R) DAL)
+==================================================
+
+Introduction
+============
+
+Intel Dynamic Application Loader (Intel DAL) is a service that runs
+on the Intel Management Engine (Intel ME). It provides the ability to
+install, run, and interact with Trusted Applets (TAs, small applications
+written in a subset of Java) in a secure environment.
+
+There are two interfaces to Intel DAL from the operating system.
+One from user space, called JHI, and one from kernel space, called KDI.
+User space applications can install and uninstall TAs, and both kernel
+and user space applications can communicate with installed TAs.
+
+Intel DAL Linux Kernel Driver
+=============================
+The driver supports both user space clients and kernel space clients.
+
+For user space clients:
+-----------------------
+For each DAL FW client (IVM, SDM and RTM) the driver exposes a char device
+called /dev/dal{i}, where i is 0-2 respectively.
+
+The user space interface allows sending raw messages from user space
+to a DAL FW client, without any modification.
+The driver sends the raw response messages received from the firmware
+back to the user.
+Usually this interface is used by JHI, the DAL host software (for more
+information, search for dynamic-application-loader-host-interface on GitHub).
+The messages are sent using the char device 'write' function,
+and received using the 'read' function, accordingly.
+
+For kernel space clients:
+-------------------------
+The driver exposes an API in a header file which allows a kernel space
+client to use DAL.
+
+dal_uuid_to_bin - convert a uuid string to bin
+	Input uuid is in either hyphenless or standard format
+	Arguments:
+		uuid_str: uuid string
+		uuid: output param to hold the uuid bin
+
+	Return:
+		0 on success
+		<0 on failure
+
+dal_create_session - create a session to an installed trusted application.
+	Arguments:
+		session_handle: output param to hold the session handle
+		ta_id: trusted application (ta) id
+		acp_pkg: acp file of the ta
+		acp_pkg_len: acp file length
+		init_param: init parameters for the session (optional)
+		init_param_len: length of the init parameters
+
+	Return:
+		0 on success
+		<0 on system failure
+		>0 on DAL FW failure
+
+dal_send_and_receive - send and receive data to/from a ta
+	Arguments:
+		session_handle: session handle
+		command_id: command id
+		input: message to be sent
+		input_len: sent message size
+		output: an output parameter to hold a pointer
+			to the buffer which will contain the received
+			message.
+			This buffer is allocated by the driver and freed
+			by the user
+		output_len: an input and output param -
+			- input: the expected maximum length
+				 of the received message.
+			- output: size of the received message
+		response_code: an output parameter to hold the return
+			value from the applet
+
+	Return:
+		0 on success
+		<0 on system failure
+		>0 on DAL FW failure
+
+dal_close_session - close a ta session
+	Arguments:
+		session_handle: session handle
+
+	Return:
+		0 on success
+		<0 on system failure
+		>0 on DAL FW failure
+
+dal_set_ta_exclusive_access - set the client to be the owner of the ta,
+	so that no one else (especially a user space client)
+	will be able to open a session to it
+	Arguments:
+		ta_id: trusted application (ta) id
+
+	Return:
+		0 on success
+		-ENODEV when the device can't be found
+		-ENOMEM on memory allocation failure
+		-EPERM when the ta is owned by another client
+		-EEXIST when the ta is already owned by the current client
+
+dal_unset_ta_exclusive_access - unset the client from owning the ta
+	Arguments:
+		ta_id: trusted application (ta) id
+
+	Return:
+		0 on success
+		-ENODEV when the device can't be found
+		-ENOENT when the ta isn't found in the exclusiveness ta list
+		-EPERM when the ta is owned by another client
+
+dal_get_version_info - return the DAL driver version
+	Arguments:
+		version_info: output param to hold DAL driver version
+			information
+
+	Return:
+		0 on success
+		-EINVAL on incorrect input
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 77f4de59dc9c..57f55bf0b906 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -508,7 +508,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
	min: Minimal size of receive buffer used by TCP sockets.
	It is guaranteed to each TCP socket, even under moderate memory
	pressure.
- Default: 1 page + Default: 4K default: initial size of receive buffer used by TCP sockets. This value overrides net.core.rmem_default used by other protocols. @@ -608,6 +608,16 @@ tcp_fastopen_blackhole_timeout_sec - INTEGER initial value when the blackhole issue goes away. By default, it is set to 1hr. +tcp_fwmark_accept - BOOLEAN + If set, incoming connections to listening sockets that do not have a + socket mark will set the mark of the accepting socket to the fwmark of + the incoming SYN packet. This will cause all packets on that connection + (starting from the first SYNACK) to be sent with that fwmark. The + listening socket's mark is unchanged. Listening sockets that already + have a fwmark set via setsockopt(SOL_SOCKET, SO_MARK, ...) are + unaffected. + Default: 0 + tcp_syn_retries - INTEGER Number of times initial SYNs for an active TCP connection attempt will be retransmitted. Should not be higher than 127. Default value @@ -666,7 +676,7 @@ tcp_window_scaling - BOOLEAN tcp_wmem - vector of 3 INTEGERs: min, default, max min: Amount of memory reserved for send buffers for TCP sockets. Each TCP socket has rights to use it due to fact of its birth. - Default: 1 page + Default: 4K default: initial size of send buffer used by TCP sockets. This value overrides net.core.wmem_default used by other protocols. diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt index cfc66ea72329..a365656e4873 100644 --- a/Documentation/networking/netdev-FAQ.txt +++ b/Documentation/networking/netdev-FAQ.txt @@ -176,6 +176,15 @@ A: No. See above answer. In short, if you think it really belongs in dash marker line as described in Documentation/process/submitting-patches.rst to temporarily embed that information into the patch that you send. +Q: Are all networking bug fixes backported to all stable releases? + +A: Due to capacity, Dave could only take care of the backports for the last + 2 stable releases. For earlier stable releases, each stable branch maintainer + is supposed to take care of them. If you find any patch is missing from an + earlier stable branch, please notify stable@vger.kernel.org with either a + commit ID or a formal patch backported, and CC Dave and other relevant + networking developers. + Q: Someone said that the comment style and coding convention is different for the networking content. Is this true? diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt index 361789df51ec..d1aecf53badb 100644 --- a/Documentation/printk-formats.txt +++ b/Documentation/printk-formats.txt @@ -397,11 +397,10 @@ struct clk %pC pll1 %pCn pll1 - %pCr 1560000000 For printing struct clk structures. ``%pC`` and ``%pCn`` print the name (Common Clock Framework) or address (legacy clock framework) of the -structure; ``%pCr`` prints the current clock rate. +structure. Passed by reference. diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index 560beaef5a7c..73fcdcd52b87 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -33,7 +33,7 @@ GNU C 3.2 gcc --version GNU make 3.81 make --version binutils 2.20 ld -v util-linux 2.10o fdformat --version -module-init-tools 0.9.10 depmod -V +kmod 13 depmod -V e2fsprogs 1.41.4 e2fsck -V jfsutils 1.1.3 fsck.jfs -V reiserfsprogs 3.6.3 reiserfsck -V @@ -141,12 +141,6 @@ is not build with ``CONFIG_KALLSYMS`` and you have no way to rebuild and reproduce the Oops with that option, then you can still decode that Oops with ksymoops. 
-Module-Init-Tools
------------------
-
-A new module loader is now in the kernel that requires ``module-init-tools``
-to use.  It is backward compatible with the 2.4.x series kernels.
-
 Mkinitrd
 --------
 
@@ -346,16 +340,17 @@ Util-linux
 
-
+Kmod
+----
+
+-
+-
+
 Ksymoops
 --------
 
-
-Module-Init-Tools
------------------
-
--
-
 Mkinitrd
 --------
diff --git a/Documentation/rpmb/conf.py b/Documentation/rpmb/conf.py
new file mode 100644
index 000000000000..15430a0b3a08
--- /dev/null
+++ b/Documentation/rpmb/conf.py
@@ -0,0 +1,5 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "Linux RPMB Subsystem"
+
+tags.add("subproject")
diff --git a/Documentation/rpmb/index.rst b/Documentation/rpmb/index.rst
new file mode 100644
index 000000000000..876a2603e4b5
--- /dev/null
+++ b/Documentation/rpmb/index.rst
@@ -0,0 +1,18 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+==============================================
+Replay Protected Memory Block (RPMB) subsystem
+==============================================
+
+.. toctree::
+
+   introduction
+   simulation-device.rst
+   rpmb-tool.rst
+
+.. only:: subproject
+
+   Indices
+   =======
+
+   * :ref:`genindex`
diff --git a/Documentation/rpmb/introduction.rst b/Documentation/rpmb/introduction.rst
new file mode 100644
index 000000000000..c9de8c1b6618
--- /dev/null
+++ b/Documentation/rpmb/introduction.rst
@@ -0,0 +1,97 @@
+.. SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+============
+Introduction
+============
+
+Several storage technologies, such as eMMC, UFS, and NVMe, support an RPMB
+hardware partition with a common protocol and frame layout.
+The RPMB partition `cannot` be accessed via the standard block layer,
+but only by a set of specific commands:
+
+WRITE, READ, GET_WRITE_COUNTER, and PROGRAM_KEY.
+
+The commands and the data are embedded within :c:type:`rpmb_frame`.
+
+An RPMB partition provides authenticated and replay protected access,
+hence it is suitable as secure storage.
+
+In-kernel API
+-------------
+The RPMB layer aims to provide an in-kernel API for Trusted Execution
+Environment (TEE) devices that are capable of securely computing the block
+frame signature. When a TEE device wishes to store replay protected data,
+it creates an RPMB frame with the requested data, computes the HMAC of the
+frame, and then requests the storage device, via the RPMB layer, to store
+the data.
+
+The layer provides an API, :c:func:`rpmb_cmd_seq()`, for issuing a sequence
+of raw RPMB protocol frames, which is close to the functionality provided
+by the eMMC multi-ioctl interface.
+
+.. c:function:: int rpmb_cmd_seq(struct rpmb_dev *rdev, u8 target, struct rpmb_cmd *cmds, u32 ncmds);
+
+
+A TEE driver can claim the RPMB interface, for example, via
+:c:func:`class_interface_register`:
+
+.. code-block:: c
+
+	struct class_interface tee_rpmb_intf = {
+		.class      = &rpmb_class,
+		.add_dev    = rpmb_add_device,
+		.remove_dev = rpmb_remove_device,
+	};
+	class_interface_register(&tee_rpmb_intf);
+
+
+RPMB device registration
+------------------------
+
+A storage device registers its RPMB hardware (eMMC) partition or RPMB
+W-LUN (UFS) with the RPMB layer via :c:func:`rpmb_dev_register`, providing
+an implementation for the :c:func:`rpmb_cmd_seq()` handler. The interface
+enables sending a sequence of standard RPMB frames.
+
+.. code-block:: c
+
+	struct rpmb_ops mmc_rpmb_dev_ops = {
+		.cmd_seq = mmc_blk_rpmb_cmd_seq,
+		.type    = RPMB_TYPE_EMMC,
+		...
+	}
+	rpmb_dev_register(disk_to_dev(part_md->disk), &mmc_rpmb_dev_ops);
+
+
+User space API
+--------------
+
+A parallel user space API is provided via the /dev/rpmbX character
+device, with three IOCTL commands:
+
+- ``RPMB_IOC_VER_CMD`` returns the driver protocol version,
+- ``RPMB_IOC_CAP_CMD`` returns the capability structure,
+- ``RPMB_IOC_SEQ_CMD`` executes a whole RPMB sequence, including
+  ``RESULT_READ``, supplied by the caller.
+
+An example user of this interface is the Trusty storage application:
+https://android.googlesource.com/trusty/app/storage/
+
+.. code-block:: c
+
+	struct rpmb_ioc_req_cmd ireq;
+	int ret;
+
+	ireq.req_type = RPMB_WRITE_DATA;
+	rpmb_ioc_cmd_set(ireq.icmd, RPMB_F_WRITE, frames_in, cnt_in);
+	rpmb_ioc_cmd_set(ireq.ocmd, 0, frames_out, cnt_out);
+
+	ret = ioctl(fd, RPMB_IOC_REQ_CMD, &ireq);
+
+
+API
+---
+.. kernel-doc:: include/linux/rpmb.h
+
+.. kernel-doc:: drivers/char/rpmb/core.c
+
+.. kernel-doc:: include/uapi/linux/rpmb.h
+
+.. kernel-doc:: drivers/char/rpmb/cdev.c
diff --git a/Documentation/rpmb/rpmb-tool.rst b/Documentation/rpmb/rpmb-tool.rst
new file mode 100644
index 000000000000..3f4eed84542a
--- /dev/null
+++ b/Documentation/rpmb/rpmb-tool.rst
@@ -0,0 +1,19 @@
+.. SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+==========
+RPMB Tool
+==========
+
+There is a sample rpmb tool under the tools/rpmb/ directory that exercises
+the RPMB devices via the RPMB character device interface (/dev/rpmbX).
+
+.. code-block:: none
+
+	rpmb [-v] [-r|-s]
+
+	rpmb get-info
+	rpmb program-key
+	rpmb write-counter [KEY_FILE]
+	rpmb write-blocks
+	rpmb read-blocks [KEY_FILE]
+
+	rpmb -v/--verbose: runs in verbose mode
diff --git a/Documentation/rpmb/simulation-device.rst b/Documentation/rpmb/simulation-device.rst
new file mode 100644
index 000000000000..9192a78a71d4
--- /dev/null
+++ b/Documentation/rpmb/simulation-device.rst
@@ -0,0 +1,19 @@
+======================
+RPMB Simulation Device
+======================
+
+The RPMB partition simulation device is a virtual device that
+provides a simulation of the RPMB protocol and uses kernel memory
+as storage.
+
+This driver cannot promise any real security. It is suitable for testing
+of the RPMB subsystem itself, and it is mostly useful for testing RPMB
+applications prior to RPMB key provisioning/programming, as the RPMB key
+can be programmed only once in the lifetime of a storage device.
+
+Implementation:
+---------------
+
+.. kernel-doc:: drivers/char/rpmb/rpmb_sim.c
+
diff --git a/Documentation/scheduler/sched-energy.txt b/Documentation/scheduler/sched-energy.txt
new file mode 100644
index 000000000000..dab2f9088b33
--- /dev/null
+++ b/Documentation/scheduler/sched-energy.txt
@@ -0,0 +1,362 @@
+Energy cost model for energy-aware scheduling (EXPERIMENTAL)
+
+Introduction
+=============
+
+The basic energy model uses platform energy data stored in sched_group_energy
+data structures attached to the sched_groups in the sched_domain hierarchy. The
+energy cost model offers two functions that can be used to guide scheduling
+decisions:
+
+1. static unsigned int sched_group_energy(struct energy_env *eenv)
+2. static int energy_diff(struct energy_env *eenv)
+
+sched_group_energy() estimates the energy consumed by all cpus in a specific
+sched_group including any shared resources owned exclusively by this group of
+cpus. Resources shared with other cpus are excluded (e.g. later level caches).
+
+energy_diff() estimates the total energy impact of a utilization change. That
+is, adding, removing, or migrating utilization (tasks).
+
+Both functions use a struct energy_env to specify the scenario to be evaluated:
+
+	struct energy_env {
+		struct sched_group	*sg_top;
+		struct sched_group	*sg_cap;
+		int			cap_idx;
+		int			util_delta;
+		int			src_cpu;
+		int			dst_cpu;
+		int			energy;
+	};
+
+sg_top: sched_group to be evaluated. Not used by energy_diff().
+
+sg_cap: sched_group covering the cpus in the same frequency domain. Set by
+sched_group_energy().
+
+cap_idx: Capacity state to be used for energy calculations. Set by
+find_new_capacity().
+
+util_delta: Amount of utilization to be added, removed, or migrated.
+
+src_cpu: Source cpu from where 'util_delta' utilization is removed. Should be
+-1 if no source (e.g. task wake-up).
+
+dst_cpu: Destination cpu where 'util_delta' utilization is added. Should be -1
+if utilization is removed (e.g. terminating tasks).
+
+energy: Result of sched_group_energy().
+
+The metric used to represent utilization is the actual per-entity running time
+averaged over time using a geometric series. Very similar to the existing
+per-entity load-tracking, but _not_ scaled by task priority and capped by the
+capacity of the cpu. The latter property does mean that utilization may
+underestimate the compute requirements for tasks on fully/over-utilized cpus.
+The greatest potential for energy savings without affecting performance too
+much is in scenarios where the system isn't fully utilized. If the system is
+deemed fully utilized, load-balancing should be done with task load (which
+includes task priority) instead, in the interest of fairness and performance.
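+
+As an illustration, evaluating the energy impact of placing a waking task p
+on a candidate cpu could be expressed as follows (a sketch only; task_util()
+is an assumed helper and a negative return value is taken to mean that the
+change lowers energy):
+
+	struct energy_env eenv = {
+		.util_delta	= task_util(p),
+		.src_cpu	= -1,	/* task wake-up: no source cpu */
+		.dst_cpu	= candidate_cpu,
+	};
+
+	if (energy_diff(&eenv) < 0)
+		/* placing p on candidate_cpu is estimated to save energy */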
+ + +Background and Terminology +=========================== + +To make it clear from the start: + +energy = [joule] (resource like a battery on powered devices) +power = energy/time = [joule/second] = [watt] + +The goal of energy-aware scheduling is to minimize energy, while still getting +the job done. That is, we want to maximize: + + performance [inst/s] + -------------------- + power [W] + +which is equivalent to minimizing: + + energy [J] + ----------- + instruction + +while still getting 'good' performance. It is essentially an alternative +optimization objective to the current performance-only objective for the +scheduler. This alternative considers two objectives: energy-efficiency and +performance. Hence, there needs to be a user controllable knob to switch the +objective. Since it is early days, this is currently a sched_feature +(ENERGY_AWARE). + +The idea behind introducing an energy cost model is to allow the scheduler to +evaluate the implications of its decisions rather than applying energy-saving +techniques blindly that may only have positive effects on some platforms. At +the same time, the energy cost model must be as simple as possible to minimize +the scheduler latency impact. + +Platform topology +------------------ + +The system topology (cpus, caches, and NUMA information, not peripherals) is +represented in the scheduler by the sched_domain hierarchy which has +sched_groups attached at each level that covers one or more cpus (see +sched-domains.txt for more details). To add energy awareness to the scheduler +we need to consider power and frequency domains. + +Power domain: + +A power domain is a part of the system that can be powered on/off +independently. Power domains are typically organized in a hierarchy where you +may be able to power down just a cpu or a group of cpus along with any +associated resources (e.g. shared caches). Powering up a cpu means that all +power domains it is a part of in the hierarchy must be powered up. Hence, it is +more expensive to power up the first cpu that belongs to a higher level power +domain than powering up additional cpus in the same high level domain. Two +level power domain hierarchy example: + + Power source + +-------------------------------+----... +per group PD G G + | +----------+ | + +--------+-------| Shared | (other groups) +per-cpu PD G G | resource | + | | +----------+ + +-------+ +-------+ + | CPU 0 | | CPU 1 | + +-------+ +-------+ + +Frequency domain: + +Frequency domains (P-states) typically cover the same group of cpus as one of +the power domain levels. That is, there might be several smaller power domains +sharing the same frequency (P-state) or there might be a power domain spanning +multiple frequency domains. + +From a scheduling point of view there is no need to know the actual frequencies +[Hz]. All the scheduler cares about is the compute capacity available at the +current state (P-state) the cpu is in and any other available states. For that +reason, and to also factor in any cpu micro-architecture differences, compute +capacity scaling states are called 'capacity states' in this document. For SMP +systems this is equivalent to P-states. For mixed micro-architecture systems +(like ARM big.LITTLE) it is P-states scaled according to the micro-architecture +performance relative to the other cpus in the system. 
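+
+As a worked example (all numbers illustrative): if a little cpu delivers 60%
+of a big cpu's per-Hz performance, its highest P-state runs at 850 MHz, and
+the big cpu's highest P-state runs at 1100 MHz, then normalizing the big
+cpu's top capacity to 1024 gives the little cpu a top capacity state of
+roughly:
+
+	capacity = 1024 * (850000/1100000) * 0.60 ~= 475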
+ +Energy modelling: +------------------ + +Due to the hierarchical nature of the power domains, the most obvious way to +model energy costs is therefore to associate power and energy costs with +domains (groups of cpus). Energy costs of shared resources are associated with +the group of cpus that share the resources, only the cost of powering the +cpu itself and any private resources (e.g. private L1 caches) is associated +with the per-cpu groups (lowest level). + +For example, for an SMP system with per-cpu power domains and a cluster level +(group of cpus) power domain we get the overall energy costs to be: + + energy = energy_cluster + n * energy_cpu + +where 'n' is the number of cpus powered up and energy_cluster is the cost paid +as soon as any cpu in the cluster is powered up. + +The power and frequency domains can naturally be mapped onto the existing +sched_domain hierarchy and sched_groups by adding the necessary data to the +existing data structures. + +The energy model considers energy consumption from two contributors (shown in +the illustration below): + +1. Busy energy: Energy consumed while a cpu and the higher level groups that it +belongs to are busy running tasks. Busy energy is associated with the state of +the cpu, not an event. The time the cpu spends in this state varies. Thus, the +most obvious platform parameter for this contribution is busy power +(energy/time). + +2. Idle energy: Energy consumed while a cpu and higher level groups that it +belongs to are idle (in a C-state). Like busy energy, idle energy is associated +with the state of the cpu. Thus, the platform parameter for this contribution +is idle power (energy/time). + +Energy consumed during transitions from an idle-state (C-state) to a busy state +(P-state) or going the other way is ignored by the model to simplify the energy +model calculations. + + + Power + ^ + | busy->idle idle->busy + | transition transition + | + | _ __ + | / \ / \__________________ + |______________/ \ / + | \ / + | Busy \ Idle / Busy + | low P-state \____________/ high P-state + | + +------------------------------------------------------------> time + +Busy |--------------| |-----------------| + +Wakeup |------| |------| + +Idle |------------| + + +The basic algorithm +==================== + +The basic idea is to determine the total energy impact when utilization is +added or removed by estimating the impact at each level in the sched_domain +hierarchy starting from the bottom (sched_group contains just a single cpu). +The energy cost comes from busy time (sched_group is awake because one or more +cpus are busy) and idle time (in an idle-state). Energy model numbers account +for energy costs associated with all cpus in the sched_group as a group. + + for_each_domain(cpu, sd) { + sg = sched_group_of(cpu) + energy_before = curr_util(sg) * busy_power(sg) + + (1-curr_util(sg)) * idle_power(sg) + energy_after = new_util(sg) * busy_power(sg) + + (1-new_util(sg)) * idle_power(sg) + energy_diff += energy_before - energy_after + + } + + return energy_diff + +{curr, new}_util: The cpu utilization at the lowest level and the overall +non-idle time for the entire group for higher levels. Utilization is in the +range 0.0 to 1.0 in the pseudo-code. + +busy_power: The power consumption of the sched_group. + +idle_power: The power consumption of the sched_group when idle. + +Note: It is a fundamental assumption that the utilization is (roughly) scale +invariant. 
Task utilization tracking factors in any frequency scaling and
+performance scaling differences due to different cpu micro-architectures such
+that task utilization can be used across the entire system.
+
+
+Platform energy data
+=====================
+
+struct sched_group_energy can be attached to sched_groups in the sched_domain
+hierarchy and has the following members:
+
+cap_states:
+	List of struct capacity_state representing the supported capacity states
+	(P-states). struct capacity_state has two members: cap and power, which
+	represent the compute capacity and the busy_power of the state. The
+	list must be ordered by capacity low->high.
+
+nr_cap_states:
+	Number of capacity states in the cap_states list.
+
+idle_states:
+	List of struct idle_state containing the idle_state power cost for each
+	idle-state supported by the system, ordered by shallowest state first.
+	All states must be included at all levels in the hierarchy, i.e. a
+	sched_group spanning just a single cpu must also include coupled
+	idle-states (cluster states). In addition to the cpuidle idle-states,
+	the list must also contain an entry for idling using the arch
+	default idle (arch_idle_cpu()). Although this state may not be a true
+	hardware idle-state, it is considered the shallowest idle-state in the
+	energy model and must be the first entry. cpus may enter this state
+	(possibly 'active idling') if cpuidle decides not to enter a cpuidle
+	idle-state. Default idle may not be used when cpuidle is enabled.
+	In this case, it should just be a copy of the first cpuidle idle-state.
+
+nr_idle_states:
+	Number of idle states in the idle_states list.
+
+There are no unit requirements for the energy cost data. Data can be normalized
+to any reference; however, the normalization must be consistent across all
+energy cost data. That is, one bogo-joule/watt must be the same quantity for
+all data, but we don't care what it is.
+
+A recipe for platform characterization
+=======================================
+
+Obtaining the actual model data for a particular platform requires some way of
+measuring power/energy. There isn't a tool to help with this (yet). This
+section provides a recipe for use as reference. It covers the steps used to
+characterize the ARM TC2 development platform. This sort of measurement is
+expected to be done anyway when tuning cpuidle and cpufreq for a given
+platform.
+
+The energy model needs two types of data (struct sched_group_energy holds
+these) for each sched_group where energy costs should be taken into account:
+
+1. Capacity state information
+
+A list containing the compute capacity and power consumption when fully
+utilized attributed to the group as a whole for each available capacity state.
+At the lowest level (group contains just a single cpu) this is the power of the
+cpu alone without including power consumed by resources shared with other cpus.
+It basically needs to fit the basic modelling approach described in the
+"Background and Terminology" section:
+
+	energy_system = energy_shared + n * energy_cpu
+
+for a system containing 'n' busy cpus. Only 'energy_cpu' should be included at
+the lowest level. 'energy_shared' is included at the next level which
+represents the group of cpus among which the resources are shared.
+
+This model is, of course, a simplification of reality. Thus, power/energy
+attributions might not always exactly represent how the hardware is designed.
+Also, busy power is likely to depend on the workload.
It is therefore +recommended to use a representative mix of workloads when characterizing the +capacity states. + +If the group has no capacity scaling support, the list will contain a single +state where power is the busy power attributed to the group. The capacity +should be set to a default value (1024). + +When frequency domains include multiple power domains, the group representing +the frequency domain and all child groups share capacity states. This must be +indicated by setting the SD_SHARE_CAP_STATES sched_domain flag. All groups at +all levels that share the capacity state must have the list of capacity states +with the power set to the contribution of the individual group. + +2. Idle power information + +Stored in the idle_states list. The power number is the group idle power +consumption in each idle state as well when the group is idle but has not +entered an idle-state ('active idle' as mentioned earlier). Due to the way the +energy model is defined, the idle power of the deepest group idle state can +alternatively be accounted for in the parent group busy power. In that case the +group idle state power values are offset such that the idle power of the +deepest state is zero. It is less intuitive, but it is easier to measure as +idle power consumed by the group and the busy/idle power of the parent group +cannot be distinguished without per group measurement points. + +Measuring capacity states and idle power: + +The capacity states' capacity and power can be estimated by running a benchmark +workload at each available capacity state. By restricting the benchmark to run +on subsets of cpus it is possible to extrapolate the power consumption of +shared resources. + +ARM TC2 has two clusters of two and three cpus respectively. Each cluster has a +shared L2 cache. TC2 has on-chip energy counters per cluster. Running a +benchmark workload on just one cpu in a cluster means that power is consumed in +the cluster (higher level group) and a single cpu (lowest level group). Adding +another benchmark task to another cpu increases the power consumption by the +amount consumed by the additional cpu. Hence, it is possible to extrapolate the +cluster busy power. + +For platforms that don't have energy counters or equivalent instrumentation +built-in, it may be possible to use an external DAQ to acquire similar data. + +If the benchmark includes some performance score (for example sysbench cpu +benchmark), this can be used to record the compute capacity. + +Measuring idle power requires insight into the idle state implementation on the +particular platform. Specifically, if the platform has coupled idle-states (or +package states). To measure non-coupled per-cpu idle-states it is necessary to +keep one cpu busy to keep any shared resources alive to isolate the idle power +of the cpu from idle/busy power of the shared resources. The cpu can be tricked +into different per-cpu idle states by disabling the other states. Based on +various combinations of measurements with specific cpus busy and disabling +idle-states it is possible to extrapolate the idle-state power. 
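+
+As a summary, the estimation loop from "The basic algorithm" section above
+could be rendered in C along these lines (a sketch only: utilization is
+expressed as a fraction of 1024 and the power numbers are assumed to come
+from the platform data described earlier):
+
+	struct grp_costs {
+		unsigned long busy_power;
+		unsigned long idle_power;
+	};
+
+	/*
+	 * Energy delta for one group; a positive result means the change
+	 * saves energy, following the sign convention of the pseudo-code
+	 * in "The basic algorithm".
+	 */
+	static long grp_energy_delta(const struct grp_costs *gc,
+				     unsigned long curr_util,
+				     unsigned long new_util)
+	{
+		long before = (curr_util * gc->busy_power +
+			       (1024 - curr_util) * gc->idle_power) >> 10;
+		long after  = (new_util * gc->busy_power +
+			       (1024 - new_util) * gc->idle_power) >> 10;
+
+		return before - after;
+	}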
diff --git a/Documentation/scheduler/sched-pelt.c b/Documentation/scheduler/sched-pelt.c index e4219139386a..cd3e1fe7a98a 100644 --- a/Documentation/scheduler/sched-pelt.c +++ b/Documentation/scheduler/sched-pelt.c @@ -10,21 +10,21 @@ #include #include -#define HALFLIFE 32 +#define HALFLIFE { 32, 16, 8 } #define SHIFT 32 double y; -void calc_runnable_avg_yN_inv(void) +void calc_runnable_avg_yN_inv(const int halflife) { int i; unsigned int x; printf("static const u32 runnable_avg_yN_inv[] = {"); - for (i = 0; i < HALFLIFE; i++) { + for (i = 0; i < halflife; i++) { x = ((1UL<<32)-1)*pow(y, i); - if (i % 6 == 0) printf("\n\t"); + if (i % 4 == 0) printf("\n\t"); printf("0x%8x, ", x); } printf("\n};\n\n"); @@ -32,12 +32,12 @@ void calc_runnable_avg_yN_inv(void) int sum = 1024; -void calc_runnable_avg_yN_sum(void) +void calc_runnable_avg_yN_sum(const int halflife) { int i; printf("static const u32 runnable_avg_yN_sum[] = {\n\t 0,"); - for (i = 1; i <= HALFLIFE; i++) { + for (i = 1; i <= halflife; i++) { if (i == 1) sum *= y; else @@ -55,7 +55,7 @@ int n = -1; /* first period */ long max = 1024; -void calc_converged_max(void) +void calc_converged_max(const int halflife) { long last = 0, y_inv = ((1UL<<32)-1)*y; @@ -73,17 +73,17 @@ void calc_converged_max(void) last = max; } n--; - printf("#define LOAD_AVG_PERIOD %d\n", HALFLIFE); + printf("#define LOAD_AVG_PERIOD %d\n", halflife); printf("#define LOAD_AVG_MAX %ld\n", max); -// printf("#define LOAD_AVG_MAX_N %d\n\n", n); + printf("#define LOAD_AVG_MAX_N %d\n\n", n); } -void calc_accumulated_sum_32(void) +void calc_accumulated_sum_32(const int halflife) { int i, x = sum; printf("static const u32 __accumulated_sum_N32[] = {\n\t 0,"); - for (i = 1; i <= n/HALFLIFE+1; i++) { + for (i = 1; i <= n/halflife+1; i++) { if (i > 1) x = x/2 + sum; @@ -97,12 +97,22 @@ void calc_accumulated_sum_32(void) void main(void) { + int hl_value[] = HALFLIFE; + int hl_count = sizeof(hl_value) / sizeof(int); + int hl_idx, halflife; + printf("/* Generated by Documentation/scheduler/sched-pelt; do not modify. */\n\n"); - y = pow(0.5, 1/(double)HALFLIFE); + for (hl_idx = 0; hl_idx < hl_count; ++hl_idx) { + halflife = hl_value[hl_idx]; + + y = pow(0.5, 1/(double)halflife); - calc_runnable_avg_yN_inv(); -// calc_runnable_avg_yN_sum(); - calc_converged_max(); -// calc_accumulated_sum_32(); + printf("#if CONFIG_PELT_UTIL_HALFLIFE_%d\n", halflife); + calc_runnable_avg_yN_inv(halflife); + calc_runnable_avg_yN_sum(halflife); + calc_converged_max(halflife); + calc_accumulated_sum_32(halflife); + printf("#endif\n\n"); + } } diff --git a/Documentation/scheduler/sched-tune.txt b/Documentation/scheduler/sched-tune.txt new file mode 100644 index 000000000000..5df0ea361311 --- /dev/null +++ b/Documentation/scheduler/sched-tune.txt @@ -0,0 +1,413 @@ + Central, scheduler-driven, power-performance control + (EXPERIMENTAL) + +Abstract +======== + +The topic of a single simple power-performance tunable, that is wholly +scheduler centric, and has well defined and predictable properties has come up +on several occasions in the past [1,2]. With techniques such as a scheduler +driven DVFS [3], we now have a good framework for implementing such a tunable. +This document describes the overall ideas behind its design and implementation. + + +Table of Contents +================= + +1. Motivation +2. Introduction +3. Signal Boosting Strategy +4. OPP selection using boosted CPU utilization +5. Per task group boosting +6. Per-task wakeup-placement-strategy Selection +7. 
Question and Answers + - What about "auto" mode? + - What about boosting on a congested system? + - How CPUs are boosted when we have tasks with multiple boost values? +8. References + + +1. Motivation +============= + +Sched-DVFS [3] was a new event-driven cpufreq governor which allows the +scheduler to select the optimal DVFS operating point (OPP) for running a task +allocated to a CPU. Later, the cpufreq maintainers introduced a similar +governor, schedutil. The introduction of schedutil also enables running +workloads at the most energy efficient OPPs. + +However, sometimes it may be desired to intentionally boost the performance of +a workload even if that could imply a reasonable increase in energy +consumption. For example, in order to reduce the response time of a task, we +may want to run the task at a higher OPP than the one that is actually required +by it's CPU bandwidth demand. + +This last requirement is especially important if we consider that one of the +main goals of the utilization-driven governor component is to replace all +currently available CPUFreq policies. Since sched-DVFS and schedutil are event +based, as opposed to the sampling driven governors we currently have, they are +already more responsive at selecting the optimal OPP to run tasks allocated to +a CPU. However, just tracking the actual task load demand may not be enough +from a performance standpoint. For example, it is not possible to get +behaviors similar to those provided by the "performance" and "interactive" +CPUFreq governors. + +This document describes an implementation of a tunable, stacked on top of the +utilization-driven governors which extends their functionality to support task +performance boosting. + +By "performance boosting" we mean the reduction of the time required to +complete a task activation, i.e. the time elapsed from a task wakeup to its +next deactivation (e.g. because it goes back to sleep or it terminates). For +example, if we consider a simple periodic task which executes the same workload +for 5[s] every 20[s] while running at a certain OPP, a boosted execution of +that task must complete each of its activations in less than 5[s]. + +A previous attempt [5] to introduce such a boosting feature has not been +successful mainly because of the complexity of the proposed solution. Previous +versions of the approach described in this document exposed a single simple +interface to user-space. This single tunable knob allowed the tuning of +system wide scheduler behaviours ranging from energy efficiency at one end +through to incremental performance boosting at the other end. This first +tunable affects all tasks. However, that is not useful for Android products +so in this version only a more advanced extension of the concept is provided +which uses CGroups to boost the performance of only selected tasks while using +the energy efficient default for all others. + +The rest of this document introduces in more details the proposed solution +which has been named SchedTune. + + +2. Introduction +=============== + +SchedTune exposes a simple user-space interface provided through a new +CGroup controller 'stune' which provides two power-performance tunables +per group: + + //schedtune.prefer_idle + //schedtune.boost + +The CGroup implementation permits arbitrary user-space defined task +classification to tune the scheduler for different goals depending on the +specific nature of the task, e.g. background vs interactive vs low-priority. + +More details are given in section 5. 
+
+2.1 Boosting
+============
+
+The boost value is expressed as an integer in the range [-100..0..100].
+
+A value of 0 (default) configures the CFS scheduler for maximum energy
+efficiency. This means that sched-DVFS runs the tasks at the minimum OPP
+required to satisfy their workload demand.
+
+A value of 100 configures the scheduler for maximum performance, which
+translates to the selection of the maximum OPP on that CPU.
+
+A value of -100 configures the scheduler for minimum performance, which
+translates to the selection of the minimum OPP on that CPU.
+
+Values in between can be used to suitably satisfy other scenarios, for
+example interactive response requirements, or to react to other system
+events (battery level, etc.).
+
+The overall design of the SchedTune module is built on top of "Per-Entity Load
+Tracking" (PELT) signals and sched-DVFS by introducing a bias on the Operating
+Performance Point (OPP) selection.
+
+Each time a task is allocated on a CPU, cpufreq is given the opportunity to tune
+the operating frequency of that CPU to better match the workload demand. The
+selection of the actual OPP being activated is influenced by the boost value
+for the task CGroup.
+
+This simple biasing approach leverages existing frameworks, which means minimal
+modifications to the scheduler, and yet it allows achieving a range of
+different behaviours all from a single simple tunable knob.
+
+In EAS schedulers, we use boosted task and CPU utilization for energy
+calculation and energy-aware task placement.
+
+2.2 prefer_idle
+===============
+
+This is a flag which indicates to the scheduler that userspace would like
+the scheduler to focus on energy or to focus on performance.
+
+A value of 0 (default) signals to the CFS scheduler that tasks in this group
+can be placed according to the energy-aware wakeup strategy.
+
+A value of 1 signals to the CFS scheduler that tasks in this group should be
+placed to minimise wakeup latency.
+
+The value is combined with the boost value - task placement will not be
+boost aware, however CPU OPP selection is still boost aware.
+
+Android platforms typically use this flag for application tasks which the
+user is currently interacting with.
+
+
+3. Signal Boosting Strategy
+===========================
+
+The whole PELT machinery works based on the value of a few load tracking
+signals which basically track the CPU bandwidth requirements for tasks and
+the capacity of CPUs. The basic idea behind the SchedTune knob is to
+artificially inflate some of these load tracking signals to make a task or RQ
+appear more demanding than it actually is.
+
+Which signals have to be inflated depends on the specific "consumer". However,
+independently from the specific (signal, consumer) pair, it is important to
+define a simple and possibly consistent strategy for the concept of boosting a
+signal.
+
+A boosting strategy defines how the "abstract" user-space defined
+sched_cfs_boost value is translated into an internal "margin" value to be added
+to a signal to get its inflated value:
+
+	margin         := boosting_strategy(sched_cfs_boost, signal)
+	boosted_signal := signal + margin
+
+Different boosting strategies were identified and analyzed before selecting the
+one found to be most effective.
+
+Signal Proportional Compensation (SPC)
+--------------------------------------
+
+In this boosting strategy the sched_cfs_boost value is used to compute a
+margin which is proportional to the complement of the original signal.
+When a signal has a maximum possible value, its complement is defined as
+the delta between the actual value and its possible maximum.
+
+Since the tunable implementation uses signals which have SCHED_LOAD_SCALE as
+the maximum possible value, the margin becomes:
+
+	margin := sched_cfs_boost * (SCHED_LOAD_SCALE - signal)
+
+Using this boosting strategy:
+- a 100% sched_cfs_boost means that the signal is scaled to the maximum value
+- each value in the range of sched_cfs_boost effectively inflates the signal in
+  question by a quantity which is proportional to the maximum value.
+
+For example, by applying the SPC boosting strategy to the selection of the OPP
+to run a task it is possible to achieve these behaviors:
+
+- 0% boosting: run the task at the minimum OPP required by its workload
+- 100% boosting: run the task at the maximum OPP available for the CPU
+- 50% boosting: run at the half-way OPP between minimum and maximum
+
+Which means that, at 50% boosting, a task will be scheduled to run at half of
+the maximum theoretically achievable performance on the specific target
+platform.
+
+A graphical representation of an SPC boosted signal is represented in the
+following figure where:
+  a) "-" represents the original signal
+  b) "b" represents a 50% boosted signal
+  c) "p" represents a 100% boosted signal
+
+
+   ^
+   |  SCHED_LOAD_SCALE
+   +-----------------------------------------------------------------+
+   |pppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppp
+   |
+   |                                             boosted_signal
+   |                                          bbbbbbbbbbbbbbbbbbbbbbbb
+   |
+   |                                            original signal
+   |                  bbbbbbbbbbbbbbbbbbbbbbbb+----------------------+
+   |                                          |
+   |bbbbbbbbbbbbbbbbbb                        |
+   |                                          |
+   |                                          |
+   |                                          |
+   |                  +-----------------------+
+   |                  |
+   |                  |
+   |                  |
+   |------------------+
+   |
+   |
+   +----------------------------------------------------------------->
+
+The plot above shows a ramped load signal (titled 'original signal') and its
+boosted equivalent. For each step of the original signal the boosted signal
+corresponding to a 50% boost is midway from the original signal and the upper
+bound. Boosting by 100% generates a boosted signal which is always saturated to
+the upper bound.
+
+
+4. OPP selection using boosted CPU utilization
+==============================================
+
+It is worth calling out that the implementation does not introduce any new load
+signals. Instead, it provides an API to tune existing signals. This tuning is
+done on demand and only in scheduler code paths where it is sensible to do so.
+The new API calls are defined to return either the default signal or a boosted
+one, depending on the value of sched_cfs_boost. This is a clean and
+non-invasive modification of the existing code paths.
+
+The signal representing a CPU's utilization is boosted according to the
+previously described SPC boosting strategy. To sched-DVFS, this allows a CPU
+(i.e. a CFS run-queue) to appear more used than it actually is.
+
+Thus, with sched_cfs_boost enabled we have the following main functions to
+get the current utilization of a CPU:
+
+	cpu_util()
+	boosted_cpu_util()
+
+The new boosted_cpu_util() is similar to the first but returns a boosted
+utilization signal which is a function of the sched_cfs_boost value.
+
+This function is used in the CFS scheduler code paths where sched-DVFS needs to
+decide the OPP to run a CPU at.
+For example, this allows selecting the highest OPP for a CPU which has
+the boost value set to 100%.
+
+
+
+
+5. Per task group boosting
+==========================
+
+On battery powered devices there usually are many background services which are
+long running and need energy efficient scheduling. On the other hand, some
+applications are more performance sensitive and require an interactive
+response and/or maximum performance, regardless of the energy cost.
+
+To better service such scenarios, the SchedTune implementation has an extension
+that provides a more fine grained boosting interface.
+
+A new CGroup controller, namely "schedtune", can be enabled which makes it
+possible to define and configure task groups with different boosting values.
+Tasks that require special performance can be put into separate CGroups.
+The value of the boost associated with the tasks in this group can be specified
+using a single knob exposed by the CGroup controller:
+
+  schedtune.boost
+
+This knob allows the definition of a boost value that is to be used for
+SPC boosting of all tasks attached to this group.
+
+The current schedtune controller implementation is really simple and has these
+main characteristics:
+
+  1) It is only possible to create 1 level depth hierarchies
+
+     The root control group defines the system-wide boost value to be applied
+     by default to all tasks. Its direct subgroups are named "boost groups" and
+     they define the boost value for a specific set of tasks.
+     Further nested subgroups are not allowed since they do not have a sensible
+     meaning from a user-space standpoint.
+
+  2) It is possible to define only a limited number of "boost groups"
+
+     This number is defined at compile time and by default configured to 16.
+     This is a design decision motivated by two main reasons:
+     a) In a real system we do not expect utilization scenarios with more than
+        a few boost groups. For example, a reasonable collection of groups
+        could be just "background", "interactive" and "performance".
+     b) It simplifies the implementation considerably, especially for the code
+        which has to compute the per CPU boosting once there are multiple
+        RUNNABLE tasks with different boost values.
+
+Such a simple design should allow servicing the main utilization scenarios
+identified so far. It provides a simple interface which can be used to manage
+the power-performance of all tasks or only selected tasks.
+Moreover, this interface can be easily integrated by user-space run-times (e.g.
+Android, ChromeOS) to implement a QoS solution for task boosting based on task
+classification, which has been a long standing requirement.
+
+Setup and usage
+---------------
+
+0. Use a kernel with CONFIG_SCHED_TUNE support enabled
+
+1. Check that the "schedtune" CGroup controller is available:
+
+   root@linaro-nano:~# cat /proc/cgroups
+   #subsys_name	hierarchy	num_cgroups	enabled
+   cpuset	0		1		1
+   cpu		0		1		1
+   schedtune	0		1		1
+
+2. Mount a tmpfs to create the CGroups mount point (Optional)
+
+   root@linaro-nano:~# sudo mount -t tmpfs cgroups /sys/fs/cgroup
+
+3. Mount the "schedtune" controller
+
+   root@linaro-nano:~# mkdir /sys/fs/cgroup/stune
+   root@linaro-nano:~# sudo mount -t cgroup -o schedtune stune /sys/fs/cgroup/stune
+
+4. Create task groups and configure their specific boost value (Optional)
+
+   For example here we create a "performance" boost group configured to boost
+   all its tasks to 100%
+
+   root@linaro-nano:~# mkdir /sys/fs/cgroup/stune/performance
+   root@linaro-nano:~# echo 100 > /sys/fs/cgroup/stune/performance/schedtune.boost
+
+5. Move tasks into the boost group
+
+   For example, the following moves the task with PID $TASKPID (and all its
+   threads) into the "performance" boost group.
+
+   root@linaro-nano:~# echo $TASKPID > /sys/fs/cgroup/stune/performance/cgroup.procs
+
+This simple configuration allows only the threads of the $TASKPID task to run,
+when needed, at the highest OPP on the most capable CPU of the system.
+
+
+6. Per-task wakeup-placement-strategy Selection
+===============================================
+
+Many devices have a number of CFS tasks in use which require an absolute
+minimum wakeup latency, and many tasks for which wakeup latency is not
+important.
+
+For touch-driven environments, removing additional wakeup latency can be
+critical.
+
+When you use the SchedTune CGroup controller, you have access to a second
+parameter which allows a group to be marked such that energy_aware task
+placement is bypassed for tasks belonging to that group.
+
+prefer_idle=0 (default - use energy-aware task placement if available)
+prefer_idle=1 (never use energy-aware task placement for these tasks)
+
+Since the regular wakeup task placement algorithm in CFS is biased for
+performance, this has the effect of restoring minimum wakeup latency
+for the desired tasks whilst still allowing energy-aware wakeup placement
+to save energy for other tasks.
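+
+For example, to request latency-sensitive placement for the "performance"
+group created above (assuming the controller exposes the flag as
+schedtune.prefer_idle, as on typical Android kernels):
+
+   root@linaro-nano:~# echo 1 > /sys/fs/cgroup/stune/performance/schedtune.prefer_idle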
+
+
+7. Question and Answers
+=======================
+
+What about "auto" mode?
+-----------------------
+
+The 'auto' mode as described in [5] can be implemented by interfacing SchedTune
+with some suitable user-space element. This element could use the exposed
+system-wide or cgroup based interface.
+
+How are multiple groups of tasks with different boost values managed?
+---------------------------------------------------------------------
+
+The current SchedTune implementation keeps track of the boosted RUNNABLE tasks
+on a CPU. The CPU utilization seen by the scheduler-driven cpufreq governors
+(and used to select an appropriate OPP) is boosted with a value which is the
+maximum of the boost values of the currently RUNNABLE tasks in its RQ.
+
+This allows cpufreq to boost a CPU only while there are boosted tasks ready
+to run and switch back to the energy efficient mode as soon as the last boosted
+task is dequeued.
+
+
+8. References
+=============
+[1] http://lwn.net/Articles/552889
+[2] http://lkml.org/lkml/2012/5/18/91
+[3] http://lkml.org/lkml/2015/6/26/620
diff --git a/Documentation/speculation.txt b/Documentation/speculation.txt
new file mode 100644
index 000000000000..e9e6cbae2841
--- /dev/null
+++ b/Documentation/speculation.txt
@@ -0,0 +1,90 @@
+This document explains potential effects of speculation, and how undesirable
+effects can be mitigated portably using common APIs.
+
+===========
+Speculation
+===========
+
+To improve performance and minimize average latencies, many contemporary CPUs
+employ speculative execution techniques such as branch prediction, performing
+work which may be discarded at a later stage.
+
+Typically speculative execution cannot be observed from architectural state,
+such as the contents of registers. However, in some cases it is possible to
+observe its impact on microarchitectural state, such as the presence or
+absence of data in caches. Such state may form side-channels which can be
+observed to extract secret information.
+
+For example, in the presence of branch prediction, it is possible for bounds
+checks to be ignored by code which is speculatively executed. Consider the
+following code:
+
+	int load_array(int *array, unsigned int index)
+	{
+		if (index >= MAX_ARRAY_ELEMS)
+			return 0;
+		else
+			return array[index];
+	}
+
+Which, on arm64, may be compiled to an assembly sequence such as:
+
+	CMP	<index>, #MAX_ARRAY_ELEMS
+	B.LT	less
+	MOV	<returnval>, #0
+	RET
+  less:
+	LDR	<returnval>, [<array>, <index>]
+	RET
+
+It is possible that a CPU mis-predicts the conditional branch, and
+speculatively loads array[index], even if index >= MAX_ARRAY_ELEMS. This
+value will subsequently be discarded, but the speculated load may affect
+microarchitectural state which can be subsequently measured.
+
+More complex sequences involving multiple dependent memory accesses may
+result in sensitive information being leaked. Consider the following
+code, building on the prior example:
+
+	int load_dependent_arrays(int *arr1, int *arr2, int index)
+	{
+		int val1, val2;
+
+		val1 = load_array(arr1, index);
+		val2 = load_array(arr2, val1);
+
+		return val2;
+	}
+
+Under speculation, the first call to load_array() may return the value
+of an out-of-bounds address, while the second call will influence
+microarchitectural state dependent on this value. This may provide an
+arbitrary read primitive.
+
+====================================
+Mitigating speculation side-channels
+====================================
+
+The kernel provides a generic API to ensure that bounds checks are
+respected even under speculation. Architectures which are affected by
+speculation-based side-channels are expected to implement these
+primitives.
+
+The array_index_nospec() helper in <linux/nospec.h> can be used to
+prevent information from being leaked via side-channels.
+
+A call to array_index_nospec(index, size) returns a sanitized index
+value that is bounded to [0, size) even under CPU speculation
+conditions.
+
+This can be used to protect the earlier load_array() example:
+
+	int load_array(int *array, unsigned int index)
+	{
+		if (index >= MAX_ARRAY_ELEMS)
+			return 0;
+		else {
+			index = array_index_nospec(index, MAX_ARRAY_ELEMS);
+			return array[index];
+		}
+	}
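+
+As a rough behavioral sketch (the real helper in <linux/nospec.h> is
+architecture-tunable; this shows only the flavour of a generic branch-free
+fallback, simplified for illustration):
+
+	/* All-ones mask when 0 <= index < size, zero otherwise, computed
+	 * without a conditional branch the CPU could mispredict. */
+	static inline unsigned long index_mask_nospec(unsigned long index,
+						      unsigned long size)
+	{
+		return ~(long)(index | (size - 1UL - index))
+			>> (sizeof(long) * 8 - 1);
+	}
+
+	/* Usage sketch: clamp the index before the dependent access. */
+	#define array_index_nospec_demo(index, size) \
+		((index) & index_mask_nospec((index), (size)))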
diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
index 39aa9e8697cc..fbedcc39460b 100644
--- a/Documentation/sphinx/kerneldoc.py
+++ b/Documentation/sphinx/kerneldoc.py
@@ -36,8 +36,7 @@
 
 from docutils import nodes, statemachine
 from docutils.statemachine import ViewList
-from docutils.parsers.rst import directives
-from sphinx.util.compat import Directive
+from docutils.parsers.rst import directives, Directive
 from sphinx.ext.autodoc import AutodocReporter
 
 __version__  =  '1.0'
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 694968c7523c..b757d6eb365b 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -653,7 +653,8 @@ allowed to execute.
 perf_event_paranoid:
 
 Controls use of the performance events system by unprivileged
-users (without CAP_SYS_ADMIN).  The default value is 2.
+users (without CAP_SYS_ADMIN).  The default value is 3 if
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT is set, or 2 otherwise.
 
 -1: Allow use of (almost) all events by all users
     Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK
@@ -661,6 +662,7 @@ users (without CAP_SYS_ADMIN).  The default value is 2.
     Disallow raw tracepoint access by users without CAP_SYS_ADMIN
 >=1: Disallow CPU event access by users without CAP_SYS_ADMIN
 >=2: Disallow kernel profiling by users without CAP_SYS_ADMIN
+>=3: Disallow all event access by users without CAP_SYS_ADMIN
 
 ==============================================================
 
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 9baf66a9ef4e..1d1f2cb5abc8 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -30,6 +30,7 @@ Currently, these files are in /proc/sys/vm:
 - dirty_writeback_centisecs
 - drop_caches
 - extfrag_threshold
+- extra_free_kbytes
 - hugepages_treat_as_movable
 - hugetlb_shm_group
 - laptop_mode
@@ -260,6 +261,21 @@ any throttling.
 
 ==============================================================
 
+extra_free_kbytes
+
+This parameter tells the VM to keep extra free memory between the threshold
+where background reclaim (kswapd) kicks in, and the threshold where direct
+reclaim (by allocating processes) kicks in.
+
+This is useful for workloads that require low latency memory allocations
+and have a bounded burstiness in memory allocations. For example, a
+realtime application that receives and transmits network traffic
+(causing in-kernel memory allocations) with a maximum total message burst
+size of 200MB may need 200MB of extra free memory to avoid direct reclaim
+related latencies.
+
+==============================================================
+
 hugepages_treat_as_movable
 
 This parameter controls whether we can allocate hugepages from ZONE_MOVABLE
diff --git a/Documentation/trace/events-power.txt b/Documentation/trace/events-power.txt
index 21d514ced212..4d817d5acc40 100644
--- a/Documentation/trace/events-power.txt
+++ b/Documentation/trace/events-power.txt
@@ -25,6 +25,7 @@ cpufreq.
 
 cpu_idle		"state=%lu cpu_id=%lu"
 cpu_frequency		"state=%lu cpu_id=%lu"
+cpu_frequency_limits	"min=%lu max=%lu cpu_id=%lu"
 
 A suspend event is used to indicate the system going in and out of the
 suspend mode:
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index d4601df6e72e..f2fcbb7a70c6 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -2407,6 +2407,35 @@ will produce:
  1)   1.449 us    |             }
 
+You can disable the hierarchical function call formatting and instead print a
+flat list of function entry and return events.  This uses the format described
+in the Output Formatting section and respects all the trace options that
+control that formatting.  Hierarchical formatting is the default.
+
+	hierarchical: echo nofuncgraph-flat > trace_options
+	flat: echo funcgraph-flat > trace_options
+
+  i.e.:
+
+  # tracer: function_graph
+  #
+  # entries-in-buffer/entries-written: 68355/68355   #P:2
+  #
+  #                              _-----=> irqs-off
+  #                             / _----=> need-resched
+  #                            | / _---=> hardirq/softirq
+  #                            || / _--=> preempt-depth
+  #                            ||| /     delay
+  #           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
+  #              | |       |   ||||       |         |
+                sh-1806  [001] d...   198.843443: graph_ent: func=_raw_spin_lock
+                sh-1806  [001] d...   198.843445: graph_ent: func=__raw_spin_lock
+                sh-1806  [001] d..1   198.843447: graph_ret: func=__raw_spin_lock
+                sh-1806  [001] d..1   198.843449: graph_ret: func=_raw_spin_lock
+                sh-1806  [001] d..1   198.843451: graph_ent: func=_raw_spin_unlock_irqrestore
+                sh-1806  [001] d...   198.843453: graph_ret: func=_raw_spin_unlock_irqrestore
+
+
 You might find other useful features for this tracer in the following
 "dynamic ftrace" section such as tracing only specific functions or tasks.
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
index 7b2eb1b7d4ca..a3233da7fa88 100644
--- a/Documentation/userspace-api/index.rst
+++ b/Documentation/userspace-api/index.rst
@@ -19,6 +19,7 @@ place where this information is gathered.
    no_new_privs
    seccomp_filter
    unshare
+   spec_ctrl
 
 .. only::  subproject and html
 
diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst
new file mode 100644
index 000000000000..32f3d55c54b7
--- /dev/null
+++ b/Documentation/userspace-api/spec_ctrl.rst
@@ -0,0 +1,94 @@
+===================
+Speculation Control
+===================
+
+Quite a few CPUs have speculation-related misfeatures which are in
+fact vulnerabilities causing data leaks in various forms even across
+privilege domains.
+
+The kernel provides mitigation for such vulnerabilities in various
+forms. Some of these mitigations are compile-time configurable and some
+can be supplied on the kernel command line.
+
+There is also a class of mitigations which are very expensive, but they can
+be restricted to a certain set of processes or tasks in controlled
+environments. The mechanism to control these mitigations is via
+:manpage:`prctl(2)`.
+
+There are two prctl options which are related to this:
+
+ * PR_GET_SPECULATION_CTRL
+
+ * PR_SET_SPECULATION_CTRL
+
+PR_GET_SPECULATION_CTRL
+-----------------------
+
+PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
+which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
+the following meaning:
+
+==== ===================== ===================================================
+Bit  Define                Description
+==== ===================== ===================================================
+0    PR_SPEC_PRCTL         Mitigation can be controlled per task by
+                           PR_SET_SPECULATION_CTRL.
+1    PR_SPEC_ENABLE        The speculation feature is enabled, mitigation is
+                           disabled.
+2    PR_SPEC_DISABLE       The speculation feature is disabled, mitigation is
+                           enabled.
+3    PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
+                           subsequent prctl(..., PR_SPEC_ENABLE) will fail.
+==== ===================== ===================================================
+
+If all bits are 0 the CPU is not affected by the speculation misfeature.
+
+If PR_SPEC_PRCTL is set, then per-task control of the mitigation is
+available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
+misfeature will fail.
+
+PR_SET_SPECULATION_CTRL
+-----------------------
+
+PR_SET_SPECULATION_CTRL allows controlling the speculation misfeature, which
+is selected by arg2 of :manpage:`prctl(2)`, per task. arg3 is used to hand
+in the control value, i.e. either PR_SPEC_ENABLE, PR_SPEC_DISABLE or
+PR_SPEC_FORCE_DISABLE.
+
+Common error codes
+------------------
+======= =================================================================
+Value   Meaning
+======= =================================================================
+EINVAL  The prctl is not implemented by the architecture or unused
+        prctl(2) arguments are not 0.
+
+ENODEV  arg2 is selecting a speculation misfeature which is not supported.
+======= =================================================================
+
+PR_SET_SPECULATION_CTRL error codes
+-----------------------------------
+======= =================================================================
+Value   Meaning
+======= =================================================================
+0       Success
+
+ERANGE  arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
+        PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.
+
+ENXIO   Control of the selected speculation misfeature is not possible.
+        See PR_GET_SPECULATION_CTRL.
+
+EPERM   Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller
+        tried to enable it again.
+======= =================================================================
+
+Speculation misfeature controls
+-------------------------------
+- PR_SPEC_STORE_BYPASS: Speculative Store Bypass
+
+  Invocations:
+   * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
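+
+A minimal userspace sketch of the sequence described above (error handling
+trimmed; the PR_* constants come from <linux/prctl.h> on kernels that
+implement this interface)::
+
+    #include <stdio.h>
+    #include <sys/prctl.h>
+    #include <linux/prctl.h>
+
+    int main(void)
+    {
+            int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
+
+            if (state < 0 || !(state & PR_SPEC_PRCTL)) {
+                    fprintf(stderr, "no per-task control available\n");
+                    return 1;
+            }
+
+            /* Disable the misfeature, i.e. enable the mitigation, for this task. */
+            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0))
+                    perror("prctl(PR_SET_SPECULATION_CTRL)");
+            return 0;
+    }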
diff --git a/Documentation/vfio-mediated-device.txt b/Documentation/vfio-mediated-device.txt
index 1b3950346532..c3f69bcaf96e 100644
--- a/Documentation/vfio-mediated-device.txt
+++ b/Documentation/vfio-mediated-device.txt
@@ -145,6 +145,11 @@ The functions in the mdev_parent_ops structure are as follows:
 * create: allocate basic resources in a driver for a mediated device
 * remove: free resources in a driver when a mediated device is destroyed
 
+(Note that mdev-core provides no implicit serialization of create/remove
+callbacks per mdev parent device, per mdev type, or any other categorization.
+Vendor drivers are expected to be fully asynchronous in this respect or
+provide their own internal resource protection.)
+
 The callbacks in the mdev_parent_ops structure are as follows:
 
 * open: open callback of mediated device
diff --git a/Documentation/virtual/00-INDEX b/Documentation/virtual/00-INDEX
index af0d23968ee7..9d616fca64bd 100644
--- a/Documentation/virtual/00-INDEX
+++ b/Documentation/virtual/00-INDEX
@@ -9,3 +9,6 @@ kvm/
 	- Kernel Virtual Machine.  See also http://linux-kvm.org
 uml/
 	- User Mode Linux, builds/runs Linux kernel as a userspace program.
+
+acrn/
+	- ACRN Project.  See also http://xxxx
diff --git a/Documentation/virtual/acrn/00-INDEX b/Documentation/virtual/acrn/00-INDEX
new file mode 100644
index 000000000000..5beb50eef9e1
--- /dev/null
+++ b/Documentation/virtual/acrn/00-INDEX
@@ -0,0 +1,8 @@
+00-INDEX
+	- this file.
+index.rst
+	- Index.
+vhm.rst
+	- virtio and hypervisor service module (VHM) APIs.
+vbs.rst
+	- virtio and backend service (VBS) APIs.
diff --git a/Documentation/virtual/acrn/conf.py b/Documentation/virtual/acrn/conf.py
new file mode 100644
index 000000000000..ed247df22700
--- /dev/null
+++ b/Documentation/virtual/acrn/conf.py
@@ -0,0 +1,5 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "ACRN Project"
+
+tags.add("subproject")
diff --git a/Documentation/virtual/acrn/index.rst b/Documentation/virtual/acrn/index.rst
new file mode 100644
index 000000000000..3630d4fe3207
--- /dev/null
+++ b/Documentation/virtual/acrn/index.rst
@@ -0,0 +1,17 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+=============================
+ACRN Project
+=============================
+
+.. toctree::
+
+   vbs.rst
+   vhm.rst
+
+.. only:: subproject
+
+   Indices
+   =======
+
+   * :ref:`genindex`
diff --git a/Documentation/virtual/acrn/vbs.rst b/Documentation/virtual/acrn/vbs.rst
new file mode 100644
index 000000000000..40a0683a1c0b
--- /dev/null
+++ b/Documentation/virtual/acrn/vbs.rst
@@ -0,0 +1,20 @@
+================================
+Virtio and Backend Service (VBS)
+================================
+
+The Virtio and Backend Service (VBS) is part of the ACRN Project.
+
+The VBS can be further divided into two parts: VBS in user space (VBS-U)
+and VBS in kernel space (VBS-K).
+
+Example:
+--------
+A reference driver for VBS-K can be found at :c:type:`struct vbs_rng`.
+
+.. kernel-doc:: drivers/vbs/vbs_rng.c
+
+APIs:
+-----
+
+.. kernel-doc:: include/linux/vbs/vbs.h
+.. kernel-doc:: include/linux/vbs/vq.h
diff --git a/Documentation/virtual/acrn/vhm.rst b/Documentation/virtual/acrn/vhm.rst
new file mode 100644
index 000000000000..d3235eab9eb3
--- /dev/null
+++ b/Documentation/virtual/acrn/vhm.rst
@@ -0,0 +1,14 @@
+==================================
+Virtio and Hypervisor Module (VHM)
+==================================
+
+The Virtio and Hypervisor service Module (VHM) is part of the
+ACRN Project.
+
+APIs:
+-----
+
+.. kernel-doc:: include/linux/vhm/acrn_vhm_ioreq.h
+.. kernel-doc:: include/linux/vhm/acrn_vhm_mm.h
+.. kernel-doc:: include/linux/vhm/vhm_ioctl_defs.h
+.. kernel-doc:: include/linux/vhm/vhm_vm_mngt.h
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index e63a35fafef0..5d12166bd66b 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -123,14 +123,15 @@ memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the
 flag KVM_VM_MIPS_VZ.
 
 
-4.3 KVM_GET_MSR_INDEX_LIST
+4.3 KVM_GET_MSR_INDEX_LIST, KVM_GET_MSR_FEATURE_INDEX_LIST
 
-Capability: basic
+Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST
 Architectures: x86
-Type: system
+Type: system ioctl
 Parameters: struct kvm_msr_list (in/out)
 Returns: 0 on success; -1 on error
 Errors:
+  EFAULT:    the msr index list cannot be read from or written to
   E2BIG:     the msr index list is too big to fit in the array specified by
              the user.
 
@@ -139,16 +140,23 @@ struct kvm_msr_list {
 	__u32 indices[0];
 };
 
-This ioctl returns the guest msrs that are supported.  The list varies
-by kvm version and host processor, but does not change otherwise.  The
-user fills in the size of the indices array in nmsrs, and in return
-kvm adjusts nmsrs to reflect the actual number of msrs and fills in
-the indices array with their numbers.
+The user fills in the size of the indices array in nmsrs, and in return
+kvm adjusts nmsrs to reflect the actual number of msrs and fills in the
+indices array with their numbers.
+
+KVM_GET_MSR_INDEX_LIST returns the guest msrs that are supported.  The list
+varies by kvm version and host processor, but does not change otherwise.
 
 Note: if kvm indicates support for MCE (KVM_CAP_MCE), then the MCE bank MSRs
 are not returned in the MSR list, as different vcpus can have a different
 number of banks, as set via the KVM_X86_SETUP_MCE ioctl.
 
+KVM_GET_MSR_FEATURE_INDEX_LIST returns the list of MSRs that can be passed
+to the KVM_GET_MSRS system ioctl.  This lets userspace probe host capabilities
+and processor features that are exposed via MSRs (e.g., VMX capabilities).
+This list also varies by kvm version and host processor, but does not change
+otherwise.
+
 
 4.4 KVM_CHECK_EXTENSION
 
@@ -475,14 +483,22 @@ Support for this has been removed.  Use KVM_SET_GUEST_DEBUG instead.
 
 4.18 KVM_GET_MSRS
 
-Capability: basic
+Capability: basic (vcpu), KVM_CAP_GET_MSR_FEATURES (system)
 Architectures: x86
-Type: vcpu ioctl
+Type: system ioctl, vcpu ioctl
 Parameters: struct kvm_msrs (in/out)
-Returns: 0 on success, -1 on error
+Returns: number of msrs successfully returned;
+        -1 on error
+
+When used as a system ioctl:
+Reads the values of MSR-based features that are available for the VM.  This
+is similar to KVM_GET_SUPPORTED_CPUID, but it returns MSR indices and values.
+The list of msr-based features can be obtained using
+KVM_GET_MSR_FEATURE_INDEX_LIST in a system ioctl.
+
+When used as a vcpu ioctl:
 Reads model-specific registers from the vcpu.  Supported msr indices can
-be obtained using KVM_GET_MSR_INDEX_LIST.
+be obtained using KVM_GET_MSR_INDEX_LIST in a system ioctl.
 
 struct kvm_msrs {
 	__u32 nmsrs; /* number of msrs in entries */
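+
+A minimal userspace sketch of the system-ioctl usage described above (error
+handling and the E2BIG resize loop elided; assumes kernel headers that
+define KVM_GET_MSR_FEATURE_INDEX_LIST):
+
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <stdlib.h>
+	#include <sys/ioctl.h>
+	#include <linux/kvm.h>
+
+	int main(void)
+	{
+		int kvm = open("/dev/kvm", O_RDWR);
+		int n = 64; /* a guess; real code retries on E2BIG */
+		struct kvm_msr_list *list;
+
+		list = calloc(1, sizeof(*list) + n * sizeof(__u32));
+		list->nmsrs = n;
+
+		/* List the MSR-based features the host KVM can report. */
+		if (kvm >= 0 && ioctl(kvm, KVM_GET_MSR_FEATURE_INDEX_LIST, list) == 0) {
+			for (__u32 i = 0; i < list->nmsrs; i++)
+				printf("feature MSR 0x%x\n", list->indices[i]);
+		}
+
+		free(list);
+		return 0;
+	}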
@@ -1837,6 +1853,7 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_DBSR              | 32
   PPC   | KVM_REG_PPC_TIDR              | 64
   PPC   | KVM_REG_PPC_PSSCR             | 64
+  PPC   | KVM_REG_PPC_DEC_EXPIRY        | 64
   PPC   | KVM_REG_PPC_TM_GPR0           | 64
           ...
   PPC   | KVM_REG_PPC_TM_GPR31          | 64
@@ -1939,6 +1956,9 @@ ARM 32-bit VFP control registers have the following id bit patterns:
 ARM 64-bit FP registers have the following id bit patterns:
   0x4030 0000 0012 0 <regno:12>
 
+ARM firmware pseudo-registers have the following bit pattern:
+  0x4030 0000 0014 <regno:16>
+
 arm64 registers are mapped using the lower 32 bits. The upper 16 of
 that is the register group type, or coprocessor number:
 
@@ -1955,6 +1975,9 @@ arm64 CCSIDR registers are demultiplexed by CSSELR value:
 arm64 system registers have the following id bit patterns:
   0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
 
+arm64 firmware pseudo-registers have the following bit pattern:
+  0x6030 0000 0014 <regno:16>
+
 MIPS registers are mapped using the lower 32 bits.  The upper 16 of that is
 the register group type:
 
@@ -2489,7 +2512,8 @@ Possible features:
 	  and execute guest code when KVM_RUN is called.
 	- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
 	  Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
-	- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
+	- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision
+	  backward compatible with v0.2) for the CPU.
 	  Depends on KVM_CAP_ARM_PSCI_0_2.
 	- KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
 	  Depends on KVM_CAP_ARM_PMU_V3.
diff --git a/Documentation/virtual/kvm/arm/psci.txt b/Documentation/virtual/kvm/arm/psci.txt
new file mode 100644
index 000000000000..aafdab887b04
--- /dev/null
+++ b/Documentation/virtual/kvm/arm/psci.txt
@@ -0,0 +1,30 @@
+KVM implements the PSCI (Power State Coordination Interface)
+specification in order to provide services such as CPU on/off, reset
+and power-off to the guest.
+
+The PSCI specification is regularly updated to provide new features,
+and KVM implements these updates if they make sense from a virtualization
+point of view.
+
+This means that a guest booted on two different versions of KVM can
+observe two different "firmware" revisions. This could cause issues if
+a given guest is tied to a particular PSCI revision (unlikely), or if
+a migration causes a different PSCI version to be exposed out of the
+blue to an unsuspecting guest.
+
+In order to remedy this situation, KVM exposes a set of "firmware
+pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
+interface. These registers can be saved/restored by userspace, and set
+to a convenient value if required.
+ +The following register is defined: + +* KVM_REG_ARM_PSCI_VERSION: + + - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set + (and thus has already been initialized) + - Returns the current PSCI version on GET_ONE_REG (defaulting to the + highest PSCI version implemented by KVM and compatible with v0.2) + - Allows any PSCI version implemented by KVM and compatible with + v0.2 to be set with SET_ONE_REG + - Affects the whole VM (even if the register view is per-vcpu) diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt index 3c65feb83010..a81c97a4b4a5 100644 --- a/Documentation/virtual/kvm/cpuid.txt +++ b/Documentation/virtual/kvm/cpuid.txt @@ -54,6 +54,10 @@ KVM_FEATURE_PV_UNHALT || 7 || guest checks this feature bit || || before enabling paravirtualized || || spinlock support. ------------------------------------------------------------------------------ +KVM_FEATURE_ASYNC_PF_VMEXIT || 10 || paravirtualized async PF VM exit + || || can be enabled by setting bit 2 + || || when writing to msr 0x4b564d02 +------------------------------------------------------------------------------ KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side || || per-cpu warps are expected in || || kvmclock. diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt index 1ebecc115dc6..f3f0d57ced8e 100644 --- a/Documentation/virtual/kvm/msr.txt +++ b/Documentation/virtual/kvm/msr.txt @@ -170,7 +170,8 @@ MSR_KVM_ASYNC_PF_EN: 0x4b564d02 when asynchronous page faults are enabled on the vcpu 0 when disabled. Bit 1 is 1 if asynchronous page faults can be injected when vcpu is in cpl == 0. Bit 2 is 1 if asynchronous page faults - are delivered to L1 as #PF vmexits. + are delivered to L1 as #PF vmexits. Bit 2 can be set only if + KVM_FEATURE_ASYNC_PF_VMEXIT is present in CPUID. First 4 byte of 64 byte memory location will be written to by the hypervisor at the time of asynchronous page fault (APF) diff --git a/Documentation/x86/orc-unwinder.txt b/Documentation/x86/orc-unwinder.txt index af0c9a4c65a6..cd4b29be29af 100644 --- a/Documentation/x86/orc-unwinder.txt +++ b/Documentation/x86/orc-unwinder.txt @@ -4,7 +4,7 @@ ORC unwinder Overview -------- -The kernel CONFIG_ORC_UNWINDER option enables the ORC unwinder, which is +The kernel CONFIG_UNWINDER_ORC option enables the ORC unwinder, which is similar in concept to a DWARF unwinder. The difference is that the format of the ORC data is much simpler than DWARF, which in turn allows the ORC unwinder to be much simpler and faster. diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.txt new file mode 100644 index 000000000000..5cd58439ad2d --- /dev/null +++ b/Documentation/x86/pti.txt @@ -0,0 +1,186 @@ +Overview +======== + +Page Table Isolation (pti, previously known as KAISER[1]) is a +countermeasure against attacks on the shared user/kernel address +space such as the "Meltdown" approach[2]. + +To mitigate this class of attacks, we create an independent set of +page tables for use only when running userspace applications. When +the kernel is entered via syscalls, interrupts or exceptions, the +page tables are switched to the full "kernel" copy. When the system +switches back to user mode, the user copy is used again. + +The userspace page tables contain only a minimal amount of kernel +data: only what is needed to enter/exit the kernel such as the +entry/exit functions themselves and the interrupt descriptor table +(IDT). 
There are a few strictly unnecessary things that get mapped +such as the first C function when entering an interrupt (see +comments in pti.c). + +This approach helps to ensure that side-channel attacks leveraging +the paging structures do not function when PTI is enabled. It can be +enabled by setting CONFIG_PAGE_TABLE_ISOLATION=y at compile time. +Once enabled at compile-time, it can be disabled at boot with the +'nopti' or 'pti=' kernel parameters (see kernel-parameters.txt). + +Page Table Management +===================== + +When PTI is enabled, the kernel manages two sets of page tables. +The first set is very similar to the single set which is present in +kernels without PTI. This includes a complete mapping of userspace +that the kernel can use for things like copy_to_user(). + +Although _complete_, the user portion of the kernel page tables is +crippled by setting the NX bit in the top level. This ensures +that any missed kernel->user CR3 switch will immediately crash +userspace upon executing its first instruction. + +The userspace page tables map only the kernel data needed to enter +and exit the kernel. This data is entirely contained in the 'struct +cpu_entry_area' structure which is placed in the fixmap which gives +each CPU's copy of the area a compile-time-fixed virtual address. + +For new userspace mappings, the kernel makes the entries in its +page tables like normal. The only difference is when the kernel +makes entries in the top (PGD) level. In addition to setting the +entry in the main kernel PGD, a copy of the entry is made in the +userspace page tables' PGD. + +This sharing at the PGD level also inherently shares all the lower +layers of the page tables. This leaves a single, shared set of +userspace page tables to manage. One PTE to lock, one set of +accessed bits, dirty bits, etc... + +Overhead +======== + +Protection against side-channel attacks is important. But, +this protection comes at a cost: + +1. Increased Memory Use + a. Each process now needs an order-1 PGD instead of order-0. + (Consumes an additional 4k per process). + b. The 'cpu_entry_area' structure must be 2MB in size and 2MB + aligned so that it can be mapped by setting a single PMD + entry. This consumes nearly 2MB of RAM once the kernel + is decompressed, but no space in the kernel image itself. + +2. Runtime Cost + a. CR3 manipulation to switch between the page table copies + must be done at interrupt, syscall, and exception entry + and exit (it can be skipped when the kernel is interrupted, + though.) Moves to CR3 are on the order of a hundred + cycles, and are required at every entry and exit. + b. A "trampoline" must be used for SYSCALL entry. This + trampoline depends on a smaller set of resources than the + non-PTI SYSCALL entry code, so requires mapping fewer + things into the userspace page tables. The downside is + that stacks must be switched at entry time. + c. Global pages are disabled for all kernel structures not + mapped into both kernel and userspace page tables. This + feature of the MMU allows different processes to share TLB + entries mapping the kernel. Losing the feature means more + TLB misses after a context switch. The actual loss of + performance is very small, however, never exceeding 1%. + d. Process Context IDentifiers (PCID) is a CPU feature that + allows us to skip flushing the entire TLB when switching page + tables by setting a special bit in CR3 when the page tables + are changed. 
This makes switching the page tables (at context + switch, or kernel entry/exit) cheaper. But, on systems with + PCID support, the context switch code must flush both the user + and kernel entries out of the TLB. The user PCID TLB flush is + deferred until the exit to userspace, minimizing the cost. + See intel.com/sdm for the gory PCID/INVPCID details. + e. The userspace page tables must be populated for each new + process. Even without PTI, the shared kernel mappings + are created by copying top-level (PGD) entries into each + new process. But, with PTI, there are now *two* kernel + mappings: one in the kernel page tables that maps everything + and one for the entry/exit structures. At fork(), we need to + copy both. + f. In addition to the fork()-time copying, there must also + be an update to the userspace PGD any time a set_pgd() is done + on a PGD used to map userspace. This ensures that the kernel + and userspace copies always map the same userspace + memory. + g. On systems without PCID support, each CR3 write flushes + the entire TLB. That means that each syscall, interrupt + or exception flushes the TLB. + h. INVPCID is a TLB-flushing instruction which allows flushing + of TLB entries for non-current PCIDs. Some systems support + PCIDs, but do not support INVPCID. On these systems, addresses + can only be flushed from the TLB for the current PCID. When + flushing a kernel address, we need to flush all PCIDs, so a + single kernel address flush will require a TLB-flushing CR3 + write upon the next use of every PCID. + +Possible Future Work +==================== +1. We can be more careful about not actually writing to CR3 + unless its value is actually changed. +2. Allow PTI to be enabled/disabled at runtime in addition to the + boot-time switching. + +Testing +======== + +To test stability of PTI, the following test procedure is recommended, +ideally doing all of these in parallel: + +1. Set CONFIG_DEBUG_ENTRY=y +2. Run several copies of all of the tools/testing/selftests/x86/ tests + (excluding MPX and protection_keys) in a loop on multiple CPUs for + several minutes. These tests frequently uncover corner cases in the + kernel entry code. In general, old kernels might cause these tests + themselves to crash, but they should never crash the kernel. +3. Run the 'perf' tool in a mode (top or record) that generates many + frequent performance monitoring non-maskable interrupts (see "NMI" + in /proc/interrupts). This exercises the NMI entry/exit code which + is known to trigger bugs in code paths that did not expect to be + interrupted, including nested NMIs. Using "-c" boosts the rate of + NMIs, and using two -c with separate counters encourages nested NMIs + and less deterministic behavior. + + while true; do perf record -c 10000 -e instructions,cycles -a sleep 10; done + +4. Launch a KVM virtual machine. +5. Run 32-bit binaries on systems supporting the SYSCALL instruction. + This has been a lightly-tested code path and needs extra scrutiny. + +Debugging +========= + +Bugs in PTI cause a few different signatures of crashes +that are worth noting here. + + * Failures of the selftests/x86 code. Usually a bug in one of the + more obscure corners of entry_64.S + * Crashes in early boot, especially around CPU bringup. Bugs + in the trampoline code or mappings cause these. + * Crashes at the first interrupt. Caused by bugs in entry_64.S, + like screwing up a page table switch. Also caused by + incorrectly mapping the IRQ handler entry code. + * Crashes at the first NMI. 
The NMI code is separate from main + interrupt handlers and can have bugs that do not affect + normal interrupts. Also caused by incorrectly mapping NMI + code. NMIs that interrupt the entry code must be very + careful and can be the cause of crashes that show up when + running perf. + * Kernel crashes at the first exit to userspace. entry_64.S + bugs, or failing to map some of the exit code. + * Crashes at first interrupt that interrupts userspace. The paths + in entry_64.S that return to userspace are sometimes separate + from the ones that return to the kernel. + * Double faults: overflowing the kernel stack because of page + faults upon page faults. Caused by touching non-pti-mapped + data in the entry code, or forgetting to switch to kernel + CR3 before calling into C functions which are not pti-mapped. + * Userspace segfaults early in boot, sometimes manifesting + as mount(8) failing to mount the rootfs. These have + tended to be TLB invalidation issues. Usually invalidating + the wrong PCID, or otherwise missing an invalidation. + +1. https://gruss.cc/files/kaiser.pdf +2. https://meltdownattack.com/meltdown.pdf diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index b0798e281aa6..ea91cb61a602 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -1,6 +1,4 @@ - - Virtual memory map with 4 level page tables: 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm @@ -14,13 +12,17 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB) ... unused hole ... ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB) ... unused hole ... + vaddr_end for KASLR +fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping +fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks ... unused hole ... ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space ... unused hole ... ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0 -ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space (variable) -ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls +ffffffffa0000000 - [fixmap start] (~1526 MB) module mapping space (variable) +[fixmap start] - ffffffffff5fffff kernel-internal fixmap range +ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole Virtual memory map with 5 level page tables: @@ -29,26 +31,31 @@ Virtual memory map with 5 level page tables: hole caused by [56:63] sign extension ff00000000000000 - ff0fffffffffffff (=52 bits) guard hole, reserved for hypervisor ff10000000000000 - ff8fffffffffffff (=55 bits) direct mapping of all phys. memory -ff90000000000000 - ff91ffffffffffff (=49 bits) hole -ff92000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space +ff90000000000000 - ff9fffffffffffff (=52 bits) LDT remap for PTI +ffa0000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space (12800 TB) ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB) ... unused hole ... -ffd8000000000000 - fff7ffffffffffff (=53 bits) kasan shadow memory (8PB) +ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB) +... unused hole ... + vaddr_end for KASLR +fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping ... unused hole ... 
ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
 ... unused hole ...
 ffffffff80000000 - ffffffff9fffffff (=512 MB)  kernel text mapping, from phys 0
-ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space
-ffffffffff600000 - ffffffffffdfffff (=8 MB)   vsyscalls
+ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space
+[fixmap start]   - ffffffffff5fffff kernel-internal fixmap range
+ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
 ffffffffffe00000 - ffffffffffffffff (=2 MB)  unused hole
 
 Architecture defines a 64-bit virtual address. Implementations can support
 less. Currently supported are 48- and 57-bit virtual addresses. Bits 63
-through to the most-significant implemented bit are set to either all ones
-or all zero. This causes hole between user space and kernel addresses.
+through to the most-significant implemented bit are sign extended.
+This causes a hole between user space and kernel addresses if you interpret
+them as unsigned.
 
 The direct mapping covers all memory in the system up to the highest
 memory address (this means in some cases it can also include PCI memory
@@ -58,19 +65,15 @@ vmalloc space is lazily synchronized into the different PML4/PML5 pages of
 the processes using the page fault handler, with init_top_pgt as
 reference.
 
-Current X86-64 implementations support up to 46 bits of address space (64 TB),
-which is our current limit. This expands into MBZ space in the page tables.
-
 We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
 memory window (this size is arbitrary, it can be raised later if needed).
 The mappings are not part of any other kernel PGD and are only available
 during EFI runtime calls.
 
-The module mapping space size changes based on the CONFIG requirements for the
-following fixmap section.
-
 Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
 physical memory, vmalloc/ioremap space and virtual memory map are randomized.
 Their order is preserved but their base will be offset early at boot time.
 
--Andi Kleen, Jul 2004
+Be very careful vs. KASLR when changing anything here. The KASLR address
+range must not overlap with anything except the KASAN shadow area, which is
+correct as KASAN disables KASLR.
diff --git a/MAINTAINERS b/MAINTAINERS index 2811a211632c..5201db8bf7f1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4839,6 +4839,21 @@ S: Maintained F: drivers/media/usb/dvb-usb-v2/dvb_usb* F: drivers/media/usb/dvb-usb-v2/usb_urb.c +DVC_TRACE BUS DRIVER +M: Traian Schiau +S: Maintained +F: drivers/bus/dvctrace.c +F: include/linux/dvctrace.h +F: Documentation/ABI/testing/sysfs-bus-dvctrace + +DVC_TRACE USB_GADGET DRIVER +M: Traian Schiau +S: Maintained +F: drivers/usb/gadget/function/f_dvctrace.c +F: drivers/usb/gadget/function/u_dvctrace.h +F: include/linux/usb/debug.h +F: Documentation/ABI/testing/configfs-usb-gadget-dvctrace + DYNAMIC DEBUG M: Jason Baron S: Maintained @@ -7110,6 +7125,12 @@ S: Supported F: Documentation/trace/intel_th.txt F: drivers/hwtracing/intel_th/ +INTEL(R) TRACE HUB TO USB-DVC.TRACE +M: Traian Schiau +S: Supported +F: drivers/hwtracing/intel_th/msu-dvc.c +F: Documentation/ABI/testing/sysfs-bus-dvctrace-devices-dvcith + INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT) M: Ning Sun L: tboot-devel@lists.sourceforge.net @@ -7670,16 +7691,6 @@ F: include/linux/kdb.h F: include/linux/kgdb.h F: kernel/debug/ -KMEMCHECK -M: Vegard Nossum -M: Pekka Enberg -S: Maintained -F: Documentation/dev-tools/kmemcheck.rst -F: arch/x86/include/asm/kmemcheck.h -F: arch/x86/mm/kmemcheck/ -F: include/linux/kmemcheck.h -F: mm/kmemcheck.c - KMEMLEAK M: Catalin Marinas S: Maintained @@ -9011,6 +9022,7 @@ MIPS GENERIC PLATFORM M: Paul Burton L: linux-mips@linux-mips.org S: Supported +F: Documentation/devicetree/bindings/power/mti,mips-cpc.txt F: arch/mips/generic/ F: arch/mips/tools/generic-board-config.sh @@ -10054,7 +10066,7 @@ M: Stephen Boyd L: linux-pm@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git -F: drivers/base/power/opp/ +F: drivers/opp/ F: include/linux/pm_opp.h F: Documentation/power/opp.txt F: Documentation/devicetree/bindings/opp/ @@ -11573,6 +11585,17 @@ F: include/net/rose.h F: include/uapi/linux/rose.h F: net/rose/ +RPMB SUBSYSTEM +M: Tomas Winkler +L: linux-kernel@vger.kernel.org +S: Supported +F: drivers/char/rpmb/* +F: include/uapi/linux/rpmb.h +F: include/linux/rpmb.h +F: Documentation/ABI/testing/sysfs-class-rpmb +F: Documentation/rpmb.rst +F: tools/rpmb/ + RTL2830 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org diff --git a/Makefile b/Makefile index ccd981892ef2..9479eb8c4ae0 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 0 +SUBLEVEL = 66 EXTRAVERSION = -NAME = Fearless Coyote +NAME = Petit Gorille # *DOCUMENTATION* # To see a list of typical targets execute "make help" @@ -11,6 +11,10 @@ NAME = Fearless Coyote # Comments in this file are targeted only to the developer, do not # expect to learn how to build the kernel reading this file. 
+# That's our default target when none is given on the command line +PHONY := _all +_all: + # o Do not use make's built-in rules and variables # (this increases performance and avoids hard-to-debug behaviour); # o Look for make include files relative to root of kernel src @@ -117,10 +121,6 @@ ifeq ("$(origin O)", "command line") KBUILD_OUTPUT := $(O) endif -# That's our default target when none is given on the command line -PHONY := _all -_all: - # Cancel implicit rules on top Makefile $(CURDIR)/Makefile Makefile: ; @@ -187,15 +187,6 @@ ifeq ("$(origin M)", "command line") KBUILD_EXTMOD := $(M) endif -# If building an external module we do not care about the all: rule -# but instead _all depend on modules -PHONY += all -ifeq ($(KBUILD_EXTMOD),) -_all: all -else -_all: modules -endif - ifeq ($(KBUILD_SRC),) # building in the source tree srctree := . @@ -207,6 +198,9 @@ else srctree := $(KBUILD_SRC) endif endif + +export KBUILD_CHECKSRC KBUILD_EXTMOD KBUILD_SRC + objtree := . src := $(srctree) obj := $(objtree) @@ -215,6 +209,74 @@ VPATH := $(srctree)$(if $(KBUILD_EXTMOD),:$(KBUILD_EXTMOD)) export srctree objtree VPATH +# To make sure we do not include .config for any of the *config targets +# catch them early, and hand them over to scripts/kconfig/Makefile +# It is allowed to specify more targets when calling make, including +# mixing *config targets and build targets. +# For example 'make oldconfig all'. +# Detect when mixed targets is specified, and make a second invocation +# of make so .config is not included in this case either (for *config). + +version_h := include/generated/uapi/linux/version.h +old_version_h := include/linux/version.h + +no-dot-config-targets := clean mrproper distclean \ + cscope gtags TAGS tags help% %docs check% coccicheck \ + $(version_h) headers_% archheaders archscripts \ + kernelversion %src-pkg + +config-targets := 0 +mixed-targets := 0 +dot-config := 1 + +ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),) + ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),) + dot-config := 0 + endif +endif + +ifeq ($(KBUILD_EXTMOD),) + ifneq ($(filter config %config,$(MAKECMDGOALS)),) + config-targets := 1 + ifneq ($(words $(MAKECMDGOALS)),1) + mixed-targets := 1 + endif + endif +endif +# install and modules_install need also be processed one by one +ifneq ($(filter install,$(MAKECMDGOALS)),) + ifneq ($(filter modules_install,$(MAKECMDGOALS)),) + mixed-targets := 1 + endif +endif + +ifeq ($(mixed-targets),1) +# =========================================================================== +# We're called with mixed targets (*config and build targets). +# Handle them one by one. + +PHONY += $(MAKECMDGOALS) __build_one_by_one + +$(filter-out __build_one_by_one, $(MAKECMDGOALS)): __build_one_by_one + @: + +__build_one_by_one: + $(Q)set -e; \ + for i in $(MAKECMDGOALS); do \ + $(MAKE) -f $(srctree)/Makefile $$i; \ + done + +else + +# We need some generic definitions (do not try to remake the file). +scripts/Kbuild.include: ; +include scripts/Kbuild.include + +# Read KERNELRELEASE from include/config/kernel.release (if it exists) +KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) +KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION) +export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION + # SUBARCH tells the usermode build what the underlying arch is. 
That is set # first, and if a usermode build is happening, the "ARCH=um" on the command # line overrides the setting of ARCH below. If a native build is happening, @@ -307,48 +369,10 @@ HOSTCXXFLAGS := -O2 $(HOST_LFS_CFLAGS) HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) HOST_LOADLIBES := $(HOST_LFS_LIBS) -ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1) -HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \ - -Wno-missing-field-initializers -fno-delete-null-pointer-checks -endif - -# Decide whether to build built-in, modular, or both. -# Normally, just do built-in. - -KBUILD_MODULES := -KBUILD_BUILTIN := 1 - -# If we have only "make modules", don't compile built-in objects. -# When we're building modules with modversions, we need to consider -# the built-in objects during the descend as well, in order to -# make sure the checksums are up to date before we record them. - -ifeq ($(MAKECMDGOALS),modules) - KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) -endif - -# If we have "make modules", compile modules -# in addition to whatever we do anyway. -# Just "make" or "make all" shall build modules as well - -ifneq ($(filter all _all modules,$(MAKECMDGOALS)),) - KBUILD_MODULES := 1 -endif - -ifeq ($(MAKECMDGOALS),) - KBUILD_MODULES := 1 -endif - -export KBUILD_MODULES KBUILD_BUILTIN -export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD - -# We need some generic definitions (do not try to remake the file). -scripts/Kbuild.include: ; -include scripts/Kbuild.include - # Make variables (CC, etc...) AS = $(CROSS_COMPILE)as LD = $(CROSS_COMPILE)ld +LDGOLD = $(CROSS_COMPILE)ld.gold CC = $(CROSS_COMPILE)gcc CPP = $(CC) -E AR = $(CROSS_COMPILE)ar @@ -373,9 +397,6 @@ LDFLAGS_MODULE = CFLAGS_KERNEL = AFLAGS_KERNEL = LDFLAGS_vmlinux = -CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,) -CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,) - # Use USERINCLUDE when you must reference the UAPI directories only. 
USERINCLUDE := \ @@ -394,34 +415,28 @@ LINUXINCLUDE := \ -I$(objtree)/include \ $(USERINCLUDE) -KBUILD_CPPFLAGS := -D__KERNEL__ - +KBUILD_AFLAGS := -D__ASSEMBLY__ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common -fshort-wchar \ -Werror-implicit-function-declaration \ -Wno-format-security \ - -std=gnu89 $(call cc-option,-fno-PIE) - - + -std=gnu89 +KBUILD_CPPFLAGS := -D__KERNEL__ KBUILD_AFLAGS_KERNEL := KBUILD_CFLAGS_KERNEL := -KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE) KBUILD_AFLAGS_MODULE := -DMODULE KBUILD_CFLAGS_MODULE := -DMODULE KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds +GCC_PLUGINS_CFLAGS := -# Read KERNELRELEASE from include/config/kernel.release (if it exists) -KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) -KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION) - -export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC export CPP AR NM STRIP OBJCOPY OBJDUMP HOSTLDFLAGS HOST_LOADLIBES export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS -export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV CFLAGS_KASAN CFLAGS_UBSAN +export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE +export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL @@ -463,73 +478,23 @@ ifneq ($(KBUILD_SRC),) $(srctree) $(objtree) $(VERSION) $(PATCHLEVEL) endif -# Support for using generic headers in asm-generic -PHONY += asm-generic uapi-asm-generic -asm-generic: uapi-asm-generic - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \ - src=asm obj=arch/$(SRCARCH)/include/generated/asm -uapi-asm-generic: - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \ - src=uapi/asm obj=arch/$(SRCARCH)/include/generated/uapi/asm - -# To make sure we do not include .config for any of the *config targets -# catch them early, and hand them over to scripts/kconfig/Makefile -# It is allowed to specify more targets when calling make, including -# mixing *config targets and build targets. -# For example 'make oldconfig all'. -# Detect when mixed targets is specified, and make a second invocation -# of make so .config is not included in this case either (for *config). - -version_h := include/generated/uapi/linux/version.h -old_version_h := include/linux/version.h - -no-dot-config-targets := clean mrproper distclean \ - cscope gtags TAGS tags help% %docs check% coccicheck \ - $(version_h) headers_% archheaders archscripts \ - kernelversion %src-pkg - -config-targets := 0 -mixed-targets := 0 -dot-config := 1 - -ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),) - ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),) - dot-config := 0 - endif +ifeq ($(cc-name),clang) +ifneq ($(CROSS_COMPILE),) +CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%)) +GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..) 
endif - -ifeq ($(KBUILD_EXTMOD),) - ifneq ($(filter config %config,$(MAKECMDGOALS)),) - config-targets := 1 - ifneq ($(words $(MAKECMDGOALS)),1) - mixed-targets := 1 - endif - endif +ifneq ($(GCC_TOOLCHAIN),) +CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN) endif -# install and modules_install need also be processed one by one -ifneq ($(filter install,$(MAKECMDGOALS)),) - ifneq ($(filter modules_install,$(MAKECMDGOALS)),) - mixed-targets := 1 - endif +KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) +KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) endif -ifeq ($(mixed-targets),1) -# =========================================================================== -# We're called with mixed targets (*config and build targets). -# Handle them one by one. +RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register +RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk +RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG))) +export RETPOLINE_CFLAGS -PHONY += $(MAKECMDGOALS) __build_one_by_one - -$(filter-out __build_one_by_one, $(MAKECMDGOALS)): __build_one_by_one - @: - -__build_one_by_one: - $(Q)set -e; \ - for i in $(MAKECMDGOALS); do \ - $(MAKE) -f $(srctree)/Makefile $$i; \ - done - -else ifeq ($(config-targets),1) # =========================================================================== # *config targets only - make sure prerequisites are updated, and descend @@ -552,6 +517,44 @@ else # Build targets only - this includes vmlinux, arch specific targets, clean # targets and others. In general all targets except *config targets. +# If building an external module we do not care about the all: rule +# but instead _all depend on modules +PHONY += all +ifeq ($(KBUILD_EXTMOD),) +_all: all +else +_all: modules +endif + +# Decide whether to build built-in, modular, or both. +# Normally, just do built-in. + +KBUILD_MODULES := +KBUILD_BUILTIN := 1 + +# If we have only "make modules", don't compile built-in objects. +# When we're building modules with modversions, we need to consider +# the built-in objects during the descend as well, in order to +# make sure the checksums are up to date before we record them. + +ifeq ($(MAKECMDGOALS),modules) + KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) +endif + +# If we have "make modules", compile modules +# in addition to whatever we do anyway. +# Just "make" or "make all" shall build modules as well + +ifneq ($(filter all _all modules,$(MAKECMDGOALS)),) + KBUILD_MODULES := 1 +endif + +ifeq ($(MAKECMDGOALS),) + KBUILD_MODULES := 1 +endif + +export KBUILD_MODULES KBUILD_BUILTIN + ifeq ($(KBUILD_EXTMOD),) # Additional helpers built in scripts/ # Carefully list dependencies so we do not try to build scripts twice @@ -622,6 +625,26 @@ endif # Defaults to vmlinux, but the arch makefile usually adds further targets all: vmlinux +KBUILD_CFLAGS += $(call cc-option,-fno-PIE) +KBUILD_AFLAGS += $(call cc-option,-fno-PIE) +CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,) +CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,) +export CFLAGS_GCOV CFLAGS_KCOV + +# Make toolchain changes before including arch/$(SRCARCH)/Makefile to ensure +# ar/cc/ld-* macros return correct values. 
+ifdef CONFIG_LTO_CLANG +# use GNU gold with LLVMgold for LTO linking, and LD for vmlinux_link +LDFINAL_vmlinux := $(LD) +LD := $(LDGOLD) +LDFLAGS += -plugin LLVMgold.so +# use llvm-ar for building symbol tables from IR files, and llvm-dis instead +# of objdump for processing symbol versions and exports +LLVM_AR := llvm-ar +LLVM_DIS := llvm-dis +export LLVM_AR LLVM_DIS +endif + # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default # values of the respective KBUILD_* variables ARCH_CPPFLAGS := @@ -634,6 +657,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) +KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias) ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += $(call cc-option,-Oz,-Os) @@ -698,7 +722,8 @@ KBUILD_CFLAGS += $(stackp-flag) ifeq ($(cc-name),clang) ifneq ($(CROSS_COMPILE),) -CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%)) +CLANG_TRIPLE ?= $(CROSS_COMPILE) +CLANG_TARGET := --target=$(notdir $(CLANG_TRIPLE:%-=%)) GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..) endif ifneq ($(GCC_TOOLCHAIN),) @@ -707,10 +732,10 @@ endif KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,) -KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable) KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) +KBUILD_CFLAGS += $(call cc-disable-warning, duplicate-decl-specifier) # Quiet clang warning: comparison of unsigned expression < 0 is always false KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare) # CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the @@ -725,9 +750,9 @@ else # These warnings generated too much noise in a regular build. # Use make W=1 to enable them (see scripts/Makefile.extrawarn) KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable) -KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) endif +KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) ifdef CONFIG_FRAME_POINTER KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls else @@ -788,6 +813,53 @@ KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,) KBUILD_CFLAGS += $(call cc-option,-fdata-sections,) endif +ifdef CONFIG_LTO_CLANG +lto-clang-flags := -flto -fvisibility=hidden + +# allow disabling only clang LTO where needed +DISABLE_LTO_CLANG := -fno-lto -fvisibility=default +export DISABLE_LTO_CLANG +endif + +ifdef CONFIG_LTO +lto-flags := $(lto-clang-flags) +KBUILD_CFLAGS += $(lto-flags) + +DISABLE_LTO := $(DISABLE_LTO_CLANG) +export DISABLE_LTO + +# LDFINAL_vmlinux and LDFLAGS_FINAL_vmlinux can be set to override +# the linker and flags for vmlinux_link. 
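+# e.g. (hypothetical) an arch Makefile could add
+#   LDFLAGS_FINAL_vmlinux += -Map=vmlinux.map
+# to pass extra options to that final link only.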
+export LDFINAL_vmlinux LDFLAGS_FINAL_vmlinux +endif + +ifdef CONFIG_CFI_CLANG +cfi-clang-flags += -fsanitize=cfi +DISABLE_CFI_CLANG := -fno-sanitize=cfi +ifdef CONFIG_MODULES +cfi-clang-flags += -fsanitize-cfi-cross-dso +DISABLE_CFI_CLANG += -fno-sanitize-cfi-cross-dso +endif +ifdef CONFIG_CFI_PERMISSIVE +cfi-clang-flags += -fsanitize-recover=cfi -fno-sanitize-trap=cfi +endif + +# also disable CFI when LTO is disabled +DISABLE_LTO_CLANG += $(DISABLE_CFI_CLANG) +# allow disabling only clang CFI where needed +export DISABLE_CFI_CLANG +endif + +ifdef CONFIG_CFI +# cfi-flags are re-tested in prepare-compiler-check +cfi-flags := $(cfi-clang-flags) +KBUILD_CFLAGS += $(cfi-flags) + +DISABLE_CFI := $(DISABLE_CFI_CLANG) +DISABLE_LTO += $(DISABLE_CFI) +export DISABLE_CFI +endif + # arch Makefile may override CC so keep this after arch Makefile is included NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include) CHECKFLAGS += $(NOSTDINC_FLAGS) @@ -801,6 +873,18 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) +# clang sets -fmerge-all-constants by default as optimization, but this +# is non-conforming behavior for C and in fact breaks the kernel, so we +# need to disable it here generally. +KBUILD_CFLAGS += $(call cc-option,-fno-merge-all-constants) + +# for gcc -fno-merge-all-constants disables everything, but it is fine +# to have actual conforming behavior enabled. +KBUILD_CFLAGS += $(call cc-option,-fmerge-constants) + +# Make sure -fstack-check isn't enabled (like gentoo apparently did) +KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,) + # conserve stack if available KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) @@ -934,8 +1018,8 @@ ifdef CONFIG_STACK_VALIDATION ifeq ($(has_libelf),1) objtool_target := tools/objtool FORCE else - ifdef CONFIG_ORC_UNWINDER - $(error "Cannot generate ORC metadata for CONFIG_ORC_UNWINDER=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel") + ifdef CONFIG_UNWINDER_ORC + $(error "Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel") else $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel") endif @@ -1071,6 +1155,15 @@ prepare0: archprepare gcc-plugins # All the preparing.. prepare: prepare0 prepare-objtool +# Support for using generic headers in asm-generic +PHONY += asm-generic uapi-asm-generic +asm-generic: uapi-asm-generic + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \ + src=asm obj=arch/$(SRCARCH)/include/generated/asm +uapi-asm-generic: + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \ + src=uapi/asm obj=arch/$(SRCARCH)/include/generated/uapi/asm + PHONY += prepare-objtool prepare-objtool: $(objtool_target) @@ -1084,6 +1177,22 @@ prepare-objtool: $(objtool_target) # CC_STACKPROTECTOR_STRONG! 
Why did it build with _REGULAR?!")
 PHONY += prepare-compiler-check
 prepare-compiler-check: FORCE
+# Make sure we're using a supported toolchain with LTO_CLANG
+ifdef CONFIG_LTO_CLANG
+  ifneq ($(call clang-ifversion, -ge, 0500, y), y)
+	@echo Cannot use CONFIG_LTO_CLANG: requires clang 5.0 or later >&2 && exit 1
+  endif
+  ifneq ($(call gold-ifversion, -ge, 112000000, y), y)
+	@echo Cannot use CONFIG_LTO_CLANG: requires GNU gold 1.12 or later >&2 && exit 1
+  endif
+endif
+# Make sure compiler supports LTO flags
+ifdef lto-flags
+  ifeq ($(call cc-option, $(lto-flags)),)
+	@echo Cannot use CONFIG_LTO: $(lto-flags) not supported by compiler \
+		>&2 && exit 1
+  endif
+endif
 # Make sure compiler supports requested stack protector flag.
 ifdef stackp-name
   ifeq ($(call cc-option, $(stackp-flag)),)
@@ -1097,6 +1206,11 @@ ifdef stackp-check
 	@echo Cannot use CONFIG_CC_STACKPROTECTOR_$(stackp-name): \
 		$(stackp-flag) available but compiler is broken >&2 && exit 1
   endif
+endif
+ifdef cfi-flags
+  ifeq ($(call cc-option, $(cfi-flags)),)
+	@echo Cannot use CONFIG_CFI: $(cfi-flags) not supported by compiler >&2 && exit 1
+  endif
 endif
 	@:
@@ -1557,7 +1671,8 @@ clean: $(clean-dirs)
 		-o -name modules.builtin -o -name '.tmp_*.o.*' \
 		-o -name '*.c.[012]*.*' \
 		-o -name '*.ll' \
-		-o -name '*.gcno' \) -type f -print | xargs rm -f
+		-o -name '*.gcno' \
+		-o -name '*.*.symversions' \) -type f -print | xargs rm -f

 # Generate tags for editors
 # ---------------------------------------------------------------------------
diff --git a/arch/Kconfig b/arch/Kconfig
index 057370a0ac4e..afb20bf4ad40 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -13,6 +13,9 @@ config KEXEC_CORE
 config HAVE_IMA_KEXEC
 	bool

+config HOTPLUG_SMT
+	bool
+
 config OPROFILE
 	tristate "OProfile system profiling"
 	depends on PROFILING
@@ -91,7 +94,7 @@ config STATIC_KEYS_SELFTEST
 config OPTPROBES
 	def_bool y
 	depends on KPROBES && HAVE_OPTPROBES
-	depends on !PREEMPT
+	select TASKS_RCU if PREEMPT

 config KPROBES_ON_FTRACE
 	def_bool y
@@ -605,6 +608,74 @@ config LD_DEAD_CODE_DATA_ELIMINATION
 	  sections (e.g., '.text.init'). Typically '.' in section names
 	  is used to distinguish them from label names / C identifiers.

+config LTO
+	def_bool n
+
+config ARCH_SUPPORTS_LTO_CLANG
+	bool
+	help
+	  An architecture should select this option if it supports:
+	  - compiling with clang,
+	  - compiling inline assembly with clang's integrated assembler,
+	  - and linking with either lld or GNU gold w/ LLVMgold.
+
+choice
+	prompt "Link-Time Optimization (LTO) (EXPERIMENTAL)"
+	default LTO_NONE
+	help
+	  This option turns on Link-Time Optimization (LTO).
+
+config LTO_NONE
+	bool "None"
+
+config LTO_CLANG
+	bool "Use clang Link Time Optimization (LTO) (EXPERIMENTAL)"
+	depends on ARCH_SUPPORTS_LTO_CLANG
+	depends on !FTRACE_MCOUNT_RECORD || HAVE_C_RECORDMCOUNT
+	select LTO
+	select THIN_ARCHIVES
+	select LD_DEAD_CODE_DATA_ELIMINATION
+	help
+	  This option enables clang's Link Time Optimization (LTO), which allows
+	  the compiler to optimize the kernel globally at link time. If you
+	  enable this option, the compiler generates LLVM IR instead of object
+	  files, and the actual compilation from IR occurs at the LTO link step,
+	  which may take several minutes.
+
+	  If you select this option, you must compile the kernel with clang >=
+	  5.0 (make CC=clang) and GNU gold from binutils >= 2.27, and have the
+	  LLVMgold plug-in in LD_LIBRARY_PATH.
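+
+	  As a sketch of what such a build does (hypothetical file names),
+	  the per-file compile emits LLVM bitcode and machine code is only
+	  generated at the link step:
+
+	    clang -flto -fvisibility=hidden -c foo.c -o foo.o
+	    ld.gold -plugin LLVMgold.so foo.o bar.o ...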
+ +endchoice + +config CFI + bool + +config CFI_PERMISSIVE + bool "Use CFI in permissive mode" + depends on CFI + help + When selected, Control Flow Integrity (CFI) violations result in a + warning instead of a kernel panic. This option is useful for finding + CFI violations in drivers during development. + +config CFI_CLANG + bool "Use clang Control Flow Integrity (CFI) (EXPERIMENTAL)" + depends on LTO_CLANG + depends on KALLSYMS + select CFI + help + This option enables clang Control Flow Integrity (CFI), which adds + runtime checking for indirect function calls. + +config CFI_CLANG_SHADOW + bool "Use CFI shadow to speed up cross-module checks" + default y + depends on CFI_CLANG + help + If you select this option, the kernel builds a fast look-up table of + CFI check functions in loaded modules to reduce overhead. + config HAVE_ARCH_WITHIN_STACK_FRAMES bool help diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h index d2e4da93e68c..ca3322536f72 100644 --- a/arch/alpha/include/asm/futex.h +++ b/arch/alpha/include/asm/futex.h @@ -20,8 +20,8 @@ "3: .subsection 2\n" \ "4: br 1b\n" \ " .previous\n" \ - EXC(1b,3b,%1,$31) \ - EXC(2b,3b,%1,$31) \ + EXC(1b,3b,$31,%1) \ + EXC(2b,3b,$31,%1) \ : "=&r" (oldval), "=&r"(ret) \ : "r" (uaddr), "r"(oparg) \ : "memory") @@ -82,8 +82,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, "3: .subsection 2\n" "4: br 1b\n" " .previous\n" - EXC(1b,3b,%0,$31) - EXC(2b,3b,%0,$31) + EXC(1b,3b,$31,%0) + EXC(2b,3b,$31,%0) : "+r"(ret), "=&r"(prev), "=&r"(cmp) : "r"(uaddr), "r"((long)(int)oldval), "r"(newval) : "memory"); diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h index 68dfb3cb7145..02a7c2fa6106 100644 --- a/arch/alpha/include/asm/xchg.h +++ b/arch/alpha/include/asm/xchg.h @@ -12,6 +12,10 @@ * Atomic exchange. * Since it can be used to implement critical sections * it must clobber "memory" (also for interrupts in UP). + * + * The leading and the trailing memory barriers guarantee that these + * operations are fully ordered. + * */ static inline unsigned long @@ -19,6 +23,7 @@ ____xchg(_u8, volatile char *m, unsigned long val) { unsigned long ret, tmp, addr64; + smp_mb(); __asm__ __volatile__( " andnot %4,7,%3\n" " insbl %1,%4,%1\n" @@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val) { unsigned long ret, tmp, addr64; + smp_mb(); __asm__ __volatile__( " andnot %4,7,%3\n" " inswl %1,%4,%1\n" @@ -67,6 +73,7 @@ ____xchg(_u32, volatile int *m, unsigned long val) { unsigned long dummy; + smp_mb(); __asm__ __volatile__( "1: ldl_l %0,%4\n" " bis $31,%3,%1\n" @@ -87,6 +94,7 @@ ____xchg(_u64, volatile long *m, unsigned long val) { unsigned long dummy; + smp_mb(); __asm__ __volatile__( "1: ldq_l %0,%4\n" " bis $31,%3,%1\n" @@ -128,10 +136,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size) * store NEW in MEM. Return the initial value in MEM. Success is * indicated by comparing RETURN with OLD. * - * The memory barrier should be placed in SMP only when we actually - * make the change. If we don't change anything (so if the returned - * prev is equal to old) then we aren't acquiring anything new and - * we don't need any memory barrier as far I can tell. + * The leading and the trailing memory barriers guarantee that these + * operations are fully ordered. + * + * The trailing memory barrier is placed in SMP unconditionally, in + * order to guarantee that dependency ordering is preserved when a + * dependency is headed by an unsuccessful operation. 
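+ *
+ * For instance (an illustrative sketch, not code from this change):
+ *
+ *	old = cmpxchg(&x, 0, 1);	-- fails, returns the current value
+ *	r = *table[old];		-- load depends on the returned value
+ *
+ * The dependent load must still be ordered after the read performed by
+ * the failed cmpxchg, which the unconditional trailing barrier ensures.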
*/ static inline unsigned long @@ -139,6 +149,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) { unsigned long prev, tmp, cmp, addr64; + smp_mb(); __asm__ __volatile__( " andnot %5,7,%4\n" " insbl %1,%5,%1\n" @@ -150,8 +161,8 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) " or %1,%2,%2\n" " stq_c %2,0(%4)\n" " beq %2,3f\n" - __ASM__MB "2:\n" + __ASM__MB ".subsection 2\n" "3: br 1b\n" ".previous" @@ -166,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) { unsigned long prev, tmp, cmp, addr64; + smp_mb(); __asm__ __volatile__( " andnot %5,7,%4\n" " inswl %1,%5,%1\n" @@ -177,8 +189,8 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) " or %1,%2,%2\n" " stq_c %2,0(%4)\n" " beq %2,3f\n" - __ASM__MB "2:\n" + __ASM__MB ".subsection 2\n" "3: br 1b\n" ".previous" @@ -193,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new) { unsigned long prev, cmp; + smp_mb(); __asm__ __volatile__( "1: ldl_l %0,%5\n" " cmpeq %0,%3,%1\n" @@ -200,8 +213,8 @@ ____cmpxchg(_u32, volatile int *m, int old, int new) " mov %4,%1\n" " stl_c %1,%2\n" " beq %1,3f\n" - __ASM__MB "2:\n" + __ASM__MB ".subsection 2\n" "3: br 1b\n" ".previous" @@ -216,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) { unsigned long prev, cmp; + smp_mb(); __asm__ __volatile__( "1: ldq_l %0,%5\n" " cmpeq %0,%3,%1\n" @@ -223,8 +237,8 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) " mov %4,%1\n" " stq_c %1,%2\n" " beq %1,3f\n" - __ASM__MB "2:\n" + __ASM__MB ".subsection 2\n" "3: br 1b\n" ".previous" diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index be14f16149d5..065fb372e355 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -112,4 +112,7 @@ #define SO_ZEROCOPY 60 +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/alpha/kernel/console.c b/arch/alpha/kernel/console.c index 8e9a41966881..5476279329a6 100644 --- a/arch/alpha/kernel/console.c +++ b/arch/alpha/kernel/console.c @@ -21,6 +21,7 @@ struct pci_controller *pci_vga_hose; static struct resource alpha_vga = { .name = "alpha-vga+", + .flags = IORESOURCE_IO, .start = 0x3C0, .end = 0x3DF }; diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index ce3a675c0c4b..a48976dc9bcd 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -964,8 +964,8 @@ static inline long put_tv32(struct timeval32 __user *o, struct timeval *i) { return copy_to_user(o, &(struct timeval32){ - .tv_sec = o->tv_sec, - .tv_usec = o->tv_usec}, + .tv_sec = i->tv_sec, + .tv_usec = i->tv_usec}, sizeof(struct timeval32)); } @@ -1183,13 +1183,10 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, struct rusage32 __user *, ur) { - unsigned int status = 0; struct rusage r; - long err = kernel_wait4(pid, &status, options, &r); + long err = kernel_wait4(pid, ustatus, options, &r); if (err <= 0) return err; - if (put_user(status, ustatus)) - return -EFAULT; if (!ur) return err; if (put_tv32(&ur->ru_utime, &r.ru_utime)) diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h index 26231601630e..f332d88ffaff 100644 --- a/arch/alpha/kernel/pci_impl.h +++ b/arch/alpha/kernel/pci_impl.h @@ -144,7 +144,8 @@ struct pci_iommu_arena }; #if 
defined(CONFIG_ALPHA_SRM) && \ - (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA)) + (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA) || \ + defined(CONFIG_ALPHA_AVANTI)) # define NEED_SRM_SAVE_RESTORE #else # undef NEED_SRM_SAVE_RESTORE diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 74bfb1f2d68e..3a885253f486 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -269,12 +269,13 @@ copy_thread(unsigned long clone_flags, unsigned long usp, application calling fork. */ if (clone_flags & CLONE_SETTLS) childti->pcb.unique = regs->r20; + else + regs->r20 = 0; /* OSF/1 has some strange fork() semantics. */ childti->pcb.usp = usp ?: rdusp(); *childregs = *regs; childregs->r0 = 0; childregs->r19 = 0; childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */ - regs->r20 = 0; stack = ((struct switch_stack *) regs) - 1; *childstack = *stack; childstack->r26 = (unsigned long) ret_from_fork; diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c index 37bd6d9b8eb9..a6bdc1da47ad 100644 --- a/arch/alpha/kernel/sys_sio.c +++ b/arch/alpha/kernel/sys_sio.c @@ -102,6 +102,15 @@ sio_pci_route(void) alpha_mv.sys.sio.route_tab); } +static bool sio_pci_dev_irq_needs_level(const struct pci_dev *dev) +{ + if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) && + (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA)) + return false; + + return true; +} + static unsigned int __init sio_collect_irq_levels(void) { @@ -110,8 +119,7 @@ sio_collect_irq_levels(void) /* Iterate through the devices, collecting IRQ levels. */ for_each_pci_dev(dev) { - if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) && - (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA)) + if (!sio_pci_dev_irq_needs_level(dev)) continue; if (dev->irq) @@ -120,8 +128,7 @@ sio_collect_irq_levels(void) return level_bits; } -static void __init -sio_fixup_irq_levels(unsigned int level_bits) +static void __sio_fixup_irq_levels(unsigned int level_bits, bool reset) { unsigned int old_level_bits; @@ -139,12 +146,21 @@ sio_fixup_irq_levels(unsigned int level_bits) */ old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8); - level_bits |= (old_level_bits & 0x71ff); + if (reset) + old_level_bits &= 0x71ff; + + level_bits |= old_level_bits; outb((level_bits >> 0) & 0xff, 0x4d0); outb((level_bits >> 8) & 0xff, 0x4d1); } +static inline void +sio_fixup_irq_levels(unsigned int level_bits) +{ + __sio_fixup_irq_levels(level_bits, true); +} + static inline int noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { @@ -181,7 +197,14 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5; int irq = COMMON_TABLE_LOOKUP, tmp; tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq); - return irq >= 0 ? tmp : -1; + + irq = irq >= 0 ? 
tmp : -1; + + /* Fixup IRQ level if an actual IRQ mapping is detected */ + if (sio_pci_dev_irq_needs_level(dev) && irq >= 0) + __sio_fixup_irq_levels(1 << irq, false); + + return irq; } static inline int diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 4bd99a7b1c41..f43bd05dede2 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -160,11 +160,16 @@ void show_stack(struct task_struct *task, unsigned long *sp) for(i=0; i < kstack_depth_to_print; i++) { if (((long) stack & (THREAD_SIZE-1)) == 0) break; - if (i && ((i % 4) == 0)) - printk("\n "); - printk("%016lx ", *stack++); + if ((i % 4) == 0) { + if (i) + pr_cont("\n"); + printk(" "); + } else { + pr_cont(" "); + } + pr_cont("%016lx", *stack++); } - printk("\n"); + pr_cont("\n"); dik_show_trace(sp); } diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index c84e67fdea09..5c8caf85c350 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -408,7 +408,7 @@ config ARC_HAS_DIV_REM config ARC_HAS_ACCL_REGS bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)" - default n + default y help Depending on the configuration, CPU can contain accumulator reg-pair (also referred to as r58:r59). These can also be used by gcc as GPR so @@ -487,7 +487,6 @@ config ARC_CURR_IN_REG config ARC_EMUL_UNALIGNED bool "Emulate unaligned memory access (userspace only)" - default N select SYSCTL_ARCH_UNALIGN_NO_WARN select SYSCTL_ARCH_UNALIGN_ALLOW depends on ISA_ARCOMPACT diff --git a/arch/arc/boot/.gitignore b/arch/arc/boot/.gitignore index 5246969a20c5..c4c5fd529c25 100644 --- a/arch/arc/boot/.gitignore +++ b/arch/arc/boot/.gitignore @@ -1,2 +1 @@ -*.dtb* uImage diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index ec7c849a5c8e..a8242362e551 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index 63d3cf69e0b0..ef3c31cd7737 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index f613ecac14a7..1757ac9cecbc 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig index db04ea4dd2d9..aa8240a92b60 100644 --- a/arch/arc/configs/haps_hs_defconfig +++ b/arch/arc/configs/haps_hs_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_EXPERT=y CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig index 
3507be2af6fe..bc5a24ea6cf7 100644 --- a/arch/arc/configs/haps_hs_smp_defconfig +++ b/arch/arc/configs/haps_hs_smp_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 7b8f8faf8a24..762b1fcd93dc 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig @@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y # CONFIG_VM_EVENT_COUNTERS is not set diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig index 6dff83a238b8..b1a78222699c 100644 --- a/arch/arc/configs/nsim_700_defconfig +++ b/arch/arc/configs/nsim_700_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig index 31ee51b987e7..217d7ea3c956 100644 --- a/arch/arc/configs/nsim_hs_defconfig +++ b/arch/arc/configs/nsim_hs_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig index 8d3b1f67cae4..e733e4f1a320 100644 --- a/arch/arc/configs/nsim_hs_smp_defconfig +++ b/arch/arc/configs/nsim_hs_smp_defconfig @@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index 6168ce2ac2ef..14377b8234f7 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index a70bdeb2b3fd..7e61c923a3cd 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ b/arch/arc/configs/nsimosci_hs_defconfig @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index ef96406c446e..299fbe8003b2 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig @@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/" CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set CONFIG_KPROBES=y diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h index 
ea022d47896c..21ec82466d62 100644 --- a/arch/arc/include/asm/bug.h +++ b/arch/arc/include/asm/bug.h @@ -23,7 +23,8 @@ void die(const char *str, struct pt_regs *regs, unsigned long address); #define BUG() do { \ pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ - dump_stack(); \ + barrier_before_unreachable(); \ + __builtin_trap(); \ } while (0) #define HAVE_ARCH_BUG diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h index 109baa06831c..09ddddf71cc5 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h @@ -105,7 +105,7 @@ typedef pte_t * pgtable_t; #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) /* Default Permissions for stack/heaps pages (Non Executable) */ -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE) +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #define WANT_PAGE_VIRTUAL 1 diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index 08fe33830d4b..77676e18da69 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h @@ -379,7 +379,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, /* Decode a PTE containing swap "identifier "into constituents */ #define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f) -#define __swp_offset(pte_lookalike) ((pte_lookalike).val << 13) +#define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13) /* NOPs, to keep generic kernel happy */ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index f35974ee7264..c9173c02081c 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -668,6 +668,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) return 0; __asm__ __volatile__( + " mov lp_count, %5 \n" " lp 3f \n" "1: ldb.ab %3, [%2, 1] \n" " breq.d %3, 0, 3f \n" @@ -684,8 +685,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) " .word 1b, 4b \n" " .previous \n" : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) - : "g"(-EFAULT), "l"(count) - : "memory"); + : "g"(-EFAULT), "r"(count) + : "lp_count", "lp_start", "lp_end", "memory"); return res; } diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index f61a52b01625..5fe84e481654 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c @@ -22,10 +22,79 @@ static DEFINE_RAW_SPINLOCK(mcip_lock); static char smp_cpuinfo_buf[128]; +/* + * Set mask to halt GFRC if any online core in SMP cluster is halted. + * Only works for ARC HS v3.0+, on earlier versions has no effect. + */ +static void mcip_update_gfrc_halt_mask(int cpu) +{ + struct bcr_generic gfrc; + unsigned long flags; + u32 gfrc_halt_mask; + + READ_BCR(ARC_REG_GFRC_BUILD, gfrc); + + /* + * CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in + * GFRC 0x3 version. + */ + if (gfrc.ver < 0x3) + return; + + raw_spin_lock_irqsave(&mcip_lock, flags); + + __mcip_cmd(CMD_GFRC_READ_CORE, 0); + gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK); + gfrc_halt_mask |= BIT(cpu); + __mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask); + + raw_spin_unlock_irqrestore(&mcip_lock, flags); +} + +static void mcip_update_debug_halt_mask(int cpu) +{ + u32 mcip_mask = 0; + unsigned long flags; + + raw_spin_lock_irqsave(&mcip_lock, flags); + + /* + * mcip_mask is same for CMD_DEBUG_SET_SELECT and CMD_DEBUG_SET_MASK + * commands. 
So read it once instead of reading both CMD_DEBUG_READ_MASK + * and CMD_DEBUG_READ_SELECT. + */ + __mcip_cmd(CMD_DEBUG_READ_SELECT, 0); + mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK); + + mcip_mask |= BIT(cpu); + + __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask); + /* + * Parameter specified halt cause: + * STATUS32[H]/actionpoint/breakpoint/self-halt + * We choose all of them (0xF). + */ + __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask); + + raw_spin_unlock_irqrestore(&mcip_lock, flags); +} + static void mcip_setup_per_cpu(int cpu) { + struct mcip_bcr mp; + + READ_BCR(ARC_REG_MCIP_BCR, mp); + smp_ipi_irq_setup(cpu, IPI_IRQ); smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ); + + /* Update GFRC halt mask as new CPU came online */ + if (mp.gfrc) + mcip_update_gfrc_halt_mask(cpu); + + /* Update MCIP debug mask as new CPU came online */ + if (mp.dbg) + mcip_update_debug_halt_mask(cpu); } static void mcip_ipi_send(int cpu) @@ -101,11 +170,6 @@ static void mcip_probe_n_setup(void) IS_AVAIL1(mp.gfrc, "GFRC")); cpuinfo_arc700[0].extn.gfrc = mp.gfrc; - - if (mp.dbg) { - __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf); - __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf); - } } struct plat_smp_ops plat_smp_ops = { diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index 6df9d94a9537..115eecc0d9a4 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -47,6 +48,42 @@ void __init smp_prepare_boot_cpu(void) { } +static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask) +{ + unsigned long dt_root = of_get_flat_dt_root(); + const char *buf; + + buf = of_get_flat_dt_prop(dt_root, name, NULL); + if (!buf) + return -EINVAL; + + if (cpulist_parse(buf, cpumask)) + return -EINVAL; + + return 0; +} + +/* + * Read from DeviceTree and setup cpu possible mask. If there is no + * "possible-cpus" property in DeviceTree pretend all [0..NR_CPUS-1] exist. 
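+ *
+ * The property value is parsed with cpulist_parse(), so both range and
+ * list forms are accepted; a dtb fragment of the expected shape
+ * (hypothetical values):
+ *
+ *	/ {
+ *		possible-cpus = "0-1";
+ *	};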
+ */
+static void __init arc_init_cpu_possible(void)
+{
+	struct cpumask cpumask;
+
+	if (arc_get_cpu_map("possible-cpus", &cpumask)) {
+		pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
+			NR_CPUS);
+
+		cpumask_setall(&cpumask);
+	}
+
+	if (!cpumask_test_cpu(0, &cpumask))
+		panic("Master cpu (cpu[0]) is missed in cpu possible mask!");
+
+	init_cpu_possible(&cpumask);
+}
+
 /*
  * Called from setup_arch() before calling setup_processor()
  *
@@ -58,10 +95,7 @@ void __init smp_prepare_boot_cpu(void)
  */
 void __init smp_init_cpus(void)
 {
-	unsigned int i;
-
-	for (i = 0; i < NR_CPUS; i++)
-		set_cpu_possible(i, true);
+	arc_init_cpu_possible();

 	if (plat_smp_ops.init_early_smp)
 		plat_smp_ops.init_early_smp();
@@ -70,16 +104,12 @@ void __init smp_init_cpus(void)
 /* called from init ( ) => process 1 */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	int i;
-
 	/*
 	 * if platform didn't set the present map already, do it now
 	 * boot cpu is set to present already by init/main.c
 	 */
-	if (num_present_cpus() <= 1) {
-		for (i = 0; i < max_cpus; i++)
-			set_cpu_present(i, true);
-	}
+	if (num_present_cpus() <= 1)
+		init_cpu_present(cpu_possible_mask);
 }

 void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index 19ab3cf98f0f..fcc9a9e27e9c 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -7,5 +7,7 @@
 menuconfig ARC_SOC_HSDK
 	bool "ARC HS Development Kit SOC"
+	depends on ISA_ARCV2
+	select ARC_HAS_ACCL_REGS
 	select CLK_HSDK
 	select RESET_HSDK
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index d1346a160760..858638134bfa 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1826,6 +1826,15 @@ config XEN
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.

+config ARM_FLUSH_CONSOLE_ON_RESTART
+	bool "Force flush the console on restart"
+	help
+	  If the console is locked while the system is rebooted, the messages
+	  in the temporary logbuffer would not have propagated to all the
+	  console drivers. This option forces the console lock to be
+	  released if it failed to be acquired, which will cause all the
+	  pending messages to be flushed.
+
 endmenu

 menu "Boot options"

@@ -1854,6 +1863,21 @@ config DEPRECATED_PARAM_STRUCT
 	  This was deprecated in 2001 and announced to live on for 5 years.
 	  Some old boot loaders still use this way.

+config BUILD_ARM_APPENDED_DTB_IMAGE
+	bool "Build a concatenated zImage/dtb by default"
+	depends on OF
+	help
+	  Enabling this option will cause a concatenated zImage and list of
+	  DTBs to be built by default (instead of a standalone zImage).
+	  The image will be built in arch/arm/boot/zImage-dtb.
+
+config BUILD_ARM_APPENDED_DTB_IMAGE_NAMES
+	string "Default dtb names"
+	depends on BUILD_ARM_APPENDED_DTB_IMAGE
+	help
+	  Space-separated list of names of dtbs to append when
+	  building a concatenated zImage-dtb.
+
 # Compressed boot loader in ROM.  Yes, we really want to ask about
 # TEXT and BSS so we preserve their values in the config files.
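#
# For BUILD_ARM_APPENDED_DTB_IMAGE above, the image is a plain
# concatenation, roughly (hypothetical dtb names):
#
#   cat arch/arm/boot/zImage board-a.dtb board-b.dtb > arch/arm/boot/zImage-dtb
#
# and booting it relies on the kernel's appended-DTB support
# (CONFIG_ARM_APPENDED_DTB).
#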
config ZBOOT_ROM_TEXT diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 36ae4454554c..bc805d702d82 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -303,6 +303,8 @@ libs-y := arch/arm/lib/ $(libs-y) boot := arch/arm/boot ifeq ($(CONFIG_XIP_KERNEL),y) KBUILD_IMAGE := $(boot)/xipImage +else ifeq ($(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE),y) +KBUILD_IMAGE := $(boot)/zImage-dtb else KBUILD_IMAGE := $(boot)/zImage endif @@ -356,6 +358,9 @@ ifeq ($(CONFIG_VDSO),y) $(Q)$(MAKE) $(build)=arch/arm/vdso $@ endif +zImage-dtb: vmlinux scripts dtbs + $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ + # We use MRPROPER_FILES and CLEAN_FILES now archclean: $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/arm/boot/.gitignore b/arch/arm/boot/.gitignore index 3c79f85975aa..ad7a0253ea96 100644 --- a/arch/arm/boot/.gitignore +++ b/arch/arm/boot/.gitignore @@ -4,3 +4,4 @@ xipImage bootpImage uImage *.dtb +zImage-dtb \ No newline at end of file diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile index 50f8d1be7fcb..da75630c440d 100644 --- a/arch/arm/boot/Makefile +++ b/arch/arm/boot/Makefile @@ -16,6 +16,7 @@ OBJCOPYFLAGS :=-O binary -R .comment -S ifneq ($(MACHINE),) include $(MACHINE)/Makefile.boot endif +include $(srctree)/arch/arm/boot/dts/Makefile # Note: the following conditions must always be true: # ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET) @@ -29,6 +30,14 @@ export ZRELADDR INITRD_PHYS PARAMS_PHYS targets := Image zImage xipImage bootpImage uImage +DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES)) +ifneq ($(DTB_NAMES),) +DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES)) +else +DTB_LIST := $(dtb-y) +endif +DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST)) + ifeq ($(CONFIG_XIP_KERNEL),y) $(obj)/xipImage: vmlinux FORCE @@ -55,6 +64,10 @@ $(obj)/compressed/vmlinux: $(obj)/Image FORCE $(obj)/zImage: $(obj)/compressed/vmlinux FORCE $(call if_changed,objcopy) +$(obj)/zImage-dtb: $(obj)/zImage $(DTB_OBJS) FORCE + $(call if_changed,cat) + @echo ' Kernel: $@ is ready' + endif ifneq ($(LOADADDR),) diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 8a756870c238..03c122779783 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -29,19 +29,19 @@ #if defined(CONFIG_DEBUG_ICEDCC) #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7) - .macro loadsp, rb, tmp + .macro loadsp, rb, tmp1, tmp2 .endm .macro writeb, ch, rb mcr p14, 0, \ch, c0, c5, 0 .endm #elif defined(CONFIG_CPU_XSCALE) - .macro loadsp, rb, tmp + .macro loadsp, rb, tmp1, tmp2 .endm .macro writeb, ch, rb mcr p14, 0, \ch, c8, c0, 0 .endm #else - .macro loadsp, rb, tmp + .macro loadsp, rb, tmp1, tmp2 .endm .macro writeb, ch, rb mcr p14, 0, \ch, c1, c0, 0 @@ -57,7 +57,7 @@ .endm #if defined(CONFIG_ARCH_SA1100) - .macro loadsp, rb, tmp + .macro loadsp, rb, tmp1, tmp2 mov \rb, #0x80000000 @ physical base address #ifdef CONFIG_DEBUG_LL_SER3 add \rb, \rb, #0x00050000 @ Ser3 @@ -66,8 +66,8 @@ #endif .endm #else - .macro loadsp, rb, tmp - addruart \rb, \tmp + .macro loadsp, rb, tmp1, tmp2 + addruart \rb, \tmp1, \tmp2 .endm #endif #endif @@ -559,8 +559,6 @@ not_relocated: mov r0, #0 bl decompress_kernel bl cache_clean_flush bl cache_off - mov r1, r7 @ restore architecture number - mov r2, r8 @ restore atags pointer #ifdef CONFIG_ARM_VIRT_EXT mrs r0, spsr @ Get saved CPU boot mode @@ -794,6 +792,8 @@ __armv7_mmu_cache_on: bic r6, r6, #1 << 31 @ 32-bit translation system bic r6, r6, #(7 << 0) | (1 << 4) @ use only ttbr0 mcrne 
p15, 0, r3, c2, c0, 0 @ load page table pointer + mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs + mcr p15, 0, r0, c7, c5, 4 @ ISB mcrne p15, 0, r1, c3, c0, 0 @ load domain access control mcrne p15, 0, r6, c2, c0, 2 @ load ttb control #endif @@ -1295,7 +1295,7 @@ phex: adr r3, phexbuf b 1b @ puts corrupts {r0, r1, r2, r3} -puts: loadsp r3, r1 +puts: loadsp r3, r2, r1 1: ldrb r2, [r0], #1 teq r2, #0 moveq pc, lr @@ -1312,8 +1312,8 @@ puts: loadsp r3, r1 @ putc corrupts {r0, r1, r2, r3} putc: mov r2, r0 + loadsp r3, r1, r0 mov r0, #0 - loadsp r3, r1 b 2b @ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr} @@ -1363,6 +1363,8 @@ __hyp_reentry_vectors: __enter_kernel: mov r0, #0 @ must be 0 + mov r1, r7 @ restore architecture number + mov r2, r8 @ restore atags pointer ARM( mov pc, r4 ) @ call kernel M_CLASS( add r4, r4, #1 ) @ enter in Thumb mode for M class THUMB( bx r4 ) @ entry point is always ARM for A/R classes diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index eff87a344566..86e591cc2567 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -1074,5 +1074,15 @@ endif dtstree := $(srctree)/$(src) dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) -always := $(dtb-y) +DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES)) +ifneq ($(DTB_NAMES),) +DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES)) +else +DTB_LIST := $(dtb-y) +endif + +targets += dtbs dtbs_install +targets += $(DTB_LIST) + +always := $(DTB_LIST) clean-files := *.dtb diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index e5b061469bf8..4714a59fd86d 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -927,7 +927,8 @@ reg = <0x48038000 0x2000>, <0x46000000 0x400000>; reg-names = "mpu", "dat"; - interrupts = <80>, <81>; + interrupts = , + ; interrupt-names = "tx", "rx"; status = "disabled"; dmas = <&edma 8 2>, @@ -941,7 +942,8 @@ reg = <0x4803C000 0x2000>, <0x46400000 0x400000>; reg-names = "mpu", "dat"; - interrupts = <82>, <83>; + interrupts = , + ; interrupt-names = "tx", "rx"; status = "disabled"; dmas = <&edma 10 2>, diff --git a/arch/arm/boot/dts/am437x-cm-t43.dts b/arch/arm/boot/dts/am437x-cm-t43.dts index 9e92d480576b..3b9a94c274a7 100644 --- a/arch/arm/boot/dts/am437x-cm-t43.dts +++ b/arch/arm/boot/dts/am437x-cm-t43.dts @@ -301,8 +301,8 @@ status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&spi0_pins>; - dmas = <&edma 16 - &edma 17>; + dmas = <&edma 16 0 + &edma 17 0>; dma-names = "tx0", "rx0"; flash: w25q64cvzpig@0 { diff --git a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi index 7b8d90b7aeea..29b636fce23f 100644 --- a/arch/arm/boot/dts/arm-realview-eb-mp.dtsi +++ b/arch/arm/boot/dts/arm-realview-eb-mp.dtsi @@ -150,11 +150,6 @@ interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>; }; -&charlcd { - interrupt-parent = <&intc>; - interrupts = <0 IRQ_TYPE_LEVEL_HIGH>; -}; - &serial0 { interrupt-parent = <&intc>; interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm/boot/dts/aspeed-ast2500-evb.dts b/arch/arm/boot/dts/aspeed-ast2500-evb.dts index f53e89d63477..c971cc93f42d 100644 --- a/arch/arm/boot/dts/aspeed-ast2500-evb.dts +++ b/arch/arm/boot/dts/aspeed-ast2500-evb.dts @@ -16,7 +16,7 @@ bootargs = "console=ttyS4,115200 earlyprintk"; }; - memory { + memory@80000000 { reg = <0x80000000 0x20000000>; }; }; diff --git a/arch/arm/boot/dts/at91-tse850-3.dts b/arch/arm/boot/dts/at91-tse850-3.dts index 5f29010cdbd8..4ef80a703eda 100644 --- 
a/arch/arm/boot/dts/at91-tse850-3.dts +++ b/arch/arm/boot/dts/at91-tse850-3.dts @@ -245,7 +245,7 @@ }; eeprom@50 { - compatible = "nxp,24c02", "atmel,24c02"; + compatible = "nxp,se97b", "atmel,24c02"; reg = <0x50>; pagesize = <16>; }; diff --git a/arch/arm/boot/dts/at91sam9g25.dtsi b/arch/arm/boot/dts/at91sam9g25.dtsi index a7da0dd0c98f..0898213f3bb2 100644 --- a/arch/arm/boot/dts/at91sam9g25.dtsi +++ b/arch/arm/boot/dts/at91sam9g25.dtsi @@ -21,7 +21,7 @@ atmel,mux-mask = < /* A B C */ 0xffffffff 0xffe0399f 0xc000001c /* pioA */ - 0x0007ffff 0x8000fe3f 0x00000000 /* pioB */ + 0x0007ffff 0x00047e3f 0x00000000 /* pioB */ 0x80000000 0x07c0ffff 0xb83fffff /* pioC */ 0x003fffff 0x003f8000 0x00000000 /* pioD */ >; diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi index 7c957ea06c66..9a9902974b1b 100644 --- a/arch/arm/boot/dts/bcm-cygnus.dtsi +++ b/arch/arm/boot/dts/bcm-cygnus.dtsi @@ -69,7 +69,7 @@ timer@20200 { compatible = "arm,cortex-a9-global-timer"; reg = <0x20200 0x100>; - interrupts = ; + interrupts = ; clocks = <&periph_clk>; }; diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index dff66974feed..d5f5e92e7488 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -85,7 +85,7 @@ timer@20200 { compatible = "arm,cortex-a9-global-timer"; reg = <0x20200 0x100>; - interrupts = ; + interrupts = ; clocks = <&periph_clk>; }; @@ -93,7 +93,7 @@ compatible = "arm,cortex-a9-twd-timer"; reg = <0x20600 0x20>; interrupts = ; + IRQ_TYPE_EDGE_RISING)>; clocks = <&periph_clk>; }; diff --git a/arch/arm/boot/dts/bcm2836.dtsi b/arch/arm/boot/dts/bcm2836.dtsi index 61e158003509..168c002f0ca0 100644 --- a/arch/arm/boot/dts/bcm2836.dtsi +++ b/arch/arm/boot/dts/bcm2836.dtsi @@ -9,7 +9,7 @@ <0x40000000 0x40000000 0x00001000>; dma-ranges = <0xc0000000 0x00000000 0x3f000000>; - local_intc: local_intc { + local_intc: local_intc@40000000 { compatible = "brcm,bcm2836-l1-intc"; reg = <0x40000000 0x100>; interrupt-controller; diff --git a/arch/arm/boot/dts/bcm2837.dtsi b/arch/arm/boot/dts/bcm2837.dtsi index bc1cca5cf43c..d5d058a568c3 100644 --- a/arch/arm/boot/dts/bcm2837.dtsi +++ b/arch/arm/boot/dts/bcm2837.dtsi @@ -8,7 +8,7 @@ <0x40000000 0x40000000 0x00001000>; dma-ranges = <0xc0000000 0x00000000 0x3f000000>; - local_intc: local_intc { + local_intc: local_intc@40000000 { compatible = "brcm,bcm2836-l1-intc"; reg = <0x40000000 0x100>; interrupt-controller; diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi index 013431e3d7c3..4745e3c7806b 100644 --- a/arch/arm/boot/dts/bcm283x.dtsi +++ b/arch/arm/boot/dts/bcm283x.dtsi @@ -251,7 +251,7 @@ jtag_gpio4: jtag_gpio4 { brcm,pins = <4 5 6 12 13>; - brcm,function = ; + brcm,function = ; }; jtag_gpio22: jtag_gpio22 { brcm,pins = <22 23 24 25 26 27>; @@ -396,8 +396,8 @@ i2s: i2s@7e203000 { compatible = "brcm,bcm2835-i2s"; - reg = <0x7e203000 0x20>, - <0x7e101098 0x02>; + reg = <0x7e203000 0x24>; + clocks = <&clocks BCM2835_CLOCK_PCM>; dmas = <&dma 2>, <&dma 3>; diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts index 3bc50849d013..b8bde13de90a 100644 --- a/arch/arm/boot/dts/bcm958623hr.dts +++ b/arch/arm/boot/dts/bcm958623hr.dts @@ -141,10 +141,6 @@ status = "okay"; }; -&sata { - status = "okay"; -}; - &qspi { bspi-sel = <0>; flash: m25p80@0 { diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts index d94d14b3c745..f0e2008f7490 100644 --- a/arch/arm/boot/dts/bcm958625hr.dts +++ 
b/arch/arm/boot/dts/bcm958625hr.dts @@ -49,7 +49,7 @@ memory { device_type = "memory"; - reg = <0x60000000 0x80000000>; + reg = <0x60000000 0x20000000>; }; gpio-restart { @@ -177,10 +177,6 @@ status = "okay"; }; -&sata { - status = "okay"; -}; - &srab { compatible = "brcm,bcm58625-srab", "brcm,nsp-srab"; status = "okay"; diff --git a/arch/arm/boot/dts/da850-lego-ev3.dts b/arch/arm/boot/dts/da850-lego-ev3.dts index 413dbd5d9f64..81942ae83e1f 100644 --- a/arch/arm/boot/dts/da850-lego-ev3.dts +++ b/arch/arm/boot/dts/da850-lego-ev3.dts @@ -178,7 +178,7 @@ */ battery { pinctrl-names = "default"; - pintctrl-0 = <&battery_pins>; + pinctrl-0 = <&battery_pins>; compatible = "lego,ev3-battery"; io-channels = <&adc 4>, <&adc 3>; io-channel-names = "voltage", "current"; @@ -392,7 +392,7 @@ batt_volt_en { gpio-hog; gpios = <6 GPIO_ACTIVE_HIGH>; - output-low; + output-high; }; }; diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi index af68ef7b0caa..8a15f7193c82 100644 --- a/arch/arm/boot/dts/da850.dtsi +++ b/arch/arm/boot/dts/da850.dtsi @@ -34,8 +34,6 @@ pmx_core: pinmux@14120 { compatible = "pinctrl-single"; reg = <0x14120 0x50>; - #address-cells = <1>; - #size-cells = <0>; #pinctrl-cells = <2>; pinctrl-single,bit-per-mux; pinctrl-single,register-width = <32>; diff --git a/arch/arm/boot/dts/dra71-evm.dts b/arch/arm/boot/dts/dra71-evm.dts index 41c9132eb550..64363f75c01a 100644 --- a/arch/arm/boot/dts/dra71-evm.dts +++ b/arch/arm/boot/dts/dra71-evm.dts @@ -24,13 +24,13 @@ regulator-name = "vddshv8"; regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <3000000>; + regulator-max-microvolt = <3300000>; regulator-boot-on; vin-supply = <&evm_5v0>; gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>; states = <1800000 0x0 - 3000000 0x1>; + 3300000 0x1>; }; evm_1v8_sw: fixedregulator-evm_1v8 { diff --git a/arch/arm/boot/dts/dra76-evm.dts b/arch/arm/boot/dts/dra76-evm.dts index b024a65c6e27..f64aab450315 100644 --- a/arch/arm/boot/dts/dra76-evm.dts +++ b/arch/arm/boot/dts/dra76-evm.dts @@ -148,6 +148,7 @@ compatible = "ti,tps65917"; reg = <0x58>; ti,system-power-controller; + ti,palmas-override-powerhold; interrupt-controller; #interrupt-cells = <2>; diff --git a/arch/arm/boot/dts/emev2.dtsi b/arch/arm/boot/dts/emev2.dtsi index 42ea246e71cb..fec1241b858f 100644 --- a/arch/arm/boot/dts/emev2.dtsi +++ b/arch/arm/boot/dts/emev2.dtsi @@ -31,13 +31,13 @@ #address-cells = <1>; #size-cells = <0>; - cpu@0 { + cpu0: cpu@0 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0>; clock-frequency = <533000000>; }; - cpu@1 { + cpu1: cpu@1 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <1>; @@ -57,6 +57,7 @@ compatible = "arm,cortex-a9-pmu"; interrupts = , ; + interrupt-affinity = <&cpu0>, <&cpu1>; }; clocks@e0110000 { diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts index bceb919ac637..65602cd51a4e 100644 --- a/arch/arm/boot/dts/exynos4412-trats2.dts +++ b/arch/arm/boot/dts/exynos4412-trats2.dts @@ -395,7 +395,7 @@ reg = <0>; vdd3-supply = <&lcd_vdd3_reg>; vci-supply = <&ldo25_reg>; - reset-gpios = <&gpy4 5 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpf2 1 GPIO_ACTIVE_HIGH>; power-on-delay= <50>; reset-delay = <100>; init-delay = <100>; diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index 8dbeb873e99c..35b1949a3e3c 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -643,7 +643,7 @@ power-domains = <&pd_gsc>; clocks = <&clock CLK_GSCL0>; clock-names = "gscl"; - iommu = 
<&sysmmu_gsc0>; + iommus = <&sysmmu_gsc0>; }; gsc_1: gsc@13e10000 { @@ -653,7 +653,7 @@ power-domains = <&pd_gsc>; clocks = <&clock CLK_GSCL1>; clock-names = "gscl"; - iommu = <&sysmmu_gsc1>; + iommus = <&sysmmu_gsc1>; }; gsc_2: gsc@13e20000 { @@ -663,7 +663,7 @@ power-domains = <&pd_gsc>; clocks = <&clock CLK_GSCL2>; clock-names = "gscl"; - iommu = <&sysmmu_gsc2>; + iommus = <&sysmmu_gsc2>; }; gsc_3: gsc@13e30000 { @@ -673,7 +673,7 @@ power-domains = <&pd_gsc>; clocks = <&clock CLK_GSCL3>; clock-names = "gscl"; - iommu = <&sysmmu_gsc3>; + iommus = <&sysmmu_gsc3>; }; hdmi: hdmi@14530000 { diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi index 7eab4bc07cec..7628bbb02324 100644 --- a/arch/arm/boot/dts/exynos5410.dtsi +++ b/arch/arm/boot/dts/exynos5410.dtsi @@ -333,7 +333,6 @@ &rtc { clocks = <&clock CLK_RTC>; clock-names = "rtc"; - interrupt-parent = <&pmu_system_controller>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index 8bf0d89cdd35..2e516f4985e4 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -433,15 +433,6 @@ clock-names = "ipg", "per"; }; - srtc: srtc@53fa4000 { - compatible = "fsl,imx53-rtc", "fsl,imx25-rtc"; - reg = <0x53fa4000 0x4000>; - interrupts = <24>; - interrupt-parent = <&tzic>; - clocks = <&clks IMX5_CLK_SRTC_GATE>; - clock-names = "ipg"; - }; - iomuxc: iomuxc@53fa8000 { compatible = "fsl,imx53-iomuxc"; reg = <0x53fa8000 0x4000>; diff --git a/arch/arm/boot/dts/imx6dl-icore-rqs.dts b/arch/arm/boot/dts/imx6dl-icore-rqs.dts index cf42c2f5cdc7..1281bc39b7ab 100644 --- a/arch/arm/boot/dts/imx6dl-icore-rqs.dts +++ b/arch/arm/boot/dts/imx6dl-icore-rqs.dts @@ -42,7 +42,7 @@ /dts-v1/; -#include "imx6q.dtsi" +#include "imx6dl.dtsi" #include "imx6qdl-icore-rqs.dtsi" / { diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi index 90a741732f60..4747ede61acd 100644 --- a/arch/arm/boot/dts/imx6q.dtsi +++ b/arch/arm/boot/dts/imx6q.dtsi @@ -96,7 +96,7 @@ clocks = <&clks IMX6Q_CLK_ECSPI5>, <&clks IMX6Q_CLK_ECSPI5>; clock-names = "ipg", "per"; - dmas = <&sdma 11 7 1>, <&sdma 12 7 2>; + dmas = <&sdma 11 8 1>, <&sdma 12 8 2>; dma-names = "rx", "tx"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 6c7eb54be9e2..d64438bfa68b 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -1305,7 +1305,7 @@ 0x82000000 0 0x08000000 0x08000000 0 0x00f00000>; bus-range = <0x00 0xff>; num-lanes = <1>; - interrupts = ; + interrupts = ; clocks = <&clks IMX6SX_CLK_PCIE_REF_125M>, <&clks IMX6SX_CLK_PCIE_AXI>, <&clks IMX6SX_CLK_LVDS1_OUT>, diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts index ae45af1ad062..3cc1fb9ce441 100644 --- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts +++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts @@ -213,37 +213,37 @@ &iomuxc { pinctrl_enet1: enet1grp { fsl,pins = < - MX7D_PAD_SD2_CD_B__ENET1_MDIO 0x3 - MX7D_PAD_SD2_WP__ENET1_MDC 0x3 - MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x1 - MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x1 - MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x1 - MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x1 - MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x1 - MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x1 - MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x1 - MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x1 - MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x1 - MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x1 - 
MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x1 - MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x1 + MX7D_PAD_SD2_CD_B__ENET1_MDIO 0x30 + MX7D_PAD_SD2_WP__ENET1_MDC 0x30 + MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x11 + MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x11 + MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x11 + MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x11 + MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x11 + MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x11 + MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x11 + MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x11 + MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x11 + MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x11 + MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x11 + MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x11 >; }; pinctrl_enet2: enet2grp { fsl,pins = < - MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x1 - MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x1 - MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x1 - MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x1 - MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x1 - MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x1 - MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x1 - MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x1 - MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x1 - MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x1 - MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x1 - MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x1 + MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x11 + MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x11 + MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x11 + MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x11 + MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x11 + MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x11 + MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x11 + MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x11 + MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x11 + MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x11 + MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x11 + MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x11 >; }; diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts index 44637cabcc56..255e64ba32e2 100644 --- a/arch/arm/boot/dts/imx7d-sdb.dts +++ b/arch/arm/boot/dts/imx7d-sdb.dts @@ -82,7 +82,7 @@ enable-active-high; }; - reg_usb_otg2_vbus: regulator-usb-otg1-vbus { + reg_usb_otg2_vbus: regulator-usb-otg2-vbus { compatible = "regulator-fixed"; regulator-name = "usb_otg2_vbus"; regulator-min-microvolt = <5000000>; diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts index cf2f5240e176..27cc913ca0f5 100644 --- a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts +++ b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts @@ -53,7 +53,8 @@ }; pinctrl: pin-controller@10000 { - pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>; + pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header + &pmx_gpio_header_gpo>; pinctrl-names = "default"; pmx_uart0: pmx-uart0 { @@ -85,11 +86,16 @@ * ground. 
*/ pmx_gpio_header: pmx-gpio-header { - marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28", + marvell,pins = "mpp17", "mpp29", "mpp28", "mpp35", "mpp34", "mpp40"; marvell,function = "gpio"; }; + pmx_gpio_header_gpo: pxm-gpio-header-gpo { + marvell,pins = "mpp7"; + marvell,function = "gpo"; + }; + pmx_gpio_init: pmx-init { marvell,pins = "mpp38"; marvell,function = "gpio"; diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts index 38faa90007d7..2fa5eb4bd402 100644 --- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts +++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts @@ -72,7 +72,8 @@ }; &gpmc { - ranges = <1 0 0x08000000 0x1000000>; /* CS1: 16MB for LAN9221 */ + ranges = <0 0 0x30000000 0x1000000 /* CS0: 16MB for NAND */ + 1 0 0x2c000000 0x1000000>; /* CS1: 16MB for LAN9221 */ ethernet@gpmc { pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi index 26cce4d18405..c335b923753a 100644 --- a/arch/arm/boot/dts/logicpd-som-lv.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi @@ -26,7 +26,7 @@ gpio = <&gpio1 3 0>; /* gpio_3 */ startup-delay-us = <70000>; enable-active-high; - vin-supply = <&vmmc2>; + vin-supply = <&vaux3>; }; /* HS USB Host PHY on PORT 1 */ @@ -37,7 +37,7 @@ }; &gpmc { - ranges = <0 0 0x00000000 0x1000000>; /* CS0: 16MB for NAND */ + ranges = <0 0 0x30000000 0x1000000>; /* CS0: 16MB for NAND */ nand@0,0 { compatible = "ti,omap2-nand"; @@ -97,6 +97,8 @@ }; &i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; clock-frequency = <2600000>; twl: twl@48 { @@ -106,6 +108,7 @@ twl_audio: audio { compatible = "ti,twl4030-audio"; codec { + ti,hs_extmute_gpio = <&gpio2 25 GPIO_ACTIVE_HIGH>; }; }; }; @@ -121,7 +124,7 @@ &mmc3 { interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>; - pinctrl-0 = <&mmc3_pins>; + pinctrl-0 = <&mmc3_pins &wl127x_gpio>; pinctrl-names = "default"; vmmc-supply = <&wl12xx_vmmc>; non-removable; @@ -132,8 +135,8 @@ wlcore: wlcore@2 { compatible = "ti,wl1273"; reg = <2>; - interrupt-parent = <&gpio5>; - interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; /* gpio 152 */ + interrupt-parent = <&gpio1>; + interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; /* gpio 2 */ ref-clock-frequency = <26000000>; }; }; @@ -157,8 +160,6 @@ OMAP3_CORE1_IOPAD(0x2166, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat5.sdmmc3_dat1 */ OMAP3_CORE1_IOPAD(0x2168, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat6.sdmmc3_dat2 */ OMAP3_CORE1_IOPAD(0x216a, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat6.sdmmc3_dat3 */ - OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT_PULLUP | MUX_MODE4) /* mcbsp4_clkx.gpio_152 */ - OMAP3_CORE1_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */ OMAP3_CORE1_IOPAD(0x21d0, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs1.sdmmc3_cmd */ OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs2.sdmmc_clk */ >; @@ -217,7 +218,13 @@ >; }; - + i2c1_pins: pinmux_i2c1_pins { + pinctrl-single,pins = < + OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */ + OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */ + OMAP3_CORE1_IOPAD(0x20ba, PIN_OUTPUT | MUX_MODE4) /* gpmc_ncs6.gpio_57 */ + >; + }; }; &omap3_pmx_wkup { @@ -228,6 +235,12 @@ OMAP3_WKUP_IOPAD(0x2a0e, PIN_OUTPUT | MUX_MODE4) /* sys_boot2.gpio_4 */ >; }; + wl127x_gpio: pinmux_wl127x_gpio_pin { + pinctrl-single,pins = < + OMAP3_WKUP_IOPAD(0x2a0a, PIN_INPUT | MUX_MODE4) /* sys_boot0.gpio_2 */ + OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */ + >; + }; 
}; &omap3_pmx_core2 { @@ -259,6 +272,11 @@ #include "twl4030.dtsi" #include "twl4030_omap3.dtsi" +&vaux3 { + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; +}; + &twl { twl_power: power { compatible = "ti,twl4030-power-idle-osc-off", "ti,twl4030-power-idle"; diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi index 6d89736c7b44..cf22b35f0a28 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi @@ -104,6 +104,8 @@ }; &i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; clock-frequency = <2600000>; twl: twl@48 { @@ -211,6 +213,12 @@ OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */ >; }; + i2c1_pins: pinmux_i2c1_pins { + pinctrl-single,pins = < + OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */ + OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */ + >; + }; }; &uart2 { diff --git a/arch/arm/boot/dts/lpc3250-ea3250.dts b/arch/arm/boot/dts/lpc3250-ea3250.dts index 52b3ed10283a..e2bc731079be 100644 --- a/arch/arm/boot/dts/lpc3250-ea3250.dts +++ b/arch/arm/boot/dts/lpc3250-ea3250.dts @@ -156,8 +156,8 @@ uda1380: uda1380@18 { compatible = "nxp,uda1380"; reg = <0x18>; - power-gpio = <&gpio 0x59 0>; - reset-gpio = <&gpio 0x51 0>; + power-gpio = <&gpio 3 10 0>; + reset-gpio = <&gpio 3 2 0>; dac-clk = "wspll"; }; diff --git a/arch/arm/boot/dts/lpc3250-phy3250.dts b/arch/arm/boot/dts/lpc3250-phy3250.dts index fd95e2b10357..b7bd3a110a8d 100644 --- a/arch/arm/boot/dts/lpc3250-phy3250.dts +++ b/arch/arm/boot/dts/lpc3250-phy3250.dts @@ -81,8 +81,8 @@ uda1380: uda1380@18 { compatible = "nxp,uda1380"; reg = <0x18>; - power-gpio = <&gpio 0x59 0>; - reset-gpio = <&gpio 0x51 0>; + power-gpio = <&gpio 3 10 0>; + reset-gpio = <&gpio 3 2 0>; dac-clk = "wspll"; }; diff --git a/arch/arm/boot/dts/ls1021a-qds.dts b/arch/arm/boot/dts/ls1021a-qds.dts index 940875316d0f..67b4de0e3439 100644 --- a/arch/arm/boot/dts/ls1021a-qds.dts +++ b/arch/arm/boot/dts/ls1021a-qds.dts @@ -215,7 +215,7 @@ reg = <0x2a>; VDDA-supply = <&reg_3p3v>; VDDIO-supply = <&reg_3p3v>; - clocks = <&sys_mclk 1>; + clocks = <&sys_mclk>; }; }; }; diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts index a8b148ad1dd2..44715c8ef756 100644 --- a/arch/arm/boot/dts/ls1021a-twr.dts +++ b/arch/arm/boot/dts/ls1021a-twr.dts @@ -187,7 +187,7 @@ reg = <0x0a>; VDDA-supply = <&reg_3p3v>; VDDIO-supply = <&reg_3p3v>; - clocks = <&sys_mclk 1>; + clocks = <&sys_mclk>; }; }; diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi index 9319e1f0f1d8..379b4a03cfe2 100644 --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -155,7 +155,7 @@ }; esdhc: esdhc@1560000 { - compatible = "fsl,esdhc"; + compatible = "fsl,ls1021a-esdhc", "fsl,esdhc"; reg = <0x0 0x1560000 0x0 0x10000>; interrupts = ; clock-frequency = <0>; diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi index afe12e5b51f9..f936000f0699 100644 --- a/arch/arm/boot/dts/mt2701.dtsi +++ b/arch/arm/boot/dts/mt2701.dtsi @@ -593,6 +593,7 @@ compatible = "mediatek,mt2701-hifsys", "syscon"; reg = <0 0x1a000000 0 0x1000>; #clock-cells = <1>; + #reset-cells = <1>; }; usb0: usb@1a1c0000 { @@ -677,6 +678,7 @@ compatible = "mediatek,mt2701-ethsys", "syscon"; reg = <0 0x1b000000 0 0x1000>; #clock-cells = <1>; + #reset-cells = <1>; }; eth: ethernet@1b100000 { diff --git a/arch/arm/boot/dts/mt7623.dtsi
b/arch/arm/boot/dts/mt7623.dtsi index ec8a07415cb3..185357323572 100644 --- a/arch/arm/boot/dts/mt7623.dtsi +++ b/arch/arm/boot/dts/mt7623.dtsi @@ -22,11 +22,12 @@ #include #include #include -#include "skeleton64.dtsi" / { compatible = "mediatek,mt7623"; interrupt-parent = <&sysirq>; + #address-cells = <2>; + #size-cells = <2>; cpu_opp_table: opp_table { compatible = "operating-points-v2"; @@ -753,6 +754,7 @@ "syscon"; reg = <0 0x1b000000 0 0x1000>; #clock-cells = <1>; + #reset-cells = <1>; }; eth: ethernet@1b100000 { diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts index 688a86378cee..e96c0ca97972 100644 --- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts +++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts @@ -39,6 +39,24 @@ }; }; + reg_3p3v: regulator-3p3v { + compatible = "regulator-fixed"; + regulator-name = "fixed-3.3V"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + regulator-always-on; + }; + + reg_5v: regulator-5v { + compatible = "regulator-fixed"; + regulator-name = "fixed-5V"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-boot-on; + regulator-always-on; + }; + gpio_keys { compatible = "gpio-keys"; pinctrl-names = "default"; @@ -82,6 +100,7 @@ }; memory@80000000 { + device_type = "memory"; reg = <0 0x80000000 0 0x40000000>; }; }; @@ -204,7 +223,7 @@ bus-width = <4>; max-frequency = <50000000>; cap-sd-highspeed; - cd-gpios = <&pio 261 0>; + cd-gpios = <&pio 261 GPIO_ACTIVE_LOW>; vmmc-supply = <&mt6323_vmch_reg>; vqmmc-supply = <&mt6323_vio18_reg>; }; @@ -468,12 +487,14 @@ }; &usb1 { - vusb33-supply = <&mt6323_vusb_reg>; + vusb33-supply = <&reg_3p3v>; + vbus-supply = <&reg_5v>; status = "okay"; }; &usb2 { - vusb33-supply = <&mt6323_vusb_reg>; + vusb33-supply = <&reg_3p3v>; + vbus-supply = <&reg_5v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/mt7623n-rfb.dtsi b/arch/arm/boot/dts/mt7623n-rfb.dtsi index 256c5fd947bf..43c9d7ca23a0 100644 --- a/arch/arm/boot/dts/mt7623n-rfb.dtsi +++ b/arch/arm/boot/dts/mt7623n-rfb.dtsi @@ -47,6 +47,7 @@ }; memory@80000000 { + device_type = "memory"; reg = <0 0x80000000 0 0x40000000>; }; diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index 64d00f5893a6..28d10abd8b04 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi @@ -354,7 +354,7 @@ elm: elm@48078000 { compatible = "ti,am3352-elm"; reg = <0x48078000 0x2000>; - interrupts = <4>; + interrupts = ; ti,hwmods = "elm"; status = "disabled"; }; @@ -861,14 +861,12 @@ usbhsohci: ohci@4a064800 { compatible = "ti,ohci-omap3"; reg = <0x4a064800 0x400>; - interrupt-parent = <&gic>; interrupts = ; }; usbhsehci: ehci@4a064c00 { compatible = "ti,ehci-omap"; reg = <0x4a064c00 0x400>; - interrupt-parent = <&gic>; interrupts = ; }; }; diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts index 0ce0b278e1cb..25c3a10d669c 100644 --- a/arch/arm/boot/dts/r8a7791-koelsch.dts +++ b/arch/arm/boot/dts/r8a7791-koelsch.dts @@ -278,6 +278,12 @@ }; }; + cec_clock: cec-clock { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <12000000>; + }; + hdmi-out { compatible = "hdmi-connector"; type = "a"; @@ -642,12 +648,6 @@ }; }; - cec_clock: cec-clock { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <12000000>; - }; - hdmi@39 { compatible = "adi,adv7511w"; reg = <0x39>; diff --git a/arch/arm/boot/dts/r8a7791-porter.dts
b/arch/arm/boot/dts/r8a7791-porter.dts index 95da5cb9d37a..b6ebe79261c6 100644 --- a/arch/arm/boot/dts/r8a7791-porter.dts +++ b/arch/arm/boot/dts/r8a7791-porter.dts @@ -427,7 +427,7 @@ "dclkin.0", "dclkin.1"; ports { - port@1 { + port@0 { endpoint { remote-endpoint = <&adv7511_in>; }; diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi index 4916c65e0ace..5c0a76493d22 100644 --- a/arch/arm/boot/dts/rk3036.dtsi +++ b/arch/arm/boot/dts/rk3036.dtsi @@ -261,7 +261,7 @@ max-frequency = <37500000>; clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; interrupts = ; resets = <&cru SRST_SDIO>; @@ -279,7 +279,7 @@ max-frequency = <37500000>; clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; default-sample-phase = <158>; disable-wp; dmas = <&pdma 12>; diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi index 06814421eed2..f59f7cc62be6 100644 --- a/arch/arm/boot/dts/rk322x.dtsi +++ b/arch/arm/boot/dts/rk322x.dtsi @@ -600,7 +600,7 @@ interrupts = ; clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>; @@ -613,7 +613,7 @@ interrupts = ; clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; pinctrl-names = "default"; pinctrl-0 = <&sdio_clk &sdio_cmd &sdio_bus4>; @@ -628,7 +628,7 @@ max-frequency = <37500000>; clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; bus-width = <8>; default-sample-phase = <158>; fifo-depth = <0x100>; diff --git a/arch/arm/boot/dts/rk3288-phycore-som.dtsi b/arch/arm/boot/dts/rk3288-phycore-som.dtsi index 99cfae875e12..5eae4776ffde 100644 --- a/arch/arm/boot/dts/rk3288-phycore-som.dtsi +++ b/arch/arm/boot/dts/rk3288-phycore-som.dtsi @@ -110,26 +110,6 @@ }; }; -&cpu0 { - cpu0-supply = <&vdd_cpu>; - operating-points = < - /* KHz uV */ - 1800000 1400000 - 1608000 1350000 - 1512000 1300000 - 1416000 1200000 - 1200000 1100000 - 1008000 1050000 - 816000 1000000 - 696000 950000 - 600000 900000 - 408000 900000 - 312000 900000 - 216000 900000 - 126000 900000 - >; -}; - &emmc { status = "okay"; bus-width = <8>; diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index 356ed1e62452..f7a951afd281 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -927,6 +927,7 @@ i2s: i2s@ff890000 { compatible = "rockchip,rk3288-i2s", "rockchip,rk3066-i2s"; reg = <0x0 0xff890000 0x0 0x10000>; + #sound-dai-cells = <0>; interrupts = ; #address-cells = <1>; #size-cells = <0>; @@ -1122,6 +1123,7 @@ compatible = "rockchip,rk3288-dw-hdmi"; reg = <0x0 0xff980000 0x0 0x20000>; reg-io-width = <4>; + #sound-dai-cells = <0>; rockchip,grf = <&grf>; interrupts = ; clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>; diff --git 
a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi index 726c5d0dbd5b..b290a5abb901 100644 --- a/arch/arm/boot/dts/s5pv210.dtsi +++ b/arch/arm/boot/dts/s5pv210.dtsi @@ -463,6 +463,7 @@ compatible = "samsung,exynos4210-ohci"; reg = <0xec300000 0x100>; interrupts = <23>; + interrupt-parent = <&vic1>; clocks = <&clocks CLK_USB_HOST>; clock-names = "usbhost"; #address-cells = <1>; diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi index 2fa36c525957..81b526085097 100644 --- a/arch/arm/boot/dts/sama5d4.dtsi +++ b/arch/arm/boot/dts/sama5d4.dtsi @@ -1365,7 +1365,7 @@ pinctrl@fc06a000 { #address-cells = <1>; #size-cells = <1>; - compatible = "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus"; + compatible = "atmel,sama5d3-pinctrl", "atmel,at91sam9x5-pinctrl", "simple-bus"; ranges = <0xfc068000 0xfc068000 0x100 0xfc06a000 0xfc06a000 0x4000>; /* WARNING: revisit as pin spec has changed */ diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi index 4ea5c5a16c57..5fc24d4c2d5d 100644 --- a/arch/arm/boot/dts/sh73a0.dtsi +++ b/arch/arm/boot/dts/sh73a0.dtsi @@ -22,7 +22,7 @@ #address-cells = <1>; #size-cells = <0>; - cpu@0 { + cpu0: cpu@0 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0>; @@ -30,7 +30,7 @@ power-domains = <&pd_a2sl>; next-level-cache = <&L2>; }; - cpu@1 { + cpu1: cpu@1 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <1>; @@ -89,6 +89,7 @@ compatible = "arm,cortex-a9-pmu"; interrupts = , ; + interrupt-affinity = <&cpu0>, <&cpu1>; }; cmt1: timer@e6138000 { diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 7e24dc8e82d4..10d2fa183a9f 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi @@ -744,13 +744,13 @@ nand0: nand@ff900000 { #address-cells = <0x1>; #size-cells = <0x1>; - compatible = "denali,denali-nand-dt"; + compatible = "altr,socfpga-denali-nand"; reg = <0xff900000 0x100000>, <0xffb80000 0x10000>; reg-names = "nand_data", "denali_reg"; interrupts = <0x0 0x90 0x4>; dma-mask = <0xffffffff>; - clocks = <&nand_clk>; + clocks = <&nand_x_clk>; status = "disabled"; }; @@ -827,7 +827,7 @@ timer@fffec600 { compatible = "arm,cortex-a9-twd-timer"; reg = <0xfffec600 0x100>; - interrupts = <1 13 0xf04>; + interrupts = <1 13 0xf01>; clocks = <&mpu_periph_clk>; }; diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi index bead79e4b2aa..791ca15c799e 100644 --- a/arch/arm/boot/dts/socfpga_arria10.dtsi +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi @@ -593,8 +593,7 @@ #size-cells = <0>; reg = <0xffda5000 0x100>; interrupts = <0 102 4>; - num-chipselect = <4>; - bus-num = <0>; + num-cs = <4>; /*32bit_access;*/ tx-dma-channel = <&pdma 16>; rx-dma-channel = <&pdma 17>; @@ -633,7 +632,7 @@ nand: nand@ffb90000 { #address-cells = <1>; #size-cells = <1>; - compatible = "denali,denali-nand-dt", "altr,socfpga-denali-nand"; + compatible = "altr,socfpga-denali-nand"; reg = <0xffb90000 0x72000>, <0xffb80000 0x10000>; reg-names = "nand_data", "denali_reg"; diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts index 84101e4eebbf..0f5f379323a8 100644 --- a/arch/arm/boot/dts/spear1310-evb.dts +++ b/arch/arm/boot/dts/spear1310-evb.dts @@ -349,7 +349,7 @@ spi0: spi@e0100000 { status = "okay"; num-cs = <3>; - cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>; + cs-gpios = <&gpio1 7 0>, <&spics 0 0>, <&spics 1 0>; stmpe610@0 { compatible = "st,stmpe610"; diff --git 
a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi index 5f347054527d..d4dbc4098653 100644 --- a/arch/arm/boot/dts/spear1340.dtsi +++ b/arch/arm/boot/dts/spear1340.dtsi @@ -142,8 +142,8 @@ reg = <0xb4100000 0x1000>; interrupts = <0 105 0x4>; status = "disabled"; - dmas = <&dwdma0 0x600 0 0 1>, /* 0xC << 11 */ - <&dwdma0 0x680 0 1 0>; /* 0xD << 7 */ + dmas = <&dwdma0 12 0 1>, + <&dwdma0 13 1 0>; dma-names = "tx", "rx"; }; diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi index 17ea0abcdbd7..086b4b333249 100644 --- a/arch/arm/boot/dts/spear13xx.dtsi +++ b/arch/arm/boot/dts/spear13xx.dtsi @@ -100,7 +100,7 @@ reg = <0xb2800000 0x1000>; interrupts = <0 29 0x4>; status = "disabled"; - dmas = <&dwdma0 0 0 0 0>; + dmas = <&dwdma0 0 0 0>; dma-names = "data"; }; @@ -290,8 +290,8 @@ #size-cells = <0>; interrupts = <0 31 0x4>; status = "disabled"; - dmas = <&dwdma0 0x2000 0 0 0>, /* 0x4 << 11 */ - <&dwdma0 0x0280 0 0 0>; /* 0x5 << 7 */ + dmas = <&dwdma0 4 0 0>, + <&dwdma0 5 0 0>; dma-names = "tx", "rx"; }; diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi index 6b32d20acc9f..00166eb9be86 100644 --- a/arch/arm/boot/dts/spear600.dtsi +++ b/arch/arm/boot/dts/spear600.dtsi @@ -194,6 +194,7 @@ rtc: rtc@fc900000 { compatible = "st,spear600-rtc"; reg = <0xfc900000 0x1000>; + interrupt-parent = <&vic0>; interrupts = <10>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi index 68aab50a73ab..733678b75b88 100644 --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi @@ -750,6 +750,7 @@ reg = <0x10120000 0x1000>; interrupt-names = "combined"; interrupts = <14>; + interrupt-parent = <&vica>; clocks = <&clcdclk>, <&hclkclcd>; clock-names = "clcdclk", "apb_pclk"; status = "disabled"; diff --git a/arch/arm/boot/dts/stih407-pinctrl.dtsi b/arch/arm/boot/dts/stih407-pinctrl.dtsi index bd1a82e8fffe..fe501d32d059 100644 --- a/arch/arm/boot/dts/stih407-pinctrl.dtsi +++ b/arch/arm/boot/dts/stih407-pinctrl.dtsi @@ -52,7 +52,7 @@ st,syscfg = <&syscfg_sbc>; reg = <0x0961f080 0x4>; reg-names = "irqmux"; - interrupts = ; + interrupts = ; interrupt-names = "irqmux"; ranges = <0 0x09610000 0x6000>; @@ -376,7 +376,7 @@ st,syscfg = <&syscfg_front>; reg = <0x0920f080 0x4>; reg-names = "irqmux"; - interrupts = ; + interrupts = ; interrupt-names = "irqmux"; ranges = <0 0x09200000 0x10000>; @@ -936,7 +936,7 @@ st,syscfg = <&syscfg_front>; reg = <0x0921f080 0x4>; reg-names = "irqmux"; - interrupts = ; + interrupts = ; interrupt-names = "irqmux"; ranges = <0 0x09210000 0x10000>; @@ -969,7 +969,7 @@ st,syscfg = <&syscfg_rear>; reg = <0x0922f080 0x4>; reg-names = "irqmux"; - interrupts = ; + interrupts = ; interrupt-names = "irqmux"; ranges = <0 0x09220000 0x6000>; @@ -1164,7 +1164,7 @@ st,syscfg = <&syscfg_flash>; reg = <0x0923f080 0x4>; reg-names = "irqmux"; - interrupts = ; + interrupts = ; interrupt-names = "irqmux"; ranges = <0 0x09230000 0x3000>; diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi index fa149837df14..11fdecd9312e 100644 --- a/arch/arm/boot/dts/stih407.dtsi +++ b/arch/arm/boot/dts/stih407.dtsi @@ -8,6 +8,7 @@ */ #include "stih407-clock.dtsi" #include "stih407-family.dtsi" +#include / { soc { sti-display-subsystem { @@ -122,7 +123,7 @@ <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>; - hdmi,hpd-gpio = <&pio5 3>; + hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>; reset-names = "hdmi"; resets = <&softreset 
STIH407_HDMI_TX_PHY_SOFTRESET>; ddc = <&hdmiddc>; diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi index 21fe72b183d8..96eed0dc08b8 100644 --- a/arch/arm/boot/dts/stih410.dtsi +++ b/arch/arm/boot/dts/stih410.dtsi @@ -9,6 +9,7 @@ #include "stih410-clock.dtsi" #include "stih407-family.dtsi" #include "stih410-pinctrl.dtsi" +#include / { aliases { bdisp0 = &bdisp0; @@ -213,7 +214,7 @@ <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>; - hdmi,hpd-gpio = <&pio5 3>; + hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>; reset-names = "hdmi"; resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; ddc = <&hdmiddc>; diff --git a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts index 51e6f1d21c32..b2758dd8ce43 100644 --- a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts +++ b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts @@ -42,7 +42,6 @@ /dts-v1/; #include "sun6i-a31s.dtsi" -#include "sunxi-common-regulators.dtsi" #include / { @@ -99,6 +98,7 @@ pinctrl-0 = <&gmac_pins_rgmii_a>, <&gmac_phy_reset_pin_bpi_m2>; phy = <&phy1>; phy-mode = "rgmii"; + phy-supply = <&reg_dldo1>; snps,reset-gpio = <&pio 0 21 GPIO_ACTIVE_HIGH>; /* PA21 */ snps,reset-active-low; snps,reset-delays-us = <0 10000 30000>; @@ -118,7 +118,7 @@ &mmc0 { pinctrl-names = "default"; pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_bpi_m2>; - vmmc-supply = <&reg_vcc3v0>; + vmmc-supply = <&reg_dcdc1>; bus-width = <4>; cd-gpios = <&pio 0 4 GPIO_ACTIVE_HIGH>; /* PA4 */ cd-inverted; @@ -132,7 +132,7 @@ &mmc2 { pinctrl-names = "default"; pinctrl-0 = <&mmc2_pins_a>; - vmmc-supply = <&reg_vcc3v0>; + vmmc-supply = <&reg_aldo1>; mmc-pwrseq = <&mmc2_pwrseq>; bus-width = <4>; non-removable; @@ -163,6 +163,8 @@ reg = <0x68>; interrupt-parent = <&nmi_intc>; interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + eldoin-supply = <&reg_dcdc1>; + x-powers,drive-vbus-en; }; }; @@ -193,7 +195,28 @@ #include "axp22x.dtsi" +&reg_aldo1 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-wifi"; +}; + +&reg_aldo2 { + regulator-always-on; + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <2500000>; + regulator-name = "vcc-gmac"; +}; + +&reg_aldo3 { + regulator-always-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "avcc"; +}; + &reg_dc5ldo { + regulator-always-on; regulator-min-microvolt = <700000>; regulator-max-microvolt = <1320000>; regulator-name = "vdd-cpus"; @@ -233,6 +256,40 @@ regulator-name = "vcc-dram"; }; +&reg_dldo1 { + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "vcc-mac"; +}; + +&reg_dldo2 { + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + regulator-name = "avdd-csi"; +}; + +&reg_dldo3 { + regulator-always-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-pb"; +}; + +&reg_eldo1 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "vdd-csi"; + status = "okay"; +}; + +&reg_ldo_io1 { + regulator-always-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "vcc-pm-cpus"; + status = "okay"; +}; + &uart0 { pinctrl-names = "default"; pinctrl-0 = <&uart0_pins_a>; diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts index a4c7713edfcd..c5c365f35baa 100644 --- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts +++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts @@ -41,6 +41,7 @@
cci-control-port = <&cci_control1>; cpu-idle-states = <&CLUSTER_SLEEP_BIG>; capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_A15 &CLUSTER_COST_A15>; }; cpu1: cpu@1 { @@ -50,6 +51,7 @@ cci-control-port = <&cci_control1>; cpu-idle-states = <&CLUSTER_SLEEP_BIG>; capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_A15 &CLUSTER_COST_A15>; }; cpu2: cpu@2 { @@ -59,6 +61,7 @@ cci-control-port = <&cci_control2>; cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>; capacity-dmips-mhz = <516>; + sched-energy-costs = <&CPU_COST_A7 &CLUSTER_COST_A7>; }; cpu3: cpu@3 { @@ -68,6 +71,7 @@ cci-control-port = <&cci_control2>; cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>; capacity-dmips-mhz = <516>; + sched-energy-costs = <&CPU_COST_A7 &CLUSTER_COST_A7>; }; cpu4: cpu@4 { @@ -77,6 +81,7 @@ cci-control-port = <&cci_control2>; cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>; capacity-dmips-mhz = <516>; + sched-energy-costs = <&CPU_COST_A7 &CLUSTER_COST_A7>; }; idle-states { @@ -96,6 +101,77 @@ min-residency-us = <2500>; }; }; + + energy-costs { + CPU_COST_A15: core-cost0 { + busy-cost-data = < + 426 2021 + 512 2312 + 597 2756 + 682 3125 + 768 3524 + 853 3846 + 938 5177 + 1024 6997 + >; + idle-cost-data = < + 0 + 0 + 0 + >; + }; + CPU_COST_A7: core-cost1 { + busy-cost-data = < + 150 187 + 172 275 + 215 334 + 258 407 + 301 447 + 344 549 + 387 761 + 430 1024 + >; + idle-cost-data = < + 0 + 0 + 0 + >; + }; + CLUSTER_COST_A15: cluster-cost0 { + busy-cost-data = < + 426 7920 + 512 8165 + 597 8172 + 682 8195 + 768 8265 + 853 8446 + 938 11426 + 1024 15200 + >; + idle-cost-data = < + 70 + 70 + 25 + >; + }; + CLUSTER_COST_A7: cluster-cost1 { + busy-cost-data = < + 150 2967 + 172 2792 + 215 2810 + 258 2815 + 301 2919 + 344 2847 + 387 3917 + 430 4905 + >; + idle-cost-data = < + 25 + 25 + 10 + >; + }; + }; }; memory@80000000 { diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig index e5ad0708849a..f07f47943bf3 100644 --- a/arch/arm/common/Kconfig +++ b/arch/arm/common/Kconfig @@ -15,3 +15,7 @@ config SHARP_PARAM config SHARP_SCOOP bool + +config FIQ_GLUE + bool + select FIQ diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile index 70b4a14ed993..10b506469c95 100644 --- a/arch/arm/common/Makefile +++ b/arch/arm/common/Makefile @@ -5,6 +5,7 @@ obj-y += firmware.o +obj-$(CONFIG_FIQ_GLUE) += fiq_glue.o fiq_glue_setup.o obj-$(CONFIG_SA1111) += sa1111.o obj-$(CONFIG_DMABOUNCE) += dmabounce.o obj-$(CONFIG_SHARP_LOCOMO) += locomo.o diff --git a/arch/arm/common/bL_switcher_dummy_if.c b/arch/arm/common/bL_switcher_dummy_if.c index 4c10c6452678..f4dc1714a79e 100644 --- a/arch/arm/common/bL_switcher_dummy_if.c +++ b/arch/arm/common/bL_switcher_dummy_if.c @@ -57,3 +57,7 @@ static struct miscdevice bL_switcher_device = { &bL_switcher_fops }; module_misc_device(bL_switcher_device); + +MODULE_AUTHOR("Nicolas Pitre "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("big.LITTLE switcher dummy user interface"); diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S new file mode 100644 index 000000000000..24b42cec4813 --- /dev/null +++ b/arch/arm/common/fiq_glue.S @@ -0,0 +1,118 @@ +/* + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include + + .text + + .global fiq_glue_end + + /* fiq stack: r0-r15,cpsr,spsr of interrupted mode */ + +ENTRY(fiq_glue) + /* store pc, cpsr from previous mode, reserve space for spsr */ + mrs r12, spsr + sub lr, lr, #4 + subs r10, #1 + bne nested_fiq + + str r12, [sp, #-8]! + str lr, [sp, #-4]! + + /* store r8-r14 from previous mode */ + sub sp, sp, #(7 * 4) + stmia sp, {r8-r14}^ + nop + + /* store r0-r7 from previous mode */ + stmfd sp!, {r0-r7} + + /* setup func(data,regs) arguments */ + mov r0, r9 + mov r1, sp + mov r3, r8 + + mov r7, sp + + /* Get sp and lr from non-user modes */ + and r4, r12, #MODE_MASK + cmp r4, #USR_MODE + beq fiq_from_usr_mode + + mov r7, sp + orr r4, r4, #(PSR_I_BIT | PSR_F_BIT) + msr cpsr_c, r4 + str sp, [r7, #(4 * 13)] + str lr, [r7, #(4 * 14)] + mrs r5, spsr + str r5, [r7, #(4 * 17)] + + cmp r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT) + /* use fiq stack if we reenter this mode */ + subne sp, r7, #(4 * 3) + +fiq_from_usr_mode: + msr cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT) + mov r2, sp + sub sp, r7, #12 + stmfd sp!, {r2, ip, lr} + /* call func(data,regs) */ + blx r3 + ldmfd sp, {r2, ip, lr} + mov sp, r2 + + /* restore/discard saved state */ + cmp r4, #USR_MODE + beq fiq_from_usr_mode_exit + + msr cpsr_c, r4 + ldr sp, [r7, #(4 * 13)] + ldr lr, [r7, #(4 * 14)] + msr spsr_cxsf, r5 + +fiq_from_usr_mode_exit: + msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT) + + ldmfd sp!, {r0-r7} + ldr lr, [sp, #(4 * 7)] + ldr r12, [sp, #(4 * 8)] + add sp, sp, #(10 * 4) +exit_fiq: + msr spsr_cxsf, r12 + add r10, #1 + cmp r11, #0 + moveqs pc, lr + bx r11 /* jump to custom fiq return function */ + +nested_fiq: + orr r12, r12, #(PSR_F_BIT) + b exit_fiq + +fiq_glue_end: + +ENTRY(fiq_glue_setup) /* func, data, sp, smc call number */ + stmfd sp!, {r4} + mrs r4, cpsr + msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT) + movs r8, r0 + mov r9, r1 + mov sp, r2 + mov r11, r3 + moveq r10, #0 + movne r10, #1 + msr cpsr_c, r4 + ldmfd sp!, {r4} + bx lr + diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c new file mode 100644 index 000000000000..8cb1b611c6d5 --- /dev/null +++ b/arch/arm/common/fiq_glue_setup.c @@ -0,0 +1,147 @@ +/* + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include + +extern unsigned char fiq_glue, fiq_glue_end; +extern void fiq_glue_setup(void *func, void *data, void *sp, + fiq_return_handler_t fiq_return_handler); + +static struct fiq_handler fiq_debugger_fiq_handler = { + .name = "fiq_glue", +}; +DEFINE_PER_CPU(void *, fiq_stack); +static struct fiq_glue_handler *current_handler; +static fiq_return_handler_t fiq_return_handler; +static DEFINE_MUTEX(fiq_glue_lock); + +static void fiq_glue_setup_helper(void *info) +{ + struct fiq_glue_handler *handler = info; + fiq_glue_setup(handler->fiq, handler, + __get_cpu_var(fiq_stack) + THREAD_START_SP, + fiq_return_handler); +} + +int fiq_glue_register_handler(struct fiq_glue_handler *handler) +{ + int ret; + int cpu; + + if (!handler || !handler->fiq) + return -EINVAL; + + mutex_lock(&fiq_glue_lock); + if (fiq_stack) { + ret = -EBUSY; + goto err_busy; + } + + for_each_possible_cpu(cpu) { + void *stack; + stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); + if (WARN_ON(!stack)) { + ret = -ENOMEM; + goto err_alloc_fiq_stack; + } + per_cpu(fiq_stack, cpu) = stack; + } + + ret = claim_fiq(&fiq_debugger_fiq_handler); + if (WARN_ON(ret)) + goto err_claim_fiq; + + current_handler = handler; + on_each_cpu(fiq_glue_setup_helper, handler, true); + set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue); + + mutex_unlock(&fiq_glue_lock); + return 0; + +err_claim_fiq: +err_alloc_fiq_stack: + for_each_possible_cpu(cpu) { + __free_pages(per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER); + per_cpu(fiq_stack, cpu) = NULL; + } +err_busy: + mutex_unlock(&fiq_glue_lock); + return ret; +} + +static void fiq_glue_update_return_handler(void (*fiq_return)(void)) +{ + fiq_return_handler = fiq_return; + if (current_handler) + on_each_cpu(fiq_glue_setup_helper, current_handler, true); +} + +int fiq_glue_set_return_handler(void (*fiq_return)(void)) +{ + int ret; + + mutex_lock(&fiq_glue_lock); + if (fiq_return_handler) { + ret = -EBUSY; + goto err_busy; + } + fiq_glue_update_return_handler(fiq_return); + ret = 0; +err_busy: + mutex_unlock(&fiq_glue_lock); + + return ret; +} +EXPORT_SYMBOL(fiq_glue_set_return_handler); + +int fiq_glue_clear_return_handler(void (*fiq_return)(void)) +{ + int ret; + + mutex_lock(&fiq_glue_lock); + if (WARN_ON(fiq_return_handler != fiq_return)) { + ret = -EINVAL; + goto err_inval; + } + fiq_glue_update_return_handler(NULL); + ret = 0; +err_inval: + mutex_unlock(&fiq_glue_lock); + + return ret; +} +EXPORT_SYMBOL(fiq_glue_clear_return_handler); + +/** + * fiq_glue_resume - Restore fiqs after suspend or low power idle states + * + * This must be called before calling local_fiq_enable after returning from a + * power state where the fiq mode registers were lost. If a driver provided + * a resume hook when it registered the handler it will be called.
+ */ + +void fiq_glue_resume(void) +{ + if (!current_handler) + return; + fiq_glue_setup(current_handler->fiq, current_handler, + __get_cpu_var(fiq_stack) + THREAD_START_SP, + fiq_return_handler); + if (current_handler->resume) + current_handler->resume(current_handler); +} + diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c index 6c7b06854fce..51936bde1eb2 100644 --- a/arch/arm/common/locomo.c +++ b/arch/arm/common/locomo.c @@ -826,28 +826,6 @@ static int locomo_match(struct device *_dev, struct device_driver *_drv) return dev->devid == drv->devid; } -static int locomo_bus_suspend(struct device *dev, pm_message_t state) -{ - struct locomo_dev *ldev = LOCOMO_DEV(dev); - struct locomo_driver *drv = LOCOMO_DRV(dev->driver); - int ret = 0; - - if (drv && drv->suspend) - ret = drv->suspend(ldev, state); - return ret; -} - -static int locomo_bus_resume(struct device *dev) -{ - struct locomo_dev *ldev = LOCOMO_DEV(dev); - struct locomo_driver *drv = LOCOMO_DRV(dev->driver); - int ret = 0; - - if (drv && drv->resume) - ret = drv->resume(ldev); - return ret; -} - static int locomo_bus_probe(struct device *dev) { struct locomo_dev *ldev = LOCOMO_DEV(dev); @@ -875,8 +853,6 @@ struct bus_type locomo_bus_type = { .match = locomo_match, .probe = locomo_bus_probe, .remove = locomo_bus_remove, - .suspend = locomo_bus_suspend, - .resume = locomo_bus_resume, }; int locomo_driver_register(struct locomo_driver *driver) diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig index 8c2a2619971b..f1d7834990ec 100644 --- a/arch/arm/configs/exynos_defconfig +++ b/arch/arm/configs/exynos_defconfig @@ -244,7 +244,7 @@ CONFIG_USB_STORAGE_ONETOUCH=m CONFIG_USB_STORAGE_KARMA=m CONFIG_USB_STORAGE_CYPRESS_ATACB=m CONFIG_USB_STORAGE_ENE_UB6250=m -CONFIG_USB_UAS=m +CONFIG_USB_UAS=y CONFIG_USB_DWC3=y CONFIG_USB_DWC2=y CONFIG_USB_HSIC_USB3503=y diff --git a/arch/arm/configs/ranchu_defconfig b/arch/arm/configs/ranchu_defconfig new file mode 100644 index 000000000000..69157c4c21fd --- /dev/null +++ b/arch/arm/configs/ranchu_defconfig @@ -0,0 +1,313 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +CONFIG_PROFILING=y +CONFIG_OPROFILE=y +CONFIG_ARCH_MMAP_RND_BITS=16 +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_ARCH_VIRT=y +CONFIG_ARM_KERNMEM_PERMS=y +CONFIG_SMP=y +CONFIG_PREEMPT=y +CONFIG_AEABI=y +CONFIG_HIGHMEM=y +CONFIG_KSM=y +CONFIG_SECCOMP=y +CONFIG_CMDLINE="console=ttyAMA0" +CONFIG_VFP=y +CONFIG_NEON=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_INET_ESP=y +# CONFIG_INET_LRO is not set +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y 
+CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_SMSC911X=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_USB_USBNET=y +# CONFIG_WLAN is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=y +CONFIG_TABLET_USB_AIPTEK=y +CONFIG_TABLET_USB_GTCO=y +CONFIG_TABLET_USB_HANWANG=y +CONFIG_TABLET_USB_KBTAB=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_AMBAKMI=y +# CONFIG_VT is not set +# 
CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +CONFIG_MEDIA_SUPPORT=y +CONFIG_FB=y +CONFIG_FB_GOLDFISH=y +CONFIG_FB_SIMPLE=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_HOLTEK=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_UCLOGIC=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_ROCCAT=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_OTG_WAKELOCK=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PL031=y +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SW_SYNC_USER=y +CONFIG_ION=y +CONFIG_GOLDFISH_AUDIO=y +CONFIG_GOLDFISH=y +CONFIG_GOLDFISH_PIPE=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_FUSE_FS=y +CONFIG_CUSE=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_DEBUG_INFO=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_PANIC_TIMEOUT=5 +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_ENABLE_DEFAULT_TRACERS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y +CONFIG_VIRTUALIZATION=y diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig index 2620ce790db0..371fca4e1ab7 100644 --- a/arch/arm/configs/socfpga_defconfig +++ b/arch/arm/configs/socfpga_defconfig @@ -57,6 +57,7 @@ CONFIG_MTD_M25P80=y CONFIG_MTD_NAND=y CONFIG_MTD_NAND_DENALI_DT=y CONFIG_MTD_SPI_NOR=y +# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set CONFIG_SPI_CADENCE_QUADSPI=y CONFIG_OF_OVERLAY=y CONFIG_OF_CONFIGFS=y diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig index 5caaf971fb50..df433abfcb02 100644 --- a/arch/arm/configs/sunxi_defconfig +++ b/arch/arm/configs/sunxi_defconfig @@ -10,6 +10,7 @@ CONFIG_SMP=y CONFIG_NR_CPUS=8 CONFIG_AEABI=y CONFIG_HIGHMEM=y +CONFIG_CMA=y 
CONFIG_ARM_APPENDED_DTB=y CONFIG_ARM_ATAG_DTB_COMPAT=y CONFIG_CPU_FREQ=y @@ -33,6 +34,7 @@ CONFIG_CAN_SUN4I=y # CONFIG_WIRELESS is not set CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DMA_CMA=y CONFIG_BLK_DEV_SD=y CONFIG_ATA=y CONFIG_AHCI_SUNXI=y diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index b8e69fe282b8..925d1364727a 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -121,4 +121,10 @@ config CRYPTO_CHACHA20_NEON select CRYPTO_BLKCIPHER select CRYPTO_CHACHA20 +config CRYPTO_SPECK_NEON + tristate "NEON accelerated Speck cipher algorithms" + depends on KERNEL_MODE_NEON + select CRYPTO_BLKCIPHER + select CRYPTO_SPECK + endif diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index 30ef8e291271..3304e671918d 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o +obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o @@ -53,7 +54,9 @@ ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o +speck-neon-y := speck-neon-core.o speck-neon-glue.o +ifdef REGENERATE_ARM_CRYPTO quiet_cmd_perl = PERL $@ cmd_perl = $(PERL) $(<) > $(@) @@ -62,5 +65,6 @@ $(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl $(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl $(call cmd,perl) +endif .PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c index 1b0e0e86ee9c..96e62ec105d0 100644 --- a/arch/arm/crypto/crc32-ce-glue.c +++ b/arch/arm/crypto/crc32-ce-glue.c @@ -188,6 +188,7 @@ static struct shash_alg crc32_pmull_algs[] = { { .base.cra_name = "crc32", .base.cra_driver_name = "crc32-arm-ce", .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .base.cra_blocksize = 1, .base.cra_module = THIS_MODULE, }, { @@ -203,6 +204,7 @@ static struct shash_alg crc32_pmull_algs[] = { { .base.cra_name = "crc32c", .base.cra_driver_name = "crc32c-arm-ce", .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .base.cra_blocksize = 1, .base.cra_module = THIS_MODULE, } }; diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S new file mode 100644 index 000000000000..3c1e203e53b9 --- /dev/null +++ b/arch/arm/crypto/speck-neon-core.S @@ -0,0 +1,432 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS + * + * Copyright (c) 2018 Google, Inc + * + * Author: Eric Biggers + */ + +#include + + .text + .fpu neon + + // arguments + ROUND_KEYS .req r0 // const {u64,u32} *round_keys + NROUNDS .req r1 // int nrounds + DST .req r2 // void *dst + SRC .req r3 // const void *src + NBYTES .req r4 // unsigned int nbytes + TWEAK .req r5 // void *tweak + + // registers which hold the data being encrypted/decrypted + X0 .req q0 + X0_L .req d0 + X0_H .req d1 + Y0 .req q1 + Y0_H .req d3 + X1 .req q2 + X1_L .req d4 + X1_H .req d5 + Y1 .req q3 + Y1_H .req d7 + X2 .req q4 + X2_L .req d8 + X2_H .req d9 + Y2 .req q5 + Y2_H .req d11 + X3 .req q6 + X3_L .req d12 + X3_H .req d13 + Y3 .req q7 + Y3_H .req d15 + + // the 
round key, duplicated in all lanes + ROUND_KEY .req q8 + ROUND_KEY_L .req d16 + ROUND_KEY_H .req d17 + + // index vector for vtbl-based 8-bit rotates + ROTATE_TABLE .req d18 + + // multiplication table for updating XTS tweaks + GF128MUL_TABLE .req d19 + GF64MUL_TABLE .req d19 + + // current XTS tweak value(s) + TWEAKV .req q10 + TWEAKV_L .req d20 + TWEAKV_H .req d21 + + TMP0 .req q12 + TMP0_L .req d24 + TMP0_H .req d25 + TMP1 .req q13 + TMP2 .req q14 + TMP3 .req q15 + + .align 4 +.Lror64_8_table: + .byte 1, 2, 3, 4, 5, 6, 7, 0 +.Lror32_8_table: + .byte 1, 2, 3, 0, 5, 6, 7, 4 +.Lrol64_8_table: + .byte 7, 0, 1, 2, 3, 4, 5, 6 +.Lrol32_8_table: + .byte 3, 0, 1, 2, 7, 4, 5, 6 +.Lgf128mul_table: + .byte 0, 0x87 + .fill 14 +.Lgf64mul_table: + .byte 0, 0x1b, (0x1b << 1), (0x1b << 1) ^ 0x1b + .fill 12 + +/* + * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time + * + * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for + * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes + * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64. + * + * The 8-bit rotates are implemented using vtbl instead of vshr + vsli because + * the vtbl approach is faster on some processors and the same speed on others. + */ +.macro _speck_round_128bytes n + + // x = ror(x, 8) + vtbl.8 X0_L, {X0_L}, ROTATE_TABLE + vtbl.8 X0_H, {X0_H}, ROTATE_TABLE + vtbl.8 X1_L, {X1_L}, ROTATE_TABLE + vtbl.8 X1_H, {X1_H}, ROTATE_TABLE + vtbl.8 X2_L, {X2_L}, ROTATE_TABLE + vtbl.8 X2_H, {X2_H}, ROTATE_TABLE + vtbl.8 X3_L, {X3_L}, ROTATE_TABLE + vtbl.8 X3_H, {X3_H}, ROTATE_TABLE + + // x += y + vadd.u\n X0, Y0 + vadd.u\n X1, Y1 + vadd.u\n X2, Y2 + vadd.u\n X3, Y3 + + // x ^= k + veor X0, ROUND_KEY + veor X1, ROUND_KEY + veor X2, ROUND_KEY + veor X3, ROUND_KEY + + // y = rol(y, 3) + vshl.u\n TMP0, Y0, #3 + vshl.u\n TMP1, Y1, #3 + vshl.u\n TMP2, Y2, #3 + vshl.u\n TMP3, Y3, #3 + vsri.u\n TMP0, Y0, #(\n - 3) + vsri.u\n TMP1, Y1, #(\n - 3) + vsri.u\n TMP2, Y2, #(\n - 3) + vsri.u\n TMP3, Y3, #(\n - 3) + + // y ^= x + veor Y0, TMP0, X0 + veor Y1, TMP1, X1 + veor Y2, TMP2, X2 + veor Y3, TMP3, X3 +.endm + +/* + * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time + * + * This is the inverse of _speck_round_128bytes(). + */ +.macro _speck_unround_128bytes n + + // y ^= x + veor TMP0, Y0, X0 + veor TMP1, Y1, X1 + veor TMP2, Y2, X2 + veor TMP3, Y3, X3 + + // y = ror(y, 3) + vshr.u\n Y0, TMP0, #3 + vshr.u\n Y1, TMP1, #3 + vshr.u\n Y2, TMP2, #3 + vshr.u\n Y3, TMP3, #3 + vsli.u\n Y0, TMP0, #(\n - 3) + vsli.u\n Y1, TMP1, #(\n - 3) + vsli.u\n Y2, TMP2, #(\n - 3) + vsli.u\n Y3, TMP3, #(\n - 3) + + // x ^= k + veor X0, ROUND_KEY + veor X1, ROUND_KEY + veor X2, ROUND_KEY + veor X3, ROUND_KEY + + // x -= y + vsub.u\n X0, Y0 + vsub.u\n X1, Y1 + vsub.u\n X2, Y2 + vsub.u\n X3, Y3 + + // x = rol(x, 8); + vtbl.8 X0_L, {X0_L}, ROTATE_TABLE + vtbl.8 X0_H, {X0_H}, ROTATE_TABLE + vtbl.8 X1_L, {X1_L}, ROTATE_TABLE + vtbl.8 X1_H, {X1_H}, ROTATE_TABLE + vtbl.8 X2_L, {X2_L}, ROTATE_TABLE + vtbl.8 X2_H, {X2_H}, ROTATE_TABLE + vtbl.8 X3_L, {X3_L}, ROTATE_TABLE + vtbl.8 X3_H, {X3_H}, ROTATE_TABLE +.endm + +.macro _xts128_precrypt_one dst_reg, tweak_buf, tmp + + // Load the next source block + vld1.8 {\dst_reg}, [SRC]! + + // Save the current tweak in the tweak buffer + vst1.8 {TWEAKV}, [\tweak_buf:128]! 
+ + // XOR the next source block with the current tweak + veor \dst_reg, TWEAKV + + /* + * Calculate the next tweak by multiplying the current one by x, + * modulo p(x) = x^128 + x^7 + x^2 + x + 1. + */ + vshr.u64 \tmp, TWEAKV, #63 + vshl.u64 TWEAKV, #1 + veor TWEAKV_H, \tmp\()_L + vtbl.8 \tmp\()_H, {GF128MUL_TABLE}, \tmp\()_H + veor TWEAKV_L, \tmp\()_H +.endm + +.macro _xts64_precrypt_two dst_reg, tweak_buf, tmp + + // Load the next two source blocks + vld1.8 {\dst_reg}, [SRC]! + + // Save the current two tweaks in the tweak buffer + vst1.8 {TWEAKV}, [\tweak_buf:128]! + + // XOR the next two source blocks with the current two tweaks + veor \dst_reg, TWEAKV + + /* + * Calculate the next two tweaks by multiplying the current ones by x^2, + * modulo p(x) = x^64 + x^4 + x^3 + x + 1. + */ + vshr.u64 \tmp, TWEAKV, #62 + vshl.u64 TWEAKV, #2 + vtbl.8 \tmp\()_L, {GF64MUL_TABLE}, \tmp\()_L + vtbl.8 \tmp\()_H, {GF64MUL_TABLE}, \tmp\()_H + veor TWEAKV, \tmp +.endm + +/* + * _speck_xts_crypt() - Speck-XTS encryption/decryption + * + * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer + * using Speck-XTS, specifically the variant with a block size of '2n' and round + * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and + * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a + * nonzero multiple of 128. + */ +.macro _speck_xts_crypt n, decrypting + push {r4-r7} + mov r7, sp + + /* + * The first four parameters were passed in registers r0-r3. Load the + * additional parameters, which were passed on the stack. + */ + ldr NBYTES, [sp, #16] + ldr TWEAK, [sp, #20] + + /* + * If decrypting, modify the ROUND_KEYS parameter to point to the last + * round key rather than the first, since for decryption the round keys + * are used in reverse order. + */ +.if \decrypting +.if \n == 64 + add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #3 + sub ROUND_KEYS, #8 +.else + add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #2 + sub ROUND_KEYS, #4 +.endif +.endif + + // Load the index vector for vtbl-based 8-bit rotates +.if \decrypting + ldr r12, =.Lrol\n\()_8_table +.else + ldr r12, =.Lror\n\()_8_table +.endif + vld1.8 {ROTATE_TABLE}, [r12:64] + + // One-time XTS preparation + + /* + * Allocate stack space to store 128 bytes worth of tweaks. For + * performance, this space is aligned to a 16-byte boundary so that we + * can use the load/store instructions that declare 16-byte alignment. + */ + sub sp, #128 + bic sp, #0xf + +.if \n == 64 + // Load first tweak + vld1.8 {TWEAKV}, [TWEAK] + + // Load GF(2^128) multiplication table + ldr r12, =.Lgf128mul_table + vld1.8 {GF128MUL_TABLE}, [r12:64] +.else + // Load first tweak + vld1.8 {TWEAKV_L}, [TWEAK] + + // Load GF(2^64) multiplication table + ldr r12, =.Lgf64mul_table + vld1.8 {GF64MUL_TABLE}, [r12:64] + + // Calculate second tweak, packing it together with the first + vshr.u64 TMP0_L, TWEAKV_L, #63 + vtbl.u8 TMP0_L, {GF64MUL_TABLE}, TMP0_L + vshl.u64 TWEAKV_H, TWEAKV_L, #1 + veor TWEAKV_H, TMP0_L +.endif + +.Lnext_128bytes_\@: + + /* + * Load the source blocks into {X,Y}[0-3], XOR them with their XTS tweak + * values, and save the tweaks on the stack for later. Then + * de-interleave the 'x' and 'y' elements of each block, i.e. make it so + * that the X[0-3] registers contain only the second halves of blocks, + * and the Y[0-3] registers contain only the first halves of blocks. + * (Speck uses the order (y, x) rather than the more intuitive (x, y).) 
+ */ + mov r12, sp +.if \n == 64 + _xts128_precrypt_one X0, r12, TMP0 + _xts128_precrypt_one Y0, r12, TMP0 + _xts128_precrypt_one X1, r12, TMP0 + _xts128_precrypt_one Y1, r12, TMP0 + _xts128_precrypt_one X2, r12, TMP0 + _xts128_precrypt_one Y2, r12, TMP0 + _xts128_precrypt_one X3, r12, TMP0 + _xts128_precrypt_one Y3, r12, TMP0 + vswp X0_L, Y0_H + vswp X1_L, Y1_H + vswp X2_L, Y2_H + vswp X3_L, Y3_H +.else + _xts64_precrypt_two X0, r12, TMP0 + _xts64_precrypt_two Y0, r12, TMP0 + _xts64_precrypt_two X1, r12, TMP0 + _xts64_precrypt_two Y1, r12, TMP0 + _xts64_precrypt_two X2, r12, TMP0 + _xts64_precrypt_two Y2, r12, TMP0 + _xts64_precrypt_two X3, r12, TMP0 + _xts64_precrypt_two Y3, r12, TMP0 + vuzp.32 Y0, X0 + vuzp.32 Y1, X1 + vuzp.32 Y2, X2 + vuzp.32 Y3, X3 +.endif + + // Do the cipher rounds + + mov r12, ROUND_KEYS + mov r6, NROUNDS + +.Lnext_round_\@: +.if \decrypting +.if \n == 64 + vld1.64 ROUND_KEY_L, [r12] + sub r12, #8 + vmov ROUND_KEY_H, ROUND_KEY_L +.else + vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12] + sub r12, #4 +.endif + _speck_unround_128bytes \n +.else +.if \n == 64 + vld1.64 ROUND_KEY_L, [r12]! + vmov ROUND_KEY_H, ROUND_KEY_L +.else + vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]! +.endif + _speck_round_128bytes \n +.endif + subs r6, r6, #1 + bne .Lnext_round_\@ + + // Re-interleave the 'x' and 'y' elements of each block +.if \n == 64 + vswp X0_L, Y0_H + vswp X1_L, Y1_H + vswp X2_L, Y2_H + vswp X3_L, Y3_H +.else + vzip.32 Y0, X0 + vzip.32 Y1, X1 + vzip.32 Y2, X2 + vzip.32 Y3, X3 +.endif + + // XOR the encrypted/decrypted blocks with the tweaks we saved earlier + mov r12, sp + vld1.8 {TMP0, TMP1}, [r12:128]! + vld1.8 {TMP2, TMP3}, [r12:128]! + veor X0, TMP0 + veor Y0, TMP1 + veor X1, TMP2 + veor Y1, TMP3 + vld1.8 {TMP0, TMP1}, [r12:128]! + vld1.8 {TMP2, TMP3}, [r12:128]! + veor X2, TMP0 + veor Y2, TMP1 + veor X3, TMP2 + veor Y3, TMP3 + + // Store the ciphertext in the destination buffer + vst1.8 {X0, Y0}, [DST]! + vst1.8 {X1, Y1}, [DST]! + vst1.8 {X2, Y2}, [DST]! + vst1.8 {X3, Y3}, [DST]! + + // Continue if there are more 128-byte chunks remaining, else return + subs NBYTES, #128 + bne .Lnext_128bytes_\@ + + // Store the next tweak +.if \n == 64 + vst1.8 {TWEAKV}, [TWEAK] +.else + vst1.8 {TWEAKV_L}, [TWEAK] +.endif + + mov sp, r7 + pop {r4-r7} + bx lr +.endm + +ENTRY(speck128_xts_encrypt_neon) + _speck_xts_crypt n=64, decrypting=0 +ENDPROC(speck128_xts_encrypt_neon) + +ENTRY(speck128_xts_decrypt_neon) + _speck_xts_crypt n=64, decrypting=1 +ENDPROC(speck128_xts_decrypt_neon) + +ENTRY(speck64_xts_encrypt_neon) + _speck_xts_crypt n=32, decrypting=0 +ENDPROC(speck64_xts_encrypt_neon) + +ENTRY(speck64_xts_decrypt_neon) + _speck_xts_crypt n=32, decrypting=1 +ENDPROC(speck64_xts_decrypt_neon) diff --git a/arch/arm/crypto/speck-neon-glue.c b/arch/arm/crypto/speck-neon-glue.c new file mode 100644 index 000000000000..f012c3ea998f --- /dev/null +++ b/arch/arm/crypto/speck-neon-glue.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS + * + * Copyright (c) 2018 Google, Inc + * + * Note: the NIST recommendation for XTS only specifies a 128-bit block size, + * but a 64-bit version (needed for Speck64) is fairly straightforward; the math + * is just done in GF(2^64) instead of GF(2^128), with the reducing polynomial + * x^64 + x^4 + x^3 + x + 1 from the original XEX paper (Rogaway, 2004: + * "Efficient Instantiations of Tweakable Blockciphers and Refinements to Modes + * OCB and PMAC"), represented as 0x1B. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* The assembly functions only handle multiples of 128 bytes */ +#define SPECK_NEON_CHUNK_SIZE 128 + +/* Speck128 */ + +struct speck128_xts_tfm_ctx { + struct speck128_tfm_ctx main_key; + struct speck128_tfm_ctx tweak_key; +}; + +asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *, + u8 *, const u8 *); +typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *, + const void *, unsigned int, void *); + +static __always_inline int +__speck128_xts_crypt(struct skcipher_request *req, + speck128_crypt_one_t crypt_one, + speck128_xts_crypt_many_t crypt_many) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + le128 tweak; + int err; + + err = skcipher_walk_virt(&walk, req, true); + + crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + u8 *dst = walk.dst.virt.addr; + const u8 *src = walk.src.virt.addr; + + if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { + unsigned int count; + + count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); + kernel_neon_begin(); + (*crypt_many)(ctx->main_key.round_keys, + ctx->main_key.nrounds, + dst, src, count, &tweak); + kernel_neon_end(); + dst += count; + src += count; + nbytes -= count; + } + + /* Handle any remainder with generic code */ + while (nbytes >= sizeof(tweak)) { + le128_xor((le128 *)dst, (const le128 *)src, &tweak); + (*crypt_one)(&ctx->main_key, dst, dst); + le128_xor((le128 *)dst, (const le128 *)dst, &tweak); + gf128mul_x_ble(&tweak, &tweak); + + dst += sizeof(tweak); + src += sizeof(tweak); + nbytes -= sizeof(tweak); + } + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int speck128_xts_encrypt(struct skcipher_request *req) +{ + return __speck128_xts_crypt(req, crypto_speck128_encrypt, + speck128_xts_encrypt_neon); +} + +static int speck128_xts_decrypt(struct skcipher_request *req) +{ + return __speck128_xts_crypt(req, crypto_speck128_decrypt, + speck128_xts_decrypt_neon); +} + +static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + int err; + + err = xts_verify_key(tfm, key, keylen); + if (err) + return err; + + keylen /= 2; + + err = crypto_speck128_setkey(&ctx->main_key, key, keylen); + if (err) + return err; + + return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen); +} + +/* Speck64 */ + +struct speck64_xts_tfm_ctx { + struct speck64_tfm_ctx main_key; + struct speck64_tfm_ctx tweak_key; +}; + +asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *, + u8 *, const u8 *); +typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *, + const void *, unsigned int, void *); + 
+static __always_inline int +__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one, + speck64_xts_crypt_many_t crypt_many) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + __le64 tweak; + int err; + + err = skcipher_walk_virt(&walk, req, true); + + crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + u8 *dst = walk.dst.virt.addr; + const u8 *src = walk.src.virt.addr; + + if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { + unsigned int count; + + count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); + kernel_neon_begin(); + (*crypt_many)(ctx->main_key.round_keys, + ctx->main_key.nrounds, + dst, src, count, &tweak); + kernel_neon_end(); + dst += count; + src += count; + nbytes -= count; + } + + /* Handle any remainder with generic code */ + while (nbytes >= sizeof(tweak)) { + *(__le64 *)dst = *(__le64 *)src ^ tweak; + (*crypt_one)(&ctx->main_key, dst, dst); + *(__le64 *)dst ^= tweak; + tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^ + ((tweak & cpu_to_le64(1ULL << 63)) ? + 0x1B : 0)); + dst += sizeof(tweak); + src += sizeof(tweak); + nbytes -= sizeof(tweak); + } + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int speck64_xts_encrypt(struct skcipher_request *req) +{ + return __speck64_xts_crypt(req, crypto_speck64_encrypt, + speck64_xts_encrypt_neon); +} + +static int speck64_xts_decrypt(struct skcipher_request *req) +{ + return __speck64_xts_crypt(req, crypto_speck64_decrypt, + speck64_xts_decrypt_neon); +} + +static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + int err; + + err = xts_verify_key(tfm, key, keylen); + if (err) + return err; + + keylen /= 2; + + err = crypto_speck64_setkey(&ctx->main_key, key, keylen); + if (err) + return err; + + return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen); +} + +static struct skcipher_alg speck_algs[] = { + { + .base.cra_name = "xts(speck128)", + .base.cra_driver_name = "xts-speck128-neon", + .base.cra_priority = 300, + .base.cra_blocksize = SPECK128_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = 2 * SPECK128_128_KEY_SIZE, + .max_keysize = 2 * SPECK128_256_KEY_SIZE, + .ivsize = SPECK128_BLOCK_SIZE, + .walksize = SPECK_NEON_CHUNK_SIZE, + .setkey = speck128_xts_setkey, + .encrypt = speck128_xts_encrypt, + .decrypt = speck128_xts_decrypt, + }, { + .base.cra_name = "xts(speck64)", + .base.cra_driver_name = "xts-speck64-neon", + .base.cra_priority = 300, + .base.cra_blocksize = SPECK64_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = 2 * SPECK64_96_KEY_SIZE, + .max_keysize = 2 * SPECK64_128_KEY_SIZE, + .ivsize = SPECK64_BLOCK_SIZE, + .walksize = SPECK_NEON_CHUNK_SIZE, + .setkey = speck64_xts_setkey, + .encrypt = speck64_xts_encrypt, + .decrypt = speck64_xts_decrypt, + } +}; + +static int __init speck_neon_module_init(void) +{ + if (!(elf_hwcap & HWCAP_NEON)) + return -ENODEV; + return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); +} + +static void __exit speck_neon_module_exit(void) +{ + crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); +} + 
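For context, here is a minimal sketch of how an in-kernel caller might reach the transforms registered above. It is hypothetical, not part of the patch, and the error handling is abbreviated; because the driver registers with cra_priority 300, "xts(speck128)" resolves to xts-speck128-neon in preference to the generic xts template once this module is loaded.

#include <crypto/skcipher.h>
#include <linux/err.h>

/* Hypothetical consumer: allocate the XTS-Speck128 transform and set
 * a key.  64 bytes = two Speck128-256 keys (main + tweak), matching
 * max_keysize above; the halves are made distinct since XTS key
 * checks reject identical halves in FIPS mode.
 */
static int speck_xts_demo(void)
{
	struct crypto_skcipher *tfm;
	u8 key[64] = { 1 };	/* key[0] = 1, rest zero */
	int err;

	tfm = crypto_alloc_skcipher("xts(speck128)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));

	crypto_free_skcipher(tfm);
	return err;
}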
+module_init(speck_neon_module_init); +module_exit(speck_neon_module_exit); + +MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Eric Biggers "); +MODULE_ALIAS_CRYPTO("xts(speck128)"); +MODULE_ALIAS_CRYPTO("xts-speck128-neon"); +MODULE_ALIAS_CRYPTO("xts(speck64)"); +MODULE_ALIAS_CRYPTO("xts-speck64-neon"); diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index ad301f107dd2..9342904cccca 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -518,4 +518,32 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) #endif .endm + .macro bug, msg, line +#ifdef CONFIG_THUMB2_KERNEL +1: .inst 0xde02 +#else +1: .inst 0xe7f001f2 +#endif +#ifdef CONFIG_DEBUG_BUGVERBOSE + .pushsection .rodata.str, "aMS", %progbits, 1 +2: .asciz "\msg" + .popsection + .pushsection __bug_table, "aw" + .align 2 + .word 1b, 2b + .hword \line + .popsection +#endif + .endm + +#ifdef CONFIG_KPROBES +#define _ASM_NOKPROBE(entry) \ + .pushsection "_kprobe_blacklist", "aw" ; \ + .balign 4 ; \ + .long entry; \ + .popsection +#else +#define _ASM_NOKPROBE(entry) +#endif + #endif /* __ASM_ASSEMBLER_H__ */ diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h index 0722ec6be692..6821f1249300 100644 --- a/arch/arm/include/asm/dma-iommu.h +++ b/arch/arm/include/asm/dma-iommu.h @@ -7,7 +7,6 @@ #include #include #include -#include #include #define ARM_MAPPING_ERROR (~(dma_addr_t)0x0) diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 8c5ca92a87a9..51f0d2417fa9 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -113,8 +113,12 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the base location for PIE (ET_DYN with INTERP) loads. */ -#define ELF_ET_DYN_BASE 0x400000UL +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) /* When the program starts, a1 contains a pointer to a function to be registered with atexit, as per the SVR4 ABI. A value of 0 means we diff --git a/arch/arm/include/asm/fiq_glue.h b/arch/arm/include/asm/fiq_glue.h new file mode 100644 index 000000000000..a9e244f9f197 --- /dev/null +++ b/arch/arm/include/asm/fiq_glue.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __ASM_FIQ_GLUE_H +#define __ASM_FIQ_GLUE_H + +struct fiq_glue_handler { + void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp); + void (*resume)(struct fiq_glue_handler *h); +}; +typedef void (*fiq_return_handler_t)(void); + +int fiq_glue_register_handler(struct fiq_glue_handler *handler); +int fiq_glue_set_return_handler(fiq_return_handler_t fiq_return); +int fiq_glue_clear_return_handler(fiq_return_handler_t fiq_return); + +#ifdef CONFIG_FIQ_GLUE +void fiq_glue_resume(void); +#else +static inline void fiq_glue_resume(void) {} +#endif + +#endif diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h index 74e51d6bd93f..f8712e3c29cf 100644 --- a/arch/arm/include/asm/hardware/locomo.h +++ b/arch/arm/include/asm/hardware/locomo.h @@ -189,8 +189,6 @@ struct locomo_driver { unsigned int devid; int (*probe)(struct locomo_dev *); int (*remove)(struct locomo_dev *); - int (*suspend)(struct locomo_dev *, pm_message_t); - int (*resume)(struct locomo_dev *); }; #define LOCOMO_DRV(_d) container_of((_d), struct locomo_driver, drv) diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h index 3b73fdcf3627..8de1100d1067 100644 --- a/arch/arm/include/asm/kgdb.h +++ b/arch/arm/include/asm/kgdb.h @@ -77,7 +77,7 @@ extern int kgdb_fault_expected; #define KGDB_MAX_NO_CPUS 1 #define BUFMAX 400 -#define NUMREGBYTES (DBG_MAX_REG_NUM << 2) +#define NUMREGBYTES (GDB_MAX_REGS << 2) #define NUMCRITREGBYTES (32 << 2) #define _R0 0 diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index c8781450905b..3ab8b3781bfe 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -161,8 +161,7 @@ #else #define VTTBR_X (5 - KVM_T0SZ) #endif -#define VTTBR_BADDR_SHIFT (VTTBR_X - 1) -#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) +#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X) #define VTTBR_VMID_SHIFT _AC(48, ULL) #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT) diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 4a879f6ff13b..65572e14306c 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -75,6 +75,9 @@ struct kvm_arch { /* Interrupt controller */ struct vgic_dist vgic; int max_vcpus; + + /* Mandated version of PSCI */ + u32 psci_version; }; #define KVM_NR_MEM_OBJS 40 @@ -293,4 +296,22 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); +static inline bool kvm_arm_harden_branch_predictor(void) +{ + /* No way to detect it yet, pretend it is not there. */ + return false; +} + +#define KVM_SSBD_UNKNOWN -1 +#define KVM_SSBD_FORCE_DISABLE 0 +#define KVM_SSBD_KERNEL 1 +#define KVM_SSBD_FORCE_ENABLE 2 +#define KVM_SSBD_MITIGATED 3 + +static inline int kvm_arm_have_ssbd(void) +{ + /* No way to detect it yet, pretend it is not there. 
*/ + return KVM_SSBD_UNKNOWN; +} + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index fa6f2174276b..8a098e65f5f8 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -28,6 +28,13 @@ */ #define kern_hyp_va(kva) (kva) +/* Contrary to arm64, there is no need to generate a PC-relative address */ +#define hyp_symbol_addr(s) \ + ({ \ + typeof(s) *addr = &(s); \ + addr; \ + }) + /* * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels. */ @@ -221,6 +228,37 @@ static inline unsigned int kvm_get_vmid_bits(void) return 8; } +/* + * We are not in the kvm->srcu critical section most of the time, so we take + * the SRCU read lock here. Since we copy the data from the user page, we + * can immediately drop the lock again. + */ +static inline int kvm_read_guest_lock(struct kvm *kvm, + gpa_t gpa, void *data, unsigned long len) +{ + int srcu_idx = srcu_read_lock(&kvm->srcu); + int ret = kvm_read_guest(kvm, gpa, data, len); + + srcu_read_unlock(&kvm->srcu, srcu_idx); + + return ret; +} + +static inline void *kvm_get_hyp_vector(void) +{ + return kvm_ksym_ref(__kvm_hyp_vector); +} + +static inline int kvm_map_vectors(void) +{ + return 0; +} + +static inline int hyp_map_aux_data(void) +{ + return 0; +} + #endif /* !__ASSEMBLY__ */ #endif /* __ARM_KVM_MMU_H__ */ diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h deleted file mode 100644 index 6bda945d31fa..000000000000 --- a/arch/arm/include/asm/kvm_psci.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (C) 2012 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#ifndef __ARM_KVM_PSCI_H__ -#define __ARM_KVM_PSCI_H__ - -#define KVM_ARM_PSCI_0_1 1 -#define KVM_ARM_PSCI_0_2 2 - -int kvm_psci_version(struct kvm_vcpu *vcpu); -int kvm_psci_call(struct kvm_vcpu *vcpu); - -#endif /* __ARM_KVM_PSCI_H__ */ diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index b2902a5cd780..2d7344f0e208 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -57,7 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) extern pgd_t *pgd_alloc(struct mm_struct *mm); extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO) static inline void clean_pte_table(pte_t *pte) { diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index e9c9a117bd25..c7cdbb43ae7c 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -126,8 +126,7 @@ extern unsigned long profile_pc(struct pt_regs *regs); /* * kprobe-based event tracer support */ -#include -#include +#include #define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0)) extern int regs_query_register_offset(const char *name); diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index f59ab9bcbaf9..201dc2011c16 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h @@ -25,6 +25,20 @@ void init_cpu_topology(void); void store_cpu_topology(unsigned int cpuid); const struct cpumask *cpu_coregroup_mask(int cpu); +#include + +/* Replace task scheduler's default frequency-invariant accounting */ +#define arch_scale_freq_capacity topology_get_freq_scale + +/* Replace task scheduler's default max-frequency-invariant accounting */ +#define arch_scale_max_freq_capacity topology_get_max_freq_scale + +/* Replace task scheduler's default cpu-invariant accounting */ +#define arch_scale_cpu_capacity topology_get_cpu_scale + +/* Enable topology flag updates */ +#define arch_update_cpu_topology topology_update_cpu_topology + #else static inline void init_cpu_topology(void) { } diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h index 9c99e817535e..5b85889f82ee 100644 --- a/arch/arm/include/asm/vdso.h +++ b/arch/arm/include/asm/vdso.h @@ -12,8 +12,6 @@ struct mm_struct; void arm_install_vdso(struct mm_struct *mm, unsigned long addr); -extern char vdso_start, vdso_end; - extern unsigned int vdso_total_pages; #else /* CONFIG_VDSO */ diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h index 1f57bbe82b6f..df24fc8da1bc 100644 --- a/arch/arm/include/uapi/asm/kvm.h +++ b/arch/arm/include/uapi/asm/kvm.h @@ -180,6 +180,12 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_VFP_FPINST 0x1009 #define KVM_REG_ARM_VFP_FPINST2 0x100A +/* KVM-as-firmware specific pseudo-registers */ +#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_FW | ((r) & 0xffff)) +#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) + /* Device Control API: ARM VGIC */ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index d523cd8439a3..0f07579af472 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -300,6 +300,8 @@ mov r2, sp ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr ldr lr, [r2, #\offset + S_PC]! 
@ get pc + tst r1, #PSR_I_BIT | 0x0f + bne 1f msr spsr_cxsf, r1 @ save in spsr_svc #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) @ We must avoid clrex due to Cortex-A15 erratum #830321 @@ -314,6 +316,7 @@ @ after ldm {}^ add sp, sp, #\offset + PT_REGS_SIZE movs pc, lr @ return & move spsr_svc into cpsr +1: bug "Returning to usermode but unexpected PSR bits set?", \@ #elif defined(CONFIG_CPU_V7M) @ V7M restore. @ Note that we don't need to do clrex here as clearing the local @@ -329,6 +332,8 @@ ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr ldr lr, [sp, #\offset + S_PC] @ get pc add sp, sp, #\offset + S_SP + tst r1, #PSR_I_BIT | 0x0f + bne 1f msr spsr_cxsf, r1 @ save in spsr_svc @ We must avoid clrex due to Cortex-A15 erratum #830321 @@ -341,6 +346,7 @@ .endif add sp, sp, #PT_REGS_SIZE - S_SP movs pc, lr @ return & move spsr_svc into cpsr +1: bug "Returning to usermode but unexpected PSR bits set?", \@ #endif /* !CONFIG_THUMB2_KERNEL */ .endm diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index caa0dbe3dc61..923a725ab9b5 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -141,6 +141,8 @@ int kgdb_arch_handle_exception(int exception_vector, int signo, static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr) { + if (user_mode(regs)) + return -1; kgdb_handle_exception(1, SIGTRAP, 0, regs); return 0; @@ -148,6 +150,8 @@ static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr) static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr) { + if (user_mode(regs)) + return -1; compiled_break = 1; kgdb_handle_exception(1, SIGTRAP, 0, regs); diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 6b38d7a634c1..c15318431986 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -95,6 +95,27 @@ void machine_crash_nonpanic_core(void *unused) cpu_relax(); } +void crash_smp_send_stop(void) +{ + static int cpus_stopped; + unsigned long msecs; + + if (cpus_stopped) + return; + + atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); + smp_call_function(machine_crash_nonpanic_core, NULL, false); + msecs = 1000; /* Wait at most a second for the other cpus to stop */ + while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { + mdelay(1); + msecs--; + } + if (atomic_read(&waiting_for_crash_ipi) > 0) + pr_warn("Non-crashing CPUs did not react to IPI\n"); + + cpus_stopped = 1; +} + static void machine_kexec_mask_interrupts(void) { unsigned int i; @@ -120,19 +141,8 @@ static void machine_kexec_mask_interrupts(void) void machine_crash_shutdown(struct pt_regs *regs) { - unsigned long msecs; - local_irq_disable(); - - atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); - smp_call_function(machine_crash_nonpanic_core, NULL, false); - msecs = 1000; /* Wait at most a second for the other cpus to stop */ - while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { - mdelay(1); - msecs--; - } - if (atomic_read(&waiting_for_crash_ipi) > 0) - pr_warn("Non-crashing CPUs did not react to IPI\n"); + crash_smp_send_stop(); crash_save_cpu(regs, smp_processor_id()); machine_kexec_mask_interrupts(); diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index d96714e1858c..4b675a80f523 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -94,6 +94,77 @@ void arch_cpu_idle_exit(void) ledtrig_cpu(CPU_LED_IDLE_END); } +/* + * dump a block of kernel memory from around the given address + */ +static void show_data(unsigned long addr, int nbytes, const char 
*name) +{ + int i, j; + int nlines; + u32 *p; + + /* + * don't attempt to dump non-kernel addresses or + * values that are probably just small negative numbers + */ + if (addr < PAGE_OFFSET || addr > -256UL) + return; + + printk("\n%s: %#lx:\n", name, addr); + + /* + * round address down to a 32 bit boundary + * and always dump a multiple of 32 bytes + */ + p = (u32 *)(addr & ~(sizeof(u32) - 1)); + nbytes += (addr & (sizeof(u32) - 1)); + nlines = (nbytes + 31) / 32; + + + for (i = 0; i < nlines; i++) { + /* + * just display low 16 bits of address to keep + * each line of the dump < 80 characters + */ + printk("%04lx ", (unsigned long)p & 0xffff); + for (j = 0; j < 8; j++) { + u32 data; + if (probe_kernel_address(p, data)) { + printk(" ********"); + } else { + printk(" %08x", data); + } + ++p; + } + printk("\n"); + } +} + +static void show_extra_register_data(struct pt_regs *regs, int nbytes) +{ + mm_segment_t fs; + + fs = get_fs(); + set_fs(KERNEL_DS); + show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC"); + show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR"); + show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP"); + show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP"); + show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP"); + show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0"); + show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1"); + show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2"); + show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3"); + show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4"); + show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5"); + show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6"); + show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7"); + show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8"); + show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9"); + show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10"); + set_fs(fs); +} + void __show_regs(struct pt_regs *regs) { unsigned long flags; @@ -185,6 +256,8 @@ void __show_regs(struct pt_regs *regs) printk("Control: %08x%s\n", ctrl, buf); } #endif + + show_extra_register_data(regs, 128); } void show_regs(struct pt_regs * regs) diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c index 3b2aa9a9fe26..c74249136f18 100644 --- a/arch/arm/kernel/reboot.c +++ b/arch/arm/kernel/reboot.c @@ -6,6 +6,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#include #include #include #include @@ -125,6 +126,31 @@ void machine_power_off(void) pm_power_off(); } +#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART +void arm_machine_flush_console(void) +{ + printk("\n"); + pr_emerg("Restarting %s\n", linux_banner); + if (console_trylock()) { + console_unlock(); + return; + } + + mdelay(50); + + local_irq_disable(); + if (!console_trylock()) + pr_emerg("arm_restart: Console was locked! Busting\n"); + else + pr_emerg("arm_restart: Console was locked!\n"); + console_unlock(); +} +#else +void arm_machine_flush_console(void) +{ +} +#endif + /* * Restart requires that the secondary CPUs stop performing any activity * while the primary CPU resets the system. 
Systems with a single CPU can @@ -141,6 +167,10 @@ void machine_restart(char *cmd) local_irq_disable(); smp_send_stop(); + /* Flush the console to make sure all the relevant messages make it + * out to the console drivers */ + arm_machine_flush_console(); + if (arm_pm_restart) arm_pm_restart(reboot_mode, cmd); else diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 24ac3cab411d..28ca1646a38a 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -30,6 +31,18 @@ #include #include +static inline +const struct sched_group_energy * const cpu_core_energy(int cpu) +{ + return sge_array[cpu][SD_LEVEL0]; +} + +static inline +const struct sched_group_energy * const cpu_cluster_energy(int cpu) +{ + return sge_array[cpu][SD_LEVEL1]; +} + /* * cpu capacity scale management */ @@ -278,23 +291,37 @@ void store_cpu_topology(unsigned int cpuid) update_cpu_capacity(cpuid); + topology_detect_flags(); + pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", cpuid, cpu_topology[cpuid].thread_id, cpu_topology[cpuid].core_id, cpu_topology[cpuid].socket_id, mpidr); } +#ifdef CONFIG_SCHED_MC +static int core_flags(void) +{ + return cpu_core_flags() | topology_core_flags(); +} + static inline int cpu_corepower_flags(void) { - return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN; + return topology_core_flags() + | SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN; +} +#endif + +static int cpu_flags(void) +{ + return topology_cpu_flags(); } static struct sched_domain_topology_level arm_topology[] = { #ifdef CONFIG_SCHED_MC - { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) }, - { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, + { cpu_coregroup_mask, core_flags, cpu_core_energy, SD_INIT_NAME(MC) }, #endif - { cpu_cpu_mask, SD_INIT_NAME(DIE) }, + { cpu_cpu_mask, cpu_flags, cpu_cluster_energy, SD_INIT_NAME(DIE) }, { NULL, }, }; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 0fcd82f01388..f702f2b37052 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -417,7 +418,8 @@ void unregister_undef_hook(struct undef_hook *hook) raw_spin_unlock_irqrestore(&undef_lock, flags); } -static int call_undef_hook(struct pt_regs *regs, unsigned int instr) +static nokprobe_inline +int call_undef_hook(struct pt_regs *regs, unsigned int instr) { struct undef_hook *hook; unsigned long flags; @@ -490,6 +492,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6); } +NOKPROBE_SYMBOL(do_undefinstr) /* * Handle FIQ similarly to NMI on x86 systems. @@ -790,7 +793,6 @@ void abort(void) /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } -EXPORT_SYMBOL(abort); void __init trap_init(void) { diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c index a4d6dc0f2427..f4dd7f9663c1 100644 --- a/arch/arm/kernel/vdso.c +++ b/arch/arm/kernel/vdso.c @@ -39,6 +39,8 @@ static struct page **vdso_text_pagelist; +extern char vdso_start[], vdso_end[]; + /* Total number of pages needed for the data and text portions of the VDSO. 
*/ unsigned int vdso_total_pages __ro_after_init; @@ -197,13 +199,13 @@ static int __init vdso_init(void) unsigned int text_pages; int i; - if (memcmp(&vdso_start, "\177ELF", 4)) { + if (memcmp(vdso_start, "\177ELF", 4)) { pr_err("VDSO is not a valid ELF object!\n"); return -ENOEXEC; } - text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; - pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start); + text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; + pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start); /* Allocate the VDSO text pagelist */ vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *), @@ -218,7 +220,7 @@ static int __init vdso_init(void) for (i = 0; i < text_pages; i++) { struct page *page; - page = virt_to_page(&vdso_start + i * PAGE_SIZE); + page = virt_to_page(vdso_start + i * PAGE_SIZE); vdso_text_pagelist[i] = page; } @@ -229,7 +231,7 @@ static int __init vdso_init(void) cntvct_ok = cntvct_functional(); - patch_vdso(&vdso_start); + patch_vdso(vdso_start); return 0; } diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 1e0784ebbfd6..a18f33edc471 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -176,6 +177,7 @@ static unsigned long num_core_regs(void) unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) { return num_core_regs() + kvm_arm_num_coproc_regs(vcpu) + + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS; } @@ -196,6 +198,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) uindices++; } + ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices); + if (ret) + return ret; + uindices += kvm_arm_get_fw_num_regs(vcpu); + ret = copy_timer_indices(vcpu, uindices); if (ret) return ret; @@ -214,6 +221,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) return get_core_reg(vcpu, reg); + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) + return kvm_arm_get_fw_reg(vcpu, reg); + if (is_timer_reg(reg->id)) return get_timer_reg(vcpu, reg); @@ -230,6 +240,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) return set_core_reg(vcpu, reg); + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) + return kvm_arm_set_fw_reg(vcpu, reg); + if (is_timer_reg(reg->id)) return set_timer_reg(vcpu, reg); diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index cf8bf6bf87c4..910bd8dabb3c 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include "trace.h" @@ -36,9 +36,9 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_vcpu_hvc_get_imm(vcpu)); vcpu->stat.hvc_exit_stat++; - ret = kvm_psci_call(vcpu); + ret = kvm_hvc_call_handler(vcpu); if (ret < 0) { - kvm_inject_undefined(vcpu); + vcpu_set_reg(vcpu, 0, ~0UL); return 1; } @@ -47,7 +47,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) { - kvm_inject_undefined(vcpu); + /* + * "If an SMC instruction executed at Non-secure EL1 is + * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a + * Trap exception, not a Secure Monitor Call exception [...]" + * + * We need to advance the PC after the trap, as it would + * otherwise return to the same address... 
+ */ + vcpu_set_reg(vcpu, 0, ~0UL); + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); return 1; } diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 5638ce0c9524..63d6b404d88e 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -7,6 +7,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING KVM=../../../../virt/kvm +CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve) + obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o @@ -15,7 +17,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vfp.o obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o +CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE) + obj-$(CONFIG_KVM_ARM_HOST) += entry.o obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o obj-$(CONFIG_KVM_ARM_HOST) += switch.o +CFLAGS_switch.o += $(CFLAGS_ARMV7VE) obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c index 111bda8cdebd..be4b8b0a40ad 100644 --- a/arch/arm/kvm/hyp/banked-sr.c +++ b/arch/arm/kvm/hyp/banked-sr.c @@ -20,6 +20,10 @@ #include +/* + * gcc before 4.9 doesn't understand -march=armv7ve, so we have to + * trick the assembler. + */ __asm__(".arch_extension virt"); void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt) diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S index 1712f132b80d..b83fdc06286a 100644 --- a/arch/arm/lib/csumpartialcopyuser.S +++ b/arch/arm/lib/csumpartialcopyuser.S @@ -85,7 +85,11 @@ .pushsection .text.fixup,"ax" .align 4 9001: mov r4, #-EFAULT +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + ldr r5, [sp, #9*4] @ *err_ptr +#else ldr r5, [sp, #8*4] @ *err_ptr +#endif str r4, [r5] ldmia sp, {r1, r2} @ retrieve dst, len add r2, r2, r1 diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S index df73914e81c8..746e7801dcdf 100644 --- a/arch/arm/lib/getuser.S +++ b/arch/arm/lib/getuser.S @@ -38,6 +38,7 @@ ENTRY(__get_user_1) mov r0, #0 ret lr ENDPROC(__get_user_1) +_ASM_NOKPROBE(__get_user_1) ENTRY(__get_user_2) check_uaccess r0, 2, r1, r2, __get_user_bad @@ -58,6 +59,7 @@ rb .req r0 mov r0, #0 ret lr ENDPROC(__get_user_2) +_ASM_NOKPROBE(__get_user_2) ENTRY(__get_user_4) check_uaccess r0, 4, r1, r2, __get_user_bad @@ -65,6 +67,7 @@ ENTRY(__get_user_4) mov r0, #0 ret lr ENDPROC(__get_user_4) +_ASM_NOKPROBE(__get_user_4) ENTRY(__get_user_8) check_uaccess r0, 8, r1, r2, __get_user_bad8 @@ -78,6 +81,7 @@ ENTRY(__get_user_8) mov r0, #0 ret lr ENDPROC(__get_user_8) +_ASM_NOKPROBE(__get_user_8) #ifdef __ARMEB__ ENTRY(__get_user_32t_8) @@ -91,6 +95,7 @@ ENTRY(__get_user_32t_8) mov r0, #0 ret lr ENDPROC(__get_user_32t_8) +_ASM_NOKPROBE(__get_user_32t_8) ENTRY(__get_user_64t_1) check_uaccess r0, 1, r1, r2, __get_user_bad8 @@ -98,6 +103,7 @@ ENTRY(__get_user_64t_1) mov r0, #0 ret lr ENDPROC(__get_user_64t_1) +_ASM_NOKPROBE(__get_user_64t_1) ENTRY(__get_user_64t_2) check_uaccess r0, 2, r1, r2, __get_user_bad8 @@ -114,6 +120,7 @@ rb .req r0 mov r0, #0 ret lr ENDPROC(__get_user_64t_2) +_ASM_NOKPROBE(__get_user_64t_2) ENTRY(__get_user_64t_4) check_uaccess r0, 4, r1, r2, __get_user_bad8 @@ -121,6 +128,7 @@ ENTRY(__get_user_64t_4) mov r0, #0 ret lr ENDPROC(__get_user_64t_4) +_ASM_NOKPROBE(__get_user_64t_4) #endif __get_user_bad8: @@ -131,6 +139,8 @@ __get_user_bad: ret lr ENDPROC(__get_user_bad) ENDPROC(__get_user_bad8) +_ASM_NOKPROBE(__get_user_bad) 
+_ASM_NOKPROBE(__get_user_bad8) .pushsection __ex_table, "a" .long 1b, __get_user_bad diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c index f673cd7a6766..fb7c44cdadcb 100644 --- a/arch/arm/mach-davinci/board-da830-evm.c +++ b/arch/arm/mach-davinci/board-da830-evm.c @@ -205,12 +205,17 @@ static const short da830_evm_mmc_sd_pins[] = { -1 }; +#define DA830_MMCSD_WP_PIN GPIO_TO_PIN(2, 1) +#define DA830_MMCSD_CD_PIN GPIO_TO_PIN(2, 2) + static struct gpiod_lookup_table mmc_gpios_table = { .dev_id = "da830-mmc.0", .table = { /* gpio chip 1 contains gpio range 32-63 */ - GPIO_LOOKUP("davinci_gpio.1", 2, "cd", GPIO_ACTIVE_LOW), - GPIO_LOOKUP("davinci_gpio.1", 1, "wp", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_CD_PIN, "cd", + GPIO_ACTIVE_LOW), + GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_WP_PIN, "wp", + GPIO_ACTIVE_LOW), }, }; diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index cbde0030c092..25f12118c364 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -763,12 +763,17 @@ static const short da850_evm_mcasp_pins[] __initconst = { -1 }; +#define DA850_MMCSD_CD_PIN GPIO_TO_PIN(4, 0) +#define DA850_MMCSD_WP_PIN GPIO_TO_PIN(4, 1) + static struct gpiod_lookup_table mmc_gpios_table = { .dev_id = "da830-mmc.0", .table = { /* gpio chip 2 contains gpio range 64-95 */ - GPIO_LOOKUP("davinci_gpio.2", 0, "cd", GPIO_ACTIVE_LOW), - GPIO_LOOKUP("davinci_gpio.2", 1, "wp", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd", + GPIO_ACTIVE_LOW), + GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp", + GPIO_ACTIVE_LOW), }, }; diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c index 62e7bc3018f0..8e64685b1941 100644 --- a/arch/arm/mach-davinci/board-dm355-evm.c +++ b/arch/arm/mach-davinci/board-dm355-evm.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -168,11 +169,16 @@ static struct resource dm355evm_dm9000_rsrc[] = { }, }; +static struct dm9000_plat_data dm335evm_dm9000_platdata; + static struct platform_device dm355evm_dm9000 = { .name = "dm9000", .id = -1, .resource = dm355evm_dm9000_rsrc, .num_resources = ARRAY_SIZE(dm355evm_dm9000_rsrc), + .dev = { + .platform_data = &dm335evm_dm9000_platdata, + }, }; static struct tvp514x_platform_data tvp5146_pdata = { diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index cb0a41e83582..4c458f714101 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -534,11 +534,12 @@ static struct vpif_display_config dm646x_vpif_display_config = { .set_clock = set_vpif_clock, .subdevinfo = dm646x_vpif_subdev, .subdev_count = ARRAY_SIZE(dm646x_vpif_subdev), + .i2c_adapter_id = 1, .chan_config[0] = { .outputs = dm6467_ch0_outputs, .output_count = ARRAY_SIZE(dm6467_ch0_outputs), }, - .card_name = "DM646x EVM", + .card_name = "DM646x EVM Video Display", }; /** @@ -676,6 +677,7 @@ static struct vpif_capture_config dm646x_vpif_capture_cfg = { .setup_input_channel_mode = setup_vpif_input_channel_mode, .subdev_info = vpif_capture_sdev_info, .subdev_count = ARRAY_SIZE(vpif_capture_sdev_info), + .i2c_adapter_id = 1, .chan_config[0] = { .inputs = dm6467_ch0_inputs, .input_count = ARRAY_SIZE(dm6467_ch0_inputs), @@ -696,6 +698,7 @@ static struct vpif_capture_config dm646x_vpif_capture_cfg = { .fid_pol = 0, }, }, + .card_name = "DM646x 
EVM Video Capture", }; static void __init evm_init_video(void) diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c index a3e78074be70..10a027253250 100644 --- a/arch/arm/mach-davinci/board-omapl138-hawk.c +++ b/arch/arm/mach-davinci/board-omapl138-hawk.c @@ -123,12 +123,16 @@ static const short hawk_mmcsd0_pins[] = { -1 }; +#define DA850_HAWK_MMCSD_CD_PIN GPIO_TO_PIN(3, 12) +#define DA850_HAWK_MMCSD_WP_PIN GPIO_TO_PIN(3, 13) + static struct gpiod_lookup_table mmc_gpios_table = { .dev_id = "da830-mmc.0", .table = { - /* CD: gpio3_12: gpio60: chip 1 contains gpio range 32-63*/ - GPIO_LOOKUP("davinci_gpio.1", 28, "cd", GPIO_ACTIVE_LOW), - GPIO_LOOKUP("davinci_gpio.1", 29, "wp", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_CD_PIN, "cd", + GPIO_ACTIVE_LOW), + GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_WP_PIN, "wp", + GPIO_ACTIVE_LOW), }, }; diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c index da21353cac45..d869369ca2bc 100644 --- a/arch/arm/mach-davinci/dm646x.c +++ b/arch/arm/mach-davinci/dm646x.c @@ -495,7 +495,8 @@ static u8 dm646x_default_priorities[DAVINCI_N_AINTC_IRQ] = { [IRQ_DM646X_MCASP0TXINT] = 7, [IRQ_DM646X_MCASP0RXINT] = 7, [IRQ_DM646X_RESERVED_3] = 7, - [IRQ_DM646X_MCASP1TXINT] = 7, /* clockevent */ + [IRQ_DM646X_MCASP1TXINT] = 7, + [IRQ_TINT0_TINT12] = 7, /* clockevent */ [IRQ_TINT0_TINT34] = 7, /* clocksource */ [IRQ_TINT1_TINT12] = 7, /* DSP timer */ [IRQ_TINT1_TINT34] = 7, /* system tick */ diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c index 1a7e5b5d08d8..3dbbf1fffead 100644 --- a/arch/arm/mach-exynos/pm.c +++ b/arch/arm/mach-exynos/pm.c @@ -276,11 +276,7 @@ static int exynos_cpu0_enter_aftr(void) goto fail; call_firmware_op(cpu_boot, 1); - - if (soc_is_exynos3250()) - dsb_sev(); - else - arch_send_wakeup_ipi_mask(cpumask_of(1)); + dsb_sev(); } } fail: diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c index fe57e2692629..abca83d22ff3 100644 --- a/arch/arm/mach-keystone/pm_domain.c +++ b/arch/arm/mach-keystone/pm_domain.c @@ -29,6 +29,7 @@ static struct dev_pm_domain keystone_pm_domain = { static struct pm_clk_notifier_block platform_domain_notifier = { .pm_domain = &keystone_pm_domain, + .con_ids = { NULL }, }; static const struct of_device_id of_keystone_table[] = { diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig index 9b49867154bf..63fa79f9f121 100644 --- a/arch/arm/mach-mvebu/Kconfig +++ b/arch/arm/mach-mvebu/Kconfig @@ -42,7 +42,7 @@ config MACH_ARMADA_375 depends on ARCH_MULTI_V7 select ARMADA_370_XP_IRQ select ARM_ERRATA_720789 - select ARM_ERRATA_753970 + select PL310_ERRATA_753970 select ARM_GIC select ARMADA_375_CLK select HAVE_ARM_SCU @@ -58,7 +58,7 @@ config MACH_ARMADA_38X bool "Marvell Armada 380/385 boards" depends on ARCH_MULTI_V7 select ARM_ERRATA_720789 - select ARM_ERRATA_753970 + select PL310_ERRATA_753970 select ARM_GIC select ARM_GLOBAL_TIMER select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c index 793a24a53c52..d7ca9e2b40d2 100644 --- a/arch/arm/mach-omap1/ams-delta-fiq.c +++ b/arch/arm/mach-omap1/ams-delta-fiq.c @@ -58,22 +58,24 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id) irq_num = gpio_to_irq(gpio); fiq_count = fiq_buffer[FIQ_CNT_INT_00 + gpio]; - while (irq_counter[gpio] < fiq_count) { - if (gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) { - struct irq_data *d = 
irq_get_irq_data(irq_num); - - /* - * It looks like handle_edge_irq() that - * OMAP GPIO edge interrupts default to, - * expects interrupt already unmasked. - */ - if (irq_chip && irq_chip->irq_unmask) + if (irq_counter[gpio] < fiq_count && + gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) { + struct irq_data *d = irq_get_irq_data(irq_num); + + /* + * handle_simple_irq() that OMAP GPIO edge + * interrupts default to since commit 80ac93c27441 + * requires interrupt already acked and unmasked. + */ + if (irq_chip) { + if (irq_chip->irq_ack) + irq_chip->irq_ack(d); + if (irq_chip->irq_unmask) irq_chip->irq_unmask(d); } - generic_handle_irq(irq_num); - - irq_counter[gpio]++; } + for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++) + generic_handle_irq(irq_num); } return IRQ_HANDLED; } diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c index 43e3e188f521..fa512413a471 100644 --- a/arch/arm/mach-omap1/clock.c +++ b/arch/arm/mach-omap1/clock.c @@ -1011,17 +1011,17 @@ static int clk_debugfs_register_one(struct clk *c) return -ENOMEM; c->dent = d; - d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount); + d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount); if (!d) { err = -ENOMEM; goto err_out; } - d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate); + d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate); if (!d) { err = -ENOMEM; goto err_out; } - d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags); + d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags); if (!d) { err = -ENOMEM; goto err_out; diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c index 5ac122e88f67..fa7f308c9027 100644 --- a/arch/arm/mach-omap2/omap-secure.c +++ b/arch/arm/mach-omap2/omap-secure.c @@ -73,6 +73,27 @@ phys_addr_t omap_secure_ram_mempool_base(void) return omap_secure_memblock_base; } +#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) +u32 omap3_save_secure_ram(void __iomem *addr, int size) +{ + u32 ret; + u32 param[5]; + + if (size != OMAP3_SAVE_SECURE_RAM_SZ) + return OMAP3_SAVE_SECURE_RAM_SZ; + + param[0] = 4; /* Number of arguments */ + param[1] = __pa(addr); /* Physical address for saving */ + param[2] = 0; + param[3] = 1; + param[4] = 1; + + ret = save_secure_ram_context(__pa(param)); + + return ret; +} +#endif + /** * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls * @idx: The PPA API index diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h index bae263fba640..c509cde71f93 100644 --- a/arch/arm/mach-omap2/omap-secure.h +++ b/arch/arm/mach-omap2/omap-secure.h @@ -31,6 +31,8 @@ /* Maximum Secure memory storage size */ #define OMAP_SECURE_RAM_STORAGE (88 * SZ_1K) +#define OMAP3_SAVE_SECURE_RAM_SZ 0x803F + /* Secure low power HAL API index */ #define OMAP4_HAL_SAVESECURERAM_INDEX 0x1a #define OMAP4_HAL_SAVEHW_INDEX 0x1b @@ -65,6 +67,8 @@ extern u32 omap_smc2(u32 id, u32 falg, u32 pargs); extern u32 omap_smc3(u32 id, u32 process, u32 flag, u32 pargs); extern phys_addr_t omap_secure_ram_mempool_base(void); extern int omap_secure_ram_reserve_memblock(void); +extern u32 save_secure_ram_context(u32 args_pa); +extern u32 omap3_save_secure_ram(void __iomem *save_regs, int size); extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs, u32 arg1, u32 arg2, u32 arg3, u32 arg4); diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index 4bb6751864a5..fc5fb776a710 100644 --- 
a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c @@ -299,8 +299,6 @@ static void irq_save_context(void) if (soc_is_dra7xx()) return; - if (!sar_base) - sar_base = omap4_get_sar_ram_base(); if (wakeupgen_ops && wakeupgen_ops->save_context) wakeupgen_ops->save_context(); } @@ -598,6 +596,8 @@ static int __init wakeupgen_init(struct device_node *node, irq_hotplug_init(); irq_pm_init(); + sar_base = omap4_get_sar_ram_base(); + return 0; } IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init); diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index c3276436b0ae..c12e7b572a41 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -1656,6 +1656,7 @@ static struct omap_hwmod omap3xxx_mmc3_hwmod = { .main_clk = "mmchs3_fck", .prcm = { .omap2 = { + .module_offs = CORE_MOD, .prcm_reg_id = 1, .module_bit = OMAP3430_EN_MMC3_SHIFT, .idlest_reg_id = 1, diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c index 366158a54fcd..6f68576e5695 100644 --- a/arch/arm/mach-omap2/pm.c +++ b/arch/arm/mach-omap2/pm.c @@ -186,7 +186,7 @@ static void omap_pm_end(void) cpu_idle_poll_ctrl(false); } -static void omap_pm_finish(void) +static void omap_pm_wake(void) { if (soc_is_omap34xx()) omap_prcm_irq_complete(); @@ -196,7 +196,7 @@ static const struct platform_suspend_ops omap_pm_ops = { .begin = omap_pm_begin, .end = omap_pm_end, .enter = omap_pm_enter, - .finish = omap_pm_finish, + .wake = omap_pm_wake, .valid = suspend_valid_only_mem, }; diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h index b668719b9b25..8e30772cfe32 100644 --- a/arch/arm/mach-omap2/pm.h +++ b/arch/arm/mach-omap2/pm.h @@ -81,10 +81,6 @@ extern unsigned int omap3_do_wfi_sz; /* ... and its pointer from SRAM after copy */ extern void (*omap3_do_wfi_sram)(void); -/* save_secure_ram_context function pointer and size, for copy to SRAM */ -extern int save_secure_ram_context(u32 *addr); -extern unsigned int save_secure_ram_context_sz; - extern void omap3_save_scratchpad_contents(void); #define PM_RTA_ERRATUM_i608 (1 << 0) diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 841ba19d64a6..36c55547137c 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -48,6 +48,7 @@ #include "prm3xxx.h" #include "pm.h" #include "sdrc.h" +#include "omap-secure.h" #include "sram.h" #include "control.h" #include "vc.h" @@ -66,7 +67,6 @@ struct power_state { static LIST_HEAD(pwrst_list); -static int (*_omap_save_secure_sram)(u32 *addr); void (*omap3_do_wfi_sram)(void); static struct powerdomain *mpu_pwrdm, *neon_pwrdm; @@ -121,8 +121,8 @@ static void omap3_save_secure_ram_context(void) * will hang the system. */ pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); - ret = _omap_save_secure_sram((u32 *)(unsigned long) - __pa(omap3_secure_ram_storage)); + ret = omap3_save_secure_ram(omap3_secure_ram_storage, + OMAP3_SAVE_SECURE_RAM_SZ); pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state); /* Following is for error tracking, it should not happen */ if (ret) { @@ -434,15 +434,10 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused) * * The minimum set of functions is pushed to SRAM for execution: * - omap3_do_wfi for erratum i581 WA, - * - save_secure_ram_context for security extensions. 
*/ void omap_push_sram_idle(void) { omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz); - - if (omap_type() != OMAP2_DEVICE_TYPE_GP) - _omap_save_secure_sram = omap_sram_push(save_secure_ram_context, - save_secure_ram_context_sz); } static void __init pm_errata_configure(void) @@ -553,7 +548,7 @@ int __init omap3_pm_init(void) clkdm_add_wkdep(neon_clkdm, mpu_clkdm); if (omap_type() != OMAP2_DEVICE_TYPE_GP) { omap3_secure_ram_storage = - kmalloc(0x803F, GFP_KERNEL); + kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL); if (!omap3_secure_ram_storage) pr_err("Memory allocation failed when allocating for secure sram context\n"); diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c index 76eb6ec5f157..1e6a967cd2d5 100644 --- a/arch/arm/mach-omap2/powerdomain.c +++ b/arch/arm/mach-omap2/powerdomain.c @@ -188,7 +188,7 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag) ((prev & OMAP_POWERSTATE_MASK) << 0)); trace_power_domain_target_rcuidle(pwrdm->name, trace_state, - smp_processor_id()); + raw_smp_processor_id()); } break; default: @@ -518,7 +518,7 @@ int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst) if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) { /* Trace the pwrdm desired target state */ trace_power_domain_target_rcuidle(pwrdm->name, pwrst, - smp_processor_id()); + raw_smp_processor_id()); /* Program the pwrdm desired target state */ ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst); } diff --git a/arch/arm/mach-omap2/prm33xx.c b/arch/arm/mach-omap2/prm33xx.c index d2c5bcabdbeb..ebaf80d72a10 100644 --- a/arch/arm/mach-omap2/prm33xx.c +++ b/arch/arm/mach-omap2/prm33xx.c @@ -176,17 +176,6 @@ static int am33xx_pwrdm_read_pwrst(struct powerdomain *pwrdm) return v; } -static int am33xx_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm) -{ - u32 v; - - v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs); - v &= AM33XX_LASTPOWERSTATEENTERED_MASK; - v >>= AM33XX_LASTPOWERSTATEENTERED_SHIFT; - - return v; -} - static int am33xx_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm) { am33xx_prm_rmw_reg_bits(AM33XX_LOWPOWERSTATECHANGE_MASK, @@ -357,7 +346,6 @@ struct pwrdm_ops am33xx_pwrdm_operations = { .pwrdm_set_next_pwrst = am33xx_pwrdm_set_next_pwrst, .pwrdm_read_next_pwrst = am33xx_pwrdm_read_next_pwrst, .pwrdm_read_pwrst = am33xx_pwrdm_read_pwrst, - .pwrdm_read_prev_pwrst = am33xx_pwrdm_read_prev_pwrst, .pwrdm_set_logic_retst = am33xx_pwrdm_set_logic_retst, .pwrdm_read_logic_pwrst = am33xx_pwrdm_read_logic_pwrst, .pwrdm_read_logic_retst = am33xx_pwrdm_read_logic_retst, diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S index fa5fd24f524c..22daf4efed68 100644 --- a/arch/arm/mach-omap2/sleep34xx.S +++ b/arch/arm/mach-omap2/sleep34xx.S @@ -93,20 +93,13 @@ ENTRY(enable_omap3630_toggle_l2_on_restore) ENDPROC(enable_omap3630_toggle_l2_on_restore) /* - * Function to call rom code to save secure ram context. This gets - * relocated to SRAM, so it can be all in .data section. Otherwise - * we need to initialize api_params separately. + * Function to call rom code to save secure ram context. 
+ * + * r0 = physical address of the parameters */ - .data - .align 3 ENTRY(save_secure_ram_context) stmfd sp!, {r4 - r11, lr} @ save registers on stack - adr r3, api_params @ r3 points to parameters - str r0, [r3,#0x4] @ r0 has sdram address - ldr r12, high_mask - and r3, r3, r12 - ldr r12, sram_phy_addr_mask - orr r3, r3, r12 + mov r3, r0 @ physical address of parameters mov r0, #25 @ set service ID for PPA mov r12, r0 @ copy secure service ID in r12 mov r1, #0 @ set task id for ROM code in r1 @@ -120,18 +113,7 @@ ENTRY(save_secure_ram_context) nop nop ldmfd sp!, {r4 - r11, pc} - .align -sram_phy_addr_mask: - .word SRAM_BASE_P -high_mask: - .word 0xffff -api_params: - .word 0x4, 0x0, 0x0, 0x1, 0x1 ENDPROC(save_secure_ram_context) -ENTRY(save_secure_ram_context_sz) - .word . - save_secure_ram_context - - .text /* * ====================== diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index ece09c9461f7..d61fbd7a2840 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -156,12 +156,6 @@ static struct clock_event_device clockevent_gpt = { .tick_resume = omap2_gp_timer_shutdown, }; -static struct property device_disabled = { - .name = "status", - .length = sizeof("disabled"), - .value = "disabled", -}; - static const struct of_device_id omap_timer_match[] __initconst = { { .compatible = "ti,omap2420-timer", }, { .compatible = "ti,omap3430-timer", }, @@ -203,8 +197,17 @@ static struct device_node * __init omap_get_timer_dt(const struct of_device_id * of_get_property(np, "ti,timer-secure", NULL))) continue; - if (!of_device_is_compatible(np, "ti,omap-counter32k")) - of_add_property(np, &device_disabled); + if (!of_device_is_compatible(np, "ti,omap-counter32k")) { + struct property *prop; + + prop = kzalloc(sizeof(*prop), GFP_KERNEL); + if (!prop) + return NULL; + prop->name = "status"; + prop->value = "disabled"; + prop->length = strlen(prop->value); + of_add_property(np, prop); + } return np; } diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig index 2a7bb6ccdcb7..a810f4dd34b1 100644 --- a/arch/arm/mach-orion5x/Kconfig +++ b/arch/arm/mach-orion5x/Kconfig @@ -58,7 +58,6 @@ config MACH_KUROBOX_PRO config MACH_DNS323 bool "D-Link DNS-323" - select GENERIC_NET_UTILS select I2C_BOARDINFO if I2C help Say 'Y' here if you want your kernel to support the @@ -66,7 +65,6 @@ config MACH_DNS323 config MACH_TS209 bool "QNAP TS-109/TS-209" - select GENERIC_NET_UTILS help Say 'Y' here if you want your kernel to support the QNAP TS-109/TS-209 platform. @@ -101,7 +99,6 @@ config MACH_LINKSTATION_LS_HGL config MACH_TS409 bool "QNAP TS-409" - select GENERIC_NET_UTILS help Say 'Y' here if you want your kernel to support the QNAP TS-409 platform. diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c index cd483bfb5ca8..d13344b2ddcd 100644 --- a/arch/arm/mach-orion5x/dns323-setup.c +++ b/arch/arm/mach-orion5x/dns323-setup.c @@ -173,10 +173,42 @@ static struct mv643xx_eth_platform_data dns323_eth_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; +/* dns323_parse_hex_*() taken from tsx09-common.c; should a common copy of these + * functions be kept somewhere? 
+ */ +static int __init dns323_parse_hex_nibble(char n) +{ + if (n >= '0' && n <= '9') + return n - '0'; + + if (n >= 'A' && n <= 'F') + return n - 'A' + 10; + + if (n >= 'a' && n <= 'f') + return n - 'a' + 10; + + return -1; +} + +static int __init dns323_parse_hex_byte(const char *b) +{ + int hi; + int lo; + + hi = dns323_parse_hex_nibble(b[0]); + lo = dns323_parse_hex_nibble(b[1]); + + if (hi < 0 || lo < 0) + return -1; + + return (hi << 4) | lo; +} + static int __init dns323_read_mac_addr(void) { u_int8_t addr[6]; - void __iomem *mac_page; + int i; + char *mac_page; /* MAC address is stored as a regular ol' string in /dev/mtdblock4 * (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80). @@ -185,8 +217,23 @@ static int __init dns323_read_mac_addr(void) if (!mac_page) return -ENOMEM; - if (!mac_pton((__force const char *) mac_page, addr)) - goto error_fail; + /* Sanity check the string we're looking at */ + for (i = 0; i < 5; i++) { + if (*(mac_page + (i * 3) + 2) != ':') { + goto error_fail; + } + } + + for (i = 0; i < 6; i++) { + int byte; + + byte = dns323_parse_hex_byte(mac_page + (i * 3)); + if (byte < 0) { + goto error_fail; + } + + addr[i] = byte; + } iounmap(mac_page); printk("DNS-323: Found ethernet MAC address: %pM\n", addr); diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c index 89774985d380..905d4f2dd0b8 100644 --- a/arch/arm/mach-orion5x/tsx09-common.c +++ b/arch/arm/mach-orion5x/tsx09-common.c @@ -53,12 +53,53 @@ struct mv643xx_eth_platform_data qnap_tsx09_eth_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; +static int __init qnap_tsx09_parse_hex_nibble(char n) +{ + if (n >= '0' && n <= '9') + return n - '0'; + + if (n >= 'A' && n <= 'F') + return n - 'A' + 10; + + if (n >= 'a' && n <= 'f') + return n - 'a' + 10; + + return -1; +} + +static int __init qnap_tsx09_parse_hex_byte(const char *b) +{ + int hi; + int lo; + + hi = qnap_tsx09_parse_hex_nibble(b[0]); + lo = qnap_tsx09_parse_hex_nibble(b[1]); + + if (hi < 0 || lo < 0) + return -1; + + return (hi << 4) | lo; +} + static int __init qnap_tsx09_check_mac_addr(const char *addr_str) { u_int8_t addr[6]; + int i; - if (!mac_pton(addr_str, addr)) - return -1; + for (i = 0; i < 6; i++) { + int byte; + + /* + * Enforce "xx:xx:xx:xx:xx:xx\n" format. + */ + if (addr_str[(i * 3) + 2] != ((i < 5) ? 
':' : '\n')) + return -1; + + byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3)); + if (byte < 0) + return -1; + addr[i] = byte; + } printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr); @@ -77,12 +118,12 @@ void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size) unsigned long addr; for (addr = mem_base; addr < (mem_base + size); addr += 1024) { - void __iomem *nor_page; + char *nor_page; int ret = 0; nor_page = ioremap(addr, 1024); if (nor_page != NULL) { - ret = qnap_tsx09_check_mac_addr((__force const char *)nor_page); + ret = qnap_tsx09_check_mac_addr(nor_page); iounmap(nor_page); } diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c index 107f37210fb9..83606087edc7 100644 --- a/arch/arm/mach-pxa/tosa-bt.c +++ b/arch/arm/mach-pxa/tosa-bt.c @@ -132,3 +132,7 @@ static struct platform_driver tosa_bt_driver = { }, }; module_platform_driver(tosa_bt_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Dmitry Baryshkov"); +MODULE_DESCRIPTION("Bluetooth built-in chip control"); diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index d28ecb9ef172..c28fa48ae683 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c @@ -541,21 +541,6 @@ fixup_assabet(struct tag *tags, char **cmdline) printk("Neponset expansion board detected\n"); } - -static void assabet_uart_pm(struct uart_port *port, u_int state, u_int oldstate) -{ - if (port->mapbase == _Ser1UTCR0) { - if (state) - ASSABET_BCR_clear(ASSABET_BCR_RS232EN | - ASSABET_BCR_COM_RTS | - ASSABET_BCR_COM_DTR); - else - ASSABET_BCR_set(ASSABET_BCR_RS232EN | - ASSABET_BCR_COM_RTS | - ASSABET_BCR_COM_DTR); - } -} - /* * Assabet uses COM_RTS and COM_DTR for both UART1 (com port) * and UART3 (radio module). We only handle them for UART1 here. 
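Aside on the orion5x changes above: they drop the mac_pton() dependency (and the GENERIC_NET_UTILS selects) in favour of the open-coded dns323_parse_hex_nibble/_byte and qnap_tsx09_parse_hex_* helpers. Below is a stand-alone user-space restatement of the same nibble/byte logic, for illustration only; unlike the QNAP variant it does not insist on a trailing '\n' after the sixth byte.

#include <stdio.h>

/* Same hex-nibble logic as the helpers added above. */
static int parse_hex_nibble(char n)
{
	if (n >= '0' && n <= '9')
		return n - '0';
	if (n >= 'A' && n <= 'F')
		return n - 'A' + 10;
	if (n >= 'a' && n <= 'f')
		return n - 'a' + 10;
	return -1;
}

/* Parse "xx:xx:xx:xx:xx:xx" into addr[]; returns 0 on success. */
static int parse_mac(const char *s, unsigned char addr[6])
{
	int i;

	for (i = 0; i < 6; i++) {
		int hi = parse_hex_nibble(s[i * 3]);
		int lo = parse_hex_nibble(s[i * 3 + 1]);

		if (hi < 0 || lo < 0)
			return -1;
		if (i < 5 && s[i * 3 + 2] != ':')
			return -1;
		addr[i] = (hi << 4) | lo;
	}
	return 0;
}

int main(void)
{
	unsigned char mac[6];

	if (!parse_mac("00:11:22:aa:bb:cc", mac))
		printf("first %02x last %02x\n", mac[0], mac[5]);
	return 0;
}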
@@ -614,7 +599,6 @@ static u_int assabet_get_mctrl(struct uart_port *port) static struct sa1100_port_fns assabet_port_fns __initdata = { .set_mctrl = assabet_set_mctrl, .get_mctrl = assabet_get_mctrl, - .pm = assabet_uart_pm, }; static struct map_desc assabet_io_desc[] __initdata = { diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 24659952c278..11da0f50a1fe 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -270,6 +270,11 @@ v6_dma_clean_range: * - end - virtual end address of region */ ENTRY(v6_dma_flush_range) +#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT + sub r2, r1, r0 + cmp r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT + bhi v6_dma_flush_dcache_all +#endif #ifdef CONFIG_DMA_CACHE_RWFO ldrb r2, [r0] @ read for ownership strb r2, [r0] @ write for ownership @@ -292,6 +297,18 @@ ENTRY(v6_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain write buffer ret lr +#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT +v6_dma_flush_dcache_all: + mov r0, #0 +#ifdef HARVARD_CACHE + mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate +#else + mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate +#endif + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer + mov pc, lr +#endif + /* * dma_map_area(start, size, dir) * - start - kernel virtual start address diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index 35ff45470dbf..fc3b44028cfb 100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c @@ -129,8 +129,8 @@ static const struct prot_bits section_bits[] = { .val = PMD_SECT_USER, .set = "USR", }, { - .mask = L_PMD_SECT_RDONLY, - .val = L_PMD_SECT_RDONLY, + .mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2, + .val = L_PMD_SECT_RDONLY | PMD_SECT_AP2, .set = "ro", .clear = "RW", #elif __LINUX_ARM_ARCH__ >= 6 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 42f585379e19..6123d126e5ae 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -274,10 +274,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) local_irq_enable(); /* - * If we're in an interrupt or have no user + * If we're in an interrupt, or have no irqs, or have no user * context, we must not take the fault.. */ - if (faulthandler_disabled() || !mm) + if (faulthandler_disabled() || irqs_disabled() || !mm) goto no_context; if (user_mode(regs)) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index ad80548325fe..0f6d1537f330 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -639,8 +639,8 @@ static struct section_perm ro_perms[] = { .start = (unsigned long)_stext, .end = (unsigned long)__init_begin, #ifdef CONFIG_ARM_LPAE - .mask = ~L_PMD_SECT_RDONLY, - .prot = L_PMD_SECT_RDONLY, + .mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2), + .prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2, #else .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE), .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE, diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index c199990e12b6..ece2d1d43724 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -27,14 +27,58 @@ int bpf_jit_enable __read_mostly; +/* + * eBPF prog stack layout: + * + * high + * original ARM_SP => +-----+ + * | | callee saved registers + * +-----+ <= (BPF_FP + SCRATCH_SIZE) + * | ... | eBPF JIT scratch space + * eBPF fp register => +-----+ + * (BPF_FP) | ... | eBPF prog stack + * +-----+ + * |RSVD | JIT scratchpad + * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE) + * | | + * | ... | Function call stack + * | | + * +-----+ + * low + * + * The callee saved registers depends on whether frame pointers are enabled. 
+ * With frame pointers (to be compliant with the ABI): + * + * high + * original ARM_SP => +------------------+ \ + * | pc | | + * current ARM_FP => +------------------+ } callee saved registers + * |r4-r8,r10,fp,ip,lr| | + * +------------------+ / + * low + * + * Without frame pointers: + * + * high + * original ARM_SP => +------------------+ + * | r4-r8,r10,fp,lr | callee saved registers + * current ARM_FP => +------------------+ + * low + * + * When popping registers off the stack at the end of a BPF function, we + * reference them via the current ARM_FP register. + */ +#define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \ + 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \ + 1 << ARM_FP) +#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR) +#define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC) + #define STACK_OFFSET(k) (k) #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */ #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */ #define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */ -/* Flags used for JIT optimization */ -#define SEEN_CALL (1 << 0) - #define FLAG_IMM_OVERFLOW (1 << 0) /* @@ -95,7 +139,6 @@ static const u8 bpf2a32[][2] = { * idx : index of current last JITed instruction. * prologue_bytes : bytes used in prologue. * epilogue_offset : offset of epilogue starting. - * seen : bit mask used for JIT optimization. * offsets : array of eBPF instruction offsets in * JITed code. * target : final JITed code. @@ -110,7 +153,6 @@ struct jit_ctx { unsigned int idx; unsigned int prologue_bytes; unsigned int epilogue_offset; - u32 seen; u32 flags; u32 *offsets; u32 *target; @@ -179,8 +221,13 @@ static void jit_fill_hole(void *area, unsigned int size) *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF); } -/* Stack must be multiples of 16 Bytes */ -#define STACK_ALIGN(sz) (((sz) + 3) & ~3) +#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) +/* EABI requires the stack to be aligned to 64-bit boundaries */ +#define STACK_ALIGNMENT 8 +#else +/* Stack must be aligned to 32-bit boundaries */ +#define STACK_ALIGNMENT 4 +#endif /* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4, * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9, @@ -194,7 +241,7 @@ static void jit_fill_hole(void *area, unsigned int size) + SCRATCH_SIZE + \ + 4 /* extra for skb_copy_bits buffer */) -#define STACK_SIZE STACK_ALIGN(_STACK_SIZE) +#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT) /* Get the offset of eBPF REGISTERs stored on scratch space. */ #define STACK_VAR(off) (STACK_SIZE-off-4) @@ -285,16 +332,19 @@ static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx) emit_mov_i_no8m(rd, val, ctx); } -static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) +static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx) { - ctx->seen |= SEEN_CALL; -#if __LINUX_ARM_ARCH__ < 5 - emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); - if (elf_hwcap & HWCAP_THUMB) emit(ARM_BX(tgt_reg), ctx); else emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx); +} + +static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) +{ +#if __LINUX_ARM_ARCH__ < 5 + emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); + emit_bx_r(tgt_reg, ctx); #else emit(ARM_BLX_R(tgt_reg), ctx); #endif @@ -354,7 +404,6 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op) } /* Call appropriate function */ - ctx->seen |= SEEN_CALL; emit_mov_i(ARM_IP, op == BPF_DIV ? 
(u32)jit_udiv32 : (u32)jit_mod32, ctx); emit_blx_r(ARM_IP, ctx); @@ -620,8 +669,6 @@ static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk, /* Do LSH operation */ emit(ARM_SUB_I(ARM_IP, rt, 32), ctx); emit(ARM_RSB_I(tmp2[0], rt, 32), ctx); - /* As we are using ARM_LR */ - ctx->seen |= SEEN_CALL; emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx); emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx); emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx); @@ -656,8 +703,6 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk, /* Do the ARSH operation */ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); - /* As we are using ARM_LR */ - ctx->seen |= SEEN_CALL; emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); _emit(ARM_COND_MI, ARM_B(0), ctx); @@ -673,7 +718,7 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk, } /* dst = dst >> src */ -static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk, +static inline void emit_a32_rsh_r64(const u8 dst[], const u8 src[], bool dstk, bool sstk, struct jit_ctx *ctx) { const u8 *tmp = bpf2a32[TMP_REG_1]; const u8 *tmp2 = bpf2a32[TMP_REG_2]; @@ -689,11 +734,9 @@ static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk, emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); } - /* Do LSH operation */ + /* Do RSH operation */ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); - /* As we are using ARM_LR */ - ctx->seen |= SEEN_CALL; emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx); @@ -741,7 +784,7 @@ static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk, } /* dst = dst >> val */ -static inline void emit_a32_lsr_i64(const u8 dst[], bool dstk, +static inline void emit_a32_rsh_i64(const u8 dst[], bool dstk, const u32 val, struct jit_ctx *ctx) { const u8 *tmp = bpf2a32[TMP_REG_1]; const u8 *tmp2 = bpf2a32[TMP_REG_2]; @@ -828,8 +871,6 @@ static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk, /* Do Multiplication */ emit(ARM_MUL(ARM_IP, rd, rn), ctx); emit(ARM_MUL(ARM_LR, rm, rt), ctx); - /* As we are using ARM_LR */ - ctx->seen |= SEEN_CALL; emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx); emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx); @@ -872,33 +913,53 @@ static inline void emit_str_r(const u8 dst, const u8 src, bool dstk, } /* dst = *(size*)(src + off) */ -static inline void emit_ldx_r(const u8 dst, const u8 src, bool dstk, - const s32 off, struct jit_ctx *ctx, const u8 sz){ +static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk, + s32 off, struct jit_ctx *ctx, const u8 sz){ const u8 *tmp = bpf2a32[TMP_REG_1]; - u8 rd = dstk ? tmp[1] : dst; + const u8 *rd = dstk ? 
tmp : dst; u8 rm = src; + s32 off_max; - if (off) { + if (sz == BPF_H) + off_max = 0xff; + else + off_max = 0xfff; + + if (off < 0 || off > off_max) { emit_a32_mov_i(tmp[0], off, false, ctx); emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx); rm = tmp[0]; + off = 0; + } else if (rd[1] == rm) { + emit(ARM_MOV_R(tmp[0], rm), ctx); + rm = tmp[0]; } switch (sz) { - case BPF_W: - /* Load a Word */ - emit(ARM_LDR_I(rd, rm, 0), ctx); + case BPF_B: + /* Load a Byte */ + emit(ARM_LDRB_I(rd[1], rm, off), ctx); + emit_a32_mov_i(dst[0], 0, dstk, ctx); break; case BPF_H: /* Load a HalfWord */ - emit(ARM_LDRH_I(rd, rm, 0), ctx); + emit(ARM_LDRH_I(rd[1], rm, off), ctx); + emit_a32_mov_i(dst[0], 0, dstk, ctx); break; - case BPF_B: - /* Load a Byte */ - emit(ARM_LDRB_I(rd, rm, 0), ctx); + case BPF_W: + /* Load a Word */ + emit(ARM_LDR_I(rd[1], rm, off), ctx); + emit_a32_mov_i(dst[0], 0, dstk, ctx); + break; + case BPF_DW: + /* Load a Double Word */ + emit(ARM_LDR_I(rd[1], rm, off), ctx); + emit(ARM_LDR_I(rd[0], rm, off + 4), ctx); break; } if (dstk) - emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); + emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx); + if (dstk && sz == BPF_DW) + emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx); } /* Arithmatic Operation */ @@ -906,7 +967,6 @@ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm, const u8 rn, struct jit_ctx *ctx, u8 op) { switch (op) { case BPF_JSET: - ctx->seen |= SEEN_CALL; emit(ARM_AND_R(ARM_IP, rt, rn), ctx); emit(ARM_AND_R(ARM_LR, rd, rm), ctx); emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx); @@ -945,7 +1005,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) const u8 *tcc = bpf2a32[TCALL_CNT]; const int idx0 = ctx->idx; #define cur_offset (ctx->idx - idx0) -#define jmp_offset (out_offset - (cur_offset)) +#define jmp_offset (out_offset - (cur_offset) - 2) u32 off, lo, hi; /* if (index >= array->map.max_entries) @@ -956,7 +1016,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit_a32_mov_i(tmp[1], off, false, ctx); emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx); emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx); - /* index (64 bit) */ + /* index is 32-bit for arrays */ emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx); /* index >= array->map.max_entries */ emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx); @@ -997,7 +1057,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit_a32_mov_i(tmp2[1], off, false, ctx); emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx); emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx); - emit(ARM_BX(tmp[1]), ctx); + emit_bx_r(tmp[1], ctx); /* out: */ if (out_offset == -1) @@ -1070,54 +1130,22 @@ static void build_prologue(struct jit_ctx *ctx) const u8 r2 = bpf2a32[BPF_REG_1][1]; const u8 r3 = bpf2a32[BPF_REG_1][0]; const u8 r4 = bpf2a32[BPF_REG_6][1]; - const u8 r5 = bpf2a32[BPF_REG_6][0]; - const u8 r6 = bpf2a32[TMP_REG_1][1]; - const u8 r7 = bpf2a32[TMP_REG_1][0]; - const u8 r8 = bpf2a32[TMP_REG_2][1]; - const u8 r10 = bpf2a32[TMP_REG_2][0]; const u8 fplo = bpf2a32[BPF_REG_FP][1]; const u8 fphi = bpf2a32[BPF_REG_FP][0]; - const u8 sp = ARM_SP; const u8 *tcc = bpf2a32[TCALL_CNT]; - u16 reg_set = 0; - - /* - * eBPF prog stack layout - * - * high - * original ARM_SP => +-----+ eBPF prologue - * |FP/LR| - * current ARM_FP => +-----+ - * | ... | callee saved registers - * eBPF fp register => +-----+ <= (BPF_FP) - * | ... | eBPF JIT scratch space - * | | eBPF prog stack - * +-----+ - * |RSVD | JIT scratchpad - * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE) - * | | - * | ... 
| Function call stack - | | - +-----+ - low - */ - /* Save callee saved registers. */ - reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | - (1<<r7) | (1<<r8) | (1<<r10); #ifdef CONFIG_FRAME_POINTER - reg_set |= (1<<ARM_FP) | (1<<ARM_IP) | (1<<ARM_LR) | (1<<ARM_PC); - emit(ARM_MOV_R(ARM_IP, sp), ctx); + u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC; + emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx); emit(ARM_PUSH(reg_set), ctx); emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx); #else - /* Check if call instruction exists in BPF body */ - if (ctx->seen & SEEN_CALL) - reg_set |= (1<<ARM_LR); - emit(ARM_PUSH(reg_set), ctx); + emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx); + emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx); #endif /* Save frame pointer for later */ emit(ARM_SUB_I(ARM_IP, ARM_SP, SCRATCH_SIZE), ctx); ctx->stack_size = imm8m(STACK_SIZE); @@ -1140,33 +1168,19 @@ static void build_prologue(struct jit_ctx *ctx) /* end of prologue */ } +/* restore callee saved registers. */ static void build_epilogue(struct jit_ctx *ctx) { - const u8 r4 = bpf2a32[BPF_REG_6][1]; - const u8 r5 = bpf2a32[BPF_REG_6][0]; - const u8 r6 = bpf2a32[TMP_REG_1][1]; - const u8 r7 = bpf2a32[TMP_REG_1][0]; - const u8 r8 = bpf2a32[TMP_REG_2][1]; - const u8 r10 = bpf2a32[TMP_REG_2][0]; - u16 reg_set = 0; - - /* unwind function call stack */ - emit(ARM_ADD_I(ARM_SP, ARM_SP, ctx->stack_size), ctx); - - /* restore callee saved registers. */ - reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | - (1<<r7) | (1<<r8) | (1<<r10); #ifdef CONFIG_FRAME_POINTER - /* the first instruction of the prologue was: mov ip, sp */ - reg_set |= (1<<ARM_FP) | (1<<ARM_SP) | (1<<ARM_PC); - emit(ARM_LDM(ARM_SP, reg_set), ctx); + /* When using frame pointers, some additional registers need to + * be loaded. */ + u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP; + emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx); + emit(ARM_LDM(ARM_SP, reg_set), ctx); #else - if (ctx->seen & SEEN_CALL) - reg_set |= (1<<ARM_PC); - /* Restore callee saved registers. */ - emit(ARM_POP(reg_set), ctx); - /* Return back to the callee */ - if (!(ctx->seen & SEEN_CALL)) - emit(ARM_BX(ARM_LR), ctx); + emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx); + emit(ARM_POP(CALLEE_POP_MASK), ctx); #endif } @@ -1326,7 +1340,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_ALU64 | BPF_RSH | BPF_K: if (unlikely(imm > 63)) return -EINVAL; - emit_a32_lsr_i64(dst, dstk, imm, ctx); + emit_a32_rsh_i64(dst, dstk, imm, ctx); break; /* dst = dst << src */ case BPF_ALU64 | BPF_LSH | BPF_X: @@ -1334,7 +1348,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) break; /* dst = dst >> src */ case BPF_ALU64 | BPF_RSH | BPF_X: - emit_a32_lsr_r64(dst, src, dstk, sstk, ctx); + emit_a32_rsh_r64(dst, src, dstk, sstk, ctx); break; /* dst = dst >> src (signed) */ case BPF_ALU64 | BPF_ARSH | BPF_X: @@ -1394,8 +1408,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) emit_rev32(rt, rt, ctx); goto emit_bswap_uxt; case 64: - /* Because of the usage of ARM_LR */ - ctx->seen |= SEEN_CALL; emit_rev32(ARM_LR, rt, ctx); emit_rev32(rt, rd, ctx); emit(ARM_MOV_R(rd, ARM_LR), ctx); @@ -1448,22 +1460,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) rn = sstk ?
tmp2[1] : src_lo; if (sstk) emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); - switch (BPF_SIZE(code)) { - case BPF_W: - /* Load a Word */ - case BPF_H: - /* Load a Half-Word */ - case BPF_B: - /* Load a Byte */ - emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_SIZE(code)); - emit_a32_mov_i(dst_hi, 0, dstk, ctx); - break; - case BPF_DW: - /* Load a double word */ - emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_W); - emit_ldx_r(dst_hi, rn, dstk, off+4, ctx, BPF_W); - break; - } + emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code)); break; /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ case BPF_LD | BPF_ABS | BPF_W: diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c index 7a327bd32521..ebef8aacea83 100644 --- a/arch/arm/plat-omap/dmtimer.c +++ b/arch/arm/plat-omap/dmtimer.c @@ -890,11 +890,8 @@ static int omap_dm_timer_probe(struct platform_device *pdev) timer->irq = irq->start; timer->pdev = pdev; - /* Skip pm_runtime_enable for OMAP1 */ - if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) { - pm_runtime_enable(dev); - pm_runtime_irq_safe(dev); - } + pm_runtime_enable(dev); + pm_runtime_irq_safe(dev); if (!timer->reserved) { ret = pm_runtime_get_sync(dev); diff --git a/arch/arm/plat-omap/include/plat/sram.h b/arch/arm/plat-omap/include/plat/sram.h index fb061cf0d736..30a07730807a 100644 --- a/arch/arm/plat-omap/include/plat/sram.h +++ b/arch/arm/plat-omap/include/plat/sram.h @@ -5,13 +5,4 @@ void omap_map_sram(unsigned long start, unsigned long size, unsigned long skip, int cached); void omap_sram_reset(void); -extern void *omap_sram_push_address(unsigned long size); - -/* Macro to push a function to the internal SRAM, using the fncpy API */ -#define omap_sram_push(funcp, size) ({ \ - typeof(&(funcp)) _res = NULL; \ - void *_sram_address = omap_sram_push_address(size); \ - if (_sram_address) \ - _res = fncpy(_sram_address, &(funcp), size); \ - _res; \ -}) +extern void *omap_sram_push(void *funcp, unsigned long size); diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index a5bc92d7e476..921840acf65c 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -42,7 +43,7 @@ static void __iomem *omap_sram_ceil; * Note that fncpy requires the returned address to be aligned * to an 8-byte boundary. */ -void *omap_sram_push_address(unsigned long size) +static void *omap_sram_push_address(unsigned long size) { unsigned long available, new_ceil = (unsigned long)omap_sram_ceil; @@ -60,6 +61,30 @@ void *omap_sram_push_address(unsigned long size) return (void *)omap_sram_ceil; } +void *omap_sram_push(void *funcp, unsigned long size) +{ + void *sram; + unsigned long base; + int pages; + void *dst = NULL; + + sram = omap_sram_push_address(size); + if (!sram) + return NULL; + + base = (unsigned long)sram & PAGE_MASK; + pages = PAGE_ALIGN(size) / PAGE_SIZE; + + set_memory_rw(base, pages); + + dst = fncpy(sram, funcp, size); + + set_memory_ro(base, pages); + set_memory_x(base, pages); + + return dst; +} + /* * The SRAM context is lost during off-idle and stack * needs to be reset. 
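The sram.c change above lets the OMAP SRAM mapping stay read-only and executable (W^X) at all times except for the duration of the fncpy() copy, which is bracketed by set_memory_rw() and set_memory_ro()/set_memory_x(). Those helpers operate on whole pages, so omap_sram_push() first rounds the destination down to a page boundary and the length up to a whole number of pages. A stand-alone sketch of just that arithmetic, assuming 4 KiB pages and made-up example values:

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* Hypothetical inputs; in the kernel they come from omap_sram_push_address(). */
	unsigned long sram = 0xfe400a30UL;	/* destination inside the SRAM mapping */
	unsigned long size = 0x1b40UL;		/* size of the function being copied */

	unsigned long base  = sram & PAGE_MASK;			/* round down to page */
	unsigned long pages = PAGE_ALIGN(size) / PAGE_SIZE;	/* round up to pages */

	/* The kernel then does: set_memory_rw(base, pages); fncpy(...);
	 * set_memory_ro(base, pages); set_memory_x(base, pages); */
	printf("base=%#lx pages=%lu\n", base, pages);
	return 0;
}

With these inputs the copy lands at base 0xfe400000 spanning two pages, which is the (base, pages) pair handed to the set_memory_*() helpers before and after the copy.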
@@ -75,6 +100,9 @@ void omap_sram_reset(void) void __init omap_map_sram(unsigned long start, unsigned long size, unsigned long skip, int cached) { + unsigned long base; + int pages; + if (size == 0) return; @@ -95,4 +123,10 @@ void __init omap_map_sram(unsigned long start, unsigned long size, */ memset_io(omap_sram_base + omap_sram_skip, 0, omap_sram_size - omap_sram_skip); + + base = (unsigned long)omap_sram_base; + pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE; + + set_memory_ro(base, pages); + set_memory_x(base, pages); } diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index aff6994950ba..a2399fd66e97 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -472,28 +472,27 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data, /***************************************************************************** * Ethernet switch ****************************************************************************/ -static __initconst const char *orion_ge00_mvmdio_bus_name = "orion-mii"; -static __initdata struct mdio_board_info - orion_ge00_switch_board_info; +static __initdata struct mdio_board_info orion_ge00_switch_board_info = { + .bus_id = "orion-mii", + .modalias = "mv88e6085", +}; void __init orion_ge00_switch_init(struct dsa_chip_data *d) { - struct mdio_board_info *bd; unsigned int i; if (!IS_BUILTIN(CONFIG_PHYLIB)) return; - for (i = 0; i < ARRAY_SIZE(d->port_names); i++) - if (!strcmp(d->port_names[i], "cpu")) + for (i = 0; i < ARRAY_SIZE(d->port_names); i++) { + if (!strcmp(d->port_names[i], "cpu")) { + d->netdev[i] = &orion_ge00.dev; break; + } + } - bd = &orion_ge00_switch_board_info; - bd->bus_id = orion_ge00_mvmdio_bus_name; - bd->mdio_addr = d->sw_addr; - d->netdev[i] = &orion_ge00.dev; - strcpy(bd->modalias, "mv88e6085"); - bd->platform_data = d; + orion_ge00_switch_board_info.mdio_addr = d->sw_addr; + orion_ge00_switch_board_info.platform_data = d; mdiobus_register_board_info(&orion_ge00_switch_board_info, 1); } diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c index bcdecc25461b..b2aa9b32bff2 100644 --- a/arch/arm/probes/kprobes/opt-arm.c +++ b/arch/arm/probes/kprobes/opt-arm.c @@ -165,13 +165,14 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) { unsigned long flags; struct kprobe *p = &op->kp; - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + struct kprobe_ctlblk *kcb; /* Save skipped registers */ regs->ARM_pc = (unsigned long)op->kp.addr; regs->ARM_ORIG_r0 = ~0UL; local_irq_save(flags); + kcb = get_kprobe_ctlblk(); if (kprobe_running()) { kprobes_inc_nmissed_count(&op->kp); @@ -191,6 +192,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) local_irq_restore(flags); } +NOKPROBE_SYMBOL(optimized_callback) int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig) { diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index a71a48e71fff..aa7496be311d 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -648,7 +648,7 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp, */ static int vfp_dying_cpu(unsigned int cpu) { - vfp_force_reload(cpu, current_thread_info()); + vfp_current_hw_state[cpu] = NULL; return 0; } diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0df64a6a56d4..cd187ed7a426 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -24,6 +24,7 @@ config ARM64 select ARCH_HAVE_NMI_SAFE_CMPXCHG if ACPI_APEI_SEA select ARCH_USE_CMPXCHG_LOCKREF select 
ARCH_SUPPORTS_MEMORY_FAILURE + select ARCH_SUPPORTS_LTO_CLANG select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_NUMA_BALANCING select ARCH_WANT_COMPAT_IPC_PARSE_VERSION @@ -433,7 +434,7 @@ config ARM64_ERRATUM_845719 config ARM64_ERRATUM_843419 bool "Cortex-A53: 843419: A load or store might access an incorrect address" - default y + default y if !LTO_CLANG select ARM64_MODULE_CMODEL_LARGE if MODULES help This option links the kernel with '--fix-cortex-a53-843419' and @@ -443,6 +444,20 @@ config ARM64_ERRATUM_843419 If unsure, say Y. +config ARM64_ERRATUM_1024718 + bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update" + default y + help + This option adds work around for Arm Cortex-A55 Erratum 1024718. + + Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect + update of the hardware dirty bit when the DBM/AP bits are updated + without a break-before-make. The work around is to disable the usage + of hardware DBM locally on the affected cores. CPUs not affected by + erratum will continue to use the feature. + + If unsure, say Y. + config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y @@ -504,20 +519,13 @@ config CAVIUM_ERRATUM_30115 config QCOM_FALKOR_ERRATUM_1003 bool "Falkor E1003: Incorrect translation due to ASID change" default y - select ARM64_PAN if ARM64_SW_TTBR0_PAN help On Falkor v1, an incorrect ASID may be cached in the TLB when ASID - and BADDR are changed together in TTBRx_EL1. The workaround for this - issue is to use a reserved ASID in cpu_do_switch_mm() before - switching to the new ASID. Saying Y here selects ARM64_PAN if - ARM64_SW_TTBR0_PAN is selected. This is done because implementing and - maintaining the E1003 workaround in the software PAN emulation code - would be an unnecessary complication. The affected Falkor v1 CPU - implements ARMv8.1 hardware PAN support and using hardware PAN - support versus software PAN emulation is mutually exclusive at - runtime. - - If unsure, say Y. + and BADDR are changed together in TTBRx_EL1. Since we keep the ASID + in TTBR1_EL1, this situation only occurs in the entry trampoline and + then only for entries in the walk cache, since the leaf translation + is unchanged. Work around the erratum by invalidating the walk cache + entries for the trampoline before entering the kernel proper. config QCOM_FALKOR_ERRATUM_1009 bool "Falkor E1009: Prematurely complete a DSB after a TLBI" @@ -539,6 +547,16 @@ config QCOM_QDF2400_ERRATUM_0065 If unsure, say Y. +config QCOM_FALKOR_ERRATUM_E1041 + bool "Falkor E1041: Speculative instruction fetches might cause errant memory access" + default y + help + Falkor CPU may speculatively fetch instructions from an improper + memory location when MMU translation is changed from SCTLR_ELn[M]=1 + to SCTLR_ELn[M]=0. Prefix an ISB instruction to fix the problem. + + If unsure, say Y. + endmenu @@ -803,6 +821,44 @@ config FORCE_MAX_ZONEORDER However for 4K, we choose a higher default value, 11 as opposed to 10, giving us 4M allocations matching the default size used by generic code. +config UNMAP_KERNEL_AT_EL0 + bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT + default y + help + Speculation attacks against some high-performance processors can + be used to bypass MMU permission checks and leak kernel data to + userspace. This can be defended against by unmapping the kernel + when running in userspace, mapping it back in on exception entry + via a trampoline page in the vector table. 
+ + If unsure, say Y. + +config HARDEN_BRANCH_PREDICTOR + bool "Harden the branch predictor against aliasing attacks" if EXPERT + default y + help + Speculation attacks against some high-performance processors rely on + being able to manipulate the branch predictor for a victim context by + executing aliasing branches in the attacker context. Such attacks + can be partially mitigated against by clearing internal branch + predictor state and limiting the prediction logic in some situations. + + This config option will take CPU-specific actions to harden the + branch predictor against aliasing attacks and may rely on specific + instruction sequences or control bits being set by the system + firmware. + + If unsure, say Y. + +config ARM64_SSBD + bool "Speculative Store Bypass Disable" if EXPERT + default y + help + This enables mitigation of the bypassing of previous stores + by speculative loads. + + If unsure, say Y. + menuconfig ARMV8_DEPRECATED bool "Emulate deprecated/obsolete ARMv8 instructions" depends on COMPAT @@ -1015,7 +1071,7 @@ config RANDOMIZE_BASE config RANDOMIZE_MODULE_REGION_FULL bool "Randomize the module region independently from the core kernel" - depends on RANDOMIZE_BASE + depends on RANDOMIZE_BASE && !LTO_CLANG default y help Randomizes the location of the module region without considering the @@ -1049,6 +1105,23 @@ config CMDLINE entering them here. As a minimum, you should specify the the root device (e.g. root=/dev/nfs). +choice + prompt "Kernel command line type" if CMDLINE != "" + default CMDLINE_FROM_BOOTLOADER + +config CMDLINE_FROM_BOOTLOADER + bool "Use bootloader kernel arguments if available" + help + Uses the command-line options passed by the boot loader. If + the boot loader doesn't provide any, the default kernel command + string provided in CMDLINE will be used. + +config CMDLINE_EXTEND + bool "Extend bootloader kernel arguments" + help + The command-line arguments provided by the boot loader will be + appended to the default kernel command string. + config CMDLINE_FORCE bool "Always use the default kernel command string" help @@ -1056,6 +1129,7 @@ config CMDLINE_FORCE loader passes other arguments to the kernel. This is useful if you cannot or don't want to change the command-line options your boot loader passes to the kernel. +endchoice config EFI_STUB bool @@ -1088,6 +1162,41 @@ config DMI However, even with this option, the resultant kernel should continue to boot on existing non-UEFI platforms. +config BUILD_ARM64_APPENDED_DTB_IMAGE + bool "Build a concatenated Image.gz/dtb by default" + depends on OF + help + Enabling this option will cause a concatenated Image.gz and list of + DTBs to be built by default (instead of a standalone Image.gz.) + The image will built in arch/arm64/boot/Image.gz-dtb + +choice + prompt "Appended DTB Kernel Image name" + depends on BUILD_ARM64_APPENDED_DTB_IMAGE + help + Enabling this option will cause a specific kernel image Image or + Image.gz to be used for final image creation. + The image will built in arch/arm64/boot/IMAGE-NAME-dtb + + config IMG_GZ_DTB + bool "Image.gz-dtb" + config IMG_DTB + bool "Image-dtb" +endchoice + +config BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME + string + depends on BUILD_ARM64_APPENDED_DTB_IMAGE + default "Image.gz-dtb" if IMG_GZ_DTB + default "Image-dtb" if IMG_DTB + +config BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES + string "Default dtb names" + depends on BUILD_ARM64_APPENDED_DTB_IMAGE + help + Space separated list of names of dtbs to append when + building a concatenated Image.gz-dtb. 
+ endmenu menu "Userspace binary formats" diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 939b310913cf..e64f5f0c5fd7 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -14,16 +14,29 @@ LDFLAGS_vmlinux :=-p --no-undefined -X CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET) GZFLAGS :=-9 -ifneq ($(CONFIG_RELOCATABLE),) -LDFLAGS_vmlinux += -pie -shared -Bsymbolic +ifeq ($(CONFIG_RELOCATABLE), y) +# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour +# for relative relocs, since this leads to better Image compression +# with the relocation offsets always being zero. +LDFLAGS_vmlinux += -pie -shared -Bsymbolic \ + $(call ld-option, --no-apply-dynamic-relocs) endif ifeq ($(CONFIG_ARM64_ERRATUM_843419),y) ifeq ($(call ld-option, --fix-cortex-a53-843419),) $(warning ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum) else + ifeq ($(call gold-ifversion, -lt, 114000000, y), y) +$(warning This version of GNU gold may generate incorrect code with --fix-cortex-a53-843419;\ + see https://sourceware.org/bugzilla/show_bug.cgi?id=21491) + endif LDFLAGS_vmlinux += --fix-cortex-a53-843419 endif +else + ifeq ($(ld-name),gold) +# Pass --no-fix-cortex-a53-843419 to ensure the erratum fix is disabled +LDFLAGS += --no-fix-cortex-a53-843419 + endif endif KBUILD_DEFCONFIG := defconfig @@ -45,9 +58,17 @@ $(warning Detected assembler with broken .inst; disassembly will be unreliable) endif endif -KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst) +ifeq ($(cc-name),clang) +# This is a workaround for https://bugs.llvm.org/show_bug.cgi?id=30792. +# TODO: revert when this is fixed in LLVM. +KBUILD_CFLAGS += -mno-implicit-float +else +KBUILD_CFLAGS += -mgeneral-regs-only +endif +KBUILD_CFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads) +KBUILD_CFLAGS += -fno-pic KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) @@ -58,14 +79,22 @@ KBUILD_CPPFLAGS += -mbig-endian CHECKFLAGS += -D__AARCH64EB__ AS += -EB LD += -EB +ifeq ($(ld-name),gold) +LDFLAGS += -maarch64_elf64_be_vec +else LDFLAGS += -maarch64linuxb +endif UTS_MACHINE := aarch64_be else KBUILD_CPPFLAGS += -mlittle-endian CHECKFLAGS += -D__AARCH64EL__ AS += -EL LD += -EL +ifeq ($(ld-name),gold) +LDFLAGS += -maarch64_elf64_le_vec +else LDFLAGS += -maarch64linux +endif UTS_MACHINE := aarch64 endif @@ -73,13 +102,14 @@ CHECKFLAGS += -D__aarch64__ -m64 ifeq ($(CONFIG_ARM64_MODULE_CMODEL_LARGE), y) KBUILD_CFLAGS_MODULE += -mcmodel=large +ifeq ($(CONFIG_LTO_CLANG), y) +# Code model is not stored in LLVM IR, so we need to pass it also to LLVMgold +LDFLAGS += -plugin-opt=-code-model=large +endif endif ifeq ($(CONFIG_ARM64_MODULE_PLTS),y) KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds -ifeq ($(CONFIG_DYNAMIC_FTRACE),y) -KBUILD_LDFLAGS_MODULE += $(objtree)/arch/arm64/kernel/ftrace-mod.o -endif endif # Default value @@ -94,6 +124,10 @@ else TEXT_OFFSET := 0x00080000 endif +ifeq ($(cc-name),clang) +KBUILD_CFLAGS += $(call cc-disable-warning, asm-operand-widths) +endif + # KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - 3)) - (1 << 61) # in 32-bit arithmetic KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \ @@ -113,10 +147,15 @@ core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a # Default target when executing plain make boot := arch/arm64/boot +ifeq ($(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE),y) 
+KBUILD_IMAGE := $(boot)/$(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME)) +else KBUILD_IMAGE := $(boot)/Image.gz +endif + KBUILD_DTBS := dtbs -all: Image.gz $(KBUILD_DTBS) +all: Image.gz $(KBUILD_DTBS) $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME)) Image: vmlinux @@ -139,6 +178,12 @@ dtbs: prepare scripts dtbs_install: $(Q)$(MAKE) $(dtbinst)=$(boot)/dts +Image-dtb: vmlinux scripts dtbs + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + +Image.gz-dtb: vmlinux scripts dtbs Image.gz + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + PHONY += vdso_install vdso_install: $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@ diff --git a/arch/arm64/boot/.gitignore b/arch/arm64/boot/.gitignore index 8dab0bb6ae66..34e35209fc2e 100644 --- a/arch/arm64/boot/.gitignore +++ b/arch/arm64/boot/.gitignore @@ -1,2 +1,4 @@ Image +Image-dtb Image.gz +Image.gz-dtb diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile index 1f012c506434..2c8cb864315e 100644 --- a/arch/arm64/boot/Makefile +++ b/arch/arm64/boot/Makefile @@ -14,16 +14,29 @@ # Based on the ia64 boot/Makefile. # +include $(srctree)/arch/arm64/boot/dts/Makefile + OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S targets := Image Image.gz +DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES)) +ifneq ($(DTB_NAMES),) +DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES)) +else +DTB_LIST := $(dtb-y) +endif +DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST)) + $(obj)/Image: vmlinux FORCE $(call if_changed,objcopy) $(obj)/Image.bz2: $(obj)/Image FORCE $(call if_changed,bzip2) +$(obj)/Image-dtb: $(obj)/Image $(DTB_OBJS) FORCE + $(call if_changed,cat) + $(obj)/Image.gz: $(obj)/Image FORCE $(call if_changed,gzip) @@ -36,6 +49,9 @@ $(obj)/Image.lzma: $(obj)/Image FORCE $(obj)/Image.lzo: $(obj)/Image FORCE $(call if_changed,lzo) +$(obj)/Image.gz-dtb: $(obj)/Image.gz $(DTB_OBJS) FORCE + $(call if_changed,cat) + install: $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ $(obj)/Image System.map "$(INSTALL_PATH)" diff --git a/arch/arm64/boot/dts/.gitignore b/arch/arm64/boot/dts/.gitignore deleted file mode 100644 index b60ed208c779..000000000000 --- a/arch/arm64/boot/dts/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.dtb diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile index c6684ab8e201..db5a70876487 100644 --- a/arch/arm64/boot/dts/Makefile +++ b/arch/arm64/boot/dts/Makefile @@ -32,3 +32,17 @@ dtstree := $(srctree)/$(src) dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(foreach d,$(dts-dirs), $(wildcard $(dtstree)/$(d)/*.dts))) always := $(dtb-y) + +targets += dtbs + +DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES)) +ifneq ($(DTB_NAMES),) +DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES)) +else +DTB_LIST := $(dtb-y) +endif +targets += $(DTB_LIST) + +dtbs: $(addprefix $(obj)/, $(DTB_LIST)) + +clean-files := dts/*.dtb *.dtb diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index 1ffa1c238a72..c3c65b06ba76 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts @@ -276,7 +276,7 @@ pinctrl-names = "default", "clk-gate"; bus-width = <8>; - max-frequency = <200000000>; + max-frequency = <100000000>; non-removable; disable-wp; cap-mmc-highspeed; @@ -301,6 +301,7 @@ &usb1_phy { status = "okay"; + phy-supply = <&usb_otg_pwr>; }; &usb0 { diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts 
b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts index 64c54c92e214..d71cbf596d1f 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts @@ -205,9 +205,6 @@ bus-width = <4>; cap-sd-highspeed; - sd-uhs-sdr12; - sd-uhs-sdr25; - sd-uhs-sdr50; max-frequency = <100000000>; disable-wp; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index d8dd3298b15c..fb8d76a17bc5 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -49,6 +49,14 @@ / { compatible = "amlogic,meson-gxl"; + + reserved-memory { + /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */ + secmon_reserved_alt: secmon@05000000 { + reg = <0x0 0x05000000 0x0 0x300000>; + no-map; + }; + }; }; ðmac { diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts index b39b6d6ec5aa..d2467e478ec3 100644 --- a/arch/arm64/boot/dts/arm/juno-r2.dts +++ b/arch/arm64/boot/dts/arm/juno-r2.dts @@ -98,6 +98,7 @@ next-level-cache = <&A72_L2>; clocks = <&scpi_dvfs 0>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A72 &CLUSTER_COST_A72>; capacity-dmips-mhz = <1024>; }; @@ -115,6 +116,7 @@ next-level-cache = <&A72_L2>; clocks = <&scpi_dvfs 0>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A72 &CLUSTER_COST_A72>; capacity-dmips-mhz = <1024>; }; @@ -132,6 +134,7 @@ next-level-cache = <&A53_L2>; clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A53R2 &CLUSTER_COST_A53R2>; capacity-dmips-mhz = <485>; }; @@ -149,6 +152,7 @@ next-level-cache = <&A53_L2>; clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A53R2 &CLUSTER_COST_A53R2>; capacity-dmips-mhz = <485>; }; @@ -166,6 +170,7 @@ next-level-cache = <&A53_L2>; clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A53R2 &CLUSTER_COST_A53R2>; capacity-dmips-mhz = <485>; }; @@ -183,6 +188,7 @@ next-level-cache = <&A53_L2>; clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A53R2 &CLUSTER_COST_A53R2>; capacity-dmips-mhz = <485>; }; @@ -199,6 +205,7 @@ cache-line-size = <64>; cache-sets = <1024>; }; + /include/ "juno-sched-energy.dtsi" }; pmu_a72 { diff --git a/arch/arm64/boot/dts/arm/juno-sched-energy.dtsi b/arch/arm64/boot/dts/arm/juno-sched-energy.dtsi new file mode 100644 index 000000000000..221196ea2091 --- /dev/null +++ b/arch/arm64/boot/dts/arm/juno-sched-energy.dtsi @@ -0,0 +1,123 @@ +/* + * ARM JUNO specific energy cost model data. There are no unit requirements for + * the data. Data can be normalized to any reference point, but the + * normalization must be consistent. That is, one bogo-joule/watt must be the + * same quantity for all data, but we don't care what it is. 
+ */ + +energy-costs { + /* Juno r0 Energy */ + CPU_COST_A57: core-cost0 { + busy-cost-data = < + 417 168 + 579 251 + 744 359 + 883 479 + 1024 616 + >; + idle-cost-data = < + 15 + 15 + 0 + 0 + >; + }; + CPU_COST_A53: core-cost1 { + busy-cost-data = < + 235 33 + 302 46 + 368 61 + 406 76 + 446 93 + >; + idle-cost-data = < + 6 + 6 + 0 + 0 + >; + }; + CLUSTER_COST_A57: cluster-cost0 { + busy-cost-data = < + 417 24 + 579 32 + 744 43 + 883 49 + 1024 64 + >; + idle-cost-data = < + 65 + 65 + 65 + 24 + >; + }; + CLUSTER_COST_A53: cluster-cost1 { + busy-cost-data = < + 235 26 + 302 30 + 368 39 + 406 47 + 446 57 + >; + idle-cost-data = < + 56 + 56 + 56 + 17 + >; + }; + /* Juno r2 Energy */ + CPU_COST_A72: core-cost2 { + busy-cost-data = < + 501 174 + 849 344 + 1024 526 + >; + idle-cost-data = < + 48 + 48 + 0 + 0 + >; + }; + CPU_COST_A53R2: core-cost3 { + busy-cost-data = < + 276 37 + 501 59 + 593 117 + >; + idle-cost-data = < + 33 + 33 + 0 + 0 + >; + }; + CLUSTER_COST_A72: cluster-cost2 { + busy-cost-data = < + 501 48 + 849 73 + 1024 107 + >; + idle-cost-data = < + 48 + 48 + 48 + 18 + >; + }; + CLUSTER_COST_A53R2: cluster-cost3 { + busy-cost-data = < + 276 41 + 501 86 + 593 107 + >; + idle-cost-data = < + 41 + 41 + 41 + 14 + >; + }; +}; diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts index c9236c4b967d..ae5306a0ca26 100644 --- a/arch/arm64/boot/dts/arm/juno.dts +++ b/arch/arm64/boot/dts/arm/juno.dts @@ -97,6 +97,7 @@ next-level-cache = <&A57_L2>; clocks = <&scpi_dvfs 0>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A57 &CLUSTER_COST_A57>; capacity-dmips-mhz = <1024>; }; @@ -114,6 +115,7 @@ next-level-cache = <&A57_L2>; clocks = <&scpi_dvfs 0>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A57 &CLUSTER_COST_A57>; capacity-dmips-mhz = <1024>; }; @@ -131,6 +133,7 @@ next-level-cache = <&A53_L2>; clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A53 &CLUSTER_COST_A53>; capacity-dmips-mhz = <578>; }; @@ -148,6 +151,7 @@ next-level-cache = <&A53_L2>; clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A53 &CLUSTER_COST_A53>; capacity-dmips-mhz = <578>; }; @@ -165,6 +169,7 @@ next-level-cache = <&A53_L2>; clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A53 &CLUSTER_COST_A53>; capacity-dmips-mhz = <578>; }; @@ -182,6 +187,7 @@ next-level-cache = <&A53_L2>; clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; + sched-energy-costs = <&CPU_COST_A53 &CLUSTER_COST_A53>; capacity-dmips-mhz = <578>; }; @@ -198,6 +204,7 @@ cache-line-size = <64>; cache-sets = <1024>; }; + /include/ "juno-sched-energy.dtsi" }; pmu_a57 { diff --git a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi index 4220fbdcb24a..ff5c4c47b22b 100644 --- a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi +++ b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi @@ -98,7 +98,7 @@ clock-output-names = "clk125mhz"; }; - pci { + pcie@30000000 { compatible = "pci-host-ecam-generic"; device_type = "pci"; #interrupt-cells = <1>; @@ -118,6 +118,7 @@ ranges = <0x02000000 0 0x40000000 0 0x40000000 0 0x20000000 0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>; + bus-range = <0 0xff>; interrupt-map-mask = <0 0 0 7>; interrupt-map = /* addr pin ic icaddr icintr */ diff --git 
a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi index ff1dc89f599e..66d48e35d66d 100644 --- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi @@ -92,7 +92,9 @@ cooling-max-level = <0>; #cooling-cells = <2>; /* min followed by max */ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + sched-energy-costs = <&CPU_COST &CLUSTER_COST &SYSTEM_COST>; dynamic-power-coefficient = <311>; + capacity-dmips-mhz = <1024>; }; cpu1: cpu@1 { @@ -103,6 +105,8 @@ next-level-cache = <&CLUSTER0_L2>; operating-points-v2 = <&cpu_opp_table>; cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + sched-energy-costs = <&CPU_COST &CLUSTER_COST &SYSTEM_COST>; + capacity-dmips-mhz = <1024>; }; cpu2: cpu@2 { @@ -113,6 +117,7 @@ next-level-cache = <&CLUSTER0_L2>; operating-points-v2 = <&cpu_opp_table>; cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + capacity-dmips-mhz = <1024>; }; cpu3: cpu@3 { @@ -123,6 +128,8 @@ next-level-cache = <&CLUSTER0_L2>; operating-points-v2 = <&cpu_opp_table>; cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + sched-energy-costs = <&CPU_COST &CLUSTER_COST &SYSTEM_COST>; + capacity-dmips-mhz = <1024>; }; cpu4: cpu@100 { @@ -133,6 +140,7 @@ next-level-cache = <&CLUSTER1_L2>; operating-points-v2 = <&cpu_opp_table>; cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + capacity-dmips-mhz = <1024>; }; cpu5: cpu@101 { @@ -143,6 +151,8 @@ next-level-cache = <&CLUSTER1_L2>; operating-points-v2 = <&cpu_opp_table>; cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + sched-energy-costs = <&CPU_COST &CLUSTER_COST &SYSTEM_COST>; + capacity-dmips-mhz = <1024>; }; cpu6: cpu@102 { @@ -153,6 +163,8 @@ next-level-cache = <&CLUSTER1_L2>; operating-points-v2 = <&cpu_opp_table>; cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + sched-energy-costs = <&CPU_COST &CLUSTER_COST &SYSTEM_COST>; + capacity-dmips-mhz = <1024>; }; cpu7: cpu@103 { @@ -163,6 +175,8 @@ next-level-cache = <&CLUSTER1_L2>; operating-points-v2 = <&cpu_opp_table>; cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>; + sched-energy-costs = <&CPU_COST &CLUSTER_COST &SYSTEM_COST>; + capacity-dmips-mhz = <1024>; }; CLUSTER0_L2: l2-cache0 { @@ -172,6 +186,50 @@ CLUSTER1_L2: l2-cache1 { compatible = "cache"; }; + + energy-costs { + SYSTEM_COST: system-cost0 { + busy-cost-data = < + 1024 0 + >; + idle-cost-data = < + 0 + 0 + 0 + 0 + >; + }; + CLUSTER_COST: cluster-cost0 { + busy-cost-data = < + 178 16 + 369 29 + 622 47 + 819 75 + 1024 112 + >; + idle-cost-data = < + 107 + 107 + 47 + 0 + >; + }; + CPU_COST: core-cost0 { + busy-cost-data = < + 178 69 + 369 125 + 622 224 + 819 367 + 1024 670 + >; + idle-cost-data = < + 15 + 15 + 0 + 0 + >; + }; + }; }; cpu_opp_table: cpu_opp_table { diff --git a/arch/arm64/boot/dts/marvell/armada-7040-db.dts b/arch/arm64/boot/dts/marvell/armada-7040-db.dts index 9c3bdf87e543..51327645b3fb 100644 --- a/arch/arm64/boot/dts/marvell/armada-7040-db.dts +++ b/arch/arm64/boot/dts/marvell/armada-7040-db.dts @@ -61,6 +61,12 @@ reg = <0x0 0x0 0x0 0x80000000>; }; + aliases { + ethernet0 = &cpm_eth0; + ethernet1 = &cpm_eth1; + ethernet2 = &cpm_eth2; + }; + cpm_reg_usb3_0_vbus: cpm-usb3-0-vbus { compatible = "regulator-fixed"; regulator-name = "usb3h0-vbus"; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-db.dts b/arch/arm64/boot/dts/marvell/armada-8040-db.dts index 0d7b2ae46610..a4f82f1efbbc 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-db.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-db.dts @@ -61,6 +61,13 @@ reg = <0x0 0x0 0x0 0x80000000>; }; + aliases { + 
ethernet0 = &cpm_eth0; + ethernet1 = &cpm_eth2; + ethernet2 = &cps_eth0; + ethernet3 = &cps_eth1; + }; + cpm_reg_usb3_0_vbus: cpm-usb3-0-vbus { compatible = "regulator-fixed"; regulator-name = "cpm-usb3h0-vbus"; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts index acf5c7d16d79..e6ee7443b530 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts @@ -62,6 +62,12 @@ reg = <0x0 0x0 0x0 0x80000000>; }; + aliases { + ethernet0 = &cpm_eth0; + ethernet1 = &cps_eth0; + ethernet2 = &cps_eth1; + }; + /* Regulator labels correspond with schematics */ v_3_3: regulator-3-3v { compatible = "regulator-fixed"; @@ -222,8 +228,11 @@ &cpm_eth0 { status = "okay"; + /* Network PHY */ phy = <&phy0>; phy-mode = "10gbase-kr"; + /* Generic PHY, providing serdes lanes */ + phys = <&cpm_comphy4 0>; }; &cpm_sata0 { @@ -257,15 +266,21 @@ &cps_eth0 { status = "okay"; + /* Network PHY */ phy = <&phy8>; phy-mode = "10gbase-kr"; + /* Generic PHY, providing serdes lanes */ + phys = <&cps_comphy4 0>; }; &cps_eth1 { /* CPS Lane 0 - J5 (Gigabit RJ45) */ status = "okay"; + /* Network PHY */ phy = <&ge_phy>; phy-mode = "sgmii"; + /* Generic PHY, providing serdes lanes */ + phys = <&cps_comphy0 1>; }; &cps_pinctrl { diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi index f2aa2a81de4d..9a7b63cd63a3 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi @@ -63,8 +63,10 @@ cpm_ethernet: ethernet@0 { compatible = "marvell,armada-7k-pp22"; reg = <0x0 0x100000>, <0x129000 0xb000>; - clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>, <&cpm_clk 1 5>; - clock-names = "pp_clk", "gop_clk", "mg_clk"; + clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>, + <&cpm_clk 1 5>, <&cpm_clk 1 18>; + clock-names = "pp_clk", "gop_clk", + "mg_clk","axi_clk"; marvell,system-controller = <&cpm_syscon0>; status = "disabled"; dma-coherent; @@ -109,12 +111,51 @@ }; }; + cpm_comphy: phy@120000 { + compatible = "marvell,comphy-cp110"; + reg = <0x120000 0x6000>; + marvell,system-controller = <&cpm_syscon0>; + #address-cells = <1>; + #size-cells = <0>; + + cpm_comphy0: phy@0 { + reg = <0>; + #phy-cells = <1>; + }; + + cpm_comphy1: phy@1 { + reg = <1>; + #phy-cells = <1>; + }; + + cpm_comphy2: phy@2 { + reg = <2>; + #phy-cells = <1>; + }; + + cpm_comphy3: phy@3 { + reg = <3>; + #phy-cells = <1>; + }; + + cpm_comphy4: phy@4 { + reg = <4>; + #phy-cells = <1>; + }; + + cpm_comphy5: phy@5 { + reg = <5>; + #phy-cells = <1>; + }; + }; + cpm_mdio: mdio@12a200 { #address-cells = <1>; #size-cells = <0>; compatible = "marvell,orion-mdio"; reg = <0x12a200 0x10>; - clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>; + clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>, + <&cpm_clk 1 6>, <&cpm_clk 1 18>; status = "disabled"; }; @@ -295,8 +336,8 @@ compatible = "marvell,armada-cp110-sdhci"; reg = <0x780000 0x300>; interrupts = ; - clock-names = "core"; - clocks = <&cpm_clk 1 4>; + clock-names = "core","axi"; + clocks = <&cpm_clk 1 4>, <&cpm_clk 1 18>; dma-coherent; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi index 4fe70323abb3..faf28633a309 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi @@ -63,8 +63,10 @@ cps_ethernet: ethernet@0 { compatible = "marvell,armada-7k-pp22"; reg = <0x0 
0x100000>, <0x129000 0xb000>; - clocks = <&cps_clk 1 3>, <&cps_clk 1 9>, <&cps_clk 1 5>; - clock-names = "pp_clk", "gop_clk", "mg_clk"; + clocks = <&cps_clk 1 3>, <&cps_clk 1 9>, + <&cps_clk 1 5>, <&cps_clk 1 18>; + clock-names = "pp_clk", "gop_clk", + "mg_clk", "axi_clk"; marvell,system-controller = <&cps_syscon0>; status = "disabled"; dma-coherent; @@ -109,12 +111,51 @@ }; }; + cps_comphy: phy@120000 { + compatible = "marvell,comphy-cp110"; + reg = <0x120000 0x6000>; + marvell,system-controller = <&cps_syscon0>; + #address-cells = <1>; + #size-cells = <0>; + + cps_comphy0: phy@0 { + reg = <0>; + #phy-cells = <1>; + }; + + cps_comphy1: phy@1 { + reg = <1>; + #phy-cells = <1>; + }; + + cps_comphy2: phy@2 { + reg = <2>; + #phy-cells = <1>; + }; + + cps_comphy3: phy@3 { + reg = <3>; + #phy-cells = <1>; + }; + + cps_comphy4: phy@4 { + reg = <4>; + #phy-cells = <1>; + }; + + cps_comphy5: phy@5 { + reg = <5>; + #phy-cells = <1>; + }; + }; + cps_mdio: mdio@12a200 { #address-cells = <1>; #size-cells = <0>; compatible = "marvell,orion-mdio"; reg = <0x12a200 0x10>; - clocks = <&cps_clk 1 9>, <&cps_clk 1 5>; + clocks = <&cps_clk 1 9>, <&cps_clk 1 5>, + <&cps_clk 1 6>, <&cps_clk 1 18>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi index b99a27372965..da64e1cab233 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi @@ -81,6 +81,7 @@ reg = <0x000>; enable-method = "psci"; cpu-idle-states = <&CPU_SLEEP_0>; + #cooling-cells = <2>; }; cpu1: cpu@1 { @@ -97,6 +98,7 @@ reg = <0x100>; enable-method = "psci"; cpu-idle-states = <&CPU_SLEEP_0>; + #cooling-cells = <2>; }; cpu3: cpu@101 { diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi index 54f418d05e15..2306b1a0c09a 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi @@ -46,7 +46,7 @@ compatible = "ethernet-phy-ieee802.3-c22"; reg = <0x0>; interrupt-parent = <&gpio>; - interrupts = ; + interrupts = ; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index dc3817593e14..61da6e65900b 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -901,6 +901,7 @@ "dsi_phy_regulator"; #clock-cells = <1>; + #phy-cells = <0>; clocks = <&gcc GCC_MDSS_AHB_CLK>; clock-names = "iface_clk"; @@ -1430,8 +1431,8 @@ #address-cells = <1>; #size-cells = <0>; - qcom,ipc-1 = <&apcs 0 13>; - qcom,ipc-6 = <&apcs 0 19>; + qcom,ipc-1 = <&apcs 8 13>; + qcom,ipc-3 = <&apcs 8 19>; apps_smsm: apps@0 { reg = <0>; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 887b61c872dd..ab00be277c6f 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -484,8 +484,8 @@ blsp2_spi5: spi@075ba000{ compatible = "qcom,spi-qup-v2.2.1"; reg = <0x075ba000 0x600>; - interrupts = ; - clocks = <&gcc GCC_BLSP2_QUP5_SPI_APPS_CLK>, + interrupts = ; + clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>; clock-names = "core", "iface"; pinctrl-names = "default", "sleep"; diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi index d9d885006a8e..26a978616071 100644 --- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi +++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi @@ -93,20 +93,12 @@ regulator-always-on; }; - 
rsnd_ak4613: sound { - compatible = "simple-audio-card"; + sound_card: sound { + compatible = "audio-graph-card"; - simple-audio-card,format = "left_j"; - simple-audio-card,bitclock-master = <&sndcpu>; - simple-audio-card,frame-master = <&sndcpu>; + label = "rcar-sound"; - sndcpu: simple-audio-card,cpu { - sound-dai = <&rcar_sound>; - }; - - sndcodec: simple-audio-card,codec { - sound-dai = <&ak4613>; - }; + dais = <&rsnd_port0>; }; vbus0_usb2: regulator-vbus0-usb2 { @@ -264,6 +256,7 @@ reg = <0>; interrupt-parent = <&gpio2>; interrupts = <11 IRQ_TYPE_LEVEL_LOW>; + reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>; }; }; @@ -319,6 +312,12 @@ asahi-kasei,out4-single-end; asahi-kasei,out5-single-end; asahi-kasei,out6-single-end; + + port { + ak4613_endpoint: endpoint { + remote-endpoint = <&rsnd_endpoint0>; + }; + }; }; cs2000: clk_multiplier@4f { @@ -537,10 +536,18 @@ <&audio_clk_c>, <&cpg CPG_CORE CPG_AUDIO_CLK_I>; - rcar_sound,dai { - dai0 { - playback = <&ssi0 &src0 &dvc0>; - capture = <&ssi1 &src1 &dvc1>; + ports { + rsnd_port0: port@0 { + rsnd_endpoint0: endpoint { + remote-endpoint = <&ak4613_endpoint>; + + dai-format = "left_j"; + bitclock-master = <&rsnd_endpoint0>; + frame-master = <&rsnd_endpoint0>; + + playback = <&ssi0 &src0 &dvc0>; + capture = <&ssi1 &src1 &dvc1>; + }; }; }; }; diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi index 1b868df2393f..e95d99265af9 100644 --- a/arch/arm64/boot/dts/renesas/ulcb.dtsi +++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi @@ -145,7 +145,6 @@ &avb { pinctrl-0 = <&avb_pins>; pinctrl-names = "default"; - renesas,no-ether-link; phy-handle = <&phy0>; status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index d4f80786e7c2..28257724a56e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts @@ -136,11 +136,12 @@ phy-mode = "rgmii"; pinctrl-names = "default"; pinctrl-0 = <&rgmiim1_pins>; + snps,force_thresh_dma_mode; snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>; snps,reset-active-low; snps,reset-delays-us = <0 10000 50000>; - tx_delay = <0x26>; - rx_delay = <0x11>; + tx_delay = <0x24>; + rx_delay = <0x18>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index 41d61840fb99..d70e409e2b0c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -683,7 +683,7 @@ interrupts = ; clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; status = "disabled"; }; @@ -694,7 +694,7 @@ interrupts = ; clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; status = "disabled"; }; @@ -705,7 +705,7 @@ interrupts = ; clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi index 1070c8264c13..2313aea0e69e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi +++ 
b/arch/arm64/boot/dts/rockchip/rk3368.dtsi @@ -257,7 +257,7 @@ max-frequency = <150000000>; clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>, <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; interrupts = ; resets = <&cru SRST_SDIO0>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi index 199a5118b20d..264a6bb60c53 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi @@ -406,8 +406,9 @@ wlan_pd_n: wlan-pd-n { compatible = "regulator-fixed"; regulator-name = "wlan_pd_n"; + pinctrl-names = "default"; + pinctrl-0 = <&wlan_module_reset_l>; - /* Note the wlan_module_reset_l pinctrl */ enable-active-high; gpio = <&gpio1 11 GPIO_ACTIVE_HIGH>; @@ -940,12 +941,6 @@ ap_i2c_audio: &i2c8 { pinctrl-0 = < &ap_pwroff /* AP will auto-assert this when in S3 */ &clk_32k /* This pin is always 32k on gru boards */ - - /* - * We want this driven low ASAP; firmware should help us, but - * we can help ourselves too. - */ - &wlan_module_reset_l >; pcfg_output_low: pcfg-output-low { @@ -1125,12 +1120,7 @@ ap_i2c_audio: &i2c8 { }; wlan_module_reset_l: wlan-module-reset-l { - /* - * We want this driven low ASAP (As {Soon,Strongly} As - * Possible), to avoid leakage through the powered-down - * WiFi. - */ - rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_output_low>; + rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_pull_none>; }; bt_host_wake_l: bt-host-wake-l { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index 910628d18add..1fc5060d7027 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -155,17 +155,6 @@ regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; }; - - vdd_log: vdd-log { - compatible = "pwm-regulator"; - pwms = <&pwm2 0 25000 0>; - regulator-name = "vdd_log"; - regulator-min-microvolt = <800000>; - regulator-max-microvolt = <1400000>; - regulator-always-on; - regulator-boot-on; - status = "okay"; - }; }; &cpu_b0 { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi index 0f873c897d0d..ce592a4c0c4c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi @@ -457,7 +457,7 @@ assigned-clocks = <&cru SCLK_PCIEPHY_REF>; assigned-clock-parents = <&cru SCLK_PCIEPHY_REF100M>; assigned-clock-rates = <100000000>; - ep-gpios = <&gpio3 RK_PB5 GPIO_ACTIVE_HIGH>; + ep-gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_HIGH>; num-lanes = <4>; pinctrl-names = "default"; pinctrl-0 = <&pcie_clkreqn_cpm>; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 34480e9af2e7..14f170fa433c 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -4,6 +4,7 @@ CONFIG_AUDIT=y CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_SCHED_WALT=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y @@ -24,8 +25,9 @@ CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_USER_NS=y CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_TUNE=y +CONFIG_DEFAULT_USE_ENERGY_AWARE=y CONFIG_BLK_DEV_INITRD=y -CONFIG_KALLSYMS_ALL=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_JUMP_LABEL=y @@ -69,13 +71,13 @@ CONFIG_HOTPLUG_PCI_ACPI=y CONFIG_PCI_LAYERSCAPE=y CONFIG_PCI_HISI=y 
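# The PCIe controller entries around this point are only being re-sorted;
# the functional changes in this defconfig are the SCHED_WALT/SCHED_TUNE
# and energy-aware scheduling options above.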
CONFIG_PCIE_QCOM=y -CONFIG_PCIE_KIRIN=y CONFIG_PCIE_ARMADA_8K=y +CONFIG_PCIE_KIRIN=y CONFIG_PCI_AARDVARK=y CONFIG_PCIE_RCAR=y -CONFIG_PCIE_ROCKCHIP=m CONFIG_PCI_HOST_GENERIC=y CONFIG_PCI_XGENE=y +CONFIG_PCIE_ROCKCHIP=m CONFIG_ARM64_VA_BITS_48=y CONFIG_SCHED_MC=y CONFIG_NUMA=y @@ -93,6 +95,12 @@ CONFIG_HIBERNATION=y CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y CONFIG_ARM_CPUIDLE=y CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y CONFIG_CPUFREQ_DT=y CONFIG_ARM_BIG_LITTLE_CPUFREQ=y CONFIG_ARM_SCPI_CPUFREQ=y @@ -140,11 +148,10 @@ CONFIG_BT_HIDP=m CONFIG_BT_LEDS=y # CONFIG_BT_DEBUGFS is not set CONFIG_BT_HCIUART=m -CONFIG_BT_HCIUART_LL=y -CONFIG_CFG80211=m -CONFIG_MAC80211=m +CONFIG_CFG80211=y +CONFIG_MAC80211=y CONFIG_MAC80211_LEDS=y -CONFIG_RFKILL=m +CONFIG_RFKILL=y CONFIG_NET_9P=y CONFIG_NET_9P_VIRTIO=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -210,21 +217,16 @@ CONFIG_REALTEK_PHY=m CONFIG_ROCKCHIP_PHY=y CONFIG_USB_PEGASUS=m CONFIG_USB_RTL8150=m -CONFIG_USB_RTL8152=m -CONFIG_USB_USBNET=m -CONFIG_USB_NET_DM9601=m -CONFIG_USB_NET_SR9800=m -CONFIG_USB_NET_SMSC75XX=m -CONFIG_USB_NET_SMSC95XX=m -CONFIG_USB_NET_PLUSB=m -CONFIG_USB_NET_MCS7830=m +CONFIG_USB_RTL8152=y CONFIG_BRCMFMAC=m +CONFIG_RTL_CARDS=m CONFIG_WL18XX=m CONFIG_WLCORE_SDIO=m +CONFIG_USB_NET_RNDIS_WLAN=y CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_ADC=m -CONFIG_KEYBOARD_CROS_EC=y CONFIG_KEYBOARD_GPIO=y +CONFIG_KEYBOARD_CROS_EC=y CONFIG_INPUT_MISC=y CONFIG_INPUT_PM8941_PWRKEY=y CONFIG_INPUT_HISI_POWERKEY=y @@ -275,20 +277,20 @@ CONFIG_I2C_UNIPHIER_F=y CONFIG_I2C_RCAR=y CONFIG_I2C_CROS_EC_TUNNEL=y CONFIG_SPI=y -CONFIG_SPI_MESON_SPICC=m -CONFIG_SPI_MESON_SPIFC=m CONFIG_SPI_BCM2835=m CONFIG_SPI_BCM2835AUX=m +CONFIG_SPI_MESON_SPICC=m +CONFIG_SPI_MESON_SPIFC=m CONFIG_SPI_ORION=y CONFIG_SPI_PL022=y -CONFIG_SPI_QUP=y CONFIG_SPI_ROCKCHIP=y +CONFIG_SPI_QUP=y CONFIG_SPI_S3C64XX=y CONFIG_SPI_SPIDEV=m CONFIG_SPMI=y -CONFIG_PINCTRL_IPQ8074=y CONFIG_PINCTRL_SINGLE=y CONFIG_PINCTRL_MAX77620=y +CONFIG_PINCTRL_IPQ8074=y CONFIG_PINCTRL_MSM8916=y CONFIG_PINCTRL_MSM8994=y CONFIG_PINCTRL_MSM8996=y @@ -302,6 +304,8 @@ CONFIG_GPIO_XGENE_SB=y CONFIG_GPIO_PCA953X=y CONFIG_GPIO_PCA953X_IRQ=y CONFIG_GPIO_MAX77620=y +CONFIG_POWER_AVS=y +CONFIG_ROCKCHIP_IODOMAIN=y CONFIG_POWER_RESET_MSM=y CONFIG_POWER_RESET_XGENE=y CONFIG_POWER_RESET_SYSCON=y @@ -313,9 +317,8 @@ CONFIG_SENSORS_INA2XX=m CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y CONFIG_CPU_THERMAL=y CONFIG_THERMAL_EMULATION=y -CONFIG_BRCMSTB_THERMAL=m -CONFIG_EXYNOS_THERMAL=y CONFIG_ROCKCHIP_THERMAL=m +CONFIG_EXYNOS_THERMAL=y CONFIG_WATCHDOG=y CONFIG_S3C2410_WATCHDOG=y CONFIG_MESON_GXBB_WATCHDOG=m @@ -334,9 +337,9 @@ CONFIG_MFD_MAX77620=y CONFIG_MFD_SPMI_PMIC=y CONFIG_MFD_RK808=y CONFIG_MFD_SEC_CORE=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_AXP20X=y CONFIG_REGULATOR_FAN53555=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_GPIO=y CONFIG_REGULATOR_HI6421V530=y CONFIG_REGULATOR_HI655X=y @@ -346,16 +349,13 @@ CONFIG_REGULATOR_QCOM_SMD_RPM=y CONFIG_REGULATOR_QCOM_SPMI=y CONFIG_REGULATOR_RK808=y CONFIG_REGULATOR_S2MPS11=y +CONFIG_RC_DEVICES=y +CONFIG_IR_MESON=m CONFIG_MEDIA_SUPPORT=m CONFIG_MEDIA_CAMERA_SUPPORT=y CONFIG_MEDIA_ANALOG_TV_SUPPORT=y CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y CONFIG_MEDIA_CONTROLLER=y -CONFIG_MEDIA_RC_SUPPORT=y -CONFIG_RC_CORE=m -CONFIG_RC_DEVICES=y -CONFIG_RC_DECODERS=y -CONFIG_IR_MESON=m CONFIG_VIDEO_V4L2_SUBDEV_API=y # CONFIG_DVB_NET 
is not set CONFIG_V4L_MEM2MEM_DRIVERS=y @@ -393,7 +393,6 @@ CONFIG_FB_ARMCLCD=y CONFIG_BACKLIGHT_GENERIC=m CONFIG_BACKLIGHT_PWM=m CONFIG_BACKLIGHT_LP855X=m -CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set @@ -492,7 +491,6 @@ CONFIG_XEN_GRANT_DEV_ALLOC=y CONFIG_COMMON_CLK_RK808=y CONFIG_COMMON_CLK_SCPI=y CONFIG_COMMON_CLK_CS2000_CP=y -CONFIG_COMMON_CLK_S2MPS11=y CONFIG_CLK_QORIQ=y CONFIG_COMMON_CLK_PWM=y CONFIG_COMMON_CLK_QCOM=y @@ -531,13 +529,13 @@ CONFIG_PWM_MESON=m CONFIG_PWM_ROCKCHIP=y CONFIG_PWM_SAMSUNG=y CONFIG_PWM_TEGRA=m -CONFIG_PHY_RCAR_GEN3_USB2=y -CONFIG_PHY_HI6220_USB=y +CONFIG_PHY_XGENE=y CONFIG_PHY_SUN4I_USB=y -CONFIG_PHY_ROCKCHIP_INNO_USB2=y +CONFIG_PHY_HI6220_USB=y +CONFIG_PHY_RCAR_GEN3_USB2=y CONFIG_PHY_ROCKCHIP_EMMC=y +CONFIG_PHY_ROCKCHIP_INNO_USB2=y CONFIG_PHY_ROCKCHIP_PCIE=m -CONFIG_PHY_XGENE=y CONFIG_PHY_TEGRA_XUSB=y CONFIG_QCOM_L2_PMU=y CONFIG_QCOM_L3_PMU=y @@ -579,29 +577,27 @@ CONFIG_VIRTUALIZATION=y CONFIG_KVM=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y -CONFIG_LOCKUP_DETECTOR=y -# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y # CONFIG_DEBUG_PREEMPT is not set -# CONFIG_FTRACE is not set +CONFIG_PROVE_LOCKING=y +CONFIG_FUNCTION_TRACER=y +CONFIG_IRQSOFF_TRACER=y +CONFIG_PREEMPT_TRACER=y +CONFIG_SCHED_TRACER=y CONFIG_MEMTEST=y CONFIG_SECURITY=y CONFIG_CRYPTO_ECHAINIV=y CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_ARM64_CRYPTO=y -CONFIG_CRYPTO_SHA256_ARM64=m CONFIG_CRYPTO_SHA512_ARM64=m CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y CONFIG_CRYPTO_GHASH_ARM64_CE=y CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m CONFIG_CRYPTO_CRC32_ARM64_CE=m -CONFIG_CRYPTO_AES_ARM64=m -CONFIG_CRYPTO_AES_ARM64_CE=m CONFIG_CRYPTO_AES_ARM64_CE_CCM=y CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m CONFIG_CRYPTO_CHACHA20_NEON=m CONFIG_CRYPTO_AES_ARM64_BS=m diff --git a/arch/arm64/configs/ranchu64_defconfig b/arch/arm64/configs/ranchu64_defconfig new file mode 100644 index 000000000000..3d2eb3275b1f --- /dev/null +++ b/arch/arm64/configs/ranchu64_defconfig @@ -0,0 +1,309 @@ +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_SWAP is not set +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_ARCH_MMAP_RND_BITS=24 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_ARCH_VEXPRESS=y +CONFIG_NR_CPUS=4 +CONFIG_PREEMPT=y +CONFIG_KSM=y +CONFIG_SECCOMP=y +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y +CONFIG_CMDLINE="console=ttyAMA0" +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_COMPAT=y +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y 
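+# The routing options above and the netfilter block below back Android's
+# iptables-based firewall and per-app data accounting; the QTAGUID and
+# QUOTA2 matches further down are Android-specific xtables extensions.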
+CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_INET_ESP=y +# CONFIG_INET_LRO is not set +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_TARGET_ECN=y +CONFIG_IP_NF_TARGET_TTL=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_SCSI=y +# CONFIG_SCSI_PROC_FS is not set +CONFIG_BLK_DEV_SD=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_SMC91X=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +# CONFIG_WLAN is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y 
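+# Input in the emulator comes from the virtual goldfish events device
+# enabled below; physical mice stay disabled.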
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_INPUT_TABLET=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +CONFIG_BATTERY_GOLDFISH=y +# CONFIG_HWMON is not set +CONFIG_MEDIA_SUPPORT=y +CONFIG_FB=y +CONFIG_FB_GOLDFISH=y +CONFIG_FB_SIMPLE=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +# CONFIG_USB_SUPPORT is not set +CONFIG_RTC_CLASS=y +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SW_SYNC_USER=y +CONFIG_ION=y +CONFIG_GOLDFISH_AUDIO=y +CONFIG_GOLDFISH=y +CONFIG_GOLDFISH_PIPE=y +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_EXT2_FS=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_FUSE_FS=y +CONFIG_CUSE=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_PANIC_TIMEOUT=5 +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_FTRACE is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_DEBUG_RODATA=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index 70c517aa4501..f856628e53fc 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -95,4 +95,10 @@ config CRYPTO_AES_ARM64_BS select CRYPTO_AES_ARM64 select CRYPTO_SIMD +config CRYPTO_SPECK_NEON + tristate "NEON accelerated Speck cipher algorithms" + depends on KERNEL_MODE_NEON + select CRYPTO_BLKCIPHER + select CRYPTO_SPECK + endif diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index b5edc5918c28..e761c0a7a181 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ 
-24,7 +24,7 @@ obj-$(CONFIG_CRYPTO_CRC32_ARM64_CE) += crc32-ce.o crc32-ce-y:= crc32-ce-core.o crc32-ce-glue.o obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o -CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto +aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o obj-$(CONFIG_CRYPTO_AES_ARM64_CE_CCM) += aes-ce-ccm.o aes-ce-ccm-y := aes-ce-ccm-glue.o aes-ce-ccm-core.o @@ -44,6 +44,9 @@ sha512-arm64-y := sha512-glue.o sha512-core.o obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o +obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o +speck-neon-y := speck-neon-core.o speck-neon-glue.o + obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o @@ -58,6 +61,7 @@ CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS $(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE $(call if_changed_rule,cc_o_c) +ifdef REGENERATE_ARM64_CRYPTO quiet_cmd_perlasm = PERLASM $@ cmd_perlasm = $(PERL) $(<) void $(@) @@ -66,5 +70,6 @@ $(src)/sha256-core.S_shipped: $(src)/sha512-armv8.pl $(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl $(call cmd,perlasm) +endif .PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S diff --git a/arch/arm64/crypto/aes-ce-core.S b/arch/arm64/crypto/aes-ce-core.S new file mode 100644 index 000000000000..8efdfdade393 --- /dev/null +++ b/arch/arm64/crypto/aes-ce-core.S @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2013 - 2017 Linaro Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include + + .arch armv8-a+crypto + +ENTRY(__aes_ce_encrypt) + sub w3, w3, #2 + ld1 {v0.16b}, [x2] + ld1 {v1.4s}, [x0], #16 + cmp w3, #10 + bmi 0f + bne 3f + mov v3.16b, v1.16b + b 2f +0: mov v2.16b, v1.16b + ld1 {v3.4s}, [x0], #16 +1: aese v0.16b, v2.16b + aesmc v0.16b, v0.16b +2: ld1 {v1.4s}, [x0], #16 + aese v0.16b, v3.16b + aesmc v0.16b, v0.16b +3: ld1 {v2.4s}, [x0], #16 + subs w3, w3, #3 + aese v0.16b, v1.16b + aesmc v0.16b, v0.16b + ld1 {v3.4s}, [x0], #16 + bpl 1b + aese v0.16b, v2.16b + eor v0.16b, v0.16b, v3.16b + st1 {v0.16b}, [x1] + ret +ENDPROC(__aes_ce_encrypt) + +ENTRY(__aes_ce_decrypt) + sub w3, w3, #2 + ld1 {v0.16b}, [x2] + ld1 {v1.4s}, [x0], #16 + cmp w3, #10 + bmi 0f + bne 3f + mov v3.16b, v1.16b + b 2f +0: mov v2.16b, v1.16b + ld1 {v3.4s}, [x0], #16 +1: aesd v0.16b, v2.16b + aesimc v0.16b, v0.16b +2: ld1 {v1.4s}, [x0], #16 + aesd v0.16b, v3.16b + aesimc v0.16b, v0.16b +3: ld1 {v2.4s}, [x0], #16 + subs w3, w3, #3 + aesd v0.16b, v1.16b + aesimc v0.16b, v0.16b + ld1 {v3.4s}, [x0], #16 + bpl 1b + aesd v0.16b, v2.16b + eor v0.16b, v0.16b, v3.16b + st1 {v0.16b}, [x1] + ret +ENDPROC(__aes_ce_decrypt) + +/* + * __aes_ce_sub() - use the aese instruction to perform the AES sbox + * substitution on each byte in 'input' + */ +ENTRY(__aes_ce_sub) + dup v1.4s, w0 + movi v0.16b, #0 + aese v0.16b, v1.16b + umov w0, v0.s[0] + ret +ENDPROC(__aes_ce_sub) + +ENTRY(__aes_ce_invert) + ld1 {v0.4s}, [x1] + aesimc v1.16b, v0.16b + st1 {v1.4s}, [x0] + ret +ENDPROC(__aes_ce_invert) diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-glue.c similarity index 62% rename from arch/arm64/crypto/aes-ce-cipher.c rename to arch/arm64/crypto/aes-ce-glue.c index 6a75cd75ed11..e6b3227bbf57 100644 --- a/arch/arm64/crypto/aes-ce-cipher.c +++ b/arch/arm64/crypto/aes-ce-glue.c @@ -29,6 +29,13 @@ struct aes_block { u8 b[AES_BLOCK_SIZE]; }; +asmlinkage void 
__aes_ce_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds); +asmlinkage void __aes_ce_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds); + +asmlinkage u32 __aes_ce_sub(u32 l); +asmlinkage void __aes_ce_invert(struct aes_block *out, + const struct aes_block *in); + static int num_rounds(struct crypto_aes_ctx *ctx) { /* @@ -44,10 +51,6 @@ static int num_rounds(struct crypto_aes_ctx *ctx) static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) { struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); - struct aes_block *out = (struct aes_block *)dst; - struct aes_block const *in = (struct aes_block *)src; - void *dummy0; - int dummy1; if (!may_use_simd()) { __aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx)); @@ -55,49 +58,13 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) } kernel_neon_begin(); - - __asm__(" ld1 {v0.16b}, %[in] ;" - " ld1 {v1.4s}, [%[key]], #16 ;" - " cmp %w[rounds], #10 ;" - " bmi 0f ;" - " bne 3f ;" - " mov v3.16b, v1.16b ;" - " b 2f ;" - "0: mov v2.16b, v1.16b ;" - " ld1 {v3.4s}, [%[key]], #16 ;" - "1: aese v0.16b, v2.16b ;" - " aesmc v0.16b, v0.16b ;" - "2: ld1 {v1.4s}, [%[key]], #16 ;" - " aese v0.16b, v3.16b ;" - " aesmc v0.16b, v0.16b ;" - "3: ld1 {v2.4s}, [%[key]], #16 ;" - " subs %w[rounds], %w[rounds], #3 ;" - " aese v0.16b, v1.16b ;" - " aesmc v0.16b, v0.16b ;" - " ld1 {v3.4s}, [%[key]], #16 ;" - " bpl 1b ;" - " aese v0.16b, v2.16b ;" - " eor v0.16b, v0.16b, v3.16b ;" - " st1 {v0.16b}, %[out] ;" - - : [out] "=Q"(*out), - [key] "=r"(dummy0), - [rounds] "=r"(dummy1) - : [in] "Q"(*in), - "1"(ctx->key_enc), - "2"(num_rounds(ctx) - 2) - : "cc"); - + __aes_ce_encrypt(ctx->key_enc, dst, src, num_rounds(ctx)); kernel_neon_end(); } static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) { struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); - struct aes_block *out = (struct aes_block *)dst; - struct aes_block const *in = (struct aes_block *)src; - void *dummy0; - int dummy1; if (!may_use_simd()) { __aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx)); @@ -105,62 +72,10 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) } kernel_neon_begin(); - - __asm__(" ld1 {v0.16b}, %[in] ;" - " ld1 {v1.4s}, [%[key]], #16 ;" - " cmp %w[rounds], #10 ;" - " bmi 0f ;" - " bne 3f ;" - " mov v3.16b, v1.16b ;" - " b 2f ;" - "0: mov v2.16b, v1.16b ;" - " ld1 {v3.4s}, [%[key]], #16 ;" - "1: aesd v0.16b, v2.16b ;" - " aesimc v0.16b, v0.16b ;" - "2: ld1 {v1.4s}, [%[key]], #16 ;" - " aesd v0.16b, v3.16b ;" - " aesimc v0.16b, v0.16b ;" - "3: ld1 {v2.4s}, [%[key]], #16 ;" - " subs %w[rounds], %w[rounds], #3 ;" - " aesd v0.16b, v1.16b ;" - " aesimc v0.16b, v0.16b ;" - " ld1 {v3.4s}, [%[key]], #16 ;" - " bpl 1b ;" - " aesd v0.16b, v2.16b ;" - " eor v0.16b, v0.16b, v3.16b ;" - " st1 {v0.16b}, %[out] ;" - - : [out] "=Q"(*out), - [key] "=r"(dummy0), - [rounds] "=r"(dummy1) - : [in] "Q"(*in), - "1"(ctx->key_dec), - "2"(num_rounds(ctx) - 2) - : "cc"); - + __aes_ce_decrypt(ctx->key_dec, dst, src, num_rounds(ctx)); kernel_neon_end(); } -/* - * aes_sub() - use the aese instruction to perform the AES sbox substitution - * on each byte in 'input' - */ -static u32 aes_sub(u32 input) -{ - u32 ret; - - __asm__("dup v1.4s, %w[in] ;" - "movi v0.16b, #0 ;" - "aese v0.16b, v1.16b ;" - "umov %w[out], v0.4s[0] ;" - - : [out] "=r"(ret) - : [in] "r"(input) - : "v0","v1"); - - return ret; -} - int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) { @@ -189,7 
+104,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, u32 *rki = ctx->key_enc + (i * kwords); u32 *rko = rki + kwords; - rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0]; + rko[0] = ror32(__aes_ce_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0]; rko[1] = rko[0] ^ rki[1]; rko[2] = rko[1] ^ rki[2]; rko[3] = rko[2] ^ rki[3]; @@ -202,7 +117,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, } else if (key_len == AES_KEYSIZE_256) { if (i >= 6) break; - rko[4] = aes_sub(rko[3]) ^ rki[4]; + rko[4] = __aes_ce_sub(rko[3]) ^ rki[4]; rko[5] = rko[4] ^ rki[5]; rko[6] = rko[5] ^ rki[6]; rko[7] = rko[6] ^ rki[7]; @@ -221,13 +136,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, key_dec[0] = key_enc[j]; for (i = 1, j--; j > 0; i++, j--) - __asm__("ld1 {v0.4s}, %[in] ;" - "aesimc v1.16b, v0.16b ;" - "st1 {v1.4s}, %[out] ;" - - : [out] "=Q"(key_dec[i]) - : [in] "Q"(key_enc[j]) - : "v0","v1"); + __aes_ce_invert(key_dec + i, key_enc + j); key_dec[i] = key_enc[0]; kernel_neon_end(); diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c index 624f4137918c..34b4e3d46aab 100644 --- a/arch/arm64/crypto/crc32-ce-glue.c +++ b/arch/arm64/crypto/crc32-ce-glue.c @@ -185,6 +185,7 @@ static struct shash_alg crc32_pmull_algs[] = { { .base.cra_name = "crc32", .base.cra_driver_name = "crc32-arm64-ce", .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .base.cra_blocksize = 1, .base.cra_module = THIS_MODULE, }, { @@ -200,6 +201,7 @@ static struct shash_alg crc32_pmull_algs[] = { { .base.cra_name = "crc32c", .base.cra_driver_name = "crc32c-arm64-ce", .base.cra_priority = 200, + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .base.cra_blocksize = 1, .base.cra_module = THIS_MODULE, } }; diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c index efbeb3e0dcfb..656b959bcdaa 100644 --- a/arch/arm64/crypto/sha1-ce-glue.c +++ b/arch/arm64/crypto/sha1-ce-glue.c @@ -29,6 +29,14 @@ struct sha1_ce_state { asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, int blocks); +#ifdef CONFIG_CFI_CLANG +static inline void __cfi_sha1_ce_transform(struct sha1_state *sst, + u8 const *src, int blocks) +{ + sha1_ce_transform((struct sha1_ce_state *)sst, src, blocks); +} +#define sha1_ce_transform __cfi_sha1_ce_transform +#endif const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count); const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize); diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c index fd1ff2b13dfa..ddda74844c43 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ b/arch/arm64/crypto/sha2-ce-glue.c @@ -29,6 +29,14 @@ struct sha256_ce_state { asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src, int blocks); +#ifdef CONFIG_CFI_CLANG +static inline void __cfi_sha2_ce_transform(struct sha256_state *sst, + u8 const *src, int blocks) +{ + sha2_ce_transform((struct sha256_ce_state *)sst, src, blocks); +} +#define sha2_ce_transform __cfi_sha2_ce_transform +#endif const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state, sst.count); diff --git a/arch/arm64/crypto/speck-neon-core.S b/arch/arm64/crypto/speck-neon-core.S new file mode 100644 index 000000000000..b14463438b09 --- /dev/null +++ b/arch/arm64/crypto/speck-neon-core.S @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS + * + * Copyright 
(c) 2018 Google, Inc + * + * Author: Eric Biggers + */ + +#include + + .text + + // arguments + ROUND_KEYS .req x0 // const {u64,u32} *round_keys + NROUNDS .req w1 // int nrounds + NROUNDS_X .req x1 + DST .req x2 // void *dst + SRC .req x3 // const void *src + NBYTES .req w4 // unsigned int nbytes + TWEAK .req x5 // void *tweak + + // registers which hold the data being encrypted/decrypted + // (underscores avoid a naming collision with ARM64 registers x0-x3) + X_0 .req v0 + Y_0 .req v1 + X_1 .req v2 + Y_1 .req v3 + X_2 .req v4 + Y_2 .req v5 + X_3 .req v6 + Y_3 .req v7 + + // the round key, duplicated in all lanes + ROUND_KEY .req v8 + + // index vector for tbl-based 8-bit rotates + ROTATE_TABLE .req v9 + ROTATE_TABLE_Q .req q9 + + // temporary registers + TMP0 .req v10 + TMP1 .req v11 + TMP2 .req v12 + TMP3 .req v13 + + // multiplication table for updating XTS tweaks + GFMUL_TABLE .req v14 + GFMUL_TABLE_Q .req q14 + + // next XTS tweak value(s) + TWEAKV_NEXT .req v15 + + // XTS tweaks for the blocks currently being encrypted/decrypted + TWEAKV0 .req v16 + TWEAKV1 .req v17 + TWEAKV2 .req v18 + TWEAKV3 .req v19 + TWEAKV4 .req v20 + TWEAKV5 .req v21 + TWEAKV6 .req v22 + TWEAKV7 .req v23 + + .align 4 +.Lror64_8_table: + .octa 0x080f0e0d0c0b0a090007060504030201 +.Lror32_8_table: + .octa 0x0c0f0e0d080b0a090407060500030201 +.Lrol64_8_table: + .octa 0x0e0d0c0b0a09080f0605040302010007 +.Lrol32_8_table: + .octa 0x0e0d0c0f0a09080b0605040702010003 +.Lgf128mul_table: + .octa 0x00000000000000870000000000000001 +.Lgf64mul_table: + .octa 0x0000000000000000000000002d361b00 + +/* + * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time + * + * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for + * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes + * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64. + * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64. + */ +.macro _speck_round_128bytes n, lanes + + // x = ror(x, 8) + tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b + tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b + tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b + tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b + + // x += y + add X_0.\lanes, X_0.\lanes, Y_0.\lanes + add X_1.\lanes, X_1.\lanes, Y_1.\lanes + add X_2.\lanes, X_2.\lanes, Y_2.\lanes + add X_3.\lanes, X_3.\lanes, Y_3.\lanes + + // x ^= k + eor X_0.16b, X_0.16b, ROUND_KEY.16b + eor X_1.16b, X_1.16b, ROUND_KEY.16b + eor X_2.16b, X_2.16b, ROUND_KEY.16b + eor X_3.16b, X_3.16b, ROUND_KEY.16b + + // y = rol(y, 3) + shl TMP0.\lanes, Y_0.\lanes, #3 + shl TMP1.\lanes, Y_1.\lanes, #3 + shl TMP2.\lanes, Y_2.\lanes, #3 + shl TMP3.\lanes, Y_3.\lanes, #3 + sri TMP0.\lanes, Y_0.\lanes, #(\n - 3) + sri TMP1.\lanes, Y_1.\lanes, #(\n - 3) + sri TMP2.\lanes, Y_2.\lanes, #(\n - 3) + sri TMP3.\lanes, Y_3.\lanes, #(\n - 3) + + // y ^= x + eor Y_0.16b, TMP0.16b, X_0.16b + eor Y_1.16b, TMP1.16b, X_1.16b + eor Y_2.16b, TMP2.16b, X_2.16b + eor Y_3.16b, TMP3.16b, X_3.16b +.endm + +/* + * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time + * + * This is the inverse of _speck_round_128bytes(). 
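+ * Each step of the forward round is undone in reverse order: the final
+ * y ^= x and the rotate of y are inverted first, then the key XOR, the
+ * modular addition, and last the byte rotate of x.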
+ */ +.macro _speck_unround_128bytes n, lanes + + // y ^= x + eor TMP0.16b, Y_0.16b, X_0.16b + eor TMP1.16b, Y_1.16b, X_1.16b + eor TMP2.16b, Y_2.16b, X_2.16b + eor TMP3.16b, Y_3.16b, X_3.16b + + // y = ror(y, 3) + ushr Y_0.\lanes, TMP0.\lanes, #3 + ushr Y_1.\lanes, TMP1.\lanes, #3 + ushr Y_2.\lanes, TMP2.\lanes, #3 + ushr Y_3.\lanes, TMP3.\lanes, #3 + sli Y_0.\lanes, TMP0.\lanes, #(\n - 3) + sli Y_1.\lanes, TMP1.\lanes, #(\n - 3) + sli Y_2.\lanes, TMP2.\lanes, #(\n - 3) + sli Y_3.\lanes, TMP3.\lanes, #(\n - 3) + + // x ^= k + eor X_0.16b, X_0.16b, ROUND_KEY.16b + eor X_1.16b, X_1.16b, ROUND_KEY.16b + eor X_2.16b, X_2.16b, ROUND_KEY.16b + eor X_3.16b, X_3.16b, ROUND_KEY.16b + + // x -= y + sub X_0.\lanes, X_0.\lanes, Y_0.\lanes + sub X_1.\lanes, X_1.\lanes, Y_1.\lanes + sub X_2.\lanes, X_2.\lanes, Y_2.\lanes + sub X_3.\lanes, X_3.\lanes, Y_3.\lanes + + // x = rol(x, 8) + tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b + tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b + tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b + tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b +.endm + +.macro _next_xts_tweak next, cur, tmp, n +.if \n == 64 + /* + * Calculate the next tweak by multiplying the current one by x, + * modulo p(x) = x^128 + x^7 + x^2 + x + 1. + */ + sshr \tmp\().2d, \cur\().2d, #63 + and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b + shl \next\().2d, \cur\().2d, #1 + ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8 + eor \next\().16b, \next\().16b, \tmp\().16b +.else + /* + * Calculate the next two tweaks by multiplying the current ones by x^2, + * modulo p(x) = x^64 + x^4 + x^3 + x + 1. + */ + ushr \tmp\().2d, \cur\().2d, #62 + shl \next\().2d, \cur\().2d, #2 + tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b + eor \next\().16b, \next\().16b, \tmp\().16b +.endif +.endm + +/* + * _speck_xts_crypt() - Speck-XTS encryption/decryption + * + * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer + * using Speck-XTS, specifically the variant with a block size of '2n' and round + * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and + * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a + * nonzero multiple of 128. + */ +.macro _speck_xts_crypt n, lanes, decrypting + + /* + * If decrypting, modify the ROUND_KEYS parameter to point to the last + * round key rather than the first, since for decryption the round keys + * are used in reverse order. 
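+ * The round loop below then walks the key schedule backwards, stepping
+ * the pointer down by one round key per iteration.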
+ */ +.if \decrypting + mov NROUNDS, NROUNDS /* zero the high 32 bits */ +.if \n == 64 + add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3 + sub ROUND_KEYS, ROUND_KEYS, #8 +.else + add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2 + sub ROUND_KEYS, ROUND_KEYS, #4 +.endif +.endif + + // Load the index vector for tbl-based 8-bit rotates +.if \decrypting + ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table +.else + ldr ROTATE_TABLE_Q, .Lror\n\()_8_table +.endif + + // One-time XTS preparation +.if \n == 64 + // Load first tweak + ld1 {TWEAKV0.16b}, [TWEAK] + + // Load GF(2^128) multiplication table + ldr GFMUL_TABLE_Q, .Lgf128mul_table +.else + // Load first tweak + ld1 {TWEAKV0.8b}, [TWEAK] + + // Load GF(2^64) multiplication table + ldr GFMUL_TABLE_Q, .Lgf64mul_table + + // Calculate second tweak, packing it together with the first + ushr TMP0.2d, TWEAKV0.2d, #63 + shl TMP1.2d, TWEAKV0.2d, #1 + tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b + eor TMP0.8b, TMP0.8b, TMP1.8b + mov TWEAKV0.d[1], TMP0.d[0] +.endif + +.Lnext_128bytes_\@: + + // Calculate XTS tweaks for next 128 bytes + _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n + _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n + _next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n + _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n + _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n + _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n + _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n + _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n + + // Load the next source blocks into {X,Y}[0-3] + ld1 {X_0.16b-Y_1.16b}, [SRC], #64 + ld1 {X_2.16b-Y_3.16b}, [SRC], #64 + + // XOR the source blocks with their XTS tweaks + eor TMP0.16b, X_0.16b, TWEAKV0.16b + eor Y_0.16b, Y_0.16b, TWEAKV1.16b + eor TMP1.16b, X_1.16b, TWEAKV2.16b + eor Y_1.16b, Y_1.16b, TWEAKV3.16b + eor TMP2.16b, X_2.16b, TWEAKV4.16b + eor Y_2.16b, Y_2.16b, TWEAKV5.16b + eor TMP3.16b, X_3.16b, TWEAKV6.16b + eor Y_3.16b, Y_3.16b, TWEAKV7.16b + + /* + * De-interleave the 'x' and 'y' elements of each block, i.e. make it so + * that the X[0-3] registers contain only the second halves of blocks, + * and the Y[0-3] registers contain only the first halves of blocks. + * (Speck uses the order (y, x) rather than the more intuitive (x, y).) 
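+ * The uzp2/uzp1 instructions below perform this split; the matching
+ * zip1/zip2 pair after the cipher rounds re-interleaves the halves.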
+ */ + uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes + uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes + uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes + uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes + uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes + uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes + uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes + uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes + + // Do the cipher rounds + mov x6, ROUND_KEYS + mov w7, NROUNDS +.Lnext_round_\@: +.if \decrypting + ld1r {ROUND_KEY.\lanes}, [x6] + sub x6, x6, #( \n / 8 ) + _speck_unround_128bytes \n, \lanes +.else + ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 ) + _speck_round_128bytes \n, \lanes +.endif + subs w7, w7, #1 + bne .Lnext_round_\@ + + // Re-interleave the 'x' and 'y' elements of each block + zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes + zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes + zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes + zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes + zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes + zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes + zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes + zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes + + // XOR the encrypted/decrypted blocks with the tweaks calculated earlier + eor X_0.16b, TMP0.16b, TWEAKV0.16b + eor Y_0.16b, Y_0.16b, TWEAKV1.16b + eor X_1.16b, TMP1.16b, TWEAKV2.16b + eor Y_1.16b, Y_1.16b, TWEAKV3.16b + eor X_2.16b, TMP2.16b, TWEAKV4.16b + eor Y_2.16b, Y_2.16b, TWEAKV5.16b + eor X_3.16b, TMP3.16b, TWEAKV6.16b + eor Y_3.16b, Y_3.16b, TWEAKV7.16b + mov TWEAKV0.16b, TWEAKV_NEXT.16b + + // Store the ciphertext in the destination buffer + st1 {X_0.16b-Y_1.16b}, [DST], #64 + st1 {X_2.16b-Y_3.16b}, [DST], #64 + + // Continue if there are more 128-byte chunks remaining + subs NBYTES, NBYTES, #128 + bne .Lnext_128bytes_\@ + + // Store the next tweak and return +.if \n == 64 + st1 {TWEAKV_NEXT.16b}, [TWEAK] +.else + st1 {TWEAKV_NEXT.8b}, [TWEAK] +.endif + ret +.endm + +ENTRY(speck128_xts_encrypt_neon) + _speck_xts_crypt n=64, lanes=2d, decrypting=0 +ENDPROC(speck128_xts_encrypt_neon) + +ENTRY(speck128_xts_decrypt_neon) + _speck_xts_crypt n=64, lanes=2d, decrypting=1 +ENDPROC(speck128_xts_decrypt_neon) + +ENTRY(speck64_xts_encrypt_neon) + _speck_xts_crypt n=32, lanes=4s, decrypting=0 +ENDPROC(speck64_xts_encrypt_neon) + +ENTRY(speck64_xts_decrypt_neon) + _speck_xts_crypt n=32, lanes=4s, decrypting=1 +ENDPROC(speck64_xts_decrypt_neon) diff --git a/arch/arm64/crypto/speck-neon-glue.c b/arch/arm64/crypto/speck-neon-glue.c new file mode 100644 index 000000000000..6e233aeb4ff4 --- /dev/null +++ b/arch/arm64/crypto/speck-neon-glue.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS + * (64-bit version; based on the 32-bit version) + * + * Copyright (c) 2018 Google, Inc + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* The assembly functions only handle multiples of 128 bytes */ +#define SPECK_NEON_CHUNK_SIZE 128 + +/* Speck128 */ + +struct speck128_xts_tfm_ctx { + struct speck128_tfm_ctx main_key; + struct speck128_tfm_ctx tweak_key; +}; + +asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *, + u8 *, const u8 *); +typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *, 
+ const void *, unsigned int, void *); + +static __always_inline int +__speck128_xts_crypt(struct skcipher_request *req, + speck128_crypt_one_t crypt_one, + speck128_xts_crypt_many_t crypt_many) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + le128 tweak; + int err; + + err = skcipher_walk_virt(&walk, req, true); + + crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + u8 *dst = walk.dst.virt.addr; + const u8 *src = walk.src.virt.addr; + + if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { + unsigned int count; + + count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); + kernel_neon_begin(); + (*crypt_many)(ctx->main_key.round_keys, + ctx->main_key.nrounds, + dst, src, count, &tweak); + kernel_neon_end(); + dst += count; + src += count; + nbytes -= count; + } + + /* Handle any remainder with generic code */ + while (nbytes >= sizeof(tweak)) { + le128_xor((le128 *)dst, (const le128 *)src, &tweak); + (*crypt_one)(&ctx->main_key, dst, dst); + le128_xor((le128 *)dst, (const le128 *)dst, &tweak); + gf128mul_x_ble(&tweak, &tweak); + + dst += sizeof(tweak); + src += sizeof(tweak); + nbytes -= sizeof(tweak); + } + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int speck128_xts_encrypt(struct skcipher_request *req) +{ + return __speck128_xts_crypt(req, crypto_speck128_encrypt, + speck128_xts_encrypt_neon); +} + +static int speck128_xts_decrypt(struct skcipher_request *req) +{ + return __speck128_xts_crypt(req, crypto_speck128_decrypt, + speck128_xts_decrypt_neon); +} + +static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + int err; + + err = xts_verify_key(tfm, key, keylen); + if (err) + return err; + + keylen /= 2; + + err = crypto_speck128_setkey(&ctx->main_key, key, keylen); + if (err) + return err; + + return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen); +} + +/* Speck64 */ + +struct speck64_xts_tfm_ctx { + struct speck64_tfm_ctx main_key; + struct speck64_tfm_ctx tweak_key; +}; + +asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *, + u8 *, const u8 *); +typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *, + const void *, unsigned int, void *); + +static __always_inline int +__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one, + speck64_xts_crypt_many_t crypt_many) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + __le64 tweak; + int err; + + err = skcipher_walk_virt(&walk, req, true); + + crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + u8 *dst = walk.dst.virt.addr; + const u8 *src = walk.src.virt.addr; + + if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { + unsigned int count; + + count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); + kernel_neon_begin(); + (*crypt_many)(ctx->main_key.round_keys, + 
ctx->main_key.nrounds, + dst, src, count, &tweak); + kernel_neon_end(); + dst += count; + src += count; + nbytes -= count; + } + + /* Handle any remainder with generic code */ + while (nbytes >= sizeof(tweak)) { + *(__le64 *)dst = *(__le64 *)src ^ tweak; + (*crypt_one)(&ctx->main_key, dst, dst); + *(__le64 *)dst ^= tweak; + tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^ + ((tweak & cpu_to_le64(1ULL << 63)) ? + 0x1B : 0)); + dst += sizeof(tweak); + src += sizeof(tweak); + nbytes -= sizeof(tweak); + } + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int speck64_xts_encrypt(struct skcipher_request *req) +{ + return __speck64_xts_crypt(req, crypto_speck64_encrypt, + speck64_xts_encrypt_neon); +} + +static int speck64_xts_decrypt(struct skcipher_request *req) +{ + return __speck64_xts_crypt(req, crypto_speck64_decrypt, + speck64_xts_decrypt_neon); +} + +static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); + int err; + + err = xts_verify_key(tfm, key, keylen); + if (err) + return err; + + keylen /= 2; + + err = crypto_speck64_setkey(&ctx->main_key, key, keylen); + if (err) + return err; + + return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen); +} + +static struct skcipher_alg speck_algs[] = { + { + .base.cra_name = "xts(speck128)", + .base.cra_driver_name = "xts-speck128-neon", + .base.cra_priority = 300, + .base.cra_blocksize = SPECK128_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = 2 * SPECK128_128_KEY_SIZE, + .max_keysize = 2 * SPECK128_256_KEY_SIZE, + .ivsize = SPECK128_BLOCK_SIZE, + .walksize = SPECK_NEON_CHUNK_SIZE, + .setkey = speck128_xts_setkey, + .encrypt = speck128_xts_encrypt, + .decrypt = speck128_xts_decrypt, + }, { + .base.cra_name = "xts(speck64)", + .base.cra_driver_name = "xts-speck64-neon", + .base.cra_priority = 300, + .base.cra_blocksize = SPECK64_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx), + .base.cra_alignmask = 7, + .base.cra_module = THIS_MODULE, + .min_keysize = 2 * SPECK64_96_KEY_SIZE, + .max_keysize = 2 * SPECK64_128_KEY_SIZE, + .ivsize = SPECK64_BLOCK_SIZE, + .walksize = SPECK_NEON_CHUNK_SIZE, + .setkey = speck64_xts_setkey, + .encrypt = speck64_xts_encrypt, + .decrypt = speck64_xts_decrypt, + } +}; + +static int __init speck_neon_module_init(void) +{ + if (!(elf_hwcap & HWCAP_ASIMD)) + return -ENODEV; + return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); +} + +static void __exit speck_neon_module_exit(void) +{ + crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); +} + +module_init(speck_neon_module_init); +module_exit(speck_neon_module_exit); + +MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Eric Biggers "); +MODULE_ALIAS_CRYPTO("xts(speck128)"); +MODULE_ALIAS_CRYPTO("xts-speck128-neon"); +MODULE_ALIAS_CRYPTO("xts(speck64)"); +MODULE_ALIAS_CRYPTO("xts-speck64-neon"); diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 4a85c6952a22..a91933b1e2e6 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -5,6 +5,8 @@ #include #include +#define ARM64_CB_PATCH ARM64_NCAPS + #ifndef __ASSEMBLY__ #include @@ -12,6 +14,8 @@ #include #include +extern int alternatives_applied; + struct alt_instr { s32 orig_offset; /* offset to original 
instruction */ s32 alt_offset; /* offset to replacement instruction */ @@ -20,12 +24,19 @@ struct alt_instr { u8 alt_len; /* size of new instruction(s), <= orig_len */ }; +typedef void (*alternative_cb_t)(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst); + void __init apply_alternatives_all(void); void apply_alternatives(void *start, size_t length); -#define ALTINSTR_ENTRY(feature) \ +#define ALTINSTR_ENTRY(feature,cb) \ " .word 661b - .\n" /* label */ \ + " .if " __stringify(cb) " == 0\n" \ " .word 663f - .\n" /* new instruction */ \ + " .else\n" \ + " .word " __stringify(cb) "- .\n" /* callback */ \ + " .endif\n" \ " .hword " __stringify(feature) "\n" /* feature bit */ \ " .byte 662b-661b\n" /* source len */ \ " .byte 664f-663f\n" /* replacement len */ @@ -43,15 +54,18 @@ void apply_alternatives(void *start, size_t length); * but most assemblers die if insn1 or insn2 have a .inst. This should * be fixed in a binutils release posterior to 2.25.51.0.2 (anything * containing commit 4e4d08cf7399b606 or c1baaddf8861). + * + * Alternatives with callbacks do not generate replacement instructions. */ -#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \ +#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \ ".if "__stringify(cfg_enabled)" == 1\n" \ "661:\n\t" \ oldinstr "\n" \ "662:\n" \ ".pushsection .altinstructions,\"a\"\n" \ - ALTINSTR_ENTRY(feature) \ + ALTINSTR_ENTRY(feature,cb) \ ".popsection\n" \ + " .if " __stringify(cb) " == 0\n" \ ".pushsection .altinstr_replacement, \"a\"\n" \ "663:\n\t" \ newinstr "\n" \ @@ -59,11 +73,17 @@ void apply_alternatives(void *start, size_t length); ".popsection\n\t" \ ".org . - (664b-663b) + (662b-661b)\n\t" \ ".org . - (662b-661b) + (664b-663b)\n" \ + ".else\n\t" \ + "663:\n\t" \ + "664:\n\t" \ + ".endif\n" \ ".endif\n" #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \ - __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg)) + __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0) +#define ALTERNATIVE_CB(oldinstr, cb) \ + __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb) #else #include @@ -130,6 +150,14 @@ void apply_alternatives(void *start, size_t length); 661: .endm +.macro alternative_cb cb + .set .Lasm_alt_mode, 0 + .pushsection .altinstructions, "a" + altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0 + .popsection +661: +.endm + /* * Provide the other half of the alternative code sequence. */ @@ -155,6 +183,13 @@ void apply_alternatives(void *start, size_t length); .org . - (662b-661b) + (664b-663b) .endm +/* + * Callback-based alternative epilogue + */ +.macro alternative_cb_end +662: +.endm + /* * Provides a trivial alternative or default sequence consisting solely * of NOPs. 
The number of NOPs is chosen automatically to match the diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index b3da6c886835..dd49c3567f20 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -4,6 +4,7 @@ #include #include +#include #include #include @@ -13,51 +14,62 @@ #ifdef CONFIG_ARM64_SW_TTBR0_PAN .macro __uaccess_ttbr0_disable, tmp1 mrs \tmp1, ttbr1_el1 // swapper_pg_dir + bic \tmp1, \tmp1, #TTBR_ASID_MASK add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 isb + sub \tmp1, \tmp1, #SWAPPER_DIR_SIZE + msr ttbr1_el1, \tmp1 // set reserved ASID + isb .endm - .macro __uaccess_ttbr0_enable, tmp1 + .macro __uaccess_ttbr0_enable, tmp1, tmp2 get_thread_info \tmp1 ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1 + mrs \tmp2, ttbr1_el1 + extr \tmp2, \tmp2, \tmp1, #48 + ror \tmp2, \tmp2, #16 + msr ttbr1_el1, \tmp2 // set the active ASID + isb msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1 isb .endm - .macro uaccess_ttbr0_disable, tmp1 + .macro uaccess_ttbr0_disable, tmp1, tmp2 alternative_if_not ARM64_HAS_PAN + save_and_disable_irq \tmp2 // avoid preemption __uaccess_ttbr0_disable \tmp1 + restore_irq \tmp2 alternative_else_nop_endif .endm - .macro uaccess_ttbr0_enable, tmp1, tmp2 + .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3 alternative_if_not ARM64_HAS_PAN - save_and_disable_irq \tmp2 // avoid preemption - __uaccess_ttbr0_enable \tmp1 - restore_irq \tmp2 + save_and_disable_irq \tmp3 // avoid preemption + __uaccess_ttbr0_enable \tmp1, \tmp2 + restore_irq \tmp3 alternative_else_nop_endif .endm #else - .macro uaccess_ttbr0_disable, tmp1 + .macro uaccess_ttbr0_disable, tmp1, tmp2 .endm - .macro uaccess_ttbr0_enable, tmp1, tmp2 + .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3 .endm #endif /* * These macros are no-ops when UAO is present. */ - .macro uaccess_disable_not_uao, tmp1 - uaccess_ttbr0_disable \tmp1 + .macro uaccess_disable_not_uao, tmp1, tmp2 + uaccess_ttbr0_disable \tmp1, \tmp2 alternative_if ARM64_ALT_PAN_NOT_UAO SET_PSTATE_PAN(1) alternative_else_nop_endif .endm - .macro uaccess_enable_not_uao, tmp1, tmp2 - uaccess_ttbr0_enable \tmp1, \tmp2 + .macro uaccess_enable_not_uao, tmp1, tmp2, tmp3 + uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3 alternative_if ARM64_ALT_PAN_NOT_UAO SET_PSTATE_PAN(0) alternative_else_nop_endif diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index d58a6253c6ab..66aea4aa455d 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -25,7 +25,7 @@ #include #include -#include +#include #include #include #include @@ -96,6 +96,24 @@ dmb \opt .endm +/* + * Value prediction barrier + */ + .macro csdb + hint #20 + .endm + +/* + * Sanitise a 64-bit bounded index wrt speculation, returning zero if out + * of bounds. 
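+ * In effect: \idx = (\idx < \limit) ? \idx : 0, computed without a
+ * branch so the clamp also holds under speculation; the trailing csdb
+ * keeps speculative execution from consuming an unclamped value.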
+ */ + .macro mask_nospec64, idx, limit, tmp + sub \tmp, \idx, \limit + bic \tmp, \tmp, \idx + and \idx, \idx, \tmp, asr #63 + csdb + .endm + /* * NOP sequence */ @@ -242,7 +260,11 @@ lr .req x30 // link register #else adr_l \dst, \sym #endif +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN mrs \tmp, tpidr_el1 +alternative_else + mrs \tmp, tpidr_el2 +alternative_endif add \dst, \dst, \tmp .endm @@ -253,7 +275,11 @@ lr .req x30 // link register */ .macro ldr_this_cpu dst, sym, tmp adr_l \dst, \sym +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN mrs \tmp, tpidr_el1 +alternative_else + mrs \tmp, tpidr_el2 +alternative_endif ldr \dst, [\dst, \tmp] .endm @@ -464,39 +490,57 @@ alternative_endif mrs \rd, sp_el0 .endm -/* - * Errata workaround prior to TTBR0_EL1 update - * - * val: TTBR value with new BADDR, preserved - * tmp0: temporary register, clobbered - * tmp1: other temporary register, clobbered +/** + * Errata workaround prior to disabling the MMU. Insert an ISB immediately + * before executing the MSR that will change SCTLR_ELn[M] from 1 to 0. */ - .macro pre_ttbr0_update_workaround, val, tmp0, tmp1 -#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 -alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003 - mrs \tmp0, ttbr0_el1 - mov \tmp1, #FALKOR_RESERVED_ASID - bfi \tmp0, \tmp1, #48, #16 // reserved ASID + old BADDR - msr ttbr0_el1, \tmp0 + .macro pre_disable_mmu_workaround +#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041 isb - bfi \tmp0, \val, #0, #48 // reserved ASID + new BADDR - msr ttbr0_el1, \tmp0 - isb -alternative_else_nop_endif #endif .endm + .macro pte_to_phys, phys, pte + and \phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT) + .endm + /* - * Errata workaround post TTBR0_EL1 update. + * Check the MIDR_EL1 of the current CPU for a given model and a range of + * variant/revision. See asm/cputype.h for the macros used below. + * + * model: MIDR_CPU_MODEL of CPU + * rv_min: Minimum of MIDR_CPU_VAR_REV() + * rv_max: Maximum of MIDR_CPU_VAR_REV() + * res: Result register. + * tmp1, tmp2, tmp3: Temporary registers + * + * Corrupts: res, tmp1, tmp2, tmp3 + * Returns: 0 if the CPU id doesn't match, non-zero otherwise */ - .macro post_ttbr0_update_workaround -#ifdef CONFIG_CAVIUM_ERRATUM_27456 -alternative_if ARM64_WORKAROUND_CAVIUM_27456 - ic iallu - dsb nsh - isb -alternative_else_nop_endif -#endif + .macro cpu_midr_match model, rv_min, rv_max, res, tmp1, tmp2, tmp3 + mrs \res, midr_el1 + mov_q \tmp1, (MIDR_REVISION_MASK | MIDR_VARIANT_MASK) + mov_q \tmp2, MIDR_CPU_MODEL_MASK + and \tmp3, \res, \tmp2 // Extract model + and \tmp1, \res, \tmp1 // rev & variant + mov_q \tmp2, \model + cmp \tmp3, \tmp2 + cset \res, eq + cbz \res, .Ldone\@ // Model matches?
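+ // Reached only when the model matched; now bound variant/revision
+ // against rv_min/rv_max.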
+ + .if (\rv_min != 0) // Skip min check if rv_min == 0 + mov_q \tmp3, \rv_min + cmp \tmp1, \tmp3 + cset \res, ge + .endif // \rv_min != 0 + /* Skip rv_max check if rv_min == rv_max && rv_min != 0 */ + .if ((\rv_min != \rv_max) || \rv_min == 0) + mov_q \tmp2, \rv_max + cmp \tmp1, \tmp2 + cset \tmp2, le + and \res, \res, \tmp2 + .endif +.Ldone\@: .endm #endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index 9ef0797380cb..f9b0b09153e0 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -117,7 +117,7 @@ static inline void atomic_and(int i, atomic_t *v) /* LSE atomics */ " mvn %w[i], %w[i]\n" " stclr %w[i], %[v]") - : [i] "+r" (w0), [v] "+Q" (v->counter) + : [i] "+&r" (w0), [v] "+Q" (v->counter) : "r" (x1) : __LL_SC_CLOBBERS); } @@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \ /* LSE atomics */ \ " mvn %w[i], %w[i]\n" \ " ldclr" #mb " %w[i], %w[i], %[v]") \ - : [i] "+r" (w0), [v] "+Q" (v->counter) \ + : [i] "+&r" (w0), [v] "+Q" (v->counter) \ : "r" (x1) \ : __LL_SC_CLOBBERS, ##cl); \ \ @@ -161,7 +161,7 @@ static inline void atomic_sub(int i, atomic_t *v) /* LSE atomics */ " neg %w[i], %w[i]\n" " stadd %w[i], %[v]") - : [i] "+r" (w0), [v] "+Q" (v->counter) + : [i] "+&r" (w0), [v] "+Q" (v->counter) : "r" (x1) : __LL_SC_CLOBBERS); } @@ -180,7 +180,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \ " neg %w[i], %w[i]\n" \ " ldadd" #mb " %w[i], w30, %[v]\n" \ " add %w[i], %w[i], w30") \ - : [i] "+r" (w0), [v] "+Q" (v->counter) \ + : [i] "+&r" (w0), [v] "+Q" (v->counter) \ : "r" (x1) \ : __LL_SC_CLOBBERS , ##cl); \ \ @@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \ /* LSE atomics */ \ " neg %w[i], %w[i]\n" \ " ldadd" #mb " %w[i], %w[i], %[v]") \ - : [i] "+r" (w0), [v] "+Q" (v->counter) \ + : [i] "+&r" (w0), [v] "+Q" (v->counter) \ : "r" (x1) \ : __LL_SC_CLOBBERS, ##cl); \ \ @@ -314,7 +314,7 @@ static inline void atomic64_and(long i, atomic64_t *v) /* LSE atomics */ " mvn %[i], %[i]\n" " stclr %[i], %[v]") - : [i] "+r" (x0), [v] "+Q" (v->counter) + : [i] "+&r" (x0), [v] "+Q" (v->counter) : "r" (x1) : __LL_SC_CLOBBERS); } @@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \ /* LSE atomics */ \ " mvn %[i], %[i]\n" \ " ldclr" #mb " %[i], %[i], %[v]") \ - : [i] "+r" (x0), [v] "+Q" (v->counter) \ + : [i] "+&r" (x0), [v] "+Q" (v->counter) \ : "r" (x1) \ : __LL_SC_CLOBBERS, ##cl); \ \ @@ -358,7 +358,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) /* LSE atomics */ " neg %[i], %[i]\n" " stadd %[i], %[v]") - : [i] "+r" (x0), [v] "+Q" (v->counter) + : [i] "+&r" (x0), [v] "+Q" (v->counter) : "r" (x1) : __LL_SC_CLOBBERS); } @@ -377,7 +377,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \ " neg %[i], %[i]\n" \ " ldadd" #mb " %[i], x30, %[v]\n" \ " add %[i], %[i], x30") \ - : [i] "+r" (x0), [v] "+Q" (v->counter) \ + : [i] "+&r" (x0), [v] "+Q" (v->counter) \ : "r" (x1) \ : __LL_SC_CLOBBERS, ##cl); \ \ @@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \ /* LSE atomics */ \ " neg %[i], %[i]\n" \ " ldadd" #mb " %[i], %[i], %[v]") \ - : [i] "+r" (x0), [v] "+Q" (v->counter) \ + : [i] "+&r" (x0), [v] "+Q" (v->counter) \ : "r" (x1) \ : __LL_SC_CLOBBERS, ##cl); \ \ @@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) " sub x30, x30, %[ret]\n" " cbnz x30, 1b\n" "2:") - : [ret] "+r" (x0), [v] "+Q" 
(v->counter) + : [ret] "+&r" (x0), [v] "+Q" (v->counter) : : __LL_SC_CLOBBERS, "cc", "memory"); @@ -516,7 +516,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \ " eor %[old1], %[old1], %[oldval1]\n" \ " eor %[old2], %[old2], %[oldval2]\n" \ " orr %[old1], %[old1], %[old2]") \ - : [old1] "+r" (x0), [old2] "+r" (x1), \ + : [old1] "+&r" (x0), [old2] "+&r" (x1), \ [v] "+Q" (*(unsigned long *)ptr) \ : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \ [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \ diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 0fe7e43b7fbc..0b0755c961ac 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -31,6 +31,8 @@ #define dmb(opt) asm volatile("dmb " #opt : : : "memory") #define dsb(opt) asm volatile("dsb " #opt : : : "memory") +#define csdb() asm volatile("hint #20" : : : "memory") + #define mb() dsb(sy) #define rmb() dsb(ld) #define wmb() dsb(st) @@ -38,6 +40,27 @@ #define dma_rmb() dmb(oshld) #define dma_wmb() dmb(oshst) +/* + * Generate a mask for array_index_nospec() that is ~0UL when 0 <= idx < sz + * and 0 otherwise. + */ +#define array_index_mask_nospec array_index_mask_nospec +static inline unsigned long array_index_mask_nospec(unsigned long idx, + unsigned long sz) +{ + unsigned long mask; + + asm volatile( + " cmp %1, %2\n" + " sbc %0, xzr, xzr\n" + : "=r" (mask) + : "r" (idx), "Ir" (sz) + : "cc"); + + csdb(); + return mask; +} + #define __smp_mb() dmb(ish) #define __smp_rmb() dmb(ishld) #define __smp_wmb() dmb(ishst) diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index ae852add053d..0f2e1ab5e166 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -229,7 +229,9 @@ static inline void __cmpwait_case_##name(volatile void *ptr, \ unsigned long tmp; \ \ asm volatile( \ - " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \ + " sevl\n" \ + " wfe\n" \ + " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \ " eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \ " cbnz %" #w "[tmp], 1f\n" \ " wfe\n" \ diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index e39d487bf724..a3c7f271ad4c 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -215,7 +215,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL /* * A pointer passed in from user mode.
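 * (It is carried as a 32-bit compat_uptr_t; compat_ptr() widens it to a
 * 64-bit __user pointer.)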
This should not diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 8da621627d7c..76c0d23ca161 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -40,7 +40,11 @@ #define ARM64_WORKAROUND_858921 19 #define ARM64_WORKAROUND_CAVIUM_30115 20 #define ARM64_HAS_DCPOP 21 +#define ARM64_UNMAP_KERNEL_AT_EL0 23 +#define ARM64_HARDEN_BRANCH_PREDICTOR 24 +#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25 +#define ARM64_SSBD 26 -#define ARM64_NCAPS 22 +#define ARM64_NCAPS 27 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 428ee1f2468c..c5bc80a03515 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -262,6 +262,28 @@ static inline bool system_uses_ttbr0_pan(void) !cpus_have_const_cap(ARM64_HAS_PAN); } +#define ARM64_SSBD_UNKNOWN -1 +#define ARM64_SSBD_FORCE_DISABLE 0 +#define ARM64_SSBD_KERNEL 1 +#define ARM64_SSBD_FORCE_ENABLE 2 +#define ARM64_SSBD_MITIGATED 3 + +static inline int arm64_get_ssbd_state(void) +{ +#ifdef CONFIG_ARM64_SSBD + extern int ssbd_state; + return ssbd_state; +#else + return ARM64_SSBD_UNKNOWN; +#endif +} + +#ifdef CONFIG_ARM64_SSBD +void arm64_set_ssbd_mitigation(bool state); +#else +static inline void arm64_set_ssbd_mitigation(bool state) {} +#endif + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 235e77d98261..04569aa267fd 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -75,30 +75,49 @@ #define ARM_CPU_IMP_CAVIUM 0x43 #define ARM_CPU_IMP_BRCM 0x42 #define ARM_CPU_IMP_QCOM 0x51 +#define ARM_CPU_IMP_NVIDIA 0x4E #define ARM_CPU_PART_AEM_V8 0xD0F #define ARM_CPU_PART_FOUNDATION 0xD00 +#define ARM_CPU_PART_CORTEX_A55 0xD05 #define ARM_CPU_PART_CORTEX_A57 0xD07 +#define ARM_CPU_PART_CORTEX_A72 0xD08 #define ARM_CPU_PART_CORTEX_A53 0xD03 #define ARM_CPU_PART_CORTEX_A73 0xD09 +#define ARM_CPU_PART_CORTEX_A75 0xD0A #define APM_CPU_PART_POTENZA 0x000 #define CAVIUM_CPU_PART_THUNDERX 0x0A1 #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2 #define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3 +#define CAVIUM_CPU_PART_THUNDERX2 0x0AF #define BRCM_CPU_PART_VULCAN 0x516 #define QCOM_CPU_PART_FALKOR_V1 0x800 +#define QCOM_CPU_PART_FALKOR 0xC00 +#define QCOM_CPU_PART_KRYO 0x200 + +#define NVIDIA_CPU_PART_DENVER 0x003 +#define NVIDIA_CPU_PART_CARMEL 0x004 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) +#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) +#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) +#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) +#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2) +#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN) #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1) +#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, 
QCOM_CPU_PART_FALKOR) +#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) +#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER) +#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 650344d01124..8389050328bb 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -121,22 +121,22 @@ static inline void efi_set_pgd(struct mm_struct *mm) if (mm != current->active_mm) { /* * Update the current thread's saved ttbr0 since it is - * restored as part of a return from exception. Set - * the hardware TTBR0_EL1 using cpu_switch_mm() - * directly to enable potential errata workarounds. + * restored as part of a return from exception. Enable + * access to the valid TTBR0_EL1 and invoke the errata + * workaround directly since there is no return from + * exception when invoking the EFI run-time services. */ update_saved_ttbr0(current, mm); - cpu_switch_mm(mm->pgd, mm); + uaccess_ttbr0_enable(); + post_ttbr_update_workaround(); } else { /* * Defer the switch to the current thread's TTBR0_EL1 * until uaccess_enable(). Restore the current * thread's saved ttbr0 corresponding to its active_mm - * (if different from init_mm). */ - cpu_set_reserved_ttbr0(); - if (current->active_mm != &init_mm) - update_saved_ttbr0(current, current->active_mm); + uaccess_ttbr0_disable(); + update_saved_ttbr0(current, current->active_mm); } } } diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 33be513ef24c..36d9863cb3cb 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -173,7 +173,7 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, #ifdef CONFIG_COMPAT /* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */ -#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL +#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) /* AArch32 registers. 
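 * COMPAT_ELF_NGREG covers r0-r15 plus cpsr and orig_r0, hence the 18 entries
 * defined below.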
*/ #define COMPAT_ELF_NGREG 18 diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index caf86be815ba..ec1e6d6fa14c 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -51,6 +51,18 @@ enum fixed_addresses { FIX_EARLYCON_MEM_BASE, FIX_TEXT_POKE0, + +#ifdef CONFIG_ACPI_APEI_GHES + /* Used for GHES mapping from assorted contexts */ + FIX_APEI_GHES_IRQ, + FIX_APEI_GHES_NMI, +#endif /* CONFIG_ACPI_APEI_GHES */ + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + FIX_ENTRY_TRAMP_DATA, + FIX_ENTRY_TRAMP_TEXT, +#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT)) +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ __end_of_permanent_fixed_addresses, /* diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 5bb2fd4674e7..07fe2479d310 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -48,9 +48,10 @@ do { \ } while (0) static inline int -arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) +arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr) { int oldval = 0, ret, tmp; + u32 __user *uaddr = __uaccess_mask_ptr(_uaddr); pagefault_disable(); @@ -88,15 +89,17 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, u32 oldval, u32 newval) { int ret = 0; u32 val, tmp; + u32 __user *uaddr; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32))) return -EFAULT; + uaddr = __uaccess_mask_ptr(_uaddr); uaccess_enable(); asm volatile("// futex_atomic_cmpxchg_inatomic\n" " prfm pstl1strm, %2\n" diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 61d694c2eae5..555d463c0eaa 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -170,8 +170,7 @@ #define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS) #define VTTBR_X (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA) -#define VTTBR_BADDR_SHIFT (VTTBR_X - 1) -#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) +#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X) #define VTTBR_VMID_SHIFT (UL(48)) #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 26a64d0f9ab9..1a6d02350fc6 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -33,6 +33,10 @@ #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0 #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT) +#define VCPU_WORKAROUND_2_FLAG_SHIFT 0 +#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT) + +/* Translate a kernel address of @sym into its equivalent linear mapping */ #define kvm_ksym_ref(sym) \ ({ \ void *val = &sym; \ @@ -66,6 +70,45 @@ extern u32 __kvm_get_mdcr_el2(void); extern u32 __init_stage2_translation(void); +extern void __qcom_hyp_sanitize_btac_predictors(void); + +/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */ +#define __hyp_this_cpu_ptr(sym) \ + ({ \ + void *__ptr = hyp_symbol_addr(sym); \ + __ptr += read_sysreg(tpidr_el2); \ + (typeof(&sym))__ptr; \ + }) + +#define __hyp_this_cpu_read(sym) \ + ({ \ + *__hyp_this_cpu_ptr(sym); \ + }) + +#else /* __ASSEMBLY__ */ + +.macro hyp_adr_this_cpu reg, sym, tmp + adr_l \reg, 
\sym + mrs \tmp, tpidr_el2 + add \reg, \reg, \tmp +.endm + +.macro hyp_ldr_this_cpu reg, sym, tmp + adr_l \reg, \sym + mrs \tmp, tpidr_el2 + ldr \reg, [\reg, \tmp] +.endm + +.macro get_host_ctxt reg, tmp + hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp +.endm + +.macro get_vcpu_ptr vcpu, ctxt + get_host_ctxt \ctxt, \vcpu + ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] + kern_hyp_va \vcpu +.endm + #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index e923b58606e2..b01ad3489bd8 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -71,6 +71,9 @@ struct kvm_arch { /* Interrupt controller */ struct vgic_dist vgic; + + /* Mandated version of PSCI */ + u32 psci_version; }; #define KVM_NR_MEM_OBJS 40 @@ -191,6 +194,8 @@ struct kvm_cpu_context { u64 sys_regs[NR_SYS_REGS]; u32 copro[NR_COPRO_REGS]; }; + + struct kvm_vcpu *__hyp_running_vcpu; }; typedef struct kvm_cpu_context kvm_cpu_context_t; @@ -205,6 +210,9 @@ struct kvm_vcpu_arch { /* Exception Information */ struct kvm_vcpu_fault_info fault; + /* State of various workarounds, see kvm_asm.h for bit assignment */ + u64 workaround_flags; + /* Guest debug state */ u64 debug_flags; @@ -345,10 +353,15 @@ int kvm_perf_teardown(void); struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); +void __kvm_set_tpidr_el2(u64 tpidr_el2); +DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state); + static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, unsigned long hyp_stack_ptr, unsigned long vector_ptr) { + u64 tpidr_el2; + /* * Call initialization code, and switch to the full blown HYP code. * If the cpucaps haven't been finalized yet, something has gone very @@ -357,6 +370,16 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, */ BUG_ON(!static_branch_likely(&arm64_const_caps_ready)); __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr); + + /* + * Calculate the raw per-cpu offset without a translation from the + * kernel's mapping to the linear mapping, and store it in tpidr_el2 + * so that we can use adr_l to access per-cpu variables in EL2. 
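+ * In effect (a sketch, not literal code): tpidr_el2 ends up holding
+ * this_cpu_ptr(&kvm_host_cpu_state) - lm_alias(&kvm_host_cpu_state), so a
+ * hyp-side sequence such as "adr_l x0, sym; mrs x1, tpidr_el2;
+ * add x0, x0, x1" resolves to this CPU's copy of sym.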
+ */ + tpidr_el2 = (u64)this_cpu_ptr(&kvm_host_cpu_state) + - (u64)kvm_ksym_ref(kvm_host_cpu_state); + + kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2); } static inline void kvm_arch_hardware_unsetup(void) {} @@ -384,4 +407,32 @@ static inline void __cpu_init_stage2(void) "PARange is %d bits, unsupported configuration!", parange); } +static inline bool kvm_arm_harden_branch_predictor(void) +{ + return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR); +} + +#define KVM_SSBD_UNKNOWN -1 +#define KVM_SSBD_FORCE_DISABLE 0 +#define KVM_SSBD_KERNEL 1 +#define KVM_SSBD_FORCE_ENABLE 2 +#define KVM_SSBD_MITIGATED 3 + +static inline int kvm_arm_have_ssbd(void) +{ + switch (arm64_get_ssbd_state()) { + case ARM64_SSBD_FORCE_DISABLE: + return KVM_SSBD_FORCE_DISABLE; + case ARM64_SSBD_KERNEL: + return KVM_SSBD_KERNEL; + case ARM64_SSBD_FORCE_ENABLE: + return KVM_SSBD_FORCE_ENABLE; + case ARM64_SSBD_MITIGATED: + return KVM_SSBD_MITIGATED; + case ARM64_SSBD_UNKNOWN: + default: + return KVM_SSBD_UNKNOWN; + } +} + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 4572a9b560fa..20bfb8e676e0 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -29,7 +29,9 @@ ({ \ u64 reg; \ asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\ - "mrs_s %0, " __stringify(r##vh),\ + DEFINE_MRS_S \ + "mrs_s %0, " __stringify(r##vh) "\n"\ + UNDEFINE_MRS_S, \ ARM64_HAS_VIRT_HOST_EXTN) \ : "=r" (reg)); \ reg; \ @@ -39,7 +41,9 @@ do { \ u64 __val = (u64)(v); \ asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\ - "msr_s " __stringify(r##vh) ", %x0",\ + DEFINE_MSR_S \ + "msr_s " __stringify(r##vh) ", %x0\n"\ + UNDEFINE_MSR_S, \ ARM64_HAS_VIRT_HOST_EXTN) \ : : "rZ" (__val)); \ } while (0) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 672c8684d5c2..e42c1f0ae6cf 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -130,6 +130,26 @@ static inline unsigned long __kern_hyp_va(unsigned long v) #define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v)))) +/* + * Obtain the PC-relative address of a kernel symbol + * s: symbol + * + * The goal of this macro is to return a symbol's address based on a + * PC-relative computation, as opposed to loading the VA from a + * constant pool or something similar. This works well for HYP, as an + * absolute VA is guaranteed to be wrong. Only use this if trying to + * obtain the address of a symbol (i.e. not something you obtained by + * following a pointer). + */ +#define hyp_symbol_addr(s) \ + ({ \ + typeof(s) *addr; \ + asm("adrp %0, %1\n" \ + "add %0, %0, :lo12:%1\n" \ + : "=r" (addr) : "S" (&s)); \ + addr; \ + }) + /* * We currently only support a 40bit IPA. */ @@ -309,5 +329,83 @@ static inline unsigned int kvm_get_vmid_bits(void) return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8; } +/* + * We are not in the kvm->srcu critical section most of the time, so we take + * the SRCU read lock here. Since we copy the data from the user page, we + * can immediately drop the lock again.
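+ * (Callers such as the vgic ITS table walkers rely on this helper from
+ * contexts that do not already hold the srcu read lock.)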
+ */ +static inline int kvm_read_guest_lock(struct kvm *kvm, + gpa_t gpa, void *data, unsigned long len) +{ + int srcu_idx = srcu_read_lock(&kvm->srcu); + int ret = kvm_read_guest(kvm, gpa, data, len); + + srcu_read_unlock(&kvm->srcu, srcu_idx); + + return ret; +} + +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +#include + +static inline void *kvm_get_hyp_vector(void) +{ + struct bp_hardening_data *data = arm64_get_bp_hardening_data(); + void *vect = kvm_ksym_ref(__kvm_hyp_vector); + + if (data->fn) { + vect = __bp_harden_hyp_vecs_start + + data->hyp_vectors_slot * SZ_2K; + + if (!has_vhe()) + vect = lm_alias(vect); + } + + return vect; +} + +static inline int kvm_map_vectors(void) +{ + return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start), + kvm_ksym_ref(__bp_harden_hyp_vecs_end), + PAGE_HYP_EXEC); +} + +#else +static inline void *kvm_get_hyp_vector(void) +{ + return kvm_ksym_ref(__kvm_hyp_vector); +} + +static inline int kvm_map_vectors(void) +{ + return 0; +} +#endif + +#ifdef CONFIG_ARM64_SSBD +DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); + +static inline int hyp_map_aux_data(void) +{ + int cpu, err; + + for_each_possible_cpu(cpu) { + u64 *ptr; + + ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu); + err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP); + if (err) + return err; + } + return 0; +} +#else +static inline int hyp_map_aux_data(void) +{ + return 0; +} +#endif + #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h deleted file mode 100644 index bc39e557c56c..000000000000 --- a/arch/arm64/include/asm/kvm_psci.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (C) 2012,2013 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef __ARM64_KVM_PSCI_H__ -#define __ARM64_KVM_PSCI_H__ - -#define KVM_ARM_PSCI_0_1 1 -#define KVM_ARM_PSCI_0_2 2 - -int kvm_psci_version(struct kvm_vcpu *vcpu); -int kvm_psci_call(struct kvm_vcpu *vcpu); - -#endif /* __ARM64_KVM_PSCI_H__ */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index f7c4d2146aed..d4bae7d6e0d8 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -61,8 +61,6 @@ * KIMAGE_VADDR - the virtual address of the start of the kernel image * VA_BITS - the maximum number of bits for virtual addresses. * VA_START - the first kernel virtual address. - * TASK_SIZE - the maximum size of a user space task. - * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. */ #define VA_BITS (CONFIG_ARM64_VA_BITS) #define VA_START (UL(0xffffffffffffffff) - \ @@ -77,19 +75,6 @@ #define PCI_IO_END (VMEMMAP_START - SZ_2M) #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) #define FIXADDR_TOP (PCI_IO_START - SZ_2M) -#define TASK_SIZE_64 (UL(1) << VA_BITS) - -#ifdef CONFIG_COMPAT -#define TASK_SIZE_32 UL(0x100000000) -#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? 
\ - TASK_SIZE_32 : TASK_SIZE_64) -#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ - TASK_SIZE_32 : TASK_SIZE_64) -#else -#define TASK_SIZE TASK_SIZE_64 -#endif /* CONFIG_COMPAT */ - -#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4)) #define KERNEL_START _text #define KERNEL_END _end diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 0d34bf0a89c7..6dd83d75b82a 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -17,6 +17,10 @@ #define __ASM_MMU_H #define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */ +#define USER_ASID_FLAG (UL(1) << 48) +#define TTBR_ASID_MASK (UL(0xffff) << 48) + +#ifndef __ASSEMBLY__ typedef struct { atomic64_t id; @@ -31,6 +35,49 @@ typedef struct { */ #define ASID(mm) ((mm)->context.id.counter & 0xffff) +static inline bool arm64_kernel_unmapped_at_el0(void) +{ + return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && + cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); +} + +typedef void (*bp_hardening_cb_t)(void); + +struct bp_hardening_data { + int hyp_vectors_slot; + bp_hardening_cb_t fn; +}; + +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[]; + +DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); + +static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void) +{ + return this_cpu_ptr(&bp_hardening_data); +} + +static inline void arm64_apply_bp_hardening(void) +{ + struct bp_hardening_data *d; + + if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) + return; + + d = arm64_get_bp_hardening_data(); + if (d->fn) + d->fn(); +} +#else +static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void) +{ + return NULL; +} + +static inline void arm64_apply_bp_hardening(void) { } +#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ + extern void paging_init(void); extern void bootmem_init(void); extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); @@ -41,4 +88,5 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, extern void *fixmap_remap_fdt(phys_addr_t dt_phys); extern void mark_linear_text_alias_ro(void); +#endif /* !__ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 3257895a9b5e..f7ff06580721 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -19,8 +19,6 @@ #ifndef __ASM_MMU_CONTEXT_H #define __ASM_MMU_CONTEXT_H -#define FALKOR_RESERVED_ASID 1 - #ifndef __ASSEMBLY__ #include @@ -57,6 +55,13 @@ static inline void cpu_set_reserved_ttbr0(void) isb(); } +static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm) +{ + BUG_ON(pgd == swapper_pg_dir); + cpu_set_reserved_ttbr0(); + cpu_do_switch_mm(virt_to_phys(pgd),mm); +} + /* * TCR.T0SZ value to use when the ID map is active. Usually equals * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in @@ -127,7 +132,7 @@ static inline void cpu_install_idmap(void) * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD, * avoiding the possibility of conflicting TLB entries being allocated. 
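 * (Broadly: idmap_cpu_replace_ttbr1 runs from the identity map, with
 * TTBR1_EL1 parked on a reserved table while the switch takes place.)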
*/ -static inline void cpu_replace_ttbr1(pgd_t *pgd) +static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgd) { typedef void (ttbr_replace_func)(phys_addr_t); extern ttbr_replace_func idmap_cpu_replace_ttbr1; @@ -156,29 +161,21 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu); #define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; }) -/* - * This is called when "tsk" is about to enter lazy TLB mode. - * - * mm: describes the currently active mm context - * tsk: task which is entering lazy tlb - * cpu: cpu number which is entering lazy tlb - * - * tsk->mm will be NULL - */ -static inline void -enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) -{ -} - #ifdef CONFIG_ARM64_SW_TTBR0_PAN static inline void update_saved_ttbr0(struct task_struct *tsk, struct mm_struct *mm) { - if (system_uses_ttbr0_pan()) { - BUG_ON(mm->pgd == swapper_pg_dir); - task_thread_info(tsk)->ttbr0 = - virt_to_phys(mm->pgd) | ASID(mm) << 48; - } + u64 ttbr; + + if (!system_uses_ttbr0_pan()) + return; + + if (mm == &init_mm) + ttbr = __pa_symbol(empty_zero_page); + else + ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48; + + WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr); } #else static inline void update_saved_ttbr0(struct task_struct *tsk, @@ -187,6 +184,16 @@ static inline void update_saved_ttbr0(struct task_struct *tsk, } #endif +static inline void +enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) +{ + /* + * We don't actually care about the ttbr0 mapping, so point it at the + * zero page. + */ + update_saved_ttbr0(tsk, &init_mm); +} + static inline void __switch_mm(struct mm_struct *next) { unsigned int cpu = smp_processor_id(); @@ -214,17 +221,16 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, * Update the saved TTBR0_EL1 of the scheduled-in task as the previous * value may have not been initialised yet (activate_mm caller) or the * ASID has changed since the last run (following the context switch - * of another thread of the same process). Avoid setting the reserved - * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit). + * of another thread of the same process). */ - if (next != &init_mm) - update_saved_ttbr0(tsk, next); + update_saved_ttbr0(tsk, next); } #define deactivate_mm(tsk,mm) do { } while (0) #define activate_mm(prev,next) switch_mm(prev, next, current) void verify_cpu_asid_bits(void); +void post_ttbr_update_workaround(void); #endif /* !__ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index 19bd97671bb8..4f766178fa6f 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -32,7 +32,7 @@ struct mod_arch_specific { struct mod_plt_sec init; /* for CONFIG_DYNAMIC_FTRACE */ - void *ftrace_trampoline; + struct plt_entry *ftrace_trampoline; }; #endif @@ -45,4 +45,48 @@ extern u64 module_alloc_base; #define module_alloc_base ((u64)_etext - MODULES_VSIZE) #endif +struct plt_entry { + /* + * A program that conforms to the AArch64 Procedure Call Standard + * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or + * IP1 (x17) may be inserted at any branch instruction that is + * exposed to a relocation that supports long branches. Since that + * is exactly what we are dealing with here, we are free to use x16 + * as a scratch register in the PLT veneers. + */ + __le32 mov0; /* movn x16, #0x.... 
*/ + __le32 mov1; /* movk x16, #0x...., lsl #16 */ + __le32 mov2; /* movk x16, #0x...., lsl #32 */ + __le32 br; /* br x16 */ +}; + +static inline struct plt_entry get_plt_entry(u64 val) +{ + /* + * MOVK/MOVN/MOVZ opcode: + * +--------+------------+--------+-----------+-------------+---------+ + * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] | + * +--------+------------+--------+-----------+-------------+---------+ + * + * Rd := 0x10 (x16) + * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32) + * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ) + * sf := 1 (64-bit variant) + */ + return (struct plt_entry){ + cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5), + cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5), + cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5), + cpu_to_le32(0xd61f0200) + }; +} + +static inline bool plt_entries_equal(const struct plt_entry *a, + const struct plt_entry *b) +{ + return a->mov0 == b->mov0 && + a->mov1 == b->mov1 && + a->mov2 == b->mov2; +} + #endif /* __ASM_MODULE_H */ diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 3bd498e4de4c..43393208229e 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -16,11 +16,15 @@ #ifndef __ASM_PERCPU_H #define __ASM_PERCPU_H +#include #include static inline void set_my_cpu_offset(unsigned long off) { - asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory"); + asm volatile(ALTERNATIVE("msr tpidr_el1, %0", + "msr tpidr_el2, %0", + ARM64_HAS_VIRT_HOST_EXTN) + :: "r" (off) : "memory"); } static inline unsigned long __my_cpu_offset(void) @@ -31,7 +35,10 @@ static inline unsigned long __my_cpu_offset(void) * We want to allow caching the value, so avoid using volatile and * instead use a fake stack read to hazard against barrier(). */ - asm("mrs %0, tpidr_el1" : "=r" (off) : + asm(ALTERNATIVE("mrs %0, tpidr_el1", + "mrs %0, tpidr_el2", + ARM64_HAS_VIRT_HOST_EXTN) + : "=r" (off) : "Q" (*(const unsigned long *)current_stack_pointer)); return off; diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index d25f4f137c2a..5ca6a573a701 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -26,7 +26,7 @@ #define check_pgt_cache() do { } while (0) -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO) #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) #if CONFIG_PGTABLE_LEVELS > 2 diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index eb0c2bd90de9..8df4cb6ac6f7 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -272,6 +272,7 @@ #define TCR_TG1_4K (UL(2) << TCR_TG1_SHIFT) #define TCR_TG1_64K (UL(3) << TCR_TG1_SHIFT) +#define TCR_A1 (UL(1) << 22) #define TCR_ASID16 (UL(1) << 36) #define TCR_TBI0 (UL(1) << 37) #define TCR_HA (UL(1) << 39) diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 0a5635fb0ef9..2db84df5eb42 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -34,8 +34,14 @@ #include -#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) -#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) +#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) +#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) + +#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? 
PTE_NG : 0) +#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0) + +#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) +#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG) #define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) @@ -47,23 +53,24 @@ #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) #define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) -#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) +#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) +#define _HYP_PAGE_DEFAULT _PAGE_DEFAULT -#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) -#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY) -#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY) -#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) -#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) +#define PAGE_KERNEL __pgprot(PROT_NORMAL) +#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY) +#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY) +#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN) +#define PAGE_KERNEL_EXEC_CONT __pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT) -#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) -#define PAGE_HYP_EXEC __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) -#define PAGE_HYP_RO __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) +#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) +#define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) +#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) -#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) -#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) +#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) +#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) -#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN) +#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index b46e54c2399b..aafea648a30f 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -98,6 +98,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) #define pte_valid_young(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF)) +#define pte_valid_user(pte) \ + ((pte_val(pte) & (PTE_VALID | 
PTE_USER)) == (PTE_VALID | PTE_USER)) /* * Could the pte be present in the TLB? We must check mm_tlb_flush_pending @@ -107,6 +109,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define pte_accessible(mm, pte) \ (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte)) +/* + * p??_access_permitted() is true for valid user mappings (subject to the + * write permission check) other than user execute-only which do not have the + * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set. + */ +#define pte_access_permitted(pte, write) \ + (pte_valid_user(pte) && (!(write) || pte_write(pte))) +#define pmd_access_permitted(pmd, write) \ + (pte_access_permitted(pmd_pte(pmd), (write))) +#define pud_access_permitted(pud, write) \ + (pte_access_permitted(pud_pte(pud), (write))) + static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) { pte_val(pte) &= ~pgprot_val(prot); @@ -135,12 +149,20 @@ static inline pte_t pte_mkwrite(pte_t pte) static inline pte_t pte_mkclean(pte_t pte) { - return clear_pte_bit(pte, __pgprot(PTE_DIRTY)); + pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY)); + pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); + + return pte; } static inline pte_t pte_mkdirty(pte_t pte) { - return set_pte_bit(pte, __pgprot(PTE_DIRTY)); + pte = set_pte_bit(pte, __pgprot(PTE_DIRTY)); + + if (pte_write(pte)) + pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); + + return pte; } static inline pte_t pte_mkold(pte_t pte) @@ -628,28 +650,23 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* - * ptep_set_wrprotect - mark read-only while preserving the hardware update of - * the Access Flag. + * ptep_set_wrprotect - mark read-only while transferring potential hardware + * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit. */ #define __HAVE_ARCH_PTEP_SET_WRPROTECT static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) { pte_t old_pte, pte; - /* - * ptep_set_wrprotect() is only called on CoW mappings which are - * private (!VM_SHARED) with the pte either read-only (!PTE_WRITE && - * PTE_RDONLY) or writable and software-dirty (PTE_WRITE && - * !PTE_RDONLY && PTE_DIRTY); see is_cow_mapping() and - * protection_map[]. There is no race with the hardware update of the - * dirty state: clearing of PTE_RDONLY when PTE_WRITE (a.k.a. PTE_DBM) - * is set. - */ - VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(*ptep), - "%s: potential race with hardware DBM", __func__); pte = READ_ONCE(*ptep); do { old_pte = pte; + /* + * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY + * clear), set the PTE_DIRTY bit.
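+ * For example (sketch): a hardware-dirty entry, PTE_WRITE set and
+ * PTE_RDONLY clear, leaves this loop as PTE_DIRTY | PTE_RDONLY with
+ * PTE_WRITE clear, so the dirty information survives in software.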
+ */ + if (pte_hw_dirty(pte)) + pte = pte_mkdirty(pte); pte = pte_wrprotect(pte); pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), pte_val(old_pte), pte_val(pte)); @@ -667,6 +684,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; +extern pgd_t tramp_pg_dir[PTRS_PER_PGD]; /* * Encode and decode a swap entry: diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 14ad6e4e87d1..16cef2e8449e 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -35,12 +35,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); #include -#define cpu_switch_mm(pgd,mm) \ -do { \ - BUG_ON(pgd == swapper_pg_dir); \ - cpu_do_switch_mm(virt_to_phys(pgd),mm); \ -} while (0) - #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* __ASM_PROCFNS_H */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 29adab8138c3..fda6f5812281 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -19,6 +19,13 @@ #ifndef __ASM_PROCESSOR_H #define __ASM_PROCESSOR_H +#define TASK_SIZE_64 (UL(1) << VA_BITS) + +#define KERNEL_DS UL(-1) +#define USER_DS (TASK_SIZE_64 - 1) + +#ifndef __ASSEMBLY__ + /* * Default implementation of macro that returns current * instruction pointer ("program counter"). @@ -37,6 +44,22 @@ #include #include +/* + * TASK_SIZE - the maximum size of a user space task. + * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. + */ +#ifdef CONFIG_COMPAT +#define TASK_SIZE_32 UL(0x100000000) +#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ + TASK_SIZE_32 : TASK_SIZE_64) +#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ + TASK_SIZE_32 : TASK_SIZE_64) +#else +#define TASK_SIZE TASK_SIZE_64 +#endif /* CONFIG_COMPAT */ + +#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4)) + #define STACK_TOP_MAX TASK_SIZE_64 #ifdef CONFIG_COMPAT #define AARCH32_VECTORS_BASE 0xffff0000 @@ -194,4 +217,5 @@ static inline void spin_lock_prefetch(const void *ptr) int cpu_enable_pan(void *__unused); int cpu_enable_cache_maint_trap(void *__unused); +#endif /* __ASSEMBLY__ */ #endif /* __ASM_PROCESSOR_H */ diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h index fa8b3fe932e6..6495cc51246f 100644 --- a/arch/arm64/include/asm/simd.h +++ b/arch/arm64/include/asm/simd.h @@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy); static __must_check inline bool may_use_simd(void) { /* - * The raw_cpu_read() is racy if called with preemption enabled. - * This is not a bug: kernel_neon_busy is only set when - * preemption is disabled, so we cannot migrate to another CPU - * while it is set, nor can we migrate to a CPU where it is set. - * So, if we find it clear on some CPU then we're guaranteed to - * find it clear on any CPU we could migrate to. - * - * If we are in between kernel_neon_begin()...kernel_neon_end(), - * the flag will be set, but preemption is also disabled, so we - * can't migrate to another CPU and spuriously see it become - * false. + * kernel_neon_busy is only set while preemption is disabled, + * and is clear whenever preemption is enabled. Since + * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy + * cannot change under our feet -- if it's set we cannot be + * migrated, and if it's clear we cannot be migrated to a CPU + * where it is set. 
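+ * (A task is never *running* on a CPU whose flag is set: the setter
+ * keeps preemption disabled until after it clears the flag again.)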
*/ return !in_irq() && !irqs_disabled() && !in_nmi() && - !raw_cpu_read(kernel_neon_busy); + !this_cpu_read(kernel_neon_busy); } #else /* ! CONFIG_KERNEL_MODE_NEON */ diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 95ad7102b63c..82375b896be5 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -89,8 +89,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) " cbnz %w1, 1f\n" " add %w1, %w0, %3\n" " casa %w0, %w1, %2\n" - " and %w1, %w1, #0xffff\n" - " eor %w1, %w1, %w0, lsr #16\n" + " sub %w1, %w1, %3\n" + " eor %w1, %w1, %w0\n" "1:") : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock) : "I" (1 << TICKET_SHIFT) diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index 6ad30776e984..99390755c0c4 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -27,7 +27,7 @@ struct stackframe { unsigned long fp; unsigned long pc; #ifdef CONFIG_FUNCTION_GRAPH_TRACER - unsigned int graph; + int graph; #endif }; diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index f707fed5886f..3bdec2f5cbb4 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -332,6 +332,8 @@ #define ID_AA64ISAR1_DPB_SHIFT 0 /* id_aa64pfr0 */ +#define ID_AA64PFR0_CSV3_SHIFT 60 +#define ID_AA64PFR0_CSV2_SHIFT 56 #define ID_AA64PFR0_GIC_SHIFT 24 #define ID_AA64PFR0_ASIMD_SHIFT 20 #define ID_AA64PFR0_FP_SHIFT 16 @@ -463,20 +465,39 @@ #include -asm( -" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" -" .equ .L__reg_num_x\\num, \\num\n" -" .endr\n" +#define __DEFINE_MRS_MSR_S_REGNUM \ +" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \ +" .equ .L__reg_num_x\\num, \\num\n" \ +" .endr\n" \ " .equ .L__reg_num_xzr, 31\n" -"\n" -" .macro mrs_s, rt, sreg\n" - __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) + +#define DEFINE_MRS_S \ + __DEFINE_MRS_MSR_S_REGNUM \ +" .macro mrs_s, rt, sreg\n" \ +" .inst 0xd5200000|(\\sreg)|(.L__reg_num_\\rt)\n" \ " .endm\n" -"\n" -" .macro msr_s, sreg, rt\n" - __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) + +#define DEFINE_MSR_S \ + __DEFINE_MRS_MSR_S_REGNUM \ +" .macro msr_s, sreg, rt\n" \ +" .inst 0xd5000000|(\\sreg)|(.L__reg_num_\\rt)\n" \ " .endm\n" -); + +#define UNDEFINE_MRS_S \ +" .purgem mrs_s\n" + +#define UNDEFINE_MSR_S \ +" .purgem msr_s\n" + +#define __mrs_s(r, v) \ + DEFINE_MRS_S \ +" mrs_s %0, " __stringify(r) "\n" \ + UNDEFINE_MRS_S : "=r" (v) + +#define __msr_s(r, v) \ + DEFINE_MSR_S \ +" msr_s " __stringify(r) ", %x0\n" \ + UNDEFINE_MSR_S : : "rZ" (v) /* * Unlike read_cpuid, calls to read_sysreg are never expected to be @@ -502,15 +523,15 @@ asm( * For registers without architectural names, or simply unsupported by * GAS. 
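 * e.g. write_sysreg_s(val, SYS_ICC_SGI1R_EL1) with an assembler that
 * predates the named register (usage illustration only, not from this diff).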
*/ -#define read_sysreg_s(r) ({ \ - u64 __val; \ - asm volatile("mrs_s %0, " __stringify(r) : "=r" (__val)); \ - __val; \ +#define read_sysreg_s(r) ({ \ + u64 __val; \ + asm volatile(__mrs_s(r, __val)); \ + __val; \ }) -#define write_sysreg_s(v, r) do { \ - u64 __val = (u64)(v); \ - asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \ +#define write_sysreg_s(v, r) do { \ + u64 __val = (u64)(v); \ + asm volatile(__msr_s(r, __val)); \ } while (0) static inline void config_sctlr_el1(u32 clear, u32 set) diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index ddded6497a8a..fc786d344e46 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -92,6 +92,7 @@ void arch_setup_new_exec(void); #define TIF_RESTORE_SIGMASK 20 #define TIF_SINGLESTEP 21 #define TIF_32BIT 22 /* 32bit process */ +#define TIF_SSBD 23 /* Wants SSB mitigation */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index af1c76981911..9e82dd79c7db 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -23,6 +23,7 @@ #include #include +#include /* * Raw TLBI operations. @@ -54,6 +55,11 @@ #define __tlbi(op, ...) __TLBI_N(op, ##__VA_ARGS__, 1, 0) +#define __tlbi_user(op, arg) do { \ + if (arm64_kernel_unmapped_at_el0()) \ + __tlbi(op, (arg) | USER_ASID_FLAG); \ +} while (0) + /* * TLB Management * ============== @@ -115,6 +121,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm) dsb(ishst); __tlbi(aside1is, asid); + __tlbi_user(aside1is, asid); dsb(ish); } @@ -125,6 +132,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, dsb(ishst); __tlbi(vale1is, addr); + __tlbi_user(vale1is, addr); dsb(ish); } @@ -151,10 +159,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, dsb(ishst); for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) { - if (last_level) + if (last_level) { __tlbi(vale1is, addr); - else + __tlbi_user(vale1is, addr); + } else { __tlbi(vae1is, addr); + __tlbi_user(vae1is, addr); + } } dsb(ish); } @@ -194,6 +205,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm, unsigned long addr = uaddr >> 12 | (ASID(mm) << 48); __tlbi(vae1is, addr); + __tlbi_user(vae1is, addr); dsb(ish); } diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index b3202284568b..b7bd70520a77 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -33,6 +33,20 @@ int pcibus_to_node(struct pci_bus *bus); #endif /* CONFIG_NUMA */ +#include + +/* Replace task scheduler's default frequency-invariant accounting */ +#define arch_scale_freq_capacity topology_get_freq_scale + +/* Replace task scheduler's default max-frequency-invariant accounting */ +#define arch_scale_max_freq_capacity topology_get_max_freq_scale + +/* Replace task scheduler's default cpu-invariant accounting */ +#define arch_scale_cpu_capacity topology_get_cpu_scale + +/* Enable topology flag updates */ +#define arch_update_cpu_topology topology_update_cpu_topology + #include #endif /* _ASM_ARM_TOPOLOGY_H */ diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index fc0f9eb66039..fad8c1b2ca3e 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -35,16 +35,20 @@ #include #include -#define KERNEL_DS (-1UL) #define get_ds() (KERNEL_DS) - -#define USER_DS 
TASK_SIZE_64 #define get_fs() (current_thread_info()->addr_limit) static inline void set_fs(mm_segment_t fs) { current_thread_info()->addr_limit = fs; + /* + * Prevent a mispredicted conditional call to set_fs from forwarding + * the wrong address limit to access_ok under speculation. + */ + dsb(nsh); + isb(); + /* On user-mode return, check fs is correct */ set_thread_flag(TIF_FSCHECK); @@ -66,22 +70,32 @@ static inline void set_fs(mm_segment_t fs) * Returns 1 if the range is valid, 0 otherwise. * * This is equivalent to the following test: - * (u65)addr + (u65)size <= current->addr_limit - * - * This needs 65-bit arithmetic. + * (u65)addr + (u65)size <= (u65)current->addr_limit + 1 */ -#define __range_ok(addr, size) \ -({ \ - unsigned long __addr = (unsigned long)(addr); \ - unsigned long flag, roksum; \ - __chk_user_ptr(addr); \ - asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \ - : "=&r" (flag), "=&r" (roksum) \ - : "1" (__addr), "Ir" (size), \ - "r" (current_thread_info()->addr_limit) \ - : "cc"); \ - flag; \ -}) +static inline unsigned long __range_ok(unsigned long addr, unsigned long size) +{ + unsigned long limit = current_thread_info()->addr_limit; + + __chk_user_ptr(addr); + asm volatile( + // A + B <= C + 1 for all A,B,C, in four easy steps: + // 1: X = A + B; X' = X % 2^64 + " adds %0, %0, %2\n" + // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4 + " csel %1, xzr, %1, hi\n" + // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X' + // to compensate for the carry flag being set in step 4. For + // X > 2^64, X' merely has to remain nonzero, which it does. + " csinv %0, %0, xzr, cc\n" + // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1 + // comes from the carry in being clear. Otherwise, we are + // testing X' - C == 0, subject to the previous adjustments. + " sbcs xzr, %0, %1\n" + " cset %0, ls\n" + : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc"); + + return addr; +} /* * When dealing with data aborts, watchpoints, or instruction traps we may end @@ -90,7 +104,7 @@ static inline void set_fs(mm_segment_t fs) */ #define untagged_addr(addr) sign_extend64(addr, 55) -#define access_ok(type, addr, size) __range_ok(addr, size) +#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size) #define user_addr_max get_fs #define _ASM_EXTABLE(from, to) \ @@ -105,17 +119,23 @@ static inline void set_fs(mm_segment_t fs) #ifdef CONFIG_ARM64_SW_TTBR0_PAN static inline void __uaccess_ttbr0_disable(void) { - unsigned long ttbr; + unsigned long flags, ttbr; + local_irq_save(flags); + ttbr = read_sysreg(ttbr1_el1); + ttbr &= ~TTBR_ASID_MASK; /* reserved_ttbr0 placed at the end of swapper_pg_dir */ - ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE; - write_sysreg(ttbr, ttbr0_el1); + write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1); isb(); + /* Set reserved ASID */ + write_sysreg(ttbr, ttbr1_el1); + isb(); + local_irq_restore(flags); } static inline void __uaccess_ttbr0_enable(void) { - unsigned long flags; + unsigned long flags, ttbr0, ttbr1; /* * Disable interrupts to avoid preemption between reading the 'ttbr0' @@ -123,7 +143,17 @@ static inline void __uaccess_ttbr0_enable(void) * roll-over and an update of 'ttbr0'. 
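 * The sequence below first merges the task's ASID back into TTBR1_EL1
 * (roughly: ttbr1 = (ttbr1 & ~TTBR_ASID_MASK) | (ttbr0 & TTBR_ASID_MASK)),
 * issues an ISB, and only then restores the user page table into TTBR0_EL1.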
*/ local_irq_save(flags); - write_sysreg(current_thread_info()->ttbr0, ttbr0_el1); + ttbr0 = READ_ONCE(current_thread_info()->ttbr0); + + /* Restore active ASID */ + ttbr1 = read_sysreg(ttbr1_el1); + ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */ + ttbr1 |= ttbr0 & TTBR_ASID_MASK; + write_sysreg(ttbr1, ttbr1_el1); + isb(); + + /* Restore user page table */ + write_sysreg(ttbr0, ttbr0_el1); isb(); local_irq_restore(flags); } @@ -192,6 +222,26 @@ static inline void uaccess_enable_not_uao(void) __uaccess_enable(ARM64_ALT_PAN_NOT_UAO); } +/* + * Sanitise a uaccess pointer such that it becomes NULL if above the + * current addr_limit. + */ +#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr) +static inline void __user *__uaccess_mask_ptr(const void __user *ptr) +{ + void __user *safe_ptr; + + asm volatile( + " bics xzr, %1, %2\n" + " csel %0, %1, xzr, eq\n" + : "=&r" (safe_ptr) + : "r" (ptr), "r" (current_thread_info()->addr_limit) + : "cc"); + + csdb(); + return safe_ptr; +} + /* * The "__xxx" versions of the user access functions do not verify the address * space - it must have been done previously with a separate "access_ok()" @@ -244,28 +294,33 @@ do { \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) -#define __get_user(x, ptr) \ +#define __get_user_check(x, ptr, err) \ ({ \ - int __gu_err = 0; \ - __get_user_err((x), (ptr), __gu_err); \ - __gu_err; \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ + __p = uaccess_mask_ptr(__p); \ + __get_user_err((x), __p, (err)); \ + } else { \ + (x) = 0; (err) = -EFAULT; \ + } \ }) #define __get_user_error(x, ptr, err) \ ({ \ - __get_user_err((x), (ptr), (err)); \ + __get_user_check((x), (ptr), (err)); \ (void)0; \ }) -#define get_user(x, ptr) \ +#define __get_user(x, ptr) \ ({ \ - __typeof__(*(ptr)) __user *__p = (ptr); \ - might_fault(); \ - access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \ - __get_user((x), __p) : \ - ((x) = 0, -EFAULT); \ + int __gu_err = 0; \ + __get_user_check((x), (ptr), __gu_err); \ + __gu_err; \ }) +#define get_user __get_user + #define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ asm volatile( \ "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ @@ -308,43 +363,63 @@ do { \ uaccess_disable_not_uao(); \ } while (0) -#define __put_user(x, ptr) \ +#define __put_user_check(x, ptr, err) \ ({ \ - int __pu_err = 0; \ - __put_user_err((x), (ptr), __pu_err); \ - __pu_err; \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ + __p = uaccess_mask_ptr(__p); \ + __put_user_err((x), __p, (err)); \ + } else { \ + (err) = -EFAULT; \ + } \ }) #define __put_user_error(x, ptr, err) \ ({ \ - __put_user_err((x), (ptr), (err)); \ + __put_user_check((x), (ptr), (err)); \ (void)0; \ }) -#define put_user(x, ptr) \ +#define __put_user(x, ptr) \ ({ \ - __typeof__(*(ptr)) __user *__p = (ptr); \ - might_fault(); \ - access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? 
\ - __put_user((x), __p) : \ - -EFAULT; \ + int __pu_err = 0; \ + __put_user_check((x), (ptr), __pu_err); \ + __pu_err; \ }) +#define put_user __put_user + extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); -#define raw_copy_from_user __arch_copy_from_user +#define raw_copy_from_user(to, from, n) \ +({ \ + __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \ +}) + extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); -#define raw_copy_to_user __arch_copy_to_user -extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n); -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); +#define raw_copy_to_user(to, from, n) \ +({ \ + __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \ +}) + +extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n); +#define raw_copy_in_user(to, from, n) \ +({ \ + __arch_copy_in_user(__uaccess_mask_ptr(to), \ + __uaccess_mask_ptr(from), (n)); \ +}) + #define INLINE_COPY_TO_USER #define INLINE_COPY_FROM_USER -static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) +extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n); +static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n) { if (access_ok(VERIFY_WRITE, to, n)) - n = __clear_user(to, n); + n = __arch_clear_user(__uaccess_mask_ptr(to), n); return n; } +#define clear_user __clear_user extern long strncpy_from_user(char *dest, const char __user *src, long count); @@ -358,7 +433,7 @@ extern unsigned long __must_check __copy_user_flushcache(void *to, const void __ static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size) { kasan_check_write(dst, size); - return __copy_user_flushcache(dst, src, size); + return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size); } #endif diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 51149ec75fe4..9f74ce5899f0 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -200,6 +200,12 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) +/* KVM-as-firmware specific pseudo-registers */ +#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_FW | ((r) & 0xffff)) +#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) + /* Device Control API: ARM VGIC */ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 0029e13adb59..714fe90dbf66 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -54,6 +54,11 @@ arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o +arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o + +ifeq ($(CONFIG_KVM),y) +arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o +endif obj-y += $(arm64-obj-y) vdso/ probes/ obj-m += $(arm64-obj-m) @@ -63,6 +68,3 @@ extra-y += $(head-y) vmlinux.lds ifeq ($(CONFIG_DEBUG_EFI),y) AFLAGS_head.o += 
-DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\"" endif - -# will be included by each individual module but not by the core kernel itself -extra-$(CONFIG_DYNAMIC_FTRACE) += ftrace-mod.o diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index 6dd0a3a3e5c9..5c4bce4ac381 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -32,6 +32,8 @@ #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset) +int alternatives_applied; + struct alt_region { struct alt_instr *begin; struct alt_instr *end; @@ -105,32 +107,53 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp return insn; } +static void patch_alternative(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst) +{ + __le32 *replptr; + int i; + + replptr = ALT_REPL_PTR(alt); + for (i = 0; i < nr_inst; i++) { + u32 insn; + + insn = get_alt_insn(alt, origptr + i, replptr + i); + updptr[i] = cpu_to_le32(insn); + } +} + static void __apply_alternatives(void *alt_region, bool use_linear_alias) { struct alt_instr *alt; struct alt_region *region = alt_region; - __le32 *origptr, *replptr, *updptr; + __le32 *origptr, *updptr; + alternative_cb_t alt_cb; for (alt = region->begin; alt < region->end; alt++) { - u32 insn; - int i, nr_inst; + int nr_inst; - if (!cpus_have_cap(alt->cpufeature)) + /* Use ARM64_CB_PATCH as an unconditional patch */ + if (alt->cpufeature < ARM64_CB_PATCH && + !cpus_have_cap(alt->cpufeature)) continue; - BUG_ON(alt->alt_len != alt->orig_len); + if (alt->cpufeature == ARM64_CB_PATCH) + BUG_ON(alt->alt_len != 0); + else + BUG_ON(alt->alt_len != alt->orig_len); pr_info_once("patching kernel code\n"); origptr = ALT_ORIG_PTR(alt); - replptr = ALT_REPL_PTR(alt); updptr = use_linear_alias ? 
lm_alias(origptr) : origptr; - nr_inst = alt->alt_len / sizeof(insn); + nr_inst = alt->orig_len / AARCH64_INSN_SIZE; - for (i = 0; i < nr_inst; i++) { - insn = get_alt_insn(alt, origptr + i, replptr + i); - updptr[i] = cpu_to_le32(insn); - } + if (alt->cpufeature < ARM64_CB_PATCH) + alt_cb = patch_alternative; + else + alt_cb = ALT_REPL_PTR(alt); + + alt_cb(alt, origptr, updptr, nr_inst); flush_icache_range((uintptr_t)origptr, (uintptr_t)(origptr + nr_inst)); @@ -143,7 +166,6 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias) */ static int __apply_alternatives_multi_stop(void *unused) { - static int patched = 0; struct alt_region region = { .begin = (struct alt_instr *)__alt_instructions, .end = (struct alt_instr *)__alt_instructions_end, @@ -151,14 +173,14 @@ static int __apply_alternatives_multi_stop(void *unused) /* We always have a CPU 0 at this point (__init) */ if (smp_processor_id()) { - while (!READ_ONCE(patched)) + while (!READ_ONCE(alternatives_applied)) cpu_relax(); isb(); } else { - BUG_ON(patched); + BUG_ON(alternatives_applied); __apply_alternatives(&region, true); /* Barriers provided by the cache flushing */ - WRITE_ONCE(patched, 1); + WRITE_ONCE(alternatives_applied, 1); } return 0; diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index 67368c7329c0..66be504edb6c 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -37,8 +37,8 @@ EXPORT_SYMBOL(clear_page); /* user mem (segment) */ EXPORT_SYMBOL(__arch_copy_from_user); EXPORT_SYMBOL(__arch_copy_to_user); -EXPORT_SYMBOL(__clear_user); -EXPORT_SYMBOL(raw_copy_in_user); +EXPORT_SYMBOL(__arch_clear_user); +EXPORT_SYMBOL(__arch_copy_in_user); /* physical memory */ EXPORT_SYMBOL(memstart_addr); diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 71bf088f1e4b..b5e43b01b396 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -130,11 +131,13 @@ int main(void) BLANK(); #ifdef CONFIG_KVM_ARM_HOST DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); + DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags)); DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs)); DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2])); DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); + DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu)); #endif #ifdef CONFIG_CPU_PM DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx)); @@ -148,11 +151,14 @@ int main(void) DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2)); DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id)); DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state)); - BLANK(); DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address)); DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address)); DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next)); DEFINE(ARM64_FTR_SYSVAL, offsetof(struct arm64_ftr_reg, sys_val)); + BLANK(); +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + DEFINE(TRAMP_VALIAS, TRAMP_VALIAS); +#endif return 0; } diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S new file mode 100644 index 000000000000..e5de33513b5d --- /dev/null +++ b/arch/arm64/kernel/bpi.S @@ -0,0 +1,83 @@ +/*
+ * Contains CPU specific branch predictor invalidation sequences + * + * Copyright (C) 2018 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include + +.macro ventry target + .rept 31 + nop + .endr + b \target +.endm + +.macro vectors target + ventry \target + 0x000 + ventry \target + 0x080 + ventry \target + 0x100 + ventry \target + 0x180 + + ventry \target + 0x200 + ventry \target + 0x280 + ventry \target + 0x300 + ventry \target + 0x380 + + ventry \target + 0x400 + ventry \target + 0x480 + ventry \target + 0x500 + ventry \target + 0x580 + + ventry \target + 0x600 + ventry \target + 0x680 + ventry \target + 0x700 + ventry \target + 0x780 +.endm + + .align 11 +ENTRY(__bp_harden_hyp_vecs_start) + .rept 4 + vectors __kvm_hyp_vector + .endr +ENTRY(__bp_harden_hyp_vecs_end) + +ENTRY(__qcom_hyp_sanitize_link_stack_start) + stp x29, x30, [sp, #-16]! + .rept 16 + bl . + 4 + .endr + ldp x29, x30, [sp], #16 +ENTRY(__qcom_hyp_sanitize_link_stack_end) + +.macro smccc_workaround_1 inst + sub sp, sp, #(8 * 4) + stp x2, x3, [sp, #(8 * 0)] + stp x0, x1, [sp, #(8 * 2)] + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 + \inst #0 + ldp x2, x3, [sp, #(8 * 0)] + ldp x0, x1, [sp, #(8 * 2)] + add sp, sp, #(8 * 4) +.endm + +ENTRY(__smccc_workaround_1_smc_start) + smccc_workaround_1 smc +ENTRY(__smccc_workaround_1_smc_end) + +ENTRY(__smccc_workaround_1_hvc_start) + smccc_workaround_1 hvc +ENTRY(__smccc_workaround_1_hvc_end) diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S index 65f42d257414..8021b46c9743 100644 --- a/arch/arm64/kernel/cpu-reset.S +++ b/arch/arm64/kernel/cpu-reset.S @@ -16,7 +16,7 @@ #include .text -.pushsection .idmap.text, "ax" +.pushsection .idmap.text, "awx" /* * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for @@ -37,6 +37,7 @@ ENTRY(__cpu_soft_restart) mrs x12, sctlr_el1 ldr x13, =SCTLR_ELx_FLAGS bic x12, x12, x13 + pre_disable_mmu_workaround msr sctlr_el1, x12 isb diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 0e27f86ee709..eccdb28b4a39 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -30,6 +30,20 @@ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) entry->midr_range_max); } +static bool __maybe_unused +is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) +{ + u32 model; + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + model = read_cpuid_id(); + model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) | + MIDR_ARCHITECTURE_MASK; + + return model == entry->midr_model; +} + static bool has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry, int scope) @@ -46,6 +60,346 @@ static int cpu_enable_trap_ctr_access(void *__unused) return 0; } +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +#include +#include + +DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); + +#ifdef CONFIG_KVM +extern char __qcom_hyp_sanitize_link_stack_start[]; +extern char 
__qcom_hyp_sanitize_link_stack_end[]; +extern char __smccc_workaround_1_smc_start[]; +extern char __smccc_workaround_1_smc_end[]; +extern char __smccc_workaround_1_hvc_start[]; +extern char __smccc_workaround_1_hvc_end[]; + +static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, + const char *hyp_vecs_end) +{ + void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K); + int i; + + for (i = 0; i < SZ_2K; i += 0x80) + memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start); + + flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); +} + +static void __install_bp_hardening_cb(bp_hardening_cb_t fn, + const char *hyp_vecs_start, + const char *hyp_vecs_end) +{ + static int last_slot = -1; + static DEFINE_SPINLOCK(bp_lock); + int cpu, slot = -1; + + spin_lock(&bp_lock); + for_each_possible_cpu(cpu) { + if (per_cpu(bp_hardening_data.fn, cpu) == fn) { + slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu); + break; + } + } + + if (slot == -1) { + last_slot++; + BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start) + / SZ_2K) <= last_slot); + slot = last_slot; + __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end); + } + + __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot); + __this_cpu_write(bp_hardening_data.fn, fn); + spin_unlock(&bp_lock); +} +#else +#define __qcom_hyp_sanitize_link_stack_start NULL +#define __qcom_hyp_sanitize_link_stack_end NULL +#define __smccc_workaround_1_smc_start NULL +#define __smccc_workaround_1_smc_end NULL +#define __smccc_workaround_1_hvc_start NULL +#define __smccc_workaround_1_hvc_end NULL + +static void __install_bp_hardening_cb(bp_hardening_cb_t fn, + const char *hyp_vecs_start, + const char *hyp_vecs_end) +{ + __this_cpu_write(bp_hardening_data.fn, fn); +} +#endif /* CONFIG_KVM */ + +static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry, + bp_hardening_cb_t fn, + const char *hyp_vecs_start, + const char *hyp_vecs_end) +{ + u64 pfr0; + + if (!entry->matches(entry, SCOPE_LOCAL_CPU)) + return; + + pfr0 = read_cpuid(ID_AA64PFR0_EL1); + if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT)) + return; + + __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end); +} + +#include +#include +#include + +static void call_smc_arch_workaround_1(void) +{ + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); +} + +static void call_hvc_arch_workaround_1(void) +{ + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); +} + +static int enable_smccc_arch_workaround_1(void *data) +{ + const struct arm64_cpu_capabilities *entry = data; + bp_hardening_cb_t cb; + void *smccc_start, *smccc_end; + struct arm_smccc_res res; + + if (!entry->matches(entry, SCOPE_LOCAL_CPU)) + return 0; + + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) + return 0; + + switch (psci_ops.conduit) { + case PSCI_CONDUIT_HVC: + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + if ((int)res.a0 < 0) + return 0; + cb = call_hvc_arch_workaround_1; + smccc_start = __smccc_workaround_1_hvc_start; + smccc_end = __smccc_workaround_1_hvc_end; + break; + + case PSCI_CONDUIT_SMC: + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + if ((int)res.a0 < 0) + return 0; + cb = call_smc_arch_workaround_1; + smccc_start = __smccc_workaround_1_smc_start; + smccc_end = __smccc_workaround_1_smc_end; + break; + + default: + return 0; + } + + install_bp_hardening_cb(entry, cb, smccc_start, smccc_end); + + return 0; +} + +static void 
qcom_link_stack_sanitization(void) +{ + u64 tmp; + + asm volatile("mov %0, x30 \n" + ".rept 16 \n" + "bl . + 4 \n" + ".endr \n" + "mov x30, %0 \n" + : "=&r" (tmp)); +} + +static int qcom_enable_link_stack_sanitization(void *data) +{ + const struct arm64_cpu_capabilities *entry = data; + + install_bp_hardening_cb(entry, qcom_link_stack_sanitization, + __qcom_hyp_sanitize_link_stack_start, + __qcom_hyp_sanitize_link_stack_end); + + return 0; +} +#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ + +#ifdef CONFIG_ARM64_SSBD +DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); + +int ssbd_state __read_mostly = ARM64_SSBD_KERNEL; + +static const struct ssbd_options { + const char *str; + int state; +} ssbd_options[] = { + { "force-on", ARM64_SSBD_FORCE_ENABLE, }, + { "force-off", ARM64_SSBD_FORCE_DISABLE, }, + { "kernel", ARM64_SSBD_KERNEL, }, +}; + +static int __init ssbd_cfg(char *buf) +{ + int i; + + if (!buf || !buf[0]) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) { + int len = strlen(ssbd_options[i].str); + + if (strncmp(buf, ssbd_options[i].str, len)) + continue; + + ssbd_state = ssbd_options[i].state; + return 0; + } + + return -EINVAL; +} +early_param("ssbd", ssbd_cfg); + +void __init arm64_update_smccc_conduit(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, + int nr_inst) +{ + u32 insn; + + BUG_ON(nr_inst != 1); + + switch (psci_ops.conduit) { + case PSCI_CONDUIT_HVC: + insn = aarch64_insn_get_hvc_value(); + break; + case PSCI_CONDUIT_SMC: + insn = aarch64_insn_get_smc_value(); + break; + default: + return; + } + + *updptr = cpu_to_le32(insn); +} + +void __init arm64_enable_wa2_handling(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, + int nr_inst) +{ + BUG_ON(nr_inst != 1); + /* + * Only allow mitigation on EL1 entry/exit and guest + * ARCH_WORKAROUND_2 handling if the SSBD state allows it to + * be flipped. 
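A note on the ssbd= handling just above: ssbd_cfg() is a plain prefix match over the option table, and anything unrecognised returns -EINVAL, leaving ssbd_state at its ARM64_SSBD_KERNEL default. A minimal userspace sketch of the same matching (illustrative names only, not the kernel's early_param plumbing):

    #include <stdio.h>
    #include <string.h>

    enum ssbd_state { SSBD_FORCE_ENABLE, SSBD_FORCE_DISABLE, SSBD_KERNEL };

    static const struct {
        const char *str;
        enum ssbd_state state;
    } opts[] = {
        { "force-on",  SSBD_FORCE_ENABLE  },
        { "force-off", SSBD_FORCE_DISABLE },
        { "kernel",    SSBD_KERNEL        },
    };

    /* First table entry whose text prefixes buf wins, as in ssbd_cfg(). */
    static int parse_ssbd(const char *buf, enum ssbd_state *state)
    {
        size_t i;

        if (!buf || !buf[0])
            return -1;

        for (i = 0; i < sizeof(opts) / sizeof(opts[0]); i++) {
            if (!strncmp(buf, opts[i].str, strlen(opts[i].str))) {
                *state = opts[i].state;
                return 0;
            }
        }
        return -1;
    }

    int main(void)
    {
        enum ssbd_state s;

        printf("%d\n", parse_ssbd("kernel", &s)); /* 0: matched */
        printf("%d\n", parse_ssbd("bogus", &s));  /* -1: rejected, state untouched */
        return 0;
    }

Because the comparison is strncmp() against each option's own length, a trailing suffix (e.g. "kernel-foo") still matches, which mirrors the kernel code's behaviour.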
+ */ + if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL) + *updptr = cpu_to_le32(aarch64_insn_gen_nop()); +} + +void arm64_set_ssbd_mitigation(bool state) +{ + switch (psci_ops.conduit) { + case PSCI_CONDUIT_HVC: + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL); + break; + + case PSCI_CONDUIT_SMC: + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL); + break; + + default: + WARN_ON_ONCE(1); + break; + } +} + +static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, + int scope) +{ + struct arm_smccc_res res; + bool required = true; + s32 val; + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) { + ssbd_state = ARM64_SSBD_UNKNOWN; + return false; + } + + switch (psci_ops.conduit) { + case PSCI_CONDUIT_HVC: + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_2, &res); + break; + + case PSCI_CONDUIT_SMC: + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_2, &res); + break; + + default: + ssbd_state = ARM64_SSBD_UNKNOWN; + return false; + } + + val = (s32)res.a0; + + switch (val) { + case SMCCC_RET_NOT_SUPPORTED: + ssbd_state = ARM64_SSBD_UNKNOWN; + return false; + + case SMCCC_RET_NOT_REQUIRED: + pr_info_once("%s mitigation not required\n", entry->desc); + ssbd_state = ARM64_SSBD_MITIGATED; + return false; + + case SMCCC_RET_SUCCESS: + required = true; + break; + + case 1: /* Mitigation not required on this CPU */ + required = false; + break; + + default: + WARN_ON(1); + return false; + } + + switch (ssbd_state) { + case ARM64_SSBD_FORCE_DISABLE: + pr_info_once("%s disabled from command-line\n", entry->desc); + arm64_set_ssbd_mitigation(false); + required = false; + break; + + case ARM64_SSBD_KERNEL: + if (required) { + __this_cpu_write(arm64_ssbd_callback_required, 1); + arm64_set_ssbd_mitigation(true); + } + break; + + case ARM64_SSBD_FORCE_ENABLE: + pr_info_once("%s forced from command-line\n", entry->desc); + arm64_set_ssbd_mitigation(true); + required = true; + break; + + default: + WARN_ON(1); + break; + } + + return required; +} +#endif /* CONFIG_ARM64_SSBD */ + #define MIDR_RANGE(model, min, max) \ .def_scope = SCOPE_LOCAL_CPU, \ .matches = is_affected_midr_range, \ @@ -169,6 +523,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = { MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(0, 0)), }, + { + .desc = "Qualcomm Technologies Kryo erratum 1003", + .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, + .def_scope = SCOPE_LOCAL_CPU, + .midr_model = MIDR_QCOM_KRYO, + .matches = is_kryo_midr, + }, #endif #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009 { @@ -186,6 +547,64 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .capability = ARM64_WORKAROUND_858921, MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), }, +#endif +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), + .enable = enable_smccc_arch_workaround_1, + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + .enable = enable_smccc_arch_workaround_1, + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), + .enable = enable_smccc_arch_workaround_1, + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), + .enable = enable_smccc_arch_workaround_1, + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), + .enable = qcom_enable_link_stack_sanitization, + }, + { + 
.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), + .enable = qcom_enable_link_stack_sanitization, + }, + { + .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), + .enable = enable_smccc_arch_workaround_1, + }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), + .enable = enable_smccc_arch_workaround_1, + }, +#endif +#ifdef CONFIG_ARM64_SSBD + { + .desc = "Speculative Store Bypass Disable", + .def_scope = SCOPE_LOCAL_CPU, + .capability = ARM64_SSBD, + .matches = has_ssbd_mitigation, + }, #endif { } @@ -200,15 +619,18 @@ void verify_local_cpu_errata_workarounds(void) { const struct arm64_cpu_capabilities *caps = arm64_errata; - for (; caps->matches; caps++) - if (!cpus_have_cap(caps->capability) && - caps->matches(caps, SCOPE_LOCAL_CPU)) { + for (; caps->matches; caps++) { + if (cpus_have_cap(caps->capability)) { + if (caps->enable) + caps->enable((void *)caps); + } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) { pr_crit("CPU%d: Requires work around for %s, not detected" " at boot time\n", smp_processor_id(), caps->desc ? : "an erratum"); cpu_die_early(); } + } } void update_cpu_errata_workarounds(void) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 21e2c95d24e7..28d13bed8f76 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -125,6 +125,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { }; static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), @@ -173,9 +175,11 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { }; static const struct arm64_ftr_bits ftr_ctr[] = { - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */ + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */ + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */ + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */ + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ /* * Linux can handle differing I-cache policies. 
Userspace JITs will @@ -796,6 +800,102 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus ID_AA64PFR0_FP_SHIFT) < 0; } +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ + +static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, + int __unused) +{ + char const *str = "command line option"; + u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + + /* + * For reasons that aren't entirely clear, enabling KPTI on Cavium + * ThunderX leads to apparent I-cache corruption of kernel text, which + * ends as well as you might imagine. Don't even try. + */ + if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) { + str = "ARM64_WORKAROUND_CAVIUM_27456"; + __kpti_forced = -1; + } + + /* Forced? */ + if (__kpti_forced) { + pr_info_once("kernel page table isolation forced %s by %s\n", + __kpti_forced > 0 ? "ON" : "OFF", str); + return __kpti_forced > 0; + } + + /* Useful for KASLR robustness */ + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) + return true; + + /* Don't force KPTI for CPUs that are not vulnerable */ + switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) { + case MIDR_CAVIUM_THUNDERX2: + case MIDR_BRCM_VULCAN: + return false; + } + + /* Defer to CPU feature registers */ + return !cpuid_feature_extract_unsigned_field(pfr0, + ID_AA64PFR0_CSV3_SHIFT); +} + +static int __nocfi kpti_install_ng_mappings(void *__unused) +{ + typedef void (kpti_remap_fn)(int, int, phys_addr_t); + extern kpti_remap_fn idmap_kpti_install_ng_mappings; + kpti_remap_fn *remap_fn; + + static bool kpti_applied = false; + int cpu = smp_processor_id(); + + if (kpti_applied) + return 0; + + remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); + + cpu_install_idmap(); + remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir)); + cpu_uninstall_idmap(); + + if (!cpu) + kpti_applied = true; + + return 0; +} + +static int __init parse_kpti(char *str) +{ + bool enabled; + int ret = strtobool(str, &enabled); + + if (ret) + return ret; + + __kpti_forced = enabled ? 1 : -1; + return 0; +} +early_param("kpti", parse_kpti); +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ + +static int cpu_copy_el2regs(void *__unused) +{ + /* + * Copy register values that aren't redirected by hardware. + * + * Before code patching, we only set tpidr_el1, all CPUs need to copy + * this value to tpidr_el2 before we patch the code. Once we've done + * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to + * do anything here. 
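unmap_kernel_at_el0() above settles the KPTI question in a fixed order: the Cavium 27456 workaround forces it off, an explicit kpti= option wins next, KASLR then forces it on for robustness, a short allowlist of known-unaffected parts opts out, and ID_AA64PFR0_EL1.CSV3 is the final arbiter. A condensed, standalone rendering of that ordering (the helper and flag names below are invented for illustration):

    #include <stdio.h>

    /*
     * Condensed model of unmap_kernel_at_el0(). "forced" follows
     * __kpti_forced: <0 forced off (kpti=0 or Cavium 27456), 0 unset,
     * >0 forced on.
     */
    static int kpti_needed(int forced, int kaslr, int cpu_safe, int csv3)
    {
        if (forced)
            return forced > 0;   /* explicit override wins */
        if (kaslr)
            return 1;            /* useful for KASLR robustness */
        if (cpu_safe)
            return 0;            /* e.g. ThunderX2/Vulcan: not vulnerable */
        return csv3 == 0;        /* fall back to ID_AA64PFR0_EL1.CSV3 */
    }

    int main(void)
    {
        printf("%d\n", kpti_needed(-1, 1, 0, 0)); /* 0: kpti=0 beats KASLR */
        printf("%d\n", kpti_needed(0, 0, 1, 0));  /* 0: safe-listed part */
        printf("%d\n", kpti_needed(0, 0, 0, 0));  /* 1: CSV3 absent */
        return 0;
    }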
+ */ + if (!alternatives_applied) + write_sysreg(read_sysreg(tpidr_el1), tpidr_el2); + + return 0; +} + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", @@ -865,6 +965,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_HAS_VIRT_HOST_EXTN, .def_scope = SCOPE_SYSTEM, .matches = runs_at_el2, + .enable = cpu_copy_el2regs, }, { .desc = "32-bit EL0 Support", @@ -882,6 +983,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .def_scope = SCOPE_SYSTEM, .matches = hyp_offset_low, }, +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + { + .desc = "Kernel page table isolation (KPTI)", + .capability = ARM64_UNMAP_KERNEL_AT_EL0, + .def_scope = SCOPE_SYSTEM, + .matches = unmap_kernel_at_el0, + .enable = kpti_install_ng_mappings, + }, +#endif { /* FP/SIMD is not implemented */ .capability = ARM64_HAS_NO_FPSIMD, @@ -1000,6 +1110,25 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps) cap_set_elf_hwcap(hwcaps); } +/* + * Check if the current CPU has a given feature capability. + * Should be called from non-preemptible context. + */ +static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array, + unsigned int cap) +{ + const struct arm64_cpu_capabilities *caps; + + if (WARN_ON(preemptible())) + return false; + + for (caps = cap_array; caps->matches; caps++) + if (caps->capability == cap && + caps->matches(caps, SCOPE_LOCAL_CPU)) + return true; + return false; +} + void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, const char *info) { @@ -1035,7 +1164,7 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) * uses an IPI, giving us a PSTATE that disappears when * we return. */ - stop_machine(caps->enable, NULL, cpu_online_mask); + stop_machine(caps->enable, (void *)caps, cpu_online_mask); } } } @@ -1078,8 +1207,9 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps) } static void -verify_local_cpu_features(const struct arm64_cpu_capabilities *caps) +verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list) { + const struct arm64_cpu_capabilities *caps = caps_list; for (; caps->matches; caps++) { if (!cpus_have_cap(caps->capability)) continue; @@ -1087,13 +1217,13 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps) * If the new CPU misses an advertised feature, we cannot proceed * further, park the cpu. */ - if (!caps->matches(caps, SCOPE_LOCAL_CPU)) { + if (!__this_cpu_has_cap(caps_list, caps->capability)) { pr_crit("CPU%d: missing feature: %s\n", smp_processor_id(), caps->desc); cpu_die_early(); } if (caps->enable) - caps->enable(NULL); + caps->enable((void *)caps); } } @@ -1148,25 +1278,6 @@ static void __init mark_const_caps_ready(void) static_branch_enable(&arm64_const_caps_ready); } -/* - * Check if the current CPU has a given feature capability. - * Should be called from non-preemptible context. 
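Worth calling out in the __this_cpu_has_cap() rework above: the new copy terminates on a NULL ->matches and invokes it per entry, where the removed version (just below) stopped at the first entry without a ->desc; several of the new branch-predictor errata entries deliberately carry no description, so the old walk would have skipped them. A toy model of the corrected scan:

    #include <stdio.h>

    struct cap {
        int id;
        const char *desc;                 /* may legitimately be NULL */
        int (*matches)(const struct cap *);
    };

    static int always(const struct cap *c) { (void)c; return 1; }

    static const struct cap table[] = {
        { 1, "documented cap", always },
        { 2, NULL,             always },  /* undescribed, still real */
        { 0, NULL,             NULL   },  /* sentinel: NULL ->matches */
    };

    /* Walks to the ->matches sentinel, like the new __this_cpu_has_cap(). */
    static int this_cpu_has_cap(const struct cap *caps, int id)
    {
        for (; caps->matches; caps++)
            if (caps->id == id && caps->matches(caps))
                return 1;
        return 0;
    }

    int main(void)
    {
        /* a ->desc-terminated walk would have missed id 2 entirely */
        printf("%d %d\n", this_cpu_has_cap(table, 2),
                          this_cpu_has_cap(table, 9)); /* 1 0 */
        return 0;
    }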
- */ -static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array, - unsigned int cap) -{ - const struct arm64_cpu_capabilities *caps; - - if (WARN_ON(preemptible())) - return false; - - for (caps = cap_array; caps->desc; caps++) - if (caps->capability == cap && caps->matches) - return caps->matches(caps, SCOPE_LOCAL_CPU); - - return false; -} - extern const struct arm64_cpu_capabilities arm64_errata[]; bool this_cpu_has_cap(unsigned int cap) diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S index 4e6ad355bd05..6b9736c3fb56 100644 --- a/arch/arm64/kernel/efi-entry.S +++ b/arch/arm64/kernel/efi-entry.S @@ -96,6 +96,7 @@ ENTRY(entry) mrs x0, sctlr_el2 bic x0, x0, #1 << 0 // clear SCTLR.M bic x0, x0, #1 << 2 // clear SCTLR.C + pre_disable_mmu_workaround msr sctlr_el2, x0 isb b 2f @@ -103,6 +104,7 @@ ENTRY(entry) mrs x0, sctlr_el1 bic x0, x0, #1 << 0 // clear SCTLR.M bic x0, x0, #1 << 2 // clear SCTLR.C + pre_disable_mmu_workaround msr sctlr_el1, x0 isb 2: diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index e1c59d4008a8..c1ffa95c0ad2 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -18,6 +18,7 @@ * along with this program. If not, see . */ +#include #include #include @@ -29,6 +30,8 @@ #include #include #include +#include +#include #include #include #include @@ -69,8 +72,21 @@ #define BAD_FIQ 2 #define BAD_ERROR 3 - .macro kernel_ventry label + .macro kernel_ventry, el, label, regsize = 64 .align 7 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +alternative_if ARM64_UNMAP_KERNEL_AT_EL0 + .if \el == 0 + .if \regsize == 64 + mrs x30, tpidrro_el0 + msr tpidrro_el0, xzr + .else + mov x30, xzr + .endif + .endif +alternative_else_nop_endif +#endif + sub sp, sp, #S_FRAME_SIZE #ifdef CONFIG_VMAP_STACK /* @@ -82,7 +98,7 @@ tbnz x0, #THREAD_SHIFT, 0f sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0 sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp - b \label + b el\()\el\()_\label 0: /* @@ -114,7 +130,31 @@ sub sp, sp, x0 mrs x0, tpidrro_el0 #endif - b \label + b el\()\el\()_\label + .endm + + .macro tramp_alias, dst, sym + mov_q \dst, TRAMP_VALIAS + add \dst, \dst, #(\sym - .entry.tramp.text) + .endm + + // This macro corrupts x0-x3. It is the caller's duty + // to save/restore them if required. + .macro apply_ssbd, state, targ, tmp1, tmp2 +#ifdef CONFIG_ARM64_SSBD +alternative_cb arm64_enable_wa2_handling + b \targ +alternative_cb_end + ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1 + cbz \tmp2, \targ + ldr \tmp2, [tsk, #TSK_TI_FLAGS] + tbnz \tmp2, #TIF_SSBD, \targ + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 + mov w1, #\state +alternative_cb arm64_update_smccc_conduit + nop // Patched to SMC/HVC #0 +alternative_cb_end +#endif .endm .macro kernel_entry, el, regsize = 64 @@ -143,14 +183,22 @@ ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug disable_step_tsk x19, x20 // exceptions when scheduling. 
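The apply_ssbd macro defined above bails out three ways before reaching firmware: arm64_enable_wa2_handling can NOP the whole sequence when dynamic handling is off, the per-CPU arm64_ssbd_callback_required flag skips CPUs that never need the call, and TIF_SSBD skips the toggle for tasks that asked via prctl to keep the mitigation on across userspace. In rough C, with firmware_set_ssbd() as a hypothetical stand-in for the SMC/HVC patched in by arm64_update_smccc_conduit():

    #include <stdio.h>

    static void firmware_set_ssbd(int state)   /* hypothetical stand-in */
    {
        printf("ARCH_WORKAROUND_2(%d)\n", state);
    }

    static void apply_ssbd(int state, int dynamic, int cpu_needs_cb, int tif_ssbd)
    {
        if (!dynamic)          /* wa2 handling NOP'd out by the callback */
            return;
        if (!cpu_needs_cb)     /* per-CPU flag: firmware says never needed */
            return;
        if (tif_ssbd)          /* task keeps mitigation on; nothing to toggle */
            return;
        firmware_set_ssbd(state);
    }

    int main(void)
    {
        apply_ssbd(1, 1, 1, 0); /* kernel entry: turn mitigation on */
        apply_ssbd(0, 1, 1, 1); /* exit, task wants it always-on: silent */
        return 0;
    }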
+ apply_ssbd 1, 1f, x22, x23 + +#ifdef CONFIG_ARM64_SSBD + ldp x0, x1, [sp, #16 * 0] + ldp x2, x3, [sp, #16 * 1] +#endif +1: + mov x29, xzr // fp pointed to user-space .else add x21, sp, #S_FRAME_SIZE get_thread_info tsk - /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */ + /* Save the task's original addr_limit and set USER_DS */ ldr x20, [tsk, #TSK_TI_ADDR_LIMIT] str x20, [sp, #S_ORIG_ADDR_LIMIT] - mov x20, #TASK_SIZE_64 + mov x20, #USER_DS str x20, [tsk, #TSK_TI_ADDR_LIMIT] /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */ .endif /* \el == 0 */ @@ -185,7 +233,7 @@ alternative_else_nop_endif .if \el != 0 mrs x21, ttbr0_el1 - tst x21, #0xffff << 48 // Check for the reserved ASID + tst x21, #TTBR_ASID_MASK // Check for the reserved ASID orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR b.eq 1f // TTBR0 access already disabled and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR @@ -246,7 +294,7 @@ alternative_else_nop_endif tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set .endif - __uaccess_ttbr0_enable x0 + __uaccess_ttbr0_enable x0, x1 .if \el == 0 /* @@ -255,7 +303,7 @@ alternative_else_nop_endif * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache * corruption). */ - post_ttbr0_update_workaround + bl post_ttbr_update_workaround .endif 1: .if \el != 0 @@ -267,18 +315,22 @@ alternative_else_nop_endif .if \el == 0 ldr x23, [sp, #S_SP] // load return stack pointer msr sp_el0, x23 + tst x22, #PSR_MODE32_BIT // native task? + b.eq 3f + #ifdef CONFIG_ARM64_ERRATUM_845719 alternative_if ARM64_WORKAROUND_845719 - tbz x22, #4, 1f #ifdef CONFIG_PID_IN_CONTEXTIDR mrs x29, contextidr_el1 msr contextidr_el1, x29 #else msr contextidr_el1, xzr #endif -1: alternative_else_nop_endif #endif +3: + apply_ssbd 0, 5f, x0, x1 +5: .endif msr elr_el1, x21 // set up the return data @@ -300,7 +352,21 @@ alternative_else_nop_endif ldp x28, x29, [sp, #16 * 14] ldr lr, [sp, #S_LR] add sp, sp, #S_FRAME_SIZE // restore sp - eret // return to kernel + + .if \el == 0 +alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + bne 4f + msr far_el1, x30 + tramp_alias x30, tramp_exit_native + br x30 +4: + tramp_alias x30, tramp_exit_compat + br x30 +#endif + .else + eret + .endif .endm .macro irq_stack_entry @@ -340,6 +406,7 @@ alternative_else_nop_endif * x7 is reserved for the system call number in 32-bit mode. 
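In the kernel_exit path above, a single tst of the saved SPSR against PSR_MODE32_BIT decides both whether the erratum-845719 block applies (32-bit tasks only) and, further down, which trampoline exit to take; the later bne 4f reuses the flags from that one tst. The bit itself is M[4] of SPSR_EL1, set for AArch32 state. For concreteness:

    #include <stdio.h>

    #define PSR_MODE32_BIT (1u << 4)   /* SPSR M[4]: set for AArch32 state */

    static const char *exit_trampoline(unsigned int spsr)
    {
        return (spsr & PSR_MODE32_BIT) ? "tramp_exit_compat"
                                       : "tramp_exit_native";
    }

    int main(void)
    {
        printf("%s\n", exit_trampoline(0x0));  /* EL0t AArch64 */
        printf("%s\n", exit_trampoline(0x10)); /* AArch32 user */
        return 0;
    }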
*/ wsc_nr .req w25 // number of system calls +xsc_nr .req x25 // number of system calls (zero-extended) wscno .req w26 // syscall number xscno .req x26 // syscall number (zero-extended) stbl .req x27 // syscall table pointer @@ -365,31 +432,31 @@ tsk .req x28 // current thread_info .align 11 ENTRY(vectors) - kernel_ventry el1_sync_invalid // Synchronous EL1t - kernel_ventry el1_irq_invalid // IRQ EL1t - kernel_ventry el1_fiq_invalid // FIQ EL1t - kernel_ventry el1_error_invalid // Error EL1t + kernel_ventry 1, sync_invalid // Synchronous EL1t + kernel_ventry 1, irq_invalid // IRQ EL1t + kernel_ventry 1, fiq_invalid // FIQ EL1t + kernel_ventry 1, error_invalid // Error EL1t - kernel_ventry el1_sync // Synchronous EL1h - kernel_ventry el1_irq // IRQ EL1h - kernel_ventry el1_fiq_invalid // FIQ EL1h - kernel_ventry el1_error_invalid // Error EL1h + kernel_ventry 1, sync // Synchronous EL1h + kernel_ventry 1, irq // IRQ EL1h + kernel_ventry 1, fiq_invalid // FIQ EL1h + kernel_ventry 1, error_invalid // Error EL1h - kernel_ventry el0_sync // Synchronous 64-bit EL0 - kernel_ventry el0_irq // IRQ 64-bit EL0 - kernel_ventry el0_fiq_invalid // FIQ 64-bit EL0 - kernel_ventry el0_error_invalid // Error 64-bit EL0 + kernel_ventry 0, sync // Synchronous 64-bit EL0 + kernel_ventry 0, irq // IRQ 64-bit EL0 + kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0 + kernel_ventry 0, error_invalid // Error 64-bit EL0 #ifdef CONFIG_COMPAT - kernel_ventry el0_sync_compat // Synchronous 32-bit EL0 - kernel_ventry el0_irq_compat // IRQ 32-bit EL0 - kernel_ventry el0_fiq_invalid_compat // FIQ 32-bit EL0 - kernel_ventry el0_error_invalid_compat // Error 32-bit EL0 + kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0 + kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0 + kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0 + kernel_ventry 0, error_invalid_compat, 32 // Error 32-bit EL0 #else - kernel_ventry el0_sync_invalid // Synchronous 32-bit EL0 - kernel_ventry el0_irq_invalid // IRQ 32-bit EL0 - kernel_ventry el0_fiq_invalid // FIQ 32-bit EL0 - kernel_ventry el0_error_invalid // Error 32-bit EL0 + kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0 + kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0 + kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0 + kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0 #endif END(vectors) @@ -687,13 +754,15 @@ el0_ia: * Instruction abort handling */ mrs x26, far_el1 - // enable interrupts before calling the main handler - enable_dbg_and_irq + enable_dbg +#ifdef CONFIG_TRACE_IRQFLAGS + bl trace_hardirqs_off +#endif ct_user_exit mov x0, x26 mov x1, x25 mov x2, sp - bl do_mem_abort + bl do_el0_ia_bp_hardening b ret_to_user el0_fpsimd_acc: /* @@ -720,8 +789,10 @@ el0_sp_pc: * Stack or PC alignment exception handling */ mrs x26, far_el1 - // enable interrupts before calling the main handler - enable_dbg_and_irq + enable_dbg +#ifdef CONFIG_TRACE_IRQFLAGS + bl trace_hardirqs_off +#endif ct_user_exit mov x0, x26 mov x1, x25 @@ -780,6 +851,11 @@ el0_irq_naked: #endif ct_user_exit +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + tbz x22, #55, 1f + bl do_el0_irq_bp_hardening +1: +#endif irq_handler #ifdef CONFIG_TRACE_IRQFLAGS @@ -848,6 +924,7 @@ el0_svc_naked: // compat entry point b.ne __sys_trace cmp wscno, wsc_nr // check upper syscall limit b.hs ni_sys + mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number ldr x16, [stbl, xscno, lsl #3] // address in the syscall table blr x16 // call sys_* routine b ret_fast_syscall @@ -895,6 +972,117 @@ __ni_sys_trace: 
.popsection // .entry.text +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +/* + * Exception vectors trampoline. + */ + .pushsection ".entry.tramp.text", "ax" + + .macro tramp_map_kernel, tmp + mrs \tmp, ttbr1_el1 + sub \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) + bic \tmp, \tmp, #USER_ASID_FLAG + msr ttbr1_el1, \tmp +#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 +alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003 + /* ASID already in \tmp[63:48] */ + movk \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12) + movk \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12) + /* 2MB boundary containing the vectors, so we nobble the walk cache */ + movk \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12) + isb + tlbi vae1, \tmp + dsb nsh +alternative_else_nop_endif +#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */ + .endm + + .macro tramp_unmap_kernel, tmp + mrs \tmp, ttbr1_el1 + add \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) + orr \tmp, \tmp, #USER_ASID_FLAG + msr ttbr1_el1, \tmp + /* + * We avoid running the post_ttbr_update_workaround here because + * it's only needed by Cavium ThunderX, which requires KPTI to be + * disabled. + */ + .endm + + .macro tramp_ventry, regsize = 64 + .align 7 +1: + .if \regsize == 64 + msr tpidrro_el0, x30 // Restored in kernel_ventry + .endif + /* + * Defend against branch aliasing attacks by pushing a dummy + * entry onto the return stack and using a RET instruction to + * enter the full-fat kernel vectors. + */ + bl 2f + b . +2: + tramp_map_kernel x30 +#ifdef CONFIG_RANDOMIZE_BASE + adr x30, tramp_vectors + PAGE_SIZE +alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 + ldr x30, [x30] +#else + ldr x30, =vectors +#endif + prfm plil1strm, [x30, #(1b - tramp_vectors)] + msr vbar_el1, x30 + add x30, x30, #(1b - tramp_vectors) + isb + ret + .endm + + .macro tramp_exit, regsize = 64 + adr x30, tramp_vectors + msr vbar_el1, x30 + tramp_unmap_kernel x30 + .if \regsize == 64 + mrs x30, far_el1 + .endif + eret + .endm + + .align 11 +ENTRY(tramp_vectors) + .space 0x400 + + tramp_ventry + tramp_ventry + tramp_ventry + tramp_ventry + + tramp_ventry 32 + tramp_ventry 32 + tramp_ventry 32 + tramp_ventry 32 +END(tramp_vectors) + +ENTRY(tramp_exit_native) + tramp_exit +END(tramp_exit_native) + +ENTRY(tramp_exit_compat) + tramp_exit 32 +END(tramp_exit_compat) + + .ltorg + .popsection // .entry.tramp.text +#ifdef CONFIG_RANDOMIZE_BASE + .pushsection ".rodata", "a" + .align PAGE_SHIFT + .globl __entry_tramp_data_start +__entry_tramp_data_start: + .quad vectors + .popsection // .rodata +#endif /* CONFIG_RANDOMIZE_BASE */ +#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ + /* * Special system call wrappers. */ diff --git a/arch/arm64/kernel/ftrace-mod.S b/arch/arm64/kernel/ftrace-mod.S deleted file mode 100644 index 00c4025be4ff..000000000000 --- a/arch/arm64/kernel/ftrace-mod.S +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright (C) 2017 Linaro Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
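The tramp_map_kernel/tramp_unmap_kernel pair above loads nothing from memory: the trampoline page tables are linked at a fixed offset after swapper_pg_dir (see the vmlinux.lds.S hunk later in this patch), so switching TTBR1_EL1 is pure arithmetic plus flipping the user-ASID bit. That matters because the swap runs while only the trampoline page is guaranteed to be mapped. A standalone model of the arithmetic (the sizes below are placeholders, not the kernel's real values):

    #include <stdio.h>
    #include <stdint.h>

    #define SWAPPER_DIR_SIZE    (3 * 4096ull)
    #define RESERVED_TTBR0_SIZE (1 * 4096ull)
    #define USER_ASID_FLAG      (1ull << 48)  /* ASID bit 0 in TTBR[63:48] */

    static uint64_t tramp_map_kernel(uint64_t ttbr1)
    {
        ttbr1 -= SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE; /* back to swapper */
        return ttbr1 & ~USER_ASID_FLAG;                  /* kernel ASID */
    }

    static uint64_t tramp_unmap_kernel(uint64_t ttbr1)
    {
        ttbr1 += SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE; /* trampoline tables */
        return ttbr1 | USER_ASID_FLAG;                   /* user ASID */
    }

    int main(void)
    {
        uint64_t user = 0x40000000ull | USER_ASID_FLAG;
        uint64_t kern = tramp_map_kernel(user);

        printf("%#llx -> %#llx -> %#llx\n",
               (unsigned long long)user,
               (unsigned long long)kern,
               (unsigned long long)tramp_unmap_kernel(kern)); /* round trip */
        return 0;
    }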
- */ - -#include -#include - - .section ".text.ftrace_trampoline", "ax" - .align 3 -0: .quad 0 -__ftrace_trampoline: - ldr x16, 0b - br x16 -ENDPROC(__ftrace_trampoline) diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index c13b1fca0e5b..50986e388d2b 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) if (offset < -SZ_128M || offset >= SZ_128M) { #ifdef CONFIG_ARM64_MODULE_PLTS - unsigned long *trampoline; + struct plt_entry trampoline; struct module *mod; /* @@ -104,22 +104,24 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) * is added in the future, but for now, the pr_err() below * deals with a theoretical issue only. */ - trampoline = (unsigned long *)mod->arch.ftrace_trampoline; - if (trampoline[0] != addr) { - if (trampoline[0] != 0) { + trampoline = get_plt_entry(addr); + if (!plt_entries_equal(mod->arch.ftrace_trampoline, + &trampoline)) { + if (!plt_entries_equal(mod->arch.ftrace_trampoline, + &(struct plt_entry){})) { pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n"); return -EINVAL; } /* point the trampoline to our ftrace entry point */ module_disable_ro(mod); - trampoline[0] = addr; + *mod->arch.ftrace_trampoline = trampoline; module_enable_ro(mod, true); /* update trampoline before patching in the branch */ smp_wmb(); } - addr = (unsigned long)&trampoline[1]; + addr = (unsigned long)(void *)mod->arch.ftrace_trampoline; #else /* CONFIG_ARM64_MODULE_PLTS */ return -EINVAL; #endif /* CONFIG_ARM64_MODULE_PLTS */ diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 0b243ecaf7ac..261f3f88364c 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -371,7 +371,7 @@ ENDPROC(__primary_switched) * end early head section, begin head code that is also used for * hotplug and needs to have the same protections as the text region */ - .section ".idmap.text","ax" + .section ".idmap.text","awx" ENTRY(kimage_vaddr) .quad _text - TEXT_OFFSET @@ -732,6 +732,7 @@ __primary_switch: * to take into account by discarding the current kernel mapping and * creating a new one. */ + pre_disable_mmu_workaround msr sctlr_el1, x20 // disable the MMU isb bl __create_page_tables // recreate kernel mapping diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 095d3c170f5d..a028cc95afe1 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -313,6 +313,17 @@ int swsusp_arch_suspend(void) sleep_cpu = -EINVAL; __cpu_suspend_exit(); + + /* + * Just in case the boot kernel did turn the SSBD + * mitigation off behind our back, let's set the state + * to what we expect it to be. 
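The hibernate hunk above exists because resume re-enters through the boot kernel, which may have left the SSBD mitigation in either state; rather than trusting it, the switch re-asserts the mitigation whenever the saved policy is force-enabled or kernel-dynamic. The same rule as a standalone sketch (state names are local to the example; the real enum lives in the arm64 headers):

    #include <stdio.h>

    enum ssbd { SSBD_FORCE_DISABLE, SSBD_KERNEL, SSBD_FORCE_ENABLE, SSBD_OTHER };

    static void set_mitigation(int on)   /* stand-in for the SMCCC call */
    {
        printf("mitigation %s\n", on ? "on" : "off");
    }

    /* Mirrors the swsusp_arch_suspend() hunk: trust policy, not firmware. */
    static void ssbd_restore(enum ssbd state)
    {
        switch (state) {
        case SSBD_FORCE_ENABLE:
        case SSBD_KERNEL:
            set_mitigation(1);
            break;
        default:
            break;   /* forced off / mitigated / unknown: leave alone */
        }
    }

    int main(void)
    {
        ssbd_restore(SSBD_KERNEL);        /* re-asserted */
        ssbd_restore(SSBD_FORCE_DISABLE); /* untouched */
        return 0;
    }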
+ */ + switch (arm64_get_ssbd_state()) { + case ARM64_SSBD_FORCE_ENABLE: + case ARM64_SSBD_KERNEL: + arm64_set_ssbd_mitigation(true); + } } local_dbg_restore(flags); diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c index 354be2a872ae..79b17384effa 100644 --- a/arch/arm64/kernel/io.c +++ b/arch/arm64/kernel/io.c @@ -25,8 +25,7 @@ */ void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) { - while (count && (!IS_ALIGNED((unsigned long)from, 8) || - !IS_ALIGNED((unsigned long)to, 8))) { + while (count && !IS_ALIGNED((unsigned long)from, 8)) { *(u8 *)to = __raw_readb(from); from++; to++; @@ -54,23 +53,22 @@ EXPORT_SYMBOL(__memcpy_fromio); */ void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count) { - while (count && (!IS_ALIGNED((unsigned long)to, 8) || - !IS_ALIGNED((unsigned long)from, 8))) { - __raw_writeb(*(volatile u8 *)from, to); + while (count && !IS_ALIGNED((unsigned long)to, 8)) { + __raw_writeb(*(u8 *)from, to); from++; to++; count--; } while (count >= 8) { - __raw_writeq(*(volatile u64 *)from, to); + __raw_writeq(*(u64 *)from, to); from += 8; to += 8; count -= 8; } while (count) { - __raw_writeb(*(volatile u8 *)from, to); + __raw_writeb(*(u8 *)from, to); from++; to++; count--; diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index d05dbe658409..ea640f92fe5a 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -11,21 +11,6 @@ #include #include -struct plt_entry { - /* - * A program that conforms to the AArch64 Procedure Call Standard - * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or - * IP1 (x17) may be inserted at any branch instruction that is - * exposed to a relocation that supports long branches. Since that - * is exactly what we are dealing with here, we are free to use x16 - * as a scratch register in the PLT veneers. - */ - __le32 mov0; /* movn x16, #0x.... */ - __le32 mov1; /* movk x16, #0x...., lsl #16 */ - __le32 mov2; /* movk x16, #0x...., lsl #32 */ - __le32 br; /* br x16 */ -}; - static bool in_init(const struct module *mod, void *loc) { return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size; @@ -40,33 +25,14 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, int i = pltsec->plt_num_entries; u64 val = sym->st_value + rela->r_addend; - /* - * MOVK/MOVN/MOVZ opcode: - * +--------+------------+--------+-----------+-------------+---------+ - * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] | - * +--------+------------+--------+-----------+-------------+---------+ - * - * Rd := 0x10 (x16) - * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32) - * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ) - * sf := 1 (64-bit variant) - */ - plt[i] = (struct plt_entry){ - cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5), - cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5), - cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5), - cpu_to_le32(0xd61f0200) - }; + plt[i] = get_plt_entry(val); /* * Check if the entry we just created is a duplicate. Given that the * relocations are sorted, this will be the last entry we allocated. * (if one exists). 
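The open-coded movn/movk/movk/br veneer being removed here (it moves behind get_plt_entry()/plt_entries_equal(), defined elsewhere in this series) is worth seeing whole: movn seeds x16 with the inverted low 16 bits, so after the two movk instructions patch bits 16-47, bits 63:48 are left all-ones, exactly what a kernel virtual address needs. A standalone encoder using the same opcodes quoted in the comment above:

    #include <stdio.h>
    #include <stdint.h>

    struct plt_entry { uint32_t mov0, mov1, mov2, br; };

    static struct plt_entry make_plt_entry(uint64_t val)
    {
        struct plt_entry e = {
            0x92800010u | (uint32_t)((~val         & 0xffff) << 5), /* movn x16 */
            0xf2a00010u | (uint32_t)(((val >> 16) & 0xffff) << 5),  /* movk, lsl #16 */
            0xf2c00010u | (uint32_t)(((val >> 32) & 0xffff) << 5),  /* movk, lsl #32 */
            0xd61f0200u                                             /* br x16 */
        };
        return e;
    }

    /* ->br is constant, so only the three immediates identify a target. */
    static int plt_entries_equal(const struct plt_entry *a,
                                 const struct plt_entry *b)
    {
        return a->mov0 == b->mov0 && a->mov1 == b->mov1 && a->mov2 == b->mov2;
    }

    int main(void)
    {
        struct plt_entry a = make_plt_entry(0xffff000008123456ull);
        struct plt_entry b = make_plt_entry(0xffff000008123456ull);

        printf("mov0=%08x equal=%d\n", a.mov0, plt_entries_equal(&a, &b));
        return 0;
    }

Ignoring ->br in the comparison is what allows the duplicate-entry check in module_emit_plt_entry() to be a plain three-field compare.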
*/ - if (i > 0 && - plt[i].mov0 == plt[i - 1].mov0 && - plt[i].mov1 == plt[i - 1].mov1 && - plt[i].mov2 == plt[i - 1].mov2) + if (i > 0 && plt_entries_equal(plt + i, plt + i - 1)) return (u64)&plt[i - 1]; pltsec->plt_num_entries++; @@ -154,6 +120,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, unsigned long core_plts = 0; unsigned long init_plts = 0; Elf64_Sym *syms = NULL; + Elf_Shdr *tramp = NULL; int i; /* @@ -165,6 +132,10 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, mod->arch.core.plt = sechdrs + i; else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt")) mod->arch.init.plt = sechdrs + i; + else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) && + !strcmp(secstrings + sechdrs[i].sh_name, + ".text.ftrace_trampoline")) + tramp = sechdrs + i; else if (sechdrs[i].sh_type == SHT_SYMTAB) syms = (Elf64_Sym *)sechdrs[i].sh_addr; } @@ -215,5 +186,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, mod->arch.init.plt_num_entries = 0; mod->arch.init.plt_max_entries = init_plts; + if (tramp) { + tramp->sh_type = SHT_NOBITS; + tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC; + tramp->sh_addralign = __alignof__(struct plt_entry); + tramp->sh_size = sizeof(struct plt_entry); + } + return 0; } diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds index f7c9781a9d48..99eb7c292494 100644 --- a/arch/arm64/kernel/module.lds +++ b/arch/arm64/kernel/module.lds @@ -1,4 +1,5 @@ SECTIONS { - .plt (NOLOAD) : { BYTE(0) } - .init.plt (NOLOAD) : { BYTE(0) } + .plt : { BYTE(0) } + .init.plt : { BYTE(0) } + .text.ftrace_trampoline : { BYTE(0) } } diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 9eaef51f83ff..1984e739f155 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -914,9 +914,9 @@ static void __armv8pmu_probe_pmu(void *info) int pmuver; dfr0 = read_sysreg(id_aa64dfr0_el1); - pmuver = cpuid_feature_extract_signed_field(dfr0, + pmuver = cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMUVER_SHIFT); - if (pmuver < 1) + if (pmuver == 0xf || pmuver == 0) return; probe->present = true; diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 2dc0f8482210..e5d670a80fa4 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -170,6 +170,70 @@ void machine_restart(char *cmd) while (1); } +/* + * dump a block of kernel memory from around the given address + */ +static void show_data(unsigned long addr, int nbytes, const char *name) +{ + int i, j; + int nlines; + u32 *p; + + /* + * don't attempt to dump non-kernel addresses or + * values that are probably just small negative numbers + */ + if (addr < PAGE_OFFSET || addr > -256UL) + return; + + printk("\n%s: %#lx:\n", name, addr); + + /* + * round address down to a 32 bit boundary + * and always dump a multiple of 32 bytes + */ + p = (u32 *)(addr & ~(sizeof(u32) - 1)); + nbytes += (addr & (sizeof(u32) - 1)); + nlines = (nbytes + 31) / 32; + + + for (i = 0; i < nlines; i++) { + /* + * just display low 16 bits of address to keep + * each line of the dump < 80 characters + */ + printk("%04lx ", (unsigned long)p & 0xffff); + for (j = 0; j < 8; j++) { + u32 data; + if (probe_kernel_address(p, data)) { + pr_cont(" ********"); + } else { + pr_cont(" %08x", data); + } + ++p; + } + pr_cont("\n"); + } +} + +static void show_extra_register_data(struct pt_regs *regs, int nbytes) +{ + mm_segment_t fs; + unsigned int i; + + fs = get_fs(); + set_fs(KERNEL_DS); + show_data(regs->pc - nbytes, 
nbytes * 2, "PC"); + show_data(regs->regs[30] - nbytes, nbytes * 2, "LR"); + show_data(regs->sp - nbytes, nbytes * 2, "SP"); + for (i = 0; i < 30; i++) { + char name[4]; + snprintf(name, sizeof(name), "X%u", i); + show_data(regs->regs[i] - nbytes, nbytes * 2, name); + } + set_fs(fs); +} + void __show_regs(struct pt_regs *regs) { int i, top_reg; @@ -205,6 +269,9 @@ void __show_regs(struct pt_regs *regs) pr_cont("\n"); } + if (!user_mode(regs)) + show_extra_register_data(regs, 128); + printk("\n"); } void show_regs(struct pt_regs * regs) @@ -258,6 +325,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); + /* + * In case p was allocated the same task_struct pointer as some + * other recently-exited task, make sure p is disassociated from + * any cpu that may have run that now-exited task recently. + * Otherwise we could erroneously skip reloading the FPSIMD + * registers for p. + */ + fpsimd_flush_task_state(p); + if (likely(!(p->flags & PF_KTHREAD))) { *childregs = *current_pt_regs(); childregs->regs[0] = 0; @@ -305,16 +381,14 @@ void tls_preserve_current_state(void) static void tls_thread_switch(struct task_struct *next) { - unsigned long tpidr, tpidrro; - tls_preserve_current_state(); - tpidr = *task_user_tls(next); - tpidrro = is_compat_thread(task_thread_info(next)) ? - next->thread.tp_value : 0; + if (is_compat_thread(task_thread_info(next))) + write_sysreg(next->thread.tp_value, tpidrro_el0); + else if (!arm64_kernel_unmapped_at_el0()) + write_sysreg(0, tpidrro_el0); - write_sysreg(tpidr, tpidr_el0); - write_sysreg(tpidrro, tpidrro_el0); + write_sysreg(*task_user_tls(next), tpidr_el0); } /* Restore the UAO state depending on next's addr_limit */ diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 9cbb6123208f..edaf346d13d5 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -247,15 +248,20 @@ static struct perf_event *ptrace_hbp_get_event(unsigned int note_type, switch (note_type) { case NT_ARM_HW_BREAK: - if (idx < ARM_MAX_BRP) - bp = tsk->thread.debug.hbp_break[idx]; + if (idx >= ARM_MAX_BRP) + goto out; + idx = array_index_nospec(idx, ARM_MAX_BRP); + bp = tsk->thread.debug.hbp_break[idx]; break; case NT_ARM_HW_WATCH: - if (idx < ARM_MAX_WRP) - bp = tsk->thread.debug.hbp_watch[idx]; + if (idx >= ARM_MAX_WRP) + goto out; + idx = array_index_nospec(idx, ARM_MAX_WRP); + bp = tsk->thread.debug.hbp_watch[idx]; break; } +out: return bp; } @@ -1194,9 +1200,7 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num, { int ret; u32 kdata; - mm_segment_t old_fs = get_fs(); - set_fs(KERNEL_DS); /* Watchpoint */ if (num < 0) { ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata); @@ -1207,7 +1211,6 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num, } else { ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata); } - set_fs(old_fs); if (!ret) ret = put_user(kdata, data); @@ -1220,7 +1223,6 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num, { int ret; u32 kdata = 0; - mm_segment_t old_fs = get_fs(); if (num == 0) return 0; @@ -1229,12 +1231,10 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num, if (ret) return ret; - set_fs(KERNEL_DS); if (num < 0) ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata); else ret = 
compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata); - set_fs(old_fs); return ret; } diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index ce704a4aeadd..f407e422a720 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S @@ -45,6 +45,7 @@ ENTRY(arm64_relocate_new_kernel) mrs x0, sctlr_el2 ldr x1, =SCTLR_ELx_FLAGS bic x0, x0, x1 + pre_disable_mmu_workaround msr sctlr_el2, x0 isb 1: diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 0bdc96c61bc0..43442b3a463f 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -676,11 +676,12 @@ static void do_signal(struct pt_regs *regs) unsigned long continue_addr = 0, restart_addr = 0; int retval = 0; struct ksignal ksig; + bool syscall = in_syscall(regs); /* * If we were from a system call, check for system call restarting... */ - if (in_syscall(regs)) { + if (syscall) { continue_addr = regs->pc; restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4); retval = regs->regs[0]; @@ -732,7 +733,7 @@ static void do_signal(struct pt_regs *regs) * Handle restarting a different system call. As above, if a debugger * has chosen to restart at a different PC, ignore the restart. */ - if (in_syscall(regs) && regs->pc == restart_addr) { + if (syscall && regs->pc == restart_addr) { if (retval == -ERESTART_RESTARTBLOCK) setup_restart_syscall(regs); user_rewind_single_step(current); diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index 10dd16d7902d..bebec8ef9372 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -96,7 +96,7 @@ ENTRY(__cpu_suspend_enter) ret ENDPROC(__cpu_suspend_enter) - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" ENTRY(cpu_resume) bl el2_setup // if in EL2 drop to EL1 cleanly bl __cpu_setup diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c new file mode 100644 index 000000000000..0560738c1d5c --- /dev/null +++ b/arch/arm64/kernel/ssbd.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 ARM Ltd, All Rights Reserved. + */ + +#include +#include +#include +#include + +#include + +/* + * prctl interface for SSBD + */ +static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl) +{ + int state = arm64_get_ssbd_state(); + + /* Unsupported */ + if (state == ARM64_SSBD_UNKNOWN) + return -EINVAL; + + /* Treat the unaffected/mitigated state separately */ + if (state == ARM64_SSBD_MITIGATED) { + switch (ctrl) { + case PR_SPEC_ENABLE: + return -EPERM; + case PR_SPEC_DISABLE: + case PR_SPEC_FORCE_DISABLE: + return 0; + } + } + + /* + * Things are a bit backward here: the arm64 internal API + * *enables the mitigation* when the userspace API *disables + * speculation*. So much fun. 
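The inversion described in the comment above is visible from userspace: a task turns the SSBD mitigation on by asking prctl() to disable store-bypass speculation. A minimal caller (constants as in linux/prctl.h; the #ifndef guard is only for older uapi headers):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_SPECULATION_CTRL
    #define PR_GET_SPECULATION_CTRL 52
    #define PR_SET_SPECULATION_CTRL 53
    #define PR_SPEC_STORE_BYPASS    0
    #define PR_SPEC_DISABLE         (1UL << 2)
    #endif

    int main(void)
    {
        /* "disable speculation" == turn the SSBD mitigation ON (TIF_SSBD) */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE, 0, 0))
            perror("PR_SET_SPECULATION_CTRL");

        printf("state: %d\n",
               prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0));
        return 0;
    }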
+ */ + switch (ctrl) { + case PR_SPEC_ENABLE: + /* If speculation is force disabled, enable is not allowed */ + if (state == ARM64_SSBD_FORCE_ENABLE || + task_spec_ssb_force_disable(task)) + return -EPERM; + task_clear_spec_ssb_disable(task); + clear_tsk_thread_flag(task, TIF_SSBD); + break; + case PR_SPEC_DISABLE: + if (state == ARM64_SSBD_FORCE_DISABLE) + return -EPERM; + task_set_spec_ssb_disable(task); + set_tsk_thread_flag(task, TIF_SSBD); + break; + case PR_SPEC_FORCE_DISABLE: + if (state == ARM64_SSBD_FORCE_DISABLE) + return -EPERM; + task_set_spec_ssb_disable(task); + task_set_spec_ssb_force_disable(task); + set_tsk_thread_flag(task, TIF_SSBD); + break; + default: + return -ERANGE; + } + + return 0; +} + +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, + unsigned long ctrl) +{ + switch (which) { + case PR_SPEC_STORE_BYPASS: + return ssbd_prctl_set(task, ctrl); + default: + return -ENODEV; + } +} + +static int ssbd_prctl_get(struct task_struct *task) +{ + switch (arm64_get_ssbd_state()) { + case ARM64_SSBD_UNKNOWN: + return -EINVAL; + case ARM64_SSBD_FORCE_ENABLE: + return PR_SPEC_DISABLE; + case ARM64_SSBD_KERNEL: + if (task_spec_ssb_force_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; + if (task_spec_ssb_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE; + return PR_SPEC_PRCTL | PR_SPEC_ENABLE; + case ARM64_SSBD_FORCE_DISABLE: + return PR_SPEC_ENABLE; + default: + return PR_SPEC_NOT_AFFECTED; + } +} + +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) +{ + switch (which) { + case PR_SPEC_STORE_BYPASS: + return ssbd_prctl_get(task); + default: + return -ENODEV; + } +} diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 76809ccd309c..d5718a060672 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -59,6 +59,11 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) #ifdef CONFIG_FUNCTION_GRAPH_TRACER if (tsk->ret_stack && (frame->pc == (unsigned long)return_to_handler)) { + if (WARN_ON_ONCE(frame->graph == -1)) + return -EINVAL; + if (frame->graph < -1) + frame->graph += FTRACE_NOTRACE_DEPTH; + /* * This is a case where function graph tracer has * modified a return address (LR) in a stack frame diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 77cd655e6eb7..7a655e60cf4b 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -62,6 +62,14 @@ void notrace __cpu_suspend_exit(void) */ if (hw_breakpoint_restore) hw_breakpoint_restore(cpu); + + /* + * On resume, firmware implementing dynamic mitigation will + * have turned the mitigation on. If the user has forcefully + * disabled it, make sure their wishes are obeyed. 
+ */ + if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) + arm64_set_ssbd_mitigation(false); } /* diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c index a4391280fba9..f258636273c9 100644 --- a/arch/arm64/kernel/time.c +++ b/arch/arm64/kernel/time.c @@ -52,7 +52,7 @@ unsigned long profile_pc(struct pt_regs *regs) frame.fp = regs->regs[29]; frame.pc = regs->pc; #ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame.graph = -1; /* no task info */ + frame.graph = current->curr_ret_stack; #endif do { int ret = unwind_frame(NULL, &frame); diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 8d48b233e6ce..6eb9350be46f 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -280,8 +281,58 @@ void store_cpu_topology(unsigned int cpuid) topology_populated: update_siblings_masks(cpuid); + topology_detect_flags(); } +#ifdef CONFIG_SCHED_SMT +static int smt_flags(void) +{ + return cpu_smt_flags() | topology_smt_flags(); +} +#endif + +#ifdef CONFIG_SCHED_MC +static int core_flags(void) +{ + return cpu_core_flags() | topology_core_flags(); +} +#endif + +static int cpu_flags(void) +{ + return topology_cpu_flags(); +} + +static inline +const struct sched_group_energy * const cpu_core_energy(int cpu) +{ + return sge_array[cpu][SD_LEVEL0]; +} + +static inline +const struct sched_group_energy * const cpu_cluster_energy(int cpu) +{ + return sge_array[cpu][SD_LEVEL1]; +} + +static inline +const struct sched_group_energy * const cpu_system_energy(int cpu) +{ + return sge_array[cpu][SD_LEVEL2]; +} + +static struct sched_domain_topology_level arm64_topology[] = { +#ifdef CONFIG_SCHED_SMT + { cpu_smt_mask, smt_flags, SD_INIT_NAME(SMT) }, +#endif +#ifdef CONFIG_SCHED_MC + { cpu_coregroup_mask, core_flags, cpu_core_energy, SD_INIT_NAME(MC) }, +#endif + { cpu_cpu_mask, cpu_flags, cpu_cluster_energy, SD_INIT_NAME(DIE) }, + { cpu_cpu_mask, NULL, cpu_system_energy, SD_INIT_NAME(SYS) }, + { NULL, } +}; + static void __init reset_cpu_topology(void) { unsigned int cpu; @@ -310,4 +361,6 @@ void __init init_cpu_topology(void) */ if (of_have_populated_dt() && parse_dt_topology()) reset_cpu_topology(); + else + set_sched_topology(arm64_topology); } diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 8383af15a759..4fc0e958770b 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -56,7 +56,7 @@ static const char *handler[]= { "Error" }; -int show_unhandled_signals = 1; +int show_unhandled_signals = 0; /* * Dump out the contents of some kernel memory nicely... 
@@ -573,14 +573,6 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs) } #endif - if (show_unhandled_signals_ratelimited()) { - pr_info("%s[%d]: syscall %d\n", current->comm, - task_pid_nr(current), regs->syscallno); - dump_instr("", regs); - if (user_mode(regs)) - __show_regs(regs); - } - return sys_ni_syscall(); } diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index b215c712d897..ef3f9d9d4062 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -15,6 +15,7 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) ccflags-y := -shared -fno-common -fno-builtin ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) +ccflags-y += $(DISABLE_LTO) # Disable gcov profiling for VDSO code GCOV_PROFILE := n diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index 76320e920965..c39872a7b03c 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S @@ -309,7 +309,7 @@ ENTRY(__kernel_clock_getres) b.ne 4f ldr x2, 6f 2: - cbz w1, 3f + cbz x1, 3f stp xzr, x2, [x1] 3: /* res == NULL. */ diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 7da3e5c366a0..cb3e4393b412 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -57,6 +57,17 @@ jiffies = jiffies_64; #define HIBERNATE_TEXT #endif +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +#define TRAMP_TEXT \ + . = ALIGN(PAGE_SIZE); \ + VMLINUX_SYMBOL(__entry_tramp_text_start) = .; \ + KEEP(*(.entry.tramp.text)) \ + . = ALIGN(PAGE_SIZE); \ + VMLINUX_SYMBOL(__entry_tramp_text_end) = .; +#else +#define TRAMP_TEXT +#endif + /* * The size of the PE/COFF section that covers the kernel image, which * runs from stext to _edata, must be a round multiple of the PE/COFF @@ -113,6 +124,7 @@ SECTIONS HYPERVISOR_TEXT IDMAP_TEXT HIBERNATE_TEXT + TRAMP_TEXT *(.fixup) *(.gnu.warning) . = ALIGN(16); @@ -138,11 +150,11 @@ SECTIONS . = ALIGN(4); .altinstructions : { __alt_instructions = .; - *(.altinstructions) + KEEP(*(.altinstructions)) __alt_instructions_end = .; } .altinstr_replacement : { - *(.altinstr_replacement) + KEEP(*(.altinstr_replacement)) } . = ALIGN(PAGE_SIZE); @@ -214,6 +226,11 @@ SECTIONS . += RESERVED_TTBR0_SIZE; #endif +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + tramp_pg_dir = .; + . += PAGE_SIZE; +#endif + __pecoff_data_size = ABSOLUTE(. - __initdata_begin); _end = .; @@ -234,7 +251,10 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1)) <= SZ_4K, "Hibernate exit text too big or misaligned") #endif - +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE, + "Entry trampoline text too big") +#endif /* * If padding is applied before .head.text, virt<->phys conversions will fail. 
*/ diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 5c7f657dd207..811f04c5760e 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -205,7 +206,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) { return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu) - + NUM_TIMER_REGS; + + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS; } /** @@ -225,6 +226,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) uindices++; } + ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices); + if (ret) + return ret; + uindices += kvm_arm_get_fw_num_regs(vcpu); + ret = copy_timer_indices(vcpu, uindices); if (ret) return ret; @@ -243,6 +249,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) return get_core_reg(vcpu, reg); + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) + return kvm_arm_get_fw_reg(vcpu, reg); + if (is_timer_reg(reg->id)) return get_timer_reg(vcpu, reg); @@ -259,6 +268,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) return set_core_reg(vcpu, reg); + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) + return kvm_arm_set_fw_reg(vcpu, reg); + if (is_timer_reg(reg->id)) return set_timer_reg(vcpu, reg); diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 7debb74843a0..ab48c5ed3943 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -22,12 +22,13 @@ #include #include +#include + #include #include #include #include #include -#include #define CREATE_TRACE_POINTS #include "trace.h" @@ -42,9 +43,9 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_vcpu_hvc_get_imm(vcpu)); vcpu->stat.hvc_exit_stat++; - ret = kvm_psci_call(vcpu); + ret = kvm_hvc_call_handler(vcpu); if (ret < 0) { - kvm_inject_undefined(vcpu); + vcpu_set_reg(vcpu, 0, ~0UL); return 1; } @@ -53,7 +54,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) { - kvm_inject_undefined(vcpu); + /* + * "If an SMC instruction executed at Non-secure EL1 is + * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a + * Trap exception, not a Secure Monitor Call exception [...]" + * + * We need to advance the PC after the trap, as it would + * otherwise return to the same address... + */ + vcpu_set_reg(vcpu, 0, ~0UL); + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); return 1; } diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index 3f9615582377..dea20651a5f1 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -122,6 +122,10 @@ CPU_BE( orr x4, x4, #SCTLR_ELx_EE) kern_hyp_va x2 msr vbar_el2, x2 + /* copy tpidr_el1 into tpidr_el2 for use by HYP */ + mrs x1, tpidr_el1 + msr tpidr_el2, x1 + /* Hello, World! 
*/ eret ENDPROC(__kvm_hyp_init) @@ -151,6 +155,7 @@ reset: mrs x5, sctlr_el2 ldr x6, =SCTLR_ELx_FLAGS bic x5, x5, x6 // Clear SCTL_M and etc + pre_disable_mmu_workaround msr sctlr_el2, x5 isb diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index f04400d494b7..bfa00a9e161d 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -3,7 +3,11 @@ # Makefile for Kernel-based Virtual Machine module, HYP part # -ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING +ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING $(DISABLE_CFI) + +ifeq ($(cc-name),clang) +ccflags-y += -fno-jump-tables +endif KVM=../../../../virt/kvm diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c index f5154ed3da6c..2add22699764 100644 --- a/arch/arm64/kvm/hyp/debug-sr.c +++ b/arch/arm64/kvm/hyp/debug-sr.c @@ -84,6 +84,9 @@ static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1) { u64 reg; + /* Clear pmscr in case of early return */ + *pmscr_el1 = 0; + /* SPE present on this CPU? */ if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1), ID_AA64DFR0_PMSVER_SHIFT)) diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 12ee62d6d410..a7b3c198d4de 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -62,9 +62,6 @@ ENTRY(__guest_enter) // Store the host regs save_callee_saved_regs x1 - // Store the host_ctxt for use at exit time - str x1, [sp, #-16]! - add x18, x0, #VCPU_CONTEXT // Restore guest regs x0-x17 @@ -118,8 +115,7 @@ ENTRY(__guest_exit) // Store the guest regs x19-x29, lr save_callee_saved_regs x1 - // Restore the host_ctxt from the stack - ldr x2, [sp], #16 + get_host_ctxt x2, x3 // Now restore the host regs restore_callee_saved_regs x2 @@ -159,6 +155,10 @@ abort_guest_exit_end: ENDPROC(__guest_exit) ENTRY(__fpsimd_guest_restore) + // x0: esr + // x1: vcpu + // x2-x29,lr: vcpu regs + // vcpu x0-x1 on the stack stp x2, x3, [sp, #-16]! stp x4, lr, [sp, #-16]! @@ -173,7 +173,7 @@ alternative_else alternative_endif isb - mrs x3, tpidr_el2 + mov x3, x1 ldr x0, [x3, #VCPU_HOST_CONTEXT] kern_hyp_va x0 @@ -196,3 +196,15 @@ alternative_endif eret ENDPROC(__fpsimd_guest_restore) + +ENTRY(__qcom_hyp_sanitize_btac_predictors) + /** + * Call SMC64 with Silicon provider serviceID 23<<8 (0xc2001700) + * 0xC2000000-0xC200FFFF: assigned to SiP Service Calls + * b15-b0: contains SiP functionID + */ + movz x0, #0x1700 + movk x0, #0xc200, lsl #16 + smc #0 + ret +ENDPROC(__qcom_hyp_sanitize_btac_predictors) diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 5170ce1021da..3c283fd8c8f5 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -15,6 +15,7 @@ * along with this program. If not, see . */ +#include #include #include @@ -56,18 +57,14 @@ ENDPROC(__vhe_hyp_call) el1_sync: // Guest trapped into EL2 stp x0, x1, [sp, #-16]! -alternative_if_not ARM64_HAS_VIRT_HOST_EXTN - mrs x1, esr_el2 -alternative_else - mrs x1, esr_el1 -alternative_endif - lsr x0, x1, #ESR_ELx_EC_SHIFT - + mrs x0, esr_el2 + lsr x0, x0, #ESR_ELx_EC_SHIFT cmp x0, #ESR_ELx_EC_HVC64 + ccmp x0, #ESR_ELx_EC_HVC32, #4, ne b.ne el1_trap - mrs x1, vttbr_el2 // If vttbr is valid, the 64bit guest - cbnz x1, el1_trap // called HVC + mrs x1, vttbr_el2 // If vttbr is valid, the guest + cbnz x1, el1_hvc_guest // called HVC /* Here, we're pretty sure the host called HVC. 
*/ ldp x0, x1, [sp], #16 @@ -100,9 +97,64 @@ alternative_endif eret +el1_hvc_guest: + /* + * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1. + * The workaround has already been applied on the host, + * so let's quickly get back to the guest. We don't bother + * restoring x1, as it can be clobbered anyway. + */ + ldr x1, [sp] // Guest's x0 + eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1 + cbz w1, wa_epilogue + + /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */ + eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \ + ARM_SMCCC_ARCH_WORKAROUND_2) + cbnz w1, el1_trap + +#ifdef CONFIG_ARM64_SSBD +alternative_cb arm64_enable_wa2_handling + b wa2_end +alternative_cb_end + get_vcpu_ptr x2, x0 + ldr x0, [x2, #VCPU_WORKAROUND_FLAGS] + + // Sanitize the argument and update the guest flags + ldr x1, [sp, #8] // Guest's x1 + clz w1, w1 // Murphy's device: + lsr w1, w1, #5 // w1 = !!w1 without using + eor w1, w1, #1 // the flags... + bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1 + str x0, [x2, #VCPU_WORKAROUND_FLAGS] + + /* Check that we actually need to perform the call */ + hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2 + cbz x0, wa2_end + + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 + smc #0 + + /* Don't leak data from the SMC call */ + mov x3, xzr +wa2_end: + mov x2, xzr + mov x1, xzr +#endif + +wa_epilogue: + mov x0, xzr + add sp, sp, #16 + eret + el1_trap: + get_vcpu_ptr x1, x0 + + mrs x0, esr_el2 + lsr x0, x0, #ESR_ELx_EC_SHIFT /* * x0: ESR_EC + * x1: vcpu pointer */ /* @@ -116,19 +168,18 @@ alternative_if_not ARM64_HAS_NO_FPSIMD b.eq __fpsimd_guest_restore alternative_else_nop_endif - mrs x1, tpidr_el2 mov x0, #ARM_EXCEPTION_TRAP b __guest_exit el1_irq: stp x0, x1, [sp, #-16]! - mrs x1, tpidr_el2 + get_vcpu_ptr x1, x0 mov x0, #ARM_EXCEPTION_IRQ b __guest_exit el1_error: stp x0, x1, [sp, #-16]! - mrs x1, tpidr_el2 + get_vcpu_ptr x1, x0 mov x0, #ARM_EXCEPTION_EL1_SERROR b __guest_exit @@ -163,6 +214,11 @@ ENTRY(__hyp_do_panic) eret ENDPROC(__hyp_do_panic) +ENTRY(__hyp_panic) + get_host_ctxt x0, x1 + b hyp_panic +ENDPROC(__hyp_panic) + .macro invalid_vector label, target = __hyp_panic .align 2 \label: diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 945e79c641c4..b2f1992c6234 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -15,8 +15,12 @@ * along with this program. If not, see . */ +#include #include #include +#include + +#include #include #include @@ -51,7 +55,7 @@ static void __hyp_text __activate_traps_vhe(void) val &= ~CPACR_EL1_FPEN; write_sysreg(val, cpacr_el1); - write_sysreg(__kvm_hyp_vector, vbar_el1); + write_sysreg(kvm_get_hyp_vector(), vbar_el1); } static void __hyp_text __activate_traps_nvhe(void) @@ -278,6 +282,39 @@ static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu) write_sysreg_el2(*vcpu_pc(vcpu), elr); } +static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu) +{ + if (!cpus_have_const_cap(ARM64_SSBD)) + return false; + + return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG); +} + +static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_ARM64_SSBD + /* + * The host runs with the workaround always present. If the + * guest wants it disabled, so be it... 
+ */ + if (__needs_ssbd_off(vcpu) && + __hyp_this_cpu_read(arm64_ssbd_callback_required)) + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL); +#endif +} + +static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_ARM64_SSBD + /* + * If the guest has disabled the workaround, bring it back on. + */ + if (__needs_ssbd_off(vcpu) && + __hyp_this_cpu_read(arm64_ssbd_callback_required)) + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL); +#endif +} + int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *host_ctxt; @@ -286,9 +323,9 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) u64 exit_code; vcpu = kern_hyp_va(vcpu); - write_sysreg(vcpu, tpidr_el2); host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + host_ctxt->__hyp_running_vcpu = vcpu; guest_ctxt = &vcpu->arch.ctxt; __sysreg_save_host_state(host_ctxt); @@ -308,6 +345,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) __sysreg_restore_guest_state(guest_ctxt); __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt); + __set_guest_arch_workaround_state(vcpu); + /* Jump in the fire! */ again: exit_code = __guest_enter(vcpu, host_ctxt); @@ -364,6 +403,18 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) /* 0 falls through to be handled out of EL2 */ } + __set_host_arch_workaround_state(vcpu); + + if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) { + u32 midr = read_cpuid_id(); + + /* Apply BTAC predictors mitigation to all Falkor chips */ + if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || + ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) { + __qcom_hyp_sanitize_btac_predictors(); + } + } + fp_enabled = __fpsimd_enabled(); __sysreg_save_guest_state(guest_ctxt); @@ -393,7 +444,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n"; -static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par) +static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par, + struct kvm_vcpu *vcpu) { unsigned long str_va; @@ -407,35 +459,32 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par) __hyp_do_panic(str_va, spsr, elr, read_sysreg(esr_el2), read_sysreg_el2(far), - read_sysreg(hpfar_el2), par, - (void *)read_sysreg(tpidr_el2)); + read_sysreg(hpfar_el2), par, vcpu); } -static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par) +static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par, + struct kvm_vcpu *vcpu) { panic(__hyp_panic_string, spsr, elr, read_sysreg_el2(esr), read_sysreg_el2(far), - read_sysreg(hpfar_el2), par, - (void *)read_sysreg(tpidr_el2)); + read_sysreg(hpfar_el2), par, vcpu); } static hyp_alternate_select(__hyp_call_panic, __hyp_call_panic_nvhe, __hyp_call_panic_vhe, ARM64_HAS_VIRT_HOST_EXTN); -void __hyp_text __noreturn __hyp_panic(void) +void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) { + struct kvm_vcpu *vcpu = NULL; + u64 spsr = read_sysreg_el2(spsr); u64 elr = read_sysreg_el2(elr); u64 par = read_sysreg(par_el1); if (read_sysreg(vttbr_el2)) { - struct kvm_vcpu *vcpu; - struct kvm_cpu_context *host_ctxt; - - vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2); - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + vcpu = host_ctxt->__hyp_running_vcpu; __timer_save_state(vcpu); __deactivate_traps(vcpu); __deactivate_vm(vcpu); @@ -443,7 +492,7 @@ void __hyp_text 
__noreturn __hyp_panic(void) } /* Call panic for real */ - __hyp_call_panic()(spsr, elr, par); + __hyp_call_panic()(spsr, elr, par, vcpu); unreachable(); } diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 934137647837..e19d89cabf2a 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -27,8 +27,8 @@ static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { } /* * Non-VHE: Both host and guest must save everything. * - * VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc, - * pstate, and guest must save everything. + * VHE: Host must save tpidr*_el0, actlr_el1, mdscr_el1, sp_el0, + * and guest must save everything. */ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt) @@ -36,11 +36,8 @@ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt) ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1); ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0); ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0); - ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1); ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1); ctxt->gp_regs.regs.sp = read_sysreg(sp_el0); - ctxt->gp_regs.regs.pc = read_sysreg_el2(elr); - ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr); } static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) @@ -62,10 +59,13 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair); ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl); ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1); + ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1); ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1); ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr); ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr); + ctxt->gp_regs.regs.pc = read_sysreg_el2(elr); + ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr); } static hyp_alternate_select(__sysreg_call_save_host_state, @@ -89,11 +89,8 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctx write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1); write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0); write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0); - write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1); write_sysreg(ctxt->gp_regs.regs.sp, sp_el0); - write_sysreg_el2(ctxt->gp_regs.regs.pc, elr); - write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr); } static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) @@ -115,10 +112,13 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair); write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl); write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1); + write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1); write_sysreg(ctxt->gp_regs.sp_el1, sp_el1); write_sysreg_el1(ctxt->gp_regs.elr_el1, elr); write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr); + write_sysreg_el2(ctxt->gp_regs.regs.pc, elr); + write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr); } static hyp_alternate_select(__sysreg_call_restore_host_state, @@ -183,3 +183,8 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu) if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2); } + +void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2) +{ + asm("msr tpidr_el2, %0": : "r" (tpidr_el2)); +} diff --git a/arch/arm64/kvm/reset.c 
b/arch/arm64/kvm/reset.c index 3256b9228e75..a74311beda35 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -122,6 +122,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset PMU */ kvm_pmu_vcpu_reset(vcpu); + /* Default workaround setup is enabled (if supported) */ + if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL) + vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; + /* Reset timer */ return kvm_timer_vcpu_reset(vcpu); } diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index e88fb99c1561..21ba0b29621b 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -21,7 +21,7 @@ .text -/* Prototype: int __clear_user(void *addr, size_t sz) +/* Prototype: int __arch_clear_user(void *addr, size_t sz) * Purpose : clear some user memory * Params : addr - user memory address to clear * : sz - number of bytes to clear @@ -29,8 +29,8 @@ * * Alignment fixed up by hardware. */ -ENTRY(__clear_user) - uaccess_enable_not_uao x2, x3 +ENTRY(__arch_clear_user) + uaccess_enable_not_uao x2, x3, x4 mov x2, x1 // save the size for fixup return subs x1, x1, #8 b.mi 2f @@ -50,9 +50,9 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2 b.mi 5f uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 5: mov x0, #0 - uaccess_disable_not_uao x2 + uaccess_disable_not_uao x2, x3 ret -ENDPROC(__clear_user) +ENDPROC(__arch_clear_user) .section .fixup,"ax" .align 2 diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 4b5d826895ff..20305d485046 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -64,10 +64,10 @@ end .req x5 ENTRY(__arch_copy_from_user) - uaccess_enable_not_uao x3, x4 + uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" - uaccess_disable_not_uao x3 + uaccess_disable_not_uao x3, x4 mov x0, #0 // Nothing to copy ret ENDPROC(__arch_copy_from_user) diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index b24a830419ad..54b75deb1d16 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -64,14 +64,15 @@ .endm end .req x5 -ENTRY(raw_copy_in_user) - uaccess_enable_not_uao x3, x4 + +ENTRY(__arch_copy_in_user) + uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" - uaccess_disable_not_uao x3 + uaccess_disable_not_uao x3, x4 mov x0, #0 ret -ENDPROC(raw_copy_in_user) +ENDPROC(__arch_copy_in_user) .section .fixup,"ax" .align 2 diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 351f0766f7a6..fda6172d6b88 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -63,10 +63,10 @@ end .req x5 ENTRY(__arch_copy_to_user) - uaccess_enable_not_uao x3, x4 + uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" - uaccess_disable_not_uao x3 + uaccess_disable_not_uao x3, x4 mov x0, #0 ret ENDPROC(__arch_copy_to_user) diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 7f1dbe962cf5..91464e7f77cc 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -49,7 +49,7 @@ ENTRY(flush_icache_range) * - end - virtual end address of region */ ENTRY(__flush_cache_user_range) - uaccess_ttbr0_enable x2, x3 + uaccess_ttbr0_enable x2, x3, x4 dcache_line_size x2, x3 sub x3, x2, #1 bic x4, x0, x3 @@ -72,7 +72,7 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU isb mov x0, #0 1: - uaccess_ttbr0_disable x1 + uaccess_ttbr0_disable x1, x2 ret 9: mov x0, #-EFAULT diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 
ab9f5f0fb2c7..9284788733d6 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -39,7 +39,16 @@ static cpumask_t tlb_flush_pending; #define ASID_MASK (~GENMASK(asid_bits - 1, 0)) #define ASID_FIRST_VERSION (1UL << asid_bits) -#define NUM_USER_ASIDS ASID_FIRST_VERSION + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1) +#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1) +#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK) +#else +#define NUM_USER_ASIDS (ASID_FIRST_VERSION) +#define asid2idx(asid) ((asid) & ~ASID_MASK) +#define idx2asid(idx) asid2idx(idx) +#endif /* Get the ASIDBits supported by the current CPU */ static u32 get_cpu_asid_bits(void) @@ -79,13 +88,6 @@ void verify_cpu_asid_bits(void) } } -static void set_reserved_asid_bits(void) -{ - if (IS_ENABLED(CONFIG_QCOM_FALKOR_ERRATUM_1003) && - cpus_have_const_cap(ARM64_WORKAROUND_QCOM_FALKOR_E1003)) - __set_bit(FALKOR_RESERVED_ASID, asid_map); -} - static void flush_context(unsigned int cpu) { int i; @@ -94,8 +96,6 @@ static void flush_context(unsigned int cpu) /* Update the list of reserved ASIDs and the ASID bitmap. */ bitmap_clear(asid_map, 0, NUM_USER_ASIDS); - set_reserved_asid_bits(); - /* * Ensure the generation bump is observed before we xchg the * active_asids. @@ -113,7 +113,7 @@ static void flush_context(unsigned int cpu) */ if (asid == 0) asid = per_cpu(reserved_asids, i); - __set_bit(asid & ~ASID_MASK, asid_map); + __set_bit(asid2idx(asid), asid_map); per_cpu(reserved_asids, i) = asid; } @@ -165,16 +165,16 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) * We had a valid ASID in a previous life, so try to re-use * it if possible. */ - asid &= ~ASID_MASK; - if (!__test_and_set_bit(asid, asid_map)) + if (!__test_and_set_bit(asid2idx(asid), asid_map)) return newasid; } /* * Allocate a free ASID. If we can't find one, take a note of the - * currently active ASIDs and mark the TLBs as requiring flushes. - * We always count from ASID #1, as we use ASID #0 when setting a - * reserved TTBR0 for the init_mm. + * currently active ASIDs and mark the TLBs as requiring flushes. We + * always count from ASID #2 (index 1), as we use ASID #0 when setting + * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd + * pairs. */ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); if (asid != NUM_USER_ASIDS) @@ -191,7 +191,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) set_asid: __set_bit(asid, asid_map); cur_idx = asid; - return asid | generation; + return idx2asid(asid) | generation; } void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) @@ -227,6 +227,9 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); switch_mm_fastpath: + + arm64_apply_bp_hardening(); + /* * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when * emulating PAN. @@ -235,6 +238,15 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) cpu_switch_mm(mm->pgd, mm); } +/* Errata workaround post TTBRx_EL1 update. 
*/ +asmlinkage void post_ttbr_update_workaround(void) +{ + asm(ALTERNATIVE("nop; nop; nop", + "ic iallu; dsb nsh; isb", + ARM64_WORKAROUND_CAVIUM_27456, + CONFIG_CAVIUM_ERRATUM_27456)); +} + static int asids_init(void) { asid_bits = get_cpu_asid_bits(); @@ -250,8 +262,6 @@ static int asids_init(void) panic("Failed to allocate bitmap for %lu ASIDs\n", NUM_USER_ASIDS); - set_reserved_asid_bits(); - pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS); return 0; } diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 614af886b7ef..115b32639a3c 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -166,7 +166,7 @@ static void *__dma_alloc(struct device *dev, size_t size, /* create a coherent mapping */ page = virt_to_page(ptr); coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP, - prot, NULL); + prot, __builtin_return_address(0)); if (!coherent_ptr) goto no_map; diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index ca74a2aace42..7b60d62ac593 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -389,7 +389,7 @@ void ptdump_check_wx(void) .check_wx = true, }; - walk_pgd(&st, &init_mm, 0); + walk_pgd(&st, &init_mm, VA_START); note_page(&st, 0, 0, 0); if (st.wx_pages || st.uxn_pages) pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n", diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index b64958b23a7f..5edb706aacb0 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -242,7 +242,7 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs, if (fsc_type == ESR_ELx_FSC_PERM) return true; - if (addr < USER_DS && system_uses_ttbr0_pan()) + if (addr < TASK_SIZE && system_uses_ttbr0_pan()) return fsc_type == ESR_ELx_FSC_FAULT && (regs->pstate & PSR_PAN_BIT); @@ -426,7 +426,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, mm_flags |= FAULT_FLAG_WRITE; } - if (addr < USER_DS && is_permission_fault(esr, regs, addr)) { + if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) { /* regs->orig_addr_limit may be 0 if we entered from EL0 */ if (regs->orig_addr_limit == KERNEL_DS) die("Accessing user space memory with fs=KERNEL_DS", regs, esr); @@ -751,6 +751,29 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, arm64_notify_die("", regs, &info, esr); } +asmlinkage void __exception do_el0_irq_bp_hardening(void) +{ + /* PC has already been checked in entry.S */ + arm64_apply_bp_hardening(); +} + +asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr, + unsigned int esr, + struct pt_regs *regs) +{ + /* + * We've taken an instruction abort from userspace and not yet + * re-enabled IRQs. If the address is a kernel address, apply + * BP hardening prior to enabling IRQs and pre-emption. + */ + if (addr > TASK_SIZE) + arm64_apply_bp_hardening(); + + local_irq_enable(); + do_mem_abort(addr, esr, regs); +} + + /* * Handle stack alignment exceptions. 
*/ @@ -761,6 +784,12 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr, struct siginfo info; struct task_struct *tsk = current; + if (user_mode(regs)) { + if (instruction_pointer(regs) > TASK_SIZE) + arm64_apply_bp_hardening(); + local_irq_enable(); + } + if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS)) pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n", tsk->comm, task_pid_nr(tsk), @@ -820,6 +849,9 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, if (interrupts_enabled(regs)) trace_hardirqs_off(); + if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE) + arm64_apply_bp_hardening(); + if (!inf->fn(addr, esr, regs)) { rv = 1; } else { diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 5960bef0170d..8154803a89a5 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -285,9 +285,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) #endif /* CONFIG_NUMA */ #ifdef CONFIG_HAVE_ARCH_PFN_VALID +#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1) + int pfn_valid(unsigned long pfn) { - return memblock_is_map_memory(pfn << PAGE_SHIFT); + return (pfn & PFN_MASK) == pfn && memblock_is_map_memory(pfn << PAGE_SHIFT); } EXPORT_SYMBOL(pfn_valid); #endif @@ -476,6 +478,8 @@ void __init arm64_memblock_init(void) reserve_elfcorehdr(); + high_memory = __va(memblock_end_of_DRAM() - 1) + 1; + dma_contiguous_reserve(arm64_dma_phys_limit); memblock_allow_resize(); @@ -502,7 +506,6 @@ void __init bootmem_init(void) sparse_init(); zone_sizes_init(min, max); - high_memory = __va((max << PAGE_SHIFT) - 1) + 1; memblock_dump_all(); } @@ -650,11 +653,13 @@ void __init mem_init(void) BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64); #endif +#ifdef CONFIG_SPARSEMEM_VMEMMAP /* * Make sure we chose the upper bound of sizeof(struct page) - * correctly. + * correctly when sizing the VMEMMAP array. */ BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT)); +#endif if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { extern int sysctl_overcommit_memory; diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index f1eb15e0e864..6ac0d32d60a5 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -107,7 +107,7 @@ static bool pgattr_change_is_safe(u64 old, u64 new) * The following mapping attributes may be updated in live * kernel mappings without the need for break-before-make. */ - static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE; + static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG; /* creating or taking down mappings is always safe */ if (old == 0 || new == 0) @@ -117,6 +117,10 @@ static bool pgattr_change_is_safe(u64 old, u64 new) if ((old | new) & PTE_CONT) return false; + /* Transitioning from Non-Global to Global is unsafe */ + if (old & ~new & PTE_NG) + return false; + return ((old ^ new) & ~mask) == 0; } @@ -525,6 +529,37 @@ static int __init parse_rodata(char *arg) } early_param("rodata", parse_rodata); +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +static int __init map_entry_trampoline(void) +{ + extern char __entry_tramp_text_start[]; + + pgprot_t prot = rodata_enabled ? 
PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; + phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start); + + /* The trampoline is always mapped and can therefore be global */ + pgprot_val(prot) &= ~PTE_NG; + + /* Map only the text into the trampoline page table */ + memset(tramp_pg_dir, 0, PGD_SIZE); + __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE, + prot, pgd_pgtable_alloc, 0); + + /* Map both the text and data into the kernel page table */ + __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot); + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { + extern char __entry_tramp_data_start[]; + + __set_fixmap(FIX_ENTRY_TRAMP_DATA, + __pa_symbol(__entry_tramp_data_start), + PAGE_KERNEL_RO); + } + + return 0; +} +core_initcall(map_entry_trampoline); +#endif + /* * Create fine-grained mappings for the kernel. */ @@ -902,3 +937,13 @@ int pmd_clear_huge(pmd_t *pmd) pmd_clear(pmd); return 1; } + +int pud_free_pmd_page(pud_t *pud, unsigned long addr) +{ + return pud_none(*pud); +} + +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) +{ + return pmd_none(*pmd); +} diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 877d42fb0df6..10c835f13f62 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -70,7 +70,11 @@ ENTRY(cpu_do_suspend) mrs x8, mdscr_el1 mrs x9, oslsr_el1 mrs x10, sctlr_el1 +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN mrs x11, tpidr_el1 +alternative_else + mrs x11, tpidr_el2 +alternative_endif mrs x12, sp_el0 stp x2, x3, [x0] stp x4, xzr, [x0, #16] @@ -86,7 +90,7 @@ ENDPROC(cpu_do_suspend) * * x0: Address of context pointer */ - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" ENTRY(cpu_do_resume) ldp x2, x3, [x0] ldp x4, x5, [x0, #16] @@ -116,7 +120,11 @@ ENTRY(cpu_do_resume) msr mdscr_el1, x10 msr sctlr_el1, x12 +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN msr tpidr_el1, x13 +alternative_else + msr tpidr_el2, x13 +alternative_endif msr sp_el0, x14 /* * Restore oslsr_el1 by writing oslar_el1 @@ -138,16 +146,30 @@ ENDPROC(cpu_do_resume) * - pgd_phys - physical address of new TTB */ ENTRY(cpu_do_switch_mm) - pre_ttbr0_update_workaround x0, x2, x3 + mrs x2, ttbr1_el1 mmid x1, x1 // get mm->context.id - bfi x0, x1, #48, #16 // set the ASID - msr ttbr0_el1, x0 // set TTBR0 +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + bfi x0, x1, #48, #16 // set the ASID field in TTBR0 +#endif + bfi x2, x1, #48, #16 // set the ASID + msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set) isb - post_ttbr0_update_workaround - ret + msr ttbr0_el1, x0 // now update TTBR0 + isb + b post_ttbr_update_workaround // Back to C code... 
ENDPROC(cpu_do_switch_mm) - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" + +.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 + adrp \tmp1, empty_zero_page + msr ttbr1_el1, \tmp1 + isb + tlbi vmalle1 + dsb nsh + isb +.endm + /* * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd) * @@ -158,13 +180,7 @@ ENTRY(idmap_cpu_replace_ttbr1) mrs x2, daif msr daifset, #0xf - adrp x1, empty_zero_page - msr ttbr1_el1, x1 - isb - - tlbi vmalle1 - dsb nsh - isb + __idmap_cpu_set_reserved_ttbr1 x1, x3 msr ttbr1_el1, x0 isb @@ -175,13 +191,201 @@ ENTRY(idmap_cpu_replace_ttbr1) ENDPROC(idmap_cpu_replace_ttbr1) .popsection +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + .pushsection ".idmap.text", "awx" + + .macro __idmap_kpti_get_pgtable_ent, type + dc cvac, cur_\()\type\()p // Ensure any existing dirty + dmb sy // lines are written back before + ldr \type, [cur_\()\type\()p] // loading the entry + tbz \type, #0, skip_\()\type // Skip invalid and + tbnz \type, #11, skip_\()\type // non-global entries + .endm + + .macro __idmap_kpti_put_pgtable_ent_ng, type + orr \type, \type, #PTE_NG // Same bit for blocks and pages + str \type, [cur_\()\type\()p] // Update the entry and ensure + dmb sy // that it is visible to all + dc civac, cur_\()\type\()p // CPUs. + .endm + +/* + * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper) + * + * Called exactly once from stop_machine context by each CPU found during boot. + */ +__idmap_kpti_flag: + .long 1 +ENTRY(idmap_kpti_install_ng_mappings) + cpu .req w0 + num_cpus .req w1 + swapper_pa .req x2 + swapper_ttb .req x3 + flag_ptr .req x4 + cur_pgdp .req x5 + end_pgdp .req x6 + pgd .req x7 + cur_pudp .req x8 + end_pudp .req x9 + pud .req x10 + cur_pmdp .req x11 + end_pmdp .req x12 + pmd .req x13 + cur_ptep .req x14 + end_ptep .req x15 + pte .req x16 + + mrs swapper_ttb, ttbr1_el1 + adr flag_ptr, __idmap_kpti_flag + + cbnz cpu, __idmap_kpti_secondary + + /* We're the boot CPU. Wait for the others to catch up */ + sevl +1: wfe + ldaxr w18, [flag_ptr] + eor w18, w18, num_cpus + cbnz w18, 1b + + /* We need to walk swapper, so turn off the MMU. */ + mrs x18, sctlr_el1 + bic x18, x18, #SCTLR_ELx_M + msr sctlr_el1, x18 + isb + + /* Everybody is enjoying the idmap, so we can rewrite swapper. 
*/ + /* PGD */ + mov cur_pgdp, swapper_pa + add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8) +do_pgd: __idmap_kpti_get_pgtable_ent pgd + tbnz pgd, #1, walk_puds +next_pgd: + __idmap_kpti_put_pgtable_ent_ng pgd +skip_pgd: + add cur_pgdp, cur_pgdp, #8 + cmp cur_pgdp, end_pgdp + b.ne do_pgd + + /* Publish the updated tables and nuke all the TLBs */ + dsb sy + tlbi vmalle1is + dsb ish + isb + + /* We're done: fire up the MMU again */ + mrs x18, sctlr_el1 + orr x18, x18, #SCTLR_ELx_M + msr sctlr_el1, x18 + isb + + /* Set the flag to zero to indicate that we're all done */ + str wzr, [flag_ptr] + ret + + /* PUD */ +walk_puds: + .if CONFIG_PGTABLE_LEVELS > 3 + pte_to_phys cur_pudp, pgd + add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8) +do_pud: __idmap_kpti_get_pgtable_ent pud + tbnz pud, #1, walk_pmds +next_pud: + __idmap_kpti_put_pgtable_ent_ng pud +skip_pud: + add cur_pudp, cur_pudp, 8 + cmp cur_pudp, end_pudp + b.ne do_pud + b next_pgd + .else /* CONFIG_PGTABLE_LEVELS <= 3 */ + mov pud, pgd + b walk_pmds +next_pud: + b next_pgd + .endif + + /* PMD */ +walk_pmds: + .if CONFIG_PGTABLE_LEVELS > 2 + pte_to_phys cur_pmdp, pud + add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8) +do_pmd: __idmap_kpti_get_pgtable_ent pmd + tbnz pmd, #1, walk_ptes +next_pmd: + __idmap_kpti_put_pgtable_ent_ng pmd +skip_pmd: + add cur_pmdp, cur_pmdp, #8 + cmp cur_pmdp, end_pmdp + b.ne do_pmd + b next_pud + .else /* CONFIG_PGTABLE_LEVELS <= 2 */ + mov pmd, pud + b walk_ptes +next_pmd: + b next_pud + .endif + + /* PTE */ +walk_ptes: + pte_to_phys cur_ptep, pmd + add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8) +do_pte: __idmap_kpti_get_pgtable_ent pte + __idmap_kpti_put_pgtable_ent_ng pte +skip_pte: + add cur_ptep, cur_ptep, #8 + cmp cur_ptep, end_ptep + b.ne do_pte + b next_pmd + + /* Secondary CPUs end up here */ +__idmap_kpti_secondary: + /* Uninstall swapper before surgery begins */ + __idmap_cpu_set_reserved_ttbr1 x18, x17 + + /* Increment the flag to let the boot CPU we're ready */ +1: ldxr w18, [flag_ptr] + add w18, w18, #1 + stxr w17, w18, [flag_ptr] + cbnz w17, 1b + + /* Wait for the boot CPU to finish messing around with swapper */ + sevl +1: wfe + ldxr w18, [flag_ptr] + cbnz w18, 1b + + /* All done, act like nothing happened */ + msr ttbr1_el1, swapper_ttb + isb + ret + + .unreq cpu + .unreq num_cpus + .unreq swapper_pa + .unreq swapper_ttb + .unreq flag_ptr + .unreq cur_pgdp + .unreq end_pgdp + .unreq pgd + .unreq cur_pudp + .unreq end_pudp + .unreq pud + .unreq cur_pmdp + .unreq end_pmdp + .unreq pmd + .unreq cur_ptep + .unreq end_ptep + .unreq pte +ENDPROC(idmap_kpti_install_ng_mappings) + .popsection +#endif + /* * __cpu_setup * * Initialise the processor for turning the MMU on. Return in x0 the * value of the SCTLR_EL1 register. */ - .pushsection ".idmap.text", "ax" + .pushsection ".idmap.text", "awx" ENTRY(__cpu_setup) tlbi vmalle1 // Invalidate local TLB dsb nsh @@ -225,7 +429,7 @@ ENTRY(__cpu_setup) * both user and kernel. 
*/ ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ - TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 + TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1 tcr_set_idmap_t0sz x10, x9 /* @@ -243,6 +447,11 @@ ENTRY(__cpu_setup) cbz x9, 2f cmp x9, #2 b.lt 1f +#ifdef CONFIG_ARM64_ERRATUM_1024718 + /* Disable hardware DBM on Cortex-A55 r0p0, r0p1 & r1p0 */ + cpu_midr_match MIDR_CORTEX_A55, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(1, 0), x1, x2, x3, x4 + cbnz x1, 1f +#endif orr x10, x10, #TCR_HD // hardware Dirty flag update 1: orr x10, x10, #TCR_HA // hardware Access flag update 2: diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index ba38d403abb2..be155f70f108 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -148,7 +148,8 @@ static inline int epilogue_offset(const struct jit_ctx *ctx) /* Stack must be multiples of 16B */ #define STACK_ALIGN(sz) (((sz) + 15) & ~15) -#define PROLOGUE_OFFSET 8 +/* Tail call offset to jump into */ +#define PROLOGUE_OFFSET 7 static int build_prologue(struct jit_ctx *ctx) { @@ -200,19 +201,19 @@ static int build_prologue(struct jit_ctx *ctx) /* Initialize tail_call_cnt */ emit(A64_MOVZ(1, tcc, 0, 0), ctx); - /* 4 byte extra for skb_copy_bits buffer */ - ctx->stack_size = prog->aux->stack_depth + 4; - ctx->stack_size = STACK_ALIGN(ctx->stack_size); - - /* Set up function call stack */ - emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); - cur_offset = ctx->idx - idx0; if (cur_offset != PROLOGUE_OFFSET) { pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n", cur_offset, PROLOGUE_OFFSET); return -1; } + + /* 4 byte extra for skb_copy_bits buffer */ + ctx->stack_size = prog->aux->stack_depth + 4; + ctx->stack_size = STACK_ALIGN(ctx->stack_size); + + /* Set up function call stack */ + emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); return 0; } @@ -237,8 +238,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) off = offsetof(struct bpf_array, map.max_entries); emit_a64_mov_i64(tmp, off, ctx); emit(A64_LDR32(tmp, r2, tmp), ctx); + emit(A64_MOV(0, r3, r3), ctx); emit(A64_CMP(0, r3, tmp), ctx); - emit(A64_B_(A64_COND_GE, jmp_offset), ctx); + emit(A64_B_(A64_COND_CS, jmp_offset), ctx); /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) * goto out; @@ -246,7 +248,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) */ emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx); emit(A64_CMP(1, tcc, tmp), ctx); - emit(A64_B_(A64_COND_GT, jmp_offset), ctx); + emit(A64_B_(A64_COND_HI, jmp_offset), ctx); emit(A64_ADD_I(1, tcc, tcc, 1), ctx); /* prog = array->ptrs[index]; @@ -260,11 +262,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx) emit(A64_LDR64(prg, tmp, prg), ctx); emit(A64_CBZ(1, prg, jmp_offset), ctx); - /* goto *(prog->bpf_func + prologue_size); */ + /* goto *(prog->bpf_func + prologue_offset); */ off = offsetof(struct bpf_prog, bpf_func); emit_a64_mov_i64(tmp, off, ctx); emit(A64_LDR64(tmp, prg, tmp), ctx); emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx); + emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx); emit(A64_BR(tmp), ctx); /* out: */ diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S index 401ceb71540c..c5f05c4a4d00 100644 --- a/arch/arm64/xen/hypercall.S +++ b/arch/arm64/xen/hypercall.S @@ -101,12 +101,12 @@ ENTRY(privcmd_call) * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation * is enabled (it implies that hardware UAO and PAN disabled). 
*/ - uaccess_ttbr0_enable x6, x7 + uaccess_ttbr0_enable x6, x7, x8 hvc XEN_IMM /* * Disable userspace access from kernel once the hyp call completed. */ - uaccess_ttbr0_disable x6 + uaccess_ttbr0_disable x6, x7 ret ENDPROC(privcmd_call); diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index af5369422032..d9c2866ba618 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -321,11 +321,14 @@ config BF53x config GPIO_ADI def_bool y + depends on !PINCTRL depends on (BF51x || BF52x || BF53x || BF538 || BF539 || BF561) -config PINCTRL +config PINCTRL_BLACKFIN_ADI2 def_bool y - depends on BF54x || BF60x + depends on (BF54x || BF60x) + select PINCTRL + select PINCTRL_ADI2 config MEM_MT48LC64M4A2FB_7E bool diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug index 4ddd1b73ee3e..c8d957274cc2 100644 --- a/arch/blackfin/Kconfig.debug +++ b/arch/blackfin/Kconfig.debug @@ -18,6 +18,7 @@ config DEBUG_VERBOSE config DEBUG_MMRS tristate "Generate Blackfin MMR tree" + depends on !PINCTRL select DEBUG_FS help Create a tree of Blackfin MMRs via the debugfs tree. If diff --git a/arch/cris/include/arch-v10/arch/bug.h b/arch/cris/include/arch-v10/arch/bug.h index 905afeacfedf..06da9d49152a 100644 --- a/arch/cris/include/arch-v10/arch/bug.h +++ b/arch/cris/include/arch-v10/arch/bug.h @@ -44,18 +44,25 @@ struct bug_frame { * not be used like this with newer versions of gcc. */ #define BUG() \ +do { \ __asm__ __volatile__ ("clear.d [" __stringify(BUG_MAGIC) "]\n\t"\ "movu.w " __stringify(__LINE__) ",$r0\n\t"\ "jump 0f\n\t" \ ".section .rodata\n" \ "0:\t.string \"" __FILE__ "\"\n\t" \ - ".previous") + ".previous"); \ + unreachable(); \ +} while (0) #endif #else /* This just causes an oops. */ -#define BUG() (*(int *)0 = 0) +#define BUG() \ +do { \ + barrier_before_unreachable(); \ + __builtin_trap(); \ +} while (0) #endif diff --git a/arch/h8300/include/asm/byteorder.h b/arch/h8300/include/asm/byteorder.h index ecff2d1ca5a3..6eaa7ad5fc2c 100644 --- a/arch/h8300/include/asm/byteorder.h +++ b/arch/h8300/include/asm/byteorder.h @@ -2,7 +2,6 @@ #ifndef __H8300_BYTEORDER_H__ #define __H8300_BYTEORDER_H__ -#define __BIG_ENDIAN __ORDER_BIG_ENDIAN__ #include #endif diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h index 66f5e9a61efc..7288e31d3713 100644 --- a/arch/hexagon/include/asm/io.h +++ b/arch/hexagon/include/asm/io.h @@ -216,6 +216,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, memcpy((void *) dst, src, count); } +static inline void memset_io(volatile void __iomem *addr, int value, + size_t size) +{ + memset((void __force *)addr, value, size); +} + #define PCI_IO_ADDR (volatile void __iomem *) /* diff --git a/arch/hexagon/lib/checksum.c b/arch/hexagon/lib/checksum.c index 617506d1a559..7cd0a2259269 100644 --- a/arch/hexagon/lib/checksum.c +++ b/arch/hexagon/lib/checksum.c @@ -199,3 +199,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) memcpy(dst, src, len); return csum_partial(dst, len, sum); } +EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/ia64/include/asm/bug.h b/arch/ia64/include/asm/bug.h index bd3eeb8d1cfa..66b37a532765 100644 --- a/arch/ia64/include/asm/bug.h +++ b/arch/ia64/include/asm/bug.h @@ -4,7 +4,11 @@ #ifdef CONFIG_BUG #define ia64_abort() __builtin_trap() -#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0) +#define BUG() do { \ + printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ + 
barrier_before_unreachable(); \ + ia64_abort(); \ +} while (0) /* should this BUG be made generic? */ #define HAVE_ARCH_BUG diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h index 3efba40adc54..c872c4e6bafb 100644 --- a/arch/ia64/include/uapi/asm/socket.h +++ b/arch/ia64/include/uapi/asm/socket.h @@ -114,4 +114,7 @@ #define SO_ZEROCOPY 60 +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* _ASM_IA64_SOCKET_H */ diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c index 85bba43e7d5d..658a8e06a69b 100644 --- a/arch/ia64/kernel/err_inject.c +++ b/arch/ia64/kernel/err_inject.c @@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr, u64 virt_addr=simple_strtoull(buf, NULL, 16); int ret; - ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL); + ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL); if (ret<=0) { #ifdef ERR_INJ_DEBUG printk("Virtual address %lx is not existing.\n",virt_addr); diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index aa7be020a904..c954523d00fe 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -88,7 +88,7 @@ void vtime_flush(struct task_struct *tsk) } if (ti->softirq_time) { - delta = cycle_to_nsec(ti->softirq_time)); + delta = cycle_to_nsec(ti->softirq_time); account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ); } diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 0d9446c37ae8..498398d915c1 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -196,8 +196,8 @@ config TIMER_DIVIDE default "128" config CPU_BIG_ENDIAN - bool "Generate big endian code" - default n + bool + default !CPU_LITTLE_ENDIAN config CPU_LITTLE_ENDIAN bool "Generate little endian code" diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index cb79fba79d43..b88a8dd14933 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c @@ -122,7 +122,6 @@ void abort(void) /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } -EXPORT_SYMBOL(abort); void __init trap_init(void) { diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c index 84938fdbbada..908d58347790 100644 --- a/arch/m68k/coldfire/device.c +++ b/arch/m68k/coldfire/device.c @@ -135,7 +135,11 @@ static struct platform_device mcf_fec0 = { .id = 0, .num_resources = ARRAY_SIZE(mcf_fec0_resources), .resource = mcf_fec0_resources, - .dev.platform_data = FEC_PDATA, + .dev = { + .dma_mask = &mcf_fec0.dev.coherent_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + .platform_data = FEC_PDATA, + } }; #ifdef MCFFEC_BASE1 @@ -167,7 +171,11 @@ static struct platform_device mcf_fec1 = { .id = 1, .num_resources = ARRAY_SIZE(mcf_fec1_resources), .resource = mcf_fec1_resources, - .dev.platform_data = FEC_PDATA, + .dev = { + .dma_mask = &mcf_fec1.dev.coherent_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + .platform_data = FEC_PDATA, + } }; #endif /* MCFFEC_BASE1 */ #endif /* CONFIG_FEC */ diff --git a/arch/m68k/include/asm/bug.h b/arch/m68k/include/asm/bug.h index b7e2bf1ba4a6..275dca1435bf 100644 --- a/arch/m68k/include/asm/bug.h +++ b/arch/m68k/include/asm/bug.h @@ -8,16 +8,19 @@ #ifndef CONFIG_SUN3 #define BUG() do { \ pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ + barrier_before_unreachable(); \ __builtin_trap(); \ } while (0) #else #define BUG() do { \ pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ + barrier_before_unreachable(); \ panic("BUG!"); \ } while (0) #endif #else #define BUG() do { \ + 
barrier_before_unreachable(); \ __builtin_trap(); \ } while (0) #endif diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds index 3aa571a513b5..cf6edda38971 100644 --- a/arch/m68k/kernel/vmlinux-nommu.lds +++ b/arch/m68k/kernel/vmlinux-nommu.lds @@ -45,6 +45,8 @@ SECTIONS { .text : { HEAD_TEXT TEXT_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds index 89172b8974b9..625a5785804f 100644 --- a/arch/m68k/kernel/vmlinux-std.lds +++ b/arch/m68k/kernel/vmlinux-std.lds @@ -16,6 +16,8 @@ SECTIONS .text : { HEAD_TEXT TEXT_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds index 293990efc917..9868270b0984 100644 --- a/arch/m68k/kernel/vmlinux-sun3.lds +++ b/arch/m68k/kernel/vmlinux-sun3.lds @@ -16,6 +16,8 @@ SECTIONS .text : { HEAD_TEXT TEXT_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index 22123f7e8f75..2004b3f72d80 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -1017,7 +1017,7 @@ int __init mac_platform_init(void) struct resource swim_rsrc = { .flags = IORESOURCE_MEM, .start = (resource_size_t)swim_base, - .end = (resource_size_t)swim_base + 0x2000, + .end = (resource_size_t)swim_base + 0x1FFF, }; platform_device_register_simple("swim", -1, &swim_rsrc, 1); diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c index c2a38321c96d..3b420f6d8822 100644 --- a/arch/m68k/mm/kmap.c +++ b/arch/m68k/mm/kmap.c @@ -89,7 +89,8 @@ static inline void free_io_area(void *addr) for (p = &iolist ; (tmp = *p) ; p = &tmp->next) { if (tmp->addr == addr) { *p = tmp->next; - __iounmap(tmp->addr, tmp->size); + /* remove gap added in get_io_area() */ + __iounmap(tmp->addr, tmp->size - IO_SIZE); kfree(tmp); return; } diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c index 8d1408583cf4..b523a604cb87 100644 --- a/arch/m68k/mm/mcfmmu.c +++ b/arch/m68k/mm/mcfmmu.c @@ -170,7 +170,7 @@ void __init cf_bootmem_alloc(void) max_pfn = max_low_pfn = PFN_DOWN(_ramend); high_memory = (void *)_ramend; - m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6; + m68k_virt_to_node_shift = fls(_ramend - 1) - 6; module_fixup(NULL, __start_fixup, __stop_fixup); /* setup bootmem data */ diff --git a/arch/metag/boot/.gitignore b/arch/metag/boot/.gitignore index 2d6c0c160884..6c662ddb909a 100644 --- a/arch/metag/boot/.gitignore +++ b/arch/metag/boot/.gitignore @@ -1,4 +1,3 @@ vmlinux* uImage* ramdisk.* -*.dtb* diff --git a/arch/microblaze/Kconfig.platform b/arch/microblaze/Kconfig.platform index 1b3d8c849101..f7f1739c11b9 100644 --- a/arch/microblaze/Kconfig.platform +++ b/arch/microblaze/Kconfig.platform @@ -20,6 +20,7 @@ config OPT_LIB_FUNCTION config OPT_LIB_ASM bool "Optimalized lib function ASM" depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1) + depends on CPU_BIG_ENDIAN default n help Allows turn on optimalized library function (memcpy and memmove). 
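The mac/config.c hunk above is the classic inclusive-range fix: struct resource bounds are inclusive, and resource_size() in include/linux/ioport.h computes end - start + 1, so an 8 KiB SWIM window must end at swim_base + 0x1FFF, not + 0x2000. A hedged sketch of the rule, with an illustrative base address rather than real Mac hardware layout:

```c
/*
 * Sketch of the inclusive-range rule behind the SWIM resource fix.
 * EXAMPLE_SWIM_BASE is hypothetical, not a real Mac address.
 */
#include <linux/ioport.h>
#include <linux/bug.h>

#define EXAMPLE_SWIM_BASE	0x50016000UL	/* hypothetical */

static struct resource swim_rsrc_example = {
	.flags = IORESOURCE_MEM,
	.start = EXAMPLE_SWIM_BASE,
	.end   = EXAMPLE_SWIM_BASE + 0x1FFF,	/* inclusive end: 0x2000 bytes */
};

static void swim_rsrc_check(void)
{
	/*
	 * resource_size() is end - start + 1, so the old "+ 0x2000"
	 * end claimed one byte too many.
	 */
	WARN_ON(resource_size(&swim_rsrc_example) != 0x2000);
}
```

request_mem_region() and resource_size() both treat .end as the last valid byte, which is why the fix trims the end address rather than the size.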
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile index 830ee7d42fa0..d269dd4b8279 100644 --- a/arch/microblaze/Makefile +++ b/arch/microblaze/Makefile @@ -36,16 +36,21 @@ endif CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_DIV) += -mno-xl-soft-div CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_BARREL) += -mxl-barrel-shift CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR) += -mxl-pattern-compare -CPUFLAGS-$(CONFIG_BIG_ENDIAN) += -mbig-endian -CPUFLAGS-$(CONFIG_LITTLE_ENDIAN) += -mlittle-endian + +ifdef CONFIG_CPU_BIG_ENDIAN +KBUILD_CFLAGS += -mbig-endian +KBUILD_AFLAGS += -mbig-endian +LD += -EB +else +KBUILD_CFLAGS += -mlittle-endian +KBUILD_AFLAGS += -mlittle-endian +LD += -EL +endif CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER)) # r31 holds current when in kernel mode -KBUILD_CFLAGS += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2) - -LDFLAGS := -LDFLAGS_vmlinux := +KBUILD_CFLAGS += -ffixed-r31 $(CPUFLAGS-y) $(CPUFLAGS-1) $(CPUFLAGS-2) head-y := arch/microblaze/kernel/head.o libs-y += arch/microblaze/lib/ diff --git a/arch/microblaze/boot/.gitignore b/arch/microblaze/boot/.gitignore index bf0459186027..679502d64a97 100644 --- a/arch/microblaze/boot/.gitignore +++ b/arch/microblaze/boot/.gitignore @@ -1,3 +1,2 @@ -*.dtb linux.bin* simpleImage.* diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile index 47f94cc383b6..7c2f52d4a0e4 100644 --- a/arch/microblaze/boot/Makefile +++ b/arch/microblaze/boot/Makefile @@ -22,17 +22,19 @@ $(obj)/linux.bin.gz: $(obj)/linux.bin FORCE quiet_cmd_cp = CP $< $@$2 cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false) -quiet_cmd_strip = STRIP $@ +quiet_cmd_strip = STRIP $< $@$2 cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \ - -K _fdt_start vmlinux -o $@ + -K _fdt_start $< -o $@$2 UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR) +UIMAGE_IN = $@ +UIMAGE_OUT = $@.ub $(obj)/simpleImage.%: vmlinux FORCE $(call if_changed,cp,.unstrip) $(call if_changed,objcopy) $(call if_changed,uimage) - $(call if_changed,strip) - @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' + $(call if_changed,strip,.strip) + @echo 'Kernel: $(UIMAGE_OUT) is ready' ' (#'`cat .version`')' clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S index 62021d7e249e..fdc48bb065d8 100644 --- a/arch/microblaze/lib/fastcopy.S +++ b/arch/microblaze/lib/fastcopy.S @@ -29,10 +29,6 @@ * between mem locations with size of xfer spec'd in bytes */ -#ifdef __MICROBLAZEEL__ -#error Microblaze LE not support ASM optimized lib func. Disable OPT_LIB_ASM. 
-#endif - #include .text .globl memcpy diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 5d3284d20678..c82457b0e733 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -65,7 +65,7 @@ config MIPS select HAVE_PERF_EVENTS select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_SYSCALL_TRACEPOINTS - select HAVE_VIRT_CPU_ACCOUNTING_GEN + select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP select IRQ_FORCED_THREADING select MODULES_USE_ELF_RELA if MODULES && 64BIT select MODULES_USE_ELF_REL if MODULES @@ -119,12 +119,12 @@ config MIPS_GENERIC select SYS_SUPPORTS_MULTITHREADING select SYS_SUPPORTS_RELOCATABLE select SYS_SUPPORTS_SMARTMIPS - select USB_EHCI_BIG_ENDIAN_DESC if BIG_ENDIAN - select USB_EHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN - select USB_OHCI_BIG_ENDIAN_DESC if BIG_ENDIAN - select USB_OHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN - select USB_UHCI_BIG_ENDIAN_DESC if BIG_ENDIAN - select USB_UHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN + select USB_EHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN + select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN + select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN + select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN + select USB_UHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN + select USB_UHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN select USE_OF help Select this to build a kernel which aims to support multiple boards, diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c index d77a64f4c78b..ed8ab0de612f 100644 --- a/arch/mips/alchemy/common/platform.c +++ b/arch/mips/alchemy/common/platform.c @@ -28,26 +28,6 @@ #include -static void alchemy_8250_pm(struct uart_port *port, unsigned int state, - unsigned int old_state) -{ -#ifdef CONFIG_SERIAL_8250 - switch (state) { - case 0: - alchemy_uart_enable(CPHYSADDR(port->membase)); - serial8250_do_pm(port, state, old_state); - break; - case 3: /* power off */ - serial8250_do_pm(port, state, old_state); - alchemy_uart_disable(CPHYSADDR(port->membase)); - break; - default: - serial8250_do_pm(port, state, old_state); - break; - } -#endif -} - #define PORT(_base, _irq) \ { \ .mapbase = _base, \ @@ -57,7 +37,6 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state, .flags = UPF_SKIP_TEST | UPF_IOREMAP | \ UPF_FIXED_TYPE, \ .type = PORT_16550A, \ - .pm = alchemy_8250_pm, \ } static struct plat_serial8250_port au1x00_uart_data[][4] __initdata = { diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c index 4674f1efbe7a..e1675c25d5d4 100644 --- a/arch/mips/ar7/platform.c +++ b/arch/mips/ar7/platform.c @@ -575,7 +575,7 @@ static int __init ar7_register_uarts(void) uart_port.type = PORT_AR7; uart_port.uartclk = clk_get_rate(bus_clk) / 2; uart_port.iotype = UPIO_MEM32; - uart_port.flags = UPF_FIXED_TYPE; + uart_port.flags = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF; uart_port.regshift = 2; uart_port.line = 0; diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c index 9ab48ff80c1c..6d11ae581ea7 100644 --- a/arch/mips/ath25/board.c +++ b/arch/mips/ath25/board.c @@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size) } board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL); + if (!board_data) + goto error; ath25_board.config = (struct ath25_boarddata *)board_data; memcpy_fromio(board_data, bcfg, 0x100); if (broken_boarddata) { diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c index 10a405d593df..c782b10ddf50 100644 --- a/arch/mips/ath79/common.c +++ b/arch/mips/ath79/common.c @@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init); void 
ath79_ddr_wb_flush(u32 reg) { - void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg; + void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4); /* Flush the DDR write buffer. */ __raw_writel(0x1, flush_reg); diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c index d4f2407a42c6..8307a8a02667 100644 --- a/arch/mips/bcm47xx/leds.c +++ b/arch/mips/bcm47xx/leds.c @@ -331,7 +331,7 @@ bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = { /* Verified on: WRT54GS V1.0 */ static const struct gpio_led bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = { - BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF), BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON), BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF), }; diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index 6054d49e608e..8c9cbf13d32a 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@ -212,6 +212,12 @@ static int __init bcm47xx_cpu_fixes(void) */ if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706) cpu_wait = NULL; + + /* + * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail" + * Enable ExternalSync for sync instruction to take effect + */ + set_c0_config7(MIPS_CONF7_ES); break; #endif } diff --git a/arch/mips/boot/.gitignore b/arch/mips/boot/.gitignore index d3962cd5ce0c..a73d6e2c4f64 100644 --- a/arch/mips/boot/.gitignore +++ b/arch/mips/boot/.gitignore @@ -5,4 +5,3 @@ zImage zImage.tmp calc_vmlinuz_load_addr uImage -*.dtb diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index 1bd5c4f00d19..c22da16d67b8 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile @@ -126,6 +126,7 @@ $(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS quiet_cmd_cpp_its_S = ITS $@ cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \ + -D__ASSEMBLY__ \ -DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \ -DVMLINUX_BINARY="\"$(3)\"" \ -DVMLINUX_COMPRESSION="\"$(2)\"" \ diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index c675eece389a..adce180f3ee4 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -133,4 +133,8 @@ vmlinuz.srec: vmlinuz uzImage.bin: vmlinuz.bin FORCE $(call if_changed,uimage,none) -clean-files := $(objtree)/vmlinuz $(objtree)/vmlinuz.{32,ecoff,bin,srec} +clean-files += $(objtree)/vmlinuz +clean-files += $(objtree)/vmlinuz.32 +clean-files += $(objtree)/vmlinuz.ecoff +clean-files += $(objtree)/vmlinuz.bin +clean-files += $(objtree)/vmlinuz.srec diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile index 9e09cc4556b3..398994312361 100644 --- a/arch/mips/boot/dts/brcm/Makefile +++ b/arch/mips/boot/dts/brcm/Makefile @@ -23,7 +23,6 @@ dtb-$(CONFIG_DT_NONE) += \ bcm63268-comtrend-vr-3032u.dtb \ bcm93384wvg.dtb \ bcm93384wvg_viper.dtb \ - bcm96358nb4ser.dtb \ bcm96368mvwg.dtb \ bcm9ejtagprb.dtb \ bcm97125cbmb.dtb \ diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts index 2cd49b60e030..f7aad80c69ab 100644 --- a/arch/mips/boot/dts/img/boston.dts +++ b/arch/mips/boot/dts/img/boston.dts @@ -51,6 +51,8 @@ ranges = <0x02000000 0 0x40000000 0x40000000 0 0x40000000>; + bus-range = <0x00 0xff>; + interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pci0_intc 1>, <0 0 0 2 &pci0_intc 2>, @@ -79,6 +81,8 @@ ranges = <0x02000000 0 0x20000000 0x20000000 0 0x20000000>; + bus-range = <0x00 0xff>; + 
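/* (Editorial aside, not part of the patch: bus-range = <0x00 0xff>
 * declares the window of PCI bus numbers this root bridge may hand
 * out, here the full 0-255 range; without the property the PCI core
 * typically warns and falls back to assuming [bus 00-ff] anyway, so
 * stating it explicitly mainly documents intent and silences the
 * warning.) */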
interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pci1_intc 1>, <0 0 0 2 &pci1_intc 2>, @@ -107,6 +111,8 @@ ranges = <0x02000000 0 0x16000000 0x16000000 0 0x100000>; + bus-range = <0x00 0xff>; + interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pci2_intc 1>, <0 0 0 2 &pci2_intc 2>, diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 5b3a3f6a9ad3..b3aec101a65d 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -2271,17 +2271,19 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, parent_irq = irq_of_parse_and_map(ciu_node, 0); if (!parent_irq) { - pr_err("ERROR: Couldn't acquire parent_irq for %s\n.", + pr_err("ERROR: Couldn't acquire parent_irq for %s\n", ciu_node->name); return -EINVAL; } host_data = kzalloc(sizeof(*host_data), GFP_KERNEL); + if (!host_data) + return -ENOMEM; raw_spin_lock_init(&host_data->lock); addr = of_get_address(ciu_node, 0, NULL, NULL); if (!addr) { - pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name); + pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name); return -EINVAL; } host_data->raw_reg = (u64)phys_to_virt( @@ -2289,7 +2291,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, addr = of_get_address(ciu_node, 1, NULL, NULL); if (!addr) { - pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name); + pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name); return -EINVAL; } host_data->en_reg = (u64)phys_to_virt( @@ -2297,7 +2299,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, r = of_property_read_u32(ciu_node, "cavium,max-bits", &val); if (r) { - pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.", + pr_err("ERROR: Couldn't read cavium,max-bits from %s\n", ciu_node->name); return r; } @@ -2307,7 +2309,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, &octeon_irq_domain_cib_ops, host_data); if (!cib_domain) { - pr_err("ERROR: Couldn't irq_domain_add_linear()\n."); + pr_err("ERROR: Couldn't irq_domain_add_linear()\n"); return -ENOMEM; } diff --git a/arch/mips/generic/irq.c b/arch/mips/generic/irq.c index 394f8161e462..cb7fdaeef426 100644 --- a/arch/mips/generic/irq.c +++ b/arch/mips/generic/irq.c @@ -22,10 +22,10 @@ int get_c0_fdc_int(void) { int mips_cpu_fdc_irq; - if (cpu_has_veic) - panic("Unimplemented!"); - else if (mips_gic_present()) + if (mips_gic_present()) mips_cpu_fdc_irq = gic_get_c0_fdc_int(); + else if (cpu_has_veic) + panic("Unimplemented!"); else if (cp0_fdc_irq >= 0) mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq; else @@ -38,10 +38,10 @@ int get_c0_perfcount_int(void) { int mips_cpu_perf_irq; - if (cpu_has_veic) - panic("Unimplemented!"); - else if (mips_gic_present()) + if (mips_gic_present()) mips_cpu_perf_irq = gic_get_c0_perfcount_int(); + else if (cpu_has_veic) + panic("Unimplemented!"); else if (cp0_perfcount_irq >= 0) mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; else @@ -54,10 +54,10 @@ unsigned int get_c0_compare_int(void) { int mips_cpu_timer_irq; - if (cpu_has_veic) - panic("Unimplemented!"); - else if (mips_gic_present()) + if (mips_gic_present()) mips_cpu_timer_irq = gic_get_c0_compare_int(); + else if (cpu_has_veic) + panic("Unimplemented!"); else mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 7c8aab23bce8..b1f66699677d 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -16,7 
+16,6 @@ generic-y += qrwlock.h generic-y += qspinlock.h generic-y += sections.h generic-y += segment.h -generic-y += serial.h generic-y += trace_clock.h generic-y += unaligned.h generic-y += user.h diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 83054f79f72a..feb069cbf44e 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -19,6 +19,9 @@ #include #endif +/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ +#undef fp + /* * Helper macros for generating raw instruction encodings. */ @@ -105,6 +108,7 @@ .macro fpu_save_16odd thread .set push .set mips64r2 + .set fp=64 SET_HARDFLOAT sdc1 $f1, THREAD_FPR1(\thread) sdc1 $f3, THREAD_FPR3(\thread) @@ -126,8 +130,8 @@ .endm .macro fpu_save_double thread status tmp -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) sll \tmp, \status, 5 bgez \tmp, 10f fpu_save_16odd \thread @@ -163,6 +167,7 @@ .macro fpu_restore_16odd thread .set push .set mips64r2 + .set fp=64 SET_HARDFLOAT ldc1 $f1, THREAD_FPR1(\thread) ldc1 $f3, THREAD_FPR3(\thread) @@ -184,8 +189,8 @@ .endm .macro fpu_restore_double thread status tmp -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) sll \tmp, \status, 5 bgez \tmp, 10f # 16 register mode? @@ -234,9 +239,6 @@ .endm #ifdef TOOLCHAIN_SUPPORTS_MSA -/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ -#undef fp - .macro _cfcmsa rd, cs .set push .set mips32r2 diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index 7e25c5cc353a..89e9fb7976fe 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h @@ -204,8 +204,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, #else #include #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#ifndef CONFIG_SMP #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) #endif +#endif #undef __scbeqz diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h index 8e2b5b556488..08ec0762ca50 100644 --- a/arch/mips/include/asm/compat.h +++ b/arch/mips/include/asm/compat.h @@ -86,7 +86,6 @@ struct compat_flock { compat_off_t l_len; s32 l_sysid; compat_pid_t l_pid; - short __unused; s32 pad[4]; }; @@ -200,7 +199,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL /* * A pointer passed in from user mode. 
This should not diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 0cbf3af37eca..cea8ad864b3f 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -307,7 +307,7 @@ static inline void iounmap(const volatile void __iomem *addr) #if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT) #define war_io_reorder_wmb() wmb() #else -#define war_io_reorder_wmb() do { } while (0) +#define war_io_reorder_wmb() barrier() #endif #define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \ @@ -377,6 +377,8 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \ BUG(); \ } \ \ + /* prevent prefetching of coherent DMA data prematurely */ \ + rmb(); \ return pfx##ioswab##bwlq(__mem, __val); \ } @@ -412,6 +414,8 @@ static inline type pfx##in##bwlq##p(unsigned long port) \ __val = *__addr; \ slow; \ \ + /* prevent prefetching of coherent DMA data prematurely */ \ + rmb(); \ return pfx##ioswab##bwlq(__addr, __val); \ } diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h index aa3800c82332..d99ca862dae3 100644 --- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h +++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h @@ -167,7 +167,7 @@ #define AR71XX_AHB_DIV_MASK 0x7 #define AR724X_PLL_REG_CPU_CONFIG 0x00 -#define AR724X_PLL_REG_PCIE_CONFIG 0x18 +#define AR724X_PLL_REG_PCIE_CONFIG 0x10 #define AR724X_PLL_FB_SHIFT 0 #define AR724X_PLL_FB_MASK 0x3ff diff --git a/arch/mips/include/asm/machine.h b/arch/mips/include/asm/machine.h index e0d9b373d415..f83879dadd1e 100644 --- a/arch/mips/include/asm/machine.h +++ b/arch/mips/include/asm/machine.h @@ -52,7 +52,7 @@ mips_machine_is_compatible(const struct mips_machine *mach, const void *fdt) if (!mach->matches) return NULL; - for (match = mach->matches; match->compatible; match++) { + for (match = mach->matches; match->compatible[0]; match++) { if (fdt_node_check_compatible(fdt, 0, match->compatible) == 0) return match; } diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index a6810923b3f0..60c787d943b0 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -680,6 +680,8 @@ #define MIPS_CONF7_WII (_ULCAST_(1) << 31) #define MIPS_CONF7_RPS (_ULCAST_(1) << 2) +/* ExternalSync */ +#define MIPS_CONF7_ES (_ULCAST_(1) << 8) #define MIPS_CONF7_IAR (_ULCAST_(1) << 10) #define MIPS_CONF7_AR (_ULCAST_(1) << 16) @@ -2745,6 +2747,7 @@ __BUILD_SET_C0(status) __BUILD_SET_C0(cause) __BUILD_SET_C0(config) __BUILD_SET_C0(config5) +__BUILD_SET_C0(config7) __BUILD_SET_C0(intcontrol) __BUILD_SET_C0(intctl) __BUILD_SET_C0(srsmap) diff --git a/arch/mips/include/asm/serial.h b/arch/mips/include/asm/serial.h new file mode 100644 index 000000000000..1d830c6666c2 --- /dev/null +++ b/arch/mips/include/asm/serial.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2017 MIPS Tech, LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef __ASM__SERIAL_H +#define __ASM__SERIAL_H + +#ifdef CONFIG_MIPS_GENERIC +/* + * Generic kernels cannot know a correct value for all platforms at + * compile time. 
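 * (Editorial aside, not part of the patch: on a platform with one
 * fixed, known UART input clock the header would instead define the
 * constant directly; a sketch using the classic PC clock, purely as
 * an assumed example:
 *
 *	#define BASE_BAUD (1843200 / 16)
 *
 * A generic multi-platform kernel has to learn the clock from DT at
 * run time, hence the 0 below.)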
Set it to 0 to prevent 8250_early using it + */ +#define BASE_BAUD 0 +#else +#include +#endif + +#endif /* __ASM__SERIAL_H */ diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index b71306947290..06629011a434 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -654,6 +654,13 @@ __clear_user(void __user *addr, __kernel_size_t size) { __kernel_size_t res; +#ifdef CONFIG_CPU_MICROMIPS +/* micromips memset / bzero also clobbers t7 & t8 */ +#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31" +#else +#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31" +#endif /* CONFIG_CPU_MICROMIPS */ + if (eva_kernel_access()) { __asm__ __volatile__( "move\t$4, %1\n\t" @@ -663,7 +670,7 @@ __clear_user(void __user *addr, __kernel_size_t size) "move\t%0, $6" : "=r" (res) : "r" (addr), "r" (size) - : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); + : bzero_clobbers); } else { might_fault(); __asm__ __volatile__( @@ -674,7 +681,7 @@ __clear_user(void __user *addr, __kernel_size_t size) "move\t%0, $6" : "=r" (res) : "r" (addr), "r" (size) - : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); + : bzero_clobbers); } return res; diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 49c3d4795963..71370fb3ceef 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -123,4 +123,7 @@ #define SO_ZEROCOPY 60 +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index f2ee7e1e3342..cff52b283e03 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -119,10 +119,20 @@ NESTED(_mcount, PT_SIZE, ra) EXPORT_SYMBOL(_mcount) PTR_LA t1, ftrace_stub PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */ - bne t1, t2, static_trace + beq t1, t2, fgraph_trace nop + MCOUNT_SAVE_REGS + + move a0, ra /* arg1: self return address */ + jalr t2 /* (1) call *ftrace_trace_function */ + move a1, AT /* arg2: parent's return address */ + + MCOUNT_RESTORE_REGS + +fgraph_trace: #ifdef CONFIG_FUNCTION_GRAPH_TRACER + PTR_LA t1, ftrace_stub PTR_L t3, ftrace_graph_return bne t1, t3, ftrace_graph_caller nop @@ -131,24 +141,11 @@ EXPORT_SYMBOL(_mcount) bne t1, t3, ftrace_graph_caller nop #endif - b ftrace_stub -#ifdef CONFIG_32BIT - addiu sp, sp, 8 -#else - nop -#endif -static_trace: - MCOUNT_SAVE_REGS - - move a0, ra /* arg1: self return address */ - jalr t2 /* (1) call *ftrace_trace_function */ - move a1, AT /* arg2: parent's return address */ - - MCOUNT_RESTORE_REGS #ifdef CONFIG_32BIT addiu sp, sp, 8 #endif + .globl ftrace_stub ftrace_stub: RETURN_BACK diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index dd5567b1e305..8f5bd04f320a 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -292,7 +292,6 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core, *this_cpu_ptr(&cm_core_lock_flags)); } else { WARN_ON(cluster != 0); - WARN_ON(vp != 0); WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL); /* diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c index 19c88d770054..fcf9af492d60 100644 --- a/arch/mips/kernel/mips-cpc.c +++ b/arch/mips/kernel/mips-cpc.c @@ -10,6 +10,8 @@ #include #include +#include +#include #include #include @@ -22,6 +24,17 @@ static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags); phys_addr_t __weak mips_cpc_default_phys_base(void) { + struct device_node *cpc_node; + 
struct resource res; + int err; + + cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc"); + if (cpc_node) { + err = of_address_to_resource(cpc_node, 0, &res); + if (!err) + return res.start; + } + return 0; } diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index c5ff6bfe2825..e8d772a2597d 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -655,28 +656,42 @@ unsigned long arch_align_stack(unsigned long sp) return sp & ALMASK; } -static void arch_dump_stack(void *info) +static DEFINE_PER_CPU(call_single_data_t, backtrace_csd); +static struct cpumask backtrace_csd_busy; + +static void handle_backtrace(void *info) { - struct pt_regs *regs; + nmi_cpu_backtrace(get_irq_regs()); + cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy); +} - regs = get_irq_regs(); +static void raise_backtrace(cpumask_t *mask) +{ + call_single_data_t *csd; + int cpu; - if (regs) - show_regs(regs); + for_each_cpu(cpu, mask) { + /* + * If we previously sent an IPI to the target CPU & it hasn't + * cleared its bit in the busy cpumask then it didn't handle + * our previous IPI & it's not safe for us to reuse the + * call_single_data_t. + */ + if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) { + pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n", + cpu); + continue; + } - dump_stack(); + csd = &per_cpu(backtrace_csd, cpu); + csd->func = handle_backtrace; + smp_call_function_single_async(cpu, csd); + } } void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) { - long this_cpu = get_cpu(); - - if (cpumask_test_cpu(this_cpu, mask) && !exclude_self) - dump_stack(); - - smp_call_function_many(mask, arch_dump_stack, NULL, 1); - - put_cpu(); + nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace); } int mips_get_process_fp_mode(struct task_struct *task) @@ -705,10 +720,26 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) struct task_struct *t; int max_users; + /* If nothing to change, return right away, successfully. */ + if (value == mips_get_process_fp_mode(task)) + return 0; + + /* Only accept a mode change if 64-bit FP enabled for o32. */ + if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT)) + return -EOPNOTSUPP; + + /* And only for o32 tasks. */ + if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS)) + return -EOPNOTSUPP; + /* Check the value is valid */ if (value & ~known_bits) return -EOPNOTSUPP; + /* Setting FRE without FR is not supported. */ + if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE) + return -EOPNOTSUPP; + /* Avoid inadvertently triggering emulation */ if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu && !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64)) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 1395654cfc8d..e058cd300713 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -410,63 +410,174 @@ static int gpr64_set(struct task_struct *target, #endif /* CONFIG_64BIT */ +/* + * Copy the floating-point context to the supplied NT_PRFPREG buffer, + * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots + * correspond 1:1 to buffer slots. Only general registers are copied. 
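 * (Editorial aside, not part of the patch: the NT_PRFPREG layout these
 * helpers assume is NUM_FPU_REGS 64-bit register slots followed by two
 * 32-bit words, roughly:
 *
 *	offset 0                    : fpr[0] .. fpr[NUM_FPU_REGS - 1]
 *	offset NUM_FPU_REGS * 8     : fcr31, the FP control/status word
 *	offset NUM_FPU_REGS * 8 + 4 : FIR, the FP implementation register
 *
 * which is why fcr31_pos and fir_pos below are derived from
 * NUM_FPU_REGS * sizeof(elf_fpreg_t).)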
+ */ +static int fpr_get_fpa(struct task_struct *target, + unsigned int *pos, unsigned int *count, + void **kbuf, void __user **ubuf) +{ + return user_regset_copyout(pos, count, kbuf, ubuf, + &target->thread.fpu, + 0, NUM_FPU_REGS * sizeof(elf_fpreg_t)); +} + +/* + * Copy the floating-point context to the supplied NT_PRFPREG buffer, + * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's + * general register slots are copied to buffer slots. Only general + * registers are copied. + */ +static int fpr_get_msa(struct task_struct *target, + unsigned int *pos, unsigned int *count, + void **kbuf, void __user **ubuf) +{ + unsigned int i; + u64 fpr_val; + int err; + + BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); + for (i = 0; i < NUM_FPU_REGS; i++) { + fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0); + err = user_regset_copyout(pos, count, kbuf, ubuf, + &fpr_val, i * sizeof(elf_fpreg_t), + (i + 1) * sizeof(elf_fpreg_t)); + if (err) + return err; + } + + return 0; +} + +/* + * Copy the floating-point context to the supplied NT_PRFPREG buffer. + * Choose the appropriate helper for general registers, and then copy + * the FCSR and FIR registers separately. + */ static int fpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - unsigned i; + const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); + const int fir_pos = fcr31_pos + sizeof(u32); int err; - u64 fpr_val; - /* XXX fcr31 */ + if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) + err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf); + else + err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf); + if (err) + return err; + + err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu.fcr31, + fcr31_pos, fcr31_pos + sizeof(u32)); + if (err) + return err; - if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) - return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.fpu, - 0, sizeof(elf_fpregset_t)); + err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &boot_cpu_data.fpu_id, + fir_pos, fir_pos + sizeof(u32)); - for (i = 0; i < NUM_FPU_REGS; i++) { - fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0); - err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &fpr_val, i * sizeof(elf_fpreg_t), - (i + 1) * sizeof(elf_fpreg_t)); + return err; +} + +/* + * Copy the supplied NT_PRFPREG buffer to the floating-point context, + * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP + * context's general register slots. Only general registers are copied. + */ +static int fpr_set_fpa(struct task_struct *target, + unsigned int *pos, unsigned int *count, + const void **kbuf, const void __user **ubuf) +{ + return user_regset_copyin(pos, count, kbuf, ubuf, + &target->thread.fpu, + 0, NUM_FPU_REGS * sizeof(elf_fpreg_t)); +} + +/* + * Copy the supplied NT_PRFPREG buffer to the floating-point context, + * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64 + * bits only of FP context's general register slots. Only general + * registers are copied. 
+ */ +static int fpr_set_msa(struct task_struct *target, + unsigned int *pos, unsigned int *count, + const void **kbuf, const void __user **ubuf) +{ + unsigned int i; + u64 fpr_val; + int err; + + BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); + for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) { + err = user_regset_copyin(pos, count, kbuf, ubuf, + &fpr_val, i * sizeof(elf_fpreg_t), + (i + 1) * sizeof(elf_fpreg_t)); if (err) return err; + set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val); } return 0; } +/* + * Copy the supplied NT_PRFPREG buffer to the floating-point context. + * Choose the appropriate helper for general registers, and then copy + * the FCSR register separately. Ignore the incoming FIR register + * contents though, as the register is read-only. + * + * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0', + * which is supposed to have been guaranteed by the kernel before + * calling us, e.g. in `ptrace_regset'. We enforce that requirement, + * so that we can safely avoid preinitializing temporaries for + * partial register writes. + */ static int fpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - unsigned i; + const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); + const int fir_pos = fcr31_pos + sizeof(u32); + u32 fcr31; int err; - u64 fpr_val; - /* XXX fcr31 */ + BUG_ON(count % sizeof(elf_fpreg_t)); + + if (pos + count > sizeof(elf_fpregset_t)) + return -EIO; init_fp_ctx(target); - if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) - return user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.fpu, - 0, sizeof(elf_fpregset_t)); + if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) + err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf); + else + err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf); + if (err) + return err; - BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); - for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) { + if (count > 0) { err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &fpr_val, i * sizeof(elf_fpreg_t), - (i + 1) * sizeof(elf_fpreg_t)); + &fcr31, + fcr31_pos, fcr31_pos + sizeof(u32)); if (err) return err; - set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val); + + ptrace_setfcr31(target, fcr31); } - return 0; + if (count > 0) + err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + fir_pos, + fir_pos + sizeof(u32)); + + return err; } enum mips_regset { @@ -618,6 +729,19 @@ static const struct user_regset_view user_mips64_view = { .n = ARRAY_SIZE(mips64_regsets), }; +#ifdef CONFIG_MIPS32_N32 + +static const struct user_regset_view user_mipsn32_view = { + .name = "mipsn32", + .e_flags = EF_MIPS_ABI2, + .e_machine = ELF_ARCH, + .ei_osabi = ELF_OSABI, + .regsets = mips64_regsets, + .n = ARRAY_SIZE(mips64_regsets), +}; + +#endif /* CONFIG_MIPS32_N32 */ + #endif /* CONFIG_64BIT */ const struct user_regset_view *task_user_regset_view(struct task_struct *task) @@ -628,6 +752,10 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task) #ifdef CONFIG_MIPS32_O32 if (test_tsk_thread_flag(task, TIF_32BIT_REGS)) return &user_mips_view; +#endif +#ifdef CONFIG_MIPS32_N32 + if (test_tsk_thread_flag(task, TIF_32BIT_ADDR)) + return &user_mipsn32_view; #endif return &user_mips64_view; #endif @@ -670,7 +798,7 @@ long arch_ptrace(struct task_struct *child, long request, fregs = get_fpu_regs(child); #ifdef CONFIG_32BIT - if 
(test_thread_flag(TIF_32BIT_FPREGS)) { + if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { /* * The odd registers are actually the high * order bits of the values stored in the even @@ -681,7 +809,7 @@ long arch_ptrace(struct task_struct *child, long request, break; } #endif - tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); + tmp = get_fpr64(&fregs[addr - FPR_BASE], 0); break; case PC: tmp = regs->cp0_epc; @@ -759,7 +887,7 @@ long arch_ptrace(struct task_struct *child, long request, init_fp_ctx(child); #ifdef CONFIG_32BIT - if (test_thread_flag(TIF_32BIT_FPREGS)) { + if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { /* * The odd registers are actually the high * order bits of the values stored in the even diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index 40e212d6b26b..89026d33a07b 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c @@ -98,7 +98,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, break; } fregs = get_fpu_regs(child); - if (test_thread_flag(TIF_32BIT_FPREGS)) { + if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { /* * The odd registers are actually the high * order bits of the values stored in the even @@ -108,7 +108,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, addr & 1); break; } - tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); + tmp = get_fpr64(&fregs[addr - FPR_BASE], 0); break; case PC: tmp = regs->cp0_epc; @@ -205,7 +205,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, sizeof(child->thread.fpu)); child->thread.fpu.fcr31 = 0; } - if (test_thread_flag(TIF_32BIT_FPREGS)) { + if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { /* * The odd registers are actually the high * order bits of the values stored in the even diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 0a83b1708b3c..8e3a6020c613 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -40,8 +40,8 @@ */ LEAF(_save_fp) EXPORT_SYMBOL(_save_fp) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) mfc0 t0, CP0_STATUS #endif fpu_save_double a0 t0 t1 # clobbers t1 @@ -52,8 +52,8 @@ EXPORT_SYMBOL(_save_fp) * Restore a thread's fp context. 
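 * (Editorial aside, not part of the patch: CONFIG_CPU_MIPSR2 and
 * CONFIG_CPU_MIPSR6 are the release-level Kconfig symbols selected by
 * both the 32-bit and 64-bit CPU choices, so the rewritten guards also
 * cover cases such as a 32-bit kernel built for a MIPS64r2 CPU, which
 * the old CONFIG_CPU_MIPS32_R2 test missed.)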
*/ LEAF(_restore_fp) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) mfc0 t0, CP0_STATUS #endif fpu_restore_double a0 t0 t1 # clobbers t1 @@ -246,11 +246,11 @@ LEAF(_save_fp_context) cfc1 t1, fcr31 .set pop -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) .set push SET_HARDFLOAT -#ifdef CONFIG_CPU_MIPS32_R2 +#ifdef CONFIG_CPU_MIPSR2 .set mips32r2 .set fp=64 mfc0 t0, CP0_STATUS @@ -314,11 +314,11 @@ LEAF(_save_fp_context) LEAF(_restore_fp_context) EX lw t1, 0(a1) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ - defined(CONFIG_CPU_MIPS32_R6) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ + defined(CONFIG_CPU_MIPSR6) .set push SET_HARDFLOAT -#ifdef CONFIG_CPU_MIPS32_R2 +#ifdef CONFIG_CPU_MIPSR2 .set mips32r2 .set fp=64 mfc0 t0, CP0_STATUS diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index fe3939726765..795caa763da3 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -374,6 +374,7 @@ static void __init bootmem_init(void) unsigned long reserved_end; unsigned long mapstart = ~0UL; unsigned long bootmap_size; + phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX; bool bootmap_valid = false; int i; @@ -394,7 +395,8 @@ static void __init bootmem_init(void) max_low_pfn = 0; /* - * Find the highest page frame number we have available. + * Find the highest page frame number we have available + * and the lowest used RAM address */ for (i = 0; i < boot_mem_map.nr_map; i++) { unsigned long start, end; @@ -406,6 +408,8 @@ static void __init bootmem_init(void) end = PFN_DOWN(boot_mem_map.map[i].addr + boot_mem_map.map[i].size); + ramstart = min(ramstart, boot_mem_map.map[i].addr); + #ifndef CONFIG_HIGHMEM /* * Skip highmem here so we get an accurate max_low_pfn if low @@ -435,6 +439,13 @@ static void __init bootmem_init(void) mapstart = max(reserved_end, start); } + /* + * Reserve any memory between the start of RAM and PHYS_OFFSET + */ + if (ramstart > PHYS_OFFSET) + add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET, + BOOT_MEM_RESERVED); + if (min_low_pfn >= max_low_pfn) panic("Incorrect memory mapping !!!"); if (min_low_pfn > ARCH_PFN_OFFSET) { @@ -663,9 +674,6 @@ static int __init early_parse_mem(char *p) add_memory_region(start, size, BOOT_MEM_RAM); - if (start && start > PHYS_OFFSET) - add_memory_region(PHYS_OFFSET, start - PHYS_OFFSET, - BOOT_MEM_RESERVED); return 0; } early_param("mem", early_parse_mem); diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 87dcac2447c8..382d12eb88f0 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -168,11 +168,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus) return; } - if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, - "smp_ipi0", NULL)) + if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, + IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL)) panic("Can't request IPI0 interrupt"); - if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, - "smp_ipi1", NULL)) + if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, + IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL)) panic("Can't request IPI1 interrupt"); } diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 5669d3b8bd38..583aed906933 100644 --- a/arch/mips/kernel/traps.c +++ 
b/arch/mips/kernel/traps.c @@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs) void show_regs(struct pt_regs *regs) { __show_regs((struct pt_regs *)regs); + dump_stack(); } void show_registers(struct pt_regs *regs) diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index d535edc01434..9730ba734afe 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -45,7 +45,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU }, { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU }, { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU }, - { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU }, + { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU }, { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU }, { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU }, { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU }, @@ -445,10 +445,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { int r = -EINTR; - sigset_t sigsaved; - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + kvm_sigset_activate(vcpu); if (vcpu->mmio_needed) { if (!vcpu->mmio_is_write) @@ -480,8 +478,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) local_irq_enable(); out: - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); + kvm_sigset_deactivate(vcpu); return r; } diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig index 692ae85a3e3d..8e3a1fc2bc39 100644 --- a/arch/mips/lantiq/Kconfig +++ b/arch/mips/lantiq/Kconfig @@ -13,6 +13,8 @@ choice config SOC_AMAZON_SE bool "Amazon SE" select SOC_TYPE_XWAY + select MFD_SYSCON + select MFD_CORE config SOC_XWAY bool "XWAY" diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index 7611c3013793..c05bed624075 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -551,9 +551,9 @@ void __init ltq_soc_init(void) clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(), ltq_ar9_fpi_hz(), CLOCK_250M); clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P); - clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0); + clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM); clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P); - clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1); + clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM); clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH); clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); @@ -562,7 +562,7 @@ void __init ltq_soc_init(void) } else { clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(), ltq_danube_fpi_hz(), ltq_danube_pp32_hz()); - clkdev_add_pmu("1f203018.usb2-phy", "ctrl", 1, 0, PMU_USB0); + clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM); clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P); clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index 78c2affeabf8..e84e12655fa8 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile @@ -16,4 +16,5 @@ obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o # libgcc-style stuff needed in the kernel -obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o 
ucmpdi2.o +obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o multi3.o \ + ucmpdi2.o diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h index 28002ed90c2c..199a7f96282f 100644 --- a/arch/mips/lib/libgcc.h +++ b/arch/mips/lib/libgcc.h @@ -10,10 +10,18 @@ typedef int word_type __attribute__ ((mode (__word__))); struct DWstruct { int high, low; }; + +struct TWstruct { + long long high, low; +}; #elif defined(__LITTLE_ENDIAN) struct DWstruct { int low, high; }; + +struct TWstruct { + long long low, high; +}; #else #error I feel sick. #endif @@ -23,4 +31,13 @@ typedef union { long long ll; } DWunion; +#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) +typedef int ti_type __attribute__((mode(TI))); + +typedef union { + struct TWstruct s; + ti_type ti; +} TWunion; +#endif + #endif /* __ASM_LIBGCC_H */ diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index a1456664d6c2..f7327979a8f8 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S @@ -219,7 +219,7 @@ 1: PTR_ADDIU a0, 1 /* fill bytewise */ R10KCBARRIER(0(ra)) bne t1, a0, 1b - sb a1, -1(a0) + EX(sb, a1, -1(a0), .Lsmall_fixup\@) 2: jr ra /* done */ move a2, zero @@ -252,13 +252,18 @@ PTR_L t0, TI_TASK($28) andi a2, STORMASK LONG_L t0, THREAD_BUADDR(t0) - LONG_ADDU a2, t1 + LONG_ADDU a2, a0 jr ra LONG_SUBU a2, t0 .Llast_fixup\@: jr ra - andi v1, a2, STORMASK + nop + +.Lsmall_fixup\@: + PTR_SUBU a2, t1, a0 + jr ra + PTR_ADDIU a2, 1 .endm diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c new file mode 100644 index 000000000000..111ad475aa0c --- /dev/null +++ b/arch/mips/lib/multi3.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include "libgcc.h" + +/* + * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that + * specific case only we'll implement it here. 
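 * (Editorial aside, not part of the patch: __multi3 is the libcall GCC
 * emits for plain 128-bit multiplication, so a hypothetical function
 * like the following, built by GCC 7 for mips64r6, ends up calling the
 * implementation below:
 *
 *	unsigned __int128 mul128(unsigned __int128 a, unsigned __int128 b)
 *	{
 *		return a * b;
 *	}
 * )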
+ * + * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981 + */ +#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7) + +/* multiply 64-bit values, low 64-bits returned */ +static inline long long notrace dmulu(long long a, long long b) +{ + long long res; + + asm ("dmulu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b)); + return res; +} + +/* multiply 64-bit unsigned values, high 64-bits of 128-bit result returned */ +static inline long long notrace dmuhu(long long a, long long b) +{ + long long res; + + asm ("dmuhu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b)); + return res; +} + +/* multiply 128-bit values, low 128-bits returned */ +ti_type notrace __multi3(ti_type a, ti_type b) +{ + TWunion res, aa, bb; + + aa.ti = a; + bb.ti = b; + + /* + * a * b = (a.lo * b.lo) + * + 2^64 * (a.hi * b.lo + a.lo * b.hi) + * [+ 2^128 * (a.hi * b.hi)] + */ + res.s.low = dmulu(aa.s.low, bb.s.low); + res.s.high = dmuhu(aa.s.low, bb.s.low); + res.s.high += dmulu(aa.s.high, bb.s.low); + res.s.high += dmulu(aa.s.low, bb.s.high); + + return res.ti; +} +EXPORT_SYMBOL(__multi3); + +#endif /* 64BIT && CPU_MIPSR6 && GCC7 */ diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 16d9ef5a78c5..6f57212f5659 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -1795,7 +1795,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(fs, MIPSInst_FS(ir)); SPFROMREG(fd, MIPSInst_FD(ir)); rv.s = ieee754sp_maddf(fd, fs, ft); - break; + goto copcsr; } case fmsubf_op: { @@ -1809,7 +1809,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(fs, MIPSInst_FS(ir)); SPFROMREG(fd, MIPSInst_FD(ir)); rv.s = ieee754sp_msubf(fd, fs, ft); - break; + goto copcsr; } case frint_op: { @@ -1834,7 +1834,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(fs, MIPSInst_FS(ir)); rv.w = ieee754sp_2008class(fs); rfmt = w_fmt; - break; + goto copcsr; } case fmin_op: { @@ -1847,7 +1847,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmin(fs, ft); - break; + goto copcsr; } case fmina_op: { @@ -1860,7 +1860,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmina(fs, ft); - break; + goto copcsr; } case fmax_op: { @@ -1873,7 +1873,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmax(fs, ft); - break; + goto copcsr; } case fmaxa_op: { @@ -1886,7 +1886,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, SPFROMREG(ft, MIPSInst_FT(ir)); SPFROMREG(fs, MIPSInst_FS(ir)); rv.s = ieee754sp_fmaxa(fs, ft); - break; + goto copcsr; } case fabs_op: @@ -2165,7 +2165,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(fs, MIPSInst_FS(ir)); DPFROMREG(fd, MIPSInst_FD(ir)); rv.d = ieee754dp_maddf(fd, fs, ft); - break; + goto copcsr; } case fmsubf_op: { @@ -2179,7 +2179,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(fs, MIPSInst_FS(ir)); DPFROMREG(fd, MIPSInst_FD(ir)); rv.d = ieee754dp_msubf(fd, fs, ft); - break; + goto copcsr; } case frint_op: { @@ -2204,7 +2204,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(fs, MIPSInst_FS(ir)); rv.l = ieee754dp_2008class(fs); rfmt = l_fmt; - break; 
+ goto copcsr; } case fmin_op: { @@ -2217,7 +2217,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmin(fs, ft); - break; + goto copcsr; } case fmina_op: { @@ -2230,7 +2230,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmina(fs, ft); - break; + goto copcsr; } case fmax_op: { @@ -2243,7 +2243,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmax(fs, ft); - break; + goto copcsr; } case fmaxa_op: { @@ -2256,7 +2256,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, DPFROMREG(ft, MIPSInst_FT(ir)); DPFROMREG(fs, MIPSInst_FS(ir)); rv.d = ieee754dp_fmaxa(fs, ft); - break; + goto copcsr; } case fabs_op: diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 6f534b209971..e12dfa48b478 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -851,9 +851,12 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) /* * Either no secondary cache or the available caches don't have the * subset property so we have to flush the primary caches - * explicitly + * explicitly. + * If we would need IPI to perform an INDEX-type operation, then + * we have to use the HIT-type alternative as IPI cannot be used + * here due to interrupts possibly being disabled. */ - if (size >= dcache_size) { + if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) { r4k_blast_dcache(); } else { R4600_HIT_CACHEOP_WAR_IMPL; @@ -890,7 +893,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) return; } - if (size >= dcache_size) { + if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) { r4k_blast_dcache(); } else { R4600_HIT_CACHEOP_WAR_IMPL; diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c index 1986e09fb457..1601d90b087b 100644 --- a/arch/mips/mm/ioremap.c +++ b/arch/mips/mm/ioremap.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -98,6 +99,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr, return error; } +static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, + void *arg) +{ + unsigned long i; + + for (i = 0; i < nr_pages; i++) { + if (pfn_valid(start_pfn + i) && + !PageReserved(pfn_to_page(start_pfn + i))) + return 1; + } + + return 0; +} + /* * Generic mapping function (not visible outside): */ @@ -116,8 +131,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr, void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags) { + unsigned long offset, pfn, last_pfn; struct vm_struct * area; - unsigned long offset; phys_addr_t last_addr; void * addr; @@ -137,18 +152,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long return (void __iomem *) CKSEG1ADDR(phys_addr); /* - * Don't allow anybody to remap normal RAM that we're using.. + * Don't allow anybody to remap RAM that may be allocated by the page + * allocator, since that could lead to races & data clobbering. 
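 * (Editorial aside, not part of the patch: a worked example of the
 * check below, assuming 4 KiB pages. For phys_addr = 0x10000400 and
 * size = 0x1000, last_addr = 0x100013ff, giving pfn = 0x10000 and
 * last_pfn = 0x10001, so walk_system_ram_range() is consulted for the
 * two pages the request straddles and the mapping is refused if either
 * one is usable RAM.)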
*/ - if (phys_addr < virt_to_phys(high_memory)) { - char *t_addr, *t_end; - struct page *page; - - t_addr = __va(phys_addr); - t_end = t_addr + (size - 1); - - for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++) - if(!PageReserved(page)) - return NULL; + pfn = PFN_DOWN(phys_addr); + last_pfn = PFN_DOWN(last_addr); + if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, + __ioremap_check_ram) == 1) { + WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n", + &phys_addr, &last_addr); + return NULL; } /* diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c index 90fba9bf98da..27ac00c36bc0 100644 --- a/arch/mips/pci/pci-mt7620.c +++ b/arch/mips/pci/pci-mt7620.c @@ -121,7 +121,7 @@ static int wait_pciephy_busy(void) else break; if (retry++ > WAITRETRY_MAX) { - printk(KERN_WARN "PCIE-PHY retry failed.\n"); + pr_warn("PCIE-PHY retry failed.\n"); return -1; } } diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c index 9632436d74d7..c2e94cf5ecda 100644 --- a/arch/mips/pci/pci.c +++ b/arch/mips/pci/pci.c @@ -54,5 +54,5 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar, phys_addr_t size = resource_size(rsrc); *start = fixup_bigphys_addr(rsrc->start, size); - *end = rsrc->start + size; + *end = rsrc->start + size - 1; } diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index 9be8b08ae46b..41b71c4352c2 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c @@ -145,8 +145,8 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = { FUNC("i2c", 0, 4, 2), }; -static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) }; -static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) }; +static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) }; +static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) }; static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) }; static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) }; diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c index 1b274742077d..d2718de60b9b 100644 --- a/arch/mips/ralink/mt7621.c +++ b/arch/mips/ralink/mt7621.c @@ -170,6 +170,28 @@ void prom_soc_init(struct ralink_soc_info *soc_info) u32 n1; u32 rev; + /* Early detection of CMP support */ + mips_cm_probe(); + mips_cpc_probe(); + + if (mips_cps_numiocu(0)) { + /* + * mips_cm_probe() wipes out bootloader + * config for CM regions and we have to configure them + * again. This SoC cannot talk to palmbus devices + * without proper IOCU region set up. + * + * FIXME: it would be better to do this with values + * from DT, but we need this very early because + * without this we cannot talk to pretty much anything + * including serial. + */ + write_gcr_reg0_base(MT7621_PALMBUS_BASE); + write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE | + CM_GCR_REGn_MASK_CMTGT_IOCU0); + __sync(); + } + n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); @@ -194,26 +216,6 @@ void prom_soc_init(struct ralink_soc_info *soc_info) rt2880_pinmux_data = mt7621_pinmux_data; - /* Early detection of CMP support */ - mips_cm_probe(); - mips_cpc_probe(); - - if (mips_cps_numiocu(0)) { - /* - * mips_cm_probe() wipes out bootloader - * config for CM regions and we have to configure them - * again. This SoC cannot talk to pamlbus devices - * witout proper iocu region set up. 
- * - * FIXME: it would be better to do this with values - * from DT, but we need this very early because - * without this we cannot talk to pretty much anything - * including serial. - */ - write_gcr_reg0_base(MT7621_PALMBUS_BASE); - write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE | - CM_GCR_REGn_MASK_CMTGT_IOCU0); - } if (!register_cps_smp_ops()) return; diff --git a/arch/mips/ralink/reset.c b/arch/mips/ralink/reset.c index 64543d66e76b..e9531fea23a2 100644 --- a/arch/mips/ralink/reset.c +++ b/arch/mips/ralink/reset.c @@ -96,16 +96,9 @@ static void ralink_restart(char *command) unreachable(); } -static void ralink_halt(void) -{ - local_irq_disable(); - unreachable(); -} - static int __init mips_reboot_setup(void) { _machine_restart = ralink_restart; - _machine_halt = ralink_halt; return 0; } diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c index 8b937300fb7f..fd26fadc8617 100644 --- a/arch/mips/txx9/rbtx4939/setup.c +++ b/arch/mips/txx9/rbtx4939/setup.c @@ -186,7 +186,7 @@ static void __init rbtx4939_update_ioc_pen(void) #define RBTX4939_MAX_7SEGLEDS 8 -#if IS_ENABLED(CONFIG_LEDS_CLASS) +#if IS_BUILTIN(CONFIG_LEDS_CLASS) static u8 led_val[RBTX4939_MAX_7SEGLEDS]; struct rbtx4939_led_data { struct led_classdev cdev; @@ -261,7 +261,7 @@ static inline void rbtx4939_led_setup(void) static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val) { -#if IS_ENABLED(CONFIG_LEDS_CLASS) +#if IS_BUILTIN(CONFIG_LEDS_CLASS) unsigned long flags; local_irq_save(flags); /* bit7: reserved for LED class */ diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index b39a388825ae..8ace89617c1c 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -437,7 +437,7 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) info.si_signo = SIGSEGV; info.si_errno = 0; - info.si_code = 0; + info.si_code = SEGV_MAPERR; info.si_addr = (void *) regs->pc; force_sig_info(SIGSEGV, &info, current); return; diff --git a/arch/nios2/boot/.gitignore b/arch/nios2/boot/.gitignore index 109279ca5a4d..64386a8dedd8 100644 --- a/arch/nios2/boot/.gitignore +++ b/arch/nios2/boot/.gitignore @@ -1,2 +1 @@ -*.dtb vmImage diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index f41bd3cb76d9..e212a1f0b6d2 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -23,7 +23,6 @@ */ #include -#include #include extern const struct dma_map_ops or1k_dma_map_ops; diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index 803e9e756f77..8d8437169b5e 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -306,12 +306,12 @@ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address) siginfo_t info; if (user_mode(regs)) { - /* Send a SIGSEGV */ - info.si_signo = SIGSEGV; + /* Send a SIGBUS */ + info.si_signo = SIGBUS; info.si_errno = 0; - /* info.si_code has been set above */ - info.si_addr = (void *)address; - force_sig_info(SIGSEGV, &info, current); + info.si_code = BUS_ADRALN; + info.si_addr = (void __user *)address; + force_sig_info(SIGBUS, &info, current); } else { printk("KERNEL: Unaligned Access 0x%.8lx\n", address); show_registers(regs); diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 1fd3eb5b66c6..89e684fd795f 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -201,7 +201,7 @@ config PREFETCH config MLONGCALLS bool "Enable the -mlong-calls compiler option for big 
kernels" - def_bool y if (!MODULES) + default y depends on PA8X00 help If you configure the kernel to include many drivers built-in instead diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c index 9345b44b86f0..f57118e1f6b4 100644 --- a/arch/parisc/boot/compressed/misc.c +++ b/arch/parisc/boot/compressed/misc.c @@ -123,8 +123,8 @@ int puts(const char *s) while ((nuline = strchr(s, '\n')) != NULL) { if (nuline != s) pdc_iodc_print(s, nuline - s); - pdc_iodc_print("\r\n", 2); - s = nuline + 1; + pdc_iodc_print("\r\n", 2); + s = nuline + 1; } if (*s != '\0') pdc_iodc_print(s, strlen(s)); diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h new file mode 100644 index 000000000000..dbaaca84f27f --- /dev/null +++ b/arch/parisc/include/asm/barrier.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +#ifndef __ASSEMBLY__ + +/* The synchronize caches instruction executes as a nop on systems in + which all memory references are performed in order. */ +#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory") + +#if defined(CONFIG_SMP) +#define mb() do { synchronize_caches(); } while (0) +#define rmb() mb() +#define wmb() mb() +#define dma_rmb() mb() +#define dma_wmb() mb() +#else +#define mb() barrier() +#define rmb() barrier() +#define wmb() barrier() +#define dma_rmb() barrier() +#define dma_wmb() barrier() +#endif + +#define __smp_mb() mb() +#define __smp_rmb() mb() +#define __smp_wmb() mb() + +#include + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_BARRIER_H */ diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 3742508cc534..bd5ce31936f5 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -26,6 +26,7 @@ void flush_user_icache_range_asm(unsigned long, unsigned long); void flush_kernel_icache_range_asm(unsigned long, unsigned long); void flush_user_dcache_range_asm(unsigned long, unsigned long); void flush_kernel_dcache_range_asm(unsigned long, unsigned long); +void purge_kernel_dcache_range_asm(unsigned long, unsigned long); void flush_kernel_dcache_page_asm(void *); void flush_kernel_icache_page(void *); diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h index 07f48827afda..acf8aa07cbe0 100644 --- a/arch/parisc/include/asm/compat.h +++ b/arch/parisc/include/asm/compat.h @@ -195,7 +195,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL struct compat_ipc64_perm { compat_key_t key; diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h index dd5a08aaa4da..3eb4bfc1fb36 100644 --- a/arch/parisc/include/asm/ldcw.h +++ b/arch/parisc/include/asm/ldcw.h @@ -12,6 +12,7 @@ for the semaphore. */ #define __PA_LDCW_ALIGNMENT 16 +#define __PA_LDCW_ALIGN_ORDER 4 #define __ldcw_align(a) ({ \ unsigned long __ret = (unsigned long) &(a)->lock[0]; \ __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \ @@ -29,6 +30,7 @@ ldcd). 
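 * (Editorial aside, not part of the patch: __PA_LDCW_ALIGN_ORDER is
 * just log2(__PA_LDCW_ALIGNMENT), 4 for the 16-byte case and 2 for the
 * 4-byte case, so assembly such as the load_pa_tlb_lock macro added to
 * entry.S further down can clear the low bits of the lock address with
 * depi 0,31,__PA_LDCW_ALIGN_ORDER rather than computing the mask.)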
*/ #define __PA_LDCW_ALIGNMENT 4 +#define __PA_LDCW_ALIGN_ORDER 2 #define __ldcw_align(a) (&(a)->slock) #define __LDCW "ldcw,co" diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 0e6ab6e4a4e9..2dbe5580a1a4 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h @@ -316,6 +316,8 @@ extern int _parisc_requires_coherency; #define parisc_requires_coherency() (0) #endif +extern int running_on_qemu; + #endif /* __ASSEMBLY__ */ #endif /* __ASM_PARISC_PROCESSOR_H */ diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index c980a02a52bc..598c8d60fa5e 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -35,7 +35,12 @@ struct thread_info { /* thread information allocation */ +#ifdef CONFIG_IRQSTACKS +#define THREAD_SIZE_ORDER 2 /* PA-RISC requires at least 16k stack */ +#else #define THREAD_SIZE_ORDER 3 /* PA-RISC requires at least 32k stack */ +#endif + /* Be sure to hunt all references to this down when you change the size of * the kernel stack */ #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 1d0fdc3b5d22..061b9cf2a779 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -104,4 +104,7 @@ #define SO_ZEROCOPY 0x4035 +#define SO_TXTIME 0x4036 +#define SCM_TXTIME SO_TXTIME + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 19c0c141bc3f..e3b45546d589 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -465,10 +465,10 @@ EXPORT_SYMBOL(copy_user_page); int __flush_tlb_range(unsigned long sid, unsigned long start, unsigned long end) { - unsigned long flags, size; + unsigned long flags; - size = (end - start); - if (size >= parisc_tlb_flush_threshold) { + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && + end - start >= parisc_tlb_flush_threshold) { flush_tlb_all(); return 1; } @@ -539,13 +539,12 @@ void flush_cache_mm(struct mm_struct *mm) struct vm_area_struct *vma; pgd_t *pgd; - /* Flush the TLB to avoid speculation if coherency is required. */ - if (parisc_requires_coherency()) - flush_tlb_all(); - /* Flushing the whole cache on each cpu takes forever on rp3440, etc. So, avoid it if the mm isn't too big. 
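 * (Editorial aside, not part of the patch: the guard repeated in the
 * hunks below could be read as a helper along these lines, a sketch
 * only:
 *
 *	static inline bool flush_all_is_safe(void)
 *	{
 *		return !IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled();
 *	}
 *
 * i.e. take the flush-everything shortcut only when cross-CPU calls
 * could still be delivered, since on SMP flush_tlb_all() may use IPIs,
 * which must not be issued with interrupts disabled.)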
*/ - if (mm_total_size(mm) >= parisc_cache_flush_threshold) { + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && + mm_total_size(mm) >= parisc_cache_flush_threshold) { + if (mm->context) + flush_tlb_all(); flush_cache_all(); return; } @@ -553,9 +552,9 @@ void flush_cache_mm(struct mm_struct *mm) if (mm->context == mfsp(3)) { for (vma = mm->mmap; vma; vma = vma->vm_next) { flush_user_dcache_range_asm(vma->vm_start, vma->vm_end); - if ((vma->vm_flags & VM_EXEC) == 0) - continue; - flush_user_icache_range_asm(vma->vm_start, vma->vm_end); + if (vma->vm_flags & VM_EXEC) + flush_user_icache_range_asm(vma->vm_start, vma->vm_end); + flush_tlb_range(vma, vma->vm_start, vma->vm_end); } return; } @@ -573,6 +572,8 @@ void flush_cache_mm(struct mm_struct *mm) pfn = pte_pfn(*ptep); if (!pfn_valid(pfn)) continue; + if (unlikely(mm->context)) + flush_tlb_page(vma, addr); __flush_cache_page(vma, addr, PFN_PHYS(pfn)); } } @@ -581,30 +582,45 @@ void flush_cache_mm(struct mm_struct *mm) void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - BUG_ON(!vma->vm_mm->context); - - /* Flush the TLB to avoid speculation if coherency is required. */ - if (parisc_requires_coherency()) - flush_tlb_range(vma, start, end); + pgd_t *pgd; + unsigned long addr; - if ((end - start) >= parisc_cache_flush_threshold - || vma->vm_mm->context != mfsp(3)) { + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && + end - start >= parisc_cache_flush_threshold) { + if (vma->vm_mm->context) + flush_tlb_range(vma, start, end); flush_cache_all(); return; } - flush_user_dcache_range_asm(start, end); - if (vma->vm_flags & VM_EXEC) - flush_user_icache_range_asm(start, end); + if (vma->vm_mm->context == mfsp(3)) { + flush_user_dcache_range_asm(start, end); + if (vma->vm_flags & VM_EXEC) + flush_user_icache_range_asm(start, end); + flush_tlb_range(vma, start, end); + return; + } + + pgd = vma->vm_mm->pgd; + for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) { + unsigned long pfn; + pte_t *ptep = get_ptep(pgd, addr); + if (!ptep) + continue; + pfn = pte_pfn(*ptep); + if (pfn_valid(pfn)) { + if (unlikely(vma->vm_mm->context)) + flush_tlb_page(vma, addr); + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); + } + } } void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) { - BUG_ON(!vma->vm_mm->context); - if (pfn_valid(pfn)) { - if (parisc_requires_coherency()) + if (likely(vma->vm_mm->context)) flush_tlb_page(vma, vmaddr); __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); } @@ -613,21 +629,33 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long void flush_kernel_vmap_range(void *vaddr, int size) { unsigned long start = (unsigned long)vaddr; + unsigned long end = start + size; - if ((unsigned long)size > parisc_cache_flush_threshold) + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && + (unsigned long)size >= parisc_cache_flush_threshold) { + flush_tlb_kernel_range(start, end); flush_data_cache(); - else - flush_kernel_dcache_range_asm(start, start + size); + return; + } + + flush_kernel_dcache_range_asm(start, end); + flush_tlb_kernel_range(start, end); } EXPORT_SYMBOL(flush_kernel_vmap_range); void invalidate_kernel_vmap_range(void *vaddr, int size) { unsigned long start = (unsigned long)vaddr; + unsigned long end = start + size; - if ((unsigned long)size > parisc_cache_flush_threshold) + if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && + (unsigned long)size >= parisc_cache_flush_threshold) { + 
flush_tlb_kernel_range(start, end); flush_data_cache(); - else - flush_kernel_dcache_range_asm(start, start + size); + return; + } + + purge_kernel_dcache_range_asm(start, end); + flush_tlb_kernel_range(start, end); } EXPORT_SYMBOL(invalidate_kernel_vmap_range); diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index d8f77358e2ba..6a71d3151a23 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -448,7 +448,8 @@ static int match_by_id(struct device * dev, void * data) * Checks all the children of @parent for a matching @id. If none * found, it allocates a new device and returns it. */ -static struct parisc_device * alloc_tree_node(struct device *parent, char id) +static struct parisc_device * __init alloc_tree_node( + struct device *parent, char id) { struct match_id_data d = { .id = id, @@ -651,6 +652,10 @@ static int match_pci_device(struct device *dev, int index, (modpath->mod == PCI_FUNC(devfn))); } + /* index might be out of bounds for bc[] */ + if (index >= 6) + return 0; + id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5); return (modpath->bc[index] == id); } @@ -821,8 +826,8 @@ void walk_lower_bus(struct parisc_device *dev) * devices which are not physically connected (such as extra serial & * keyboard ports). This problem is not yet solved. */ -static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, - struct device *parent) +static void __init walk_native_bus(unsigned long io_io_low, + unsigned long io_io_high, struct device *parent) { int i, devices_found = 0; unsigned long hpa = io_io_low; diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index a4fd296c958e..1b4732e20137 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -46,6 +47,14 @@ #endif .import pa_tlb_lock,data + .macro load_pa_tlb_lock reg +#if __PA_LDCW_ALIGNMENT > 4 + load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg + depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg +#else + load32 PA(pa_tlb_lock), \reg +#endif + .endm /* space_to_prot macro creates a prot id from a space id */ @@ -457,7 +466,7 @@ .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault #ifdef CONFIG_SMP cmpib,COND(=),n 0,\spc,2f - load32 PA(pa_tlb_lock),\tmp + load_pa_tlb_lock \tmp 1: LDCW 0(\tmp),\tmp1 cmpib,COND(=) 0,\tmp1,1b nop @@ -472,6 +481,8 @@ /* Release pa_tlb_lock lock without reloading lock address. */ .macro tlb_unlock0 spc,tmp #ifdef CONFIG_SMP + or,COND(=) %r0,\spc,%r0 + sync or,COND(=) %r0,\spc,%r0 stw \spc,0(\tmp) #endif @@ -480,7 +491,7 @@ /* Release pa_tlb_lock lock. */ .macro tlb_unlock1 spc,tmp #ifdef CONFIG_SMP - load32 PA(pa_tlb_lock),\tmp + load_pa_tlb_lock \tmp tlb_unlock0 \spc,\tmp #endif .endm @@ -878,9 +889,6 @@ ENTRY_CFI(syscall_exit_rfi) STREG %r19,PT_SR7(%r16) intr_return: - /* NOTE: Need to enable interrupts incase we schedule. */ - ssm PSW_SM_I, %r0 - /* check for reschedule */ mfctl %cr30,%r1 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */ @@ -907,6 +915,11 @@ intr_check_sig: LDREG PT_IASQ1(%r16), %r20 cmpib,COND(=),n 0,%r20,intr_restore /* backward */ + /* NOTE: We need to enable interrupts if we have to deliver + * signals. We used to do this earlier but it caused kernel + * stack overflows. 
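+	 * (ssm PSW_SM_I sets the I bit in the PSW system mask, so external
+	 * interrupts are accepted again from this point on.)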
*/ + ssm PSW_SM_I, %r0 + copy %r0, %r25 /* long in_syscall = 0 */ #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ @@ -958,6 +971,10 @@ intr_do_resched: cmpib,COND(=) 0, %r20, intr_do_preempt nop + /* NOTE: We need to enable interrupts if we schedule. We used + * to do this earlier but it caused kernel stack overflows. */ + ssm PSW_SM_I, %r0 + #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S index e3a8e5e4d5de..781c3b9a3e46 100644 --- a/arch/parisc/kernel/hpmc.S +++ b/arch/parisc/kernel/hpmc.S @@ -84,6 +84,7 @@ END(hpmc_pim_data) .text .import intr_save, code + .align 16 ENTRY_CFI(os_hpmc) .os_hpmc: @@ -300,11 +301,15 @@ os_hpmc_6: b . nop + .align 16 /* make function length multiple of 16 bytes */ ENDPROC_CFI(os_hpmc) .os_hpmc_end: __INITRODATA - .export os_hpmc_size +.globl os_hpmc_size + .align 4 + .type os_hpmc_size, @object + .size os_hpmc_size, 4 os_hpmc_size: .word .os_hpmc_end-.os_hpmc diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index adf7187f8951..3e163df49cf3 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -36,6 +36,7 @@ #include #include #include +#include #include .text @@ -333,8 +334,12 @@ ENDPROC_CFI(flush_data_cache_local) .macro tlb_lock la,flags,tmp #ifdef CONFIG_SMP - ldil L%pa_tlb_lock,%r1 - ldo R%pa_tlb_lock(%r1),\la +#if __PA_LDCW_ALIGNMENT > 4 + load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la + depi 0,31,__PA_LDCW_ALIGN_ORDER, \la +#else + load32 pa_tlb_lock, \la +#endif rsm PSW_SM_I,\flags 1: LDCW 0(\la),\tmp cmpib,<>,n 0,\tmp,3f @@ -349,6 +354,7 @@ ENDPROC_CFI(flush_data_cache_local) .macro tlb_unlock la,flags,tmp #ifdef CONFIG_SMP ldi 1,\tmp + sync stw \tmp,0(\la) mtsm \flags #endif @@ -1105,6 +1111,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm) .procend ENDPROC_CFI(flush_kernel_dcache_range_asm) +ENTRY_CFI(purge_kernel_dcache_range_asm) + .proc + .callinfo NO_CALLS + .entry + + ldil L%dcache_stride, %r1 + ldw R%dcache_stride(%r1), %r23 + ldo -1(%r23), %r21 + ANDCM %r26, %r21, %r26 + +1: cmpb,COND(<<),n %r26, %r25,1b + pdc,m %r23(%r26) + + sync + syncdma + bv %r0(%r2) + nop + .exit + + .procend +ENDPROC_CFI(purge_kernel_dcache_range_asm) + ENTRY_CFI(flush_user_icache_range_asm) .proc .callinfo NO_CALLS diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 30f92391a93e..cad3e8661cd6 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -183,6 +184,44 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r) return 1; } +/* + * Idle thread support + * + * Detect when running on QEMU with SeaBIOS PDC Firmware and let + * QEMU idle the host too. + */ + +int running_on_qemu __read_mostly; + +void __cpuidle arch_cpu_idle_dead(void) +{ + /* nop on real hardware, qemu will offline CPU. */ + asm volatile("or %%r31,%%r31,%%r31\n":::); +} + +void __cpuidle arch_cpu_idle(void) +{ + local_irq_enable(); + + /* nop on real hardware, qemu will idle sleep. 
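+	 * QEMU/SeaBIOS reserve a few otherwise-unused "or rX,rX,rX"
+	 * encodings as hints: r31 (in arch_cpu_idle_dead above) requests a
+	 * CPU offline, r10 here requests an idle sleep.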
*/ + asm volatile("or %%r10,%%r10,%%r10\n":::); +} + +static int __init parisc_idle_init(void) +{ + const char *marker; + + /* check QEMU/SeaBIOS marker in PAGE0 */ + marker = (char *) &PAGE0->pad0; + running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0); + + if (!running_on_qemu) + cpu_idle_poll_ctrl(1); + + return 0; +} +arch_initcall(parisc_idle_init); + /* * Copy architecture-specific thread state */ diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 30c28ab14540..ab4d5580bb02 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -418,8 +418,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) } #ifdef CONFIG_PROC_FS -int __init -setup_profiling_timer(unsigned int multiplier) +int setup_profiling_timer(unsigned int multiplier) { return -EINVAL; } diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 41e60a9c7db2..4886a6db42e9 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -633,6 +633,7 @@ cas_action: sub,<> %r28, %r25, %r0 2: stw,ma %r24, 0(%r26) /* Free lock */ + sync stw,ma %r20, 0(%sr2,%r20) #if ENABLE_LWS_DEBUG /* Clear thread register indicator */ @@ -647,6 +648,7 @@ cas_action: 3: /* Error occurred on load or store */ /* Free lock */ + sync stw %r20, 0(%sr2,%r20) #if ENABLE_LWS_DEBUG stw %r0, 4(%sr2,%r20) @@ -690,15 +692,15 @@ cas_action: /* ELF32 Process entry path */ lws_compare_and_swap_2: #ifdef CONFIG_64BIT - /* Clip the input registers */ + /* Clip the input registers. We don't need to clip %r23 as we + only use it for word operations */ depdi 0, 31, 32, %r26 depdi 0, 31, 32, %r25 depdi 0, 31, 32, %r24 - depdi 0, 31, 32, %r23 #endif /* Check the validity of the size pointer */ - subi,>>= 4, %r23, %r0 + subi,>>= 3, %r23, %r0 b,n lws_exit_nosys /* Jump to the functions which will load the old and new values into @@ -848,6 +850,7 @@ cas2_action: cas2_end: /* Free lock */ + sync stw,ma %r20, 0(%sr2,%r20) /* Enable interrupts */ ssm PSW_SM_I, %r0 @@ -858,6 +861,7 @@ cas2_end: 22: /* Error occurred on load or store */ /* Free lock */ + sync stw %r20, 0(%sr2,%r20) ssm PSW_SM_I, %r0 ldo 1(%r0),%r28 diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 4b8fd6dc22da..42a873226a04 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -76,10 +76,10 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) next_tick = cpuinfo->it_value; /* Calculate how many ticks have elapsed. */ + now = mfctl(16); do { ++ticks_elapsed; next_tick += cpt; - now = mfctl(16); } while (next_tick - now > cpt); /* Store (in CR16 cycles) up to when we are accounting right now. */ @@ -103,16 +103,17 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) * if one or the other wrapped. If "now" is "bigger" we'll end up * with a very large unsigned number. */ - while (next_tick - mfctl(16) > cpt) + now = mfctl(16); + while (next_tick - now > cpt) next_tick += cpt; /* Program the IT when to deliver the next interrupt. * Only bottom 32-bits of next_tick are writable in CR16! * Timer interrupt will be delivered at least a few hundred cycles - * after the IT fires, so if we are too close (<= 500 cycles) to the + * after the IT fires, so if we are too close (<= 8000 cycles) to the * next cycle, simply skip it. 
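+	 * (Editorial aside: 8000 CR16 cycles is still only a few
+	 * microseconds of skew on real silicon, but it gives slow or
+	 * emulated machines enough headroom to program the IT before the
+	 * deadline has already passed.)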
*/ - if (next_tick - mfctl(16) <= 500) + if (next_tick - now <= 8000) next_tick += cpt; mtctl(next_tick, 16); @@ -204,7 +205,7 @@ static int __init rtc_init(void) device_initcall(rtc_init); #endif -void read_persistent_clock(struct timespec *ts) +void read_persistent_clock64(struct timespec64 *ts) { static struct pdc_tod tod_data; if (pdc_tod_read(&tod_data) == 0) { @@ -248,7 +249,7 @@ static int __init init_cr16_clocksource(void) * different sockets, so mark them unstable and lower rating on * multi-socket SMP systems. */ - if (num_online_cpus() > 1) { + if (num_online_cpus() > 1 && !running_on_qemu) { int cpu; unsigned long cpu0_loc; cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index cb782ac1c35d..fe418226df7f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -164,6 +164,7 @@ config PPC select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_CMOS_UPDATE select GENERIC_CPU_AUTOPROBE + select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64 select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL select GENERIC_SMP_IDLE_THREAD diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore index 84774ccba1c2..f92d0530ceb1 100644 --- a/arch/powerpc/boot/.gitignore +++ b/arch/powerpc/boot/.gitignore @@ -18,7 +18,6 @@ otheros.bld uImage cuImage.* dtbImage.* -*.dtb treeImage.* vmlinux.strip zImage diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 651974192c4d..b479926f0167 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -101,7 +101,8 @@ $(addprefix $(obj)/,$(zlib-y)): \ libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c libfdtheader := fdt.h libfdt.h libfdt_internal.h -$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \ +$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \ + treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \ $(addprefix $(obj)/,$(libfdtheader)) src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \ diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c index f058e0c3e4d4..fd1d6c83f0c0 100644 --- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c +++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c @@ -141,6 +141,7 @@ static struct shash_alg alg = { .cra_name = "crc32c", .cra_driver_name = "crc32c-vpmsum", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index 10daa1d56e0a..e582d2c88092 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -35,7 +35,8 @@ #define rmb() __asm__ __volatile__ ("sync" : : : "memory") #define wmb() __asm__ __volatile__ ("sync" : : : "memory") -#ifdef __SUBARCH_HAS_LWSYNC +/* The sub-arch has lwsync */ +#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC) # define SMPWMB LWSYNC #else # define SMPWMB eieio @@ -75,6 +76,21 @@ do { \ ___p1; \ }) +#ifdef CONFIG_PPC_BOOK3S_64 +/* + * Prevent execution of subsequent instructions until preceding branches have + * been fully resolved and are no longer executing speculatively. + */ +#define barrier_nospec_asm ori 31,31,0 + +// This also acts as a compiler barrier due to the memory clobber. 
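+// An illustrative use, not taken from this patch: issued after a
+// user-influenced bounds check, it keeps a mispredicted branch from
+// consuming an out-of-range index speculatively:
+//
+//	if (idx < size) {
+//		barrier_nospec();
+//		val = array[idx];
+//	}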
+#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory") + +#else /* !CONFIG_PPC_BOOK3S_64 */ +#define barrier_nospec_asm +#define barrier_nospec() +#endif + #include #endif /* _ASM_POWERPC_BARRIER_H */ diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index 1fcfa425cefa..f326b40b7c7b 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -73,10 +73,16 @@ static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd) static inline pgd_t *pgd_alloc(struct mm_struct *mm) { + pgd_t *pgd; + if (radix_enabled()) return radix__pgd_alloc(mm); - return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), - pgtable_gfp_flags(mm, GFP_KERNEL)); + + pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), + pgtable_gfp_flags(mm, GFP_KERNEL)); + memset(pgd, 0, PGD_TABLE_SIZE); + + return pgd; } static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) diff --git a/arch/powerpc/include/asm/book3s/64/slice.h b/arch/powerpc/include/asm/book3s/64/slice.h new file mode 100644 index 000000000000..db0dedab65ee --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/slice.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H +#define _ASM_POWERPC_BOOK3S_64_SLICE_H + +#ifdef CONFIG_PPC_MM_SLICES + +#define SLICE_LOW_SHIFT 28 +#define SLICE_LOW_TOP (0x100000000ul) +#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT) +#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT) + +#define SLICE_HIGH_SHIFT 40 +#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT) +#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT) + +#else /* CONFIG_PPC_MM_SLICES */ + +#define get_slice_psize(mm, addr) ((mm)->context.user_psize) +#define slice_set_user_psize(mm, psize) \ +do { \ + (mm)->context.user_psize = (psize); \ + (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \ +} while (0) + +#endif /* CONFIG_PPC_MM_SLICES */ + +#endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */ diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index c1d257aa4c2d..66298461b640 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -9,11 +9,14 @@ #if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX) #define L1_CACHE_SHIFT 4 #define MAX_COPY_PREFETCH 1 +#define IFETCH_ALIGN_SHIFT 2 #elif defined(CONFIG_PPC_E500MC) #define L1_CACHE_SHIFT 6 #define MAX_COPY_PREFETCH 4 +#define IFETCH_ALIGN_SHIFT 3 #elif defined(CONFIG_PPC32) #define MAX_COPY_PREFETCH 4 +#define IFETCH_ALIGN_SHIFT 3 /* 603 fetches 2 insn at a time */ #if defined(CONFIG_PPC_47x) #define L1_CACHE_SHIFT 7 #else diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index abef812de7f8..2c895e8d07f7 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -33,6 +33,7 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags); int patch_instruction(unsigned int *addr, unsigned int instr); int instr_is_relative_branch(unsigned int instr); +int instr_is_relative_link_branch(unsigned int instr); int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr); unsigned long branch_target(const unsigned int *instr); unsigned int translate_branch(const unsigned int *dest, diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index a035b1e5dfa7..8a2aecfe9b02 100644 --- 
a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h @@ -185,7 +185,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL /* * A pointer passed in from user mode. This should not diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h index a703452d67b6..555e22d5e07f 100644 --- a/arch/powerpc/include/asm/exception-64e.h +++ b/arch/powerpc/include/asm/exception-64e.h @@ -209,5 +209,11 @@ exc_##label##_book3e: ori r3,r3,vector_offset@l; \ mtspr SPRN_IVOR##vector_number,r3; +#define RFI_TO_KERNEL \ + rfi + +#define RFI_TO_USER \ + rfi + #endif /* _ASM_POWERPC_EXCEPTION_64E_H */ diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 9a318973af05..c3bdd2d8ec90 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -69,6 +69,87 @@ */ #define EX_R3 EX_DAR +#define STF_ENTRY_BARRIER_SLOT \ + STF_ENTRY_BARRIER_FIXUP_SECTION; \ + nop; \ + nop; \ + nop + +#define STF_EXIT_BARRIER_SLOT \ + STF_EXIT_BARRIER_FIXUP_SECTION; \ + nop; \ + nop; \ + nop; \ + nop; \ + nop; \ + nop + +/* + * r10 must be free to use, r13 must be paca + */ +#define INTERRUPT_TO_KERNEL \ + STF_ENTRY_BARRIER_SLOT + +/* + * Macros for annotating the expected destination of (h)rfid + * + * The nop instructions allow us to insert one or more instructions to flush the + * L1-D cache when returning to userspace or a guest. + */ +#define RFI_FLUSH_SLOT \ + RFI_FLUSH_FIXUP_SECTION; \ + nop; \ + nop; \ + nop + +#define RFI_TO_KERNEL \ + rfid + +#define RFI_TO_USER \ + STF_EXIT_BARRIER_SLOT; \ + RFI_FLUSH_SLOT; \ + rfid; \ + b rfi_flush_fallback + +#define RFI_TO_USER_OR_KERNEL \ + STF_EXIT_BARRIER_SLOT; \ + RFI_FLUSH_SLOT; \ + rfid; \ + b rfi_flush_fallback + +#define RFI_TO_GUEST \ + STF_EXIT_BARRIER_SLOT; \ + RFI_FLUSH_SLOT; \ + rfid; \ + b rfi_flush_fallback + +#define HRFI_TO_KERNEL \ + hrfid + +#define HRFI_TO_USER \ + STF_EXIT_BARRIER_SLOT; \ + RFI_FLUSH_SLOT; \ + hrfid; \ + b hrfi_flush_fallback + +#define HRFI_TO_USER_OR_KERNEL \ + STF_EXIT_BARRIER_SLOT; \ + RFI_FLUSH_SLOT; \ + hrfid; \ + b hrfi_flush_fallback + +#define HRFI_TO_GUEST \ + STF_EXIT_BARRIER_SLOT; \ + RFI_FLUSH_SLOT; \ + hrfid; \ + b hrfi_flush_fallback + +#define HRFI_TO_UNKNOWN \ + STF_EXIT_BARRIER_SLOT; \ + RFI_FLUSH_SLOT; \ + hrfid; \ + b hrfi_flush_fallback + #ifdef CONFIG_RELOCATABLE #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ @@ -196,6 +277,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) #define __EXCEPTION_PROLOG_1(area, extra, vec) \ OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \ OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \ + INTERRUPT_TO_KERNEL; \ SAVE_CTR(r10, area); \ mfcr r9; \ extra(vec); \ @@ -213,7 +295,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) mtspr SPRN_##h##SRR0,r12; \ mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ mtspr SPRN_##h##SRR1,r10; \ - h##rfid; \ + h##RFI_TO_KERNEL; \ b . /* prevent speculative execution */ #define EXCEPTION_PROLOG_PSERIES_1(label, h) \ __EXCEPTION_PROLOG_PSERIES_1(label, h) @@ -227,7 +309,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) mtspr SPRN_##h##SRR0,r12; \ mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ mtspr SPRN_##h##SRR1,r10; \ - h##rfid; \ + h##RFI_TO_KERNEL; \ b . 
/* prevent speculative execution */ #define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \ diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index 8f88f771cc55..a9b64df34e2a 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -187,7 +187,39 @@ label##3: \ FTR_ENTRY_OFFSET label##1b-label##3b; \ .popsection; +#define STF_ENTRY_BARRIER_FIXUP_SECTION \ +953: \ + .pushsection __stf_entry_barrier_fixup,"a"; \ + .align 2; \ +954: \ + FTR_ENTRY_OFFSET 953b-954b; \ + .popsection; + +#define STF_EXIT_BARRIER_FIXUP_SECTION \ +955: \ + .pushsection __stf_exit_barrier_fixup,"a"; \ + .align 2; \ +956: \ + FTR_ENTRY_OFFSET 955b-956b; \ + .popsection; + +#define RFI_FLUSH_FIXUP_SECTION \ +951: \ + .pushsection __rfi_flush_fixup,"a"; \ + .align 2; \ +952: \ + FTR_ENTRY_OFFSET 951b-952b; \ + .popsection; + + #ifndef __ASSEMBLY__ +#include + +extern long stf_barrier_fallback; +extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup; +extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup; +extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; + void apply_feature_fixups(void); void setup_feature_keys(void); #endif diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index a409177be8bd..5a740feb7bd7 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -241,6 +241,7 @@ #define H_GET_HCA_INFO 0x1B8 #define H_GET_PERF_COUNT 0x1BC #define H_MANAGE_TRACE 0x1C0 +#define H_GET_CPU_CHARACTERISTICS 0x1C8 #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 #define H_QUERY_INT_STATE 0x1E4 #define H_POLL_PENDING 0x1D8 @@ -330,6 +331,20 @@ #define H_SIGNAL_SYS_RESET_ALL_OTHERS -2 /* >= 0 values are CPU number */ +/* H_GET_CPU_CHARACTERISTICS return values */ +#define H_CPU_CHAR_SPEC_BAR_ORI31 (1ull << 63) // IBM bit 0 +#define H_CPU_CHAR_BCCTRL_SERIALISED (1ull << 62) // IBM bit 1 +#define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2 +#define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3 +#define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4 +#define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5 +#define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6 +#define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7 + +#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0 +#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1 +#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2 + /* Flag values used in H_REGISTER_PROC_TBL hcall */ #define PROC_TABLE_OP_MASK 0x18 #define PROC_TABLE_DEREG 0x10 @@ -341,6 +356,7 @@ #define PROC_TABLE_GTSE 0x01 #ifndef __ASSEMBLY__ +#include /** * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments @@ -436,6 +452,11 @@ static inline unsigned int get_longbusy_msecs(int longbusy_rc) } } +struct h_cpu_char_result { + u64 character; + u64 behaviour; +}; + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_HVCALL_H */ diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h index c6d3078bd8c3..b8b0be8f1a07 100644 --- a/arch/powerpc/include/asm/irq_work.h +++ b/arch/powerpc/include/asm/irq_work.h @@ -6,5 +6,6 @@ static inline bool arch_irq_work_has_interrupt(void) { return true; } +extern void arch_irq_work_raise(void); #endif /* _ASM_POWERPC_IRQ_WORK_H */ diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h 
index 73b92017b6d7..cd2fc1cc1cc7 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -76,6 +76,7 @@ struct machdep_calls { void __noreturn (*restart)(char *cmd); void __noreturn (*halt)(void); + void (*panic)(char *str); void (*cpu_die)(void); long (*time_init)(void); /* Optional, may be NULL */ diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h index 5bb3dbede41a..1325e5b5f680 100644 --- a/arch/powerpc/include/asm/mmu-8xx.h +++ b/arch/powerpc/include/asm/mmu-8xx.h @@ -169,6 +169,12 @@ typedef struct { unsigned int id; unsigned int active; unsigned long vdso_base; +#ifdef CONFIG_PPC_MM_SLICES + u16 user_psize; /* page size index */ + u64 low_slices_psize; /* page size encodings */ + unsigned char high_slices_psize[0]; + unsigned long addr_limit; +#endif } mm_context_t; #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000) diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 492d8140a395..6f67ff5a5267 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm( extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, unsigned long ua, unsigned long entries); extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, - unsigned long ua, unsigned long *hpa); + unsigned long ua, unsigned int pageshift, unsigned long *hpa); extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, - unsigned long ua, unsigned long *hpa); + unsigned long ua, unsigned int pageshift, unsigned long *hpa); extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem); #endif @@ -114,9 +114,10 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, #endif } -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) { + return 0; } static inline void arch_exit_mmap(struct mm_struct *mm) diff --git a/arch/powerpc/include/asm/nohash/32/slice.h b/arch/powerpc/include/asm/nohash/32/slice.h new file mode 100644 index 000000000000..95d532e18092 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/slice.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_32_SLICE_H +#define _ASM_POWERPC_NOHASH_32_SLICE_H + +#ifdef CONFIG_PPC_MM_SLICES + +#define SLICE_LOW_SHIFT 28 +#define SLICE_LOW_TOP (0x100000000ull) +#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT) +#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT) + +#define SLICE_HIGH_SHIFT 0 +#define SLICE_NUM_HIGH 0ul +#define GET_HIGH_SLICE_INDEX(addr) (addr & 0) + +#endif /* CONFIG_PPC_MM_SLICES */ + +#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */ diff --git a/arch/powerpc/include/asm/nohash/64/slice.h b/arch/powerpc/include/asm/nohash/64/slice.h new file mode 100644 index 000000000000..ad0d6e3cc1c5 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/64/slice.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_64_SLICE_H +#define _ASM_POWERPC_NOHASH_64_SLICE_H + +#ifdef CONFIG_PPC_64K_PAGES +#define get_slice_psize(mm, addr) MMU_PAGE_64K +#else /* CONFIG_PPC_64K_PAGES */ +#define get_slice_psize(mm, addr) MMU_PAGE_4K +#endif /* !CONFIG_PPC_64K_PAGES */ +#define slice_set_user_psize(mm, psize) do { BUG(); } while (0) + 
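+/*
+ * Editorial note: with a single compile-time page size there is no
+ * per-slice state to track, so get_slice_psize() above folds to a
+ * constant and slice_set_user_psize() can only BUG() -- nohash64
+ * kernels are never expected to call it.
+ */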
+#endif /* _ASM_POWERPC_NOHASH_64_SLICE_H */ diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 726c23304a57..8eb3ebca02df 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -21,6 +21,9 @@ /* We calculate number of sg entries based on PAGE_SIZE */ #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry)) +/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */ +#define OPAL_BUSY_DELAY_MS 10 + /* /sys/firmware/opal */ extern struct kobject *opal_kobj; diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 04b60af027ae..e6bd59353e40 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -231,6 +231,15 @@ struct paca_struct { struct sibling_subcore_state *sibling_subcore_state; #endif #endif +#ifdef CONFIG_PPC_BOOK3S_64 + /* + * rfi fallback flush must be in its own cacheline to prevent + * other paca data leaking into the L1d + */ + u64 exrfi[EX_SIZE] __aligned(0x80); + void *rfi_flush_fallback_area; + u64 l1d_flush_size; +#endif }; extern void copy_mm_to_paca(struct mm_struct *mm); diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 8da5d4c1cab2..d5f1c41b7dba 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -344,5 +344,6 @@ typedef struct page *pgtable_t; #include #endif /* __ASSEMBLY__ */ +#include #endif /* _ASM_POWERPC_PAGE_H */ diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index c4d9654bd637..af04acdb873f 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h @@ -86,65 +86,6 @@ extern u64 ppc64_pft_size; #endif /* __ASSEMBLY__ */ -#ifdef CONFIG_PPC_MM_SLICES - -#define SLICE_LOW_SHIFT 28 -#define SLICE_HIGH_SHIFT 40 - -#define SLICE_LOW_TOP (0x100000000ul) -#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT) -#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT) - -#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT) -#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT) - -#ifndef __ASSEMBLY__ -struct mm_struct; - -extern unsigned long slice_get_unmapped_area(unsigned long addr, - unsigned long len, - unsigned long flags, - unsigned int psize, - int topdown); - -extern unsigned int get_slice_psize(struct mm_struct *mm, - unsigned long addr); - -extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); -extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start, - unsigned long len, unsigned int psize); - -#endif /* __ASSEMBLY__ */ -#else -#define slice_init() -#ifdef CONFIG_PPC_STD_MMU_64 -#define get_slice_psize(mm, addr) ((mm)->context.user_psize) -#define slice_set_user_psize(mm, psize) \ -do { \ - (mm)->context.user_psize = (psize); \ - (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \ -} while (0) -#else /* CONFIG_PPC_STD_MMU_64 */ -#ifdef CONFIG_PPC_64K_PAGES -#define get_slice_psize(mm, addr) MMU_PAGE_64K -#else /* CONFIG_PPC_64K_PAGES */ -#define get_slice_psize(mm, addr) MMU_PAGE_4K -#endif /* !CONFIG_PPC_64K_PAGES */ -#define slice_set_user_psize(mm, psize) do { BUG(); } while(0) -#endif /* !CONFIG_PPC_STD_MMU_64 */ - -#define slice_set_range_psize(mm, start, len, psize) \ - slice_set_user_psize((mm), (psize)) -#endif /* CONFIG_PPC_MM_SLICES */ - -#ifdef CONFIG_HUGETLB_PAGE - -#ifdef CONFIG_PPC_MM_SLICES -#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA -#endif - -#endif /* 
!CONFIG_HUGETLB_PAGE */ - #define VM_DATA_DEFAULT_FLAGS \ (is_32bit_task() ? \ VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64) diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h index a14203c005f1..e11f03007b57 100644 --- a/arch/powerpc/include/asm/pgalloc.h +++ b/arch/powerpc/include/asm/pgalloc.h @@ -18,7 +18,7 @@ static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp) } #endif /* MODULE */ -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO) #ifdef CONFIG_PPC_BOOK3S #include diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 7f01b22fa6cb..55eddf50d149 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -326,4 +326,18 @@ static inline long plapr_signal_sys_reset(long cpu) return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu); } +static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf); + if (rc == H_SUCCESS) { + p->character = retbuf[0]; + p->behaviour = retbuf[1]; + } + + return rc; +} + #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h new file mode 100644 index 000000000000..44989b22383c --- /dev/null +++ b/arch/powerpc/include/asm/security_features.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Security related feature bit definitions. + * + * Copyright 2018, Michael Ellerman, IBM Corporation. + */ + +#ifndef _ASM_POWERPC_SECURITY_FEATURES_H +#define _ASM_POWERPC_SECURITY_FEATURES_H + + +extern unsigned long powerpc_security_features; +extern bool rfi_flush; + +/* These are bit flags */ +enum stf_barrier_type { + STF_BARRIER_NONE = 0x1, + STF_BARRIER_FALLBACK = 0x2, + STF_BARRIER_EIEIO = 0x4, + STF_BARRIER_SYNC_ORI = 0x8, +}; + +void setup_stf_barrier(void); +void do_stf_barrier_fixups(enum stf_barrier_type types); + +static inline void security_ftr_set(unsigned long feature) +{ + powerpc_security_features |= feature; +} + +static inline void security_ftr_clear(unsigned long feature) +{ + powerpc_security_features &= ~feature; +} + +static inline bool security_ftr_enabled(unsigned long feature) +{ + return !!(powerpc_security_features & feature); +} + + +// Features indicating support for Spectre/Meltdown mitigations + +// The L1-D cache can be flushed with ori r30,r30,0 +#define SEC_FTR_L1D_FLUSH_ORI30 0x0000000000000001ull + +// The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2) +#define SEC_FTR_L1D_FLUSH_TRIG2 0x0000000000000002ull + +// ori r31,r31,0 acts as a speculation barrier +#define SEC_FTR_SPEC_BAR_ORI31 0x0000000000000004ull + +// Speculation past bctr is disabled +#define SEC_FTR_BCCTRL_SERIALISED 0x0000000000000008ull + +// Entries in L1-D are private to a SMT thread +#define SEC_FTR_L1D_THREAD_PRIV 0x0000000000000010ull + +// Indirect branch prediction cache disabled +#define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull + + +// Features indicating need for Spectre/Meltdown mitigations + +// The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest) +#define SEC_FTR_L1D_FLUSH_HV 0x0000000000000040ull + +// The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace) +#define SEC_FTR_L1D_FLUSH_PR 0x0000000000000080ull + +// A speculation barrier 
should be used for bounds checks (Spectre variant 1) +#define SEC_FTR_BNDS_CHK_SPEC_BAR 0x0000000000000100ull + +// Firmware configuration indicates user favours security over performance +#define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull + + +// Features enabled by default +#define SEC_FTR_DEFAULT \ + (SEC_FTR_L1D_FLUSH_HV | \ + SEC_FTR_L1D_FLUSH_PR | \ + SEC_FTR_BNDS_CHK_SPEC_BAR | \ + SEC_FTR_FAVOUR_SECURITY) + +#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 257d23dbf55d..bbcdf929be54 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -24,6 +24,7 @@ extern void reloc_got2(unsigned long); void check_for_initrd(void); void initmem_init(void); +void setup_panic(void); #define ARCH_PANIC_TIMEOUT 180 #ifdef CONFIG_PPC_PSERIES @@ -38,6 +39,19 @@ static inline void pseries_big_endian_exceptions(void) {} static inline void pseries_little_endian_exceptions(void) {} #endif /* CONFIG_PPC_PSERIES */ +void rfi_flush_enable(bool enable); + +/* These are bit flags */ +enum l1d_flush_type { + L1D_FLUSH_NONE = 0x1, + L1D_FLUSH_FALLBACK = 0x2, + L1D_FLUSH_ORI = 0x4, + L1D_FLUSH_MTTRIG = 0x8, +}; + +void setup_rfi_flush(enum l1d_flush_type, bool enable); +void do_rfi_flush_fixups(enum l1d_flush_type types); + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_SETUP_H */ diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h new file mode 100644 index 000000000000..172711fadb1c --- /dev/null +++ b/arch/powerpc/include/asm/slice.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_SLICE_H +#define _ASM_POWERPC_SLICE_H + +#ifdef CONFIG_PPC_BOOK3S_64 +#include +#elif defined(CONFIG_PPC64) +#include +#elif defined(CONFIG_PPC_MMU_NOHASH) +#include +#endif + +#ifdef CONFIG_PPC_MM_SLICES + +#ifdef CONFIG_HUGETLB_PAGE +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA +#endif +#define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN + +#ifndef __ASSEMBLY__ + +struct mm_struct; + +unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, + unsigned long flags, unsigned int psize, + int topdown); + +unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr); + +void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); +void slice_set_range_psize(struct mm_struct *mm, unsigned long start, + unsigned long len, unsigned int psize); +#endif /* __ASSEMBLY__ */ + +#else /* CONFIG_PPC_MM_SLICES */ + +#define slice_set_range_psize(mm, start, len, psize) \ + slice_set_user_psize((mm), (psize)) +#endif /* CONFIG_PPC_MM_SLICES */ + +#endif /* _ASM_POWERPC_SLICE_H */ diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h index 63e7f5a1f105..6ec546090ba1 100644 --- a/arch/powerpc/include/asm/synch.h +++ b/arch/powerpc/include/asm/synch.h @@ -6,10 +6,6 @@ #include #include -#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC) -#define __SUBARCH_HAS_LWSYNC -#endif - #ifndef __ASSEMBLY__ extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup; extern void do_lwsync_fixups(unsigned long value, void *fixup_start, diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 023ff9f17501..d5f2ee882f74 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -44,6 +44,11 @@ extern int sysfs_add_device_to_node(struct device *dev, int nid); extern void sysfs_remove_device_from_node(struct device *dev, int 
nid); extern int numa_update_cpu_topology(bool cpus_locked); +static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) +{ + numa_cpu_lookup_table[cpu] = node; +} + static inline int early_cpu_to_node(int cpu) { int nid; diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 61d6049f4c1e..8aaec831053a 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -607,6 +607,8 @@ struct kvm_ppc_rmmu_info { #define KVM_REG_PPC_TIDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc) #define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd) +#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe) + /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 6c6cce937dd8..1479c61e29c5 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -42,7 +42,7 @@ obj-$(CONFIG_VDSO32) += vdso32/ obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o -obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o +obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o obj-$(CONFIG_PPC64) += vdso64/ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 8cfb20e38cfe..2e5ea300258a 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -237,6 +237,10 @@ int main(void) OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp); OFFSET(PACA_IN_MCE, paca_struct, in_mce); OFFSET(PACA_IN_NMI, paca_struct, in_nmi); + OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area); + OFFSET(PACA_EXRFI, paca_struct, exrfi); + OFFSET(PACA_L1D_FLUSH_SIZE, paca_struct, l1d_flush_size); + #endif OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id); OFFSET(PACAKEXECSTATE, paca_struct, kexec_state); diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 610955fe8b81..9daede99c131 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -28,6 +28,7 @@ _GLOBAL(__setup_cpu_power7) beqlr li r0,0 mtspr SPRN_LPID,r0 + mtspr SPRN_PCR,r0 mfspr r3,SPRN_LPCR li r4,(LPCR_LPES1 >> LPCR_LPES_SH) bl __init_LPCR_ISA206 @@ -42,6 +43,7 @@ _GLOBAL(__restore_cpu_power7) beqlr li r0,0 mtspr SPRN_LPID,r0 + mtspr SPRN_PCR,r0 mfspr r3,SPRN_LPCR li r4,(LPCR_LPES1 >> LPCR_LPES_SH) bl __init_LPCR_ISA206 @@ -59,6 +61,7 @@ _GLOBAL(__setup_cpu_power8) beqlr li r0,0 mtspr SPRN_LPID,r0 + mtspr SPRN_PCR,r0 mfspr r3,SPRN_LPCR ori r3, r3, LPCR_PECEDH li r4,0 /* LPES = 0 */ @@ -81,6 +84,7 @@ _GLOBAL(__restore_cpu_power8) beqlr li r0,0 mtspr SPRN_LPID,r0 + mtspr SPRN_PCR,r0 mfspr r3,SPRN_LPCR ori r3, r3, LPCR_PECEDH li r4,0 /* LPES = 0 */ @@ -102,6 +106,8 @@ _GLOBAL(__setup_cpu_power9) li r0,0 mtspr SPRN_PSSCR,r0 mtspr SPRN_LPID,r0 + mtspr SPRN_PID,r0 + mtspr SPRN_PCR,r0 mfspr r3,SPRN_LPCR LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) or r3, r3, r4 @@ -126,6 +132,8 @@ _GLOBAL(__restore_cpu_power9) li r0,0 mtspr SPRN_PSSCR,r0 mtspr SPRN_LPID,r0 + mtspr SPRN_PID,r0 + mtspr SPRN_PCR,r0 mfspr r3,SPRN_LPCR LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) or r3, r3, r4 diff --git 
a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 7275fed271af..2dba206b065a 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -86,6 +86,7 @@ static int hv_mode; static struct { u64 lpcr; + u64 lpcr_clear; u64 hfscr; u64 fscr; } system_registers; @@ -115,6 +116,8 @@ static void cpufeatures_flush_tlb(void) static void __restore_cpu_cpufeatures(void) { + u64 lpcr; + /* * LPCR is restored by the power on engine already. It can be changed * after early init e.g., by radix enable, and we have no unified API @@ -127,11 +130,14 @@ static void __restore_cpu_cpufeatures(void) * The best we can do to accommodate secondary boot and idle restore * for now is "or" LPCR with existing. */ - - mtspr(SPRN_LPCR, system_registers.lpcr | mfspr(SPRN_LPCR)); + lpcr = mfspr(SPRN_LPCR); + lpcr |= system_registers.lpcr; + lpcr &= ~system_registers.lpcr_clear; + mtspr(SPRN_LPCR, lpcr); if (hv_mode) { mtspr(SPRN_LPID, 0); mtspr(SPRN_HFSCR, system_registers.hfscr); + mtspr(SPRN_PCR, 0); } mtspr(SPRN_FSCR, system_registers.fscr); @@ -351,8 +357,9 @@ static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f) { u64 lpcr; + system_registers.lpcr_clear |= (LPCR_ISL | LPCR_UPRT | LPCR_HR); lpcr = mfspr(SPRN_LPCR); - lpcr &= ~LPCR_ISL; + lpcr &= ~(LPCR_ISL | LPCR_UPRT | LPCR_HR); mtspr(SPRN_LPCR, lpcr); cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE; diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 8b840191df59..470284f9e4f6 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -207,18 +207,18 @@ static void *eeh_report_error(void *data, void *userdata) if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe)) return NULL; + + device_lock(&dev->dev); dev->error_state = pci_channel_io_frozen; driver = eeh_pcid_get(dev); - if (!driver) return NULL; + if (!driver) goto out_no_dev; eeh_disable_irq(dev); if (!driver->err_handler || - !driver->err_handler->error_detected) { - eeh_pcid_put(dev); - return NULL; - } + !driver->err_handler->error_detected) + goto out; rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen); @@ -227,7 +227,10 @@ static void *eeh_report_error(void *data, void *userdata) if (*res == PCI_ERS_RESULT_NONE) *res = rc; edev->in_error = true; +out: eeh_pcid_put(dev); +out_no_dev: + device_unlock(&dev->dev); return NULL; } @@ -250,15 +253,14 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata) if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe)) return NULL; + device_lock(&dev->dev); driver = eeh_pcid_get(dev); - if (!driver) return NULL; + if (!driver) goto out_no_dev; if (!driver->err_handler || !driver->err_handler->mmio_enabled || - (edev->mode & EEH_DEV_NO_HANDLER)) { - eeh_pcid_put(dev); - return NULL; - } + (edev->mode & EEH_DEV_NO_HANDLER)) + goto out; rc = driver->err_handler->mmio_enabled(dev); @@ -266,7 +268,10 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata) if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; if (*res == PCI_ERS_RESULT_NONE) *res = rc; +out: eeh_pcid_put(dev); +out_no_dev: + device_unlock(&dev->dev); return NULL; } @@ -289,20 +294,20 @@ static void *eeh_report_reset(void *data, void *userdata) if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe)) return NULL; + + device_lock(&dev->dev); dev->error_state = pci_channel_io_normal; driver = eeh_pcid_get(dev); - if (!driver) return NULL; + if (!driver) goto out_no_dev; eeh_enable_irq(dev); if (!driver->err_handler || 
!driver->err_handler->slot_reset || (edev->mode & EEH_DEV_NO_HANDLER) || - (!edev->in_error)) { - eeh_pcid_put(dev); - return NULL; - } + (!edev->in_error)) + goto out; rc = driver->err_handler->slot_reset(dev); if ((*res == PCI_ERS_RESULT_NONE) || @@ -310,7 +315,10 @@ static void *eeh_report_reset(void *data, void *userdata) if (*res == PCI_ERS_RESULT_DISCONNECT && rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; +out: eeh_pcid_put(dev); +out_no_dev: + device_unlock(&dev->dev); return NULL; } @@ -361,10 +369,12 @@ static void *eeh_report_resume(void *data, void *userdata) if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe)) return NULL; + + device_lock(&dev->dev); dev->error_state = pci_channel_io_normal; driver = eeh_pcid_get(dev); - if (!driver) return NULL; + if (!driver) goto out_no_dev; was_in_error = edev->in_error; edev->in_error = false; @@ -374,13 +384,15 @@ static void *eeh_report_resume(void *data, void *userdata) !driver->err_handler->resume || (edev->mode & EEH_DEV_NO_HANDLER) || !was_in_error) { edev->mode &= ~EEH_DEV_NO_HANDLER; - eeh_pcid_put(dev); - return NULL; + goto out; } driver->err_handler->resume(dev); +out: eeh_pcid_put(dev); +out_no_dev: + device_unlock(&dev->dev); return NULL; } @@ -400,22 +412,25 @@ static void *eeh_report_failure(void *data, void *userdata) if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe)) return NULL; + + device_lock(&dev->dev); dev->error_state = pci_channel_io_perm_failure; driver = eeh_pcid_get(dev); - if (!driver) return NULL; + if (!driver) goto out_no_dev; eeh_disable_irq(dev); if (!driver->err_handler || - !driver->err_handler->error_detected) { - eeh_pcid_put(dev); - return NULL; - } + !driver->err_handler->error_detected) + goto out; driver->err_handler->error_detected(dev, pci_channel_io_perm_failure); +out: eeh_pcid_put(dev); +out_no_dev: + device_unlock(&dev->dev); return NULL; } @@ -435,9 +450,11 @@ static void *eeh_add_virt_device(void *data, void *userdata) driver = eeh_pcid_get(dev); if (driver) { - eeh_pcid_put(dev); - if (driver->err_handler) + if (driver->err_handler) { + eeh_pcid_put(dev); return NULL; + } + eeh_pcid_put(dev); } #ifdef CONFIG_PPC_POWERNV @@ -474,17 +491,19 @@ static void *eeh_rmv_device(void *data, void *userdata) if (eeh_dev_removed(edev)) return NULL; - driver = eeh_pcid_get(dev); - if (driver) { - eeh_pcid_put(dev); - if (removed && - eeh_pe_passed(edev->pe)) - return NULL; - if (removed && - driver->err_handler && - driver->err_handler->error_detected && - driver->err_handler->slot_reset) + if (removed) { + if (eeh_pe_passed(edev->pe)) return NULL; + driver = eeh_pcid_get(dev); + if (driver) { + if (driver->err_handler && + driver->err_handler->error_detected && + driver->err_handler->slot_reset) { + eeh_pcid_put(dev); + return NULL; + } + eeh_pcid_put(dev); + } } /* Remove it from PCI subsystem */ diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 2e8d1b2b5af4..8545a9523b9b 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -807,7 +807,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev) eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); /* PCI Command: 0x4 */ - eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]); + eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] | + PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); /* Check the PCIe link is ready */ eeh_bridge_check_link(edev); diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 4a0fd4f40245..c194f4c8e66b 
100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -37,6 +37,11 @@ #include #include #include +#ifdef CONFIG_PPC_BOOK3S +#include +#else +#include +#endif /* * System calls. @@ -262,13 +267,23 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ + ld r2,GPR2(r1) + ld r1,GPR1(r1) + mtlr r4 + mtcr r5 + mtspr SPRN_SRR0,r7 + mtspr SPRN_SRR1,r8 + RFI_TO_USER + b . /* prevent speculative execution */ + + /* exit to kernel */ 1: ld r2,GPR2(r1) ld r1,GPR1(r1) mtlr r4 mtcr r5 mtspr SPRN_SRR0,r7 mtspr SPRN_SRR1,r8 - RFI + RFI_TO_KERNEL b . /* prevent speculative execution */ .Lsyscall_error: @@ -397,8 +412,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) mtmsrd r10, 1 mtspr SPRN_SRR0, r11 mtspr SPRN_SRR1, r12 - - rfid + RFI_TO_USER b . /* prevent speculative execution */ #endif _ASM_NOKPROBE_SYMBOL(system_call_common); @@ -583,6 +597,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) * actually hit this code path. */ + isync slbie r6 slbie r6 /* Workaround POWER5 < DD2.1 issue */ slbmte r7,r0 @@ -878,7 +893,7 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ACCOUNT_CPU_USER_EXIT(r13, r2, r4) REST_GPR(13, r1) -1: + mtspr SPRN_SRR1,r3 ld r2,_CCR(r1) @@ -891,8 +906,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r3,GPR3(r1) ld r4,GPR4(r1) ld r1,GPR1(r1) + RFI_TO_USER + b . /* prevent speculative execution */ - rfid +1: mtspr SPRN_SRR1,r3 + + ld r2,_CCR(r1) + mtcrf 0xFF,r2 + ld r2,_NIP(r1) + mtspr SPRN_SRR0,r2 + + ld r0,GPR0(r1) + ld r2,GPR2(r1) + ld r3,GPR3(r1) + ld r4,GPR4(r1) + ld r1,GPR1(r1) + RFI_TO_KERNEL b . /* prevent speculative execution */ #endif /* CONFIG_PPC_BOOK3E */ @@ -911,9 +940,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) beq 1f rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS stb r7,PACAIRQHAPPENED(r13) -1: li r0,0 - stb r0,PACASOFTIRQEN(r13); - TRACE_DISABLE_INTS +1: +#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG) + /* The interrupt should not have soft enabled. */ + lbz r7,PACASOFTIRQEN(r13) +1: tdnei r7,0 + EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING +#endif b .Ldo_restore /* @@ -1073,7 +1106,7 @@ __enter_rtas: mtspr SPRN_SRR0,r5 mtspr SPRN_SRR1,r6 - rfid + RFI_TO_KERNEL b . /* prevent speculative execution */ rtas_return_loc: @@ -1098,7 +1131,7 @@ rtas_return_loc: mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 - rfid + RFI_TO_KERNEL b . /* prevent speculative execution */ _ASM_NOKPROBE_SYMBOL(__enter_rtas) _ASM_NOKPROBE_SYMBOL(rtas_return_loc) @@ -1171,7 +1204,7 @@ _GLOBAL(enter_prom) LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE) andc r11,r11,r12 mtsrr1 r11 - rfid + RFI_TO_KERNEL #endif /* CONFIG_PPC_BOOK3E */ 1: /* Return from OF */ diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 1c80bd292e48..c09f0a6f8495 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -254,7 +254,7 @@ BEGIN_FTR_SECTION LOAD_HANDLER(r12, machine_check_handle_early) 1: mtspr SPRN_SRR0,r12 mtspr SPRN_SRR1,r11 - rfid + RFI_TO_KERNEL b . /* prevent speculative execution */ 2: /* Stack overflow. Stay on emergency stack and panic. @@ -443,7 +443,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early) li r3,MSR_ME andc r10,r10,r3 /* Turn off MSR_ME */ mtspr SPRN_SRR1,r10 - rfid + RFI_TO_KERNEL b . 2: /* @@ -461,7 +461,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early) */ bl machine_check_queue_event MACHINE_CHECK_HANDLER_WINDUP - rfid + RFI_TO_USER_OR_KERNEL 9: /* Deliver the machine check to host kernel in V mode. 
*/ MACHINE_CHECK_HANDLER_WINDUP @@ -542,7 +542,7 @@ EXC_COMMON_BEGIN(instruction_access_common) RECONCILE_IRQ_STATE(r10, r11) ld r12,_MSR(r1) ld r3,_NIP(r1) - andis. r4,r12,DSISR_BAD_FAULT_64S@h + andis. r4,r12,DSISR_SRR1_MATCH_64S@h li r5,0x400 std r3,_DAR(r1) std r4,_DSISR(r1) @@ -596,6 +596,9 @@ EXC_COMMON_BEGIN(slb_miss_common) stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ + andi. r9,r11,MSR_PR // Check for exception from userspace + cmpdi cr4,r9,MSR_PR // And save the result in CR4 for later + /* * Test MSR_RI before calling slb_allocate_realmode, because the * MSR in r11 gets clobbered. However we still want to allocate @@ -622,9 +625,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) /* All done -- return from exception. */ + bne cr4,1f /* returning to kernel */ + .machine push .machine "power4" mtcrf 0x80,r9 + mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */ mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ mtcrf 0x02,r9 /* I/D indication is in cr6 */ mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ @@ -638,9 +644,30 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) ld r11,PACA_EXSLB+EX_R11(r13) ld r12,PACA_EXSLB+EX_R12(r13) ld r13,PACA_EXSLB+EX_R13(r13) - rfid + RFI_TO_USER + b . /* prevent speculative execution */ +1: +.machine push +.machine "power4" + mtcrf 0x80,r9 + mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */ + mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ + mtcrf 0x02,r9 /* I/D indication is in cr6 */ + mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ +.machine pop + + RESTORE_CTR(r9, PACA_EXSLB) + RESTORE_PPR_PACA(PACA_EXSLB, r9) + mr r3,r12 + ld r9,PACA_EXSLB+EX_R9(r13) + ld r10,PACA_EXSLB+EX_R10(r13) + ld r11,PACA_EXSLB+EX_R11(r13) + ld r12,PACA_EXSLB+EX_R12(r13) + ld r13,PACA_EXSLB+EX_R13(r13) + RFI_TO_KERNEL b . /* prevent speculative execution */ + 2: std r3,PACA_EXSLB+EX_DAR(r13) mr r3,r12 mfspr r11,SPRN_SRR0 @@ -649,7 +676,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) mtspr SPRN_SRR0,r10 ld r10,PACAKMSR(r13) mtspr SPRN_SRR1,r10 - rfid + RFI_TO_KERNEL b . 8: std r3,PACA_EXSLB+EX_DAR(r13) @@ -660,7 +687,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) mtspr SPRN_SRR0,r10 ld r10,PACAKMSR(r13) mtspr SPRN_SRR1,r10 - rfid + RFI_TO_KERNEL b . EXC_COMMON_BEGIN(unrecov_slb) @@ -677,7 +704,7 @@ EXC_COMMON_BEGIN(bad_addr_slb) ld r3, PACA_EXSLB+EX_DAR(r13) std r3, _DAR(r1) beq cr6, 2f - li r10, 0x480 /* fix trap number for I-SLB miss */ + li r10, 0x481 /* fix trap number for I-SLB miss */ std r10, _TRAP(r1) 2: bl save_nvgprs addi r3, r1, STACK_FRAME_OVERHEAD @@ -798,7 +825,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) #endif -EXC_REAL_MASKABLE(decrementer, 0x900, 0x80) +EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80) EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900) TRAMP_KVM(PACA_EXGEN, 0x900) EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt) @@ -874,6 +901,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception) mtctr r13; \ GET_PACA(r13); \ std r10,PACA_EXGEN+EX_R10(r13); \ + INTERRUPT_TO_KERNEL; \ KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \ HMT_MEDIUM; \ mfctr r9; @@ -882,7 +910,8 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception) #define SYSCALL_KVMTEST \ HMT_MEDIUM; \ mr r9,r13; \ - GET_PACA(r13); + GET_PACA(r13); \ + INTERRUPT_TO_KERNEL; #endif #define LOAD_SYSCALL_HANDLER(reg) \ @@ -905,7 +934,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \ mtspr SPRN_SRR0,r10 ; \ ld r10,PACAKMSR(r13) ; \ mtspr SPRN_SRR1,r10 ; \ - rfid ; \ + RFI_TO_KERNEL ; \ b . 
; /* prevent speculative execution */

 #define SYSCALL_FASTENDIAN \
@@ -914,7 +943,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
 xori r12,r12,MSR_LE ; \
 mtspr SPRN_SRR1,r12 ; \
 mr r13,r9 ; \
- rfid ; /* return to userspace */ \
+ RFI_TO_USER ; /* return to userspace */ \
 b . ; /* prevent speculative execution */

 #if defined(CONFIG_RELOCATABLE)
@@ -1299,7 +1328,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 ld r11,PACA_EXGEN+EX_R11(r13)
 ld r12,PACA_EXGEN+EX_R12(r13)
 ld r13,PACA_EXGEN+EX_R13(r13)
- HRFID
+ HRFI_TO_UNKNOWN
 b .
 #endif
@@ -1403,10 +1432,103 @@ masked_##_H##interrupt: \
 ld r10,PACA_EXGEN+EX_R10(r13); \
 ld r11,PACA_EXGEN+EX_R11(r13); \
 /* returns to kernel where r13 must be set up, so don't restore it */ \
- ##_H##rfid; \
+ ##_H##RFI_TO_KERNEL; \
 b .; \
 MASKED_DEC_HANDLER(_H)
+TRAMP_REAL_BEGIN(stf_barrier_fallback)
+ std r9,PACA_EXRFI+EX_R9(r13)
+ std r10,PACA_EXRFI+EX_R10(r13)
+ sync
+ ld r9,PACA_EXRFI+EX_R9(r13)
+ ld r10,PACA_EXRFI+EX_R10(r13)
+ ori 31,31,0
+ .rept 14
+ b 1f
+1:
+ .endr
+ blr
+
+TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ SET_SCRATCH0(r13);
+ GET_PACA(r13);
+ std r9,PACA_EXRFI+EX_R9(r13)
+ std r10,PACA_EXRFI+EX_R10(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+ mfctr r9
+ ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+ ld r11,PACA_L1D_FLUSH_SIZE(r13)
+ srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
+ mtctr r11
+ DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+
+ /* order ld/st prior to dcbt stop all streams with flushing */
+ sync
+
+ /*
+ * The load addresses are at staggered offsets within cachelines,
+ * which suits some pipelines better (on others it should not
+ * hurt).
+ */
+1:
+ ld r11,(0x80 + 8)*0(r10)
+ ld r11,(0x80 + 8)*1(r10)
+ ld r11,(0x80 + 8)*2(r10)
+ ld r11,(0x80 + 8)*3(r10)
+ ld r11,(0x80 + 8)*4(r10)
+ ld r11,(0x80 + 8)*5(r10)
+ ld r11,(0x80 + 8)*6(r10)
+ ld r11,(0x80 + 8)*7(r10)
+ addi r10,r10,0x80*8
+ bdnz 1b
+
+ mtctr r9
+ ld r9,PACA_EXRFI+EX_R9(r13)
+ ld r10,PACA_EXRFI+EX_R10(r13)
+ ld r11,PACA_EXRFI+EX_R11(r13)
+ GET_SCRATCH0(r13);
+ rfid
+
+TRAMP_REAL_BEGIN(hrfi_flush_fallback)
+ SET_SCRATCH0(r13);
+ GET_PACA(r13);
+ std r9,PACA_EXRFI+EX_R9(r13)
+ std r10,PACA_EXRFI+EX_R10(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+ mfctr r9
+ ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+ ld r11,PACA_L1D_FLUSH_SIZE(r13)
+ srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
+ mtctr r11
+ DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+
+ /* order ld/st prior to dcbt stop all streams with flushing */
+ sync
+
+ /*
+ * The load addresses are at staggered offsets within cachelines,
+ * which suits some pipelines better (on others it should not
+ * hurt).
+ */
+1:
+ ld r11,(0x80 + 8)*0(r10)
+ ld r11,(0x80 + 8)*1(r10)
+ ld r11,(0x80 + 8)*2(r10)
+ ld r11,(0x80 + 8)*3(r10)
+ ld r11,(0x80 + 8)*4(r10)
+ ld r11,(0x80 + 8)*5(r10)
+ ld r11,(0x80 + 8)*6(r10)
+ ld r11,(0x80 + 8)*7(r10)
+ addi r10,r10,0x80*8
+ bdnz 1b
+
+ mtctr r9
+ ld r9,PACA_EXRFI+EX_R9(r13)
+ ld r10,PACA_EXRFI+EX_R10(r13)
+ ld r11,PACA_EXRFI+EX_R11(r13)
+ GET_SCRATCH0(r13);
+ hrfid
+
 /*
 * Real mode exceptions actually use this too, but alternate
 * instruction code patches (which end up in the common .text area)
@@ -1426,7 +1548,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
 addi r13, r13, 4
 mtspr SPRN_SRR0, r13
 GET_SCRATCH0(r13)
- rfid
+ RFI_TO_KERNEL
 b .

 TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
@@ -1438,7 +1560,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
 addi r13, r13, 4
 mtspr SPRN_HSRR0, r13
 GET_SCRATCH0(r13)
- hrfid
+ HRFI_TO_KERNEL
 b .
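
[Editor's note] The rfi_flush_fallback/hrfi_flush_fallback trampolines above implement a "displacement flush": on CPUs with no dedicated L1D flush instruction, they load from a per-CPU dummy buffer (PACA_RFI_FLUSH_FALLBACK_AREA, sized per PACA_L1D_FLUSH_SIZE) until every 128-byte line of the L1 data cache has been displaced. A rough C sketch of the same access pattern follows; the function name is hypothetical, and the real code of course runs in assembly at exception exit with only scratch registers available:

static void l1d_displacement_flush(const char *area, unsigned long l1d_size)
{
	const volatile char *p = area;	/* volatile so each load is performed */
	unsigned long i, j;

	/*
	 * One pass over l1d_size bytes touches every set/way of the L1D.
	 * Mirroring the asm loop: eight loads per iteration, staggered
	 * (0x80 + 8) bytes apart so each lands in a different 128-byte
	 * cache line, then advance by eight lines (0x80 * 8 bytes).
	 */
	for (i = 0; i < l1d_size; i += 0x80 * 8)
		for (j = 0; j < 8; j++)
			(void)p[i + j * (0x80 + 8)];
}

The +8 stagger is why the comment in the trampoline says the load addresses sit at staggered offsets within cachelines: line k of each 8-line block is read at byte 8*k into the line rather than at its start.
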
#endif @@ -1506,7 +1628,7 @@ USE_TEXT_SECTION() .balign IFETCH_ALIGN_BYTES do_hash_page: #ifdef CONFIG_PPC_STD_MMU_64 - lis r0,DSISR_BAD_FAULT_64S@h + lis r0,(DSISR_BAD_FAULT_64S|DSISR_DABRMATCH)@h ori r0,r0,DSISR_BAD_FAULT_64S@l and. r0,r4,r0 /* weird error? */ bne- handle_page_fault /* if not, try to insert a HPTE */ diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index e1431800bfb9..d0020bc1f209 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -1155,6 +1155,9 @@ void fadump_cleanup(void) init_fadump_mem_struct(&fdm, be64_to_cpu(fdm_active->cpu_state_data.destination_address)); fadump_invalidate_dump(&fdm); + } else if (fw_dump.dump_registered) { + /* Un-register Firmware-assisted dump if it was registered. */ + fadump_unregister_dump(&fdm); } } @@ -1453,25 +1456,6 @@ static void fadump_init_files(void) return; } -static int fadump_panic_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - /* - * If firmware-assisted dump has been registered then trigger - * firmware-assisted dump and let firmware handle everything - * else. If this returns, then fadump was not registered, so - * go through the rest of the panic path. - */ - crash_fadump(NULL, ptr); - - return NOTIFY_DONE; -} - -static struct notifier_block fadump_panic_block = { - .notifier_call = fadump_panic_event, - .priority = INT_MIN /* may not return; must be done last */ -}; - /* * Prepare for firmware-assisted dump. */ @@ -1504,9 +1488,6 @@ int __init setup_fadump(void) init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start); fadump_init_files(); - atomic_notifier_chain_register(&panic_notifier_list, - &fadump_panic_block); - return 1; } subsys_initcall(setup_fadump); diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 8c54166491e7..29b2fed93289 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -388,7 +388,7 @@ DataAccess: EXCEPTION_PROLOG mfspr r10,SPRN_DSISR stw r10,_DSISR(r11) - andis. r0,r10,DSISR_BAD_FAULT_32S@h + andis. 
r0,r10,(DSISR_BAD_FAULT_32S|DSISR_DABRMATCH)@h bne 1f /* if not, try to put a PTE */ mfspr r4,SPRN_DAR /* into the hash table */ rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */ diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 4fee00d414e8..2d0d89e2cb9a 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -958,7 +958,7 @@ start_here: tovirt(r6,r6) lis r5, abatron_pteptrs@h ori r5, r5, abatron_pteptrs@l - stw r5, 0xf0(r0) /* Must match your Abatron config file */ + stw r5, 0xf0(0) /* Must match your Abatron config file */ tophys(r5,r5) stw r6, 0(r5) diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 53b9c1dfd7d9..ceafad83ef50 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -175,8 +175,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) if (cpu_has_feature(CPU_FTR_DAWR)) { length_max = 512 ; /* 64 doublewords */ /* DAWR region can't cross 512 boundary */ - if ((bp->attr.bp_addr >> 10) != - ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10)) + if ((bp->attr.bp_addr >> 9) != + ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9)) return -EINVAL; } if (info->len > diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 1125c9be9e06..4efbde0984b2 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -140,6 +140,8 @@ power9_restore_additional_sprs: ld r4, STOP_MMCR2(r13) mtspr SPRN_MMCR1, r3 mtspr SPRN_MMCR2, r4 + ld r4, PACA_SPRG_VDSO(r13) + mtspr SPRN_SPRG3, r4 blr /* @@ -838,6 +840,8 @@ BEGIN_FTR_SECTION mtspr SPRN_PTCR,r4 ld r4,_RPR(r1) mtspr SPRN_RPR,r4 + ld r4,_AMOR(r1) + mtspr SPRN_AMOR,r4 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) ld r4,_TSCR(r1) diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 4e65bf82f5e0..0ce8b0e5d7ba 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -430,6 +430,14 @@ void force_external_irq_replay(void) */ WARN_ON(!arch_irqs_disabled()); + /* + * Interrupts must always be hard disabled before irq_happened is + * modified (to prevent lost update in case of interrupt between + * load and store). + */ + __hard_irq_disable(); + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; + /* Indicate in the PACA that we have an interrupt to replay */ local_paca->irq_happened |= PACA_IRQ_EE; } diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c index 6c089d9757c9..2d81404f818c 100644 --- a/arch/powerpc/kernel/kprobes-ftrace.c +++ b/arch/powerpc/kernel/kprobes-ftrace.c @@ -65,6 +65,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, /* Disable irq for emulating a breakpoint and avoiding preempt */ local_irq_save(flags); hard_irq_disable(); + preempt_disable(); p = get_kprobe((kprobe_opcode_t *)nip); if (unlikely(!p) || kprobe_disabled(p)) @@ -86,12 +87,18 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip, kcb->kprobe_status = KPROBE_HIT_ACTIVE; if (!p->pre_handler || !p->pre_handler(p, regs)) __skip_singlestep(p, regs, kcb, orig_nip); - /* - * If pre_handler returns !0, it sets regs->nip and - * resets current kprobe. - */ + else { + /* + * If pre_handler returns !0, it sets regs->nip and + * resets current kprobe. In this case, we still need + * to restore irq, but not preemption. 
+ */ + local_irq_restore(flags); + return; + } } end: + preempt_enable_no_resched(); local_irq_restore(flags); } NOKPROBE_SYMBOL(kprobe_ftrace_handler); diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index bebc3007a793..10b46b35c059 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -457,29 +457,33 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) } kretprobe_assert(ri, orig_ret_address, trampoline_address); - regs->nip = orig_ret_address; + /* - * Make LR point to the orig_ret_address. - * When the 'nop' inside the kretprobe_trampoline - * is optimized, we can do a 'blr' after executing the - * detour buffer code. + * We get here through one of two paths: + * 1. by taking a trap -> kprobe_handler() -> here + * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here + * + * When going back through (1), we need regs->nip to be setup properly + * as it is used to determine the return address from the trap. + * For (2), since nip is not honoured with optprobes, we instead setup + * the link register properly so that the subsequent 'blr' in + * kretprobe_trampoline jumps back to the right instruction. + * + * For nip, we should set the address to the previous instruction since + * we end up emulating it in kprobe_handler(), which increments the nip + * again. */ + regs->nip = orig_ret_address - 4; regs->link = orig_ret_address; - reset_current_kprobe(); kretprobe_hash_unlock(current, &flags); - preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); kfree(ri); } - /* - * By returning a non-zero value, we are telling - * kprobe_handler() that we don't want the post_handler - * to run (and have re-enabled preemption) - */ - return 1; + + return 0; } NOKPROBE_SYMBOL(trampoline_probe_handler); diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c index 992c0d258e5d..c66132b145eb 100644 --- a/arch/powerpc/kernel/machine_kexec_file_64.c +++ b/arch/powerpc/kernel/machine_kexec_file_64.c @@ -43,7 +43,7 @@ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, /* We don't support crash kernels yet. */ if (image->type == KEXEC_TYPE_CRASH) - return -ENOTSUPP; + return -EOPNOTSUPP; for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) { fops = kexec_file_loaders[i]; diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 8ac0bd2bddb0..3280953a82cf 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -623,7 +623,9 @@ BEGIN_FTR_SECTION * NOTE, we rely on r0 being 0 from above. */ mtspr SPRN_IAMR,r0 +BEGIN_FTR_SECTION_NESTED(42) mtspr SPRN_AMOR,r0 +END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) /* save regs for local vars on new stack. diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 0b0f89685b67..2a1b1273a312 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -486,7 +486,17 @@ static bool is_early_mcount_callsite(u32 *instruction) restore r2. */ static int restore_r2(u32 *instruction, struct module *me) { - if (is_early_mcount_callsite(instruction - 1)) + u32 *prev_insn = instruction - 1; + + if (is_early_mcount_callsite(prev_insn)) + return 1; + + /* + * Make sure the branch isn't a sibling call. Sibling calls aren't + * "link" branches and they don't return, so they don't need the r2 + * restore afterwards. 
+ */ + if (!instr_is_relative_link_branch(*prev_insn)) return 1; if (*instruction != PPC_INST_NOP) { diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c index 91e037ab20a1..60ba7f1370a8 100644 --- a/arch/powerpc/kernel/optprobes.c +++ b/arch/powerpc/kernel/optprobes.c @@ -115,7 +115,6 @@ static unsigned long can_optimize(struct kprobe *p) static void optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) { - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); unsigned long flags; /* This is possible if op is under delayed unoptimizing */ @@ -124,13 +123,14 @@ static void optimized_callback(struct optimized_kprobe *op, local_irq_save(flags); hard_irq_disable(); + preempt_disable(); if (kprobe_running()) { kprobes_inc_nmissed_count(&op->kp); } else { __this_cpu_write(current_kprobe, &op->kp); regs->nip = (unsigned long)op->kp.addr; - kcb->kprobe_status = KPROBE_HIT_ACTIVE; + get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; opt_pre_handler(&op->kp, regs); __this_cpu_write(current_kprobe, NULL); } @@ -140,6 +140,7 @@ static void optimized_callback(struct optimized_kprobe *op, * local_irq_restore() will re-enable interrupts, * if they were hard disabled. */ + preempt_enable_no_resched(); local_irq_restore(flags); } NOKPROBE_SYMBOL(optimized_callback); diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 1d817f4d97d9..2094f2b249fd 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 02190e90c7ae..f8782c7ef50f 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -334,6 +334,7 @@ static void __init prom_print_dec(unsigned long val) call_prom("write", 3, 1, prom.stdout, buf+i, size); } +__printf(1, 2) static void __init prom_printf(const char *format, ...) 
{ const char *p, *q, *s; @@ -1148,7 +1149,7 @@ static void __init prom_send_capabilities(void) */ cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); - prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n", + prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n", cores, NR_CPUS); ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores); @@ -1230,7 +1231,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align) if (align) base = _ALIGN_UP(base, align); - prom_debug("alloc_up(%x, %x)\n", size, align); + prom_debug("%s(%lx, %lx)\n", __func__, size, align); if (ram_top == 0) prom_panic("alloc_up() called with mem not initialized\n"); @@ -1241,7 +1242,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align) for(; (base + size) <= alloc_top; base = _ALIGN_UP(base + 0x100000, align)) { - prom_debug(" trying: 0x%x\n\r", base); + prom_debug(" trying: 0x%lx\n\r", base); addr = (unsigned long)prom_claim(base, size, 0); if (addr != PROM_ERROR && addr != 0) break; @@ -1253,12 +1254,12 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align) return 0; alloc_bottom = addr + size; - prom_debug(" -> %x\n", addr); - prom_debug(" alloc_bottom : %x\n", alloc_bottom); - prom_debug(" alloc_top : %x\n", alloc_top); - prom_debug(" alloc_top_hi : %x\n", alloc_top_high); - prom_debug(" rmo_top : %x\n", rmo_top); - prom_debug(" ram_top : %x\n", ram_top); + prom_debug(" -> %lx\n", addr); + prom_debug(" alloc_bottom : %lx\n", alloc_bottom); + prom_debug(" alloc_top : %lx\n", alloc_top); + prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); + prom_debug(" rmo_top : %lx\n", rmo_top); + prom_debug(" ram_top : %lx\n", ram_top); return addr; } @@ -1273,7 +1274,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align, { unsigned long base, addr = 0; - prom_debug("alloc_down(%x, %x, %s)\n", size, align, + prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align, highmem ? 
"(high)" : "(low)"); if (ram_top == 0) prom_panic("alloc_down() called with mem not initialized\n"); @@ -1301,7 +1302,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align, base = _ALIGN_DOWN(alloc_top - size, align); for (; base > alloc_bottom; base = _ALIGN_DOWN(base - 0x100000, align)) { - prom_debug(" trying: 0x%x\n\r", base); + prom_debug(" trying: 0x%lx\n\r", base); addr = (unsigned long)prom_claim(base, size, 0); if (addr != PROM_ERROR && addr != 0) break; @@ -1312,12 +1313,12 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align, alloc_top = addr; bail: - prom_debug(" -> %x\n", addr); - prom_debug(" alloc_bottom : %x\n", alloc_bottom); - prom_debug(" alloc_top : %x\n", alloc_top); - prom_debug(" alloc_top_hi : %x\n", alloc_top_high); - prom_debug(" rmo_top : %x\n", rmo_top); - prom_debug(" ram_top : %x\n", ram_top); + prom_debug(" -> %lx\n", addr); + prom_debug(" alloc_bottom : %lx\n", alloc_bottom); + prom_debug(" alloc_top : %lx\n", alloc_top); + prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); + prom_debug(" rmo_top : %lx\n", rmo_top); + prom_debug(" ram_top : %lx\n", ram_top); return addr; } @@ -1443,7 +1444,7 @@ static void __init prom_init_mem(void) if (size == 0) continue; - prom_debug(" %x %x\n", base, size); + prom_debug(" %lx %lx\n", base, size); if (base == 0 && (of_platform & PLATFORM_LPAR)) rmo_top = size; if ((base + size) > ram_top) @@ -1463,12 +1464,12 @@ static void __init prom_init_mem(void) if (prom_memory_limit) { if (prom_memory_limit <= alloc_bottom) { - prom_printf("Ignoring mem=%x <= alloc_bottom.\n", - prom_memory_limit); + prom_printf("Ignoring mem=%lx <= alloc_bottom.\n", + prom_memory_limit); prom_memory_limit = 0; } else if (prom_memory_limit >= ram_top) { - prom_printf("Ignoring mem=%x >= ram_top.\n", - prom_memory_limit); + prom_printf("Ignoring mem=%lx >= ram_top.\n", + prom_memory_limit); prom_memory_limit = 0; } else { ram_top = prom_memory_limit; @@ -1500,12 +1501,13 @@ static void __init prom_init_mem(void) alloc_bottom = PAGE_ALIGN(prom_initrd_end); prom_printf("memory layout at init:\n"); - prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit); - prom_printf(" alloc_bottom : %x\n", alloc_bottom); - prom_printf(" alloc_top : %x\n", alloc_top); - prom_printf(" alloc_top_hi : %x\n", alloc_top_high); - prom_printf(" rmo_top : %x\n", rmo_top); - prom_printf(" ram_top : %x\n", ram_top); + prom_printf(" memory_limit : %lx (16 MB aligned)\n", + prom_memory_limit); + prom_printf(" alloc_bottom : %lx\n", alloc_bottom); + prom_printf(" alloc_top : %lx\n", alloc_top); + prom_printf(" alloc_top_hi : %lx\n", alloc_top_high); + prom_printf(" rmo_top : %lx\n", rmo_top); + prom_printf(" ram_top : %lx\n", ram_top); } static void __init prom_close_stdin(void) @@ -1566,7 +1568,7 @@ static void __init prom_instantiate_opal(void) return; } - prom_printf("instantiating opal at 0x%x...", base); + prom_printf("instantiating opal at 0x%llx...", base); if (call_prom_ret("call-method", 4, 3, rets, ADDR("load-opal-runtime"), @@ -1582,10 +1584,10 @@ static void __init prom_instantiate_opal(void) reserve_mem(base, size); - prom_debug("opal base = 0x%x\n", base); - prom_debug("opal align = 0x%x\n", align); - prom_debug("opal entry = 0x%x\n", entry); - prom_debug("opal size = 0x%x\n", (long)size); + prom_debug("opal base = 0x%llx\n", base); + prom_debug("opal align = 0x%llx\n", align); + prom_debug("opal entry = 0x%llx\n", entry); + prom_debug("opal size = 0x%llx\n", size); prom_setprop(opal_node, 
"/ibm,opal", "opal-base-address", &base, sizeof(base)); @@ -1662,7 +1664,7 @@ static void __init prom_instantiate_rtas(void) prom_debug("rtas base = 0x%x\n", base); prom_debug("rtas entry = 0x%x\n", entry); - prom_debug("rtas size = 0x%x\n", (long)size); + prom_debug("rtas size = 0x%x\n", size); prom_debug("prom_instantiate_rtas: end...\n"); } @@ -1720,7 +1722,7 @@ static void __init prom_instantiate_sml(void) if (base == 0) prom_panic("Could not allocate memory for sml\n"); - prom_printf("instantiating sml at 0x%x...", base); + prom_printf("instantiating sml at 0x%llx...", base); memset((void *)base, 0, size); @@ -1739,8 +1741,8 @@ static void __init prom_instantiate_sml(void) prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size", &size, sizeof(size)); - prom_debug("sml base = 0x%x\n", base); - prom_debug("sml size = 0x%x\n", (long)size); + prom_debug("sml base = 0x%llx\n", base); + prom_debug("sml size = 0x%x\n", size); prom_debug("prom_instantiate_sml: end...\n"); } @@ -1841,7 +1843,7 @@ static void __init prom_initialize_tce_table(void) prom_debug("TCE table: %s\n", path); prom_debug("\tnode = 0x%x\n", node); - prom_debug("\tbase = 0x%x\n", base); + prom_debug("\tbase = 0x%llx\n", base); prom_debug("\tsize = 0x%x\n", minsize); /* Initialize the table to have a one-to-one mapping @@ -1928,12 +1930,12 @@ static void __init prom_hold_cpus(void) } prom_debug("prom_hold_cpus: start...\n"); - prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop); - prom_debug(" 1) *spinloop = 0x%x\n", *spinloop); - prom_debug(" 1) acknowledge = 0x%x\n", + prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop); + prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop); + prom_debug(" 1) acknowledge = 0x%lx\n", (unsigned long)acknowledge); - prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge); - prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold); + prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge); + prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold); /* Set the common spinloop variable, so all of the secondary cpus * will block when they are awakened from their OF spinloop. @@ -1961,7 +1963,7 @@ static void __init prom_hold_cpus(void) prom_getprop(node, "reg", ®, sizeof(reg)); cpu_no = be32_to_cpu(reg); - prom_debug("cpu hw idx = %lu\n", cpu_no); + prom_debug("cpu hw idx = %u\n", cpu_no); /* Init the acknowledge var which will be reset by * the secondary cpu when it awakens from its OF @@ -1971,7 +1973,7 @@ static void __init prom_hold_cpus(void) if (cpu_no != prom.cpu) { /* Primary Thread of non-boot cpu or any thread */ - prom_printf("starting cpu hw idx %lu... ", cpu_no); + prom_printf("starting cpu hw idx %u... 
", cpu_no); call_prom("start-cpu", 3, 0, node, secondary_hold, cpu_no); @@ -1982,11 +1984,11 @@ static void __init prom_hold_cpus(void) if (*acknowledge == cpu_no) prom_printf("done\n"); else - prom_printf("failed: %x\n", *acknowledge); + prom_printf("failed: %lx\n", *acknowledge); } #ifdef CONFIG_SMP else - prom_printf("boot cpu hw idx %lu\n", cpu_no); + prom_printf("boot cpu hw idx %u\n", cpu_no); #endif /* CONFIG_SMP */ } @@ -2264,7 +2266,7 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, while ((*mem_start + needed) > *mem_end) { unsigned long room, chunk; - prom_debug("Chunk exhausted, claiming more at %x...\n", + prom_debug("Chunk exhausted, claiming more at %lx...\n", alloc_bottom); room = alloc_top - alloc_bottom; if (room > DEVTREE_CHUNK_SIZE) @@ -2490,7 +2492,7 @@ static void __init flatten_device_tree(void) room = alloc_top - alloc_bottom - 0x4000; if (room > DEVTREE_CHUNK_SIZE) room = DEVTREE_CHUNK_SIZE; - prom_debug("starting device tree allocs at %x\n", alloc_bottom); + prom_debug("starting device tree allocs at %lx\n", alloc_bottom); /* Now try to claim that */ mem_start = (unsigned long)alloc_up(room, PAGE_SIZE); @@ -2553,7 +2555,7 @@ static void __init flatten_device_tree(void) int i; prom_printf("reserved memory map:\n"); for (i = 0; i < mem_reserve_cnt; i++) - prom_printf(" %x - %x\n", + prom_printf(" %llx - %llx\n", be64_to_cpu(mem_reserve_map[i].base), be64_to_cpu(mem_reserve_map[i].size)); } @@ -2563,9 +2565,9 @@ static void __init flatten_device_tree(void) */ mem_reserve_cnt = MEM_RESERVE_MAP_SIZE; - prom_printf("Device tree strings 0x%x -> 0x%x\n", + prom_printf("Device tree strings 0x%lx -> 0x%lx\n", dt_string_start, dt_string_end); - prom_printf("Device tree struct 0x%x -> 0x%x\n", + prom_printf("Device tree struct 0x%lx -> 0x%lx\n", dt_struct_start, dt_struct_end); } @@ -2997,7 +2999,7 @@ static void __init prom_find_boot_cpu(void) prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval)); prom.cpu = be32_to_cpu(rval); - prom_debug("Booting CPU hw index = %lu\n", prom.cpu); + prom_debug("Booting CPU hw index = %d\n", prom.cpu); } static void __init prom_check_initrd(unsigned long r3, unsigned long r4) @@ -3019,8 +3021,8 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4) reserve_mem(prom_initrd_start, prom_initrd_end - prom_initrd_start); - prom_debug("initrd_start=0x%x\n", prom_initrd_start); - prom_debug("initrd_end=0x%x\n", prom_initrd_end); + prom_debug("initrd_start=0x%lx\n", prom_initrd_start); + prom_debug("initrd_end=0x%lx\n", prom_initrd_end); } #endif /* CONFIG_BLK_DEV_INITRD */ } @@ -3273,7 +3275,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, /* Don't print anything after quiesce under OPAL, it crashes OFW */ if (of_platform != PLATFORM_OPAL) { prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase); - prom_debug("->dt_header_start=0x%x\n", hdr); + prom_debug("->dt_header_start=0x%lx\n", hdr); } #ifdef CONFIG_PPC32 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index f52ad5bb7109..81750d9624ab 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -2362,6 +2362,7 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, /* Create a new breakpoint request if one doesn't exist already */ hw_breakpoint_init(&attr); attr.bp_addr = hw_brk.address; + attr.bp_len = 8; arch_bp_generic_fields(hw_brk.type, &attr.bp_type); diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c new file 
mode 100644 index 000000000000..b98a722da915 --- /dev/null +++ b/arch/powerpc/kernel/security.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Security related flags and so on. +// +// Copyright 2018, Michael Ellerman, IBM Corporation. + +#include +#include +#include + +#include +#include + + +unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; + +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) +{ + bool thread_priv; + + thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV); + + if (rfi_flush || thread_priv) { + struct seq_buf s; + seq_buf_init(&s, buf, PAGE_SIZE - 1); + + seq_buf_printf(&s, "Mitigation: "); + + if (rfi_flush) + seq_buf_printf(&s, "RFI Flush"); + + if (rfi_flush && thread_priv) + seq_buf_printf(&s, ", "); + + if (thread_priv) + seq_buf_printf(&s, "L1D private per thread"); + + seq_buf_printf(&s, "\n"); + + return s.len; + } + + if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && + !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) + return sprintf(buf, "Not affected\n"); + + return sprintf(buf, "Vulnerable\n"); +} + +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) +{ + if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) + return sprintf(buf, "Not affected\n"); + + return sprintf(buf, "Vulnerable\n"); +} + +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) +{ + bool bcs, ccd, ori; + struct seq_buf s; + + seq_buf_init(&s, buf, PAGE_SIZE - 1); + + bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); + ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED); + ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31); + + if (bcs || ccd) { + seq_buf_printf(&s, "Mitigation: "); + + if (bcs) + seq_buf_printf(&s, "Indirect branch serialisation (kernel only)"); + + if (bcs && ccd) + seq_buf_printf(&s, ", "); + + if (ccd) + seq_buf_printf(&s, "Indirect branch cache disabled"); + } else + seq_buf_printf(&s, "Vulnerable"); + + if (ori) + seq_buf_printf(&s, ", ori31 speculation barrier enabled"); + + seq_buf_printf(&s, "\n"); + + return s.len; +} + +/* + * Store-forwarding barrier support. 
+ */ + +static enum stf_barrier_type stf_enabled_flush_types; +static bool no_stf_barrier; +bool stf_barrier; + +static int __init handle_no_stf_barrier(char *p) +{ + pr_info("stf-barrier: disabled on command line."); + no_stf_barrier = true; + return 0; +} + +early_param("no_stf_barrier", handle_no_stf_barrier); + +/* This is the generic flag used by other architectures */ +static int __init handle_ssbd(char *p) +{ + if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) { + /* Until firmware tells us, we have the barrier with auto */ + return 0; + } else if (strncmp(p, "off", 3) == 0) { + handle_no_stf_barrier(NULL); + return 0; + } else + return 1; + + return 0; +} +early_param("spec_store_bypass_disable", handle_ssbd); + +/* This is the generic flag used by other architectures */ +static int __init handle_no_ssbd(char *p) +{ + handle_no_stf_barrier(NULL); + return 0; +} +early_param("nospec_store_bypass_disable", handle_no_ssbd); + +static void stf_barrier_enable(bool enable) +{ + if (enable) + do_stf_barrier_fixups(stf_enabled_flush_types); + else + do_stf_barrier_fixups(STF_BARRIER_NONE); + + stf_barrier = enable; +} + +void setup_stf_barrier(void) +{ + enum stf_barrier_type type; + bool enable, hv; + + hv = cpu_has_feature(CPU_FTR_HVMODE); + + /* Default to fallback in case fw-features are not available */ + if (cpu_has_feature(CPU_FTR_ARCH_300)) + type = STF_BARRIER_EIEIO; + else if (cpu_has_feature(CPU_FTR_ARCH_207S)) + type = STF_BARRIER_SYNC_ORI; + else if (cpu_has_feature(CPU_FTR_ARCH_206)) + type = STF_BARRIER_FALLBACK; + else + type = STF_BARRIER_NONE; + + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && + (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || + (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv)); + + if (type == STF_BARRIER_FALLBACK) { + pr_info("stf-barrier: fallback barrier available\n"); + } else if (type == STF_BARRIER_SYNC_ORI) { + pr_info("stf-barrier: hwsync barrier available\n"); + } else if (type == STF_BARRIER_EIEIO) { + pr_info("stf-barrier: eieio barrier available\n"); + } + + stf_enabled_flush_types = type; + + if (!no_stf_barrier) + stf_barrier_enable(enable); +} + +ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) +{ + if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) { + const char *type; + switch (stf_enabled_flush_types) { + case STF_BARRIER_EIEIO: + type = "eieio"; + break; + case STF_BARRIER_SYNC_ORI: + type = "hwsync"; + break; + case STF_BARRIER_FALLBACK: + type = "fallback"; + break; + default: + type = "unknown"; + } + return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type); + } + + if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && + !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) + return sprintf(buf, "Not affected\n"); + + return sprintf(buf, "Vulnerable\n"); +} + +#ifdef CONFIG_DEBUG_FS +static int stf_barrier_set(void *data, u64 val) +{ + bool enable; + + if (val == 1) + enable = true; + else if (val == 0) + enable = false; + else + return -EINVAL; + + /* Only do anything if we're changing state */ + if (enable != stf_barrier) + stf_barrier_enable(enable); + + return 0; +} + +static int stf_barrier_get(void *data, u64 *val) +{ + *val = stf_barrier ? 
1 : 0; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n"); + +static __init int stf_barrier_debugfs_init(void) +{ + debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier); + return 0; +} +device_initcall(stf_barrier_debugfs_init); +#endif /* CONFIG_DEBUG_FS */ diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 2e3bc16d02b2..008447664643 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -242,14 +242,6 @@ static int show_cpuinfo(struct seq_file *m, void *v) unsigned short maj; unsigned short min; - /* We only show online cpus: disable preempt (overzealous, I - * knew) to prevent cpu going down. */ - preempt_disable(); - if (!cpu_online(cpu_id)) { - preempt_enable(); - return 0; - } - #ifdef CONFIG_SMP pvr = per_cpu(cpu_pvr, cpu_id); #else @@ -358,9 +350,6 @@ static int show_cpuinfo(struct seq_file *m, void *v) #ifdef CONFIG_SMP seq_printf(m, "\n"); #endif - - preempt_enable(); - /* If this is the last cpu, print the summary */ if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) show_cpuinfo_summary(m); @@ -704,6 +693,30 @@ int check_legacy_ioport(unsigned long base_port) } EXPORT_SYMBOL(check_legacy_ioport); +static int ppc_panic_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + /* + * If firmware-assisted dump has been registered then trigger + * firmware-assisted dump and let firmware handle everything else. + */ + crash_fadump(NULL, ptr); + ppc_md.panic(ptr); /* May not return */ + return NOTIFY_DONE; +} + +static struct notifier_block ppc_panic_block = { + .notifier_call = ppc_panic_event, + .priority = INT_MIN /* may not return; must be done last */ +}; + +void __init setup_panic(void) +{ + if (!ppc_md.panic) + return; + atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block); +} + #ifdef CONFIG_CHECK_CACHE_COHERENCY /* * For platforms that have configurable cache-coherency. This function @@ -848,6 +861,9 @@ void __init setup_arch(char **cmdline_p) /* Probe the machine type, establish ppc_md. */ probe_machine(); + /* Setup panic notifier if requested by the platform. */ + setup_panic(); + /* * Configure ppc_md.power_save (ppc32 only, 64-bit machines do * it from their respective probe() function. @@ -899,6 +915,8 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_PPC_MM_SLICES #ifdef CONFIG_PPC64 init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64; +#elif defined(CONFIG_PPC_8xx) + init_mm.context.addr_limit = DEFAULT_MAP_WINDOW; #else #error "context.addr_limit not initialized." 
#endif diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index b89c6aac48c9..0618aa61b26a 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -38,6 +38,7 @@ #include #include +#include #include #include #include @@ -784,3 +785,129 @@ static int __init disable_hardlockup_detector(void) return 0; } early_initcall(disable_hardlockup_detector); + +#ifdef CONFIG_PPC_BOOK3S_64 +static enum l1d_flush_type enabled_flush_types; +static void *l1d_flush_fallback_area; +static bool no_rfi_flush; +bool rfi_flush; + +static int __init handle_no_rfi_flush(char *p) +{ + pr_info("rfi-flush: disabled on command line."); + no_rfi_flush = true; + return 0; +} +early_param("no_rfi_flush", handle_no_rfi_flush); + +/* + * The RFI flush is not KPTI, but because users will see doco that says to use + * nopti we hijack that option here to also disable the RFI flush. + */ +static int __init handle_no_pti(char *p) +{ + pr_info("rfi-flush: disabling due to 'nopti' on command line.\n"); + handle_no_rfi_flush(NULL); + return 0; +} +early_param("nopti", handle_no_pti); + +static void do_nothing(void *unused) +{ + /* + * We don't need to do the flush explicitly, just enter+exit kernel is + * sufficient, the RFI exit handlers will do the right thing. + */ +} + +void rfi_flush_enable(bool enable) +{ + if (enable) { + do_rfi_flush_fixups(enabled_flush_types); + on_each_cpu(do_nothing, NULL, 1); + } else + do_rfi_flush_fixups(L1D_FLUSH_NONE); + + rfi_flush = enable; +} + +static void __ref init_fallback_flush(void) +{ + u64 l1d_size, limit; + int cpu; + + /* Only allocate the fallback flush area once (at boot time). */ + if (l1d_flush_fallback_area) + return; + + l1d_size = ppc64_caches.l1d.size; + limit = min(safe_stack_limit(), ppc64_rma_size); + + /* + * Align to L1d size, and size it at 2x L1d size, to catch possible + * hardware prefetch runoff. We don't have a recipe for load patterns to + * reliably avoid the prefetcher. + */ + l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit)); + memset(l1d_flush_fallback_area, 0, l1d_size * 2); + + for_each_possible_cpu(cpu) { + paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area; + paca[cpu].l1d_flush_size = l1d_size; + } +} + +void setup_rfi_flush(enum l1d_flush_type types, bool enable) +{ + if (types & L1D_FLUSH_FALLBACK) { + pr_info("rfi-flush: fallback displacement flush available\n"); + init_fallback_flush(); + } + + if (types & L1D_FLUSH_ORI) + pr_info("rfi-flush: ori type flush available\n"); + + if (types & L1D_FLUSH_MTTRIG) + pr_info("rfi-flush: mttrig type flush available\n"); + + enabled_flush_types = types; + + if (!no_rfi_flush) + rfi_flush_enable(enable); +} + +#ifdef CONFIG_DEBUG_FS +static int rfi_flush_set(void *data, u64 val) +{ + bool enable; + + if (val == 1) + enable = true; + else if (val == 0) + enable = false; + else + return -EINVAL; + + /* Only do anything if we're changing state */ + if (enable != rfi_flush) + rfi_flush_enable(enable); + + return 0; +} + +static int rfi_flush_get(void *data, u64 *val) +{ + *val = rfi_flush ? 
1 : 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
+
+static __init int rfi_flush_debugfs_init(void)
+{
+ debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
+ return 0;
+}
+device_initcall(rfi_flush_debugfs_init);
+#endif
+#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index e9436c5e1e09..3d7539b90010 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -103,7 +103,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
 static void do_signal(struct task_struct *tsk)
 {
 sigset_t *oldset = sigmask_to_save();
- struct ksignal ksig;
+ struct ksignal ksig = { .sig = 0 };
 int ret;
 int is32 = is_32bit_task();
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 13c9dcdcba69..ac2e5e56a9f0 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -182,6 +182,12 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
 }
 raw_local_irq_restore(flags);
+ /*
+ * system_reset_exception handles debugger, crash dump, panic, for 0x100
+ */
+ if (TRAP(regs) == 0x100)
+ return;
+
 crash_fadump(regs, "die oops");

 if (kexec_should_crash(current))
@@ -246,8 +252,13 @@ void die(const char *str, struct pt_regs *regs, long err)
 {
 unsigned long flags;

- if (debugger(regs))
- return;
+ /*
+ * system_reset_exception handles debugger, crash dump, panic, for 0x100
+ */
+ if (TRAP(regs) != 0x100) {
+ if (debugger(regs))
+ return;
+ }

 flags = oops_begin(regs);
 if (__die(str, regs, err))
@@ -336,7 +347,7 @@ void system_reset_exception(struct pt_regs *regs)
 * No debugger or crash dump registered, print logs then
 * panic.
 */
- __die("System Reset", regs, SIGABRT);
+ die("System Reset", regs, SIGABRT);
 mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
@@ -1379,6 +1390,22 @@ void facility_unavailable_exception(struct pt_regs *regs)
 value = mfspr(SPRN_FSCR);
 status = value >> 56;
+ if ((hv || status >= 2) &&
+ (status < ARRAY_SIZE(facility_strings)) &&
+ facility_strings[status])
+ facility = facility_strings[status];
+
+ /* We should not have taken this interrupt in kernel */
+ if (!user_mode(regs)) {
+ pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
+ facility, status, regs->nip);
+ die("Unexpected facility unavailable exception", regs, SIGABRT);
+ }
+
+ /* We restore the interrupt state now */
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+
 if (status == FSCR_DSCR_LG) {
 /*
 * User is accessing the DSCR register using the problem
@@ -1445,25 +1472,11 @@ void facility_unavailable_exception(struct pt_regs *regs)
 return;
 }

- if ((hv || status >= 2) &&
- (status < ARRAY_SIZE(facility_strings)) &&
- facility_strings[status])
- facility = facility_strings[status];
-
- /* We restore the interrupt state now */
- if (!arch_irq_disabled_regs(regs))
- local_irq_enable();
-
 pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n", hv ?
"Hypervisor " : "", facility, status, regs->nip, regs->msr); out: - if (user_mode(regs)) { - _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); - return; - } - - die("Unexpected facility unavailable exception", regs, SIGABRT); + _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); } #endif diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 0494e1566ee2..c89ffb88fa3b 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -132,6 +132,29 @@ SECTIONS /* Read-only data */ RO_DATA(PAGE_SIZE) +#ifdef CONFIG_PPC64 + . = ALIGN(8); + __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) { + __start___stf_entry_barrier_fixup = .; + *(__stf_entry_barrier_fixup) + __stop___stf_entry_barrier_fixup = .; + } + + . = ALIGN(8); + __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) { + __start___stf_exit_barrier_fixup = .; + *(__stf_exit_barrier_fixup) + __stop___stf_exit_barrier_fixup = .; + } + + . = ALIGN(8); + __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) { + __start___rfi_flush_fixup = .; + *(__rfi_flush_fixup) + __stop___rfi_flush_fixup = .; + } +#endif + EXCEPTION_TABLE(0) NOTES :kernel :notes diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c index 57190f384f63..ce848ff84edd 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c @@ -276,9 +276,12 @@ void arch_touch_nmi_watchdog(void) { unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000; int cpu = smp_processor_id(); + u64 tb = get_tb(); - if (get_tb() - per_cpu(wd_timer_tb, cpu) >= ticks) - watchdog_timer_interrupt(cpu); + if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) { + per_cpu(wd_timer_tb, cpu) = tb; + wd_smp_clear_cpu_pending(cpu, tb); + } } EXPORT_SYMBOL(arch_touch_nmi_watchdog); diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index b12b8eb39c29..648160334abf 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -68,7 +68,7 @@ config KVM_BOOK3S_64 select KVM_BOOK3S_64_HANDLER select KVM select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE - select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_SERIES || PPC_POWERNV) + select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV) ---help--- Support running unmodified book3s_64 and book3s_32 guest kernels in virtual machines on book3s_64 host processors. diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 29ebe2fd5867..a93d719edc90 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -235,6 +235,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, gpte->may_read = true; gpte->may_write = true; gpte->page_size = MMU_PAGE_4K; + gpte->wimg = HPTE_R_M; return 0; } diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 59247af5fd45..df9b53f40b1e 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -65,11 +65,17 @@ struct kvm_resize_hpt { u32 order; /* These fields protected by kvm->lock */ + + /* Possible values and their usage: + * <0 an error occurred during allocation, + * -EBUSY allocation is in the progress, + * 0 allocation made successfuly. + */ int error; - bool prepare_done; - /* Private to the work thread, until prepare_done is true, - * then protected by kvm->resize_hpt_sem */ + /* Private to the work thread, until error != -EBUSY, + * then protected by kvm->lock. 
+ */ struct kvm_hpt_info hpt; }; @@ -159,8 +165,6 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) * Reset all the reverse-mapping chains for all memslots */ kvmppc_rmap_reset(kvm); - /* Ensure that each vcpu will flush its TLB on next entry. */ - cpumask_setall(&kvm->arch.need_tlb_flush); err = 0; goto out; } @@ -176,6 +180,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) kvmppc_set_hpt(kvm, &info); out: + if (err == 0) + /* Ensure that each vcpu will flush its TLB on next entry. */ + cpumask_setall(&kvm->arch.need_tlb_flush); + mutex_unlock(&kvm->lock); return err; } @@ -1340,12 +1348,8 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize, } new_pteg = hash & new_hash_mask; - if (vpte & HPTE_V_SECONDARY) { - BUG_ON(~pteg != (hash & old_hash_mask)); - new_pteg = ~new_pteg; - } else { - BUG_ON(pteg != (hash & old_hash_mask)); - } + if (vpte & HPTE_V_SECONDARY) + new_pteg = ~hash & new_hash_mask; new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP); new_hptep = (__be64 *)(new->virt + (new_idx << 4)); @@ -1424,16 +1428,20 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize) static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) { - BUG_ON(kvm->arch.resize_hpt != resize); + if (WARN_ON(!mutex_is_locked(&kvm->lock))) + return; if (!resize) return; - if (resize->hpt.virt) - kvmppc_free_hpt(&resize->hpt); + if (resize->error != -EBUSY) { + if (resize->hpt.virt) + kvmppc_free_hpt(&resize->hpt); + kfree(resize); + } - kvm->arch.resize_hpt = NULL; - kfree(resize); + if (kvm->arch.resize_hpt == resize) + kvm->arch.resize_hpt = NULL; } static void resize_hpt_prepare_work(struct work_struct *work) @@ -1442,17 +1450,41 @@ static void resize_hpt_prepare_work(struct work_struct *work) struct kvm_resize_hpt, work); struct kvm *kvm = resize->kvm; - int err; - - resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", - resize->order); + int err = 0; - err = resize_hpt_allocate(resize); + if (WARN_ON(resize->error != -EBUSY)) + return; mutex_lock(&kvm->lock); + /* Request is still current? */ + if (kvm->arch.resize_hpt == resize) { + /* We may request large allocations here: + * do not sleep with kvm->lock held for a while. + */ + mutex_unlock(&kvm->lock); + + resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", + resize->order); + + err = resize_hpt_allocate(resize); + + /* We have strict assumption about -EBUSY + * when preparing for HPT resize. + */ + if (WARN_ON(err == -EBUSY)) + err = -EINPROGRESS; + + mutex_lock(&kvm->lock); + /* It is possible that kvm->arch.resize_hpt != resize + * after we grab kvm->lock again. + */ + } + resize->error = err; - resize->prepare_done = true; + + if (kvm->arch.resize_hpt != resize) + resize_hpt_release(kvm, resize); mutex_unlock(&kvm->lock); } @@ -1477,14 +1509,12 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, if (resize) { if (resize->order == shift) { - /* Suitable resize in progress */ - if (resize->prepare_done) { - ret = resize->error; - if (ret != 0) - resize_hpt_release(kvm, resize); - } else { + /* Suitable resize in progress? 
*/ + ret = resize->error; + if (ret == -EBUSY) ret = 100; /* estimated time in ms */ - } + else if (ret) + resize_hpt_release(kvm, resize); goto out; } @@ -1504,6 +1534,8 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, ret = -ENOMEM; goto out; } + + resize->error = -EBUSY; resize->order = shift; resize->kvm = kvm; INIT_WORK(&resize->work, resize_hpt_prepare_work); @@ -1558,16 +1590,12 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm, if (!resize || (resize->order != shift)) goto out; - ret = -EBUSY; - if (!resize->prepare_done) - goto out; - ret = resize->error; - if (ret != 0) + if (ret) goto out; ret = resize_hpt_rehash(resize); - if (ret != 0) + if (ret) goto out; resize_hpt_pivot(resize); diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index c5d7435455f1..27a41695fcfd 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -19,6 +19,9 @@ #include #include +static void mark_pages_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot, + unsigned long gfn, unsigned int order); + /* * Supported radix tree geometry. * Like p9, we support either 5 or 9 bits at the first (lowest) level, @@ -195,6 +198,12 @@ static void kvmppc_pte_free(pte_t *ptep) kmem_cache_free(kvm_pte_cache, ptep); } +/* Like pmd_huge() and pmd_large(), but works regardless of config options */ +static inline int pmd_is_leaf(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_PTE); +} + static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, unsigned int level, unsigned long mmu_seq) { @@ -219,7 +228,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, else new_pmd = pmd_alloc_one(kvm->mm, gpa); - if (level == 0 && !(pmd && pmd_present(*pmd))) + if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd))) new_ptep = kvmppc_pte_alloc(); /* Check if we might have been invalidated; let the guest retry if so */ @@ -244,12 +253,30 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, new_pmd = NULL; } pmd = pmd_offset(pud, gpa); - if (pmd_large(*pmd)) { - /* Someone else has instantiated a large page here; retry */ - ret = -EAGAIN; - goto out_unlock; - } - if (level == 1 && !pmd_none(*pmd)) { + if (pmd_is_leaf(*pmd)) { + unsigned long lgpa = gpa & PMD_MASK; + + /* + * If we raced with another CPU which has just put + * a 2MB pte in after we saw a pte page, try again. + */ + if (level == 0 && !new_ptep) { + ret = -EAGAIN; + goto out_unlock; + } + /* Valid 2MB page here already, remove it */ + old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd), + ~0UL, 0, lgpa, PMD_SHIFT); + kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT); + if (old & _PAGE_DIRTY) { + unsigned long gfn = lgpa >> PAGE_SHIFT; + struct kvm_memory_slot *memslot; + memslot = gfn_to_memslot(kvm, gfn); + if (memslot) + mark_pages_dirty(kvm, memslot, gfn, + PMD_SHIFT - PAGE_SHIFT); + } + } else if (level == 1 && !pmd_none(*pmd)) { /* * There's a page table page here, but we wanted * to install a large page. 
Tell the caller and let @@ -412,28 +439,24 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, } else { page = pages[0]; pfn = page_to_pfn(page); - if (PageHuge(page)) { - page = compound_head(page); - pte_size <<= compound_order(page); + if (PageCompound(page)) { + pte_size <<= compound_order(compound_head(page)); /* See if we can insert a 2MB large-page PTE here */ if (pte_size >= PMD_SIZE && - (gpa & PMD_MASK & PAGE_MASK) == - (hva & PMD_MASK & PAGE_MASK)) { + (gpa & (PMD_SIZE - PAGE_SIZE)) == + (hva & (PMD_SIZE - PAGE_SIZE))) { level = 1; pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1); } } /* See if we can provide write access */ if (writing) { - /* - * We assume gup_fast has set dirty on the host PTE. - */ pgflags |= _PAGE_WRITE; } else { local_irq_save(flags); ptep = find_current_mm_pte(current->mm->pgd, hva, NULL, NULL); - if (ptep && pte_write(*ptep) && pte_dirty(*ptep)) + if (ptep && pte_write(*ptep)) pgflags |= _PAGE_WRITE; local_irq_restore(flags); } @@ -459,18 +482,15 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, pte = pfn_pte(pfn, __pgprot(pgflags)); ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq); } - if (ret == 0 || ret == -EAGAIN) - ret = RESUME_GUEST; if (page) { - /* - * We drop pages[0] here, not page because page might - * have been set to the head page of a compound, but - * we have to drop the reference on the correct tail - * page to match the get inside gup() - */ - put_page(pages[0]); + if (!ret && (pgflags & _PAGE_WRITE)) + set_page_dirty_lock(page); + put_page(page); } + + if (ret == 0 || ret == -EAGAIN) + ret = RESUME_GUEST; return ret; } @@ -676,7 +696,7 @@ void kvmppc_free_radix(struct kvm *kvm) continue; pmd = pmd_offset(pud, 0); for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) { - if (pmd_huge(*pmd)) { + if (pmd_is_leaf(*pmd)) { pmd_clear(pmd); continue; } diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 4dffa611376d..e14cec6bc339 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -433,7 +433,7 @@ long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, /* This only handles v2 IOMMU type, v1 is handled via ioctl() */ return H_TOO_HARD; - if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa))) + if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa))) return H_HARDWARE; if (mm_iommu_mapped_inc(mem)) diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index c32e9bfe75b1..648cf6c01348 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -262,7 +262,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, if (!mem) return H_TOO_HARD; - if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa))) + if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift, + &hpa))) return H_HARDWARE; pua = (void *) vmalloc_to_phys(pua); @@ -431,7 +432,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K); if (mem) - prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0; + prereg = mm_iommu_ua_to_hpa_rm(mem, ua, + IOMMU_PAGE_SHIFT_4K, &tces) == 0; } if (!prereg) { diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 8d43cf205d34..377d1420bd02 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -999,8 +999,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) struct kvm *kvm = 
vcpu->kvm; struct kvm_vcpu *tvcpu; - if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return EMULATE_FAIL; if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE) return RESUME_GUEST; if (get_op(inst) != 31) @@ -1050,6 +1048,7 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +/* Called with vcpu->arch.vcore->lock held */ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, struct task_struct *tsk) { @@ -1169,7 +1168,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, swab32(vcpu->arch.emul_inst) : vcpu->arch.emul_inst; if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { + /* Need vcore unlocked to call kvmppc_get_last_inst */ + spin_unlock(&vcpu->arch.vcore->lock); r = kvmppc_emulate_debug_inst(run, vcpu); + spin_lock(&vcpu->arch.vcore->lock); } else { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); r = RESUME_GUEST; @@ -1184,8 +1186,13 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, */ case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: r = EMULATE_FAIL; - if ((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) + if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) && + cpu_has_feature(CPU_FTR_ARCH_300)) { + /* Need vcore unlocked to call kvmppc_get_last_inst */ + spin_unlock(&vcpu->arch.vcore->lock); r = kvmppc_emulate_doorbell_instr(vcpu); + spin_lock(&vcpu->arch.vcore->lock); + } if (r == EMULATE_FAIL) { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); r = RESUME_GUEST; @@ -1490,6 +1497,10 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_ARCH_COMPAT: *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); break; + case KVM_REG_PPC_DEC_EXPIRY: + *val = get_reg_val(id, vcpu->arch.dec_expires + + vcpu->arch.vcore->tb_offset); + break; default: r = -EINVAL; break; @@ -1717,6 +1728,10 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_ARCH_COMPAT: r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val)); break; + case KVM_REG_PPC_DEC_EXPIRY: + vcpu->arch.dec_expires = set_reg_val(id, *val) - + vcpu->arch.vcore->tb_offset; + break; default: r = -EINVAL; break; @@ -2832,7 +2847,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) */ trace_hardirqs_on(); - guest_enter(); + guest_enter_irqoff(); srcu_idx = srcu_read_lock(&vc->kvm->srcu); @@ -2840,8 +2855,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) srcu_read_unlock(&vc->kvm->srcu, srcu_idx); - guest_exit(); - trace_hardirqs_off(); set_irq_happened(trap); @@ -2875,6 +2888,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) kvmppc_set_host_core(pcpu); local_irq_enable(); + guest_exit(); /* Let secondaries go back to the offline loop */ for (i = 0; i < controlled_threads; ++i) { @@ -2889,13 +2903,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) /* make sure updates to secondary vcpu structs are visible now */ smp_mb(); + preempt_enable(); + for (sub = 0; sub < core_info.n_subcores; ++sub) { pvc = core_info.vc[sub]; post_guest_process(pvc, pvc == vc); } spin_lock(&vc->lock); - preempt_enable(); out: vc->vcore_state = VCORE_INACTIVE; @@ -3603,15 +3618,17 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) goto up_out; psize = vma_kernel_pagesize(vma); - porder = __ilog2(psize); up_read(¤t->mm->mmap_sem); /* We can handle 4k, 64k or 16M pages in the VRMA */ - err = -EINVAL; - if (!(psize == 0x1000 || psize == 0x10000 || - psize == 0x1000000)) - goto out_srcu; + if (psize >= 0x1000000) + psize = 0x1000000; + 
else if (psize >= 0x10000) + psize = 0x10000; + else + psize = 0x1000; + porder = __ilog2(psize); senc = slb_pgsize_encoding(psize); kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 90644db9d38e..8e0cf8f186df 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -529,6 +529,8 @@ static inline bool is_rm(void) unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; if (xive_enabled()) { if (is_rm()) return xive_rm_h_xirr(vcpu); @@ -541,6 +543,8 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu) unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; vcpu->arch.gpr[5] = get_tb(); if (xive_enabled()) { if (is_rm()) @@ -554,6 +558,8 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu) unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; if (xive_enabled()) { if (is_rm()) return xive_rm_h_ipoll(vcpu, server); @@ -567,6 +573,8 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server) int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, unsigned long mfrr) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; if (xive_enabled()) { if (is_rm()) return xive_rm_h_ipi(vcpu, server, mfrr); @@ -579,6 +587,8 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; if (xive_enabled()) { if (is_rm()) return xive_rm_h_cppr(vcpu, cppr); @@ -591,6 +601,8 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) { + if (!kvmppc_xics_enabled(vcpu)) + return H_TOO_HARD; if (xive_enabled()) { if (is_rm()) return xive_rm_h_eoi(vcpu, xirr); diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 4efe364f1188..4962d537c186 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -447,8 +447,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, for (i = 0; i < npages; ++i) { asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : : "r" (rbvalues[i]), "r" (kvm->arch.lpid)); - trace_tlbie(kvm->arch.lpid, 0, rbvalues[i], - kvm->arch.lpid, 0, 0, 0); } asm volatile("eieio; tlbsync; ptesync" : : : "memory"); kvm->arch.tlbie_lock = 0; @@ -458,8 +456,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, for (i = 0; i < npages; ++i) { asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : : "r" (rbvalues[i]), "r" (0)); - trace_tlbie(kvm->arch.lpid, 1, rbvalues[i], - 0, 0, 0, 0); } asm volatile("ptesync" : : : "memory"); } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 42639fba89e8..663a398449b7 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -78,7 +78,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline) mtmsrd r0,1 /* clear RI in MSR */ mtsrr0 r5 mtsrr1 r6 - RFI + RFI_TO_KERNEL kvmppc_call_hv_entry: ld r4, HSTATE_KVM_VCPU(r13) @@ -187,7 +187,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) mtmsrd r6, 1 /* Clear RI in MSR */ mtsrr0 r8 mtsrr1 r7 - RFI + RFI_TO_KERNEL /* Virtual-mode return */ .Lvirt_return: @@ -308,7 +308,6 @@ kvm_novcpu_exit: stw r12, STACK_SLOT_TRAP(r1) bl 
kvmhv_commence_exit nop - lwz r12, STACK_SLOT_TRAP(r1) b kvmhv_switch_to_host /* @@ -1131,12 +1130,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) ld r0, VCPU_GPR(R0)(r4) ld r4, VCPU_GPR(R4)(r4) - - hrfid + HRFI_TO_GUEST b . secondary_too_late: li r12, 0 + stw r12, STACK_SLOT_TRAP(r1) cmpdi r4, 0 beq 11f stw r12, VCPU_TRAP(r4) @@ -1388,6 +1387,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) blt deliver_guest_interrupt guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ + /* Save more register state */ + mfdar r6 + mfdsisr r7 + std r6, VCPU_DAR(r9) + stw r7, VCPU_DSISR(r9) + /* don't overwrite fault_dar/fault_dsisr if HDSI */ + cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE + beq mc_cont + std r6, VCPU_FAULT_DAR(r9) + stw r7, VCPU_FAULT_DSISR(r9) + + /* See if it is a machine check */ + cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK + beq machine_check_realmode +mc_cont: +#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING + addi r3, r9, VCPU_TB_RMEXIT + mr r4, r9 + bl kvmhv_accumulate_time +#endif #ifdef CONFIG_KVM_XICS /* We are exiting, pull the VP from the XIVE */ lwz r0, VCPU_XIVE_PUSHED(r9) @@ -1425,33 +1444,13 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ eieio 1: #endif /* CONFIG_KVM_XICS */ - /* Save more register state */ - mfdar r6 - mfdsisr r7 - std r6, VCPU_DAR(r9) - stw r7, VCPU_DSISR(r9) - /* don't overwrite fault_dar/fault_dsisr if HDSI */ - cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE - beq mc_cont - std r6, VCPU_FAULT_DAR(r9) - stw r7, VCPU_FAULT_DSISR(r9) - - /* See if it is a machine check */ - cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK - beq machine_check_realmode -mc_cont: -#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING - addi r3, r9, VCPU_TB_RMEXIT - mr r4, r9 - bl kvmhv_accumulate_time -#endif + stw r12, STACK_SLOT_TRAP(r1) mr r3, r12 /* Increment exit count, poke other threads to exit */ bl kvmhv_commence_exit nop ld r9, HSTATE_KVM_VCPU(r13) - lwz r12, VCPU_TRAP(r9) /* Stop others sending VCPU interrupts to this physical CPU */ li r0, -1 @@ -1817,6 +1816,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1) * POWER7/POWER8 guest -> host partition switch code. * We don't have to lock against tlbies but we do * have to coordinate the hardware threads. + * Here STACK_SLOT_TRAP(r1) contains the trap number. */ kvmhv_switch_to_host: /* Secondary threads wait for primary to do partition switch */ @@ -1869,11 +1869,11 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) /* If HMI, call kvmppc_realmode_hmi_handler() */ + lwz r12, STACK_SLOT_TRAP(r1) cmpwi r12, BOOK3S_INTERRUPT_HMI bne 27f bl kvmppc_realmode_hmi_handler nop - li r12, BOOK3S_INTERRUPT_HMI /* * At this point kvmppc_realmode_hmi_handler would have resync-ed * the TB. 
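 * (i.e. the timebase once again holds the host value; we assume the handler restores it through firmware, which this file does not spell out.)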
Hence it is not required to subtract guest timebase @@ -1951,6 +1951,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) li r0, KVM_GUEST_MODE_NONE stb r0, HSTATE_IN_GUEST(r13) + lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */ ld r0, SFS+PPC_LR_STKOFF(r1) addi r1, r1, SFS mtlr r0 diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 69a09444d46e..e2ef16198456 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -60,6 +60,7 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); #define MSR_USER32 MSR_USER #define MSR_USER64 MSR_USER #define HW_PAGE_SIZE PAGE_SIZE +#define HPTE_R_M _PAGE_COHERENT #endif static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) @@ -557,6 +558,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, pte.eaddr = eaddr; pte.vpage = eaddr >> 12; pte.page_size = MMU_PAGE_64K; + pte.wimg = HPTE_R_M; } switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index 42a4b237df5f..34a5adeff084 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S @@ -46,6 +46,9 @@ #define FUNC(name) name +#define RFI_TO_KERNEL RFI +#define RFI_TO_GUEST RFI + .macro INTERRUPT_TRAMPOLINE intno .global kvmppc_trampoline_\intno @@ -141,7 +144,7 @@ kvmppc_handler_skip_ins: GET_SCRATCH0(r13) /* And get back into the code */ - RFI + RFI_TO_KERNEL #endif /* @@ -164,6 +167,6 @@ _GLOBAL_TOC(kvmppc_entry_trampoline) ori r5, r5, MSR_EE mtsrr0 r7 mtsrr1 r6 - RFI + RFI_TO_KERNEL #include "book3s_segment.S" diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 2a2b96d53999..93a180ceefad 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S @@ -156,7 +156,7 @@ no_dcbz32_on: PPC_LL r9, SVCPU_R9(r3) PPC_LL r3, (SVCPU_R3)(r3) - RFI + RFI_TO_GUEST kvmppc_handler_trampoline_enter_end: @@ -407,5 +407,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) cmpwi r12, BOOK3S_INTERRUPT_DOORBELL beqa BOOK3S_INTERRUPT_DOORBELL - RFI + RFI_TO_KERNEL kvmppc_handler_trampoline_exit_end: diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index bf457843e032..0d750d274c4e 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -725,7 +725,8 @@ u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) /* Return the per-cpu state for state saving/migration */ return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | - (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT; + (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT | + (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT; } int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) @@ -1558,7 +1559,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) /* * Restore P and Q. If the interrupt was pending, we - * force both P and Q, which will trigger a resend. + * force Q and !P, which will trigger a resend. * * That means that a guest that had both an interrupt * pending (queued) and Q set will restore with only @@ -1566,7 +1567,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) * is perfectly fine as coalescing interrupts that haven't * been presented yet is always allowed. 
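 * For reference, a minimal sketch of the ESB P/Q states this relies on (our reading of the XIVE model, not spelled out in this file): P=0,Q=0 idle; P=1,Q=0 presented to the guest; P=1,Q=1 presented with a further occurrence queued; P=0,Q=1 queued only, so a subsequent resend re-presents it.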
*/ - if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING) + if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING)) state->old_p = true; if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING) state->old_q = true; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index ee279c7f4802..ecb45361095b 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -758,7 +758,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; - vcpu->arch.dec_expires = ~(u64)0; + vcpu->arch.dec_expires = get_tb(); #ifdef CONFIG_KVM_EXIT_TIMING mutex_init(&vcpu->arch.exit_timing_lock); @@ -1407,7 +1407,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { int r; - sigset_t sigsaved; if (vcpu->mmio_needed) { vcpu->mmio_needed = 0; @@ -1448,16 +1447,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) #endif } - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + kvm_sigset_activate(vcpu); if (run->immediate_exit) r = -EINTR; else r = kvmppc_vcpu_run(run, vcpu); - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); + kvm_sigset_deactivate(vcpu); return r; } diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index c9de03e0c1f1..096d4e4d31e6 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -21,6 +21,7 @@ #include #include #include +#include static int __patch_instruction(unsigned int *addr, unsigned int instr) { @@ -146,11 +147,8 @@ int patch_instruction(unsigned int *addr, unsigned int instr) * During early early boot patch_instruction is called * when text_poke_area is not ready, but we still need * to allow patching. We just do the plain old patching - * We use slab_is_available and per cpu read * via this_cpu_read - * of text_poke_area. 
Per-CPU areas might not be up early - * this can create problems with just using this_cpu_read() */ - if (!slab_is_available() || !this_cpu_read(text_poke_area)) + if (!this_cpu_read(*PTRRELOC(&text_poke_area))) return __patch_instruction(addr, instr); local_irq_save(flags); @@ -304,6 +302,11 @@ int instr_is_relative_branch(unsigned int instr) return instr_is_branch_iform(instr) || instr_is_branch_bform(instr); } +int instr_is_relative_link_branch(unsigned int instr) +{ + return instr_is_relative_branch(instr) && (instr & BRANCH_SET_LINK); +} + static unsigned long branch_iform_target(const unsigned int *instr) { signed long imm; diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 41cf5ae273cf..762a899e85a4 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -23,6 +23,7 @@ #include #include #include +#include #include struct fixup_entry { @@ -55,7 +56,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest, unsigned int *target = (unsigned int *)branch_target(src); /* Branch within the section doesn't need translating */ - if (target < alt_start || target >= alt_end) { + if (target < alt_start || target > alt_end) { instr = translate_branch(dest, src); if (!instr) return 1; @@ -116,6 +117,168 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) } } +#ifdef CONFIG_PPC_BOOK3S_64 +void do_stf_entry_barrier_fixups(enum stf_barrier_type types) +{ + unsigned int instrs[3], *dest; + long *start, *end; + int i; + + start = PTRRELOC(&__start___stf_entry_barrier_fixup), + end = PTRRELOC(&__stop___stf_entry_barrier_fixup); + + instrs[0] = 0x60000000; /* nop */ + instrs[1] = 0x60000000; /* nop */ + instrs[2] = 0x60000000; /* nop */ + + i = 0; + if (types & STF_BARRIER_FALLBACK) { + instrs[i++] = 0x7d4802a6; /* mflr r10 */ + instrs[i++] = 0x60000000; /* branch patched below */ + instrs[i++] = 0x7d4803a6; /* mtlr r10 */ + } else if (types & STF_BARRIER_EIEIO) { + instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */ + } else if (types & STF_BARRIER_SYNC_ORI) { + instrs[i++] = 0x7c0004ac; /* hwsync */ + instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */ + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ + } + + for (i = 0; start < end; start++, i++) { + dest = (void *)start + *start; + + pr_devel("patching dest %lx\n", (unsigned long)dest); + + patch_instruction(dest, instrs[0]); + + if (types & STF_BARRIER_FALLBACK) + patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback, + BRANCH_SET_LINK); + else + patch_instruction(dest + 1, instrs[1]); + + patch_instruction(dest + 2, instrs[2]); + } + + printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i, + (types == STF_BARRIER_NONE) ? "no" : + (types == STF_BARRIER_FALLBACK) ? "fallback" : + (types == STF_BARRIER_EIEIO) ? "eieio" : + (types == (STF_BARRIER_SYNC_ORI)) ? 
"hwsync" + : "unknown"); +} + +void do_stf_exit_barrier_fixups(enum stf_barrier_type types) +{ + unsigned int instrs[6], *dest; + long *start, *end; + int i; + + start = PTRRELOC(&__start___stf_exit_barrier_fixup), + end = PTRRELOC(&__stop___stf_exit_barrier_fixup); + + instrs[0] = 0x60000000; /* nop */ + instrs[1] = 0x60000000; /* nop */ + instrs[2] = 0x60000000; /* nop */ + instrs[3] = 0x60000000; /* nop */ + instrs[4] = 0x60000000; /* nop */ + instrs[5] = 0x60000000; /* nop */ + + i = 0; + if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) { + if (cpu_has_feature(CPU_FTR_HVMODE)) { + instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */ + instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */ + } else { + instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */ + instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */ + } + instrs[i++] = 0x7c0004ac; /* hwsync */ + instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */ + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ + if (cpu_has_feature(CPU_FTR_HVMODE)) { + instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */ + } else { + instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */ + } + } else if (types & STF_BARRIER_EIEIO) { + instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */ + } + + for (i = 0; start < end; start++, i++) { + dest = (void *)start + *start; + + pr_devel("patching dest %lx\n", (unsigned long)dest); + + patch_instruction(dest, instrs[0]); + patch_instruction(dest + 1, instrs[1]); + patch_instruction(dest + 2, instrs[2]); + patch_instruction(dest + 3, instrs[3]); + patch_instruction(dest + 4, instrs[4]); + patch_instruction(dest + 5, instrs[5]); + } + printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i, + (types == STF_BARRIER_NONE) ? "no" : + (types == STF_BARRIER_FALLBACK) ? "fallback" : + (types == STF_BARRIER_EIEIO) ? "eieio" : + (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync" + : "unknown"); +} + + +void do_stf_barrier_fixups(enum stf_barrier_type types) +{ + do_stf_entry_barrier_fixups(types); + do_stf_exit_barrier_fixups(types); +} + +void do_rfi_flush_fixups(enum l1d_flush_type types) +{ + unsigned int instrs[3], *dest; + long *start, *end; + int i; + + start = PTRRELOC(&__start___rfi_flush_fixup), + end = PTRRELOC(&__stop___rfi_flush_fixup); + + instrs[0] = 0x60000000; /* nop */ + instrs[1] = 0x60000000; /* nop */ + instrs[2] = 0x60000000; /* nop */ + + if (types & L1D_FLUSH_FALLBACK) + /* b .+16 to fallback flush */ + instrs[0] = 0x48000010; + + i = 0; + if (types & L1D_FLUSH_ORI) { + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ + instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/ + } + + if (types & L1D_FLUSH_MTTRIG) + instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ + + for (i = 0; start < end; start++, i++) { + dest = (void *)start + *start; + + pr_devel("patching dest %lx\n", (unsigned long)dest); + + patch_instruction(dest, instrs[0]); + patch_instruction(dest + 1, instrs[1]); + patch_instruction(dest + 2, instrs[2]); + } + + printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i, + (types == L1D_FLUSH_NONE) ? "no" : + (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" : + (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) + ? "ori+mttrig type" + : "ori type" : + (types & L1D_FLUSH_MTTRIG) ? 
"mttrig type" + : "unknown"); +} +#endif /* CONFIG_PPC_BOOK3S_64 */ + void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) { long *start, *end; diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S index a787776822d8..0378def28d41 100644 --- a/arch/powerpc/lib/string.S +++ b/arch/powerpc/lib/string.S @@ -12,6 +12,7 @@ #include #include #include +#include .text @@ -23,7 +24,7 @@ _GLOBAL(strncpy) mtctr r5 addi r6,r3,-1 addi r4,r4,-1 - .balign 16 + .balign IFETCH_ALIGN_BYTES 1: lbzu r0,1(r4) cmpwi 0,r0,0 stbu r0,1(r6) @@ -43,7 +44,7 @@ _GLOBAL(strncmp) mtctr r5 addi r5,r3,-1 addi r4,r4,-1 - .balign 16 + .balign IFETCH_ALIGN_BYTES 1: lbzu r3,1(r5) cmpwi 1,r3,0 lbzu r0,1(r4) @@ -77,7 +78,7 @@ _GLOBAL(memchr) beq- 2f mtctr r5 addi r3,r3,-1 - .balign 16 + .balign IFETCH_ALIGN_BYTES 1: lbzu r0,1(r3) cmpw 0,r0,r4 bdnzf 2,1b diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c index f29212e40f40..0be77709446c 100644 --- a/arch/powerpc/mm/8xx_mmu.c +++ b/arch/powerpc/mm/8xx_mmu.c @@ -192,7 +192,7 @@ void set_context(unsigned long id, pgd_t *pgd) mtspr(SPRN_M_TW, __pa(pgd) - offset); /* Update context */ - mtspr(SPRN_M_CASID, id); + mtspr(SPRN_M_CASID, id - 1); /* sync */ mb(); } diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 4797d08581ce..6e1e39035380 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -145,6 +145,11 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address) return __bad_area(regs, address, SEGV_MAPERR); } +static noinline int bad_access(struct pt_regs *regs, unsigned long address) +{ + return __bad_area(regs, address, SEGV_ACCERR); +} + static int do_sigbus(struct pt_regs *regs, unsigned long address, unsigned int fault) { @@ -490,7 +495,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address, good_area: if (unlikely(access_error(is_write, is_exec, vma))) - return bad_area(regs, address); + return bad_access(regs, address); /* * If for any reason at all we couldn't handle the fault, diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 3848af167df9..640cf566e986 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -47,7 +47,8 @@ DEFINE_RAW_SPINLOCK(native_tlbie_lock); -static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) +static inline unsigned long ___tlbie(unsigned long vpn, int psize, + int apsize, int ssize) { unsigned long va; unsigned int penc; @@ -100,7 +101,15 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) : "memory"); break; } - trace_tlbie(0, 0, va, 0, 0, 0, 0); + return va; +} + +static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) +{ + unsigned long rb; + + rb = ___tlbie(vpn, psize, apsize, ssize); + trace_tlbie(0, 0, rb, 0, 0, 0, 0); } static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize) @@ -652,7 +661,7 @@ static void native_hpte_clear(void) if (hpte_v & HPTE_V_VALID) { hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn); hptep->v = 0; - __tlbie(vpn, psize, apsize, ssize); + ___tlbie(vpn, psize, apsize, ssize); } } diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 67ec2e927253..87687e46b48b 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -872,6 +872,12 @@ static void __init htab_initialize(void) /* Using a hypervisor which owns the htab */ htab_address = NULL; _SDR1 = 0; + /* + * On 
POWER9, we need to do a H_REGISTER_PROC_TBL hcall + * to inform the hypervisor that we wish to use the HPT. + */ + if (cpu_has_feature(CPU_FTR_ARCH_300)) + register_process_table(0, 0, 0); #ifdef CONFIG_FA_DUMP /* * If firmware assisted dump is active firmware preserves diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c index 558e9d3891bf..bd022d16745c 100644 --- a/arch/powerpc/mm/hugetlbpage-radix.c +++ b/arch/powerpc/mm/hugetlbpage-radix.c @@ -49,17 +49,28 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; struct hstate *h = hstate_file(file); + int fixed = (flags & MAP_FIXED); + unsigned long high_limit; struct vm_unmapped_area_info info; - if (unlikely(addr > mm->context.addr_limit && addr < TASK_SIZE)) - mm->context.addr_limit = TASK_SIZE; + high_limit = DEFAULT_MAP_WINDOW; + if (addr >= high_limit || (fixed && (addr + len > high_limit))) + high_limit = TASK_SIZE; if (len & ~huge_page_mask(h)) return -EINVAL; - if (len > mm->task_size) + if (len > high_limit) return -ENOMEM; + if (fixed) { + if (addr > high_limit - len) + return -ENOMEM; + } - if (flags & MAP_FIXED) { + if (unlikely(addr > mm->context.addr_limit && + mm->context.addr_limit != TASK_SIZE)) + mm->context.addr_limit = TASK_SIZE; + + if (fixed) { if (prepare_hugepage_range(file, addr, len)) return -EINVAL; return addr; @@ -68,7 +79,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, if (addr) { addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); - if (mm->task_size - len >= addr && + if (high_limit - len >= addr && (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -79,12 +90,9 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = PAGE_SIZE; - info.high_limit = current->mm->mmap_base; + info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW); info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; - if (addr > DEFAULT_MAP_WINDOW) - info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW; - return vm_unmapped_area(&info); } diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 1571a498a33f..4c9e5f9c7a44 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -552,9 +552,11 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, struct hstate *hstate = hstate_file(file); int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate)); +#ifdef CONFIG_PPC_RADIX_MMU if (radix_enabled()) return radix__hugetlb_get_unmapped_area(file, addr, len, pgoff, flags); +#endif return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1); } #endif diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 4362b86ef84c..9c2f83331e5b 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -143,6 +143,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock) start, start + size, rc); return -EFAULT; } + flush_inval_dcache_range(start, start + size); return __add_pages(nid, start_pfn, nr_pages, want_memblock); } @@ -171,6 +172,7 @@ int arch_remove_memory(u64 start, u64 size) /* Remove htab bolted mappings for this section of memory */ start = (unsigned long)__va(start); + flush_inval_dcache_range(start, start + size); ret = remove_section_mapping(start, start + size); /* Ensure all vmalloc mappings are flushed in case they also diff --git 
a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c index 5d78b193fec4..6d476a7b5611 100644 --- a/arch/powerpc/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c @@ -106,22 +106,32 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr, { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; + int fixed = (flags & MAP_FIXED); + unsigned long high_limit; struct vm_unmapped_area_info info; + high_limit = DEFAULT_MAP_WINDOW; + if (addr >= high_limit || (fixed && (addr + len > high_limit))) + high_limit = TASK_SIZE; + + if (len > high_limit) + return -ENOMEM; + if (fixed) { + if (addr > high_limit - len) + return -ENOMEM; + } + if (unlikely(addr > mm->context.addr_limit && mm->context.addr_limit != TASK_SIZE)) mm->context.addr_limit = TASK_SIZE; - if (len > mm->task_size - mmap_min_addr) - return -ENOMEM; - - if (flags & MAP_FIXED) + if (fixed) return addr; if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (mm->task_size - len >= addr && addr >= mmap_min_addr && + if (high_limit - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -129,13 +139,9 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr, info.flags = 0; info.length = len; info.low_limit = mm->mmap_base; + info.high_limit = high_limit; info.align_mask = 0; - if (unlikely(addr > DEFAULT_MAP_WINDOW)) - info.high_limit = mm->context.addr_limit; - else - info.high_limit = DEFAULT_MAP_WINDOW; - return vm_unmapped_area(&info); } @@ -149,37 +155,42 @@ radix__arch_get_unmapped_area_topdown(struct file *filp, struct vm_area_struct *vma; struct mm_struct *mm = current->mm; unsigned long addr = addr0; + int fixed = (flags & MAP_FIXED); + unsigned long high_limit; struct vm_unmapped_area_info info; + high_limit = DEFAULT_MAP_WINDOW; + if (addr >= high_limit || (fixed && (addr + len > high_limit))) + high_limit = TASK_SIZE; + + if (len > high_limit) + return -ENOMEM; + if (fixed) { + if (addr > high_limit - len) + return -ENOMEM; + } + if (unlikely(addr > mm->context.addr_limit && mm->context.addr_limit != TASK_SIZE)) mm->context.addr_limit = TASK_SIZE; - /* requested length too big for entire address space */ - if (len > mm->task_size - mmap_min_addr) - return -ENOMEM; - - if (flags & MAP_FIXED) + if (fixed) return addr; - /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (mm->task_size - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vm_start_gap(vma))) + if (high_limit - len >= addr && addr >= mmap_min_addr && + (!vma || addr + len <= vm_start_gap(vma))) return addr; } info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = max(PAGE_SIZE, mmap_min_addr); - info.high_limit = mm->mmap_base; + info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW); info.align_mask = 0; - if (addr > DEFAULT_MAP_WINDOW) - info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW; - addr = vm_unmapped_area(&info); if (!(addr & ~PAGE_MASK)) return addr; diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index 05e15386d4cb..b94fb62e60fd 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -93,11 +93,11 @@ static int hash__init_new_context(struct mm_struct *mm) return index; /* - * We do switch_slb() early in fork, even before we setup the - * mm->context.addr_limit. 
Default to max task size so that we copy the - * default values to paca which will help us to handle slb miss early. + * In the case of exec, use the default limit, + * otherwise inherit it from the mm we are duplicating. */ - mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64; + if (!mm->context.addr_limit) + mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64; /* * The old code would re-promote on fork, we don't do that when using diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index e0a2d8e806ed..816055927ee4 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c @@ -19,6 +19,7 @@ #include #include #include +#include static DEFINE_MUTEX(mem_list_mutex); @@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t { struct rcu_head rcu; unsigned long used; atomic64_t mapped; + unsigned int pageshift; u64 ua; /* userspace address */ u64 entries; /* number of entries in hpas[] */ u64 *hpas; /* vmalloc'ed */ @@ -126,6 +128,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, { struct mm_iommu_table_group_mem_t *mem; long i, j, ret = 0, locked_entries = 0; + unsigned int pageshift; + unsigned long flags; struct page *page = NULL; mutex_lock(&mem_list_mutex); @@ -160,6 +164,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, goto unlock_exit; } + /* + * For a starting point for a maximum page size calculation + * we use @ua and @entries natural alignment to allow IOMMU pages + * smaller than huge pages but still bigger than PAGE_SIZE. + */ + mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)); mem->hpas = vzalloc(entries * sizeof(mem->hpas[0])); if (!mem->hpas) { kfree(mem); @@ -200,6 +210,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, } } populate: + pageshift = PAGE_SHIFT; + if (PageCompound(page)) { + pte_t *pte; + struct page *head = compound_head(page); + unsigned int compshift = compound_order(head); + + local_irq_save(flags); /* disables as well */ + pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift); + local_irq_restore(flags); + + /* Double check it is still the same pinned page */ + if (pte && pte_page(*pte) == head && + pageshift == compshift) + pageshift = max_t(unsigned int, pageshift, + PAGE_SHIFT); + } + mem->pageshift = min(mem->pageshift, pageshift); mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; } @@ -350,7 +377,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, EXPORT_SYMBOL_GPL(mm_iommu_find); long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, - unsigned long ua, unsigned long *hpa) + unsigned long ua, unsigned int pageshift, unsigned long *hpa) { const long entry = (ua - mem->ua) >> PAGE_SHIFT; u64 *va = &mem->hpas[entry]; @@ -358,6 +385,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, if (entry >= mem->entries) return -EFAULT; + if (pageshift > mem->pageshift) + return -EFAULT; + *hpa = *va | (ua & ~PAGE_MASK); return 0; @@ -365,7 +395,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa); long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, - unsigned long ua, unsigned long *hpa) + unsigned long ua, unsigned int pageshift, unsigned long *hpa) { const long entry = (ua - mem->ua) >> PAGE_SHIFT; void *va = &mem->hpas[entry]; @@ -374,6 +404,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, if (entry >= mem->entries) return -EFAULT; + if (pageshift > 
mem->pageshift) + return -EFAULT; + pa = (void *) vmalloc_to_phys(va); if (!pa) return -EFAULT; diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index 4554d6527682..e2b28b3a512e 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -331,6 +331,20 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm) { pr_hard("initing context for mm @%p\n", mm); +#ifdef CONFIG_PPC_MM_SLICES + if (!mm->context.addr_limit) + mm->context.addr_limit = DEFAULT_MAP_WINDOW; + + /* + * We have MMU_NO_CONTEXT set to be ~0. Hence check + * explicitly against context.id == 0. This ensures that we properly + * initialize context slice details for newly allocated mm's (which will + * have id == 0) and don't alter context slice inherited via fork (which + * will have id != 0). + */ + if (mm->context.id == 0) + slice_set_user_psize(mm, mmu_virtual_psize); +#endif mm->context.id = MMU_NO_CONTEXT; mm->context.active = 0; return 0; @@ -428,8 +442,8 @@ void __init mmu_context_init(void) * -- BenH */ if (mmu_has_feature(MMU_FTR_TYPE_8xx)) { - first_context = 0; - last_context = 15; + first_context = 1; + last_context = 16; no_selective_tlbil = true; } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) { first_context = 1; diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index a51df9ef529d..9fead0796364 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -142,11 +142,6 @@ static void reset_numa_cpu_lookup_table(void) numa_cpu_lookup_table[cpu] = -1; } -static void update_numa_cpu_lookup_table(unsigned int cpu, int node) -{ - numa_cpu_lookup_table[cpu] = node; -} - static void map_cpu_to_node(int cpu, int node) { update_numa_cpu_lookup_table(cpu, node); @@ -551,7 +546,7 @@ static int numa_setup_cpu(unsigned long lcpu) nid = of_node_to_nid_single(cpu); out_present: - if (nid < 0 || !node_online(nid)) + if (nid < 0 || !node_possible(nid)) nid = first_online_node; map_cpu_to_node(lcpu, nid); @@ -892,6 +887,32 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) NODE_DATA(nid)->node_spanned_pages = spanned_pages; } +static void __init find_possible_nodes(void) +{ + struct device_node *rtas; + u32 numnodes, i; + + if (min_common_depth <= 0) + return; + + rtas = of_find_node_by_path("/rtas"); + if (!rtas) + return; + + if (of_property_read_u32_index(rtas, + "ibm,max-associativity-domains", + min_common_depth, &numnodes)) + goto out; + + for (i = 0; i < numnodes; i++) { + if (!node_possible(i)) + node_set(i, node_possible_map); + } + +out: + of_node_put(rtas); +} + void __init initmem_init(void) { int nid, cpu; @@ -905,12 +926,15 @@ void __init initmem_init(void) memblock_dump_all(); /* - * Reduce the possible NUMA nodes to the online NUMA nodes, - * since we do not support node hotplug. This ensures that we - * lower the maximum NUMA node ID to what is actually present. + * Modify the set of possible NUMA nodes to reflect information + * available about the set of online nodes, and the set of nodes + * that we expect to make use of for this platform's affinity + * calculations. 
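+ * As an illustration (property values assumed, not taken from this patch): with min_common_depth == 4, an ibm,max-associativity-domains property of <4 2 2 10 32> reads back numnodes == 32 in find_possible_nodes(), so nodes 0..31 become possible even if only a few are online at boot.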
*/ nodes_and(node_possible_map, node_possible_map, node_online_map); + find_possible_nodes(); + for_each_online_node(nid) { unsigned long start_pfn, end_pfn; @@ -1251,6 +1275,40 @@ static long vphn_get_associativity(unsigned long cpu, return rc; } +static inline int find_and_online_cpu_nid(int cpu) +{ + __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0}; + int new_nid; + + /* Use associativity from first thread for all siblings */ + vphn_get_associativity(cpu, associativity); + new_nid = associativity_to_nid(associativity); + if (new_nid < 0 || !node_possible(new_nid)) + new_nid = first_online_node; + + if (NODE_DATA(new_nid) == NULL) { +#ifdef CONFIG_MEMORY_HOTPLUG + /* + * Need to ensure that NODE_DATA is initialized for a node from + * available memory (see memblock_alloc_try_nid). If unable to + * init the node, then default to nearest node that has memory + * installed. + */ + if (try_online_node(new_nid)) + new_nid = first_online_node; +#else + /* + * Default to using the nearest node that has memory installed. + * Otherwise, it would be necessary to patch the kernel MM code + * to deal with more memoryless-node error conditions. + */ + new_nid = first_online_node; +#endif + } + + return new_nid; +} + /* * Update the CPU maps and sysfs entries for a single CPU when its NUMA * characteristics change. This function doesn't perform any locking and is @@ -1318,7 +1376,6 @@ int numa_update_cpu_topology(bool cpus_locked) { unsigned int cpu, sibling, changed = 0; struct topology_update_data *updates, *ud; - __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0}; cpumask_t updated_cpus; struct device *dev; int weight, new_nid, i = 0; @@ -1353,11 +1410,7 @@ int numa_update_cpu_topology(bool cpus_locked) continue; } - /* Use associativity from first thread for all siblings */ - vphn_get_associativity(cpu, associativity); - new_nid = associativity_to_nid(associativity); - if (new_nid < 0 || !node_online(new_nid)) - new_nid = first_online_node; + new_nid = find_and_online_cpu_nid(cpu); if (new_nid == numa_cpu_lookup_table[cpu]) { cpumask_andnot(&cpu_associativity_changes_mask, diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 39c252b54d16..17ae5c15a9e0 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -169,6 +170,16 @@ void radix__mark_rodata_ro(void) { unsigned long start, end; + /* + * mark_rodata_ro() will mark itself as !writable at some point. + * Due to the DD1 workaround in radix__pte_update(), we'll end up with + * an invalid pte and the system will crash quite severely. 
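+ * (Our reading of the workaround: radix__pte_update() on DD1 clears the pte and only then installs the new value, so the mapping covering this very code can be transiently invalid; a sketch of the failure mode, not something stated here.)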
+ */ + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) { + pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n"); + return; + } + start = (unsigned long)_stext; end = (unsigned long)__init_begin; @@ -661,6 +672,30 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud) pud_clear(pud); } +struct change_mapping_params { + pte_t *pte; + unsigned long start; + unsigned long end; + unsigned long aligned_start; + unsigned long aligned_end; +}; + +static int stop_machine_change_mapping(void *data) +{ + struct change_mapping_params *params = + (struct change_mapping_params *)data; + + if (!data) + return -1; + + spin_unlock(&init_mm.page_table_lock); + pte_clear(&init_mm, params->aligned_start, params->pte); + create_physical_mapping(params->aligned_start, params->start); + create_physical_mapping(params->end, params->aligned_end); + spin_lock(&init_mm.page_table_lock); + return 0; +} + static void remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end) { @@ -689,6 +724,52 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr, } } +/* + * Helper to clear the PTE and potentially split the mapping. + */ +static void split_kernel_mapping(unsigned long addr, unsigned long end, + unsigned long size, pte_t *pte) +{ + unsigned long mask = ~(size - 1); + unsigned long aligned_start = addr & mask; + unsigned long aligned_end = addr + size; + struct change_mapping_params params; + bool split_region = false; + + if ((end - addr) < size) { + /* + * We're going to clear the PTE, but have not yet + * flushed the mapping: time to remap and flush. The + * effects are visible outside the processor, and + * if we are running in code close to the + * mapping we cleared, we are in trouble. + */ + if (overlaps_kernel_text(aligned_start, addr) || + overlaps_kernel_text(end, aligned_end)) { + /* + * Hack, just return, don't pte_clear + */ + WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel " "text, not splitting\n", addr, end); + return; + } + split_region = true; + } + + if (split_region) { + params.pte = pte; + params.start = addr; + params.end = end; + params.aligned_start = addr & ~(size - 1); + params.aligned_end = min_t(unsigned long, aligned_end, + (unsigned long)__va(memblock_end_of_DRAM())); + stop_machine(stop_machine_change_mapping, &params, NULL); + return; + } + + pte_clear(&init_mm, addr, pte); +} + static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end) { @@ -704,13 +785,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr, continue; if (pmd_huge(*pmd)) { - if (!IS_ALIGNED(addr, PMD_SIZE) || - !IS_ALIGNED(next, PMD_SIZE)) { - WARN_ONCE(1, "%s: unaligned range\n", __func__); - continue; - } - - pte_clear(&init_mm, addr, (pte_t *)pmd); + split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd); continue; } @@ -735,13 +810,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr, continue; if (pud_huge(*pud)) { - if (!IS_ALIGNED(addr, PUD_SIZE) || - !IS_ALIGNED(next, PUD_SIZE)) { - WARN_ONCE(1, "%s: unaligned range\n", __func__); - continue; - } - - pte_clear(&init_mm, addr, (pte_t *)pud); + split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud); continue; } @@ -767,13 +836,7 @@ static void remove_pagetable(unsigned long start, unsigned long end) continue; if (pgd_huge(*pgd)) { - if (!IS_ALIGNED(addr, PGDIR_SIZE) || - !IS_ALIGNED(next, PGDIR_SIZE)) { - WARN_ONCE(1, "%s: unaligned range\n", __func__); - continue; - } - - pte_clear(&init_mm, addr, (pte_t *)pgd); + split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd); 
continue; } diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index ac0717a90ca6..12f95b1f7d07 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -483,6 +483,8 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0, if (old & PATB_HR) { asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : : "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); + asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : : + "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1); } else { asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : : diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 13cfe413b40d..6d9bf014b3e7 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -62,14 +62,14 @@ static inline void slb_shadow_update(unsigned long ea, int ssize, * updating it. No write barriers are needed here, provided * we only update the current CPU's SLB shadow buffer. */ - p->save_area[index].esid = 0; - p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags)); - p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index)); + WRITE_ONCE(p->save_area[index].esid, 0); + WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags))); + WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index))); } static inline void slb_shadow_clear(enum slb_index index) { - get_slb_shadow()->save_area[index].esid = 0; + WRITE_ONCE(get_slb_shadow()->save_area[index].esid, 0); } static inline void create_shadowed_slbe(unsigned long ea, int ssize, diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 45f6740dd407..8baaa6c6f21c 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -73,10 +73,12 @@ static void slice_range_to_mask(unsigned long start, unsigned long len, unsigned long end = start + len - 1; ret->low_slices = 0; - bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); + if (SLICE_NUM_HIGH) + bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); if (start < SLICE_LOW_TOP) { - unsigned long mend = min(end, (SLICE_LOW_TOP - 1)); + unsigned long mend = min(end, + (unsigned long)(SLICE_LOW_TOP - 1)); ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1)) - (1u << GET_LOW_SLICE_INDEX(start)); @@ -96,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, { struct vm_area_struct *vma; - if ((mm->task_size - len) < addr) + if ((mm->context.addr_limit - len) < addr) return 0; vma = find_vma(mm, addr); return (!vma || (addr + len) <= vm_start_gap(vma)); @@ -113,11 +115,13 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice) unsigned long start = slice << SLICE_HIGH_SHIFT; unsigned long end = start + (1ul << SLICE_HIGH_SHIFT); +#ifdef CONFIG_PPC64 /* Hack, so that each addresses is controlled by exactly one * of the high or low area bitmaps, the first high area starts * at 4GB, not 0 */ if (start == 0) start = SLICE_LOW_TOP; +#endif return !slice_area_is_free(mm, start, end - start); } @@ -127,13 +131,14 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret) unsigned long i; ret->low_slices = 0; - bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); + if (SLICE_NUM_HIGH) + bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); for (i = 0; i < SLICE_NUM_LOW; i++) if (!slice_low_has_vma(mm, i)) ret->low_slices |= 1u << i; - if (mm->task_size <= SLICE_LOW_TOP) + if (mm->context.addr_limit <= SLICE_LOW_TOP) return; for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) @@ -149,7 +154,8 @@ static 
void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma u64 lpsizes; ret->low_slices = 0; - bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); + if (SLICE_NUM_HIGH) + bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); lpsizes = mm->context.low_slices_psize; for (i = 0; i < SLICE_NUM_LOW; i++) @@ -171,6 +177,10 @@ static int slice_check_fit(struct mm_struct *mm, DECLARE_BITMAP(result, SLICE_NUM_HIGH); unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit); + if (!SLICE_NUM_HIGH) + return (mask.low_slices & available.low_slices) == + mask.low_slices; + bitmap_and(result, mask.high_slices, available.high_slices, slice_count); @@ -180,6 +190,7 @@ static int slice_check_fit(struct mm_struct *mm, static void slice_flush_segments(void *parm) { +#ifdef CONFIG_PPC64 struct mm_struct *mm = parm; unsigned long flags; @@ -191,6 +202,7 @@ static void slice_flush_segments(void *parm) local_irq_save(flags); slb_flush_and_rebolt(); local_irq_restore(flags); +#endif } static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize) @@ -379,21 +391,21 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len, static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src) { - DECLARE_BITMAP(result, SLICE_NUM_HIGH); - dst->low_slices |= src->low_slices; - bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH); - bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH); + if (!SLICE_NUM_HIGH) + return; + bitmap_or(dst->high_slices, dst->high_slices, src->high_slices, + SLICE_NUM_HIGH); } static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src) { - DECLARE_BITMAP(result, SLICE_NUM_HIGH); - dst->low_slices &= ~src->low_slices; - bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH); - bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH); + if (!SLICE_NUM_HIGH) + return; + bitmap_andnot(dst->high_slices, dst->high_slices, src->high_slices, + SLICE_NUM_HIGH); } #ifdef CONFIG_PPC_64K_PAGES @@ -412,61 +424,62 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, struct slice_mask compat_mask; int fixed = (flags & MAP_FIXED); int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); + unsigned long page_size = 1UL << pshift; struct mm_struct *mm = current->mm; unsigned long newaddr; unsigned long high_limit; - /* - * Check if we need to expland slice area. 
- */ - if (unlikely(addr > mm->context.addr_limit && - mm->context.addr_limit != TASK_SIZE)) { - mm->context.addr_limit = TASK_SIZE; + high_limit = DEFAULT_MAP_WINDOW; + if (addr >= high_limit || (fixed && (addr + len > high_limit))) + high_limit = TASK_SIZE; + + if (len > high_limit) + return -ENOMEM; + if (len & (page_size - 1)) + return -EINVAL; + if (fixed) { + if (addr & (page_size - 1)) + return -EINVAL; + if (addr > high_limit - len) + return -ENOMEM; + } + + if (high_limit > mm->context.addr_limit) { + mm->context.addr_limit = high_limit; on_each_cpu(slice_flush_segments, mm, 1); } - /* - * This mmap request can allocate upt to 512TB - */ - if (addr > DEFAULT_MAP_WINDOW) - high_limit = mm->context.addr_limit; - else - high_limit = DEFAULT_MAP_WINDOW; + /* * init different masks */ mask.low_slices = 0; - bitmap_zero(mask.high_slices, SLICE_NUM_HIGH); /* silence stupid warning */; potential_mask.low_slices = 0; - bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH); compat_mask.low_slices = 0; - bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH); + + if (SLICE_NUM_HIGH) { + bitmap_zero(mask.high_slices, SLICE_NUM_HIGH); + bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH); + bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH); + } /* Sanity checks */ BUG_ON(mm->task_size == 0); + BUG_ON(mm->context.addr_limit == 0); VM_BUG_ON(radix_enabled()); slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize); slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n", addr, len, flags, topdown); - if (len > mm->task_size) - return -ENOMEM; - if (len & ((1ul << pshift) - 1)) - return -EINVAL; - if (fixed && (addr & ((1ul << pshift) - 1))) - return -EINVAL; - if (fixed && addr > (mm->task_size - len)) - return -ENOMEM; - /* If hint, make sure it matches our alignment restrictions */ if (!fixed && addr) { - addr = _ALIGN_UP(addr, 1ul << pshift); + addr = _ALIGN_UP(addr, page_size); slice_dbg(" aligned addr=%lx\n", addr); /* Ignore hint if it's too large or overlaps a VMA */ - if (addr > mm->task_size - len || + if (addr > high_limit - len || !slice_area_is_free(mm, addr, len)) addr = 0; } @@ -588,7 +601,9 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, convert: slice_andnot_mask(&mask, &good_mask); slice_andnot_mask(&mask, &compat_mask); - if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) { + if (mask.low_slices || + (SLICE_NUM_HIGH && + !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH))) { slice_convert(mm, mask, psize); if (psize > MMU_PAGE_BASE) on_each_cpu(slice_flush_segments, mm, 1); diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index d304028641a2..4b295cfd5f7e 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -453,14 +453,12 @@ void radix__flush_tlb_all(void) */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory"); - trace_tlbie(0, 0, rb, rs, ric, prs, r); /* * now flush host entires by passing PRS = 0 and LPID == 0 */ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory"); asm volatile("eieio; tlbsync; ptesync": : :"memory"); - trace_tlbie(0, 0, rb, 0, ric, prs, r); } void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm, diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index f9941b3b5770..f760494ecd66 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -329,6 +329,9 @@ static int 
bpf_jit_build_body(struct bpf_prog *fp, u32 *image, BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); break; + case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */ + PPC_LWZ_OFFS(r_A, r_skb, K); + break; case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */ PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len)); break; diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index a66e64b0b251..254634fb3fc7 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -203,25 +203,37 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func) { + unsigned int i, ctx_idx = ctx->idx; + + /* Load function address into r12 */ + PPC_LI64(12, func); + + /* For bpf-to-bpf function calls, the callee's address is unknown + * until the last extra pass. As seen above, we use PPC_LI64() to + * load the callee's address, but this may optimize the number of + * instructions required based on the nature of the address. + * + * Since we don't want the number of instructions emitted to change, + * we pad the optimized PPC_LI64() call with NOPs to guarantee that + * we always have a five-instruction sequence, which is the maximum + * that PPC_LI64() can emit. + */ + for (i = ctx->idx - ctx_idx; i < 5; i++) + PPC_NOP(); + #ifdef PPC64_ELF_ABI_v1 - /* func points to the function descriptor */ - PPC_LI64(b2p[TMP_REG_2], func); - /* Load actual entry point from function descriptor */ - PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0); - /* ... and move it to LR */ - PPC_MTLR(b2p[TMP_REG_1]); /* * Load TOC from function descriptor at offset 8. * We can clobber r2 since we get called through a * function pointer (so caller will save/restore r2) * and since we don't use a TOC ourself. 
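 * For reference, an ELF ABI v1 function descriptor is laid out roughly as struct func_desc { u64 entry; u64 toc; u64 env; } (a sketch for orientation, not a definition from this file): the entry point sits at offset 0 and the TOC pointer at offset 8, which is what the two loads below rely on.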
*/ - PPC_BPF_LL(2, b2p[TMP_REG_2], 8); -#else - /* We can clobber r12 */ - PPC_FUNC_ADDR(12, func); - PPC_MTLR(12); + PPC_BPF_LL(2, 12, 8); + /* Load actual entry point from function descriptor */ + PPC_BPF_LL(12, 12, 0); #endif + + PPC_MTLR(12); PPC_BLRL(); } @@ -241,6 +253,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 * goto out; */ PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)); + PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31); PPC_CMPLW(b2p_index, b2p[TMP_REG_1]); PPC_BCC(COND_GE, out); @@ -762,7 +775,8 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, func = (u8 *) __bpf_call_base + imm; /* Save skb pointer if we need to re-cache skb data */ - if (bpf_helper_changes_pkt_data(func)) + if ((ctx->seen & SEEN_SKB) && + bpf_helper_changes_pkt_data(func)) PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx)); bpf_jit_emit_func_call(image, ctx, (u64)func); @@ -771,7 +785,8 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, PPC_MR(b2p[BPF_REG_0], 3); /* refresh skb cache */ - if (bpf_helper_changes_pkt_data(func)) { + if ((ctx->seen & SEEN_SKB) && + bpf_helper_changes_pkt_data(func)) { /* reload skb pointer to r3 */ PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx)); bpf_jit_emit_skb_loads(image, ctx); diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 9e3da168d54c..b7a6044161e8 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -410,8 +410,12 @@ static __u64 power_pmu_bhrb_to(u64 addr) int ret; __u64 target; - if (is_kernel_addr(addr)) - return branch_target((unsigned int *)addr); + if (is_kernel_addr(addr)) { + if (probe_kernel_read(&instr, (void *)addr, sizeof(instr))) + return 0; + + return branch_target(&instr); + } /* Userspace: need copy instruction here then translate it */ pagefault_disable(); @@ -453,6 +457,16 @@ static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) /* invalid entry */ continue; + /* + * BHRB rolling buffer could very much contain the kernel + * addresses at this point. Check the privileges before + * exporting it to userspace (avoid exposure of regions + * where we could have speculative execution) + */ + if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) && + is_kernel_addr(addr)) + continue; + /* Branches are read most recent first (ie. mfbhrb 0 is * the most recent branch). * There are two types of valid entries: @@ -1222,6 +1236,7 @@ static void power_pmu_disable(struct pmu *pmu) */ write_mmcr0(cpuhw, val); mb(); + isync(); /* * Disable instruction sampling if it was enabled @@ -1230,12 +1245,26 @@ static void power_pmu_disable(struct pmu *pmu) mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mb(); + isync(); } cpuhw->disabled = 1; cpuhw->n_added = 0; ebb_switch_out(mmcr0); + +#ifdef CONFIG_PPC64 + /* + * These are readable by userspace, may contain kernel + * addresses and are not switched by context switch, so clear + * them now to avoid leaking anything to userspace in general + * including to another process. 
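+ * (SIAR and SDAR hold the sampled instruction and data addresses from the most recent sample; on these cores we assume they can be read from problem state via mfspr, which is why stale kernel values would otherwise leak.)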
+ */ + if (ppmu->flags & PPMU_ARCH_207S) { + mtspr(SPRN_SDAR, 0); + mtspr(SPRN_SIAR, 0); + } +#endif } local_irq_restore(flags); @@ -1415,7 +1444,7 @@ static int collect_events(struct perf_event *group, int max_count, int n = 0; struct perf_event *event; - if (!is_software_event(group)) { + if (group->pmu->task_ctx_nr == perf_hw_context) { if (n >= max_count) return -1; ctrs[n] = group; @@ -1423,7 +1452,7 @@ static int collect_events(struct perf_event *group, int max_count, events[n++] = group->hw.config; } list_for_each_entry(event, &group->sibling_list, group_entry) { - if (!is_software_event(event) && + if (event->pmu->task_ctx_nr == perf_hw_context && event->state != PERF_EVENT_STATE_OFF) { if (n >= max_count) return -1; diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 9c88b82f6229..72238eedc360 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -540,7 +540,7 @@ static int memord(const void *d1, size_t s1, const void *d2, size_t s2) { if (s1 < s2) return 1; - if (s2 > s1) + if (s1 > s2) return -1; return memcmp(d1, d2, s1); diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 36344117c680..b73961b95c34 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -308,6 +308,19 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu) if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask)) return 0; + /* + * Check whether nest_imc is registered. We could end up here if the + * cpuhotplug callback registration fails. i.e, callback invokes the + * offline path for all successfully registered nodes. At this stage, + * nest_imc pmu will not be registered and we should return here. + * + * We return with a zero since this is not an offline failure. And + * cpuhp_setup_state() returns the actual failure reason to the caller, + * which in turn will call the cleanup routine. + */ + if (!nest_pmus) + return 0; + /* * Now that this cpu is one of the designated, * find a next cpu a) which is online and b) in same chip. @@ -467,7 +480,7 @@ static int nest_imc_event_init(struct perf_event *event) * Nest HW counter memory resides in a per-chip reserve-memory (HOMER). * Get the base memory addresss for this cpu. */ - chip_id = topology_physical_package_id(event->cpu); + chip_id = cpu_to_chip_id(event->cpu); pcni = pmu->mem_info; do { if (pcni->id == chip_id) { @@ -524,19 +537,19 @@ static int nest_imc_event_init(struct perf_event *event) */ static int core_imc_mem_init(int cpu, int size) { - int phys_id, rc = 0, core_id = (cpu / threads_per_core); + int nid, rc = 0, core_id = (cpu / threads_per_core); struct imc_mem_info *mem_info; /* * alloc_pages_node() will allocate memory for core in the * local node only. */ - phys_id = topology_physical_package_id(cpu); + nid = cpu_to_node(cpu); mem_info = &core_imc_pmu->mem_info[core_id]; mem_info->id = core_id; /* We need only vbase for core counters */ - mem_info->vbase = page_address(alloc_pages_node(phys_id, + mem_info->vbase = page_address(alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | __GFP_NOWARN, get_order(size))); if (!mem_info->vbase) @@ -797,14 +810,14 @@ static int core_imc_event_init(struct perf_event *event) static int thread_imc_mem_alloc(int cpu_id, int size) { u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id); - int phys_id = topology_physical_package_id(cpu_id); + int nid = cpu_to_node(cpu_id); if (!local_mem) { /* * This case could happen only once at start, since we dont * free the memory in cpu offline path. 
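 * (Note on the change below: alloc_pages_node() expects a logical NUMA node id, so the physical package id used previously only worked where the two numberings happened to coincide.)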
*/ - local_mem = page_address(alloc_pages_node(phys_id, + local_mem = page_address(alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | __GFP_NOWARN, get_order(size))); if (!local_mem) @@ -1118,7 +1131,7 @@ static int init_nest_pmu_ref(void) static void cleanup_all_core_imc_memory(void) { - int i, nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core); + int i, nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core); struct imc_mem_info *ptr = core_imc_pmu->mem_info; int size = core_imc_pmu->counter_mem_size; @@ -1226,7 +1239,7 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent, if (!pmu_ptr->pmu.name) return -ENOMEM; - nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core); + nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core); pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info), GFP_KERNEL); diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index a78f255111f2..3ce376b42330 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -325,6 +325,7 @@ config PPC_BOOK3E_MMU config PPC_MM_SLICES bool default y if PPC_STD_MMU_64 + default y if PPC_8xx && HUGETLB_PAGE default n config PPC_HAVE_PMU_SUPPORT diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c index 03d115aaa191..acde7bbe0716 100644 --- a/arch/powerpc/platforms/chrp/time.c +++ b/arch/powerpc/platforms/chrp/time.c @@ -28,6 +28,8 @@ #include #include +#include + extern spinlock_t rtc_lock; #define NVRAM_AS0 0x74 @@ -63,7 +65,7 @@ long __init chrp_time_init(void) return 0; } -int chrp_cmos_clock_read(int addr) +static int chrp_cmos_clock_read(int addr) { if (nvram_as1 != 0) outb(addr>>8, nvram_as1); @@ -71,7 +73,7 @@ int chrp_cmos_clock_read(int addr) return (inb(nvram_data)); } -void chrp_cmos_clock_write(unsigned long val, int addr) +static void chrp_cmos_clock_write(unsigned long val, int addr) { if (nvram_as1 != 0) outb(addr>>8, nvram_as1); diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index 89c54de88b7a..bf4a125faec6 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c @@ -35,6 +35,8 @@ */ #define HW_BROADWAY_ICR 0x00 #define HW_BROADWAY_IMR 0x04 +#define HW_STARLET_ICR 0x08 +#define HW_STARLET_IMR 0x0c /* @@ -74,6 +76,9 @@ static void hlwd_pic_unmask(struct irq_data *d) void __iomem *io_base = irq_data_get_irq_chip_data(d); setbits32(io_base + HW_BROADWAY_IMR, 1 << irq); + + /* Make sure the ARM (aka. Starlet) doesn't handle this interrupt. */ + clrbits32(io_base + HW_STARLET_IMR, 1 << irq); } diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c index c3c9bbb3573a..ba0964c17620 100644 --- a/arch/powerpc/platforms/powermac/bootx_init.c +++ b/arch/powerpc/platforms/powermac/bootx_init.c @@ -468,7 +468,7 @@ void __init bootx_init(unsigned long r3, unsigned long r4) boot_infos_t *bi = (boot_infos_t *) r4; unsigned long hdr; unsigned long space; - unsigned long ptr, x; + unsigned long ptr; char *model; unsigned long offset = reloc_offset(); @@ -562,6 +562,8 @@ void __init bootx_init(unsigned long r3, unsigned long r4) * MMU switched OFF, so this should not be useful anymore. 
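The switch from num_present_cpus() to num_possible_cpus() in the imc hunks above keeps the allocation and cleanup paths agreeing on the per-core array length even if CPUs become present after boot. A short sketch of the sizing rule, assuming threads_per_core from asm/cputhreads.h:

#include <linux/cpumask.h>
#include <linux/kernel.h>

static int imc_nr_cores(void)
{
	/* size for every CPU that could ever appear, not just those present */
	return DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
}

Both imc_mem_init() and cleanup_all_core_imc_memory() then iterate the same nr_cores, so no mem_info entry can be allocated under one count and freed under another.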
*/ if (bi->version < 4) { + unsigned long x __maybe_unused; + bootx_printf("Touching pages...\n"); /* diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index ab668cb72263..8b2eab1340f4 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -352,6 +352,7 @@ static int pmac_late_init(void) } machine_late_initcall(powermac, pmac_late_init); +void note_bootable_part(dev_t dev, int part, int goodness); /* * This is __ref because we check for "initializing" before * touching any of the __init sensitive things and "initializing" diff --git a/arch/powerpc/platforms/powernv/copy-paste.h b/arch/powerpc/platforms/powernv/copy-paste.h index c9a503623431..e9a6c35f8a29 100644 --- a/arch/powerpc/platforms/powernv/copy-paste.h +++ b/arch/powerpc/platforms/powernv/copy-paste.h @@ -42,5 +42,6 @@ static inline int vas_paste(void *paste_address, int offset) : "b" (offset), "b" (paste_address) : "memory", "cr0"); - return (cr >> CR0_SHIFT) & CR0_MASK; + /* We mask with 0xE to ignore SO */ + return (cr >> CR0_SHIFT) & 0xE; } diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 443d5ca71995..028d6d12ba32 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -78,7 +78,7 @@ static int pnv_save_sprs_for_deep_states(void) uint64_t msr_val = MSR_IDLE; uint64_t psscr_val = pnv_deepest_stop_psscr_val; - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { uint64_t pir = get_hard_smp_processor_id(cpu); uint64_t hsprg0_val = (uint64_t)&paca[cpu]; @@ -741,7 +741,7 @@ static int __init pnv_init_idle_states(void) int cpu; pr_info("powernv: idle: Saving PACA pointers of all CPUs in their thread sibling PACA\n"); - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { int base_cpu = cpu_first_thread_sibling(cpu); int idx = cpu_thread_in_core(cpu); int i; diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index de470caf0784..fc222a0c2ac4 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -82,19 +82,6 @@ static const struct file_operations memtrace_fops = { .open = simple_open, }; -static void flush_memory_region(u64 base, u64 size) -{ - unsigned long line_size = ppc64_caches.l1d.size; - u64 end = base + size; - u64 addr; - - base = round_down(base, line_size); - end = round_up(end, line_size); - - for (addr = base; addr < end; addr += line_size) - asm volatile("dcbf 0,%0" : "=r" (addr) :: "memory"); -} - static int check_memblock_online(struct memory_block *mem, void *arg) { if (mem->state != MEM_ONLINE) @@ -132,10 +119,6 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages) walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE, change_memblock_state); - /* RCU grace period? 
*/ - flush_memory_region((u64)__va(start_pfn << PAGE_SHIFT), - nr_pages << PAGE_SHIFT); - lock_device_hotplug(); remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); unlock_device_hotplug(); diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 2cb6cbea4b3b..63f007f2de7e 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -33,6 +33,13 @@ #define npu_to_phb(x) container_of(x, struct pnv_phb, npu) +/* + * When an address shootdown range exceeds this threshold we invalidate the + * entire TLB on the GPU for the given PID rather than each specific address in + * the range. + */ +#define ATSD_THRESHOLD (2*1024*1024) + /* * Other types of TCE cache invalidation are not functional in the * hardware. @@ -406,6 +413,11 @@ struct npu_context { void *priv; }; +struct mmio_atsd_reg { + struct npu *npu; + int reg; +}; + /* * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC * if none are available. @@ -415,7 +427,7 @@ static int get_mmio_atsd_reg(struct npu *npu) int i; for (i = 0; i < npu->mmio_atsd_count; i++) { - if (!test_and_set_bit(i, &npu->mmio_atsd_usage)) + if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage)) return i; } @@ -424,86 +436,90 @@ static int get_mmio_atsd_reg(struct npu *npu) static void put_mmio_atsd_reg(struct npu *npu, int reg) { - clear_bit(reg, &npu->mmio_atsd_usage); + clear_bit_unlock(reg, &npu->mmio_atsd_usage); } /* MMIO ATSD register offsets */ #define XTS_ATSD_AVA 1 #define XTS_ATSD_STAT 2 -static int mmio_launch_invalidate(struct npu *npu, unsigned long launch, - unsigned long va) +static void mmio_launch_invalidate(struct mmio_atsd_reg *mmio_atsd_reg, + unsigned long launch, unsigned long va) { - int mmio_atsd_reg; - - do { - mmio_atsd_reg = get_mmio_atsd_reg(npu); - cpu_relax(); - } while (mmio_atsd_reg < 0); + struct npu *npu = mmio_atsd_reg->npu; + int reg = mmio_atsd_reg->reg; __raw_writeq(cpu_to_be64(va), - npu->mmio_atsd_regs[mmio_atsd_reg] + XTS_ATSD_AVA); + npu->mmio_atsd_regs[reg] + XTS_ATSD_AVA); eieio(); - __raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[mmio_atsd_reg]); - - return mmio_atsd_reg; + __raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[reg]); } -static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush) +static void mmio_invalidate_pid(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], + unsigned long pid, bool flush) { + int i; unsigned long launch; - /* IS set to invalidate matching PID */ - launch = PPC_BIT(12); + for (i = 0; i <= max_npu2_index; i++) { + if (mmio_atsd_reg[i].reg < 0) + continue; + + /* IS set to invalidate matching PID */ + launch = PPC_BIT(12); - /* PRS set to process-scoped */ - launch |= PPC_BIT(13); + /* PRS set to process-scoped */ + launch |= PPC_BIT(13); - /* AP */ - launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); + /* AP */ + launch |= (u64) + mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); - /* PID */ - launch |= pid << PPC_BITLSHIFT(38); + /* PID */ + launch |= pid << PPC_BITLSHIFT(38); - /* No flush */ - launch |= !flush << PPC_BITLSHIFT(39); + /* No flush */ + launch |= !flush << PPC_BITLSHIFT(39); - /* Invalidating the entire process doesn't use a va */ - return mmio_launch_invalidate(npu, launch, 0); + /* Invalidating the entire process doesn't use a va */ + mmio_launch_invalidate(&mmio_atsd_reg[i], launch, 0); + } } -static int mmio_invalidate_va(struct npu *npu, unsigned long va, - unsigned long pid, bool flush) +static void 
mmio_invalidate_va(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], + unsigned long va, unsigned long pid, bool flush) { + int i; unsigned long launch; - /* IS set to invalidate target VA */ - launch = 0; + for (i = 0; i <= max_npu2_index; i++) { + if (mmio_atsd_reg[i].reg < 0) + continue; + + /* IS set to invalidate target VA */ + launch = 0; - /* PRS set to process scoped */ - launch |= PPC_BIT(13); + /* PRS set to process scoped */ + launch |= PPC_BIT(13); - /* AP */ - launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); + /* AP */ + launch |= (u64) + mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); - /* PID */ - launch |= pid << PPC_BITLSHIFT(38); + /* PID */ + launch |= pid << PPC_BITLSHIFT(38); - /* No flush */ - launch |= !flush << PPC_BITLSHIFT(39); + /* No flush */ + launch |= !flush << PPC_BITLSHIFT(39); - return mmio_launch_invalidate(npu, launch, va); + mmio_launch_invalidate(&mmio_atsd_reg[i], launch, va); + } } #define mn_to_npu_context(x) container_of(x, struct npu_context, mn) -struct mmio_atsd_reg { - struct npu *npu; - int reg; -}; - static void mmio_invalidate_wait( - struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush) + struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]) { struct npu *npu; int i, reg; @@ -518,16 +534,67 @@ static void mmio_invalidate_wait( reg = mmio_atsd_reg[i].reg; while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT)) cpu_relax(); + } +} + +/* + * Acquires all the address translation shootdown (ATSD) registers required to + * launch an ATSD on all links this npu_context is active on. + */ +static void acquire_atsd_reg(struct npu_context *npu_context, + struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]) +{ + int i, j; + struct npu *npu; + struct pci_dev *npdev; + struct pnv_phb *nphb; - put_mmio_atsd_reg(npu, reg); + for (i = 0; i <= max_npu2_index; i++) { + mmio_atsd_reg[i].reg = -1; + for (j = 0; j < NV_MAX_LINKS; j++) { + /* + * There are no ordering requirements with respect to + * the setup of struct npu_context, but to ensure + * consistent behaviour we need to ensure npdev[][] is + * only read once. + */ + npdev = READ_ONCE(npu_context->npdev[i][j]); + if (!npdev) + continue; + nphb = pci_bus_to_host(npdev->bus)->private_data; + npu = &nphb->npu; + mmio_atsd_reg[i].npu = npu; + mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu); + while (mmio_atsd_reg[i].reg < 0) { + mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu); + cpu_relax(); + } + break; + } + } +} + +/* + * Release previously acquired ATSD registers. To avoid deadlocks the registers + * must be released in the same order they were acquired above in + * acquire_atsd_reg. + */ +static void release_atsd_reg(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]) +{ + int i; + + for (i = 0; i <= max_npu2_index; i++) { /* - * The GPU requires two flush ATSDs to ensure all entries have - * been flushed. We use PID 0 as it will never be used for a - * process on the GPU. + * We can't rely on npu_context->npdev[][] being the same here + * as when acquire_atsd_reg() was called, hence we use the + * values stored in mmio_atsd_reg during the acquire phase + * rather than re-reading npdev[][]. 
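acquire_atsd_reg() above pairs test_and_set_bit_lock() with the clear_bit_unlock() in put_mmio_atsd_reg(), so a successful test-and-set doubles as an acquire barrier and the release orders everything written while the register was held. A standalone sketch of that bit-lock allocator pattern, with usage and count standing in for npu->mmio_atsd_usage and npu->mmio_atsd_count:

#include <linux/bitops.h>
#include <linux/errno.h>

static int reg_pool_get(unsigned long *usage, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (!test_and_set_bit_lock(i, usage))
			return i;	/* acquired slot i with acquire semantics */

	return -ENOSPC;		/* caller spins with cpu_relax() and retries */
}

static void reg_pool_put(unsigned long *usage, int reg)
{
	clear_bit_unlock(reg, usage);	/* release: prior stores visible first */
}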
*/ - if (flush) - mmio_invalidate_pid(npu, 0, true); + if (mmio_atsd_reg[i].reg < 0) + continue; + + put_mmio_atsd_reg(mmio_atsd_reg[i].npu, mmio_atsd_reg[i].reg); } } @@ -538,10 +605,6 @@ static void mmio_invalidate_wait( static void mmio_invalidate(struct npu_context *npu_context, int va, unsigned long address, bool flush) { - int i, j; - struct npu *npu; - struct pnv_phb *nphb; - struct pci_dev *npdev; struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]; unsigned long pid = npu_context->mm->context.id; @@ -555,37 +618,25 @@ static void mmio_invalidate(struct npu_context *npu_context, int va, * Loop over all the NPUs this process is active on and launch * an invalidate. */ - for (i = 0; i <= max_npu2_index; i++) { - mmio_atsd_reg[i].reg = -1; - for (j = 0; j < NV_MAX_LINKS; j++) { - npdev = npu_context->npdev[i][j]; - if (!npdev) - continue; - - nphb = pci_bus_to_host(npdev->bus)->private_data; - npu = &nphb->npu; - mmio_atsd_reg[i].npu = npu; - - if (va) - mmio_atsd_reg[i].reg = - mmio_invalidate_va(npu, address, pid, - flush); - else - mmio_atsd_reg[i].reg = - mmio_invalidate_pid(npu, pid, flush); - - /* - * The NPU hardware forwards the shootdown to all GPUs - * so we only have to launch one shootdown per NPU. - */ - break; - } + acquire_atsd_reg(npu_context, mmio_atsd_reg); + if (va) + mmio_invalidate_va(mmio_atsd_reg, address, pid, flush); + else + mmio_invalidate_pid(mmio_atsd_reg, pid, flush); + + mmio_invalidate_wait(mmio_atsd_reg); + if (flush) { + /* + * The GPU requires two flush ATSDs to ensure all entries have + * been flushed. We use PID 0 as it will never be used for a + * process on the GPU. + */ + mmio_invalidate_pid(mmio_atsd_reg, 0, true); + mmio_invalidate_wait(mmio_atsd_reg); + mmio_invalidate_pid(mmio_atsd_reg, 0, true); + mmio_invalidate_wait(mmio_atsd_reg); } - - mmio_invalidate_wait(mmio_atsd_reg, flush); - if (flush) - /* Wait for the flush to complete */ - mmio_invalidate_wait(mmio_atsd_reg, false); + release_atsd_reg(mmio_atsd_reg); } static void pnv_npu2_mn_release(struct mmu_notifier *mn, @@ -621,11 +672,19 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, struct npu_context *npu_context = mn_to_npu_context(mn); unsigned long address; - for (address = start; address < end; address += PAGE_SIZE) - mmio_invalidate(npu_context, 1, address, false); + if (end - start > ATSD_THRESHOLD) { + /* + * Just invalidate the entire PID if the address range is too + * large. + */ + mmio_invalidate(npu_context, 0, 0, true); + } else { + for (address = start; address < end; address += PAGE_SIZE) + mmio_invalidate(npu_context, 1, address, false); - /* Do the flush only on the final addess == end */ - mmio_invalidate(npu_context, 1, address, true); + /* Do the flush only on the final address == end */ + mmio_invalidate(npu_context, 1, address, true); + } } static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { @@ -720,7 +779,16 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index", &nvlink_index))) return ERR_PTR(-ENODEV); - npu_context->npdev[npu->index][nvlink_index] = npdev; + + /* + * npdev is a pci_dev pointer set up by the PCI code. We assign it to + * npdev[][] to indicate to the mmu notifiers that an invalidation + * should also be sent over this nvlink. The notifiers don't use any + * other fields in npu_context, so we just need to ensure that when they + * dereference npu_context->npdev[][] it is either a valid pointer or + * NULL.
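The WRITE_ONCE()/READ_ONCE() pairing introduced here is the whole synchronization story for npdev[][]: writers publish a valid pointer or NULL in a single store, and the notifier reads each slot exactly once before testing it. A minimal model of the contract, with slot standing in for one npdev[i][j] entry:

#include <linux/compiler.h>

struct pci_dev;			/* opaque in this sketch */

static struct pci_dev *slot;	/* hypothetical stand-in for npdev[i][j] */

static void slot_publish(struct pci_dev *pdev)
{
	/* single, non-torn store; pdev may be NULL to unpublish */
	WRITE_ONCE(slot, pdev);
}

static struct pci_dev *slot_fetch(void)
{
	/* read once, then only use the local copy */
	return READ_ONCE(slot);
}

Without READ_ONCE() the compiler could legally reload the slot between the NULL test and the dereference, reintroducing exactly the race the comment above describes.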
+ */ + WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev); return npu_context; } @@ -759,7 +827,7 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context, if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index", &nvlink_index))) return; - npu_context->npdev[npu->index][nvlink_index] = NULL; + WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL); opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id, PCI_DEVID(gpdev->bus->number, gpdev->devfn)); kref_put(&npu_context->kref, pnv_npu2_release_context); diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c index cf33769a7b72..45b3feb8aa2f 100644 --- a/arch/powerpc/platforms/powernv/opal-async.c +++ b/arch/powerpc/platforms/powernv/opal-async.c @@ -39,18 +39,18 @@ int __opal_async_get_token(void) int token; spin_lock_irqsave(&opal_async_comp_lock, flags); - token = find_first_bit(opal_async_complete_map, opal_max_async_tokens); + token = find_first_zero_bit(opal_async_token_map, opal_max_async_tokens); if (token >= opal_max_async_tokens) { token = -EBUSY; goto out; } - if (__test_and_set_bit(token, opal_async_token_map)) { + if (!__test_and_clear_bit(token, opal_async_complete_map)) { token = -EBUSY; goto out; } - __clear_bit(token, opal_async_complete_map); + __set_bit(token, opal_async_token_map); out: spin_unlock_irqrestore(&opal_async_comp_lock, flags); diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c index 21f6531fae20..6914b289c86b 100644 --- a/arch/powerpc/platforms/powernv/opal-imc.c +++ b/arch/powerpc/platforms/powernv/opal-imc.c @@ -126,9 +126,11 @@ static void disable_nest_pmu_counters(void) const struct cpumask *l_cpumask; get_online_cpus(); - for_each_online_node(nid) { + for_each_node_with_cpus(nid) { l_cpumask = cpumask_of_node(nid); - cpu = cpumask_first(l_cpumask); + cpu = cpumask_first_and(l_cpumask, cpu_online_mask); + if (cpu >= nr_cpu_ids) + continue; opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST, get_hard_smp_processor_id(cpu)); } @@ -191,8 +193,10 @@ static int opal_imc_counters_probe(struct platform_device *pdev) break; } - if (!imc_pmu_create(imc_dev, pmu_count, domain)) - pmu_count++; + if (!imc_pmu_create(imc_dev, pmu_count, domain)) { + if (domain == IMC_DOMAIN_NEST) + pmu_count++; + } } return 0; diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c index 9db4398ded5d..5584247f5029 100644 --- a/arch/powerpc/platforms/powernv/opal-nvram.c +++ b/arch/powerpc/platforms/powernv/opal-nvram.c @@ -11,6 +11,7 @@ #define DEBUG +#include #include #include #include @@ -43,6 +44,10 @@ static ssize_t opal_nvram_read(char *buf, size_t count, loff_t *index) return count; } +/* + * This can be called in the panic path with interrupts off, so use + * mdelay in that case. 
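That rule, busy-wait when sleeping is forbidden and sleep otherwise, appears inline in the opal_nvram_write() loop below. Factored out as a hedged helper (the helper name is illustrative; OPAL_BUSY_DELAY_MS is assumed from asm/opal.h):

#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>

static void opal_busy_delay(void)
{
	if (in_interrupt() || irqs_disabled())
		mdelay(OPAL_BUSY_DELAY_MS);	/* panic/atomic path: spin */
	else
		msleep(OPAL_BUSY_DELAY_MS);	/* process context: sleep */
}

Each OPAL_BUSY / OPAL_BUSY_EVENT iteration then delays before retrying, with opal_poll_events() still run in the BUSY_EVENT case.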
+ */ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index) { s64 rc = OPAL_BUSY; @@ -56,9 +61,23 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index) while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_write_nvram(__pa(buf), count, off); - if (rc == OPAL_BUSY_EVENT) + if (rc == OPAL_BUSY_EVENT) { + if (in_interrupt() || irqs_disabled()) + mdelay(OPAL_BUSY_DELAY_MS); + else + msleep(OPAL_BUSY_DELAY_MS); opal_poll_events(NULL); + } else if (rc == OPAL_BUSY) { + if (in_interrupt() || irqs_disabled()) + mdelay(OPAL_BUSY_DELAY_MS); + else + msleep(OPAL_BUSY_DELAY_MS); + } } + + if (rc) + return -EIO; + *index += count; return count; } diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c index f8868864f373..aa2a5139462e 100644 --- a/arch/powerpc/platforms/powernv/opal-rtc.c +++ b/arch/powerpc/platforms/powernv/opal-rtc.c @@ -48,10 +48,12 @@ unsigned long __init opal_get_boot_time(void) while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); - if (rc == OPAL_BUSY_EVENT) + if (rc == OPAL_BUSY_EVENT) { + mdelay(OPAL_BUSY_DELAY_MS); opal_poll_events(NULL); - else if (rc == OPAL_BUSY) - mdelay(10); + } else if (rc == OPAL_BUSY) { + mdelay(OPAL_BUSY_DELAY_MS); + } } if (rc != OPAL_SUCCESS) return 0; diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 57f9e55f4352..677b29ef4532 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -3591,7 +3591,6 @@ static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe) WARN_ON(pe->table_group.group); } - pnv_pci_ioda2_table_free_pages(tbl); iommu_tce_table_put(tbl); } diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index bbb73aa0eb8f..fd143c934768 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -36,13 +36,102 @@ #include #include #include +#include +#include #include "powernv.h" + +static bool fw_feature_is(const char *state, const char *name, + struct device_node *fw_features) +{ + struct device_node *np; + bool rc = false; + + np = of_get_child_by_name(fw_features, name); + if (np) { + rc = of_property_read_bool(np, state); + of_node_put(np); + } + + return rc; +} + +static void init_fw_feat_flags(struct device_node *np) +{ + if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np)) + security_ftr_set(SEC_FTR_SPEC_BAR_ORI31); + + if (fw_feature_is("enabled", "fw-bcctrl-serialized", np)) + security_ftr_set(SEC_FTR_BCCTRL_SERIALISED); + + if (fw_feature_is("enabled", "inst-l1d-flush-ori30,30,0", np)) + security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30); + + if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np)) + security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2); + + if (fw_feature_is("enabled", "fw-l1d-thread-split", np)) + security_ftr_set(SEC_FTR_L1D_THREAD_PRIV); + + if (fw_feature_is("enabled", "fw-count-cache-disabled", np)) + security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED); + + /* + * The features below are enabled by default, so we instead look to see + * if firmware has *disabled* them, and clear them if so. 
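The one-line removal in pci-ioda.c above fixes a double free: iommu_tce_table_put() drops a reference whose release path already frees the backing pages, so freeing them first in the caller freed them twice. A reduced, hedged model of that ownership rule (not the iommu code itself), using kref for the refcount:

#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct table {
	struct kref kref;
	void *pages;		/* assumed from alloc_pages_exact() */
	size_t size;
};

static void table_release(struct kref *kref)
{
	struct table *t = container_of(kref, struct table, kref);

	free_pages_exact(t->pages, t->size);	/* freed here, and only here */
	kfree(t);
}

static void table_put(struct table *t)
{
	/* callers must not free t->pages themselves before this */
	kref_put(&t->kref, table_release);
}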
+ */ + if (fw_feature_is("disabled", "speculation-policy-favor-security", np)) + security_ftr_clear(SEC_FTR_FAVOUR_SECURITY); + + if (fw_feature_is("disabled", "needs-l1d-flush-msr-pr-0-to-1", np)) + security_ftr_clear(SEC_FTR_L1D_FLUSH_PR); + + if (fw_feature_is("disabled", "needs-l1d-flush-msr-hv-1-to-0", np)) + security_ftr_clear(SEC_FTR_L1D_FLUSH_HV); + + if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np)) + security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR); +} + +static void pnv_setup_rfi_flush(void) +{ + struct device_node *np, *fw_features; + enum l1d_flush_type type; + bool enable; + + /* Default to fallback in case fw-features are not available */ + type = L1D_FLUSH_FALLBACK; + + np = of_find_node_by_name(NULL, "ibm,opal"); + fw_features = of_get_child_by_name(np, "fw-features"); + of_node_put(np); + + if (fw_features) { + init_fw_feat_flags(fw_features); + of_node_put(fw_features); + + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2)) + type = L1D_FLUSH_MTTRIG; + + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30)) + type = L1D_FLUSH_ORI; + } + + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \ + (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \ + security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV)); + + setup_rfi_flush(type, enable); +} + static void __init pnv_setup_arch(void) { set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); + pnv_setup_rfi_flush(); + setup_stf_barrier(); + /* Initialize SMP */ pnv_smp_init(); @@ -319,7 +408,7 @@ static unsigned long pnv_get_proc_freq(unsigned int cpu) { unsigned long ret_freq; - ret_freq = cpufreq_quick_get(cpu) * 1000ul; + ret_freq = cpufreq_get(cpu) * 1000ul; /* * If the backend cpufreq driver does not exist, diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index 9dabea6e1443..6244bc849469 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -104,6 +104,20 @@ static void __noreturn ps3_halt(void) ps3_sys_manager_halt(); /* never returns */ } +static void ps3_panic(char *str) +{ + DBG("%s:%d %s\n", __func__, __LINE__, str); + + smp_send_stop(); + printk("\n"); + printk(" System does not reboot automatically.\n"); + printk(" Please press POWER button.\n"); + printk("\n"); + + while(1) + lv1_pause(1); +} + #if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \ defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE) static void __init prealloc(struct ps3_prealloc *p) @@ -255,6 +269,7 @@ define_machine(ps3) { .probe = ps3_probe, .setup_arch = ps3_setup_arch, .init_IRQ = ps3_init_IRQ, + .panic = ps3_panic, .get_boot_time = ps3_get_boot_time, .set_dabr = ps3_set_dabr, .calibrate_decr = ps3_calibrate_decr, diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index 4ac419c7eb4c..560aefde06c0 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c @@ -742,7 +742,7 @@ static void cmm_exit(void) * Return value: * 0 on success / other on failure **/ -static int cmm_set_disable(const char *val, struct kernel_param *kp) +static int cmm_set_disable(const char *val, const struct kernel_param *kp) { int disable = simple_strtoul(val, NULL, 10); diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index e45b5f10645a..e9149d05d30b 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -586,11 +586,26 @@ static ssize_t dlpar_show(struct class *class, struct class_attribute *attr, static 
CLASS_ATTR_RW(dlpar); -static int __init pseries_dlpar_init(void) +int __init dlpar_workqueue_init(void) { + if (pseries_hp_wq) + return 0; + pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue", - WQ_UNBOUND, 1); + WQ_UNBOUND, 1); + + return pseries_hp_wq ? 0 : -ENOMEM; +} + +static int __init dlpar_sysfs_init(void) +{ + int rc; + + rc = dlpar_workqueue_init(); + if (rc) + return rc; + return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr); } -machine_device_initcall(pseries, pseries_dlpar_init); +machine_device_initcall(pseries, dlpar_sysfs_init); diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index fadb95efbb9e..b1ac8ac38434 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "pseries.h" #include "offline_states.h" @@ -331,6 +332,7 @@ static void pseries_remove_processor(struct device_node *np) BUG_ON(cpu_online(cpu)); set_cpu_present(cpu, false); set_hard_smp_processor_id(cpu, -1); + update_numa_cpu_lookup_table(cpu, -1); break; } if (cpu >= nr_cpu_ids) diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 495ba4e7336d..55e97565ed2d 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -726,15 +726,18 @@ static int pseries_lpar_resize_hpt(unsigned long shift) return 0; } -/* Actually only used for radix, so far */ static int pseries_lpar_register_process_table(unsigned long base, unsigned long page_size, unsigned long table_size) { long rc; - unsigned long flags = PROC_TABLE_NEW; + unsigned long flags = 0; + if (table_size) + flags |= PROC_TABLE_NEW; if (radix_enabled()) flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE; + else + flags |= PROC_TABLE_HPT_SLB; for (;;) { rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base, page_size, table_size); @@ -760,6 +763,7 @@ void __init hpte_init_pseries(void) mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range; mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all; mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; + register_process_table = pseries_lpar_register_process_table; if (firmware_has_feature(FW_FEATURE_HPT_RESIZE)) mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt; diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index f7042ad492ba..fbea7db043fa 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -348,6 +348,9 @@ void post_mobility_fixup(void) printk(KERN_ERR "Post-mobility device tree update " "failed: %d\n", rc); + /* Possibly switch to a new RFI flush type */ + pseries_setup_rfi_flush(); + return; } diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 4470a3194311..27cdcb69fd18 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -98,4 +98,8 @@ static inline unsigned long cmo_get_page_size(void) return CMO_PageSize; } +int dlpar_workqueue_init(void); + +void pseries_setup_rfi_flush(void); + #endif /* _PSERIES_PSERIES_H */ diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 4923ffe230cf..5e1ef9150182 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -48,6 +48,28 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id); static irqreturn_t 
ras_error_interrupt(int irq, void *dev_id); +/* + * Enable the hotplug interrupt late because processing them may touch other + * devices or systems (e.g. hugepages) that have not been initialized at the + * subsys stage. + */ +int __init init_ras_hotplug_IRQ(void) +{ + struct device_node *np; + + /* Hotplug Events */ + np = of_find_node_by_path("/event-sources/hot-plug-events"); + if (np != NULL) { + if (dlpar_workqueue_init() == 0) + request_event_sources_irqs(np, ras_hotplug_interrupt, + "RAS_HOTPLUG"); + of_node_put(np); + } + + return 0; +} +machine_late_initcall(pseries, init_ras_hotplug_IRQ); + /* * Initialize handlers for the set of interrupts caused by hardware errors * and power system events. @@ -66,14 +88,6 @@ static int __init init_ras_IRQ(void) of_node_put(np); } - /* Hotplug Events */ - np = of_find_node_by_path("/event-sources/hot-plug-events"); - if (np != NULL) { - request_event_sources_irqs(np, ras_hotplug_interrupt, - "RAS_HOTPLUG"); - of_node_put(np); - } - /* EPOW Events */ np = of_find_node_by_path("/event-sources/epow-events"); if (np != NULL) { diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 5f1beb8367ac..45f814041448 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -68,6 +68,7 @@ #include #include #include +#include #include "pseries.h" @@ -459,6 +460,82 @@ static void __init find_and_init_phbs(void) of_pci_check_probe_only(); } +static void init_cpu_char_feature_flags(struct h_cpu_char_result *result) +{ + /* + * The features below are disabled by default, so we instead look to see + * if firmware has *enabled* them, and set them if so. + */ + if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31) + security_ftr_set(SEC_FTR_SPEC_BAR_ORI31); + + if (result->character & H_CPU_CHAR_BCCTRL_SERIALISED) + security_ftr_set(SEC_FTR_BCCTRL_SERIALISED); + + if (result->character & H_CPU_CHAR_L1D_FLUSH_ORI30) + security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30); + + if (result->character & H_CPU_CHAR_L1D_FLUSH_TRIG2) + security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2); + + if (result->character & H_CPU_CHAR_L1D_THREAD_PRIV) + security_ftr_set(SEC_FTR_L1D_THREAD_PRIV); + + if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED) + security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED); + + /* + * The features below are enabled by default, so we instead look to see + * if firmware has *disabled* them, and clear them if so. + */ + if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) + security_ftr_clear(SEC_FTR_FAVOUR_SECURITY); + + if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) + security_ftr_clear(SEC_FTR_L1D_FLUSH_PR); + + if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR)) + security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR); +} + +void pseries_setup_rfi_flush(void) +{ + struct h_cpu_char_result result; + enum l1d_flush_type types; + bool enable; + long rc; + + /* + * Set features to the defaults assumed by init_cpu_char_feature_flags() + * so it can set/clear again any features that might have changed after + * migration, and in case the hypercall fails and it is not even called. + */ + powerpc_security_features = SEC_FTR_DEFAULT; + + rc = plpar_get_cpu_characteristics(&result); + if (rc == H_SUCCESS) + init_cpu_char_feature_flags(&result); + + /* + * We're the guest so this doesn't apply to us, clear it to simplify + * handling of it elsewhere. 
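dlpar_workqueue_init() is now reachable from two initcalls, the dlpar sysfs setup and init_ras_hotplug_IRQ() above, so it is written to be idempotent. A minimal sketch of the pattern, assuming both callers run from single-threaded initcall context (names are illustrative):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *hp_wq;

static int example_wq_init(void)
{
	if (hp_wq)		/* the other initcall got here first */
		return 0;

	hp_wq = alloc_workqueue("example hotplug wq", WQ_UNBOUND, 1);
	return hp_wq ? 0 : -ENOMEM;
}

Because initcalls run sequentially no locking is needed; whichever path runs first creates the queue and the second call is a no-op.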
+ */ + security_ftr_clear(SEC_FTR_L1D_FLUSH_HV); + + types = L1D_FLUSH_FALLBACK; + + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2)) + types |= L1D_FLUSH_MTTRIG; + + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30)) + types |= L1D_FLUSH_ORI; + + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \ + security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR); + + setup_rfi_flush(types, enable); +} + static void __init pSeries_setup_arch(void) { set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); @@ -476,6 +553,9 @@ static void __init pSeries_setup_arch(void) fwnmi_init(); + pseries_setup_rfi_flush(); + setup_stf_barrier(); + /* By default, only probe PCI (can be overridden by rtas_pci) */ pci_add_flags(PCI_PROBE_ONLY); @@ -726,6 +806,7 @@ define_machine(pseries) { .pcibios_fixup = pSeries_final_fixup, .restart = rtas_restart, .halt = rtas_halt, + .panic = rtas_os_term, .get_boot_time = rtas_get_boot_time, .get_rtc_time = rtas_get_rtc_time, .set_rtc_time = rtas_set_rtc_time, diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 12277bc9fd9e..d86938260a86 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -1592,6 +1592,8 @@ ATTRIBUTE_GROUPS(vio_dev); void vio_unregister_device(struct vio_dev *viodev) { device_unregister(&viodev->dev); + if (viodev->family == VDEVICE) + irq_dispose_mapping(viodev->irq); } EXPORT_SYMBOL(vio_unregister_device); diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index 16f1edd78c40..535cf1f6941c 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -846,12 +846,12 @@ void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq) u32 ipic_get_mcp_status(void) { - return ipic_read(primary_ipic->regs, IPIC_SERMR); + return ipic_read(primary_ipic->regs, IPIC_SERSR); } void ipic_clear_mcp_status(u32 mask) { - ipic_write(primary_ipic->regs, IPIC_SERMR, mask); + ipic_write(primary_ipic->regs, IPIC_SERSR, mask); } /* Return an interrupt vector or 0 if no interrupt is pending. 
*/ diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index ead3e2549ebf..205dec18d6b5 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -626,7 +626,7 @@ static inline u32 mpic_physmask(u32 cpumask) int i; u32 mask = 0; - for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1) + for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1) mask |= (cpumask & 1) << get_hard_smp_processor_id(i); return mask; } diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index ebc244b08d67..0f89ee557b04 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -388,6 +388,10 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc) if (xive_pool_vps == XIVE_INVALID_VP) return; + /* Check if pool VP already active, if it is, pull it */ + if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP) + in_be64(xive_tima + TM_SPC_PULL_POOL_CTX); + /* Enable the pool VP */ vp = xive_pool_vps + cpu; pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp); diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index d9c4c9366049..091f1d0d0af1 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -356,7 +356,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio, rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size); if (rc) { - pr_err("Error %lld getting queue info prio %d\n", rc, prio); + pr_err("Error %lld getting queue info CPU %d prio %d\n", rc, + target, prio); rc = -EIO; goto fail; } @@ -370,7 +371,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio, /* Configure and enable the queue in HW */ rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order); if (rc) { - pr_err("Error %lld setting queue for prio %d\n", rc, prio); + pr_err("Error %lld setting queue for CPU %d prio %d\n", rc, + target, prio); rc = -EIO; } else { q->qpage = qpage; @@ -389,8 +391,8 @@ static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc, if (IS_ERR(qpage)) return PTR_ERR(qpage); - return xive_spapr_configure_queue(cpu, q, prio, qpage, - xive_queue_shift); + return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu), + q, prio, qpage, xive_queue_shift); } static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, @@ -399,10 +401,12 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, struct xive_q *q = &xc->queue[prio]; unsigned int alloc_order; long rc; + int hw_cpu = get_hard_smp_processor_id(cpu); - rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0); + rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0); if (rc) - pr_err("Error %ld setting queue for prio %d\n", rc, prio); + pr_err("Error %ld setting queue for CPU %d prio %d\n", rc, + hw_cpu, prio); alloc_order = xive_alloc_order(xive_queue_shift); free_pages((unsigned long)q->qpage, alloc_order); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 33351c6704b1..a5938fadd031 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -530,14 +530,19 @@ static int xmon_core(struct pt_regs *regs, int fromipi) waiting: secondary = 1; + spin_begin(); while (secondary && !xmon_gate) { if (in_xmon == 0) { - if (fromipi) + if (fromipi) { + spin_end(); goto leave; + } secondary = test_and_set_bit(0, &in_xmon); } - barrier(); + spin_cpu_relax(); + touch_nmi_watchdog(); } + spin_end(); if (!secondary && !xmon_gate) 
{ /* we are the first cpu to come in */ @@ -568,21 +573,25 @@ static int xmon_core(struct pt_regs *regs, int fromipi) mb(); xmon_gate = 1; barrier(); + touch_nmi_watchdog(); } cmdloop: while (in_xmon) { if (secondary) { + spin_begin(); if (cpu == xmon_owner) { if (!test_and_set_bit(0, &xmon_taken)) { secondary = 0; + spin_end(); continue; } /* missed it */ while (cpu == xmon_owner) - barrier(); + spin_cpu_relax(); } - barrier(); + spin_cpu_relax(); + touch_nmi_watchdog(); } else { cmd = cmds(regs); if (cmd != 0) { @@ -2339,6 +2348,8 @@ static void dump_one_paca(int cpu) DUMP(p, slb_cache_ptr, "x"); for (i = 0; i < SLB_CACHE_ENTRIES; i++) printf(" slb_cache[%d]: = 0x%016lx\n", i, p->slb_cache[i]); + + DUMP(p, rfi_flush_fallback_area, "px"); #endif DUMP(p, dscr_default, "llx"); #ifdef CONFIG_PPC_BOOK3E @@ -2475,6 +2486,11 @@ static void dump_xives(void) unsigned long num; int c; + if (!xive_enabled()) { + printf("Xive disabled on this system\n"); + return; + } + c = inchar(); if (c == 'a') { dump_all_xives(); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index ae55e715cc74..49fb6614ea8c 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -121,6 +121,7 @@ config S390 select GENERIC_CLOCKEVENTS select GENERIC_CPU_AUTOPROBE select GENERIC_CPU_DEVICES if !SMP + select GENERIC_CPU_VULNERABILITIES select GENERIC_FIND_FIRST_BIT select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL @@ -538,6 +539,51 @@ config ARCH_RANDOM If unsure, say Y. +config KERNEL_NOBP + def_bool n + prompt "Enable modified branch prediction for the kernel by default" + help + If this option is selected the kernel will switch to a modified + branch prediction mode if the firmware interface is available. + The modified branch prediction mode improves the behaviour in + regard to speculative execution. + + With the option enabled the kernel parameter "nobp=0" or "nospec" + can be used to run the kernel in the normal branch prediction mode. + + With the option disabled the modified branch prediction mode is + enabled with the "nobp=1" kernel parameter. + + If unsure, say N. + +config EXPOLINE + def_bool n + prompt "Avoid speculative indirect branches in the kernel" + help + Compile the kernel with the expoline compiler options to guard + against kernel-to-user data leaks by avoiding speculative indirect + branches. + Requires a compiler with -mindirect-branch=thunk support for full + protection. The kernel may run slower. + + If unsure, say N. + +choice + prompt "Expoline default" + depends on EXPOLINE + default EXPOLINE_FULL + +config EXPOLINE_OFF + bool "spectre_v2=off" + +config EXPOLINE_AUTO + bool "spectre_v2=auto" + +config EXPOLINE_FULL + bool "spectre_v2=on" + +endchoice + endmenu menu "Memory setup" @@ -812,6 +858,7 @@ config PFAULT config SHARED_KERNEL bool "VM shared kernel support" depends on !JUMP_LABEL + depends on !ALTERNATIVES help Select this option, if you want to share the text segment of the Linux kernel between different VM guests. 
This reduces memory diff --git a/arch/s390/Makefile b/arch/s390/Makefile index dac821cfcd43..ec3fa105f448 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -81,6 +81,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y) cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack endif +ifdef CONFIG_EXPOLINE + ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y) + CC_FLAGS_EXPOLINE := -mindirect-branch=thunk + CC_FLAGS_EXPOLINE += -mfunction-return=thunk + CC_FLAGS_EXPOLINE += -mindirect-branch-table + export CC_FLAGS_EXPOLINE + cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE + endif +endif + ifdef CONFIG_FUNCTION_TRACER # make use of hotpatch feature if the compiler supports it cc_hotpatch := -mhotpatch=0,3 diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c index 992e630c227b..6f4985f357c6 100644 --- a/arch/s390/crypto/crc32-vx.c +++ b/arch/s390/crypto/crc32-vx.c @@ -238,6 +238,7 @@ static struct shash_alg crc32_vx_algs[] = { .cra_name = "crc32", .cra_driver_name = "crc32-vx", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CRC32_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crc_ctx), .cra_module = THIS_MODULE, @@ -258,6 +259,7 @@ static struct shash_alg crc32_vx_algs[] = { .cra_name = "crc32be", .cra_driver_name = "crc32be-vx", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CRC32_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crc_ctx), .cra_module = THIS_MODULE, @@ -278,6 +280,7 @@ static struct shash_alg crc32_vx_algs[] = { .cra_name = "crc32c", .cra_driver_name = "crc32c-vx", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CRC32_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crc_ctx), .cra_module = THIS_MODULE, diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S index e8077f0971f8..2bf01ba44107 100644 --- a/arch/s390/crypto/crc32be-vx.S +++ b/arch/s390/crypto/crc32be-vx.S @@ -13,6 +13,7 @@ */ #include +#include #include /* Vector register range containing CRC-32 constants */ @@ -67,6 +68,8 @@ .previous + GEN_BR_THUNK %r14 + .text /* * The CRC-32 function(s) use these calling conventions: @@ -203,6 +206,6 @@ ENTRY(crc32_be_vgfm_16) .Ldone: VLGVF %r2,%v2,3 - br %r14 + BR_EX %r14 .previous diff --git a/arch/s390/crypto/crc32le-vx.S b/arch/s390/crypto/crc32le-vx.S index d8c67a58c0c5..7d6f568bd3ad 100644 --- a/arch/s390/crypto/crc32le-vx.S +++ b/arch/s390/crypto/crc32le-vx.S @@ -14,6 +14,7 @@ */ #include +#include #include /* Vector register range containing CRC-32 constants */ @@ -76,6 +77,7 @@ .previous + GEN_BR_THUNK %r14 .text @@ -264,6 +266,6 @@ crc32_le_vgfm_generic: .Ldone: VLGVF %r2,%v2,2 - br %r14 + BR_EX %r14 .previous diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index cf8a2d92467f..45eb5999110b 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -320,7 +320,7 @@ static void hypfs_kill_super(struct super_block *sb) if (sb->s_root) hypfs_delete_tree(sb->s_root); - if (sb_info->update_file) + if (sb_info && sb_info->update_file) hypfs_remove(sb_info->update_file); kfree(sb->s_fs_info); sb->s_fs_info = NULL; diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h new file mode 100644 index 000000000000..955d620db23e --- /dev/null +++ b/arch/s390/include/asm/alternative-asm.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_ALTERNATIVE_ASM_H +#define _ASM_S390_ALTERNATIVE_ASM_H + +#ifdef __ASSEMBLY__ + +/* + * Check the length of an 
instruction sequence. The length may not be larger + * than 254 bytes and it has to be divisible by 2. + */ +.macro alt_len_check start,end + .if ( \end - \start ) > 254 + .error "cpu alternatives does not support instructions blocks > 254 bytes\n" + .endif + .if ( \end - \start ) % 2 + .error "cpu alternatives instructions length is odd\n" + .endif +.endm + +/* + * Issue one struct alt_instr descriptor entry (need to put it into + * the section .altinstructions, see below). This entry contains + * enough information for the alternatives patching code to patch an + * instruction. See apply_alternatives(). + */ +.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature + .long \orig_start - . + .long \alt_start - . + .word \feature + .byte \orig_end - \orig_start + .byte \alt_end - \alt_start +.endm + +/* + * Fill up @bytes with nops. The macro emits 6-byte nop instructions + * for the bulk of the area, possibly followed by a 4-byte and/or + * a 2-byte nop if the size of the area is not divisible by 6. + */ +.macro alt_pad_fill bytes + .fill ( \bytes ) / 6, 6, 0xc0040000 + .fill ( \bytes ) % 6 / 4, 4, 0x47000000 + .fill ( \bytes ) % 6 % 4 / 2, 2, 0x0700 +.endm + +/* + * Fill up @bytes with nops. If the number of bytes is larger + * than 6, emit a jg instruction to branch over all nops, then + * fill an area of size (@bytes - 6) with nop instructions. + */ +.macro alt_pad bytes + .if ( \bytes > 0 ) + .if ( \bytes > 6 ) + jg . + \bytes + alt_pad_fill \bytes - 6 + .else + alt_pad_fill \bytes + .endif + .endif +.endm + +/* + * Define an alternative between two instructions. If @feature is + * present, early code in apply_alternatives() replaces @oldinstr with + * @newinstr. ".skip" directive takes care of proper instruction padding + * in case @newinstr is longer than @oldinstr. + */ +.macro ALTERNATIVE oldinstr, newinstr, feature + .pushsection .altinstr_replacement,"ax" +770: \newinstr +771: .popsection +772: \oldinstr +773: alt_len_check 770b, 771b + alt_len_check 772b, 773b + alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) ) +774: .pushsection .altinstructions,"a" + alt_entry 772b, 774b, 770b, 771b, \feature + .popsection +.endm + +/* + * Define an alternative between two instructions. If @feature is + * present, early code in apply_alternatives() replaces @oldinstr with + * @newinstr. ".skip" directive takes care of proper instruction padding + * in case @newinstr is longer than @oldinstr. 
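Both alt_entry above and the struct alt_instr it fills in (see alternative.h below) store self-relative s32 offsets (.long target - .), which keeps the tables valid wherever the image is loaded. A hedged sketch of how patch-time code recovers the pointers, mirroring the usual apply_alternatives() idiom; the struct here is a local model of the real alt_instr:

#include <linux/types.h>

struct alt_instr_model {		/* mirrors struct alt_instr */
	s32 instr_offset;		/* original instruction */
	s32 repl_offset;		/* replacement instruction */
	u16 facility;
	u8  instrlen;
	u8  replacementlen;
} __packed;

static inline u8 *alt_orig(struct alt_instr_model *a)
{
	/* each offset is relative to the field that holds it */
	return (u8 *)&a->instr_offset + a->instr_offset;
}

static inline u8 *alt_repl(struct alt_instr_model *a)
{
	return (u8 *)&a->repl_offset + a->repl_offset;
}

Patching then reduces to: for each entry whose facility bit is installed, copy replacementlen bytes from alt_repl() over alt_orig() and pad the remaining instrlen - replacementlen bytes with nops.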
+ */ +.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 + .pushsection .altinstr_replacement,"ax" +770: \newinstr1 +771: \newinstr2 +772: .popsection +773: \oldinstr +774: alt_len_check 770b, 771b + alt_len_check 771b, 772b + alt_len_check 773b, 774b + .if ( 771b - 770b > 772b - 771b ) + alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) ) + .else + alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) ) + .endif +775: .pushsection .altinstructions,"a" + alt_entry 773b, 775b, 770b, 771b,\feature1 + alt_entry 773b, 775b, 771b, 772b,\feature2 + .popsection +.endm + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_S390_ALTERNATIVE_ASM_H */ diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h new file mode 100644 index 000000000000..a72002056b54 --- /dev/null +++ b/arch/s390/include/asm/alternative.h @@ -0,0 +1,149 @@ +#ifndef _ASM_S390_ALTERNATIVE_H +#define _ASM_S390_ALTERNATIVE_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +struct alt_instr { + s32 instr_offset; /* original instruction */ + s32 repl_offset; /* offset to replacement instruction */ + u16 facility; /* facility bit set for replacement */ + u8 instrlen; /* length of original instruction */ + u8 replacementlen; /* length of new instruction */ +} __packed; + +void apply_alternative_instructions(void); +void apply_alternatives(struct alt_instr *start, struct alt_instr *end); + +/* + * |661: |662: |6620 |663: + * +-----------+---------------------+ + * | oldinstr | oldinstr_padding | + * | +----------+----------+ + * | | | | + * | | >6 bytes |6/4/2 nops| + * | |6 bytes jg-----------> + * +-----------+---------------------+ + * ^^ static padding ^^ + * + * .altinstr_replacement section + * +---------------------+-----------+ + * |6641: |6651: + * | alternative instr 1 | + * +-----------+---------+- - - - - -+ + * |6642: |6652: | + * | alternative instr 2 | padding + * +---------------------+- - - - - -+ + * ^ runtime ^ + * + * .altinstructions section + * +---------------------------------+ + * | alt_instr entries for each | + * | alternative instr | + * +---------------------------------+ + */ + +#define b_altinstr(num) "664"#num +#define e_altinstr(num) "665"#num + +#define e_oldinstr_pad_end "663" +#define oldinstr_len "662b-661b" +#define oldinstr_total_len e_oldinstr_pad_end"b-661b" +#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b" +#define oldinstr_pad_len(num) \ + "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \ + "((" altinstr_len(num) ")-(" oldinstr_len "))" + +#define INSTR_LEN_SANITY_CHECK(len) \ + ".if " len " > 254\n" \ + "\t.error \"cpu alternatives does not support instructions " \ + "blocks > 254 bytes\"\n" \ + ".endif\n" \ + ".if (" len ") %% 2\n" \ + "\t.error \"cpu alternatives instructions length is odd\"\n" \ + ".endif\n" + +#define OLDINSTR_PADDING(oldinstr, num) \ + ".if " oldinstr_pad_len(num) " > 6\n" \ + "\tjg " e_oldinstr_pad_end "f\n" \ + "6620:\n" \ + "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \ + ".else\n" \ + "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n" \ + "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n" \ + "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n" \ + ".endif\n" + +#define OLDINSTR(oldinstr, num) \ + "661:\n\t" oldinstr "\n662:\n" \ + OLDINSTR_PADDING(oldinstr, num) \ + e_oldinstr_pad_end ":\n" \ + INSTR_LEN_SANITY_CHECK(oldinstr_len) + +#define OLDINSTR_2(oldinstr, num1, num2) \ + "661:\n\t" oldinstr "\n662:\n" \ + ".if " altinstr_len(num1) " < " 
altinstr_len(num2) "\n" \ + OLDINSTR_PADDING(oldinstr, num2) \ + ".else\n" \ + OLDINSTR_PADDING(oldinstr, num1) \ + ".endif\n" \ + e_oldinstr_pad_end ":\n" \ + INSTR_LEN_SANITY_CHECK(oldinstr_len) + +#define ALTINSTR_ENTRY(facility, num) \ + "\t.long 661b - .\n" /* old instruction */ \ + "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \ + "\t.word " __stringify(facility) "\n" /* facility bit */ \ + "\t.byte " oldinstr_total_len "\n" /* source len */ \ + "\t.byte " altinstr_len(num) "\n" /* alt instruction len */ + +#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \ + b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \ + INSTR_LEN_SANITY_CHECK(altinstr_len(num)) + +/* alternative assembly primitive: */ +#define ALTERNATIVE(oldinstr, altinstr, facility) \ + ".pushsection .altinstr_replacement, \"ax\"\n" \ + ALTINSTR_REPLACEMENT(altinstr, 1) \ + ".popsection\n" \ + OLDINSTR(oldinstr, 1) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(facility, 1) \ + ".popsection\n" + +#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\ + ".pushsection .altinstr_replacement, \"ax\"\n" \ + ALTINSTR_REPLACEMENT(altinstr1, 1) \ + ALTINSTR_REPLACEMENT(altinstr2, 2) \ + ".popsection\n" \ + OLDINSTR_2(oldinstr, 1, 2) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(facility1, 1) \ + ALTINSTR_ENTRY(facility2, 2) \ + ".popsection\n" + +/* + * Alternative instructions for different CPU types or capabilities. + * + * This allows to use optimized instructions even on generic binary + * kernels. + * + * oldinstr is padded with jump and nops at compile time if altinstr is + * longer. altinstr is padded with jump and nops at run-time during patching. + * + * For non barrier like inlines please define new variants + * without volatile and memory clobber. 
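With ALTERNATIVE() above and the alternative() wrapper defined just below, call sites stay ordinary inline functions. A usage sketch, not taken from this patch: the facility number 45 is purely illustrative, and both instruction strings must be operand-free because the macro passes no asm operands:

static inline void fast_serialize(void)
{
	/* patched from full serialization to the cheaper fast-BCR form
	 * at boot if the assumed facility bit 45 is installed */
	alternative("bcr 15,0", "bcr 14,0", 45);
}

Both sequences here are two bytes, so no padding is emitted; with differing lengths the macros pad the shorter side with nops as described in the block comment above.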
+ */ +#define alternative(oldinstr, altinstr, facility) \ + asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory") + +#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \ + asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \ + altinstr2, facility2) ::: "memory") + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_S390_ALTERNATIVE_H */ diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 10432607a573..f9eddbca79d2 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -49,6 +49,30 @@ do { \ #define __smp_mb__before_atomic() barrier() #define __smp_mb__after_atomic() barrier() +/** + * array_index_mask_nospec - generate a mask for array_idx() that is + * ~0UL when the bounds check succeeds and 0 otherwise + * @index: array element index + * @size: number of elements in array + */ +#define array_index_mask_nospec array_index_mask_nospec +static inline unsigned long array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + unsigned long mask; + + if (__builtin_constant_p(size) && size > 0) { + asm(" clgr %2,%1\n" + " slbgr %0,%0\n" + :"=d" (mask) : "d" (size-1), "d" (index) :"cc"); + return mask; + } + asm(" clgr %1,%2\n" + " slbgr %0,%0\n" + :"=d" (mask) : "d" (size), "d" (index) :"cc"); + return ~mask; +} + #include #endif /* __ASM_BARRIER_H */ diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 1b60eb3676d5..5e6a63641a5f 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -263,7 +263,6 @@ typedef struct compat_siginfo { #define si_overrun _sifields._timer._overrun #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL /* * A pointer passed in from user mode. This should not diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index 05480e4cc5ca..bc764a674594 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -116,7 +116,7 @@ struct hws_basic_entry { struct hws_diag_entry { unsigned int def:16; /* 0-15 Data Entry Format */ - unsigned int R:14; /* 16-19 and 20-30 reserved */ + unsigned int R:15; /* 16-19 and 20-30 reserved */ unsigned int I:1; /* 31 entry valid or invalid */ u8 data[]; /* Machine-dependent sample data */ } __packed; @@ -132,7 +132,9 @@ struct hws_trailer_entry { unsigned int f:1; /* 0 - Block Full Indicator */ unsigned int a:1; /* 1 - Alert request control */ unsigned int t:1; /* 2 - Timestamp format */ - unsigned long long:61; /* 3 - 63: Reserved */ + unsigned int :29; /* 3 - 31: Reserved */ + unsigned int bsdes:16; /* 32-47: size of basic SDE */ + unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */ }; unsigned long long flags; /* 0 - 63: All indicators */ }; diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h index eb5323161f11..bb63b2afdf6f 100644 --- a/arch/s390/include/asm/eadm.h +++ b/arch/s390/include/asm/eadm.h @@ -4,7 +4,7 @@ #include #include -#include +#include struct arqb { u64 data; diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 9a3cb3983c01..1a61b1b997f2 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -194,13 +194,14 @@ struct arch_elf_state { #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE PAGE_SIZE -/* - * This is the base location for PIE (ET_DYN with INTERP) loads. 
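array_index_mask_nospec() above returns ~0UL only when index < size, so ANDing it into the index clamps any speculatively out-of-bounds value to 0 without a branch the CPU could mispredict. A usage sketch mirroring the generic array_index_nospec() pattern from linux/nospec.h:

static unsigned long table_load(const unsigned long *table,
				unsigned long index, unsigned long size)
{
	if (index >= size)
		return 0;

	/* under misspeculation the mask is 0, forcing index to 0 */
	index &= array_index_mask_nospec(index, size);
	return table[index];
}

The architectural path is unchanged, since the bounds check has already passed; only the speculative path is constrained.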
On - * 64-bit, this is raised to 4GB to leave the entire 32-bit address - * space open for things that want to use the area for 32-bit pointers. - */ -#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \ - 0x100000000UL) +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. 64-bit + tasks are aligned to 4GB. */ +#define ELF_ET_DYN_BASE (is_compat_task() ? \ + (STACK_TOP / 3 * 2) : \ + (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1)) /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. */ diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h index f040644575b7..2d58478c2745 100644 --- a/arch/s390/include/asm/facility.h +++ b/arch/s390/include/asm/facility.h @@ -15,6 +15,24 @@ #define MAX_FACILITY_BIT (sizeof(((struct lowcore *)0)->stfle_fac_list) * 8) +static inline void __set_facility(unsigned long nr, void *facilities) +{ + unsigned char *ptr = (unsigned char *) facilities; + + if (nr >= MAX_FACILITY_BIT) + return; + ptr[nr >> 3] |= 0x80 >> (nr & 7); +} + +static inline void __clear_facility(unsigned long nr, void *facilities) +{ + unsigned char *ptr = (unsigned char *) facilities; + + if (nr >= MAX_FACILITY_BIT) + return; + ptr[nr >> 3] &= ~(0x80 >> (nr & 7)); +} + static inline int __test_facility(unsigned long nr, void *facilities) { unsigned char *ptr; diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 51375e766e90..d660e784e445 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -210,7 +210,8 @@ struct kvm_s390_sie_block { __u16 ipa; /* 0x0056 */ __u32 ipb; /* 0x0058 */ __u32 scaoh; /* 0x005c */ - __u8 reserved60; /* 0x0060 */ +#define FPF_BPBC 0x20 + __u8 fpf; /* 0x0060 */ #define ECB_GS 0x40 #define ECB_TE 0x10 #define ECB_SRSI 0x04 diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 917f7344cab6..88a212df0dbc 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -140,7 +140,9 @@ struct lowcore { /* Per cpu primary space access list */ __u32 paste[16]; /* 0x0400 */ - __u8 pad_0x04c0[0x0e00-0x0440]; /* 0x0440 */ + /* br %r1 trampoline */ + __u16 br_r1_trampoline; /* 0x0440 */ + __u8 pad_0x0442[0x0e00-0x0442]; /* 0x0442 */ /* * 0xe00 contains the address of the IPL Parameter Information @@ -155,7 +157,8 @@ struct lowcore { __u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */ /* Extended facility list */ - __u64 stfle_fac_list[32]; /* 0x0f00 */ + __u64 stfle_fac_list[16]; /* 0x0f00 */ + __u64 alt_stfle_fac_list[16]; /* 0x0f80 */ __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */ /* Pointer to the machine check extended save area */ diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 43607bb12cc2..a6cc744ff5fb 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -28,7 +28,7 @@ static inline int init_new_context(struct task_struct *tsk, #ifdef CONFIG_PGSTE mm->context.alloc_pgste = page_table_allocate_pgste || test_thread_flag(TIF_PGSTE) || - current->mm->context.alloc_pgste; + (current->mm && current->mm->context.alloc_pgste); mm->context.has_pgste = 0; mm->context.use_skey = 0; mm->context.use_cmma = 0; diff --git a/arch/s390/include/asm/nospec-branch.h 
b/arch/s390/include/asm/nospec-branch.h new file mode 100644 index 000000000000..b4bd8c41e9d3 --- /dev/null +++ b/arch/s390/include/asm/nospec-branch.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_EXPOLINE_H +#define _ASM_S390_EXPOLINE_H + +#ifndef __ASSEMBLY__ + +#include + +extern int nospec_disable; + +void nospec_init_branches(void); +void nospec_auto_detect(void); +void nospec_revert(s32 *start, s32 *end); + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_S390_EXPOLINE_H */ diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h new file mode 100644 index 000000000000..9a56e738d645 --- /dev/null +++ b/arch/s390/include/asm/nospec-insn.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_NOSPEC_ASM_H +#define _ASM_S390_NOSPEC_ASM_H + +#include +#include + +#ifdef __ASSEMBLY__ + +#ifdef CONFIG_EXPOLINE + +_LC_BR_R1 = __LC_BR_R1 + +/* + * The expoline macros are used to create thunks in the same format + * as gcc generates them. The 'comdat' section flag makes sure that + * the various thunks are merged into a single copy. + */ + .macro __THUNK_PROLOG_NAME name + .pushsection .text.\name,"axG",@progbits,\name,comdat + .globl \name + .hidden \name + .type \name,@function +\name: + .cfi_startproc + .endm + + .macro __THUNK_EPILOG + .cfi_endproc + .popsection + .endm + + .macro __THUNK_PROLOG_BR r1,r2 + __THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1 + .endm + + .macro __THUNK_PROLOG_BC d0,r1,r2 + __THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1 + .endm + + .macro __THUNK_BR r1,r2 + jg __s390x_indirect_jump_r\r2\()use_r\r1 + .endm + + .macro __THUNK_BC d0,r1,r2 + jg __s390x_indirect_branch_\d0\()_\r2\()use_\r1 + .endm + + .macro __THUNK_BRASL r1,r2,r3 + brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2 + .endm + + .macro __DECODE_RR expand,reg,ruse + .set __decode_fail,1 + .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \reg,%r\r1 + .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \ruse,%r\r2 + \expand \r1,\r2 + .set __decode_fail,0 + .endif + .endr + .endif + .endr + .if __decode_fail == 1 + .error "__DECODE_RR failed" + .endif + .endm + + .macro __DECODE_RRR expand,rsave,rtarget,ruse + .set __decode_fail,1 + .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \rsave,%r\r1 + .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \rtarget,%r\r2 + .irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \ruse,%r\r3 + \expand \r1,\r2,\r3 + .set __decode_fail,0 + .endif + .endr + .endif + .endr + .endif + .endr + .if __decode_fail == 1 + .error "__DECODE_RRR failed" + .endif + .endm + + .macro __DECODE_DRR expand,disp,reg,ruse + .set __decode_fail,1 + .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \reg,%r\r1 + .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \ruse,%r\r2 + \expand \disp,\r1,\r2 + .set __decode_fail,0 + .endif + .endr + .endif + .endr + .if __decode_fail == 1 + .error "__DECODE_DRR failed" + .endif + .endm + + .macro __THUNK_EX_BR reg,ruse + # Be very careful when adding instructions to this macro! + # The ALTERNATIVE replacement code has a .+10 which targets + # the "br \reg" after the code has been patched. +#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES + exrl 0,555f + j . +#else + .ifc \reg,%r1 + ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35 + j . + .else + larl \ruse,555f + ex 0,0(\ruse) + j . + .endif +#endif +555: br \reg + .endm + + .macro __THUNK_EX_BC disp,reg,ruse +#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES + exrl 0,556f + j . 
+#else + larl \ruse,556f + ex 0,0(\ruse) + j . +#endif +556: b \disp(\reg) + .endm + + .macro GEN_BR_THUNK reg,ruse=%r1 + __DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse + __THUNK_EX_BR \reg,\ruse + __THUNK_EPILOG + .endm + + .macro GEN_B_THUNK disp,reg,ruse=%r1 + __DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse + __THUNK_EX_BC \disp,\reg,\ruse + __THUNK_EPILOG + .endm + + .macro BR_EX reg,ruse=%r1 +557: __DECODE_RR __THUNK_BR,\reg,\ruse + .pushsection .s390_indirect_branches,"a",@progbits + .long 557b-. + .popsection + .endm + + .macro B_EX disp,reg,ruse=%r1 +558: __DECODE_DRR __THUNK_BC,\disp,\reg,\ruse + .pushsection .s390_indirect_branches,"a",@progbits + .long 558b-. + .popsection + .endm + + .macro BASR_EX rsave,rtarget,ruse=%r1 +559: __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse + .pushsection .s390_indirect_branches,"a",@progbits + .long 559b-. + .popsection + .endm + +#else + .macro GEN_BR_THUNK reg,ruse=%r1 + .endm + + .macro GEN_B_THUNK disp,reg,ruse=%r1 + .endm + + .macro BR_EX reg,ruse=%r1 + br \reg + .endm + + .macro B_EX disp,reg,ruse=%r1 + b \disp(\reg) + .endm + + .macro BASR_EX rsave,rtarget,ruse=%r1 + basr \rsave,\rtarget + .endm +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_S390_NOSPEC_ASM_H */ diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h index 419e83fa4721..ba22a6ea51a1 100644 --- a/arch/s390/include/asm/pci_insn.h +++ b/arch/s390/include/asm/pci_insn.h @@ -82,6 +82,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range); int zpci_load(u64 *data, u64 req, u64 offset); int zpci_store(u64 data, u64 req, u64 offset); int zpci_store_block(const u64 *data, u64 req, u64 offset); -void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc); +int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc); #endif diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 9cf92abe23c3..0a39cd102c49 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -89,6 +89,7 @@ void cpu_detect_mhz_feature(void); extern const struct seq_operations cpuinfo_op; extern int sysctl_ieee_emulation_warnings; extern void execve_tail(void); +extern void __bpon(void); /* * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. 
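The array_index_mask_nospec() implementation added to barrier.h above returns ~0UL when index is below size and 0 otherwise, using a compare-and-borrow sequence instead of a conditional branch that the CPU could speculate past. As an aside, a minimal user-space C model of how callers consume such a mask (this mirrors the pattern of array_index_nospec() in include/linux/nospec.h; the sketch assumes index and size fit in a signed long, is not overflow-safe like the kernel helpers, and is not part of this patch):

/* Illustrative sketch only -- not part of the patch. */
#include <stddef.h>

static unsigned long mask_nospec(unsigned long index, unsigned long size)
{
	/* ~0UL when index < size, 0 otherwise; branch-free.
	 * Assumes arithmetic right shift of a negative long. */
	return (unsigned long)(((long)index - (long)size) >>
			       (sizeof(long) * 8 - 1));
}

static int read_element(const int *array, size_t nr, size_t index)
{
	unsigned long mask = mask_nospec(index, nr);

	if (index >= nr)
		return -1;
	/* Even if the bounds check is misspeculated, the mask is 0
	 * and the access is clamped to array[0], which is in bounds. */
	return array[index & mask];
}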
@@ -377,6 +378,9 @@ extern void memcpy_absolute(void *, void *, size_t); memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \ } while (0) +extern int s390_isolate_bp(void); +extern int s390_isolate_bp_guest(void); + #endif /* __ASSEMBLY__ */ #endif /* __ASM_S390_PROCESSOR_H */ diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h index ea8896ba5afc..2502d05403ef 100644 --- a/arch/s390/include/asm/runtime_instr.h +++ b/arch/s390/include/asm/runtime_instr.h @@ -86,6 +86,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next, load_runtime_instr_cb(&runtime_instr_empty_cb); } -void exit_thread_runtime_instr(void); +struct task_struct; + +void runtime_instr_release(struct task_struct *tsk); #endif /* _RUNTIME_INSTR_H */ diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h index c21fe1d57c00..c61b2cc1a8a8 100644 --- a/arch/s390/include/asm/switch_to.h +++ b/arch/s390/include/asm/switch_to.h @@ -30,21 +30,20 @@ static inline void restore_access_regs(unsigned int *acrs) asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs)); } -#define switch_to(prev,next,last) do { \ - if (prev->mm) { \ - save_fpu_regs(); \ - save_access_regs(&prev->thread.acrs[0]); \ - save_ri_cb(prev->thread.ri_cb); \ - save_gs_cb(prev->thread.gs_cb); \ - } \ - if (next->mm) { \ - update_cr_regs(next); \ - set_cpu_flag(CIF_FPU); \ - restore_access_regs(&next->thread.acrs[0]); \ - restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ - restore_gs_cb(next->thread.gs_cb); \ - } \ - prev = __switch_to(prev,next); \ +#define switch_to(prev, next, last) do { \ + /* save_fpu_regs() sets the CIF_FPU flag, which enforces \ + * a restore of the floating point / vector registers as \ + * soon as the next task returns to user space \ + */ \ + save_fpu_regs(); \ + save_access_regs(&prev->thread.acrs[0]); \ + save_ri_cb(prev->thread.ri_cb); \ + save_gs_cb(prev->thread.gs_cb); \ + update_cr_regs(next); \ + restore_access_regs(&next->thread.acrs[0]); \ + restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ + restore_gs_cb(next->thread.gs_cb); \ + prev = __switch_to(prev, next); \ } while (0) #endif /* __ASM_SWITCH_TO_H */ diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 0880a37b6d3b..301b4f70bf31 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -60,6 +60,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */ #define TIF_PATCH_PENDING 5 /* pending live patching update */ #define TIF_PGSTE 6 /* New mm's will use 4K page tables */ +#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */ +#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */ #define TIF_31BIT 16 /* 32bit process */ #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ @@ -80,6 +82,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #define _TIF_UPROBE _BITUL(TIF_UPROBE) #define _TIF_GUARDED_STORAGE _BITUL(TIF_GUARDED_STORAGE) #define _TIF_PATCH_PENDING _BITUL(TIF_PATCH_PENDING) +#define _TIF_ISOLATE_BP _BITUL(TIF_ISOLATE_BP) +#define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST) #define _TIF_31BIT _BITUL(TIF_31BIT) #define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP) diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 55de4eb73604..de0a8b17bcaa 100644 --- a/arch/s390/include/asm/topology.h +++ 
b/arch/s390/include/asm/topology.h @@ -51,6 +51,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu); static inline void topology_init_early(void) { } static inline void topology_schedule_update(void) { } static inline int topology_cpu_init(struct cpu *cpu) { return 0; } +static inline int topology_cpu_dedicated(int cpu_nr) { return 0; } static inline void topology_expect_change(void) { } #endif /* CONFIG_SCHED_TOPOLOGY */ diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 9ad172dcd912..a3938db010f7 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -228,6 +228,7 @@ struct kvm_guest_debug_arch { #define KVM_SYNC_RICCB (1UL << 7) #define KVM_SYNC_FPRS (1UL << 8) #define KVM_SYNC_GSCB (1UL << 9) +#define KVM_SYNC_BPBC (1UL << 10) /* length and alignment of the sdnx as a power of two */ #define SDNXC 8 #define SDNXL (1UL << SDNXC) @@ -251,7 +252,9 @@ struct kvm_sync_regs { }; __u8 reserved[512]; /* for future vector expansion */ __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */ - __u8 padding1[52]; /* riccb needs to be 64byte aligned */ + __u8 bpbc : 1; /* bp mode */ + __u8 reserved2 : 7; + __u8 padding1[51]; /* riccb needs to be 64byte aligned */ __u8 riccb[64]; /* runtime instrumentation controls block */ __u8 padding2[192]; /* sdnx needs to be 256byte aligned */ union { diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h index 3510c0fd06f4..39d901476ee5 100644 --- a/arch/s390/include/uapi/asm/socket.h +++ b/arch/s390/include/uapi/asm/socket.h @@ -111,4 +111,7 @@ #define SO_ZEROCOPY 60 +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* _ASM_SOCKET_H */ diff --git a/arch/s390/include/uapi/asm/virtio-ccw.h b/arch/s390/include/uapi/asm/virtio-ccw.h index 967aad390105..9e62587d9472 100644 --- a/arch/s390/include/uapi/asm/virtio-ccw.h +++ b/arch/s390/include/uapi/asm/virtio-ccw.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ /* * Definitions for virtio-ccw devices. 
* diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 4ce2d05929a7..e0784fff07f5 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -29,6 +29,7 @@ UBSAN_SANITIZE_early.o := n # ifneq ($(CC_FLAGS_MARCH),-march=z900) CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH) +CFLAGS_REMOVE_als.o += $(CC_FLAGS_EXPOLINE) CFLAGS_als.o += -march=z900 AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH) AFLAGS_head.o += -march=z900 @@ -57,10 +58,14 @@ obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o -obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o +obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o +obj-y += nospec-branch.o extra-y += head.o head64.o vmlinux.lds +obj-$(CONFIG_SYSFS) += nospec-sysfs.o +CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE) + obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c new file mode 100644 index 000000000000..b57b293998dc --- /dev/null +++ b/arch/s390/kernel/alternative.c @@ -0,0 +1,112 @@ +#include +#include +#include +#include + +#define MAX_PATCH_LEN (255 - 1) + +static int __initdata_or_module alt_instr_disabled; + +static int __init disable_alternative_instructions(char *str) +{ + alt_instr_disabled = 1; + return 0; +} + +early_param("noaltinstr", disable_alternative_instructions); + +struct brcl_insn { + u16 opc; + s32 disp; +} __packed; + +static u16 __initdata_or_module nop16 = 0x0700; +static u32 __initdata_or_module nop32 = 0x47000000; +static struct brcl_insn __initdata_or_module nop48 = { + 0xc004, 0 +}; + +static const void *nops[] __initdata_or_module = { + &nop16, + &nop32, + &nop48 +}; + +static void __init_or_module add_jump_padding(void *insns, unsigned int len) +{ + struct brcl_insn brcl = { + 0xc0f4, + len / 2 + }; + + memcpy(insns, &brcl, sizeof(brcl)); + insns += sizeof(brcl); + len -= sizeof(brcl); + + while (len > 0) { + memcpy(insns, &nop16, 2); + insns += 2; + len -= 2; + } +} + +static void __init_or_module add_padding(void *insns, unsigned int len) +{ + if (len > 6) + add_jump_padding(insns, len); + else if (len >= 2) + memcpy(insns, nops[len / 2 - 1], len); +} + +static void __init_or_module __apply_alternatives(struct alt_instr *start, + struct alt_instr *end) +{ + struct alt_instr *a; + u8 *instr, *replacement; + u8 insnbuf[MAX_PATCH_LEN]; + + /* + * The scan order should be from start to end. A later scanned + * alternative code can overwrite previously scanned alternative code. 
+ */ + for (a = start; a < end; a++) { + int insnbuf_sz = 0; + + instr = (u8 *)&a->instr_offset + a->instr_offset; + replacement = (u8 *)&a->repl_offset + a->repl_offset; + + if (!__test_facility(a->facility, + S390_lowcore.alt_stfle_fac_list)) + continue; + + if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) { + WARN_ONCE(1, "cpu alternatives instructions length is " + "odd, skipping patching\n"); + continue; + } + + memcpy(insnbuf, replacement, a->replacementlen); + insnbuf_sz = a->replacementlen; + + if (a->instrlen > a->replacementlen) { + add_padding(insnbuf + a->replacementlen, + a->instrlen - a->replacementlen); + insnbuf_sz += a->instrlen - a->replacementlen; + } + + s390_kernel_write(instr, insnbuf, insnbuf_sz); + } +} + +void __init_or_module apply_alternatives(struct alt_instr *start, + struct alt_instr *end) +{ + if (!alt_instr_disabled) + __apply_alternatives(start, end); +} + +extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; +void __init apply_alternative_instructions(void) +{ + apply_alternatives(__alt_instructions, __alt_instructions_end); +} diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 0e6d2b032484..4e69bf909e87 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -177,6 +177,7 @@ int main(void) OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count); OFFSET(__LC_GMAP, lowcore, gmap); OFFSET(__LC_PASTE, lowcore, paste); + OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline); /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ OFFSET(__LC_DUMP_REIPL, lowcore, ipib); /* hardware defined lowcore locations 0x1000 - 0x18ff */ diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S index f6c56009e822..b65874b0b412 100644 --- a/arch/s390/kernel/base.S +++ b/arch/s390/kernel/base.S @@ -9,18 +9,22 @@ #include #include +#include #include #include + GEN_BR_THUNK %r9 + GEN_BR_THUNK %r14 + ENTRY(s390_base_mcck_handler) basr %r13,0 0: lg %r15,__LC_PANIC_STACK # load panic stack aghi %r15,-STACK_FRAME_OVERHEAD larl %r1,s390_base_mcck_handler_fn - lg %r1,0(%r1) - ltgr %r1,%r1 + lg %r9,0(%r1) + ltgr %r9,%r9 jz 1f - basr %r14,%r1 + BASR_EX %r14,%r9 1: la %r1,4095 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) lpswe __LC_MCK_OLD_PSW @@ -37,10 +41,10 @@ ENTRY(s390_base_ext_handler) basr %r13,0 0: aghi %r15,-STACK_FRAME_OVERHEAD larl %r1,s390_base_ext_handler_fn - lg %r1,0(%r1) - ltgr %r1,%r1 + lg %r9,0(%r1) + ltgr %r9,%r9 jz 1f - basr %r14,%r1 + BASR_EX %r14,%r9 1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit lpswe __LC_EXT_OLD_PSW @@ -57,10 +61,10 @@ ENTRY(s390_base_pgm_handler) basr %r13,0 0: aghi %r15,-STACK_FRAME_OVERHEAD larl %r1,s390_base_pgm_handler_fn - lg %r1,0(%r1) - ltgr %r1,%r1 + lg %r9,0(%r1) + ltgr %r9,%r9 jz 1f - basr %r14,%r1 + BASR_EX %r14,%r9 lmg %r0,%r15,__LC_SAVE_AREA_SYNC lpswe __LC_PGM_OLD_PSW 1: lpswe disabled_wait_psw-0b(%r13) @@ -117,7 +121,7 @@ ENTRY(diag308_reset) larl %r4,.Lcontinue_psw # Restore PSW flags lpswe 0(%r4) .Lcontinue: - br %r14 + BR_EX %r14 .align 16 .Lrestart_psw: .long 0x00080000,0x80000000 + .Lrestart_part2 diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index f04db3779b34..79b7a3438d54 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -110,7 +110,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid) COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid) { - return sys_setgid((gid_t)gid); + return sys_setgid(low2highgid(gid)); } 
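For orientation, the alt_instr layout that __apply_alternatives() above iterates is not shown in this series; the following C sketch reconstructs it from the accessors used there (instr_offset, repl_offset, facility, instrlen, replacementlen), so treat the exact field widths as an assumption. Both offsets are self-relative, the facility bit gates whether the replacement is applied, and shorter replacements are nop-padded (or jump-padded when the padding exceeds 6 bytes):

/* Assumed layout, inferred from the accessors in __apply_alternatives(). */
struct alt_instr_sketch {
	int instr_offset;		/* original site, relative to this field */
	int repl_offset;		/* replacement, relative to this field */
	unsigned short facility;	/* facility bit gating the replacement */
	unsigned char instrlen;		/* length of the original, in bytes */
	unsigned char replacementlen;	/* length of the replacement, in bytes */
} __attribute__((packed));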
COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid) @@ -120,7 +120,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid) COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid) { - return sys_setuid((uid_t)uid); + return sys_setuid(low2highuid(uid)); } COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid) @@ -173,12 +173,12 @@ COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp, COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid) { - return sys_setfsuid((uid_t)uid); + return sys_setfsuid(low2highuid(uid)); } COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid) { - return sys_setfsgid((gid_t)gid); + return sys_setfsgid(low2highgid(gid)); } static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info) @@ -263,6 +263,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis return retval; } + groups_sort(group_info); retval = set_current_groups(group_info); put_group_info(group_info); diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index f7e82302a71e..2394557653d5 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -1548,6 +1548,7 @@ static struct s390_insn opcode_e7[] = { { "vfsq", 0xce, INSTR_VRR_VV000MM }, { "vfs", 0xe2, INSTR_VRR_VVV00MM }, { "vftci", 0x4a, INSTR_VRI_VVIMM }, + { "", 0, INSTR_INVALID } }; static struct s390_insn opcode_eb[] = { @@ -1953,7 +1954,7 @@ void show_code(struct pt_regs *regs) { char *mode = user_mode(regs) ? "User" : "Krnl"; unsigned char code[64]; - char buffer[64], *ptr; + char buffer[128], *ptr; mm_segment_t old_fs; unsigned long addr; int start, end, opsize, hops, i; @@ -2016,7 +2017,7 @@ void show_code(struct pt_regs *regs) start += opsize; pr_cont("%s", buffer); ptr = buffer; - ptr += sprintf(ptr, "\n "); + ptr += sprintf(ptr, "\n\t "); hops++; } pr_cont("\n"); diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index b945448b9eae..a3219837fa70 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -329,6 +329,11 @@ static noinline __init void setup_facility_list(void) { stfle(S390_lowcore.stfle_fac_list, ARRAY_SIZE(S390_lowcore.stfle_fac_list)); + memcpy(S390_lowcore.alt_stfle_fac_list, + S390_lowcore.stfle_fac_list, + sizeof(S390_lowcore.alt_stfle_fac_list)); + if (!IS_ENABLED(CONFIG_KERNEL_NOBP)) + __clear_facility(82, S390_lowcore.alt_stfle_fac_list); } static __init void detect_diag9c(void) @@ -375,8 +380,10 @@ static __init void detect_machine_facilities(void) S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; if (test_facility(40)) S390_lowcore.machine_flags |= MACHINE_FLAG_LPP; - if (test_facility(50) && test_facility(73)) + if (test_facility(50) && test_facility(73)) { S390_lowcore.machine_flags |= MACHINE_FLAG_TE; + __ctl_set_bit(0, 55); + } if (test_facility(51)) S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; if (test_facility(129)) { diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 7c6904d616d8..e928c2af6a10 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -25,6 +25,7 @@ #include #include #include +#include __PT_R0 = __PT_GPRS __PT_R1 = __PT_GPRS + 8 @@ -106,6 +107,7 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 3f 1: UPDATE_VTIME %r14,%r15,\timer + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 2: lg %r15,__LC_ASYNC_STACK # load async stack 3: la %r11,STACK_FRAME_OVERHEAD(%r15) .endm @@ -158,6 +160,72 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) tm off+\addr, \mask .endm + .macro BPOFF + 
.pushsection .altinstr_replacement, "ax" +660: .long 0xb2e8c000 + .popsection +661: .long 0x47000000 + .pushsection .altinstructions, "a" + .long 661b - . + .long 660b - . + .word 82 + .byte 4 + .byte 4 + .popsection + .endm + + .macro BPON + .pushsection .altinstr_replacement, "ax" +662: .long 0xb2e8d000 + .popsection +663: .long 0x47000000 + .pushsection .altinstructions, "a" + .long 663b - . + .long 662b - . + .word 82 + .byte 4 + .byte 4 + .popsection + .endm + + .macro BPENTER tif_ptr,tif_mask + .pushsection .altinstr_replacement, "ax" +662: .word 0xc004, 0x0000, 0x0000 # 6 byte nop + .word 0xc004, 0x0000, 0x0000 # 6 byte nop + .popsection +664: TSTMSK \tif_ptr,\tif_mask + jz . + 8 + .long 0xb2e8d000 + .pushsection .altinstructions, "a" + .long 664b - . + .long 662b - . + .word 82 + .byte 12 + .byte 12 + .popsection + .endm + + .macro BPEXIT tif_ptr,tif_mask + TSTMSK \tif_ptr,\tif_mask + .pushsection .altinstr_replacement, "ax" +662: jnz . + 8 + .long 0xb2e8d000 + .popsection +664: jz . + 8 + .long 0xb2e8c000 + .pushsection .altinstructions, "a" + .long 664b - . + .long 662b - . + .word 82 + .byte 8 + .byte 8 + .popsection + .endm + + GEN_BR_THUNK %r9 + GEN_BR_THUNK %r14 + GEN_BR_THUNK %r14,%r11 + .section .kprobes.text, "ax" .Ldummy: /* @@ -170,6 +238,11 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) */ nop 0 +ENTRY(__bpon) + .globl __bpon + BPON + BR_EX %r14 + /* * Scheduler resume function, called by switch_to * gpr2 = (task_struct *) prev @@ -193,9 +266,9 @@ ENTRY(__switch_to) mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP - bzr %r14 + jz 0f .insn s,0xb2800000,__LC_LPP # set program parameter - br %r14 +0: BR_EX %r14 .L__critical_start: @@ -207,9 +280,11 @@ ENTRY(__switch_to) */ ENTRY(sie64a) stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers + lg %r12,__LC_CURRENT stg %r2,__SF_EMPTY(%r15) # save control block pointer stg %r3,__SF_EMPTY+8(%r15) # save guest register save area xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0 + mvc __SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ? 
jno .Lsie_load_guest_gprs brasl %r14,load_fpu_regs # load guest fp/vx regs @@ -226,8 +301,12 @@ ENTRY(sie64a) jnz .Lsie_skip TSTMSK __LC_CPU_FLAGS,_CIF_FPU jo .Lsie_skip # exit if fp/vx regs changed + BPEXIT __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) .Lsie_entry: sie 0(%r14) +.Lsie_exit: + BPOFF + BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) .Lsie_skip: ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce @@ -248,9 +327,15 @@ ENTRY(sie64a) sie_exit: lg %r14,__SF_EMPTY+8(%r15) # load guest register save area stmg %r0,%r13,0(%r14) # save guest gprs 0-13 + xgr %r0,%r0 # clear guest registers to + xgr %r1,%r1 # prevent speculative use + xgr %r2,%r2 + xgr %r3,%r3 + xgr %r4,%r4 + xgr %r5,%r5 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers lg %r2,__SF_EMPTY+16(%r15) # return exit reason code - br %r14 + BR_EX %r14 .Lsie_fault: lghi %r14,-EFAULT stg %r14,__SF_EMPTY+16(%r15) # set exit reason code @@ -273,6 +358,7 @@ ENTRY(system_call) stpt __LC_SYNC_ENTER_TIMER .Lsysc_stmg: stmg %r8,%r15,__LC_SAVE_AREA_SYNC + BPOFF lg %r12,__LC_CURRENT lghi %r13,__TASK_thread lghi %r14,_PIF_SYSCALL @@ -281,12 +367,15 @@ ENTRY(system_call) la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs .Lsysc_vtime: UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC stg %r14,__PT_FLAGS(%r11) .Lsysc_do_svc: + # clear user controlled register to prevent speculative use + xgr %r0,%r0 # load address of system call table lg %r10,__THREAD_sysc_table(%r13,%r12) llgh %r8,__PT_INT_CODE+2(%r11) @@ -305,7 +394,7 @@ ENTRY(system_call) lgf %r9,0(%r8,%r10) # get system call add. 
TSTMSK __TI_flags(%r12),_TIF_TRACE jnz .Lsysc_tracesys - basr %r14,%r9 # call sys_xxxx + BASR_EX %r14,%r9 # call sys_xxxx stg %r2,__PT_R2(%r11) # store return value .Lsysc_return: @@ -317,6 +406,7 @@ ENTRY(system_call) jnz .Lsysc_work # check for work TSTMSK __LC_CPU_FLAGS,_CIF_WORK jnz .Lsysc_work + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP .Lsysc_restore: lg %r14,__LC_VDSO_PER_CPU lmg %r0,%r10,__PT_R0(%r11) @@ -481,7 +571,7 @@ ENTRY(system_call) lmg %r3,%r7,__PT_R3(%r11) stg %r7,STACK_FRAME_OVERHEAD(%r15) lg %r2,__PT_ORIG_GPR2(%r11) - basr %r14,%r9 # call sys_xxx + BASR_EX %r14,%r9 # call sys_xxx stg %r2,__PT_R2(%r11) # store return value .Lsysc_tracenogo: TSTMSK __TI_flags(%r12),_TIF_TRACE @@ -505,7 +595,7 @@ ENTRY(ret_from_fork) lmg %r9,%r10,__PT_R9(%r11) # load gprs ENTRY(kernel_thread_starter) la %r2,0(%r10) - basr %r14,%r9 + BASR_EX %r14,%r9 j .Lsysc_tracenogo /* @@ -514,6 +604,7 @@ ENTRY(kernel_thread_starter) ENTRY(pgm_check_handler) stpt __LC_SYNC_ENTER_TIMER + BPOFF stmg %r8,%r15,__LC_SAVE_AREA_SYNC lg %r10,__LC_LAST_BREAK lg %r12,__LC_CURRENT @@ -540,6 +631,7 @@ ENTRY(pgm_check_handler) aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 4f 2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP lg %r15,__LC_KERNEL_STACK lgr %r14,%r12 aghi %r14,__TASK_thread # pointer to thread_struct @@ -550,6 +642,15 @@ ENTRY(pgm_check_handler) 3: stg %r10,__THREAD_last_break(%r14) 4: la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) + # clear user controlled registers to prevent speculative use + xgr %r0,%r0 + xgr %r1,%r1 + xgr %r2,%r2 + xgr %r3,%r3 + xgr %r4,%r4 + xgr %r5,%r5 + xgr %r6,%r6 + xgr %r7,%r7 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC stmg %r8,%r9,__PT_PSW(%r11) mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC @@ -571,9 +672,9 @@ ENTRY(pgm_check_handler) nill %r10,0x007f sll %r10,2 je .Lpgm_return - lgf %r1,0(%r10,%r1) # load address of handler routine + lgf %r9,0(%r10,%r1) # load address of handler routine lgr %r2,%r11 # pass pointer to pt_regs - basr %r14,%r1 # branch to interrupt-handler + BASR_EX %r14,%r9 # branch to interrupt-handler .Lpgm_return: LOCKDEP_SYS_EXIT tm __PT_PSW+1(%r11),0x01 # returning to user ? @@ -609,12 +710,23 @@ ENTRY(pgm_check_handler) ENTRY(io_int_handler) STCK __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER + BPOFF stmg %r8,%r15,__LC_SAVE_AREA_ASYNC lg %r12,__LC_CURRENT larl %r13,cleanup_critical lmg %r8,%r9,__LC_IO_OLD_PSW SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER stmg %r0,%r7,__PT_R0(%r11) + # clear user controlled registers to prevent speculative use + xgr %r0,%r0 + xgr %r1,%r1 + xgr %r2,%r2 + xgr %r3,%r3 + xgr %r4,%r4 + xgr %r5,%r5 + xgr %r6,%r6 + xgr %r7,%r7 + xgr %r10,%r10 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC stmg %r8,%r9,__PT_PSW(%r11) mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID @@ -649,9 +761,13 @@ ENTRY(io_int_handler) lg %r14,__LC_VDSO_PER_CPU lmg %r0,%r10,__PT_R0(%r11) mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) + tm __PT_PSW+1(%r11),0x01 # returning to user ? 
+ jno .Lio_exit_kernel + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP .Lio_exit_timer: stpt __LC_EXIT_TIMER mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER +.Lio_exit_kernel: lmg %r11,%r15,__PT_R11(%r11) lpswe __LC_RETURN_PSW .Lio_done: @@ -814,12 +930,23 @@ ENTRY(io_int_handler) ENTRY(ext_int_handler) STCK __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER + BPOFF stmg %r8,%r15,__LC_SAVE_AREA_ASYNC lg %r12,__LC_CURRENT larl %r13,cleanup_critical lmg %r8,%r9,__LC_EXT_OLD_PSW SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER stmg %r0,%r7,__PT_R0(%r11) + # clear user controlled registers to prevent speculative use + xgr %r0,%r0 + xgr %r1,%r1 + xgr %r2,%r2 + xgr %r3,%r3 + xgr %r4,%r4 + xgr %r5,%r5 + xgr %r6,%r6 + xgr %r7,%r7 + xgr %r10,%r10 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC stmg %r8,%r9,__PT_PSW(%r11) lghi %r1,__LC_EXT_PARAMS2 @@ -852,11 +979,12 @@ ENTRY(psw_idle) .Lpsw_idle_stcctm: #endif oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT + BPON STCK __CLOCK_IDLE_ENTER(%r2) stpt __TIMER_IDLE_ENTER(%r2) .Lpsw_idle_lpsw: lpswe __SF_EMPTY(%r15) - br %r14 + BR_EX %r14 .Lpsw_idle_end: /* @@ -870,7 +998,7 @@ ENTRY(save_fpu_regs) lg %r2,__LC_CURRENT aghi %r2,__TASK_thread TSTMSK __LC_CPU_FLAGS,_CIF_FPU - bor %r14 + jo .Lsave_fpu_regs_exit stfpc __THREAD_FPU_fpc(%r2) lg %r3,__THREAD_FPU_regs(%r2) TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX @@ -897,7 +1025,8 @@ ENTRY(save_fpu_regs) std 15,120(%r3) .Lsave_fpu_regs_done: oi __LC_CPU_FLAGS+7,_CIF_FPU - br %r14 +.Lsave_fpu_regs_exit: + BR_EX %r14 .Lsave_fpu_regs_end: EXPORT_SYMBOL(save_fpu_regs) @@ -915,7 +1044,7 @@ load_fpu_regs: lg %r4,__LC_CURRENT aghi %r4,__TASK_thread TSTMSK __LC_CPU_FLAGS,_CIF_FPU - bnor %r14 + jno .Lload_fpu_regs_exit lfpc __THREAD_FPU_fpc(%r4) TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area @@ -942,7 +1071,8 @@ load_fpu_regs: ld 15,120(%r4) .Lload_fpu_regs_done: ni __LC_CPU_FLAGS+7,255-_CIF_FPU - br %r14 +.Lload_fpu_regs_exit: + BR_EX %r14 .Lload_fpu_regs_end: .L__critical_end: @@ -952,6 +1082,7 @@ load_fpu_regs: */ ENTRY(mcck_int_handler) STCK __LC_MCCK_CLOCK + BPOFF la %r1,4095 # revalidate r1 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs @@ -982,6 +1113,16 @@ ENTRY(mcck_int_handler) .Lmcck_skip: lghi %r14,__LC_GPREGS_SAVE_AREA+64 stmg %r0,%r7,__PT_R0(%r11) + # clear user controlled registers to prevent speculative use + xgr %r0,%r0 + xgr %r1,%r1 + xgr %r2,%r2 + xgr %r3,%r3 + xgr %r4,%r4 + xgr %r5,%r5 + xgr %r6,%r6 + xgr %r7,%r7 + xgr %r10,%r10 mvc __PT_R8(64,%r11),0(%r14) stmg %r8,%r9,__PT_PSW(%r11) xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) @@ -1007,6 +1148,7 @@ ENTRY(mcck_int_handler) mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 
jno 0f + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP stpt __LC_EXIT_TIMER mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 0: lmg %r11,%r15,__PT_R11(%r11) @@ -1102,7 +1244,7 @@ cleanup_critical: jl 0f clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end jl .Lcleanup_load_fpu_regs -0: br %r14 +0: BR_EX %r14,%r11 .align 8 .Lcleanup_table: @@ -1133,11 +1275,12 @@ cleanup_critical: clg %r9,BASED(.Lsie_crit_mcck_length) jh 1f oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST -1: lg %r9,__SF_EMPTY(%r15) # get control block pointer +1: BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) + lg %r9,__SF_EMPTY(%r15) # get control block pointer ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce larl %r9,sie_exit # skip forward to sie_exit - br %r14 + BR_EX %r14,%r11 #endif .Lcleanup_system_call: @@ -1175,6 +1318,7 @@ cleanup_critical: stg %r15,__LC_SYSTEM_TIMER 0: # update accounting time stamp mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP # set up saved register r11 lg %r15,__LC_KERNEL_STACK la %r9,STACK_FRAME_OVERHEAD(%r15) @@ -1190,7 +1334,7 @@ cleanup_critical: stg %r15,56(%r11) # r15 stack pointer # set new psw address and exit larl %r9,.Lsysc_do_svc - br %r14 + BR_EX %r14,%r11 .Lcleanup_system_call_insn: .quad system_call .quad .Lsysc_stmg @@ -1202,7 +1346,7 @@ cleanup_critical: .Lcleanup_sysc_tif: larl %r9,.Lsysc_tif - br %r14 + BR_EX %r14,%r11 .Lcleanup_sysc_restore: # check if stpt has been executed @@ -1219,14 +1363,14 @@ cleanup_critical: mvc 0(64,%r11),__PT_R8(%r9) lmg %r0,%r7,__PT_R0(%r9) 1: lmg %r8,%r9,__LC_RETURN_PSW - br %r14 + BR_EX %r14,%r11 .Lcleanup_sysc_restore_insn: .quad .Lsysc_exit_timer .quad .Lsysc_done - 4 .Lcleanup_io_tif: larl %r9,.Lio_tif - br %r14 + BR_EX %r14,%r11 .Lcleanup_io_restore: # check if stpt has been executed @@ -1240,7 +1384,7 @@ cleanup_critical: mvc 0(64,%r11),__PT_R8(%r9) lmg %r0,%r7,__PT_R0(%r9) 1: lmg %r8,%r9,__LC_RETURN_PSW - br %r14 + BR_EX %r14,%r11 .Lcleanup_io_restore_insn: .quad .Lio_exit_timer .quad .Lio_done - 4 @@ -1293,17 +1437,17 @@ cleanup_critical: # prepare return psw nihh %r8,0xfcfd # clear irq & wait state bits lg %r9,48(%r11) # return from psw_idle - br %r14 + BR_EX %r14,%r11 .Lcleanup_idle_insn: .quad .Lpsw_idle_lpsw .Lcleanup_save_fpu_regs: larl %r9,save_fpu_regs - br %r14 + BR_EX %r14,%r11 .Lcleanup_load_fpu_regs: larl %r9,load_fpu_regs - br %r14 + BR_EX %r14,%r11 /* * Integer constants @@ -1323,7 +1467,6 @@ cleanup_critical: .Lsie_crit_mcck_length: .quad .Lsie_skip - .Lsie_entry #endif - .section .rodata, "a" #define SYSCALL(esame,emu) .long esame .globl sys_call_table diff --git a/arch/s390/kernel/guarded_storage.c b/arch/s390/kernel/guarded_storage.c index bff39b66c9ff..9ee794e14f33 100644 --- a/arch/s390/kernel/guarded_storage.c +++ b/arch/s390/kernel/guarded_storage.c @@ -14,9 +14,11 @@ void exit_thread_gs(void) { + preempt_disable(); kfree(current->thread.gs_cb); kfree(current->thread.gs_bc_cb); current->thread.gs_cb = current->thread.gs_bc_cb = NULL; + preempt_enable(); } static int gs_enable(void) diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 8e622bb52f7a..b565e784bae8 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -564,6 +564,7 @@ static struct kset *ipl_kset; static void __ipl_run(void *unused) { + __bpon(); diag308(DIAG308_LOAD_CLEAR, NULL); if (MACHINE_IS_VM) __cpcmd("IPL", NULL, 0, NULL); @@ -799,6 +800,7 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb, /* 
copy and convert to ebcdic */ memcpy(ipb->hdr.loadparm, buf, lp_len); ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN); + ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID; return len; } diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 94f2099bceb0..3d17c41074ca 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -176,10 +176,9 @@ void do_softirq_own_stack(void) new -= STACK_FRAME_OVERHEAD; ((struct stack_frame *) new)->back_chain = old; asm volatile(" la 15,0(%0)\n" - " basr 14,%2\n" + " brasl 14,__do_softirq\n" " la 15,0(%1)\n" - : : "a" (new), "a" (old), - "a" (__do_softirq) + : : "a" (new), "a" (old) : "0", "1", "2", "3", "4", "5", "14", "cc", "memory" ); } else { diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index b0ba2c26b45e..d6f7782e75c9 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -269,6 +269,7 @@ static void __do_machine_kexec(void *data) s390_reset_system(); data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page); + __arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */ /* Call the moving routine */ (*data_mover)(&image->head, image->start); diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 82df7d80fab2..27110f3294ed 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -9,13 +9,17 @@ #include #include #include +#include #include #include + GEN_BR_THUNK %r1 + GEN_BR_THUNK %r14 + .section .kprobes.text, "ax" ENTRY(ftrace_stub) - br %r14 + BR_EX %r14 #define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE) #define STACK_PTREGS (STACK_FRAME_OVERHEAD) @@ -23,7 +27,7 @@ ENTRY(ftrace_stub) #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW) ENTRY(_mcount) - br %r14 + BR_EX %r14 EXPORT_SYMBOL(_mcount) @@ -53,7 +57,7 @@ ENTRY(ftrace_caller) #endif lgr %r3,%r14 la %r5,STACK_PTREGS(%r15) - basr %r14,%r1 + BASR_EX %r14,%r1 #ifdef CONFIG_FUNCTION_GRAPH_TRACER # The j instruction gets runtime patched to a nop instruction. # See ftrace_enable_ftrace_graph_caller. 
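Each BR_EX/B_EX/BASR_EX site above records its own address in the .s390_indirect_branches section as a self-relative 32-bit offset (the ".long 557b-." entries and friends). A one-line C sketch of how such an entry is resolved back to an instruction address, matching the "instr = (u8 *) epo + *epo" computation in nospec_revert() further down (illustration only):

/* Illustration: recover a call site from a self-relative s32 entry. */
static unsigned char *site_from_entry(int *entry)
{
	return (unsigned char *)entry + *entry;
}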
@@ -68,7 +72,7 @@ ftrace_graph_caller_end: #endif lg %r1,(STACK_PTREGS_PSW+8)(%r15) lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15) - br %r1 + BR_EX %r1 #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -81,6 +85,6 @@ ENTRY(return_to_handler) aghi %r15,STACK_FRAME_OVERHEAD lgr %r14,%r2 lmg %r2,%r5,32(%r15) - br %r14 + BR_EX %r14 #endif diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 1a27f307a920..b441e069e674 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -31,6 +31,9 @@ #include #include #include +#include +#include +#include #if 0 #define DEBUGP printk @@ -168,7 +171,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, me->arch.got_offset = me->core_layout.size; me->core_layout.size += me->arch.got_size; me->arch.plt_offset = me->core_layout.size; - me->core_layout.size += me->arch.plt_size; + if (me->arch.plt_size) { + if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) + me->arch.plt_size += PLT_ENTRY_SIZE; + me->core_layout.size += me->arch.plt_size; + } return 0; } @@ -322,9 +329,20 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, unsigned int *ip; ip = me->core_layout.base + me->arch.plt_offset + info->plt_offset; - ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ - ip[1] = 0x100a0004; - ip[2] = 0x07f10000; + ip[0] = 0x0d10e310; /* basr 1,0 */ + ip[1] = 0x100a0004; /* lg 1,10(1) */ + if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) { + unsigned int *ij; + ij = me->core_layout.base + + me->arch.plt_offset + + me->arch.plt_size - PLT_ENTRY_SIZE; + ip[2] = 0xa7f40000 + /* j __jump_r1 */ + (unsigned int)(u16) + (((unsigned long) ij - 8 - + (unsigned long) ip) / 2); + } else { + ip[2] = 0x07f10000; /* br %r1 */ + } ip[3] = (unsigned int) (val >> 32); ip[4] = (unsigned int) val; info->plt_initialized = 1; @@ -429,6 +447,45 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) { + const Elf_Shdr *s; + char *secstrings, *secname; + void *aseg; + + if (IS_ENABLED(CONFIG_EXPOLINE) && + !nospec_disable && me->arch.plt_size) { + unsigned int *ij; + + ij = me->core_layout.base + me->arch.plt_offset + + me->arch.plt_size - PLT_ENTRY_SIZE; + if (test_facility(35)) { + ij[0] = 0xc6000000; /* exrl %r0,.+10 */ + ij[1] = 0x0005a7f4; /* j . */ + ij[2] = 0x000007f1; /* br %r1 */ + } else { + ij[0] = 0x44000000 | (unsigned int) + offsetof(struct lowcore, br_r1_trampoline); + ij[1] = 0xa7f40000; /* j . 
*/ + } + + secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { + aseg = (void *) s->sh_addr; + secname = secstrings + s->sh_name; + + if (!strcmp(".altinstructions", secname)) + /* patch .altinstructions */ + apply_alternatives(aseg, aseg + s->sh_size); + + if (IS_ENABLED(CONFIG_EXPOLINE) && + (!strncmp(".s390_indirect", secname, 14))) + nospec_revert(aseg, aseg + s->sh_size); + + if (IS_ENABLED(CONFIG_EXPOLINE) && + (!strncmp(".s390_return", secname, 12))) + nospec_revert(aseg, aseg + s->sh_size); + } + jump_label_apply_nops(me); return 0; } diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c new file mode 100644 index 000000000000..d5eed651b5ab --- /dev/null +++ b/arch/s390/kernel/nospec-branch.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +static int __init nobp_setup_early(char *str) +{ + bool enabled; + int rc; + + rc = kstrtobool(str, &enabled); + if (rc) + return rc; + if (enabled && test_facility(82)) { + /* + * The user explicitly requested nobp=1, enable it and + * disable the expoline support. + */ + __set_facility(82, S390_lowcore.alt_stfle_fac_list); + if (IS_ENABLED(CONFIG_EXPOLINE)) + nospec_disable = 1; + } else { + __clear_facility(82, S390_lowcore.alt_stfle_fac_list); + } + return 0; +} +early_param("nobp", nobp_setup_early); + +static int __init nospec_setup_early(char *str) +{ + __clear_facility(82, S390_lowcore.alt_stfle_fac_list); + return 0; +} +early_param("nospec", nospec_setup_early); + +static int __init nospec_report(void) +{ + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) + pr_info("Spectre V2 mitigation: execute trampolines.\n"); + if (__test_facility(82, S390_lowcore.alt_stfle_fac_list)) + pr_info("Spectre V2 mitigation: limited branch prediction.\n"); + return 0; +} +arch_initcall(nospec_report); + +#ifdef CONFIG_EXPOLINE + +int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF); + +static int __init nospectre_v2_setup_early(char *str) +{ + nospec_disable = 1; + return 0; +} +early_param("nospectre_v2", nospectre_v2_setup_early); + +void __init nospec_auto_detect(void) +{ + if (IS_ENABLED(CC_USING_EXPOLINE)) { + /* + * The kernel has been compiled with expolines. + * Keep expolines enabled and disable nobp. + */ + nospec_disable = 0; + __clear_facility(82, S390_lowcore.alt_stfle_fac_list); + } + /* + * If the kernel has not been compiled with expolines, the + * nobp setting decides what is done; this depends on the + * CONFIG_KERNEL_NOBP option and the nobp/nospec parameters.
+ */ +} + +static int __init spectre_v2_setup_early(char *str) +{ + if (str && !strncmp(str, "on", 2)) { + nospec_disable = 0; + __clear_facility(82, S390_lowcore.alt_stfle_fac_list); + } + if (str && !strncmp(str, "off", 3)) + nospec_disable = 1; + if (str && !strncmp(str, "auto", 4)) + nospec_auto_detect(); + return 0; +} +early_param("spectre_v2", spectre_v2_setup_early); + +static void __init_or_module __nospec_revert(s32 *start, s32 *end) +{ + enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type; + u8 *instr, *thunk, *br; + u8 insnbuf[6]; + s32 *epo; + + /* Second part of the instruction replace is always a nop */ + for (epo = start; epo < end; epo++) { + instr = (u8 *) epo + *epo; + if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04) + type = BRCL_EXPOLINE; /* brcl instruction */ + else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05) + type = BRASL_EXPOLINE; /* brasl instruction */ + else + continue; + thunk = instr + (*(int *)(instr + 2)) * 2; + if (thunk[0] == 0xc6 && thunk[1] == 0x00) + /* exrl %r0, */ + br = thunk + (*(int *)(thunk + 2)) * 2; + else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 && + thunk[6] == 0x44 && thunk[7] == 0x00 && + (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 && + (thunk[1] & 0xf0) == (thunk[8] & 0xf0)) + /* larl %rx, + ex %r0,0(%rx) */ + br = thunk + (*(int *)(thunk + 2)) * 2; + else + continue; + /* Check for unconditional branch 0x07f? or 0x47f???? */ + if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0) + continue; + + memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4); + switch (type) { + case BRCL_EXPOLINE: + insnbuf[0] = br[0]; + insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); + if (br[0] == 0x47) { + /* brcl to b, replace with bc + nopr */ + insnbuf[2] = br[2]; + insnbuf[3] = br[3]; + } else { + /* brcl to br, replace with bcr + nop */ + } + break; + case BRASL_EXPOLINE: + insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); + if (br[0] == 0x47) { + /* brasl to b, replace with bas + nopr */ + insnbuf[0] = 0x4d; + insnbuf[2] = br[2]; + insnbuf[3] = br[3]; + } else { + /* brasl to br, replace with basr + nop */ + insnbuf[0] = 0x0d; + } + break; + } + + s390_kernel_write(instr, insnbuf, 6); + } +} + +void __init_or_module nospec_revert(s32 *start, s32 *end) +{ + if (nospec_disable) + __nospec_revert(start, end); +} + +extern s32 __nospec_call_start[], __nospec_call_end[]; +extern s32 __nospec_return_start[], __nospec_return_end[]; +void __init nospec_init_branches(void) +{ + nospec_revert(__nospec_call_start, __nospec_call_end); + nospec_revert(__nospec_return_start, __nospec_return_end); +} + +#endif /* CONFIG_EXPOLINE */ diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c new file mode 100644 index 000000000000..8affad5f18cb --- /dev/null +++ b/arch/s390/kernel/nospec-sysfs.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +ssize_t cpu_show_spectre_v1(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); +} + +ssize_t cpu_show_spectre_v2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) + return sprintf(buf, "Mitigation: execute trampolines\n"); + if (__test_facility(82, S390_lowcore.alt_stfle_fac_list)) + return sprintf(buf, "Mitigation: limited branch prediction\n"); + return sprintf(buf, "Vulnerable\n"); +} diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 
7e1e40323b78..d99155793c26 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -739,6 +739,10 @@ static int __hw_perf_event_init(struct perf_event *event) */ rate = 0; if (attr->freq) { + if (!attr->sample_freq) { + err = -EINVAL; + goto out; + } rate = freq_to_sample_rate(&si, attr->sample_freq); rate = hw_limit_rate(&si, rate); attr->freq = 0; diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index a4a84fb08046..7d4c5500c6c2 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -49,10 +49,8 @@ extern void kernel_thread_starter(void); */ void exit_thread(struct task_struct *tsk) { - if (tsk == current) { - exit_thread_runtime_instr(); + if (tsk == current) exit_thread_gs(); - } } void flush_thread(void) @@ -65,6 +63,7 @@ void release_thread(struct task_struct *dead_task) void arch_release_task_struct(struct task_struct *tsk) { + runtime_instr_release(tsk); } int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) @@ -100,6 +99,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp, memset(&p->thread.per_user, 0, sizeof(p->thread.per_user)); memset(&p->thread.per_event, 0, sizeof(p->thread.per_event)); clear_tsk_thread_flag(p, TIF_SINGLE_STEP); + p->thread.per_flags = 0; /* Initialize per thread user and system timer values */ p->thread.user_timer = 0; p->thread.guest_timer = 0; diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 5362fd868d0d..6fe2e1875058 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -197,3 +197,21 @@ const struct seq_operations cpuinfo_op = { .stop = c_stop, .show = show_cpuinfo, }; + +int s390_isolate_bp(void) +{ + if (!test_facility(82)) + return -EOPNOTSUPP; + set_thread_flag(TIF_ISOLATE_BP); + return 0; +} +EXPORT_SYMBOL(s390_isolate_bp); + +int s390_isolate_bp_guest(void) +{ + if (!test_facility(82)) + return -EOPNOTSUPP; + set_thread_flag(TIF_ISOLATE_BP_GUEST); + return 0; +} +EXPORT_SYMBOL(s390_isolate_bp_guest); diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 1427d60ce628..56e0190d6e65 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -1172,26 +1172,37 @@ static int s390_gs_cb_set(struct task_struct *target, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - struct gs_cb *data = target->thread.gs_cb; + struct gs_cb gs_cb = { }, *data = NULL; int rc; if (!MACHINE_HAS_GS) return -ENODEV; - if (!data) { + if (!target->thread.gs_cb) { data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; - data->gsd = 25; - target->thread.gs_cb = data; - if (target == current) - __ctl_set_bit(2, 4); - } else if (target == current) { - save_gs_cb(data); } + if (!target->thread.gs_cb) + gs_cb.gsd = 25; + else if (target == current) + save_gs_cb(&gs_cb); + else + gs_cb = *target->thread.gs_cb; rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - data, 0, sizeof(struct gs_cb)); - if (target == current) - restore_gs_cb(data); + &gs_cb, 0, sizeof(gs_cb)); + if (rc) { + kfree(data); + return -EFAULT; + } + preempt_disable(); + if (!target->thread.gs_cb) + target->thread.gs_cb = data; + *target->thread.gs_cb = gs_cb; + if (target == current) { + __ctl_set_bit(2, 4); + restore_gs_cb(target->thread.gs_cb); + } + preempt_enable(); return rc; } diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index a40ebd1d29d0..8e954c102639 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S @@ -7,8 +7,11 
@@ #include #include +#include #include + GEN_BR_THUNK %r9 + # # Issue "store status" for the current CPU to its prefix page # and call passed function afterwards @@ -67,9 +70,9 @@ ENTRY(store_status) st %r4,0(%r1) st %r5,4(%r1) stg %r2,8(%r1) - lgr %r1,%r2 + lgr %r9,%r2 lgr %r2,%r3 - br %r1 + BR_EX %r9 .section .bss .align 8 diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S index ca37e5d5b40c..9c2c96da23d0 100644 --- a/arch/s390/kernel/relocate_kernel.S +++ b/arch/s390/kernel/relocate_kernel.S @@ -29,7 +29,6 @@ ENTRY(relocate_kernel) basr %r13,0 # base address .base: - stnsm sys_msk-.base(%r13),0xfb # disable DAT stctg %c0,%c15,ctlregs-.base(%r13) stmg %r0,%r15,gprregs-.base(%r13) lghi %r0,3 @@ -103,8 +102,6 @@ ENTRY(relocate_kernel) .align 8 load_psw: .long 0x00080000,0x80000000 - sys_msk: - .quad 0 ctlregs: .rept 16 .quad 0 diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index 32aefb215e59..94c9ba72cf83 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c @@ -21,11 +21,24 @@ /* empty control block to disable RI by loading it */ struct runtime_instr_cb runtime_instr_empty_cb; +void runtime_instr_release(struct task_struct *tsk) +{ + kfree(tsk->thread.ri_cb); +} + static void disable_runtime_instr(void) { - struct pt_regs *regs = task_pt_regs(current); + struct task_struct *task = current; + struct pt_regs *regs; + if (!task->thread.ri_cb) + return; + regs = task_pt_regs(task); + preempt_disable(); load_runtime_instr_cb(&runtime_instr_empty_cb); + kfree(task->thread.ri_cb); + task->thread.ri_cb = NULL; + preempt_enable(); /* * Make sure the RI bit is deleted from the PSW. If the user did not @@ -46,17 +59,6 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb) cb->valid = 1; } -void exit_thread_runtime_instr(void) -{ - struct task_struct *task = current; - - if (!task->thread.ri_cb) - return; - disable_runtime_instr(); - kfree(task->thread.ri_cb); - task->thread.ri_cb = NULL; -} - SYSCALL_DEFINE1(s390_runtime_instr, int, command) { struct runtime_instr_cb *cb; @@ -65,9 +67,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command) return -EOPNOTSUPP; if (command == S390_RUNTIME_INSTR_STOP) { - preempt_disable(); - exit_thread_runtime_instr(); - preempt_enable(); + disable_runtime_instr(); return 0; } diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 164a1e16b53e..98c1f7941142 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -66,6 +66,8 @@ #include #include #include +#include +#include #include "entry.h" /* @@ -338,7 +340,9 @@ static void __init setup_lowcore(void) lc->preempt_count = S390_lowcore.preempt_count; lc->stfl_fac_list = S390_lowcore.stfl_fac_list; memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, - MAX_FACILITY_BIT/8); + sizeof(lc->stfle_fac_list)); + memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list, + sizeof(lc->alt_stfle_fac_list)); if (MACHINE_HAS_VX || MACHINE_HAS_GS) { unsigned long bits, size; @@ -381,6 +385,7 @@ static void __init setup_lowcore(void) #ifdef CONFIG_SMP lc->spinlock_lockval = arch_spin_lockval(0); #endif + lc->br_r1_trampoline = 0x07f1; /* br %r1 */ set_prefix((u32)(unsigned long) lc); lowcore_ptr[0] = lc; @@ -892,6 +897,9 @@ void __init setup_arch(char **cmdline_p) init_mm.end_data = (unsigned long) &_edata; init_mm.brk = (unsigned long) &_end; + if (IS_ENABLED(CONFIG_EXPOLINE_AUTO)) + nospec_auto_detect(); + parse_early_param(); #ifdef CONFIG_CRASH_DUMP /* Deactivate elfcorehdr= kernel 
parameter */ @@ -955,6 +963,10 @@ void __init setup_arch(char **cmdline_p) conmode_default(); set_preferred_console(); + apply_alternative_instructions(); + if (IS_ENABLED(CONFIG_EXPOLINE)) + nospec_init_branches(); + /* Setup zfcpdump support */ setup_zfcpdump(); diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 092c4154abd7..ae5df4177803 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -54,6 +54,7 @@ #include #include #include +#include #include "entry.h" enum { @@ -227,6 +228,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) lc->mcesad = mcesa_origin | mcesa_bits; lc->cpu_nr = cpu; lc->spinlock_lockval = arch_spin_lockval(cpu); + lc->br_r1_trampoline = 0x07f1; /* br %r1 */ if (vdso_alloc_per_cpu(lc)) goto out; lowcore_ptr[cpu] = lc; @@ -281,7 +283,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) __ctl_store(lc->cregs_save_area, 0, 15); save_access_regs((unsigned int *) lc->access_regs_save_area); memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, - MAX_FACILITY_BIT/8); + sizeof(lc->stfle_fac_list)); + memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list, + sizeof(lc->alt_stfle_fac_list)); } static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk) @@ -331,6 +335,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *), mem_assign_absolute(lc->restart_fn, (unsigned long) func); mem_assign_absolute(lc->restart_data, (unsigned long) data); mem_assign_absolute(lc->restart_source, source_cpu); + __bpon(); asm volatile( "0: sigp 0,%0,%2 # sigp restart to target cpu\n" " brc 2,0b # busy, try again\n" @@ -906,6 +911,7 @@ void __cpu_die(unsigned int cpu) void __noreturn cpu_die(void) { idle_task_exit(); + __bpon(); pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0); for (;;) ; } diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S index e99187149f17..a049a7b9d6e8 100644 --- a/arch/s390/kernel/swsusp.S +++ b/arch/s390/kernel/swsusp.S @@ -13,6 +13,7 @@ #include #include #include +#include #include /* @@ -24,6 +25,8 @@ * (see below) in the resume process. * This function runs with disabled interrupts. */ + GEN_BR_THUNK %r14 + .section .text ENTRY(swsusp_arch_suspend) stmg %r6,%r15,__SF_GPRS(%r15) @@ -103,7 +106,7 @@ ENTRY(swsusp_arch_suspend) spx 0x318(%r1) lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) lghi %r2,0 - br %r14 + BR_EX %r14 /* * Restore saved memory image to correct place and restore register context. 
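setup_lowcore() and pcpu_alloc_lowcore() above both store the halfword 0x07f1 in lc->br_r1_trampoline; that value is the machine encoding of "br %r1" (BCR with condition mask 15 and register 1), which the non-exrl expoline thunks execute via "ex %r0,__LC_BR_R1". A small C sketch of the encoding, using a hypothetical helper for illustration only:

#include <stdint.h>

/* BCR is a 2-byte RR instruction: opcode 0x07, then a 4-bit
 * condition mask and a 4-bit register number. Mask 0xf makes
 * the branch unconditional. */
static uint16_t encode_bcr(unsigned int mask, unsigned int reg)
{
	return (uint16_t)(0x0700 | ((mask & 0xf) << 4) | (reg & 0xf));
}

/* encode_bcr(0xf, 1) == 0x07f1, the value written to
 * lc->br_r1_trampoline in setup_lowcore() and pcpu_alloc_lowcore(). */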
@@ -197,11 +200,10 @@ pgm_check_entry: larl %r15,init_thread_union ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) larl %r2,.Lpanic_string - larl %r3,sclp_early_printk lghi %r1,0 sam31 sigp %r1,%r0,SIGP_SET_ARCHITECTURE - basr %r14,%r3 + brasl %r14,sclp_early_printk larl %r3,.Ldisabled_wait_31 lpsw 0(%r3) 4: @@ -267,7 +269,7 @@ restore_registers: /* Return 0 */ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) lghi %r2,0 - br %r14 + BR_EX %r14 .section .data..nosave,"aw",@progbits .align 8 diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index d39f121e67a9..bc905ae1d5c8 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -370,10 +370,10 @@ SYSCALL(sys_recvmmsg,compat_sys_recvmmsg) SYSCALL(sys_sendmmsg,compat_sys_sendmmsg) SYSCALL(sys_socket,sys_socket) SYSCALL(sys_socketpair,compat_sys_socketpair) /* 360 */ -SYSCALL(sys_bind,sys_bind) -SYSCALL(sys_connect,sys_connect) +SYSCALL(sys_bind,compat_sys_bind) +SYSCALL(sys_connect,compat_sys_connect) SYSCALL(sys_listen,sys_listen) -SYSCALL(sys_accept4,sys_accept4) +SYSCALL(sys_accept4,compat_sys_accept4) SYSCALL(sys_getsockopt,compat_sys_getsockopt) /* 365 */ SYSCALL(sys_setsockopt,compat_sys_setsockopt) SYSCALL(sys_getsockname,compat_sys_getsockname) diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c index d9d1f512f019..5007fac01bb5 100644 --- a/arch/s390/kernel/uprobes.c +++ b/arch/s390/kernel/uprobes.c @@ -150,6 +150,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline, return orig; } +bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, + struct pt_regs *regs) +{ + if (ctx == RP_CHECK_CHAIN_CALL) + return user_stack_pointer(regs) <= ret->stack; + else + return user_stack_pointer(regs) < ret->stack; +} + /* Instruction Emulation */ static void adjust_psw_addr(psw_t *psw, unsigned long len) diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 96a713a470e7..85dd3c7bdd86 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -105,6 +105,43 @@ SECTIONS EXIT_DATA } + /* + * struct alt_inst entries. From the header (alternative.h): + * "Alternative instructions for different CPU types or capabilities" + * Think locking instructions on spinlocks. + * Note, that it is a part of __init region. + */ + . = ALIGN(8); + .altinstructions : { + __alt_instructions = .; + *(.altinstructions) + __alt_instructions_end = .; + } + + /* + * And here are the replacement instructions. The linker sticks + * them as binary blobs. The .altinstructions has enough data to + * get the address and the length of them to patch the kernel safely. + * Note, that it is a part of __init region. + */ + .altinstr_replacement : { + *(.altinstr_replacement) + } + + /* + * Table with the patch locations to undo expolines + */ + .nospec_call_table : { + __nospec_call_start = . ; + *(.s390_indirect*) + __nospec_call_end = . ; + } + .nospec_return_table : { + __nospec_return_start = . ; + *(.s390_return*) + __nospec_return_end = . ; + } + /* early.c uses stsi, which requires page aligned data. */ . 
= ALIGN(PAGE_SIZE); INIT_DATA_SECTION(0x100) diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index a832ad031cee..5185be314661 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -173,8 +173,15 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) static int ckc_irq_pending(struct kvm_vcpu *vcpu) { - if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm)) + const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm); + const u64 ckc = vcpu->arch.sie_block->ckc; + + if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) { + if ((s64)ckc >= (s64)now) + return 0; + } else if (ckc >= now) { return 0; + } return ckc_interrupts_enabled(vcpu); } @@ -1004,13 +1011,19 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) static u64 __calculate_sltime(struct kvm_vcpu *vcpu) { - u64 now, cputm, sltime = 0; + const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm); + const u64 ckc = vcpu->arch.sie_block->ckc; + u64 cputm, sltime = 0; if (ckc_interrupts_enabled(vcpu)) { - now = kvm_s390_get_tod_clock_fast(vcpu->kvm); - sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); - /* already expired or overflow? */ - if (!sltime || vcpu->arch.sie_block->ckc <= now) + if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) { + if ((s64)now < (s64)ckc) + sltime = tod_to_ns((s64)ckc - (s64)now); + } else if (now < ckc) { + sltime = tod_to_ns(ckc - now); + } + /* already expired */ + if (!sltime) return 0; if (cpu_timer_interrupts_enabled(vcpu)) { cputm = kvm_s390_get_cpu_timer(vcpu); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 40d0a1a97889..4f6adbea592b 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -169,6 +169,28 @@ int kvm_arch_hardware_enable(void) static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start, unsigned long end); +static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta) +{ + u8 delta_idx = 0; + + /* + * The TOD jumps by delta, we have to compensate this by adding + * -delta to the epoch. + */ + delta = -delta; + + /* sign-extension - we're adding to signed values below */ + if ((s64)delta < 0) + delta_idx = -1; + + scb->epoch += delta; + if (scb->ecd & ECD_MEF) { + scb->epdx += delta_idx; + if (scb->epoch < delta) + scb->epdx += 1; + } +} + /* * This callback is executed during stop_machine(). All CPUs are therefore * temporarily stopped. 
In order not to change guest behavior, we have to @@ -184,13 +206,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val, unsigned long long *delta = v; list_for_each_entry(kvm, &vm_list, vm_list) { - kvm->arch.epoch -= *delta; kvm_for_each_vcpu(i, vcpu, kvm) { - vcpu->arch.sie_block->epoch -= *delta; + kvm_clock_sync_scb(vcpu->arch.sie_block, *delta); + if (i == 0) { + kvm->arch.epoch = vcpu->arch.sie_block->epoch; + kvm->arch.epdx = vcpu->arch.sie_block->epdx; + } if (vcpu->arch.cputm_enabled) vcpu->arch.cputm_start += *delta; if (vcpu->arch.vsie_block) - vcpu->arch.vsie_block->epoch -= *delta; + kvm_clock_sync_scb(vcpu->arch.vsie_block, + *delta); } } return NOTIFY_OK; @@ -423,6 +449,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_GS: r = test_facility(133); break; + case KVM_CAP_S390_BPB: + r = test_facility(82); + break; default: r = 0; } @@ -575,7 +604,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) case KVM_CAP_S390_GS: r = -EINVAL; mutex_lock(&kvm->lock); - if (atomic_read(&kvm->online_vcpus)) { + if (kvm->created_vcpus) { r = -EBUSY; } else if (test_facility(133)) { set_kvm_facility(kvm->arch.model.fac_mask, 133); @@ -768,7 +797,7 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req) /* * Must be called with kvm->srcu held to avoid races on memslots, and with - * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration. + * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration. */ static int kvm_s390_vm_start_migration(struct kvm *kvm) { @@ -794,11 +823,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm) if (kvm->arch.use_cmma) { /* - * Get the last slot. They should be sorted by base_gfn, so the - * last slot is also the one at the end of the address space. - * We have verified above that at least one slot is present. + * Get the first slot. They are reverse sorted by base_gfn, so + * the first slot is also the one at the end of the address + * space. We have verified above that at least one slot is + * present. */ - ms = slots->memslots + slots->used_slots - 1; + ms = slots->memslots; /* round up so we only use full longs */ ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG); /* allocate enough bytes to store all the bits */ @@ -823,7 +853,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm) } /* - * Must be called with kvm->lock to avoid races with ourselves and + * Must be called with kvm->slots_lock to avoid races with ourselves and * kvm_s390_vm_start_migration. 
*/ static int kvm_s390_vm_stop_migration(struct kvm *kvm) @@ -838,6 +868,8 @@ if (kvm->arch.use_cmma) { kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION); + /* We have to wait for the essa emulation to finish */ + synchronize_srcu(&kvm->srcu); vfree(mgs->pgste_bitmap); } kfree(mgs); @@ -847,14 +879,12 @@ static int kvm_s390_vm_set_migration(struct kvm *kvm, struct kvm_device_attr *attr) { - int idx, res = -ENXIO; + int res = -ENXIO; - mutex_lock(&kvm->lock); + mutex_lock(&kvm->slots_lock); switch (attr->attr) { case KVM_S390_VM_MIGRATION_START: - idx = srcu_read_lock(&kvm->srcu); res = kvm_s390_vm_start_migration(kvm); - srcu_read_unlock(&kvm->srcu, idx); break; case KVM_S390_VM_MIGRATION_STOP: res = kvm_s390_vm_stop_migration(kvm); @@ -862,7 +892,7 @@ static int kvm_s390_vm_set_migration(struct kvm *kvm, default: break; } - mutex_unlock(&kvm->lock); + mutex_unlock(&kvm->slots_lock); return res; } @@ -887,12 +917,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) return -EFAULT; - if (test_kvm_facility(kvm, 139)) - kvm_s390_set_tod_clock_ext(kvm, &gtod); - else if (gtod.epoch_idx == 0) - kvm_s390_set_tod_clock(kvm, gtod.tod); - else + if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx) return -EINVAL; + kvm_s390_set_tod_clock(kvm, &gtod); VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", gtod.epoch_idx, gtod.tod); @@ -917,13 +944,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) { - u64 gtod; + struct kvm_s390_vm_tod_clock gtod = { 0 }; - if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) + if (copy_from_user(&gtod.tod, (void __user *)attr->addr, + sizeof(gtod.tod))) return -EFAULT; - kvm_s390_set_tod_clock(kvm, gtod); - VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod); + kvm_s390_set_tod_clock(kvm, &gtod); + VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod); return 0; } @@ -1096,7 +1124,7 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm, return -EINVAL; mutex_lock(&kvm->lock); - if (!atomic_read(&kvm->online_vcpus)) { + if (!kvm->created_vcpus) { bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS); ret = 0; @@ -1752,7 +1780,9 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&args, argp, sizeof(args))) break; + mutex_lock(&kvm->slots_lock); r = kvm_s390_get_cmma_bits(kvm, &args); + mutex_unlock(&kvm->slots_lock); if (!r) { r = copy_to_user(argp, &args, sizeof(args)); if (r) @@ -1766,7 +1796,9 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&args, argp, sizeof(args))) break; + mutex_lock(&kvm->slots_lock); r = kvm_s390_set_cmma_bits(kvm, &args); + mutex_unlock(&kvm->slots_lock); break; } default: @@ -2090,6 +2122,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu) /* we still need the basic sca for the ipte control */ vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; + return; } read_lock(&vcpu->kvm->arch.sca_lock); if (vcpu->kvm->arch.use_esca) { @@ -2201,6 +2234,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) kvm_s390_set_prefix(vcpu, 0); if (test_kvm_facility(vcpu->kvm, 64)) vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; + if (test_kvm_facility(vcpu->kvm, 82)) +
vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC; if (test_kvm_facility(vcpu->kvm, 133)) vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB; /* fprs can be synchronized via vrs, even if the guest has no vx. With @@ -2342,6 +2377,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) current->thread.fpu.fpc = 0; vcpu->arch.sie_block->gbea = 1; vcpu->arch.sie_block->pp = 0; + vcpu->arch.sie_block->fpf &= ~FPF_BPBC; vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; kvm_clear_async_pf_completion_queue(vcpu); if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) @@ -2354,6 +2390,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) mutex_lock(&vcpu->kvm->lock); preempt_disable(); vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; + vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; preempt_enable(); mutex_unlock(&vcpu->kvm->lock); if (!kvm_is_ucontrol(vcpu->kvm)) { @@ -2940,8 +2977,8 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) return 0; } -void kvm_s390_set_tod_clock_ext(struct kvm *kvm, - const struct kvm_s390_vm_tod_clock *gtod) +void kvm_s390_set_tod_clock(struct kvm *kvm, + const struct kvm_s390_vm_tod_clock *gtod) { struct kvm_vcpu *vcpu; struct kvm_s390_tod_clock_ext htod; @@ -2953,10 +2990,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm, get_tod_clock_ext((char *)&htod); kvm->arch.epoch = gtod->tod - htod.tod; - kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx; - - if (kvm->arch.epoch > gtod->tod) - kvm->arch.epdx -= 1; + kvm->arch.epdx = 0; + if (test_kvm_facility(kvm, 139)) { + kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx; + if (kvm->arch.epoch > gtod->tod) + kvm->arch.epdx -= 1; + } kvm_s390_vcpu_block_all(kvm); kvm_for_each_vcpu(i, vcpu, kvm) { @@ -2969,22 +3008,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm, mutex_unlock(&kvm->lock); } -void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod) -{ - struct kvm_vcpu *vcpu; - int i; - - mutex_lock(&kvm->lock); - preempt_disable(); - kvm->arch.epoch = tod - get_tod_clock(); - kvm_s390_vcpu_block_all(kvm); - kvm_for_each_vcpu(i, vcpu, kvm) - vcpu->arch.sie_block->epoch = kvm->arch.epoch; - kvm_s390_vcpu_unblock_all(kvm); - preempt_enable(); - mutex_unlock(&kvm->lock); -} - /** * kvm_arch_fault_in_page - fault-in guest page if necessary * @vcpu: The corresponding virtual cpu @@ -3301,6 +3324,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; vcpu->arch.gs_enabled = 1; } + if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) && + test_kvm_facility(vcpu->kvm, 82)) { + vcpu->arch.sie_block->fpf &= ~FPF_BPBC; + vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? 
FPF_BPBC : 0; + } save_access_regs(vcpu->arch.host_acrs); restore_access_regs(vcpu->run->s.regs.acrs); /* save host (userspace) fprs/vrs */ @@ -3347,6 +3375,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) kvm_run->s.regs.pft = vcpu->arch.pfault_token; kvm_run->s.regs.pfs = vcpu->arch.pfault_select; kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; + kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; save_access_regs(vcpu->run->s.regs.acrs); restore_access_regs(vcpu->arch.host_acrs); /* Save guest register state */ @@ -3373,7 +3402,6 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int rc; - sigset_t sigsaved; if (kvm_run->immediate_exit) return -EINTR; @@ -3383,8 +3411,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) return 0; } - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + kvm_sigset_activate(vcpu); if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { kvm_s390_vcpu_start(vcpu); @@ -3418,8 +3445,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) disable_cpu_timer_accounting(vcpu); store_regs(vcpu, kvm_run); - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); + kvm_sigset_deactivate(vcpu); vcpu->stat.exit_userspace++; return rc; diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 9f8fdd7b2311..e22d94f494a7 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -272,9 +272,8 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); int handle_sthyi(struct kvm_vcpu *vcpu); /* implemented in kvm-s390.c */ -void kvm_s390_set_tod_clock_ext(struct kvm *kvm, - const struct kvm_s390_vm_tod_clock *gtod); -void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod); +void kvm_s390_set_tod_clock(struct kvm *kvm, + const struct kvm_s390_vm_tod_clock *gtod); long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index c954ac49eee4..734283a21677 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -84,9 +84,10 @@ int kvm_s390_handle_e3(struct kvm_vcpu *vcpu) /* Handle SCK (SET CLOCK) interception */ static int handle_set_clock(struct kvm_vcpu *vcpu) { + struct kvm_s390_vm_tod_clock gtod = { 0 }; int rc; u8 ar; - u64 op2, val; + u64 op2; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); @@ -94,12 +95,12 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) op2 = kvm_s390_get_base_disp_s(vcpu, &ar); if (op2 & 7) /* Operand must be on a doubleword boundary */ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = read_guest(vcpu, op2, ar, &val, sizeof(val)); + rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); - VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val); - kvm_s390_set_tod_clock(vcpu->kvm, val); + VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod); + kvm_s390_set_tod_clock(vcpu->kvm, &gtod); kvm_s390_set_psw_cc(vcpu, 0); return 0; @@ -235,8 +236,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu) VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); return -EAGAIN; } - if
(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) - return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); return 0; } @@ -247,6 +246,9 @@ static int handle_iske(struct kvm_vcpu *vcpu) int reg1, reg2; int rc; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); + rc = try_handle_skey(vcpu); if (rc) return rc != -EAGAIN ? rc : 0; @@ -276,6 +278,9 @@ static int handle_rrbe(struct kvm_vcpu *vcpu) int reg1, reg2; int rc; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); + rc = try_handle_skey(vcpu); if (rc) return rc != -EAGAIN ? rc : 0; @@ -311,6 +316,9 @@ static int handle_sske(struct kvm_vcpu *vcpu) int reg1, reg2; int rc; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); + rc = try_handle_skey(vcpu); if (rc) return rc != -EAGAIN ? rc : 0; @@ -1002,7 +1010,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc) cbrlo[entries] = gfn << PAGE_SHIFT; } - if (orc) { + if (orc && gfn < ms->bitmap_size) { /* increment only if we are really flipping the bit to 1 */ if (!test_and_set_bit(gfn, ms->pgste_bitmap)) atomic64_inc(&ms->dirty_pages); diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index b18b5652e5c5..4f1f5fc8139d 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -31,7 +31,11 @@ struct vsie_page { * the same offset as that in struct sie_page! */ struct mcck_volatile_info mcck_info; /* 0x0200 */ - /* the pinned originial scb */ + /* + * The pinned original scb. Be aware that other VCPUs can modify + * it while we read from it. Values that are used for conditions or + * are reused conditionally, should be accessed via READ_ONCE. 
+ */ struct kvm_s390_sie_block *scb_o; /* 0x0218 */ /* the shadow gmap in use by the vsie_page */ struct gmap *gmap; /* 0x0220 */ @@ -143,12 +147,13 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) { struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; - u32 crycb_addr = scb_o->crycbd & 0x7ffffff8U; + const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd); + const u32 crycb_addr = crycbd_o & 0x7ffffff8U; unsigned long *b1, *b2; u8 ecb3_flags; scb_s->crycbd = 0; - if (!(scb_o->crycbd & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1)) + if (!(crycbd_o & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1)) return 0; /* format-1 is supported with message-security-assist extension 3 */ if (!test_kvm_facility(vcpu->kvm, 76)) @@ -186,12 +191,15 @@ static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) { struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; + /* READ_ONCE does not work on bitfields - use a temporary variable */ + const uint32_t __new_ibc = scb_o->ibc; + const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU; __u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU; scb_s->ibc = 0; /* ibc installed in g2 and requested for g3 */ - if (vcpu->kvm->arch.model.ibc && (scb_o->ibc & 0x0fffU)) { - scb_s->ibc = scb_o->ibc & 0x0fffU; + if (vcpu->kvm->arch.model.ibc && new_ibc) { + scb_s->ibc = new_ibc; /* takte care of the minimum ibc level of the machine */ if (scb_s->ibc < min_ibc) scb_s->ibc = min_ibc; @@ -226,6 +234,12 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) memcpy(scb_o->gcr, scb_s->gcr, 128); scb_o->pp = scb_s->pp; + /* branch prediction */ + if (test_kvm_facility(vcpu->kvm, 82)) { + scb_o->fpf &= ~FPF_BPBC; + scb_o->fpf |= scb_s->fpf & FPF_BPBC; + } + /* interrupt intercept */ switch (scb_s->icptcode) { case ICPT_PROGI: @@ -256,6 +270,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) { struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; + /* READ_ONCE does not work on bitfields - use a temporary variable */ + const uint32_t __new_prefix = scb_o->prefix; + const uint32_t new_prefix = READ_ONCE(__new_prefix); + const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE; bool had_tx = scb_s->ecb & ECB_TE; unsigned long new_mso = 0; int rc; @@ -268,6 +286,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) scb_s->ecb3 = 0; scb_s->ecd = 0; scb_s->fac = 0; + scb_s->fpf = 0; rc = prepare_cpuflags(vcpu, vsie_page); if (rc) @@ -302,14 +321,14 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) scb_s->icpua = scb_o->icpua; if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM)) - new_mso = scb_o->mso & 0xfffffffffff00000UL; + new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL; /* if the hva of the prefix changes, we have to remap the prefix */ - if (scb_s->mso != new_mso || scb_s->prefix != scb_o->prefix) + if (scb_s->mso != new_mso || scb_s->prefix != new_prefix) prefix_unmapped(vsie_page); /* SIE will do mso/msl validity and exception checks for us */ scb_s->msl = scb_o->msl & 0xfffffffffff00000UL; scb_s->mso = new_mso; - scb_s->prefix = scb_o->prefix; + scb_s->prefix = new_prefix; /* We have to definetly flush the tlb if this scb never ran */ if (scb_s->ihcpu != 0xffffU) @@ -321,12 +340,15 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) if 
(test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP)) scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT; /* transactional execution */ - if (test_kvm_facility(vcpu->kvm, 73)) { + if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) { /* remap the prefix is tx is toggled on */ - if ((scb_o->ecb & ECB_TE) && !had_tx) + if (!had_tx) prefix_unmapped(vsie_page); - scb_s->ecb |= scb_o->ecb & ECB_TE; + scb_s->ecb |= ECB_TE; } + /* branch prediction */ + if (test_kvm_facility(vcpu->kvm, 82)) + scb_s->fpf |= scb_o->fpf & FPF_BPBC; /* SIMD */ if (test_kvm_facility(vcpu->kvm, 129)) { scb_s->eca |= scb_o->eca & ECA_VX; @@ -544,9 +566,9 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) gpa_t gpa; int rc = 0; - gpa = scb_o->scaol & ~0xfUL; + gpa = READ_ONCE(scb_o->scaol) & ~0xfUL; if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO)) - gpa |= (u64) scb_o->scaoh << 32; + gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32; if (gpa) { if (!(gpa & ~0x1fffUL)) rc = set_validity_icpt(scb_s, 0x0038U); @@ -566,9 +588,9 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) scb_s->scaol = (u32)(u64)hpa; } - gpa = scb_o->itdba & ~0xffUL; + gpa = READ_ONCE(scb_o->itdba) & ~0xffUL; if (gpa && (scb_s->ecb & ECB_TE)) { - if (!(gpa & ~0x1fffU)) { + if (!(gpa & ~0x1fffUL)) { rc = set_validity_icpt(scb_s, 0x0080U); goto unpin; } @@ -581,7 +603,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) scb_s->itdba = hpa; } - gpa = scb_o->gvrd & ~0x1ffUL; + gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL; if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) { if (!(gpa & ~0x1fffUL)) { rc = set_validity_icpt(scb_s, 0x1310U); @@ -599,7 +621,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) scb_s->gvrd = hpa; } - gpa = scb_o->riccbd & ~0x3fUL; + gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL; if (gpa && (scb_s->ecb3 & ECB3_RI)) { if (!(gpa & ~0x1fffUL)) { rc = set_validity_icpt(scb_s, 0x0043U); @@ -617,8 +639,8 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) if ((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) { unsigned long sdnxc; - gpa = scb_o->sdnxo & ~0xfUL; - sdnxc = scb_o->sdnxo & 0xfUL; + gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL; + sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL; if (!gpa || !(gpa & ~0x1fffUL)) { rc = set_validity_icpt(scb_s, 0x10b0U); goto unpin; @@ -785,7 +807,7 @@ static void retry_vsie_icpt(struct vsie_page *vsie_page) static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) { struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; - __u32 fac = vsie_page->scb_o->fac & 0x7ffffff8U; + __u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U; if (fac && test_kvm_facility(vcpu->kvm, 7)) { retry_vsie_icpt(vsie_page); @@ -809,6 +831,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) { struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; + int guest_bp_isolation; int rc; handle_last_fault(vcpu, vsie_page); @@ -819,6 +842,20 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) s390_handle_mcck(); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + + /* save current guest state of bp isolation override */ + guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST); + + /* + * The guest is running with BPBC, so we have to force it on for our + * nested guest. 
This is done by enabling BPBC globally, so the BPBC + * control in the SCB (which the nested guest can modify) is simply + * ignored. + */ + if (test_kvm_facility(vcpu->kvm, 82) && + vcpu->arch.sie_block->fpf & FPF_BPBC) + set_thread_flag(TIF_ISOLATE_BP_GUEST); + local_irq_disable(); guest_enter_irqoff(); local_irq_enable(); @@ -828,6 +865,11 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) local_irq_disable(); guest_exit_irqoff(); local_irq_enable(); + + /* restore guest state for bp isolation override */ + if (!guest_bp_isolation) + clear_thread_flag(TIF_ISOLATE_BP_GUEST); + vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); if (rc == -EINTR) { diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S index d66751397e72..e1fa974ac500 100644 --- a/arch/s390/lib/mem.S +++ b/arch/s390/lib/mem.S @@ -7,6 +7,9 @@ #include #include +#include + + GEN_BR_THUNK %r14 /* * void *memmove(void *dest, const void *src, size_t n) @@ -33,14 +36,14 @@ ENTRY(memmove) .Lmemmove_forward_remainder: larl %r5,.Lmemmove_mvc ex %r4,0(%r5) - br %r14 + BR_EX %r14 .Lmemmove_reverse: ic %r0,0(%r4,%r3) stc %r0,0(%r4,%r1) brctg %r4,.Lmemmove_reverse ic %r0,0(%r4,%r3) stc %r0,0(%r4,%r1) - br %r14 + BR_EX %r14 .Lmemmove_mvc: mvc 0(1,%r1),0(%r3) EXPORT_SYMBOL(memmove) @@ -77,7 +80,7 @@ ENTRY(memset) .Lmemset_clear_remainder: larl %r3,.Lmemset_xc ex %r4,0(%r3) - br %r14 + BR_EX %r14 .Lmemset_fill: stc %r3,0(%r2) cghi %r4,1 @@ -94,7 +97,7 @@ ENTRY(memset) .Lmemset_fill_remainder: larl %r3,.Lmemset_mvc ex %r4,0(%r3) - br %r14 + BR_EX %r14 .Lmemset_xc: xc 0(1,%r1),0(%r1) .Lmemset_mvc: @@ -117,7 +120,7 @@ ENTRY(memcpy) .Lmemcpy_remainder: larl %r5,.Lmemcpy_mvc ex %r4,0(%r5) - br %r14 + BR_EX %r14 .Lmemcpy_loop: mvc 0(256,%r1),0(%r3) la %r1,256(%r1) diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index cc2faffa7d6e..334b6d103cbd 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -85,8 +85,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end) /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */ VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE); - if (end >= TASK_SIZE_MAX) - return -ENOMEM; rc = 0; notify = 0; while (mm->context.asce_limit < end) { diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S index 25bb4643c4f4..9f794869c1b0 100644 --- a/arch/s390/net/bpf_jit.S +++ b/arch/s390/net/bpf_jit.S @@ -9,6 +9,7 @@ */ #include +#include #include "bpf_jit.h" /* @@ -54,7 +55,7 @@ ENTRY(sk_load_##NAME##_pos); \ clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \ jh sk_load_##NAME##_slow; \ LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \ - b OFF_OK(%r6); /* Return */ \ + B_EX OFF_OK,%r6; /* Return */ \ \ sk_load_##NAME##_slow:; \ lgr %r2,%r7; /* Arg1 = skb pointer */ \ @@ -64,11 +65,14 @@ sk_load_##NAME##_slow:; \ brasl %r14,skb_copy_bits; /* Get data from skb */ \ LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp bufffer */ \ ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \ - br %r6; /* Return */ + BR_EX %r6; /* Return */ sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */ sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */ + GEN_BR_THUNK %r6 + GEN_B_THUNK OFF_OK,%r6 + /* * Load 1 byte from SKB (optimized version) */ @@ -80,7 +84,7 @@ ENTRY(sk_load_byte_pos) clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen? 
jnl sk_load_byte_slow llgc %r14,0(%r3,%r12) # Get byte from skb - b OFF_OK(%r6) # Return OK + B_EX OFF_OK,%r6 # Return OK sk_load_byte_slow: lgr %r2,%r7 # Arg1 = skb pointer @@ -90,7 +94,7 @@ sk_load_byte_slow: brasl %r14,skb_copy_bits # Get data from skb llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer ltgr %r2,%r2 # Set cc to (%r2 != 0) - br %r6 # Return cc + BR_EX %r6 # Return cc #define sk_negative_common(NAME, SIZE, LOAD) \ sk_load_##NAME##_slow_neg:; \ @@ -104,7 +108,7 @@ sk_load_##NAME##_slow_neg:; \ jz bpf_error; \ LOAD %r14,0(%r2); /* Get data from pointer */ \ xr %r3,%r3; /* Set cc to zero */ \ - br %r6; /* Return cc */ + BR_EX %r6; /* Return cc */ sk_negative_common(word, 4, llgf) sk_negative_common(half, 2, llgh) @@ -113,4 +117,4 @@ sk_negative_common(byte, 1, llgc) bpf_error: # force a return 0 from jit handler ltgr %r15,%r15 # Set condition code - br %r6 + BR_EX %r6 diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index b15cd2f0320f..11cd151733d4 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include #include #include "bpf_jit.h" @@ -43,6 +45,8 @@ struct bpf_jit { int base_ip; /* Base address for literal pool */ int ret0_ip; /* Address of return 0 */ int exit_ip; /* Address of exit */ + int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */ + int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */ int tail_call_start; /* Tail call start offset */ int labels[1]; /* Labels for local jumps */ }; @@ -55,8 +59,7 @@ struct bpf_jit { #define SEEN_LITERAL 8 /* code uses literals */ #define SEEN_FUNC 16 /* calls C functions */ #define SEEN_TAIL_CALL 32 /* code uses tail calls */ -#define SEEN_SKB_CHANGE 64 /* code changes skb data */ -#define SEEN_REG_AX 128 /* code uses constant blinding */ +#define SEEN_REG_AX 64 /* code uses constant blinding */ #define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB) /* @@ -253,6 +256,19 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) REG_SET_SEEN(b2); \ }) +#define EMIT6_PCREL_RILB(op, b, target) \ +({ \ + int rel = (target - jit->prg) / 2; \ + _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \ + REG_SET_SEEN(b); \ +}) + +#define EMIT6_PCREL_RIL(op, target) \ +({ \ + int rel = (target - jit->prg) / 2; \ + _EMIT6(op | rel >> 16, rel & 0xffff); \ +}) + #define _EMIT6_IMM(op, imm) \ ({ \ unsigned int __imm = (imm); \ @@ -448,12 +464,12 @@ static void bpf_jit_prologue(struct bpf_jit *jit) EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, 152); } - if (jit->seen & SEEN_SKB) + if (jit->seen & SEEN_SKB) { emit_load_skb_data_hlen(jit); - if (jit->seen & SEEN_SKB_CHANGE) /* stg %b1,ST_OFF_SKBP(%r0,%r15) */ EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15, STK_OFF_SKBP); + } } /* @@ -472,8 +488,45 @@ static void bpf_jit_epilogue(struct bpf_jit *jit) EMIT4(0xb9040000, REG_2, BPF_REG_0); /* Restore registers */ save_restore_regs(jit, REGS_RESTORE); + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) { + jit->r14_thunk_ip = jit->prg; + /* Generate __s390_indirect_jump_r14 thunk */ + if (test_facility(35)) { + /* exrl %r0,.+10 */ + EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); + } else { + /* larl %r1,.+14 */ + EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14); + /* ex 0,0(%r1) */ + EMIT4_DISP(0x44000000, REG_0, REG_1, 0); + } + /* j . 
*/ + EMIT4_PCREL(0xa7f40000, 0); + } /* br %r14 */ _EMIT2(0x07fe); + + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable && + (jit->seen & SEEN_FUNC)) { + jit->r1_thunk_ip = jit->prg; + /* Generate __s390_indirect_jump_r1 thunk */ + if (test_facility(35)) { + /* exrl %r0,.+10 */ + EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); + /* j . */ + EMIT4_PCREL(0xa7f40000, 0); + /* br %r1 */ + _EMIT2(0x07f1); + } else { + /* larl %r1,.+14 */ + EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14); + /* ex 0,S390_lowcore.br_r1_trampoline */ + EMIT4_DISP(0x44000000, REG_0, REG_0, + offsetof(struct lowcore, br_r1_trampoline)); + /* j . */ + EMIT4_PCREL(0xa7f40000, 0); + } + } } /* @@ -979,12 +1032,17 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i /* lg %w1,(%l) */ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L, EMIT_CONST_U64(func)); - /* basr %r14,%w1 */ - EMIT2(0x0d00, REG_14, REG_W1); + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) { + /* brasl %r14,__s390_indirect_jump_r1 */ + EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip); + } else { + /* basr %r14,%w1 */ + EMIT2(0x0d00, REG_14, REG_W1); + } /* lgr %b0,%r2: load return value into %b0 */ EMIT4(0xb9040000, BPF_REG_0, REG_2); - if (bpf_helper_changes_pkt_data((void *)func)) { - jit->seen |= SEEN_SKB_CHANGE; + if ((jit->seen & SEEN_SKB) && + bpf_helper_changes_pkt_data((void *)func)) { /* lg %b1,ST_OFF_SKBP(%r15) */ EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0, REG_15, STK_OFF_SKBP); diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index a25d95a6612d..0fe649c0d542 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -368,7 +368,8 @@ static void zpci_irq_handler(struct airq_struct *airq) /* End of second scan with interrupts on. */ break; /* First scan complete, reenable interrupts.
*/ - zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); + if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC)) + break; si = 0; continue; } @@ -956,7 +957,7 @@ static int __init pci_base_init(void) if (!s390_pci_probe) return 0; - if (!test_facility(69) || !test_facility(71) || !test_facility(72)) + if (!test_facility(69) || !test_facility(71)) return 0; rc = zpci_debug_init(); diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c index ea34086c8674..81b840bc6e4e 100644 --- a/arch/s390/pci/pci_insn.c +++ b/arch/s390/pci/pci_insn.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -91,11 +92,14 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range) } /* Set Interruption Controls */ -void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc) +int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc) { + if (!test_facility(72)) + return -EIO; asm volatile ( " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n" : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused)); + return 0; } /* PCI Load */ diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c index 77c35350ee77..b7fa7a87e946 100644 --- a/arch/sh/boards/mach-se/770x/setup.c +++ b/arch/sh/boards/mach-se/770x/setup.c @@ -9,6 +9,7 @@ */ #include #include +#include #include #include #include @@ -115,6 +116,11 @@ static struct platform_device heartbeat_device = { #if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\ defined(CONFIG_CPU_SUBTYPE_SH7712) /* SH771X Ethernet driver */ +static struct sh_eth_plat_data sh_eth_plat = { + .phy = PHY_ID, + .phy_interface = PHY_INTERFACE_MODE_MII, +}; + static struct resource sh_eth0_resources[] = { [0] = { .start = SH_ETH0_BASE, @@ -132,7 +138,7 @@ static struct platform_device sh_eth0_device = { .name = "sh771x-ether", .id = 0, .dev = { - .platform_data = PHY_ID, + .platform_data = &sh_eth_plat, }, .num_resources = ARRAY_SIZE(sh_eth0_resources), .resource = sh_eth0_resources, @@ -155,7 +161,7 @@ static struct platform_device sh_eth1_device = { .name = "sh771x-ether", .id = 1, .dev = { - .platform_data = PHY_ID, + .platform_data = &sh_eth_plat, }, .num_resources = ARRAY_SIZE(sh_eth1_resources), .resource = sh_eth1_resources, diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c index 4205f6d42b69..a5bd03642678 100644 --- a/arch/sh/kernel/cpu/sh2/probe.c +++ b/arch/sh/kernel/cpu/sh2/probe.c @@ -43,7 +43,11 @@ void __ref cpu_probe(void) #endif #if defined(CONFIG_CPU_J2) +#if defined(CONFIG_SMP) unsigned cpu = hard_smp_processor_id(); +#else + unsigned cpu = 0; +#endif if (cpu == 0) of_scan_flat_dt(scan_cache, NULL); if (j2_ccr_base) __raw_writel(0x80000303, j2_ccr_base + 4*cpu); if (cpu != 0) return; diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index e1d751ae2498..1a2526676a87 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -1172,11 +1172,11 @@ static int __init dwarf_unwinder_init(void) dwarf_frame_cachep = kmem_cache_create("dwarf_frames", sizeof(struct dwarf_frame), 0, - SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); + SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL); dwarf_reg_cachep = kmem_cache_create("dwarf_regs", sizeof(struct dwarf_reg), 0, - SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); + SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL); dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ, dwarf_frame_cachep); diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index c001f782c5f1..28cc61216b64 100644 --- a/arch/sh/kernel/entry-common.S +++ 
b/arch/sh/kernel/entry-common.S @@ -255,7 +255,7 @@ debug_trap: mov.l @r8, r8 jsr @r8 nop - bra __restore_all + bra ret_from_exception nop CFI_ENDPROC diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c index b2d9963d5978..68b1a67533ce 100644 --- a/arch/sh/kernel/process.c +++ b/arch/sh/kernel/process.c @@ -59,7 +59,7 @@ void arch_task_cache_init(void) task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size, __alignof__(union thread_xstate), - SLAB_PANIC | SLAB_NOTRACK, NULL); + SLAB_PANIC, NULL); } #ifdef CONFIG_SH_FPU_EMU diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 57cff00cad17..b3770bb26211 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -609,7 +609,8 @@ asmlinkage void do_divide_error(unsigned long r4) break; } - force_sig_info(SIGFPE, &info, current); + info.si_signo = SIGFPE; + force_sig_info(info.si_signo, &info, current); } #endif diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c index d1064e46efe8..8aa664638c3c 100644 --- a/arch/sparc/crypto/crc32c_glue.c +++ b/arch/sparc/crypto/crc32c_glue.c @@ -133,6 +133,7 @@ static struct shash_alg alg = { .cra_name = "crc32c", .cra_driver_name = "crc32c-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_alignmask = 7, diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index abad97edf736..28db058d471b 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -83,7 +83,11 @@ ATOMIC_OPS(xor) #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0) #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) + +static inline int atomic_xchg(atomic_t *v, int new) +{ + return xchg(&v->counter, new); +} static inline int __atomic_add_unless(atomic_t *v, int a, int u) { diff --git a/arch/sparc/include/asm/bug.h b/arch/sparc/include/asm/bug.h index 6f17528356b2..ea53e418f6c0 100644 --- a/arch/sparc/include/asm/bug.h +++ b/arch/sparc/include/asm/bug.h @@ -9,10 +9,14 @@ void do_BUG(const char *file, int line); #define BUG() do { \ do_BUG(__FILE__, __LINE__); \ + barrier_before_unreachable(); \ __builtin_trap(); \ } while (0) #else -#define BUG() __builtin_trap() +#define BUG() do { \ + barrier_before_unreachable(); \ + __builtin_trap(); \ +} while (0) #endif #define HAVE_ARCH_BUG diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h index 3e3823db303e..c73b5a3ab7b9 100644 --- a/arch/sparc/include/asm/cmpxchg_32.h +++ b/arch/sparc/include/asm/cmpxchg_32.h @@ -63,6 +63,9 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) (unsigned long)_n_, sizeof(*(ptr))); \ }) +u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new); +#define cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new) + #include /* diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h index 977c3f280ba1..fa38c78de0f0 100644 --- a/arch/sparc/include/asm/compat.h +++ b/arch/sparc/include/asm/compat.h @@ -209,7 +209,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL /* * A pointer passed in from user mode. 
This should not diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index e25d25b0a34b..b361702ef52a 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h @@ -8,9 +8,11 @@ #include #include +#include #include #include +#include static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index fd9d9bac7cfa..79c3bdaaa0b4 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -980,7 +980,7 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd); #define __HAVE_ARCH_PMDP_INVALIDATE -extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, +extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); #define __HAVE_ARCH_PGTABLE_DEPOSIT diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h index 6a339a78f4f4..71dd82b43cc5 100644 --- a/arch/sparc/include/asm/ptrace.h +++ b/arch/sparc/include/asm/ptrace.h @@ -7,6 +7,7 @@ #if defined(__sparc__) && defined(__arch64__) #ifndef __ASSEMBLY__ +#include #include #include diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h index 25b6abdb3908..522a677e050d 100644 --- a/arch/sparc/include/asm/tsb.h +++ b/arch/sparc/include/asm/tsb.h @@ -217,7 +217,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; sllx REG2, 32, REG2; \ andcc REG1, REG2, %g0; \ be,pt %xcc, 700f; \ - sethi %hi(0x1ffc0000), REG2; \ + sethi %hi(0xffe00000), REG2; \ sllx REG2, 1, REG2; \ brgez,pn REG1, FAIL_LABEL; \ andn REG1, REG2, REG1; \ diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index d58520c2e6ff..7ea35e5601b6 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -101,6 +101,9 @@ #define SO_ZEROCOPY 0x003e +#define SO_TXTIME 0x003f +#define SCM_TXTIME SO_TXTIME + /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index 1a0fa10cb6b7..32bae68e34c1 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c @@ -403,7 +403,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, if (err) { printk(KERN_ERR "VIO: Could not register device %s, err=%d\n", dev_name(&vdev->dev), err); - kfree(vdev); + put_device(&vdev->dev); return NULL; } if (vdev->dp) diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c index 5010df497387..465a901a0ada 100644 --- a/arch/sparc/lib/atomic32.c +++ b/arch/sparc/lib/atomic32.c @@ -173,6 +173,20 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new) } EXPORT_SYMBOL(__cmpxchg_u32); +u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new) +{ + unsigned long flags; + u64 prev; + + spin_lock_irqsave(ATOMIC_HASH(ptr), flags); + if ((prev = *ptr) == old) + *ptr = new; + spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); + + return prev; +} +EXPORT_SYMBOL(__cmpxchg_u64); + unsigned long __xchg_u32(volatile u32 *ptr, u32 new) { unsigned long flags; diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S index e5547b22cd18..0ddbbb031822 100644 --- a/arch/sparc/lib/hweight.S +++ b/arch/sparc/lib/hweight.S @@ -44,8 +44,8 @@ EXPORT_SYMBOL(__arch_hweight32) .previous 
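A note on the __cmpxchg_u64() added to atomic32.c above: 32-bit sparc has no 64-bit compare-and-swap instruction, so the kernel emulates it the same way it already emulates the 32-bit atomics, by taking one spinlock out of a small hash table indexed by the variable's address and doing the compare-and-exchange under that lock. The new pmdp_establish() in arch/sparc/mm/tlb.c further down then loops on cmpxchg64() until its update wins. A user-space analogue of the hashed-lock emulation, with pthread mutexes standing in for the kernel's spinlocks (the lock count and hash are illustrative; build with cc -pthread):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_LOCKS 4

static pthread_mutex_t locks[NR_LOCKS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static pthread_mutex_t *lock_for(const void *ptr)
{
	/* The same address always maps to the same lock. */
	return &locks[((uintptr_t)ptr >> 3) % NR_LOCKS];
}

static uint64_t emulated_cmpxchg_u64(uint64_t *ptr, uint64_t old, uint64_t new)
{
	pthread_mutex_t *lock = lock_for(ptr);
	uint64_t prev;

	pthread_mutex_lock(lock);
	prev = *ptr;
	if (prev == old)
		*ptr = new;
	pthread_mutex_unlock(lock);
	return prev;	/* caller checks prev == old to see if it won */
}

int main(void)
{
	uint64_t v = 5;

	if (emulated_cmpxchg_u64(&v, 5, 9) == 5)
		printf("swapped, v is now %llu\n", (unsigned long long)v);
	return 0;
}

Hashing on the address keeps unrelated variables from serializing on one global lock, while two racing updates to the same address are still guaranteed to pick the same lock.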
ENTRY(__arch_hweight64) - sethi %hi(__sw_hweight16), %g1 - jmpl %g1 + %lo(__sw_hweight16), %g0 + sethi %hi(__sw_hweight64), %g1 + jmpl %g1 + %lo(__sw_hweight64), %g0 nop ENDPROC(__arch_hweight64) EXPORT_SYMBOL(__arch_hweight64) diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 61bdc1270d19..984e9d65ea0d 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2540,9 +2540,16 @@ void __init mem_init(void) { high_memory = __va(last_valid_pfn << PAGE_SHIFT); - register_page_bootmem_info(); free_all_bootmem(); + /* + * Must be done after boot memory is put on freelist, because here we + * might set fields in deferred struct pages that have not yet been + * initialized, and free_all_bootmem() initializes all the reserved + * deferred pages for us. + */ + register_page_bootmem_info(); + /* * Set up the zero page, mark it reserved, so that page count * is not manipulated when freeing the page from user ptes. @@ -2927,7 +2934,7 @@ void __flush_tlb_all(void) pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { - struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); pte_t *pte = NULL; if (page) @@ -2939,7 +2946,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) { - struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) return NULL; if (!pgtable_page_ctor(page)) { diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 4ae86bc0d35c..847ddffbf38a 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -219,17 +219,28 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, } } +static inline pmd_t pmdp_establish(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, pmd_t pmd) +{ + pmd_t old; + + do { + old = *pmdp; + } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd); + + return old; +} + /* * This routine is only called when splitting a THP */ -void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, +pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { - pmd_t entry = *pmdp; - - pmd_val(entry) &= ~_PAGE_VALID; + pmd_t old, entry; - set_pmd_at(vma->vm_mm, address, pmdp, entry); + entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID); + old = pmdp_establish(vma, address, pmdp, entry); flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); /* @@ -240,6 +251,8 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, if ((pmd_val(entry) & _PAGE_PMD_HUGE) && !is_huge_zero_page(pmd_page(entry))) (vma->vm_mm)->context.thp_pte_count--; + + return old; } void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index 5765e7e711f7..ff5f9cb3039a 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -1245,14 +1245,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) u8 *func = ((u8 *)__bpf_call_base) + imm; ctx->saw_call = true; + if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func)) + emit_reg_move(bpf2sparc[BPF_REG_1], L7, ctx); emit_call((u32 *)func, ctx); emit_nop(ctx); emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx); - if (bpf_helper_changes_pkt_data(func) && ctx->saw_ld_abs_ind) - load_skb_regs(ctx, bpf2sparc[BPF_REG_6]); + if (ctx->saw_ld_abs_ind && 
bpf_helper_changes_pkt_data(func)) + load_skb_regs(ctx, L7); break; } diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h index c14e36f008c8..62a7b83025dd 100644 --- a/arch/tile/include/asm/compat.h +++ b/arch/tile/include/asm/compat.h @@ -173,7 +173,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL struct compat_ipc64_perm { compat_key_t key; diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 50a32c33d729..73c57f614c9e 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -1,4 +1,5 @@ generic-y += barrier.h +generic-y += bpf_perf_event.h generic-y += bug.h generic-y += clkdev.h generic-y += current.h diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h index b668e351fd6c..fca34b2177e2 100644 --- a/arch/um/include/asm/mmu_context.h +++ b/arch/um/include/asm/mmu_context.h @@ -15,9 +15,10 @@ extern void uml_setup_stubs(struct mm_struct *mm); /* * Needed since we do not use the asm-generic/mm_hooks.h: */ -static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { uml_setup_stubs(mm); + return 0; } extern void arch_exit_mmap(struct mm_struct *mm); static inline void arch_unmap(struct mm_struct *mm, diff --git a/arch/um/include/shared/init.h b/arch/um/include/shared/init.h index 390572daa40d..b3f5865a92c9 100644 --- a/arch/um/include/shared/init.h +++ b/arch/um/include/shared/init.h @@ -41,7 +41,7 @@ typedef int (*initcall_t)(void); typedef void (*exitcall_t)(void); -#include +#include /* These are for everybody (although not all archs will actually discard it in modules) */ diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c index 2db18cbbb0ea..c0197097c86e 100644 --- a/arch/um/os-Linux/file.c +++ b/arch/um/os-Linux/file.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c index a86d7cc2c2d8..bf0acb8aad8b 100644 --- a/arch/um/os-Linux/signal.c +++ b/arch/um/os-Linux/signal.c @@ -16,6 +16,7 @@ #include #include #include +#include void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = { [SIGTRAP] = relay_signal, @@ -159,7 +160,7 @@ static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = { static void hard_handler(int sig, siginfo_t *si, void *p) { - struct ucontext *uc = p; + ucontext_t *uc = p; mcontext_t *mc = &uc->uc_mcontext; unsigned long pending = 1UL << sig; diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h index 59b06b48f27d..5c205a9cb5a6 100644 --- a/arch/unicore32/include/asm/mmu_context.h +++ b/arch/unicore32/include/asm/mmu_context.h @@ -81,9 +81,10 @@ do { \ } \ } while (0) -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) { + return 0; } static inline void arch_unmap(struct mm_struct *mm, diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h index 26775793c204..f0fdb268f8f2 100644 --- a/arch/unicore32/include/asm/pgalloc.h +++ b/arch/unicore32/include/asm/pgalloc.h @@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd); #define pgd_alloc(mm) get_pgd_slow(mm) #define pgd_free(mm, pgd) free_pgd_slow(mm, pgd) -#define PGALLOC_GFP 
(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO) /* * Allocate one PTE table. diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c index 5f25b39f04d4..c4ac6043ebb0 100644 --- a/arch/unicore32/kernel/traps.c +++ b/arch/unicore32/kernel/traps.c @@ -298,7 +298,6 @@ void abort(void) /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } -EXPORT_SYMBOL(abort); void __init trap_init(void) { diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild index 0038a2d10a7a..466219296cd6 100644 --- a/arch/x86/Kbuild +++ b/arch/x86/Kbuild @@ -7,6 +7,8 @@ obj-$(CONFIG_KVM) += kvm/ # Xen paravirtualization support obj-$(CONFIG_XEN) += xen/ +obj-$(CONFIG_ACRN) += acrn/ + # Hyper-V paravirtualization support obj-$(subst m,y,$(CONFIG_HYPERV)) += hyperv/ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 2fdb23313dd5..ffd58020f26d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -56,7 +56,7 @@ config X86 select ARCH_HAS_KCOV if X86_64 select ARCH_HAS_PMEM_API if X86_64 # Causing hangs/crashes, see the commit that added this change for details. - select ARCH_HAS_REFCOUNT if BROKEN + select ARCH_HAS_REFCOUNT select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 select ARCH_HAS_SET_MEMORY select ARCH_HAS_SG_CHAIN @@ -89,6 +89,7 @@ config X86 select GENERIC_CLOCKEVENTS_MIN_ADJUST select GENERIC_CMOS_UPDATE select GENERIC_CPU_AUTOPROBE + select GENERIC_CPU_VULNERABILITIES select GENERIC_EARLY_IOREMAP select GENERIC_FIND_FIRST_BIT select GENERIC_IOMAP @@ -108,9 +109,8 @@ config X86 select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE select HAVE_ARCH_JUMP_LABEL - select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP + select HAVE_ARCH_KASAN if X86_64 select HAVE_ARCH_KGDB - select HAVE_ARCH_KMEMCHECK select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT select HAVE_ARCH_COMPAT_MMAP_BASES if MMU && COMPAT @@ -171,11 +171,12 @@ config X86 select HAVE_PERF_USER_STACK_DUMP select HAVE_RCU_TABLE_FREE select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_RELIABLE_STACKTRACE if X86_64 && FRAME_POINTER_UNWINDER && STACK_VALIDATION + select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION select HAVE_STACK_VALIDATION if X86_64 select HAVE_SYSCALL_TRACEPOINTS select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_USER_RETURN_NOTIFIER + select HOTPLUG_SMT if SMP select IRQ_FORCED_THREADING select PCI_LOCKLESS_CONFIG select PERF_EVENTS @@ -303,7 +304,6 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC config KASAN_SHADOW_OFFSET hex depends on KASAN - default 0xdff8000000000000 if X86_5LEVEL default 0xdffffc0000000000 config HAVE_INTEL_TXT @@ -429,6 +429,20 @@ config GOLDFISH def_bool y depends on X86_GOLDFISH +config RETPOLINE + bool "Avoid speculative indirect branches in kernel" + default y + select STACK_VALIDATION if HAVE_STACK_VALIDATION + help + Compile kernel with the retpoline compiler options to guard against + kernel-to-user data leaks by avoiding speculative indirect + branches. Requires a compiler with -mindirect-branch=thunk-extern + support for full protection. The kernel may run slower. + + Without compiler support, at least indirect branches in assembler + code are eliminated. Since this includes the syscall entry path, + it is not entirely pointless. + config INTEL_RDT bool "Intel Resource Director Technology support" default n @@ -759,6 +773,8 @@ config QUEUED_LOCK_STAT behavior of paravirtualized queued spinlocks and report them on debugfs. 
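The RETPOLINE option above only gives full protection when the compiler understands -mindirect-branch=thunk-extern; the arch/x86/Makefile hunk further down adds $(RETPOLINE_CFLAGS) only when that probe succeeded. To see what the flag actually changes, one can compile a trivial indirect call to assembly with and without it and diff the output. The sketch below uses -mindirect-branch=thunk (the self-contained variant) so it links as a normal program, whereas the kernel uses thunk-extern and supplies the __x86_indirect_thunk_* symbols itself; whether your gcc accepts these flags depends on its version, so treat the commands as assumptions to verify:

/*
 * retpoline_demo.c - assuming a retpoline-capable gcc:
 *   gcc -O2 -S retpoline_demo.c -o plain.s
 *   gcc -O2 -mindirect-branch=thunk -mindirect-branch-register \
 *       -S retpoline_demo.c -o retpoline.s
 * plain.s should contain a bare indirect branch such as "jmp *%rdi";
 * retpoline.s routes it through a __x86_indirect_thunk_* sequence.
 */
#include <stdio.h>

static int add_one(int x)
{
	return x + 1;
}

/* An ordinary indirect call; this is the pattern Spectre v2 attacks. */
__attribute__((noinline)) int call_indirect(int (*fn)(int), int x)
{
	return fn(x);
}

int main(void)
{
	printf("%d\n", call_indirect(add_one, 41));
	return 0;
}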
+source "arch/x86/acrn/Kconfig" + source "arch/x86/xen/Kconfig" config KVM_GUEST @@ -926,7 +942,8 @@ config MAXSMP config NR_CPUS int "Maximum number of CPUs" if SMP && !MAXSMP range 2 8 if SMP && X86_32 && !X86_BIGSMP - range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK + range 2 64 if SMP && X86_32 && X86_BIGSMP + range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64 range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64 default "1" if !SMP default "8192" if MAXSMP @@ -1429,7 +1446,7 @@ config ARCH_DMA_ADDR_T_64BIT config X86_DIRECT_GBPAGES def_bool y - depends on X86_64 && !DEBUG_PAGEALLOC && !KMEMCHECK + depends on X86_64 && !DEBUG_PAGEALLOC ---help--- Certain kernel features effectively disable kernel linear 1 GB mappings (even if the CPU otherwise @@ -1803,6 +1820,16 @@ config X86_SMAP If unsure, say Y. +config X86_INTEL_UMIP + def_bool n + depends on CPU_SUP_INTEL + prompt "Intel User Mode Instruction Prevention" if EXPERT + ---help--- + The User Mode Instruction Prevention (UMIP) is a security + feature in newer Intel processors. If enabled, a general + protection fault is issued if the instructions SGDT, SLDT, + SIDT, SMSW and STR are executed in user mode. + config X86_INTEL_MPX prompt "Intel MPX (Memory Protection Extensions)" def_bool n diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 90b123056f4b..6293a8768a91 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -359,28 +359,14 @@ config PUNIT_ATOM_DEBUG choice prompt "Choose kernel unwinder" - default FRAME_POINTER_UNWINDER + default UNWINDER_ORC if X86_64 + default UNWINDER_FRAME_POINTER if X86_32 ---help--- This determines which method will be used for unwinding kernel stack traces for panics, oopses, bugs, warnings, perf, /proc//stack, livepatch, lockdep, and more. -config FRAME_POINTER_UNWINDER - bool "Frame pointer unwinder" - select FRAME_POINTER - ---help--- - This option enables the frame pointer unwinder for unwinding kernel - stack traces. - - The unwinder itself is fast and it uses less RAM than the ORC - unwinder, but the kernel text size will grow by ~3% and the kernel's - overall performance will degrade by roughly 5-10%. - - This option is recommended if you want to use the livepatch - consistency model, as this is currently the only way to get a - reliable stack trace (CONFIG_HAVE_RELIABLE_STACKTRACE). - -config ORC_UNWINDER +config UNWINDER_ORC bool "ORC unwinder" depends on X86_64 select STACK_VALIDATION @@ -396,7 +382,22 @@ config ORC_UNWINDER Enabling this option will increase the kernel's runtime memory usage by roughly 2-4MB, depending on your kernel config. -config GUESS_UNWINDER +config UNWINDER_FRAME_POINTER + bool "Frame pointer unwinder" + select FRAME_POINTER + ---help--- + This option enables the frame pointer unwinder for unwinding kernel + stack traces. + + The unwinder itself is fast and it uses less RAM than the ORC + unwinder, but the kernel text size will grow by ~3% and the kernel's + overall performance will degrade by roughly 5-10%. + + This option is recommended if you want to use the livepatch + consistency model, as this is currently the only way to get a + reliable stack trace (CONFIG_HAVE_RELIABLE_STACKTRACE). 
+ +config UNWINDER_GUESS bool "Guess unwinder" depends on EXPERT ---help--- @@ -411,7 +412,7 @@ endchoice config FRAME_POINTER - depends on !ORC_UNWINDER && !GUESS_UNWINDER + depends on !UNWINDER_ORC && !UNWINDER_GUESS bool endmenu diff --git a/arch/x86/Makefile b/arch/x86/Makefile index a20eacd9c7e9..9451ea021131 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -111,6 +111,8 @@ else KBUILD_CFLAGS += $(call cc-option,-mno-80387) KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387) + KBUILD_CFLAGS += -fno-pic + # By default gcc and clang use a stack alignment of 16 bytes for x86. # However the standard kernel entry on x86-64 leaves the stack on an # 8-byte boundary. If the compiler isn't informed about the actual @@ -158,11 +160,6 @@ ifdef CONFIG_X86_X32 endif export CONFIG_X86_X32_ABI -# Don't unroll struct assignments with kmemcheck enabled -ifeq ($(CONFIG_KMEMCHECK),y) - KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy) -endif - # # If the function graph tracer is used with mcount instead of fentry, # '-maccumulate-outgoing-args' is needed to prevent a GCC bug @@ -228,6 +225,15 @@ KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) LDFLAGS := -m elf_$(UTS_MACHINE) +# +# The 64-bit kernel must be aligned to 2MB. Pass -z max-page-size=0x200000 to +# the linker to force 2MB page size regardless of the default page size used +# by the linker. +# +ifdef CONFIG_X86_64 +LDFLAGS += $(call ld-option, -z max-page-size=0x200000) +endif + # Speed up the build KBUILD_CFLAGS += -pipe # Workaround for a gcc prelease that unfortunately was shipped in a suse release @@ -235,6 +241,13 @@ KBUILD_CFLAGS += -Wno-sign-compare # KBUILD_CFLAGS += -fno-asynchronous-unwind-tables +# Avoid indirect branches in kernel to deal with Spectre +ifdef CONFIG_RETPOLINE +ifneq ($(RETPOLINE_CFLAGS),) + KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE +endif +endif + archscripts: scripts_basic $(Q)$(MAKE) $(build)=arch/x86/tools relocs diff --git a/arch/x86/acrn/Kconfig b/arch/x86/acrn/Kconfig new file mode 100644 index 000000000000..aa30915a4474 --- /dev/null +++ b/arch/x86/acrn/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# This Kconfig describes ACRN options +# + +config ACRN + bool "ACRN guest support" + depends on X86_64 + depends on PARAVIRT + help + This is the Linux ACRN port. Enabling this will allow the + kernel to boot in a paravirtualized environment under the + ACRN hypervisor. + +config ACRN_HV + bool "Enable services running on the ACRN hypervisor" + depends on ACRN + help + This option is needed if you want to run the ACRN services Linux + on top of the ACRN hypervisor. diff --git a/arch/x86/acrn/Makefile b/arch/x86/acrn/Makefile new file mode 100644 index 000000000000..3f42025a326a --- /dev/null +++ b/arch/x86/acrn/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_ACRN_HV) += acrn_hv.o diff --git a/arch/x86/acrn/acrn_hv.c b/arch/x86/acrn/acrn_hv.c new file mode 100644 index 000000000000..10fcc9a963ff --- /dev/null +++ b/arch/x86/acrn/acrn_hv.c @@ -0,0 +1,89 @@ +/* + * ACRN hypervisor support + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Jason Chen CJ + * + */ +#include +#include + +static unsigned long cpu_khz_from_acrn(void) +{ + unsigned int eax, ebx, ecx, edx; + + /* Get TSC frequency from cpuid 0x40000010 */ + eax = 0x40000010; + ebx = ecx = edx = 0; + __cpuid(&eax, &ebx, &ecx, &edx); + + return (unsigned long)eax; +} + +static uint32_t __init acrn_detect(void) +{ + return hypervisor_cpuid_base("ACRNACRNACRN", 0); +} + +static void __init acrn_init_platform(void) +{ + pv_cpu_ops.cpu_khz = cpu_khz_from_acrn; + +#ifdef CONFIG_ACRN_VHM + pv_irq_ops.write_msi = acrn_write_msi_msg; +#endif /* CONFIG_ACRN_VHM */ +} + +static void acrn_pin_vcpu(int cpu) +{ + /* do nothing here now */ +} + +static bool acrn_x2apic_available(void) +{ + /* do not support x2apic */ + return false; +} + +static void __init acrn_init_mem_mapping(void) +{ + /* do nothing here now */ +} + +const struct hypervisor_x86 x86_hyper_acrn = { + .name = "ACRN", + .detect = acrn_detect, + .type = X86_HYPER_ACRN, + .init.init_platform = acrn_init_platform, + .runtime.pin_vcpu = acrn_pin_vcpu, + .init.x2apic_available = acrn_x2apic_available, + .init.init_mem_mapping = acrn_init_mem_mapping, +}; +EXPORT_SYMBOL(x86_hyper_acrn); diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 4b7575b00563..98018a621f6b 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -78,6 +78,7 @@ vmlinux-objs-$(CONFIG_EARLY_PRINTK) += $(obj)/early_serial_console.o vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o ifdef CONFIG_X86_64 vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o + vmlinux-objs-y += $(obj)/pgtable_64.o endif $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index e56dbc67e837..97c57b5f8d57 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -163,7 +163,8 @@ __setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom) if (status != EFI_SUCCESS) goto free_struct; - memcpy(rom->romdata, pci->romimage, pci->romsize); + memcpy(rom->romdata, (void *)(unsigned long)pci->romimage, + pci->romsize); return status; free_struct: @@ -269,7 +270,8 @@ __setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom) if (status != 
EFI_SUCCESS) goto free_struct; - memcpy(rom->romdata, pci->romimage, pci->romsize); + memcpy(rom->romdata, (void *)(unsigned long)pci->romimage, + pci->romsize); return status; free_struct: diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index beb255b66447..4b3d92a37c80 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -289,10 +289,18 @@ ENTRY(startup_64) leaq boot_stack_end(%rbx), %rsp #ifdef CONFIG_X86_5LEVEL - /* Check if 5-level paging has already enabled */ - movq %cr4, %rax - testl $X86_CR4_LA57, %eax - jnz lvl5 + /* + * Check if we need to enable 5-level paging. + * RSI holds real mode data and needs to be preserved across + * a function call. + */ + pushq %rsi + call l5_paging_required + popq %rsi + + /* If l5_paging_required() returned zero, we're done here. */ + cmpq $0, %rax + je lvl5 /* * At this point we are in long mode with 4-level paging enabled, diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index b50c42455e25..252fee320816 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -169,6 +169,16 @@ void __puthex(unsigned long value) } } +static bool l5_supported(void) +{ + /* Check if leaf 7 is supported. */ + if (native_cpuid_eax(0) < 7) + return 0; + + /* Check if la57 is supported. */ + return native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)); +} + #if CONFIG_X86_NEED_RELOCS static void handle_relocations(void *output, unsigned long output_len, unsigned long virt_addr) @@ -299,6 +309,10 @@ static void parse_elf(void *output) switch (phdr->p_type) { case PT_LOAD: +#ifdef CONFIG_X86_64 + if ((phdr->p_align % 0x200000) != 0) + error("Alignment of LOAD segment isn't a multiple of 2MB"); +#endif #ifdef CONFIG_RELOCATABLE dest = output; dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); @@ -362,6 +376,12 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, console_init(); debug_putstr("early console in extract_kernel\n"); + if (IS_ENABLED(CONFIG_X86_5LEVEL) && !l5_supported()) { + error("This Linux kernel as configured requires 5-level paging\n" + "This CPU does not support the required 'cr4.la57' feature\n" + "Unable to boot - please use a kernel appropriate for your CPU\n"); + } + free_mem_ptr = heap; /* Heap */ free_mem_end_ptr = heap + BOOT_HEAP_SIZE; diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c index 972319ff5b01..e691ff734cb5 100644 --- a/arch/x86/boot/compressed/pagetable.c +++ b/arch/x86/boot/compressed/pagetable.c @@ -23,6 +23,9 @@ */ #undef CONFIG_AMD_MEM_ENCRYPT +/* No PAGE_TABLE_ISOLATION support needed either: */ +#undef CONFIG_PAGE_TABLE_ISOLATION + #include "misc.h" /* These actually do the work of building the kernel identity maps. */ diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c new file mode 100644 index 000000000000..b4469a37e9a1 --- /dev/null +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -0,0 +1,28 @@ +#include + +/* + * __force_order is used by special_insns.h asm code to force instruction + * serialization. + * + * It is not referenced from the code, but GCC < 5 with -fPIE would fail + * due to an undefined symbol. Define it to make these ancient GCCs work. + */ +unsigned long __force_order; + +int l5_paging_required(void) +{ + /* Check if leaf 7 is supported. */ + + if (native_cpuid_eax(0) < 7) + return 0; + + /* Check if la57 is supported. 
*/ + if (!(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) + return 0; + + /* Check if 5-level paging has already been enabled. */ + if (native_read_cr4() & X86_CR4_LA57) + return 0; + + return 1; +} diff --git a/arch/x86/configs/abl_diffconfig b/arch/x86/configs/abl_diffconfig new file mode 100644 index 000000000000..899be931801b --- /dev/null +++ b/arch/x86/configs/abl_diffconfig @@ -0,0 +1 @@ +CONFIG_ABL_BOOTLOADER_CONTROL=y diff --git a/arch/x86/configs/i386_ranchu_defconfig b/arch/x86/configs/i386_ranchu_defconfig new file mode 100644 index 000000000000..4e9dc7d49cbe --- /dev/null +++ b/arch/x86/configs/i386_ranchu_defconfig @@ -0,0 +1,421 @@ +# CONFIG_64BIT is not set +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_ARCH_MMAP_RND_BITS=16 +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_SGI_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_SMP=y +CONFIG_X86_BIGSMP=y +CONFIG_MCORE2=y +CONFIG_X86_GENERIC=y +CONFIG_HPET_TIMER=y +CONFIG_NR_CPUS=512 +CONFIG_PREEMPT=y +# CONFIG_X86_MCE is not set +CONFIG_X86_REBOOTFIXUPS=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_KSM=y +CONFIG_CMA=y +# CONFIG_MTRR_SANITIZER is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_HZ_100=y +CONFIG_PHYSICAL_START=0x100000 +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_CPU_FREQ=y +# CONFIG_CPU_FREQ_STAT is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_PCIEPORTBUS=y +# CONFIG_PCIEASPM is not set +CONFIG_PCCARD=y +CONFIG_YENTA=y +CONFIG_HOTPLUG_PCI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_MISC=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_ESP=y +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETLABEL=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y 
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +CONFIG_CFG80211=y +CONFIG_MAC80211=y +CONFIG_MAC80211_LEDS=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DMA_CMA=y +CONFIG_CMA_SIZE_MBYTES=16 +CONFIG_CONNECTOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_ISCSI_ATTRS=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_ATA_PIIX=y +CONFIG_PATA_AMD=y +CONFIG_PATA_OLDPIIX=y +CONFIG_PATA_SCH=y +CONFIG_PATA_MPIIX=y +CONFIG_ATA_GENERIC=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +CONFIG_DM_MIRROR=y +CONFIG_DM_ZERO=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_NETCONSOLE=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_BNX2=y +CONFIG_TIGON3=y +CONFIG_NET_TULIP=y +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_SKY2=y +CONFIG_NE2K_PCI=y +CONFIG_FORCEDETH=y +CONFIG_8139TOO=y +# CONFIG_8139TOO_PIO is not set +CONFIG_R8169=y +CONFIG_FDDI=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_USB_USBNET=y +CONFIG_INPUT_POLLDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=y +CONFIG_TABLET_USB_AIPTEK=y +CONFIG_TABLET_USB_GTCO=y +CONFIG_TABLET_USB_HANWANG=y +CONFIG_TABLET_USB_KBTAB=y 
+CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_NVRAM=y +CONFIG_I2C_I801=y +CONFIG_BATTERY_GOLDFISH=y +CONFIG_WATCHDOG=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_AGP=y +CONFIG_AGP_AMD64=y +CONFIG_AGP_INTEL=y +CONFIG_DRM=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y +CONFIG_FB_EFI=y +CONFIG_FB_GOLDFISH=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_LCD_CLASS_DEVICE is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_HOLTEK=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_UCLOGIC=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_ROCCAT=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_PRINTER=y +CONFIG_USB_STORAGE=y +CONFIG_USB_OTG_WAKELOCK=y +CONFIG_EDAC=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_HCTOSYS is not set +CONFIG_DMADEVICES=y +CONFIG_VIRTIO_PCI=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SYNC_FILE=y +CONFIG_ION=y +CONFIG_GOLDFISH_AUDIO=y +CONFIG_SND_HDA_INTEL=y +CONFIG_GOLDFISH=y +CONFIG_GOLDFISH_PIPE=y +CONFIG_GOLDFISH_SYNC=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_FUSE_FS=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=2048 +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y 
+CONFIG_TIMER_STATS=y +CONFIG_SCHED_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +CONFIG_KEYS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y +CONFIG_CRYPTO_AES_586=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +CONFIG_PKCS7_TEST_KEY=y +# CONFIG_VIRTUALIZATION is not set +CONFIG_CRC_T10DIF=y diff --git a/arch/x86/configs/tiny.config b/arch/x86/configs/tiny.config index 550cd5012b73..66c9e2aab16c 100644 --- a/arch/x86/configs/tiny.config +++ b/arch/x86/configs/tiny.config @@ -1,5 +1,5 @@ CONFIG_NOHIGHMEM=y # CONFIG_HIGHMEM4G is not set # CONFIG_HIGHMEM64G is not set -CONFIG_GUESS_UNWINDER=y -# CONFIG_FRAME_POINTER_UNWINDER is not set +CONFIG_UNWINDER_GUESS=y +# CONFIG_UNWINDER_FRAME_POINTER is not set diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig new file mode 100644 index 000000000000..19e3a812306b --- /dev/null +++ b/arch/x86/configs/x86_64_cuttlefish_defconfig @@ -0,0 +1,460 @@ +CONFIG_POSIX_MQUEUE=y +# CONFIG_FHANDLE is not set +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_CGROUPS=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_BPF=y +CONFIG_NAMESPACES=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_LZ4 is not set +CONFIG_KALLSYMS_ALL=y +# CONFIG_PCSPKR_PLATFORM is not set +CONFIG_BPF_SYSCALL=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_OPROFILE=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_CC_STACKPROTECTOR_STRONG=y +CONFIG_REFCOUNT_FULL=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_SMP=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_SPINLOCKS=y +CONFIG_MCORE2=y +CONFIG_PROCESSOR_SELECT=y +# CONFIG_CPU_SUP_CENTAUR is not set +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT=y +# CONFIG_MICROCODE is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 +CONFIG_TRANSPARENT_HUGEPAGE=y +# CONFIG_MTRR is not set +CONFIG_HZ_100=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_PHYSICAL_START=0x200000 +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=ttyS0 reboot=p" +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_ACPI_PROCFS_POWER=y +# CONFIG_ACPI_FAN is not set +# CONFIG_ACPI_THERMAL is not set +# CONFIG_X86_PM_TIMER is not set +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_X86_ACPI_CPUFREQ=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_MSI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_MISC=y +CONFIG_IA32_EMULATION=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=y +CONFIG_INET_ESP=y +# CONFIG_INET_XFRM_MODE_BEET is not set +CONFIG_INET_DIAG_DESTROY=y +CONFIG_TCP_CONG_ADVANCED=y +# CONFIG_TCP_CONG_BIC is not set +# CONFIG_TCP_CONG_WESTWOOD 
is not set +# CONFIG_TCP_CONG_HTCP is not set +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETLABEL=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_SOCKET_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_NF_SOCKET_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +CONFIG_CFG80211=y +CONFIG_MAC80211=y +CONFIG_RFKILL=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEBUG_DEVRES=y +CONFIG_OF=y +CONFIG_OF_UNITTEST=y +# CONFIG_PNP_DEBUG_MESSAGES is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_UID_SYS_STATS=y +CONFIG_MEMORY_STATE_TIME=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_VIRTIO=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_MIRROR=y +CONFIG_DM_ZERO=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE=1 +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_ANDROID_VERITY=y +CONFIG_NETDEVICES=y 
+CONFIG_NETCONSOLE=y +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +# CONFIG_ETHERNET is not set +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_USB_USBNET=y +# CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +# CONFIG_USB_NET_CDCETHER is not set +# CONFIG_USB_NET_CDC_NCM is not set +# CONFIG_USB_NET_NET1080 is not set +# CONFIG_USB_NET_CDC_SUBSET is not set +# CONFIG_USB_NET_ZAURUS is not set +# CONFIG_WLAN_VENDOR_ADMTEK is not set +# CONFIG_WLAN_VENDOR_ATH is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +# CONFIG_WLAN_VENDOR_BROADCOM is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +# CONFIG_WLAN_VENDOR_INTEL is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +# CONFIG_WLAN_VENDOR_MARVELL is not set +# CONFIG_WLAN_VENDOR_MEDIATEK is not set +# CONFIG_WLAN_VENDOR_RALINK is not set +# CONFIG_WLAN_VENDOR_REALTEK is not set +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +# CONFIG_WLAN_VENDOR_QUANTENNA is not set +CONFIG_MAC80211_HWSIM=y +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=y +CONFIG_TABLET_USB_AIPTEK=y +CONFIG_TABLET_USB_GTCO=y +CONFIG_TABLET_USB_HANWANG=y +CONFIG_TABLET_USB_KBTAB=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO_I8042 is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_SERIAL_8250_EXAR is not set +CONFIG_SERIAL_8250_NR_UARTS=48 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_INTEL is not set +# CONFIG_HW_RANDOM_AMD is not set +# CONFIG_HW_RANDOM_VIA is not set +CONFIG_HW_RANDOM_VIRTIO=y +CONFIG_HPET=y +# CONFIG_HPET_MMAP_DEFAULT is not set +# CONFIG_DEVPORT is not set +# CONFIG_ACPI_I2C_OPREGION is not set +# CONFIG_I2C_COMPAT is not set +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_PTP_1588_CLOCK=y +# CONFIG_HWMON is not set +# CONFIG_X86_PKG_TEMP_THERMAL is not set +CONFIG_WATCHDOG=y +CONFIG_SOFT_WATCHDOG=y +CONFIG_MEDIA_SUPPORT=y +# CONFIG_VGA_ARB is not set +CONFIG_DRM=y +# CONFIG_DRM_FBDEV_EMULATION is not set +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_HOLTEK=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_UCLOGIC=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y 
+CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_ROCCAT=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_GADGET=y +CONFIG_USB_DUMMY_HCD=y +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_HCTOSYS is not set +CONFIG_SW_SYNC=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_VSOC=y +CONFIG_ION=y +# CONFIG_X86_PLATFORM_DEVICES is not set +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +# CONFIG_FIRMWARE_MEMMAP is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_F2FS_FS=y +CONFIG_F2FS_FS_SECURITY=y +CONFIG_F2FS_FS_ENCRYPTION=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QFMT_V2=y +CONFIG_AUTOFS4_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_SDCARD_FS=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=1024 +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEBUG_STACKOVERFLOW=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_ENABLE_DEFAULT_TRACERS=y +CONFIG_IO_DELAY_NONE=y +CONFIG_DEBUG_BOOT_PARAMS=y +CONFIG_OPTIMIZE_INLINING=y +CONFIG_UNWINDER_FRAME_POINTER=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PATH=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +CONFIG_CRYPTO_RSA=y +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_DEV_VIRTIO=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="verity_dev_keys.x509" diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 4a4b16e56d35..e32fc1f274d8 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -299,6 +299,7 @@ CONFIG_DEBUG_STACKOVERFLOW=y # CONFIG_DEBUG_RODATA_TEST is not set CONFIG_DEBUG_BOOT_PARAMS=y CONFIG_OPTIMIZE_INLINING=y +CONFIG_UNWINDER_ORC=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_SELINUX=y diff --git a/arch/x86/configs/x86_64_ranchu_defconfig b/arch/x86/configs/x86_64_ranchu_defconfig new file mode 100644 index 000000000000..81202e3f6ae8 --- /dev/null +++ b/arch/x86/configs/x86_64_ranchu_defconfig @@ -0,0 +1,416 @@ +# CONFIG_LOCALVERSION_AUTO is not set 
+CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_ARCH_MMAP_RND_BITS=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_SGI_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_SMP=y +CONFIG_MCORE2=y +CONFIG_MAXSMP=y +CONFIG_PREEMPT=y +# CONFIG_X86_MCE is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_KSM=y +CONFIG_CMA=y +# CONFIG_MTRR_SANITIZER is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_HZ_100=y +CONFIG_PHYSICAL_START=0x100000 +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_CPU_FREQ=y +# CONFIG_CPU_FREQ_STAT is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCIEPORTBUS=y +# CONFIG_PCIEASPM is not set +CONFIG_PCCARD=y +CONFIG_YENTA=y +CONFIG_HOTPLUG_PCI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_MISC=y +CONFIG_IA32_EMULATION=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_ESP=y +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETLABEL=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y 
+CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +CONFIG_CFG80211=y +CONFIG_MAC80211=y +CONFIG_MAC80211_LEDS=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DMA_CMA=y +CONFIG_CONNECTOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_ISCSI_ATTRS=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_ATA_PIIX=y +CONFIG_PATA_AMD=y +CONFIG_PATA_OLDPIIX=y +CONFIG_PATA_SCH=y +CONFIG_PATA_MPIIX=y +CONFIG_ATA_GENERIC=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +CONFIG_DM_MIRROR=y +CONFIG_DM_ZERO=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_NETCONSOLE=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_BNX2=y +CONFIG_TIGON3=y +CONFIG_NET_TULIP=y +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_SKY2=y +CONFIG_NE2K_PCI=y +CONFIG_FORCEDETH=y +CONFIG_8139TOO=y +# CONFIG_8139TOO_PIO is not set +CONFIG_R8169=y +CONFIG_FDDI=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_USB_USBNET=y +CONFIG_INPUT_POLLDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=y +CONFIG_TABLET_USB_AIPTEK=y +CONFIG_TABLET_USB_GTCO=y +CONFIG_TABLET_USB_HANWANG=y +CONFIG_TABLET_USB_KBTAB=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_NVRAM=y +CONFIG_I2C_I801=y +CONFIG_BATTERY_GOLDFISH=y +CONFIG_WATCHDOG=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_AGP=y +CONFIG_AGP_AMD64=y +CONFIG_AGP_INTEL=y +CONFIG_DRM=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y +CONFIG_FB_EFI=y +CONFIG_FB_GOLDFISH=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_LCD_CLASS_DEVICE is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y 
+CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_HOLTEK=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_UCLOGIC=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_ROCCAT=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_PRINTER=y +CONFIG_USB_STORAGE=y +CONFIG_USB_OTG_WAKELOCK=y +CONFIG_EDAC=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_HCTOSYS is not set +CONFIG_DMADEVICES=y +CONFIG_VIRTIO_PCI=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SYNC_FILE=y +CONFIG_ION=y +CONFIG_GOLDFISH_AUDIO=y +CONFIG_SND_HDA_INTEL=y +CONFIG_GOLDFISH=y +CONFIG_GOLDFISH_PIPE=y +CONFIG_GOLDFISH_SYNC=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_FUSE_FS=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_SCHED_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +CONFIG_KEYS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +CONFIG_PKCS7_TEST_KEY=y +# CONFIG_VIRTUALIZATION is not set +CONFIG_CRC_T10DIF=y diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 5f07333bb224..9c903a420cda 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -15,7 +15,6 @@ obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o -obj-$(CONFIG_CRYPTO_SALSA20_586) += 
salsa20-i586.o obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o @@ -24,7 +23,6 @@ obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o -obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha20-x86_64.o obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o @@ -59,7 +57,6 @@ endif aes-i586-y := aes-i586-asm_32.o aes_glue.o twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o -salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o @@ -68,7 +65,6 @@ camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o -salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o chacha20-x86_64-y := chacha20-ssse3-x86_64.o chacha20_glue.o serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 16627fec80b2..12e8484a8ee7 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -32,6 +32,7 @@ #include #include #include +#include /* * The following macros are used to move an (un)aligned 16 byte value to/from @@ -89,30 +90,6 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100 ALL_F: .octa 0xffffffffffffffffffffffffffffffff .octa 0x00000000000000000000000000000000 -.section .rodata -.align 16 -.type aad_shift_arr, @object -.size aad_shift_arr, 272 -aad_shift_arr: - .octa 0xffffffffffffffffffffffffffffffff - .octa 0xffffffffffffffffffffffffffffff0C - .octa 0xffffffffffffffffffffffffffff0D0C - .octa 0xffffffffffffffffffffffffff0E0D0C - .octa 0xffffffffffffffffffffffff0F0E0D0C - .octa 0xffffffffffffffffffffff0C0B0A0908 - .octa 0xffffffffffffffffffff0D0C0B0A0908 - .octa 0xffffffffffffffffff0E0D0C0B0A0908 - .octa 0xffffffffffffffff0F0E0D0C0B0A0908 - .octa 0xffffffffffffff0C0B0A090807060504 - .octa 0xffffffffffff0D0C0B0A090807060504 - .octa 0xffffffffff0E0D0C0B0A090807060504 - .octa 0xffffffff0F0E0D0C0B0A090807060504 - .octa 0xffffff0C0B0A09080706050403020100 - .octa 0xffff0D0C0B0A09080706050403020100 - .octa 0xff0E0D0C0B0A09080706050403020100 - .octa 0x0F0E0D0C0B0A09080706050403020100 - - .text @@ -256,6 +233,37 @@ aad_shift_arr: pxor \TMP1, \GH # result is in TMP1 .endm +# Reads DLEN bytes starting at DPTR and stores in XMMDst +# where 0 < DLEN < 16 +# Clobbers %rax, DLEN and XMM1 +.macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst + cmp $8, \DLEN + jl _read_lt8_\@ + mov (\DPTR), %rax + MOVQ_R64_XMM %rax, \XMMDst + sub $8, \DLEN + jz _done_read_partial_block_\@ + xor %eax, %eax +_read_next_byte_\@: + shl $8, %rax + mov 7(\DPTR, \DLEN, 1), %al + dec \DLEN + jnz _read_next_byte_\@ + MOVQ_R64_XMM %rax, \XMM1 + pslldq $8, \XMM1 + por \XMM1, \XMMDst + jmp _done_read_partial_block_\@ +_read_lt8_\@: + xor %eax, %eax +_read_next_byte_lt8_\@: + shl $8, %rax + mov -1(\DPTR, \DLEN, 1), %al + dec \DLEN + jnz _read_next_byte_lt8_\@ + MOVQ_R64_XMM %rax, \XMMDst +_done_read_partial_block_\@: +.endm + /* * if a = number of total 
plaintext bytes * b = floor(a/16) @@ -272,62 +280,30 @@ aad_shift_arr: XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation MOVADQ SHUF_MASK(%rip), %xmm14 mov arg7, %r10 # %r10 = AAD - mov arg8, %r12 # %r12 = aadLen - mov %r12, %r11 + mov arg8, %r11 # %r11 = aadLen pxor %xmm\i, %xmm\i pxor \XMM2, \XMM2 cmp $16, %r11 - jl _get_AAD_rest8\num_initial_blocks\operation + jl _get_AAD_rest\num_initial_blocks\operation _get_AAD_blocks\num_initial_blocks\operation: movdqu (%r10), %xmm\i PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data pxor %xmm\i, \XMM2 GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 add $16, %r10 - sub $16, %r12 sub $16, %r11 cmp $16, %r11 jge _get_AAD_blocks\num_initial_blocks\operation movdqu \XMM2, %xmm\i + + /* read the last <16B of AAD */ +_get_AAD_rest\num_initial_blocks\operation: cmp $0, %r11 je _get_AAD_done\num_initial_blocks\operation - pxor %xmm\i,%xmm\i - - /* read the last <16B of AAD. since we have at least 4B of - data right after the AAD (the ICV, and maybe some CT), we can - read 4B/8B blocks safely, and then get rid of the extra stuff */ -_get_AAD_rest8\num_initial_blocks\operation: - cmp $4, %r11 - jle _get_AAD_rest4\num_initial_blocks\operation - movq (%r10), \TMP1 - add $8, %r10 - sub $8, %r11 - pslldq $8, \TMP1 - psrldq $8, %xmm\i - pxor \TMP1, %xmm\i - jmp _get_AAD_rest8\num_initial_blocks\operation -_get_AAD_rest4\num_initial_blocks\operation: - cmp $0, %r11 - jle _get_AAD_rest0\num_initial_blocks\operation - mov (%r10), %eax - movq %rax, \TMP1 - add $4, %r10 - sub $4, %r10 - pslldq $12, \TMP1 - psrldq $4, %xmm\i - pxor \TMP1, %xmm\i -_get_AAD_rest0\num_initial_blocks\operation: - /* finalize: shift out the extra bytes we read, and align - left. since pslldq can only shift by an immediate, we use - vpshufb and an array of shuffle masks */ - movq %r12, %r11 - salq $4, %r11 - movdqu aad_shift_arr(%r11), \TMP1 - PSHUFB_XMM \TMP1, %xmm\i -_get_AAD_rest_final\num_initial_blocks\operation: + READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data pxor \XMM2, %xmm\i GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 @@ -531,62 +507,30 @@ _initial_blocks_done\num_initial_blocks\operation: XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation MOVADQ SHUF_MASK(%rip), %xmm14 mov arg7, %r10 # %r10 = AAD - mov arg8, %r12 # %r12 = aadLen - mov %r12, %r11 + mov arg8, %r11 # %r11 = aadLen pxor %xmm\i, %xmm\i pxor \XMM2, \XMM2 cmp $16, %r11 - jl _get_AAD_rest8\num_initial_blocks\operation + jl _get_AAD_rest\num_initial_blocks\operation _get_AAD_blocks\num_initial_blocks\operation: movdqu (%r10), %xmm\i PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data pxor %xmm\i, \XMM2 GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 add $16, %r10 - sub $16, %r12 sub $16, %r11 cmp $16, %r11 jge _get_AAD_blocks\num_initial_blocks\operation movdqu \XMM2, %xmm\i + + /* read the last <16B of AAD */ +_get_AAD_rest\num_initial_blocks\operation: cmp $0, %r11 je _get_AAD_done\num_initial_blocks\operation - pxor %xmm\i,%xmm\i - - /* read the last <16B of AAD. 
since we have at least 4B of - data right after the AAD (the ICV, and maybe some PT), we can - read 4B/8B blocks safely, and then get rid of the extra stuff */ -_get_AAD_rest8\num_initial_blocks\operation: - cmp $4, %r11 - jle _get_AAD_rest4\num_initial_blocks\operation - movq (%r10), \TMP1 - add $8, %r10 - sub $8, %r11 - pslldq $8, \TMP1 - psrldq $8, %xmm\i - pxor \TMP1, %xmm\i - jmp _get_AAD_rest8\num_initial_blocks\operation -_get_AAD_rest4\num_initial_blocks\operation: - cmp $0, %r11 - jle _get_AAD_rest0\num_initial_blocks\operation - mov (%r10), %eax - movq %rax, \TMP1 - add $4, %r10 - sub $4, %r10 - pslldq $12, \TMP1 - psrldq $4, %xmm\i - pxor \TMP1, %xmm\i -_get_AAD_rest0\num_initial_blocks\operation: - /* finalize: shift out the extra bytes we read, and align - left. since pslldq can only shift by an immediate, we use - vpshufb and an array of shuffle masks */ - movq %r12, %r11 - salq $4, %r11 - movdqu aad_shift_arr(%r11), \TMP1 - PSHUFB_XMM \TMP1, %xmm\i -_get_AAD_rest_final\num_initial_blocks\operation: + READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data pxor \XMM2, %xmm\i GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 @@ -1385,14 +1329,6 @@ _esb_loop_\@: * * AAD Format with 64-bit Extended Sequence Number * -* aadLen: -* from the definition of the spec, aadLen can only be 8 or 12 bytes. -* The code supports 16 too but for other sizes, the code will fail. -* -* TLen: -* from the definition of the spec, TLen can only be 8, 12 or 16 bytes. -* For other sizes, the code will fail. -* * poly = x^128 + x^127 + x^126 + x^121 + 1 * *****************************************************************************/ @@ -1486,19 +1422,16 @@ _zero_cipher_left_decrypt: PSHUFB_XMM %xmm10, %xmm0 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn) - sub $16, %r11 - add %r13, %r11 - movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block - lea SHIFT_MASK+16(%rip), %r12 - sub %r13, %r12 -# adjust the shuffle mask pointer to be able to shift 16-%r13 bytes -# (%r13 is the number of bytes in plaintext mod 16) - movdqu (%r12), %xmm2 # get the appropriate shuffle mask - PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes + lea (%arg3,%r11,1), %r10 + mov %r13, %r12 + READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1 + + lea ALL_F+16(%rip), %r12 + sub %r13, %r12 movdqa %xmm1, %xmm2 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn) - movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 + movdqu (%r12), %xmm1 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0 pand %xmm1, %xmm2 @@ -1507,9 +1440,6 @@ _zero_cipher_left_decrypt: pxor %xmm2, %xmm8 GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 - # GHASH computation for the last <16 byte block - sub %r13, %r11 - add $16, %r11 # output %r13 bytes MOVQ_R64_XMM %xmm0, %rax @@ -1663,14 +1593,6 @@ ENDPROC(aesni_gcm_dec) * * AAD Format with 64-bit Extended Sequence Number * -* aadLen: -* from the definition of the spec, aadLen can only be 8 or 12 bytes. -* The code supports 16 too but for other sizes, the code will fail. -* -* TLen: -* from the definition of the spec, TLen can only be 8, 12 or 16 bytes. -* For other sizes, the code will fail. 
-* * poly = x^128 + x^127 + x^126 + x^121 + 1 ***************************************************************************/ ENTRY(aesni_gcm_enc) @@ -1763,19 +1685,16 @@ _zero_cipher_left_encrypt: movdqa SHUF_MASK(%rip), %xmm10 PSHUFB_XMM %xmm10, %xmm0 - ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) - sub $16, %r11 - add %r13, %r11 - movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks - lea SHIFT_MASK+16(%rip), %r12 + + lea (%arg3,%r11,1), %r10 + mov %r13, %r12 + READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1 + + lea ALL_F+16(%rip), %r12 sub %r13, %r12 - # adjust the shuffle mask pointer to be able to shift 16-r13 bytes - # (%r13 is the number of bytes in plaintext mod 16) - movdqu (%r12), %xmm2 # get the appropriate shuffle mask - PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn) - movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 + movdqu (%r12), %xmm1 # get the appropriate mask to mask out top 16-r13 bytes of xmm0 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0 movdqa SHUF_MASK(%rip), %xmm10 @@ -1784,9 +1703,6 @@ _zero_cipher_left_encrypt: pxor %xmm0, %xmm8 GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 # GHASH computation for the last <16 byte block - sub %r13, %r11 - add $16, %r11 - movdqa SHUF_MASK(%rip), %xmm10 PSHUFB_XMM %xmm10, %xmm0 @@ -2884,7 +2800,7 @@ ENTRY(aesni_xts_crypt8) pxor INC, STATE4 movdqu IV, 0x30(OUTP) - call *%r11 + CALL_NOSPEC %r11 movdqu 0x00(OUTP), INC pxor INC, STATE1 @@ -2929,7 +2845,7 @@ ENTRY(aesni_xts_crypt8) _aesni_gf128mul_x_ble() movups IV, (IVP) - call *%r11 + CALL_NOSPEC %r11 movdqu 0x40(OUTP), INC pxor INC, STATE1 diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 5c15d6b57329..c690ddc78c03 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -689,8 +690,8 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key, rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); } -static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, - unsigned int key_len) +static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key, + unsigned int key_len) { struct cryptd_aead **ctx = crypto_aead_ctx(parent); struct cryptd_aead *cryptd_tfm = *ctx; @@ -715,8 +716,8 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead, /* This is the Integrity Check Value (aka the authentication tag length and can * be 8, 12 or 16 bytes long. 
*/ -static int rfc4106_set_authsize(struct crypto_aead *parent, - unsigned int authsize) +static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent, + unsigned int authsize) { struct cryptd_aead **ctx = crypto_aead_ctx(parent); struct cryptd_aead *cryptd_tfm = *ctx; @@ -823,7 +824,7 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen, if (sg_is_last(req->src) && (!PageHighMem(sg_page(req->src)) || req->src->offset + req->src->length <= PAGE_SIZE) && - sg_is_last(req->dst) && + sg_is_last(req->dst) && req->dst->length && (!PageHighMem(sg_page(req->dst)) || req->dst->offset + req->dst->length <= PAGE_SIZE)) { one_entry_in_sg = 1; @@ -928,7 +929,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req) aes_ctx); } -static int rfc4106_encrypt(struct aead_request *req) +static int gcmaes_wrapper_encrypt(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cryptd_aead **ctx = crypto_aead_ctx(tfm); @@ -944,7 +945,7 @@ static int rfc4106_encrypt(struct aead_request *req) return crypto_aead_encrypt(req); } -static int rfc4106_decrypt(struct aead_request *req) +static int gcmaes_wrapper_decrypt(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cryptd_aead **ctx = crypto_aead_ctx(tfm); @@ -1115,7 +1116,7 @@ static int generic_gcmaes_decrypt(struct aead_request *req) { __be32 counter = cpu_to_be32(1); struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); + struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm); void *aes_ctx = &(ctx->aes_key_expanded); u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); @@ -1126,12 +1127,36 @@ static int generic_gcmaes_decrypt(struct aead_request *req) aes_ctx); } +static int generic_gcmaes_init(struct crypto_aead *aead) +{ + struct cryptd_aead *cryptd_tfm; + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + + cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni", + CRYPTO_ALG_INTERNAL, + CRYPTO_ALG_INTERNAL); + if (IS_ERR(cryptd_tfm)) + return PTR_ERR(cryptd_tfm); + + *ctx = cryptd_tfm; + crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); + + return 0; +} + +static void generic_gcmaes_exit(struct crypto_aead *aead) +{ + struct cryptd_aead **ctx = crypto_aead_ctx(aead); + + cryptd_free_aead(*ctx); +} + static struct aead_alg aesni_aead_algs[] = { { .setkey = common_rfc4106_set_key, .setauthsize = common_rfc4106_set_authsize, .encrypt = helper_rfc4106_encrypt, .decrypt = helper_rfc4106_decrypt, - .ivsize = 8, + .ivsize = GCM_RFC4106_IV_SIZE, .maxauthsize = 16, .base = { .cra_name = "__gcm-aes-aesni", @@ -1145,11 +1170,11 @@ static struct aead_alg aesni_aead_algs[] = { { }, { .init = rfc4106_init, .exit = rfc4106_exit, - .setkey = rfc4106_set_key, - .setauthsize = rfc4106_set_authsize, - .encrypt = rfc4106_encrypt, - .decrypt = rfc4106_decrypt, - .ivsize = 8, + .setkey = gcmaes_wrapper_set_key, + .setauthsize = gcmaes_wrapper_set_authsize, + .encrypt = gcmaes_wrapper_encrypt, + .decrypt = gcmaes_wrapper_decrypt, + .ivsize = GCM_RFC4106_IV_SIZE, .maxauthsize = 16, .base = { .cra_name = "rfc4106(gcm(aes))", @@ -1165,7 +1190,26 @@ static struct aead_alg aesni_aead_algs[] = { { .setauthsize = generic_gcmaes_set_authsize, .encrypt = generic_gcmaes_encrypt, .decrypt = generic_gcmaes_decrypt, - .ivsize = 12, + .ivsize = GCM_AES_IV_SIZE, + .maxauthsize = 16, + .base = { + .cra_name = "__generic-gcm-aes-aesni", + .cra_driver_name = "__driver-generic-gcm-aes-aesni", + 
.cra_priority = 0, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct generic_gcmaes_ctx), + .cra_alignmask = AESNI_ALIGN - 1, + .cra_module = THIS_MODULE, + }, +}, { + .init = generic_gcmaes_init, + .exit = generic_gcmaes_exit, + .setkey = gcmaes_wrapper_set_key, + .setauthsize = gcmaes_wrapper_set_authsize, + .encrypt = gcmaes_wrapper_encrypt, + .decrypt = gcmaes_wrapper_decrypt, + .ivsize = GCM_AES_IV_SIZE, .maxauthsize = 16, .base = { .cra_name = "gcm(aes)", @@ -1173,8 +1217,7 @@ static struct aead_alg aesni_aead_algs[] = { { .cra_priority = 400, .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct generic_gcmaes_ctx), - .cra_alignmask = AESNI_ALIGN - 1, + .cra_ctxsize = sizeof(struct cryptd_aead *), .cra_module = THIS_MODULE, }, } }; diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S index f7c495e2863c..a14af6eb09cb 100644 --- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S @@ -17,6 +17,7 @@ #include #include +#include #define CAMELLIA_TABLE_BYTE_LEN 272 @@ -1227,7 +1228,7 @@ camellia_xts_crypt_16way: vpxor 14 * 16(%rax), %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; - call *%r9; + CALL_NOSPEC %r9; addq $(16 * 16), %rsp; diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S index eee5b3982cfd..b66bbfa62f50 100644 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S @@ -12,6 +12,7 @@ #include #include +#include #define CAMELLIA_TABLE_BYTE_LEN 272 @@ -1343,7 +1344,7 @@ camellia_xts_crypt_32way: vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; - call *%r9; + CALL_NOSPEC %r9; addq $(16 * 32), %rsp; diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index dbea6020ffe7..575292a33bdf 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -66,8 +66,6 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src); int err; - fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way; - err = blkcipher_walk_virt(desc, walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; @@ -79,6 +77,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, /* Process multi-block batch */ if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { + fn = (enc) ? 
cast5_ecb_enc_16way : cast5_ecb_dec_16way; do { fn(ctx, wdst, wsrc); diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c index 27226df3f7d8..c8d9cdacbf10 100644 --- a/arch/x86/crypto/crc32-pclmul_glue.c +++ b/arch/x86/crypto/crc32-pclmul_glue.c @@ -162,6 +162,7 @@ static struct shash_alg alg = { .cra_name = "crc32", .cra_driver_name = "crc32-pclmul", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c index c194d5717ae5..5773e1161072 100644 --- a/arch/x86/crypto/crc32c-intel_glue.c +++ b/arch/x86/crypto/crc32c-intel_glue.c @@ -226,6 +226,7 @@ static struct shash_alg alg = { .cra_name = "crc32c", .cra_driver_name = "crc32c-intel", .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S index 7a7de27c6f41..d9b734d0c8cc 100644 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S @@ -45,6 +45,7 @@ #include #include +#include ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction @@ -172,7 +173,7 @@ continue_block: movzxw (bufp, %rax, 2), len lea crc_array(%rip), bufp lea (bufp, len, 1), bufp - jmp *bufp + JMP_NOSPEC bufp ################################################################ ## 2a) PROCESS FULL BLOCKS: diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c index e32142bc071d..28c372003e44 100644 --- a/arch/x86/crypto/poly1305_glue.c +++ b/arch/x86/crypto/poly1305_glue.c @@ -164,7 +164,6 @@ static struct shash_alg alg = { .init = poly1305_simd_init, .update = poly1305_simd_update, .final = crypto_poly1305_final, - .setkey = crypto_poly1305_setkey, .descsize = sizeof(struct poly1305_simd_desc_ctx), .base = { .cra_name = "poly1305", diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S b/arch/x86/crypto/salsa20-i586-asm_32.S deleted file mode 100644 index 329452b8f794..000000000000 --- a/arch/x86/crypto/salsa20-i586-asm_32.S +++ /dev/null @@ -1,1114 +0,0 @@ -# salsa20_pm.s version 20051229 -# D. J. Bernstein -# Public domain. 
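The new CRYPTO_ALG_OPTIONAL_KEY flag on the crc32/crc32c glue marks hashes whose ->setkey() is optional: the "key" is only an initial seed, so unkeyed use stays legal once the API starts enforcing setkey for keyed hashes. The poly1305 ->setkey removal just above is the complementary case, since poly1305 receives its one-time key through the descriptor's data stream and must not advertise a setkey at all. A condensed sketch of the seed-storing setkey this flag pairs with, close to the existing crc32c-intel glue (the real version also raises CRYPTO_TFM_RES_BAD_KEY_LEN on a bad length):

#include <crypto/internal/hash.h>
#include <linux/byteorder/generic.h>

static int crc32c_seed_setkey(struct crypto_shash *hash, const u8 *key,
			      unsigned int keylen)
{
	u32 *mctx = crypto_shash_ctx(hash);

	if (keylen != sizeof(u32))
		return -EINVAL;
	*mctx = le32_to_cpup((__le32 *)key);	/* seed used by ->init() */
	return 0;
}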
- -#include - -.text - -# enter salsa20_encrypt_bytes -ENTRY(salsa20_encrypt_bytes) - mov %esp,%eax - and $31,%eax - add $256,%eax - sub %eax,%esp - # eax_stack = eax - movl %eax,80(%esp) - # ebx_stack = ebx - movl %ebx,84(%esp) - # esi_stack = esi - movl %esi,88(%esp) - # edi_stack = edi - movl %edi,92(%esp) - # ebp_stack = ebp - movl %ebp,96(%esp) - # x = arg1 - movl 4(%esp,%eax),%edx - # m = arg2 - movl 8(%esp,%eax),%esi - # out = arg3 - movl 12(%esp,%eax),%edi - # bytes = arg4 - movl 16(%esp,%eax),%ebx - # bytes -= 0 - sub $0,%ebx - # goto done if unsigned<= - jbe ._done -._start: - # in0 = *(uint32 *) (x + 0) - movl 0(%edx),%eax - # in1 = *(uint32 *) (x + 4) - movl 4(%edx),%ecx - # in2 = *(uint32 *) (x + 8) - movl 8(%edx),%ebp - # j0 = in0 - movl %eax,164(%esp) - # in3 = *(uint32 *) (x + 12) - movl 12(%edx),%eax - # j1 = in1 - movl %ecx,168(%esp) - # in4 = *(uint32 *) (x + 16) - movl 16(%edx),%ecx - # j2 = in2 - movl %ebp,172(%esp) - # in5 = *(uint32 *) (x + 20) - movl 20(%edx),%ebp - # j3 = in3 - movl %eax,176(%esp) - # in6 = *(uint32 *) (x + 24) - movl 24(%edx),%eax - # j4 = in4 - movl %ecx,180(%esp) - # in7 = *(uint32 *) (x + 28) - movl 28(%edx),%ecx - # j5 = in5 - movl %ebp,184(%esp) - # in8 = *(uint32 *) (x + 32) - movl 32(%edx),%ebp - # j6 = in6 - movl %eax,188(%esp) - # in9 = *(uint32 *) (x + 36) - movl 36(%edx),%eax - # j7 = in7 - movl %ecx,192(%esp) - # in10 = *(uint32 *) (x + 40) - movl 40(%edx),%ecx - # j8 = in8 - movl %ebp,196(%esp) - # in11 = *(uint32 *) (x + 44) - movl 44(%edx),%ebp - # j9 = in9 - movl %eax,200(%esp) - # in12 = *(uint32 *) (x + 48) - movl 48(%edx),%eax - # j10 = in10 - movl %ecx,204(%esp) - # in13 = *(uint32 *) (x + 52) - movl 52(%edx),%ecx - # j11 = in11 - movl %ebp,208(%esp) - # in14 = *(uint32 *) (x + 56) - movl 56(%edx),%ebp - # j12 = in12 - movl %eax,212(%esp) - # in15 = *(uint32 *) (x + 60) - movl 60(%edx),%eax - # j13 = in13 - movl %ecx,216(%esp) - # j14 = in14 - movl %ebp,220(%esp) - # j15 = in15 - movl %eax,224(%esp) - # x_backup = x - movl %edx,64(%esp) -._bytesatleast1: - # bytes - 64 - cmp $64,%ebx - # goto nocopy if unsigned>= - jae ._nocopy - # ctarget = out - movl %edi,228(%esp) - # out = &tmp - leal 0(%esp),%edi - # i = bytes - mov %ebx,%ecx - # while (i) { *out++ = *m++; --i } - rep movsb - # out = &tmp - leal 0(%esp),%edi - # m = &tmp - leal 0(%esp),%esi -._nocopy: - # out_backup = out - movl %edi,72(%esp) - # m_backup = m - movl %esi,68(%esp) - # bytes_backup = bytes - movl %ebx,76(%esp) - # in0 = j0 - movl 164(%esp),%eax - # in1 = j1 - movl 168(%esp),%ecx - # in2 = j2 - movl 172(%esp),%edx - # in3 = j3 - movl 176(%esp),%ebx - # x0 = in0 - movl %eax,100(%esp) - # x1 = in1 - movl %ecx,104(%esp) - # x2 = in2 - movl %edx,108(%esp) - # x3 = in3 - movl %ebx,112(%esp) - # in4 = j4 - movl 180(%esp),%eax - # in5 = j5 - movl 184(%esp),%ecx - # in6 = j6 - movl 188(%esp),%edx - # in7 = j7 - movl 192(%esp),%ebx - # x4 = in4 - movl %eax,116(%esp) - # x5 = in5 - movl %ecx,120(%esp) - # x6 = in6 - movl %edx,124(%esp) - # x7 = in7 - movl %ebx,128(%esp) - # in8 = j8 - movl 196(%esp),%eax - # in9 = j9 - movl 200(%esp),%ecx - # in10 = j10 - movl 204(%esp),%edx - # in11 = j11 - movl 208(%esp),%ebx - # x8 = in8 - movl %eax,132(%esp) - # x9 = in9 - movl %ecx,136(%esp) - # x10 = in10 - movl %edx,140(%esp) - # x11 = in11 - movl %ebx,144(%esp) - # in12 = j12 - movl 212(%esp),%eax - # in13 = j13 - movl 216(%esp),%ecx - # in14 = j14 - movl 220(%esp),%edx - # in15 = j15 - movl 224(%esp),%ebx - # x12 = in12 - movl %eax,148(%esp) - # x13 = in13 - movl 
%ecx,152(%esp) - # x14 = in14 - movl %edx,156(%esp) - # x15 = in15 - movl %ebx,160(%esp) - # i = 20 - mov $20,%ebp - # p = x0 - movl 100(%esp),%eax - # s = x5 - movl 120(%esp),%ecx - # t = x10 - movl 140(%esp),%edx - # w = x15 - movl 160(%esp),%ebx -._mainloop: - # x0 = p - movl %eax,100(%esp) - # x10 = t - movl %edx,140(%esp) - # p += x12 - addl 148(%esp),%eax - # x5 = s - movl %ecx,120(%esp) - # t += x6 - addl 124(%esp),%edx - # x15 = w - movl %ebx,160(%esp) - # r = x1 - movl 104(%esp),%esi - # r += s - add %ecx,%esi - # v = x11 - movl 144(%esp),%edi - # v += w - add %ebx,%edi - # p <<<= 7 - rol $7,%eax - # p ^= x4 - xorl 116(%esp),%eax - # t <<<= 7 - rol $7,%edx - # t ^= x14 - xorl 156(%esp),%edx - # r <<<= 7 - rol $7,%esi - # r ^= x9 - xorl 136(%esp),%esi - # v <<<= 7 - rol $7,%edi - # v ^= x3 - xorl 112(%esp),%edi - # x4 = p - movl %eax,116(%esp) - # x14 = t - movl %edx,156(%esp) - # p += x0 - addl 100(%esp),%eax - # x9 = r - movl %esi,136(%esp) - # t += x10 - addl 140(%esp),%edx - # x3 = v - movl %edi,112(%esp) - # p <<<= 9 - rol $9,%eax - # p ^= x8 - xorl 132(%esp),%eax - # t <<<= 9 - rol $9,%edx - # t ^= x2 - xorl 108(%esp),%edx - # s += r - add %esi,%ecx - # s <<<= 9 - rol $9,%ecx - # s ^= x13 - xorl 152(%esp),%ecx - # w += v - add %edi,%ebx - # w <<<= 9 - rol $9,%ebx - # w ^= x7 - xorl 128(%esp),%ebx - # x8 = p - movl %eax,132(%esp) - # x2 = t - movl %edx,108(%esp) - # p += x4 - addl 116(%esp),%eax - # x13 = s - movl %ecx,152(%esp) - # t += x14 - addl 156(%esp),%edx - # x7 = w - movl %ebx,128(%esp) - # p <<<= 13 - rol $13,%eax - # p ^= x12 - xorl 148(%esp),%eax - # t <<<= 13 - rol $13,%edx - # t ^= x6 - xorl 124(%esp),%edx - # r += s - add %ecx,%esi - # r <<<= 13 - rol $13,%esi - # r ^= x1 - xorl 104(%esp),%esi - # v += w - add %ebx,%edi - # v <<<= 13 - rol $13,%edi - # v ^= x11 - xorl 144(%esp),%edi - # x12 = p - movl %eax,148(%esp) - # x6 = t - movl %edx,124(%esp) - # p += x8 - addl 132(%esp),%eax - # x1 = r - movl %esi,104(%esp) - # t += x2 - addl 108(%esp),%edx - # x11 = v - movl %edi,144(%esp) - # p <<<= 18 - rol $18,%eax - # p ^= x0 - xorl 100(%esp),%eax - # t <<<= 18 - rol $18,%edx - # t ^= x10 - xorl 140(%esp),%edx - # s += r - add %esi,%ecx - # s <<<= 18 - rol $18,%ecx - # s ^= x5 - xorl 120(%esp),%ecx - # w += v - add %edi,%ebx - # w <<<= 18 - rol $18,%ebx - # w ^= x15 - xorl 160(%esp),%ebx - # x0 = p - movl %eax,100(%esp) - # x10 = t - movl %edx,140(%esp) - # p += x3 - addl 112(%esp),%eax - # p <<<= 7 - rol $7,%eax - # x5 = s - movl %ecx,120(%esp) - # t += x9 - addl 136(%esp),%edx - # x15 = w - movl %ebx,160(%esp) - # r = x4 - movl 116(%esp),%esi - # r += s - add %ecx,%esi - # v = x14 - movl 156(%esp),%edi - # v += w - add %ebx,%edi - # p ^= x1 - xorl 104(%esp),%eax - # t <<<= 7 - rol $7,%edx - # t ^= x11 - xorl 144(%esp),%edx - # r <<<= 7 - rol $7,%esi - # r ^= x6 - xorl 124(%esp),%esi - # v <<<= 7 - rol $7,%edi - # v ^= x12 - xorl 148(%esp),%edi - # x1 = p - movl %eax,104(%esp) - # x11 = t - movl %edx,144(%esp) - # p += x0 - addl 100(%esp),%eax - # x6 = r - movl %esi,124(%esp) - # t += x10 - addl 140(%esp),%edx - # x12 = v - movl %edi,148(%esp) - # p <<<= 9 - rol $9,%eax - # p ^= x2 - xorl 108(%esp),%eax - # t <<<= 9 - rol $9,%edx - # t ^= x8 - xorl 132(%esp),%edx - # s += r - add %esi,%ecx - # s <<<= 9 - rol $9,%ecx - # s ^= x7 - xorl 128(%esp),%ecx - # w += v - add %edi,%ebx - # w <<<= 9 - rol $9,%ebx - # w ^= x13 - xorl 152(%esp),%ebx - # x2 = p - movl %eax,108(%esp) - # x8 = t - movl %edx,132(%esp) - # p += x1 - addl 104(%esp),%eax - # x7 = s - movl 
%ecx,128(%esp) - # t += x11 - addl 144(%esp),%edx - # x13 = w - movl %ebx,152(%esp) - # p <<<= 13 - rol $13,%eax - # p ^= x3 - xorl 112(%esp),%eax - # t <<<= 13 - rol $13,%edx - # t ^= x9 - xorl 136(%esp),%edx - # r += s - add %ecx,%esi - # r <<<= 13 - rol $13,%esi - # r ^= x4 - xorl 116(%esp),%esi - # v += w - add %ebx,%edi - # v <<<= 13 - rol $13,%edi - # v ^= x14 - xorl 156(%esp),%edi - # x3 = p - movl %eax,112(%esp) - # x9 = t - movl %edx,136(%esp) - # p += x2 - addl 108(%esp),%eax - # x4 = r - movl %esi,116(%esp) - # t += x8 - addl 132(%esp),%edx - # x14 = v - movl %edi,156(%esp) - # p <<<= 18 - rol $18,%eax - # p ^= x0 - xorl 100(%esp),%eax - # t <<<= 18 - rol $18,%edx - # t ^= x10 - xorl 140(%esp),%edx - # s += r - add %esi,%ecx - # s <<<= 18 - rol $18,%ecx - # s ^= x5 - xorl 120(%esp),%ecx - # w += v - add %edi,%ebx - # w <<<= 18 - rol $18,%ebx - # w ^= x15 - xorl 160(%esp),%ebx - # x0 = p - movl %eax,100(%esp) - # x10 = t - movl %edx,140(%esp) - # p += x12 - addl 148(%esp),%eax - # x5 = s - movl %ecx,120(%esp) - # t += x6 - addl 124(%esp),%edx - # x15 = w - movl %ebx,160(%esp) - # r = x1 - movl 104(%esp),%esi - # r += s - add %ecx,%esi - # v = x11 - movl 144(%esp),%edi - # v += w - add %ebx,%edi - # p <<<= 7 - rol $7,%eax - # p ^= x4 - xorl 116(%esp),%eax - # t <<<= 7 - rol $7,%edx - # t ^= x14 - xorl 156(%esp),%edx - # r <<<= 7 - rol $7,%esi - # r ^= x9 - xorl 136(%esp),%esi - # v <<<= 7 - rol $7,%edi - # v ^= x3 - xorl 112(%esp),%edi - # x4 = p - movl %eax,116(%esp) - # x14 = t - movl %edx,156(%esp) - # p += x0 - addl 100(%esp),%eax - # x9 = r - movl %esi,136(%esp) - # t += x10 - addl 140(%esp),%edx - # x3 = v - movl %edi,112(%esp) - # p <<<= 9 - rol $9,%eax - # p ^= x8 - xorl 132(%esp),%eax - # t <<<= 9 - rol $9,%edx - # t ^= x2 - xorl 108(%esp),%edx - # s += r - add %esi,%ecx - # s <<<= 9 - rol $9,%ecx - # s ^= x13 - xorl 152(%esp),%ecx - # w += v - add %edi,%ebx - # w <<<= 9 - rol $9,%ebx - # w ^= x7 - xorl 128(%esp),%ebx - # x8 = p - movl %eax,132(%esp) - # x2 = t - movl %edx,108(%esp) - # p += x4 - addl 116(%esp),%eax - # x13 = s - movl %ecx,152(%esp) - # t += x14 - addl 156(%esp),%edx - # x7 = w - movl %ebx,128(%esp) - # p <<<= 13 - rol $13,%eax - # p ^= x12 - xorl 148(%esp),%eax - # t <<<= 13 - rol $13,%edx - # t ^= x6 - xorl 124(%esp),%edx - # r += s - add %ecx,%esi - # r <<<= 13 - rol $13,%esi - # r ^= x1 - xorl 104(%esp),%esi - # v += w - add %ebx,%edi - # v <<<= 13 - rol $13,%edi - # v ^= x11 - xorl 144(%esp),%edi - # x12 = p - movl %eax,148(%esp) - # x6 = t - movl %edx,124(%esp) - # p += x8 - addl 132(%esp),%eax - # x1 = r - movl %esi,104(%esp) - # t += x2 - addl 108(%esp),%edx - # x11 = v - movl %edi,144(%esp) - # p <<<= 18 - rol $18,%eax - # p ^= x0 - xorl 100(%esp),%eax - # t <<<= 18 - rol $18,%edx - # t ^= x10 - xorl 140(%esp),%edx - # s += r - add %esi,%ecx - # s <<<= 18 - rol $18,%ecx - # s ^= x5 - xorl 120(%esp),%ecx - # w += v - add %edi,%ebx - # w <<<= 18 - rol $18,%ebx - # w ^= x15 - xorl 160(%esp),%ebx - # x0 = p - movl %eax,100(%esp) - # x10 = t - movl %edx,140(%esp) - # p += x3 - addl 112(%esp),%eax - # p <<<= 7 - rol $7,%eax - # x5 = s - movl %ecx,120(%esp) - # t += x9 - addl 136(%esp),%edx - # x15 = w - movl %ebx,160(%esp) - # r = x4 - movl 116(%esp),%esi - # r += s - add %ecx,%esi - # v = x14 - movl 156(%esp),%edi - # v += w - add %ebx,%edi - # p ^= x1 - xorl 104(%esp),%eax - # t <<<= 7 - rol $7,%edx - # t ^= x11 - xorl 144(%esp),%edx - # r <<<= 7 - rol $7,%esi - # r ^= x6 - xorl 124(%esp),%esi - # v <<<= 7 - rol $7,%edi - # v ^= x12 - xorl 
148(%esp),%edi - # x1 = p - movl %eax,104(%esp) - # x11 = t - movl %edx,144(%esp) - # p += x0 - addl 100(%esp),%eax - # x6 = r - movl %esi,124(%esp) - # t += x10 - addl 140(%esp),%edx - # x12 = v - movl %edi,148(%esp) - # p <<<= 9 - rol $9,%eax - # p ^= x2 - xorl 108(%esp),%eax - # t <<<= 9 - rol $9,%edx - # t ^= x8 - xorl 132(%esp),%edx - # s += r - add %esi,%ecx - # s <<<= 9 - rol $9,%ecx - # s ^= x7 - xorl 128(%esp),%ecx - # w += v - add %edi,%ebx - # w <<<= 9 - rol $9,%ebx - # w ^= x13 - xorl 152(%esp),%ebx - # x2 = p - movl %eax,108(%esp) - # x8 = t - movl %edx,132(%esp) - # p += x1 - addl 104(%esp),%eax - # x7 = s - movl %ecx,128(%esp) - # t += x11 - addl 144(%esp),%edx - # x13 = w - movl %ebx,152(%esp) - # p <<<= 13 - rol $13,%eax - # p ^= x3 - xorl 112(%esp),%eax - # t <<<= 13 - rol $13,%edx - # t ^= x9 - xorl 136(%esp),%edx - # r += s - add %ecx,%esi - # r <<<= 13 - rol $13,%esi - # r ^= x4 - xorl 116(%esp),%esi - # v += w - add %ebx,%edi - # v <<<= 13 - rol $13,%edi - # v ^= x14 - xorl 156(%esp),%edi - # x3 = p - movl %eax,112(%esp) - # x9 = t - movl %edx,136(%esp) - # p += x2 - addl 108(%esp),%eax - # x4 = r - movl %esi,116(%esp) - # t += x8 - addl 132(%esp),%edx - # x14 = v - movl %edi,156(%esp) - # p <<<= 18 - rol $18,%eax - # p ^= x0 - xorl 100(%esp),%eax - # t <<<= 18 - rol $18,%edx - # t ^= x10 - xorl 140(%esp),%edx - # s += r - add %esi,%ecx - # s <<<= 18 - rol $18,%ecx - # s ^= x5 - xorl 120(%esp),%ecx - # w += v - add %edi,%ebx - # w <<<= 18 - rol $18,%ebx - # w ^= x15 - xorl 160(%esp),%ebx - # i -= 4 - sub $4,%ebp - # goto mainloop if unsigned > - ja ._mainloop - # x0 = p - movl %eax,100(%esp) - # x5 = s - movl %ecx,120(%esp) - # x10 = t - movl %edx,140(%esp) - # x15 = w - movl %ebx,160(%esp) - # out = out_backup - movl 72(%esp),%edi - # m = m_backup - movl 68(%esp),%esi - # in0 = x0 - movl 100(%esp),%eax - # in1 = x1 - movl 104(%esp),%ecx - # in0 += j0 - addl 164(%esp),%eax - # in1 += j1 - addl 168(%esp),%ecx - # in0 ^= *(uint32 *) (m + 0) - xorl 0(%esi),%eax - # in1 ^= *(uint32 *) (m + 4) - xorl 4(%esi),%ecx - # *(uint32 *) (out + 0) = in0 - movl %eax,0(%edi) - # *(uint32 *) (out + 4) = in1 - movl %ecx,4(%edi) - # in2 = x2 - movl 108(%esp),%eax - # in3 = x3 - movl 112(%esp),%ecx - # in2 += j2 - addl 172(%esp),%eax - # in3 += j3 - addl 176(%esp),%ecx - # in2 ^= *(uint32 *) (m + 8) - xorl 8(%esi),%eax - # in3 ^= *(uint32 *) (m + 12) - xorl 12(%esi),%ecx - # *(uint32 *) (out + 8) = in2 - movl %eax,8(%edi) - # *(uint32 *) (out + 12) = in3 - movl %ecx,12(%edi) - # in4 = x4 - movl 116(%esp),%eax - # in5 = x5 - movl 120(%esp),%ecx - # in4 += j4 - addl 180(%esp),%eax - # in5 += j5 - addl 184(%esp),%ecx - # in4 ^= *(uint32 *) (m + 16) - xorl 16(%esi),%eax - # in5 ^= *(uint32 *) (m + 20) - xorl 20(%esi),%ecx - # *(uint32 *) (out + 16) = in4 - movl %eax,16(%edi) - # *(uint32 *) (out + 20) = in5 - movl %ecx,20(%edi) - # in6 = x6 - movl 124(%esp),%eax - # in7 = x7 - movl 128(%esp),%ecx - # in6 += j6 - addl 188(%esp),%eax - # in7 += j7 - addl 192(%esp),%ecx - # in6 ^= *(uint32 *) (m + 24) - xorl 24(%esi),%eax - # in7 ^= *(uint32 *) (m + 28) - xorl 28(%esi),%ecx - # *(uint32 *) (out + 24) = in6 - movl %eax,24(%edi) - # *(uint32 *) (out + 28) = in7 - movl %ecx,28(%edi) - # in8 = x8 - movl 132(%esp),%eax - # in9 = x9 - movl 136(%esp),%ecx - # in8 += j8 - addl 196(%esp),%eax - # in9 += j9 - addl 200(%esp),%ecx - # in8 ^= *(uint32 *) (m + 32) - xorl 32(%esi),%eax - # in9 ^= *(uint32 *) (m + 36) - xorl 36(%esi),%ecx - # *(uint32 *) (out + 32) = in8 - movl %eax,32(%edi) - # *(uint32 *) 
(out + 36) = in9 - movl %ecx,36(%edi) - # in10 = x10 - movl 140(%esp),%eax - # in11 = x11 - movl 144(%esp),%ecx - # in10 += j10 - addl 204(%esp),%eax - # in11 += j11 - addl 208(%esp),%ecx - # in10 ^= *(uint32 *) (m + 40) - xorl 40(%esi),%eax - # in11 ^= *(uint32 *) (m + 44) - xorl 44(%esi),%ecx - # *(uint32 *) (out + 40) = in10 - movl %eax,40(%edi) - # *(uint32 *) (out + 44) = in11 - movl %ecx,44(%edi) - # in12 = x12 - movl 148(%esp),%eax - # in13 = x13 - movl 152(%esp),%ecx - # in12 += j12 - addl 212(%esp),%eax - # in13 += j13 - addl 216(%esp),%ecx - # in12 ^= *(uint32 *) (m + 48) - xorl 48(%esi),%eax - # in13 ^= *(uint32 *) (m + 52) - xorl 52(%esi),%ecx - # *(uint32 *) (out + 48) = in12 - movl %eax,48(%edi) - # *(uint32 *) (out + 52) = in13 - movl %ecx,52(%edi) - # in14 = x14 - movl 156(%esp),%eax - # in15 = x15 - movl 160(%esp),%ecx - # in14 += j14 - addl 220(%esp),%eax - # in15 += j15 - addl 224(%esp),%ecx - # in14 ^= *(uint32 *) (m + 56) - xorl 56(%esi),%eax - # in15 ^= *(uint32 *) (m + 60) - xorl 60(%esi),%ecx - # *(uint32 *) (out + 56) = in14 - movl %eax,56(%edi) - # *(uint32 *) (out + 60) = in15 - movl %ecx,60(%edi) - # bytes = bytes_backup - movl 76(%esp),%ebx - # in8 = j8 - movl 196(%esp),%eax - # in9 = j9 - movl 200(%esp),%ecx - # in8 += 1 - add $1,%eax - # in9 += 0 + carry - adc $0,%ecx - # j8 = in8 - movl %eax,196(%esp) - # j9 = in9 - movl %ecx,200(%esp) - # bytes - 64 - cmp $64,%ebx - # goto bytesatleast65 if unsigned> - ja ._bytesatleast65 - # goto bytesatleast64 if unsigned>= - jae ._bytesatleast64 - # m = out - mov %edi,%esi - # out = ctarget - movl 228(%esp),%edi - # i = bytes - mov %ebx,%ecx - # while (i) { *out++ = *m++; --i } - rep movsb -._bytesatleast64: - # x = x_backup - movl 64(%esp),%eax - # in8 = j8 - movl 196(%esp),%ecx - # in9 = j9 - movl 200(%esp),%edx - # *(uint32 *) (x + 32) = in8 - movl %ecx,32(%eax) - # *(uint32 *) (x + 36) = in9 - movl %edx,36(%eax) -._done: - # eax = eax_stack - movl 80(%esp),%eax - # ebx = ebx_stack - movl 84(%esp),%ebx - # esi = esi_stack - movl 88(%esp),%esi - # edi = edi_stack - movl 92(%esp),%edi - # ebp = ebp_stack - movl 96(%esp),%ebp - # leave - add %eax,%esp - ret -._bytesatleast65: - # bytes -= 64 - sub $64,%ebx - # out += 64 - add $64,%edi - # m += 64 - add $64,%esi - # goto bytesatleast1 - jmp ._bytesatleast1 -ENDPROC(salsa20_encrypt_bytes) - -# enter salsa20_keysetup -ENTRY(salsa20_keysetup) - mov %esp,%eax - and $31,%eax - add $256,%eax - sub %eax,%esp - # eax_stack = eax - movl %eax,64(%esp) - # ebx_stack = ebx - movl %ebx,68(%esp) - # esi_stack = esi - movl %esi,72(%esp) - # edi_stack = edi - movl %edi,76(%esp) - # ebp_stack = ebp - movl %ebp,80(%esp) - # k = arg2 - movl 8(%esp,%eax),%ecx - # kbits = arg3 - movl 12(%esp,%eax),%edx - # x = arg1 - movl 4(%esp,%eax),%eax - # in1 = *(uint32 *) (k + 0) - movl 0(%ecx),%ebx - # in2 = *(uint32 *) (k + 4) - movl 4(%ecx),%esi - # in3 = *(uint32 *) (k + 8) - movl 8(%ecx),%edi - # in4 = *(uint32 *) (k + 12) - movl 12(%ecx),%ebp - # *(uint32 *) (x + 4) = in1 - movl %ebx,4(%eax) - # *(uint32 *) (x + 8) = in2 - movl %esi,8(%eax) - # *(uint32 *) (x + 12) = in3 - movl %edi,12(%eax) - # *(uint32 *) (x + 16) = in4 - movl %ebp,16(%eax) - # kbits - 256 - cmp $256,%edx - # goto kbits128 if unsigned< - jb ._kbits128 -._kbits256: - # in11 = *(uint32 *) (k + 16) - movl 16(%ecx),%edx - # in12 = *(uint32 *) (k + 20) - movl 20(%ecx),%ebx - # in13 = *(uint32 *) (k + 24) - movl 24(%ecx),%esi - # in14 = *(uint32 *) (k + 28) - movl 28(%ecx),%ecx - # *(uint32 *) (x + 44) = in11 - movl %edx,44(%eax) - # 
*(uint32 *) (x + 48) = in12 - movl %ebx,48(%eax) - # *(uint32 *) (x + 52) = in13 - movl %esi,52(%eax) - # *(uint32 *) (x + 56) = in14 - movl %ecx,56(%eax) - # in0 = 1634760805 - mov $1634760805,%ecx - # in5 = 857760878 - mov $857760878,%edx - # in10 = 2036477234 - mov $2036477234,%ebx - # in15 = 1797285236 - mov $1797285236,%esi - # *(uint32 *) (x + 0) = in0 - movl %ecx,0(%eax) - # *(uint32 *) (x + 20) = in5 - movl %edx,20(%eax) - # *(uint32 *) (x + 40) = in10 - movl %ebx,40(%eax) - # *(uint32 *) (x + 60) = in15 - movl %esi,60(%eax) - # goto keysetupdone - jmp ._keysetupdone -._kbits128: - # in11 = *(uint32 *) (k + 0) - movl 0(%ecx),%edx - # in12 = *(uint32 *) (k + 4) - movl 4(%ecx),%ebx - # in13 = *(uint32 *) (k + 8) - movl 8(%ecx),%esi - # in14 = *(uint32 *) (k + 12) - movl 12(%ecx),%ecx - # *(uint32 *) (x + 44) = in11 - movl %edx,44(%eax) - # *(uint32 *) (x + 48) = in12 - movl %ebx,48(%eax) - # *(uint32 *) (x + 52) = in13 - movl %esi,52(%eax) - # *(uint32 *) (x + 56) = in14 - movl %ecx,56(%eax) - # in0 = 1634760805 - mov $1634760805,%ecx - # in5 = 824206446 - mov $824206446,%edx - # in10 = 2036477238 - mov $2036477238,%ebx - # in15 = 1797285236 - mov $1797285236,%esi - # *(uint32 *) (x + 0) = in0 - movl %ecx,0(%eax) - # *(uint32 *) (x + 20) = in5 - movl %edx,20(%eax) - # *(uint32 *) (x + 40) = in10 - movl %ebx,40(%eax) - # *(uint32 *) (x + 60) = in15 - movl %esi,60(%eax) -._keysetupdone: - # eax = eax_stack - movl 64(%esp),%eax - # ebx = ebx_stack - movl 68(%esp),%ebx - # esi = esi_stack - movl 72(%esp),%esi - # edi = edi_stack - movl 76(%esp),%edi - # ebp = ebp_stack - movl 80(%esp),%ebp - # leave - add %eax,%esp - ret -ENDPROC(salsa20_keysetup) - -# enter salsa20_ivsetup -ENTRY(salsa20_ivsetup) - mov %esp,%eax - and $31,%eax - add $256,%eax - sub %eax,%esp - # eax_stack = eax - movl %eax,64(%esp) - # ebx_stack = ebx - movl %ebx,68(%esp) - # esi_stack = esi - movl %esi,72(%esp) - # edi_stack = edi - movl %edi,76(%esp) - # ebp_stack = ebp - movl %ebp,80(%esp) - # iv = arg2 - movl 8(%esp,%eax),%ecx - # x = arg1 - movl 4(%esp,%eax),%eax - # in6 = *(uint32 *) (iv + 0) - movl 0(%ecx),%edx - # in7 = *(uint32 *) (iv + 4) - movl 4(%ecx),%ecx - # in8 = 0 - mov $0,%ebx - # in9 = 0 - mov $0,%esi - # *(uint32 *) (x + 24) = in6 - movl %edx,24(%eax) - # *(uint32 *) (x + 28) = in7 - movl %ecx,28(%eax) - # *(uint32 *) (x + 32) = in8 - movl %ebx,32(%eax) - # *(uint32 *) (x + 36) = in9 - movl %esi,36(%eax) - # eax = eax_stack - movl 64(%esp),%eax - # ebx = ebx_stack - movl 68(%esp),%ebx - # esi = esi_stack - movl 72(%esp),%esi - # edi = edi_stack - movl 76(%esp),%edi - # ebp = ebp_stack - movl 80(%esp),%ebp - # leave - add %eax,%esp - ret -ENDPROC(salsa20_ivsetup) diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S deleted file mode 100644 index 10db30d58006..000000000000 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S +++ /dev/null @@ -1,919 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#include - -# enter salsa20_encrypt_bytes -ENTRY(salsa20_encrypt_bytes) - mov %rsp,%r11 - and $31,%r11 - add $256,%r11 - sub %r11,%rsp - # x = arg1 - mov %rdi,%r8 - # m = arg2 - mov %rsi,%rsi - # out = arg3 - mov %rdx,%rdi - # bytes = arg4 - mov %rcx,%rdx - # unsigned>? 
bytes - 0 - cmp $0,%rdx - # comment:fp stack unchanged by jump - # goto done if !unsigned> - jbe ._done - # comment:fp stack unchanged by fallthrough -# start: -._start: - # r11_stack = r11 - movq %r11,0(%rsp) - # r12_stack = r12 - movq %r12,8(%rsp) - # r13_stack = r13 - movq %r13,16(%rsp) - # r14_stack = r14 - movq %r14,24(%rsp) - # r15_stack = r15 - movq %r15,32(%rsp) - # rbx_stack = rbx - movq %rbx,40(%rsp) - # rbp_stack = rbp - movq %rbp,48(%rsp) - # in0 = *(uint64 *) (x + 0) - movq 0(%r8),%rcx - # in2 = *(uint64 *) (x + 8) - movq 8(%r8),%r9 - # in4 = *(uint64 *) (x + 16) - movq 16(%r8),%rax - # in6 = *(uint64 *) (x + 24) - movq 24(%r8),%r10 - # in8 = *(uint64 *) (x + 32) - movq 32(%r8),%r11 - # in10 = *(uint64 *) (x + 40) - movq 40(%r8),%r12 - # in12 = *(uint64 *) (x + 48) - movq 48(%r8),%r13 - # in14 = *(uint64 *) (x + 56) - movq 56(%r8),%r14 - # j0 = in0 - movq %rcx,56(%rsp) - # j2 = in2 - movq %r9,64(%rsp) - # j4 = in4 - movq %rax,72(%rsp) - # j6 = in6 - movq %r10,80(%rsp) - # j8 = in8 - movq %r11,88(%rsp) - # j10 = in10 - movq %r12,96(%rsp) - # j12 = in12 - movq %r13,104(%rsp) - # j14 = in14 - movq %r14,112(%rsp) - # x_backup = x - movq %r8,120(%rsp) -# bytesatleast1: -._bytesatleast1: - # unsigned>= 32 - shr $32,%rdi - # x3 = j2 - movq 64(%rsp),%rsi - # x2 = x3 - mov %rsi,%rcx - # (uint64) x3 >>= 32 - shr $32,%rsi - # x5 = j4 - movq 72(%rsp),%r8 - # x4 = x5 - mov %r8,%r9 - # (uint64) x5 >>= 32 - shr $32,%r8 - # x5_stack = x5 - movq %r8,160(%rsp) - # x7 = j6 - movq 80(%rsp),%r8 - # x6 = x7 - mov %r8,%rax - # (uint64) x7 >>= 32 - shr $32,%r8 - # x9 = j8 - movq 88(%rsp),%r10 - # x8 = x9 - mov %r10,%r11 - # (uint64) x9 >>= 32 - shr $32,%r10 - # x11 = j10 - movq 96(%rsp),%r12 - # x10 = x11 - mov %r12,%r13 - # x10_stack = x10 - movq %r13,168(%rsp) - # (uint64) x11 >>= 32 - shr $32,%r12 - # x13 = j12 - movq 104(%rsp),%r13 - # x12 = x13 - mov %r13,%r14 - # (uint64) x13 >>= 32 - shr $32,%r13 - # x15 = j14 - movq 112(%rsp),%r15 - # x14 = x15 - mov %r15,%rbx - # (uint64) x15 >>= 32 - shr $32,%r15 - # x15_stack = x15 - movq %r15,176(%rsp) - # i = 20 - mov $20,%r15 -# mainloop: -._mainloop: - # i_backup = i - movq %r15,184(%rsp) - # x5 = x5_stack - movq 160(%rsp),%r15 - # a = x12 + x0 - lea (%r14,%rdx),%rbp - # (uint32) a <<<= 7 - rol $7,%ebp - # x4 ^= a - xor %rbp,%r9 - # b = x1 + x5 - lea (%rdi,%r15),%rbp - # (uint32) b <<<= 7 - rol $7,%ebp - # x9 ^= b - xor %rbp,%r10 - # a = x0 + x4 - lea (%rdx,%r9),%rbp - # (uint32) a <<<= 9 - rol $9,%ebp - # x8 ^= a - xor %rbp,%r11 - # b = x5 + x9 - lea (%r15,%r10),%rbp - # (uint32) b <<<= 9 - rol $9,%ebp - # x13 ^= b - xor %rbp,%r13 - # a = x4 + x8 - lea (%r9,%r11),%rbp - # (uint32) a <<<= 13 - rol $13,%ebp - # x12 ^= a - xor %rbp,%r14 - # b = x9 + x13 - lea (%r10,%r13),%rbp - # (uint32) b <<<= 13 - rol $13,%ebp - # x1 ^= b - xor %rbp,%rdi - # a = x8 + x12 - lea (%r11,%r14),%rbp - # (uint32) a <<<= 18 - rol $18,%ebp - # x0 ^= a - xor %rbp,%rdx - # b = x13 + x1 - lea (%r13,%rdi),%rbp - # (uint32) b <<<= 18 - rol $18,%ebp - # x5 ^= b - xor %rbp,%r15 - # x10 = x10_stack - movq 168(%rsp),%rbp - # x5_stack = x5 - movq %r15,160(%rsp) - # c = x6 + x10 - lea (%rax,%rbp),%r15 - # (uint32) c <<<= 7 - rol $7,%r15d - # x14 ^= c - xor %r15,%rbx - # c = x10 + x14 - lea (%rbp,%rbx),%r15 - # (uint32) c <<<= 9 - rol $9,%r15d - # x2 ^= c - xor %r15,%rcx - # c = x14 + x2 - lea (%rbx,%rcx),%r15 - # (uint32) c <<<= 13 - rol $13,%r15d - # x6 ^= c - xor %r15,%rax - # c = x2 + x6 - lea (%rcx,%rax),%r15 - # (uint32) c <<<= 18 - rol $18,%r15d - # x10 ^= c - xor %r15,%rbp - # x15 
= x15_stack - movq 176(%rsp),%r15 - # x10_stack = x10 - movq %rbp,168(%rsp) - # d = x11 + x15 - lea (%r12,%r15),%rbp - # (uint32) d <<<= 7 - rol $7,%ebp - # x3 ^= d - xor %rbp,%rsi - # d = x15 + x3 - lea (%r15,%rsi),%rbp - # (uint32) d <<<= 9 - rol $9,%ebp - # x7 ^= d - xor %rbp,%r8 - # d = x3 + x7 - lea (%rsi,%r8),%rbp - # (uint32) d <<<= 13 - rol $13,%ebp - # x11 ^= d - xor %rbp,%r12 - # d = x7 + x11 - lea (%r8,%r12),%rbp - # (uint32) d <<<= 18 - rol $18,%ebp - # x15 ^= d - xor %rbp,%r15 - # x15_stack = x15 - movq %r15,176(%rsp) - # x5 = x5_stack - movq 160(%rsp),%r15 - # a = x3 + x0 - lea (%rsi,%rdx),%rbp - # (uint32) a <<<= 7 - rol $7,%ebp - # x1 ^= a - xor %rbp,%rdi - # b = x4 + x5 - lea (%r9,%r15),%rbp - # (uint32) b <<<= 7 - rol $7,%ebp - # x6 ^= b - xor %rbp,%rax - # a = x0 + x1 - lea (%rdx,%rdi),%rbp - # (uint32) a <<<= 9 - rol $9,%ebp - # x2 ^= a - xor %rbp,%rcx - # b = x5 + x6 - lea (%r15,%rax),%rbp - # (uint32) b <<<= 9 - rol $9,%ebp - # x7 ^= b - xor %rbp,%r8 - # a = x1 + x2 - lea (%rdi,%rcx),%rbp - # (uint32) a <<<= 13 - rol $13,%ebp - # x3 ^= a - xor %rbp,%rsi - # b = x6 + x7 - lea (%rax,%r8),%rbp - # (uint32) b <<<= 13 - rol $13,%ebp - # x4 ^= b - xor %rbp,%r9 - # a = x2 + x3 - lea (%rcx,%rsi),%rbp - # (uint32) a <<<= 18 - rol $18,%ebp - # x0 ^= a - xor %rbp,%rdx - # b = x7 + x4 - lea (%r8,%r9),%rbp - # (uint32) b <<<= 18 - rol $18,%ebp - # x5 ^= b - xor %rbp,%r15 - # x10 = x10_stack - movq 168(%rsp),%rbp - # x5_stack = x5 - movq %r15,160(%rsp) - # c = x9 + x10 - lea (%r10,%rbp),%r15 - # (uint32) c <<<= 7 - rol $7,%r15d - # x11 ^= c - xor %r15,%r12 - # c = x10 + x11 - lea (%rbp,%r12),%r15 - # (uint32) c <<<= 9 - rol $9,%r15d - # x8 ^= c - xor %r15,%r11 - # c = x11 + x8 - lea (%r12,%r11),%r15 - # (uint32) c <<<= 13 - rol $13,%r15d - # x9 ^= c - xor %r15,%r10 - # c = x8 + x9 - lea (%r11,%r10),%r15 - # (uint32) c <<<= 18 - rol $18,%r15d - # x10 ^= c - xor %r15,%rbp - # x15 = x15_stack - movq 176(%rsp),%r15 - # x10_stack = x10 - movq %rbp,168(%rsp) - # d = x14 + x15 - lea (%rbx,%r15),%rbp - # (uint32) d <<<= 7 - rol $7,%ebp - # x12 ^= d - xor %rbp,%r14 - # d = x15 + x12 - lea (%r15,%r14),%rbp - # (uint32) d <<<= 9 - rol $9,%ebp - # x13 ^= d - xor %rbp,%r13 - # d = x12 + x13 - lea (%r14,%r13),%rbp - # (uint32) d <<<= 13 - rol $13,%ebp - # x14 ^= d - xor %rbp,%rbx - # d = x13 + x14 - lea (%r13,%rbx),%rbp - # (uint32) d <<<= 18 - rol $18,%ebp - # x15 ^= d - xor %rbp,%r15 - # x15_stack = x15 - movq %r15,176(%rsp) - # x5 = x5_stack - movq 160(%rsp),%r15 - # a = x12 + x0 - lea (%r14,%rdx),%rbp - # (uint32) a <<<= 7 - rol $7,%ebp - # x4 ^= a - xor %rbp,%r9 - # b = x1 + x5 - lea (%rdi,%r15),%rbp - # (uint32) b <<<= 7 - rol $7,%ebp - # x9 ^= b - xor %rbp,%r10 - # a = x0 + x4 - lea (%rdx,%r9),%rbp - # (uint32) a <<<= 9 - rol $9,%ebp - # x8 ^= a - xor %rbp,%r11 - # b = x5 + x9 - lea (%r15,%r10),%rbp - # (uint32) b <<<= 9 - rol $9,%ebp - # x13 ^= b - xor %rbp,%r13 - # a = x4 + x8 - lea (%r9,%r11),%rbp - # (uint32) a <<<= 13 - rol $13,%ebp - # x12 ^= a - xor %rbp,%r14 - # b = x9 + x13 - lea (%r10,%r13),%rbp - # (uint32) b <<<= 13 - rol $13,%ebp - # x1 ^= b - xor %rbp,%rdi - # a = x8 + x12 - lea (%r11,%r14),%rbp - # (uint32) a <<<= 18 - rol $18,%ebp - # x0 ^= a - xor %rbp,%rdx - # b = x13 + x1 - lea (%r13,%rdi),%rbp - # (uint32) b <<<= 18 - rol $18,%ebp - # x5 ^= b - xor %rbp,%r15 - # x10 = x10_stack - movq 168(%rsp),%rbp - # x5_stack = x5 - movq %r15,160(%rsp) - # c = x6 + x10 - lea (%rax,%rbp),%r15 - # (uint32) c <<<= 7 - rol $7,%r15d - # x14 ^= c - xor %r15,%rbx - # c = x10 + x14 - lea 
(%rbp,%rbx),%r15 - # (uint32) c <<<= 9 - rol $9,%r15d - # x2 ^= c - xor %r15,%rcx - # c = x14 + x2 - lea (%rbx,%rcx),%r15 - # (uint32) c <<<= 13 - rol $13,%r15d - # x6 ^= c - xor %r15,%rax - # c = x2 + x6 - lea (%rcx,%rax),%r15 - # (uint32) c <<<= 18 - rol $18,%r15d - # x10 ^= c - xor %r15,%rbp - # x15 = x15_stack - movq 176(%rsp),%r15 - # x10_stack = x10 - movq %rbp,168(%rsp) - # d = x11 + x15 - lea (%r12,%r15),%rbp - # (uint32) d <<<= 7 - rol $7,%ebp - # x3 ^= d - xor %rbp,%rsi - # d = x15 + x3 - lea (%r15,%rsi),%rbp - # (uint32) d <<<= 9 - rol $9,%ebp - # x7 ^= d - xor %rbp,%r8 - # d = x3 + x7 - lea (%rsi,%r8),%rbp - # (uint32) d <<<= 13 - rol $13,%ebp - # x11 ^= d - xor %rbp,%r12 - # d = x7 + x11 - lea (%r8,%r12),%rbp - # (uint32) d <<<= 18 - rol $18,%ebp - # x15 ^= d - xor %rbp,%r15 - # x15_stack = x15 - movq %r15,176(%rsp) - # x5 = x5_stack - movq 160(%rsp),%r15 - # a = x3 + x0 - lea (%rsi,%rdx),%rbp - # (uint32) a <<<= 7 - rol $7,%ebp - # x1 ^= a - xor %rbp,%rdi - # b = x4 + x5 - lea (%r9,%r15),%rbp - # (uint32) b <<<= 7 - rol $7,%ebp - # x6 ^= b - xor %rbp,%rax - # a = x0 + x1 - lea (%rdx,%rdi),%rbp - # (uint32) a <<<= 9 - rol $9,%ebp - # x2 ^= a - xor %rbp,%rcx - # b = x5 + x6 - lea (%r15,%rax),%rbp - # (uint32) b <<<= 9 - rol $9,%ebp - # x7 ^= b - xor %rbp,%r8 - # a = x1 + x2 - lea (%rdi,%rcx),%rbp - # (uint32) a <<<= 13 - rol $13,%ebp - # x3 ^= a - xor %rbp,%rsi - # b = x6 + x7 - lea (%rax,%r8),%rbp - # (uint32) b <<<= 13 - rol $13,%ebp - # x4 ^= b - xor %rbp,%r9 - # a = x2 + x3 - lea (%rcx,%rsi),%rbp - # (uint32) a <<<= 18 - rol $18,%ebp - # x0 ^= a - xor %rbp,%rdx - # b = x7 + x4 - lea (%r8,%r9),%rbp - # (uint32) b <<<= 18 - rol $18,%ebp - # x5 ^= b - xor %rbp,%r15 - # x10 = x10_stack - movq 168(%rsp),%rbp - # x5_stack = x5 - movq %r15,160(%rsp) - # c = x9 + x10 - lea (%r10,%rbp),%r15 - # (uint32) c <<<= 7 - rol $7,%r15d - # x11 ^= c - xor %r15,%r12 - # c = x10 + x11 - lea (%rbp,%r12),%r15 - # (uint32) c <<<= 9 - rol $9,%r15d - # x8 ^= c - xor %r15,%r11 - # c = x11 + x8 - lea (%r12,%r11),%r15 - # (uint32) c <<<= 13 - rol $13,%r15d - # x9 ^= c - xor %r15,%r10 - # c = x8 + x9 - lea (%r11,%r10),%r15 - # (uint32) c <<<= 18 - rol $18,%r15d - # x10 ^= c - xor %r15,%rbp - # x15 = x15_stack - movq 176(%rsp),%r15 - # x10_stack = x10 - movq %rbp,168(%rsp) - # d = x14 + x15 - lea (%rbx,%r15),%rbp - # (uint32) d <<<= 7 - rol $7,%ebp - # x12 ^= d - xor %rbp,%r14 - # d = x15 + x12 - lea (%r15,%r14),%rbp - # (uint32) d <<<= 9 - rol $9,%ebp - # x13 ^= d - xor %rbp,%r13 - # d = x12 + x13 - lea (%r14,%r13),%rbp - # (uint32) d <<<= 13 - rol $13,%ebp - # x14 ^= d - xor %rbp,%rbx - # d = x13 + x14 - lea (%r13,%rbx),%rbp - # (uint32) d <<<= 18 - rol $18,%ebp - # x15 ^= d - xor %rbp,%r15 - # x15_stack = x15 - movq %r15,176(%rsp) - # i = i_backup - movq 184(%rsp),%r15 - # unsigned>? 
i -= 4 - sub $4,%r15 - # comment:fp stack unchanged by jump - # goto mainloop if unsigned> - ja ._mainloop - # (uint32) x2 += j2 - addl 64(%rsp),%ecx - # x3 <<= 32 - shl $32,%rsi - # x3 += j2 - addq 64(%rsp),%rsi - # (uint64) x3 >>= 32 - shr $32,%rsi - # x3 <<= 32 - shl $32,%rsi - # x2 += x3 - add %rsi,%rcx - # (uint32) x6 += j6 - addl 80(%rsp),%eax - # x7 <<= 32 - shl $32,%r8 - # x7 += j6 - addq 80(%rsp),%r8 - # (uint64) x7 >>= 32 - shr $32,%r8 - # x7 <<= 32 - shl $32,%r8 - # x6 += x7 - add %r8,%rax - # (uint32) x8 += j8 - addl 88(%rsp),%r11d - # x9 <<= 32 - shl $32,%r10 - # x9 += j8 - addq 88(%rsp),%r10 - # (uint64) x9 >>= 32 - shr $32,%r10 - # x9 <<= 32 - shl $32,%r10 - # x8 += x9 - add %r10,%r11 - # (uint32) x12 += j12 - addl 104(%rsp),%r14d - # x13 <<= 32 - shl $32,%r13 - # x13 += j12 - addq 104(%rsp),%r13 - # (uint64) x13 >>= 32 - shr $32,%r13 - # x13 <<= 32 - shl $32,%r13 - # x12 += x13 - add %r13,%r14 - # (uint32) x0 += j0 - addl 56(%rsp),%edx - # x1 <<= 32 - shl $32,%rdi - # x1 += j0 - addq 56(%rsp),%rdi - # (uint64) x1 >>= 32 - shr $32,%rdi - # x1 <<= 32 - shl $32,%rdi - # x0 += x1 - add %rdi,%rdx - # x5 = x5_stack - movq 160(%rsp),%rdi - # (uint32) x4 += j4 - addl 72(%rsp),%r9d - # x5 <<= 32 - shl $32,%rdi - # x5 += j4 - addq 72(%rsp),%rdi - # (uint64) x5 >>= 32 - shr $32,%rdi - # x5 <<= 32 - shl $32,%rdi - # x4 += x5 - add %rdi,%r9 - # x10 = x10_stack - movq 168(%rsp),%r8 - # (uint32) x10 += j10 - addl 96(%rsp),%r8d - # x11 <<= 32 - shl $32,%r12 - # x11 += j10 - addq 96(%rsp),%r12 - # (uint64) x11 >>= 32 - shr $32,%r12 - # x11 <<= 32 - shl $32,%r12 - # x10 += x11 - add %r12,%r8 - # x15 = x15_stack - movq 176(%rsp),%rdi - # (uint32) x14 += j14 - addl 112(%rsp),%ebx - # x15 <<= 32 - shl $32,%rdi - # x15 += j14 - addq 112(%rsp),%rdi - # (uint64) x15 >>= 32 - shr $32,%rdi - # x15 <<= 32 - shl $32,%rdi - # x14 += x15 - add %rdi,%rbx - # out = out_backup - movq 136(%rsp),%rdi - # m = m_backup - movq 144(%rsp),%rsi - # x0 ^= *(uint64 *) (m + 0) - xorq 0(%rsi),%rdx - # *(uint64 *) (out + 0) = x0 - movq %rdx,0(%rdi) - # x2 ^= *(uint64 *) (m + 8) - xorq 8(%rsi),%rcx - # *(uint64 *) (out + 8) = x2 - movq %rcx,8(%rdi) - # x4 ^= *(uint64 *) (m + 16) - xorq 16(%rsi),%r9 - # *(uint64 *) (out + 16) = x4 - movq %r9,16(%rdi) - # x6 ^= *(uint64 *) (m + 24) - xorq 24(%rsi),%rax - # *(uint64 *) (out + 24) = x6 - movq %rax,24(%rdi) - # x8 ^= *(uint64 *) (m + 32) - xorq 32(%rsi),%r11 - # *(uint64 *) (out + 32) = x8 - movq %r11,32(%rdi) - # x10 ^= *(uint64 *) (m + 40) - xorq 40(%rsi),%r8 - # *(uint64 *) (out + 40) = x10 - movq %r8,40(%rdi) - # x12 ^= *(uint64 *) (m + 48) - xorq 48(%rsi),%r14 - # *(uint64 *) (out + 48) = x12 - movq %r14,48(%rdi) - # x14 ^= *(uint64 *) (m + 56) - xorq 56(%rsi),%rbx - # *(uint64 *) (out + 56) = x14 - movq %rbx,56(%rdi) - # bytes = bytes_backup - movq 152(%rsp),%rdx - # in8 = j8 - movq 88(%rsp),%rcx - # in8 += 1 - add $1,%rcx - # j8 = in8 - movq %rcx,88(%rsp) - # unsigned>? 
unsigned - ja ._bytesatleast65 - # comment:fp stack unchanged by jump - # goto bytesatleast64 if !unsigned< - jae ._bytesatleast64 - # m = out - mov %rdi,%rsi - # out = ctarget - movq 128(%rsp),%rdi - # i = bytes - mov %rdx,%rcx - # while (i) { *out++ = *m++; --i } - rep movsb - # comment:fp stack unchanged by fallthrough -# bytesatleast64: -._bytesatleast64: - # x = x_backup - movq 120(%rsp),%rdi - # in8 = j8 - movq 88(%rsp),%rsi - # *(uint64 *) (x + 32) = in8 - movq %rsi,32(%rdi) - # r11 = r11_stack - movq 0(%rsp),%r11 - # r12 = r12_stack - movq 8(%rsp),%r12 - # r13 = r13_stack - movq 16(%rsp),%r13 - # r14 = r14_stack - movq 24(%rsp),%r14 - # r15 = r15_stack - movq 32(%rsp),%r15 - # rbx = rbx_stack - movq 40(%rsp),%rbx - # rbp = rbp_stack - movq 48(%rsp),%rbp - # comment:fp stack unchanged by fallthrough -# done: -._done: - # leave - add %r11,%rsp - mov %rdi,%rax - mov %rsi,%rdx - ret -# bytesatleast65: -._bytesatleast65: - # bytes -= 64 - sub $64,%rdx - # out += 64 - add $64,%rdi - # m += 64 - add $64,%rsi - # comment:fp stack unchanged by jump - # goto bytesatleast1 - jmp ._bytesatleast1 -ENDPROC(salsa20_encrypt_bytes) - -# enter salsa20_keysetup -ENTRY(salsa20_keysetup) - mov %rsp,%r11 - and $31,%r11 - add $256,%r11 - sub %r11,%rsp - # k = arg2 - mov %rsi,%rsi - # kbits = arg3 - mov %rdx,%rdx - # x = arg1 - mov %rdi,%rdi - # in0 = *(uint64 *) (k + 0) - movq 0(%rsi),%r8 - # in2 = *(uint64 *) (k + 8) - movq 8(%rsi),%r9 - # *(uint64 *) (x + 4) = in0 - movq %r8,4(%rdi) - # *(uint64 *) (x + 12) = in2 - movq %r9,12(%rdi) - # unsigned - * - * The assembly codes are public domain assembly codes written by Daniel. J. - * Bernstein . The codes are modified to include indentation - * and to remove extraneous comments and functions that are not needed. - * - i586 version, renamed as salsa20-i586-asm_32.S - * available from - * - x86-64 version, renamed as salsa20-x86_64-asm_64.S - * available from - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
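The two deleted files are D. J. Bernstein's unrolled i586 and x86-64 Salsa20 implementations; the register comments in them (p += x12, p <<<= 7, p ^= x4, and so on) walk the Salsa20 quarter-round across the columns and rows of the 4x4 state, four quarter-rounds per doubleround, 20 rounds total. For reference, a C rendering of that quarter-round using the kernel's rol32() helper; this is a reference sketch of the public-domain algorithm, not recovered driver code:

#include <linux/bitops.h>
#include <linux/types.h>

/* One Salsa20 quarter-round: for a column this is (a,b,c,d) =
 * (x0,x4,x8,x12); the asm's p/r/s/t/v/w temporaries track these values. */
static void salsa20_quarterround(u32 *a, u32 *b, u32 *c, u32 *d)
{
	*b ^= rol32(*a + *d, 7);
	*c ^= rol32(*b + *a, 9);
	*d ^= rol32(*c + *b, 13);
	*a ^= rol32(*d + *c, 18);
}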
- * - */ - -#include -#include -#include - -#define SALSA20_IV_SIZE 8U -#define SALSA20_MIN_KEY_SIZE 16U -#define SALSA20_MAX_KEY_SIZE 32U - -struct salsa20_ctx -{ - u32 input[16]; -}; - -asmlinkage void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, - u32 keysize, u32 ivsize); -asmlinkage void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv); -asmlinkage void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, - const u8 *src, u8 *dst, u32 bytes); - -static int setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keysize) -{ - struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm); - salsa20_keysetup(ctx, key, keysize*8, SALSA20_IV_SIZE*8); - return 0; -} - -static int encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) -{ - struct blkcipher_walk walk; - struct crypto_blkcipher *tfm = desc->tfm; - struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm); - int err; - - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt_block(desc, &walk, 64); - - salsa20_ivsetup(ctx, walk.iv); - - if (likely(walk.nbytes == nbytes)) - { - salsa20_encrypt_bytes(ctx, walk.src.virt.addr, - walk.dst.virt.addr, nbytes); - return blkcipher_walk_done(desc, &walk, 0); - } - - while (walk.nbytes >= 64) { - salsa20_encrypt_bytes(ctx, walk.src.virt.addr, - walk.dst.virt.addr, - walk.nbytes - (walk.nbytes % 64)); - err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64); - } - - if (walk.nbytes) { - salsa20_encrypt_bytes(ctx, walk.src.virt.addr, - walk.dst.virt.addr, walk.nbytes); - err = blkcipher_walk_done(desc, &walk, 0); - } - - return err; -} - -static struct crypto_alg alg = { - .cra_name = "salsa20", - .cra_driver_name = "salsa20-asm", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_type = &crypto_blkcipher_type, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct salsa20_ctx), - .cra_alignmask = 3, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .setkey = setkey, - .encrypt = encrypt, - .decrypt = encrypt, - .min_keysize = SALSA20_MIN_KEY_SIZE, - .max_keysize = SALSA20_MAX_KEY_SIZE, - .ivsize = SALSA20_IV_SIZE, - } - } -}; - -static int __init init(void) -{ - return crypto_register_alg(&alg); -} - -static void __exit fini(void) -{ - crypto_unregister_alg(&alg); -} - -module_init(init); -module_exit(fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)"); -MODULE_ALIAS_CRYPTO("salsa20"); -MODULE_ALIAS_CRYPTO("salsa20-asm"); diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S index 16c4ccb1f154..d2364c55bbde 100644 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S +++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S @@ -265,7 +265,7 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2) vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0 vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0 vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0 - vmovd _args_digest(state , idx, 4) , %xmm0 + vmovd _args_digest+4*32(state, idx, 4), %xmm1 vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1 vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1 vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1 diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c index 36870b26067a..d08805032f01 100644 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c +++ 
b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c @@ -57,10 +57,12 @@ void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state) { unsigned int j; - state->lens[0] = 0; - state->lens[1] = 1; - state->lens[2] = 2; - state->lens[3] = 3; + /* initially all lanes are unused */ + state->lens[0] = 0xFFFFFFFF00000000; + state->lens[1] = 0xFFFFFFFF00000001; + state->lens[2] = 0xFFFFFFFF00000002; + state->lens[3] = 0xFFFFFFFF00000003; + state->unused_lanes = 0xFF03020100; for (j = 0; j < 4; j++) state->ldata[j].job_in_lane = NULL; diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S index 1c3b7ceb36d2..e7273a606a07 100644 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S @@ -55,29 +55,31 @@ #define RAB1bl %bl #define RAB2bl %cl +#define CD0 0x0(%rsp) +#define CD1 0x8(%rsp) +#define CD2 0x10(%rsp) + +# used only before/after all rounds #define RCD0 %r8 #define RCD1 %r9 #define RCD2 %r10 -#define RCD0d %r8d -#define RCD1d %r9d -#define RCD2d %r10d - -#define RX0 %rbp -#define RX1 %r11 -#define RX2 %r12 +# used only during rounds +#define RX0 %r8 +#define RX1 %r9 +#define RX2 %r10 -#define RX0d %ebp -#define RX1d %r11d -#define RX2d %r12d +#define RX0d %r8d +#define RX1d %r9d +#define RX2d %r10d -#define RY0 %r13 -#define RY1 %r14 -#define RY2 %r15 +#define RY0 %r11 +#define RY1 %r12 +#define RY2 %r13 -#define RY0d %r13d -#define RY1d %r14d -#define RY2d %r15d +#define RY0d %r11d +#define RY1d %r12d +#define RY2d %r13d #define RT0 %rdx #define RT1 %rsi @@ -85,6 +87,8 @@ #define RT0d %edx #define RT1d %esi +#define RT1bl %sil + #define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \ movzbl ab ## bl, tmp2 ## d; \ movzbl ab ## bh, tmp1 ## d; \ @@ -92,6 +96,11 @@ op1##l T0(CTX, tmp2, 4), dst ## d; \ op2##l T1(CTX, tmp1, 4), dst ## d; +#define swap_ab_with_cd(ab, cd, tmp) \ + movq cd, tmp; \ + movq ab, cd; \ + movq tmp, ab; + /* * Combined G1 & G2 function. Reordered with help of rotates to have moves * at begining. 
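The sha512_mb_mgr_init_avx2() fix initializes the upper half of each lens[] entry to 0xFFFFFFFF so the flush path treats every lane as unused (the low half keeps the lane number), and seeds unused_lanes with the byte-packed free-lane stack the submit path consumes. A sketch of the pop convention that 0xFF03020100 implies; the helper below is hypothetical, and only the struct and field names come from the driver:

/* Hypothetical helper, not from the driver: pop the next free lane off
 * the byte-packed stack.  With 0xFF03020100 this yields lanes 0,1,2,3 in
 * turn and then exposes the 0xFF "no free lane" sentinel. */
static unsigned int sha512_mb_pop_lane(struct sha512_mb_mgr *state)
{
	u64 lanes = state->unused_lanes;

	state->unused_lanes = lanes >> 8;
	return lanes & 0xFF;
}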
@@ -110,15 +119,15 @@ /* G1,2 && G2,2 */ \ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \ - xchgq cd ## 0, ab ## 0; \ + swap_ab_with_cd(ab ## 0, cd ## 0, RT0); \ \ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \ - xchgq cd ## 1, ab ## 1; \ + swap_ab_with_cd(ab ## 1, cd ## 1, RT0); \ \ do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \ do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \ - xchgq cd ## 2, ab ## 2; + swap_ab_with_cd(ab ## 2, cd ## 2, RT0); #define enc_round_end(ab, x, y, n) \ addl y ## d, x ## d; \ @@ -168,6 +177,16 @@ decrypt_round3(ba, dc, (n*2)+1); \ decrypt_round3(ba, dc, (n*2)); +#define push_cd() \ + pushq RCD2; \ + pushq RCD1; \ + pushq RCD0; + +#define pop_cd() \ + popq RCD0; \ + popq RCD1; \ + popq RCD2; + #define inpack3(in, n, xy, m) \ movq 4*(n)(in), xy ## 0; \ xorq w+4*m(CTX), xy ## 0; \ @@ -223,11 +242,8 @@ ENTRY(__twofish_enc_blk_3way) * %rdx: src, RIO * %rcx: bool, if true: xor output */ - pushq %r15; - pushq %r14; pushq %r13; pushq %r12; - pushq %rbp; pushq %rbx; pushq %rcx; /* bool xor */ @@ -235,40 +251,36 @@ ENTRY(__twofish_enc_blk_3way) inpack_enc3(); - encrypt_cycle3(RAB, RCD, 0); - encrypt_cycle3(RAB, RCD, 1); - encrypt_cycle3(RAB, RCD, 2); - encrypt_cycle3(RAB, RCD, 3); - encrypt_cycle3(RAB, RCD, 4); - encrypt_cycle3(RAB, RCD, 5); - encrypt_cycle3(RAB, RCD, 6); - encrypt_cycle3(RAB, RCD, 7); + push_cd(); + encrypt_cycle3(RAB, CD, 0); + encrypt_cycle3(RAB, CD, 1); + encrypt_cycle3(RAB, CD, 2); + encrypt_cycle3(RAB, CD, 3); + encrypt_cycle3(RAB, CD, 4); + encrypt_cycle3(RAB, CD, 5); + encrypt_cycle3(RAB, CD, 6); + encrypt_cycle3(RAB, CD, 7); + pop_cd(); popq RIO; /* dst */ - popq %rbp; /* bool xor */ + popq RT1; /* bool xor */ - testb %bpl, %bpl; + testb RT1bl, RT1bl; jnz .L__enc_xor3; outunpack_enc3(mov); popq %rbx; - popq %rbp; popq %r12; popq %r13; - popq %r14; - popq %r15; ret; .L__enc_xor3: outunpack_enc3(xor); popq %rbx; - popq %rbp; popq %r12; popq %r13; - popq %r14; - popq %r15; ret; ENDPROC(__twofish_enc_blk_3way) @@ -278,35 +290,31 @@ ENTRY(twofish_dec_blk_3way) * %rsi: dst * %rdx: src, RIO */ - pushq %r15; - pushq %r14; pushq %r13; pushq %r12; - pushq %rbp; pushq %rbx; pushq %rsi; /* dst */ inpack_dec3(); - decrypt_cycle3(RAB, RCD, 7); - decrypt_cycle3(RAB, RCD, 6); - decrypt_cycle3(RAB, RCD, 5); - decrypt_cycle3(RAB, RCD, 4); - decrypt_cycle3(RAB, RCD, 3); - decrypt_cycle3(RAB, RCD, 2); - decrypt_cycle3(RAB, RCD, 1); - decrypt_cycle3(RAB, RCD, 0); + push_cd(); + decrypt_cycle3(RAB, CD, 7); + decrypt_cycle3(RAB, CD, 6); + decrypt_cycle3(RAB, CD, 5); + decrypt_cycle3(RAB, CD, 4); + decrypt_cycle3(RAB, CD, 3); + decrypt_cycle3(RAB, CD, 2); + decrypt_cycle3(RAB, CD, 1); + decrypt_cycle3(RAB, CD, 0); + pop_cd(); popq RIO; /* dst */ outunpack_dec3(); popq %rbx; - popq %rbp; popq %r12; popq %r13; - popq %r14; - popq %r15; ret; ENDPROC(twofish_dec_blk_3way) diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 6e160031cfea..5d10b7a85cad 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -1,6 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include #include +#include +#include +#include +#include +#include /* @@ -92,111 +97,78 @@ For 32-bit we have the following conventions - kernel is built with #define SIZEOF_PTREGS 21*8 - .macro ALLOC_PT_GPREGS_ON_STACK - addq $-(15*8), %rsp - .endm - - .macro SAVE_C_REGS_HELPER 
offset=0 rax=1 rcx=1 r8910=1 r11=1 - .if \r11 - movq %r11, 6*8+\offset(%rsp) - .endif - .if \r8910 - movq %r10, 7*8+\offset(%rsp) - movq %r9, 8*8+\offset(%rsp) - movq %r8, 9*8+\offset(%rsp) - .endif - .if \rax - movq %rax, 10*8+\offset(%rsp) - .endif - .if \rcx - movq %rcx, 11*8+\offset(%rsp) - .endif - movq %rdx, 12*8+\offset(%rsp) - movq %rsi, 13*8+\offset(%rsp) - movq %rdi, 14*8+\offset(%rsp) - UNWIND_HINT_REGS offset=\offset extra=0 - .endm - .macro SAVE_C_REGS offset=0 - SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1 - .endm - .macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0 - SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1 - .endm - .macro SAVE_C_REGS_EXCEPT_R891011 - SAVE_C_REGS_HELPER 0, 1, 1, 0, 0 - .endm - .macro SAVE_C_REGS_EXCEPT_RCX_R891011 - SAVE_C_REGS_HELPER 0, 1, 0, 0, 0 - .endm - .macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11 - SAVE_C_REGS_HELPER 0, 0, 0, 1, 0 - .endm - - .macro SAVE_EXTRA_REGS offset=0 - movq %r15, 0*8+\offset(%rsp) - movq %r14, 1*8+\offset(%rsp) - movq %r13, 2*8+\offset(%rsp) - movq %r12, 3*8+\offset(%rsp) - movq %rbp, 4*8+\offset(%rsp) - movq %rbx, 5*8+\offset(%rsp) - UNWIND_HINT_REGS offset=\offset - .endm - - .macro RESTORE_EXTRA_REGS offset=0 - movq 0*8+\offset(%rsp), %r15 - movq 1*8+\offset(%rsp), %r14 - movq 2*8+\offset(%rsp), %r13 - movq 3*8+\offset(%rsp), %r12 - movq 4*8+\offset(%rsp), %rbp - movq 5*8+\offset(%rsp), %rbx - UNWIND_HINT_REGS offset=\offset extra=0 - .endm - - .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1 - .if \rstor_r11 - movq 6*8(%rsp), %r11 +.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0 + /* + * Push registers and sanitize registers of values that a + * speculation attack might otherwise want to exploit. The + * lower registers are likely clobbered well before they + * could be put to use in a speculative execution gadget. 
+ * Interleave XOR with PUSH for better uop scheduling: + */ + .if \save_ret + pushq %rsi /* pt_regs->si */ + movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */ + movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */ + .else + pushq %rdi /* pt_regs->di */ + pushq %rsi /* pt_regs->si */ .endif - .if \rstor_r8910 - movq 7*8(%rsp), %r10 - movq 8*8(%rsp), %r9 - movq 9*8(%rsp), %r8 + pushq \rdx /* pt_regs->dx */ + pushq %rcx /* pt_regs->cx */ + pushq \rax /* pt_regs->ax */ + pushq %r8 /* pt_regs->r8 */ + xorl %r8d, %r8d /* nospec r8 */ + pushq %r9 /* pt_regs->r9 */ + xorl %r9d, %r9d /* nospec r9 */ + pushq %r10 /* pt_regs->r10 */ + xorl %r10d, %r10d /* nospec r10 */ + pushq %r11 /* pt_regs->r11 */ + xorl %r11d, %r11d /* nospec r11*/ + pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx*/ + pushq %rbp /* pt_regs->rbp */ + xorl %ebp, %ebp /* nospec rbp*/ + pushq %r12 /* pt_regs->r12 */ + xorl %r12d, %r12d /* nospec r12*/ + pushq %r13 /* pt_regs->r13 */ + xorl %r13d, %r13d /* nospec r13*/ + pushq %r14 /* pt_regs->r14 */ + xorl %r14d, %r14d /* nospec r14*/ + pushq %r15 /* pt_regs->r15 */ + xorl %r15d, %r15d /* nospec r15*/ + UNWIND_HINT_REGS + .if \save_ret + pushq %rsi /* return address on top of stack */ .endif - .if \rstor_rax - movq 10*8(%rsp), %rax +.endm + +.macro POP_REGS pop_rdi=1 skip_r11rcx=0 + popq %r15 + popq %r14 + popq %r13 + popq %r12 + popq %rbp + popq %rbx + .if \skip_r11rcx + popq %rsi + .else + popq %r11 .endif - .if \rstor_rcx - movq 11*8(%rsp), %rcx + popq %r10 + popq %r9 + popq %r8 + popq %rax + .if \skip_r11rcx + popq %rsi + .else + popq %rcx .endif - .if \rstor_rdx - movq 12*8(%rsp), %rdx + popq %rdx + popq %rsi + .if \pop_rdi + popq %rdi .endif - movq 13*8(%rsp), %rsi - movq 14*8(%rsp), %rdi - UNWIND_HINT_IRET_REGS offset=16*8 - .endm - .macro RESTORE_C_REGS - RESTORE_C_REGS_HELPER 1,1,1,1,1 - .endm - .macro RESTORE_C_REGS_EXCEPT_RAX - RESTORE_C_REGS_HELPER 0,1,1,1,1 - .endm - .macro RESTORE_C_REGS_EXCEPT_RCX - RESTORE_C_REGS_HELPER 1,0,1,1,1 - .endm - .macro RESTORE_C_REGS_EXCEPT_R11 - RESTORE_C_REGS_HELPER 1,1,0,1,1 - .endm - .macro RESTORE_C_REGS_EXCEPT_RCX_R11 - RESTORE_C_REGS_HELPER 1,0,0,1,1 - .endm - - .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0 - subq $-(15*8+\addskip), %rsp - .endm - - .macro icebp - .byte 0xf1 - .endm +.endm /* * This is a sneaky trick to help the unwinder find pt_regs on the stack. The @@ -204,7 +176,7 @@ For 32-bit we have the following conventions - kernel is built with * is just setting the LSB, which makes it an invalid stack address and is also * a signal to the unwinder that it's a pt_regs pointer in disguise. * - * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts + * NOTE: This macro must be used *after* PUSH_AND_CLEAR_REGS because it corrupts * the original rbp. */ .macro ENCODE_FRAME_POINTER ptregs_offset=0 @@ -218,6 +190,148 @@ For 32-bit we have the following conventions - kernel is built with #endif .endm +#ifdef CONFIG_PAGE_TABLE_ISOLATION + +/* + * PAGE_TABLE_ISOLATION PGDs are 8k. 
Flip bit 12 to switch between the two + * halves: + */ +#define PTI_USER_PGTABLE_BIT PAGE_SHIFT +#define PTI_USER_PGTABLE_MASK (1 << PTI_USER_PGTABLE_BIT) +#define PTI_USER_PCID_BIT X86_CR3_PTI_PCID_USER_BIT +#define PTI_USER_PCID_MASK (1 << PTI_USER_PCID_BIT) +#define PTI_USER_PGTABLE_AND_PCID_MASK (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK) + +.macro SET_NOFLUSH_BIT reg:req + bts $X86_CR3_PCID_NOFLUSH_BIT, \reg +.endm + +.macro ADJUST_KERNEL_CR3 reg:req + ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID + /* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */ + andq $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg +.endm + +.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req + ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI + mov %cr3, \scratch_reg + ADJUST_KERNEL_CR3 \scratch_reg + mov \scratch_reg, %cr3 +.Lend_\@: +.endm + +#define THIS_CPU_user_pcid_flush_mask \ + PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask + +.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req + ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI + mov %cr3, \scratch_reg + + ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID + + /* + * Test if the ASID needs a flush. + */ + movq \scratch_reg, \scratch_reg2 + andq $(0x7FF), \scratch_reg /* mask ASID */ + bt \scratch_reg, THIS_CPU_user_pcid_flush_mask + jnc .Lnoflush_\@ + + /* Flush needed, clear the bit */ + btr \scratch_reg, THIS_CPU_user_pcid_flush_mask + movq \scratch_reg2, \scratch_reg + jmp .Lwrcr3_pcid_\@ + +.Lnoflush_\@: + movq \scratch_reg2, \scratch_reg + SET_NOFLUSH_BIT \scratch_reg + +.Lwrcr3_pcid_\@: + /* Flip the ASID to the user version */ + orq $(PTI_USER_PCID_MASK), \scratch_reg + +.Lwrcr3_\@: + /* Flip the PGD to the user version */ + orq $(PTI_USER_PGTABLE_MASK), \scratch_reg + mov \scratch_reg, %cr3 +.Lend_\@: +.endm + +.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req + pushq %rax + SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax + popq %rax +.endm + +.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req + ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI + movq %cr3, \scratch_reg + movq \scratch_reg, \save_reg + /* + * Test the user pagetable bit. If set, then the user page tables + * are active. If clear CR3 already has the kernel page table + * active. + */ + bt $PTI_USER_PGTABLE_BIT, \scratch_reg + jnc .Ldone_\@ + + ADJUST_KERNEL_CR3 \scratch_reg + movq \scratch_reg, %cr3 + +.Ldone_\@: +.endm + +.macro RESTORE_CR3 scratch_reg:req save_reg:req + ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI + + ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID + + /* + * KERNEL pages can always resume with NOFLUSH as we do + * explicit flushes. + */ + bt $PTI_USER_PGTABLE_BIT, \save_reg + jnc .Lnoflush_\@ + + /* + * Check if there's a pending flush for the user ASID we're + * about to set. + */ + movq \save_reg, \scratch_reg + andq $(0x7FF), \scratch_reg + bt \scratch_reg, THIS_CPU_user_pcid_flush_mask + jnc .Lnoflush_\@ + + btr \scratch_reg, THIS_CPU_user_pcid_flush_mask + jmp .Lwrcr3_\@ + +.Lnoflush_\@: + SET_NOFLUSH_BIT \save_reg + +.Lwrcr3_\@: + /* + * The CR3 write could be avoided when not changing its value, + * but would require a CR3 read *and* a scratch register. 
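The CR3 macros above encode the PAGE_TABLE_ISOLATION layout: the PGD is an 8k pair and bit 12 (PAGE_SHIFT) of the CR3 value picks the kernel or user half, while bit 11 (X86_CR3_PTI_PCID_USER_BIT) selects the user PCID on hardware that has PCID, with the NOFLUSH bit suppressing the implicit TLB flush when no flush is pending for that ASID. The PGD/PCID flip in C terms, as an illustration only; the asm applies the PCID bit strictly behind X86_FEATURE_PCID alternatives, which this sketch omits:

#include <asm/page_types.h>		/* PAGE_SHIFT */
#include <asm/processor-flags.h>	/* X86_CR3_PTI_PCID_USER_BIT */

static inline unsigned long kernel_to_user_cr3(unsigned long cr3)
{
	/* other 4k half of the 8k PGD pair, plus the user PCID variant */
	return cr3 | (1UL << PAGE_SHIFT)
		   | (1UL << X86_CR3_PTI_PCID_USER_BIT);
}

static inline unsigned long user_to_kernel_cr3(unsigned long cr3)
{
	return cr3 & ~((1UL << PAGE_SHIFT) |
		       (1UL << X86_CR3_PTI_PCID_USER_BIT));
}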
+ */ + movq \save_reg, %cr3 +.Lend_\@: +.endm + +#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */ + +.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req +.endm +.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req +.endm +.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req +.endm +.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req +.endm +.macro RESTORE_CR3 scratch_reg:req save_reg:req +.endm + +#endif + #endif /* CONFIG_X86_64 */ /* diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 03505ffbe1b6..60e21ccfb6d6 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -208,7 +209,7 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs) * special case only applies after poking regs and before the * very next return to user mode. */ - current->thread.status &= ~(TS_COMPAT|TS_I386_REGS_POKED); + ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED); #endif user_enter_irqoff(); @@ -284,7 +285,8 @@ __visible void do_syscall_64(struct pt_regs *regs) * regs->orig_ax, which changes the behavior of some syscalls. */ if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) { - regs->ax = sys_call_table[nr & __SYSCALL_MASK]( + nr = array_index_nospec(nr & __SYSCALL_MASK, NR_syscalls); + regs->ax = sys_call_table[nr]( regs->di, regs->si, regs->dx, regs->r10, regs->r8, regs->r9); } @@ -306,7 +308,7 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs) unsigned int nr = (unsigned int)regs->orig_ax; #ifdef CONFIG_IA32_EMULATION - current->thread.status |= TS_COMPAT; + ti->status |= TS_COMPAT; #endif if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) { @@ -320,6 +322,7 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs) } if (likely(nr < IA32_NR_syscalls)) { + nr = array_index_nospec(nr, IA32_NR_syscalls); /* * It's possible that a 32-bit syscall implementation * takes a 64-bit parameter but nonetheless assumes that diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 4838037f97f6..60c4c342316c 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -44,6 +44,7 @@ #include #include #include +#include .section .entry.text, "ax" @@ -243,6 +244,17 @@ ENTRY(__switch_to_asm) movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset #endif +#ifdef CONFIG_RETPOLINE + /* + * When switching from a shallower to a deeper call stack + * the RSB may either underflow or use entries populated + * with userspace addresses. On CPUs where those concerns + * exist, overwrite the RSB with entries which capture + * speculative execution to prevent attack. + */ + FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW +#endif + /* restore callee-saved registers */ popl %esi popl %edi @@ -290,7 +302,7 @@ ENTRY(ret_from_fork) /* kernel thread */ 1: movl %edi, %eax - call *%ebx + CALL_NOSPEC %ebx /* * A kernel thread is allowed to return here after successfully * calling do_execve(). Exit to userspace to complete the execve() @@ -919,7 +931,7 @@ common_exception: movl %ecx, %es TRACE_IRQS_OFF movl %esp, %eax # pt_regs pointer - call *%edi + CALL_NOSPEC %edi jmp ret_from_exception END(common_exception) @@ -941,9 +953,10 @@ ENTRY(debug) movl %esp, %eax # pt_regs pointer /* Are we currently on the SYSENTER stack? 
*/ - PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx) - subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ - cmpl $SIZEOF_SYSENTER_stack, %ecx + movl PER_CPU_VAR(cpu_entry_area), %ecx + addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx + subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ + cmpl $SIZEOF_entry_stack, %ecx jb .Ldebug_from_sysenter_stack TRACE_IRQS_OFF @@ -984,9 +997,10 @@ ENTRY(nmi) movl %esp, %eax # pt_regs pointer /* Are we currently on the SYSENTER stack? */ - PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx) - subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ - cmpl $SIZEOF_SYSENTER_stack, %ecx + movl PER_CPU_VAR(cpu_entry_area), %ecx + addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx + subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ + cmpl $SIZEOF_entry_stack, %ecx jb .Lnmi_from_sysenter_stack /* Not on SYSENTER stack. */ diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index bcfc5668dcb2..0fae7096ae23 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -23,7 +23,6 @@ #include #include #include -#include "calling.h" #include #include #include @@ -38,8 +37,11 @@ #include #include #include +#include #include +#include "calling.h" + .code64 .section .entry.text, "ax" @@ -136,6 +138,67 @@ END(native_usergs_sysret64) * with them due to bugs in both AMD and Intel CPUs. */ + .pushsection .entry_trampoline, "ax" + +/* + * The code in here gets remapped into cpu_entry_area's trampoline. This means + * that the assembler and linker have the wrong idea as to where this code + * lives (and, in fact, it's mapped more than once, so it's not even at a + * fixed address). So we can't reference any symbols outside the entry + * trampoline and expect it to work. + * + * Instead, we carefully abuse %rip-relative addressing. + * _entry_trampoline(%rip) refers to the start of the remapped entry + * trampoline. We can thus find cpu_entry_area with this macro: + */ + +#define CPU_ENTRY_AREA \ + _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip) + +/* The top word of the SYSENTER stack is hot and is usable as scratch space. */ +#define RSP_SCRATCH CPU_ENTRY_AREA_entry_stack + \ + SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA + +ENTRY(entry_SYSCALL_64_trampoline) + UNWIND_HINT_EMPTY + swapgs + + /* Stash the user RSP. */ + movq %rsp, RSP_SCRATCH + + /* Note: using %rsp as a scratch reg. */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp + + /* Load the top of the task stack into RSP */ + movq CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp + + /* Start building the simulated IRET frame. */ + pushq $__USER_DS /* pt_regs->ss */ + pushq RSP_SCRATCH /* pt_regs->sp */ + pushq %r11 /* pt_regs->flags */ + pushq $__USER_CS /* pt_regs->cs */ + pushq %rcx /* pt_regs->ip */ + + /* + * x86 lacks a near absolute jump, and we can't jump to the real + * entry text with a relative jump. We could push the target + * address and then use retq, but this destroys the pipeline on + * many CPUs (wasting over 20 cycles on Sandy Bridge). Instead, + * spill RDI and restore it in a second-stage trampoline. 
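+	 *
+	 * (Editor's note: the resulting two-stage sequence below is, in
+	 * effect,
+	 *
+	 *	pushq	%rdi			; free a scratch register
+	 *	movq	$entry_SYSCALL_64_stage2, %rdi	; absolute address at a fixed VA
+	 *	JMP_NOSPEC %rdi			; indirect jump works from any mapping
+	 *
+	 * with stage2 doing popq %rdi and a plain relative jmp from its
+	 * fixed link-time address.)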
+ */ + pushq %rdi + movq $entry_SYSCALL_64_stage2, %rdi + JMP_NOSPEC %rdi +END(entry_SYSCALL_64_trampoline) + + .popsection + +ENTRY(entry_SYSCALL_64_stage2) + UNWIND_HINT_EMPTY + popq %rdi + jmp entry_SYSCALL_64_after_hwframe +END(entry_SYSCALL_64_stage2) + ENTRY(entry_SYSCALL_64) UNWIND_HINT_EMPTY /* @@ -145,11 +208,13 @@ ENTRY(entry_SYSCALL_64) */ swapgs + /* + * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it + * is not required to switch CR3. + */ movq %rsp, PER_CPU_VAR(rsp_scratch) movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp - TRACE_IRQS_OFF - /* Construct struct pt_regs on stack */ pushq $__USER_DS /* pt_regs->ss */ pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */ @@ -158,105 +223,27 @@ ENTRY(entry_SYSCALL_64) pushq %rcx /* pt_regs->ip */ GLOBAL(entry_SYSCALL_64_after_hwframe) pushq %rax /* pt_regs->orig_ax */ - pushq %rdi /* pt_regs->di */ - pushq %rsi /* pt_regs->si */ - pushq %rdx /* pt_regs->dx */ - pushq %rcx /* pt_regs->cx */ - pushq $-ENOSYS /* pt_regs->ax */ - pushq %r8 /* pt_regs->r8 */ - pushq %r9 /* pt_regs->r9 */ - pushq %r10 /* pt_regs->r10 */ - pushq %r11 /* pt_regs->r11 */ - sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */ - UNWIND_HINT_REGS extra=0 - /* - * If we need to do entry work or if we guess we'll need to do - * exit work, go straight to the slow path. - */ - movq PER_CPU_VAR(current_task), %r11 - testl $_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TASK_TI_flags(%r11) - jnz entry_SYSCALL64_slow_path + PUSH_AND_CLEAR_REGS rax=$-ENOSYS -entry_SYSCALL_64_fastpath: - /* - * Easy case: enable interrupts and issue the syscall. If the syscall - * needs pt_regs, we'll call a stub that disables interrupts again - * and jumps to the slow path. - */ - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) -#if __SYSCALL_MASK == ~0 - cmpq $__NR_syscall_max, %rax -#else - andl $__SYSCALL_MASK, %eax - cmpl $__NR_syscall_max, %eax -#endif - ja 1f /* return -ENOSYS (already in pt_regs->ax) */ - movq %r10, %rcx - - /* - * This call instruction is handled specially in stub_ptregs_64. - * It might end up jumping to the slow path. If it jumps, RAX - * and all argument registers are clobbered. - */ - call *sys_call_table(, %rax, 8) -.Lentry_SYSCALL_64_after_fastpath_call: - - movq %rax, RAX(%rsp) -1: - - /* - * If we get here, then we know that pt_regs is clean for SYSRET64. - * If we see that no exit work is required (which we are required - * to check with IRQs off), then we can go straight to SYSRET64. - */ - DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF - movq PER_CPU_VAR(current_task), %r11 - testl $_TIF_ALLWORK_MASK, TASK_TI_flags(%r11) - jnz 1f - - LOCKDEP_SYS_EXIT - TRACE_IRQS_ON /* user mode is traced as IRQs on */ - movq RIP(%rsp), %rcx - movq EFLAGS(%rsp), %r11 - RESTORE_C_REGS_EXCEPT_RCX_R11 - movq RSP(%rsp), %rsp - UNWIND_HINT_EMPTY - USERGS_SYSRET64 - -1: - /* - * The fast path looked good when we started, but something changed - * along the way and we need to switch to the slow path. Calling - * raise(3) will trigger this, for example. IRQs are off. - */ - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_ANY) - SAVE_EXTRA_REGS - movq %rsp, %rdi - call syscall_return_slowpath /* returns with IRQs disabled */ - jmp return_from_SYSCALL_64 -entry_SYSCALL64_slow_path: /* IRQs are off. 
*/ - SAVE_EXTRA_REGS movq %rsp, %rdi call do_syscall_64 /* returns with IRQs disabled */ -return_from_SYSCALL_64: - RESTORE_EXTRA_REGS TRACE_IRQS_IRETQ /* we're about to change IF */ /* * Try to use SYSRET instead of IRET if we're returning to - * a completely clean 64-bit userspace context. + * a completely clean 64-bit userspace context. If we're not, + * go to the slow exit path. */ movq RCX(%rsp), %rcx movq RIP(%rsp), %r11 - cmpq %rcx, %r11 /* RCX == RIP */ - jne opportunistic_sysret_failed + + cmpq %rcx, %r11 /* SYSRET requires RCX == RIP */ + jne swapgs_restore_regs_and_return_to_usermode /* * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP @@ -274,14 +261,14 @@ return_from_SYSCALL_64: /* If this changed %rcx, it was not canonical */ cmpq %rcx, %r11 - jne opportunistic_sysret_failed + jne swapgs_restore_regs_and_return_to_usermode cmpq $__USER_CS, CS(%rsp) /* CS must match SYSRET */ - jne opportunistic_sysret_failed + jne swapgs_restore_regs_and_return_to_usermode movq R11(%rsp), %r11 cmpq %r11, EFLAGS(%rsp) /* R11 == RFLAGS */ - jne opportunistic_sysret_failed + jne swapgs_restore_regs_and_return_to_usermode /* * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot @@ -302,12 +289,12 @@ return_from_SYSCALL_64: * would never get past 'stuck_here'. */ testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11 - jnz opportunistic_sysret_failed + jnz swapgs_restore_regs_and_return_to_usermode /* nothing to check for RSP */ cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */ - jne opportunistic_sysret_failed + jne swapgs_restore_regs_and_return_to_usermode /* * We win! This label is here just for ease of understanding @@ -315,56 +302,29 @@ return_from_SYSCALL_64: */ syscall_return_via_sysret: /* rcx and r11 are already restored (see code above) */ - RESTORE_C_REGS_EXCEPT_RCX_R11 - movq RSP(%rsp), %rsp UNWIND_HINT_EMPTY - USERGS_SYSRET64 - -opportunistic_sysret_failed: - SWAPGS - jmp restore_c_regs_and_iret -END(entry_SYSCALL_64) + POP_REGS pop_rdi=0 skip_r11rcx=1 -ENTRY(stub_ptregs_64) /* - * Syscalls marked as needing ptregs land here. - * If we are on the fast path, we need to save the extra regs, - * which we achieve by trying again on the slow path. If we are on - * the slow path, the extra regs are already saved. - * - * RAX stores a pointer to the C function implementing the syscall. - * IRQs are on. + * Now all regs are restored except RSP and RDI. + * Save old stack pointer and switch to trampoline stack. */ - cmpq $.Lentry_SYSCALL_64_after_fastpath_call, (%rsp) - jne 1f + movq %rsp, %rdi + movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp + + pushq RSP-RDI(%rdi) /* RSP */ + pushq (%rdi) /* RDI */ /* - * Called from fast path -- disable IRQs again, pop return address - * and jump to slow path + * We are on the trampoline stack. All regs except RDI are live. + * We can do future final exit work right here. 
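+	 *
+	 * (Editor's note on the ordering: the task stack is not mapped in
+	 * the user page tables, so anything still needed after
+	 * SWITCH_TO_USER_CR3 -- here the saved user RSP and RDI -- is
+	 * first moved to the trampoline stack, which cpu_entry_area keeps
+	 * mapped in both the kernel and user page tables.)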
*/ - DISABLE_INTERRUPTS(CLBR_ANY) - TRACE_IRQS_OFF - popq %rax - UNWIND_HINT_REGS extra=0 - jmp entry_SYSCALL64_slow_path + SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi -1: - jmp *%rax /* Called from C */ -END(stub_ptregs_64) - -.macro ptregs_stub func -ENTRY(ptregs_\func) - UNWIND_HINT_FUNC - leaq \func(%rip), %rax - jmp stub_ptregs_64 -END(ptregs_\func) -.endm - -/* Instantiate ptregs_stub for each ptregs-using syscall */ -#define __SYSCALL_64_QUAL_(sym) -#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym -#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym) -#include + popq %rdi + popq %rsp + USERGS_SYSRET64 +END(entry_SYSCALL_64) /* * %rdi: prev task @@ -392,6 +352,17 @@ ENTRY(__switch_to_asm) movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset #endif +#ifdef CONFIG_RETPOLINE + /* + * When switching from a shallower to a deeper call stack + * the RSB may either underflow or use entries populated + * with userspace addresses. On CPUs where those concerns + * exist, overwrite the RSB with entries which capture + * speculative execution to prevent attack. + */ + FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW +#endif + /* restore callee-saved registers */ popq %r15 popq %r14 @@ -423,13 +394,12 @@ ENTRY(ret_from_fork) movq %rsp, %rdi call syscall_return_slowpath /* returns with IRQs disabled */ TRACE_IRQS_ON /* user mode is traced as IRQS on */ - SWAPGS - jmp restore_regs_and_iret + jmp swapgs_restore_regs_and_return_to_usermode 1: /* kernel thread */ movq %r12, %rdi - call *%rbx + CALL_NOSPEC %rbx /* * A kernel thread is allowed to return here after successfully * calling do_execve(). Exit to userspace to complete the execve() @@ -457,12 +427,13 @@ END(irq_entries_start) .macro DEBUG_ENTRY_ASSERT_IRQS_OFF #ifdef CONFIG_DEBUG_ENTRY - pushfq - testl $X86_EFLAGS_IF, (%rsp) + pushq %rax + SAVE_FLAGS(CLBR_RAX) + testl $X86_EFLAGS_IF, %eax jz .Lokay_\@ ud2 .Lokay_\@: - addq $8, %rsp + popq %rax #endif .endm @@ -554,21 +525,22 @@ END(irq_entries_start) /* 0(%rsp): ~(interrupt number) */ .macro interrupt func cld - ALLOC_PT_GPREGS_ON_STACK - SAVE_C_REGS - SAVE_EXTRA_REGS + + testb $3, CS-ORIG_RAX(%rsp) + jz 1f + SWAPGS + call switch_to_thread_stack +1: + + PUSH_AND_CLEAR_REGS ENCODE_FRAME_POINTER testb $3, CS(%rsp) jz 1f /* - * IRQ from user mode. Switch to kernel gsbase and inform context - * tracking that we're in kernel mode. - */ - SWAPGS - - /* + * IRQ from user mode. + * * We need to tell lockdep that IRQs are off. We can't do this until * we fix gsbase, and we should do it before enter_from_user_mode * (which can take locks). Since TRACE_IRQS_OFF idempotent, @@ -612,8 +584,46 @@ GLOBAL(retint_user) mov %rsp,%rdi call prepare_exit_to_usermode TRACE_IRQS_IRETQ + +GLOBAL(swapgs_restore_regs_and_return_to_usermode) +#ifdef CONFIG_DEBUG_ENTRY + /* Assert that pt_regs indicates user mode. */ + testb $3, CS(%rsp) + jnz 1f + ud2 +1: +#endif + POP_REGS pop_rdi=0 + + /* + * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS. + * Save old stack pointer and switch to trampoline stack. + */ + movq %rsp, %rdi + movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp + + /* Copy the IRET frame to the trampoline stack. */ + pushq 6*8(%rdi) /* SS */ + pushq 5*8(%rdi) /* RSP */ + pushq 4*8(%rdi) /* EFLAGS */ + pushq 3*8(%rdi) /* CS */ + pushq 2*8(%rdi) /* RIP */ + + /* Push user RDI on the trampoline stack. */ + pushq (%rdi) + + /* + * We are on the trampoline stack. All regs except RDI are live. + * We can do future final exit work right here. 
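+	 *
+	 * (Editor's sketch of the trampoline stack at this point, top of
+	 * stack first: user RDI, then RIP, CS, EFLAGS, RSP, SS -- the
+	 * five-word IRET frame copied above. Everything the IRET
+	 * microcode will read now lives on a stack that remains mapped
+	 * after the CR3 switch below.)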
+ */ + + SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi + + /* Restore RDI. */ + popq %rdi SWAPGS - jmp restore_regs_and_iret + INTERRUPT_RETURN + /* Returning to kernel space */ retint_kernel: @@ -633,15 +643,16 @@ retint_kernel: */ TRACE_IRQS_IRETQ -/* - * At this label, code paths which return to kernel and to user, - * which come from interrupts/exception and from syscalls, merge. - */ -GLOBAL(restore_regs_and_iret) - RESTORE_EXTRA_REGS -restore_c_regs_and_iret: - RESTORE_C_REGS - REMOVE_PT_GPREGS_FROM_STACK 8 +GLOBAL(restore_regs_and_return_to_kernel) +#ifdef CONFIG_DEBUG_ENTRY + /* Assert that pt_regs indicates kernel mode. */ + testb $3, CS(%rsp) + jz 1f + ud2 +1: +#endif + POP_REGS + addq $8, %rsp /* skip regs->orig_ax */ INTERRUPT_RETURN ENTRY(native_iret) @@ -689,7 +700,9 @@ native_irq_return_ldt: */ pushq %rdi /* Stash user RDI */ - SWAPGS + SWAPGS /* to kernel GS */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */ + movq PER_CPU_VAR(espfix_waddr), %rdi movq %rax, (0*8)(%rdi) /* user RAX */ movq (1*8)(%rsp), %rax /* user RIP */ @@ -705,7 +718,6 @@ native_irq_return_ldt: /* Now RAX == RSP. */ andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */ - popq %rdi /* Restore user RDI */ /* * espfix_stack[31:16] == 0. The page tables are set up such that @@ -716,7 +728,11 @@ native_irq_return_ldt: * still points to an RO alias of the ESPFIX stack. */ orq PER_CPU_VAR(espfix_stack), %rax - SWAPGS + + SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi + SWAPGS /* to user GS */ + popq %rdi /* Restore user RDI */ + movq %rax, %rsp UNWIND_HINT_IRET_REGS offset=8 @@ -805,7 +821,35 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt /* * Exception entry points. */ -#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8) +#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8) + +/* + * Switch to the thread stack. This is called with the IRET frame and + * orig_ax on the stack. (That is, RDI..R12 are not on the stack and + * space has not been allocated for them.) + */ +ENTRY(switch_to_thread_stack) + UNWIND_HINT_FUNC + + pushq %rdi + /* Need to switch before accessing the thread stack. */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi + movq %rsp, %rdi + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp + UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI + + pushq 7*8(%rdi) /* regs->ss */ + pushq 6*8(%rdi) /* regs->rsp */ + pushq 5*8(%rdi) /* regs->eflags */ + pushq 4*8(%rdi) /* regs->cs */ + pushq 3*8(%rdi) /* regs->ip */ + pushq 2*8(%rdi) /* regs->orig_ax */ + pushq 8(%rdi) /* return address */ + UNWIND_HINT_FUNC + + movq (%rdi), %rdi + ret +END(switch_to_thread_stack) .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ENTRY(\sym) @@ -818,17 +862,16 @@ ENTRY(\sym) ASM_CLAC - .ifeq \has_error_code + .if \has_error_code == 0 pushq $-1 /* ORIG_RAX: no syscall to restart */ .endif - ALLOC_PT_GPREGS_ON_STACK + .if \paranoid < 2 + testb $3, CS-ORIG_RAX(%rsp) /* If coming from userspace, switch stacks */ + jnz .Lfrom_usermode_switch_stack_\@ + .endif .if \paranoid - .if \paranoid == 1 - testb $3, CS(%rsp) /* If coming from userspace, switch stacks */ - jnz 1f - .endif call paranoid_entry .else call error_entry @@ -870,20 +913,15 @@ ENTRY(\sym) jmp error_exit .endif - .if \paranoid == 1 + .if \paranoid < 2 /* - * Paranoid entry from userspace. Switch stacks and treat it + * Entry from userspace. Switch stacks and treat it * as a normal entry. This means that paranoid handlers * run in real process context if user_mode(regs). 
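 *
 * (Editor's summary of the \paranoid dispatch above, as a sketch:
 *	paranoid=0: from user -> switch stack, error_entry; from kernel -> error_entry
 *	paranoid=1: from user -> switch stack, error_entry; from kernel -> paranoid_entry
 *	paranoid=2: always paranoid_entry, for entries that must not
 *	            trust the saved CS, e.g. double fault.)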
*/ -1: +.Lfrom_usermode_switch_stack_\@: call error_entry - - movq %rsp, %rdi /* pt_regs pointer */ - call sync_regs - movq %rax, %rsp /* switch stack */ - movq %rsp, %rdi /* pt_regs pointer */ .if \has_error_code @@ -895,7 +933,7 @@ ENTRY(\sym) call \do_sym - jmp error_exit /* %ebx: no swapgs flag */ + jmp error_exit .endif END(\sym) .endm @@ -1037,9 +1075,7 @@ ENTRY(xen_failsafe_callback) addq $0x30, %rsp UNWIND_HINT_IRET_REGS pushq $-1 /* orig_ax = -1 => not a system call */ - ALLOC_PT_GPREGS_ON_STACK - SAVE_C_REGS - SAVE_EXTRA_REGS + PUSH_AND_CLEAR_REGS ENCODE_FRAME_POINTER jmp error_exit END(xen_failsafe_callback) @@ -1055,10 +1091,11 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ #endif /* CONFIG_HYPERV */ idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK -idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK +idtentry int3 do_int3 has_error_code=0 idtentry stack_segment do_stack_segment has_error_code=1 #ifdef CONFIG_XEN +idtentry xennmi do_nmi has_error_code=0 idtentry xendebug do_debug has_error_code=0 idtentry xenint3 do_int3 has_error_code=0 #endif @@ -1071,7 +1108,7 @@ idtentry async_page_fault do_async_page_fault has_error_code=1 #endif #ifdef CONFIG_X86_MCE -idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip) +idtentry machine_check do_mce has_error_code=0 paranoid=1 #endif /* @@ -1082,8 +1119,7 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vec ENTRY(paranoid_entry) UNWIND_HINT_FUNC cld - SAVE_C_REGS 8 - SAVE_EXTRA_REGS 8 + PUSH_AND_CLEAR_REGS save_ret=1 ENCODE_FRAME_POINTER 8 movl $1, %ebx movl $MSR_GS_BASE, %ecx @@ -1092,7 +1128,11 @@ ENTRY(paranoid_entry) js 1f /* negative -> in kernel */ SWAPGS xorl %ebx, %ebx -1: ret + +1: + SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 + + ret END(paranoid_entry) /* @@ -1112,30 +1152,26 @@ ENTRY(paranoid_exit) DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF_DEBUG testl %ebx, %ebx /* swapgs needed? */ - jnz paranoid_exit_no_swapgs + jnz .Lparanoid_exit_no_swapgs TRACE_IRQS_IRETQ + RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 SWAPGS_UNSAFE_STACK - jmp paranoid_exit_restore -paranoid_exit_no_swapgs: + jmp .Lparanoid_exit_restore +.Lparanoid_exit_no_swapgs: TRACE_IRQS_IRETQ_DEBUG -paranoid_exit_restore: - RESTORE_EXTRA_REGS - RESTORE_C_REGS - REMOVE_PT_GPREGS_FROM_STACK 8 - INTERRUPT_RETURN + RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 +.Lparanoid_exit_restore: + jmp restore_regs_and_return_to_kernel END(paranoid_exit) /* - * Save all registers in pt_regs, and switch gs if needed. - * Return: EBX=0: came from user mode; EBX=1: otherwise + * Save all registers in pt_regs, and switch GS if needed. */ ENTRY(error_entry) UNWIND_HINT_FUNC cld - SAVE_C_REGS 8 - SAVE_EXTRA_REGS 8 + PUSH_AND_CLEAR_REGS save_ret=1 ENCODE_FRAME_POINTER 8 - xorl %ebx, %ebx testb $3, CS+8(%rsp) jz .Lerror_kernelspace @@ -1144,8 +1180,18 @@ ENTRY(error_entry) * from user mode due to an IRET fault. */ SWAPGS + /* We have user CR3. Change to kernel CR3. */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax .Lerror_entry_from_usermode_after_swapgs: + /* Put us onto the real thread stack. */ + popq %r12 /* save return addr in %12 */ + movq %rsp, %rdi /* arg0 = pt_regs pointer */ + call sync_regs + movq %rax, %rsp /* switch stack */ + ENCODE_FRAME_POINTER + pushq %r12 + /* * We need to tell lockdep that IRQs are off. We can't do this until * we fix gsbase, and we should do it before enter_from_user_mode @@ -1166,7 +1212,6 @@ ENTRY(error_entry) * for these here too. 
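 *
 * (Editor's note: concretely, the two RIP values checked below are
 * native_irq_return_iret, i.e. the IRET instruction itself faulting
 * on a bad user frame, and .Lgs_change, where a GS selector load can
 * fault and is fixed up.)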
*/ .Lerror_kernelspace: - incl %ebx leaq native_irq_return_iret(%rip), %rcx cmpq %rcx, RIP+8(%rsp) je .Lerror_bad_iret @@ -1182,6 +1227,7 @@ ENTRY(error_entry) * .Lgs_change's error handler with kernel gsbase. */ SWAPGS + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax jmp .Lerror_entry_done .Lbstep_iret: @@ -1191,42 +1237,42 @@ ENTRY(error_entry) .Lerror_bad_iret: /* - * We came from an IRET to user mode, so we have user gsbase. - * Switch to kernel gsbase: + * We came from an IRET to user mode, so we have user + * gsbase and CR3. Switch to kernel gsbase and CR3: */ SWAPGS + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax /* * Pretend that the exception came from user mode: set up pt_regs - * as if we faulted immediately after IRET and clear EBX so that - * error_exit knows that we will be returning to user mode. + * as if we faulted immediately after IRET. */ mov %rsp, %rdi call fixup_bad_iret mov %rax, %rsp - decl %ebx jmp .Lerror_entry_from_usermode_after_swapgs END(error_entry) - -/* - * On entry, EBX is a "return to kernel mode" flag: - * 1: already in kernel mode, don't need SWAPGS - * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode - */ ENTRY(error_exit) UNWIND_HINT_REGS DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF - testl %ebx, %ebx - jnz retint_kernel + testb $3, CS(%rsp) + jz retint_kernel jmp retint_user END(error_exit) -/* Runs on exception stack */ -/* XXX: broken on Xen PV */ +/* + * Runs on exception stack. Xen PV does not go through this path at all, + * so we can use real assembly here. + * + * Registers: + * %r14: Used to save/restore the CR3 of the interrupted context + * when PAGE_TABLE_ISOLATION is in use. Do not clobber. + */ ENTRY(nmi) UNWIND_HINT_IRET_REGS + /* * We allow breakpoints in NMIs. If a breakpoint occurs, then * the iretq it performs will take us out of NMI context. @@ -1284,8 +1330,9 @@ ENTRY(nmi) * stacks lest we corrupt the "NMI executing" variable. */ - SWAPGS_UNSAFE_STACK + swapgs cld + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx movq %rsp, %rdx movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp UNWIND_HINT_IRET_REGS base=%rdx offset=8 @@ -1296,22 +1343,7 @@ ENTRY(nmi) pushq 1*8(%rdx) /* pt_regs->rip */ UNWIND_HINT_IRET_REGS pushq $-1 /* pt_regs->orig_ax */ - pushq %rdi /* pt_regs->di */ - pushq %rsi /* pt_regs->si */ - pushq (%rdx) /* pt_regs->dx */ - pushq %rcx /* pt_regs->cx */ - pushq %rax /* pt_regs->ax */ - pushq %r8 /* pt_regs->r8 */ - pushq %r9 /* pt_regs->r9 */ - pushq %r10 /* pt_regs->r10 */ - pushq %r11 /* pt_regs->r11 */ - pushq %rbx /* pt_regs->rbx */ - pushq %rbp /* pt_regs->rbp */ - pushq %r12 /* pt_regs->r12 */ - pushq %r13 /* pt_regs->r13 */ - pushq %r14 /* pt_regs->r14 */ - pushq %r15 /* pt_regs->r15 */ - UNWIND_HINT_REGS + PUSH_AND_CLEAR_REGS rdx=(%rdx) ENCODE_FRAME_POINTER /* @@ -1328,8 +1360,7 @@ ENTRY(nmi) * Return back to user mode. We must *not* do the normal exit * work, because we don't want to enable interrupts. */ - SWAPGS - jmp restore_regs_and_iret + jmp swapgs_restore_regs_and_return_to_usermode .Lnmi_from_kernel: /* @@ -1450,7 +1481,7 @@ nested_nmi_out: popq %rdx /* We are returning to kernel mode, so this cannot result in a fault. */ - INTERRUPT_RETURN + iretq first_nmi: /* Restore rdx. */ @@ -1481,7 +1512,7 @@ first_nmi: pushfq /* RFLAGS */ pushq $__KERNEL_CS /* CS */ pushq $1f /* RIP */ - INTERRUPT_RETURN /* continues at repeat_nmi below */ + iretq /* continues at repeat_nmi below */ UNWIND_HINT_IRET_REGS 1: #endif @@ -1522,7 +1553,6 @@ end_repeat_nmi: * frame to point back to repeat_nmi. 
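 *
 * (Editor's note: the nesting protocol hinges on the "NMI executing"
 * word kept on the NMI stack; while it is set, a nested NMI only
 * rewrites the outer IRET frame so that the first NMI re-runs via
 * repeat_nmi -- the "clear NMI executing" store at 5*8(%rsp) further
 * down ends that window.)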
*/ pushq $-1 /* ORIG_RAX: no syscall to restart */ - ALLOC_PT_GPREGS_ON_STACK /* * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit @@ -1539,34 +1569,40 @@ end_repeat_nmi: movq $-1, %rsi call do_nmi + RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 + testl %ebx, %ebx /* swapgs needed? */ jnz nmi_restore nmi_swapgs: SWAPGS_UNSAFE_STACK nmi_restore: - RESTORE_EXTRA_REGS - RESTORE_C_REGS + POP_REGS - /* Point RSP at the "iret" frame. */ - REMOVE_PT_GPREGS_FROM_STACK 6*8 + /* + * Skip orig_ax and the "outermost" frame to point RSP at the "iret" + * frame. + */ + addq $6*8, %rsp /* * Clear "NMI executing". Set DF first so that we can easily * distinguish the remaining code between here and IRET from - * the SYSCALL entry and exit paths. On a native kernel, we - * could just inspect RIP, but, on paravirt kernels, - * INTERRUPT_RETURN can translate into a jump into a - * hypercall page. + * the SYSCALL entry and exit paths. + * + * We arguably should just inspect RIP instead, but I (Andy) wrote + * this code when I had the misapprehension that Xen PV supported + * NMIs, and Xen PV would break that approach. */ std movq $0, 5*8(%rsp) /* clear "NMI executing" */ /* - * INTERRUPT_RETURN reads the "iret" frame and exits the NMI - * stack in a single instruction. We are returning to kernel - * mode, so this cannot result in a fault. + * iretq reads the "iret" frame and exits the NMI stack in a + * single instruction. We are returning to kernel mode, so this + * cannot result in a fault. Similarly, we don't need to worry + * about espfix64 on the way back to kernel mode. */ - INTERRUPT_RETURN + iretq END(nmi) ENTRY(ignore_sysret) diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index b5c7a56ed256..364ea4a207be 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -48,7 +48,11 @@ */ ENTRY(entry_SYSENTER_compat) /* Interrupts are off on entry. */ - SWAPGS_UNSAFE_STACK + SWAPGS + + /* We are about to clobber %rsp anyway, clobbering here is OK */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp /* @@ -81,15 +85,25 @@ ENTRY(entry_SYSENTER_compat) pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ + xorl %r8d, %r8d /* nospec r8 */ pushq $0 /* pt_regs->r9 = 0 */ + xorl %r9d, %r9d /* nospec r9 */ pushq $0 /* pt_regs->r10 = 0 */ + xorl %r10d, %r10d /* nospec r10 */ pushq $0 /* pt_regs->r11 = 0 */ + xorl %r11d, %r11d /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx */ pushq %rbp /* pt_regs->rbp (will be overwritten) */ + xorl %ebp, %ebp /* nospec rbp */ pushq $0 /* pt_regs->r12 = 0 */ + xorl %r12d, %r12d /* nospec r12 */ pushq $0 /* pt_regs->r13 = 0 */ + xorl %r13d, %r13d /* nospec r13 */ pushq $0 /* pt_regs->r14 = 0 */ + xorl %r14d, %r14d /* nospec r14 */ pushq $0 /* pt_regs->r15 = 0 */ + xorl %r15d, %r15d /* nospec r15 */ cld /* @@ -186,8 +200,13 @@ ENTRY(entry_SYSCALL_compat) /* Interrupts are off on entry. */ swapgs - /* Stash user ESP and switch to the kernel stack. */ + /* Stash user ESP */ movl %esp, %r8d + + /* Use %rsp as scratch reg. 
User ESP is stashed in r8 */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp + + /* Switch to the kernel stack */ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp /* Construct struct pt_regs on stack */ @@ -205,15 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe) pushq %rbp /* pt_regs->cx (stashed in bp) */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ + xorl %r8d, %r8d /* nospec r8 */ pushq $0 /* pt_regs->r9 = 0 */ + xorl %r9d, %r9d /* nospec r9 */ pushq $0 /* pt_regs->r10 = 0 */ + xorl %r10d, %r10d /* nospec r10 */ pushq $0 /* pt_regs->r11 = 0 */ + xorl %r11d, %r11d /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx */ pushq %rbp /* pt_regs->rbp (will be overwritten) */ + xorl %ebp, %ebp /* nospec rbp */ pushq $0 /* pt_regs->r12 = 0 */ + xorl %r12d, %r12d /* nospec r12 */ pushq $0 /* pt_regs->r13 = 0 */ + xorl %r13d, %r13d /* nospec r13 */ pushq $0 /* pt_regs->r14 = 0 */ + xorl %r14d, %r14d /* nospec r14 */ pushq $0 /* pt_regs->r15 = 0 */ + xorl %r15d, %r15d /* nospec r15 */ /* * User mode is traced as though IRQs are on, and SYSENTER @@ -256,10 +285,22 @@ sysret32_from_system_call: * when the system call started, which is already known to user * code. We zero R8-R10 to avoid info leaks. */ - xorq %r8, %r8 - xorq %r9, %r9 - xorq %r10, %r10 movq RSP-ORIG_RAX(%rsp), %rsp + + /* + * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored + * on the process stack which is not mapped to userspace and + * not readable after we SWITCH_TO_USER_CR3. Delay the CR3 + * switch until after the last reference to the process + * stack. + * + * %r8/%r9 are zeroed before the sysret, thus safe to clobber. + */ + SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9 + + xorl %r8d, %r8d + xorl %r9d, %r9d + xorl %r10d, %r10d swapgs sysretl END(entry_SYSCALL_compat) @@ -306,23 +347,36 @@ ENTRY(entry_INT80_compat) */ movl %eax, %eax - /* Construct struct pt_regs on stack (iret frame is already on stack) */ pushq %rax /* pt_regs->orig_ax */ + + /* switch to thread stack expects orig_ax to be pushed */ + call switch_to_thread_stack + pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ pushq %rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ + xorl %r8d, %r8d /* nospec r8 */ pushq $0 /* pt_regs->r9 = 0 */ + xorl %r9d, %r9d /* nospec r9 */ pushq $0 /* pt_regs->r10 = 0 */ + xorl %r10d, %r10d /* nospec r10 */ pushq $0 /* pt_regs->r11 = 0 */ + xorl %r11d, %r11d /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx */ pushq %rbp /* pt_regs->rbp */ + xorl %ebp, %ebp /* nospec rbp */ pushq %r12 /* pt_regs->r12 */ + xorl %r12d, %r12d /* nospec r12 */ pushq %r13 /* pt_regs->r13 */ + xorl %r13d, %r13d /* nospec r13 */ pushq %r14 /* pt_regs->r14 */ + xorl %r14d, %r14d /* nospec r14 */ pushq %r15 /* pt_regs->r15 */ + xorl %r15d, %r15d /* nospec r15 */ cld /* @@ -337,8 +391,7 @@ ENTRY(entry_INT80_compat) /* Go back to user mode. 
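 *
 * (Editor's note on the pushq-$0/xorl interleave used to build
 * pt_regs above:
 *
 *	pushq	$0		; the pt_regs slot holds a constant zero
 *	xorl	%r8d, %r8d	; and the live register is cleared too
 *
 * the 32-bit xorl zero-extends to the full 64-bit register, so no
 * stale user-controlled value is left for a speculatively executed
 * gadget to consume.)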
*/ TRACE_IRQS_ON - SWAPGS - jmp restore_regs_and_iret + jmp swapgs_restore_regs_and_return_to_usermode END(entry_INT80_compat) ENTRY(stub32_clone) diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c index 9c09775e589d..c176d2fab1da 100644 --- a/arch/x86/entry/syscall_64.c +++ b/arch/x86/entry/syscall_64.c @@ -7,14 +7,11 @@ #include #include -#define __SYSCALL_64_QUAL_(sym) sym -#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_##sym - -#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long __SYSCALL_64_QUAL_##qual(sym)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); +#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); #include #undef __SYSCALL_64 -#define __SYSCALL_64(nr, sym, qual) [nr] = __SYSCALL_64_QUAL_##qual(sym), +#define __SYSCALL_64(nr, sym, qual) [nr] = sym, extern long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); diff --git a/arch/x86/entry/syscalls/Makefile b/arch/x86/entry/syscalls/Makefile index 331f1dca5085..6fb9b57ed5ba 100644 --- a/arch/x86/entry/syscalls/Makefile +++ b/arch/x86/entry/syscalls/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -out := $(obj)/../../include/generated/asm -uapi := $(obj)/../../include/generated/uapi/asm +out := arch/$(SRCARCH)/include/generated/asm +uapi := arch/$(SRCARCH)/include/generated/uapi/asm # Create output directory if not already present _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \ diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index c366c0adeb40..c82dbf912216 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -98,6 +98,7 @@ CFLAGS_REMOVE_vvar.o = -pg CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds) VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \ + -fuse-ld=bfd -Wl,-soname=linux-vdso.so.1 \ -Wl,-z,max-page-size=4096 \ -Wl,-z,common-page-size=4096 @@ -172,7 +173,8 @@ quiet_cmd_vdso = VDSO $@ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \ - $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS) + $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS) \ + $(filter --target=% --gcc-toolchain=%,$(KBUILD_CFLAGS)) GCOV_PROFILE := n # diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index f279ba2643dc..542392b6aab6 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -37,6 +37,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include "vsyscall_trace.h" @@ -138,6 +139,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) WARN_ON_ONCE(address != regs->ip); + /* This should be unreachable in NATIVE mode. */ + if (WARN_ON(vsyscall_mode == NATIVE)) + return false; + if (vsyscall_mode == NONE) { warn_bad_vsyscall(KERN_INFO, regs, "vsyscall attempted with vsyscall=none"); @@ -329,16 +334,47 @@ int in_gate_area_no_mm(unsigned long addr) return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR; } +/* + * The VSYSCALL page is the only user-accessible page in the kernel address + * range. Normally, the kernel page tables can have _PAGE_USER clear, but + * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls + * are enabled. 
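+ *
+ * (Editor's note: user-mode access requires _PAGE_USER at every level
+ * of the page-table walk, which is why the helper below sets it on
+ * the PGD, P4D, PUD and PMD entries covering VSYSCALL_ADDR; the PTE
+ * itself gets its protection from the __set_fixmap() call in
+ * map_vsyscall().)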
+ * + * Some day we may create a "minimal" vsyscall mode in which we emulate + * vsyscalls but leave the page not present. If so, we skip calling + * this. + */ +void __init set_vsyscall_pgtable_user_bits(pgd_t *root) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset_pgd(root, VSYSCALL_ADDR); + set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER)); + p4d = p4d_offset(pgd, VSYSCALL_ADDR); +#if CONFIG_PGTABLE_LEVELS >= 5 + set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER)); +#endif + pud = pud_offset(p4d, VSYSCALL_ADDR); + set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER)); + pmd = pmd_offset(pud, VSYSCALL_ADDR); + set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER)); +} + void __init map_vsyscall(void) { extern char __vsyscall_page; unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page); - if (vsyscall_mode != NONE) + if (vsyscall_mode != NONE) { __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, vsyscall_mode == NATIVE ? PAGE_KERNEL_VSYSCALL : PAGE_KERNEL_VVAR); + set_vsyscall_pgtable_user_bits(swapper_pg_dir); + } BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) != (unsigned long)VSYSCALL_ADDR); diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c index a6eee5ac4f58..2aefacf5c5b2 100644 --- a/arch/x86/events/amd/power.c +++ b/arch/x86/events/amd/power.c @@ -277,7 +277,7 @@ static int __init amd_power_pmu_init(void) int ret; if (!x86_match_cpu(cpu_match)) - return 0; + return -ENODEV; if (!boot_cpu_has(X86_FEATURE_ACC_POWER)) return -ENODEV; diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 80534d3c2480..717c9219d00e 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -304,17 +305,20 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event) config = attr->config; - cache_type = (config >> 0) & 0xff; + cache_type = (config >> 0) & 0xff; if (cache_type >= PERF_COUNT_HW_CACHE_MAX) return -EINVAL; + cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX); cache_op = (config >> 8) & 0xff; if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) return -EINVAL; + cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX); cache_result = (config >> 16) & 0xff; if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return -EINVAL; + cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX); val = hw_cache_event_ids[cache_type][cache_op][cache_result]; @@ -421,6 +425,8 @@ int x86_setup_perfctr(struct perf_event *event) if (attr->config >= x86_pmu.max_events) return -EINVAL; + attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events); + /* * The generic map: */ @@ -1156,16 +1162,13 @@ int x86_perf_event_set_period(struct perf_event *event) per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; - if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) || - local64_read(&hwc->prev_count) != (u64)-left) { - /* - * The hw event starts counting from this event offset, - * mark it to be able to extract future deltas: - */ - local64_set(&hwc->prev_count, (u64)-left); + /* + * The hw event starts counting from this event offset, + * mark it to be able to extract future deltas: + */ + local64_set(&hwc->prev_count, (u64)-left); - wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); - } + wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); /* * Due to erratum on certain cpu we need @@ -2371,7 +2374,7 @@ static unsigned long get_segment_base(unsigned int segment) struct ldt_struct *ldt; /* IRQs 
are off, so this synchronizes with smp_store_release */ - ldt = lockless_dereference(current->active_mm->context.ldt); + ldt = READ_ONCE(current->active_mm->context.ldt); if (!ldt || idx >= ldt->nr_entries) return 0; diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index 141e07b06216..24ffa1e88cf9 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -582,6 +582,24 @@ static __init int bts_init(void) if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) return -ENODEV; + if (boot_cpu_has(X86_FEATURE_PTI)) { + /* + * BTS hardware writes through a virtual memory map; we must + * either use the kernel physical map, or the user mapping of + * the AUX buffer. + * + * However, since this driver supports per-CPU and per-task inherit, + * we cannot use the user mapping since it will not be available + * if we're not running the owning process. + * + * With PTI we can't use the kernel map either, because it's not + * there when we run userspace. + * + * For now, disable this driver when using PTI. + */ + return -ENODEV; + } + bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE | PERF_PMU_CAP_EXCLUSIVE; bts_pmu.task_ctx_nr = perf_sw_context; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 9fb9a1f1e47b..228732654cfe 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2201,9 +2201,15 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) int bit, loops; u64 status; int handled; + int pmu_enabled; cpuc = this_cpu_ptr(&cpu_hw_events); + /* + * Save the PMU state. + * It needs to be restored when leaving the handler. + */ + pmu_enabled = cpuc->enabled; /* * No known reason to not always do late ACK, * but just in case do it opt-in. @@ -2211,6 +2217,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) if (!x86_pmu.late_ack) apic_write(APIC_LVTPC, APIC_DM_NMI); intel_bts_disable_local(); + cpuc->enabled = 0; __intel_pmu_disable_all(); handled = intel_pmu_drain_bts_buffer(); handled += intel_bts_interrupt(); @@ -2320,7 +2327,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) done: /* Only restore PMU state when it's active. See x86_pmu_disable(). */ - if (cpuc->enabled) + cpuc->enabled = pmu_enabled; + if (pmu_enabled) __intel_pmu_enable_all(0, true); intel_bts_enable_local(); @@ -2958,6 +2966,10 @@ static unsigned long intel_pmu_free_running_flags(struct perf_event *event) if (event->attr.use_clockid) flags &= ~PERF_SAMPLE_TIME; + if (!event->attr.exclude_kernel) + flags &= ~PERF_SAMPLE_REGS_USER; + if (event->attr.sample_regs_user & ~PEBS_REGS) + flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR); return flags; } @@ -3184,13 +3196,13 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, * Therefore the effective (average) period matches the requested period, * despite coarser hardware granularity. 
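 *
 * (Editor's worked example of the arithmetic, as a sketch: a requested
 * period of 1000 is programmed as 1000 & ~0x3fULL == 960; the 40-count
 * shortfall is carried by the usual delta bookkeeping, so a later
 * interval is programmed correspondingly longer and the long-run
 * average stays at the requested 1000.)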
*/ -static unsigned bdw_limit_period(struct perf_event *event, unsigned left) +static u64 bdw_limit_period(struct perf_event *event, u64 left) { if ((event->hw.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xc0, .umask=0x01)) { if (left < 128) left = 128; - left &= ~0x3fu; + left &= ~0x3fULL; } return left; } @@ -3319,7 +3331,8 @@ static void intel_pmu_cpu_starting(int cpu) cpuc->lbr_sel = NULL; - flip_smm_bit(&x86_pmu.attr_freeze_on_smi); + if (x86_pmu.version > 1) + flip_smm_bit(&x86_pmu.attr_freeze_on_smi); if (!cpuc->shared_regs) return; @@ -3482,6 +3495,8 @@ static __initconst const struct x86_pmu core_pmu = { .cpu_dying = intel_pmu_cpu_dying, }; +static struct attribute *intel_pmu_attrs[]; + static __initconst const struct x86_pmu intel_pmu = { .name = "Intel", .handle_irq = intel_pmu_handle_irq, @@ -3512,6 +3527,8 @@ static __initconst const struct x86_pmu intel_pmu = { .format_attrs = intel_arch3_formats_attr, .events_sysfs_show = intel_event_sysfs_show, + .attrs = intel_pmu_attrs, + .cpu_prepare = intel_pmu_cpu_prepare, .cpu_starting = intel_pmu_cpu_starting, .cpu_dying = intel_pmu_cpu_dying, @@ -3555,7 +3572,7 @@ static int intel_snb_pebs_broken(int cpu) break; case INTEL_FAM6_SANDYBRIDGE_X: - switch (cpu_data(cpu).x86_mask) { + switch (cpu_data(cpu).x86_stepping) { case 6: rev = 0x618; break; case 7: rev = 0x70c; break; } @@ -3730,6 +3747,19 @@ EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1"); EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1"); static struct attribute *hsw_events_attrs[] = { + EVENT_PTR(mem_ld_hsw), + EVENT_PTR(mem_st_hsw), + EVENT_PTR(td_slots_issued), + EVENT_PTR(td_slots_retired), + EVENT_PTR(td_fetch_bubbles), + EVENT_PTR(td_total_slots), + EVENT_PTR(td_total_slots_scale), + EVENT_PTR(td_recovery_bubbles), + EVENT_PTR(td_recovery_bubbles_scale), + NULL +}; + +static struct attribute *hsw_tsx_events_attrs[] = { EVENT_PTR(tx_start), EVENT_PTR(tx_commit), EVENT_PTR(tx_abort), @@ -3742,18 +3772,16 @@ static struct attribute *hsw_events_attrs[] = { EVENT_PTR(el_conflict), EVENT_PTR(cycles_t), EVENT_PTR(cycles_ct), - EVENT_PTR(mem_ld_hsw), - EVENT_PTR(mem_st_hsw), - EVENT_PTR(td_slots_issued), - EVENT_PTR(td_slots_retired), - EVENT_PTR(td_fetch_bubbles), - EVENT_PTR(td_total_slots), - EVENT_PTR(td_total_slots_scale), - EVENT_PTR(td_recovery_bubbles), - EVENT_PTR(td_recovery_bubbles_scale), NULL }; +static __init struct attribute **get_hsw_events_attrs(void) +{ + return boot_cpu_has(X86_FEATURE_RTM) ? 
+ merge_attr(hsw_events_attrs, hsw_tsx_events_attrs) : + hsw_events_attrs; +} + static ssize_t freeze_on_smi_show(struct device *cdev, struct device_attribute *attr, char *buf) @@ -3832,6 +3860,8 @@ static struct attribute *intel_pmu_attrs[] = { __init int intel_pmu_init(void) { + struct attribute **extra_attr = NULL; + struct attribute **to_free = NULL; union cpuid10_edx edx; union cpuid10_eax eax; union cpuid10_ebx ebx; @@ -3839,7 +3869,6 @@ __init int intel_pmu_init(void) unsigned int unused; struct extra_reg *er; int version, i; - struct attribute **extra_attr = NULL; char *name; if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { @@ -3878,8 +3907,6 @@ __init int intel_pmu_init(void) x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters); - - x86_pmu.attrs = intel_pmu_attrs; /* * Quirk: v2 perfmon does not report fixed-purpose events, so * assume at least 3 events, when not running in a hypervisor: @@ -4182,7 +4209,7 @@ __init int intel_pmu_init(void) x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = hsw_get_event_constraints; - x86_pmu.cpu_events = hsw_events_attrs; + x86_pmu.cpu_events = get_hsw_events_attrs(); x86_pmu.lbr_double_abort = true; extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? hsw_format_attr : nhm_format_attr; @@ -4221,7 +4248,7 @@ __init int intel_pmu_init(void) x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = hsw_get_event_constraints; - x86_pmu.cpu_events = hsw_events_attrs; + x86_pmu.cpu_events = get_hsw_events_attrs(); x86_pmu.limit_period = bdw_limit_period; extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? hsw_format_attr : nhm_format_attr; @@ -4279,7 +4306,8 @@ __init int intel_pmu_init(void) extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? hsw_format_attr : nhm_format_attr; extra_attr = merge_attr(extra_attr, skl_format_attr); - x86_pmu.cpu_events = hsw_events_attrs; + to_free = extra_attr; + x86_pmu.cpu_events = get_hsw_events_attrs(); intel_pmu_pebs_data_source_skl( boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X); pr_cont("Skylake events, "); @@ -4386,6 +4414,7 @@ __init int intel_pmu_init(void) pr_cont("full-width counters, "); } + kfree(to_free); return 0; } diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 72db0664a53d..357e82dc0e2a 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -91,6 +91,7 @@ #include #include #include +#include #include #include #include "../perf_event.h" @@ -301,6 +302,7 @@ static int cstate_pmu_event_init(struct perf_event *event) } else if (event->pmu == &cstate_pkg_pmu) { if (cfg >= PERF_CSTATE_PKG_EVENT_MAX) return -EINVAL; + cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX); if (!pkg_msr[cfg].attr) return -EINVAL; event->hw.event_base = pkg_msr[cfg].msr; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 3674a4b6f8bd..25386be0d757 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -3,16 +3,19 @@ #include #include +#include #include +#include #include #include "../perf_event.h" +/* Waste a full page so it can be mapped into the cpu_entry_area */ +DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store); + /* The size of a BTS record in bytes: */ #define BTS_RECORD_SIZE 24 -#define BTS_BUFFER_SIZE (PAGE_SIZE << 4) -#define PEBS_BUFFER_SIZE (PAGE_SIZE << 4) #define PEBS_FIXUP_SIZE PAGE_SIZE /* @@ -279,17 +282,67 @@ void fini_debug_store_on_cpu(int cpu) static DEFINE_PER_CPU(void *, insn_buffer); -static int alloc_pebs_buffer(int 
cpu) +static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + unsigned long start = (unsigned long)cea; + phys_addr_t pa; + size_t msz = 0; + + pa = virt_to_phys(addr); + + preempt_disable(); + for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE) + cea_set_pte(cea, pa, prot); + + /* + * This is a cross-CPU update of the cpu_entry_area, we must shoot down + * all TLB entries for it. + */ + flush_tlb_kernel_range(start, start + size); + preempt_enable(); +} + +static void ds_clear_cea(void *cea, size_t size) +{ + unsigned long start = (unsigned long)cea; + size_t msz = 0; + + preempt_disable(); + for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE) + cea_set_pte(cea, 0, PAGE_NONE); + + flush_tlb_kernel_range(start, start + size); + preempt_enable(); +} + +static void *dsalloc_pages(size_t size, gfp_t flags, int cpu) +{ + unsigned int order = get_order(size); int node = cpu_to_node(cpu); - int max; - void *buffer, *ibuffer; + struct page *page; + + page = __alloc_pages_node(node, flags | __GFP_ZERO, order); + return page ? page_address(page) : NULL; +} + +static void dsfree_pages(const void *buffer, size_t size) +{ + if (buffer) + free_pages((unsigned long)buffer, get_order(size)); +} + +static int alloc_pebs_buffer(int cpu) +{ + struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); + struct debug_store *ds = hwev->ds; + size_t bsiz = x86_pmu.pebs_buffer_size; + int max, node = cpu_to_node(cpu); + void *buffer, *ibuffer, *cea; if (!x86_pmu.pebs) return 0; - buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node); + buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu); if (unlikely(!buffer)) return -ENOMEM; @@ -300,25 +353,27 @@ static int alloc_pebs_buffer(int cpu) if (x86_pmu.intel_cap.pebs_format < 2) { ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node); if (!ibuffer) { - kfree(buffer); + dsfree_pages(buffer, bsiz); return -ENOMEM; } per_cpu(insn_buffer, cpu) = ibuffer; } - - max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size; - - ds->pebs_buffer_base = (u64)(unsigned long)buffer; + hwev->ds_pebs_vaddr = buffer; + /* Update the cpu entry area mapping */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; + ds->pebs_buffer_base = (unsigned long) cea; + ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL); ds->pebs_index = ds->pebs_buffer_base; - ds->pebs_absolute_maximum = ds->pebs_buffer_base + - max * x86_pmu.pebs_record_size; - + max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size); + ds->pebs_absolute_maximum = ds->pebs_buffer_base + max; return 0; } static void release_pebs_buffer(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); + struct debug_store *ds = hwev->ds; + void *cea; if (!ds || !x86_pmu.pebs) return; @@ -326,73 +381,72 @@ static void release_pebs_buffer(int cpu) kfree(per_cpu(insn_buffer, cpu)); per_cpu(insn_buffer, cpu) = NULL; - kfree((void *)(unsigned long)ds->pebs_buffer_base); + /* Clear the fixmap */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; + ds_clear_cea(cea, x86_pmu.pebs_buffer_size); ds->pebs_buffer_base = 0; + dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size); + hwev->ds_pebs_vaddr = NULL; } static int alloc_bts_buffer(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; - int node = cpu_to_node(cpu); - int max, thresh; - void *buffer; + struct cpu_hw_events *hwev = 
per_cpu_ptr(&cpu_hw_events, cpu); + struct debug_store *ds = hwev->ds; + void *buffer, *cea; + int max; if (!x86_pmu.bts) return 0; - buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node); + buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu); if (unlikely(!buffer)) { WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__); return -ENOMEM; } - - max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; - thresh = max / 16; - - ds->bts_buffer_base = (u64)(unsigned long)buffer; + hwev->ds_bts_vaddr = buffer; + /* Update the fixmap */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; + ds->bts_buffer_base = (unsigned long) cea; + ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL); ds->bts_index = ds->bts_buffer_base; + max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; ds->bts_absolute_maximum = ds->bts_buffer_base + - max * BTS_RECORD_SIZE; + max * BTS_RECORD_SIZE; ds->bts_interrupt_threshold = ds->bts_absolute_maximum - - thresh * BTS_RECORD_SIZE; - + (max / 16) * BTS_RECORD_SIZE; return 0; } static void release_bts_buffer(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); + struct debug_store *ds = hwev->ds; + void *cea; if (!ds || !x86_pmu.bts) return; - kfree((void *)(unsigned long)ds->bts_buffer_base); + /* Clear the fixmap */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; + ds_clear_cea(cea, BTS_BUFFER_SIZE); ds->bts_buffer_base = 0; + dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE); + hwev->ds_bts_vaddr = NULL; } static int alloc_ds_buffer(int cpu) { - int node = cpu_to_node(cpu); - struct debug_store *ds; - - ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node); - if (unlikely(!ds)) - return -ENOMEM; + struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store; + memset(ds, 0, sizeof(*ds)); per_cpu(cpu_hw_events, cpu).ds = ds; - return 0; } static void release_ds_buffer(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; - - if (!ds) - return; - per_cpu(cpu_hw_events, cpu).ds = NULL; - kfree(ds); } void release_ds_buffers(void) @@ -1098,6 +1152,7 @@ static void setup_pebs_sample_data(struct perf_event *event, if (pebs == NULL) return; + regs->flags &= ~PERF_EFLAGS_EXACT; sample_type = event->attr.sample_type; dsrc = sample_type & PERF_SAMPLE_DATA_SRC; @@ -1142,7 +1197,6 @@ static void setup_pebs_sample_data(struct perf_event *event, */ *regs = *iregs; regs->flags = pebs->flags; - set_linear_ip(regs, pebs->ip); if (sample_type & PERF_SAMPLE_REGS_INTR) { regs->ax = pebs->ax; @@ -1178,13 +1232,22 @@ static void setup_pebs_sample_data(struct perf_event *event, #endif } - if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) { - regs->ip = pebs->real_ip; - regs->flags |= PERF_EFLAGS_EXACT; - } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs)) - regs->flags |= PERF_EFLAGS_EXACT; - else - regs->flags &= ~PERF_EFLAGS_EXACT; + if (event->attr.precise_ip > 1) { + /* Haswell and later have the eventing IP, so use it: */ + if (x86_pmu.intel_cap.pebs_format >= 2) { + set_linear_ip(regs, pebs->real_ip); + regs->flags |= PERF_EFLAGS_EXACT; + } else { + /* Otherwise use PEBS off-by-1 IP: */ + set_linear_ip(regs, pebs->ip); + + /* ... 
and try to fix it up using the LBR entries: */ + if (intel_pmu_pebs_fixup_ip(regs)) + regs->flags |= PERF_EFLAGS_EXACT; + } + } else + set_linear_ip(regs, pebs->ip); + if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) && x86_pmu.intel_cap.pebs_format >= 1 @@ -1251,17 +1314,84 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit) return NULL; } +/* + * Special variant of intel_pmu_save_and_restart() for auto-reload. + */ +static int +intel_pmu_save_and_restart_reload(struct perf_event *event, int count) +{ + struct hw_perf_event *hwc = &event->hw; + int shift = 64 - x86_pmu.cntval_bits; + u64 period = hwc->sample_period; + u64 prev_raw_count, new_raw_count; + s64 new, old; + + WARN_ON(!period); + + /* + * drain_pebs() only happens when the PMU is disabled. + */ + WARN_ON(this_cpu_read(cpu_hw_events.enabled)); + + prev_raw_count = local64_read(&hwc->prev_count); + rdpmcl(hwc->event_base_rdpmc, new_raw_count); + local64_set(&hwc->prev_count, new_raw_count); + + /* + * Since the counter increments a negative counter value and + * overflows on the sign switch, giving the interval: + * + * [-period, 0] + * + * the difference between two consecutive reads is: + * + * A) value2 - value1; + * when no overflows have happened in between, + * + * B) (0 - value1) + (value2 - (-period)); + * when one overflow happened in between, + * + * C) (0 - value1) + (n - 1) * (period) + (value2 - (-period)); + * when @n overflows happened in between. + * + * Here A) is the obvious difference, B) is the extension to the + * discrete interval, where the first term is to the top of the + * interval and the second term is from the bottom of the next + * interval and C) the extension to multiple intervals, where the + * middle term is the whole intervals covered. + * + * An equivalent of C, by reduction, is: + * + * value2 - value1 + n * period + */ + new = ((s64)(new_raw_count << shift) >> shift); + old = ((s64)(prev_raw_count << shift) >> shift); + local64_add(new - old + count * period, &event->count); + + perf_event_update_userpage(event); + + return 0; +} + static void __intel_pmu_pebs_event(struct perf_event *event, struct pt_regs *iregs, void *base, void *top, int bit, int count) { + struct hw_perf_event *hwc = &event->hw; struct perf_sample_data data; struct pt_regs regs; void *at = get_next_pebs_record_by_bit(base, top, bit); - if (!intel_pmu_save_and_restart(event) && - !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)) + if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { + /* + * Now, auto-reload is only enabled in fixed period mode. + * The reload value is always hwc->sample_period. + * May need to change it, if auto-reload is enabled in + * freq mode later. + */ + intel_pmu_save_and_restart_reload(event, count); + } else if (!intel_pmu_save_and_restart(event)) return; while (count > 1) { @@ -1313,8 +1443,11 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) return; n = top - at; - if (n <= 0) + if (n <= 0) { + if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) + intel_pmu_save_and_restart_reload(event, 0); return; + } __intel_pmu_pebs_event(event, iregs, at, top, 0, n); } @@ -1337,8 +1470,22 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) ds->pebs_index = ds->pebs_buffer_base; - if (unlikely(base >= top)) + if (unlikely(base >= top)) { + /* + * The drain_pebs() could be called twice in a short period + * for auto-reload event in pmu::read(). No + * overflows have happened in between. 
- if (unlikely(base >= top)) + if (unlikely(base >= top)) { + /* + * drain_pebs() can be called twice in a short period + * for an auto-reload event in pmu::read(), with no + * overflows having happened in between. We still need to + * call intel_pmu_save_and_restart_reload() to update + * event->count for this case. + */ + for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, + x86_pmu.max_pebs_events) { + event = cpuc->events[bit]; + if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) + intel_pmu_save_and_restart_reload(event, 0); + } return; + } for (at = base; at < top; at += x86_pmu.pebs_record_size) { struct pebs_record_nhm *p = at; diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index ae64d0b69729..cf372b90557e 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -1186,7 +1186,7 @@ void __init intel_pmu_lbr_init_atom(void) * on PMU interrupt */ if (boot_cpu_data.x86_model == 28 - && boot_cpu_data.x86_mask < 10) { + && boot_cpu_data.x86_stepping < 10) { pr_cont("LBR disabled due to erratum"); return; } diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c index a5604c352930..408879b0c0d4 100644 --- a/arch/x86/events/intel/p6.c +++ b/arch/x86/events/intel/p6.c @@ -234,7 +234,7 @@ static __initconst const struct x86_pmu p6_pmu = { static __init void p6_pmu_rdpmc_quirk(void) { - if (boot_cpu_data.x86_mask < 9) { + if (boot_cpu_data.x86_stepping < 9) { /* * PPro erratum 26; fixed in stepping 9 and above. */ diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index d45e06346f14..c56cb37b88e3 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -218,7 +218,7 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *e u64 prev_count, new_count, delta; int shift; - if (event->hw.idx >= UNCORE_PMC_IDX_FIXED) + if (event->hw.idx == UNCORE_PMC_IDX_FIXED) shift = 64 - uncore_fixed_ctr_bits(box); else shift = 64 - uncore_perf_ctr_bits(box); diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c index 93e7a8397cde..173e2674be6e 100644 --- a/arch/x86/events/intel/uncore_nhmex.c +++ b/arch/x86/events/intel/uncore_nhmex.c @@ -246,7 +246,7 @@ static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct p { struct hw_perf_event *hwc = &event->hw; - if (hwc->idx >= UNCORE_PMC_IDX_FIXED) + if (hwc->idx == UNCORE_PMC_IDX_FIXED) wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0) wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 95cb19f4e06f..2dae3f585c01 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -3035,11 +3035,19 @@ static struct intel_uncore_type *bdx_msr_uncores[] = { NULL, }; +/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */ +static struct event_constraint bdx_uncore_pcu_constraints[] = { + EVENT_CONSTRAINT(0x80, 0xe, 0x80), + EVENT_CONSTRAINT_END +}; + void bdx_uncore_cpu_init(void) { if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; uncore_msr_uncores = bdx_msr_uncores; + + hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; } static struct intel_uncore_type bdx_uncore_ha = { @@ -3554,24 +3562,27 @@ static struct intel_uncore_type *skx_msr_uncores[] = { NULL, }; +/* + * To determine the number of CHAs, read bits 27:0 of the CAPID6 register, + * which is located at Device 30, Function 3, Offset 0x9C (PCI ID 0x2083). + */
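The counting itself is just a popcount over the masked capability bits. A hedged user-space sketch with a made-up register value (the kernel reads the real value with pci_read_config_dword() and counts it with hweight32()):

#include <stdio.h>
#include <stdint.h>

#define SKX_CHA_BIT_MASK        ((1u << 28) - 1)        /* GENMASK(27, 0) */

int main(void)
{
        uint32_t capid6 = 0x00ffffff;   /* hypothetical: 24 CHAs present */

        /* One set bit per present CHA, like hweight32() in the kernel. */
        printf("%d CHAs\n", __builtin_popcount(capid6 & SKX_CHA_BIT_MASK));
        return 0;
}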
+#define SKX_CAPID6 0x9c +#define SKX_CHA_BIT_MASK GENMASK(27, 0) + static int skx_count_chabox(void) { - struct pci_dev *chabox_dev = NULL; - int bus, count = 0; + struct pci_dev *dev = NULL; + u32 val = 0; - while (1) { - chabox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x208d, chabox_dev); - if (!chabox_dev) - break; - if (count == 0) - bus = chabox_dev->bus->number; - if (bus != chabox_dev->bus->number) - break; - count++; - } + dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev); + if (!dev) + goto out; - pci_dev_put(chabox_dev); - return count; + pci_read_config_dword(dev, SKX_CAPID6, &val); + val &= SKX_CHA_BIT_MASK; +out: + pci_dev_put(dev); + return hweight32(val); } void skx_uncore_cpu_init(void) @@ -3598,7 +3609,7 @@ static struct intel_uncore_type skx_uncore_imc = { }; static struct attribute *skx_upi_uncore_formats_attr[] = { - &format_attr_event_ext.attr, + &format_attr_event.attr, &format_attr_umask_ext.attr, &format_attr_edge.attr, &format_attr_inv.attr, diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index 14efaa0e8684..81dd57280441 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/perf_event.h> +#include <linux/nospec.h> #include <asm/intel-family.h> enum perf_msr_id { @@ -145,9 +146,6 @@ static int msr_event_init(struct perf_event *event) if (event->attr.type != event->pmu->type) return -ENOENT; - if (cfg >= PERF_MSR_EVENT_MAX) - return -EINVAL; - /* unsupported modes and filters */ if (event->attr.exclude_user || event->attr.exclude_kernel || @@ -158,6 +156,11 @@ static int msr_event_init(struct perf_event *event) event->attr.sample_period) /* no sampling */ return -EINVAL; + if (cfg >= PERF_MSR_EVENT_MAX) + return -EINVAL; + + cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX); + if (!msr[cfg].attr) return -EINVAL; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 4196f81ec0e1..dc4728eccfd8 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -14,6 +14,8 @@ #include <linux/perf_event.h> +#include <asm/intel_ds.h> + /* To enable MSR tracing please use the generic trace points. */ /* @@ -77,38 +79,41 @@ struct amd_nb { struct event_constraint event_constraints[X86_PMC_IDX_MAX]; }; -/* The maximal number of PEBS events: */ -#define MAX_PEBS_EVENTS 8 #define PEBS_COUNTER_MASK ((1ULL << MAX_PEBS_EVENTS) - 1) /* * Flags PEBS can handle without a PMI. * * TID can only be handled by flushing at context switch. + * REGS_USER can be handled for events limited to ring 3. + * */ #define PEBS_FREERUNNING_FLAGS \ (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \ PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \ PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \ - PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR) - -/* - * A debug store configuration. - * - * We only support architectures that use 64bit fields.
- */ -struct debug_store { - u64 bts_buffer_base; - u64 bts_index; - u64 bts_absolute_maximum; - u64 bts_interrupt_threshold; - u64 pebs_buffer_base; - u64 pebs_index; - u64 pebs_absolute_maximum; - u64 pebs_interrupt_threshold; - u64 pebs_event_reset[MAX_PEBS_EVENTS]; -}; + PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \ + PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER) + +#define PEBS_REGS \ + (PERF_REG_X86_AX | \ + PERF_REG_X86_BX | \ + PERF_REG_X86_CX | \ + PERF_REG_X86_DX | \ + PERF_REG_X86_DI | \ + PERF_REG_X86_SI | \ + PERF_REG_X86_SP | \ + PERF_REG_X86_BP | \ + PERF_REG_X86_IP | \ + PERF_REG_X86_FLAGS | \ + PERF_REG_X86_R8 | \ + PERF_REG_X86_R9 | \ + PERF_REG_X86_R10 | \ + PERF_REG_X86_R11 | \ + PERF_REG_X86_R12 | \ + PERF_REG_X86_R13 | \ + PERF_REG_X86_R14 | \ + PERF_REG_X86_R15) /* * Per register state. @@ -194,6 +199,8 @@ struct cpu_hw_events { * Intel DebugStore bits */ struct debug_store *ds; + void *ds_pebs_vaddr; + void *ds_bts_vaddr; u64 pebs_enabled; int n_pebs; int n_large_pebs; @@ -549,7 +556,7 @@ struct x86_pmu { struct x86_pmu_quirk *quirks; int perfctr_second_write; bool late_ack; - unsigned (*limit_period)(struct perf_event *event, unsigned l); + u64 (*limit_period)(struct perf_event *event, u64 l); /* * sysfs attrs diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index a5db63f728a2..2e9d58cc371e 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -110,10 +110,17 @@ static int hv_cpu_init(unsigned int cpu) */ void hyperv_init(void) { - u64 guest_id; + u64 guest_id, required_msrs; union hv_x64_msr_hypercall_contents hypercall_msr; - if (x86_hyper != &x86_hyper_ms_hyperv) + if (x86_hyper_type != X86_HYPER_MS_HYPERV) + return; + + /* Absolutely required MSRs */ + required_msrs = HV_X64_MSR_HYPERCALL_AVAILABLE | + HV_X64_MSR_VP_INDEX_AVAILABLE; + + if ((ms_hyperv.features & required_msrs) != required_msrs) return; /* Allocate percpu VP index */ diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c index 9cc9e1c1e2db..56c9ebac946f 100644 --- a/arch/x86/hyperv/mmu.c +++ b/arch/x86/hyperv/mmu.c @@ -137,7 +137,12 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus, } if (info->mm) { + /* + * AddressSpace argument must match the CR3 with PCID bits + * stripped out. + */ flush->address_space = virt_to_phys(info->mm->pgd); + flush->address_space &= CR3_ADDR_MASK; flush->flags = 0; } else { flush->address_space = 0; @@ -219,7 +224,12 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus, } if (info->mm) { + /* + * AddressSpace argument must match the CR3 with PCID bits + * stripped out. 
+ */ flush->address_space = virt_to_phys(info->mm->pgd); + flush->address_space &= CR3_ADDR_MASK; flush->flags = 0; } else { flush->address_space = 0; @@ -278,8 +288,6 @@ void hyperv_setup_mmu_ops(void) if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED)) return; - setup_clear_cpu_cap(X86_FEATURE_PCID); - if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) { pr_info("Using hypercall for remote TLB flush\n"); pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others; diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 8d0ec9df1cbe..f077401869ee 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -94,7 +94,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) if (boot_cpu_data.x86 == 0x0F && boot_cpu_data.x86_vendor == X86_VENDOR_AMD && boot_cpu_data.x86_model <= 0x05 && - boot_cpu_data.x86_mask < 0x0A) + boot_cpu_data.x86_stepping < 0x0A) return 1; else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E)) return 1; diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index dbfd0854651f..4cd6a3b71824 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -140,7 +140,7 @@ static inline int alternatives_text_reserved(void *start, void *end) ".popsection\n" \ ".pushsection .altinstr_replacement, \"ax\"\n" \ ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ - ".popsection" + ".popsection\n" #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ OLDINSTR_2(oldinstr, 1, 2) \ @@ -151,7 +151,7 @@ static inline int alternatives_text_reserved(void *start, void *end) ".pushsection .altinstr_replacement, \"ax\"\n" \ ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ - ".popsection" + ".popsection\n" /* * Alternative instructions for different CPU types or capabilities. @@ -218,13 +218,11 @@ static inline int alternatives_text_reserved(void *start, void *end) */ #define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \ output, input...) 
\ -{ \ asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\ "call %P[new2]", feature2) \ : output, ASM_CALL_CONSTRAINT \ : [old] "i" (oldfunc), [new1] "i" (newfunc1), \ - [new2] "i" (newfunc2), ## input); \ -} + [new2] "i" (newfunc2), ## input) /* * use this macro(s) if you need more than one output parameter diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 5f01671c68f2..a1ed92aae12a 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -10,6 +10,7 @@ #include #include #include +#include #define ARCH_APICTIMER_STOPS_ON_C3 1 @@ -613,12 +614,20 @@ extern int default_check_phys_apicid_present(int phys_apicid); #endif #endif /* CONFIG_X86_LOCAL_APIC */ + +#ifdef CONFIG_SMP +bool apic_id_is_primary_thread(unsigned int id); +#else +static inline bool apic_id_is_primary_thread(unsigned int id) { return false; } +#endif + extern void irq_enter(void); extern void irq_exit(void); static inline void entering_irq(void) { irq_enter(); + kvm_set_cpu_l1tf_flush_l1d(); } static inline void entering_ack_irq(void) @@ -631,6 +640,7 @@ static inline void ipi_entering_ack_irq(void) { irq_enter(); ack_APIC_irq(); + kvm_set_cpu_l1tf_flush_l1d(); } static inline void exiting_irq(void) diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h index 5b0579abb398..3ac991d81e74 100644 --- a/arch/x86/include/asm/archrandom.h +++ b/arch/x86/include/asm/archrandom.h @@ -45,7 +45,7 @@ static inline bool rdrand_long(unsigned long *v) bool ok; unsigned int retry = RDRAND_RETRY_LOOPS; do { - asm volatile(RDRAND_LONG "\n\t" + asm volatile(RDRAND_LONG CC_SET(c) : CC_OUT(c) (ok), "=a" (*v)); if (ok) @@ -59,7 +59,7 @@ static inline bool rdrand_int(unsigned int *v) bool ok; unsigned int retry = RDRAND_RETRY_LOOPS; do { - asm volatile(RDRAND_INT "\n\t" + asm volatile(RDRAND_INT CC_SET(c) : CC_OUT(c) (ok), "=a" (*v)); if (ok) @@ -71,7 +71,7 @@ static inline bool rdrand_int(unsigned int *v) static inline bool rdseed_long(unsigned long *v) { bool ok; - asm volatile(RDSEED_LONG "\n\t" + asm volatile(RDSEED_LONG CC_SET(c) : CC_OUT(c) (ok), "=a" (*v)); return ok; @@ -80,7 +80,7 @@ static inline bool rdseed_long(unsigned long *v) static inline bool rdseed_int(unsigned int *v) { bool ok; - asm volatile(RDSEED_INT "\n\t" + asm volatile(RDSEED_INT CC_SET(c) : CC_OUT(c) (ok), "=a" (*v)); return ok; diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h index ff700d81e91e..1908214b9125 100644 --- a/arch/x86/include/asm/asm-prototypes.h +++ b/arch/x86/include/asm/asm-prototypes.h @@ -11,7 +11,31 @@ #include #include #include +#include #ifndef CONFIG_X86_CMPXCHG64 extern void cmpxchg8b_emu(void); #endif + +#ifdef CONFIG_RETPOLINE +#ifdef CONFIG_X86_32 +#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void); +#else +#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void); +INDIRECT_THUNK(8) +INDIRECT_THUNK(9) +INDIRECT_THUNK(10) +INDIRECT_THUNK(11) +INDIRECT_THUNK(12) +INDIRECT_THUNK(13) +INDIRECT_THUNK(14) +INDIRECT_THUNK(15) +#endif +INDIRECT_THUNK(ax) +INDIRECT_THUNK(bx) +INDIRECT_THUNK(cx) +INDIRECT_THUNK(dx) +INDIRECT_THUNK(si) +INDIRECT_THUNK(di) +INDIRECT_THUNK(bp) +#endif /* CONFIG_RETPOLINE */ diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 219faaec51df..3bf87f92b932 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -46,6 +46,65 @@ #define _ASM_SI __ASM_REG(si) #define _ASM_DI __ASM_REG(di) 
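The CC_SET()/CC_OUT() conversions in the RDRAND/RDSEED helpers above drop the trailing "\n\t" and let the compiler consume the carry flag directly. A standalone sketch of the same pattern, using the GCC/Clang flag-output constraint ("=@ccc") that CC_OUT(c) expands to on compilers that support flag outputs; rdrand_once() is an illustrative name, not a kernel function, and running it requires a CPU with RDRAND:

#include <stdbool.h>
#include <stdio.h>

/* One RDRAND attempt: returns true and fills *v when CF is set. */
static bool rdrand_once(unsigned long *v)
{
        bool ok;

        asm volatile("rdrand %0"
                     : "=r" (*v), "=@ccc" (ok));
        return ok;
}

int main(void)
{
        unsigned long v;

        if (rdrand_once(&v))
                printf("random: %lx\n", v);
        else
                printf("rdrand not ready, caller should retry\n");
        return 0;
}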
+#ifndef __x86_64__ +/* 32 bit */ + +#define _ASM_ARG1 _ASM_AX +#define _ASM_ARG2 _ASM_DX +#define _ASM_ARG3 _ASM_CX + +#define _ASM_ARG1L eax +#define _ASM_ARG2L edx +#define _ASM_ARG3L ecx + +#define _ASM_ARG1W ax +#define _ASM_ARG2W dx +#define _ASM_ARG3W cx + +#define _ASM_ARG1B al +#define _ASM_ARG2B dl +#define _ASM_ARG3B cl + +#else +/* 64 bit */ + +#define _ASM_ARG1 _ASM_DI +#define _ASM_ARG2 _ASM_SI +#define _ASM_ARG3 _ASM_DX +#define _ASM_ARG4 _ASM_CX +#define _ASM_ARG5 r8 +#define _ASM_ARG6 r9 + +#define _ASM_ARG1Q rdi +#define _ASM_ARG2Q rsi +#define _ASM_ARG3Q rdx +#define _ASM_ARG4Q rcx +#define _ASM_ARG5Q r8 +#define _ASM_ARG6Q r9 + +#define _ASM_ARG1L edi +#define _ASM_ARG2L esi +#define _ASM_ARG3L edx +#define _ASM_ARG4L ecx +#define _ASM_ARG5L r8d +#define _ASM_ARG6L r9d + +#define _ASM_ARG1W di +#define _ASM_ARG2W si +#define _ASM_ARG3W dx +#define _ASM_ARG4W cx +#define _ASM_ARG5W r8w +#define _ASM_ARG6W r9w + +#define _ASM_ARG1B dil +#define _ASM_ARG2B sil +#define _ASM_ARG3B dl +#define _ASM_ARG4B cl +#define _ASM_ARG5B r8b +#define _ASM_ARG6B r9b + +#endif + /* * Macros to generate condition code outputs from inline assembly, * The output operand must be type "bool". @@ -136,6 +195,7 @@ #endif #ifndef __ASSEMBLY__ +#ifndef __BPF__ /* * This output constraint should be used for any inline asm which has a "call" * instruction. Otherwise the asm may be inserted before the frame pointer @@ -145,5 +205,6 @@ register unsigned long current_stack_pointer asm(_ASM_SP); #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) #endif +#endif #endif /* _ASM_X86_ASM_H */ diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 01727dbc294a..a04f0c242a28 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -24,6 +24,34 @@ #define wmb() asm volatile("sfence" ::: "memory") #endif +/** + * array_index_mask_nospec() - generate a mask that is ~0UL when the + * bounds check succeeds and 0 otherwise + * @index: array element index + * @size: number of elements in array + * + * Returns: + * 0 - (index < size) + */ +static inline unsigned long array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + unsigned long mask; + + asm volatile ("cmp %1,%2; sbb %0,%0;" + :"=r" (mask) + :"g"(size),"r" (index) + :"cc"); + return mask; +} + +/* Override the default implementation from linux/nospec.h. */ +#define array_index_mask_nospec array_index_mask_nospec + +/* Prevent speculative execution past this barrier. 
*/ +#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \ + "lfence", X86_FEATURE_LFENCE_RDTSC) + #ifdef CONFIG_X86_PPRO_FENCE #define dma_rmb() rmb() #else diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 2bcf47314959..3fa039855b8f 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -143,7 +143,7 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) { bool negative; - asm volatile(LOCK_PREFIX "andb %2,%1\n\t" + asm volatile(LOCK_PREFIX "andb %2,%1" CC_SET(s) : CC_OUT(s) (negative), ADDR : "ir" ((char) ~(1 << nr)) : "memory"); @@ -246,7 +246,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * { bool oldbit; - asm("bts %2,%1\n\t" + asm("bts %2,%1" CC_SET(c) : CC_OUT(c) (oldbit), ADDR : "Ir" (nr)); @@ -286,7 +286,7 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long { bool oldbit; - asm volatile("btr %2,%1\n\t" + asm volatile("btr %2,%1" CC_SET(c) : CC_OUT(c) (oldbit), ADDR : "Ir" (nr)); @@ -298,7 +298,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon { bool oldbit; - asm volatile("btc %2,%1\n\t" + asm volatile("btc %2,%1" CC_SET(c) : CC_OUT(c) (oldbit), ADDR : "Ir" (nr) : "memory"); @@ -329,7 +329,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l { bool oldbit; - asm volatile("bt %2,%1\n\t" + asm volatile("bt %2,%1" CC_SET(c) : CC_OUT(c) (oldbit) : "m" (*(unsigned long *)addr), "Ir" (nr));
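For context on the new array_index_mask_nospec() above: the cmp/sbb pair yields ~0UL when index < size and 0 otherwise, and callers AND the index with that mask so an out-of-bounds index is clamped to 0 even under speculative execution. A hedged user-space sketch of that use; the kernel wraps this in array_index_nospec() from linux/nospec.h rather than open-coding it, and index_mask() here is an illustrative name:

#include <stdio.h>

/* mask = (index < size) ? ~0UL : 0UL, computed without a branch. */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
        unsigned long mask;

        asm volatile ("cmp %1,%2; sbb %0,%0;"
                      : "=r" (mask)
                      : "g" (size), "r" (index)
                      : "cc");
        return mask;
}

int main(void)
{
        unsigned long table[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
        unsigned long idx = 9;  /* out of bounds on purpose */

        idx &= index_mask(idx, 8);      /* clamped to 0 */
        printf("table[%lu] = %lu\n", idx, table[idx]);
        return 0;
}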
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index 34d99af43994..6804d6642767 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -5,23 +5,20 @@ #include /* - * Since some emulators terminate on UD2, we cannot use it for WARN. - * Since various instruction decoders disagree on the length of UD1, - * we cannot use it either. So use UD0 for WARN. + * Although some emulators terminate on UD2, we use it for WARN(). * - * (binutils knows about "ud1" but {en,de}codes it as 2 bytes, whereas - * our kernel decoder thinks it takes a ModRM byte, which seems consistent - * with various things like the Intel SDM instruction encoding rules) + * Various instruction decoders/specs disagree on the encoding of + * UD0/UD1, so we avoid those. */ -#define ASM_UD0 ".byte 0x0f, 0xff" +#define ASM_UD0 ".byte 0x0f, 0xff" /* + ModRM (for Intel) */ #define ASM_UD1 ".byte 0x0f, 0xb9" /* + ModRM */ #define ASM_UD2 ".byte 0x0f, 0x0b" #define INSN_UD0 0xff0f #define INSN_UD2 0x0b0f -#define LEN_UD0 2 +#define LEN_UD2 2 #ifdef CONFIG_GENERIC_BUG @@ -77,7 +74,11 @@ do { \ unreachable(); \ } while (0) -#define __WARN_FLAGS(flags) _BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags)) +#define __WARN_FLAGS(flags) \ +do { \ + _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \ + annotate_reachable(); \ +} while (0) #include diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index 9eef9cc64c68..2cbd75dd2fd3 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h @@ -7,6 +7,7 @@ */ #include #include +#include #include #include #include @@ -209,7 +210,6 @@ typedef struct compat_siginfo { } compat_siginfo_t; #define COMPAT_OFF_T_MAX 0x7fffffff -#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL struct compat_ipc64_perm { compat_key_t key; diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h new file mode 100644 index 000000000000..4a7884b8dca5 --- /dev/null +++ b/arch/x86/include/asm/cpu_entry_area.h @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 + +#ifndef _ASM_X86_CPU_ENTRY_AREA_H +#define _ASM_X86_CPU_ENTRY_AREA_H + +#include <linux/percpu-defs.h> +#include <asm/processor.h> +#include <asm/intel_ds.h> + +/* + * cpu_entry_area is a percpu region that contains things needed by the CPU + * and early entry/exit code. Real types aren't used for all fields here + * to avoid circular header dependencies. + * + * Every field is a virtual alias of some other allocated backing store. + * There is no direct allocation of a struct cpu_entry_area. + */ +struct cpu_entry_area { + char gdt[PAGE_SIZE]; + + /* + * The GDT is just below entry_stack and thus serves (on x86_64) as + * a read-only guard page. + */ + struct entry_stack_page entry_stack_page; + + /* + * On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because + * we need task switches to work, and task switches write to the TSS. + */ + struct tss_struct tss; + + char entry_trampoline[PAGE_SIZE]; + +#ifdef CONFIG_X86_64 + /* + * Exception stacks used for IST entries. + * + * In the future, this should have a separate slot for each stack + * with guard pages between them. + */ + char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]; +#endif +#ifdef CONFIG_CPU_SUP_INTEL + /* + * Per CPU debug store for Intel performance monitoring. Wastes a + * full page at the moment. + */ + struct debug_store cpu_debug_store; + /* + * The actual PEBS/BTS buffers must be mapped to user space. + * Reserve enough fixmap PTEs.
+ */ + struct debug_store_buffers cpu_debug_buffers; +#endif +}; + +#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area)) +#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS) + +DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); + +extern void setup_cpu_entry_areas(void); +extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags); + +#define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE +#define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE) + +#define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT) + +#define CPU_ENTRY_AREA_MAP_SIZE \ + (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE) + +extern struct cpu_entry_area *get_cpu_entry_area(int cpu); + +static inline struct entry_stack *cpu_entry_stack(int cpu) +{ + return &get_cpu_entry_area(cpu)->entry_stack_page.stack; +} + +#endif diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 0dfa68438e80..70eddb3922ff 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -29,6 +29,7 @@ enum cpuid_leafs CPUID_8000_000A_EDX, CPUID_7_ECX, CPUID_8000_0007_EBX, + CPUID_7_EDX, }; #ifdef CONFIG_X86_FEATURE_NAMES @@ -79,8 +80,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \ REQUIRED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 18)) + BUILD_BUG_ON_ZERO(NCAPINTS != 19)) #define DISABLED_MASK_BIT_SET(feature_bit) \ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ @@ -101,8 +103,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \ DISABLED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 18)) + BUILD_BUG_ON_ZERO(NCAPINTS != 19)) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ @@ -126,16 +129,17 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) #define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) -#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability)) -#define setup_clear_cpu_cap(bit) do { \ - clear_cpu_cap(&boot_cpu_data, bit); \ - set_bit(bit, (unsigned long *)cpu_caps_cleared); \ -} while (0) + +extern void setup_clear_cpu_cap(unsigned int bit); +extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); + #define setup_force_cpu_cap(bit) do { \ set_cpu_cap(&boot_cpu_data, bit); \ set_bit(bit, (unsigned long *)cpu_caps_set); \ } while (0) +#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit) + #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS) /* * Static testing of CPU features. Used the same as boot_cpu_has(). 
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 793690fbda36..8418462298e7 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -13,173 +13,176 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 18 /* N 32-bit words worth of info */ -#define NBUGINTS 1 /* N 32-bit bug flags */ +#define NCAPINTS 19 /* N 32-bit words worth of info */ +#define NBUGINTS 1 /* N 32-bit bug flags */ /* * Note: If the comment begins with a quoted string, that string is used * in /proc/cpuinfo instead of the macro name. If the string is "", * this feature bit is not displayed in /proc/cpuinfo at all. + * + * When adding new features here that depend on other features, + * please update the table in kernel/cpu/cpuid-deps.c as well. */ -/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ -#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ -#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ -#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ -#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ -#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ -#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ -#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ -#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ -#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ -#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ -#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ -#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ -#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ -#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ -#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */ - /* (plus FCMOVcc, FCOMI with FPU) */ -#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ -#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ -#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ -#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ -#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ -#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ -#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ -#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ -#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ -#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ -#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ -#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ -#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ -#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ -#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */ +/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */ +#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ +#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ +#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ +#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ +#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ +#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ +#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ +#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ +#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ +#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ +#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ +#define 
X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ +#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ +#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ +#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */ +#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ +#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ +#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ +#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ +#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ +#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ +#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ +#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ +#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ +#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ +#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ +#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ +#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ +#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ +#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */ /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ /* Don't duplicate feature flags which are redundant with Intel! */ -#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ -#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */ -#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ -#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ -#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ -#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ -#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ -#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */ -#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */ -#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! 
*/ +#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ +#define X86_FEATURE_MP ( 1*32+19) /* MP Capable */ +#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ +#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ +#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ +#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ +#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ +#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */ +#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */ +#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */ /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ -#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ -#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ -#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ +#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ +#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ +#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ /* Other features, Linux-defined mapping, word 3 */ /* This range is used for feature bits which conflict or are synthesized */ -#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ -#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ -#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ -#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ -/* cpu types for specific tunings: */ -#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ -#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ -#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ -#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ -#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ -#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ -#define X86_FEATURE_ART ( 3*32+10) /* Platform has always running timer (ART) */ -#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ -#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ -#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ -#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */ -#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */ -#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */ -#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ -#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ -#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ -#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ -#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ -#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ -#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ -#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ -#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */ -#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ -#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ -#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ -#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ -#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */ +#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ +#define 
X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ +#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ +#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ + +/* CPU types for specific tunings: */ +#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ +#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ +#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ +#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ +#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ +#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */ +#define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */ +#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ +#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ +#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ +#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */ +#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */ +#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */ +#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" MFENCE synchronizes RDTSC */ +#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */ +#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ +#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ +#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ +#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */ +#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ +#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ +#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */ +#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */ +#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */ +#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */ +#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ +#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */ -/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ -#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ -#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ -#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ -#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */ -#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. 
Debug Store */ -#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ -#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */ -#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ -#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ -#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ -#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ -#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ -#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ -#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ -#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ -#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */ -#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ -#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ -#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ -#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ -#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */ -#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ -#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ -#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */ -#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ -#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ -#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */ -#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ -#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */ -#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */ -#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ +/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */ +#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ +#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ +#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ +#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */ +#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */ +#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ +#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */ +#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ +#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ +#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ +#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ +#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ +#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ +#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */ +#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ +#define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */ +#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ +#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ +#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ +#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ +#define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */ +#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ +#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ +#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */ +#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ +#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */ +#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */ +#define X86_FEATURE_AVX 
( 4*32+28) /* Advanced Vector Extensions */ +#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */ +#define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */ +#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ -#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ -#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ -#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ -#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ -#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ -#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ -#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ -#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ -#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ -#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ +#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ +#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ +#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ +#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ +#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ +#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ +#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ +#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ +#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ +#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ -/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ -#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ -#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ -#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */ -#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ -#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ -#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ -#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ -#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ -#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ -#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ -#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ -#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ -#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ -#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ -#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ -#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ -#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */ -#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ -#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */ -#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ -#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ -#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ -#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ -#define X86_FEATURE_PTSC ( 6*32+27) /* performance time-stamp counter */ -#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */ -#define X86_FEATURE_MWAITX ( 
6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ +/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */ +#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ +#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ +#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */ +#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ +#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ +#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ +#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ +#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ +#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ +#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ +#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ +#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ +#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ +#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ +#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ +#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ +#define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */ +#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ +#define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */ +#define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */ +#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */ +#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ +#define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */ +#define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */ +#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */ +#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */ /* * Auxiliary flags: Linux defined - For features scattered in various @@ -187,146 +190,188 @@ * * Reuse free bits when adding new feature flags! 
*/ -#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */ -#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */ -#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ -#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ -#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ -#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ -#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ - -#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ -#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ -#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ +#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */ +#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */ +#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ +#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ +#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ +#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ +#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ +#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */ +#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ +#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ +#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ +#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ +#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ +#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ +#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ -#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ -#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ -#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */ -#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */ +#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ +#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */ +#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ +#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ -#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ +#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ +#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ +#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. 
*/ +#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */ +#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */ +#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */ +#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */ +#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */ /* Virtualization flags: Linux defined, word 8 */ -#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ -#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ -#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ -#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ -#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ +#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ +#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ +#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ +#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ +#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ -#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ -#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ +#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */ +#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ -/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ -#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ -#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */ -#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ -#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ -#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ -#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ -#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ -#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ -#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ -#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ -#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ -#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ -#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ -#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ -#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */ -#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ -#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ -#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ -#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ -#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ -#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ -#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ -#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ -#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ -#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ -#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */ -#define X86_FEATURE_AVX512VL 
( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */ +/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */ +#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/ +#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */ +#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ +#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ +#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ +#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ +#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ +#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */ +#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ +#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ +#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ +#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ +#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ +#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ +#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */ +#define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */ +#define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */ +#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ +#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ +#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ +#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ +#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */ +#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ +#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ +#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ +#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ +#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */ +#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */ -/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ -#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ -#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */ -#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ -#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ +/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */ +#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */ +#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */ +#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */ +#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */ -/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */ -#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ +/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */ +#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ -/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */ -#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */ -#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ -#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ +/* Intel-defined CPU QoS 
Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */ +#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */ +#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ +#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ -/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ -#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ -#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */ +/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ +#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ +#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ +#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ +#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ +#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ +#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ -/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ -#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ -#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ -#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ -#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ -#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ -#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ -#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ -#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ -#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */ -#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ +/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ +#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ +#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ +#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ +#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ +#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ +#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ +#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ +#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ +#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. 
Preference */ +#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ -/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */ -#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ -#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ -#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ -#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ -#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ -#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ -#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ -#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ -#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ -#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ -#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ -#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ -#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ +/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */ +#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ +#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ +#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ +#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ +#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ +#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ +#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ +#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ +#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ +#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ +#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ +#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ +#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ -/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ -#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ -#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ -#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ -#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ -#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ -#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ +/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */ +#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ +#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */ +#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ +#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ +#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */ +#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */ +#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */ +#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */ +#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */ +#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */ 
+#define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */ +#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ +#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ +#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ -/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ -#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ -#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ -#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ +/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ +#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ +#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */ +#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */ + +/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ +#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ +#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ +#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ +#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ +#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */ +#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ +#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */ /* * BUG word(s) */ -#define X86_BUG(x) (NCAPINTS*32 + (x)) +#define X86_BUG(x) (NCAPINTS*32 + (x)) -#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ -#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ -#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ -#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ -#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ -#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ -#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ -#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ -#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ +#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ +#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ +#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ +#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ +#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ +#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ +#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ +#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ +#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ #ifdef CONFIG_X86_32 /* * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional * to avoid confusion. 
*/ -#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ +#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ #endif -#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ -#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ -#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ -#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ +#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ +#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ +#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ +#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ +#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ +#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ +#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ +#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ +#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ + #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index 0a3e808b9123..85e23bb7b34e 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -20,6 +21,8 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in desc->type = (info->read_exec_only ^ 1) << 1; desc->type |= info->contents << 2; + /* Set the ACCESS bit so it can be mapped RO */ + desc->type |= 1; desc->s = 1; desc->dpl = 0x3; @@ -60,17 +63,10 @@ static inline struct desc_struct *get_current_gdt_rw(void) return this_cpu_ptr(&gdt_page)->gdt; } -/* Get the fixmap index for a specific processor */ -static inline unsigned int get_cpu_gdt_ro_index(int cpu) -{ - return FIX_GDT_REMAP_BEGIN + cpu; -} - /* Provide the fixmap address of the remapped GDT */ static inline struct desc_struct *get_cpu_gdt_ro(int cpu) { - unsigned int idx = get_cpu_gdt_ro_index(cpu); - return (struct desc_struct *)__fix_to_virt(idx); + return (struct desc_struct *)&get_cpu_entry_area(cpu)->gdt; } /* Provide the current read-only GDT */ @@ -185,7 +181,7 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr, #endif } -static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr) +static inline void __set_tss_desc(unsigned cpu, unsigned int entry, struct x86_hw_tss *addr) { struct desc_struct *d = get_cpu_gdt_rw(cpu); tss_desc tss; diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index c10c9128f54e..33833d1909af 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -16,6 +16,12 @@ # define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31)) #endif +#ifdef CONFIG_X86_INTEL_UMIP +# define DISABLE_UMIP 0 +#else +# define DISABLE_UMIP (1<<(X86_FEATURE_UMIP & 31)) +#endif + #ifdef CONFIG_X86_64 # define DISABLE_VME (1<<(X86_FEATURE_VME & 31)) # define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31)) @@ -44,6 +50,12 @@ # define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31)) #endif +#ifdef CONFIG_PAGE_TABLE_ISOLATION +# define DISABLE_PTI 0 
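/*
 * A minimal standalone sketch (not part of the patch) of the flat
 * (word * 32 + bit) numbering used by all the X86_FEATURE_* tables
 * above; X86_BUG() continues the same number space after the last
 * capability word. NCAPINTS is taken as 19, matching the new word 18
 * added by this series; all other names are local to the demo.
 */
#include <stdio.h>

#define NCAPINTS		19	/* capability words, incl. word 18 */
#define X86_FEATURE(word, bit)	((word) * 32 + (bit))
#define X86_BUG(x)		(NCAPINTS * 32 + (x))

int main(void)
{
	unsigned int f = X86_FEATURE(18, 26);	/* SPEC_CTRL above */

	/* Recover word and bit from the flat index. */
	printf("word %u, bit %u\n", f / 32, f % 32);
	/* Bug bits start right after the capability words. */
	printf("X86_BUG(14) -> flat index %u\n", X86_BUG(14));
	return 0;
}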
+#else +# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31)) +#endif + /* * Make sure to add features to the correct mask */ @@ -54,7 +66,7 @@ #define DISABLED_MASK4 (DISABLE_PCID) #define DISABLED_MASK5 0 #define DISABLED_MASK6 0 -#define DISABLED_MASK7 0 +#define DISABLED_MASK7 (DISABLE_PTI) #define DISABLED_MASK8 0 #define DISABLED_MASK9 (DISABLE_MPX) #define DISABLED_MASK10 0 @@ -63,8 +75,9 @@ #define DISABLED_MASK13 0 #define DISABLED_MASK14 0 #define DISABLED_MASK15 0 -#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57) +#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP) #define DISABLED_MASK17 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) +#define DISABLED_MASK18 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 836ca1178a6a..69f16f0729d0 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -7,7 +7,6 @@ * Documentation/DMA-API.txt for documentation. */ -#include #include #include #include diff --git a/arch/x86/include/asm/dmi.h b/arch/x86/include/asm/dmi.h index 0ab2ab27ad1f..b825cb201251 100644 --- a/arch/x86/include/asm/dmi.h +++ b/arch/x86/include/asm/dmi.h @@ -4,8 +4,8 @@ #include #include +#include -#include #include static __always_inline __init void *dmi_alloc(unsigned len) diff --git a/arch/x86/include/asm/early_intel_th.h b/arch/x86/include/asm/early_intel_th.h new file mode 100644 index 000000000000..bf93609995c8 --- /dev/null +++ b/arch/x86/include/asm/early_intel_th.h @@ -0,0 +1,20 @@ +/* + * early_intel_th.h: Intel Trace Hub early printk + * + * (C) Copyright 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ +#ifndef _ASM_X86_EARLY_INTEL_TH_H +#define _ASM_X86_EARLY_INTEL_TH_H + +#ifdef CONFIG_INTEL_TH_EARLY_PRINTK +extern struct console intel_th_early_console; +extern void early_intel_th_init(const char *); +#endif /* CONFIG_INTEL_TH_EARLY_PRINTK */ + +#endif /* _ASM_X86_EARLY_INTEL_TH_H */ + diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 85f6ccb80b91..a399c1ebf6f0 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -6,6 +6,7 @@ #include #include #include +#include /* * We map the EFI regions needed for runtime services non-contiguously, @@ -36,8 +37,18 @@ extern asmlinkage unsigned long efi_call_phys(void *, ...); -#define arch_efi_call_virt_setup() kernel_fpu_begin() -#define arch_efi_call_virt_teardown() kernel_fpu_end() +#define arch_efi_call_virt_setup() \ +({ \ + kernel_fpu_begin(); \ + firmware_restrict_branch_speculation_start(); \ +}) + +#define arch_efi_call_virt_teardown() \ +({ \ + firmware_restrict_branch_speculation_end(); \ + kernel_fpu_end(); \ +}) + /* * Wrap all the virtual calls in a way that forces the parameters on the stack. 
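/*
 * A minimal sketch of the bracketing pattern the efi.h hunk above
 * introduces: the speculation restriction is the innermost pair, so
 * it covers exactly the firmware call and nothing else. All functions
 * here are demo-only stand-ins for the kernel primitives.
 */
#include <stdio.h>

static void kernel_fpu_begin(void)		{ puts("fpu: begin"); }
static void kernel_fpu_end(void)		{ puts("fpu: end"); }
static void restrict_speculation_start(void)	{ puts("ibrs: on"); }
static void restrict_speculation_end(void)	{ puts("ibrs: off"); }

static void efi_call_virt(void (*fw)(void))
{
	kernel_fpu_begin();
	restrict_speculation_start();
	fw();				/* untrusted firmware runs here */
	restrict_speculation_end();
	kernel_fpu_end();
}

static void firmware_service(void)	{ puts("firmware"); }

int main(void)
{
	efi_call_virt(firmware_service);
	return 0;
}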
@@ -73,6 +84,7 @@ struct efi_scratch { efi_sync_low_kernel_mappings(); \ preempt_disable(); \ __kernel_fpu_begin(); \ + firmware_restrict_branch_speculation_start(); \ \ if (efi_scratch.use_pgd) { \ efi_scratch.prev_cr3 = __read_cr3(); \ @@ -91,6 +103,7 @@ struct efi_scratch { __flush_tlb_all(); \ } \ \ + firmware_restrict_branch_speculation_end(); \ __kernel_fpu_end(); \ preempt_enable(); \ }) diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h index 0211029076ea..6777480d8a42 100644 --- a/arch/x86/include/asm/espfix.h +++ b/arch/x86/include/asm/espfix.h @@ -2,7 +2,7 @@ #ifndef _ASM_X86_ESPFIX_H #define _ASM_X86_ESPFIX_H -#ifdef CONFIG_X86_64 +#ifdef CONFIG_X86_ESPFIX64 #include @@ -11,7 +11,8 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr); extern void init_espfix_bsp(void); extern void init_espfix_ap(int cpu); - -#endif /* CONFIG_X86_64 */ +#else +static inline void init_espfix_ap(int cpu) { } +#endif #endif /* _ASM_X86_ESPFIX_H */ diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index dcd9fb55e679..e203169931c7 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -44,7 +44,6 @@ extern unsigned long __FIXADDR_TOP; PAGE_SIZE) #endif - /* * Here we define all the compile-time 'special' virtual * addresses. The point is to have a constant address at @@ -84,7 +83,6 @@ enum fixed_addresses { FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, #endif - FIX_RO_IDT, /* Virtual mapping for read-only IDT */ #ifdef CONFIG_X86_32 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, @@ -100,9 +98,12 @@ enum fixed_addresses { #ifdef CONFIG_X86_INTEL_MID FIX_LNW_VRTC, #endif - /* Fixmap entries to remap the GDTs, one per processor. 
*/ - FIX_GDT_REMAP_BEGIN, - FIX_GDT_REMAP_END = FIX_GDT_REMAP_BEGIN + NR_CPUS - 1, + +#ifdef CONFIG_ACPI_APEI_GHES + /* Used for GHES mapping from assorted contexts */ + FIX_APEI_GHES_IRQ, + FIX_APEI_GHES_NMI, +#endif __end_of_permanent_fixed_addresses, @@ -136,8 +137,10 @@ enum fixed_addresses { extern void reserve_top_address(unsigned long reserve); -#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +#define FIXADDR_TOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_TOT_START (FIXADDR_TOP - FIXADDR_TOT_SIZE) extern int fixmaps_set; diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index 51cc979dd364..486c843273c4 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -3,10 +3,12 @@ #define _ASM_X86_HARDIRQ_H #include -#include typedef struct { - unsigned int __softirq_pending; + u16 __softirq_pending; +#if IS_ENABLED(CONFIG_KVM_INTEL) + u8 kvm_cpu_l1tf_flush_l1d; +#endif unsigned int __nmi_count; /* arch dependent */ #ifdef CONFIG_X86_LOCAL_APIC unsigned int apic_timer_irqs; /* arch dependent */ @@ -62,4 +64,24 @@ extern u64 arch_irq_stat_cpu(unsigned int cpu); extern u64 arch_irq_stat(void); #define arch_irq_stat arch_irq_stat + +#if IS_ENABLED(CONFIG_KVM_INTEL) +static inline void kvm_set_cpu_l1tf_flush_l1d(void) +{ + __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1); +} + +static inline void kvm_clear_cpu_l1tf_flush_l1d(void) +{ + __this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0); +} + +static inline bool kvm_get_cpu_l1tf_flush_l1d(void) +{ + return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d); +} +#else /* !IS_ENABLED(CONFIG_KVM_INTEL) */ +static inline void kvm_set_cpu_l1tf_flush_l1d(void) { } +#endif /* IS_ENABLED(CONFIG_KVM_INTEL) */ + #endif /* _ASM_X86_HARDIRQ_H */ diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 8ec99a55e6b9..bf253ad93bbc 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -34,6 +34,7 @@ extern asmlinkage void kvm_posted_intr_wakeup_ipi(void); extern asmlinkage void kvm_posted_intr_nested_ipi(void); extern asmlinkage void error_interrupt(void); extern asmlinkage void irq_work_interrupt(void); +extern asmlinkage void uv_bau_message_intr1(void); extern asmlinkage void spurious_interrupt(void); extern asmlinkage void thermal_interrupt(void); diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h index 0ead9dbb9130..9e114b8c901e 100644 --- a/arch/x86/include/asm/hypervisor.h +++ b/arch/x86/include/asm/hypervisor.h @@ -20,14 +20,23 @@ #ifndef _ASM_X86_HYPERVISOR_H #define _ASM_X86_HYPERVISOR_H +/* x86 hypervisor types */ +enum x86_hypervisor_type { + X86_HYPER_NATIVE = 0, + X86_HYPER_VMWARE, + X86_HYPER_MS_HYPERV, + X86_HYPER_XEN_PV, + X86_HYPER_XEN_HVM, + X86_HYPER_KVM, + X86_HYPER_ACRN, +}; + #ifdef CONFIG_HYPERVISOR_GUEST #include +#include #include -/* - * x86 hypervisor information - */ struct hypervisor_x86 { /* Hypervisor name */ const char *name; @@ -35,40 +44,27 @@ struct hypervisor_x86 { /* Detection routine */ uint32_t (*detect)(void); - /* Platform setup (run once per boot) */ - void (*init_platform)(void); - - /* X2APIC detection (run once per boot) */ - bool (*x2apic_available)(void); + /* Hypervisor type */ + enum x86_hypervisor_type type; - /* pin current vcpu to specified physical cpu (run rarely) */ - void (*pin_vcpu)(int); + /* init 
time callbacks */ + struct x86_hyper_init init; - /* called during init_mem_mapping() to setup early mappings. */ - void (*init_mem_mapping)(void); + /* runtime callbacks */ + struct x86_hyper_runtime runtime; }; -extern const struct hypervisor_x86 *x86_hyper; - -/* Recognized hypervisors */ -extern const struct hypervisor_x86 x86_hyper_vmware; -extern const struct hypervisor_x86 x86_hyper_ms_hyperv; -extern const struct hypervisor_x86 x86_hyper_xen_pv; -extern const struct hypervisor_x86 x86_hyper_xen_hvm; -extern const struct hypervisor_x86 x86_hyper_kvm; - +extern enum x86_hypervisor_type x86_hyper_type; extern void init_hypervisor_platform(void); -extern bool hypervisor_x2apic_available(void); -extern void hypervisor_pin_vcpu(int cpu); - -static inline void hypervisor_init_mem_mapping(void) +static inline bool hypervisor_is_type(enum x86_hypervisor_type type) { - if (x86_hyper && x86_hyper->init_mem_mapping) - x86_hyper->init_mem_mapping(); + return x86_hyper_type == type; } #else static inline void init_hypervisor_platform(void) { } -static inline bool hypervisor_x2apic_available(void) { return false; } -static inline void hypervisor_init_mem_mapping(void) { } +static inline bool hypervisor_is_type(enum x86_hypervisor_type type) +{ + return type == X86_HYPER_NATIVE; +} #endif /* CONFIG_HYPERVISOR_GUEST */ #endif /* _ASM_X86_HYPERVISOR_H */ diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h index c8376b40e882..89789e8c80f6 100644 --- a/arch/x86/include/asm/i8259.h +++ b/arch/x86/include/asm/i8259.h @@ -3,6 +3,7 @@ #define _ASM_X86_I8259_H #include +#include extern unsigned int cached_irq_mask; @@ -69,6 +70,11 @@ struct legacy_pic { extern struct legacy_pic *legacy_pic; extern struct legacy_pic null_legacy_pic; +static inline bool has_legacy_pic(void) +{ + return legacy_pic != &null_legacy_pic; +} + static inline int nr_legacy_irqs(void) { return legacy_pic->nr_legacy_irqs; diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h index 02aff0867211..1c78580e58be 100644 --- a/arch/x86/include/asm/inat.h +++ b/arch/x86/include/asm/inat.h @@ -97,6 +97,16 @@ #define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM) #define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS) +/* Identifiers for segment registers */ +#define INAT_SEG_REG_IGNORE 0 +#define INAT_SEG_REG_DEFAULT 1 +#define INAT_SEG_REG_CS 2 +#define INAT_SEG_REG_SS 3 +#define INAT_SEG_REG_DS 4 +#define INAT_SEG_REG_ES 5 +#define INAT_SEG_REG_FS 6 +#define INAT_SEG_REG_GS 7 + /* Attribute search APIs */ extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); extern int inat_get_last_prefix_id(insn_byte_t last_pfx); diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h new file mode 100644 index 000000000000..e8c3e7cd1673 --- /dev/null +++ b/arch/x86/include/asm/insn-eval.h @@ -0,0 +1,24 @@ +#ifndef _ASM_X86_INSN_EVAL_H +#define _ASM_X86_INSN_EVAL_H +/* + * A collection of utility functions for x86 instruction analysis to be + * used in a kernel context. Useful when, for instance, making sense + * of the registers indicated by operands. 
+ */ + +#include +#include +#include +#include + +#define INSN_CODE_SEG_ADDR_SZ(params) ((params >> 4) & 0xf) +#define INSN_CODE_SEG_OPND_SZ(params) (params & 0xf) +#define INSN_CODE_SEG_PARAMS(oper_sz, addr_sz) (oper_sz | (addr_sz << 4)) + +void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs); +int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs); +unsigned long insn_get_seg_base(struct pt_regs *regs, struct insn *insn, + int regoff); +char insn_get_code_seg_defaults(struct pt_regs *regs); + +#endif /* _ASM_X86_INSN_EVAL_H */ diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index b3e32b010ab1..c2c01f84df75 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h @@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn) return insn_offset_displacement(insn) + insn->displacement.nbytes; } +#define POP_SS_OPCODE 0x1f +#define MOV_SREG_OPCODE 0x8e + +/* + * Intel SDM Vol.3A 6.8.3 states; + * "Any single-step trap that would be delivered following the MOV to SS + * instruction or POP to SS instruction (because EFLAGS.TF is 1) is + * suppressed." + * This function returns true if @insn is MOV SS or POP SS. On these + * instructions, single stepping is suppressed. + */ +static inline int insn_masking_exception(struct insn *insn) +{ + return insn->opcode.bytes[0] == POP_SS_OPCODE || + (insn->opcode.bytes[0] == MOV_SREG_OPCODE && + X86_MODRM_REG(insn->modrm.bytes[0]) == 2); +} + #endif /* _ASM_X86_INSN_H */ diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h new file mode 100644 index 000000000000..62a9f4966b42 --- /dev/null +++ b/arch/x86/include/asm/intel_ds.h @@ -0,0 +1,36 @@ +#ifndef _ASM_INTEL_DS_H +#define _ASM_INTEL_DS_H + +#include + +#define BTS_BUFFER_SIZE (PAGE_SIZE << 4) +#define PEBS_BUFFER_SIZE (PAGE_SIZE << 4) + +/* The maximal number of PEBS events: */ +#define MAX_PEBS_EVENTS 8 + +/* + * A debug store configuration. + * + * We only support architectures that use 64bit fields. + */ +struct debug_store { + u64 bts_buffer_base; + u64 bts_index; + u64 bts_absolute_maximum; + u64 bts_interrupt_threshold; + u64 pebs_buffer_base; + u64 pebs_index; + u64 pebs_absolute_maximum; + u64 pebs_interrupt_threshold; + u64 pebs_event_reset[MAX_PEBS_EVENTS]; +} __aligned(PAGE_SIZE); + +DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store); + +struct debug_store_buffers { + char bts_buffer[BTS_BUFFER_SIZE]; + char pebs_buffer[PEBS_BUFFER_SIZE]; +}; + +#endif diff --git a/arch/x86/include/asm/invpcid.h b/arch/x86/include/asm/invpcid.h new file mode 100644 index 000000000000..989cfa86de85 --- /dev/null +++ b/arch/x86/include/asm/invpcid.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_INVPCID +#define _ASM_X86_INVPCID + +static inline void __invpcid(unsigned long pcid, unsigned long addr, + unsigned long type) +{ + struct { u64 d[2]; } desc = { { pcid, addr } }; + + /* + * The memory clobber is because the whole point is to invalidate + * stale TLB entries and, especially if we're flushing global + * mappings, we don't want the compiler to reorder any subsequent + * memory accesses before the TLB flush. + * + * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and + * invpcid (%rcx), %rax in long mode. 
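/*
 * A standalone rendition of the insn_masking_exception() test from
 * the insn.h hunk above, using raw opcode/ModRM bytes instead of
 * struct insn; the opcode constants come from that hunk, the helper
 * below is demo-only. Segment register 2 in the ModRM reg field
 * selects SS for MOV Sreg.
 */
#include <stdbool.h>
#include <stdio.h>

#define POP_SS_OPCODE	0x1f
#define MOV_SREG_OPCODE	0x8e
#define MODRM_REG(m)	(((m) >> 3) & 0x7)	/* bits 5:3 */

static bool masks_single_step(unsigned char opcode, unsigned char modrm)
{
	return opcode == POP_SS_OPCODE ||
	       (opcode == MOV_SREG_OPCODE && MODRM_REG(modrm) == 2);
}

int main(void)
{
	/* "mov %ax,%ss" encodes as 8e d0: ModRM 0xd0 has reg == 2. */
	printf("8e d0 suppresses #DB: %d\n", masks_single_step(0x8e, 0xd0));
	return 0;
}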
+ */ + asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01" + : : "m" (desc), "a" (type), "c" (&desc) : "memory"); +} + +#define INVPCID_TYPE_INDIV_ADDR 0 +#define INVPCID_TYPE_SINGLE_CTXT 1 +#define INVPCID_TYPE_ALL_INCL_GLOBAL 2 +#define INVPCID_TYPE_ALL_NON_GLOBAL 3 + +/* Flush all mappings for a given pcid and addr, not including globals. */ +static inline void invpcid_flush_one(unsigned long pcid, + unsigned long addr) +{ + __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR); +} + +/* Flush all mappings for a given PCID, not including globals. */ +static inline void invpcid_flush_single_context(unsigned long pcid) +{ + __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT); +} + +/* Flush all mappings, including globals, for all PCIDs. */ +static inline void invpcid_flush_all(void) +{ + __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL); +} + +/* Flush all mappings for all PCIDs except globals. */ +static inline void invpcid_flush_all_nonglobals(void) +{ + __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL); +} + +#endif /* _ASM_X86_INVPCID */ diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index c8ef23f2c28f..c14f2a74b2be 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -13,7 +13,9 @@ * Interrupt control: */ -static inline unsigned long native_save_fl(void) +/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */ +extern inline unsigned long native_save_fl(void); +extern inline unsigned long native_save_fl(void) { unsigned long flags; @@ -142,6 +144,9 @@ static inline notrace unsigned long arch_local_irq_save(void) swapgs; \ sysretl +#ifdef CONFIG_DEBUG_ENTRY +#define SAVE_FLAGS(x) pushfq; popq %rax +#endif #else #define INTERRUPT_RETURN iret #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h index f86a8caa561e..395c9631e000 100644 --- a/arch/x86/include/asm/kdebug.h +++ b/arch/x86/include/asm/kdebug.h @@ -26,6 +26,7 @@ extern void die(const char *, struct pt_regs *,long); extern int __must_check __die(const char *, struct pt_regs *, long); extern void show_stack_regs(struct pt_regs *regs); extern void __show_regs(struct pt_regs *regs, int all); +extern void show_iret_regs(struct pt_regs *regs); extern unsigned long oops_begin(void); extern void oops_end(unsigned long, struct pt_regs *, int signr); diff --git a/arch/x86/include/asm/kmemcheck.h b/arch/x86/include/asm/kmemcheck.h deleted file mode 100644 index 945a0337fbcf..000000000000 --- a/arch/x86/include/asm/kmemcheck.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ASM_X86_KMEMCHECK_H -#define ASM_X86_KMEMCHECK_H - -#include -#include - -#ifdef CONFIG_KMEMCHECK -bool kmemcheck_active(struct pt_regs *regs); - -void kmemcheck_show(struct pt_regs *regs); -void kmemcheck_hide(struct pt_regs *regs); - -bool kmemcheck_fault(struct pt_regs *regs, - unsigned long address, unsigned long error_code); -bool kmemcheck_trap(struct pt_regs *regs); -#else -static inline bool kmemcheck_active(struct pt_regs *regs) -{ - return false; -} - -static inline void kmemcheck_show(struct pt_regs *regs) -{ -} - -static inline void kmemcheck_hide(struct pt_regs *regs) -{ -} - -static inline bool kmemcheck_fault(struct pt_regs *regs, - unsigned long address, unsigned long error_code) -{ - return false; -} - -static inline bool kmemcheck_trap(struct pt_regs *regs) -{ - return false; -} -#endif /* CONFIG_KMEMCHECK */ - -#endif diff --git a/arch/x86/include/asm/kvm_emulate.h 
b/arch/x86/include/asm/kvm_emulate.h index ee23a43386a2..8493303d8b2e 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -107,11 +107,12 @@ struct x86_emulate_ops { * @addr: [IN ] Linear address from which to read. * @val: [OUT] Value read from memory, zero-extended to 'u_long'. * @bytes: [IN ] Number of bytes to read from memory. + * @system:[IN ] Whether the access is forced to be at CPL0. */ int (*read_std)(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, - struct x86_exception *fault); + struct x86_exception *fault, bool system); /* * read_phys: Read bytes of standard (non-emulated/special) memory. @@ -129,10 +130,11 @@ struct x86_emulate_ops { * @addr: [IN ] Linear address to which to write. * @val: [OUT] Value write to memory, zero-extended to 'u_long'. * @bytes: [IN ] Number of bytes to write to memory. + * @system:[IN ] Whether the access is forced to be at CPL0. */ int (*write_std)(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, - struct x86_exception *fault); + struct x86_exception *fault, bool system); /* * fetch: Read bytes of standard (non-emulated/special) memory. * Used for instruction fetch. diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c73e493adf07..4015b88383ce 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -506,6 +507,7 @@ struct kvm_vcpu_arch { u64 smbase; bool tpr_access_reporting; u64 ia32_xss; + u64 microcode_version; /* * Paging state of the vcpu @@ -693,6 +695,9 @@ struct kvm_vcpu_arch { /* be preempted when it's in kernel-mode(cpl=0) */ bool preempted_in_kernel; + + /* Flush the L1 Data cache for L1TF mitigation on VMENTER */ + bool l1tf_flush_l1d; }; struct kvm_lpage_info { @@ -862,6 +867,7 @@ struct kvm_vcpu_stat { u64 signal_exits; u64 irq_window_exits; u64 nmi_window_exits; + u64 l1d_flush; u64 halt_exits; u64 halt_successful_poll; u64 halt_attempted_poll; @@ -908,7 +914,7 @@ struct kvm_x86_ops { int (*hardware_setup)(void); /* __init */ void (*hardware_unsetup)(void); /* __exit */ bool (*cpu_has_accelerated_tpr)(void); - bool (*cpu_has_high_real_mode_segbase)(void); + bool (*has_emulated_msr)(int index); void (*cpuid_update)(struct kvm_vcpu *vcpu); int (*vm_init)(struct kvm *kvm); @@ -1061,6 +1067,8 @@ struct kvm_x86_ops { void (*cancel_hv_timer)(struct kvm_vcpu *vcpu); void (*setup_mce)(struct kvm_vcpu *vcpu); + + int (*get_msr_feature)(struct kvm_msr_entry *entry); }; struct kvm_arch_async_pf { @@ -1156,7 +1164,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, static inline int emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) { - return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); + return x86_emulate_instruction(vcpu, 0, + emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0); } void kvm_enable_efer_bits(u64); @@ -1365,6 +1374,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v); void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu); +u64 kvm_get_arch_capabilities(void); void kvm_define_shared_msr(unsigned index, u32 msr); int kvm_set_shared_msr(unsigned index, u64 val, u64 mask); @@ -1426,4 +1436,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu) #endif } +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end); + #endif /* 
_ASM_X86_KVM_HOST_H */ diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index b1e8d8db921f..340070415c2c 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -346,6 +346,7 @@ enum smca_bank_types { SMCA_IF, /* Instruction Fetch */ SMCA_L2_CACHE, /* L2 Cache */ SMCA_DE, /* Decoder Unit */ + SMCA_RESERVED, /* Reserved */ SMCA_EX, /* Execution Unit */ SMCA_FP, /* Floating Point */ SMCA_L3_CACHE, /* L3 Cache */ @@ -376,6 +377,7 @@ struct smca_bank { extern struct smca_bank smca_banks[MAX_NR_BANKS]; extern const char *smca_get_long_name(enum smca_bank_types t); +extern bool amd_mce_is_memory_error(struct mce *m); extern int mce_threshold_create_device(unsigned int cpu); extern int mce_threshold_remove_device(unsigned int cpu); @@ -384,6 +386,7 @@ extern int mce_threshold_remove_device(unsigned int cpu); static inline int mce_threshold_create_device(unsigned int cpu) { return 0; }; static inline int mce_threshold_remove_device(unsigned int cpu) { return 0; }; +static inline bool amd_mce_is_memory_error(struct mce *m) { return false; }; #endif diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 6a77c63540f7..e7d96c0766fe 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -39,7 +39,7 @@ void __init sme_unmap_bootdata(char *real_mode_data); void __init sme_early_init(void); -void __init sme_encrypt_kernel(void); +void __init sme_encrypt_kernel(struct boot_params *bp); void __init sme_enable(struct boot_params *bp); /* Architecture __weak replacement functions */ @@ -61,7 +61,7 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { } static inline void __init sme_early_init(void) { } -static inline void __init sme_encrypt_kernel(void) { } +static inline void __init sme_encrypt_kernel(struct boot_params *bp) { } static inline void __init sme_enable(struct boot_params *bp) { } #endif /* CONFIG_AMD_MEM_ENCRYPT */ diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 55520cec8b27..6cf0e4cb7b97 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -37,7 +37,13 @@ struct cpu_signature { struct device; -enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND }; +enum ucode_state { + UCODE_OK = 0, + UCODE_NEW, + UCODE_UPDATED, + UCODE_NFOUND, + UCODE_ERROR, +}; struct microcode_ops { enum ucode_state (*request_microcode_user) (int cpu, @@ -54,7 +60,7 @@ struct microcode_ops { * are being called. * See also the "Synchronization" section in microcode_core.c. */ - int (*apply_microcode) (int cpu); + enum ucode_state (*apply_microcode) (int cpu); int (*collect_cpu_info) (int cpu, struct cpu_signature *csig); }; diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h index 9ea26f167497..5ff3e8af2c20 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h @@ -3,6 +3,7 @@ #define _ASM_X86_MMU_H #include +#include #include #include @@ -27,7 +28,8 @@ typedef struct { atomic64_t tlb_gen; #ifdef CONFIG_MODIFY_LDT_SYSCALL - struct ldt_struct *ldt; + struct rw_semaphore ldt_usr_sem; + struct ldt_struct *ldt; #endif #ifdef CONFIG_X86_64 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 6699fc441644..ed97ef3b48a7 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -50,22 +50,54 @@ struct ldt_struct { * call gates. 
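/*
 * A demo-only dispatch over the widened ucode_state enum from the
 * microcode.h hunk above; the strings and helper are illustrative,
 * not kernel code.
 */
#include <stdio.h>

enum ucode_state {
	UCODE_OK = 0,
	UCODE_NEW,	/* newer image found, not applied yet */
	UCODE_UPDATED,	/* image was applied */
	UCODE_NFOUND,
	UCODE_ERROR,
};

static const char *ucode_state_name(enum ucode_state s)
{
	switch (s) {
	case UCODE_OK:		return "up to date";
	case UCODE_NEW:		return "update staged";
	case UCODE_UPDATED:	return "update applied";
	case UCODE_NFOUND:	return "no matching image";
	default:		return "error";
	}
}

int main(void)
{
	printf("%s\n", ucode_state_name(UCODE_NEW));
	return 0;
}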
On native, we could merge the ldt_struct and LDT * allocations, but it's not worth trying to optimize. */ - struct desc_struct *entries; - unsigned int nr_entries; + struct desc_struct *entries; + unsigned int nr_entries; + + /* + * If PTI is in use, then the entries array is not mapped while we're + * in user mode. The whole array will be aliased at the addressed + * given by ldt_slot_va(slot). We use two slots so that we can allocate + * and map, and enable a new LDT without invalidating the mapping + * of an older, still-in-use LDT. + * + * slot will be -1 if this LDT doesn't have an alias mapping. + */ + int slot; }; +/* This is a multiple of PAGE_SIZE. */ +#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE) + +static inline void *ldt_slot_va(int slot) +{ +#ifdef CONFIG_X86_64 + return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot); +#else + BUG(); + return (void *)fix_to_virt(FIX_HOLE); +#endif +} + /* * Used for LDT copy/destruction. */ -int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm); +static inline void init_new_context_ldt(struct mm_struct *mm) +{ + mm->context.ldt = NULL; + init_rwsem(&mm->context.ldt_usr_sem); +} +int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm); void destroy_context_ldt(struct mm_struct *mm); +void ldt_arch_exit_mmap(struct mm_struct *mm); #else /* CONFIG_MODIFY_LDT_SYSCALL */ -static inline int init_new_context_ldt(struct task_struct *tsk, - struct mm_struct *mm) +static inline void init_new_context_ldt(struct mm_struct *mm) { } +static inline int ldt_dup_context(struct mm_struct *oldmm, + struct mm_struct *mm) { return 0; } -static inline void destroy_context_ldt(struct mm_struct *mm) {} +static inline void destroy_context_ldt(struct mm_struct *mm) { } +static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { } #endif static inline void load_mm_ldt(struct mm_struct *mm) @@ -73,8 +105,8 @@ static inline void load_mm_ldt(struct mm_struct *mm) #ifdef CONFIG_MODIFY_LDT_SYSCALL struct ldt_struct *ldt; - /* lockless_dereference synchronizes with smp_store_release */ - ldt = lockless_dereference(mm->context.ldt); + /* READ_ONCE synchronizes with smp_store_release */ + ldt = READ_ONCE(mm->context.ldt); /* * Any change to mm->context.ldt is followed by an IPI to all @@ -90,10 +122,31 @@ static inline void load_mm_ldt(struct mm_struct *mm) * that we can see. */ - if (unlikely(ldt)) - set_ldt(ldt->entries, ldt->nr_entries); - else + if (unlikely(ldt)) { + if (static_cpu_has(X86_FEATURE_PTI)) { + if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) { + /* + * Whoops -- either the new LDT isn't mapped + * (if slot == -1) or is mapped into a bogus + * slot (if slot > 1). + */ + clear_LDT(); + return; + } + + /* + * If page table isolation is enabled, ldt->entries + * will not be mapped in the userspace pagetables. + * Tell the CPU to access the LDT through the alias + * at ldt_slot_va(ldt->slot). 
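/*
 * The slot arithmetic from ldt_slot_va() above, reduced to a
 * standalone demo. LDT_ENTRIES and LDT_ENTRY_SIZE carry their usual
 * x86 values; LDT_BASE_ADDR is a made-up constant here, the real one
 * is defined by the kernel's memory layout.
 */
#include <stdio.h>

#define LDT_ENTRIES	8192
#define LDT_ENTRY_SIZE	8
#define LDT_SLOT_STRIDE	(LDT_ENTRIES * LDT_ENTRY_SIZE)	/* 64 KiB */
#define LDT_BASE_ADDR	0xfffffe0000000000UL		/* demo value */

static unsigned long ldt_slot_va(int slot)
{
	return LDT_BASE_ADDR + (unsigned long)slot * LDT_SLOT_STRIDE;
}

int main(void)
{
	/* Two slots: a new LDT can be mapped in the spare slot while
	 * the old one is still live, then switched to safely. */
	printf("slot 0: %#lx\nslot 1: %#lx\n", ldt_slot_va(0), ldt_slot_va(1));
	return 0;
}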
+ */ + set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries); + } else { + set_ldt(ldt->entries, ldt->nr_entries); + } + } else { clear_LDT(); + } #else clear_LDT(); #endif @@ -132,18 +185,21 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { + mutex_init(&mm->context.lock); + mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id); atomic64_set(&mm->context.tlb_gen, 0); - #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS if (cpu_feature_enabled(X86_FEATURE_OSPKE)) { - /* pkey 0 is the default and always allocated */ + /* pkey 0 is the default and allocated implicitly */ mm->context.pkey_allocation_map = 0x1; /* -1 means unallocated or invalid */ mm->context.execute_only_pkey = -1; } - #endif - return init_new_context_ldt(tsk, mm); +#endif + init_new_context_ldt(mm); + return 0; } static inline void destroy_context(struct mm_struct *mm) { @@ -176,15 +232,16 @@ do { \ } while (0) #endif -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { paravirt_arch_dup_mmap(oldmm, mm); + return ldt_dup_context(oldmm, mm); } static inline void arch_exit_mmap(struct mm_struct *mm) { paravirt_arch_exit_mmap(mm); + ldt_arch_exit_mmap(mm); } #ifdef CONFIG_X86_64 @@ -281,33 +338,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, return __pkru_allows_pkey(vma_pkey(vma), write); } -/* - * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID - * bits. This serves two purposes. It prevents a nasty situation in - * which PCID-unaware code saves CR3, loads some other value (with PCID - * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if - * the saved ASID was nonzero. It also means that any bugs involving - * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger - * deterministically. - */ - -static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid) -{ - if (static_cpu_has(X86_FEATURE_PCID)) { - VM_WARN_ON_ONCE(asid > 4094); - return __sme_pa(mm->pgd) | (asid + 1); - } else { - VM_WARN_ON_ONCE(asid != 0); - return __sme_pa(mm->pgd); - } -} - -static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid) -{ - VM_WARN_ON_ONCE(asid > 4094); - return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH; -} - /* * This can be used from process context to figure out what the value of * CR3 is without needing to do a (slow) __read_cr3(). @@ -317,7 +347,7 @@ static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid) */ static inline unsigned long __get_current_cr3_fast(void) { - unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm), + unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd, this_cpu_read(cpu_tlbstate.loaded_mm_asid)); /* For now, be very restrictive about when this can be called. 
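/*
 * The CR3 layout described by the comment block removed above, as a
 * standalone sketch: the low twelve bits carry the PCID (ASID + 1,
 * so PCID 0 stays reserved for PCID-unaware code) and bit 63 is the
 * no-flush hint. The pgd physical address is made up for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define CR3_NOFLUSH	(1ULL << 63)

static uint64_t build_cr3(uint64_t pgd_pa, uint16_t asid)
{
	return pgd_pa | (uint64_t)(asid + 1);	/* asid must be <= 4094 */
}

int main(void)
{
	uint64_t cr3 = build_cr3(0x1000, 5);

	printf("cr3      = %#llx\n", (unsigned long long)cr3);
	printf("no-flush = %#llx\n", (unsigned long long)(cr3 | CR3_NOFLUSH));
	return 0;
}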
*/ diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h index 8546fafa21a9..7948a17febb4 100644 --- a/arch/x86/include/asm/module.h +++ b/arch/x86/include/asm/module.h @@ -6,7 +6,7 @@ #include struct mod_arch_specific { -#ifdef CONFIG_ORC_UNWINDER +#ifdef CONFIG_UNWINDER_ORC unsigned int num_orcs; int *orc_unwind_ip; struct orc_entry *orc_unwind; diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 581bb54dd464..5119e4b555cc 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -7,6 +7,7 @@ #include #include #include +#include /* * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent @@ -186,10 +187,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) return U64_MAX; __asm__ __volatile__("mov %4, %%r8\n" - "call *%5" + CALL_NOSPEC : "=a" (hv_status), ASM_CALL_CONSTRAINT, "+c" (control), "+d" (input_address) - : "r" (output_address), "m" (hv_hypercall_pg) + : "r" (output_address), + THUNK_TARGET(hv_hypercall_pg) : "cc", "memory", "r8", "r9", "r10", "r11"); #else u32 input_address_hi = upper_32_bits(input_address); @@ -200,13 +202,13 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) if (!hv_hypercall_pg) return U64_MAX; - __asm__ __volatile__("call *%7" + __asm__ __volatile__(CALL_NOSPEC : "=A" (hv_status), "+c" (input_address_lo), ASM_CALL_CONSTRAINT : "A" (control), "b" (input_address_hi), "D"(output_address_hi), "S"(output_address_lo), - "m" (hv_hypercall_pg) + THUNK_TARGET(hv_hypercall_pg) : "cc", "memory"); #endif /* !x86_64 */ return hv_status; @@ -227,10 +229,10 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) #ifdef CONFIG_X86_64 { - __asm__ __volatile__("call *%4" + __asm__ __volatile__(CALL_NOSPEC : "=a" (hv_status), ASM_CALL_CONSTRAINT, "+c" (control), "+d" (input1) - : "m" (hv_hypercall_pg) + : THUNK_TARGET(hv_hypercall_pg) : "cc", "r8", "r9", "r10", "r11"); } #else @@ -238,13 +240,13 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) u32 input1_hi = upper_32_bits(input1); u32 input1_lo = lower_32_bits(input1); - __asm__ __volatile__ ("call *%5" + __asm__ __volatile__ (CALL_NOSPEC : "=A"(hv_status), "+c"(input1_lo), ASM_CALL_CONSTRAINT : "A" (control), "b" (input1_hi), - "m" (hv_hypercall_pg) + THUNK_TARGET(hv_hypercall_pg) : "cc", "edi", "esi"); } #endif diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index ab022618a50a..ef7eec669a1b 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -39,6 +39,15 @@ /* Intel MSRs. 
Some also available on other CPUs */ +#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ +#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ +#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ +#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ +#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ + +#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ +#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ + #define MSR_PPIN_CTL 0x0000004e #define MSR_PPIN 0x0000004f @@ -57,6 +66,23 @@ #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) #define MSR_MTRRcap 0x000000fe + +#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a +#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ +#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ +#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */ +#define ARCH_CAP_SSB_NO (1 << 4) /* + * Not susceptible to Speculative Store Bypass + * attack, so no Speculative Store Bypass + * control required. + */ + +#define MSR_IA32_FLUSH_CMD 0x0000010b +#define L1D_FLUSH (1 << 0) /* + * Writeback and invalidate the + * L1 data cache. + */ + #define MSR_IA32_BBL_CR_CTL 0x00000119 #define MSR_IA32_BBL_CR_CTL3 0x0000011e @@ -325,6 +351,8 @@ #define MSR_AMD64_IBSOPDATA4 0xc001103d #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ +#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f + /* Fam 17h MSRs */ #define MSR_F17H_IRPERF 0xc00000e9 @@ -352,6 +380,9 @@ #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL #define FAM10H_MMIO_CONF_BASE_SHIFT 20 #define MSR_FAM10H_NODE_ID 0xc001100c +#define MSR_F10H_DECFG 0xc0011029 +#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1 +#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT) /* K8 MSRs */ #define MSR_K8_TOP_MEM1 0xc001001a diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 07962f5f6fba..30df295f6d94 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -214,8 +214,7 @@ static __always_inline unsigned long long rdtsc_ordered(void) * that some other imaginary CPU is updating continuously with a * time stamp. */ - alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, - "lfence", X86_FEATURE_LFENCE_RDTSC); + barrier_nospec(); return rdtsc(); } diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h new file mode 100644 index 000000000000..8b38df98548e --- /dev/null +++ b/arch/x86/include/asm/nospec-branch.h @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_X86_NOSPEC_BRANCH_H_ +#define _ASM_X86_NOSPEC_BRANCH_H_ + +#include +#include +#include +#include + +/* + * Fill the CPU return stack buffer. + * + * Each entry in the RSB, if used for a speculative 'ret', contains an + * infinite 'pause; lfence; jmp' loop to capture speculative execution. + * + * This is required in various cases for retpoline and IBRS-based + * mitigations for the Spectre variant 2 vulnerability. Sometimes to + * eliminate potentially bogus entries from the RSB, and sometimes + * purely to ensure that it doesn't get empty, which on some CPUs would + * allow predictions from other (unwanted!) sources to be used. + * + * We define a CPP macro such that it can be used from both .S files and + * inline assembly. It's possible to do a .macro and then include that + * from C via asm(".include ") but let's not go there. 
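/*
 * Decoding a raw IA32_ARCH_CAPABILITIES value with the bit
 * definitions from the msr-index.h hunk above; the sample value is
 * invented for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define ARCH_CAP_RDCL_NO		(1 << 0)	/* no Meltdown */
#define ARCH_CAP_IBRS_ALL		(1 << 1)	/* enhanced IBRS */
#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH	(1 << 3)
#define ARCH_CAP_SSB_NO			(1 << 4)	/* no SSB */

int main(void)
{
	uint64_t caps = ARCH_CAP_RDCL_NO | ARCH_CAP_SSB_NO;	/* sample */

	printf("Meltdown-immune: %s\n", caps & ARCH_CAP_RDCL_NO ? "yes" : "no");
	printf("Enhanced IBRS:   %s\n", caps & ARCH_CAP_IBRS_ALL ? "yes" : "no");
	printf("Skip L1D flush:  %s\n",
	       caps & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH ? "yes" : "no");
	printf("SSB-immune:      %s\n", caps & ARCH_CAP_SSB_NO ? "yes" : "no");
	return 0;
}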
+ */ + +#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ +#define RSB_FILL_LOOPS 16 /* To avoid underflow */ + +/* + * Google experimented with loop-unrolling and this turned out to be + * the optimal version — two calls, each with their own speculation + * trap should their return address end up getting used, in a loop. + */ +#define __FILL_RETURN_BUFFER(reg, nr, sp) \ + mov $(nr/2), reg; \ +771: \ + call 772f; \ +773: /* speculation trap */ \ + pause; \ + lfence; \ + jmp 773b; \ +772: \ + call 774f; \ +775: /* speculation trap */ \ + pause; \ + lfence; \ + jmp 775b; \ +774: \ + dec reg; \ + jnz 771b; \ + add $(BITS_PER_LONG/8) * nr, sp; + +#ifdef __ASSEMBLY__ + +/* + * This should be used immediately before a retpoline alternative. It tells + * objtool where the retpolines are so that it can make sense of the control + * flow by just reading the original instruction(s) and ignoring the + * alternatives. + */ +.macro ANNOTATE_NOSPEC_ALTERNATIVE + .Lannotate_\@: + .pushsection .discard.nospec + .long .Lannotate_\@ - . + .popsection +.endm + +/* + * This should be used immediately before an indirect jump/call. It tells + * objtool the subsequent indirect jump/call is vouched safe for retpoline + * builds. + */ +.macro ANNOTATE_RETPOLINE_SAFE + .Lannotate_\@: + .pushsection .discard.retpoline_safe + _ASM_PTR .Lannotate_\@ + .popsection +.endm + +/* + * These are the bare retpoline primitives for indirect jmp and call. + * Do not use these directly; they only exist to make the ALTERNATIVE + * invocation below less ugly. + */ +.macro RETPOLINE_JMP reg:req + call .Ldo_rop_\@ +.Lspec_trap_\@: + pause + lfence + jmp .Lspec_trap_\@ +.Ldo_rop_\@: + mov \reg, (%_ASM_SP) + ret +.endm + +/* + * This is a wrapper around RETPOLINE_JMP so the called function in reg + * returns to the instruction after the macro. + */ +.macro RETPOLINE_CALL reg:req + jmp .Ldo_call_\@ +.Ldo_retpoline_jmp_\@: + RETPOLINE_JMP \reg +.Ldo_call_\@: + call .Ldo_retpoline_jmp_\@ +.endm + +/* + * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple + * indirect jmp/call which may be susceptible to the Spectre variant 2 + * attack. + */ +.macro JMP_NOSPEC reg:req +#ifdef CONFIG_RETPOLINE + ANNOTATE_NOSPEC_ALTERNATIVE + ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg), \ + __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \ + __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD +#else + jmp *\reg +#endif +.endm + +.macro CALL_NOSPEC reg:req +#ifdef CONFIG_RETPOLINE + ANNOTATE_NOSPEC_ALTERNATIVE + ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \ + __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\ + __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD +#else + call *\reg +#endif +.endm + + /* + * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP + * monstrosity above, manually. 
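/*
 * A quick check of the stack accounting in __FILL_RETURN_BUFFER
 * above: nr/2 loop iterations issue two calls each, so nr return
 * addresses pile up and the final add must skip nr words. Plain
 * arithmetic, demo only.
 */
#include <stdio.h>

#define BITS_PER_LONG	64
#define RSB_CLEAR_LOOPS	32

int main(void)
{
	int nr = RSB_CLEAR_LOOPS;
	long fixup = (BITS_PER_LONG / 8) * (long)nr;

	printf("%d calls -> add $%ld, %%rsp\n", nr, fixup);	/* 256 bytes */
	return 0;
}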
+ */ +.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req +#ifdef CONFIG_RETPOLINE + ANNOTATE_NOSPEC_ALTERNATIVE + ALTERNATIVE "jmp .Lskip_rsb_\@", \ + __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \ + \ftr +.Lskip_rsb_\@: +#endif +.endm + +#else /* __ASSEMBLY__ */ + +#define ANNOTATE_NOSPEC_ALTERNATIVE \ + "999:\n\t" \ + ".pushsection .discard.nospec\n\t" \ + ".long 999b - .\n\t" \ + ".popsection\n\t" + +#define ANNOTATE_RETPOLINE_SAFE \ + "999:\n\t" \ + ".pushsection .discard.retpoline_safe\n\t" \ + _ASM_PTR " 999b\n\t" \ + ".popsection\n\t" + +#if defined(CONFIG_X86_64) && defined(RETPOLINE) + +/* + * Since the inline asm uses the %V modifier which is only in newer GCC, + * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE. + */ +# define CALL_NOSPEC \ + ANNOTATE_NOSPEC_ALTERNATIVE \ + ALTERNATIVE( \ + ANNOTATE_RETPOLINE_SAFE \ + "call *%[thunk_target]\n", \ + "call __x86_indirect_thunk_%V[thunk_target]\n", \ + X86_FEATURE_RETPOLINE) +# define THUNK_TARGET(addr) [thunk_target] "r" (addr) + +#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE) +/* + * For i386 we use the original ret-equivalent retpoline, because + * otherwise we'll run out of registers. We don't care about CET + * here, anyway. + */ +# define CALL_NOSPEC \ + ALTERNATIVE( \ + ANNOTATE_RETPOLINE_SAFE \ + "call *%[thunk_target]\n", \ + " jmp 904f;\n" \ + " .align 16\n" \ + "901: call 903f;\n" \ + "902: pause;\n" \ + " lfence;\n" \ + " jmp 902b;\n" \ + " .align 16\n" \ + "903: addl $4, %%esp;\n" \ + " pushl %[thunk_target];\n" \ + " ret;\n" \ + " .align 16\n" \ + "904: call 901b;\n", \ + X86_FEATURE_RETPOLINE) + +# define THUNK_TARGET(addr) [thunk_target] "rm" (addr) +#else /* No retpoline for C / inline asm */ +# define CALL_NOSPEC "call *%[thunk_target]\n" +# define THUNK_TARGET(addr) [thunk_target] "rm" (addr) +#endif + +/* The Spectre V2 mitigation variants */ +enum spectre_v2_mitigation { + SPECTRE_V2_NONE, + SPECTRE_V2_RETPOLINE_MINIMAL, + SPECTRE_V2_RETPOLINE_MINIMAL_AMD, + SPECTRE_V2_RETPOLINE_GENERIC, + SPECTRE_V2_RETPOLINE_AMD, + SPECTRE_V2_IBRS, +}; + +/* The Speculative Store Bypass disable variants */ +enum ssb_mitigation { + SPEC_STORE_BYPASS_NONE, + SPEC_STORE_BYPASS_DISABLE, + SPEC_STORE_BYPASS_PRCTL, + SPEC_STORE_BYPASS_SECCOMP, +}; + +extern char __indirect_thunk_start[]; +extern char __indirect_thunk_end[]; + +/* + * On VMEXIT we must ensure that no RSB predictions learned in the guest + * can be followed in the host, by overwriting the RSB completely. Both + * retpoline and IBRS mitigations for Spectre v2 need this; only on future + * CPUs with IBRS_ALL *might* it be avoided. 
+ */ +static inline void vmexit_fill_RSB(void) +{ +#ifdef CONFIG_RETPOLINE + unsigned long loops; + + asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE + ALTERNATIVE("jmp 910f", + __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)), + X86_FEATURE_RETPOLINE) + "910:" + : "=r" (loops), ASM_CALL_CONSTRAINT + : : "memory" ); +#endif +} + +static __always_inline +void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature) +{ + asm volatile(ALTERNATIVE("", "wrmsr", %c[feature]) + : : "c" (msr), + "a" ((u32)val), + "d" ((u32)(val >> 32)), + [feature] "i" (feature) + : "memory"); +} + +static inline void indirect_branch_prediction_barrier(void) +{ + u64 val = PRED_CMD_IBPB; + + alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB); +} + +/* The Intel SPEC CTRL MSR base value cache */ +extern u64 x86_spec_ctrl_base; + +/* + * With retpoline, we must use IBRS to restrict branch prediction + * before calling into firmware. + * + * (Implemented as CPP macros due to header hell.) + */ +#define firmware_restrict_branch_speculation_start() \ +do { \ + u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \ + \ + preempt_disable(); \ + alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ + X86_FEATURE_USE_IBRS_FW); \ +} while (0) + +#define firmware_restrict_branch_speculation_end() \ +do { \ + u64 val = x86_spec_ctrl_base; \ + \ + alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ + X86_FEATURE_USE_IBRS_FW); \ + preempt_enable(); \ +} while (0) + +#endif /* __ASSEMBLY__ */ + +/* + * Below is used in the eBPF JIT compiler and emits the byte sequence + * for the following assembly: + * + * With retpolines configured: + * + * callq do_rop + * spec_trap: + * pause + * lfence + * jmp spec_trap + * do_rop: + * mov %rax,(%rsp) + * retq + * + * Without retpolines configured: + * + * jmp *%rax + */ +#ifdef CONFIG_RETPOLINE +# define RETPOLINE_RAX_BPF_JIT_SIZE 17 +# define RETPOLINE_RAX_BPF_JIT() \ + EMIT1_off32(0xE8, 7); /* callq do_rop */ \ + /* spec_trap: */ \ + EMIT2(0xF3, 0x90); /* pause */ \ + EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \ + EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \ + /* do_rop: */ \ + EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \ + EMIT1(0xC3); /* retq */ +#else +# define RETPOLINE_RAX_BPF_JIT_SIZE 2 +# define RETPOLINE_RAX_BPF_JIT() \ + EMIT2(0xFF, 0xE0); /* jmp *%rax */ +#endif + +#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */ diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h index aa30c3241ea7..0d5c739eebd7 100644 --- a/arch/x86/include/asm/page_32_types.h +++ b/arch/x86/include/asm/page_32_types.h @@ -29,8 +29,13 @@ #define N_EXCEPTION_STACKS 1 #ifdef CONFIG_X86_PAE -/* 44=32+12, the limit we can fit into an unsigned long pfn */ -#define __PHYSICAL_MASK_SHIFT 44 +/* + * This is beyond the 44 bit limit imposed by the 32bit long pfns, + * but we need the full mask to make sure inverted PROT_NONE + * entries have all the host bits set in a guest. + * The real limit is still 44 bits. 
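/*
 * The 17-byte sequence described by the RETPOLINE_RAX_BPF_JIT()
 * comment above, written out as raw machine code so the size
 * constant can be checked; the array is demo-only.
 */
#include <stdio.h>

static const unsigned char retpoline_rax[] = {
	0xe8, 0x07, 0x00, 0x00, 0x00,	/* callq do_rop (rel32 +7) */
	/* spec_trap: */
	0xf3, 0x90,			/* pause                   */
	0x0f, 0xae, 0xe8,		/* lfence                  */
	0xeb, 0xf9,			/* jmp spec_trap (rel8 -7) */
	/* do_rop: */
	0x48, 0x89, 0x04, 0x24,		/* mov %rax,(%rsp)         */
	0xc3,				/* retq                    */
};

int main(void)
{
	/* Matches RETPOLINE_RAX_BPF_JIT_SIZE. */
	printf("%zu bytes\n", sizeof(retpoline_rax));	/* 17 */
	return 0;
}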
+ */ +#define __PHYSICAL_MASK_SHIFT 52 #define __VIRTUAL_MASK_SHIFT 32 #else /* !CONFIG_X86_PAE */ diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index 4baa6bceb232..d652a3808065 100644 --- a/arch/x86/include/asm/page_64.h +++ b/arch/x86/include/asm/page_64.h @@ -52,10 +52,6 @@ static inline void clear_page(void *page) void copy_page(void *to, void *from); -#ifdef CONFIG_X86_MCE -#define arch_unmap_kpfn arch_unmap_kpfn -#endif - #endif /* !__ASSEMBLY__ */ #ifdef CONFIG_X86_VSYSCALL_EMULATION diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index fd81228e8037..93be6ee2f2be 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -7,6 +7,7 @@ #ifdef CONFIG_PARAVIRT #include #include +#include #include @@ -16,10 +17,9 @@ #include #include -static inline void load_sp0(struct tss_struct *tss, - struct thread_struct *thread) +static inline void load_sp0(unsigned long sp0) { - PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread); + PVOP_VCALL1(pv_cpu_ops.load_sp0, sp0); } /* The paravirtualized CPUID instruction. */ @@ -273,6 +273,13 @@ static inline void slow_down_io(void) #endif } +static inline unsigned long cpu_khz_from_paravirt(void) +{ + if (pv_cpu_ops.cpu_khz == NULL) + return 0; + return PVOP_CALL0(unsigned long, pv_cpu_ops.cpu_khz); +} + static inline void paravirt_activate_mm(struct mm_struct *prev, struct mm_struct *next) { @@ -298,9 +305,9 @@ static inline void __flush_tlb_global(void) { PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel); } -static inline void __flush_tlb_single(unsigned long addr) +static inline void __flush_tlb_one_user(unsigned long addr) { - PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr); + PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr); } static inline void flush_tlb_others(const struct cpumask *cpumask, @@ -797,6 +804,12 @@ static inline notrace unsigned long arch_local_irq_save(void) return f; } +static inline void write_msi_msg_paravirt(struct msi_desc *entry, + struct msi_msg *msg) +{ + return PVOP_VCALL2(pv_irq_ops.write_msi, entry, msg); +} + /* Make sure as little as possible of this mess escapes. 
*/ #undef PARAVIRT_CALL @@ -880,23 +893,27 @@ extern void default_banner(void); #define INTERRUPT_RETURN \ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \ - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret)) + ANNOTATE_RETPOLINE_SAFE; \ + jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);) #define DISABLE_INTERRUPTS(clobbers) \ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ + ANNOTATE_RETPOLINE_SAFE; \ call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) #define ENABLE_INTERRUPTS(clobbers) \ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ + ANNOTATE_RETPOLINE_SAFE; \ call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) #ifdef CONFIG_X86_32 #define GET_CR0_INTO_EAX \ push %ecx; push %edx; \ + ANNOTATE_RETPOLINE_SAFE; \ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ pop %edx; pop %ecx #else /* !CONFIG_X86_32 */ @@ -918,16 +935,29 @@ extern void default_banner(void); */ #define SWAPGS \ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \ - call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \ + ANNOTATE_RETPOLINE_SAFE; \ + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \ ) #define GET_CR2_INTO_RAX \ - call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2) + ANNOTATE_RETPOLINE_SAFE; \ + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); #define USERGS_SYSRET64 \ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \ CLBR_NONE, \ - jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64)) + ANNOTATE_RETPOLINE_SAFE; \ + jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);) + +#ifdef CONFIG_DEBUG_ENTRY +#define SAVE_FLAGS(clobbers) \ + PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \ + PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \ + ANNOTATE_RETPOLINE_SAFE; \ + call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl); \ + PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) +#endif + #endif /* CONFIG_X86_32 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 10cc3b9709fe..63f39a5c621d 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -43,6 +43,7 @@ #include #include #include +#include struct page; struct thread_struct; @@ -53,6 +54,8 @@ struct desc_struct; struct task_struct; struct cpumask; struct flush_tlb_info; +struct msi_desc; +struct msi_msg; /* * Wrapper type for pointers to code which uses the non-standard @@ -134,7 +137,7 @@ struct pv_cpu_ops { void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries); void (*free_ldt)(struct desc_struct *ldt, unsigned entries); - void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t); + void (*load_sp0)(unsigned long sp0); void (*set_iopl_mask)(unsigned mask); @@ -174,6 +177,8 @@ struct pv_cpu_ops { void (*start_context_switch)(struct task_struct *prev); void (*end_context_switch)(struct task_struct *next); + + unsigned long (*cpu_khz)(void); } __no_randomize_layout; struct pv_irq_ops { @@ -194,6 +199,7 @@ struct pv_irq_ops { void (*safe_halt)(void); void (*halt)(void); + void (*write_msi)(struct msi_desc *entry, struct msi_msg *msg); } __no_randomize_layout; struct pv_mmu_ops { @@ -217,7 +223,7 @@ struct pv_mmu_ops { /* TLB operations */ void (*flush_tlb_user)(void); void (*flush_tlb_kernel)(void); - void (*flush_tlb_single)(unsigned long addr); + void (*flush_tlb_one_user)(unsigned long addr); void 
(*flush_tlb_others)(const struct cpumask *cpus, const struct flush_tlb_info *info); @@ -392,7 +398,9 @@ int paravirt_disable_iospace(void); * offset into the paravirt_patch_template structure, and can therefore be * freely converted back into a structure offset. */ -#define PARAVIRT_CALL "call *%c[paravirt_opptr];" +#define PARAVIRT_CALL \ + ANNOTATE_RETPOLINE_SAFE \ + "call *%c[paravirt_opptr];" /* * These macros are intended to wrap calls through one of the paravirt diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 377f1ffd18be..ba3c523aaf16 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -526,7 +526,7 @@ static inline bool x86_this_cpu_variable_test_bit(int nr, { bool oldbit; - asm volatile("bt "__percpu_arg(2)",%1\n\t" + asm volatile("bt "__percpu_arg(2)",%1" CC_SET(c) : CC_OUT(c) (oldbit) : "m" (*(unsigned long __percpu *)addr), "Ir" (nr)); diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index 4b5e1eafada7..aff42e1da6ee 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -30,6 +30,17 @@ static inline void paravirt_release_p4d(unsigned long pfn) {} */ extern gfp_t __userpte_alloc_gfp; +#ifdef CONFIG_PAGE_TABLE_ISOLATION +/* + * Instead of one PGD, we acquire two PGDs. Being order-1, it is + * both 8k in size and 8k-aligned. That lets us just flip bit 12 + * in a pointer to swap between the two 4k halves. + */ +#define PGD_ALLOCATION_ORDER 1 +#else +#define PGD_ALLOCATION_ORDER 0 +#endif + /* * Allocate and free page tables. */ diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index 685ffe8a0eaf..60d0f9015317 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h @@ -95,4 +95,21 @@ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshi #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) +/* No inverted PFNs on 2 level page tables */ + +static inline u64 protnone_mask(u64 val) +{ + return 0; +} + +static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask) +{ + return val; +} + +static inline bool __pte_needs_invert(u64 val) +{ + return false; +} + #endif /* _ASM_X86_PGTABLE_2LEVEL_H */ diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index bc4af5453802..9dc19b4a2a87 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -206,12 +206,43 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp) #endif /* Encode and de-code a swap entry */ +#define SWP_TYPE_BITS 5 + +#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1) + +/* We always extract/encode the offset by shifting it all the way up, and then down again */ +#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS) + #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5) #define __swp_type(x) (((x).val) & 0x1f) #define __swp_offset(x) ((x).val >> 5) #define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5}) -#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) -#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } }) + +/* + * Normally, __swp_entry() converts from arch-independent swp_entry_t to + * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result + * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the + * whole 64 bits. 
Thus, we shift the "real" arch-dependent conversion to + * __swp_entry_to_pte() through the following helper macro based on 64bit + * __swp_entry(). + */ +#define __swp_pteval_entry(type, offset) ((pteval_t) { \ + (~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \ + | ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) }) + +#define __swp_entry_to_pte(x) ((pte_t){ .pte = \ + __swp_pteval_entry(__swp_type(x), __swp_offset(x)) }) +/* + * Analogically, __pte_to_swp_entry() doesn't just extract the arch-dependent + * swp_entry_t, but also has to convert it from 64bit to the 32bit + * intermediate representation, using the following macros based on 64bit + * __swp_type() and __swp_offset(). + */ +#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS))) +#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)) + +#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \ + __pteval_swp_offset(pte))) #define gup_get_pte gup_get_pte /* @@ -260,4 +291,6 @@ static inline pte_t gup_get_pte(pte_t *ptep) return pte; } +#include + #endif /* _ASM_X86_PGTABLE_3LEVEL_H */ diff --git a/arch/x86/include/asm/pgtable-invert.h b/arch/x86/include/asm/pgtable-invert.h new file mode 100644 index 000000000000..a0c1525f1b6f --- /dev/null +++ b/arch/x86/include/asm/pgtable-invert.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_PGTABLE_INVERT_H +#define _ASM_PGTABLE_INVERT_H 1 + +#ifndef __ASSEMBLY__ + +/* + * A clear pte value is special, and doesn't get inverted. + * + * Note that even users that only pass a pgprot_t (rather + * than a full pte) won't trigger the special zero case, + * because even PAGE_NONE has _PAGE_PROTNONE | _PAGE_ACCESSED + * set. So the all zero case really is limited to just the + * cleared page table entry case. + */ +static inline bool __pte_needs_invert(u64 val) +{ + return val && !(val & _PAGE_PRESENT); +} + +/* Get a mask to xor with the page table entry to get the correct pfn. */ +static inline u64 protnone_mask(u64 val) +{ + return __pte_needs_invert(val) ? ~0ull : 0; +} + +static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask) +{ + /* + * When a PTE transitions from NONE to !NONE or vice-versa + * invert the PFN part to stop speculation. + * pte_pfn undoes this when needed. 
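Taken together, __pte_needs_invert(), protnone_mask() and the flip_protnone_guard() body that follows implement the L1TF PFN inversion. A self-contained userspace model of the whole scheme (the PTE_PFN_MASK value assumes the usual 52-bit physical / 4 KiB page layout) shows that an inverted PROT_NONE entry still decodes to the original pfn:

/* Self-contained userspace model of the PROT_NONE PFN inversion; the
 * PTE_PFN_MASK value is an assumption (52-bit phys, 4 KiB pages). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT	0x001ULL
#define PTE_PFN_MASK	0x000ffffffffff000ULL

static int pte_needs_invert(uint64_t val)
{
	return val && !(val & _PAGE_PRESENT);	/* non-zero and not present */
}

static uint64_t protnone_mask(uint64_t val)
{
	return pte_needs_invert(val) ? ~0ULL : 0;
}

static uint64_t flip_protnone_guard(uint64_t oldval, uint64_t val, uint64_t mask)
{
	if (pte_needs_invert(oldval) != pte_needs_invert(val))
		val = (val & ~mask) | (~val & mask);
	return val;
}

int main(void)
{
	uint64_t present = (0x1234ULL << 12) | _PAGE_PRESENT;
	uint64_t protnone = flip_protnone_guard(present, present & ~_PAGE_PRESENT,
						PTE_PFN_MASK);

	/* pte_pfn() xors the mask back in and recovers the original pfn */
	uint64_t pfn = ((protnone ^ protnone_mask(protnone)) & PTE_PFN_MASK) >> 12;

	assert(pfn == 0x1234);
	printf("PROT_NONE pte %#llx still decodes to pfn %#llx\n",
	       (unsigned long long)protnone, (unsigned long long)pfn);
	return 0;
}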
+ */ + if (__pte_needs_invert(oldval) != __pte_needs_invert(val)) + val = (val & ~mask) | (~val & mask); + return val; +} + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index f735c3016325..6a4b1a54ff47 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -28,6 +28,7 @@ extern pgd_t early_top_pgt[PTRS_PER_PGD]; int __init __early_make_pgtable(unsigned long address, pmdval_t pmd); void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd); +void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user); void ptdump_walk_pgd_level_checkwx(void); #ifdef CONFIG_DEBUG_WX @@ -184,19 +185,29 @@ static inline int pte_special(pte_t pte) return pte_flags(pte) & _PAGE_SPECIAL; } +/* Entries that were set to PROT_NONE are inverted */ + +static inline u64 protnone_mask(u64 val); + static inline unsigned long pte_pfn(pte_t pte) { - return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT; + phys_addr_t pfn = pte_val(pte); + pfn ^= protnone_mask(pfn); + return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT; } static inline unsigned long pmd_pfn(pmd_t pmd) { - return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT; + phys_addr_t pfn = pmd_val(pmd); + pfn ^= protnone_mask(pfn); + return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT; } static inline unsigned long pud_pfn(pud_t pud) { - return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT; + phys_addr_t pfn = pud_val(pud); + pfn ^= protnone_mask(pfn); + return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT; } static inline unsigned long p4d_pfn(p4d_t p4d) @@ -349,14 +360,14 @@ static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set) { pmdval_t v = native_pmd_val(pmd); - return __pmd(v | set); + return native_make_pmd(v | set); } static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) { pmdval_t v = native_pmd_val(pmd); - return __pmd(v & ~clear); + return native_make_pmd(v & ~clear); } static inline pmd_t pmd_mkold(pmd_t pmd) @@ -399,23 +410,18 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd) return pmd_set_flags(pmd, _PAGE_RW); } -static inline pmd_t pmd_mknotpresent(pmd_t pmd) -{ - return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE); -} - static inline pud_t pud_set_flags(pud_t pud, pudval_t set) { pudval_t v = native_pud_val(pud); - return __pud(v | set); + return native_make_pud(v | set); } static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear) { pudval_t v = native_pud_val(pud); - return __pud(v & ~clear); + return native_make_pud(v & ~clear); } static inline pud_t pud_mkold(pud_t pud) @@ -458,11 +464,6 @@ static inline pud_t pud_mkwrite(pud_t pud) return pud_set_flags(pud, _PAGE_RW); } -static inline pud_t pud_mknotpresent(pud_t pud) -{ - return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE); -} - #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY static inline int pte_soft_dirty(pte_t pte) { @@ -527,25 +528,45 @@ static inline pgprotval_t massage_pgprot(pgprot_t pgprot) static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) { - return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) | - massage_pgprot(pgprot)); + phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT; + pfn ^= protnone_mask(pgprot_val(pgprot)); + pfn &= PTE_PFN_MASK; + return __pte(pfn | massage_pgprot(pgprot)); } static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) { - return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) | - massage_pgprot(pgprot)); + phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT; + pfn ^= protnone_mask(pgprot_val(pgprot)); + pfn &= 
PHYSICAL_PMD_PAGE_MASK; + return __pmd(pfn | massage_pgprot(pgprot)); } static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot) { - return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) | - massage_pgprot(pgprot)); + phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT; + pfn ^= protnone_mask(pgprot_val(pgprot)); + pfn &= PHYSICAL_PUD_PAGE_MASK; + return __pud(pfn | massage_pgprot(pgprot)); } +static inline pmd_t pmd_mknotpresent(pmd_t pmd) +{ + return pfn_pmd(pmd_pfn(pmd), + __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE))); +} + +static inline pud_t pud_mknotpresent(pud_t pud) +{ + return pfn_pud(pud_pfn(pud), + __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE))); +} + +static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask); + static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - pteval_t val = pte_val(pte); + pteval_t val = pte_val(pte), oldval = val; /* * Chop off the NX bit (if present), and add the NX portion of @@ -553,17 +574,17 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) */ val &= _PAGE_CHG_MASK; val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK; - + val = flip_protnone_guard(oldval, val, PTE_PFN_MASK); return __pte(val); } static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) { - pmdval_t val = pmd_val(pmd); + pmdval_t val = pmd_val(pmd), oldval = val; val &= _HPAGE_CHG_MASK; val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK; - + val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK); return __pmd(val); } @@ -667,11 +688,6 @@ static inline bool pte_accessible(struct mm_struct *mm, pte_t a) return false; } -static inline int pte_hidden(pte_t pte) -{ - return pte_flags(pte) & _PAGE_HIDDEN; -} - static inline int pmd_present(pmd_t pmd) { /* @@ -846,7 +862,12 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) static inline int p4d_bad(p4d_t p4d) { - return (p4d_flags(p4d) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0; + unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER; + + if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) + ignore_flags |= _PAGE_NX; + + return (p4d_flags(p4d) & ~ignore_flags) != 0; } #endif /* CONFIG_PGTABLE_LEVELS > 3 */ @@ -880,7 +901,12 @@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) static inline int pgd_bad(pgd_t pgd) { - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE; + unsigned long ignore_flags = _PAGE_USER; + + if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) + ignore_flags |= _PAGE_NX; + + return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE; } static inline int pgd_none(pgd_t pgd) @@ -909,7 +935,11 @@ static inline int pgd_none(pgd_t pgd) * pgd_offset() returns a (pgd_t *) * pgd_index() is used get the offset into the pgd page's array of pgd_t's; */ -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) +#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address))) +/* + * a shortcut to get a pgd_t in a given mm + */ +#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address)) /* * a shortcut which implies the use of the kernel's pgd, instead * of a process's @@ -1093,6 +1123,12 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp); } +#define pud_write pud_write +static inline int pud_write(pud_t pud) +{ + return pud_flags(pud) & _PAGE_RW; +} + /* * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); * @@ -1105,7 +1141,14 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, */ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int 
count) { - memcpy(dst, src, count * sizeof(pgd_t)); + memcpy(dst, src, count * sizeof(pgd_t)); +#ifdef CONFIG_PAGE_TABLE_ISOLATION + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + /* Clone the user space pgd as well */ + memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src), + count * sizeof(pgd_t)); +#endif } #define PTE_SHIFT ilog2(PTRS_PER_PTE) @@ -1251,6 +1294,14 @@ static inline bool pud_access_permitted(pud_t pud, bool write) return __pte_access_permitted(pud_val(pud), write); } +#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1 +extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot); + +static inline bool arch_has_pfn_modify_check(void) +{ + return boot_cpu_has_bug(X86_BUG_L1TF); +} + #include #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index e67c0620aec2..b3ec519e3982 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -32,6 +32,7 @@ extern pmd_t initial_pg_pmd[]; static inline void pgtable_cache_init(void) { } static inline void check_pgt_cache(void) { } void paging_init(void); +void sync_initial_page_table(void); /* * Define this if things work differently on an i386 and an i486: @@ -61,7 +62,7 @@ void paging_init(void); #define kpte_clear_flush(ptep, vaddr) \ do { \ pte_clear(&init_mm, (vaddr), (ptep)); \ - __flush_tlb_one((vaddr)); \ + __flush_tlb_one_kernel((vaddr)); \ } while (0) #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h index f2ca9b28fd68..0777e18a1d23 100644 --- a/arch/x86/include/asm/pgtable_32_types.h +++ b/arch/x86/include/asm/pgtable_32_types.h @@ -38,13 +38,23 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */ #define LAST_PKMAP 1024 #endif -#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ - & PMD_MASK) +/* + * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c + * to avoid include recursion hell + */ +#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40) + +#define CPU_ENTRY_AREA_BASE \ + ((FIXADDR_TOT_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) \ + & PMD_MASK) + +#define PKMAP_BASE \ + ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK) #ifdef CONFIG_HIGHMEM # define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) #else -# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) +# define VMALLOC_END (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE) #endif #define MODULES_VADDR VMALLOC_START diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index e9f05331e732..4ecb72831938 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -28,6 +28,7 @@ extern pgd_t init_top_pgt[]; #define swapper_pg_dir init_top_pgt extern void paging_init(void); +static inline void sync_initial_page_table(void) { } #define pte_ERROR(e) \ pr_err("%s:%d: bad pte %p(%016lx)\n", \ @@ -131,9 +132,97 @@ static inline pud_t native_pudp_get_and_clear(pud_t *xp) #endif } +#ifdef CONFIG_PAGE_TABLE_ISOLATION +/* + * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages + * (8k-aligned and 8k in size). The kernel one is at the beginning 4k and + * the user one is in the last 4k. To switch between them, you + * just need to flip the 12th bit in their addresses. + */ +#define PTI_PGTABLE_SWITCH_BIT PAGE_SHIFT + +/* + * This generates better code than the inline assembly in + * __set_bit(). 
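The PTI_PGTABLE_SWITCH_BIT trick above, together with the pointer helpers defined next, reduces kernel/user page-table switching to flipping bit 12 of an 8 KiB-aligned pointer. A toy userspace model (assumptions as commented) of both the pointer flip and the lower-half/upper-half test:

/* Toy userspace model (not kernel code) of the PTI pointer helpers: an
 * 8 KiB-aligned, 8 KiB PGD pair whose halves differ only in bit 12, and
 * the lower-half/upper-half test for user vs. kernel entries. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE		4096UL
#define PTI_PGTABLE_SWITCH_BIT	12	/* == PAGE_SHIFT */

static void *kernel_to_user_pgdp(void *pgdp)
{
	return (void *)((uintptr_t)pgdp | (1UL << PTI_PGTABLE_SWITCH_BIT));
}

static bool pgdp_maps_userspace(void *ptr)
{
	/* user entries live in the lower half of the 4 KiB PGD page */
	return ((uintptr_t)ptr & (PAGE_SIZE - 1)) < (PAGE_SIZE / 2);
}

int main(void)
{
	/* kernel: __get_free_pages(..., PGD_ALLOCATION_ORDER) with order 1 */
	uint64_t *kpgd = aligned_alloc(2 * PAGE_SIZE, 2 * PAGE_SIZE);

	if (!kpgd)
		return 1;
	printf("kernel pgd %p -> user pgd %p\n",
	       (void *)kpgd, kernel_to_user_pgdp(kpgd));
	printf("entry 0 user-half? %d, entry 256 user-half? %d\n",
	       pgdp_maps_userspace(&kpgd[0]), pgdp_maps_userspace(&kpgd[256]));
	free(kpgd);
	return 0;
}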
+ */ +static inline void *ptr_set_bit(void *ptr, int bit) +{ + unsigned long __ptr = (unsigned long)ptr; + + __ptr |= BIT(bit); + return (void *)__ptr; +} +static inline void *ptr_clear_bit(void *ptr, int bit) +{ + unsigned long __ptr = (unsigned long)ptr; + + __ptr &= ~BIT(bit); + return (void *)__ptr; +} + +static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp) +{ + return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT); +} + +static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp) +{ + return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT); +} + +static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp) +{ + return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT); +} + +static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp) +{ + return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT); +} +#endif /* CONFIG_PAGE_TABLE_ISOLATION */ + +/* + * Page table pages are page-aligned. The lower half of the top + * level is used for userspace and the top half for the kernel. + * + * Returns true for parts of the PGD that map userspace and + * false for the parts that map the kernel. + */ +static inline bool pgdp_maps_userspace(void *__ptr) +{ + unsigned long ptr = (unsigned long)__ptr; + + return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2); +} + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd); + +/* + * Take a PGD location (pgdp) and a pgd value that needs to be set there. + * Populates the user and returns the resulting PGD that must be set in + * the kernel copy of the page tables. + */ +static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) +{ + if (!static_cpu_has(X86_FEATURE_PTI)) + return pgd; + return __pti_set_user_pgd(pgdp, pgd); +} +#else +static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) +{ + return pgd; +} +#endif + static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) { +#if defined(CONFIG_PAGE_TABLE_ISOLATION) && !defined(CONFIG_X86_5LEVEL) + p4dp->pgd = pti_set_user_pgd(&p4dp->pgd, p4d.pgd); +#else *p4dp = p4d; +#endif } static inline void native_p4d_clear(p4d_t *p4d) @@ -147,7 +236,11 @@ static inline void native_p4d_clear(p4d_t *p4d) static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) { +#ifdef CONFIG_PAGE_TABLE_ISOLATION + *pgdp = pti_set_user_pgd(pgdp, pgd); +#else *pgdp = pgd; +#endif } static inline void native_pgd_clear(pgd_t *pgd) @@ -183,7 +276,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; } * * | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names - * | OFFSET (14->63) | TYPE (9-13) |0|0|X|X| X| X|X|SD|0| <- swp entry + * | TYPE (59-63) | ~OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry * * G (8) is aliased and used as a PROT_NONE indicator for * !present ptes. We need to start storing swap entries above @@ -196,20 +289,34 @@ static inline int pgd_large(pgd_t pgd) { return 0; } * * Bit 7 in swp entry should be 0 because pmd_present checks not only P, * but also L and G. + * + * The offset is inverted by a binary not operation to make the high + * physical bits set. 
*/ -#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1) -#define SWP_TYPE_BITS 5 -/* Place the offset above the type: */ -#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS) +#define SWP_TYPE_BITS 5 + +#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1) + +/* We always extract/encode the offset by shifting it all the way up, and then down again */ +#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS) #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS) -#define __swp_type(x) (((x).val >> (SWP_TYPE_FIRST_BIT)) \ - & ((1U << SWP_TYPE_BITS) - 1)) -#define __swp_offset(x) ((x).val >> SWP_OFFSET_FIRST_BIT) -#define __swp_entry(type, offset) ((swp_entry_t) { \ - ((type) << (SWP_TYPE_FIRST_BIT)) \ - | ((offset) << SWP_OFFSET_FIRST_BIT) }) +/* Extract the high bits for type */ +#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS)) + +/* Shift up (to get rid of type), then down to get value */ +#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT) + +/* + * Shift the offset up "too far" by TYPE bits, then down again + * The offset is inverted by a binary not operation to make the high + * physical bits set. + */ +#define __swp_entry(type, offset) ((swp_entry_t) { \ + (~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \ + | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) }) + #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val((pmd)) }) #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) @@ -253,5 +360,7 @@ static inline bool gup_fast_permitted(unsigned long start, int nr_pages, return true; } +#include + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_PGTABLE_64_H */ diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 6d5f45dcd4a1..6b8f73dcbc2c 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -75,33 +75,52 @@ typedef struct { pteval_t pte; } pte_t; #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE - 1)) -/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */ -#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) +/* + * See Documentation/x86/x86_64/mm.txt for a description of the memory map. + * + * Be very careful vs. KASLR when changing anything here. The KASLR address + * range must not overlap with anything except the KASAN shadow area, which + * is correct as KASAN disables KASLR. 
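Returning to the inverted swap encoding defined earlier in this hunk: it can be sanity-checked with plain arithmetic. The standalone sketch below re-implements __swp_entry()/__swp_type()/__swp_offset() under the assumption that _PAGE_BIT_PROTNONE is bit 8 (x86's global bit), giving SWP_OFFSET_SHIFT = 14, and verifies the round trip:

/* Standalone arithmetic check (userspace; assumes _PAGE_BIT_PROTNONE == 8)
 * of the inverted swap-entry encoding defined above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SWP_TYPE_BITS		5
#define SWP_OFFSET_FIRST_BIT	(8 + 1)		/* _PAGE_BIT_PROTNONE + 1 */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

static uint64_t swp_entry(unsigned type, uint64_t offset)
{
	return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) |
	       ((uint64_t)type << (64 - SWP_TYPE_BITS));
}

int main(void)
{
	uint64_t e = swp_entry(7, 0xabcdef);

	assert((e >> (64 - SWP_TYPE_BITS)) == 7);		     /* __swp_type   */
	assert((~e << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT) == 0xabcdef); /* __swp_offset */
	/* the PFN field now has its high bits set, so it cannot alias real RAM */
	printf("swap entry %#llx decodes back to type 7, offset 0xabcdef\n",
	       (unsigned long long)e);
	return 0;
}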
+ */ +#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) + #ifdef CONFIG_X86_5LEVEL -#define VMALLOC_SIZE_TB _AC(16384, UL) -#define __VMALLOC_BASE _AC(0xff92000000000000, UL) -#define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) +# define VMALLOC_SIZE_TB _AC(12800, UL) +# define __VMALLOC_BASE _AC(0xffa0000000000000, UL) +# define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) +# define LDT_PGD_ENTRY _AC(-112, UL) +# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #else -#define VMALLOC_SIZE_TB _AC(32, UL) -#define __VMALLOC_BASE _AC(0xffffc90000000000, UL) -#define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) +# define VMALLOC_SIZE_TB _AC(32, UL) +# define __VMALLOC_BASE _AC(0xffffc90000000000, UL) +# define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) +# define LDT_PGD_ENTRY _AC(-3, UL) +# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #endif + #ifdef CONFIG_RANDOMIZE_MEMORY -#define VMALLOC_START vmalloc_base -#define VMEMMAP_START vmemmap_base +# define VMALLOC_START vmalloc_base +# define VMEMMAP_START vmemmap_base #else -#define VMALLOC_START __VMALLOC_BASE -#define VMEMMAP_START __VMEMMAP_BASE +# define VMALLOC_START __VMALLOC_BASE +# define VMEMMAP_START __VMEMMAP_BASE #endif /* CONFIG_RANDOMIZE_MEMORY */ -#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) -#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) + +#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) + +#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) /* The module sections ends with the start of the fixmap */ -#define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1) -#define MODULES_LEN (MODULES_END - MODULES_VADDR) -#define ESPFIX_PGD_ENTRY _AC(-2, UL) -#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) -#define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) -#define EFI_VA_END (-68 * (_AC(1, UL) << 30)) +#define MODULES_END _AC(0xffffffffff000000, UL) +#define MODULES_LEN (MODULES_END - MODULES_VADDR) + +#define ESPFIX_PGD_ENTRY _AC(-2, UL) +#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) + +#define CPU_ENTRY_AREA_PGD _AC(-4, UL) +#define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT) + +#define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) +#define EFI_VA_END (-68 * (_AC(1, UL) << 30)) #define EARLY_DYNAMIC_PAGE_TABLES 64 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 59df7b47a434..246f15b4e64c 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -32,7 +32,6 @@ #define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1 #define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1 -#define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */ #define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */ #define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4 @@ -79,18 +78,6 @@ #define _PAGE_KNL_ERRATUM_MASK 0 #endif -#ifdef CONFIG_KMEMCHECK -#define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) -#else -#define _PAGE_HIDDEN (_AT(pteval_t, 0)) -#endif - -/* - * The same hidden bit is used by kmemcheck, but since kmemcheck - * works on kernel pages while soft-dirty engine on user space, - * they do not conflict with each other. 
- */ - #ifdef CONFIG_MEM_SOFT_DIRTY #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY) #else @@ -200,10 +187,9 @@ enum page_cache_mode { #define _PAGE_ENC (_AT(pteval_t, sme_me_mask)) -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ - _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_ENC) #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \ _PAGE_DIRTY | _PAGE_ENC) +#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER) #define __PAGE_KERNEL_ENC (__PAGE_KERNEL | _PAGE_ENC) #define __PAGE_KERNEL_ENC_WP (__PAGE_KERNEL_WP | _PAGE_ENC) @@ -337,6 +323,11 @@ static inline pudval_t native_pud_val(pud_t pud) #else #include +static inline pud_t native_make_pud(pudval_t val) +{ + return (pud_t) { .p4d.pgd = native_make_pgd(val) }; +} + static inline pudval_t native_pud_val(pud_t pud) { return native_pgd_val(pud.p4d.pgd); @@ -358,6 +349,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) #else #include +static inline pmd_t native_make_pmd(pmdval_t val) +{ + return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) }; +} + static inline pmdval_t native_pmd_val(pmd_t pmd) { return native_pgd_val(pmd.pud.p4d.pgd); diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h index a0ba1ffda0df..851c04b7a092 100644 --- a/arch/x86/include/asm/pkeys.h +++ b/arch/x86/include/asm/pkeys.h @@ -2,6 +2,8 @@ #ifndef _ASM_X86_PKEYS_H #define _ASM_X86_PKEYS_H +#define ARCH_DEFAULT_PKEY 0 + #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1) extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, @@ -15,7 +17,7 @@ extern int __execute_only_pkey(struct mm_struct *mm); static inline int execute_only_pkey(struct mm_struct *mm) { if (!boot_cpu_has(X86_FEATURE_OSPKE)) - return 0; + return ARCH_DEFAULT_PKEY; return __execute_only_pkey(mm); } @@ -49,13 +51,21 @@ bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey) { /* * "Allocated" pkeys are those that have been returned - * from pkey_alloc(). pkey 0 is special, and never - * returned from pkey_alloc(). + * from pkey_alloc() or pkey 0 which is allocated + * implicitly when the mm is created. */ - if (pkey <= 0) + if (pkey < 0) return false; if (pkey >= arch_max_pkey()) return false; + /* + * The exec-only pkey is set in the allocation map, but + * is not available to any of the user interfaces like + * mprotect_pkey(). 
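The relaxed pkey check above changes two things: pkey 0 now counts as implicitly allocated, and (as the check that follows shows) the execute-only pkey stays hidden from user-visible interfaces. A self-contained model of that policy:

/* Self-contained model of the pkey allocation policy described above:
 * pkey 0 is implicitly allocated with the mm, out-of-range keys are
 * rejected, and the execute-only pkey is hidden from user interfaces. */
#include <stdbool.h>
#include <stdio.h>

#define ARCH_MAX_PKEY	16	/* arch_max_pkey() when OSPKE is set */

struct mm_model {
	unsigned short pkey_allocation_map;	/* bit n set => pkey n allocated */
	int execute_only_pkey;			/* -1 when none reserved */
};

static bool mm_pkey_is_allocated(const struct mm_model *mm, int pkey)
{
	if (pkey < 0 || pkey >= ARCH_MAX_PKEY)
		return false;
	if (pkey == mm->execute_only_pkey)	/* reserved, not user-visible */
		return false;
	return mm->pkey_allocation_map & (1U << pkey);
}

int main(void)
{
	/* pkey 0 implicitly allocated; pkey 15 reserved for execute-only */
	struct mm_model mm = { .pkey_allocation_map = 0x1 | (1U << 15),
			       .execute_only_pkey = 15 };

	printf("pkey 0:  %d (implicit)\n", mm_pkey_is_allocated(&mm, 0));
	printf("pkey 15: %d (execute-only, hidden)\n", mm_pkey_is_allocated(&mm, 15));
	printf("pkey 1:  %d (never allocated)\n", mm_pkey_is_allocated(&mm, 1));
	return 0;
}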
+ */ + if (pkey == mm->context.execute_only_pkey) + return false; + return mm_pkey_allocation_map(mm) & (1U << pkey); } diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h index 43212a43ee69..625a52a5594f 100644 --- a/arch/x86/include/asm/processor-flags.h +++ b/arch/x86/include/asm/processor-flags.h @@ -38,6 +38,11 @@ #define CR3_ADDR_MASK __sme_clr(0x7FFFFFFFFFFFF000ull) #define CR3_PCID_MASK 0xFFFull #define CR3_NOFLUSH BIT_ULL(63) + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +# define X86_CR3_PTI_PCID_USER_BIT 11 +#endif + #else /* * CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index bdac19ab2488..0e856c0628b3 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -91,7 +91,7 @@ struct cpuinfo_x86 { __u8 x86; /* CPU family */ __u8 x86_vendor; /* CPU vendor */ __u8 x86_model; - __u8 x86_mask; + __u8 x86_stepping; #ifdef CONFIG_X86_64 /* Number of 4K pages in DTLB/ITLB combined(in pages): */ int x86_tlbsize; @@ -109,7 +109,7 @@ struct cpuinfo_x86 { char x86_vendor_id[16]; char x86_model_id[64]; /* in KB - valid for CPUS which support this call: */ - int x86_cache_size; + unsigned int x86_cache_size; int x86_cache_alignment; /* In bytes */ /* Cache QoS architectural values: */ int x86_cache_max_rmid; /* max index */ @@ -162,9 +162,9 @@ enum cpuid_regs_idx { extern struct cpuinfo_x86 boot_cpu_data; extern struct cpuinfo_x86 new_cpu_data; -extern struct tss_struct doublefault_tss; -extern __u32 cpu_caps_cleared[NCAPINTS]; -extern __u32 cpu_caps_set[NCAPINTS]; +extern struct x86_hw_tss doublefault_tss; +extern __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS]; +extern __u32 cpu_caps_set[NCAPINTS + NBUGINTS]; #ifdef CONFIG_SMP DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); @@ -180,6 +180,11 @@ extern const struct seq_operations cpuinfo_op; extern void cpu_detect(struct cpuinfo_x86 *c); +static inline unsigned long l1tf_pfn_limit(void) +{ + return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1; +} + extern void early_cpu_init(void); extern void identify_boot_cpu(void); extern void identify_secondary_cpu(struct cpuinfo_x86 *); @@ -252,6 +257,11 @@ static inline void load_cr3(pgd_t *pgdir) write_cr3(__sme_pa(pgdir)); } +/* + * Note that while the legacy 'TSS' name comes from 'Task State Segment', + * on modern x86 CPUs the TSS also holds information important to 64-bit mode, + * unrelated to the task-switch mechanism: + */ #ifdef CONFIG_X86_32 /* This is the TSS defined by the hardware. */ struct x86_hw_tss { @@ -304,7 +314,13 @@ struct x86_hw_tss { struct x86_hw_tss { u32 reserved1; u64 sp0; + + /* + * We store cpu_current_top_of_stack in sp1 so it's always accessible. + * Linux does not use ring 1, so sp1 is not otherwise needed. + */ u64 sp1; + u64 sp2; u64 reserved2; u64 ist[7]; @@ -322,12 +338,22 @@ struct x86_hw_tss { #define IO_BITMAP_BITS 65536 #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) -#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) +#define IO_BITMAP_OFFSET (offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss)) #define INVALID_IO_BITMAP_OFFSET 0x8000 +struct entry_stack { + unsigned long words[64]; +}; + +struct entry_stack_page { + struct entry_stack stack; +} __aligned(PAGE_SIZE); + struct tss_struct { /* - * The hardware state: + * The fixed hardware portion. 
This must not cross a page boundary + * at risk of violating the SDM's advice and potentially triggering + * errata. */ struct x86_hw_tss x86_tss; @@ -338,18 +364,9 @@ struct tss_struct { * be within the limit. */ unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; +} __aligned(PAGE_SIZE); -#ifdef CONFIG_X86_32 - /* - * Space for the temporary SYSENTER stack. - */ - unsigned long SYSENTER_stack_canary; - unsigned long SYSENTER_stack[64]; -#endif - -} ____cacheline_aligned; - -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss); +DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw); /* * sizeof(unsigned long) coming from an extra "long" at the end @@ -363,6 +380,9 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss); #ifdef CONFIG_X86_32 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack); +#else +/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */ +#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1 #endif /* @@ -431,7 +451,9 @@ typedef struct { struct thread_struct { /* Cached TLS descriptors: */ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; +#ifdef CONFIG_X86_32 unsigned long sp0; +#endif unsigned long sp; #ifdef CONFIG_X86_32 unsigned long sysenter_cs; @@ -442,8 +464,6 @@ struct thread_struct { unsigned short gsindex; #endif - u32 status; /* thread synchronous flags */ - #ifdef CONFIG_X86_64 unsigned long fsbase; unsigned long gsbase; @@ -518,16 +538,9 @@ static inline void native_set_iopl_mask(unsigned mask) } static inline void -native_load_sp0(struct tss_struct *tss, struct thread_struct *thread) +native_load_sp0(unsigned long sp0) { - tss->x86_tss.sp0 = thread->sp0; -#ifdef CONFIG_X86_32 - /* Only happens when SEP is enabled, no need to test "SEP"arately: */ - if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { - tss->x86_tss.ss1 = thread->sysenter_cs; - wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); - } -#endif + this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0); } static inline void native_swapgs(void) @@ -539,12 +552,18 @@ static inline void native_swapgs(void) static inline unsigned long current_top_of_stack(void) { -#ifdef CONFIG_X86_64 - return this_cpu_read_stable(cpu_tss.x86_tss.sp0); -#else - /* sp0 on x86_32 is special in and around vm86 mode. */ + /* + * We can't read directly from tss.sp0: sp0 on x86_32 is special in + * and around vm86 mode and sp0 on x86_64 is special because of the + * entry trampoline. + */ return this_cpu_read_stable(cpu_current_top_of_stack); -#endif +} + +static inline bool on_thread_stack(void) +{ + return (unsigned long)(current_top_of_stack() - + current_stack_pointer) < THREAD_SIZE; } #ifdef CONFIG_PARAVIRT @@ -552,10 +571,9 @@ static inline unsigned long current_top_of_stack(void) #else #define __cpuid native_cpuid -static inline void load_sp0(struct tss_struct *tss, - struct thread_struct *thread) +static inline void load_sp0(unsigned long sp0) { - native_load_sp0(tss, thread); + native_load_sp0(sp0); } #define set_iopl_mask native_set_iopl_mask @@ -804,6 +822,15 @@ static inline void spin_lock_prefetch(const void *x) #define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \ TOP_OF_KERNEL_STACK_PADDING) +#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1)) + +#define task_pt_regs(task) \ +({ \ + unsigned long __ptr = (unsigned long)task_stack_page(task); \ + __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \ + ((struct pt_regs *)__ptr) - 1; \ +}) + #ifdef CONFIG_X86_32 /* * User space process size: 3GB (default). 
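A quick aside on on_thread_stack() above: it relies on a single unsigned comparison. If the stack pointer is above the top of the stack, the subtraction wraps to a huge value and the test fails, so one compare covers both bounds. A toy demonstration with made-up addresses (THREAD_SIZE of 16 KiB is an assumption here):

/* Illustration (userspace, toy values) of the unsigned-wraparound range
 * check used by on_thread_stack(). */
#include <stdbool.h>
#include <stdio.h>

#define THREAD_SIZE	(16UL * 1024)	/* typical x86_64 value, assumption */

static bool on_thread_stack(unsigned long top, unsigned long sp)
{
	/* if sp > top, the subtraction wraps to a huge value and fails */
	return (top - sp) < THREAD_SIZE;
}

int main(void)
{
	unsigned long top = 0xffffc90000414000UL;

	printf("sp just below top:  %d\n", on_thread_stack(top, top - 0x100));
	printf("sp above top:       %d\n", on_thread_stack(top, top + 0x100));
	printf("sp a byte too deep: %d\n", on_thread_stack(top, top - THREAD_SIZE - 1));
	return 0;
}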
@@ -823,34 +850,26 @@ static inline void spin_lock_prefetch(const void *x) .addr_limit = KERNEL_DS, \ } -/* - * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack. - * This is necessary to guarantee that the entire "struct pt_regs" - * is accessible even if the CPU haven't stored the SS/ESP registers - * on the stack (interrupt gate does not save these registers - * when switching to the same priv ring). - * Therefore beware: accessing the ss/esp fields of the - * "struct pt_regs" is possible, but they may contain the - * completely wrong values. - */ -#define task_pt_regs(task) \ -({ \ - unsigned long __ptr = (unsigned long)task_stack_page(task); \ - __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \ - ((struct pt_regs *)__ptr) - 1; \ -}) - #define KSTK_ESP(task) (task_pt_regs(task)->sp) #else /* - * User space process size. 47bits minus one guard page. The guard - * page is necessary on Intel CPUs: if a SYSCALL instruction is at - * the highest possible canonical userspace address, then that - * syscall will enter the kernel with a non-canonical return - * address, and SYSRET will explode dangerously. We avoid this - * particular problem by preventing anything from being mapped - * at the maximum canonical address. + * User space process size. This is the first address outside the user range. + * There are a few constraints that determine this: + * + * On Intel CPUs, if a SYSCALL instruction is at the highest canonical + * address, then that syscall will enter the kernel with a + * non-canonical return address, and SYSRET will explode dangerously. + * We avoid this particular problem by preventing anything executable + * from being mapped at the maximum canonical address. + * + * On AMD CPUs in the Ryzen family, there's a nasty bug in which the + * CPUs malfunction if they execute code from the highest canonical page. + * They'll speculate right off the end of the canonical space, and + * bad things happen. This is worked around in the same way as the + * Intel problem. + * + * With page table isolation enabled, we map the LDT in ... 
[stay tuned] */ #define TASK_SIZE_MAX ((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE) @@ -873,11 +892,9 @@ static inline void spin_lock_prefetch(const void *x) #define STACK_TOP_MAX TASK_SIZE_MAX #define INIT_THREAD { \ - .sp0 = TOP_OF_INIT_STACK, \ .addr_limit = KERNEL_DS, \ } -#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) extern unsigned long KSTK_ESP(struct task_struct *task); #endif /* CONFIG_X86_64 */ @@ -956,4 +973,17 @@ bool xen_set_default_idle(void); void stop_this_cpu(void *dummy); void df_debug(struct pt_regs *regs, long error_code); +void microcode_check(void); + +enum l1tf_mitigations { + L1TF_MITIGATION_OFF, + L1TF_MITIGATION_FLUSH_NOWARN, + L1TF_MITIGATION_FLUSH, + L1TF_MITIGATION_FLUSH_NOSMT, + L1TF_MITIGATION_FULL, + L1TF_MITIGATION_FULL_FORCE +}; + +extern enum l1tf_mitigations l1tf_mitigation; + #endif /* _ASM_X86_PROCESSOR_H */ diff --git a/arch/x86/include/asm/pti.h b/arch/x86/include/asm/pti.h new file mode 100644 index 000000000000..0b5ef05b2d2d --- /dev/null +++ b/arch/x86/include/asm/pti.h @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef _ASM_X86_PTI_H +#define _ASM_X86_PTI_H +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +extern void pti_init(void); +extern void pti_check_boottime_disable(void); +#else +static inline void pti_check_boottime_disable(void) { } +#endif + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_X86_PTI_H */ diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index c0e3c45cf6ab..14131dd06b29 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -136,9 +136,9 @@ static inline int v8086_mode(struct pt_regs *regs) #endif } -#ifdef CONFIG_X86_64 static inline bool user_64bit_mode(struct pt_regs *regs) { +#ifdef CONFIG_X86_64 #ifndef CONFIG_PARAVIRT /* * On non-paravirt systems, this is the only long mode CPL 3 @@ -149,8 +149,12 @@ static inline bool user_64bit_mode(struct pt_regs *regs) /* Headers are too twisted for this to go in paravirt.h. */ return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; #endif +#else /* !CONFIG_X86_64 */ + return false; +#endif } +#ifdef CONFIG_X86_64 #define current_user_stack_pointer() current_pt_regs()->sp #define compat_user_stack_pointer() current_pt_regs()->sp #endif diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h index ff871210b9f2..d65171120e90 100644 --- a/arch/x86/include/asm/refcount.h +++ b/arch/x86/include/asm/refcount.h @@ -15,7 +15,7 @@ * back to the regular execution flow in .text. 
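The l1tf_mitigations enum earlier in this hunk enumerates the mitigation modes selectable at boot. As a hedged sketch only — the actual "l1tf=" option strings are not shown in this hunk and are assumed here — a command-line parser for it might look like:

/* Hedged sketch: one plausible mapping from "l1tf=" command-line strings
 * to the enum above; the exact strings are an assumption, not shown in
 * this hunk. */
#include <string.h>

enum l1tf_mitigations {
	L1TF_MITIGATION_OFF,
	L1TF_MITIGATION_FLUSH_NOWARN,
	L1TF_MITIGATION_FLUSH,
	L1TF_MITIGATION_FLUSH_NOSMT,
	L1TF_MITIGATION_FULL,
	L1TF_MITIGATION_FULL_FORCE
};

static int parse_l1tf(const char *s, enum l1tf_mitigations *out)
{
	if (!strcmp(s, "off"))			*out = L1TF_MITIGATION_OFF;
	else if (!strcmp(s, "flush,nowarn"))	*out = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(s, "flush"))		*out = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(s, "flush,nosmt"))	*out = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(s, "full"))		*out = L1TF_MITIGATION_FULL;
	else if (!strcmp(s, "full,force"))	*out = L1TF_MITIGATION_FULL_FORCE;
	else
		return -1;
	return 0;
}

int main(void)
{
	enum l1tf_mitigations m;

	return parse_l1tf("flush,nosmt", &m) == 0 &&
	       m == L1TF_MITIGATION_FLUSH_NOSMT ? 0 : 1;
}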
*/ #define _REFCOUNT_EXCEPTION \ - ".pushsection .text.unlikely\n" \ + ".pushsection .text..refcount\n" \ "111:\tlea %[counter], %%" _ASM_CX "\n" \ "112:\t" ASM_UD0 "\n" \ ASM_UNREACHABLE \ @@ -67,13 +67,13 @@ static __always_inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r) { GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO, - r->refs.counter, "er", i, "%0", e); + r->refs.counter, "er", i, "%0", e, "cx"); } static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r) { GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO, - r->refs.counter, "%0", e); + r->refs.counter, "%0", e, "cx"); } static __always_inline __must_check diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index d91ba04dd007..fb3a6de7440b 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -106,6 +106,7 @@ #define REQUIRED_MASK15 0 #define REQUIRED_MASK16 (NEED_LA57) #define REQUIRED_MASK17 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) +#define REQUIRED_MASK18 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h index d8f3a6ae9f6c..4914a3e7c803 100644 --- a/arch/x86/include/asm/rmwcc.h +++ b/arch/x86/include/asm/rmwcc.h @@ -2,8 +2,7 @@ #ifndef _ASM_X86_RMWcc #define _ASM_X86_RMWcc -#define __CLOBBERS_MEM "memory" -#define __CLOBBERS_MEM_CC_CX "memory", "cc", "cx" +#define __CLOBBERS_MEM(clb...) "memory", ## clb #if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO) @@ -29,7 +28,7 @@ cc_label: \ #define __GEN_RMWcc(fullop, var, cc, clobbers, ...) \ do { \ bool c; \ - asm volatile (fullop ";" CC_SET(cc) \ + asm volatile (fullop CC_SET(cc) \ : [counter] "+m" (var), CC_OUT(cc) (c) \ : __VA_ARGS__ : clobbers); \ return c; \ @@ -40,18 +39,19 @@ do { \ #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ #define GEN_UNARY_RMWcc(op, var, arg0, cc) \ - __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM) + __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM()) -#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc) \ +#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc, clobbers...)\ __GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc, \ - __CLOBBERS_MEM_CC_CX) + __CLOBBERS_MEM(clobbers)) #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc, \ - __CLOBBERS_MEM, vcon (val)) + __CLOBBERS_MEM(), vcon (val)) -#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc) \ +#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc, \ + clobbers...) 
\ __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc, \ - __CLOBBERS_MEM_CC_CX, vcon (val)) + __CLOBBERS_MEM(clobbers), vcon (val)) #endif /* _ASM_X86_RMWcc */ diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h index d6baf23782bc..5c019d23d06b 100644 --- a/arch/x86/include/asm/sections.h +++ b/arch/x86/include/asm/sections.h @@ -10,6 +10,7 @@ extern struct exception_table_entry __stop___ex_table[]; #if defined(CONFIG_X86_64) extern char __end_rodata_hpage_align[]; +extern char __entry_trampoline_start[], __entry_trampoline_end[]; #endif #endif /* _ASM_X86_SECTIONS_H */ diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index b20f9d623f9c..8f09012b92e7 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -236,11 +236,23 @@ */ #define EARLY_IDT_HANDLER_SIZE 9 +/* + * xen_early_idt_handler_array is for Xen pv guests: for each entry in + * early_idt_handler_array it contains a prequel in the form of + * pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to + * max 8 bytes. + */ +#define XEN_EARLY_IDT_HANDLER_SIZE 8 + #ifndef __ASSEMBLY__ extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE]; extern void early_ignore_irq(void); +#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV) +extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE]; +#endif + /* * Load a segment. Fall back on loading the zero segment if something goes * wrong. This variant assumes that loading zero fully clears the segment. diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index ae13bc974416..9490cb15a275 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -4,7 +4,7 @@ #include -#define COMMAND_LINE_SIZE 2048 +#define COMMAND_LINE_SIZE 4096 #include #include diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 461f53d27708..fe2ee61880a8 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -170,7 +170,6 @@ static inline int wbinvd_on_all_cpus(void) wbinvd(); return 0; } -#define smp_num_siblings 1 #endif /* CONFIG_SMP */ extern unsigned disabled_cpus; diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h new file mode 100644 index 000000000000..ae7c2c5cd7f0 --- /dev/null +++ b/arch/x86/include/asm/spec-ctrl.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_SPECCTRL_H_ +#define _ASM_X86_SPECCTRL_H_ + +#include +#include + +/* + * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR + * the guest has, while on VMEXIT we restore the host view. This + * would be easier if SPEC_CTRL were architecturally maskable or + * shadowable for guests but this is not (currently) the case. + * Takes the guest view of SPEC_CTRL MSR as a parameter and also + * the guest's version of VIRT_SPEC_CTRL, if emulated. 
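The comment above describes the VMENTER/VMEXIT bracket; the x86_virt_spec_ctrl() declaration and its set_guest/restore_host wrappers follow. Below is a toy, hypervisor-side usage sketch: run_guest() and the MSR bookkeeping are invented stand-ins, and only the call pattern mirrors the real wrappers.

/* Toy usage sketch; run_guest() and the MSR bookkeeping are invented
 * stand-ins, only the set_guest/restore_host bracketing is the point. */
#include <stdint.h>
#include <stdio.h>

static uint64_t host_spec_ctrl;	/* models x86_spec_ctrl_base          */
static uint64_t current_msr;	/* models the live MSR_IA32_SPEC_CTRL */

static void x86_virt_spec_ctrl(uint64_t guest_spec_ctrl,
			       uint64_t guest_virt_spec_ctrl, int setguest)
{
	(void)guest_virt_spec_ctrl;	/* AMD LS_CFG translation elided */
	current_msr = setguest ? guest_spec_ctrl : host_spec_ctrl;
}

static void run_guest(void)
{
	printf("guest runs with SPEC_CTRL=%#llx\n",
	       (unsigned long long)current_msr);
}

int main(void)
{
	uint64_t guest_spec_ctrl = 0x1;			/* guest enabled IBRS */

	x86_virt_spec_ctrl(guest_spec_ctrl, 0, 1);	/* ...set_guest() on VMENTER */
	run_guest();
	x86_virt_spec_ctrl(guest_spec_ctrl, 0, 0);	/* ...restore_host() on VMEXIT */
	printf("host view restored: SPEC_CTRL=%#llx\n",
	       (unsigned long long)current_msr);
	return 0;
}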
+ */ +extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest); + +/** + * x86_spec_ctrl_set_guest - Set speculation control registers for the guest + * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL + * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL + * (may get translated to MSR_AMD64_LS_CFG bits) + * + * Avoids writing to the MSR if the content/bits are the same + */ +static inline +void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) +{ + x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true); +} + +/** + * x86_spec_ctrl_restore_host - Restore host speculation control registers + * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL + * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL + * (may get translated to MSR_AMD64_LS_CFG bits) + * + * Avoids writing to the MSR if the content/bits are the same + */ +static inline +void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) +{ + x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false); +} + +/* AMD specific Speculative Store Bypass MSR data */ +extern u64 x86_amd_ls_cfg_base; +extern u64 x86_amd_ls_cfg_ssbd_mask; + +static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn) +{ + BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); + return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); +} + +static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl) +{ + BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); + return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); +} + +static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn) +{ + return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL; +} + +#ifdef CONFIG_SMP +extern void speculative_store_bypass_ht_init(void); +#else +static inline void speculative_store_bypass_ht_init(void) { } +#endif + +extern void speculative_store_bypass_update(unsigned long tif); + +static inline void speculative_store_bypass_update_current(void) +{ + speculative_store_bypass_update(current_thread_info()->flags); +} + +#endif diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index 8da111b3c342..f73706878772 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h @@ -16,6 +16,7 @@ enum stack_type { STACK_TYPE_TASK, STACK_TYPE_IRQ, STACK_TYPE_SOFTIRQ, + STACK_TYPE_ENTRY, STACK_TYPE_EXCEPTION, STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1, }; @@ -28,6 +29,8 @@ struct stack_info { bool in_task_stack(unsigned long *stack, struct task_struct *task, struct stack_info *info); +bool in_entry_stack(unsigned long *stack, struct stack_info *info); + int get_stack_info(unsigned long *stack, struct task_struct *task, struct stack_info *info, unsigned long *visit_mask); diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h index 076502241eae..55d392c6bd29 100644 --- a/arch/x86/include/asm/string_32.h +++ b/arch/x86/include/asm/string_32.h @@ -179,8 +179,6 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len) * No 3D Now! */ -#ifndef CONFIG_KMEMCHECK - #if (__GNUC__ >= 4) #define memcpy(t, f, n) __builtin_memcpy(t, f, n) #else @@ -189,13 +187,6 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len) ? 
__constant_memcpy((t), (f), (n)) \ : __memcpy((t), (f), (n))) #endif -#else -/* - * kmemcheck becomes very happy if we use the REP instructions unconditionally, - * because it means that we know both memory operands in advance. - */ -#define memcpy(t, f, n) __memcpy((t), (f), (n)) -#endif #endif #endif /* !CONFIG_FORTIFY_SOURCE */ diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h index 0b1b4445f4c5..533f74c300c2 100644 --- a/arch/x86/include/asm/string_64.h +++ b/arch/x86/include/asm/string_64.h @@ -33,7 +33,6 @@ extern void *memcpy(void *to, const void *from, size_t len); extern void *__memcpy(void *to, const void *from, size_t len); #ifndef CONFIG_FORTIFY_SOURCE -#ifndef CONFIG_KMEMCHECK #if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4 #define memcpy(dst, src, len) \ ({ \ @@ -46,13 +45,6 @@ extern void *__memcpy(void *to, const void *from, size_t len); __ret; \ }) #endif -#else -/* - * kmemcheck becomes very happy if we use the REP instructions unconditionally, - * because it means that we know both memory operands in advance. - */ -#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len)) -#endif #endif /* !CONFIG_FORTIFY_SOURCE */ #define __HAVE_ARCH_MEMSET diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index 899084b70412..9b6df68d8fd1 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -2,6 +2,8 @@ #ifndef _ASM_X86_SWITCH_TO_H #define _ASM_X86_SWITCH_TO_H +#include + struct task_struct; /* one of the stranger aspects of C forward declarations */ struct task_struct *__switch_to_asm(struct task_struct *prev, @@ -73,4 +75,28 @@ do { \ ((last) = __switch_to_asm((prev), (next))); \ } while (0) +#ifdef CONFIG_X86_32 +static inline void refresh_sysenter_cs(struct thread_struct *thread) +{ + /* Only happens when SEP is enabled, no need to test "SEP"arately: */ + if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs)) + return; + + this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs); + wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); +} +#endif + +/* This is used when switching tasks or entering/exiting vm86 mode. */ +static inline void update_sp0(struct task_struct *task) +{ + /* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */ +#ifdef CONFIG_X86_32 + load_sp0(task->thread.sp0); +#else + if (static_cpu_has(X86_FEATURE_XENPV)) + load_sp0(task_top_of_stack(task)); +#endif +} + #endif /* _ASM_X86_SWITCH_TO_H */ diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index e3c95e8e61c5..03eedc21246d 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -60,7 +60,7 @@ static inline long syscall_get_error(struct task_struct *task, * TS_COMPAT is set for 32-bit syscall entries and then * remains set until we return to user mode. */ - if (task->thread.status & (TS_COMPAT|TS_I386_REGS_POKED)) + if (task->thread_info.status & (TS_COMPAT|TS_I386_REGS_POKED)) /* * Sign-extend the value so (int)-EFOO becomes (long)-EFOO * and will match correctly in comparisons. 
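The sign-extension comment above is worth making concrete: a 32-bit error code sitting zero-extended in a 64-bit register does not compare equal to its 64-bit form. A short userspace demonstration, using -ENOSYS (which is -38 on x86):

/* Demonstration of why the 32-bit syscall path sign-extends return
 * values before comparison. */
#include <stdio.h>

int main(void)
{
	unsigned long raw = 0xffffffda;	/* 32-bit view of -38 (-ENOSYS) */
	long wrong = (long)raw;		/* zero-extended: 4294967258    */
	long right = (long)(int)raw;	/* sign-extended: -38           */

	printf("zero-extended: %ld, sign-extended: %ld\n", wrong, right);
	printf("matches -38? wrong:%d right:%d\n", wrong == -38, right == -38);
	return 0;
}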
@@ -116,7 +116,7 @@ static inline void syscall_get_arguments(struct task_struct *task, unsigned long *args) { # ifdef CONFIG_IA32_EMULATION - if (task->thread.status & TS_COMPAT) + if (task->thread_info.status & TS_COMPAT) switch (i) { case 0: if (!n--) break; @@ -177,7 +177,7 @@ static inline void syscall_set_arguments(struct task_struct *task, const unsigned long *args) { # ifdef CONFIG_IA32_EMULATION - if (task->thread.status & TS_COMPAT) + if (task->thread_info.status & TS_COMPAT) switch (i) { case 0: if (!n--) break; diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 91dfcafe27a6..bad25bb80679 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -21,7 +21,7 @@ asmlinkage long sys_ioperm(unsigned long, unsigned long, int); asmlinkage long sys_iopl(unsigned int); /* kernel/ldt.c */ -asmlinkage int sys_modify_ldt(int, void __user *, unsigned long); +asmlinkage long sys_modify_ldt(int, void __user *, unsigned long); /* kernel/signal.c */ asmlinkage long sys_rt_sigreturn(void); diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 70f425947dc5..95ff2d7f553f 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -55,6 +55,7 @@ struct task_struct; struct thread_info { unsigned long flags; /* low level flags */ + u32 status; /* thread synchronous flags */ }; #define INIT_THREAD_INFO(tsk) \ @@ -80,6 +81,7 @@ struct thread_info { #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ +#define TIF_SSBD 5 /* Reduced data speculation */ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ @@ -106,6 +108,7 @@ struct thread_info { #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) +#define _TIF_SSBD (1 << TIF_SSBD) #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) @@ -145,7 +148,7 @@ struct thread_info { /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW \ - (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP) + (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD) #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) @@ -207,7 +210,7 @@ static inline int arch_within_stack_frames(const void * const stack, #else /* !__ASSEMBLY__ */ #ifdef CONFIG_X86_64 -# define cpu_current_top_of_stack (cpu_tss + TSS_sp0) +# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1) #endif #endif @@ -221,7 +224,7 @@ static inline int arch_within_stack_frames(const void * const stack, #define in_ia32_syscall() true #else #define in_ia32_syscall() (IS_ENABLED(CONFIG_IA32_EMULATION) && \ - current->thread.status & TS_COMPAT) + current_thread_info()->status & TS_COMPAT) #endif /* diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 509046cfa5ce..875ca99b82ee 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -9,70 +9,135 @@ #include #include #include +#include +#include +#include -static inline void __invpcid(unsigned long pcid, unsigned long addr, - unsigned long type) -{ - struct { u64 d[2]; } desc = { { pcid, addr } }; +/* + * The x86 
feature is called PCID (Process Context IDentifier). It is similar + * to what is traditionally called ASID on the RISC processors. + * + * We don't use the traditional ASID implementation, where each process/mm gets + * its own ASID and flush/restart when we run out of ASID space. + * + * Instead we have a small per-cpu array of ASIDs and cache the last few mm's + * that came by on this CPU, allowing cheaper switch_mm between processes on + * this CPU. + * + * We end up with different spaces for different things. To avoid confusion we + * use different names for each of them: + * + * ASID - [0, TLB_NR_DYN_ASIDS-1] + * the canonical identifier for an mm + * + * kPCID - [1, TLB_NR_DYN_ASIDS] + * the value we write into the PCID part of CR3; corresponds to the + * ASID+1, because PCID 0 is special. + * + * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS] + * for KPTI each mm has two address spaces and thus needs two + * PCID values, but we can still do with a single ASID denomination + * for each mm. Corresponds to kPCID + 2048. + * + */ - /* - * The memory clobber is because the whole point is to invalidate - * stale TLB entries and, especially if we're flushing global - * mappings, we don't want the compiler to reorder any subsequent - * memory accesses before the TLB flush. - * - * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and - * invpcid (%rcx), %rax in long mode. - */ - asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01" - : : "m" (desc), "a" (type), "c" (&desc) : "memory"); -} +/* There are 12 bits of space for ASIDS in CR3 */ +#define CR3_HW_ASID_BITS 12 -#define INVPCID_TYPE_INDIV_ADDR 0 -#define INVPCID_TYPE_SINGLE_CTXT 1 -#define INVPCID_TYPE_ALL_INCL_GLOBAL 2 -#define INVPCID_TYPE_ALL_NON_GLOBAL 3 +/* + * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for + * user/kernel switches + */ +#ifdef CONFIG_PAGE_TABLE_ISOLATION +# define PTI_CONSUMED_PCID_BITS 1 +#else +# define PTI_CONSUMED_PCID_BITS 0 +#endif -/* Flush all mappings for a given pcid and addr, not including globals. */ -static inline void invpcid_flush_one(unsigned long pcid, - unsigned long addr) -{ - __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR); -} +#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS) + +/* + * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid. -1 below to account + * for them being zero-based. Another -1 is because PCID 0 is reserved for + * use by non-PCID-aware users. + */ +#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2) + +/* + * 6 because 6 should be plenty and struct tlb_state will fit in two cache + * lines. + */ +#define TLB_NR_DYN_ASIDS 6 -/* Flush all mappings for a given PCID, not including globals. */ -static inline void invpcid_flush_single_context(unsigned long pcid) +/* + * Given @asid, compute kPCID + */ +static inline u16 kern_pcid(u16 asid) { - __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT); + VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); + +#ifdef CONFIG_PAGE_TABLE_ISOLATION + /* + * Make sure that the dynamic ASID space does not conflict with the + * bit we are using to switch between user and kernel ASIDs. + */ + BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT)); + + /* + * The ASID being passed in here should have respected the + * MAX_ASID_AVAILABLE and thus never have the switch bit set. + */ + VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT)); +#endif + /* + * The dynamically-assigned ASIDs that get passed in are small + * (<TLB_NR_DYN_ASIDS). They never have the high switch bit set, + * so do not bother to clear it. + * + * If PCID is on, ASID-aware code paths put the ASID+1 into the + * PCID bits. This serves two purposes. It prevents a nasty situation + * in which PCID-unaware code saves CR3, loads some other value (with + * PCID == 0), and then restores CR3, thus corrupting the TLB for + * ASID 0 if the saved ASID was nonzero. It also means that any bugs + * involving loading a PCID-enabled CR3 with CR3_NOFLUSH set will + * fault instead of corrupting the TLB. + */ + return asid + 1; } -/* Flush all mappings, including globals, for all PCIDs. */ -static inline void invpcid_flush_all(void) +/* + * Given @asid, compute uPCID + */ +static inline u16 user_pcid(u16 asid) { - __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL); + u16 ret = kern_pcid(asid); +#ifdef CONFIG_PAGE_TABLE_ISOLATION + ret |= 1 << X86_CR3_PTI_PCID_USER_BIT; +#endif + return ret; } -/* Flush all mappings for all PCIDs except globals. */ -static inline void invpcid_flush_all_nonglobals(void) +static inline unsigned long build_cr3(pgd_t *pgd, u16 asid) { - __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL); + if (static_cpu_has(X86_FEATURE_PCID)) { + return __sme_pa(pgd) | kern_pcid(asid); + } else { + VM_WARN_ON_ONCE(asid != 0); + return __sme_pa(pgd); + } } -static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) +static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid) { - u64 new_tlb_gen; - + VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); /* - * Bump the generation count. This also serves as a full barrier - * that synchronizes with switch_mm(): callers are required to order - * their read of mm_cpumask after their writes to the paging - * structures. + * Use boot_cpu_has() instead of this_cpu_has() as this function + * might be called during early boot. This should work even after + * boot because all CPUs have the same capabilities: */ - smp_mb__before_atomic(); - new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen); - smp_mb__after_atomic(); - - return new_tlb_gen; + VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID)); + return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH; } #ifdef CONFIG_PARAVIRT @@ -80,7 +145,7 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) #else #define __flush_tlb() __native_flush_tlb() #define __flush_tlb_global() __native_flush_tlb_global() -#define __flush_tlb_single(addr) __native_flush_tlb_single(addr) +#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr) #endif static inline bool tlb_defer_switch_to_init_mm(void) @@ -99,12 +164,6 @@ static inline bool tlb_defer_switch_to_init_mm(void) return !static_cpu_has(X86_FEATURE_PCID); } -/* - * 6 because 6 should be plenty and struct tlb_state will fit in - * two cache lines. - */ -#define TLB_NR_DYN_ASIDS 6 - struct tlb_context { u64 ctx_id; u64 tlb_gen; @@ -120,6 +179,8 @@ struct tlb_state { struct mm_struct *loaded_mm; u16 loaded_mm_asid; u16 next_asid; + /* last user mm's ctx id */ + u64 last_ctx_id; /* * We can be in one of several states: @@ -138,6 +199,24 @@ struct tlb_state { */ bool is_lazy; + /* + * If set we changed the page tables in such a way that we + * needed an invalidation of all contexts (aka. PCIDs / ASIDs). + * This tells us to go invalidate all the non-loaded ctxs[] + * on the next context switch. + * + * The current ctx was kept up-to-date as it ran and does not + * need to be invalidated. + */ + bool invalidate_other; + + /* + * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate + * the corresponding user PCID needs a flush next time we + * switch to it; see SWITCH_TO_USER_CR3. + */ + unsigned short user_pcid_flush_mask; + /* * Access to this CR4 shadow and to H/W CR4 is protected by * disabling interrupts when modifying either one. @@ -215,6 +294,14 @@ static inline unsigned long cr4_read_shadow(void) return this_cpu_read(cpu_tlbstate.cr4); } +/* + * Mark all other ASIDs as invalid, preserves the current. + */ +static inline void invalidate_other_asid(void) +{ + this_cpu_write(cpu_tlbstate.invalidate_other, true); +} + /* * Save some of cr4 feature set we're using (e.g. Pentium 4MB * enable and PPro Global page enable), so that any CPUs that boot @@ -234,37 +321,63 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask) extern void initialize_tlbstate_and_flush(void); -static inline void __native_flush_tlb(void) +/* + * Given an ASID, flush the corresponding user ASID. We can delay this + * until the next time we switch to it. + * + * See SWITCH_TO_USER_CR3. + */ +static inline void invalidate_user_asid(u16 asid) { + /* There is no user ASID if address space separation is off */ + if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) + return; + /* - * If current->mm == NULL then we borrow a mm which may change during a - * task switch and therefore we must not be preempted while we write CR3 - * back: + * We only have a single ASID if PCID is off and the CR3 + * write will have flushed it.
*/ - preempt_disable(); - native_write_cr3(__native_read_cr3()); - preempt_enable(); + if (!cpu_feature_enabled(X86_FEATURE_PCID)) + return; + + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + __set_bit(kern_pcid(asid), + (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask)); } -static inline void __native_flush_tlb_global_irq_disabled(void) +/* + * flush the entire current user mapping + */ +static inline void __native_flush_tlb(void) { - unsigned long cr4; + /* + * Preemption or interrupts must be disabled to protect the access + * to the per CPU variable and to prevent being preempted between + * read_cr3() and write_cr3(). + */ + WARN_ON_ONCE(preemptible()); - cr4 = this_cpu_read(cpu_tlbstate.cr4); - /* clear PGE */ - native_write_cr4(cr4 & ~X86_CR4_PGE); - /* write old PGE again and flush TLBs */ - native_write_cr4(cr4); + invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid)); + + /* If current->mm == NULL then the read_cr3() "borrows" an mm */ + native_write_cr3(__native_read_cr3()); } +/* + * flush everything + */ static inline void __native_flush_tlb_global(void) { - unsigned long flags; + unsigned long cr4, flags; if (static_cpu_has(X86_FEATURE_INVPCID)) { /* * Using INVPCID is considerably faster than a pair of writes * to CR4 sandwiched inside an IRQ flag save/restore. + * + * Note, this works with CR4.PCIDE=0 or 1. */ invpcid_flush_all(); return; @@ -277,36 +390,82 @@ static inline void __native_flush_tlb_global(void) */ raw_local_irq_save(flags); - __native_flush_tlb_global_irq_disabled(); + cr4 = this_cpu_read(cpu_tlbstate.cr4); + /* toggle PGE */ + native_write_cr4(cr4 ^ X86_CR4_PGE); + /* write old PGE again and flush TLBs */ + native_write_cr4(cr4); raw_local_irq_restore(flags); } -static inline void __native_flush_tlb_single(unsigned long addr) +/* + * flush one page in the user mapping + */ +static inline void __native_flush_tlb_one_user(unsigned long addr) { + u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); + asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); + + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + /* + * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1. + * Just use invalidate_user_asid() in case we are called early. + */ + if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE)) + invalidate_user_asid(loaded_mm_asid); + else + invpcid_flush_one(user_pcid(loaded_mm_asid), addr); } +/* + * flush everything + */ static inline void __flush_tlb_all(void) { - if (boot_cpu_has(X86_FEATURE_PGE)) + if (boot_cpu_has(X86_FEATURE_PGE)) { __flush_tlb_global(); - else + } else { + /* + * !PGE -> !PCID (setup_pcid()), thus every flush is total. + */ __flush_tlb(); + } +} + +/* + * flush one page in the kernel mapping + */ +static inline void __flush_tlb_one_kernel(unsigned long addr) +{ + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); /* - * Note: if we somehow had PCID but not PGE, then this wouldn't work -- - * we'd end up flushing kernel translations for the current ASID but - * we might fail to flush kernel translations for other cached ASIDs. + * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its + * paravirt equivalent. Even with PCID, this is sufficient: we only + * use PCID if we also use global PTEs for the kernel mapping, and + * INVLPG flushes global translations across all address spaces. * - * To avoid this issue, we force PCID off if PGE is off. 
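/* Illustrative sketch, not from the patch: a standalone program showing the
 * CR3 values that kern_pcid(), user_pcid() and build_cr3_noflush() above
 * compose. The pgd address and ASID are invented; the bit positions (PCID in
 * CR3 bits 11:0, X86_CR3_PTI_PCID_USER_BIT == 11, no-flush hint in bit 63)
 * follow the hunks above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pgd_pa = 0x1234000ULL;	/* invented, page-aligned pgd */
	unsigned int asid = 3;		/* one of the dynamic ASIDs, 0..5 */

	uint64_t kcr3 = pgd_pa | (asid + 1);	/* kern_pcid(): kPCID = ASID + 1 */
	uint64_t ucr3 = kcr3 | (1ULL << 11);	/* user_pcid(): uPCID = kPCID + 2048 */
	uint64_t lazy = kcr3 | (1ULL << 63);	/* CR3_NOFLUSH: keep TLB entries */

	printf("kernel %#llx user %#llx noflush %#llx\n",
	       (unsigned long long)kcr3, (unsigned long long)ucr3,
	       (unsigned long long)lazy);
	return 0;
}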
+ * If PTI is on, then the kernel is mapped with non-global PTEs, and + * __flush_tlb_one_user() will flush the given address for the current + * kernel address space and for its usermode counterpart, but it does + * not flush it for other address spaces. */ -} + __flush_tlb_one_user(addr); -static inline void __flush_tlb_one(unsigned long addr) -{ - count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); - __flush_tlb_single(addr); + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + /* + * See above. We need to propagate the flush to all other address + * spaces. In principle, we only need to propagate it to kernelmode + * address spaces, but the extra bookkeeping we would need is not + * worth it. + */ + invalidate_other_asid(); } #define TLB_FLUSH_ALL -1UL @@ -367,6 +526,17 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a) void native_flush_tlb_others(const struct cpumask *cpumask, const struct flush_tlb_info *info); +static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) +{ + /* + * Bump the generation count. This also serves as a full barrier + * that synchronizes with switch_mm(): callers are required to order + * their read of mm_cpumask after their writes to the paging + * structures. + */ + return atomic64_inc_return(&mm->context.tlb_gen); +} + static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch, struct mm_struct *mm) { diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index c1d2a9892352..453cf38a1c33 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -123,13 +123,17 @@ static inline int topology_max_smt_threads(void) } int topology_update_package_map(unsigned int apicid, unsigned int cpu); -extern int topology_phys_to_logical_pkg(unsigned int pkg); +int topology_phys_to_logical_pkg(unsigned int pkg); +bool topology_is_primary_thread(unsigned int cpu); +bool topology_smt_supported(void); #else #define topology_max_packages() (1) static inline int topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; } static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; } static inline int topology_max_smt_threads(void) { return 1; } +static inline bool topology_is_primary_thread(unsigned int cpu) { return true; } +static inline bool topology_smt_supported(void) { return false; } #endif static inline void arch_fix_phys_package_id(int num, u32 slot) diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h index fa60398bbc3a..069c04be1507 100644 --- a/arch/x86/include/asm/trace/fpu.h +++ b/arch/x86/include/asm/trace/fpu.h @@ -34,11 +34,6 @@ DECLARE_EVENT_CLASS(x86_fpu, ) ); -DEFINE_EVENT(x86_fpu, x86_fpu_state, - TP_PROTO(struct fpu *fpu), - TP_ARGS(fpu) -); - DEFINE_EVENT(x86_fpu, x86_fpu_before_save, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) @@ -74,11 +69,6 @@ DEFINE_EVENT(x86_fpu, x86_fpu_activate_state, TP_ARGS(fpu) ); -DEFINE_EVENT(x86_fpu, x86_fpu_deactivate_state, - TP_PROTO(struct fpu *fpu), - TP_ARGS(fpu) -); - DEFINE_EVENT(x86_fpu, x86_fpu_init_state, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index b0cced97a6ce..3de69330e6c5 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -38,9 +38,9 @@ asmlinkage void simd_coprocessor_error(void); #if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV) asmlinkage void xen_divide_error(void); +asmlinkage void xen_xennmi(void); asmlinkage void xen_xendebug(void); asmlinkage 
void xen_xenint3(void); -asmlinkage void xen_nmi(void); asmlinkage void xen_overflow(void); asmlinkage void xen_bounds(void); asmlinkage void xen_invalid_op(void); @@ -75,7 +75,6 @@ dotraplinkage void do_segment_not_present(struct pt_regs *, long); dotraplinkage void do_stack_segment(struct pt_regs *, long); #ifdef CONFIG_X86_64 dotraplinkage void do_double_fault(struct pt_regs *, long); -asmlinkage struct pt_regs *sync_regs(struct pt_regs *); #endif dotraplinkage void do_general_protection(struct pt_regs *, long); dotraplinkage void do_page_fault(struct pt_regs *, unsigned long); @@ -89,6 +88,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long); #ifdef CONFIG_X86_32 dotraplinkage void do_iret_error(struct pt_regs *, long); #endif +dotraplinkage void do_mce(struct pt_regs *, long); static inline int get_si_code(unsigned long condition) { @@ -145,4 +145,22 @@ enum { X86_TRAP_IRET = 32, /* 32, IRET Exception */ }; +/* + * Page fault error code bits: + * + * bit 0 == 0: no page found 1: protection fault + * bit 1 == 0: read access 1: write access + * bit 2 == 0: kernel-mode access 1: user-mode access + * bit 3 == 1: use of reserved bit detected + * bit 4 == 1: fault was an instruction fetch + * bit 5 == 1: protection keys block access + */ +enum x86_pf_error_code { + X86_PF_PROT = 1 << 0, + X86_PF_WRITE = 1 << 1, + X86_PF_USER = 1 << 2, + X86_PF_RSVD = 1 << 3, + X86_PF_INSTR = 1 << 4, + X86_PF_PK = 1 << 5, +}; #endif /* _ASM_X86_TRAPS_H */ diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 8da0efb13544..a62413de1e2f 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -62,6 +62,15 @@ extern int notsc_setup(char *); extern void tsc_save_sched_clock_state(void); extern void tsc_restore_sched_clock_state(void); +#ifdef CONFIG_PARAVIRT +#include +#else +static inline unsigned long cpu_khz_from_paravirt(void) +{ + return 0; +} +#endif + unsigned long cpu_khz_from_msr(void); #endif /* _ASM_X86_TSC_H */ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 574dff4d2913..aae77eb8491c 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -124,6 +124,11 @@ extern int __get_user_bad(void); #define __uaccess_begin() stac() #define __uaccess_end() clac() +#define __uaccess_begin_nospec() \ +({ \ + stac(); \ + barrier_nospec(); \ +}) /* * This is a type: either unsigned long, if the argument fits into @@ -445,7 +450,7 @@ do { \ ({ \ int __gu_err; \ __inttype(*(ptr)) __gu_val; \ - __uaccess_begin(); \ + __uaccess_begin_nospec(); \ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ __uaccess_end(); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ @@ -487,6 +492,10 @@ struct __large_struct { unsigned long buf[100]; }; __uaccess_begin(); \ barrier(); +#define uaccess_try_nospec do { \ + current->thread.uaccess_err = 0; \ + __uaccess_begin_nospec(); \ + #define uaccess_catch(err) \ __uaccess_end(); \ (err) |= (current->thread.uaccess_err ? 
-EFAULT : 0); \ @@ -548,7 +557,7 @@ struct __large_struct { unsigned long buf[100]; }; * get_user_ex(...); * } get_user_catch(err) */ -#define get_user_try uaccess_try +#define get_user_try uaccess_try_nospec #define get_user_catch(err) uaccess_catch(err) #define get_user_ex(x, ptr) do { \ @@ -582,7 +591,7 @@ extern void __cmpxchg_wrong_size(void) __typeof__(ptr) __uval = (uval); \ __typeof__(*(ptr)) __old = (old); \ __typeof__(*(ptr)) __new = (new); \ - __uaccess_begin(); \ + __uaccess_begin_nospec(); \ switch (size) { \ case 1: \ { \ diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 72950401b223..ba2dc1930630 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -29,21 +29,21 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n) switch (n) { case 1: ret = 0; - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u8 *)to, from, ret, "b", "b", "=q", 1); __uaccess_end(); return ret; case 2: ret = 0; - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u16 *)to, from, ret, "w", "w", "=r", 2); __uaccess_end(); return ret; case 4: ret = 0; - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u32 *)to, from, ret, "l", "k", "=r", 4); __uaccess_end(); diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index f07ef3c575db..62546b3a398e 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -55,31 +55,31 @@ raw_copy_from_user(void *dst, const void __user *src, unsigned long size) return copy_user_generic(dst, (__force void *)src, size); switch (size) { case 1: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src, ret, "b", "b", "=q", 1); __uaccess_end(); return ret; case 2: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src, ret, "w", "w", "=r", 2); __uaccess_end(); return ret; case 4: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src, ret, "l", "k", "=r", 4); __uaccess_end(); return ret; case 8: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, ret, "q", "", "=r", 8); __uaccess_end(); return ret; case 10: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, ret, "q", "", "=r", 10); if (likely(!ret)) @@ -89,7 +89,7 @@ raw_copy_from_user(void *dst, const void __user *src, unsigned long size) __uaccess_end(); return ret; case 16: - __uaccess_begin(); + __uaccess_begin_nospec(); __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, ret, "q", "", "=r", 16); if (likely(!ret)) diff --git a/arch/x86/include/asm/umip.h b/arch/x86/include/asm/umip.h new file mode 100644 index 000000000000..db43f2a0d92c --- /dev/null +++ b/arch/x86/include/asm/umip.h @@ -0,0 +1,12 @@ +#ifndef _ASM_X86_UMIP_H +#define _ASM_X86_UMIP_H + +#include +#include + +#ifdef CONFIG_X86_INTEL_UMIP +bool fixup_umip_exception(struct pt_regs *regs); +#else +static inline bool fixup_umip_exception(struct pt_regs *regs) { return false; } +#endif /* CONFIG_X86_INTEL_UMIP */ +#endif /* _ASM_X86_UMIP_H */ diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index 87adc0d38c4a..1f86e1b0a5cd 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -7,17 +7,20 @@ #include #include +#define IRET_FRAME_OFFSET (offsetof(struct pt_regs, ip)) 
+#define IRET_FRAME_SIZE (sizeof(struct pt_regs) - IRET_FRAME_OFFSET) + struct unwind_state { struct stack_info stack_info; unsigned long stack_mask; struct task_struct *task; int graph_idx; bool error; -#if defined(CONFIG_ORC_UNWINDER) +#if defined(CONFIG_UNWINDER_ORC) bool signal, full_regs; unsigned long sp, bp, ip; struct pt_regs *regs; -#elif defined(CONFIG_FRAME_POINTER_UNWINDER) +#elif defined(CONFIG_UNWINDER_FRAME_POINTER) bool got_irq; unsigned long *bp, *orig_sp, ip; struct pt_regs *regs; @@ -51,22 +54,35 @@ void unwind_start(struct unwind_state *state, struct task_struct *task, __unwind_start(state, task, regs, first_frame); } -#if defined(CONFIG_ORC_UNWINDER) || defined(CONFIG_FRAME_POINTER_UNWINDER) -static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) +#if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER) +/* + * If 'partial' returns true, only the iret frame registers are valid. + */ +static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state, + bool *partial) { if (unwind_done(state)) return NULL; + if (partial) { +#ifdef CONFIG_UNWINDER_ORC + *partial = !state->full_regs; +#else + *partial = false; +#endif + } + return state->regs; } #else -static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) +static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state, + bool *partial) { return NULL; } #endif -#ifdef CONFIG_ORC_UNWINDER +#ifdef CONFIG_UNWINDER_ORC void unwind_init(void); void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, void *orc, size_t orc_size); diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index caec8417539f..08c14aec26ac 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -352,6 +352,7 @@ enum vmcs_field { #define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ #define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */ #define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ +#define INTR_TYPE_PRIV_SW_EXCEPTION (5 << 8) /* ICE breakpoint - undocumented */ #define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */ /* GUEST_INTERRUPTIBILITY_INFO flags. */ @@ -570,4 +571,15 @@ enum vm_instruction_error_number { VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28, }; +enum vmx_l1d_flush_state { + VMENTER_L1D_FLUSH_AUTO, + VMENTER_L1D_FLUSH_NEVER, + VMENTER_L1D_FLUSH_COND, + VMENTER_L1D_FLUSH_ALWAYS, + VMENTER_L1D_FLUSH_EPT_DISABLED, + VMENTER_L1D_FLUSH_NOT_REQUIRED, +}; + +extern enum vmx_l1d_flush_state l1tf_vmx_mitigation; + #endif diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h index d9a7c659009c..b986b2ca688a 100644 --- a/arch/x86/include/asm/vsyscall.h +++ b/arch/x86/include/asm/vsyscall.h @@ -7,6 +7,7 @@ #ifdef CONFIG_X86_VSYSCALL_EMULATION extern void map_vsyscall(void); +extern void set_vsyscall_pgtable_user_bits(pgd_t *root); /* * Called on instruction fetch fault in vsyscall page. 
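/* Hypothetical caller of the changed unwinder API, sketched under the
 * assumption of an unwind_state 'state' already set up with unwind_start().
 * When 'partial' comes back true, only the iret frame members of *regs
 * (ip, cs, flags, sp, ss) are valid, so the general-purpose registers must
 * not be dumped. Not part of the patch. */
bool partial;
struct pt_regs *regs = unwind_get_entry_regs(&state, &partial);

if (regs) {
	if (partial)
		pr_info("entry regs %p: iret frame only, skipping GP registers\n", regs);
	else
		show_regs(regs);	/* full pt_regs are safe to dump */
}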
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 8a1ebf9540dd..ad15a0fda917 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -114,6 +114,18 @@ struct x86_init_pci { void (*fixup_irqs)(void); }; +/** + * struct x86_hyper_init - x86 hypervisor init functions + * @init_platform: platform setup + * @x2apic_available: X2APIC detection + * @init_mem_mapping: setup early mappings during init_mem_mapping() + */ +struct x86_hyper_init { + void (*init_platform)(void); + bool (*x2apic_available)(void); + void (*init_mem_mapping)(void); +}; + /** * struct x86_init_ops - functions for platform specific setup * @@ -127,6 +139,7 @@ struct x86_init_ops { struct x86_init_timers timers; struct x86_init_iommu iommu; struct x86_init_pci pci; + struct x86_hyper_init hyper; }; /** @@ -199,6 +212,15 @@ struct x86_legacy_features { struct x86_legacy_devices devices; }; +/** + * struct x86_hyper_runtime - x86 hypervisor specific runtime callbacks + * + * @pin_vcpu: pin current vcpu to specified physical cpu (run rarely) + */ +struct x86_hyper_runtime { + void (*pin_vcpu)(int cpu); +}; + /** * struct x86_platform_ops - platform specific runtime functions * @calibrate_cpu: calibrate CPU @@ -218,6 +240,7 @@ struct x86_legacy_features { * possible in x86_early_init_platform_quirks() by * only using the current x86_hardware_subarch * semantics. + * @hyper: x86 hypervisor specific runtime callbacks */ struct x86_platform_ops { unsigned long (*calibrate_cpu)(void); @@ -233,6 +256,7 @@ struct x86_platform_ops { void (*apic_post_init)(void); struct x86_legacy_features legacy; void (*set_legacy_features)(void); + struct x86_hyper_runtime hyper; }; struct pci_dev; diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 7cb282e9e587..bfd882617613 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -217,9 +218,9 @@ privcmd_call(unsigned call, __HYPERCALL_5ARG(a1, a2, a3, a4, a5); stac(); - asm volatile("call *%[call]" + asm volatile(CALL_NOSPEC : __HYPERCALL_5PARAM - : [call] "a" (&hypercall_page[call]) + : [thunk_target] "a" (&hypercall_page[call]) : __HYPERCALL_CLOBBER5); clac(); diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h index 1f5c5161ead6..45c8605467f1 100644 --- a/arch/x86/include/asm/xor.h +++ b/arch/x86/include/asm/xor.h @@ -1,7 +1,4 @@ -#ifdef CONFIG_KMEMCHECK -/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */ -# include -#elif !defined(_ASM_X86_XOR_H) +#ifndef _ASM_X86_XOR_H #define _ASM_X86_XOR_H /* diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h index 554aa8f24f91..341db0462b85 100644 --- a/arch/x86/include/uapi/asm/kvm_para.h +++ b/arch/x86/include/uapi/asm/kvm_para.h @@ -25,6 +25,7 @@ #define KVM_FEATURE_STEAL_TIME 5 #define KVM_FEATURE_PV_EOI 6 #define KVM_FEATURE_PV_UNHALT 7 +#define KVM_FEATURE_ASYNC_PF_VMEXIT 10 /* The last 8 bits are used to indicate how to interpret the flags field * in pvclock structure. If no bits are set, all flags are ignored. 
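/* Sketch of how a guest platform might hook the new x86_init.hyper
 * callbacks. Only the callback names come from the hunk above; the
 * example_* functions are invented for illustration. */
static bool __init example_x2apic_available(void)
{
	return false;	/* this imaginary hypervisor never offers x2APIC */
}

static void __init example_init_platform(void)
{
	/* detect paravirt features, register clocks, etc. */
}

static void __init example_guest_detect(void)
{
	x86_init.hyper.init_platform    = example_init_platform;
	x86_init.hyper.x2apic_available = example_x2apic_available;
}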
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h index 91723461dc1f..435db58a7bad 100644 --- a/arch/x86/include/uapi/asm/mce.h +++ b/arch/x86/include/uapi/asm/mce.h @@ -30,6 +30,7 @@ struct mce { __u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */ __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */ __u64 ppin; /* Protected Processor Inventory Number */ + __u32 microcode;/* Microcode revision */ }; #define MCE_GET_RECORD_LEN _IOR('M', 1, int) diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h index 809134c644a6..90ab9a795b49 100644 --- a/arch/x86/include/uapi/asm/msgbuf.h +++ b/arch/x86/include/uapi/asm/msgbuf.h @@ -1 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ASM_X64_MSGBUF_H +#define __ASM_X64_MSGBUF_H + +#if !defined(__x86_64__) || !defined(__ILP32__) #include +#else +/* + * The msqid64_ds structure for x86 architecture with x32 ABI. + * + * On x86-32 and x86-64 we can just use the generic definition, but + * x32 uses the same binary layout as x86_64, which is different + * from other 32-bit architectures. + */ + +struct msqid64_ds { + struct ipc64_perm msg_perm; + __kernel_time_t msg_stime; /* last msgsnd time */ + __kernel_time_t msg_rtime; /* last msgrcv time */ + __kernel_time_t msg_ctime; /* last change time */ + __kernel_ulong_t msg_cbytes; /* current number of bytes on queue */ + __kernel_ulong_t msg_qnum; /* number of messages in queue */ + __kernel_ulong_t msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + __kernel_ulong_t __unused4; + __kernel_ulong_t __unused5; +}; + +#endif + +#endif /* __ASM_X64_MSGBUF_H */ diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h index 6f3355399665..bcba3c643e63 100644 --- a/arch/x86/include/uapi/asm/processor-flags.h +++ b/arch/x86/include/uapi/asm/processor-flags.h @@ -78,7 +78,12 @@ #define X86_CR3_PWT _BITUL(X86_CR3_PWT_BIT) #define X86_CR3_PCD_BIT 4 /* Page Cache Disable */ #define X86_CR3_PCD _BITUL(X86_CR3_PCD_BIT) -#define X86_CR3_PCID_MASK _AC(0x00000fff,UL) /* PCID Mask */ + +#define X86_CR3_PCID_BITS 12 +#define X86_CR3_PCID_MASK (_AC((1UL << X86_CR3_PCID_BITS) - 1, UL)) + +#define X86_CR3_PCID_NOFLUSH_BIT 63 /* Preserve old PCID */ +#define X86_CR3_PCID_NOFLUSH _BITULL(X86_CR3_PCID_NOFLUSH_BIT) /* * Intel CPU features in CR4 @@ -105,6 +110,8 @@ #define X86_CR4_OSFXSR _BITUL(X86_CR4_OSFXSR_BIT) #define X86_CR4_OSXMMEXCPT_BIT 10 /* enable unmasked SSE exceptions */ #define X86_CR4_OSXMMEXCPT _BITUL(X86_CR4_OSXMMEXCPT_BIT) +#define X86_CR4_UMIP_BIT 11 /* enable UMIP support */ +#define X86_CR4_UMIP _BITUL(X86_CR4_UMIP_BIT) #define X86_CR4_LA57_BIT 12 /* enable 5-level page tables */ #define X86_CR4_LA57 _BITUL(X86_CR4_LA57_BIT) #define X86_CR4_VMXE_BIT 13 /* enable VMX virtualization */ @@ -152,5 +159,8 @@ #define CX86_ARR_BASE 0xc4 #define CX86_RCR_BASE 0xdc +#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ + X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ + X86_CR0_PG) #endif /* _UAPI_ASM_X86_PROCESSOR_FLAGS_H */ diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h index 83c05fc2de38..644421f3823b 100644 --- a/arch/x86/include/uapi/asm/shmbuf.h +++ b/arch/x86/include/uapi/asm/shmbuf.h @@ -1 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ASM_X86_SHMBUF_H +#define __ASM_X86_SHMBUF_H + +#if
!defined(__x86_64__) || !defined(__ILP32__) #include +#else +/* + * The shmid64_ds structure for x86 architecture with x32 ABI. + * + * On x86-32 and x86-64 we can just use the generic definition, but + * x32 uses the same binary layout as x86_64, which is different + * from other 32-bit architectures. + */ + +struct shmid64_ds { + struct ipc64_perm shm_perm; /* operation perms */ + size_t shm_segsz; /* size of segment (bytes) */ + __kernel_time_t shm_atime; /* last attach time */ + __kernel_time_t shm_dtime; /* last detach time */ + __kernel_time_t shm_ctime; /* last change time */ + __kernel_pid_t shm_cpid; /* pid of creator */ + __kernel_pid_t shm_lpid; /* pid of last operator */ + __kernel_ulong_t shm_nattch; /* no. of current attaches */ + __kernel_ulong_t __unused4; + __kernel_ulong_t __unused5; +}; + +struct shminfo64 { + __kernel_ulong_t shmmax; + __kernel_ulong_t shmmin; + __kernel_ulong_t shmmni; + __kernel_ulong_t shmseg; + __kernel_ulong_t shmall; + __kernel_ulong_t __unused1; + __kernel_ulong_t __unused2; + __kernel_ulong_t __unused3; + __kernel_ulong_t __unused4; +}; + +#endif + +#endif /* __ASM_X86_SHMBUF_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 5f70044340ff..de941e670208 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -25,9 +25,9 @@ endif KASAN_SANITIZE_head$(BITS).o := n KASAN_SANITIZE_dumpstack.o := n KASAN_SANITIZE_dumpstack_$(BITS).o := n -KASAN_SANITIZE_stacktrace.o := n +KASAN_SANITIZE_stacktrace.o := n +KASAN_SANITIZE_paravirt.o := n -OBJECT_FILES_NON_STANDARD_head_$(BITS).o := y OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y OBJECT_FILES_NON_STANDARD_test_nx.o := y @@ -58,6 +58,7 @@ obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o obj-y += tsc.o tsc_msr.o io_delay.o rtc.o obj-y += pci-iommu_table.o obj-y += resource.o +obj-y += irqflags.o obj-y += process.o obj-y += fpu/ @@ -127,10 +128,11 @@ obj-$(CONFIG_EFI) += sysfb_efi.o obj-$(CONFIG_PERF_EVENTS) += perf_regs.o obj-$(CONFIG_TRACING) += tracepoint.o obj-$(CONFIG_SCHED_MC_PRIO) += itmt.o +obj-$(CONFIG_X86_INTEL_UMIP) += umip.o -obj-$(CONFIG_ORC_UNWINDER) += unwind_orc.o -obj-$(CONFIG_FRAME_POINTER_UNWINDER) += unwind_frame.o -obj-$(CONFIG_GUESS_UNWINDER) += unwind_guess.o +obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o +obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o +obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o ### # 64 bit specific files diff --git a/arch/x86/kernel/acpi/apei.c b/arch/x86/kernel/acpi/apei.c index ea3046e0b0cf..28d70ac93faf 100644 --- a/arch/x86/kernel/acpi/apei.c +++ b/arch/x86/kernel/acpi/apei.c @@ -55,5 +55,5 @@ void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) void arch_apei_flush_tlb_one(unsigned long addr) { - __flush_tlb_one(addr); + __flush_tlb_one_kernel(addr); } diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 079535e53e2a..6dda3595acf8 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -215,6 +215,10 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) apic_id = processor->local_apic_id; enabled = processor->lapic_flags & ACPI_MADT_ENABLED; + /* Ignore invalid ID */ + if (apic_id == 0xffffffff) + return 0; + /* * We need to register disabled CPU as well to permit * counting disabled CPUs.
This allows us to size @@ -342,13 +346,12 @@ acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long e #ifdef CONFIG_X86_IO_APIC #define MP_ISA_BUS 0 +static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity, + u8 trigger, u32 gsi); + static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi) { - int ioapic; - int pin; - struct mpc_intsrc mp_irq; - /* * Check bus_irq boundary. */ @@ -357,14 +360,6 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, return; } - /* - * Convert 'gsi' to 'ioapic.pin'. - */ - ioapic = mp_find_ioapic(gsi); - if (ioapic < 0) - return; - pin = mp_find_ioapic_pin(ioapic, gsi); - /* * TBD: This check is for faulty timer entries, where the override * erroneously sets the trigger to level, resulting in a HUGE @@ -373,16 +368,8 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, if ((bus_irq == 0) && (trigger == 3)) trigger = 1; - mp_irq.type = MP_INTSRC; - mp_irq.irqtype = mp_INT; - mp_irq.irqflag = (trigger << 2) | polarity; - mp_irq.srcbus = MP_ISA_BUS; - mp_irq.srcbusirq = bus_irq; /* IRQ */ - mp_irq.dstapic = mpc_ioapic_id(ioapic); /* APIC ID */ - mp_irq.dstirq = pin; /* INTIN# */ - - mp_save_irq(&mp_irq); - + if (mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi) < 0) + return; /* * Reset default identity mapping if gsi is also a legacy IRQ, * otherwise there will be more than one entry with the same GSI @@ -429,6 +416,34 @@ static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger, return 0; } +static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity, + u8 trigger, u32 gsi) +{ + struct mpc_intsrc mp_irq; + int ioapic, pin; + + /* Convert 'gsi' to 'ioapic.pin'(INTIN#) */ + ioapic = mp_find_ioapic(gsi); + if (ioapic < 0) { + pr_warn("Failed to find ioapic for gsi : %u\n", gsi); + return ioapic; + } + + pin = mp_find_ioapic_pin(ioapic, gsi); + + mp_irq.type = MP_INTSRC; + mp_irq.irqtype = mp_INT; + mp_irq.irqflag = (trigger << 2) | polarity; + mp_irq.srcbus = MP_ISA_BUS; + mp_irq.srcbusirq = bus_irq; + mp_irq.dstapic = mpc_ioapic_id(ioapic); + mp_irq.dstirq = pin; + + mp_save_irq(&mp_irq); + + return 0; +} + static int __init acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end) { @@ -473,7 +488,11 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK) polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; - mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); + if (bus_irq < NR_IRQS_LEGACY) + mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); + else + mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi); + acpi_penalize_sci_irq(bus_irq, trigger, polarity); /* diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 3344d3382e91..b034826a0b3b 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -46,17 +46,6 @@ static int __init setup_noreplace_smp(char *str) } __setup("noreplace-smp", setup_noreplace_smp); -#ifdef CONFIG_PARAVIRT -static int __initdata_or_module noreplace_paravirt = 0; - -static int __init setup_noreplace_paravirt(char *str) -{ - noreplace_paravirt = 1; - return 1; -} -__setup("noreplace-paravirt", setup_noreplace_paravirt); -#endif - #define DPRINTK(fmt, args...)
\ do { \ if (debug_alternative) \ @@ -344,9 +333,12 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf) static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr) { unsigned long flags; + int i; - if (instr[0] != 0x90) - return; + for (i = 0; i < a->padlen; i++) { + if (instr[i] != 0x90) + return; + } local_irq_save(flags); add_nops(instr + (a->instrlen - a->padlen), a->padlen); @@ -596,9 +588,6 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start, struct paravirt_patch_site *p; char insnbuf[MAX_PATCH_LEN]; - if (noreplace_paravirt) - return; - for (p = start; p < end; p++) { unsigned int used; diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 6db28f17ff28..c88e0b127810 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -235,7 +235,7 @@ int amd_cache_northbridges(void) if (boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model >= 0x8 && (boot_cpu_data.x86_model > 0x9 || - boot_cpu_data.x86_mask >= 0x1)) + boot_cpu_data.x86_stepping >= 0x1)) amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; if (boot_cpu_data.x86 == 0x15) diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index f5d92bc3b884..2c4d5ece7456 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -30,6 +30,7 @@ #include #include #include +#include /* * Using 512M as goal, in case kexec will load kernel_big @@ -56,6 +57,33 @@ int fallback_aper_force __initdata; int fix_aperture __initdata = 1; +#ifdef CONFIG_PROC_VMCORE +/* + * If the first kernel maps the aperture over e820 RAM, the kdump kernel will + * use the same range because it will remain configured in the northbridge. + * Trying to dump this area via /proc/vmcore may crash the machine, so exclude + * it from vmcore. + */ +static unsigned long aperture_pfn_start, aperture_page_count; + +static int gart_oldmem_pfn_is_ram(unsigned long pfn) +{ + return likely((pfn < aperture_pfn_start) || + (pfn >= aperture_pfn_start + aperture_page_count)); +} + +static void exclude_from_vmcore(u64 aper_base, u32 aper_order) +{ + aperture_pfn_start = aper_base >> PAGE_SHIFT; + aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT; + WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram)); +} +#else +static void exclude_from_vmcore(u64 aper_base, u32 aper_order) +{ +} +#endif + /* This code runs before the PCI subsystem is initialized, so just access the northbridge directly. */ @@ -435,8 +463,16 @@ int __init gart_iommu_hole_init(void) out: if (!fix && !fallback_aper_force) { - if (last_aper_base) + if (last_aper_base) { + /* + * If this is the kdump kernel, the first kernel + * may have allocated the range over its e820 RAM + * and fixed up the northbridge + */ + exclude_from_vmcore(last_aper_base, last_aper_order); + return 1; + } return 0; } @@ -473,6 +509,14 @@ int __init gart_iommu_hole_init(void) return 0; } + /* + * If this is the kdump kernel _and_ the first kernel did not + * configure the aperture in the northbridge, this range may + * overlap with the first kernel's memory. We can't access the + * range through vmcore even though it should be part of the dump. 
+ */ + exclude_from_vmcore(aper_alloc, aper_order); + /* Fix up the north bridges */ for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) { int bus, dev_base, dev_limit; diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index ff891772c9f8..2e64178f284d 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -56,6 +57,7 @@ #include #include #include +#include unsigned int num_processors; @@ -553,7 +555,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events); static u32 hsx_deadline_rev(void) { - switch (boot_cpu_data.x86_mask) { + switch (boot_cpu_data.x86_stepping) { case 0x02: return 0x3a; /* EP */ case 0x04: return 0x0f; /* EX */ } @@ -563,7 +565,7 @@ static u32 hsx_deadline_rev(void) static u32 bdx_deadline_rev(void) { - switch (boot_cpu_data.x86_mask) { + switch (boot_cpu_data.x86_stepping) { case 0x02: return 0x00000011; case 0x03: return 0x0700000e; case 0x04: return 0x0f00000c; @@ -575,11 +577,14 @@ static u32 bdx_deadline_rev(void) static u32 skx_deadline_rev(void) { - switch (boot_cpu_data.x86_mask) { + switch (boot_cpu_data.x86_stepping) { case 0x03: return 0x01000136; case 0x04: return 0x02000014; } + if (boot_cpu_data.x86_stepping > 4) + return 0; + return ~0U; } @@ -1481,7 +1486,7 @@ void setup_local_APIC(void) * TODO: set up through-local-APIC from through-I/O-APIC? --macro */ value = apic_read(APIC_LVT0) & APIC_LVT_MASKED; - if (!cpu && (pic_mode || !value)) { + if (!cpu && (pic_mode || !value || skip_ioapic_setup)) { value = APIC_DM_EXTINT; apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu); } else { @@ -1645,7 +1650,7 @@ static __init void try_to_enable_x2apic(int remap_mode) * under KVM */ if (max_physical_apicid > 255 || - !hypervisor_x2apic_available()) { + !x86_init.hyper.x2apic_available()) { pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n"); x2apic_disable(); return; @@ -2089,6 +2094,23 @@ static int cpuid_to_apicid[] = { [0 ... NR_CPUS - 1] = -1, }; +#ifdef CONFIG_SMP +/** + * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread + * @id: APIC ID to check + */ +bool apic_id_is_primary_thread(unsigned int apicid) +{ + u32 mask; + + if (smp_num_siblings == 1) + return true; + /* Isolate the SMT bit(s) in the APICID and check for 0 */ + mask = (1U << (fls(smp_num_siblings) - 1)) - 1; + return !(apicid & mask); +} +#endif + /* * Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids * and cpuid_to_apicid[] synchronized. 
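/* Illustrative sketch, not from the patch: a standalone re-implementation of
 * the SMT mask computed in apic_id_is_primary_thread() above. fls() is
 * emulated with __builtin_clz(); the sample values are invented. With
 * smp_num_siblings == 2 the mask is 0x1, so even APIC IDs are primary. */
#include <stdio.h>

static int is_primary(unsigned int apicid, unsigned int siblings)
{
	unsigned int mask;

	if (siblings == 1)
		return 1;
	/* fls(siblings) - 1 low bits select the thread within a core */
	mask = (1U << (32 - __builtin_clz(siblings) - 1)) - 1;
	return !(apicid & mask);
}

int main(void)
{
	printf("%d %d %d\n", is_primary(0, 2), is_primary(1, 2), is_primary(4, 2));
	return 0;	/* prints: 1 0 1 */
}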
diff --git a/arch/x86/kernel/apic/htirq.c b/arch/x86/kernel/apic/htirq.c index 56ccf9346b08..741de281ed5d 100644 --- a/arch/x86/kernel/apic/htirq.c +++ b/arch/x86/kernel/apic/htirq.c @@ -16,6 +16,8 @@ #include #include #include +#include + #include #include #include diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 3b89b27945ff..96a8a68f9c79 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -33,6 +33,7 @@ #include #include +#include #include #include #include diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c index 9b18be764422..f10e7f93b0e2 100644 --- a/arch/x86/kernel/apic/msi.c +++ b/arch/x86/kernel/apic/msi.c @@ -12,6 +12,7 @@ */ #include #include +#include #include #include #include diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 88c214e75a6b..b958082c74a7 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -11,6 +11,7 @@ * published by the Free Software Foundation. */ #include +#include #include #include #include @@ -369,8 +370,11 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, irq_data->hwirq = virq + i; err = assign_irq_vector_policy(virq + i, node, data, info, irq_data); - if (err) + if (err) { + irq_data->chip_data = NULL; + free_apic_chip_data(data); goto error; + } /* * If the apic destination mode is physical, then the * effective affinity is restricted to a single target @@ -383,7 +387,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, return 0; error: - x86_vector_free_irqs(domain, virq, i + 1); + x86_vector_free_irqs(domain, virq, i); return err; } diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 0d57bb9079c9..02cfc615e3fb 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -920,9 +920,8 @@ static __init void uv_rtc_init(void) /* * percpu heartbeat timer */ -static void uv_heartbeat(unsigned long ignored) +static void uv_heartbeat(struct timer_list *timer) { - struct timer_list *timer = &uv_scir_info->timer; unsigned char bits = uv_scir_info->state; /* Flip heartbeat bit: */ @@ -947,7 +946,7 @@ static int uv_heartbeat_enable(unsigned int cpu) struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer; uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY); - setup_pinned_timer(timer, uv_heartbeat, cpu); + timer_setup(timer, uv_heartbeat, TIMER_PINNED); timer->expires = jiffies + SCIR_CPU_HB_INTERVAL; add_timer_on(timer, cpu); uv_cpu_scir_info(cpu)->enabled = 1; @@ -1141,16 +1140,25 @@ static void __init decode_gam_rng_tbl(unsigned long ptr) uv_gre_table = gre; for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { + unsigned long size = ((unsigned long)(gre->limit - lgre) + << UV_GAM_RANGE_SHFT); + int order = 0; + char suffix[] = " KMGTPE"; + + while (size > 9999 && order < sizeof(suffix)) { + size /= 1024; + order++; + } + if (!index) { pr_info("UV: GAM Range Table...\n"); pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN"); } - pr_info("UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x\n", + pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n", index++, (unsigned long)lgre << UV_GAM_RANGE_SHFT, (unsigned long)gre->limit << UV_GAM_RANGE_SHFT, - ((unsigned long)(gre->limit - lgre)) >> - (30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */ + size, suffix[order], gre->type, gre->nasid, gre->sockid, gre->pnode); lgre = 
gre->limit; diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index e4b0d92b3ae0..63d3e6a6b5ef 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -240,6 +240,7 @@ #include #include #include +#include #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT) extern int (*console_blank_hook)(int); @@ -614,11 +615,13 @@ static long __apm_bios_call(void *_call) gdt[0x40 / 8] = bad_bios_desc; apm_irq_save(flags); + firmware_restrict_branch_speculation_start(); APM_DO_SAVE_SEGS; apm_bios_call_asm(call->func, call->ebx, call->ecx, &call->eax, &call->ebx, &call->ecx, &call->edx, &call->esi); APM_DO_RESTORE_SEGS; + firmware_restrict_branch_speculation_end(); apm_irq_restore(flags); gdt[0x40 / 8] = save_desc_40; put_cpu(); @@ -690,10 +693,12 @@ static long __apm_bios_call_simple(void *_call) gdt[0x40 / 8] = bad_bios_desc; apm_irq_save(flags); + firmware_restrict_branch_speculation_start(); APM_DO_SAVE_SEGS; error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx, &call->eax); APM_DO_RESTORE_SEGS; + firmware_restrict_branch_speculation_end(); apm_irq_restore(flags); gdt[0x40 / 8] = save_desc_40; put_cpu(); @@ -2389,6 +2394,7 @@ static int __init apm_init(void) if (HZ != 100) idle_period = (idle_period * HZ) / 100; if (idle_threshold < 100) { + cpuidle_poll_state_init(&apm_idle_driver); if (!cpuidle_register_driver(&apm_idle_driver)) if (cpuidle_register_device(&apm_cpuidle_device)) cpuidle_unregister_driver(&apm_idle_driver); diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 8ea78275480d..76417a9aab73 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -17,6 +17,7 @@ #include #include #include +#include #ifdef CONFIG_XEN #include @@ -93,4 +94,13 @@ void common(void) { BLANK(); DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); + + /* TLB state for the entry code */ + OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask); + + /* Layout info for cpu_entry_area */ + OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss); + OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline); + OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page); + DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack)); } diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index dedf428b20b6..f91ba53e06c8 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -18,7 +18,7 @@ void foo(void) OFFSET(CPUINFO_x86, cpuinfo_x86, x86); OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor); OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model); - OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask); + OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping); OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level); OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability); OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id); @@ -47,13 +47,8 @@ void foo(void) BLANK(); /* Offset from the sysenter stack to tss.sp0 */ - DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) - - offsetofend(struct tss_struct, SYSENTER_stack)); - - /* Offset from cpu_tss to SYSENTER_stack */ - OFFSET(CPU_TSS_SYSENTER_stack, tss_struct, SYSENTER_stack); - /* Size of SYSENTER_stack */ - DEFINE(SIZEOF_SYSENTER_stack, sizeof(((struct tss_struct *)0)->SYSENTER_stack)); + DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) - + offsetofend(struct cpu_entry_area, entry_stack_page.stack)); #ifdef CONFIG_CC_STACKPROTECTOR BLANK(); diff --git 
a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 630212fa9b9d..bf51e51d808d 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -23,6 +23,9 @@ int main(void) #ifdef CONFIG_PARAVIRT OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64); OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs); +#ifdef CONFIG_DEBUG_ENTRY + OFFSET(PV_IRQ_save_fl, pv_irq_ops, save_fl); +#endif BLANK(); #endif @@ -63,6 +66,7 @@ int main(void) OFFSET(TSS_ist, tss_struct, x86_tss.ist); OFFSET(TSS_sp0, tss_struct, x86_tss.sp0); + OFFSET(TSS_sp1, tss_struct, x86_tss.sp1); BLANK(); #ifdef CONFIG_CC_STACKPROTECTOR diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index c60922a66385..570e8bb1f386 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -22,7 +22,8 @@ obj-y += common.o obj-y += rdrand.o obj-y += match.o obj-y += bugs.o -obj-$(CONFIG_CPU_FREQ) += aperfmperf.o +obj-y += aperfmperf.o +obj-y += cpuid-deps.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index d58184b7cd44..dda741bd5789 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -119,7 +120,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) return; } - if (c->x86_model == 6 && c->x86_mask == 1) { + if (c->x86_model == 6 && c->x86_stepping == 1) { const int K6_BUG_LOOP = 1000000; int n; void (*f_vide)(void); @@ -149,7 +150,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) /* K6 with old style WHCR */ if (c->x86_model < 8 || - (c->x86_model == 8 && c->x86_mask < 8)) { + (c->x86_model == 8 && c->x86_stepping < 8)) { /* We can only write allocate on the low 508Mb */ if (mbytes > 508) mbytes = 508; @@ -168,7 +169,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) return; } - if ((c->x86_model == 8 && c->x86_mask > 7) || + if ((c->x86_model == 8 && c->x86_stepping > 7) || c->x86_model == 9 || c->x86_model == 13) { /* The more serious chips .. */ @@ -221,7 +222,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c) * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx * As per AMD technical note 27212 0.2 */ - if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { + if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) { rdmsr(MSR_K7_CLK_CTL, l, h); if ((l & 0xfff00000) != 0x20000000) { pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", @@ -241,12 +242,12 @@ static void init_amd_k7(struct cpuinfo_x86 *c) * but they are not certified as MP capable. */ /* Athlon 660/661 is valid. */ - if ((c->x86_model == 6) && ((c->x86_mask == 0) || - (c->x86_mask == 1))) + if ((c->x86_model == 6) && ((c->x86_stepping == 0) || + (c->x86_stepping == 1))) return; /* Duron 670 is valid */ - if ((c->x86_model == 7) && (c->x86_mask == 0)) + if ((c->x86_model == 7) && (c->x86_stepping == 0)) return; /* @@ -256,8 +257,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c) * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for * more. 
*/ - if (((c->x86_model == 6) && (c->x86_mask >= 2)) || - ((c->x86_model == 7) && (c->x86_mask >= 1)) || + if (((c->x86_model == 6) && (c->x86_stepping >= 2)) || + ((c->x86_model == 7) && (c->x86_stepping >= 1)) || (c->x86_model > 7)) if (cpu_has(c, X86_FEATURE_MP)) return; @@ -297,7 +298,6 @@ static int nearby_node(int apicid) } #endif -#ifdef CONFIG_SMP /* * Fix up cpu_core_id for pre-F17h systems to be in the * [0 .. cores_per_node - 1] range. Not really needed but @@ -314,6 +314,13 @@ static void legacy_fixup_core_id(struct cpuinfo_x86 *c) c->cpu_core_id %= cus_per_node; } + +static void amd_get_topology_early(struct cpuinfo_x86 *c) +{ + if (cpu_has(c, X86_FEATURE_TOPOEXT)) + smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1; +} + /* * Fixup core topology information for * (1) AMD multi-node processors @@ -332,7 +339,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c) cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); node_id = ecx & 0xff; - smp_num_siblings = ((ebx >> 8) & 0xff) + 1; if (c->x86 == 0x15) c->cu_id = ebx & 0xff; @@ -375,7 +381,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c) legacy_fixup_core_id(c); } } -#endif /* * On a AMD dual core setup the lower bits of the APIC id distinguish the cores. @@ -383,7 +388,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c) */ static void amd_detect_cmp(struct cpuinfo_x86 *c) { -#ifdef CONFIG_SMP unsigned bits; int cpu = smp_processor_id(); @@ -395,16 +399,11 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) /* use socket ID also for last level cache */ per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; amd_get_topology(c); -#endif } u16 amd_get_nb_id(int cpu) { - u16 id = 0; -#ifdef CONFIG_SMP - id = per_cpu(cpu_llc_id, cpu); -#endif - return id; + return per_cpu(cpu_llc_id, cpu); } EXPORT_SYMBOL_GPL(amd_get_nb_id); @@ -554,10 +553,31 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) rdmsrl(MSR_FAM10H_NODE_ID, value); nodes_per_socket = ((value >> 3) & 7) + 1; } + + if (c->x86 >= 0x15 && c->x86 <= 0x17) { + unsigned int bit; + + switch (c->x86) { + case 0x15: bit = 54; break; + case 0x16: bit = 33; break; + case 0x17: bit = 10; break; + default: return; + } + /* + * Try to cache the base value so further operations can + * avoid RMW. If that faults, do not enable SSBD. 
+ */ + if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) { + setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD); + setup_force_cpu_cap(X86_FEATURE_SSBD); + x86_amd_ls_cfg_ssbd_mask = 1ULL << bit; + } + } } static void early_init_amd(struct cpuinfo_x86 *c) { + u64 value; u32 dummy; early_init_amd_mc(c); @@ -583,7 +603,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) /* Set MTRR capability flag if appropriate */ if (c->x86 == 5) if (c->x86_model == 13 || c->x86_model == 9 || - (c->x86_model == 8 && c->x86_mask >= 8)) + (c->x86_model == 8 && c->x86_stepping >= 8)) set_cpu_cap(c, X86_FEATURE_K6_MTRR); #endif #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI) @@ -647,6 +667,22 @@ static void early_init_amd(struct cpuinfo_x86 *c) clear_cpu_cap(c, X86_FEATURE_SME); } } + + /* Re-enable TopologyExtensions if switched off by BIOS */ + if (c->x86 == 0x15 && + (c->x86_model >= 0x10 && c->x86_model <= 0x6f) && + !cpu_has(c, X86_FEATURE_TOPOEXT)) { + + if (msr_set_bit(0xc0011005, 54) > 0) { + rdmsrl(0xc0011005, value); + if (value & BIT_64(54)) { + set_cpu_cap(c, X86_FEATURE_TOPOEXT); + pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n"); + } + } + } + + amd_get_topology_early(c); } static void init_amd_k8(struct cpuinfo_x86 *c) @@ -738,19 +774,6 @@ static void init_amd_bd(struct cpuinfo_x86 *c) { u64 value; - /* re-enable TopologyExtensions if switched off by BIOS */ - if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) && - !cpu_has(c, X86_FEATURE_TOPOEXT)) { - - if (msr_set_bit(0xc0011005, 54) > 0) { - rdmsrl(0xc0011005, value); - if (value & BIT_64(54)) { - set_cpu_cap(c, X86_FEATURE_TOPOEXT); - pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n"); - } - } - } - /* * The way access filter has a performance penalty on some workloads. * Disable it on the affected CPUs. @@ -765,11 +788,12 @@ static void init_amd_bd(struct cpuinfo_x86 *c) static void init_amd_zn(struct cpuinfo_x86 *c) { + set_cpu_cap(c, X86_FEATURE_ZEN); /* * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects * all revisions up to and including B1. */ - if (c->x86_model <= 1 && c->x86_mask <= 1) + if (c->x86_model <= 1 && c->x86_stepping <= 1) set_cpu_cap(c, X86_FEATURE_CPB); } @@ -804,21 +828,17 @@ static void init_amd(struct cpuinfo_x86 *c) case 0x17: init_amd_zn(c); break; } - /* Enable workaround for FXSAVE leak */ - if (c->x86 >= 6) + /* + * Enable workaround for FXSAVE leak on CPUs + * without an XSaveErPtr feature + */ + if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR))) set_cpu_bug(c, X86_BUG_FXSAVE_LEAK); cpu_detect_cache_sizes(c); - /* Multi core CPU? */ - if (c->extended_cpuid_level >= 0x80000008) { - amd_detect_cmp(c); - srat_detect_node(c); - } - -#ifdef CONFIG_X86_32 - detect_ht(c); -#endif + amd_detect_cmp(c); + srat_detect_node(c); init_amd_cacheinfo(c); @@ -826,8 +846,32 @@ static void init_amd(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_K8); if (cpu_has(c, X86_FEATURE_XMM2)) { - /* MFENCE stops RDTSC speculation */ - set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); + unsigned long long val; + int ret; + + /* + * A serializing LFENCE has less overhead than MFENCE, so + * use it for execution serialization. On families which + * don't have that MSR, LFENCE is already serializing. + * msr_set_bit() uses the safe accessors, too, even if the MSR + * is not present.
+ */ + msr_set_bit(MSR_F10H_DECFG, + MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); + + /* + * Verify that the MSR write was successful (could be running + * under a hypervisor) and only then assume that LFENCE is + * serializing. + */ + ret = rdmsrl_safe(MSR_F10H_DECFG, &val); + if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) { + /* A serializing LFENCE stops RDTSC speculation */ + set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); + } else { + /* MFENCE stops RDTSC speculation */ + set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); + } } /* @@ -853,11 +897,11 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) /* AMD errata T13 (order #21922) */ if ((c->x86 == 6)) { /* Duron Rev A0 */ - if (c->x86_model == 3 && c->x86_mask == 0) + if (c->x86_model == 3 && c->x86_stepping == 0) size = 64; /* Tbird rev A1/A2 */ if (c->x86_model == 4 && - (c->x86_mask == 0 || c->x86_mask == 1)) + (c->x86_stepping == 0 || c->x86_stepping == 1)) size = 256; } return size; @@ -994,7 +1038,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) } /* OSVW unavailable or ID unknown, match family-model-stepping range */ - ms = (cpu->x86_model << 4) | cpu->x86_mask; + ms = (cpu->x86_model << 4) | cpu->x86_stepping; while ((range = *erratum++)) if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && (ms >= AMD_MODEL_RANGE_START(range)) && diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c index 0ee83321a313..7eba34df54c3 100644 --- a/arch/x86/kernel/cpu/aperfmperf.c +++ b/arch/x86/kernel/cpu/aperfmperf.c @@ -14,6 +14,8 @@ #include #include +#include "cpu.h" + struct aperfmperf_sample { unsigned int khz; ktime_t time; @@ -24,7 +26,7 @@ struct aperfmperf_sample { static DEFINE_PER_CPU(struct aperfmperf_sample, samples); #define APERFMPERF_CACHE_THRESHOLD_MS 10 -#define APERFMPERF_REFRESH_DELAY_MS 20 +#define APERFMPERF_REFRESH_DELAY_MS 10 #define APERFMPERF_STALE_THRESHOLD_MS 1000 /* @@ -38,14 +40,8 @@ static void aperfmperf_snapshot_khz(void *dummy) u64 aperf, aperf_delta; u64 mperf, mperf_delta; struct aperfmperf_sample *s = this_cpu_ptr(&samples); - ktime_t now = ktime_get(); - s64 time_delta = ktime_ms_delta(now, s->time); unsigned long flags; - /* Don't bother re-computing within the cache threshold time. */ - if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS) - return; - local_irq_save(flags); rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); @@ -61,31 +57,68 @@ static void aperfmperf_snapshot_khz(void *dummy) if (mperf_delta == 0) return; - s->time = now; + s->time = ktime_get(); s->aperf = aperf; s->mperf = mperf; + s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta); +} - /* If the previous iteration was too long ago, discard it. */ - if (time_delta > APERFMPERF_STALE_THRESHOLD_MS) - s->khz = 0; - else - s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta); +static bool aperfmperf_snapshot_cpu(int cpu, ktime_t now, bool wait) +{ + s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu)); + + /* Don't bother re-computing within the cache threshold time. */ + if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS) + return true; + + smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, wait); + + /* Return false if the previous iteration was too long ago. 
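+ * The khz value consumed by callers is computed in the snapshot above + * as khz = cpu_khz * delta_APERF / delta_MPERF; e.g. a nominal 2 GHz + * CPU (cpu_khz == 2000000) whose APERF advanced 1.5x as fast as MPERF + * over the window reports 3000000 kHz, i.e. it ran at 3 GHz turbo.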
*/ + return time_delta <= APERFMPERF_STALE_THRESHOLD_MS; } -unsigned int arch_freq_get_on_cpu(int cpu) +unsigned int aperfmperf_get_khz(int cpu) { - unsigned int khz; + if (!cpu_khz) + return 0; + + if (!static_cpu_has(X86_FEATURE_APERFMPERF)) + return 0; + aperfmperf_snapshot_cpu(cpu, ktime_get(), true); + return per_cpu(samples.khz, cpu); +} + +void arch_freq_prepare_all(void) +{ + ktime_t now = ktime_get(); + bool wait = false; + int cpu; + + if (!cpu_khz) + return; + + if (!static_cpu_has(X86_FEATURE_APERFMPERF)) + return; + + for_each_online_cpu(cpu) + if (!aperfmperf_snapshot_cpu(cpu, now, false)) + wait = true; + + if (wait) + msleep(APERFMPERF_REFRESH_DELAY_MS); +} + +unsigned int arch_freq_get_on_cpu(int cpu) +{ if (!cpu_khz) return 0; if (!static_cpu_has(X86_FEATURE_APERFMPERF)) return 0; - smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1); - khz = per_cpu(samples.khz, cpu); - if (khz) - return khz; + if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true)) + return per_cpu(samples.khz, cpu); msleep(APERFMPERF_REFRESH_DELAY_MS); smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index ba0b2424c9b0..d07addb99b71 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -10,25 +10,88 @@ */ #include #include +#include +#include +#include +#include + +#include +#include #include #include #include #include #include +#include #include #include #include #include +#include +#include + +static void __init spectre_v2_select_mitigation(void); +static void __init ssb_select_mitigation(void); +static void __init l1tf_select_mitigation(void); + +/* + * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any + * writes to SPEC_CTRL contain whatever reserved bits have been set. + */ +u64 __ro_after_init x86_spec_ctrl_base; +EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); + +/* + * The vendor and possibly platform specific bits which can be modified in + * x86_spec_ctrl_base. + */ +static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; + +/* + * AMD specific MSR info for Speculative Store Bypass control. + * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu(). + */ +u64 __ro_after_init x86_amd_ls_cfg_base; +u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; void __init check_bugs(void) { identify_boot_cpu(); + /* + * identify_boot_cpu() initialized SMT support information, let the + * core code know. + */ + cpu_smt_check_topology_early(); + if (!IS_ENABLED(CONFIG_SMP)) { pr_info("CPU: "); print_cpu_info(&boot_cpu_data); } + /* + * Read the SPEC_CTRL MSR to account for reserved bits which may + * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD + * init code as it is not enumerated and depends on the family. + */ + if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) + rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + + /* Allow STIBP in MSR_SPEC_CTRL if supported */ + if (boot_cpu_has(X86_FEATURE_STIBP)) + x86_spec_ctrl_mask |= SPEC_CTRL_STIBP; + + /* Select the proper spectre mitigation before patching alternatives */ + spectre_v2_select_mitigation(); + + /* + * Select proper mitigation for any exposure to the Speculative Store + * Bypass vulnerability. + */ + ssb_select_mitigation(); + + l1tf_select_mitigation(); + #ifdef CONFIG_X86_32 /* * Check whether we are able to run this kernel safely on SMP. 
@@ -60,3 +123,696 @@ void __init check_bugs(void) set_memory_4k((unsigned long)__va(0), 1); #endif } + +/* The kernel command line selection */ +enum spectre_v2_mitigation_cmd { + SPECTRE_V2_CMD_NONE, + SPECTRE_V2_CMD_AUTO, + SPECTRE_V2_CMD_FORCE, + SPECTRE_V2_CMD_RETPOLINE, + SPECTRE_V2_CMD_RETPOLINE_GENERIC, + SPECTRE_V2_CMD_RETPOLINE_AMD, +}; + +static const char *spectre_v2_strings[] = { + [SPECTRE_V2_NONE] = "Vulnerable", + [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline", + [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline", + [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", + [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", +}; + +#undef pr_fmt +#define pr_fmt(fmt) "Spectre V2 : " fmt + +static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = + SPECTRE_V2_NONE; + +void +x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) +{ + u64 msrval, guestval, hostval = x86_spec_ctrl_base; + struct thread_info *ti = current_thread_info(); + + /* Is MSR_SPEC_CTRL implemented? */ + if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) { + /* + * Restrict guest_spec_ctrl to supported values. Clear the + * modifiable bits in the host base value and OR in the + * modifiable bits from the guest value. + */ + guestval = hostval & ~x86_spec_ctrl_mask; + guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; + + /* SSBD controlled in MSR_SPEC_CTRL */ + if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) + hostval |= ssbd_tif_to_spec_ctrl(ti->flags); + + if (hostval != guestval) { + msrval = setguest ? guestval : hostval; + wrmsrl(MSR_IA32_SPEC_CTRL, msrval); + } + } + + /* + * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update + * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported. + */ + if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) && + !static_cpu_has(X86_FEATURE_VIRT_SSBD)) + return; + + /* + * If the host has SSBD mitigation enabled, force it in the host's + * virtual MSR value. If it's not permanently enabled, evaluate + * current's TIF_SSBD thread flag. + */ + if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE)) + hostval = SPEC_CTRL_SSBD; + else + hostval = ssbd_tif_to_spec_ctrl(ti->flags); + + /* Sanitize the guest value */ + guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD; + + if (hostval != guestval) { + unsigned long tif; + + tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) : + ssbd_spec_ctrl_to_tif(hostval); + + speculative_store_bypass_update(tif); + } +} +EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); + +static void x86_amd_ssb_disable(void) +{ + u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; + + if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) + wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); + else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) + wrmsrl(MSR_AMD64_LS_CFG, msrval); +} + +#ifdef RETPOLINE +static bool spectre_v2_bad_module; + +bool retpoline_module_ok(bool has_retpoline) +{ + if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) + return true; + + pr_err("System may be vulnerable to spectre v2\n"); + spectre_v2_bad_module = true; + return false; +} + +static inline const char *spectre_v2_module_string(void) +{ + return spectre_v2_bad_module ?
" - vulnerable module loaded" : ""; +} +#else +static inline const char *spectre_v2_module_string(void) { return ""; } +#endif + +static void __init spec2_print_if_insecure(const char *reason) +{ + if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + pr_info("%s selected on command line.\n", reason); +} + +static void __init spec2_print_if_secure(const char *reason) +{ + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + pr_info("%s selected on command line.\n", reason); +} + +static inline bool retp_compiler(void) +{ + return __is_defined(RETPOLINE); +} + +static inline bool match_option(const char *arg, int arglen, const char *opt) +{ + int len = strlen(opt); + + return len == arglen && !strncmp(arg, opt, len); +} + +static const struct { + const char *option; + enum spectre_v2_mitigation_cmd cmd; + bool secure; +} mitigation_options[] = { + { "off", SPECTRE_V2_CMD_NONE, false }, + { "on", SPECTRE_V2_CMD_FORCE, true }, + { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, + { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false }, + { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, + { "auto", SPECTRE_V2_CMD_AUTO, false }, +}; + +static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) +{ + char arg[20]; + int ret, i; + enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; + + if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) + return SPECTRE_V2_CMD_NONE; + else { + ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); + if (ret < 0) + return SPECTRE_V2_CMD_AUTO; + + for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { + if (!match_option(arg, ret, mitigation_options[i].option)) + continue; + cmd = mitigation_options[i].cmd; + break; + } + + if (i >= ARRAY_SIZE(mitigation_options)) { + pr_err("unknown option (%s). Switching to AUTO select\n", arg); + return SPECTRE_V2_CMD_AUTO; + } + } + + if ((cmd == SPECTRE_V2_CMD_RETPOLINE || + cmd == SPECTRE_V2_CMD_RETPOLINE_AMD || + cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) && + !IS_ENABLED(CONFIG_RETPOLINE)) { + pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option); + return SPECTRE_V2_CMD_AUTO; + } + + if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { + pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n"); + return SPECTRE_V2_CMD_AUTO; + } + + if (mitigation_options[i].secure) + spec2_print_if_secure(mitigation_options[i].option); + else + spec2_print_if_insecure(mitigation_options[i].option); + + return cmd; +} + +static void __init spectre_v2_select_mitigation(void) +{ + enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); + enum spectre_v2_mitigation mode = SPECTRE_V2_NONE; + + /* + * If the CPU is not affected and the command line mode is NONE or AUTO + * then nothing to do. 
+ */ + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && + (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO)) + return; + + switch (cmd) { + case SPECTRE_V2_CMD_NONE: + return; + + case SPECTRE_V2_CMD_FORCE: + case SPECTRE_V2_CMD_AUTO: + if (IS_ENABLED(CONFIG_RETPOLINE)) + goto retpoline_auto; + break; + case SPECTRE_V2_CMD_RETPOLINE_AMD: + if (IS_ENABLED(CONFIG_RETPOLINE)) + goto retpoline_amd; + break; + case SPECTRE_V2_CMD_RETPOLINE_GENERIC: + if (IS_ENABLED(CONFIG_RETPOLINE)) + goto retpoline_generic; + break; + case SPECTRE_V2_CMD_RETPOLINE: + if (IS_ENABLED(CONFIG_RETPOLINE)) + goto retpoline_auto; + break; + } + pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!"); + return; + +retpoline_auto: + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { + retpoline_amd: + if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { + pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); + goto retpoline_generic; + } + mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : + SPECTRE_V2_RETPOLINE_MINIMAL_AMD; + setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); + setup_force_cpu_cap(X86_FEATURE_RETPOLINE); + } else { + retpoline_generic: + mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC : + SPECTRE_V2_RETPOLINE_MINIMAL; + setup_force_cpu_cap(X86_FEATURE_RETPOLINE); + } + + spectre_v2_enabled = mode; + pr_info("%s\n", spectre_v2_strings[mode]); + + /* + * If spectre v2 protection has been enabled, unconditionally fill + * RSB during a context switch; this protects against two independent + * issues: + * + * - RSB underflow (and switch to BTB) on Skylake+ + * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs + */ + setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); + pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); + + /* Initialize Indirect Branch Prediction Barrier if supported */ + if (boot_cpu_has(X86_FEATURE_IBPB)) { + setup_force_cpu_cap(X86_FEATURE_USE_IBPB); + pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); + } + + /* + * Retpoline means the kernel is safe because it has no indirect + * branches. But firmware isn't, so use IBRS to protect that. 
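+ * With X86_FEATURE_USE_IBRS_FW set, firmware (e.g. EFI runtime + * service) calls are expected to be bracketed, elsewhere in this + * series, by firmware_restrict_branch_speculation_start()/_end(), + * which set and clear SPEC_CTRL_IBRS in MSR_IA32_SPEC_CTRL around + * the call.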
+ */ + if (boot_cpu_has(X86_FEATURE_IBRS)) { + setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); + pr_info("Enabling Restricted Speculation for firmware calls\n"); + } +} + +#undef pr_fmt +#define pr_fmt(fmt) "Speculative Store Bypass: " fmt + +static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE; + +/* The kernel command line selection */ +enum ssb_mitigation_cmd { + SPEC_STORE_BYPASS_CMD_NONE, + SPEC_STORE_BYPASS_CMD_AUTO, + SPEC_STORE_BYPASS_CMD_ON, + SPEC_STORE_BYPASS_CMD_PRCTL, + SPEC_STORE_BYPASS_CMD_SECCOMP, +}; + +static const char *ssb_strings[] = { + [SPEC_STORE_BYPASS_NONE] = "Vulnerable", + [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", + [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", + [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", +}; + +static const struct { + const char *option; + enum ssb_mitigation_cmd cmd; +} ssb_mitigation_options[] = { + { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ + { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ + { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ + { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ + { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */ +}; + +static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) +{ + enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO; + char arg[20]; + int ret, i; + + if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) { + return SPEC_STORE_BYPASS_CMD_NONE; + } else { + ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", + arg, sizeof(arg)); + if (ret < 0) + return SPEC_STORE_BYPASS_CMD_AUTO; + + for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { + if (!match_option(arg, ret, ssb_mitigation_options[i].option)) + continue; + + cmd = ssb_mitigation_options[i].cmd; + break; + } + + if (i >= ARRAY_SIZE(ssb_mitigation_options)) { + pr_err("unknown option (%s). Switching to AUTO select\n", arg); + return SPEC_STORE_BYPASS_CMD_AUTO; + } + } + + return cmd; +} + +static enum ssb_mitigation __init __ssb_select_mitigation(void) +{ + enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE; + enum ssb_mitigation_cmd cmd; + + if (!boot_cpu_has(X86_FEATURE_SSBD)) + return mode; + + cmd = ssb_parse_cmdline(); + if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && + (cmd == SPEC_STORE_BYPASS_CMD_NONE || + cmd == SPEC_STORE_BYPASS_CMD_AUTO)) + return mode; + + switch (cmd) { + case SPEC_STORE_BYPASS_CMD_AUTO: + case SPEC_STORE_BYPASS_CMD_SECCOMP: + /* + * Choose prctl+seccomp as the default mode if seccomp is + * enabled. + */ + if (IS_ENABLED(CONFIG_SECCOMP)) + mode = SPEC_STORE_BYPASS_SECCOMP; + else + mode = SPEC_STORE_BYPASS_PRCTL; + break; + case SPEC_STORE_BYPASS_CMD_ON: + mode = SPEC_STORE_BYPASS_DISABLE; + break; + case SPEC_STORE_BYPASS_CMD_PRCTL: + mode = SPEC_STORE_BYPASS_PRCTL; + break; + case SPEC_STORE_BYPASS_CMD_NONE: + break; + } + + /* + * We have three CPU feature flags that are in play here: + * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. 
+ * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass + * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation + */ + if (mode == SPEC_STORE_BYPASS_DISABLE) { + setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); + /* + * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses + * a completely different MSR and bit dependent on family. + */ + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_INTEL: + x86_spec_ctrl_base |= SPEC_CTRL_SSBD; + x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + break; + case X86_VENDOR_AMD: + x86_amd_ssb_disable(); + break; + } + } + + return mode; +} + +static void ssb_select_mitigation(void) +{ + ssb_mode = __ssb_select_mitigation(); + + if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) + pr_info("%s\n", ssb_strings[ssb_mode]); +} + +#undef pr_fmt +#define pr_fmt(fmt) "Speculation prctl: " fmt + +static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) +{ + bool update; + + if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && + ssb_mode != SPEC_STORE_BYPASS_SECCOMP) + return -ENXIO; + + switch (ctrl) { + case PR_SPEC_ENABLE: + /* If speculation is force disabled, enable is not allowed */ + if (task_spec_ssb_force_disable(task)) + return -EPERM; + task_clear_spec_ssb_disable(task); + update = test_and_clear_tsk_thread_flag(task, TIF_SSBD); + break; + case PR_SPEC_DISABLE: + task_set_spec_ssb_disable(task); + update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); + break; + case PR_SPEC_FORCE_DISABLE: + task_set_spec_ssb_disable(task); + task_set_spec_ssb_force_disable(task); + update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); + break; + default: + return -ERANGE; + } + + /* + * If being set on non-current task, delay setting the CPU + * mitigation until it is next scheduled. 
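+ * From userspace this path is driven via prctl(2), e.g.: + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, + * PR_SPEC_DISABLE, 0, 0); + * and the current state can be read back with + * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0).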
+ */ + if (task == current && update) + speculative_store_bypass_update_current(); + + return 0; +} + +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, + unsigned long ctrl) +{ + switch (which) { + case PR_SPEC_STORE_BYPASS: + return ssb_prctl_set(task, ctrl); + default: + return -ENODEV; + } +} + +#ifdef CONFIG_SECCOMP +void arch_seccomp_spec_mitigate(struct task_struct *task) +{ + if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) + ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); +} +#endif + +static int ssb_prctl_get(struct task_struct *task) +{ + switch (ssb_mode) { + case SPEC_STORE_BYPASS_DISABLE: + return PR_SPEC_DISABLE; + case SPEC_STORE_BYPASS_SECCOMP: + case SPEC_STORE_BYPASS_PRCTL: + if (task_spec_ssb_force_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; + if (task_spec_ssb_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE; + return PR_SPEC_PRCTL | PR_SPEC_ENABLE; + default: + if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) + return PR_SPEC_ENABLE; + return PR_SPEC_NOT_AFFECTED; + } +} + +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) +{ + switch (which) { + case PR_SPEC_STORE_BYPASS: + return ssb_prctl_get(task); + default: + return -ENODEV; + } +} + +void x86_spec_ctrl_setup_ap(void) +{ + if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + + if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) + x86_amd_ssb_disable(); +} + +#undef pr_fmt +#define pr_fmt(fmt) "L1TF: " fmt + +/* Default mitigation for L1TF-affected CPUs */ +enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH; +#if IS_ENABLED(CONFIG_KVM_INTEL) +EXPORT_SYMBOL_GPL(l1tf_mitigation); +#endif +enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; +EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); + +static void __init l1tf_select_mitigation(void) +{ + u64 half_pa; + + if (!boot_cpu_has_bug(X86_BUG_L1TF)) + return; + + switch (l1tf_mitigation) { + case L1TF_MITIGATION_OFF: + case L1TF_MITIGATION_FLUSH_NOWARN: + case L1TF_MITIGATION_FLUSH: + break; + case L1TF_MITIGATION_FLUSH_NOSMT: + case L1TF_MITIGATION_FULL: + cpu_smt_disable(false); + break; + case L1TF_MITIGATION_FULL_FORCE: + cpu_smt_disable(true); + break; + } + +#if CONFIG_PGTABLE_LEVELS == 2 + pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n"); + return; +#endif + + /* + * This is extremely unlikely to happen because almost all + * systems have far more address space below MAX_PA/2 than RAM + * that can be fitted into their DIMM slots. + */ + half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; + if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { + pr_warn("System has more than MAX_PA/2 memory.
L1TF mitigation not effective.\n"); + return; + } + + setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); +} + +static int __init l1tf_cmdline(char *str) +{ + if (!boot_cpu_has_bug(X86_BUG_L1TF)) + return 0; + + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) + l1tf_mitigation = L1TF_MITIGATION_OFF; + else if (!strcmp(str, "flush,nowarn")) + l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN; + else if (!strcmp(str, "flush")) + l1tf_mitigation = L1TF_MITIGATION_FLUSH; + else if (!strcmp(str, "flush,nosmt")) + l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; + else if (!strcmp(str, "full")) + l1tf_mitigation = L1TF_MITIGATION_FULL; + else if (!strcmp(str, "full,force")) + l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE; + + return 0; +} +early_param("l1tf", l1tf_cmdline); + +#undef pr_fmt + +#ifdef CONFIG_SYSFS + +#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" + +#if IS_ENABLED(CONFIG_KVM_INTEL) +static const char *l1tf_vmx_states[] = { + [VMENTER_L1D_FLUSH_AUTO] = "auto", + [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", + [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", + [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes", + [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled", + [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary" +}; + +static ssize_t l1tf_show_state(char *buf) +{ + if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) + return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); + + if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || + (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && + cpu_smt_control == CPU_SMT_ENABLED)) + return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, + l1tf_vmx_states[l1tf_vmx_mitigation]); + + return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, + l1tf_vmx_states[l1tf_vmx_mitigation], + cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled"); +} +#else +static ssize_t l1tf_show_state(char *buf) +{ + return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); +} +#endif + +static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, + char *buf, unsigned int bug) +{ + if (!boot_cpu_has_bug(bug)) + return sprintf(buf, "Not affected\n"); + + switch (bug) { + case X86_BUG_CPU_MELTDOWN: + if (boot_cpu_has(X86_FEATURE_PTI)) + return sprintf(buf, "Mitigation: PTI\n"); + + break; + + case X86_BUG_SPECTRE_V1: + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); + + case X86_BUG_SPECTRE_V2: + return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], + boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? 
", IBRS_FW" : "", + spectre_v2_module_string()); + + case X86_BUG_SPEC_STORE_BYPASS: + return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); + + case X86_BUG_L1TF: + if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) + return l1tf_show_state(buf); + break; + default: + break; + } + + return sprintf(buf, "Vulnerable\n"); +} + +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); +} + +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); +} + +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); +} + +ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); +} + +ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); +} +#endif diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 68bc6d9b3132..595be776727d 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -136,7 +136,7 @@ static void init_centaur(struct cpuinfo_x86 *c) clear_cpu_cap(c, X86_FEATURE_TSC); break; case 8: - switch (c->x86_mask) { + switch (c->x86_stepping) { default: name = "2"; break; @@ -211,7 +211,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) * - Note, it seems this may only be in engineering samples. */ if ((c->x86 == 6) && (c->x86_model == 9) && - (c->x86_mask == 1) && (size == 65)) + (c->x86_stepping == 1) && (size == 65)) size -= 1; return size; } diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index c9176bae7fd8..837b6dbcb846 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -47,6 +47,8 @@ #include #include #include +#include +#include #ifdef CONFIG_X86_LOCAL_APIC #include @@ -64,6 +66,13 @@ cpumask_var_t cpu_callin_mask; /* representing cpus for which sibling maps can be computed */ cpumask_var_t cpu_sibling_setup_mask; +/* Number of siblings per CPU package */ +int smp_num_siblings = 1; +EXPORT_SYMBOL(smp_num_siblings); + +/* Last level cache ID of each logical CPU */ +DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID; + /* correctly size the local cpu masks */ void __init setup_cpu_local_masks(void) { @@ -329,6 +338,28 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c) } } +static __always_inline void setup_umip(struct cpuinfo_x86 *c) +{ + /* Check the boot processor, plus build option for UMIP. */ + if (!cpu_feature_enabled(X86_FEATURE_UMIP)) + goto out; + + /* Check the current processor's cpuid bits. */ + if (!cpu_has(c, X86_FEATURE_UMIP)) + goto out; + + cr4_set_bits(X86_CR4_UMIP); + + return; + +out: + /* + * Make sure UMIP is disabled in case it was enabled in a + * previous boot (e.g., via kexec). + */ + cr4_clear_bits(X86_CR4_UMIP); +} + /* * Protection Keys are not available in 32-bit mode. 
*/ @@ -452,8 +483,8 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c) return NULL; /* Not found */ } -__u32 cpu_caps_cleared[NCAPINTS]; -__u32 cpu_caps_set[NCAPINTS]; +__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS]; +__u32 cpu_caps_set[NCAPINTS + NBUGINTS]; void load_percpu_segment(int cpu) { @@ -466,28 +497,23 @@ void load_percpu_segment(int cpu) load_stack_canary_segment(); } -/* Setup the fixmap mapping only once per-processor */ -static inline void setup_fixmap_gdt(int cpu) -{ -#ifdef CONFIG_X86_64 - /* On 64-bit systems, we use a read-only fixmap GDT. */ - pgprot_t prot = PAGE_KERNEL_RO; -#else - /* - * On native 32-bit systems, the GDT cannot be read-only because - * our double fault handler uses a task gate, and entering through - * a task gate needs to change an available TSS to busy. If the GDT - * is read-only, that will triple fault. - * - * On Xen PV, the GDT must be read-only because the hypervisor requires - * it. - */ - pgprot_t prot = boot_cpu_has(X86_FEATURE_XENPV) ? - PAGE_KERNEL_RO : PAGE_KERNEL; +#ifdef CONFIG_X86_32 +/* The 32-bit entry code needs to find cpu_entry_area. */ +DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); #endif - __set_fixmap(get_cpu_gdt_ro_index(cpu), get_cpu_gdt_paddr(cpu), prot); -} +#ifdef CONFIG_X86_64 +/* + * Special IST stacks which the CPU switches to when it calls + * an IST-marked descriptor entry. Up to 7 stacks (hardware + * limit), all of them are 4K, except the debug stack which + * is 8K. + */ +static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { + [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, + [DEBUG_STACK - 1] = DEBUG_STKSZ +}; +#endif /* Load the original GDT from the per-cpu structure */ void load_direct_gdt(int cpu) @@ -617,33 +643,36 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c) tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]); } -void detect_ht(struct cpuinfo_x86 *c) +int detect_ht_early(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP u32 eax, ebx, ecx, edx; - int index_msb, core_bits; - static bool printed; if (!cpu_has(c, X86_FEATURE_HT)) - return; + return -1; if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) - goto out; + return -1; if (cpu_has(c, X86_FEATURE_XTOPOLOGY)) - return; + return -1; cpuid(1, &eax, &ebx, &ecx, &edx); smp_num_siblings = (ebx & 0xff0000) >> 16; - - if (smp_num_siblings == 1) { + if (smp_num_siblings == 1) pr_info_once("CPU0: Hyper-Threading is disabled\n"); - goto out; - } +#endif + return 0; +} - if (smp_num_siblings <= 1) - goto out; +void detect_ht(struct cpuinfo_x86 *c) +{ +#ifdef CONFIG_SMP + int index_msb, core_bits; + + if (detect_ht_early(c) < 0) + return; index_msb = get_count_order(smp_num_siblings); c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb); @@ -656,15 +685,6 @@ void detect_ht(struct cpuinfo_x86 *c) c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) & ((1 << core_bits) - 1); - -out: - if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) { - pr_info("CPU: Physical Processor ID: %d\n", - c->phys_proc_id); - pr_info("CPU: Processor Core ID: %d\n", - c->cpu_core_id); - printed = 1; - } #endif } @@ -710,7 +730,7 @@ void cpu_detect(struct cpuinfo_x86 *c) cpuid(0x00000001, &tfms, &misc, &junk, &cap0); c->x86 = x86_family(tfms); c->x86_model = x86_model(tfms); - c->x86_mask = x86_stepping(tfms); + c->x86_stepping = x86_stepping(tfms); if (cap0 & (1<<19)) { c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; @@ -723,12 +743,47 @@ static void apply_forced_caps(struct cpuinfo_x86 *c) { int i; - for (i = 0; i < NCAPINTS; i++) { + for 
(i = 0; i < NCAPINTS + NBUGINTS; i++) { c->x86_capability[i] &= ~cpu_caps_cleared[i]; c->x86_capability[i] |= cpu_caps_set[i]; } } +static void init_speculation_control(struct cpuinfo_x86 *c) +{ + /* + * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support, + * and they also have a different bit for STIBP support. Also, + * a hypervisor might have set the individual AMD bits even on + * Intel CPUs, for finer-grained selection of what's available. + */ + if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) { + set_cpu_cap(c, X86_FEATURE_IBRS); + set_cpu_cap(c, X86_FEATURE_IBPB); + set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); + } + + if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) + set_cpu_cap(c, X86_FEATURE_STIBP); + + if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) || + cpu_has(c, X86_FEATURE_VIRT_SSBD)) + set_cpu_cap(c, X86_FEATURE_SSBD); + + if (cpu_has(c, X86_FEATURE_AMD_IBRS)) { + set_cpu_cap(c, X86_FEATURE_IBRS); + set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); + } + + if (cpu_has(c, X86_FEATURE_AMD_IBPB)) + set_cpu_cap(c, X86_FEATURE_IBPB); + + if (cpu_has(c, X86_FEATURE_AMD_STIBP)) { + set_cpu_cap(c, X86_FEATURE_STIBP); + set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); + } +} + void get_cpu_cap(struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; @@ -750,6 +805,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c) cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); c->x86_capability[CPUID_7_0_EBX] = ebx; c->x86_capability[CPUID_7_ECX] = ecx; + c->x86_capability[CPUID_7_EDX] = edx; } /* Extended state features: level 0x0000000d */ @@ -822,6 +878,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c) c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); init_scattered_cpuid_features(c); + init_speculation_control(c); /* * Clear/Set all flags overridden by options, after probe. @@ -857,6 +914,95 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) #endif } +static const __initconst struct x86_cpu_id cpu_no_speculation[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY }, + { X86_VENDOR_CENTAUR, 5 }, + { X86_VENDOR_INTEL, 5 }, + { X86_VENDOR_NSC, 5 }, + { X86_VENDOR_ANY, 4 }, + {} +}; + +static const __initconst struct x86_cpu_id cpu_no_meltdown[] = { + { X86_VENDOR_AMD }, + {} +}; + +static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, + { X86_VENDOR_CENTAUR, 5, }, + { X86_VENDOR_INTEL, 5, }, + { X86_VENDOR_NSC, 5, }, + { X86_VENDOR_AMD, 0x12, }, + { X86_VENDOR_AMD, 0x11, }, + { X86_VENDOR_AMD, 0x10, }, + { X86_VENDOR_AMD, 0xf, }, + { X86_VENDOR_ANY, 4, }, + {} +}; + +static const __initconst struct x86_cpu_id cpu_no_l1tf[] = { + /* in addition to cpu_no_speculation */ 
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, + {} +}; + +static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) +{ + u64 ia32_cap = 0; + + if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); + + if (!x86_match_cpu(cpu_no_spec_store_bypass) && + !(ia32_cap & ARCH_CAP_SSB_NO)) + setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); + + if (x86_match_cpu(cpu_no_speculation)) + return; + + setup_force_cpu_bug(X86_BUG_SPECTRE_V1); + setup_force_cpu_bug(X86_BUG_SPECTRE_V2); + + if (x86_match_cpu(cpu_no_meltdown)) + return; + + /* Rogue Data Cache Load? No! */ + if (ia32_cap & ARCH_CAP_RDCL_NO) + return; + + setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); + + if (x86_match_cpu(cpu_no_l1tf)) + return; + + setup_force_cpu_bug(X86_BUG_L1TF); +} + /* * Do minimum CPU detection early. * Fields really needed: vendor, cpuid_level, family, model, mask, @@ -903,6 +1049,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) } setup_force_cpu_cap(X86_FEATURE_ALWAYS); + + cpu_set_bug_bits(c); + fpu__init_system(c); #ifdef CONFIG_X86_32 @@ -1098,9 +1247,9 @@ static void identify_cpu(struct cpuinfo_x86 *c) int i; c->loops_per_jiffy = loops_per_jiffy; - c->x86_cache_size = -1; + c->x86_cache_size = 0; c->x86_vendor = X86_VENDOR_UNKNOWN; - c->x86_model = c->x86_mask = 0; /* So far unknown... */ + c->x86_model = c->x86_stepping = 0; /* So far unknown... */ c->x86_vendor_id[0] = '\0'; /* Unset */ c->x86_model_id[0] = '\0'; /* Unset */ c->x86_max_cores = 1; @@ -1147,9 +1296,10 @@ static void identify_cpu(struct cpuinfo_x86 *c) /* Disable the PN if appropriate */ squash_the_stupid_serial_number(c); - /* Set up SMEP/SMAP */ + /* Set up SMEP/SMAP/UMIP */ setup_smep(c); setup_smap(c); + setup_umip(c); /* * The vendor-specific functions might have changed features. 
@@ -1225,7 +1375,7 @@ void enable_sep_cpu(void) return; cpu = get_cpu(); - tss = &per_cpu(cpu_tss, cpu); + tss = &per_cpu(cpu_tss_rw, cpu); /* * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field -- @@ -1234,11 +1384,7 @@ void enable_sep_cpu(void) tss->x86_tss.ss1 = __KERNEL_CS; wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); - - wrmsr(MSR_IA32_SYSENTER_ESP, - (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack), - 0); - + wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0); wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); put_cpu(); @@ -1264,6 +1410,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c) #endif mtrr_ap_init(); validate_apic_and_package_id(c); + x86_spec_ctrl_setup_ap(); } static __init int setup_noclflush(char *arg) @@ -1295,24 +1442,22 @@ void print_cpu_info(struct cpuinfo_x86 *c) pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); - if (c->x86_mask || c->cpuid_level >= 0) - pr_cont(", stepping: 0x%x)\n", c->x86_mask); + if (c->x86_stepping || c->cpuid_level >= 0) + pr_cont(", stepping: 0x%x)\n", c->x86_stepping); else pr_cont(")\n"); } -static __init int setup_disablecpuid(char *arg) +/* + * clearcpuid= was already parsed in fpu__init_parse_early_param. + * But we need to keep a dummy __setup around otherwise it would + * show up as an environment variable for init. + */ +static __init int setup_clearcpuid(char *arg) { - int bit; - - if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32) - setup_clear_cpu_cap(bit); - else - return 0; - return 1; } -__setup("clearcpuid=", setup_disablecpuid); +__setup("clearcpuid=", setup_clearcpuid); #ifdef CONFIG_X86_64 DEFINE_PER_CPU_FIRST(union irq_stack_union, @@ -1334,25 +1479,22 @@ DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1; DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; EXPORT_PER_CPU_SYMBOL(__preempt_count); -/* - * Special IST stacks which the CPU switches to when it calls - * an IST-marked descriptor entry. Up to 7 stacks (hardware - * limit), all of them are 4K, except the debug stack which - * is 8K. - */ -static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { - [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, - [DEBUG_STACK - 1] = DEBUG_STKSZ -}; - -static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks - [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); - /* May not be marked __init: used by software suspend */ void syscall_init(void) { + extern char _entry_trampoline[]; + extern char entry_SYSCALL_64_trampoline[]; + + int cpu = smp_processor_id(); + unsigned long SYSCALL64_entry_trampoline = + (unsigned long)get_cpu_entry_area(cpu)->entry_trampoline + + (entry_SYSCALL_64_trampoline - _entry_trampoline); + wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS); - wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); + if (static_cpu_has(X86_FEATURE_PTI)) + wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline); + else + wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); #ifdef CONFIG_IA32_EMULATION wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat); @@ -1363,7 +1505,7 @@ void syscall_init(void) * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). 
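 * Hence the wrmsrl_safe() variants below: if one of these writes faults + * (e.g. the MSR is not implemented), the fault is caught and ignored + * rather than crashing the boot.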
*/ wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); - wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); + wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1)); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); #else wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); @@ -1507,7 +1649,7 @@ void cpu_init(void) if (cpu) load_ucode_ap(); - t = &per_cpu(cpu_tss, cpu); + t = &per_cpu(cpu_tss_rw, cpu); oist = &per_cpu(orig_ist, cpu); #ifdef CONFIG_NUMA @@ -1546,7 +1688,7 @@ void cpu_init(void) * set up and load the per-CPU TSS */ if (!oist->ist[0]) { - char *estacks = per_cpu(exception_stacks, cpu); + char *estacks = get_cpu_entry_area(cpu)->exception_stacks; for (v = 0; v < N_EXCEPTION_STACKS; v++) { estacks += exception_stack_sizes[v]; @@ -1557,7 +1699,7 @@ void cpu_init(void) } } - t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); + t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; /* * <= is required because the CPU will access up to @@ -1572,9 +1714,14 @@ void cpu_init(void) initialize_tlbstate_and_flush(); enter_lazy_tlb(&init_mm, me); - load_sp0(t, ¤t->thread); - set_tss_desc(cpu, t); + /* + * Initialize the TSS. sp0 points to the entry trampoline stack + * regardless of what task is running. + */ + set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); load_TR_desc(); + load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1)); + load_mm_ldt(&init_mm); clear_all_debug_regs(); @@ -1585,7 +1732,6 @@ void cpu_init(void) if (is_uv_system()) uv_cpu_init(); - setup_fixmap_gdt(cpu); load_fixmap_gdt(cpu); } @@ -1595,8 +1741,7 @@ void cpu_init(void) { int cpu = smp_processor_id(); struct task_struct *curr = current; - struct tss_struct *t = &per_cpu(cpu_tss, cpu); - struct thread_struct *thread = &curr->thread; + struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu); wait_for_master_cpu(cpu); @@ -1627,12 +1772,16 @@ void cpu_init(void) initialize_tlbstate_and_flush(); enter_lazy_tlb(&init_mm, curr); - load_sp0(t, thread); - set_tss_desc(cpu, t); + /* + * Initialize the TSS. Don't bother initializing sp0, as the initial + * task never enters user mode. + */ + set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); load_TR_desc(); + load_mm_ldt(&init_mm); - t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); + t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; #ifdef CONFIG_DOUBLEFAULT /* Set up doublefault TSS pointer in the GDT */ @@ -1644,7 +1793,6 @@ void cpu_init(void) fpu__init_cpu(); - setup_fixmap_gdt(cpu); load_fixmap_gdt(cpu); } #endif @@ -1665,3 +1813,33 @@ static int __init init_cpu_syscore(void) return 0; } core_initcall(init_cpu_syscore); + +/* + * The microcode loader calls this upon late microcode load to recheck features, + * only when microcode has been updated. Caller holds microcode_mutex and CPU + * hotplug lock. + */ +void microcode_check(void) +{ + struct cpuinfo_x86 info; + + perf_check_microcode(); + + /* Reload CPUID max function as it might've changed. */ + info.cpuid_level = cpuid_eax(0); + + /* + * Copy all capability leafs to pick up the synthetic ones so that + * memcmp() below doesn't fail on that. The ones coming from CPUID will + * get overwritten in get_cpu_cap(). 
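+ * E.g. a late microcode update that newly enumerates the IBRS/IBPB + * CPUID bits shows up here as a changed capability word and triggers + * the warnings below, since feature flags established this late are + * not re-evaluated everywhere.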
+ */ + memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)); + + get_cpu_cap(&info); + + if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability))) + return; + + pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n"); + pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); +} diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index f52a370b6c00..cca588407dca 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -47,4 +47,11 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[], extern void get_cpu_cap(struct cpuinfo_x86 *c); extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); +extern int detect_extended_topology_early(struct cpuinfo_x86 *c); +extern int detect_ht_early(struct cpuinfo_x86 *c); + +unsigned int aperfmperf_get_khz(int cpu); + +extern void x86_spec_ctrl_setup_ap(void); + #endif /* ARCH_X86_CPU_H */ diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c new file mode 100644 index 000000000000..904b0a3c4e53 --- /dev/null +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -0,0 +1,121 @@ +/* Declare dependencies between CPUIDs */ +#include +#include +#include +#include + +struct cpuid_dep { + unsigned int feature; + unsigned int depends; +}; + +/* + * Table of CPUID features that depend on others. + * + * This only includes dependencies that can be usefully disabled, not + * features part of the base set (like FPU). + * + * Note this all is not __init / __initdata because it can be + * called from cpu hotplug. It shouldn't do anything in this case, + * but it's difficult to tell that to the init reference checker. 
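+ * E.g. clearing X86_FEATURE_XSAVE via this table also clears XSAVEOPT, + * XSAVEC, XSAVES, PKU and AVX, and through AVX transitively every + * AVX2/AVX512 feature listed below.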
+ */ +const static struct cpuid_dep cpuid_deps[] = { + { X86_FEATURE_XSAVEOPT, X86_FEATURE_XSAVE }, + { X86_FEATURE_XSAVEC, X86_FEATURE_XSAVE }, + { X86_FEATURE_XSAVES, X86_FEATURE_XSAVE }, + { X86_FEATURE_AVX, X86_FEATURE_XSAVE }, + { X86_FEATURE_PKU, X86_FEATURE_XSAVE }, + { X86_FEATURE_MPX, X86_FEATURE_XSAVE }, + { X86_FEATURE_XGETBV1, X86_FEATURE_XSAVE }, + { X86_FEATURE_FXSR_OPT, X86_FEATURE_FXSR }, + { X86_FEATURE_XMM, X86_FEATURE_FXSR }, + { X86_FEATURE_XMM2, X86_FEATURE_XMM }, + { X86_FEATURE_XMM3, X86_FEATURE_XMM2 }, + { X86_FEATURE_XMM4_1, X86_FEATURE_XMM2 }, + { X86_FEATURE_XMM4_2, X86_FEATURE_XMM2 }, + { X86_FEATURE_XMM3, X86_FEATURE_XMM2 }, + { X86_FEATURE_PCLMULQDQ, X86_FEATURE_XMM2 }, + { X86_FEATURE_SSSE3, X86_FEATURE_XMM2, }, + { X86_FEATURE_F16C, X86_FEATURE_XMM2, }, + { X86_FEATURE_AES, X86_FEATURE_XMM2 }, + { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 }, + { X86_FEATURE_FMA, X86_FEATURE_AVX }, + { X86_FEATURE_AVX2, X86_FEATURE_AVX, }, + { X86_FEATURE_AVX512F, X86_FEATURE_AVX, }, + { X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512PF, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512ER, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512CD, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512DQ, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512BW, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL }, + { X86_FEATURE_GFNI, X86_FEATURE_AVX512VL }, + { X86_FEATURE_VAES, X86_FEATURE_AVX512VL }, + { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL }, + { X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL }, + { X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL }, + { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_4FMAPS, X86_FEATURE_AVX512F }, + { X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F }, + {} +}; + +static inline void clear_feature(struct cpuinfo_x86 *c, unsigned int feature) +{ + /* + * Note: This could use the non atomic __*_bit() variants, but the + * rest of the cpufeature code uses atomics as well, so keep it for + * consistency. Cleanup all of it separately. + */ + if (!c) { + clear_cpu_cap(&boot_cpu_data, feature); + set_bit(feature, (unsigned long *)cpu_caps_cleared); + } else { + clear_bit(feature, (unsigned long *)c->x86_capability); + } +} + +/* Take the capabilities and the BUG bits into account */ +#define MAX_FEATURE_BITS ((NCAPINTS + NBUGINTS) * sizeof(u32) * 8) + +static void do_clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature) +{ + DECLARE_BITMAP(disable, MAX_FEATURE_BITS); + const struct cpuid_dep *d; + bool changed; + + if (WARN_ON(feature >= MAX_FEATURE_BITS)) + return; + + clear_feature(c, feature); + + /* Collect all features to disable, handling dependencies */ + memset(disable, 0, sizeof(disable)); + __set_bit(feature, disable); + + /* Loop until we get a stable state. 
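+ * The do/while makes the result independent of table ordering: if a + * dependent entry preceded the feature it depends on, a second pass + * would still pick it up, so iterate until no new disable bit is set.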
*/ + do { + changed = false; + for (d = cpuid_deps; d->feature; d++) { + if (!test_bit(d->depends, disable)) + continue; + if (__test_and_set_bit(d->feature, disable)) + continue; + + changed = true; + clear_feature(c, d->feature); + } + } while (changed); +} + +void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature) +{ + do_clear_cpu_cap(c, feature); +} + +void setup_clear_cpu_cap(unsigned int feature) +{ + do_clear_cpu_cap(NULL, feature); +} diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 6b4bb335641f..8949b7ae6d92 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -215,7 +215,7 @@ static void init_cyrix(struct cpuinfo_x86 *c) /* common case step number/rev -- exceptions handled below */ c->x86_model = (dir1 >> 4) + 1; - c->x86_mask = dir1 & 0xf; + c->x86_stepping = dir1 & 0xf; /* Now cook; the original recipe is by Channing Corn, from Cyrix. * We do the same thing for each generation: we work out diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index 4fa90006ac68..cf6252f08f85 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c @@ -26,6 +26,13 @@ #include #include +extern const struct hypervisor_x86 x86_hyper_vmware; +extern const struct hypervisor_x86 x86_hyper_ms_hyperv; +extern const struct hypervisor_x86 x86_hyper_xen_pv; +extern const struct hypervisor_x86 x86_hyper_xen_hvm; +extern const struct hypervisor_x86 x86_hyper_kvm; +extern const struct hypervisor_x86 x86_hyper_acrn; + static const __initconst struct hypervisor_x86 * const hypervisors[] = { #ifdef CONFIG_XEN_PV @@ -39,56 +46,57 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] = #ifdef CONFIG_KVM_GUEST &x86_hyper_kvm, #endif +#ifdef CONFIG_ACRN_HV + &x86_hyper_acrn, +#endif }; -const struct hypervisor_x86 *x86_hyper; -EXPORT_SYMBOL(x86_hyper); +enum x86_hypervisor_type x86_hyper_type; +EXPORT_SYMBOL(x86_hyper_type); -static inline void __init +static inline const struct hypervisor_x86 * __init detect_hypervisor_vendor(void) { - const struct hypervisor_x86 *h, * const *p; + const struct hypervisor_x86 *h = NULL, * const *p; uint32_t pri, max_pri = 0; for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) { - h = *p; - pri = h->detect(); - if (pri != 0 && pri > max_pri) { + pri = (*p)->detect(); + if (pri > max_pri) { max_pri = pri; - x86_hyper = h; + h = *p; } } - if (max_pri) - pr_info("Hypervisor detected: %s\n", x86_hyper->name); + if (h) + pr_info("Hypervisor detected: %s\n", h->name); + + return h; } -void __init init_hypervisor_platform(void) +static void __init copy_array(const void *src, void *target, unsigned int size) { + unsigned int i, n = size / sizeof(void *); + const void * const *from = (const void * const *)src; + const void **to = (const void **)target; - detect_hypervisor_vendor(); - - if (!x86_hyper) - return; - - if (x86_hyper->init_platform) - x86_hyper->init_platform(); + for (i = 0; i < n; i++) + if (from[i]) + to[i] = from[i]; } -bool __init hypervisor_x2apic_available(void) +void __init init_hypervisor_platform(void) { - return x86_hyper && - x86_hyper->x2apic_available && - x86_hyper->x2apic_available(); -} + const struct hypervisor_x86 *h; -void hypervisor_pin_vcpu(int cpu) -{ - if (!x86_hyper) + h = detect_hypervisor_vendor(); + + if (!h) return; - if (x86_hyper->pin_vcpu) - x86_hyper->pin_vcpu(cpu); - else - WARN_ONCE(1, "vcpu pinning requested but not supported!\n"); + copy_array(&h->init, &x86_init.hyper, sizeof(h->init)); + 
copy_array(&h->runtime, &x86_platform.hyper, sizeof(h->runtime)); + + x86_hyper_type = h->type; + x86_init.hyper.init_platform(); } diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index b720dacac051..278be092b300 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -102,6 +102,62 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c) ELF_HWCAP2 |= HWCAP2_RING3MWAIT; } +/* + * Early microcode releases for the Spectre v2 mitigation were broken. + * Information taken from: + * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf + * - https://kb.vmware.com/s/article/52345 + * - Microcode revisions observed in the wild + * - Release note from 20180108 microcode release + */ +struct sku_microcode { + u8 model; + u8 stepping; + u32 microcode; +}; +static const struct sku_microcode spectre_bad_microcodes[] = { + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 }, + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 }, + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 }, + { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 }, + { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 }, + { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, + { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, + { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, + { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, + { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, + { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 }, + { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 }, + { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 }, + { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 }, + { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 }, + { INTEL_FAM6_HASWELL_X, 0x02, 0x3b }, + { INTEL_FAM6_HASWELL_X, 0x04, 0x10 }, + { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a }, + /* Observed in the wild */ + { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b }, + { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 }, +}; + +static bool bad_spectre_microcode(struct cpuinfo_x86 *c) +{ + int i; + + /* + * We know that hypervisors lie to us about the microcode version, so + * we may as well hope that they are running the correct version. + */ + if (cpu_has(c, X86_FEATURE_HYPERVISOR)) + return false; + + for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { + if (c->x86_model == spectre_bad_microcodes[i].model && + c->x86_stepping == spectre_bad_microcodes[i].stepping) + return (c->microcode <= spectre_bad_microcodes[i].microcode); + } + return false; +} + static void early_init_intel(struct cpuinfo_x86 *c) { u64 misc_enable; @@ -122,6 +178,22 @@ static void early_init_intel(struct cpuinfo_x86 *c) if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) c->microcode = intel_get_microcode_revision(); + /* Now if any of them are set, check the blacklist and clear the lot */ + if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) || + cpu_has(c, X86_FEATURE_INTEL_STIBP) || + cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) || + cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) { + pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n"); + setup_clear_cpu_cap(X86_FEATURE_IBRS); + setup_clear_cpu_cap(X86_FEATURE_IBPB); + setup_clear_cpu_cap(X86_FEATURE_STIBP); + setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL); + setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL); + setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP); + setup_clear_cpu_cap(X86_FEATURE_SSBD); + setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD); + } + + /* * Atom erratum AAE44/AAF40/AAG38/AAH41: * @@ -130,7 +202,7 @@ * need the microcode to have already been loaded...
so if it is * not, recommend a BIOS update and disable large pages. */ - if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 && + if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 && c->microcode < 0x20e) { pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n"); clear_cpu_cap(c, X86_FEATURE_PSE); @@ -146,7 +218,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) /* CPUID workaround for 0F33/0F34 CPU */ if (c->x86 == 0xF && c->x86_model == 0x3 - && (c->x86_mask == 0x3 || c->x86_mask == 0x4)) + && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4)) c->x86_phys_bits = 36; /* @@ -187,21 +259,6 @@ static void early_init_intel(struct cpuinfo_x86 *c) if (c->x86 == 6 && c->x86_model < 15) clear_cpu_cap(c, X86_FEATURE_PAT); -#ifdef CONFIG_KMEMCHECK - /* - * P4s have a "fast strings" feature which causes single- - * stepping REP instructions to only generate a #DB on - * cache-line boundaries. - * - * Ingo Molnar reported a Pentium D (model 6) and a Xeon - * (model 2) with the same problem. - */ - if (c->x86 == 15) - if (msr_clear_bit(MSR_IA32_MISC_ENABLE, - MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0) - pr_info("kmemcheck: Disabling fast string operations\n"); -#endif - /* * If fast string is not enabled in IA32_MISC_ENABLE for any reason, * clear the fast string and enhanced fast string CPU capabilities. @@ -244,6 +301,13 @@ static void early_init_intel(struct cpuinfo_x86 *c) } check_mpx_erratum(c); + + /* + * Get the number of SMT siblings early from the extended topology + * leaf, if available. Otherwise try the legacy SMT detection. + */ + if (detect_extended_topology_early(c) < 0) + detect_ht_early(c); } #ifdef CONFIG_X86_32 @@ -259,7 +323,7 @@ int ppro_with_ram_bug(void) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 1 && - boot_cpu_data.x86_mask < 8) { + boot_cpu_data.x86_stepping < 8) { pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n"); return 1; } @@ -276,7 +340,7 @@ static void intel_smp_check(struct cpuinfo_x86 *c) * Mask B, Pentium, but not Pentium MMX */ if (c->x86 == 5 && - c->x86_mask >= 1 && c->x86_mask <= 4 && + c->x86_stepping >= 1 && c->x86_stepping <= 4 && c->x86_model <= 3) { /* * Remember we have B step Pentia with bugs @@ -319,7 +383,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until * model 3 mask 3 */ - if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) + if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633) clear_cpu_cap(c, X86_FEATURE_SEP); /* @@ -337,7 +401,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) * P4 Xeon erratum 037 workaround. * Hardware prefetcher may cause stale data to be loaded into the cache. */ - if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { + if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) { if (msr_set_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) { pr_info("CPU: C0 stepping P4 Xeon detected.\n"); @@ -352,7 +416,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) * Specification Update"). 
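 * (In the check below, the 0x520 value packs family and model: + * (5 << 8) | (2 << 4) == 0x520, i.e. family 5, model 2, the original + * P54C Pentium.)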
*/ if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 && - (c->x86_mask < 0x6 || c->x86_mask == 0xb)) + (c->x86_stepping < 0x6 || c->x86_stepping == 0xb)) set_cpu_bug(c, X86_BUG_11AP); @@ -599,7 +663,7 @@ static void init_intel(struct cpuinfo_x86 *c) case 6: if (l2 == 128) p = "Celeron (Mendocino)"; - else if (c->x86_mask == 0 || c->x86_mask == 5) + else if (c->x86_stepping == 0 || c->x86_stepping == 5) p = "Celeron-A"; break; @@ -694,6 +758,9 @@ static const struct _tlb_table intel_tlb_table[] = { { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" }, { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" }, + { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" }, + { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" }, + { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" }, { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" }, { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" }, { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" }, diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c index cd5fc61ba450..665d0f6cd62f 100644 --- a/arch/x86/kernel/cpu/intel_rdt.c +++ b/arch/x86/kernel/cpu/intel_rdt.c @@ -267,6 +267,7 @@ static void rdt_get_cdp_l3_config(int type) r->num_closid = r_l3->num_closid / 2; r->cache.cbm_len = r_l3->cache.cbm_len; r->default_ctrl = r_l3->default_ctrl; + r->cache.shareable_bits = r_l3->cache.shareable_bits; r->data_width = (r->cache.cbm_len + 3) / 4; r->alloc_capable = true; /* @@ -524,10 +525,6 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) */ if (static_branch_unlikely(&rdt_mon_enable_key)) rmdir_mondata_subdir_allrdtgrp(r, d->id); - kfree(d->ctrl_val); - kfree(d->rmid_busy_llc); - kfree(d->mbm_total); - kfree(d->mbm_local); list_del(&d->list); if (is_mbm_enabled()) cancel_delayed_work(&d->mbm_over); @@ -544,6 +541,10 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) cancel_delayed_work(&d->cqm_limbo); } + kfree(d->ctrl_val); + kfree(d->rmid_busy_llc); + kfree(d->mbm_total); + kfree(d->mbm_local); kfree(d); return; } @@ -770,8 +771,10 @@ static __init void rdt_quirks(void) cache_alloc_hsw_probe(); break; case INTEL_FAM6_SKYLAKE_X: - if (boot_cpu_data.x86_mask <= 4) + if (boot_cpu_data.x86_stepping <= 4) set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat"); + else + set_rdt_options("!l3cat"); } } diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index a869d4a073c5..2dae1b3c42fc 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -1081,6 +1081,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type, struct dentry *dentry; int ret; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); /* * resctrl file system can only be mounted once. 
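The domain_remove_cpu() hunk above moves the kfree() calls below cancel_delayed_work(), closing a use-after-free window where the queued MBM/limbo work could still dereference the buffers. The pattern in isolation, with a hypothetical worker_cancel_sync() standing in for the work-cancellation call:

    #include <stdlib.h>

    struct worker;                              /* opaque async user of buf */
    void worker_cancel_sync(struct worker *w);  /* hypothetical: returns once w has stopped */

    struct domain {
        struct worker *w;
        int *buf;                               /* touched by the worker */
    };

    static void domain_teardown(struct domain *d)
    {
        worker_cancel_sync(d->w);   /* 1: quiesce concurrent users first */
        free(d->buf);               /* 2: only now is freeing safe */
        free(d);
    }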
@@ -1130,12 +1131,12 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type, goto out_mondata; if (rdt_alloc_capable) - static_branch_enable(&rdt_alloc_enable_key); + static_branch_enable_cpuslocked(&rdt_alloc_enable_key); if (rdt_mon_capable) - static_branch_enable(&rdt_mon_enable_key); + static_branch_enable_cpuslocked(&rdt_mon_enable_key); if (rdt_alloc_capable || rdt_mon_capable) - static_branch_enable(&rdt_enable_key); + static_branch_enable_cpuslocked(&rdt_enable_key); if (is_mbm_enabled()) { r = &rdt_resources_all[RDT_RESOURCE_L3]; @@ -1157,6 +1158,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type, cdp_disable(); out: mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); return dentry; } @@ -1295,9 +1297,7 @@ static void rmdir_all_sub(void) kfree(rdtgrp); } /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ - get_online_cpus(); update_closid_rmid(cpu_online_mask, &rdtgroup_default); - put_online_cpus(); kernfs_remove(kn_info); kernfs_remove(kn_mongrp); @@ -1308,6 +1308,7 @@ static void rdt_kill_sb(struct super_block *sb) { struct rdt_resource *r; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); /*Put everything back to default values. */ @@ -1315,11 +1316,12 @@ static void rdt_kill_sb(struct super_block *sb) reset_all_ctrls(r); cdp_disable(); rmdir_all_sub(); - static_branch_disable(&rdt_alloc_enable_key); - static_branch_disable(&rdt_mon_enable_key); - static_branch_disable(&rdt_enable_key); + static_branch_disable_cpuslocked(&rdt_alloc_enable_key); + static_branch_disable_cpuslocked(&rdt_mon_enable_key); + static_branch_disable_cpuslocked(&rdt_enable_key); kernfs_kill_sb(sb); mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); } static struct file_system_type rdt_fs_type = { @@ -1655,6 +1657,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, if (ret < 0) goto out_common_fail; closid = ret; + ret = 0; rdtgrp->closid = closid; list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c index 231ad23b24a9..8fec687b3e44 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c @@ -48,7 +48,7 @@ static struct dentry *dfs_inj; static u8 n_banks; -#define MAX_FLAG_OPT_SIZE 3 +#define MAX_FLAG_OPT_SIZE 4 #define NBCFG 0x44 enum injection_type { diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index aa0d5df9dc60..e956eb267061 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h @@ -115,4 +115,19 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb) { } extern struct mca_config mca_cfg; +#ifndef CONFIG_X86_64 +/* + * On 32-bit systems it would be difficult to safely unmap a poison page + * from the kernel 1:1 map because there are no non-canonical addresses that + * we can use to refer to the address without risking a speculative access. + * However, this isn't much of an issue because: + * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which + * are only mapped into the kernel as needed + * 2) Few people would run a 32-bit kernel on a machine that supports + * recoverable errors because they have too much memory to boot 32-bit. 
+ */ +static inline void mce_unmap_kpfn(unsigned long pfn) {} +#define mce_unmap_kpfn mce_unmap_kpfn +#endif + #endif /* __X86_MCE_INTERNAL_H__ */ diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c index 87cc9ab7a13c..c51353569492 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-severity.c +++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c @@ -143,6 +143,11 @@ static struct severity { SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR), USER ), + MCESEV( + PANIC, "Data load in unrecoverable area of kernel", + SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), + KERNEL + ), #endif MCESEV( PANIC, "Action required: unknown MCACOD", @@ -245,6 +250,9 @@ static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_exc if (m->status & MCI_STATUS_UC) { + if (ctx == IN_KERNEL) + return MCE_PANIC_SEVERITY; + /* * On older systems where overflow_recov flag is not present, we * should simply panic if an error overflow occurs. If @@ -255,10 +263,6 @@ static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_exc if (mce_flags.smca) return mce_severity_amd_smca(m, ctx); - /* software can try to contain */ - if (!(m->mcgstatus & MCG_STATUS_RIPV) && (ctx == IN_KERNEL)) - return MCE_PANIC_SEVERITY; - /* kill current process */ return MCE_AR_SEVERITY; } else { diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 3b413065c613..98e4e4dc4a3b 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -57,6 +57,9 @@ static DEFINE_MUTEX(mce_log_mutex); +/* sysfs synchronization */ +static DEFINE_MUTEX(mce_sysfs_mutex); + #define CREATE_TRACE_POINTS #include @@ -106,6 +109,10 @@ static struct irq_work mce_irq_work; static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); +#ifndef mce_unmap_kpfn +static void mce_unmap_kpfn(unsigned long pfn); +#endif + /* * CPU/chipset specific EDAC code can register a notifier call here to print * MCE errors in a human-readable form. 
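The mce-internal.h and mce.c hunks above combine into a compile-time override: the header supplies an empty inline on 32-bit and #defines the symbol, so mce.c only declares and builds the real implementation when no stub exists. The idiom reduced to a sketch, with hypothetical names:

    /* header (sketch): the 32-bit build gets an empty inline and marks it provided */
    #ifndef CONFIG_64BIT_EXAMPLE            /* hypothetical config symbol */
    static inline void unmap_poison_pfn(unsigned long pfn) {}
    #define unmap_poison_pfn unmap_poison_pfn
    #endif

    /* C file (sketch): compile the real version only when no stub was defined */
    #ifndef unmap_poison_pfn
    static void unmap_poison_pfn(unsigned long pfn)
    {
        /* ... the expensive 64-bit-only implementation ... */
    }
    #endif

This keeps the 1:1-map unmapping logic out of 32-bit builds without scattering CONFIG_X86_64 checks through the call sites.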
@@ -127,6 +134,8 @@ void mce_setup(struct mce *m) if (this_cpu_has(X86_FEATURE_INTEL_PPIN)) rdmsrl(MSR_PPIN, m->ppin); + + m->microcode = boot_cpu_data.microcode; } DEFINE_PER_CPU(struct mce, injectm); @@ -259,7 +268,7 @@ static void __print_mce(struct mce *m) */ pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n", m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid, - cpu_data(m->extcpu).microcode); + m->microcode); } static void print_mce(struct mce *m) @@ -503,10 +512,8 @@ static int mce_usable_address(struct mce *m) bool mce_is_memory_error(struct mce *m) { if (m->cpuvendor == X86_VENDOR_AMD) { - /* ErrCodeExt[20:16] */ - u8 xec = (m->status >> 16) & 0x1f; + return amd_mce_is_memory_error(m); - return (xec == 0x0 || xec == 0x8); } else if (m->cpuvendor == X86_VENDOR_INTEL) { /* * Intel SDM Volume 3B - 15.9.2 Compound Error Codes @@ -582,7 +589,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val, if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) { pfn = mce->addr >> PAGE_SHIFT; - memory_failure(pfn, MCE_VECTOR, 0); + if (memory_failure(pfn, MCE_VECTOR, 0)) + mce_unmap_kpfn(pfn); } return NOTIFY_OK; @@ -752,23 +760,25 @@ EXPORT_SYMBOL_GPL(machine_check_poll); static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, struct pt_regs *regs) { - int i, ret = 0; char *tmp; + int i; for (i = 0; i < mca_cfg.banks; i++) { m->status = mce_rdmsrl(msr_ops.status(i)); - if (m->status & MCI_STATUS_VAL) { - __set_bit(i, validp); - if (quirk_no_way_out) - quirk_no_way_out(i, m, regs); - } + if (!(m->status & MCI_STATUS_VAL)) + continue; + + __set_bit(i, validp); + if (quirk_no_way_out) + quirk_no_way_out(i, m, regs); if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { + mce_read_aux(m, i); *msg = tmp; - ret = 1; + return 1; } } - return ret; + return 0; } /* @@ -1049,12 +1059,13 @@ static int do_memory_failure(struct mce *m) ret = memory_failure(m->addr >> PAGE_SHIFT, MCE_VECTOR, flags); if (ret) pr_err("Memory error not recovered"); + else + mce_unmap_kpfn(m->addr >> PAGE_SHIFT); return ret; } -#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE) - -void arch_unmap_kpfn(unsigned long pfn) +#ifndef mce_unmap_kpfn +static void mce_unmap_kpfn(unsigned long pfn) { unsigned long decoy_addr; @@ -1065,7 +1076,7 @@ void arch_unmap_kpfn(unsigned long pfn) * We would like to just call: * set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1); * but doing that would radically increase the odds of a - * speculative access to the posion page because we'd have + * speculative access to the poison page because we'd have * the virtual address of the kernel 1:1 mapping sitting * around in registers. * Instead we get tricky. We create a non-canonical address @@ -1090,7 +1101,6 @@ void arch_unmap_kpfn(unsigned long pfn) if (set_memory_np(decoy_addr, 1)) pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn); - } #endif @@ -1197,13 +1207,18 @@ void do_machine_check(struct pt_regs *regs, long error_code) lmce = m.mcgstatus & MCG_STATUS_LMCES; /* + * Local machine check may already know that we have to panic. + * Broadcast machine check begins rendezvous in mce_start() * Go through all banks in exclusion of the other CPUs. This way we * don't report duplicated events on shared banks because the first one - * to see it will clear it. If this is a Local MCE, then no need to - * perform rendezvous. + * to see it will clear it. 
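The trick described in the mce_unmap_kpfn() comment above fits in one expression: offset the pfn into the 1:1 mapping, but against a base whose top bit has been flipped, so the value held in registers is non-canonical and cannot seed a speculative load. An illustrative sketch (decoy_address() and its parameters are not the kernel's exact helpers):

    #include <stdint.h>

    #define EX_PAGE_SHIFT 12

    /* Build a non-canonical alias of the 1:1-map address of a pfn by
     * flipping bit 63 of the mapping base. The low bits still select the
     * right page, but the real virtual address never sits in a register.
     */
    static inline uint64_t decoy_address(uint64_t direct_map_base, uint64_t pfn)
    {
        return (pfn << EX_PAGE_SHIFT) + (direct_map_base ^ (1ULL << 63));
    }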
*/ - if (!lmce) + if (lmce) { + if (no_way_out) + mce_panic("Fatal local machine check", &m, msg); + } else { order = mce_start(&no_way_out); + } for (i = 0; i < cfg->banks; i++) { __clear_bit(i, toclear); @@ -1279,12 +1294,17 @@ void do_machine_check(struct pt_regs *regs, long error_code) no_way_out = worst >= MCE_PANIC_SEVERITY; } else { /* - * Local MCE skipped calling mce_reign() - * If we found a fatal error, we need to panic here. + * If there was a fatal machine check we should have + * already called mce_panic earlier in this function. + * Since we re-read the banks, we might have found + * something new. Check again to see if we found a + * fatal error. We call "mce_severity()" again to + * make sure we have the right "msg". */ - if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) - mce_panic("Machine check from unknown source", - NULL, NULL); + if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) { + mce_severity(&m, cfg->tolerant, &msg, true); + mce_panic("Local fatal machine check!", &m, msg); + } } /* @@ -1788,6 +1808,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code) void (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check; +dotraplinkage void do_mce(struct pt_regs *regs, long error_code) +{ + machine_check_vector(regs, error_code); +} + /* * Called for each booted CPU to set up machine checks. * Must be called with preempt off: @@ -2071,6 +2096,7 @@ static ssize_t set_ignore_ce(struct device *s, if (kstrtou64(buf, 0, &new) < 0) return -EINVAL; + mutex_lock(&mce_sysfs_mutex); if (mca_cfg.ignore_ce ^ !!new) { if (new) { /* disable ce features */ @@ -2083,6 +2109,8 @@ static ssize_t set_ignore_ce(struct device *s, on_each_cpu(mce_enable_ce, (void *)1, 1); } } + mutex_unlock(&mce_sysfs_mutex); + return size; } @@ -2095,6 +2123,7 @@ static ssize_t set_cmci_disabled(struct device *s, if (kstrtou64(buf, 0, &new) < 0) return -EINVAL; + mutex_lock(&mce_sysfs_mutex); if (mca_cfg.cmci_disabled ^ !!new) { if (new) { /* disable cmci */ @@ -2106,6 +2135,8 @@ static ssize_t set_cmci_disabled(struct device *s, on_each_cpu(mce_enable_ce, NULL, 1); } } + mutex_unlock(&mce_sysfs_mutex); + return size; } @@ -2113,8 +2144,16 @@ static ssize_t store_int_with_restart(struct device *s, struct device_attribute *attr, const char *buf, size_t size) { - ssize_t ret = device_store_int(s, attr, buf, size); + unsigned long old_check_interval = check_interval; + ssize_t ret = device_store_ulong(s, attr, buf, size); + + if (check_interval == old_check_interval) + return ret; + + mutex_lock(&mce_sysfs_mutex); mce_restart(); + mutex_unlock(&mce_sysfs_mutex); + return ret; } diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 486f640b02ef..dbcb01006749 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -82,6 +82,7 @@ static struct smca_bank_name smca_names[] = { [SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" }, [SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" }, [SMCA_DE] = { "decode_unit", "Decode Unit" }, + [SMCA_RESERVED] = { "reserved", "Reserved" }, [SMCA_EX] = { "execution_unit", "Execution Unit" }, [SMCA_FP] = { "floating_point", "Floating Point Unit" }, [SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" }, @@ -93,6 +94,11 @@ static struct smca_bank_name smca_names[] = { [SMCA_SMU] = { "smu", "System Management Unit" }, }; +static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init = +{ + [0 ... MAX_NR_BANKS - 1] = { [0 ... 
NR_BLOCKS - 1] = -1 } +}; + const char *smca_get_name(enum smca_bank_types t) { if (t >= N_SMCA_BANK_TYPES) @@ -110,9 +116,26 @@ const char *smca_get_long_name(enum smca_bank_types t) } EXPORT_SYMBOL_GPL(smca_get_long_name); +static enum smca_bank_types smca_get_bank_type(unsigned int bank) +{ + struct smca_bank *b; + + if (bank >= MAX_NR_BANKS) + return N_SMCA_BANK_TYPES; + + b = &smca_banks[bank]; + if (!b->hwid) + return N_SMCA_BANK_TYPES; + + return b->hwid->bank_type; +} + static struct smca_hwid smca_hwid_mcatypes[] = { /* { bank_type, hwid_mcatype, xec_bitmap } */ + /* Reserved type */ + { SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0), 0x0 }, + /* ZN Core (HWID=0xB0) MCA types */ { SMCA_LS, HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF }, { SMCA_IF, HWID_MCATYPE(0xB0, 0x1), 0x3FFF }, @@ -411,34 +434,51 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c) wrmsr(MSR_CU_DEF_ERR, low, high); } -static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high, - unsigned int bank, unsigned int block) +static u32 smca_get_block_address(unsigned int cpu, unsigned int bank, + unsigned int block) { - u32 addr = 0, offset = 0; + u32 low, high; + u32 addr = 0; - if (mce_flags.smca) { - if (!block) { - addr = MSR_AMD64_SMCA_MCx_MISC(bank); - } else { - /* - * For SMCA enabled processors, BLKPTR field of the - * first MISC register (MCx_MISC0) indicates presence of - * additional MISC register set (MISC1-4). - */ - u32 low, high; + if (smca_get_bank_type(bank) == SMCA_RESERVED) + return addr; - if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) - return addr; + if (!block) + return MSR_AMD64_SMCA_MCx_MISC(bank); - if (!(low & MCI_CONFIG_MCAX)) - return addr; + /* Check our cache first: */ + if (smca_bank_addrs[bank][block] != -1) + return smca_bank_addrs[bank][block]; - if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && - (low & MASK_BLKPTR_LO)) - addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); - } + /* + * For SMCA enabled processors, BLKPTR field of the first MISC register + * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4). 
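smca_bank_addrs[][] above is simple memoization: the block address is derived from MSRs via rdmsr_safe_on_cpu(), which is expensive and cross-CPU, so the first probe's answer is cached and -1 marks a slot as not yet probed. The same shape in a standalone sketch (the probe function and address scheme are made up):

    #include <stdint.h>

    #define NR_BANKS  8
    #define NR_BLOCKS 5

    /* Stand-in for the expensive cross-CPU MSR probe. */
    static uint32_t probe_block_address(unsigned int bank, unsigned int block)
    {
        return 0xC0002000u + bank * 16 + block; /* made-up address scheme */
    }

    /* -1 marks "not probed yet"; range designators as in the hunk (GCC extension). */
    static int64_t addr_cache[NR_BANKS][NR_BLOCKS] = {
        [0 ... NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
    };

    static uint32_t get_block_address(unsigned int bank, unsigned int block)
    {
        if (addr_cache[bank][block] == -1)
            addr_cache[bank][block] = probe_block_address(bank, block);

        return (uint32_t)addr_cache[bank][block];
    }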
+ */ + if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) + goto out; + + if (!(low & MCI_CONFIG_MCAX)) + goto out; + + if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && + (low & MASK_BLKPTR_LO)) + addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); + +out: + smca_bank_addrs[bank][block] = addr; + return addr; +} + +static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high, + unsigned int bank, unsigned int block) +{ + u32 addr = 0, offset = 0; + + if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS)) return addr; - } + + if (mce_flags.smca) + return smca_get_block_address(cpu, bank, block); /* Fall back to method we used for older processors: */ switch (block) { @@ -738,6 +778,17 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) } EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr); +bool amd_mce_is_memory_error(struct mce *m) +{ + /* ErrCodeExt[20:16] */ + u8 xec = (m->status >> 16) & 0x1f; + + if (mce_flags.smca) + return smca_get_bank_type(m->bank) == SMCA_UMC && xec == 0x0; + + return m->bank == 4 && xec == 0x8; +} + static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc) { struct mce m; @@ -1036,7 +1087,7 @@ static struct kobj_type threshold_ktype = { static const char *get_name(unsigned int bank, struct threshold_block *b) { - unsigned int bank_type; + enum smca_bank_types bank_type; if (!mce_flags.smca) { if (b && bank == 4) @@ -1045,11 +1096,10 @@ static const char *get_name(unsigned int bank, struct threshold_block *b) return th_names[bank]; } - if (!smca_banks[bank].hwid) + bank_type = smca_get_bank_type(bank); + if (bank_type >= N_SMCA_BANK_TYPES) return NULL; - bank_type = smca_banks[bank].hwid->bank_type; - if (b && bank_type == SMCA_UMC) { if (b->block < ARRAY_SIZE(smca_umc_block_names)) return smca_umc_block_names[b->block]; diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index c6daec4bdba5..48179928ff38 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) return -EINVAL; ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size); - if (ret != UCODE_OK) + if (ret > UCODE_UPDATED) return -EINVAL; return 0; @@ -470,6 +470,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size, #define F14H_MPB_MAX_SIZE 1824 #define F15H_MPB_MAX_SIZE 4096 #define F16H_MPB_MAX_SIZE 3458 +#define F17H_MPB_MAX_SIZE 3200 switch (family) { case 0x14: @@ -481,6 +482,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size, case 0x16: max_size = F16H_MPB_MAX_SIZE; break; + case 0x17: + max_size = F17H_MPB_MAX_SIZE; + break; default: max_size = F1XH_MPB_MAX_SIZE; break; @@ -494,7 +498,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size, return patch_size; } -static int apply_microcode_amd(int cpu) +static enum ucode_state apply_microcode_amd(int cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); struct microcode_amd *mc_amd; @@ -508,7 +512,7 @@ static int apply_microcode_amd(int cpu) p = find_patch(cpu); if (!p) - return 0; + return UCODE_NFOUND; mc_amd = p->data; uci->mc = p->data; @@ -519,13 +523,13 @@ static int apply_microcode_amd(int cpu) if (rev >= mc_amd->hdr.patch_id) { c->microcode = rev; uci->cpu_sig.rev = rev; - return 0; + return UCODE_OK; } if (__apply_microcode_amd(mc_amd)) { pr_err("CPU%d: update failed for patch_level=0x%08x\n", cpu, mc_amd->hdr.patch_id); - 
return -1; + return UCODE_ERROR; } pr_info("CPU%d: new patch_level=0x%08x\n", cpu, mc_amd->hdr.patch_id); @@ -533,7 +537,7 @@ static int apply_microcode_amd(int cpu) uci->cpu_sig.rev = mc_amd->hdr.patch_id; c->microcode = mc_amd->hdr.patch_id; - return 0; + return UCODE_UPDATED; } static int install_equiv_cpu_table(const u8 *buf) @@ -679,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, static enum ucode_state load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) { + struct ucode_patch *p; enum ucode_state ret; /* free old equiv table */ free_equiv_cpu_table(); ret = __load_microcode_amd(family, data, size); - - if (ret != UCODE_OK) + if (ret != UCODE_OK) { cleanup(); + return ret; + } -#ifdef CONFIG_X86_32 - /* save BSP's matching patch for early load */ - if (save) { - struct ucode_patch *p = find_patch(0); - if (p) { - memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); - memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), - PATCH_MAX_SIZE)); - } + p = find_patch(0); + if (!p) { + return ret; + } else { + if (boot_cpu_data.microcode == p->patch_id) + return ret; + + ret = UCODE_NEW; } -#endif + + /* save BSP's matching patch for early load */ + if (!save) + return ret; + + memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); + memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE)); + return ret; } diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index c4fa4a85d4cb..387a8f44fba1 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -22,13 +22,16 @@ #define pr_fmt(fmt) "microcode: " fmt #include +#include #include #include #include #include #include +#include #include #include +#include #include #include @@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache); */ static DEFINE_MUTEX(microcode_mutex); +/* + * Serialize late loading so that CPUs get updated one-by-one. + */ +static DEFINE_RAW_SPINLOCK(update_lock); + struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; struct cpu_info_ctx { @@ -239,7 +247,7 @@ static int __init save_microcode_in_initrd(void) break; case X86_VENDOR_AMD: if (c->x86 >= 0x10) - return save_microcode_in_initrd_amd(cpuid_eax(1)); + ret = save_microcode_in_initrd_amd(cpuid_eax(1)); break; default: break; @@ -373,26 +381,23 @@ static int collect_cpu_info(int cpu) return ret; } -struct apply_microcode_ctx { - int err; -}; - static void apply_microcode_local(void *arg) { - struct apply_microcode_ctx *ctx = arg; + enum ucode_state *err = arg; - ctx->err = microcode_ops->apply_microcode(smp_processor_id()); + *err = microcode_ops->apply_microcode(smp_processor_id()); } static int apply_microcode_on_target(int cpu) { - struct apply_microcode_ctx ctx = { .err = 0 }; + enum ucode_state err; int ret; - ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1); - if (!ret) - ret = ctx.err; - + ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1); + if (!ret) { + if (err == UCODE_ERROR) + ret = 1; + } return ret; } @@ -489,31 +494,130 @@ static void __exit microcode_dev_exit(void) /* fake device for request_firmware */ static struct platform_device *microcode_pdev; -static int reload_for_cpu(int cpu) +/* + * Late loading dance. Why the heavy-handed stomp_machine effort? + * + * - HT siblings must be idle and not execute other code while the other sibling + * is loading microcode in order to avoid any negative interactions caused by + * the loading. 
+ * + * - In addition, microcode update on the cores must be serialized until this + * requirement can be relaxed in the future. Right now, this is conservative + * and good. + */ +#define SPINUNIT 100 /* 100 nsec */ + +static int check_online_cpus(void) { - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - enum ucode_state ustate; - int err = 0; + unsigned int cpu; - if (!uci->valid) - return err; + /* + * Make sure all CPUs are online. It's fine for SMT to be disabled if + * all the primary threads are still online. + */ + for_each_present_cpu(cpu) { + if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) { + pr_err("Not all CPUs online, aborting microcode update.\n"); + return -EINVAL; + } + } - ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true); - if (ustate == UCODE_OK) - apply_microcode_on_target(cpu); - else - if (ustate == UCODE_ERROR) - err = -EINVAL; - return err; + return 0; +} + +static atomic_t late_cpus_in; +static atomic_t late_cpus_out; + +static int __wait_for_cpus(atomic_t *t, long long timeout) +{ + int all_cpus = num_online_cpus(); + + atomic_inc(t); + + while (atomic_read(t) < all_cpus) { + if (timeout < SPINUNIT) { + pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n", + all_cpus - atomic_read(t)); + return 1; + } + + ndelay(SPINUNIT); + timeout -= SPINUNIT; + + touch_nmi_watchdog(); + } + return 0; +} + +/* + * Returns: + * < 0 - on error + * 0 - no update done + * 1 - microcode was updated + */ +static int __reload_late(void *info) +{ + int cpu = smp_processor_id(); + enum ucode_state err; + int ret = 0; + + /* + * Wait for all CPUs to arrive. A load will not be attempted unless all + * CPUs show up. + */ + if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC)) + return -1; + + raw_spin_lock(&update_lock); + apply_microcode_local(&err); + raw_spin_unlock(&update_lock); + + /* siblings return UCODE_OK because their engine got updated already */ + if (err > UCODE_NFOUND) { + pr_warn("Error reloading microcode on CPU %d\n", cpu); + ret = -1; + } else if (err == UCODE_UPDATED || err == UCODE_OK) { + ret = 1; + } + + /* + * Increase the wait timeout to a safe value here since we're + * serializing the microcode update and that could take a while on a + * large number of CPUs. And that is fine as the *actual* timeout will + * be determined by the last CPU finished updating and thus cut short. + */ + if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus())) + panic("Timeout during microcode update!\n"); + + return ret; +} + +/* + * Reload microcode late on all CPUs. Wait for a sec until they + * all gather together.
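__wait_for_cpus() above is a counting rendezvous used twice: all CPUs check in through late_cpus_in before anyone writes microcode, and check out through late_cpus_out afterwards, with the exit timeout scaled by num_online_cpus() because the updates are serialized. A minimal C11 model of the barrier:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Every participant increments the counter, then spins until all have
     * arrived or the budget runs out (spin iterations here, nanoseconds in
     * the kernel version above).
     */
    static bool wait_for_all(atomic_int *arrived, int total, long budget)
    {
        atomic_fetch_add(arrived, 1);

        while (atomic_load(arrived) < total) {
            if (budget-- <= 0)
                return false;   /* someone never showed up */
            /* a cpu_relax()/ndelay() equivalent would go here */
        }
        return true;
    }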
+ */ +static int microcode_reload_late(void) +{ + int ret; + + atomic_set(&late_cpus_in, 0); + atomic_set(&late_cpus_out, 0); + + ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask); + if (ret > 0) + microcode_check(); + + return ret; } static ssize_t reload_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { + enum ucode_state tmp_ret = UCODE_OK; + int bsp = boot_cpu_data.cpu_index; unsigned long val; - int cpu; - ssize_t ret = 0, tmp_ret; + ssize_t ret = 0; ret = kstrtoul(buf, 0, &val); if (ret) @@ -522,23 +626,24 @@ static ssize_t reload_store(struct device *dev, if (val != 1) return size; + tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true); + if (tmp_ret != UCODE_NEW) + return size; + get_online_cpus(); - mutex_lock(&microcode_mutex); - for_each_online_cpu(cpu) { - tmp_ret = reload_for_cpu(cpu); - if (tmp_ret != 0) - pr_warn("Error reloading microcode on CPU %d\n", cpu); - /* save retval of the first encountered reload error */ - if (!ret) - ret = tmp_ret; - } - if (!ret) - perf_check_microcode(); + ret = check_online_cpus(); + if (ret) + goto put; + + mutex_lock(&microcode_mutex); + ret = microcode_reload_late(); mutex_unlock(&microcode_mutex); + +put: put_online_cpus(); - if (!ret) + if (ret >= 0) ret = size; return ret; @@ -606,10 +711,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw) if (system_state != SYSTEM_RUNNING) return UCODE_NFOUND; - ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, - refresh_fw); - - if (ustate == UCODE_OK) { + ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw); + if (ustate == UCODE_NEW) { pr_debug("CPU%d updated upon init\n", cpu); apply_microcode_on_target(cpu); } diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 7dbcb7adf797..1c2cfa0644aa 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -45,6 +45,9 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; /* Current microcode patch used in early patching on the APs. */ static struct microcode_intel *intel_ucode_patch; +/* last level cache size per core */ +static int llc_size_per_core; + static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1, unsigned int s2, unsigned int p2) { @@ -482,7 +485,6 @@ static void show_saved_mc(void) */ static void save_mc_for_early(u8 *mc, unsigned int size) { -#ifdef CONFIG_HOTPLUG_CPU /* Synchronization during CPU hotplug. */ static DEFINE_MUTEX(x86_cpu_microcode_mutex); @@ -492,7 +494,6 @@ static void save_mc_for_early(u8 *mc, unsigned int size) show_saved_mc(); mutex_unlock(&x86_cpu_microcode_mutex); -#endif } static bool load_builtin_intel_microcode(struct cpio_data *cp) @@ -565,15 +566,6 @@ static void print_ucode(struct ucode_cpu_info *uci) } #else -/* - * Flush global tlb. We only do this in x86_64 where paging has been enabled - * already and PGE should be enabled as well. - */ -static inline void flush_tlb_early(void) -{ - __native_flush_tlb_global_irq_disabled(); -} - static inline void print_ucode(struct ucode_cpu_info *uci) { struct microcode_intel *mc; @@ -595,6 +587,23 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) if (!mc) return 0; + /* + * Save us the MSR write below - which is a particularly expensive + * operation - when the other hyperthread has updated the microcode + * already.
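The logic this comment introduces is worth seeing in one piece: read the running revision first, and only pay for the cache flush plus the WRMSR when the patch is actually newer. A sketch with hypothetical helper names:

    #include <stdint.h>

    /* Hypothetical stand-ins for the MSR read, the cache flush and the patch write. */
    extern uint32_t read_current_rev(void);
    extern void flush_caches(void);
    extern void write_patch(const void *bits);

    /* Returns 1 if the patch was written, 0 if a sibling already did the
     * work, -1 if the write did not take.
     */
    static int maybe_apply(uint32_t patch_rev, const void *bits, uint32_t *saved_rev)
    {
        uint32_t rev = read_current_rev();

        if (rev >= patch_rev) {
            *saved_rev = rev;   /* only the bookkeeping needs a resync */
            return 0;
        }

        flush_caches();         /* wbinvd() in the hunks above */
        write_patch(bits);

        rev = read_current_rev();
        if (rev != patch_rev)
            return -1;

        *saved_rev = rev;
        return 1;
    }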
+ */ + rev = intel_get_microcode_revision(); + if (rev >= mc->hdr.rev) { + uci->cpu_sig.rev = rev; + return UCODE_OK; + } + + /* + * Writeback and invalidate caches before updating microcode to avoid + * internal issues depending on what the microcode is updating. + */ + native_wbinvd(); + /* write microcode via MSR 0x79 */ native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); @@ -602,10 +611,6 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) if (rev != mc->hdr.rev) return -1; -#ifdef CONFIG_X86_64 - /* Flush global tlb. This is precaution. */ - flush_tlb_early(); -#endif uci->cpu_sig.rev = rev; if (early) @@ -782,27 +787,44 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) return 0; } -static int apply_microcode_intel(int cpu) +static enum ucode_state apply_microcode_intel(int cpu) { + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; + struct cpuinfo_x86 *c = &cpu_data(cpu); struct microcode_intel *mc; - struct ucode_cpu_info *uci; - struct cpuinfo_x86 *c; static int prev_rev; u32 rev; /* We should bind the task to the CPU */ if (WARN_ON(raw_smp_processor_id() != cpu)) - return -1; + return UCODE_ERROR; - uci = ucode_cpu_info + cpu; - mc = uci->mc; + /* Look for a newer patch in our cache: */ + mc = find_patch(uci); if (!mc) { - /* Look for a newer patch in our cache: */ - mc = find_patch(uci); + mc = uci->mc; if (!mc) - return 0; + return UCODE_NFOUND; } + /* + * Save us the MSR write below - which is a particularly expensive + * operation - when the other hyperthread has updated the microcode + * already. + */ + rev = intel_get_microcode_revision(); + if (rev >= mc->hdr.rev) { + uci->cpu_sig.rev = rev; + c->microcode = rev; + return UCODE_OK; + } + + /* + * Writeback and invalidate caches before updating microcode to avoid + * internal issues depending on what the microcode is updating.
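The switch to returning enum ucode_state above only works because of how the comparisons elsewhere in this series read: save_microcode_in_initrd_amd() treats ret > UCODE_UPDATED as failure and __reload_late() treats err > UCODE_NFOUND as failure. That implies the states are ordered from best to worst, sketched here on that assumption:

    /* Assumed ordering, implied by the comparisons in these hunks. */
    enum ucode_state {
        UCODE_OK,       /* nothing to do, revision already current */
        UCODE_NEW,      /* newer patch cached, not yet applied */
        UCODE_UPDATED,  /* patch written to the CPU */
        UCODE_NFOUND,   /* no matching patch */
        UCODE_ERROR,    /* a write was attempted and failed */
    };

    static inline int ucode_load_failed(enum ucode_state s)  { return s > UCODE_UPDATED; }
    static inline int ucode_apply_failed(enum ucode_state s) { return s > UCODE_NFOUND; }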
+ */ + native_wbinvd(); + /* write microcode via MSR 0x79 */ wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); @@ -811,7 +833,7 @@ static int apply_microcode_intel(int cpu) if (rev != mc->hdr.rev) { pr_err("CPU%d update to revision 0x%x failed\n", cpu, mc->hdr.rev); - return -1; + return UCODE_ERROR; } if (rev != prev_rev) { @@ -823,12 +845,10 @@ static int apply_microcode_intel(int cpu) prev_rev = rev; } - c = &cpu_data(cpu); - uci->cpu_sig.rev = rev; c->microcode = rev; - return 0; + return UCODE_UPDATED; } static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, @@ -840,6 +860,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, unsigned int leftover = size; unsigned int curr_mc_size = 0, new_mc_size = 0; unsigned int csig, cpf; + enum ucode_state ret = UCODE_OK; while (leftover) { struct microcode_header_intel mc_header; @@ -881,6 +902,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, new_mc = mc; new_mc_size = mc_size; mc = NULL; /* trigger new vmalloc */ + ret = UCODE_NEW; } ucode_ptr += mc_size; @@ -910,7 +932,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", cpu, new_rev, uci->cpu_sig.rev); - return UCODE_OK; + return ret; } static int get_ucode_fw(void *to, const void *from, size_t n) @@ -923,8 +945,19 @@ static bool is_blacklisted(unsigned int cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); - if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) { - pr_err_once("late loading on model 79 is disabled.\n"); + /* + * Late loading on model 79 with microcode revision less than 0x0b000021 + * and LLC size per core bigger than 2.5MB may result in a system hang. + * This behavior is documented in item BDF90, #334165 (Intel Xeon + * Processor E7-8800/4800 v4 Product Family). 
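The erratum gate described above requires all four conditions to hold, and 2.5 MB per core works out to 2621440 bytes (2.5 x 1024 x 1024). The predicate in isolation, with the model number written out (Broadwell-X is family 6, model 79 = 0x4f):

    #include <stdbool.h>

    #define BROADWELL_X_MODEL 0x4f                  /* family 6, model 79 */
    #define LLC_2_5_MB (5L * 1024 * 1024 / 2)       /* 2621440 bytes */

    static bool bdf90_blocks_late_load(unsigned int model, unsigned int stepping,
                                       unsigned int rev, long llc_per_core)
    {
        return model == BROADWELL_X_MODEL &&
               stepping == 0x01 &&
               llc_per_core > LLC_2_5_MB &&
               rev < 0x0b000021;
    }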
+ */ + if (c->x86 == 6 && + c->x86_model == INTEL_FAM6_BROADWELL_X && + c->x86_stepping == 0x01 && + llc_size_per_core > 2621440 && + c->microcode < 0x0b000021) { + pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode); + pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); return true; } @@ -943,7 +976,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device, return UCODE_NFOUND; sprintf(name, "intel-ucode/%02x-%02x-%02x", - c->x86, c->x86_model, c->x86_mask); + c->x86, c->x86_model, c->x86_stepping); if (request_firmware_direct(&firmware, name, device)) { pr_debug("data file %s load failed\n", name); @@ -979,6 +1012,15 @@ static struct microcode_ops microcode_intel_ops = { .apply_microcode = apply_microcode_intel, }; +static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c) +{ + u64 llc_size = c->x86_cache_size * 1024ULL; + + do_div(llc_size, c->x86_max_cores); + + return (int)llc_size; +} + struct microcode_ops * __init init_intel_microcode(void) { struct cpuinfo_x86 *c = &boot_cpu_data; @@ -989,5 +1031,7 @@ struct microcode_ops * __init init_intel_microcode(void) return NULL; } + llc_size_per_core = calc_llc_size_per_core(c); + return &microcode_intel_ops; }
c->x86_model_id : "unknown"); - if (c->x86_mask || c->cpuid_level >= 0) - seq_printf(m, "stepping\t: %d\n", c->x86_mask); + if (c->x86_stepping || c->cpuid_level >= 0) + seq_printf(m, "stepping\t: %d\n", c->x86_stepping); else seq_puts(m, "stepping\t: unknown\n"); if (c->microcode) seq_printf(m, "microcode\t: 0x%x\n", c->microcode); if (cpu_has(c, X86_FEATURE_TSC)) { - unsigned int freq = cpufreq_quick_get(cpu); + unsigned int freq = aperfmperf_get_khz(cpu); + if (!freq) + freq = cpufreq_quick_get(cpu); if (!freq) freq = cpu_khz; seq_printf(m, "cpu MHz\t\t: %u.%03u\n", @@ -87,8 +91,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) } /* Cache size */ - if (c->x86_cache_size >= 0) - seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); + if (c->x86_cache_size) + seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size); show_cpuinfo_core(m, c, cpu); show_cpuinfo_misc(m, c); diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 05459ad3db46..df11f5d604be 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -21,9 +21,6 @@ struct cpuid_bit { static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 }, { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, - { X86_FEATURE_INTEL_PT, CPUID_EBX, 25, 0x00000007, 0 }, - { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 }, - { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 }, { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 }, { X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 }, { X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 }, diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index b099024d339c..19c6e800e816 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c @@ -27,16 +27,13 @@ * exists, use it for populating initial_apicid and cpu topology * detection. */ -void detect_extended_topology(struct cpuinfo_x86 *c) +int detect_extended_topology_early(struct cpuinfo_x86 *c) { #ifdef CONFIG_SMP - unsigned int eax, ebx, ecx, edx, sub_index; - unsigned int ht_mask_width, core_plus_mask_width; - unsigned int core_select_mask, core_level_siblings; - static bool printed; + unsigned int eax, ebx, ecx, edx; if (c->cpuid_level < 0xb) - return; + return -1; cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx); @@ -44,7 +41,7 @@ void detect_extended_topology(struct cpuinfo_x86 *c) * check if the cpuid leaf 0xb is actually implemented. */ if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE)) - return; + return -1; set_cpu_cap(c, X86_FEATURE_XTOPOLOGY); @@ -52,10 +49,30 @@ void detect_extended_topology(struct cpuinfo_x86 *c) * initial apic id, which also represents 32-bit extended x2apic id. */ c->initial_apicid = edx; + smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); +#endif + return 0; +} + +/* + * Check for extended topology enumeration cpuid leaf 0xb and if it + * exists, use it for populating initial_apicid and cpu topology + * detection. + */ +void detect_extended_topology(struct cpuinfo_x86 *c) +{ +#ifdef CONFIG_SMP + unsigned int eax, ebx, ecx, edx, sub_index; + unsigned int ht_mask_width, core_plus_mask_width; + unsigned int core_select_mask, core_level_siblings; + + if (detect_extended_topology_early(c) < 0) + return; /* * Populate HT related information from sub-leaf level 0. 
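detect_extended_topology_early() above boils down to one CPUID query: leaf 0xB, sub-leaf 0, where EBX[15:0] gives the logical-processor count at the SMT level and ECX[15:8] must report level type 1 (SMT) for the leaf to count as implemented. A userspace sketch using the compiler's <cpuid.h> wrapper:

    #include <stdbool.h>
    #include <cpuid.h>  /* GCC/Clang helper, x86 only */

    static bool smt_siblings_from_leaf_b(unsigned int *siblings)
    {
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 0xB, sub-leaf 0: zero EBX or a wrong level type means the
         * leaf is not really implemented.
         */
        if (!__get_cpuid_count(0x0b, 0, &eax, &ebx, &ecx, &edx))
            return false;
        if ((ebx & 0xffff) == 0 || ((ecx >> 8) & 0xff) != 1)
            return false;

        *siblings = ebx & 0xffff;
        return true;
    }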
*/ + cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx); core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx); core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); @@ -86,15 +103,5 @@ void detect_extended_topology(struct cpuinfo_x86 *c) c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); c->x86_max_cores = (core_level_siblings / smp_num_siblings); - - if (!printed) { - pr_info("CPU: Physical Processor ID: %d\n", - c->phys_proc_id); - if (c->x86_max_cores > 1) - pr_info("CPU: Processor Core ID: %d\n", - c->cpu_core_id); - printed = 1; - } - return; #endif } diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 40ed26852ebd..8e005329648b 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -205,10 +205,10 @@ static bool __init vmware_legacy_x2apic_available(void) (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0; } -const __refconst struct hypervisor_x86 x86_hyper_vmware = { +const __initconst struct hypervisor_x86 x86_hyper_vmware = { .name = "VMware", .detect = vmware_platform, - .init_platform = vmware_platform_setup, - .x2apic_available = vmware_legacy_x2apic_available, + .type = X86_HYPER_VMWARE, + .init.init_platform = vmware_platform_setup, + .init.x2apic_available = vmware_legacy_x2apic_available, }; -EXPORT_SYMBOL(x86_hyper_vmware); diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 76e07698e6d1..7fa0855e4b9a 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -200,19 +201,22 @@ static struct of_ioapic_type of_ioapic_type[] = static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { - struct of_phandle_args *irq_data = (void *)arg; + struct irq_fwspec *fwspec = (struct irq_fwspec *)arg; struct of_ioapic_type *it; struct irq_alloc_info tmp; + int type_index; - if (WARN_ON(irq_data->args_count < 2)) + if (WARN_ON(fwspec->param_count < 2)) return -EINVAL; - if (irq_data->args[1] >= ARRAY_SIZE(of_ioapic_type)) + + type_index = fwspec->param[1]; + if (type_index >= ARRAY_SIZE(of_ioapic_type)) return -EINVAL; - it = &of_ioapic_type[irq_data->args[1]]; + it = &of_ioapic_type[type_index]; ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity); tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain)); - tmp.ioapic_pin = irq_data->args[0]; + tmp.ioapic_pin = fwspec->param[0]; return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp); } @@ -276,14 +280,15 @@ static void __init x86_flattree_get_config(void) map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)128); - initial_boot_params = dt = early_memremap(initial_dtb, map_len); - size = of_get_flat_dt_size(); + dt = early_memremap(initial_dtb, map_len); + size = fdt_totalsize(dt); if (map_len < size) { early_memunmap(dt, map_len); - initial_boot_params = dt = early_memremap(initial_dtb, size); + dt = early_memremap(initial_dtb, size); map_len = size; } + early_init_dt_verify(dt); unflatten_and_copy_device_tree(); early_memunmap(dt, map_len); } diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c index 0e662c55ae90..0b8cedb20d6d 100644 --- a/arch/x86/kernel/doublefault.c +++ b/arch/x86/kernel/doublefault.c @@ -50,25 +50,23 @@ static void doublefault_fn(void) cpu_relax(); } -struct tss_struct doublefault_tss __cacheline_aligned = { - .x86_tss = { - .sp0 = STACK_START, - .ss0 = __KERNEL_DS, - .ldt = 0, - .io_bitmap_base = 
INVALID_IO_BITMAP_OFFSET, - - .ip = (unsigned long) doublefault_fn, - /* 0x2 bit is always set */ - .flags = X86_EFLAGS_SF | 0x2, - .sp = STACK_START, - .es = __USER_DS, - .cs = __KERNEL_CS, - .ss = __KERNEL_DS, - .ds = __USER_DS, - .fs = __KERNEL_PERCPU, - - .__cr3 = __pa_nodebug(swapper_pg_dir), - } +struct x86_hw_tss doublefault_tss __cacheline_aligned = { + .sp0 = STACK_START, + .ss0 = __KERNEL_DS, + .ldt = 0, + .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, + + .ip = (unsigned long) doublefault_fn, + /* 0x2 bit is always set */ + .flags = X86_EFLAGS_SF | 0x2, + .sp = STACK_START, + .es = __USER_DS, + .cs = __KERNEL_CS, + .ss = __KERNEL_DS, + .ds = __USER_DS, + .fs = __KERNEL_PERCPU, + + .__cr3 = __pa_nodebug(swapper_pg_dir), }; /* dummy for do_double_fault() call */ diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index f13b4c00a5de..a2d8a3908670 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -18,6 +18,7 @@ #include #include +#include #include #include @@ -43,6 +44,24 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task, return true; } +bool in_entry_stack(unsigned long *stack, struct stack_info *info) +{ + struct entry_stack *ss = cpu_entry_stack(smp_processor_id()); + + void *begin = ss; + void *end = ss + 1; + + if ((void *)stack < begin || (void *)stack >= end) + return false; + + info->type = STACK_TYPE_ENTRY; + info->begin = begin; + info->end = end; + info->next_sp = NULL; + + return true; +} + static void printk_stack_address(unsigned long address, int reliable, char *log_lvl) { @@ -50,6 +69,39 @@ static void printk_stack_address(unsigned long address, int reliable, printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address); } +void show_iret_regs(struct pt_regs *regs) +{ + printk(KERN_DEFAULT "RIP: %04x:%pS\n", (int)regs->cs, (void *)regs->ip); + printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss, + regs->sp, regs->flags); +} + +static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs, + bool partial) +{ + /* + * These on_stack() checks aren't strictly necessary: the unwind code + * has already validated the 'regs' pointer. The checks are done for + * ordering reasons: if the registers are on the next stack, we don't + * want to print them out yet. Otherwise they'll be shown as part of + * the wrong stack. Later, when show_trace_log_lvl() switches to the + * next stack, this function will be called again with the same regs so + * they can be printed in the right context. + */ + if (!partial && on_stack(info, regs, sizeof(*regs))) { + __show_regs(regs, 0); + + } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET, + IRET_FRAME_SIZE)) { + /* + * When an interrupt or exception occurs in entry code, the + * full pt_regs might not have been saved yet. In that case + * just print the iret frame. + */ + show_iret_regs(regs); + } +} + void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, char *log_lvl) { @@ -57,11 +109,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, struct stack_info stack_info = {0}; unsigned long visit_mask = 0; int graph_idx = 0; + bool partial = false; printk("%sCall Trace:\n", log_lvl); unwind_start(&state, task, regs, stack); stack = stack ? : get_stack_pointer(task, regs); + regs = unwind_get_entry_regs(&state, &partial); /* * Iterate through the stacks, starting with the current stack pointer. 
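in_entry_stack() above follows the same shape as in_task_stack(): compute [begin, end) for the per-CPU entry stack (end = ss + 1 is one-past-the-struct pointer arithmetic) and test membership in the half-open interval. The core test, standalone:

    #include <stdbool.h>

    struct stack_range {
        void *begin;
        void *end;      /* one past the last valid byte */
    };

    /* Half-open interval test: begin <= sp < end. With a struct pointer ss,
     * "end = ss + 1" yields exactly this one-past-the-object bound.
     */
    static bool on_range(const struct stack_range *r, const void *sp)
    {
        return sp >= r->begin && sp < r->end;
    }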
@@ -71,31 +125,35 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, * - task stack * - interrupt stack * - HW exception stacks (double fault, nmi, debug, mce) + * - entry stack * - * x86-32 can have up to three stacks: + * x86-32 can have up to four stacks: * - task stack * - softirq stack * - hardirq stack + * - entry stack */ - for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { + for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { const char *stack_name; - /* - * If we overflowed the task stack into a guard page, jump back - * to the bottom of the usable stack. - */ - if (task_stack_page(task) - (void *)stack < PAGE_SIZE) - stack = task_stack_page(task); - - if (get_stack_info(stack, task, &stack_info, &visit_mask)) - break; + if (get_stack_info(stack, task, &stack_info, &visit_mask)) { + /* + * We weren't on a valid stack. It's possible that + * we overflowed a valid stack into a guard page. + * See if the next page up is valid so that we can + * generate some kind of backtrace if this happens. + */ + stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack); + if (get_stack_info(stack, task, &stack_info, &visit_mask)) + break; + } stack_name = stack_type_name(stack_info.type); if (stack_name) printk("%s <%s>\n", log_lvl, stack_name); - if (regs && on_stack(&stack_info, regs, sizeof(*regs))) - __show_regs(regs, 0); + if (regs) + show_regs_if_on_stack(&stack_info, regs, partial); /* * Scan the stack, printing any text addresses we find. At the @@ -119,7 +177,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, /* * Don't print regs->ip again if it was already printed - * by __show_regs() below. + * by show_regs_if_on_stack(). */ if (regs && stack == &regs->ip) goto next; @@ -154,9 +212,9 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unwind_next_frame(&state); /* if the frame has entry regs, print them */ - regs = unwind_get_entry_regs(&state); - if (regs && on_stack(&stack_info, regs, sizeof(*regs))) - __show_regs(regs, 0); + regs = unwind_get_entry_regs(&state, &partial); + if (regs) + show_regs_if_on_stack(&stack_info, regs, partial); } if (stack_name) @@ -252,11 +310,13 @@ int __die(const char *str, struct pt_regs *regs, long err) unsigned long sp; #endif printk(KERN_DEFAULT - "%s: %04lx [#%d]%s%s%s%s\n", str, err & 0xffff, ++die_counter, + "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter, IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "", IS_ENABLED(CONFIG_SMP) ? " SMP" : "", debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "", - IS_ENABLED(CONFIG_KASAN) ? " KASAN" : ""); + IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "", + IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ? + (boot_cpu_has(X86_FEATURE_PTI) ?
" PTI" : " NOPTI") : ""); if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP) diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index daefae83a3aa..04170f63e3a1 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c @@ -26,6 +26,9 @@ const char *stack_type_name(enum stack_type type) if (type == STACK_TYPE_SOFTIRQ) return "SOFTIRQ"; + if (type == STACK_TYPE_ENTRY) + return "ENTRY_TRAMPOLINE"; + return NULL; } @@ -93,6 +96,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task, if (task != current) goto unknown; + if (in_entry_stack(stack, info)) + goto recursion_check; + if (in_hardirq_stack(stack, info)) goto recursion_check; diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 88ce2ffdb110..563e28d14f2c 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -37,6 +37,15 @@ const char *stack_type_name(enum stack_type type) if (type == STACK_TYPE_IRQ) return "IRQ"; + if (type == STACK_TYPE_ENTRY) { + /* + * On 64-bit, we have a generic entry stack that we + * use for all the kernel entry points, including + * SYSENTER. + */ + return "ENTRY_TRAMPOLINE"; + } + if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST) return exception_stack_names[type - STACK_TYPE_EXCEPTION]; @@ -115,6 +124,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task, if (in_irq_stack(stack, info)) goto recursion_check; + if (in_entry_stack(stack, info)) + goto recursion_check; + goto unknown; recursion_check: diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 1e82f787c160..c87560e1e3ef 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -527,6 +527,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = { INTEL_SKL_IDS(&gen9_early_ops), INTEL_BXT_IDS(&gen9_early_ops), INTEL_KBL_IDS(&gen9_early_ops), + INTEL_CFL_IDS(&gen9_early_ops), INTEL_GLK_IDS(&gen9_early_ops), INTEL_CNL_IDS(&gen9_early_ops), }; diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 5e801c8c8ce7..f65d32c7040c 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -22,6 +22,7 @@ #include #include #include +#include /* Simple VGA output */ #define VGABASE (__ISA_IO_base + 0xb8000) @@ -387,6 +388,12 @@ static int __init setup_early_printk(char *buf) if (!strncmp(buf, "xdbc", 4)) early_xdbc_parse_parameter(buf + 4); #endif +#ifdef CONFIG_INTEL_TH_EARLY_PRINTK + if (!strncmp(buf, "intelth", 7)) { + early_intel_th_init(buf + 7); + early_console_register(&intel_th_early_console, keep); + } +#endif buf++; } diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c index 9c4e7ba6870c..cbded50ee601 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c @@ -57,7 +57,7 @@ # error "Need more virtual address space for the ESPFIX hack" #endif -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO) /* This contains the *bottom* address of the espfix stack */ DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack); diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index f92a6593de1e..2ea85b32421a 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 
7affb7e3d9a5..6abd83572b01 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -249,6 +249,10 @@ static void __init fpu__init_system_ctx_switch(void) */ static void __init fpu__init_parse_early_param(void) { + char arg[32]; + char *argptr = arg; + int bit; + if (cmdline_find_option_bool(boot_command_line, "no387")) setup_clear_cpu_cap(X86_FEATURE_FPU); @@ -266,6 +270,13 @@ static void __init fpu__init_parse_early_param(void) if (cmdline_find_option_bool(boot_command_line, "noxsaves")) setup_clear_cpu_cap(X86_FEATURE_XSAVES); + + if (cmdline_find_option(boot_command_line, "clearcpuid", arg, + sizeof(arg)) && + get_option(&argptr, &bit) && + bit >= 0 && + bit < NCAPINTS * 32) + setup_clear_cpu_cap(bit); } /* diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index f1d5476c9022..87a57b7642d3 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -15,6 +15,7 @@ #include #include +#include /* * Although we spell it out in here, the Processor Trace @@ -36,6 +37,19 @@ static const char *xfeature_names[] = "unknown xstate feature" , }; +static short xsave_cpuid_features[] __initdata = { + X86_FEATURE_FPU, + X86_FEATURE_XMM, + X86_FEATURE_AVX, + X86_FEATURE_MPX, + X86_FEATURE_MPX, + X86_FEATURE_AVX512F, + X86_FEATURE_AVX512F, + X86_FEATURE_AVX512F, + X86_FEATURE_INTEL_PT, + X86_FEATURE_PKU, +}; + /* * Mask of xstate features supported by the CPU and the kernel: */ @@ -59,26 +73,6 @@ unsigned int fpu_user_xstate_size; void fpu__xstate_clear_all_cpu_caps(void) { setup_clear_cpu_cap(X86_FEATURE_XSAVE); - setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); - setup_clear_cpu_cap(X86_FEATURE_XSAVEC); - setup_clear_cpu_cap(X86_FEATURE_XSAVES); - setup_clear_cpu_cap(X86_FEATURE_AVX); - setup_clear_cpu_cap(X86_FEATURE_AVX2); - setup_clear_cpu_cap(X86_FEATURE_AVX512F); - setup_clear_cpu_cap(X86_FEATURE_AVX512IFMA); - setup_clear_cpu_cap(X86_FEATURE_AVX512PF); - setup_clear_cpu_cap(X86_FEATURE_AVX512ER); - setup_clear_cpu_cap(X86_FEATURE_AVX512CD); - setup_clear_cpu_cap(X86_FEATURE_AVX512DQ); - setup_clear_cpu_cap(X86_FEATURE_AVX512BW); - setup_clear_cpu_cap(X86_FEATURE_AVX512VL); - setup_clear_cpu_cap(X86_FEATURE_MPX); - setup_clear_cpu_cap(X86_FEATURE_XGETBV1); - setup_clear_cpu_cap(X86_FEATURE_AVX512VBMI); - setup_clear_cpu_cap(X86_FEATURE_PKU); - setup_clear_cpu_cap(X86_FEATURE_AVX512_4VNNIW); - setup_clear_cpu_cap(X86_FEATURE_AVX512_4FMAPS); - setup_clear_cpu_cap(X86_FEATURE_AVX512_VPOPCNTDQ); } /* @@ -726,6 +720,7 @@ void __init fpu__init_system_xstate(void) unsigned int eax, ebx, ecx, edx; static int on_boot_cpu __initdata = 1; int err; + int i; WARN_ON_FPU(!on_boot_cpu); on_boot_cpu = 0; @@ -759,6 +754,14 @@ void __init fpu__init_system_xstate(void) goto out_disable; } + /* + * Clear XSAVE features that are disabled in the normal CPUID. 
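The loop this comment introduces walks xsave_cpuid_features[], where index i is the xstate component bit and the entry is the CPUID feature that must back it (MPX appears twice and AVX-512 three times, matching their multiple state components). The mask-down in generic form:

    #include <stdint.h>

    /* feat_for_bit[i] names the CPUID feature that must back xstate
     * component bit i; has_feature() stands in for boot_cpu_has().
     */
    static uint64_t mask_xfeatures(uint64_t xfeatures, const int feat_for_bit[],
                                   int nbits, int (*has_feature)(int))
    {
        for (int i = 0; i < nbits; i++) {
            if (!has_feature(feat_for_bit[i]))
                xfeatures &= ~(1ULL << i);
        }
        return xfeatures;
    }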
+ */ + for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) { + if (!boot_cpu_has(xsave_cpuid_features[i])) + xfeatures_mask &= ~BIT(i); + } + xfeatures_mask &= fpu__get_supported_xfeatures_mask(); /* Enable xstate instructions to be able to continue with initialization: */ diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 01ebcb6f263e..7acb87cb2da8 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -27,6 +27,7 @@ #include #include +#include #include #include diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S index b6c6468e10bc..4c8440de3355 100644 --- a/arch/x86/kernel/ftrace_32.S +++ b/arch/x86/kernel/ftrace_32.S @@ -8,6 +8,7 @@ #include #include #include +#include #ifdef CC_USING_FENTRY # define function_hook __fentry__ @@ -197,7 +198,8 @@ ftrace_stub: movl 0x4(%ebp), %edx subl $MCOUNT_INSN_SIZE, %eax - call *ftrace_trace_function + movl ftrace_trace_function, %ecx + CALL_NOSPEC %ecx popl %edx popl %ecx @@ -241,5 +243,5 @@ return_to_handler: movl %eax, %ecx popl %edx popl %eax - jmp *%ecx + JMP_NOSPEC %ecx #endif diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index c832291d948a..7cb8ba08beb9 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -7,7 +7,7 @@ #include #include #include - +#include .code64 .section .entry.text, "ax" @@ -286,8 +286,8 @@ trace: * ip and parent ip are used and the list function is called when * function tracing is enabled. */ - call *ftrace_trace_function - + movq ftrace_trace_function, %r8 + CALL_NOSPEC %r8 restore_mcount_regs jmp fgraph_trace @@ -329,5 +329,5 @@ GLOBAL(return_to_handler) movq 8(%rsp), %rdx movq (%rsp), %rax addq $24, %rsp - jmp *%rdi + JMP_NOSPEC %rdi #endif diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 6a5d757b9cfd..7ba5d819ebe3 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -157,8 +157,8 @@ unsigned long __head __startup_64(unsigned long physaddr, p = fixup_pointer(&phys_base, physaddr); *p += load_delta - sme_get_me_mask(); - /* Encrypt the kernel (if SME is active) */ - sme_encrypt_kernel(); + /* Encrypt the kernel and related (if SME is active) */ + sme_encrypt_kernel(bp); /* * Return the SME encryption mask (if SME is active) to be used as a diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index f1d528bb66a6..b59e4fb40fd9 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -37,7 +37,7 @@ #define X86 new_cpu_data+CPUINFO_x86 #define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor #define X86_MODEL new_cpu_data+CPUINFO_x86_model -#define X86_MASK new_cpu_data+CPUINFO_x86_mask +#define X86_STEPPING new_cpu_data+CPUINFO_x86_stepping #define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math #define X86_CPUID new_cpu_data+CPUINFO_cpuid_level #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability @@ -212,9 +212,6 @@ ENTRY(startup_32_smp) #endif .Ldefault_entry: -#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ - X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ - X86_CR0_PG) movl $(CR0_STATE & ~X86_CR0_PG),%eax movl %eax,%cr0 @@ -335,7 +332,7 @@ ENTRY(startup_32_smp) shrb $4,%al movb %al,X86_MODEL andb $0x0f,%cl # mask stepping/revision - movb %cl,X86_MASK + movb %cl,X86_STEPPING movl %edx,X86_CAPABILITY .Lis486: @@ -402,7 +399,7 @@ ENTRY(early_idt_handler_array) # 24(%esp) error code i = 0 .rept NUM_EXCEPTION_VECTORS - .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 + .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0 pushl $0 # Dummy error code, to make stack
frame uniform .endif pushl $i # 20(%esp) Vector number diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 6dde3f3fc1f8..0f545b3cf926 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -23,6 +23,7 @@ #include #include "../entry/calling.h" #include +#include #ifdef CONFIG_PARAVIRT #include @@ -38,11 +39,12 @@ * */ -#define p4d_index(x) (((x) >> P4D_SHIFT) & (PTRS_PER_P4D-1)) #define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) +#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) PGD_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE) PGD_START_KERNEL = pgd_index(__START_KERNEL_map) +#endif L3_START_KERNEL = pud_index(__START_KERNEL_map) .text @@ -50,6 +52,7 @@ L3_START_KERNEL = pud_index(__START_KERNEL_map) .code64 .globl startup_64 startup_64: + UNWIND_HINT_EMPTY /* * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, * and someone has loaded an identity mapped page table @@ -89,6 +92,7 @@ startup_64: addq $(early_top_pgt - __START_KERNEL_map), %rax jmp 1f ENTRY(secondary_startup_64) + UNWIND_HINT_EMPTY /* * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, * and someone has loaded a mapped page table. @@ -131,8 +135,10 @@ ENTRY(secondary_startup_64) /* Ensure I am executing from virtual addresses */ movq $1f, %rax + ANNOTATE_RETPOLINE_SAFE jmp *%rax 1: + UNWIND_HINT_EMPTY /* Check if nx is implemented */ movl $0x80000001, %eax @@ -150,9 +156,6 @@ ENTRY(secondary_startup_64) 1: wrmsr /* Make changes effective */ /* Setup cr0 */ -#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ - X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ - X86_CR0_PG) movl $CR0_STATE, %eax /* Make changes effective */ movq %rax, %cr0 @@ -235,7 +238,7 @@ ENTRY(secondary_startup_64) pushq %rax # target address in negative space lretq .Lafter_lret: -ENDPROC(secondary_startup_64) +END(secondary_startup_64) #include "verify_cpu.S" @@ -247,6 +250,7 @@ ENDPROC(secondary_startup_64) */ ENTRY(start_cpu0) movq initial_stack(%rip), %rsp + UNWIND_HINT_EMPTY jmp .Ljump_to_C_code ENDPROC(start_cpu0) #endif @@ -266,26 +270,24 @@ ENDPROC(start_cpu0) .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS __FINITDATA -bad_address: - jmp bad_address - __INIT ENTRY(early_idt_handler_array) - # 104(%rsp) %rflags - # 96(%rsp) %cs - # 88(%rsp) %rip - # 80(%rsp) error code i = 0 .rept NUM_EXCEPTION_VECTORS - .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 - pushq $0 # Dummy error code, to make stack frame uniform + .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0 + UNWIND_HINT_IRET_REGS + pushq $0 # Dummy error code, to make stack frame uniform + .else + UNWIND_HINT_IRET_REGS offset=8 .endif pushq $i # 72(%rsp) Vector number jmp early_idt_handler_common + UNWIND_HINT_IRET_REGS i = i + 1 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr -ENDPROC(early_idt_handler_array) + UNWIND_HINT_IRET_REGS offset=16 +END(early_idt_handler_array) early_idt_handler_common: /* @@ -313,6 +315,7 @@ early_idt_handler_common: pushq %r13 /* pt_regs->r13 */ pushq %r14 /* pt_regs->r14 */ pushq %r15 /* pt_regs->r15 */ + UNWIND_HINT_REGS cmpq $14,%rsi /* Page fault? */ jnz 10f @@ -327,8 +330,8 @@ early_idt_handler_common: 20: decl early_recursion_flag(%rip) - jmp restore_regs_and_iret -ENDPROC(early_idt_handler_common) + jmp restore_regs_and_return_to_kernel +END(early_idt_handler_common) __INITDATA @@ -340,6 +343,27 @@ GLOBAL(early_recursion_flag) .balign PAGE_SIZE; \ GLOBAL(name) +#ifdef CONFIG_PAGE_TABLE_ISOLATION +/* + * Each PGD needs to be 8k long and 8k aligned. 
We do not + * ever go out to userspace with these, so we do not + * strictly *need* the second page, but this allows us to + * have a single set_pgd() implementation that does not + * need to worry about whether it has 4k or 8k to work + * with. + * + * This ensures PGDs are 8k long: + */ +#define PTI_USER_PGD_FILL 512 +/* This ensures they are 8k-aligned: */ +#define NEXT_PGD_PAGE(name) \ + .balign 2 * PAGE_SIZE; \ +GLOBAL(name) +#else +#define NEXT_PGD_PAGE(name) NEXT_PAGE(name) +#define PTI_USER_PGD_FILL 0 +#endif + /* Automate the creation of 1 to 1 mapping pmd entries */ #define PMDS(START, PERM, COUNT) \ i = 0 ; \ @@ -349,30 +373,29 @@ GLOBAL(name) .endr __INITDATA -NEXT_PAGE(early_top_pgt) +NEXT_PGD_PAGE(early_top_pgt) .fill 511,8,0 #ifdef CONFIG_X86_5LEVEL .quad level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC #else .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC #endif + .fill PTI_USER_PGD_FILL,8,0 NEXT_PAGE(early_dynamic_pgts) .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0 .data -#ifndef CONFIG_XEN -NEXT_PAGE(init_top_pgt) - .fill 512,8,0 -#else -NEXT_PAGE(init_top_pgt) +#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) +NEXT_PGD_PAGE(init_top_pgt) .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .org init_top_pgt + PGD_PAGE_OFFSET*8, 0 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .org init_top_pgt + PGD_START_KERNEL*8, 0 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC + .fill PTI_USER_PGD_FILL,8,0 NEXT_PAGE(level3_ident_pgt) .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC @@ -382,6 +405,10 @@ NEXT_PAGE(level2_ident_pgt) * Don't set NX because code runs from these pages. */ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) +#else +NEXT_PGD_PAGE(init_top_pgt) + .fill 512,8,0 + .fill PTI_USER_PGD_FILL,8,0 #endif #ifdef CONFIG_X86_5LEVEL @@ -435,7 +462,7 @@ ENTRY(phys_base) EXPORT_SYMBOL(phys_base) #include "../../x86/xen/xen-head.S" - + __PAGE_ALIGNED_BSS NEXT_PAGE(empty_zero_page) .skip PAGE_SIZE diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 8ce4212e2b8d..afa1a204bc6d 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 8f5cb2c7060c..02abc134367f 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c index 014cb2fc47ff..38c3d5790970 100644 --- a/arch/x86/kernel/idt.c +++ b/arch/x86/kernel/idt.c @@ -8,6 +8,7 @@ #include #include #include +#include struct idt_data { unsigned int vector; @@ -56,7 +57,7 @@ struct idt_data { * Early traps running on the DEFAULT_STACK because the other interrupt * stacks work only after cpu_init(). */ -static const __initdata struct idt_data early_idts[] = { +static const __initconst struct idt_data early_idts[] = { INTG(X86_TRAP_DB, debug), SYSG(X86_TRAP_BP, int3), #ifdef CONFIG_X86_32 @@ -70,7 +71,7 @@ static const __initdata struct idt_data early_idts[] = { * the traps which use them are reinitialized with IST after cpu_init() has * set up TSS. 
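A side note on the idt_data hunks here: they are const, init-only tables of vector/handler descriptors, walked once while the IDT is built. A minimal userspace sketch of that table-driven pattern, with purely illustrative names (nothing below is the kernel's API):

#include <stddef.h>

/* Hypothetical descriptor, analogous in shape to struct idt_data. */
struct vec_desc {
	unsigned int vector;
	void (*handler)(void);
};

static void divide_error_stub(void) { /* ... */ }
static void nmi_stub(void)          { /* ... */ }

/* Read-only table, like the __initconst idt_data arrays. */
static const struct vec_desc vec_table[] = {
	{ 0, divide_error_stub },
	{ 2, nmi_stub },
};

/* Walk the table once at init, installing each entry. */
static void install_gates(void (*set_gate)(unsigned int, void (*)(void)))
{
	size_t i;

	for (i = 0; i < sizeof(vec_table) / sizeof(vec_table[0]); i++)
		set_gate(vec_table[i].vector, vec_table[i].handler);
}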
*/ -static const __initdata struct idt_data def_idts[] = { +static const __initconst struct idt_data def_idts[] = { INTG(X86_TRAP_DE, divide_error), INTG(X86_TRAP_NMI, nmi), INTG(X86_TRAP_BR, bounds), @@ -108,7 +109,7 @@ static const __initdata struct idt_data def_idts[] = { /* * The APIC and SMP idt entries */ -static const __initdata struct idt_data apic_idts[] = { +static const __initconst struct idt_data apic_idts[] = { #ifdef CONFIG_SMP INTG(RESCHEDULE_VECTOR, reschedule_interrupt), INTG(CALL_FUNCTION_VECTOR, call_function_interrupt), @@ -140,6 +141,9 @@ static const __initdata struct idt_data apic_idts[] = { # ifdef CONFIG_IRQ_WORK INTG(IRQ_WORK_VECTOR, irq_work_interrupt), # endif +#ifdef CONFIG_X86_UV + INTG(UV_BAU_MESSAGE, uv_bau_message_intr1), +#endif INTG(SPURIOUS_APIC_VECTOR, spurious_interrupt), INTG(ERROR_APIC_VECTOR, error_interrupt), #endif @@ -150,7 +154,7 @@ static const __initdata struct idt_data apic_idts[] = { * Early traps running on the DEFAULT_STACK because the other interrupt * stacks work only after cpu_init(). */ -static const __initdata struct idt_data early_pf_idts[] = { +static const __initconst struct idt_data early_pf_idts[] = { INTG(X86_TRAP_PF, page_fault), }; @@ -158,9 +162,8 @@ static const __initdata struct idt_data early_pf_idts[] = { * Override for the debug_idt. Same as the default, but with interrupt * stack set to DEFAULT_STACK (0). Required for NMI trap handling. */ -static const __initdata struct idt_data dbg_idts[] = { +static const __initconst struct idt_data dbg_idts[] = { INTG(X86_TRAP_DB, debug), - INTG(X86_TRAP_BP, int3), }; #endif @@ -180,10 +183,9 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss; * The exceptions which use Interrupt stacks. They are setup after * cpu_init() when the TSS has been initialized. */ -static const __initdata struct idt_data ist_idts[] = { +static const __initconst struct idt_data ist_idts[] = { ISTG(X86_TRAP_DB, debug, DEBUG_STACK), ISTG(X86_TRAP_NMI, nmi, NMI_STACK), - SISTG(X86_TRAP_BP, int3, DEBUG_STACK), ISTG(X86_TRAP_DF, double_fault, DOUBLEFAULT_STACK), #ifdef CONFIG_X86_MCE ISTG(X86_TRAP_MC, &machine_check, MCE_STACK), diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index 3feb648781c4..2f723301eb58 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c @@ -67,7 +67,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) * because the ->io_bitmap_max value must match the bitmap * contents: */ - tss = &per_cpu(cpu_tss, get_cpu()); + tss = &per_cpu(cpu_tss_rw, get_cpu()); if (turn_on) bitmap_clear(t->io_bitmap_ptr, from, num); diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 52089c043160..3c2326b59820 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -219,18 +220,6 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs) /* high bit used in ret_from_ code */ unsigned vector = ~regs->orig_ax; - /* - * NB: Unlike exception entries, IRQ entries do not reliably - * handle context tracking in the low-level entry code. This is - * because syscall entries execute briefly with IRQs on before - * updating context tracking state, so we can take an IRQ from - * kernel mode with CONTEXT_USER. The low-level entry code only - * updates the context if we came from user mode, so we won't - * switch to CONTEXT_KERNEL. We'll fix that once the syscall - * code is cleaned up enough that we can cleanly defer enabling - * IRQs. 
- */ - entering_irq(); /* entering_irq() tells RCU that we're not quiescent. Check it. */ diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index a83b3346a0e1..95600a99ae93 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -20,6 +21,7 @@ #include #include +#include #ifdef CONFIG_DEBUG_STACKOVERFLOW @@ -55,11 +57,11 @@ DEFINE_PER_CPU(struct irq_stack *, softirq_stack); static void call_on_stack(void *func, void *stack) { asm volatile("xchgl %%ebx,%%esp \n" - "call *%%edi \n" + CALL_NOSPEC "movl %%ebx,%%esp \n" : "=b" (stack) : "0" (stack), - "D"(func) + [thunk_target] "D"(func) : "memory", "cc", "edx", "ecx", "eax"); } @@ -95,11 +97,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) call_on_stack(print_stack_overflow, isp); asm volatile("xchgl %%ebx,%%esp \n" - "call *%%edi \n" + CALL_NOSPEC "movl %%ebx,%%esp \n" : "=a" (arg1), "=b" (isp) : "0" (desc), "1" (isp), - "D" (desc->handle_irq) + [thunk_target] "D" (desc->handle_irq) : "memory", "cc", "ecx"); return 1; } diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 020efbf5786b..0469cd078db1 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -57,10 +58,10 @@ static inline void stack_overflow_check(struct pt_regs *regs) if (regs->sp >= estack_top && regs->sp <= estack_bottom) return; - WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx)\n", + WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx,ip:%pF)\n", current->comm, curbase, regs->sp, irq_stack_top, irq_stack_bottom, - estack_top, estack_bottom); + estack_top, estack_bottom, (void *)regs->ip); if (sysctl_panic_on_stackoverflow) panic("low stack detected by irq handler - check messages\n"); diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S new file mode 100644 index 000000000000..ddeeaac8adda --- /dev/null +++ b/arch/x86/kernel/irqflags.S @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include +#include +#include + +/* + * unsigned long native_save_fl(void) + */ +ENTRY(native_save_fl) + pushf + pop %_ASM_AX + ret +ENDPROC(native_save_fl) +EXPORT_SYMBOL(native_save_fl) + +/* + * void native_restore_fl(unsigned long flags) + * %eax/%rdi: flags + */ +ENTRY(native_restore_fl) + push %_ASM_ARG1 + popf + ret +ENDPROC(native_restore_fl) +EXPORT_SYMBOL(native_restore_fl) diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 1e4094eba15e..40f83d0d7b8a 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index fb095ba0c02f..f24cd9f1799a 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -398,11 +398,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel, * little bit simple */ efi_map_sz = efi_get_runtime_map_size(); - efi_map_sz = ALIGN(efi_map_sz, 16); params_cmdline_sz = sizeof(struct boot_params) + cmdline_len + MAX_ELFCOREHDR_STR_LEN; params_cmdline_sz = ALIGN(params_cmdline_sz, 16); - kbuf.bufsz = params_cmdline_sz + efi_map_sz + + kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) + sizeof(struct 
setup_data) + sizeof(struct efi_setup_data); @@ -410,7 +409,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel, if (!params) return ERR_PTR(-ENOMEM); efi_map_offset = params_cmdline_sz; - efi_setup_data_offset = efi_map_offset + efi_map_sz; + efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16); /* Copy setup header onto bootparams. Documentation/x86/boot.txt */ setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset; diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 0742491cbb73..65452d555f05 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -63,6 +63,7 @@ #include #include #include +#include #include "common.h" @@ -369,6 +370,10 @@ int __copy_instruction(u8 *dest, u8 *src, struct insn *insn) if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION) return 0; + /* We should not singlestep on the exception masking instructions */ + if (insn_masking_exception(insn)) + return 0; + #ifdef CONFIG_X86_64 /* Only x86_64 has RIP relative instructions */ if (insn_rip_relative(insn)) { @@ -390,8 +395,6 @@ int __copy_instruction(u8 *dest, u8 *src, struct insn *insn) - (u8 *) dest; if ((s64) (s32) newdisp != newdisp) { pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp); - pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", - src, dest, insn->displacement.value); return 0; } disp = (u8 *) dest + insn_offset_displacement(insn); @@ -617,8 +620,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs, * Raise a BUG or we'll continue in an endless reentering loop * and eventually a stack overflow. */ - printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n", - p->addr); + pr_err("Unrecoverable kprobe detected.\n"); dump_kprobe(p); BUG(); default: @@ -1149,10 +1151,18 @@ NOKPROBE_SYMBOL(longjmp_break_handler); bool arch_within_kprobe_blacklist(unsigned long addr) { + bool is_in_entry_trampoline_section = false; + +#ifdef CONFIG_X86_64 + is_in_entry_trampoline_section = + (addr >= (unsigned long)__entry_trampoline_start && + addr < (unsigned long)__entry_trampoline_end); +#endif return (addr >= (unsigned long)__kprobes_text_start && addr < (unsigned long)__kprobes_text_end) || (addr >= (unsigned long)__entry_text_start && - addr < (unsigned long)__entry_text_end); + addr < (unsigned long)__entry_text_end) || + is_in_entry_trampoline_section; } int __init arch_init_kprobes(void) diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c index 041f7b6dfa0f..bcfee4f69b0e 100644 --- a/arch/x86/kernel/kprobes/ftrace.c +++ b/arch/x86/kernel/kprobes/ftrace.c @@ -26,7 +26,7 @@ #include "common.h" static nokprobe_inline -int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, +void __skip_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, unsigned long orig_ip) { /* @@ -41,20 +41,21 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, __this_cpu_write(current_kprobe, NULL); if (orig_ip) regs->ip = orig_ip; - return 1; } int skip_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { - if (kprobe_ftrace(p)) - return __skip_singlestep(p, regs, kcb, 0); - else - return 0; + if (kprobe_ftrace(p)) { + __skip_singlestep(p, regs, kcb, 0); + preempt_enable_no_resched(); + return 1; + } + return 0; } NOKPROBE_SYMBOL(skip_singlestep); -/* Ftrace callback handler for kprobes */ +/* Ftrace callback handler for kprobes -- called with preemption disabled */ void kprobe_ftrace_handler(unsigned long ip,
unsigned long parent_ip, struct ftrace_ops *ops, struct pt_regs *regs) { @@ -77,13 +78,17 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ regs->ip = ip + sizeof(kprobe_opcode_t); + /* To emulate trap-based kprobes, preempt_disable here */ + preempt_disable(); __this_cpu_write(current_kprobe, p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; - if (!p->pre_handler || !p->pre_handler(p, regs)) + if (!p->pre_handler || !p->pre_handler(p, regs)) { __skip_singlestep(p, regs, kcb, orig_ip); + preempt_enable_no_resched(); + } /* * If pre_handler returns !0, it sets regs->ip and - * resets current kprobe. + * resets the current kprobe, and keeps the preempt count +1. */ } end: diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 4f98aad38237..3668f28cf5fc 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -40,6 +40,7 @@ #include #include #include +#include #include "common.h" @@ -205,7 +206,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src) } /* Check whether insn is indirect jump */ -static int insn_is_indirect_jump(struct insn *insn) +static int __insn_is_indirect_jump(struct insn *insn) { return ((insn->opcode.bytes[0] == 0xff && (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ @@ -239,6 +240,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) return (start <= target && target <= start + len); } +static int insn_is_indirect_jump(struct insn *insn) +{ + int ret = __insn_is_indirect_jump(insn); + +#ifdef CONFIG_RETPOLINE + /* + * Jump to x86_indirect_thunk_* is treated as an indirect jump. + * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with + * older gcc may use indirect jumps. So we add this check instead of + * replacing the indirect-jump check.
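To make the range check concrete: for a relative jump, the branch target is the instruction's address plus its encoded length plus the signed displacement, and the retpoline case above simply asks whether that target lands inside the thunk section. A hedged sketch with simplified fields (the kernel's struct insn carries the equivalents; this is not its actual layout):

/* Simplified stand-in for the decoded-instruction fields used here. */
struct decoded_jump {
	unsigned long kaddr;	/* virtual address of the instruction */
	int length;		/* encoded length in bytes */
	long displacement;	/* signed relative displacement */
};

/* Does the jump land inside [start, start + len]? Mirrors the shape of
 * insn_jump_into_range(); not the kernel's actual implementation. */
static int jump_lands_in_range(const struct decoded_jump *insn,
			       unsigned long start, unsigned long len)
{
	unsigned long target = insn->kaddr + insn->length + insn->displacement;

	return start <= target && target <= start + len;
}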
+ */ + if (!ret) + ret = insn_jump_into_range(insn, + (unsigned long)__indirect_thunk_start, + (unsigned long)__indirect_thunk_end - + (unsigned long)__indirect_thunk_start); +#endif + return ret; +} + /* Decode the whole function to ensure no instruction jumps into the target */ static int can_optimize(unsigned long paddr) { diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 8bb9594d0761..652bdd867782 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -341,10 +341,10 @@ static void kvm_guest_cpu_init(void) #endif pa |= KVM_ASYNC_PF_ENABLED; - /* Async page fault support for L1 hypervisor is optional */ - if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN, - (pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0) - wrmsrl(MSR_KVM_ASYNC_PF_EN, pa); + if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT)) + pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; + + wrmsrl(MSR_KVM_ASYNC_PF_EN, pa); __this_cpu_write(apf_reason.enabled, 1); printk(KERN_INFO"KVM setup async PF for cpu %d\n", smp_processor_id()); @@ -544,12 +544,12 @@ static uint32_t __init kvm_detect(void) return kvm_cpuid_base(); } -const struct hypervisor_x86 x86_hyper_kvm __refconst = { +const __initconst struct hypervisor_x86 x86_hyper_kvm = { .name = "KVM", .detect = kvm_detect, - .x2apic_available = kvm_para_available, + .type = X86_HYPER_KVM, + .init.x2apic_available = kvm_para_available, }; -EXPORT_SYMBOL_GPL(x86_hyper_kvm); static __init int activate_jump_labels(void) { diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 4d17bacf4030..26d713ecad34 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -5,6 +5,11 @@ * Copyright (C) 2002 Andi Kleen * * This handles calls from both 32bit and 64bit mode. + * + * Lock order: + * context.ldt_usr_sem + * mmap_sem + * context.lock */ #include @@ -13,11 +18,13 @@ #include #include #include +#include #include #include #include #include +#include #include #include #include @@ -41,17 +48,15 @@ static void refresh_ldt_segments(void) #endif } -/* context.lock is held for us, so we don't need any locking. */ +/* context.lock is held by the task which issued the smp function call */ static void flush_ldt(void *__mm) { struct mm_struct *mm = __mm; - mm_context_t *pc; if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm) return; - pc = &mm->context; - set_ldt(pc->ldt->entries, pc->ldt->nr_entries); + load_mm_ldt(mm); refresh_ldt_segments(); } @@ -88,25 +93,143 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries) return NULL; } + /* The new LDT isn't aliased for PTI yet. */ + new_ldt->slot = -1; + new_ldt->nr_entries = num_entries; return new_ldt; } +/* + * If PTI is enabled, this maps the LDT into the kernelmode and + * usermode tables for the given mm. + * + * There is no corresponding unmap function. Even if the LDT is freed, we + * leave the PTEs around until the slot is reused or the mm is destroyed. + * This is harmless: the LDT is always in ordinary memory, and no one will + * access the freed slot. + * + * If we wanted to unmap freed LDTs, we'd also need to do a flush to make + * it useful, and the flush would slow down modify_ldt(). + */ +static int +map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) +{ +#ifdef CONFIG_PAGE_TABLE_ISOLATION + bool is_vmalloc, had_top_level_entry; + unsigned long va; + spinlock_t *ptl; + pgd_t *pgd; + int i; + + if (!static_cpu_has(X86_FEATURE_PTI)) + return 0; + + /* + * Any given ldt_struct should have map_ldt_struct() called at most + * once.
+ */ + WARN_ON(ldt->slot != -1); + + /* + * Did we already have the top level entry allocated? We can't + * use pgd_none() for this because it doesn't do anything on + * 4-level page table kernels. + */ + pgd = pgd_offset(mm, LDT_BASE_ADDR); + had_top_level_entry = (pgd->pgd != 0); + + is_vmalloc = is_vmalloc_addr(ldt->entries); + + for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) { + unsigned long offset = i << PAGE_SHIFT; + const void *src = (char *)ldt->entries + offset; + unsigned long pfn; + pte_t pte, *ptep; + + va = (unsigned long)ldt_slot_va(slot) + offset; + pfn = is_vmalloc ? vmalloc_to_pfn(src) : + page_to_pfn(virt_to_page(src)); + /* + * Treat the PTI LDT range as a *userspace* range. + * get_locked_pte() will allocate all needed pagetables + * and account for them in this mm. + */ + ptep = get_locked_pte(mm, va, &ptl); + if (!ptep) + return -ENOMEM; + /* + * Map it RO so the easy-to-find address is not a primary + * target via some kernel interface which misses a + * permission check. + */ + pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)); + set_pte_at(mm, va, ptep, pte); + pte_unmap_unlock(ptep, ptl); + } + + if (mm->context.ldt) { + /* + * We already had an LDT. The top-level entry should already + * have been allocated and synchronized with the usermode + * tables. + */ + WARN_ON(!had_top_level_entry); + if (static_cpu_has(X86_FEATURE_PTI)) + WARN_ON(!kernel_to_user_pgdp(pgd)->pgd); + } else { + /* + * This is the first time we're mapping an LDT for this process. + * Sync the pgd to the usermode tables. + */ + WARN_ON(had_top_level_entry); + if (static_cpu_has(X86_FEATURE_PTI)) { + WARN_ON(kernel_to_user_pgdp(pgd)->pgd); + set_pgd(kernel_to_user_pgdp(pgd), *pgd); + } + } + + va = (unsigned long)ldt_slot_va(slot); + flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0); + + ldt->slot = slot; +#endif + return 0; +} + +static void free_ldt_pgtables(struct mm_struct *mm) +{ +#ifdef CONFIG_PAGE_TABLE_ISOLATION + struct mmu_gather tlb; + unsigned long start = LDT_BASE_ADDR; + unsigned long end = start + (1UL << PGDIR_SHIFT); + + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + tlb_gather_mmu(&tlb, mm, start, end); + free_pgd_range(&tlb, start, end, start, end); + tlb_finish_mmu(&tlb, start, end); +#endif +} + /* After calling this, the LDT is immutable. */ static void finalize_ldt_struct(struct ldt_struct *ldt) { paravirt_alloc_ldt(ldt->entries, ldt->nr_entries); } -/* context.lock is held */ -static void install_ldt(struct mm_struct *current_mm, - struct ldt_struct *ldt) +static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt) { - /* Synchronizes with lockless_dereference in load_mm_ldt. */ - smp_store_release(&current_mm->context.ldt, ldt); + mutex_lock(&mm->context.lock); + + /* Synchronizes with READ_ONCE in load_mm_ldt. */ + smp_store_release(&mm->context.ldt, ldt); + + /* Activate the LDT for all CPUs using current's mm. */ + on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true); - /* Activate the LDT for all CPUs using current_mm. */ - on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true); + mutex_unlock(&mm->context.lock); } static void free_ldt_struct(struct ldt_struct *ldt) @@ -123,27 +246,20 @@ static void free_ldt_struct(struct ldt_struct *ldt) } /* - * we do not have to muck with descriptors here, that is - * done in switch_mm() as needed. + * Called on fork from arch_dup_mmap(). Just copy the current LDT state, + * the new task is not running, so nothing can be installed.
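For reference, the mapping loop in map_ldt_struct() above installs one PTE per page of the LDT, i.e. ceil(nr_entries * LDT_ENTRY_SIZE / PAGE_SIZE) pages. A small standalone check of that arithmetic, with the constants assumed (8-byte descriptors, 4 KiB pages):

#include <stdio.h>

#define LDT_ENTRY_SIZE	8	/* bytes per descriptor (assumed) */
#define PAGE_SIZE	4096	/* 4 KiB pages (assumed) */

/* Number of pages covered by the loop condition
 * "i * PAGE_SIZE < nr_entries * LDT_ENTRY_SIZE". */
static unsigned int ldt_pages(unsigned int nr_entries)
{
	return (nr_entries * LDT_ENTRY_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
	printf("%u\n", ldt_pages(1));		/* 1 page */
	printf("%u\n", ldt_pages(512));		/* 4096 bytes -> 1 page */
	printf("%u\n", ldt_pages(8192));	/* 64 KiB -> 16 pages */
	return 0;
}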
 */ -int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm) +int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm) { struct ldt_struct *new_ldt; - struct mm_struct *old_mm; int retval = 0; - mutex_init(&mm->context.lock); - old_mm = current->mm; - if (!old_mm) { - mm->context.ldt = NULL; + if (!old_mm) return 0; - } mutex_lock(&old_mm->context.lock); - if (!old_mm->context.ldt) { - mm->context.ldt = NULL; + if (!old_mm->context.ldt) goto out_unlock; - } new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries); if (!new_ldt) { @@ -155,6 +271,12 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm) new_ldt->nr_entries * LDT_ENTRY_SIZE); finalize_ldt_struct(new_ldt); + retval = map_ldt_struct(mm, new_ldt, 0); + if (retval) { + free_ldt_pgtables(mm); + free_ldt_struct(new_ldt); + goto out_unlock; + } mm->context.ldt = new_ldt; out_unlock: @@ -173,13 +295,18 @@ void destroy_context_ldt(struct mm_struct *mm) mm->context.ldt = NULL; } +void ldt_arch_exit_mmap(struct mm_struct *mm) +{ + free_ldt_pgtables(mm); +} + static int read_ldt(void __user *ptr, unsigned long bytecount) { struct mm_struct *mm = current->mm; unsigned long entries_size; int retval; - mutex_lock(&mm->context.lock); + down_read(&mm->context.ldt_usr_sem); if (!mm->context.ldt) { retval = 0; @@ -208,7 +335,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount) retval = bytecount; out_unlock: - mutex_unlock(&mm->context.lock); + up_read(&mm->context.ldt_usr_sem); return retval; } @@ -268,7 +395,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) ldt.avl = 0; } - mutex_lock(&mm->context.lock); + if (down_write_killable(&mm->context.ldt_usr_sem)) + return -EINTR; old_ldt = mm->context.ldt; old_nr_entries = old_ldt ? old_ldt->nr_entries : 0; @@ -285,18 +413,37 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) new_ldt->entries[ldt_info.entry_number] = ldt; finalize_ldt_struct(new_ldt); + /* + * If we are using PTI, map the new LDT into the userspace pagetables. + * If there is already an LDT, use the other slot so that other CPUs + * will continue to use the old LDT until install_ldt() switches + * them over to the new LDT. + */ + error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0); + if (error) { + /* + * This can only fail for the first LDT setup. If an LDT is + * already installed then the PTE page is already + * populated. Mop up a half-populated page table. + */ + if (!WARN_ON_ONCE(old_ldt)) + free_ldt_pgtables(mm); + free_ldt_struct(new_ldt); + goto out_unlock; + } + install_ldt(mm, new_ldt); free_ldt_struct(old_ldt); error = 0; out_unlock: - mutex_unlock(&mm->context.lock); + up_write(&mm->context.ldt_usr_sem); out: return error; } -asmlinkage int sys_modify_ldt(int func, void __user *ptr, - unsigned long bytecount) +SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr , + unsigned long , bytecount) { int ret = -ENOSYS; @@ -314,5 +461,14 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr, ret = write_ldt(ptr, bytecount, 0); break; } - return ret; + /* + * The SYSCALL_DEFINE() macros give us an 'unsigned long' + * return type, but the ABI for sys_modify_ldt() expects + * 'int'. This cast gives us an int-sized value in %rax + * for the return code. The 'unsigned' is necessary so + * the compiler does not try to sign-extend the negative + * return codes into the high half of the register when + * taking the value from int->long.
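The int-versus-long point in that comment is easy to demonstrate in isolation: returning a negative int through a long-returning function sign-extends into the high half, while bouncing it through unsigned int zero-extends. A hedged userspace illustration (plain C, nothing kernel-specific):

#include <stdio.h>

static long ret_plain(int err) { return err; }              /* sign-extends */
static long ret_cast(int err) { return (unsigned int)err; } /* zero high half */

int main(void)
{
	/* -22 stands in for a -EINVAL-style error; output is for LP64. */
	printf("%lx\n", (unsigned long)ret_plain(-22)); /* ffffffffffffffea */
	printf("%lx\n", (unsigned long)ret_cast(-22));  /* ffffffea */
	return 0;
}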
+ */ + return (unsigned int)ret; } diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 00bc751c861c..5167f3f74136 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -48,8 +48,6 @@ static void load_segments(void) "\tmovl $"STR(__KERNEL_DS)",%%eax\n" "\tmovl %%eax,%%ds\n" "\tmovl %%eax,%%es\n" - "\tmovl %%eax,%%fs\n" - "\tmovl %%eax,%%gs\n" "\tmovl %%eax,%%ss\n" : : : "eax", "memory"); #undef STR @@ -59,12 +57,17 @@ static void load_segments(void) static void machine_kexec_free_page_tables(struct kimage *image) { free_page((unsigned long)image->arch.pgd); + image->arch.pgd = NULL; #ifdef CONFIG_X86_PAE free_page((unsigned long)image->arch.pmd0); + image->arch.pmd0 = NULL; free_page((unsigned long)image->arch.pmd1); + image->arch.pmd1 = NULL; #endif free_page((unsigned long)image->arch.pte0); + image->arch.pte0 = NULL; free_page((unsigned long)image->arch.pte1); + image->arch.pte1 = NULL; } static int machine_kexec_alloc_page_tables(struct kimage *image) @@ -81,7 +84,6 @@ static int machine_kexec_alloc_page_tables(struct kimage *image) !image->arch.pmd0 || !image->arch.pmd1 || #endif !image->arch.pte0 || !image->arch.pte1) { - machine_kexec_free_page_tables(image); return -ENOMEM; } return 0; @@ -232,8 +234,8 @@ void machine_kexec(struct kimage *image) * The gdt & idt are now invalid. * If you want to load them you must set up your own idt & gdt. */ - set_gdt(phys_to_virt(0), 0); idt_invalidate(phys_to_virt(0)); + set_gdt(phys_to_virt(0), 0); /* now call it */ image->start = relocate_kernel_ptr((unsigned long)image->head, diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 1f790cf9d38f..5bce2a88e8a3 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -38,9 +38,13 @@ static struct kexec_file_ops *kexec_file_loaders[] = { static void free_transition_pgtable(struct kimage *image) { free_page((unsigned long)image->arch.p4d); + image->arch.p4d = NULL; free_page((unsigned long)image->arch.pud); + image->arch.pud = NULL; free_page((unsigned long)image->arch.pmd); + image->arch.pmd = NULL; free_page((unsigned long)image->arch.pte); + image->arch.pte = NULL; } static int init_transition_pgtable(struct kimage *image, pgd_t *pgd) @@ -90,7 +94,6 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd) set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC_NOENC)); return 0; err: - free_transition_pgtable(image); return result; } @@ -542,6 +545,7 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr, goto overflow; break; case R_X86_64_PC32: + case R_X86_64_PLT32: value -= (u64)address; *(u32 *)location = value; break; diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index da0c160e5589..f58336af095c 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -191,6 +191,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, goto overflow; break; case R_X86_64_PC32: + case R_X86_64_PLT32: if (*(u32 *)loc != 0) goto invalid_relocation; val -= (u64)loc; diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 410c5dadcee3..bc6bc6689e68 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -407,7 +407,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) processor.apicver = mpc_default_type > 4 ? 
0x10 : 0x01; processor.cpuflag = CPU_ENABLED; processor.cpufeature = (boot_cpu_data.x86 << 8) | - (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; + (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping; processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX]; processor.reserved[0] = 0; processor.reserved[1] = 0; @@ -431,6 +431,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) } static unsigned long mpf_base; +static bool mpf_found; static unsigned long __init get_mpc_size(unsigned long physptr) { @@ -504,7 +505,7 @@ void __init default_get_smp_config(unsigned int early) if (!smp_found_config) return; - if (!mpf_base) + if (!mpf_found) return; if (acpi_lapic && early) @@ -593,6 +594,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) smp_found_config = 1; #endif mpf_base = base; + mpf_found = true; pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n", base, base + sizeof(*mpf) - 1, mpf); @@ -858,7 +860,7 @@ static int __init update_mp_table(void) if (!enable_update_mptable) return 0; - if (!mpf_base) + if (!mpf_found) return 0; mpf = early_memremap(mpf_base, sizeof(*mpf)); diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 19a3e8f961c7..cba713884be7 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -88,10 +89,12 @@ unsigned paravirt_patch_call(void *insnbuf, struct branch *b = insnbuf; unsigned long delta = (unsigned long)target - (addr+5); - if (tgt_clobbers & ~site_clobbers) - return len; /* target would clobber too much for this site */ - if (len < 5) + if (len < 5) { +#ifdef CONFIG_RETPOLINE + WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr); +#endif return len; /* call too long for patch site */ + } b->opcode = 0xe8; /* call */ b->delta = delta; @@ -106,8 +109,12 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target, struct branch *b = insnbuf; unsigned long delta = (unsigned long)target - (addr+5); - if (len < 5) + if (len < 5) { +#ifdef CONFIG_RETPOLINE + WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr); +#endif return len; /* call too long for patch site */ + } b->opcode = 0xe9; /* jmp */ b->delta = delta; @@ -190,9 +197,9 @@ static void native_flush_tlb_global(void) __native_flush_tlb_global(); } -static void native_flush_tlb_single(unsigned long addr) +static void native_flush_tlb_one_user(unsigned long addr) { - __native_flush_tlb_single(addr); + __native_flush_tlb_one_user(addr); } struct static_key paravirt_steal_enabled; @@ -319,6 +326,9 @@ __visible struct pv_irq_ops pv_irq_ops = { .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable), .safe_halt = native_safe_halt, .halt = native_halt, +#ifdef CONFIG_PCI_MSI + .write_msi = native_write_msi_msg, +#endif }; __visible struct pv_cpu_ops pv_cpu_ops = { @@ -367,6 +377,8 @@ __visible struct pv_cpu_ops pv_cpu_ops = { .start_context_switch = paravirt_nop, .end_context_switch = paravirt_nop, + + .cpu_khz = paravirt_nop, }; /* At this point, native_get/set_debugreg has real function entries */ @@ -391,7 +403,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = { .flush_tlb_user = native_flush_tlb, .flush_tlb_kernel = native_flush_tlb_global, - .flush_tlb_single = native_flush_tlb_single, + .flush_tlb_one_user = native_flush_tlb_one_user, .flush_tlb_others = native_flush_tlb_others, .pgd_alloc = __paravirt_pgd_alloc, diff --git a/arch/x86/kernel/paravirt_patch_64.c 
b/arch/x86/kernel/paravirt_patch_64.c index ac0be8283325..9edadabf04f6 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c @@ -10,7 +10,6 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax"); DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); -DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)"); DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd"); DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq"); @@ -60,7 +59,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, PATCH_SITE(pv_mmu_ops, read_cr2); PATCH_SITE(pv_mmu_ops, read_cr3); PATCH_SITE(pv_mmu_ops, write_cr3); - PATCH_SITE(pv_mmu_ops, flush_tlb_single); PATCH_SITE(pv_cpu_ops, wbinvd); #if defined(CONFIG_PARAVIRT_SPINLOCKS) case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock): diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index c67685337c5a..988a98f34c66 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -39,6 +39,7 @@ #include #include #include +#include /* * per-CPU TSS segments. Threads are completely 'soft' on Linux, @@ -47,9 +48,25 @@ * section. Since TSS's are completely CPU-local, we want them * on exact cacheline boundaries, to eliminate cacheline ping-pong. */ -__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { +__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = { .x86_tss = { - .sp0 = TOP_OF_INIT_STACK, + /* + * .sp0 is only used when entering ring 0 from a lower + * privilege level. Since the init task never runs anything + * but ring 0 code, there is no need for a valid value here. + * Poison it. + */ + .sp0 = (1UL << (BITS_PER_LONG-1)) + 1, + +#ifdef CONFIG_X86_64 + /* + * .sp1 is cpu_current_top_of_stack. The init task never + * runs user code, but cpu_current_top_of_stack should still + * be well defined before the first context switch. + */ + .sp1 = TOP_OF_INIT_STACK, +#endif + #ifdef CONFIG_X86_32 .ss0 = __KERNEL_DS, .ss1 = __KERNEL_CS, @@ -65,11 +82,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { */ .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, #endif -#ifdef CONFIG_X86_32 - .SYSENTER_stack_canary = STACK_END_MAGIC, -#endif }; -EXPORT_PER_CPU_SYMBOL(cpu_tss); +EXPORT_PER_CPU_SYMBOL(cpu_tss_rw); DEFINE_PER_CPU(bool, __tss_limit_invalid); EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid); @@ -98,7 +112,7 @@ void exit_thread(struct task_struct *tsk) struct fpu *fpu = &t->fpu; if (bp) { - struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu()); + struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu()); t->io_bitmap_ptr = NULL; clear_thread_flag(TIF_IO_BITMAP); @@ -266,6 +280,148 @@ static inline void switch_to_bitmap(struct tss_struct *tss, } } +#ifdef CONFIG_SMP + +struct ssb_state { + struct ssb_state *shared_state; + raw_spinlock_t lock; + unsigned int disable_state; + unsigned long local_state; +}; + +#define LSTATE_SSB 0 + +static DEFINE_PER_CPU(struct ssb_state, ssb_state); + +void speculative_store_bypass_ht_init(void) +{ + struct ssb_state *st = this_cpu_ptr(&ssb_state); + unsigned int this_cpu = smp_processor_id(); + unsigned int cpu; + + st->local_state = 0; + + /* + * Shared state setup happens once on the first bringup + * of the CPU. It's not destroyed on CPU hotunplug. 
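Back in the paravirt.c hunk a little earlier, paravirt_patch_call()/paravirt_patch_jmp() emit 5-byte rel32 branches: one opcode byte plus a 32-bit displacement measured from the end of the instruction. A hedged sketch of just that encoding arithmetic, not the kernel's code:

#include <stdint.h>
#include <string.h>

/* Encode "call rel32" (0xe8) or "jmp rel32" (0xe9) at insn_addr,
 * targeting 'target'. Assumes a little-endian x86 buffer and that the
 * +/-2 GiB displacement range is sufficient, as the patch sites do. */
static void encode_rel32(uint8_t buf[5], uint8_t opcode,
			 uint64_t insn_addr, uint64_t target)
{
	int32_t delta = (int32_t)(target - (insn_addr + 5));

	buf[0] = opcode;		/* 0xe8 = call, 0xe9 = jmp */
	memcpy(&buf[1], &delta, sizeof(delta));
}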
+ */ + if (st->shared_state) + return; + + raw_spin_lock_init(&st->lock); + + /* + * Go over HT siblings and check whether one of them has set up the + * shared state pointer already. + */ + for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) { + if (cpu == this_cpu) + continue; + + if (!per_cpu(ssb_state, cpu).shared_state) + continue; + + /* Link it to the state of the sibling: */ + st->shared_state = per_cpu(ssb_state, cpu).shared_state; + return; + } + + /* + * First HT sibling to come up on the core. Link shared state of + * the first HT sibling to itself. The siblings on the same core + * which come up later will see the shared state pointer and link + * themselves to the state of this CPU. + */ + st->shared_state = st; +} + +/* + * Logic is: the first HT sibling enables SSBD for both siblings in the core + * and the last sibling to disable it disables it for the whole core. This is how + * MSR_SPEC_CTRL works in "hardware": + * + * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL + */ +static __always_inline void amd_set_core_ssb_state(unsigned long tifn) +{ + struct ssb_state *st = this_cpu_ptr(&ssb_state); + u64 msr = x86_amd_ls_cfg_base; + + if (!static_cpu_has(X86_FEATURE_ZEN)) { + msr |= ssbd_tif_to_amd_ls_cfg(tifn); + wrmsrl(MSR_AMD64_LS_CFG, msr); + return; + } + + if (tifn & _TIF_SSBD) { + /* + * Since this can race with prctl(), block reentry on the + * same CPU. + */ + if (__test_and_set_bit(LSTATE_SSB, &st->local_state)) + return; + + msr |= x86_amd_ls_cfg_ssbd_mask; + + raw_spin_lock(&st->shared_state->lock); + /* First sibling enables SSBD: */ + if (!st->shared_state->disable_state) + wrmsrl(MSR_AMD64_LS_CFG, msr); + st->shared_state->disable_state++; + raw_spin_unlock(&st->shared_state->lock); + } else { + if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state)) + return; + + raw_spin_lock(&st->shared_state->lock); + st->shared_state->disable_state--; + if (!st->shared_state->disable_state) + wrmsrl(MSR_AMD64_LS_CFG, msr); + raw_spin_unlock(&st->shared_state->lock); + } +} +#else +static __always_inline void amd_set_core_ssb_state(unsigned long tifn) +{ + u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn); + + wrmsrl(MSR_AMD64_LS_CFG, msr); +} +#endif + +static __always_inline void amd_set_ssb_virt_state(unsigned long tifn) +{ + /* + * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL, + * so ssbd_tif_to_spec_ctrl() just works.
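The core-wide toggling described above is a classic first-on/last-off count under a shared lock. A hedged userspace analogue of amd_set_core_ssb_state()'s counting, with pthreads standing in for the raw spinlock and printf for the MSR write:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t core_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int disable_count;

static void write_core_msr(int on)
{
	printf("SSBD %s for the whole core\n", on ? "on" : "off");
}

static void ssbd_enable(void)
{
	pthread_mutex_lock(&core_lock);
	if (disable_count++ == 0)	/* first sibling enables */
		write_core_msr(1);
	pthread_mutex_unlock(&core_lock);
}

static void ssbd_disable(void)
{
	pthread_mutex_lock(&core_lock);
	if (--disable_count == 0)	/* last sibling disables */
		write_core_msr(0);
	pthread_mutex_unlock(&core_lock);
}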
+ */ + wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn)); +} + +static __always_inline void intel_set_ssb_state(unsigned long tifn) +{ + u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn); + + wrmsrl(MSR_IA32_SPEC_CTRL, msr); +} + +static __always_inline void __speculative_store_bypass_update(unsigned long tifn) +{ + if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) + amd_set_ssb_virt_state(tifn); + else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) + amd_set_core_ssb_state(tifn); + else + intel_set_ssb_state(tifn); +} + +void speculative_store_bypass_update(unsigned long tif) +{ + preempt_disable(); + __speculative_store_bypass_update(tif); + preempt_enable(); +} + void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, struct tss_struct *tss) { @@ -297,6 +453,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, if ((tifp ^ tifn) & _TIF_NOCPUID) set_cpuid_faulting(!!(tifn & _TIF_NOCPUID)); + + if ((tifp ^ tifn) & _TIF_SSBD) + __speculative_store_bypass_update(tifn); } /* @@ -367,19 +526,24 @@ void stop_this_cpu(void *dummy) disable_local_APIC(); mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); + /* + * Use wbinvd on processors that support SME. This provides support + * for performing a successful kexec when going from SME inactive + * to SME active (or vice-versa). The cache must be cleared so that + * if there are entries with the same physical address, both with and + * without the encryption bit, they don't race each other when flushed + * and potentially end up with the wrong entry being committed to + * memory. + */ + if (boot_cpu_has(X86_FEATURE_SME)) + native_wbinvd(); for (;;) { /* - * Use wbinvd followed by hlt to stop the processor. This - * provides support for kexec on a processor that supports - * SME. With kexec, going from SME inactive to SME active - * requires clearing cache entries so that addresses without - * the encryption bit set don't corrupt the same physical - * address that has the encryption bit set when caches are - * flushed. To achieve this a wbinvd is performed followed by - * a hlt. Even if the processor is not in the kexec/SME - * scenario this only adds a wbinvd to a halting processor. + * Use native_halt() so that memory contents don't change + * (stack usage and variables) after possibly issuing the + * native_wbinvd() above. */ - asm volatile("wbinvd; hlt" : : : "memory"); + native_halt(); } } diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 11966251cd42..5224c6099184 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -234,7 +234,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) struct fpu *prev_fpu = &prev->fpu; struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(cpu_tss, cpu); + struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu); /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ @@ -284,9 +284,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) /* * Reload esp0 and cpu_current_top_of_stack. This changes - * current_thread_info(). + * current_thread_info(). Refresh the SYSENTER configuration in + * case prev or next is vm86. 
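The __switch_to_xtra() hunk just above shows the standard pattern for reacting only to thread-flag bits that differ between the outgoing and incoming task: XOR the two flag words and test the interesting bits. A hedged sketch of that dispatch (bit positions below are illustrative, not the kernel's):

#define TIF_SSBD	(1u << 0)	/* illustrative bit positions */
#define TIF_NOCPUID	(1u << 1)

static void update_ssbd(unsigned int tif_next)    { (void)tif_next; /* reprogram MSR */ }
static void update_nocpuid(unsigned int tif_next) { (void)tif_next; /* toggle faulting */ }

static void switch_extra(unsigned int tif_prev, unsigned int tif_next)
{
	unsigned int changed = tif_prev ^ tif_next;

	if (changed & TIF_SSBD)
		update_ssbd(tif_next);
	if (changed & TIF_NOCPUID)
		update_nocpuid(tif_next);
}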
*/ - load_sp0(tss, next); + update_sp0(next_p); + refresh_sysenter_cs(next); this_cpu_write(cpu_current_top_of_stack, (unsigned long)task_stack_page(next_p) + THREAD_SIZE); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 302e7b2572d1..fa093b77689f 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -69,9 +69,8 @@ void __show_regs(struct pt_regs *regs, int all) unsigned int fsindex, gsindex; unsigned int ds, cs, es; - printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs, (void *)regs->ip); - printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss, - regs->sp, regs->flags); + show_iret_regs(regs); + if (regs->orig_ax != -1) pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax); else @@ -88,6 +87,9 @@ void __show_regs(struct pt_regs *regs, int all) printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n", regs->r13, regs->r14, regs->r15); + if (!all) + return; + asm("movl %%ds,%0" : "=r" (ds)); asm("movl %%cs,%0" : "=r" (cs)); asm("movl %%es,%0" : "=r" (es)); @@ -98,9 +100,6 @@ void __show_regs(struct pt_regs *regs, int all) rdmsrl(MSR_GS_BASE, gs); rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); - if (!all) - return; - cr0 = read_cr0(); cr2 = read_cr2(); cr3 = __read_cr3(); @@ -274,7 +273,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp, struct inactive_task_frame *frame; struct task_struct *me = current; - p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE; childregs = task_pt_regs(p); fork_frame = container_of(childregs, struct fork_frame, regs); frame = &fork_frame->frame; @@ -401,7 +399,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) struct fpu *prev_fpu = &prev->fpu; struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(cpu_tss, cpu); + struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu); WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && this_cpu_read(irq_count) != -1); @@ -463,9 +461,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) * Switch the PDA and FPU contexts. */ this_cpu_write(current_task, next_p); + this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p)); - /* Reload esp0 and ss1. This changes current_thread_info(). */ - load_sp0(tss, next); + /* Reload sp0. */ + update_sp0(next_p); /* * Now maybe reload the debug registers and handle I/O bitmaps @@ -529,6 +528,7 @@ void set_personality_64bit(void) clear_thread_flag(TIF_X32); /* Pretend that this comes from a 64bit execve */ task_pt_regs(current)->orig_ax = __NR_execve; + current_thread_info()->status &= ~TS_COMPAT; /* Ensure the corresponding mm is not marked. */ if (current->mm) @@ -558,7 +558,7 @@ static void __set_personality_x32(void) * Pretend to come from a x32 execve. 
*/ task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT; - current->thread.status &= ~TS_COMPAT; + current_thread_info()->status &= ~TS_COMPAT; #endif } @@ -572,7 +572,7 @@ static void __set_personality_ia32(void) current->personality |= force_personality32; /* Prepare the first "return" to user space */ task_pt_regs(current)->orig_ax = __NR_ia32_execve; - current->thread.status |= TS_COMPAT; + current_thread_info()->status |= TS_COMPAT; #endif } diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index f37d18124648..ed5c4cdf0a34 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -935,7 +935,7 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 value) */ regs->orig_ax = value; if (syscall_get_nr(child, regs) >= 0) - child->thread.status |= TS_I386_REGS_POKED; + child->thread_info.status |= TS_I386_REGS_POKED; break; case offsetof(struct user32, regs.eflags): diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 697a4ce04308..736348ead421 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -645,12 +645,19 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev) /* Skylake */ static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev) { - u32 capid0; + u32 capid0, capid5; pci_read_config_dword(pdev, 0x84, &capid0); + pci_read_config_dword(pdev, 0x98, &capid5); - if ((capid0 & 0xc0) == 0xc0) + /* + * CAPID0{7:6} indicate whether this is an advanced RAS SKU + * CAPID5{8:5} indicate that various NVDIMM usage modes are + * enabled, so memory machine check recovery is also enabled. + */ + if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0)) static_branch_inc(&mcsafe_key); + } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap); diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index 307d3bac5f04..11eda21eb697 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -68,6 +68,9 @@ relocate_kernel: movq %cr4, %rax movq %rax, CR4(%r11) + /* Save CR4. Required to enable the right paging mode later. */ + movq %rax, %r13 + /* zero out flags, and disable interrupts */ pushq $0 popfq @@ -126,8 +129,13 @@ identity_mapped: /* * Set cr4 to a known state: * - physical address extension enabled + * - 5-level paging, if it was enabled before */ movl $X86_CR4_PAE, %eax + testq $X86_CR4_LA57, %r13 + jz 1f + orl $X86_CR4_LA57, %eax +1: movq %rax, %cr4 jmp 1f diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 0957dd73d127..dcb00acb6583 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -376,14 +376,6 @@ static void __init reserve_initrd(void) !ramdisk_image || !ramdisk_size) return; /* No initrd provided by bootloader */ - /* - * If SME is active, this memory will be marked encrypted by the - * kernel when it is accessed (including relocation). However, the - * ramdisk image was loaded decrypted by the bootloader, so make - * sure that it is encrypted before accessing it. 
- */ - sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image); - initrd_start = 0; mapped_size = memblock_mem_size(max_pfn_mapped); @@ -860,6 +852,12 @@ void __init setup_arch(char **cmdline_p) memblock_reserve(__pa_symbol(_text), (unsigned long)__bss_stop - (unsigned long)_text); + /* + * Make sure page 0 is always reserved because on systems with + * L1TF its contents can be leaked to user processes. + */ + memblock_reserve(0, PAGE_SIZE); + early_reserve_initrd(); /* @@ -936,9 +934,6 @@ void __init setup_arch(char **cmdline_p) set_bit(EFI_BOOT, &efi.flags); set_bit(EFI_64BIT, &efi.flags); } - - if (efi_enabled(EFI_BOOT)) - efi_memblock_x86_reserve_range(); #endif x86_init.oem.arch_setup(); @@ -992,6 +987,8 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); + if (efi_enabled(EFI_BOOT)) + efi_memblock_x86_reserve_range(); #ifdef CONFIG_MEMORY_HOTPLUG /* * Memory used by the kernel cannot be hot-removed because Linux @@ -1247,20 +1244,13 @@ void __init setup_arch(char **cmdline_p) kasan_init(); -#ifdef CONFIG_X86_32 - /* sync back kernel address range */ - clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, - swapper_pg_dir + KERNEL_PGD_BOUNDARY, - KERNEL_PGD_PTRS); - /* - * sync back low identity map too. It is used for example - * in the 32-bit EFI stub. + * Sync back kernel address range. + * + * FIXME: Can the later sync in setup_cpu_entry_areas() replace + * this call? */ - clone_pgd_range(initial_page_table, - swapper_pg_dir + KERNEL_PGD_BOUNDARY, - min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); -#endif + sync_initial_page_table(); tboot_probe(); diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 497aa766fab3..ea554f812ee1 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -287,24 +287,15 @@ void __init setup_per_cpu_areas(void) /* Setup cpu initialized, callin, callout masks */ setup_cpu_local_masks(); -#ifdef CONFIG_X86_32 /* * Sync back kernel address range again. We already did this in * setup_arch(), but percpu data also needs to be available in * the smpboot asm. We can't reliably pick up percpu mappings * using vmalloc_fault(), because exception dispatch needs * percpu data. + * + * FIXME: Can the later sync in setup_cpu_entry_areas() replace + * this call? */ - clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, - swapper_pg_dir + KERNEL_PGD_BOUNDARY, - KERNEL_PGD_PTRS); - - /* - * sync back low identity map too. It is used for example - * in the 32-bit EFI stub. - */ - clone_pgd_range(initial_page_table, - swapper_pg_dir + KERNEL_PGD_BOUNDARY, - min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); -#endif + sync_initial_page_table(); } diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index b9e00e8f1c9b..4cdc0b27ec82 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -787,7 +787,7 @@ static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) * than the tracee. 
*/ #ifdef CONFIG_IA32_EMULATION - if (current->thread.status & (TS_COMPAT|TS_I386_REGS_POKED)) + if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED)) return __NR_ia32_restart_syscall; #endif #ifdef CONFIG_X86_X32_ABI diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 5c574dff4c1a..9bbeec53634c 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -117,6 +117,19 @@ static atomic_t stopping_cpu = ATOMIC_INIT(-1); static bool smp_no_nmi_ipi = false; +static DEFINE_PER_CPU(struct pt_regs, cpu_regs); + +/* Store regs of this CPU for RAM dump decoding help */ +static inline void store_regs(struct pt_regs *regs) +{ + struct pt_regs *print_regs; + print_regs = &get_cpu_var(cpu_regs); + crash_setup_regs(print_regs, regs); + + /* Flush CPU cache */ + wbinvd(); +} + /* * this function sends a 'reschedule' IPI to another CPU. * it goes straight through and wastes no time serializing @@ -163,6 +176,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) if (raw_smp_processor_id() == atomic_read(&stopping_cpu)) return NMI_HANDLED; + store_regs(regs); cpu_emergency_vmxoff(); stop_this_cpu(NULL); @@ -173,9 +187,10 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) * this function calls the 'stop' function on all other CPUs in the system. */ -asmlinkage __visible void smp_reboot_interrupt(void) +__visible void smp_reboot_interrupt(struct pt_regs *regs) { ipi_entering_ack_irq(); + store_regs(regs); cpu_emergency_vmxoff(); stop_this_cpu(NULL); irq_exit(); @@ -247,6 +262,7 @@ static void native_stop_other_cpus(int wait) } finish: + store_regs(NULL); local_irq_save(flags); disable_local_APIC(); mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); @@ -261,6 +277,7 @@ __visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs) { ack_APIC_irq(); inc_irq_stat(irq_resched_count); + kvm_set_cpu_l1tf_flush_l1d(); if (trace_resched_ipi_enabled()) { /* diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 65a0ccdc3050..5ebb0dbcf4f7 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -77,13 +77,8 @@ #include #include #include - -/* Number of siblings per CPU package */ -int smp_num_siblings = 1; -EXPORT_SYMBOL(smp_num_siblings); - -/* Last level cache ID of each logical CPU */ -DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID; +#include +#include /* representing HT siblings of each logical CPU */ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map); @@ -128,25 +123,16 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) spin_lock_irqsave(&rtc_lock, flags); CMOS_WRITE(0xa, 0xf); spin_unlock_irqrestore(&rtc_lock, flags); - local_flush_tlb(); - pr_debug("1.\n"); *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = start_eip >> 4; - pr_debug("2.\n"); *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = start_eip & 0xf; - pr_debug("3.\n"); } static inline void smpboot_restore_warm_reset_vector(void) { unsigned long flags; - /* - * Install writable page 0 entry to set BIOS data area. - */ - local_flush_tlb(); - /* * Paranoid: Set warm reset code and vector here back * to default values. 
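As background for the warm-reset hunks above: the trampoline entry point is published to the BIOS as a real-mode segment:offset pair, which is exactly what the start_eip >> 4 and start_eip & 0xf stores express. A minimal user-space sketch of that split; the struct and function names are illustrative, not kernel code:

#include <stdint.h>

struct warm_reset_vector {
	uint16_t offset;	/* written to TRAMPOLINE_PHYS_LOW  */
	uint16_t segment;	/* written to TRAMPOLINE_PHYS_HIGH */
};

static struct warm_reset_vector encode_start_eip(uint32_t start_eip)
{
	struct warm_reset_vector v = {
		.offset  = start_eip & 0xf,	/* low nibble is the offset */
		.segment = start_eip >> 4,	/* the rest is the segment  */
	};

	return v;	/* segment * 16 + offset reconstructs start_eip */
}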
@@ -239,7 +225,7 @@ static void notrace start_secondary(void *unused) load_cr3(swapper_pg_dir); __flush_tlb_all(); #endif - + load_current_idt(); cpu_init(); x86_cpuinit.early_percpu_clock_init(); preempt_disable(); @@ -254,6 +240,8 @@ static void notrace start_secondary(void *unused) */ check_tsc_sync_target(); + speculative_store_bypass_ht_init(); + /* * Lock vector_lock and initialize the vectors on this cpu * before setting the cpu online. We must set it online with @@ -317,6 +305,23 @@ int topology_update_package_map(unsigned int pkg, unsigned int cpu) return 0; } +/** + * topology_is_primary_thread - Check whether CPU is the primary SMT thread + * @cpu: CPU to check + */ +bool topology_is_primary_thread(unsigned int cpu) +{ + return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu)); +} + +/** + * topology_smt_supported - Check whether SMT is supported by the CPUs + */ +bool topology_smt_supported(void) +{ + return smp_num_siblings > 1; +} + /** * topology_phys_to_logical_pkg - Map a physical package id to a logical * @@ -962,8 +967,7 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle) #ifdef CONFIG_X86_32 /* Stack for startup_32 can be just as for start_secondary onwards */ irq_ctx_init(cpu); - per_cpu(cpu_current_top_of_stack, cpu) = - (unsigned long)task_stack_page(idle) + THREAD_SIZE; + per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle); #else initial_gs = per_cpu_offset(cpu); #endif @@ -991,12 +995,8 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle, initial_code = (unsigned long)start_secondary; initial_stack = idle->thread.sp; - /* - * Enable the espfix hack for this CPU - */ -#ifdef CONFIG_X86_ESPFIX64 + /* Enable the espfix hack for this CPU */ init_espfix_ap(cpu); -#endif /* So we see what's up */ announce_cpu(cpu, apicid); @@ -1363,6 +1363,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) set_mtrr_aps_delayed_init(); smp_quirk_init_udelay(); + + speculative_store_bypass_ht_init(); } void arch_enable_nonboot_cpus_begin(void) @@ -1530,6 +1532,7 @@ static void remove_siblinginfo(int cpu) cpumask_clear(topology_core_cpumask(cpu)); c->phys_proc_id = 0; c->cpu_core_id = 0; + c->booted_cores = 0; cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); recompute_smt_state(); } @@ -1627,6 +1630,8 @@ static inline void mwait_play_dead(void) void *mwait_ptr; int i; + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + return; if (!this_cpu_has(X86_FEATURE_MWAIT)) return; if (!this_cpu_has(X86_FEATURE_CLFLUSH)) diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 8dabd7bf1673..4565f31bd398 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -98,7 +98,7 @@ static int __save_stack_trace_reliable(struct stack_trace *trace, for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state); unwind_next_frame(&state)) { - regs = unwind_get_entry_regs(&state); + regs = unwind_get_entry_regs(&state, NULL); if (regs) { /* * Kernel mode registers on the stack indicate an @@ -160,8 +160,12 @@ int save_stack_trace_tsk_reliable(struct task_struct *tsk, { int ret; + /* + * If the task doesn't have a stack (e.g., a zombie), the stack is + * "reliably" empty. 
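topology_is_primary_thread() above delegates the real decision to apic_id_is_primary_thread(). A hedged sketch of the underlying idea, where smt_bits stands in for whatever the CPUID topology enumeration reports (an assumption for illustration, not the patch's code):

#include <stdbool.h>

static bool primary_thread_sketch(unsigned int apicid, unsigned int smt_bits)
{
	unsigned int sibling_mask = (1U << smt_bits) - 1;

	/* the primary thread of a core has all SMT-sibling bits clear */
	return (apicid & sibling_mask) == 0;
}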
+ */ if (!try_get_task_stack(tsk)) - return -EINVAL; + return 0; ret = __save_stack_trace_reliable(trace, tsk); diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index a4eb27918ceb..a2486f444073 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -138,6 +138,17 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn, return -1; set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); pte_unmap(pte); + + /* + * PTI poisons low addresses in the kernel page tables in the + * name of making them unusable for userspace. To execute + * code at such a low address, the poison must be cleared. + * + * Note: 'pgd' actually gets set in p4d_alloc() _or_ + * pud_alloc() depending on 4/5-level paging. + */ + pgd->pgd &= ~_PAGE_NX; + return 0; } diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index 879af864d99a..49a5c394f3ed 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c index 9a9c9b076955..a5b802a12212 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c @@ -93,17 +93,10 @@ static void set_tls_desc(struct task_struct *p, int idx, cpu = get_cpu(); while (n-- > 0) { - if (LDT_empty(info) || LDT_zero(info)) { + if (LDT_empty(info) || LDT_zero(info)) memset(desc, 0, sizeof(*desc)); - } else { + else fill_ldt(desc, info); - - /* - * Always set the accessed bit so that the CPU - * doesn't try to write to the (read-only) GDT. - */ - desc->type |= 1; - } ++info; ++desc; } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 5a6b8f809792..36cf8e90aa58 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -42,7 +42,6 @@ #include #endif -#include #include #include #include @@ -52,6 +51,7 @@ #include #include #include +#include #include #include #include @@ -60,6 +60,7 @@ #include #include #include +#include #ifdef CONFIG_X86_64 #include @@ -141,8 +142,7 @@ void ist_begin_non_atomic(struct pt_regs *regs) * will catch asm bugs and any attempt to use ist_preempt_enable * from double_fault. */ - BUG_ON((unsigned long)(current_top_of_stack() - - current_stack_pointer) >= THREAD_SIZE); + BUG_ON(!on_thread_stack()); preempt_enable_no_resched(); } @@ -181,7 +181,7 @@ int fixup_bug(struct pt_regs *regs, int trapnr) break; case BUG_TRAP_TYPE_WARN: - regs->ip += LEN_UD0; + regs->ip += LEN_UD2; return 1; } @@ -349,23 +349,42 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) /* * If IRET takes a non-IST fault on the espfix64 stack, then we - * end up promoting it to a doublefault. In that case, modify - * the stack to make it look like we just entered the #GP - * handler from user space, similar to bad_iret. + * end up promoting it to a doublefault. In that case, take + * advantage of the fact that we're not using the normal (TSS.sp0) + * stack right now. We can write a fake #GP(0) frame at TSS.sp0 + * and then modify our own IRET frame so that, when we return, + * we land directly at the #GP(0) vector with the stack already + * set up according to its expectations. + * + * The net result is that our #GP handler will think that we + * entered from usermode with the bad user context. * * No need for ist_enter here because we don't use RCU. 
*/ - if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY && + if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY && regs->cs == __KERNEL_CS && regs->ip == (unsigned long)native_irq_return_iret) { - struct pt_regs *normal_regs = task_pt_regs(current); + struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1; + + /* + * regs->sp points to the failing IRET frame on the + * ESPFIX64 stack. Copy it to the entry stack. This fills + * in gpregs->ss through gpregs->ip. + * + */ + memmove(&gpregs->ip, (void *)regs->sp, 5*8); + gpregs->orig_ax = 0; /* Missing (lost) #GP error code */ - /* Fake a #GP(0) from userspace. */ - memmove(&normal_regs->ip, (void *)regs->sp, 5*8); - normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */ + /* + * Adjust our frame so that we return straight to the #GP + * vector with the expected RSP value. This is safe because + * we won't enable interrupts or schedule before we invoke + * general_protection, so nothing will clobber the stack + * frame we just set up. + */ regs->ip = (unsigned long)general_protection; - regs->sp = (unsigned long)&normal_regs->orig_ax; + regs->sp = (unsigned long)&gpregs->orig_ax; return; } @@ -390,7 +409,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) * * Processors update CR2 whenever a page fault is detected. If a * second page fault occurs while an earlier page fault is being - * deliv- ered, the faulting linear address of the second fault will + * delivered, the faulting linear address of the second fault will * overwrite the contents of CR2 (replacing the previous * address). These updates to CR2 occur even if the page fault * results in a double fault or occurs during the delivery of a @@ -518,6 +537,10 @@ do_general_protection(struct pt_regs *regs, long error_code) RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); cond_local_irq_enable(regs); + if (static_cpu_has(X86_FEATURE_UMIP)) + if (user_mode(regs) && fixup_umip_exception(regs)) + return; + if (v8086_mode(regs)) { local_irq_enable(); handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); @@ -553,7 +576,6 @@ } NOKPROBE_SYMBOL(do_general_protection); -/* May run on IST stack. */ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) { #ifdef CONFIG_DYNAMIC_FTRACE @@ -568,6 +590,13 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) if (poke_int3_handler(regs)) return; + /* + * Use ist_enter despite the fact that we don't use an IST stack. + * We can be called from a kprobe in non-CONTEXT_KERNEL kernel + * mode or even during context tracking state changes. + * + * This means that we can't schedule. That's okay. + */ ist_enter(regs); RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP @@ -585,15 +614,10 @@ SIGTRAP) == NOTIFY_STOP) goto exit; - /* - * Let others (NMI) know that the debug stack is in use - * as we may switch to the interrupt stack. - */ - debug_stack_usage_inc(); cond_local_irq_enable(regs); do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); cond_local_irq_disable(regs); - debug_stack_usage_dec(); + exit: ist_exit(regs); } @@ -601,14 +625,15 @@ NOKPROBE_SYMBOL(do_int3); #ifdef CONFIG_X86_64 /* - * Help handler running on IST stack to switch off the IST stack if the - * interrupted code was in user mode.
The actual stack switch is done in - * entry_64.S + * Help handler running on a per-cpu (IST or entry trampoline) stack + * to switch to the normal thread stack if the interrupted code was in + * user mode. The actual stack switch is done in entry_64.S */ asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs) { - struct pt_regs *regs = task_pt_regs(current); - *regs = *eregs; + struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1; + if (regs != eregs) + *regs = *eregs; return regs; } NOKPROBE_SYMBOL(sync_regs); @@ -624,13 +649,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s) /* * This is called from entry_64.S early in handling a fault * caused by a bad iret to user mode. To handle the fault - * correctly, we want move our stack frame to task_pt_regs - * and we want to pretend that the exception came from the - * iret target. + * correctly, we want to move our stack frame to where it would + * be had we entered directly on the entry stack (rather than + * just below the IRET frame) and we want to pretend that the + * exception came from the IRET target. */ struct bad_iret_stack *new_stack = - container_of(task_pt_regs(current), - struct bad_iret_stack, regs); + (struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1; /* Copy the IRET target to the new stack. */ memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8); @@ -744,10 +769,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) if (!dr6 && user_mode(regs)) user_icebp = 1; - /* Catch kmemcheck conditions! */ - if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) - goto exit; - /* Store the virtualized DR6 value */ tsk->thread.debugreg6 = dr6; @@ -795,14 +816,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) debug_stack_usage_dec(); exit: -#if defined(CONFIG_X86_32) - /* - * This is the most likely code path that involves non-trivial use - * of the SYSENTER stack. Check that we haven't overrun it. - */ - WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC, - "Overran or corrupted SYSENTER stack\n"); -#endif ist_exit(regs); } NOKPROBE_SYMBOL(do_debug); @@ -820,16 +833,18 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr) char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" : "simd exception"; - if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) - return; cond_local_irq_enable(regs); if (!user_mode(regs)) { - if (!fixup_exception(regs, trapnr)) { - task->thread.error_code = error_code; - task->thread.trap_nr = trapnr; + if (fixup_exception(regs, trapnr)) + return; + + task->thread.error_code = error_code; + task->thread.trap_nr = trapnr; + + if (notify_die(DIE_TRAP, str, regs, error_code, + trapnr, SIGFPE) != NOTIFY_STOP) die(str, regs, error_code); - } return; } @@ -929,6 +944,9 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) void __init trap_init(void) { + /* Init cpu_entry_area before IST entries are set up */ + setup_cpu_entry_areas(); + idt_setup_traps(); /* @@ -936,8 +954,9 @@ void __init trap_init(void) * "sidt" instruction will not leak the location of the kernel, and * to defend the IDT against arbitrary memory write vulnerabilities. 
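The memmove(..., 5*8) copies in the double-fault and bad-IRET paths above move one complete 64-bit IRET frame. For reference, a sketch of the five 8-byte words involved; the struct name is illustrative:

struct iret_frame_sketch {
	unsigned long ip;	/* RIP    */
	unsigned long cs;	/* CS     */
	unsigned long flags;	/* RFLAGS */
	unsigned long sp;	/* RSP    */
	unsigned long ss;	/* SS     */
};	/* sizeof == 5*8 == 40 bytes, hence the memmove size */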
* It will be reloaded in cpu_init() */ - __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO); - idt_descr.address = fix_to_virt(FIX_RO_IDT); + cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table), + PAGE_KERNEL_RO); + idt_descr.address = CPU_ENTRY_AREA_RO_IDT; /* * Should be a barrier for any external CPU state: diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index ad2b925a808e..ecebc3ce19e4 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -25,6 +25,7 @@ #include #include #include +#include unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ EXPORT_SYMBOL(cpu_khz); @@ -316,7 +317,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2) hpet2 -= hpet1; tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); do_div(tmp, 1000000); - do_div(deltatsc, tmp); + deltatsc = div64_u64(deltatsc, tmp); return (unsigned long) deltatsc; } @@ -363,6 +364,20 @@ static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin) unsigned long tscmin, tscmax; int pitcnt; + if (!has_legacy_pic()) { + /* + * Relies on tsc_early_delay_calibrate() to have given us a semi- + * usable udelay(); wait for the same 50ms we would have with + * the PIT loop below. + */ + udelay(10 * USEC_PER_MSEC); + udelay(10 * USEC_PER_MSEC); + udelay(10 * USEC_PER_MSEC); + udelay(10 * USEC_PER_MSEC); + udelay(10 * USEC_PER_MSEC); + return ULONG_MAX; + } + /* Set the Gate high, disable speaker */ outb((inb(0x61) & ~0x02) | 0x01, 0x61); @@ -487,6 +502,9 @@ static unsigned long quick_pit_calibrate(void) u64 tsc, delta; unsigned long d1, d2; + if (!has_legacy_pic()) + return 0; + /* Set the Gate high, disable speaker */ outb((inb(0x61) & ~0x02) | 0x01, 0x61); @@ -602,7 +620,6 @@ unsigned long native_calibrate_tsc(void) case INTEL_FAM6_KABYLAKE_DESKTOP: crystal_khz = 24000; /* 24.0 MHz */ break; - case INTEL_FAM6_SKYLAKE_X: case INTEL_FAM6_ATOM_DENVERTON: crystal_khz = 25000; /* 25.0 MHz */ break; @@ -612,6 +629,8 @@ } } + if (crystal_khz == 0) + return 0; /* * TSC frequency determined by CPUID is a "hardware reported" * frequency and is the most accurate one we have so far. This @@ -656,6 +675,10 @@ unsigned long native_calibrate_cpu(void) unsigned long flags, latch, ms, fast_calibrate; int hpet = is_hpet_enabled(), i, loopmin; + fast_calibrate = cpu_khz_from_paravirt(); + if (fast_calibrate) + return fast_calibrate; + fast_calibrate = cpu_khz_from_cpuid(); if (fast_calibrate) return fast_calibrate; diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c new file mode 100644 index 000000000000..6e38b8f5d305 --- /dev/null +++ b/arch/x86/kernel/umip.c @@ -0,0 +1,344 @@ +/* + * umip.c Emulation for instructions protected by the Intel User-Mode + * Instruction Prevention feature + * + * Copyright (c) 2017, Intel Corporation. + * Ricardo Neri + */ + +#include +#include +#include +#include +#include +#include + +/** DOC: Emulation for User-Mode Instruction Prevention (UMIP) + * + * The User-Mode Instruction Prevention feature present in recent Intel + * processors prevents a group of instructions (sgdt, sidt, sldt, smsw, and str) + * from being executed with CPL > 0. If execution is attempted, a general + * protection fault is issued. + * + * Rather than relaying the general protection fault caused by the + * UMIP-protected instructions to user space (in the form of a SIGSEGV signal), + * the fault can be trapped and the result of such instructions emulated to + * provide dummy values.
+ * This both preserves the current kernel behavior and avoids revealing the + * system resources that UMIP intends to protect (i.e., the locations of the + * global descriptor and interrupt descriptor tables, the segment selectors of + * the local descriptor table, the value of the task state register and the + * contents of the CR0 register). + * + * This emulation is needed because certain applications (e.g., WineHQ and + * DOSEMU2) rely on this subset of instructions to function. + * + * The instructions protected by UMIP can be split into two groups: those which + * return a kernel memory address (sgdt and sidt) and those which return a + * value (sldt, str and smsw). + * + * For the instructions that return a kernel memory address, applications + * such as WineHQ rely on the result being located in the kernel memory space, + * not the actual location of the table. The result is emulated as a hard-coded + * value that lies close to the top of the kernel memory. The limits for the GDT + * and the IDT are set to zero. + * + * Given that sldt and str are not commonly used in programs that run on WineHQ + * or DOSEMU2, they are not emulated. + * + * The instruction smsw is emulated to return the value that the register CR0 + * has at boot time, as set in head_32.S. + * + * Also, emulation is provided only for 32-bit processes; 64-bit processes + * that attempt to use the instructions that UMIP protects will receive the + * SIGSEGV signal issued as a consequence of the general protection fault. + * + * Care is taken to appropriately emulate the results when segmentation is + * used. That is, rather than relying on USER_DS and USER_CS, the function + * insn_get_addr_ref() inspects the segment descriptor pointed to by the + * registers in pt_regs. This ensures that we correctly obtain the segment + * base address and the address and operand sizes even if the user space + * application uses a local descriptor table. + */ + +#define UMIP_DUMMY_GDT_BASE 0xfffe0000 +#define UMIP_DUMMY_IDT_BASE 0xffff0000 + +/* + * The SGDT and SIDT instructions store the contents of the global descriptor + * table and interrupt descriptor table registers, respectively. The destination + * is a memory operand of X+2 bytes. X bytes are used to store the base address + * of the table and 2 bytes are used to store the limit. In 32-bit processes, the + * only processes for which emulation is provided, X has a value of 4. + */ +#define UMIP_GDT_IDT_BASE_SIZE 4 +#define UMIP_GDT_IDT_LIMIT_SIZE 2 + +#define UMIP_INST_SGDT 0 /* 0F 01 /0 */ +#define UMIP_INST_SIDT 1 /* 0F 01 /1 */ +#define UMIP_INST_SMSW 3 /* 0F 01 /4 */ + +/** + * identify_insn() - Identify a UMIP-protected instruction + * @insn: Instruction structure with opcode and ModRM byte. + * + * From the instruction opcode and the reg part of the ModRM byte, identify + * the UMIP-protected instruction, if any. + * + * Return: a constant that identifies a specific UMIP-protected instruction. + * -EINVAL when it is not a UMIP-protected instruction. + */ +static int identify_insn(struct insn *insn) +{ + /* By getting modrm we also get the opcode. */ + insn_get_modrm(insn); + + /* All the instructions of interest start with 0x0f.
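To make the opcode comments above concrete, here is a hedged, stand-alone sketch of how the reg field of the ModRM byte (bits 5:3) selects among the 0F 01 group; classify_0f01() is an illustrative name, not part of the patch:

static int classify_0f01(unsigned char modrm)
{
	switch ((modrm >> 3) & 0x7) {	/* what X86_MODRM_REG() extracts */
	case 0:
		return UMIP_INST_SGDT;	/* 0F 01 /0 */
	case 1:
		return UMIP_INST_SIDT;	/* 0F 01 /1 */
	case 4:
		return UMIP_INST_SMSW;	/* 0F 01 /4 */
	default:
		return -EINVAL;		/* remaining /reg encodings are not emulated */
	}
}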
*/ + if (insn->opcode.bytes[0] != 0xf) + return -EINVAL; + + if (insn->opcode.bytes[1] == 0x1) { + switch (X86_MODRM_REG(insn->modrm.value)) { + case 0: + return UMIP_INST_SGDT; + case 1: + return UMIP_INST_SIDT; + case 4: + return UMIP_INST_SMSW; + default: + return -EINVAL; + } + } + /* SLDT and STR are not emulated */ + return -EINVAL; +} + +/** + * emulate_umip_insn() - Emulate UMIP instructions with dummy values + * @insn: Instruction structure with operands + * @umip_inst: Instruction to emulate + * @data: Buffer into which the dummy values will be copied + * @data_size: Size of the emulated result + * + * Emulate an instruction protected by UMIP. The result of the emulation + * is saved in the provided buffer. The size of the results depends on both + * the instruction and type of operand (register vs memory address). Thus, + * the size of the result needs to be updated. + * + * Return: 0 on success, -EINVAL on error while emulating. + */ +static int emulate_umip_insn(struct insn *insn, int umip_inst, + unsigned char *data, int *data_size) +{ + unsigned long dummy_base_addr, dummy_value; + unsigned short dummy_limit = 0; + + if (!data || !data_size || !insn) + return -EINVAL; + /* + * These two instructions return the base address and limit of the + * global and interrupt descriptor table, respectively. According to the + * Intel Software Developer's Manual, the base address can be 24-bit, + * 32-bit or 64-bit. Limit is always 16-bit. If the operand size is + * 16-bit, the returned value of the base address is supposed to be a + * zero-extended 24-bit number. However, it seems that a 32-bit number + * is always returned irrespective of the operand size. + */ + + if (umip_inst == UMIP_INST_SGDT || umip_inst == UMIP_INST_SIDT) { + /* SGDT and SIDT do not use register operands. */ + if (X86_MODRM_MOD(insn->modrm.value) == 3) + return -EINVAL; + + if (umip_inst == UMIP_INST_SGDT) + dummy_base_addr = UMIP_DUMMY_GDT_BASE; + else + dummy_base_addr = UMIP_DUMMY_IDT_BASE; + + *data_size = UMIP_GDT_IDT_LIMIT_SIZE + UMIP_GDT_IDT_BASE_SIZE; + + memcpy(data + 2, &dummy_base_addr, UMIP_GDT_IDT_BASE_SIZE); + memcpy(data, &dummy_limit, UMIP_GDT_IDT_LIMIT_SIZE); + + } else if (umip_inst == UMIP_INST_SMSW) { + dummy_value = CR0_STATE; + + /* + * Even though the CR0 register has 4 bytes, the number + * of bytes to be copied in the result buffer is determined + * by whether the operand is a register or a memory location. + * If the operand is a register, return as many bytes as the + * operand size. If the operand is memory, return only the two + * least significant bytes of CR0. + */ + if (X86_MODRM_MOD(insn->modrm.value) == 3) + *data_size = insn->opnd_bytes; + else + *data_size = 2; + + memcpy(data, &dummy_value, *data_size); + /* STR and SLDT are not emulated */ + } else { + return -EINVAL; + } + + return 0; +} + +/** + * force_sig_info_umip_fault() - Force a SIGSEGV with SEGV_MAPERR + * @addr: Address that caused the signal + * @regs: Register set containing the instruction pointer + * + * Force a SIGSEGV signal with SEGV_MAPERR as the error code. This function is + * intended to be used to provide a segmentation fault when the result of the + * UMIP emulation could not be copied to the user space memory.
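A worked example of the 6-byte SGDT/SIDT result assembled above for 32-bit processes: a 2-byte limit of zero followed by the 4-byte dummy base. The helper below is a hedged user-space illustration, not kernel code:

#include <stdint.h>
#include <string.h>

static void build_dummy_sgdt(unsigned char out[6])
{
	uint16_t limit = 0;		/* dummy limit, per the comment above */
	uint32_t base  = 0xfffe0000;	/* UMIP_DUMMY_GDT_BASE               */

	memcpy(out, &limit, 2);		/* bytes 0-1: table limit */
	memcpy(out + 2, &base, 4);	/* bytes 2-5: table base  */
}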
+ * + * Return: none + */ +static void force_sig_info_umip_fault(void __user *addr, struct pt_regs *regs) +{ + siginfo_t info; + struct task_struct *tsk = current; + + tsk->thread.cr2 = (unsigned long)addr; + tsk->thread.error_code = X86_PF_USER | X86_PF_WRITE; + tsk->thread.trap_nr = X86_TRAP_PF; + + info.si_signo = SIGSEGV; + info.si_errno = 0; + info.si_code = SEGV_MAPERR; + info.si_addr = addr; + force_sig_info(SIGSEGV, &info, tsk); + + if (!(show_unhandled_signals && unhandled_signal(tsk, SIGSEGV))) + return; + + pr_err_ratelimited("%s[%d] umip emulation segfault ip:%lx sp:%lx error:%x in %lx\n", + tsk->comm, task_pid_nr(tsk), regs->ip, + regs->sp, X86_PF_USER | X86_PF_WRITE, + regs->ip); +} + +/** + * fixup_umip_exception() - Fixup #GP faults caused by UMIP + * @regs: Registers as saved when entering the #GP trap + * + * The instructions sgdt, sidt, str, smsw and sldt cause a general protection + * fault if executed with CPL > 0 (i.e., from user space). If the offending + * user-space process is 32-bit, this function fixes the exception up and + * provides dummy values for the sgdt, sidt and smsw; str and sldt are not + * fixed up. 64-bit user-space processes are likewise not fixed up. + * + * If operands are memory addresses, results are copied to user-space memory + * as indicated by the instruction pointed to by EIP, using the registers + * indicated in the instruction operands. If operands are registers, results + * are copied into the context that was saved when entering kernel mode. + * + * Return: true if emulation was successful; false if not. + */ +bool fixup_umip_exception(struct pt_regs *regs) +{ + int not_copied, nr_copied, reg_offset, dummy_data_size, umip_inst; + /* 10 bytes is the maximum size of the result of UMIP instructions */ + unsigned char dummy_data[10] = { 0 }; + unsigned long seg_base, *reg_addr; + unsigned char buf[MAX_INSN_SIZE]; + void __user *uaddr; + struct insn insn; + char seg_defs; + + /* Do not emulate 64-bit processes. */ + if (user_64bit_mode(regs)) + return false; + + /* + * Use the segment base in case user space used a different code + * segment, either in protected (e.g., from an LDT), virtual-8086 + * or long (via the FS or GS registers) modes. In most cases + * seg_base will be zero, as in USER_CS. + */ + seg_base = insn_get_seg_base(regs, NULL, offsetof(struct pt_regs, ip)); + if (seg_base == -1L) + return false; + + not_copied = copy_from_user(buf, (void __user *)(seg_base + regs->ip), + sizeof(buf)); + nr_copied = sizeof(buf) - not_copied; + + /* + * The copy_from_user above could have failed if user code is protected + * by a memory protection key. Give up on emulation in such a case. + * Should we issue a page fault? + */ + if (!nr_copied) + return false; + + insn_init(&insn, buf, nr_copied, user_64bit_mode(regs)); + + /* + * Override the default operand and address sizes with what is specified + * in the code segment descriptor. The instruction decoder only sets + * the address size to either 4 or 8 bytes and does nothing + * for the operand bytes. This is OK for most cases, but we could + * have special cases where, for instance, a 16-bit code segment + * descriptor is used. + * If there is an address override prefix, the instruction decoder + * correctly updates these values, even for 16-bit defaults.
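The seg_defs byte consumed just below packs both code-segment defaults into one value. A hedged sketch of the assumed encoding (address size in the high nibble, operand size in the low nibble, matching the way the INSN_CODE_SEG_*_SZ accessors are used); these helper names are illustrative:

static unsigned char pack_code_seg_defaults(unsigned char addr_bytes,
					    unsigned char opnd_bytes)
{
	return (unsigned char)((addr_bytes << 4) | opnd_bytes);
}

static unsigned char addr_sz_sketch(unsigned char seg_defs)
{
	return seg_defs >> 4;	/* what INSN_CODE_SEG_ADDR_SZ() is assumed to read */
}

static unsigned char opnd_sz_sketch(unsigned char seg_defs)
{
	return seg_defs & 0xf;	/* what INSN_CODE_SEG_OPND_SZ() is assumed to read */
}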
+ */ + seg_defs = insn_get_code_seg_defaults(regs); + if (seg_defs == -EINVAL) + return false; + + insn.addr_bytes = (unsigned char)INSN_CODE_SEG_ADDR_SZ(seg_defs); + insn.opnd_bytes = (unsigned char)INSN_CODE_SEG_OPND_SZ(seg_defs); + + insn_get_length(&insn); + if (nr_copied < insn.length) + return false; + + umip_inst = identify_insn(&insn); + if (umip_inst < 0) + return false; + + if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size)) + return false; + + /* + * If operand is a register, write result to the copy of the register + * value that was pushed to the stack when entering into kernel mode. + * Upon exit, the value we write will be restored to the actual hardware + * register. + */ + if (X86_MODRM_MOD(insn.modrm.value) == 3) { + reg_offset = insn_get_modrm_rm_off(&insn, regs); + + /* + * Negative values are usually errors. In memory addressing, + * the exception is -EDOM. Since we expect a register operand, + * all negative values are errors. + */ + if (reg_offset < 0) + return false; + + reg_addr = (unsigned long *)((unsigned long)regs + reg_offset); + memcpy(reg_addr, dummy_data, dummy_data_size); + } else { + uaddr = insn_get_addr_ref(&insn, regs); + if ((unsigned long)uaddr == -1L) + return false; + + nr_copied = copy_to_user(uaddr, dummy_data, dummy_data_size); + if (nr_copied > 0) { + /* + * If copy fails, send a signal and tell caller that + * fault was fixed up. + */ + force_sig_info_umip_fault(uaddr, regs); + return true; + } + } + + /* increase IP to let the program keep going */ + regs->ip += insn.length; + return true; +} diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index a3f973b2c97a..be86a865087a 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -253,22 +253,15 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state) return NULL; } -static bool stack_access_ok(struct unwind_state *state, unsigned long addr, +static bool stack_access_ok(struct unwind_state *state, unsigned long _addr, size_t len) { struct stack_info *info = &state->stack_info; + void *addr = (void *)_addr; - /* - * If the address isn't on the current stack, switch to the next one. - * - * We may have to traverse multiple stacks to deal with the possibility - * that info->next_sp could point to an empty stack and the address - * could be on a subsequent stack. - */ - while (!on_stack(info, (void *)addr, len)) - if (get_stack_info(info->next_sp, state->task, info, - &state->stack_mask)) - return false; + if (!on_stack(info, addr, len) && + (get_stack_info(addr, state->task, info, &state->stack_mask))) + return false; return true; } @@ -283,42 +276,32 @@ static bool deref_stack_reg(struct unwind_state *state, unsigned long addr, return true; } -#define REGS_SIZE (sizeof(struct pt_regs)) -#define SP_OFFSET (offsetof(struct pt_regs, sp)) -#define IRET_REGS_SIZE (REGS_SIZE - offsetof(struct pt_regs, ip)) -#define IRET_SP_OFFSET (SP_OFFSET - offsetof(struct pt_regs, ip)) - static bool deref_stack_regs(struct unwind_state *state, unsigned long addr, - unsigned long *ip, unsigned long *sp, bool full) + unsigned long *ip, unsigned long *sp) { - size_t regs_size = full ? REGS_SIZE : IRET_REGS_SIZE; - size_t sp_offset = full ? 
SP_OFFSET : IRET_SP_OFFSET; - struct pt_regs *regs = (struct pt_regs *)(addr + regs_size - REGS_SIZE); - - if (IS_ENABLED(CONFIG_X86_64)) { - if (!stack_access_ok(state, addr, regs_size)) - return false; + struct pt_regs *regs = (struct pt_regs *)addr; - *ip = regs->ip; - *sp = regs->sp; + /* x86-32 support will be more complicated due to the &regs->sp hack */ + BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32)); - return true; - } - - if (!stack_access_ok(state, addr, sp_offset)) + if (!stack_access_ok(state, addr, sizeof(struct pt_regs))) return false; *ip = regs->ip; + *sp = regs->sp; + return true; +} - if (user_mode(regs)) { - if (!stack_access_ok(state, addr + sp_offset, - REGS_SIZE - SP_OFFSET)) - return false; +static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr, + unsigned long *ip, unsigned long *sp) +{ + struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET; - *sp = regs->sp; - } else - *sp = (unsigned long)&regs->sp; + if (!stack_access_ok(state, addr, IRET_FRAME_SIZE)) + return false; + *ip = regs->ip; + *sp = regs->sp; return true; } @@ -327,7 +310,6 @@ bool unwind_next_frame(struct unwind_state *state) unsigned long ip_p, sp, orig_ip, prev_sp = state->sp; enum stack_type prev_type = state->stack_info.type; struct orc_entry *orc; - struct pt_regs *ptregs; bool indirect = false; if (unwind_done(state)) @@ -435,7 +417,7 @@ bool unwind_next_frame(struct unwind_state *state) break; case ORC_TYPE_REGS: - if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) { + if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) { orc_warn("can't dereference registers at %p for ip %pB\n", (void *)sp, (void *)orig_ip); goto done; @@ -447,20 +429,14 @@ break; case ORC_TYPE_REGS_IRET: - if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) { + if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) { orc_warn("can't dereference iret registers at %p for ip %pB\n", (void *)sp, (void *)orig_ip); goto done; } - ptregs = container_of((void *)sp, struct pt_regs, ip); - if ((unsigned long)ptregs >= prev_sp && - on_stack(&state->stack_info, ptregs, REGS_SIZE)) { - state->regs = ptregs; - state->full_regs = false; - } else - state->regs = NULL; - + state->regs = (void *)sp - IRET_FRAME_OFFSET; + state->full_regs = false; state->signal = true; break; @@ -553,8 +529,18 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, } if (get_stack_info((unsigned long *)state->sp, state->task, - &state->stack_info, &state->stack_mask)) - return; + &state->stack_info, &state->stack_mask)) { + /* + * We weren't on a valid stack. It's possible that + * we overflowed a valid stack into a guard page. + * See if the next page up is valid so that we can + * generate some kind of backtrace if this happens.
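deref_stack_iret_regs() above treats an IRET-only frame as the tail of struct pt_regs, from ->ip through ->ss. A hedged sketch of the geometry, assuming IRET_FRAME_OFFSET and IRET_FRAME_SIZE are defined in these terms (the real definitions live in the ptrace headers):

#include <stddef.h>

#define IRET_FRAME_OFFSET_SKETCH	offsetof(struct pt_regs, ip)
#define IRET_FRAME_SIZE_SKETCH		(sizeof(struct pt_regs) - \
					 IRET_FRAME_OFFSET_SKETCH)

/* given the address of the IRET words, recover the enclosing pt_regs */
static struct pt_regs *iret_to_pt_regs_sketch(unsigned long addr)
{
	return (struct pt_regs *)(addr - IRET_FRAME_OFFSET_SKETCH);
}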
+ */ + void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp); + if (get_stack_info(next_page, state->task, &state->stack_info, + &state->stack_mask)) + return; + } /* * The caller can provide the address of the first frame directly diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 495c776de4b4..d1ab52727cd5 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -271,12 +271,15 @@ static bool is_prefix_bad(struct insn *insn) int i; for (i = 0; i < insn->prefixes.nbytes; i++) { - switch (insn->prefixes.bytes[i]) { - case 0x26: /* INAT_PFX_ES */ - case 0x2E: /* INAT_PFX_CS */ - case 0x36: /* INAT_PFX_DS */ - case 0x3E: /* INAT_PFX_SS */ - case 0xF0: /* INAT_PFX_LOCK */ + insn_attr_t attr; + + attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); + switch (attr) { + case INAT_MAKE_PREFIX(INAT_PFX_ES): + case INAT_MAKE_PREFIX(INAT_PFX_CS): + case INAT_MAKE_PREFIX(INAT_PFX_DS): + case INAT_MAKE_PREFIX(INAT_PFX_SS): + case INAT_MAKE_PREFIX(INAT_PFX_LOCK): return true; } } @@ -290,12 +293,16 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64); /* has the side-effect of processing the entire instruction */ insn_get_length(insn); - if (WARN_ON_ONCE(!insn_complete(insn))) + if (!insn_complete(insn)) return -ENOEXEC; if (is_prefix_bad(insn)) return -ENOTSUPP; + /* We should not singlestep on the exception masking instructions */ + if (insn_masking_exception(insn)) + return -ENOTSUPP; + if (x86_64) good_insns = good_insns_64; else diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S index 014ea59aa153..3d3c2f71f617 100644 --- a/arch/x86/kernel/verify_cpu.S +++ b/arch/x86/kernel/verify_cpu.S @@ -33,7 +33,7 @@ #include #include -verify_cpu: +ENTRY(verify_cpu) pushf # Save caller passed flags push $0 # Kill any dangerous flags popf @@ -139,3 +139,4 @@ verify_cpu: popf # Restore caller passed flags xorl %eax, %eax ret +ENDPROC(verify_cpu) diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 68244742ecb0..9d0b5af7db91 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -55,6 +55,7 @@ #include #include #include +#include /* * Known problems: @@ -94,7 +95,6 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval) { - struct tss_struct *tss; struct task_struct *tsk = current; struct vm86plus_struct __user *user; struct vm86 *vm86 = current->thread.vm86; @@ -146,12 +146,13 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval) do_exit(SIGSEGV); } - tss = &per_cpu(cpu_tss, get_cpu()); + preempt_disable(); tsk->thread.sp0 = vm86->saved_sp0; tsk->thread.sysenter_cs = __KERNEL_CS; - load_sp0(tss, &tsk->thread); + update_sp0(tsk); + refresh_sysenter_cs(&tsk->thread); vm86->saved_sp0 = 0; - put_cpu(); + preempt_enable(); memcpy(®s->pt, &vm86->regs32, sizeof(struct pt_regs)); @@ -237,7 +238,6 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg) static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) { - struct tss_struct *tss; struct task_struct *tsk = current; struct vm86 *vm86 = tsk->thread.vm86; struct kernel_vm86_regs vm86regs; @@ -365,15 +365,17 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) vm86->saved_sp0 = tsk->thread.sp0; lazy_save_gs(vm86->regs32.gs); - tss = &per_cpu(cpu_tss, get_cpu()); /* make room for real-mode segments */ + preempt_disable(); tsk->thread.sp0 += 16; - if (static_cpu_has(X86_FEATURE_SEP)) + if 
(static_cpu_has(X86_FEATURE_SEP)) { tsk->thread.sysenter_cs = 0; + refresh_sysenter_cs(&tsk->thread); + } - load_sp0(tss, &tsk->thread); - put_cpu(); + update_sp0(tsk); + preempt_enable(); if (vm86->flags & VM86_SCREEN_BITMAP) mark_screen_rdonly(tsk->mm); @@ -725,7 +727,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) return; check_vip: - if (VEFLAGS & X86_EFLAGS_VIP) { + if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) == + (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) { save_v86_state(regs, VM86_STI); return; } diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index a4009fb9be87..b854ebf5851b 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -61,11 +61,17 @@ jiffies_64 = jiffies; . = ALIGN(HPAGE_SIZE); \ __end_rodata_hpage_align = .; +#define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE); +#define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE); + #else #define X64_ALIGN_RODATA_BEGIN #define X64_ALIGN_RODATA_END +#define ALIGN_ENTRY_TEXT_BEGIN +#define ALIGN_ENTRY_TEXT_END + #endif PHDRS { @@ -102,11 +108,30 @@ SECTIONS CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT + ALIGN_ENTRY_TEXT_BEGIN ENTRY_TEXT IRQENTRY_TEXT + ALIGN_ENTRY_TEXT_END SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) + +#ifdef CONFIG_X86_64 + . = ALIGN(PAGE_SIZE); + VMLINUX_SYMBOL(__entry_trampoline_start) = .; + _entry_trampoline = .; + *(.entry_trampoline) + . = ALIGN(PAGE_SIZE); + VMLINUX_SYMBOL(__entry_trampoline_end) = .; + ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big"); +#endif + +#ifdef CONFIG_RETPOLINE + __indirect_thunk_start = .; + *(.text.__x86.indirect_thunk) + __indirect_thunk_end = .; +#endif + /* End of text section */ _etext = .; } :text = 0x9090 diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index a088b2c47f73..5b2d10c1973a 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -28,6 +28,8 @@ void x86_init_noop(void) { } void __init x86_init_uint_noop(unsigned int unused) { } int __init iommu_init_noop(void) { return 0; } void iommu_shutdown_noop(void) { } +bool __init bool_x86_init_noop(void) { return false; } +void x86_op_int_noop(int cpu) { } /* * The platform setup functions are preset with the default functions @@ -81,6 +83,12 @@ struct x86_init_ops x86_init __initdata = { .init_irq = x86_default_pci_init_irq, .fixup_irqs = x86_default_pci_fixup_irqs, }, + + .hyper = { + .init_platform = x86_init_noop, + .x2apic_available = bool_x86_init_noop, + .init_mem_mapping = x86_init_noop, + }, }; struct x86_cpuinit_ops x86_cpuinit = { @@ -101,6 +109,7 @@ struct x86_platform_ops x86_platform __ro_after_init = { .get_nmi_reason = default_get_nmi_reason, .save_sched_clock_state = tsc_save_sched_clock_state, .restore_sched_clock_state = tsc_restore_sched_clock_state, + .hyper.pin_vcpu = x86_op_int_noop, }; EXPORT_SYMBOL_GPL(x86_platform); diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 0099e10eb045..d1f5c744142b 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -67,9 +67,7 @@ u64 kvm_supported_xcr0(void) #define F(x) bit(X86_FEATURE_##x) -/* These are scattered features in cpufeatures.h. 
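The vm86 change above tightens the STI check from "VIP set" to "VIP and VIF both set", i.e. an interrupt is pending and the guest has virtual interrupts enabled. A hedged stand-alone illustration; the helper name and the _SKETCH constants are not from the patch (the values mirror the architectural EFLAGS bits 20 and 19):

#include <stdbool.h>

#define X86_EFLAGS_VIF_SKETCH	0x00080000UL	/* virtual interrupt flag    */
#define X86_EFLAGS_VIP_SKETCH	0x00100000UL	/* virtual interrupt pending */

static bool should_exit_on_sti(unsigned long veflags)
{
	const unsigned long vip_vif = X86_EFLAGS_VIP_SKETCH | X86_EFLAGS_VIF_SKETCH;

	/* VIP alone no longer forces save_v86_state(regs, VM86_STI) */
	return (veflags & vip_vif) == vip_vif;
}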
*/ -#define KVM_CPUID_BIT_AVX512_4VNNIW 2 -#define KVM_CPUID_BIT_AVX512_4FMAPS 3 +/* For scattered features from cpufeatures.h; we currently expose none */ #define KF(x) bit(KVM_CPUID_BIT_##x) int kvm_update_cpuid(struct kvm_vcpu *vcpu) @@ -367,6 +365,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) | 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM); + /* cpuid 0x80000008.ebx */ + const u32 kvm_cpuid_8000_0008_ebx_x86_features = + F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD); + /* cpuid 0xC0000001.edx */ const u32 kvm_cpuid_C000_0001_edx_x86_features = F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) | @@ -392,7 +394,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, /* cpuid 7.0.edx*/ const u32 kvm_cpuid_7_0_edx_x86_features = - KF(AVX512_4VNNIW) | KF(AVX512_4FMAPS); + F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | + F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES); /* all calls to cpuid_count() should be made on the same cpu */ get_cpu(); @@ -477,7 +480,12 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) entry->ecx &= ~F(PKU); entry->edx &= kvm_cpuid_7_0_edx_x86_features; - entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX); + cpuid_mask(&entry->edx, CPUID_7_EDX); + /* + * We emulate ARCH_CAPABILITIES in software even + * if the host doesn't support it. + */ + entry->edx |= F(ARCH_CAPABILITIES); } else { entry->ebx = 0; entry->ecx = 0; @@ -594,7 +602,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, (1 << KVM_FEATURE_ASYNC_PF) | (1 << KVM_FEATURE_PV_EOI) | (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) | - (1 << KVM_FEATURE_PV_UNHALT); + (1 << KVM_FEATURE_PV_UNHALT) | + (1 << KVM_FEATURE_ASYNC_PF_VMEXIT); if (sched_info_on()) entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); @@ -627,7 +636,21 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, if (!g_phys_as) g_phys_as = phys_as; entry->eax = g_phys_as | (virt_as << 8); - entry->ebx = entry->edx = 0; + entry->edx = 0; + /* + * IBRS, IBPB and VIRT_SSBD aren't necessarily present in + * hardware cpuid + */ + if (boot_cpu_has(X86_FEATURE_AMD_IBPB)) + entry->ebx |= F(AMD_IBPB); + if (boot_cpu_has(X86_FEATURE_AMD_IBRS)) + entry->ebx |= F(AMD_IBRS); + if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) + entry->ebx |= F(VIRT_SSBD); + entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features; + cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX); + if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) + entry->ebx |= F(VIRT_SSBD); break; } case 0x80000019: diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index cdc70a3a6583..9a327d5b6d1f 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -44,7 +44,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX}, [CPUID_1_ECX] = { 1, 0, CPUID_ECX}, [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX}, - [CPUID_8000_0001_ECX] = {0xc0000001, 0, CPUID_ECX}, + [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX}, [CPUID_7_0_EBX] = { 7, 0, CPUID_EBX}, [CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX}, [CPUID_F_0_EDX] = { 0xf, 0, CPUID_EDX}, @@ -54,6 +54,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX}, [CPUID_7_ECX] = { 7, 0, CPUID_ECX}, [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX}, + [CPUID_7_EDX] = { 7, 0, CPUID_EDX}, }; static __always_inline struct cpuid_reg 
x86_feature_cpuid(unsigned x86_feature) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index d90cdc77e077..5f758568fc44 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "x86.h" #include "tss.h" @@ -810,6 +811,19 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) return assign_eip_near(ctxt, ctxt->_eip + rel); } +static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear, + void *data, unsigned size) +{ + return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true); +} + +static int linear_write_system(struct x86_emulate_ctxt *ctxt, + ulong linear, void *data, + unsigned int size) +{ + return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true); +} + static int segmented_read_std(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, @@ -821,7 +835,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt, rc = linearize(ctxt, addr, size, false, &linear); if (rc != X86EMUL_CONTINUE) return rc; - return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); + return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false); } static int segmented_write_std(struct x86_emulate_ctxt *ctxt, @@ -835,7 +849,7 @@ static int segmented_write_std(struct x86_emulate_ctxt *ctxt, rc = linearize(ctxt, addr, size, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; - return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception); + return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false); } /* @@ -1021,8 +1035,8 @@ static __always_inline u8 test_cc(unsigned int condition, unsigned long flags) void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; - asm("push %[flags]; popf; call *%[fastop]" - : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags)); + asm("push %[flags]; popf; " CALL_NOSPEC + : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags)); return rc; } @@ -1508,8 +1522,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt, return emulate_gp(ctxt, index << 3 | 0x2); addr = dt.address + index * 8; - return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, - &ctxt->exception); + return linear_read_system(ctxt, addr, desc, sizeof *desc); } static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, @@ -1572,8 +1585,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, if (rc != X86EMUL_CONTINUE) return rc; - return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc), - &ctxt->exception); + return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc)); } /* allowed just for 8 bytes segments */ @@ -1587,8 +1599,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, if (rc != X86EMUL_CONTINUE) return rc; - return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc, - &ctxt->exception); + return linear_write_system(ctxt, addr, desc, sizeof *desc); } static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, @@ -1749,8 +1760,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, return ret; } } else if (ctxt->mode == X86EMUL_MODE_PROT64) { - ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3, - sizeof(base3), &ctxt->exception); + ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3)); if (ret != X86EMUL_CONTINUE) return ret; if (emul_is_noncanonical_address(get_desc_base(&seg_desc) | @@ -2063,11 +2073,11 @@ 
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) eip_addr = dt.address + (irq << 2); cs_addr = dt.address + (irq << 2) + 2; - rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception); + rc = linear_read_system(ctxt, cs_addr, &cs, 2); if (rc != X86EMUL_CONTINUE) return rc; - rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception); + rc = linear_read_system(ctxt, eip_addr, &eip, 2); if (rc != X86EMUL_CONTINUE) return rc; @@ -2404,9 +2414,21 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n) } static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, - u64 cr0, u64 cr4) + u64 cr0, u64 cr3, u64 cr4) { int bad; + u64 pcid; + + /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */ + pcid = 0; + if (cr4 & X86_CR4_PCIDE) { + pcid = cr3 & 0xfff; + cr3 &= ~0xfff; + } + + bad = ctxt->ops->set_cr(ctxt, 3, cr3); + if (bad) + return X86EMUL_UNHANDLEABLE; /* * First enable PAE, long mode needs it before CR0.PG = 1 is set. @@ -2425,6 +2447,12 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, bad = ctxt->ops->set_cr(ctxt, 4, cr4); if (bad) return X86EMUL_UNHANDLEABLE; + if (pcid) { + bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid); + if (bad) + return X86EMUL_UNHANDLEABLE; + } + } return X86EMUL_CONTINUE; @@ -2435,11 +2463,11 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase) struct desc_struct desc; struct desc_ptr dt; u16 selector; - u32 val, cr0, cr4; + u32 val, cr0, cr3, cr4; int i; cr0 = GET_SMSTATE(u32, smbase, 0x7ffc); - ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8)); + cr3 = GET_SMSTATE(u32, smbase, 0x7ff8); ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED; ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0); @@ -2481,14 +2509,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase) ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8)); - return rsm_enter_protected_mode(ctxt, cr0, cr4); + return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); } static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) { struct desc_struct desc; struct desc_ptr dt; - u64 val, cr0, cr4; + u64 val, cr0, cr3, cr4; u32 base3; u16 selector; int i, r; @@ -2505,7 +2533,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); cr0 = GET_SMSTATE(u64, smbase, 0x7f58); - ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50)); + cr3 = GET_SMSTATE(u64, smbase, 0x7f50); cr4 = GET_SMSTATE(u64, smbase, 0x7f48); ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00)); val = GET_SMSTATE(u64, smbase, 0x7ed0); @@ -2533,7 +2561,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) dt.address = GET_SMSTATE(u64, smbase, 0x7e68); ctxt->ops->set_gdt(ctxt, &dt); - r = rsm_enter_protected_mode(ctxt, cr0, cr4); + r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); if (r != X86EMUL_CONTINUE) return r; @@ -2893,12 +2921,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, #ifdef CONFIG_X86_64 base |= ((u64)base3) << 32; #endif - r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL); + r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true); if (r != X86EMUL_CONTINUE) return false; if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) return false; - r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); + r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true); 
if (r != X86EMUL_CONTINUE) return false; if ((perm >> bit_idx) & mask) @@ -3027,35 +3055,30 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { - const struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_16 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); - ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, - &ctxt->exception); + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); if (ret != X86EMUL_CONTINUE) return ret; save_state_to_tss16(ctxt, &tss_seg); - ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, - &ctxt->exception); + ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); if (ret != X86EMUL_CONTINUE) return ret; - ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, - &ctxt->exception); + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg); if (ret != X86EMUL_CONTINUE) return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; - ret = ops->write_std(ctxt, new_tss_base, - &tss_seg.prev_task_link, - sizeof tss_seg.prev_task_link, - &ctxt->exception); + ret = linear_write_system(ctxt, new_tss_base, + &tss_seg.prev_task_link, + sizeof tss_seg.prev_task_link); if (ret != X86EMUL_CONTINUE) return ret; } @@ -3171,38 +3194,34 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { - const struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_32 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); u32 eip_offset = offsetof(struct tss_segment_32, eip); u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector); - ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, - &ctxt->exception); + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); if (ret != X86EMUL_CONTINUE) return ret; save_state_to_tss32(ctxt, &tss_seg); /* Only GP registers and segment selectors are saved */ - ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip, - ldt_sel_offset - eip_offset, &ctxt->exception); + ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip, + ldt_sel_offset - eip_offset); if (ret != X86EMUL_CONTINUE) return ret; - ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, - &ctxt->exception); + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg); if (ret != X86EMUL_CONTINUE) return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; - ret = ops->write_std(ctxt, new_tss_base, - &tss_seg.prev_task_link, - sizeof tss_seg.prev_task_link, - &ctxt->exception); + ret = linear_write_system(ctxt, new_tss_base, + &tss_seg.prev_task_link, + sizeof tss_seg.prev_task_link); if (ret != X86EMUL_CONTINUE) return ret; } @@ -4005,6 +4024,26 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt) fxstate_size(ctxt)); } +/* + * FXRSTOR might restore XMM registers not provided by the guest. Fill + * in the host registers (via FXSAVE) instead, so they won't be modified. + * (preemption has to stay disabled until FXRSTOR). + * + * Use noinline to keep the stack for other functions called by callers small. 
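Before the fxregs_fixup() helper that follows: a hedged user-space sketch of the splice it performs, keeping the guest-provided head of the FXSAVE image and filling the tail from a fresh host snapshot so FXRSTOR never loads stale XMM state. Names and the 512-byte size are illustrative:

#include <string.h>

#define FXSTATE_SIZE_SKETCH	512	/* full legacy FXSAVE image */

static void splice_fx_images(unsigned char *guest_img,
			     const unsigned char *host_img,
			     size_t used_size)
{
	/* keep guest bytes [0, used_size), take host bytes from there on */
	memcpy(guest_img + used_size, host_img + used_size,
	       FXSTATE_SIZE_SKETCH - used_size);
}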
+ */ +static noinline int fxregs_fixup(struct fxregs_state *fx_state, + const size_t used_size) +{ + struct fxregs_state fx_tmp; + int rc; + + rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp)); + memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size, + __fxstate_size(16) - used_size); + + return rc; +} + static int em_fxrstor(struct x86_emulate_ctxt *ctxt) { struct fxregs_state fx_state; @@ -4015,19 +4054,19 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt) if (rc != X86EMUL_CONTINUE) return rc; + size = fxstate_size(ctxt); + rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size); + if (rc != X86EMUL_CONTINUE) + return rc; + ctxt->ops->get_fpu(ctxt); - size = fxstate_size(ctxt); if (size < __fxstate_size(16)) { - rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state)); + rc = fxregs_fixup(&fx_state, size); if (rc != X86EMUL_CONTINUE) goto out; } - rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size); - if (rc != X86EMUL_CONTINUE) - goto out; - if (fx_state.mxcsr >> 16) { rc = emulate_gp(ctxt, 0); goto out; @@ -4113,7 +4152,9 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt) maxphyaddr = eax & 0xff; else maxphyaddr = 36; - rsvd = rsvd_bits(maxphyaddr, 62); + rsvd = rsvd_bits(maxphyaddr, 63); + if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE) + rsvd &= ~CR3_PCID_INVD; } if (new_val & rsvd) @@ -4991,6 +5032,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) bool op_prefix = false; bool has_seg_override = false; struct opcode opcode; + u16 dummy; + struct desc_struct desc; ctxt->memop.type = OP_NONE; ctxt->memopp = NULL; @@ -5009,6 +5052,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) switch (mode) { case X86EMUL_MODE_REAL: case X86EMUL_MODE_VM86: + def_op_bytes = def_ad_bytes = 2; + ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS); + if (desc.d) + def_op_bytes = def_ad_bytes = 4; + break; case X86EMUL_MODE_PROT16: def_op_bytes = def_ad_bytes = 2; break; @@ -5305,9 +5353,9 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) if (!(ctxt->d & ByteOp)) fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; - asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" + asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n" : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), - [fastop]"+S"(fop), ASM_CALL_CONSTRAINT + [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT : "c"(ctxt->src2.val)); ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index dc97f2544b6f..5d13abecb384 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1223,7 +1223,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) struct kvm_run *run = vcpu->run; kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result); - return 1; + return kvm_skip_emulated_instruction(vcpu); } int kvm_hv_hypercall(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index bdff437acbcb..9d270ba9643c 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -257,8 +257,7 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors) index == RTC_GSI) { if (kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id, e->fields.dest_mode) || - (e->fields.trig_mode == IOAPIC_EDGE_TRIG && - kvm_apic_pending_eoi(vcpu, e->fields.vector))) + kvm_apic_pending_eoi(vcpu, e->fields.vector)) __set_bit(e->fields.vector, 
ioapic_handled_vectors); } @@ -277,6 +276,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) { unsigned index; bool mask_before, mask_after; + int old_remote_irr, old_delivery_status; union kvm_ioapic_redirect_entry *e; switch (ioapic->ioregsel) { @@ -299,14 +299,28 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) return; e = &ioapic->redirtbl[index]; mask_before = e->fields.mask; + /* Preserve read-only fields */ + old_remote_irr = e->fields.remote_irr; + old_delivery_status = e->fields.delivery_status; if (ioapic->ioregsel & 1) { e->bits &= 0xffffffff; e->bits |= (u64) val << 32; } else { e->bits &= ~0xffffffffULL; e->bits |= (u32) val; - e->fields.remote_irr = 0; } + e->fields.remote_irr = old_remote_irr; + e->fields.delivery_status = old_delivery_status; + + /* + * Some OSes (Linux, Xen) assume that Remote IRR bit will + * be cleared by IOAPIC hardware when the entry is configured + * as edge-triggered. This behavior is used to simulate an + * explicit EOI on IOAPICs that don't have the EOI register. + */ + if (e->fields.trig_mode == IOAPIC_EDGE_TRIG) + e->fields.remote_irr = 0; + mask_after = e->fields.mask; if (mask_before != mask_after) kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 36c90d631096..6d0fbff71d7a 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -266,9 +266,14 @@ static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id) recalculate_apic_map(apic->vcpu->kvm); } +static inline u32 kvm_apic_calc_x2apic_ldr(u32 id) +{ + return ((id >> 4) << 16) | (1 << (id & 0xf)); +} + static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id) { - u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf)); + u32 ldr = kvm_apic_calc_x2apic_ldr(id); WARN_ON_ONCE(id != apic->vcpu->vcpu_id); @@ -316,8 +321,16 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu) if (!lapic_in_kernel(vcpu)) return; + /* + * KVM emulates 82093AA datasheet (with in-kernel IOAPIC implementation) + * which doesn't have EOI register; Some buggy OSes (e.g. Windows with + * Hyper-V role) disable EOI broadcast in lapic not checking for IOAPIC + * version first and level-triggered interrupts never get EOIed in + * IOAPIC. 
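Further up in this lapic.c hunk, kvm_apic_calc_x2apic_ldr() is factored out so the same derivation can be reused when loading APIC state. The encoding is pure bit arithmetic (cluster ID in the high half, member bit in the low half), which a small stand-alone check can pin down:

#include <assert.h>
#include <stdint.h>

/* Mirror of the helper above: cluster in bits 31:16, member bit in 15:0. */
static uint32_t calc_x2apic_ldr(uint32_t id)
{
        return ((id >> 4) << 16) | (1u << (id & 0xf));
}

int main(void)
{
        assert(calc_x2apic_ldr(0)  == 0x00000001);	/* cluster 0, bit 0 */
        assert(calc_x2apic_ldr(5)  == 0x00000020);	/* cluster 0, bit 5 */
        assert(calc_x2apic_ldr(35) == 0x00020008);	/* cluster 2, bit 3 */
        return 0;
}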
+ */ feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0); - if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31)))) + if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) && + !ioapic_in_kernel(vcpu->kvm)) v |= APIC_LVR_DIRECTED_EOI; kvm_lapic_set_reg(apic, APIC_LVR, v); } @@ -1413,23 +1426,6 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic) local_irq_restore(flags); } -static void start_sw_period(struct kvm_lapic *apic) -{ - if (!apic->lapic_timer.period) - return; - - if (apic_lvtt_oneshot(apic) && - ktime_after(ktime_get(), - apic->lapic_timer.target_expiration)) { - apic_timer_expired(apic); - return; - } - - hrtimer_start(&apic->lapic_timer.timer, - apic->lapic_timer.target_expiration, - HRTIMER_MODE_ABS_PINNED); -} - static bool set_target_expiration(struct kvm_lapic *apic) { ktime_t now; @@ -1479,11 +1475,43 @@ static bool set_target_expiration(struct kvm_lapic *apic) static void advance_periodic_target_expiration(struct kvm_lapic *apic) { - apic->lapic_timer.tscdeadline += - nsec_to_cycles(apic->vcpu, apic->lapic_timer.period); + ktime_t now = ktime_get(); + u64 tscl = rdtsc(); + ktime_t delta; + + /* + * Synchronize both deadlines to the same time source or + * differences in the periods (caused by differences in the + * underlying clocks or numerical approximation errors) will + * cause the two to drift apart over time as the errors + * accumulate. + */ apic->lapic_timer.target_expiration = ktime_add_ns(apic->lapic_timer.target_expiration, apic->lapic_timer.period); + delta = ktime_sub(apic->lapic_timer.target_expiration, now); + apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) + + nsec_to_cycles(apic->vcpu, delta); +} + +static void start_sw_period(struct kvm_lapic *apic) +{ + if (!apic->lapic_timer.period) + return; + + if (ktime_after(ktime_get(), + apic->lapic_timer.target_expiration)) { + apic_timer_expired(apic); + + if (apic_lvtt_oneshot(apic)) + return; + + advance_periodic_target_expiration(apic); + } + + hrtimer_start(&apic->lapic_timer.timer, + apic->lapic_timer.target_expiration, + HRTIMER_MODE_ABS_PINNED); } bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu) @@ -1939,14 +1967,13 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) { - struct kvm_lapic *apic; + struct kvm_lapic *apic = vcpu->arch.apic; int i; - apic_debug("%s\n", __func__); + if (!apic) + return; - ASSERT(vcpu); - apic = vcpu->arch.apic; - ASSERT(apic != NULL); + apic_debug("%s\n", __func__); /* Stop the timer in case it's a reset to an active apic */ hrtimer_cancel(&apic->lapic_timer.timer); @@ -2102,7 +2129,6 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) */ vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */ - kvm_lapic_reset(vcpu, false); kvm_iodevice_init(&apic->dev, &apic_mmio_ops); return 0; @@ -2196,6 +2222,7 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu, { if (apic_x2apic_mode(vcpu->arch.apic)) { u32 *id = (u32 *)(s->regs + APIC_ID); + u32 *ldr = (u32 *)(s->regs + APIC_LDR); if (vcpu->kvm->arch.x2apic_format) { if (*id != vcpu->vcpu_id) @@ -2206,6 +2233,10 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu, else *id <<= 24; } + + /* In x2APIC mode, the LDR is fixed and based on the id */ + if (set) + *ldr = kvm_apic_calc_x2apic_ldr(*id); } return 0; @@ -2501,7 +2532,6 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) pe = xchg(&apic->pending_events, 0); if (test_bit(KVM_APIC_INIT, &pe)) { - 
kvm_lapic_reset(vcpu, true); kvm_vcpu_reset(vcpu, true); if (kvm_vcpu_is_bsp(apic->vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 7a69cf053711..00e2ae033a0f 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -150,6 +150,20 @@ module_param(dbg, bool, 0644); /* make pte_list_desc fit well in cache line */ #define PTE_LIST_EXT 3 +/* + * Return values of handle_mmio_page_fault and mmu.page_fault: + * RET_PF_RETRY: let CPU fault again on the address. + * RET_PF_EMULATE: mmio page fault, emulate the instruction directly. + * + * For handle_mmio_page_fault only: + * RET_PF_INVALID: the spte is invalid, let the real page fault path update it. + */ +enum { + RET_PF_RETRY = 0, + RET_PF_EMULATE = 1, + RET_PF_INVALID = 2, +}; + struct pte_list_desc { u64 *sptes[PTE_LIST_EXT]; struct pte_list_desc *more; @@ -876,7 +890,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache, if (cache->nobjs >= min) return 0; while (cache->nobjs < ARRAY_SIZE(cache->objects)) { - page = (void *)__get_free_page(GFP_KERNEL); + page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT); if (!page) return -ENOMEM; cache->objects[cache->nobjs++] = page; @@ -2744,8 +2758,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, else pte_access &= ~ACC_WRITE_MASK; + if (!kvm_is_mmio_pfn(pfn)) + spte |= shadow_me_mask; + spte |= (u64)pfn << PAGE_SHIFT; - spte |= shadow_me_mask; if (pte_access & ACC_WRITE_MASK) { @@ -2794,13 +2810,13 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, return ret; } -static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, - int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn, - bool speculative, bool host_writable) +static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, + int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn, + bool speculative, bool host_writable) { int was_rmapped = 0; int rmap_count; - bool emulate = false; + int ret = RET_PF_RETRY; pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, *sptep, write_fault, gfn); @@ -2830,12 +2846,12 @@ static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative, true, host_writable)) { if (write_fault) - emulate = true; + ret = RET_PF_EMULATE; kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } if (unlikely(is_mmio_spte(*sptep))) - emulate = true; + ret = RET_PF_EMULATE; pgprintk("%s: setting spte %llx\n", __func__, *sptep); pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n", @@ -2855,7 +2871,7 @@ static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, kvm_release_pfn_clean(pfn); - return emulate; + return ret; } static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, @@ -2994,14 +3010,13 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) * Do not cache the mmio info caused by writing the readonly gfn * into the spte otherwise read access on readonly gfn also can * cause mmio page fault and treat it as mmio access. - * Return 1 to tell kvm to emulate it.
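One hunk above, set_spte() is reordered so the memory-encryption mask is applied only to pfns backed by real RAM; an encrypted mapping of an MMIO pfn would be wrong. A sketch of that distinction, with a hypothetical C-bit position (the real one comes from CPUID):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT      12
#define SHADOW_ME_MASK  (1ull << 47)	/* hypothetical C-bit position */

static uint64_t make_spte(uint64_t pfn, bool is_mmio_pfn)
{
        uint64_t spte = pfn << PAGE_SHIFT;

        if (!is_mmio_pfn)
                spte |= SHADOW_ME_MASK;	/* encrypt only normal memory */
        return spte;
}

int main(void)
{
        assert(make_spte(0x1234, false) & SHADOW_ME_MASK);
        assert(!(make_spte(0xfee00, true) & SHADOW_ME_MASK));
        return 0;
}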
*/ if (pfn == KVM_PFN_ERR_RO_FAULT) - return 1; + return RET_PF_EMULATE; if (pfn == KVM_PFN_ERR_HWPOISON) { kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current); - return 0; + return RET_PF_RETRY; } return -EFAULT; @@ -3286,13 +3301,13 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, } if (fast_page_fault(vcpu, v, level, error_code)) - return 0; + return RET_PF_RETRY; mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) - return 0; + return RET_PF_RETRY; if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) return r; @@ -3312,7 +3327,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, out_unlock: spin_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); - return 0; + return RET_PF_RETRY; } @@ -3382,7 +3397,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) spin_lock(&vcpu->kvm->mmu_lock); if(make_mmu_pages_available(vcpu) < 0) { spin_unlock(&vcpu->kvm->mmu_lock); - return 1; + return -ENOSPC; } sp = kvm_mmu_get_page(vcpu, 0, 0, vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL); @@ -3397,7 +3412,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) spin_lock(&vcpu->kvm->mmu_lock); if (make_mmu_pages_available(vcpu) < 0) { spin_unlock(&vcpu->kvm->mmu_lock); - return 1; + return -ENOSPC; } sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL); @@ -3437,7 +3452,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) spin_lock(&vcpu->kvm->mmu_lock); if (make_mmu_pages_available(vcpu) < 0) { spin_unlock(&vcpu->kvm->mmu_lock); - return 1; + return -ENOSPC; } sp = kvm_mmu_get_page(vcpu, root_gfn, 0, vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL); @@ -3474,7 +3489,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) spin_lock(&vcpu->kvm->mmu_lock); if (make_mmu_pages_available(vcpu) < 0) { spin_unlock(&vcpu->kvm->mmu_lock); - return 1; + return -ENOSPC; } sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL, 0, ACC_ALL); @@ -3659,54 +3674,38 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) return reserved; } -/* - * Return values of handle_mmio_page_fault: - * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction - * directly. - * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page - * fault path update the mmio spte. - * RET_MMIO_PF_RETRY: let CPU fault again on the address. - * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed). - */ -enum { - RET_MMIO_PF_EMULATE = 1, - RET_MMIO_PF_INVALID = 2, - RET_MMIO_PF_RETRY = 0, - RET_MMIO_PF_BUG = -1 -}; - static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) { u64 spte; bool reserved; if (mmio_info_in_cache(vcpu, addr, direct)) - return RET_MMIO_PF_EMULATE; + return RET_PF_EMULATE; reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte); if (WARN_ON(reserved)) - return RET_MMIO_PF_BUG; + return -EINVAL; if (is_mmio_spte(spte)) { gfn_t gfn = get_mmio_spte_gfn(spte); unsigned access = get_mmio_spte_access(spte); if (!check_mmio_spte(vcpu, spte)) - return RET_MMIO_PF_INVALID; + return RET_PF_INVALID; if (direct) addr = 0; trace_handle_mmio_page_fault(addr, gfn, access); vcpu_cache_mmio_info(vcpu, addr, gfn, access); - return RET_MMIO_PF_EMULATE; + return RET_PF_EMULATE; } /* * If the page table is zapped by other cpus, let CPU fault again on * the address. 
*/ - return RET_MMIO_PF_RETRY; + return RET_PF_RETRY; } EXPORT_SYMBOL_GPL(handle_mmio_page_fault); @@ -3756,7 +3755,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); if (page_fault_handle_page_track(vcpu, error_code, gfn)) - return 1; + return RET_PF_EMULATE; r = mmu_topup_memory_caches(vcpu); if (r) @@ -3784,7 +3783,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) { if (unlikely(!lapic_in_kernel(vcpu) || - kvm_event_needs_reinjection(vcpu))) + kvm_event_needs_reinjection(vcpu) || + vcpu->arch.exception.pending)) return false; if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) @@ -3825,6 +3825,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, { int r = 1; + vcpu->arch.l1tf_flush_l1d = true; switch (vcpu->arch.apf.host_apf_reason) { default: trace_kvm_page_fault(fault_address, error_code); @@ -3876,7 +3877,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); if (page_fault_handle_page_track(vcpu, error_code, gfn)) - return 1; + return RET_PF_EMULATE; r = mmu_topup_memory_caches(vcpu); if (r) @@ -3893,13 +3894,13 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, } if (fast_page_fault(vcpu, gpa, level, error_code)) - return 0; + return RET_PF_RETRY; mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) - return 0; + return RET_PF_RETRY; if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r)) return r; @@ -3919,7 +3920,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, out_unlock: spin_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); - return 0; + return RET_PF_RETRY; } static void nonpaging_init_context(struct kvm_vcpu *vcpu, @@ -4918,25 +4919,25 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, vcpu->arch.gpa_val = cr2; } + r = RET_PF_INVALID; if (unlikely(error_code & PFERR_RSVD_MASK)) { r = handle_mmio_page_fault(vcpu, cr2, direct); - if (r == RET_MMIO_PF_EMULATE) { + if (r == RET_PF_EMULATE) { emulation_type = 0; goto emulate; } - if (r == RET_MMIO_PF_RETRY) - return 1; - if (r < 0) - return r; - /* Must be RET_MMIO_PF_INVALID. */ } - r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code), - false); + if (r == RET_PF_INVALID) { + r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code), + false); + WARN_ON(r == RET_PF_INVALID); + } + + if (r == RET_PF_RETRY) + return 1; if (r < 0) return r; - if (!r) - return 1; /* * Before emulating the instruction, check if the error code @@ -5062,7 +5063,7 @@ void kvm_mmu_uninit_vm(struct kvm *kvm) typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head); /* The caller should hold mmu-lock before calling this function. 
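The kvm_mmu_page_fault() rework above funnels every case through a single result variable, with RET_PF_INVALID meaning "no MMIO handling applied, take the regular fault path". A compact model of the new dispatch, with stub handlers standing in for the real ones:

#include <assert.h>

enum { RET_PF_RETRY = 0, RET_PF_EMULATE = 1, RET_PF_INVALID = 2 };

static int handle_mmio(unsigned long addr)
{
        /* Pretend only this one address has a cached MMIO spte. */
        return addr == 0xfee00000 ? RET_PF_EMULATE : RET_PF_INVALID;
}

static int regular_fault(unsigned long addr)
{
        (void)addr;
        return RET_PF_RETRY;	/* never returns RET_PF_INVALID */
}

static int kvm_fault(unsigned long addr, int rsvd_bits_set)
{
        int r = RET_PF_INVALID;

        if (rsvd_bits_set)
                r = handle_mmio(addr);
        if (r == RET_PF_INVALID)
                r = regular_fault(addr);
        if (r == RET_PF_RETRY)
                return 1;	/* resume the guest, let it fault again */
        return r;		/* <0 real error, else RET_PF_EMULATE */
}

int main(void)
{
        assert(kvm_fault(0xfee00000, 1) == RET_PF_EMULATE);
        assert(kvm_fault(0x1000, 1) == 1);
        return 0;
}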
*/ -static bool +static __always_inline bool slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, int start_level, int end_level, gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb) @@ -5092,7 +5093,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, return flush; } -static bool +static __always_inline bool slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, int start_level, int end_level, bool lock_flush_tlb) @@ -5103,7 +5104,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, lock_flush_tlb); } -static bool +static __always_inline bool slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, bool lock_flush_tlb) { @@ -5111,7 +5112,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); } -static bool +static __always_inline bool slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, bool lock_flush_tlb) { @@ -5119,7 +5120,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); } -static bool +static __always_inline bool slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, bool lock_flush_tlb) { @@ -5476,13 +5477,13 @@ int kvm_mmu_module_init(void) pte_list_desc_cache = kmem_cache_create("pte_list_desc", sizeof(struct pte_list_desc), - 0, 0, NULL); + 0, SLAB_ACCOUNT, NULL); if (!pte_list_desc_cache) goto nomem; mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", sizeof(struct kvm_mmu_page), - 0, 0, NULL); + 0, SLAB_ACCOUNT, NULL); if (!mmu_page_header_cache) goto nomem; diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index f18d1f8d332b..5abae72266b7 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -593,7 +593,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, struct kvm_mmu_page *sp = NULL; struct kvm_shadow_walk_iterator it; unsigned direct_access, access = gw->pt_access; - int top_level, emulate; + int top_level, ret; direct_access = gw->pte_access; @@ -659,15 +659,15 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, } clear_sp_write_flooding_count(it.sptep); - emulate = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, - it.level, gw->gfn, pfn, prefault, map_writable); + ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, + it.level, gw->gfn, pfn, prefault, map_writable); FNAME(pte_prefetch)(vcpu, gw, it.sptep); - return emulate; + return ret; out_gpte_changed: kvm_release_pfn_clean(pfn); - return 0; + return RET_PF_RETRY; } /* @@ -762,12 +762,12 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, if (!prefault) inject_page_fault(vcpu, &walker.fault); - return 0; + return RET_PF_RETRY; } if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) { shadow_page_table_clear_flood(vcpu, addr); - return 1; + return RET_PF_EMULATE; } vcpu->arch.write_fault_to_shadow_pgtable = false; @@ -789,7 +789,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault, &map_writable)) - return 0; + return RET_PF_RETRY; if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r)) return r; @@ -834,7 +834,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, 
out_unlock: spin_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); - return 0; + return RET_PF_RETRY; } static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 0e68f0b3cbf7..282bbcbf3b6a 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -45,6 +45,8 @@ #include #include #include +#include +#include #include #include "trace.h" @@ -173,6 +175,8 @@ struct vcpu_svm { uint64_t sysenter_eip; uint64_t tsc_aux; + u64 msr_decfg; + u64 next_rip; u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; @@ -183,6 +187,14 @@ struct vcpu_svm { u64 gs_base; } host; + u64 spec_ctrl; + /* + * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be + * translated into the appropriate L2_CFG bits on the host to + * perform speculative control. + */ + u64 virt_spec_ctrl; + u32 *msrpm; ulong nmi_iret_rip; @@ -248,6 +260,8 @@ static const struct svm_direct_access_msrs { { .index = MSR_CSTAR, .always = true }, { .index = MSR_SYSCALL_MASK, .always = true }, #endif + { .index = MSR_IA32_SPEC_CTRL, .always = false }, + { .index = MSR_IA32_PRED_CMD, .always = false }, { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false }, { .index = MSR_IA32_LASTBRANCHTOIP, .always = false }, { .index = MSR_IA32_LASTINTFROMIP, .always = false }, @@ -528,6 +542,7 @@ struct svm_cpu_data { struct kvm_ldttss_desc *tss_desc; struct page *save_area; + struct vmcb *current_vmcb; }; static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data); @@ -879,6 +894,25 @@ static bool valid_msr_intercept(u32 index) return false; } +static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr) +{ + u8 bit_write; + unsigned long tmp; + u32 offset; + u32 *msrpm; + + msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm: + to_svm(vcpu)->msrpm; + + offset = svm_msrpm_offset(msr); + bit_write = 2 * (msr & 0x0f) + 1; + tmp = msrpm[offset]; + + BUG_ON(offset == MSR_INVALID); + + return !!test_bit(bit_write, &tmp); +} + static void set_msr_interception(u32 *msrpm, unsigned msr, int read, int write) { @@ -1584,6 +1618,10 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) u32 dummy; u32 eax = 1; + vcpu->arch.microcode_version = 0x01000065; + svm->spec_ctrl = 0; + svm->virt_spec_ctrl = 0; + if (!init_event) { svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE; @@ -1705,11 +1743,17 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu) __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, svm); + /* + * The vmcb page can be recycled, causing a false negative in + * svm_vcpu_load(). So do a full IBPB now. 
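The msr_write_intercepted() helper added above relies on the AMD MSRPM encoding: two bits per MSR (even for reads, odd for writes), sixteen MSRs per 32-bit word, with svm_msrpm_offset() picking the word. A reduced model with one word standing in for the whole map:

#include <assert.h>
#include <stdint.h>

/* Write-intercept bit for an MSR within its permission-map word. */
static int write_intercepted(uint32_t word, uint32_t msr)
{
        uint8_t bit_write = 2 * (msr & 0x0f) + 1;

        return (word >> bit_write) & 1;
}

int main(void)
{
        uint32_t word = 0;

        word |= 1u << (2 * 0x05 + 1);	/* intercept writes to ...05 */
        assert(write_intercepted(word, 0xc0000105) == 1);
        assert(write_intercepted(word, 0xc0000104) == 0);
        return 0;
}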
+ */ + indirect_branch_prediction_barrier(); } static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct vcpu_svm *svm = to_svm(vcpu); + struct svm_cpu_data *sd = per_cpu(svm_data, cpu); int i; if (unlikely(cpu != vcpu->cpu)) { @@ -1738,6 +1782,10 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (static_cpu_has(X86_FEATURE_RDTSCP)) wrmsrl(MSR_TSC_AUX, svm->tsc_aux); + if (sd->current_vmcb != svm->vmcb) { + sd->current_vmcb = svm->vmcb; + indirect_branch_prediction_barrier(); + } avic_vcpu_load(vcpu, cpu); } @@ -2189,6 +2237,8 @@ static int ud_interception(struct vcpu_svm *svm) int er; er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); + if (er == EMULATE_USER_EXIT) + return 0; if (er != EMULATE_DONE) kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; @@ -3508,6 +3558,22 @@ static int cr8_write_interception(struct vcpu_svm *svm) return 0; } +static int svm_get_msr_feature(struct kvm_msr_entry *msr) +{ + msr->data = 0; + + switch (msr->index) { + case MSR_F10H_DECFG: + if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) + msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE; + break; + default: + return 1; + } + + return 0; +} + static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct vcpu_svm *svm = to_svm(vcpu); @@ -3576,8 +3642,19 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_VM_CR: msr_info->data = svm->nested.vm_cr_msr; break; - case MSR_IA32_UCODE_REV: - msr_info->data = 0x01000065; + case MSR_IA32_SPEC_CTRL: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS)) + return 1; + + msr_info->data = svm->spec_ctrl; + break; + case MSR_AMD64_VIRT_SPEC_CTRL: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD)) + return 1; + + msr_info->data = svm->virt_spec_ctrl; break; case MSR_F15H_IC_CFG: { @@ -3596,6 +3673,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = 0x1E; } break; + case MSR_F10H_DECFG: + msr_info->data = svm->msr_decfg; + break; default: return kvm_get_msr_common(vcpu, msr_info); } @@ -3657,9 +3737,69 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) u32 ecx = msr->index; u64 data = msr->data; switch (ecx) { + case MSR_IA32_CR_PAT: + if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) + return 1; + vcpu->arch.pat = data; + svm->vmcb->save.g_pat = data; + mark_dirty(svm->vmcb, VMCB_NPT); + break; case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr); break; + case MSR_IA32_SPEC_CTRL: + if (!msr->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS)) + return 1; + + /* The STIBP bit doesn't fault even if it's not advertised */ + if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP)) + return 1; + + svm->spec_ctrl = data; + + if (!data) + break; + + /* + * For non-nested: + * When it's written (to non-zero) for the first time, pass + * it through. + * + * For nested: + * The handling of the MSR bitmap for L2 guests is done in + * nested_svm_vmrun_msrpm. + * We update the L1 MSR bit as well since it will end up + * touching the MSR anyway now. 
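The SPEC_CTRL accessors above follow one gating pattern: host-initiated accesses always go through, guest accesses require the matching CPUID bit, and writes may only set advertised bits. A minimal sketch of that pattern, with a plain flag standing in for guest_cpuid_has():

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SPEC_CTRL_IBRS  (1ull << 0)
#define SPEC_CTRL_STIBP (1ull << 1)

static int set_spec_ctrl(bool host_initiated, bool guest_has_ibrs,
                         uint64_t data, uint64_t *shadow)
{
        if (!host_initiated && !guest_has_ibrs)
                return 1;	/* #GP: MSR not advertised to the guest */
        if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
                return 1;	/* #GP: reserved bits set */
        *shadow = data;
        return 0;
}

int main(void)
{
        uint64_t shadow = 0;

        assert(set_spec_ctrl(false, false, SPEC_CTRL_IBRS, &shadow) == 1);
        assert(set_spec_ctrl(true,  false, SPEC_CTRL_IBRS, &shadow) == 0);
        assert(set_spec_ctrl(false, true,  1ull << 5,      &shadow) == 1);
        assert(shadow == SPEC_CTRL_IBRS);
        return 0;
}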
+ */ + set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); + break; + case MSR_IA32_PRED_CMD: + if (!msr->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB)) + return 1; + + if (data & ~PRED_CMD_IBPB) + return 1; + + if (!data) + break; + + wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); + if (is_guest_mode(vcpu)) + break; + set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); + break; + case MSR_AMD64_VIRT_SPEC_CTRL: + if (!msr->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD)) + return 1; + + if (data & ~SPEC_CTRL_SSBD) + return 1; + + svm->virt_spec_ctrl = data; + break; case MSR_STAR: svm->vmcb->save.star = data; break; @@ -3724,6 +3864,24 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_VM_IGNNE: vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); break; + case MSR_F10H_DECFG: { + struct kvm_msr_entry msr_entry; + + msr_entry.index = msr->index; + if (svm_get_msr_feature(&msr_entry)) + return 1; + + /* Check the supported bits */ + if (data & ~msr_entry.data) + return 1; + + /* Don't allow the guest to change a bit, #GP */ + if (!msr->host_initiated && (data ^ msr_entry.data)) + return 1; + + svm->msr_decfg = data; + break; + } case MSR_IA32_APICBASE: if (kvm_vcpu_apicv_active(vcpu)) avic_update_vapic_bar(to_svm(vcpu), data); @@ -4635,9 +4793,8 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, } if (!ret && svm) { - trace_kvm_pi_irte_update(svm->vcpu.vcpu_id, - host_irq, e->gsi, - vcpu_info.vector, + trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id, + e->gsi, vcpu_info.vector, vcpu_info.pi_desc_addr, set); } @@ -4912,6 +5069,14 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) local_irq_enable(); + /* + * If this vCPU has touched SPEC_CTRL, restore the guest's value if + * it's non-zero. Since vmentry is serialising on affected CPUs, there + * is no need to worry about the conditional branch over the wrmsr + * being speculatively taken. + */ + x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); + asm volatile ( "push %%" _ASM_BP "; \n\t" "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t" @@ -4955,6 +5120,25 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) "mov %%r13, %c[r13](%[svm]) \n\t" "mov %%r14, %c[r14](%[svm]) \n\t" "mov %%r15, %c[r15](%[svm]) \n\t" +#endif + /* + * Clear host registers marked as clobbered to prevent + * speculative use. + */ + "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t" + "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t" + "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t" + "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t" + "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t" +#ifdef CONFIG_X86_64 + "xor %%r8, %%r8 \n\t" + "xor %%r9, %%r9 \n\t" + "xor %%r10, %%r10 \n\t" + "xor %%r11, %%r11 \n\t" + "xor %%r12, %%r12 \n\t" + "xor %%r13, %%r13 \n\t" + "xor %%r14, %%r14 \n\t" + "xor %%r15, %%r15 \n\t" #endif "pop %%" _ASM_BP : @@ -4985,6 +5169,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) #endif ); + /* Eliminate branch target predictions from guest mode */ + vmexit_fill_RSB(); + #ifdef CONFIG_X86_64 wrmsrl(MSR_GS_BASE, svm->host.gs_base); #else @@ -4994,6 +5181,26 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) #endif #endif + /* + * We do not use IBRS in the kernel. If this vCPU has used the + * SPEC_CTRL MSR it may have left it on; save the value and + * turn it off. This is much more efficient than blindly adding + * it to the atomic save/restore list. Especially as the former + * (Saving guest MSRs on vmexit) doesn't even exist in KVM. 
+ * + * For non-nested case: + * If the L01 MSR bitmap does not intercept the MSR, then we need to + * save it. + * + * For nested case: + * If the L02 MSR bitmap does not intercept the MSR, then we need to + * save it. + */ + if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) + svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); + + x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); + reload_tss(vcpu); local_irq_disable(); @@ -5095,7 +5302,7 @@ static bool svm_cpu_has_accelerated_tpr(void) return false; } -static bool svm_has_high_real_mode_segbase(void) +static bool svm_has_emulated_msr(int index) { return true; } @@ -5402,7 +5609,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .hardware_enable = svm_hardware_enable, .hardware_disable = svm_hardware_disable, .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr, - .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase, + .has_emulated_msr = svm_has_emulated_msr, .vcpu_create = svm_create_vcpu, .vcpu_free = svm_free_vcpu, @@ -5418,6 +5625,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .vcpu_unblocking = svm_vcpu_unblocking, .update_bp_intercept = update_bp_intercept, + .get_msr_feature = svm_get_msr_feature, .get_msr = svm_get_msr, .set_msr = svm_set_msr, .get_segment_base = svm_get_segment_base, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a6f4f095f8f4..f015ca3997d9 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "kvm_cache_regs.h" #include "x86.h" @@ -50,6 +51,8 @@ #include #include #include +#include +#include #include "trace.h" #include "pmu.h" @@ -107,6 +110,14 @@ static u64 __read_mostly host_xss; static bool __read_mostly enable_pml = 1; module_param_named(pml, enable_pml, bool, S_IRUGO); +#define MSR_TYPE_R 1 +#define MSR_TYPE_W 2 +#define MSR_TYPE_RW 3 + +#define MSR_BITMAP_MODE_X2APIC 1 +#define MSR_BITMAP_MODE_X2APIC_APICV 2 +#define MSR_BITMAP_MODE_LM 4 + #define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL /* Guest_tsc -> host_tsc conversion requires 64-bit division. 
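The SVM exit-path comment above boils down to one rule: read SPEC_CTRL back from hardware only when the guest could have written it directly, i.e. when the write is not intercepted. A compact model, with plain variables standing in for the MSR and the bitmap test:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct vcpu {
        bool spec_ctrl_intercepted;	/* msr_write_intercepted() result */
        uint64_t spec_ctrl;		/* cached guest value */
};

static void on_vmexit(struct vcpu *v, uint64_t hw_spec_ctrl)
{
        /*
         * With the MSR passed through, hardware holds the guest's last
         * write; with it intercepted, the cache is authoritative and
         * the expensive read-back can be skipped.
         */
        if (!v->spec_ctrl_intercepted)
                v->spec_ctrl = hw_spec_ctrl;
}

int main(void)
{
        struct vcpu v = { .spec_ctrl_intercepted = false, .spec_ctrl = 0 };

        on_vmexit(&v, 0x1);	/* guest enabled IBRS directly */
        assert(v.spec_ctrl == 0x1);
        return 0;
}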
*/ @@ -180,8 +191,151 @@ module_param(ple_window_max, int, S_IRUGO); extern const ulong vmx_return; +static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); +static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); +static DEFINE_MUTEX(vmx_l1d_flush_mutex); + +/* Storage for pre module init parameter parsing */ +static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO; + +static const struct { + const char *option; + enum vmx_l1d_flush_state cmd; +} vmentry_l1d_param[] = { + {"auto", VMENTER_L1D_FLUSH_AUTO}, + {"never", VMENTER_L1D_FLUSH_NEVER}, + {"cond", VMENTER_L1D_FLUSH_COND}, + {"always", VMENTER_L1D_FLUSH_ALWAYS}, +}; + +#define L1D_CACHE_ORDER 4 +static void *vmx_l1d_flush_pages; + +static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) +{ + struct page *page; + unsigned int i; + + if (!enable_ept) { + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED; + return 0; + } + + if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) { + u64 msr; + + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); + if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) { + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED; + return 0; + } + } + + /* If set to auto use the default l1tf mitigation method */ + if (l1tf == VMENTER_L1D_FLUSH_AUTO) { + switch (l1tf_mitigation) { + case L1TF_MITIGATION_OFF: + l1tf = VMENTER_L1D_FLUSH_NEVER; + break; + case L1TF_MITIGATION_FLUSH_NOWARN: + case L1TF_MITIGATION_FLUSH: + case L1TF_MITIGATION_FLUSH_NOSMT: + l1tf = VMENTER_L1D_FLUSH_COND; + break; + case L1TF_MITIGATION_FULL: + case L1TF_MITIGATION_FULL_FORCE: + l1tf = VMENTER_L1D_FLUSH_ALWAYS; + break; + } + } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) { + l1tf = VMENTER_L1D_FLUSH_ALWAYS; + } + + if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages && + !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) { + page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER); + if (!page) + return -ENOMEM; + vmx_l1d_flush_pages = page_address(page); + + /* + * Initialize each page with a different pattern in + * order to protect against KSM in the nested + * virtualization case. + */ + for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) { + memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1, + PAGE_SIZE); + } + } + + l1tf_vmx_mitigation = l1tf; + + if (l1tf != VMENTER_L1D_FLUSH_NEVER) + static_branch_enable(&vmx_l1d_should_flush); + else + static_branch_disable(&vmx_l1d_should_flush); + + if (l1tf == VMENTER_L1D_FLUSH_COND) + static_branch_enable(&vmx_l1d_flush_cond); + else + static_branch_disable(&vmx_l1d_flush_cond); + return 0; +} + +static int vmentry_l1d_flush_parse(const char *s) +{ + unsigned int i; + + if (s) { + for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) { + if (sysfs_streq(s, vmentry_l1d_param[i].option)) + return vmentry_l1d_param[i].cmd; + } + } + return -EINVAL; +} + +static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp) +{ + int l1tf, ret; + + if (!boot_cpu_has(X86_BUG_L1TF)) + return 0; + + l1tf = vmentry_l1d_flush_parse(s); + if (l1tf < 0) + return l1tf; + + /* + * Has vmx_init() run already? If not then this is the pre init + * parameter parsing. In that case just store the value and let + * vmx_init() do the proper setup after enable_ept has been + * established. 
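The vmentry_l1d_flush parameter plumbing above is a plain option table walked on every write. A minimal user-space analogue, with strcmp() standing in for sysfs_streq():

#include <assert.h>
#include <string.h>

enum flush_state { FLUSH_AUTO, FLUSH_NEVER, FLUSH_COND, FLUSH_ALWAYS };

static const struct {
        const char *option;
        enum flush_state cmd;
} params[] = {
        { "auto",   FLUSH_AUTO },
        { "never",  FLUSH_NEVER },
        { "cond",   FLUSH_COND },
        { "always", FLUSH_ALWAYS },
};

static int parse(const char *s)
{
        for (size_t i = 0; i < sizeof(params) / sizeof(params[0]); i++) {
                if (s && strcmp(s, params[i].option) == 0)
                        return params[i].cmd;
        }
        return -1;	/* -EINVAL in the real code */
}

int main(void)
{
        assert(parse("cond") == FLUSH_COND);
        assert(parse("bogus") == -1);
        return 0;
}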
+ */ + if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) { + vmentry_l1d_flush_param = l1tf; + return 0; + } + + mutex_lock(&vmx_l1d_flush_mutex); + ret = vmx_setup_l1d_flush(l1tf); + mutex_unlock(&vmx_l1d_flush_mutex); + return ret; +} + +static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) +{ + return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); +} + +static const struct kernel_param_ops vmentry_l1d_flush_ops = { + .set = vmentry_l1d_flush_set, + .get = vmentry_l1d_flush_get, +}; +module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644); + #define NR_AUTOLOAD_MSRS 8 -#define VMCS02_POOL_SIZE 1 struct vmcs { u32 revision_id; @@ -202,6 +356,11 @@ struct loaded_vmcs { bool nmi_known_unmasked; unsigned long vmcs_host_cr3; /* May not match real cr3 */ unsigned long vmcs_host_cr4; /* May not match real cr4 */ + /* Support for vnmi-less CPUs */ + int soft_vnmi_blocked; + ktime_t entry_time; + s64 vnmi_blocked_time; + unsigned long *msr_bitmap; struct list_head loaded_vmcss_on_cpu_link; }; @@ -218,7 +377,7 @@ struct shared_msr_entry { * stored in guest memory specified by VMPTRLD, but is opaque to the guest, * which must access it using VMREAD/VMWRITE/VMCLEAR instructions. * More than one of these structures may exist, if L1 runs multiple L2 guests. - * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the + * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the * underlying hardware which will be used to run L2. * This structure is packed to ensure that its layout is identical across * machines (necessary for live migration). @@ -401,13 +560,6 @@ struct __packed vmcs12 { */ #define VMCS12_SIZE 0x1000 -/* Used to remember the last vmcs02 used for some recently used vmcs12s */ -struct vmcs02_list { - struct list_head list; - gpa_t vmptr; - struct loaded_vmcs vmcs02; -}; - /* * The nested_vmx structure is part of vcpu_vmx, and holds information we need * for correct emulation of VMX (i.e., nested VMX) on this vcpu. @@ -432,15 +584,15 @@ struct nested_vmx { */ bool sync_shadow_vmcs; - /* vmcs02_list cache of VMCSs recently used to run L2 guests */ - struct list_head vmcs02_pool; - int vmcs02_num; bool change_vmcs01_virtual_x2apic_mode; /* L2 must run next, and mustn't decide to exit to L1. */ bool nested_run_pending; + + struct loaded_vmcs vmcs02; + /* - * Guest pages referred to in vmcs02 with host-physical pointers, so - * we must keep them pinned while L2 runs. + * Guest pages referred to in the vmcs02 with host-physical + * pointers, so we must keep them pinned while L2 runs. 
*/ struct page *apic_access_page; struct page *virtual_apic_page; @@ -449,8 +601,6 @@ struct nested_vmx { bool pi_pending; u16 posted_intr_nv; - unsigned long *msr_bitmap; - struct hrtimer preemption_timer; bool preemption_timer_expired; @@ -561,10 +711,16 @@ static inline int pi_test_sn(struct pi_desc *pi_desc) (unsigned long *)&pi_desc->control); } +struct vmx_msrs { + unsigned int nr; + struct vmx_msr_entry val[NR_AUTOLOAD_MSRS]; +}; + struct vcpu_vmx { struct kvm_vcpu vcpu; unsigned long host_rsp; u8 fail; + u8 msr_bitmap_mode; u32 exit_intr_info; u32 idt_vectoring_info; ulong rflags; @@ -576,6 +732,10 @@ struct vcpu_vmx { u64 msr_host_kernel_gs_base; u64 msr_guest_kernel_gs_base; #endif + + u64 arch_capabilities; + u64 spec_ctrl; + u32 vm_entry_controls_shadow; u32 vm_exit_controls_shadow; u32 secondary_exec_control; @@ -589,9 +749,8 @@ struct vcpu_vmx { struct loaded_vmcs *loaded_vmcs; bool __launched; /* temporary, used in vmx_vcpu_run */ struct msr_autoload { - unsigned nr; - struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS]; - struct vmx_msr_entry host[NR_AUTOLOAD_MSRS]; + struct vmx_msrs guest; + struct vmx_msrs host; } msr_autoload; struct { int loaded; @@ -882,13 +1041,18 @@ static const unsigned short vmcs_field_to_offset_table[] = { static inline short vmcs_field_to_offset(unsigned long field) { - BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX); + const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table); + unsigned short offset; - if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) || - vmcs_field_to_offset_table[field] == 0) + BUILD_BUG_ON(size > SHRT_MAX); + if (field >= size) return -ENOENT; - return vmcs_field_to_offset_table[field]; + field = array_index_nospec(field, size); + offset = vmcs_field_to_offset_table[field]; + if (offset == 0) + return -ENOENT; + return offset; } static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) @@ -914,6 +1078,9 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu); static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked); static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, u16 error_code); +static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); +static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type); static DEFINE_PER_CPU(struct vmcs *, vmxarea); static DEFINE_PER_CPU(struct vmcs *, current_vmcs); @@ -933,12 +1100,6 @@ static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock); enum { VMX_IO_BITMAP_A, VMX_IO_BITMAP_B, - VMX_MSR_BITMAP_LEGACY, - VMX_MSR_BITMAP_LONGMODE, - VMX_MSR_BITMAP_LEGACY_X2APIC_APICV, - VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV, - VMX_MSR_BITMAP_LEGACY_X2APIC, - VMX_MSR_BITMAP_LONGMODE_X2APIC, VMX_VMREAD_BITMAP, VMX_VMWRITE_BITMAP, VMX_BITMAP_NR @@ -948,12 +1109,6 @@ static unsigned long *vmx_bitmap[VMX_BITMAP_NR]; #define vmx_io_bitmap_a (vmx_bitmap[VMX_IO_BITMAP_A]) #define vmx_io_bitmap_b (vmx_bitmap[VMX_IO_BITMAP_B]) -#define vmx_msr_bitmap_legacy (vmx_bitmap[VMX_MSR_BITMAP_LEGACY]) -#define vmx_msr_bitmap_longmode (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE]) -#define vmx_msr_bitmap_legacy_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC_APICV]) -#define vmx_msr_bitmap_longmode_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV]) -#define vmx_msr_bitmap_legacy_x2apic (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC]) -#define vmx_msr_bitmap_longmode_x2apic (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC]) #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP]) #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP]) 
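The vmcs_field_to_offset() rework above bounds-checks the field and then clamps it with array_index_nospec(), so a mispredicted bounds check cannot be turned into a Spectre-v1 table read. A portable rendition of the masking idea, mirroring the kernel's generic fallback (it relies on arithmetic right shift of a negative value, as the kernel does):

#include <assert.h>

/* All-ones when index < size, zero otherwise, without a branch. */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
        return ~(long)(index | (size - index - 1)) >> (8 * sizeof(long) - 1);
}

int main(void)
{
        unsigned long size = 10;

        assert((5 & index_mask(5, size)) == 5);	  /* in range: unchanged */
        assert((12 & index_mask(12, size)) == 0); /* out of range: clamped */
        return 0;
}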
@@ -1064,6 +1219,13 @@ static inline bool is_machine_check(u32 intr_info) (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK); } +/* Undocumented: icebp/int1 */ +static inline bool is_icebp(u32 intr_info) +{ + return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) + == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK); +} + static inline bool cpu_has_vmx_msr_bitmap(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS; @@ -1286,6 +1448,11 @@ static inline bool cpu_has_vmx_invpcid(void) SECONDARY_EXEC_ENABLE_INVPCID; } +static inline bool cpu_has_virtual_nmis(void) +{ + return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS; +} + static inline bool cpu_has_vmx_wbinvd_exit(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & @@ -1343,11 +1510,6 @@ static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit) (vmcs12->secondary_vm_exec_control & bit); } -static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12) -{ - return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; -} - static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & @@ -1900,6 +2062,52 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) vmcs_write32(EXCEPTION_BITMAP, eb); } +/* + * Check if MSR is intercepted for currently loaded MSR bitmap. + */ +static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr) +{ + unsigned long *msr_bitmap; + int f = sizeof(unsigned long); + + if (!cpu_has_vmx_msr_bitmap()) + return true; + + msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap; + + if (msr <= 0x1fff) { + return !!test_bit(msr, msr_bitmap + 0x800 / f); + } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { + msr &= 0x1fff; + return !!test_bit(msr, msr_bitmap + 0xc00 / f); + } + + return true; +} + +/* + * Check if MSR is intercepted for L01 MSR bitmap. 
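The offsets probed in msr_write_intercepted() above come from the VMX MSR-bitmap layout: one 4 KiB page split into four 1 KiB quarters (read-low at 0x000, read-high at 0x400, write-low at 0x800, write-high at 0xc00), covering MSRs 0x0-0x1fff and 0xc0000000-0xc0001fff. A sketch modelled byte-wise (the kernel's test_bit() works on longs, which matches this on little-endian):

#include <assert.h>
#include <stdint.h>
#include <string.h>

static int test_bitmap_bit(const uint8_t *bm, unsigned long nr)
{
        return (bm[nr / 8] >> (nr % 8)) & 1;
}

static int write_intercepted(const uint8_t *bitmap, uint32_t msr)
{
        if (msr <= 0x1fff)
                return test_bitmap_bit(bitmap + 0x800, msr);
        if (msr >= 0xc0000000 && msr <= 0xc0001fff)
                return test_bitmap_bit(bitmap + 0xc00, msr & 0x1fff);
        return 1;	/* outside the bitmap: always intercepted */
}

int main(void)
{
        uint8_t bitmap[4096];

        memset(bitmap, 0xff, sizeof(bitmap));	/* intercept everything */
        bitmap[0x800 + 0x48 / 8] &= ~(1u << (0x48 % 8)); /* pass writes */
        assert(write_intercepted(bitmap, 0x48) == 0);	/* IA32_SPEC_CTRL */
        assert(write_intercepted(bitmap, 0x49) == 1);
        return 0;
}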
+ */ +static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr) +{ + unsigned long *msr_bitmap; + int f = sizeof(unsigned long); + + if (!cpu_has_vmx_msr_bitmap()) + return true; + + msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap; + + if (msr <= 0x1fff) { + return !!test_bit(msr, msr_bitmap + 0x800 / f); + } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { + msr &= 0x1fff; + return !!test_bit(msr, msr_bitmap + 0xc00 / f); + } + + return true; +} + static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, unsigned long entry, unsigned long exit) { @@ -1907,9 +2115,20 @@ static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, vm_exit_controls_clearbit(vmx, exit); } +static int find_msr(struct vmx_msrs *m, unsigned int msr) +{ + unsigned int i; + + for (i = 0; i < m->nr; ++i) { + if (m->val[i].index == msr) + return i; + } + return -ENOENT; +} + static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) { - unsigned i; + int i; struct msr_autoload *m = &vmx->msr_autoload; switch (msr) { @@ -1930,18 +2149,21 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) } break; } + i = find_msr(&m->guest, msr); + if (i < 0) + goto skip_guest; + --m->guest.nr; + m->guest.val[i] = m->guest.val[m->guest.nr]; + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); - for (i = 0; i < m->nr; ++i) - if (m->guest[i].index == msr) - break; - - if (i == m->nr) +skip_guest: + i = find_msr(&m->host, msr); + if (i < 0) return; - --m->nr; - m->guest[i] = m->guest[m->nr]; - m->host[i] = m->host[m->nr]; - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); + + --m->host.nr; + m->host.val[i] = m->host.val[m->host.nr]; + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); } static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, @@ -1956,9 +2178,9 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, } static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, - u64 guest_val, u64 host_val) + u64 guest_val, u64 host_val, bool entry_only) { - unsigned i; + int i, j = 0; struct msr_autoload *m = &vmx->msr_autoload; switch (msr) { @@ -1993,24 +2215,31 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, wrmsrl(MSR_IA32_PEBS_ENABLE, 0); } - for (i = 0; i < m->nr; ++i) - if (m->guest[i].index == msr) - break; + i = find_msr(&m->guest, msr); + if (!entry_only) + j = find_msr(&m->host, msr); - if (i == NR_AUTOLOAD_MSRS) { + if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { printk_once(KERN_WARNING "Not enough msr switch entries. 
" "Can't add msr %x\n", msr); return; - } else if (i == m->nr) { - ++m->nr; - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); } + if (i < 0) { + i = m->guest.nr++; + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); + } + m->guest.val[i].index = msr; + m->guest.val[i].value = guest_val; - m->guest[i].index = msr; - m->guest[i].value = guest_val; - m->host[i].index = msr; - m->host[i].value = host_val; + if (entry_only) + return; + + if (j < 0) { + j = m->host.nr++; + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); + } + m->host.val[j].index = msr; + m->host.val[j].value = host_val; } static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) @@ -2054,7 +2283,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) guest_efer &= ~EFER_LME; if (guest_efer != host_efer) add_atomic_switch_msr(vmx, MSR_EFER, - guest_efer, host_efer); + guest_efer, host_efer, false); return false; } else { guest_efer &= ~ignore_bits; @@ -2278,6 +2507,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; vmcs_load(vmx->loaded_vmcs->vmcs); + indirect_branch_prediction_barrier(); } if (!already_loaded) { @@ -2291,7 +2521,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) * processors. See 22.2.4. */ vmcs_writel(HOST_TR_BASE, - (unsigned long)this_cpu_ptr(&cpu_tss)); + (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss); vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */ /* @@ -2522,6 +2752,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu) return; } + WARN_ON_ONCE(vmx->emulation_required); + if (kvm_exception_is_soft(nr)) { vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmx->vcpu.arch.event_exit_inst_len); @@ -2554,36 +2786,6 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) vmx->guest_msrs[from] = tmp; } -static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu) -{ - unsigned long *msr_bitmap; - - if (is_guest_mode(vcpu)) - msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap; - else if (cpu_has_secondary_exec_ctrls() && - (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) & - SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { - if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) { - if (is_long_mode(vcpu)) - msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv; - else - msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv; - } else { - if (is_long_mode(vcpu)) - msr_bitmap = vmx_msr_bitmap_longmode_x2apic; - else - msr_bitmap = vmx_msr_bitmap_legacy_x2apic; - } - } else { - if (is_long_mode(vcpu)) - msr_bitmap = vmx_msr_bitmap_longmode; - else - msr_bitmap = vmx_msr_bitmap_legacy; - } - - vmcs_write64(MSR_BITMAP, __pa(msr_bitmap)); -} - /* * Set up the vmcs to automatically save and restore system * msrs. 
Don't touch the 64-bit msrs if the guest is in legacy @@ -2624,7 +2826,7 @@ static void setup_msrs(struct vcpu_vmx *vmx) vmx->save_nmsrs = save_nmsrs; if (cpu_has_vmx_msr_bitmap()) - vmx_set_msr_bitmap(&vmx->vcpu); + vmx_update_msr_bitmap(&vmx->vcpu); } /* @@ -2841,8 +3043,9 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) * Advertise EPTP switching unconditionally * since we emulate it */ - vmx->nested.nested_vmx_vmfunc_controls = - VMX_VMFUNC_EPTP_SWITCHING; + if (enable_ept) + vmx->nested.nested_vmx_vmfunc_controls = + VMX_VMFUNC_EPTP_SWITCHING; } /* @@ -3232,6 +3435,11 @@ static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, return !(val & ~valid_bits); } +static int vmx_get_msr_feature(struct kvm_msr_entry *msr) +{ + return 1; +} + /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. @@ -3259,6 +3467,19 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_TSC: msr_info->data = guest_read_tsc(vcpu); break; + case MSR_IA32_SPEC_CTRL: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + return 1; + + msr_info->data = to_vmx(vcpu)->spec_ctrl; + break; + case MSR_IA32_ARCH_CAPABILITIES: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) + return 1; + msr_info->data = to_vmx(vcpu)->arch_capabilities; + break; case MSR_IA32_SYSENTER_CS: msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); break; @@ -3366,6 +3587,68 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr_info); break; + case MSR_IA32_SPEC_CTRL: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + return 1; + + /* The STIBP bit doesn't fault even if it's not advertised */ + if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD)) + return 1; + + vmx->spec_ctrl = data; + + if (!data) + break; + + /* + * For non-nested: + * When it's written (to non-zero) for the first time, pass + * it through. + * + * For nested: + * The handling of the MSR bitmap for L2 guests is done in + * nested_vmx_merge_msr_bitmap. We should not touch the + * vmcs02.msr_bitmap here since it gets completely overwritten + * in the merging. We update the vmcs01 here for L1 as well + * since it will end up touching the MSR anyway now. + */ + vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, + MSR_IA32_SPEC_CTRL, + MSR_TYPE_RW); + break; + case MSR_IA32_PRED_CMD: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) + return 1; + + if (data & ~PRED_CMD_IBPB) + return 1; + + if (!data) + break; + + wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); + + /* + * For non-nested: + * When it's written (to non-zero) for the first time, pass + * it through. + * + * For nested: + * The handling of the MSR bitmap for L2 guests is done in + * nested_vmx_merge_msr_bitmap. We should not touch the + * vmcs02.msr_bitmap here since it gets completely overwritten + * in the merging. 
+ */ + vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD, + MSR_TYPE_W); + break; + case MSR_IA32_ARCH_CAPABILITIES: + if (!msr_info->host_initiated) + return 1; + vmx->arch_capabilities = data; + break; case MSR_IA32_CR_PAT: if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) @@ -3414,7 +3697,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vcpu->arch.ia32_xss = data; if (vcpu->arch.ia32_xss != host_xss) add_atomic_switch_msr(vmx, MSR_IA32_XSS, - vcpu->arch.ia32_xss, host_xss); + vcpu->arch.ia32_xss, host_xss, false); else clear_atomic_switch_msr(vmx, MSR_IA32_XSS); break; @@ -3699,9 +3982,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) &_vmexit_control) < 0) return -EIO; - min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING | - PIN_BASED_VIRTUAL_NMIS; - opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER; + min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; + opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR | + PIN_BASED_VMX_PREEMPTION_TIMER; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, &_pin_based_exec_control) < 0) return -EIO; @@ -3808,11 +4091,6 @@ static struct vmcs *alloc_vmcs_cpu(int cpu) return vmcs; } -static struct vmcs *alloc_vmcs(void) -{ - return alloc_vmcs_cpu(raw_smp_processor_id()); -} - static void free_vmcs(struct vmcs *vmcs) { free_pages((unsigned long)vmcs, vmcs_config.order); @@ -3828,9 +4106,38 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) loaded_vmcs_clear(loaded_vmcs); free_vmcs(loaded_vmcs->vmcs); loaded_vmcs->vmcs = NULL; + if (loaded_vmcs->msr_bitmap) + free_page((unsigned long)loaded_vmcs->msr_bitmap); WARN_ON(loaded_vmcs->shadow_vmcs != NULL); } +static struct vmcs *alloc_vmcs(void) +{ + return alloc_vmcs_cpu(raw_smp_processor_id()); +} + +static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) +{ + loaded_vmcs->vmcs = alloc_vmcs(); + if (!loaded_vmcs->vmcs) + return -ENOMEM; + + loaded_vmcs->shadow_vmcs = NULL; + loaded_vmcs_init(loaded_vmcs); + + if (cpu_has_vmx_msr_bitmap()) { + loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); + if (!loaded_vmcs->msr_bitmap) + goto out_vmcs; + memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); + } + return 0; + +out_vmcs: + free_loaded_vmcs(loaded_vmcs); + return -ENOMEM; +} + static void free_kvm_area(void) { int cpu; @@ -4903,10 +5210,8 @@ static void free_vpid(int vpid) spin_unlock(&vmx_vpid_lock); } -#define MSR_TYPE_R 1 -#define MSR_TYPE_W 2 -static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, - u32 msr, int type) +static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type) { int f = sizeof(unsigned long); @@ -4940,6 +5245,50 @@ static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, } } +static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type) +{ + int f = sizeof(unsigned long); + + if (!cpu_has_vmx_msr_bitmap()) + return; + + /* + * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals + * have the write-low and read-high bitmap offsets the wrong way round. + * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 
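Stepping back to the setup_vmcs_config() change earlier in this hunk: moving PIN_BASED_VIRTUAL_NMIS from the required set to the optional set means setup no longer fails on CPUs without virtual NMIs. A simplified sketch of that min/opt pattern (the real adjust_vmx_controls() reads the allowed-0/allowed-1 halves of a VMX capability MSR; "supported" stands in for the allowed-1 half):

#include <assert.h>
#include <stdint.h>

static int adjust_controls(uint32_t min, uint32_t opt, uint32_t supported,
                           uint32_t *result)
{
        if ((min & supported) != min)
                return -1;	/* required control missing: -EIO */
        *result = min | (opt & supported);	/* take what hardware offers */
        return 0;
}

int main(void)
{
        uint32_t ctl;

        /* Virtual NMIs (bit 1, say) unsupported: setup still succeeds. */
        assert(adjust_controls(0x1, 0x2, 0x1, &ctl) == 0 && ctl == 0x1);
        /* A required bit missing fails outright. */
        assert(adjust_controls(0x1, 0x2, 0x2, &ctl) == -1);
        return 0;
}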
+ */ + if (msr <= 0x1fff) { + if (type & MSR_TYPE_R) + /* read-low */ + __set_bit(msr, msr_bitmap + 0x000 / f); + + if (type & MSR_TYPE_W) + /* write-low */ + __set_bit(msr, msr_bitmap + 0x800 / f); + + } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { + msr &= 0x1fff; + if (type & MSR_TYPE_R) + /* read-high */ + __set_bit(msr, msr_bitmap + 0x400 / f); + + if (type & MSR_TYPE_W) + /* write-high */ + __set_bit(msr, msr_bitmap + 0xc00 / f); + + } +} + +static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap, + u32 msr, int type, bool value) +{ + if (value) + vmx_enable_intercept_for_msr(msr_bitmap, msr, type); + else + vmx_disable_intercept_for_msr(msr_bitmap, msr, type); +} + /* * If a msr is allowed by L0, we should check whether it is allowed by L1. * The corresponding bit will be cleared unless both of L0 and L1 allow it. @@ -4986,30 +5335,70 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, } } -static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only) +static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu) { - if (!longmode_only) - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, - msr, MSR_TYPE_R | MSR_TYPE_W); - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, - msr, MSR_TYPE_R | MSR_TYPE_W); + u8 mode = 0; + + if (cpu_has_secondary_exec_ctrls() && + (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) & + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { + mode |= MSR_BITMAP_MODE_X2APIC; + if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) + mode |= MSR_BITMAP_MODE_X2APIC_APICV; + } + + if (is_long_mode(vcpu)) + mode |= MSR_BITMAP_MODE_LM; + + return mode; } -static void vmx_disable_intercept_msr_x2apic(u32 msr, int type, bool apicv_active) +#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4)) + +static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap, + u8 mode) { - if (apicv_active) { - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv, - msr, type); - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv, - msr, type); - } else { - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, - msr, type); - __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, - msr, type); + int msr; + + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { + unsigned word = msr / BITS_PER_LONG; + msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0; + msr_bitmap[word + (0x800 / sizeof(long))] = ~0; + } + + if (mode & MSR_BITMAP_MODE_X2APIC) { + /* + * TPR reads and writes can be virtualized even if virtual interrupt + * delivery is not in use. 
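The X2APIC_MSR() macro used above encodes the architectural mapping from MMIO APIC register offsets to x2APIC MSR numbers: each 16-byte register becomes one MSR starting at 0x800, so the TPR at offset 0x80 is MSR 0x808. A quick stand-alone check:

#include <assert.h>

#define APIC_BASE_MSR   0x800
#define APIC_TASKPRI    0x80
#define APIC_EOI        0xB0
#define X2APIC_MSR(r)   (APIC_BASE_MSR + ((r) >> 4))

int main(void)
{
        assert(X2APIC_MSR(APIC_TASKPRI) == 0x808);	/* x2APIC TPR */
        assert(X2APIC_MSR(APIC_EOI) == 0x80b);		/* x2APIC EOI */
        return 0;
}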
+ */ + vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW); + if (mode & MSR_BITMAP_MODE_X2APIC_APICV) { + vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R); + vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W); + vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W); + } } } +static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; + u8 mode = vmx_msr_bitmap_mode(vcpu); + u8 changed = mode ^ vmx->msr_bitmap_mode; + + if (!changed) + return; + + vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW, + !(mode & MSR_BITMAP_MODE_LM)); + + if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV)) + vmx_update_msr_bitmap_x2apic(msr_bitmap, mode); + + vmx->msr_bitmap_mode = mode; +} + static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu) { return enable_apicv; @@ -5114,14 +5503,15 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, if (is_guest_mode(vcpu) && vector == vmx->nested.posted_intr_nv) { - /* the PIR and ON have been set by L1. */ - kvm_vcpu_trigger_posted_interrupt(vcpu, true); /* * If a posted intr is not recognized by hardware, * we will accomplish it in the next vmentry. */ vmx->nested.pi_pending = true; kvm_make_request(KVM_REQ_EVENT, vcpu); + /* the PIR and ON have been set by L1. */ + if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true)) + kvm_vcpu_kick(vcpu); return 0; } return -1; @@ -5255,7 +5645,7 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) } if (cpu_has_vmx_msr_bitmap()) - vmx_set_msr_bitmap(vcpu); + vmx_update_msr_bitmap(vcpu); } static u32 vmx_exec_control(struct vcpu_vmx *vmx) @@ -5442,7 +5832,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); } if (cpu_has_vmx_msr_bitmap()) - vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy)); + vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ @@ -5498,9 +5888,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); - vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); + vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); - vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); + vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); @@ -5520,6 +5910,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) ++vmx->nmsrs; } + vmx->arch_capabilities = kvm_get_arch_capabilities(); vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); @@ -5550,7 +5941,9 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) u64 cr0; vmx->rmode.vm86_active = 0; + vmx->spec_ctrl = 0; + vcpu->arch.microcode_version = 0x100000000ULL; vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); kvm_set_cr8(vcpu, 0); @@ -5592,7 +5985,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vmcs_write64(GUEST_IA32_DEBUGCTL, 0); } - vmcs_writel(GUEST_RFLAGS, 0x02); + kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); kvm_rip_write(vcpu, 0xfff0); vmcs_writel(GUEST_GDTR_BASE, 0); @@ -5667,7 +6060,8 @@ static void enable_irq_window(struct kvm_vcpu *vcpu) static void 
enable_nmi_window(struct kvm_vcpu *vcpu) { - if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { + if (!cpu_has_virtual_nmis() || + vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { enable_irq_window(vcpu); return; } @@ -5707,6 +6101,19 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); + if (!cpu_has_virtual_nmis()) { + /* + * Tracking the NMI-blocked state in software is built upon + * finding the next open IRQ window. This, in turn, depends on + * well-behaving guests: They have to keep IRQs disabled at + * least as long as the NMI handler runs. Otherwise we may + * cause NMI nesting, maybe breaking the guest. But as this is + * highly unlikely, we can live with the residual risk. + */ + vmx->loaded_vmcs->soft_vnmi_blocked = 1; + vmx->loaded_vmcs->vnmi_blocked_time = 0; + } + ++vcpu->stat.nmi_injections; vmx->loaded_vmcs->nmi_known_unmasked = false; @@ -5725,6 +6132,8 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); bool masked; + if (!cpu_has_virtual_nmis()) + return vmx->loaded_vmcs->soft_vnmi_blocked; if (vmx->loaded_vmcs->nmi_known_unmasked) return false; masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; @@ -5735,14 +6144,21 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) { struct vcpu_vmx *vmx = to_vmx(vcpu); - - vmx->loaded_vmcs->nmi_known_unmasked = !masked; - if (masked) - vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, - GUEST_INTR_STATE_NMI); - else - vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, - GUEST_INTR_STATE_NMI); + + if (!cpu_has_virtual_nmis()) { + if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) { + vmx->loaded_vmcs->soft_vnmi_blocked = masked; + vmx->loaded_vmcs->vnmi_blocked_time = 0; + } + } else { + vmx->loaded_vmcs->nmi_known_unmasked = !masked; + if (masked) + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_NMI); + else + vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_NMI); + } } static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) @@ -5750,6 +6166,10 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) if (to_vmx(vcpu)->nested.nested_run_pending) return 0; + if (!cpu_has_virtual_nmis() && + to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) + return 0; + return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI | GUEST_INTR_STATE_NMI)); @@ -5878,11 +6298,9 @@ static int handle_exception(struct kvm_vcpu *vcpu) return 1; /* already handled by vmx_vcpu_run() */ if (is_invalid_opcode(intr_info)) { - if (is_guest_mode(vcpu)) { - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; - } er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); + if (er == EMULATE_USER_EXIT) + return 0; if (er != EMULATE_DONE) kvm_queue_exception(vcpu, UD_VECTOR); return 1; @@ -5931,7 +6349,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= dr6 | DR6_RTM; - if (!(dr6 & ~DR6_RESERVED)) /* icebp */ + if (is_icebp(intr_info)) skip_emulated_instruction(vcpu); kvm_queue_exception(vcpu, DB_VECTOR); @@ -6478,6 +6896,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) * AAK134, BY25. 
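The !cpu_has_virtual_nmis() branches added above spread one piece of state, soft_vnmi_blocked plus a blocked-time counter, across injection, mask queries and mask updates. A stand-alone model of how the three hooks cooperate; struct soft_vnmi and the function names are invented stand-ins, not KVM symbols:

#include <stdbool.h>
#include <stdint.h>

/* Toy state mirroring loaded_vmcs->soft_vnmi_blocked / vnmi_blocked_time. */
struct soft_vnmi {
    bool    blocked;
    int64_t blocked_ns;
};

static void inject_nmi(struct soft_vnmi *s)
{
    /* Injection opens a blocked window and restarts the clock. */
    s->blocked = true;
    s->blocked_ns = 0;
}

static bool nmi_allowed(const struct soft_vnmi *s)
{
    /* A further NMI must wait until the current window closes. */
    return !s->blocked;
}

static void set_nmi_mask(struct soft_vnmi *s, bool masked)
{
    /* A mask change resets the clock, as vmx_set_nmi_mask() does above. */
    if (s->blocked != masked) {
        s->blocked = masked;
        s->blocked_ns = 0;
    }
}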
*/ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && + cpu_has_virtual_nmis() && (exit_qualification & INTR_INFO_UNBLOCK_NMI)) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); @@ -6519,7 +6938,21 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) if (!is_guest_mode(vcpu) && !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { trace_kvm_fast_mmio(gpa); - return kvm_skip_emulated_instruction(vcpu); + /* + * Doing kvm_skip_emulated_instruction() depends on undefined + * behavior: Intel's manual doesn't mandate + * VM_EXIT_INSTRUCTION_LEN to be set in VMCS when EPT MISCONFIG + * occurs and while on real hardware it was observed to be set, + * other hypervisors (namely Hyper-V) don't set it, we end up + * advancing IP with some random value. Disable fast mmio when + * running nested and keep it for real hardware in hope that + * VM_EXIT_INSTRUCTION_LEN will always be set correctly. + */ + if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) + return kvm_skip_emulated_instruction(vcpu); + else + return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP, + NULL, 0) == EMULATE_DONE; } ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); @@ -6564,7 +6997,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) if (kvm_test_request(KVM_REQ_EVENT, vcpu)) return 1; - err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); + err = emulate_instruction(vcpu, 0); if (err == EMULATE_USER_EXIT) { ++vcpu->stat.mmio_exits; @@ -6572,12 +7005,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) goto out; } - if (err != EMULATE_DONE) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; - vcpu->run->internal.ndata = 0; - return 0; - } + if (err != EMULATE_DONE) + goto emulation_error; + + if (vmx->emulation_required && !vmx->rmode.vm86_active && + vcpu->arch.exception.pending) + goto emulation_error; if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; @@ -6593,6 +7026,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) out: return ret; + +emulation_error: + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; + vcpu->run->internal.ndata = 0; + return 0; } static int __grow_ple_window(int val) @@ -6699,7 +7138,7 @@ void vmx_enable_tdp(void) static __init int hardware_setup(void) { - int r = -ENOMEM, i, msr; + int r = -ENOMEM, i; rdmsrl_safe(MSR_EFER, &host_efer); @@ -6712,22 +7151,13 @@ static __init int hardware_setup(void) goto out; } - vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL); memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); - /* - * Allow direct access to the PC debug port (it is often used for I/O - * delays, but the vmexits simply slow things down). 
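The Hyper-V workaround above boils down to one policy decision: trust VM_EXIT_INSTRUCTION_LEN only when KVM itself runs on bare metal, otherwise take the slow decode path. A sketch of just that branch, with every helper stubbed out (none of these names exist in KVM):

#include <stdbool.h>
#include <stdio.h>

/* Stub: X86_FEATURE_HYPERVISOR analogue; flip to taste. */
static bool running_under_hypervisor(void) { return true; }

static bool skip_using_vmcs_len(void)
{
    puts("advance RIP by VM_EXIT_INSTRUCTION_LEN");   /* fast path */
    return true;
}

static bool skip_by_decoding(void)
{
    puts("decode the instruction, then advance RIP"); /* slow but safe */
    return true;
}

static bool fast_mmio_skip(void)
{
    return running_under_hypervisor() ? skip_by_decoding()
                                      : skip_using_vmcs_len();
}

int main(void) { return !fast_mmio_skip(); }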
- */ memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE); - clear_bit(0x80, vmx_io_bitmap_a); memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE); - memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE); - memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE); - if (setup_vmcs_config(&vmcs_config) < 0) { r = -EIO; goto out; @@ -6790,42 +7220,8 @@ static __init int hardware_setup(void) kvm_tsc_scaling_ratio_frac_bits = 48; } - vmx_disable_intercept_for_msr(MSR_FS_BASE, false); - vmx_disable_intercept_for_msr(MSR_GS_BASE, false); - vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true); - vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false); - vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); - vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); - - memcpy(vmx_msr_bitmap_legacy_x2apic_apicv, - vmx_msr_bitmap_legacy, PAGE_SIZE); - memcpy(vmx_msr_bitmap_longmode_x2apic_apicv, - vmx_msr_bitmap_longmode, PAGE_SIZE); - memcpy(vmx_msr_bitmap_legacy_x2apic, - vmx_msr_bitmap_legacy, PAGE_SIZE); - memcpy(vmx_msr_bitmap_longmode_x2apic, - vmx_msr_bitmap_longmode, PAGE_SIZE); - set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ - for (msr = 0x800; msr <= 0x8ff; msr++) { - if (msr == 0x839 /* TMCCT */) - continue; - vmx_disable_intercept_msr_x2apic(msr, MSR_TYPE_R, true); - } - - /* - * TPR reads and writes can be virtualized even if virtual interrupt - * delivery is not in use. - */ - vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_W, true); - vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_R | MSR_TYPE_W, false); - - /* EOI */ - vmx_disable_intercept_msr_x2apic(0x80b, MSR_TYPE_W, true); - /* SELF-IPI */ - vmx_disable_intercept_msr_x2apic(0x83f, MSR_TYPE_W, true); - if (enable_ept) vmx_enable_tdp(); else @@ -6928,94 +7324,6 @@ static int handle_monitor(struct kvm_vcpu *vcpu) return handle_nop(vcpu); } -/* - * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12. - * We could reuse a single VMCS for all the L2 guests, but we also want the - * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this - * allows keeping them loaded on the processor, and in the future will allow - * optimizations where prepare_vmcs02 doesn't need to set all the fields on - * every entry if they never change. - * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE - * (>=0) with a vmcs02 for each recently loaded vmcs12s, most recent first. - * - * The following functions allocate and free a vmcs02 in this pool. - */ - -/* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */ -static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx) -{ - struct vmcs02_list *item; - list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) - if (item->vmptr == vmx->nested.current_vmptr) { - list_move(&item->list, &vmx->nested.vmcs02_pool); - return &item->vmcs02; - } - - if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) { - /* Recycle the least recently used VMCS. 
*/ - item = list_last_entry(&vmx->nested.vmcs02_pool, - struct vmcs02_list, list); - item->vmptr = vmx->nested.current_vmptr; - list_move(&item->list, &vmx->nested.vmcs02_pool); - return &item->vmcs02; - } - - /* Create a new VMCS */ - item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL); - if (!item) - return NULL; - item->vmcs02.vmcs = alloc_vmcs(); - item->vmcs02.shadow_vmcs = NULL; - if (!item->vmcs02.vmcs) { - kfree(item); - return NULL; - } - loaded_vmcs_init(&item->vmcs02); - item->vmptr = vmx->nested.current_vmptr; - list_add(&(item->list), &(vmx->nested.vmcs02_pool)); - vmx->nested.vmcs02_num++; - return &item->vmcs02; -} - -/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */ -static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr) -{ - struct vmcs02_list *item; - list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) - if (item->vmptr == vmptr) { - free_loaded_vmcs(&item->vmcs02); - list_del(&item->list); - kfree(item); - vmx->nested.vmcs02_num--; - return; - } -} - -/* - * Free all VMCSs saved for this vcpu, except the one pointed by - * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs - * must be &vmx->vmcs01. - */ -static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx) -{ - struct vmcs02_list *item, *n; - - WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01); - list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) { - /* - * Something will leak if the above WARN triggers. Better than - * a use-after-free. - */ - if (vmx->loaded_vmcs == &item->vmcs02) - continue; - - free_loaded_vmcs(&item->vmcs02); - list_del(&item->list); - kfree(item); - vmx->nested.vmcs02_num--; - } -} - /* * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), * set the success or error code of an emulated VMX instruction, as specified @@ -7183,8 +7491,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer) vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) return 1; - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer, - sizeof(*vmpointer), &e)) { + if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -7196,13 +7503,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs *shadow_vmcs; + int r; - if (cpu_has_vmx_msr_bitmap()) { - vmx->nested.msr_bitmap = - (unsigned long *)__get_free_page(GFP_KERNEL); - if (!vmx->nested.msr_bitmap) - goto out_msr_bitmap; - } + r = alloc_loaded_vmcs(&vmx->nested.vmcs02); + if (r < 0) + goto out_vmcs02; vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); if (!vmx->nested.cached_vmcs12) @@ -7219,13 +7524,12 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) vmx->vmcs01.shadow_vmcs = shadow_vmcs; } - INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool)); - vmx->nested.vmcs02_num = 0; - hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; + vmx->nested.vpid02 = allocate_vpid(); + vmx->nested.vmxon = true; return 0; @@ -7233,9 +7537,9 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) kfree(vmx->nested.cached_vmcs12); out_cached_vmcs12: - free_page((unsigned long)vmx->nested.msr_bitmap); + free_loaded_vmcs(&vmx->nested.vmcs02); -out_msr_bitmap: +out_vmcs02: return -ENOMEM; } @@ -7270,6 +7574,12 @@ static int handle_vmon(struct kvm_vcpu *vcpu) return 1; } + /* CPL=0 must be checked manually. 
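Between handle_vmon() and nested_vmx_check_permission(), the series makes every VMX instruction check the guest's privilege level before anything else. The ordering, reduced to a stand-alone sketch; guest_cpl, vmxon_done and vmx_insn_permitted() are illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

static int  guest_cpl  = 3;     /* vmx_get_cpl() analogue */
static bool vmxon_done = false; /* nested.vmxon analogue  */

static bool vmx_insn_permitted(void)
{
    if (guest_cpl != 0) {
        puts("queue #UD: CPL > 0");      /* privilege is checked first */
        return false;
    }
    if (!vmxon_done) {
        puts("queue #UD: not in VMX operation");
        return false;
    }
    return true;
}

int main(void)
{
    vmx_insn_permitted();            /* refused at CPL 3 */
    guest_cpl = 0;
    vmxon_done = true;
    return !vmx_insn_permitted();    /* now permitted */
}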
*/ + if (vmx_get_cpl(vcpu)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; + } + if (vmx->nested.vmxon) { nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); return kvm_skip_emulated_instruction(vcpu); @@ -7329,6 +7639,11 @@ static int handle_vmon(struct kvm_vcpu *vcpu) */ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) { + if (vmx_get_cpl(vcpu)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 0; + } + if (!to_vmx(vcpu)->nested.vmxon) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; @@ -7377,10 +7692,6 @@ static void free_nested(struct vcpu_vmx *vmx) free_vpid(vmx->nested.vpid02); vmx->nested.posted_intr_nv = -1; vmx->nested.current_vmptr = -1ull; - if (vmx->nested.msr_bitmap) { - free_page((unsigned long)vmx->nested.msr_bitmap); - vmx->nested.msr_bitmap = NULL; - } if (enable_shadow_vmcs) { vmx_disable_shadow_vmcs(vmx); vmcs_clear(vmx->vmcs01.shadow_vmcs); @@ -7388,7 +7699,7 @@ static void free_nested(struct vcpu_vmx *vmx) vmx->vmcs01.shadow_vmcs = NULL; } kfree(vmx->nested.cached_vmcs12); - /* Unpin physical memory we referred to in current vmcs02 */ + /* Unpin physical memory we referred to in the vmcs02 */ if (vmx->nested.apic_access_page) { kvm_release_page_dirty(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; @@ -7404,7 +7715,7 @@ static void free_nested(struct vcpu_vmx *vmx) vmx->nested.pi_desc = NULL; } - nested_free_all_saved_vmcss(vmx); + free_loaded_vmcs(&vmx->nested.vmcs02); } /* Emulate the VMXOFF instruction */ @@ -7447,8 +7758,6 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) vmptr + offsetof(struct vmcs12, launch_state), &zero, sizeof(zero)); - nested_free_vmcs02(vmx, vmptr); - nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } @@ -7667,9 +7976,9 @@ static int handle_vmread(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, true, &gva)) return 1; - /* _system ok, as hardware has verified cpl=0 */ - kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, - &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL); + /* _system ok, nested_vmx_check_permission has verified cpl=0 */ + kvm_write_guest_virt_system(vcpu, gva, &field_value, + (is_long_mode(vcpu) ? 8 : 4), NULL); } nested_vmx_succeed(vcpu); @@ -7705,8 +8014,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, false, &gva)) return 1; - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, - &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { + if (kvm_read_guest_virt(vcpu, gva, &field_value, + (is_64_bit_mode(vcpu) ? 
8 : 4), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -7810,10 +8119,10 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, true, &vmcs_gva)) return 1; - /* ok to use *_system, as hardware has verified cpl=0 */ - if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva, - (void *)&to_vmx(vcpu)->nested.current_vmptr, - sizeof(u64), &e)) { + /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ + if (kvm_write_guest_virt_system(vcpu, vmcs_gva, + (void *)&to_vmx(vcpu)->nested.current_vmptr, + sizeof(u64), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -7860,8 +8169,7 @@ static int handle_invept(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmx_instruction_info, false, &gva)) return 1; - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, - sizeof(operand), &e)) { + if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -7925,8 +8233,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmx_instruction_info, false, &gva)) return 1; - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, - sizeof(operand), &e)) { + if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -7978,6 +8285,7 @@ static int handle_pml_full(struct kvm_vcpu *vcpu) * "blocked by NMI" bit has to be set before next VM entry. */ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && + cpu_has_virtual_nmis() && (exit_qualification & INTR_INFO_UNBLOCK_NMI)) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); @@ -8359,10 +8667,11 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) /* * The host physical addresses of some pages of guest memory - * are loaded into VMCS02 (e.g. L1's Virtual APIC Page). The CPU - * may write to these pages via their host physical address while - * L2 is running, bypassing any address-translation-based dirty - * tracking (e.g. EPT write protection). + * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC + * Page). The CPU may write to these pages via their host + * physical address while L2 is running, bypassing any + * address-translation-based dirty tracking (e.g. EPT write + * protection). * * Mark them dirty on every exit from L2 to prevent them from * getting out of sync with dirty tracking. @@ -8822,6 +9131,25 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) return 0; } + if (unlikely(!cpu_has_virtual_nmis() && + vmx->loaded_vmcs->soft_vnmi_blocked)) { + if (vmx_interrupt_allowed(vcpu)) { + vmx->loaded_vmcs->soft_vnmi_blocked = 0; + } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL && + vcpu->arch.nmi_pending) { + /* + * This CPU doesn't support us in finding the end of an + * NMI-blocked window if the guest runs with IRQs + * disabled. So we pull the trigger after 1 s of + * futile waiting, but inform the user about this.
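The watchdog above has three outcomes: blocking ends normally when IRQs open up, it is forced off after a second of futile waiting with an NMI pending, or it simply persists. As a compact model (struct vnmi_state and check_vnmi_timeout() are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

struct vnmi_state {
    bool    blocked;       /* soft_vnmi_blocked */
    int64_t blocked_ns;    /* vnmi_blocked_time */
    bool    irqs_allowed;  /* vmx_interrupt_allowed() analogue */
    bool    nmi_pending;
};

static void check_vnmi_timeout(struct vnmi_state *s, int vcpu_id)
{
    if (!s->blocked)
        return;
    if (s->irqs_allowed) {
        s->blocked = false;            /* normal end of the window */
    } else if (s->blocked_ns > NSEC_PER_SEC && s->nmi_pending) {
        fprintf(stderr, "breaking out of NMI-blocked state on VCPU %d\n",
                vcpu_id);
        s->blocked = false;            /* forced, as in the code above */
    }
}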
+ */ + printk(KERN_WARNING "%s: Breaking out of NMI-blocked " + "state on VCPU %d after 1 s timeout\n", + __func__, vcpu->vcpu_id); + vmx->loaded_vmcs->soft_vnmi_blocked = 0; + } + } + if (exit_reason < kvm_vmx_max_exit_handlers && kvm_vmx_exit_handlers[exit_reason]) return kvm_vmx_exit_handlers[exit_reason](vcpu); @@ -8833,6 +9161,79 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) } } +/* + * Software based L1D cache flush which is used when microcode providing + * the cache control MSR is not loaded. + * + * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but + * flushing it requires reading in 64 KiB because the replacement algorithm + * is not exactly LRU. This could be sized at runtime via topology + * information but as all relevant affected CPUs have 32KiB L1D cache size + * there is no point in doing so. + */ +#define L1D_CACHE_ORDER 4 +static void *vmx_l1d_flush_pages; + +static void vmx_l1d_flush(struct kvm_vcpu *vcpu) +{ + int size = PAGE_SIZE << L1D_CACHE_ORDER; + + /* + * This code is only executed when the flush mode is 'cond' or + * 'always' + */ + if (static_branch_likely(&vmx_l1d_flush_cond)) { + bool flush_l1d; + + /* + * Clear the per-vcpu flush bit, it gets set again + * either from vcpu_run() or from one of the unsafe + * VMEXIT handlers. + */ + flush_l1d = vcpu->arch.l1tf_flush_l1d; + vcpu->arch.l1tf_flush_l1d = false; + + /* + * Clear the per-cpu flush bit, it gets set again from + * the interrupt handlers. + */ + flush_l1d |= kvm_get_cpu_l1tf_flush_l1d(); + kvm_clear_cpu_l1tf_flush_l1d(); + + if (!flush_l1d) + return; + } + + vcpu->stat.l1d_flush++; + + if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) { + wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); + return; + } + + asm volatile( + /* First ensure the pages are in the TLB */ + "xorl %%eax, %%eax\n" + ".Lpopulate_tlb:\n\t" + "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" + "addl $4096, %%eax\n\t" + "cmpl %%eax, %[size]\n\t" + "jne .Lpopulate_tlb\n\t" + "xorl %%eax, %%eax\n\t" + "cpuid\n\t" + /* Now fill the cache */ + "xorl %%eax, %%eax\n" + ".Lfill_cache:\n" + "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" + "addl $64, %%eax\n\t" + "cmpl %%eax, %[size]\n\t" + "jne .Lfill_cache\n\t" + "lfence\n" + :: [flush_pages] "r" (vmx_l1d_flush_pages), + [size] "r" (size) + : "eax", "ebx", "ecx", "edx"); +} + static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); @@ -8877,7 +9278,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) } vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); - vmx_set_msr_bitmap(vcpu); + vmx_update_msr_bitmap(vcpu); } static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) @@ -9063,14 +9464,14 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) #endif "pushf\n\t" __ASM_SIZE(push) " $%c[cs]\n\t" - "call *%[entry]\n\t" + CALL_NOSPEC : #ifdef CONFIG_X86_64 [sp]"=&r"(tmp), #endif ASM_CALL_CONSTRAINT : - [entry]"r"(entry), + THUNK_TARGET(entry), [ss]"i"(__KERNEL_DS), [cs]"i"(__KERNEL_CS) ); @@ -9078,9 +9479,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) } STACK_FRAME_NON_STANDARD(vmx_handle_external_intr); -static bool vmx_has_high_real_mode_segbase(void) +static bool vmx_has_emulated_msr(int index) { - return enable_unrestricted_guest || emulate_invalid_guest_state; + switch (index) { + case MSR_IA32_SMBASE: + /* + * We cannot do SMM unless we can run the guest in big + * real mode.
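The inline asm in vmx_l1d_flush() is just two strided loops: touch one byte per 4 KiB page to populate the TLB, then one byte per 64-byte line to displace the L1D. A rough user-space approximation of the access pattern; it deliberately drops the cpuid/lfence serialization and the MSR fast path, so it is illustrative only:

#include <stdlib.h>
#include <string.h>

#define FLUSH_BYTES (16 * 4096)   /* PAGE_SIZE << L1D_CACHE_ORDER = 64 KiB */

static volatile unsigned char sink;

static void l1d_flush_sw(const unsigned char *buf)
{
    size_t i;

    for (i = 0; i < FLUSH_BYTES; i += 4096)  /* first pass: populate TLB */
        sink = buf[i];
    for (i = 0; i < FLUSH_BYTES; i += 64)    /* second pass: fill cache */
        sink = buf[i];
}

int main(void)
{
    unsigned char *buf = aligned_alloc(4096, FLUSH_BYTES);

    if (!buf)
        return 1;
    memset(buf, 0, FLUSH_BYTES);
    l1d_flush_sw(buf);
    free(buf);
    return 0;
}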
+ */ + return enable_unrestricted_guest || emulate_invalid_guest_state; + case MSR_AMD64_VIRT_SPEC_CTRL: + /* This is AMD only. */ + return false; + default: + return true; + } } static bool vmx_mpx_supported(void) @@ -9104,33 +9517,38 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; - if (vmx->loaded_vmcs->nmi_known_unmasked) - return; - /* - * Can't use vmx->exit_intr_info since we're not sure what - * the exit reason is. - */ - exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); - unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; - vector = exit_intr_info & INTR_INFO_VECTOR_MASK; - /* - * SDM 3: 27.7.1.2 (September 2008) - * Re-set bit "block by NMI" before VM entry if vmexit caused by - * a guest IRET fault. - * SDM 3: 23.2.2 (September 2008) - * Bit 12 is undefined in any of the following cases: - * If the VM exit sets the valid bit in the IDT-vectoring - * information field. - * If the VM exit is due to a double fault. - */ - if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && - vector != DF_VECTOR && !idtv_info_valid) - vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, - GUEST_INTR_STATE_NMI); - else - vmx->loaded_vmcs->nmi_known_unmasked = - !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) - & GUEST_INTR_STATE_NMI); + if (cpu_has_virtual_nmis()) { + if (vmx->loaded_vmcs->nmi_known_unmasked) + return; + /* + * Can't use vmx->exit_intr_info since we're not sure what + * the exit reason is. + */ + exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); + unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; + vector = exit_intr_info & INTR_INFO_VECTOR_MASK; + /* + * SDM 3: 27.7.1.2 (September 2008) + * Re-set bit "block by NMI" before VM entry if vmexit caused by + * a guest IRET fault. + * SDM 3: 23.2.2 (September 2008) + * Bit 12 is undefined in any of the following cases: + * If the VM exit sets the valid bit in the IDT-vectoring + * information field. + * If the VM exit is due to a double fault. + */ + if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && + vector != DF_VECTOR && !idtv_info_valid) + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_NMI); + else + vmx->loaded_vmcs->nmi_known_unmasked = + !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) + & GUEST_INTR_STATE_NMI); + } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked)) + vmx->loaded_vmcs->vnmi_blocked_time += + ktime_to_ns(ktime_sub(ktime_get(), + vmx->loaded_vmcs->entry_time)); } static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, @@ -9219,7 +9637,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) clear_atomic_switch_msr(vmx, msrs[i].msr); else add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, - msrs[i].host); + msrs[i].host, false); } static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu) @@ -9247,6 +9665,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long debugctlmsr, cr3, cr4; + /* Record the guest's net vcpu time for enforced NMI injections. */ + if (unlikely(!cpu_has_virtual_nmis() && + vmx->loaded_vmcs->soft_vnmi_blocked)) + vmx->loaded_vmcs->entry_time = ktime_get(); + /* Don't enter VMX if guest state is invalid, let the exit handler start emulation until we arrive back to a valid state */ if (vmx->emulation_required) @@ -9297,7 +9720,19 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx_arm_hv_timer(vcpu); + /* + * If this vCPU has touched SPEC_CTRL, restore the guest's value if + * it's non-zero. 
Since vmentry is serialising on affected CPUs, there + * is no need to worry about the conditional branch over the wrmsr + * being speculatively taken. + */ + x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); + vmx->__launched = vmx->loaded_vmcs->launched; + + if (static_branch_unlikely(&vmx_l1d_should_flush)) + vmx_l1d_flush(vcpu); + asm( /* Store host registers */ "push %%" _ASM_DX "; push %%" _ASM_BP ";" @@ -9345,6 +9780,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) /* Save guest registers, load host registers, keep flags */ "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" "pop %0 \n\t" + "setbe %c[fail](%0)\n\t" "mov %%" _ASM_AX ", %c[rax](%0) \n\t" "mov %%" _ASM_BX ", %c[rbx](%0) \n\t" __ASM_SIZE(pop) " %c[rcx](%0) \n\t" @@ -9361,12 +9797,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) "mov %%r13, %c[r13](%0) \n\t" "mov %%r14, %c[r14](%0) \n\t" "mov %%r15, %c[r15](%0) \n\t" + "xor %%r8d, %%r8d \n\t" + "xor %%r9d, %%r9d \n\t" + "xor %%r10d, %%r10d \n\t" + "xor %%r11d, %%r11d \n\t" + "xor %%r12d, %%r12d \n\t" + "xor %%r13d, %%r13d \n\t" + "xor %%r14d, %%r14d \n\t" + "xor %%r15d, %%r15d \n\t" #endif "mov %%cr2, %%" _ASM_AX " \n\t" "mov %%" _ASM_AX ", %c[cr2](%0) \n\t" + "xor %%eax, %%eax \n\t" + "xor %%ebx, %%ebx \n\t" + "xor %%esi, %%esi \n\t" + "xor %%edi, %%edi \n\t" "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" - "setbe %c[fail](%0) \n\t" ".pushsection .rodata \n\t" ".global vmx_return \n\t" "vmx_return: " _ASM_PTR " 2b \n\t" @@ -9403,6 +9850,29 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) #endif ); + /* + * We do not use IBRS in the kernel. If this vCPU has used the + * SPEC_CTRL MSR it may have left it on; save the value and + * turn it off. This is much more efficient than blindly adding + * it to the atomic save/restore list. Especially as the former + * (Saving guest MSRs on vmexit) doesn't even exist in KVM. + * + * For non-nested case: + * If the L01 MSR bitmap does not intercept the MSR, then we need to + * save it. + * + * For nested case: + * If the L02 MSR bitmap does not intercept the MSR, then we need to + * save it. + */ + if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) + vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); + + x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); + + /* Eliminate branch target predictions from guest mode */ + vmexit_fill_RSB(); + /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. 
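The save path above is deliberately lazy: the RDMSR only happens when the active bitmap lets the guest write SPEC_CTRL directly, so guests that never touch the MSR pay nothing extra on vmexit. The same idea as a stand-alone function; write_intercepted() and fake_rdmsr() are stubs, not KVM symbols:

#include <stdbool.h>
#include <stdint.h>

#define MSR_IA32_SPEC_CTRL 0x48

static bool write_intercepted(uint32_t msr) { (void)msr; return true; }
static uint64_t fake_rdmsr(uint32_t msr)    { (void)msr; return 0; }

static uint64_t save_guest_spec_ctrl(uint64_t cached)
{
    /* Pass-through write access means the cached value may be stale. */
    if (!write_intercepted(MSR_IA32_SPEC_CTRL))
        return fake_rdmsr(MSR_IA32_SPEC_CTRL);
    /* Intercepted writes were trapped, so the cache is authoritative. */
    return cached;
}

int main(void) { return (int)save_guest_spec_ctrl(0); }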
Restore it if needed */ if (debugctlmsr) update_debugctlmsr(debugctlmsr); @@ -9514,6 +9984,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) { int err; struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); + unsigned long *msr_bitmap; int cpu; if (!vmx) @@ -9546,13 +10017,20 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) if (!vmx->guest_msrs) goto free_pml; - vmx->loaded_vmcs = &vmx->vmcs01; - vmx->loaded_vmcs->vmcs = alloc_vmcs(); - vmx->loaded_vmcs->shadow_vmcs = NULL; - if (!vmx->loaded_vmcs->vmcs) + err = alloc_loaded_vmcs(&vmx->vmcs01); + if (err < 0) goto free_msrs; - loaded_vmcs_init(vmx->loaded_vmcs); + msr_bitmap = vmx->vmcs01.msr_bitmap; + vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW); + vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW); + vmx->msr_bitmap_mode = 0; + + vmx->loaded_vmcs = &vmx->vmcs01; cpu = get_cpu(); vmx_vcpu_load(&vmx->vcpu, cpu); vmx->vcpu.cpu = cpu; @@ -9576,10 +10054,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) goto free_vmcs; } - if (nested) { + if (nested) nested_vmx_setup_ctls_msrs(vmx); - vmx->nested.vpid02 = allocate_vpid(); - } vmx->nested.posted_intr_nv = -1; vmx->nested.current_vmptr = -1ull; @@ -9596,7 +10072,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) return &vmx->vcpu; free_vmcs: - free_vpid(vmx->nested.vpid02); free_loaded_vmcs(vmx->loaded_vmcs); free_msrs: kfree(vmx->guest_msrs); @@ -9610,6 +10085,37 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) return ERR_PTR(err); } +#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n" +#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n" + +static int vmx_vm_init(struct kvm *kvm) +{ + if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) { + switch (l1tf_mitigation) { + case L1TF_MITIGATION_OFF: + case L1TF_MITIGATION_FLUSH_NOWARN: + /* 'I explicitly don't care' is set */ + break; + case L1TF_MITIGATION_FLUSH: + case L1TF_MITIGATION_FLUSH_NOSMT: + case L1TF_MITIGATION_FULL: + /* + * Warn upon starting the first VM in a potentially + * insecure environment. 
+ */
+ */ + if (cpu_smt_control == CPU_SMT_ENABLED) + pr_warn_once(L1TF_MSG_SMT); + if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) + pr_warn_once(L1TF_MSG_L1D); + break; + case L1TF_MITIGATION_FULL_FORCE: + /* Flush is enforced */ + break; + } + } + return 0; +} + static void __init vmx_check_processor_compat(void *rtn) { struct vmcs_config vmcs_conf; @@ -9946,7 +10452,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, if (cpu_has_vmx_msr_bitmap() && nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS) && nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) - ; + vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, + CPU_BASED_USE_MSR_BITMAPS); else vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_USE_MSR_BITMAPS); @@ -10021,10 +10528,25 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, int msr; struct page *page; unsigned long *msr_bitmap_l1; - unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap; + unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap; + /* + * pred_cmd & spec_ctrl are trying to verify two things: + * + * 1. L0 gave a permission to L1 to actually passthrough the MSR. This + * ensures that we do not accidentally generate an L02 MSR bitmap + * from the L12 MSR bitmap that is too permissive. + * 2. That L1 or L2s have actually used the MSR. This avoids + * unnecessarily merging of the bitmap if the MSR is unused. This + * works properly because we only update the L01 MSR bitmap lazily. + * So even if L0 should pass L1 these MSRs, the L01 bitmap is only + * updated to reflect this when L1 (or its L2s) actually write to + * the MSR. + */ + bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD); + bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL); - /* This shortcut is ok because we support only x2APIC MSRs so far. */ - if (!nested_cpu_has_virt_x2apic_mode(vmcs12)) + if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && + !pred_cmd && !spec_ctrl) return false; page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap); @@ -10057,12 +10579,35 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, MSR_TYPE_W); } } + + if (spec_ctrl) + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + MSR_IA32_SPEC_CTRL, + MSR_TYPE_R | MSR_TYPE_W); + + if (pred_cmd) + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + MSR_IA32_PRED_CMD, + MSR_TYPE_W); + kunmap(page); kvm_release_page_clean(page); return true; } +static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && + !page_address_valid(vcpu, vmcs12->apic_access_addr)) + return -EINVAL; + else + return 0; +} + static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { @@ -10510,10 +11055,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, * Set the MSR load/store lists to match L0's settings. 
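The nested merge above applies the rule stated earlier: an MSR reaches L2 directly only if both L0's and L1's bitmaps pass it through, which is an OR over intercept bits. The real code only touches a handful of x2APIC and speculation MSRs; this sketch applies the same rule wholesale to show the invariant:

#include <stddef.h>
#include <stdint.h>

#define BITMAP_WORDS (4096 / sizeof(uint64_t))

static void merge_msr_bitmaps(uint64_t *l02, const uint64_t *l0,
                              const uint64_t *l1)
{
    size_t i;

    for (i = 0; i < BITMAP_WORDS; i++)
        l02[i] = l0[i] | l1[i];   /* a set (intercept) bit always wins */
}

int main(void)
{
    static uint64_t l0[BITMAP_WORDS], l1[BITMAP_WORDS], l02[BITMAP_WORDS];

    l1[0] = 1;                    /* L1 intercepts reads of MSR 0 */
    merge_msr_bitmaps(l02, l0, l1);
    return l02[0] == 1 ? 0 : 1;   /* so the merged bitmap must too */
}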
*/ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); - vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); - vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); + vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); + vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); /* * HOST_RSP is normally set correctly in vmx_vcpu_run() just before @@ -10598,6 +11143,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if (kvm_has_tsc_control) decache_tsc_multiplier(vmx); + if (cpu_has_vmx_msr_bitmap()) + vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); + if (enable_vpid) { /* * There is no direct mapping between vpid02 and vpid12, the @@ -10703,6 +11251,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + if (nested_vmx_check_apic_access_controls(vcpu, vmcs12)) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; @@ -10814,20 +11365,15 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - struct loaded_vmcs *vmcs02; u32 msr_entry_idx; u32 exit_qual; - vmcs02 = nested_get_current_vmcs02(vmx); - if (!vmcs02) - return -ENOMEM; - enter_guest_mode(vcpu); if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); - vmx_switch_vmcs(vcpu, vmcs02); + vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); vmx_segment_cache_clear(vmx); if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) { @@ -10937,7 +11483,15 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) if (ret) return ret; - if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) + /* Hide L1D cache contents from the nested guest. */ + vmx->vcpu.arch.l1tf_flush_l1d = true; + + /* + * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken + * by event injection, halt vcpu. 
+ */ + if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) && + !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) return kvm_vcpu_halt(vcpu); vmx->nested.nested_run_pending = 1; @@ -11031,29 +11585,27 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long exit_qual; - - if (kvm_event_needs_reinjection(vcpu)) - return -EBUSY; + bool block_nested_events = + vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); if (vcpu->arch.exception.pending && nested_vmx_check_exception(vcpu, &exit_qual)) { - if (vmx->nested.nested_run_pending) + if (block_nested_events) return -EBUSY; nested_vmx_inject_exception_vmexit(vcpu, exit_qual); - vcpu->arch.exception.pending = false; return 0; } if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && vmx->nested.preemption_timer_expired) { - if (vmx->nested.nested_run_pending) + if (block_nested_events) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); return 0; } if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { - if (vmx->nested.nested_run_pending) + if (block_nested_events) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, NMI_VECTOR | INTR_TYPE_NMI_INTR | @@ -11069,7 +11621,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && nested_exit_on_intr(vcpu)) { - if (vmx->nested.nested_run_pending) + if (block_nested_events) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); return 0; @@ -11256,6 +11808,24 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, kvm_clear_interrupt_queue(vcpu); } +static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + u32 entry_failure_code; + + nested_ept_uninit_mmu_context(vcpu); + + /* + * Only PDPTE load can fail as the value of cr3 was checked on entry and + * couldn't have changed. + */ + if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) + nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); + + if (!enable_ept) + vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; +} + /* * A part of what we need to when the nested L2 guest exits and we want to * run its L1 parent, is to reset L1's guest state to the host state specified @@ -11269,7 +11839,6 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct kvm_segment seg; - u32 entry_failure_code; if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) vcpu->arch.efer = vmcs12->host_ia32_efer; @@ -11296,17 +11865,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); vmx_set_cr4(vcpu, vmcs12->host_cr4); - nested_ept_uninit_mmu_context(vcpu); - - /* - * Only PDPTE load can fail as the value of cr3 was checked on entry and - * couldn't have changed. 
- */ - if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) - nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); - - if (!enable_ept) - vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; + load_vmcs12_mmu_host_state(vcpu, vmcs12); if (enable_vpid) { /* @@ -11325,6 +11884,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); + vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); + vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */ if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) @@ -11388,7 +11949,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vmcs_write64(GUEST_IA32_DEBUGCTL, 0); if (cpu_has_vmx_msr_bitmap()) - vmx_set_msr_bitmap(vcpu); + vmx_update_msr_bitmap(vcpu); if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, vmcs12->vm_exit_msr_load_count)) @@ -11434,13 +11995,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, vm_exit_controls_reset_shadow(vmx); vmx_segment_cache_clear(vmx); - /* if no vmcs02 cache requested, remove the one we used */ - if (VMCS02_POOL_SIZE == 0) - nested_free_vmcs02(vmx, vmx->nested.current_vmptr); - /* Update any VMCS fields that might have changed while L2 ran */ - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); if (vmx->hv_deadline_tsc == -1) vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, @@ -11530,6 +12087,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, * accordingly. 
*/ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); + + load_vmcs12_mmu_host_state(vcpu, vmcs12); + /* * The emulated instruction was already skipped in * nested_vmx_run, but the updated RIP was never @@ -11907,7 +12467,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); vcpu_info.vector = irq.vector; - trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi, + trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi, vcpu_info.vector, vcpu_info.pi_desc_addr, set); if (set) @@ -11947,7 +12507,9 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .hardware_enable = hardware_enable, .hardware_disable = hardware_disable, .cpu_has_accelerated_tpr = report_flexpriority, - .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase, + .has_emulated_msr = vmx_has_emulated_msr, + + .vm_init = vmx_vm_init, .vcpu_create = vmx_create_vcpu, .vcpu_free = vmx_free_vcpu, @@ -11958,6 +12520,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .vcpu_put = vmx_vcpu_put, .update_bp_intercept = update_exception_bitmap, + .get_msr_feature = vmx_get_msr_feature, .get_msr = vmx_get_msr, .set_msr = vmx_set_msr, .get_segment_base = vmx_get_segment_base, @@ -12065,22 +12628,18 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .setup_mce = vmx_setup_mce, }; -static int __init vmx_init(void) +static void vmx_cleanup_l1d_flush(void) { - int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), - __alignof__(struct vcpu_vmx), THIS_MODULE); - if (r) - return r; - -#ifdef CONFIG_KEXEC_CORE - rcu_assign_pointer(crash_vmclear_loaded_vmcss, - crash_vmclear_local_loaded_vmcss); -#endif - - return 0; + if (vmx_l1d_flush_pages) { + free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER); + vmx_l1d_flush_pages = NULL; + } + /* Restore state so sysfs ignores VMX */ + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; } -static void __exit vmx_exit(void) + +static void vmx_exit(void) { #ifdef CONFIG_KEXEC_CORE RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); @@ -12088,7 +12647,40 @@ static void __exit vmx_exit(void) #endif kvm_exit(); + + vmx_cleanup_l1d_flush(); } +module_exit(vmx_exit) + +static int __init vmx_init(void) +{ + int r; + r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), + __alignof__(struct vcpu_vmx), THIS_MODULE); + if (r) + return r; + + /* + * Must be called after kvm_init() so enable_ept is properly set + * up. Hand the parameter mitigation value in which was stored in + * the pre module init parser. If no parameter was given, it will + * contain 'auto' which will be turned into the default 'cond' + * mitigation mode. 
+ */ + if (boot_cpu_has(X86_BUG_L1TF)) { + r = vmx_setup_l1d_flush(vmentry_l1d_flush_param); + if (r) { + vmx_exit(); + return r; + } + } + +#ifdef CONFIG_KEXEC_CORE + rcu_assign_pointer(crash_vmclear_loaded_vmcss, + crash_vmclear_local_loaded_vmcss); +#endif + + return 0; +} module_init(vmx_init) -module_exit(vmx_exit) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 03869eb7fcd6..5c2c09f6c1c3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -181,6 +181,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "irq_injections", VCPU_STAT(irq_injections) }, { "nmi_injections", VCPU_STAT(nmi_injections) }, { "req_event", VCPU_STAT(req_event) }, + { "l1d_flush", VCPU_STAT(l1d_flush) }, { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, { "mmu_pte_write", VM_STAT(mmu_pte_write) }, { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, @@ -836,7 +837,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) } if (is_long_mode(vcpu) && - (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 62))) + (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63))) return 1; else if (is_pae(vcpu) && is_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) @@ -1006,6 +1007,7 @@ static u32 msrs_to_save[] = { #endif MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, + MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES }; static unsigned num_msrs_to_save; @@ -1035,10 +1037,76 @@ static u32 emulated_msrs[] = { MSR_IA32_SMBASE, MSR_PLATFORM_INFO, MSR_MISC_FEATURES_ENABLES, + MSR_AMD64_VIRT_SPEC_CTRL, }; static unsigned num_emulated_msrs; +/* + * List of msr numbers which are used to expose MSR-based features that + * can be used by a hypervisor to validate requested CPU features. + */ +static u32 msr_based_features[] = { + MSR_F10H_DECFG, + MSR_IA32_UCODE_REV, + MSR_IA32_ARCH_CAPABILITIES, +}; + +static unsigned int num_msr_based_features; + +u64 kvm_get_arch_capabilities(void) +{ + u64 data; + + rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data); + + /* + * If we're doing cache flushes (either "always" or "cond") + * we will do one whenever the guest does a vmlaunch/vmresume. + * If an outer hypervisor is doing the cache flush for us + * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that + * capability to the guest too, and if EPT is disabled we're not + * vulnerable. Overall, only VMENTER_L1D_FLUSH_NEVER will + * require a nested hypervisor to do a flush of its own. 
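The pass-through rule described in the comment above fits in one expression: any flush mode other than 'never' lets a nested hypervisor safely skip its own vmentry flush. Sketched as a pure function; the enum is invented, and the bit value assumes ARCH_CAP_SKIP_VMENTRY_L1DFLUSH is bit 3 as in the kernel headers:

#include <stdint.h>

#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1ULL << 3)

enum l1d_flush_mode { FLUSH_NEVER, FLUSH_COND, FLUSH_ALWAYS };

static uint64_t arch_capabilities(uint64_t host_caps, enum l1d_flush_mode mode)
{
    uint64_t data = host_caps;

    /* We flush on nested vmentry ourselves, so the guest may skip it. */
    if (mode != FLUSH_NEVER)
        data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
    return data;
}

int main(void)
{
    return arch_capabilities(0, FLUSH_COND) ? 0 : 1;
}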
+ */ + if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER) + data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH; + + return data; +} +EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities); + +static int kvm_get_msr_feature(struct kvm_msr_entry *msr) +{ + switch (msr->index) { + case MSR_IA32_ARCH_CAPABILITIES: + msr->data = kvm_get_arch_capabilities(); + break; + case MSR_IA32_UCODE_REV: + rdmsrl_safe(msr->index, &msr->data); + break; + default: + if (kvm_x86_ops->get_msr_feature(msr)) + return 1; + } + return 0; +} + +static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) +{ + struct kvm_msr_entry msr; + int r; + + msr.index = index; + r = kvm_get_msr_feature(&msr); + if (r) + return r; + + *data = msr.data; + + return 0; +} + bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) { if (efer & efer_reserved_bits) @@ -1795,10 +1863,13 @@ u64 get_kvmclock_ns(struct kvm *kvm) /* both __this_cpu_read() and rdtsc() should be on the same cpu */ get_cpu(); - kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, - &hv_clock.tsc_shift, - &hv_clock.tsc_to_system_mul); - ret = __pvclock_read_cycles(&hv_clock, rdtsc()); + if (__this_cpu_read(cpu_tsc_khz)) { + kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, + &hv_clock.tsc_shift, + &hv_clock.tsc_to_system_mul); + ret = __pvclock_read_cycles(&hv_clock, rdtsc()); + } else + ret = ktime_get_boot_ns() + ka->kvmclock_offset; put_cpu(); @@ -1830,6 +1901,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v) */ BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0); + if (guest_hv_clock.version & 1) + ++guest_hv_clock.version; /* first time write, random junk */ + vcpu->hv_clock.version = guest_hv_clock.version + 1; kvm_write_guest_cached(v->kvm, &vcpu->pv_time, &vcpu->hv_clock, @@ -2148,7 +2222,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr) { case MSR_AMD64_NB_CFG: - case MSR_IA32_UCODE_REV: case MSR_IA32_UCODE_WRITE: case MSR_VM_HSAVE_PA: case MSR_AMD64_PATCH_LOADER: @@ -2156,6 +2229,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_DC_CFG: break; + case MSR_IA32_UCODE_REV: + if (msr_info->host_initiated) + vcpu->arch.microcode_version = data; + break; case MSR_EFER: return set_efer(vcpu, data); case MSR_K7_HWCR: @@ -2442,7 +2519,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = 0; break; case MSR_IA32_UCODE_REV: - msr_info->data = 0x100000000ULL; + msr_info->data = vcpu->arch.microcode_version; break; case MSR_MTRRcap: case 0x200 ... 0x2ff: @@ -2592,13 +2669,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data)) { - int i, idx; + int i; - idx = srcu_read_lock(&vcpu->kvm->srcu); for (i = 0; i < msrs->nmsrs; ++i) if (do_msr(vcpu, entries[i].index, &entries[i].data)) break; - srcu_read_unlock(&vcpu->kvm->srcu, idx); return i; } @@ -2697,6 +2772,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_SET_BOOT_CPU_ID: case KVM_CAP_SPLIT_IRQCHIP: case KVM_CAP_IMMEDIATE_EXIT: + case KVM_CAP_GET_MSR_FEATURES: r = 1; break; case KVM_CAP_ADJUST_CLOCK: @@ -2714,7 +2790,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) * fringe case that is not enabled except via specific settings * of the module parameters. 
*/ - r = kvm_x86_ops->cpu_has_high_real_mode_segbase(); + r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE); break; case KVM_CAP_VAPIC: r = !kvm_x86_ops->cpu_has_accelerated_tpr(); @@ -2811,6 +2887,31 @@ long kvm_arch_dev_ioctl(struct file *filp, goto out; r = 0; break; + case KVM_GET_MSR_FEATURE_INDEX_LIST: { + struct kvm_msr_list __user *user_msr_list = argp; + struct kvm_msr_list msr_list; + unsigned int n; + + r = -EFAULT; + if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) + goto out; + n = msr_list.nmsrs; + msr_list.nmsrs = num_msr_based_features; + if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) + goto out; + r = -E2BIG; + if (n < msr_list.nmsrs) + goto out; + r = -EFAULT; + if (copy_to_user(user_msr_list->indices, &msr_based_features, + num_msr_based_features * sizeof(u32))) + goto out; + r = 0; + break; + } + case KVM_GET_MSRS: + r = msr_io(NULL, argp, do_get_msr_feature, 1); + break; } default: r = -EINVAL; @@ -2919,6 +3020,12 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) kvm_x86_ops->vcpu_put(vcpu); kvm_put_guest_fpu(vcpu); vcpu->arch.last_host_tsc = rdtsc(); + /* + * If userspace has set any breakpoints or watchpoints, dr6 is restored + * on every vmexit, but if not, we might have a stale dr6 from the + * guest. do_debug expects dr6 to be cleared after it runs, do the same. + */ + set_debugreg(0, 6); } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, @@ -3539,12 +3646,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = 0; break; } - case KVM_GET_MSRS: + case KVM_GET_MSRS: { + int idx = srcu_read_lock(&vcpu->kvm->srcu); r = msr_io(vcpu, argp, do_get_msr, 1); + srcu_read_unlock(&vcpu->kvm->srcu, idx); break; - case KVM_SET_MSRS: + } + case KVM_SET_MSRS: { + int idx = srcu_read_lock(&vcpu->kvm->srcu); r = msr_io(vcpu, argp, do_set_msr, 0); + srcu_read_unlock(&vcpu->kvm->srcu, idx); break; + } case KVM_TPR_ACCESS_REPORTING: { struct kvm_tpr_access_ctl tac; @@ -4212,13 +4325,14 @@ long kvm_arch_vm_ioctl(struct file *filp, mutex_unlock(&kvm->lock); break; case KVM_XEN_HVM_CONFIG: { + struct kvm_xen_hvm_config xhc; r = -EFAULT; - if (copy_from_user(&kvm->arch.xen_hvm_config, argp, - sizeof(struct kvm_xen_hvm_config))) + if (copy_from_user(&xhc, argp, sizeof(xhc))) goto out; r = -EINVAL; - if (kvm->arch.xen_hvm_config.flags) + if (xhc.flags) goto out; + memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc)); r = 0; break; } @@ -4310,20 +4424,27 @@ static void kvm_init_msr_list(void) num_msrs_to_save = j; for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) { - switch (emulated_msrs[i]) { - case MSR_IA32_SMBASE: - if (!kvm_x86_ops->cpu_has_high_real_mode_segbase()) - continue; - break; - default: - break; - } + if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i])) + continue; if (j < i) emulated_msrs[j] = emulated_msrs[i]; j++; } num_emulated_msrs = j; + + for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) { + struct kvm_msr_entry msr; + + msr.index = msr_based_features[i]; + if (kvm_get_msr_feature(&msr)) + continue; + + if (j < i) + msr_based_features[j] = msr_based_features[i]; + j++; + } + num_msr_based_features = j; } static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, @@ -4359,7 +4480,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) addr, n, v)) && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) break; - trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); + trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v); handled += n; addr += n; len -= n; @@ -4483,11 +4604,10 @@ static int 
kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, return X86EMUL_CONTINUE; } -int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, +int kvm_read_guest_virt(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { - struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, @@ -4495,12 +4615,17 @@ int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, } EXPORT_SYMBOL_GPL(kvm_read_guest_virt); -static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, - gva_t addr, void *val, unsigned int bytes, - struct x86_exception *exception) +static int emulator_read_std(struct x86_emulate_ctxt *ctxt, + gva_t addr, void *val, unsigned int bytes, + struct x86_exception *exception, bool system) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); + u32 access = 0; + + if (!system && kvm_x86_ops->get_cpl(vcpu) == 3) + access |= PFERR_USER_MASK; + + return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); } static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt, @@ -4512,18 +4637,16 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt, return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE; } -int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, - gva_t addr, void *val, - unsigned int bytes, - struct x86_exception *exception) +static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, + struct kvm_vcpu *vcpu, u32 access, + struct x86_exception *exception) { - struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, - PFERR_WRITE_MASK, + access, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); @@ -4544,6 +4667,30 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, out: return r; } + +static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, + unsigned int bytes, struct x86_exception *exception, + bool system) +{ + struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); + u32 access = PFERR_WRITE_MASK; + + if (!system && kvm_x86_ops->get_cpl(vcpu) == 3) + access |= PFERR_USER_MASK; + + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, + access, exception); +} + +int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, + unsigned int bytes, struct x86_exception *exception) +{ + /* kvm_write_guest_virt_system can pull in tons of pages. 
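emulator_read_std() and emulator_write_std() above both reduce to the same access-mask computation: implicit (system) accesses never carry the user bit, explicit ones do whenever the guest runs at CPL 3. Isolated for clarity; the bit positions follow the kernel's PFERR_*_MASK defines:

#include <stdbool.h>
#include <stdint.h>

#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK  (1U << 2)

/* Mirror of the mask computation in emulator_read_std()/_write_std(). */
static uint32_t access_flags(bool write, bool system, int cpl)
{
    uint32_t access = write ? PFERR_WRITE_MASK : 0;

    if (!system && cpl == 3)
        access |= PFERR_USER_MASK;   /* user-mode guest, explicit access */
    return access;
}

int main(void)
{
    /* An explicit CPL-3 write carries both bits: 0x2 | 0x4 = 0x6. */
    return access_flags(true, false, 3) == 0x6 ? 0 : 1;
}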
*/ + vcpu->arch.l1tf_flush_l1d = true; + + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, + PFERR_WRITE_MASK, exception); +} EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, @@ -4618,7 +4765,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) { if (vcpu->mmio_read_completed) { trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, - vcpu->mmio_fragments[0].gpa, *(u64 *)val); + vcpu->mmio_fragments[0].gpa, val); vcpu->mmio_read_completed = 0; return 1; } @@ -4640,14 +4787,14 @@ static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) { - trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val); + trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val); return vcpu_mmio_write(vcpu, gpa, bytes, val); } static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { - trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0); + trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL); return X86EMUL_IO_NEEDED; } @@ -5278,8 +5425,8 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla static const struct x86_emulate_ops emulate_ops = { .read_gpr = emulator_read_gpr, .write_gpr = emulator_write_gpr, - .read_std = kvm_read_guest_virt_system, - .write_std = kvm_write_guest_virt_system, + .read_std = emulator_read_std, + .write_std = emulator_write_std, .read_phys = kvm_read_guest_phys_system, .fetch = kvm_fetch_guest_virt, .read_emulated = emulator_read_emulated, @@ -5413,7 +5560,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu) vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; - r = EMULATE_FAIL; + r = EMULATE_USER_EXIT; } kvm_queue_exception(vcpu, UD_VECTOR); @@ -5669,6 +5816,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, bool writeback = true; bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; + vcpu->arch.l1tf_flush_l1d = true; + /* * Clear write_fault_to_shadow_pgtable here to ensure it is * never reused. @@ -5685,7 +5834,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, * handle watchpoints yet, those would be handled in * the emulate_ops. 
*/ - if (kvm_vcpu_check_breakpoint(vcpu, &r)) + if (!(emulation_type & EMULTYPE_SKIP) && + kvm_vcpu_check_breakpoint(vcpu, &r)) return r; ctxt->interruptibility = 0; @@ -5705,6 +5855,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, emulation_type)) return EMULATE_DONE; + if (ctxt->have_exception && inject_emulated_exception(vcpu)) + return EMULATE_DONE; if (emulation_type & EMULTYPE_SKIP) return EMULATE_FAIL; return handle_emulation_failure(vcpu); @@ -6262,12 +6414,13 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu) int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) { unsigned long nr, a0, a1, a2, a3, ret; - int op_64_bit, r; - - r = kvm_skip_emulated_instruction(vcpu); + int op_64_bit; - if (kvm_hv_hypercall_enabled(vcpu->kvm)) - return kvm_hv_hypercall(vcpu); + if (kvm_hv_hypercall_enabled(vcpu->kvm)) { + if (!kvm_hv_hypercall(vcpu)) + return 0; + goto out; + } nr = kvm_register_read(vcpu, VCPU_REGS_RAX); a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); @@ -6288,7 +6441,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) if (kvm_x86_ops->get_cpl(vcpu) != 0) { ret = -KVM_EPERM; - goto out; + goto out_error; } switch (nr) { @@ -6308,12 +6461,14 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) ret = -KVM_ENOSYS; break; } -out: +out_error: if (!op_64_bit) ret = (u32)ret; kvm_register_write(vcpu, VCPU_REGS_RAX, ret); + +out: ++vcpu->stat.hypercalls; - return r; + return kvm_skip_emulated_instruction(vcpu); } EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); @@ -6740,6 +6895,20 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu) kvm_x86_ops->tlb_flush(vcpu); } +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end) +{ + unsigned long apic_address; + + /* + * The physical address of apic access page is stored in the VMCS. + * Update it when it becomes invalid. 
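+ * The request raised below is serviced on the next entry to the guest;
+ * roughly, on the consumer side in vcpu_enter_guest() (sketch):
+ *
+ *	if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
+ *		kvm_vcpu_reload_apic_access_page(vcpu);
+ *
+ * kvm_vcpu_reload_apic_access_page() (just below) then re-resolves the
+ * page and hands the new physical address to the VMCS.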
+ */
+ apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+ if (start <= apic_address && apic_address < end)
+ kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+}
+ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) { struct page *page = NULL; @@ -7094,6 +7263,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); + vcpu->arch.l1tf_flush_l1d = true; for (;;) { if (kvm_vcpu_running(vcpu)) { @@ -7223,12 +7393,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct fpu *fpu = &current->thread.fpu; int r; - sigset_t sigsaved; fpu__initialize(fpu); - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + kvm_sigset_activate(vcpu); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { if (kvm_run->immediate_exit) { @@ -7271,8 +7439,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) out: post_kvm_run_save(vcpu); - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); + kvm_sigset_deactivate(vcpu); return r; } @@ -7340,7 +7507,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) #endif kvm_rip_write(vcpu, regs->rip); - kvm_set_rflags(vcpu, regs->rflags); + kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); vcpu->arch.exception.pending = false; @@ -7454,11 +7621,35 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, } EXPORT_SYMBOL_GPL(kvm_task_switch); +int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { + /* + * When EFER.LME and CR0.PG are set, the processor is in + * 64-bit mode (though maybe in a 32-bit code segment). + * CR4.PAE and EFER.LMA must be set. + */ + if (!(sregs->cr4 & X86_CR4_PAE) + || !(sregs->efer & EFER_LMA)) + return -EINVAL; + } else { + /* + * Not in 64-bit mode: EFER.LMA is clear and the code + * segment cannot be 64-bit.
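+ * For illustration, userspace passing EFER.LMA=1 (or a CS.L=1 64-bit
+ * code segment) while LME/PG above do not indicate 64-bit mode is
+ * rejected with -EINVAL by the check that follows.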
+ */ + if (sregs->efer & EFER_LMA || sregs->cs.l) + return -EINVAL; + } + + return 0; +} + int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { struct msr_data apic_base_msr; int mmu_reset_needed = 0; + int cpuid_update_needed = 0; int pending_vec, max_bits, idx; struct desc_ptr dt; @@ -7466,6 +7657,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, (sregs->cr4 & X86_CR4_OSXSAVE)) return -EINVAL; + if (kvm_valid_sregs(vcpu, sregs)) + return -EINVAL; + apic_base_msr.data = sregs->apic_base; apic_base_msr.host_initiated = true; if (kvm_set_apic_base(vcpu, &apic_base_msr)) @@ -7493,8 +7687,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, vcpu->arch.cr0 = sregs->cr0; mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; + cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) & + (X86_CR4_OSXSAVE | X86_CR4_PKE)); kvm_x86_ops->set_cr4(vcpu, sregs->cr4); - if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE)) + if (cpuid_update_needed) kvm_update_cpuid(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); @@ -7769,6 +7965,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) { + kvm_lapic_reset(vcpu, init_event); + vcpu->arch.hflags = 0; vcpu->arch.smi_pending = 0; @@ -8073,6 +8271,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) { + vcpu->arch.l1tf_flush_l1d = true; kvm_x86_ops->sched_in(vcpu, cpu); } @@ -8197,10 +8396,8 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) return r; } - if (!size) { - r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); - WARN_ON(r < 0); - } + if (!size) + vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); return 0; } diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index d0b95b7a90b4..d4b59cf0dc51 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -12,6 +12,7 @@ static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu) { + vcpu->arch.exception.pending = false; vcpu->arch.exception.injected = false; } @@ -212,11 +213,11 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr); u64 get_kvmclock_ns(struct kvm *kvm); -int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, +int kvm_read_guest_virt(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception); -int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, +int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception); diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 457f681ef379..f23934bbaf4e 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -24,8 +24,9 @@ lib-y := delay.o misc.o cmdline.o cpu.o lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o lib-y += memcpy_$(BITS).o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o -lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o +lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o +lib-$(CONFIG_RETPOLINE) += retpoline.o obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index 4d34bb548b41..46e71a74e612 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S @@ -29,7 +29,8 @@ #include #include #include - +#include + /* * computes a partial checksum, e.g. 
for TCP/UDP fragments */ @@ -156,7 +157,7 @@ ENTRY(csum_partial) negl %ebx lea 45f(%ebx,%ebx,2), %ebx testl %esi, %esi - jmp *%ebx + JMP_NOSPEC %ebx # Handle 2-byte-aligned regions 20: addw (%esi), %ax @@ -439,7 +440,7 @@ ENTRY(csum_partial_copy_generic) andl $-32,%edx lea 3f(%ebx,%ebx), %ebx testl %esi, %esi - jmp *%ebx + JMP_NOSPEC %ebx 1: addl $64,%esi addl $64,%edi SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c index d6f848d1211d..2dd1fe13a37b 100644 --- a/arch/x86/lib/cpu.c +++ b/arch/x86/lib/cpu.c @@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig) { unsigned int fam, model; - fam = x86_family(sig); + fam = x86_family(sig); model = (sig >> 4) & 0xf; diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index 553f8fd23cc4..4846eff7e4c8 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -107,10 +107,10 @@ static void delay_mwaitx(unsigned long __loops) delay = min_t(u64, MWAITX_MAX_LOOPS, loops); /* - * Use cpu_tss as a cacheline-aligned, seldomly + * Use cpu_tss_rw as a cacheline-aligned, seldomly * accessed per-cpu variable as the monitor target. */ - __monitorx(raw_cpu_ptr(&cpu_tss), 0, 0); + __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0); /* * AMD, like Intel, supports the EAX hint and EAX=0xf diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index c97d935a29e8..49b167f73215 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -40,6 +40,8 @@ ENTRY(__get_user_1) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ + and %_ASM_DX, %_ASM_AX ASM_STAC 1: movzbl (%_ASM_AX),%edx xor %eax,%eax @@ -54,6 +56,8 @@ ENTRY(__get_user_2) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ + and %_ASM_DX, %_ASM_AX ASM_STAC 2: movzwl -1(%_ASM_AX),%edx xor %eax,%eax @@ -68,6 +72,8 @@ ENTRY(__get_user_4) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ + and %_ASM_DX, %_ASM_AX ASM_STAC 3: movl -3(%_ASM_AX),%edx xor %eax,%eax @@ -83,6 +89,8 @@ ENTRY(__get_user_8) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ + and %_ASM_DX, %_ASM_AX ASM_STAC 4: movq -7(%_ASM_AX),%rdx xor %eax,%eax @@ -94,6 +102,8 @@ ENTRY(__get_user_8) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user_8 + sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ + and %_ASM_DX, %_ASM_AX ASM_STAC 4: movl -7(%_ASM_AX),%edx 5: movl -3(%_ASM_AX),%ecx diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c new file mode 100644 index 000000000000..e52857d64cac --- /dev/null +++ b/arch/x86/lib/insn-eval.c @@ -0,0 +1,1127 @@ +/* + * Utility functions for x86 operand and address decoding + * + * Copyright (C) Intel Corporation 2017 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef pr_fmt +#define pr_fmt(fmt) "insn: " fmt + +enum reg_type { + REG_TYPE_RM = 0, + REG_TYPE_INDEX, + REG_TYPE_BASE, +}; + +/** + * is_string_insn() - Determine if instruction is a string instruction + * @insn: Instruction structure containing the opcode + * + * Return: true if the instruction, determined by the opcode, is any of the + * string 
instructions as defined in the Intel Software Developer's Manual.
+ * False otherwise.
+ */
+static bool is_string_insn(struct insn *insn)
+{
+ insn_get_opcode(insn);
+
+ /* All string instructions have a 1-byte opcode. */
+ if (insn->opcode.nbytes != 1)
+ return false;
+
+ switch (insn->opcode.bytes[0]) {
+ case 0x6c ... 0x6f: /* INS, OUTS */
+ case 0xa4 ... 0xa7: /* MOVS, CMPS */
+ case 0xaa ... 0xaf: /* STOS, LODS, SCAS */
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * get_overridden_seg_reg() - obtain segment register to use from prefixes
+ * @insn: Instruction structure with segment override prefixes
+ * @regs: Structure with register values as seen when entering kernel mode
+ * @regoff: Operand offset, in pt_regs, used to determine segment register
+ *
+ * The segment register to which an effective address refers depends on
+ * a) whether running in long mode (in such a case segment override prefixes
+ * are ignored); b) whether segment override prefixes must be ignored for certain
+ * registers: always use CS when the register is (R|E)IP; always use ES when the
+ * operand register is (E)DI with a string instruction as defined in the Intel
+ * documentation; c) whether segment override prefixes are found in the
+ * instruction prefixes; d) otherwise, the default segment register associated
+ * with the operand register is used.
+ *
+ * This function returns the overridden segment register to use, if any, as per
+ * the conditions described above. Please note that this function
+ * does not return the value in the segment register (i.e., the segment
+ * selector). The segment selector needs to be obtained using
+ * get_segment_selector() and passing the segment register resolved by
+ * this function.
+ *
+ * Return: A constant identifying the segment register to use, among CS, SS, DS,
+ * ES, FS, or GS. INAT_SEG_REG_IGNORE is returned if running in long mode.
+ * INAT_SEG_REG_DEFAULT is returned if no segment override prefixes were found
+ * and the default segment register shall be used. -EINVAL in case of error.
+ */
+static int get_overridden_seg_reg(struct insn *insn, struct pt_regs *regs,
+ int regoff)
+{
+ int i;
+ int sel_overrides = 0;
+ int seg_register = INAT_SEG_REG_DEFAULT;
+
+ /*
+ * Segment override prefixes should not be used for (E)IP. Check this
+ * case first as we might not have (and do not need at all) a
+ * valid insn structure to evaluate segment override prefixes.
+ */
+ if (regoff == offsetof(struct pt_regs, ip)) {
+ if (user_64bit_mode(regs))
+ return INAT_SEG_REG_IGNORE;
+ else
+ return INAT_SEG_REG_DEFAULT;
+ }
+
+ if (!insn)
+ return -EINVAL;
+
+ insn_get_prefixes(insn);
+
+ /* Look for any segment override prefixes. */
+ for (i = 0; i < insn->prefixes.nbytes; i++) {
+ insn_attr_t attr;
+
+ attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
+ switch (attr) {
+ case INAT_MAKE_PREFIX(INAT_PFX_CS):
+ seg_register = INAT_SEG_REG_CS;
+ sel_overrides++;
+ break;
+ case INAT_MAKE_PREFIX(INAT_PFX_SS):
+ seg_register = INAT_SEG_REG_SS;
+ sel_overrides++;
+ break;
+ case INAT_MAKE_PREFIX(INAT_PFX_DS):
+ seg_register = INAT_SEG_REG_DS;
+ sel_overrides++;
+ break;
+ case INAT_MAKE_PREFIX(INAT_PFX_ES):
+ seg_register = INAT_SEG_REG_ES;
+ sel_overrides++;
+ break;
+ case INAT_MAKE_PREFIX(INAT_PFX_FS):
+ seg_register = INAT_SEG_REG_FS;
+ sel_overrides++;
+ break;
+ case INAT_MAKE_PREFIX(INAT_PFX_GS):
+ seg_register = INAT_SEG_REG_GS;
+ sel_overrides++;
+ break;
+ /* No default action needed.
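+ * Prefixes that are not segment overrides (e.g. the 0x66 operand-size
+ * and 0x67 address-size prefixes) match none of the cases above and
+ * are deliberately left to the generic instruction decoder.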
*/
+ }
+ }
+
+ /*
+ * In long mode, segment override prefixes are ignored, except for
+ * overrides for FS and GS.
+ */
+ if (user_64bit_mode(regs)) {
+ if (seg_register != INAT_SEG_REG_FS &&
+ seg_register != INAT_SEG_REG_GS)
+ return INAT_SEG_REG_IGNORE;
+ /* More than one segment override prefix leads to undefined behavior. */
+ } else if (sel_overrides > 1) {
+ return -EINVAL;
+ /*
+ * Segment override prefixes are always ignored for string instructions
+ * that involve the use of the (E)DI register.
+ */
+ } else if ((regoff == offsetof(struct pt_regs, di)) &&
+ is_string_insn(insn)) {
+ return INAT_SEG_REG_DEFAULT;
+ }
+
+ return seg_register;
+}
+
+/**
+ * resolve_seg_register() - obtain segment register
+ * @insn: Instruction structure with segment override prefixes
+ * @regs: Structure with register values as seen when entering kernel mode
+ * @regoff: Operand offset, in pt_regs, used to determine segment register
+ *
+ * Determine the segment register associated with the operands and, if
+ * applicable, prefixes and the instruction pointed by insn. The function first
+ * checks if the segment register shall be ignored or has been overridden in the
+ * instruction prefixes. Otherwise, it resolves the segment register to use
+ * based on the defaults described in the Intel documentation.
+ *
+ * The operand register, regoff, is represented as the offset from the base of
+ * pt_regs. Also, regoff can be -EDOM for cases in which registers are not
+ * used as operands (e.g., displacement-only memory addressing).
+ *
+ * Return: A constant identifying the segment register to use, among CS, SS, DS,
+ * ES, FS, or GS. INAT_SEG_REG_IGNORE is returned if running in long mode.
+ * -EINVAL in case of error.
+ */
+static int resolve_seg_register(struct insn *insn, struct pt_regs *regs,
+ int regoff)
+{
+ int seg_reg;
+
+ seg_reg = get_overridden_seg_reg(insn, regs, regoff);
+
+ if (seg_reg < 0)
+ return seg_reg;
+
+ if (seg_reg == INAT_SEG_REG_IGNORE)
+ return seg_reg;
+
+ if (seg_reg != INAT_SEG_REG_DEFAULT)
+ return seg_reg;
+
+ /*
+ * If we are here, we use the default segment register as described
+ * in the Intel documentation:
+ * + DS for all references involving (E)AX, (E)CX, (E)DX, (E)BX, and
+ * (E)SI.
+ * + If used in a string instruction, ES for (E)DI. Otherwise, DS.
+ * + AX, CX and DX are not valid register operands in 16-bit address
+ * encodings but are valid for 32-bit and 64-bit encodings.
+ * + -EDOM is reserved to identify cases in which no register
+ * is used (i.e., displacement-only addressing). Use DS.
+ * + SS for (E)SP or (E)BP.
+ * + CS for (E)IP.
+ */
+
+ switch (regoff) {
+ case offsetof(struct pt_regs, ax):
+ case offsetof(struct pt_regs, cx):
+ case offsetof(struct pt_regs, dx):
+ /* Need insn to verify address size. */
+ if (!insn || insn->addr_bytes == 2)
+ return -EINVAL;
+ case -EDOM:
+ case offsetof(struct pt_regs, bx):
+ case offsetof(struct pt_regs, si):
+ return INAT_SEG_REG_DS;
+ case offsetof(struct pt_regs, di):
+ /* Need insn to see if insn is string instruction.
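+ * (String instructions implicitly address their destination through
+ * ES:(E)DI, so the opcode must be inspected to pick ES over DS.)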
*/ + if (!insn) + return -EINVAL; + if (is_string_insn(insn)) + return INAT_SEG_REG_ES; + return INAT_SEG_REG_DS; + case offsetof(struct pt_regs, bp): + case offsetof(struct pt_regs, sp): + return INAT_SEG_REG_SS; + case offsetof(struct pt_regs, ip): + return INAT_SEG_REG_CS; + default: + return -EINVAL; + } +} + +/** + * get_segment_selector() - obtain segment selector + * @regs: Structure with register values as seen when entering kernel mode + * @seg_reg: Segment register to use + * + * Obtain the segment selector from any of the CS, SS, DS, ES, FS, GS segment + * registers. In CONFIG_X86_32, the segment is obtained from either pt_regs or + * kernel_vm86_regs as applicable. In CONFIG_X86_64, CS and SS are obtained + * from pt_regs. DS, ES, FS and GS are obtained by reading the actual CPU + * registers. This done for only for completeness as in CONFIG_X86_64 segment + * registers are ignored. + * + * Return: Value of the segment selector, including null when running in + * long mode. -1 on error. + */ +static short get_segment_selector(struct pt_regs *regs, int seg_reg) +{ +#ifdef CONFIG_X86_64 + unsigned short sel; + + switch (seg_reg) { + case INAT_SEG_REG_IGNORE: + return 0; + case INAT_SEG_REG_CS: + return (unsigned short)(regs->cs & 0xffff); + case INAT_SEG_REG_SS: + return (unsigned short)(regs->ss & 0xffff); + case INAT_SEG_REG_DS: + savesegment(ds, sel); + return sel; + case INAT_SEG_REG_ES: + savesegment(es, sel); + return sel; + case INAT_SEG_REG_FS: + savesegment(fs, sel); + return sel; + case INAT_SEG_REG_GS: + savesegment(gs, sel); + return sel; + default: + return -EINVAL; + } +#else /* CONFIG_X86_32 */ + struct kernel_vm86_regs *vm86regs = (struct kernel_vm86_regs *)regs; + + if (v8086_mode(regs)) { + switch (seg_reg) { + case INAT_SEG_REG_CS: + return (unsigned short)(regs->cs & 0xffff); + case INAT_SEG_REG_SS: + return (unsigned short)(regs->ss & 0xffff); + case INAT_SEG_REG_DS: + return vm86regs->ds; + case INAT_SEG_REG_ES: + return vm86regs->es; + case INAT_SEG_REG_FS: + return vm86regs->fs; + case INAT_SEG_REG_GS: + return vm86regs->gs; + case INAT_SEG_REG_IGNORE: + /* fall through */ + default: + return -EINVAL; + } + } + + switch (seg_reg) { + case INAT_SEG_REG_CS: + return (unsigned short)(regs->cs & 0xffff); + case INAT_SEG_REG_SS: + return (unsigned short)(regs->ss & 0xffff); + case INAT_SEG_REG_DS: + return (unsigned short)(regs->ds & 0xffff); + case INAT_SEG_REG_ES: + return (unsigned short)(regs->es & 0xffff); + case INAT_SEG_REG_FS: + return (unsigned short)(regs->fs & 0xffff); + case INAT_SEG_REG_GS: + /* + * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS. + * The macro below takes care of both cases. 
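+ * Roughly: with CONFIG_X86_32_LAZY_GS the user %gs selector is still
+ * live in the register and is read via savesegment(), while without
+ * lazy GS it was saved into regs->gs on kernel entry; a sketch of the
+ * behavior the macro is assumed to provide:
+ *
+ *	#ifdef CONFIG_X86_32_LAZY_GS
+ *		savesegment(gs, sel);
+ *	#else
+ *		sel = regs->gs;
+ *	#endif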
+ */
+ return get_user_gs(regs);
+ case INAT_SEG_REG_IGNORE:
+ /* fall through */
+ default:
+ return -EINVAL;
+ }
+#endif /* CONFIG_X86_64 */
+}
+
+static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
+ enum reg_type type)
+{
+ int regno = 0;
+
+ static const int regoff[] = {
+ offsetof(struct pt_regs, ax),
+ offsetof(struct pt_regs, cx),
+ offsetof(struct pt_regs, dx),
+ offsetof(struct pt_regs, bx),
+ offsetof(struct pt_regs, sp),
+ offsetof(struct pt_regs, bp),
+ offsetof(struct pt_regs, si),
+ offsetof(struct pt_regs, di),
+#ifdef CONFIG_X86_64
+ offsetof(struct pt_regs, r8),
+ offsetof(struct pt_regs, r9),
+ offsetof(struct pt_regs, r10),
+ offsetof(struct pt_regs, r11),
+ offsetof(struct pt_regs, r12),
+ offsetof(struct pt_regs, r13),
+ offsetof(struct pt_regs, r14),
+ offsetof(struct pt_regs, r15),
+#endif
+ };
+ int nr_registers = ARRAY_SIZE(regoff);
+ /*
+ * Don't possibly decode a 32-bit instruction as
+ * reading a 64-bit-only register.
+ */
+ if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64)
+ nr_registers -= 8;
+
+ switch (type) {
+ case REG_TYPE_RM:
+ regno = X86_MODRM_RM(insn->modrm.value);
+
+ /*
+ * ModRM.mod == 0 and ModRM.rm == 5 means a 32-bit displacement
+ * follows the ModRM byte.
+ */
+ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5)
+ return -EDOM;
+
+ if (X86_REX_B(insn->rex_prefix.value))
+ regno += 8;
+ break;
+
+ case REG_TYPE_INDEX:
+ regno = X86_SIB_INDEX(insn->sib.value);
+ if (X86_REX_X(insn->rex_prefix.value))
+ regno += 8;
+
+ /*
+ * If ModRM.mod != 3 and SIB.index = 4, the scale*index
+ * portion of the address computation is null; this holds
+ * only when REX.X is 0. With REX.X set, index 4 names R12
+ * and the SIB index is used in the address computation.
+ */
+ if (X86_MODRM_MOD(insn->modrm.value) != 3 && regno == 4)
+ return -EDOM;
+ break;
+
+ case REG_TYPE_BASE:
+ regno = X86_SIB_BASE(insn->sib.value);
+ /*
+ * If ModRM.mod is 0 and SIB.base == 5, the base of the
+ * register-indirect addressing is 0. In this case, a
+ * 32-bit displacement follows the SIB byte.
+ */
+ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5)
+ return -EDOM;
+
+ if (X86_REX_B(insn->rex_prefix.value))
+ regno += 8;
+ break;
+
+ default:
+ pr_err_ratelimited("invalid register type: %d\n", type);
+ return -EINVAL;
+ }
+
+ if (regno >= nr_registers) {
+ WARN_ONCE(1, "decoded an instruction with an invalid register");
+ return -EINVAL;
+ }
+ return regoff[regno];
+}
+
+/**
+ * get_reg_offset_16() - Obtain offset of register indicated by instruction
+ * @insn: Instruction structure containing ModRM and SIB bytes
+ * @regs: Structure with register values as seen when entering kernel mode
+ * @offs1: Offset of the first operand register
+ * @offs2: Offset of the second operand register, if applicable
+ *
+ * Obtain the offset, in pt_regs, of the registers indicated by the ModRM byte
+ * within insn. This function is to be used with 16-bit address encodings. The
+ * offs1 and offs2 will be written with the offset of the two registers
+ * indicated by the instruction. In cases where any of the registers is not
+ * referenced by the instruction, the value will be set to -EDOM.
+ *
+ * Return: 0 on success, -EINVAL on failure.
+ */
+static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
+ int *offs1, int *offs2)
+{
+ /*
+ * 16-bit addressing can use one or two registers. Specifics of
+ * encodings are given in Table 2-1. "16-Bit Addressing Forms with the
+ * ModR/M Byte" of the Intel Software Developer's Manual.
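+ * For reference, the eight ModRM.rm encodings and the (first, second)
+ * registers they name, mirrored by the regoff1/regoff2 tables below:
+ *
+ *	000: BX+SI  001: BX+DI  010: BP+SI  011: BP+DI
+ *	100: SI     101: DI     110: BP*    111: BX
+ *
+ * (*) rm=110 with ModRM.mod=0 means a bare 16-bit displacement instead.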
+ */
+ static const int regoff1[] = {
+ offsetof(struct pt_regs, bx),
+ offsetof(struct pt_regs, bx),
+ offsetof(struct pt_regs, bp),
+ offsetof(struct pt_regs, bp),
+ offsetof(struct pt_regs, si),
+ offsetof(struct pt_regs, di),
+ offsetof(struct pt_regs, bp),
+ offsetof(struct pt_regs, bx),
+ };
+
+ static const int regoff2[] = {
+ offsetof(struct pt_regs, si),
+ offsetof(struct pt_regs, di),
+ offsetof(struct pt_regs, si),
+ offsetof(struct pt_regs, di),
+ -EDOM,
+ -EDOM,
+ -EDOM,
+ -EDOM,
+ };
+
+ if (!offs1 || !offs2)
+ return -EINVAL;
+
+ /* Operand is a register, use the generic function. */
+ if (X86_MODRM_MOD(insn->modrm.value) == 3) {
+ *offs1 = insn_get_modrm_rm_off(insn, regs);
+ *offs2 = -EDOM;
+ return 0;
+ }
+
+ *offs1 = regoff1[X86_MODRM_RM(insn->modrm.value)];
+ *offs2 = regoff2[X86_MODRM_RM(insn->modrm.value)];
+
+ /*
+ * If ModRM.mod is 0 and ModRM.rm is 110b, then we use displacement-
+ * only addressing. This means that no registers are involved in
+ * computing the effective address. Thus, ensure that the first
+ * register offset is invalid. The second register offset is already
+ * invalid under the aforementioned conditions.
+ */
+ if ((X86_MODRM_MOD(insn->modrm.value) == 0) &&
+ (X86_MODRM_RM(insn->modrm.value) == 6))
+ *offs1 = -EDOM;
+
+ return 0;
+}
+
+/**
+ * get_desc() - Obtain address of segment descriptor
+ * @sel: Segment selector
+ *
+ * Given a segment selector, obtain a pointer to the segment descriptor.
+ * Both global and local descriptor tables are supported.
+ *
+ * Return: pointer to segment descriptor on success. NULL on error.
+ */
+static struct desc_struct *get_desc(unsigned short sel)
+{
+ struct desc_ptr gdt_desc = {0, 0};
+ struct desc_struct *desc = NULL;
+ unsigned long desc_base;
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+ /* Bits [15:3] contain the index of the desired entry. */
+ sel >>= 3;
+
+ mutex_lock(&current->active_mm->context.lock);
+ /* The size of the LDT refers to the number of entries. */
+ if (!current->active_mm->context.ldt ||
+ sel >= current->active_mm->context.ldt->nr_entries) {
+ mutex_unlock(&current->active_mm->context.lock);
+ return NULL;
+ }
+
+ desc = &current->active_mm->context.ldt->entries[sel];
+ mutex_unlock(&current->active_mm->context.lock);
+ return desc;
+ }
+#endif
+ native_store_gdt(&gdt_desc);
+
+ /*
+ * Segment descriptors have a size of 8 bytes. Thus, the index is
+ * multiplied by 8 to obtain the memory offset of the desired descriptor
+ * from the base of the GDT. As bits [15:3] of the segment selector
+ * contain the index, it can be regarded as multiplied by 8 already.
+ * All that remains is to clear bits [2:0].
+ */
+ desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK);
+
+ if (desc_base > gdt_desc.size)
+ return NULL;
+
+ desc = (struct desc_struct *)(gdt_desc.address + desc_base);
+ return desc;
+}
+
+/**
+ * insn_get_seg_base() - Obtain base address of segment descriptor.
+ * @regs: Structure with register values as seen when entering kernel mode
+ * @insn: Instruction structure with selector override prefixes
+ * @regoff: Operand offset, in pt_regs, of which the selector is needed
+ *
+ * Obtain the base address of the segment descriptor as indicated by either
+ * any segment override prefixes contained in insn or the default segment
+ * applicable to the register indicated by regoff. regoff is specified as the
+ * offset in bytes from the base of pt_regs.
+ *
+ * Return: In protected mode, base address of the segment.
Zero in long mode, + * except when FS or GS are used. In virtual-8086 mode, the segment + * selector shifted 4 positions to the right. -1L in case of + * error. + */ +unsigned long insn_get_seg_base(struct pt_regs *regs, struct insn *insn, + int regoff) +{ + struct desc_struct *desc; + int seg_reg; + short sel; + + seg_reg = resolve_seg_register(insn, regs, regoff); + if (seg_reg < 0) + return -1L; + + sel = get_segment_selector(regs, seg_reg); + if (sel < 0) + return -1L; + + if (v8086_mode(regs)) + /* + * Base is simply the segment selector shifted 4 + * positions to the right. + */ + return (unsigned long)(sel << 4); + + if (user_64bit_mode(regs)) { + /* + * Only FS or GS will have a base address, the rest of + * the segments' bases are forced to 0. + */ + unsigned long base; + + if (seg_reg == INAT_SEG_REG_FS) + rdmsrl(MSR_FS_BASE, base); + else if (seg_reg == INAT_SEG_REG_GS) + /* + * swapgs was called at the kernel entry point. Thus, + * MSR_KERNEL_GS_BASE will have the user-space GS base. + */ + rdmsrl(MSR_KERNEL_GS_BASE, base); + else if (seg_reg != INAT_SEG_REG_IGNORE) + /* We should ignore the rest of segment registers. */ + base = -1L; + else + base = 0; + return base; + } + + /* In protected mode the segment selector cannot be null. */ + if (!sel) + return -1L; + + desc = get_desc(sel); + if (!desc) + return -1L; + + return get_desc_base(desc); +} + +/** + * get_seg_limit() - Obtain the limit of a segment descriptor + * @regs: Structure with register values as seen when entering kernel mode + * @insn: Instruction structure with selector override prefixes + * @regoff: Operand offset, in pt_regs, of which the selector is needed + * + * Obtain the limit of the segment descriptor. The segment selector is obtained + * from the relevant segment register determined by inspecting any segment + * override prefixes or the default segment register associated with regoff. + * regoff is specified as the offset in bytes from the base * of pt_regs. + * + * Return: In protected mode, the limit of the segment descriptor in bytes. + * In long mode and virtual-8086 mode, segment limits are not enforced. Thus, + * limit is returned as -1L to imply a limit-less segment. Zero is returned on + * error. + */ +static unsigned long get_seg_limit(struct pt_regs *regs, struct insn *insn, + int regoff) +{ + struct desc_struct *desc; + unsigned long limit; + int seg_reg; + short sel; + + seg_reg = resolve_seg_register(insn, regs, regoff); + if (seg_reg < 0) + return 0; + + sel = get_segment_selector(regs, seg_reg); + if (sel < 0) + return 0; + + if (user_64bit_mode(regs) || v8086_mode(regs)) + return -1L; + + if (!sel) + return 0; + + desc = get_desc(sel); + if (!desc) + return 0; + + /* + * If the granularity bit is set, the limit is given in multiples + * of 4096. This also means that the 12 least significant bits are + * not tested when checking the segment limits. In practice, + * this means that the segment ends in (limit << 12) + 0xfff. + */ + limit = get_desc_limit(desc); + if (desc->g) + limit = (limit << 12) + 0xfff; + + return limit; +} + +/** + * insn_get_code_seg_defaults() - Obtain code segment default parameters + * @regs: Structure with register values as seen when entering kernel mode + * + * Obtain the default parameters of the code segment: address and operand sizes. + * The code segment is obtained from the selector contained in the CS register + * in regs. In protected mode, the default address is determined by inspecting + * the L and D bits of the segment descriptor. 
In virtual-8086 mode, the default + * is always two bytes for both address and operand sizes. + * + * Return: A signed 8-bit value containing the default parameters on success and + * -EINVAL on error. + */ +char insn_get_code_seg_defaults(struct pt_regs *regs) +{ + struct desc_struct *desc; + unsigned short sel; + + if (v8086_mode(regs)) + /* Address and operand size are both 16-bit. */ + return INSN_CODE_SEG_PARAMS(2, 2); + + sel = (unsigned short)regs->cs; + + desc = get_desc(sel); + if (!desc) + return -EINVAL; + + /* + * The most significant byte of the Type field of the segment descriptor + * determines whether a segment contains data or code. If this is a data + * segment, return error. + */ + if (!(desc->type & BIT(3))) + return -EINVAL; + + switch ((desc->l << 1) | desc->d) { + case 0: /* + * Legacy mode. CS.L=0, CS.D=0. Address and operand size are + * both 16-bit. + */ + return INSN_CODE_SEG_PARAMS(2, 2); + case 1: /* + * Legacy mode. CS.L=0, CS.D=1. Address and operand size are + * both 32-bit. + */ + return INSN_CODE_SEG_PARAMS(4, 4); + case 2: /* + * IA-32e 64-bit mode. CS.L=1, CS.D=0. Address size is 64-bit; + * operand size is 32-bit. + */ + return INSN_CODE_SEG_PARAMS(4, 8); + case 3: /* Invalid setting. CS.L=1, CS.D=1 */ + /* fall through */ + default: + return -EINVAL; + } +} + +/** + * insn_get_modrm_rm_off() - Obtain register in r/m part of ModRM byte + * @insn: Instruction structure containing the ModRM byte + * @regs: Structure with register values as seen when entering kernel mode + * + * Return: The register indicated by the r/m part of the ModRM byte. The + * register is obtained as an offset from the base of pt_regs. In specific + * cases, the returned value can be -EDOM to indicate that the particular value + * of ModRM does not refer to a register and shall be ignored. + */ +int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs) +{ + return get_reg_offset(insn, regs, REG_TYPE_RM); +} + +/** + * get_addr_ref_16() - Obtain the 16-bit address referred by instruction + * @insn: Instruction structure containing ModRM byte and displacement + * @regs: Structure with register values as seen when entering kernel mode + * + * This function is to be used with 16-bit address encodings. Obtain the memory + * address referred by the instruction's ModRM and displacement bytes. Also, the + * segment used as base is determined by either any segment override prefixes in + * insn or the default segment of the registers involved in the address + * computation. In protected mode, segment limits are enforced. + * + * Return: linear address referenced by instruction and registers on success. + * -1L on error. + */ +static void __user *get_addr_ref_16(struct insn *insn, struct pt_regs *regs) +{ + unsigned long linear_addr = -1L, seg_base_addr, seg_limit; + short eff_addr, addr1 = 0, addr2 = 0; + int addr_offset1, addr_offset2; + int ret; + + insn_get_modrm(insn); + insn_get_displacement(insn); + + if (insn->addr_bytes != 2) + goto out; + + /* + * If operand is a register, the layout is the same as in + * 32-bit and 64-bit addressing. 
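+ * For the memory forms handled further below, a worked example
+ * (sketch): a 16-bit "mov %ax, -2(%bp,%si)" encodes ModRM.rm = 010b,
+ * selecting BP + SI, so the effective address is BP + SI + disp and
+ * the default segment is SS (BP-based addressing).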
+ */ + if (X86_MODRM_MOD(insn->modrm.value) == 3) { + addr_offset1 = get_reg_offset(insn, regs, REG_TYPE_RM); + if (addr_offset1 < 0) + goto out; + + eff_addr = regs_get_register(regs, addr_offset1); + + seg_base_addr = insn_get_seg_base(regs, insn, addr_offset1); + if (seg_base_addr == -1L) + goto out; + + seg_limit = get_seg_limit(regs, insn, addr_offset1); + } else { + ret = get_reg_offset_16(insn, regs, &addr_offset1, + &addr_offset2); + if (ret < 0) + goto out; + + /* + * Don't fail on invalid offset values. They might be invalid + * because they cannot be used for this particular value of + * the ModRM. Instead, use them in the computation only if + * they contain a valid value. + */ + if (addr_offset1 != -EDOM) + addr1 = 0xffff & regs_get_register(regs, addr_offset1); + if (addr_offset2 != -EDOM) + addr2 = 0xffff & regs_get_register(regs, addr_offset2); + + eff_addr = addr1 + addr2; + + /* + * The first operand register could indicate to use of either SS + * or DS registers to obtain the segment selector. The second + * operand register can only indicate the use of DS. Thus, use + * the first register to obtain the segment selector. + */ + seg_base_addr = insn_get_seg_base(regs, insn, addr_offset1); + if (seg_base_addr == -1L) + goto out; + + seg_limit = get_seg_limit(regs, insn, addr_offset1); + + eff_addr += (insn->displacement.value & 0xffff); + } + + /* + * Before computing the linear address, make sure the effective address + * is within the limits of the segment. In virtual-8086 mode, segment + * limits are not enforced. In such a case, the segment limit is -1L to + * reflect this fact. + */ + if ((unsigned long)(eff_addr & 0xffff) > seg_limit) + goto out; + + linear_addr = (unsigned long)(eff_addr & 0xffff) + seg_base_addr; + + /* Limit linear address to 20 bits */ + if (v8086_mode(regs)) + linear_addr &= 0xfffff; + +out: + return (void __user *)linear_addr; +} + +/** + * get_addr_ref_32() - Obtain a 32-bit linear address + * @insn: Instruction struct with ModRM and SIB bytes and displacement + * @regs: Structure with register values as seen when entering kernel mode + * + * This function is to be used with 32-bit address encodings to obtain the + * linear memory address referred by the instruction's ModRM, SIB, + * displacement bytes and segment base address, as applicable. If in protected + * mode, segment limits are enforced. + * + * Return: linear address referenced by instruction and registers on success. + * -1L on error. + */ +static void __user *get_addr_ref_32(struct insn *insn, struct pt_regs *regs) +{ + int eff_addr, base, indx, addr_offset, base_offset, indx_offset; + unsigned long linear_addr = -1L, seg_base_addr, seg_limit, tmp; + insn_byte_t sib; + + insn_get_modrm(insn); + insn_get_sib(insn); + sib = insn->sib.value; + + if (insn->addr_bytes != 4) + goto out; + + if (X86_MODRM_MOD(insn->modrm.value) == 3) { + addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM); + if (addr_offset < 0) + goto out; + + tmp = regs_get_register(regs, addr_offset); + /* The 4 most significant bytes must be zero. */ + if (tmp & ~0xffffffffL) + goto out; + + eff_addr = (int)(tmp & 0xffffffff); + + seg_base_addr = insn_get_seg_base(regs, insn, addr_offset); + if (seg_base_addr == -1L) + goto out; + + seg_limit = get_seg_limit(regs, insn, addr_offset); + } else { + if (insn->sib.nbytes) { + /* + * Negative values in the base and index offset means + * an error when decoding the SIB byte. 
Except -EDOM,
+ * which means that the registers should not be used
+ * in the address computation.
+ */
+ base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE);
+ if (base_offset == -EDOM) {
+ base = 0;
+ } else if (base_offset < 0) {
+ goto out;
+ } else {
+ tmp = regs_get_register(regs, base_offset);
+ /* The 4 most significant bytes must be zero. */
+ if (tmp & ~0xffffffffL)
+ goto out;
+
+ base = (int)(tmp & 0xffffffff);
+ }
+
+ indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX);
+ if (indx_offset == -EDOM) {
+ indx = 0;
+ } else if (indx_offset < 0) {
+ goto out;
+ } else {
+ tmp = regs_get_register(regs, indx_offset);
+ /* The 4 most significant bytes must be zero. */
+ if (tmp & ~0xffffffffL)
+ goto out;
+
+ indx = (int)(tmp & 0xffffffff);
+ }
+
+ eff_addr = base + indx * (1 << X86_SIB_SCALE(sib));
+
+ seg_base_addr = insn_get_seg_base(regs, insn,
+ base_offset);
+ if (seg_base_addr == -1L)
+ goto out;
+
+ seg_limit = get_seg_limit(regs, insn, base_offset);
+ } else {
+ addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
+
+ /*
+ * -EDOM means that we must ignore the address_offset.
+ * In such a case, in 64-bit mode the effective address
+ * is relative to the RIP of the following instruction.
+ */
+ if (addr_offset == -EDOM) {
+ if (user_64bit_mode(regs))
+ eff_addr = (long)regs->ip + insn->length;
+ else
+ eff_addr = 0;
+ } else if (addr_offset < 0) {
+ goto out;
+ } else {
+ tmp = regs_get_register(regs, addr_offset);
+ /* The 4 most significant bytes must be zero. */
+ if (tmp & ~0xffffffffL)
+ goto out;
+
+ eff_addr = (int)(tmp & 0xffffffff);
+ }
+
+ seg_base_addr = insn_get_seg_base(regs, insn,
+ addr_offset);
+ if (seg_base_addr == -1L)
+ goto out;
+
+ seg_limit = get_seg_limit(regs, insn, addr_offset);
+ }
+ eff_addr += insn->displacement.value;
+ }
+
+ /*
+ * In protected mode, before computing the linear address, make sure
+ * the effective address is within the limits of the segment.
+ * 32-bit addresses can be used in long and virtual-8086 modes if an
+ * address override prefix is used. In such cases, segment limits are
+ * not enforced. When in virtual-8086 mode, the segment limit is -1L
+ * to reflect this situation.
+ *
+ * Once computed, the effective address is treated as an unsigned
+ * quantity.
+ */
+ if (!user_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit))
+ goto out;
+
+ /*
+ * Even though 32-bit address encodings are allowed in virtual-8086
+ * mode, the address range is still limited to [0x0-0xffff].
+ */
+ if (v8086_mode(regs) && (eff_addr & ~0xffff))
+ goto out;
+
+ /*
+ * Data type long could be 64 bits in size. Ensure that our 32-bit
+ * effective address is not sign-extended when computing the linear
+ * address.
+ */
+ linear_addr = (unsigned long)(eff_addr & 0xffffffff) + seg_base_addr;
+
+ /* Limit linear address to 20 bits */
+ if (v8086_mode(regs))
+ linear_addr &= 0xfffff;
+
+out:
+ return (void __user *)linear_addr;
+}
+
+/**
+ * get_addr_ref_64() - Obtain a 64-bit linear address
+ * @insn: Instruction struct with ModRM and SIB bytes and displacement
+ * @regs: Structure with register values as seen when entering kernel mode
+ *
+ * This function is to be used with 64-bit address encodings to obtain the
+ * linear memory address referred by the instruction's ModRM, SIB,
+ * displacement bytes and segment base address, as applicable.
+ *
+ * Return: linear address referenced by instruction and registers on success.
+ * -1L on error.
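+ * The computation below follows the usual long-mode rule: effective
+ * address = base + index * 2^scale + displacement, to which only a
+ * non-zero FS or GS base can further contribute.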
+ */
+#ifndef CONFIG_X86_64
+static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs)
+{
+ return (void __user *)-1L;
+}
+#else
+static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs)
+{
+ unsigned long linear_addr = -1L, seg_base_addr;
+ int addr_offset, base_offset, indx_offset;
+ long eff_addr, base, indx;
+ insn_byte_t sib;
+
+ insn_get_modrm(insn);
+ insn_get_sib(insn);
+ sib = insn->sib.value;
+
+ if (insn->addr_bytes != 8)
+ goto out;
+
+ if (X86_MODRM_MOD(insn->modrm.value) == 3) {
+ addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
+ if (addr_offset < 0)
+ goto out;
+
+ eff_addr = regs_get_register(regs, addr_offset);
+
+ seg_base_addr = insn_get_seg_base(regs, insn, addr_offset);
+ if (seg_base_addr == -1L)
+ goto out;
+ } else {
+ if (insn->sib.nbytes) {
+ /*
+ * Negative values in the base and index offset mean
+ * an error when decoding the SIB byte. Except -EDOM,
+ * which means that the registers should not be used
+ * in the address computation.
+ */
+ base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE);
+ if (base_offset == -EDOM)
+ base = 0;
+ else if (base_offset < 0)
+ goto out;
+ else
+ base = regs_get_register(regs, base_offset);
+
+ indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX);
+
+ if (indx_offset == -EDOM)
+ indx = 0;
+ else if (indx_offset < 0)
+ goto out;
+ else
+ indx = regs_get_register(regs, indx_offset);
+
+ eff_addr = base + indx * (1 << X86_SIB_SCALE(sib));
+
+ seg_base_addr = insn_get_seg_base(regs, insn,
+ base_offset);
+ if (seg_base_addr == -1L)
+ goto out;
+ } else {
+ addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
+ /*
+ * -EDOM means that we must ignore the address_offset.
+ * In such a case, in 64-bit mode the effective address
+ * is relative to the RIP of the following instruction.
+ */
+ if (addr_offset == -EDOM) {
+ if (user_64bit_mode(regs))
+ eff_addr = (long)regs->ip + insn->length;
+ else
+ eff_addr = 0;
+ } else if (addr_offset < 0) {
+ goto out;
+ } else {
+ eff_addr = regs_get_register(regs, addr_offset);
+ }
+
+ seg_base_addr = insn_get_seg_base(regs, insn,
+ addr_offset);
+ if (seg_base_addr == -1L)
+ goto out;
+ }
+
+ eff_addr += insn->displacement.value;
+ }
+
+ linear_addr = (unsigned long)eff_addr + seg_base_addr;
+
+out:
+ return (void __user *)linear_addr;
+}
+#endif /* CONFIG_X86_64 */
+
+/**
+ * insn_get_addr_ref() - Obtain the linear address referred by instruction
+ * @insn: Instruction structure containing ModRM byte and displacement
+ * @regs: Structure with register values as seen when entering kernel mode
+ *
+ * Obtain the linear address referred by the instruction's ModRM, SIB and
+ * displacement bytes, and segment base, as applicable. In protected mode,
+ * segment limits are enforced.
+ *
+ * Return: linear address referenced by instruction and registers on success.
+ * -1L on error.
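+ * Typical usage, assuming the caller already copied the instruction
+ * bytes into buf (sketch):
+ *
+ *	struct insn insn;
+ *
+ *	insn_init(&insn, buf, MAX_INSN_SIZE, user_64bit_mode(regs));
+ *	insn_get_length(&insn);
+ *	addr = insn_get_addr_ref(&insn, regs);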
+ */ +void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs) +{ + switch (insn->addr_bytes) { + case 2: + return get_addr_ref_16(insn, regs); + case 4: + return get_addr_ref_32(insn, regs); + case 8: + return get_addr_ref_64(insn, regs); + default: + return (void __user *)-1L; + } +} diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S new file mode 100644 index 000000000000..c909961e678a --- /dev/null +++ b/arch/x86/lib/retpoline.S @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include +#include +#include +#include +#include +#include +#include + +.macro THUNK reg + .section .text.__x86.indirect_thunk + +ENTRY(__x86_indirect_thunk_\reg) + CFI_STARTPROC + JMP_NOSPEC %\reg + CFI_ENDPROC +ENDPROC(__x86_indirect_thunk_\reg) +.endm + +/* + * Despite being an assembler file we can't just use .irp here + * because __KSYM_DEPS__ only uses the C preprocessor and would + * only see one instance of "__x86_indirect_thunk_\reg" rather + * than one per register with the correct names. So we do it + * the simple and nasty way... + */ +#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym) +#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg) +#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg) + +GENERATE_THUNK(_ASM_AX) +GENERATE_THUNK(_ASM_BX) +GENERATE_THUNK(_ASM_CX) +GENERATE_THUNK(_ASM_DX) +GENERATE_THUNK(_ASM_SI) +GENERATE_THUNK(_ASM_DI) +GENERATE_THUNK(_ASM_BP) +#ifdef CONFIG_64BIT +GENERATE_THUNK(r8) +GENERATE_THUNK(r9) +GENERATE_THUNK(r10) +GENERATE_THUNK(r11) +GENERATE_THUNK(r12) +GENERATE_THUNK(r13) +GENERATE_THUNK(r14) +GENERATE_THUNK(r15) +#endif diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 1b377f734e64..7add8ba06887 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -331,12 +331,12 @@ do { \ unsigned long __copy_user_ll(void *to, const void *from, unsigned long n) { - stac(); + __uaccess_begin_nospec(); if (movsl_is_ok(to, from, n)) __copy_user(to, from, n); else n = __copy_user_intel(to, from, n); - clac(); + __uaccess_end(); return n; } EXPORT_SYMBOL(__copy_user_ll); @@ -344,7 +344,7 @@ EXPORT_SYMBOL(__copy_user_ll); unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, unsigned long n) { - stac(); + __uaccess_begin_nospec(); #ifdef CONFIG_X86_INTEL_USERCOPY if (n > 64 && static_cpu_has(X86_FEATURE_XMM2)) n = __copy_user_intel_nocache(to, from, n); @@ -353,7 +353,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr #else __copy_user(to, from, n); #endif - clac(); + __uaccess_end(); return n; } EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero); diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 12e377184ee4..e0b85930dd77 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1) fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) -ff: +ff: UD0 EndTable Table: 3-byte opcode 1 (0x0f 0x38) @@ -717,7 +717,7 @@ AVXcode: 2 7e: vpermt2d/q Vx,Hx,Wx (66),(ev) 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev) 80: INVEPT Gy,Mdq (66) -81: INVPID Gy,Mdq (66) +81: INVVPID Gy,Mdq (66) 82: INVPCID Gy,Mdq (66) 83: vpmultishiftqb Vx,Hx,Wx (66),(ev) 88: vexpandps/d Vpd,Wpd (66),(ev) @@ -896,7 +896,7 @@ EndTable GrpTable: Grp3_1 0: TEST Eb,Ib -1: +1: TEST Eb,Ib 2: NOT Eb 3: NEG Eb 4: MUL AL,Eb @@ -970,6 +970,15 @@ GrpTable: Grp9 
EndTable GrpTable: Grp10 +# all are UD1 +0: UD1 +1: UD1 +2: UD1 +3: UD1 +4: UD1 +5: UD1 +6: UD1 +7: UD1 EndTable # Grp11A and Grp11B are expressed as Grp11 in Intel SDM diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 7ba7f3d7f477..27e9e90a8d35 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -10,7 +10,7 @@ CFLAGS_REMOVE_mem_encrypt.o = -pg endif obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ - pat.o pgtable.o physaddr.o setup_nx.o tlb.o + pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o # Make sure __phys_addr has no stackprotector nostackp := $(call cc-option, -fno-stack-protector) @@ -29,8 +29,6 @@ obj-$(CONFIG_X86_PTDUMP) += debug_pagetables.o obj-$(CONFIG_HIGHMEM) += highmem_32.o -obj-$(CONFIG_KMEMCHECK) += kmemcheck/ - KASAN_SANITIZE_kasan_init_$(BITS).o := n obj-$(CONFIG_KASAN) += kasan_init_$(BITS).o @@ -43,9 +41,10 @@ obj-$(CONFIG_AMD_NUMA) += amdtopology.o obj-$(CONFIG_ACPI_NUMA) += srat.o obj-$(CONFIG_NUMA_EMU) += numa_emulation.o -obj-$(CONFIG_X86_INTEL_MPX) += mpx.o -obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o -obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o +obj-$(CONFIG_X86_INTEL_MPX) += mpx.o +obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o +obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o +obj-$(CONFIG_PAGE_TABLE_ISOLATION) += pti.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c new file mode 100644 index 000000000000..476d810639a8 --- /dev/null +++ b/arch/x86/mm/cpu_entry_area.c @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include +#include +#include +#include + +static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage); + +#ifdef CONFIG_X86_64 +static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks + [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); +#endif + +struct cpu_entry_area *get_cpu_entry_area(int cpu) +{ + unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE; + BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); + + return (struct cpu_entry_area *) va; +} +EXPORT_SYMBOL(get_cpu_entry_area); + +void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags) +{ + unsigned long va = (unsigned long) cea_vaddr; + + set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags)); +} + +static void __init +cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot) +{ + for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE) + cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot); +} + +static void percpu_setup_debug_store(int cpu) +{ +#ifdef CONFIG_CPU_SUP_INTEL + int npages; + void *cea; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return; + + cea = &get_cpu_entry_area(cpu)->cpu_debug_store; + npages = sizeof(struct debug_store) / PAGE_SIZE; + BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0); + cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages, + PAGE_KERNEL); + + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers; + /* + * Force the population of PMDs for not yet allocated per cpu + * memory like debug store buffers. 
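+ * Installing PAGE_NONE PTEs (the loop below) is enough to allocate the
+ * intermediate paging levels while granting no access; the real
+ * mappings are installed later, once the buffers themselves exist.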
+ */ + npages = sizeof(struct debug_store_buffers) / PAGE_SIZE; + for (; npages; npages--, cea += PAGE_SIZE) + cea_set_pte(cea, 0, PAGE_NONE); +#endif +} + +/* Setup the fixmap mappings only once per-processor */ +static void __init setup_cpu_entry_area(int cpu) +{ +#ifdef CONFIG_X86_64 + extern char _entry_trampoline[]; + + /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */ + pgprot_t gdt_prot = PAGE_KERNEL_RO; + pgprot_t tss_prot = PAGE_KERNEL_RO; +#else + /* + * On native 32-bit systems, the GDT cannot be read-only because + * our double fault handler uses a task gate, and entering through + * a task gate needs to change an available TSS to busy. If the + * GDT is read-only, that will triple fault. The TSS cannot be + * read-only because the CPU writes to it on task switches. + * + * On Xen PV, the GDT must be read-only because the hypervisor + * requires it. + */ + pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ? + PAGE_KERNEL_RO : PAGE_KERNEL; + pgprot_t tss_prot = PAGE_KERNEL; +#endif + + cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu), + gdt_prot); + + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page, + per_cpu_ptr(&entry_stack_storage, cpu), 1, + PAGE_KERNEL); + + /* + * The Intel SDM says (Volume 3, 7.2.1): + * + * Avoid placing a page boundary in the part of the TSS that the + * processor reads during a task switch (the first 104 bytes). The + * processor may not correctly perform address translations if a + * boundary occurs in this area. During a task switch, the processor + * reads and writes into the first 104 bytes of each TSS (using + * contiguous physical addresses beginning with the physical address + * of the first byte of the TSS). So, after TSS access begins, if + * part of the 104 bytes is not physically contiguous, the processor + * will access incorrect information without generating a page-fault + * exception. + * + * There are also a lot of errata involving the TSS spanning a page + * boundary. Assert that we're not doing that. 
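+ * The first BUILD_BUG_ON() below encodes that check: XOR-ing the start
+ * and end offsets of x86_tss exposes any differing page-number bit,
+ * i.e. a hardware-read TSS region that straddles a page boundary.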
+ */
+ BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
+ offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
+ BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
+ cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
+ &per_cpu(cpu_tss_rw, cpu),
+ sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);
+
+#ifdef CONFIG_X86_32
+ per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
+#endif
+
+#ifdef CONFIG_X86_64
+ BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+ BUILD_BUG_ON(sizeof(exception_stacks) !=
+ sizeof(((struct cpu_entry_area *)0)->exception_stacks));
+ cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
+ &per_cpu(exception_stacks, cpu),
+ sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
+
+ cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
+ __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
+#endif
+ percpu_setup_debug_store(cpu);
+}
+
+static __init void setup_cpu_entry_area_ptes(void)
+{
+#ifdef CONFIG_X86_32
+ unsigned long start, end;
+
+ BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
+ BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
+
+ start = CPU_ENTRY_AREA_BASE;
+ end = start + CPU_ENTRY_AREA_MAP_SIZE;
+
+ /* Careful here: start + PMD_SIZE might wrap around */
+ for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
+ populate_extra_pte(start);
+#endif
+}
+
+void __init setup_cpu_entry_areas(void)
+{
+ unsigned int cpu;
+
+ setup_cpu_entry_area_ptes();
+
+ for_each_possible_cpu(cpu)
+ setup_cpu_entry_area(cpu);
+
+ /*
+ * This is the last essential update to swapper_pg_dir which needs
+ * to be synchronized to initial_page_table on 32bit.
+ */
+ sync_initial_page_table();
+} diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c index bfcffdf6c577..421f2664ffa0 100644 --- a/arch/x86/mm/debug_pagetables.c +++ b/arch/x86/mm/debug_pagetables.c @@ -5,7 +5,7 @@ static int ptdump_show(struct seq_file *m, void *v) { - ptdump_walk_pgd_level(m, NULL); + ptdump_walk_pgd_level_debugfs(m, NULL, false); return 0; } @@ -22,21 +22,89 @@ static const struct file_operations ptdump_fops = { .release = single_release, };
-static struct dentry *pe;
+static int ptdump_show_curknl(struct seq_file *m, void *v)
+{
+ if (current->mm->pgd) {
+ down_read(&current->mm->mmap_sem);
+ ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, false);
+ up_read(&current->mm->mmap_sem);
+ }
+ return 0;
+}
+
+static int ptdump_open_curknl(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, ptdump_show_curknl, NULL);
+}
+
+static const struct file_operations ptdump_curknl_fops = {
+ .owner = THIS_MODULE,
+ .open = ptdump_open_curknl,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+static struct dentry *pe_curusr;
+
+static int ptdump_show_curusr(struct seq_file *m, void *v)
+{
+ if (current->mm->pgd) {
+ down_read(&current->mm->mmap_sem);
+ ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, true);
+ up_read(&current->mm->mmap_sem);
+ }
+ return 0;
+}
+
+static int ptdump_open_curusr(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, ptdump_show_curusr, NULL);
+}
+
+static const struct file_operations ptdump_curusr_fops = {
+ .owner = THIS_MODULE,
+ .open = ptdump_open_curusr,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static struct dentry *dir, *pe_knl, *pe_curknl; static int __init pt_dump_debug_init(void) { - pe = debugfs_create_file("kernel_page_tables", S_IRUSR, NULL, NULL, - &ptdump_fops);
- if (!pe) + dir = debugfs_create_dir("page_tables", NULL); + if (!dir) return -ENOMEM; + pe_knl = debugfs_create_file("kernel", 0400, dir, NULL, + &ptdump_fops); + if (!pe_knl) + goto err; + + pe_curknl = debugfs_create_file("current_kernel", 0400, + dir, NULL, &ptdump_curknl_fops); + if (!pe_curknl) + goto err; + +#ifdef CONFIG_PAGE_TABLE_ISOLATION + pe_curusr = debugfs_create_file("current_user", 0400, + dir, NULL, &ptdump_curusr_fops); + if (!pe_curusr) + goto err; +#endif return 0; +err: + debugfs_remove_recursive(dir); + return -ENOMEM; } static void __exit pt_dump_debug_exit(void) { - debugfs_remove_recursive(pe); + debugfs_remove_recursive(dir); } module_init(pt_dump_debug_init); diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 5e3ac6fe6c9e..2a4849e92831 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -44,68 +44,97 @@ struct addr_marker { unsigned long max_lines; }; -/* indices for address_markers; keep sync'd w/ address_markers below */ +/* Address space markers hints */ + +#ifdef CONFIG_X86_64 + enum address_markers_idx { USER_SPACE_NR = 0, -#ifdef CONFIG_X86_64 KERNEL_SPACE_NR, LOW_KERNEL_NR, +#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL) + LDT_NR, +#endif VMALLOC_START_NR, VMEMMAP_START_NR, #ifdef CONFIG_KASAN KASAN_SHADOW_START_NR, KASAN_SHADOW_END_NR, #endif -# ifdef CONFIG_X86_ESPFIX64 + CPU_ENTRY_AREA_NR, +#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL) + LDT_NR, +#endif +#ifdef CONFIG_X86_ESPFIX64 ESPFIX_START_NR, -# endif +#endif +#ifdef CONFIG_EFI + EFI_END_NR, +#endif HIGH_KERNEL_NR, MODULES_VADDR_NR, MODULES_END_NR, -#else + FIXADDR_START_NR, + END_OF_SPACE_NR, +}; + +static struct addr_marker address_markers[] = { + [USER_SPACE_NR] = { 0, "User Space" }, + [KERNEL_SPACE_NR] = { (1UL << 63), "Kernel Space" }, + [LOW_KERNEL_NR] = { 0UL, "Low Kernel Mapping" }, + [VMALLOC_START_NR] = { 0UL, "vmalloc() Area" }, + [VMEMMAP_START_NR] = { 0UL, "Vmemmap" }, +#ifdef CONFIG_KASAN + [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" }, + [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" }, +#endif +#ifdef CONFIG_MODIFY_LDT_SYSCALL + [LDT_NR] = { LDT_BASE_ADDR, "LDT remap" }, +#endif + [CPU_ENTRY_AREA_NR] = { CPU_ENTRY_AREA_BASE,"CPU entry Area" }, +#ifdef CONFIG_X86_ESPFIX64 + [ESPFIX_START_NR] = { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, +#endif +#ifdef CONFIG_EFI + [EFI_END_NR] = { EFI_VA_END, "EFI Runtime Services" }, +#endif + [HIGH_KERNEL_NR] = { __START_KERNEL_map, "High Kernel Mapping" }, + [MODULES_VADDR_NR] = { MODULES_VADDR, "Modules" }, + [MODULES_END_NR] = { MODULES_END, "End Modules" }, + [FIXADDR_START_NR] = { FIXADDR_START, "Fixmap Area" }, + [END_OF_SPACE_NR] = { -1, NULL } +}; + +#else /* CONFIG_X86_64 */ + +enum address_markers_idx { + USER_SPACE_NR = 0, KERNEL_SPACE_NR, VMALLOC_START_NR, VMALLOC_END_NR, -# ifdef CONFIG_HIGHMEM +#ifdef CONFIG_HIGHMEM PKMAP_BASE_NR, -# endif - FIXADDR_START_NR, #endif + CPU_ENTRY_AREA_NR, + FIXADDR_START_NR, + END_OF_SPACE_NR, }; -/* Address space markers hints */ static struct addr_marker address_markers[] = { - { 0, "User Space" }, -#ifdef CONFIG_X86_64 - { 0x8000000000000000UL, "Kernel Space" }, - { 0/* PAGE_OFFSET */, "Low Kernel Mapping" }, - { 0/* VMALLOC_START */, "vmalloc() Area" }, - { 0/* VMEMMAP_START */, "Vmemmap" }, -#ifdef CONFIG_KASAN - { KASAN_SHADOW_START, "KASAN shadow" }, - { KASAN_SHADOW_END, "KASAN shadow end" }, + [USER_SPACE_NR] = { 0, "User Space" }, + [KERNEL_SPACE_NR] = { 
PAGE_OFFSET, "Kernel Mapping" }, + [VMALLOC_START_NR] = { 0UL, "vmalloc() Area" }, + [VMALLOC_END_NR] = { 0UL, "vmalloc() End" }, +#ifdef CONFIG_HIGHMEM + [PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" }, #endif -# ifdef CONFIG_X86_ESPFIX64 - { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, -# endif -# ifdef CONFIG_EFI - { EFI_VA_END, "EFI Runtime Services" }, -# endif - { __START_KERNEL_map, "High Kernel Mapping" }, - { MODULES_VADDR, "Modules" }, - { MODULES_END, "End Modules" }, -#else - { PAGE_OFFSET, "Kernel Mapping" }, - { 0/* VMALLOC_START */, "vmalloc() Area" }, - { 0/*VMALLOC_END*/, "vmalloc() End" }, -# ifdef CONFIG_HIGHMEM - { 0/*PKMAP_BASE*/, "Persistent kmap() Area" }, -# endif - { 0/*FIXADDR_START*/, "Fixmap Area" }, -#endif - { -1, NULL } /* End of list */ + [CPU_ENTRY_AREA_NR] = { 0UL, "CPU entry area" }, + [FIXADDR_START_NR] = { 0UL, "Fixmap area" }, + [END_OF_SPACE_NR] = { -1, NULL } }; +#endif /* !CONFIG_X86_64 */ + /* Multipliers for offsets within the PTEs */ #define PTE_LEVEL_MULT (PAGE_SIZE) #define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT) @@ -140,7 +169,7 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg) static const char * const level_name[] = { "cr3", "pgd", "p4d", "pud", "pmd", "pte" }; - if (!pgprot_val(prot)) { + if (!(pr & _PAGE_PRESENT)) { /* Not present */ pt_dump_cont_printf(m, dmsg, " "); } else { @@ -447,7 +476,7 @@ static inline bool is_hypervisor_range(int idx) } static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, - bool checkwx) + bool checkwx, bool dmesg) { #ifdef CONFIG_X86_64 pgd_t *start = (pgd_t *) &init_top_pgt; @@ -460,7 +489,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, if (pgd) { start = pgd; - st.to_dmesg = true; + st.to_dmesg = dmesg; } st.check_wx = checkwx; @@ -498,13 +527,37 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd) { - ptdump_walk_pgd_level_core(m, pgd, false); + ptdump_walk_pgd_level_core(m, pgd, false, true); +} + +void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user) +{ +#ifdef CONFIG_PAGE_TABLE_ISOLATION + if (user && static_cpu_has(X86_FEATURE_PTI)) + pgd = kernel_to_user_pgdp(pgd); +#endif + ptdump_walk_pgd_level_core(m, pgd, false, false); +} +EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs); + +static void ptdump_walk_user_pgd_level_checkwx(void) +{ +#ifdef CONFIG_PAGE_TABLE_ISOLATION + pgd_t *pgd = (pgd_t *) &init_top_pgt; + + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + pr_info("x86/mm: Checking user space page tables\n"); + pgd = kernel_to_user_pgdp(pgd); + ptdump_walk_pgd_level_core(NULL, pgd, true, false); +#endif } -EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level); void ptdump_walk_pgd_level_checkwx(void) { - ptdump_walk_pgd_level_core(NULL, NULL, true); + ptdump_walk_pgd_level_core(NULL, NULL, true, false); + ptdump_walk_user_pgd_level_checkwx(); } static int __init pt_dump_init(void) @@ -525,8 +578,8 @@ static int __init pt_dump_init(void) address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE; # endif address_markers[FIXADDR_START_NR].start_address = FIXADDR_START; + address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE; #endif - return 0; } __initcall(pt_dump_init); diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index c3521e2be396..9fe656c42aa5 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -1,6 +1,7 @@ #include #include #include +#include #include #include @@ -67,17 +68,22 
@@ bool ex_handler_refcount(const struct exception_table_entry *fixup, * wrapped around) will be set. Additionally, seeing the refcount * reach 0 will set ZF (Zero Flag: result was zero). In each of * these cases we want a report, since it's a boundary condition. - * + * The SF case is not reported since it indicates post-boundary + * manipulations below zero or above INT_MAX. And if none of the + * flags are set, something has gone very wrong, so report it. */ if (regs->flags & (X86_EFLAGS_OF | X86_EFLAGS_ZF)) { bool zero = regs->flags & X86_EFLAGS_ZF; refcount_error_report(regs, zero ? "hit zero" : "overflow"); + } else if ((regs->flags & X86_EFLAGS_SF) == 0) { + /* Report if none of OF, ZF, nor SF are set. */ + refcount_error_report(regs, "unexpected saturation"); } return true; } -EXPORT_SYMBOL_GPL(ex_handler_refcount); +EXPORT_SYMBOL(ex_handler_refcount); /* * Handler for when we fail to restore a task's FPU state. We should never get @@ -207,8 +213,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr) * Old CPUs leave the high bits of CS on the stack * undefined. I'm not sure which CPUs do this, but at least * the 486 DX works this way. + * Xen pv domains are not using the default __KERNEL_CS. */ - if (regs->cs != __KERNEL_CS) + if (!xen_pv_domain() && regs->cs != __KERNEL_CS) goto fail; /* diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index b0ff378650a9..c2faff548f59 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -20,35 +20,15 @@ #include /* boot_cpu_has, ... */ #include /* dotraplinkage, ... */ #include /* pgd_*(), ... */ -#include /* kmemcheck_*(), ... */ #include /* VSYSCALL_ADDR */ #include /* emulate_vsyscall */ #include /* struct vm86 */ #include /* vma_pkey() */ +#include #define CREATE_TRACE_POINTS #include -/* - * Page fault error code bits: - * - * bit 0 == 0: no page found 1: protection fault - * bit 1 == 0: read access 1: write access - * bit 2 == 0: kernel-mode access 1: user-mode access - * bit 3 == 1: use of reserved bit detected - * bit 4 == 1: fault was an instruction fetch - * bit 5 == 1: protection keys block access - */ -enum x86_pf_error_code { - - PF_PROT = 1 << 0, - PF_WRITE = 1 << 1, - PF_USER = 1 << 2, - PF_RSVD = 1 << 3, - PF_INSTR = 1 << 4, - PF_PK = 1 << 5, -}; - /* * Returns 0 if mmiotrace is disabled, or if the fault is not * handled by mmiotrace: @@ -150,7 +130,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) * If it was a exec (instruction fetch) fault on NX page, then * do not ignore the fault: */ - if (error_code & PF_INSTR) + if (error_code & X86_PF_INSTR) return 0; instr = (void *)convert_ip_to_linear(current, regs); @@ -180,7 +160,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) * siginfo so userspace can discover which protection key was set * on the PTE. * - * If we get here, we know that the hardware signaled a PF_PK + * If we get here, we know that the hardware signaled a X86_PF_PK * fault and that there was a VMA once we got in the fault * handler. It does *not* guarantee that the VMA we find here * was the one that we faulted on. @@ -193,19 +173,20 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really * faulted on a pte with its pkey=4. 
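 *
 * (Editor's aside, not part of the original comment: the effect is
 * visible from userspace. A minimal sketch of a SA_SIGINFO handler
 * reading the reported key, assuming a libc that exposes si_pkey:
 *
 *	void handler(int sig, siginfo_t *info, void *ctx)
 *	{
 *		if (info->si_code == SEGV_PKUERR)
 *			printf("blocked by pkey %d\n", info->si_pkey);
 *	}
 *
 * printf() is used only for illustration, it is not async-signal
 * safe. Because of the race described above, the key reported may
 * be the VMA's current pkey rather than the one that faulted.)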
*/ -static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey) +static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info, + u32 *pkey) { /* This is effectively an #ifdef */ if (!boot_cpu_has(X86_FEATURE_OSPKE)) return; /* Fault not from Protection Keys: nothing to do */ - if (si_code != SEGV_PKUERR) + if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV)) return; /* * force_sig_info_fault() is called from a number of * contexts, some of which have a VMA and some of which - * do not. The PF_PK handing happens after we have a + * do not. The X86_PF_PK handling happens after we have a * valid VMA, so we should never reach this without a * valid VMA. */ @@ -239,7 +220,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address, lsb = PAGE_SHIFT; info.si_addr_lsb = lsb; - fill_sig_info_pkey(si_code, &info, pkey); + fill_sig_info_pkey(si_signo, si_code, &info, pkey); force_sig_info(si_signo, &info, tsk); } @@ -350,7 +331,7 @@ static noinline int vmalloc_fault(unsigned long address) if (!pmd_k) return -1; - if (pmd_huge(*pmd_k)) + if (pmd_large(*pmd_k)) return 0; pte_k = pte_offset_kernel(pmd_k, address); @@ -499,7 +480,7 @@ static noinline int vmalloc_fault(unsigned long address) if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref)) BUG(); - if (pud_huge(*pud)) + if (pud_large(*pud)) return 0; pmd = pmd_offset(pud, address); @@ -510,7 +491,7 @@ static noinline int vmalloc_fault(unsigned long address) if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref)) BUG(); - if (pmd_huge(*pmd)) + if (pmd_large(*pmd)) return 0; pte_ref = pte_offset_kernel(pmd_ref, address); @@ -698,7 +679,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, if (!oops_may_print()) return; - if (error_code & PF_INSTR) { + if (error_code & X86_PF_INSTR) { unsigned int level; pgd_t *pgd; pte_t *pte; @@ -780,7 +761,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, */ if (current->thread.sig_on_uaccess_err && signal) { tsk->thread.trap_nr = X86_TRAP_PF; - tsk->thread.error_code = error_code | PF_USER; + tsk->thread.error_code = error_code | X86_PF_USER; tsk->thread.cr2 = address; /* XXX: hwpoison faults will set the wrong code. */ @@ -898,7 +879,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, struct task_struct *tsk = current; /* User mode accesses just cause a SIGSEGV */ - if (error_code & PF_USER) { + if (error_code & X86_PF_USER) { /* * It's possible to have interrupts off here: */ @@ -919,7 +900,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, * Instruction fetch faults in the vsyscall page might need * emulation. */ - if (unlikely((error_code & PF_INSTR) && + if (unlikely((error_code & X86_PF_INSTR) && ((address & ~0xfff) == VSYSCALL_ADDR))) { if (emulate_vsyscall(regs, address)) return; @@ -932,7 +913,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, * are always protection faults. 
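 *
 * (Editor's recap: the X86_PF_* bits used throughout this hunk are
 * the renamed form of the enum removed above, now shared via a
 * header instead of being private to fault.c:
 *
 *	X86_PF_PROT  = 1 << 0	protection fault, page was present
 *	X86_PF_WRITE = 1 << 1	write access
 *	X86_PF_USER  = 1 << 2	user-mode access
 *	X86_PF_RSVD  = 1 << 3	use of reserved bit detected
 *	X86_PF_INSTR = 1 << 4	instruction fetch
 *	X86_PF_PK    = 1 << 5	protection keys block access
 * )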
*/ if (address >= TASK_SIZE_MAX) - error_code |= PF_PROT; + error_code |= X86_PF_PROT; if (likely(show_unhandled_signals)) show_signal_msg(regs, error_code, address, tsk); @@ -993,11 +974,11 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code, if (!boot_cpu_has(X86_FEATURE_OSPKE)) return false; - if (error_code & PF_PK) + if (error_code & X86_PF_PK) return true; /* this checks permission keys on the VMA: */ - if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), - (error_code & PF_INSTR), foreign)) + if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE), + (error_code & X86_PF_INSTR), foreign)) return true; return false; } @@ -1025,7 +1006,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, int code = BUS_ADRERR; /* Kernel mode? Handle exceptions or die: */ - if (!(error_code & PF_USER)) { + if (!(error_code & X86_PF_USER)) { no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); return; } @@ -1053,14 +1034,14 @@ static noinline void mm_fault_error(struct pt_regs *regs, unsigned long error_code, unsigned long address, u32 *pkey, unsigned int fault) { - if (fatal_signal_pending(current) && !(error_code & PF_USER)) { + if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) { no_context(regs, error_code, address, 0, 0); return; } if (fault & VM_FAULT_OOM) { /* Kernel mode? Handle exceptions or die: */ - if (!(error_code & PF_USER)) { + if (!(error_code & X86_PF_USER)) { no_context(regs, error_code, address, SIGSEGV, SEGV_MAPERR); return; @@ -1085,16 +1066,16 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, static int spurious_fault_check(unsigned long error_code, pte_t *pte) { - if ((error_code & PF_WRITE) && !pte_write(*pte)) + if ((error_code & X86_PF_WRITE) && !pte_write(*pte)) return 0; - if ((error_code & PF_INSTR) && !pte_exec(*pte)) + if ((error_code & X86_PF_INSTR) && !pte_exec(*pte)) return 0; /* * Note: We do not do lazy flushing on protection key - * changes, so no spurious fault will ever set PF_PK. + * changes, so no spurious fault will ever set X86_PF_PK. */ - if ((error_code & PF_PK)) + if ((error_code & X86_PF_PK)) return 1; return 1; @@ -1140,8 +1121,8 @@ spurious_fault(unsigned long error_code, unsigned long address) * change, so user accesses are not expected to cause spurious * faults. */ - if (error_code != (PF_WRITE | PF_PROT) - && error_code != (PF_INSTR | PF_PROT)) + if (error_code != (X86_PF_WRITE | X86_PF_PROT) && + error_code != (X86_PF_INSTR | X86_PF_PROT)) return 0; pgd = init_mm.pgd + pgd_index(address); @@ -1201,19 +1182,19 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) * always an unconditional error and can never result in * a follow-up action to resolve the fault, like a COW. */ - if (error_code & PF_PK) + if (error_code & X86_PF_PK) return 1; /* * Make sure to check the VMA so that we do not perform - * faults just to hit a PF_PK as soon as we fill in a + * faults just to hit a X86_PF_PK as soon as we fill in a * page. 
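 *
 * (Editor's condensed sketch of the checks that follow:
 *
 *	if ((error_code & X86_PF_WRITE) && !(vma->vm_flags & VM_WRITE))
 *		return 1;	// write to a non-writable VMA
 *	if (error_code & X86_PF_PROT)
 *		return 1;	// read of a present page
 *
 * followed, in the full file though not shown in this hunk, by a
 * read-not-present test against VM_READ | VM_EXEC | VM_WRITE; see
 * the code below for the authoritative version.)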
*/ - if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE), - (error_code & PF_INSTR), foreign)) + if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE), + (error_code & X86_PF_INSTR), foreign)) return 1; - if (error_code & PF_WRITE) { + if (error_code & X86_PF_WRITE) { /* write, present and write, not present: */ if (unlikely(!(vma->vm_flags & VM_WRITE))) return 1; @@ -1221,7 +1202,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) } /* read, present: */ - if (unlikely(error_code & PF_PROT)) + if (unlikely(error_code & X86_PF_PROT)) return 1; /* read, not present: */ @@ -1244,7 +1225,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs) if (!static_cpu_has(X86_FEATURE_SMAP)) return false; - if (error_code & PF_USER) + if (error_code & X86_PF_USER) return false; if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC)) @@ -1272,12 +1253,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, tsk = current; mm = tsk->mm; - /* - * Detect and handle instructions that would cause a page fault for - * both a tracked kernel page and a userspace page. - */ - if (kmemcheck_active(regs)) - kmemcheck_hide(regs); prefetchw(&mm->mmap_sem); if (unlikely(kmmio_fault(regs, address))) @@ -1297,12 +1272,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, * protection error (error_code & 9) == 0. */ if (unlikely(fault_in_kernel_space(address))) { - if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) { + if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) { if (vmalloc_fault(address) >= 0) return; - - if (kmemcheck_fault(regs, address, error_code)) - return; } /* Can handle a stale RO->RW TLB: */ @@ -1325,7 +1297,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, if (unlikely(kprobes_fault(regs))) return; - if (unlikely(error_code & PF_RSVD)) + if (unlikely(error_code & X86_PF_RSVD)) pgtable_bad(regs, error_code, address); if (unlikely(smap_violation(error_code, regs))) { @@ -1351,7 +1323,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, */ if (user_mode(regs)) { local_irq_enable(); - error_code |= PF_USER; + error_code |= X86_PF_USER; flags |= FAULT_FLAG_USER; } else { if (regs->flags & X86_EFLAGS_IF) @@ -1360,9 +1332,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); - if (error_code & PF_WRITE) + if (error_code & X86_PF_WRITE) flags |= FAULT_FLAG_WRITE; - if (error_code & PF_INSTR) + if (error_code & X86_PF_INSTR) flags |= FAULT_FLAG_INSTRUCTION; /* @@ -1382,7 +1354,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, * space check, thus avoiding the deadlock: */ if (unlikely(!down_read_trylock(&mm->mmap_sem))) { - if ((error_code & PF_USER) == 0 && + if (!(error_code & X86_PF_USER) && !search_exception_tables(regs->ip)) { bad_area_nosemaphore(regs, error_code, address, NULL); return; @@ -1409,7 +1381,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code, bad_area(regs, error_code, address); return; } - if (error_code & PF_USER) { + if (error_code & X86_PF_USER) { /* * Accessing the stack below %sp is always a bug. 
* The large cushion allows instructions like enter diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index af5c1ed21d43..37f60dfd7e4e 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -4,6 +4,8 @@ #include #include #include /* for max_low_pfn */ +#include +#include #include #include @@ -20,6 +22,7 @@ #include #include #include +#include /* * We need to define the tracepoints somewhere, and tlb.c @@ -92,8 +95,7 @@ __ref void *alloc_low_pages(unsigned int num) unsigned int order; order = get_order((unsigned long)num << PAGE_SHIFT); - return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK | - __GFP_ZERO, order); + return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order); } if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) { @@ -161,15 +163,20 @@ struct map_range { static int page_size_mask; +static void enable_global_pages(void) +{ + if (!static_cpu_has(X86_FEATURE_PTI)) + __supported_pte_mask |= _PAGE_GLOBAL; +} + static void __init probe_page_size_mask(void) { /* - * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will - * use small pages. + * For pagealloc debugging, identity mapping will use small pages. * This will simplify cpa(), which otherwise needs to support splitting * large pages into small in interrupt context, etc. */ - if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK)) + if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled()) page_size_mask |= 1 << PG_LEVEL_2M; else direct_gbpages = 0; @@ -179,11 +186,11 @@ static void __init probe_page_size_mask(void) cr4_set_bits_and_update_boot(X86_CR4_PSE); /* Enable PGE if available */ + __supported_pte_mask &= ~_PAGE_GLOBAL; if (boot_cpu_has(X86_FEATURE_PGE)) { cr4_set_bits_and_update_boot(X86_CR4_PGE); - __supported_pte_mask |= _PAGE_GLOBAL; - } else - __supported_pte_mask &= ~_PAGE_GLOBAL; + enable_global_pages(); + } /* Enable 1 GB linear kernel mappings if available: */ if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) { @@ -196,34 +203,44 @@ static void __init probe_page_size_mask(void) static void setup_pcid(void) { -#ifdef CONFIG_X86_64 - if (boot_cpu_has(X86_FEATURE_PCID)) { - if (boot_cpu_has(X86_FEATURE_PGE)) { - /* - * This can't be cr4_set_bits_and_update_boot() -- - * the trampoline code can't handle CR4.PCIDE and - * it wouldn't do any good anyway. Despite the name, - * cr4_set_bits_and_update_boot() doesn't actually - * cause the bits in question to remain set all the - * way through the secondary boot asm. - * - * Instead, we brute-force it and set CR4.PCIDE - * manually in start_secondary(). - */ - cr4_set_bits(X86_CR4_PCIDE); - } else { - /* - * flush_tlb_all(), as currently implemented, won't - * work if PCID is on but PGE is not. Since that - * combination doesn't exist on real hardware, there's - * no reason to try to fully support it, but it's - * polite to avoid corrupting data if we're on - * an improperly configured VM. - */ - setup_clear_cpu_cap(X86_FEATURE_PCID); - } + if (!IS_ENABLED(CONFIG_X86_64)) + return; + + if (!boot_cpu_has(X86_FEATURE_PCID)) + return; + + if (boot_cpu_has(X86_FEATURE_PGE)) { + /* + * This can't be cr4_set_bits_and_update_boot() -- the + * trampoline code can't handle CR4.PCIDE and it wouldn't + * do any good anyway. Despite the name, + * cr4_set_bits_and_update_boot() doesn't actually cause + * the bits in question to remain set all the way through + * the secondary boot asm. + * + * Instead, we brute-force it and set CR4.PCIDE manually in + * start_secondary(). 
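 + *
 + * (Editor's sketch, not part of this hunk: the start_secondary()
 + * side referred to above is assumed to be the same one-liner,
 + *
 + *	if (boot_cpu_has(X86_FEATURE_PCID))
 + *		cr4_set_bits(X86_CR4_PCIDE);
 + *
 + * run once on each AP as it comes up.)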
+ */ + cr4_set_bits(X86_CR4_PCIDE); + + /* + * INVPCID's single-context modes (2/3) only work if we set + * X86_CR4_PCIDE, *and* we have INVPCID support. It's unusable + * on systems that have X86_CR4_PCIDE clear, or that have + * no INVPCID support at all. + */ + if (boot_cpu_has(X86_FEATURE_INVPCID)) + setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE); + } else { + /* + * flush_tlb_all(), as currently implemented, won't work if + * PCID is on but PGE is not. Since that combination + * doesn't exist on real hardware, there's no reason to try + * to fully support it, but it's polite to avoid corrupting + * data if we're on an improperly configured VM. + */ + setup_clear_cpu_cap(X86_FEATURE_PCID); } -#endif } #ifdef CONFIG_X86_32 @@ -624,6 +641,7 @@ void __init init_mem_mapping(void) { unsigned long end; + pti_check_boottime_disable(); probe_page_size_mask(); setup_pcid(); @@ -671,7 +689,7 @@ void __init init_mem_mapping(void) load_cr3(swapper_pg_dir); __flush_tlb_all(); - hypervisor_init_mem_mapping(); + x86_init.hyper.init_mem_mapping(); early_memtest(0, max_pfn_mapped << PAGE_SHIFT); } @@ -690,7 +708,9 @@ void __init init_mem_mapping(void) */ int devmem_is_allowed(unsigned long pagenr) { - if (page_is_ram(pagenr)) { + if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE, + IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE) + != REGION_DISJOINT) { /* * For disallowed memory regions in the low 1MB range, * request that the page be shown as all zeros. @@ -847,12 +867,12 @@ void __init zone_sizes_init(void) free_area_init_nodes(max_zone_pfns); } -DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { +__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { .loaded_mm = &init_mm, .next_asid = 1, .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */ }; -EXPORT_SYMBOL_GPL(cpu_tlbstate); +EXPORT_PER_CPU_SYMBOL(cpu_tlbstate); void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache) { @@ -862,3 +882,26 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache) __cachemode2pte_tbl[cache] = __cm_idx2pte(entry); __pte2cachemode_tbl[entry] = cache; } + +#ifdef CONFIG_SWAP +unsigned long max_swapfile_size(void) +{ + unsigned long pages; + + pages = generic_max_swapfile_size(); + + if (boot_cpu_has_bug(X86_BUG_L1TF)) { + /* Limit the swap file size to MAX_PA/2 for L1TF workaround */ + unsigned long l1tf_limit = l1tf_pfn_limit() + 1; + /* + * We encode swap offsets also with 3 bits below those for pfn + * which makes the usable limit higher. + */ +#if CONFIG_PGTABLE_LEVELS > 2 + l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT; +#endif + pages = min_t(unsigned long, l1tf_limit, pages); + } + return pages; +} +#endif diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8a64a6f2848d..3141e67ec24c 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -50,6 +50,7 @@ #include #include #include +#include #include #include "mm_internal.h" @@ -452,6 +453,21 @@ static inline void permanent_kmaps_init(pgd_t *pgd_base) } #endif /* CONFIG_HIGHMEM */ +void __init sync_initial_page_table(void) +{ + clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); + + /* + * sync back the low identity map too. It is used for example + * in the 32-bit EFI stub. 
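 + *
 + * (Editor's note: clone_pgd_range(), as defined in asm/pgtable.h,
 + * is essentially a typed memcpy of PGD slots,
 + *
 + *	static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src,
 + *					   int count)
 + *	{
 + *		memcpy(dst, src, count * sizeof(pgd_t));
 + *	}
 + *
 + * so both calls in this function copy page-table entries wholesale.)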
+ */ + clone_pgd_range(initial_page_table, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); +} + void __init native_pagetable_init(void) { unsigned long pfn, va; @@ -766,6 +782,7 @@ void __init mem_init(void) mem_init_print_info(NULL); printk(KERN_INFO "virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" + " cpu_entry : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #endif @@ -777,6 +794,10 @@ void __init mem_init(void) FIXADDR_START, FIXADDR_TOP, (FIXADDR_TOP - FIXADDR_START) >> 10, + CPU_ENTRY_AREA_BASE, + CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE, + CPU_ENTRY_AREA_MAP_SIZE >> 10, + #ifdef CONFIG_HIGHMEM PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, (LAST_PKMAP*PAGE_SIZE) >> 10, diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 048fbe8fc274..642357aff216 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -184,7 +184,7 @@ static __ref void *spp_getpage(void) void *ptr; if (after_bootmem) - ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK); + ptr = (void *) get_zeroed_page(GFP_ATOMIC); else ptr = alloc_bootmem_pages(PAGE_SIZE); @@ -256,7 +256,7 @@ static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte) * It's enough to flush this one mapping. * (PGE mappings get flushed as well) */ - __flush_tlb_one(vaddr); + __flush_tlb_one_kernel(vaddr); } void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte) @@ -1180,8 +1180,7 @@ void __init mem_init(void) after_bootmem = 1; /* Register memory areas for /proc/kcore */ - kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, - PAGE_SIZE, KCORE_OTHER); + kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER); mem_init_print_info(NULL); } @@ -1426,16 +1425,16 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE) void register_page_bootmem_memmap(unsigned long section_nr, - struct page *start_page, unsigned long size) + struct page *start_page, unsigned long nr_pages) { unsigned long addr = (unsigned long)start_page; - unsigned long end = (unsigned long)(start_page + size); + unsigned long end = (unsigned long)(start_page + nr_pages); unsigned long next; pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; - unsigned int nr_pages; + unsigned int nr_pmd_pages; struct page *page; for (; addr < end; addr = next) { @@ -1482,9 +1481,9 @@ void register_page_bootmem_memmap(unsigned long section_nr, if (pmd_none(*pmd)) continue; - nr_pages = 1 << (get_order(PMD_SIZE)); + nr_pmd_pages = 1 << get_order(PMD_SIZE); page = pmd_page(*pmd); - while (nr_pages--) + while (nr_pmd_pages--) get_page_bootmem(section_nr, page++, SECTION_INFO); } diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 34f0e1847dd6..7bebdd0273d3 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -349,11 +349,11 @@ void iounmap(volatile void __iomem *addr) return; } + mmiotrace_iounmap(addr); + addr = (volatile void __iomem *) (PAGE_MASK & (unsigned long __force)addr); - mmiotrace_iounmap(addr); - /* Use the vm area unlocked, assuming the caller ensures there isn't another iounmap for the same address in parallel. 
Reuse of the virtual address is prevented by @@ -749,5 +749,5 @@ void __init __early_set_fixmap(enum fixed_addresses idx, set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); else pte_clear(&init_mm, addr, pte); - __flush_tlb_one(addr); + __flush_tlb_one_kernel(addr); } diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 8f5be3eb40dd..af6f2f9c6a26 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -4,19 +4,155 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include #include +#include extern struct range pfn_mapped[E820_MAX_ENTRIES]; -static int __init map_range(struct range *range) +static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); + +static __init void *early_alloc(size_t size, int nid, bool panic) +{ + if (panic) + return memblock_virt_alloc_try_nid(size, size, + __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); + else + return memblock_virt_alloc_try_nid_nopanic(size, size, + __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); +} + +static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, + unsigned long end, int nid) +{ + pte_t *pte; + + if (pmd_none(*pmd)) { + void *p; + + if (boot_cpu_has(X86_FEATURE_PSE) && + ((end - addr) == PMD_SIZE) && + IS_ALIGNED(addr, PMD_SIZE)) { + p = early_alloc(PMD_SIZE, nid, false); + if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL)) + return; + else if (p) + memblock_free(__pa(p), PMD_SIZE); + } + + p = early_alloc(PAGE_SIZE, nid, true); + pmd_populate_kernel(&init_mm, pmd, p); + } + + pte = pte_offset_kernel(pmd, addr); + do { + pte_t entry; + void *p; + + if (!pte_none(*pte)) + continue; + + p = early_alloc(PAGE_SIZE, nid, true); + entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL); + set_pte_at(&init_mm, addr, pte, entry); + } while (pte++, addr += PAGE_SIZE, addr != end); +} + +static void __init kasan_populate_pud(pud_t *pud, unsigned long addr, + unsigned long end, int nid) +{ + pmd_t *pmd; + unsigned long next; + + if (pud_none(*pud)) { + void *p; + + if (boot_cpu_has(X86_FEATURE_GBPAGES) && + ((end - addr) == PUD_SIZE) && + IS_ALIGNED(addr, PUD_SIZE)) { + p = early_alloc(PUD_SIZE, nid, false); + if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL)) + return; + else if (p) + memblock_free(__pa(p), PUD_SIZE); + } + + p = early_alloc(PAGE_SIZE, nid, true); + pud_populate(&init_mm, pud, p); + } + + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + if (!pmd_large(*pmd)) + kasan_populate_pmd(pmd, addr, next, nid); + } while (pmd++, addr = next, addr != end); +} + +static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr, + unsigned long end, int nid) +{ + pud_t *pud; + unsigned long next; + + if (p4d_none(*p4d)) { + void *p = early_alloc(PAGE_SIZE, nid, true); + + p4d_populate(&init_mm, p4d, p); + } + + pud = pud_offset(p4d, addr); + do { + next = pud_addr_end(addr, end); + if (!pud_large(*pud)) + kasan_populate_pud(pud, addr, next, nid); + } while (pud++, addr = next, addr != end); +} + +static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr, + unsigned long end, int nid) +{ + void *p; + p4d_t *p4d; + unsigned long next; + + if (pgd_none(*pgd)) { + p = early_alloc(PAGE_SIZE, nid, true); + pgd_populate(&init_mm, pgd, p); + } + + p4d = p4d_offset(pgd, addr); + do { + next = p4d_addr_end(addr, end); + kasan_populate_p4d(p4d, addr, next, nid); + } while (p4d++, addr = next, addr != end); +} + +static void __init kasan_populate_shadow(unsigned long addr, unsigned long 
end, + int nid) +{ + pgd_t *pgd; + unsigned long next; + + addr = addr & PAGE_MASK; + end = round_up(end, PAGE_SIZE); + pgd = pgd_offset_k(addr); + do { + next = pgd_addr_end(addr, end); + kasan_populate_pgd(pgd, addr, next, nid); + } while (pgd++, addr = next, addr != end); +} + +static void __init map_range(struct range *range) { unsigned long start; unsigned long end; @@ -24,15 +160,17 @@ static int __init map_range(struct range *range) start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start)); end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end)); - return vmemmap_populate(start, end, NUMA_NO_NODE); + kasan_populate_shadow(start, end, early_pfn_to_nid(range->start)); } static void __init clear_pgds(unsigned long start, unsigned long end) { pgd_t *pgd; + /* See comment in kasan_init() */ + unsigned long pgd_end = end & PGDIR_MASK; - for (; start < end; start += PGDIR_SIZE) { + for (; start < pgd_end; start += PGDIR_SIZE) { pgd = pgd_offset_k(start); /* * With folded p4d, pgd_clear() is nop, use p4d_clear() @@ -43,29 +181,61 @@ static void __init clear_pgds(unsigned long start, else pgd_clear(pgd); } + + pgd = pgd_offset_k(start); + for (; start < end; start += P4D_SIZE) + p4d_clear(p4d_offset(pgd, start)); +} + +static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr) +{ + unsigned long p4d; + + if (!IS_ENABLED(CONFIG_X86_5LEVEL)) + return (p4d_t *)pgd; + + p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK; + p4d += __START_KERNEL_map - phys_base; + return (p4d_t *)p4d + p4d_index(addr); +} + +static void __init kasan_early_p4d_populate(pgd_t *pgd, + unsigned long addr, + unsigned long end) +{ + pgd_t pgd_entry; + p4d_t *p4d, p4d_entry; + unsigned long next; + + if (pgd_none(*pgd)) { + pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d)); + set_pgd(pgd, pgd_entry); + } + + p4d = early_p4d_offset(pgd, addr); + do { + next = p4d_addr_end(addr, end); + + if (!p4d_none(*p4d)) + continue; + + p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud)); + set_p4d(p4d, p4d_entry); + } while (p4d++, addr = next, addr != end && p4d_none(*p4d)); } static void __init kasan_map_early_shadow(pgd_t *pgd) { - int i; - unsigned long start = KASAN_SHADOW_START; + /* See comment in kasan_init() */ + unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK; unsigned long end = KASAN_SHADOW_END; + unsigned long next; - for (i = pgd_index(start); start < end; i++) { - switch (CONFIG_PGTABLE_LEVELS) { - case 4: - pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) | - _KERNPG_TABLE); - break; - case 5: - pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) | - _KERNPG_TABLE); - break; - default: - BUILD_BUG(); - } - start += PGDIR_SIZE; - } + pgd += pgd_index(addr); + do { + next = pgd_addr_end(addr, end); + kasan_early_p4d_populate(pgd, addr, next); + } while (pgd++, addr = next, addr != end); } #ifdef CONFIG_KASAN_INLINE @@ -102,7 +272,7 @@ void __init kasan_early_init(void) for (i = 0; i < PTRS_PER_PUD; i++) kasan_zero_pud[i] = __pud(pud_val); - for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++) + for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++) kasan_zero_p4d[i] = __p4d(p4d_val); kasan_map_early_shadow(early_top_pgt); @@ -112,37 +282,78 @@ void __init kasan_early_init(void) void __init kasan_init(void) { int i; + void *shadow_cpu_entry_begin, *shadow_cpu_entry_end; #ifdef CONFIG_KASAN_INLINE register_die_notifier(&kasan_die_notifier); #endif memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt)); + + /* + * We use the same shadow offset 
for 4- and 5-level paging to + * facilitate boot-time switching between paging modes. + * As a result, in 5-level paging mode KASAN_SHADOW_START and + * KASAN_SHADOW_END are not aligned to PGD boundary. + * + * KASAN_SHADOW_START doesn't share PGD with anything else. + * We claim the whole PGD entry to make things easier. + * + * KASAN_SHADOW_END lands in the last PGD entry and it collides with + * a bunch of things like kernel code, modules, EFI mapping, etc. + * We need to take extra steps to not overwrite them. + */ + if (IS_ENABLED(CONFIG_X86_5LEVEL)) { + void *ptr; + + ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END)); + memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table)); + set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)], + __pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE)); + } + load_cr3(early_top_pgt); __flush_tlb_all(); - clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); + clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END); - kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, + kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK), kasan_mem_to_shadow((void *)PAGE_OFFSET)); for (i = 0; i < E820_MAX_ENTRIES; i++) { if (pfn_mapped[i].end == 0) break; - if (map_range(&pfn_mapped[i])) - panic("kasan: unable to allocate shadow!"); + map_range(&pfn_mapped[i]); } + + shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE; + shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin); + shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin, + PAGE_SIZE); + + shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE + + CPU_ENTRY_AREA_MAP_SIZE); + shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end); + shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end, + PAGE_SIZE); + kasan_populate_zero_shadow( kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), - kasan_mem_to_shadow((void *)__START_KERNEL_map)); + shadow_cpu_entry_begin); + + kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin, + (unsigned long)shadow_cpu_entry_end, 0); + + kasan_populate_zero_shadow(shadow_cpu_entry_end, + kasan_mem_to_shadow((void *)__START_KERNEL_map)); - vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext), - (unsigned long)kasan_mem_to_shadow(_end), - NUMA_NO_NODE); + kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), + (unsigned long)kasan_mem_to_shadow(_end), + early_pfn_to_nid(__pa(_stext))); kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), - (void *)KASAN_SHADOW_END); + (void *)KASAN_SHADOW_END); load_cr3(init_top_pgt); __flush_tlb_all(); diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 879ef930e2c2..aedebd2ebf1e 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -34,25 +34,14 @@ #define TB_SHIFT 40 /* - * Virtual address start and end range for randomization. The end changes base - * on configuration to have the highest amount of space for randomization. - * It increases the possible random position for each randomized region. + * Virtual address start and end range for randomization. * - * You need to add an if/def entry if you introduce a new memory region - * compatible with KASLR. Your entry must be in logical order with memory - * layout. For example, ESPFIX is before EFI because its virtual address is - * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to * ensure that this order is correct and won't be changed. 
+ * The end address could depend on more configuration options to make the + * highest amount of space for randomization available, but that's too hard + * to keep straight and caused issues already. */ static const unsigned long vaddr_start = __PAGE_OFFSET_BASE; - -#if defined(CONFIG_X86_ESPFIX64) -static const unsigned long vaddr_end = ESPFIX_BASE_ADDR; -#elif defined(CONFIG_EFI) -static const unsigned long vaddr_end = EFI_VA_END; -#else -static const unsigned long vaddr_end = __START_KERNEL_map; -#endif +static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE; /* Default values */ unsigned long page_offset_base = __PAGE_OFFSET_BASE; @@ -101,15 +90,12 @@ void __init kernel_randomize_memory(void) unsigned long remain_entropy; /* - * All these BUILD_BUG_ON checks ensures the memory layout is - * consistent with the vaddr_start/vaddr_end variables. + * These BUILD_BUG_ON checks ensure the memory layout is consistent + * with the vaddr_start/vaddr_end variables. These checks are very + * limited.... */ BUILD_BUG_ON(vaddr_start >= vaddr_end); - BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) && - vaddr_end >= EFI_VA_END); - BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) || - IS_ENABLED(CONFIG_EFI)) && - vaddr_end >= __START_KERNEL_map); + BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE); BUILD_BUG_ON(vaddr_end > __START_KERNEL_map); if (!kaslr_memory_enabled()) diff --git a/arch/x86/mm/kmemcheck/Makefile b/arch/x86/mm/kmemcheck/Makefile deleted file mode 100644 index 520b3bce4095..000000000000 --- a/arch/x86/mm/kmemcheck/Makefile +++ /dev/null @@ -1 +0,0 @@ -obj-y := error.o kmemcheck.o opcode.o pte.o selftest.o shadow.o diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c deleted file mode 100644 index 872ec4159a68..000000000000 --- a/arch/x86/mm/kmemcheck/error.c +++ /dev/null @@ -1,228 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include -#include - -#include "error.h" -#include "shadow.h" - -enum kmemcheck_error_type { - KMEMCHECK_ERROR_INVALID_ACCESS, - KMEMCHECK_ERROR_BUG, -}; - -#define SHADOW_COPY_SIZE (1 << CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT) - -struct kmemcheck_error { - enum kmemcheck_error_type type; - - union { - /* KMEMCHECK_ERROR_INVALID_ACCESS */ - struct { - /* Kind of access that caused the error */ - enum kmemcheck_shadow state; - /* Address and size of the erroneous read */ - unsigned long address; - unsigned int size; - }; - }; - - struct pt_regs regs; - struct stack_trace trace; - unsigned long trace_entries[32]; - - /* We compress it to a char. */ - unsigned char shadow_copy[SHADOW_COPY_SIZE]; - unsigned char memory_copy[SHADOW_COPY_SIZE]; -}; - -/* - * Create a ring queue of errors to output. We can't call printk() directly - * from the kmemcheck traps, since this may call the console drivers and - * result in a recursive fault. 
- */ -static struct kmemcheck_error error_fifo[CONFIG_KMEMCHECK_QUEUE_SIZE]; -static unsigned int error_count; -static unsigned int error_rd; -static unsigned int error_wr; -static unsigned int error_missed_count; - -static struct kmemcheck_error *error_next_wr(void) -{ - struct kmemcheck_error *e; - - if (error_count == ARRAY_SIZE(error_fifo)) { - ++error_missed_count; - return NULL; - } - - e = &error_fifo[error_wr]; - if (++error_wr == ARRAY_SIZE(error_fifo)) - error_wr = 0; - ++error_count; - return e; -} - -static struct kmemcheck_error *error_next_rd(void) -{ - struct kmemcheck_error *e; - - if (error_count == 0) - return NULL; - - e = &error_fifo[error_rd]; - if (++error_rd == ARRAY_SIZE(error_fifo)) - error_rd = 0; - --error_count; - return e; -} - -void kmemcheck_error_recall(void) -{ - static const char *desc[] = { - [KMEMCHECK_SHADOW_UNALLOCATED] = "unallocated", - [KMEMCHECK_SHADOW_UNINITIALIZED] = "uninitialized", - [KMEMCHECK_SHADOW_INITIALIZED] = "initialized", - [KMEMCHECK_SHADOW_FREED] = "freed", - }; - - static const char short_desc[] = { - [KMEMCHECK_SHADOW_UNALLOCATED] = 'a', - [KMEMCHECK_SHADOW_UNINITIALIZED] = 'u', - [KMEMCHECK_SHADOW_INITIALIZED] = 'i', - [KMEMCHECK_SHADOW_FREED] = 'f', - }; - - struct kmemcheck_error *e; - unsigned int i; - - e = error_next_rd(); - if (!e) - return; - - switch (e->type) { - case KMEMCHECK_ERROR_INVALID_ACCESS: - printk(KERN_WARNING "WARNING: kmemcheck: Caught %d-bit read from %s memory (%p)\n", - 8 * e->size, e->state < ARRAY_SIZE(desc) ? - desc[e->state] : "(invalid shadow state)", - (void *) e->address); - - printk(KERN_WARNING); - for (i = 0; i < SHADOW_COPY_SIZE; ++i) - printk(KERN_CONT "%02x", e->memory_copy[i]); - printk(KERN_CONT "\n"); - - printk(KERN_WARNING); - for (i = 0; i < SHADOW_COPY_SIZE; ++i) { - if (e->shadow_copy[i] < ARRAY_SIZE(short_desc)) - printk(KERN_CONT " %c", short_desc[e->shadow_copy[i]]); - else - printk(KERN_CONT " ?"); - } - printk(KERN_CONT "\n"); - printk(KERN_WARNING "%*c\n", 2 + 2 - * (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^'); - break; - case KMEMCHECK_ERROR_BUG: - printk(KERN_EMERG "ERROR: kmemcheck: Fatal error\n"); - break; - } - - __show_regs(&e->regs, 1); - print_stack_trace(&e->trace, 0); -} - -static void do_wakeup(unsigned long data) -{ - while (error_count > 0) - kmemcheck_error_recall(); - - if (error_missed_count > 0) { - printk(KERN_WARNING "kmemcheck: Lost %d error reports because " - "the queue was too small\n", error_missed_count); - error_missed_count = 0; - } -} - -static DECLARE_TASKLET(kmemcheck_tasklet, &do_wakeup, 0); - -/* - * Save the context of an error report. - */ -void kmemcheck_error_save(enum kmemcheck_shadow state, - unsigned long address, unsigned int size, struct pt_regs *regs) -{ - static unsigned long prev_ip; - - struct kmemcheck_error *e; - void *shadow_copy; - void *memory_copy; - - /* Don't report several adjacent errors from the same EIP. 
*/ - if (regs->ip == prev_ip) - return; - prev_ip = regs->ip; - - e = error_next_wr(); - if (!e) - return; - - e->type = KMEMCHECK_ERROR_INVALID_ACCESS; - - e->state = state; - e->address = address; - e->size = size; - - /* Save regs */ - memcpy(&e->regs, regs, sizeof(*regs)); - - /* Save stack trace */ - e->trace.nr_entries = 0; - e->trace.entries = e->trace_entries; - e->trace.max_entries = ARRAY_SIZE(e->trace_entries); - e->trace.skip = 0; - save_stack_trace_regs(regs, &e->trace); - - /* Round address down to nearest 16 bytes */ - shadow_copy = kmemcheck_shadow_lookup(address - & ~(SHADOW_COPY_SIZE - 1)); - BUG_ON(!shadow_copy); - - memcpy(e->shadow_copy, shadow_copy, SHADOW_COPY_SIZE); - - kmemcheck_show_addr(address); - memory_copy = (void *) (address & ~(SHADOW_COPY_SIZE - 1)); - memcpy(e->memory_copy, memory_copy, SHADOW_COPY_SIZE); - kmemcheck_hide_addr(address); - - tasklet_hi_schedule_first(&kmemcheck_tasklet); -} - -/* - * Save the context of a kmemcheck bug. - */ -void kmemcheck_error_save_bug(struct pt_regs *regs) -{ - struct kmemcheck_error *e; - - e = error_next_wr(); - if (!e) - return; - - e->type = KMEMCHECK_ERROR_BUG; - - memcpy(&e->regs, regs, sizeof(*regs)); - - e->trace.nr_entries = 0; - e->trace.entries = e->trace_entries; - e->trace.max_entries = ARRAY_SIZE(e->trace_entries); - e->trace.skip = 1; - save_stack_trace(&e->trace); - - tasklet_hi_schedule_first(&kmemcheck_tasklet); -} diff --git a/arch/x86/mm/kmemcheck/error.h b/arch/x86/mm/kmemcheck/error.h deleted file mode 100644 index 39f80d7a874d..000000000000 --- a/arch/x86/mm/kmemcheck/error.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH__X86__MM__KMEMCHECK__ERROR_H -#define ARCH__X86__MM__KMEMCHECK__ERROR_H - -#include - -#include "shadow.h" - -void kmemcheck_error_save(enum kmemcheck_shadow state, - unsigned long address, unsigned int size, struct pt_regs *regs); - -void kmemcheck_error_save_bug(struct pt_regs *regs); - -void kmemcheck_error_recall(void); - -#endif diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c deleted file mode 100644 index 4515bae36bbe..000000000000 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ /dev/null @@ -1,658 +0,0 @@ -/** - * kmemcheck - a heavyweight memory checker for the linux kernel - * Copyright (C) 2007, 2008 Vegard Nossum - * (With a lot of help from Ingo Molnar and Pekka Enberg.) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License (version 2) as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "error.h" -#include "opcode.h" -#include "pte.h" -#include "selftest.h" -#include "shadow.h" - - -#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT -# define KMEMCHECK_ENABLED 0 -#endif - -#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT -# define KMEMCHECK_ENABLED 1 -#endif - -#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT -# define KMEMCHECK_ENABLED 2 -#endif - -int kmemcheck_enabled = KMEMCHECK_ENABLED; - -int __init kmemcheck_init(void) -{ -#ifdef CONFIG_SMP - /* - * Limit SMP to use a single CPU. We rely on the fact that this code - * runs before SMP is set up. 
- */ - if (setup_max_cpus > 1) { - printk(KERN_INFO - "kmemcheck: Limiting number of CPUs to 1.\n"); - setup_max_cpus = 1; - } -#endif - - if (!kmemcheck_selftest()) { - printk(KERN_INFO "kmemcheck: self-tests failed; disabling\n"); - kmemcheck_enabled = 0; - return -EINVAL; - } - - printk(KERN_INFO "kmemcheck: Initialized\n"); - return 0; -} - -early_initcall(kmemcheck_init); - -/* - * We need to parse the kmemcheck= option before any memory is allocated. - */ -static int __init param_kmemcheck(char *str) -{ - int val; - int ret; - - if (!str) - return -EINVAL; - - ret = kstrtoint(str, 0, &val); - if (ret) - return ret; - kmemcheck_enabled = val; - return 0; -} - -early_param("kmemcheck", param_kmemcheck); - -int kmemcheck_show_addr(unsigned long address) -{ - pte_t *pte; - - pte = kmemcheck_pte_lookup(address); - if (!pte) - return 0; - - set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); - __flush_tlb_one(address); - return 1; -} - -int kmemcheck_hide_addr(unsigned long address) -{ - pte_t *pte; - - pte = kmemcheck_pte_lookup(address); - if (!pte) - return 0; - - set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); - __flush_tlb_one(address); - return 1; -} - -struct kmemcheck_context { - bool busy; - int balance; - - /* - * There can be at most two memory operands to an instruction, but - * each address can cross a page boundary -- so we may need up to - * four addresses that must be hidden/revealed for each fault. - */ - unsigned long addr[4]; - unsigned long n_addrs; - unsigned long flags; - - /* Data size of the instruction that caused a fault. */ - unsigned int size; -}; - -static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context); - -bool kmemcheck_active(struct pt_regs *regs) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - - return data->balance > 0; -} - -/* Save an address that needs to be shown/hidden */ -static void kmemcheck_save_addr(unsigned long addr) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - - BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr)); - data->addr[data->n_addrs++] = addr; -} - -static unsigned int kmemcheck_show_all(void) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - unsigned int i; - unsigned int n; - - n = 0; - for (i = 0; i < data->n_addrs; ++i) - n += kmemcheck_show_addr(data->addr[i]); - - return n; -} - -static unsigned int kmemcheck_hide_all(void) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - unsigned int i; - unsigned int n; - - n = 0; - for (i = 0; i < data->n_addrs; ++i) - n += kmemcheck_hide_addr(data->addr[i]); - - return n; -} - -/* - * Called from the #PF handler. - */ -void kmemcheck_show(struct pt_regs *regs) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - - BUG_ON(!irqs_disabled()); - - if (unlikely(data->balance != 0)) { - kmemcheck_show_all(); - kmemcheck_error_save_bug(regs); - data->balance = 0; - return; - } - - /* - * None of the addresses actually belonged to kmemcheck. Note that - * this is not an error. - */ - if (kmemcheck_show_all() == 0) - return; - - ++data->balance; - - /* - * The IF needs to be cleared as well, so that the faulting - * instruction can run "uninterrupted". Otherwise, we might take - * an interrupt and start executing that before we've had a chance - * to hide the page again. 
- * - * NOTE: In the rare case of multiple faults, we must not override - * the original flags: - */ - if (!(regs->flags & X86_EFLAGS_TF)) - data->flags = regs->flags; - - regs->flags |= X86_EFLAGS_TF; - regs->flags &= ~X86_EFLAGS_IF; -} - -/* - * Called from the #DB handler. - */ -void kmemcheck_hide(struct pt_regs *regs) -{ - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - int n; - - BUG_ON(!irqs_disabled()); - - if (unlikely(data->balance != 1)) { - kmemcheck_show_all(); - kmemcheck_error_save_bug(regs); - data->n_addrs = 0; - data->balance = 0; - - if (!(data->flags & X86_EFLAGS_TF)) - regs->flags &= ~X86_EFLAGS_TF; - if (data->flags & X86_EFLAGS_IF) - regs->flags |= X86_EFLAGS_IF; - return; - } - - if (kmemcheck_enabled) - n = kmemcheck_hide_all(); - else - n = kmemcheck_show_all(); - - if (n == 0) - return; - - --data->balance; - - data->n_addrs = 0; - - if (!(data->flags & X86_EFLAGS_TF)) - regs->flags &= ~X86_EFLAGS_TF; - if (data->flags & X86_EFLAGS_IF) - regs->flags |= X86_EFLAGS_IF; -} - -void kmemcheck_show_pages(struct page *p, unsigned int n) -{ - unsigned int i; - - for (i = 0; i < n; ++i) { - unsigned long address; - pte_t *pte; - unsigned int level; - - address = (unsigned long) page_address(&p[i]); - pte = lookup_address(address, &level); - BUG_ON(!pte); - BUG_ON(level != PG_LEVEL_4K); - - set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); - set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN)); - __flush_tlb_one(address); - } -} - -bool kmemcheck_page_is_tracked(struct page *p) -{ - /* This will also check the "hidden" flag of the PTE. */ - return kmemcheck_pte_lookup((unsigned long) page_address(p)); -} - -void kmemcheck_hide_pages(struct page *p, unsigned int n) -{ - unsigned int i; - - for (i = 0; i < n; ++i) { - unsigned long address; - pte_t *pte; - unsigned int level; - - address = (unsigned long) page_address(&p[i]); - pte = lookup_address(address, &level); - BUG_ON(!pte); - BUG_ON(level != PG_LEVEL_4K); - - set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); - set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN)); - __flush_tlb_one(address); - } -} - -/* Access may NOT cross page boundary */ -static void kmemcheck_read_strict(struct pt_regs *regs, - unsigned long addr, unsigned int size) -{ - void *shadow; - enum kmemcheck_shadow status; - - shadow = kmemcheck_shadow_lookup(addr); - if (!shadow) - return; - - kmemcheck_save_addr(addr); - status = kmemcheck_shadow_test(shadow, size); - if (status == KMEMCHECK_SHADOW_INITIALIZED) - return; - - if (kmemcheck_enabled) - kmemcheck_error_save(status, addr, size, regs); - - if (kmemcheck_enabled == 2) - kmemcheck_enabled = 0; - - /* Don't warn about it again. */ - kmemcheck_shadow_set(shadow, size); -} - -bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size) -{ - enum kmemcheck_shadow status; - void *shadow; - - shadow = kmemcheck_shadow_lookup(addr); - if (!shadow) - return true; - - status = kmemcheck_shadow_test_all(shadow, size); - - return status == KMEMCHECK_SHADOW_INITIALIZED; -} - -/* Access may cross page boundary */ -static void kmemcheck_read(struct pt_regs *regs, - unsigned long addr, unsigned int size) -{ - unsigned long page = addr & PAGE_MASK; - unsigned long next_addr = addr + size - 1; - unsigned long next_page = next_addr & PAGE_MASK; - - if (likely(page == next_page)) { - kmemcheck_read_strict(regs, addr, size); - return; - } - - /* - * What we do is basically to split the access across the - * two pages and handle each part separately. 
Yes, this means - * that we may now see reads that are 3 + 5 bytes, for - * example (and if both are uninitialized, there will be two - * reports), but it makes the code a lot simpler. - */ - kmemcheck_read_strict(regs, addr, next_page - addr); - kmemcheck_read_strict(regs, next_page, next_addr - next_page); -} - -static void kmemcheck_write_strict(struct pt_regs *regs, - unsigned long addr, unsigned int size) -{ - void *shadow; - - shadow = kmemcheck_shadow_lookup(addr); - if (!shadow) - return; - - kmemcheck_save_addr(addr); - kmemcheck_shadow_set(shadow, size); -} - -static void kmemcheck_write(struct pt_regs *regs, - unsigned long addr, unsigned int size) -{ - unsigned long page = addr & PAGE_MASK; - unsigned long next_addr = addr + size - 1; - unsigned long next_page = next_addr & PAGE_MASK; - - if (likely(page == next_page)) { - kmemcheck_write_strict(regs, addr, size); - return; - } - - /* See comment in kmemcheck_read(). */ - kmemcheck_write_strict(regs, addr, next_page - addr); - kmemcheck_write_strict(regs, next_page, next_addr - next_page); -} - -/* - * Copying is hard. We have two addresses, each of which may be split across - * a page (and each page will have different shadow addresses). - */ -static void kmemcheck_copy(struct pt_regs *regs, - unsigned long src_addr, unsigned long dst_addr, unsigned int size) -{ - uint8_t shadow[8]; - enum kmemcheck_shadow status; - - unsigned long page; - unsigned long next_addr; - unsigned long next_page; - - uint8_t *x; - unsigned int i; - unsigned int n; - - BUG_ON(size > sizeof(shadow)); - - page = src_addr & PAGE_MASK; - next_addr = src_addr + size - 1; - next_page = next_addr & PAGE_MASK; - - if (likely(page == next_page)) { - /* Same page */ - x = kmemcheck_shadow_lookup(src_addr); - if (x) { - kmemcheck_save_addr(src_addr); - for (i = 0; i < size; ++i) - shadow[i] = x[i]; - } else { - for (i = 0; i < size; ++i) - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - } else { - n = next_page - src_addr; - BUG_ON(n > sizeof(shadow)); - - /* First page */ - x = kmemcheck_shadow_lookup(src_addr); - if (x) { - kmemcheck_save_addr(src_addr); - for (i = 0; i < n; ++i) - shadow[i] = x[i]; - } else { - /* Not tracked */ - for (i = 0; i < n; ++i) - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - - /* Second page */ - x = kmemcheck_shadow_lookup(next_page); - if (x) { - kmemcheck_save_addr(next_page); - for (i = n; i < size; ++i) - shadow[i] = x[i - n]; - } else { - /* Not tracked */ - for (i = n; i < size; ++i) - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - } - - page = dst_addr & PAGE_MASK; - next_addr = dst_addr + size - 1; - next_page = next_addr & PAGE_MASK; - - if (likely(page == next_page)) { - /* Same page */ - x = kmemcheck_shadow_lookup(dst_addr); - if (x) { - kmemcheck_save_addr(dst_addr); - for (i = 0; i < size; ++i) { - x[i] = shadow[i]; - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - } - } else { - n = next_page - dst_addr; - BUG_ON(n > sizeof(shadow)); - - /* First page */ - x = kmemcheck_shadow_lookup(dst_addr); - if (x) { - kmemcheck_save_addr(dst_addr); - for (i = 0; i < n; ++i) { - x[i] = shadow[i]; - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - } - - /* Second page */ - x = kmemcheck_shadow_lookup(next_page); - if (x) { - kmemcheck_save_addr(next_page); - for (i = n; i < size; ++i) { - x[i - n] = shadow[i]; - shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; - } - } - } - - status = kmemcheck_shadow_test(shadow, size); - if (status == KMEMCHECK_SHADOW_INITIALIZED) - return; - - if (kmemcheck_enabled) - kmemcheck_error_save(status, 
src_addr, size, regs); - - if (kmemcheck_enabled == 2) - kmemcheck_enabled = 0; -} - -enum kmemcheck_method { - KMEMCHECK_READ, - KMEMCHECK_WRITE, -}; - -static void kmemcheck_access(struct pt_regs *regs, - unsigned long fallback_address, enum kmemcheck_method fallback_method) -{ - const uint8_t *insn; - const uint8_t *insn_primary; - unsigned int size; - - struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context); - - /* Recursive fault -- ouch. */ - if (data->busy) { - kmemcheck_show_addr(fallback_address); - kmemcheck_error_save_bug(regs); - return; - } - - data->busy = true; - - insn = (const uint8_t *) regs->ip; - insn_primary = kmemcheck_opcode_get_primary(insn); - - kmemcheck_opcode_decode(insn, &size); - - switch (insn_primary[0]) { -#ifdef CONFIG_KMEMCHECK_BITOPS_OK - /* AND, OR, XOR */ - /* - * Unfortunately, these instructions have to be excluded from - * our regular checking since they access only some (and not - * all) bits. This clears out "bogus" bitfield-access warnings. - */ - case 0x80: - case 0x81: - case 0x82: - case 0x83: - switch ((insn_primary[1] >> 3) & 7) { - /* OR */ - case 1: - /* AND */ - case 4: - /* XOR */ - case 6: - kmemcheck_write(regs, fallback_address, size); - goto out; - - /* ADD */ - case 0: - /* ADC */ - case 2: - /* SBB */ - case 3: - /* SUB */ - case 5: - /* CMP */ - case 7: - break; - } - break; -#endif - - /* MOVS, MOVSB, MOVSW, MOVSD */ - case 0xa4: - case 0xa5: - /* - * These instructions are special because they take two - * addresses, but we only get one page fault. - */ - kmemcheck_copy(regs, regs->si, regs->di, size); - goto out; - - /* CMPS, CMPSB, CMPSW, CMPSD */ - case 0xa6: - case 0xa7: - kmemcheck_read(regs, regs->si, size); - kmemcheck_read(regs, regs->di, size); - goto out; - } - - /* - * If the opcode isn't special in any way, we use the data from the - * page fault handler to determine the address and type of memory - * access. - */ - switch (fallback_method) { - case KMEMCHECK_READ: - kmemcheck_read(regs, fallback_address, size); - goto out; - case KMEMCHECK_WRITE: - kmemcheck_write(regs, fallback_address, size); - goto out; - } - -out: - data->busy = false; -} - -bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, - unsigned long error_code) -{ - pte_t *pte; - - /* - * XXX: Is it safe to assume that memory accesses from virtual 86 - * mode or non-kernel code segments will _never_ access kernel - * memory (e.g. tracked pages)? For now, we need this to avoid - * invoking kmemcheck for PnP BIOS calls. - */ - if (regs->flags & X86_VM_MASK) - return false; - if (regs->cs != __KERNEL_CS) - return false; - - pte = kmemcheck_pte_lookup(address); - if (!pte) - return false; - - WARN_ON_ONCE(in_nmi()); - - if (error_code & 2) - kmemcheck_access(regs, address, KMEMCHECK_WRITE); - else - kmemcheck_access(regs, address, KMEMCHECK_READ); - - kmemcheck_show(regs); - return true; -} - -bool kmemcheck_trap(struct pt_regs *regs) -{ - if (!kmemcheck_active(regs)) - return false; - - /* We're done. 
*/ - kmemcheck_hide(regs); - return true; -} diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c deleted file mode 100644 index df8109ddf7fe..000000000000 --- a/arch/x86/mm/kmemcheck/opcode.c +++ /dev/null @@ -1,107 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include - -#include "opcode.h" - -static bool opcode_is_prefix(uint8_t b) -{ - return - /* Group 1 */ - b == 0xf0 || b == 0xf2 || b == 0xf3 - /* Group 2 */ - || b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26 - || b == 0x64 || b == 0x65 - /* Group 3 */ - || b == 0x66 - /* Group 4 */ - || b == 0x67; -} - -#ifdef CONFIG_X86_64 -static bool opcode_is_rex_prefix(uint8_t b) -{ - return (b & 0xf0) == 0x40; -} -#else -static bool opcode_is_rex_prefix(uint8_t b) -{ - return false; -} -#endif - -#define REX_W (1 << 3) - -/* - * This is a VERY crude opcode decoder. We only need to find the size of the - * load/store that caused our #PF and this should work for all the opcodes - * that we care about. Moreover, the ones who invented this instruction set - * should be shot. - */ -void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size) -{ - /* Default operand size */ - int operand_size_override = 4; - - /* prefixes */ - for (; opcode_is_prefix(*op); ++op) { - if (*op == 0x66) - operand_size_override = 2; - } - - /* REX prefix */ - if (opcode_is_rex_prefix(*op)) { - uint8_t rex = *op; - - ++op; - if (rex & REX_W) { - switch (*op) { - case 0x63: - *size = 4; - return; - case 0x0f: - ++op; - - switch (*op) { - case 0xb6: - case 0xbe: - *size = 1; - return; - case 0xb7: - case 0xbf: - *size = 2; - return; - } - - break; - } - - *size = 8; - return; - } - } - - /* escape opcode */ - if (*op == 0x0f) { - ++op; - - /* - * This is move with zero-extend and sign-extend, respectively; - * we don't have to think about 0xb6/0xbe, because this is - * already handled in the conditional below. - */ - if (*op == 0xb7 || *op == 0xbf) - operand_size_override = 2; - } - - *size = (*op & 1) ? 
operand_size_override : 1; -} - -const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op) -{ - /* skip prefixes */ - while (opcode_is_prefix(*op)) - ++op; - if (opcode_is_rex_prefix(*op)) - ++op; - return op; -} diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h deleted file mode 100644 index 51a1ce94c24a..000000000000 --- a/arch/x86/mm/kmemcheck/opcode.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH__X86__MM__KMEMCHECK__OPCODE_H -#define ARCH__X86__MM__KMEMCHECK__OPCODE_H - -#include - -void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size); -const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op); - -#endif diff --git a/arch/x86/mm/kmemcheck/pte.c b/arch/x86/mm/kmemcheck/pte.c deleted file mode 100644 index 8a03be90272a..000000000000 --- a/arch/x86/mm/kmemcheck/pte.c +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include - -#include - -#include "pte.h" - -pte_t *kmemcheck_pte_lookup(unsigned long address) -{ - pte_t *pte; - unsigned int level; - - pte = lookup_address(address, &level); - if (!pte) - return NULL; - if (level != PG_LEVEL_4K) - return NULL; - if (!pte_hidden(*pte)) - return NULL; - - return pte; -} - diff --git a/arch/x86/mm/kmemcheck/pte.h b/arch/x86/mm/kmemcheck/pte.h deleted file mode 100644 index b595612382c2..000000000000 --- a/arch/x86/mm/kmemcheck/pte.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH__X86__MM__KMEMCHECK__PTE_H -#define ARCH__X86__MM__KMEMCHECK__PTE_H - -#include - -#include - -pte_t *kmemcheck_pte_lookup(unsigned long address); - -#endif diff --git a/arch/x86/mm/kmemcheck/selftest.c b/arch/x86/mm/kmemcheck/selftest.c deleted file mode 100644 index 7ce0be1f99eb..000000000000 --- a/arch/x86/mm/kmemcheck/selftest.c +++ /dev/null @@ -1,71 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include - -#include "opcode.h" -#include "selftest.h" - -struct selftest_opcode { - unsigned int expected_size; - const uint8_t *insn; - const char *desc; -}; - -static const struct selftest_opcode selftest_opcodes[] = { - /* REP MOVS */ - {1, "\xf3\xa4", "rep movsb , "}, - {4, "\xf3\xa5", "rep movsl , "}, - - /* MOVZX / MOVZXD */ - {1, "\x66\x0f\xb6\x51\xf8", "movzwq , "}, - {1, "\x0f\xb6\x51\xf8", "movzwq , "}, - - /* MOVSX / MOVSXD */ - {1, "\x66\x0f\xbe\x51\xf8", "movswq , "}, - {1, "\x0f\xbe\x51\xf8", "movswq , "}, - -#ifdef CONFIG_X86_64 - /* MOVZX / MOVZXD */ - {1, "\x49\x0f\xb6\x51\xf8", "movzbq , "}, - {2, "\x49\x0f\xb7\x51\xf8", "movzbq , "}, - - /* MOVSX / MOVSXD */ - {1, "\x49\x0f\xbe\x51\xf8", "movsbq , "}, - {2, "\x49\x0f\xbf\x51\xf8", "movsbq , "}, - {4, "\x49\x63\x51\xf8", "movslq , "}, -#endif -}; - -static bool selftest_opcode_one(const struct selftest_opcode *op) -{ - unsigned size; - - kmemcheck_opcode_decode(op->insn, &size); - - if (size == op->expected_size) - return true; - - printk(KERN_WARNING "kmemcheck: opcode %s: expected size %d, got %d\n", - op->desc, op->expected_size, size); - return false; -} - -static bool selftest_opcodes_all(void) -{ - bool pass = true; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(selftest_opcodes); ++i) - pass = pass && selftest_opcode_one(&selftest_opcodes[i]); - - return pass; -} - -bool kmemcheck_selftest(void) -{ - bool pass = true; - - pass = pass && selftest_opcodes_all(); - - return pass; -} diff --git a/arch/x86/mm/kmemcheck/selftest.h b/arch/x86/mm/kmemcheck/selftest.h deleted file mode 100644 index 8d759aae453d..000000000000 --- 
a/arch/x86/mm/kmemcheck/selftest.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH_X86_MM_KMEMCHECK_SELFTEST_H -#define ARCH_X86_MM_KMEMCHECK_SELFTEST_H - -bool kmemcheck_selftest(void); - -#endif diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c deleted file mode 100644 index c2638a7d2c10..000000000000 --- a/arch/x86/mm/kmemcheck/shadow.c +++ /dev/null @@ -1,173 +0,0 @@ -#include -#include -#include - -#include -#include - -#include "pte.h" -#include "shadow.h" - -/* - * Return the shadow address for the given address. Returns NULL if the - * address is not tracked. - * - * We need to be extremely careful not to follow any invalid pointers, - * because this function can be called for *any* possible address. - */ -void *kmemcheck_shadow_lookup(unsigned long address) -{ - pte_t *pte; - struct page *page; - - if (!virt_addr_valid(address)) - return NULL; - - pte = kmemcheck_pte_lookup(address); - if (!pte) - return NULL; - - page = virt_to_page(address); - if (!page->shadow) - return NULL; - return page->shadow + (address & (PAGE_SIZE - 1)); -} - -static void mark_shadow(void *address, unsigned int n, - enum kmemcheck_shadow status) -{ - unsigned long addr = (unsigned long) address; - unsigned long last_addr = addr + n - 1; - unsigned long page = addr & PAGE_MASK; - unsigned long last_page = last_addr & PAGE_MASK; - unsigned int first_n; - void *shadow; - - /* If the memory range crosses a page boundary, stop there. */ - if (page == last_page) - first_n = n; - else - first_n = page + PAGE_SIZE - addr; - - shadow = kmemcheck_shadow_lookup(addr); - if (shadow) - memset(shadow, status, first_n); - - addr += first_n; - n -= first_n; - - /* Do full-page memset()s. */ - while (n >= PAGE_SIZE) { - shadow = kmemcheck_shadow_lookup(addr); - if (shadow) - memset(shadow, status, PAGE_SIZE); - - addr += PAGE_SIZE; - n -= PAGE_SIZE; - } - - /* Do the remaining page, if any. */ - if (n > 0) { - shadow = kmemcheck_shadow_lookup(addr); - if (shadow) - memset(shadow, status, n); - } -} - -void kmemcheck_mark_unallocated(void *address, unsigned int n) -{ - mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED); -} - -void kmemcheck_mark_uninitialized(void *address, unsigned int n) -{ - mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED); -} - -/* - * Fill the shadow memory of the given address such that the memory at that - * address is marked as being initialized. - */ -void kmemcheck_mark_initialized(void *address, unsigned int n) -{ - mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED); -} -EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized); - -void kmemcheck_mark_freed(void *address, unsigned int n) -{ - mark_shadow(address, n, KMEMCHECK_SHADOW_FREED); -} - -void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n) -{ - unsigned int i; - - for (i = 0; i < n; ++i) - kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE); -} - -void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n) -{ - unsigned int i; - - for (i = 0; i < n; ++i) - kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE); -} - -void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n) -{ - unsigned int i; - - for (i = 0; i < n; ++i) - kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE); -} - -enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size) -{ -#ifdef CONFIG_KMEMCHECK_PARTIAL_OK - uint8_t *x; - unsigned int i; - - x = shadow; - - /* - * Make sure _some_ bytes are initialized. 
Gcc frequently generates - * code to access neighboring bytes. - */ - for (i = 0; i < size; ++i) { - if (x[i] == KMEMCHECK_SHADOW_INITIALIZED) - return x[i]; - } - - return x[0]; -#else - return kmemcheck_shadow_test_all(shadow, size); -#endif -} - -enum kmemcheck_shadow kmemcheck_shadow_test_all(void *shadow, unsigned int size) -{ - uint8_t *x; - unsigned int i; - - x = shadow; - - /* All bytes must be initialized. */ - for (i = 0; i < size; ++i) { - if (x[i] != KMEMCHECK_SHADOW_INITIALIZED) - return x[i]; - } - - return x[0]; -} - -void kmemcheck_shadow_set(void *shadow, unsigned int size) -{ - uint8_t *x; - unsigned int i; - - x = shadow; - for (i = 0; i < size; ++i) - x[i] = KMEMCHECK_SHADOW_INITIALIZED; -} diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h deleted file mode 100644 index 49768dc18664..000000000000 --- a/arch/x86/mm/kmemcheck/shadow.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH__X86__MM__KMEMCHECK__SHADOW_H -#define ARCH__X86__MM__KMEMCHECK__SHADOW_H - -enum kmemcheck_shadow { - KMEMCHECK_SHADOW_UNALLOCATED, - KMEMCHECK_SHADOW_UNINITIALIZED, - KMEMCHECK_SHADOW_INITIALIZED, - KMEMCHECK_SHADOW_FREED, -}; - -void *kmemcheck_shadow_lookup(unsigned long address); - -enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size); -enum kmemcheck_shadow kmemcheck_shadow_test_all(void *shadow, - unsigned int size); -void kmemcheck_shadow_set(void *shadow, unsigned int size); - -#endif diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index c21c2ed04612..79eb55ce69a9 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c @@ -126,24 +126,29 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr) static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old) { + pmd_t new_pmd; pmdval_t v = pmd_val(*pmd); if (clear) { - *old = v & _PAGE_PRESENT; - v &= ~_PAGE_PRESENT; - } else /* presume this has been called with clear==true previously */ - v |= *old; - set_pmd(pmd, __pmd(v)); + *old = v; + new_pmd = pmd_mknotpresent(*pmd); + } else { + /* Presume this has been called with clear==true previously */ + new_pmd = __pmd(*old); + } + set_pmd(pmd, new_pmd); } static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old) { pteval_t v = pte_val(*pte); if (clear) { - *old = v & _PAGE_PRESENT; - v &= ~_PAGE_PRESENT; - } else /* presume this has been called with clear==true previously */ - v |= *old; - set_pte_atomic(pte, __pte(v)); + *old = v; + /* Nothing should care about address */ + pte_clear(&init_mm, 0, pte); + } else { + /* Presume this has been called with clear==true previously */ + set_pte_atomic(pte, __pte(*old)); + } } static int clear_page_presence(struct kmmio_fault_page *f, bool clear) @@ -168,7 +173,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear) return -1; } - __flush_tlb_one(f->addr); + __flush_tlb_one_kernel(f->addr); return 0; } @@ -435,17 +440,18 @@ int register_kmmio_probe(struct kmmio_probe *p) unsigned long flags; int ret = 0; unsigned long size = 0; + unsigned long addr = p->addr & PAGE_MASK; const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); unsigned int l; pte_t *pte; spin_lock_irqsave(&kmmio_lock, flags); - if (get_kmmio_probe(p->addr)) { + if (get_kmmio_probe(addr)) { ret = -EEXIST; goto out; } - pte = lookup_address(p->addr, &l); + pte = lookup_address(addr, &l); if (!pte) { ret = -EINVAL; goto out; @@ -454,7 +460,7 @@ int register_kmmio_probe(struct kmmio_probe *p) kmmio_count++; 
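As a standalone illustration of the rounding above (not part of the hunk; 4 KiB pages and the helper name are assumptions of the sketch), masking the probe address down to its page while extending the length by the sub-page offset still covers the whole registered range:

	#include <stdio.h>

	#define PG_SZ	4096UL
	#define PG_MASK	(~(PG_SZ - 1))

	int main(void)
	{
		unsigned long p_addr = 0x1000f00UL, p_len = 0x300UL;	/* hypothetical probe */
		unsigned long addr = p_addr & PG_MASK;			/* masked base, as above */
		unsigned long size_lim = p_len + (p_addr & ~PG_MASK);	/* len + sub-page offset */
		unsigned long size;

		for (size = 0; size < size_lim; size += PG_SZ)
			printf("arm page at %#lx\n", addr + size);	/* hypothetical action */
		return 0;
	}

This arms the pages at 0x1000000 and 0x1001000, which together contain every byte of [0x1000f00, 0x1001200).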
list_add_rcu(&p->list, &kmmio_probes); while (size < size_lim) { - if (add_kmmio_fault_page(p->addr + size)) + if (add_kmmio_fault_page(addr + size)) pr_err("Unable to set page fault.\n"); size += page_level_size(l); } @@ -528,19 +534,20 @@ void unregister_kmmio_probe(struct kmmio_probe *p) { unsigned long flags; unsigned long size = 0; + unsigned long addr = p->addr & PAGE_MASK; const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); struct kmmio_fault_page *release_list = NULL; struct kmmio_delayed_release *drelease; unsigned int l; pte_t *pte; - pte = lookup_address(p->addr, &l); + pte = lookup_address(addr, &l); if (!pte) return; spin_lock_irqsave(&kmmio_lock, flags); while (size < size_lim) { - release_kmmio_fault_page(p->addr + size, &release_list); + release_kmmio_fault_page(addr + size, &release_list); size += page_level_size(l); } list_del_rcu(&p->list); diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 0286327e65fa..48c03c74c7f4 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -213,37 +213,62 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size) set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT); } -static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start, - unsigned long end) +struct sme_populate_pgd_data { + void *pgtable_area; + pgd_t *pgd; + + pmdval_t pmd_flags; + pteval_t pte_flags; + unsigned long paddr; + + unsigned long vaddr; + unsigned long vaddr_end; +}; + +static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd) { unsigned long pgd_start, pgd_end, pgd_size; pgd_t *pgd_p; - pgd_start = start & PGDIR_MASK; - pgd_end = end & PGDIR_MASK; + pgd_start = ppd->vaddr & PGDIR_MASK; + pgd_end = ppd->vaddr_end & PGDIR_MASK; - pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1); - pgd_size *= sizeof(pgd_t); + pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t); - pgd_p = pgd_base + pgd_index(start); + pgd_p = ppd->pgd + pgd_index(ppd->vaddr); memset(pgd_p, 0, pgd_size); } -#define PGD_FLAGS _KERNPG_TABLE_NOENC -#define P4D_FLAGS _KERNPG_TABLE_NOENC -#define PUD_FLAGS _KERNPG_TABLE_NOENC -#define PMD_FLAGS (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL) +#define PGD_FLAGS _KERNPG_TABLE_NOENC +#define P4D_FLAGS _KERNPG_TABLE_NOENC +#define PUD_FLAGS _KERNPG_TABLE_NOENC +#define PMD_FLAGS _KERNPG_TABLE_NOENC + +#define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL) + +#define PMD_FLAGS_DEC PMD_FLAGS_LARGE +#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \ + (_PAGE_PAT | _PAGE_PWT)) + +#define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC) + +#define PTE_FLAGS (__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL) + +#define PTE_FLAGS_DEC PTE_FLAGS +#define PTE_FLAGS_DEC_WP ((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \ + (_PAGE_PAT | _PAGE_PWT)) + +#define PTE_FLAGS_ENC (PTE_FLAGS | _PAGE_ENC) -static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area, - unsigned long vaddr, pmdval_t pmd_val) +static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd) { pgd_t *pgd_p; p4d_t *p4d_p; pud_t *pud_p; pmd_t *pmd_p; - pgd_p = pgd_base + pgd_index(vaddr); + pgd_p = ppd->pgd + pgd_index(ppd->vaddr); if (native_pgd_val(*pgd_p)) { if (IS_ENABLED(CONFIG_X86_5LEVEL)) p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK); @@ -253,15 +278,15 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area, pgd_t pgd; if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - p4d_p = pgtable_area; + p4d_p = ppd->pgtable_area; memset(p4d_p, 0, 
sizeof(*p4d_p) * PTRS_PER_P4D); - pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D; + ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D; pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS); } else { - pud_p = pgtable_area; + pud_p = ppd->pgtable_area; memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); - pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; + ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS); } @@ -269,58 +294,160 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area, } if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - p4d_p += p4d_index(vaddr); + p4d_p += p4d_index(ppd->vaddr); if (native_p4d_val(*p4d_p)) { pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK); } else { p4d_t p4d; - pud_p = pgtable_area; + pud_p = ppd->pgtable_area; memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); - pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; + ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS); native_set_p4d(p4d_p, p4d); } } - pud_p += pud_index(vaddr); + pud_p += pud_index(ppd->vaddr); if (native_pud_val(*pud_p)) { if (native_pud_val(*pud_p) & _PAGE_PSE) - goto out; + return NULL; pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK); } else { pud_t pud; - pmd_p = pgtable_area; + pmd_p = ppd->pgtable_area; memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD); - pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD; + ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD; pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS); native_set_pud(pud_p, pud); } - pmd_p += pmd_index(vaddr); + return pmd_p; +} + +static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) +{ + pmd_t *pmd_p; + + pmd_p = sme_prepare_pgd(ppd); + if (!pmd_p) + return; + + pmd_p += pmd_index(ppd->vaddr); if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE)) - native_set_pmd(pmd_p, native_make_pmd(pmd_val)); + native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags)); +} + +static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd) +{ + pmd_t *pmd_p; + pte_t *pte_p; + + pmd_p = sme_prepare_pgd(ppd); + if (!pmd_p) + return; + + pmd_p += pmd_index(ppd->vaddr); + if (native_pmd_val(*pmd_p)) { + if (native_pmd_val(*pmd_p) & _PAGE_PSE) + return; + + pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK); + } else { + pmd_t pmd; + + pte_p = ppd->pgtable_area; + memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE); + ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE; + + pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS); + native_set_pmd(pmd_p, pmd); + } -out: - return pgtable_area; + pte_p += pte_index(ppd->vaddr); + if (!native_pte_val(*pte_p)) + native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags)); +} + +static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd) +{ + while (ppd->vaddr < ppd->vaddr_end) { + sme_populate_pgd_large(ppd); + + ppd->vaddr += PMD_PAGE_SIZE; + ppd->paddr += PMD_PAGE_SIZE; + } +} + +static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd) +{ + while (ppd->vaddr < ppd->vaddr_end) { + sme_populate_pgd(ppd); + + ppd->vaddr += PAGE_SIZE; + ppd->paddr += PAGE_SIZE; + } +} + +static void __init __sme_map_range(struct sme_populate_pgd_data *ppd, + pmdval_t pmd_flags, pteval_t pte_flags) +{ + unsigned long vaddr_end; + + ppd->pmd_flags = pmd_flags; + ppd->pte_flags = pte_flags; + + /* Save original end value since we modify the struct value */ + vaddr_end = ppd->vaddr_end; + + /* If start is not 2MB 
aligned, create PTE entries */ + ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE); + __sme_map_range_pte(ppd); + + /* Create PMD entries */ + ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK; + __sme_map_range_pmd(ppd); + + /* If end is not 2MB aligned, create PTE entries */ + ppd->vaddr_end = vaddr_end; + __sme_map_range_pte(ppd); +} + +static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd) +{ + __sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC); +} + +static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd) +{ + __sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC); +} + +static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd) +{ + __sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP); } static unsigned long __init sme_pgtable_calc(unsigned long len) { - unsigned long p4d_size, pud_size, pmd_size; + unsigned long p4d_size, pud_size, pmd_size, pte_size; unsigned long total; /* * Perform a relatively simplistic calculation of the pagetable - * entries that are needed. That mappings will be covered by 2MB - * PMD entries so we can conservatively calculate the required + * entries that are needed. Those mappings will be covered mostly + * by 2MB PMD entries so we can conservatively calculate the required * number of P4D, PUD and PMD structures needed to perform the - * mappings. Incrementing the count for each covers the case where - * the addresses cross entries. + * mappings. For mappings that are not 2MB aligned, PTE mappings + * would be needed for the start and end portion of the address range + * that fall outside of the 2MB alignment. This results in, at most, + * two extra pages to hold PTE entries for each range that is mapped. + * Incrementing the count for each covers the case where the addresses + * cross entries. */ if (IS_ENABLED(CONFIG_X86_5LEVEL)) { p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1; @@ -334,8 +461,9 @@ static unsigned long __init sme_pgtable_calc(unsigned long len) } pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1; pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD; + pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE; - total = p4d_size + pud_size + pmd_size; + total = p4d_size + pud_size + pmd_size + pte_size; /* * Now calculate the added pagetable structures needed to populate @@ -359,29 +487,29 @@ static unsigned long __init sme_pgtable_calc(unsigned long len) return total; } -void __init sme_encrypt_kernel(void) +void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp) { unsigned long workarea_start, workarea_end, workarea_len; unsigned long execute_start, execute_end, execute_len; unsigned long kernel_start, kernel_end, kernel_len; + unsigned long initrd_start, initrd_end, initrd_len; + struct sme_populate_pgd_data ppd; unsigned long pgtable_area_len; - unsigned long paddr, pmd_flags; unsigned long decrypted_base; - void *pgtable_area; - pgd_t *pgd; if (!sme_active()) return; /* - * Prepare for encrypting the kernel by building new pagetables with - * the necessary attributes needed to encrypt the kernel in place. + * Prepare for encrypting the kernel and initrd by building new + * pagetables with the necessary attributes needed to encrypt the + * kernel in place. * * One range of virtual addresses will map the memory occupied - * by the kernel as encrypted. + * by the kernel and initrd as encrypted. * * Another range of virtual addresses will map the memory occupied - * by the kernel as decrypted and write-protected. + * by the kernel and initrd as decrypted and write-protected. 
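The 2MB boundary handling in __sme_map_range() above can be modelled in isolation. A minimal standalone sketch, assuming a 2MB PMD page size and using local stand-ins for the kernel macros:

	#include <stdio.h>

	#define PMD_SZ	(2UL << 20)	/* stands in for PMD_PAGE_SIZE */
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long vaddr = 0x203000UL;	/* unaligned start */
		unsigned long vaddr_end = 0xa01000UL;	/* unaligned end */
		unsigned long head_end = ALIGN_UP(vaddr, PMD_SZ);
		unsigned long tail_start = vaddr_end & ~(PMD_SZ - 1);

		printf("PTE head: %#lx-%#lx\n", vaddr, head_end);	/* 4 KiB entries */
		printf("PMD body: %#lx-%#lx\n", head_end, tail_start);	/* 2 MiB entries */
		printf("PTE tail: %#lx-%#lx\n", tail_start, vaddr_end);	/* 4 KiB entries */
		return 0;
	}

When either end is already 2MB aligned, the corresponding PTE range is empty and only PMD entries get created, which is exactly what the __sme_map_range_pte() loop condition produces.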
* * The use of write-protect attribute will prevent any of the * memory from being cached. @@ -392,6 +520,20 @@ void __init sme_encrypt_kernel(void) kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE); kernel_len = kernel_end - kernel_start; + initrd_start = 0; + initrd_end = 0; + initrd_len = 0; +#ifdef CONFIG_BLK_DEV_INITRD + initrd_len = (unsigned long)bp->hdr.ramdisk_size | + ((unsigned long)bp->ext_ramdisk_size << 32); + if (initrd_len) { + initrd_start = (unsigned long)bp->hdr.ramdisk_image | + ((unsigned long)bp->ext_ramdisk_image << 32); + initrd_end = PAGE_ALIGN(initrd_start + initrd_len); + initrd_len = initrd_end - initrd_start; + } +#endif + /* Set the encryption workarea to be immediately after the kernel */ workarea_start = kernel_end; @@ -414,16 +556,21 @@ void __init sme_encrypt_kernel(void) */ pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD; pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2; + if (initrd_len) + pgtable_area_len += sme_pgtable_calc(initrd_len) * 2; /* PUDs and PMDs needed in the current pagetables for the workarea */ pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len); /* * The total workarea includes the executable encryption area and - * the pagetable area. + * the pagetable area. The start of the workarea is already 2MB + * aligned, align the end of the workarea on a 2MB boundary so that + * we don't try to create/allocate PTE entries from the workarea + * before it is mapped. */ workarea_len = execute_len + pgtable_area_len; - workarea_end = workarea_start + workarea_len; + workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE); /* * Set the address to the start of where newly created pagetable @@ -432,45 +579,30 @@ void __init sme_encrypt_kernel(void) * pagetables and when the new encrypted and decrypted kernel * mappings are populated. */ - pgtable_area = (void *)execute_end; + ppd.pgtable_area = (void *)execute_end; /* * Make sure the current pagetable structure has entries for * addressing the workarea. */ - pgd = (pgd_t *)native_read_cr3_pa(); - paddr = workarea_start; - while (paddr < workarea_end) { - pgtable_area = sme_populate_pgd(pgd, pgtable_area, - paddr, - paddr + PMD_FLAGS); - - paddr += PMD_PAGE_SIZE; - } + ppd.pgd = (pgd_t *)native_read_cr3_pa(); + ppd.paddr = workarea_start; + ppd.vaddr = workarea_start; + ppd.vaddr_end = workarea_end; + sme_map_range_decrypted(&ppd); /* Flush the TLB - no globals so cr3 is enough */ native_write_cr3(__native_read_cr3()); /* * A new pagetable structure is being built to allow for the kernel - * to be encrypted. It starts with an empty PGD that will then be - * populated with new PUDs and PMDs as the encrypted and decrypted - * kernel mappings are created. + * and initrd to be encrypted. It starts with an empty PGD that will + * then be populated with new PUDs and PMDs as the encrypted and + * decrypted kernel mappings are created. 
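Pictorially, the workarea being sized here sits immediately after the kernel image; the buffer size follows the 2MB copy chunks used by __enc_copy, and the exact proportions are an assumption of this sketch:

	kernel_end = workarea_start ─► +------------------------------+
	                               | encryption stack (4 KiB)     |
	                               | __enc_copy routine (4 KiB)   |  execute area
	                               | intermediate buffer (2 MiB)  |
	             execute_end     ─► +------------------------------+
	                               | newly built page tables      |  pgtable area
	             workarea_end    ─► +------------------------------+  (2 MiB aligned)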
*/ - pgd = pgtable_area; - memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD); - pgtable_area += sizeof(*pgd) * PTRS_PER_PGD; - - /* Add encrypted kernel (identity) mappings */ - pmd_flags = PMD_FLAGS | _PAGE_ENC; - paddr = kernel_start; - while (paddr < kernel_end) { - pgtable_area = sme_populate_pgd(pgd, pgtable_area, - paddr, - paddr + pmd_flags); - - paddr += PMD_PAGE_SIZE; - } + ppd.pgd = ppd.pgtable_area; + memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD); + ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD; /* * A different PGD index/entry must be used to get different @@ -479,47 +611,79 @@ void __init sme_encrypt_kernel(void) * the base of the mapping. */ decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1); + if (initrd_len) { + unsigned long check_base; + + check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1); + decrypted_base = max(decrypted_base, check_base); + } decrypted_base <<= PGDIR_SHIFT; + /* Add encrypted kernel (identity) mappings */ + ppd.paddr = kernel_start; + ppd.vaddr = kernel_start; + ppd.vaddr_end = kernel_end; + sme_map_range_encrypted(&ppd); + /* Add decrypted, write-protected kernel (non-identity) mappings */ - pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT); - paddr = kernel_start; - while (paddr < kernel_end) { - pgtable_area = sme_populate_pgd(pgd, pgtable_area, - paddr + decrypted_base, - paddr + pmd_flags); - - paddr += PMD_PAGE_SIZE; + ppd.paddr = kernel_start; + ppd.vaddr = kernel_start + decrypted_base; + ppd.vaddr_end = kernel_end + decrypted_base; + sme_map_range_decrypted_wp(&ppd); + + if (initrd_len) { + /* Add encrypted initrd (identity) mappings */ + ppd.paddr = initrd_start; + ppd.vaddr = initrd_start; + ppd.vaddr_end = initrd_end; + sme_map_range_encrypted(&ppd); + /* + * Add decrypted, write-protected initrd (non-identity) mappings + */ + ppd.paddr = initrd_start; + ppd.vaddr = initrd_start + decrypted_base; + ppd.vaddr_end = initrd_end + decrypted_base; + sme_map_range_decrypted_wp(&ppd); } /* Add decrypted workarea mappings to both kernel mappings */ - paddr = workarea_start; - while (paddr < workarea_end) { - pgtable_area = sme_populate_pgd(pgd, pgtable_area, - paddr, - paddr + PMD_FLAGS); + ppd.paddr = workarea_start; + ppd.vaddr = workarea_start; + ppd.vaddr_end = workarea_end; + sme_map_range_decrypted(&ppd); - pgtable_area = sme_populate_pgd(pgd, pgtable_area, - paddr + decrypted_base, - paddr + PMD_FLAGS); - - paddr += PMD_PAGE_SIZE; - } + ppd.paddr = workarea_start; + ppd.vaddr = workarea_start + decrypted_base; + ppd.vaddr_end = workarea_end + decrypted_base; + sme_map_range_decrypted(&ppd); /* Perform the encryption */ sme_encrypt_execute(kernel_start, kernel_start + decrypted_base, - kernel_len, workarea_start, (unsigned long)pgd); + kernel_len, workarea_start, (unsigned long)ppd.pgd); + + if (initrd_len) + sme_encrypt_execute(initrd_start, initrd_start + decrypted_base, + initrd_len, workarea_start, + (unsigned long)ppd.pgd); /* * At this point we are running encrypted. Remove the mappings for * the decrypted areas - all that is needed for this is to remove * the PGD entry/entries. 
*/ - sme_clear_pgd(pgd, kernel_start + decrypted_base, - kernel_end + decrypted_base); + ppd.vaddr = kernel_start + decrypted_base; + ppd.vaddr_end = kernel_end + decrypted_base; + sme_clear_pgd(&ppd); + + if (initrd_len) { + ppd.vaddr = initrd_start + decrypted_base; + ppd.vaddr_end = initrd_end + decrypted_base; + sme_clear_pgd(&ppd); + } - sme_clear_pgd(pgd, workarea_start + decrypted_base, - workarea_end + decrypted_base); + ppd.vaddr = workarea_start + decrypted_base; + ppd.vaddr_end = workarea_end + decrypted_base; + sme_clear_pgd(&ppd); /* Flush the TLB - no globals so cr3 is enough */ native_write_cr3(__native_read_cr3()); diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S index 730e6d541df1..40a6085063d6 100644 --- a/arch/x86/mm/mem_encrypt_boot.S +++ b/arch/x86/mm/mem_encrypt_boot.S @@ -15,6 +15,7 @@ #include #include #include +#include .text .code64 @@ -22,9 +23,9 @@ ENTRY(sme_encrypt_execute) /* * Entry parameters: - * RDI - virtual address for the encrypted kernel mapping - * RSI - virtual address for the decrypted kernel mapping - * RDX - length of kernel + * RDI - virtual address for the encrypted mapping + * RSI - virtual address for the decrypted mapping + * RDX - length to encrypt * RCX - virtual address of the encryption workarea, including: * - stack page (PAGE_SIZE) * - encryption routine page (PAGE_SIZE) @@ -41,9 +42,9 @@ ENTRY(sme_encrypt_execute) addq $PAGE_SIZE, %rax /* Workarea encryption routine */ push %r12 - movq %rdi, %r10 /* Encrypted kernel */ - movq %rsi, %r11 /* Decrypted kernel */ - movq %rdx, %r12 /* Kernel length */ + movq %rdi, %r10 /* Encrypted area */ + movq %rsi, %r11 /* Decrypted area */ + movq %rdx, %r12 /* Area length */ /* Copy encryption routine into the workarea */ movq %rax, %rdi /* Workarea encryption routine */ @@ -52,13 +53,14 @@ ENTRY(sme_encrypt_execute) rep movsb /* Setup registers for call */ - movq %r10, %rdi /* Encrypted kernel */ - movq %r11, %rsi /* Decrypted kernel */ + movq %r10, %rdi /* Encrypted area */ + movq %r11, %rsi /* Decrypted area */ movq %r8, %rdx /* Pagetables used for encryption */ - movq %r12, %rcx /* Kernel length */ + movq %r12, %rcx /* Area length */ movq %rax, %r8 /* Workarea encryption routine */ addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */ + ANNOTATE_RETPOLINE_SAFE call *%rax /* Call the encryption routine */ pop %r12 @@ -71,7 +73,7 @@ ENDPROC(sme_encrypt_execute) ENTRY(__enc_copy) /* - * Routine used to encrypt kernel. + * Routine used to encrypt memory in place. * This routine must be run outside of the kernel proper since * the kernel will be encrypted during the process. So this * routine is defined here and then copied to an area outside @@ -79,19 +81,19 @@ ENTRY(__enc_copy) * during execution. * * On entry the registers must be: - * RDI - virtual address for the encrypted kernel mapping - * RSI - virtual address for the decrypted kernel mapping + * RDI - virtual address for the encrypted mapping + * RSI - virtual address for the decrypted mapping * RDX - address of the pagetables to use for encryption - * RCX - length of kernel + * RCX - length of area * R8 - intermediate copy buffer * * RAX - points to this routine * - * The kernel will be encrypted by copying from the non-encrypted - * kernel space to an intermediate buffer and then copying from the - * intermediate buffer back to the encrypted kernel space. The physical - * addresses of the two kernel space mappings are the same which - * results in the kernel being encrypted "in place". 
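The in-place copy described here is easy to model in plain C: the encrypted and decrypted pointers are two virtual aliases of the same physical bytes, so staging each chunk through a bounce buffer re-writes the data through the encrypted mapping. A conceptual sketch only; the real work happens in the assembly below, under the temporary page tables:

	#include <string.h>

	/* 'enc' and 'dec' alias the same physical memory. */
	static void enc_copy_model(unsigned char *enc, const unsigned char *dec,
				   unsigned long len, unsigned char *buf,
				   unsigned long bufsz)
	{
		while (len) {
			unsigned long n = len < bufsz ? len : bufsz;

			memcpy(buf, dec, n);	/* read via the decrypted alias */
			memcpy(enc, buf, n);	/* write back via the encrypted alias */
			dec += n;
			enc += n;
			len -= n;
		}
	}

The chunked loop also mirrors the new "up to 2MB at a time" logic in the assembly: the final iteration simply copies whatever remainder is left.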
+ * The area will be encrypted by copying from the non-encrypted + * memory space to an intermediate buffer and then copying from the + * intermediate buffer back to the encrypted memory space. The physical + * addresses of the two mappings are the same which results in the area + * being encrypted "in place". */ /* Enable the new page tables */ mov %rdx, %cr3 @@ -103,47 +105,55 @@ ENTRY(__enc_copy) orq $X86_CR4_PGE, %rdx mov %rdx, %cr4 + push %r15 + push %r12 + + movq %rcx, %r9 /* Save area length */ + movq %rdi, %r10 /* Save encrypted area address */ + movq %rsi, %r11 /* Save decrypted area address */ + /* Set the PAT register PA5 entry to write-protect */ - push %rcx movl $MSR_IA32_CR_PAT, %ecx rdmsr - push %rdx /* Save original PAT value */ + mov %rdx, %r15 /* Save original PAT value */ andl $0xffff00ff, %edx /* Clear PA5 */ orl $0x00000500, %edx /* Set PA5 to WP */ wrmsr - pop %rdx /* RDX contains original PAT value */ - pop %rcx - - movq %rcx, %r9 /* Save kernel length */ - movq %rdi, %r10 /* Save encrypted kernel address */ - movq %rsi, %r11 /* Save decrypted kernel address */ wbinvd /* Invalidate any cache entries */ - /* Copy/encrypt 2MB at a time */ + /* Copy/encrypt up to 2MB at a time */ + movq $PMD_PAGE_SIZE, %r12 1: - movq %r11, %rsi /* Source - decrypted kernel */ + cmpq %r12, %r9 + jnb 2f + movq %r9, %r12 + +2: + movq %r11, %rsi /* Source - decrypted area */ movq %r8, %rdi /* Dest - intermediate copy buffer */ - movq $PMD_PAGE_SIZE, %rcx /* 2MB length */ + movq %r12, %rcx rep movsb movq %r8, %rsi /* Source - intermediate copy buffer */ - movq %r10, %rdi /* Dest - encrypted kernel */ - movq $PMD_PAGE_SIZE, %rcx /* 2MB length */ + movq %r10, %rdi /* Dest - encrypted area */ + movq %r12, %rcx rep movsb - addq $PMD_PAGE_SIZE, %r11 - addq $PMD_PAGE_SIZE, %r10 - subq $PMD_PAGE_SIZE, %r9 /* Kernel length decrement */ + addq %r12, %r11 + addq %r12, %r10 + subq %r12, %r9 /* Area length decrement */ jnz 1b /* Area length not zero? */ /* Restore PAT register */ - push %rdx /* Save original PAT value */ movl $MSR_IA32_CR_PAT, %ecx rdmsr - pop %rdx /* Restore original PAT value */ + mov %r15, %rdx /* Restore original PAT value */ wrmsr + pop %r12 + pop %r15 + ret .L__enc_copy_end: ENDPROC(__enc_copy) diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index a99679826846..5f4805d69aab 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -174,3 +174,24 @@ const char *arch_vma_name(struct vm_area_struct *vma) return "[mpx]"; return NULL; } + +/* + * Only allow root to set high MMIO mappings to PROT_NONE. + * This prevents an unprivileged user from setting them to PROT_NONE + * and inverting them, then pointing to valid memory for L1TF + * speculation. + * + * Note: locked-down kernels may want to disable the root override.
+ */ +bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) +{ + if (!boot_cpu_has_bug(X86_BUG_L1TF)) + return true; + if (!__pte_needs_invert(pgprot_val(prot))) + return true; + /* If it's real memory always allow */ + if (pfn_valid(pfn)) + return true; + if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN)) + return false; + return true; +} diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index 7eb06701a935..e500949bae24 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -61,123 +62,6 @@ static unsigned long mpx_mmap(unsigned long len) return addr; } -enum reg_type { - REG_TYPE_RM = 0, - REG_TYPE_INDEX, - REG_TYPE_BASE, -}; - -static int get_reg_offset(struct insn *insn, struct pt_regs *regs, - enum reg_type type) -{ - int regno = 0; - - static const int regoff[] = { - offsetof(struct pt_regs, ax), - offsetof(struct pt_regs, cx), - offsetof(struct pt_regs, dx), - offsetof(struct pt_regs, bx), - offsetof(struct pt_regs, sp), - offsetof(struct pt_regs, bp), - offsetof(struct pt_regs, si), - offsetof(struct pt_regs, di), -#ifdef CONFIG_X86_64 - offsetof(struct pt_regs, r8), - offsetof(struct pt_regs, r9), - offsetof(struct pt_regs, r10), - offsetof(struct pt_regs, r11), - offsetof(struct pt_regs, r12), - offsetof(struct pt_regs, r13), - offsetof(struct pt_regs, r14), - offsetof(struct pt_regs, r15), -#endif - }; - int nr_registers = ARRAY_SIZE(regoff); - /* - * Don't possibly decode a 32-bit instructions as - * reading a 64-bit-only register. - */ - if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64) - nr_registers -= 8; - - switch (type) { - case REG_TYPE_RM: - regno = X86_MODRM_RM(insn->modrm.value); - if (X86_REX_B(insn->rex_prefix.value)) - regno += 8; - break; - - case REG_TYPE_INDEX: - regno = X86_SIB_INDEX(insn->sib.value); - if (X86_REX_X(insn->rex_prefix.value)) - regno += 8; - break; - - case REG_TYPE_BASE: - regno = X86_SIB_BASE(insn->sib.value); - if (X86_REX_B(insn->rex_prefix.value)) - regno += 8; - break; - - default: - pr_err("invalid register type"); - BUG(); - break; - } - - if (regno >= nr_registers) { - WARN_ONCE(1, "decoded an instruction with an invalid register"); - return -EINVAL; - } - return regoff[regno]; -} - -/* - * return the address being referenced be instruction - * for rm=3 returning the content of the rm reg - * for rm!=3 calculates the address using SIB and Disp - */ -static void __user *mpx_get_addr_ref(struct insn *insn, struct pt_regs *regs) -{ - unsigned long addr, base, indx; - int addr_offset, base_offset, indx_offset; - insn_byte_t sib; - - insn_get_modrm(insn); - insn_get_sib(insn); - sib = insn->sib.value; - - if (X86_MODRM_MOD(insn->modrm.value) == 3) { - addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM); - if (addr_offset < 0) - goto out_err; - addr = regs_get_register(regs, addr_offset); - } else { - if (insn->sib.nbytes) { - base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE); - if (base_offset < 0) - goto out_err; - - indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX); - if (indx_offset < 0) - goto out_err; - - base = regs_get_register(regs, base_offset); - indx = regs_get_register(regs, indx_offset); - addr = base + indx * (1 << X86_SIB_SCALE(sib)); - } else { - addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM); - if (addr_offset < 0) - goto out_err; - addr = regs_get_register(regs, addr_offset); - } - addr += insn->displacement.value; - } - return (void __user *)addr; -out_err: - return (void __user *)-1; -} - static int 
mpx_insn_decode(struct insn *insn, struct pt_regs *regs) { @@ -290,7 +174,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs) info->si_signo = SIGSEGV; info->si_errno = 0; info->si_code = SEGV_BNDERR; - info->si_addr = mpx_get_addr_ref(&insn, regs); + info->si_addr = insn_get_addr_ref(&insn, regs); /* * We were not able to extract an address from the instruction, * probably because there was something invalid in it. diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index dfb7d657cf43..464f53da3a6f 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -298,9 +298,11 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, /* * The .rodata section needs to be read-only. Using the pfn - * catches all aliases. + * catches all aliases. This also includes __ro_after_init, + * so do not enforce until kernel_set_to_readonly is true. */ - if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT, + if (kernel_set_to_readonly && + within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT, __pa_symbol(__end_rodata) >> PAGE_SHIFT)) pgprot_val(forbidden) |= _PAGE_RW; @@ -753,7 +755,7 @@ static int split_large_page(struct cpa_data *cpa, pte_t *kpte, if (!debug_pagealloc_enabled()) spin_unlock(&cpa_lock); - base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0); + base = alloc_pages(GFP_KERNEL, 0); if (!debug_pagealloc_enabled()) spin_lock(&cpa_lock); if (!base) @@ -904,7 +906,7 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end) static int alloc_pte_page(pmd_t *pmd) { - pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL); if (!pte) return -1; @@ -914,7 +916,7 @@ static int alloc_pte_page(pmd_t *pmd) static int alloc_pmd_page(pud_t *pud) { - pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL); if (!pmd) return -1; @@ -1004,8 +1006,8 @@ static long populate_pmd(struct cpa_data *cpa, pmd = pmd_offset(pud, start); - set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | - massage_pgprot(pmd_pgprot))); + set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn, + canon_pgprot(pmd_pgprot)))); start += PMD_SIZE; cpa->pfn += PMD_SIZE >> PAGE_SHIFT; @@ -1077,8 +1079,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d, * Map everything starting from the Gb boundary, possibly with 1G pages */ while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) { - set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | - massage_pgprot(pud_pgprot))); + set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn, + canon_pgprot(pud_pgprot)))); start += PUD_SIZE; cpa->pfn += PUD_SIZE >> PAGE_SHIFT; @@ -1120,7 +1122,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr) pgd_entry = cpa->pgd + pgd_index(addr); if (pgd_none(*pgd_entry)) { - p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL); if (!p4d) return -1; @@ -1132,7 +1134,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr) */ p4d = p4d_offset(pgd_entry, addr); if (p4d_none(*p4d)) { - pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + pud = (pud_t *)get_zeroed_page(GFP_KERNEL); if (!pud) return -1; diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 17ebc5a978cc..2bdb8e8a9d7c 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -1,13 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 #include #include +#include #include #include #include 
#include #include -#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) #ifdef CONFIG_HIGHPTE #define PGALLOC_USER_GFP __GFP_HIGHMEM @@ -355,14 +356,15 @@ static inline void _pgd_free(pgd_t *pgd) kmem_cache_free(pgd_cache, pgd); } #else + static inline pgd_t *_pgd_alloc(void) { - return (pgd_t *)__get_free_page(PGALLOC_GFP); + return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER); } static inline void _pgd_free(pgd_t *pgd) { - free_page((unsigned long)pgd); + free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER); } #endif /* CONFIG_X86_PAE */ @@ -635,6 +637,10 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) (mtrr != MTRR_TYPE_WRBACK)) return 0; + /* Bail out if we are on a populated non-leaf entry: */ + if (pud_present(*pud) && !pud_huge(*pud)) + return 0; + prot = pgprot_4k_2_large(prot); set_pte((pte_t *)pud, pfn_pte( @@ -663,6 +669,10 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) return 0; } + /* Bail out if we are on a populated non-leaf entry: */ + if (pmd_present(*pmd) && !pmd_huge(*pmd)) + return 0; + prot = pgprot_4k_2_large(prot); set_pte((pte_t *)pmd, pfn_pte( @@ -701,4 +711,97 @@ int pmd_clear_huge(pmd_t *pmd) return 0; } + +#ifdef CONFIG_X86_64 +/** + * pud_free_pmd_page - Clear pud entry and free pmd page. + * @pud: Pointer to a PUD. + * @addr: Virtual address associated with pud. + * + * Context: The pud range has been unmapped and TLB purged. + * Return: 1 if clearing the entry succeeded. 0 otherwise. + * + * NOTE: Callers must allow a single page allocation. + */ +int pud_free_pmd_page(pud_t *pud, unsigned long addr) +{ + pmd_t *pmd, *pmd_sv; + pte_t *pte; + int i; + + if (pud_none(*pud)) + return 1; + + pmd = (pmd_t *)pud_page_vaddr(*pud); + pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL); + if (!pmd_sv) + return 0; + + for (i = 0; i < PTRS_PER_PMD; i++) { + pmd_sv[i] = pmd[i]; + if (!pmd_none(pmd[i])) + pmd_clear(&pmd[i]); + } + + pud_clear(pud); + + /* INVLPG to clear all paging-structure caches */ + flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1); + + for (i = 0; i < PTRS_PER_PMD; i++) { + if (!pmd_none(pmd_sv[i])) { + pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]); + free_page((unsigned long)pte); + } + } + + free_page((unsigned long)pmd_sv); + free_page((unsigned long)pmd); + + return 1; +} + +/** + * pmd_free_pte_page - Clear pmd entry and free pte page. + * @pmd: Pointer to a PMD. + * @addr: Virtual address associated with pmd. + * + * Context: The pmd range has been unmapped and TLB purged. + * Return: 1 if clearing the entry succeeded. 0 otherwise. + */ +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) +{ + pte_t *pte; + + if (pmd_none(*pmd)) + return 1; + + pte = (pte_t *)pmd_page_vaddr(*pmd); + pmd_clear(pmd); + + /* INVLPG to clear all paging-structure caches */ + flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1); + + free_page((unsigned long)pte); + + return 1; +} + +#else /* !CONFIG_X86_64 */ + +int pud_free_pmd_page(pud_t *pud, unsigned long addr) +{ + return pud_none(*pud); } + +/* + * Disable free page handling on x86-PAE. This assures that ioremap() + * does not update sync'd pmd entries. See vmalloc_sync_one().
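For context, these two helpers exist so that a huge-mapping path can replace a range that was previously mapped with 4 KiB PTEs. A plausible caller, sketched loosely after lib/ioremap.c (checks are simplified, so treat the details as assumptions rather than the exact upstream code):

	static int try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
				phys_addr_t phys, pgprot_t prot)
	{
		if ((end - addr) != PMD_SIZE || !IS_ALIGNED(addr, PMD_SIZE))
			return 0;			/* fall back to 4 KiB PTEs */

		if (!pmd_free_pte_page(pmd, addr))	/* unhook and free a stale PTE page */
			return 0;

		return pmd_set_huge(pmd, phys, prot);	/* may still refuse, see above */
	}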
+ */ +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) +{ + return pmd_none(*pmd); +} + +#endif /* CONFIG_X86_64 */ #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 6b9bf023a700..9bb7f0ab9fe6 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -62,7 +63,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval) * It's enough to flush this one mapping. * (PGE mappings get flushed as well) */ - __flush_tlb_one(vaddr); + __flush_tlb_one_kernel(vaddr); } unsigned long __FIXADDR_TOP = 0xfffff000; diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c index d7bc0eea20a5..6e98e0a7c923 100644 --- a/arch/x86/mm/pkeys.c +++ b/arch/x86/mm/pkeys.c @@ -94,26 +94,27 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey */ if (pkey != -1) return pkey; - /* - * Look for a protection-key-drive execute-only mapping - * which is now being given permissions that are not - * execute-only. Move it back to the default pkey. - */ - if (vma_is_pkey_exec_only(vma) && - (prot & (PROT_READ|PROT_WRITE))) { - return 0; - } + /* * The mapping is execute-only. Go try to get the * execute-only protection key. If we fail to do that, * fall through as if we do not have execute-only - * support. + * support in this mm. */ if (prot == PROT_EXEC) { pkey = execute_only_pkey(vma->vm_mm); if (pkey > 0) return pkey; + } else if (vma_is_pkey_exec_only(vma)) { + /* + * Protections are *not* PROT_EXEC, but the mapping + * is using the exec-only pkey. This mapping was + * PROT_EXEC and will no longer be. Move back to + * the default pkey. + */ + return ARCH_DEFAULT_PKEY; } + /* * This is a vanilla, non-pkey mprotect (or we failed to * setup execute-only), inherit the pkey from the VMA we diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c new file mode 100644 index 000000000000..d6f11accd37a --- /dev/null +++ b/arch/x86/mm/pti.c @@ -0,0 +1,369 @@ +/* + * Copyright(c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * This code is based in part on work published here: + * + * https://github.com/IAIK/KAISER + * + * The original work was written and signed off for the Linux + * kernel by: + * + * Signed-off-by: Richard Fellner + * Signed-off-by: Moritz Lipp + * Signed-off-by: Daniel Gruss + * Signed-off-by: Michael Schwarz + * + * Major changes to the original code by: Dave Hansen + * Mostly rewritten by Thomas Gleixner and + * Andy Lutomirski + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef pr_fmt +#define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt + +/* Backporting helper */ +#ifndef __GFP_NOTRACK +#define __GFP_NOTRACK 0 +#endif + +static void __init pti_print_if_insecure(const char *reason) +{ + if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) + pr_info("%s\n", reason); +} + +static void __init pti_print_if_secure(const char *reason) +{ + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) + pr_info("%s\n", reason); +} + +void __init pti_check_boottime_disable(void) +{ + char arg[5]; + int ret; + + if (hypervisor_is_type(X86_HYPER_XEN_PV)) { + pti_print_if_insecure("disabled on XEN PV."); + return; + } + + ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg)); + if (ret > 0) { + if (ret == 3 && !strncmp(arg, "off", 3)) { + pti_print_if_insecure("disabled on command line."); + return; + } + if (ret == 2 && !strncmp(arg, "on", 2)) { + pti_print_if_secure("force enabled on command line."); + goto enable; + } + if (ret == 4 && !strncmp(arg, "auto", 4)) + goto autosel; + } + + if (cmdline_find_option_bool(boot_command_line, "nopti")) { + pti_print_if_insecure("disabled on command line."); + return; + } + +autosel: + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) + return; +enable: + setup_force_cpu_cap(X86_FEATURE_PTI); +} + +pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) +{ + /* + * Changes to the high (kernel) portion of the kernelmode page + * tables are not automatically propagated to the usermode tables. + * + * Users should keep in mind that, unlike the kernelmode tables, + * there is no vmalloc_fault equivalent for the usermode tables. + * Top-level entries added to init_mm's usermode pgd after boot + * will not be automatically propagated to other mms. + */ + if (!pgdp_maps_userspace(pgdp)) + return pgd; + + /* + * The user page tables get the full PGD, accessible from + * userspace: + */ + kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd; + + /* + * If this is normal user memory, make it NX in the kernel + * pagetables so that, if we somehow screw up and return to + * usermode with the kernel CR3 loaded, we'll get a page fault + * instead of allowing user code to execute with the wrong CR3. + * + * As exceptions, we don't set NX if: + * - _PAGE_USER is not set. This could be an executable + * EFI runtime mapping or something similar, and the kernel + * may execute from it + * - we don't have NX support + * - we're clearing the PGD (i.e. the new pgd is not present). + */ + if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) && + (__supported_pte_mask & _PAGE_NX)) + pgd.pgd |= _PAGE_NX; + + /* return the copy of the PGD we want the kernel to use: */ + return pgd; +} + +/* + * Walk the user copy of the page tables (optionally) trying to allocate + * page table pages on the way down. + * + * Returns a pointer to a P4D on success, or NULL on failure.
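kernel_to_user_pgdp(), used by __pti_set_user_pgd() above, is defined elsewhere; the pairing it relies on can be sketched as follows, assuming the order-1 PGD allocation seen earlier in this patch (PGD_ALLOCATION_ORDER), which places the user copy in the page directly after the kernel PGD:

	/* Sketch only; trailing underscores mark stand-ins for the real helpers. */
	static inline pgd_t *kernel_to_user_pgdp_(pgd_t *pgdp)
	{
		return (pgd_t *)((unsigned long)pgdp | PAGE_SIZE);	/* set bit 12 */
	}

	static inline pgd_t *user_to_kernel_pgdp_(pgd_t *pgdp)
	{
		return (pgd_t *)((unsigned long)pgdp & ~PAGE_SIZE);	/* clear bit 12 */
	}

Under that assumption, the entry code can switch CR3 between the two PGDs by flipping a single address bit.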
+ */ +static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) +{ + pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address)); + gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + + if (address < PAGE_OFFSET) { + WARN_ONCE(1, "attempt to walk user address\n"); + return NULL; + } + + if (pgd_none(*pgd)) { + unsigned long new_p4d_page = __get_free_page(gfp); + if (!new_p4d_page) + return NULL; + + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page))); + } + BUILD_BUG_ON(pgd_large(*pgd) != 0); + + return p4d_offset(pgd, address); +} + +/* + * Walk the user copy of the page tables (optionally) trying to allocate + * page table pages on the way down. + * + * Returns a pointer to a PMD on success, or NULL on failure. + */ +static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) +{ + gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + p4d_t *p4d = pti_user_pagetable_walk_p4d(address); + pud_t *pud; + + BUILD_BUG_ON(p4d_large(*p4d) != 0); + if (p4d_none(*p4d)) { + unsigned long new_pud_page = __get_free_page(gfp); + if (!new_pud_page) + return NULL; + + set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page))); + } + + pud = pud_offset(p4d, address); + /* The user page tables do not use large mappings: */ + if (pud_large(*pud)) { + WARN_ON(1); + return NULL; + } + if (pud_none(*pud)) { + unsigned long new_pmd_page = __get_free_page(gfp); + if (!new_pmd_page) + return NULL; + + set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page))); + } + + return pmd_offset(pud, address); +} + +#ifdef CONFIG_X86_VSYSCALL_EMULATION +/* + * Walk the shadow copy of the page tables (optionally) trying to allocate + * page table pages on the way down. Does not support large pages. + * + * Note: this is only used when mapping *new* kernel data into the + * user/shadow page tables. It is never used for userspace data. + * + * Returns a pointer to a PTE on success, or NULL on failure. + */ +static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address) +{ + gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + pmd_t *pmd = pti_user_pagetable_walk_pmd(address); + pte_t *pte; + + /* We can't do anything sensible if we hit a large mapping. */ + if (pmd_large(*pmd)) { + WARN_ON(1); + return NULL; + } + + if (pmd_none(*pmd)) { + unsigned long new_pte_page = __get_free_page(gfp); + if (!new_pte_page) + return NULL; + + set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page))); + } + + pte = pte_offset_kernel(pmd, address); + if (pte_flags(*pte) & _PAGE_USER) { + WARN_ONCE(1, "attempt to walk to user pte\n"); + return NULL; + } + return pte; +} + +static void __init pti_setup_vsyscall(void) +{ + pte_t *pte, *target_pte; + unsigned int level; + + pte = lookup_address(VSYSCALL_ADDR, &level); + if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte)) + return; + + target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR); + if (WARN_ON(!target_pte)) + return; + + *target_pte = *pte; + set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir)); +} +#else +static void __init pti_setup_vsyscall(void) { } +#endif + +static void __init +pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear) +{ + unsigned long addr; + + /* + * Clone the populated PMDs which cover start to end. These PMD areas + * can have holes. 
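set_vsyscall_pgtable_user_bits(), called from pti_setup_vsyscall() above, lives outside this file. Roughly, it has to set _PAGE_USER at every paging level covering VSYSCALL_ADDR so the one vsyscall page becomes reachable at CPL 3; a loose sketch, with the walk simplified and 5-level paging ignored:

	static void __init set_user_bits_sketch(pgd_t *root)
	{
		unsigned long addr = VSYSCALL_ADDR;
		pgd_t *pgd = root + pgd_index(addr);
		p4d_t *p4d = p4d_offset(pgd, addr);
		pud_t *pud = pud_offset(p4d, addr);
		pmd_t *pmd = pmd_offset(pud, addr);

		set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
		set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
		set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
		set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
	}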
+ */ + for (addr = start; addr < end; addr += PMD_SIZE) { + pmd_t *pmd, *target_pmd; + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + + pgd = pgd_offset_k(addr); + if (WARN_ON(pgd_none(*pgd))) + return; + p4d = p4d_offset(pgd, addr); + if (WARN_ON(p4d_none(*p4d))) + return; + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) + continue; + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + continue; + + target_pmd = pti_user_pagetable_walk_pmd(addr); + if (WARN_ON(!target_pmd)) + return; + + /* + * Copy the PMD. That is, the kernelmode and usermode + * tables will share the last-level page tables of this + * address range + */ + *target_pmd = pmd_clear_flags(*pmd, clear); + } +} + +/* + * Clone a single p4d (i.e. a top-level entry on 4-level systems and a + * next-level entry on 5-level systems). + */ +static void __init pti_clone_p4d(unsigned long addr) +{ + p4d_t *kernel_p4d, *user_p4d; + pgd_t *kernel_pgd; + + user_p4d = pti_user_pagetable_walk_p4d(addr); + kernel_pgd = pgd_offset_k(addr); + kernel_p4d = p4d_offset(kernel_pgd, addr); + *user_p4d = *kernel_p4d; +} + +/* + * Clone the CPU_ENTRY_AREA into the user space visible page table. + */ +static void __init pti_clone_user_shared(void) +{ + pti_clone_p4d(CPU_ENTRY_AREA_BASE); +} + +/* + * Clone the ESPFIX P4D into the user space visible page table + */ +static void __init pti_setup_espfix64(void) +{ +#ifdef CONFIG_X86_ESPFIX64 + pti_clone_p4d(ESPFIX_BASE_ADDR); +#endif +} + +/* + * Clone the populated PMDs of the entry and irqentry text and force it RO. + */ +static void __init pti_clone_entry_text(void) +{ + pti_clone_pmds((unsigned long) __entry_text_start, + (unsigned long) __irqentry_text_end, + _PAGE_RW | _PAGE_GLOBAL); +} + +/* + * Initialize kernel page table isolation + */ +void __init pti_init(void) +{ + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + pr_info("enabled\n"); + + pti_clone_user_shared(); + pti_clone_entry_text(); + pti_setup_espfix64(); + pti_setup_vsyscall(); +} diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 3118392cdf75..0c936435ea93 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -6,13 +6,14 @@ #include #include #include +#include #include #include +#include #include #include #include -#include /* * TLB flushing, formerly SMP-only @@ -28,6 +29,38 @@ * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi */ +/* + * We get here when we do something requiring a TLB invalidation + * but could not go invalidate all of the contexts. We do the + * necessary invalidation by clearing out the 'ctx_id' which + * forces a TLB flush when the context is loaded. + */ +void clear_asid_other(void) +{ + u16 asid; + + /* + * This is only expected to be set if we have disabled + * kernel _PAGE_GLOBAL pages.
+ */ + if (!static_cpu_has(X86_FEATURE_PTI)) { + WARN_ON_ONCE(1); + return; + } + + for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { + /* Do not need to flush the current asid */ + if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid)) + continue; + /* + * Make sure the next time we go to switch to + * this asid, we do a flush: + */ + this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0); + } + this_cpu_write(cpu_tlbstate.invalidate_other, false); +} + atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); @@ -42,6 +75,9 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, return; } + if (this_cpu_read(cpu_tlbstate.invalidate_other)) + clear_asid_other(); + for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) != next->context.ctx_id) @@ -65,6 +101,25 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, *need_flush = true; } +static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush) +{ + unsigned long new_mm_cr3; + + if (need_flush) { + invalidate_user_asid(new_asid); + new_mm_cr3 = build_cr3(pgdir, new_asid); + } else { + new_mm_cr3 = build_cr3_noflush(pgdir, new_asid); + } + + /* + * Caution: many callers of this function expect + * that load_cr3() is serializing and orders TLB + * fills with respect to the mm_cpumask writes. + */ + write_cr3(new_mm_cr3); +} + void leave_mm(int cpu) { struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); @@ -97,6 +152,34 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, local_irq_restore(flags); } +static void sync_current_stack_to_mm(struct mm_struct *mm) +{ + unsigned long sp = current_stack_pointer; + pgd_t *pgd = pgd_offset(mm, sp); + + if (CONFIG_PGTABLE_LEVELS > 4) { + if (unlikely(pgd_none(*pgd))) { + pgd_t *pgd_ref = pgd_offset_k(sp); + + set_pgd(pgd, *pgd_ref); + } + } else { + /* + * "pgd" is faked. The top level entries are "p4d"s, so sync + * the p4d. This compiles to approximately the same code as + * the 5-level case. + */ + p4d_t *p4d = p4d_offset(pgd, sp); + + if (unlikely(p4d_none(*p4d))) { + pgd_t *pgd_ref = pgd_offset_k(sp); + p4d_t *p4d_ref = p4d_offset(pgd_ref, sp); + + set_p4d(p4d, *p4d_ref); + } + } +} + void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { @@ -128,7 +211,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * isn't free. */ #ifdef CONFIG_DEBUG_VM - if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) { + if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) { /* * If we were to BUG here, we'd be very likely to kill * the system so hard that we don't see the call trace. @@ -165,6 +248,27 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, } else { u16 new_asid; bool need_flush; + u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id); + + /* + * Avoid user/user BTB poisoning by flushing the branch + * predictor when switching between processes. This stops + * one process from doing Spectre-v2 attacks on another. + * + * As an optimization, flush indirect branches only when + * switching into processes that disable dumping. This + * protects high value processes like gpg, without having + * too high performance overhead. IBPB is *expensive*! + * + * This will not flush branches when switching into kernel + * threads. It will also not flush if we switch to idle + * thread and back to the same process. It will flush if we + * switch to a different non-dumpable process. 
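+		 * (get_dumpable() returns SUID_DUMP_USER for ordinary
+		 * dumpable tasks; suid and otherwise protected tasks
+		 * report a different mode, so they do get the barrier.)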
+		 */
+		if (tsk && tsk->mm &&
+		    tsk->mm->context.ctx_id != last_ctx_id &&
+		    get_dumpable(tsk->mm) != SUID_DUMP_USER)
+			indirect_branch_prediction_barrier();
 
 		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
 			/*
@@ -172,11 +276,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			 * mapped in the new pgd, we'll double-fault.  Forcibly
 			 * map it.
 			 */
-			unsigned int index = pgd_index(current_stack_pointer);
-			pgd_t *pgd = next->pgd + index;
-
-			if (unlikely(pgd_none(*pgd)))
-				set_pgd(pgd, init_mm.pgd[index]);
+			sync_current_stack_to_mm(next);
 		}
 
 		/* Stop remote flushes for the previous mm */
@@ -195,7 +295,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		if (need_flush) {
 			this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
 			this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
-			write_cr3(build_cr3(next, new_asid));
+			load_new_mm_cr3(next->pgd, new_asid, true);
 
 			/*
 			 * NB: This gets called via leave_mm() in the idle path
@@ -208,12 +308,20 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 		} else {
 			/* The new ASID is already up to date. */
-			write_cr3(build_cr3_noflush(next, new_asid));
+			load_new_mm_cr3(next->pgd, new_asid, false);
 
 			/* See above wrt _rcuidle. */
 			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
 		}
 
+		/*
+		 * Record last user mm's context id, so we can avoid
+		 * flushing branch buffer with IBPB if we switch back
+		 * to the same user.
+		 */
+		if (next != &init_mm)
+			this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
 		this_cpu_write(cpu_tlbstate.loaded_mm, next);
 		this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
 	}
@@ -288,9 +396,10 @@ void initialize_tlbstate_and_flush(void)
 		!(cr4_read_shadow() & X86_CR4_PCIDE));
 
 	/* Force ASID 0 and force a TLB flush. */
-	write_cr3(build_cr3(mm, 0));
+	write_cr3(build_cr3(mm->pgd, 0));
 
 	/* Reinitialize tlbstate. */
+	this_cpu_write(cpu_tlbstate.last_ctx_id, mm->context.ctx_id);
 	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
 	this_cpu_write(cpu_tlbstate.next_asid, 1);
 	this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
@@ -383,7 +492,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	 * flush that changes context.tlb_gen from 2 to 3.  If they get
 	 * processed on this CPU in reverse order, we'll see
 	 * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
-	 * If we were to use __flush_tlb_single() and set local_tlb_gen to
+	 * If we were to use __flush_tlb_one_user() and set local_tlb_gen to
 	 * 3, we'd break the invariant: we'd update local_tlb_gen above
 	 * 1 without the full flush that's needed for tlb_gen 2.
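 	 *
 	 * Concretely: the partial flush for tlb_gen 3 must leave
 	 * local_tlb_gen at 1, so that the full flush required by
 	 * tlb_gen 2 is still performed when its IPI is processed.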
* @@ -404,7 +513,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, addr = f->start; while (addr < f->end) { - __flush_tlb_single(addr); + __flush_tlb_one_user(addr); addr += PAGE_SIZE; } if (local) @@ -551,7 +660,7 @@ static void do_kernel_range_flush(void *info) /* flush range by one by one 'invlpg' */ for (addr = f->start; addr < f->end; addr += PAGE_SIZE) - __flush_tlb_single(addr); + __flush_tlb_one_kernel(addr); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 0554e8aef4d5..a9deb2b0397d 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -13,6 +13,7 @@ #include #include #include +#include #include int bpf_jit_enable __read_mostly; @@ -287,7 +288,7 @@ static void emit_bpf_tail_call(u8 **pprog) EMIT2(0x89, 0xD2); /* mov edx, edx */ EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ offsetof(struct bpf_array, map.max_entries)); -#define OFFSET1 43 /* number of bytes to jump */ +#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */ EMIT2(X86_JBE, OFFSET1); /* jbe out */ label1 = cnt; @@ -296,7 +297,7 @@ static void emit_bpf_tail_call(u8 **pprog) */ EMIT2_off32(0x8B, 0x85, 36); /* mov eax, dword ptr [rbp + 36] */ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ -#define OFFSET2 32 +#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE) EMIT2(X86_JA, OFFSET2); /* ja out */ label2 = cnt; EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ @@ -310,7 +311,7 @@ static void emit_bpf_tail_call(u8 **pprog) * goto out; */ EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ -#define OFFSET3 10 +#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE) EMIT2(X86_JE, OFFSET3); /* je out */ label3 = cnt; @@ -323,7 +324,7 @@ static void emit_bpf_tail_call(u8 **pprog) * rdi == ctx (1st arg) * rax == prog->bpf_func + prologue_size */ - EMIT2(0xFF, 0xE0); /* jmp rax */ + RETPOLINE_RAX_BPF_JIT(); /* out: */ BUILD_BUG_ON(cnt - label1 != OFFSET1); @@ -1155,9 +1156,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) * may converge on the last pass. 
In such case do one more * pass to emit the final image */ - for (pass = 0; pass < 10 || image; pass++) { + for (pass = 0; pass < 20 || image; pass++) { proglen = do_jit(prog, addrs, image, oldproglen, &ctx); if (proglen <= 0) { +out_image: image = NULL; if (header) bpf_jit_binary_free(header); @@ -1168,8 +1170,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) if (proglen != oldproglen) { pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", proglen, oldproglen); - prog = orig_prog; - goto out_addrs; + goto out_image; } break; } @@ -1182,6 +1183,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) } } oldproglen = proglen; + cond_resched(); } if (bpf_jit_enable > 1) diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index ffdbc4836b4f..a7a7677265b6 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -460,7 +460,7 @@ static int nmi_setup(void) goto fail; for_each_possible_cpu(cpu) { - if (!cpu) + if (!IS_ENABLED(CONFIG_SMP) || !cpu) continue; memcpy(per_cpu(cpu_msrs, cpu).counters, @@ -592,7 +592,7 @@ enum __force_cpu_type { static int force_cpu_type; -static int set_cpu_type(const char *str, struct kernel_param *kp) +static int set_cpu_type(const char *str, const struct kernel_param *kp) { if (!strcmp(str, "timer")) { force_cpu_type = timer; diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c index bb461cfd01ab..526536c81ddc 100644 --- a/arch/x86/pci/broadcom_bus.c +++ b/arch/x86/pci/broadcom_bus.c @@ -97,7 +97,7 @@ static int __init broadcom_postcore_init(void) * We should get host bridge information from ACPI unless the BIOS * doesn't support it. */ - if (acpi_os_get_root_pointer()) + if (!acpi_disabled && acpi_os_get_root_pointer()) return 0; #endif diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 20fb31579b69..ae369c2bbc3e 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -134,7 +134,9 @@ pgd_t * __init efi_call_phys_prolog(void) pud[j] = *pud_offset(p4d_k, vaddr); } } + pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX; } + out: __flush_tlb_all(); @@ -164,14 +166,14 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE); set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]); - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) + if (!pgd_present(*pgd)) continue; for (i = 0; i < PTRS_PER_P4D; i++) { p4d = p4d_offset(pgd, pgd_idx * PGDIR_SIZE + i * P4D_SIZE); - if (!(p4d_val(*p4d) & _PAGE_PRESENT)) + if (!p4d_present(*p4d)) continue; pud = (pud_t *)p4d_page_vaddr(*p4d); @@ -195,6 +197,9 @@ static pgd_t *efi_pgd; * because we want to avoid inserting EFI region mappings (EFI_VA_END * to EFI_VA_START) into the standard kernel page tables. Everything * else can be shared, see efi_sync_low_kernel_mappings(). + * + * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the + * allocation. 
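+ * (pgd_alloc() would also link the new pgd into pgd_list via
+ * pgd_ctor(), which is only wanted for pgds that carry the regular
+ * kernel mappings; a bare page allocation avoids that.)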
*/ int __init efi_alloc_page_tables(void) { @@ -206,8 +211,8 @@ int __init efi_alloc_page_tables(void) if (efi_enabled(EFI_OLD_MEMMAP)) return 0; - gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO; - efi_pgd = (pgd_t *)__get_free_page(gfp_mask); + gfp_mask = GFP_KERNEL | __GFP_ZERO; + efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER); if (!efi_pgd) return -ENOMEM; @@ -222,7 +227,7 @@ int __init efi_alloc_page_tables(void) if (!pud) { if (CONFIG_PGTABLE_LEVELS > 4) free_page((unsigned long) pgd_page_vaddr(*pgd)); - free_page((unsigned long)efi_pgd); + free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER); return -ENOMEM; } diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 8a99a2e96537..5b513ccffde4 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -592,7 +592,18 @@ static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff, /* * Update the first page pointer to skip over the CSH header. */ - cap_info->pages[0] += csh->headersize; + cap_info->phys[0] += csh->headersize; + + /* + * cap_info->capsule should point at a virtual mapping of the entire + * capsule, starting at the capsule header. Our image has the Quark + * security header prepended, so we cannot rely on the default vmap() + * mapping created by the generic capsule code. + * Given that the Quark firmware does not appear to care about the + * virtual mapping, let's just point cap_info->capsule at our copy + * of the capsule header. + */ + cap_info->capsule = &cap_info->header; return 1; } diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c index dc036e511f48..5a0483e7bf66 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c @@ -60,7 +60,7 @@ static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata) return 0; } -static const struct bt_sfi_data tng_bt_sfi_data __initdata = { +static struct bt_sfi_data tng_bt_sfi_data __initdata = { .setup = tng_bt_sfi_setup, }; diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c index 4f5fa65a1011..2acd6be13375 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c @@ -18,6 +18,7 @@ #include #include #include +#include #define TANGIER_EXT_TIMER0_MSI 12 diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index 86676cec99a1..09dd7f3cf621 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c @@ -79,7 +79,7 @@ static void intel_mid_power_off(void) static void intel_mid_reboot(void) { - intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0); + intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0); } static unsigned long __init intel_mid_calibrate_tsc(void) diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index f44c0bc95aa2..34f9a9ce6236 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp, local_flush_tlb(); stat->d_alltlb++; } else { - __flush_tlb_one(msg->address); + __flush_tlb_one_user(msg->address); stat->d_onetlb++; } stat->d_requestee++; @@ -1285,6 +1285,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs) struct msg_desc msgdesc; ack_APIC_irq(); + 
kvm_set_cpu_l1tf_flush_l1d(); time_start = get_cycles(); bcp = &per_cpu(bau_control, smp_processor_id()); @@ -2254,8 +2255,6 @@ static int __init uv_bau_init(void) init_uvhub(uvhub, vector, uv_base_pnode); } - alloc_intr_gate(vector, uv_bau_message_intr1); - for_each_possible_blade(uvhub) { if (uv_blade_nr_possible_cpus(uvhub)) { unsigned long val; diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 84fcfde53f8f..04d5157fe7f8 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -160,17 +160,19 @@ static void do_fpu_end(void) static void fix_processor_context(void) { int cpu = smp_processor_id(); - struct tss_struct *t = &per_cpu(cpu_tss, cpu); #ifdef CONFIG_X86_64 struct desc_struct *desc = get_cpu_gdt_rw(cpu); tss_desc tss; #endif - set_tss_desc(cpu, t); /* - * This just modifies memory; should not be - * necessary. But... This is necessary, because - * 386 hardware has concept of busy TSS or some - * similar stupidity. - */ + + /* + * We need to reload TR, which requires that we change the + * GDT entry to indicate "available" first. + * + * XXX: This could probably all be replaced by a call to + * force_reload_TR(). + */ + set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); #ifdef CONFIG_X86_64 memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc)); diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c index c35fdb585c68..afc4ed7b1578 100644 --- a/arch/x86/power/hibernate_32.c +++ b/arch/x86/power/hibernate_32.c @@ -145,7 +145,7 @@ static inline void resume_init_first_level_page_table(pgd_t *pg_dir) #endif } -int swsusp_arch_resume(void) +asmlinkage int swsusp_arch_resume(void) { int error; diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index f910c514438f..0ef5e5204968 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c @@ -174,7 +174,7 @@ static int relocate_restore_code(void) return 0; } -int swsusp_arch_resume(void) +asmlinkage int swsusp_arch_resume(void) { int error; diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S index de53bd15df5a..24bb7598774e 100644 --- a/arch/x86/realmode/rm/trampoline_64.S +++ b/arch/x86/realmode/rm/trampoline_64.S @@ -102,7 +102,7 @@ ENTRY(startup_32) * don't we'll eventually crash trying to execute encrypted * instructions. */ - bt $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags + btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags jnc .Ldone movl $MSR_K8_SYSCFG, %ecx rdmsr diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 5d73c443e778..220e97841e49 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -770,9 +770,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym, break; case R_X86_64_PC32: + case R_X86_64_PLT32: /* * PC relative relocations don't need to be adjusted unless * referencing a percpu symbol. + * + * NB: R_X86_64_PLT32 can be treated as R_X86_64_PC32. 
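+	 * The kernel is linked with no PLT, so the linker resolves
+	 * PLT32 directly against the target symbol and the computed
+	 * displacement is identical to the PC32 case.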
*/ if (is_percpu_sym(sym, symname)) add_reloc(&relocs32neg, offset); diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c index 836a1eb5df43..3ee234b6234d 100644 --- a/arch/x86/um/ldt.c +++ b/arch/x86/um/ldt.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -369,7 +370,9 @@ void free_ldt(struct mm_context *mm) mm->arch.ldt.entry_count = 0; } -int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount) +SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr , + unsigned long , bytecount) { - return do_modify_ldt_skas(func, ptr, bytecount); + /* See non-um modify_ldt() for why we do this cast */ + return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount); } diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c index 1518d2805ae8..27361cbb7ca9 100644 --- a/arch/x86/um/stub_segv.c +++ b/arch/x86/um/stub_segv.c @@ -6,11 +6,12 @@ #include #include #include +#include void __attribute__ ((__section__ (".__syscall_stub"))) stub_segv_handler(int sig, siginfo_t *info, void *p) { - struct ucontext *uc = p; + ucontext_t *uc = p; GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA), &uc->uc_mcontext); diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index d669e9d89001..df208af3cd74 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1,8 +1,13 @@ +#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG +#include +#endif #include #include +#include #include #include +#include #include #include @@ -331,3 +336,80 @@ void xen_arch_unregister_cpu(int num) } EXPORT_SYMBOL(xen_arch_unregister_cpu); #endif + +#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG +void __init arch_xen_balloon_init(struct resource *hostmem_resource) +{ + struct xen_memory_map memmap; + int rc; + unsigned int i, last_guest_ram; + phys_addr_t max_addr = PFN_PHYS(max_pfn); + struct e820_table *xen_e820_table; + const struct e820_entry *entry; + struct resource *res; + + if (!xen_initial_domain()) + return; + + xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL); + if (!xen_e820_table) + return; + + memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries); + set_xen_guest_handle(memmap.buffer, xen_e820_table->entries); + rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap); + if (rc) { + pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc); + goto out; + } + + last_guest_ram = 0; + for (i = 0; i < memmap.nr_entries; i++) { + if (xen_e820_table->entries[i].addr >= max_addr) + break; + if (xen_e820_table->entries[i].type == E820_TYPE_RAM) + last_guest_ram = i; + } + + entry = &xen_e820_table->entries[last_guest_ram]; + if (max_addr >= entry->addr + entry->size) + goto out; /* No unallocated host RAM. */ + + hostmem_resource->start = max_addr; + hostmem_resource->end = entry->addr + entry->size; + + /* + * Mark non-RAM regions between the end of dom0 RAM and end of host RAM + * as unavailable. The rest of that region can be used for hotplug-based + * ballooning. + */ + for (; i < memmap.nr_entries; i++) { + entry = &xen_e820_table->entries[i]; + + if (entry->type == E820_TYPE_RAM) + continue; + + if (entry->addr >= hostmem_resource->end) + break; + + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) + goto out; + + res->name = "Unavailable host RAM"; + res->start = entry->addr; + res->end = (entry->addr + entry->size < hostmem_resource->end) ? 
+ entry->addr + entry->size : hostmem_resource->end; + rc = insert_resource(hostmem_resource, res); + if (rc) { + pr_warn("%s: Can't insert [%llx - %llx) (%d)\n", + __func__, res->start, res->end, rc); + kfree(res); + goto out; + } + } + + out: + kfree(xen_e820_table); +} +#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */ diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c index de503c225ae1..854508b00bbb 100644 --- a/arch/x86/xen/enlighten_hvm.c +++ b/arch/x86/xen/enlighten_hvm.c @@ -64,6 +64,19 @@ static void __init xen_hvm_init_mem_mapping(void) { early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE); HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn)); + + /* + * The virtual address of the shared_info page has changed, so + * the vcpu_info pointer for VCPU 0 is now stale. + * + * The prepare_boot_cpu callback will re-initialize it via + * xen_vcpu_setup, but we can't rely on that to be called for + * old Xen versions (xen_have_vector_callback == 0). + * + * It is, in any case, bad to have a stale vcpu_info pointer + * so reset it now. + */ + xen_vcpu_info_reset(0); } static void __init init_hvm_pv_info(void) @@ -226,12 +239,12 @@ static uint32_t __init xen_platform_hvm(void) return xen_cpuid_base(); } -const struct hypervisor_x86 x86_hyper_xen_hvm = { +const __initconst struct hypervisor_x86 x86_hyper_xen_hvm = { .name = "Xen HVM", .detect = xen_platform_hvm, - .init_platform = xen_hvm_guest_init, - .pin_vcpu = xen_pin_vcpu, - .x2apic_available = xen_x2apic_para_available, - .init_mem_mapping = xen_hvm_init_mem_mapping, + .type = X86_HYPER_XEN_HVM, + .init.init_platform = xen_hvm_guest_init, + .init.x2apic_available = xen_x2apic_para_available, + .init.init_mem_mapping = xen_hvm_init_mem_mapping, + .runtime.pin_vcpu = xen_pin_vcpu, }; -EXPORT_SYMBOL(x86_hyper_xen_hvm); diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index d4396e27b1fb..fd173e6425cc 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -88,6 +88,8 @@ #include "multicalls.h" #include "pmu.h" +#include "../kernel/cpu/cpu.h" /* get_cpu_cap() */ + void *xen_initial_gdt; static int xen_cpu_up_prepare_pv(unsigned int cpu); @@ -601,7 +603,7 @@ static struct trap_array_entry trap_array[] = { #ifdef CONFIG_X86_MCE { machine_check, xen_machine_check, true }, #endif - { nmi, xen_nmi, true }, + { nmi, xen_xennmi, true }, { overflow, xen_overflow, false }, #ifdef CONFIG_IA32_EMULATION { entry_INT80_compat, xen_entry_INT80_compat, false }, @@ -622,7 +624,7 @@ static struct trap_array_entry trap_array[] = { { simd_coprocessor_error, xen_simd_coprocessor_error, false }, }; -static bool get_trap_addr(void **addr, unsigned int ist) +static bool __ref get_trap_addr(void **addr, unsigned int ist) { unsigned int nr; bool ist_okay = false; @@ -644,6 +646,14 @@ static bool get_trap_addr(void **addr, unsigned int ist) } } + if (nr == ARRAY_SIZE(trap_array) && + *addr >= (void *)early_idt_handler_array[0] && + *addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) { + nr = (*addr - (void *)early_idt_handler_array[0]) / + EARLY_IDT_HANDLER_SIZE; + *addr = (void *)xen_early_idt_handler_array[nr]; + } + if (WARN_ON(ist != 0 && !ist_okay)) return false; @@ -811,15 +821,14 @@ static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, } } -static void xen_load_sp0(struct tss_struct *tss, - struct thread_struct *thread) +static void xen_load_sp0(unsigned long sp0) { struct multicall_space mcs; mcs = xen_mc_entry(0); - MULTI_stack_switch(mcs.mc, __KERNEL_DS, 
thread->sp0); + MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0); xen_mc_issue(PARAVIRT_LAZY_CPU); - tss->x86_tss.sp0 = thread->sp0; + this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0); } void xen_set_iopl_mask(unsigned mask) @@ -1221,12 +1230,20 @@ asmlinkage __visible void __init xen_start_kernel(void) xen_setup_features(); - xen_setup_machphys_mapping(); - /* Install Xen paravirt ops */ pv_info = xen_info; pv_init_ops.patch = paravirt_patch_default; pv_cpu_ops = xen_cpu_ops; + xen_init_irq_ops(); + + /* + * Setup xen_vcpu early because it is needed for + * local_irq_disable(), irqs_disabled(), e.g. in printk(). + * + * Don't do the full vcpu_info placement stuff until we have + * the cpu_possible_mask and a non-dummy shared_info. + */ + xen_vcpu_info_reset(0); x86_platform.get_nmi_reason = xen_get_nmi_reason; @@ -1238,6 +1255,7 @@ asmlinkage __visible void __init xen_start_kernel(void) * Set up some pagetable state before starting to set any ptes. */ + xen_setup_machphys_mapping(); xen_init_mmu_ops(); /* Prevent unwanted bits from being set in PTEs. */ @@ -1249,9 +1267,6 @@ asmlinkage __visible void __init xen_start_kernel(void) */ __userpte_alloc_gfp &= ~__GFP_HIGHMEM; - /* Work out if we support NX */ - x86_configure_nx(); - /* Get mfn list */ xen_build_dynamic_phys_to_machine(); @@ -1261,7 +1276,15 @@ asmlinkage __visible void __init xen_start_kernel(void) */ xen_setup_gdt(0); - xen_init_irq_ops(); + /* Work out if we support NX */ + get_cpu_cap(&boot_cpu_data); + x86_configure_nx(); + + /* Let's presume PV guests always boot on vCPU with id 0. */ + per_cpu(xen_vcpu_id, 0) = 0; + + idt_setup_early_handler(); + xen_init_capabilities(); #ifdef CONFIG_X86_LOCAL_APIC @@ -1295,18 +1318,6 @@ asmlinkage __visible void __init xen_start_kernel(void) */ acpi_numa = -1; #endif - /* Let's presume PV guests always boot on vCPU with id 0. */ - per_cpu(xen_vcpu_id, 0) = 0; - - /* - * Setup xen_vcpu early because start_kernel needs it for - * local_irq_disable(), irqs_disabled(). - * - * Don't do the full vcpu_info placement stuff until we have - * the cpu_possible_mask and a non-dummy shared_info. - */ - xen_vcpu_info_reset(0); - WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv)); local_irq_disable(); @@ -1460,9 +1471,9 @@ static uint32_t __init xen_platform_pv(void) return 0; } -const struct hypervisor_x86 x86_hyper_xen_pv = { +const __initconst struct hypervisor_x86 x86_hyper_xen_pv = { .name = "Xen PV", .detect = xen_platform_pv, - .pin_vcpu = xen_pin_vcpu, + .type = X86_HYPER_XEN_PV, + .runtime.pin_vcpu = xen_pin_vcpu, }; -EXPORT_SYMBOL(x86_hyper_xen_pv); diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c index 74179852e46c..7515a19fd324 100644 --- a/arch/x86/xen/irq.c +++ b/arch/x86/xen/irq.c @@ -128,8 +128,6 @@ static const struct pv_irq_ops xen_irq_ops __initconst = { void __init xen_init_irq_ops(void) { - /* For PVH we use default pv_irq_ops settings. 
*/ - if (!xen_feature(XENFEAT_hvm_callback_vector)) - pv_irq_ops = xen_irq_ops; + pv_irq_ops = xen_irq_ops; x86_init.irqs.intr_init = xen_init_IRQ; } diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 3e15345abfe7..de0263348f2d 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -42,13 +42,11 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr) } EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); -static void xen_flush_tlb_all(void) +static noinline void xen_flush_tlb_all(void) { struct mmuext_op *op; struct multicall_space mcs; - trace_xen_mmu_flush_tlb_all(0); - preempt_disable(); mcs = xen_mc_entry(sizeof(*op)); diff --git a/arch/x86/xen/mmu_hvm.c b/arch/x86/xen/mmu_hvm.c index 2cfcfe4f6b2a..dd2ad82eee80 100644 --- a/arch/x86/xen/mmu_hvm.c +++ b/arch/x86/xen/mmu_hvm.c @@ -75,6 +75,6 @@ void __init xen_hvm_init_mmu_ops(void) if (is_pagetable_dying_supported()) pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap; #ifdef CONFIG_PROC_VMCORE - register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram); + WARN_ON(register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram)); #endif } diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 71495f1a86d7..b3526a98a5a5 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -449,7 +449,7 @@ __visible pmd_t xen_make_pmd(pmdval_t pmd) } PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); -#if CONFIG_PGTABLE_LEVELS == 4 +#ifdef CONFIG_X86_64 __visible pudval_t xen_pud_val(pud_t pud) { return pte_mfn_to_pfn(pud.pud); @@ -538,7 +538,7 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val) xen_mc_issue(PARAVIRT_LAZY_MMU); } -#endif /* CONFIG_PGTABLE_LEVELS == 4 */ +#endif /* CONFIG_X86_64 */ static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd, int (*func)(struct mm_struct *mm, struct page *, enum pt_level), @@ -580,21 +580,17 @@ static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d, int (*func)(struct mm_struct *mm, struct page *, enum pt_level), bool last, unsigned long limit) { - int i, nr, flush = 0; + int flush = 0; + pud_t *pud; - nr = last ? 
p4d_index(limit) + 1 : PTRS_PER_P4D; - for (i = 0; i < nr; i++) { - pud_t *pud; - if (p4d_none(p4d[i])) - continue; + if (p4d_none(*p4d)) + return flush; - pud = pud_offset(&p4d[i], 0); - if (PTRS_PER_PUD > 1) - flush |= (*func)(mm, virt_to_page(pud), PT_PUD); - flush |= xen_pud_walk(mm, pud, func, - last && i == nr - 1, limit); - } + pud = pud_offset(p4d, 0); + if (PTRS_PER_PUD > 1) + flush |= (*func)(mm, virt_to_page(pud), PT_PUD); + flush |= xen_pud_walk(mm, pud, func, last, limit); return flush; } @@ -644,8 +640,6 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, continue; p4d = p4d_offset(&pgd[i], 0); - if (PTRS_PER_P4D > 1) - flush |= (*func)(mm, virt_to_page(p4d), PT_P4D); flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit); } @@ -1176,22 +1170,14 @@ static void __init xen_cleanmfnmap(unsigned long vaddr) { pgd_t *pgd; p4d_t *p4d; - unsigned int i; bool unpin; unpin = (vaddr == 2 * PGDIR_SIZE); vaddr &= PMD_MASK; pgd = pgd_offset_k(vaddr); p4d = p4d_offset(pgd, 0); - for (i = 0; i < PTRS_PER_P4D; i++) { - if (p4d_none(p4d[i])) - continue; - xen_cleanmfnmap_p4d(p4d + i, unpin); - } - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - set_pgd(pgd, __pgd(0)); - xen_cleanmfnmap_free_pgtbl(p4d, unpin); - } + if (!p4d_none(*p4d)) + xen_cleanmfnmap_p4d(p4d, unpin); } static void __init xen_pagetable_p2m_free(void) @@ -1294,13 +1280,11 @@ unsigned long xen_read_cr2_direct(void) return this_cpu_read(xen_vcpu_info.arch.cr2); } -static void xen_flush_tlb(void) +static noinline void xen_flush_tlb(void) { struct mmuext_op *op; struct multicall_space mcs; - trace_xen_mmu_flush_tlb(0); - preempt_disable(); mcs = xen_mc_entry(sizeof(*op)); @@ -1314,12 +1298,12 @@ static void xen_flush_tlb(void) preempt_enable(); } -static void xen_flush_tlb_single(unsigned long addr) +static void xen_flush_tlb_one_user(unsigned long addr) { struct mmuext_op *op; struct multicall_space mcs; - trace_xen_mmu_flush_tlb_single(addr); + trace_xen_mmu_flush_tlb_one_user(addr); preempt_disable(); @@ -1692,7 +1676,7 @@ static void xen_release_pmd(unsigned long pfn) xen_release_ptpage(pfn, PT_PMD); } -#if CONFIG_PGTABLE_LEVELS >= 4 +#ifdef CONFIG_X86_64 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn) { xen_alloc_ptpage(mm, pfn, PT_PUD); @@ -1916,6 +1900,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) /* Graft it onto L4[511][510] */ copy_page(level2_kernel_pgt, l2); + /* + * Zap execute permission from the ident map. Due to the sharing of + * L1 entries we need to do this in the L2. + */ + if (__supported_pte_mask & _PAGE_NX) { + for (i = 0; i < PTRS_PER_PMD; ++i) { + if (pmd_none(level2_ident_pgt[i])) + continue; + level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX); + } + } + /* Copy the initial P->M table mappings if necessary. 
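 	 * (xen_start_info->mfn_list may still point below
 	 * __START_KERNEL_map; in that case the PGD slots covering the
 	 * p2m list are copied over so it stays reachable.)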
*/ i = pgd_index(xen_start_info->mfn_list); if (i && i < pgd_index(__START_KERNEL_map)) @@ -2029,13 +2025,12 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr) */ void __init xen_relocate_p2m(void) { - phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys; + phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys; unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end; - int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d; + int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud; pte_t *pt; pmd_t *pmd; pud_t *pud; - p4d_t *p4d = NULL; pgd_t *pgd; unsigned long *new_p2m; int save_pud; @@ -2045,11 +2040,7 @@ void __init xen_relocate_p2m(void) n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT; n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT; n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT; - if (PTRS_PER_P4D > 1) - n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT; - else - n_p4d = 0; - n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d; + n_frames = n_pte + n_pt + n_pmd + n_pud; new_area = xen_find_free_area(PFN_PHYS(n_frames)); if (!new_area) { @@ -2065,76 +2056,56 @@ void __init xen_relocate_p2m(void) * To avoid any possible virtual address collision, just use * 2 * PUD_SIZE for the new area. */ - p4d_phys = new_area; - pud_phys = p4d_phys + PFN_PHYS(n_p4d); + pud_phys = new_area; pmd_phys = pud_phys + PFN_PHYS(n_pud); pt_phys = pmd_phys + PFN_PHYS(n_pmd); p2m_pfn = PFN_DOWN(pt_phys) + n_pt; pgd = __va(read_cr3_pa()); new_p2m = (unsigned long *)(2 * PGDIR_SIZE); - idx_p4d = 0; save_pud = n_pud; - do { - if (n_p4d > 0) { - p4d = early_memremap(p4d_phys, PAGE_SIZE); - clear_page(p4d); - n_pud = min(save_pud, PTRS_PER_P4D); - } - for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { - pud = early_memremap(pud_phys, PAGE_SIZE); - clear_page(pud); - for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD); - idx_pmd++) { - pmd = early_memremap(pmd_phys, PAGE_SIZE); - clear_page(pmd); - for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD); - idx_pt++) { - pt = early_memremap(pt_phys, PAGE_SIZE); - clear_page(pt); - for (idx_pte = 0; - idx_pte < min(n_pte, PTRS_PER_PTE); - idx_pte++) { - set_pte(pt + idx_pte, - pfn_pte(p2m_pfn, PAGE_KERNEL)); - p2m_pfn++; - } - n_pte -= PTRS_PER_PTE; - early_memunmap(pt, PAGE_SIZE); - make_lowmem_page_readonly(__va(pt_phys)); - pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, - PFN_DOWN(pt_phys)); - set_pmd(pmd + idx_pt, - __pmd(_PAGE_TABLE | pt_phys)); - pt_phys += PAGE_SIZE; + for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { + pud = early_memremap(pud_phys, PAGE_SIZE); + clear_page(pud); + for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD); + idx_pmd++) { + pmd = early_memremap(pmd_phys, PAGE_SIZE); + clear_page(pmd); + for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD); + idx_pt++) { + pt = early_memremap(pt_phys, PAGE_SIZE); + clear_page(pt); + for (idx_pte = 0; + idx_pte < min(n_pte, PTRS_PER_PTE); + idx_pte++) { + set_pte(pt + idx_pte, + pfn_pte(p2m_pfn, PAGE_KERNEL)); + p2m_pfn++; } - n_pt -= PTRS_PER_PMD; - early_memunmap(pmd, PAGE_SIZE); - make_lowmem_page_readonly(__va(pmd_phys)); - pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE, - PFN_DOWN(pmd_phys)); - set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys)); - pmd_phys += PAGE_SIZE; + n_pte -= PTRS_PER_PTE; + early_memunmap(pt, PAGE_SIZE); + make_lowmem_page_readonly(__va(pt_phys)); + pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, + PFN_DOWN(pt_phys)); + set_pmd(pmd + idx_pt, + __pmd(_PAGE_TABLE | pt_phys)); + pt_phys += PAGE_SIZE; } - n_pmd -= PTRS_PER_PUD; - early_memunmap(pud, 
PAGE_SIZE); - make_lowmem_page_readonly(__va(pud_phys)); - pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys)); - if (n_p4d > 0) - set_p4d(p4d + idx_pud, __p4d(_PAGE_TABLE | pud_phys)); - else - set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys)); - pud_phys += PAGE_SIZE; - } - if (n_p4d > 0) { - save_pud -= PTRS_PER_P4D; - early_memunmap(p4d, PAGE_SIZE); - make_lowmem_page_readonly(__va(p4d_phys)); - pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(p4d_phys)); - set_pgd(pgd + 2 + idx_p4d, __pgd(_PAGE_TABLE | p4d_phys)); - p4d_phys += PAGE_SIZE; + n_pt -= PTRS_PER_PMD; + early_memunmap(pmd, PAGE_SIZE); + make_lowmem_page_readonly(__va(pmd_phys)); + pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE, + PFN_DOWN(pmd_phys)); + set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys)); + pmd_phys += PAGE_SIZE; } - } while (++idx_p4d < n_p4d); + n_pmd -= PTRS_PER_PUD; + early_memunmap(pud, PAGE_SIZE); + make_lowmem_page_readonly(__va(pud_phys)); + pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys)); + set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys)); + pud_phys += PAGE_SIZE; + } /* Now copy the old p2m info to the new area. */ memcpy(new_p2m, xen_p2m_addr, size); @@ -2300,7 +2271,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) switch (idx) { case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: - case FIX_RO_IDT: #ifdef CONFIG_X86_32 case FIX_WP_TEST: # ifdef CONFIG_HIGHMEM @@ -2311,7 +2281,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) #endif case FIX_TEXT_POKE0: case FIX_TEXT_POKE1: - case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END: /* All local page mappings */ pte = pfn_pte(phys, prot); break; @@ -2361,7 +2330,7 @@ static void __init xen_post_allocator_init(void) pv_mmu_ops.set_pte = xen_set_pte; pv_mmu_ops.set_pmd = xen_set_pmd; pv_mmu_ops.set_pud = xen_set_pud; -#if CONFIG_PGTABLE_LEVELS >= 4 +#ifdef CONFIG_X86_64 pv_mmu_ops.set_p4d = xen_set_p4d; #endif @@ -2371,7 +2340,7 @@ static void __init xen_post_allocator_init(void) pv_mmu_ops.alloc_pmd = xen_alloc_pmd; pv_mmu_ops.release_pte = xen_release_pte; pv_mmu_ops.release_pmd = xen_release_pmd; -#if CONFIG_PGTABLE_LEVELS >= 4 +#ifdef CONFIG_X86_64 pv_mmu_ops.alloc_pud = xen_alloc_pud; pv_mmu_ops.release_pud = xen_release_pud; #endif @@ -2401,7 +2370,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .flush_tlb_user = xen_flush_tlb, .flush_tlb_kernel = xen_flush_tlb, - .flush_tlb_single = xen_flush_tlb_single, + .flush_tlb_one_user = xen_flush_tlb_one_user, .flush_tlb_others = xen_flush_tlb_others, .pgd_alloc = xen_pgd_alloc, @@ -2435,14 +2404,14 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .make_pmd = PV_CALLEE_SAVE(xen_make_pmd), .pmd_val = PV_CALLEE_SAVE(xen_pmd_val), -#if CONFIG_PGTABLE_LEVELS >= 4 +#ifdef CONFIG_X86_64 .pud_val = PV_CALLEE_SAVE(xen_pud_val), .make_pud = PV_CALLEE_SAVE(xen_make_pud), .set_p4d = xen_set_p4d_hyper, .alloc_pud = xen_alloc_pmd_init, .release_pud = xen_release_pmd_init, -#endif /* CONFIG_PGTABLE_LEVELS == 4 */ +#endif /* CONFIG_X86_64 */ .activate_mm = xen_activate_mm, .dup_mmap = xen_dup_mmap, diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 6083ba462f35..15812e553b95 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -694,6 +694,9 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, int i, ret = 0; pte_t *pte; + if (xen_feature(XENFEAT_auto_translated_physmap)) + return 0; + if (kmap_ops) { ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, kmap_ops, count); @@ -736,6 +739,9 @@ 
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, { int i, ret = 0; + if (xen_feature(XENFEAT_auto_translated_physmap)) + return 0; + for (i = 0; i < count; i++) { unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); unsigned long pfn = page_to_pfn(pages[i]); diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index c114ca767b3b..6e0d2086eacb 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -808,7 +808,6 @@ char * __init xen_memory_setup(void) addr = xen_e820_table.entries[0].addr; size = xen_e820_table.entries[0].size; while (i < xen_e820_table.nr_entries) { - bool discard = false; chunk_size = size; type = xen_e820_table.entries[i].type; @@ -824,11 +823,10 @@ char * __init xen_memory_setup(void) xen_add_extra_mem(pfn_s, n_pfns); xen_max_p2m_pfn = pfn_s + n_pfns; } else - discard = true; + type = E820_TYPE_UNUSABLE; } - if (!discard) - xen_align_and_add_e820_region(addr, chunk_size, type); + xen_align_and_add_e820_region(addr, chunk_size, type); addr += chunk_size; size -= chunk_size; diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index 05f91ce9b55e..e3b18ad49889 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -14,6 +14,7 @@ * single-threaded. */ #include +#include #include #include #include @@ -31,6 +32,7 @@ #include #include +#include #include #include @@ -69,6 +71,8 @@ static void cpu_bringup(void) cpu_data(cpu).x86_max_cores = 1; set_cpu_sibling_map(cpu); + speculative_store_bypass_ht_init(); + xen_setup_cpu_clockevents(); notify_cpu_starting(cpu); @@ -249,6 +253,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus) } set_cpu_sibling_map(0); + speculative_store_bypass_ht_init(); + xen_pmu_init(0); if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0)) @@ -294,12 +300,19 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) #endif memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt)); + /* + * Bring up the CPU in cpu_bringup_and_idle() with the stack + * pointing just below where pt_regs would be if it were a normal + * kernel entry. + */ ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle; ctxt->flags = VGCF_IN_KERNEL; ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */ ctxt->user_regs.ds = __USER_DS; ctxt->user_regs.es = __USER_DS; ctxt->user_regs.ss = __KERNEL_DS; + ctxt->user_regs.cs = __KERNEL_CS; + ctxt->user_regs.esp = (unsigned long)task_pt_regs(idle); xen_copy_trap_info(ctxt->trap_ctxt); @@ -314,8 +327,13 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) ctxt->gdt_frames[0] = gdt_mfn; ctxt->gdt_ents = GDT_ENTRIES; + /* + * Set SS:SP that Xen will use when entering guest kernel mode + * from guest user mode. Subsequent calls to load_sp0() can + * change this value. 
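+	 * (task_top_of_stack(idle) is the same value a later
+	 * load_sp0() would install: the top of the task stack, below
+	 * which pt_regs is built on kernel entry from user mode.)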
+ */ ctxt->kernel_ss = __KERNEL_DS; - ctxt->kernel_sp = idle->thread.sp0; + ctxt->kernel_sp = task_top_of_stack(idle); #ifdef CONFIG_X86_32 ctxt->event_callback_cs = __KERNEL_CS; @@ -327,10 +345,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) (unsigned long)xen_hypervisor_callback; ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback; - ctxt->user_regs.cs = __KERNEL_CS; per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir); - ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs); ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir)); if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt)) BUG(); @@ -414,6 +430,7 @@ static void xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */ * data back is to call: */ tick_nohz_idle_enter(); + tick_nohz_idle_stop_tick_protected(); cpuhp_online_idle(CPUHP_AP_ONLINE_IDLE); } diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index 92bf5ecb6baf..3e3a58ea669e 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c @@ -1,12 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 #include #include +#include #include #include #include #include +#include +#include #include #include #include @@ -15,6 +18,8 @@ #include "mmu.h" #include "pmu.h" +static DEFINE_PER_CPU(u64, spec_ctrl); + void xen_arch_pre_suspend(void) { if (xen_pv_domain()) @@ -31,6 +36,9 @@ void xen_arch_post_suspend(int cancelled) static void xen_vcpu_notify_restore(void *data) { + if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) + wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl)); + /* Boot processor notified via generic timekeeping_resume() */ if (smp_processor_id() == 0) return; @@ -40,7 +48,15 @@ static void xen_vcpu_notify_restore(void *data) static void xen_vcpu_notify_suspend(void *data) { + u64 tmp; + tick_suspend_local(); + + if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) { + rdmsrl(MSR_IA32_SPEC_CTRL, tmp); + this_cpu_write(spec_ctrl, tmp); + wrmsrl(MSR_IA32_SPEC_CTRL, 0); + } } void xen_arch_resume(void) diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S index c98a48c861fd..417b339e5c8e 100644 --- a/arch/x86/xen/xen-asm_64.S +++ b/arch/x86/xen/xen-asm_64.S @@ -15,6 +15,7 @@ #include +#include #include .macro xen_pv_trap name @@ -30,7 +31,7 @@ xen_pv_trap debug xen_pv_trap xendebug xen_pv_trap int3 xen_pv_trap xenint3 -xen_pv_trap nmi +xen_pv_trap xennmi xen_pv_trap overflow xen_pv_trap bounds xen_pv_trap invalid_op @@ -54,6 +55,19 @@ xen_pv_trap entry_INT80_compat #endif xen_pv_trap hypervisor_callback + __INIT +ENTRY(xen_early_idt_handler_array) + i = 0 + .rept NUM_EXCEPTION_VECTORS + pop %rcx + pop %r11 + jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE + i = i + 1 + .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc + .endr +END(xen_early_idt_handler_array) + __FINIT + hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 /* * Xen64 iret frame: diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index b5b8d7f43557..96f26e026783 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S @@ -9,7 +9,10 @@ #include #include +#include #include +#include +#include #include #include @@ -20,6 +23,7 @@ #ifdef CONFIG_XEN_PV __INIT ENTRY(startup_xen) + UNWIND_HINT_EMPTY cld /* Clear .bss */ @@ -33,22 +37,39 @@ ENTRY(startup_xen) mov %_ASM_SI, xen_start_info mov $init_thread_union+THREAD_SIZE, %_ASM_SP - jmp xen_start_kernel +#ifdef CONFIG_X86_64 + /* Set up %gs. 
+ * + * The base of %gs always points to the bottom of the irqstack + * union. If the stack protector canary is enabled, it is + * located at %gs:40. Note that, on SMP, the boot cpu uses + * init data section till per cpu areas are set up. + */ + movl $MSR_GS_BASE,%ecx + movq $INIT_PER_CPU_VAR(irq_stack_union),%rax + cdq + wrmsr +#endif + jmp xen_start_kernel +END(startup_xen) __FINIT #endif .pushsection .text .balign PAGE_SIZE ENTRY(hypercall_page) - .skip PAGE_SIZE + .rept (PAGE_SIZE / 32) + UNWIND_HINT_EMPTY + .skip 32 + .endr #define HYPERCALL(n) \ .equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \ .type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32 #include #undef HYPERCALL - +END(hypercall_page) .popsection ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S index e1a5fbeae08d..5d7554c025fd 100644 --- a/arch/x86/xen/xen-pvh.S +++ b/arch/x86/xen/xen-pvh.S @@ -54,6 +54,9 @@ * charge of setting up it's own stack, GDT and IDT. */ +#define PVH_GDT_ENTRY_CANARY 4 +#define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8) + ENTRY(pvh_start_xen) cld @@ -98,6 +101,12 @@ ENTRY(pvh_start_xen) /* 64-bit entry point. */ .code64 1: + /* Set base address in stack canary descriptor. */ + mov $MSR_GS_BASE,%ecx + mov $_pa(canary), %eax + xor %edx, %edx + wrmsr + call xen_prepare_pvh /* startup_64 expects boot_params in %rsi. */ @@ -107,6 +116,17 @@ ENTRY(pvh_start_xen) #else /* CONFIG_X86_64 */ + /* Set base address in stack canary descriptor. */ + movl $_pa(gdt_start),%eax + movl $_pa(canary),%ecx + movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax) + shrl $16, %ecx + movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax) + movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax) + + mov $PVH_CANARY_SEL,%eax + mov %eax,%gs + call mk_early_pgtbl_32 mov $_pa(initial_page_table), %eax @@ -150,9 +170,13 @@ gdt_start: .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* __KERNEL_CS */ #endif .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* __KERNEL_DS */ + .quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */ gdt_end: - .balign 4 + .balign 16 +canary: + .fill 48, 1, 0 + early_stack: .fill 256, 1, 0 early_stack_end: diff --git a/arch/xtensa/boot/.gitignore b/arch/xtensa/boot/.gitignore index be7655998b26..38177c7ebcab 100644 --- a/arch/xtensa/boot/.gitignore +++ b/arch/xtensa/boot/.gitignore @@ -1,3 +1,2 @@ uImage zImage.redboot -*.dtb diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h index eaaf1ebcc7a4..5bfbc1c401d4 100644 --- a/arch/xtensa/include/asm/futex.h +++ b/arch/xtensa/include/asm/futex.h @@ -92,7 +92,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { int ret = 0; - u32 prev; if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; @@ -103,26 +102,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, __asm__ __volatile__ ( " # futex_atomic_cmpxchg_inatomic\n" - "1: l32i %1, %3, 0\n" - " mov %0, %5\n" - " wsr %1, scompare1\n" - "2: s32c1i %0, %3, 0\n" - "3:\n" + " wsr %5, scompare1\n" + "1: s32c1i %1, %4, 0\n" + " s32i %1, %6, 0\n" + "2:\n" " .section .fixup,\"ax\"\n" " .align 4\n" - "4: .long 3b\n" - "5: l32r %1, 4b\n" - " movi %0, %6\n" + "3: .long 2b\n" + "4: l32r %1, 3b\n" + " movi %0, %7\n" " jx %1\n" " .previous\n" " .section __ex_table,\"a\"\n" - " .long 1b,5b,2b,5b\n" + " .long 1b,4b\n" " .previous\n" - : "+r" (ret), "=&r" (prev), "+m" (*uaddr) - : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT) + : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval) + : "r" 
(uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT) : "memory"); - *uval = prev; return ret; } diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h index 75a07b8119a9..1de07a7f7680 100644 --- a/arch/xtensa/include/uapi/asm/socket.h +++ b/arch/xtensa/include/uapi/asm/socket.h @@ -116,4 +116,7 @@ #define SO_ZEROCOPY 60 +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + #endif /* _XTENSA_SOCKET_H */ diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index bae697a06a98..2986bc88a18e 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -336,7 +336,7 @@ do_unaligned_user (struct pt_regs *regs) info.si_errno = 0; info.si_code = BUS_ADRALN; info.si_addr = (void *) regs->excvaddr; - force_sig_info(SIGSEGV, &info, current); + force_sig_info(SIGBUS, &info, current); } #endif diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 720fe4e8b497..8dad076661fc 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -79,19 +79,75 @@ void __init zones_init(void) free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL); } +#ifdef CONFIG_HIGHMEM +static void __init free_area_high(unsigned long pfn, unsigned long end) +{ + for (; pfn < end; pfn++) + free_highmem_page(pfn_to_page(pfn)); +} + +static void __init free_highpages(void) +{ + unsigned long max_low = max_low_pfn; + struct memblock_region *mem, *res; + + reset_all_zones_managed_pages(); + /* set highmem page free */ + for_each_memblock(memory, mem) { + unsigned long start = memblock_region_memory_base_pfn(mem); + unsigned long end = memblock_region_memory_end_pfn(mem); + + /* Ignore complete lowmem entries */ + if (end <= max_low) + continue; + + if (memblock_is_nomap(mem)) + continue; + + /* Truncate partial highmem entries */ + if (start < max_low) + start = max_low; + + /* Find and exclude any reserved regions */ + for_each_memblock(reserved, res) { + unsigned long res_start, res_end; + + res_start = memblock_region_reserved_base_pfn(res); + res_end = memblock_region_reserved_end_pfn(res); + + if (res_end < start) + continue; + if (res_start < start) + res_start = start; + if (res_start > end) + res_start = end; + if (res_end > end) + res_end = end; + if (res_start != start) + free_area_high(start, res_start); + start = res_end; + if (start == end) + break; + } + + /* And now free anything which remains */ + if (start < end) + free_area_high(start, end); + } +} +#else +static void __init free_highpages(void) +{ +} +#endif + /* * Initialize memory pages. 
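 * (mem_init() now defers highmem freeing to free_highpages() above,
 * which skips memblock-reserved and nomap ranges instead of handing
 * back every page between max_low_pfn and max_pfn.)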
*/ void __init mem_init(void) { -#ifdef CONFIG_HIGHMEM - unsigned long tmp; - - reset_all_zones_managed_pages(); - for (tmp = max_low_pfn; tmp < max_pfn; tmp++) - free_highmem_page(pfn_to_page(tmp)); -#endif + free_highpages(); max_mapnr = max_pfn - ARCH_PFN_OFFSET; high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); diff --git a/block/badblocks.c b/block/badblocks.c index 43c71166e1e2..91f7bcf979d3 100644 --- a/block/badblocks.c +++ b/block/badblocks.c @@ -178,7 +178,7 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors, if (bb->shift < 0) /* badblocks are disabled */ - return 0; + return 1; if (bb->shift) { /* round the start down, and the end up */ diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index ceefb9a706d6..5d53e504acae 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -749,10 +749,11 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) unsigned long flags; int i; + spin_lock_irqsave(&bfqd->lock, flags); + if (!entity) /* root group */ - return; + goto put_async_queues; - spin_lock_irqsave(&bfqd->lock, flags); /* * Empty all service_trees belonging to this group before * deactivating the group itself. @@ -783,6 +784,8 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) } __bfq_deactivate_entity(entity, false); + +put_async_queues: bfq_put_async_queues(bfqd, bfqg); spin_unlock_irqrestore(&bfqd->lock, flags); diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index a4783da90ba8..3b44bd28fc45 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -108,6 +108,7 @@ #include "blk-mq-tag.h" #include "blk-mq-sched.h" #include "bfq-iosched.h" +#include "blk-wbt.h" #define BFQ_BFQQ_FNS(name) \ void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \ @@ -1202,6 +1203,24 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd) return dur; } +/* + * Return the farthest future time instant according to jiffies + * macros. + */ +static unsigned long bfq_greatest_from_now(void) +{ + return jiffies + MAX_JIFFY_OFFSET; +} + +/* + * Return the farthest past time instant according to jiffies + * macros. + */ +static unsigned long bfq_smallest_from_now(void) +{ + return jiffies - MAX_JIFFY_OFFSET; +} + static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, struct bfq_queue *bfqq, unsigned int old_wr_coeff, @@ -1216,7 +1235,19 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, bfqq->wr_coeff = bfqd->bfq_wr_coeff; bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); } else { - bfqq->wr_start_at_switch_to_srt = jiffies; + /* + * No interactive weight raising in progress + * here: assign minus infinity to + * wr_start_at_switch_to_srt, to make sure + * that, at the end of the soft-real-time + * weight raising periods that is starting + * now, no interactive weight-raising period + * may be wrongly considered as still in + * progress (and thus actually started by + * mistake). 
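+		 * bfq_smallest_from_now() returns
+		 * jiffies - MAX_JIFFY_OFFSET, the most distant past
+		 * instant that jiffies arithmetic can represent.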
+ */ + bfqq->wr_start_at_switch_to_srt = + bfq_smallest_from_now(); bfqq->wr_coeff = bfqd->bfq_wr_coeff * BFQ_SOFTRT_WEIGHT_FACTOR; bfqq->wr_cur_max_time = @@ -1677,7 +1708,6 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq, if (!RB_EMPTY_NODE(&rq->rb_node)) goto end; - spin_lock_irq(&bfqq->bfqd->lock); /* * If next and rq belong to the same bfq_queue and next is older @@ -1701,7 +1731,6 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq, bfq_remove_request(q, next); - spin_unlock_irq(&bfqq->bfqd->lock); end: bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); } @@ -2897,24 +2926,6 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); } -/* - * Return the farthest future time instant according to jiffies - * macros. - */ -static unsigned long bfq_greatest_from_now(void) -{ - return jiffies + MAX_JIFFY_OFFSET; -} - -/* - * Return the farthest past time instant according to jiffies - * macros. - */ -static unsigned long bfq_smallest_from_now(void) -{ - return jiffies - MAX_JIFFY_OFFSET; -} - /** * bfq_bfqq_expire - expire a queue. * @bfqd: device owning the queue. @@ -4446,8 +4457,16 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio) bool new_queue = false; bool bfqq_already_existing = false, split = false; - if (!rq->elv.icq) + /* + * Even if we don't have an icq attached, we should still clear + * the scheduler pointers, as they might point to previously + * allocated bic/bfqq structs. + */ + if (!rq->elv.icq) { + rq->elv.priv[0] = rq->elv.priv[1] = NULL; return; + } + bic = icq_to_bic(rq->elv.icq); spin_lock_irq(&bfqd->lock); @@ -4775,7 +4794,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) bfq_init_root_group(bfqd->root_group, bfqd); bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); - + wbt_disable_default(q); return 0; out_free: diff --git a/block/bio.c b/block/bio.c index 101c2a9b5481..194d28cdc642 100644 --- a/block/bio.c +++ b/block/bio.c @@ -43,9 +43,9 @@ * break badly! cannot be bigger than what you can fit into an * unsigned short */ -#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) } +#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n } static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = { - BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES), + BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max), }; #undef BV @@ -597,7 +597,10 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) * so we don't set nor calculate new physical/hw segment counts here */ bio->bi_disk = bio_src->bi_disk; + bio->bi_partno = bio_src->bi_partno; bio_set_flag(bio, BIO_CLONED); + if (bio_flagged(bio_src, BIO_THROTTLED)) + bio_set_flag(bio, BIO_THROTTLED); bio->bi_opf = bio_src->bi_opf; bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_iter = bio_src->bi_iter; @@ -878,16 +881,16 @@ EXPORT_SYMBOL(bio_add_page); */ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) { - unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; + unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx; struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; struct page **pages = (struct page **)bv; - size_t offset, diff; + size_t offset; ssize_t size; size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset); if (unlikely(size <= 0)) return size ? 
size : -EFAULT; - nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE; + idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE; /* * Deep magic below: We need to walk the pinned pages backwards @@ -900,17 +903,15 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) bio->bi_iter.bi_size += size; bio->bi_vcnt += nr_pages; - diff = (nr_pages * PAGE_SIZE - offset) - size; - while (nr_pages--) { - bv[nr_pages].bv_page = pages[nr_pages]; - bv[nr_pages].bv_len = PAGE_SIZE; - bv[nr_pages].bv_offset = 0; + while (idx--) { + bv[idx].bv_page = pages[idx]; + bv[idx].bv_len = PAGE_SIZE; + bv[idx].bv_offset = 0; } bv[0].bv_offset += offset; bv[0].bv_len -= offset; - if (diff) - bv[bio->bi_vcnt - 1].bv_len -= diff; + bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size; iov_iter_advance(iter, size); return 0; @@ -1888,9 +1889,10 @@ struct bio *bio_split(struct bio *bio, int sectors, bio_integrity_trim(split); bio_advance(bio, split->bi_iter.bi_size); + bio->bi_iter.bi_done = 0; if (bio_flagged(bio, BIO_TRACE_COMPLETION)) - bio_set_flag(bio, BIO_TRACE_COMPLETION); + bio_set_flag(split, BIO_TRACE_COMPLETION); return split; } diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index d3f56baee936..3dc7c0b4adcb 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1149,18 +1149,16 @@ int blkcg_init_queue(struct request_queue *q) rcu_read_lock(); spin_lock_irq(q->queue_lock); blkg = blkg_create(&blkcg_root, q, new_blkg); + if (IS_ERR(blkg)) + goto err_unlock; + q->root_blkg = blkg; + q->root_rl.blkg = blkg; spin_unlock_irq(q->queue_lock); rcu_read_unlock(); if (preloaded) radix_tree_preload_end(); - if (IS_ERR(blkg)) - return PTR_ERR(blkg); - - q->root_blkg = blkg; - q->root_rl.blkg = blkg; - ret = blk_throtl_init(q); if (ret) { spin_lock_irq(q->queue_lock); @@ -1168,6 +1166,13 @@ int blkcg_init_queue(struct request_queue *q) spin_unlock_irq(q->queue_lock); } return ret; + +err_unlock: + spin_unlock_irq(q->queue_lock); + rcu_read_unlock(); + if (preloaded) + radix_tree_preload_end(); + return PTR_ERR(blkg); } /** @@ -1374,17 +1379,12 @@ void blkcg_deactivate_policy(struct request_queue *q, __clear_bit(pol->plid, q->blkcg_pols); list_for_each_entry(blkg, &q->blkg_list, q_node) { - /* grab blkcg lock too while removing @pd from @blkg */ - spin_lock(&blkg->blkcg->lock); - if (blkg->pd[pol->plid]) { if (pol->pd_offline_fn) pol->pd_offline_fn(blkg->pd[pol->plid]); pol->pd_free_fn(blkg->pd[pol->plid]); blkg->pd[pol->plid] = NULL; } - - spin_unlock(&blkg->blkcg->lock); } spin_unlock_irq(q->queue_lock); diff --git a/block/blk-core.c b/block/blk-core.c index 048be4aa6024..68bae6338ad4 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -333,11 +333,13 @@ EXPORT_SYMBOL(blk_stop_queue); void blk_sync_queue(struct request_queue *q) { del_timer_sync(&q->timeout); + cancel_work_sync(&q->timeout_work); if (q->mq_ops) { struct blk_mq_hw_ctx *hctx; int i; + cancel_delayed_work_sync(&q->requeue_work); queue_for_each_hw_ctx(q, hctx, i) cancel_delayed_work_sync(&hctx->run_work); } else { @@ -529,6 +531,13 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all) } } +void blk_drain_queue(struct request_queue *q) +{ + spin_lock_irq(q->queue_lock); + __blk_drain_queue(q, true); + spin_unlock_irq(q->queue_lock); +} + /** * blk_queue_bypass_start - enter queue bypass mode * @q: queue of interest @@ -604,8 +613,8 @@ void blk_set_queue_dying(struct request_queue *q) spin_lock_irq(q->queue_lock); blk_queue_for_each_rl(rl, q) { if (rl->rq_pool) { - 
wake_up(&rl->wait[BLK_RW_SYNC]); - wake_up(&rl->wait[BLK_RW_ASYNC]); + wake_up_all(&rl->wait[BLK_RW_SYNC]); + wake_up_all(&rl->wait[BLK_RW_ASYNC]); } } spin_unlock_irq(q->queue_lock); @@ -653,11 +662,18 @@ void blk_cleanup_queue(struct request_queue *q) */ blk_freeze_queue(q); spin_lock_irq(lock); - if (!q->mq_ops) - __blk_drain_queue(q, true); queue_flag_set(QUEUE_FLAG_DEAD, q); spin_unlock_irq(lock); + /* + * Make sure all in-progress dispatches are completed, because + * blk_freeze_queue() can only complete requests, and dispatch + * may still be in progress since we dispatch requests from + * more than one context. + */ + if (q->mq_ops) + blk_mq_quiesce_queue(q); + /* for synchronous bio-based drivers, finish in-flight integrity I/O */ blk_flush_integrity(); @@ -763,7 +779,6 @@ EXPORT_SYMBOL(blk_alloc_queue); int blk_queue_enter(struct request_queue *q, bool nowait) { while (true) { - int ret; if (percpu_ref_tryget_live(&q->q_usage_counter)) return 0; @@ -780,13 +795,11 @@ int blk_queue_enter(struct request_queue *q, bool nowait) */ smp_rmb(); - ret = wait_event_interruptible(q->mq_freeze_wq, - !atomic_read(&q->mq_freeze_depth) || - blk_queue_dying(q)); + wait_event(q->mq_freeze_wq, + !atomic_read(&q->mq_freeze_depth) || + blk_queue_dying(q)); if (blk_queue_dying(q)) return -ENODEV; - if (ret) - return ret; } } @@ -844,6 +857,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) setup_timer(&q->backing_dev_info->laptop_mode_wb_timer, laptop_mode_timer_fn, (unsigned long) q); setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); + INIT_WORK(&q->timeout_work, NULL); INIT_LIST_HEAD(&q->queue_head); INIT_LIST_HEAD(&q->timeout_list); INIT_LIST_HEAD(&q->icq_list); @@ -2260,7 +2274,7 @@ blk_qc_t submit_bio(struct bio *bio) unsigned int count; if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) - count = queue_logical_block_size(bio->bi_disk->queue); + count = queue_logical_block_size(bio->bi_disk->queue) >> 9; else count = bio_sectors(bio); @@ -3048,6 +3062,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, { if (bio_has_data(bio)) rq->nr_phys_segments = bio_phys_segments(q, bio); + else if (bio_op(bio) == REQ_OP_DISCARD) + rq->nr_phys_segments = 1; rq->__data_len = bio->bi_iter.bi_size; rq->bio = rq->biotail = bio; @@ -3131,6 +3147,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src) dst->cpu = src->cpu; dst->__sector = blk_rq_pos(src); dst->__data_len = blk_rq_bytes(src); + if (src->rq_flags & RQF_SPECIAL_PAYLOAD) { + dst->rq_flags |= RQF_SPECIAL_PAYLOAD; + dst->special_vec = src->special_vec; + } dst->nr_phys_segments = src->nr_phys_segments; dst->ioprio = src->ioprio; dst->extra_len = src->extra_len; diff --git a/block/blk-lib.c b/block/blk-lib.c index 63fb971d6574..2bc544ce3d2e 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -275,6 +275,40 @@ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects) return min(pages, (sector_t)BIO_MAX_PAGES); } +static int __blkdev_issue_zero_pages(struct block_device *bdev, + sector_t sector, sector_t nr_sects, gfp_t gfp_mask, + struct bio **biop) +{ + struct request_queue *q = bdev_get_queue(bdev); + struct bio *bio = *biop; + int bi_size = 0; + unsigned int sz; + + if (!q) + return -ENXIO; + + while (nr_sects != 0) { + bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects), + gfp_mask); + bio->bi_iter.bi_sector = sector; + bio_set_dev(bio, bdev); + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); + + while (nr_sects != 0) { + sz = min((sector_t) PAGE_SIZE,
nr_sects << 9); + bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0); + nr_sects -= bi_size >> 9; + sector += bi_size >> 9; + if (bi_size < sz) + break; + } + cond_resched(); + } + + *biop = bio; + return 0; +} + /** * __blkdev_issue_zeroout - generate a number of zero-filled write bios * @bdev: blockdev to issue @@ -288,12 +322,6 @@ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects) * Zero-fill a block range, either using hardware offload or by explicitly * writing zeroes to the device. * - * Note that this function may fail with -EOPNOTSUPP if the driver signals - * zeroing offload support, but the device fails to process the command (for - * some devices there is no non-destructive way to verify whether this - * operation is actually supported). In this case the caller should call - * retry the call to blkdev_issue_zeroout() and the fallback path will be used. - * * If a device is using logical block provisioning, the underlying space will * not be released if %flags contains BLKDEV_ZERO_NOUNMAP. * @@ -305,9 +333,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, unsigned flags) { int ret; - int bi_size = 0; - struct bio *bio = *biop; - unsigned int sz; sector_t bs_mask; bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; @@ -317,30 +342,10 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask, biop, flags); if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK)) - goto out; - - ret = 0; - while (nr_sects != 0) { - bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects), - gfp_mask); - bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, bdev); - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); - - while (nr_sects != 0) { - sz = min((sector_t) PAGE_SIZE, nr_sects << 9); - bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0); - nr_sects -= bi_size >> 9; - sector += bi_size >> 9; - if (bi_size < sz) - break; - } - cond_resched(); - } + return ret; - *biop = bio; -out: - return ret; + return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, + biop); } EXPORT_SYMBOL(__blkdev_issue_zeroout); @@ -360,18 +365,49 @@ EXPORT_SYMBOL(__blkdev_issue_zeroout); int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, unsigned flags) { - int ret; - struct bio *bio = NULL; + int ret = 0; + sector_t bs_mask; + struct bio *bio; struct blk_plug plug; + bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev); + bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; + if ((sector | nr_sects) & bs_mask) + return -EINVAL; + +retry: + bio = NULL; blk_start_plug(&plug); - ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, - &bio, flags); + if (try_write_zeroes) { + ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, + gfp_mask, &bio, flags); + } else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) { + ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects, + gfp_mask, &bio); + } else { + /* No zeroing offload support */ + ret = -EOPNOTSUPP; + } if (ret == 0 && bio) { ret = submit_bio_wait(bio); bio_put(bio); } blk_finish_plug(&plug); + if (ret && try_write_zeroes) { + if (!(flags & BLKDEV_ZERO_NOFALLBACK)) { + try_write_zeroes = false; + goto retry; + } + if (!bdev_write_zeroes_sectors(bdev)) { + /* + * Zeroing offload support was indicated, but the + * device reported ILLEGAL REQUEST (for some devices + * there is no non-destructive way to verify whether + * WRITE ZEROES is actually supported).
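A hedged usage sketch for the rework above (the device, range, and function name are illustrative): callers simply invoke blkdev_issue_zeroout() and, thanks to the retry logic, transparently fall back from WRITE ZEROES to ZERO_PAGE-backed writes when the device rejects the offload.

#include <linux/blkdev.h>

/* Zero the first 1 MiB of bdev: 2048 sectors of 512 bytes. */
static int zero_first_mib(struct block_device *bdev)
{
	return blkdev_issue_zeroout(bdev, 0 /* sector */,
				    2048 /* nr_sects */, GFP_KERNEL,
				    0 /* flags: fallback allowed */);
}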
+ */ + ret = -EOPNOTSUPP; + } + } return ret; } diff --git a/block/blk-map.c b/block/blk-map.c index d5251edcc0dd..e31be14da8ea 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -12,22 +12,29 @@ #include "blk.h" /* - * Append a bio to a passthrough request. Only works can be merged into - * the request based on the driver constraints. + * Append a bio to a passthrough request. Only works if the bio can be merged + * into the request based on the driver constraints. */ -int blk_rq_append_bio(struct request *rq, struct bio *bio) +int blk_rq_append_bio(struct request *rq, struct bio **bio) { - blk_queue_bounce(rq->q, &bio); + struct bio *orig_bio = *bio; + + blk_queue_bounce(rq->q, bio); if (!rq->bio) { - blk_rq_bio_prep(rq->q, rq, bio); + blk_rq_bio_prep(rq->q, rq, *bio); } else { - if (!ll_back_merge_fn(rq->q, rq, bio)) + if (!ll_back_merge_fn(rq->q, rq, *bio)) { + if (orig_bio != *bio) { + bio_put(*bio); + *bio = orig_bio; + } return -EINVAL; + } - rq->biotail->bi_next = bio; - rq->biotail = bio; - rq->__data_len += bio->bi_iter.bi_size; + rq->biotail->bi_next = *bio; + rq->biotail = *bio; + rq->__data_len += (*bio)->bi_iter.bi_size; } return 0; @@ -80,14 +87,12 @@ static int __blk_rq_map_user_iov(struct request *rq, * We link the bounce buffer in and could have to traverse it * later so we have to get a ref to prevent it from being freed */ - ret = blk_rq_append_bio(rq, bio); - bio_get(bio); + ret = blk_rq_append_bio(rq, &bio); if (ret) { - bio_endio(bio); __blk_rq_unmap_user(orig_bio); - bio_put(bio); return ret; } + bio_get(bio); return 0; } @@ -121,7 +126,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, unsigned long align = q->dma_pad_mask | queue_dma_alignment(q); struct bio *bio = NULL; struct iov_iter i; - int ret; + int ret = -EINVAL; if (!iter_is_iovec(iter)) goto fail; @@ -150,7 +155,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, __blk_rq_unmap_user(bio); fail: rq->bio = NULL; - return -EINVAL; + return ret; } EXPORT_SYMBOL(blk_rq_map_user_iov); @@ -220,7 +225,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, int reading = rq_data_dir(rq) == READ; unsigned long addr = (unsigned long) kbuf; int do_copy = 0; - struct bio *bio; + struct bio *bio, *orig_bio; int ret; if (len > (queue_max_hw_sectors(q) << 9)) @@ -243,10 +248,11 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, if (do_copy) rq->rq_flags |= RQF_COPY_USER; - ret = blk_rq_append_bio(rq, bio); + orig_bio = bio; + ret = blk_rq_append_bio(rq, &bio); if (unlikely(ret)) { /* request is too big */ - bio_put(bio); + bio_put(orig_bio); return ret; } diff --git a/block/blk-merge.c b/block/blk-merge.c index f5dedd57dff6..8d60a5bbcef9 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -551,6 +551,24 @@ static bool req_no_special_merge(struct request *req) return !q->mq_ops && req->special; } +static bool req_attempt_discard_merge(struct request_queue *q, struct request *req, + struct request *next) +{ + unsigned short segments = blk_rq_nr_discard_segments(req); + + if (segments >= queue_max_discard_segments(q)) + goto no_merge; + if (blk_rq_sectors(req) + bio_sectors(next->bio) > + blk_rq_get_max_sectors(req, blk_rq_pos(req))) + goto no_merge; + + req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next); + return true; +no_merge: + req_set_nomerge(q, req); + return false; +} + static int ll_merge_requests_fn(struct request_queue *q, struct request *req, struct request *next) { @@ -684,9 +702,13 @@ 
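A sketch of the reworked blk_rq_append_bio() calling convention shown above (the wrapper name is hypothetical): because bouncing may swap in a different bio, callers now pass the bio by reference and keep the original pointer for cleanup, mirroring what blk_rq_map_kern() does after this change.

static int my_append_bio(struct request *rq, struct bio *bio)
{
	struct bio *orig_bio = bio;
	int ret;

	ret = blk_rq_append_bio(rq, &bio);
	if (ret) {
		/* on failure the helper has already dropped its bounce bio
		 * and restored *bio, so only the original needs freeing */
		bio_put(orig_bio);
		return ret;
	}
	return 0;
}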
static struct request *attempt_merge(struct request_queue *q, * If we are allowed to merge, then append bio list * from next to rq and release next. merge_requests_fn * will have updated segment counts, update sector - * counts here. + * counts here. Handle DISCARDs separately, as they + * have separate settings. */ - if (!ll_merge_requests_fn(q, req, next)) + if (req_op(req) == REQ_OP_DISCARD) { + if (!req_attempt_discard_merge(q, req, next)) + return NULL; + } else if (!ll_merge_requests_fn(q, req, next)) return NULL; /* @@ -716,7 +738,8 @@ static struct request *attempt_merge(struct request_queue *q, req->__data_len += blk_rq_bytes(next); - elv_merge_requests(q, req, next); + if (req_op(req) != REQ_OP_DISCARD) + elv_merge_requests(q, req, next); /* * 'next' is going away, so update stats accordingly diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 9f8cffc8a701..3eb169f15842 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -16,11 +16,6 @@ static int cpu_to_queue_index(unsigned int nr_queues, const int cpu) { - /* - * Non present CPU will be mapped to queue index 0. - */ - if (!cpu_present(cpu)) - return 0; return cpu % nr_queues; } diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index de294d775acf..d95439154556 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -704,7 +704,11 @@ static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf, const struct blk_mq_debugfs_attr *attr = m->private; void *data = d_inode(file->f_path.dentry->d_parent)->i_private; - if (!attr->write) + /* + * Attributes that only implement .seq_ops are read-only and 'attr' is + * the same as 'data' in this case. + */ + if (attr == data || !attr->write) return -EPERM; return attr->write(data, buf, count, ppos); diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 4ab69435708c..eca011fdfa0e 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -94,7 +94,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) struct request_queue *q = hctx->queue; struct elevator_queue *e = q->elevator; const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request; - bool did_work = false; + bool do_sched_dispatch = true; LIST_HEAD(rq_list); /* RCU or SRCU read lock is needed before checking quiesced flag */ @@ -125,18 +125,18 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) */ if (!list_empty(&rq_list)) { blk_mq_sched_mark_restart_hctx(hctx); - did_work = blk_mq_dispatch_rq_list(q, &rq_list); + do_sched_dispatch = blk_mq_dispatch_rq_list(q, &rq_list); } else if (!has_sched_dispatch) { blk_mq_flush_busy_ctxs(hctx, &rq_list); blk_mq_dispatch_rq_list(q, &rq_list); } /* - * We want to dispatch from the scheduler if we had no work left - * on the dispatch list, OR if we did have work but weren't able - * to make progress. + * We want to dispatch from the scheduler if there was nothing + * on the dispatch list or we were able to dispatch from the + * dispatch list.
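A condensed restatement (a sketch only, not the exact kernel check) of the discard-merge accounting introduced above: every discard range counts as one segment, and a merge should be refused once the combined count would exceed the queue's limit.

static bool discard_merge_fits(struct request_queue *q,
			       struct request *req, struct request *next)
{
	unsigned short segs = blk_rq_nr_discard_segments(req) +
			      blk_rq_nr_discard_segments(next);

	/* each discarded range is one segment; respect the device limit */
	return segs <= queue_max_discard_segments(q);
}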
*/ - if (!did_work && has_sched_dispatch) { + if (do_sched_dispatch && has_sched_dispatch) { do { struct request *rq; diff --git a/block/blk-mq.c b/block/blk-mq.c index 98a18609755e..49979c095f31 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -118,6 +118,25 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part, blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); } +static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, + bool reserved) +{ + struct mq_inflight *mi = priv; + + if (rq->part == mi->part) + mi->inflight[rq_data_dir(rq)]++; +} + +void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part, + unsigned int inflight[2]) +{ + struct mq_inflight mi = { .part = part, .inflight = inflight, }; + + inflight[0] = inflight[1] = 0; + blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi); +} + void blk_freeze_queue_start(struct request_queue *q) { int freeze_depth; @@ -159,6 +178,8 @@ void blk_freeze_queue(struct request_queue *q) * exported to drivers as the only user for unfreeze is blk_mq. */ blk_freeze_queue_start(q); + if (!q->mq_ops) + blk_drain_queue(q); blk_mq_freeze_queue_wait(q); } @@ -636,7 +657,6 @@ static void __blk_mq_requeue_request(struct request *rq) trace_block_rq_requeue(q, rq); wbt_requeue(q->rq_wb, &rq->issue_stat); - blk_mq_sched_requeue_request(rq); if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { if (q->dma_drain_size && blk_rq_bytes(rq)) @@ -648,6 +668,9 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) { __blk_mq_requeue_request(rq); + /* this request will be re-inserted into the io scheduler queue */ + blk_mq_sched_requeue_request(rq); + BUG_ON(blk_queued_rq(rq)); blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); } @@ -1139,9 +1162,27 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) /* * We should be running this queue from one of the CPUs that * are mapped to it. + * + * There are at least two related races now between setting + * hctx->next_cpu from blk_mq_hctx_next_cpu() and running + * __blk_mq_run_hw_queue(): + * + * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(), + * but later it becomes online; in that case this warning is + * harmless + * + * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(), + * but later it becomes offline; then the warning can't be + * triggered, and we depend on the blk-mq timeout handler to + * handle requests dispatched to this hctx */ - WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) && - cpu_online(hctx->next_cpu)); + if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) && + cpu_online(hctx->next_cpu)) { + printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n", + raw_smp_processor_id(), + cpumask_empty(hctx->cpumask) ? "inactive": "active"); + dump_stack(); + } /* * We can't run the queue inline with ints disabled.
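For the per-direction accounting added at the top of this hunk, a minimal caller sketch (the function name is illustrative) of blk_mq_in_flight_rw(), which genhd.c later wires up behind part_in_flight_rw():

static void show_part_inflight(struct request_queue *q,
			       struct hd_struct *part)
{
	unsigned int inflight[2];

	blk_mq_in_flight_rw(q, part, inflight);
	/* index 0 counts reads, index 1 counts writes (rq_data_dir) */
	pr_debug("in-flight: %u reads, %u writes\n",
		 inflight[0], inflight[1]);
}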
Ensure that @@ -1924,7 +1965,8 @@ static void blk_mq_exit_hctx(struct request_queue *q, { blk_mq_debugfs_unregister_hctx(hctx); - blk_mq_tag_idle(hctx); + if (blk_mq_hw_queue_mapped(hctx)) + blk_mq_tag_idle(hctx); if (set->ops->exit_request) set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); @@ -2210,7 +2252,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q) mutex_lock(&set->tag_list_lock); list_del_rcu(&q->tag_set_list); - INIT_LIST_HEAD(&q->tag_set_list); if (list_is_singular(&set->tag_list)) { /* just transitioned to unshared */ set->flags &= ~BLK_MQ_F_TAG_SHARED; @@ -2218,8 +2259,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q) blk_mq_update_tag_set_depth(set, false); } mutex_unlock(&set->tag_list_lock); - synchronize_rcu(); + INIT_LIST_HEAD(&q->tag_set_list); } static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, @@ -2310,6 +2351,9 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; blk_mq_sysfs_unregister(q); + + /* protect against switching io scheduler */ + mutex_lock(&q->sysfs_lock); for (i = 0; i < set->nr_hw_queues; i++) { int node; @@ -2354,6 +2398,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, } } q->nr_hw_queues = i; + mutex_unlock(&q->sysfs_lock); blk_mq_sysfs_register(q); } @@ -2524,9 +2569,27 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) static int blk_mq_update_queue_map(struct blk_mq_tag_set *set) { - if (set->ops->map_queues) + if (set->ops->map_queues) { + int cpu; + /* + * transport .map_queues is usually done in the following + * way: + * + * for (queue = 0; queue < set->nr_hw_queues; queue++) { + * mask = get_cpu_mask(queue) + * for_each_cpu(cpu, mask) + * set->mq_map[cpu] = queue; + * } + * + * When we need to remap, the table has to be cleared + * to kill stale mappings, since a CPU may not be mapped + * to any hw queue. + */ + for_each_possible_cpu(cpu) + set->mq_map[cpu] = 0; + return set->ops->map_queues(set); - else + } else return blk_mq_map_queues(set); } diff --git a/block/blk-mq.h b/block/blk-mq.h index 4933af9d61f7..877237e09083 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -136,6 +136,8 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) } void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part, - unsigned int inflight[2]); + unsigned int inflight[2]); +void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part, + unsigned int inflight[2]); #endif diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 8631763866c6..a8cd7b3d9647 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -2223,13 +2223,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, out_unlock: spin_unlock_irq(q->queue_lock); out: - /* - * As multiple blk-throtls may stack in the same issue path, we - * don't want bios to leave with the flag set. Clear the flag if - * being issued.
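To make the comment above concrete, here is a hypothetical transport-side .map_queues in the shape it assumes; it only writes entries for the CPUs it knows about, which is exactly why the core now pre-clears mq_map before calling it.

static int my_transport_map_queues(struct blk_mq_tag_set *set)
{
	int queue, cpu;

	for (queue = 0; queue < set->nr_hw_queues; queue++) {
		/* illustrative policy: one CPU per hardware queue */
		for_each_cpu(cpu, get_cpu_mask(queue))
			set->mq_map[cpu] = queue;
	}
	return 0;
}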
- */ - if (!throttled) - bio_clear_flag(bio, BIO_THROTTLED); + bio_set_flag(bio, BIO_THROTTLED); #ifdef CONFIG_BLK_DEV_THROTTLING_LOW if (throttled || !td->track_bio_latency) diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 17ec83bb0900..6427be7ac363 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -134,8 +134,6 @@ void blk_timeout_work(struct work_struct *work) struct request *rq, *tmp; int next_set = 0; - if (blk_queue_enter(q, true)) - return; spin_lock_irqsave(q->queue_lock, flags); list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) @@ -145,7 +143,6 @@ void blk_timeout_work(struct work_struct *work) mod_timer(&q->timeout, round_jiffies_up(next)); spin_unlock_irqrestore(q->queue_lock, flags); - blk_queue_exit(q); } /** diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 6a9a0f03a67b..5c105514bca7 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -654,7 +654,7 @@ void wbt_set_write_cache(struct rq_wb *rwb, bool write_cache_on) } /* - * Disable wbt, if enabled by default. Only called from CFQ. + * Disable wbt, if enabled by default. */ void wbt_disable_default(struct request_queue *q) { @@ -698,7 +698,15 @@ u64 wbt_default_latency_nsec(struct request_queue *q) static int wbt_data_dir(const struct request *rq) { - return rq_data_dir(rq); + const int op = req_op(rq); + + if (op == REQ_OP_READ) + return READ; + else if (op == REQ_OP_WRITE || op == REQ_OP_FLUSH) + return WRITE; + + /* don't account */ + return -1; } int wbt_init(struct request_queue *q) diff --git a/block/blk-zoned.c b/block/blk-zoned.c index ff57fb51b338..77fce6f09f78 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -286,7 +286,11 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, if (!rep.nr_zones) return -EINVAL; - zones = kcalloc(rep.nr_zones, sizeof(struct blk_zone), GFP_KERNEL); + if (rep.nr_zones > INT_MAX / sizeof(struct blk_zone)) + return -ERANGE; + + zones = kvmalloc(rep.nr_zones * sizeof(struct blk_zone), + GFP_KERNEL | __GFP_ZERO); if (!zones) return -ENOMEM; @@ -308,7 +312,7 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, } out: - kfree(zones); + kvfree(zones); return ret; } diff --git a/block/blk.h b/block/blk.h index 85be8b232b37..b2c287c2c6a3 100644 --- a/block/blk.h +++ b/block/blk.h @@ -362,4 +362,6 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) } #endif /* CONFIG_BOUNCE */ +extern void blk_drain_queue(struct request_queue *q); + #endif /* BLK_INTERNAL_H */ diff --git a/block/bounce.c b/block/bounce.c index fceb1a96480b..1d05c422c932 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -200,6 +200,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, unsigned i = 0; bool bounce = false; int sectors = 0; + bool passthrough = bio_is_passthrough(*bio_orig); bio_for_each_segment(from, *bio_orig, iter) { if (i++ < BIO_MAX_PAGES) @@ -210,13 +211,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, if (!bounce) return; - if (sectors < bio_sectors(*bio_orig)) { + if (!passthrough && sectors < bio_sectors(*bio_orig)) { bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split); bio_chain(bio, *bio_orig); generic_make_request(*bio_orig); *bio_orig = bio; } - bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set); + bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? 
NULL : + bounce_bio_set); bio_for_each_segment_all(to, bio, i) { struct page *page = to->bv_page; diff --git a/block/genhd.c b/block/genhd.c index dd305c65ffb0..8e612efcba5b 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -82,6 +82,18 @@ void part_in_flight(struct request_queue *q, struct hd_struct *part, } } +void part_in_flight_rw(struct request_queue *q, struct hd_struct *part, + unsigned int inflight[2]) +{ + if (q->mq_ops) { + blk_mq_in_flight_rw(q, part, inflight); + return; + } + + inflight[0] = atomic_read(&part->in_flight[0]); + inflight[1] = atomic_read(&part->in_flight[1]); +} + struct hd_struct *__disk_get_part(struct gendisk *disk, int partno) { struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl); @@ -1220,6 +1232,7 @@ static void disk_release(struct device *dev) struct class block_class = { .name = "block", }; +EXPORT_SYMBOL_GPL(block_class); static char *block_devnode(struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid) diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index f58cab82105b..09cd5cf2e459 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -814,6 +814,7 @@ static struct elevator_type kyber_sched = { .limit_depth = kyber_limit_depth, .prepare_request = kyber_prepare_request, .finish_request = kyber_finish_request, + .requeue_request = kyber_finish_request, .completed_request = kyber_completed_request, .dispatch_request = kyber_dispatch_request, .has_work = kyber_has_work, diff --git a/block/partition-generic.c b/block/partition-generic.c index 91622db9aedf..db57cced9b98 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -51,6 +51,12 @@ const char *bdevname(struct block_device *bdev, char *buf) EXPORT_SYMBOL(bdevname); +const char *bio_devname(struct bio *bio, char *buf) +{ + return disk_name(bio->bi_disk, bio->bi_partno, buf); +} +EXPORT_SYMBOL(bio_devname); + /* * There's very little reason to use this, you should really * have a struct block_device just about everywhere and use @@ -139,13 +145,15 @@ ssize_t part_stat_show(struct device *dev, jiffies_to_msecs(part_stat_read(p, time_in_queue))); } -ssize_t part_inflight_show(struct device *dev, - struct device_attribute *attr, char *buf) +ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct hd_struct *p = dev_to_part(dev); + struct request_queue *q = part_to_disk(p)->queue; + unsigned int inflight[2]; - return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]), - atomic_read(&p->in_flight[1])); + part_in_flight_rw(q, p, inflight); + return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]); } #ifdef CONFIG_FAIL_MAKE_REQUEST diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c index 0af3a3db6fb0..82c44f7df911 100644 --- a/block/partitions/msdos.c +++ b/block/partitions/msdos.c @@ -301,7 +301,9 @@ static void parse_bsd(struct parsed_partitions *state, continue; bsd_start = le32_to_cpu(p->p_offset); bsd_size = le32_to_cpu(p->p_size); - if (memcmp(flavour, "bsd\0", 4) == 0) + /* FreeBSD has relative offset if C partition offset is zero */ + if (memcmp(flavour, "bsd\0", 4) == 0 && + le32_to_cpu(l->d_partitions[2].p_offset) == 0) bsd_start += offset; if (offset == bsd_start && size == bsd_size) /* full parent partition, we have it already */ diff --git a/build.config.cuttlefish.x86_64 b/build.config.cuttlefish.x86_64 new file mode 100644 index 000000000000..694ed56a5f47 --- /dev/null +++ b/build.config.cuttlefish.x86_64 @@ -0,0 +1,16 @@ +ARCH=x86_64 +BRANCH=android-4.14 
+CLANG_TRIPLE=x86_64-linux-gnu- +CROSS_COMPILE=x86_64-linux-androidkernel- +DEFCONFIG=x86_64_cuttlefish_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +POST_DEFCONFIG_CMDS="check_defconfig" +CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r328903/bin +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin +FILES=" +arch/x86/boot/bzImage +vmlinux +System.map +" +STOP_SHIP_TRACEPRINTK=1 diff --git a/build.config.goldfish.arm b/build.config.goldfish.arm new file mode 100644 index 000000000000..ff5646ab4f40 --- /dev/null +++ b/build.config.goldfish.arm @@ -0,0 +1,13 @@ +ARCH=arm +BRANCH=android-4.4 +CROSS_COMPILE=arm-linux-androidkernel- +DEFCONFIG=ranchu_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9/bin +FILES=" +arch/arm/boot/zImage +vmlinux +System.map +" +STOP_SHIP_TRACEPRINTK=1 diff --git a/build.config.goldfish.arm64 b/build.config.goldfish.arm64 new file mode 100644 index 000000000000..4c896a679ab9 --- /dev/null +++ b/build.config.goldfish.arm64 @@ -0,0 +1,13 @@ +ARCH=arm64 +BRANCH=android-4.4 +CROSS_COMPILE=aarch64-linux-android- +DEFCONFIG=ranchu64_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin +FILES=" +arch/arm64/boot/Image +vmlinux +System.map +" +STOP_SHIP_TRACEPRINTK=1 diff --git a/build.config.goldfish.mips b/build.config.goldfish.mips new file mode 100644 index 000000000000..9a14a444ac14 --- /dev/null +++ b/build.config.goldfish.mips @@ -0,0 +1,12 @@ +ARCH=mips +BRANCH=android-4.4 +CROSS_COMPILE=mips64el-linux-android- +DEFCONFIG=ranchu_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/mips/mips64el-linux-android-4.9/bin +FILES=" +vmlinux +System.map +" +STOP_SHIP_TRACEPRINTK=1 diff --git a/build.config.goldfish.mips64 b/build.config.goldfish.mips64 new file mode 100644 index 000000000000..6ad9759f5f4a --- /dev/null +++ b/build.config.goldfish.mips64 @@ -0,0 +1,12 @@ +ARCH=mips +BRANCH=android-4.4 +CROSS_COMPILE=mips64el-linux-android- +DEFCONFIG=ranchu64_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/mips/mips64el-linux-android-4.9/bin +FILES=" +vmlinux +System.map +" +STOP_SHIP_TRACEPRINTK=1 diff --git a/build.config.goldfish.x86 b/build.config.goldfish.x86 new file mode 100644 index 000000000000..2266c621835e --- /dev/null +++ b/build.config.goldfish.x86 @@ -0,0 +1,13 @@ +ARCH=x86 +BRANCH=android-4.4 +CROSS_COMPILE=x86_64-linux-android- +DEFCONFIG=i386_ranchu_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin +FILES=" +arch/x86/boot/bzImage +vmlinux +System.map +" +STOP_SHIP_TRACEPRINTK=1 diff --git a/build.config.goldfish.x86_64 b/build.config.goldfish.x86_64 new file mode 100644 index 000000000000..08c42c2eba03 --- /dev/null +++ b/build.config.goldfish.x86_64 @@ -0,0 +1,13 @@ +ARCH=x86_64 +BRANCH=android-4.4 +CROSS_COMPILE=x86_64-linux-android- +DEFCONFIG=x86_64_ranchu_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin +FILES=" +arch/x86/boot/bzImage +vmlinux +System.map +" +STOP_SHIP_TRACEPRINTK=1 diff --git a/certs/system_keyring.c b/certs/system_keyring.c index 6251d1b27f0c..0e1ea235c12a 100644 --- a/certs/system_keyring.c +++ 
b/certs/system_keyring.c @@ -263,5 +263,46 @@ int verify_pkcs7_signature(const void *data, size_t len, return ret; } EXPORT_SYMBOL_GPL(verify_pkcs7_signature); - #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ + +/** + * verify_signature_one - Verify a signature with keys from given keyring + * @sig: The signature to be verified + * @trusted_keys: Trusted keys to use (NULL for builtin trusted keys only, + * (void *)1UL for all trusted keys). + * @keyid: key description (not partial) + */ +int verify_signature_one(const struct public_key_signature *sig, + struct key *trusted_keys, const char *keyid) +{ + key_ref_t ref; + struct key *key; + int ret; + + if (!sig) + return -EBADMSG; + if (!trusted_keys) { + trusted_keys = builtin_trusted_keys; + } else if (trusted_keys == (void *)1UL) { +#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING + trusted_keys = secondary_trusted_keys; +#else + trusted_keys = builtin_trusted_keys; +#endif + } + + ref = keyring_search(make_key_ref(trusted_keys, 1), + &key_type_asymmetric, keyid); + if (IS_ERR(ref)) { + pr_err("Asymmetric key (%s) not found in keyring(%s)\n", + keyid, trusted_keys->description); + return -ENOKEY; + } + + key = key_ref_to_ptr(ref); + ret = verify_signature(key, sig); + key_put(key); + return ret; +} +EXPORT_SYMBOL_GPL(verify_signature_one); + diff --git a/crypto/Kconfig b/crypto/Kconfig index ac5fb37e6f4b..8239f27299f0 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -130,7 +130,7 @@ config CRYPTO_DH config CRYPTO_ECDH tristate "ECDH algorithm" - select CRYTPO_KPP + select CRYPTO_KPP select CRYPTO_RNG_DEFAULT help Generic implementation of the ECDH algorithm @@ -1324,32 +1324,6 @@ config CRYPTO_SALSA20 The Salsa20 stream cipher algorithm is designed by Daniel J. Bernstein . See -config CRYPTO_SALSA20_586 - tristate "Salsa20 stream cipher algorithm (i586)" - depends on (X86 || UML_X86) && !64BIT - select CRYPTO_BLKCIPHER - help - Salsa20 stream cipher algorithm. - - Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT - Stream Cipher Project. See - - The Salsa20 stream cipher algorithm is designed by Daniel J. - Bernstein . See - -config CRYPTO_SALSA20_X86_64 - tristate "Salsa20 stream cipher algorithm (x86_64)" - depends on (X86 || UML_X86) && 64BIT - select CRYPTO_BLKCIPHER - help - Salsa20 stream cipher algorithm. - - Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT - Stream Cipher Project. See - - The Salsa20 stream cipher algorithm is designed by Daniel J. - Bernstein . See - config CRYPTO_CHACHA20 tristate "ChaCha20 cipher algorithm" select CRYPTO_BLKCIPHER @@ -1494,6 +1468,20 @@ config CRYPTO_SERPENT_AVX2_X86_64 See also: +config CRYPTO_SPECK + tristate "Speck cipher algorithm" + select CRYPTO_ALGAPI + help + Speck is a lightweight block cipher that is tuned for optimal + performance in software (rather than hardware). + + Speck may not be as secure as AES, and should only be used on systems + where AES is not fast enough. + + See also: + + If unsure, say N. 
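A hypothetical caller of the verify_signature_one() helper added above (the key description string is made up for illustration): passing (void *)1UL widens the search to the secondary keyring when CONFIG_SECONDARY_TRUSTED_KEYRING is enabled, while NULL restricts it to the builtin keys.

static int check_fw_signature(const struct public_key_signature *sig)
{
	return verify_signature_one(sig, (void *)1UL,
				    "Acme FW key: 0123456789abcdef");
}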
+ config CRYPTO_TEA tristate "TEA, XTEA and XETA cipher algorithms" select CRYPTO_ALGAPI diff --git a/crypto/Makefile b/crypto/Makefile index da190be60ce2..59220c2264e1 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -98,6 +98,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o +CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356 obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o @@ -108,6 +109,7 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o obj-$(CONFIG_CRYPTO_SEED) += seed.o +obj-$(CONFIG_CRYPTO_SPECK) += speck.o obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index d880a4897159..4ee7c041bb82 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -71,11 +71,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len) return max(start, end_page); } -static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk, - unsigned int bsize) +static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk, + unsigned int n) { - unsigned int n = bsize; - for (;;) { unsigned int len_this_page = scatterwalk_pagelen(&walk->out); @@ -87,17 +85,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk, n -= len_this_page; scatterwalk_start(&walk->out, sg_next(walk->out.sg)); } - - return bsize; } -static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk, - unsigned int n) +static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk, + unsigned int n) { scatterwalk_advance(&walk->in, n); scatterwalk_advance(&walk->out, n); - - return n; } static int ablkcipher_walk_next(struct ablkcipher_request *req, @@ -107,39 +101,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req, struct ablkcipher_walk *walk, int err) { struct crypto_tfm *tfm = req->base.tfm; - unsigned int nbytes = 0; + unsigned int n; /* bytes processed */ + bool more; - if (likely(err >= 0)) { - unsigned int n = walk->nbytes - err; + if (unlikely(err < 0)) + goto finish; - if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) - n = ablkcipher_done_fast(walk, n); - else if (WARN_ON(err)) { - err = -EINVAL; - goto err; - } else - n = ablkcipher_done_slow(walk, n); + n = walk->nbytes - err; + walk->total -= n; + more = (walk->total != 0); - nbytes = walk->total - n; - err = 0; + if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) { + ablkcipher_done_fast(walk, n); + } else { + if (WARN_ON(err)) { + /* unexpected case; didn't process all bytes */ + err = -EINVAL; + goto finish; + } + ablkcipher_done_slow(walk, n); } - scatterwalk_done(&walk->in, 0, nbytes); - scatterwalk_done(&walk->out, 1, nbytes); - -err: - walk->total = nbytes; - walk->nbytes = nbytes; + scatterwalk_done(&walk->in, 0, more); + scatterwalk_done(&walk->out, 1, more); - if (nbytes) { + if (more) { crypto_yield(req->base.flags); return ablkcipher_walk_next(req, walk); } - + err = 0; +finish: + walk->nbytes = 0; if (walk->iv != req->info) memcpy(req->info, walk->iv, 
tfm->crt_ablkcipher.ivsize); kfree(walk->iv_buffer); - return err; } EXPORT_SYMBOL_GPL(ablkcipher_walk_done); diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 337cf382718e..42dfdd1fd6d8 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent); static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { - const u32 forbidden = CRYPTO_ALG_INTERNAL; + const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct sockaddr_alg *sa = (void *)uaddr; @@ -164,6 +164,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (addr_len < sizeof(*sa)) return -EINVAL; + /* If caller uses non-allowed flag, return error. */ + if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed)) + return -EINVAL; + sa->salg_type[sizeof(sa->salg_type) - 1] = 0; sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0; @@ -176,9 +180,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (IS_ERR(type)) return PTR_ERR(type); - private = type->bind(sa->salg_name, - sa->salg_feat & ~forbidden, - sa->salg_mask & ~forbidden); + private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask); if (IS_ERR(private)) { module_put(type->owner); return PTR_ERR(private); @@ -691,7 +693,7 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq) unsigned int i; list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) { - ctx->rcvused -= rsgl->sg_num_bytes; + atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused); af_alg_free_sg(&rsgl->sgl); list_del(&rsgl->list); if (rsgl != &areq->first_rsgl) @@ -699,14 +701,15 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq) } tsgl = areq->tsgl; - for_each_sg(tsgl, sg, areq->tsgl_entries, i) { - if (!sg_page(sg)) - continue; - put_page(sg_page(sg)); - } + if (tsgl) { + for_each_sg(tsgl, sg, areq->tsgl_entries, i) { + if (!sg_page(sg)) + continue; + put_page(sg_page(sg)); + } - if (areq->tsgl && areq->tsgl_entries) sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl)); + } } EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls); @@ -1047,6 +1050,18 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page, } EXPORT_SYMBOL_GPL(af_alg_sendpage); +/** + * af_alg_free_resources - release resources required for crypto request + */ +void af_alg_free_resources(struct af_alg_async_req *areq) +{ + struct sock *sk = areq->sk; + + af_alg_free_areq_sgls(areq); + sock_kfree_s(sk, areq, areq->areqlen); +} +EXPORT_SYMBOL_GPL(af_alg_free_resources); + /** * af_alg_async_cb - AIO callback handler * @@ -1063,18 +1078,13 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err) struct kiocb *iocb = areq->iocb; unsigned int resultlen; - lock_sock(sk); - /* Buffer size written by crypto operation. */ resultlen = areq->outlen; - af_alg_free_areq_sgls(areq); - sock_kfree_s(sk, areq, areq->areqlen); - __sock_put(sk); + af_alg_free_resources(areq); + sock_put(sk); iocb->ki_complete(iocb, err ? 
err : resultlen, 0); - - release_sock(sk); } EXPORT_SYMBOL_GPL(af_alg_async_cb); @@ -1157,12 +1167,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, if (!af_alg_readable(sk)) break; - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); - if (err) - return err; - } - seglen = min_t(size_t, (maxsize - len), msg_data_left(msg)); @@ -1179,8 +1183,10 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, /* make one iovec available as scatterlist */ err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); - if (err < 0) + if (err < 0) { + rsgl->sg_num_bytes = 0; return err; + } /* chain the new scatterlist with previous one */ if (areq->last_rsgl) @@ -1188,7 +1194,7 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, areq->last_rsgl = rsgl; len += err; - ctx->rcvused += err; + atomic_add(err, &ctx->rcvused); rsgl->sg_num_bytes = err; iov_iter_advance(&msg->msg_iter, err); } diff --git a/crypto/ahash.c b/crypto/ahash.c index 5e8666e6ccae..3980e9e45289 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -92,13 +92,14 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) if (nbytes && walk->offset & alignmask && !err) { walk->offset = ALIGN(walk->offset, alignmask + 1); - walk->data += walk->offset; - nbytes = min(nbytes, ((unsigned int)(PAGE_SIZE)) - walk->offset); walk->entrylen -= nbytes; - return nbytes; + if (nbytes) { + walk->data += walk->offset; + return nbytes; + } } if (walk->flags & CRYPTO_ALG_ASYNC) @@ -193,11 +194,18 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { unsigned long alignmask = crypto_ahash_alignmask(tfm); + int err; if ((unsigned long)key & alignmask) - return ahash_setkey_unaligned(tfm, key, keylen); + err = ahash_setkey_unaligned(tfm, key, keylen); + else + err = tfm->setkey(tfm, key, keylen); + + if (err) + return err; - return tfm->setkey(tfm, key, keylen); + crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); + return 0; } EXPORT_SYMBOL_GPL(crypto_ahash_setkey); @@ -370,7 +378,12 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup); int crypto_ahash_digest(struct ahash_request *req) { - return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_ahash_op(req, tfm->digest); } EXPORT_SYMBOL_GPL(crypto_ahash_digest); @@ -456,7 +469,6 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) struct ahash_alg *alg = crypto_ahash_alg(hash); hash->setkey = ahash_nosetkey; - hash->has_setkey = false; hash->export = ahash_no_export; hash->import = ahash_no_import; @@ -471,7 +483,8 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) if (alg->setkey) { hash->setkey = alg->setkey; - hash->has_setkey = true; + if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) + crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY); } if (alg->export) hash->export = alg->export; @@ -655,5 +668,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(ahash_attr_alg); +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) +{ + struct crypto_alg *alg = &halg->base; + + if (alg->cra_type != &crypto_ahash_type) + return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); + + return __crypto_ahash_alg(alg)->setkey != NULL; +} +EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); diff --git 
a/crypto/algapi.c b/crypto/algapi.c index aa699ff6c876..50eb828db767 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -167,6 +167,18 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, spawn->alg = NULL; spawns = &inst->alg.cra_users; + + /* + * We may encounter an unregistered instance here, since + * an instance's spawns are set up prior to the instance + * being registered. An unregistered instance will have + * NULL ->cra_users.next, since ->cra_users isn't + * properly initialized until registration. But an + * unregistered instance cannot have any users, so treat + * it the same as ->cra_users being empty. + */ + if (spawns->next == NULL) + break; } } while ((spawns = crypto_more_spawns(alg, &stack, &top, &secondary_spawns))); diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 516b38c3a169..f138af18b500 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -101,16 +101,22 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, struct aead_tfm *aeadc = pask->private; struct crypto_aead *tfm = aeadc->aead; struct crypto_skcipher *null_tfm = aeadc->null_tfm; - unsigned int as = crypto_aead_authsize(tfm); + unsigned int i, as = crypto_aead_authsize(tfm); struct af_alg_async_req *areq; - struct af_alg_tsgl *tsgl; - struct scatterlist *src; + struct af_alg_tsgl *tsgl, *tmp; + struct scatterlist *rsgl_src, *tsgl_src = NULL; int err = 0; size_t used = 0; /* [in] TX bufs to be en/decrypted */ size_t outlen = 0; /* [out] RX bufs produced by kernel */ size_t usedpages = 0; /* [in] RX bufs to be used from user */ size_t processed = 0; /* [in] TX bufs to be consumed */ + if (!ctx->used) { + err = af_alg_wait_for_data(sk, flags); + if (err) + return err; + } + /* * Data length provided by caller via sendmsg/sendpage that has not * yet been processed. @@ -178,7 +184,22 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, } processed = used + ctx->aead_assoclen; - tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list); + list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) { + for (i = 0; i < tsgl->cur; i++) { + struct scatterlist *process_sg = tsgl->sg + i; + + if (!(process_sg->length) || !sg_page(process_sg)) + continue; + tsgl_src = process_sg; + break; + } + if (tsgl_src) + break; + } + if (processed && !tsgl_src) { + err = -EFAULT; + goto free; + } /* * Copy of AAD from source to destination @@ -194,7 +215,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, */ /* Use the RX SGL as source (and destination) for crypto op. */ - src = areq->first_rsgl.sgl.sg; + rsgl_src = areq->first_rsgl.sgl.sg; if (ctx->enc) { /* @@ -207,7 +228,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, * v v * RX SGL: AAD || PT || Tag */ - err = crypto_aead_copy_sgl(null_tfm, tsgl->sg, + err = crypto_aead_copy_sgl(null_tfm, tsgl_src, areq->first_rsgl.sgl.sg, processed); if (err) goto free; @@ -225,7 +246,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, */ /* Copy AAD || CT to RX SGL buffer for in-place operation. */ - err = crypto_aead_copy_sgl(null_tfm, tsgl->sg, + err = crypto_aead_copy_sgl(null_tfm, tsgl_src, areq->first_rsgl.sgl.sg, outlen); if (err) goto free; @@ -257,23 +278,34 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, areq->tsgl); } else /* no RX SGL present (e.g. 
authentication only) */ - src = areq->tsgl; + rsgl_src = areq->tsgl; } /* Initialize the crypto operation */ - aead_request_set_crypt(&areq->cra_u.aead_req, src, + aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src, areq->first_rsgl.sgl.sg, used, ctx->iv); aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen); aead_request_set_tfm(&areq->cra_u.aead_req, tfm); if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) { /* AIO operation */ + sock_hold(sk); areq->iocb = msg->msg_iocb; + + /* Remember output size that will be generated. */ + areq->outlen = outlen; + aead_request_set_callback(&areq->cra_u.aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, af_alg_async_cb, areq); err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) : crypto_aead_decrypt(&areq->cra_u.aead_req); + + /* AIO operation in progress */ + if (err == -EINPROGRESS || err == -EBUSY) + return -EIOCBQUEUED; + + sock_put(sk); } else { /* Synchronous operation */ aead_request_set_callback(&areq->cra_u.aead_req, @@ -285,19 +317,9 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, &ctx->completion); } - /* AIO operation in progress */ - if (err == -EINPROGRESS) { - sock_hold(sk); - - /* Remember output size that will be generated. */ - areq->outlen = outlen; - - return -EIOCBQUEUED; - } free: - af_alg_free_areq_sgls(areq); - sock_kfree_s(sk, areq, areq->areqlen); + af_alg_free_resources(areq); return err ? err : outlen; } @@ -487,6 +509,7 @@ static void aead_release(void *private) struct aead_tfm *tfm = private; crypto_free_aead(tfm->aead); + crypto_put_default_null_skcipher2(); kfree(tfm); } @@ -519,7 +542,6 @@ static void aead_sock_destruct(struct sock *sk) unsigned int ivlen = crypto_aead_ivsize(tfm); af_alg_pull_tsgl(sk, ctx->used, NULL, 0); - crypto_put_default_null_skcipher2(); sock_kzfree_s(sk, ctx->iv, ivlen); sock_kfree_s(sk, ctx, ctx->len); af_alg_release_parent(sk); @@ -549,7 +571,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; ctx->used = 0; - ctx->rcvused = 0; + atomic_set(&ctx->rcvused, 0); ctx->more = 0; ctx->merge = 0; ctx->enc = 0; diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 5e92bd275ef3..39cebd3256bf 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -34,11 +34,6 @@ struct hash_ctx { struct ahash_request req; }; -struct algif_hash_tfm { - struct crypto_ahash *hash; - bool has_key; -}; - static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx) { unsigned ds; @@ -309,7 +304,7 @@ static int hash_check_key(struct socket *sock) int err = 0; struct sock *psk; struct alg_sock *pask; - struct algif_hash_tfm *tfm; + struct crypto_ahash *tfm; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); @@ -323,7 +318,7 @@ static int hash_check_key(struct socket *sock) err = -ENOKEY; lock_sock_nested(psk, SINGLE_DEPTH_NESTING); - if (!tfm->has_key) + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) goto unlock; if (!pask->refcnt++) @@ -414,41 +409,17 @@ static struct proto_ops algif_hash_ops_nokey = { static void *hash_bind(const char *name, u32 type, u32 mask) { - struct algif_hash_tfm *tfm; - struct crypto_ahash *hash; - - tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); - if (!tfm) - return ERR_PTR(-ENOMEM); - - hash = crypto_alloc_ahash(name, type, mask); - if (IS_ERR(hash)) { - kfree(tfm); - return ERR_CAST(hash); - } - - tfm->hash = hash; - - return tfm; + return crypto_alloc_ahash(name, type, mask); } static void hash_release(void *private) { - struct algif_hash_tfm *tfm = private; - - 
crypto_free_ahash(tfm->hash); - kfree(tfm); + crypto_free_ahash(private); } static int hash_setkey(void *private, const u8 *key, unsigned int keylen) { - struct algif_hash_tfm *tfm = private; - int err; - - err = crypto_ahash_setkey(tfm->hash, key, keylen); - tfm->has_key = !err; - - return err; + return crypto_ahash_setkey(private, key, keylen); } static void hash_sock_destruct(struct sock *sk) @@ -463,11 +434,10 @@ static void hash_sock_destruct(struct sock *sk) static int hash_accept_parent_nokey(void *private, struct sock *sk) { - struct hash_ctx *ctx; + struct crypto_ahash *tfm = private; struct alg_sock *ask = alg_sk(sk); - struct algif_hash_tfm *tfm = private; - struct crypto_ahash *hash = tfm->hash; - unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash); + struct hash_ctx *ctx; + unsigned int len = sizeof(*ctx) + crypto_ahash_reqsize(tfm); ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) @@ -480,7 +450,7 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) ask->private = ctx; - ahash_request_set_tfm(&ctx->req, hash); + ahash_request_set_tfm(&ctx->req, tfm); ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, af_alg_complete, &ctx->completion); @@ -491,9 +461,9 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk) static int hash_accept_parent(void *private, struct sock *sk) { - struct algif_hash_tfm *tfm = private; + struct crypto_ahash *tfm = private; - if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash)) + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; return hash_accept_parent_nokey(private, sk); diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 8ae4170aaeb4..90bc4e0f0785 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -72,6 +72,12 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, int err = 0; size_t len = 0; + if (!ctx->used) { + err = af_alg_wait_for_data(sk, flags); + if (err) + return err; + } + /* Allocate cipher request for current operation. */ areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) + crypto_skcipher_reqsize(tfm)); @@ -117,13 +123,24 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) { /* AIO operation */ + sock_hold(sk); areq->iocb = msg->msg_iocb; + + /* Remember output size that will be generated. */ + areq->outlen = len; + skcipher_request_set_callback(&areq->cra_u.skcipher_req, CRYPTO_TFM_REQ_MAY_SLEEP, af_alg_async_cb, areq); err = ctx->enc ? crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) : crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); + + /* AIO operation in progress */ + if (err == -EINPROGRESS || err == -EBUSY) + return -EIOCBQUEUED; + + sock_put(sk); } else { /* Synchronous operation */ skcipher_request_set_callback(&areq->cra_u.skcipher_req, @@ -137,19 +154,9 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, &ctx->completion); } - /* AIO operation in progress */ - if (err == -EINPROGRESS) { - sock_hold(sk); - - /* Remember output size that will be generated. */ - areq->outlen = len; - - return -EIOCBQUEUED; - } free: - af_alg_free_areq_sgls(areq); - sock_kfree_s(sk, areq, areq->areqlen); + af_alg_free_resources(areq); return err ? 
err : len; } @@ -384,7 +391,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; ctx->used = 0; - ctx->rcvused = 0; + atomic_set(&ctx->rcvused, 0); ctx->more = 0; ctx->merge = 0; ctx->enc = 0; diff --git a/crypto/api.c b/crypto/api.c index 941cd4c6c7ec..2a2479d168aa 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "internal.h" LIST_HEAD(crypto_alg_list); @@ -595,5 +596,17 @@ int crypto_has_alg(const char *name, u32 type, u32 mask) } EXPORT_SYMBOL_GPL(crypto_has_alg); +void crypto_req_done(struct crypto_async_request *req, int err) +{ + struct crypto_wait *wait = req->data; + + if (err == -EINPROGRESS) + return; + + wait->err = err; + complete(&wait->completion); +} +EXPORT_SYMBOL_GPL(crypto_req_done); + MODULE_DESCRIPTION("Cryptographic core API"); MODULE_LICENSE("GPL"); diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c index f6a009d88a33..52e5ea3b8e40 100644 --- a/crypto/asymmetric_keys/pkcs7_trust.c +++ b/crypto/asymmetric_keys/pkcs7_trust.c @@ -106,6 +106,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7, pr_devel("sinfo %u: Direct signer is key %x\n", sinfo->index, key_serial(key)); x509 = NULL; + sig = sinfo->sig; goto matched; } if (PTR_ERR(key) != -ENOKEY) diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c index 2d93d9eccb4d..a18295651077 100644 --- a/crypto/asymmetric_keys/pkcs7_verify.c +++ b/crypto/asymmetric_keys/pkcs7_verify.c @@ -150,7 +150,7 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7, pr_devel("Sig %u: Found cert serial match X.509[%u]\n", sinfo->index, certix); - if (x509->pub->pkey_algo != sinfo->sig->pkey_algo) { + if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) { pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n", sinfo->index); continue; @@ -273,7 +273,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7, sinfo->index); return 0; } - ret = public_key_verify_signature(p->pub, p->sig); + ret = public_key_verify_signature(p->pub, x509->sig); if (ret < 0) return ret; x509->signer = p; @@ -369,8 +369,7 @@ static int pkcs7_verify_one(struct pkcs7_message *pkcs7, * * (*) -EBADMSG if some part of the message was invalid, or: * - * (*) 0 if no signature chains were found to be blacklisted or to contain - * unsupported crypto, or: + * (*) 0 if a signature chain passed verification, or: * * (*) -EKEYREJECTED if a blacklisted key was encountered, or: * @@ -426,8 +425,11 @@ int pkcs7_verify(struct pkcs7_message *pkcs7, for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) { ret = pkcs7_verify_one(pkcs7, sinfo); - if (sinfo->blacklisted && actual_ret == -ENOPKG) - actual_ret = -EKEYREJECTED; + if (sinfo->blacklisted) { + if (actual_ret == -ENOPKG) + actual_ret = -EKEYREJECTED; + continue; + } if (ret < 0) { if (ret == -ENOPKG) { sinfo->unsupported_crypto = true; diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index 3cd6e12cfc46..d1af69d2ff85 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -93,9 +93,11 @@ int public_key_verify_signature(const struct public_key *pkey, BUG_ON(!pkey); BUG_ON(!sig); - BUG_ON(!sig->digest); BUG_ON(!sig->s); + if (!sig->digest) + return -ENOPKG; + alg_name = sig->pkey_algo; if (strcmp(sig->pkey_algo, "rsa") == 0) { /* The data wangled by the RSA algorithm is typically padded diff --git 
a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c index 86fb68508952..7c93c7728454 100644 --- a/crypto/asymmetric_keys/restrict.c +++ b/crypto/asymmetric_keys/restrict.c @@ -67,8 +67,9 @@ __setup("ca_keys=", ca_keys_setup); * * Returns 0 if the new certificate was accepted, -ENOKEY if we couldn't find a * matching parent certificate in the trusted list, -EKEYREJECTED if the - * signature check fails or the key is blacklisted and some other error if - * there is a matching certificate but the signature check cannot be performed. + * signature check fails or the key is blacklisted, -ENOPKG if the signature + * uses unsupported crypto, or some other error if there is a matching + * certificate but the signature check cannot be performed. */ int restrict_link_by_signature(struct key *dest_keyring, const struct key_type *type, @@ -88,6 +89,8 @@ int restrict_link_by_signature(struct key *dest_keyring, return -EOPNOTSUPP; sig = payload->data[asym_auth]; + if (!sig) + return -ENOPKG; if (!sig->auth_ids[0] && !sig->auth_ids[1]) return -ENOKEY; @@ -139,6 +142,8 @@ static int key_or_keyring_common(struct key *dest_keyring, return -EOPNOTSUPP; sig = payload->data[asym_auth]; + if (!sig) + return -ENOPKG; if (!sig->auth_ids[0] && !sig->auth_ids[1]) return -ENOKEY; @@ -222,9 +227,9 @@ static int key_or_keyring_common(struct key *dest_keyring, * * Returns 0 if the new certificate was accepted, -ENOKEY if we * couldn't find a matching parent certificate in the trusted list, - * -EKEYREJECTED if the signature check fails, and some other error if - * there is a matching certificate but the signature check cannot be - * performed. + * -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses + * unsupported crypto, or some other error if there is a matching certificate + * but the signature check cannot be performed. */ int restrict_link_by_key_or_keyring(struct key *dest_keyring, const struct key_type *type, @@ -249,9 +254,9 @@ int restrict_link_by_key_or_keyring(struct key *dest_keyring, * * Returns 0 if the new certificate was accepted, -ENOKEY if we * couldn't find a matching parent certificate in the trusted list, - * -EKEYREJECTED if the signature check fails, and some other error if - * there is a matching certificate but the signature check cannot be - * performed. + * -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses + * unsupported crypto, or some other error if there is a matching certificate + * but the signature check cannot be performed. 
*/ int restrict_link_by_key_or_keyring_chain(struct key *dest_keyring, const struct key_type *type, diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index dd03fead1ca3..7e6a43ffdcbe 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -249,6 +249,15 @@ int x509_note_signature(void *context, size_t hdrlen, return -EINVAL; } + if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) { + /* Discard the BIT STRING metadata */ + if (vlen < 1 || *(const u8 *)value != 0) + return -EBADMSG; + + value++; + vlen--; + } + ctx->cert->raw_sig = value; ctx->cert->raw_sig_size = vlen; return 0; @@ -409,6 +418,8 @@ int x509_extract_key_data(void *context, size_t hdrlen, ctx->cert->pub->pkey_algo = "rsa"; /* Discard the BIT STRING metadata */ + if (vlen < 1 || *(const u8 *)value != 0) + return -EBADMSG; ctx->key = value + 1; ctx->key_size = vlen - 1; return 0; diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index eea71dc9686c..1bd0cf71a22d 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -135,7 +135,7 @@ int x509_check_for_self_signed(struct x509_certificate *cert) } ret = -EKEYREJECTED; - if (cert->pub->pkey_algo != cert->sig->pkey_algo) + if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0) goto out; ret = public_key_verify_signature(cert->pub, cert->sig); diff --git a/crypto/authenc.c b/crypto/authenc.c index 875470b0e026..0db344d5a01a 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -108,6 +108,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, CRYPTO_TFM_RES_MASK); out: + memzero_explicit(&keys, sizeof(keys)); return err; badkey: diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 0cf5fefdb859..6de852ce4cf8 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -90,6 +90,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 * CRYPTO_TFM_RES_MASK); out: + memzero_explicit(&keys, sizeof(keys)); return err; badkey: diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 6c43a0a17a55..d84c6920ada9 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -71,19 +71,18 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len) return max(start, end_page); } -static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk, - unsigned int bsize) +static inline void blkcipher_done_slow(struct blkcipher_walk *walk, + unsigned int bsize) { u8 *addr; addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); addr = blkcipher_get_spot(addr, bsize); scatterwalk_copychunks(addr, &walk->out, bsize, 1); - return bsize; } -static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk, - unsigned int n) +static inline void blkcipher_done_fast(struct blkcipher_walk *walk, + unsigned int n) { if (walk->flags & BLKCIPHER_WALK_COPY) { blkcipher_map_dst(walk); @@ -97,49 +96,48 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk, scatterwalk_advance(&walk->in, n); scatterwalk_advance(&walk->out, n); - - return n; } int blkcipher_walk_done(struct blkcipher_desc *desc, struct blkcipher_walk *walk, int err) { - unsigned int nbytes = 0; + unsigned int n; /* bytes processed */ + bool more; - if (likely(err >= 0)) { - unsigned int n = walk->nbytes - err; + if (unlikely(err < 0)) + goto finish; - if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) - n = 
blkcipher_done_fast(walk, n); - else if (WARN_ON(err)) { - err = -EINVAL; - goto err; - } else - n = blkcipher_done_slow(walk, n); + n = walk->nbytes - err; + walk->total -= n; + more = (walk->total != 0); - nbytes = walk->total - n; - err = 0; + if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) { + blkcipher_done_fast(walk, n); + } else { + if (WARN_ON(err)) { + /* unexpected case; didn't process all bytes */ + err = -EINVAL; + goto finish; + } + blkcipher_done_slow(walk, n); } - scatterwalk_done(&walk->in, 0, nbytes); - scatterwalk_done(&walk->out, 1, nbytes); + scatterwalk_done(&walk->in, 0, more); + scatterwalk_done(&walk->out, 1, more); -err: - walk->total = nbytes; - walk->nbytes = nbytes; - - if (nbytes) { + if (more) { crypto_yield(desc->flags); return blkcipher_walk_next(desc, walk); } - + err = 0; +finish: + walk->nbytes = 0; if (walk->iv != desc->info) memcpy(desc->info, walk->iv, walk->ivsize); if (walk->buffer != walk->page) kfree(walk->buffer); if (walk->page) free_page((unsigned long)walk->page); - return err; } EXPORT_SYMBOL_GPL(blkcipher_walk_done); diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index db1bc3147bc4..600afa99941f 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, algt->mask)); if (IS_ERR(poly)) return PTR_ERR(poly); + poly_hash = __crypto_hash_alg_common(poly); + + err = -EINVAL; + if (poly_hash->digestsize != POLY1305_DIGEST_SIZE) + goto out_put_poly; err = -ENOMEM; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); @@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, ctx = aead_instance_ctx(inst); ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize; - poly_hash = __crypto_hash_alg_common(poly); err = crypto_init_ahash_spawn(&ctx->poly, poly_hash, aead_crypto_instance(inst)); if (err) diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c index aa2a25fc7482..718cbce8d169 100644 --- a/crypto/crc32_generic.c +++ b/crypto/crc32_generic.c @@ -133,6 +133,7 @@ static struct shash_alg alg = { .cra_name = "crc32", .cra_driver_name = "crc32-generic", .cra_priority = 100, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c index 4c0a0e271876..372320399622 100644 --- a/crypto/crc32c_generic.c +++ b/crypto/crc32c_generic.c @@ -146,6 +146,7 @@ static struct shash_alg alg = { .cra_name = "crc32c", .cra_driver_name = "crc32c-generic", .cra_priority = 100, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_alignmask = 3, .cra_ctxsize = sizeof(struct chksum_ctx), diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 0508c48a45c4..248f6ba41688 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -895,10 +895,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto out_free_inst; - type = CRYPTO_ALG_ASYNC; - if (alg->cra_flags & CRYPTO_ALG_INTERNAL) - type |= CRYPTO_ALG_INTERNAL; - inst->alg.halg.base.cra_flags = type; + inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | + (alg->cra_flags & (CRYPTO_ALG_INTERNAL | + CRYPTO_ALG_OPTIONAL_KEY)); inst->alg.halg.digestsize = salg->digestsize; inst->alg.halg.statesize = salg->statesize; @@ -913,7 +912,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.finup = cryptd_hash_finup_enqueue; 
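/*
 * [Editor's note, not part of the patch] A minimal sketch of the keying
 * model that the crc32/crc32c, cryptd and mcryptd hunks here wire up:
 * with CRYPTO_ALG_OPTIONAL_KEY a hash may be used unkeyed, while a
 * mandatory-key hash carries CRYPTO_TFM_NEED_KEY and digesting before
 * setkey() fails with -ENOKEY (per the shash.c hunk below). The calls are
 * standard 4.x-era shash API; error handling is abbreviated.
 */
#include <crypto/hash.h>
#include <linux/err.h>

static int optional_key_demo(void)
{
	struct crypto_shash *tfm = crypto_alloc_shash("crc32c", 0, 0);
	u8 out[4];	/* crc32c digest size */
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		/*
		 * No crypto_shash_setkey() call: permitted because crc32c is
		 * CRYPTO_ALG_OPTIONAL_KEY (it falls back to its default
		 * seed). A keyed-only shash would still have
		 * CRYPTO_TFM_NEED_KEY set here and digest would return
		 * -ENOKEY.
		 */
		err = crypto_shash_digest(desc, (const u8 *)"abc", 3, out);
	}
	crypto_free_shash(tfm);
	return err;
}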
inst->alg.export = cryptd_hash_export; inst->alg.import = cryptd_hash_import; - inst->alg.setkey = cryptd_hash_setkey; + if (crypto_shash_alg_has_setkey(salg)) + inst->alg.setkey = cryptd_hash_setkey; inst->alg.digest = cryptd_hash_digest_enqueue; err = ahash_register_instance(tmpl, inst); diff --git a/crypto/dh.c b/crypto/dh.c index b1032a5c1bfa..aadaf36fb56f 100644 --- a/crypto/dh.c +++ b/crypto/dh.c @@ -21,19 +21,12 @@ struct dh_ctx { MPI xa; }; -static inline void dh_clear_params(struct dh_ctx *ctx) +static void dh_clear_ctx(struct dh_ctx *ctx) { mpi_free(ctx->p); mpi_free(ctx->g); - ctx->p = NULL; - ctx->g = NULL; -} - -static void dh_free_ctx(struct dh_ctx *ctx) -{ - dh_clear_params(ctx); mpi_free(ctx->xa); - ctx->xa = NULL; + memset(ctx, 0, sizeof(*ctx)); } /* @@ -71,10 +64,8 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params) return -EINVAL; ctx->g = mpi_read_raw_data(params->g, params->g_size); - if (!ctx->g) { - mpi_free(ctx->p); + if (!ctx->g) return -EINVAL; - } return 0; } @@ -86,21 +77,23 @@ static int dh_set_secret(struct crypto_kpp *tfm, const void *buf, struct dh params; /* Free the old MPI key if any */ - dh_free_ctx(ctx); + dh_clear_ctx(ctx); if (crypto_dh_decode_key(buf, len, &params) < 0) - return -EINVAL; + goto err_clear_ctx; if (dh_set_params(ctx, &params) < 0) - return -EINVAL; + goto err_clear_ctx; ctx->xa = mpi_read_raw_data(params.key, params.key_size); - if (!ctx->xa) { - dh_clear_params(ctx); - return -EINVAL; - } + if (!ctx->xa) + goto err_clear_ctx; return 0; + +err_clear_ctx: + dh_clear_ctx(ctx); + return -EINVAL; } static int dh_compute_value(struct kpp_request *req) @@ -158,7 +151,7 @@ static void dh_exit_tfm(struct crypto_kpp *tfm) { struct dh_ctx *ctx = dh_get_ctx(tfm); - dh_free_ctx(ctx); + dh_clear_ctx(ctx); } static struct kpp_alg dh = { diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c index 8ba8a3f82620..7f00c771fe8d 100644 --- a/crypto/dh_helper.c +++ b/crypto/dh_helper.c @@ -83,6 +83,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params) if (secret.len != crypto_dh_key_len(params)) return -EINVAL; + /* + * Don't permit the buffer for 'key' or 'g' to be larger than 'p', since + * some drivers assume otherwise. + */ + if (params->key_size > params->p_size || + params->g_size > params->p_size) + return -EINVAL; + /* Don't allocate memory. Set pointers to data within * the given buffer */ @@ -90,6 +98,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params) params->p = (void *)(ptr + params->key_size); params->g = (void *)(ptr + params->key_size + params->p_size); + /* + * Don't permit 'p' to be 0. It's not a prime number, and it's subject + * to corner cases such as 'mod 0' being undefined or + * crypto_kpp_maxsize() returning 0. 
+ */ + if (memchr_inv(params->p, 0, params->p_size) == NULL) + return -EINVAL; + return 0; } EXPORT_SYMBOL_GPL(crypto_dh_decode_key); diff --git a/crypto/drbg.c b/crypto/drbg.c index 70018397e59a..6c3221313753 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1134,8 +1134,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) if (!drbg) return; kzfree(drbg->Vbuf); + drbg->Vbuf = NULL; drbg->V = NULL; kzfree(drbg->Cbuf); + drbg->Cbuf = NULL; drbg->C = NULL; kzfree(drbg->scratchpadbuf); drbg->scratchpadbuf = NULL; diff --git a/crypto/ecc.c b/crypto/ecc.c index 633a9bcdc574..18f32f2a5e1c 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -964,7 +964,7 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey) * DRBG with a security strength of 256. */ if (crypto_get_default_rng()) - err = -EFAULT; + return -EFAULT; err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes); crypto_put_default_rng(); diff --git a/crypto/hmac.c b/crypto/hmac.c index 92871dc2a63e..e74730224f0a 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -195,11 +195,15 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) salg = shash_attr_alg(tb[1], 0, 0); if (IS_ERR(salg)) return PTR_ERR(salg); + alg = &salg->base; + /* The underlying hash algorithm must be unkeyed */ err = -EINVAL; + if (crypto_shash_alg_has_setkey(salg)) + goto out_put_alg; + ds = salg->digestsize; ss = salg->statesize; - alg = &salg->base; if (ds > alg->cra_blocksize || ss < alg->cra_blocksize) goto out_put_alg; diff --git a/crypto/lrw.c b/crypto/lrw.c index a8bfae4451bf..fdba6dd6db63 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -313,7 +313,7 @@ static void exit_crypt(struct skcipher_request *req) rctx->left = 0; if (rctx->ext) - kfree(rctx->ext); + kzfree(rctx->ext); } static int do_encrypt(struct skcipher_request *req, int err) @@ -610,8 +610,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) ecb_name[len - 1] = 0; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, - "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) - return -ENAMETOOLONG; + "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) { + err = -ENAMETOOLONG; + goto err_drop_spawn; + } } inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index 4e6472658852..e0732d979e3b 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -81,6 +81,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue, pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue); crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); INIT_WORK(&cpu_queue->work, mcryptd_queue_worker); + spin_lock_init(&cpu_queue->q_lock); } return 0; } @@ -104,15 +105,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue, int cpu, err; struct mcryptd_cpu_queue *cpu_queue; - cpu = get_cpu(); - cpu_queue = this_cpu_ptr(queue->cpu_queue); - rctx->tag.cpu = cpu; + cpu_queue = raw_cpu_ptr(queue->cpu_queue); + spin_lock(&cpu_queue->q_lock); + cpu = smp_processor_id(); + rctx->tag.cpu = smp_processor_id(); err = crypto_enqueue_request(&cpu_queue->queue, request); pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n", cpu, cpu_queue, request); + spin_unlock(&cpu_queue->q_lock); queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); - put_cpu(); return err; } @@ -161,16 +163,11 @@ static void mcryptd_queue_worker(struct work_struct *work) cpu_queue = container_of(work, struct mcryptd_cpu_queue, work); i = 0; while (i < MCRYPTD_BATCH || single_task_running()) { - /* - * 
preempt_disable/enable is used to prevent - * being preempted by mcryptd_enqueue_request() - */ - local_bh_disable(); - preempt_disable(); + + spin_lock_bh(&cpu_queue->q_lock); backlog = crypto_get_backlog(&cpu_queue->queue); req = crypto_dequeue_request(&cpu_queue->queue); - preempt_enable(); - local_bh_enable(); + spin_unlock_bh(&cpu_queue->q_lock); if (!req) { mcryptd_opportunistic_flush(); @@ -185,7 +182,7 @@ static void mcryptd_queue_worker(struct work_struct *work) ++i; } if (cpu_queue->queue.qlen) - queue_work(kcrypto_wq, &cpu_queue->work); + queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work); } void mcryptd_flusher(struct work_struct *__work) @@ -520,10 +517,9 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, if (err) goto out_free_inst; - type = CRYPTO_ALG_ASYNC; - if (alg->cra_flags & CRYPTO_ALG_INTERNAL) - type |= CRYPTO_ALG_INTERNAL; - inst->alg.halg.base.cra_flags = type; + inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | + (alg->cra_flags & (CRYPTO_ALG_INTERNAL | + CRYPTO_ALG_OPTIONAL_KEY)); inst->alg.halg.digestsize = halg->digestsize; inst->alg.halg.statesize = halg->statesize; @@ -538,7 +534,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.finup = mcryptd_hash_finup_enqueue; inst->alg.export = mcryptd_hash_export; inst->alg.import = mcryptd_hash_import; - inst->alg.setkey = mcryptd_hash_setkey; + if (crypto_hash_alg_has_setkey(halg)) + inst->alg.setkey = mcryptd_hash_setkey; inst->alg.digest = mcryptd_hash_digest_enqueue; err = ahash_register_instance(tmpl, inst); diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index ee9cfb99fe25..f8ec3d4ba4a8 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm) crypto_free_aead(ctx->child); } +static void pcrypt_free(struct aead_instance *inst) +{ + struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst); + + crypto_drop_aead(&ctx->spawn); + kfree(inst); +} + static int pcrypt_init_instance(struct crypto_instance *inst, struct crypto_alg *alg) { @@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.encrypt = pcrypt_aead_encrypt; inst->alg.decrypt = pcrypt_aead_decrypt; + inst->free = pcrypt_free; + err = aead_register_instance(tmpl, inst); if (err) goto out_drop_aead; @@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb) return -EINVAL; } -static void pcrypt_free(struct crypto_instance *inst) -{ - struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst); - - crypto_drop_aead(&ctx->spawn); - kfree(inst); -} - static int pcrypt_cpumask_change_notify(struct notifier_block *self, unsigned long val, void *data) { @@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) static struct crypto_template pcrypt_tmpl = { .name = "pcrypt", .create = pcrypt_create, - .free = pcrypt_free, .module = THIS_MODULE, }; diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c index b1c2d57dc734..ba39eb308c79 100644 --- a/crypto/poly1305_generic.c +++ b/crypto/poly1305_generic.c @@ -47,17 +47,6 @@ int crypto_poly1305_init(struct shash_desc *desc) } EXPORT_SYMBOL_GPL(crypto_poly1305_init); -int crypto_poly1305_setkey(struct crypto_shash *tfm, - const u8 *key, unsigned int keylen) -{ - /* Poly1305 requires a unique key for each tag, which implies that - * we can't set it on the tfm that gets accessed by multiple users - * simultaneously. 
Instead we expect the key as the first 32 bytes in - * the update() call. */ - return -ENOTSUPP; -} -EXPORT_SYMBOL_GPL(crypto_poly1305_setkey); - static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) { /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ @@ -76,6 +65,11 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key) dctx->s[3] = get_unaligned_le32(key + 12); } +/* + * Poly1305 requires a unique key for each tag, which implies that we can't set + * it on the tfm that gets accessed by multiple users simultaneously. Instead we + * expect the key as the first 32 bytes in the update() call. + */ unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, const u8 *src, unsigned int srclen) { @@ -281,7 +275,6 @@ static struct shash_alg poly1305_alg = { .init = crypto_poly1305_init, .update = crypto_poly1305_update, .final = crypto_poly1305_final, - .setkey = crypto_poly1305_setkey, .descsize = sizeof(struct poly1305_desc_ctx), .base = { .cra_name = "poly1305", diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c index 0b66dc824606..cad395d70d78 100644 --- a/crypto/rsa_helper.c +++ b/crypto/rsa_helper.c @@ -30,7 +30,7 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag, return -EINVAL; if (fips_enabled) { - while (!*ptr && n_sz) { + while (n_sz && !*ptr) { ptr++; n_sz--; } diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index f550b5d94630..d7da0eea5622 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c @@ -188,13 +188,6 @@ static int encrypt(struct blkcipher_desc *desc, salsa20_ivsetup(ctx, walk.iv); - if (likely(walk.nbytes == nbytes)) - { - salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, - walk.src.virt.addr, nbytes); - return blkcipher_walk_done(desc, &walk, 0); - } - while (walk.nbytes >= 64) { salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, walk.src.virt.addr, diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index 7e8ed96236ce..a68be626017c 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -18,6 +18,7 @@ #include #include #include +#include #define KECCAK_ROUNDS 24 @@ -149,7 +150,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data, unsigned int i; for (i = 0; i < sctx->rsizw; i++) - sctx->st[i] ^= ((u64 *) src)[i]; + sctx->st[i] ^= get_unaligned_le64(src + 8 * i); keccakf(sctx->st); done += sctx->rsiz; @@ -174,7 +175,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out) sctx->buf[sctx->rsiz - 1] |= 0x80; for (i = 0; i < sctx->rsizw; i++) - sctx->st[i] ^= ((u64 *) sctx->buf)[i]; + sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i); keccakf(sctx->st); diff --git a/crypto/shash.c b/crypto/shash.c index 325a14da5827..5d732c6bb4b2 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -25,11 +25,12 @@ static const struct crypto_type crypto_shash_type; -static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) +int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen) { return -ENOSYS; } +EXPORT_SYMBOL_GPL(shash_no_setkey); static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) @@ -57,11 +58,18 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, { struct shash_alg *shash = crypto_shash_alg(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm); + int err; if ((unsigned long)key & alignmask) - return shash_setkey_unaligned(tfm, key, keylen); + err = shash_setkey_unaligned(tfm, key, keylen); + else + err = shash->setkey(tfm, key, 
keylen); + + if (err) + return err; - return shash->setkey(tfm, key, keylen); + crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); + return 0; } EXPORT_SYMBOL_GPL(crypto_shash_setkey); @@ -180,6 +188,9 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, struct shash_alg *shash = crypto_shash_alg(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm); + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + if (((unsigned long)data | (unsigned long)out) & alignmask) return shash_digest_unaligned(desc, data, len, out); @@ -359,7 +370,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) crt->digest = shash_async_digest; crt->setkey = shash_async_setkey; - crt->has_setkey = alg->setkey != shash_no_setkey; + crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & + CRYPTO_TFM_NEED_KEY); if (alg->export) crt->export = shash_async_export; @@ -374,8 +386,14 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) static int crypto_shash_init_tfm(struct crypto_tfm *tfm) { struct crypto_shash *hash = __crypto_shash_cast(tfm); + struct shash_alg *alg = crypto_shash_alg(hash); + + hash->descsize = alg->descsize; + + if (crypto_shash_alg_has_setkey(alg) && + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) + crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY); - hash->descsize = crypto_shash_alg(hash)->descsize; return 0; } diff --git a/crypto/skcipher.c b/crypto/skcipher.c index d5692e35fab1..e319421a32e7 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len) return max(start, end_page); } -static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) +static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) { u8 *addr; @@ -103,23 +103,24 @@ static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) addr = skcipher_get_spot(addr, bsize); scatterwalk_copychunks(addr, &walk->out, bsize, (walk->flags & SKCIPHER_WALK_PHYS) ? 
2 : 1); - return 0; } int skcipher_walk_done(struct skcipher_walk *walk, int err) { - unsigned int n = walk->nbytes - err; - unsigned int nbytes; + unsigned int n; /* bytes processed */ + bool more; + + if (unlikely(err < 0)) + goto finish; - nbytes = walk->total - n; + n = walk->nbytes - err; + walk->total -= n; + more = (walk->total != 0); - if (unlikely(err < 0)) { - nbytes = 0; - n = 0; - } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS | - SKCIPHER_WALK_SLOW | - SKCIPHER_WALK_COPY | - SKCIPHER_WALK_DIFF)))) { + if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS | + SKCIPHER_WALK_SLOW | + SKCIPHER_WALK_COPY | + SKCIPHER_WALK_DIFF)))) { unmap_src: skcipher_unmap_src(walk); } else if (walk->flags & SKCIPHER_WALK_DIFF) { @@ -131,28 +132,28 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err) skcipher_unmap_dst(walk); } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) { if (WARN_ON(err)) { + /* unexpected case; didn't process all bytes */ err = -EINVAL; - nbytes = 0; - } else - n = skcipher_done_slow(walk, n); + goto finish; + } + skcipher_done_slow(walk, n); + goto already_advanced; } - if (err > 0) - err = 0; - - walk->total = nbytes; - walk->nbytes = nbytes; - scatterwalk_advance(&walk->in, n); scatterwalk_advance(&walk->out, n); - scatterwalk_done(&walk->in, 0, nbytes); - scatterwalk_done(&walk->out, 1, nbytes); +already_advanced: + scatterwalk_done(&walk->in, 0, more); + scatterwalk_done(&walk->out, 1, more); - if (nbytes) { + if (more) { crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ? CRYPTO_TFM_REQ_MAY_SLEEP : 0); return skcipher_walk_next(walk); } + err = 0; +finish: + walk->nbytes = 0; /* Short-circuit for the common/fast path. */ if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) @@ -399,7 +400,7 @@ static int skcipher_copy_iv(struct skcipher_walk *walk) unsigned size; u8 *iv; - aligned_bs = ALIGN(bs, alignmask); + aligned_bs = ALIGN(bs, alignmask + 1); /* Minimum size to align buffer by alignmask. */ size = alignmask & ~a; @@ -449,6 +450,8 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, walk->total = req->cryptlen; walk->nbytes = 0; + walk->iv = req->iv; + walk->oiv = req->iv; if (unlikely(!walk->total)) return 0; @@ -456,9 +459,6 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, scatterwalk_start(&walk->in, req->src); scatterwalk_start(&walk->out, req->dst); - walk->iv = req->iv; - walk->oiv = req->iv; - walk->flags &= ~SKCIPHER_WALK_SLEEP; walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? SKCIPHER_WALK_SLEEP : 0; @@ -510,6 +510,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk, int err; walk->nbytes = 0; + walk->iv = req->iv; + walk->oiv = req->iv; if (unlikely(!walk->total)) return 0; @@ -522,8 +524,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk, scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2); scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2); - walk->iv = req->iv; - walk->oiv = req->iv; + scatterwalk_done(&walk->in, 0, walk->total); + scatterwalk_done(&walk->out, 0, walk->total); if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) walk->flags |= SKCIPHER_WALK_SLEEP; diff --git a/crypto/speck.c b/crypto/speck.c new file mode 100644 index 000000000000..58aa9f7f91f7 --- /dev/null +++ b/crypto/speck.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Speck: a lightweight block cipher + * + * Copyright (c) 2018 Google, Inc + * + * Speck has 10 variants, including 5 block sizes. 
For now we only implement + * the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and + * Speck64/128. Speck${B}/${K} denotes the variant with a block size of B bits + * and a key size of K bits. The Speck128 variants are believed to be the most + * secure variants, and they use the same block size and key sizes as AES. The + * Speck64 variants are less secure, but on 32-bit processors are usually + * faster. The remaining variants (Speck32, Speck48, and Speck96) are even less + * secure and/or not as well suited for implementation on either 32-bit or + * 64-bit processors, so are omitted. + * + * Reference: "The Simon and Speck Families of Lightweight Block Ciphers" + * https://eprint.iacr.org/2013/404.pdf + * + * In a correspondence, the Speck designers have also clarified that the words + * should be interpreted in little-endian format, and the words should be + * ordered such that the first word of each block is 'y' rather than 'x', and + * the first key word (rather than the last) becomes the first round key. + */ + +#include +#include +#include +#include +#include +#include + +/* Speck128 */ + +static __always_inline void speck128_round(u64 *x, u64 *y, u64 k) +{ + *x = ror64(*x, 8); + *x += *y; + *x ^= k; + *y = rol64(*y, 3); + *y ^= *x; +} + +static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k) +{ + *y ^= *x; + *y = ror64(*y, 3); + *x ^= k; + *x -= *y; + *x = rol64(*x, 8); +} + +void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx, + u8 *out, const u8 *in) +{ + u64 y = get_unaligned_le64(in); + u64 x = get_unaligned_le64(in + 8); + int i; + + for (i = 0; i < ctx->nrounds; i++) + speck128_round(&x, &y, ctx->round_keys[i]); + + put_unaligned_le64(y, out); + put_unaligned_le64(x, out + 8); +} +EXPORT_SYMBOL_GPL(crypto_speck128_encrypt); + +static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in); +} + +void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx, + u8 *out, const u8 *in) +{ + u64 y = get_unaligned_le64(in); + u64 x = get_unaligned_le64(in + 8); + int i; + + for (i = ctx->nrounds - 1; i >= 0; i--) + speck128_unround(&x, &y, ctx->round_keys[i]); + + put_unaligned_le64(y, out); + put_unaligned_le64(x, out + 8); +} +EXPORT_SYMBOL_GPL(crypto_speck128_decrypt); + +static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in); +} + +int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key, + unsigned int keylen) +{ + u64 l[3]; + u64 k; + int i; + + switch (keylen) { + case SPECK128_128_KEY_SIZE: + k = get_unaligned_le64(key); + l[0] = get_unaligned_le64(key + 8); + ctx->nrounds = SPECK128_128_NROUNDS; + for (i = 0; i < ctx->nrounds; i++) { + ctx->round_keys[i] = k; + speck128_round(&l[0], &k, i); + } + break; + case SPECK128_192_KEY_SIZE: + k = get_unaligned_le64(key); + l[0] = get_unaligned_le64(key + 8); + l[1] = get_unaligned_le64(key + 16); + ctx->nrounds = SPECK128_192_NROUNDS; + for (i = 0; i < ctx->nrounds; i++) { + ctx->round_keys[i] = k; + speck128_round(&l[i % 2], &k, i); + } + break; + case SPECK128_256_KEY_SIZE: + k = get_unaligned_le64(key); + l[0] = get_unaligned_le64(key + 8); + l[1] = get_unaligned_le64(key + 16); + l[2] = get_unaligned_le64(key + 24); + ctx->nrounds = SPECK128_256_NROUNDS; + for (i = 0; i < ctx->nrounds; i++) { + ctx->round_keys[i] = k; + speck128_round(&l[i % 3], &k, i); + } + break; + default: + return -EINVAL; + } + + return 0; +} 
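/*
 * [Editor's note, not part of the patch] A quick check of the helpers above
 * against the Speck128/128 vector from the paper; the same vector appears in
 * the testmgr.h additions later in this series. It exercises the
 * little-endian byte order and (y, x) word order described in the header
 * comment above. Constant and struct names are the ones this file already
 * uses; memcmp() is assumed via <linux/string.h>.
 */
static int __maybe_unused speck128_paper_vector_check(void)
{
	static const u8 key[SPECK128_128_KEY_SIZE] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	};
	/* plaintext " made it equival" and ciphertext from the paper */
	static const u8 pt[SPECK128_BLOCK_SIZE] =
		"\x20\x6d\x61\x64\x65\x20\x69\x74"
		"\x20\x65\x71\x75\x69\x76\x61\x6c";
	static const u8 ct[SPECK128_BLOCK_SIZE] =
		"\x18\x0d\x57\x5c\xdf\xfe\x60\x78"
		"\x65\x32\x78\x79\x51\x98\x5d\xa6";
	struct speck128_tfm_ctx ctx;
	u8 out[SPECK128_BLOCK_SIZE];

	if (crypto_speck128_setkey(&ctx, key, sizeof(key)))
		return -EINVAL;
	crypto_speck128_encrypt(&ctx, out, pt);

	return memcmp(out, ct, sizeof(out)) ? -EINVAL : 0;
}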
+EXPORT_SYMBOL_GPL(crypto_speck128_setkey); + +static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen); +} + +/* Speck64 */ + +static __always_inline void speck64_round(u32 *x, u32 *y, u32 k) +{ + *x = ror32(*x, 8); + *x += *y; + *x ^= k; + *y = rol32(*y, 3); + *y ^= *x; +} + +static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k) +{ + *y ^= *x; + *y = ror32(*y, 3); + *x ^= k; + *x -= *y; + *x = rol32(*x, 8); +} + +void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx, + u8 *out, const u8 *in) +{ + u32 y = get_unaligned_le32(in); + u32 x = get_unaligned_le32(in + 4); + int i; + + for (i = 0; i < ctx->nrounds; i++) + speck64_round(&x, &y, ctx->round_keys[i]); + + put_unaligned_le32(y, out); + put_unaligned_le32(x, out + 4); +} +EXPORT_SYMBOL_GPL(crypto_speck64_encrypt); + +static void speck64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in); +} + +void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx, + u8 *out, const u8 *in) +{ + u32 y = get_unaligned_le32(in); + u32 x = get_unaligned_le32(in + 4); + int i; + + for (i = ctx->nrounds - 1; i >= 0; i--) + speck64_unround(&x, &y, ctx->round_keys[i]); + + put_unaligned_le32(y, out); + put_unaligned_le32(x, out + 4); +} +EXPORT_SYMBOL_GPL(crypto_speck64_decrypt); + +static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in); +} + +int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key, + unsigned int keylen) +{ + u32 l[3]; + u32 k; + int i; + + switch (keylen) { + case SPECK64_96_KEY_SIZE: + k = get_unaligned_le32(key); + l[0] = get_unaligned_le32(key + 4); + l[1] = get_unaligned_le32(key + 8); + ctx->nrounds = SPECK64_96_NROUNDS; + for (i = 0; i < ctx->nrounds; i++) { + ctx->round_keys[i] = k; + speck64_round(&l[i % 2], &k, i); + } + break; + case SPECK64_128_KEY_SIZE: + k = get_unaligned_le32(key); + l[0] = get_unaligned_le32(key + 4); + l[1] = get_unaligned_le32(key + 8); + l[2] = get_unaligned_le32(key + 12); + ctx->nrounds = SPECK64_128_NROUNDS; + for (i = 0; i < ctx->nrounds; i++) { + ctx->round_keys[i] = k; + speck64_round(&l[i % 3], &k, i); + } + break; + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_speck64_setkey); + +static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, keylen); +} + +/* Algorithm definitions */ + +static struct crypto_alg speck_algs[] = { + { + .cra_name = "speck128", + .cra_driver_name = "speck128-generic", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = SPECK128_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct speck128_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = SPECK128_128_KEY_SIZE, + .cia_max_keysize = SPECK128_256_KEY_SIZE, + .cia_setkey = speck128_setkey, + .cia_encrypt = speck128_encrypt, + .cia_decrypt = speck128_decrypt + } + } + }, { + .cra_name = "speck64", + .cra_driver_name = "speck64-generic", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = SPECK64_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct speck64_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = SPECK64_96_KEY_SIZE, + .cia_max_keysize = SPECK64_128_KEY_SIZE, + .cia_setkey = speck64_setkey, + .cia_encrypt = 
speck64_encrypt, + .cia_decrypt = speck64_decrypt + } + } + } +}; + +static int __init speck_module_init(void) +{ + return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs)); +} + +static void __exit speck_module_exit(void) +{ + crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs)); +} + +module_init(speck_module_init); +module_exit(speck_module_exit); + +MODULE_DESCRIPTION("Speck block cipher (generic)"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Eric Biggers "); +MODULE_ALIAS_CRYPTO("speck128"); +MODULE_ALIAS_CRYPTO("speck128-generic"); +MODULE_ALIAS_CRYPTO("speck64"); +MODULE_ALIAS_CRYPTO("speck64-generic"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 0022a18d36ee..e339960dcac7 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -221,11 +221,13 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], } sg_init_table(sg, np + 1); - np--; + if (rem) + np--; for (k = 0; k < np; k++) sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE); - sg_set_buf(&sg[k + 1], xbuf[k], rem); + if (rem) + sg_set_buf(&sg[k + 1], xbuf[k], rem); } static void test_aead_speed(const char *algo, int enc, unsigned int secs, @@ -340,7 +342,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, } sg_init_aead(sg, xbuf, - *b_size + (enc ? authsize : 0)); + *b_size + (enc ? 0 : authsize)); sg_init_aead(sgout, xoutbuf, *b_size + (enc ? authsize : 0)); @@ -348,7 +350,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs, sg_set_buf(&sg[0], assoc, aad_size); sg_set_buf(&sgout[0], assoc, aad_size); - aead_request_set_crypt(req, sg, sgout, *b_size, iv); + aead_request_set_crypt(req, sg, sgout, + *b_size + (enc ? 0 : authsize), + iv); aead_request_set_ad(req, aad_size); if (secs) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 7125ba3880af..b5bb45a89ff8 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -3033,6 +3033,24 @@ static const struct alg_test_desc alg_test_descs[] = { .dec = __VECS(serpent_dec_tv_template) } } + }, { + .alg = "ecb(speck128)", + .test = alg_test_skcipher, + .suite = { + .cipher = { + .enc = __VECS(speck128_enc_tv_template), + .dec = __VECS(speck128_dec_tv_template) + } + } + }, { + .alg = "ecb(speck64)", + .test = alg_test_skcipher, + .suite = { + .cipher = { + .enc = __VECS(speck64_enc_tv_template), + .dec = __VECS(speck64_dec_tv_template) + } + } }, { .alg = "ecb(tea)", .test = alg_test_skcipher, @@ -3584,6 +3602,24 @@ static const struct alg_test_desc alg_test_descs[] = { .dec = __VECS(serpent_xts_dec_tv_template) } } + }, { + .alg = "xts(speck128)", + .test = alg_test_skcipher, + .suite = { + .cipher = { + .enc = __VECS(speck128_xts_enc_tv_template), + .dec = __VECS(speck128_xts_dec_tv_template) + } + } + }, { + .alg = "xts(speck64)", + .test = alg_test_skcipher, + .suite = { + .cipher = { + .enc = __VECS(speck64_xts_enc_tv_template), + .dec = __VECS(speck64_xts_dec_tv_template) + } + } }, { .alg = "xts(twofish)", .test = alg_test_skcipher, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index d54971d2d1c8..b0de033a5299 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -548,7 +548,7 @@ static const struct akcipher_testvec rsa_tv_template[] = { static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = { { .key = - "\x30\x82\x03\x1f\x02\x01\x10\x02\x82\x01\x01\x00\xd7\x1e\x77\x82" + "\x30\x82\x03\x1f\x02\x01\x00\x02\x82\x01\x01\x00\xd7\x1e\x77\x82" "\x8c\x92\x31\xe7\x69\x02\xa2\xd5\x5c\x78\xde\xa2\x0c\x8f\xfe\x28" "\x59\x31\xdf\x40\x9c\x60\x61\x06\xb9\x2f\x62\x40\x80\x76\xcb\x67" 
"\x4a\xb5\x59\x56\x69\x17\x07\xfa\xf9\x4c\xbd\x6c\x37\x7a\x46\x7d" @@ -597,8 +597,8 @@ static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = { "\xfe\xf8\x27\x1b\xd6\x55\x60\x5e\x48\xb7\x6d\x9a\xa8\x37\xf9\x7a" "\xde\x1b\xcd\x5d\x1a\x30\xd4\xe9\x9e\x5b\x3c\x15\xf8\x9c\x1f\xda" "\xd1\x86\x48\x55\xce\x83\xee\x8e\x51\xc7\xde\x32\x12\x47\x7d\x46" - "\xb8\x35\xdf\x41\x02\x01\x30\x02\x01\x30\x02\x01\x30\x02\x01\x30" - "\x02\x01\x30", + "\xb8\x35\xdf\x41\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00" + "\x02\x01\x00", .key_len = 804, /* * m is SHA256 hash of following message: @@ -13706,6 +13706,1492 @@ static const struct cipher_testvec serpent_xts_dec_tv_template[] = { }, }; +/* + * Speck test vectors taken from the original paper: + * "The Simon and Speck Families of Lightweight Block Ciphers" + * https://eprint.iacr.org/2013/404.pdf + * + * Note that the paper does not make byte and word order clear. But it was + * confirmed with the authors that the intended orders are little endian byte + * order and (y, x) word order. Equivalently, the printed test vectors, when + * looking at only the bytes (ignoring the whitespace that divides them into + * words), are backwards: the left-most byte is actually the one with the + * highest memory address, while the right-most byte is actually the one with + * the lowest memory address. + */ + +static const struct cipher_testvec speck128_enc_tv_template[] = { + { /* Speck128/128 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .klen = 16, + .input = "\x20\x6d\x61\x64\x65\x20\x69\x74" + "\x20\x65\x71\x75\x69\x76\x61\x6c", + .ilen = 16, + .result = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78" + "\x65\x32\x78\x79\x51\x98\x5d\xa6", + .rlen = 16, + }, { /* Speck128/192 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17", + .klen = 24, + .input = "\x65\x6e\x74\x20\x74\x6f\x20\x43" + "\x68\x69\x65\x66\x20\x48\x61\x72", + .ilen = 16, + .result = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9" + "\x66\x55\x13\x13\x3a\xcf\xe4\x1b", + .rlen = 16, + }, { /* Speck128/256 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .klen = 32, + .input = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20" + "\x49\x6e\x20\x74\x68\x6f\x73\x65", + .ilen = 16, + .result = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e" + "\x3e\xf5\xc0\x05\x04\x01\x09\x41", + .rlen = 16, + }, +}; + +static const struct cipher_testvec speck128_dec_tv_template[] = { + { /* Speck128/128 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .klen = 16, + .input = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78" + "\x65\x32\x78\x79\x51\x98\x5d\xa6", + .ilen = 16, + .result = "\x20\x6d\x61\x64\x65\x20\x69\x74" + "\x20\x65\x71\x75\x69\x76\x61\x6c", + .rlen = 16, + }, { /* Speck128/192 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17", + .klen = 24, + .input = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9" + "\x66\x55\x13\x13\x3a\xcf\xe4\x1b", + .ilen = 16, + .result = "\x65\x6e\x74\x20\x74\x6f\x20\x43" + "\x68\x69\x65\x66\x20\x48\x61\x72", + .rlen = 16, + }, { /* Speck128/256 */ + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .klen = 32, + .input = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e" + "\x3e\xf5\xc0\x05\x04\x01\x09\x41", + .ilen = 16, + .result = 
"\x70\x6f\x6f\x6e\x65\x72\x2e\x20" + "\x49\x6e\x20\x74\x68\x6f\x73\x65", + .rlen = 16, + }, +}; + +/* + * Speck128-XTS test vectors, taken from the AES-XTS test vectors with the + * result recomputed with Speck128 as the cipher + */ + +static const struct cipher_testvec speck128_xts_enc_tv_template[] = { + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 32, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ilen = 32, + .result = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62" + "\x3b\x99\x4a\x64\x74\x77\xac\xed" + "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42" + "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54", + .rlen = 32, + }, { + .key = "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x22\x22\x22\x22\x22\x22\x22\x22" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 32, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .ilen = 32, + .result = "\xfb\x53\x81\x75\x6f\x9f\x34\xad" + "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a" + "\xd4\x84\xa4\x53\xd5\x88\x73\x1b" + "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6", + .rlen = 32, + }, { + .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" + "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" + "\x22\x22\x22\x22\x22\x22\x22\x22" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 32, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .ilen = 32, + .result = "\x21\x52\x84\x15\xd1\xf7\x21\x55" + "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d" + "\xda\x63\xb2\xf1\x82\xb0\x89\x59" + "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92", + .rlen = 32, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x31\x41\x59\x26\x53\x58\x97\x93" + "\x23\x84\x62\x64\x33\x83\x27\x95", + .klen = 32, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + 
"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .ilen = 512, + .result = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82" + "\x53\xd0\xed\x2d\x30\xc1\x20\xef" + "\x70\x67\x5e\xff\x09\x70\xbb\xc1" + "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48" + "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7" + "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9" + "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44" + "\x19\xc5\x58\x84\x63\xb9\x12\x68" + "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c" + "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd" + "\x74\x79\x2e\xb4\x44\xd7\x69\xc4" + "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d" + "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb" + "\x6d\x13\x65\xa0\xf9\x31\x12\xe2" + "\x26\xd1\xec\x2b\x0a\x8b\x59\x99" + "\xa7\x49\xa0\x0e\x09\x33\x85\x50" + "\xc3\x23\xca\x7a\xdd\x13\x45\x5f" + "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f" + "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6" + "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f" + "\x79\x91\x8d\x36\x13\x7b\xd0\x4a" + "\x6c\x39\xfb\x53\xb8\x6f\x02\x51" + "\xa5\x20\xac\x24\x1c\x73\x59\x73" + "\x58\x61\x3a\x87\x58\xb3\x20\x56" + "\x39\x06\x2b\x4d\xd3\x20\x2b\x89" + "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd" + "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91" + "\x09\x35\x71\x50\x65\xac\x92\xe3" + "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92" + "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9" + "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d" + "\x77\x04\x80\xa9\xbf\x38\xb5\xbd" + "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8" + "\x2a\x26\xcc\x49\x14\x6d\x55\x01" + "\x06\x94\xd8\xb2\x2d\x53\x83\x1b" + "\x8f\xd4\xdd\x57\x12\x7e\x18\xba" + "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d" + "\x24\xa9\x60\xa4\x97\x85\x86\x2a" + "\x01\x00\x09\xf1\xcb\x4a\x24\x1c" + "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4" + "\x97\x1c\x10\xc6\x4d\x66\x4f\x98" + "\x87\x30\xac\xd5\xea\x73\x49\x10" + "\x80\xea\xe5\x5f\x4d\x5f\x03\x33" + "\x66\x02\x35\x3d\x60\x06\x36\x4f" + "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8" + "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28" + "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93" + "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30" + "\xcc\x75\xcf\x16\x26\xa9\x26\x3b" + "\xe7\x68\x2f\x15\x21\x5b\xe4\x00" + "\xbd\x48\x50\xcd\x75\x70\xc4\x62" + "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b" + "\x51\x66\x02\x69\x04\x97\x36\xd4" + "\x75\xae\x0b\xa3\x42\xf8\xca\x79" + "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2" + "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd" + "\xea\x15\x5a\xa0\x85\x7e\x81\x0d" + "\x03\xe7\x05\x39\xf5\x05\x26\xee" + "\xec\xaa\x1f\x3d\xc9\x98\x76\x01" + "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4" + "\x50\x65\x50\x6d\x04\x1f\xdf\x5a" + "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca" + 
"\x47\x26\xef\x39\xb8\xb4\xf2\xd1" + "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf", + .rlen = 512, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x62\x49\x77\x57\x24\x70\x93\x69" + "\x99\x59\x57\x49\x66\x96\x76\x27" + "\x31\x41\x59\x26\x53\x58\x97\x93" + "\x23\x84\x62\x64\x33\x83\x27\x95" + "\x02\x88\x41\x97\x16\x93\x99\x37" + "\x51\x05\x82\x09\x74\x94\x45\x92", + .klen = 64, + .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .ilen = 512, + .result = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1" + "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb" + "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73" + "\x92\x99\xde\xd3\x76\xed\xcd\x63" + "\x64\x3a\x22\x57\xc1\x43\x49\xd4" + "\x79\x36\x31\x19\x62\xae\x10\x7e" + "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa" + "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0" + "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00" + "\xfc\x81\x99\x8a\x14\x62\xf5\x7e" + "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec" + "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6" + "\x62\x62\x37\xfe\x0a\x4c\x4a\x37" + "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e" + "\x85\x3c\x4f\x26\x64\x85\xbc\x68" + "\xb0\xe0\x86\x5e\x26\x41\xce\x11" + "\x50\xda\x97\x14\xe9\x9e\xc7\x6d" + 
"\x3b\xdc\x43\xde\x2b\x27\x69\x7d" + "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31" + "\x14\x4d\xf0\x74\x37\xfd\x07\x25" + "\x96\x55\xe5\xfc\x9e\x27\x2a\x74" + "\x1b\x83\x4d\x15\x83\xac\x57\xa0" + "\xac\xa5\xd0\x38\xef\x19\x56\x53" + "\x25\x4b\xfc\xce\x04\x23\xe5\x6b" + "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5" + "\xed\x22\x34\x1c\x5d\xed\x17\x06" + "\x36\xa3\xe6\x77\xb9\x97\x46\xb8" + "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc" + "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82" + "\x35\x91\x3d\x1b\xe4\x97\x9f\x92" + "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1" + "\x8d\x39\xfc\x42\xfb\x38\x80\xb9" + "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1" + "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7" + "\xa1\xbf\xf7\xda\x95\x93\x4b\x78" + "\x19\xf5\x94\xf9\xd2\x00\x33\x37" + "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee" + "\x42\xb2\x9e\x2c\x5f\x48\x23\x26" + "\x15\x25\x17\x03\x3d\xfe\x2c\xfc" + "\xeb\xba\xda\xe0\x00\x05\xb6\xa6" + "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf" + "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a" + "\x49\xa1\xc3\xfa\x10\x52\xb9\x14" + "\xad\xb7\x73\xf8\x78\x12\xc8\x59" + "\x17\x80\x4c\x57\x39\xf1\x6d\x80" + "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21" + "\xec\xce\xb7\xc8\x02\x8a\xed\x53" + "\x2c\x25\x68\x2e\x1f\x85\x5e\x67" + "\xd1\x07\x7a\x3a\x89\x08\xe0\x34" + "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40" + "\x31\x15\x72\xa0\xf0\x73\xd9\x3b" + "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2" + "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8" + "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6" + "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58" + "\xcc\x1f\x48\x49\x65\x47\x75\xe9" + "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07" + "\xf2\xec\x76\xd8\x8f\x09\xf3\x16" + "\xa1\x51\x89\x3b\xeb\x96\x42\xac" + "\x65\xe0\x67\x63\x29\xdc\xb4\x7d" + "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb" + "\x66\x8d\x13\xca\xe0\x59\x2a\x00" + "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5" + "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c", + .rlen = 512, + .also_non_np = 1, + .np = 3, + .tap = { 512 - 20, 4, 16 }, + } +}; + +static const struct cipher_testvec speck128_xts_dec_tv_template[] = { + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 32, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62" + "\x3b\x99\x4a\x64\x74\x77\xac\xed" + "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42" + "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54", + .ilen = 32, + .result = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .rlen = 32, + }, { + .key = "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x22\x22\x22\x22\x22\x22\x22\x22" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 32, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\xfb\x53\x81\x75\x6f\x9f\x34\xad" + "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a" + "\xd4\x84\xa4\x53\xd5\x88\x73\x1b" + "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6", + .ilen = 32, + .result = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .rlen = 32, + }, { + .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" + "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" + "\x22\x22\x22\x22\x22\x22\x22\x22" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 32, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x21\x52\x84\x15\xd1\xf7\x21\x55" + "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d" + "\xda\x63\xb2\xf1\x82\xb0\x89\x59" + "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92", + .ilen = 32, + .result = 
"\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .rlen = 32, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x31\x41\x59\x26\x53\x58\x97\x93" + "\x23\x84\x62\x64\x33\x83\x27\x95", + .klen = 32, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82" + "\x53\xd0\xed\x2d\x30\xc1\x20\xef" + "\x70\x67\x5e\xff\x09\x70\xbb\xc1" + "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48" + "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7" + "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9" + "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44" + "\x19\xc5\x58\x84\x63\xb9\x12\x68" + "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c" + "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd" + "\x74\x79\x2e\xb4\x44\xd7\x69\xc4" + "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d" + "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb" + "\x6d\x13\x65\xa0\xf9\x31\x12\xe2" + "\x26\xd1\xec\x2b\x0a\x8b\x59\x99" + "\xa7\x49\xa0\x0e\x09\x33\x85\x50" + "\xc3\x23\xca\x7a\xdd\x13\x45\x5f" + "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f" + "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6" + "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f" + "\x79\x91\x8d\x36\x13\x7b\xd0\x4a" + "\x6c\x39\xfb\x53\xb8\x6f\x02\x51" + "\xa5\x20\xac\x24\x1c\x73\x59\x73" + "\x58\x61\x3a\x87\x58\xb3\x20\x56" + "\x39\x06\x2b\x4d\xd3\x20\x2b\x89" + "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd" + "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91" + "\x09\x35\x71\x50\x65\xac\x92\xe3" + "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92" + "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9" + "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d" + "\x77\x04\x80\xa9\xbf\x38\xb5\xbd" + "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8" + "\x2a\x26\xcc\x49\x14\x6d\x55\x01" + "\x06\x94\xd8\xb2\x2d\x53\x83\x1b" + "\x8f\xd4\xdd\x57\x12\x7e\x18\xba" + "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d" + "\x24\xa9\x60\xa4\x97\x85\x86\x2a" + "\x01\x00\x09\xf1\xcb\x4a\x24\x1c" + "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4" + "\x97\x1c\x10\xc6\x4d\x66\x4f\x98" + "\x87\x30\xac\xd5\xea\x73\x49\x10" + "\x80\xea\xe5\x5f\x4d\x5f\x03\x33" + "\x66\x02\x35\x3d\x60\x06\x36\x4f" + "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8" + "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28" + "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93" + "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30" + "\xcc\x75\xcf\x16\x26\xa9\x26\x3b" + "\xe7\x68\x2f\x15\x21\x5b\xe4\x00" + "\xbd\x48\x50\xcd\x75\x70\xc4\x62" + "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b" + "\x51\x66\x02\x69\x04\x97\x36\xd4" + "\x75\xae\x0b\xa3\x42\xf8\xca\x79" + "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2" + "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd" + "\xea\x15\x5a\xa0\x85\x7e\x81\x0d" + "\x03\xe7\x05\x39\xf5\x05\x26\xee" + "\xec\xaa\x1f\x3d\xc9\x98\x76\x01" + "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4" + "\x50\x65\x50\x6d\x04\x1f\xdf\x5a" + "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca" + "\x47\x26\xef\x39\xb8\xb4\xf2\xd1" + "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf", + .ilen = 512, + .result = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + 
"\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .rlen = 512, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x62\x49\x77\x57\x24\x70\x93\x69" + "\x99\x59\x57\x49\x66\x96\x76\x27" + "\x31\x41\x59\x26\x53\x58\x97\x93" + "\x23\x84\x62\x64\x33\x83\x27\x95" + "\x02\x88\x41\x97\x16\x93\x99\x37" + "\x51\x05\x82\x09\x74\x94\x45\x92", + .klen = 64, + .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1" + "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb" + "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73" + "\x92\x99\xde\xd3\x76\xed\xcd\x63" + "\x64\x3a\x22\x57\xc1\x43\x49\xd4" + "\x79\x36\x31\x19\x62\xae\x10\x7e" + "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa" + "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0" + "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00" + "\xfc\x81\x99\x8a\x14\x62\xf5\x7e" + "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec" + "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6" + "\x62\x62\x37\xfe\x0a\x4c\x4a\x37" + "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e" + "\x85\x3c\x4f\x26\x64\x85\xbc\x68" + "\xb0\xe0\x86\x5e\x26\x41\xce\x11" + "\x50\xda\x97\x14\xe9\x9e\xc7\x6d" + "\x3b\xdc\x43\xde\x2b\x27\x69\x7d" + "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31" + "\x14\x4d\xf0\x74\x37\xfd\x07\x25" + "\x96\x55\xe5\xfc\x9e\x27\x2a\x74" + "\x1b\x83\x4d\x15\x83\xac\x57\xa0" + "\xac\xa5\xd0\x38\xef\x19\x56\x53" + "\x25\x4b\xfc\xce\x04\x23\xe5\x6b" + "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5" + "\xed\x22\x34\x1c\x5d\xed\x17\x06" + "\x36\xa3\xe6\x77\xb9\x97\x46\xb8" + "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc" + "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82" + "\x35\x91\x3d\x1b\xe4\x97\x9f\x92" + "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1" + "\x8d\x39\xfc\x42\xfb\x38\x80\xb9" + "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1" + "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7" + "\xa1\xbf\xf7\xda\x95\x93\x4b\x78" + "\x19\xf5\x94\xf9\xd2\x00\x33\x37" + "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee" + "\x42\xb2\x9e\x2c\x5f\x48\x23\x26" + "\x15\x25\x17\x03\x3d\xfe\x2c\xfc" + 
"\xeb\xba\xda\xe0\x00\x05\xb6\xa6" + "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf" + "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a" + "\x49\xa1\xc3\xfa\x10\x52\xb9\x14" + "\xad\xb7\x73\xf8\x78\x12\xc8\x59" + "\x17\x80\x4c\x57\x39\xf1\x6d\x80" + "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21" + "\xec\xce\xb7\xc8\x02\x8a\xed\x53" + "\x2c\x25\x68\x2e\x1f\x85\x5e\x67" + "\xd1\x07\x7a\x3a\x89\x08\xe0\x34" + "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40" + "\x31\x15\x72\xa0\xf0\x73\xd9\x3b" + "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2" + "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8" + "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6" + "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58" + "\xcc\x1f\x48\x49\x65\x47\x75\xe9" + "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07" + "\xf2\xec\x76\xd8\x8f\x09\xf3\x16" + "\xa1\x51\x89\x3b\xeb\x96\x42\xac" + "\x65\xe0\x67\x63\x29\xdc\xb4\x7d" + "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb" + "\x66\x8d\x13\xca\xe0\x59\x2a\x00" + "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5" + "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c", + .ilen = 512, + .result = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .rlen = 512, + .also_non_np = 1, + .np = 3, + .tap = { 512 - 20, 4, 16 }, + } +}; + +static const struct cipher_testvec speck64_enc_tv_template[] = { + { /* Speck64/96 */ + .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" + 
"\x10\x11\x12\x13", + .klen = 12, + .input = "\x65\x61\x6e\x73\x20\x46\x61\x74", + .ilen = 8, + .result = "\x6c\x94\x75\x41\xec\x52\x79\x9f", + .rlen = 8, + }, { /* Speck64/128 */ + .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" + "\x10\x11\x12\x13\x18\x19\x1a\x1b", + .klen = 16, + .input = "\x2d\x43\x75\x74\x74\x65\x72\x3b", + .ilen = 8, + .result = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c", + .rlen = 8, + }, +}; + +static const struct cipher_testvec speck64_dec_tv_template[] = { + { /* Speck64/96 */ + .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" + "\x10\x11\x12\x13", + .klen = 12, + .input = "\x6c\x94\x75\x41\xec\x52\x79\x9f", + .ilen = 8, + .result = "\x65\x61\x6e\x73\x20\x46\x61\x74", + .rlen = 8, + }, { /* Speck64/128 */ + .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b" + "\x10\x11\x12\x13\x18\x19\x1a\x1b", + .klen = 16, + .input = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c", + .ilen = 8, + .result = "\x2d\x43\x75\x74\x74\x65\x72\x3b", + .rlen = 8, + }, +}; + +/* + * Speck64-XTS test vectors, taken from the AES-XTS test vectors with the result + * recomputed with Speck64 as the cipher, and key lengths adjusted + */ + +static const struct cipher_testvec speck64_xts_enc_tv_template[] = { + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 24, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .ilen = 32, + .result = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6" + "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2" + "\x80\xf5\x72\xe7\xcd\xf0\x99\x22" + "\x35\xa7\x2f\x06\xef\xdc\x51\xaa", + .rlen = 32, + }, { + .key = "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 24, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .ilen = 32, + .result = "\x12\x56\x73\xcd\x15\x87\xa8\x59" + "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f" + "\xb3\x12\x69\x7e\x36\xeb\x52\xff" + "\x62\xdd\xba\x90\xb3\xe1\xee\x99", + .rlen = 32, + }, { + .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" + "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 24, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .ilen = 32, + .result = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c" + "\x27\x36\xc0\xbf\x5d\xea\x36\x37" + "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b" + "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34", + .rlen = 32, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x31\x41\x59\x26\x53\x58\x97\x93", + .klen = 24, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + 
"\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .ilen = 512, + .result = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e" + "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09" + "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3" + "\x11\xc7\x39\x96\xd0\x95\xf4\x56" + "\xf4\xdd\x03\x38\x01\x44\x2c\xcf" + "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66" + "\xfe\x3d\xc6\xfb\x01\x23\x51\x43" + "\xd5\xd2\x13\x86\x94\x34\xe9\x62" + "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef" + "\x76\x35\x04\x3f\xdb\x23\x9d\x0b" + "\x85\x42\xb9\x02\xd6\xcc\xdb\x96" + "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d" + "\xae\xd2\x04\xd5\xda\xc1\x7e\x24" + "\x8c\x73\xbe\x48\x7e\xcf\x65\x28" + "\x29\xe5\xbe\x54\x30\xcb\x46\x95" + "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe" + "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69" + "\xa1\x09\x95\x71\x26\xe9\xc4\xdf" + "\xe6\x31\xc3\x46\xda\xaf\x0b\x41" + "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3" + "\x82\xc0\x37\x27\xfc\x91\xa7\x05" + "\xfb\xc5\xdc\x2b\x74\x96\x48\x43" + "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f" + "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a" + "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c" + "\x07\xff\xf3\x72\x74\x48\xb5\x40" + "\x50\xb5\xdd\x90\x43\x31\x18\x15" + "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a" + "\x29\x93\x90\x8b\xda\x07\xf0\x35" + "\x6d\x90\x88\x09\x4e\x83\xf5\x5b" + "\x94\x12\xbb\x33\x27\x1d\x3f\x23" + "\x51\xa8\x7c\x07\xa2\xae\x77\xa6" + "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f" + "\x66\xdd\xcd\x75\x24\x8b\x33\xf7" + "\x20\xdb\x83\x9b\x4f\x11\x63\x6e" + "\xcf\x37\xef\xc9\x11\x01\x5c\x45" + "\x32\x99\x7c\x3c\x9e\x42\x89\xe3" + "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05" + "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc" + "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d" + "\xa0\xa8\x89\x3b\x73\x39\xa5\x94" + "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89" + "\x10\xff\xaf\xef\xca\xdd\x4f\x80" + 
"\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7" + "\x33\xca\x00\x8b\x8b\x3f\xea\xec" + "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f" + "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5" + "\x64\xa3\xf1\x1a\x76\x28\xcc\x35" + "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b" + "\xc7\x1b\x53\x17\x02\xea\xd1\xad" + "\x13\x51\x73\xc0\xa0\xb2\x05\x32" + "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19" + "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d" + "\x59\xda\xee\x1a\x22\x18\xda\x0d" + "\x88\x0f\x55\x8b\x72\x62\xfd\xc1" + "\x69\x13\xcd\x0d\x5f\xc1\x09\x52" + "\xee\xd6\xe3\x84\x4d\xee\xf6\x88" + "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f" + "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54" + "\x7d\x69\x8d\x00\x62\x77\x0d\x14" + "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3" + "\x50\xf7\x5f\xf4\xc2\xca\x41\x97" + "\x37\xbe\x75\x74\xcd\xf0\x75\x6e" + "\x25\x23\x94\xbd\xda\x8d\xb0\xd4", + .rlen = 512, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x62\x49\x77\x57\x24\x70\x93\x69" + "\x99\x59\x57\x49\x66\x96\x76\x27", + .klen = 32, + .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .ilen = 512, + .result = "\x55\xed\x71\xd3\x02\x8e\x15\x3b" + "\xc6\x71\x29\x2d\x3e\x89\x9f\x59" + 
"\x68\x6a\xcc\x8a\x56\x97\xf3\x95" + "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c" + "\x78\x16\xea\x80\xdb\x33\x75\x94" + "\xf9\x29\xc4\x2b\x76\x75\x97\xc7" + "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b" + "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee" + "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a" + "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c" + "\xf5\xec\x32\x74\xa3\xb8\x03\x88" + "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f" + "\x84\x5e\x46\xed\x20\x89\xb6\x44" + "\x8d\xd0\xed\x54\x47\x16\xbe\x95" + "\x8a\xb3\x6b\x72\xc4\x32\x52\x13" + "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6" + "\x44\x18\xdd\x8c\x6e\xca\x6e\x45" + "\x8f\x1e\x10\x07\x57\x25\x98\x7b" + "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8" + "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb" + "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff" + "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e" + "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d" + "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65" + "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a" + "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a" + "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78" + "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3" + "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e" + "\x35\x10\x30\x82\x0d\xe7\xc5\x9b" + "\xde\x44\x18\xbd\x9f\xd1\x45\xa9" + "\x7b\x7a\x4a\xad\x35\x65\x27\xca" + "\xb2\xc3\xd4\x9b\x71\x86\x70\xee" + "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf" + "\xfc\x42\xc8\x31\x59\xbe\x16\x60" + "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14" + "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef" + "\x52\x7f\x29\x51\x94\x20\x67\x3c" + "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63" + "\xe7\xff\x73\x25\xd1\xdd\x96\x8a" + "\x98\x52\x6d\xf3\xac\x3e\xf2\x18" + "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed" + "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e" + "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad" + "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa" + "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81" + "\x65\x53\x0f\x41\x11\xbd\x98\x99" + "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d" + "\x84\x98\xf9\x34\xed\x33\x2a\x1f" + "\x82\xed\xc1\x73\x98\xd3\x02\xdc" + "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76" + "\x63\x51\x34\x9d\x96\x12\xae\xce" + "\x83\xc9\x76\x5e\xa4\x1b\x53\x37" + "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d" + "\x54\x27\x74\xbb\x10\x86\x57\x46" + "\x68\xe1\xed\x14\xe7\x9d\xfc\x84" + "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf" + "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d" + "\x7b\x4f\x38\x55\x36\x71\x64\xc1" + "\xfc\x5c\x75\x52\x33\x02\x18\xf8" + "\x17\xe1\x2b\xc2\x43\x39\xbd\x76" + "\x9b\x63\x76\x32\x2f\x19\x72\x10" + "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5" + "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c", + .rlen = 512, + .also_non_np = 1, + .np = 3, + .tap = { 512 - 20, 4, 16 }, + } +}; + +static const struct cipher_testvec speck64_xts_dec_tv_template[] = { + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .klen = 24, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6" + "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2" + "\x80\xf5\x72\xe7\xcd\xf0\x99\x22" + "\x35\xa7\x2f\x06\xef\xdc\x51\xaa", + .ilen = 32, + .result = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .rlen = 32, + }, { + .key = "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x11\x11\x11\x11\x11\x11\x11\x11" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 24, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x12\x56\x73\xcd\x15\x87\xa8\x59" + "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f" + "\xb3\x12\x69\x7e\x36\xeb\x52\xff" + "\x62\xdd\xba\x90\xb3\xe1\xee\x99", + .ilen = 32, + .result = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + 
"\x44\x44\x44\x44\x44\x44\x44\x44", + .rlen = 32, + }, { + .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8" + "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0" + "\x22\x22\x22\x22\x22\x22\x22\x22", + .klen = 24, + .iv = "\x33\x33\x33\x33\x33\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c" + "\x27\x36\xc0\xbf\x5d\xea\x36\x37" + "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b" + "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34", + .ilen = 32, + .result = "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44" + "\x44\x44\x44\x44\x44\x44\x44\x44", + .rlen = 32, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x31\x41\x59\x26\x53\x58\x97\x93", + .klen = 24, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e" + "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09" + "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3" + "\x11\xc7\x39\x96\xd0\x95\xf4\x56" + "\xf4\xdd\x03\x38\x01\x44\x2c\xcf" + "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66" + "\xfe\x3d\xc6\xfb\x01\x23\x51\x43" + "\xd5\xd2\x13\x86\x94\x34\xe9\x62" + "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef" + "\x76\x35\x04\x3f\xdb\x23\x9d\x0b" + "\x85\x42\xb9\x02\xd6\xcc\xdb\x96" + "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d" + "\xae\xd2\x04\xd5\xda\xc1\x7e\x24" + "\x8c\x73\xbe\x48\x7e\xcf\x65\x28" + "\x29\xe5\xbe\x54\x30\xcb\x46\x95" + "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe" + "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69" + "\xa1\x09\x95\x71\x26\xe9\xc4\xdf" + "\xe6\x31\xc3\x46\xda\xaf\x0b\x41" + "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3" + "\x82\xc0\x37\x27\xfc\x91\xa7\x05" + "\xfb\xc5\xdc\x2b\x74\x96\x48\x43" + "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f" + "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a" + "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c" + "\x07\xff\xf3\x72\x74\x48\xb5\x40" + "\x50\xb5\xdd\x90\x43\x31\x18\x15" + "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a" + "\x29\x93\x90\x8b\xda\x07\xf0\x35" + "\x6d\x90\x88\x09\x4e\x83\xf5\x5b" + "\x94\x12\xbb\x33\x27\x1d\x3f\x23" + "\x51\xa8\x7c\x07\xa2\xae\x77\xa6" + "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f" + "\x66\xdd\xcd\x75\x24\x8b\x33\xf7" + "\x20\xdb\x83\x9b\x4f\x11\x63\x6e" + "\xcf\x37\xef\xc9\x11\x01\x5c\x45" + "\x32\x99\x7c\x3c\x9e\x42\x89\xe3" + "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05" + "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc" + "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d" + "\xa0\xa8\x89\x3b\x73\x39\xa5\x94" + "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89" + "\x10\xff\xaf\xef\xca\xdd\x4f\x80" + "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7" + "\x33\xca\x00\x8b\x8b\x3f\xea\xec" + "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f" + "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5" + "\x64\xa3\xf1\x1a\x76\x28\xcc\x35" + "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b" + "\xc7\x1b\x53\x17\x02\xea\xd1\xad" + "\x13\x51\x73\xc0\xa0\xb2\x05\x32" + "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19" + "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d" + "\x59\xda\xee\x1a\x22\x18\xda\x0d" + "\x88\x0f\x55\x8b\x72\x62\xfd\xc1" + "\x69\x13\xcd\x0d\x5f\xc1\x09\x52" + "\xee\xd6\xe3\x84\x4d\xee\xf6\x88" + "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f" + "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54" + "\x7d\x69\x8d\x00\x62\x77\x0d\x14" + "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3" + "\x50\xf7\x5f\xf4\xc2\xca\x41\x97" + "\x37\xbe\x75\x74\xcd\xf0\x75\x6e" + "\x25\x23\x94\xbd\xda\x8d\xb0\xd4", + .ilen = 512, + .result = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + 
"\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .rlen = 512, + }, { + .key = "\x27\x18\x28\x18\x28\x45\x90\x45" + "\x23\x53\x60\x28\x74\x71\x35\x26" + "\x62\x49\x77\x57\x24\x70\x93\x69" + "\x99\x59\x57\x49\x66\x96\x76\x27", + .klen = 32, + .iv = "\xff\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .input = "\x55\xed\x71\xd3\x02\x8e\x15\x3b" + "\xc6\x71\x29\x2d\x3e\x89\x9f\x59" + "\x68\x6a\xcc\x8a\x56\x97\xf3\x95" + "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c" + "\x78\x16\xea\x80\xdb\x33\x75\x94" + "\xf9\x29\xc4\x2b\x76\x75\x97\xc7" + "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b" + "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee" + "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a" + "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c" + "\xf5\xec\x32\x74\xa3\xb8\x03\x88" + "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f" + "\x84\x5e\x46\xed\x20\x89\xb6\x44" + "\x8d\xd0\xed\x54\x47\x16\xbe\x95" + "\x8a\xb3\x6b\x72\xc4\x32\x52\x13" + "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6" + "\x44\x18\xdd\x8c\x6e\xca\x6e\x45" + "\x8f\x1e\x10\x07\x57\x25\x98\x7b" + "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8" + "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb" + "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff" + "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e" + "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d" + "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65" + "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a" + "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a" + "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78" + "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3" + "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e" + "\x35\x10\x30\x82\x0d\xe7\xc5\x9b" + "\xde\x44\x18\xbd\x9f\xd1\x45\xa9" + "\x7b\x7a\x4a\xad\x35\x65\x27\xca" + 
"\xb2\xc3\xd4\x9b\x71\x86\x70\xee" + "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf" + "\xfc\x42\xc8\x31\x59\xbe\x16\x60" + "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14" + "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef" + "\x52\x7f\x29\x51\x94\x20\x67\x3c" + "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63" + "\xe7\xff\x73\x25\xd1\xdd\x96\x8a" + "\x98\x52\x6d\xf3\xac\x3e\xf2\x18" + "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed" + "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e" + "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad" + "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa" + "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81" + "\x65\x53\x0f\x41\x11\xbd\x98\x99" + "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d" + "\x84\x98\xf9\x34\xed\x33\x2a\x1f" + "\x82\xed\xc1\x73\x98\xd3\x02\xdc" + "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76" + "\x63\x51\x34\x9d\x96\x12\xae\xce" + "\x83\xc9\x76\x5e\xa4\x1b\x53\x37" + "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d" + "\x54\x27\x74\xbb\x10\x86\x57\x46" + "\x68\xe1\xed\x14\xe7\x9d\xfc\x84" + "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf" + "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d" + "\x7b\x4f\x38\x55\x36\x71\x64\xc1" + "\xfc\x5c\x75\x52\x33\x02\x18\xf8" + "\x17\xe1\x2b\xc2\x43\x39\xbd\x76" + "\x9b\x63\x76\x32\x2f\x19\x72\x10" + "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5" + "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c", + .ilen = 512, + .result = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" + "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20\x21\x22\x23\x24\x25\x26\x27" + "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" + "\x30\x31\x32\x33\x34\x35\x36\x37" + "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" + "\x40\x41\x42\x43\x44\x45\x46\x47" + "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" + "\x50\x51\x52\x53\x54\x55\x56\x57" + "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" + "\x60\x61\x62\x63\x64\x65\x66\x67" + "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" + "\x70\x71\x72\x73\x74\x75\x76\x77" + "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" + "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" + "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" + "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" + "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" + "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" + "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" + "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" + "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" + "\xe8\xe9\xea\xeb\xec\xed\xee\xef" + "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + 
"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .rlen = 512, + .also_non_np = 1, + .np = 3, + .tap = { 512 - 20, 4, 16 }, + } +}; + /* Cast6 test vectors from RFC 2612 */ static const struct cipher_testvec cast6_enc_tv_template[] = { { diff --git a/crypto/vmac.c b/crypto/vmac.c index df76a816cfb2..bb2fc787d615 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -1,6 +1,10 @@ /* - * Modified to interface to the Linux kernel + * VMAC: Message Authentication Code using Universal Hashing + * + * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01 + * * Copyright (c) 2009, Intel Corporation. + * Copyright (c) 2018, Google Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -16,14 +20,15 @@ * Place - Suite 330, Boston, MA 02111-1307 USA. */ -/* -------------------------------------------------------------------------- - * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. - * This implementation is herby placed in the public domain. - * The authors offers no warranty. Use at your own risk. - * Please send bug reports to the authors. - * Last modified: 17 APR 08, 1700 PDT - * ----------------------------------------------------------------------- */ +/* + * Derived from: + * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. + * This implementation is herby placed in the public domain. + * The authors offers no warranty. Use at your own risk. + * Last modified: 17 APR 08, 1700 PDT + */ +#include #include #include #include @@ -31,9 +36,35 @@ #include #include #include -#include #include +/* + * User definable settings. + */ +#define VMAC_TAG_LEN 64 +#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */ +#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8) +#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/ + +/* per-transform (per-key) context */ +struct vmac_tfm_ctx { + struct crypto_cipher *cipher; + u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)]; + u64 polykey[2*VMAC_TAG_LEN/64]; + u64 l3key[2*VMAC_TAG_LEN/64]; +}; + +/* per-request context */ +struct vmac_desc_ctx { + union { + u8 partial[VMAC_NHBYTES]; /* partial block */ + __le64 partial_words[VMAC_NHBYTES / 8]; + }; + unsigned int partial_size; /* size of the partial block */ + bool first_block_processed; + u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */ +}; + /* * Constants and masks */ @@ -318,13 +349,6 @@ static void poly_step_func(u64 *ahi, u64 *alo, } while (0) #endif -static void vhash_abort(struct vmac_ctx *ctx) -{ - ctx->polytmp[0] = ctx->polykey[0] ; - ctx->polytmp[1] = ctx->polykey[1] ; - ctx->first_block_processed = 0; -} - static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len) { u64 rh, rl, t, z = 0; @@ -364,280 +388,209 @@ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len) return rl; } -static void vhash_update(const unsigned char *m, - unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */ - struct vmac_ctx *ctx) +/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */ +static void vhash_blocks(const struct vmac_tfm_ctx *tctx, + struct vmac_desc_ctx *dctx, + const __le64 *mptr, unsigned int blocks) { - u64 rh, rl, *mptr; - const u64 *kptr = (u64 *)ctx->nhkey; - int i; - u64 ch, cl; - u64 pkh = ctx->polykey[0]; - u64 pkl = ctx->polykey[1]; - - if (!mbytes) - return; - - BUG_ON(mbytes % VMAC_NHBYTES); - - mptr = (u64 *)m; - i = mbytes / VMAC_NHBYTES; /* Must be non-zero */ - - ch = ctx->polytmp[0]; - cl = ctx->polytmp[1]; - - if 
(!ctx->first_block_processed) { - ctx->first_block_processed = 1; + const u64 *kptr = tctx->nhkey; + const u64 pkh = tctx->polykey[0]; + const u64 pkl = tctx->polykey[1]; + u64 ch = dctx->polytmp[0]; + u64 cl = dctx->polytmp[1]; + u64 rh, rl; + + if (!dctx->first_block_processed) { + dctx->first_block_processed = true; nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); rh &= m62; ADD128(ch, cl, rh, rl); mptr += (VMAC_NHBYTES/sizeof(u64)); - i--; + blocks--; } - while (i--) { + while (blocks--) { nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); rh &= m62; poly_step(ch, cl, pkh, pkl, rh, rl); mptr += (VMAC_NHBYTES/sizeof(u64)); } - ctx->polytmp[0] = ch; - ctx->polytmp[1] = cl; + dctx->polytmp[0] = ch; + dctx->polytmp[1] = cl; } -static u64 vhash(unsigned char m[], unsigned int mbytes, - u64 *tagl, struct vmac_ctx *ctx) +static int vmac_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) { - u64 rh, rl, *mptr; - const u64 *kptr = (u64 *)ctx->nhkey; - int i, remaining; - u64 ch, cl; - u64 pkh = ctx->polykey[0]; - u64 pkl = ctx->polykey[1]; - - mptr = (u64 *)m; - i = mbytes / VMAC_NHBYTES; - remaining = mbytes % VMAC_NHBYTES; - - if (ctx->first_block_processed) { - ch = ctx->polytmp[0]; - cl = ctx->polytmp[1]; - } else if (i) { - nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl); - ch &= m62; - ADD128(ch, cl, pkh, pkl); - mptr += (VMAC_NHBYTES/sizeof(u64)); - i--; - } else if (remaining) { - nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl); - ch &= m62; - ADD128(ch, cl, pkh, pkl); - mptr += (VMAC_NHBYTES/sizeof(u64)); - goto do_l3; - } else {/* Empty String */ - ch = pkh; cl = pkl; - goto do_l3; - } - - while (i--) { - nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl); - rh &= m62; - poly_step(ch, cl, pkh, pkl, rh, rl); - mptr += (VMAC_NHBYTES/sizeof(u64)); - } - if (remaining) { - nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl); - rh &= m62; - poly_step(ch, cl, pkh, pkl, rh, rl); - } - -do_l3: - vhash_abort(ctx); - remaining *= 8; - return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining); -} + struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm); + __be64 out[2]; + u8 in[16] = { 0 }; + unsigned int i; + int err; -static u64 vmac(unsigned char m[], unsigned int mbytes, - const unsigned char n[16], u64 *tagl, - struct vmac_ctx_t *ctx) -{ - u64 *in_n, *out_p; - u64 p, h; - int i; - - in_n = ctx->__vmac_ctx.cached_nonce; - out_p = ctx->__vmac_ctx.cached_aes; - - i = n[15] & 1; - if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) { - in_n[0] = *(u64 *)(n); - in_n[1] = *(u64 *)(n+8); - ((unsigned char *)in_n)[15] &= 0xFE; - crypto_cipher_encrypt_one(ctx->child, - (unsigned char *)out_p, (unsigned char *)in_n); - - ((unsigned char *)in_n)[15] |= (unsigned char)(1-i); + if (keylen != VMAC_KEY_LEN) { + crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; } - p = be64_to_cpup(out_p + i); - h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx); - return le64_to_cpu(p + h); -} -static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx) -{ - u64 in[2] = {0}, out[2]; - unsigned i; - int err = 0; - - err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN); + err = crypto_cipher_setkey(tctx->cipher, key, keylen); if (err) return err; /* Fill nh key */ - ((unsigned char *)in)[0] = 0x80; - for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) { - crypto_cipher_encrypt_one(ctx->child, - (unsigned char *)out, (unsigned char *)in); - ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out); - ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1); 
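/*
 * A minimal illustration of the subkey derivation that both the old and new
 * vmac_setkey() here implement: the NH, polynomial, and L3 subkeys are each
 * produced by encrypting an all-zero 16-byte block whose first byte selects
 * the subkey class (0x80 for NH, 0xC0 for poly, 0xE0 for L3) and whose last
 * byte is an incrementing counter. The sketch below is standalone userspace
 * C, not part of this patch; encrypt_block() is a hypothetical one-block
 * AES-128 primitive standing in for crypto_cipher_encrypt_one().
 */

#include <stdint.h>

/* hypothetical single-block AES-128 encryption (assumption, not provided) */
void encrypt_block(const uint8_t key[16], const uint8_t in[16], uint8_t out[16]);

static uint64_t get_be64(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];	/* big-endian load, like be64_to_cpup() */
	return v;
}

static void derive_subkeys(const uint8_t aes_key[16], uint8_t class_byte,
			   uint64_t *dst, unsigned int n_u64)
{
	uint8_t in[16] = { 0 }, out[16];
	unsigned int i;

	in[0] = class_byte;	/* 0x80 = NH key, 0xC0 = poly key, 0xE0 = L3 key */
	for (i = 0; i < n_u64; i += 2) {
		encrypt_block(aes_key, in, out);
		dst[i] = get_be64(out);
		dst[i + 1] = get_be64(out + 8);
		in[15]++;	/* next counter block */
	}
}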
- ((unsigned char *)in)[15] += 1; + in[0] = 0x80; + for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) { + crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in); + tctx->nhkey[i] = be64_to_cpu(out[0]); + tctx->nhkey[i+1] = be64_to_cpu(out[1]); + in[15]++; } /* Fill poly key */ - ((unsigned char *)in)[0] = 0xC0; - in[1] = 0; - for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) { - crypto_cipher_encrypt_one(ctx->child, - (unsigned char *)out, (unsigned char *)in); - ctx->__vmac_ctx.polytmp[i] = - ctx->__vmac_ctx.polykey[i] = - be64_to_cpup(out) & mpoly; - ctx->__vmac_ctx.polytmp[i+1] = - ctx->__vmac_ctx.polykey[i+1] = - be64_to_cpup(out+1) & mpoly; - ((unsigned char *)in)[15] += 1; + in[0] = 0xC0; + in[15] = 0; + for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) { + crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in); + tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly; + tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly; + in[15]++; } /* Fill ip key */ - ((unsigned char *)in)[0] = 0xE0; - in[1] = 0; - for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) { + in[0] = 0xE0; + in[15] = 0; + for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) { do { - crypto_cipher_encrypt_one(ctx->child, - (unsigned char *)out, (unsigned char *)in); - ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out); - ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1); - ((unsigned char *)in)[15] += 1; - } while (ctx->__vmac_ctx.l3key[i] >= p64 - || ctx->__vmac_ctx.l3key[i+1] >= p64); + crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in); + tctx->l3key[i] = be64_to_cpu(out[0]); + tctx->l3key[i+1] = be64_to_cpu(out[1]); + in[15]++; + } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64); } - /* Invalidate nonce/aes cache and reset other elements */ - ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */ - ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */ - ctx->__vmac_ctx.first_block_processed = 0; - - return err; + return 0; } -static int vmac_setkey(struct crypto_shash *parent, - const u8 *key, unsigned int keylen) +static int vmac_init(struct shash_desc *desc) { - struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); + const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct vmac_desc_ctx *dctx = shash_desc_ctx(desc); - if (keylen != VMAC_KEY_LEN) { - crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - - return vmac_set_key((u8 *)key, ctx); -} - -static int vmac_init(struct shash_desc *pdesc) -{ + dctx->partial_size = 0; + dctx->first_block_processed = false; + memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp)); return 0; } -static int vmac_update(struct shash_desc *pdesc, const u8 *p, - unsigned int len) +static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len) { - struct crypto_shash *parent = pdesc->tfm; - struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); - int expand; - int min; - - expand = VMAC_NHBYTES - ctx->partial_size > 0 ? - VMAC_NHBYTES - ctx->partial_size : 0; - - min = len < expand ? 
len : expand; - - memcpy(ctx->partial + ctx->partial_size, p, min); - ctx->partial_size += min; - - if (len < expand) - return 0; - - vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx); - ctx->partial_size = 0; - - len -= expand; - p += expand; + const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct vmac_desc_ctx *dctx = shash_desc_ctx(desc); + unsigned int n; + + if (dctx->partial_size) { + n = min(len, VMAC_NHBYTES - dctx->partial_size); + memcpy(&dctx->partial[dctx->partial_size], p, n); + dctx->partial_size += n; + p += n; + len -= n; + if (dctx->partial_size == VMAC_NHBYTES) { + vhash_blocks(tctx, dctx, dctx->partial_words, 1); + dctx->partial_size = 0; + } + } - if (len % VMAC_NHBYTES) { - memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES), - len % VMAC_NHBYTES); - ctx->partial_size = len % VMAC_NHBYTES; + if (len >= VMAC_NHBYTES) { + n = round_down(len, VMAC_NHBYTES); + /* TODO: 'p' may be misaligned here */ + vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES); + p += n; + len -= n; } - vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx); + if (len) { + memcpy(dctx->partial, p, len); + dctx->partial_size = len; + } return 0; } -static int vmac_final(struct shash_desc *pdesc, u8 *out) +static u64 vhash_final(const struct vmac_tfm_ctx *tctx, + struct vmac_desc_ctx *dctx) { - struct crypto_shash *parent = pdesc->tfm; - struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); - vmac_t mac; - u8 nonce[16] = {}; - - /* vmac() ends up accessing outside the array bounds that - * we specify. In appears to access up to the next 2-word - * boundary. We'll just be uber cautious and zero the - * unwritten bytes in the buffer. - */ - if (ctx->partial_size) { - memset(ctx->partial + ctx->partial_size, 0, - VMAC_NHBYTES - ctx->partial_size); + unsigned int partial = dctx->partial_size; + u64 ch = dctx->polytmp[0]; + u64 cl = dctx->polytmp[1]; + + /* L1 and L2-hash the final block if needed */ + if (partial) { + /* Zero-pad to next 128-bit boundary */ + unsigned int n = round_up(partial, 16); + u64 rh, rl; + + memset(&dctx->partial[partial], 0, n - partial); + nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl); + rh &= m62; + if (dctx->first_block_processed) + poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1], + rh, rl); + else + ADD128(ch, cl, rh, rl); } - mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx); - memcpy(out, &mac, sizeof(vmac_t)); - memzero_explicit(&mac, sizeof(vmac_t)); - memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); - ctx->partial_size = 0; + + /* L3-hash the 128-bit output of L2-hash */ + return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8); +} + +static int vmac_final(struct shash_desc *desc, u8 *out) +{ + const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + struct vmac_desc_ctx *dctx = shash_desc_ctx(desc); + static const u8 nonce[16] = {}; /* TODO: this is insecure */ + union { + u8 bytes[16]; + __be64 pads[2]; + } block; + int index; + u64 hash, pad; + + /* Finish calculating the VHASH of the message */ + hash = vhash_final(tctx, dctx); + + /* Generate pseudorandom pad by encrypting the nonce */ + memcpy(&block, nonce, 16); + index = block.bytes[15] & 1; + block.bytes[15] &= ~1; + crypto_cipher_encrypt_one(tctx->cipher, block.bytes, block.bytes); + pad = be64_to_cpu(block.pads[index]); + + /* The VMAC is the sum of VHASH and the pseudorandom pad */ + put_unaligned_le64(hash + pad, out); return 0; } static int vmac_init_tfm(struct crypto_tfm *tfm) { - struct crypto_cipher *cipher; - 
struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); struct crypto_spawn *spawn = crypto_instance_ctx(inst); - struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); + struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm); + struct crypto_cipher *cipher; cipher = crypto_spawn_cipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); - ctx->child = cipher; + tctx->cipher = cipher; return 0; } static void vmac_exit_tfm(struct crypto_tfm *tfm) { - struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); - crypto_free_cipher(ctx->child); + struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm); + + crypto_free_cipher(tctx->cipher); } static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) @@ -655,6 +608,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) if (IS_ERR(alg)) return PTR_ERR(alg); + err = -EINVAL; + if (alg->cra_blocksize != 16) + goto out_put_alg; + inst = shash_alloc_instance("vmac", alg); err = PTR_ERR(inst); if (IS_ERR(inst)) @@ -670,11 +627,12 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.base.cra_blocksize = alg->cra_blocksize; inst->alg.base.cra_alignmask = alg->cra_alignmask; - inst->alg.digestsize = sizeof(vmac_t); - inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t); + inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx); inst->alg.base.cra_init = vmac_init_tfm; inst->alg.base.cra_exit = vmac_exit_tfm; + inst->alg.descsize = sizeof(struct vmac_desc_ctx); + inst->alg.digestsize = VMAC_TAG_LEN / 8; inst->alg.init = vmac_init; inst->alg.update = vmac_update; inst->alg.final = vmac_final; diff --git a/crypto/xor.c b/crypto/xor.c index 263af9fb45ea..bce9fe7af40a 100644 --- a/crypto/xor.c +++ b/crypto/xor.c @@ -122,12 +122,7 @@ calibrate_xor_blocks(void) goto out; } - /* - * Note: Since the memory is not actually used for _anything_ but to - * test the XOR speed, we don't really want kmemcheck to warn about - * reading uninitialized bytes here. - */ - b1 = (void *) __get_free_pages(GFP_KERNEL | __GFP_NOTRACK, 2); + b1 = (void *) __get_free_pages(GFP_KERNEL, 2); if (!b1) { printk(KERN_WARNING "xor: Yikes! No memory available.\n"); return -ENOMEM; } diff --git a/drivers/Kconfig b/drivers/Kconfig index 1d7af3c2ff27..cb03e487342d 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -77,6 +77,8 @@ source "drivers/hwmon/Kconfig" source "drivers/thermal/Kconfig" +source "drivers/trusty/Kconfig" + source "drivers/watchdog/Kconfig" source "drivers/ssb/Kconfig" @@ -209,4 +211,13 @@ source "drivers/tee/Kconfig" source "drivers/mux/Kconfig" +source "drivers/sdw/Kconfig" + +source "drivers/opp/Kconfig" + +source "drivers/vbs/Kconfig" + +source "drivers/acrn/Kconfig" + +source "drivers/vhm/Kconfig" endmenu diff --git a/drivers/Makefile b/drivers/Makefile index d242d3514d30..9c42d2c009c3 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -53,6 +53,9 @@ obj-$(CONFIG_REGULATOR) += regulator/ # reset controllers early, since gpu drivers might rely on them to initialize obj-$(CONFIG_RESET_CONTROLLER) += reset/ +# Put mmc/ early, as many modern devices use an eMMC/SD card as rootfs storage +obj-y += mmc/ + # tty/ comes before char/ so that the VT console is the boot-time # default.
obj-y += tty/ @@ -105,6 +108,7 @@ obj-$(CONFIG_TC) += tc/ obj-$(CONFIG_UWB) += uwb/ obj-$(CONFIG_USB_PHY) += usb/ obj-$(CONFIG_USB) += usb/ +obj-$(CONFIG_USB_SUPPORT) += usb/ obj-$(CONFIG_PCI) += usb/ obj-$(CONFIG_USB_GADGET) += usb/ obj-$(CONFIG_OF) += usb/ @@ -119,6 +123,7 @@ obj-$(CONFIG_W1) += w1/ obj-y += power/ obj-$(CONFIG_HWMON) += hwmon/ obj-$(CONFIG_THERMAL) += thermal/ +obj-$(CONFIG_TRUSTY) += trusty/ obj-$(CONFIG_WATCHDOG) += watchdog/ obj-$(CONFIG_MD) += md/ obj-$(CONFIG_BT) += bluetooth/ @@ -126,9 +131,9 @@ obj-$(CONFIG_ACCESSIBILITY) += accessibility/ obj-$(CONFIG_ISDN) += isdn/ obj-$(CONFIG_EDAC) += edac/ obj-$(CONFIG_EISA) += eisa/ +obj-$(CONFIG_PM_OPP) += opp/ obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_CPU_IDLE) += cpuidle/ -obj-y += mmc/ obj-$(CONFIG_MEMSTICK) += memstick/ obj-$(CONFIG_NEW_LEDS) += leds/ obj-$(CONFIG_INFINIBAND) += infiniband/ @@ -146,6 +151,7 @@ obj-$(CONFIG_OF) += of/ obj-$(CONFIG_SSB) += ssb/ obj-$(CONFIG_BCMA) += bcma/ obj-$(CONFIG_VHOST_RING) += vhost/ +obj-$(CONFIG_VBS) += vbs/ obj-$(CONFIG_VHOST) += vhost/ obj-$(CONFIG_VLYNQ) += vlynq/ obj-$(CONFIG_STAGING) += staging/ @@ -182,3 +188,6 @@ obj-$(CONFIG_FPGA) += fpga/ obj-$(CONFIG_FSI) += fsi/ obj-$(CONFIG_TEE) += tee/ obj-$(CONFIG_MULTIPLEXER) += mux/ +obj-$(CONFIG_SDW) += sdw/ +obj-$(CONFIG_ACRN_VHM) += vhm/ +obj-$(CONFIG_ACRN) += acrn/ diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 5b1938f4b626..c9781175e59e 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -536,4 +536,20 @@ if ARM64 source "drivers/acpi/arm64/Kconfig" endif +config TPS68470_PMIC_OPREGION + bool "ACPI operation region support for TPS68470 PMIC" + depends on MFD_TPS68470 + help + This config adds ACPI operation region support for the TI TPS68470 PMIC. + The TPS68470 device is an advanced power management unit that powers + a Compact Camera Module (CCM), generates clocks for image sensors, + drives a dual LED for flash and incorporates two LED drivers for + general purpose indicators. + This driver enables ACPI operation region support to control voltage + regulators and clocks. + + This option is a bool as it provides an ACPI operation + region, which must be available before any of the devices + using this are probed.
+ endif # ACPI diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index cd1abc9bc325..709c1120f315 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -108,6 +108,8 @@ obj-$(CONFIG_CHT_WC_PMIC_OPREGION) += pmic/intel_pmic_chtwc.o obj-$(CONFIG_ACPI_CONFIGFS) += acpi_configfs.o +obj-$(CONFIG_TPS68470_PMIC_OPREGION) += pmic/tps68470_pmic.o + video-objs += acpi_video.o video_detect.o obj-y += dptf/ diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 032ae44710e5..f573a317ad93 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -69,6 +69,10 @@ ACPI_MODULE_NAME("acpi_lpss"); #define LPSS_SAVE_CTX BIT(4) #define LPSS_NO_D3_DELAY BIT(5) +/* Crystal Cove PMIC shares same ACPI ID between different platforms */ +#define BYT_CRC_HRV 2 +#define CHT_CRC_HRV 3 + struct lpss_private_data; struct lpss_device_desc { @@ -162,7 +166,7 @@ static void byt_pwm_setup(struct lpss_private_data *pdata) if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1")) return; - if (!acpi_dev_present("INT33FD", NULL, -1)) + if (!acpi_dev_present("INT33FD", NULL, BYT_CRC_HRV)) pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup)); } @@ -229,11 +233,13 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = { static const struct lpss_device_desc byt_pwm_dev_desc = { .flags = LPSS_SAVE_CTX, + .prv_offset = 0x800, .setup = byt_pwm_setup, }; static const struct lpss_device_desc bsw_pwm_dev_desc = { .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, + .prv_offset = 0x800, .setup = bsw_pwm_setup, }; @@ -465,6 +471,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev, acpi_dev_free_resource_list(&resource_list); if (!pdata->mmio_base) { + /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */ + adev->pnp.type.platform_id = 0; /* Skip the device, but continue the namespace scan. 
*/ ret = 0; goto err_out; @@ -693,7 +701,7 @@ static int acpi_lpss_activate(struct device *dev) struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); int ret; - ret = acpi_dev_runtime_resume(dev); + ret = acpi_dev_resume(dev); if (ret) return ret; @@ -737,7 +745,7 @@ static int acpi_lpss_resume_early(struct device *dev) struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); int ret; - ret = acpi_dev_resume_early(dev); + ret = acpi_dev_resume(dev); if (ret) return ret; @@ -872,7 +880,7 @@ static int acpi_lpss_runtime_resume(struct device *dev) if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) lpss_iosf_exit_d3_state(); - ret = acpi_dev_runtime_resume(dev); + ret = acpi_dev_resume(dev); if (ret) return ret; @@ -894,7 +902,7 @@ static struct dev_pm_domain acpi_lpss_pm_domain = { #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP .prepare = acpi_subsys_prepare, - .complete = pm_complete_with_resume_check, + .complete = acpi_subsys_complete, .suspend = acpi_subsys_suspend, .suspend_late = acpi_lpss_suspend_late, .resume_early = acpi_lpss_resume_early, diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 754431031282..552c1f725b6c 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -110,6 +110,7 @@ static void round_robin_cpu(unsigned int tsk_index) cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus); if (cpumask_empty(tmp)) { mutex_unlock(&round_robin_lock); + free_cpumask_var(tmp); return; } for_each_cpu(cpu, tmp) { @@ -127,6 +128,8 @@ static void round_robin_cpu(unsigned int tsk_index) mutex_unlock(&round_robin_lock); set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu)); + + free_cpumask_var(tmp); } static void exit_round_robin(unsigned int tsk_index) diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 0972ec0e2eb8..dbdd460a9958 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -80,8 +80,8 @@ MODULE_PARM_DESC(report_key_events, static bool device_id_scheme = false; module_param(device_id_scheme, bool, 0444); -static bool only_lcd = false; -module_param(only_lcd, bool, 0444); +static int only_lcd = -1; +module_param(only_lcd, int, 0444); static int register_count; static DEFINE_MUTEX(register_count_mutex); @@ -2123,6 +2123,25 @@ static int __init intel_opregion_present(void) return opregion; } +static bool dmi_is_desktop(void) +{ + const char *chassis_type; + + chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); + if (!chassis_type) + return false; + + if (!strcmp(chassis_type, "3") || /* 3: Desktop */ + !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */ + !strcmp(chassis_type, "5") || /* 5: Pizza Box */ + !strcmp(chassis_type, "6") || /* 6: Mini Tower */ + !strcmp(chassis_type, "7") || /* 7: Tower */ + !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */ + return true; + + return false; +} + int acpi_video_register(void) { int ret = 0; @@ -2136,6 +2155,20 @@ int acpi_video_register(void) goto leave; } + /* + * We're seeing a lot of bogus backlight interfaces on newer machines + * without an LCD such as desktops, servers and HDMI sticks. Checking + * the lcd flag fixes this, so enable this on any machines which are + * win8 ready (where we also prefer the native backlight driver, so + * normally the acpi_video code should not register there anyway).
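The dmi_is_desktop() check above keys the backlight decision off the SMBIOS chassis-type field. For a feel for the data it consumes, the same heuristic can be reproduced from userspace against the standard sysfs DMI attribute; this is a minimal sketch, not kernel code, and is_desktop_chassis() is a hypothetical name:

#include <stdio.h>
#include <string.h>

/* Userspace sketch: read the SMBIOS chassis type that the kernel's
 * dmi_get_system_info(DMI_CHASSIS_TYPE) call returns internally. */
static int is_desktop_chassis(void)
{
	char buf[8] = "";
	FILE *f = fopen("/sys/class/dmi/id/chassis_type", "r");

	if (!f)
		return 0;
	if (fgets(buf, sizeof(buf), f))
		buf[strcspn(buf, "\n")] = '\0';	/* strip trailing newline */
	fclose(f);

	/* 3=Desktop 4=Low Profile 5=Pizza Box 6=Mini Tower 7=Tower 11=Server */
	return !strcmp(buf, "3") || !strcmp(buf, "4") || !strcmp(buf, "5") ||
	       !strcmp(buf, "6") || !strcmp(buf, "7") || !strcmp(buf, "11");
}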
+ */ + if (only_lcd == -1) { + if (dmi_is_desktop() && acpi_osi_is_win8()) + only_lcd = true; + else + only_lcd = false; + } + dmi_check_system(video_dmi_table); ret = acpi_bus_register_driver(&acpi_video_bus); diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index 11b113f8e367..4bde16fb97d8 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c @@ -12,23 +12,64 @@ #define pr_fmt(fmt) "ACPI: watchdog: " fmt #include +#include #include #include #include "internal.h" +static const struct dmi_system_id acpi_watchdog_skip[] = { + { + /* + * On Lenovo Z50-70 there are two issues with the WDAT + * table. First, some of the instructions use RTC SRAM + * to store persistent information. This does not work well + * with the Linux RTC driver. Second, and more importantly, + * the instructions do not actually reset the system. + * + * On this particular system iTCO_wdt seems to work just + * fine so we prefer that over WDAT for now. + * + * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033. + */ + .ident = "Lenovo Z50-70", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20354"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Z50-70"), + }, + }, + {} +}; + +static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void) +{ + const struct acpi_table_wdat *wdat = NULL; + acpi_status status; + + if (acpi_disabled) + return NULL; + + if (dmi_check_system(acpi_watchdog_skip)) + return NULL; + + status = acpi_get_table(ACPI_SIG_WDAT, 0, + (struct acpi_table_header **)&wdat); + if (ACPI_FAILURE(status)) { + /* It is fine if there is no WDAT */ + return NULL; + } + + return wdat; +} + /** * Returns true if this system should prefer ACPI based watchdog instead of * the native one (which are typically the same hardware).
*/ bool acpi_has_watchdog(void) { - struct acpi_table_header hdr; - - if (acpi_disabled) - return false; - - return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr)); + return !!acpi_watchdog_get_wdat(); } EXPORT_SYMBOL_GPL(acpi_has_watchdog); @@ -41,12 +82,10 @@ void __init acpi_watchdog_init(void) struct platform_device *pdev; struct resource *resources; size_t nresources = 0; - acpi_status status; int i; - status = acpi_get_table(ACPI_SIG_WDAT, 0, - (struct acpi_table_header **)&wdat); - if (ACPI_FAILURE(status)) { + wdat = acpi_watchdog_get_wdat(); + if (!wdat) { /* It is fine if there is no WDAT */ return; } @@ -74,10 +113,10 @@ void __init acpi_watchdog_init(void) res.start = gas->address; if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { res.flags = IORESOURCE_MEM; - res.end = res.start + ALIGN(gas->access_width, 4); + res.end = res.start + ALIGN(gas->access_width, 4) - 1; } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { res.flags = IORESOURCE_IO; - res.end = res.start + gas->access_width; + res.end = res.start + gas->access_width - 1; } else { pr_warn("Unsupported address space: %u\n", gas->space_id); diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index e05232da0588..71f6f2624deb 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -178,6 +178,7 @@ acpi-y += \ utresrc.o \ utstate.o \ utstring.o \ + utstrsuppt.o \ utstrtoul64.o \ utxface.o \ utxfinit.o \ diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index fd4f3cacb356..cd722d8edacb 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -66,9 +66,9 @@ acpi_status acpi_hw_validate_register(struct acpi_generic_address *reg, u8 max_bit_width, u64 *address); -acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg); +acpi_status acpi_hw_read(u64 *value, struct acpi_generic_address *reg); -acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg); +acpi_status acpi_hw_write(u64 value, struct acpi_generic_address *reg); struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id); diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h index 29a863c85318..29555c8789a3 100644 --- a/drivers/acpi/acpica/acinterp.h +++ b/drivers/acpi/acpica/acinterp.h @@ -101,7 +101,8 @@ typedef const struct acpi_exdump_info { */ acpi_status acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc, - union acpi_operand_object **result_desc, u32 flags); + union acpi_operand_object **result_desc, + u32 implicit_conversion); acpi_status acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc, @@ -424,9 +425,6 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc, struct acpi_walk_state *walk_state, u8 implicit_conversion); -#define ACPI_IMPLICIT_CONVERSION TRUE -#define ACPI_NO_IMPLICIT_CONVERSION FALSE - /* * exstoren - resolve/store object */ diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 745134ade35f..b40561ebd792 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -141,6 +141,11 @@ extern const char *acpi_gbl_ptyp_decode[]; #define ACPI_MSG_SUFFIX \ acpi_os_printf (" (%8.8X/%s-%u)\n", ACPI_CA_VERSION, module_name, line_number) +/* Flags to indicate implicit or explicit string-to-integer conversion */ + +#define ACPI_IMPLICIT_CONVERSION TRUE +#define ACPI_NO_IMPLICIT_CONVERSION FALSE + /* Types for Resource descriptor entries */ #define ACPI_INVALID_RESOURCE 0 @@ -197,15 +202,33 
@@ void acpi_ut_strlwr(char *src_string); int acpi_ut_stricmp(char *string1, char *string2); -acpi_status acpi_ut_strtoul64(char *string, u32 flags, u64 *ret_integer); +/* + * utstrsuppt - string-to-integer conversion support functions + */ +acpi_status acpi_ut_convert_octal_string(char *string, u64 *return_value); + +acpi_status acpi_ut_convert_decimal_string(char *string, u64 *return_value_ptr); + +acpi_status acpi_ut_convert_hex_string(char *string, u64 *return_value_ptr); + +char acpi_ut_remove_whitespace(char **string); + +char acpi_ut_remove_leading_zeros(char **string); + +u8 acpi_ut_detect_hex_prefix(char **string); + +void acpi_ut_remove_hex_prefix(char **string); + +u8 acpi_ut_detect_octal_prefix(char **string); /* - * Values for Flags above - * Note: LIMIT values correspond to acpi_gbl_integer_byte_width values (4/8) + * utstrtoul64 - string-to-integer conversion functions */ -#define ACPI_STRTOUL_32BIT 0x04 /* 4 bytes */ -#define ACPI_STRTOUL_64BIT 0x08 /* 8 bytes */ -#define ACPI_STRTOUL_BASE16 0x10 /* Default: Base10/16 */ +acpi_status acpi_ut_strtoul64(char *string, u64 *ret_integer); + +u64 acpi_ut_explicit_strtoul64(char *string); + +u64 acpi_ut_implicit_strtoul64(char *string); /* * utglobal - Global data structures and procedures diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c index 857dbc43a9b1..32d546f0db2f 100644 --- a/drivers/acpi/acpica/dbconvert.c +++ b/drivers/acpi/acpica/dbconvert.c @@ -277,10 +277,7 @@ acpi_db_convert_to_object(acpi_object_type type, default: object->type = ACPI_TYPE_INTEGER; - status = acpi_ut_strtoul64(string, - (acpi_gbl_integer_byte_width | - ACPI_STRTOUL_BASE16), - &object->integer.value); + status = acpi_ut_strtoul64(string, &object->integer.value); break; } diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index 20d7744b06ae..22f45d090733 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c @@ -134,7 +134,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state, * object. Implicitly convert the argument if necessary. */ status = acpi_ex_convert_to_integer(obj_desc, &local_obj_desc, - ACPI_STRTOUL_BASE16); + ACPI_IMPLICIT_CONVERSION); if (ACPI_FAILURE(status)) { goto cleanup; } diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c index d3b6b314fa50..37b0b4c04220 100644 --- a/drivers/acpi/acpica/evevent.c +++ b/drivers/acpi/acpica/evevent.c @@ -204,6 +204,7 @@ u32 acpi_ev_fixed_event_detect(void) u32 fixed_status; u32 fixed_enable; u32 i; + acpi_status status; ACPI_FUNCTION_NAME(ev_fixed_event_detect); @@ -211,8 +212,12 @@ u32 acpi_ev_fixed_event_detect(void) * Read the fixed feature status and enable registers, as all the cases * depend on their values. Ignore errors here. 
*/ - (void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status); - (void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable); + status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status); + status |= + acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable); + if (ACPI_FAILURE(status)) { + return (int_status); + } ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, "Fixed Event Block: Enable %08X Status %08X\n", diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 229382035550..263d8fc4a9e2 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -390,8 +390,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list) struct acpi_gpe_handler_info *gpe_handler_info; u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; u8 enabled_status_byte; - u32 status_reg; - u32 enable_reg; + u64 status_reg; + u64 enable_reg; acpi_cpu_flags flags; u32 i; u32 j; @@ -472,7 +472,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list) gpe_register_info->base_gpe_number, gpe_register_info->base_gpe_number + (ACPI_GPE_REGISTER_WIDTH - 1), - status_reg, enable_reg, + (u32)status_reg, (u32)enable_reg, gpe_register_info->enable_for_run, gpe_register_info->enable_for_wake)); diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c index 76bfb7dcae2f..59b8de2f07d3 100644 --- a/drivers/acpi/acpica/exconcat.c +++ b/drivers/acpi/acpica/exconcat.c @@ -156,7 +156,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, status = acpi_ex_convert_to_integer(local_operand1, &temp_operand1, - ACPI_STRTOUL_BASE16); + ACPI_IMPLICIT_CONVERSION); break; case ACPI_TYPE_BUFFER: diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c index f71028e334ee..23ebadb06a95 100644 --- a/drivers/acpi/acpica/exconvrt.c +++ b/drivers/acpi/acpica/exconvrt.c @@ -57,10 +57,10 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length); * * FUNCTION: acpi_ex_convert_to_integer * - * PARAMETERS: obj_desc - Object to be converted. Must be an - * Integer, Buffer, or String - * result_desc - Where the new Integer object is returned - * flags - Used for string conversion + * PARAMETERS: obj_desc - Object to be converted. Must be an + * Integer, Buffer, or String + * result_desc - Where the new Integer object is returned + * implicit_conversion - Used for string conversion * * RETURN: Status * @@ -70,14 +70,14 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length); acpi_status acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc, - union acpi_operand_object **result_desc, u32 flags) + union acpi_operand_object **result_desc, + u32 implicit_conversion) { union acpi_operand_object *return_desc; u8 *pointer; u64 result; u32 i; u32 count; - acpi_status status; ACPI_FUNCTION_TRACE_PTR(ex_convert_to_integer, obj_desc); @@ -123,12 +123,18 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc, * hexadecimal as per the ACPI specification. The only exception (as * of ACPI 3.0) is that the to_integer() operator allows both decimal * and hexadecimal strings (hex prefixed with "0x"). + * + * Explicit conversion is used only by to_integer. + * All other string-to-integer conversions are implicit conversions. 
*/ - status = acpi_ut_strtoul64(ACPI_CAST_PTR(char, pointer), - (acpi_gbl_integer_byte_width | - flags), &result); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); + if (implicit_conversion) { + result = + acpi_ut_implicit_strtoul64(ACPI_CAST_PTR + (char, pointer)); + } else { + result = + acpi_ut_explicit_strtoul64(ACPI_CAST_PTR + (char, pointer)); } break; @@ -631,7 +637,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type, */ status = acpi_ex_convert_to_integer(source_desc, result_desc, - ACPI_STRTOUL_BASE16); + ACPI_IMPLICIT_CONVERSION); break; case ACPI_TYPE_STRING: diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c index 1e7649ce0a7b..dbad3ebd7df5 100644 --- a/drivers/acpi/acpica/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c @@ -330,7 +330,7 @@ acpi_ex_do_logical_op(u16 opcode, case ACPI_TYPE_INTEGER: status = acpi_ex_convert_to_integer(operand1, &local_operand1, - ACPI_STRTOUL_BASE16); + ACPI_IMPLICIT_CONVERSION); break; case ACPI_TYPE_STRING: diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c index c4852429e2ff..1c7c9962b0de 100644 --- a/drivers/acpi/acpica/exresop.c +++ b/drivers/acpi/acpica/exresop.c @@ -415,7 +415,7 @@ acpi_ex_resolve_operands(u16 opcode, * Known as "Implicit Source Operand Conversion" */ status = acpi_ex_convert_to_integer(obj_desc, stack_ptr, - ACPI_STRTOUL_BASE16); + ACPI_IMPLICIT_CONVERSION); if (ACPI_FAILURE(status)) { if (status == AE_TYPE) { ACPI_ERROR((AE_INFO, diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 5eb11b30a79e..09b6822aa5cc 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -99,7 +99,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action) { struct acpi_gpe_register_info *gpe_register_info; acpi_status status = AE_OK; - u32 enable_mask; + u64 enable_mask; u32 register_bit; ACPI_FUNCTION_ENTRY(); @@ -214,7 +214,7 @@ acpi_status acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info, acpi_event_status *event_status) { - u32 in_byte; + u64 in_byte; u32 register_bit; struct acpi_gpe_register_info *gpe_register_info; acpi_event_status local_event_status = 0; diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index acb417b58bbb..872e793577ef 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c @@ -220,16 +220,15 @@ acpi_hw_validate_register(struct acpi_generic_address *reg, * * RETURN: Status * - * DESCRIPTION: Read from either memory or IO space. This is a 32-bit max - * version of acpi_read, used internally since the overhead of - * 64-bit values is not needed. + * DESCRIPTION: Read from either memory or IO space. This is a 64-bit max + * version of acpi_read. * * LIMITATIONS: * space_ID must be system_memory or system_IO. 
* ******************************************************************************/ -acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) +acpi_status acpi_hw_read(u64 *value, struct acpi_generic_address *reg) { u64 address; u8 access_width; @@ -244,17 +243,17 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) /* Validate contents of the GAS register */ - status = acpi_hw_validate_register(reg, 32, &address); + status = acpi_hw_validate_register(reg, 64, &address); if (ACPI_FAILURE(status)) { return (status); } /* - * Initialize entire 32-bit return value to zero, convert access_width + * Initialize entire 64-bit return value to zero, convert access_width * into number of bits based */ *value = 0; - access_width = acpi_hw_get_access_bit_width(address, reg, 32); + access_width = acpi_hw_get_access_bit_width(address, reg, 64); bit_width = reg->bit_offset + reg->bit_width; bit_offset = reg->bit_offset; @@ -265,7 +264,7 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) index = 0; while (bit_width) { if (bit_offset >= access_width) { - value32 = 0; + value64 = 0; bit_offset -= access_width; } else { if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { @@ -276,7 +275,6 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) ACPI_DIV_8 (access_width), &value64, access_width); - value32 = (u32)value64; } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ status = acpi_hw_read_port((acpi_io_address) @@ -286,15 +284,16 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) (access_width), &value32, access_width); + value64 = (u64)value32; } } /* * Use offset style bit writes because "Index * AccessWidth" is - * ensured to be less than 32-bits by acpi_hw_validate_register(). + * ensured to be less than 64-bits by acpi_hw_validate_register(). */ ACPI_SET_BITS(value, index * access_width, - ACPI_MASK_BITS_ABOVE_32(access_width), value32); + ACPI_MASK_BITS_ABOVE_64(access_width), value64); bit_width -= bit_width > access_width ? access_width : bit_width; @@ -302,8 +301,9 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) } ACPI_DEBUG_PRINT((ACPI_DB_IO, - "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n", - *value, access_width, ACPI_FORMAT_UINT64(address), + "Read: %8.8X%8.8X width %2d from %8.8X%8.8X (%s)\n", + ACPI_FORMAT_UINT64(*value), access_width, + ACPI_FORMAT_UINT64(address), acpi_ut_get_region_name(reg->space_id))); return (status); @@ -318,20 +318,18 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) * * RETURN: Status * - * DESCRIPTION: Write to either memory or IO space. This is a 32-bit max - * version of acpi_write, used internally since the overhead of - * 64-bit values is not needed. + * DESCRIPTION: Write to either memory or IO space. This is a 64-bit max + * version of acpi_write. 
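The widened acpi_hw_read() above builds the full register value by reading access_width-sized chunks and inserting each at its bit offset with ACPI_SET_BITS(). A stand-alone sketch of that assembly loop, simplified by omitting the bit_offset skip path; read_chunk() is a hypothetical accessor returning one naturally aligned chunk:

#include <stdint.h>

static uint64_t assemble_wide_read(uint32_t bit_width, uint8_t access_width,
				   uint64_t (*read_chunk)(uint8_t index))
{
	uint64_t mask = (access_width >= 64) ? ~0ULL
					     : ((1ULL << access_width) - 1);
	uint64_t value = 0;
	uint8_t index = 0;

	while (bit_width) {
		/* Offset-style insert: chunk i lands at bits [i*w, i*w + w) */
		value |= (read_chunk(index) & mask) << (index * access_width);

		bit_width -= (bit_width > access_width) ? access_width
							: bit_width;
		index++;
	}
	return value;
}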
* ******************************************************************************/ -acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) +acpi_status acpi_hw_write(u64 value, struct acpi_generic_address *reg) { u64 address; u8 access_width; u32 bit_width; u8 bit_offset; u64 value64; - u32 value32; u8 index; acpi_status status; @@ -339,14 +337,14 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) /* Validate contents of the GAS register */ - status = acpi_hw_validate_register(reg, 32, &address); + status = acpi_hw_validate_register(reg, 64, &address); if (ACPI_FAILURE(status)) { return (status); } /* Convert access_width into number of bits based */ - access_width = acpi_hw_get_access_bit_width(address, reg, 32); + access_width = acpi_hw_get_access_bit_width(address, reg, 64); bit_width = reg->bit_offset + reg->bit_width; bit_offset = reg->bit_offset; @@ -358,16 +356,15 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) while (bit_width) { /* * Use offset style bit reads because "Index * AccessWidth" is - * ensured to be less than 32-bits by acpi_hw_validate_register(). + * ensured to be less than 64-bits by acpi_hw_validate_register(). */ - value32 = ACPI_GET_BITS(&value, index * access_width, - ACPI_MASK_BITS_ABOVE_32(access_width)); + value64 = ACPI_GET_BITS(&value, index * access_width, + ACPI_MASK_BITS_ABOVE_64(access_width)); if (bit_offset >= access_width) { bit_offset -= access_width; } else { if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { - value64 = (u64)value32; status = acpi_os_write_memory((acpi_physical_address) address + @@ -382,7 +379,7 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) index * ACPI_DIV_8 (access_width), - value32, + (u32)value64, access_width); } } @@ -397,8 +394,9 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) } ACPI_DEBUG_PRINT((ACPI_DB_IO, - "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n", - value, access_width, ACPI_FORMAT_UINT64(address), + "Wrote: %8.8X%8.8X width %2d to %8.8X%8.8X (%s)\n", + ACPI_FORMAT_UINT64(value), access_width, + ACPI_FORMAT_UINT64(address), acpi_ut_get_region_name(reg->space_id))); return (status); @@ -526,6 +524,7 @@ acpi_status acpi_hw_write_pm1_control(u32 pm1a_control, u32 pm1b_control) acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value) { u32 value = 0; + u64 value64; acpi_status status; ACPI_FUNCTION_TRACE(hw_register_read); @@ -564,12 +563,19 @@ acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value) case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */ status = - acpi_hw_read(&value, &acpi_gbl_FADT.xpm2_control_block); + acpi_hw_read(&value64, &acpi_gbl_FADT.xpm2_control_block); + if (ACPI_SUCCESS(status)) { + value = (u32)value64; + } break; case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ - status = acpi_hw_read(&value, &acpi_gbl_FADT.xpm_timer_block); + status = acpi_hw_read(&value64, &acpi_gbl_FADT.xpm_timer_block); + if (ACPI_SUCCESS(status)) { + value = (u32)value64; + } + break; case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ @@ -586,7 +592,7 @@ acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value) } if (ACPI_SUCCESS(status)) { - *return_value = value; + *return_value = (u32)value; } return_ACPI_STATUS(status); @@ -622,6 +628,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value) { acpi_status status; u32 read_value; + u64 read_value64; ACPI_FUNCTION_TRACE(hw_register_write); @@ -685,11 +692,12 @@ acpi_status 
acpi_hw_register_write(u32 register_id, u32 value) * as per the ACPI spec. */ status = - acpi_hw_read(&read_value, + acpi_hw_read(&read_value64, &acpi_gbl_FADT.xpm2_control_block); if (ACPI_FAILURE(status)) { goto exit; } + read_value = (u32)read_value64; /* Insert the bits to be preserved */ @@ -745,22 +753,25 @@ acpi_hw_read_multiple(u32 *value, { u32 value_a = 0; u32 value_b = 0; + u64 value64; acpi_status status; /* The first register is always required */ - status = acpi_hw_read(&value_a, register_a); + status = acpi_hw_read(&value64, register_a); if (ACPI_FAILURE(status)) { return (status); } + value_a = (u32)value64; /* Second register is optional */ if (register_b->address) { - status = acpi_hw_read(&value_b, register_b); + status = acpi_hw_read(&value64, register_b); if (ACPI_FAILURE(status)) { return (status); } + value_b = (u32)value64; } /* diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c index b3c5d8c754bb..a2f4e25d45b1 100644 --- a/drivers/acpi/acpica/hwtimer.c +++ b/drivers/acpi/acpica/hwtimer.c @@ -94,6 +94,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer_resolution) acpi_status acpi_get_timer(u32 * ticks) { acpi_status status; + u64 timer_value; ACPI_FUNCTION_TRACE(acpi_get_timer); @@ -107,7 +108,14 @@ acpi_status acpi_get_timer(u32 * ticks) return_ACPI_STATUS(AE_SUPPORT); } - status = acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block); + status = acpi_hw_read(&timer_value, &acpi_gbl_FADT.xpm_timer_block); + if (ACPI_SUCCESS(status)) { + + /* ACPI PM Timer is defined to be 32 bits (PM_TMR_LEN) */ + + *ticks = (u32)timer_value; + } + return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index 34684ae89981..b3c6e439933c 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -125,76 +125,12 @@ ACPI_EXPORT_SYMBOL(acpi_reset) ******************************************************************************/ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg) { - u32 value_lo; - u32 value_hi; - u32 width; - u64 address; acpi_status status; ACPI_FUNCTION_NAME(acpi_read); - if (!return_value) { - return (AE_BAD_PARAMETER); - } - - /* Validate contents of the GAS register. Allow 64-bit transfers */ - - status = acpi_hw_validate_register(reg, 64, &address); - if (ACPI_FAILURE(status)) { - return (status); - } - - /* - * Two address spaces supported: Memory or I/O. 
PCI_Config is - * not supported here because the GAS structure is insufficient - */ - if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { - status = acpi_os_read_memory((acpi_physical_address) - address, return_value, - reg->bit_width); - if (ACPI_FAILURE(status)) { - return (status); - } - } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ - - value_lo = 0; - value_hi = 0; - - width = reg->bit_width; - if (width == 64) { - width = 32; /* Break into two 32-bit transfers */ - } - - status = acpi_hw_read_port((acpi_io_address) - address, &value_lo, width); - if (ACPI_FAILURE(status)) { - return (status); - } - - if (reg->bit_width == 64) { - - /* Read the top 32 bits */ - - status = acpi_hw_read_port((acpi_io_address) - (address + 4), &value_hi, - 32); - if (ACPI_FAILURE(status)) { - return (status); - } - } - - /* Set the return value only if status is AE_OK */ - - *return_value = (value_lo | ((u64)value_hi << 32)); - } - - ACPI_DEBUG_PRINT((ACPI_DB_IO, - "Read: %8.8X%8.8X width %2d from %8.8X%8.8X (%s)\n", - ACPI_FORMAT_UINT64(*return_value), reg->bit_width, - ACPI_FORMAT_UINT64(address), - acpi_ut_get_region_name(reg->space_id))); - - return (AE_OK); + status = acpi_hw_read(return_value, reg); + return (status); } ACPI_EXPORT_SYMBOL(acpi_read) @@ -213,59 +149,11 @@ ACPI_EXPORT_SYMBOL(acpi_read) ******************************************************************************/ acpi_status acpi_write(u64 value, struct acpi_generic_address *reg) { - u32 width; - u64 address; acpi_status status; ACPI_FUNCTION_NAME(acpi_write); - /* Validate contents of the GAS register. Allow 64-bit transfers */ - - status = acpi_hw_validate_register(reg, 64, &address); - if (ACPI_FAILURE(status)) { - return (status); - } - - /* - * Two address spaces supported: Memory or IO. 
PCI_Config is - * not supported here because the GAS structure is insufficient - */ - if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { - status = acpi_os_write_memory((acpi_physical_address) - address, value, reg->bit_width); - if (ACPI_FAILURE(status)) { - return (status); - } - } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ - - width = reg->bit_width; - if (width == 64) { - width = 32; /* Break into two 32-bit transfers */ - } - - status = acpi_hw_write_port((acpi_io_address) - address, ACPI_LODWORD(value), - width); - if (ACPI_FAILURE(status)) { - return (status); - } - - if (reg->bit_width == 64) { - status = acpi_hw_write_port((acpi_io_address) - (address + 4), - ACPI_HIDWORD(value), 32); - if (ACPI_FAILURE(status)) { - return (status); - } - } - } - - ACPI_DEBUG_PRINT((ACPI_DB_IO, - "Wrote: %8.8X%8.8X width %2d to %8.8X%8.8X (%s)\n", - ACPI_FORMAT_UINT64(value), reg->bit_width, - ACPI_FORMAT_UINT64(address), - acpi_ut_get_region_name(reg->space_id))); - + status = acpi_hw_write(value, reg); return (status); } diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c index e4a7da8a11f0..539d775bbc92 100644 --- a/drivers/acpi/acpica/nsconvert.c +++ b/drivers/acpi/acpica/nsconvert.c @@ -78,8 +78,8 @@ acpi_ns_convert_to_integer(union acpi_operand_object *original_object, /* String-to-Integer conversion */ - status = acpi_ut_strtoul64(original_object->string.pointer, - acpi_gbl_integer_byte_width, &value); + status = + acpi_ut_strtoul64(original_object->string.pointer, &value); if (ACPI_FAILURE(status)) { return (status); } diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c index d22167cbd0ca..f13d3cfa74e1 100644 --- a/drivers/acpi/acpica/nseval.c +++ b/drivers/acpi/acpica/nseval.c @@ -308,6 +308,14 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info) /* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */ status = AE_OK; + } else if (ACPI_FAILURE(status)) { + + /* If return_object exists, delete it */ + + if (info->return_object) { + acpi_ut_remove_reference(info->return_object); + info->return_object = NULL; + } } ACPI_DEBUG_PRINT((ACPI_DB_NAMES, diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index eb9dfaca555f..11ce4e5d10e2 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ -890,6 +890,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, ACPI_POSSIBLE_METHOD_CALL); if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) { + + /* Free method call op and corresponding namestring sub-object */ + + acpi_ps_free_op(arg->common.value.arg); acpi_ps_free_op(arg); arg = NULL; walk_state->arg_count = 1; diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index 26ad596c973e..5ecb8d2e6834 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -173,10 +173,13 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void) ACPI_FUNCTION_TRACE(acpi_reallocate_root_table); /* - * Only reallocate the root table if the host provided a static buffer - * for the table array in the call to acpi_initialize_tables. + * If there are unverified tables, the root table list must be + * reallocated to clean up invalid table entries. Otherwise only + * reallocate the root table list if the host provided a static buffer + * for the table array in the call to acpi_initialize_tables().
*/ - if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { + if ((acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) && + acpi_gbl_enable_table_validation) { return_ACPI_STATUS(AE_SUPPORT); } diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c new file mode 100644 index 000000000000..2dab47616e91 --- /dev/null +++ b/drivers/acpi/acpica/utstrsuppt.c @@ -0,0 +1,458 @@ +/******************************************************************************* + * + * Module Name: utstrsuppt - Support functions for string-to-integer conversion + * + ******************************************************************************/ + +/* + * Copyright (C) 2000 - 2017, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include +#include "accommon.h" + +#define _COMPONENT ACPI_UTILITIES +ACPI_MODULE_NAME("utstrsuppt") + +/* Local prototypes */ +static acpi_status +acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit); + +static acpi_status +acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product); + +static acpi_status +acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum); + +/******************************************************************************* + * + * FUNCTION: acpi_ut_convert_octal_string + * + * PARAMETERS: string - Null terminated input string + * return_value_ptr - Where the converted value is returned + * + * RETURN: Status and 64-bit converted integer + * + * DESCRIPTION: Performs a base 8 conversion of the input string to an + * integer value, either 32 or 64 bits. 
+ * + * NOTE: Maximum 64-bit unsigned octal value is 01777777777777777777777 + * Maximum 32-bit unsigned octal value is 037777777777 + * + ******************************************************************************/ + +acpi_status acpi_ut_convert_octal_string(char *string, u64 *return_value_ptr) +{ + u64 accumulated_value = 0; + acpi_status status = AE_OK; + + /* Convert each ASCII byte in the input string */ + + while (*string) { + + /* Character must be ASCII 0-7, otherwise terminate with no error */ + + if (!(ACPI_IS_OCTAL_DIGIT(*string))) { + break; + } + + /* Convert and insert this octal digit into the accumulator */ + + status = acpi_ut_insert_digit(&accumulated_value, 8, *string); + if (ACPI_FAILURE(status)) { + status = AE_OCTAL_OVERFLOW; + break; + } + + string++; + } + + /* Always return the value that has been accumulated */ + + *return_value_ptr = accumulated_value; + return (status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_convert_decimal_string + * + * PARAMETERS: string - Null terminated input string + * return_value_ptr - Where the converted value is returned + * + * RETURN: Status and 64-bit converted integer + * + * DESCRIPTION: Performs a base 10 conversion of the input string to an + * integer value, either 32 or 64 bits. + * + * NOTE: Maximum 64-bit unsigned decimal value is 18446744073709551615 + * Maximum 32-bit unsigned decimal value is 4294967295 + * + ******************************************************************************/ + +acpi_status acpi_ut_convert_decimal_string(char *string, u64 *return_value_ptr) +{ + u64 accumulated_value = 0; + acpi_status status = AE_OK; + + /* Convert each ASCII byte in the input string */ + + while (*string) { + + /* Character must be ASCII 0-9, otherwise terminate with no error */ + + if (!isdigit(*string)) { + break; + } + + /* Convert and insert this decimal digit into the accumulator */ + + status = acpi_ut_insert_digit(&accumulated_value, 10, *string); + if (ACPI_FAILURE(status)) { + status = AE_DECIMAL_OVERFLOW; + break; + } + + string++; + } + + /* Always return the value that has been accumulated */ + + *return_value_ptr = accumulated_value; + return (status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_convert_hex_string + * + * PARAMETERS: string - Null terminated input string + * return_value_ptr - Where the converted value is returned + * + * RETURN: Status and 64-bit converted integer + * + * DESCRIPTION: Performs a base 16 conversion of the input string to an + * integer value, either 32 or 64 bits. 
+ * + * NOTE: Maximum 64-bit unsigned hex value is 0xFFFFFFFFFFFFFFFF + * Maximum 32-bit unsigned hex value is 0xFFFFFFFF + * + ******************************************************************************/ + +acpi_status acpi_ut_convert_hex_string(char *string, u64 *return_value_ptr) +{ + u64 accumulated_value = 0; + acpi_status status = AE_OK; + + /* Convert each ASCII byte in the input string */ + + while (*string) { + + /* Must be ASCII A-F, a-f, or 0-9, otherwise terminate with no error */ + + if (!isxdigit(*string)) { + break; + } + + /* Convert and insert this hex digit into the accumulator */ + + status = acpi_ut_insert_digit(&accumulated_value, 16, *string); + if (ACPI_FAILURE(status)) { + status = AE_HEX_OVERFLOW; + break; + } + + string++; + } + + /* Always return the value that has been accumulated */ + + *return_value_ptr = accumulated_value; + return (status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_remove_leading_zeros + * + * PARAMETERS: string - Pointer to input ASCII string + * + * RETURN: Next character after any leading zeros. This character may be + * used by the caller to detect end-of-string. + * + * DESCRIPTION: Remove any leading zeros in the input string. Return the + * next character after the final ASCII zero to enable the caller + * to check for the end of the string (NULL terminator). + * + ******************************************************************************/ + +char acpi_ut_remove_leading_zeros(char **string) +{ + + while (**string == ACPI_ASCII_ZERO) { + *string += 1; + } + + return (**string); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_remove_whitespace + * + * PARAMETERS: string - Pointer to input ASCII string + * + * RETURN: Next character after any whitespace. This character may be + * used by the caller to detect end-of-string. + * + * DESCRIPTION: Remove any leading whitespace in the input string. Return the + * next character after the final whitespace character to enable + * the caller to check for the end of the string (NULL terminator).
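Note the calling convention shared by these helpers: each takes a char ** and consumes input by advancing the caller's pointer in place, returning the character now under the cursor so the caller can test for the terminator in one step. A generic sketch of the idiom (hypothetical stand-alone code, not part of this patch):

#include <ctype.h>

/* Advance *string past every character accepted by classify(); the
 * returned character is '\0' exactly when the string is exhausted. */
static char skip_leading(char **string, int (*classify)(int))
{
	while (classify((unsigned char)**string))
		(*string)++;

	return **string;
}

Called as skip_leading(&p, isspace), this mirrors acpi_ut_remove_whitespace() above.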
+ * + ******************************************************************************/ + +char acpi_ut_remove_whitespace(char **string) +{ + + while (isspace((u8)**string)) { + *string += 1; + } + + return (**string); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_detect_hex_prefix + * + * PARAMETERS: string - Pointer to input ASCII string + * + * RETURN: TRUE if a "0x" prefix was found at the start of the string + * + * DESCRIPTION: Detect and remove a hex "0x" prefix + * + ******************************************************************************/ + +u8 acpi_ut_detect_hex_prefix(char **string) +{ + char *initial_position = *string; + + acpi_ut_remove_hex_prefix(string); + if (*string != initial_position) { + return (TRUE); /* String is past leading 0x */ + } + + return (FALSE); /* Not a hex string */ +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_remove_hex_prefix + * + * PARAMETERS: string - Pointer to input ASCII string + * + * RETURN: none + * + * DESCRIPTION: Remove a hex "0x" prefix + * + ******************************************************************************/ + +void acpi_ut_remove_hex_prefix(char **string) +{ + if ((**string == ACPI_ASCII_ZERO) && + (tolower((int)*(*string + 1)) == 'x')) { + *string += 2; /* Go past the leading 0x */ + } +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_detect_octal_prefix + * + * PARAMETERS: string - Pointer to input ASCII string + * + * RETURN: True if an octal "0" prefix was found at the start of the + * string + * + * DESCRIPTION: Detect and remove an octal prefix (zero) + * + ******************************************************************************/ + +u8 acpi_ut_detect_octal_prefix(char **string) +{ + + if (**string == ACPI_ASCII_ZERO) { + *string += 1; /* Go past the leading 0 */ + return (TRUE); + } + + return (FALSE); /* Not an octal string */ +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_insert_digit + * + * PARAMETERS: accumulated_value - Current value of the integer value + * accumulator. The new value is + * returned here. + * base - Radix, either 8/10/16 + * ascii_digit - ASCII single digit to be inserted + * + * RETURN: Status and result of the convert/insert operation. The only + * possible returned exception code is numeric overflow of + * either the multiply or add conversion operations. + * + * DESCRIPTION: Generic conversion and insertion function for all bases: + * + * 1) Multiply the current accumulated/converted value by the + * base in order to make room for the new character. + * + * 2) Convert the new character to binary and add it to the + * current accumulated value. 
+ * + * Note: The only possible exception indicates an integer + * overflow (AE_NUMERIC_OVERFLOW) + * + ******************************************************************************/ + +static acpi_status +acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit) +{ + acpi_status status; + u64 product; + + /* Make room in the accumulated value for the incoming digit */ + + status = acpi_ut_strtoul_multiply64(*accumulated_value, base, &product); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Add in the new digit, and store the sum to the accumulated value */ + + status = + acpi_ut_strtoul_add64(product, + acpi_ut_ascii_char_to_hex(ascii_digit), + accumulated_value); + + return (status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strtoul_multiply64 + * + * PARAMETERS: multiplicand - Current accumulated converted integer + * multiplier - Base/Radix + * out_product - Where the product is returned + * + * RETURN: Status and 64-bit product + * + * DESCRIPTION: Multiply two 64-bit values, with checking for 64-bit overflow as + * well as 32-bit overflow if necessary (if the current global + * integer width is 32). + * + ******************************************************************************/ + +static acpi_status +acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product) +{ + u64 val; + + /* Exit if either operand is zero */ + + *out_product = 0; + if (!multiplicand || !multiplier) { + return (AE_OK); + } + + /* Check for 64-bit overflow before the actual multiplication */ + + acpi_ut_short_divide(ACPI_UINT64_MAX, (u32)multiplier, &val, NULL); + if (multiplicand > val) { + return (AE_NUMERIC_OVERFLOW); + } + + val = multiplicand * multiplier; + + /* Check for 32-bit overflow if necessary */ + + if ((acpi_gbl_integer_bit_width == 32) && (val > ACPI_UINT32_MAX)) { + return (AE_NUMERIC_OVERFLOW); + } + + *out_product = val; + return (AE_OK); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strtoul_add64 + * + * PARAMETERS: addend1 - Current accumulated converted integer + * addend2 - New hex value/char + * out_sum - Where sum is returned (Accumulator) + * + * RETURN: Status and 64-bit sum + * + * DESCRIPTION: Add two 64-bit values, with checking for 64-bit overflow as + * well as 32-bit overflow if necessary (if the current global + * integer width is 32). 
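Together, acpi_ut_insert_digit() and the multiply/add helpers implement the standard pre-checked accumulate step for string parsing: value = value * base + digit, where each multiply and add is tested for wraparound before it is performed. A compact, self-contained sketch of the same technique (hypothetical names, hex digits only, 64-bit width only):

#include <stdint.h>
#include <ctype.h>

/* Returns 0 on success, -1 on 64-bit overflow (cf. AE_NUMERIC_OVERFLOW). */
static int checked_hex_accumulate(uint64_t *acc, int ascii_digit)
{
	uint64_t digit = isdigit(ascii_digit)
			? (uint64_t)(ascii_digit - '0')
			: (uint64_t)(tolower(ascii_digit) - 'a' + 10);

	/* acc * 16 wraps iff acc > UINT64_MAX / 16 */
	if (*acc > UINT64_MAX / 16)
		return -1;

	/* (acc * 16) + digit wraps iff digit > UINT64_MAX - acc * 16 */
	if (digit > UINT64_MAX - (*acc * 16))
		return -1;

	*acc = (*acc * 16) + digit;
	return 0;
}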
+ * + ******************************************************************************/ + +static acpi_status acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum) +{ + u64 sum; + + /* Check for 64-bit overflow before the actual addition */ + + if ((addend1 > 0) && (addend2 > (ACPI_UINT64_MAX - addend1))) { + return (AE_NUMERIC_OVERFLOW); + } + + sum = addend1 + addend2; + + /* Check for 32-bit overflow if necessary */ + + if ((acpi_gbl_integer_bit_width == 32) && (sum > ACPI_UINT32_MAX)) { + return (AE_NUMERIC_OVERFLOW); + } + + *out_sum = sum; + return (AE_OK); +} diff --git a/drivers/acpi/acpica/utstrtoul64.c b/drivers/acpi/acpica/utstrtoul64.c index 9633ee142855..cdbad5ef7870 100644 --- a/drivers/acpi/acpica/utstrtoul64.c +++ b/drivers/acpi/acpica/utstrtoul64.c @@ -1,6 +1,7 @@ /******************************************************************************* * - * Module Name: utstrtoul64 - string to 64-bit integer support + * Module Name: utstrtoul64 - String-to-integer conversion support for both + * 64-bit and 32-bit integers * ******************************************************************************/ @@ -44,304 +45,319 @@ #include #include "accommon.h" -/******************************************************************************* - * - * The functions in this module satisfy the need for 64-bit string-to-integer - * conversions on both 32-bit and 64-bit platforms. - * - ******************************************************************************/ - #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utstrtoul64") -/* Local prototypes */ -static u64 acpi_ut_strtoul_base10(char *string, u32 flags); - -static u64 acpi_ut_strtoul_base16(char *string, u32 flags); - /******************************************************************************* * - * String conversion rules as written in the ACPI specification. The error - * conditions and behavior are different depending on the type of conversion. - * - * - * Implicit data type conversion: string-to-integer - * -------------------------------------------------- - * - * Base is always 16. This is the ACPI_STRTOUL_BASE16 case. - * - * Example: - * Add ("BA98", Arg0, Local0) - * - * The integer is initialized to the value zero. - * The ASCII string is interpreted as a hexadecimal constant. + * This module contains the top-level string to 64/32-bit unsigned integer + * conversion functions: * - * 1) A "0x" prefix is not allowed. However, ACPICA allows this for - * compatibility with previous ACPICA. (NO ERROR) + * 1) A standard strtoul() function that supports 64-bit integers, base + * 8/10/16, with integer overflow support. This is used mainly by the + * iASL compiler, which implements tighter constraints on integer + * constants than the runtime (interpreter) integer-to-string conversions. + * 2) Runtime "Explicit conversion" as defined in the ACPI specification. + * 3) Runtime "Implicit conversion" as defined in the ACPI specification. * - * 2) Terminates when the size of an integer is reached (32 or 64 bits). - * (NO ERROR) + * Current users of this module: * - * 3) The first non-hex character terminates the conversion without error. - * (NO ERROR) - * - * 4) Conversion of a null (zero-length) string to an integer is not - * allowed. However, ACPICA allows this for compatibility with previous - * ACPICA. This conversion returns the value 0. 
(NO ERROR) - * - * - * Explicit data type conversion: to_integer() with string operand - * --------------------------------------------------------------- - * - * Base is either 10 (default) or 16 (with 0x prefix) - * - * Examples: - * to_integer ("1000") - * to_integer ("0xABCD") - * - * 1) Can be (must be) either a decimal or hexadecimal numeric string. - * A hex value must be prefixed by "0x" or it is interpreted as a decimal. + * iASL - Preprocessor (constants and math expressions) + * iASL - Main parser, conversion of constants to integers + * iASL - Data Table Compiler parser (constants and math expressions) + * interpreter - Implicit and explicit conversions, GPE method names + * interpreter - Repair code for return values from predefined names + * debugger - Command line input string conversion + * acpi_dump - ACPI table physical addresses + * acpi_exec - Support for namespace overrides * - * 2) The value must not exceed the maximum of an integer value. ACPI spec - * states the behavior is "unpredictable", so ACPICA matches the behavior - * of the implicit conversion case.(NO ERROR) + * Notes concerning users of these interfaces: * - * 3) Behavior on the first non-hex character is not specified by the ACPI - * spec, so ACPICA matches the behavior of the implicit conversion case - * and terminates. (NO ERROR) + * acpi_gbl_integer_byte_width is used to set the 32/64 bit limit for explicit + * and implicit conversions. This global must be set to the proper width. + * For the core ACPICA code, the width depends on the DSDT version. For the + * acpi_ut_strtoul64 interface, all conversions are 64 bits. This interface is + * used primarily for iASL, where the default width is 64 bits for all parsers, + * but error checking is performed later to flag cases where a 64-bit constant + * is wrongly defined in a 32-bit DSDT/SSDT. * - * 4) A null (zero-length) string is illegal. - * However, ACPICA allows this for compatibility with previous ACPICA. - * This conversion returns the value 0. (NO ERROR) + * In ACPI, the only place where octal numbers are supported is within + * the ASL language itself. This is implemented via the main acpi_ut_strtoul64 + * interface. According the ACPI specification, there is no ACPI runtime + * support (explicit/implicit) for octal string conversions. * ******************************************************************************/ - /******************************************************************************* * * FUNCTION: acpi_ut_strtoul64 * - * PARAMETERS: string - Null terminated input string - * flags - Conversion info, see below + * PARAMETERS: string - Null terminated input string, + * must be a valid pointer * return_value - Where the converted integer is - * returned - * - * RETURN: Status and Converted value + * returned. Must be a valid pointer * - * DESCRIPTION: Convert a string into an unsigned value. Performs either a - * 32-bit or 64-bit conversion, depending on the input integer - * size in Flags (often the current mode of the interpreter). + * RETURN: Status and converted integer. Returns an exception on a + * 64-bit numeric overflow * - * Values for Flags: - * ACPI_STRTOUL_32BIT - Max integer value is 32 bits - * ACPI_STRTOUL_64BIT - Max integer value is 64 bits - * ACPI_STRTOUL_BASE16 - Input string is hexadecimal. Default - * is 10/16 based on string prefix (0x). + * DESCRIPTION: Convert a string into an unsigned integer. Always performs a + * full 64-bit conversion, regardless of the current global + * integer width. 
Supports Decimal, Hex, and Octal strings. * - * NOTES: - * Negative numbers are not supported, as they are not supported by ACPI. + * Current users of this function: * - * Supports only base 16 or base 10 strings/values. Does not - * support Octal strings, as these are not supported by ACPI. - * - * Current users of this support: - * - * interpreter - Implicit and explicit conversions, GPE method names - * debugger - Command line input string conversion - * iASL - Main parser, conversion of constants to integers - * iASL - Data Table Compiler parser (constant math expressions) - * iASL - Preprocessor (constant math expressions) - * acpi_dump - Input table addresses - * acpi_exec - Testing of the acpi_ut_strtoul64 function - * - * Note concerning callers: - * acpi_gbl_integer_byte_width can be used to set the 32/64 limit. If used, - * this global should be set to the proper width. For the core ACPICA code, - * this width depends on the DSDT version. For iASL, the default byte - * width is always 8 for the parser, but error checking is performed later - * to flag cases where a 64-bit constant is defined in a 32-bit DSDT/SSDT. + * iASL - Preprocessor (constants and math expressions) + * iASL - Main ASL parser, conversion of ASL constants to integers + * iASL - Data Table Compiler parser (constants and math expressions) + * interpreter - Repair code for return values from predefined names + * acpi_dump - ACPI table physical addresses + * acpi_exec - Support for namespace overrides * ******************************************************************************/ - -acpi_status acpi_ut_strtoul64(char *string, u32 flags, u64 *return_value) +acpi_status acpi_ut_strtoul64(char *string, u64 *return_value) { acpi_status status = AE_OK; - u32 base; + u8 original_bit_width; + u32 base = 10; /* Default is decimal */ ACPI_FUNCTION_TRACE_STR(ut_strtoul64, string); - /* Parameter validation */ - - if (!string || !return_value) { - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - *return_value = 0; - /* Check for zero-length string, returns 0 */ + /* A NULL return string returns a value of zero */ if (*string == 0) { return_ACPI_STATUS(AE_OK); } - /* Skip over any white space at start of string */ - - while (isspace((int)*string)) { - string++; - } - - /* End of string? return 0 */ - - if (*string == 0) { + if (!acpi_ut_remove_whitespace(&string)) { return_ACPI_STATUS(AE_OK); } /* - * 1) The "0x" prefix indicates base 16. Per the ACPI specification, - * the "0x" prefix is only allowed for implicit (non-strict) conversions. - * However, we always allow it for compatibility with older ACPICA. + * 1) Check for a hex constant. A "0x" prefix indicates base 16. */ - if ((*string == ACPI_ASCII_ZERO) && - (tolower((int)*(string + 1)) == 'x')) { - string += 2; /* Go past the 0x */ - if (*string == 0) { - return_ACPI_STATUS(AE_OK); /* Return value 0 */ - } - + if (acpi_ut_detect_hex_prefix(&string)) { base = 16; } - /* 2) Force to base 16 (implicit conversion case) */ - - else if (flags & ACPI_STRTOUL_BASE16) { - base = 16; + /* + * 2) Check for an octal constant, defined to be a leading zero + * followed by sequence of octal digits (0-7) + */ + else if (acpi_ut_detect_octal_prefix(&string)) { + base = 8; } - /* 3) Default fallback is to Base 10 */ - - else { - base = 10; + if (!acpi_ut_remove_leading_zeros(&string)) { + return_ACPI_STATUS(AE_OK); /* Return value 0 */ } - /* Skip all leading zeros */ + /* + * Force a full 64-bit conversion. 
The caller (usually iASL) must + * check for a 32-bit overflow later as necessary (If current mode + * is 32-bit, meaning a 32-bit DSDT). + */ + original_bit_width = acpi_gbl_integer_bit_width; + acpi_gbl_integer_bit_width = 64; - while (*string == ACPI_ASCII_ZERO) { - string++; - if (*string == 0) { - return_ACPI_STATUS(AE_OK); /* Return value 0 */ - } + /* + * Perform the base 8, 10, or 16 conversion. A 64-bit numeric overflow + * will return an exception (to allow iASL to flag the statement). + */ + switch (base) { + case 8: + status = acpi_ut_convert_octal_string(string, return_value); + break; + + case 10: + status = acpi_ut_convert_decimal_string(string, return_value); + break; + + case 16: + default: + status = acpi_ut_convert_hex_string(string, return_value); + break; } - /* Perform the base 16 or 10 conversion */ - - if (base == 16) { - *return_value = acpi_ut_strtoul_base16(string, flags); - } else { - *return_value = acpi_ut_strtoul_base10(string, flags); - } + /* Only possible exception from above is a 64-bit overflow */ + acpi_gbl_integer_bit_width = original_bit_width; return_ACPI_STATUS(status); } /******************************************************************************* * - * FUNCTION: acpi_ut_strtoul_base10 + * FUNCTION: acpi_ut_implicit_strtoul64 + * + * PARAMETERS: string - Null terminated input string, + * must be a valid pointer + * + * RETURN: Converted integer + * + * DESCRIPTION: Perform a 64-bit conversion with restrictions placed upon + * an "implicit conversion" by the ACPI specification. Used by + * many ASL operators that require an integer operand, and support + * an automatic (implicit) conversion from a string operand + * to the final integer operand. The major restriction is that + * only hex strings are supported. + * + * ----------------------------------------------------------------------------- + * + * Base is always 16, either with or without the 0x prefix. Decimal and + * Octal strings are not supported, as per the ACPI specification. + * + * Examples (both are hex values): + * Add ("BA98", Arg0, Local0) + * Subtract ("0x12345678", Arg1, Local1) + * + * Conversion rules as extracted from the ACPI specification: + * + * The converted integer is initialized to the value zero. + * The ASCII string is always interpreted as a hexadecimal constant. + * + * 1) According to the ACPI specification, a "0x" prefix is not allowed. + * However, ACPICA allows this as an ACPI extension on general + * principle. (NO ERROR) + * + * 2) The conversion terminates when the size of an integer is reached + * (32 or 64 bits). There are no numeric overflow conditions. (NO ERROR) + * + * 3) The first non-hex character terminates the conversion and returns + * the current accumulated value of the converted integer (NO ERROR). * - * PARAMETERS: string - Null terminated input string - * flags - Conversion info + * 4) Conversion of a null (zero-length) string to an integer is + * technically not allowed. However, ACPICA allows this as an ACPI + * extension. The conversion returns the value 0. (NO ERROR) * - * RETURN: 64-bit converted integer + * NOTE: There are no error conditions returned by this function. At + * the minimum, a value of zero is returned. * - * DESCRIPTION: Performs a base 10 conversion of the input string to an - * integer value, either 32 or 64 bits. - * Note: String must be valid and non-null. 
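A userspace analogue may make the base dispatch above concrete. This is a sketch only: strtoull() stands in for the acpi_ut_convert_octal/decimal/hex_string() helpers, and the -1 return loosely mimics the AE_NUMERIC_OVERFLOW exception:

    #include <ctype.h>
    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int strtoul64_sketch(const char *s, uint64_t *out)
    {
            int base = 10;                  /* default is decimal */

            while (isspace((unsigned char)*s))
                    s++;
            if (s[0] == '0' && tolower((unsigned char)s[1]) == 'x')
                    base = 16;              /* hex constant */
            else if (s[0] == '0' && s[1] >= '0' && s[1] <= '7')
                    base = 8;               /* octal constant */

            errno = 0;
            *out = strtoull(s, NULL, base);
            return (errno == ERANGE) ? -1 : 0;  /* -1 ~ AE_NUMERIC_OVERFLOW */
    }

    int main(void)
    {
            uint64_t v;

            strtoul64_sketch("0x12345678", &v);     /* hex:     0x12345678 */
            strtoul64_sketch("0755", &v);           /* octal:   493 */
            return strtoul64_sketch("1000", &v);    /* decimal: 1000 */
    }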
+ * Current users of this function: + * + * interpreter - All runtime implicit conversions, as per ACPI specification + * iASL - Data Table Compiler parser (constants and math expressions) * ******************************************************************************/ -static u64 acpi_ut_strtoul_base10(char *string, u32 flags) +u64 acpi_ut_implicit_strtoul64(char *string) { - int ascii_digit; - u64 next_value; - u64 return_value = 0; - - /* Main loop: convert each ASCII byte in the input string */ - - while (*string) { - ascii_digit = *string; - if (!isdigit(ascii_digit)) { - - /* Not ASCII 0-9, terminate */ - - goto exit; - } - - /* Convert and insert (add) the decimal digit */ + u64 converted_integer = 0; - acpi_ut_short_multiply(return_value, 10, &next_value); - next_value += (ascii_digit - ACPI_ASCII_ZERO); + ACPI_FUNCTION_TRACE_STR(ut_implicit_strtoul64, string); - /* Check for overflow (32 or 64 bit) - return current converted value */ + if (!acpi_ut_remove_whitespace(&string)) { + return_VALUE(0); + } - if (((flags & ACPI_STRTOUL_32BIT) && (next_value > ACPI_UINT32_MAX)) || (next_value < return_value)) { /* 64-bit overflow case */ - goto exit; - } + /* + * Per the ACPI specification, only hexadecimal is supported for + * implicit conversions, and the "0x" prefix is "not allowed". + * However, allow a "0x" prefix as an ACPI extension. + */ + acpi_ut_remove_hex_prefix(&string); - return_value = next_value; - string++; + if (!acpi_ut_remove_leading_zeros(&string)) { + return_VALUE(0); } -exit: - return (return_value); + /* + * Ignore overflow as per the ACPI specification. This is implemented by + * ignoring the return status from the conversion function called below. + * On overflow, the input string is simply truncated. + */ + acpi_ut_convert_hex_string(string, &converted_integer); + return_VALUE(converted_integer); } /******************************************************************************* * - * FUNCTION: acpi_ut_strtoul_base16 + * FUNCTION: acpi_ut_explicit_strtoul64 + * + * PARAMETERS: string - Null terminated input string, + * must be a valid pointer * - * PARAMETERS: string - Null terminated input string - * flags - conversion info + * RETURN: Converted integer * - * RETURN: 64-bit converted integer + * DESCRIPTION: Perform a 64-bit conversion with the restrictions placed upon + * an "explicit conversion" by the ACPI specification. The + * main restriction is that only hex and decimal are supported. * - * DESCRIPTION: Performs a base 16 conversion of the input string to an - * integer value, either 32 or 64 bits. - * Note: String must be valid and non-null. + * ----------------------------------------------------------------------------- + * + * Base is either 10 (default) or 16 (with 0x prefix). Octal (base 8) strings + * are not supported, as per the ACPI specification. + * + * Examples: + * to_integer ("1000") Decimal + * to_integer ("0xABCD") Hex + * + * Conversion rules as extracted from the ACPI specification: + * + * 1) The input string is either a decimal or hexadecimal numeric string. + * A hex value must be prefixed by "0x" or it is interpreted as decimal. + * + * 2) The value must not exceed the maximum of an integer value + * (32 or 64 bits). The ACPI specification states the behavior is + * "unpredictable", so ACPICA matches the behavior of the implicit + * conversion case. There are no numeric overflow conditions. 
(NO ERROR) + * + * 3) Behavior on the first non-hex character is not defined by the ACPI + * specification (for the to_integer operator), so ACPICA matches the + * behavior of the implicit conversion case. It terminates the + * conversion and returns the current accumulated value of the converted + * integer. (NO ERROR) + * + * 4) Conversion of a null (zero-length) string to an integer is + * technically not allowed. However, ACPICA allows this as an ACPI + * extension. The conversion returns the value 0. (NO ERROR) + * + * NOTE: There are no error conditions returned by this function. At the + * minimum, a value of zero is returned. + * + * Current users of this function: + * + * interpreter - Runtime ASL to_integer operator, as per the ACPI specification * ******************************************************************************/ -static u64 acpi_ut_strtoul_base16(char *string, u32 flags) +u64 acpi_ut_explicit_strtoul64(char *string) { - int ascii_digit; - u32 valid_digits = 1; - u64 return_value = 0; - - /* Main loop: convert each ASCII byte in the input string */ + u64 converted_integer = 0; + u32 base = 10; /* Default is decimal */ - while (*string) { + ACPI_FUNCTION_TRACE_STR(ut_explicit_strtoul64, string); - /* Check for overflow (32 or 64 bit) - return current converted value */ - - if ((valid_digits > 16) || - ((valid_digits > 8) && (flags & ACPI_STRTOUL_32BIT))) { - goto exit; - } - - ascii_digit = *string; - if (!isxdigit(ascii_digit)) { - - /* Not Hex ASCII A-F, a-f, or 0-9, terminate */ - - goto exit; - } + if (!acpi_ut_remove_whitespace(&string)) { + return_VALUE(0); + } - /* Convert and insert the hex digit */ + /* + * Only Hex and Decimal are supported, as per the ACPI specification. + * A "0x" prefix indicates hex; otherwise decimal is assumed. + */ + if (acpi_ut_detect_hex_prefix(&string)) { + base = 16; + } - acpi_ut_short_shift_left(return_value, 4, &return_value); - return_value |= acpi_ut_ascii_char_to_hex(ascii_digit); + if (!acpi_ut_remove_leading_zeros(&string)) { + return_VALUE(0); + } - string++; - valid_digits++; + /* + * Ignore overflow as per the ACPI specification. This is implemented by + * ignoring the return status from the conversion functions called below. + * On overflow, the input string is simply truncated. 
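The practical difference between the implicit and explicit rule sets is easy to demonstrate from userspace; a plain-C sketch with strtoull() as a stand-in (both rule sets stop at the first invalid character and report no errors):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            /* Implicit (e.g. Add("BA98", Arg0, Local0)): always hex */
            printf("0x%llx\n", strtoull("BA98", NULL, 16));   /* 0xba98 */

            /* Explicit (to_integer): decimal unless "0x"-prefixed */
            printf("%llu\n", strtoull("1000", NULL, 10));     /* 1000 */
            printf("0x%llx\n", strtoull("0xABCD", NULL, 16)); /* 0xabcd */

            /* First non-hex character terminates: "12zz" -> 0x12 */
            printf("0x%llx\n", strtoull("12zz", NULL, 16));
            return 0;
    }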
+ */ + switch (base) { + case 10: + default: + acpi_ut_convert_decimal_string(string, &converted_integer); + break; + + case 16: + acpi_ut_convert_hex_string(string, &converted_integer); + break; } -exit: - return (return_value); + return_VALUE(converted_integer); } diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 2c462beee551..a943cf17faa7 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -1007,7 +1007,7 @@ static ssize_t erst_reader(struct pstore_record *record) /* The record may be cleared by others, try read next record */ if (len == -ENOENT) goto skip; - else if (len < sizeof(*rcd)) { + else if (len < 0 || len < sizeof(*rcd)) { rc = -EIO; goto out; } diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 3c3a37b8503b..f14695e744d0 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -51,6 +51,7 @@ #include #include #include +#include #include #include @@ -112,22 +113,10 @@ static DEFINE_MUTEX(ghes_list_mutex); * Because the memory area used to transfer hardware error information * from BIOS to Linux can be determined only in NMI, IRQ or timer * handler, but general ioremap can not be used in atomic context, so - * a special version of atomic ioremap is implemented for that. - */ - -/* - * Two virtual pages are used, one for IRQ/PROCESS context, the other for - * NMI context (optionally). - */ -#define GHES_IOREMAP_PAGES 2 -#define GHES_IOREMAP_IRQ_PAGE(base) (base) -#define GHES_IOREMAP_NMI_PAGE(base) ((base) + PAGE_SIZE) - -/* virtual memory area for atomic ioremap */ -static struct vm_struct *ghes_ioremap_area; -/* - * These 2 spinlock is used to prevent atomic ioremap virtual memory - * area from being mapped simultaneously. + * the fixmap is used instead. + * + * These 2 spinlocks are used to prevent the fixmap entries from being used + * simultaneously. 
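For orientation, a condensed sketch of what the IRQ-context read path looks like after this conversion (the real logic lives in ghes_copy_tofrom_phys(); this sketch assumes the read fits in one page and that the fixmap headers are available):

    #include <linux/io.h>
    #include <linux/spinlock.h>
    #include <asm/fixmap.h>

    static void ghes_read_phys_sketch(void *dst, u64 paddr, size_t len)
    {
            void __iomem *vaddr;
            unsigned long flags;

            spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
            __set_fixmap(FIX_APEI_GHES_IRQ, paddr & PAGE_MASK,
                         arch_apei_get_mem_attribute(paddr));
            vaddr = (void __iomem *)fix_to_virt(FIX_APEI_GHES_IRQ);
            memcpy_fromio(dst, vaddr + (paddr & ~PAGE_MASK), len);
            clear_fixmap(FIX_APEI_GHES_IRQ);
            spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
    }

The spinlock is what makes one dedicated fixmap slot per context safe: only one user of FIX_APEI_GHES_IRQ can exist at a time.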
*/ static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); static DEFINE_SPINLOCK(ghes_ioremap_lock_irq); @@ -140,71 +129,38 @@ static atomic_t ghes_estatus_cache_alloced; static int ghes_panic_timeout __read_mostly = 30; -static int ghes_ioremap_init(void) -{ - ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES, - VM_IOREMAP, VMALLOC_START, VMALLOC_END); - if (!ghes_ioremap_area) { - pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n"); - return -ENOMEM; - } - - return 0; -} - -static void ghes_ioremap_exit(void) -{ - free_vm_area(ghes_ioremap_area); -} - static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn) { - unsigned long vaddr; phys_addr_t paddr; pgprot_t prot; - vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr); - paddr = pfn << PAGE_SHIFT; prot = arch_apei_get_mem_attribute(paddr); - ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot); + __set_fixmap(FIX_APEI_GHES_NMI, paddr, prot); - return (void __iomem *)vaddr; + return (void __iomem *) fix_to_virt(FIX_APEI_GHES_NMI); } static void __iomem *ghes_ioremap_pfn_irq(u64 pfn) { - unsigned long vaddr, paddr; + phys_addr_t paddr; pgprot_t prot; - vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr); - paddr = pfn << PAGE_SHIFT; prot = arch_apei_get_mem_attribute(paddr); + __set_fixmap(FIX_APEI_GHES_IRQ, paddr, prot); - ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot); - - return (void __iomem *)vaddr; + return (void __iomem *) fix_to_virt(FIX_APEI_GHES_IRQ); } -static void ghes_iounmap_nmi(void __iomem *vaddr_ptr) +static void ghes_iounmap_nmi(void) { - unsigned long vaddr = (unsigned long __force)vaddr_ptr; - void *base = ghes_ioremap_area->addr; - - BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base)); - unmap_kernel_range_noflush(vaddr, PAGE_SIZE); - arch_apei_flush_tlb_one(vaddr); + clear_fixmap(FIX_APEI_GHES_NMI); } -static void ghes_iounmap_irq(void __iomem *vaddr_ptr) +static void ghes_iounmap_irq(void) { - unsigned long vaddr = (unsigned long __force)vaddr_ptr; - void *base = ghes_ioremap_area->addr; - - BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base)); - unmap_kernel_range_noflush(vaddr, PAGE_SIZE); - arch_apei_flush_tlb_one(vaddr); + clear_fixmap(FIX_APEI_GHES_IRQ); } static int ghes_estatus_pool_init(void) @@ -360,10 +316,10 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, paddr += trunk; buffer += trunk; if (in_nmi) { - ghes_iounmap_nmi(vaddr); + ghes_iounmap_nmi(); raw_spin_unlock(&ghes_ioremap_lock_nmi); } else { - ghes_iounmap_irq(vaddr); + ghes_iounmap_irq(); spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags); } } @@ -851,17 +807,8 @@ static void ghes_sea_remove(struct ghes *ghes) synchronize_rcu(); } #else /* CONFIG_ACPI_APEI_SEA */ -static inline void ghes_sea_add(struct ghes *ghes) -{ - pr_err(GHES_PFX "ID: %d, trying to add SEA notification which is not supported\n", - ghes->generic->header.source_id); -} - -static inline void ghes_sea_remove(struct ghes *ghes) -{ - pr_err(GHES_PFX "ID: %d, trying to remove SEA notification which is not supported\n", - ghes->generic->header.source_id); -} +static inline void ghes_sea_add(struct ghes *ghes) { } +static inline void ghes_sea_remove(struct ghes *ghes) { } #endif /* CONFIG_ACPI_APEI_SEA */ #ifdef CONFIG_HAVE_ACPI_APEI_NMI @@ -1063,23 +1010,9 @@ static void ghes_nmi_init_cxt(void) init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq); } #else /* CONFIG_HAVE_ACPI_APEI_NMI */ -static inline void ghes_nmi_add(struct ghes *ghes) -{ - pr_err(GHES_PFX 
"ID: %d, trying to add NMI notification which is not supported!\n", - ghes->generic->header.source_id); - BUG(); -} - -static inline void ghes_nmi_remove(struct ghes *ghes) -{ - pr_err(GHES_PFX "ID: %d, trying to remove NMI notification which is not supported!\n", - ghes->generic->header.source_id); - BUG(); -} - -static inline void ghes_nmi_init_cxt(void) -{ -} +static inline void ghes_nmi_add(struct ghes *ghes) { } +static inline void ghes_nmi_remove(struct ghes *ghes) { } +static inline void ghes_nmi_init_cxt(void) { } #endif /* CONFIG_HAVE_ACPI_APEI_NMI */ static int ghes_probe(struct platform_device *ghes_dev) @@ -1285,13 +1218,9 @@ static int __init ghes_init(void) ghes_nmi_init_cxt(); - rc = ghes_ioremap_init(); - if (rc) - goto err; - rc = ghes_estatus_pool_init(); if (rc) - goto err_ioremap_exit; + goto err; rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX); @@ -1315,8 +1244,6 @@ static int __init ghes_init(void) return 0; err_pool_exit: ghes_estatus_pool_exit(); -err_ioremap_exit: - ghes_ioremap_exit(); err: return rc; } diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index 995c4d8922b1..edf821bc582b 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -91,6 +91,7 @@ static int __init dmi_enable_rev_override(const struct dmi_system_id *d) static const struct dmi_system_id acpi_rev_dmi_table[] __initconst = { #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE +#if !IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL) /* * DELL XPS 13 (2015) switches sound between HDA and I2S * depending on the ACPI _REV callback. If userspace supports @@ -105,6 +106,7 @@ static const struct dmi_system_id acpi_rev_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"), }, }, +#endif { .callback = dmi_enable_rev_override, .ident = "DELL Precision 5520", diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 4d0979e02a28..f0348e388d01 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -66,10 +66,37 @@ static int set_copy_dsdt(const struct dmi_system_id *id) return 0; } #endif +static int set_gbl_term_list(const struct dmi_system_id *id) +{ + acpi_gbl_parse_table_as_term_list = 1; + return 0; +} -static const struct dmi_system_id dsdt_dmi_table[] __initconst = { +static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = { + /* + * Touchpad on Dell XPS 9570/Precision M5530 doesn't work under I2C + * mode. + * https://bugzilla.kernel.org/show_bug.cgi?id=198515 + */ + { + .callback = set_gbl_term_list, + .ident = "Dell Precision M5530", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision M5530"), + }, + }, + { + .callback = set_gbl_term_list, + .ident = "Dell XPS 15 9570", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9570"), + }, + }, /* * Invoke DSDT corruption work-around on all Toshiba Satellite. + * DSDT will be copied to memory. 
* https://bugzilla.kernel.org/show_bug.cgi?id=14679 */ { @@ -83,7 +110,7 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = { {} }; #else -static const struct dmi_system_id dsdt_dmi_table[] __initconst = { +static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = { {} }; #endif @@ -119,6 +146,12 @@ int acpi_bus_get_status(struct acpi_device *device) return 0; } + /* Battery devices must have their deps met before calling _STA */ + if (acpi_device_is_battery(device) && device->dep_unmet) { + acpi_set_device_status(device, 0); + return 0; + } + status = acpi_bus_get_status_handle(device->handle, &sta); if (ACPI_FAILURE(status)) return -ENODEV; @@ -1001,11 +1034,8 @@ void __init acpi_early_init(void) acpi_permanent_mmap = true; - /* - * If the machine falls into the DMI check table, - * DSDT will be copied to memory - */ - dmi_check_system(dsdt_dmi_table); + /* Check machine-specific quirks */ + dmi_check_system(acpi_quirks_dmi_table); status = acpi_reallocate_root_table(); if (ACPI_FAILURE(status)) { diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index ef1856b15488..a3297306f544 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -557,7 +557,8 @@ static int acpi_button_remove(struct acpi_device *device) return 0; } -static int param_set_lid_init_state(const char *val, struct kernel_param *kp) +static int param_set_lid_init_state(const char *val, + const struct kernel_param *kp) { int result = 0; @@ -575,7 +576,8 @@ static int param_set_lid_init_state(const char *val, struct kernel_param *kp) return result; } -static int param_get_lid_init_state(char *buffer, struct kernel_param *kp) +static int param_get_lid_init_state(char *buffer, + const struct kernel_param *kp) { switch (lid_init_state) { case ACPI_BUTTON_LID_INIT_OPEN: @@ -595,4 +597,26 @@ module_param_call(lid_init_state, NULL, 0644); MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state"); -module_acpi_driver(acpi_button_driver); +static int acpi_button_register_driver(struct acpi_driver *driver) +{ + /* + * Modules such as nouveau.ko and i915.ko have a link time dependency + * on acpi_lid_open(), and would therefore not be loadable on ACPI + * capable kernels booted in non-ACPI mode if the return value of + * acpi_bus_register_driver() is returned from here with ACPI disabled + * when this driver is built as a module. 
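For reference, the module_driver() invocation at the end of this hunk expands roughly to the following init/exit pair, which is why the register helper must quietly succeed when ACPI is disabled:

    /* Approximate expansion of module_driver() (sketch) */
    static int __init acpi_button_driver_init(void)
    {
            return acpi_button_register_driver(&acpi_button_driver);
    }
    module_init(acpi_button_driver_init);

    static void __exit acpi_button_driver_exit(void)
    {
            acpi_button_unregister_driver(&acpi_button_driver);
    }
    module_exit(acpi_button_driver_exit);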
+ */ + if (acpi_disabled) + return 0; + + return acpi_bus_register_driver(driver); +} + +static void acpi_button_unregister_driver(struct acpi_driver *driver) +{ + if (!acpi_disabled) + acpi_bus_unregister_driver(driver); +} + +module_driver(acpi_button_driver, acpi_button_register_driver, + acpi_button_unregister_driver); diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index fbcc73f7a099..608e9dd565c6 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -387,6 +387,7 @@ EXPORT_SYMBOL(acpi_bus_power_manageable); #ifdef CONFIG_PM static DEFINE_MUTEX(acpi_pm_notifier_lock); +static DEFINE_MUTEX(acpi_pm_notifier_install_lock); void acpi_pm_wakeup_event(struct device *dev) { @@ -443,24 +444,25 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev, if (!dev && !func) return AE_BAD_PARAMETER; - mutex_lock(&acpi_pm_notifier_lock); + mutex_lock(&acpi_pm_notifier_install_lock); if (adev->wakeup.flags.notifier_present) goto out; - adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev)); - adev->wakeup.context.dev = dev; - adev->wakeup.context.func = func; - status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY, acpi_pm_notify_handler, NULL); if (ACPI_FAILURE(status)) goto out; + mutex_lock(&acpi_pm_notifier_lock); + adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev)); + adev->wakeup.context.dev = dev; + adev->wakeup.context.func = func; adev->wakeup.flags.notifier_present = true; + mutex_unlock(&acpi_pm_notifier_lock); out: - mutex_unlock(&acpi_pm_notifier_lock); + mutex_unlock(&acpi_pm_notifier_install_lock); return status; } @@ -472,7 +474,7 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev) { acpi_status status = AE_BAD_PARAMETER; - mutex_lock(&acpi_pm_notifier_lock); + mutex_lock(&acpi_pm_notifier_install_lock); if (!adev->wakeup.flags.notifier_present) goto out; @@ -483,14 +485,15 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev) if (ACPI_FAILURE(status)) goto out; + mutex_lock(&acpi_pm_notifier_lock); adev->wakeup.context.func = NULL; adev->wakeup.context.dev = NULL; wakeup_source_unregister(adev->wakeup.ws); - adev->wakeup.flags.notifier_present = false; + mutex_unlock(&acpi_pm_notifier_lock); out: - mutex_unlock(&acpi_pm_notifier_lock); + mutex_unlock(&acpi_pm_notifier_install_lock); return status; } @@ -882,14 +885,13 @@ int acpi_dev_runtime_suspend(struct device *dev) EXPORT_SYMBOL_GPL(acpi_dev_runtime_suspend); /** - * acpi_dev_runtime_resume - Put device into the full-power state using ACPI. + * acpi_dev_resume - Put device into the full-power state using ACPI. * @dev: Device to put into the full-power state. * * Put the given device into the full-power state using the standard ACPI - * mechanism at run time. Set the power state of the device to ACPI D0 and - * disable remote wakeup. + * mechanism. Set the power state of the device to ACPI D0 and disable wakeup. */ -int acpi_dev_runtime_resume(struct device *dev) +int acpi_dev_resume(struct device *dev) { struct acpi_device *adev = ACPI_COMPANION(dev); int error; @@ -901,7 +903,7 @@ int acpi_dev_runtime_resume(struct device *dev) acpi_device_wakeup_disable(adev); return error; } -EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume); +EXPORT_SYMBOL_GPL(acpi_dev_resume); /** * acpi_subsys_runtime_suspend - Suspend device using ACPI. 
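The acpi_subsys_* callbacks reworked in the hunks below reach drivers through the ACPI general PM domain. A hypothetical platform driver opts in along these lines (sketch; the driver name is invented, error handling is trimmed, and the API is as of this kernel era):

    #include <linux/acpi.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            int ret;

            /* Attach the ACPI PM domain so acpi_subsys_prepare(),
             * acpi_subsys_complete() and friends manage this device. */
            ret = acpi_dev_pm_attach(&pdev->dev, true);
            if (ret)
                    return ret;

            /* ... normal probe work ... */
            return 0;
    }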
@@ -926,7 +928,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend); */ int acpi_subsys_runtime_resume(struct device *dev) { - int ret = acpi_dev_runtime_resume(dev); + int ret = acpi_dev_resume(dev); return ret ? ret : pm_generic_runtime_resume(dev); } EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume); @@ -967,27 +969,26 @@ int acpi_dev_suspend_late(struct device *dev) } EXPORT_SYMBOL_GPL(acpi_dev_suspend_late); -/** - * acpi_dev_resume_early - Put device into the full-power state using ACPI. - * @dev: Device to put into the full-power state. - * - * Put the given device into the full-power state using the standard ACPI - * mechanism during system transition to the working state. Set the power - * state of the device to ACPI D0 and disable remote wakeup. - */ -int acpi_dev_resume_early(struct device *dev) +static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev) { - struct acpi_device *adev = ACPI_COMPANION(dev); - int error; + u32 sys_target = acpi_target_system_state(); + int ret, state; - if (!adev) - return 0; + if (device_may_wakeup(dev) != !!adev->wakeup.prepare_count) + return true; - error = acpi_dev_pm_full_power(adev); - acpi_device_wakeup_disable(adev); - return error; + if (sys_target == ACPI_STATE_S0) + return false; + + if (adev->power.flags.dsw_present) + return true; + + ret = acpi_dev_pm_get_state(dev, adev, sys_target, NULL, &state); + if (ret) + return true; + + return state != adev->power.state; } -EXPORT_SYMBOL_GPL(acpi_dev_resume_early); /** * acpi_subsys_prepare - Prepare device for system transition to a sleep state. @@ -996,29 +997,36 @@ EXPORT_SYMBOL_GPL(acpi_dev_resume_early); int acpi_subsys_prepare(struct device *dev) { struct acpi_device *adev = ACPI_COMPANION(dev); - u32 sys_target; - int ret, state; + int ret; ret = pm_generic_prepare(dev); if (ret < 0) return ret; - if (!adev || !pm_runtime_suspended(dev) - || device_may_wakeup(dev) != !!adev->wakeup.prepare_count) - return 0; - - sys_target = acpi_target_system_state(); - if (sys_target == ACPI_STATE_S0) - return 1; - - if (adev->power.flags.dsw_present) + if (!adev || !pm_runtime_suspended(dev)) return 0; - ret = acpi_dev_pm_get_state(dev, adev, sys_target, NULL, &state); - return !ret && state == adev->power.state; + return !acpi_dev_needs_resume(dev, adev); } EXPORT_SYMBOL_GPL(acpi_subsys_prepare); +/** + * acpi_subsys_complete - Finalize device's resume during system resume. + * @dev: Device to handle. + */ +void acpi_subsys_complete(struct device *dev) +{ + pm_generic_complete(dev); + /* + * If the device had been runtime-suspended before the system went into + * the sleep state it is going out of and it has never been resumed till + * now, resume it in case the firmware powered it up. + */ + if (dev->power.direct_complete && pm_resume_via_firmware()) + pm_request_resume(dev); +} +EXPORT_SYMBOL_GPL(acpi_subsys_complete); + /** * acpi_subsys_suspend - Run the device driver's suspend callback. * @dev: Device to handle. @@ -1057,7 +1065,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late); */ int acpi_subsys_resume_early(struct device *dev) { - int ret = acpi_dev_resume_early(dev); + int ret = acpi_dev_resume(dev); return ret ? 
ret : pm_generic_resume_early(dev); } EXPORT_SYMBOL_GPL(acpi_subsys_resume_early); @@ -1087,7 +1095,7 @@ static struct dev_pm_domain acpi_general_pm_domain = { .runtime_resume = acpi_subsys_runtime_resume, #ifdef CONFIG_PM_SLEEP .prepare = acpi_subsys_prepare, - .complete = pm_complete_with_resume_check, + .complete = acpi_subsys_complete, .suspend = acpi_subsys_suspend, .suspend_late = acpi_subsys_suspend_late, .resume_early = acpi_subsys_resume_early, diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c index 24418932612e..a041689e5701 100644 --- a/drivers/acpi/device_sysfs.c +++ b/drivers/acpi/device_sysfs.c @@ -146,6 +146,10 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, int count; struct acpi_hardware_id *id; + /* Avoid unnecessarily loading modules for non present devices. */ + if (!acpi_device_is_present(acpi_dev)) + return 0; + /* * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 236b14324780..30a572956557 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -486,8 +486,11 @@ static inline void __acpi_ec_enable_event(struct acpi_ec *ec) { if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags)) ec_log_drv("event unblocked"); - if (!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) - advance_transaction(ec); + /* + * Unconditionally invoke this once after enabling the event + * handling mechanism to detect the pending events. + */ + advance_transaction(ec); } static inline void __acpi_ec_disable_event(struct acpi_ec *ec) @@ -1456,11 +1459,10 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events) if (test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1) acpi_ec_enable_gpe(ec, true); - - /* EC is fully operational, allow queries */ - acpi_ec_enable_event(ec); } } + /* EC is fully operational, allow queries */ + acpi_ec_enable_event(ec); return 0; } @@ -1514,7 +1516,7 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events) } acpi_handle_info(ec->handle, - "GPE=0x%lx, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", + "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->gpe, ec->command_addr, ec->data_addr); return ret; } @@ -1595,32 +1597,41 @@ static int acpi_ec_add(struct acpi_device *device) { struct acpi_ec *ec = NULL; int ret; + bool is_ecdt = false; + acpi_status status; strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_EC_CLASS); - ec = acpi_ec_alloc(); - if (!ec) - return -ENOMEM; - if (ec_parse_device(device->handle, 0, ec, NULL) != - AE_CTRL_TERMINATE) { + if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) { + is_ecdt = true; + ec = boot_ec; + } else { + ec = acpi_ec_alloc(); + if (!ec) + return -ENOMEM; + status = ec_parse_device(device->handle, 0, ec, NULL); + if (status != AE_CTRL_TERMINATE) { ret = -EINVAL; goto err_alloc; + } } if (acpi_is_boot_ec(ec)) { - boot_ec_is_ecdt = false; - /* - * Trust PNP0C09 namespace location rather than ECDT ID. - * - * But trust ECDT GPE rather than _GPE because of ASUS quirks, - * so do not change boot_ec->gpe to ec->gpe. - */ - boot_ec->handle = ec->handle; - acpi_handle_debug(ec->handle, "duplicated.\n"); - acpi_ec_free(ec); - ec = boot_ec; - ret = acpi_config_boot_ec(ec, ec->handle, true, false); + boot_ec_is_ecdt = is_ecdt; + if (!is_ecdt) { + /* + * Trust PNP0C09 namespace location rather than + * ECDT ID. 
But trust ECDT GPE rather than _GPE + * because of ASUS quirks, so do not change + * boot_ec->gpe to ec->gpe. + */ + boot_ec->handle = ec->handle; + acpi_handle_debug(ec->handle, "duplicated.\n"); + acpi_ec_free(ec); + ec = boot_ec; + } + ret = acpi_config_boot_ec(ec, ec->handle, true, is_ecdt); } else ret = acpi_ec_setup(ec, true); if (ret) @@ -1633,8 +1644,10 @@ static int acpi_ec_add(struct acpi_device *device) ret = !!request_region(ec->command_addr, 1, "EC cmd"); WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr); - /* Reprobe devices depending on the EC */ - acpi_walk_dep_device_list(ec->handle); + if (!is_ecdt) { + /* Reprobe devices depending on the EC */ + acpi_walk_dep_device_list(ec->handle); + } acpi_handle_debug(ec->handle, "enumerated.\n"); return 0; @@ -1690,6 +1703,7 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context) static const struct acpi_device_id ec_device_ids[] = { {"PNP0C09", 0}, + {ACPI_ECDT_HID, 0}, {"", 0}, }; @@ -1762,11 +1776,14 @@ static int __init acpi_ec_ecdt_start(void) * Note: ec->handle can be valid if this function is called after * acpi_ec_add(), hence the fast path. */ - if (boot_ec->handle != ACPI_ROOT_OBJECT) - handle = boot_ec->handle; - else if (!acpi_ec_ecdt_get_handle(&handle)) - return -ENODEV; - return acpi_config_boot_ec(boot_ec, handle, true, true); + if (boot_ec->handle == ACPI_ROOT_OBJECT) { + if (!acpi_ec_ecdt_get_handle(&handle)) + return -ENODEV; + boot_ec->handle = handle; + } + + /* Register to ACPI bus with PM ops attached */ + return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC); } #if 0 @@ -1910,6 +1927,9 @@ static int acpi_ec_suspend_noirq(struct device *dev) ec->reference_count >= 1) acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); + if (acpi_sleep_no_ec_events()) + acpi_ec_enter_noirq(ec); + return 0; } @@ -1917,6 +1937,9 @@ static int acpi_ec_resume_noirq(struct device *dev) { struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev)); + if (acpi_sleep_no_ec_events()) + acpi_ec_leave_noirq(ec); + if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1) acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); @@ -1939,7 +1962,8 @@ static const struct dev_pm_ops acpi_ec_pm = { SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume) }; -static int param_set_event_clearing(const char *val, struct kernel_param *kp) +static int param_set_event_clearing(const char *val, + const struct kernel_param *kp) { int result = 0; @@ -1957,7 +1981,8 @@ static int param_set_event_clearing(const char *val, struct kernel_param *kp) return result; } -static int param_get_event_clearing(char *buffer, struct kernel_param *kp) +static int param_get_event_clearing(char *buffer, + const struct kernel_param *kp) { switch (ec_event_clearing) { case ACPI_EC_EVT_TIMING_STATUS: @@ -2018,6 +2043,12 @@ int __init acpi_ec_init(void) /* Drivers must be started after acpi_ec_query_init() */ dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver); + /* + * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is + * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT + * settings but invalid DSDT settings. + * https://bugzilla.kernel.org/show_bug.cgi?id=196847 + */ ecdt_fail = acpi_ec_ecdt_start(); return ecdt_fail && dsdt_fail ? 
-ENODEV : 0; } diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c index 6c7dd7af789e..dd70d6c2bca0 100644 --- a/drivers/acpi/ec_sys.c +++ b/drivers/acpi/ec_sys.c @@ -128,7 +128,7 @@ static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count) return -ENOMEM; } - if (!debugfs_create_x32("gpe", 0444, dev_dir, (u32 *)&first_ec->gpe)) + if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe)) goto error; if (!debugfs_create_bool("use_global_lock", 0444, dev_dir, &first_ec->global_lock)) diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 4361c4415b4f..2cd2ae152ab7 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -115,6 +115,7 @@ bool acpi_device_is_present(const struct acpi_device *adev); bool acpi_device_is_battery(struct acpi_device *adev); bool acpi_device_is_first_physical_node(struct acpi_device *adev, const struct device *dev); +int acpi_bus_register_early_device(int type); /* -------------------------------------------------------------------------- Device Matching and Notification @@ -158,7 +159,7 @@ static inline void acpi_early_processor_osc(void) {} -------------------------------------------------------------------------- */ struct acpi_ec { acpi_handle handle; - unsigned long gpe; + u32 gpe; unsigned long command_addr; unsigned long data_addr; bool global_lock; diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 9c2c49b6a240..d56822f58ab1 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1022,8 +1022,11 @@ static ssize_t scrub_show(struct device *dev, if (nd_desc) { struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + mutex_lock(&acpi_desc->init_mutex); rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, - (work_busy(&acpi_desc->work)) ? "+\n" : "\n"); + work_busy(&acpi_desc->work) + && !acpi_desc->cancel ? "+\n" : "\n"); + mutex_unlock(&acpi_desc->init_mutex); } device_unlock(dev); return rc; @@ -1457,6 +1460,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, dev_name(&adev_dimm->dev)); return -ENXIO; } + /* + * Record nfit_mem for the notification path to track back to + * the nfit sysfs attributes for this dimm device object. 
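The notification path referred to in this comment is not part of the hunk; reconstructed from the shutdown path below, it looks roughly like this (sketch; acpi_nvdimm_notify() is the real handler installed on the DIMM's ACPI device):

    static void acpi_nvdimm_notify_sketch(acpi_handle handle, u32 event,
                                          void *data)
    {
            struct acpi_device *adev = data;
            struct nfit_mem *nfit_mem = dev_get_drvdata(&adev->dev);

            /* Poke the "flags" sysfs attribute on a health event so
             * userspace pollers wake up. */
            if (nfit_mem && nfit_mem->flags_attr)
                    sysfs_notify_dirent(nfit_mem->flags_attr);
    }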
+ */ + dev_set_drvdata(&adev_dimm->dev, nfit_mem); /* * Until standardization materializes we need to consider 4 @@ -1516,9 +1524,11 @@ static void shutdown_dimm_notify(void *data) sysfs_put(nfit_mem->flags_attr); nfit_mem->flags_attr = NULL; } - if (adev_dimm) + if (adev_dimm) { acpi_remove_notify_handler(adev_dimm->handle, ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify); + dev_set_drvdata(&adev_dimm->dev, NULL); + } } mutex_unlock(&acpi_desc->init_mutex); } @@ -1611,6 +1621,9 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) struct kernfs_node *nfit_kernfs; nvdimm = nfit_mem->nvdimm; + if (!nvdimm) + continue; + nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); if (nfit_kernfs) nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, @@ -2303,7 +2316,7 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, struct acpi_nfit_system_address *spa = nfit_spa->spa; struct nd_blk_region_desc *ndbr_desc; struct nfit_mem *nfit_mem; - int blk_valid = 0, rc; + int rc; if (!nvdimm) { dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", @@ -2323,15 +2336,14 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, if (!nfit_mem || !nfit_mem->bdw) { dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", spa->range_index, nvdimm_name(nvdimm)); - } else { - mapping->size = nfit_mem->bdw->capacity; - mapping->start = nfit_mem->bdw->start_address; - ndr_desc->num_lanes = nfit_mem->bdw->windows; - blk_valid = 1; + break; } + mapping->size = nfit_mem->bdw->capacity; + mapping->start = nfit_mem->bdw->start_address; + ndr_desc->num_lanes = nfit_mem->bdw->windows; ndr_desc->mapping = mapping; - ndr_desc->num_mappings = blk_valid; + ndr_desc->num_mappings = 1; ndbr_desc = to_blk_region_desc(ndr_desc); ndbr_desc->enable = acpi_nfit_blk_region_enable; ndbr_desc->do_io = acpi_desc->blk_do_io; @@ -2738,15 +2750,21 @@ static void acpi_nfit_scrub(struct work_struct *work) static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) { struct nfit_spa *nfit_spa; - int rc; - list_for_each_entry(nfit_spa, &acpi_desc->spas, list) - if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) { - /* BLK regions don't need to wait for ars results */ - rc = acpi_nfit_register_region(acpi_desc, nfit_spa); - if (rc) - return rc; - } + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { + int rc, type = nfit_spa_type(nfit_spa->spa); + + /* PMEM and VMEM will be registered by the ARS workqueue */ + if (type == NFIT_SPA_PM || type == NFIT_SPA_VOLATILE) + continue; + /* BLK apertures belong to BLK region registration below */ + if (type == NFIT_SPA_BDW) + continue; + /* BLK regions don't need to wait for ARS results */ + rc = acpi_nfit_register_region(acpi_desc, nfit_spa); + if (rc) + return rc; + } acpi_desc->ars_start_flags = 0; if (!acpi_desc->cancel) diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 917f1cc0fda4..8fb74d9011da 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm) */ int acpi_map_pxm_to_online_node(int pxm) { - int node, n, dist, min_dist; + int node, min_node; node = acpi_map_pxm_to_node(pxm); if (node == NUMA_NO_NODE) node = 0; + min_node = node; if (!node_online(node)) { - min_dist = INT_MAX; + int min_dist = INT_MAX, dist, n; + for_each_online_node(n) { dist = node_distance(node, n); if (dist < min_dist) { min_dist = dist; - node = n; + min_node = n; } } } - return node; + return min_node; } EXPORT_SYMBOL(acpi_map_pxm_to_online_node); diff --git a/drivers/acpi/osl.c 
b/drivers/acpi/osl.c index db78d353bab1..191e86c62037 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -45,6 +45,8 @@ #include #include +#include "acpica/accommon.h" +#include "acpica/acnamesp.h" #include "internal.h" #define _COMPONENT ACPI_OS_SERVICES @@ -1477,6 +1479,76 @@ int acpi_check_region(resource_size_t start, resource_size_t n, } EXPORT_SYMBOL(acpi_check_region); +static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level, + void *_res, void **return_value) +{ + struct acpi_mem_space_context **mem_ctx; + union acpi_operand_object *handler_obj; + union acpi_operand_object *region_obj2; + union acpi_operand_object *region_obj; + struct resource *res = _res; + acpi_status status; + + region_obj = acpi_ns_get_attached_object(handle); + if (!region_obj) + return AE_OK; + + handler_obj = region_obj->region.handler; + if (!handler_obj) + return AE_OK; + + if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) + return AE_OK; + + if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) + return AE_OK; + + region_obj2 = acpi_ns_get_secondary_object(region_obj); + if (!region_obj2) + return AE_OK; + + mem_ctx = (void *)®ion_obj2->extra.region_context; + + if (!(mem_ctx[0]->address >= res->start && + mem_ctx[0]->address < res->end)) + return AE_OK; + + status = handler_obj->address_space.setup(region_obj, + ACPI_REGION_DEACTIVATE, + NULL, (void **)mem_ctx); + if (ACPI_SUCCESS(status)) + region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE); + + return status; +} + +/** + * acpi_release_memory - Release any mappings done to a memory region + * @handle: Handle to namespace node + * @res: Memory resource + * @level: A level that terminates the search + * + * Walks through @handle and unmaps all SystemMemory Operation Regions that + * overlap with @res and that have already been activated (mapped). + * + * This is a helper that allows drivers to place special requirements on memory + * region that may overlap with operation regions, primarily allowing them to + * safely map the region as non-cached memory. + * + * The unmapped Operation Regions will be automatically remapped next time they + * are called, so the drivers do not need to do anything else. + */ +acpi_status acpi_release_memory(acpi_handle handle, struct resource *res, + u32 level) +{ + if (!(res->flags & IORESOURCE_MEM)) + return AE_TYPE; + + return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level, + acpi_deactivate_mem_region, NULL, res, NULL); +} +EXPORT_SYMBOL_GPL(acpi_release_memory); + /* * Let drivers know whether the resource checks are effective */ diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 6fc204a52493..eb857d6ea1fe 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -472,9 +472,11 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm) } control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL - | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | OSC_PCI_EXPRESS_PME_CONTROL; + if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) + control |= OSC_PCI_EXPRESS_NATIVE_HP_CONTROL; + if (pci_aer_available()) { if (aer_acpi_firmware_first()) dev_info(&device->dev, diff --git a/drivers/acpi/pmic/tps68470_pmic.c b/drivers/acpi/pmic/tps68470_pmic.c new file mode 100644 index 000000000000..7f3c567e8168 --- /dev/null +++ b/drivers/acpi/pmic/tps68470_pmic.c @@ -0,0 +1,455 @@ +/* + * TI TPS68470 PMIC operation region driver + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. 
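A hypothetical caller of the new acpi_release_memory() helper; the address, size, and walk depth below are illustrative only:

    #include <linux/acpi.h>
    #include <linux/io.h>
    #include <linux/ioport.h>
    #include <linux/sizes.h>

    static void __iomem *example_claim_wc(void)
    {
            struct resource res = DEFINE_RES_MEM(0xfed00000, SZ_4K);

            /* Unmap any overlapping, already-activated SystemMemory
             * operation regions anywhere in the namespace... */
            if (ACPI_FAILURE(acpi_release_memory(ACPI_ROOT_OBJECT, &res,
                                                 ACPI_UINT32_MAX)))
                    return NULL;

            /* ...making the conflicting mapping type safe to create. */
            return ioremap_wc(res.start, resource_size(&res));
    }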
+ * + * Author: Rajmohan Mani + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Based on drivers/acpi/pmic/intel_pmic* drivers + */ + +#include +#include +#include +#include +#include + +struct tps68470_pmic_table { + u32 address; /* operation region address */ + u32 reg; /* corresponding register */ + u32 bitmask; /* bit mask for power, clock */ +}; + +#define TI_PMIC_POWER_OPREGION_ID 0xB0 +#define TI_PMIC_VR_VAL_OPREGION_ID 0xB1 +#define TI_PMIC_CLOCK_OPREGION_ID 0xB2 +#define TI_PMIC_CLKFREQ_OPREGION_ID 0xB3 + +struct tps68470_pmic_opregion { + struct mutex lock; + struct regmap *regmap; +}; + +#define S_IO_I2C_EN (BIT(0) | BIT(1)) + +static const struct tps68470_pmic_table power_table[] = { + { + .address = 0x00, + .reg = TPS68470_REG_S_I2C_CTL, + .bitmask = S_IO_I2C_EN, + /* S_I2C_CTL */ + }, + { + .address = 0x04, + .reg = TPS68470_REG_VCMCTL, + .bitmask = BIT(0), + /* VCMCTL */ + }, + { + .address = 0x08, + .reg = TPS68470_REG_VAUX1CTL, + .bitmask = BIT(0), + /* VAUX1_CTL */ + }, + { + .address = 0x0C, + .reg = TPS68470_REG_VAUX2CTL, + .bitmask = BIT(0), + /* VAUX2CTL */ + }, + { + .address = 0x10, + .reg = TPS68470_REG_VACTL, + .bitmask = BIT(0), + /* VACTL */ + }, + { + .address = 0x14, + .reg = TPS68470_REG_VDCTL, + .bitmask = BIT(0), + /* VDCTL */ + }, +}; + +/* Table to set voltage regulator value */ +static const struct tps68470_pmic_table vr_val_table[] = { + { + .address = 0x00, + .reg = TPS68470_REG_VSIOVAL, + .bitmask = TPS68470_VSIOVAL_IOVOLT_MASK, + /* TPS68470_REG_VSIOVAL */ + }, + { + .address = 0x04, + .reg = TPS68470_REG_VIOVAL, + .bitmask = TPS68470_VIOVAL_IOVOLT_MASK, + /* TPS68470_REG_VIOVAL */ + }, + { + .address = 0x08, + .reg = TPS68470_REG_VCMVAL, + .bitmask = TPS68470_VCMVAL_VCVOLT_MASK, + /* TPS68470_REG_VCMVAL */ + }, + { + .address = 0x0C, + .reg = TPS68470_REG_VAUX1VAL, + .bitmask = TPS68470_VAUX1VAL_AUX1VOLT_MASK, + /* TPS68470_REG_VAUX1VAL */ + }, + { + .address = 0x10, + .reg = TPS68470_REG_VAUX2VAL, + .bitmask = TPS68470_VAUX2VAL_AUX2VOLT_MASK, + /* TPS68470_REG_VAUX2VAL */ + }, + { + .address = 0x14, + .reg = TPS68470_REG_VAVAL, + .bitmask = TPS68470_VAVAL_AVOLT_MASK, + /* TPS68470_REG_VAVAL */ + }, + { + .address = 0x18, + .reg = TPS68470_REG_VDVAL, + .bitmask = TPS68470_VDVAL_DVOLT_MASK, + /* TPS68470_REG_VDVAL */ + }, +}; + +/* Table to configure clock frequency */ +static const struct tps68470_pmic_table clk_freq_table[] = { + { + .address = 0x00, + .reg = TPS68470_REG_POSTDIV2, + .bitmask = BIT(0) | BIT(1), + /* TPS68470_REG_POSTDIV2 */ + }, + { + .address = 0x04, + .reg = TPS68470_REG_BOOSTDIV, + .bitmask = 0x1F, + /* TPS68470_REG_BOOSTDIV */ + }, + { + .address = 0x08, + .reg = TPS68470_REG_BUCKDIV, + .bitmask = 0x0F, + /* TPS68470_REG_BUCKDIV */ + }, + { + .address = 0x0C, + .reg = TPS68470_REG_PLLSWR, + .bitmask = 0x13, + /* TPS68470_REG_PLLSWR */ + }, + { + .address = 0x10, + .reg = TPS68470_REG_XTALDIV, + .bitmask = 0xFF, + /* TPS68470_REG_XTALDIV */ + }, + { + .address = 0x14, + .reg = TPS68470_REG_PLLDIV, + .bitmask = 0xFF, + /* TPS68470_REG_PLLDIV */ + }, + { + .address = 0x18, + .reg = TPS68470_REG_POSTDIV, + .bitmask = 0x83, + 
/* TPS68470_REG_POSTDIV */ + }, +}; + +/* Table to configure and enable clocks */ +static const struct tps68470_pmic_table clk_table[] = { + { + .address = 0x00, + .reg = TPS68470_REG_PLLCTL, + .bitmask = 0xF5, + /* TPS68470_REG_PLLCTL */ + }, + { + .address = 0x04, + .reg = TPS68470_REG_PLLCTL2, + .bitmask = BIT(0), + /* TPS68470_REG_PLLCTL2 */ + }, + { + .address = 0x08, + .reg = TPS68470_REG_CLKCFG1, + .bitmask = TPS68470_CLKCFG1_MODE_A_MASK | + TPS68470_CLKCFG1_MODE_B_MASK, + /* TPS68470_REG_CLKCFG1 */ + }, + { + .address = 0x0C, + .reg = TPS68470_REG_CLKCFG2, + .bitmask = TPS68470_CLKCFG1_MODE_A_MASK | + TPS68470_CLKCFG1_MODE_B_MASK, + /* TPS68470_REG_CLKCFG2 */ + }, +}; + +static int pmic_get_reg_bit(u64 address, + const struct tps68470_pmic_table *table, + const unsigned int table_size, int *reg, + int *bitmask) +{ + u64 i; + + i = address / 4; + if (i >= table_size) + return -ENOENT; + + if (!reg || !bitmask) + return -EINVAL; + + *reg = table[i].reg; + *bitmask = table[i].bitmask; + + return 0; +} + +static int tps68470_pmic_get_power(struct regmap *regmap, int reg, + int bitmask, u64 *value) +{ + unsigned int data; + + if (regmap_read(regmap, reg, &data)) + return -EIO; + + *value = (data & bitmask) ? 1 : 0; + return 0; +} + +static int tps68470_pmic_get_vr_val(struct regmap *regmap, int reg, + int bitmask, u64 *value) +{ + unsigned int data; + + if (regmap_read(regmap, reg, &data)) + return -EIO; + + *value = data & bitmask; + return 0; +} + +static int tps68470_pmic_get_clk(struct regmap *regmap, int reg, + int bitmask, u64 *value) +{ + unsigned int data; + + if (regmap_read(regmap, reg, &data)) + return -EIO; + + *value = (data & bitmask) ? 1 : 0; + return 0; +} + +static int tps68470_pmic_get_clk_freq(struct regmap *regmap, int reg, + int bitmask, u64 *value) +{ + unsigned int data; + + if (regmap_read(regmap, reg, &data)) + return -EIO; + + *value = data & bitmask; + return 0; +} + +static int ti_tps68470_regmap_update_bits(struct regmap *regmap, int reg, + int bitmask, u64 value) +{ + return regmap_update_bits(regmap, reg, bitmask, value); +} + +static acpi_status tps68470_pmic_common_handler(u32 function, + acpi_physical_address address, + u32 bits, u64 *value, + void *region_context, + int (*get)(struct regmap *, + int, int, u64 *), + int (*update)(struct regmap *, + int, int, u64), + const struct tps68470_pmic_table *tbl, + unsigned int tbl_size) +{ + struct tps68470_pmic_opregion *opregion = region_context; + struct regmap *regmap = opregion->regmap; + int reg, ret, bitmask; + + if (bits != 32) + return AE_BAD_PARAMETER; + + ret = pmic_get_reg_bit(address, tbl, tbl_size, ®, &bitmask); + if (ret < 0) + return AE_BAD_PARAMETER; + + if (function == ACPI_WRITE && *value > bitmask) + return AE_BAD_PARAMETER; + + mutex_lock(&opregion->lock); + + ret = (function == ACPI_READ) ? + get(regmap, reg, bitmask, value) : + update(regmap, reg, bitmask, *value); + + mutex_unlock(&opregion->lock); + + return ret ? 
AE_ERROR : AE_OK; +} + +static acpi_status tps68470_pmic_cfreq_handler(u32 function, + acpi_physical_address address, + u32 bits, u64 *value, + void *handler_context, + void *region_context) +{ + return tps68470_pmic_common_handler(function, address, bits, value, + region_context, + tps68470_pmic_get_clk_freq, + ti_tps68470_regmap_update_bits, + clk_freq_table, + ARRAY_SIZE(clk_freq_table)); +} + +static acpi_status tps68470_pmic_clk_handler(u32 function, + acpi_physical_address address, u32 bits, + u64 *value, void *handler_context, + void *region_context) +{ + return tps68470_pmic_common_handler(function, address, bits, value, + region_context, + tps68470_pmic_get_clk, + ti_tps68470_regmap_update_bits, + clk_table, + ARRAY_SIZE(clk_table)); +} + +static acpi_status tps68470_pmic_vrval_handler(u32 function, + acpi_physical_address address, + u32 bits, u64 *value, + void *handler_context, + void *region_context) +{ + return tps68470_pmic_common_handler(function, address, bits, value, + region_context, + tps68470_pmic_get_vr_val, + ti_tps68470_regmap_update_bits, + vr_val_table, + ARRAY_SIZE(vr_val_table)); +} + +static acpi_status tps68470_pmic_pwr_handler(u32 function, + acpi_physical_address address, + u32 bits, u64 *value, + void *handler_context, + void *region_context) +{ + if (bits != 32) + return AE_BAD_PARAMETER; + + /* set/clear for bit 0, bits 0 and 1 together */ + if (function == ACPI_WRITE && + !(*value == 0 || *value == 1 || *value == 3)) { + return AE_BAD_PARAMETER; + } + + return tps68470_pmic_common_handler(function, address, bits, value, + region_context, + tps68470_pmic_get_power, + ti_tps68470_regmap_update_bits, + power_table, + ARRAY_SIZE(power_table)); +} + +static int tps68470_pmic_opregion_probe(struct platform_device *pdev) +{ + struct regmap *tps68470_regmap = dev_get_drvdata(pdev->dev.parent); + acpi_handle handle = ACPI_HANDLE(pdev->dev.parent); + struct device *dev = &pdev->dev; + struct tps68470_pmic_opregion *opregion; + acpi_status status; + + if (!dev || !tps68470_regmap) { + dev_warn(dev, "dev or regmap is NULL\n"); + return -EINVAL; + } + + if (!handle) { + dev_warn(dev, "acpi handle is NULL\n"); + return -ENODEV; + } + + opregion = devm_kzalloc(dev, sizeof(*opregion), GFP_KERNEL); + if (!opregion) + return -ENOMEM; + + mutex_init(&opregion->lock); + opregion->regmap = tps68470_regmap; + + status = acpi_install_address_space_handler(handle, + TI_PMIC_POWER_OPREGION_ID, + tps68470_pmic_pwr_handler, + NULL, opregion); + if (ACPI_FAILURE(status)) + goto out_mutex_destroy; + + status = acpi_install_address_space_handler(handle, + TI_PMIC_VR_VAL_OPREGION_ID, + tps68470_pmic_vrval_handler, + NULL, opregion); + if (ACPI_FAILURE(status)) + goto out_remove_power_handler; + + status = acpi_install_address_space_handler(handle, + TI_PMIC_CLOCK_OPREGION_ID, + tps68470_pmic_clk_handler, + NULL, opregion); + if (ACPI_FAILURE(status)) + goto out_remove_vr_val_handler; + + status = acpi_install_address_space_handler(handle, + TI_PMIC_CLKFREQ_OPREGION_ID, + tps68470_pmic_cfreq_handler, + NULL, opregion); + if (ACPI_FAILURE(status)) + goto out_remove_clk_handler; + + return 0; + +out_remove_clk_handler: + acpi_remove_address_space_handler(handle, TI_PMIC_CLOCK_OPREGION_ID, + tps68470_pmic_clk_handler); +out_remove_vr_val_handler: + acpi_remove_address_space_handler(handle, TI_PMIC_VR_VAL_OPREGION_ID, + tps68470_pmic_vrval_handler); +out_remove_power_handler: + acpi_remove_address_space_handler(handle, TI_PMIC_POWER_OPREGION_ID, + tps68470_pmic_pwr_handler); 
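A usage note on pmic_get_reg_bit() above: operation region offsets advance in steps of four, so address / 4 indexes the table. A hypothetical helper that powers on VAUX1 (power-region offset 0x08, i.e. power_table[2]):

    static int example_power_on_vaux1(struct regmap *regmap)
    {
            int reg, bitmask, ret;

            ret = pmic_get_reg_bit(0x08, power_table,
                                   ARRAY_SIZE(power_table), &reg, &bitmask);
            if (ret)
                    return ret;

            /* Sets BIT(0) of TPS68470_REG_VAUX1CTL */
            return regmap_update_bits(regmap, reg, bitmask, 1);
    }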
+out_mutex_destroy: + mutex_destroy(&opregion->lock); + return -ENODEV; +} + +static struct platform_driver tps68470_pmic_opregion_driver = { + .probe = tps68470_pmic_opregion_probe, + .driver = { + .name = "tps68470_pmic_opregion", + }, +}; + +builtin_platform_driver(tps68470_pmic_opregion_driver) diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 18b72eec3507..c7cf48ad5cb9 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -159,7 +159,7 @@ void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag) { int ret; - if (ignore_ppc) { + if (ignore_ppc || !pr->performance) { /* * Only when it is notification event, the _OST object * will be evaluated. Otherwise it is skipped. diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index 2fa8304171e0..7a3431018e0a 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c @@ -275,8 +275,8 @@ static int acpi_smbus_hc_add(struct acpi_device *device) device->driver_data = hc; acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc); - printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n", - hc->ec, hc->offset, hc->query_bit); + dev_info(&device->dev, "SBS HC: offset = 0x%0x, query_bit = 0x%0x\n", + hc->offset, hc->query_bit); return 0; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 602f8ff212f2..c0984d33c4c8 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1024,6 +1024,9 @@ static void acpi_device_get_busid(struct acpi_device *device) case ACPI_BUS_TYPE_SLEEP_BUTTON: strcpy(device->pnp.bus_id, "SLPF"); break; + case ACPI_BUS_TYPE_ECDT_EC: + strcpy(device->pnp.bus_id, "ECDT"); + break; default: acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer); /* Clean up trailing underscores (if any) */ @@ -1304,6 +1307,9 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp, case ACPI_BUS_TYPE_SLEEP_BUTTON: acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF); break; + case ACPI_BUS_TYPE_ECDT_EC: + acpi_add_id(pnp, ACPI_ECDT_HID); + break; } } @@ -1562,6 +1568,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, device_initialize(&device->dev); dev_set_uevent_suppress(&device->dev, true); acpi_init_coherency(device); + /* Assume there are unmet deps until acpi_device_dep_initialize() runs */ + device->dep_unmet = 1; } void acpi_device_add_finalize(struct acpi_device *device) @@ -1585,6 +1593,14 @@ static int acpi_add_single_object(struct acpi_device **child, } acpi_init_device_object(device, handle, type, sta); + /* + * For ACPI_BUS_TYPE_DEVICE getting the status is delayed till here so + * that we can call acpi_bus_get_status() and use its quirk handling. + * Note this must be done before the get power-/wakeup_dev-flags calls. + */ + if (type == ACPI_BUS_TYPE_DEVICE) + acpi_bus_get_status(device); + acpi_bus_get_power_flags(device); acpi_bus_get_wakeup_device_flags(device); @@ -1657,9 +1673,11 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type, return -ENODEV; *type = ACPI_BUS_TYPE_DEVICE; - status = acpi_bus_get_status_handle(handle, sta); - if (ACPI_FAILURE(status)) - *sta = 0; + /* + * acpi_add_single_object updates this once we've an acpi_device + * so that acpi_bus_get_status' quirk handling can be used. 
+ */ + *sta = 0; break; case ACPI_TYPE_PROCESSOR: *type = ACPI_BUS_TYPE_PROCESSOR; @@ -1757,6 +1775,8 @@ static void acpi_device_dep_initialize(struct acpi_device *adev) acpi_status status; int i; + adev->dep_unmet = 0; + if (!acpi_has_method(adev->handle, "_DEP")) return; @@ -2049,6 +2069,21 @@ void acpi_bus_trim(struct acpi_device *adev) } EXPORT_SYMBOL_GPL(acpi_bus_trim); +int acpi_bus_register_early_device(int type) +{ + struct acpi_device *device = NULL; + int result; + + result = acpi_add_single_object(&device, NULL, + type, ACPI_STA_DEFAULT); + if (result) + return result; + + device->flags.match_driver = true; + return device_attach(&device->dev); +} +EXPORT_SYMBOL_GPL(acpi_bus_register_early_device); + static int acpi_bus_scan_fixed(void) { int result = 0; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 8082871b409a..7a0af16f86f2 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -338,6 +338,14 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"), }, }, + { + .callback = init_nvs_save_s3, + .ident = "Asus 1025C", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "1025C"), + }, + }, /* * https://bugzilla.kernel.org/show_bug.cgi?id=189431 * Lenovo G50-45 is a platform later than 2012, but needs nvs memory @@ -364,6 +372,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), }, }, + /* + * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using + * the Low Power S0 Idle firmware interface (see + * https://bugzilla.kernel.org/show_bug.cgi?id=199057). + */ + { + .callback = init_no_lps0, + .ident = "ThinkPad X1 Tablet(2016)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"), + }, + }, {}, }; diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 0fd57bf33524..9e728a1494f6 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -230,7 +230,8 @@ module_param_cb(trace_method_name, ¶m_ops_trace_method, &trace_method_name, module_param_cb(trace_debug_layer, ¶m_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644); module_param_cb(trace_debug_level, ¶m_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644); -static int param_set_trace_state(const char *val, struct kernel_param *kp) +static int param_set_trace_state(const char *val, + const struct kernel_param *kp) { acpi_status status; const char *method = trace_method_name; @@ -266,7 +267,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp) return 0; } -static int param_get_trace_state(char *buffer, struct kernel_param *kp) +static int param_get_trace_state(char *buffer, const struct kernel_param *kp) { if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED)) return sprintf(buffer, "disable"); @@ -295,7 +296,8 @@ MODULE_PARM_DESC(aml_debug_output, "To enable/disable the ACPI Debug Object output."); /* /sys/module/acpi/parameters/acpica_version */ -static int param_get_acpica_version(char *buffer, struct kernel_param *kp) +static int param_get_acpica_version(char *buffer, + const struct kernel_param *kp) { int result; diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 0a9e5979aaa9..93814bd7ae86 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -736,16 +736,17 @@ bool acpi_dev_found(const char *hid) } EXPORT_SYMBOL(acpi_dev_found); -struct acpi_dev_present_info { +struct acpi_dev_match_info { + const char *dev_name; struct acpi_device_id 
hid[2]; const char *uid; s64 hrv; }; -static int acpi_dev_present_cb(struct device *dev, void *data) +static int acpi_dev_match_cb(struct device *dev, void *data) { struct acpi_device *adev = to_acpi_device(dev); - struct acpi_dev_present_info *match = data; + struct acpi_dev_match_info *match = data; unsigned long long hrv; acpi_status status; @@ -756,6 +757,8 @@ static int acpi_dev_present_cb(struct device *dev, void *data) strcmp(adev->pnp.unique_id, match->uid))) return 0; + match->dev_name = acpi_dev_name(adev); + if (match->hrv == -1) return 1; @@ -788,20 +791,44 @@ */ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) { - struct acpi_dev_present_info match = {}; + struct acpi_dev_match_info match = {}; struct device *dev; strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id)); match.uid = uid; match.hrv = hrv; - dev = bus_find_device(&acpi_bus_type, NULL, &match, - acpi_dev_present_cb); - + dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb); return !!dev; } EXPORT_SYMBOL(acpi_dev_present); +/** + * acpi_dev_get_first_match_name - Return name of first match of ACPI device + * @hid: Hardware ID of the device. + * @uid: Unique ID of the device, pass NULL to not check _UID + * @hrv: Hardware Revision of the device, pass -1 to not check _HRV + * + * Return device name if a matching device was present + * at the moment of invocation, or NULL otherwise. + * + * See additional information in acpi_dev_present() as well. + */ +const char * +acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv) +{ + struct acpi_dev_match_info match = {}; + struct device *dev; + + strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id)); + match.uid = uid; + match.hrv = hrv; + + dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb); + return dev ? match.dev_name : NULL; +} +EXPORT_SYMBOL(acpi_dev_get_first_match_name); + /* * acpi_backlight= handling, this is done here rather than in video_detect.c * because __setup cannot be used in modules. diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 601e5d372887..43587ac680e4 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -219,6 +219,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = { "3570R/370R/470R/450R/510R/4450RV"), }, }, + { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1557060 */ + .callback = video_detect_force_video, + .ident = "SAMSUNG 670Z5E", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "670Z5E"), + }, + }, { /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */ .callback = video_detect_force_video, diff --git a/drivers/acrn/Kconfig b/drivers/acrn/Kconfig new file mode 100644 index 000000000000..9fc4cae04a56 --- /dev/null +++ b/drivers/acrn/Kconfig @@ -0,0 +1,21 @@ +config ACRN_SHARED_BUFFER + bool "Intel ACRN SHARED BUFFER" + depends on ACRN_VHM + ---help--- + Ring buffer shared between the ACRN Hypervisor and its SOS. + It helps with ACRN performance profiling. + +config ACRN_TRACE + tristate "Intel ACRN Hypervisor Trace support" + depends on ACRN_SHARED_BUFFER + ---help--- + This is the Trace driver for the Intel ACRN hypervisor. + You can say y to build it into the kernel, or m to build + it as a module. + +config ACRN_HVLOG + bool "Intel ACRN Hypervisor Logmsg support" + depends on ACRN_SHARED_BUFFER + ---help--- + This is the log driver for the Intel ACRN hypervisor.
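+ It exposes the per-CPU misc devices /dev/acrn_hvlog_cur_* and + /dev/acrn_hvlog_last_*, from which the Service OS can read the + current-boot and the last-boot hypervisor log messages.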
+ You can say y to build it into the kernel. diff --git a/drivers/acrn/Makefile b/drivers/acrn/Makefile new file mode 100644 index 000000000000..0a157712aed6 --- /dev/null +++ b/drivers/acrn/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_ACRN_SHARED_BUFFER) += sbuf.o +obj-$(CONFIG_ACRN_TRACE) += trace.o +obj-$(CONFIG_ACRN_HVLOG) += hvlog.o diff --git a/drivers/acrn/hvlog.c b/drivers/acrn/hvlog.c new file mode 100644 index 000000000000..ed1ab7919a62 --- /dev/null +++ b/drivers/acrn/hvlog.c @@ -0,0 +1,432 @@ +/* + * ACRN Hypervisor logmsg + * + * This file is provided under a dual BSD/GPLv2 license.  When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * Contact Information: Li Fei + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Li Fei + * + */ +#define pr_fmt(fmt) "ACRN HVLog: " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "sbuf.h" + +#define LOG_ENTRY_SIZE 80 +#define PCPU_NRS 4 + +#define foreach_cpu(cpu, cpu_num) \ + for ((cpu) = 0; (cpu) < (cpu_num); (cpu)++) + +#define foreach_hvlog_type(idx, hvlog_type) \ + for ((idx) = 0; (idx) < (hvlog_type); (idx)++) + +enum sbuf_hvlog_index { + ACRN_CURRNET_HVLOG = 0, + ACRN_LAST_HVLOG, + ACRN_HVLOG_TYPE +}; + +struct acrn_hvlog { + struct miscdevice miscdev; + shared_buf_t *sbuf; + atomic_t open_cnt; + int pcpu_num; +}; + +static unsigned long long hvlog_buf_size; +static unsigned long long hvlog_buf_base; + +static int __init early_hvlog(char *p) +{ + int ret; + + pr_debug("%s(%s)\n", __func__, p); + hvlog_buf_size = memparse(p, &p); + if (*p != '@') + return 0; + hvlog_buf_base = memparse(p + 1, &p); + + if (!!hvlog_buf_base && !!hvlog_buf_size) { + ret = memblock_reserve(hvlog_buf_base, hvlog_buf_size); + if (ret) { + pr_err("%s: Error reserving hvlog memblock\n", + __func__); + hvlog_buf_base = 0; + hvlog_buf_size = 0; + return ret; + } + } + + return 0; +} +early_param("hvlog", early_hvlog); + + +static inline shared_buf_t *hvlog_mark_unread(shared_buf_t *sbuf) +{ + /* When sbuf is non-NULL, it points to valid data. + * Clear the lowest bit in the magic to indicate that + * the sbuf holds valid data from the last boot, which + * should still be read later. + */ + if (sbuf != NULL) + sbuf->magic &= ~1; + + return sbuf; +} + +static int hvlog_open(struct inode *inode, struct file *filp) +{ + struct acrn_hvlog *acrn_hvlog; + + acrn_hvlog = container_of(filp->private_data, + struct acrn_hvlog, miscdev); + pr_debug("%s, %s\n", __func__, acrn_hvlog->miscdev.name); + + if (acrn_hvlog->pcpu_num >= PCPU_NRS) { + pr_err("%s, invalid pcpu_num: %d\n", + __func__, acrn_hvlog->pcpu_num); + return -EIO; + } + + /* More than one reader at the same time could get data messed up */ + if (atomic_cmpxchg(&acrn_hvlog->open_cnt, 0, 1) != 0) + return -EBUSY; + + filp->private_data = acrn_hvlog; + + return 0; +} + +static int hvlog_release(struct inode *inode, struct file *filp) +{ + struct acrn_hvlog *acrn_hvlog; + + acrn_hvlog = filp->private_data; + + pr_debug("%s, %s\n", __func__, acrn_hvlog->miscdev.name); + + if (acrn_hvlog->pcpu_num >= PCPU_NRS) { + pr_err("%s, invalid pcpu_num: %d\n", + __func__, acrn_hvlog->pcpu_num); + return -EIO; + } + + atomic_dec(&acrn_hvlog->open_cnt); + filp->private_data = NULL; + + return 0; +} + +static ssize_t hvlog_read(struct file *filp, char __user *buf, + size_t count, loff_t *offset) +{ + char data[LOG_ENTRY_SIZE]; + struct acrn_hvlog *acrn_hvlog; + int ret; + + acrn_hvlog = (struct acrn_hvlog *)filp->private_data; + + pr_debug("%s, %s\n", __func__, acrn_hvlog->miscdev.name); + + if (acrn_hvlog->pcpu_num >= PCPU_NRS) { + pr_err("%s, invalid pcpu_num: %d\n", + __func__, acrn_hvlog->pcpu_num); + return -EIO; + } + + if (acrn_hvlog->sbuf != NULL) { + ret = sbuf_get(acrn_hvlog->sbuf, (uint8_t *)&data); + if (ret > 0) { + if (copy_to_user(buf, &data, ret)) + return -EFAULT; + } + + return ret; + } + + return 0; +} + +static const struct file_operations hvlog_fops = { + .owner = THIS_MODULE, + .open = hvlog_open, + .release = hvlog_release, + .read = hvlog_read, +}; + +static struct acrn_hvlog hvlog_devs[ACRN_HVLOG_TYPE][PCPU_NRS] = { + [ACRN_CURRNET_HVLOG] = { + { + .miscdev = { + .name = "acrn_hvlog_cur_0", + .minor = MISC_DYNAMIC_MINOR, + .fops = &hvlog_fops, + }, + .pcpu_num = 0, + }, + { + .miscdev = { + .name =
"acrn_hvlog_cur_1", + .minor = MISC_DYNAMIC_MINOR, + .fops = &hvlog_fops, + }, + .pcpu_num = 1, + }, + { + .miscdev = { + .name = "acrn_hvlog_cur_2", + .minor = MISC_DYNAMIC_MINOR, + .fops = &hvlog_fops, + }, + .pcpu_num = 2, + }, + { + .miscdev = { + .name = "acrn_hvlog_cur_3", + .minor = MISC_DYNAMIC_MINOR, + .fops = &hvlog_fops, + }, + .pcpu_num = 3, + }, + }, + [ACRN_LAST_HVLOG] = { + { + .miscdev = { + .name = "acrn_hvlog_last_0", + .minor = MISC_DYNAMIC_MINOR, + .fops = &hvlog_fops, + }, + .pcpu_num = 0, + }, + { + .miscdev = { + .name = "acrn_hvlog_last_1", + .minor = MISC_DYNAMIC_MINOR, + .fops = &hvlog_fops, + }, + .pcpu_num = 1, + }, + { + .miscdev = { + .name = "acrn_hvlog_last_2", + .minor = MISC_DYNAMIC_MINOR, + .fops = &hvlog_fops, + }, + .pcpu_num = 2, + }, + { + .miscdev = { + .name = "acrn_hvlog_last_3", + .minor = MISC_DYNAMIC_MINOR, + .fops = &hvlog_fops, + }, + .pcpu_num = 3, + }, + } +}; + +static int __init acrn_hvlog_init(void) +{ + int ret = 0; + int i, j, idx; + uint32_t pcpu_id; + uint64_t logbuf_base0; + uint64_t logbuf_base1; + uint64_t logbuf_size; + uint32_t ele_size; + uint32_t ele_num; + uint32_t size; + bool sbuf_constructed = false; + + shared_buf_t *sbuf0[PCPU_NRS]; + shared_buf_t *sbuf1[PCPU_NRS]; + + pr_info("%s\n", __func__); + if (!hvlog_buf_base || !hvlog_buf_size) { + pr_warn("no fixed memory reserve for hvlog.\n"); + return 0; + } + + logbuf_base0 = hvlog_buf_base; + logbuf_size = (hvlog_buf_size >> 1); + logbuf_base1 = hvlog_buf_base + logbuf_size; + + size = (logbuf_size / PCPU_NRS); + ele_size = LOG_ENTRY_SIZE; + ele_num = (size - SBUF_HEAD_SIZE) / ele_size; + + foreach_cpu(pcpu_id, PCPU_NRS) { + sbuf0[pcpu_id] = sbuf_check_valid(ele_num, ele_size, + logbuf_base0 + size * pcpu_id); + sbuf1[pcpu_id] = sbuf_check_valid(ele_num, ele_size, + logbuf_base1 + size * pcpu_id); + } + + foreach_cpu(pcpu_id, PCPU_NRS) { + if (sbuf0[pcpu_id] == NULL) + continue; + + foreach_cpu(pcpu_id, PCPU_NRS) { + hvlog_devs[ACRN_LAST_HVLOG][pcpu_id].sbuf = + hvlog_mark_unread(sbuf0[pcpu_id]); + hvlog_devs[ACRN_CURRNET_HVLOG][pcpu_id].sbuf = + sbuf_construct(ele_num, ele_size, + logbuf_base1 + size * pcpu_id); + } + sbuf_constructed = true; + } + + if (sbuf_constructed == false) { + foreach_cpu(pcpu_id, PCPU_NRS) { + if (sbuf1[pcpu_id] == NULL) + continue; + + foreach_cpu(pcpu_id, PCPU_NRS) { + hvlog_devs[ACRN_LAST_HVLOG][pcpu_id].sbuf = + hvlog_mark_unread(sbuf1[pcpu_id]); + } + } + foreach_cpu(pcpu_id, PCPU_NRS) { + hvlog_devs[ACRN_CURRNET_HVLOG][pcpu_id].sbuf = + sbuf_construct(ele_num, ele_size, + logbuf_base0 + size * pcpu_id); + } + sbuf_constructed = true; + } + + idx = ACRN_CURRNET_HVLOG; + { + foreach_cpu(pcpu_id, PCPU_NRS) { + ret = sbuf_share_setup(pcpu_id, ACRN_HVLOG, + hvlog_devs[idx][pcpu_id].sbuf); + if (ret < 0) { + pr_err("Failed to setup %s, errno %d\n", + hvlog_devs[idx][pcpu_id].miscdev.name, ret); + goto setup_err; + } + } + } + + foreach_hvlog_type(idx, ACRN_HVLOG_TYPE) { + foreach_cpu(pcpu_id, PCPU_NRS) { + atomic_set(&hvlog_devs[idx][pcpu_id].open_cnt, 0); + + ret = misc_register( + &hvlog_devs[idx][pcpu_id].miscdev); + if (ret < 0) { + pr_err("Failed to register %s, errno %d\n", + hvlog_devs[idx][pcpu_id].miscdev.name, ret); + goto reg_err; + } + } + } + + return 0; + +reg_err: + foreach_hvlog_type(i, idx) { + foreach_cpu(j, PCPU_NRS) { + misc_deregister(&hvlog_devs[i][j].miscdev); + } + } + + foreach_cpu(j, pcpu_id) { + misc_deregister(&hvlog_devs[idx][j].miscdev); + } + + pcpu_id = PCPU_NRS; +setup_err: + idx = ACRN_CURRNET_HVLOG; + { + 
foreach_cpu(j, pcpu_id) { + sbuf_share_setup(j, ACRN_HVLOG, 0); + sbuf_deconstruct(hvlog_devs[idx][j].sbuf); + } + } + + return ret; +} + +static void __exit acrn_hvlog_exit(void) +{ + int idx; + uint32_t pcpu_id; + + pr_info("%s\n", __func__); + + foreach_hvlog_type(idx, ACRN_HVLOG_TYPE) { + foreach_cpu(pcpu_id, PCPU_NRS) { + misc_deregister(&hvlog_devs[idx][pcpu_id].miscdev); + } + } + + idx = ACRN_CURRNET_HVLOG; + { + foreach_cpu(pcpu_id, PCPU_NRS) { + sbuf_share_setup(pcpu_id, ACRN_HVLOG, 0); + sbuf_deconstruct(hvlog_devs[idx][pcpu_id].sbuf); + } + } +} + +module_init(acrn_hvlog_init); +module_exit(acrn_hvlog_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel Corp., http://www.intel.com"); +MODULE_DESCRIPTION("Driver for the Intel ACRN Hypervisor Logmsg"); +MODULE_VERSION("0.1"); diff --git a/drivers/acrn/sbuf.c b/drivers/acrn/sbuf.c new file mode 100644 index 000000000000..b51ee04e12fa --- /dev/null +++ b/drivers/acrn/sbuf.c @@ -0,0 +1,241 @@ +/* + * shared buffer + * + * This file is provided under a dual BSD/GPLv2 license.  When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * Contact Information: Li Fei + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Li Fei + * + */ + +#define pr_fmt(fmt) "SBuf: " fmt + +#include +#include +#include +#include +#include +#include "sbuf.h" + +static inline bool sbuf_is_empty(shared_buf_t *sbuf) +{ + return (sbuf->head == sbuf->tail); +} + +static inline uint32_t sbuf_next_ptr(uint32_t pos, + uint32_t span, uint32_t scope) +{ + pos += span; + pos = (pos >= scope) ? (pos - scope) : pos; + return pos; +} + +static inline uint32_t sbuf_calculate_allocate_size(uint32_t ele_num, + uint32_t ele_size) +{ + uint64_t sbuf_allocate_size; + + sbuf_allocate_size = ele_num * ele_size; + sbuf_allocate_size += SBUF_HEAD_SIZE; + if (sbuf_allocate_size > SBUF_MAX_SIZE) { + pr_err("num=0x%x, size=0x%x exceed 0x%llx!\n", + ele_num, ele_size, SBUF_MAX_SIZE); + return 0; + } + + /* align to PAGE_SIZE */ + return (sbuf_allocate_size + PAGE_SIZE - 1) & PAGE_MASK; +} + +shared_buf_t *sbuf_allocate(uint32_t ele_num, uint32_t ele_size) +{ + shared_buf_t *sbuf; + struct page *page; + uint32_t sbuf_allocate_size; + + if (!ele_num || !ele_size) { + pr_err("%s: invalid parameter!\n", __func__); + return NULL; + } + + sbuf_allocate_size = sbuf_calculate_allocate_size(ele_num, ele_size); + if (!sbuf_allocate_size) + return NULL; + + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, + get_order(sbuf_allocate_size)); + if (page == NULL) { + pr_err("failed to alloc pages!\n"); + return NULL; + } + + sbuf = phys_to_virt(page_to_phys(page)); + sbuf->ele_num = ele_num; + sbuf->ele_size = ele_size; + sbuf->size = ele_num * ele_size; + sbuf->magic = SBUF_MAGIC; + pr_info("ele_num=0x%x, ele_size=0x%x allocated!\n", + ele_num, ele_size); + return sbuf; +} +EXPORT_SYMBOL(sbuf_allocate); + +void sbuf_free(shared_buf_t *sbuf) +{ + uint32_t sbuf_allocate_size; + + if ((sbuf == NULL) || sbuf->magic != SBUF_MAGIC) { + pr_err("%s: invalid parameter\n", __func__); + return; + } + + sbuf_allocate_size = sbuf_calculate_allocate_size(sbuf->ele_num, + sbuf->ele_size); + if (!sbuf_allocate_size) + return; + + sbuf->magic = 0; + __free_pages((struct page *)virt_to_page(sbuf), + get_order(sbuf_allocate_size)); +} +EXPORT_SYMBOL(sbuf_free); + +int sbuf_get(shared_buf_t *sbuf, uint8_t *data) +{ + const void *from; + + if ((sbuf == NULL) || (data == NULL)) + return -EINVAL; + + if (sbuf_is_empty(sbuf)) { + /* no data available */ + return 0; + } + + from = (void *)sbuf + SBUF_HEAD_SIZE + sbuf->head; + + memcpy(data, from, sbuf->ele_size); + + sbuf->head = sbuf_next_ptr(sbuf->head, sbuf->ele_size, sbuf->size); + + return sbuf->ele_size; +} +EXPORT_SYMBOL(sbuf_get); + +int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf) +{ + struct sbuf_setup_param ssp; + + if (x86_hyper_type != X86_HYPER_ACRN) + return -ENODEV; + + ssp.pcpu_id = pcpu_id; + ssp.sbuf_id = sbuf_id; + + if (!sbuf) { + ssp.gpa = 0; + } else { + BUG_ON(!virt_addr_valid(sbuf)); + ssp.gpa = virt_to_phys(sbuf); + } + pr_info("setup phys addr = 0x%llx\n", ssp.gpa); + + return hcall_setup_sbuf(virt_to_phys(&ssp)); +} +EXPORT_SYMBOL(sbuf_share_setup); + +shared_buf_t *sbuf_check_valid(uint32_t ele_num, uint32_t ele_size, + uint64_t paddr) +{ + shared_buf_t *sbuf; + + if (!ele_num || !ele_size || !paddr) + return NULL; + + sbuf = (shared_buf_t *)phys_to_virt(paddr); + BUG_ON(!virt_addr_valid(sbuf)); + + if ((sbuf->magic == SBUF_MAGIC) && + (sbuf->ele_num == ele_num) && + (sbuf->ele_size == ele_size)) { + return sbuf; + } + + return NULL; +} +EXPORT_SYMBOL(sbuf_check_valid); + +shared_buf_t *sbuf_construct(uint32_t ele_num, uint32_t ele_size, + uint64_t paddr) +{ + shared_buf_t *sbuf; + +
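/* (re)initialize the sbuf header in the caller-provided physical region */ +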
if (!ele_num || !ele_size || !paddr) + return NULL; + + sbuf = (shared_buf_t *)phys_to_virt(paddr); + BUG_ON(!virt_addr_valid(sbuf)); + + memset(sbuf, 0, SBUF_HEAD_SIZE); + sbuf->magic = SBUF_MAGIC; + sbuf->ele_num = ele_num; + sbuf->ele_size = ele_size; + sbuf->size = ele_num * ele_size; + pr_info("construct sbuf at 0x%llx.\n", paddr); + return sbuf; +} +EXPORT_SYMBOL(sbuf_construct); + +void sbuf_deconstruct(shared_buf_t *sbuf) +{ + if (sbuf == NULL) + return; + + sbuf->magic = 0; +} +EXPORT_SYMBOL(sbuf_deconstruct); diff --git a/drivers/acrn/sbuf.h b/drivers/acrn/sbuf.h new file mode 100644 index 000000000000..4fae7a258bce --- /dev/null +++ b/drivers/acrn/sbuf.h @@ -0,0 +1,129 @@ +/* + * shared buffer + * + * This file is provided under a dual BSD/GPLv2 license.  When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * Contact Information: Li Fei + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Li Fei + * + */ + +#ifndef SHARED_BUF_H +#define SHARED_BUF_H + +#include + + +#define SBUF_MAGIC 0x5aa57aa71aa13aa3 +#define SBUF_MAX_SIZE (1ULL << 22) +#define SBUF_HEAD_SIZE 64 + +/* sbuf flags */ +#define OVERRUN_CNT_EN (1ULL << 0) /* whether overrun counting is enabled */ +#define OVERWRITE_EN (1ULL << 1) /* whether overwrite is enabled */ + +enum sbuf_type { + ACRN_TRACE, + ACRN_HVLOG, + ACRN_SBUF_TYPE_MAX, +}; +/** + * (sbuf) head + buf (stores (ele_num - 1) elements at most) + * buffer empty: tail == head + * buffer full: (tail + ele_size) % size == head + * + * Base of memory for elements + * | + * | + * --------------------------------------------------------------------------------------- + * | shared_buf_t | raw data (ele_size) | raw data (ele_size) | ... | raw data (ele_size) | + * --------------------------------------------------------------------------------------- + * | + * | + * shared_buf_t *buf + */ + +/* Make sure sizeof(shared_buf_t) == SBUF_HEAD_SIZE */ +typedef struct shared_buf { + uint64_t magic; + uint32_t ele_num; /* number of elements */ + uint32_t ele_size; /* size of one element */ + uint32_t head; /* offset from base, to read */ + uint32_t tail; /* offset from base, to write */ + uint64_t flags; + uint32_t overrun_cnt; /* count of overruns */ + uint32_t size; /* ele_num * ele_size */ + uint32_t padding[6]; +} ____cacheline_aligned shared_buf_t; + +static inline void sbuf_clear_flags(shared_buf_t *sbuf, uint64_t flags) +{ + sbuf->flags &= ~flags; +} + +static inline void sbuf_set_flags(shared_buf_t *sbuf, uint64_t flags) +{ + sbuf->flags = flags; +} + +static inline void sbuf_add_flags(shared_buf_t *sbuf, uint64_t flags) +{ + sbuf->flags |= flags; +} + +shared_buf_t *sbuf_allocate(uint32_t ele_num, uint32_t ele_size); +void sbuf_free(shared_buf_t *sbuf); +int sbuf_get(shared_buf_t *sbuf, uint8_t *data); +int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf); +shared_buf_t *sbuf_check_valid(uint32_t ele_num, uint32_t ele_size, + uint64_t gpa); +shared_buf_t *sbuf_construct(uint32_t ele_num, uint32_t ele_size, + uint64_t gpa); +void sbuf_deconstruct(shared_buf_t *sbuf); + +#endif /* SHARED_BUF_H */ diff --git a/drivers/acrn/trace.c b/drivers/acrn/trace.c new file mode 100644 index 000000000000..2a9d87e39c5e --- /dev/null +++ b/drivers/acrn/trace.c @@ -0,0 +1,299 @@ +/* +* +* ACRN Trace module +* +* This file is provided under a dual BSD/GPLv2 license.  When using or +* redistributing this file, you may do so under either license. +* +* GPL LICENSE SUMMARY +* +* Copyright (c) 2017 Intel Corporation. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of version 2 of the GNU General Public License as +* published by the Free Software Foundation. +* +* This program is distributed in the hope that it will be useful, but +* WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU +* General Public License for more details. +* +* Contact Information: Yan, Like +* +* BSD LICENSE +* +* Copyright (c) 2017 Intel Corporation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +*   * Redistributions of source code must retain the above copyright +*     notice, this list of conditions and the following disclaimer.
+*   * Redistributions in binary form must reproduce the above copyright +*     notice, this list of conditions and the following disclaimer in +*     the documentation and/or other materials provided with the +*     distribution. +*   * Neither the name of Intel Corporation nor the names of its +*     contributors may be used to endorse or promote products derived +*     from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +* Like Yan +* +*/ + +#define pr_fmt(fmt) "ACRNTrace: " fmt + +#include +#include +#include +#include +#include +#include + +#include + +#include "sbuf.h" + + +#define TRACE_SBUF_SIZE (4 * 1024 * 1024) +#define TRACE_ELEMENT_SIZE 32 /* byte */ +#define TRACE_ELEMENT_NUM ((TRACE_SBUF_SIZE - SBUF_HEAD_SIZE) / \ + TRACE_ELEMENT_SIZE) + +#define foreach_cpu(cpu, cpu_num) \ + for ((cpu) = 0; (cpu) < (cpu_num); (cpu)++) + +#define MAX_NR_CPUS 4 +/* actual physical cpu number, initialized by module init */ +static int pcpu_num; + +static int nr_cpus = MAX_NR_CPUS; +module_param(nr_cpus, int, S_IRUSR | S_IWUSR); + +static atomic_t open_cnt[MAX_NR_CPUS]; +static shared_buf_t *sbuf_per_cpu[MAX_NR_CPUS]; + +static inline int get_id_from_devname(struct file *filep) +{ + uint32_t cpuid; + int err; + char id_str[16]; + struct miscdevice *dev = filep->private_data; + + strncpy(id_str, (void *)dev->name + sizeof("acrn_trace_") - 1, 16); + id_str[15] = '\0'; + err = kstrtoul(&id_str[0], 10, (unsigned long *)&cpuid); + + if (err) + return err; + + if (cpuid >= pcpu_num) { + pr_err("%s, failed to get cpuid, cpuid %d\n", + __func__, cpuid); + return -1; + } + + return cpuid; +} + +/************************************************************************ + * + * file_operations functions + * + ***********************************************************************/ +static int trace_open(struct inode *inode, struct file *filep) +{ + int cpuid = get_id_from_devname(filep); + + pr_debug("%s, cpu %d\n", __func__, cpuid); + if (cpuid < 0) + return -ENXIO; + + /* More than one reader at the same time could get data messed up */ + if (atomic_read(&open_cnt[cpuid])) + return -EBUSY; + + atomic_inc(&open_cnt[cpuid]); + + return 0; +} + +static int trace_release(struct inode *inode, struct file *filep) +{ + int cpuid = get_id_from_devname(filep); + + pr_debug("%s, cpu %d\n", __func__, cpuid); + if (cpuid < 0) + return -ENXIO; + + atomic_dec(&open_cnt[cpuid]); + + return 0; +} + +static int trace_mmap(struct file *filep, struct vm_area_struct *vma) +{ + int cpuid = get_id_from_devname(filep); + phys_addr_t paddr; + + pr_debug("%s, cpu %d\n", __func__, cpuid); + if (cpuid < 0) + return -ENXIO; + + BUG_ON(!virt_addr_valid(sbuf_per_cpu[cpuid])); + paddr = virt_to_phys(sbuf_per_cpu[cpuid]); + + if 
(remap_pfn_range(vma, vma->vm_start, + paddr >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) { + pr_err("Failed to mmap sbuf for cpu%d\n", cpuid); + return -EAGAIN; + } + + return 0; +} + +static const struct file_operations trace_fops = { + .owner = THIS_MODULE, + .open = trace_open, + .release = trace_release, + .mmap = trace_mmap, +}; + +static struct miscdevice trace_dev0 = { + .name = "acrn_trace_0", + .minor = MISC_DYNAMIC_MINOR, + .fops = &trace_fops, +}; + +static struct miscdevice trace_dev1 = { + .name = "acrn_trace_1", + .minor = MISC_DYNAMIC_MINOR, + .fops = &trace_fops, +}; + +static struct miscdevice trace_dev2 = { + .name = "acrn_trace_2", + .minor = MISC_DYNAMIC_MINOR, + .fops = &trace_fops, +}; + +static struct miscdevice trace_dev3 = { + .name = "acrn_trace_3", + .minor = MISC_DYNAMIC_MINOR, + .fops = &trace_fops, +}; + +static struct miscdevice *trace_devs[4] = { + &trace_dev0, + &trace_dev1, + &trace_dev2, + &trace_dev3, +}; + +/* + * acrn_trace_init() + */ +static int __init acrn_trace_init(void) +{ + int ret = 0; + int i, cpu; + + /* TBD: we could get the native cpu number by hypercall later */ + pr_info("%s, cpu_num %d\n", __func__, nr_cpus); + if (nr_cpus > MAX_NR_CPUS) { + pr_err("nr_cpus %d exceed MAX_NR_CPUS %d !\n", + nr_cpus, MAX_NR_CPUS); + return -EINVAL; + } + pcpu_num = nr_cpus; + + foreach_cpu(cpu, pcpu_num) { + /* allocate shared_buf */ + sbuf_per_cpu[cpu] = sbuf_allocate(TRACE_ELEMENT_NUM, + TRACE_ELEMENT_SIZE); + if (!sbuf_per_cpu[cpu]) { + pr_err("Failed alloc SBuf, cpuid %d\n", cpu); + ret = -ENOMEM; + goto out_free; + } + } + + foreach_cpu(cpu, pcpu_num) { + ret = sbuf_share_setup(cpu, ACRN_TRACE, sbuf_per_cpu[cpu]); + if (ret < 0) { + pr_err("Failed to setup SBuf, cpuid %d\n", cpu); + goto out_sbuf; + } + } + + foreach_cpu(cpu, pcpu_num) { + ret = misc_register(trace_devs[cpu]); + if (ret < 0) { + pr_err("Failed to register acrn_trace_%d, errno %d\n", + cpu, ret); + goto out_dereg; + } + } + + return ret; + +out_dereg: + for (i = --cpu; i >= 0; i--) + misc_deregister(trace_devs[i]); + cpu = pcpu_num; + +out_sbuf: + for (i = --cpu; i >= 0; i--) + sbuf_share_setup(i, ACRN_TRACE, NULL); + cpu = pcpu_num; + +out_free: + for (i = --cpu; i >= 0; i--) + sbuf_free(sbuf_per_cpu[i]); + + return ret; +} + +/* + * acrn_trace_exit() + */ +static void __exit acrn_trace_exit(void) +{ + int cpu; + + pr_info("%s, cpu_num %d\n", __func__, pcpu_num); + + foreach_cpu(cpu, pcpu_num) { + /* deregister devices */ + misc_deregister(trace_devs[cpu]); + + /* set sbuf pointer to NULL in HV */ + sbuf_share_setup(cpu, ACRN_TRACE, NULL); + + /* free sbuf, sbuf_per_cpu[cpu] should be set NULL */ + sbuf_free(sbuf_per_cpu[cpu]); + } +} + +module_init(acrn_trace_init); +module_exit(acrn_trace_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel Corp., http://www.intel.com"); +MODULE_DESCRIPTION("Driver for the Intel ACRN Hypervisor Trace"); +MODULE_VERSION("0.1"); diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index e0f74ddc22b7..8a99fbe5759f 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -69,11 +69,12 @@ static ssize_t driver_override_show(struct device *_dev, struct device_attribute *attr, char *buf) { struct amba_device *dev = to_amba_device(_dev); + ssize_t len; - if (!dev->driver_override) - return 0; - - return sprintf(buf, "%s\n", dev->driver_override); + device_lock(_dev); + len = sprintf(buf, "%s\n", dev->driver_override); + device_unlock(_dev); + return len; } static ssize_t driver_override_store(struct device *_dev, @@ 
-81,9 +82,10 @@ static ssize_t driver_override_store(struct device *_dev, const char *buf, size_t count) { struct amba_device *dev = to_amba_device(_dev); - char *driver_override, *old = dev->driver_override, *cp; + char *driver_override, *old, *cp; - if (count > PATH_MAX) + /* We need to keep extra room for a newline */ + if (count >= (PAGE_SIZE - 1)) return -EINVAL; driver_override = kstrndup(buf, count, GFP_KERNEL); @@ -94,12 +96,15 @@ static ssize_t driver_override_store(struct device *_dev, if (cp) *cp = '\0'; + device_lock(_dev); + old = dev->driver_override; if (strlen(driver_override)) { dev->driver_override = driver_override; } else { kfree(driver_override); dev->driver_override = NULL; } + device_unlock(_dev); kfree(old); diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index 7dce3795b887..ee4880bfdcdc 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -10,7 +10,7 @@ if ANDROID config ANDROID_BINDER_IPC bool "Android Binder IPC Driver" - depends on MMU + depends on MMU && !M68K default n ---help--- Binder is used in Android for both communication between processes, @@ -32,19 +32,6 @@ config ANDROID_BINDER_DEVICES created. Each binder device has its own context manager, and is therefore logically separated from the other devices. -config ANDROID_BINDER_IPC_32BIT - bool "Use old (Android 4.4 and earlier) 32-bit binder API" - depends on !64BIT && ANDROID_BINDER_IPC - default y - ---help--- - The Binder API has been changed to support both 32 and 64bit - applications in a mixed environment. - - Enable this to support an old 32-bit Android user-space (v4.4 and - earlier). - - Note that enabling this will break newer Android user-space. - config ANDROID_BINDER_IPC_SELFTEST bool "Android Binder IPC Driver Selftest" depends on ANDROID_BINDER_IPC diff --git a/drivers/android/binder.c b/drivers/android/binder.c index fddf76ef5bd6..9c06e7f46d7f 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -72,11 +72,8 @@ #include #include -#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT -#define BINDER_IPC_32BIT 1 -#endif - #include +#include #include "binder_alloc.h" #include "binder_trace.h" @@ -141,7 +138,7 @@ enum { }; static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; -module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); +module_param_named(debug_mask, binder_debug_mask, uint, 0644); static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES; module_param_named(devices, binder_devices_param, charp, 0444); @@ -150,7 +147,7 @@ static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); static int binder_stop_on_user_error; static int binder_set_stop_on_user_error(const char *val, - struct kernel_param *kp) + const struct kernel_param *kp) { int ret; @@ -160,7 +157,7 @@ static int binder_set_stop_on_user_error(const char *val, return ret; } module_param_call(stop_on_user_error, binder_set_stop_on_user_error, - param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); + param_get_int, &binder_stop_on_user_error, 0644); #define binder_debug(mask, x...) 
\ do { \ @@ -249,7 +246,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add( unsigned int cur = atomic_inc_return(&log->cur); if (cur >= ARRAY_SIZE(log->entry)) - log->full = 1; + log->full = true; e = &log->entry[cur % ARRAY_SIZE(log->entry)]; WRITE_ONCE(e->debug_id_done, 0); /* @@ -351,10 +348,14 @@ struct binder_error { * and by @lock) * @has_async_transaction: async transaction to node in progress * (protected by @lock) + * @sched_policy: minimum scheduling policy for node + * (invariant after initialized) * @accept_fds: file descriptor operations supported for node * (invariant after initialized) * @min_priority: minimum scheduling priority * (invariant after initialized) + * @inherit_rt: inherit RT scheduling policy from caller + * (invariant after initialized) * @async_todo: list of async work items * (protected by @proc->inner_lock) * @@ -390,6 +391,8 @@ struct binder_node { /* * invariant after initialization */ + u8 sched_policy:2; + u8 inherit_rt:1; u8 accept_fds:1; u8 min_priority; }; @@ -463,6 +466,22 @@ enum binder_deferred_state { BINDER_DEFERRED_RELEASE = 0x04, }; +/** + * struct binder_priority - scheduler policy and priority + * @sched_policy scheduler policy + * @prio [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT + * + * The binder driver supports inheriting the following scheduler policies: + * SCHED_NORMAL + * SCHED_BATCH + * SCHED_FIFO + * SCHED_RR + */ +struct binder_priority { + unsigned int sched_policy; + int prio; +}; + /** * struct binder_proc - binder process bookkeeping * @proc_node: element for binder_procs list @@ -482,7 +501,8 @@ enum binder_deferred_state { * @tsk task_struct for group_leader of process * (invariant after initialized) * @files files_struct for process - * (invariant after initialized) + * (protected by @files_lock) + * @files_lock mutex to protect @files * @deferred_work_node: element for binder_deferred_list * (protected by binder_deferred_lock) * @deferred_work: bitmap of deferred work to perform @@ -492,8 +512,6 @@ enum binder_deferred_state { * (protected by @inner_lock) * @todo: list of work for this process * (protected by @inner_lock) - * @wait: wait queue head to wait for proc work - * (invariant after initialized) * @stats: per-process binder statistics * (atomics, no lock needed) * @delivered_death: list of delivered death notification @@ -530,19 +548,19 @@ struct binder_proc { int pid; struct task_struct *tsk; struct files_struct *files; + struct mutex files_lock; struct hlist_node deferred_work_node; int deferred_work; bool is_dead; struct list_head todo; - wait_queue_head_t wait; struct binder_stats stats; struct list_head delivered_death; int max_threads; int requested_threads; int requested_threads_started; int tmp_ref; - long default_priority; + struct binder_priority default_priority; struct dentry *debugfs_entry; struct binder_alloc alloc; struct binder_context *context; @@ -577,6 +595,8 @@ enum { * (protected by @proc->inner_lock) * @todo: list of work to do for this thread * (protected by @proc->inner_lock) + * @process_todo: whether work in @todo should be processed + * (protected by @proc->inner_lock) * @return_error: transaction errors reported by this thread * (only accessed by this thread) * @reply_error: transaction errors reported by target thread @@ -590,6 +610,7 @@ enum { * @is_dead: thread is dead and awaiting free * when outstanding transactions are cleaned up * (protected by @proc->inner_lock) + * @task: struct task_struct for this thread * * Bookkeeping structure for binder 
threads. */ @@ -602,12 +623,14 @@ struct binder_thread { bool looper_need_return; /* can be written by other thread */ struct binder_transaction *transaction_stack; struct list_head todo; + bool process_todo; struct binder_error return_error; struct binder_error reply_error; wait_queue_head_t wait; struct binder_stats stats; atomic_t tmp_ref; bool is_dead; + struct task_struct *task; }; struct binder_transaction { @@ -624,8 +647,9 @@ struct binder_transaction { struct binder_buffer *buffer; unsigned int code; unsigned int flags; - long priority; - long saved_priority; + struct binder_priority priority; + struct binder_priority saved_priority; + bool set_priority_called; kuid_t sender_euid; /** * @lock: protects @from, @to_proc, and @to_thread @@ -787,6 +811,16 @@ static bool binder_worklist_empty(struct binder_proc *proc, return ret; } +/** + * binder_enqueue_work_ilocked() - Add an item to the work list + * @work: struct binder_work to add to list + * @target_list: list to add work to + * + * Adds the work to the specified list. Asserts that work + * is not already on a list. + * + * Requires the proc->inner_lock to be held. + */ static void binder_enqueue_work_ilocked(struct binder_work *work, struct list_head *target_list) @@ -797,22 +831,56 @@ binder_enqueue_work_ilocked(struct binder_work *work, } /** - * binder_enqueue_work() - Add an item to the work list - * @proc: binder_proc associated with list + * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work + * @thread: thread to queue work to * @work: struct binder_work to add to list - * @target_list: list to add work to * - * Adds the work to the specified list. Asserts that work - * is not already on a list. + * Adds the work to the todo list of the thread. Doesn't set the process_todo + * flag, which means that (if it wasn't already set) the thread will go to + * sleep without handling this work when it calls read. + * + * Requires the proc->inner_lock to be held. */ static void -binder_enqueue_work(struct binder_proc *proc, - struct binder_work *work, - struct list_head *target_list) +binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread, + struct binder_work *work) { - binder_inner_proc_lock(proc); - binder_enqueue_work_ilocked(work, target_list); - binder_inner_proc_unlock(proc); + binder_enqueue_work_ilocked(work, &thread->todo); +} + +/** + * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list + * @thread: thread to queue work to + * @work: struct binder_work to add to list + * + * Adds the work to the todo list of the thread, and enables processing + * of the todo queue. + * + * Requires the proc->inner_lock to be held. + */ +static void +binder_enqueue_thread_work_ilocked(struct binder_thread *thread, + struct binder_work *work) +{ + binder_enqueue_work_ilocked(work, &thread->todo); + thread->process_todo = true; +} + +/** + * binder_enqueue_thread_work() - Add an item to the thread work list + * @thread: thread to queue work to + * @work: struct binder_work to add to list + * + * Adds the work to the todo list of the thread, and enables processing + * of the todo queue. 
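+ * Unlike binder_enqueue_thread_work_ilocked(), this takes and releases the proc inner lock itself.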
+ */ +static void +binder_enqueue_thread_work(struct binder_thread *thread, + struct binder_work *work) +{ + binder_inner_proc_lock(thread->proc); + binder_enqueue_thread_work_ilocked(thread, work); + binder_inner_proc_unlock(thread->proc); } static void @@ -877,20 +945,26 @@ static void binder_inc_node_tmpref_ilocked(struct binder_node *node); static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) { - struct files_struct *files = proc->files; unsigned long rlim_cur; unsigned long irqs; + int ret; - if (files == NULL) - return -ESRCH; - - if (!lock_task_sighand(proc->tsk, &irqs)) - return -EMFILE; - + mutex_lock(&proc->files_lock); + if (proc->files == NULL) { + ret = -ESRCH; + goto err; + } + if (!lock_task_sighand(proc->tsk, &irqs)) { + ret = -EMFILE; + goto err; + } rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); unlock_task_sighand(proc->tsk, &irqs); - return __alloc_fd(files, 0, rlim_cur, flags); + ret = __alloc_fd(proc->files, 0, rlim_cur, flags); +err: + mutex_unlock(&proc->files_lock); + return ret; } /* @@ -899,8 +973,10 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) static void task_fd_install( struct binder_proc *proc, unsigned int fd, struct file *file) { + mutex_lock(&proc->files_lock); if (proc->files) __fd_install(proc->files, fd, file); + mutex_unlock(&proc->files_lock); } /* @@ -910,9 +986,11 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd) { int retval; - if (proc->files == NULL) - return -ESRCH; - + mutex_lock(&proc->files_lock); + if (proc->files == NULL) { + retval = -ESRCH; + goto err; + } retval = __close_fd(proc->files, fd); /* can't restart close syscall because file table entry was cleared */ if (unlikely(retval == -ERESTARTSYS || @@ -920,14 +998,15 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd) retval == -ERESTARTNOHAND || retval == -ERESTART_RESTARTBLOCK)) retval = -EINTR; - +err: + mutex_unlock(&proc->files_lock); return retval; } static bool binder_has_work_ilocked(struct binder_thread *thread, bool do_proc_work) { - return !binder_worklist_empty_ilocked(&thread->todo) || + return thread->process_todo || thread->looper_need_return || (do_proc_work && !binder_worklist_empty_ilocked(&thread->proc->todo)); @@ -1051,22 +1130,145 @@ static void binder_wakeup_proc_ilocked(struct binder_proc *proc) binder_wakeup_thread_ilocked(proc, thread, /* sync = */false); } -static void binder_set_nice(long nice) +static bool is_rt_policy(int policy) +{ + return policy == SCHED_FIFO || policy == SCHED_RR; +} + +static bool is_fair_policy(int policy) +{ + return policy == SCHED_NORMAL || policy == SCHED_BATCH; +} + +static bool binder_supported_policy(int policy) { - long min_nice; + return is_fair_policy(policy) || is_rt_policy(policy); +} - if (can_nice(current, nice)) { - set_user_nice(current, nice); +static int to_userspace_prio(int policy, int kernel_priority) +{ + if (is_fair_policy(policy)) + return PRIO_TO_NICE(kernel_priority); + else + return MAX_USER_RT_PRIO - 1 - kernel_priority; +} + +static int to_kernel_prio(int policy, int user_priority) +{ + if (is_fair_policy(policy)) + return NICE_TO_PRIO(user_priority); + else + return MAX_USER_RT_PRIO - 1 - user_priority; +} + +static void binder_do_set_priority(struct task_struct *task, + struct binder_priority desired, + bool verify) +{ + int priority; /* user-space prio value */ + bool has_cap_nice; + unsigned int policy = desired.sched_policy; + + if (task->policy == policy && task->normal_prio == desired.prio) return; + + 
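/* with CAP_SYS_NICE, the RLIMIT_RTPRIO/RLIMIT_NICE clamps below are skipped */ +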
has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE); + + priority = to_userspace_prio(policy, desired.prio); + + if (verify && is_rt_policy(policy) && !has_cap_nice) { + long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO); + + if (max_rtprio == 0) { + policy = SCHED_NORMAL; + priority = MIN_NICE; + } else if (priority > max_rtprio) { + priority = max_rtprio; + } + } + + if (verify && is_fair_policy(policy) && !has_cap_nice) { + long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE)); + + if (min_nice > MAX_NICE) { + binder_user_error("%d RLIMIT_NICE not set\n", + task->pid); + return; + } else if (priority < min_nice) { + priority = min_nice; + } } - min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE)); - binder_debug(BINDER_DEBUG_PRIORITY_CAP, - "%d: nice value %ld not allowed use %ld instead\n", - current->pid, nice, min_nice); - set_user_nice(current, min_nice); - if (min_nice <= MAX_NICE) + + if (policy != desired.sched_policy || + to_kernel_prio(policy, priority) != desired.prio) + binder_debug(BINDER_DEBUG_PRIORITY_CAP, + "%d: priority %d not allowed, using %d instead\n", + task->pid, desired.prio, + to_kernel_prio(policy, priority)); + + trace_binder_set_priority(task->tgid, task->pid, task->normal_prio, + to_kernel_prio(policy, priority), + desired.prio); + + /* Set the actual priority */ + if (task->policy != policy || is_rt_policy(policy)) { + struct sched_param params; + + params.sched_priority = is_rt_policy(policy) ? priority : 0; + + sched_setscheduler_nocheck(task, + policy | SCHED_RESET_ON_FORK, + ¶ms); + } + if (is_fair_policy(policy)) + set_user_nice(task, priority); +} + +static void binder_set_priority(struct task_struct *task, + struct binder_priority desired) +{ + binder_do_set_priority(task, desired, /* verify = */ true); +} + +static void binder_restore_priority(struct task_struct *task, + struct binder_priority desired) +{ + binder_do_set_priority(task, desired, /* verify = */ false); +} + +static void binder_transaction_priority(struct task_struct *task, + struct binder_transaction *t, + struct binder_priority node_prio, + bool inherit_rt) +{ + struct binder_priority desired_prio = t->priority; + + if (t->set_priority_called) return; - binder_user_error("%d RLIMIT_NICE not set\n", current->pid); + + t->set_priority_called = true; + t->saved_priority.sched_policy = task->policy; + t->saved_priority.prio = task->normal_prio; + + if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) { + desired_prio.prio = NICE_TO_PRIO(0); + desired_prio.sched_policy = SCHED_NORMAL; + } + + if (node_prio.prio < t->priority.prio || + (node_prio.prio == t->priority.prio && + node_prio.sched_policy == SCHED_FIFO)) { + /* + * In case the minimum priority on the node is + * higher (lower value), use that priority. If + * the priority is the same, but the node uses + * SCHED_FIFO, prefer SCHED_FIFO, since it can + * run unbounded, unlike SCHED_RR. + */ + desired_prio = node_prio; + } + + binder_set_priority(task, desired_prio); } static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc, @@ -1119,6 +1321,7 @@ static struct binder_node *binder_init_node_ilocked( binder_uintptr_t ptr = fp ? fp->binder : 0; binder_uintptr_t cookie = fp ? fp->cookie : 0; __u32 flags = fp ? 
fp->flags : 0; + s8 priority; assert_spin_locked(&proc->inner_lock); @@ -1151,8 +1354,12 @@ static struct binder_node *binder_init_node_ilocked( node->ptr = ptr; node->cookie = cookie; node->work.type = BINDER_WORK_NODE; - node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; + priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; + node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >> + FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT; + node->min_priority = to_kernel_prio(node->sched_policy, priority); node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); + node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT); spin_lock_init(&node->lock); INIT_LIST_HEAD(&node->work.entry); INIT_LIST_HEAD(&node->async_todo); @@ -1215,6 +1422,17 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong, node->local_strong_refs++; if (!node->has_strong_ref && target_list) { binder_dequeue_work_ilocked(&node->work); + /* + * Note: this function is the only place where we queue + * directly to a thread->todo without using the + * corresponding binder_enqueue_thread_work() helper + * functions; in this case it's ok to not set the + * process_todo flag, since we know this node work will + * always be followed by other work that starts queue + * processing: in case of synchronous transactions, a + * BR_REPLY or BR_ERROR; in case of oneway + * transactions, a BR_TRANSACTION_COMPLETE. + */ binder_enqueue_work_ilocked(&node->work, target_list); } } else { @@ -1226,6 +1444,9 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong, node->debug_id); return -EINVAL; } + /* + * See comment above + */ binder_enqueue_work_ilocked(&node->work, target_list); } } @@ -1915,13 +2136,19 @@ static void binder_send_failed_reply(struct binder_transaction *t, binder_pop_transaction_ilocked(target_thread, t); if (target_thread->reply_error.cmd == BR_OK) { target_thread->reply_error.cmd = error_code; - binder_enqueue_work_ilocked( - &target_thread->reply_error.work, - &target_thread->todo); + binder_enqueue_thread_work_ilocked( + target_thread, + &target_thread->reply_error.work); wake_up_interruptible(&target_thread->wait); } else { - WARN(1, "Unexpected reply error: %u\n", - target_thread->reply_error.cmd); + /* + * Cannot get here for normal operation, but + * we can if multiple synchronous transactions + * are sent without blocking for responses. + * Just ignore the 2nd error in this case. + */ + pr_warn("Unexpected reply error: %u\n", + target_thread->reply_error.cmd); } binder_inner_proc_unlock(target_thread->proc); binder_thread_dec_tmpref(target_thread); @@ -1947,6 +2174,26 @@ static void binder_send_failed_reply(struct binder_transaction *t, } } +/** + * binder_cleanup_transaction() - cleans up undelivered transaction + * @t: transaction that needs to be cleaned up + * @reason: reason the transaction wasn't delivered + * @error_code: error to return to caller (if synchronous call) + */ +static void binder_cleanup_transaction(struct binder_transaction *t, + const char *reason, + uint32_t error_code) +{ + if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { + binder_send_failed_reply(t, error_code); + } else { + binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, + "undelivered transaction %d, %s\n", + t->debug_id, reason); + binder_free_transaction(t); + } +} + /** * binder_validate_object() - checks for a valid metadata object in a buffer. * @buffer: binder_buffer that we're parsing. 
@@ -1961,8 +2208,8 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) struct binder_object_header *hdr; size_t object_size = 0; - if (offset > buffer->data_size - sizeof(*hdr) || - buffer->data_size < sizeof(*hdr) || + if (buffer->data_size < sizeof(*hdr) || + offset > buffer->data_size - sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32))) return 0; @@ -2102,7 +2349,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, int debug_id = buffer->debug_id; binder_debug(BINDER_DEBUG_TRANSACTION, - "%d buffer release %d, size %zd-%zd, failed at %p\n", + "%d buffer release %d, size %zd-%zd, failed at %pK\n", proc->pid, buffer->debug_id, buffer->data_size, buffer->offsets_size, failed_at); @@ -2536,20 +2783,22 @@ static bool binder_proc_transaction(struct binder_transaction *t, struct binder_proc *proc, struct binder_thread *thread) { - struct list_head *target_list = NULL; struct binder_node *node = t->buffer->target_node; + struct binder_priority node_prio; bool oneway = !!(t->flags & TF_ONE_WAY); - bool wakeup = true; + bool pending_async = false; BUG_ON(!node); binder_node_lock(node); + node_prio.prio = node->min_priority; + node_prio.sched_policy = node->sched_policy; + if (oneway) { BUG_ON(thread); if (node->has_async_transaction) { - target_list = &node->async_todo; - wakeup = false; + pending_async = true; } else { - node->has_async_transaction = 1; + node->has_async_transaction = true; } } @@ -2561,19 +2810,20 @@ static bool binder_proc_transaction(struct binder_transaction *t, return false; } - if (!thread && !target_list) + if (!thread && !pending_async) thread = binder_select_thread_ilocked(proc); - if (thread) - target_list = &thread->todo; - else if (!target_list) - target_list = &proc->todo; - else - BUG_ON(target_list != &node->async_todo); - - binder_enqueue_work_ilocked(&t->work, target_list); + if (thread) { + binder_transaction_priority(thread->task, t, node_prio, + node->inherit_rt); + binder_enqueue_thread_work_ilocked(thread, &t->work); + } else if (!pending_async) { + binder_enqueue_work_ilocked(&t->work, &proc->todo); + } else { + binder_enqueue_work_ilocked(&t->work, &node->async_todo); + } - if (wakeup) + if (!pending_async) binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); binder_inner_proc_unlock(proc); @@ -2688,7 +2938,6 @@ static void binder_transaction(struct binder_proc *proc, } thread->transaction_stack = in_reply_to->to_parent; binder_inner_proc_unlock(proc); - binder_set_nice(in_reply_to->saved_priority); target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); if (target_thread == NULL) { return_error = BR_DEAD_REPLY; @@ -2746,6 +2995,14 @@ static void binder_transaction(struct binder_proc *proc, else return_error = BR_DEAD_REPLY; mutex_unlock(&context->context_mgr_node_lock); + if (target_node && target_proc == proc) { + binder_user_error("%d:%d got transaction to context manager from process owning it\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_invalid_target_handle; + } } if (!target_node) { /* @@ -2853,7 +3110,15 @@ static void binder_transaction(struct binder_proc *proc, t->to_thread = target_thread; t->code = tr->code; t->flags = tr->flags; - t->priority = task_nice(current); + if (!(t->flags & TF_ONE_WAY) && + binder_supported_policy(current->policy)) { + /* Inherit supported policies for synchronous transactions */ + t->priority.sched_policy = current->policy; + t->priority.prio = 
current->normal_prio; + } else { + /* Otherwise, fall back to the default priority */ + t->priority = target_proc->default_priority; + } trace_binder_transaction(reply, t, target_node); @@ -3068,10 +3333,10 @@ static void binder_transaction(struct binder_proc *proc, } } tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; - binder_enqueue_work(proc, tcomplete, &thread->todo); t->work.type = BINDER_WORK_TRANSACTION; if (reply) { + binder_enqueue_thread_work(thread, tcomplete); binder_inner_proc_lock(target_proc); if (target_thread->is_dead) { binder_inner_proc_unlock(target_proc); @@ -3079,13 +3344,22 @@ static void binder_transaction(struct binder_proc *proc, } BUG_ON(t->buffer->async_transaction != 0); binder_pop_transaction_ilocked(target_thread, in_reply_to); - binder_enqueue_work_ilocked(&t->work, &target_thread->todo); + binder_enqueue_thread_work_ilocked(target_thread, &t->work); binder_inner_proc_unlock(target_proc); wake_up_interruptible_sync(&target_thread->wait); + binder_restore_priority(current, in_reply_to->saved_priority); binder_free_transaction(in_reply_to); } else if (!(t->flags & TF_ONE_WAY)) { BUG_ON(t->buffer->async_transaction != 0); binder_inner_proc_lock(proc); + /* + * Defer the TRANSACTION_COMPLETE, so we don't return to + * userspace immediately; this allows the target process to + * immediately start processing this transaction, reducing + * latency. We will then return the TRANSACTION_COMPLETE when + * the target replies (or there is an error). + */ + binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); t->need_reply = 1; t->from_parent = thread->transaction_stack; thread->transaction_stack = t; @@ -3099,6 +3373,7 @@ static void binder_transaction(struct binder_proc *proc, } else { BUG_ON(target_node == NULL); BUG_ON(t->buffer->async_transaction != 1); + binder_enqueue_thread_work(thread, tcomplete); if (!binder_proc_transaction(t, target_proc, NULL)) goto err_dead_proc_or_thread; } @@ -3176,16 +3451,13 @@ static void binder_transaction(struct binder_proc *proc, BUG_ON(thread->return_error.cmd != BR_OK); if (in_reply_to) { + binder_restore_priority(current, in_reply_to->saved_priority); thread->return_error.cmd = BR_TRANSACTION_COMPLETE; - binder_enqueue_work(thread->proc, - &thread->return_error.work, - &thread->todo); + binder_enqueue_thread_work(thread, &thread->return_error.work); binder_send_failed_reply(in_reply_to, return_error); } else { thread->return_error.cmd = return_error; - binder_enqueue_work(thread->proc, - &thread->return_error.work, - &thread->todo); + binder_enqueue_thread_work(thread, &thread->return_error.work); } } @@ -3391,7 +3663,7 @@ static int binder_thread_write(struct binder_proc *proc, w = binder_dequeue_work_head_ilocked( &buf_node->async_todo); if (!w) { - buf_node->has_async_transaction = 0; + buf_node->has_async_transaction = false; } else { binder_enqueue_work_ilocked( w, &proc->todo); @@ -3489,10 +3761,9 @@ static int binder_thread_write(struct binder_proc *proc, WARN_ON(thread->return_error.cmd != BR_OK); thread->return_error.cmd = BR_ERROR; - binder_enqueue_work( - thread->proc, - &thread->return_error.work, - &thread->todo); + binder_enqueue_thread_work( + thread, + &thread->return_error.work); binder_debug( BINDER_DEBUG_FAILED_TRANSACTION, "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", @@ -3572,9 +3843,9 @@ static int binder_thread_write(struct binder_proc *proc, if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) - binder_enqueue_work_ilocked( - &death->work, - &thread->todo); + 
binder_enqueue_thread_work_ilocked( + thread, + &death->work); else { binder_enqueue_work_ilocked( &death->work, @@ -3614,7 +3885,7 @@ static int binder_thread_write(struct binder_proc *proc, } } binder_debug(BINDER_DEBUG_DEAD_BINDER, - "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", + "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n", proc->pid, thread->pid, (u64)cookie, death); if (death == NULL) { @@ -3629,8 +3900,8 @@ static int binder_thread_write(struct binder_proc *proc, if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) - binder_enqueue_work_ilocked( - &death->work, &thread->todo); + binder_enqueue_thread_work_ilocked( + thread, &death->work); else { binder_enqueue_work_ilocked( &death->work, @@ -3761,7 +4032,7 @@ static int binder_thread_read(struct binder_proc *proc, wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); } - binder_set_nice(proc->default_priority); + binder_restore_priority(current, proc->default_priority); } if (non_block) { @@ -3804,6 +4075,8 @@ static int binder_thread_read(struct binder_proc *proc, break; } w = binder_dequeue_work_head_ilocked(list); + if (binder_worklist_empty_ilocked(&thread->todo)) + thread->process_todo = false; switch (w->type) { case BINDER_WORK_TRANSACTION: { @@ -3818,6 +4091,7 @@ static int binder_thread_read(struct binder_proc *proc, binder_inner_proc_unlock(proc); if (put_user(e->cmd, (uint32_t __user *)ptr)) return -EFAULT; + cmd = e->cmd; e->cmd = BR_OK; ptr += sizeof(uint32_t); @@ -3973,16 +4247,14 @@ static int binder_thread_read(struct binder_proc *proc, BUG_ON(t->buffer == NULL); if (t->buffer->target_node) { struct binder_node *target_node = t->buffer->target_node; + struct binder_priority node_prio; tr.target.ptr = target_node->ptr; tr.cookie = target_node->cookie; - t->saved_priority = task_nice(current); - if (t->priority < target_node->min_priority && - !(t->flags & TF_ONE_WAY)) - binder_set_nice(t->priority); - else if (!(t->flags & TF_ONE_WAY) || - t->saved_priority > target_node->min_priority) - binder_set_nice(target_node->min_priority); + node_prio.sched_policy = target_node->sched_policy; + node_prio.prio = target_node->min_priority; + binder_transaction_priority(current, t, node_prio, + target_node->inherit_rt); cmd = BR_TRANSACTION; } else { tr.target.ptr = 0; @@ -4015,12 +4287,20 @@ static int binder_thread_read(struct binder_proc *proc, if (put_user(cmd, (uint32_t __user *)ptr)) { if (t_from) binder_thread_dec_tmpref(t_from); + + binder_cleanup_transaction(t, "put_user failed", + BR_FAILED_REPLY); + return -EFAULT; } ptr += sizeof(uint32_t); if (copy_to_user(ptr, &tr, sizeof(tr))) { if (t_from) binder_thread_dec_tmpref(t_from); + + binder_cleanup_transaction(t, "copy_to_user failed", + BR_FAILED_REPLY); + return -EFAULT; } ptr += sizeof(tr); @@ -4090,15 +4370,9 @@ static void binder_release_work(struct binder_proc *proc, struct binder_transaction *t; t = container_of(w, struct binder_transaction, work); - if (t->buffer->target_node && - !(t->flags & TF_ONE_WAY)) { - binder_send_failed_reply(t, BR_DEAD_REPLY); - } else { - binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, - "undelivered transaction %d\n", - t->debug_id); - binder_free_transaction(t); - } + + binder_cleanup_transaction(t, "process died.", + BR_DEAD_REPLY); } break; case BINDER_WORK_RETURN_ERROR: { struct binder_error *e = container_of( @@ -4158,6 +4432,8 @@ static struct binder_thread *binder_get_thread_ilocked( binder_stats_created(BINDER_STAT_THREAD); thread->proc = proc; thread->pid = 
current->pid; + get_task_struct(current); + thread->task = current; atomic_set(&thread->tmp_ref, 0); init_waitqueue_head(&thread->wait); INIT_LIST_HEAD(&thread->todo); @@ -4208,6 +4484,7 @@ static void binder_free_thread(struct binder_thread *thread) BUG_ON(!list_empty(&thread->todo)); binder_stats_deleted(BINDER_STAT_THREAD); binder_proc_dec_tmpref(thread->proc); + put_task_struct(thread->task); kfree(thread); } @@ -4267,8 +4544,29 @@ static int binder_thread_release(struct binder_proc *proc, if (t) spin_lock(&t->lock); } + + /* + * If this thread used poll, make sure we remove the waitqueue + * from any epoll data structures holding it with POLLFREE. + * waitqueue_active() is safe to use here because we're holding + * the inner lock. + */ + if ((thread->looper & BINDER_LOOPER_STATE_POLL) && + waitqueue_active(&thread->wait)) { + wake_up_poll(&thread->wait, POLLHUP | POLLFREE); + } + binder_inner_proc_unlock(thread->proc); + /* + * This is needed to avoid races between wake_up_poll() above + * and ep_remove_waitqueue() called for other reasons (e.g. the epoll file + * descriptor being closed); ep_remove_waitqueue() holds an RCU read + * lock, so we can be sure it's done after calling synchronize_rcu(). + */ + if (thread->looper & BINDER_LOOPER_STATE_POLL) + synchronize_rcu(); + if (send_reply) binder_send_failed_reply(send_reply, BR_DEAD_REPLY); binder_release_work(proc, &thread->todo); @@ -4284,6 +4582,8 @@ static unsigned int binder_poll(struct file *filp, bool wait_for_proc_work; thread = binder_get_thread(proc); + if (!thread) + return POLLERR; binder_inner_proc_lock(thread->proc); thread->looper |= BINDER_LOOPER_STATE_POLL; @@ -4598,18 +4898,22 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) failure_string = "bad vm_flags"; goto err_bad_arg; } - vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; + vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP; + vma->vm_flags &= ~VM_MAYWRITE; + vma->vm_ops = &binder_vm_ops; vma->vm_private_data = proc; ret = binder_alloc_mmap_handler(&proc->alloc, vma); if (ret) return ret; + mutex_lock(&proc->files_lock); proc->files = get_files_struct(current); + mutex_unlock(&proc->files_lock); return 0; err_bad_arg: - pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", + pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); return ret; } @@ -4619,7 +4923,7 @@ static int binder_open(struct inode *nodp, struct file *filp) struct binder_proc *proc; struct binder_device *binder_dev; - binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", + binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, current->group_leader->pid, current->pid); proc = kzalloc(sizeof(*proc), GFP_KERNEL); @@ -4629,8 +4933,16 @@ static int binder_open(struct inode *nodp, struct file *filp) spin_lock_init(&proc->outer_lock); get_task_struct(current->group_leader); proc->tsk = current->group_leader; + mutex_init(&proc->files_lock); INIT_LIST_HEAD(&proc->todo); - proc->default_priority = task_nice(current); + if (binder_supported_policy(current->policy)) { + proc->default_priority.sched_policy = current->policy; + proc->default_priority.prio = current->normal_prio; + } else { + proc->default_priority.sched_policy = SCHED_NORMAL; + proc->default_priority.prio = NICE_TO_PRIO(0); + } + binder_dev = container_of(filp->private_data, struct binder_device, miscdev); proc->context = &binder_dev->context; @@ -4657,7 +4969,7 @@ static int binder_open(struct inode *nodp, struct file *filp) * anyway print 
all contexts that a given PID has, so this * is not a problem. */ - proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, + proc->debugfs_entry = debugfs_create_file(strbuf, 0444, binder_debugfs_dir_entry_proc, (void *)(unsigned long)proc->pid, &binder_proc_fops); @@ -4881,9 +5193,11 @@ static void binder_deferred_func(struct work_struct *work) files = NULL; if (defer & BINDER_DEFERRED_PUT_FILES) { + mutex_lock(&proc->files_lock); files = proc->files; if (files) proc->files = NULL; + mutex_unlock(&proc->files_lock); } if (defer & BINDER_DEFERRED_FLUSH) @@ -4922,13 +5236,14 @@ static void print_binder_transaction_ilocked(struct seq_file *m, spin_lock(&t->lock); to_proc = t->to_proc; seq_printf(m, - "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", + "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d", prefix, t->debug_id, t, t->from ? t->from->proc->pid : 0, t->from ? t->from->pid : 0, to_proc ? to_proc->pid : 0, t->to_thread ? t->to_thread->pid : 0, - t->code, t->flags, t->priority, t->need_reply); + t->code, t->flags, t->priority.sched_policy, + t->priority.prio, t->need_reply); spin_unlock(&t->lock); if (proc != to_proc) { @@ -4946,7 +5261,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m, } if (buffer->target_node) seq_printf(m, " node %d", buffer->target_node->debug_id); - seq_printf(m, " size %zd:%zd data %p\n", + seq_printf(m, " size %zd:%zd data %pK\n", buffer->data_size, buffer->offsets_size, buffer->data); } @@ -5046,8 +5361,9 @@ static void print_binder_node_nilocked(struct seq_file *m, hlist_for_each_entry(ref, &node->refs, node_entry) count++; - seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d", + seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d", node->debug_id, (u64)node->ptr, (u64)node->cookie, + node->sched_policy, node->min_priority, node->has_strong_ref, node->has_weak_ref, node->local_strong_refs, node->local_weak_refs, node->internal_strong_refs, count, node->tmp_refs); @@ -5484,7 +5800,9 @@ static int __init binder_init(void) struct binder_device *device; struct hlist_node *tmp; - binder_alloc_shrinker_init(); + ret = binder_alloc_shrinker_init(); + if (ret) + return ret; atomic_set(&binder_transaction_log.cur, ~0U); atomic_set(&binder_transaction_log_failed.cur, ~0U); @@ -5496,27 +5814,27 @@ static int __init binder_init(void) if (binder_debugfs_dir_entry_root) { debugfs_create_file("state", - S_IRUGO, + 0444, binder_debugfs_dir_entry_root, NULL, &binder_state_fops); debugfs_create_file("stats", - S_IRUGO, + 0444, binder_debugfs_dir_entry_root, NULL, &binder_stats_fops); debugfs_create_file("transactions", - S_IRUGO, + 0444, binder_debugfs_dir_entry_root, NULL, &binder_transactions_fops); debugfs_create_file("transaction_log", - S_IRUGO, + 0444, binder_debugfs_dir_entry_root, &binder_transaction_log, &binder_transaction_log_fops); debugfs_create_file("failed_transaction_log", - S_IRUGO, + 0444, binder_debugfs_dir_entry_root, &binder_transaction_log_failed, &binder_transaction_log_fops); diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index c2819a3d58a6..4f382d51def1 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -186,12 +186,12 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, } static int binder_update_page_range(struct binder_alloc *alloc, int allocate, - void *start, void *end, - struct vm_area_struct *vma) + void *start, void *end) { void 
*page_addr; unsigned long user_page_addr; struct binder_lru_page *page; + struct vm_area_struct *vma = NULL; struct mm_struct *mm = NULL; bool need_mm = false; @@ -215,11 +215,11 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, } } - if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm)) + if (need_mm && mmget_not_zero(alloc->vma_vm_mm)) mm = alloc->vma_vm_mm; if (mm) { - down_write(&mm->mmap_sem); + down_read(&mm->mmap_sem); vma = alloc->vma; } @@ -281,11 +281,14 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, goto err_vm_insert_page_failed; } + if (index + 1 > alloc->pages_high) + alloc->pages_high = index + 1; + trace_binder_alloc_page_end(alloc, index); /* vm_insert_page does not seem to increment the refcount */ } if (mm) { - up_write(&mm->mmap_sem); + up_read(&mm->mmap_sem); mmput(mm); } return 0; @@ -318,17 +321,18 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, } err_no_vma: if (mm) { - up_write(&mm->mmap_sem); + up_read(&mm->mmap_sem); mmput(mm); } return vma ? -ENOMEM : -ESRCH; } -struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, - size_t data_size, - size_t offsets_size, - size_t extra_buffers_size, - int is_async) +static struct binder_buffer *binder_alloc_new_buf_locked( + struct binder_alloc *alloc, + size_t data_size, + size_t offsets_size, + size_t extra_buffers_size, + int is_async) { struct rb_node *n = alloc->free_buffers.rb_node; struct binder_buffer *buffer; @@ -437,7 +441,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, if (end_page_addr > has_page_addr) end_page_addr = has_page_addr; ret = binder_update_page_range(alloc, 1, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL); + (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr); if (ret) return ERR_PTR(ret); @@ -478,7 +482,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, err_alloc_buf_struct_failed: binder_update_page_range(alloc, 0, (void *)PAGE_ALIGN((uintptr_t)buffer->data), - end_page_addr, NULL); + end_page_addr); return ERR_PTR(-ENOMEM); } @@ -562,8 +566,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc, alloc->pid, buffer->data, prev->data, next ? 
next->data : NULL); binder_update_page_range(alloc, 0, buffer_start_page(buffer), - buffer_start_page(buffer) + PAGE_SIZE, - NULL); + buffer_start_page(buffer) + PAGE_SIZE); } list_del(&buffer->entry); kfree(buffer); @@ -600,8 +603,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, binder_update_page_range(alloc, 0, (void *)PAGE_ALIGN((uintptr_t)buffer->data), - (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), - NULL); + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK)); rb_erase(&buffer->rb_node, &alloc->allocated_buffers); buffer->free = 1; @@ -668,7 +670,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, goto err_already_mapped; } - area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); + area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC); if (area == NULL) { ret = -ENOMEM; failure_string = "get_vm_area"; @@ -855,6 +857,7 @@ void binder_alloc_print_pages(struct seq_file *m, } mutex_unlock(&alloc->mutex); seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); + seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); } /** @@ -984,7 +987,7 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) return ret; } -struct shrinker binder_shrinker = { +static struct shrinker binder_shrinker = { .count_objects = binder_shrink_count, .scan_objects = binder_shrink_scan, .seeks = DEFAULT_SEEKS, @@ -1004,8 +1007,14 @@ void binder_alloc_init(struct binder_alloc *alloc) INIT_LIST_HEAD(&alloc->buffers); } -void binder_alloc_shrinker_init(void) +int binder_alloc_shrinker_init(void) { - list_lru_init(&binder_alloc_lru); - register_shrinker(&binder_shrinker); + int ret = list_lru_init(&binder_alloc_lru); + + if (ret == 0) { + ret = register_shrinker(&binder_shrinker); + if (ret) + list_lru_destroy(&binder_alloc_lru); + } + return ret; } diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index 2dd33b6df104..9ef64e563856 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -92,6 +92,7 @@ struct binder_lru_page { * @pages: array of binder_lru_page * @buffer_size: size of address space specified via mmap * @pid: pid for associated binder_proc (invariant after init) + * @pages_high: high watermark of offset in @pages * * Bookkeeping structure for per-proc address space management for binder * buffers. 
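
The @pages_high field documented here is maintained by a single compare-and-store on every page allocation. A minimal standalone model of that watermark bookkeeping (plain userspace C, names invented for illustration, not the driver's actual code):

```c
/*
 * Standalone model of the pages_high watermark: remember the highest
 * page index ever in use, mirroring the compare-and-store added to
 * binder_update_page_range(). Indices below are illustrative only.
 */
#include <stdio.h>

static size_t pages_high;   /* models alloc->pages_high */

static void page_now_in_use(size_t index)
{
    if (index + 1 > pages_high)
        pages_high = index + 1;
}

int main(void)
{
    page_now_in_use(0);
    page_now_in_use(7);
    page_now_in_use(3);   /* lower index: watermark unchanged */
    printf("pages high watermark: %zu\n", pages_high);   /* 8 */
    return 0;
}
```

Because the watermark only ever grows, it reports peak page consumption over the life of the mapping, which is what the new seq_printf() line in binder_alloc_print_pages() exposes.
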
It is normally initialized during binder_init() and binder_mmap() @@ -112,6 +113,7 @@ struct binder_alloc { size_t buffer_size; uint32_t buffer_free; int pid; + size_t pages_high; }; #ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST @@ -128,7 +130,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t extra_buffers_size, int is_async); extern void binder_alloc_init(struct binder_alloc *alloc); -void binder_alloc_shrinker_init(void); +extern int binder_alloc_shrinker_init(void); extern void binder_alloc_vma_close(struct binder_alloc *alloc); extern struct binder_buffer * binder_alloc_prepare_to_free(struct binder_alloc *alloc, diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h index 76e3b9c8a8a2..b11dffc521e8 100644 --- a/drivers/android/binder_trace.h +++ b/drivers/android/binder_trace.h @@ -85,6 +85,30 @@ DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done); DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done); DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done); +TRACE_EVENT(binder_set_priority, + TP_PROTO(int proc, int thread, unsigned int old_prio, + unsigned int desired_prio, unsigned int new_prio), + TP_ARGS(proc, thread, old_prio, desired_prio, new_prio), + + TP_STRUCT__entry( + __field(int, proc) + __field(int, thread) + __field(unsigned int, old_prio) + __field(unsigned int, new_prio) + __field(unsigned int, desired_prio) + ), + TP_fast_assign( + __entry->proc = proc; + __entry->thread = thread; + __entry->old_prio = old_prio; + __entry->new_prio = new_prio; + __entry->desired_prio = desired_prio; + ), + TP_printk("proc=%d thread=%d old=%d => new=%d desired=%d", + __entry->proc, __entry->thread, __entry->old_prio, + __entry->new_prio, __entry->desired_prio) +); + TRACE_EVENT(binder_wait_for_work, TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo), TP_ARGS(proc_work, transaction_stack, thread_todo), diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 9f78bb03bb76..f003e301723a 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -267,9 +267,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */ { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ - { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */ + { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH M AHCI */ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ - { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ + { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH M RAID */ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */ @@ -292,9 +292,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ - { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ + { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT M AHCI */ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ - { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ + { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT M RAID */ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ @@ -303,20 +303,20 @@ static 
const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */ - { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */ + { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point M AHCI */ { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */ { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */ { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */ - { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */ + { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point M RAID */ { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */ { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */ - { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */ + { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point M AHCI */ { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */ - { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point M RAID */ { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */ - { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point M RAID */ { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */ - { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point M RAID */ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */ @@ -357,21 +357,21 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */ - { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */ + { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series M AHCI */ { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */ - { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series M RAID */ { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */ - { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series M RAID */ { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ - { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series M RAID */ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */ { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */ - { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ + { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H M AHCI */ { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */ - { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ + { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H M 
RAID */ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/ @@ -385,6 +385,11 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/ + { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */ + { PCI_VDEVICE(INTEL, 0x0f22), board_ahci }, /* Bay Trail AHCI */ + { PCI_VDEVICE(INTEL, 0x0f23), board_ahci }, /* Bay Trail AHCI */ + { PCI_VDEVICE(INTEL, 0x22a3), board_ahci }, /* Cherry Trail AHCI */ + { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci }, /* Apollo Lake AHCI */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, @@ -536,7 +541,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { .driver_data = board_ahci_yes_fbs }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230), .driver_data = board_ahci_yes_fbs }, - { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), + { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */ + .driver_data = board_ahci_yes_fbs }, + { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */ .driver_data = board_ahci_yes_fbs }, /* Promise */ @@ -679,7 +686,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("ENTER\n"); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), deadline, &online, NULL); @@ -705,7 +712,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, bool online; int rc; - ahci_stop_engine(ap); + hpriv->stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); @@ -769,7 +776,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("ENTER\n"); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); for (i = 0; i < 2; i++) { u16 val; @@ -1260,6 +1267,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) return strcmp(buf, dmi->driver_data) < 0; } +static bool ahci_broken_lpm(struct pci_dev *pdev) +{ + static const struct dmi_system_id sysids[] = { + /* Various Lenovo 50 series have LPM issues with older BIOSen */ + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"), + }, + .driver_data = "20180406", /* 1.31 */ + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"), + }, + .driver_data = "20180420", /* 1.28 */ + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"), + }, + .driver_data = "20180315", /* 1.33 */ + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"), + }, + /* + * Note date based on release notes, 2.35 has been + * reported to be good, but I've been unable to get + * a hold of the reporter to get the DMI BIOS date. + * TODO: fix this. 
+ */ + .driver_data = "20180310", /* 2.35 */ + }, + { } /* terminate list */ + }; + const struct dmi_system_id *dmi = dmi_first_match(sysids); + int year, month, date; + char buf[9]; + + if (!dmi) + return false; + + dmi_get_date(DMI_BIOS_DATE, &year, &month, &date); + snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date); + + return strcmp(buf, dmi->driver_data) < 0; +} + static bool ahci_broken_online(struct pci_dev *pdev) { #define ENCODE_BUSDEVFN(bus, slot, func) \ @@ -1670,6 +1730,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) "quirky BIOS, skipping spindown on poweroff\n"); } + if (ahci_broken_lpm(pdev)) { + pi.flags |= ATA_FLAG_NO_LPM; + dev_warn(&pdev->dev, + "BIOS update required for Link Power Management support\n"); + } + if (ahci_broken_suspend(pdev)) { hpriv->flags |= AHCI_HFLAG_NO_SUSPEND; dev_warn(&pdev->dev, diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 8b61123d2c3c..781b898e5785 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -361,6 +361,13 @@ struct ahci_host_priv { * be overridden anytime before the host is activated. */ void (*start_engine)(struct ata_port *ap); + /* + * Optional ahci_stop_engine override; if not set, this is set to the + * default ahci_stop_engine during ahci_save_initial_config. It can + * be overridden anytime before the host is activated. + */ + int (*stop_engine)(struct ata_port *ap); + irqreturn_t (*irq_handler)(int irq, void *dev_instance); /* only required for per-port MSI(-X) support */ diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c index de7128d81e9c..0045dacd814b 100644 --- a/drivers/ata/ahci_mvebu.c +++ b/drivers/ata/ahci_mvebu.c @@ -62,6 +62,60 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv) writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); } +/** + * ahci_mvebu_stop_engine + * + * @ap: Target ata port + * + * Errata Ref#226 - SATA Disk HOT swap issue when connected through + * Port Multiplier in FIS-based Switching mode. + * + * To avoid the issue, according to design, the bits[11:8, 0] of + * register PxFBS are cleared when Port Command and Status (0x18) bit[0] + * changes its value from 1 to 0, i.e. falling edge of Port + * Command and Status bit[0] sends PULSE that resets PxFBS + * bits[11:8, 0]. + * + * This function overrides "ahci_stop_engine" from libahci.c, adding the + * mvebu workaround (WA): save the PxFBS value before the PxCMD ST + * write of 0, then restore the PxFBS value. + * + * Return: 0 on success; Error code otherwise. + */ +int ahci_mvebu_stop_engine(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + u32 tmp, port_fbs; + + tmp = readl(port_mmio + PORT_CMD); + + /* check if the HBA is idle */ + if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0) + return 0; + + /* save the port PxFBS register for later restore */ + port_fbs = readl(port_mmio + PORT_FBS); + + /* setting HBA to idle */ + tmp &= ~PORT_CMD_START; + writel(tmp, port_mmio + PORT_CMD); + + /* + * The PxCMD bit #15 signal doesn't clear PxFBS; restore the PxFBS + * register right after clearing the PxCMD ST, with no need to wait + * for the PxCMD bit #15. + */ + writel(port_fbs, port_mmio + PORT_FBS); + + /* wait for engine to stop. 
This could be as long as 500 msec */ + tmp = ata_wait_register(ap, port_mmio + PORT_CMD, + PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); + if (tmp & PORT_CMD_LIST_ON) + return -EIO; + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state) { @@ -112,6 +166,8 @@ static int ahci_mvebu_probe(struct platform_device *pdev) if (rc) return rc; + hpriv->stop_engine = ahci_mvebu_stop_engine; + if (of_device_is_compatible(pdev->dev.of_node, "marvell,armada-380-ahci")) { dram = mv_mbus_dram_info(); diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index b6b0bf76dfc7..ab5ac103bfb8 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c @@ -94,7 +94,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("ENTER\n"); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); /* * There is a errata on ls1021a Rev1.0 and Rev2.0 which is: diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index c2b5941d9184..ad58da7c9aff 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c @@ -165,7 +165,7 @@ static int xgene_ahci_restart_engine(struct ata_port *ap) PORT_CMD_ISSUE, 0x0, 1, 100)) return -EBUSY; - ahci_stop_engine(ap); + hpriv->stop_engine(ap); ahci_start_fis_rx(ap); /* @@ -421,7 +421,7 @@ static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class, portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR); portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); rc = xgene_ahci_do_hardreset(link, deadline, &online); diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 3e286d86ab42..5ae268b8514e 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -560,6 +560,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv) if (!hpriv->start_engine) hpriv->start_engine = ahci_start_engine; + if (!hpriv->stop_engine) + hpriv->stop_engine = ahci_stop_engine; + if (!hpriv->irq_handler) hpriv->irq_handler = ahci_single_level_irq_intr; } @@ -887,9 +890,10 @@ static void ahci_start_port(struct ata_port *ap) static int ahci_deinit_port(struct ata_port *ap, const char **emsg) { int rc; + struct ahci_host_priv *hpriv = ap->host->private_data; /* disable DMA */ - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) { *emsg = "failed to stop engine"; return rc; @@ -1299,7 +1303,7 @@ int ahci_kick_engine(struct ata_port *ap) int busy, rc; /* stop engine */ - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) goto out_restart; @@ -1538,7 +1542,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("ENTER\n"); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); @@ -2064,14 +2068,14 @@ void ahci_error_handler(struct ata_port *ap) if (!(ap->pflags & ATA_PFLAG_FROZEN)) { /* restart engine */ - ahci_stop_engine(ap); + hpriv->stop_engine(ap); hpriv->start_engine(ap); } sata_pmp_error_handler(ap); if (!ata_dev_enabled(ap->link.device)) - ahci_stop_engine(ap); + hpriv->stop_engine(ap); } EXPORT_SYMBOL_GPL(ahci_error_handler); @@ -2118,7 +2122,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) return; /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */ - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) return; @@ -2178,7 +2182,7 @@ static void ahci_enable_fbs(struct ata_port *ap) 
return; } - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) return; @@ -2211,7 +2215,7 @@ static void ahci_disable_fbs(struct ata_port *ap) return; } - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) return; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index ee4c1ec9dca0..6938bd86ff1c 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2501,6 +2501,9 @@ int ata_dev_configure(struct ata_device *dev) (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) dev->horkage |= ATA_HORKAGE_NOLPM; + if (ap->flags & ATA_FLAG_NO_LPM) + dev->horkage |= ATA_HORKAGE_NOLPM; + if (dev->horkage & ATA_HORKAGE_NOLPM) { ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; @@ -4439,6 +4442,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { * https://bugzilla.kernel.org/show_bug.cgi?id=121671 */ { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, + { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 }, /* Devices we expect to fail diagnostics */ @@ -4482,6 +4486,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, + /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on + SD7SN6S256G and SD8SN8U256G */ + { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, }, + /* devices which puke on READ_NATIVE_MAX */ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, @@ -4519,7 +4527,28 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, + /* Crucial BX100 SSD 500GB has broken LPM support */ + { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM }, + + /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */ + { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM | + ATA_HORKAGE_NOLPM, }, + /* 512GB MX100 with newer firmware has only LPM issues */ + { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM | + ATA_HORKAGE_NOLPM, }, + + /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */ + { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM | + ATA_HORKAGE_NOLPM, }, + { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM | + ATA_HORKAGE_NOLPM, }, + /* devices that don't properly handle queued TRIM commands */ + { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | @@ -4530,7 +4559,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, - { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, @@ -5390,8 +5421,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc) * We guarantee to LLDs that they will have at least one * non-zero sg if the command is a data 
command. */ - if (WARN_ON_ONCE(ata_is_data(prot) && - (!qc->sg || !qc->n_elem || !qc->nbytes))) + if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)) goto sys_err; if (ata_is_dma(prot) || (ata_is_pio(prot) && diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index e4effef0c83f..2651c81d1edf 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -175,8 +175,8 @@ static void ata_eh_handle_port_resume(struct ata_port *ap) { } #endif /* CONFIG_PM */ -static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, - va_list args) +static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, + const char *fmt, va_list args) { ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, ATA_EH_DESC_LEN - ehi->desc_len, @@ -2217,12 +2217,16 @@ static void ata_eh_link_autopsy(struct ata_link *link) if (qc->err_mask & ~AC_ERR_OTHER) qc->err_mask &= ~AC_ERR_OTHER; - /* SENSE_VALID trumps dev/unknown error and revalidation */ + /* + * SENSE_VALID trumps dev/unknown error and revalidation. Upper + * layers will determine whether the command is worth retrying + * based on the sense data and device class/type. Otherwise, + * determine directly if the command is worth retrying using its + * error mask and flags. + */ if (qc->flags & ATA_QCFLAG_SENSE_VALID) qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); - - /* determine whether the command is worth retrying */ - if (ata_eh_worth_retry(qc)) + else if (ata_eh_worth_retry(qc)) qc->flags |= ATA_QCFLAG_RETRY; /* accumulate error info */ @@ -2264,8 +2268,8 @@ static void ata_eh_link_autopsy(struct ata_link *link) if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) eflags |= ATA_EFLAG_DUBIOUS_XFER; ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); + trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); } - trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); DPRINTK("EXIT\n"); } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 44ba292f2cd7..bf5777bc04d3 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3315,6 +3315,12 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) goto invalid_fld; } + /* We may not issue NCQ commands to devices not supporting NCQ */ + if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) { + fp = 1; + goto invalid_fld; + } + /* sanity check for pio multi commands */ if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) { fp = 1; @@ -3795,10 +3801,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc) */ goto invalid_param_len; } - if (block > dev->n_sectors) - goto out_of_range; all = cdb[14] & 0x1; + if (all) { + /* + * Ignore the block address (zone ID) as defined by ZBC. + */ + block = 0; + } else if (block >= dev->n_sectors) { + /* + * Block must be a valid zone ID (a zone start LBA). 
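
The validation that resumes below reports an out-of-range zone ID as an invalid field in the CDB (fp = 2) instead of the old "LBA out of range" sense, and the ALL bit bypasses the check entirely. A standalone sketch of the resulting decision logic (userspace C, simplified; zone-start alignment is left to the device, and all names here are stand-ins):

```c
/*
 * Standalone model of the ZBC MANAGEMENT OUT checks: if the ALL bit is
 * set the zone ID is ignored, otherwise it must lie below the device
 * capacity.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct zbc_req {
    uint64_t block;   /* zone ID (zone start LBA) from the CDB */
    bool all;         /* CDB byte 14, bit 0: act on all zones  */
};

/* Returns 0 if acceptable, -1 if the zone ID field is invalid. */
static int check_zone_mgmt_out(struct zbc_req *req, uint64_t n_sectors)
{
    if (req->all) {
        req->block = 0;          /* zone ID is ignored per ZBC */
        return 0;
    }
    if (req->block >= n_sectors) /* must be a valid zone ID */
        return -1;
    return 0;
}

int main(void)
{
    struct zbc_req r = { .block = 1 << 20, .all = false };

    printf("%d\n", check_zone_mgmt_out(&r, 1 << 16)); /* -1: beyond capacity */
    r.all = true;
    printf("%d\n", check_zone_mgmt_out(&r, 1 << 16)); /* 0: ALL ignores block */
    return 0;
}
```
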
+ */ + fp = 2; + goto invalid_fld; + } if (ata_ncq_enabled(qc->dev) && ata_fpdma_zac_mgmt_out_supported(qc->dev)) { @@ -3827,10 +3843,6 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc) invalid_fld: ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff); return 1; - out_of_range: - /* "Logical Block Address out of range" */ - ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00); - return 1; invalid_param_len: /* "Parameter list length error" */ ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0); @@ -4281,7 +4293,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap, #ifdef ATA_DEBUG struct scsi_device *scsidev = cmd->device; - DPRINTK("CDB (%u:%d,%d,%d) %9ph\n", + DPRINTK("CDB (%u:%d,%d,%lld) %9ph\n", ap->print_id, scsidev->channel, scsidev->id, scsidev->lun, cmd->cmnd); @@ -4308,7 +4320,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { /* relay SCSI command to ATAPI device */ int len = COMMAND_SIZE(scsi_op); - if (unlikely(len > scmd->cmd_len || len > dev->cdb_len)) + if (unlikely(len > scmd->cmd_len || + len > dev->cdb_len || + scmd->cmd_len > ATAPI_CDB_LEN)) goto bad_cdb_len; xlat_func = atapi_xlat; diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c index de4ddd0e8550..b3ed8f9953a8 100644 --- a/drivers/ata/libata-zpodd.c +++ b/drivers/ata/libata-zpodd.c @@ -35,7 +35,7 @@ struct zpodd { static int eject_tray(struct ata_device *dev) { struct ata_taskfile tf; - static const char cdb[] = { GPCMD_START_STOP_UNIT, + static const char cdb[ATAPI_CDB_LEN] = { GPCMD_START_STOP_UNIT, 0, 0, 0, 0x02, /* LoEj */ 0, 0, 0, 0, 0, 0, 0, diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index aafb8cc03523..e67815b896fc 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ -410,7 +410,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class, int rc; int retry = 100; - ahci_stop_engine(ap); + hpriv->stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index 7e76b35f422c..e121b8485731 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -2803,7 +2803,7 @@ static int hrz_probe(struct pci_dev *pci_dev, return err; out_free_irq: - free_irq(dev->irq, dev); + free_irq(irq, dev); out_free: kfree(dev); out_release: diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index 1ef67db03c8e..2c288d1f42bb 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -28,6 +28,7 @@ #include #include #include +#include <linux/nospec.h> #include "uPD98401.h" #include "uPD98402.h" @@ -1150,8 +1151,8 @@ static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte, } -static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd, - int offset, int swap) +static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset, + int swap) { unsigned char buf[ZEPROM_SIZE]; struct zatm_dev *zatm_dev; @@ -1458,6 +1459,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) return -EFAULT; if (pool < 0 || pool > ZATM_LAST_POOL) return -EINVAL; + pool = array_index_nospec(pool, + ZATM_LAST_POOL + 1); spin_lock_irqsave(&zatm_dev->lock, flags); info = zatm_dev->pool_info[pool]; if (cmd == ZATM_GETPOOLZ) { @@ -1480,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) return -EFAULT; if (pool < 0 || pool > ZATM_LAST_POOL) return 
-EINVAL; + pool = array_index_nospec(pool, + ZATM_LAST_POOL + 1); if (copy_from_user(&info, &((struct zatm_pool_req __user *) arg)->info, sizeof(info))) return -EFAULT; diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig index d7d21118d3e0..f9413755177b 100644 --- a/drivers/auxdisplay/Kconfig +++ b/drivers/auxdisplay/Kconfig @@ -14,9 +14,6 @@ menuconfig AUXDISPLAY If you say N, all options in this submenu will be skipped and disabled. -config CHARLCD - tristate "Character LCD core support" if COMPILE_TEST - if AUXDISPLAY config HD44780 @@ -136,6 +133,7 @@ config CFAG12864B_RATE config IMG_ASCII_LCD tristate "Imagination Technologies ASCII LCD Display" + depends on HAS_IOMEM default y if MIPS_MALTA || MIPS_SEAD3 select SYSCON help @@ -156,8 +154,6 @@ config HT16K33 Say yes here to add support for Holtek HT16K33, RAM mapping 16*8 LED controller driver with keyscan. -endif # AUXDISPLAY - config ARM_CHARLCD bool "ARM Ltd. Character LCD Driver" depends on PLAT_VERSATILE @@ -168,6 +164,8 @@ config ARM_CHARLCD line and the Linux version on the second line, but that's still useful. +endif # AUXDISPLAY + config PANEL tristate "Parallel port LCD/Keypad Panel support" depends on PARPORT @@ -447,3 +445,6 @@ config PANEL_BOOT_MESSAGE printf()-formatted message is valid with newline and escape codes. endif # PANEL + +config CHARLCD + tristate "Character LCD core support" if COMPILE_TEST diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c index a9020f82eea7..58403052514f 100644 --- a/drivers/auxdisplay/img-ascii-lcd.c +++ b/drivers/auxdisplay/img-ascii-lcd.c @@ -443,3 +443,7 @@ static struct platform_driver img_ascii_lcd_driver = { .remove = img_ascii_lcd_remove, }; module_platform_driver(img_ascii_lcd_driver); + +MODULE_DESCRIPTION("Imagination Technologies ASCII LCD Display"); +MODULE_AUTHOR("Paul Burton "); +MODULE_LICENSE("GPL"); diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 2f6614c9a229..49fd50fccd48 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -91,22 +91,23 @@ config FIRMWARE_IN_KERNEL depends on FW_LOADER default y help - The kernel source tree includes a number of firmware 'blobs' - that are used by various drivers. The recommended way to - use these is to run "make firmware_install", which, after - converting ihex files to binary, copies all of the needed - binary files in firmware/ to /lib/firmware/ on your system so - that they can be loaded by userspace helpers on request. + Various drivers in the kernel source tree may require firmware, + which is generally available in your distribution's linux-firmware + package. + + The linux-firmware package should install firmware into + /lib/firmware/ on your system, so they can be loaded by userspace + helpers on request. Enabling this option will build each required firmware blob - into the kernel directly, where request_firmware() will find - them without having to call out to userspace. This may be - useful if your root file system requires a device that uses - such firmware and do not wish to use an initrd. + specified by EXTRA_FIRMWARE into the kernel directly, where + request_firmware() will find them without having to call out to + userspace. This may be useful if your root file system requires a + device that uses such firmware and you do not wish to use an + initrd. 
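
The mechanism this help text describes reduces to a name lookup in a table linked into the kernel image before falling back to userspace loading. A hedged standalone model of that lookup (the table layout, names, and blob here are invented for illustration, not the kernel's actual firmware table):

```c
/*
 * Standalone model of built-in firmware lookup: blobs named in
 * EXTRA_FIRMWARE are linked into the image and searched before asking
 * userspace helpers.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct builtin_fw {
    const char *name;
    const void *data;
    size_t size;
};

static const char example_blob[] = { 0x4d, 0x5a };   /* fake contents */
static const struct builtin_fw builtin_table[] = {
    { "vendor/example.bin", example_blob, sizeof(example_blob) },
};

static const struct builtin_fw *find_builtin(const char *name)
{
    size_t i;

    for (i = 0; i < sizeof(builtin_table) / sizeof(builtin_table[0]); i++)
        if (!strcmp(builtin_table[i].name, name))
            return &builtin_table[i];
    return NULL;   /* caller would fall back to userspace loading */
}

int main(void)
{
    const struct builtin_fw *fw = find_builtin("vendor/example.bin");

    printf("built-in: %s (%zu bytes)\n", fw ? fw->name : "miss",
           fw ? fw->size : 0);
    return 0;
}
```
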
This single option controls the inclusion of firmware for - every driver that uses request_firmware() and ships its - firmware in the kernel source tree, which avoids a + every driver that uses request_firmware(), which avoids a proliferation of 'Include firmware for xxx device' options. Say 'N' and let firmware be loaded from userspace. @@ -235,6 +236,9 @@ config GENERIC_CPU_DEVICES config GENERIC_CPU_AUTOPROBE bool +config GENERIC_CPU_VULNERABILITIES + bool + config SOC_BUS bool select GLOB @@ -245,6 +249,7 @@ config DMA_SHARED_BUFFER bool default n select ANON_INODES + select IRQ_WORK help This option enables the framework for buffer-sharing between multiple drivers. A buffer is associated with a file using driver diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 6df7d6676a48..d00e7c4e52cf 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -21,15 +21,49 @@ #include #include #include +#include +#include -static DEFINE_MUTEX(cpu_scale_mutex); -static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE; +DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE; +DEFINE_PER_CPU(unsigned long, max_cpu_freq); +DEFINE_PER_CPU(unsigned long, max_freq_scale) = SCHED_CAPACITY_SCALE; -unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu) +void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, + unsigned long max_freq) { - return per_cpu(cpu_scale, cpu); + unsigned long scale; + int i; + + scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq; + + for_each_cpu(i, cpus) { + per_cpu(freq_scale, i) = scale; + per_cpu(max_cpu_freq, i) = max_freq; + } } +void arch_set_max_freq_scale(struct cpumask *cpus, + unsigned long policy_max_freq) +{ + unsigned long scale, max_freq; + int cpu = cpumask_first(cpus); + + if (cpu >= nr_cpu_ids) + return; + + max_freq = per_cpu(max_cpu_freq, cpu); + if (!max_freq) + return; + + scale = (policy_max_freq << SCHED_CAPACITY_SHIFT) / max_freq; + + for_each_cpu(cpu, cpus) + per_cpu(max_freq_scale, cpu) = scale; +} + +static DEFINE_MUTEX(cpu_scale_mutex); +DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE; + void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) { per_cpu(cpu_scale, cpu) = capacity; @@ -44,6 +78,9 @@ static ssize_t cpu_capacity_show(struct device *dev, return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id)); } +static void update_topology_flags_workfn(struct work_struct *work); +static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn); + static ssize_t cpu_capacity_store(struct device *dev, struct device_attribute *attr, const char *buf, @@ -54,6 +91,7 @@ static ssize_t cpu_capacity_store(struct device *dev, int i; unsigned long new_capacity; ssize_t ret; + cpumask_var_t mask; if (!count) return 0; @@ -65,10 +103,41 @@ static ssize_t cpu_capacity_store(struct device *dev, return -EINVAL; mutex_lock(&cpu_scale_mutex); + + if (new_capacity < SCHED_CAPACITY_SCALE) { + int highest_score_cpu = 0; + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { + mutex_unlock(&cpu_scale_mutex); + return -ENOMEM; + } + + cpumask_andnot(mask, cpu_online_mask, + topology_core_cpumask(this_cpu)); + + for_each_cpu(i, mask) { + if (topology_get_cpu_scale(NULL, i) == + SCHED_CAPACITY_SCALE) { + highest_score_cpu = 1; + break; + } + } + + free_cpumask_var(mask); + + if (!highest_score_cpu) { + mutex_unlock(&cpu_scale_mutex); + return -EINVAL; + } + } + + 
for_each_cpu(i, topology_core_cpumask(this_cpu)) topology_set_cpu_scale(i, new_capacity); mutex_unlock(&cpu_scale_mutex); + if (topology_detect_flags()) + schedule_work(&update_topology_flags_work); + return count; } @@ -93,6 +162,185 @@ static int register_cpu_capacity_sysctl(void) } subsys_initcall(register_cpu_capacity_sysctl); +enum asym_cpucap_type { no_asym, asym_thread, asym_core, asym_die }; +static enum asym_cpucap_type asym_cpucap = no_asym; +enum share_cap_type { no_share_cap, share_cap_thread, share_cap_core, share_cap_die}; +static enum share_cap_type share_cap = no_share_cap; + +#ifdef CONFIG_CPU_FREQ +int detect_share_cap_flag(void) +{ + int cpu; + enum share_cap_type share_cap_level = no_share_cap; + struct cpufreq_policy *policy; + + for_each_possible_cpu(cpu) { + policy = cpufreq_cpu_get(cpu); + + if (!policy) + return 0; + + if (cpumask_equal(topology_sibling_cpumask(cpu), + policy->related_cpus)) { + share_cap_level = share_cap_thread; + continue; + } + + if (cpumask_equal(topology_core_cpumask(cpu), + policy->related_cpus)) { + share_cap_level = share_cap_core; + continue; + } + + if (cpumask_equal(cpu_cpu_mask(cpu), + policy->related_cpus)) { + share_cap_level = share_cap_die; + continue; + } + } + + if (share_cap != share_cap_level) { + share_cap = share_cap_level; + return 1; + } + + return 0; +} +#else +int detect_share_cap_flag(void) { return 0; } +#endif + +/* + * Walk cpu topology to determine sched_domain flags. + * + * SD_ASYM_CPUCAPACITY: Indicates the lowest level that spans all cpu + * capacities found in the system for all cpus, i.e. the flag is set + * at the same level for all systems. The current algorithm implements + * this by looking for higher capacities, which doesn't work for all + * conceivable topology, but don't complicate things until it is + * necessary. 
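
The detection described here amounts to: while scanning the CPUs spanned at a given topology level, asymmetry is flagged when a capacity larger than an already-seen nonzero maximum turns up. A standalone model of that test (userspace C, invented capacity values), including the ordering limitation the comment concedes, namely that a larger capacity must appear after a smaller one to be noticed:

```c
/*
 * Standalone model of the per-level asymmetry test used by
 * topology_detect_flags() below. Capacities are made up.
 */
#include <stdbool.h>
#include <stdio.h>

static bool level_is_asymmetric(const unsigned long *capacity, int ncpus)
{
    unsigned long max_capacity = 0;
    bool asym = false;
    int i;

    for (i = 0; i < ncpus; i++) {
        if (capacity[i] > max_capacity) {
            /* a smaller nonzero max was seen earlier: asymmetric */
            if (max_capacity != 0)
                asym = true;
            max_capacity = capacity[i];
        }
    }
    return asym;
}

int main(void)
{
    unsigned long little_big[] = { 446, 446, 1024, 1024 };
    unsigned long smp[] = { 1024, 1024, 1024, 1024 };
    unsigned long big_first[] = { 1024, 446 };

    printf("little->big: %d\n", level_is_asymmetric(little_big, 4)); /* 1 */
    printf("smp:         %d\n", level_is_asymmetric(smp, 4));        /* 0 */
    /* limitation: a big CPU seen first is missed by one scan */
    printf("big->little: %d\n", level_is_asymmetric(big_first, 2));  /* 0 */
    return 0;
}
```

The function below compensates for the last case by repeating the scan starting from every CPU, so a mixed system is still caught from the perspective of a smaller CPU whose core-level maximum is exceeded later in the walk.
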
+ */ +int topology_detect_flags(void) +{ + unsigned long max_capacity, capacity; + enum asym_cpucap_type asym_level = no_asym; + int cpu, die_cpu, core, thread, flags_changed = 0; + + for_each_possible_cpu(cpu) { + max_capacity = 0; + + if (asym_level >= asym_thread) + goto check_core; + + for_each_cpu(thread, topology_sibling_cpumask(cpu)) { + capacity = topology_get_cpu_scale(NULL, thread); + + if (capacity > max_capacity) { + if (max_capacity != 0) + asym_level = asym_thread; + + max_capacity = capacity; + } + } + +check_core: + if (asym_level >= asym_core) + goto check_die; + + for_each_cpu(core, topology_core_cpumask(cpu)) { + capacity = topology_get_cpu_scale(NULL, core); + + if (capacity > max_capacity) { + if (max_capacity != 0) + asym_level = asym_core; + + max_capacity = capacity; + } + } +check_die: + for_each_possible_cpu(die_cpu) { + capacity = topology_get_cpu_scale(NULL, die_cpu); + + if (capacity > max_capacity) { + if (max_capacity != 0) { + asym_level = asym_die; + goto done; + } + + max_capacity = capacity; + } + } + } + +done: + if (asym_cpucap != asym_level) { + asym_cpucap = asym_level; + flags_changed = 1; + pr_debug("topology flag change detected\n"); + } + + if (detect_share_cap_flag()) + flags_changed = 1; + + return flags_changed; +} + +int topology_smt_flags(void) +{ + int flags = 0; + + if (asym_cpucap == asym_thread) + flags |= SD_ASYM_CPUCAPACITY; + + if (share_cap == share_cap_thread) + flags |= SD_SHARE_CAP_STATES; + + return flags; +} + +int topology_core_flags(void) +{ + int flags = 0; + + if (asym_cpucap == asym_core) + flags |= SD_ASYM_CPUCAPACITY; + + if (share_cap == share_cap_core) + flags |= SD_SHARE_CAP_STATES; + + return flags; +} + +int topology_cpu_flags(void) +{ + int flags = 0; + + if (asym_cpucap == asym_die) + flags |= SD_ASYM_CPUCAPACITY; + + if (share_cap == share_cap_die) + flags |= SD_SHARE_CAP_STATES; + + return flags; +} + +static int update_topology = 0; + +int topology_update_cpu_topology(void) +{ + return update_topology; +} + +/* + * Updating the sched_domains can't be done directly from cpufreq callbacks + * due to locking, so queue the work for later. 
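
The pattern this comment describes, and update_topology_flags_workfn() below implements, is worth spelling out: the notifier only detects the change and schedules work, and the rebuild runs later in a context that may take the needed locks. A minimal standalone model of that split (plain C; the one-slot queue is a stand-in for schedule_work(), and all names are invented):

```c
/*
 * Standalone model of "flag it, rebuild from a work item": the callback
 * never rebuilds directly, it only schedules the rebuild.
 */
#include <stdbool.h>
#include <stdio.h>

static bool update_topology;

static void rebuild_sched_domains_model(void)
{
    /* consumers would re-read topology flags while this is true */
    printf("rebuilding, update_topology=%d\n", update_topology);
}

static void update_flags_workfn(void)
{
    update_topology = true;
    rebuild_sched_domains_model();
    update_topology = false;
}

/* one-slot "workqueue": scheduling just records the function */
static void (*pending_work)(void);

static void schedule_work_model(void (*fn)(void)) { pending_work = fn; }

static void run_pending_work(void)
{
    if (pending_work) {
        void (*fn)(void) = pending_work;

        pending_work = NULL;
        fn();
    }
}

int main(void)
{
    /* e.g. a cpufreq policy notifier detecting a flag change */
    schedule_work_model(update_flags_workfn);
    /* ...later, in a context where rebuilding is safe... */
    run_pending_work();
    return 0;
}
```
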
+ */ +static void update_topology_flags_workfn(struct work_struct *work) +{ + update_topology = 1; + rebuild_sched_domains(); + pr_debug("sched_domain hierarchy rebuilt, flags updated\n"); + update_topology = 0; +} + static u32 capacity_scale; static u32 *raw_capacity; @@ -115,13 +363,12 @@ void topology_normalize_cpu_scale(void) pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale); mutex_lock(&cpu_scale_mutex); for_each_possible_cpu(cpu) { - pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n", - cpu, raw_capacity[cpu]); capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT) / capacity_scale; topology_set_cpu_scale(cpu, capacity); - pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n", - cpu, topology_get_cpu_scale(NULL, cpu)); + pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu raw_capacity=%u\n", + cpu, topology_get_cpu_scale(NULL, cpu), + raw_capacity[cpu]); } mutex_unlock(&cpu_scale_mutex); } @@ -166,11 +413,11 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) } #ifdef CONFIG_CPU_FREQ -static cpumask_var_t cpus_to_visit __initdata; -static void __init parsing_done_workfn(struct work_struct *work); -static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn); +static cpumask_var_t cpus_to_visit; +static void parsing_done_workfn(struct work_struct *work); +static DECLARE_WORK(parsing_done_work, parsing_done_workfn); -static int __init +static int init_cpu_capacity_callback(struct notifier_block *nb, unsigned long val, void *data) @@ -198,6 +445,9 @@ init_cpu_capacity_callback(struct notifier_block *nb, if (cpumask_empty(cpus_to_visit)) { topology_normalize_cpu_scale(); + init_sched_energy_costs(); + if (topology_detect_flags()) + schedule_work(&update_topology_flags_work); free_raw_capacity(); pr_debug("cpu_capacity: parsing done\n"); schedule_work(&parsing_done_work); @@ -206,12 +456,14 @@ init_cpu_capacity_callback(struct notifier_block *nb, return 0; } -static struct notifier_block init_cpu_capacity_notifier __initdata = { +static struct notifier_block init_cpu_capacity_notifier = { .notifier_call = init_cpu_capacity_callback, }; static int __init register_cpufreq_notifier(void) { + int ret; + /* * on ACPI-based systems we need to use the default cpu capacity * until we have the necessary code to parse the cpu capacity, so @@ -227,15 +479,21 @@ static int __init register_cpufreq_notifier(void) cpumask_copy(cpus_to_visit, cpu_possible_mask); - return cpufreq_register_notifier(&init_cpu_capacity_notifier, - CPUFREQ_POLICY_NOTIFIER); + ret = cpufreq_register_notifier(&init_cpu_capacity_notifier, + CPUFREQ_POLICY_NOTIFIER); + + if (ret) + free_cpumask_var(cpus_to_visit); + + return ret; } core_initcall(register_cpufreq_notifier); -static void __init parsing_done_workfn(struct work_struct *work) +static void parsing_done_workfn(struct work_struct *work) { cpufreq_unregister_notifier(&init_cpu_capacity_notifier, CPUFREQ_POLICY_NOTIFIER); + free_cpumask_var(cpus_to_visit); } #else diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 22a64fd3309b..641a52bef168 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -186,10 +186,10 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf, dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver == drv) { - if (dev->parent) /* Needed for USB */ + if (dev->parent && dev->bus->need_parent_lock) device_lock(dev->parent); device_release_driver(dev); - if (dev->parent) + if (dev->parent && dev->bus->need_parent_lock) device_unlock(dev->parent); err = count; } @@ 
-213,12 +213,12 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver == NULL && driver_match_device(drv, dev)) { - if (dev->parent) /* Needed for USB */ + if (dev->parent && bus->need_parent_lock) device_lock(dev->parent); device_lock(dev); err = driver_probe_device(drv, dev); device_unlock(dev); - if (dev->parent) + if (dev->parent && bus->need_parent_lock) device_unlock(dev->parent); if (err > 0) { @@ -737,10 +737,10 @@ static int __must_check bus_rescan_devices_helper(struct device *dev, int ret = 0; if (!dev->driver) { - if (dev->parent) /* Needed for USB */ + if (dev->parent && dev->bus->need_parent_lock) device_lock(dev->parent); ret = device_attach(dev); - if (dev->parent) + if (dev->parent && dev->bus->need_parent_lock) device_unlock(dev->parent); } return ret < 0 ? ret : 0; @@ -772,10 +772,10 @@ EXPORT_SYMBOL_GPL(bus_rescan_devices); int device_reprobe(struct device *dev) { if (dev->driver) { - if (dev->parent) /* Needed for USB */ + if (dev->parent && dev->bus->need_parent_lock) device_lock(dev->parent); device_release_driver(dev); - if (dev->parent) + if (dev->parent && dev->bus->need_parent_lock) device_unlock(dev->parent); } return bus_rescan_devices_helper(dev, NULL); diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index eb3af2739537..07532d83be0b 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -186,6 +186,11 @@ static void cache_associativity(struct cacheinfo *this_leaf) this_leaf->ways_of_associativity = (size / nr_sets) / line_size; } +static bool cache_node_is_unified(struct cacheinfo *this_leaf) +{ + return of_property_read_bool(this_leaf->of_node, "cache-unified"); +} + static void cache_of_override_properties(unsigned int cpu) { int index; @@ -194,6 +199,14 @@ static void cache_of_override_properties(unsigned int cpu) for (index = 0; index < cache_leaves(cpu); index++) { this_leaf = this_cpu_ci->info_list + index; + /* + * init_cache_level must setup the cache level correctly + * overriding the architecturally specified levels, so + * if type is NONE at this stage, it should be unified + */ + if (this_leaf->type == CACHE_TYPE_NOCACHE && + cache_node_is_unified(this_leaf)) + this_leaf->type = CACHE_TYPE_UNIFIED; cache_size(this_leaf); cache_get_line_size(this_leaf); cache_nr_sets(this_leaf); diff --git a/drivers/base/core.c b/drivers/base/core.c index 12ebd055724c..b054cb2fd2b9 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -217,6 +217,13 @@ struct device_link *device_link_add(struct device *consumer, link->rpm_active = true; } pm_runtime_new_link(consumer); + /* + * If the link is being added by the consumer driver at probe + * time, balance the decrementation of the supplier's runtime PM + * usage counter after consumer probe in driver_probe_device(). + */ + if (consumer->links.status == DL_DEV_PROBING) + pm_runtime_get_noresume(supplier); } get_device(supplier); link->supplier = supplier; @@ -235,12 +242,12 @@ struct device_link *device_link_add(struct device *consumer, switch (consumer->links.status) { case DL_DEV_PROBING: /* - * Balance the decrementation of the supplier's - * runtime PM usage counter after consumer probe - * in driver_probe_device(). + * Some callers expect the link creation during + * consumer driver probe to resume the supplier + * even without DL_FLAG_RPM_ACTIVE. 
*/ if (flags & DL_FLAG_PM_RUNTIME) - pm_runtime_get_sync(supplier); + pm_runtime_resume(supplier); link->status = DL_STATE_CONSUMER_PROBE; break; @@ -313,6 +320,9 @@ static void __device_link_del(struct device_link *link) dev_info(link->consumer, "Dropping the link to %s\n", dev_name(link->supplier)); + if (link->flags & DL_FLAG_PM_RUNTIME) + pm_runtime_drop_link(link->consumer); + list_del(&link->s_node); list_del(&link->c_node); device_link_free(link); @@ -1458,7 +1468,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) dir = kzalloc(sizeof(*dir), GFP_KERNEL); if (!dir) - return NULL; + return ERR_PTR(-ENOMEM); dir->class = class; kobject_init(&dir->kobj, &class_dir_ktype); @@ -1468,7 +1478,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name); if (retval < 0) { kobject_put(&dir->kobj); - return NULL; + return ERR_PTR(retval); } return &dir->kobj; } @@ -1775,6 +1785,10 @@ int device_add(struct device *dev) parent = get_device(dev->parent); kobj = get_device_parent(dev, parent); + if (IS_ERR(kobj)) { + error = PTR_ERR(kobj); + goto parent_error; + } if (kobj) dev->kobj.parent = kobj; @@ -1873,6 +1887,7 @@ int device_add(struct device *dev) kobject_del(&dev->kobj); Error: cleanup_glue_dir(dev, glue_dir); +parent_error: put_device(parent); name_error: kfree(dev->p); @@ -2692,6 +2707,11 @@ int device_move(struct device *dev, struct device *new_parent, device_pm_lock(); new_parent = get_device(new_parent); new_parent_kobj = get_device_parent(dev, new_parent); + if (IS_ERR(new_parent_kobj)) { + error = PTR_ERR(new_parent_kobj); + put_device(new_parent); + goto out; + } pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev), __func__, new_parent ? 
dev_name(new_parent) : ""); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 321cd7b4d817..93758b528d8f 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -501,10 +501,74 @@ static void __init cpu_dev_register_generic(void) #endif } +#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES + +ssize_t __weak cpu_show_meltdown(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_spectre_v1(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_spectre_v2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_spec_store_bypass(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_l1tf(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + +static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); +static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); +static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); +static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL); +static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL); + +static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, + &dev_attr_spectre_v1.attr, + &dev_attr_spectre_v2.attr, + &dev_attr_spec_store_bypass.attr, + &dev_attr_l1tf.attr, + NULL +}; + +static const struct attribute_group cpu_root_vulnerabilities_group = { + .name = "vulnerabilities", + .attrs = cpu_root_vulnerabilities_attrs, +}; + +static void __init cpu_register_vulnerabilities(void) +{ + if (sysfs_create_group(&cpu_subsys.dev_root->kobj, + &cpu_root_vulnerabilities_group)) + pr_err("Unable to register CPU vulnerabilities\n"); +} + +#else +static inline void cpu_register_vulnerabilities(void) { } +#endif + void __init cpu_dev_init(void) { if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups)) panic("Failed to register CPU subsystem"); cpu_dev_register_generic(); + cpu_register_vulnerabilities(); } diff --git a/drivers/base/dd.c b/drivers/base/dd.c index ad44b40fe284..0028c8125342 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -401,14 +401,6 @@ static int really_probe(struct device *dev, struct device_driver *drv) goto probe_failed; } - /* - * Ensure devices are listed in devices_kset in correct order - * It's important to move Dev to the end of devices_kset before - * calling .probe, because it could be recursive and parent Dev - * should always go first - */ - devices_kset_move_last(dev); - if (dev->bus->probe) { ret = dev->bus->probe(dev); if (ret) @@ -784,13 +776,13 @@ static int __driver_attach(struct device *dev, void *data) return ret; } /* ret > 0 means positive match */ - if (dev->parent) /* Needed for USB */ + if (dev->parent && dev->bus->need_parent_lock) device_lock(dev->parent); device_lock(dev); if (!dev->driver) driver_probe_device(drv, dev); device_unlock(dev); - if (dev->parent) + if (dev->parent && dev->bus->need_parent_lock) device_unlock(dev->parent); return 0; @@ -885,7 +877,7 @@ void device_release_driver_internal(struct device *dev, struct device_driver *drv, struct device *parent) { - if (parent) + if (parent && dev->bus->need_parent_lock) device_lock(parent); device_lock(dev); @@ -893,7 +885,7 @@ void device_release_driver_internal(struct device *dev, 
__device_release_driver(dev, parent); device_unlock(dev); - if (parent) + if (parent && dev->bus->need_parent_lock) device_unlock(parent); } diff --git a/drivers/base/isa.c b/drivers/base/isa.c index cd6ccdcf9df0..372d10af2600 100644 --- a/drivers/base/isa.c +++ b/drivers/base/isa.c @@ -39,7 +39,7 @@ static int isa_bus_probe(struct device *dev) { struct isa_driver *isa_driver = dev->platform_data; - if (isa_driver->probe) + if (isa_driver && isa_driver->probe) return isa_driver->probe(dev, to_isa_dev(dev)->id); return 0; @@ -49,7 +49,7 @@ static int isa_bus_remove(struct device *dev) { struct isa_driver *isa_driver = dev->platform_data; - if (isa_driver->remove) + if (isa_driver && isa_driver->remove) return isa_driver->remove(dev, to_isa_dev(dev)->id); return 0; @@ -59,7 +59,7 @@ static void isa_bus_shutdown(struct device *dev) { struct isa_driver *isa_driver = dev->platform_data; - if (isa_driver->shutdown) + if (isa_driver && isa_driver->shutdown) isa_driver->shutdown(dev, to_isa_dev(dev)->id); } @@ -67,7 +67,7 @@ static int isa_bus_suspend(struct device *dev, pm_message_t state) { struct isa_driver *isa_driver = dev->platform_data; - if (isa_driver->suspend) + if (isa_driver && isa_driver->suspend) return isa_driver->suspend(dev, to_isa_dev(dev)->id, state); return 0; @@ -77,7 +77,7 @@ static int isa_bus_resume(struct device *dev) { struct isa_driver *isa_driver = dev->platform_data; - if (isa_driver->resume) + if (isa_driver && isa_driver->resume) return isa_driver->resume(dev, to_isa_dev(dev)->id); return 0; diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 29cd71d8b360..e1bb691cf8f1 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -2,7 +2,6 @@ obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o -obj-$(CONFIG_PM_OPP) += opp/ obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o obj-$(CONFIG_HAVE_CLK) += clock_ops.o diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index e8ca5e2cf1e5..c276ba1c0a19 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -921,7 +921,7 @@ static int pm_genpd_prepare(struct device *dev) genpd_unlock(genpd); ret = pm_generic_prepare(dev); - if (ret) { + if (ret < 0) { genpd_lock(genpd); genpd->prepared_count--; @@ -929,7 +929,8 @@ static int pm_genpd_prepare(struct device *dev) genpd_unlock(genpd); } - return ret; + /* Never return 1, as genpd don't cope with the direct_complete path. */ + return ret >= 0 ? 0 : ret; } /** @@ -2161,6 +2162,9 @@ int genpd_dev_pm_attach(struct device *dev) genpd_lock(pd); ret = genpd_power_on(pd, 0); genpd_unlock(pd); + + if (ret) + genpd_remove_device(pd, dev); out: return ret ? 
-EPROBE_DEFER : 0; } @@ -2205,6 +2209,38 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state, return 0; } +static int genpd_iterate_idle_states(struct device_node *dn, + struct genpd_power_state *states) +{ + int ret; + struct of_phandle_iterator it; + struct device_node *np; + int i = 0; + + ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL); + if (ret <= 0) + return ret; + + /* Loop over the phandles until all the requested entry is found */ + of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) { + np = it.node; + if (!of_match_node(idle_state_match, np)) + continue; + if (states) { + ret = genpd_parse_state(&states[i], np); + if (ret) { + pr_err("Parsing idle state node %pOF failed with err %d\n", + np, ret); + of_node_put(np); + return ret; + } + } + i++; + } + + return i; +} + /** * of_genpd_parse_idle_states: Return array of idle states for the genpd. * @@ -2214,49 +2250,31 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state, * * Returns the device states parsed from the OF node. The memory for the states * is allocated by this function and is the responsibility of the caller to - * free the memory after use. + * free the memory after use. If no domain idle states is found it returns + * -EINVAL and in case of errors, a negative error code. */ int of_genpd_parse_idle_states(struct device_node *dn, struct genpd_power_state **states, int *n) { struct genpd_power_state *st; - struct device_node *np; - int i = 0; - int err, ret; - int count; - struct of_phandle_iterator it; - const struct of_device_id *match_id; + int ret; - count = of_count_phandle_with_args(dn, "domain-idle-states", NULL); - if (count <= 0) - return -EINVAL; + ret = genpd_iterate_idle_states(dn, NULL); + if (ret <= 0) + return ret < 0 ? ret : -EINVAL; - st = kcalloc(count, sizeof(*st), GFP_KERNEL); + st = kcalloc(ret, sizeof(*st), GFP_KERNEL); if (!st) return -ENOMEM; - /* Loop over the phandles until all the requested entry is found */ - of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) { - np = it.node; - match_id = of_match_node(idle_state_match, np); - if (!match_id) - continue; - ret = genpd_parse_state(&st[i++], np); - if (ret) { - pr_err - ("Parsing idle state node %pOF failed with err %d\n", - np, ret); - of_node_put(np); - kfree(st); - return ret; - } + ret = genpd_iterate_idle_states(dn, st); + if (ret <= 0) { + kfree(st); + return ret < 0 ? ret : -EINVAL; } - *n = i; - if (!i) - kfree(st); - else - *states = st; + *states = st; + *n = ret; return 0; } diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 07c3c4a9522d..b2ed606265a8 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c @@ -9,7 +9,6 @@ #include #include #include -#include #ifdef CONFIG_PM /** @@ -298,26 +297,4 @@ void pm_generic_complete(struct device *dev) if (drv && drv->pm && drv->pm->complete) drv->pm->complete(dev); } - -/** - * pm_complete_with_resume_check - Complete a device power transition. - * @dev: Device to handle. - * - * Complete a device power transition during a system-wide power transition and - * optionally schedule a runtime resume of the device if the system resume in - * progress has been initated by the platform firmware and the device had its - * power.direct_complete flag set. 
- */ -void pm_complete_with_resume_check(struct device *dev) -{ - pm_generic_complete(dev); - /* - * If the device had been runtime-suspended before the system went into - * the sleep state it is going out of and it has never been resumed till - * now, resume it in case the firmware powered it up. - */ - if (dev->power.direct_complete && pm_resume_via_firmware()) - pm_request_resume(dev); -} -EXPORT_SYMBOL_GPL(pm_complete_with_resume_check); #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 770b1539a083..d514291964a2 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "../base.h" #include "power.h" @@ -848,16 +849,10 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) goto Driver; } - if (dev->class) { - if (dev->class->pm) { - info = "class "; - callback = pm_op(dev->class->pm, state); - goto Driver; - } else if (dev->class->resume) { - info = "legacy class "; - callback = dev->class->resume; - goto End; - } + if (dev->class && dev->class->pm) { + info = "class "; + callback = pm_op(dev->class->pm, state); + goto Driver; } if (dev->bus) { @@ -1455,6 +1450,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) pm_callback_t callback = NULL; const char *info = NULL; int error = 0; + char suspend_abort[MAX_SUSPEND_ABORT_LEN]; DECLARE_DPM_WATCHDOG_ON_STACK(wd); TRACE_DEVICE(dev); @@ -1475,6 +1471,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) pm_wakeup_event(dev, 0); if (pm_wakeup_pending()) { + pm_get_active_wakeup_sources(suspend_abort, + MAX_SUSPEND_ABORT_LEN); + log_suspend_abort_reason(suspend_abort); async_error = -EBUSY; goto Complete; } @@ -1508,17 +1507,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) goto Run; } - if (dev->class) { - if (dev->class->pm) { - info = "class "; - callback = pm_op(dev->class->pm, state); - goto Run; - } else if (dev->class->suspend) { - pm_dev_dbg(dev, state, "legacy class "); - error = legacy_suspend(dev, state, dev->class->suspend, - "legacy class "); - goto End; - } + if (dev->class && dev->class->pm) { + info = "class "; + callback = pm_op(dev->class->pm, state); + goto Run; } if (dev->bus) { @@ -1862,8 +1854,7 @@ void device_pm_check_callbacks(struct device *dev) dev->power.no_pm_callbacks = (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && !dev->bus->suspend && !dev->bus->resume)) && - (!dev->class || (pm_ops_is_empty(dev->class->pm) && - !dev->class->suspend && !dev->class->resume)) && + (!dev->class || pm_ops_is_empty(dev->class->pm)) && (!dev->type || pm_ops_is_empty(dev->type->pm)) && (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 7bcf80fa9ada..b2b1eece0db1 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -276,7 +276,8 @@ static int rpm_get_suppliers(struct device *dev) continue; retval = pm_runtime_get_sync(link->supplier); - if (retval < 0) { + /* Ignore suppliers with disabled runtime PM. 
*/ + if (retval < 0 && retval != -EACCES) { pm_runtime_put_noidle(link->supplier); return retval; } diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index ae0429827f31..67c50738834b 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -323,7 +323,8 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq) return; if (device_may_wakeup(wirq->dev)) { - if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) + if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && + !pm_runtime_status_suspended(wirq->dev)) enable_irq(wirq->irq); enable_irq_wake(wirq->irq); @@ -345,7 +346,8 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq) if (device_may_wakeup(wirq->dev)) { disable_irq_wake(wirq->irq); - if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) + if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && + !pm_runtime_status_suspended(wirq->dev)) disable_irq_nosync(wirq->irq); } } diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index cdd6f256da59..b932d7f75504 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include "power.h" @@ -805,6 +806,37 @@ void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard) } EXPORT_SYMBOL_GPL(pm_wakeup_dev_event); +void pm_get_active_wakeup_sources(char *pending_wakeup_source, size_t max) +{ + struct wakeup_source *ws, *last_active_ws = NULL; + int len = 0; + bool active = false; + + rcu_read_lock(); + list_for_each_entry_rcu(ws, &wakeup_sources, entry) { + if (ws->active && len < max) { + if (!active) + len += scnprintf(pending_wakeup_source, max, + "Pending Wakeup Sources: "); + len += scnprintf(pending_wakeup_source + len, max - len, + "%s ", ws->name); + active = true; + } else if (!active && + (!last_active_ws || + ktime_to_ns(ws->last_time) > + ktime_to_ns(last_active_ws->last_time))) { + last_active_ws = ws; + } + } + if (!active && last_active_ws) { + scnprintf(pending_wakeup_source, max, + "Last active Wakeup Source: %s", + last_active_ws->name); + } + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources); + void pm_print_active_wakeup_sources(void) { struct wakeup_source *ws; @@ -1018,7 +1050,7 @@ static int print_wakeup_source_stats(struct seq_file *m, active_time = 0; } - seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n", + seq_printf(m, "%-32s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n", ws->name, active_count, ws->event_count, ws->wakeup_count, ws->expire_count, ktime_to_ms(active_time), ktime_to_ms(total_time), @@ -1039,7 +1071,7 @@ static int wakeup_sources_stats_show(struct seq_file *m, void *unused) struct wakeup_source *ws; int srcuidx; - seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t" + seq_puts(m, "name\t\t\t\t\tactive_count\tevent_count\twakeup_count\t" "expire_count\tactive_since\ttotal_time\tmax_time\t" "last_change\tprevent_suspend_time\n"); diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index 0368fd7b3a41..d8959dc06d83 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -37,3 +37,11 @@ config REGMAP_MMIO config REGMAP_IRQ bool + +config REGMAP_SDW + default n + tristate "Regmap support for soundwire" + depends on SDW + help + Enable this if regmap support is required for + soundwire slave devices. 
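A note on the suspend-abort reporting added earlier in this patch: pm_get_active_wakeup_sources() fills a caller-supplied buffer with the names of the wakeup sources that are still active, or with the most recently active one if none is. A minimal sketch of a consumer, mirroring the __device_suspend() hunk above; it assumes the Android wakeup_reason helpers (MAX_SUSPEND_ABORT_LEN, log_suspend_abort_reason()) that this series builds on:

static void example_log_suspend_blocker(void)
{
	char reason[MAX_SUSPEND_ABORT_LEN];

	if (pm_wakeup_pending()) {
		/* ask the PM core which wakeup source kept us awake */
		pm_get_active_wakeup_sources(reason, sizeof(reason));
		log_suspend_abort_reason(reason);
	}
}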
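The REGMAP_SDW option above selects the bus glue added below in regmap-sdw.c, which exposes regmap_init_sdw() and devm_regmap_init_sdw(). A hypothetical slave driver probe might use it as sketched here; example_codec_probe() and the register address are illustrative only, while the config values are the ones regmap_sdw_config_check() below insists on (32-bit paged register addresses, 8-bit values, zero stride and padding):

/* Illustrative only: not part of this patch. */
static const struct regmap_config example_sdw_config = {
	.reg_bits = 32,		/* SCP_ADDRPAGE1/2 paged address */
	.val_bits = 8,		/* SoundWire registers are one byte */
	.max_register = 0xFFFF,
	.cache_type = REGCACHE_NONE,
};

static int example_codec_probe(struct sdw_slave *sdw)
{
	struct regmap *map = devm_regmap_init_sdw(sdw, &example_sdw_config);

	if (IS_ERR(map))
		return PTR_ERR(map);

	/* the usual regmap accessors work from here on */
	return regmap_write(map, 0x0040, 0x01);
}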
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile index 0d298c446108..9f37b3906c0b 100644 --- a/drivers/base/regmap/Makefile +++ b/drivers/base/regmap/Makefile @@ -13,3 +13,4 @@ obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o obj-$(CONFIG_REGMAP_W1) += regmap-w1.o +obj-$(CONFIG_REGMAP_SDW) += regmap-sdw.o diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c new file mode 100644 index 000000000000..3eacebf26d5f --- /dev/null +++ b/drivers/base/regmap/regmap-sdw.c @@ -0,0 +1,252 @@ +/* + * regmap-sdw.c - Register map access API - SoundWire support + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Hardik T Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ +#include +#include +#include + +#include "internal.h" + +#define SDW_SCP_ADDRPAGE1_MASK 0xFF +#define SDW_SCP_ADDRPAGE1_SHIFT 15 + +#define SDW_SCP_ADDRPAGE2_MASK 0xFF +#define SDW_SCP_ADDRPAGE2_SHIFT 22 + +#define SDW_REGADDR_SHIFT 0x0 +#define SDW_REGADDR_MASK 0xFFFF + +#define SDW_MAX_REG_ADDR 65536 + +static int regmap_sdw_read(void *context, + const void *reg, size_t reg_size, + void *val, size_t val_size) +{ + struct device *dev = context; + struct sdw_slave *sdw = to_sdw_slave(dev); + struct sdw_msg xfer; + int ret, scp_addr1, scp_addr2; + int reg_command; + int reg_addr = *(u32 *)reg; + size_t t_val_size = 0, t_size; + int offset; + u8 *t_val; + + /* All registers are 4 byte on SoundWire bus */ + if (reg_size != 4) + return -ENOTSUPP; + + xfer.slave_addr = sdw->slv_number; + xfer.ssp_tag = 0; + xfer.flag = SDW_MSG_FLAG_READ; + xfer.len = 0; + t_val = val; + + offset = 0; + reg_command = (reg_addr >> SDW_REGADDR_SHIFT) & + SDW_REGADDR_MASK; + if (val_size > SDW_MAX_REG_ADDR) + t_size = SDW_MAX_REG_ADDR - reg_command; + else + t_size = val_size; + while (t_val_size < val_size) { + + scp_addr1 = (reg_addr >> SDW_SCP_ADDRPAGE1_SHIFT) & + SDW_SCP_ADDRPAGE1_MASK; + scp_addr2 = (reg_addr >> SDW_SCP_ADDRPAGE2_SHIFT) & + SDW_SCP_ADDRPAGE2_MASK; + xfer.addr_page1 = scp_addr1; + xfer.addr_page2 = scp_addr2; + xfer.addr = reg_command; + xfer.len += t_size; + xfer.buf = &t_val[offset]; + ret = sdw_slave_transfer(sdw->mstr, &xfer, 1); + if (ret < 0) + return ret; + else if (ret != 1) + return -EIO; + + t_val_size += t_size; + offset += t_size; + if (val_size - t_val_size > 65535) + t_size = 65535; + else + t_size = val_size - t_val_size; + reg_addr += t_size; + reg_command = (reg_addr >> SDW_REGADDR_SHIFT) & + SDW_REGADDR_MASK; + } + return 0; +} + +static int regmap_sdw_gather_write(void *context, + const void *reg, size_t reg_size, + const void *val, size_t val_size) +{ + struct device *dev = context; + struct sdw_slave *sdw = to_sdw_slave(dev); + struct sdw_msg xfer; + int ret, scp_addr1, scp_addr2; + int reg_command; + int reg_addr = *(u32 *)reg; + size_t t_val_size = 0, t_size; + int offset; + u8 *t_val; + + /* All registers 
are 4 byte on SoundWire bus */ + if (reg_size != 4) + return -ENOTSUPP; + + if (!sdw) + return 0; + + xfer.slave_addr = sdw->slv_number; + xfer.ssp_tag = 0; + xfer.flag = SDW_MSG_FLAG_WRITE; + xfer.len = 0; + t_val = (u8 *)val; + + offset = 0; + reg_command = (reg_addr >> SDW_REGADDR_SHIFT) & + SDW_REGADDR_MASK; + if (val_size > SDW_MAX_REG_ADDR) + t_size = SDW_MAX_REG_ADDR - reg_command; + else + t_size = val_size; + while (t_val_size < val_size) { + + scp_addr1 = (reg_addr >> SDW_SCP_ADDRPAGE1_SHIFT) & + SDW_SCP_ADDRPAGE1_MASK; + scp_addr2 = (reg_addr >> SDW_SCP_ADDRPAGE2_SHIFT) & + SDW_SCP_ADDRPAGE2_MASK; + xfer.addr_page1 = scp_addr1; + xfer.addr_page2 = scp_addr2; + xfer.addr = reg_command; + xfer.len += t_size; + xfer.buf = &t_val[offset]; + ret = sdw_slave_transfer(sdw->mstr, &xfer, 1); + if (ret < 0) + return ret; + else if (ret != 1) + return -EIO; + + t_val_size += t_size; + offset += t_size; + if (val_size - t_val_size > 65535) + t_size = 65535; + else + t_size = val_size - t_val_size; + reg_addr += t_size; + reg_command = (reg_addr >> SDW_REGADDR_SHIFT) & + SDW_REGADDR_MASK; + } + return 0; +} + +static inline void regmap_sdw_count_check(size_t count, u32 offset) +{ + BUG_ON(count <= offset); +} + +static int regmap_sdw_write(void *context, const void *data, size_t count) +{ + /* 4-byte register address for the soundwire */ + unsigned int offset = 4; + + regmap_sdw_count_check(count, offset); + return regmap_sdw_gather_write(context, data, 4, + data + offset, count - offset); +} + +static struct regmap_bus regmap_sdw = { + .write = regmap_sdw_write, + .gather_write = regmap_sdw_gather_write, + .read = regmap_sdw_read, + .reg_format_endian_default = REGMAP_ENDIAN_LITTLE, + .val_format_endian_default = REGMAP_ENDIAN_LITTLE, +}; + +static int regmap_sdw_config_check(const struct regmap_config *config) +{ + /* All register are 8-bits wide as per MIPI Soundwire 1.0 Spec */ + if (config->val_bits != 8) + return -ENOTSUPP; + /* Registers are 32 bit in size, based on SCP_ADDR1 and SCP_ADDR2 + * implementation address range may vary in slave. + */ + if (config->reg_bits != 32) + return -ENOTSUPP; + /* SoundWire register address are contiguous. */ + if (config->reg_stride != 0) + return -ENOTSUPP; + if (config->pad_bits != 0) + return -ENOTSUPP; + + + return 0; +} + +/** + * regmap_init_sdw(): Initialise register map + * + * @sdw: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +struct regmap *regmap_init_sdw(struct sdw_slave *sdw, + const struct regmap_config *config) +{ + int ret; + + ret = regmap_sdw_config_check(config); + if (ret) + return ERR_PTR(ret); + + return regmap_init(&sdw->dev, ®map_sdw, &sdw->dev, config); +} +EXPORT_SYMBOL_GPL(regmap_init_sdw); + + +/** + * devm_regmap_init_sdw(): Initialise managed register map + * + * @sdw Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. 
+ */ +struct regmap *devm_regmap_init_sdw(struct sdw_slave *sdw, + const struct regmap_config *config) +{ + int ret; + + ret = regmap_sdw_config_check(config); + if (ret) + return ERR_PTR(ret); + + return devm_regmap_init(&sdw->dev, ®map_sdw, &sdw->dev, config); +} +EXPORT_SYMBOL_GPL(devm_regmap_init_sdw); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index b9a779a4a739..8fd08023c0f5 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -98,7 +98,7 @@ bool regmap_cached(struct regmap *map, unsigned int reg) int ret; unsigned int val; - if (map->cache == REGCACHE_NONE) + if (map->cache_type == REGCACHE_NONE) return false; if (!map->cache_ops) @@ -1739,7 +1739,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg, return -EINVAL; if (val_len % map->format.val_bytes) return -EINVAL; - if (map->max_raw_write && map->max_raw_write > val_len) + if (map->max_raw_write && map->max_raw_write < val_len) return -E2BIG; map->lock(map->lock_arg); diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c index 8d98a329f6ea..96c34a95cc62 100644 --- a/drivers/base/syscore.c +++ b/drivers/base/syscore.c @@ -11,6 +11,7 @@ #include #include #include +#include static LIST_HEAD(syscore_ops_list); static DEFINE_MUTEX(syscore_ops_lock); @@ -75,6 +76,8 @@ int syscore_suspend(void) return 0; err_out: + log_suspend_abort_reason("System core suspend callback %pF failed", + ops->suspend); pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend); list_for_each_entry_continue(ops, &syscore_ops_list, node) diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 03471b3fce86..c2042f822b03 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -282,8 +282,8 @@ void drbd_request_endio(struct bio *bio) what = COMPLETED_OK; } - bio_put(req->private_bio); req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status)); + bio_put(bio); /* not req_mod(), we need irqsave here! */ spin_lock_irqsave(&device->resource->req_lock, flags); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 85de67334695..6d61633a7f89 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) struct iov_iter i; ssize_t bw; - iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len); + iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len); file_start_write(file); bw = vfs_iter_write(file, &i, ppos, 0); @@ -617,6 +617,36 @@ static void loop_reread_partitions(struct loop_device *lo, __func__, lo->lo_number, lo->lo_file_name, rc); } +static inline int is_loop_device(struct file *file) +{ + struct inode *i = file->f_mapping->host; + + return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR; +} + +static int loop_validate_file(struct file *file, struct block_device *bdev) +{ + struct inode *inode = file->f_mapping->host; + struct file *f = file; + + /* Avoid recursion */ + while (is_loop_device(f)) { + struct loop_device *l; + + if (f->f_mapping->host->i_bdev == bdev) + return -EBADF; + + l = f->f_mapping->host->i_bdev->bd_disk->private_data; + if (l->lo_state == Lo_unbound) { + return -EINVAL; + } + f = l->lo_backing_file; + } + if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) + return -EINVAL; + return 0; +} + /* * loop_change_fd switched the backing store of a loopback device to * a new file. 
This is useful for operating system installers to free up @@ -646,14 +676,15 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, if (!file) goto out; + error = loop_validate_file(file, bdev); + if (error) + goto out_putf; + inode = file->f_mapping->host; old_file = lo->lo_backing_file; error = -EINVAL; - if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) - goto out_putf; - /* size of the new backing store needs to be the same */ if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) goto out_putf; @@ -679,13 +710,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, return error; } -static inline int is_loop_device(struct file *file) -{ - struct inode *i = file->f_mapping->host; - - return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR; -} - /* loop sysfs attributes */ static ssize_t loop_attr_show(struct device *dev, char *page, @@ -782,16 +806,17 @@ static struct attribute_group loop_attribute_group = { .attrs= loop_attrs, }; -static int loop_sysfs_init(struct loop_device *lo) +static void loop_sysfs_init(struct loop_device *lo) { - return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, - &loop_attribute_group); + lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, + &loop_attribute_group); } static void loop_sysfs_exit(struct loop_device *lo) { - sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, - &loop_attribute_group); + if (lo->sysfs_inited) + sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, + &loop_attribute_group); } static void loop_config_discard(struct loop_device *lo) @@ -850,7 +875,7 @@ static int loop_prepare_queue(struct loop_device *lo) static int loop_set_fd(struct loop_device *lo, fmode_t mode, struct block_device *bdev, unsigned int arg) { - struct file *file, *f; + struct file *file; struct inode *inode; struct address_space *mapping; int lo_flags = 0; @@ -869,29 +894,13 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, if (lo->lo_state != Lo_unbound) goto out_putf; - /* Avoid recursion */ - f = file; - while (is_loop_device(f)) { - struct loop_device *l; - - if (f->f_mapping->host->i_bdev == bdev) - goto out_putf; - - l = f->f_mapping->host->i_bdev->bd_disk->private_data; - if (l->lo_state == Lo_unbound) { - error = -EINVAL; - goto out_putf; - } - f = l->lo_backing_file; - } + error = loop_validate_file(file, bdev); + if (error) + goto out_putf; mapping = file->f_mapping; inode = mapping->host; - error = -EINVAL; - if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) - goto out_putf; - if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) || !file->f_op->write_iter) lo_flags |= LO_FLAGS_READ_ONLY; @@ -1098,11 +1107,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) if (info->lo_encrypt_type) { unsigned int type = info->lo_encrypt_type; - if (type >= MAX_LO_CRYPT) - return -EINVAL; + if (type >= MAX_LO_CRYPT) { + err = -EINVAL; + goto exit; + } xfer = xfer_funcs[type]; - if (xfer == NULL) - return -EINVAL; + if (xfer == NULL) { + err = -EINVAL; + goto exit; + } } else xfer = NULL; @@ -1162,21 +1175,17 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) static int loop_get_status(struct loop_device *lo, struct loop_info64 *info) { - struct file *file = lo->lo_backing_file; + struct file *file; struct kstat stat; - int error; + int ret; - if (lo->lo_state != Lo_bound) + if (lo->lo_state != Lo_bound) { + mutex_unlock(&lo->lo_ctl_mutex); return -ENXIO; - error = vfs_getattr(&file->f_path, &stat, - 
STATX_INO, AT_STATX_SYNC_AS_STAT); - if (error) - return error; + } + memset(info, 0, sizeof(*info)); info->lo_number = lo->lo_number; - info->lo_device = huge_encode_dev(stat.dev); - info->lo_inode = stat.ino; - info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev); info->lo_offset = lo->lo_offset; info->lo_sizelimit = lo->lo_sizelimit; info->lo_flags = lo->lo_flags; @@ -1189,7 +1198,19 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info) memcpy(info->lo_encrypt_key, lo->lo_encrypt_key, lo->lo_encrypt_key_size); } - return 0; + + /* Drop lo_ctl_mutex while we call into the filesystem. */ + file = get_file(lo->lo_backing_file); + mutex_unlock(&lo->lo_ctl_mutex); + ret = vfs_getattr(&file->f_path, &stat, STATX_INO, + AT_STATX_SYNC_AS_STAT); + if (!ret) { + info->lo_device = huge_encode_dev(stat.dev); + info->lo_inode = stat.ino; + info->lo_rdevice = huge_encode_dev(stat.rdev); + } + fput(file); + return ret; } static void @@ -1270,12 +1291,13 @@ static int loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) { struct loop_info info; struct loop_info64 info64; - int err = 0; + int err; - if (!arg) - err = -EINVAL; - if (!err) - err = loop_get_status(lo, &info64); + if (!arg) { + mutex_unlock(&lo->lo_ctl_mutex); + return -EINVAL; + } + err = loop_get_status(lo, &info64); if (!err) err = loop_info64_to_old(&info64, &info); if (!err && copy_to_user(arg, &info, sizeof(info))) @@ -1287,12 +1309,13 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) { static int loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { struct loop_info64 info64; - int err = 0; + int err; - if (!arg) - err = -EINVAL; - if (!err) - err = loop_get_status(lo, &info64); + if (!arg) { + mutex_unlock(&lo->lo_ctl_mutex); + return -EINVAL; + } + err = loop_get_status(lo, &info64); if (!err && copy_to_user(arg, &info64, sizeof(info64))) err = -EFAULT; @@ -1369,7 +1392,8 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, break; case LOOP_GET_STATUS: err = loop_get_status_old(lo, (struct loop_info __user *) arg); - break; + /* loop_get_status() unlocks lo_ctl_mutex */ + goto out_unlocked; case LOOP_SET_STATUS64: err = -EPERM; if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) @@ -1378,7 +1402,8 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, break; case LOOP_GET_STATUS64: err = loop_get_status64(lo, (struct loop_info64 __user *) arg); - break; + /* loop_get_status() unlocks lo_ctl_mutex */ + goto out_unlocked; case LOOP_SET_CAPACITY: err = -EPERM; if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) @@ -1511,12 +1536,13 @@ loop_get_status_compat(struct loop_device *lo, struct compat_loop_info __user *arg) { struct loop_info64 info64; - int err = 0; + int err; - if (!arg) - err = -EINVAL; - if (!err) - err = loop_get_status(lo, &info64); + if (!arg) { + mutex_unlock(&lo->lo_ctl_mutex); + return -EINVAL; + } + err = loop_get_status(lo, &info64); if (!err) err = loop_info64_to_compat(&info64, arg); return err; @@ -1539,7 +1565,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, mutex_lock(&lo->lo_ctl_mutex); err = loop_get_status_compat( lo, (struct compat_loop_info __user *) arg); - mutex_unlock(&lo->lo_ctl_mutex); + /* loop_get_status() unlocks lo_ctl_mutex */ break; case LOOP_SET_CAPACITY: case LOOP_CLR_FD: @@ -1576,9 +1602,8 @@ static int lo_open(struct block_device *bdev, fmode_t mode) return err; } -static void lo_release(struct gendisk *disk, fmode_t mode) +static void 
__lo_release(struct loop_device *lo) { - struct loop_device *lo = disk->private_data; int err; if (atomic_dec_return(&lo->lo_refcnt)) @@ -1605,6 +1630,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode) mutex_unlock(&lo->lo_ctl_mutex); } +static void lo_release(struct gendisk *disk, fmode_t mode) +{ + mutex_lock(&loop_index_mutex); + __lo_release(disk->private_data); + mutex_unlock(&loop_index_mutex); +} + static const struct block_device_operations lo_fops = { .owner = THIS_MODULE, .open = lo_open, diff --git a/drivers/block/loop.h b/drivers/block/loop.h index 1f3956702993..dfc54ceba410 100644 --- a/drivers/block/loop.h +++ b/drivers/block/loop.h @@ -58,6 +58,7 @@ struct loop_device { struct kthread_worker worker; struct task_struct *worker_task; bool use_dio; + bool sysfs_inited; struct request_queue *lo_queue; struct blk_mq_tag_set tag_set; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 9adfb5445f8d..6fb64e73bc96 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -173,9 +173,12 @@ static const struct device_attribute pid_attr = { static void nbd_dev_remove(struct nbd_device *nbd) { struct gendisk *disk = nbd->disk; + struct request_queue *q; + if (disk) { + q = disk->queue; del_gendisk(disk); - blk_cleanup_queue(disk->queue); + blk_cleanup_queue(q); blk_mq_free_tag_set(&nbd->tag_set); disk->private_data = NULL; put_disk(disk); @@ -231,9 +234,18 @@ static void nbd_size_clear(struct nbd_device *nbd) static void nbd_size_update(struct nbd_device *nbd) { struct nbd_config *config = nbd->config; + struct block_device *bdev = bdget_disk(nbd->disk, 0); + blk_queue_logical_block_size(nbd->disk->queue, config->blksize); blk_queue_physical_block_size(nbd->disk->queue, config->blksize); set_capacity(nbd->disk, config->bytesize >> 9); + if (bdev) { + if (bdev->bd_disk) + bd_set_size(bdev, config->bytesize); + else + bdev->bd_invalidated = 1; + bdput(bdev); + } kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); } @@ -243,6 +255,8 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize, struct nbd_config *config = nbd->config; config->blksize = blocksize; config->bytesize = blocksize * nr_blocks; + if (nbd->task_recv != NULL) + nbd_size_update(nbd); } static void nbd_complete_rq(struct request *req) @@ -288,15 +302,6 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, cmd->status = BLK_STS_TIMEOUT; return BLK_EH_HANDLED; } - - /* If we are waiting on our dead timer then we could get timeout - * callbacks for our request. For this we just want to reset the timer - * and let the queue side take care of everything. 
- */ - if (!completion_done(&cmd->send_complete)) { - nbd_config_put(nbd); - return BLK_EH_RESET_TIMER; - } config = nbd->config; if (config->num_connections > 1) { @@ -723,9 +728,9 @@ static int wait_for_reconnect(struct nbd_device *nbd) return 0; if (test_bit(NBD_DISCONNECTED, &config->runtime_flags)) return 0; - wait_event_interruptible_timeout(config->conn_wait, - atomic_read(&config->live_connections), - config->dead_conn_timeout); + wait_event_timeout(config->conn_wait, + atomic_read(&config->live_connections), + config->dead_conn_timeout); return atomic_read(&config->live_connections); } @@ -740,6 +745,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) if (!refcount_inc_not_zero(&nbd->config_refs)) { dev_err_ratelimited(disk_to_dev(nbd->disk), "Socks array is empty\n"); + blk_mq_start_request(req); return -EINVAL; } config = nbd->config; @@ -748,6 +754,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) dev_err_ratelimited(disk_to_dev(nbd->disk), "Attempted send on invalid socket\n"); nbd_config_put(nbd); + blk_mq_start_request(req); return -EINVAL; } cmd->status = BLK_STS_OK; @@ -771,6 +778,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) */ sock_shutdown(nbd); nbd_config_put(nbd); + blk_mq_start_request(req); return -EIO; } goto again; @@ -781,6 +789,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) * here so that it gets put _after_ the request that is already on the * dispatch list. */ + blk_mq_start_request(req); if (unlikely(nsock->pending && nsock->pending != req)) { blk_mq_requeue_request(req, true); ret = 0; @@ -793,10 +802,10 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) ret = nbd_send_cmd(nbd, cmd, index); if (ret == -EAGAIN) { dev_err_ratelimited(disk_to_dev(nbd->disk), - "Request send failed trying another connection\n"); + "Request send failed, requeueing\n"); nbd_mark_nsock_dead(nbd, nsock, 1); - mutex_unlock(&nsock->tx_lock); - goto again; + blk_mq_requeue_request(req, true); + ret = 0; } out: mutex_unlock(&nsock->tx_lock); @@ -820,7 +829,6 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, * done sending everything over the wire. */ init_completion(&cmd->send_complete); - blk_mq_start_request(bd->rq); /* We can be called directly from the user space process, which means we * could possibly have signals pending so our sendmsg will fail. In @@ -1115,7 +1123,6 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b if (ret) return ret; - bd_set_size(bdev, config->bytesize); if (max_part) bdev->bd_invalidated = 1; mutex_unlock(&nbd->config_lock); @@ -1597,7 +1604,7 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) if (new_index < 0) { mutex_unlock(&nbd_index_mutex); printk(KERN_ERR "nbd: failed to add new device\n"); - return ret; + return new_index; } nbd = idr_find(&nbd_index_idr, new_index); } diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 8042c26ea9e6..f01d4a8a783a 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -68,6 +68,7 @@ enum nullb_device_flags { NULLB_DEV_FL_CACHE = 3, }; +#define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2) /* * nullb_page is a page in memory for nullb devices. 
* @@ -82,10 +83,10 @@ enum nullb_device_flags { */ struct nullb_page { struct page *page; - unsigned long bitmap; + DECLARE_BITMAP(bitmap, MAP_SZ); }; -#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1) -#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2) +#define NULLB_PAGE_LOCK (MAP_SZ - 1) +#define NULLB_PAGE_FREE (MAP_SZ - 2) struct nullb_device { struct nullb *nullb; @@ -467,7 +468,6 @@ static void nullb_device_release(struct config_item *item) { struct nullb_device *dev = to_nullb_device(item); - badblocks_exit(&dev->badblocks); null_free_device_storage(dev, false); null_free_dev(dev); } @@ -578,6 +578,10 @@ static struct nullb_device *null_alloc_dev(void) static void null_free_dev(struct nullb_device *dev) { + if (!dev) + return; + + badblocks_exit(&dev->badblocks); kfree(dev); } @@ -722,7 +726,7 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags) if (!t_page->page) goto out_freepage; - t_page->bitmap = 0; + memset(t_page->bitmap, 0, sizeof(t_page->bitmap)); return t_page; out_freepage: kfree(t_page); @@ -732,13 +736,20 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags) static void null_free_page(struct nullb_page *t_page) { - __set_bit(NULLB_PAGE_FREE, &t_page->bitmap); - if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap)) + __set_bit(NULLB_PAGE_FREE, t_page->bitmap); + if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap)) return; __free_page(t_page->page); kfree(t_page); } +static bool null_page_empty(struct nullb_page *page) +{ + int size = MAP_SZ - 2; + + return find_first_bit(page->bitmap, size) == size; +} + static void null_free_sector(struct nullb *nullb, sector_t sector, bool is_cache) { @@ -753,9 +764,9 @@ static void null_free_sector(struct nullb *nullb, sector_t sector, t_page = radix_tree_lookup(root, idx); if (t_page) { - __clear_bit(sector_bit, &t_page->bitmap); + __clear_bit(sector_bit, t_page->bitmap); - if (!t_page->bitmap) { + if (null_page_empty(t_page)) { ret = radix_tree_delete_item(root, idx, t_page); WARN_ON(ret != t_page); null_free_page(ret); @@ -826,7 +837,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb, t_page = radix_tree_lookup(root, idx); WARN_ON(t_page && t_page->page->index != idx); - if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap))) + if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap))) return t_page; return NULL; @@ -889,10 +900,10 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page) t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true); - __clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap); - if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) { + __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap); + if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) { null_free_page(c_page); - if (t_page && t_page->bitmap == 0) { + if (t_page && null_page_empty(t_page)) { ret = radix_tree_delete_item(&nullb->dev->data, idx, t_page); null_free_page(t_page); @@ -908,11 +919,11 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page) for (i = 0; i < PAGE_SECTORS; i += (nullb->dev->blocksize >> SECTOR_SHIFT)) { - if (test_bit(i, &c_page->bitmap)) { + if (test_bit(i, c_page->bitmap)) { offset = (i << SECTOR_SHIFT); memcpy(dst + offset, src + offset, nullb->dev->blocksize); - __set_bit(i, &t_page->bitmap); + __set_bit(i, t_page->bitmap); } } @@ -949,10 +960,10 @@ static int null_make_cache_space(struct nullb *nullb, unsigned long n) * We found the page which is being flushed to disk by other * threads */ - if (test_bit(NULLB_PAGE_LOCK, 
&c_pages[i]->bitmap)) + if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap)) c_pages[i] = NULL; else - __set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap); + __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap); } one_round = 0; @@ -1005,7 +1016,7 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source, kunmap_atomic(dst); kunmap_atomic(src); - __set_bit(sector & SECTOR_MASK, &t_page->bitmap); + __set_bit(sector & SECTOR_MASK, t_page->bitmap); if (is_fua) null_free_sector(nullb, sector, true); @@ -1919,10 +1930,6 @@ static int __init null_init(void) struct nullb *nullb; struct nullb_device *dev; - /* check for nullb_page.bitmap */ - if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT)) - return -EINVAL; - if (g_bs > PAGE_SIZE) { pr_warn("null_blk: invalid block size\n"); pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE); @@ -1985,8 +1992,10 @@ static int __init null_init(void) for (i = 0; i < nr_devices; i++) { dev = null_alloc_dev(); - if (!dev) + if (!dev) { + ret = -ENOMEM; goto err_dev; + } ret = null_add_dev(dev); if (ret) { null_free_dev(dev); diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 7b8c6368beb7..a026211afb51 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -230,6 +230,8 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode) struct pcd_unit *cd = bdev->bd_disk->private_data; int ret; + check_disk_change(bdev); + mutex_lock(&pcd_mutex); ret = cdrom_open(&cd->info, bdev, mode); mutex_unlock(&pcd_mutex); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 67974796c350..531a0915066b 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2579,14 +2579,14 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) bdev = bdget(dev); if (!bdev) return -ENOMEM; + ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL); + if (ret) + return ret; if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) { WARN_ONCE(true, "Attempt to register a non-SCSI queue\n"); - bdput(bdev); + blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); return -EINVAL; } - ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL); - if (ret) - return ret; /* This is safe, since we have a reference from open(). */ __module_get(THIS_MODULE); @@ -2745,7 +2745,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) pd->pkt_dev = MKDEV(pktdev_major, idx); ret = pkt_new_dev(pd, dev); if (ret) - goto out_new_dev; + goto out_mem2; /* inherit events of the host device */ disk->events = pd->bdev->bd_disk->events; @@ -2763,8 +2763,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) mutex_unlock(&ctl_mutex); return 0; -out_new_dev: - blk_cleanup_queue(disk->queue); out_mem2: put_disk(disk); out_mem: diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index adc877dfef5c..9057dad2a64c 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -124,11 +124,13 @@ static int atomic_dec_return_safe(atomic_t *v) #define RBD_FEATURE_STRIPINGV2 (1ULL<<1) #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2) #define RBD_FEATURE_DATA_POOL (1ULL<<7) +#define RBD_FEATURE_OPERATIONS (1ULL<<8) #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \ RBD_FEATURE_STRIPINGV2 | \ RBD_FEATURE_EXCLUSIVE_LOCK | \ - RBD_FEATURE_DATA_POOL) + RBD_FEATURE_DATA_POOL | \ + RBD_FEATURE_OPERATIONS) /* Features supported by this (client software) implementation. 
*/ @@ -3074,13 +3076,21 @@ static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf) mutex_unlock(&rbd_dev->watch_mutex); } +static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie) +{ + struct rbd_client_id cid = rbd_get_cid(rbd_dev); + + strcpy(rbd_dev->lock_cookie, cookie); + rbd_set_owner_cid(rbd_dev, &cid); + queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work); +} + /* * lock_rwsem must be held for write */ static int rbd_lock(struct rbd_device *rbd_dev) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; - struct rbd_client_id cid = rbd_get_cid(rbd_dev); char cookie[32]; int ret; @@ -3095,9 +3105,7 @@ static int rbd_lock(struct rbd_device *rbd_dev) return ret; rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED; - strcpy(rbd_dev->lock_cookie, cookie); - rbd_set_owner_cid(rbd_dev, &cid); - queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work); + __rbd_lock(rbd_dev, cookie); return 0; } @@ -3833,7 +3841,6 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev) { dout("%s rbd_dev %p\n", __func__, rbd_dev); - cancel_delayed_work_sync(&rbd_dev->watch_dwork); cancel_work_sync(&rbd_dev->acquired_lock_work); cancel_work_sync(&rbd_dev->released_lock_work); cancel_delayed_work_sync(&rbd_dev->lock_dwork); @@ -3851,6 +3858,7 @@ static void rbd_unregister_watch(struct rbd_device *rbd_dev) rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED; mutex_unlock(&rbd_dev->watch_mutex); + cancel_delayed_work_sync(&rbd_dev->watch_dwork); ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc); } @@ -3883,7 +3891,7 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev) queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0); } else { - strcpy(rbd_dev->lock_cookie, cookie); + __rbd_lock(rbd_dev, cookie); } } @@ -4415,7 +4423,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) segment_size = rbd_obj_bytes(&rbd_dev->header); blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); q->limits.max_sectors = queue_max_hw_sectors(q); - blk_queue_max_segments(q, segment_size / SECTOR_SIZE); + blk_queue_max_segments(q, USHRT_MAX); blk_queue_max_segment_size(q, segment_size); blk_queue_io_min(q, segment_size); blk_queue_io_opt(q, segment_size); diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 84434d3ea19b..e88d50f75a4a 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -110,7 +110,7 @@ struct iwm { /* Select values for swim_select and swim_readbit */ #define READ_DATA_0 0x074 -#define TWOMEG_DRIVE 0x075 +#define ONEMEG_DRIVE 0x075 #define SINGLE_SIDED 0x076 #define DRIVE_PRESENT 0x077 #define DISK_IN 0x170 @@ -118,9 +118,9 @@ struct iwm { #define TRACK_ZERO 0x172 #define TACHO 0x173 #define READ_DATA_1 0x174 -#define MFM_MODE 0x175 +#define GCR_MODE 0x175 #define SEEK_COMPLETE 0x176 -#define ONEMEG_MEDIA 0x177 +#define TWOMEG_MEDIA 0x177 /* Bits in handshake register */ @@ -612,7 +612,6 @@ static void setup_medium(struct floppy_state *fs) struct floppy_struct *g; fs->disk_in = 1; fs->write_protected = swim_readbit(base, WRITE_PROT); - fs->type = swim_readbit(base, ONEMEG_MEDIA); if (swim_track00(base)) printk(KERN_ERR @@ -620,6 +619,9 @@ static void setup_medium(struct floppy_state *fs) swim_track00(base); + fs->type = swim_readbit(base, TWOMEG_MEDIA) ? + HD_MEDIA : DD_MEDIA; + fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 
1 : 2; get_floppy_geometry(fs, 0, &g); fs->total_secs = g->size; fs->secpercyl = g->head * g->sect; @@ -646,7 +648,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2); udelay(10); - swim_drive(base, INTERNAL_DRIVE); + swim_drive(base, fs->location); swim_motor(base, ON); swim_action(base, SETMFM); if (fs->ejected) @@ -656,6 +658,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) goto out; } + set_capacity(fs->disk, fs->total_secs); + if (mode & FMODE_NDELAY) return 0; @@ -727,14 +731,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode, if (copy_to_user((void __user *) param, (void *) &floppy_type, sizeof(struct floppy_struct))) return -EFAULT; - break; - - default: - printk(KERN_DEBUG "SWIM floppy_ioctl: unknown cmd %d\n", - cmd); - return -ENOSYS; + return 0; } - return 0; + return -ENOTTY; } static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo) @@ -795,7 +794,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) struct swim_priv *swd = data; int drive = (*part & 3); - if (drive > swd->floppy_count) + if (drive >= swd->floppy_count) return NULL; *part = 0; @@ -813,10 +812,9 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location) swim_motor(base, OFF); - if (swim_readbit(base, SINGLE_SIDED)) - fs->head_number = 1; - else - fs->head_number = 2; + fs->type = HD_MEDIA; + fs->head_number = 2; + fs->ref_count = 0; fs->ejected = 1; @@ -834,10 +832,12 @@ static int swim_floppy_init(struct swim_priv *swd) /* scan floppy drives */ swim_drive(base, INTERNAL_DRIVE); - if (swim_readbit(base, DRIVE_PRESENT)) + if (swim_readbit(base, DRIVE_PRESENT) && + !swim_readbit(base, ONEMEG_DRIVE)) swim_add_floppy(swd, INTERNAL_DRIVE); swim_drive(base, EXTERNAL_DRIVE); - if (swim_readbit(base, DRIVE_PRESENT)) + if (swim_readbit(base, DRIVE_PRESENT) && + !swim_readbit(base, ONEMEG_DRIVE)) swim_add_floppy(swd, EXTERNAL_DRIVE); /* register floppy drives */ @@ -861,7 +861,6 @@ static int swim_floppy_init(struct swim_priv *swd) &swd->lock); if (!swd->unit[drive].disk->queue) { err = -ENOMEM; - put_disk(swd->unit[drive].disk); goto exit_put_disks; } blk_queue_bounce_limit(swd->unit[drive].disk->queue, @@ -911,7 +910,7 @@ static int swim_probe(struct platform_device *dev) goto out; } - swim_base = ioremap(res->start, resource_size(res)); + swim_base = (struct swim __iomem *)res->start; if (!swim_base) { ret = -ENOMEM; goto out_release_io; @@ -923,7 +922,7 @@ static int swim_probe(struct platform_device *dev) if (!get_swim_mode(swim_base)) { printk(KERN_INFO "SWIM device not found !\n"); ret = -ENODEV; - goto out_iounmap; + goto out_release_io; } /* set platform driver data */ @@ -931,7 +930,7 @@ static int swim_probe(struct platform_device *dev) swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL); if (!swd) { ret = -ENOMEM; - goto out_iounmap; + goto out_release_io; } platform_set_drvdata(dev, swd); @@ -945,8 +944,6 @@ static int swim_probe(struct platform_device *dev) out_kfree: kfree(swd); -out_iounmap: - iounmap(swim_base); out_release_io: release_mem_region(res->start, resource_size(res)); out: @@ -974,8 +971,6 @@ static int swim_remove(struct platform_device *dev) for (drive = 0; drive < swd->floppy_count; drive++) floppy_eject(&swd->unit[drive]); - iounmap(swd->base); - res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (res) release_mem_region(res->start, resource_size(res)); diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 
9f931f8f6b4c..0d7527c6825a 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -148,7 +148,7 @@ struct swim3 { #define MOTOR_ON 2 #define RELAX 3 /* also eject in progress */ #define READ_DATA_0 4 -#define TWOMEG_DRIVE 5 +#define ONEMEG_DRIVE 5 #define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */ #define DRIVE_PRESENT 7 #define DISK_IN 8 @@ -156,9 +156,9 @@ struct swim3 { #define TRACK_ZERO 10 #define TACHO 11 #define READ_DATA_1 12 -#define MFM_MODE 13 +#define GCR_MODE 13 #define SEEK_COMPLETE 14 -#define ONEMEG_MEDIA 15 +#define TWOMEG_MEDIA 15 /* Definitions of values used in writing and formatting */ #define DATA_ESCAPE 0x99 diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 891265acb10e..7d23225f79ed 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -262,6 +262,7 @@ static DEFINE_SPINLOCK(minor_lock); static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo); static void blkfront_gather_backend_features(struct blkfront_info *info); +static int negotiate_mq(struct blkfront_info *info); static int get_id_from_freelist(struct blkfront_ring_info *rinfo) { @@ -1774,11 +1775,18 @@ static int talk_to_blkback(struct xenbus_device *dev, unsigned int i, max_page_order; unsigned int ring_page_order; + if (!info) + return -ENODEV; + max_page_order = xenbus_read_unsigned(info->xbdev->otherend, "max-ring-page-order", 0); ring_page_order = min(xen_blkif_max_ring_order, max_page_order); info->nr_ring_pages = 1 << ring_page_order; + err = negotiate_mq(info); + if (err) + goto destroy_blkring; + for (i = 0; i < info->nr_rings; i++) { struct blkfront_ring_info *rinfo = &info->rinfo[i]; @@ -1978,11 +1986,6 @@ static int blkfront_probe(struct xenbus_device *dev, } info->xbdev = dev; - err = negotiate_mq(info); - if (err) { - kfree(info); - return err; - } mutex_init(&info->mutex); info->vdevice = vdevice; @@ -2099,10 +2102,6 @@ static int blkfront_resume(struct xenbus_device *dev) blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); - err = negotiate_mq(info); - if (err) - return err; - err = talk_to_blkback(dev, info); if (!err) blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 98a60db8e5d1..b33c8d6eb8c7 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -66,6 +66,7 @@ config BT_HCIBTSDIO config BT_HCIUART tristate "HCI UART driver" + depends on SERIAL_DEV_BUS || !SERIAL_DEV_BUS depends on TTY help Bluetooth HCI UART driver. 
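(The "SERIAL_DEV_BUS || !SERIAL_DEV_BUS" dependency added above is the standard Kconfig idiom for tracking a tristate dependency: with SERIAL_DEV_BUS=m the expression evaluates to m, capping BT_HCIUART at m; with SERIAL_DEV_BUS=y or =n it evaluates to y and imposes no limit. This keeps a built-in BT_HCIUART from referencing serdev symbols that only exist in a module.)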
@@ -80,7 +81,6 @@ config BT_HCIUART config BT_HCIUART_SERDEV bool depends on SERIAL_DEV_BUS && BT_HCIUART - depends on SERIAL_DEV_BUS=y || SERIAL_DEV_BUS=BT_HCIUART default y config BT_HCIUART_H4 diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c index d00c4fdae924..093fd096f0c8 100644 --- a/drivers/bluetooth/btqcomsmd.c +++ b/drivers/bluetooth/btqcomsmd.c @@ -26,6 +26,7 @@ struct btqcomsmd { struct hci_dev *hdev; + bdaddr_t bdaddr; struct rpmsg_endpoint *acl_channel; struct rpmsg_endpoint *cmd_channel; }; @@ -85,7 +86,8 @@ static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb) break; } - kfree_skb(skb); + if (!ret) + kfree_skb(skb); return ret; } @@ -100,6 +102,38 @@ static int btqcomsmd_close(struct hci_dev *hdev) return 0; } +static int btqcomsmd_setup(struct hci_dev *hdev) +{ + struct btqcomsmd *btq = hci_get_drvdata(hdev); + struct sk_buff *skb; + int err; + + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + kfree_skb(skb); + + /* Devices do not have persistent storage for BD address. If no + * BD address has been retrieved during probe, mark the device + * as having an invalid BD address. + */ + if (!bacmp(&btq->bdaddr, BDADDR_ANY)) { + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + return 0; + } + + /* When setting a configured BD address fails, mark the device + * as having an invalid BD address. + */ + err = qca_set_bdaddr_rome(hdev, &btq->bdaddr); + if (err) { + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + return 0; + } + + return 0; +} + static int btqcomsmd_probe(struct platform_device *pdev) { struct btqcomsmd *btq; @@ -135,6 +169,7 @@ static int btqcomsmd_probe(struct platform_device *pdev) hdev->open = btqcomsmd_open; hdev->close = btqcomsmd_close; hdev->send = btqcomsmd_send; + hdev->setup = btqcomsmd_setup; hdev->set_bdaddr = qca_set_bdaddr_rome; ret = hci_register_dev(hdev); diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c index c8e945d19ffe..20142bc77554 100644 --- a/drivers/bluetooth/btsdio.c +++ b/drivers/bluetooth/btsdio.c @@ -31,6 +31,7 @@ #include #include +#include #include #include @@ -292,6 +293,14 @@ static int btsdio_probe(struct sdio_func *func, tuple = tuple->next; } + /* BCM43341 devices soldered onto the PCB (non-removable) use a + * UART connection for Bluetooth, so ignore the BT SDIO interface.
+ */ + if (func->vendor == SDIO_VENDOR_ID_BROADCOM && + func->device == SDIO_DEVICE_ID_BROADCOM_43341 && + !mmc_card_is_removable(func->card->host)) + return -ENODEV; + data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL); + if (!data) return -ENOMEM; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c old mode 100644 new mode 100755 index 7a5c06aaa181..a905d286f886 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -21,8 +21,10 @@ * */ +#include #include #include +#include #include #include #include @@ -272,9 +274,12 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME }, /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, @@ -366,6 +371,12 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK }, + /* Additional Realtek 8723BU Bluetooth devices */ + { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK }, + + /* Additional Realtek 8723DE Bluetooth devices */ + { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK }, + /* Additional Realtek 8821AE Bluetooth devices */ { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK }, @@ -373,12 +384,37 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK }, + /* Additional Realtek 8822BE Bluetooth devices */ + { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK }, + /* Silicon Wave based devices */ { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE }, { } /* Terminating entry */ }; +/* The Bluetooth USB module built into some devices needs to be reset on resume; * this is a problem with the platform (likely shutting off all power), not with * the module itself. So we use a DMI list to match known broken platforms.
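+ * (On these QCA ROME designs the firmware patch loaded by the driver is lost when the platform cuts power in suspend, so the quirk forces a device reset, and with it a firmware reload, on resume; see the BTUSB_RESET_RESUME removal below.)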
+ */ +static const struct dmi_system_id btusb_needs_reset_resume_table[] = { + { + /* Dell OptiPlex 3060 (QCA ROME device 0cf3:e007) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"), + }, + }, + { + /* Dell XPS 9360 (QCA ROME device 0cf3:e300) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), + }, + }, + {} +}; + #define BTUSB_MAX_ISOC_FRAMES 10 #define BTUSB_INTR_RUNNING 0 @@ -391,9 +427,8 @@ static const struct usb_device_id blacklist_table[] = { #define BTUSB_FIRMWARE_LOADED 7 #define BTUSB_FIRMWARE_FAILED 8 #define BTUSB_BOOTING 9 -#define BTUSB_RESET_RESUME 10 -#define BTUSB_DIAG_RUNNING 11 -#define BTUSB_OOB_WAKE_ENABLED 12 +#define BTUSB_DIAG_RUNNING 10 +#define BTUSB_OOB_WAKE_ENABLED 11 struct btusb_data { struct hci_dev *hdev; @@ -1944,6 +1979,18 @@ static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb) } } } + else if (skb->len >= sizeof(struct hci_event_hdr)) { + struct hci_event_hdr *hdr; + + hdr = (struct hci_event_hdr *) skb->data; + + if (hdr->evt == HCI_EV_CMD_COMPLETE) { + *(__u8 *)(skb->data + 2) = 1; + } else if (hdr->evt == HCI_EV_CMD_STATUS) { + *(__u8 *)(skb->data + 3) = 1; + } + } + return hci_recv_frame(hdev, skb); } @@ -2877,6 +2924,12 @@ static int btusb_config_oob_wake(struct hci_dev *hdev) } #endif +static void btusb_check_needs_reset_resume(struct usb_interface *intf) +{ + if (dmi_check_system(btusb_needs_reset_resume_table)) + interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; +} + static int btusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { @@ -3098,12 +3151,7 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_QCA_ROME) { data->setup_on_usb = btusb_setup_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; - - /* QCA Rome devices lose their updated firmware over suspend, - * but the USB hub doesn't notice any status change. - * Explicitly request a device reset on resume. - */ - set_bit(BTUSB_RESET_RESUME, &data->flags); + btusb_check_needs_reset_resume(intf); } #ifdef CONFIG_BT_HCIBTUSB_RTL @@ -3114,7 +3162,7 @@ static int btusb_probe(struct usb_interface *intf, * but the USB hub doesn't notice any status change. * Explicitly request a device reset on resume. */ - set_bit(BTUSB_RESET_RESUME, &data->flags); + interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; } #endif @@ -3279,14 +3327,6 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message) enable_irq(data->oob_wake_irq); } - /* Optionally request a device reset on resume, but only when - * wakeups are disabled. If wakeups are enabled we assume the - * device will stay powered up throughout suspend. - */ - if (test_bit(BTUSB_RESET_RESUME, &data->flags) && - !device_may_wakeup(&data->udev->dev)) - data->udev->reset_resume = 1; - return 0; } diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index e2540113d0da..32527bdf4b50 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -68,7 +68,7 @@ struct bcm_device { u32 init_speed; u32 oper_speed; int irq; - u8 irq_polarity; + bool irq_active_low; #ifdef CONFIG_PM struct hci_uart *hu; @@ -213,7 +213,9 @@ static int bcm_request_irq(struct bcm_data *bcm) } err = devm_request_irq(&bdev->pdev->dev, bdev->irq, bcm_host_wake, - IRQF_TRIGGER_RISING, "host_wake", bdev); + bdev->irq_active_low ? 
IRQF_TRIGGER_FALLING : + IRQF_TRIGGER_RISING, + "host_wake", bdev); if (err) goto unlock; @@ -253,7 +255,7 @@ static int bcm_setup_sleep(struct hci_uart *hu) struct sk_buff *skb; struct bcm_set_sleep_mode sleep_params = default_sleep_params; - sleep_params.host_wake_active = !bcm->dev->irq_polarity; + sleep_params.host_wake_active = !bcm->dev->irq_active_low; skb = __hci_cmd_sync(hu->hdev, 0xfc27, sizeof(sleep_params), &sleep_params, HCI_INIT_TIMEOUT); @@ -690,35 +692,14 @@ static const struct acpi_gpio_mapping acpi_bcm_int_first_gpios[] = { }; #ifdef CONFIG_ACPI -static u8 acpi_active_low = ACPI_ACTIVE_LOW; - /* IRQ polarity of some chipsets are not defined correctly in ACPI table. */ -static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = { - { - .ident = "Asus T100TA", - .matches = { - DMI_EXACT_MATCH(DMI_SYS_VENDOR, - "ASUSTeK COMPUTER INC."), - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"), - }, - .driver_data = &acpi_active_low, - }, - { - .ident = "Asus T100CHI", - .matches = { - DMI_EXACT_MATCH(DMI_SYS_VENDOR, - "ASUSTeK COMPUTER INC."), - DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100CHI"), - }, - .driver_data = &acpi_active_low, - }, +static const struct dmi_system_id bcm_active_low_irq_dmi_table[] = { { /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */ .ident = "Lenovo ThinkPad 8", .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"), }, - .driver_data = &acpi_active_low, }, { } }; @@ -733,13 +714,15 @@ static int bcm_resource(struct acpi_resource *ares, void *data) switch (ares->type) { case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: irq = &ares->data.extended_irq; - dev->irq_polarity = irq->polarity; + if (irq->polarity != ACPI_ACTIVE_LOW) + dev_info(&dev->pdev->dev, "ACPI Interrupt resource is active-high, this is usually wrong, treating the IRQ as active-low\n"); + dev->irq_active_low = true; break; case ACPI_RESOURCE_TYPE_GPIO: gpio = &ares->data.gpio; if (gpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT) - dev->irq_polarity = gpio->polarity; + dev->irq_active_low = gpio->polarity == ACPI_ACTIVE_LOW; break; case ACPI_RESOURCE_TYPE_SERIAL_BUS: @@ -834,11 +817,11 @@ static int bcm_acpi_probe(struct bcm_device *dev) return ret; acpi_dev_free_resource_list(&resources); - dmi_id = dmi_first_match(bcm_wrong_irq_dmi_table); + dmi_id = dmi_first_match(bcm_active_low_irq_dmi_table); if (dmi_id) { bt_dev_warn(dev, "%s: Overwriting IRQ polarity to active low", dmi_id->ident); - dev->irq_polarity = *(u8 *)dmi_id->driver_data; + dev->irq_active_low = true; } return 0; diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index a746627e784e..c823914b3a80 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -114,12 +115,12 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) struct sk_buff *skb = hu->tx_skb; if (!skb) { - read_lock(&hu->proto_lock); + percpu_down_read(&hu->proto_lock); if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) skb = hu->proto->dequeue(hu); - read_unlock(&hu->proto_lock); + percpu_up_read(&hu->proto_lock); } else { hu->tx_skb = NULL; } @@ -129,7 +130,14 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) int hci_uart_tx_wakeup(struct hci_uart *hu) { - read_lock(&hu->proto_lock); + /* This may be called in an IRQ context, so we can't sleep. 
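+ * (Unlike the read_lock() it replaces, percpu_down_read() can sleep.)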
Therefore + * we try to acquire the lock only, and if that fails we assume the + * tty is being closed because that is the only time the write lock is + * acquired. If, however, at some point in the future the write lock + * is also acquired in other situations, then this must be revisited. + */ + if (!percpu_down_read_trylock(&hu->proto_lock)) + return 0; if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) goto no_schedule; @@ -144,7 +152,7 @@ int hci_uart_tx_wakeup(struct hci_uart *hu) schedule_work(&hu->write_work); no_schedule: - read_unlock(&hu->proto_lock); + percpu_up_read(&hu->proto_lock); return 0; } @@ -246,12 +254,12 @@ static int hci_uart_flush(struct hci_dev *hdev) tty_ldisc_flush(tty); tty_driver_flush_buffer(tty); - read_lock(&hu->proto_lock); + percpu_down_read(&hu->proto_lock); if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) hu->proto->flush(hu); - read_unlock(&hu->proto_lock); + percpu_up_read(&hu->proto_lock); return 0; } @@ -274,15 +282,15 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb), skb->len); - read_lock(&hu->proto_lock); + percpu_down_read(&hu->proto_lock); if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { - read_unlock(&hu->proto_lock); + percpu_up_read(&hu->proto_lock); return -EUNATCH; } hu->proto->enqueue(hu, skb); - read_unlock(&hu->proto_lock); + percpu_up_read(&hu->proto_lock); hci_uart_tx_wakeup(hu); @@ -298,6 +306,12 @@ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable) unsigned int set = 0; unsigned int clear = 0; + if (hu->serdev) { + serdev_device_set_flow_control(hu->serdev, !enable); + serdev_device_set_rts(hu->serdev, !enable); + return; + } + if (enable) { /* Disable hardware flow control */ ktermios = tty->termios; @@ -479,7 +493,7 @@ static int hci_uart_tty_open(struct tty_struct *tty) INIT_WORK(&hu->init_ready, hci_uart_init_work); INIT_WORK(&hu->write_work, hci_uart_write_work); - rwlock_init(&hu->proto_lock); + percpu_init_rwsem(&hu->proto_lock); /* Flush any pending characters in the driver */ tty_driver_flush_buffer(tty); @@ -496,7 +510,6 @@ static void hci_uart_tty_close(struct tty_struct *tty) { struct hci_uart *hu = tty->disc_data; struct hci_dev *hdev; - unsigned long flags; BT_DBG("tty %p", tty); @@ -510,12 +523,12 @@ static void hci_uart_tty_close(struct tty_struct *tty) if (hdev) hci_uart_close(hdev); - cancel_work_sync(&hu->write_work); - if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) { - write_lock_irqsave(&hu->proto_lock, flags); + percpu_down_write(&hu->proto_lock); clear_bit(HCI_UART_PROTO_READY, &hu->flags); - write_unlock_irqrestore(&hu->proto_lock, flags); + percpu_up_write(&hu->proto_lock); + + cancel_work_sync(&hu->write_work); if (hdev) { if (test_bit(HCI_UART_REGISTERED, &hu->flags)) @@ -575,10 +588,10 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, if (!hu || tty != hu->tty) return; - read_lock(&hu->proto_lock); + percpu_down_read(&hu->proto_lock); if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { - read_unlock(&hu->proto_lock); + percpu_up_read(&hu->proto_lock); return; } @@ -586,7 +599,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, * tty caller */ hu->proto->recv(hu, data, count); - read_unlock(&hu->proto_lock); + percpu_up_read(&hu->proto_lock); if (hu->hdev) hu->hdev->stat.byte_rx += count; diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c index ffb00669346f..d98d846ca4f9 100644 --- a/drivers/bluetooth/hci_mrvl.c +++ 
b/drivers/bluetooth/hci_mrvl.c @@ -60,6 +60,20 @@ struct hci_mrvl_pkt { } __packed; #define HCI_MRVL_PKT_SIZE 4 +static int get_cts(struct hci_uart *hu) +{ + struct tty_struct *tty = hu->tty; + u32 state = tty->ops->tiocmget(tty); + + if (state & TIOCM_CTS) { + BT_INFO("CTS is low"); + return 1; + } + BT_INFO("CTS is high"); + + return 0; +} + static int mrvl_open(struct hci_uart *hu) { struct mrvl_data *mrvl; @@ -345,6 +359,14 @@ static int mrvl_setup(struct hci_uart *hu) { int err; + if (get_cts(hu)) { + BT_INFO("fw is running"); + hci_uart_set_baudrate(hu, 3000000); + hci_uart_set_flow_control(hu, false); + + return 0; + } + hci_uart_set_flow_control(hu, true); err = mrvl_load_firmware(hu->hdev, "mrvl/helper_uart_3000000.bin"); diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 392f412b4575..a6173ddfb5a7 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -881,7 +881,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) */ set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS)); - set_current_state(TASK_INTERRUPTIBLE); + set_current_state(TASK_RUNNING); return 0; } @@ -933,6 +933,15 @@ static int qca_setup(struct hci_uart *hu) if (!ret) { set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); qca_debugfs_init(hdev); + } else if (ret == -ENOENT) { + /* No patch/nvm-config found, run with original fw/config */ + ret = 0; + } else if (ret == -EAGAIN) { + /* + * Userspace firmware loader will return -EAGAIN in case no + * patch/nvm-config is found, so run with original fw/config. + */ + ret = 0; } /* Setup bdaddr */ diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index b725ac4f7ff6..52e6d4d1608e 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -304,6 +304,7 @@ int hci_uart_register_device(struct hci_uart *hu, hci_set_drvdata(hdev, hu); INIT_WORK(&hu->write_work, hci_uart_write_work); + percpu_init_rwsem(&hu->proto_lock); /* Only when vendor specific setup callback is provided, consider * the manufacturer information valid. This avoids filling in the diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index d9cd95d81149..66e8c68e4607 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -87,7 +87,7 @@ struct hci_uart { struct work_struct write_work; const struct hci_uart_proto *proto; - rwlock_t proto_lock; /* Stop work for proto close */ + struct percpu_rw_semaphore proto_lock; /* Stop work for proto close */ void *priv; struct sk_buff *tx_skb; diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 3e66f4cc1a59..886421f09dc0 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -184,4 +184,28 @@ config DA8XX_MSTPRI configuration. Allows to adjust the priorities of all master peripherals. +config DVC_TRACE_BUS + bool "DvC-Trace pseudobus" + default n + depends on USB_GADGET + help + DvC-Trace pseudobus is meant to group devices capable of sending + trace data via a USB DvC-Trace gadget function. + A USB function driver will be able to choose a source device and + provide the means to transfer the data. + + Say Y to enable it. + +config DVC_TRACE_BUS_DEBUG + bool "DvC-Trace pseudobus debug" + default n + depends on DVC_TRACE_BUS + help + DvC-Trace pseudobus is meant to group devices capable of sending + trace data via a USB DvC-Trace gadget function.
+ A USB function driver will be able to choose a source device and + provide the means to transfer the data. + + Say Y to enable extended debug messages in this driver. + endmenu diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index 3ae96cffabd5..cf2051800214 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -25,3 +25,5 @@ obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o +obj-$(CONFIG_DVC_TRACE_BUS) += dvctrace.o +subdir-ccflags-$(CONFIG_DVC_TRACE_BUS_DEBUG) += -DDVCT_DEBUG diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index 3c29d36702a8..5426c04fe24b 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c @@ -1755,14 +1755,17 @@ static int cci_pmu_probe(struct platform_device *pdev) raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); mutex_init(&cci_pmu->reserve_mutex); atomic_set(&cci_pmu->active_events, 0); - cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus); + cpumask_set_cpu(get_cpu(), &cci_pmu->cpus); ret = cci_pmu_init(cci_pmu, pdev); - if (ret) + if (ret) { + put_cpu(); return ret; + } cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, &cci_pmu->node); + put_cpu(); pr_info("ARM %s PMU driver probed", cci_pmu->model->name); return 0; } diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index e8c6946fed9d..942d076cbb0a 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c @@ -736,7 +736,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) ccn = pmu_to_arm_ccn(event->pmu); if (hw->sample_period) { - dev_warn(ccn->dev, "Sampling not supported!\n"); + dev_dbg(ccn->dev, "Sampling not supported!\n"); return -EOPNOTSUPP; } @@ -744,12 +744,12 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) event->attr.exclude_kernel || event->attr.exclude_hv || event->attr.exclude_idle || event->attr.exclude_host || event->attr.exclude_guest) { - dev_warn(ccn->dev, "Can't exclude execution levels!\n"); + dev_dbg(ccn->dev, "Can't exclude execution levels!\n"); return -EINVAL; } if (event->cpu < 0) { - dev_warn(ccn->dev, "Can't provide per-task data!\n"); + dev_dbg(ccn->dev, "Can't provide per-task data!\n"); return -EOPNOTSUPP; } /* @@ -771,13 +771,13 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) switch (type) { case CCN_TYPE_MN: if (node_xp != ccn->mn_id) { - dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp); + dev_dbg(ccn->dev, "Invalid MN ID %d!\n", node_xp); return -EINVAL; } break; case CCN_TYPE_XP: if (node_xp >= ccn->num_xps) { - dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp); + dev_dbg(ccn->dev, "Invalid XP ID %d!\n", node_xp); return -EINVAL; } break; @@ -785,11 +785,11 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) break; default: if (node_xp >= ccn->num_nodes) { - dev_warn(ccn->dev, "Invalid node ID %d!\n", node_xp); + dev_dbg(ccn->dev, "Invalid node ID %d!\n", node_xp); return -EINVAL; } if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) { - dev_warn(ccn->dev, "Invalid type 0x%x for node %d!\n", + dev_dbg(ccn->dev, "Invalid type 0x%x for node %d!\n", type, node_xp); return -EINVAL; } @@ -808,19 +808,19 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) if (event_id != e->event) continue; if (e->num_ports && port >= e->num_ports) { - dev_warn(ccn->dev, "Invalid port %d for node/XP %d!\n", + dev_dbg(ccn->dev, "Invalid port %d for node/XP %d!\n", port, node_xp); return -EINVAL; } if (e->num_vcs && vc >= e->num_vcs) { - dev_warn(ccn->dev,
"Invalid vc %d for node/XP %d!\n", + dev_dbg(ccn->dev, "Invalid vc %d for node/XP %d!\n", vc, node_xp); return -EINVAL; } valid = 1; } if (!valid) { - dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n", + dev_dbg(ccn->dev, "Invalid event 0x%x for node/XP %d!\n", event_id, node_xp); return -EINVAL; } @@ -1271,11 +1271,16 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id); name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL); + if (!name) { + err = -ENOMEM; + goto error_choose_name; + } snprintf(name, len + 1, "ccn_%d", ccn->dt.id); } /* Perf driver registration */ ccn->dt.pmu = (struct pmu) { + .module = THIS_MODULE, .attr_groups = arm_ccn_pmu_attr_groups, .task_ctx_nr = perf_invalid_context, .event_init = arm_ccn_pmu_event_init, @@ -1297,7 +1302,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) } /* Pick one CPU which we will use to collect data from CCN... */ - cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu); + cpumask_set_cpu(get_cpu(), &ccn->dt.cpu); /* Also make sure that the overflow interrupt is handled by this CPU */ if (ccn->irq) { @@ -1314,10 +1319,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, &ccn->dt.node); + put_cpu(); return 0; error_pmu_register: error_set_affinity: + put_cpu(); +error_choose_name: ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); for (i = 0; i < ccn->num_xps; i++) writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); @@ -1580,8 +1588,8 @@ static int __init arm_ccn_init(void) static void __exit arm_ccn_exit(void) { - cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE); platform_driver_unregister(&arm_ccn_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE); } module_init(arm_ccn_init); diff --git a/drivers/bus/dvctrace.c b/drivers/bus/dvctrace.c new file mode 100644 index 000000000000..915c42593edb --- /dev/null +++ b/drivers/bus/dvctrace.c @@ -0,0 +1,737 @@ +/* + * DvC-Trace(dvct) Bus driver + * + * Copyright (C) 2015, Intel Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ + +#include +#include +#include +#include + +#ifdef DVCT_DEBUG +#define DVCT_IN() pr_debug("in\n") +#else +#define DVCT_IN() do {} while (0) +#endif + +/* Count the number of USB descriptors in the given ascii hex string + * What we expect: + * ll tt ss xx xx xx + * | | | +- Fill up the descriptor + * | | +- Descriptor sub-type (1-4) + * | | DC_INPUT_CONNECTION 0x01 + * | | DC_OUTPUT_CONNECTION 0x02 + * | | DC_DEBUG_UNIT 0x03 + * | | DC_DEBUG_ATTRIBUTES 0x04 + * | +- Descriptor type (USB_DT_CS_INTERFACE) + * +- Descriptor length (check > 3 and we have the rest of it) + */ +static int count_descriptors(const char *buf, size_t size) +{ + size_t off = 0; + int i, j, count = 0; + u8 len, tmp; + + DVCT_IN(); + while (off < size) { + /*the length*/ + j = sscanf(buf + off, "%2hhx%n", &len, &i); + if (!j) + break; + if (j < 0 || len < 4) + return -EINVAL; + len--; + off += i; + + /*Type*/ + j = sscanf(buf + off, "%2hhx%n", &tmp, &i); + if (j <= 0 || tmp != USB_DT_CS_INTERFACE) + return -EINVAL; + len--; + off += i; + + /*Sub Type*/ + j = sscanf(buf + off, "%2hhx%n", &tmp, &i); + if (j <= 0 || tmp < DC_INPUT_CONNECTION + || tmp > DC_DEBUG_ATTRIBUTES) + return -EINVAL; + len--; + off += i; + + while (len) { + j = sscanf(buf + off, "%2hhx%n", &tmp, &i); + if (j <= 0) + return -EINVAL; + len--; + off += i; + } + count++; + } + return count; +} + +/* Parse @buf and get a pointer to the descriptor identified + * @idx*/ +static u8 *get_descriptor(const char *buf, size_t size, int idx) +{ + size_t off = 0; + int i, j, k, count = 0; + u8 len, tmp, *ret = NULL; + + DVCT_IN(); + while (off < size) { + j = sscanf(buf + off, "%2hhx%n", &len, &i); + if (j < 0) + return ERR_PTR(-EINVAL); + if (!j) + return ERR_PTR(-ERANGE); + + if (count == idx) { + ret = kmalloc(len, GFP_KERNEL); + if (!ret) + return ERR_PTR(-ENOMEM); + ret[0] = len; + } + off += i; + for (k = 1; k < len; k++) { + j = sscanf(buf + off, "%2hhx%n", &tmp, &i); + if (j <= 0) { + kfree(ret); + return ERR_PTR(-EINVAL); + } + if (count == idx) + ret[k] = tmp; + off += i; + } + if (count == idx) + break; + count++; + } + return ret; +} + + +static void free_strings(struct dvct_usb_descriptors *desc) +{ + struct usb_string *string; + + DVCT_IN(); + for (string = desc->str.strings; string && string->s; string++) + kfree(string->s); + + kfree(desc->str.strings); + desc->str.strings = NULL; + kfree(desc->lk_tbl); + desc->lk_tbl = NULL; +} + +static void free_descriptors(struct dvct_usb_descriptors *desc) +{ + struct usb_descriptor_header **hdr; + + DVCT_IN(); + if (desc->dvc_spec) { + for (hdr = desc->dvc_spec; *hdr; hdr++) + kfree(*hdr); + kfree(desc->dvc_spec); + desc->dvc_spec = NULL; + } + free_strings(desc); + kfree(desc); +} + +static int alloc_strings(struct dvct_usb_descriptors *desc, int count) +{ + DVCT_IN(); + desc->lk_tbl = kzalloc((count + 1) * sizeof(struct dvct_string_lookup), + GFP_KERNEL); + if (!desc->lk_tbl) + goto err; + + desc->str.strings = kzalloc((count + 1) * sizeof(*desc->str.strings), + GFP_KERNEL); + if (!desc->str.strings) + goto err_str; + + desc->str.language = 0x0409; + + return count; +err_str: + kfree(desc->lk_tbl); + desc->lk_tbl = NULL; +err: + return -ENOMEM; +} + +static struct dvct_usb_descriptors *alloc_descriptors(int count) +{ + struct dvct_usb_descriptors *desc; + + DVCT_IN(); + desc = kzalloc(sizeof(struct dvct_usb_descriptors), GFP_KERNEL); + if (!desc) + return ERR_PTR(-ENOMEM); + + desc->dvc_spec = + kzalloc((count + 1) * sizeof(struct 
usb_descriptor_header *), + GFP_KERNEL); + + if (!desc->dvc_spec) { + kfree(desc); + return ERR_PTR(-ENOMEM); + } + return desc; +} + +static ssize_t descriptors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + struct usb_descriptor_header **desc; + int ret = 0; + + DVCT_IN(); + if (!ds_dev->desc || !ds_dev->desc->dvc_spec + || !*ds_dev->desc->dvc_spec) + return sprintf(buf, "No Descriptors.\n"); + + for (desc = ds_dev->desc->dvc_spec; *desc; desc++) { + u8 len, *pdesc; + int i; + + len = (*desc)->bLength; + + /* Check if it fits, total output is 3 * len */ + if ((ret + 3 * len) > PAGE_SIZE) { + dev_warn(dev, "Descriptors attribute page overrun\n"); + break; + } + + pdesc = (u8 *)(*desc); + for (i = 0; i < len; i++) + ret += snprintf(buf + ret, PAGE_SIZE - ret, "%02hhX ", + pdesc[i]); + buf[ret - 1] = '\n'; + } + return ret; +} + +static ssize_t descriptors_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + int desc_count, i; + u8 *hdr; + + DVCT_IN(); + + if (ds_dev->instance_taken) + return -EBUSY; + + /*count the new descriptors, exit if invalid input*/ + desc_count = count_descriptors(buf, size); + if (desc_count <= 0) { + dev_warn(dev, "Invalid descriptor input:[%zu] %s", size, buf); + return -EINVAL; + } + + if (ds_dev->desc && ds_dev->desc != &ds_dev->static_desc) + free_descriptors(ds_dev->desc); + + ds_dev->desc = alloc_descriptors(desc_count); + if (IS_ERR_OR_NULL(ds_dev->desc)) { + ds_dev->desc = NULL; + return -ENOMEM; + } + + for (i = 0; i < desc_count; i++) { + hdr = get_descriptor(buf, size, i); + if (IS_ERR_OR_NULL(hdr)) { + dev_err(dev, "Cannot get descriptor %d, %ld\n", i, + PTR_ERR(hdr)); + free_descriptors(ds_dev->desc); + ds_dev->desc = NULL; + return -EINVAL; + } + ds_dev->desc->dvc_spec[i] = (struct usb_descriptor_header *)hdr; + } + return size; +} + +static DEVICE_ATTR_RW(descriptors); + + +/*find out at which member(offset) of which descriptor the pointer + * points to */ +static int dvctrace_string_ptr_to_offset(struct usb_descriptor_header **first, + u8 *ptr, int *desc_offset, int *offset) +{ + u8 *hdr_start, *hdr_end; + int idx = 0; + + DVCT_IN(); + for (; *first; first++, idx++) { + hdr_start = (u8 *) (*first); + hdr_end = hdr_start + ((*first)->bLength - 1); + if (ptr >= hdr_start && ptr <= hdr_end) { + *desc_offset = idx; + *offset = ptr - hdr_start; + return 0; + } + } + return -EINVAL; +} + +static u8 *dvctrace_offset_to_string_ptr(struct usb_descriptor_header **first, + int desc_offset, int offset) +{ + int idx = 0; + + DVCT_IN(); + for (; *first; first++, idx++) { + if (idx == desc_offset) { + if (offset >= (*first)->bLength) + return ERR_PTR(-ERANGE); + return ((u8 *) (*first)) + offset; + } + } + return ERR_PTR(-ERANGE); +} + +static int count_strings(const char *buf, size_t size) +{ + int count = 0; + size_t off = 0, slen; + int i = 0, j, desc_offset, offset; + + DVCT_IN(); + while (off < size) { + j = sscanf(buf + off, "%d.%d: %n", &desc_offset, &offset, &i); + if (j < 2) + break; + off += i; + slen = 0; + while (off + slen < size) { + if (buf[off + slen] == ';' || buf[off + slen] == '\n') + break; + slen++; + } + off += slen; + if (buf[off] == ';' || buf[off] == '\n') + off++; + count++; + } + return count; +} + +static char *get_string(const char *buf, size_t size, int index, + int *desc_offset, int *offset) +{ + int count = 0; + size_t off = 
0, slen; + int i, j; + char *ret = ERR_PTR(-EINVAL); + + DVCT_IN(); + while (off < size) { + j = sscanf(buf + off, "%d.%d: %n", desc_offset, offset, &i); + if (j < 2) + return ERR_PTR(-EINVAL); + off += i; + slen = 0; + while (off + slen < size) { + if (buf[off + slen] == ';' || buf[off + slen] == '\n') + break; + slen++; + } + + if (count == index) { + ret = kmalloc(slen+1, GFP_KERNEL); + if (!ret) + return ERR_PTR(-ENOMEM); + memcpy(ret, buf + off, slen); + ret[slen] = 0; + return ret; + } + off += slen; + if (buf[off] == ';' || buf[off] == '\n') + off++; + count++; + } + return ERR_PTR(-EINVAL); +} + +static ssize_t strings_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dvct_string_lookup *lk_s; + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + int ret = 0; + + DVCT_IN(); + if (!ds_dev->desc || !ds_dev->desc->dvc_spec + || !*ds_dev->desc->dvc_spec) + return sprintf(buf, "No Descriptors.\n"); + + if (!ds_dev->desc->lk_tbl) + return sprintf(buf, "No Strings.\n"); + + for (lk_s = ds_dev->desc->lk_tbl; lk_s->str && lk_s->id; lk_s++) { + int desc_offset, offset; + + /* + * Check if it fits, worst case is "Unknown(%p): %s\n" + * 8 + 16 + 3 + string length + 1 + */ + if ((ret + 28 + strlen(lk_s->str->s)) > PAGE_SIZE) { + dev_warn(dev, "Strings attribute page overrun\n"); + break; + } + + if (dvctrace_string_ptr_to_offset(ds_dev->desc->dvc_spec, + lk_s->id, &desc_offset, + &offset)) + ret += snprintf(buf + ret, PAGE_SIZE - ret, + "Unknown(%p): %s\n", lk_s->id, + lk_s->str->s); + else + ret += snprintf(buf + ret, PAGE_SIZE - ret, + "%d.%d: %s\n", desc_offset, offset, + lk_s->str->s); + } + return ret; +} + +static ssize_t strings_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) +{ + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + int count, i, ret; + + DVCT_IN(); + if (ds_dev->instance_taken) + return -EBUSY; + + count = count_strings(buf, size); + if (count <= 0) { + dev_err(dev, "Invalid input string:(%zu) %s\n", size, buf); + return -EINVAL; + } + + if (ds_dev->desc == &ds_dev->static_desc) { + dev_warn(dev, "Cannot set strings in static descriptors\n"); + return -EINVAL; + } + + if (ds_dev->desc->str.strings) + free_strings(ds_dev->desc); + + ret = alloc_strings(ds_dev->desc, count); + if (ret < 0) { + dev_err(dev, "Cannot allocate strings %d\n", ret); + return -EINVAL; + } + + for (i = 0; i < count; i++) { + char *tmp; + int d_off, off; + u8 *pid; + + tmp = get_string(buf, size, i, &d_off, &off); + if (IS_ERR_OR_NULL(tmp)) { + free_strings(ds_dev->desc); + return -EINVAL; + } + + pid = dvctrace_offset_to_string_ptr(ds_dev->desc->dvc_spec, + d_off, off); + if (IS_ERR_OR_NULL(pid)) { + dev_warn(dev, "String out of bounds\n"); + free_strings(ds_dev->desc); + return -EINVAL; + } + + ds_dev->desc->lk_tbl[i].id = pid; + ds_dev->desc->lk_tbl[i].str = &ds_dev->desc->str.strings[i]; + ds_dev->desc->str.strings[i].s = tmp; + } + return size; +} + +static DEVICE_ATTR_RW(strings); + +static ssize_t protocol_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + DVCT_IN(); + return sprintf(buf, "%d\n", dev_to_dvct_source_device(dev)->protocol); +} + +static ssize_t protocol_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) +{ + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + + DVCT_IN(); + if (ds_dev->instance_taken) + return -EBUSY; + + if (!kstrtou8(buf, 10, &ds_dev->protocol)) + return size; + + return 
-EINVAL; +} + +static DEVICE_ATTR_RW(protocol); + +static ssize_t status_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + + DVCT_IN(); + if (ds_dev->instance_taken) { + if (ds_dev->function_taken) + return sprintf(buf, "In use\n"); + else + return sprintf(buf, "Reserved\n"); + } else { + return sprintf(buf, "Free\n"); + } +} + +static DEVICE_ATTR_RO(status); + +static struct attribute *dvct_source_attrs[] = { + &dev_attr_protocol.attr, + &dev_attr_status.attr, + &dev_attr_strings.attr, + &dev_attr_descriptors.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(dvct_source); + + +static int dvct_match(struct device *dev, struct device_driver *drv) +{ + const char *devname = dev_name(dev); + + DVCT_IN(); + if (strlen(devname) <= strlen(drv->name)) + return -1; + if (strncmp(devname, drv->name, strlen(drv->name))) + return -1; + return devname[strlen(drv->name)] == '-'; +}; + +static struct bus_type dvctrace_bus_type = { + .name = "dvctrace", + .match = dvct_match, + .dev_groups = dvct_source_groups, +}; + +static struct device dvctrace_bus = { + .init_name = "dvctrace-bus", +}; + +static int dvct_match_free(struct device *dev, void *data) +{ + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + + DVCT_IN(); + return !ds_dev->instance_taken; +} + +struct dvct_source_device *dvct_source_find_by_name(const char *name) +{ + struct device *dev; + + DVCT_IN(); + dev = bus_find_device_by_name(&dvctrace_bus_type, NULL, name); + if (IS_ERR_OR_NULL(dev)) + return ERR_PTR(-ENODEV); + return dev_to_dvct_source_device(dev); +} +EXPORT_SYMBOL_GPL(dvct_source_find_by_name); + +struct dvct_source_device *dvct_source_find_free_by_name(const char *name) +{ + struct dvct_source_device *ds_dev = dvct_source_find_by_name(name); + + DVCT_IN(); + if (IS_ERR_OR_NULL(ds_dev)) + return ERR_PTR(-ENODEV); + + if (ds_dev->instance_taken) + return ERR_PTR(-EBUSY); + + return ds_dev; +} +EXPORT_SYMBOL_GPL(dvct_source_find_free_by_name); + +struct dvct_source_device *dvct_source_find_free(void) +{ + struct device *dev = bus_find_device(&dvctrace_bus_type, NULL, + NULL, dvct_match_free); + DVCT_IN(); + if (IS_ERR_OR_NULL(dev)) + return ERR_PTR(-ENODEV); + + return dev_to_dvct_source_device(dev); +} +EXPORT_SYMBOL_GPL(dvct_source_find_free); + +static int fn_count_free(struct device *dev, void *data) +{ + int *count = data; + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + + DVCT_IN(); + if (!ds_dev->instance_taken) + (*count)++; + return 0; +} + +int dvct_source_count_free(void) +{ + int count = 0; + + DVCT_IN(); + bus_for_each_dev(&dvctrace_bus_type, NULL, &count, fn_count_free); + return count; +} +EXPORT_SYMBOL_GPL(dvct_source_count_free); + +struct dvct_source_driver +*dvct_source_get_drv(struct dvct_source_device *ds_dev) +{ + BUG_ON(ds_dev->device.driver == NULL); + return drv_to_dvct_source_driver(ds_dev->device.driver); +} +EXPORT_SYMBOL_GPL(dvct_source_get_drv); + +int dvct_source_device_add(struct dvct_source_device *ds_dev, + struct dvct_source_driver *ds_drv) +{ + int ret; + + DVCT_IN(); + if (!ds_dev) + return -ENODEV; + if (!ds_drv) + return -EINVAL; + + spin_lock_init(&ds_dev->lock); + spin_lock(&ds_dev->lock); + ds_dev->instance_taken = 0; + ds_dev->function_taken = 0; + spin_unlock(&ds_dev->lock); + + device_initialize(&ds_dev->device); + ds_dev->device.bus = &dvctrace_bus_type; + + if (!ds_dev->device.parent) + ds_dev->device.parent = &dvctrace_bus; + + dev_set_name(&ds_dev->device, 
"%s-%s", ds_drv->driver.name, + ds_dev->name_add); + + ret = device_add(&ds_dev->device); + if (ret) { + dev_err(&dvctrace_bus, "Cannot add device %s %d\n", + ds_dev->name_add, ret); + return ret; + } + + if (ds_dev->static_desc.dvc_spec) + ds_dev->desc = &ds_dev->static_desc; + + dev_notice(&dvctrace_bus, "Adding device %s\n", ds_dev->name_add); + return 0; +}; +EXPORT_SYMBOL_GPL(dvct_source_device_add); + +void dvct_source_device_del(struct dvct_source_device *ds_dev) +{ + DVCT_IN(); + + if (ds_dev->desc && ds_dev->desc != &ds_dev->static_desc) { + free_descriptors(ds_dev->desc); + ds_dev->desc = NULL; + } + + device_del(&ds_dev->device); +}; +EXPORT_SYMBOL_GPL(dvct_source_device_del); + +int __dvct_source_driver_register(struct dvct_source_driver *ds_drv, + struct module *owner) +{ + DVCT_IN(); + if (!ds_drv->activate || + !ds_drv->binded || + !ds_drv->start_transfer || + !ds_drv->stop_transfer || + !ds_drv->unbinded || + !ds_drv->deactivate) + return -EINVAL; + + ds_drv->driver.owner = owner; + ds_drv->driver.bus = &dvctrace_bus_type; + return driver_register(&ds_drv->driver); +} +EXPORT_SYMBOL_GPL(__dvct_source_driver_register); + +void dvct_source_driver_unregister(struct dvct_source_driver *ds_drv) +{ + DVCT_IN(); + driver_unregister(&ds_drv->driver); +} +EXPORT_SYMBOL_GPL(dvct_source_driver_unregister); + +static int __init dtb_init(void) +{ + int ret; + + DVCT_IN(); + ret = device_register(&dvctrace_bus); + if (ret) { + pr_err("Cannot register bus device %d\n", ret); + return ret; + } + + ret = bus_register(&dvctrace_bus_type); + if (ret) { + pr_err("Cannot register bus %d\n", ret); + return ret; + } + return 0; +} + +static void __exit dtb_exit(void) +{ + DVCT_IN(); + bus_unregister(&dvctrace_bus_type); +} + +subsys_initcall(dtb_init); +module_exit(dtb_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DvC-Trace bus implementation"); +MODULE_AUTHOR("Traian Schiau "); diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 328ca93781cf..1b76d9585902 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = { .match = sunxi_rsb_device_match, .probe = sunxi_rsb_device_probe, .remove = sunxi_rsb_device_remove, + .uevent = of_device_uevent_modalias, }; static void sunxi_rsb_dev_release(struct device *dev) diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index e36d160c458f..bfc566d3f31a 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -1152,9 +1152,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, cd_dbg(CD_OPEN, "entering cdrom_open\n"); - /* open is event synchronization point, check events first */ - check_disk_change(bdev); - /* if this was a O_NONBLOCK open and we should honor the flags, * do a quick open without drive/disc integrity checks. 
*/ cdi->use_count++; @@ -2374,7 +2371,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi, if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT) return media_changed(cdi, 1); - if ((unsigned int)arg >= cdi->capacity) + if (arg >= cdi->capacity) return -EINVAL; info = kmalloc(sizeof(*info), GFP_KERNEL); diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 6495b03f576c..ae3a7537cf0f 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -497,6 +497,9 @@ static const struct cdrom_device_ops gdrom_ops = { static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode) { int ret; + + check_disk_change(bdev); + mutex_lock(&gdrom_mutex); ret = cdrom_open(gd.cd_info, bdev, mode); mutex_unlock(&gdrom_mutex); diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index c28dca0c613d..0d85d55fcbc6 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -588,5 +588,7 @@ config TILE_SROM source "drivers/char/xillybus/Kconfig" +source "drivers/char/rpmb/Kconfig" + endmenu diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 7dc3abe66464..f11b33e0c8ae 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -60,3 +60,5 @@ js-rtc-y = rtc.o obj-$(CONFIG_TILE_SROM) += tile-srom.o obj-$(CONFIG_XILLYBUS) += xillybus/ obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o +obj-$(CONFIG_ACRN_VHM) += vhm/ +obj-$(CONFIG_RPMB) += rpmb/ diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 9b6b6023193b..dde7caac7f9f 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -872,6 +872,8 @@ void intel_gtt_insert_sg_entries(struct sg_table *st, } } wmb(); + if (intel_private.driver->chipset_flush) + intel_private.driver->chipset_flush(); } EXPORT_SYMBOL(intel_gtt_insert_sg_entries); diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index c381c8e396fc..79d8c84693a1 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c @@ -195,7 +195,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty return 0; } -int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) +static int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) { size_t i; u32 *gp; @@ -470,7 +470,7 @@ static int uninorth_free_gatt_table(struct agp_bridge_data *bridge) return 0; } -void null_cache_flush(void) +static void null_cache_flush(void) { mb(); } diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c index 63d84e6f1891..83c695938a2d 100644 --- a/drivers/char/hw_random/stm32-rng.c +++ b/drivers/char/hw_random/stm32-rng.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #define RNG_CR 0x00 @@ -46,6 +47,7 @@ struct stm32_rng_private { struct hwrng rng; void __iomem *base; struct clk *clk; + struct reset_control *rst; }; static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) @@ -140,6 +142,13 @@ static int stm32_rng_probe(struct platform_device *ofdev) if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk); + priv->rst = devm_reset_control_get(&ofdev->dev, NULL); + if (!IS_ERR(priv->rst)) { + reset_control_assert(priv->rst); + udelay(2); + reset_control_deassert(priv->rst); + } + dev_set_drvdata(dev, priv); priv->rng.name = dev_driver_string(dev), diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index d1f5bb534e0e..6e9df558325b 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ 
-162,7 +162,7 @@ static int via_rng_init(struct hwrng *rng) /* Enable secondary noise source on CPUs where it is present. */ /* Nehemiah stepping 8 and higher */ - if ((c->x86_model == 9) && (c->x86_mask > 7)) + if ((c->x86_model == 9) && (c->x86_stepping > 7)) lo |= VIA_NOISESRC2; /* Esther */ diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 3fa2f8a009b3..b89df66ea1ae 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -184,7 +184,26 @@ static int virtrng_freeze(struct virtio_device *vdev) static int virtrng_restore(struct virtio_device *vdev) { - return probe_common(vdev); + int err; + + err = probe_common(vdev); + if (!err) { + struct virtrng_info *vi = vdev->priv; + + /* + * Set hwrng_removed to ensure that virtio_read() + * does not block waiting for data before the + * registration is complete. + */ + vi->hwrng_removed = true; + err = hwrng_register(&vi->hwrng); + if (!err) { + vi->hwrng_register_done = true; + vi->hwrng_removed = false; + } + } + + return err; } #endif diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c index feafdab734ae..4835b588b783 100644 --- a/drivers/char/ipmi/ipmi_bt_sm.c +++ b/drivers/char/ipmi/ipmi_bt_sm.c @@ -522,11 +522,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ BT_CONTROL(BT_H_BUSY); + bt->timeout = bt->BT_CAP_req2rsp; + /* Read BT capabilities if it hasn't been done yet */ if (!bt->BT_CAP_outreqs) BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN, SI_SM_CALL_WITHOUT_DELAY); - bt->timeout = bt->BT_CAP_req2rsp; BT_SI_SM_RETURN(SI_SM_IDLE); case BT_STATE_XACTION_START: diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c index 2059f79d669a..c3a23ec3e76f 100644 --- a/drivers/char/ipmi/ipmi_dmi.c +++ b/drivers/char/ipmi/ipmi_dmi.c @@ -81,7 +81,10 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr, pr_err("ipmi:dmi: Error allocation IPMI platform device"); return; } - pdev->driver_override = override; + pdev->driver_override = kasprintf(GFP_KERNEL, "%s", + override); + if (!pdev->driver_override) + goto err; if (type == IPMI_DMI_TYPE_SSIF) goto add_properties; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 810b138f5897..c82d9fd2f05a 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -4030,7 +4030,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, } static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, - struct list_head *timeouts, long timeout_period, + struct list_head *timeouts, + unsigned long timeout_period, int slot, unsigned long *flags, unsigned int *waiting_msgs) { @@ -4043,8 +4044,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, if (!ent->inuse) return; - ent->timeout -= timeout_period; - if (ent->timeout > 0) { + if (timeout_period < ent->timeout) { + ent->timeout -= timeout_period; (*waiting_msgs)++; return; } @@ -4110,7 +4111,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, } } -static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period) +static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, + unsigned long timeout_period) { struct list_head timeouts; struct ipmi_recv_msg *msg, *msg2; diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c index b338a4becbf8..845efa0f724f 100644 --- 
a/drivers/char/ipmi/ipmi_powernv.c +++ b/drivers/char/ipmi/ipmi_powernv.c @@ -251,8 +251,9 @@ static int ipmi_powernv_probe(struct platform_device *pdev) ipmi->irq = opal_event_request(prop); } - if (request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH, - "opal-ipmi", ipmi)) { + rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH, + "opal-ipmi", ipmi); + if (rc) { dev_warn(dev, "Unable to request irq\n"); goto err_dispose; } diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c index 9f2e3be2c5b8..676c910e990f 100644 --- a/drivers/char/ipmi/ipmi_poweroff.c +++ b/drivers/char/ipmi/ipmi_poweroff.c @@ -66,7 +66,7 @@ static void (*specific_poweroff_func)(ipmi_user_t user); /* Holds the old poweroff function so we can restore it on removal. */ static void (*old_poweroff_func)(void); -static int set_param_ifnum(const char *val, struct kernel_param *kp) +static int set_param_ifnum(const char *val, const struct kernel_param *kp) { int rv = param_set_int(val, kp); if (rv) diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 36f47e8d06a3..9abc067f5799 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -242,6 +242,9 @@ struct smi_info { /* The timer for this si. */ struct timer_list si_timer; + /* This flag is set, if the timer can be set */ + bool timer_can_start; + /* This flag is set, if the timer is running (timer_pending() isn't enough) */ bool timer_running; @@ -417,6 +420,8 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) { + if (!smi_info->timer_can_start) + return; smi_info->last_timeout_jiffies = jiffies; mod_timer(&smi_info->si_timer, new_val); smi_info->timer_running = true; @@ -436,21 +441,18 @@ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg, smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); } -static void start_check_enables(struct smi_info *smi_info, bool start_timer) +static void start_check_enables(struct smi_info *smi_info) { unsigned char msg[2]; msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; - if (start_timer) - start_new_msg(smi_info, msg, 2); - else - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); + start_new_msg(smi_info, msg, 2); smi_info->si_state = SI_CHECKING_ENABLES; } -static void start_clear_flags(struct smi_info *smi_info, bool start_timer) +static void start_clear_flags(struct smi_info *smi_info) { unsigned char msg[3]; @@ -459,10 +461,7 @@ static void start_clear_flags(struct smi_info *smi_info, bool start_timer) msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; msg[2] = WDT_PRE_TIMEOUT_INT; - if (start_timer) - start_new_msg(smi_info, msg, 3); - else - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); + start_new_msg(smi_info, msg, 3); smi_info->si_state = SI_CLEARING_FLAGS; } @@ -497,11 +496,11 @@ static void start_getting_events(struct smi_info *smi_info) * Note that we cannot just use disable_irq(), since the interrupt may * be shared. 
*/ -static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer) +static inline bool disable_si_irq(struct smi_info *smi_info) { if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { smi_info->interrupt_disabled = true; - start_check_enables(smi_info, start_timer); + start_check_enables(smi_info); return true; } return false; @@ -511,7 +510,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info) { if ((smi_info->irq) && (smi_info->interrupt_disabled)) { smi_info->interrupt_disabled = false; - start_check_enables(smi_info, true); + start_check_enables(smi_info); return true; } return false; @@ -529,7 +528,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info) msg = ipmi_alloc_smi_msg(); if (!msg) { - if (!disable_si_irq(smi_info, true)) + if (!disable_si_irq(smi_info)) smi_info->si_state = SI_NORMAL; } else if (enable_si_irq(smi_info)) { ipmi_free_smi_msg(msg); @@ -545,7 +544,7 @@ static void handle_flags(struct smi_info *smi_info) /* Watchdog pre-timeout */ smi_inc_stat(smi_info, watchdog_pretimeouts); - start_clear_flags(smi_info, true); + start_clear_flags(smi_info); smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; if (smi_info->intf) ipmi_smi_watchdog_pretimeout(smi_info->intf); @@ -928,7 +927,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, * disable and messages disabled. */ if (smi_info->supports_event_msg_buff || smi_info->irq) { - start_check_enables(smi_info, true); + start_check_enables(smi_info); } else { smi_info->curr_msg = alloc_msg_handle_irq(smi_info); if (!smi_info->curr_msg) @@ -1235,6 +1234,7 @@ static int smi_start_processing(void *send_info, /* Set up the timer that drives the interface. */ setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi); + new_smi->timer_can_start = true; smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES); /* Try to claim any interrupts. */ @@ -1345,7 +1345,7 @@ static unsigned int num_slave_addrs; #define IPMI_MEM_ADDR_SPACE 1 static const char * const addr_space_to_str[] = { "i/o", "mem" }; -static int hotmod_handler(const char *val, struct kernel_param *kp); +static int hotmod_handler(const char *val, const struct kernel_param *kp); module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200); MODULE_PARM_DESC(hotmod, "Add and remove interfaces. 
See" @@ -1811,7 +1811,7 @@ static struct smi_info *smi_info_alloc(void) return info; } -static int hotmod_handler(const char *val, struct kernel_param *kp) +static int hotmod_handler(const char *val, const struct kernel_param *kp) { char *str = kstrdup(val, GFP_KERNEL); int rv; @@ -3416,15 +3416,17 @@ static void check_for_broken_irqs(struct smi_info *smi_info) check_set_rcv_irq(smi_info); } -static inline void wait_for_timer_and_thread(struct smi_info *smi_info) +static inline void stop_timer_and_thread(struct smi_info *smi_info) { if (smi_info->thread != NULL) kthread_stop(smi_info->thread); + + smi_info->timer_can_start = false; if (smi_info->timer_running) del_timer_sync(&smi_info->si_timer); } -static int is_new_interface(struct smi_info *info) +static struct smi_info *find_dup_si(struct smi_info *info) { struct smi_info *e; @@ -3439,24 +3441,36 @@ static int is_new_interface(struct smi_info *info) */ if (info->slave_addr && !e->slave_addr) e->slave_addr = info->slave_addr; - return 0; + return e; } } - return 1; + return NULL; } static int add_smi(struct smi_info *new_smi) { int rv = 0; + struct smi_info *dup; mutex_lock(&smi_infos_lock); - if (!is_new_interface(new_smi)) { - pr_info(PFX "%s-specified %s state machine: duplicate\n", - ipmi_addr_src_to_str(new_smi->addr_source), - si_to_str[new_smi->si_type]); - rv = -EBUSY; - goto out_err; + dup = find_dup_si(new_smi); + if (dup) { + if (new_smi->addr_source == SI_ACPI && + dup->addr_source == SI_SMBIOS) { + /* We prefer ACPI over SMBIOS. */ + dev_info(dup->dev, + "Removing SMBIOS-specified %s state machine in favor of ACPI\n", + si_to_str[new_smi->si_type]); + cleanup_one_si(dup); + } else { + dev_info(new_smi->dev, + "%s-specified %s state machine: duplicate\n", + ipmi_addr_src_to_str(new_smi->addr_source), + si_to_str[new_smi->si_type]); + rv = -EBUSY; + goto out_err; + } } pr_info(PFX "Adding %s-specified %s state machine\n", @@ -3593,7 +3607,7 @@ static int try_smi_init(struct smi_info *new_smi) * Start clearing the flags before we enable interrupts or the * timer to avoid racing with the timer. */ - start_clear_flags(new_smi, false); + start_clear_flags(new_smi); /* * IRQ is defined to be set when non-zero. req_events will @@ -3662,7 +3676,7 @@ static int try_smi_init(struct smi_info *new_smi) return 0; out_err_stop_timer: - wait_for_timer_and_thread(new_smi); + stop_timer_and_thread(new_smi); out_err: new_smi->interrupt_disabled = true; @@ -3854,7 +3868,7 @@ static void cleanup_one_si(struct smi_info *to_clean) */ if (to_clean->irq_cleanup) to_clean->irq_cleanup(to_clean); - wait_for_timer_and_thread(to_clean); + stop_timer_and_thread(to_clean); /* * Timeouts are stopped, now make sure the interrupts are off @@ -3865,7 +3879,8 @@ static void cleanup_one_si(struct smi_info *to_clean) poll(to_clean); schedule_timeout_uninterruptible(1); } - disable_si_irq(to_clean, false); + if (to_clean->handlers) + disable_si_irq(to_clean); while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { poll(to_clean); schedule_timeout_uninterruptible(1); diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 0aea3bcb6158..6f2eaba1cd6a 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -763,7 +763,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, ssif_info->ssif_state = SSIF_NORMAL; ipmi_ssif_unlock_cond(ssif_info, flags); pr_warn(PFX "Error getting flags: %d %d, %x\n", - result, len, data[2]); + result, len, (len >= 3) ? 
data[2] : 0); } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || data[1] != IPMI_GET_MSG_FLAGS_CMD) { /* @@ -785,7 +785,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, if ((result < 0) || (len < 3) || (data[2] != 0)) { /* Error clearing flags */ pr_warn(PFX "Error clearing flags: %d %d, %x\n", - result, len, data[2]); + result, len, (len >= 3) ? data[2] : 0); } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) { pr_warn(PFX "Invalid response clearing flags: %x %x\n", diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 970e1242a282..f11224a5dc5c 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -107,6 +107,8 @@ static ssize_t read_mem(struct file *file, char __user *buf, phys_addr_t p = *ppos; ssize_t read, sz; void *ptr; + char *bounce; + int err; if (p != *ppos) return 0; @@ -129,15 +131,22 @@ static ssize_t read_mem(struct file *file, char __user *buf, } #endif + bounce = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!bounce) + return -ENOMEM; + while (count > 0) { unsigned long remaining; - int allowed; + int allowed, probe; sz = size_inside_page(p, count); + err = -EPERM; allowed = page_is_allowed(p >> PAGE_SHIFT); if (!allowed) - return -EPERM; + goto failed; + + err = -EFAULT; if (allowed == 2) { /* Show zeros for restricted memory. */ remaining = clear_user(buf, sz); @@ -149,24 +158,32 @@ static ssize_t read_mem(struct file *file, char __user *buf, */ ptr = xlate_dev_mem_ptr(p); if (!ptr) - return -EFAULT; - - remaining = copy_to_user(buf, ptr, sz); + goto failed; + probe = probe_kernel_read(bounce, ptr, sz); unxlate_dev_mem_ptr(p, ptr); + if (probe) + goto failed; + + remaining = copy_to_user(buf, bounce, sz); } if (remaining) - return -EFAULT; + goto failed; buf += sz; p += sz; count -= sz; read += sz; } + kfree(bounce); *ppos += read; return read; + +failed: + kfree(bounce); + return err; } static ssize_t write_mem(struct file *file, const char __user *buf, diff --git a/drivers/char/random.c b/drivers/char/random.c index 8ad92707e45f..ea4dbfa30657 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -259,9 +259,9 @@ #include #include #include -#include #include #include +#include #include #include #include @@ -428,8 +428,9 @@ struct crng_state primary_crng = { * its value (from 0->1->2). */ static int crng_init = 0; -#define crng_ready() (likely(crng_init > 0)) +#define crng_ready() (likely(crng_init > 1)) static int crng_init_cnt = 0; +static unsigned long crng_global_init_time = 0; #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA20_BLOCK_SIZE]); @@ -438,6 +439,16 @@ static void _crng_backtrack_protect(struct crng_state *crng, static void process_random_ready_list(void); static void _get_random_bytes(void *buf, int nbytes); +static struct ratelimit_state unseeded_warning = + RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3); +static struct ratelimit_state urandom_warning = + RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3); + +static int ratelimit_disable __read_mostly; + +module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); +MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); + /********************************************************************** * * OS independent entropy store. 
Here are the functions which handle @@ -733,7 +744,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits) static int credit_entropy_bits_safe(struct entropy_store *r, int nbits) { - const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1)); + const int nbits_max = r->poolinfo->poolwords * 32; if (nbits < 0) return -EINVAL; @@ -787,6 +798,43 @@ static void crng_initialize(struct crng_state *crng) crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; } +#ifdef CONFIG_NUMA +static void do_numa_crng_init(struct work_struct *work) +{ + int i; + struct crng_state *crng; + struct crng_state **pool; + + pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); + for_each_online_node(i) { + crng = kmalloc_node(sizeof(struct crng_state), + GFP_KERNEL | __GFP_NOFAIL, i); + spin_lock_init(&crng->lock); + crng_initialize(crng); + pool[i] = crng; + } + mb(); + if (cmpxchg(&crng_node_pool, NULL, pool)) { + for_each_node(i) + kfree(pool[i]); + kfree(pool); + } +} + +static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init); + +static void numa_crng_init(void) +{ + schedule_work(&numa_crng_init_work); +} +#else +static void numa_crng_init(void) {} +#endif + +/* + * crng_fast_load() can be called by code in the interrupt service + * path. So we can't afford to dilly-dally. + */ static int crng_fast_load(const char *cp, size_t len) { unsigned long flags; @@ -794,7 +842,7 @@ static int crng_fast_load(const char *cp, size_t len) if (!spin_trylock_irqsave(&primary_crng.lock, flags)) return 0; - if (crng_ready()) { + if (crng_init != 0) { spin_unlock_irqrestore(&primary_crng.lock, flags); return 0; } @@ -813,6 +861,51 @@ static int crng_fast_load(const char *cp, size_t len) return 1; } +/* + * crng_slow_load() is called by add_device_randomness, which has two + * attributes. (1) We can't trust that the buffer passed to it is + * guaranteed to be unpredictable (so it might not have any entropy at + * all), and (2) it doesn't have the performance constraints of + * crng_fast_load(). + * + * So we do something more comprehensive which is guaranteed to touch + * all of the primary_crng's state, and which uses an LFSR with a + * period of 255 as part of the mixing algorithm. Finally, we do + * *not* advance crng_init_cnt since the buffer we get may be something + * like a fixed DMI table (for example), which might very well be + * unique to the machine, but is otherwise unvarying.
+ */ +static int crng_slow_load(const char *cp, size_t len) +{ + unsigned long flags; + static unsigned char lfsr = 1; + unsigned char tmp; + unsigned i, max = CHACHA20_KEY_SIZE; + const char * src_buf = cp; + char * dest_buf = (char *) &primary_crng.state[4]; + + if (!spin_trylock_irqsave(&primary_crng.lock, flags)) + return 0; + if (crng_init != 0) { + spin_unlock_irqrestore(&primary_crng.lock, flags); + return 0; + } + if (len > max) + max = len; + + for (i = 0; i < max ; i++) { + tmp = lfsr; + lfsr >>= 1; + if (tmp & 1) + lfsr ^= 0xE1; + tmp = dest_buf[i % CHACHA20_KEY_SIZE]; + dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr; + lfsr += (tmp << 3) | (tmp >> 5); + } + spin_unlock_irqrestore(&primary_crng.lock, flags); + return 1; +} + static void crng_reseed(struct crng_state *crng, struct entropy_store *r) { unsigned long flags; @@ -831,7 +924,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) _crng_backtrack_protect(&primary_crng, buf.block, CHACHA20_KEY_SIZE); } - spin_lock_irqsave(&primary_crng.lock, flags); + spin_lock_irqsave(&crng->lock, flags); for (i = 0; i < 8; i++) { unsigned long rv; if (!arch_get_random_seed_long(&rv) && @@ -841,13 +934,26 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) } memzero_explicit(&buf, sizeof(buf)); crng->init_time = jiffies; - spin_unlock_irqrestore(&primary_crng.lock, flags); + spin_unlock_irqrestore(&crng->lock, flags); if (crng == &primary_crng && crng_init < 2) { invalidate_batched_entropy(); + numa_crng_init(); crng_init = 2; process_random_ready_list(); wake_up_interruptible(&crng_init_wait); pr_notice("random: crng init done\n"); + if (unseeded_warning.missed) { + pr_notice("random: %d get_random_xx warning(s) missed " + "due to ratelimiting\n", + unseeded_warning.missed); + unseeded_warning.missed = 0; + } + if (urandom_warning.missed) { + pr_notice("random: %d urandom warning(s) missed " + "due to ratelimiting\n", + urandom_warning.missed); + urandom_warning.missed = 0; + } } } @@ -856,8 +962,9 @@ static void _extract_crng(struct crng_state *crng, { unsigned long v, flags; - if (crng_init > 1 && - time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)) + if (crng_ready() && + (time_after(crng_global_init_time, crng->init_time) || + time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))) crng_reseed(crng, crng == &primary_crng ? 
&input_pool : NULL); spin_lock_irqsave(&crng->lock, flags); if (arch_get_random_long(&v)) @@ -982,10 +1089,8 @@ void add_device_randomness(const void *buf, unsigned int size) unsigned long time = random_get_entropy() ^ jiffies; unsigned long flags; - if (!crng_ready()) { - crng_fast_load(buf, size); - return; - } + if (!crng_ready() && size) + crng_slow_load(buf, size); trace_add_device_randomness(size, _RET_IP_); spin_lock_irqsave(&input_pool.lock, flags); @@ -1142,7 +1247,7 @@ void add_interrupt_randomness(int irq, int irq_flags) fast_mix(fast_pool); add_interrupt_bench(cycles); - if (!crng_ready()) { + if (unlikely(crng_init == 0)) { if ((fast_pool->count >= 64) && crng_fast_load((char *) fast_pool->pool, sizeof(fast_pool->pool))) { @@ -1492,8 +1597,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM print_once = true; #endif - pr_notice("random: %s called from %pS with crng_init=%d\n", - func_name, caller, crng_init); + if (__ratelimit(&unseeded_warning)) + pr_notice("random: %s called from %pS with crng_init=%d\n", + func_name, caller, crng_init); } /* @@ -1683,28 +1789,14 @@ static void init_std_data(struct entropy_store *r) */ static int rand_initialize(void) { -#ifdef CONFIG_NUMA - int i; - struct crng_state *crng; - struct crng_state **pool; -#endif - init_std_data(&input_pool); init_std_data(&blocking_pool); crng_initialize(&primary_crng); - -#ifdef CONFIG_NUMA - pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); - for_each_online_node(i) { - crng = kmalloc_node(sizeof(struct crng_state), - GFP_KERNEL | __GFP_NOFAIL, i); - spin_lock_init(&crng->lock); - crng_initialize(crng); - pool[i] = crng; + crng_global_init_time = jiffies; + if (ratelimit_disable) { + urandom_warning.interval = 0; + unseeded_warning.interval = 0; } - mb(); - crng_node_pool = pool; -#endif return 0; } early_initcall(rand_initialize); @@ -1772,9 +1864,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) if (!crng_ready() && maxwarn > 0) { maxwarn--; - printk(KERN_NOTICE "random: %s: uninitialized urandom read " - "(%zd bytes read)\n", - current->comm, nbytes); + if (__ratelimit(&urandom_warning)) + printk(KERN_NOTICE "random: %s: uninitialized " + "urandom read (%zd bytes read)\n", + current->comm, nbytes); spin_lock_irqsave(&primary_crng.lock, flags); crng_init_cnt = 0; spin_unlock_irqrestore(&primary_crng.lock, flags); @@ -1804,14 +1897,22 @@ static int write_pool(struct entropy_store *r, const char __user *buffer, size_t count) { size_t bytes; - __u32 buf[16]; + __u32 t, buf[16]; const char __user *p = buffer; while (count > 0) { + int b, i = 0; + bytes = min(count, sizeof(buf)); if (copy_from_user(&buf, p, bytes)) return -EFAULT; + for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) { + if (!arch_get_random_int(&t)) + break; + buf[i] ^= t; + } + count -= bytes; p += bytes; @@ -1878,6 +1979,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) input_pool.entropy_count = 0; blocking_pool.entropy_count = 0; return 0; + case RNDRESEEDCRNG: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (crng_init < 2) + return -ENODATA; + crng_reseed(&primary_crng, NULL); + crng_global_init_time = jiffies - 1; + return 0; default: return -EINVAL; } @@ -2215,7 +2324,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, { struct entropy_store *poolp = &input_pool; - if (!crng_ready()) { + if (unlikely(crng_init == 0)) { crng_fast_load(buffer, count); return; } 
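The RNDRESEEDCRNG ioctl added above lets privileged userspace force an immediate reseed of the primary CRNG from the input pool; backdating crng_global_init_time afterwards makes every per-node CRNG reseed on its next use, since _extract_crng() now compares each crng->init_time against it. A minimal userspace sketch follows; it is an illustration, not part of the patch, and assumes a kernel carrying this change, where <linux/random.h> defines RNDRESEEDCRNG:

/* reseed_crng.c - hedged sketch; RNDRESEEDCRNG is assumed to be exported
 * by <linux/random.h> on a kernel with the ioctl added above. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/random.h>

int main(void)
{
	int fd = open("/dev/urandom", O_RDWR);

	if (fd < 0) {
		perror("open /dev/urandom");
		return 1;
	}
	/* Per the patch: requires CAP_SYS_ADMIN and fails with ENODATA
	 * while crng_init < 2 (CRNG not yet fully initialized). */
	if (ioctl(fd, RNDRESEEDCRNG) < 0) {
		perror("RNDRESEEDCRNG");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}

Both /dev/random and /dev/urandom route to random_ioctl(), so either node works here; the reseed takes effect immediately for the primary CRNG and lazily for the NUMA instances.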
diff --git a/drivers/char/rpmb/Kconfig b/drivers/char/rpmb/Kconfig new file mode 100644 index 000000000000..48f11c19bbda --- /dev/null +++ b/drivers/char/rpmb/Kconfig @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: GPL-2.0 +config RPMB + tristate "RPMB partition interface" + help + Unified RPMB partition interface for eMMC and UFS. + Provides an interface for in-kernel security controllers to + access the RPMB partition. + + If unsure, select N. + +config RPMB_INTF_DEV + bool "RPMB character device interface /dev/rpmbN" + depends on RPMB + help + Say yes here if you want to access RPMB from user space + via the character device interface /dev/rpmb%d. + +config RPMB_SIM + tristate "RPMB partition device simulator" + default n + select RPMB + select CRYPTO_SHA256 + select CRYPTO_HMAC + help + The RPMB partition simulation device is a virtual device that + simulates the RPMB protocol and uses kernel memory + as storage. + + Be aware it doesn't promise any real security. This driver is + suitable only for testing of the RPMB subsystem or RPMB applications + prior to RPMB key provisioning. + Most people should say N here. + +config VIRTIO_RPMB + tristate "Virtio RPMB character device interface /dev/vrpmb" + default n + depends on VIRTIO + select RPMB + help + Say yes here if you want to access virtio RPMB from user space + via the character device interface /dev/vrpmb. + This device interface is only for the guest/frontend virtio driver. diff --git a/drivers/char/rpmb/Makefile b/drivers/char/rpmb/Makefile new file mode 100644 index 000000000000..281c012712ca --- /dev/null +++ b/drivers/char/rpmb/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_RPMB) += rpmb.o +rpmb-objs += core.o +rpmb-$(CONFIG_RPMB_INTF_DEV) += cdev.o +obj-$(CONFIG_RPMB_SIM) += rpmb_sim.o +obj-$(CONFIG_VIRTIO_RPMB) += virtio_rpmb.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/char/rpmb/cdev.c b/drivers/char/rpmb/cdev.c new file mode 100644 index 000000000000..028c7ecd2ac7 --- /dev/null +++ b/drivers/char/rpmb/cdev.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#include + +#include "rpmb-cdev.h" + +static dev_t rpmb_devt; +#define RPMB_MAX_DEVS MINORMASK + +#define RPMB_DEV_OPEN 0 /* single open bit (position) */ +/* from MMC_IOC_MAX_CMDS */ +#define RPMB_MAX_FRAMES 255 + +/** + * rpmb_open - the open function + * + * @inode: pointer to inode structure + * @fp: pointer to file structure + * + * Return: 0 on success, <0 on error + */ +static int rpmb_open(struct inode *inode, struct file *fp) +{ + struct rpmb_dev *rdev; + + rdev = container_of(inode->i_cdev, struct rpmb_dev, cdev); + if (!rdev) + return -ENODEV; + + /* the rpmb is single open! */ + if (test_and_set_bit(RPMB_DEV_OPEN, &rdev->status)) + return -EBUSY; + + mutex_lock(&rdev->lock); + + fp->private_data = rdev; + + mutex_unlock(&rdev->lock); + + return nonseekable_open(inode, fp); +} + +/** + * rpmb_release - the cdev release function + * + * @inode: pointer to inode structure + * @fp: pointer to file structure + * + * Return: 0 always.
+ */ +static int rpmb_release(struct inode *inode, struct file *fp) +{ + struct rpmb_dev *rdev = fp->private_data; + + clear_bit(RPMB_DEV_OPEN, &rdev->status); + + return 0; +} + +static size_t rpmb_ioc_frames_len(struct rpmb_dev *rdev, size_t nframes) +{ + if (rdev->ops->type == RPMB_TYPE_NVME) + return rpmb_ioc_frames_len_nvme(nframes); + else + return rpmb_ioc_frames_len_jdec(nframes); +} + +/** + * rpmb_cmd_copy_from_user - copy rpmb command from the user space + * + * @rdev: rpmb device + * @cmd: internal cmd structure + * @ucmd: user space cmd structure + * + * Return: 0 on success, <0 on error + */ +static int rpmb_cmd_copy_from_user(struct rpmb_dev *rdev, + struct rpmb_cmd *cmd, + struct rpmb_ioc_cmd __user *ucmd) +{ + void *frames; + u64 frames_ptr; + + if (get_user(cmd->flags, &ucmd->flags)) + return -EFAULT; + + if (get_user(cmd->nframes, &ucmd->nframes)) + return -EFAULT; + + if (cmd->nframes > RPMB_MAX_FRAMES) + return -EOVERFLOW; + + /* some archs have issues with 64bit get_user */ + if (copy_from_user(&frames_ptr, &ucmd->frames_ptr, sizeof(frames_ptr))) + return -EFAULT; + + frames = memdup_user(u64_to_user_ptr(frames_ptr), + rpmb_ioc_frames_len(rdev, cmd->nframes)); + if (IS_ERR(frames)) + return PTR_ERR(frames); + + cmd->frames = frames; + return 0; +} + +/** + * rpmb_cmd_copy_to_user - copy rpmb command to the user space + * + * @rdev: rpmb device + * @ucmd: user space cmd structure + * @cmd: internal cmd structure + * + * Return: 0 on success, <0 on error + */ +static int rpmb_cmd_copy_to_user(struct rpmb_dev *rdev, + struct rpmb_ioc_cmd __user *ucmd, + struct rpmb_cmd *cmd) +{ + u64 frames_ptr; + + if (copy_from_user(&frames_ptr, &ucmd->frames_ptr, sizeof(frames_ptr))) + return -EFAULT; + + /* some archs have issues with 64bit get_user */ + if (copy_to_user(u64_to_user_ptr(frames_ptr), cmd->frames, + rpmb_ioc_frames_len(rdev, cmd->nframes))) + return -EFAULT; + + return 0; +} + +/** + * rpmb_ioctl_seq_cmd - issue an rpmb command sequence + * + * @rdev: rpmb device + * @ptr: rpmb cmd sequence + * + * RPMB_IOC_SEQ_CMD handler + * + * Return: 0 on success, <0 on error + */ +static long rpmb_ioctl_seq_cmd(struct rpmb_dev *rdev, + struct rpmb_ioc_seq_cmd __user *ptr) +{ + __u64 ncmds; + struct rpmb_cmd *cmds; + struct rpmb_ioc_cmd __user *ucmds; + + int i; + int ret; + + /* The caller must have CAP_SYS_RAWIO, like mmc ioctl */ + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + /* some archs have issues with 64bit get_user */ + if (copy_from_user(&ncmds, &ptr->num_of_cmds, sizeof(ncmds))) + return -EFAULT; + + if (ncmds > 3) { + dev_err(&rdev->dev, "supporting up to 3 packets (%llu)\n", + ncmds); + return -EINVAL; + } + + cmds = kcalloc(ncmds, sizeof(*cmds), GFP_KERNEL); + if (!cmds) + return -ENOMEM; + + ucmds = (struct rpmb_ioc_cmd __user *)ptr->cmds; + for (i = 0; i < ncmds; i++) { + ret = rpmb_cmd_copy_from_user(rdev, &cmds[i], &ucmds[i]); + if (ret) + goto out; + } + + ret = rpmb_cmd_seq(rdev, cmds, ncmds); + if (ret) + goto out; + + for (i = 0; i < ncmds; i++) { + ret = rpmb_cmd_copy_to_user(rdev, &ucmds[i], &cmds[i]); + if (ret) + goto out; + } +out: + for (i = 0; i < ncmds; i++) + kfree(cmds[i].frames); + kfree(cmds); + return ret; +} + +static long rpmb_ioctl_ver_cmd(struct rpmb_dev *rdev, + struct rpmb_ioc_ver_cmd __user *ptr) +{ + struct rpmb_ioc_ver_cmd ver = { + .api_version = RPMB_API_VERSION, + }; + + return copy_to_user(ptr, &ver, sizeof(ver)) ? 
-EFAULT : 0; +} + +static long rpmb_ioctl_cap_cmd(struct rpmb_dev *rdev, + struct rpmb_ioc_cap_cmd __user *ptr) +{ + struct rpmb_ioc_cap_cmd cap; + + cap.device_type = rdev->ops->type; + cap.target = rdev->target; + cap.block_size = rdev->ops->block_size; + cap.wr_cnt_max = rdev->ops->wr_cnt_max; + cap.rd_cnt_max = rdev->ops->rd_cnt_max; + cap.auth_method = rdev->ops->auth_method; + cap.capacity = rpmb_get_capacity(rdev); + cap.reserved = 0; + + return copy_to_user(ptr, &cap, sizeof(cap)) ? -EFAULT : 0; +} + +/** + * rpmb_ioctl - rpmb ioctl dispatcher + * + * @fp: a file pointer + * @cmd: ioctl command RPMB_IOC_SEQ_CMD RPMB_IOC_VER_CMD RPMB_IOC_CAP_CMD + * @arg: ioctl data: rpmb_ioc_ver_cmd rpmb_ioc_cap_cmd rpmb_ioc_seq_cmd + * + * Return: 0 on success; < 0 on error + */ +static long rpmb_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) +{ + struct rpmb_dev *rdev = fp->private_data; + void __user *ptr = (void __user *)arg; + + switch (cmd) { + case RPMB_IOC_VER_CMD: + return rpmb_ioctl_ver_cmd(rdev, ptr); + case RPMB_IOC_CAP_CMD: + return rpmb_ioctl_cap_cmd(rdev, ptr); + case RPMB_IOC_SEQ_CMD: + return rpmb_ioctl_seq_cmd(rdev, ptr); + default: + dev_err(&rdev->dev, "unsupported ioctl 0x%x.\n", cmd); + return -ENOIOCTLCMD; + } +} + +#ifdef CONFIG_COMPAT +static long rpmb_compat_ioctl(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + return rpmb_ioctl(fp, cmd, (unsigned long)compat_ptr(arg)); +} +#endif /* CONFIG_COMPAT */ + +static const struct file_operations rpmb_fops = { + .open = rpmb_open, + .release = rpmb_release, + .unlocked_ioctl = rpmb_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = rpmb_compat_ioctl, +#endif + .owner = THIS_MODULE, + .llseek = noop_llseek, +}; + +void rpmb_cdev_prepare(struct rpmb_dev *rdev) +{ + rdev->dev.devt = MKDEV(MAJOR(rpmb_devt), rdev->id); + rdev->cdev.owner = THIS_MODULE; + cdev_init(&rdev->cdev, &rpmb_fops); +} + +void rpmb_cdev_add(struct rpmb_dev *rdev) +{ + cdev_add(&rdev->cdev, rdev->dev.devt, 1); +} + +void rpmb_cdev_del(struct rpmb_dev *rdev) +{ + if (rdev->dev.devt) + cdev_del(&rdev->cdev); +} + +int __init rpmb_cdev_init(void) +{ + int ret; + + ret = alloc_chrdev_region(&rpmb_devt, 0, RPMB_MAX_DEVS, "rpmb"); + if (ret < 0) + pr_err("unable to allocate char dev region\n"); + + return ret; +} + +void __exit rpmb_cdev_exit(void) +{ + unregister_chrdev_region(rpmb_devt, RPMB_MAX_DEVS); +} diff --git a/drivers/char/rpmb/core.c b/drivers/char/rpmb/core.c new file mode 100644 index 000000000000..e02c12b8046c --- /dev/null +++ b/drivers/char/rpmb/core.c @@ -0,0 +1,457 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved. + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include "rpmb-cdev.h" + +static DEFINE_IDA(rpmb_ida); + +/** + * rpmb_dev_get - increase rpmb device ref counter + * + * @rdev: rpmb device + */ +struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev) +{ + return get_device(&rdev->dev) ?
rdev : NULL; +} +EXPORT_SYMBOL_GPL(rpmb_dev_get); + +/** + * rpmb_dev_put - decrease rpmb device ref counter + * + * @rdev: rpmb device + */ +void rpmb_dev_put(struct rpmb_dev *rdev) +{ + put_device(&rdev->dev); +} +EXPORT_SYMBOL_GPL(rpmb_dev_put); + +/** + * rpmb_cmd_fixup - fixup rpmb command + * + * @rdev: rpmb device + * @cmds: rpmb command list + * @ncmds: number of commands + * + */ +static void rpmb_cmd_fixup(struct rpmb_dev *rdev, + struct rpmb_cmd *cmds, u32 ncmds) +{ + int i; + + if (RPMB_TYPE_HW(rdev->ops->type) != RPMB_TYPE_EMMC) + return; + + /* Fixup RPMB_READ_DATA specific to eMMC + * The block count of the RPMB read operation is not indicated + * in the original RPMB Data Read Request packet. + * This is different from the implementation of other protocol + * standards. + */ + for (i = 0; i < ncmds; i++) { + struct rpmb_frame_jdec *frame = cmds[i].frames; + + if (frame->req_resp == cpu_to_be16(RPMB_READ_DATA)) { + dev_dbg(&rdev->dev, "Fixing up READ_DATA frame to block_count=0\n"); + frame->block_count = 0; + } + } +} + +/** + * rpmb_cmd_seq - send RPMB command sequence + * + * @rdev: rpmb device + * @cmds: rpmb command list + * @ncmds: number of commands + * + * Return: 0 on success + * -EINVAL on wrong parameters + * -EOPNOTSUPP if device doesn't support the requested operation + * < 0 if the operation fails + */ +int rpmb_cmd_seq(struct rpmb_dev *rdev, struct rpmb_cmd *cmds, u32 ncmds) +{ + int err; + + if (!rdev || !cmds || !ncmds) + return -EINVAL; + + mutex_lock(&rdev->lock); + err = -EOPNOTSUPP; + if (rdev->ops && rdev->ops->cmd_seq) { + rpmb_cmd_fixup(rdev, cmds, ncmds); + err = rdev->ops->cmd_seq(rdev->dev.parent, rdev->target, + cmds, ncmds); + } + mutex_unlock(&rdev->lock); + + return err; +} +EXPORT_SYMBOL_GPL(rpmb_cmd_seq); + +int rpmb_get_capacity(struct rpmb_dev *rdev) +{ + int err; + + if (!rdev) + return -EINVAL; + + mutex_lock(&rdev->lock); + err = -EOPNOTSUPP; + if (rdev->ops && rdev->ops->get_capacity) + err = rdev->ops->get_capacity(rdev->dev.parent, rdev->target); + mutex_unlock(&rdev->lock); + + return err; +} +EXPORT_SYMBOL_GPL(rpmb_get_capacity); + +static void rpmb_dev_release(struct device *dev) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + ida_simple_remove(&rpmb_ida, rdev->id); + kfree(rdev); +} + +struct class rpmb_class = { + .name = "rpmb", + .owner = THIS_MODULE, + .dev_release = rpmb_dev_release, +}; +EXPORT_SYMBOL(rpmb_class); + +/** + * rpmb_dev_find_device - return first matching rpmb device + * + * @data: data for the match function + * @match: the matching function + * + * Return: matching rpmb device or NULL on failure + */ +static +struct rpmb_dev *rpmb_dev_find_device(const void *data, + int (*match)(struct device *dev, + const void *data)) +{ + struct device *dev; + + dev = class_find_device(&rpmb_class, NULL, data, match); + + return dev ? to_rpmb_dev(dev) : NULL; +} + +static int match_by_type(struct device *dev, const void *data) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + const u32 *type = data; + + return (*type == RPMB_TYPE_ANY || rdev->ops->type == *type); +} + +/** + * rpmb_dev_get_by_type - return first registered rpmb device + * with matching type.
+ * If run with RPMB_TYPE_ANY, the first and probably only + * device is returned + * + * @type: rpmb underlying device type + * + * Return: matching rpmb device or NULL/ERR_PTR on failure + */ +struct rpmb_dev *rpmb_dev_get_by_type(u32 type) +{ + if (type > RPMB_TYPE_MAX) + return ERR_PTR(-EINVAL); + + return rpmb_dev_find_device(&type, match_by_type); +} +EXPORT_SYMBOL_GPL(rpmb_dev_get_by_type); + +struct device_with_target { + const struct device *dev; + u8 target; +}; + +static int match_by_parent(struct device *dev, const void *data) +{ + const struct device_with_target *d = data; + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + return (d->dev && dev->parent == d->dev && rdev->target == d->target); +} + +/** + * rpmb_dev_find_by_device - retrieve rpmb device from the parent device + * + * @parent: parent device of the rpmb device + * @target: RPMB target/region within the physical device + * + * Return: NULL if there is no rpmb device associated with the parent device + */ +struct rpmb_dev *rpmb_dev_find_by_device(struct device *parent, u8 target) +{ + struct device_with_target t; + + if (!parent) + return NULL; + + t.dev = parent; + t.target = target; + + return rpmb_dev_find_device(&t, match_by_parent); +} +EXPORT_SYMBOL_GPL(rpmb_dev_find_by_device); + +static ssize_t type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + const char *sim; + ssize_t ret; + + sim = (rdev->ops->type & RPMB_TYPE_SIM) ? ":SIM" : ""; + switch (RPMB_TYPE_HW(rdev->ops->type)) { + case RPMB_TYPE_EMMC: + ret = sprintf(buf, "EMMC%s\n", sim); + break; + case RPMB_TYPE_UFS: + ret = sprintf(buf, "UFS%s\n", sim); + break; + case RPMB_TYPE_NVME: + ret = sprintf(buf, "NVMe%s\n", sim); + break; + default: + ret = sprintf(buf, "UNKNOWN\n"); + break; + } + + return ret; +} +static DEVICE_ATTR_RO(type); + +static ssize_t id_read(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct rpmb_dev *rdev = to_rpmb_dev(dev); + size_t sz = min_t(size_t, rdev->ops->dev_id_len, PAGE_SIZE); + + if (!rdev->ops->dev_id) + return 0; + + return memory_read_from_buffer(buf, count, &off, rdev->ops->dev_id, sz); +} +static BIN_ATTR_RO(id, 0); + +static ssize_t wr_cnt_max_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + return sprintf(buf, "%u\n", rdev->ops->wr_cnt_max); +} +static DEVICE_ATTR_RO(wr_cnt_max); + +static ssize_t rd_cnt_max_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + return sprintf(buf, "%u\n", rdev->ops->rd_cnt_max); +} +static DEVICE_ATTR_RO(rd_cnt_max); + +static struct attribute *rpmb_attrs[] = { + &dev_attr_type.attr, + &dev_attr_wr_cnt_max.attr, + &dev_attr_rd_cnt_max.attr, + NULL, +}; + +static struct bin_attribute *rpmb_bin_attributes[] = { + &bin_attr_id, + NULL, +}; + +static struct attribute_group rpmb_attr_group = { + .attrs = rpmb_attrs, + .bin_attrs = rpmb_bin_attributes, +}; + +static const struct attribute_group *rpmb_attr_groups[] = { + &rpmb_attr_group, + NULL +}; + +/** + * rpmb_dev_unregister - unregister RPMB partition from the RPMB subsystem + * + * @rdev: the rpmb device to unregister + */ +int rpmb_dev_unregister(struct rpmb_dev *rdev) +{ + if (!rdev) + return -EINVAL; + + mutex_lock(&rdev->lock); + rpmb_cdev_del(rdev); + device_del(&rdev->dev); + mutex_unlock(&rdev->lock); + +
rpmb_dev_put(rdev); + + return 0; +} +EXPORT_SYMBOL_GPL(rpmb_dev_unregister); + +/** + * rpmb_dev_unregister_by_device - unregister RPMB partition + * from the RPMB subsystem + * + * @dev: the parent device of the rpmb device + * @target: RPMB target/region within the physical device + */ +int rpmb_dev_unregister_by_device(struct device *dev, u8 target) +{ + struct rpmb_dev *rdev; + + if (!dev) + return -EINVAL; + + rdev = rpmb_dev_find_by_device(dev, target); + if (!rdev) { + dev_warn(dev, "no disk found %s\n", dev_name(dev->parent)); + return -ENODEV; + } + + rpmb_dev_put(rdev); + + return rpmb_dev_unregister(rdev); +} +EXPORT_SYMBOL_GPL(rpmb_dev_unregister_by_device); + +/** + * rpmb_dev_get_drvdata - driver data getter + * + * @rdev: rpmb device + * + * Return: driver private data + */ +void *rpmb_dev_get_drvdata(const struct rpmb_dev *rdev) +{ + return dev_get_drvdata(&rdev->dev); +} +EXPORT_SYMBOL_GPL(rpmb_dev_get_drvdata); + +/** + * rpmb_dev_set_drvdata - driver data setter + * + * @rdev: rpmb device + * @data: data to store + */ +void rpmb_dev_set_drvdata(struct rpmb_dev *rdev, void *data) +{ + dev_set_drvdata(&rdev->dev, data); +} +EXPORT_SYMBOL_GPL(rpmb_dev_set_drvdata); + +/** + * rpmb_dev_register - register RPMB partition with the RPMB subsystem + * + * @dev: storage device of the rpmb device + * @target: RPMB target/region within the physical device + * @ops: device specific operations + */ +struct rpmb_dev *rpmb_dev_register(struct device *dev, u8 target, + const struct rpmb_ops *ops) +{ + struct rpmb_dev *rdev; + int id; + int ret; + + if (!dev || !ops) + return ERR_PTR(-EINVAL); + + if (!ops->cmd_seq) + return ERR_PTR(-EINVAL); + + if (!ops->get_capacity) + return ERR_PTR(-EINVAL); + + if (ops->type == RPMB_TYPE_ANY || ops->type > RPMB_TYPE_MAX) + return ERR_PTR(-EINVAL); + + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); + if (!rdev) + return ERR_PTR(-ENOMEM); + + id = ida_simple_get(&rpmb_ida, 0, 0, GFP_KERNEL); + if (id < 0) { + ret = id; + goto exit; + } + + mutex_init(&rdev->lock); + rdev->ops = ops; + rdev->id = id; + rdev->target = target; + + dev_set_name(&rdev->dev, "rpmb%d", id); + rdev->dev.class = &rpmb_class; + rdev->dev.parent = dev; + rdev->dev.groups = rpmb_attr_groups; + + rpmb_cdev_prepare(rdev); + + ret = device_register(&rdev->dev); + if (ret) + goto exit; + + rpmb_cdev_add(rdev); + + dev_dbg(&rdev->dev, "registered device\n"); + + return rdev; + +exit: + if (id >= 0) + ida_simple_remove(&rpmb_ida, id); + kfree(rdev); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(rpmb_dev_register); + +static int __init rpmb_init(void) +{ + ida_init(&rpmb_ida); + class_register(&rpmb_class); + return rpmb_cdev_init(); +} + +static void __exit rpmb_exit(void) +{ + rpmb_cdev_exit(); + class_unregister(&rpmb_class); + ida_destroy(&rpmb_ida); +} + +subsys_initcall(rpmb_init); +module_exit(rpmb_exit); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("RPMB class"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/char/rpmb/rpmb-cdev.h b/drivers/char/rpmb/rpmb-cdev.h new file mode 100644 index 000000000000..e59ff0c05e9d --- /dev/null +++ b/drivers/char/rpmb/rpmb-cdev.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* + * Copyright (C) 2015-2018 Intel Corp. 
All rights reserved + */ +#ifdef CONFIG_RPMB_INTF_DEV +int __init rpmb_cdev_init(void); +void __exit rpmb_cdev_exit(void); +void rpmb_cdev_prepare(struct rpmb_dev *rdev); +void rpmb_cdev_add(struct rpmb_dev *rdev); +void rpmb_cdev_del(struct rpmb_dev *rdev); +#else +static inline int __init rpmb_cdev_init(void) { return 0; } +static inline void __exit rpmb_cdev_exit(void) {} +static inline void rpmb_cdev_prepare(struct rpmb_dev *rdev) {} +static inline void rpmb_cdev_add(struct rpmb_dev *rdev) {} +static inline void rpmb_cdev_del(struct rpmb_dev *rdev) {} +#endif /* CONFIG_RPMB_INTF_DEV */ diff --git a/drivers/char/rpmb/rpmb_sim.c b/drivers/char/rpmb/rpmb_sim.c new file mode 100644 index 000000000000..728e25511377 --- /dev/null +++ b/drivers/char/rpmb/rpmb_sim.c @@ -0,0 +1,715 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#include + +static const char id[] = "RPMB:SIM"; +#define CAPACITY_UNIT SZ_128K +#define CAPACITY_MIN SZ_128K +#define CAPACITY_MAX SZ_16M +#define BLK_UNIT SZ_256 + +static unsigned int max_wr_blks = 2; +module_param(max_wr_blks, uint, 0644); +MODULE_PARM_DESC(max_wr_blks, "max blocks that can be written in a single command (default: 2)"); + +static unsigned int daunits = 1; +module_param(daunits, uint, 0644); +MODULE_PARM_DESC(daunits, "number of data area units of 128K (default: 1)"); + +struct blk { + u8 data[BLK_UNIT]; +}; + +/** + * struct rpmb_sim_dev + * + * @dev: back pointer device + * @rdev: rpmb device + * @auth_key: Authentication key register which is used to authenticate + * accesses when MAC is calculated; + * @auth_key_set: true if authentication key was set + * @write_counter: Counter value for the total amount of successful + * authenticated data write requests made by the host. + * The initial value of this register after production is 00000000h. + * The value will be incremented by one along with each successful + * programming access. The value cannot be reset. 
After the counter + * has reached the maximum value of FFFFFFFFh, + * it will not be incremented anymore (overflow prevention) + * @hash_desc: hmac(sha256) shash descriptor + * + * @res_frames: frame that holds the result of the last write operation + * @out_frames: next read operation result frames + * @out_frames_cnt: number of the output frames + * + * @capacity: size of the partition in bytes multiple of 128K + * @blkcnt: block count + * @da: data area in blocks + */ +struct rpmb_sim_dev { + struct device *dev; + struct rpmb_dev *rdev; + u8 auth_key[32]; + bool auth_key_set; + u32 write_counter; + struct shash_desc *hash_desc; + + struct rpmb_frame_jdec res_frames[1]; + struct rpmb_frame_jdec *out_frames; + unsigned int out_frames_cnt; + + size_t capacity; + size_t blkcnt; + struct blk *da; +}; + +static __be16 op_result(struct rpmb_sim_dev *rsdev, u16 result) +{ + if (!rsdev->auth_key_set) + return cpu_to_be16(RPMB_ERR_NO_KEY); + + if (rsdev->write_counter == 0xFFFFFFFF) + result |= RPMB_ERR_COUNTER_EXPIRED; + + return cpu_to_be16(result); +} + +static __be16 req_to_resp(u16 req) +{ + return cpu_to_be16(RPMB_REQ2RESP(req)); +} + +static int rpmb_sim_calc_hmac(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *frames, + unsigned int blks, u8 *mac) +{ + struct shash_desc *desc = rsdev->hash_desc; + int i; + int ret; + + ret = crypto_shash_init(desc); + if (ret) + goto out; + + for (i = 0; i < blks; i++) { + ret = crypto_shash_update(desc, frames[i].data, + rpmb_jdec_hmac_data_len); + if (ret) + goto out; + } + ret = crypto_shash_final(desc, mac); +out: + if (ret) + dev_err(rsdev->dev, "digest error = %d", ret); + + return ret; +} + +static int rpmb_op_not_programmed(struct rpmb_sim_dev *rsdev, u16 req) +{ + struct rpmb_frame_jdec *res_frame = rsdev->res_frames; + + res_frame->req_resp = req_to_resp(req); + res_frame->result = op_result(rsdev, RPMB_ERR_NO_KEY); + + rsdev->out_frames = res_frame; + rsdev->out_frames_cnt = 1; + + dev_err(rsdev->dev, "not programmed\n"); + + return 0; +} + +static int rpmb_op_program_key(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *in_frame, u32 cnt) +{ + struct rpmb_frame_jdec *res_frame = rsdev->res_frames; + struct crypto_shash *tfm = rsdev->hash_desc->tfm; + u16 req; + int ret; + u16 err = RPMB_ERR_OK; + + req = be16_to_cpu(in_frame[0].req_resp); + + if (req != RPMB_PROGRAM_KEY) + return -EINVAL; + + if (cnt != 1) { + dev_err(rsdev->dev, "wrong number of frames %d != 1\n", cnt); + return -EINVAL; + } + + if (rsdev->auth_key_set) { + dev_err(rsdev->dev, "key already set\n"); + err = RPMB_ERR_WRITE; + goto out; + } + + ret = crypto_shash_setkey(tfm, in_frame[0].key_mac, 32); + if (ret) { + dev_err(rsdev->dev, "set key failed = %d\n", ret); + err = RPMB_ERR_GENERAL; + goto out; + } + + dev_dbg(rsdev->dev, "digest size %u\n", crypto_shash_digestsize(tfm)); + + memcpy(rsdev->auth_key, in_frame[0].key_mac, 32); + rsdev->auth_key_set = true; +out: + + memset(res_frame, 0, sizeof(*res_frame)); + res_frame->req_resp = req_to_resp(req); + res_frame->result = op_result(rsdev, err); + + return 0; +} + +static int rpmb_op_get_wr_counter(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *in_frame, u32 cnt) +{ + struct rpmb_frame_jdec *frame; + int ret = 0; + u16 req; + u16 err; + + req = be16_to_cpu(in_frame[0].req_resp); + if (req != RPMB_GET_WRITE_COUNTER) + return -EINVAL; + + if (cnt != 1) { + dev_err(rsdev->dev, "wrong number of frames %d != 1\n", cnt); + return -EINVAL; + } + + frame = kcalloc(1, sizeof(*frame), GFP_KERNEL); + if (!frame) { + err 
= RPMB_ERR_READ; + ret = -ENOMEM; + rsdev->out_frames = rsdev->res_frames; + rsdev->out_frames_cnt = cnt; + goto out; + } + + rsdev->out_frames = frame; + rsdev->out_frames_cnt = cnt; + + frame->req_resp = req_to_resp(req); + frame->write_counter = cpu_to_be32(rsdev->write_counter); + memcpy(frame->nonce, in_frame[0].nonce, 16); + + err = RPMB_ERR_OK; + if (rpmb_sim_calc_hmac(rsdev, frame, cnt, frame->key_mac)) + err = RPMB_ERR_READ; + +out: + rsdev->out_frames[0].req_resp = req_to_resp(req); + rsdev->out_frames[0].result = op_result(rsdev, err); + + return ret; +} + +static int rpmb_op_write_data(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *in_frame, u32 cnt) +{ + struct rpmb_frame_jdec *res_frame = rsdev->res_frames; + u8 mac[32]; + u16 req, err, addr, blks; + unsigned int i; + int ret = 0; + + req = be16_to_cpu(in_frame[0].req_resp); + if (req != RPMB_WRITE_DATA) + return -EINVAL; + + if (rsdev->write_counter == 0xFFFFFFFF) { + err = RPMB_ERR_WRITE; + goto out; + } + + blks = be16_to_cpu(in_frame[0].block_count); + if (blks == 0 || blks > cnt) { + dev_err(rsdev->dev, "wrong number of blocks: blks=%u cnt=%u\n", + blks, cnt); + ret = -EINVAL; + err = RPMB_ERR_GENERAL; + goto out; + } + + if (blks > max_wr_blks) { + err = RPMB_ERR_WRITE; + goto out; + } + + addr = be16_to_cpu(in_frame[0].addr); + if (addr >= rsdev->blkcnt) { + err = RPMB_ERR_ADDRESS; + goto out; + } + + if (rpmb_sim_calc_hmac(rsdev, in_frame, blks, mac)) { + err = RPMB_ERR_AUTH; + goto out; + } + + /* mac is in the last frame */ + if (memcmp(mac, in_frame[blks - 1].key_mac, sizeof(mac)) != 0) { + err = RPMB_ERR_AUTH; + goto out; + } + + if (be32_to_cpu(in_frame[0].write_counter) != rsdev->write_counter) { + err = RPMB_ERR_COUNTER; + goto out; + } + + if (addr + blks > rsdev->blkcnt) { + err = RPMB_ERR_WRITE; + goto out; + } + + dev_dbg(rsdev->dev, "Writing = %u blocks at addr = 0x%X\n", blks, addr); + err = RPMB_ERR_OK; + for (i = 0; i < blks; i++) + memcpy(rsdev->da[addr + i].data, in_frame[i].data, BLK_UNIT); + + rsdev->write_counter++; + + memset(res_frame, 0, sizeof(*res_frame)); + res_frame->req_resp = req_to_resp(req); + res_frame->write_counter = cpu_to_be32(rsdev->write_counter); + res_frame->addr = cpu_to_be16(addr); + if (rpmb_sim_calc_hmac(rsdev, res_frame, 1, res_frame->key_mac)) + err = RPMB_ERR_READ; + +out: + if (err != RPMB_ERR_OK) { + memset(res_frame, 0, sizeof(*res_frame)); + res_frame->req_resp = req_to_resp(req); + } + res_frame->result = op_result(rsdev, err); + + return ret; +} + +static int rpmb_do_read_data(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *in_frame, u32 cnt) +{ + struct rpmb_frame_jdec *res_frame = rsdev->res_frames; + struct rpmb_frame_jdec *out_frames = NULL; + u8 mac[32]; + u16 req, err, addr, blks; + unsigned int i; + int ret; + + req = be16_to_cpu(in_frame->req_resp); + if (req != RPMB_READ_DATA) + return -EINVAL; + + /* eMMC intentionally set 0 here */ + blks = be16_to_cpu(in_frame->block_count); + blks = blks ?: cnt; + if (blks > cnt) { + dev_err(rsdev->dev, "wrong number of frames cnt %u\n", blks); + ret = -EINVAL; + err = RPMB_ERR_GENERAL; + goto out; + } + + out_frames = kcalloc(blks, sizeof(*out_frames), GFP_KERNEL); + if (!out_frames) { + ret = -ENOMEM; + err = RPMB_ERR_READ; + goto out; + } + + ret = 0; + addr = be16_to_cpu(in_frame[0].addr); + if (addr >= rsdev->blkcnt) { + err = RPMB_ERR_ADDRESS; + goto out; + } + + if (addr + blks > rsdev->blkcnt) { + err = RPMB_ERR_READ; + goto out; + } + + dev_dbg(rsdev->dev, "reading = %u blocks at addr = 0x%X\n", 
blks, addr); + for (i = 0; i < blks; i++) { + memcpy(out_frames[i].data, rsdev->da[addr + i].data, BLK_UNIT); + memcpy(out_frames[i].nonce, in_frame[0].nonce, 16); + out_frames[i].req_resp = req_to_resp(req); + out_frames[i].addr = in_frame[0].addr; + out_frames[i].block_count = cpu_to_be16(blks); + } + + if (rpmb_sim_calc_hmac(rsdev, out_frames, blks, mac)) { + err = RPMB_ERR_AUTH; + goto out; + } + + memcpy(out_frames[blks - 1].key_mac, mac, sizeof(mac)); + + err = RPMB_ERR_OK; + for (i = 0; i < blks; i++) + out_frames[i].result = op_result(rsdev, err); + + rsdev->out_frames = out_frames; + rsdev->out_frames_cnt = cnt; + + return 0; + +out: + memset(res_frame, 0, sizeof(*res_frame)); + res_frame->req_resp = req_to_resp(req); + res_frame->result = op_result(rsdev, err); + kfree(out_frames); + rsdev->out_frames = res_frame; + rsdev->out_frames_cnt = 1; + + return ret; +} + +static int rpmb_op_read_data(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *in_frame, u32 cnt) +{ + struct rpmb_frame_jdec *res_frame = rsdev->res_frames; + u16 req; + + req = be16_to_cpu(in_frame->req_resp); + if (req != RPMB_READ_DATA) + return -EINVAL; + + memcpy(res_frame, in_frame, sizeof(*res_frame)); + + rsdev->out_frames = res_frame; + rsdev->out_frames_cnt = 1; + + return 0; +} + +static int rpmb_op_result_read(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *frames, u32 cnt) +{ + u16 req = be16_to_cpu(frames[0].req_resp); + u16 blks = be16_to_cpu(frames[0].block_count); + + if (req != RPMB_RESULT_READ) + return -EINVAL; + + if (blks != 0) { + dev_err(rsdev->dev, "wrong number of frames %u != 0\n", blks); + return -EINVAL; + } + + rsdev->out_frames = rsdev->res_frames; + rsdev->out_frames_cnt = 1; + return 0; +} + +static int rpmb_sim_write(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *frames, u32 cnt) +{ + u16 req; + int ret; + + if (!frames) + return -EINVAL; + + if (cnt == 0) + cnt = 1; + + req = be16_to_cpu(frames[0].req_resp); + if (!rsdev->auth_key_set && req != RPMB_PROGRAM_KEY) + return rpmb_op_not_programmed(rsdev, req); + + switch (req) { + case RPMB_PROGRAM_KEY: + dev_dbg(rsdev->dev, "rpmb: program key\n"); + ret = rpmb_op_program_key(rsdev, frames, cnt); + break; + case RPMB_WRITE_DATA: + dev_dbg(rsdev->dev, "rpmb: write data\n"); + ret = rpmb_op_write_data(rsdev, frames, cnt); + break; + case RPMB_GET_WRITE_COUNTER: + dev_dbg(rsdev->dev, "rpmb: get write counter\n"); + ret = rpmb_op_get_wr_counter(rsdev, frames, cnt); + break; + case RPMB_READ_DATA: + dev_dbg(rsdev->dev, "rpmb: read data\n"); + ret = rpmb_op_read_data(rsdev, frames, cnt); + break; + case RPMB_RESULT_READ: + dev_dbg(rsdev->dev, "rpmb: result read\n"); + ret = rpmb_op_result_read(rsdev, frames, cnt); + break; + default: + dev_err(rsdev->dev, "unsupported command %u\n", req); + ret = -EINVAL; + break; + } + + dev_dbg(rsdev->dev, "rpmb: ret=%d\n", ret); + + return ret; +} + +static int rpmb_sim_read(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *frames, u32 cnt) +{ + int i; + + if (!frames) + return -EINVAL; + + if (cnt == 0) + cnt = 1; + + if (!rsdev->out_frames || rsdev->out_frames_cnt == 0) { + dev_err(rsdev->dev, "out_frames are not set\n"); + return -EINVAL; + } + + if (rsdev->out_frames->req_resp == cpu_to_be16(RPMB_READ_DATA)) + rpmb_do_read_data(rsdev, rsdev->out_frames, cnt); + + for (i = 0; i < min_t(u32, rsdev->out_frames_cnt, cnt); i++) + memcpy(&frames[i], &rsdev->out_frames[i], sizeof(frames[i])); + + if (rsdev->out_frames != rsdev->res_frames) + kfree(rsdev->out_frames); + + rsdev->out_frames = 
NULL; + rsdev->out_frames_cnt = 0; + dev_dbg(rsdev->dev, "rpmb: cnt=%d\n", cnt); + + return 0; +} + +static int rpmb_sim_cmd_seq(struct device *dev, u8 target, + struct rpmb_cmd *cmds, u32 ncmds) +{ + struct rpmb_sim_dev *rsdev; + int i; + int ret; + struct rpmb_cmd *cmd; + + if (!dev) + return -EINVAL; + + rsdev = dev_get_drvdata(dev); + + if (!rsdev) + return -EINVAL; + + for (ret = 0, i = 0; i < ncmds && !ret; i++) { + cmd = &cmds[i]; + if (cmd->flags & RPMB_F_WRITE) + ret = rpmb_sim_write(rsdev, cmd->frames, cmd->nframes); + else + ret = rpmb_sim_read(rsdev, cmd->frames, cmd->nframes); + } + return ret; +} + +static int rpmb_sim_get_capacity(struct device *dev, u8 target) +{ + return daunits; +} + +static struct rpmb_ops rpmb_sim_ops = { + .cmd_seq = rpmb_sim_cmd_seq, + .get_capacity = rpmb_sim_get_capacity, + .type = RPMB_TYPE_EMMC | RPMB_TYPE_SIM, +}; + +static int rpmb_sim_hmac_256_alloc(struct rpmb_sim_dev *rsdev) +{ + struct shash_desc *desc; + struct crypto_shash *tfm; + + tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); + if (!desc) { + crypto_free_shash(tfm); + return -ENOMEM; + } + + desc->tfm = tfm; + rsdev->hash_desc = desc; + + dev_dbg(rsdev->dev, "hmac(sha256) registered\n"); + return 0; +} + +static void rpmb_sim_hmac_256_free(struct rpmb_sim_dev *rsdev) +{ + struct shash_desc *desc = rsdev->hash_desc; + + if (desc->tfm) + crypto_free_shash(desc->tfm); + kfree(desc); + + rsdev->hash_desc = NULL; +} + +static int rpmb_sim_probe(struct device *dev) +{ + struct rpmb_sim_dev *rsdev; + int ret; + + rsdev = kzalloc(sizeof(*rsdev), GFP_KERNEL); + if (!rsdev) + return -ENOMEM; + + rsdev->dev = dev; + + ret = rpmb_sim_hmac_256_alloc(rsdev); + if (ret) + goto err; + + rsdev->capacity = CAPACITY_UNIT * daunits; + rsdev->blkcnt = rsdev->capacity / BLK_UNIT; + rsdev->da = kzalloc(rsdev->capacity, GFP_KERNEL); + if (!rsdev->da) { + ret = -ENOMEM; + goto err; + } + + rpmb_sim_ops.dev_id_len = strlen(id); + rpmb_sim_ops.dev_id = id; + rpmb_sim_ops.wr_cnt_max = max_wr_blks; + rpmb_sim_ops.rd_cnt_max = max_wr_blks; + rpmb_sim_ops.block_size = 1; + + rsdev->rdev = rpmb_dev_register(rsdev->dev, 0, &rpmb_sim_ops); + if (IS_ERR(rsdev->rdev)) { + ret = PTR_ERR(rsdev->rdev); + goto err; + } + + dev_info(dev, "registered RPMB capacity = %zu of %zu blocks\n", + rsdev->capacity, rsdev->blkcnt); + + dev_set_drvdata(dev, rsdev); + + return 0; +err: + /* free the hash descriptor only if it was actually allocated */ + if (rsdev->hash_desc) + rpmb_sim_hmac_256_free(rsdev); + kfree(rsdev->da); + kfree(rsdev); + return ret; +} + +static int rpmb_sim_remove(struct device *dev) +{ + struct rpmb_sim_dev *rsdev; + + rsdev = dev_get_drvdata(dev); + + rpmb_dev_unregister(rsdev->rdev); + + dev_set_drvdata(dev, NULL); + + rpmb_sim_hmac_256_free(rsdev); + + kfree(rsdev->da); + kfree(rsdev); + return 0; +} + +static void rpmb_sim_shutdown(struct device *dev) +{ + rpmb_sim_remove(dev); +} + +static int rpmb_sim_match(struct device *dev, struct device_driver *drv) +{ + return 1; +} + +static struct bus_type rpmb_sim_bus = { + .name = "rpmb_sim", + .match = rpmb_sim_match, +}; + +static struct device_driver rpmb_sim_drv = { + .name = "rpmb_sim", + .probe = rpmb_sim_probe, + .remove = rpmb_sim_remove, + .shutdown = rpmb_sim_shutdown, +}; + +static void rpmb_sim_dev_release(struct device *dev) +{ +} + +static struct device rpmb_sim_dev; + +static int __init rpmb_sim_init(void) +{ + int ret; + struct device *dev = &rpmb_sim_dev; + struct device_driver *drv = &rpmb_sim_drv; + + ret
= bus_register(&rpmb_sim_bus); + if (ret) + return ret; + + dev->bus = &rpmb_sim_bus; + dev->release = rpmb_sim_dev_release; + dev_set_name(dev, "%s", "rpmb_sim"); + ret = device_register(dev); + if (ret) { + pr_err("device register failed %d\n", ret); + goto err_device; + } + + drv->bus = &rpmb_sim_bus; + ret = driver_register(drv); + if (ret) { + pr_err("driver register failed %d\n", ret); + goto err_driver; + } + + return 0; + +err_driver: + device_unregister(dev); +err_device: + bus_unregister(&rpmb_sim_bus); + return ret; +} + +static void __exit rpmb_sim_exit(void) +{ + struct device *dev = &rpmb_sim_dev; + struct device_driver *drv = &rpmb_sim_drv; + + device_unregister(dev); + driver_unregister(drv); + bus_unregister(&rpmb_sim_bus); +} + +module_init(rpmb_sim_init); +module_exit(rpmb_sim_exit); + +MODULE_AUTHOR("Tomas Winkler +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const char id[] = "RPMB:VIRTIO"; +#ifndef VIRTIO_ID_RPMB +#define VIRTIO_ID_RPMB 0xFFFF +#endif + +#define RPMB_SEQ_CMD_MAX 3 /* support up to 3 cmds */ + +struct virtio_rpmb_info { + struct virtqueue *vq; + struct mutex lock; /* info lock */ + wait_queue_head_t have_data; + struct rpmb_dev *rdev; +}; + +struct virtio_rpmb_ioc { + unsigned int ioc_cmd; + int result; + u8 target; + u8 reserved[3]; +}; + +static void virtio_rpmb_recv_done(struct virtqueue *vq) +{ + struct virtio_rpmb_info *vi; + struct virtio_device *vdev = vq->vdev; + + vi = vq->vdev->priv; + if (!vi) { + dev_err(&vdev->dev, "Error: no found vi data.\n"); + return; + } + + wake_up(&vi->have_data); +} + +static int rpmb_virtio_cmd_seq(struct device *dev, u8 target, + struct rpmb_cmd *cmds, u32 ncmds) +{ + struct virtio_device *vdev = dev_to_virtio(dev); + struct virtio_rpmb_info *vi = vdev->priv; + unsigned int i; + struct virtio_rpmb_ioc *vio_cmd; + struct rpmb_ioc_seq_cmd *seq_cmd; + size_t seq_cmd_sz; + struct scatterlist vio_ioc, vio_seq, frame[3]; + struct scatterlist *sgs[5]; + unsigned int num_out = 0, num_in = 0; + size_t sz; + int ret; + unsigned int len; + + if (ncmds > RPMB_SEQ_CMD_MAX) + return -EINVAL; + + mutex_lock(&vi->lock); + + vio_cmd = kzalloc(sizeof(*vio_cmd), GFP_KERNEL); + seq_cmd_sz = sizeof(*seq_cmd) + sizeof(struct rpmb_ioc_cmd) * ncmds; + seq_cmd = kzalloc(seq_cmd_sz, GFP_KERNEL); + if (!vio_cmd || !seq_cmd) { + ret = -ENOMEM; + goto out; + } + + vio_cmd->ioc_cmd = RPMB_IOC_SEQ_CMD; + vio_cmd->result = 0; + vio_cmd->target = target; + sg_init_one(&vio_ioc, vio_cmd, sizeof(*vio_cmd)); + sgs[num_out + num_in++] = &vio_ioc; + + seq_cmd->num_of_cmds = ncmds; + for (i = 0; i < ncmds; i++) { + seq_cmd->cmds[i].flags = cmds[i].flags; + seq_cmd->cmds[i].nframes = cmds[i].nframes; + seq_cmd->cmds[i].frames_ptr = i; + } + sg_init_one(&vio_seq, seq_cmd, seq_cmd_sz); + sgs[num_out + num_in++] = &vio_seq; + + for (i = 0; i < ncmds; i++) { + sz = sizeof(struct rpmb_frame_jdec) * (cmds[i].nframes ?: 1); + sg_init_one(&frame[i], cmds[i].frames, sz); + sgs[num_out + num_in++] = &frame[i]; + } + + virtqueue_add_sgs(vi->vq, sgs, num_out, num_in, vi, GFP_KERNEL); + virtqueue_kick(vi->vq); + + wait_event(vi->have_data, virtqueue_get_buf(vi->vq, &len)); + + ret = 0; + + if (vio_cmd->result != 0) { + dev_err(dev, "Error: command error = %d.\n", vio_cmd->result); + ret = -EIO; + } + +out: + kfree(vio_cmd); + kfree(seq_cmd); + mutex_unlock(&vi->lock); + return ret; +} + +static int rpmb_virtio_cmd_cap(struct device *dev, u8 target) +{ + struct virtio_device *vdev = dev_to_virtio(dev); + struct 
virtio_rpmb_info *vi = vdev->priv; + struct virtio_rpmb_ioc *vio_cmd; + struct rpmb_ioc_cap_cmd *cap_cmd; + struct scatterlist vio_ioc, cap_ioc; + struct scatterlist *sgs[2]; + unsigned int num_out = 0, num_in = 0; + unsigned int len; + int ret; + + mutex_lock(&vi->lock); + + vio_cmd = kzalloc(sizeof(*vio_cmd), GFP_KERNEL); + cap_cmd = kzalloc(sizeof(*cap_cmd), GFP_KERNEL); + if (!vio_cmd || !cap_cmd) { + ret = -ENOMEM; + goto out; + } + + vio_cmd->ioc_cmd = RPMB_IOC_CAP_CMD; + vio_cmd->result = 0; + vio_cmd->target = target; + sg_init_one(&vio_ioc, vio_cmd, sizeof(*vio_cmd)); + sgs[num_out + num_in++] = &vio_ioc; + + sg_init_one(&cap_ioc, cap_cmd, sizeof(*cap_cmd)); + sgs[num_out + num_in++] = &cap_ioc; + + virtqueue_add_sgs(vi->vq, sgs, num_out, num_in, vi, GFP_KERNEL); + virtqueue_kick(vi->vq); + + wait_event(vi->have_data, virtqueue_get_buf(vi->vq, &len)); + + ret = 0; + + if (vio_cmd->result != 0) { + dev_err(dev, "Error: command error = %d.\n", vio_cmd->result); + ret = -EIO; + } + +out: + kfree(vio_cmd); + kfree(cap_cmd); + + mutex_unlock(&vi->lock); + return ret; +} + +static int rpmb_virtio_get_capacity(struct device *dev, u8 target) +{ + return 0; +} + +static struct rpmb_ops rpmb_virtio_ops = { + .cmd_seq = rpmb_virtio_cmd_seq, + .get_capacity = rpmb_virtio_get_capacity, + .type = RPMB_TYPE_EMMC, +}; + +static int rpmb_virtio_dev_init(struct virtio_rpmb_info *vi) +{ + int ret = 0; + struct device *dev = &vi->vq->vdev->dev; + + rpmb_virtio_ops.dev_id_len = strlen(id); + rpmb_virtio_ops.dev_id = id; + rpmb_virtio_ops.wr_cnt_max = 1; + rpmb_virtio_ops.rd_cnt_max = 1; + rpmb_virtio_ops.block_size = 1; + + vi->rdev = rpmb_dev_register(dev, 0, &rpmb_virtio_ops); + if (IS_ERR(vi->rdev)) { + ret = PTR_ERR(vi->rdev); + goto err; + } + + dev_set_drvdata(dev, vi); +err: + return ret; +} + +static int virtio_rpmb_init(struct virtio_device *vdev) +{ + int ret; + struct virtio_rpmb_info *vi; + + vi = kzalloc(sizeof(*vi), GFP_KERNEL); + if (!vi) + return -ENOMEM; + + init_waitqueue_head(&vi->have_data); + mutex_init(&vi->lock); + vdev->priv = vi; + + /* We expect a single virtqueue. */ + vi->vq = virtio_find_single_vq(vdev, virtio_rpmb_recv_done, "request"); + if (IS_ERR(vi->vq)) { + dev_err(&vdev->dev, "get single vq failed!\n"); + ret = PTR_ERR(vi->vq); + goto err; + } + + /* create vrpmb device. 
*/ + ret = rpmb_virtio_dev_init(vi); + if (ret) { + dev_err(&vdev->dev, "create vrpmb device failed.\n"); + goto err; + } + + dev_info(&vdev->dev, "init done!\n"); + + return 0; + +err: + kfree(vi); + return ret; +} + +static void virtio_rpmb_remove(struct virtio_device *vdev) +{ + struct virtio_rpmb_info *vi; + + vi = vdev->priv; + if (!vi) + return; + + if (wq_has_sleeper(&vi->have_data)) + wake_up(&vi->have_data); + + rpmb_dev_unregister(vi->rdev); + + if (vdev->config->reset) + vdev->config->reset(vdev); + + if (vdev->config->del_vqs) + vdev->config->del_vqs(vdev); + + kfree(vi); +} + +static int virtio_rpmb_probe(struct virtio_device *vdev) +{ + return virtio_rpmb_init(vdev); +} + +#ifdef CONFIG_PM_SLEEP +static int virtio_rpmb_freeze(struct virtio_device *vdev) +{ + virtio_rpmb_remove(vdev); + return 0; +} + +static int virtio_rpmb_restore(struct virtio_device *vdev) +{ + return virtio_rpmb_init(vdev); +} +#endif + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_RPMB, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static struct virtio_driver virtio_rpmb_driver = { + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtio_rpmb_probe, + .remove = virtio_rpmb_remove, +#ifdef CONFIG_PM_SLEEP + .freeze = virtio_rpmb_freeze, + .restore = virtio_rpmb_restore, +#endif +}; + +module_virtio_driver(virtio_rpmb_driver); +MODULE_DEVICE_TABLE(virtio, id_table); + +MODULE_DESCRIPTION("Virtio rpmb frontend driver"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c index 4d1dc8b46877..f95b9c75175b 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.c +++ b/drivers/char/tpm/st33zp24/st33zp24.c @@ -457,7 +457,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf, size_t count) { int size = 0; - int expected; + u32 expected; if (!chip) return -EBUSY; @@ -474,7 +474,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf, } expected = be32_to_cpu(*(__be32 *)(buf + 2)); - if (expected > count) { + if (expected > count || expected < TPM_HEADER_SIZE) { size = -EIO; goto out; } diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 610638a80383..98cf36fb068d 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -37,7 +37,7 @@ static void timeout_work(struct work_struct *work) struct file_priv *priv = container_of(work, struct file_priv, work); mutex_lock(&priv->buffer_mutex); - atomic_set(&priv->data_pending, 0); + priv->data_pending = 0; memset(priv->data_buffer, 0, sizeof(priv->data_buffer)); mutex_unlock(&priv->buffer_mutex); } @@ -46,7 +46,6 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip, struct file_priv *priv) { priv->chip = chip; - atomic_set(&priv->data_pending, 0); mutex_init(&priv->buffer_mutex); setup_timer(&priv->user_read_timer, user_reader_timeout, (unsigned long)priv); @@ -59,29 +58,24 @@ ssize_t tpm_common_read(struct file *file, char __user *buf, size_t size, loff_t *off) { struct file_priv *priv = file->private_data; - ssize_t ret_size; - ssize_t orig_ret_size; + ssize_t ret_size = 0; int rc; del_singleshot_timer_sync(&priv->user_read_timer); flush_work(&priv->work); - ret_size = atomic_read(&priv->data_pending); - if (ret_size > 0) { /* relay data */ - orig_ret_size = ret_size; - if (size < ret_size) - ret_size = size; + mutex_lock(&priv->buffer_mutex); - mutex_lock(&priv->buffer_mutex); + if (priv->data_pending) { 
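+ /* Relay at most @size bytes of the buffered response; anything left over is cleared below rather than saved for a later read. */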
+ ret_size = min_t(ssize_t, size, priv->data_pending); rc = copy_to_user(buf, priv->data_buffer, ret_size); - memset(priv->data_buffer, 0, orig_ret_size); + memset(priv->data_buffer, 0, priv->data_pending); if (rc) ret_size = -EFAULT; - mutex_unlock(&priv->buffer_mutex); + priv->data_pending = 0; } - atomic_set(&priv->data_pending, 0); - + mutex_unlock(&priv->buffer_mutex); return ret_size; } @@ -92,17 +86,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, size_t in_size = size; ssize_t out_size; + if (in_size > TPM_BUFSIZE) + return -E2BIG; + + mutex_lock(&priv->buffer_mutex); + /* Cannot perform a write until the read has cleared either via * tpm_read or a user_read_timer timeout. This also prevents split * buffered writes from blocking here. */ - if (atomic_read(&priv->data_pending) != 0) + if (priv->data_pending != 0) { + mutex_unlock(&priv->buffer_mutex); return -EBUSY; - - if (in_size > TPM_BUFSIZE) - return -E2BIG; - - mutex_lock(&priv->buffer_mutex); + } if (copy_from_user (priv->data_buffer, (void __user *) buf, in_size)) { @@ -110,6 +106,12 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, return -EFAULT; } + if (in_size < 6 || + in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) { + mutex_unlock(&priv->buffer_mutex); + return -EINVAL; + } + /* atomic tpm command send and result receive. We only hold the ops * lock during this period so that the tpm can be unregistered even if * the char dev is held open. @@ -127,7 +129,7 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, return out_size; } - atomic_set(&priv->data_pending, out_size); + priv->data_pending = out_size; mutex_unlock(&priv->buffer_mutex); /* Set a timeout by which the reader must come claim the result */ @@ -144,5 +146,5 @@ void tpm_common_release(struct file *file, struct file_priv *priv) del_singleshot_timer_sync(&priv->user_read_timer); flush_work(&priv->work); file->private_data = NULL; - atomic_set(&priv->data_pending, 0); + priv->data_pending = 0; } diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h index ba3b6f9dacf7..b24cfb4d3ee1 100644 --- a/drivers/char/tpm/tpm-dev.h +++ b/drivers/char/tpm/tpm-dev.h @@ -8,7 +8,7 @@ struct file_priv { struct tpm_chip *chip; /* Data passed to and from the tpm via the read/write calls */ - atomic_t data_pending; + size_t data_pending; struct mutex buffer_mutex; struct timer_list user_read_timer; /* user needs to claim result */ diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 1d6729be4cd6..96be2ec4a988 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -29,7 +29,6 @@ #include #include #include -#include #include "tpm.h" #include "tpm_eventlog.h" @@ -328,7 +327,7 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, } EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); -static bool tpm_validate_command(struct tpm_chip *chip, +static int tpm_validate_command(struct tpm_chip *chip, struct tpm_space *space, const u8 *cmd, size_t len) @@ -340,10 +339,10 @@ static bool tpm_validate_command(struct tpm_chip *chip, unsigned int nr_handles; if (len < TPM_HEADER_SIZE) - return false; + return -EINVAL; if (!space) - return true; + return 0; if (chip->flags & TPM_CHIP_FLAG_TPM2 && chip->nr_commands) { cc = be32_to_cpu(header->ordinal); @@ -352,7 +351,7 @@ static bool tpm_validate_command(struct tpm_chip *chip, if (i < 0) { dev_dbg(&chip->dev, "0x%04X is an invalid command\n", cc); - return false; + return -EOPNOTSUPP; 
} + attrs = chip->cc_attrs_tbl[i]; @@ -362,27 +361,75 @@ static bool tpm_validate_command(struct tpm_chip *chip, goto err_len; } - return true; + return 0; err_len: dev_dbg(&chip->dev, "%s: insufficient command length %zu", __func__, len); - return false; + return -EINVAL; } -/** - * tmp_transmit - Internal kernel interface to transmit TPM commands. - * - * @chip: TPM chip to use - * @buf: TPM command buffer - * @bufsiz: length of the TPM command buffer - * @flags: tpm transmit flags - bitmap - * - * Return: - * 0 when the operation is successful. - * A negative number for system errors (errno). - */ -ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, - u8 *buf, size_t bufsiz, unsigned int flags) +static int tpm_request_locality(struct tpm_chip *chip, unsigned int flags) +{ + int rc; + + if (flags & __TPM_TRANSMIT_RAW) + return 0; + + if (!chip->ops->request_locality) + return 0; + + rc = chip->ops->request_locality(chip, 0); + if (rc < 0) + return rc; + + chip->locality = rc; + + return 0; +} + +static void tpm_relinquish_locality(struct tpm_chip *chip, unsigned int flags) +{ + int rc; + + if (flags & __TPM_TRANSMIT_RAW) + return; + + if (!chip->ops->relinquish_locality) + return; + + rc = chip->ops->relinquish_locality(chip, chip->locality); + if (rc) + dev_err(&chip->dev, "%s: error %d\n", __func__, rc); + + chip->locality = -1; +} + +static int tpm_cmd_ready(struct tpm_chip *chip, unsigned int flags) +{ + if (flags & __TPM_TRANSMIT_RAW) + return 0; + + if (!chip->ops->cmd_ready) + return 0; + + return chip->ops->cmd_ready(chip); +} + +static int tpm_go_idle(struct tpm_chip *chip, unsigned int flags) +{ + if (flags & __TPM_TRANSMIT_RAW) + return 0; + + if (!chip->ops->go_idle) + return 0; + + return chip->ops->go_idle(chip); +} + +static ssize_t tpm_try_transmit(struct tpm_chip *chip, + struct tpm_space *space, + u8 *buf, size_t bufsiz, + unsigned int flags) { struct tpm_output_header *header = (void *)buf; int rc; @@ -391,8 +438,20 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, unsigned long stop; bool need_locality; - if (!tpm_validate_command(chip, space, buf, bufsiz)) - return -EINVAL; + rc = tpm_validate_command(chip, space, buf, bufsiz); + if (rc == -EINVAL) + return rc; + /* + * If the command is not implemented by the TPM, synthesize a + * response with a TPM2_RC_COMMAND_CODE return for user-space. + */ + if (rc == -EOPNOTSUPP) { + header->length = cpu_to_be32(sizeof(*header)); + header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); + header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE | + TSS2_RESMGR_TPM_RC_LAYER); + return bufsiz; + } if (bufsiz > TPM_BUFSIZE) bufsiz = TPM_BUFSIZE; @@ -410,20 +469,23 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, if (!(flags & TPM_TRANSMIT_UNLOCKED)) mutex_lock(&chip->tpm_mutex); - if (chip->dev.parent) - pm_runtime_get_sync(chip->dev.parent); + + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, true); /* Store the decision as chip->locality will be changed. 
*/ need_locality = chip->locality == -1; - if (!(flags & TPM_TRANSMIT_RAW) && - need_locality && chip->ops->request_locality) { - rc = chip->ops->request_locality(chip, 0); + if (need_locality) { + rc = tpm_request_locality(chip, flags); if (rc < 0) goto out_no_locality; - chip->locality = rc; } + rc = tpm_cmd_ready(chip, flags); + if (rc) + goto out; + rc = tpm2_prepare_space(chip, space, ordinal, buf); if (rc) goto out; @@ -482,15 +544,20 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, } rc = tpm2_commit_space(chip, space, ordinal, buf, &len); + if (rc) + dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc); out: - if (need_locality && chip->ops->relinquish_locality) { - chip->ops->relinquish_locality(chip, chip->locality); - chip->locality = -1; - } + rc = tpm_go_idle(chip, flags); + if (rc) + goto out; + + if (need_locality) + tpm_relinquish_locality(chip, flags); + out_no_locality: - if (chip->dev.parent) - pm_runtime_put_sync(chip->dev.parent); + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, false); if (!(flags & TPM_TRANSMIT_UNLOCKED)) mutex_unlock(&chip->tpm_mutex); @@ -498,10 +565,67 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, } /** - * tmp_transmit_cmd - send a tpm command to the device + * tpm_transmit - Internal kernel interface to transmit TPM commands. + * + * @chip: TPM chip to use + * @space: tpm space + * @buf: TPM command buffer + * @bufsiz: length of the TPM command buffer + * @flags: tpm transmit flags - bitmap + * + * A wrapper around tpm_try_transmit that handles TPM2_RC_RETRY + * returns from the TPM and retransmits the command after a delay up + * to a maximum wait of TPM2_DURATION_LONG. + * + * Note: TPM1 never returns TPM2_RC_RETRY so the retry logic is TPM2 + * only + * + * Return: + * the length of the return when the operation is successful. + * A negative number for system errors (errno). + */ +ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, + u8 *buf, size_t bufsiz, unsigned int flags) +{ + struct tpm_output_header *header = (struct tpm_output_header *)buf; + /* space for header and handles */ + u8 save[TPM_HEADER_SIZE + 3*sizeof(u32)]; + unsigned int delay_msec = TPM2_DURATION_SHORT; + u32 rc = 0; + ssize_t ret; + const size_t save_size = min(space ? sizeof(save) : TPM_HEADER_SIZE, + bufsiz); + + /* + * Subtlety here: if we have a space, the handles will be + * transformed, so when we restore the header we also have to + * restore the handles. 
+ */ + memcpy(save, buf, save_size); + + for (;;) { + ret = tpm_try_transmit(chip, space, buf, bufsiz, flags); + if (ret < 0) + break; + rc = be32_to_cpu(header->return_code); + if (rc != TPM2_RC_RETRY) + break; + delay_msec *= 2; + if (delay_msec > TPM2_DURATION_LONG) { + dev_err(&chip->dev, "TPM is in retry loop\n"); + break; + } + tpm_msleep(delay_msec); + memcpy(buf, save, save_size); + } + return ret; +} +/** + * tpm_transmit_cmd - send a tpm command to the device * The function extracts tpm out header return code * * @chip: TPM chip to use + * @space: tpm space * @buf: TPM command buffer * @bufsiz: length of the buffer * @min_rsp_body_length: minimum expected length of response body @@ -953,6 +1077,10 @@ int tpm_do_selftest(struct tpm_chip *chip) loops = jiffies_to_msecs(duration) / delay_msec; rc = tpm_continue_selftest(chip); + if (rc == TPM_ERR_INVALID_POSTINIT) { + chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED; + dev_info(&chip->dev, "TPM not ready (%d)\n", rc); + } /* This may fail if there was no TPM driver during a suspend/resume * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST) */ @@ -1228,6 +1356,10 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) break; recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len); + if (recd > num_bytes) { + total = -EFAULT; + break; + } rlength = be32_to_cpu(tpm_cmd.header.out.length); if (rlength < offsetof(struct tpm_getrandom_out, rng_data) + diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 2d5466a72e40..b7def1ad9228 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -93,14 +93,20 @@ enum tpm2_structures { TPM2_ST_SESSIONS = 0x8002, }; +/* Indicates from what layer of the software stack the error comes from */ +#define TSS2_RC_LAYER_SHIFT 16 +#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT) + enum tpm2_return_codes { TPM2_RC_SUCCESS = 0x0000, TPM2_RC_HASH = 0x0083, /* RC_FMT1 */ TPM2_RC_HANDLE = 0x008B, TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */ TPM2_RC_DISABLED = 0x0120, + TPM2_RC_COMMAND_CODE = 0x0143, TPM2_RC_TESTING = 0x090A, /* RC_WARN */ TPM2_RC_REFERENCE_H0 = 0x0910, + TPM2_RC_RETRY = 0x0922, }; enum tpm2_algorithms { @@ -505,9 +511,18 @@ extern const struct file_operations tpm_fops; extern const struct file_operations tpmrm_fops; extern struct idr dev_nums_idr; +/** + * enum tpm_transmit_flags + * + * @TPM_TRANSMIT_UNLOCKED: used to lock sequence of tpm_transmit calls. + * @__TPM_TRANSMIT_RAW: prevent recursive calls into setup steps + * (go idle, locality,..). Don't use directly. 
+ * @TPM_TRANSMIT_NESTED: Use from nested tpm_transmit calls + */ enum tpm_transmit_flags { - TPM_TRANSMIT_UNLOCKED = BIT(0), - TPM_TRANSMIT_RAW = BIT(1), + TPM_TRANSMIT_UNLOCKED = BIT(0), + __TPM_TRANSMIT_RAW = BIT(1), + TPM_TRANSMIT_NESTED = TPM_TRANSMIT_UNLOCKED | __TPM_TRANSMIT_RAW, }; ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index e1a41b788f08..44a3d16231f6 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -683,6 +683,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip, if (!rc) { data_len = be16_to_cpup( (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]); + if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) { + rc = -EFAULT; + goto out; + } rlength = be32_to_cpu(((struct tpm2_cmd *)&buf) ->header.out.length); diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index e2e059d8ffec..a5c9a0e3af00 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -39,7 +39,7 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space) for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) { if (space->session_tbl[i]) tpm2_flush_context_cmd(chip, space->session_tbl[i], - TPM_TRANSMIT_UNLOCKED); + TPM_TRANSMIT_NESTED); } } @@ -84,7 +84,7 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf, tpm_buf_append(&tbuf, &buf[*offset], body_size); rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 4, - TPM_TRANSMIT_UNLOCKED, NULL); + TPM_TRANSMIT_NESTED, NULL); if (rc < 0) { dev_warn(&chip->dev, "%s: failed with a system error %d\n", __func__, rc); @@ -102,8 +102,9 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf, * TPM_RC_REFERENCE_H0 means the session has been * flushed outside the space */ - rc = -ENOENT; + *handle = 0; tpm_buf_destroy(&tbuf); + return -ENOENT; } else if (rc > 0) { dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n", __func__, rc); @@ -132,7 +133,7 @@ static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf, tpm_buf_append_u32(&tbuf, handle); rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 0, - TPM_TRANSMIT_UNLOCKED, NULL); + TPM_TRANSMIT_NESTED, NULL); if (rc < 0) { dev_warn(&chip->dev, "%s: failed with a system error %d\n", __func__, rc); @@ -169,7 +170,7 @@ static void tpm2_flush_space(struct tpm_chip *chip) for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++) if (space->context_tbl[i] && ~space->context_tbl[i]) tpm2_flush_context_cmd(chip, space->context_tbl[i], - TPM_TRANSMIT_UNLOCKED); + TPM_TRANSMIT_NESTED); tpm2_flush_sessions(chip, space); } @@ -376,7 +377,7 @@ static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp, return 0; out_no_slots: - tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_UNLOCKED); + tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_NESTED); dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__, phandle); return -ENOMEM; @@ -464,7 +465,7 @@ static int tpm2_save_space(struct tpm_chip *chip) return rc; tpm2_flush_context_cmd(chip, space->context_tbl[i], - TPM_TRANSMIT_UNLOCKED); + TPM_TRANSMIT_NESTED); space->context_tbl[i] = ~0; } diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index 8f0a98dea327..d9c7111d1fa4 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -117,8 +117,27 @@ struct tpm2_crb_smc { u32 smc_func_id; }; +static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value, + unsigned long timeout) +{ + ktime_t 
start; + ktime_t stop; + + start = ktime_get(); + stop = ktime_add(start, ms_to_ktime(timeout)); + + do { + if ((ioread32(reg) & mask) == value) + return true; + + usleep_range(50, 100); + } while (ktime_before(ktime_get(), stop)); + + return ((ioread32(reg) & mask) == value); +} + /** - * crb_go_idle - request tpm crb device to go the idle state + * __crb_go_idle - request tpm crb device to go the idle state * * @dev: crb device * @priv: crb private data @@ -132,39 +151,34 @@ struct tpm2_crb_smc { * * Return: 0 always */ -static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv) +static int __crb_go_idle(struct device *dev, struct crb_priv *priv) { if ((priv->flags & CRB_FL_ACPI_START) || (priv->flags & CRB_FL_CRB_SMC_START)) return 0; iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req); - /* we don't really care when this settles */ + if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req, + CRB_CTRL_REQ_GO_IDLE/* mask */, + 0, /* value */ + TPM2_TIMEOUT_C)) { + dev_warn(dev, "goIdle timed out\n"); + return -ETIME; + } return 0; } -static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value, - unsigned long timeout) +static int crb_go_idle(struct tpm_chip *chip) { - ktime_t start; - ktime_t stop; - - start = ktime_get(); - stop = ktime_add(start, ms_to_ktime(timeout)); + struct device *dev = &chip->dev; + struct crb_priv *priv = dev_get_drvdata(dev); - do { - if ((ioread32(reg) & mask) == value) - return true; - - usleep_range(50, 100); - } while (ktime_before(ktime_get(), stop)); - - return false; + return __crb_go_idle(dev, priv); } /** - * crb_cmd_ready - request tpm crb device to enter ready state + * __crb_cmd_ready - request tpm crb device to enter ready state * * @dev: crb device * @priv: crb private data @@ -177,8 +191,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value, * * Return: 0 on success -ETIME on timeout; */ -static int __maybe_unused crb_cmd_ready(struct device *dev, - struct crb_priv *priv) +static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv) { if ((priv->flags & CRB_FL_ACPI_START) || (priv->flags & CRB_FL_CRB_SMC_START)) @@ -196,11 +209,19 @@ static int __maybe_unused crb_cmd_ready(struct device *dev, return 0; } -static int crb_request_locality(struct tpm_chip *chip, int loc) +static int crb_cmd_ready(struct tpm_chip *chip) +{ + struct device *dev = &chip->dev; + struct crb_priv *priv = dev_get_drvdata(dev); + + return __crb_cmd_ready(dev, priv); +} + +static int __crb_request_locality(struct device *dev, + struct crb_priv *priv, int loc) { - struct crb_priv *priv = dev_get_drvdata(&chip->dev); u32 value = CRB_LOC_STATE_LOC_ASSIGNED | - CRB_LOC_STATE_TPM_REG_VALID_STS; + CRB_LOC_STATE_TPM_REG_VALID_STS; if (!priv->regs_h) return 0; @@ -208,21 +229,45 @@ static int crb_request_locality(struct tpm_chip *chip, int loc) iowrite32(CRB_LOC_CTRL_REQUEST_ACCESS, &priv->regs_h->loc_ctrl); if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, value, value, TPM2_TIMEOUT_C)) { - dev_warn(&chip->dev, "TPM_LOC_STATE_x.requestAccess timed out\n"); + dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n"); return -ETIME; } return 0; } -static void crb_relinquish_locality(struct tpm_chip *chip, int loc) +static int crb_request_locality(struct tpm_chip *chip, int loc) { struct crb_priv *priv = dev_get_drvdata(&chip->dev); + return __crb_request_locality(&chip->dev, priv, loc); +} + +static int __crb_relinquish_locality(struct device *dev, + struct crb_priv *priv, int loc) +{ + u32 mask = 
CRB_LOC_STATE_LOC_ASSIGNED | + CRB_LOC_STATE_TPM_REG_VALID_STS; + u32 value = CRB_LOC_STATE_TPM_REG_VALID_STS; + if (!priv->regs_h) - return; + return 0; iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl); + if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value, + TPM2_TIMEOUT_C)) { + dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n"); + return -ETIME; + } + + return 0; +} + +static int crb_relinquish_locality(struct tpm_chip *chip, int loc) +{ + struct crb_priv *priv = dev_get_drvdata(&chip->dev); + + return __crb_relinquish_locality(&chip->dev, priv, loc); } static u8 crb_status(struct tpm_chip *chip) @@ -364,6 +409,8 @@ static const struct tpm_class_ops tpm_crb = { .send = crb_send, .cancel = crb_cancel, .req_canceled = crb_req_canceled, + .go_idle = crb_go_idle, + .cmd_ready = crb_cmd_ready, .request_locality = crb_request_locality, .relinquish_locality = crb_relinquish_locality, .req_complete_mask = CRB_DRV_STS_COMPLETE, @@ -466,6 +513,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, dev_warn(dev, FW_BUG "Bad ACPI memory layout"); } + ret = __crb_request_locality(dev, priv, 0); + if (ret) + return ret; + priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address, sizeof(struct crb_regs_tail)); if (IS_ERR(priv->regs_t)) @@ -475,7 +526,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, * PTT HW bug w/a: wake up the device to access * possibly not retained registers. */ - ret = crb_cmd_ready(dev, priv); + ret = __crb_cmd_ready(dev, priv); if (ret) return ret; @@ -520,7 +571,9 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, if (!ret) priv->cmd_size = cmd_size; - crb_go_idle(dev, priv); + __crb_go_idle(dev, priv); + + __crb_relinquish_locality(dev, priv, 0); return ret; } @@ -589,25 +642,7 @@ static int crb_acpi_add(struct acpi_device *device) chip->acpi_dev_handle = device->handle; chip->flags = TPM_CHIP_FLAG_TPM2; - rc = crb_cmd_ready(dev, priv); - if (rc) - return rc; - - pm_runtime_get_noresume(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - - rc = tpm_chip_register(chip); - if (rc) { - crb_go_idle(dev, priv); - pm_runtime_put_noidle(dev); - pm_runtime_disable(dev); - return rc; - } - - pm_runtime_put(dev); - - return 0; + return tpm_chip_register(chip); } static int crb_acpi_remove(struct acpi_device *device) @@ -617,52 +652,11 @@ static int crb_acpi_remove(struct acpi_device *device) tpm_chip_unregister(chip); - pm_runtime_disable(dev); - return 0; } -static int __maybe_unused crb_pm_runtime_suspend(struct device *dev) -{ - struct tpm_chip *chip = dev_get_drvdata(dev); - struct crb_priv *priv = dev_get_drvdata(&chip->dev); - - return crb_go_idle(dev, priv); -} - -static int __maybe_unused crb_pm_runtime_resume(struct device *dev) -{ - struct tpm_chip *chip = dev_get_drvdata(dev); - struct crb_priv *priv = dev_get_drvdata(&chip->dev); - - return crb_cmd_ready(dev, priv); -} - -static int __maybe_unused crb_pm_suspend(struct device *dev) -{ - int ret; - - ret = tpm_pm_suspend(dev); - if (ret) - return ret; - - return crb_pm_runtime_suspend(dev); -} - -static int __maybe_unused crb_pm_resume(struct device *dev) -{ - int ret; - - ret = crb_pm_runtime_resume(dev); - if (ret) - return ret; - - return tpm_pm_resume(dev); -} - static const struct dev_pm_ops crb_pm = { - SET_SYSTEM_SLEEP_PM_OPS(crb_pm_suspend, crb_pm_resume) - SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume) }; 
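With the runtime-PM suspend/resume handlers above removed, CRB idle/ready transitions are now driven per command by the TPM core: tpm_try_transmit() (see the tpm-interface.c hunks earlier in this patch) brackets each command with the new cmd_ready and go_idle class ops. A minimal sketch of that calling pattern, assuming only the ops added by this patch; the wrapper name is illustrative, not part of the patch:

	/* sketch: how the core wraps one command with the new power ops */
	static int transmit_one_sketch(struct tpm_chip *chip, u8 *buf, size_t len)
	{
		int rc;

		if (chip->ops->cmd_ready) {	/* wake the device from idle */
			rc = chip->ops->cmd_ready(chip);
			if (rc)
				return rc;
		}

		rc = chip->ops->send(chip, buf, len);	/* the actual command I/O */

		if (chip->ops->go_idle)	/* allow the device to power-gate again */
			chip->ops->go_idle(chip);

		return rc;
	}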
static const struct acpi_device_id crb_device_ids[] = { diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index 79d6bbb58e39..d5b44cadac56 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -473,7 +473,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) { int size = 0; - int expected, status; + int status; + u32 expected; if (count < TPM_HEADER_SIZE) { size = -EIO; @@ -488,7 +489,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) } expected = be32_to_cpu(*(__be32 *)(buf + 2)); - if ((size_t) expected > count) { + if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) { size = -EIO; goto out; } diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index c6428771841f..caa86b19c76d 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) struct device *dev = chip->dev.parent; struct i2c_client *client = to_i2c_client(dev); s32 rc; - int expected, status, burst_count, retries, size = 0; + int status; + int burst_count; + int retries; + int size = 0; + u32 expected; if (count < TPM_HEADER_SIZE) { i2c_nuvoton_ready(chip); /* return to idle */ @@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) * to machine native */ expected = be32_to_cpu(*(__be32 *) (buf + 2)); - if (expected > count) { + if (expected > count || expected < size) { dev_err(dev, "%s() expected > count\n", __func__); size = -EIO; continue; diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 7e55aa9ce680..50b59a69dc33 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -132,108 +132,25 @@ static int check_acpi_tpm2(struct device *dev) } #endif -#ifdef CONFIG_X86 -#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000 -#define ILB_REMAP_SIZE 0x100 -#define LPC_CNTRL_REG_OFFSET 0x84 -#define LPC_CLKRUN_EN (1 << 2) - -static void __iomem *ilb_base_addr; - -static inline bool is_bsw(void) -{ - return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0); -} - -/** - * tpm_platform_begin_xfer() - clear LPC CLKRUN_EN i.e. clocks will be running - */ -static void tpm_platform_begin_xfer(void) -{ - u32 clkrun_val; - - if (!is_bsw()) - return; - - clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET); - - /* Disable LPC CLKRUN# */ - clkrun_val &= ~LPC_CLKRUN_EN; - iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET); - - /* - * Write any random value on port 0x80 which is on LPC, to make - * sure LPC clock is running before sending any TPM command. - */ - outb(0xCC, 0x80); - -} - -/** - * tpm_platform_end_xfer() - set LPC CLKRUN_EN i.e. clocks can be turned off - */ -static void tpm_platform_end_xfer(void) -{ - u32 clkrun_val; - - if (!is_bsw()) - return; - - clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET); - - /* Enable LPC CLKRUN# */ - clkrun_val |= LPC_CLKRUN_EN; - iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET); - - /* - * Write any random value on port 0x80 which is on LPC, to make - * sure LPC clock is running before sending any TPM command. 
- */ - outb(0xCC, 0x80); - -} -#else -static inline bool is_bsw(void) -{ - return false; -} - -static void tpm_platform_begin_xfer(void) -{ -} - -static void tpm_platform_end_xfer(void) -{ -} -#endif - static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len, u8 *result) { struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - tpm_platform_begin_xfer(); - while (len--) *result++ = ioread8(phy->iobase + addr); - tpm_platform_end_xfer(); - return 0; } static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, - u8 *value) + const u8 *value) { struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - tpm_platform_begin_xfer(); - while (len--) iowrite8(*value++, phy->iobase + addr); - tpm_platform_end_xfer(); - return 0; } @@ -241,12 +158,8 @@ static int tpm_tcg_read16(struct tpm_tis_data *data, u32 addr, u16 *result) { struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - tpm_platform_begin_xfer(); - *result = ioread16(phy->iobase + addr); - tpm_platform_end_xfer(); - return 0; } @@ -254,12 +167,8 @@ static int tpm_tcg_read32(struct tpm_tis_data *data, u32 addr, u32 *result) { struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - tpm_platform_begin_xfer(); - *result = ioread32(phy->iobase + addr); - tpm_platform_end_xfer(); - return 0; } @@ -267,12 +176,8 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value) { struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - tpm_platform_begin_xfer(); - iowrite32(value, phy->iobase + addr); - tpm_platform_end_xfer(); - return 0; } @@ -460,11 +365,6 @@ static int __init init_tis(void) if (rc) goto err_force; -#ifdef CONFIG_X86 - if (is_bsw()) - ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR, - ILB_REMAP_SIZE); -#endif rc = platform_driver_register(&tis_drv); if (rc) goto err_platform; @@ -483,10 +383,6 @@ static int __init init_tis(void) err_platform: if (force_pdev) platform_device_unregister(force_pdev); -#ifdef CONFIG_X86 - if (is_bsw()) - iounmap(ilb_base_addr); -#endif err_force: return rc; } @@ -496,10 +392,6 @@ static void __exit cleanup_tis(void) pnp_unregister_driver(&tis_pnp_driver); platform_driver_unregister(&tis_drv); -#ifdef CONFIG_X86 - if (is_bsw()) - iounmap(ilb_base_addr); -#endif if (force_pdev) platform_device_unregister(force_pdev); } diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 63bc6c3b949e..58123df6b5f6 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -31,6 +31,8 @@ #include "tpm.h" #include "tpm_tis_core.h" +static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value); + /* Before we attempt to access the TPM we must see that the valid bit is set. 
* The specification says that this bit is 0 at reset and remains 0 until the * 'TPM has gone through its self test and initialization and has established @@ -75,11 +77,13 @@ static bool check_locality(struct tpm_chip *chip, int l) return false; } -static void release_locality(struct tpm_chip *chip, int l) +static int release_locality(struct tpm_chip *chip, int l) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY); + + return 0; } static int request_locality(struct tpm_chip *chip, int l) @@ -202,7 +206,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int size = 0; - int expected, status; + int status; + u32 expected; if (count < TPM_HEADER_SIZE) { size = -EIO; @@ -217,7 +222,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) } expected = be32_to_cpu(*(__be32 *) (buf + 2)); - if (expected > count) { + if (expected > count || expected < TPM_HEADER_SIZE) { size = -EIO; goto out; } @@ -252,7 +257,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) * tpm.c can skip polling for the data to be available as the interrupt is * waited for here */ -static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len) +static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int rc, status, burstcnt; @@ -343,7 +348,7 @@ static void disable_interrupts(struct tpm_chip *chip) * tpm.c can skip polling for the data to be available as the interrupt is * waited for here */ -static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len) +static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int rc; @@ -421,19 +426,28 @@ static bool tpm_tis_update_timeouts(struct tpm_chip *chip, int i, rc; u32 did_vid; + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, true); + rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid); if (rc < 0) - return rc; + goto out; for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) { if (vendor_timeout_overrides[i].did_vid != did_vid) continue; memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us, sizeof(vendor_timeout_overrides[i].timeout_us)); - return true; + rc = true; + goto out; } - return false; + rc = false; + +out: + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, false); + + return rc; } /* @@ -653,14 +667,73 @@ void tpm_tis_remove(struct tpm_chip *chip) u32 interrupt; int rc; + tpm_tis_clkrun_enable(chip, true); + rc = tpm_tis_read32(priv, reg, &interrupt); if (rc < 0) interrupt = 0; tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt); + + tpm_tis_clkrun_enable(chip, false); + + if (priv->ilb_base_addr) + iounmap(priv->ilb_base_addr); } EXPORT_SYMBOL_GPL(tpm_tis_remove); +/** + * tpm_tis_clkrun_enable() - Keep clkrun protocol disabled for entire duration + * of a single TPM command + * @chip: TPM chip to use + * @value: 1 - Disable CLKRUN protocol, so that clocks are free running + * 0 - Enable CLKRUN protocol + * Call this function directly in tpm_tis_remove() in error or driver removal + * path, since the chip->ops is set to NULL in tpm_chip_unregister(). 
+ */ +static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value) +{ + struct tpm_tis_data *data = dev_get_drvdata(&chip->dev); + u32 clkrun_val; + + if (!IS_ENABLED(CONFIG_X86) || !is_bsw() || + !data->ilb_base_addr) + return; + + if (value) { + data->clkrun_enabled++; + if (data->clkrun_enabled > 1) + return; + clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET); + + /* Disable LPC CLKRUN# */ + clkrun_val &= ~LPC_CLKRUN_EN; + iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET); + + /* + * Write any random value on port 0x80 which is on LPC, to make + * sure LPC clock is running before sending any TPM command. + */ + outb(0xCC, 0x80); + } else { + data->clkrun_enabled--; + if (data->clkrun_enabled) + return; + + clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET); + + /* Enable LPC CLKRUN# */ + clkrun_val |= LPC_CLKRUN_EN; + iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET); + + /* + * Write any random value on port 0x80 which is on LPC, to make + * sure LPC clock is running before sending any TPM command. + */ + outb(0xCC, 0x80); + } +} + static const struct tpm_class_ops tpm_tis = { .flags = TPM_OPS_AUTO_STARTUP, .status = tpm_tis_status, @@ -673,6 +746,7 @@ static const struct tpm_class_ops tpm_tis = { .req_canceled = tpm_tis_req_canceled, .request_locality = request_locality, .relinquish_locality = release_locality, + .clk_enable = tpm_tis_clkrun_enable, }; int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, @@ -680,6 +754,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, acpi_handle acpi_dev_handle) { u32 vendor, intfcaps, intmask; + u32 clkrun_val; u8 rid; int rc, probe; struct tpm_chip *chip; @@ -700,6 +775,23 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, priv->phy_ops = phy_ops; dev_set_drvdata(&chip->dev, priv); + if (is_bsw()) { + priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR, + ILB_REMAP_SIZE); + if (!priv->ilb_base_addr) + return -ENOMEM; + + clkrun_val = ioread32(priv->ilb_base_addr + LPC_CNTRL_OFFSET); + /* Check if CLKRUN# is already not enabled in the LPC bus */ + if (!(clkrun_val & LPC_CLKRUN_EN)) { + iounmap(priv->ilb_base_addr); + priv->ilb_base_addr = NULL; + } + } + + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, true); + if (wait_startup(chip, 0) != 0) { rc = -ENODEV; goto out_err; @@ -790,9 +882,20 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, } } - return tpm_chip_register(chip); + rc = tpm_chip_register(chip); + if (rc) + goto out_err; + + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, false); + + return 0; out_err: + if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL)) + chip->ops->clk_enable(chip, false); + tpm_tis_remove(chip); + return rc; } EXPORT_SYMBOL_GPL(tpm_tis_core_init); @@ -804,22 +907,31 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip) u32 intmask; int rc; + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, true); + /* reenable interrupts that device may have lost or * BIOS/firmware may have disabled */ rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq); if (rc < 0) - return; + goto out; rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask); if (rc < 0) - return; + goto out; intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE; tpm_tis_write32(priv, 
TPM_INT_ENABLE(priv->locality), intmask); + +out: + if (chip->ops->clk_enable != NULL) + chip->ops->clk_enable(chip, false); + + return; } int tpm_tis_resume(struct device *dev) diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h index e2212f021a02..d5c6a2e952b3 100644 --- a/drivers/char/tpm/tpm_tis_core.h +++ b/drivers/char/tpm/tpm_tis_core.h @@ -79,6 +79,11 @@ enum tis_defaults { #define TPM_DID_VID(l) (0x0F00 | ((l) << 12)) #define TPM_RID(l) (0x0F04 | ((l) << 12)) +#define LPC_CNTRL_OFFSET 0x84 +#define LPC_CLKRUN_EN (1 << 2) +#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000 +#define ILB_REMAP_SIZE 0x100 + enum tpm_tis_flags { TPM_TIS_ITPM_WORKAROUND = BIT(0), }; @@ -89,6 +94,8 @@ struct tpm_tis_data { int irq; bool irq_tested; unsigned int flags; + void __iomem *ilb_base_addr; + u16 clkrun_enabled; wait_queue_head_t int_queue; wait_queue_head_t read_queue; const struct tpm_tis_phy_ops *phy_ops; @@ -98,7 +105,7 @@ struct tpm_tis_phy_ops { int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len, u8 *result); int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len, - u8 *value); + const u8 *value); int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result); int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result); int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src); @@ -128,7 +135,7 @@ static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr, } static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr, - u16 len, u8 *value) + u16 len, const u8 *value) { return data->phy_ops->write_bytes(data, addr, len, value); } @@ -144,6 +151,15 @@ static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr, return data->phy_ops->write32(data, addr, value); } +static inline bool is_bsw(void) +{ +#ifdef CONFIG_X86 + return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0); +#else + return false; +#endif +} + void tpm_tis_remove(struct tpm_chip *chip); int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, const struct tpm_tis_phy_ops *phy_ops, diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c index 88fe72ae967f..8ab0bd8445f6 100644 --- a/drivers/char/tpm/tpm_tis_spi.c +++ b/drivers/char/tpm/tpm_tis_spi.c @@ -46,9 +46,7 @@ struct tpm_tis_spi_phy { struct tpm_tis_data priv; struct spi_device *spi_device; - - u8 tx_buf[4]; - u8 rx_buf[4]; + u8 *iobuf; }; static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data) @@ -57,7 +55,7 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da } static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, - u8 *buffer, u8 direction) + u8 *in, const u8 *out) { struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data); int ret = 0; @@ -71,14 +69,14 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, while (len) { transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE); - phy->tx_buf[0] = direction | (transfer_len - 1); - phy->tx_buf[1] = 0xd4; - phy->tx_buf[2] = addr >> 8; - phy->tx_buf[3] = addr; + phy->iobuf[0] = (in ? 
0x80 : 0) | (transfer_len - 1); + phy->iobuf[1] = 0xd4; + phy->iobuf[2] = addr >> 8; + phy->iobuf[3] = addr; memset(&spi_xfer, 0, sizeof(spi_xfer)); - spi_xfer.tx_buf = phy->tx_buf; - spi_xfer.rx_buf = phy->rx_buf; + spi_xfer.tx_buf = phy->iobuf; + spi_xfer.rx_buf = phy->iobuf; spi_xfer.len = 4; spi_xfer.cs_change = 1; @@ -88,9 +86,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, if (ret < 0) goto exit; - if ((phy->rx_buf[3] & 0x01) == 0) { + if ((phy->iobuf[3] & 0x01) == 0) { // handle SPI wait states - phy->tx_buf[0] = 0; + phy->iobuf[0] = 0; for (i = 0; i < TPM_RETRY; i++) { spi_xfer.len = 1; @@ -99,7 +97,7 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, ret = spi_sync_locked(phy->spi_device, &m); if (ret < 0) goto exit; - if (phy->rx_buf[0] & 0x01) + if (phy->iobuf[0] & 0x01) break; } @@ -113,12 +111,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, spi_xfer.len = transfer_len; spi_xfer.delay_usecs = 5; - if (direction) { + if (in) { spi_xfer.tx_buf = NULL; - spi_xfer.rx_buf = buffer; - } else { - spi_xfer.tx_buf = buffer; + } else if (out) { spi_xfer.rx_buf = NULL; + memcpy(phy->iobuf, out, transfer_len); + out += transfer_len; } spi_message_init(&m); @@ -127,8 +125,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, if (ret < 0) goto exit; + if (in) { + memcpy(in, phy->iobuf, transfer_len); + in += transfer_len; + } + len -= transfer_len; - buffer += transfer_len; } exit: @@ -139,13 +141,13 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len, u8 *result) { - return tpm_tis_spi_transfer(data, addr, len, result, 0x80); + return tpm_tis_spi_transfer(data, addr, len, result, NULL); } static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr, - u16 len, u8 *value) + u16 len, const u8 *value) { - return tpm_tis_spi_transfer(data, addr, len, value, 0); + return tpm_tis_spi_transfer(data, addr, len, NULL, value); } static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result) @@ -194,6 +196,10 @@ static int tpm_tis_spi_probe(struct spi_device *dev) phy->spi_device = dev; + phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL); + if (!phy->iobuf) + return -ENOMEM; + return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops, NULL); } diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c index 1d877cc9af97..484c29fcc76d 100644 --- a/drivers/char/tpm/tpm_vtpm_proxy.c +++ b/drivers/char/tpm/tpm_vtpm_proxy.c @@ -418,7 +418,7 @@ static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality) proxy_dev->state |= STATE_DRIVER_COMMAND; rc = tpm_transmit_cmd(chip, NULL, buf.data, tpm_buf_length(&buf), 0, - TPM_TRANSMIT_UNLOCKED | TPM_TRANSMIT_RAW, + TPM_TRANSMIT_NESTED, "attempting to set locality"); proxy_dev->state &= ~STATE_DRIVER_COMMAND; diff --git a/drivers/char/vhm/Makefile b/drivers/char/vhm/Makefile new file mode 100644 index 000000000000..cb801c70a37e --- /dev/null +++ b/drivers/char/vhm/Makefile @@ -0,0 +1 @@ +obj-y += vhm_dev.o diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c new file mode 100644 index 000000000000..236a5c70e218 --- /dev/null +++ b/drivers/char/vhm/vhm_dev.c @@ -0,0 +1,796 @@ +/* + * virtio and hypervisor service module (VHM): main framework + * + * This file is provided under a dual BSD/GPLv2 license. 
When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Liang Ding + * Jason Zeng + * Xiao Zheng + * Jason Chen CJ + * Jack Ren + * Mingqiang Chi + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#define DEVICE_NAME "acrn_vhm" +#define CLASS_NAME "vhm" + +#define VHM_API_VERSION_MAJOR 1 +#define VHM_API_VERSION_MINOR 0 + +static int major; +static struct class *vhm_class; +static struct device *vhm_device; +static struct tasklet_struct vhm_io_req_tasklet; + +struct table_iomems { + /* list node for this table_iomems */ + struct list_head list; + /* device's physical BDF */ + unsigned short phys_bdf; + /* virtual base address of MSI-X table in memory space after ioremap */ + unsigned long mmap_addr; +}; +static LIST_HEAD(table_iomems_list); +static DEFINE_MUTEX(table_iomems_lock); + +static int vhm_dev_open(struct inode *inodep, struct file *filep) +{ + struct vhm_vm *vm; + int i; + + vm = kzalloc(sizeof(struct vhm_vm), GFP_KERNEL); + pr_info("vhm_dev_open: opening device node\n"); + + if (!vm) + return -ENOMEM; + vm->vmid = ACRN_INVALID_VMID; + vm->dev = vhm_device; + + for (i = 0; i < HUGEPAGE_HLIST_ARRAY_SIZE; i++) + INIT_HLIST_HEAD(&vm->hugepage_hlist[i]); + mutex_init(&vm->hugepage_lock); + + INIT_LIST_HEAD(&vm->ioreq_client_list); + spin_lock_init(&vm->ioreq_client_lock); + + vm_mutex_lock(&vhm_vm_list_lock); + vm->refcnt = 1; + vm_list_add(&vm->list); + vm_mutex_unlock(&vhm_vm_list_lock); + filep->private_data = vm; + return 0; +} + +static ssize_t vhm_dev_read(struct file *filep, char *buffer, size_t len, + loff_t *offset) +{ + /* Does Nothing */ + pr_info("vhm_dev_read: reading device node\n"); + return 0; +} + +static ssize_t vhm_dev_write(struct file *filep, const char *buffer, + size_t len, loff_t *offset) +{ + /* Does Nothing */ + pr_info("vhm_dev_write: writing device node\n"); + return 0; +} + +static long vhm_dev_ioctl(struct file *filep, + unsigned int ioctl_num, unsigned long ioctl_param) +{ + long ret = 0; + struct vhm_vm *vm; + + trace_printk("[%s] ioctl_num=0x%x\n", __func__, ioctl_num); + + if (ioctl_num == IC_GET_API_VERSION) { + struct api_version api_version; + + api_version.major_version = VHM_API_VERSION_MAJOR; + api_version.minor_version = VHM_API_VERSION_MINOR; + + if (copy_to_user((void *)ioctl_param, &api_version, + sizeof(struct api_version))) + return -EFAULT; + + return 0; + } else if (ioctl_num == IC_PM_SET_SSTATE_DATA) { + struct acpi_sstate_data host_sstate_data; + + if (copy_from_user(&host_sstate_data, + (void *)ioctl_param, sizeof(host_sstate_data))) + return -EFAULT; + + ret = hcall_set_sstate_data(virt_to_phys(&host_sstate_data)); + if (ret < 0) { + pr_err("vhm: failed to set host Sstate data!"); + return -EFAULT; + } + return 0; + } + + vm = (struct vhm_vm *)filep->private_data; + if (vm == NULL) { + pr_err("vhm: invalid VM !\n"); + return -EFAULT; + } + if ((vm->vmid == ACRN_INVALID_VMID) && (ioctl_num != IC_CREATE_VM)) { + pr_err("vhm: invalid VM ID !\n"); + return -EFAULT; + } + + switch (ioctl_num) { + case IC_CREATE_VM: { + struct acrn_create_vm created_vm; + + if (copy_from_user(&created_vm, (void *)ioctl_param, + sizeof(struct acrn_create_vm))) + return -EFAULT; + + ret = hcall_create_vm(virt_to_phys(&created_vm)); + if ((ret < 0) || + (created_vm.vmid == ACRN_INVALID_VMID)) { + pr_err("vhm: failed to create VM from Hypervisor !\n"); + return -EFAULT; + } 
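+ /* created_vm has been filled in by the hypervisor; copy it back so the caller learns the new vmid */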
+ + if (copy_to_user((void *)ioctl_param, &created_vm, + sizeof(struct acrn_create_vm))) { + ret = -EFAULT; + goto create_vm_fail; + } + vm->vmid = created_vm.vmid; + + if (created_vm.vm_flag & SECURE_WORLD_ENABLED) { + ret = init_trusty(vm); + if (ret < 0) { + pr_err("vhm: failed to init trusty for VM!\n"); + goto create_vm_fail; + } + } + + if (created_vm.req_buf) { + ret = acrn_ioreq_init(vm, created_vm.req_buf); + if (ret < 0) + goto ioreq_buf_fail; + } + + pr_info("vhm: VM %d created\n", created_vm.vmid); + break; +ioreq_buf_fail: + if (created_vm.vm_flag & SECURE_WORLD_ENABLED) + deinit_trusty(vm); +create_vm_fail: + hcall_destroy_vm(created_vm.vmid); + vm->vmid = ACRN_INVALID_VMID; + break; + + } + + case IC_START_VM: { + ret = hcall_start_vm(vm->vmid); + if (ret < 0) { + pr_err("vhm: failed to start VM %ld!\n", vm->vmid); + return -EFAULT; + } + break; + } + + case IC_PAUSE_VM: { + ret = hcall_pause_vm(vm->vmid); + if (ret < 0) { + pr_err("vhm: failed to pause VM %ld!\n", vm->vmid); + return -EFAULT; + } + break; + } + + case IC_RESET_VM: { + ret = hcall_reset_vm(vm->vmid); + if (ret < 0) { + pr_err("vhm: failed to restart VM %ld!\n", vm->vmid); + return -EFAULT; + } + break; + } + + case IC_DESTROY_VM: { + if (vm->trusty_host_gpa) + deinit_trusty(vm); + ret = hcall_destroy_vm(vm->vmid); + if (ret < 0) { + pr_err("failed to destroy VM %ld\n", vm->vmid); + return -EFAULT; + } + vm->vmid = ACRN_INVALID_VMID; + break; + } + + case IC_CREATE_VCPU: { + struct acrn_create_vcpu cv; + + if (copy_from_user(&cv, (void *)ioctl_param, + sizeof(struct acrn_create_vcpu))) + return -EFAULT; + + ret = acrn_hypercall2(HC_CREATE_VCPU, vm->vmid, + virt_to_phys(&cv)); + if (ret < 0) { + pr_err("vhm: failed to create vcpu %d!\n", cv.vcpu_id); + return -EFAULT; + } + atomic_inc(&vm->vcpu_num); + + return ret; + } + + case IC_SET_MEMSEG: { + struct vm_memmap memmap; + + if (copy_from_user(&memmap, (void *)ioctl_param, + sizeof(struct vm_memmap))) + return -EFAULT; + + ret = map_guest_memseg(vm, &memmap); + break; + } + + case IC_SET_IOREQ_BUFFER: { + /* init ioreq buffer */ + ret = acrn_ioreq_init(vm, (unsigned long)ioctl_param); + if (ret < 0 && ret != -EEXIST) + return ret; + ret = 0; + break; + } + + case IC_CREATE_IOREQ_CLIENT: { + int client_id; + + client_id = acrn_ioreq_create_fallback_client(vm->vmid, "acrndm"); + if (client_id < 0) + return -EFAULT; + return client_id; + } + + case IC_DESTROY_IOREQ_CLIENT: { + int client = ioctl_param; + + acrn_ioreq_destroy_client(client); + break; + } + + case IC_ATTACH_IOREQ_CLIENT: { + int client = ioctl_param; + + return acrn_ioreq_attach_client(client, 0); + } + + case IC_NOTIFY_REQUEST_FINISH: { + struct ioreq_notify notify; + + if (copy_from_user(¬ify, (void *)ioctl_param, + sizeof(notify))) + return -EFAULT; + + ret = acrn_ioreq_complete_request(notify.client_id, notify.vcpu); + if (ret < 0) + return -EFAULT; + break; + } + + case IC_ASSERT_IRQLINE: { + struct acrn_irqline irq; + + if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) + return -EFAULT; + + ret = hcall_assert_irqline(vm->vmid, virt_to_phys(&irq)); + if (ret < 0) { + pr_err("vhm: failed to assert irq!\n"); + return -EFAULT; + } + break; + } + case IC_DEASSERT_IRQLINE: { + struct acrn_irqline irq; + + if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) + return -EFAULT; + + ret = hcall_deassert_irqline(vm->vmid, virt_to_phys(&irq)); + if (ret < 0) { + pr_err("vhm: failed to deassert irq!\n"); + return -EFAULT; + } + break; + } + case IC_PULSE_IRQLINE: { + struct acrn_irqline 
irq; + + if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) + return -EFAULT; + + ret = hcall_pulse_irqline(vm->vmid, + virt_to_phys(&irq)); + if (ret < 0) { + pr_err("vhm: failed to assert irq!\n"); + return -EFAULT; + } + break; + } + + case IC_INJECT_MSI: { + struct acrn_msi_entry msi; + + if (copy_from_user(&msi, (void *)ioctl_param, sizeof(msi))) + return -EFAULT; + + ret = hcall_inject_msi(vm->vmid, virt_to_phys(&msi)); + if (ret < 0) { + pr_err("vhm: failed to inject!\n"); + return -EFAULT; + } + break; + } + + case IC_ASSIGN_PTDEV: { + uint16_t bdf; + + if (copy_from_user(&bdf, + (void *)ioctl_param, sizeof(uint16_t))) + return -EFAULT; + + ret = hcall_assign_ptdev(vm->vmid, virt_to_phys(&bdf)); + if (ret < 0) { + pr_err("vhm: failed to assign ptdev!\n"); + return -EFAULT; + } + break; + } + case IC_DEASSIGN_PTDEV: { + uint16_t bdf; + + if (copy_from_user(&bdf, + (void *)ioctl_param, sizeof(uint16_t))) + return -EFAULT; + + ret = hcall_deassign_ptdev(vm->vmid, virt_to_phys(&bdf)); + if (ret < 0) { + pr_err("vhm: failed to deassign ptdev!\n"); + return -EFAULT; + } + break; + } + + case IC_SET_PTDEV_INTR_INFO: { + struct ic_ptdev_irq ic_pt_irq; + struct hc_ptdev_irq hc_pt_irq; + struct table_iomems *new; + + if (copy_from_user(&ic_pt_irq, + (void *)ioctl_param, sizeof(ic_pt_irq))) + return -EFAULT; + + memcpy(&hc_pt_irq, &ic_pt_irq, sizeof(hc_pt_irq)); + + ret = hcall_set_ptdev_intr_info(vm->vmid, + virt_to_phys(&hc_pt_irq)); + if (ret < 0) { + pr_err("vhm: failed to set intr info for ptdev!\n"); + return -EFAULT; + } + + if ((ic_pt_irq.type == IRQ_MSIX) && + ic_pt_irq.msix.table_paddr) { + new = kmalloc(sizeof(struct table_iomems), GFP_KERNEL); + if (new == NULL) + return -EFAULT; + new->phys_bdf = ic_pt_irq.phys_bdf; + new->mmap_addr = (unsigned long) + ioremap_nocache(ic_pt_irq.msix.table_paddr, + ic_pt_irq.msix.table_size); + + mutex_lock(&table_iomems_lock); + list_add(&new->list, &table_iomems_list); + mutex_unlock(&table_iomems_lock); + } + + break; + } + case IC_RESET_PTDEV_INTR_INFO: { + struct ic_ptdev_irq ic_pt_irq; + struct hc_ptdev_irq hc_pt_irq; + struct table_iomems *ptr; + int dev_found = 0; + + if (copy_from_user(&ic_pt_irq, + (void *)ioctl_param, sizeof(ic_pt_irq))) + return -EFAULT; + + memcpy(&hc_pt_irq, &ic_pt_irq, sizeof(hc_pt_irq)); + + ret = hcall_reset_ptdev_intr_info(vm->vmid, + virt_to_phys(&hc_pt_irq)); + if (ret < 0) { + pr_err("vhm: failed to reset intr info for ptdev!\n"); + return -EFAULT; + } + + if (ic_pt_irq.type == IRQ_MSIX) { + mutex_lock(&table_iomems_lock); + list_for_each_entry(ptr, &table_iomems_list, list) { + if (ptr->phys_bdf == ic_pt_irq.phys_bdf) { + dev_found = 1; + break; + } + } + if (dev_found) { + iounmap((void __iomem *)ptr->mmap_addr); + list_del(&ptr->list); + } + mutex_unlock(&table_iomems_lock); + } + + break; + } + + case IC_VM_PCI_MSIX_REMAP: { + struct acrn_vm_pci_msix_remap msix_remap; + + if (copy_from_user(&msix_remap, + (void *)ioctl_param, sizeof(msix_remap))) + return -EFAULT; + + ret = hcall_remap_pci_msix(vm->vmid, virt_to_phys(&msix_remap)); + + if (copy_to_user((void *)ioctl_param, + &msix_remap, sizeof(msix_remap))) + return -EFAULT; + + if (msix_remap.msix) { + void __iomem *msix_entry; + struct table_iomems *ptr; + int dev_found = 0; + + mutex_lock(&table_iomems_lock); + list_for_each_entry(ptr, &table_iomems_list, list) { + if (ptr->phys_bdf == msix_remap.phys_bdf) { + dev_found = 1; + break; + } + } + mutex_unlock(&table_iomems_lock); + + if (!dev_found || !ptr->mmap_addr) + return -EFAULT; + + 
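+ /* program the ioremapped MSI-X table entry selected by msix_entry_index */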
+			msix_entry = (void __iomem *)(ptr->mmap_addr +
+					msix_remap.msix_entry_index *
+					PCI_MSIX_ENTRY_SIZE);
+
+			/* mask the entry when setup */
+			writel(PCI_MSIX_ENTRY_CTRL_MASKBIT,
+				msix_entry + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
+			/* setup the msi entry */
+			writel((uint32_t)msix_remap.msi_addr,
+				msix_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
+			writel((uint32_t)(msix_remap.msi_addr >> 32),
+				msix_entry + PCI_MSIX_ENTRY_UPPER_ADDR);
+			writel(msix_remap.msi_data,
+				msix_entry + PCI_MSIX_ENTRY_DATA);
+
+			/* unmask the entry */
+			writel(msix_remap.vector_ctl &
+				PCI_MSIX_ENTRY_CTRL_MASKBIT,
+				msix_entry + PCI_MSIX_ENTRY_VECTOR_CTRL);
+		}
+		break;
+	}
+
+	case IC_PM_GET_CPU_STATE: {
+		uint64_t cmd;
+
+		if (copy_from_user(&cmd,
+				(void *)ioctl_param, sizeof(cmd)))
+			return -EFAULT;
+
+		switch (cmd & PMCMD_TYPE_MASK) {
+		case PMCMD_GET_PX_CNT:
+		case PMCMD_GET_CX_CNT: {
+			uint64_t pm_info;
+
+			ret = hcall_get_cpu_state(cmd, virt_to_phys(&pm_info));
+			if (ret < 0)
+				return -EFAULT;
+
+			if (copy_to_user((void *)ioctl_param,
+					&pm_info, sizeof(pm_info)))
+				ret = -EFAULT;
+
+			break;
+		}
+		case PMCMD_GET_PX_DATA: {
+			struct cpu_px_data px_data;
+
+			ret = hcall_get_cpu_state(cmd, virt_to_phys(&px_data));
+			if (ret < 0)
+				return -EFAULT;
+
+			if (copy_to_user((void *)ioctl_param,
+					&px_data, sizeof(px_data)))
+				ret = -EFAULT;
+			break;
+		}
+		case PMCMD_GET_CX_DATA: {
+			struct cpu_cx_data cx_data;
+
+			ret = hcall_get_cpu_state(cmd, virt_to_phys(&cx_data));
+			if (ret < 0)
+				return -EFAULT;
+
+			if (copy_to_user((void *)ioctl_param,
+					&cx_data, sizeof(cx_data)))
+				ret = -EFAULT;
+			break;
+		}
+		default:
+			ret = -EFAULT;
+			break;
+		}
+
+		break;
+	}
+
+	default:
+		pr_warn("Unknown IOCTL 0x%x\n", ioctl_num);
+		ret = 0;
+		break;
+	}
+
+	return ret;
+}
+
+static void io_req_tasklet(unsigned long data)
+{
+	struct vhm_vm *vm;
+
+	list_for_each_entry(vm, &vhm_vm_list, list) {
+		/* skip VMs without an ioreq buffer rather than stopping the scan */
+		if (!vm->req_buf)
+			continue;
+
+		acrn_ioreq_distribute_request(vm);
+	}
+}
+
+static void vhm_intr_handler(void)
+{
+	tasklet_schedule(&vhm_io_req_tasklet);
+}
+
+static int vhm_dev_release(struct inode *inodep, struct file *filep)
+{
+	struct vhm_vm *vm = filep->private_data;
+
+	if (vm == NULL) {
+		pr_err("vhm: invalid VM!\n");
+		return -EFAULT;
+	}
+	put_vm(vm);
+	filep->private_data = NULL;
+	return 0;
+}
+
+static const struct file_operations fops = {
+	.open = vhm_dev_open,
+	.read = vhm_dev_read,
+	.write = vhm_dev_write,
+	.release = vhm_dev_release,
+	.unlocked_ioctl = vhm_dev_ioctl,
+	.poll = vhm_dev_poll,
+};
+
+static ssize_t
+store_offline_cpu(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+#ifdef CONFIG_X86
+	u64 cpu, lapicid;
+
+	if (kstrtoull(buf, 0, &cpu) < 0)
+		return -EINVAL;
+
+	if (cpu_possible(cpu)) {
+		lapicid = cpu_data(cpu).apicid;
+		pr_info("vhm: try to offline cpu %llu with lapicid %llu\n",
+			cpu, lapicid);
+		if (hcall_sos_offline_cpu(lapicid) < 0) {
+			pr_err("vhm: failed to offline cpu from hypervisor!\n");
+			return -EINVAL;
+		}
+	}
+#endif
+	return count;
+}
+
+static DEVICE_ATTR(offline_cpu, S_IWUSR, NULL, store_offline_cpu);
+
+static struct attribute *vhm_attrs[] = {
+	&dev_attr_offline_cpu.attr,
+	NULL
+};
+
+static struct attribute_group vhm_attr_group = {
+	.attrs = vhm_attrs,
+};
+
+#define SUPPORT_HV_API_VERSION_MAJOR	1
+#define SUPPORT_HV_API_VERSION_MINOR	0
+static int __init vhm_init(void)
+{
+	unsigned long flag;
+	struct hc_api_version api_version = {0, 0};
+
+	if (x86_hyper_type != X86_HYPER_ACRN)
+		return -ENODEV;
+
+	pr_info("vhm: initializing\n");
+
+	if (hcall_get_api_version(virt_to_phys(&api_version)) < 0) {
+		pr_err("vhm: failed to get api version from hypervisor!\n");
+		return -EINVAL;
+	}
+
+	if (api_version.major_version == SUPPORT_HV_API_VERSION_MAJOR &&
+		api_version.minor_version == SUPPORT_HV_API_VERSION_MINOR) {
+		pr_info("vhm: hv api version %d.%d\n",
+			api_version.major_version, api_version.minor_version);
+	} else {
+		pr_err("vhm: unsupported hv api version %d.%d!\n",
+			api_version.major_version, api_version.minor_version);
+		return -EINVAL;
+	}
+
+	/* Try to dynamically allocate a major number for the device */
+	major = register_chrdev(0, DEVICE_NAME, &fops);
+	if (major < 0) {
+		pr_warn("vhm: failed to register a major number\n");
+		return major;
+	}
+	pr_info("vhm: registered correctly with major number %d\n", major);
+
+	/* Register the device class */
+	vhm_class = class_create(THIS_MODULE, CLASS_NAME);
+	if (IS_ERR(vhm_class)) {
+		unregister_chrdev(major, DEVICE_NAME);
+		pr_warn("vhm: failed to register device class\n");
+		return PTR_ERR(vhm_class);
+	}
+	pr_info("vhm: device class registered correctly\n");
+
+	/* Register the device driver */
+	vhm_device = device_create(vhm_class, NULL, MKDEV(major, 0),
+			NULL, DEVICE_NAME);
+	if (IS_ERR(vhm_device)) {
+		class_destroy(vhm_class);
+		unregister_chrdev(major, DEVICE_NAME);
+		pr_warn("vhm: failed to create the device\n");
+		return PTR_ERR(vhm_device);
+	}
+	pr_info("register IPI handler\n");
+	tasklet_init(&vhm_io_req_tasklet, io_req_tasklet, 0);
+	if (x86_platform_ipi_callback) {
+		pr_warn("vhm: ipi callback was occupied\n");
+		return -EINVAL;
+	}
+	local_irq_save(flag);
+	x86_platform_ipi_callback = vhm_intr_handler;
+	local_irq_restore(flag);
+
+	if (sysfs_create_group(&vhm_device->kobj, &vhm_attr_group)) {
+		pr_warn("vhm: sysfs create failed\n");
+		return -EINVAL;
+	}
+
+	pr_info("vhm: Virtio & Hypervisor service module initialized\n");
+	return 0;
+}
+static void __exit vhm_exit(void)
+{
+	tasklet_kill(&vhm_io_req_tasklet);
+	/* remove the sysfs group before the device it hangs off is destroyed */
+	sysfs_remove_group(&vhm_device->kobj, &vhm_attr_group);
+	device_destroy(vhm_class, MKDEV(major, 0));
+	class_unregister(vhm_class);
+	class_destroy(vhm_class);
+	unregister_chrdev(major, DEVICE_NAME);
+	pr_info("vhm: exit\n");
+}
+
+module_init(vhm_init);
+module_exit(vhm_exit);
+
+MODULE_AUTHOR("Intel");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("This is a char device driver that acts as a router "
+	"responsible for transferring IO requests from other modules "
+	"either in user-space or in kernel to and from the hypervisor");
+MODULE_VERSION("0.1");
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d1aed2513bd9..a089474cb046 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -422,7 +422,7 @@ static void reclaim_dma_bufs(void)
 	}
 }
 
-static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
+static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
 				     int pages)
 {
 	struct port_buffer *buf;
@@ -445,16 +445,16 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
 		return buf;
 	}
 
-	if (is_rproc_serial(vq->vdev)) {
+	if (is_rproc_serial(vdev)) {
 		/*
 		 * Allocate DMA memory from ancestor. When a virtio
 		 * device is created by remoteproc, the DMA memory is
 		 * associated with the grandparent device:
		 * vdev => rproc => platform-dev.
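		 * Hence the two dev.parent hops below to reach the
		 * platform device that actually owns the DMA memory.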
*/ - if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent) + if (!vdev->dev.parent || !vdev->dev.parent->parent) goto free_buf; - buf->dev = vq->vdev->dev.parent->parent; + buf->dev = vdev->dev.parent->parent; /* Increase device refcnt to avoid freeing it */ get_device(buf->dev); @@ -838,7 +838,7 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, count = min((size_t)(32 * 1024), count); - buf = alloc_buf(port->out_vq, count, 0); + buf = alloc_buf(port->portdev->vdev, count, 0); if (!buf) return -ENOMEM; @@ -957,7 +957,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, if (ret < 0) goto error_out; - buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); + buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs); if (!buf) { ret = -ENOMEM; goto error_out; @@ -1374,7 +1374,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) nr_added_bufs = 0; do { - buf = alloc_buf(vq, PAGE_SIZE, 0); + buf = alloc_buf(vq->vdev, PAGE_SIZE, 0); if (!buf) break; @@ -1402,7 +1402,6 @@ static int add_port(struct ports_device *portdev, u32 id) { char debugfs_name[16]; struct port *port; - struct port_buffer *buf; dev_t devt; unsigned int nr_added_bufs; int err; @@ -1513,8 +1512,6 @@ static int add_port(struct ports_device *portdev, u32 id) return 0; free_inbufs: - while ((buf = virtqueue_detach_unused_buf(port->in_vq))) - free_buf(buf, true); free_device: device_destroy(pdrvdata.class, port->dev->devt); free_cdev: @@ -1539,34 +1536,14 @@ static void remove_port(struct kref *kref) static void remove_port_data(struct port *port) { - struct port_buffer *buf; - spin_lock_irq(&port->inbuf_lock); /* Remove unused data this port might have received. */ discard_port_data(port); spin_unlock_irq(&port->inbuf_lock); - /* Remove buffers we queued up for the Host to send us data in. */ - do { - spin_lock_irq(&port->inbuf_lock); - buf = virtqueue_detach_unused_buf(port->in_vq); - spin_unlock_irq(&port->inbuf_lock); - if (buf) - free_buf(buf, true); - } while (buf); - spin_lock_irq(&port->outvq_lock); reclaim_consumed_buffers(port); spin_unlock_irq(&port->outvq_lock); - - /* Free pending buffers from the out-queue. 
*/ - do { - spin_lock_irq(&port->outvq_lock); - buf = virtqueue_detach_unused_buf(port->out_vq); - spin_unlock_irq(&port->outvq_lock); - if (buf) - free_buf(buf, true); - } while (buf); } /* @@ -1791,13 +1768,24 @@ static void control_work_handler(struct work_struct *work) spin_unlock(&portdev->c_ivq_lock); } +static void flush_bufs(struct virtqueue *vq, bool can_sleep) +{ + struct port_buffer *buf; + unsigned int len; + + while ((buf = virtqueue_get_buf(vq, &len))) + free_buf(buf, can_sleep); +} + static void out_intr(struct virtqueue *vq) { struct port *port; port = find_port_by_vq(vq->vdev->priv, vq); - if (!port) + if (!port) { + flush_bufs(vq, false); return; + } wake_up_interruptible(&port->waitqueue); } @@ -1808,8 +1796,10 @@ static void in_intr(struct virtqueue *vq) unsigned long flags; port = find_port_by_vq(vq->vdev->priv, vq); - if (!port) + if (!port) { + flush_bufs(vq, false); return; + } spin_lock_irqsave(&port->inbuf_lock, flags); port->inbuf = get_inbuf(port); @@ -1984,24 +1974,54 @@ static const struct file_operations portdev_fops = { static void remove_vqs(struct ports_device *portdev) { + struct virtqueue *vq; + + virtio_device_for_each_vq(portdev->vdev, vq) { + struct port_buffer *buf; + + flush_bufs(vq, true); + while ((buf = virtqueue_detach_unused_buf(vq))) + free_buf(buf, true); + } portdev->vdev->config->del_vqs(portdev->vdev); kfree(portdev->in_vqs); kfree(portdev->out_vqs); } -static void remove_controlq_data(struct ports_device *portdev) +static void virtcons_remove(struct virtio_device *vdev) { - struct port_buffer *buf; - unsigned int len; + struct ports_device *portdev; + struct port *port, *port2; - if (!use_multiport(portdev)) - return; + portdev = vdev->priv; - while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) - free_buf(buf, true); + spin_lock_irq(&pdrvdata_lock); + list_del(&portdev->list); + spin_unlock_irq(&pdrvdata_lock); - while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) - free_buf(buf, true); + /* Disable interrupts for vqs */ + vdev->config->reset(vdev); + /* Finish up work that's lined up */ + if (use_multiport(portdev)) + cancel_work_sync(&portdev->control_work); + else + cancel_work_sync(&portdev->config_work); + + list_for_each_entry_safe(port, port2, &portdev->ports, list) + unplug_port(port); + + unregister_chrdev(portdev->chr_major, "virtio-portsdev"); + + /* + * When yanking out a device, we immediately lose the + * (device-side) queues. So there's no point in keeping the + * guest side around till we drop our final reference. This + * also means that any ports which are in an open state will + * have to just stop using the port, as the vqs are going + * away. + */ + remove_vqs(portdev); + kfree(portdev); } /* @@ -2070,6 +2090,7 @@ static int virtcons_probe(struct virtio_device *vdev) spin_lock_init(&portdev->ports_lock); INIT_LIST_HEAD(&portdev->ports); + INIT_LIST_HEAD(&portdev->list); virtio_device_ready(portdev->vdev); @@ -2087,8 +2108,15 @@ static int virtcons_probe(struct virtio_device *vdev) if (!nr_added_bufs) { dev_err(&vdev->dev, "Error allocating buffers for control queue\n"); - err = -ENOMEM; - goto free_vqs; + /* + * The host might want to notify mgmt sw about device + * add failure. + */ + __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, + VIRTIO_CONSOLE_DEVICE_READY, 0); + /* Device was functional: we need full cleanup. 
*/ + virtcons_remove(vdev); + return -ENOMEM; } } else { /* @@ -2119,11 +2147,6 @@ static int virtcons_probe(struct virtio_device *vdev) return 0; -free_vqs: - /* The host might want to notify mgmt sw about device add failure */ - __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, - VIRTIO_CONSOLE_DEVICE_READY, 0); - remove_vqs(portdev); free_chrdev: unregister_chrdev(portdev->chr_major, "virtio-portsdev"); free: @@ -2132,43 +2155,6 @@ static int virtcons_probe(struct virtio_device *vdev) return err; } -static void virtcons_remove(struct virtio_device *vdev) -{ - struct ports_device *portdev; - struct port *port, *port2; - - portdev = vdev->priv; - - spin_lock_irq(&pdrvdata_lock); - list_del(&portdev->list); - spin_unlock_irq(&pdrvdata_lock); - - /* Disable interrupts for vqs */ - vdev->config->reset(vdev); - /* Finish up work that's lined up */ - if (use_multiport(portdev)) - cancel_work_sync(&portdev->control_work); - else - cancel_work_sync(&portdev->config_work); - - list_for_each_entry_safe(port, port2, &portdev->ports, list) - unplug_port(port); - - unregister_chrdev(portdev->chr_major, "virtio-portsdev"); - - /* - * When yanking out a device, we immediately lose the - * (device-side) queues. So there's no point in keeping the - * guest side around till we drop our final reference. This - * also means that any ports which are in an open state will - * have to just stop using the port, as the vqs are going - * away. - */ - remove_controlq_data(portdev); - remove_vqs(portdev); - kfree(portdev); -} - static struct virtio_device_id id_table[] = { { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, { 0 }, @@ -2209,7 +2195,6 @@ static int virtcons_freeze(struct virtio_device *vdev) */ if (use_multiport(portdev)) virtqueue_disable_cb(portdev->c_ivq); - remove_controlq_data(portdev); list_for_each_entry(port, &portdev->ports, list) { virtqueue_disable_cb(port->in_vq); diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c index 7d3223fc7161..72b6091eb7b9 100644 --- a/drivers/clk/at91/clk-pll.c +++ b/drivers/clk/at91/clk-pll.c @@ -132,19 +132,8 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_pll *pll = to_clk_pll(hw); - unsigned int pllr; - u16 mul; - u8 div; - - regmap_read(pll->regmap, PLL_REG(pll->id), &pllr); - - div = PLL_DIV(pllr); - mul = PLL_MUL(pllr, pll->layout); - - if (!div || !mul) - return 0; - return (parent_rate / div) * (mul + 1); + return (parent_rate / pll->div) * (pll->mul + 1); } static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate, diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index 775af473fe11..5c2b26de303e 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c @@ -107,10 +107,20 @@ static int pmc_suspend(void) return 0; } +static bool pmc_ready(unsigned int mask) +{ + unsigned int status; + + regmap_read(pmcreg, AT91_PMC_SR, &status); + + return ((status & mask) == mask) ? 
1 : 0; +} + static void pmc_resume(void) { - int i, ret = 0; + int i; u32 tmp; + u32 mask = AT91_PMC_MCKRDY | AT91_PMC_LOCKA; regmap_read(pmcreg, AT91_PMC_MCKR, &tmp); if (pmc_cache.mckr != tmp) @@ -134,13 +144,11 @@ static void pmc_resume(void) AT91_PMC_PCR_CMD); } - if (pmc_cache.uckr & AT91_PMC_UPLLEN) { - ret = regmap_read_poll_timeout(pmcreg, AT91_PMC_SR, tmp, - !(tmp & AT91_PMC_LOCKU), - 10, 5000); - if (ret) - pr_crit("USB PLL didn't lock when resuming\n"); - } + if (pmc_cache.uckr & AT91_PMC_UPLLEN) + mask |= AT91_PMC_LOCKU; + + while (!pmc_ready(mask)) + cpu_relax(); } static struct syscore_ops pmc_syscore_ops = { diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 58ce6af8452d..5f8082d89131 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -479,17 +479,17 @@ struct bcm2835_pll_ana_bits { static const struct bcm2835_pll_ana_bits bcm2835_ana_default = { .mask0 = 0, .set0 = 0, - .mask1 = (u32)~(A2W_PLL_KI_MASK | A2W_PLL_KP_MASK), + .mask1 = A2W_PLL_KI_MASK | A2W_PLL_KP_MASK, .set1 = (2 << A2W_PLL_KI_SHIFT) | (8 << A2W_PLL_KP_SHIFT), - .mask3 = (u32)~A2W_PLL_KA_MASK, + .mask3 = A2W_PLL_KA_MASK, .set3 = (2 << A2W_PLL_KA_SHIFT), .fb_prediv_mask = BIT(14), }; static const struct bcm2835_pll_ana_bits bcm2835_ana_pllh = { - .mask0 = (u32)~(A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK), + .mask0 = A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK, .set0 = (2 << A2W_PLLH_KA_SHIFT) | (2 << A2W_PLLH_KI_LOW_SHIFT), - .mask1 = (u32)~(A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK), + .mask1 = A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK, .set1 = (6 << A2W_PLLH_KP_SHIFT), .mask3 = 0, .set3 = 0, @@ -632,9 +632,7 @@ static void bcm2835_pll_off(struct clk_hw *hw) const struct bcm2835_pll_data *data = pll->data; spin_lock(&cprman->regs_lock); - cprman_write(cprman, data->cm_ctrl_reg, - cprman_read(cprman, data->cm_ctrl_reg) | - CM_PLL_ANARST); + cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST); cprman_write(cprman, data->a2w_ctrl_reg, cprman_read(cprman, data->a2w_ctrl_reg) | A2W_PLL_CTRL_PWRDN); @@ -653,8 +651,10 @@ static int bcm2835_pll_on(struct clk_hw *hw) ~A2W_PLL_CTRL_PWRDN); /* Take the PLL out of reset. */ + spin_lock(&cprman->regs_lock); cprman_write(cprman, data->cm_ctrl_reg, cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST); + spin_unlock(&cprman->regs_lock); /* Wait for the PLL to lock. */ timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS); @@ -668,6 +668,10 @@ static int bcm2835_pll_on(struct clk_hw *hw) cpu_relax(); } + cprman_write(cprman, data->a2w_ctrl_reg, + cprman_read(cprman, data->a2w_ctrl_reg) | + A2W_PLL_CTRL_PRST_DISABLE); + return 0; } @@ -731,9 +735,11 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw, } /* Unmask the reference clock from the oscillator. 
*/ + spin_lock(&cprman->regs_lock); cprman_write(cprman, A2W_XOSC_CTRL, cprman_read(cprman, A2W_XOSC_CTRL) | data->reference_enable_mask); + spin_unlock(&cprman->regs_lock); if (do_ana_setup_first) bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana); diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c index 5e918e7afaba..95a6e9834392 100644 --- a/drivers/clk/clk-axi-clkgen.c +++ b/drivers/clk/clk-axi-clkgen.c @@ -40,6 +40,10 @@ #define MMCM_REG_FILTER1 0x4e #define MMCM_REG_FILTER2 0x4f +#define MMCM_CLKOUT_NOCOUNT BIT(6) + +#define MMCM_CLK_DIV_NOCOUNT BIT(12) + struct axi_clkgen { void __iomem *base; struct clk_hw clk_hw; @@ -315,12 +319,27 @@ static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw, unsigned int reg; unsigned long long tmp; - axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_1, ®); - dout = (reg & 0x3f) + ((reg >> 6) & 0x3f); + axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_2, ®); + if (reg & MMCM_CLKOUT_NOCOUNT) { + dout = 1; + } else { + axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_1, ®); + dout = (reg & 0x3f) + ((reg >> 6) & 0x3f); + } + axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_DIV, ®); - d = (reg & 0x3f) + ((reg >> 6) & 0x3f); - axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB1, ®); - m = (reg & 0x3f) + ((reg >> 6) & 0x3f); + if (reg & MMCM_CLK_DIV_NOCOUNT) + d = 1; + else + d = (reg & 0x3f) + ((reg >> 6) & 0x3f); + + axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB2, ®); + if (reg & MMCM_CLKOUT_NOCOUNT) { + m = 1; + } else { + axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB1, ®); + m = (reg & 0x3f) + ((reg >> 6) & 0x3f); + } if (d == 0 || dout == 0) return 0; diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index 4ed516cb7276..b49942b9fe50 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -118,12 +118,11 @@ static unsigned int _get_val(const struct clk_div_table *table, unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, unsigned int val, const struct clk_div_table *table, - unsigned long flags) + unsigned long flags, unsigned long width) { - struct clk_divider *divider = to_clk_divider(hw); unsigned int div; - div = _get_div(table, val, flags, divider->width); + div = _get_div(table, val, flags, width); if (!div) { WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO), "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n", @@ -145,7 +144,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, val &= div_mask(divider->width); return divider_recalc_rate(hw, parent_rate, val, divider->table, - divider->flags); + divider->flags, divider->width); } static bool _is_valid_table_div(const struct clk_div_table *table, diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c index 16a3d5717f4e..a062f79bc509 100644 --- a/drivers/clk/clk-mux.c +++ b/drivers/clk/clk-mux.c @@ -101,10 +101,18 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index) return 0; } +static int clk_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_mux *mux = to_clk_mux(hw); + + return clk_mux_determine_rate_flags(hw, req, mux->flags); +} + const struct clk_ops clk_mux_ops = { .get_parent = clk_mux_get_parent, .set_parent = clk_mux_set_parent, - .determine_rate = __clk_mux_determine_rate, + .determine_rate = clk_mux_determine_rate, }; EXPORT_SYMBOL_GPL(clk_mux_ops); diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c index 20d90769cced..653b0f38d475 100644 --- a/drivers/clk/clk-si5351.c +++ b/drivers/clk/clk-si5351.c @@ -72,7 +72,7 
@@ static const char * const si5351_input_names[] = {
 	"xtal", "clkin"
 };
 static const char * const si5351_pll_names[] = {
-	"plla", "pllb", "vxco"
+	"si5351_plla", "si5351_pllb", "si5351_vxco"
 };
 static const char * const si5351_msynth_names[] = {
 	"ms0", "ms1", "ms2", "ms3", "ms4", "ms5", "ms6", "ms7"
diff --git a/drivers/clk/clk-stm32h7.c b/drivers/clk/clk-stm32h7.c
index a94c3f56c590..61c3e40507d3 100644
--- a/drivers/clk/clk-stm32h7.c
+++ b/drivers/clk/clk-stm32h7.c
@@ -384,7 +384,7 @@ static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
 	mux_ops = div_ops = gate_ops = NULL;
 	mux_hw = div_hw = gate_hw = NULL;
 
-	if (gcfg->mux && gcfg->mux) {
+	if (gcfg->mux && cfg->mux) {
 		mux = _get_cmux(base + cfg->mux->offset,
 				cfg->mux->shift,
 				cfg->mux->width,
@@ -410,7 +410,7 @@ static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
 		}
 	}
 
-	if (gcfg->gate && gcfg->gate) {
+	if (gcfg->gate && cfg->gate) {
 		gate = _get_cgate(base + cfg->gate->offset,
 				cfg->gate->bit_idx,
 				gcfg->gate->flags, lock);
diff --git a/drivers/clk/clk-tps68470.c b/drivers/clk/clk-tps68470.c
new file mode 100644
index 000000000000..9196c98d26ee
--- /dev/null
+++ b/drivers/clk/clk-tps68470.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/kernel.h>
+#include <linux/mfd/tps68470.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define TPS68470_CLK_NAME "tps68470-clk"
+
+#define to_tps68470_clkdata(clkd) \
+	container_of(clkd, struct tps68470_clkdata, clkout_hw)
+
+static int osc_freq_hz = 20000000;
+module_param(osc_freq_hz, int, 0644);
+
+struct tps68470_clkout_freqs {
+	unsigned int freq;
+	unsigned int xtaldiv;
+	unsigned int plldiv;
+	unsigned int postdiv;
+	unsigned int buckdiv;
+	unsigned int boostdiv;
+} clk_freqs[] = {
+/*
+ * The PLL is used to multiply the crystal oscillator
+ * frequency range of 3 MHz to 27 MHz by a programmable
+ * factor of F = (M/N)*(1/P) such that the outputs
+ * available at the HCLK_A or HCLK_B pins are in the range
+ * of 4 MHz to 64 MHz in increments of 0.1 MHz.
+ *
+ * hclk_# = osc_in * (((plldiv*2)+320) / (xtaldiv+30)) * (1 / 2^postdiv)
+ *
+ * PLL_REF_CLK should be as close as possible to 100 kHz
+ * PLL_REF_CLK = input clk / (XTALDIV[7:0] + 30)
+ *
+ * PLL_VCO_CLK = (PLL_REF_CLK * (plldiv*2 + 320))
+ *
+ * BOOST should be as close as possible to 2 MHz
+ * BOOST = PLL_VCO_CLK / (BOOSTDIV[4:0] + 16)
+ *
+ * BUCK should be as close as possible to 5.2 MHz
+ * BUCK = PLL_VCO_CLK / (BUCKDIV[3:0] + 5)
+ *
+ * osc_in   xtaldiv  plldiv  postdiv  hclk_#
+ * 20 MHz   170      32      1        19.2 MHz
+ * 20 MHz   170      40      1        20 MHz
+ * 20 MHz   170      80      1        24 MHz
+ */
+	{ 19200000, 170, 32, 1, 2, 3 },
+	{ 20000000, 170, 40, 1, 3, 4 },
+	{ 24000000, 170, 80, 1, 4, 8 },
+};
+
+struct tps68470_clkdata {
+	struct tps68470 *tps68470;
+	struct clk_hw clkout_hw;
+	struct clk *clk;
+	int clk_cfg_idx;
+};
+
+static int tps68470_clk_is_prepared(struct clk_hw *hw)
+{
+	struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
+	struct tps68470 *tps = clkdata->tps68470;
+	int val;
+
+	if (tps68470_reg_read(tps, TPS68470_REG_PLLCTL, &val))
+		return 0;
+
+	return val & TPS68470_PLL_EN_MASK;
+}
+
+static int tps68470_clk_prepare(struct clk_hw *hw)
+{
+	struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
+	struct tps68470 *tps = clkdata->tps68470;
+	int idx = clkdata->clk_cfg_idx;
+	u8 val;
+
+	tps68470_reg_write(tps, TPS68470_REG_BOOSTDIV, clk_freqs[idx].boostdiv);
+	tps68470_reg_write(tps, TPS68470_REG_BUCKDIV, clk_freqs[idx].buckdiv);
+	tps68470_reg_write(tps, TPS68470_REG_PLLSWR, TPS68470_PLLSWR_DEFAULT);
+	tps68470_reg_write(tps, TPS68470_REG_XTALDIV, clk_freqs[idx].xtaldiv);
+	tps68470_reg_write(tps, TPS68470_REG_PLLDIV, clk_freqs[idx].plldiv);
+	tps68470_reg_write(tps, TPS68470_REG_POSTDIV, clk_freqs[idx].postdiv);
+	tps68470_reg_write(tps, TPS68470_REG_POSTDIV2, clk_freqs[idx].postdiv);
+
+	tps68470_reg_write(tps, TPS68470_REG_CLKCFG2,
+			   TPS68470_DRV_STR_2MA << TPS68470_OUTPUT_A_SHIFT);
+	tps68470_reg_write(tps, TPS68470_REG_CLKCFG1,
+			   (TPS68470_PLL_OUTPUT_ENABLE <<
+			   TPS68470_OUTPUT_A_SHIFT) |
+			   (TPS68470_PLL_OUTPUT_ENABLE <<
+			   TPS68470_OUTPUT_B_SHIFT));
+	val = TPS68470_PLL_EN_MASK |
+	      TPS68470_OSC_EXT_CAP_DEFAULT << TPS68470_OSC_EXT_CAP_SHIFT |
+	      TPS68470_CLK_SRC_XTAL << TPS68470_CLK_SRC_SHIFT;
+
+	tps68470_reg_write(tps, TPS68470_REG_PLLCTL, val);
+
+	return 0;
+}
+
+static void tps68470_clk_unprepare(struct clk_hw *hw)
+{
+	struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
+	struct tps68470 *tps = clkdata->tps68470;
+
+	/* disable the clock first */
+	tps68470_clear_bits(tps, TPS68470_REG_PLLCTL, TPS68470_PLL_EN_MASK);
+
+	/* write hw defaults */
+	tps68470_reg_write(tps, TPS68470_REG_BOOSTDIV, 0);
+	tps68470_reg_write(tps, TPS68470_REG_BUCKDIV, 0);
+	tps68470_reg_write(tps, TPS68470_REG_PLLSWR, 0);
+	tps68470_reg_write(tps, TPS68470_REG_XTALDIV, 0);
+	tps68470_reg_write(tps, TPS68470_REG_PLLDIV, 0);
+	tps68470_reg_write(tps, TPS68470_REG_POSTDIV, 0);
+	tps68470_reg_write(tps, TPS68470_REG_CLKCFG2, 0);
+	tps68470_reg_write(tps, TPS68470_REG_CLKCFG1, 0);
+}
+
+static int tps68470_clk_enable(struct clk_hw *hw)
+{
+	/*
+	 * FIXME: the clock is enabled in prepare() because enabling
+	 * it requires I2C writes, and enable() must not sleep
+	 */
+	return 0;
+}
+
+static void tps68470_clk_disable(struct clk_hw *hw)
+{
+	/*
+	 * FIXME: the clock is disabled in unprepare() because disabling
+	 * it requires I2C writes, and disable() must not sleep
+	 */
+}
+
+static unsigned long tps68470_clk_recalc_rate(struct clk_hw *hw,
+					      unsigned long parent_rate)
+{
+	struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
+
+	return clk_freqs[clkdata->clk_cfg_idx].freq;
+}
+
+static int tps68470_clk_cfg_lookup(unsigned long rate)
+{
+	long diff, best = LONG_MAX;
+	int i, best_idx = 0;
+
+	for (i = 0; i < ARRAY_SIZE(clk_freqs); i++) {
+		diff = (long)clk_freqs[i].freq - (long)rate;
+		if (diff == 0)
+			return i;
+
+		diff = abs(diff);
+		if (diff < best) {
+			best = diff;
+			best_idx = i;
+		}
+	}
+
+	/* return the closest match, not the loop counter */
+	return best_idx;
+}
+
+static long tps68470_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+				    unsigned long *parent_rate)
+{
+	int idx = tps68470_clk_cfg_lookup(rate);
+
+	return clk_freqs[idx].freq;
+}
+
+static int tps68470_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+				 unsigned long parent_rate)
+{
+	struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
+	int idx = tps68470_clk_cfg_lookup(rate);
+
+	if (rate != clk_freqs[idx].freq)
+		return -EINVAL;
+
+	clkdata->clk_cfg_idx = idx;
+
+	return 0;
+}
+
+static const struct clk_ops tps68470_clk_ops = {
+	.is_prepared = tps68470_clk_is_prepared,
+	.prepare = tps68470_clk_prepare,
+	.unprepare = tps68470_clk_unprepare,
+	.enable = tps68470_clk_enable,
+	.disable = tps68470_clk_disable,
+	.recalc_rate = tps68470_clk_recalc_rate,
+	.round_rate = tps68470_clk_round_rate,
+	.set_rate = tps68470_clk_set_rate,
+};
+
+static struct clk_init_data tps68470_clk_initdata = {
+	.name = TPS68470_CLK_NAME,
+	.ops = &tps68470_clk_ops,
+	.flags = CLK_IS_ROOT,
+};
+
+static int tps68470_clk_probe(struct platform_device *pdev)
+{
+	struct tps68470 *tps68470 = dev_get_drvdata(pdev->dev.parent);
+	struct
tps68470_clkdata *tps68470_clkdata; + int ret; + + tps68470_clkdata = devm_kzalloc(&pdev->dev, sizeof(*tps68470_clkdata), + GFP_KERNEL); + if (!tps68470_clkdata) + return -ENOMEM; + + tps68470_clkdata->tps68470 = tps68470; + tps68470_clkdata->clkout_hw.init = &tps68470_clk_initdata; + tps68470_clkdata->clk = + devm_clk_register(&pdev->dev, &tps68470_clkdata->clkout_hw); + if (IS_ERR(tps68470_clkdata->clk)) + return PTR_ERR(tps68470_clkdata->clk); + + /* FIXME: Cannot remove clkdev so block module removal */ + ret = try_module_get(THIS_MODULE); + if (!ret) + goto error; + + ret = clk_register_clkdev(tps68470_clkdata->clk, + TPS68470_CLK_NAME, NULL); + if (ret) { + dev_err(&pdev->dev, "failed to register clkdev:%d\n", ret); + goto error; + } + + platform_set_drvdata(pdev, tps68470_clkdata); + + dev_info(tps68470->dev, "Registered %s clk\n", pdev->name); + + return 0; +error: + clk_unregister(tps68470_clkdata->clk); + + return ret; +} + +static struct platform_driver tps68470_clk = { + .driver = { + .name = TPS68470_CLK_NAME, + }, + .probe = tps68470_clk_probe, +}; + +static int __init tps68470_clk_init(void) +{ + return platform_driver_register(&tps68470_clk); +} +subsys_initcall(tps68470_clk_init); + +static void __exit tps68470_clk_exit(void) +{ + platform_driver_unregister(&tps68470_clk); +} +module_exit(tps68470_clk_exit); + +MODULE_AUTHOR("Zaikuo Wang "); +MODULE_AUTHOR("Tianshu Qiu "); +MODULE_AUTHOR("Jian Xu Zheng "); +MODULE_AUTHOR("Yuning Pu "); +MODULE_AUTHOR("Antti Laakso "); +MODULE_DESCRIPTION("clock driver for TPS68470 pmic"); +MODULE_ALIAS("platform:tps68470-clk"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index c8d83acda006..6f4c98ca6e50 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -351,9 +351,9 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now, return now <= rate && now > best; } -static int -clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req, - unsigned long flags) +int clk_mux_determine_rate_flags(struct clk_hw *hw, + struct clk_rate_request *req, + unsigned long flags) { struct clk_core *core = hw->core, *parent, *best_parent = NULL; int i, num_parents, ret; @@ -413,6 +413,7 @@ clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req, return 0; } +EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags); struct clk *__clk_lookup(const char *name) { @@ -1931,6 +1932,9 @@ static int clk_core_get_phase(struct clk_core *core) int ret; clk_prepare_lock(); + /* Always try to update cached phase if possible */ + if (core->ops->get_phase) + core->phase = core->ops->get_phase(core->hw); ret = core->phase; clk_prepare_unlock(); @@ -2470,6 +2474,21 @@ static int __clk_core_init(struct clk_core *core) rate = 0; core->rate = core->req_rate = rate; + /* + * Enable CLK_IS_CRITICAL clocks so newly added critical clocks + * don't get accidentally disabled when walking the orphan tree and + * reparenting clocks + */ + if (core->flags & CLK_IS_CRITICAL) { + unsigned long flags; + + clk_core_prepare(core); + + flags = clk_enable_lock(); + clk_core_enable(core); + clk_enable_unlock(flags); + } + /* * walk the list of orphan clocks and reparent any that newly finds a * parent. 
@@ -2478,10 +2497,13 @@ static int __clk_core_init(struct clk_core *core) struct clk_core *parent = __clk_init_parent(orphan); /* - * we could call __clk_set_parent, but that would result in a - * redundant call to the .set_rate op, if it exists + * We need to use __clk_set_parent_before() and _after() to + * to properly migrate any prepare/enable count of the orphan + * clock. This is important for CLK_IS_CRITICAL clocks, which + * are enabled during init but might not have a parent yet. */ if (parent) { + /* update the clk tree topology */ __clk_set_parent_before(orphan, parent); __clk_set_parent_after(orphan, parent, NULL); __clk_recalc_accuracies(orphan); @@ -2500,16 +2522,6 @@ static int __clk_core_init(struct clk_core *core) if (core->ops->init) core->ops->init(core->hw); - if (core->flags & CLK_IS_CRITICAL) { - unsigned long flags; - - clk_core_prepare(core); - - flags = clk_enable_lock(); - clk_core_enable(core); - clk_enable_unlock(flags); - } - kref_init(&core->ref); out: clk_prepare_unlock(); diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c index a18258eb89cb..f40419959656 100644 --- a/drivers/clk/hisilicon/clk-hi3660.c +++ b/drivers/clk/hisilicon/clk-hi3660.c @@ -34,7 +34,7 @@ static const struct hisi_fixed_rate_clock hi3660_fixed_rate_clks[] = { /* crgctrl */ static const struct hisi_fixed_factor_clock hi3660_crg_fixed_factor_clks[] = { - { HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 8, 0, }, + { HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 16, 0, }, { HI3660_CLK_FACTOR_MMC, "clk_factor_mmc", "clkin_sys", 1, 6, 0, }, { HI3660_CLK_GATE_I2C0, "clk_gate_i2c0", "clk_i2c0_iomcu", 1, 4, 0, }, { HI3660_CLK_GATE_I2C1, "clk_gate_i2c1", "clk_i2c1_iomcu", 1, 4, 0, }, diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c index e786d717f75d..a87809d4bd52 100644 --- a/drivers/clk/hisilicon/clk-hi6220.c +++ b/drivers/clk/hisilicon/clk-hi6220.c @@ -145,7 +145,7 @@ static struct hisi_gate_clock hi6220_separated_gate_clks_sys[] __initdata = { { HI6220_BBPPLL_SEL, "bbppll_sel", "pll0_bbp_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 9, 0, }, { HI6220_MEDIA_PLL_SRC, "media_pll_src", "pll_media_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 10, 0, }, { HI6220_MMC2_SEL, "mmc2_sel", "mmc2_mux1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 11, 0, }, - { HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 12, 0, }, + { HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IS_CRITICAL, 0x270, 12, 0, }, }; static struct hisi_mux_clock hi6220_mux_clks_sys[] __initdata = { diff --git a/drivers/clk/hisilicon/clkdivider-hi6220.c b/drivers/clk/hisilicon/clkdivider-hi6220.c index a1c1f684ad58..9f46cf9dcc65 100644 --- a/drivers/clk/hisilicon/clkdivider-hi6220.c +++ b/drivers/clk/hisilicon/clkdivider-hi6220.c @@ -56,7 +56,7 @@ static unsigned long hi6220_clkdiv_recalc_rate(struct clk_hw *hw, val &= div_mask(dclk->width); return divider_recalc_rate(hw, parent_rate, val, dclk->table, - CLK_DIVIDER_ROUND_CLOSEST); + CLK_DIVIDER_ROUND_CLOSEST, dclk->width); } static long hi6220_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate, diff --git a/drivers/clk/hisilicon/crg-hi3516cv300.c b/drivers/clk/hisilicon/crg-hi3516cv300.c index 2007123832bb..53450b651e4c 100644 --- a/drivers/clk/hisilicon/crg-hi3516cv300.c +++ b/drivers/clk/hisilicon/crg-hi3516cv300.c @@ -204,7 +204,7 @@ static const struct hisi_crg_funcs hi3516cv300_crg_funcs = { /* 
hi3516CV300 sysctrl CRG */ #define HI3516CV300_SYSCTRL_NR_CLKS 16 -static const char *wdt_mux_p[] __initconst = { "3m", "apb" }; +static const char *const wdt_mux_p[] __initconst = { "3m", "apb" }; static u32 wdt_mux_table[] = {0, 1}; static const struct hisi_mux_clock hi3516cv300_sysctrl_mux_clks[] = { diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index c07df719b8a3..8d518ad5dc13 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c @@ -761,7 +761,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24); clk[IMX6QDL_CLK_GPU3D_CORE] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26); clk[IMX6QDL_CLK_HDMI_IAHB] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0); - clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "video_27m", base + 0x70, 4); + clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "mipi_core_cfg", base + 0x70, 4); clk[IMX6QDL_CLK_I2C1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6); clk[IMX6QDL_CLK_I2C2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8); clk[IMX6QDL_CLK_I2C3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10); diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c index 5e8c18afce9a..41c08fc892b9 100644 --- a/drivers/clk/imx/clk-imx6ul.c +++ b/drivers/clk/imx/clk-imx6ul.c @@ -461,7 +461,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) clk_set_rate(clks[IMX6UL_CLK_AHB], 99000000); /* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */ - clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]); + clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_OSC]); clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_CLK2]); clk_set_parent(clks[IMX6UL_CLK_PERIPH_PRE], clks[IMX6UL_CLK_PLL2_BUS]); clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_PRE]); diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index 2305699db467..0ac9b30c8b90 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c @@ -797,7 +797,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) clks[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_gate4("main_axi_root_clk", "axi_post_div", base + 0x4040, 0); clks[IMX7D_DISP_AXI_ROOT_CLK] = imx_clk_gate4("disp_axi_root_clk", "disp_axi_post_div", base + 0x4050, 0); clks[IMX7D_ENET_AXI_ROOT_CLK] = imx_clk_gate4("enet_axi_root_clk", "enet_axi_post_div", base + 0x4060, 0); - clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "axi_post_div", base + 0x4110, 0); + clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "main_axi_root_clk", base + 0x4110, 0); clks[IMX7D_OCRAM_S_CLK] = imx_clk_gate4("ocram_s_clk", "ahb_root_clk", base + 0x4120, 0); clks[IMX7D_DRAM_ROOT_CLK] = imx_clk_gate4("dram_root_clk", "dram_post_div", base + 0x4130, 0); clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate4("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0); diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c index 9598889f972b..ccfe5d30fe10 100644 --- a/drivers/clk/mediatek/clk-mt2701.c +++ b/drivers/clk/mediatek/clk-mt2701.c @@ -148,6 +148,7 @@ static const struct mtk_fixed_factor top_fixed_divs[] = { FACTOR(CLK_TOP_CLK26M_D8, "clk26m_d8", "clk26m", 1, 8), FACTOR(CLK_TOP_32K_INTERNAL, "32k_internal", "clk26m", 1, 793), FACTOR(CLK_TOP_32K_EXTERNAL, "32k_external", "rtc32k", 1, 1), + FACTOR(CLK_TOP_AXISEL_D4, 
"axisel_d4", "axi_sel", 1, 4), }; static const char * const axi_parents[] = { @@ -857,13 +858,13 @@ static const struct mtk_gate peri_clks[] = { GATE_PERI0(CLK_PERI_USB1, "usb1_ck", "usb20_sel", 11), GATE_PERI0(CLK_PERI_USB0, "usb0_ck", "usb20_sel", 10), GATE_PERI0(CLK_PERI_PWM, "pwm_ck", "axi_sel", 9), - GATE_PERI0(CLK_PERI_PWM7, "pwm7_ck", "axi_sel", 8), - GATE_PERI0(CLK_PERI_PWM6, "pwm6_ck", "axi_sel", 7), - GATE_PERI0(CLK_PERI_PWM5, "pwm5_ck", "axi_sel", 6), - GATE_PERI0(CLK_PERI_PWM4, "pwm4_ck", "axi_sel", 5), - GATE_PERI0(CLK_PERI_PWM3, "pwm3_ck", "axi_sel", 4), - GATE_PERI0(CLK_PERI_PWM2, "pwm2_ck", "axi_sel", 3), - GATE_PERI0(CLK_PERI_PWM1, "pwm1_ck", "axi_sel", 2), + GATE_PERI0(CLK_PERI_PWM7, "pwm7_ck", "axisel_d4", 8), + GATE_PERI0(CLK_PERI_PWM6, "pwm6_ck", "axisel_d4", 7), + GATE_PERI0(CLK_PERI_PWM5, "pwm5_ck", "axisel_d4", 6), + GATE_PERI0(CLK_PERI_PWM4, "pwm4_ck", "axisel_d4", 5), + GATE_PERI0(CLK_PERI_PWM3, "pwm3_ck", "axisel_d4", 4), + GATE_PERI0(CLK_PERI_PWM2, "pwm2_ck", "axisel_d4", 3), + GATE_PERI0(CLK_PERI_PWM1, "pwm1_ck", "axisel_d4", 2), GATE_PERI0(CLK_PERI_THERM, "therm_ck", "axi_sel", 1), GATE_PERI0(CLK_PERI_NFI, "nfi_ck", "nfi2x_sel", 0), diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h index f5d6b70ce189..210ce8e8025e 100644 --- a/drivers/clk/mediatek/clk-mtk.h +++ b/drivers/clk/mediatek/clk-mtk.h @@ -216,6 +216,7 @@ struct mtk_pll_data { uint32_t pcw_reg; int pcw_shift; const struct mtk_pll_div_table *div_table; + const char *parent_name; }; void mtk_clk_register_plls(struct device_node *node, diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c index a409142e9346..7598477ff60f 100644 --- a/drivers/clk/mediatek/clk-pll.c +++ b/drivers/clk/mediatek/clk-pll.c @@ -303,7 +303,10 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data, init.name = data->name; init.flags = (data->flags & PLL_AO) ? 
CLK_IS_CRITICAL : 0; init.ops = &mtk_pll_ops; - init.parent_names = &parent_name; + if (data->parent_name) + init.parent_names = &data->parent_name; + else + init.parent_names = &parent_name; init.num_parents = 1; clk = clk_register(NULL, &pll->hw); diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c index 44a5a535ca63..5144360e2c80 100644 --- a/drivers/clk/meson/clk-mpll.c +++ b/drivers/clk/meson/clk-mpll.c @@ -98,7 +98,7 @@ static void params_from_rate(unsigned long requested_rate, *sdm = SDM_DEN - 1; } else { *n2 = div; - *sdm = DIV_ROUND_UP(rem * SDM_DEN, requested_rate); + *sdm = DIV_ROUND_UP_ULL((u64)rem * SDM_DEN, requested_rate); } } diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index b2d1e8ed7152..92168348ffa6 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -1139,7 +1139,7 @@ static MESON_GATE(gxbb_pl301, HHI_GCLK_MPEG0, 6); static MESON_GATE(gxbb_periphs, HHI_GCLK_MPEG0, 7); static MESON_GATE(gxbb_spicc, HHI_GCLK_MPEG0, 8); static MESON_GATE(gxbb_i2c, HHI_GCLK_MPEG0, 9); -static MESON_GATE(gxbb_sar_adc, HHI_GCLK_MPEG0, 10); +static MESON_GATE(gxbb_sana, HHI_GCLK_MPEG0, 10); static MESON_GATE(gxbb_smart_card, HHI_GCLK_MPEG0, 11); static MESON_GATE(gxbb_rng0, HHI_GCLK_MPEG0, 12); static MESON_GATE(gxbb_uart0, HHI_GCLK_MPEG0, 13); @@ -1190,7 +1190,7 @@ static MESON_GATE(gxbb_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9); static MESON_GATE(gxbb_mmc_pclk, HHI_GCLK_MPEG2, 11); static MESON_GATE(gxbb_dvin, HHI_GCLK_MPEG2, 12); static MESON_GATE(gxbb_uart2, HHI_GCLK_MPEG2, 15); -static MESON_GATE(gxbb_sana, HHI_GCLK_MPEG2, 22); +static MESON_GATE(gxbb_sar_adc, HHI_GCLK_MPEG2, 22); static MESON_GATE(gxbb_vpu_intr, HHI_GCLK_MPEG2, 25); static MESON_GATE(gxbb_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26); static MESON_GATE(gxbb_clk81_a53, HHI_GCLK_MPEG2, 29); diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c index 394aa6f03f01..9ff4ea63932d 100644 --- a/drivers/clk/mvebu/armada-38x.c +++ b/drivers/clk/mvebu/armada-38x.c @@ -46,11 +46,11 @@ static u32 __init armada_38x_get_tclk_freq(void __iomem *sar) } static const u32 armada_38x_cpu_frequencies[] __initconst = { - 0, 0, 0, 0, - 1066 * 1000 * 1000, 0, 0, 0, + 666 * 1000 * 1000, 0, 800 * 1000 * 1000, 0, + 1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0, 1332 * 1000 * 1000, 0, 0, 0, 1600 * 1000 * 1000, 0, 0, 0, - 1866 * 1000 * 1000, + 1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000, }; static u32 __init armada_38x_get_cpu_freq(void __iomem *sar) @@ -76,11 +76,11 @@ static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = { }; static const int armada_38x_cpu_l2_ratios[32][2] __initconst = { - {0, 1}, {0, 1}, {0, 1}, {0, 1}, - {1, 2}, {0, 1}, {0, 1}, {0, 1}, - {1, 2}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {1, 2}, {0, 1}, + {1, 2}, {0, 1}, {1, 2}, {0, 1}, {1, 2}, {0, 1}, {0, 1}, {0, 1}, {1, 2}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {1, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, @@ -91,7 +91,7 @@ static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = { {1, 2}, {0, 1}, {0, 1}, {0, 1}, {1, 2}, {0, 1}, {0, 1}, {0, 1}, {1, 2}, {0, 1}, {0, 1}, {0, 1}, - {1, 2}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {7, 15}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c index 7b359afd620e..a6438f50e6db 100644 --- a/drivers/clk/nxp/clk-lpc32xx.c +++ 
b/drivers/clk/nxp/clk-lpc32xx.c @@ -956,7 +956,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, val &= div_mask(divider->width); return divider_recalc_rate(hw, parent_rate, val, divider->table, - divider->flags); + divider->flags, divider->width); } static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c index 53484912301e..928fcc16ee27 100644 --- a/drivers/clk/qcom/clk-regmap-divider.c +++ b/drivers/clk/qcom/clk-regmap-divider.c @@ -59,7 +59,7 @@ static unsigned long div_recalc_rate(struct clk_hw *hw, div &= BIT(divider->width) - 1; return divider_recalc_rate(hw, parent_rate, div, NULL, - CLK_DIVIDER_ROUND_CLOSEST); + CLK_DIVIDER_ROUND_CLOSEST, divider->width); } const struct clk_ops clk_regmap_div_ops = { diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index d523991c945f..28ceaf1e9937 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c @@ -143,8 +143,10 @@ static int _qcom_cc_register_board_clk(struct device *dev, const char *path, int ret; clocks_node = of_find_node_by_path("/clocks"); - if (clocks_node) - node = of_find_node_by_name(clocks_node, path); + if (clocks_node) { + node = of_get_child_by_name(clocks_node, path); + of_node_put(clocks_node); + } if (!node) { fixed = devm_kzalloc(dev, sizeof(*fixed), GFP_KERNEL); diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c index 3410ee68d4bc..2057809219f4 100644 --- a/drivers/clk/qcom/gcc-msm8916.c +++ b/drivers/clk/qcom/gcc-msm8916.c @@ -1438,6 +1438,7 @@ static const struct freq_tbl ftbl_codec_clk[] = { static struct clk_rcg2 codec_digcodec_clk_src = { .cmd_rcgr = 0x1c09c, + .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll1_emclk_sleep_map, .freq_tbl = ftbl_codec_clk, diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c index eea38f6ea77e..3892346c4fcc 100644 --- a/drivers/clk/renesas/clk-sh73a0.c +++ b/drivers/clk/renesas/clk-sh73a0.c @@ -46,7 +46,7 @@ struct div4_clk { unsigned int shift; }; -static struct div4_clk div4_clks[] = { +static const struct div4_clk div4_clks[] = { { "zg", "pll0", CPG_FRQCRA, 16 }, { "m3", "pll1", CPG_FRQCRA, 12 }, { "b", "pll1", CPG_FRQCRA, 8 }, @@ -79,7 +79,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg, { const struct clk_div_table *table = NULL; unsigned int shift, reg, width; - const char *parent_name; + const char *parent_name = NULL; unsigned int mult = 1; unsigned int div = 1; @@ -135,7 +135,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg, shift = 24; width = 5; } else { - struct div4_clk *c; + const struct div4_clk *c; for (c = div4_clks; c->name; c++) { if (!strcmp(name, c->name)) { diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index e580a5e6346c..30c23b882675 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -248,8 +248,9 @@ struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec, dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx, PTR_ERR(clk)); else - dev_dbg(dev, "clock (%u, %u) is %pC at %pCr Hz\n", - clkspec->args[0], clkspec->args[1], clk, clk); + dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n", + clkspec->args[0], clkspec->args[1], clk, + clk_get_rate(clk)); return clk; } @@ -314,7 +315,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core, if 
(IS_ERR_OR_NULL(clk)) goto fail; - dev_dbg(dev, "Core clock %pC at %pCr Hz\n", clk, clk); + dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk)); priv->clks[id] = clk; return; @@ -380,7 +381,7 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod, if (IS_ERR(clk)) goto fail; - dev_dbg(dev, "Module clock %pC at %pCr Hz\n", clk, clk); + dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk)); priv->clks[id] = clk; return; diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c index 077fcdc7908b..fe7d9ed1d436 100644 --- a/drivers/clk/rockchip/clk-mmc-phase.c +++ b/drivers/clk/rockchip/clk-mmc-phase.c @@ -58,6 +58,12 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw) u16 degrees; u32 delay_num = 0; + /* See the comment for rockchip_mmc_set_phase below */ + if (!rate) { + pr_err("%s: invalid clk rate\n", __func__); + return -EINVAL; + } + raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift); degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90; @@ -84,6 +90,23 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees) u32 raw_value; u32 delay; + /* + * The below calculation is based on the output clock from + * MMC host to the card, which expects the phase clock inherits + * the clock rate from its parent, namely the output clock + * provider of MMC host. However, things may go wrong if + * (1) It is orphan. + * (2) It is assigned to the wrong parent. + * + * This check help debug the case (1), which seems to be the + * most likely problem we often face and which makes it difficult + * for people to debug unstable mmc tuning results. + */ + if (!rate) { + pr_err("%s: invalid clk rate\n", __func__); + return -EINVAL; + } + nineties = degrees / 90; remainder = (degrees % 90); diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c index 11e7f2d1c054..7af48184b022 100644 --- a/drivers/clk/rockchip/clk-rk3228.c +++ b/drivers/clk/rockchip/clk-rk3228.c @@ -387,7 +387,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { RK2928_CLKSEL_CON(23), 5, 2, MFLAGS, 0, 6, DFLAGS, RK2928_CLKGATE_CON(2), 15, GFLAGS), - COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0, + COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0, RK2928_CLKSEL_CON(11), 8, 2, MFLAGS, 0, 8, DFLAGS, RK2928_CLKGATE_CON(2), 11, GFLAGS), diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c index 1b81e283f605..ed36728424a2 100644 --- a/drivers/clk/samsung/clk-exynos3250.c +++ b/drivers/clk/samsung/clk-exynos3250.c @@ -698,7 +698,7 @@ static const struct samsung_pll_rate_table exynos3250_epll_rates[] __initconst = PLL_36XX_RATE(144000000, 96, 2, 3, 0), PLL_36XX_RATE( 96000000, 128, 2, 4, 0), PLL_36XX_RATE( 84000000, 112, 2, 4, 0), - PLL_36XX_RATE( 80000004, 106, 2, 4, 43691), + PLL_36XX_RATE( 80000003, 106, 2, 4, 43691), PLL_36XX_RATE( 73728000, 98, 2, 4, 19923), PLL_36XX_RATE( 67737598, 270, 3, 5, 62285), PLL_36XX_RATE( 65535999, 174, 2, 5, 49982), @@ -734,7 +734,7 @@ static const struct samsung_pll_rate_table exynos3250_vpll_rates[] __initconst = PLL_36XX_RATE(148352005, 98, 2, 3, 59070), PLL_36XX_RATE(108000000, 144, 2, 4, 0), PLL_36XX_RATE( 74250000, 99, 2, 4, 0), - PLL_36XX_RATE( 74176002, 98, 3, 4, 59070), + PLL_36XX_RATE( 74176002, 98, 2, 4, 59070), PLL_36XX_RATE( 54054000, 216, 3, 5, 14156), PLL_36XX_RATE( 54000000, 144, 2, 5, 0), { /* sentinel */ } diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c index 
27a227d6620c..6a0cb8a515e8 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c @@ -711,13 +711,13 @@ static const struct samsung_pll_rate_table epll_24mhz_tbl[] __initconst = { /* sorted in descending order */ /* PLL_36XX_RATE(rate, m, p, s, k) */ PLL_36XX_RATE(192000000, 64, 2, 2, 0), - PLL_36XX_RATE(180633600, 90, 3, 2, 20762), + PLL_36XX_RATE(180633605, 90, 3, 2, 20762), PLL_36XX_RATE(180000000, 90, 3, 2, 0), PLL_36XX_RATE(73728000, 98, 2, 4, 19923), - PLL_36XX_RATE(67737600, 90, 2, 4, 20762), + PLL_36XX_RATE(67737602, 90, 2, 4, 20762), PLL_36XX_RATE(49152000, 98, 3, 4, 19923), - PLL_36XX_RATE(45158400, 90, 3, 4, 20762), - PLL_36XX_RATE(32768000, 131, 3, 5, 4719), + PLL_36XX_RATE(45158401, 90, 3, 4, 20762), + PLL_36XX_RATE(32768001, 131, 3, 5, 4719), { }, }; diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c index fd1d9bfc151b..8eae1752d700 100644 --- a/drivers/clk/samsung/clk-exynos5260.c +++ b/drivers/clk/samsung/clk-exynos5260.c @@ -65,7 +65,7 @@ static const struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initconst = { PLL_36XX_RATE(480000000, 160, 2, 2, 0), PLL_36XX_RATE(432000000, 144, 2, 2, 0), PLL_36XX_RATE(400000000, 200, 3, 2, 0), - PLL_36XX_RATE(394073130, 459, 7, 2, 49282), + PLL_36XX_RATE(394073128, 459, 7, 2, 49282), PLL_36XX_RATE(333000000, 111, 2, 2, 0), PLL_36XX_RATE(300000000, 100, 2, 2, 0), PLL_36XX_RATE(266000000, 266, 3, 3, 0), diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c index 11343a597093..1d2265f9ee97 100644 --- a/drivers/clk/samsung/clk-exynos5433.c +++ b/drivers/clk/samsung/clk-exynos5433.c @@ -725,7 +725,7 @@ static const struct samsung_pll_rate_table exynos5433_pll_rates[] __initconst = PLL_35XX_RATE(800000000U, 400, 6, 1), PLL_35XX_RATE(733000000U, 733, 12, 1), PLL_35XX_RATE(700000000U, 175, 3, 1), - PLL_35XX_RATE(667000000U, 222, 4, 1), + PLL_35XX_RATE(666000000U, 222, 4, 1), PLL_35XX_RATE(633000000U, 211, 4, 1), PLL_35XX_RATE(600000000U, 500, 5, 2), PLL_35XX_RATE(552000000U, 460, 5, 2), @@ -753,12 +753,12 @@ static const struct samsung_pll_rate_table exynos5433_pll_rates[] __initconst = /* AUD_PLL */ static const struct samsung_pll_rate_table exynos5433_aud_pll_rates[] __initconst = { PLL_36XX_RATE(400000000U, 200, 3, 2, 0), - PLL_36XX_RATE(393216000U, 197, 3, 2, -25690), + PLL_36XX_RATE(393216003U, 197, 3, 2, -25690), PLL_36XX_RATE(384000000U, 128, 2, 2, 0), - PLL_36XX_RATE(368640000U, 246, 4, 2, -15729), - PLL_36XX_RATE(361507200U, 181, 3, 2, -16148), - PLL_36XX_RATE(338688000U, 113, 2, 2, -6816), - PLL_36XX_RATE(294912000U, 98, 1, 3, 19923), + PLL_36XX_RATE(368639991U, 246, 4, 2, -15729), + PLL_36XX_RATE(361507202U, 181, 3, 2, -16148), + PLL_36XX_RATE(338687988U, 113, 2, 2, -6816), + PLL_36XX_RATE(294912002U, 98, 1, 3, 19923), PLL_36XX_RATE(288000000U, 96, 1, 3, 0), PLL_36XX_RATE(252000000U, 84, 1, 3, 0), { /* sentinel */ } diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c index 5931a4140c3d..bbfa57b4e017 100644 --- a/drivers/clk/samsung/clk-exynos7.c +++ b/drivers/clk/samsung/clk-exynos7.c @@ -140,7 +140,7 @@ static const struct samsung_div_clock topc_div_clks[] __initconst = { }; static const struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initconst = { - PLL_36XX_RATE(491520000, 20, 1, 0, 31457), + PLL_36XX_RATE(491519897, 20, 1, 0, 31457), {}, }; diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c index e0650c33863b..d8e58a659467 100644 --- 
a/drivers/clk/samsung/clk-s3c2410.c +++ b/drivers/clk/samsung/clk-s3c2410.c @@ -168,7 +168,7 @@ static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = { PLL_35XX_RATE(226000000, 105, 1, 1), PLL_35XX_RATE(210000000, 132, 2, 1), /* 2410 common */ - PLL_35XX_RATE(203000000, 161, 3, 1), + PLL_35XX_RATE(202800000, 161, 3, 1), PLL_35XX_RATE(192000000, 88, 1, 1), PLL_35XX_RATE(186000000, 85, 1, 1), PLL_35XX_RATE(180000000, 82, 1, 1), @@ -178,18 +178,18 @@ static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = { PLL_35XX_RATE(147000000, 90, 2, 1), PLL_35XX_RATE(135000000, 82, 2, 1), PLL_35XX_RATE(124000000, 116, 1, 2), - PLL_35XX_RATE(118000000, 150, 2, 2), + PLL_35XX_RATE(118500000, 150, 2, 2), PLL_35XX_RATE(113000000, 105, 1, 2), - PLL_35XX_RATE(101000000, 127, 2, 2), + PLL_35XX_RATE(101250000, 127, 2, 2), PLL_35XX_RATE(90000000, 112, 2, 2), - PLL_35XX_RATE(85000000, 105, 2, 2), + PLL_35XX_RATE(84750000, 105, 2, 2), PLL_35XX_RATE(79000000, 71, 1, 2), - PLL_35XX_RATE(68000000, 82, 2, 2), - PLL_35XX_RATE(56000000, 142, 2, 3), + PLL_35XX_RATE(67500000, 82, 2, 2), + PLL_35XX_RATE(56250000, 142, 2, 3), PLL_35XX_RATE(48000000, 120, 2, 3), - PLL_35XX_RATE(51000000, 161, 3, 3), + PLL_35XX_RATE(50700000, 161, 3, 3), PLL_35XX_RATE(45000000, 82, 1, 3), - PLL_35XX_RATE(34000000, 82, 2, 3), + PLL_35XX_RATE(33750000, 82, 2, 3), { /* sentinel */ }, }; diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c index 286b0049b7b6..a48fde191c0a 100644 --- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c +++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c @@ -223,7 +223,7 @@ static struct ccu_mux cpu_clk = { .hw.init = CLK_HW_INIT_PARENTS("cpu", cpu_parents, &ccu_mux_ops, - CLK_IS_CRITICAL), + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL), } }; diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c index ab9e850b3707..2f385a57cd91 100644 --- a/drivers/clk/sunxi-ng/ccu-sun5i.c +++ b/drivers/clk/sunxi-ng/ccu-sun5i.c @@ -982,8 +982,8 @@ static void __init sun5i_ccu_init(struct device_node *node, /* Force the PLL-Audio-1x divider to 4 */ val = readl(reg + SUN5I_PLL_AUDIO_REG); - val &= ~GENMASK(19, 16); - writel(val | (3 << 16), reg + SUN5I_PLL_AUDIO_REG); + val &= ~GENMASK(29, 26); + writel(val | (3 << 26), reg + SUN5I_PLL_AUDIO_REG); /* * Use the peripheral PLL as the AHB parent, instead of CPU / diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 8af434815fba..40d5f74cb2ac 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -608,7 +608,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents, 0x150, 0, 4, 24, 2, BIT(31), CLK_SET_RATE_PARENT); -static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0); +static SUNXI_CCU_GATE(hdmi_ddc_clk, "ddc", "osc24M", 0x150, BIT(30), 0); static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0); @@ -750,7 +750,7 @@ static struct ccu_mp out_a_clk = { .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("out-a", clk_out_parents, - &ccu_div_ops, + &ccu_mp_ops, 0), }, }; @@ -771,7 +771,7 @@ static struct ccu_mp out_b_clk = { .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("out-b", clk_out_parents, - &ccu_div_ops, + &ccu_mp_ops, 0), }, }; @@ -792,7 +792,7 @@ static struct ccu_mp out_c_clk = { .features = CCU_FEATURE_FIXED_PREDIV, .hw.init = CLK_HW_INIT_PARENTS("out-c", clk_out_parents, - &ccu_div_ops, + &ccu_mp_ops, 0), }, }; diff --git 
a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c index e43acebdfbcd..c10160d7a556 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c @@ -354,9 +354,9 @@ static SUNXI_CCU_GATE(bus_tdm_clk, "bus-tdm", "apb1", static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2", 0x06c, BIT(0), 0); static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2", - 0x06c, BIT(0), 0); + 0x06c, BIT(1), 0); static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2", - 0x06c, BIT(0), 0); + 0x06c, BIT(2), 0); static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2", 0x06c, BIT(16), 0); static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2", @@ -493,8 +493,8 @@ static SUNXI_CCU_MUX_WITH_GATE(tcon0_clk, "tcon0", tcon0_parents, 0x118, 24, 3, BIT(31), CLK_SET_RATE_PARENT); static const char * const tcon1_parents[] = { "pll-video1" }; -static SUNXI_CCU_MUX_WITH_GATE(tcon1_clk, "tcon1", tcon1_parents, - 0x11c, 24, 3, BIT(31), CLK_SET_RATE_PARENT); +static SUNXI_CCU_M_WITH_MUX_GATE(tcon1_clk, "tcon1", tcon1_parents, + 0x11c, 0, 4, 24, 2, BIT(31), CLK_SET_RATE_PARENT); static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M", 0x130, BIT(16), 0); diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c index baa3cf96507b..302a18efd39f 100644 --- a/drivers/clk/sunxi-ng/ccu_div.c +++ b/drivers/clk/sunxi-ng/ccu_div.c @@ -71,7 +71,7 @@ static unsigned long ccu_div_recalc_rate(struct clk_hw *hw, parent_rate); val = divider_recalc_rate(hw, parent_rate, val, cd->div.table, - cd->div.flags); + cd->div.flags, cd->div.width); if (cd->common.features & CCU_FEATURE_FIXED_POSTDIV) val /= cd->fixed_post_div; diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c index a32158e8f2e3..84a5e7f17f6f 100644 --- a/drivers/clk/sunxi-ng/ccu_nm.c +++ b/drivers/clk/sunxi-ng/ccu_nm.c @@ -99,6 +99,9 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate, struct ccu_nm *nm = hw_to_ccu_nm(hw); struct _ccu_nm _nm; + if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) + return rate; + _nm.min_n = nm->n.min ?: 1; _nm.max_n = nm->n.max ?: 1 << nm->n.width; _nm.min_m = 1; diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c index 6041bdba2e97..f69f9e8c6f38 100644 --- a/drivers/clk/sunxi/clk-sun9i-mmc.c +++ b/drivers/clk/sunxi/clk-sun9i-mmc.c @@ -16,6 +16,7 @@ #include #include +#include #include #include #include @@ -83,9 +84,20 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev, return 0; } +static int sun9i_mmc_reset_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + sun9i_mmc_reset_assert(rcdev, id); + udelay(10); + sun9i_mmc_reset_deassert(rcdev, id); + + return 0; +} + static const struct reset_control_ops sun9i_mmc_reset_ops = { .assert = sun9i_mmc_reset_assert, .deassert = sun9i_mmc_reset_deassert, + .reset = sun9i_mmc_reset_reset, }; static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev) diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index 7c369e21c91c..830d1c87fa7c 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -1151,6 +1151,8 @@ static const struct clk_ops tegra_clk_pllu_ops = { .enable = clk_pllu_enable, .disable = clk_pll_disable, .recalc_rate = clk_pll_recalc_rate, + .round_rate = clk_pll_round_rate, + .set_rate = clk_pll_set_rate, }; static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params, diff --git a/drivers/clk/tegra/clk-tegra210.c 
b/drivers/clk/tegra/clk-tegra210.c index 6d7a613f2656..b92867814e2d 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -2566,8 +2566,8 @@ static int tegra210_enable_pllu(void) reg |= PLL_ENABLE; writel(reg, clk_base + PLLU_BASE); - readl_relaxed_poll_timeout(clk_base + PLLU_BASE, reg, - reg & PLL_BASE_LOCK, 2, 1000); + readl_relaxed_poll_timeout_atomic(clk_base + PLLU_BASE, reg, + reg & PLL_BASE_LOCK, 2, 1000); if (!(reg & PLL_BASE_LOCK)) { pr_err("Timed out waiting for PLL_U to lock\n"); return -ETIMEDOUT; diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c index a2d163f759b4..07f5203df01c 100644 --- a/drivers/clk/tegra/clk-tegra30.c +++ b/drivers/clk/tegra/clk-tegra30.c @@ -964,7 +964,7 @@ static void __init tegra30_super_clk_init(void) * U71 divider of cclk_lp. */ clk = tegra_clk_register_divider("pll_p_out3_cclklp", "pll_p_out3", - clk_base + SUPER_CCLKG_DIVIDER, 0, + clk_base + SUPER_CCLKLP_DIVIDER, 0, TEGRA_DIVIDER_INT, 16, 8, 1, NULL); clk_register_clkdev(clk, "pll_p_out3_cclklp", NULL); diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index 13eb04f72389..148815470431 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -274,8 +274,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) /* Get configuration for the ATL instances */ snprintf(prop, sizeof(prop), "atl%u", i); - of_node_get(node); - cfg_node = of_find_node_by_name(node, prop); + cfg_node = of_get_child_by_name(node, prop); if (cfg_node) { ret = of_property_read_u32(cfg_node, "bws", &cdesc->bws); diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c index 07f3b91a7daf..d244e724e198 100644 --- a/drivers/clk/uniphier/clk-uniphier-sys.c +++ b/drivers/clk/uniphier/clk-uniphier-sys.c @@ -123,7 +123,7 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = { const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = { UNIPHIER_CLK_FACTOR("spll", -1, "ref", 120, 1), /* 2400 MHz */ UNIPHIER_CLK_FACTOR("dapll1", -1, "ref", 128, 1), /* 2560 MHz */ - UNIPHIER_CLK_FACTOR("dapll2", -1, "ref", 144, 125), /* 2949.12 MHz */ + UNIPHIER_CLK_FACTOR("dapll2", -1, "dapll1", 144, 125), /* 2949.12 MHz */ UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40), UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48), UNIPHIER_PRO5_SYS_CLK_NAND(2), diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index fd4b7f684bd0..14e2419063e9 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -1268,10 +1268,6 @@ arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem) iounmap(cntctlbase); - if (!best_frame) - pr_err("Unable to find a suitable frame in timer @ %pa\n", - &timer_mem->cntctlbase); - return best_frame; } @@ -1372,6 +1368,8 @@ static int __init arch_timer_mem_of_init(struct device_node *np) frame = arch_timer_mem_find_best_frame(timer_mem); if (!frame) { + pr_err("Unable to find a suitable frame in timer @ %pa\n", + &timer_mem->cntctlbase); ret = -EINVAL; goto out; } @@ -1420,7 +1418,7 @@ arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem) static int __init arch_timer_mem_acpi_init(int platform_timer_count) { struct arch_timer_mem *timers, *timer; - struct arch_timer_mem_frame *frame; + struct arch_timer_mem_frame *frame, *best_frame = NULL; int timer_count, i, ret = 0; timers = kcalloc(platform_timer_count, sizeof(*timers), @@ -1432,14 +1430,6 @@ static int __init 
arch_timer_mem_acpi_init(int platform_timer_count) if (ret || !timer_count) goto out; - for (i = 0; i < timer_count; i++) { - ret = arch_timer_mem_verify_cntfrq(&timers[i]); - if (ret) { - pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n"); - goto out; - } - } - /* * While unlikely, it's theoretically possible that none of the frames * in a timer expose the combination of feature we want. @@ -1448,12 +1438,26 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count) timer = &timers[i]; frame = arch_timer_mem_find_best_frame(timer); - if (frame) - break; + if (!best_frame) + best_frame = frame; + + ret = arch_timer_mem_verify_cntfrq(timer); + if (ret) { + pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n"); + goto out; + } + + if (!best_frame) /* implies !frame */ + /* + * Only complain about missing suitable frames if we + * haven't already found one in a previous iteration. + */ + pr_err("Unable to find a suitable frame in timer @ %pa\n", + &timer->cntctlbase); } - if (frame) - ret = arch_timer_mem_frame_register(frame); + if (best_frame) + ret = arch_timer_mem_frame_register(best_frame); out: kfree(timers); return ret; diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c index 3ee7e6fea621..846d18daf893 100644 --- a/drivers/clocksource/fsl_ftm_timer.c +++ b/drivers/clocksource/fsl_ftm_timer.c @@ -281,7 +281,7 @@ static int __init __ftm_clk_init(struct device_node *np, char *cnt_name, static unsigned long __init ftm_clk_init(struct device_node *np) { - unsigned long freq; + long freq; freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt"); if (freq <= 0) diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index ae3167c28b12..a07f51231e33 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -164,7 +164,7 @@ static int __init __gic_clocksource_init(void) /* Set clocksource mask. */ count_width = read_gic_config() & GIC_CONFIG_COUNTBITS; - count_width >>= __fls(GIC_CONFIG_COUNTBITS); + count_width >>= __ffs(GIC_CONFIG_COUNTBITS); count_width *= 4; count_width += 32; gic_clocksource.mask = CLOCKSOURCE_MASK(count_width); diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c index 21bffdcb2f20..d175b9545581 100644 --- a/drivers/clocksource/timer-imx-tpm.c +++ b/drivers/clocksource/timer-imx-tpm.c @@ -20,6 +20,7 @@ #define TPM_SC 0x10 #define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3) #define TPM_SC_CMOD_DIV_DEFAULT 0x3 +#define TPM_SC_TOF_MASK (0x1 << 7) #define TPM_CNT 0x14 #define TPM_MOD 0x18 #define TPM_STATUS 0x1c @@ -29,6 +30,7 @@ #define TPM_C0SC_MODE_SHIFT 2 #define TPM_C0SC_MODE_MASK 0x3c #define TPM_C0SC_MODE_SW_COMPARE 0x4 +#define TPM_C0SC_CHF_MASK (0x1 << 7) #define TPM_C0V 0x24 static void __iomem *timer_base; @@ -105,7 +107,7 @@ static int tpm_set_next_event(unsigned long delta, * of writing CNT registers which may cause the min_delta event got * missed, so we need add a ETIME check here in case it happened. */ - return (int)((next - now) <= 0) ? -ETIME : 0; + return (int)(next - now) <= 0 ? 
-ETIME : 0; } static int tpm_set_state_oneshot(struct clock_event_device *evt) @@ -205,9 +207,13 @@ static int __init tpm_timer_init(struct device_node *np) * 4) Channel0 disabled * 5) DMA transfers disabled */ + /* make sure counter is disabled */ writel(0, timer_base + TPM_SC); + /* TOF is W1C */ + writel(TPM_SC_TOF_MASK, timer_base + TPM_SC); writel(0, timer_base + TPM_CNT); - writel(0, timer_base + TPM_C0SC); + /* CHF is W1C */ + writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC); /* increase per cnt, div 8 by default */ writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT, diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c index 8f2423789ba9..4bfeb9929ab2 100644 --- a/drivers/clocksource/timer-stm32.c +++ b/drivers/clocksource/timer-stm32.c @@ -106,6 +106,10 @@ static int __init stm32_clockevent_init(struct device_node *np) unsigned long rate, max_delta; int irq, ret, bits, prescaler = 1; + data = kmemdup(&clock_event_ddata, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + clk = of_clk_get(np, 0); if (IS_ERR(clk)) { ret = PTR_ERR(clk); @@ -156,8 +160,8 @@ static int __init stm32_clockevent_init(struct device_node *np) writel_relaxed(prescaler - 1, data->base + TIM_PSC); writel_relaxed(TIM_EGR_UG, data->base + TIM_EGR); - writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER); writel_relaxed(0, data->base + TIM_SR); + writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER); data->periodic_top = DIV_ROUND_CLOSEST(rate, prescaler * HZ); @@ -184,6 +188,7 @@ static int __init stm32_clockevent_init(struct device_node *np) err_clk_enable: clk_put(clk); err_clk_get: + kfree(data); return ret; } diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 4ebae43118ef..b374515f9813 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -37,6 +37,13 @@ config CPU_FREQ_STAT If in doubt, say N. +config CPU_FREQ_TIMES + bool "CPU frequency time-in-state statistics" + help + Export CPU time-in-state information through procfs. + + If in doubt, say N. + choice prompt "Default CPUFreq governor" default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ @@ -275,6 +282,7 @@ config BMIPS_CPUFREQ config LOONGSON2_CPUFREQ tristate "Loongson2 CPUFreq Driver" + depends on LEMOTE_MACH2F help This option adds a CPUFreq driver for loongson processors which support software configurable cpu frequency. @@ -287,6 +295,7 @@ config LOONGSON2_CPUFREQ config LOONGSON1_CPUFREQ tristate "Loongson1 CPUFreq Driver" + depends on LOONGSON1_LS1B help This option adds a CPUFreq driver for loongson1 processors which support software configurable cpu frequency. 
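[Aside, not part of the patch: the timer-imx-tpm and timer-stm32 hunks a little further up are two faces of the same race. TOF and CHF are write-one-to-clear (W1C) status bits, so the old writel(0, ...) init left any flag the bootloader may have set still pending, and a stale interrupt could fire as soon as the timer was re-enabled; the stm32 change likewise clears TIM_SR before unmasking TIM_DIER_UIE instead of after. A minimal stand-alone C sketch of the W1C idiom, with the register simulated by a plain variable and only the status bit modeled (the names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define TOF_MASK (0x1u << 7)	/* mirrors TPM_SC_TOF_MASK above */

static uint32_t tpm_sc;		/* simulated status register */

/*
 * A W1C register clears exactly the status bits written as 1;
 * writing 0 leaves them pending. Only the W1C flag is modeled here.
 */
static void tpm_sc_write(uint32_t val)
{
	tpm_sc &= ~(val & TOF_MASK);
}

int main(void)
{
	tpm_sc = TOF_MASK;	/* overflow flag left pending by the bootloader */

	tpm_sc_write(0);	/* the old init: the flag survives */
	printf("after writing 0:    TOF=%d\n", !!(tpm_sc & TOF_MASK));

	tpm_sc_write(TOF_MASK);	/* the fixed init: the flag is cleared */
	printf("after writing mask: TOF=%d\n", !!(tpm_sc & TOF_MASK));
	return 0;
}

This is also why the driver writes TPM_SC_TOF_MASK rather than ~0: the real register mixes W1C status bits with ordinary read/write control bits (CMOD, prescaler), and an all-ones write would set the latter as a side effect.]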
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 812f9e0d01a3..3ad8aeb687ef 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -5,7 +5,10 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o # CPUfreq stats obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o -# CPUfreq governors +# CPUfreq times +obj-$(CONFIG_CPU_FREQ_TIMES) += cpufreq_times.o + +# CPUfreq governors obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 3a2ca0f79daf..d0c34df0529c 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -629,7 +629,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) if (c->x86_vendor == X86_VENDOR_INTEL) { if ((c->x86 == 15) && (c->x86_model == 6) && - (c->x86_mask == 8)) { + (c->x86_stepping == 8)) { pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n"); return -ENODEV; } diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index 17504129fd77..0c41ab3b16eb 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -213,6 +213,7 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy, { u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster; unsigned int freqs_new; + int ret; cur_cluster = cpu_to_cluster(cpu); new_cluster = actual_cluster = per_cpu(physical_cluster, cpu); @@ -229,7 +230,14 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy, } } - return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new); + ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new); + + if (!ret) { + arch_set_freq_scale(policy->related_cpus, freqs_new, + policy->cpuinfo.max_freq); + } + + return ret; } static inline u32 get_table_count(struct cpufreq_frequency_table *table) diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index a1c3025f9df7..c9ce716247c1 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -125,6 +126,49 @@ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy) cpu->perf_caps.lowest_perf, cpu_num, ret); } +/* + * The PCC subspace describes the rate at which platform can accept commands + * on the shared PCC channel (including READs which do not count towards freq + * trasition requests), so ideally we need to use the PCC values as a fallback + * if we don't have a platform specific transition_delay_us + */ +#ifdef CONFIG_ARM64 +#include + +static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu) +{ + unsigned long implementor = read_cpuid_implementor(); + unsigned long part_num = read_cpuid_part_number(); + unsigned int delay_us = 0; + + switch (implementor) { + case ARM_CPU_IMP_QCOM: + switch (part_num) { + case QCOM_CPU_PART_FALKOR_V1: + case QCOM_CPU_PART_FALKOR: + delay_us = 10000; + break; + default: + delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; + break; + } + break; + default: + delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; + break; + } + + return delay_us; +} + +#else + +static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu) +{ + return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; +} +#endif + static int cppc_cpufreq_cpu_init(struct 
cpufreq_policy *policy) { struct cppc_cpudata *cpu; @@ -162,11 +206,22 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->cpuinfo.max_freq = cppc_dmi_max_khz; policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num); + policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num); policy->shared_type = cpu->shared_type; - if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) + if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { + int i; + cpumask_copy(policy->cpus, cpu->shared_cpu_map); - else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) { + + for_each_cpu(i, policy->cpus) { + if (unlikely(i == policy->cpu)) + continue; + + memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps, + sizeof(cpu->perf_caps)); + } + } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) { /* Support only SW_ANY for now. */ pr_debug("Unsupported CPU co-ord type\n"); return -EFAULT; @@ -230,8 +285,13 @@ static int __init cppc_cpufreq_init(void) return ret; out: - for_each_possible_cpu(i) - kfree(all_cpu_data[i]); + for_each_possible_cpu(i) { + cpu = all_cpu_data[i]; + if (!cpu) + break; + free_cpumask_var(cpu->shared_cpu_map); + kfree(cpu); + } kfree(all_cpu_data); return -ENODEV; diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index a753c50e9e41..2db1525a3d7b 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -83,8 +83,6 @@ static const struct of_device_id whitelist[] __initconst = { { .compatible = "rockchip,rk3368", }, { .compatible = "rockchip,rk3399", }, - { .compatible = "socionext,uniphier-ld6b", }, - { .compatible = "st-ericsson,u8500", }, { .compatible = "st-ericsson,u8540", }, { .compatible = "st-ericsson,u9500", }, @@ -111,6 +109,14 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "marvell,armadaxp", }, + { .compatible = "mediatek,mt2701", }, + { .compatible = "mediatek,mt2712", }, + { .compatible = "mediatek,mt7622", }, + { .compatible = "mediatek,mt7623", }, + { .compatible = "mediatek,mt817x", }, + { .compatible = "mediatek,mt8173", }, + { .compatible = "mediatek,mt8176", }, + { .compatible = "nvidia,tegra124", }, { .compatible = "st,stih407", }, diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index d83ab94d041a..545946ad0752 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -43,9 +43,17 @@ static struct freq_attr *cpufreq_dt_attr[] = { static int set_target(struct cpufreq_policy *policy, unsigned int index) { struct private_data *priv = policy->driver_data; + unsigned long freq = policy->freq_table[index].frequency; + int ret; + + ret = dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000); - return dev_pm_opp_set_rate(priv->cpu_dev, - policy->freq_table[index].frequency * 1000); + if (!ret) { + arch_set_freq_scale(policy->related_cpus, freq, + policy->cpuinfo.max_freq); + } + + return ret; } /* diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index ea43b147a7fe..927399454d62 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -339,6 +340,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy, (unsigned long)freqs->new, (unsigned long)freqs->cpu); trace_cpu_frequency(freqs->new, freqs->cpu); cpufreq_stats_record_transition(policy, freqs->new); + cpufreq_times_record_transition(freqs); srcu_notifier_call_chain(&cpufreq_transition_notifier_list, 
CPUFREQ_POSTCHANGE, freqs); if (likely(policy) && likely(policy->cpu == freqs->cpu)) @@ -693,6 +695,8 @@ static ssize_t store_##file_name \ struct cpufreq_policy new_policy; \ \ memcpy(&new_policy, policy, sizeof(*policy)); \ + new_policy.min = policy->user_policy.min; \ + new_policy.max = policy->user_policy.max; \ \ ret = sscanf(buf, "%u", &new_policy.object); \ if (ret != 1) \ @@ -1287,6 +1291,7 @@ static int cpufreq_online(unsigned int cpu) goto out_exit_policy; cpufreq_stats_create_table(policy); + cpufreq_times_create_policy(policy); write_lock_irqsave(&cpufreq_driver_lock, flags); list_add(&policy->policy_list, &cpufreq_policy_list); @@ -1315,14 +1320,14 @@ static int cpufreq_online(unsigned int cpu) return 0; out_exit_policy: + for_each_cpu(j, policy->real_cpus) + remove_cpu_dev_symlink(policy, get_cpu_device(j)); + up_write(&policy->rwsem); if (cpufreq_driver->exit) cpufreq_driver->exit(policy); - for_each_cpu(j, policy->real_cpus) - remove_cpu_dev_symlink(policy, get_cpu_device(j)); - out_free_policy: cpufreq_policy_free(policy); return ret; @@ -2233,6 +2238,10 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, policy->min = new_policy->min; policy->max = new_policy->max; + arch_set_max_freq_scale(policy->cpus, policy->max); + + trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu); + policy->cached_target_freq = UINT_MAX; pr_debug("new min and max freqs are %u - %u kHz\n", @@ -2430,6 +2439,23 @@ int cpufreq_boost_enabled(void) } EXPORT_SYMBOL_GPL(cpufreq_boost_enabled); +/********************************************************************* + * FREQUENCY INVARIANT ACCOUNTING SUPPORT * + *********************************************************************/ + +__weak void arch_set_freq_scale(struct cpumask *cpus, + unsigned long cur_freq, + unsigned long max_freq) +{ +} +EXPORT_SYMBOL_GPL(arch_set_freq_scale); + +__weak void arch_set_max_freq_scale(struct cpumask *cpus, + unsigned long policy_max_freq) +{ +} +EXPORT_SYMBOL_GPL(arch_set_max_freq_scale); + /********************************************************************* * REGISTER / UNREGISTER CPUFREQ DRIVER * *********************************************************************/ diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 58d4f4e1ad6a..43e14bb512c8 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -22,6 +22,8 @@ #include "cpufreq_governor.h" +#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL (2 * TICK_NSEC / NSEC_PER_USEC) + static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs); static DEFINE_MUTEX(gov_dbs_data_mutex); @@ -47,11 +49,15 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf, { struct dbs_data *dbs_data = to_dbs_data(attr_set); struct policy_dbs_info *policy_dbs; + unsigned int sampling_interval; int ret; - ret = sscanf(buf, "%u", &dbs_data->sampling_rate); - if (ret != 1) + + ret = sscanf(buf, "%u", &sampling_interval); + if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL) return -EINVAL; + dbs_data->sampling_rate = sampling_interval; + /* * We are operating under dbs_data->mutex and so the list and its * entries can't be freed concurrently. @@ -159,7 +165,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy) * calls, so the previous load value can be used then. 
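* (prev_load itself is refreshed further down, once a normal sample window completes)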
*/ load = j_cdbs->prev_load; - } else if (unlikely(time_elapsed > 2 * sampling_rate && + } else if (unlikely((int)idle_time > 2 * sampling_rate && j_cdbs->prev_load)) { /* * If the CPU had gone completely idle and a task has @@ -179,10 +185,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy) * clear prev_load to guarantee that the load will be * computed again next time. * - * Detecting this situation is easy: the governor's - * utilization update handler would not have run during - * CPU-idle periods. Hence, an unusually large - * 'time_elapsed' (as compared to the sampling rate) + * Detecting this situation is easy: an unusually large + * 'idle_time' (as compared to the sampling rate) * indicates this scenario. */ load = j_cdbs->prev_load; @@ -211,8 +215,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy) j_cdbs->prev_load = load; } - if (time_elapsed > 2 * sampling_rate) { - unsigned int periods = time_elapsed / sampling_rate; + if (unlikely((int)idle_time > 2 * sampling_rate)) { + unsigned int periods = idle_time / sampling_rate; if (periods < idle_periods) idle_periods = periods; @@ -430,7 +434,14 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) if (ret) goto free_policy_dbs_info; - dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy); + /* + * The sampling interval should not be less than the transition latency + * of the CPU and it also cannot be too small for dbs_update() to work + * correctly. + */ + dbs_data->sampling_rate = max_t(unsigned int, + CPUFREQ_DBS_MIN_SAMPLING_INTERVAL, + cpufreq_policy_transition_delay_us(policy)); if (!have_governor_per_policy()) gov->gdbs_data = dbs_data; diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c new file mode 100644 index 000000000000..a43eeee30e8e --- /dev/null +++ b/drivers/cpufreq/cpufreq_times.c @@ -0,0 +1,464 @@ +/* drivers/cpufreq/cpufreq_times.c + * + * Copyright (C) 2018 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define UID_HASH_BITS 10 + +static DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS); + +static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */ +static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */ + +struct uid_entry { + uid_t uid; + unsigned int max_state; + struct hlist_node hash; + struct rcu_head rcu; + u64 time_in_state[0]; +}; + +/** + * struct cpu_freqs - per-cpu frequency information + * @offset: start of these freqs' stats in task time_in_state array + * @max_state: number of entries in freq_table + * @last_index: index in freq_table of last frequency switched to + * @freq_table: list of available frequencies + */ +struct cpu_freqs { + unsigned int offset; + unsigned int max_state; + unsigned int last_index; + unsigned int freq_table[0]; +}; + +static struct cpu_freqs *all_freqs[NR_CPUS]; + +static unsigned int next_offset; + + +/* Caller must hold rcu_read_lock() */ +static struct uid_entry *find_uid_entry_rcu(uid_t uid) +{ + struct uid_entry *uid_entry; + + hash_for_each_possible_rcu(uid_hash_table, uid_entry, hash, uid) { + if (uid_entry->uid == uid) + return uid_entry; + } + return NULL; +} + +/* Caller must hold uid lock */ +static struct uid_entry *find_uid_entry_locked(uid_t uid) +{ + struct uid_entry *uid_entry; + + hash_for_each_possible(uid_hash_table, uid_entry, hash, uid) { + if (uid_entry->uid == uid) + return uid_entry; + } + return NULL; +} + +/* Caller must hold uid lock */ +static struct uid_entry *find_or_register_uid_locked(uid_t uid) +{ + struct uid_entry *uid_entry, *temp; + unsigned int max_state = READ_ONCE(next_offset); + size_t alloc_size = sizeof(*uid_entry) + max_state * + sizeof(uid_entry->time_in_state[0]); + + uid_entry = find_uid_entry_locked(uid); + if (uid_entry) { + if (uid_entry->max_state == max_state) + return uid_entry; + /* uid_entry->time_in_state is too small to track all freqs, so + * expand it. 
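* (note: __krealloc(), unlike krealloc(), never frees the old allocation, which is why the stale entry is handed to kfree_rcu() below once hlist_replace_rcu() has swapped the hash link)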
+ */ + temp = __krealloc(uid_entry, alloc_size, GFP_ATOMIC); + if (!temp) + return uid_entry; + temp->max_state = max_state; + memset(temp->time_in_state + uid_entry->max_state, 0, + (max_state - uid_entry->max_state) * + sizeof(uid_entry->time_in_state[0])); + if (temp != uid_entry) { + hlist_replace_rcu(&uid_entry->hash, &temp->hash); + kfree_rcu(uid_entry, rcu); + } + return temp; + } + + uid_entry = kzalloc(alloc_size, GFP_ATOMIC); + if (!uid_entry) + return NULL; + + uid_entry->uid = uid; + uid_entry->max_state = max_state; + + hash_add_rcu(uid_hash_table, &uid_entry->hash, uid); + + return uid_entry; +} + +static bool freq_index_invalid(unsigned int index) +{ + unsigned int cpu; + struct cpu_freqs *freqs; + + for_each_possible_cpu(cpu) { + freqs = all_freqs[cpu]; + if (!freqs || index < freqs->offset || + freqs->offset + freqs->max_state <= index) + continue; + return freqs->freq_table[index - freqs->offset] == + CPUFREQ_ENTRY_INVALID; + } + return true; +} + +static int single_uid_time_in_state_show(struct seq_file *m, void *ptr) +{ + struct uid_entry *uid_entry; + unsigned int i; + u64 time; + uid_t uid = from_kuid_munged(current_user_ns(), *(kuid_t *)m->private); + + if (uid == overflowuid) + return -EINVAL; + + rcu_read_lock(); + + uid_entry = find_uid_entry_rcu(uid); + if (!uid_entry) { + rcu_read_unlock(); + return 0; + } + + for (i = 0; i < uid_entry->max_state; ++i) { + if (freq_index_invalid(i)) + continue; + time = nsec_to_clock_t(uid_entry->time_in_state[i]); + seq_write(m, &time, sizeof(time)); + } + + rcu_read_unlock(); + + return 0; +} + +static void *uid_seq_start(struct seq_file *seq, loff_t *pos) +{ + if (*pos >= HASH_SIZE(uid_hash_table)) + return NULL; + + return &uid_hash_table[*pos]; +} + +static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + (*pos)++; + + if (*pos >= HASH_SIZE(uid_hash_table)) + return NULL; + + return &uid_hash_table[*pos]; +} + +static void uid_seq_stop(struct seq_file *seq, void *v) { } + +static int uid_time_in_state_seq_show(struct seq_file *m, void *v) +{ + struct uid_entry *uid_entry; + struct cpu_freqs *freqs, *last_freqs = NULL; + int i, cpu; + + if (v == uid_hash_table) { + seq_puts(m, "uid:"); + for_each_possible_cpu(cpu) { + freqs = all_freqs[cpu]; + if (!freqs || freqs == last_freqs) + continue; + last_freqs = freqs; + for (i = 0; i < freqs->max_state; i++) { + if (freqs->freq_table[i] == + CPUFREQ_ENTRY_INVALID) + continue; + seq_printf(m, " %d", freqs->freq_table[i]); + } + } + seq_putc(m, '\n'); + } + + rcu_read_lock(); + + hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) { + if (uid_entry->max_state) + seq_printf(m, "%d:", uid_entry->uid); + for (i = 0; i < uid_entry->max_state; ++i) { + if (freq_index_invalid(i)) + continue; + seq_printf(m, " %lu", (unsigned long)nsec_to_clock_t( + uid_entry->time_in_state[i])); + } + if (uid_entry->max_state) + seq_putc(m, '\n'); + } + + rcu_read_unlock(); + return 0; +} + +void cpufreq_task_times_init(struct task_struct *p) +{ + unsigned long flags; + + spin_lock_irqsave(&task_time_in_state_lock, flags); + p->time_in_state = NULL; + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + p->max_state = 0; +} + +void cpufreq_task_times_alloc(struct task_struct *p) +{ + void *temp; + unsigned long flags; + unsigned int max_state = READ_ONCE(next_offset); + + /* We use one array to avoid multiple allocs per task */ + temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC); + if (!temp) + return; + + spin_lock_irqsave(&task_time_in_state_lock, 
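/* the lock protects swaps of p->time_in_state itself; readers take it too before dereferencing the array */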
flags); + p->time_in_state = temp; + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + p->max_state = max_state; +} + +/* Caller must hold task_time_in_state_lock */ +static int cpufreq_task_times_realloc_locked(struct task_struct *p) +{ + void *temp; + unsigned int max_state = READ_ONCE(next_offset); + + temp = krealloc(p->time_in_state, max_state * sizeof(u64), GFP_ATOMIC); + if (!temp) + return -ENOMEM; + p->time_in_state = temp; + memset(p->time_in_state + p->max_state, 0, + (max_state - p->max_state) * sizeof(u64)); + p->max_state = max_state; + return 0; +} + +void cpufreq_task_times_exit(struct task_struct *p) +{ + unsigned long flags; + void *temp; + + if (!p->time_in_state) + return; + + spin_lock_irqsave(&task_time_in_state_lock, flags); + temp = p->time_in_state; + p->time_in_state = NULL; + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + kfree(temp); +} + +int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *p) +{ + unsigned int cpu, i; + u64 cputime; + unsigned long flags; + struct cpu_freqs *freqs; + struct cpu_freqs *last_freqs = NULL; + + spin_lock_irqsave(&task_time_in_state_lock, flags); + for_each_possible_cpu(cpu) { + freqs = all_freqs[cpu]; + if (!freqs || freqs == last_freqs) + continue; + last_freqs = freqs; + + seq_printf(m, "cpu%u\n", cpu); + for (i = 0; i < freqs->max_state; i++) { + if (freqs->freq_table[i] == CPUFREQ_ENTRY_INVALID) + continue; + cputime = 0; + if (freqs->offset + i < p->max_state && + p->time_in_state) + cputime = p->time_in_state[freqs->offset + i]; + seq_printf(m, "%u %lu\n", freqs->freq_table[i], + (unsigned long)nsec_to_clock_t(cputime)); + } + } + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + return 0; +} + +void cpufreq_acct_update_power(struct task_struct *p, u64 cputime) +{ + unsigned long flags; + unsigned int state; + struct uid_entry *uid_entry; + struct cpu_freqs *freqs = all_freqs[task_cpu(p)]; + uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p)); + + if (!freqs || p->flags & PF_EXITING) + return; + + state = freqs->offset + READ_ONCE(freqs->last_index); + + spin_lock_irqsave(&task_time_in_state_lock, flags); + if ((state < p->max_state || !cpufreq_task_times_realloc_locked(p)) && + p->time_in_state) + p->time_in_state[state] += cputime; + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + + spin_lock_irqsave(&uid_lock, flags); + uid_entry = find_or_register_uid_locked(uid); + if (uid_entry && state < uid_entry->max_state) + uid_entry->time_in_state[state] += cputime; + spin_unlock_irqrestore(&uid_lock, flags); +} + +void cpufreq_times_create_policy(struct cpufreq_policy *policy) +{ + int cpu, index; + unsigned int count = 0; + struct cpufreq_frequency_table *pos, *table; + struct cpu_freqs *freqs; + void *tmp; + + if (all_freqs[policy->cpu]) + return; + + table = policy->freq_table; + if (!table) + return; + + cpufreq_for_each_entry(pos, table) + count++; + + tmp = kzalloc(sizeof(*freqs) + sizeof(freqs->freq_table[0]) * count, + GFP_KERNEL); + if (!tmp) + return; + + freqs = tmp; + freqs->max_state = count; + + index = cpufreq_frequency_table_get_index(policy, policy->cur); + if (index >= 0) + WRITE_ONCE(freqs->last_index, index); + + cpufreq_for_each_entry(pos, table) + freqs->freq_table[pos - table] = pos->frequency; + + freqs->offset = next_offset; + WRITE_ONCE(next_offset, freqs->offset + count); + for_each_cpu(cpu, policy->related_cpus) + all_freqs[cpu] = freqs; +} + +void cpufreq_task_times_remove_uids(uid_t 
uid_start, uid_t uid_end) +{ + struct uid_entry *uid_entry; + struct hlist_node *tmp; + unsigned long flags; + + spin_lock_irqsave(&uid_lock, flags); + + for (; uid_start <= uid_end; uid_start++) { + hash_for_each_possible_safe(uid_hash_table, uid_entry, tmp, + hash, uid_start) { + if (uid_start == uid_entry->uid) { + hash_del_rcu(&uid_entry->hash); + kfree_rcu(uid_entry, rcu); + } + } + } + + spin_unlock_irqrestore(&uid_lock, flags); +} + +void cpufreq_times_record_transition(struct cpufreq_freqs *freq) +{ + int index; + struct cpu_freqs *freqs = all_freqs[freq->cpu]; + struct cpufreq_policy *policy; + + if (!freqs) + return; + + policy = cpufreq_cpu_get(freq->cpu); + if (!policy) + return; + + index = cpufreq_frequency_table_get_index(policy, freq->new); + if (index >= 0) + WRITE_ONCE(freqs->last_index, index); + + cpufreq_cpu_put(policy); +} + +static const struct seq_operations uid_time_in_state_seq_ops = { + .start = uid_seq_start, + .next = uid_seq_next, + .stop = uid_seq_stop, + .show = uid_time_in_state_seq_show, +}; + +static int uid_time_in_state_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &uid_time_in_state_seq_ops); +} + +int single_uid_time_in_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, single_uid_time_in_state_show, + &(inode->i_uid)); +} + +static const struct file_operations uid_time_in_state_fops = { + .open = uid_time_in_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init cpufreq_times_init(void) +{ + proc_create_data("uid_time_in_state", 0444, NULL, + &uid_time_in_state_fops, NULL); + + return 0; +} + +early_initcall(cpufreq_times_init); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 93a0e88bef76..114dfe67015b 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -285,6 +285,7 @@ struct pstate_funcs { static struct pstate_funcs pstate_funcs __read_mostly; static int hwp_active __read_mostly; +static int hwp_mode_bdw __read_mostly; static bool per_cpu_limits __read_mostly; static struct cpufreq_driver *intel_pstate_driver __read_mostly; @@ -779,6 +780,8 @@ static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy) return 0; } +static void intel_pstate_hwp_enable(struct cpudata *cpudata); + static int intel_pstate_resume(struct cpufreq_policy *policy) { if (!hwp_active) @@ -786,6 +789,9 @@ static int intel_pstate_resume(struct cpufreq_policy *policy) mutex_lock(&intel_pstate_limits_lock); + if (policy->cpu == 0) + intel_pstate_hwp_enable(all_cpu_data[policy->cpu]); + all_cpu_data[policy->cpu]->epp_policy = 0; intel_pstate_hwp_set(policy->cpu); @@ -1366,7 +1372,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); cpu->pstate.scaling = pstate_funcs.get_scaling(); cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; - cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; + + if (hwp_active && !hwp_mode_bdw) { + unsigned int phy_max, current_max; + + intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max); + cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling; + } else { + cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; + } if (pstate_funcs.get_aperf_mperf_shift) cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift(); @@ -2174,6 +2188,18 @@ static bool __init
intel_pstate_no_acpi_pcch(void) +{ + acpi_status status; + acpi_handle handle; + + status = acpi_get_handle(NULL, "\\_SB", &handle); + if (ACPI_FAILURE(status)) + return true; + + return !acpi_has_method(handle, "PCCH"); +} + static bool __init intel_pstate_has_acpi_ppc(void) { int i; @@ -2233,7 +2259,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void) switch (plat_info[idx].data) { case PSS: - return intel_pstate_no_acpi_pss(); + if (!intel_pstate_no_acpi_pss()) + return false; + + return intel_pstate_no_acpi_pcch(); case PPC: return intel_pstate_has_acpi_ppc() && !force_load; } @@ -2256,28 +2285,36 @@ static inline bool intel_pstate_has_acpi_ppc(void) { return false; } static inline void intel_pstate_request_control_from_smm(void) {} #endif /* CONFIG_ACPI */ +#define INTEL_PSTATE_HWP_BROADWELL 0x01 + +#define ICPU_HWP(model, hwp_mode) \ + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode } + static const struct x86_cpu_id hwp_support_ids[] __initconst = { - { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP }, + ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), + ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL), + ICPU_HWP(X86_MODEL_ANY, 0), {} }; static int __init intel_pstate_init(void) { + const struct x86_cpu_id *id; int rc; if (no_load) return -ENODEV; - if (x86_match_cpu(hwp_support_ids)) { + id = x86_match_cpu(hwp_support_ids); + if (id) { copy_cpu_funcs(&core_funcs); if (!no_hwp) { hwp_active++; + hwp_mode_bdw = id->driver_data; intel_pstate.attr = hwp_cpufreq_attrs; goto hwp_cpu_matched; } } else { - const struct x86_cpu_id *id; - id = x86_match_cpu(intel_pstate_cpu_ids); if (!id) return -ENODEV; diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index c46a12df40dd..859a62ea6120 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) break; case 7: - switch (c->x86_mask) { + switch (c->x86_stepping) { case 0: longhaul_version = TYPE_LONGHAUL_V1; cpu_model = CPU_SAMUEL2; @@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) break; case 1 ... 15: longhaul_version = TYPE_LONGHAUL_V2; - if (c->x86_mask < 8) { + if (c->x86_stepping < 8) { cpu_model = CPU_SAMUEL2; cpuname = "C3 'Samuel 2' [C5B]"; } else { @@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) numscales = 32; memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults)); memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr)); - switch (c->x86_mask) { + switch (c->x86_stepping) { case 0 ... 
1: cpu_model = CPU_NEHEMIAH; cpuname = "C3 'Nehemiah A' [C5XLOE]"; @@ -894,7 +894,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0)) longhaul_setup_voltagescaling(); - policy->cpuinfo.transition_latency = 200000; /* nsec */ + policy->transition_delay_us = 200000; /* usec */ return cpufreq_table_validate_and_show(policy, longhaul_table); } diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index fd77812313f3..a25741b1281b 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c @@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) #endif /* Errata workaround */ - cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask; + cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping; switch (cpuid) { case 0x0f07: case 0x0f0a: diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 3f0ce2ae35ee..0c56c9759672 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -580,6 +580,10 @@ static int __init pcc_cpufreq_init(void) { int ret; + /* Skip initialization if another cpufreq driver is there. */ + if (cpufreq_get_current_driver()) + return 0; + if (acpi_disabled) return 0; diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 80ac313e6c59..302e9ce793a0 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -131,7 +131,7 @@ static int check_powernow(void) return 0; } - if ((c->x86_model == 6) && (c->x86_mask == 0)) { + if ((c->x86_model == 6) && (c->x86_stepping == 0)) { pr_info("K7 660[A0] core detected, enabling errata workarounds\n"); have_a0 = 1; } diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 062d71434e47..b01e31db5f83 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -1043,7 +1043,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol) data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { - pr_err("unable to alloc powernow_k8_data"); + pr_err("unable to alloc powernow_k8_data\n"); return -ENOMEM; } diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 3ff5160451b4..a28bb8f3f395 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -41,11 +41,9 @@ #define POWERNV_MAX_PSTATES 256 #define PMSR_PSAFE_ENABLE (1UL << 30) #define PMSR_SPR_EM_DISABLE (1UL << 31) -#define PMSR_MAX(x) ((x >> 32) & 0xFF) +#define MAX_PSTATE_SHIFT 32 #define LPSTATE_SHIFT 48 #define GPSTATE_SHIFT 56 -#define GET_LPSTATE(x) (((x) >> LPSTATE_SHIFT) & 0xFF) -#define GET_GPSTATE(x) (((x) >> GPSTATE_SHIFT) & 0xFF) #define MAX_RAMP_DOWN_TIME 5120 /* @@ -93,6 +91,7 @@ struct global_pstate_info { }; static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; +u32 pstate_sign_prefix; static bool rebooting, throttled, occ_reset; static const char * const throttle_reason[] = { @@ -147,6 +146,20 @@ static struct powernv_pstate_info { bool wof_enabled; } powernv_pstate_info; +static inline int extract_pstate(u64 pmsr_val, unsigned int shift) +{ + int ret = ((pmsr_val >> shift) & 0xFF); + + if (!ret) + return ret; + + return (pstate_sign_prefix | ret); +} + +#define extract_local_pstate(x) extract_pstate(x, LPSTATE_SHIFT) +#define extract_global_pstate(x) extract_pstate(x, GPSTATE_SHIFT) +#define extract_max_pstate(x) extract_pstate(x, MAX_PSTATE_SHIFT) + /* Use following macros for conversions between pstate_id 
and index */ static inline int idx_to_pstate(unsigned int i) { @@ -277,6 +290,9 @@ static int init_powernv_pstates(void) powernv_pstate_info.nr_pstates = nr_pstates; pr_debug("NR PStates %d\n", nr_pstates); + + pstate_sign_prefix = pstate_min & ~0xFF; + for (i = 0; i < nr_pstates; i++) { u32 id = be32_to_cpu(pstate_ids[i]); u32 freq = be32_to_cpu(pstate_freqs[i]); @@ -287,9 +303,9 @@ static int init_powernv_pstates(void) if (id == pstate_max) powernv_pstate_info.max = i; - else if (id == pstate_nominal) + if (id == pstate_nominal) powernv_pstate_info.nominal = i; - else if (id == pstate_min) + if (id == pstate_min) powernv_pstate_info.min = i; if (powernv_pstate_info.wof_enabled && id == pstate_turbo) { @@ -437,17 +453,10 @@ struct powernv_smp_call_data { static void powernv_read_cpu_freq(void *arg) { unsigned long pmspr_val; - s8 local_pstate_id; struct powernv_smp_call_data *freq_data = arg; pmspr_val = get_pmspr(SPRN_PMSR); - - /* - * The local pstate id corresponds bits 48..55 in the PMSR. - * Note: Watch out for the sign! - */ - local_pstate_id = (pmspr_val >> 48) & 0xFF; - freq_data->pstate_id = local_pstate_id; + freq_data->pstate_id = extract_local_pstate(pmspr_val); freq_data->freq = pstate_id_to_freq(freq_data->pstate_id); pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n", @@ -521,7 +530,7 @@ static void powernv_cpufreq_throttle_check(void *data) chip = this_cpu_read(chip_info); /* Check for Pmax Capping */ - pmsr_pmax = (s8)PMSR_MAX(pmsr); + pmsr_pmax = extract_max_pstate(pmsr); pmsr_pmax_idx = pstate_to_idx(pmsr_pmax); if (pmsr_pmax_idx != powernv_pstate_info.max) { if (chip->throttled) @@ -637,6 +646,16 @@ void gpstate_timer_handler(unsigned long data) if (!spin_trylock(&gpstates->gpstate_lock)) return; + /* + * If the timer has migrated to a different cpu then bring + * it back to one of the policy->cpus + */ + if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) { + gpstates->timer.expires = jiffies + msecs_to_jiffies(1); + add_timer_on(&gpstates->timer, cpumask_first(policy->cpus)); + spin_unlock(&gpstates->gpstate_lock); + return; + } /* * If PMCR was last updated using fast_switch then * gpstate->last_lpstate_idx may hold a stale * value. Hence, read from PMCR to get correct data.
*/ val = get_pmspr(SPRN_PMCR); - freq_data.gpstate_id = (s8)GET_GPSTATE(val); - freq_data.pstate_id = (s8)GET_LPSTATE(val); + freq_data.gpstate_id = extract_global_pstate(val); + freq_data.pstate_id = extract_local_pstate(val); if (freq_data.gpstate_id == freq_data.pstate_id) { reset_gpstates(policy); spin_unlock(&gpstates->gpstate_lock); @@ -676,10 +695,8 @@ void gpstate_timer_handler(unsigned long data) if (gpstate_idx != gpstates->last_lpstate_idx) queue_gpstate_timer(gpstates); + set_pstate(&freq_data); spin_unlock(&gpstates->gpstate_lock); - - /* Timer may get migrated to a different cpu on cpu hot unplug */ - smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1); } /* diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index 7b596fa38ad2..6bebc1f9f55a 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c @@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name) static int s3c_cpufreq_init(struct cpufreq_policy *policy) { policy->clk = clk_arm; - return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency); + + policy->cpuinfo.transition_latency = cpu_cur.info->latency; + + if (ftab) + return cpufreq_table_validate_and_show(policy, ftab); + + return 0; } static int __init s3c_cpufreq_initclks(void) diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c index 4894924a3ca2..195f27f9c1cb 100644 --- a/drivers/cpufreq/spear-cpufreq.c +++ b/drivers/cpufreq/spear-cpufreq.c @@ -177,7 +177,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev) np = of_cpu_device_node_get(0); if (!np) { - pr_err("No cpu node found"); + pr_err("No cpu node found\n"); return -ENODEV; } @@ -187,7 +187,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev) prop = of_find_property(np, "cpufreq_tbl", NULL); if (!prop || !prop->value) { - pr_err("Invalid cpufreq_tbl"); + pr_err("Invalid cpufreq_tbl\n"); ret = -ENODEV; goto out_put_node; } diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index 41bc5397f4bb..4fa5adf16c70 100644 --- a/drivers/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c @@ -37,7 +37,7 @@ struct cpu_id { __u8 x86; /* CPU family */ __u8 x86_model; /* model */ - __u8 x86_mask; /* stepping */ + __u8 x86_stepping; /* stepping */ }; enum { @@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, { if ((c->x86 == x->x86) && (c->x86_model == x->x86_model) && - (c->x86_mask == x->x86_mask)) + (c->x86_stepping == x->x86_stepping)) return 1; return 0; } diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c index ccab452a4ef5..dd7bb00991f4 100644 --- a/drivers/cpufreq/speedstep-lib.c +++ b/drivers/cpufreq/speedstep-lib.c @@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(void) ebx = cpuid_ebx(0x00000001); ebx &= 0x000000FF; - pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); + pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping); - switch (c->x86_mask) { + switch (c->x86_stepping) { case 4: /* * B-stepping [M-P4-M] @@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(void) msr_lo, msr_hi); if ((msr_hi & (1<<18)) && (relaxed_check ? 
1 : (msr_hi & (3<<24)))) { - if (c->x86_mask == 0x01) { + if (c->x86_stepping == 0x01) { pr_debug("early PIII version\n"); return SPEEDSTEP_CPU_PIII_C_EARLY; } else diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 4bf47de6101f..ffcddcd4c5e6 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -217,7 +217,8 @@ static int ti_cpufreq_init(void) opp_data->cpu_dev = get_cpu_device(0); if (!opp_data->cpu_dev) { pr_err("%s: Failed to get device for CPU0\n", __func__); - return -ENODEV; + ret = ENODEV; + goto free_opp_data; } opp_data->opp_node = dev_pm_opp_of_get_opp_desc_node(opp_data->cpu_dev); @@ -262,6 +263,8 @@ static int ti_cpufreq_init(void) fail_put_node: of_node_put(opp_data->opp_node); +free_opp_data: + kfree(opp_data); return ret; } diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index 52a75053ee03..f47c54546752 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c @@ -104,13 +104,13 @@ static int __init arm_idle_init(void) ret = dt_init_idle_driver(drv, arm_idle_state_match, 1); if (ret <= 0) { ret = ret ? : -ENODEV; - goto init_fail; + goto out_kfree_drv; } ret = cpuidle_register_driver(drv); if (ret) { pr_err("Failed to register cpuidle driver\n"); - goto init_fail; + goto out_kfree_drv; } /* @@ -128,14 +128,14 @@ static int __init arm_idle_init(void) if (ret) { pr_err("CPU %d failed to init idle CPU ops\n", cpu); - goto out_fail; + goto out_unregister_drv; } dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { pr_err("Failed to allocate cpuidle device\n"); ret = -ENOMEM; - goto out_fail; + goto out_unregister_drv; } dev->cpu = cpu; @@ -143,21 +143,25 @@ static int __init arm_idle_init(void) if (ret) { pr_err("Failed to register cpuidle device for CPU %d\n", cpu); - kfree(dev); - goto out_fail; + goto out_kfree_dev; } } return 0; -init_fail: + +out_kfree_dev: + kfree(dev); +out_unregister_drv: + cpuidle_unregister_driver(drv); +out_kfree_drv: kfree(drv); out_fail: while (--cpu >= 0) { dev = per_cpu(cpuidle_devices, cpu); + drv = cpuidle_get_cpu_driver(dev); cpuidle_unregister_device(dev); - kfree(dev); - drv = cpuidle_get_driver(); cpuidle_unregister_driver(drv); + kfree(dev); kfree(drv); } diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index ed6531f075c6..1d7d5d121d55 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -43,9 +43,31 @@ struct stop_psscr_table { static struct stop_psscr_table stop_psscr_table[CPUIDLE_STATE_MAX] __read_mostly; -static u64 snooze_timeout __read_mostly; +static u64 default_snooze_timeout __read_mostly; static bool snooze_timeout_en __read_mostly; +static u64 get_snooze_timeout(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index) +{ + int i; + + if (unlikely(!snooze_timeout_en)) + return default_snooze_timeout; + + for (i = index + 1; i < drv->state_count; i++) { + struct cpuidle_state *s = &drv->states[i]; + struct cpuidle_state_usage *su = &dev->states_usage[i]; + + if (s->disabled || su->disable) + continue; + + return s->target_residency * tb_ticks_per_usec; + } + + return default_snooze_timeout; +} + static int snooze_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) @@ -56,7 +78,7 @@ static int snooze_loop(struct cpuidle_device *dev, local_irq_enable(); - snooze_exit_time = get_tb() + snooze_timeout; + snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index); ppc64_runlatch_off(); HMT_very_low(); while (!need_resched()) 
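/* poll in the shallow snooze state until a task is runnable or snooze_exit_time passes; after this patch the timeout tracks the target residency of the next enabled deeper state rather than a fixed states[1] value */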
{ @@ -384,9 +406,9 @@ static int powernv_add_idle_states(void) * Firmware passes residency and latency values in ns. * cpuidle expects it in us. */ - exit_latency = latency_ns[i] / 1000; + exit_latency = DIV_ROUND_UP(latency_ns[i], 1000); if (!rc) - target_residency = residency_ns[i] / 1000; + target_residency = DIV_ROUND_UP(residency_ns[i], 1000); else target_residency = 0; @@ -463,11 +485,9 @@ static int powernv_idle_probe(void) cpuidle_state_table = powernv_states; /* Device tree can indicate more idle states */ max_idle_state = powernv_add_idle_states(); - if (max_idle_state > 1) { + default_snooze_timeout = TICK_USEC * tb_ticks_per_usec; + if (max_idle_state > 1) snooze_timeout_en = true; - snooze_timeout = powernv_states[1].target_residency * - tb_ticks_per_usec; - } } else return -ENODEV; diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 484cc8909d5c..7c33193216ea 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -208,10 +208,11 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, return -EBUSY; } target_state = &drv->states[index]; + broadcast = false; } /* Take note of the planned idle state. */ - sched_idle_set_state(target_state); + sched_idle_set_state(target_state, index); trace_cpu_idle_rcuidle(index, dev->cpu); time_start = ns_to_ktime(local_clock()); @@ -225,7 +226,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); /* The cpu is no longer idle or about to enter idle. */ - sched_idle_set_state(NULL); + sched_idle_set_state(NULL, -1); if (broadcast) { if (WARN_ON_ONCE(!irqs_disabled())) @@ -262,12 +263,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, * * @drv: the cpuidle driver * @dev: the cpuidle device + * @stop_tick: indication on whether or not to stop the tick * * Returns the index of the idle state. The return value must not be negative. + * + * The memory location pointed to by @stop_tick is expected to be written the + * 'false' boolean value if the scheduler tick should not be stopped before + * entering the returned state. 
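* If the governor leaves @stop_tick untouched, the caller may stop the tick as usual.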
*/ -int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) +int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, + bool *stop_tick) { - return cpuidle_curr_governor->select(drv, dev); + return cpuidle_curr_governor->select(drv, dev, stop_tick); } /** diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index ce1a2ffffb2a..0213e07abe9c 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c @@ -62,9 +62,10 @@ static inline void ladder_do_selection(struct ladder_device *ldev, * ladder_select_state - selects the next state to enter * @drv: cpuidle driver * @dev: the CPU + * @dummy: not used */ static int ladder_select_state(struct cpuidle_driver *drv, - struct cpuidle_device *dev) + struct cpuidle_device *dev, bool *dummy) { struct ladder_device *ldev = this_cpu_ptr(&ladder_devices); struct ladder_device_state *last_state; diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 48eaf2879228..58c103b5892b 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -123,6 +123,7 @@ struct menu_device { int last_state_idx; int needs_update; + int tick_wakeup; unsigned int next_timer_us; unsigned int predicted_us; @@ -180,7 +181,12 @@ static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned lo /* for higher loadavg, we are more reluctant */ - mult += 2 * get_loadavg(load); + /* + * this doesn't work as intended - it is almost always 0, but can + * sometimes, depending on workload, spike very high into the hundreds + * even when the average cpu load is under 10%. + */ + /* mult += 2 * get_loadavg(); */ /* for IO wait tasks (per cpu!) we add 5x each */ mult += 10 * nr_iowaiters; @@ -279,8 +285,10 @@ static unsigned int get_typical_interval(struct menu_device *data) * menu_select - selects the next idle state to enter * @drv: cpuidle driver containing state data * @dev: the CPU + * @stop_tick: indication on whether or not to stop the tick */ -static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) +static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, + bool *stop_tick) { struct menu_device *data = this_cpu_ptr(&menu_devices); struct device *device = get_cpu_device(dev->cpu); @@ -292,6 +300,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) unsigned int expected_interval; unsigned long nr_iowaiters, cpu_load; int resume_latency = dev_pm_qos_raw_read_value(device); + ktime_t delta_next; if (data->needs_update) { menu_update(drv, dev); @@ -303,11 +312,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) latency_req = resume_latency; /* Special case when user has set very strict latency requirement */ - if (unlikely(latency_req == 0)) + if (unlikely(latency_req == 0)) { + *stop_tick = false; return 0; + } /* determine the expected residency time, round up */ - data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length()); + data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next)); get_iowait_load(&nr_iowaiters, &cpu_load); data->bucket = which_bucket(data->next_timer_us, nr_iowaiters); @@ -346,14 +357,30 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) */ data->predicted_us = min(data->predicted_us, expected_interval); - /* - * Use the performance multiplier and the user-configurable - * latency_req to determine the maximum exit latency. 
- */ - interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load); - if (latency_req > interactivity_req) - latency_req = interactivity_req; + if (tick_nohz_tick_stopped()) { + /* + * If the tick is already stopped, the cost of possible short + * idle duration misprediction is much higher, because the CPU + * may be stuck in a shallow idle state for a long time as a + * result of it. In that case say we might mispredict and try + * to force the CPU into a state for which we would have stopped + * the tick, unless a timer is going to expire really soon + * anyway. + */ + if (data->predicted_us < TICK_USEC) + data->predicted_us = min_t(unsigned int, TICK_USEC, + ktime_to_us(delta_next)); + } else { + /* + * Use the performance multiplier and the user-configurable + * latency_req to determine the maximum exit latency. + */ + interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load); + if (latency_req > interactivity_req) + latency_req = interactivity_req; + } + expected_interval = data->predicted_us; /* * Find the idle state with the lowest power while satisfying * our constraints. @@ -369,15 +396,52 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) idx = i; /* first enabled state */ if (s->target_residency > data->predicted_us) break; - if (s->exit_latency > latency_req) + if (s->exit_latency > latency_req) { + /* + * If we break out of the loop for latency reasons, use + * the target residency of the selected state as the + * expected idle duration so that the tick is retained + * as long as that target residency is low enough. + */ + expected_interval = drv->states[idx].target_residency; break; - + } idx = i; } if (idx == -1) idx = 0; /* No states enabled. Must use 0. */ + /* + * Don't stop the tick if the selected state is a polling one or if the + * expected idle duration is shorter than the tick period length. + */ + if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) || + expected_interval < TICK_USEC) { + unsigned int delta_next_us = ktime_to_us(delta_next); + + *stop_tick = false; + + if (!tick_nohz_tick_stopped() && idx > 0 && + drv->states[idx].target_residency > delta_next_us) { + /* + * The tick is not going to be stopped and the target + * residency of the state to be returned is not within + * the time until the next timer event including the + * tick, so try to correct that. + */ + for (i = idx - 1; i >= 0; i--) { + if (drv->states[i].disabled || + dev->states_usage[i].disable) + continue; + + idx = i; + if (drv->states[i].target_residency <= delta_next_us) + break; + } + } + } + data->last_state_idx = idx; return data->last_state_idx; @@ -397,6 +461,7 @@ static void menu_reflect(struct cpuidle_device *dev, int index) data->last_state_idx = index; data->needs_update = 1; + data->tick_wakeup = tick_nohz_idle_got_tick(); } /** @@ -427,14 +492,27 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) * assume the state was never reached and the exit latency is 0. */ - /* measured value */ - measured_us = cpuidle_get_last_residency(dev); - - /* Deduct exit latency */ - if (measured_us > 2 * target->exit_latency) - measured_us -= target->exit_latency; - else - measured_us /= 2; + if (data->tick_wakeup && data->next_timer_us > TICK_USEC) { + /* + * The nohz code said that there wouldn't be any events within + * the tick boundary (if the tick was stopped), but the idle + * duration predictor had a differing opinion. 
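The demotion loop added above keeps the CPU out of an idle state it could not amortize before the next timer fires while the tick stays on. A self-contained sketch of that selection rule, with a hypothetical state table and the driver's two per-state disable flags folded into one field for brevity:

/* Pick a state whose target residency fits in the time until the next
 * timer event, scanning downward from the originally chosen index.
 * Mirrors the loop above: idx is updated before the residency check, so
 * if nothing fits we end on the shallowest enabled state. */
struct state { unsigned int target_residency; int disabled; };

static int demote_for_timer(const struct state *states, int idx,
                            unsigned int delta_next_us)
{
        int i;

        if (states[idx].target_residency <= delta_next_us)
                return idx; /* already fits */

        for (i = idx - 1; i >= 0; i--) {
                if (states[i].disabled)
                        continue;

                idx = i;
                if (states[i].target_residency <= delta_next_us)
                        break;
        }
        return idx;
}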
Since the CPU + * was woken up by a tick (that wasn't stopped after all), the + * predictor was not quite right, so assume that the CPU could + * have been idle long (but not forever) to help the idle + * duration predictor do a better job next time. + */ + measured_us = 9 * MAX_INTERESTING / 10; + } else { + /* measured value */ + measured_us = cpuidle_get_last_residency(dev); + + /* Deduct exit latency */ + if (measured_us > 2 * target->exit_latency) + measured_us -= target->exit_latency; + else + measured_us /= 2; + } /* Make sure our coefficients do not exceed unity */ if (measured_us > data->next_timer_us) diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index fe33c199fc1a..143f8bc403b9 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -721,7 +721,6 @@ config CRYPTO_DEV_ARTPEC6 select CRYPTO_HASH select CRYPTO_SHA1 select CRYPTO_SHA256 - select CRYPTO_SHA384 select CRYPTO_SHA512 help Enables the driver for the on-chip crypto accelerator diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 65dc78b91dea..3f9eee7e555f 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -207,7 +207,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev) dev->pdr_pa); return -ENOMEM; } - memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD); + memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD); dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD, &dev->shadow_sa_pool_pa, @@ -240,13 +240,15 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev) static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev) { - if (dev->pdr != NULL) + if (dev->pdr) dma_free_coherent(dev->core_dev->device, sizeof(struct ce_pd) * PPC4XX_NUM_PD, dev->pdr, dev->pdr_pa); + if (dev->shadow_sa_pool) dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD, dev->shadow_sa_pool, dev->shadow_sa_pool_pa); + if (dev->shadow_sr_pool) dma_free_coherent(dev->core_dev->device, sizeof(struct sa_state_record) * PPC4XX_NUM_PD, @@ -416,12 +418,12 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev) static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev) { - if (dev->sdr != NULL) + if (dev->sdr) dma_free_coherent(dev->core_dev->device, sizeof(struct ce_sd) * PPC4XX_NUM_SD, dev->sdr, dev->sdr_pa); - if (dev->scatter_buffer_va != NULL) + if (dev->scatter_buffer_va) dma_free_coherent(dev->core_dev->device, dev->scatter_buffer_size * PPC4XX_NUM_SD, dev->scatter_buffer_va, @@ -1033,12 +1035,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, break; } - if (rc) { - list_del(&alg->entry); + if (rc) kfree(alg); - } else { + else list_add_tail(&alg->entry, &sec_dev->alg_list); - } } return 0; @@ -1193,7 +1193,7 @@ static int crypto4xx_probe(struct platform_device *ofdev) rc = crypto4xx_build_gdr(core_dev->dev); if (rc) - goto err_build_gdr; + goto err_build_pdr; rc = crypto4xx_build_sdr(core_dev->dev); if (rc) @@ -1236,12 +1236,11 @@ static int crypto4xx_probe(struct platform_device *ofdev) err_request_irq: irq_dispose_mapping(core_dev->irq); tasklet_kill(&core_dev->tasklet); - crypto4xx_destroy_sdr(core_dev->dev); err_build_sdr: + crypto4xx_destroy_sdr(core_dev->dev); crypto4xx_destroy_gdr(core_dev->dev); -err_build_gdr: - crypto4xx_destroy_pdr(core_dev->dev); err_build_pdr: + crypto4xx_destroy_pdr(core_dev->dev); kfree(core_dev->dev); err_alloc_dev: kfree(core_dev); diff --git a/drivers/crypto/amcc/crypto4xx_core.h 
b/drivers/crypto/amcc/crypto4xx_core.h index ecfdcfe3698d..4f41d6da5acc 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -34,12 +34,12 @@ #define PPC405EX_CE_RESET 0x00000008 #define CRYPTO4XX_CRYPTO_PRIORITY 300 -#define PPC4XX_LAST_PD 63 -#define PPC4XX_NUM_PD 64 -#define PPC4XX_LAST_GD 1023 +#define PPC4XX_NUM_PD 256 +#define PPC4XX_LAST_PD (PPC4XX_NUM_PD - 1) #define PPC4XX_NUM_GD 1024 -#define PPC4XX_LAST_SD 63 -#define PPC4XX_NUM_SD 64 +#define PPC4XX_LAST_GD (PPC4XX_NUM_GD - 1) +#define PPC4XX_NUM_SD 256 +#define PPC4XX_LAST_SD (PPC4XX_NUM_SD - 1) #define PPC4XX_SD_BUFFER_SIZE 2048 #define PD_ENTRY_INUSE 1 diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 29e20c37f3a6..11129b796dda 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -2145,7 +2145,7 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key, badkey: crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - memzero_explicit(&key, sizeof(keys)); + memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 0f9754e07719..6eb5cb92b986 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -22,6 +22,7 @@ #include #include +#include <crypto/gcm.h> #include #include #include @@ -1934,7 +1935,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq) memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher)); // The HW omits the initial increment of the counter field. - crypto_inc(req_ctx->hw_ctx.J0+12, 4); + memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4); ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx, sizeof(struct artpec6_crypto_aead_hw_ctx), false, false); @@ -2956,7 +2957,7 @@ static struct aead_alg aead_algos[] = { .setkey = artpec6_crypto_aead_set_key, .encrypt = artpec6_crypto_aead_encrypt, .decrypt = artpec6_crypto_aead_decrypt, - .ivsize = AES_BLOCK_SIZE, + .ivsize = GCM_AES_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, .base = { diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 8685c7e4debd..ee52c355bee0 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -256,6 +256,44 @@ spu_ablkcipher_tx_sg_create(struct brcm_message *mssg, return 0; } +static int mailbox_send_message(struct brcm_message *mssg, u32 flags, + u8 chan_idx) +{ + int err; + int retry_cnt = 0; + struct device *dev = &(iproc_priv.pdev->dev); + + err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg); + if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) { + while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { + /* + * Mailbox queue is full. Since MAY_SLEEP is set, assume + * not in atomic context and we can wait and try again.
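The artpec6 change above hard-codes the GCM pre-counter block: with the standard 96-bit IV, J0 is simply IV || 0^31 || 1 (per NIST SP 800-38D), so no crypto_inc() walk over a 16-byte counter is needed. A sketch of that layout, not driver code, with sizes spelled out:

#include <string.h>
#include <stdint.h>

#define GCM_AES_IV_SIZE 12  /* standard GCM nonce length */
#define AES_BLOCK_SIZE  16

/* Build J0 = IV || 0^31 || 1 for a 96-bit IV, as the driver now does
 * with a single memcpy of the constant counter value 1. */
static void gcm_build_j0(uint8_t j0[AES_BLOCK_SIZE],
                         const uint8_t iv[GCM_AES_IV_SIZE])
{
        memcpy(j0, iv, GCM_AES_IV_SIZE);
        memcpy(j0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
}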
+ */ + retry_cnt++; + usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); + err = mbox_send_message(iproc_priv.mbox[chan_idx], + mssg); + atomic_inc(&iproc_priv.mb_no_spc); + } + } + if (err < 0) { + atomic_inc(&iproc_priv.mb_send_fail); + return err; + } + + /* Check error returned by mailbox controller */ + err = mssg->error; + if (unlikely(err < 0)) { + dev_err(dev, "message error %d", err); + /* Signal txdone for mailbox channel */ + } + + /* Signal txdone for mailbox channel */ + mbox_client_txdone(iproc_priv.mbox[chan_idx], err); + return err; +} + /** * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in * a single SPU request message, starting at the current position in the request @@ -293,7 +331,6 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx) u32 pad_len; /* total length of all padding */ bool update_key = false; struct brcm_message *mssg; /* mailbox message */ - int retry_cnt = 0; /* number of entries in src and dst sg in mailbox message. */ u8 rx_frag_num = 2; /* response header and STATUS */ @@ -462,24 +499,9 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx) if (err) return err; - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg); - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) { - while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { - /* - * Mailbox queue is full. Since MAY_SLEEP is set, assume - * not in atomic context and we can wait and try again. - */ - retry_cnt++; - usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], - mssg); - atomic_inc(&iproc_priv.mb_no_spc); - } - } - if (unlikely(err < 0)) { - atomic_inc(&iproc_priv.mb_send_fail); + err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); + if (unlikely(err < 0)) return err; - } return -EINPROGRESS; } @@ -710,7 +732,6 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx) u32 spu_hdr_len; unsigned int digestsize; u16 rem = 0; - int retry_cnt = 0; /* * number of entries in src and dst sg. Always includes SPU msg header. @@ -904,24 +925,10 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx) if (err) return err; - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg); - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) { - while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { - /* - * Mailbox queue is full. Since MAY_SLEEP is set, assume - * not in atomic context and we can wait and try again. - */ - retry_cnt++; - usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], - mssg); - atomic_inc(&iproc_priv.mb_no_spc); - } - } - if (err < 0) { - atomic_inc(&iproc_priv.mb_send_fail); + err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); + if (unlikely(err < 0)) return err; - } + return -EINPROGRESS; } @@ -1320,7 +1327,6 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx) int assoc_nents = 0; bool incl_icv = false; unsigned int digestsize = ctx->digestsize; - int retry_cnt = 0; /* number of entries in src and dst sg. Always includes SPU msg header. */ @@ -1558,24 +1564,9 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx) if (err) return err; - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg); - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) { - while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { - /* - * Mailbox queue is full. Since MAY_SLEEP is set, assume - * not in atomic context and we can wait and try again. 
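The mailbox_send_message() helper introduced above replaces three identical copies of this retry loop in the cipher, hash and AEAD paths. The underlying pattern — retry a full mailbox only when the caller is allowed to sleep, with a bounded retry count — in a condensed standalone sketch (try_send(), short_sleep() and the bounds are placeholders, not the driver's API):

#include <errno.h>

#define RETRY_MAX 32   /* placeholder bound, stands in for SPU_MB_RETRY_MAX */
#define MAY_SLEEP 0x1  /* placeholder for CRYPTO_TFM_REQ_MAY_SLEEP */

extern int try_send(void *msg);   /* hypothetical: returns -ENOBUFS when full */
extern void short_sleep(void);    /* hypothetical: e.g. a usleep_range() wrapper */

static int send_with_retry(void *msg, unsigned int flags)
{
        int err = try_send(msg);
        int retries = 0;

        /* Only sleep-and-retry when the submitter may sleep; in atomic
         * context the -ENOBUFS error is returned immediately. */
        while (err == -ENOBUFS && (flags & MAY_SLEEP) && retries++ < RETRY_MAX) {
                short_sleep();
                err = try_send(msg);
        }
        return err;
}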
- */ - retry_cnt++; - usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], - mssg); - atomic_inc(&iproc_priv.mb_no_spc); - } - } - if (err < 0) { - atomic_inc(&iproc_priv.mb_send_fail); + err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); + if (unlikely(err < 0)) return err; - } return -EINPROGRESS; } @@ -4537,7 +4528,7 @@ static int spu_mb_init(struct device *dev) mcl->dev = dev; mcl->tx_block = false; mcl->tx_tout = 0; - mcl->knows_txdone = false; + mcl->knows_txdone = true; mcl->rx_callback = spu_rx_callback; mcl->tx_done = NULL; diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c index a118b9bed669..bfbf8bf77f03 100644 --- a/drivers/crypto/bfin_crc.c +++ b/drivers/crypto/bfin_crc.c @@ -494,7 +494,8 @@ static struct ahash_alg algs = { .cra_driver_name = DRIVER_NAME, .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_AHASH | - CRYPTO_ALG_ASYNC, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx), .cra_alignmask = 3, diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 54f3b375a453..a8a2a271b63d 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -735,15 +735,18 @@ struct aead_edesc { * @src_nents: number of segments in input s/w scatterlist * @dst_nents: number of segments in output s/w scatterlist * @iv_dma: dma address of iv for checking continuity and link table + * @iv_dir: DMA mapping direction for IV * @sec4_sg_bytes: length of dma mapped sec4_sg space * @sec4_sg_dma: bus physical mapped address of h/w link table * @sec4_sg: pointer to h/w link table * @hw_desc: the h/w job descriptor followed by any referenced link tables + * and IV */ struct ablkcipher_edesc { int src_nents; int dst_nents; dma_addr_t iv_dma; + enum dma_data_direction iv_dir; int sec4_sg_bytes; dma_addr_t sec4_sg_dma; struct sec4_sg_entry *sec4_sg; @@ -753,7 +756,8 @@ struct ablkcipher_edesc { static void caam_unmap(struct device *dev, struct scatterlist *src, struct scatterlist *dst, int src_nents, int dst_nents, - dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma, + dma_addr_t iv_dma, int ivsize, + enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma, int sec4_sg_bytes) { if (dst != src) { @@ -765,7 +769,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, } if (iv_dma) - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); + dma_unmap_single(dev, iv_dma, ivsize, iv_dir); if (sec4_sg_bytes) dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, DMA_TO_DEVICE); @@ -776,7 +780,7 @@ static void aead_unmap(struct device *dev, struct aead_request *req) { caam_unmap(dev, req->src, req->dst, - edesc->src_nents, edesc->dst_nents, 0, 0, + edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE, edesc->sec4_sg_dma, edesc->sec4_sg_bytes); } @@ -789,7 +793,7 @@ static void ablkcipher_unmap(struct device *dev, caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, - edesc->iv_dma, ivsize, + edesc->iv_dma, ivsize, edesc->iv_dir, edesc->sec4_sg_dma, edesc->sec4_sg_bytes); } @@ -878,6 +882,18 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, ivsize, 0); + /* In case initial IV was generated, copy it in GIVCIPHER request */ + if (edesc->iv_dir == DMA_FROM_DEVICE) { + u8 *iv; + struct skcipher_givcrypt_request *greq; + + greq = container_of(req, struct skcipher_givcrypt_request, + 
creq); + iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) + + edesc->sec4_sg_bytes; + memcpy(greq->giv, iv, ivsize); + } + kfree(edesc); ablkcipher_request_complete(req, err); @@ -888,10 +904,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, { struct ablkcipher_request *req = context; struct ablkcipher_edesc *edesc; +#ifdef DEBUG struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); int ivsize = crypto_ablkcipher_ivsize(ablkcipher); -#ifdef DEBUG dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -909,14 +925,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, edesc->dst_nents > 1 ? 100 : req->nbytes, 1); ablkcipher_unmap(jrdev, edesc, req); - - /* - * The crypto API expects us to set the IV (req->info) to the last - * ciphertext block. - */ - scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize, - ivsize, 0); - kfree(edesc); ablkcipher_request_complete(req, err); @@ -1057,15 +1065,14 @@ static void init_authenc_job(struct aead_request *req, */ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, struct ablkcipher_edesc *edesc, - struct ablkcipher_request *req, - bool iv_contig) + struct ablkcipher_request *req) { struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); int ivsize = crypto_ablkcipher_ivsize(ablkcipher); u32 *desc = edesc->hw_desc; - u32 out_options = 0, in_options; - dma_addr_t dst_dma, src_dma; - int len, sec4_sg_index = 0; + u32 out_options = 0; + dma_addr_t dst_dma; + int len; #ifdef DEBUG print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", @@ -1081,30 +1088,18 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, len = desc_len(sh_desc); init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); - if (iv_contig) { - src_dma = edesc->iv_dma; - in_options = 0; - } else { - src_dma = edesc->sec4_sg_dma; - sec4_sg_index += edesc->src_nents + 1; - in_options = LDST_SGF; - } - append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); + append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize, + LDST_SGF); if (likely(req->src == req->dst)) { - if (edesc->src_nents == 1 && iv_contig) { - dst_dma = sg_dma_address(req->src); - } else { - dst_dma = edesc->sec4_sg_dma + - sizeof(struct sec4_sg_entry); - out_options = LDST_SGF; - } + dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry); + out_options = LDST_SGF; } else { if (edesc->dst_nents == 1) { dst_dma = sg_dma_address(req->dst); } else { - dst_dma = edesc->sec4_sg_dma + - sec4_sg_index * sizeof(struct sec4_sg_entry); + dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) * + sizeof(struct sec4_sg_entry); out_options = LDST_SGF; } } @@ -1116,13 +1111,12 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, */ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, struct ablkcipher_edesc *edesc, - struct ablkcipher_request *req, - bool iv_contig) + struct ablkcipher_request *req) { struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); int ivsize = crypto_ablkcipher_ivsize(ablkcipher); u32 *desc = edesc->hw_desc; - u32 out_options, in_options; + u32 in_options; dma_addr_t dst_dma, src_dma; int len, sec4_sg_index = 0; @@ -1148,15 +1142,9 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, } append_seq_in_ptr(desc, src_dma, req->nbytes, in_options); - if (iv_contig) { - dst_dma = edesc->iv_dma; - out_options = 0; - } else { - dst_dma = edesc->sec4_sg_dma + - sec4_sg_index * 
sizeof(struct sec4_sg_entry); - out_options = LDST_SGF; - } - append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options); + dst_dma = edesc->sec4_sg_dma + sec4_sg_index * + sizeof(struct sec4_sg_entry); + append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF); } /* @@ -1245,7 +1233,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, GFP_DMA | flags); if (!edesc) { caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0); + 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } @@ -1449,8 +1437,7 @@ static int aead_decrypt(struct aead_request *req) * allocate and map the ablkcipher extended descriptor for ablkcipher */ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request - *req, int desc_bytes, - bool *iv_contig_out) + *req, int desc_bytes) { struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); @@ -1459,8 +1446,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request GFP_KERNEL : GFP_ATOMIC; int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; struct ablkcipher_edesc *edesc; - dma_addr_t iv_dma = 0; - bool in_contig; + dma_addr_t iv_dma; + u8 *iv; int ivsize = crypto_ablkcipher_ivsize(ablkcipher); int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; @@ -1504,33 +1491,20 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request } } - iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, iv_dma)) { - dev_err(jrdev, "unable to map IV\n"); - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0); - return ERR_PTR(-ENOMEM); - } - - if (mapped_src_nents == 1 && - iv_dma + ivsize == sg_dma_address(req->src)) { - in_contig = true; - sec4_sg_ents = 0; - } else { - in_contig = false; - sec4_sg_ents = 1 + mapped_src_nents; - } + sec4_sg_ents = 1 + mapped_src_nents; dst_sg_idx = sec4_sg_ents; sec4_sg_ents += mapped_dst_nents > 1 ? 
mapped_dst_nents : 0; sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); - /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, + /* + * allocate space for base edesc and hw desc commands, link tables, IV + */ + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, 0, 0); + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } @@ -1539,13 +1513,24 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + desc_bytes; + edesc->iv_dir = DMA_TO_DEVICE; - if (!in_contig) { - dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); - sg_to_sec4_sg_last(req->src, mapped_src_nents, - edesc->sec4_sg + 1, 0); + /* Make sure IV is located in a DMAable area */ + iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; + memcpy(iv, req->info, ivsize); + + iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); + kfree(edesc); + return ERR_PTR(-ENOMEM); } + dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); + sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0); + if (mapped_dst_nents > 1) { sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg + dst_sg_idx, 0); @@ -1556,7 +1541,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, 0, 0); + iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); kfree(edesc); return ERR_PTR(-ENOMEM); } @@ -1569,7 +1554,6 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request sec4_sg_bytes, 1); #endif - *iv_contig_out = in_contig; return edesc; } @@ -1579,19 +1563,16 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req) struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); struct device *jrdev = ctx->jrdev; - bool iv_contig; u32 *desc; int ret = 0; /* allocate extended descriptor */ - edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &iv_contig); + edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Create and submit job descriptor*/ - init_ablkcipher_job(ctx->sh_desc_enc, - ctx->sh_desc_enc_dma, edesc, req, iv_contig); + init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req); #ifdef DEBUG print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, @@ -1615,20 +1596,25 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req) struct ablkcipher_edesc *edesc; struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); struct device *jrdev = ctx->jrdev; - bool iv_contig; u32 *desc; int ret = 0; /* allocate extended descriptor */ - edesc = 
ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &iv_contig); + edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); if (IS_ERR(edesc)) return PTR_ERR(edesc); + /* + * The crypto API expects us to set the IV (req->info) to the last + * ciphertext block. + */ + scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize, + ivsize, 0); + /* Create and submit job descriptor*/ - init_ablkcipher_job(ctx->sh_desc_dec, - ctx->sh_desc_dec_dma, edesc, req, iv_contig); + init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req); desc = edesc->hw_desc; #ifdef DEBUG print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", @@ -1653,8 +1639,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req) */ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( struct skcipher_givcrypt_request *greq, - int desc_bytes, - bool *iv_contig_out) + int desc_bytes) { struct ablkcipher_request *req = &greq->creq; struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); @@ -1664,8 +1649,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( GFP_KERNEL : GFP_ATOMIC; int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents; struct ablkcipher_edesc *edesc; - dma_addr_t iv_dma = 0; - bool out_contig; + dma_addr_t iv_dma; + u8 *iv; int ivsize = crypto_ablkcipher_ivsize(ablkcipher); int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; @@ -1710,36 +1695,20 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( } } - /* - * Check if iv can be contiguous with source and destination. - * If so, include it. If not, create scatterlist. - */ - iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, iv_dma)) { - dev_err(jrdev, "unable to map IV\n"); - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0); - return ERR_PTR(-ENOMEM); - } - sec4_sg_ents = mapped_src_nents > 1 ? 
mapped_src_nents : 0; dst_sg_idx = sec4_sg_ents; - if (mapped_dst_nents == 1 && - iv_dma + ivsize == sg_dma_address(req->dst)) { - out_contig = true; - } else { - out_contig = false; - sec4_sg_ents += 1 + mapped_dst_nents; - } + sec4_sg_ents += 1 + mapped_dst_nents; - /* allocate space for base edesc and hw desc commands, link tables */ + /* + * allocate space for base edesc and hw desc commands, link tables, IV + */ sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, 0, 0); + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } @@ -1748,24 +1717,33 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + desc_bytes; + edesc->iv_dir = DMA_FROM_DEVICE; + + /* Make sure IV is located in a DMAable area */ + iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; + iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } if (mapped_src_nents > 1) sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg, 0); - if (!out_contig) { - dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, - iv_dma, ivsize, 0); - sg_to_sec4_sg_last(req->dst, mapped_dst_nents, - edesc->sec4_sg + dst_sg_idx + 1, 0); - } + dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0); + sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg + + dst_sg_idx + 1, 0); edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, 0, 0); + iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0); kfree(edesc); return ERR_PTR(-ENOMEM); } @@ -1778,7 +1756,6 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( sec4_sg_bytes, 1); #endif - *iv_contig_out = out_contig; return edesc; } @@ -1789,19 +1766,17 @@ static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq) struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); struct device *jrdev = ctx->jrdev; - bool iv_contig = false; u32 *desc; int ret = 0; /* allocate extended descriptor */ - edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &iv_contig); + edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Create and submit job descriptor*/ init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma, - edesc, req, iv_contig); + edesc, req); #ifdef DEBUG print_hex_dump(KERN_ERR, "ablkcipher jobdesc@" __stringify(__LINE__) ": ", diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index 2eefc4a26bc2..e7966e37a5aa 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -401,7 +401,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher 
*ablkcipher, * @assoclen: associated data length, in CAAM endianness * @assoclen_dma: bus physical mapped address of req->assoclen * @drv_req: driver-specific request structure - * @sgt: the h/w link table + * @sgt: the h/w link table, followed by IV */ struct aead_edesc { int src_nents; @@ -412,9 +412,6 @@ struct aead_edesc { unsigned int assoclen; dma_addr_t assoclen_dma; struct caam_drv_req drv_req; -#define CAAM_QI_MAX_AEAD_SG \ - ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \ - sizeof(struct qm_sg_entry)) struct qm_sg_entry sgt[0]; }; @@ -426,7 +423,7 @@ struct aead_edesc { * @qm_sg_bytes: length of dma mapped h/w link table * @qm_sg_dma: bus physical mapped address of h/w link table * @drv_req: driver-specific request structure - * @sgt: the h/w link table + * @sgt: the h/w link table, followed by IV */ struct ablkcipher_edesc { int src_nents; @@ -435,9 +432,6 @@ struct ablkcipher_edesc { int qm_sg_bytes; dma_addr_t qm_sg_dma; struct caam_drv_req drv_req; -#define CAAM_QI_MAX_ABLKCIPHER_SG \ - ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \ - sizeof(struct qm_sg_entry)) struct qm_sg_entry sgt[0]; }; @@ -649,17 +643,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, } } - if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) { + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) ivsize = crypto_aead_ivsize(aead); - iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE); - if (dma_mapping_error(qidev, iv_dma)) { - dev_err(qidev, "unable to map IV\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, - dst_nents, 0, 0, op_type, 0, 0); - qi_cache_free(edesc); - return ERR_PTR(-ENOMEM); - } - } /* * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. @@ -667,16 +652,33 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, */ qm_sg_ents = 1 + !!ivsize + mapped_src_nents + (mapped_dst_nents > 1 ? 
mapped_dst_nents : 0); - if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) { - dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", - qm_sg_ents, CAAM_QI_MAX_AEAD_SG); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, op_type, 0, 0); + sg_table = &edesc->sgt[0]; + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); + if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > + CAAM_QI_MEMCACHE_SIZE)) { + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", + qm_sg_ents, ivsize); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } - sg_table = &edesc->sgt[0]; - qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); + + if (ivsize) { + u8 *iv = (u8 *)(sg_table + qm_sg_ents); + + /* Make sure IV is located in a DMAable area */ + memcpy(iv, req->iv, ivsize); + + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(qidev, iv_dma)) { + dev_err(qidev, "unable to map IV\n"); + caam_unmap(qidev, req->src, req->dst, src_nents, + dst_nents, 0, 0, 0, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + } edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; @@ -813,15 +815,27 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status) #endif ablkcipher_unmap(qidev, edesc, req); - qi_cache_free(edesc); + + /* In case initial IV was generated, copy it in GIVCIPHER request */ + if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) { + u8 *iv; + struct skcipher_givcrypt_request *greq; + + greq = container_of(req, struct skcipher_givcrypt_request, + creq); + iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes; + memcpy(greq->giv, iv, ivsize); + } /* * The crypto API expects us to set the IV (req->info) to the last * ciphertext block. This is used e.g. by the CTS mode. */ - scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, - ivsize, 0); + if (edesc->drv_req.drv_ctx->op_type != DECRYPT) + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - + ivsize, ivsize, 0); + qi_cache_free(edesc); ablkcipher_request_complete(req, status); } @@ -836,9 +850,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; struct ablkcipher_edesc *edesc; dma_addr_t iv_dma; - bool in_contig; + u8 *iv; int ivsize = crypto_ablkcipher_ivsize(ablkcipher); - int dst_sg_idx, qm_sg_ents; + int dst_sg_idx, qm_sg_ents, qm_sg_bytes; struct qm_sg_entry *sg_table, *fd_sgt; struct caam_drv_ctx *drv_ctx; enum optype op_type = encrypt ? ENCRYPT : DECRYPT; @@ -885,55 +899,53 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request } } - iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE); - if (dma_mapping_error(qidev, iv_dma)) { - dev_err(qidev, "unable to map IV\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0, 0); - return ERR_PTR(-ENOMEM); - } - - if (mapped_src_nents == 1 && - iv_dma + ivsize == sg_dma_address(req->src)) { - in_contig = true; - qm_sg_ents = 0; - } else { - in_contig = false; - qm_sg_ents = 1 + mapped_src_nents; - } + qm_sg_ents = 1 + mapped_src_nents; dst_sg_idx = qm_sg_ents; qm_sg_ents += mapped_dst_nents > 1 ? 
mapped_dst_nents : 0; - if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { - dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", - qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, op_type, 0, 0); + qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); + if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes + + ivsize > CAAM_QI_MEMCACHE_SIZE)) { + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", + qm_sg_ents, ivsize); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); return ERR_PTR(-ENOMEM); } - /* allocate space for base edesc and link tables */ + /* allocate space for base edesc, link tables and IV */ edesc = qi_cache_alloc(GFP_DMA | flags); if (unlikely(!edesc)) { dev_err(qidev, "could not allocate extended descriptor\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, op_type, 0, 0); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); + return ERR_PTR(-ENOMEM); + } + + /* Make sure IV is located in a DMAable area */ + sg_table = &edesc->sgt[0]; + iv = (u8 *)(sg_table + qm_sg_ents); + memcpy(iv, req->info, ivsize); + + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(qidev, iv_dma)) { + dev_err(qidev, "unable to map IV\n"); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); + qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; edesc->iv_dma = iv_dma; - sg_table = &edesc->sgt[0]; - edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); + edesc->qm_sg_bytes = qm_sg_bytes; edesc->drv_req.app_ctx = req; edesc->drv_req.cbk = ablkcipher_done; edesc->drv_req.drv_ctx = drv_ctx; - if (!in_contig) { - dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); - sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); - } + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); if (mapped_dst_nents > 1) sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + @@ -951,20 +963,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request fd_sgt = &edesc->drv_req.fd_sgt[0]; - if (!in_contig) - dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, - ivsize + req->nbytes, 0); - else - dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes, - 0); + dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, + ivsize + req->nbytes, 0); if (req->src == req->dst) { - if (!in_contig) - dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + - sizeof(*sg_table), req->nbytes, 0); - else - dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src), - req->nbytes, 0); + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + + sizeof(*sg_table), req->nbytes, 0); } else if (mapped_dst_nents > 1) { dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * sizeof(*sg_table), req->nbytes, 0); @@ -988,10 +992,10 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents; struct ablkcipher_edesc *edesc; dma_addr_t iv_dma; - bool out_contig; + u8 *iv; int ivsize = crypto_ablkcipher_ivsize(ablkcipher); struct qm_sg_entry *sg_table, *fd_sgt; - int dst_sg_idx, qm_sg_ents; + int dst_sg_idx, qm_sg_ents, qm_sg_bytes; struct caam_drv_ctx *drv_ctx; drv_ctx = get_drv_ctx(ctx, GIVENCRYPT); @@ -1039,46 +1043,45 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( 
mapped_dst_nents = src_nents; } - iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE); - if (dma_mapping_error(qidev, iv_dma)) { - dev_err(qidev, "unable to map IV\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0, 0); - return ERR_PTR(-ENOMEM); - } - qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0; dst_sg_idx = qm_sg_ents; - if (mapped_dst_nents == 1 && - iv_dma + ivsize == sg_dma_address(req->dst)) { - out_contig = true; - } else { - out_contig = false; - qm_sg_ents += 1 + mapped_dst_nents; - } - if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { - dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", - qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, GIVENCRYPT, 0, 0); + qm_sg_ents += 1 + mapped_dst_nents; + qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); + if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes + + ivsize > CAAM_QI_MEMCACHE_SIZE)) { + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", + qm_sg_ents, ivsize); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); return ERR_PTR(-ENOMEM); } - /* allocate space for base edesc and link tables */ + /* allocate space for base edesc, link tables and IV */ edesc = qi_cache_alloc(GFP_DMA | flags); if (!edesc) { dev_err(qidev, "could not allocate extended descriptor\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, GIVENCRYPT, 0, 0); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); + return ERR_PTR(-ENOMEM); + } + + /* Make sure IV is located in a DMAable area */ + sg_table = &edesc->sgt[0]; + iv = (u8 *)(sg_table + qm_sg_ents); + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE); + if (dma_mapping_error(qidev, iv_dma)) { + dev_err(qidev, "unable to map IV\n"); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); + qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; edesc->iv_dma = iv_dma; - sg_table = &edesc->sgt[0]; - edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); + edesc->qm_sg_bytes = qm_sg_bytes; edesc->drv_req.app_ctx = req; edesc->drv_req.cbk = ablkcipher_done; edesc->drv_req.drv_ctx = drv_ctx; @@ -1086,11 +1089,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( if (mapped_src_nents > 1) sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0); - if (!out_contig) { - dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0); - sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + - dst_sg_idx + 1, 0); - } + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0); + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1, + 0); edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, DMA_TO_DEVICE); @@ -1111,13 +1112,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src), req->nbytes, 0); - if (!out_contig) - dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * - sizeof(*sg_table), ivsize + req->nbytes, - 0); - else - dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), - ivsize + req->nbytes, 0); + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * + sizeof(*sg_table), ivsize + req->nbytes, 0); return edesc; } @@ -1127,6 +1123,7 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt) struct ablkcipher_edesc 
*edesc; struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); int ret; if (unlikely(caam_congested)) @@ -1137,6 +1134,14 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt) if (IS_ERR(edesc)) return PTR_ERR(edesc); + /* + * The crypto API expects us to set the IV (req->info) to the last + * ciphertext block. + */ + if (!encrypt) + scatterwalk_map_and_copy(req->info, req->src, req->nbytes - + ivsize, ivsize, 0); + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); if (!ret) { ret = -EINPROGRESS; diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 7a897209f181..7ff4a25440ac 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -66,7 +66,7 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, struct caam_rsa_key *key = &ctx->key; struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; size_t p_sz = key->p_sz; - size_t q_sz = key->p_sz; + size_t q_sz = key->q_sz; dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); @@ -83,7 +83,7 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, struct caam_rsa_key *key = &ctx->key; struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; size_t p_sz = key->p_sz; - size_t q_sz = key->p_sz; + size_t q_sz = key->q_sz; dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); @@ -166,18 +166,71 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, akcipher_request_complete(req, err); } +static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, + unsigned int nbytes, + unsigned int flags) +{ + struct sg_mapping_iter miter; + int lzeros, ents; + unsigned int len; + unsigned int tbytes = nbytes; + const u8 *buff; + + ents = sg_nents_for_len(sgl, nbytes); + if (ents < 0) + return ents; + + sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags); + + lzeros = 0; + len = 0; + while (nbytes > 0) { + while (len && !*buff) { + lzeros++; + len--; + buff++; + } + + if (len && *buff) + break; + + sg_miter_next(&miter); + buff = miter.addr; + len = miter.length; + + nbytes -= lzeros; + lzeros = 0; + } + + miter.consumed = lzeros; + sg_miter_stop(&miter); + nbytes -= lzeros; + + return tbytes - nbytes; +} + static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, size_t desclen) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct device *dev = ctx->dev; + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct rsa_edesc *edesc; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; + int sg_flags = (flags == GFP_ATOMIC) ? 
SG_MITER_ATOMIC : 0; int sgc; int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; int src_nents, dst_nents; + int lzeros; + + lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags); + if (lzeros < 0) + return ERR_PTR(lzeros); + + req->src_len -= lzeros; + req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros); src_nents = sg_nents_for_len(req->src, req->src_len); dst_nents = sg_nents_for_len(req->dst, req->dst_len); @@ -344,7 +397,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; int sec4_sg_index = 0; size_t p_sz = key->p_sz; - size_t q_sz = key->p_sz; + size_t q_sz = key->q_sz; pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->d_dma)) { @@ -419,7 +472,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; int sec4_sg_index = 0; size_t p_sz = key->p_sz; - size_t q_sz = key->p_sz; + size_t q_sz = key->q_sz; pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->p_dma)) { @@ -953,6 +1006,7 @@ static struct akcipher_alg caam_rsa = { .max_size = caam_rsa_max_size, .init = caam_rsa_init_tfm, .exit = caam_rsa_exit_tfm, + .reqsize = sizeof(struct caam_rsa_req_ctx), .base = { .cra_name = "rsa", .cra_driver_name = "rsa-caam", diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h index fd145c46eae1..82645bcf8b27 100644 --- a/drivers/crypto/caam/caampkc.h +++ b/drivers/crypto/caam/caampkc.h @@ -95,6 +95,14 @@ struct caam_rsa_ctx { struct device *dev; }; +/** + * caam_rsa_req_ctx - per request context. + * @src: input scatterlist (stripped of leading zeros) + */ +struct caam_rsa_req_ctx { + struct scatterlist src[2]; +}; + /** * rsa_edesc - s/w-extended rsa descriptor * @src_nents : number of segments in input scatterlist diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 027e121c6f70..39f70411f28f 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -228,12 +228,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, * without any error (HW optimizations for later * CAAM eras), then try again. 
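caam_rsa_count_leading_zeros() above walks the input scatterlist with an sg mapping iterator, but the arithmetic it implements is simply "skip the big-endian leading zero bytes before handing the integer to hardware"; the driver then advances the scatterlist by that count with scatterwalk_ffwd(). The same idea on a flat buffer, as a simplified sketch:

#include <stddef.h>
#include <stdint.h>

/* Count leading zero bytes of a big-endian integer. The caller advances
 * the buffer and shrinks the length by the result, which is what the
 * driver does to req->src/req->src_len. */
static size_t count_leading_zeros(const uint8_t *buf, size_t len)
{
        size_t n = 0;

        while (n < len && buf[n] == 0)
                n++;
        return n;
}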
*/ + if (ret) + break; + + rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; + if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) || - !(rdsta_val & (1 << sh_idx))) + !(rdsta_val & (1 << sh_idx))) { + ret = -EAGAIN; - if (ret) break; + } + + dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); /* Clear the contents before recreating the descriptor */ memset(desc, 0x00, CAAM_CMD_SZ * 7); @@ -809,9 +813,6 @@ static int caam_probe(struct platform_device *pdev) return 0; caam_remove: -#ifdef CONFIG_DEBUG_FS - debugfs_remove_recursive(ctrlpriv->dfs_root); -#endif caam_remove(pdev); return ret; diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c index 169e66231bcf..b0ba4331944b 100644 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c +++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c @@ -459,7 +459,8 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL); if (unlikely(!info->completion_addr)) { dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n"); - return -ENOMEM; + ret = -ENOMEM; + goto request_cleanup; } result = (union cpt_res_s *)info->completion_addr; diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h index dc451e0a43c5..58fb3ed6e644 100644 --- a/drivers/crypto/cavium/zip/common.h +++ b/drivers/crypto/cavium/zip/common.h @@ -46,8 +46,10 @@ #ifndef __COMMON_H__ #define __COMMON_H__ +#include <linux/delay.h> #include #include +#include <linux/io.h> #include #include #include @@ -149,6 +151,25 @@ struct zip_operation { u32 sizeofzops; }; +static inline int zip_poll_result(union zip_zres_s *result) +{ + int retries = 1000; + + while (!result->s.compcode) { + if (!--retries) { + pr_err("ZIP ERR: request timed out"); + return -ETIMEDOUT; + } + udelay(10); + /* + * Force re-reading of compcode which is updated + * by the ZIP coprocessor. + */ + rmb(); + } + return 0; +} + /* error messages */ #define zip_err(fmt, args...)
pr_err("ZIP ERR:%s():%d: " \ fmt "\n", __func__, __LINE__, ## args) diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c index 8df4d26cf9d4..b92b6e7e100f 100644 --- a/drivers/crypto/cavium/zip/zip_crypto.c +++ b/drivers/crypto/cavium/zip/zip_crypto.c @@ -124,7 +124,7 @@ int zip_compress(const u8 *src, unsigned int slen, struct zip_kernel_ctx *zip_ctx) { struct zip_operation *zip_ops = NULL; - struct zip_state zip_state; + struct zip_state *zip_state; struct zip_device *zip = NULL; int ret; @@ -135,20 +135,23 @@ int zip_compress(const u8 *src, unsigned int slen, if (!zip) return -ENODEV; - memset(&zip_state, 0, sizeof(struct zip_state)); + zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC); + if (!zip_state) + return -ENOMEM; + zip_ops = &zip_ctx->zip_comp; zip_ops->input_len = slen; zip_ops->output_len = *dlen; memcpy(zip_ops->input, src, slen); - ret = zip_deflate(zip_ops, &zip_state, zip); + ret = zip_deflate(zip_ops, zip_state, zip); if (!ret) { *dlen = zip_ops->output_len; memcpy(dst, zip_ops->output, *dlen); } - + kfree(zip_state); return ret; } @@ -157,7 +160,7 @@ int zip_decompress(const u8 *src, unsigned int slen, struct zip_kernel_ctx *zip_ctx) { struct zip_operation *zip_ops = NULL; - struct zip_state zip_state; + struct zip_state *zip_state; struct zip_device *zip = NULL; int ret; @@ -168,7 +171,10 @@ int zip_decompress(const u8 *src, unsigned int slen, if (!zip) return -ENODEV; - memset(&zip_state, 0, sizeof(struct zip_state)); + zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC); + if (!zip_state) + return -ENOMEM; + zip_ops = &zip_ctx->zip_decomp; memcpy(zip_ops->input, src, slen); @@ -179,13 +185,13 @@ int zip_decompress(const u8 *src, unsigned int slen, zip_ops->input_len = slen; zip_ops->output_len = *dlen; - ret = zip_inflate(zip_ops, &zip_state, zip); + ret = zip_inflate(zip_ops, zip_state, zip); if (!ret) { *dlen = zip_ops->output_len; memcpy(dst, zip_ops->output, *dlen); } - + kfree(zip_state); return ret; } diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c index 9a944b8c1e29..d7133f857d67 100644 --- a/drivers/crypto/cavium/zip/zip_deflate.c +++ b/drivers/crypto/cavium/zip/zip_deflate.c @@ -129,8 +129,8 @@ int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s, /* Stats update for compression requests submitted */ atomic64_inc(&zip_dev->stats.comp_req_submit); - while (!result_ptr->s.compcode) - continue; + /* Wait for completion or error */ + zip_poll_result(result_ptr); /* Stats update for compression requests completed */ atomic64_inc(&zip_dev->stats.comp_req_complete); diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c index 50cbdd83dbf2..7e0d73e2f89e 100644 --- a/drivers/crypto/cavium/zip/zip_inflate.c +++ b/drivers/crypto/cavium/zip/zip_inflate.c @@ -143,8 +143,8 @@ int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s, /* Decompression requests submitted stats update */ atomic64_inc(&zip_dev->stats.decomp_req_submit); - while (!result_ptr->s.compcode) - continue; + /* Wait for completion or error */ + zip_poll_result(result_ptr); /* Decompression requests completed stats update */ atomic64_inc(&zip_dev->stats.decomp_req_complete); diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c index e6db8672d89c..05850dfd7940 100644 --- a/drivers/crypto/ccp/ccp-crypto-rsa.c +++ b/drivers/crypto/ccp/ccp-crypto-rsa.c @@ -60,10 +60,9 @@ static int ccp_rsa_complete(struct 
crypto_async_request *async_req, int ret) static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm) { - if (ccp_version() > CCP_VERSION(3, 0)) - return CCP5_RSA_MAXMOD; - else - return CCP_RSA_MAXMOD; + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + + return ctx->u.rsa.n_len; } static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt) diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c index 59d4ca4e72d8..1a734bd2070a 100644 --- a/drivers/crypto/ccp/ccp-debugfs.c +++ b/drivers/crypto/ccp/ccp-debugfs.c @@ -278,7 +278,7 @@ static const struct file_operations ccp_debugfs_stats_ops = { }; static struct dentry *ccp_debugfs_dir; -static DEFINE_RWLOCK(ccp_debugfs_lock); +static DEFINE_MUTEX(ccp_debugfs_lock); #define MAX_NAME_LEN 20 @@ -290,16 +290,15 @@ void ccp5_debugfs_setup(struct ccp_device *ccp) struct dentry *debugfs_stats; struct dentry *debugfs_q_instance; struct dentry *debugfs_q_stats; - unsigned long flags; int i; if (!debugfs_initialized()) return; - write_lock_irqsave(&ccp_debugfs_lock, flags); + mutex_lock(&ccp_debugfs_lock); if (!ccp_debugfs_dir) ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); - write_unlock_irqrestore(&ccp_debugfs_lock, flags); + mutex_unlock(&ccp_debugfs_lock); if (!ccp_debugfs_dir) return; diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig index 3e104f5aa0c2..b56b3f711d94 100644 --- a/drivers/crypto/chelsio/Kconfig +++ b/drivers/crypto/chelsio/Kconfig @@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO select CRYPTO_SHA256 select CRYPTO_SHA512 select CRYPTO_AUTHENC + select CRYPTO_GF128MUL ---help--- The Chelsio Crypto Co-processor driver for T6 adapters. diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 89ba9e85c0f3..3ee68ecde9ec 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -462,6 +462,15 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring) if (backlog) backlog->complete(backlog, -EINPROGRESS); + /* In case the send() helper did not issue any command to push + * to the engine because the input data was cached, continue to + * dequeue other requests as this is valid and not an error. 
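The ccp change above swaps an IRQ-safe rwlock for a mutex around debugfs directory creation: debugfs_create_dir() may sleep, so it must not run under a spinning lock with interrupts disabled. The resulting one-time-init pattern in a standalone sketch (a POSIX mutex stands in for the kernel's, and create_dir_may_sleep() is a hypothetical placeholder):

#include <pthread.h>

static pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER;
static void *debug_dir; /* stands in for the dentry pointer */

extern void *create_dir_may_sleep(const char *name); /* hypothetical */

static void setup_debug_dir(void)
{
        /* A mutex is safe here because the creation path may sleep;
         * the old write_lock_irqsave() variant was not. */
        pthread_mutex_lock(&dir_lock);
        if (!debug_dir)
                debug_dir = create_dir_may_sleep("ccp");
        pthread_mutex_unlock(&dir_lock);
}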
+ */ + if (!commands && !results) { + kfree(request); + continue; + } + spin_lock_bh(&priv->ring[ring].egress_lock); list_add_tail(&request->list, &priv->ring[ring].list); spin_unlock_bh(&priv->ring[ring].egress_lock); @@ -607,6 +616,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv ndesc = ctx->handle_result(priv, ring, sreq->req, &should_complete, &ret); if (ndesc < 0) { + kfree(sreq); dev_err(priv->dev, "failed to handle result (%d)", ndesc); return; } @@ -788,7 +798,7 @@ static int safexcel_probe(struct platform_device *pdev) return PTR_ERR(priv->base); } - priv->clk = of_clk_get(dev->of_node, 0); + priv->clk = devm_clk_get(&pdev->dev, NULL); if (!IS_ERR(priv->clk)) { ret = clk_prepare_enable(priv->clk); if (ret) { diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 5438552bc6d7..29cf7e00b574 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -14,6 +14,7 @@ #include #include +#include #include "safexcel.h" @@ -33,6 +34,10 @@ struct safexcel_cipher_ctx { unsigned int key_len; }; +struct safexcel_cipher_req { + bool needs_inv; +}; + static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, struct crypto_async_request *async, struct safexcel_command_desc *cdesc, @@ -126,9 +131,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, return 0; } -static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, - struct crypto_async_request *async, - bool *should_complete, int *ret) +static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring, + struct crypto_async_request *async, + bool *should_complete, int *ret) { struct skcipher_request *req = skcipher_request_cast(async); struct safexcel_result_desc *rdesc; @@ -265,7 +270,6 @@ static int safexcel_aes_send(struct crypto_async_request *async, spin_unlock_bh(&priv->ring[ring].egress_lock); request->req = &req->base; - ctx->base.handle_result = safexcel_handle_result; *commands = n_cdesc; *results = n_rdesc; @@ -341,8 +345,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, ring = safexcel_select_ring(priv); ctx->base.ring = ring; - ctx->base.needs_inv = false; - ctx->base.send = safexcel_aes_send; spin_lock_bh(&priv->ring[ring].queue_lock); enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async); @@ -359,6 +361,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, return ndesc; } +static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, + struct crypto_async_request *async, + bool *should_complete, int *ret) +{ + struct skcipher_request *req = skcipher_request_cast(async); + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); + int err; + + if (sreq->needs_inv) { + sreq->needs_inv = false; + err = safexcel_handle_inv_result(priv, ring, async, + should_complete, ret); + } else { + err = safexcel_handle_req_result(priv, ring, async, + should_complete, ret); + } + + return err; +} + static int safexcel_cipher_send_inv(struct crypto_async_request *async, int ring, struct safexcel_request *request, int *commands, int *results) @@ -368,8 +390,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async, struct safexcel_crypto_priv *priv = ctx->priv; int ret; - ctx->base.handle_result = safexcel_handle_inv_result; - ret = safexcel_invalidate_cache(async, &ctx->base, priv, ctx->base.ctxr_dma, ring, request); if 
(unlikely(ret)) @@ -381,34 +401,52 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async, return 0; } +static int safexcel_send(struct crypto_async_request *async, + int ring, struct safexcel_request *request, + int *commands, int *results) +{ + struct skcipher_request *req = skcipher_request_cast(async); + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); + int ret; + + if (sreq->needs_inv) + ret = safexcel_cipher_send_inv(async, ring, request, + commands, results); + else + ret = safexcel_aes_send(async, ring, request, + commands, results); + return ret; +} + static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; - struct skcipher_request req; + SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm)); + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); struct safexcel_inv_result result = {}; int ring = ctx->base.ring; - memset(&req, 0, sizeof(struct skcipher_request)); + memset(req, 0, sizeof(struct skcipher_request)); /* create invalidation request */ init_completion(&result.completion); - skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG, - safexcel_inv_complete, &result); + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + safexcel_inv_complete, &result); - skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm)); - ctx = crypto_tfm_ctx(req.base.tfm); + skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm)); + ctx = crypto_tfm_ctx(req->base.tfm); ctx->base.exit_inv = true; - ctx->base.send = safexcel_cipher_send_inv; + sreq->needs_inv = true; spin_lock_bh(&priv->ring[ring].queue_lock); - crypto_enqueue_request(&priv->ring[ring].queue, &req.base); + crypto_enqueue_request(&priv->ring[ring].queue, &req->base); spin_unlock_bh(&priv->ring[ring].queue_lock); if (!priv->ring[ring].need_dequeue) safexcel_dequeue(priv, ring); - wait_for_completion_interruptible(&result.completion); + wait_for_completion(&result.completion); if (result.error) { dev_warn(priv->dev, @@ -424,19 +462,21 @@ static int safexcel_aes(struct skcipher_request *req, enum safexcel_cipher_direction dir, u32 mode) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); struct safexcel_crypto_priv *priv = ctx->priv; int ret, ring; + sreq->needs_inv = false; ctx->direction = dir; ctx->mode = mode; if (ctx->base.ctxr) { - if (ctx->base.needs_inv) - ctx->base.send = safexcel_cipher_send_inv; + if (ctx->base.needs_inv) { + sreq->needs_inv = true; + ctx->base.needs_inv = false; + } } else { ctx->base.ring = safexcel_select_ring(priv); - ctx->base.send = safexcel_aes_send; - ctx->base.ctxr = dma_pool_zalloc(priv->context_pool, EIP197_GFP_FLAGS(req->base), &ctx->base.ctxr_dma); @@ -476,6 +516,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm) alg.skcipher.base); ctx->priv = tmpl->priv; + ctx->base.send = safexcel_send; + ctx->base.handle_result = safexcel_handle_result; + + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), + sizeof(struct safexcel_cipher_req)); return 0; } diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 3980f946874f..69f29776591a 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -32,6 +32,9 @@ struct safexcel_ahash_req { bool last_req; bool finish; bool hmac; + bool needs_inv; + + int nents; u8 state_sz; /* 
expected state size, only set once */ u32 state[SHA256_DIGEST_SIZE / sizeof(u32)]; @@ -119,9 +122,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, } } -static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, - struct crypto_async_request *async, - bool *should_complete, int *ret) +static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring, + struct crypto_async_request *async, + bool *should_complete, int *ret) { struct safexcel_result_desc *rdesc; struct ahash_request *areq = ahash_request_cast(async); @@ -151,8 +154,10 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, result_sz = crypto_ahash_digestsize(ahash); memcpy(sreq->state, areq->result, result_sz); - dma_unmap_sg(priv->dev, areq->src, - sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE); + if (sreq->nents) { + dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE); + sreq->nents = 0; + } safexcel_free_context(priv, async, sreq->state_sz); @@ -165,9 +170,9 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, return 1; } -static int safexcel_ahash_send(struct crypto_async_request *async, int ring, - struct safexcel_request *request, int *commands, - int *results) +static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, + struct safexcel_request *request, + int *commands, int *results) { struct ahash_request *areq = ahash_request_cast(async); struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); @@ -177,25 +182,39 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring, struct safexcel_command_desc *cdesc, *first_cdesc = NULL; struct safexcel_result_desc *rdesc; struct scatterlist *sg; - int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0; + int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0; queued = len = req->len - req->processed; - if (queued < crypto_ahash_blocksize(ahash)) + if (queued <= crypto_ahash_blocksize(ahash)) cache_len = queued; else cache_len = queued - areq->nbytes; - /* - * If this is not the last request and the queued data does not fit - * into full blocks, cache it for the next send() call. - */ - extra = queued & (crypto_ahash_blocksize(ahash) - 1); - if (!req->last_req && extra) { - sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), - req->cache_next, extra, areq->nbytes - extra); - - queued -= extra; - len -= extra; + if (!req->last_req) { + /* If this is not the last request and the queued data does not + * fit into full blocks, cache it for the next send() call. + */ + extra = queued & (crypto_ahash_blocksize(ahash) - 1); + if (!extra) + /* If this is not the last request and the queued data + * is a multiple of a block, cache the last one for now.
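+ * Keeping a full block back means the eventual final/finup request
+ * always has at least one block of data left to process when the
+ * digest is closed out.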
+ */ + extra = crypto_ahash_blocksize(ahash); + + if (extra) { + sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), + req->cache_next, extra, + areq->nbytes - extra); + + queued -= extra; + len -= extra; + + if (!queued) { + *commands = 0; + *results = 0; + return 0; + } + } } spin_lock_bh(&priv->ring[ring].egress_lock); @@ -233,15 +252,15 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring, } /* Now handle the current ahash request buffer(s) */ - nents = dma_map_sg(priv->dev, areq->src, - sg_nents_for_len(areq->src, areq->nbytes), - DMA_TO_DEVICE); - if (!nents) { + req->nents = dma_map_sg(priv->dev, areq->src, + sg_nents_for_len(areq->src, areq->nbytes), + DMA_TO_DEVICE); + if (!req->nents) { ret = -ENOMEM; goto cdesc_rollback; } - for_each_sg(areq->src, sg, nents, i) { + for_each_sg(areq->src, sg, req->nents, i) { int sglen = sg_dma_len(sg); /* Do not overflow the request */ @@ -292,7 +311,6 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring, req->processed += len; request->req = &areq->base; - ctx->base.handle_result = safexcel_handle_result; *commands = n_cdesc; *results = 1; @@ -376,8 +394,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, ring = safexcel_select_ring(priv); ctx->base.ring = ring; - ctx->base.needs_inv = false; - ctx->base.send = safexcel_ahash_send; spin_lock_bh(&priv->ring[ring].queue_lock); enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async); @@ -394,6 +410,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, return 1; } +static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, + struct crypto_async_request *async, + bool *should_complete, int *ret) +{ + struct ahash_request *areq = ahash_request_cast(async); + struct safexcel_ahash_req *req = ahash_request_ctx(areq); + int err; + + if (req->needs_inv) { + req->needs_inv = false; + err = safexcel_handle_inv_result(priv, ring, async, + should_complete, ret); + } else { + err = safexcel_handle_req_result(priv, ring, async, + should_complete, ret); + } + + return err; +} + static int safexcel_ahash_send_inv(struct crypto_async_request *async, int ring, struct safexcel_request *request, int *commands, int *results) @@ -402,7 +438,6 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async, struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); int ret; - ctx->base.handle_result = safexcel_handle_inv_result; ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv, ctx->base.ctxr_dma, ring, request); if (unlikely(ret)) @@ -414,34 +449,52 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async, return 0; } +static int safexcel_ahash_send(struct crypto_async_request *async, + int ring, struct safexcel_request *request, + int *commands, int *results) +{ + struct ahash_request *areq = ahash_request_cast(async); + struct safexcel_ahash_req *req = ahash_request_ctx(areq); + int ret; + + if (req->needs_inv) + ret = safexcel_ahash_send_inv(async, ring, request, + commands, results); + else + ret = safexcel_ahash_send_req(async, ring, request, + commands, results); + return ret; +} + static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) { struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; - struct ahash_request req; + AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm)); + struct safexcel_ahash_req *rctx = ahash_request_ctx(req); struct safexcel_inv_result result = {}; int 
ring = ctx->base.ring; - memset(&req, 0, sizeof(struct ahash_request)); + memset(req, 0, sizeof(struct ahash_request)); /* create invalidation request */ init_completion(&result.completion); - ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG, + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, safexcel_inv_complete, &result); - ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm)); - ctx = crypto_tfm_ctx(req.base.tfm); + ahash_request_set_tfm(req, __crypto_ahash_cast(tfm)); + ctx = crypto_tfm_ctx(req->base.tfm); ctx->base.exit_inv = true; - ctx->base.send = safexcel_ahash_send_inv; + rctx->needs_inv = true; spin_lock_bh(&priv->ring[ring].queue_lock); - crypto_enqueue_request(&priv->ring[ring].queue, &req.base); + crypto_enqueue_request(&priv->ring[ring].queue, &req->base); spin_unlock_bh(&priv->ring[ring].queue_lock); if (!priv->ring[ring].need_dequeue) safexcel_dequeue(priv, ring); - wait_for_completion_interruptible(&result.completion); + wait_for_completion(&result.completion); if (result.error) { dev_warn(priv->dev, "hash: completion error (%d)\n", @@ -483,14 +536,16 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq) struct safexcel_crypto_priv *priv = ctx->priv; int ret, ring; - ctx->base.send = safexcel_ahash_send; + req->needs_inv = false; if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq); if (ctx->base.ctxr) { - if (ctx->base.needs_inv) - ctx->base.send = safexcel_ahash_send_inv; + if (ctx->base.needs_inv) { + ctx->base.needs_inv = false; + req->needs_inv = true; + } } else { ctx->base.ring = safexcel_select_ring(priv); ctx->base.ctxr = dma_pool_zalloc(priv->context_pool, @@ -624,6 +679,8 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template, alg.ahash); ctx->priv = tmpl->priv; + ctx->base.send = safexcel_ahash_send; + ctx->base.handle_result = safexcel_handle_result; crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct safexcel_ahash_req)); @@ -762,7 +819,7 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq, init_completion(&result.completion); ret = crypto_ahash_digest(areq); - if (ret == -EINPROGRESS) { + if (ret == -EINPROGRESS || ret == -EBUSY) { wait_for_completion_interruptible(&result.completion); ret = result.error; } diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index a9fd8b9e86cd..699ee5a9a8f9 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1625,6 +1625,7 @@ static int queue_cache_init(void) CWQ_ENTRY_SIZE, 0, NULL); if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); + queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL; return -ENOMEM; } return 0; @@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void) { kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); + queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL; + queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL; } static long spu_queue_register_workfn(void *arg) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index c40ac30ec002..c1f8da958c78 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1082,7 +1082,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) if (test_bit(FLAGS_SGS_COPIED, &dd->flags)) free_pages((unsigned long)sg_virt(ctx->sg), - get_order(ctx->sg->length)); + get_order(ctx->sg->length + ctx->bufcnt)); if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags)) 
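/* ctx->sg was kmalloc'ed when FLAGS_SGS_ALLOCED was set, so a plain
 * kfree() (not the free_pages() path above) is the matching release */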
kfree(ctx->sg); diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index b3869748cc6b..7685f557dcc0 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -266,6 +266,8 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, return; } + count -= initial; + if (initial) asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ : "+S"(input), "+D"(output) @@ -273,7 +275,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ : "+S"(input), "+D"(output) - : "d"(control_word), "b"(key), "c"(count - initial)); + : "d"(control_word), "b"(key), "c"(count)); } static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, @@ -284,6 +286,8 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, if (count < cbc_fetch_blocks) return cbc_crypt(input, output, key, iv, control_word, count); + count -= initial; + if (initial) asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ : "+S" (input), "+D" (output), "+a" (iv) @@ -291,7 +295,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ : "+S" (input), "+D" (output), "+a" (iv) - : "d" (control_word), "b" (key), "c" (count-initial)); + : "d" (control_word), "b" (key), "c" (count)); return iv; } @@ -512,7 +516,7 @@ static int __init padlock_init(void) printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); - if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) { + if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) { ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS; cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS; printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n"); diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 7ac657f46d15..aec66159566d 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -601,15 +601,21 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) uint32_t aes_control; unsigned long flags; int err; + u8 *iv; aes_control = SSS_AES_KEY_CHANGE_MODE; if (mode & FLAGS_AES_DECRYPT) aes_control |= SSS_AES_MODE_DECRYPT; - if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) + if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) { aes_control |= SSS_AES_CHAIN_MODE_CBC; - else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) + iv = req->info; + } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) { aes_control |= SSS_AES_CHAIN_MODE_CTR; + iv = req->info; + } else { + iv = NULL; /* AES_ECB */ + } if (dev->ctx->keylen == AES_KEYSIZE_192) aes_control |= SSS_AES_KEY_SIZE_192; @@ -640,7 +646,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) goto outdata_error; SSS_AES_WRITE(dev, AES_CONTROL, aes_control); - s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen); + s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen); s5p_set_dma_indata(dev, dev->sg_src); s5p_set_dma_outdata(dev, dev->sg_dst); diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c index 090582baecfe..8f09b8430893 100644 --- a/drivers/crypto/stm32/stm32_crc32.c +++ b/drivers/crypto/stm32/stm32_crc32.c @@ -208,6 +208,7 @@ static struct shash_alg algs[] = { .cra_name = "crc32", .cra_driver_name = DRIVER_NAME, .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, 
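/* CRYPTO_ALG_OPTIONAL_KEY (set above) marks the "key" as only an
 * initial CRC seed, so the transform may be used without a prior
 * setkey() call */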
.cra_alignmask = 3, .cra_ctxsize = sizeof(struct stm32_crc_ctx), @@ -229,6 +230,7 @@ static struct shash_alg algs[] = { .cra_name = "crc32c", .cra_driver_name = DRIVER_NAME, .cra_priority = 200, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_alignmask = 3, .cra_ctxsize = sizeof(struct stm32_crc_ctx), diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c index 1547cbe13dc2..a81d89b3b7d8 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c @@ -451,6 +451,7 @@ static struct platform_driver sun4i_ss_driver = { module_platform_driver(sun4i_ss_driver); +MODULE_ALIAS("platform:sun4i-ss"); MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Corentin LABBE "); diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c index 0d01d1624252..63d636424161 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c @@ -28,7 +28,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); ss = algt->ss; - spin_lock(&ss->slock); + spin_lock_bh(&ss->slock); writel(mode, ss->base + SS_CTL); @@ -51,6 +51,6 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, } writel(0, ss->base + SS_CTL); - spin_unlock(&ss->slock); - return dlen; + spin_unlock_bh(&ss->slock); + return 0; } diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index dff88838dce7..57e1b203cf36 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1116,16 +1116,21 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count, return count; } -int talitos_sg_map(struct device *dev, struct scatterlist *src, - unsigned int len, struct talitos_edesc *edesc, - struct talitos_ptr *ptr, - int sg_count, unsigned int offset, int tbl_off) +static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src, + unsigned int len, struct talitos_edesc *edesc, + struct talitos_ptr *ptr, int sg_count, + unsigned int offset, int tbl_off, int elen) { struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); + if (!src) { + *ptr = zero_entry; + return 1; + } + to_talitos_ptr_len(ptr, len, is_sec1); - to_talitos_ptr_ext_set(ptr, 0, is_sec1); + to_talitos_ptr_ext_set(ptr, elen, is_sec1); if (sg_count == 1) { to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1); @@ -1135,7 +1140,7 @@ int talitos_sg_map(struct device *dev, struct scatterlist *src, to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1); return sg_count; } - sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, + sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen, &edesc->link_tbl[tbl_off]); if (sg_count == 1) { /* Only one segment now, so no link tbl needed*/ @@ -1149,6 +1154,15 @@ int talitos_sg_map(struct device *dev, struct scatterlist *src, return sg_count; } +static int talitos_sg_map(struct device *dev, struct scatterlist *src, + unsigned int len, struct talitos_edesc *edesc, + struct talitos_ptr *ptr, int sg_count, + unsigned int offset, int tbl_off) +{ + return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset, + tbl_off, 0); +} + /* * fill in and submit ipsec_esp descriptor */ @@ -1166,7 +1180,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, unsigned int ivsize = crypto_aead_ivsize(aead); int 
tbl_off = 0; int sg_count, ret; - int sg_link_tbl_len; + int elen = 0; bool sync_needed = false; struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); @@ -1220,24 +1234,15 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, * extent is bytes of HMAC postpended to ciphertext, * typically 12 for ipsec */ - to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1); - to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1); - - sg_link_tbl_len = cryptlen; - - if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) { - to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1); + if ((desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) && + (desc->hdr & DESC_HDR_MODE1_MDEU_CICV)) + elen = authsize; - if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) - sg_link_tbl_len += authsize; - } - - sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc, - &desc->ptr[4], sg_count, areq->assoclen, - tbl_off); + ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4], + sg_count, areq->assoclen, tbl_off, elen); - if (sg_count > 1) { - tbl_off += sg_count; + if (ret > 1) { + tbl_off += ret; sync_needed = true; } @@ -1248,14 +1253,15 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE); } - sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc, - &desc->ptr[5], sg_count, areq->assoclen, - tbl_off); + ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5], + sg_count, areq->assoclen, tbl_off); if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1); - if (sg_count > 1) { + /* ICV data */ + if (ret > 1) { + tbl_off += ret; edesc->icv_ool = true; sync_needed = true; @@ -1265,9 +1271,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, sizeof(struct talitos_ptr) + authsize; /* Add an entry to the link table for ICV data */ - tbl_ptr += sg_count - 1; - to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1); - tbl_ptr++; + to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1); to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN, is_sec1); to_talitos_ptr_len(tbl_ptr, authsize, is_sec1); @@ -1275,18 +1279,33 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, /* icv data follows link tables */ to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset, is_sec1); + } else { + dma_addr_t addr = edesc->dma_link_tbl; + + if (is_sec1) + addr += areq->assoclen + cryptlen; + else + addr += sizeof(struct talitos_ptr) * tbl_off; + + to_talitos_ptr(&desc->ptr[6], addr, is_sec1); + to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1); + } + } else if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) { + ret = talitos_sg_map(dev, areq->dst, authsize, edesc, + &desc->ptr[6], sg_count, areq->assoclen + + cryptlen, + tbl_off); + if (ret > 1) { + tbl_off += ret; + edesc->icv_ool = true; + sync_needed = true; + } else { + edesc->icv_ool = false; } } else { edesc->icv_ool = false; } - /* ICV data */ - if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) { - to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1); - to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl + - areq->assoclen + cryptlen, is_sec1); - } - /* iv out */ if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, @@ -1494,12 +1513,20 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key, unsigned int keylen) { struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + u32 tmp[DES_EXPKEY_WORDS]; if (keylen > 
TALITOS_MAX_KEY_SIZE) { crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } + if (unlikely(crypto_ablkcipher_get_flags(cipher) & + CRYPTO_TFM_REQ_WEAK_KEY) && + !des_ekey(tmp, key)) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY); + return -EINVAL; + } + memcpy(&ctx->key, key, keylen); ctx->keylen = keylen; @@ -2614,7 +2641,7 @@ static struct talitos_alg_template driver_algs[] = { .ivsize = AES_BLOCK_SIZE, } }, - .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | + .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP | DESC_HDR_SEL0_AESU | DESC_HDR_MODE0_AESU_CTR, }, @@ -3047,6 +3074,11 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, t_alg->algt.alg.aead.setkey = aead_setkey; t_alg->algt.alg.aead.encrypt = aead_encrypt; t_alg->algt.alg.aead.decrypt = aead_decrypt; + if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && + !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) { + kfree(t_alg); + return ERR_PTR(-ENOTSUPP); + } break; case CRYPTO_ALG_TYPE_AHASH: alg = &t_alg->algt.alg.hash.halg.base; diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c index 96072b9b55c4..d7316f7a3a69 100644 --- a/drivers/crypto/vmx/aes.c +++ b/drivers/crypto/vmx/aes.c @@ -48,8 +48,6 @@ static int p8_aes_init(struct crypto_tfm *tfm) alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); crypto_cipher_set_flags(fallback, crypto_cipher_get_flags((struct diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 7394d35d5936..5285ece4f33a 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -52,9 +52,6 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm) alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_skcipher_driver_name(fallback)); - crypto_skcipher_set_flags( fallback, diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index 17d84217dd76..02ba5f2aa0e6 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c @@ -48,8 +48,6 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm) alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); crypto_blkcipher_set_flags( fallback, diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 8cd6e62e4c90..8bd9aff0f55f 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c @@ -53,8 +53,6 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm) alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_skcipher_driver_name(fallback)); crypto_skcipher_set_flags( fallback, diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index 27a94a119009..1c4b5b889fba 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c @@ -64,8 +64,6 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm) alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback))); crypto_shash_set_flags(fallback, crypto_shash_get_flags((struct crypto_shash diff --git a/drivers/dax/device.c b/drivers/dax/device.c index e9f3b3e4bbf4..7b0bf825c4e7 100644 --- 
a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -222,7 +222,8 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, unsigned long size) { struct resource *res; - phys_addr_t phys; + /* gcc-4.6.3-nolibc for i386 complains that this is uninitialized */ + phys_addr_t uninitialized_var(phys); int i; for (i = 0; i < dev_dax->num_resources; i++) { @@ -427,9 +428,21 @@ static int dev_dax_fault(struct vm_fault *vmf) return dev_dax_huge_fault(vmf, PE_SIZE_PTE); } +static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr) +{ + struct file *filp = vma->vm_file; + struct dev_dax *dev_dax = filp->private_data; + struct dax_region *dax_region = dev_dax->region; + + if (!IS_ALIGNED(addr, dax_region->align)) + return -EINVAL; + return 0; +} + static const struct vm_operations_struct dax_vm_ops = { .fault = dev_dax_fault, .huge_fault = dev_dax_huge_fault, + .split = dev_dax_split, }; static int dax_mmap(struct file *filp, struct vm_area_struct *vma) diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 557b93703532..6c179c2a9ff9 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -73,42 +73,50 @@ EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev); /** * __bdev_dax_supported() - Check if the device supports dax for filesystem - * @sb: The superblock of the device + * @bdev: block device to check * @blocksize: The block size of the device * * This is a library function for filesystems to check if the block device * can be mounted with dax option. * - * Return: negative errno if unsupported, 0 if supported. + * Return: true if supported, false if unsupported */ -int __bdev_dax_supported(struct super_block *sb, int blocksize) +bool __bdev_dax_supported(struct block_device *bdev, int blocksize) { - struct block_device *bdev = sb->s_bdev; struct dax_device *dax_dev; + struct request_queue *q; pgoff_t pgoff; int err, id; void *kaddr; pfn_t pfn; long len; + char buf[BDEVNAME_SIZE]; if (blocksize != PAGE_SIZE) { - pr_err("VFS (%s): error: unsupported blocksize for dax\n", - sb->s_id); - return -EINVAL; + pr_debug("%s: error: unsupported blocksize for dax\n", + bdevname(bdev, buf)); + return false; + } + + q = bdev_get_queue(bdev); + if (!q || !blk_queue_dax(q)) { + pr_debug("%s: error: request queue doesn't support dax\n", + bdevname(bdev, buf)); + return false; } err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff); if (err) { - pr_err("VFS (%s): error: unaligned partition for dax\n", - sb->s_id); - return err; + pr_debug("%s: error: unaligned partition for dax\n", + bdevname(bdev, buf)); + return false; } dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); if (!dax_dev) { - pr_err("VFS (%s): error: device does not support dax\n", - sb->s_id); - return -EOPNOTSUPP; + pr_debug("%s: error: device does not support dax\n", + bdevname(bdev, buf)); + return false; } id = dax_read_lock(); @@ -118,12 +126,12 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize) put_dax(dax_dev); if (len < 1) { - pr_err("VFS (%s): error: dax access failed (%ld)", - sb->s_id, len); - return len < 0 ? 
len : -EIO; + pr_debug("%s: error: dax access failed (%ld)\n", + bdevname(bdev, buf), len); + return false; } - return 0; + return true; } EXPORT_SYMBOL_GPL(__bdev_dax_supported); #endif @@ -344,6 +352,9 @@ static struct inode *dax_alloc_inode(struct super_block *sb) struct inode *inode; dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL); + if (!dax_dev) + return NULL; + inode = &dax_dev->inode; inode->i_rdev = 0; return inode; diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index a1c4ee818614..8a411514a7c5 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -676,7 +676,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev, devfreq = devfreq_add_device(dev, profile, governor_name, data); if (IS_ERR(devfreq)) { devres_free(ptr); - return ERR_PTR(-ENOMEM); + return devfreq; } *ptr = devfreq; @@ -935,7 +935,8 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr, if (df->governor == governor) { ret = 0; goto out; - } else if (df->governor->immutable || governor->immutable) { + } else if ((df->governor && df->governor->immutable) || + governor->immutable) { ret = -EINVAL; goto out; } diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index ed3b785bae37..09ccac1768e3 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -30,4 +30,6 @@ config SW_SYNC WARNING: improper use of this can result in deadlocking kernel drivers from userspace. Intended for test and debug only. +source "drivers/dma-buf/hyper_dmabuf/Kconfig" + endmenu diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index c33bf8863147..3f15a841502e 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,3 +1,4 @@ obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o +obj-$(CONFIG_HYPER_DMABUF) += hyper_dmabuf/ diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 4a038dcf5361..bc1cb284111c 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -625,7 +625,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach); struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, enum dma_data_direction direction) { - struct sg_table *sg_table = ERR_PTR(-EINVAL); + struct sg_table *sg_table; might_sleep(); diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index 0350829ba62e..dd1edfb27b61 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -31,6 +31,14 @@ static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence) return "unbound"; } +static void irq_dma_fence_array_work(struct irq_work *wrk) +{ + struct dma_fence_array *array = container_of(wrk, typeof(*array), work); + + dma_fence_signal(&array->base); + dma_fence_put(&array->base); +} + static void dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_cb *cb) { @@ -39,8 +47,9 @@ static void dma_fence_array_cb_func(struct dma_fence *f, struct dma_fence_array *array = array_cb->array; if (atomic_dec_and_test(&array->num_pending)) - dma_fence_signal(&array->base); - dma_fence_put(&array->base); + irq_work_queue(&array->work); + else + dma_fence_put(&array->base); } static bool dma_fence_array_enable_signaling(struct dma_fence *fence) @@ -136,6 +145,7 @@ struct dma_fence_array *dma_fence_array_create(int num_fences, spin_lock_init(&array->lock); dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock, context, 
seqno); + init_irq_work(&array->work, irq_dma_fence_array_work); array->num_fences = num_fences; atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences); diff --git a/drivers/dma-buf/hyper_dmabuf/Kconfig b/drivers/dma-buf/hyper_dmabuf/Kconfig new file mode 100644 index 000000000000..1d91a114ba61 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/Kconfig @@ -0,0 +1,72 @@ +menu "hyper_dmabuf options" + +config HYPER_DMABUF + bool "Enable hyper_dmabuf driver" + default y + depends on (X86=y || X86_64=y) + +choice + prompt "Hypervisor" + depends on HYPER_DMABUF + default HYPER_DMABUF_XEN + +config HYPER_DMABUF_XEN + bool "Configure hyper_dmabuf for XEN hypervisor" + depends on HYPER_DMABUF && XEN + help + Configures the hyper_dmabuf driver for the XEN hypervisor + +config HYPER_DMABUF_ACRN + bool "Configure hyper_dmabuf for ACRN hypervisor" + depends on HYPER_DMABUF && ACRN_VIRTIO_DEVICES + help + Configures the hyper_dmabuf driver for the ACRN hypervisor +endchoice + +choice + prompt "Virtio driver type" + depends on HYPER_DMABUF && HYPER_DMABUF_ACRN + default HYPER_DMABUF_VIRTIO_BE + +config HYPER_DMABUF_VIRTIO_BE + depends on VBS && DRM_I915_GVT + bool "Configure hyper_dmabuf as virtio backend" + help + Configures the hyper_dmabuf driver as the virtio backend + +config HYPER_DMABUF_VIRTIO_FE + depends on ACRN_VIRTIO_DEVICES + bool "Configure hyper_dmabuf as virtio frontend" + help + Configures the hyper_dmabuf driver as the virtio frontend +endchoice + +config HYPER_DMABUF_SYSFS + bool "Enable sysfs information about hyper DMA buffers" + default y + depends on HYPER_DMABUF + help + Expose information about imported and exported buffers using + the hyper_dmabuf driver + +config HYPER_DMABUF_EVENT_GEN + bool "Enable event-generation and polling operation" + default n + depends on HYPER_DMABUF + help + With this config enabled, the hyper_dmabuf driver on the importer + side generates events and queues them up in the event list whenever + a new shared DMA-BUF is available. Events in the list can be + retrieved by a read operation. + +config HYPER_DMABUF_XEN_AUTO_RX_CH_ADD + bool "Enable automatic rx-ch add with 10 secs interval" + default y + depends on HYPER_DMABUF && HYPER_DMABUF_XEN + help + If enabled, the driver reads a node in xenstore every 10 seconds + to check whether any tx comm ch has been configured by another + domain, then automatically initializes a matching rx comm ch for + any existing tx comm chs.
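+# A minimal illustrative selection for a Xen setup, built from the
+# options above (an assumed-typical sketch, not taken from any
+# particular defconfig):
+#   CONFIG_HYPER_DMABUF=y
+#   CONFIG_HYPER_DMABUF_XEN=y
+#   CONFIG_HYPER_DMABUF_SYSFS=y
+#   CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD=y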
+ +endmenu diff --git a/drivers/dma-buf/hyper_dmabuf/Makefile b/drivers/dma-buf/hyper_dmabuf/Makefile new file mode 100644 index 000000000000..f63967cc99f6 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/Makefile @@ -0,0 +1,57 @@ +TARGET_MODULE:=hyper_dmabuf + +# If we are running under the kernel build system ifneq ($(KERNELRELEASE),) + $(TARGET_MODULE)-objs := hyper_dmabuf_drv.o \ + hyper_dmabuf_ioctl.o \ + hyper_dmabuf_list.o \ + hyper_dmabuf_sgl_proc.o \ + hyper_dmabuf_ops.o \ + hyper_dmabuf_msg.o \ + hyper_dmabuf_id.o \ + hyper_dmabuf_remote_sync.o \ + hyper_dmabuf_query.o \ + +ifeq ($(CONFIG_HYPER_DMABUF_EVENT_GEN), y) + $(TARGET_MODULE)-objs += hyper_dmabuf_event.o +endif + +ifeq ($(CONFIG_HYPER_DMABUF_XEN), y) + $(TARGET_MODULE)-objs += xen/hyper_dmabuf_xen_comm.o \ + xen/hyper_dmabuf_xen_comm_list.o \ + xen/hyper_dmabuf_xen_shm.o \ + xen/hyper_dmabuf_xen_drv.o +else ifeq ($(CONFIG_HYPER_DMABUF_ACRN), y) + ifeq ($(CONFIG_HYPER_DMABUF_VIRTIO_BE), y) + $(TARGET_MODULE)-objs += virtio/hyper_dmabuf_virtio_be_drv.o \ + virtio/hyper_dmabuf_virtio_fe_list.o + else + $(TARGET_MODULE)-objs += virtio/hyper_dmabuf_virtio_fe_drv.o + endif + $(TARGET_MODULE)-objs += virtio/hyper_dmabuf_virtio_common.o \ + virtio/hyper_dmabuf_virtio_shm.o \ + virtio/hyper_dmabuf_virtio_comm_ring.o +endif + +obj-$(CONFIG_HYPER_DMABUF) := $(TARGET_MODULE).o + +# If we are running without the kernel build system else BUILDSYSTEM_DIR?=../../../ PWD:=$(shell pwd) + +all : +# run the kernel build system to build the module + $(MAKE) -C $(BUILDSYSTEM_DIR) M=$(PWD) modules + +clean: +# run the kernel build system to clean up the current directory + $(MAKE) -C $(BUILDSYSTEM_DIR) M=$(PWD) clean + +load: + insmod ./$(TARGET_MODULE).ko + +unload: + rmmod ./$(TARGET_MODULE).ko + +endif diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c new file mode 100644 index 000000000000..f1afce29d6af --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c @@ -0,0 +1,411 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE.
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_ioctl.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_id.h" +#include "hyper_dmabuf_event.h" + +#ifdef CONFIG_HYPER_DMABUF_XEN +#include "xen/hyper_dmabuf_xen_drv.h" +#elif defined (CONFIG_HYPER_DMABUF_ACRN) +#include "virtio/hyper_dmabuf_virtio_common.h" +#endif + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Intel Corporation"); + +struct hyper_dmabuf_private *hy_drv_priv; + +static void force_free(struct exported_sgt_info *exported, + void *attr) +{ + struct ioctl_hyper_dmabuf_unexport unexport_attr; + struct file *filp = (struct file *)attr; + + if (!filp || !exported) + return; + + if (exported->filp == filp) { + dev_dbg(hy_drv_priv->dev, + "Forcefully releasing buffer {id:%d key:%d %d %d}\n", + exported->hid.id, exported->hid.rng_key[0], + exported->hid.rng_key[1], exported->hid.rng_key[2]); + + unexport_attr.hid = exported->hid; + unexport_attr.delay_ms = 0; + + hyper_dmabuf_unexport_ioctl(filp, &unexport_attr); + } +} + +static int hyper_dmabuf_open(struct inode *inode, struct file *filp) +{ + int ret = 0; + + /* Do not allow exclusive open */ + if (filp->f_flags & O_EXCL) + return -EBUSY; + + return ret; +} + +static int hyper_dmabuf_release(struct inode *inode, struct file *filp) +{ + hyper_dmabuf_foreach_exported(force_free, filp); + + return 0; +} + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + +static unsigned int hyper_dmabuf_event_poll(struct file *filp, + struct poll_table_struct *wait) +{ + poll_wait(filp, &hy_drv_priv->event_wait, wait); + + if (!list_empty(&hy_drv_priv->event_list)) + return POLLIN | POLLRDNORM; + + return 0; +} + +static ssize_t hyper_dmabuf_event_read(struct file *filp, char __user *buffer, + size_t count, loff_t *offset) +{ + int ret; + + /* only root can read events */ + if (!capable(CAP_DAC_OVERRIDE)) { + dev_err(hy_drv_priv->dev, + "Only root can read events\n"); + return -EPERM; + } + + /* make sure user buffer can be written */ + if (!access_ok(VERIFY_WRITE, buffer, count)) { + dev_err(hy_drv_priv->dev, + "User buffer can't be written.\n"); + return -EINVAL; + } + + ret = mutex_lock_interruptible(&hy_drv_priv->event_read_lock); + if (ret) + return ret; + + while (1) { + struct hyper_dmabuf_event *e = NULL; + + spin_lock_irq(&hy_drv_priv->event_lock); + if (!list_empty(&hy_drv_priv->event_list)) { + e = list_first_entry(&hy_drv_priv->event_list, + struct hyper_dmabuf_event, link); + list_del(&e->link); + } + spin_unlock_irq(&hy_drv_priv->event_lock); + + if (!e) { + if (ret) + break; + + if (filp->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + break; + } + + mutex_unlock(&hy_drv_priv->event_read_lock); + ret = wait_event_interruptible(hy_drv_priv->event_wait, + !list_empty(&hy_drv_priv->event_list)); + + if (ret == 0) + ret = mutex_lock_interruptible( + &hy_drv_priv->event_read_lock); + + if (ret) + return ret; + } else { + unsigned int length = (sizeof(e->event_data.hdr) + + e->event_data.hdr.size); + + if (length > count - ret) { +put_back_event: + spin_lock_irq(&hy_drv_priv->event_lock); + list_add(&e->link, &hy_drv_priv->event_list); + spin_unlock_irq(&hy_drv_priv->event_lock); + break; + } + + if (copy_to_user(buffer + ret, &e->event_data.hdr, + sizeof(e->event_data.hdr))) { + if (ret == 0) + ret = -EFAULT; + + goto put_back_event; + } + + ret += sizeof(e->event_data.hdr); + + if (copy_to_user(buffer + ret, 
e->event_data.data, + e->event_data.hdr.size)) { + /* error while copying void *data */ + + struct hyper_dmabuf_event_hdr dummy_hdr = {0}; + + ret -= sizeof(e->event_data.hdr); + + /* nullifying hdr of the event in user buffer */ + if (copy_to_user(buffer + ret, &dummy_hdr, + sizeof(dummy_hdr))) { + dev_err(hy_drv_priv->dev, + "failed to nullify invalid hdr already in userspace\n"); + } + + ret = -EFAULT; + + goto put_back_event; + } + + ret += e->event_data.hdr.size; + hy_drv_priv->pending--; + kfree(e); + } + } + + mutex_unlock(&hy_drv_priv->event_read_lock); + + return ret; +} + +#endif + +static const struct file_operations hyper_dmabuf_driver_fops = { + .owner = THIS_MODULE, + .open = hyper_dmabuf_open, + .release = hyper_dmabuf_release, + +/* poll and read interfaces are needed only for event-polling */ +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + .read = hyper_dmabuf_event_read, + .poll = hyper_dmabuf_event_poll, +#endif + + .unlocked_ioctl = hyper_dmabuf_ioctl, +}; + +static struct miscdevice hyper_dmabuf_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "hyper_dmabuf", + .fops = &hyper_dmabuf_driver_fops, +}; + +static int register_device(void) +{ + int ret = 0; + + ret = misc_register(&hyper_dmabuf_miscdev); + + if (ret) { + printk(KERN_ERR "hyper_dmabuf: driver can't be registered\n"); + return ret; + } + + hy_drv_priv->dev = hyper_dmabuf_miscdev.this_device; + + /* TODO: Check if there is a different way to initialize dma mask */ + dma_coerce_mask_and_coherent(hy_drv_priv->dev, DMA_BIT_MASK(64)); + + return ret; +} + +static void unregister_device(void) +{ + dev_info(hy_drv_priv->dev, + "hyper_dmabuf: unregister_device() is called\n"); + + misc_deregister(&hyper_dmabuf_miscdev); +} + +static int __init hyper_dmabuf_drv_init(void) +{ + int ret = 0; + + printk(KERN_NOTICE "hyper_dmabuf: initialization started\n"); + + hy_drv_priv = kcalloc(1, sizeof(struct hyper_dmabuf_private), + GFP_KERNEL); + + if (!hy_drv_priv) + return -ENOMEM; + + ret = register_device(); + if (ret < 0) + return ret; + +/* pick the hypervisor backend selected at build time */ +#ifdef CONFIG_HYPER_DMABUF_XEN + hy_drv_priv->bknd_ops = &xen_bknd_ops; +#elif defined (CONFIG_HYPER_DMABUF_ACRN) + hy_drv_priv->bknd_ops = &virtio_bknd_ops; +#else + hy_drv_priv->bknd_ops = NULL; + printk(KERN_ERR "No backend configured for hyper_dmabuf in kernel config\n"); +#endif + + if (hy_drv_priv->bknd_ops == NULL) { + printk(KERN_ERR "Hyper_dmabuf: no backend found\n"); + return -1; + } + + mutex_init(&hy_drv_priv->lock); + + mutex_lock(&hy_drv_priv->lock); + + hy_drv_priv->initialized = false; + + dev_info(hy_drv_priv->dev, + "initializing database for imported/exported dmabufs\n"); + + hy_drv_priv->work_queue = create_workqueue("hyper_dmabuf_wqueue"); + + ret = hyper_dmabuf_table_init(); + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "failed to init table for exported/imported entries\n"); + mutex_unlock(&hy_drv_priv->lock); + kfree(hy_drv_priv); + return ret; + } + +#ifdef CONFIG_HYPER_DMABUF_SYSFS + ret = hyper_dmabuf_register_sysfs(hy_drv_priv->dev); + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "failed to initialize sysfs\n"); + mutex_unlock(&hy_drv_priv->lock); + kfree(hy_drv_priv); + return ret; + } +#endif + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + mutex_init(&hy_drv_priv->event_read_lock); + spin_lock_init(&hy_drv_priv->event_lock); + + /* Initialize event queue */ + INIT_LIST_HEAD(&hy_drv_priv->event_list); + init_waitqueue_head(&hy_drv_priv->event_wait); + + /* resetting number of pending events */ + hy_drv_priv->pending
= 0; +#endif + + if (hy_drv_priv->bknd_ops->init) { + ret = hy_drv_priv->bknd_ops->init(); + + if (ret < 0) { + dev_dbg(hy_drv_priv->dev, + "failed to initialize backend.\n"); + return ret; + } + } + + hy_drv_priv->domid = hy_drv_priv->bknd_ops->get_vm_id(); + + hy_drv_priv->initialized = true; + if (hy_drv_priv->bknd_ops->init_comm_env) { + ret = hy_drv_priv->bknd_ops->init_comm_env(); + if (ret < 0) { + hy_drv_priv->initialized = false; + dev_dbg(hy_drv_priv->dev, + "failed to initialize comm-env.\n"); + } + } + + mutex_unlock(&hy_drv_priv->lock); + + dev_info(hy_drv_priv->dev, + "Finishing up initialization of hyper_dmabuf drv\n"); + + /* interrupt for comm should be registered here */ + return ret; +} + +static void hyper_dmabuf_drv_exit(void) +{ +#ifdef CONFIG_HYPER_DMABUF_SYSFS + hyper_dmabuf_unregister_sysfs(hy_drv_priv->dev); +#endif + + mutex_lock(&hy_drv_priv->lock); + + /* hash tables for export/import entries and ring_infos */ + hyper_dmabuf_table_destroy(); + + if (hy_drv_priv->bknd_ops->destroy_comm) { + hy_drv_priv->bknd_ops->destroy_comm(); + } + + if (hy_drv_priv->bknd_ops->cleanup) { + hy_drv_priv->bknd_ops->cleanup(); + } + + /* destroy workqueue */ + if (hy_drv_priv->work_queue) + destroy_workqueue(hy_drv_priv->work_queue); + + /* destroy id_queue */ + if (hy_drv_priv->id_queue) + hyper_dmabuf_free_hid_list(); + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + /* clean up event queue */ + hyper_dmabuf_events_release(); +#endif + + mutex_unlock(&hy_drv_priv->lock); + + dev_info(hy_drv_priv->dev, + "hyper_dmabuf driver: Exiting\n"); + + /* deregister the misc device (which still dereferences hy_drv_priv) + * before freeing the private data, not after + */ + unregister_device(); + + kfree(hy_drv_priv); +} + +module_init(hyper_dmabuf_drv_init); +module_exit(hyper_dmabuf_drv_exit); diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h new file mode 100644 index 000000000000..45c24fd8d25d --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h @@ -0,0 +1,118 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE.
+ * + */ + +#ifndef __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ +#define __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ + +#include +#include + +struct hyper_dmabuf_req; + +struct hyper_dmabuf_event { + struct hyper_dmabuf_event_data event_data; + struct list_head link; +}; + +struct hyper_dmabuf_private { + struct device *dev; + + /* VM(domain) id of current VM instance */ + int domid; + + /* workqueue dedicated to hyper_dmabuf driver */ + struct workqueue_struct *work_queue; + + /* list of reusable hyper_dmabuf_ids */ + struct list_reusable_id *id_queue; + + /* backend ops - hypervisor specific */ + struct hyper_dmabuf_bknd_ops *bknd_ops; + + /* device global lock */ + /* TODO: might need a lock per resource (e.g. EXPORT LIST) */ + struct mutex lock; + + /* flag that shows whether backend is initialized */ + bool initialized; + + wait_queue_head_t event_wait; + struct list_head event_list; + + spinlock_t event_lock; + struct mutex event_read_lock; + + /* # of pending events */ + int pending; +}; + +struct list_reusable_id { + hyper_dmabuf_id_t hid; + struct list_head list; +}; + +struct hyper_dmabuf_bknd_ops { + /* backend initialization routine (optional) */ + int (*init)(void); + + /* backend cleanup routine (optional) */ + void (*cleanup)(void); + + /* retrieving id of current virtual machine */ + int (*get_vm_id)(void); + + /* get pages shared via hypervisor-specific method */ + int (*share_pages)(struct page **, int, int, void **); + + /* make shared pages unshared via hypervisor-specific method */ + int (*unshare_pages)(void **, int); + + /* map remotely shared pages on importer's side via + * hypervisor-specific method + */ + struct page ** (*map_shared_pages)(unsigned long, int, int, void **); + + /* unmap and free shared pages on importer's side via + * hypervisor-specific method + */ + int (*unmap_shared_pages)(void **, int); + + /* initialize communication environment */ + int (*init_comm_env)(void); + + void (*destroy_comm)(void); + + /* upstream ch setup (receiving and responding) */ + int (*init_rx_ch)(int); + + /* downstream ch setup (transmitting and parsing responses) */ + int (*init_tx_ch)(int); + + int (*send_req)(int, struct hyper_dmabuf_req *, int); +}; + +/* exporting global drv private info */ +extern struct hyper_dmabuf_private *hy_drv_priv; + +#endif /* __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c new file mode 100644 index 000000000000..392ea99e0784 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c @@ -0,0 +1,122 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_struct.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_event.h" + +static void send_event(struct hyper_dmabuf_event *e) +{ + struct hyper_dmabuf_event *oldest; + unsigned long irqflags; + + spin_lock_irqsave(&hy_drv_priv->event_lock, irqflags); + + /* check current number of event then if it hits the max num allowed + * then remove the oldest event in the list + */ + if (hy_drv_priv->pending > MAX_DEPTH_EVENT_QUEUE - 1) { + oldest = list_first_entry(&hy_drv_priv->event_list, + struct hyper_dmabuf_event, link); + list_del(&oldest->link); + hy_drv_priv->pending--; + kfree(oldest); + } + + list_add_tail(&e->link, + &hy_drv_priv->event_list); + + hy_drv_priv->pending++; + + wake_up_interruptible(&hy_drv_priv->event_wait); + + spin_unlock_irqrestore(&hy_drv_priv->event_lock, irqflags); +} + +void hyper_dmabuf_events_release(void) +{ + struct hyper_dmabuf_event *e, *et; + unsigned long irqflags; + + spin_lock_irqsave(&hy_drv_priv->event_lock, irqflags); + + list_for_each_entry_safe(e, et, &hy_drv_priv->event_list, + link) { + list_del(&e->link); + kfree(e); + hy_drv_priv->pending--; + } + + if (hy_drv_priv->pending) { + dev_err(hy_drv_priv->dev, + "possible leak on event_list\n"); + } + + spin_unlock_irqrestore(&hy_drv_priv->event_lock, irqflags); +} + +int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid) +{ + struct hyper_dmabuf_event *e; + struct imported_sgt_info *imported; + + imported = hyper_dmabuf_find_imported(hid); + + if (!imported) { + dev_err(hy_drv_priv->dev, + "can't find imported_sgt_info in the list\n"); + return -EINVAL; + } + + e = kzalloc(sizeof(*e), GFP_KERNEL); + + if (!e) + return -ENOMEM; + + e->event_data.hdr.event_type = HYPER_DMABUF_NEW_IMPORT; + e->event_data.hdr.hid = hid; + e->event_data.data = (void *)imported->priv; + e->event_data.hdr.size = imported->sz_priv; + + send_event(e); + + dev_dbg(hy_drv_priv->dev, + "event number = %d :", hy_drv_priv->pending); + + dev_dbg(hy_drv_priv->dev, + "generating events for {%d, %d, %d, %d}\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], imported->hid.rng_key[2]); + + return 0; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h new file mode 100644 index 000000000000..50db04faf222 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h @@ -0,0 +1,38 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_EVENT_H__ +#define __HYPER_DMABUF_EVENT_H__ + +#define MAX_DEPTH_EVENT_QUEUE 32 + +enum hyper_dmabuf_event_type { + HYPER_DMABUF_NEW_IMPORT = 0x10000, +}; + +void hyper_dmabuf_events_release(void); + +int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid); + +#endif /* __HYPER_DMABUF_EVENT_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c new file mode 100644 index 000000000000..e67b84a7e64c --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c @@ -0,0 +1,133 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ *
+ * Authors:
+ * Dongwon Kim
+ * Mateusz Polrola
+ *
+ */
+
+#include
+#include
+#include
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_id.h"
+
+void hyper_dmabuf_store_hid(hyper_dmabuf_id_t hid)
+{
+	struct list_reusable_id *reusable_head = hy_drv_priv->id_queue;
+	struct list_reusable_id *new_reusable;
+
+	new_reusable = kmalloc(sizeof(*new_reusable), GFP_KERNEL);
+
+	if (!new_reusable)
+		return;
+
+	new_reusable->hid = hid;
+
+	list_add(&new_reusable->list, &reusable_head->list);
+}
+
+static hyper_dmabuf_id_t get_reusable_hid(void)
+{
+	struct list_reusable_id *reusable_head = hy_drv_priv->id_queue;
+	hyper_dmabuf_id_t hid = {-1, {0, 0, 0} };
+
+	/* check if there is a reusable id */
+	if (!list_empty(&reusable_head->list)) {
+		reusable_head = list_first_entry(&reusable_head->list,
+						 struct list_reusable_id,
+						 list);
+
+		list_del(&reusable_head->list);
+		hid = reusable_head->hid;
+		kfree(reusable_head);
+	}
+
+	return hid;
+}
+
+void hyper_dmabuf_free_hid_list(void)
+{
+	struct list_reusable_id *reusable_head = hy_drv_priv->id_queue;
+	struct list_reusable_id *temp_head;
+
+	if (reusable_head) {
+		/* freeing mem space of all reusable ids in the stack */
+		while (!list_empty(&reusable_head->list)) {
+			temp_head = list_first_entry(&reusable_head->list,
+						     struct list_reusable_id,
+						     list);
+			list_del(&temp_head->list);
+			kfree(temp_head);
+		}
+
+		/* freeing head */
+		kfree(reusable_head);
+	}
+}
+
+hyper_dmabuf_id_t hyper_dmabuf_get_hid(void)
+{
+	static int count;
+	hyper_dmabuf_id_t hid;
+	struct list_reusable_id *reusable_head;
+
+	/* first call to hyper_dmabuf_get_hid */
+	if (count == 0) {
+		reusable_head = kmalloc(sizeof(*reusable_head), GFP_KERNEL);
+
+		if (!reusable_head)
+			return (hyper_dmabuf_id_t){-1, {0, 0, 0} };
+
+		/* list head keeps an invalid hid as a sentinel */
+		reusable_head->hid.id = -1;
+		INIT_LIST_HEAD(&reusable_head->list);
+		hy_drv_priv->id_queue = reusable_head;
+	}
+
+	hid = get_reusable_hid();
+
+	/* creating a new HID only if nothing is in the reusable id queue
+	 * and count is less than the maximum allowed
+	 */
+	if (hid.id == -1 && count < HYPER_DMABUF_ID_MAX)
+		hid.id = HYPER_DMABUF_ID_CREATE(hy_drv_priv->domid, count++);
+
+	/* random data embedded in the id for security */
+	get_random_bytes(&hid.rng_key[0], 12);
+
+	return hid;
+}
+
+bool hyper_dmabuf_hid_keycomp(hyper_dmabuf_id_t hid1, hyper_dmabuf_id_t hid2)
+{
+	int i;
+
+	/* compare keys */
+	for (i = 0; i < 3; i++) {
+		if (hid1.rng_key[i] != hid2.rng_key[i])
+			return false;
+	}
+
+	return true;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h
new file mode 100644
index 000000000000..ed690f3a478c
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_ID_H__
+#define __HYPER_DMABUF_ID_H__
+
+#define HYPER_DMABUF_ID_CREATE(domid, cnt) \
+	((((domid) & 0xFF) << 24) | ((cnt) & 0xFFFFFF))
+
+#define HYPER_DMABUF_DOM_ID(hid) \
+	(((hid.id) >> 24) & 0xFF)
+
+/* currently maximum number of buffers shared
+ * at any given moment is limited to 1000
+ */
+#define HYPER_DMABUF_ID_MAX 1000
+
+/* adding freed hid to the reusable list */
+void hyper_dmabuf_store_hid(hyper_dmabuf_id_t hid);
+
+/* freeing the reusable list */
+void hyper_dmabuf_free_hid_list(void);
+
+/* getting a hid available to use. */
+hyper_dmabuf_id_t hyper_dmabuf_get_hid(void);
+
+/* comparing two different hids */
+bool hyper_dmabuf_hid_keycomp(hyper_dmabuf_id_t hid1, hyper_dmabuf_id_t hid2);
+
+#endif /* __HYPER_DMABUF_ID_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c
new file mode 100644
index 000000000000..66cdcf6eff78
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -0,0 +1,790 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
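+ *
+ * Overview (descriptive note): this file implements the driver's ioctl
+ * interface: comm channel setup (tx/rx), exporting a dmabuf to a remote
+ * domain, re-exporting an imported buffer as a local fd, scheduling
+ * unexport, and answering queries against either list.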
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_id.h" +#include "hyper_dmabuf_struct.h" +#include "hyper_dmabuf_ioctl.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_msg.h" +#include "hyper_dmabuf_sgl_proc.h" +#include "hyper_dmabuf_ops.h" +#include "hyper_dmabuf_query.h" + +static int hyper_dmabuf_tx_ch_setup_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_tx_ch_setup *tx_ch_attr; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + int ret = 0; + + if (!data) { + dev_err(hy_drv_priv->dev, "user data is NULL\n"); + return -EINVAL; + } + tx_ch_attr = (struct ioctl_hyper_dmabuf_tx_ch_setup *)data; + + if (bknd_ops->init_tx_ch) { + ret = bknd_ops->init_tx_ch(tx_ch_attr->remote_domain); + } + + return ret; +} + +static int hyper_dmabuf_rx_ch_setup_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_rx_ch_setup *rx_ch_attr; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + int ret = 0; + + if (!data) { + dev_err(hy_drv_priv->dev, "user data is NULL\n"); + return -EINVAL; + } + + rx_ch_attr = (struct ioctl_hyper_dmabuf_rx_ch_setup *)data; + + if (bknd_ops->init_rx_ch) + ret = bknd_ops->init_rx_ch(rx_ch_attr->source_domain); + + return ret; +} + +static int send_export_msg(struct exported_sgt_info *exported, + struct pages_info *pg_info) +{ + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + struct hyper_dmabuf_req *req; + int op[MAX_NUMBER_OF_OPERANDS] = {0}; + int ret, i; + + /* now create request for importer via ring */ + op[0] = exported->hid.id; + + for (i = 0; i < 3; i++) + op[i+1] = exported->hid.rng_key[i]; + + if (pg_info) { + op[4] = pg_info->nents; + op[5] = pg_info->frst_ofst; + op[6] = pg_info->last_len; + op[7] = bknd_ops->share_pages(pg_info->pgs, exported->rdomid, + pg_info->nents, &exported->refs_info); + if (op[7] < 0) { + dev_err(hy_drv_priv->dev, "pages sharing failed\n"); + return op[7]; + } + } + + op[8] = exported->sz_priv; + + /* driver/application specific private info */ + memcpy(&op[9], exported->priv, op[8]); + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) + return -ENOMEM; + + /* composing a message to the importer */ + hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT, &op[0]); + + ret = bknd_ops->send_req(exported->rdomid, req, true); + + kfree(req); + + return ret; +} + +/* Fast path exporting routine in case same buffer is already exported. + * In this function, we skip normal exporting process and just update + * private data on both VMs (importer and exporter) + * + * return '1' if reexport is needed, return '0' if succeeds, return + * Kernel error code if something goes wrong + */ +static int fastpath_export(hyper_dmabuf_id_t hid, int sz_priv, char *priv) +{ + int reexport = 1; + int ret = 0; + struct exported_sgt_info *exported; + + exported = hyper_dmabuf_find_exported(hid); + + if (!exported) + return reexport; + + if (exported->valid == false) + return reexport; + + /* + * Check if unexport is already scheduled for that buffer, + * if so try to cancel it. If that will fail, buffer needs + * to be reexport once again. + */ + if (exported->unexport_sched) { + if (!cancel_delayed_work_sync(&exported->unexport)) + return reexport; + + exported->unexport_sched = false; + } + + /* if there's any change in size of private data. 
+ * we reallocate space for private data with new size + */ + if (sz_priv != exported->sz_priv) { + kfree(exported->priv); + + /* truncating size */ + if (sz_priv > MAX_SIZE_PRIV_DATA) + exported->sz_priv = MAX_SIZE_PRIV_DATA; + else + exported->sz_priv = sz_priv; + + exported->priv = kcalloc(1, exported->sz_priv, + GFP_KERNEL); + + if (!exported->priv) { + hyper_dmabuf_remove_exported(exported->hid); + hyper_dmabuf_cleanup_sgt_info(exported, true); + kfree(exported); + return -ENOMEM; + } + } + + /* update private data in sgt_info with new ones */ + ret = copy_from_user(exported->priv, priv, exported->sz_priv); + if (ret) { + dev_err(hy_drv_priv->dev, + "Failed to load a new private data\n"); + ret = -EINVAL; + } else { + /* send an export msg for updating priv in importer */ + ret = send_export_msg(exported, NULL); + + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "Failed to send a new private data\n"); + ret = -EBUSY; + } + } + + return ret; +} + +static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_export_remote *export_remote_attr = + (struct ioctl_hyper_dmabuf_export_remote *)data; + struct dma_buf *dma_buf; + struct dma_buf_attachment *attachment; + struct sg_table *sgt; + struct pages_info *pg_info; + struct exported_sgt_info *exported; + hyper_dmabuf_id_t hid; + int ret = 0; + + if (hy_drv_priv->domid == export_remote_attr->remote_domain) { + dev_err(hy_drv_priv->dev, + "exporting to the same VM is not permitted\n"); + return -EINVAL; + } + + dma_buf = dma_buf_get(export_remote_attr->dmabuf_fd); + + if (IS_ERR(dma_buf)) { + dev_err(hy_drv_priv->dev, "Cannot get dma buf\n"); + return PTR_ERR(dma_buf); + } + + /* we check if this specific attachment was already exported + * to the same domain and if yes and it's valid sgt_info, + * it returns hyper_dmabuf_id of pre-exported sgt_info + */ + hid = hyper_dmabuf_find_hid_exported(dma_buf, + export_remote_attr->remote_domain); + + if (hid.id != -1) { + ret = fastpath_export(hid, export_remote_attr->sz_priv, + export_remote_attr->priv); + + /* return if fastpath_export succeeds or + * gets some fatal error + */ + if (ret <= 0) { + dma_buf_put(dma_buf); + export_remote_attr->hid = hid; + return ret; + } + } + + attachment = dma_buf_attach(dma_buf, hy_drv_priv->dev); + if (IS_ERR(attachment)) { + dev_err(hy_drv_priv->dev, "cannot get attachment\n"); + ret = PTR_ERR(attachment); + goto fail_attach; + } + + sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL); + + if (IS_ERR(sgt)) { + dev_err(hy_drv_priv->dev, "cannot map attachment\n"); + ret = PTR_ERR(sgt); + goto fail_map_attachment; + } + + exported = kcalloc(1, sizeof(*exported), GFP_KERNEL); + + if (!exported) { + ret = -ENOMEM; + goto fail_sgt_info_creation; + } + + /* possible truncation */ + if (export_remote_attr->sz_priv > MAX_SIZE_PRIV_DATA) + exported->sz_priv = MAX_SIZE_PRIV_DATA; + else + exported->sz_priv = export_remote_attr->sz_priv; + + /* creating buffer for private data of buffer */ + if (exported->sz_priv != 0) { + exported->priv = kcalloc(1, exported->sz_priv, GFP_KERNEL); + + if (!exported->priv) { + ret = -ENOMEM; + goto fail_priv_creation; + } + } else { + dev_err(hy_drv_priv->dev, "size is 0\n"); + } + + exported->hid = hyper_dmabuf_get_hid(); + + /* no more exported dmabuf allowed */ + if (exported->hid.id == -1) { + dev_err(hy_drv_priv->dev, + "exceeds allowed number of dmabuf to be exported\n"); + ret = -ENOMEM; + goto fail_sgt_info_creation; + } + + exported->rdomid = export_remote_attr->remote_domain; + 
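+	/* from here on the exported entry owns these references;
+	 * the lists allocated below shadow every attachment and
+	 * kernel mapping made against the shadow dma-buf so they
+	 * can be torn down when the buffer is unexported
+	 */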
exported->dma_buf = dma_buf; + exported->valid = true; + + exported->active_sgts = kmalloc(sizeof(struct sgt_list), GFP_KERNEL); + if (!exported->active_sgts) { + ret = -ENOMEM; + goto fail_map_active_sgts; + } + + exported->active_attached = kmalloc(sizeof(struct attachment_list), + GFP_KERNEL); + if (!exported->active_attached) { + ret = -ENOMEM; + goto fail_map_active_attached; + } + + exported->va_kmapped = kmalloc(sizeof(struct kmap_vaddr_list), + GFP_KERNEL); + if (!exported->va_kmapped) { + ret = -ENOMEM; + goto fail_map_va_kmapped; + } + + exported->va_vmapped = kmalloc(sizeof(struct vmap_vaddr_list), + GFP_KERNEL); + if (!exported->va_vmapped) { + ret = -ENOMEM; + goto fail_map_va_vmapped; + } + + exported->active_sgts->sgt = sgt; + exported->active_attached->attach = attachment; + exported->va_kmapped->vaddr = NULL; + exported->va_vmapped->vaddr = NULL; + + /* initialize list of sgt, attachment and vaddr for dmabuf sync + * via shadow dma-buf + */ + INIT_LIST_HEAD(&exported->active_sgts->list); + INIT_LIST_HEAD(&exported->active_attached->list); + INIT_LIST_HEAD(&exported->va_kmapped->list); + INIT_LIST_HEAD(&exported->va_vmapped->list); + + /* copy private data to sgt_info */ + ret = copy_from_user(exported->priv, export_remote_attr->priv, + exported->sz_priv); + + if (ret) { + dev_err(hy_drv_priv->dev, + "failed to load private data\n"); + ret = -EINVAL; + goto fail_export; + } + + pg_info = hyper_dmabuf_ext_pgs(sgt); + if (!pg_info) { + dev_err(hy_drv_priv->dev, + "failed to construct pg_info\n"); + ret = -ENOMEM; + goto fail_export; + } + + exported->nents = pg_info->nents; + + /* now register it to export list */ + hyper_dmabuf_register_exported(exported); + + export_remote_attr->hid = exported->hid; + + ret = send_export_msg(exported, pg_info); + + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "failed to send out the export request\n"); + goto fail_send_request; + } + + /* free pg_info */ + kfree(pg_info->pgs); + kfree(pg_info); + + exported->filp = filp; + + return ret; + +/* Clean-up if error occurs */ + +fail_send_request: + hyper_dmabuf_remove_exported(exported->hid); + + /* free pg_info */ + kfree(pg_info->pgs); + kfree(pg_info); + +fail_export: + kfree(exported->va_vmapped); + +fail_map_va_vmapped: + kfree(exported->va_kmapped); + +fail_map_va_kmapped: + kfree(exported->active_attached); + +fail_map_active_attached: + kfree(exported->active_sgts); + kfree(exported->priv); + +fail_priv_creation: + kfree(exported); + +fail_map_active_sgts: +fail_sgt_info_creation: + dma_buf_unmap_attachment(attachment, sgt, + DMA_BIDIRECTIONAL); + +fail_map_attachment: + dma_buf_detach(dma_buf, attachment); + +fail_attach: + dma_buf_put(dma_buf); + + return ret; +} + +static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_export_fd *export_fd_attr = + (struct ioctl_hyper_dmabuf_export_fd *)data; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + struct imported_sgt_info *imported; + struct hyper_dmabuf_req *req; + struct page **data_pgs; + int op[4]; + int i; + int ret = 0; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + + /* look for dmabuf for the id */ + imported = hyper_dmabuf_find_imported(export_fd_attr->hid); + + /* can't find sgt from the table */ + if (!imported) { + dev_err(hy_drv_priv->dev, "can't find the entry\n"); + return -ENOENT; + } + + mutex_lock(&hy_drv_priv->lock); + + imported->importers++; + + /* send notification for export_fd to exporter */ + op[0] = imported->hid.id; + + for (i = 0; i < 3; 
i++) + op[i+1] = imported->hid.rng_key[i]; + + dev_dbg(hy_drv_priv->dev, "Export FD of buffer {id:%d key:%d %d %d}\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], imported->hid.rng_key[2]); + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) { + mutex_unlock(&hy_drv_priv->lock); + return -ENOMEM; + } + + hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD, &op[0]); + + ret = bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, true); + + if (ret < 0) { + /* in case of timeout other end eventually will receive request, + * so we need to undo it + */ + hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD_FAILED, + &op[0]); + bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, false); + kfree(req); + dev_err(hy_drv_priv->dev, + "Failed to create sgt or notify exporter\n"); + imported->importers--; + mutex_unlock(&hy_drv_priv->lock); + return ret; + } + + kfree(req); + + if (ret == HYPER_DMABUF_REQ_ERROR) { + dev_err(hy_drv_priv->dev, + "Buffer invalid {id:%d key:%d %d %d}, cannot import\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], imported->hid.rng_key[2]); + + imported->importers--; + mutex_unlock(&hy_drv_priv->lock); + return -EINVAL; + } + + ret = 0; + + dev_dbg(hy_drv_priv->dev, + "Found buffer gref %d off %d\n", + imported->ref_handle, imported->frst_ofst); + + dev_dbg(hy_drv_priv->dev, + "last len %d nents %d domain %d\n", + imported->last_len, imported->nents, + HYPER_DMABUF_DOM_ID(imported->hid)); + + if (!imported->sgt) { + dev_dbg(hy_drv_priv->dev, + "buffer {id:%d key:%d %d %d} pages not mapped yet\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], imported->hid.rng_key[2]); + + data_pgs = bknd_ops->map_shared_pages(imported->ref_handle, + HYPER_DMABUF_DOM_ID(imported->hid), + imported->nents, + &imported->refs_info); + + if (!data_pgs) { + dev_err(hy_drv_priv->dev, + "can't map pages hid {id:%d key:%d %d %d}\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], + imported->hid.rng_key[2]); + + imported->importers--; + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) { + mutex_unlock(&hy_drv_priv->lock); + return -ENOMEM; + } + + hyper_dmabuf_create_req(req, + HYPER_DMABUF_EXPORT_FD_FAILED, + &op[0]); + bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, + false); + kfree(req); + mutex_unlock(&hy_drv_priv->lock); + return -EINVAL; + } + + imported->sgt = hyper_dmabuf_create_sgt(data_pgs, + imported->frst_ofst, + imported->last_len, + imported->nents); + + } + + export_fd_attr->fd = hyper_dmabuf_export_fd(imported, + export_fd_attr->flags); + + if (export_fd_attr->fd < 0) { + /* fail to get fd */ + ret = export_fd_attr->fd; + } + + mutex_unlock(&hy_drv_priv->lock); + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return ret; +} + +/* unexport dmabuf from the database and send int req to the source domain + * to unmap it. 
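+ * This runs from the delayed work scheduled by the unexport ioctl:
+ * the entry is first marked invalid so no further EXPORT_FD request
+ * succeeds, then HYPER_DMABUF_NOTIFY_UNEXPORT is sent to the
+ * importing domain (exported->rdomid) so it can drop its mapping.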
+ */
+static void delayed_unexport(struct work_struct *work)
+{
+	struct hyper_dmabuf_req *req;
+	struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
+	struct exported_sgt_info *exported;
+	int op[4];
+	int i, ret;
+
+	if (!work)
+		return;
+
+	exported = container_of(work, struct exported_sgt_info, unexport.work);
+
+	dev_dbg(hy_drv_priv->dev,
+		"Marking buffer {id:%d key:%d %d %d} as invalid\n",
+		exported->hid.id, exported->hid.rng_key[0],
+		exported->hid.rng_key[1], exported->hid.rng_key[2]);
+
+	/* no longer valid */
+	exported->valid = false;
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+
+	if (!req)
+		return;
+
+	op[0] = exported->hid.id;
+
+	for (i = 0; i < 3; i++)
+		op[i+1] = exported->hid.rng_key[i];
+
+	hyper_dmabuf_create_req(req, HYPER_DMABUF_NOTIFY_UNEXPORT, &op[0]);
+
+	/* Now send unexport request to remote domain, marking
+	 * that buffer should not be used anymore
+	 */
+	ret = bknd_ops->send_req(exported->rdomid, req, true);
+	if (ret < 0) {
+		dev_err(hy_drv_priv->dev,
+			"unexport message for buffer {id:%d key:%d %d %d} failed\n",
+			exported->hid.id, exported->hid.rng_key[0],
+			exported->hid.rng_key[1], exported->hid.rng_key[2]);
+	}
+
+	kfree(req);
+	exported->unexport_sched = false;
+
+	/* Immediately clean up if the buffer has never been exported by the
+	 * importer (so no SGT is constructed on the importer side);
+	 * otherwise it is cleaned up later in remote sync when the final
+	 * release op is called (the importer does this only when there is
+	 * no consumer of locally exported FDs)
+	 */
+	if (exported->active == 0) {
+		dev_dbg(hy_drv_priv->dev,
+			"cleaning up buffer {id:%d key:%d %d %d} completely\n",
+			exported->hid.id, exported->hid.rng_key[0],
+			exported->hid.rng_key[1], exported->hid.rng_key[2]);
+
+		hyper_dmabuf_cleanup_sgt_info(exported, false);
+		hyper_dmabuf_remove_exported(exported->hid);
+
+		/* register hyper_dmabuf_id to the list for reuse */
+		hyper_dmabuf_store_hid(exported->hid);
+
+		if (exported->sz_priv > 0 && exported->priv)
+			kfree(exported->priv);
+
+		kfree(exported);
+	}
+}
+
+/* Schedule unexport of dmabuf.
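+ *
+ * A minimal userspace sketch (hedged: the device node path is not
+ * shown in this patch; field names follow
+ * struct ioctl_hyper_dmabuf_unexport as used below):
+ *
+ *	struct ioctl_hyper_dmabuf_unexport arg = {
+ *		.hid = hid,		// id returned by EXPORT_REMOTE
+ *		.delay_ms = 100,	// grace period before teardown
+ *	};
+ *	ioctl(fd, IOCTL_HYPER_DMABUF_UNEXPORT, &arg);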
+ */ +int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_unexport *unexport_attr = + (struct ioctl_hyper_dmabuf_unexport *)data; + struct exported_sgt_info *exported; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + + /* find dmabuf in export list */ + exported = hyper_dmabuf_find_exported(unexport_attr->hid); + + dev_dbg(hy_drv_priv->dev, + "scheduling unexport of buffer {id:%d key:%d %d %d}\n", + unexport_attr->hid.id, unexport_attr->hid.rng_key[0], + unexport_attr->hid.rng_key[1], unexport_attr->hid.rng_key[2]); + + /* failed to find corresponding entry in export list */ + if (exported == NULL) { + unexport_attr->status = -ENOENT; + return -ENOENT; + } + + if (exported->unexport_sched) + return 0; + + exported->unexport_sched = true; + INIT_DELAYED_WORK(&exported->unexport, delayed_unexport); + schedule_delayed_work(&exported->unexport, + msecs_to_jiffies(unexport_attr->delay_ms)); + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return 0; +} + +static int hyper_dmabuf_query_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_query *query_attr = + (struct ioctl_hyper_dmabuf_query *)data; + struct exported_sgt_info *exported = NULL; + struct imported_sgt_info *imported = NULL; + int ret = 0; + + if (HYPER_DMABUF_DOM_ID(query_attr->hid) == hy_drv_priv->domid) { + /* query for exported dmabuf */ + exported = hyper_dmabuf_find_exported(query_attr->hid); + if (exported) { + ret = hyper_dmabuf_query_exported(exported, + query_attr->item, + &query_attr->info); + } else { + dev_err(hy_drv_priv->dev, + "hid {id:%d key:%d %d %d} not in exp list\n", + query_attr->hid.id, + query_attr->hid.rng_key[0], + query_attr->hid.rng_key[1], + query_attr->hid.rng_key[2]); + return -ENOENT; + } + } else { + /* query for imported dmabuf */ + imported = hyper_dmabuf_find_imported(query_attr->hid); + if (imported) { + ret = hyper_dmabuf_query_imported(imported, + query_attr->item, + &query_attr->info); + } else { + dev_err(hy_drv_priv->dev, + "hid {id:%d key:%d %d %d} not in imp list\n", + query_attr->hid.id, + query_attr->hid.rng_key[0], + query_attr->hid.rng_key[1], + query_attr->hid.rng_key[2]); + return -ENOENT; + } + } + + return ret; +} + +const struct hyper_dmabuf_ioctl_desc hyper_dmabuf_ioctls[] = { + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_TX_CH_SETUP, + hyper_dmabuf_tx_ch_setup_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_RX_CH_SETUP, + hyper_dmabuf_rx_ch_setup_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_REMOTE, + hyper_dmabuf_export_remote_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_FD, + hyper_dmabuf_export_fd_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_UNEXPORT, + hyper_dmabuf_unexport_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_QUERY, + hyper_dmabuf_query_ioctl, 0), +}; + +long hyper_dmabuf_ioctl(struct file *filp, + unsigned int cmd, unsigned long param) +{ + const struct hyper_dmabuf_ioctl_desc *ioctl = NULL; + unsigned int nr = _IOC_NR(cmd); + int ret; + hyper_dmabuf_ioctl_t func; + char *kdata; + + if (nr >= ARRAY_SIZE(hyper_dmabuf_ioctls)) { + dev_err(hy_drv_priv->dev, "invalid ioctl\n"); + return -EINVAL; + } + + ioctl = &hyper_dmabuf_ioctls[nr]; + + func = ioctl->func; + + if (unlikely(!func)) { + dev_err(hy_drv_priv->dev, "no function\n"); + return -EINVAL; + } + + kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL); + if (!kdata) + return -ENOMEM; + + if (copy_from_user(kdata, (void __user *)param, + _IOC_SIZE(cmd)) != 0) { + dev_err(hy_drv_priv->dev, + 
"failed to copy from user arguments\n"); + ret = -EFAULT; + goto ioctl_error; + } + + ret = func(filp, kdata); + + if (copy_to_user((void __user *)param, kdata, + _IOC_SIZE(cmd)) != 0) { + dev_err(hy_drv_priv->dev, + "failed to copy to user arguments\n"); + ret = -EFAULT; + goto ioctl_error; + } + +ioctl_error: + kfree(kdata); + + return ret; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h new file mode 100644 index 000000000000..5991a87b194f --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h @@ -0,0 +1,50 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_IOCTL_H__ +#define __HYPER_DMABUF_IOCTL_H__ + +typedef int (*hyper_dmabuf_ioctl_t)(struct file *filp, void *data); + +struct hyper_dmabuf_ioctl_desc { + unsigned int cmd; + int flags; + hyper_dmabuf_ioctl_t func; + const char *name; +}; + +#define HYPER_DMABUF_IOCTL_DEF(ioctl, _func, _flags) \ + [_IOC_NR(ioctl)] = { \ + .cmd = ioctl, \ + .func = _func, \ + .flags = _flags, \ + .name = #ioctl \ + } + +long hyper_dmabuf_ioctl(struct file *filp, + unsigned int cmd, unsigned long param); + +int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data); + +#endif //__HYPER_DMABUF_IOCTL_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c new file mode 100644 index 000000000000..84cfb065bddd --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c @@ -0,0 +1,292 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_id.h" + +DECLARE_HASHTABLE(hyper_dmabuf_hash_imported, MAX_ENTRY_IMPORTED); +DECLARE_HASHTABLE(hyper_dmabuf_hash_exported, MAX_ENTRY_EXPORTED); + +#ifdef CONFIG_HYPER_DMABUF_SYSFS +static ssize_t hyper_dmabuf_imported_show(struct device *drv, + struct device_attribute *attr, + char *buf) +{ + struct list_entry_imported *info_entry; + int bkt; + ssize_t count = 0; + size_t total = 0; + + hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) { + hyper_dmabuf_id_t hid = info_entry->imported->hid; + int nents = info_entry->imported->nents; + bool valid = info_entry->imported->valid; + int num_importers = info_entry->imported->importers; + + total += nents; + count += scnprintf(buf + count, PAGE_SIZE - count, + "hid:{%d %d %d %d}, nent:%d, v:%c, numi:%d\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2], nents, (valid ? 't' : 'f'), + num_importers); + } + count += scnprintf(buf + count, PAGE_SIZE - count, + "total nents: %lu\n", total); + + return count; +} + +static ssize_t hyper_dmabuf_exported_show(struct device *drv, + struct device_attribute *attr, + char *buf) +{ + struct list_entry_exported *info_entry; + int bkt; + ssize_t count = 0; + size_t total = 0; + + hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) { + hyper_dmabuf_id_t hid = info_entry->exported->hid; + int nents = info_entry->exported->nents; + bool valid = info_entry->exported->valid; + int importer_exported = info_entry->exported->active; + + total += nents; + count += scnprintf(buf + count, PAGE_SIZE - count, + "hid:{%d %d %d %d}, nent:%d, v:%c, ie:%d\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2], nents, (valid ? 
't' : 'f'), + importer_exported); + } + count += scnprintf(buf + count, PAGE_SIZE - count, + "total nents: %lu\n", total); + + return count; +} + +static DEVICE_ATTR(imported, 0400, hyper_dmabuf_imported_show, NULL); +static DEVICE_ATTR(exported, 0400, hyper_dmabuf_exported_show, NULL); + +int hyper_dmabuf_register_sysfs(struct device *dev) +{ + int err; + + err = device_create_file(dev, &dev_attr_imported); + if (err < 0) + goto err1; + err = device_create_file(dev, &dev_attr_exported); + if (err < 0) + goto err2; + + return 0; +err2: + device_remove_file(dev, &dev_attr_imported); +err1: + return -1; +} + +int hyper_dmabuf_unregister_sysfs(struct device *dev) +{ + device_remove_file(dev, &dev_attr_imported); + device_remove_file(dev, &dev_attr_exported); + return 0; +} + +#endif + +int hyper_dmabuf_table_init(void) +{ + hash_init(hyper_dmabuf_hash_imported); + hash_init(hyper_dmabuf_hash_exported); + return 0; +} + +int hyper_dmabuf_table_destroy(void) +{ + /* TODO: cleanup hyper_dmabuf_hash_imported + * and hyper_dmabuf_hash_exported + */ + return 0; +} + +int hyper_dmabuf_register_exported(struct exported_sgt_info *exported) +{ + struct list_entry_exported *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->exported = exported; + + hash_add(hyper_dmabuf_hash_exported, &info_entry->node, + info_entry->exported->hid.id); + + return 0; +} + +int hyper_dmabuf_register_imported(struct imported_sgt_info *imported) +{ + struct list_entry_imported *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->imported = imported; + + hash_add(hyper_dmabuf_hash_imported, &info_entry->node, + info_entry->imported->hid.id); + + return 0; +} + +struct exported_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid) +{ + struct list_entry_exported *info_entry; + int bkt; + + hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) + /* checking hid.id first */ + if (info_entry->exported->hid.id == hid.id) { + /* then key is compared */ + if (hyper_dmabuf_hid_keycomp(info_entry->exported->hid, + hid)) + return info_entry->exported; + + /* if key is unmatched, given HID is invalid, + * so returning NULL + */ + break; + } + + return NULL; +} + +/* search for pre-exported sgt and return id of it if it exist */ +hyper_dmabuf_id_t hyper_dmabuf_find_hid_exported(struct dma_buf *dmabuf, + int domid) +{ + struct list_entry_exported *info_entry; + hyper_dmabuf_id_t hid = {-1, {0, 0, 0} }; + int bkt; + + hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) + if (info_entry->exported->dma_buf == dmabuf && + info_entry->exported->rdomid == domid) + return info_entry->exported->hid; + + return hid; +} + +struct imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid) +{ + struct list_entry_imported *info_entry; + int bkt; + + hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) + /* checking hid.id first */ + if (info_entry->imported->hid.id == hid.id) { + /* then key is compared */ + if (hyper_dmabuf_hid_keycomp(info_entry->imported->hid, + hid)) + return info_entry->imported; + /* if key is unmatched, given HID is invalid, + * so returning NULL + */ + break; + } + + return NULL; +} + +int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid) +{ + struct list_entry_exported *info_entry; + int bkt; + + hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) + /* checking hid.id first */ + if (info_entry->exported->hid.id 
== hid.id) { + /* then key is compared */ + if (hyper_dmabuf_hid_keycomp(info_entry->exported->hid, + hid)) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + break; + } + + return -ENOENT; +} + +int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid) +{ + struct list_entry_imported *info_entry; + int bkt; + + hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) + /* checking hid.id first */ + if (info_entry->imported->hid.id == hid.id) { + /* then key is compared */ + if (hyper_dmabuf_hid_keycomp(info_entry->imported->hid, + hid)) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + break; + } + + return -ENOENT; +} + +void hyper_dmabuf_foreach_exported( + void (*func)(struct exported_sgt_info *, void *attr), + void *attr) +{ + struct list_entry_exported *info_entry; + struct hlist_node *tmp; + int bkt; + + hash_for_each_safe(hyper_dmabuf_hash_exported, bkt, tmp, + info_entry, node) { + func(info_entry->exported, attr); + } +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h new file mode 100644 index 000000000000..f7102f5db75d --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h @@ -0,0 +1,71 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __HYPER_DMABUF_LIST_H__ +#define __HYPER_DMABUF_LIST_H__ + +#include "hyper_dmabuf_struct.h" + +/* number of bits to be used for exported dmabufs hash table */ +#define MAX_ENTRY_EXPORTED 7 +/* number of bits to be used for imported dmabufs hash table */ +#define MAX_ENTRY_IMPORTED 7 + +struct list_entry_exported { + struct exported_sgt_info *exported; + struct hlist_node node; +}; + +struct list_entry_imported { + struct imported_sgt_info *imported; + struct hlist_node node; +}; + +int hyper_dmabuf_table_init(void); + +int hyper_dmabuf_table_destroy(void); + +int hyper_dmabuf_register_exported(struct exported_sgt_info *info); + +/* search for pre-exported sgt and return id of it if it exist */ +hyper_dmabuf_id_t hyper_dmabuf_find_hid_exported(struct dma_buf *dmabuf, + int domid); + +int hyper_dmabuf_register_imported(struct imported_sgt_info *info); + +struct exported_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid); + +struct imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid); + +int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid); + +int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid); + +void hyper_dmabuf_foreach_exported(void (*func)(struct exported_sgt_info *, + void *attr), void *attr); + +int hyper_dmabuf_register_sysfs(struct device *dev); +int hyper_dmabuf_unregister_sysfs(struct device *dev); + +#endif /* __HYPER_DMABUF_LIST_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c new file mode 100644 index 000000000000..c5d99d2f12c9 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c @@ -0,0 +1,411 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
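+ *
+ * Overview (descriptive note): requests carry the command in req->cmd
+ * and its operands in req->op[]; hyper_dmabuf_msg_parse() handles
+ * NOTIFY_UNEXPORT and the EXPORT_FD handshake synchronously and defers
+ * everything else to the driver workqueue via cmd_process_work().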
+ *
+ * Authors:
+ * Dongwon Kim
+ * Mateusz Polrola
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_remote_sync.h"
+#include "hyper_dmabuf_event.h"
+#include "hyper_dmabuf_list.h"
+
+struct cmd_process {
+	struct work_struct work;
+	struct hyper_dmabuf_req *rq;
+	int domid;
+};
+
+void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req,
+			     enum hyper_dmabuf_command cmd, int *op)
+{
+	int i;
+
+	req->stat = HYPER_DMABUF_REQ_NOT_RESPONDED;
+	req->cmd = cmd;
+
+	switch (cmd) {
+	/* as exporter, commands to importer */
+	case HYPER_DMABUF_EXPORT:
+		/* exporting pages for dmabuf */
+		/* command : HYPER_DMABUF_EXPORT,
+		 * op0~op3 : hyper_dmabuf_id
+		 * op4 : number of pages to be shared
+		 * op5 : offset of data in the first page
+		 * op6 : length of data in the last page
+		 * op7 : top-level reference number for shared pages
+		 * op8 : size of private data (from op9)
+		 * op9 ~ : Driver-specific private data
+		 *	   (e.g. graphic buffer's meta info)
+		 */
+
+		memcpy(&req->op[0], &op[0], 9 * sizeof(int) + op[8]);
+		break;
+
+	case HYPER_DMABUF_NOTIFY_UNEXPORT:
+		/* destroy sg_list for hyper_dmabuf_id on remote side */
+		/* command : HYPER_DMABUF_NOTIFY_UNEXPORT,
+		 * op0~op3 : hyper_dmabuf_id_t hid
+		 */
+
+		for (i = 0; i < 4; i++)
+			req->op[i] = op[i];
+		break;
+
+	case HYPER_DMABUF_EXPORT_FD:
+	case HYPER_DMABUF_EXPORT_FD_FAILED:
+		/* dmabuf fd is being created on imported side or importing
+		 * failed
+		 *
+		 * command : HYPER_DMABUF_EXPORT_FD or
+		 *	     HYPER_DMABUF_EXPORT_FD_FAILED,
+		 * op0~op3 : hyper_dmabuf_id
+		 */
+
+		for (i = 0; i < 4; i++)
+			req->op[i] = op[i];
+		break;
+
+	case HYPER_DMABUF_OPS_TO_REMOTE:
+		/* notifying dmabuf map/unmap to importer (probably not needed)
+		 * for dmabuf synchronization
+		 */
+		break;
+
+	case HYPER_DMABUF_OPS_TO_SOURCE:
+		/* notifying dmabuf map/unmap to exporter; map will make
+		 * the driver do shadow mapping or unmapping for
+		 * synchronization with the original exporter (e.g. i915)
+		 *
+		 * command : HYPER_DMABUF_OPS_TO_SOURCE,
+		 * op0~3 : hyper_dmabuf_id
+		 * op4 : enum hyper_dmabuf_ops (attach/detach/map/unmap/...)
+		 */
+		for (i = 0; i < 5; i++)
+			req->op[i] = op[i];
+		break;
+
+	default:
+		/* no command found */
+		return;
+	}
+}
+
+static void cmd_process_work(struct work_struct *work)
+{
+	struct imported_sgt_info *imported;
+	struct cmd_process *proc = container_of(work,
+						struct cmd_process, work);
+	struct hyper_dmabuf_req *req;
+	hyper_dmabuf_id_t hid;
+	int i;
+
+	req = proc->rq;
+
+	switch (req->cmd) {
+	case HYPER_DMABUF_EXPORT:
+		/* exporting pages for dmabuf */
+		/* command : HYPER_DMABUF_EXPORT,
+		 * op0~op3 : hyper_dmabuf_id
+		 * op4 : number of pages to be shared
+		 * op5 : offset of data in the first page
+		 * op6 : length of data in the last page
+		 * op7 : top-level reference number for shared pages
+		 * op8 : size of private data (from op9)
+		 * op9 ~ : Driver-specific private data
+		 *	   (e.g. graphic buffer's meta info)
+		 */
+
+		/* if nents == 0, this is a message only for priv
+		 * synchronization on an existing imported_sgt_info,
+		 * so no new entry is created
+		 */
+		if (req->op[4] == 0) {
+			hyper_dmabuf_id_t exist = {req->op[0],
+						   {req->op[1], req->op[2],
+						   req->op[3] } };
+
+			imported = hyper_dmabuf_find_imported(exist);
+
+			if (!imported) {
+				dev_err(hy_drv_priv->dev,
+					"Can't find imported sgt_info\n");
+				break;
+			}
+
+			/* if size of new private data is different,
+			 * we reallocate it.
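+			 * (per the request layout documented above,
+			 * op8 carries the private data size and op9
+			 * onwards the payload itself)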
+ */ + if (imported->sz_priv != req->op[8]) { + kfree(imported->priv); + imported->sz_priv = req->op[8]; + imported->priv = kcalloc(1, req->op[8], + GFP_KERNEL); + if (!imported->priv) { + /* set it invalid */ + imported->valid = 0; + break; + } + } + + /* updating priv data */ + memcpy(imported->priv, &req->op[9], req->op[8]); + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + /* generating import event */ + hyper_dmabuf_import_event(imported->hid); +#endif + + break; + } + + imported = kcalloc(1, sizeof(*imported), GFP_KERNEL); + + if (!imported) + break; + + imported->sz_priv = req->op[8]; + imported->priv = kcalloc(1, req->op[8], GFP_KERNEL); + + if (!imported->priv) { + kfree(imported); + break; + } + + imported->hid.id = req->op[0]; + + for (i = 0; i < 3; i++) + imported->hid.rng_key[i] = req->op[i+1]; + + imported->nents = req->op[4]; + imported->frst_ofst = req->op[5]; + imported->last_len = req->op[6]; + imported->ref_handle = req->op[7]; + + dev_dbg(hy_drv_priv->dev, "DMABUF was exported\n"); + dev_dbg(hy_drv_priv->dev, "\thid{id:%d key:%d %d %d}\n", + req->op[0], req->op[1], req->op[2], + req->op[3]); + dev_dbg(hy_drv_priv->dev, "\tnents %d\n", req->op[4]); + dev_dbg(hy_drv_priv->dev, "\tfirst offset %d\n", req->op[5]); + dev_dbg(hy_drv_priv->dev, "\tlast len %d\n", req->op[6]); + dev_dbg(hy_drv_priv->dev, "\tgrefid %d\n", req->op[7]); + + memcpy(imported->priv, &req->op[9], req->op[8]); + + imported->valid = true; + hyper_dmabuf_register_imported(imported); + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + /* generating import event */ + hyper_dmabuf_import_event(imported->hid); +#endif + + break; + + case HYPER_DMABUF_OPS_TO_SOURCE: + /* notifying dmabuf map/unmap to exporter, map will + * make the driver to do shadow mapping + * or unmapping for synchronization with original + * exporter (e.g. i915) + * + * command : DMABUF_OPS_TO_SOURCE. 
+		 * op0~3 : hyper_dmabuf_id
+		 * op4 : enum hyper_dmabuf_ops {....}
+		 */
+		dev_dbg(hy_drv_priv->dev,
+			"%s: HYPER_DMABUF_OPS_TO_SOURCE\n", __func__);
+
+		hid.id = req->op[0];
+		hid.rng_key[0] = req->op[1];
+		hid.rng_key[1] = req->op[2];
+		hid.rng_key[2] = req->op[3];
+		hyper_dmabuf_remote_sync(hid, req->op[4]);
+
+		break;
+
+	case HYPER_DMABUF_OPS_TO_REMOTE:
+		/* notifying dmabuf map/unmap to importer
+		 * (probably not needed) for dmabuf synchronization
+		 */
+		break;
+
+	default:
+		/* shouldn't get here */
+		break;
+	}
+
+	kfree(req);
+	kfree(proc);
+}
+
+int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
+{
+	struct cmd_process *proc;
+	struct hyper_dmabuf_req *temp_req;
+	struct imported_sgt_info *imported;
+	struct exported_sgt_info *exported;
+	hyper_dmabuf_id_t hid;
+
+	if (!req) {
+		dev_err(hy_drv_priv->dev, "request is NULL\n");
+		return -EINVAL;
+	}
+
+	hid.id = req->op[0];
+	hid.rng_key[0] = req->op[1];
+	hid.rng_key[1] = req->op[2];
+	hid.rng_key[2] = req->op[3];
+
+	if ((req->cmd < HYPER_DMABUF_EXPORT) ||
+	    (req->cmd > HYPER_DMABUF_OPS_TO_SOURCE)) {
+		dev_err(hy_drv_priv->dev, "invalid command\n");
+		return -EINVAL;
+	}
+
+	req->stat = HYPER_DMABUF_REQ_PROCESSED;
+
+	/* HYPER_DMABUF_NOTIFY_UNEXPORT requires immediate
+	 * follow-up so can't be processed in workqueue
+	 */
+	if (req->cmd == HYPER_DMABUF_NOTIFY_UNEXPORT) {
+		/* destroy sg_list for hyper_dmabuf_id on remote side */
+		/* command : HYPER_DMABUF_NOTIFY_UNEXPORT,
+		 * op0~3 : hyper_dmabuf_id
+		 */
+		dev_dbg(hy_drv_priv->dev,
+			"processing HYPER_DMABUF_NOTIFY_UNEXPORT\n");
+
+		imported = hyper_dmabuf_find_imported(hid);
+
+		if (imported) {
+			/* if anything is still using dma_buf */
+			if (imported->importers) {
+				/* Buffer is still in use, just mark that
+				 * it should not be allowed to export its fd
+				 * anymore.
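+				 * The entry is freed later from the release
+				 * op of the shadow dma-buf, once the last
+				 * locally exported fd is closed (see
+				 * hyper_dmabuf_ops_release, which drops
+				 * invalid entries with no importers left).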
+ */ + imported->valid = false; + } else { + /* No one is using buffer, remove it from + * imported list + */ + hyper_dmabuf_remove_imported(hid); + kfree(imported->priv); + kfree(imported); + } + } else { + req->stat = HYPER_DMABUF_REQ_ERROR; + } + + return req->cmd; + } + + /* synchronous dma_buf_fd export */ + if (req->cmd == HYPER_DMABUF_EXPORT_FD) { + /* find a corresponding SGT for the id */ + dev_dbg(hy_drv_priv->dev, + "HYPER_DMABUF_EXPORT_FD for {id:%d key:%d %d %d}\n", + hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]); + + exported = hyper_dmabuf_find_exported(hid); + + if (!exported) { + dev_err(hy_drv_priv->dev, + "buffer {id:%d key:%d %d %d} not found\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2]); + + req->stat = HYPER_DMABUF_REQ_ERROR; + } else if (!exported->valid) { + dev_dbg(hy_drv_priv->dev, + "Buffer no longer valid {id:%d key:%d %d %d}\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2]); + + req->stat = HYPER_DMABUF_REQ_ERROR; + } else { + dev_dbg(hy_drv_priv->dev, + "Buffer still valid {id:%d key:%d %d %d}\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2]); + + exported->active++; + req->stat = HYPER_DMABUF_REQ_PROCESSED; + } + return req->cmd; + } + + if (req->cmd == HYPER_DMABUF_EXPORT_FD_FAILED) { + dev_dbg(hy_drv_priv->dev, + "HYPER_DMABUF_EXPORT_FD_FAILED for {id:%d key:%d %d %d}\n", + hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]); + + exported = hyper_dmabuf_find_exported(hid); + + if (!exported) { + dev_err(hy_drv_priv->dev, + "buffer {id:%d key:%d %d %d} not found\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2]); + + req->stat = HYPER_DMABUF_REQ_ERROR; + } else { + exported->active--; + req->stat = HYPER_DMABUF_REQ_PROCESSED; + } + return req->cmd; + } + + dev_dbg(hy_drv_priv->dev, + "%s: putting request to workqueue\n", __func__); + temp_req = kmalloc(sizeof(*temp_req), GFP_ATOMIC); + + if (!temp_req) + return -ENOMEM; + + memcpy(temp_req, req, sizeof(*temp_req)); + + proc = kcalloc(1, sizeof(struct cmd_process), GFP_ATOMIC); + + if (!proc) { + kfree(temp_req); + return -ENOMEM; + } + + proc->rq = temp_req; + proc->domid = domid; + + INIT_WORK(&(proc->work), cmd_process_work); + + queue_work(hy_drv_priv->work_queue, &(proc->work)); + + return req->cmd; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h new file mode 100644 index 000000000000..9c8a76bf261e --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h @@ -0,0 +1,87 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_MSG_H__ +#define __HYPER_DMABUF_MSG_H__ + +#define MAX_NUMBER_OF_OPERANDS 64 + +struct hyper_dmabuf_req { + unsigned int req_id; + unsigned int stat; + unsigned int cmd; + unsigned int op[MAX_NUMBER_OF_OPERANDS]; +}; + +struct hyper_dmabuf_resp { + unsigned int resp_id; + unsigned int stat; + unsigned int cmd; + unsigned int op[MAX_NUMBER_OF_OPERANDS]; +}; + +enum hyper_dmabuf_command { + HYPER_DMABUF_EXPORT = 0x10, + HYPER_DMABUF_EXPORT_FD, + HYPER_DMABUF_EXPORT_FD_FAILED, + HYPER_DMABUF_NOTIFY_UNEXPORT, + HYPER_DMABUF_OPS_TO_REMOTE, + HYPER_DMABUF_OPS_TO_SOURCE, +}; + +enum hyper_dmabuf_ops { + HYPER_DMABUF_OPS_ATTACH = 0x1000, + HYPER_DMABUF_OPS_DETACH, + HYPER_DMABUF_OPS_MAP, + HYPER_DMABUF_OPS_UNMAP, + HYPER_DMABUF_OPS_RELEASE, + HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS, + HYPER_DMABUF_OPS_END_CPU_ACCESS, + HYPER_DMABUF_OPS_KMAP_ATOMIC, + HYPER_DMABUF_OPS_KUNMAP_ATOMIC, + HYPER_DMABUF_OPS_KMAP, + HYPER_DMABUF_OPS_KUNMAP, + HYPER_DMABUF_OPS_MMAP, + HYPER_DMABUF_OPS_VMAP, + HYPER_DMABUF_OPS_VUNMAP, +}; + +enum hyper_dmabuf_req_feedback { + HYPER_DMABUF_REQ_PROCESSED = 0x100, + HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP, + HYPER_DMABUF_REQ_ERROR, + HYPER_DMABUF_REQ_NOT_RESPONDED +}; + +/* create a request packet with given command and operands */ +void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req, + enum hyper_dmabuf_command command, + int *operands); + +/* parse incoming request packet (or response) and take + * appropriate actions for those + */ +int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req); + +#endif // __HYPER_DMABUF_MSG_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c new file mode 100644 index 000000000000..10b5510b3816 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c @@ -0,0 +1,402 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
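+ *
+ * Overview (descriptive note): this file implements dma_buf_ops for the
+ * shadow dma-buf built around an imported buffer; each op forwards a
+ * HYPER_DMABUF_OPS_TO_SOURCE sync_request to the exporting domain so the
+ * original exporter can mirror attach/map/unmap/release activity.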
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_struct.h" +#include "hyper_dmabuf_ops.h" +#include "hyper_dmabuf_sgl_proc.h" +#include "hyper_dmabuf_id.h" +#include "hyper_dmabuf_msg.h" +#include "hyper_dmabuf_list.h" + +#define WAIT_AFTER_SYNC_REQ 0 +#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t)) + +static int dmabuf_refcount(struct dma_buf *dma_buf) +{ + if ((dma_buf != NULL) && (dma_buf->file != NULL)) + return file_count(dma_buf->file); + + return -EINVAL; +} + +static int sync_request(hyper_dmabuf_id_t hid, int dmabuf_ops) +{ + struct hyper_dmabuf_req *req; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + int op[5]; + int i; + int ret; + + op[0] = hid.id; + + for (i = 0; i < 3; i++) + op[i+1] = hid.rng_key[i]; + + op[4] = dmabuf_ops; + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) + return -ENOMEM; + + hyper_dmabuf_create_req(req, HYPER_DMABUF_OPS_TO_SOURCE, &op[0]); + + /* send request and wait for a response */ + ret = bknd_ops->send_req(HYPER_DMABUF_DOM_ID(hid), req, + WAIT_AFTER_SYNC_REQ); + + if (ret < 0) { + dev_dbg(hy_drv_priv->dev, + "dmabuf sync request failed:%d\n", req->op[4]); + } + + kfree(req); + + return ret; +} + +static int hyper_dmabuf_ops_attach(struct dma_buf *dmabuf, + struct device *dev, + struct dma_buf_attachment *attach) +{ + struct imported_sgt_info *imported; + int ret; + + if (!attach->dmabuf->priv) + return -EINVAL; + + imported = (struct imported_sgt_info *)attach->dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_ATTACH); + + return ret; +} + +static void hyper_dmabuf_ops_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attach) +{ + struct imported_sgt_info *imported; + + if (!attach->dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)attach->dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_DETACH); +} + +static struct sg_table *hyper_dmabuf_ops_map( + struct dma_buf_attachment *attachment, + enum dma_data_direction dir) +{ + struct sg_table *st; + struct imported_sgt_info *imported; + struct pages_info *pg_info; + int ret; + + if (!attachment->dmabuf->priv) + return NULL; + + imported = (struct imported_sgt_info *)attachment->dmabuf->priv; + + /* extract pages from sgt */ + pg_info = hyper_dmabuf_ext_pgs(imported->sgt); + + if (!pg_info) + return NULL; + + /* create a new sg_table with extracted pages */ + st = hyper_dmabuf_create_sgt(pg_info->pgs, pg_info->frst_ofst, + pg_info->last_len, pg_info->nents); + if (!st) + goto err_free_sg; + + if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) + goto err_free_sg; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_MAP); + + kfree(pg_info->pgs); + kfree(pg_info); + + return st; + +err_free_sg: + if (st) { + sg_free_table(st); + kfree(st); + } + + kfree(pg_info->pgs); + kfree(pg_info); + + return NULL; +} + +static void hyper_dmabuf_ops_unmap(struct dma_buf_attachment *attachment, + struct sg_table *sg, + enum dma_data_direction dir) +{ + struct imported_sgt_info *imported; + + if (!attachment->dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)attachment->dmabuf->priv; + + dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); + + sg_free_table(sg); + kfree(sg); + + sync_request(imported->hid, HYPER_DMABUF_OPS_UNMAP); +} + +static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf) +{ + struct imported_sgt_info *imported; + struct hyper_dmabuf_bknd_ops *bknd_ops = 
hy_drv_priv->bknd_ops; + int finish; + + if (!dma_buf->priv) + return; + + imported = (struct imported_sgt_info *)dma_buf->priv; + + if (!dmabuf_refcount(imported->dma_buf)) + imported->dma_buf = NULL; + + imported->importers--; + + if (imported->importers == 0) { + bknd_ops->unmap_shared_pages(&imported->refs_info, + imported->nents); + + if (imported->sgt) { + sg_free_table(imported->sgt); + kfree(imported->sgt); + imported->sgt = NULL; + } + } + + finish = imported && !imported->valid && + !imported->importers; + + sync_request(imported->hid, HYPER_DMABUF_OPS_RELEASE); + + /* + * Check if buffer is still valid and if not remove it + * from imported list. That has to be done after sending + * sync request + */ + if (finish) { + hyper_dmabuf_remove_imported(imported->hid); + kfree(imported->priv); + kfree(imported); + } +} + +static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction dir) +{ + struct imported_sgt_info *imported; + int ret; + + if (!dmabuf->priv) + return -EINVAL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS); + + return ret; +} + +static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction dir) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return -EINVAL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + return sync_request(imported->hid, HYPER_DMABUF_OPS_END_CPU_ACCESS); +} + +static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf *dmabuf, + unsigned long pgnum) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return NULL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_KMAP_ATOMIC); + + /* TODO: NULL for now. Need to return the addr of mapped region */ + return NULL; +} + +static void hyper_dmabuf_ops_kunmap_atomic(struct dma_buf *dmabuf, + unsigned long pgnum, void *vaddr) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_KUNMAP_ATOMIC); +} + +static void *hyper_dmabuf_ops_kmap(struct dma_buf *dmabuf, unsigned long pgnum) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return NULL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_KMAP); + + /* for now NULL.. 
need to return the address of mapped region */ + return NULL; +} + +static void hyper_dmabuf_ops_kunmap(struct dma_buf *dmabuf, unsigned long pgnum, + void *vaddr) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_KUNMAP); +} + +static int hyper_dmabuf_ops_mmap(struct dma_buf *dmabuf, + struct vm_area_struct *vma) +{ + struct imported_sgt_info *imported; + int ret; + + if (!dmabuf->priv) + return -EINVAL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_MMAP); + + return ret; +} + +static void *hyper_dmabuf_ops_vmap(struct dma_buf *dmabuf) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return NULL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_VMAP); + + return NULL; +} + +static void hyper_dmabuf_ops_vunmap(struct dma_buf *dmabuf, void *vaddr) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_VUNMAP); +} + +static const struct dma_buf_ops hyper_dmabuf_ops = { + .attach = hyper_dmabuf_ops_attach, + .detach = hyper_dmabuf_ops_detach, + .map_dma_buf = hyper_dmabuf_ops_map, + .unmap_dma_buf = hyper_dmabuf_ops_unmap, + .release = hyper_dmabuf_ops_release, + .begin_cpu_access = hyper_dmabuf_ops_begin_cpu_access, + .end_cpu_access = hyper_dmabuf_ops_end_cpu_access, + .map_atomic = hyper_dmabuf_ops_kmap_atomic, + .unmap_atomic = hyper_dmabuf_ops_kunmap_atomic, + .map = hyper_dmabuf_ops_kmap, + .unmap = hyper_dmabuf_ops_kunmap, + .mmap = hyper_dmabuf_ops_mmap, + .vmap = hyper_dmabuf_ops_vmap, + .vunmap = hyper_dmabuf_ops_vunmap, +}; + +/* exporting dmabuf as fd */ +int hyper_dmabuf_export_fd(struct imported_sgt_info *imported, int flags) +{ + int fd = -1; + + /* call hyper_dmabuf_export_dmabuf and create + * and bind a handle for it then release + */ + hyper_dmabuf_export_dma_buf(imported); + + if (imported->dma_buf) + fd = dma_buf_fd(imported->dma_buf, flags); + + return fd; +} + +void hyper_dmabuf_export_dma_buf(struct imported_sgt_info *imported) +{ + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &hyper_dmabuf_ops; + + /* multiple of PAGE_SIZE, not considering offset */ + exp_info.size = imported->sgt->nents * PAGE_SIZE; + exp_info.flags = /* not sure about flag */ 0; + exp_info.priv = imported; + + imported->dma_buf = dma_buf_export(&exp_info); +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h new file mode 100644 index 000000000000..c5505a41f0fe --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h @@ -0,0 +1,32 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_OPS_H__
+#define __HYPER_DMABUF_OPS_H__
+
+int hyper_dmabuf_export_fd(struct imported_sgt_info *imported, int flags);
+
+void hyper_dmabuf_export_dma_buf(struct imported_sgt_info *imported);
+
+#endif /* __HYPER_DMABUF_OPS_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c
new file mode 100644
index 000000000000..1f2f56b1162d
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
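+ *
+ * Both query helpers below follow the same contract: the caller passes a
+ * HYPER_DMABUF_QUERY_* code and a pointer that is either filled with the
+ * result or, for PRIV_INFO, treated as a user pointer to copy into. A
+ * minimal sketch of a caller (hypothetical, not part of this patch):
+ */
+#if 0	/* illustrative sketch only */
+static int example_query_size(struct exported_sgt_info *exported,
+			      unsigned long *size_out)
+{
+	/* on success, *size_out holds the buffer size in bytes */
+	return hyper_dmabuf_query_exported(exported,
+					   HYPER_DMABUF_QUERY_SIZE,
+					   size_out);
+}
+#endif
+/*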
+ *
+ * Authors:
+ *    Dongwon Kim
+ *    Mateusz Polrola
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/uaccess.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_id.h"
+
+#define HYPER_DMABUF_SIZE(nents, first_offset, last_len) \
+	((nents)*PAGE_SIZE - (first_offset) - PAGE_SIZE + (last_len))
+
+int hyper_dmabuf_query_exported(struct exported_sgt_info *exported,
+				int query, unsigned long *info)
+{
+	switch (query) {
+	case HYPER_DMABUF_QUERY_TYPE:
+		*info = EXPORTED;
+		break;
+
+	/* exporting domain of this specific dmabuf */
+	case HYPER_DMABUF_QUERY_EXPORTER:
+		*info = HYPER_DMABUF_DOM_ID(exported->hid);
+		break;
+
+	/* importing domain of this specific dmabuf */
+	case HYPER_DMABUF_QUERY_IMPORTER:
+		*info = exported->rdomid;
+		break;
+
+	/* size of dmabuf in bytes */
+	case HYPER_DMABUF_QUERY_SIZE:
+		*info = exported->dma_buf->size;
+		break;
+
+	/* whether the buffer is used by importer */
+	case HYPER_DMABUF_QUERY_BUSY:
+		*info = (exported->active > 0);
+		break;
+
+	/* whether the buffer is unexported */
+	case HYPER_DMABUF_QUERY_UNEXPORTED:
+		*info = !exported->valid;
+		break;

+	/* whether the buffer is scheduled to be unexported */
+	case HYPER_DMABUF_QUERY_DELAYED_UNEXPORTED:
+		*info = exported->unexport_sched;
+		break;
+
+	/* size of private info attached to buffer */
+	case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE:
+		*info = exported->sz_priv;
+		break;
+
+	/* copy private info attached to buffer */
+	case HYPER_DMABUF_QUERY_PRIV_INFO:
+		if (exported->sz_priv > 0) {
+			int n;
+
+			n = copy_to_user((void __user *) *info,
+					 exported->priv,
+					 exported->sz_priv);
+			if (n != 0)
+				return -EINVAL;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int hyper_dmabuf_query_imported(struct imported_sgt_info *imported,
+				int query, unsigned long *info)
+{
+	switch (query) {
+	case HYPER_DMABUF_QUERY_TYPE:
+		*info = IMPORTED;
+		break;
+
+	/* exporting domain of this specific dmabuf */
+	case HYPER_DMABUF_QUERY_EXPORTER:
+		*info = HYPER_DMABUF_DOM_ID(imported->hid);
+		break;
+
+	/* importing domain of this specific dmabuf */
+	case HYPER_DMABUF_QUERY_IMPORTER:
+		*info = hy_drv_priv->domid;
+		break;
+
+	/* size of dmabuf in bytes */
+	case HYPER_DMABUF_QUERY_SIZE:
+		if (imported->dma_buf) {
+			/* if local dma_buf is created (if it's
+			 * ever mapped), retrieve it directly
+			 * from struct dma_buf *
+			 */
+			*info = imported->dma_buf->size;
+		} else {
+			/* calculate it from given nents, frst_ofst
+			 * and last_len
+			 */
+			*info = HYPER_DMABUF_SIZE(imported->nents,
+						  imported->frst_ofst,
+						  imported->last_len);
+		}
+		break;
+
+	/* whether the buffer is used or not */
+	case HYPER_DMABUF_QUERY_BUSY:
+		/* checks if it's used by importer */
+		*info = (imported->importers > 0);
+		break;
+
+	/* whether the buffer is unexported */
+	case HYPER_DMABUF_QUERY_UNEXPORTED:
+		*info = !imported->valid;
+		break;
+
+	/* size of private info attached to buffer */
+	case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE:
+		*info = imported->sz_priv;
+		break;
+
+	/* copy private info attached to buffer */
+	case HYPER_DMABUF_QUERY_PRIV_INFO:
+		if (imported->sz_priv > 0) {
+			int n;
+
+			n = copy_to_user((void __user *)*info,
+					 imported->priv,
+					 imported->sz_priv);
+			if (n != 0)
+				return -EINVAL;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h
new file mode 100644
index 000000000000..65ae738f8f53
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h
@@ -0,0 +1,10 @@
+#ifndef
__HYPER_DMABUF_QUERY_H__
+#define __HYPER_DMABUF_QUERY_H__
+
+int hyper_dmabuf_query_imported(struct imported_sgt_info *imported,
+				int query, unsigned long *info);
+
+int hyper_dmabuf_query_exported(struct exported_sgt_info *exported,
+				int query, unsigned long *info);
+
+#endif // __HYPER_DMABUF_QUERY_H__
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c
new file mode 100644
index 000000000000..a82fd7b087b8
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dongwon Kim
+ *    Mateusz Polrola
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_list.h"
+#include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_id.h"
+#include "hyper_dmabuf_sgl_proc.h"
+
+/* Whenever the importer performs a dma-buf operation in its (remote)
+ * domain, a notification is sent to the exporter so that the exporter
+ * can issue the equivalent dma operation on the original dma-buf,
+ * giving indirect synchronization via these shadow operations.
+ *
+ * All pointers and references (e.g. struct sg_table *,
+ * struct dma_buf_attachment) created by these operations on the
+ * exporter's side are kept on per-buffer stacks (implemented as
+ * circular linked lists) so they can be looked up again when the
+ * matching unmapping operation is invoked to free them.
+ *
+ * The very first element at the bottom of each stack is the one
+ * created when the initial export was issued, so it must not be
+ * modified or released by this function.
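 *
 * For example, an importer-side dma_buf_map_attachment() arrives here
 * as HYPER_DMABUF_OPS_MAP and pushes a new sgt_list entry onto
 * exported->active_sgts; the matching HYPER_DMABUF_OPS_UNMAP later pops
 * and frees that same entry.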
+ */
+int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
+{
+	struct exported_sgt_info *exported;
+	struct sgt_list *sgtl;
+	struct attachment_list *attachl;
+	struct kmap_vaddr_list *va_kmapl;
+	struct vmap_vaddr_list *va_vmapl;
+	int ret;
+
+	/* find a corresponding SGT for the id */
+	exported = hyper_dmabuf_find_exported(hid);
+
+	if (!exported) {
+		dev_err(hy_drv_priv->dev,
+			"dmabuf remote sync::can't find exported list\n");
+		return -ENOENT;
+	}
+
+	switch (ops) {
+	case HYPER_DMABUF_OPS_ATTACH:
+		attachl = kcalloc(1, sizeof(*attachl), GFP_KERNEL);
+
+		if (!attachl)
+			return -ENOMEM;
+
+		attachl->attach = dma_buf_attach(exported->dma_buf,
+						 hy_drv_priv->dev);
+
+		/* dma_buf_attach() reports failure as ERR_PTR(), not NULL */
+		if (IS_ERR_OR_NULL(attachl->attach)) {
+			kfree(attachl);
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_ATTACH\n");
+			return -ENOMEM;
+		}
+
+		list_add(&attachl->list, &exported->active_attached->list);
+		break;
+
+	case HYPER_DMABUF_OPS_DETACH:
+		if (list_empty(&exported->active_attached->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_DETACH\n");
+			dev_err(hy_drv_priv->dev,
+				"no more dmabuf attachment left to be detached\n");
+			return -EFAULT;
+		}
+
+		attachl = list_first_entry(&exported->active_attached->list,
+					   struct attachment_list, list);
+
+		dma_buf_detach(exported->dma_buf, attachl->attach);
+		list_del(&attachl->list);
+		kfree(attachl);
+		break;
+
+	case HYPER_DMABUF_OPS_MAP:
+		if (list_empty(&exported->active_attached->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_MAP\n");
+			dev_err(hy_drv_priv->dev,
+				"no more dmabuf attachment left to be mapped\n");
+			return -EFAULT;
+		}
+
+		attachl = list_first_entry(&exported->active_attached->list,
+					   struct attachment_list, list);
+
+		sgtl = kcalloc(1, sizeof(*sgtl), GFP_KERNEL);
+
+		if (!sgtl)
+			return -ENOMEM;
+
+		sgtl->sgt = dma_buf_map_attachment(attachl->attach,
+						   DMA_BIDIRECTIONAL);
+
+		/* dma_buf_map_attachment() also returns ERR_PTR() on failure */
+		if (IS_ERR_OR_NULL(sgtl->sgt)) {
+			kfree(sgtl);
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_MAP\n");
+			return -ENOMEM;
+		}
+		list_add(&sgtl->list, &exported->active_sgts->list);
+		break;
+
+	case HYPER_DMABUF_OPS_UNMAP:
+		if (list_empty(&exported->active_sgts->list) ||
+		    list_empty(&exported->active_attached->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_UNMAP\n");
+			dev_err(hy_drv_priv->dev,
+				"no SGT or attach left to be unmapped\n");
+			return -EFAULT;
+		}
+
+		attachl = list_first_entry(&exported->active_attached->list,
+					   struct attachment_list, list);
+		sgtl = list_first_entry(&exported->active_sgts->list,
+					struct sgt_list, list);
+
+		dma_buf_unmap_attachment(attachl->attach, sgtl->sgt,
+					 DMA_BIDIRECTIONAL);
+		list_del(&sgtl->list);
+		kfree(sgtl);
+		break;
+
+	case HYPER_DMABUF_OPS_RELEASE:
+		dev_dbg(hy_drv_priv->dev,
+			"Buffer {id:%d key:%d %d %d} released, ref left: %d\n",
+			exported->hid.id, exported->hid.rng_key[0],
+			exported->hid.rng_key[1], exported->hid.rng_key[2],
+			exported->active - 1);
+
+		exported->active--;
+
+		/* If there are still importers just break, if no then
+		 * continue with final cleanup
+		 */
+		if (exported->active)
+			break;
+
+		/* Importer just released buffer fd, check if there is
+		 * any other importer still using it.
+		 * If not and buffer was unexported, clean up shared
+		 * data and remove that buffer.
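+		 *
+		 * Note that exported->active appears to act as a
+		 * per-export usage count: it is decremented on every
+		 * RELEASE above, and the final cleanup below only runs
+		 * once it reaches zero and the buffer is no longer
+		 * valid, i.e. when the last importer-side fd is closed.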
+		 */
+		dev_dbg(hy_drv_priv->dev,
+			"Buffer {id:%d key:%d %d %d} final released\n",
+			exported->hid.id, exported->hid.rng_key[0],
+			exported->hid.rng_key[1], exported->hid.rng_key[2]);
+
+		if (!exported->valid && !exported->active &&
+		    !exported->unexport_sched) {
+			hyper_dmabuf_cleanup_sgt_info(exported, false);
+			hyper_dmabuf_remove_exported(hid);
+			kfree(exported);
+			/* store hyper_dmabuf_id in the list for reuse */
+			hyper_dmabuf_store_hid(hid);
+		}
+
+		break;
+
+	case HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS:
+		ret = dma_buf_begin_cpu_access(exported->dma_buf,
+					       DMA_BIDIRECTIONAL);
+		if (ret) {
+			dev_err(hy_drv_priv->dev,
+				"HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS\n");
+			return ret;
+		}
+		break;
+
+	case HYPER_DMABUF_OPS_END_CPU_ACCESS:
+		ret = dma_buf_end_cpu_access(exported->dma_buf,
+					     DMA_BIDIRECTIONAL);
+		if (ret) {
+			dev_err(hy_drv_priv->dev,
+				"HYPER_DMABUF_OPS_END_CPU_ACCESS\n");
+			return ret;
+		}
+		break;
+
+	case HYPER_DMABUF_OPS_KMAP_ATOMIC:
+	case HYPER_DMABUF_OPS_KMAP:
+		va_kmapl = kcalloc(1, sizeof(*va_kmapl), GFP_KERNEL);
+		if (!va_kmapl)
+			return -ENOMEM;
+
+		/* dummy kmapping of 1 page */
+		if (ops == HYPER_DMABUF_OPS_KMAP_ATOMIC)
+			va_kmapl->vaddr = dma_buf_kmap_atomic(
+						exported->dma_buf, 1);
+		else
+			va_kmapl->vaddr = dma_buf_kmap(
+						exported->dma_buf, 1);
+
+		if (!va_kmapl->vaddr) {
+			kfree(va_kmapl);
+			dev_err(hy_drv_priv->dev,
+				"HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n");
+			return -ENOMEM;
+		}
+		list_add(&va_kmapl->list, &exported->va_kmapped->list);
+		break;
+
+	case HYPER_DMABUF_OPS_KUNMAP_ATOMIC:
+	case HYPER_DMABUF_OPS_KUNMAP:
+		if (list_empty(&exported->va_kmapped->list)) {
+			dev_err(hy_drv_priv->dev,
+				"HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
+			dev_err(hy_drv_priv->dev,
+				"no more dmabuf VA to be freed\n");
+			return -EFAULT;
+		}
+
+		va_kmapl = list_first_entry(&exported->va_kmapped->list,
+					    struct kmap_vaddr_list, list);
+		if (!va_kmapl->vaddr) {
+			dev_err(hy_drv_priv->dev,
+				"HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
+			/* a NULL vaddr here is a bookkeeping error, not
+			 * an ERR_PTR() value, so report it explicitly
+			 */
+			return -EINVAL;
+		}
+
+		/* unmapping 1 page */
+		if (ops == HYPER_DMABUF_OPS_KUNMAP_ATOMIC)
+			dma_buf_kunmap_atomic(exported->dma_buf,
+					      1, va_kmapl->vaddr);
+		else
+			dma_buf_kunmap(exported->dma_buf,
+				       1, va_kmapl->vaddr);
+
+		list_del(&va_kmapl->list);
+		kfree(va_kmapl);
+		break;
+
+	case HYPER_DMABUF_OPS_MMAP:
+		/* currently not supported: looking for a way to create
+		 * a dummy vma
+		 */
+		dev_warn(hy_drv_priv->dev,
+			 "remote sync::synchronized mmap is not supported\n");
+		break;
+
+	case HYPER_DMABUF_OPS_VMAP:
+		va_vmapl = kcalloc(1, sizeof(*va_vmapl), GFP_KERNEL);
+
+		if (!va_vmapl)
+			return -ENOMEM;
+
+		/* dummy vmapping */
+		va_vmapl->vaddr = dma_buf_vmap(exported->dma_buf);
+
+		if (!va_vmapl->vaddr) {
+			kfree(va_vmapl);
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_VMAP\n");
+			return -ENOMEM;
+		}
+		list_add(&va_vmapl->list, &exported->va_vmapped->list);
+		break;
+
+	case HYPER_DMABUF_OPS_VUNMAP:
+		if (list_empty(&exported->va_vmapped->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_VUNMAP\n");
+			dev_err(hy_drv_priv->dev,
+				"no more dmabuf VA to be freed\n");
+			return -EFAULT;
+		}
+		va_vmapl = list_first_entry(&exported->va_vmapped->list,
+					    struct vmap_vaddr_list, list);
+		if (!va_vmapl || va_vmapl->vaddr == NULL) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_VUNMAP\n");
+			return -EFAULT;
+		}
+
+		dma_buf_vunmap(exported->dma_buf, va_vmapl->vaddr);
+
+		list_del(&va_vmapl->list);
+		kfree(va_vmapl);
+		break;
+
+	default:
+		/* program should not get here */
+		break;
+	}
+
+	return 0;
+}
diff
--git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h new file mode 100644 index 000000000000..366389287f4e --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h @@ -0,0 +1,30 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_REMOTE_SYNC_H__ +#define __HYPER_DMABUF_REMOTE_SYNC_H__ + +int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops); + +#endif // __HYPER_DMABUF_REMOTE_SYNC_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c new file mode 100644 index 000000000000..c1887d1ad709 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c @@ -0,0 +1,261 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
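+ *
+ * The page bookkeeping in the file below is easiest to see with a
+ * worked example (assuming PAGE_SIZE == 4096): a single-entry sgt with
+ * offset 512 and length 6000 spans two pages, giving frst_ofst = 512,
+ * nents = 2 and last_len = 512 + 6000 - 4096 = 2416; the size macro in
+ * hyper_dmabuf_query.c then recovers the original length, since
+ * 2*4096 - 512 - 4096 + 2416 = 6000 bytes.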
+ *
+ * Authors:
+ *    Dongwon Kim
+ *    Mateusz Polrola
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_sgl_proc.h"
+
+#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
+
+/* return total number of pages referenced by a sgt
+ * for pre-calculation of # of pages behind a given sgt
+ */
+static int get_num_pgs(struct sg_table *sgt)
+{
+	struct scatterlist *sgl;
+	int length, i;
+	/* at least one page */
+	int num_pages = 1;
+
+	sgl = sgt->sgl;
+
+	length = sgl->length - PAGE_SIZE + sgl->offset;
+
+	/* round-up */
+	num_pages += ((length + PAGE_SIZE - 1)/PAGE_SIZE);
+
+	for (i = 1; i < sgt->nents; i++) {
+		sgl = sg_next(sgl);
+
+		/* round-up */
+		num_pages += ((sgl->length + PAGE_SIZE - 1) /
+			      PAGE_SIZE);
+	}
+
+	return num_pages;
+}
+
+/* extract pages directly from struct sg_table */
+struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt)
+{
+	struct pages_info *pg_info;
+	int i, j, k;
+	int length;
+	struct scatterlist *sgl;
+
+	pg_info = kmalloc(sizeof(*pg_info), GFP_KERNEL);
+	if (!pg_info)
+		return NULL;
+
+	pg_info->pgs = kmalloc_array(get_num_pgs(sgt),
+				     sizeof(struct page *),
+				     GFP_KERNEL);
+
+	if (!pg_info->pgs) {
+		kfree(pg_info);
+		return NULL;
+	}
+
+	sgl = sgt->sgl;
+
+	pg_info->nents = 1;
+	pg_info->frst_ofst = sgl->offset;
+	pg_info->pgs[0] = sg_page(sgl);
+	length = sgl->length - PAGE_SIZE + sgl->offset;
+	i = 1;
+
+	while (length > 0) {
+		pg_info->pgs[i] = nth_page(sg_page(sgl), i);
+		length -= PAGE_SIZE;
+		pg_info->nents++;
+		i++;
+	}
+
+	for (j = 1; j < sgt->nents; j++) {
+		sgl = sg_next(sgl);
+		pg_info->pgs[i++] = sg_page(sgl);
+		length = sgl->length - PAGE_SIZE;
+		pg_info->nents++;
+		k = 1;
+
+		while (length > 0) {
+			pg_info->pgs[i++] = nth_page(sg_page(sgl), k++);
+			length -= PAGE_SIZE;
+			pg_info->nents++;
+		}
+	}
+
+	/*
+	 * length at this point will be 0 or negative, so the size of
+	 * the last page is simply PAGE_SIZE plus that remainder
+	 */
+	pg_info->last_len = PAGE_SIZE + length;
+
+	return pg_info;
+}
+
+/* create sg_table with given pages and other parameters */
+struct sg_table *hyper_dmabuf_create_sgt(struct page **pgs,
+					 int frst_ofst, int last_len,
+					 int nents)
+{
+	struct sg_table *sgt;
+	struct scatterlist *sgl;
+	int i, ret;
+
+	sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!sgt)
+		return NULL;
+
+	ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+	if (ret) {
+		sg_free_table(sgt);
+		kfree(sgt);
+		return NULL;
+	}
+
+	sgl = sgt->sgl;
+
+	sg_set_page(sgl, pgs[0], PAGE_SIZE-frst_ofst, frst_ofst);
+
+	for (i = 1; i < nents-1; i++) {
+		sgl = sg_next(sgl);
+		sg_set_page(sgl, pgs[i], PAGE_SIZE, 0);
+	}
+
+	/* more than one page */
+	if (nents > 1) {
+		sgl = sg_next(sgl);
+		sg_set_page(sgl, pgs[i], last_len, 0);
+	}
+
+	return sgt;
+}
+
+int hyper_dmabuf_cleanup_sgt_info(struct exported_sgt_info *exported,
+				  int force)
+{
+	struct sgt_list *sgtl;
+	struct attachment_list *attachl;
+	struct kmap_vaddr_list *va_kmapl;
+	struct vmap_vaddr_list *va_vmapl;
+	struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
+
+	if (!exported) {
+		dev_err(hy_drv_priv->dev, "invalid hyper_dmabuf_id\n");
+		return -EINVAL;
+	}
+
+	/* if force != 1, sgt_info can be released only if
+	 * there's no activity on exported dma-buf on importer
+	 * side.
+	 */
+	if (!force &&
+	    exported->active) {
+		dev_warn(hy_drv_priv->dev,
+			 "dma-buf is used by importer\n");
+
+		return -EPERM;
+	}
+
+	/* force == 1 is not recommended */
+	while (!list_empty(&exported->va_kmapped->list)) {
+		va_kmapl = list_first_entry(&exported->va_kmapped->list,
+					    struct kmap_vaddr_list, list);
+
+		dma_buf_kunmap(exported->dma_buf, 1, va_kmapl->vaddr);
+		list_del(&va_kmapl->list);
+		kfree(va_kmapl);
+	}
+
+	while (!list_empty(&exported->va_vmapped->list)) {
+		va_vmapl = list_first_entry(&exported->va_vmapped->list,
+					    struct vmap_vaddr_list, list);
+
+		dma_buf_vunmap(exported->dma_buf, va_vmapl->vaddr);
+		list_del(&va_vmapl->list);
+		kfree(va_vmapl);
+	}
+
+	while (!list_empty(&exported->active_sgts->list)) {
+		attachl = list_first_entry(&exported->active_attached->list,
+					   struct attachment_list, list);
+
+		sgtl = list_first_entry(&exported->active_sgts->list,
+					struct sgt_list, list);
+
+		dma_buf_unmap_attachment(attachl->attach, sgtl->sgt,
+					 DMA_BIDIRECTIONAL);
+		list_del(&sgtl->list);
+		kfree(sgtl);
+	}
+
+	while (!list_empty(&exported->active_attached->list)) {
+		attachl = list_first_entry(&exported->active_attached->list,
+					   struct attachment_list, list);
+
+		dma_buf_detach(exported->dma_buf, attachl->attach);
+		list_del(&attachl->list);
+		kfree(attachl);
+	}
+
+	/* Start cleanup of buffer in reverse order to exporting */
+	bknd_ops->unshare_pages(&exported->refs_info, exported->nents);
+
+	/* unmap dma-buf */
+	dma_buf_unmap_attachment(exported->active_attached->attach,
+				 exported->active_sgts->sgt,
+				 DMA_BIDIRECTIONAL);
+
+	/* detach dma-buf */
+	dma_buf_detach(exported->dma_buf, exported->active_attached->attach);
+
+	/* close connection to dma-buf completely */
+	dma_buf_put(exported->dma_buf);
+	exported->dma_buf = NULL;
+
+	kfree(exported->active_sgts);
+	kfree(exported->active_attached);
+	kfree(exported->va_kmapped);
+	kfree(exported->va_vmapped);
+	kfree(exported->priv);
+
+	exported->active_sgts = NULL;
+	exported->active_attached = NULL;
+	exported->va_kmapped = NULL;
+	exported->va_vmapped = NULL;
+	exported->priv = NULL;
+
+	return 0;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h
new file mode 100644
index 000000000000..869d98204e03
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
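+ *
+ * A minimal sketch of how the two helpers declared below are paired in
+ * practice (hypothetical, not part of this patch; it mirrors the usage
+ * in hyper_dmabuf_ops_map()):
+ */
+#if 0	/* illustrative sketch only */
+static struct sg_table *example_clone_sgt(struct sg_table *src)
+{
+	struct pages_info *pg_info;
+	struct sg_table *dst;
+
+	/* extract the page list plus first-offset/last-length metadata */
+	pg_info = hyper_dmabuf_ext_pgs(src);
+	if (!pg_info)
+		return NULL;
+
+	/* rebuild an equivalent sg_table from that metadata */
+	dst = hyper_dmabuf_create_sgt(pg_info->pgs, pg_info->frst_ofst,
+				      pg_info->last_len, pg_info->nents);
+
+	kfree(pg_info->pgs);
+	kfree(pg_info);
+	return dst;
+}
+#endif
+/*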
+ * + */ + +#ifndef __HYPER_DMABUF_IMP_H__ +#define __HYPER_DMABUF_IMP_H__ + +/* extract pages directly from struct sg_table */ +struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt); + +/* create sg_table with given pages and other parameters */ +struct sg_table *hyper_dmabuf_create_sgt(struct page **pgs, + int frst_ofst, int last_len, + int nents); + +int hyper_dmabuf_cleanup_sgt_info(struct exported_sgt_info *exported, + int force); + +void hyper_dmabuf_free_sgt(struct sg_table *sgt); + +#endif /* __HYPER_DMABUF_IMP_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h new file mode 100644 index 000000000000..a11f804edfb3 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h @@ -0,0 +1,141 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_STRUCT_H__ +#define __HYPER_DMABUF_STRUCT_H__ + +/* stack of mapped sgts */ +struct sgt_list { + struct sg_table *sgt; + struct list_head list; +}; + +/* stack of attachments */ +struct attachment_list { + struct dma_buf_attachment *attach; + struct list_head list; +}; + +/* stack of vaddr mapped via kmap */ +struct kmap_vaddr_list { + void *vaddr; + struct list_head list; +}; + +/* stack of vaddr mapped via vmap */ +struct vmap_vaddr_list { + void *vaddr; + struct list_head list; +}; + +/* Exporter builds pages_info before sharing pages */ +struct pages_info { + int frst_ofst; + int last_len; + int nents; + struct page **pgs; +}; + + +/* Exporter stores references to sgt in a hash table + * Exporter keeps these references for synchronization + * and tracking purposes + */ +struct exported_sgt_info { + hyper_dmabuf_id_t hid; + + /* VM ID of importer */ + int rdomid; + + struct dma_buf *dma_buf; + int nents; + + /* list for tracking activities on dma_buf */ + struct sgt_list *active_sgts; + struct attachment_list *active_attached; + struct kmap_vaddr_list *va_kmapped; + struct vmap_vaddr_list *va_vmapped; + + /* set to 0 when unexported. 
Importer doesn't
+	 * do a new mapping of buffer if valid == false
+	 */
+	bool valid;
+
+	/* active is non-zero while the buffer is actively used
+	 * (i.e. mapped) by an importer
+	 */
+	int active;
+
+	/* hypervisor specific reference data for shared pages */
+	void *refs_info;
+
+	struct delayed_work unexport;
+	bool unexport_sched;
+
+	/* list for file pointers associated with all user space
+	 * applications that have exported this same buffer to
+	 * another VM. This needs to be tracked to know whether
+	 * the buffer can be completely freed.
+	 */
+	struct file *filp;
+
+	/* size of private */
+	size_t sz_priv;
+
+	/* private data associated with the exported buffer */
+	char *priv;
+};
+
+/* imported_sgt_info contains information about an imported DMA_BUF.
+ * This info is kept on the IMPORT list and asynchronously retrieved
+ * and used to map the DMA_BUF on the importer VM's side upon an
+ * export fd ioctl request from user-space
+ */
+struct imported_sgt_info {
+	hyper_dmabuf_id_t hid;	/* unique id for shared dmabuf imported */
+
+	/* hypervisor-specific handle to pages */
+	int ref_handle;
+
+	/* offset and size info of DMA_BUF */
+	int frst_ofst;
+	int last_len;
+	int nents;
+
+	struct dma_buf *dma_buf;
+	struct sg_table *sgt;
+
+	void *refs_info;
+	bool valid;
+	int importers;
+
+	/* size of private */
+	size_t sz_priv;
+
+	/* private data associated with the imported buffer */
+	char *priv;
+};
+
+#endif /* __HYPER_DMABUF_STRUCT_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_be_drv.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_be_drv.c
new file mode 100644
index 000000000000..d298069159a9
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_be_drv.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Mateusz Polrola
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "../hyper_dmabuf_msg.h"
+#include "../hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_virtio_common.h"
+#include "hyper_dmabuf_virtio_fe_list.h"
+#include "hyper_dmabuf_virtio_shm.h"
+#include "hyper_dmabuf_virtio_comm_ring.h"
+
+/*
+ * Identifies which queue is used for TX and RX.
+ * Note: the numbering is the mirror of the frontend definition.
+ */
+enum virtio_queue_type {
+	HDMA_VIRTIO_RX_QUEUE = 0,
+	HDMA_VIRTIO_TX_QUEUE,
+	HDMA_VIRTIO_QUEUE_MAX
+};
+
+/* Data required for sending TX messages using virtqueues */
+struct virtio_be_tx_data {
+	struct iovec tx_iov;
+	uint16_t tx_idx;
+};
+
+struct virtio_be_priv {
+	struct virtio_dev_info dev;
+	struct virtio_vq_info vqs[HDMA_VIRTIO_QUEUE_MAX];
+	bool busy;
+	struct hyper_dmabuf_req *pending_tx_req;
+	struct virtio_comm_ring tx_ring;
+	struct mutex lock;
+};
+
+/*
+ * Received response to TX request,
+ * or empty buffer to be used for TX requests in future
+ */
+static void virtio_be_handle_tx_kick(struct virtio_vq_info *vq,
+				     struct virtio_fe_info *fe_info)
+{
+	struct virtio_be_priv *priv = fe_info->priv;
+	/* Fill last used buffer with received buffer details */
+	struct virtio_be_tx_data *tx_data =
+		(struct virtio_be_tx_data *)
+		virtio_comm_ring_pop(&priv->tx_ring);
+
+	virtio_vq_getchain(vq, &tx_data->tx_idx, &tx_data->tx_iov, 1, NULL);
+
+	/* Copy response if request was synchronous */
+	if (priv->busy) {
+		memcpy(priv->pending_tx_req,
+		       tx_data->tx_iov.iov_base,
+		       tx_data->tx_iov.iov_len);
+		priv->busy = false;
+	}
+}
+
+/*
+ * Received request from frontend
+ */
+static void virtio_be_handle_rx_kick(struct virtio_vq_info *vq,
+				     struct virtio_fe_info *fe_info)
+{
+	struct iovec iov;
+	uint16_t idx;
+	struct hyper_dmabuf_req *req = NULL;
+	int len;
+	int ret;
+
+	/* Make sure we will process all pending requests */
+	while (virtio_vq_has_descs(vq)) {
+		virtio_vq_getchain(vq, &idx, &iov, 1, NULL);
+
+		if (iov.iov_len != sizeof(struct hyper_dmabuf_req)) {
+			/* HACK: if an int-sized buffer was provided,
+			 * treat that as a request for the frontend vmid
+			 */
+			if (iov.iov_len == sizeof(int)) {
+				*((int *)iov.iov_base) = fe_info->vmid;
+				len = iov.iov_len;
+			} else {
+				len = 0;
+				dev_warn(hy_drv_priv->dev,
+					 "received request with wrong size");
+				dev_warn(hy_drv_priv->dev,
+					 "%zu != %zu\n",
+					 iov.iov_len,
+					 sizeof(struct hyper_dmabuf_req));
+			}
+
+			virtio_vq_relchain(vq, idx, len);
+			continue;
+		}
+
+		req = (struct hyper_dmabuf_req *)iov.iov_base;
+
+		ret = hyper_dmabuf_msg_parse(1, req);
+
+		len = iov.iov_len;
+
+		virtio_vq_relchain(vq, idx, len);
+	}
+	virtio_vq_endchains(vq, 1);
+}
+
+/*
+ * Check in which virtqueue we received a buffer and process it
+ * accordingly.
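+ *
+ * The queue indices are fixed: HDMA_VIRTIO_RX_QUEUE (0) carries
+ * frontend-initiated requests, HDMA_VIRTIO_TX_QUEUE (1) carries
+ * backend-initiated requests and their responses; the frontend
+ * enumerates the same two queues in the opposite order.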
+ */ +static void virtio_be_handle_vq_kick( + int vq_idx, struct virtio_fe_info *fe_info) +{ + struct virtio_vq_info *vq; + + vq = &fe_info->priv->vqs[vq_idx]; + + if (vq_idx == HDMA_VIRTIO_RX_QUEUE) + virtio_be_handle_rx_kick(vq, fe_info); + else + virtio_be_handle_tx_kick(vq, fe_info); +} + +/* + * Received new buffer in virtqueue + */ +static int virtio_be_handle_kick(int client_id, unsigned long *ioreqs_map) +{ + int val = -1; + struct vhm_request *req; + struct virtio_fe_info *fe_info; + int vcpu; + + fe_info = virtio_fe_find(client_id); + if (fe_info == NULL) { + dev_warn(hy_drv_priv->dev, "Client %d not found\n", client_id); + return -EINVAL; + } + + while (1) { + vcpu = find_first_bit(ioreqs_map, fe_info->max_vcpu); + if (vcpu == fe_info->max_vcpu) + break; + req = &fe_info->req_buf[vcpu]; + if (atomic_read(&req->processed) == REQ_STATE_PROCESSING && + req->client == fe_info->client_id) { + if (req->reqs.pio_request.direction == REQUEST_READ) + req->reqs.pio_request.value = 0; + else + val = req->reqs.pio_request.value; + + smp_mb(); + atomic_set(&req->processed, REQ_STATE_COMPLETE); + acrn_ioreq_complete_request(fe_info->client_id, vcpu); + } + } + + if (val >= 0) + virtio_be_handle_vq_kick(val, fe_info); + + return 0; +} + +/* + * New frontend is connecting to backend. + * Creates virtqueues for it and registers internally. + */ +static int virtio_be_register_vhm_client(struct virtio_dev_info *d) +{ + unsigned int vmid; + struct vm_info info; + struct virtio_fe_info *fe_info; + int ret; + + fe_info = kcalloc(1, sizeof(*fe_info), GFP_KERNEL); + if (fe_info == NULL) + return -ENOMEM; + + fe_info->priv = + container_of(d, struct virtio_be_priv, dev); + vmid = d->_ctx.vmid; + fe_info->vmid = vmid; + + dev_dbg(hy_drv_priv->dev, + "Virtio frontend from vm %d connected\n", vmid); + + fe_info->client_id = + acrn_ioreq_create_client(vmid, + virtio_be_handle_kick, + "hyper dmabuf kick"); + if (fe_info->client_id < 0) { + dev_err(hy_drv_priv->dev, + "Failed to create client of ACRN ioreq\n"); + goto err; + } + + ret = acrn_ioreq_add_iorange(fe_info->client_id, + d->io_range_type ? REQ_MMIO : REQ_PORTIO, + d->io_range_start, + d->io_range_start + d->io_range_len); + + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "Failed to add iorange to acrn ioreq\n"); + goto err; + } + + ret = vhm_get_vm_info(vmid, &info); + if (ret < 0) { + acrn_ioreq_del_iorange(fe_info->client_id, + d->io_range_type ? REQ_MMIO : REQ_PORTIO, + d->io_range_start, + d->io_range_start + d->io_range_len); + + dev_err(hy_drv_priv->dev, "Failed in vhm_get_vm_info\n"); + goto err; + } + + fe_info->max_vcpu = info.max_vcpu; + + fe_info->req_buf = acrn_ioreq_get_reqbuf(fe_info->client_id); + if (fe_info->req_buf == NULL) { + acrn_ioreq_del_iorange(fe_info->client_id, + d->io_range_type ? REQ_MMIO : REQ_PORTIO, + d->io_range_start, + d->io_range_start + d->io_range_len); + + dev_err(hy_drv_priv->dev, "Failed in acrn_ioreq_get_reqbuf\n"); + goto err; + } + + acrn_ioreq_attach_client(fe_info->client_id, 0); + + virtio_fe_add(fe_info); + + return 0; + +err: + acrn_ioreq_destroy_client(fe_info->client_id); + kfree(fe_info); + + return -EINVAL; +} + +/* + * DM is opening our VBS interface to create new frontend instance. 
+ */
+static int vbs_k_open(struct inode *inode, struct file *f)
+{
+	struct virtio_be_priv *priv;
+	struct virtio_dev_info *dev;
+	struct virtio_vq_info *vqs;
+	int i;
+
+	priv = kcalloc(1, sizeof(*priv), GFP_KERNEL);
+	if (priv == NULL)
+		return -ENOMEM;
+
+	vqs = &priv->vqs[0];
+
+	dev = &priv->dev;
+
+	for (i = 0; i < HDMA_VIRTIO_QUEUE_MAX; i++) {
+		vqs[i].dev = dev;
+		vqs[i].vq_notify = NULL;
+	}
+	dev->vqs = vqs;
+
+	virtio_dev_init(dev, vqs, HDMA_VIRTIO_QUEUE_MAX);
+
+	priv->pending_tx_req = kcalloc(1, sizeof(struct hyper_dmabuf_req),
+				       GFP_KERNEL);
+	if (!priv->pending_tx_req) {
+		kfree(priv);
+		return -ENOMEM;
+	}
+
+	if (virtio_comm_ring_init(&priv->tx_ring,
+				  sizeof(struct virtio_be_tx_data),
+				  REQ_RING_SIZE)) {
+		kfree(priv->pending_tx_req);
+		kfree(priv);
+		return -ENOMEM;
+	}
+
+	mutex_init(&priv->lock);
+
+	f->private_data = priv;
+
+	return 0;
+}
+
+static int vbs_k_release(struct inode *inode, struct file *f)
+{
+	struct virtio_be_priv *priv =
+		(struct virtio_be_priv *) f->private_data;
+	int i;
+
+//	virtio_dev_stop(&priv->dev);
+//	virtio_dev_cleanup(&priv->dev, false);
+
+	for (i = 0; i < HDMA_VIRTIO_QUEUE_MAX; i++)
+		virtio_vq_reset(&priv->vqs[i]);
+
+	kfree(priv->pending_tx_req);
+	virtio_comm_ring_free(&priv->tx_ring);
+	kfree(priv);
+	return 0;
+}
+
+static long vbs_k_ioctl(struct file *f, unsigned int ioctl,
+			unsigned long arg)
+{
+	struct virtio_be_priv *priv =
+		(struct virtio_be_priv *) f->private_data;
+	void __user *argp = (void __user *)arg;
+	int r;
+
+	if (priv == NULL) {
+		dev_err(hy_drv_priv->dev,
+			"No backend private data\n");
+
+		return -EINVAL;
+	}
+
+	if (ioctl == VBS_SET_VQ) {
+		/* Overridden to additionally call
+		 * virtio_be_register_vhm_client()
+		 */
+		r = virtio_vqs_ioctl(&priv->dev, ioctl, argp);
+		if (r == -ENOIOCTLCMD)
+			return -EFAULT;
+
+		if (virtio_be_register_vhm_client(&priv->dev) < 0)
+			return -EFAULT;
+	} else {
+		r = virtio_dev_ioctl(&priv->dev, ioctl, argp);
+		if (r == -ENOIOCTLCMD)
+			r = virtio_vqs_ioctl(&priv->dev, ioctl, argp);
+	}
+
+	return r;
+}
+
+static const struct file_operations vbs_hyper_dmabuf_fops = {
+	.owner = THIS_MODULE,
+	.open = vbs_k_open,
+	.release = vbs_k_release,
+	.unlocked_ioctl = vbs_k_ioctl,
+	.llseek = noop_llseek,
+};
+
+static struct miscdevice vbs_hyper_dmabuf_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "vbs_hyper_dmabuf",
+	.fops = &vbs_hyper_dmabuf_fops,
+};
+
+static int virtio_be_register(void)
+{
+	return misc_register(&vbs_hyper_dmabuf_misc);
+}
+
+static void virtio_be_unregister(void)
+{
+	misc_deregister(&vbs_hyper_dmabuf_misc);
+}
+
+/*
+ * ACRN SOS always has vmid 0
+ * TODO: check if that will always be true
+ */
+static int virtio_be_get_vmid(void)
+{
+	return 0;
+}
+
+static int virtio_be_send_req(int vmid, struct hyper_dmabuf_req *req,
+			      int wait)
+{
+	int timeout = 1000;
+	struct virtio_fe_info *fe_info;
+	struct virtio_be_priv *priv;
+	struct virtio_be_tx_data *tx_data;
+	struct virtio_vq_info *vq;
+	int len;
+
+	fe_info = virtio_fe_find_by_vmid(vmid);
+
+	if (fe_info == NULL) {
+		dev_err(hy_drv_priv->dev,
+			"No frontend registered for vmid %d\n", vmid);
+		return -ENOENT;
+	}
+
+	priv = fe_info->priv;
+
+	mutex_lock(&priv->lock);
+
+	/* Check if we have any free buffers for sending new request */
+	while (virtio_comm_ring_full(&priv->tx_ring) &&
+	       timeout--) {
+		usleep_range(100, 120);
+	}
+
+	if (timeout <= 0) {
+		dev_warn(hy_drv_priv->dev, "Requests ring full\n");
+		/* drop the lock before bailing out */
+		mutex_unlock(&priv->lock);
+		return -EBUSY;
+	}
+
+	/* Get free buffer for sending request from ring */
+	tx_data = (struct virtio_be_tx_data *)
+		virtio_comm_ring_push(&priv->tx_ring);
+
+	vq = &priv->vqs[HDMA_VIRTIO_TX_QUEUE];
+
+	if (tx_data->tx_iov.iov_len != sizeof(struct
hyper_dmabuf_req)) { + dev_warn(hy_drv_priv->dev, + "received request with wrong size\n"); + virtio_vq_relchain(vq, tx_data->tx_idx, 0); + mutex_unlock(&priv->lock); + return -EINVAL; + } + + req->req_id = hyper_dmabuf_virtio_get_next_req_id(); + + /* Copy request data to virtqueue buffer */ + memcpy(tx_data->tx_iov.iov_base, req, sizeof(*req)); + len = tx_data->tx_iov.iov_len; + + /* update req_pending with current request */ + if (wait) { + priv->busy = true; + memcpy(priv->pending_tx_req, req, sizeof(*req)); + } + + virtio_vq_relchain(vq, tx_data->tx_idx, len); + + virtio_vq_endchains(vq, 1); + + if (wait) { + while (timeout--) { + if (priv->pending_tx_req->stat != + HYPER_DMABUF_REQ_NOT_RESPONDED) + break; + usleep_range(100, 120); + } + + if (timeout < 0) { + mutex_unlock(&priv->lock); + dev_err(hy_drv_priv->dev, "request timed-out\n"); + return -EBUSY; + } + } + + mutex_unlock(&priv->lock); + return 0; +}; + +struct hyper_dmabuf_bknd_ops virtio_bknd_ops = { + .init = virtio_be_register, + .cleanup = virtio_be_unregister, + .get_vm_id = virtio_be_get_vmid, + .share_pages = virtio_share_pages, + .unshare_pages = virtio_unshare_pages, + .map_shared_pages = virtio_map_shared_pages, + .unmap_shared_pages = virtio_unmap_shared_pages, + .init_comm_env = NULL, + .destroy_comm = NULL, + .init_rx_ch = NULL, + .init_tx_ch = NULL, + .send_req = virtio_be_send_req, +}; + + +MODULE_DESCRIPTION("Hyper dmabuf virtio driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.c new file mode 100644 index 000000000000..d73bcbcc8e87 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.c @@ -0,0 +1,89 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
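+ *
+ * The ring implemented below is a plain circular buffer of fixed-size
+ * slots. A hedged usage sketch (hypothetical, not part of this patch):
+ */
+#if 0	/* illustrative sketch only */
+static void example_ring_usage(void)
+{
+	struct virtio_comm_ring ring;
+	struct hyper_dmabuf_req *slot;
+
+	if (virtio_comm_ring_init(&ring, sizeof(*slot), 4))
+		return;		/* allocation failed */
+
+	if (!virtio_comm_ring_full(&ring)) {
+		/* claim the next free slot ... */
+		slot = virtio_comm_ring_push(&ring);
+		/* ... fill *slot and hand it to the virtqueue ... */
+	}
+
+	/* release the oldest in-flight slot once its response arrives */
+	slot = virtio_comm_ring_pop(&ring);
+
+	virtio_comm_ring_free(&ring);
+}
+#endif
+/*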
+ *
+ * Authors:
+ *    Mateusz Polrola
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include "hyper_dmabuf_virtio_comm_ring.h"
+
+int virtio_comm_ring_init(struct virtio_comm_ring *ring,
+			  int entry_size,
+			  int num_entries)
+{
+	ring->data = kcalloc(num_entries, entry_size, GFP_KERNEL);
+
+	if (!ring->data)
+		return -ENOMEM;
+
+	ring->head = 0;
+	ring->tail = 0;
+	ring->used = 0;
+	ring->num_entries = num_entries;
+	ring->entry_size = entry_size;
+
+	return 0;
+}
+
+void virtio_comm_ring_free(struct virtio_comm_ring *ring)
+{
+	kfree(ring->data);
+	ring->data = NULL;
+}
+
+bool virtio_comm_ring_full(struct virtio_comm_ring *ring)
+{
+	if (ring->used == ring->num_entries)
+		return true;
+
+	return false;
+}
+
+void *virtio_comm_ring_push(struct virtio_comm_ring *ring)
+{
+	int old_head;
+
+	if (virtio_comm_ring_full(ring))
+		return NULL;
+
+	old_head = ring->head;
+
+	ring->head++;
+	ring->head %= ring->num_entries;
+	ring->used++;
+
+	return ring->data + (ring->entry_size * old_head);
+}
+
+/* note: there is no underflow check here; callers must only pop
+ * entries they have previously pushed
+ */
+void *virtio_comm_ring_pop(struct virtio_comm_ring *ring)
+{
+	int old_tail = ring->tail;
+
+	ring->tail++;
+	ring->tail %= ring->num_entries;
+	ring->used--;
+
+	return ring->data + (ring->entry_size * old_tail);
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.h b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.h
new file mode 100644
index 000000000000..a95a63af2ba0
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ * + */ + +#ifndef __HYPER_DMABUF_VIRTIO_COMM_RING_H__ +#define __HYPER_DMABUF_VIRTIO_COMM_RING_H__ + +/* Generic ring buffer */ +struct virtio_comm_ring { + /* Buffer allocated for keeping ring entries */ + void *data; + + /* Index pointing to next free element in ring */ + int head; + + /* Index pointing to last released element in ring */ + int tail; + + /* Total number of elements that ring can contain */ + int num_entries; + + /* Size of single ring element in bytes */ + int entry_size; + + /* Number of currently used elements */ + int used; +}; + +/* Initializes given ring for keeping given a + * number of entries of specific size */ +int virtio_comm_ring_init(struct virtio_comm_ring *ring, + int entry_size, + int num_entries); + +/* Frees buffer used for storing ring entries */ +void virtio_comm_ring_free(struct virtio_comm_ring *ring); + +/* Checks if ring is full */ +bool virtio_comm_ring_full(struct virtio_comm_ring *ring); + +/* Gets next free element from ring and marks it as used + * or NULL if ring is full */ +void *virtio_comm_ring_push(struct virtio_comm_ring *ring); + +/* Pops oldest element from ring and marks it as free */ +void *virtio_comm_ring_pop(struct virtio_comm_ring *ring); + +#endif /* __HYPER_DMABUF_VIRTIO_COMM_RING_H__*/ diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.c new file mode 100644 index 000000000000..05be74358a74 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.c @@ -0,0 +1,35 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
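+ *
+ * As a worked example for the REFS_PER_PAGE constant defined in the
+ * header below: assuming 4 KiB pages and 64-bit physical-address
+ * references, one shared page holds 4096 / sizeof(u64) = 512 page
+ * references.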
+ *
+ * Authors:
+ *    Mateusz Polrola
+ *
+ */
+
+#include "hyper_dmabuf_virtio_common.h"
+
+int hyper_dmabuf_virtio_get_next_req_id(void)
+{
+	static int req_id;
+
+	return req_id++;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.h b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.h
new file mode 100644
index 000000000000..24a652ef54c0
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_VIRTIO_COMMON_H__
+#define __HYPER_DMABUF_VIRTIO_COMMON_H__
+
+/*
+ * ACRN uses physical addresses for memory sharing,
+ * so each page reference is 64 bits wide
+ */
+#define REFS_PER_PAGE (PAGE_SIZE/sizeof(u64))
+
+/* Defines size of requests circular buffer */
+#define REQ_RING_SIZE 128
+
+extern struct hyper_dmabuf_bknd_ops virtio_bknd_ops;
+struct virtio_be_priv;
+struct vhm_request;
+
+/* Entry describing each connected frontend */
+struct virtio_fe_info {
+	struct virtio_be_priv *priv;
+	int client_id;
+	int vmid;
+	int max_vcpu;
+	struct vhm_request *req_buf;
+};
+
+extern struct hyper_dmabuf_private hyper_dmabuf_private;
+
+int hyper_dmabuf_virtio_get_next_req_id(void);
+
+#endif /* __HYPER_DMABUF_VIRTIO_COMMON_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_drv.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_drv.c
new file mode 100644
index 000000000000..9ae290435d70
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_drv.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Mateusz Polrola
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "../hyper_dmabuf_msg.h"
+#include "../hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_virtio_common.h"
+#include "hyper_dmabuf_virtio_shm.h"
+#include "hyper_dmabuf_virtio_comm_ring.h"
+
+/*
+ * Identifies which queue is used for TX and RX.
+ * Note: the numbering is the opposite of the backend definition.
+ */
+enum virtio_queue_type {
+	HDMA_VIRTIO_TX_QUEUE = 0,
+	HDMA_VIRTIO_RX_QUEUE,
+	HDMA_VIRTIO_QUEUE_MAX
+};
+
+struct virtio_hdma_fe_priv {
+	struct virtqueue *vqs[HDMA_VIRTIO_QUEUE_MAX];
+	struct virtio_comm_ring tx_ring;
+	struct virtio_comm_ring rx_ring;
+	int vmid;
+};
+
+/* Assuming there will be one FE instance per VM */
+static struct virtio_hdma_fe_priv *hyper_dmabuf_virtio_fe;
+
+/*
+ * Received a response for a request.
+ * No need to copy the request with the updated result,
+ * as the backend processes the original request data directly.
+ */
+static void virtio_hdma_fe_tx_done(struct virtqueue *vq)
+{
+	struct virtio_hdma_fe_priv *priv =
+		(struct virtio_hdma_fe_priv *) vq->vdev->priv;
+	int len;
+
+	if (priv == NULL) {
+		dev_dbg(hy_drv_priv->dev,
+			"No frontend private data\n");
+		return;
+	}
+
+	/* Make sure that all pending responses are processed */
+	while (virtqueue_get_buf(vq, &len)) {
+		if (len == sizeof(struct hyper_dmabuf_req)) {
+			/* Mark that the response was received
+			 * and the buffer can be reused */
+			virtio_comm_ring_pop(&priv->tx_ring);
+		}
+	}
+}
+
+/*
+ * Sends the given data buffer via the given virtqueue.
+ */
+static void virtio_hdma_fe_queue_buffer(struct virtio_hdma_fe_priv *priv,
+					unsigned int queue_nr,
+					void *buf, size_t size)
+{
+	struct scatterlist sg;
+
+	if (queue_nr >= HDMA_VIRTIO_QUEUE_MAX) {
+		dev_dbg(hy_drv_priv->dev,
+			"queue_nr exceeding max queue number\n");
+		return;
+	}
+
+	sg_init_one(&sg, buf, size);
+
+	virtqueue_add_inbuf(priv->vqs[queue_nr], &sg, 1, buf, GFP_KERNEL);
+
+	virtqueue_kick(priv->vqs[queue_nr]);
+}
+
+/*
+ * Handles requests coming from other VMs
+ */
+static void virtio_hdma_fe_handle_rx(struct virtqueue *vq)
+{
+	struct virtio_hdma_fe_priv *priv =
+		(struct virtio_hdma_fe_priv *) vq->vdev->priv;
+	struct hyper_dmabuf_req *rx_req;
+	int size, ret;
+
+	if (priv == NULL) {
+		dev_dbg(hy_drv_priv->dev,
+			"No frontend private data\n");
+		return;
+	}
+
+	/* Make sure all pending requests are processed */
+	while (virtqueue_get_buf(vq, &size)) {
+
+		/* Get next request from ring */
+		rx_req = (struct hyper_dmabuf_req *)
+			virtio_comm_ring_pop(&priv->rx_ring);
+
+		if (size != sizeof(struct hyper_dmabuf_req)) {
+			dev_dbg(hy_drv_priv->dev,
+				"Received malformed request\n");
+		} else {
+			ret = hyper_dmabuf_msg_parse(1, rx_req);
+		}
+
+		/* Send updated request back to virtqueue as a response. */
+		virtio_hdma_fe_queue_buffer(priv, HDMA_VIRTIO_RX_QUEUE,
+					    rx_req, sizeof(*rx_req));
+	}
+}
+
+static int virtio_hdma_fe_probe_common(struct virtio_device *vdev)
+{
+	struct virtio_hdma_fe_priv *priv;
+	vq_callback_t *callbacks[] = {virtio_hdma_fe_tx_done,
+				      virtio_hdma_fe_handle_rx};
+	static const char *names[] = {"txqueue", "rxqueue"};
+	int ret;
+
+	priv = kzalloc(sizeof(struct virtio_hdma_fe_priv), GFP_KERNEL);
+	if (priv == NULL)
+		return -ENOMEM;
+
+	virtio_comm_ring_init(&priv->tx_ring,
+			      sizeof(struct hyper_dmabuf_req),
+			      REQ_RING_SIZE);
+	virtio_comm_ring_init(&priv->rx_ring,
+			      sizeof(struct hyper_dmabuf_req),
+			      REQ_RING_SIZE);
+
+	/* Set vmid to -1 to mark that it is not initialized yet */
+	priv->vmid = -1;
+
+	vdev->priv = priv;
+
+	ret = virtio_find_vqs(vdev, HDMA_VIRTIO_QUEUE_MAX,
+			      priv->vqs, callbacks, names, NULL);
+	if (ret)
+		goto err;
+
+	hyper_dmabuf_virtio_fe = priv;
+
+	return 0;
+err:
+	virtio_comm_ring_free(&priv->tx_ring);
+	virtio_comm_ring_free(&priv->rx_ring);
+	kfree(priv);
+	return ret;
+}
+
+static void virtio_hdma_fe_remove_common(struct virtio_device *vdev)
+{
+	struct virtio_hdma_fe_priv *priv =
+		(struct virtio_hdma_fe_priv *) vdev->priv;
+
+	if (priv == NULL) {
+		dev_err(hy_drv_priv->dev,
+			"No frontend private data\n");
+
+		return;
+	}
+
+	vdev->config->reset(vdev);
+	vdev->config->del_vqs(vdev);
+	virtio_comm_ring_free(&priv->tx_ring);
+	virtio_comm_ring_free(&priv->rx_ring);
+	kfree(priv);
+	hyper_dmabuf_virtio_fe = NULL;
+}
+
+static int virtio_hdma_fe_probe(struct virtio_device *vdev)
+{
+	return virtio_hdma_fe_probe_common(vdev);
+}
+
+static void virtio_hdma_fe_remove(struct virtio_device *vdev)
+{
+	virtio_hdma_fe_remove_common(vdev);
+}
+
+/*
+ * Queues empty request buffers to the backend, which the backend
+ * then uses to send requests back to the frontend.
+ */
+static void virtio_hdma_fe_scan(struct virtio_device *vdev)
+{
+	struct virtio_hdma_fe_priv *priv =
+		(struct virtio_hdma_fe_priv *) vdev->priv;
+	struct hyper_dmabuf_req *rx_req;
+	int timeout = 1000;
+
+	if (priv == NULL) {
+		dev_dbg(hy_drv_priv->dev,
+			"No frontend private data\n");
+
+		return;
+	}
+
+	/* Send a request to query the vmid: in ACRN, guest instances
+	 * don't know their own ids, but the host does.
	 * A small hack is used here: a buffer of int size is sent to
	 * the backend, which fills it with the vmid of the instance
	 * that sent the request.
+	 */
+	virtio_hdma_fe_queue_buffer(priv, HDMA_VIRTIO_TX_QUEUE,
+				    &priv->vmid, sizeof(priv->vmid));
+
+	while (timeout--) {
+		if (priv->vmid > 0)
+			break;
+		usleep_range(100, 120);
+	}
+
+	if (timeout < 0)
+		dev_err(hy_drv_priv->dev,
+			"Cannot query vmid\n");
+
+	while (!virtio_comm_ring_full(&priv->rx_ring)) {
+		rx_req = virtio_comm_ring_push(&priv->rx_ring);
+
+		virtio_hdma_fe_queue_buffer(priv, HDMA_VIRTIO_RX_QUEUE,
+					    rx_req, sizeof(*rx_req));
+	}
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtio_hdma_fe_freeze(struct virtio_device *vdev)
+{
+	virtio_hdma_fe_remove_common(vdev);
+	return 0;
+}
+
+static int virtio_hdma_fe_restore(struct virtio_device *vdev)
+{
+	return virtio_hdma_fe_probe_common(vdev);
+}
+#endif
+
+
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_HYPERDMABUF, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+static struct virtio_driver virtio_hdma_fe_driver = {
+	.driver.name = KBUILD_MODNAME,
+	.driver.owner = THIS_MODULE,
+	.id_table = id_table,
+	.probe = virtio_hdma_fe_probe,
+	.remove = virtio_hdma_fe_remove,
+	.scan = virtio_hdma_fe_scan,
+#ifdef CONFIG_PM_SLEEP
+	.freeze = virtio_hdma_fe_freeze,
+	.restore = virtio_hdma_fe_restore,
+#endif
+};
+
+int virtio_hdma_fe_register(void)
+{
+	return register_virtio_driver(&virtio_hdma_fe_driver);
+}
+
+void virtio_hdma_fe_unregister(void)
+{
+	unregister_virtio_driver(&virtio_hdma_fe_driver);
+}
+
+static int virtio_hdma_fe_get_vmid(void)
+{
+	struct virtio_hdma_fe_priv *priv = hyper_dmabuf_virtio_fe;
+
+	if (hyper_dmabuf_virtio_fe == NULL) {
+		dev_err(hy_drv_priv->dev,
+			"Backend not connected\n");
+		return -1;
+	}
+
+	return priv->vmid;
+}
+
+static int virtio_hdma_fe_send_req(int vmid, struct hyper_dmabuf_req *req,
+				   int wait)
+{
+	struct virtio_hdma_fe_priv *priv = hyper_dmabuf_virtio_fe;
+	struct hyper_dmabuf_req *tx_req;
+	int timeout = 1000;
+
+	if (priv == NULL) {
+		dev_err(hy_drv_priv->dev,
+			"Backend not connected\n");
+		return -ENOENT;
+	}
+
+	/* Check if there are any free buffers in ring */
+	while (timeout--) {
+		if (!virtio_comm_ring_full(&priv->tx_ring))
+			break;
+		usleep_range(100, 120);
+	}
+
+	if (timeout < 0) {
+		dev_err(hy_drv_priv->dev,
+			"Timed out while waiting for free request buffers\n");
+		return -EBUSY;
+	}
+
+	/* Get free buffer for sending request from ring */
+	tx_req = (struct hyper_dmabuf_req *)
+		virtio_comm_ring_push(&priv->tx_ring);
+	req->req_id = hyper_dmabuf_virtio_get_next_req_id();
+
+	/* copy request to buffer that will be used in virtqueue */
+	memcpy(tx_req, req, sizeof(*req));
+
+	virtio_hdma_fe_queue_buffer(hyper_dmabuf_virtio_fe,
+				    HDMA_VIRTIO_TX_QUEUE,
+				    tx_req, sizeof(*tx_req));
+
+	if (wait) {
+		/* reset the budget for the response wait */
+		timeout = 1000;
+		while (timeout--) {
+			if (tx_req->stat !=
+			    HYPER_DMABUF_REQ_NOT_RESPONDED)
+				break;
+			usleep_range(100, 120);
+		}
+
+		if (timeout < 0)
+			return -EBUSY;
+	}
+
+	return 0;
+}
+
+struct hyper_dmabuf_bknd_ops virtio_bknd_ops = {
+	.init = virtio_hdma_fe_register,
+	.cleanup = virtio_hdma_fe_unregister,
+	.get_vm_id = virtio_hdma_fe_get_vmid,
+	.share_pages = virtio_share_pages,
+	.unshare_pages = virtio_unshare_pages,
+	.map_shared_pages = virtio_map_shared_pages,
+	.unmap_shared_pages = virtio_unmap_shared_pages,
+	.send_req = virtio_hdma_fe_send_req,
+	.init_comm_env = NULL,
+	.destroy_comm = NULL,
+	.init_rx_ch = NULL,
+	.init_tx_ch = NULL,
+};
+
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Hyper dmabuf virtio
driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.c new file mode 100644 index 000000000000..79b30e286b5e --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.c @@ -0,0 +1,99 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_virtio_common.h" +#include "hyper_dmabuf_virtio_fe_list.h" + +DECLARE_HASHTABLE(virtio_fe_hash, MAX_ENTRY_FE); + +void virtio_fe_table_init(void) +{ + hash_init(virtio_fe_hash); +} + +int virtio_fe_add(struct virtio_fe_info *fe_info) +{ + struct virtio_fe_info_entry *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->info = fe_info; + + hash_add(virtio_fe_hash, &info_entry->node, + info_entry->info->client_id); + + return 0; +} + +struct virtio_fe_info *virtio_fe_find(int client_id) +{ + struct virtio_fe_info_entry *info_entry; + int bkt; + + hash_for_each(virtio_fe_hash, bkt, info_entry, node) + if (info_entry->info->client_id == client_id) + return info_entry->info; + + return NULL; +} + +struct virtio_fe_info *virtio_fe_find_by_vmid(int vmid) +{ + struct virtio_fe_info_entry *info_entry; + int bkt; + + hash_for_each(virtio_fe_hash, bkt, info_entry, node) + if (info_entry->info->vmid == vmid) + return info_entry->info; + + return NULL; +} + +int virtio_fe_remove(int client_id) +{ + struct virtio_fe_info_entry *info_entry; + int bkt; + + hash_for_each(virtio_fe_hash, bkt, info_entry, node) + if (info_entry->info->client_id == client_id) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + return -ENOENT; +} diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.h b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.h new file mode 100644 index 000000000000..bc7ef843161c --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.h @@ -0,0 +1,48 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, 
including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_VIRTIO_FE_LIST_H__
+#define __HYPER_DMABUF_VIRTIO_FE_LIST_H__
+
+/* number of bits to be used for the connected-frontends hash table */
+#define MAX_ENTRY_FE 7
+
+struct virtio_fe_info;
+
+struct virtio_fe_info_entry {
+	struct virtio_fe_info *info;
+	struct hlist_node node;
+};
+
+void virtio_fe_table_init(void);
+
+int virtio_fe_add(struct virtio_fe_info *fe_info);
+
+int virtio_fe_remove(int client_id);
+
+struct virtio_fe_info *virtio_fe_find(int client_id);
+
+struct virtio_fe_info *virtio_fe_find_by_vmid(int vmid);
+
+#endif /* __HYPER_DMABUF_VIRTIO_FE_LIST_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.c
new file mode 100644
index 000000000000..be5141c25191
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
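As a usage illustration of the frontend list API above, a backend might register and unregister a connected frontend roughly like this (a hypothetical sketch; the example_* names are not part of the patch):

#include <linux/slab.h>
#include "hyper_dmabuf_virtio_common.h"
#include "hyper_dmabuf_virtio_fe_list.h"

/* Hypothetical: called when a new frontend connects to the backend. */
static int example_register_frontend(struct virtio_be_priv *priv,
				     int client_id, int vmid)
{
	struct virtio_fe_info *fe_info;

	fe_info = kzalloc(sizeof(*fe_info), GFP_KERNEL);
	if (!fe_info)
		return -ENOMEM;

	fe_info->priv = priv;
	fe_info->client_id = client_id;
	fe_info->vmid = vmid;

	return virtio_fe_add(fe_info);
}

/* Hypothetical: called on disconnect. Note that virtio_fe_remove()
 * frees only the hash entry, so the caller still owns fe_info itself.
 */
static void example_unregister_frontend(int client_id)
{
	struct virtio_fe_info *fe_info = virtio_fe_find(client_id);

	if (!fe_info)
		return;

	virtio_fe_remove(client_id);
	kfree(fe_info);
}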
+ *
+ * Authors:
+ *    Mateusz Polrola
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE
+#include
+#endif
+#include "../hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_virtio_shm.h"
+#include "hyper_dmabuf_virtio_common.h"
+
+#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE
+struct virtio_shared_pages_info {
+	u64 *lvl3_table;
+	u64 **lvl2_table;
+	u64 lvl3_gref;
+	struct page **data_pages;
+	int n_lvl2_refs;
+	int nents_last;
+	int vmid;
+};
+#else
+struct virtio_shared_pages_info {
+	u64 *lvl3_table;
+	u64 *lvl2_table;
+	u64 lvl3_gref;
+};
+#endif
+
+#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE
+static int virtio_be_share_pages(struct page **pages,
+				 int vmid,
+				 int nents,
+				 void **refs_info)
+{
+	dev_err(hy_drv_priv->dev,
+		"Pages sharing not available with ACRN backend in SOS\n");
+
+	return -EINVAL;
+}
+
+static int virtio_be_unshare_pages(void **refs_info,
+				   int nents)
+{
+	dev_err(hy_drv_priv->dev,
+		"Pages sharing not available with ACRN backend in SOS\n");
+
+	return -EINVAL;
+}
+
+static struct page **virtio_be_map_shared_pages(unsigned long lvl3_gref,
+						int vmid, int nents,
+						void **refs_info)
+{
+	u64 *lvl3_table = NULL;
+	u64 **lvl2_table = NULL;
+	struct page **data_pages = NULL;
+	struct virtio_shared_pages_info *sh_pages_info = NULL;
+	void *pageaddr;
+
+	int nents_last = (nents - 1) % REFS_PER_PAGE + 1;
+	int n_lvl2_refs = (nents / REFS_PER_PAGE) + ((nents_last > 0) ? 1 : 0) -
+			  (nents_last == REFS_PER_PAGE);
+	int i, j, k;
+
+	sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL);
+	if (sh_pages_info == NULL)
+		goto map_failed;
+
+	*refs_info = (void *) sh_pages_info;
+
+	data_pages = kcalloc(nents, sizeof(struct page *), GFP_KERNEL);
+	if (data_pages == NULL)
+		goto map_failed;
+
+	lvl2_table = kcalloc(n_lvl2_refs, sizeof(u64 *), GFP_KERNEL);
+	if (lvl2_table == NULL)
+		goto map_failed;
+
+	lvl3_table = (u64 *)map_guest_phys(vmid, lvl3_gref, PAGE_SIZE);
+	if (lvl3_table == NULL)
+		goto map_failed;
+
+	for (i = 0; i < n_lvl2_refs; i++) {
+		lvl2_table[i] = (u64 *)map_guest_phys(vmid,
+						      lvl3_table[i],
+						      PAGE_SIZE);
+		if (lvl2_table[i] == NULL)
+			goto map_failed;
+	}
+
+	k = 0;
+	for (i = 0; i < n_lvl2_refs - 1; i++) {
+		for (j = 0; j < REFS_PER_PAGE; j++) {
+			pageaddr = map_guest_phys(vmid,
+						  lvl2_table[i][j],
+						  PAGE_SIZE);
+			if (pageaddr == NULL)
+				goto map_failed;
+
+			data_pages[k] = virt_to_page(pageaddr);
+			k++;
+		}
+	}
+
+	for (j = 0; j < nents_last; j++) {
+		pageaddr = map_guest_phys(vmid,
+					  lvl2_table[i][j],
+					  PAGE_SIZE);
+		if (pageaddr == NULL)
+			goto map_failed;
+
+		data_pages[k] = virt_to_page(pageaddr);
+		k++;
+	}
+
+	sh_pages_info->lvl2_table = lvl2_table;
+	sh_pages_info->lvl3_table = lvl3_table;
+	sh_pages_info->lvl3_gref = lvl3_gref;
+	sh_pages_info->n_lvl2_refs = n_lvl2_refs;
+	sh_pages_info->nents_last = nents_last;
+	sh_pages_info->data_pages = data_pages;
+	sh_pages_info->vmid = vmid;
+
+	return data_pages;
+
+map_failed:
+	dev_err(hy_drv_priv->dev,
+		"Cannot map guest memory\n");
+
+	kfree(lvl2_table);
+	kfree(data_pages);
+	kfree(sh_pages_info);
+
+	return NULL;
+}
+
+/*
+ * TODO: In theory the pages don't need to be unmapped, as ACRN only
+ * translates memory addresses, but it is not certain that this will
+ * keep working the same way in the future
+ */
+static int virtio_be_unmap_shared_pages(void **refs_info, int nents)
+{
+	struct virtio_shared_pages_info *sh_pages_info;
+	int vmid;
+	int i, j;
+
+	sh_pages_info = (struct virtio_shared_pages_info *)(*refs_info);
+
+	if (sh_pages_info->data_pages == NULL) {
+		dev_warn(hy_drv_priv->dev,
"Imported pages already cleaned up"); + dev_warn(hy_drv_priv->dev, + "or buffer was not imported yet\n"); + return 0; + } + vmid = sh_pages_info->vmid; + + for (i = 0; i < sh_pages_info->n_lvl2_refs - 1; i++) { + for (j = 0; j < REFS_PER_PAGE; j++) + unmap_guest_phys(vmid, + sh_pages_info->lvl2_table[i][j]); + } + + for (j = 0; j < sh_pages_info->nents_last; j++) + unmap_guest_phys(vmid, sh_pages_info->lvl2_table[i][j]); + + for (i = 0; i < sh_pages_info->n_lvl2_refs; i++) + unmap_guest_phys(vmid, sh_pages_info->lvl3_table[i]); + + unmap_guest_phys(vmid, sh_pages_info->lvl3_gref); + + kfree(sh_pages_info->lvl2_table); + kfree(sh_pages_info->data_pages); + sh_pages_info->data_pages = NULL; + kfree(sh_pages_info); + sh_pages_info = NULL; + + return 0; +} +#else +static int virtio_fe_share_pages(struct page **pages, + int domid, int nents, + void **refs_info) +{ + struct virtio_shared_pages_info *sh_pages_info; + u64 lvl3_gref; + u64 *lvl2_table; + u64 *lvl3_table; + int i; + + /* + * Calculate number of pages needed for 2nd level addresing: + */ + int n_lvl2_grefs = (nents/REFS_PER_PAGE + + ((nents % REFS_PER_PAGE) ? 1 : 0)); + + lvl3_table = (u64 *)__get_free_pages(GFP_KERNEL, 1); + lvl2_table = (u64 *)__get_free_pages(GFP_KERNEL, n_lvl2_grefs); + + sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL); + + if (sh_pages_info == NULL) + return -ENOMEM; + + *refs_info = (void *)sh_pages_info; + + /* Share physical address of pages */ + for (i = 0; i < nents; i++) + lvl2_table[i] = page_to_phys(pages[i]); + + for (i = 0; i < n_lvl2_grefs; i++) + lvl3_table[i] = + virt_to_phys((void *)lvl2_table + i * PAGE_SIZE); + + lvl3_gref = virt_to_phys(lvl3_table); + + sh_pages_info->lvl3_table = lvl3_table; + sh_pages_info->lvl2_table = lvl2_table; + sh_pages_info->lvl3_gref = lvl3_gref; + + return lvl3_gref; +} + +static int virtio_fe_unshare_pages(void **refs_info, + int nents) +{ + struct virtio_shared_pages_info *sh_pages_info; + int n_lvl2_grefs = (nents/REFS_PER_PAGE + + ((nents % REFS_PER_PAGE) ? 
+
+	sh_pages_info = (struct virtio_shared_pages_info *)(*refs_info);
+
+	if (sh_pages_info == NULL) {
+		dev_err(hy_drv_priv->dev,
+			"No pages info\n");
+		return -EINVAL;
+	}
+
+	/* free_pages() takes an allocation order, not a page count */
+	free_pages((unsigned long)sh_pages_info->lvl2_table,
+		   get_order(n_lvl2_grefs * PAGE_SIZE));
+	free_pages((unsigned long)sh_pages_info->lvl3_table, 1);
+
+	kfree(sh_pages_info);
+
+	return 0;
+}
+
+static struct page **virtio_fe_map_shared_pages(unsigned long lvl3_gref,
+						int vmid, int nents,
+						void **refs_info)
+{
+	dev_dbg(hy_drv_priv->dev,
+		"Page mapping is currently not supported by the virtio frontend\n");
+	return NULL;
+}
+
+static int virtio_fe_unmap_shared_pages(void **refs_info, int nents)
+{
+	dev_dbg(hy_drv_priv->dev,
+		"Page mapping is currently not supported by the virtio frontend\n");
+	return -EINVAL;
+}
+
+#endif
+
+int virtio_share_pages(struct page **pages,
+		       int domid, int nents,
+		       void **refs_info)
+{
+	int ret;
+#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE
+	ret = virtio_be_share_pages(pages, domid, nents, refs_info);
+#else
+	ret = virtio_fe_share_pages(pages, domid, nents, refs_info);
+#endif
+	return ret;
+}
+
+int virtio_unshare_pages(void **refs_info, int nents)
+{
+	int ret;
+#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE
+	ret = virtio_be_unshare_pages(refs_info, nents);
+#else
+	ret = virtio_fe_unshare_pages(refs_info, nents);
+#endif
+	return ret;
+}
+
+struct page **virtio_map_shared_pages(unsigned long lvl3_gref,
+				      int vmid, int nents,
+				      void **refs_info)
+{
+	struct page **ret;
+#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE
+	ret = virtio_be_map_shared_pages(lvl3_gref, vmid,
+					 nents, refs_info);
+#else
+	ret = virtio_fe_map_shared_pages(lvl3_gref, vmid,
+					 nents, refs_info);
+#endif
+	return ret;
+}
+
+int virtio_unmap_shared_pages(void **refs_info, int nents)
+{
+	int ret;
+#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE
+	ret = virtio_be_unmap_shared_pages(refs_info, nents);
+#else
+	ret = virtio_fe_unmap_shared_pages(refs_info, nents);
+#endif
+	return ret;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.h b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.h
new file mode 100644
index 000000000000..05cbf5779f86
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_VIRTIO_SHM_H__
+#define __HYPER_DMABUF_VIRTIO_SHM_H__
+
+int virtio_share_pages(struct page **pages,
+		       int domid, int nents,
+		       void **refs_info);
+
+int virtio_unshare_pages(void **refs_info, int nents);
+
+struct page **virtio_map_shared_pages(unsigned long lvl3_gref,
+				      int vmid, int nents,
+				      void **refs_info);
+
+int virtio_unmap_shared_pages(void **refs_info, int nents);
+
+#endif /* __HYPER_DMABUF_VIRTIO_SHM_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
new file mode 100644
index 000000000000..3dd49db66e31
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
@@ -0,0 +1,951 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dongwon Kim
+ *    Mateusz Polrola
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "hyper_dmabuf_xen_comm.h"
+#include "hyper_dmabuf_xen_comm_list.h"
+#include "../hyper_dmabuf_drv.h"
+
+static int export_req_id;
+
+struct hyper_dmabuf_req req_pending = {0};
+
+static void xen_get_domid_delayed(struct work_struct *unused);
+static void xen_init_comm_env_delayed(struct work_struct *unused);
+
+static DECLARE_DELAYED_WORK(get_vm_id_work, xen_get_domid_delayed);
+static DECLARE_DELAYED_WORK(xen_init_comm_env_work, xen_init_comm_env_delayed);
+
+/* Creates an entry in Xenstore that will keep details of all
+ * exporter rings created by this domain
+ */
+static int xen_comm_setup_data_dir(void)
+{
+	char buf[255];
+
+	sprintf(buf, "/local/domain/%d/data/hyper_dmabuf",
+		hy_drv_priv->domid);
+
+	return xenbus_mkdir(XBT_NIL, buf, "");
+}
+
+/* Removes the entry with exporter ring details from Xenstore.
+ * Other domains that have connected to any of the exporter rings
+ * created by this domain will be notified about the removal of
+ * this entry and will treat that as a signal to clean up the
+ * importer rings created for this domain
+ */
+static int xen_comm_destroy_data_dir(void)
+{
+	char buf[255];
+
+	sprintf(buf, "/local/domain/%d/data/hyper_dmabuf",
+		hy_drv_priv->domid);
+
+	return xenbus_rm(XBT_NIL, buf, "");
+}
+
+/* Adds Xenstore entries with details of the exporter ring created
+ * for the given remote domain. A special daemon running in dom0 is
+ * required to make sure that the remote domain gets the permissions
+ * needed to access that data.
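+ *
+ * For illustration, once both domains (say 1 and 2) have created their
+ * exporter rings, Xenstore ends up with entries like the following
+ * (values are examples only):
+ *
+ *   /local/domain/1/data/hyper_dmabuf/2/grefid = "853"
+ *   /local/domain/1/data/hyper_dmabuf/2/port   = "17"
+ *   /local/domain/2/data/hyper_dmabuf/1/grefid = "421"
+ *   /local/domain/2/data/hyper_dmabuf/1/port   = "9"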
+ */
+static int xen_comm_expose_ring_details(int domid, int rdomid,
+					int gref, int port)
+{
+	char buf[255];
+	int ret;
+
+	sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
+		domid, rdomid);
+
+	ret = xenbus_printf(XBT_NIL, buf, "grefid", "%d", gref);
+
+	if (ret) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to write xenbus entry %s: %d\n",
+			buf, ret);
+
+		return ret;
+	}
+
+	ret = xenbus_printf(XBT_NIL, buf, "port", "%d", port);
+
+	if (ret) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to write xenbus entry %s: %d\n",
+			buf, ret);
+
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Queries details of the ring exposed by the remote domain.
+ */
+static int xen_comm_get_ring_details(int domid, int rdomid,
+				     int *grefid, int *port)
+{
+	char buf[255];
+	int ret;
+
+	sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
+		rdomid, domid);
+
+	ret = xenbus_scanf(XBT_NIL, buf, "grefid", "%d", grefid);
+
+	if (ret <= 0) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to read xenbus entry %s: %d\n",
+			buf, ret);
+
+		return 1;
+	}
+
+	ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", port);
+
+	if (ret <= 0) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to read xenbus entry %s: %d\n",
+			buf, ret);
+
+		return 1;
+	}
+
+	return 0;
+}
+
+static void xen_get_domid_delayed(struct work_struct *unused)
+{
+	struct xenbus_transaction xbt;
+	int domid, ret;
+
+	/* schedule another attempt if the driver is still running
+	 * and xenstore has not been initialized yet
+	 */
+	if (likely(xenstored_ready == 0)) {
+		dev_dbg(hy_drv_priv->dev,
+			"Xenstore is not ready yet. Will retry in 500ms\n");
+		schedule_delayed_work(&get_vm_id_work, msecs_to_jiffies(500));
+	} else {
+		xenbus_transaction_start(&xbt);
+
+		ret = xenbus_scanf(xbt, "domid", "", "%d", &domid);
+
+		if (ret <= 0)
+			domid = -1;
+
+		xenbus_transaction_end(xbt, 0);
+
+		/* try again since -1 is an invalid id for a domain
+		 * (but only if the driver is still running)
+		 */
+		if (unlikely(domid == -1)) {
+			dev_dbg(hy_drv_priv->dev,
+				"domid==-1 is invalid. Will retry it in 500ms\n");
+			schedule_delayed_work(&get_vm_id_work,
+					      msecs_to_jiffies(500));
+		} else {
+			dev_info(hy_drv_priv->dev,
+				 "Successfully retrieved domid from Xenstore:%d\n",
+				 domid);
+			hy_drv_priv->domid = domid;
+		}
+	}
+}
+
+int xen_be_get_domid(void)
+{
+	struct xenbus_transaction xbt;
+	int domid;
+
+	if (unlikely(xenstored_ready == 0)) {
+		xen_get_domid_delayed(NULL);
+		return -1;
+	}
+
+	xenbus_transaction_start(&xbt);
+
+	if (!xenbus_scanf(xbt, "domid", "", "%d", &domid))
+		domid = -1;
+
+	xenbus_transaction_end(xbt, 0);
+
+	return domid;
+}
+
+static int xen_comm_next_req_id(void)
+{
+	export_req_id++;
+	return export_req_id;
+}
+
+/* For now cache the latest rings in global variables. TODO: keep them in a list */
+static irqreturn_t front_ring_isr(int irq, void *info);
+static irqreturn_t back_ring_isr(int irq, void *info);
+
+/* Callback function that will be called on any change of the xenbus
+ * path being watched. Used for detecting creation/destruction of the
+ * remote domain's exporter ring.
+ *
+ * When the remote domain's exporter ring is detected, an importer ring
+ * is created in this domain.
+ *
+ * When destruction of the remote domain's exporter ring is detected,
+ * the importer ring in this domain is cleaned up.
+ *
+ * Destruction can be caused by the remote domain unloading the module,
+ * or by its crash/forced shutdown.
+ */
+static void remote_dom_exporter_watch_cb(struct xenbus_watch *watch,
+					 const char *path, const char *token)
+{
+	int rdom, ret;
+	uint32_t grefid, port;
+	struct xen_comm_rx_ring_info *ring_info;
+
+	/* Check which domain has changed its exporter rings */
+	ret = sscanf(watch->node, "/local/domain/%d/", &rdom);
+	if (ret <= 0)
+		return;
+
+	/* Check if we already have an importer ring created for the
+	 * given remote domain
+	 */
+	ring_info = xen_comm_find_rx_ring(rdom);
+
+	/* Try to query the remote domain's exporter ring details. If
+	 * that fails and we have an importer ring, the remote domain
+	 * has cleaned up its exporter ring, so our importer ring is no
+	 * longer useful.
+	 *
+	 * If the query succeeds and we don't have an importer ring, the
+	 * remote domain has set one up for us and we should connect to it.
+	 */
+
+	ret = xen_comm_get_ring_details(xen_be_get_domid(),
+					rdom, &grefid, &port);
+
+	if (ring_info && ret != 0) {
+		dev_info(hy_drv_priv->dev,
+			 "Remote exporter closed, cleaning up importer\n");
+		xen_be_cleanup_rx_rbuf(rdom);
+	} else if (!ring_info && ret == 0) {
+		dev_info(hy_drv_priv->dev,
+			 "Registering importer\n");
+		xen_be_init_rx_rbuf(rdom);
+	}
+}
+
+/* the exporter needs to generate info for page sharing */
+int xen_be_init_tx_rbuf(int domid)
+{
+	struct xen_comm_tx_ring_info *ring_info;
+	struct xen_comm_sring *sring;
+	struct evtchn_alloc_unbound alloc_unbound;
+	struct evtchn_close close;
+
+	void *shared_ring;
+	int ret;
+
+	/* check if there's any existing tx channel in the table */
+	ring_info = xen_comm_find_tx_ring(domid);
+
+	if (ring_info) {
+		dev_info(hy_drv_priv->dev,
+			 "tx ring ch to domid = %d already exists\ngref = %d, port = %d\n",
+			 ring_info->rdomain, ring_info->gref_ring, ring_info->port);
+		return 0;
+	}
+
+	ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL);
+
+	if (!ring_info)
+		return -ENOMEM;
+
+	/* from exporter to importer */
+	shared_ring = (void *)__get_free_pages(GFP_KERNEL, 1);
+	if (!shared_ring) {
+		kfree(ring_info);
+		return -ENOMEM;
+	}
+
+	sring = (struct xen_comm_sring *) shared_ring;
+
+	SHARED_RING_INIT(sring);
+
+	FRONT_RING_INIT(&(ring_info->ring_front), sring, PAGE_SIZE);
+
+	ring_info->gref_ring = gnttab_grant_foreign_access(domid,
+							   virt_to_mfn(shared_ring),
+							   0);
+	if (ring_info->gref_ring < 0) {
+		/* failed to get a gref */
+		free_pages((unsigned long)shared_ring, 1);
+		kfree(ring_info);
+		return -EFAULT;
+	}
+
+	alloc_unbound.dom = DOMID_SELF;
+	alloc_unbound.remote_dom = domid;
+	ret = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
+					  &alloc_unbound);
+	if (ret) {
+		dev_err(hy_drv_priv->dev,
+			"Cannot allocate event channel\n");
+		gnttab_end_foreign_access(ring_info->gref_ring, 0,
+					  (unsigned long)shared_ring);
+		kfree(ring_info);
+		return -EIO;
+	}
+
+	/* setting up interrupt */
+	ret = bind_evtchn_to_irqhandler(alloc_unbound.port,
+					front_ring_isr, 0,
+					NULL, (void *) ring_info);
+
+	if (ret < 0) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to setup event channel\n");
+		close.port = alloc_unbound.port;
+		HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
+		/* pass the page address so gnttab frees the page for us */
+		gnttab_end_foreign_access(ring_info->gref_ring, 0,
+					  (unsigned long)shared_ring);
+		kfree(ring_info);
+		return -EIO;
+	}
+
+	ring_info->rdomain = domid;
+	ring_info->irq = ret;
+	ring_info->port = alloc_unbound.port;
+
+	mutex_init(&ring_info->lock);
+
+	dev_dbg(hy_drv_priv->dev,
+		"%s: allocated eventchannel gref %d port: %d irq: %d\n",
+		__func__,
+		ring_info->gref_ring,
+		ring_info->port,
+		ring_info->irq);
+
+	ret = xen_comm_add_tx_ring(ring_info);
+
+	if (ret < 0) {
+		kfree(ring_info);
+		return -ENOMEM;
+	}
+
+	ret = xen_comm_expose_ring_details(xen_be_get_domid(),
					   domid,
+					   ring_info->gref_ring,
+					   ring_info->port);
+
+	/* Register a watch for the remote domain's exporter ring.
+	 * When the remote domain sets up its exporter ring,
+	 * we will automatically connect our importer ring to it.
+	 */
+	ring_info->watch.callback = remote_dom_exporter_watch_cb;
+	ring_info->watch.node = kmalloc(255, GFP_KERNEL);
+
+	if (!ring_info->watch.node) {
+		kfree(ring_info);
+		return -ENOMEM;
+	}
+
+	sprintf((char *)ring_info->watch.node,
+		"/local/domain/%d/data/hyper_dmabuf/%d/port",
+		domid, xen_be_get_domid());
+
+	register_xenbus_watch(&ring_info->watch);
+
+	return ret;
+}
+
+/* cleans up the exporter ring created for the given remote domain */
+void xen_be_cleanup_tx_rbuf(int domid)
+{
+	struct xen_comm_tx_ring_info *ring_info;
+	struct xen_comm_rx_ring_info *rx_ring_info;
+
+	/* check if we have an exporter ring for the given rdomain at all */
+	ring_info = xen_comm_find_tx_ring(domid);
+
+	if (!ring_info)
+		return;
+
+	xen_comm_remove_tx_ring(domid);
+
+	unregister_xenbus_watch(&ring_info->watch);
+	kfree(ring_info->watch.node);
+
+	/* No need to close the communication channel;
+	 * unbind_from_irqhandler() takes care of that
+	 */
+	unbind_from_irqhandler(ring_info->irq, (void *) ring_info);
+
+	/* No need to free the sring page; gnttab_end_foreign_access()
+	 * frees it once the other side ends its access
+	 */
+	gnttab_end_foreign_access(ring_info->gref_ring, 0,
+				  (unsigned long) ring_info->ring_front.sring);
+
+	kfree(ring_info);
+
+	rx_ring_info = xen_comm_find_rx_ring(domid);
+	if (!rx_ring_info)
+		return;
+
+	BACK_RING_INIT(&(rx_ring_info->ring_back),
+		       rx_ring_info->ring_back.sring,
+		       PAGE_SIZE);
+}
+
+/* the importer needs to know the shared page and port numbers for
+ * the ring buffer and the event channel
+ */
+int xen_be_init_rx_rbuf(int domid)
+{
+	struct xen_comm_rx_ring_info *ring_info;
+	struct xen_comm_sring *sring;
+
+	struct page *shared_ring;
+
+	struct gnttab_map_grant_ref *map_ops;
+
+	int ret;
+	int rx_gref, rx_port;
+
+	/* check if there's an existing rx ring channel */
+	ring_info = xen_comm_find_rx_ring(domid);
+
+	if (ring_info) {
+		dev_info(hy_drv_priv->dev,
+			 "rx ring ch from domid = %d already exists\n",
+			 ring_info->sdomain);
+
+		return 0;
+	}
+
+	ret = xen_comm_get_ring_details(xen_be_get_domid(), domid,
+					&rx_gref, &rx_port);
+
+	if (ret) {
+		dev_err(hy_drv_priv->dev,
+			"Domain %d has not created exporter ring for current domain\n",
+			domid);
+
+		return ret;
+	}
+
+	ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL);
+
+	if (!ring_info)
+		return -ENOMEM;
+
+	ring_info->sdomain = domid;
+	ring_info->evtchn = rx_port;
+
+	map_ops = kmalloc(sizeof(*map_ops), GFP_KERNEL);
+
+	if (!map_ops) {
+		ret = -ENOMEM;
+		goto fail_no_map_ops;
+	}
+
+	if (gnttab_alloc_pages(1, &shared_ring)) {
+		ret = -ENOMEM;
+		goto fail_others;
+	}
+
+	gnttab_set_map_op(&map_ops[0],
+			  (unsigned long)pfn_to_kaddr(
+					page_to_pfn(shared_ring)),
+			  GNTMAP_host_map, rx_gref, domid);
+
+	gnttab_set_unmap_op(&ring_info->unmap_op,
+			    (unsigned long)pfn_to_kaddr(
+					page_to_pfn(shared_ring)),
+			    GNTMAP_host_map, -1);
+
+	ret = gnttab_map_refs(map_ops, NULL, &shared_ring, 1);
+	if (ret < 0) {
+		dev_err(hy_drv_priv->dev, "Cannot map ring\n");
+		ret = -EFAULT;
+		goto fail_others;
+	}
+
+	if (map_ops[0].status) {
+		dev_err(hy_drv_priv->dev, "Ring mapping failed\n");
+		ret = -EFAULT;
+		goto fail_others;
+	} else {
+		ring_info->unmap_op.handle = map_ops[0].handle;
+	}
+
+	kfree(map_ops);
+	/* clear the pointer so the error paths below don't free it twice */
+	map_ops = NULL;
+
+	sring = (struct xen_comm_sring *)pfn_to_kaddr(page_to_pfn(shared_ring));
+
+	BACK_RING_INIT(&ring_info->ring_back, sring, PAGE_SIZE);
+
+	ret = bind_interdomain_evtchn_to_irq(domid, rx_port);
+
+	if (ret < 0) {
+		ret = -EIO;
+		goto fail_others;
+	}
+
+	ring_info->irq = ret;
+
+	dev_dbg(hy_drv_priv->dev,
+		"%s: bound to eventchannel port: %d irq: %d\n", __func__,
+		rx_port,
+		ring_info->irq);
+
+	ret = xen_comm_add_rx_ring(ring_info);
+
+	if (ret < 0) {
+		ret = -ENOMEM;
+		goto fail_others;
+	}
+
+	/* Set up the communication channel in the opposite direction */
+	if (!xen_comm_find_tx_ring(domid))
+		ret = xen_be_init_tx_rbuf(domid);
+
+	ret = request_irq(ring_info->irq,
+			  back_ring_isr, 0,
+			  NULL, (void *)ring_info);
+
+	return ret;
+
+fail_others:
+	kfree(map_ops);
+
+fail_no_map_ops:
+	kfree(ring_info);
+
+	return ret;
+}
+
+/* cleans up the importer ring created for the given source domain */
+void xen_be_cleanup_rx_rbuf(int domid)
+{
+	struct xen_comm_rx_ring_info *ring_info;
+	struct xen_comm_tx_ring_info *tx_ring_info;
+	struct page *shared_ring;
+
+	/* check if we have an importer ring created for the given sdomain */
+	ring_info = xen_comm_find_rx_ring(domid);
+
+	if (!ring_info)
+		return;
+
+	xen_comm_remove_rx_ring(domid);
+
+	/* no need to close the event channel; unbind_from_irqhandler()
+	 * takes care of that
+	 */
+	unbind_from_irqhandler(ring_info->irq, (void *)ring_info);
+
+	/* unmapping shared ring page */
+	shared_ring = virt_to_page(ring_info->ring_back.sring);
+	gnttab_unmap_refs(&ring_info->unmap_op, NULL, &shared_ring, 1);
+	gnttab_free_pages(1, &shared_ring);
+
+	kfree(ring_info);
+
+	tx_ring_info = xen_comm_find_tx_ring(domid);
+	if (!tx_ring_info)
+		return;
+
+	SHARED_RING_INIT(tx_ring_info->ring_front.sring);
+	FRONT_RING_INIT(&(tx_ring_info->ring_front),
+			tx_ring_info->ring_front.sring,
+			PAGE_SIZE);
+}
+
+#ifdef CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
+
+static void xen_rx_ch_add_delayed(struct work_struct *unused);
+
+static DECLARE_DELAYED_WORK(xen_rx_ch_auto_add_work, xen_rx_ch_add_delayed);
+
+#define DOMID_SCAN_START 1 /* domid = 1 */
+#define DOMID_SCAN_END 10 /* domid = 10 */
+
+static void xen_rx_ch_add_delayed(struct work_struct *unused)
+{
+	int ret;
+	char buf[128];
+	int i, dummy;
+
+	dev_dbg(hy_drv_priv->dev,
+		"Scanning for new tx channels coming from other domains\n");
+
+	/* check other domains and schedule more work if the driver
+	 * is still running and the backend is valid
+	 */
+	if (hy_drv_priv &&
+	    hy_drv_priv->initialized) {
+		for (i = DOMID_SCAN_START; i < DOMID_SCAN_END + 1; i++) {
+			if (i == hy_drv_priv->domid)
+				continue;
+
+			sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
+				i, hy_drv_priv->domid);
+
+			ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", &dummy);
+
+			if (ret > 0) {
+				if (xen_comm_find_rx_ring(i) != NULL)
+					continue;
+
+				ret = xen_be_init_rx_rbuf(i);
+
+				if (!ret)
+					dev_info(hy_drv_priv->dev,
+						 "Done rx ch init for VM %d\n",
+						 i);
+			}
+		}
+
+		/* check every 10 seconds */
+		schedule_delayed_work(&xen_rx_ch_auto_add_work,
+				      msecs_to_jiffies(10000));
+	}
+}
+
+#endif /* CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD */
+
+void xen_init_comm_env_delayed(struct work_struct *unused)
+{
+	int ret;
+
+	/* schedule more work if the driver is still running
+	 * and xenstore hasn't been initialized or the dom_id hasn't
+	 * been correctly retrieved yet.
+	 */
+	if (likely(xenstored_ready == 0 ||
+	    hy_drv_priv->domid == -1)) {
+		dev_dbg(hy_drv_priv->dev,
+			"Xenstore not ready. Will retry in 500ms\n");
+		schedule_delayed_work(&xen_init_comm_env_work,
+				      msecs_to_jiffies(500));
+	} else {
+		ret = xen_comm_setup_data_dir();
+		if (ret < 0) {
+			dev_err(hy_drv_priv->dev,
+				"Failed to create data dir in Xenstore\n");
+		} else {
+			dev_info(hy_drv_priv->dev,
+				 "Successfully finished comm env init\n");
+			hy_drv_priv->initialized = true;
+
+#ifdef CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
+			xen_rx_ch_add_delayed(NULL);
+#endif /* CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD */
+		}
+	}
+}
+
+int xen_be_init_comm_env(void)
+{
+	int ret;
+
+	xen_comm_ring_table_init();
+
+	if (unlikely(xenstored_ready == 0 ||
+	    hy_drv_priv->domid == -1)) {
+		xen_init_comm_env_delayed(NULL);
+		return -1;
+	}
+
+	ret = xen_comm_setup_data_dir();
+	if (ret < 0) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to create data dir in Xenstore\n");
+	} else {
+		dev_info(hy_drv_priv->dev,
+			 "Successfully finished comm env initialization\n");
+
+		hy_drv_priv->initialized = true;
+	}
+
+	return ret;
+}
+
+/* cleans up all tx/rx rings */
+static void xen_be_cleanup_all_rbufs(void)
+{
+	xen_comm_foreach_tx_ring(xen_be_cleanup_tx_rbuf);
+	xen_comm_foreach_rx_ring(xen_be_cleanup_rx_rbuf);
+}
+
+void xen_be_destroy_comm(void)
+{
+	xen_be_cleanup_all_rbufs();
+	xen_comm_destroy_data_dir();
+}
+
+int xen_be_send_req(int domid, struct hyper_dmabuf_req *req,
+		    int wait)
+{
+	struct xen_comm_front_ring *ring;
+	struct hyper_dmabuf_req *new_req;
+	struct xen_comm_tx_ring_info *ring_info;
+	int notify;
+
+	struct timeval tv_start, tv_end;
+	struct timeval tv_diff;
+
+	int timeout = 1000;
+
+	/* find a ring info for the channel */
+	ring_info = xen_comm_find_tx_ring(domid);
+	if (!ring_info) {
+		dev_err(hy_drv_priv->dev,
+			"Can't find ring info for the channel\n");
+		return -ENOENT;
+	}
+
+
+	ring = &ring_info->ring_front;
+
+	do_gettimeofday(&tv_start);
+
+	while (RING_FULL(ring)) {
+		dev_dbg(hy_drv_priv->dev, "RING_FULL\n");
+
+		if (timeout == 0) {
+			dev_err(hy_drv_priv->dev,
+				"Timeout while waiting for an entry in the ring\n");
+			return -EIO;
+		}
+		usleep_range(100, 120);
+		timeout--;
+	}
+
+	timeout = 1000;
+
+	mutex_lock(&ring_info->lock);
+
+	new_req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
+	if (!new_req) {
+		mutex_unlock(&ring_info->lock);
+		dev_err(hy_drv_priv->dev,
+			"NULL REQUEST\n");
+		return -EIO;
+	}
+
+	req->req_id = xen_comm_next_req_id();
+
+	/* update req_pending with current request */
+	memcpy(&req_pending, req, sizeof(req_pending));
+
+	/* pass current request to the ring */
+	memcpy(new_req, req, sizeof(*new_req));
+
+	ring->req_prod_pvt++;
+
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
+	if (notify)
+		notify_remote_via_irq(ring_info->irq);
+
+	if (wait) {
+		while (timeout--) {
+			if (req_pending.stat !=
+			    HYPER_DMABUF_REQ_NOT_RESPONDED)
+				break;
+			usleep_range(100, 120);
+		}
+
+		if (timeout < 0) {
+			mutex_unlock(&ring_info->lock);
+			dev_err(hy_drv_priv->dev,
+				"request timed-out\n");
+			return -EBUSY;
+		}
+
+		/* the lock is released once, below, after the round-trip
+		 * timing; unlocking here as well would unlock twice
+		 */
+		do_gettimeofday(&tv_end);
+
+		/* checking time duration for round-trip of a request
+		 * for debugging
+		 */
+		if (tv_end.tv_usec >= tv_start.tv_usec) {
+			tv_diff.tv_sec = tv_end.tv_sec-tv_start.tv_sec;
+			tv_diff.tv_usec = tv_end.tv_usec-tv_start.tv_usec;
+		} else {
+			tv_diff.tv_sec = tv_end.tv_sec-tv_start.tv_sec-1;
+			tv_diff.tv_usec = tv_end.tv_usec+1000000-
+					tv_start.tv_usec;
+		}
+
+		if (tv_diff.tv_sec != 0 || tv_diff.tv_usec > 16000)
			dev_dbg(hy_drv_priv->dev,
+				"send_req:time diff: %ld sec, %ld usec\n",
+				tv_diff.tv_sec, tv_diff.tv_usec);
+	}
+
+	mutex_unlock(&ring_info->lock);
+
+	return 0;
+}
+
+/* ISR for handling requests */
+static irqreturn_t back_ring_isr(int irq, void *info)
+{
+	RING_IDX rc, rp;
+	struct hyper_dmabuf_req req;
+	struct hyper_dmabuf_resp resp;
+
+	int notify, more_to_do;
+	int ret;
+
+	struct xen_comm_rx_ring_info *ring_info;
+	struct xen_comm_back_ring *ring;
+
+	ring_info = (struct xen_comm_rx_ring_info *)info;
+	ring = &ring_info->ring_back;
+
+	dev_dbg(hy_drv_priv->dev, "%s\n", __func__);
+
+	do {
+		rc = ring->req_cons;
+		rp = ring->sring->req_prod;
+		more_to_do = 0;
+		while (rc != rp) {
+			if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
+				break;
+
+			memcpy(&req, RING_GET_REQUEST(ring, rc), sizeof(req));
+			ring->req_cons = ++rc;
+
+			ret = hyper_dmabuf_msg_parse(ring_info->sdomain, &req);
+
+			if (ret > 0) {
+				/* prepare a response for the request and
+				 * send it to the requester
+				 */
+				memcpy(&resp, &req, sizeof(resp));
+				memcpy(RING_GET_RESPONSE(ring,
+							 ring->rsp_prod_pvt),
+				       &resp, sizeof(resp));
+				ring->rsp_prod_pvt++;
+
+				dev_dbg(hy_drv_priv->dev,
+					"responding to exporter for req:%d\n",
+					resp.resp_id);
+
+				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring,
+								     notify);
+
+				if (notify)
+					notify_remote_via_irq(ring_info->irq);
+			}
+
+			RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do);
+		}
+	} while (more_to_do);
+
+	return IRQ_HANDLED;
+}
+
+/* ISR for handling responses */
+static irqreturn_t front_ring_isr(int irq, void *info)
+{
+	/* the front ring only cares about responses from the back */
+	struct hyper_dmabuf_resp *resp;
+	RING_IDX i, rp;
+	int more_to_do, ret;
+
+	struct xen_comm_tx_ring_info *ring_info;
+	struct xen_comm_front_ring *ring;
+
+	ring_info = (struct xen_comm_tx_ring_info *)info;
+	ring = &ring_info->ring_front;
+
+	dev_dbg(hy_drv_priv->dev, "%s\n", __func__);
+
+	do {
+		more_to_do = 0;
+		rp = ring->sring->rsp_prod;
+		for (i = ring->rsp_cons; i != rp; i++) {
+			resp = RING_GET_RESPONSE(ring, i);
+
+			/* update the pending request's status with what is
+			 * in the response
+			 */
+
+			dev_dbg(hy_drv_priv->dev,
+				"getting response from importer\n");
+
+			if (req_pending.req_id == resp->resp_id)
+				req_pending.stat = resp->stat;
+
+			if (resp->stat == HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP) {
+				/* parsing response */
+				ret = hyper_dmabuf_msg_parse(ring_info->rdomain,
+							     (struct hyper_dmabuf_req *)resp);
+
+				if (ret < 0) {
+					dev_err(hy_drv_priv->dev,
+						"err while parsing resp\n");
+				}
+			} else if (resp->stat == HYPER_DMABUF_REQ_PROCESSED) {
+				/* for debugging dma_buf remote synch */
+				dev_dbg(hy_drv_priv->dev,
+					"original request = 0x%x\n", resp->cmd);
+				dev_dbg(hy_drv_priv->dev,
+					"got HYPER_DMABUF_REQ_PROCESSED\n");
+			} else if (resp->stat == HYPER_DMABUF_REQ_ERROR) {
+				/* for debugging dma_buf remote synch */
+				dev_dbg(hy_drv_priv->dev,
+					"original request = 0x%x\n", resp->cmd);
+				dev_dbg(hy_drv_priv->dev,
+					"got HYPER_DMABUF_REQ_ERROR\n");
+			}
+		}
+
+		ring->rsp_cons = i;
+
+		if (i != ring->req_prod_pvt)
+			RING_FINAL_CHECK_FOR_RESPONSES(ring, more_to_do);
+		else
+			ring->sring->rsp_event = i+1;
+
+	} while (more_to_do);
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
new file mode 100644
index 000000000000..70a2b704badd
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_XEN_COMM_H__
+#define __HYPER_DMABUF_XEN_COMM_H__
+
+#include "xen/interface/io/ring.h"
+#include "xen/xenbus.h"
+#include "../hyper_dmabuf_msg.h"
+
+extern int xenstored_ready;
+
+DEFINE_RING_TYPES(xen_comm, struct hyper_dmabuf_req, struct hyper_dmabuf_resp);
+
+struct xen_comm_tx_ring_info {
+	struct xen_comm_front_ring ring_front;
+	int rdomain;
+	int gref_ring;
+	int irq;
+	int port;
+	struct mutex lock;
+	struct xenbus_watch watch;
+};
+
+struct xen_comm_rx_ring_info {
+	int sdomain;
+	int irq;
+	int evtchn;
+	struct xen_comm_back_ring ring_back;
+	struct gnttab_unmap_grant_ref unmap_op;
+};
+
+int xen_be_get_domid(void);
+
+int xen_be_init_comm_env(void);
+
+/* the exporter needs to generate info for page sharing */
+int xen_be_init_tx_rbuf(int domid);
+
+/* the importer needs to know the shared page and port numbers
+ * for the ring buffer and the event channel
+ */
+int xen_be_init_rx_rbuf(int domid);
+
+/* cleans up the exporter ring created for the given domain */
+void xen_be_cleanup_tx_rbuf(int domid);
+
+/* cleans up the importer ring created for the given domain */
+void xen_be_cleanup_rx_rbuf(int domid);
+
+void xen_be_destroy_comm(void);
+
+/* send a request to the remote domain */
+int xen_be_send_req(int domid, struct hyper_dmabuf_req *req,
+		    int wait);
+
+#endif /* __HYPER_DMABUF_XEN_COMM_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
new file mode 100644
index 000000000000..15023dbc8ced
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
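For readers unfamiliar with the Xen ring macros: DEFINE_RING_TYPES(xen_comm, ...) in the header above expands, roughly, to the shared ring plus the private front/back views used by the ISRs earlier in this patch. A simplified sketch of what the macro generates (the real macro in xen/interface/io/ring.h adds padding and sizing details):

union xen_comm_sring_entry {
	struct hyper_dmabuf_req req;
	struct hyper_dmabuf_resp rsp;
};

/* one page shared between the two domains */
struct xen_comm_sring {
	RING_IDX req_prod, req_event;
	RING_IDX rsp_prod, rsp_event;
	/* ... padding ... */
	union xen_comm_sring_entry ring[1]; /* sized to fill the page */
};

/* exporter-side private view (producer of requests) */
struct xen_comm_front_ring {
	RING_IDX req_prod_pvt;
	RING_IDX rsp_cons;
	unsigned int nr_ents;
	struct xen_comm_sring *sring;
};

/* importer-side private view (producer of responses) */
struct xen_comm_back_ring {
	RING_IDX rsp_prod_pvt;
	RING_IDX req_cons;
	unsigned int nr_ents;
	struct xen_comm_sring *sring;
};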
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_xen_comm.h" +#include "hyper_dmabuf_xen_comm_list.h" + +DECLARE_HASHTABLE(xen_comm_tx_ring_hash, MAX_ENTRY_TX_RING); +DECLARE_HASHTABLE(xen_comm_rx_ring_hash, MAX_ENTRY_RX_RING); + +void xen_comm_ring_table_init(void) +{ + hash_init(xen_comm_rx_ring_hash); + hash_init(xen_comm_tx_ring_hash); +} + +int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info) +{ + struct xen_comm_tx_ring_info_entry *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->info = ring_info; + + hash_add(xen_comm_tx_ring_hash, &info_entry->node, + info_entry->info->rdomain); + + return 0; +} + +int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info) +{ + struct xen_comm_rx_ring_info_entry *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->info = ring_info; + + hash_add(xen_comm_rx_ring_hash, &info_entry->node, + info_entry->info->sdomain); + + return 0; +} + +struct xen_comm_tx_ring_info *xen_comm_find_tx_ring(int domid) +{ + struct xen_comm_tx_ring_info_entry *info_entry; + int bkt; + + hash_for_each(xen_comm_tx_ring_hash, bkt, info_entry, node) + if (info_entry->info->rdomain == domid) + return info_entry->info; + + return NULL; +} + +struct xen_comm_rx_ring_info *xen_comm_find_rx_ring(int domid) +{ + struct xen_comm_rx_ring_info_entry *info_entry; + int bkt; + + hash_for_each(xen_comm_rx_ring_hash, bkt, info_entry, node) + if (info_entry->info->sdomain == domid) + return info_entry->info; + + return NULL; +} + +int xen_comm_remove_tx_ring(int domid) +{ + struct xen_comm_tx_ring_info_entry *info_entry; + int bkt; + + hash_for_each(xen_comm_tx_ring_hash, bkt, info_entry, node) + if (info_entry->info->rdomain == domid) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + return -ENOENT; +} + +int xen_comm_remove_rx_ring(int domid) +{ + struct xen_comm_rx_ring_info_entry *info_entry; + int bkt; + + hash_for_each(xen_comm_rx_ring_hash, bkt, info_entry, node) + if (info_entry->info->sdomain == domid) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + return -ENOENT; +} + +void xen_comm_foreach_tx_ring(void (*func)(int domid)) +{ + struct xen_comm_tx_ring_info_entry *info_entry; + struct hlist_node *tmp; + int bkt; + + hash_for_each_safe(xen_comm_tx_ring_hash, bkt, tmp, + info_entry, node) { + func(info_entry->info->rdomain); + } +} + +void xen_comm_foreach_rx_ring(void (*func)(int domid)) +{ + struct xen_comm_rx_ring_info_entry *info_entry; + struct hlist_node *tmp; + int bkt; + + hash_for_each_safe(xen_comm_rx_ring_hash, bkt, tmp, + info_entry, node) { + func(info_entry->info->sdomain); + } +} diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h new file mode 100644 
index 000000000000..8502fe7df578
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_XEN_COMM_LIST_H__
+#define __HYPER_DMABUF_XEN_COMM_LIST_H__
+
+/* number of bits to be used for the exporter (tx) rings hash table */
+#define MAX_ENTRY_TX_RING 7
+/* number of bits to be used for the importer (rx) rings hash table */
+#define MAX_ENTRY_RX_RING 7
+
+struct xen_comm_tx_ring_info_entry {
+	struct xen_comm_tx_ring_info *info;
+	struct hlist_node node;
+};
+
+struct xen_comm_rx_ring_info_entry {
+	struct xen_comm_rx_ring_info *info;
+	struct hlist_node node;
+};
+
+void xen_comm_ring_table_init(void);
+
+int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info);
+
+int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info);
+
+int xen_comm_remove_tx_ring(int domid);
+
+int xen_comm_remove_rx_ring(int domid);
+
+struct xen_comm_tx_ring_info *xen_comm_find_tx_ring(int domid);
+
+struct xen_comm_rx_ring_info *xen_comm_find_rx_ring(int domid);
+
+/* iterates over all exporter rings and calls the provided
+ * function for each of them
+ */
+void xen_comm_foreach_tx_ring(void (*func)(int domid));
+
+/* iterates over all importer rings and calls the provided
+ * function for each of them
+ */
+void xen_comm_foreach_rx_ring(void (*func)(int domid));
+
+#endif /* __HYPER_DMABUF_XEN_COMM_LIST_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
new file mode 100644
index 000000000000..14ed3bc51e6a
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_xen_comm.h" +#include "hyper_dmabuf_xen_shm.h" + +struct hyper_dmabuf_bknd_ops xen_bknd_ops = { + .init = NULL, /* not needed for xen */ + .cleanup = NULL, /* not needed for xen */ + .get_vm_id = xen_be_get_domid, + .share_pages = xen_be_share_pages, + .unshare_pages = xen_be_unshare_pages, + .map_shared_pages = (void *)xen_be_map_shared_pages, + .unmap_shared_pages = xen_be_unmap_shared_pages, + .init_comm_env = xen_be_init_comm_env, + .destroy_comm = xen_be_destroy_comm, + .init_rx_ch = xen_be_init_rx_rbuf, + .init_tx_ch = xen_be_init_tx_rbuf, + .send_req = xen_be_send_req, +}; diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h new file mode 100644 index 000000000000..a4902b747a87 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h @@ -0,0 +1,53 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_XEN_DRV_H__ +#define __HYPER_DMABUF_XEN_DRV_H__ +#include + +extern struct hyper_dmabuf_bknd_ops xen_bknd_ops; + +/* Main purpose of this structure is to keep + * all references created or acquired for sharing + * pages with another domain for freeing those later + * when unsharing. 
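+ *
+ * Lifecycle, as can be read from the code that follows: on the exporting
+ * side xen_be_share_pages() fills lvl3_gref, lvl3_table and lvl2_table,
+ * and xen_be_unshare_pages() releases them; on the importing side
+ * xen_be_map_shared_pages() fills unmap_ops and data_pages, and
+ * xen_be_unmap_shared_pages() releases those.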
+ */
+struct xen_shared_pages_info {
+	/* top level refid */
+	grant_ref_t lvl3_gref;
+
+	/* page of top level addressing; it contains refids of 2nd lvl pages */
+	grant_ref_t *lvl3_table;
+
+	/* table of 2nd level pages that contains refids to data pages */
+	grant_ref_t *lvl2_table;
+
+	/* unmap ops for mapped pages */
+	struct gnttab_unmap_grant_ref *unmap_ops;
+
+	/* data pages to be unmapped */
+	struct page **data_pages;
+};
+
+#endif // __HYPER_DMABUF_XEN_DRV_H__
diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
new file mode 100644
index 000000000000..c6a15f187fe3
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dongwon Kim
+ *    Mateusz Polrola
+ *
+ */
+
+#include <linux/slab.h>
+#include <xen/grant_table.h>
+#include <asm/xen/page.h>
+#include "hyper_dmabuf_xen_drv.h"
+#include "../hyper_dmabuf_drv.h"
+
+#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
+
+/*
+ * Creates a 2-level page directory structure for referencing shared pages.
+ * The top level page is a single page that contains up to 1024 refids that
+ * point to 2nd level pages.
+ *
+ * Each 2nd level page contains up to 1024 refids that point to shared
+ * data pages.
+ *
+ * There will always be one top level page, and the number of 2nd level
+ * pages depends on the number of shared data pages.
+ *
+ *   Top level page                2nd level pages                Data pages
+ * +-------------------------+   ┌>+--------------------+     ┌>+------------+
+ * |2nd level page 0 refid   |---┘ |Data page 0 refid   |-----┘ |Data page 0 |
+ * |2nd level page 1 refid   |---┐ |Data page 1 refid   |---┐   +------------+
+ * |           ...           |  | |        ....         |  |
+ * |2nd level page 1023 refid|-┐ | |Data page 1023 refid|  └-->+------------+
+ * +-------------------------+ | | +--------------------+      |Data page 1 |
+ *                             | |                             +------------+
+ *                             | └>+--------------------+
+ *                             |   |Data page 1024 refid|
+ *                             |   |Data page 1025 refid|
+ *                             |   |        ...         |
+ *                             |   |Data page 2047 refid|
+ *                             |   +--------------------+
+ *                             |
+ *                             |           .....
+ *                             └-->+-----------------------+
+ *                                 |Data page 1047552 refid|
+ *                                 |Data page 1047553 refid|
+ *                                 |          ...          |
+ *                                 |Data page 1048575 refid|
+ *                                 +-----------------------+
+ *
+ * Using such a 2-level structure it is possible to reference up to 4GB of
+ * shared data using a single refid pointing to the top level page.
+ *
+ * Returns the refid of the top level page.
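+ *
+ * Worked example, assuming 4 KiB pages and 4-byte grant_ref_t entries:
+ * REFS_PER_PAGE = 4096 / 4 = 1024, so sharing nents = 3000 data pages
+ * takes n_lvl2_grefs = DIV_ROUND_UP(3000, 1024) = 3 second-level pages,
+ * and the whole structure tops out at 1024 * 1024 pages * 4 KiB = 4 GiB,
+ * as stated above.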
+ */
+int xen_be_share_pages(struct page **pages, int domid, int nents,
+		       void **refs_info)
+{
+	grant_ref_t lvl3_gref;
+	grant_ref_t *lvl2_table;
+	grant_ref_t *lvl3_table;
+
+	/*
+	 * Calculate number of pages needed for 2nd level addressing:
+	 */
+	int n_lvl2_grefs = (nents/REFS_PER_PAGE +
+			   ((nents % REFS_PER_PAGE) ? 1 : 0));
+
+	struct xen_shared_pages_info *sh_pages_info;
+	int i;
+
+	lvl3_table = (grant_ref_t *)__get_free_pages(GFP_KERNEL, 1);
+	lvl2_table = (grant_ref_t *)__get_free_pages(GFP_KERNEL, n_lvl2_grefs);
+
+	sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL);
+
+	if (!sh_pages_info) {
+		/* don't leak the directory pages allocated above */
+		free_pages((unsigned long)lvl2_table, n_lvl2_grefs);
+		free_pages((unsigned long)lvl3_table, 1);
+		return -ENOMEM;
+	}
+
+	*refs_info = (void *)sh_pages_info;
+
+	/* share data pages in readonly mode for security */
+	for (i = 0; i < nents; i++) {
+		lvl2_table[i] = gnttab_grant_foreign_access(domid,
+					pfn_to_mfn(page_to_pfn(pages[i])),
+					true /* read only */);
+		if (lvl2_table[i] == -ENOSPC) {
+			dev_err(hy_drv_priv->dev,
+				"No more space left in grant table\n");
+
+			/* Unshare all already shared pages for lvl2 */
+			while (i--) {
+				gnttab_end_foreign_access_ref(lvl2_table[i], 0);
+				gnttab_free_grant_reference(lvl2_table[i]);
+			}
+			goto err_cleanup;
+		}
+	}
+
+	/* Share 2nd level addressing pages in readonly mode */
+	for (i = 0; i < n_lvl2_grefs; i++) {
+		lvl3_table[i] = gnttab_grant_foreign_access(domid,
+					virt_to_mfn(
+					(unsigned long)lvl2_table+i*PAGE_SIZE),
+					true);
+
+		if (lvl3_table[i] == -ENOSPC) {
+			dev_err(hy_drv_priv->dev,
+				"No more space left in grant table\n");
+
+			/* Unshare all already shared pages for lvl3 */
+			while (i--) {
+				gnttab_end_foreign_access_ref(lvl3_table[i], 1);
+				gnttab_free_grant_reference(lvl3_table[i]);
+			}
+
+			/* Unshare all pages for lvl2 */
+			while (nents--) {
+				gnttab_end_foreign_access_ref(
+					lvl2_table[nents], 0);
+				gnttab_free_grant_reference(lvl2_table[nents]);
+			}
+
+			goto err_cleanup;
+		}
+	}
+
+	/* Share lvl3_table in readonly mode */
+	lvl3_gref = gnttab_grant_foreign_access(domid,
+			virt_to_mfn((unsigned long)lvl3_table),
+			true);
+
+	if (lvl3_gref == -ENOSPC) {
+		dev_err(hy_drv_priv->dev,
+			"No more space left in grant table\n");
+
+		/* Unshare all pages for lvl3 */
+		while (i--) {
+			gnttab_end_foreign_access_ref(lvl3_table[i], 1);
+			gnttab_free_grant_reference(lvl3_table[i]);
+		}
+
+		/* Unshare all pages for lvl2 */
+		while (nents--) {
+			gnttab_end_foreign_access_ref(lvl2_table[nents], 0);
+			gnttab_free_grant_reference(lvl2_table[nents]);
+		}
+
+		goto err_cleanup;
+	}
+
+	/* Store lvl3_table page to be freed later */
+	sh_pages_info->lvl3_table = lvl3_table;
+
+	/* Store lvl2_table pages to be freed later */
+	sh_pages_info->lvl2_table = lvl2_table;
+
+	/* Store exported pages refid to be unshared later */
+	sh_pages_info->lvl3_gref = lvl3_gref;
+
+	dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
+	return lvl3_gref;
+
+err_cleanup:
+	free_pages((unsigned long)lvl2_table, n_lvl2_grefs);
+	free_pages((unsigned long)lvl3_table, 1);
+
+	return -ENOSPC;
+}
+
+int xen_be_unshare_pages(void **refs_info, int nents)
+{
+	struct xen_shared_pages_info *sh_pages_info;
+	int n_lvl2_grefs = (nents/REFS_PER_PAGE +
+			   ((nents % REFS_PER_PAGE) ?
1 : 0)); + int i; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + sh_pages_info = (struct xen_shared_pages_info *)(*refs_info); + + if (sh_pages_info->lvl3_table == NULL || + sh_pages_info->lvl2_table == NULL || + sh_pages_info->lvl3_gref == -1) { + dev_warn(hy_drv_priv->dev, + "gref table for hyper_dmabuf already cleaned up\n"); + return 0; + } + + /* End foreign access for data pages, but do not free them */ + for (i = 0; i < nents; i++) { + if (gnttab_query_foreign_access(sh_pages_info->lvl2_table[i])) + dev_warn(hy_drv_priv->dev, "refid not shared !!\n"); + + gnttab_end_foreign_access_ref(sh_pages_info->lvl2_table[i], 0); + gnttab_free_grant_reference(sh_pages_info->lvl2_table[i]); + } + + /* End foreign access for 2nd level addressing pages */ + for (i = 0; i < n_lvl2_grefs; i++) { + if (gnttab_query_foreign_access(sh_pages_info->lvl3_table[i])) + dev_warn(hy_drv_priv->dev, "refid not shared !!\n"); + + if (!gnttab_end_foreign_access_ref( + sh_pages_info->lvl3_table[i], 1)) + dev_warn(hy_drv_priv->dev, "refid still in use!!!\n"); + + gnttab_free_grant_reference(sh_pages_info->lvl3_table[i]); + } + + /* End foreign access for top level addressing page */ + if (gnttab_query_foreign_access(sh_pages_info->lvl3_gref)) + dev_warn(hy_drv_priv->dev, "gref not shared !!\n"); + + gnttab_end_foreign_access_ref(sh_pages_info->lvl3_gref, 1); + gnttab_free_grant_reference(sh_pages_info->lvl3_gref); + + /* freeing all pages used for 2 level addressing */ + free_pages((unsigned long)sh_pages_info->lvl2_table, n_lvl2_grefs); + free_pages((unsigned long)sh_pages_info->lvl3_table, 1); + + sh_pages_info->lvl3_gref = -1; + sh_pages_info->lvl2_table = NULL; + sh_pages_info->lvl3_table = NULL; + kfree(sh_pages_info); + sh_pages_info = NULL; + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return 0; +} + +/* Maps provided top level ref id and then return array of pages + * containing data refs. + */ +struct page **xen_be_map_shared_pages(unsigned long lvl3_gref, int domid, + int nents, void **refs_info) +{ + struct page *lvl3_table_page; + struct page **lvl2_table_pages; + struct page **data_pages; + struct xen_shared_pages_info *sh_pages_info; + + grant_ref_t *lvl3_table; + grant_ref_t *lvl2_table; + + struct gnttab_map_grant_ref lvl3_map_ops; + struct gnttab_unmap_grant_ref lvl3_unmap_ops; + + struct gnttab_map_grant_ref *lvl2_map_ops; + struct gnttab_unmap_grant_ref *lvl2_unmap_ops; + + struct gnttab_map_grant_ref *data_map_ops; + struct gnttab_unmap_grant_ref *data_unmap_ops; + + /* # of grefs in the last page of lvl2 table */ + int nents_last = (nents - 1) % REFS_PER_PAGE + 1; + int n_lvl2_grefs = (nents / REFS_PER_PAGE) + + ((nents_last > 0) ? 
1 : 0) - + (nents_last == REFS_PER_PAGE); + int i, j, k; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + + sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL); + *refs_info = (void *) sh_pages_info; + + lvl2_table_pages = kcalloc(n_lvl2_grefs, sizeof(struct page *), + GFP_KERNEL); + + data_pages = kcalloc(nents, sizeof(struct page *), GFP_KERNEL); + + lvl2_map_ops = kcalloc(n_lvl2_grefs, sizeof(*lvl2_map_ops), + GFP_KERNEL); + + lvl2_unmap_ops = kcalloc(n_lvl2_grefs, sizeof(*lvl2_unmap_ops), + GFP_KERNEL); + + data_map_ops = kcalloc(nents, sizeof(*data_map_ops), GFP_KERNEL); + data_unmap_ops = kcalloc(nents, sizeof(*data_unmap_ops), GFP_KERNEL); + + /* Map top level addressing page */ + if (gnttab_alloc_pages(1, &lvl3_table_page)) { + dev_err(hy_drv_priv->dev, "Cannot allocate pages\n"); + return NULL; + } + + lvl3_table = (grant_ref_t *)pfn_to_kaddr(page_to_pfn(lvl3_table_page)); + + gnttab_set_map_op(&lvl3_map_ops, (unsigned long)lvl3_table, + GNTMAP_host_map | GNTMAP_readonly, + (grant_ref_t)lvl3_gref, domid); + + gnttab_set_unmap_op(&lvl3_unmap_ops, (unsigned long)lvl3_table, + GNTMAP_host_map | GNTMAP_readonly, -1); + + if (gnttab_map_refs(&lvl3_map_ops, NULL, &lvl3_table_page, 1)) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed"); + return NULL; + } + + if (lvl3_map_ops.status) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed status = %d", + lvl3_map_ops.status); + + goto error_cleanup_lvl3; + } else { + lvl3_unmap_ops.handle = lvl3_map_ops.handle; + } + + /* Map all second level pages */ + if (gnttab_alloc_pages(n_lvl2_grefs, lvl2_table_pages)) { + dev_err(hy_drv_priv->dev, "Cannot allocate pages\n"); + goto error_cleanup_lvl3; + } + + for (i = 0; i < n_lvl2_grefs; i++) { + lvl2_table = (grant_ref_t *)pfn_to_kaddr( + page_to_pfn(lvl2_table_pages[i])); + gnttab_set_map_op(&lvl2_map_ops[i], + (unsigned long)lvl2_table, GNTMAP_host_map | + GNTMAP_readonly, + lvl3_table[i], domid); + gnttab_set_unmap_op(&lvl2_unmap_ops[i], + (unsigned long)lvl2_table, GNTMAP_host_map | + GNTMAP_readonly, -1); + } + + /* Unmap top level page, as it won't be needed any longer */ + if (gnttab_unmap_refs(&lvl3_unmap_ops, NULL, + &lvl3_table_page, 1)) { + dev_err(hy_drv_priv->dev, + "xen: cannot unmap top level page\n"); + return NULL; + } + + /* Mark that page was unmapped */ + lvl3_unmap_ops.handle = -1; + + if (gnttab_map_refs(lvl2_map_ops, NULL, + lvl2_table_pages, n_lvl2_grefs)) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed"); + return NULL; + } + + /* Checks if pages were mapped correctly */ + for (i = 0; i < n_lvl2_grefs; i++) { + if (lvl2_map_ops[i].status) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed status = %d", + lvl2_map_ops[i].status); + goto error_cleanup_lvl2; + } else { + lvl2_unmap_ops[i].handle = lvl2_map_ops[i].handle; + } + } + + if (gnttab_alloc_pages(nents, data_pages)) { + dev_err(hy_drv_priv->dev, + "Cannot allocate pages\n"); + goto error_cleanup_lvl2; + } + + k = 0; + + for (i = 0; i < n_lvl2_grefs - 1; i++) { + lvl2_table = pfn_to_kaddr(page_to_pfn(lvl2_table_pages[i])); + for (j = 0; j < REFS_PER_PAGE; j++) { + gnttab_set_map_op(&data_map_ops[k], + (unsigned long)pfn_to_kaddr( + page_to_pfn(data_pages[k])), + GNTMAP_host_map | GNTMAP_readonly, + lvl2_table[j], domid); + + gnttab_set_unmap_op(&data_unmap_ops[k], + (unsigned long)pfn_to_kaddr( + page_to_pfn(data_pages[k])), + GNTMAP_host_map | GNTMAP_readonly, -1); + k++; + } + } + + /* for grefs in the last lvl2 table page */ + lvl2_table = 
pfn_to_kaddr(page_to_pfn( + lvl2_table_pages[n_lvl2_grefs - 1])); + + for (j = 0; j < nents_last; j++) { + gnttab_set_map_op(&data_map_ops[k], + (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])), + GNTMAP_host_map | GNTMAP_readonly, + lvl2_table[j], domid); + + gnttab_set_unmap_op(&data_unmap_ops[k], + (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])), + GNTMAP_host_map | GNTMAP_readonly, -1); + k++; + } + + if (gnttab_map_refs(data_map_ops, NULL, + data_pages, nents)) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed\n"); + return NULL; + } + + /* unmapping lvl2 table pages */ + if (gnttab_unmap_refs(lvl2_unmap_ops, + NULL, lvl2_table_pages, + n_lvl2_grefs)) { + dev_err(hy_drv_priv->dev, + "Cannot unmap 2nd level refs\n"); + return NULL; + } + + /* Mark that pages were unmapped */ + for (i = 0; i < n_lvl2_grefs; i++) + lvl2_unmap_ops[i].handle = -1; + + for (i = 0; i < nents; i++) { + if (data_map_ops[i].status) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed status = %d\n", + data_map_ops[i].status); + goto error_cleanup_data; + } else { + data_unmap_ops[i].handle = data_map_ops[i].handle; + } + } + + /* store these references for unmapping in the future */ + sh_pages_info->unmap_ops = data_unmap_ops; + sh_pages_info->data_pages = data_pages; + + gnttab_free_pages(1, &lvl3_table_page); + gnttab_free_pages(n_lvl2_grefs, lvl2_table_pages); + kfree(lvl2_table_pages); + kfree(lvl2_map_ops); + kfree(lvl2_unmap_ops); + kfree(data_map_ops); + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return data_pages; + +error_cleanup_data: + gnttab_unmap_refs(data_unmap_ops, NULL, data_pages, + nents); + + gnttab_free_pages(nents, data_pages); + +error_cleanup_lvl2: + if (lvl2_unmap_ops[0].handle != -1) + gnttab_unmap_refs(lvl2_unmap_ops, NULL, + lvl2_table_pages, n_lvl2_grefs); + gnttab_free_pages(n_lvl2_grefs, lvl2_table_pages); + +error_cleanup_lvl3: + if (lvl3_unmap_ops.handle != -1) + gnttab_unmap_refs(&lvl3_unmap_ops, NULL, + &lvl3_table_page, 1); + gnttab_free_pages(1, &lvl3_table_page); + + kfree(lvl2_table_pages); + kfree(lvl2_map_ops); + kfree(lvl2_unmap_ops); + kfree(data_map_ops); + + + return NULL; +} + +int xen_be_unmap_shared_pages(void **refs_info, int nents) +{ + struct xen_shared_pages_info *sh_pages_info; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + + sh_pages_info = (struct xen_shared_pages_info *)(*refs_info); + + if (sh_pages_info->unmap_ops == NULL || + sh_pages_info->data_pages == NULL) { + dev_warn(hy_drv_priv->dev, + "pages already cleaned up or buffer not imported yet\n"); + return 0; + } + + if (gnttab_unmap_refs(sh_pages_info->unmap_ops, NULL, + sh_pages_info->data_pages, nents)) { + dev_err(hy_drv_priv->dev, "Cannot unmap data pages\n"); + return -EFAULT; + } + + gnttab_free_pages(nents, sh_pages_info->data_pages); + + kfree(sh_pages_info->data_pages); + kfree(sh_pages_info->unmap_ops); + sh_pages_info->unmap_ops = NULL; + sh_pages_info->data_pages = NULL; + kfree(sh_pages_info); + sh_pages_info = NULL; + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return 0; +} diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h new file mode 100644 index 000000000000..d5236b500075 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h @@ -0,0 +1,46 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated 
documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_XEN_SHM_H__
+#define __HYPER_DMABUF_XEN_SHM_H__
+
+/* Collects all reference numbers for the 2nd level shared pages, creates
+ * a table with those in the top level page and returns the reference
+ * number of that top level page.
+ */
+int xen_be_share_pages(struct page **pages, int domid, int nents,
+		       void **refs_info);
+
+int xen_be_unshare_pages(void **refs_info, int nents);
+
+/* Maps the provided top level ref id and then returns an array of pages
+ * containing the data refs.
+ */
+struct page **xen_be_map_shared_pages(unsigned long lvl3_gref, int domid,
+				      int nents,
+				      void **refs_info);
+
+int xen_be_unmap_shared_pages(void **refs_info, int nents);
+
+#endif /* __HYPER_DMABUF_XEN_SHM_H__ */
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index dec3a815455d..012fa3d1f407 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -266,8 +266,7 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
- * Copy all fences from src to dst. Both src->lock as well as dst-lock must be
- * held.
+ * Copy all fences from src to dst. dst->lock must be held.
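+ *
+ * The body below follows the usual RCU read/retry pattern: it snapshots
+ * src->fence under rcu_read_lock(), drops the lock to kmalloc() the
+ * destination list, then re-dereferences the source and retries if the
+ * shared list grew in the meantime or a fence could not be pinned with
+ * dma_fence_get_rcu(); fences that are already signaled are skipped
+ * rather than copied.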
*/ int reservation_object_copy_fences(struct reservation_object *dst, struct reservation_object *src) @@ -277,33 +276,62 @@ int reservation_object_copy_fences(struct reservation_object *dst, size_t size; unsigned i; - src_list = reservation_object_get_list(src); + rcu_read_lock(); + src_list = rcu_dereference(src->fence); +retry: if (src_list) { - size = offsetof(typeof(*src_list), - shared[src_list->shared_count]); + unsigned shared_count = src_list->shared_count; + + size = offsetof(typeof(*src_list), shared[shared_count]); + rcu_read_unlock(); + dst_list = kmalloc(size, GFP_KERNEL); if (!dst_list) return -ENOMEM; - dst_list->shared_count = src_list->shared_count; - dst_list->shared_max = src_list->shared_count; - for (i = 0; i < src_list->shared_count; ++i) - dst_list->shared[i] = - dma_fence_get(src_list->shared[i]); + rcu_read_lock(); + src_list = rcu_dereference(src->fence); + if (!src_list || src_list->shared_count > shared_count) { + kfree(dst_list); + goto retry; + } + + dst_list->shared_count = 0; + dst_list->shared_max = shared_count; + for (i = 0; i < src_list->shared_count; ++i) { + struct dma_fence *fence; + + fence = rcu_dereference(src_list->shared[i]); + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &fence->flags)) + continue; + + if (!dma_fence_get_rcu(fence)) { + kfree(dst_list); + src_list = rcu_dereference(src->fence); + goto retry; + } + + if (dma_fence_is_signaled(fence)) { + dma_fence_put(fence); + continue; + } + + dst_list->shared[dst_list->shared_count++] = fence; + } } else { dst_list = NULL; } + new = dma_fence_get_rcu_safe(&src->fence_excl); + rcu_read_unlock(); + kfree(dst->staged); dst->staged = NULL; src_list = reservation_object_get_list(dst); - old = reservation_object_get_excl(dst); - new = reservation_object_get_excl(src); - - dma_fence_get(new); preempt_disable(); write_seqcount_begin(&dst->seq); @@ -427,13 +455,15 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, unsigned long timeout) { struct dma_fence *fence; - unsigned seq, shared_count, i = 0; + unsigned seq, shared_count; long ret = timeout ? 
timeout : 1; + int i; retry: shared_count = 0; seq = read_seqcount_begin(&obj->seq); rcu_read_lock(); + i = -1; fence = rcu_dereference(obj->fence_excl); if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { @@ -449,14 +479,14 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, fence = NULL; } - if (!fence && wait_all) { + if (wait_all) { struct reservation_object_list *fobj = rcu_dereference(obj->fence); if (fobj) shared_count = fobj->shared_count; - for (i = 0; i < shared_count; ++i) { + for (i = 0; !fence && i < shared_count; ++i) { struct dma_fence *lfence = rcu_dereference(fobj->shared[i]); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 38cc7389a6c1..24f83f9eeaed 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -321,8 +321,16 @@ static int sw_sync_debugfs_open(struct inode *inode, struct file *file) static int sw_sync_debugfs_release(struct inode *inode, struct file *file) { struct sync_timeline *obj = file->private_data; + struct sync_pt *pt, *next; + + spin_lock_irq(&obj->lock); + + list_for_each_entry_safe(pt, next, &obj->pt_list, link) { + dma_fence_set_error(&pt->base, -ENOENT); + dma_fence_signal_locked(&pt->base); + } - smp_wmb(); + spin_unlock_irq(&obj->lock); sync_timeline_put(obj); return 0; diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index fbab271b3bf9..a861b5b4d443 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -708,7 +708,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan, unsigned long flags) { struct at_dma_chan *atchan = to_at_dma_chan(chan); - struct data_chunk *first = xt->sgl; + struct data_chunk *first; struct at_desc *desc = NULL; size_t xfer_count; unsigned int dwidth; @@ -720,6 +720,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan, if (unlikely(!xt || xt->numf != 1 || !xt->frame_size)) return NULL; + first = xt->sgl; + dev_info(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", __func__, &xt->src_start, &xt->dst_start, xt->numf, diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index c00e3923d7d8..94236ec9d410 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1471,10 +1471,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) { check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; rmb(); - initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD); - rmb(); cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); rmb(); + initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD); + rmb(); cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; rmb(); diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index d50273fed715..afd5e10f8927 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c @@ -555,7 +555,7 @@ static int jz4740_dma_probe(struct platform_device *pdev) ret = dma_async_device_register(dd); if (ret) - return ret; + goto err_clk; irq = platform_get_irq(pdev, 0); ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev); @@ -568,6 +568,8 @@ static int jz4740_dma_probe(struct platform_device *pdev) err_unregister: dma_async_device_unregister(dd); +err_clk: + clk_disable_unprepare(dmadev->clk); return ret; } diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 34ff53290b03..80cc2be6483c 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c 
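
The dmatest changes below address a use-after-return hazard: the DMA completion callback used to be handed a dmatest_done object that lived on dmatest_func()'s stack, so a transfer that completed after wait_event_freezable_timeout() had timed out and the function had returned would touch a dead stack frame. A reduced sketch of the fixed shape, using hypothetical demo_* names rather than the driver's own: the completion state is embedded in the long-lived per-thread object, and the callback checks whether the thread has already finished.

#include <linux/wait.h>
#include <linux/kernel.h>

struct demo_done {
	bool done;
	wait_queue_head_t *wait;
};

struct demo_thread {
	wait_queue_head_t done_wait;	/* lives as long as the thread */
	struct demo_done test_done;	/* embedded, never on a stack frame */
	bool done;			/* set once the thread cleaned up */
};

static void demo_callback(void *arg)
{
	struct demo_done *done = arg;
	struct demo_thread *t = container_of(done, struct demo_thread,
					     test_done);

	if (!t->done) {
		done->done = true;
		wake_up_all(done->wait);
	}
	/*
	 * Otherwise the waiter already timed out and moved on; the embedded
	 * state is still valid memory, so observing and ignoring the late
	 * completion here is safe.
	 */
}
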
@@ -155,6 +155,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)"); #define PATTERN_COUNT_MASK 0x1f #define PATTERN_MEMSET_IDX 0x01 +/* poor man's completion - we want to use wait_event_freezable() on it */ +struct dmatest_done { + bool done; + wait_queue_head_t *wait; +}; + struct dmatest_thread { struct list_head node; struct dmatest_info *info; @@ -165,6 +171,8 @@ struct dmatest_thread { u8 **dsts; u8 **udsts; enum dma_transaction_type type; + wait_queue_head_t done_wait; + struct dmatest_done test_done; bool done; }; @@ -342,18 +350,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, return error_count; } -/* poor man's completion - we want to use wait_event_freezable() on it */ -struct dmatest_done { - bool done; - wait_queue_head_t *wait; -}; static void dmatest_callback(void *arg) { struct dmatest_done *done = arg; - - done->done = true; - wake_up_all(done->wait); + struct dmatest_thread *thread = + container_of(done, struct dmatest_thread, test_done); + if (!thread->done) { + done->done = true; + wake_up_all(done->wait); + } else { + /* + * If thread->done, it means that this callback occurred + * after the parent thread has cleaned up. This can + * happen in the case that driver doesn't implement + * the terminate_all() functionality and a dma operation + * did not occur within the timeout period + */ + WARN(1, "dmatest: Kernel memory may be corrupted!!\n"); + } } static unsigned int min_odd(unsigned int x, unsigned int y) @@ -424,9 +439,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len) */ static int dmatest_func(void *data) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); struct dmatest_thread *thread = data; - struct dmatest_done done = { .wait = &done_wait }; + struct dmatest_done *done = &thread->test_done; struct dmatest_info *info; struct dmatest_params *params; struct dma_chan *chan; @@ -673,9 +687,9 @@ static int dmatest_func(void *data) continue; } - done.done = false; + done->done = false; tx->callback = dmatest_callback; - tx->callback_param = &done; + tx->callback_param = done; cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { @@ -688,20 +702,12 @@ static int dmatest_func(void *data) } dma_async_issue_pending(chan); - wait_event_freezable_timeout(done_wait, done.done, + wait_event_freezable_timeout(thread->done_wait, done->done, msecs_to_jiffies(params->timeout)); status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); - if (!done.done) { - /* - * We're leaving the timed out dma operation with - * dangling pointer to done_wait. To make this - * correct, we'll need to allocate wait_done for - * each test iteration and perform "who's gonna - * free it this time?" dancing. For now, just - * leave it dangling. 
- */ + if (!done->done) { dmaengine_unmap_put(um); result("test timed out", total_tests, src_off, dst_off, len, 0); @@ -788,7 +794,7 @@ static int dmatest_func(void *data) dmatest_KBs(runtime, total_len), ret); /* terminate all transfers on specified channels */ - if (ret) + if (ret || failed_tests) dmaengine_terminate_all(chan); thread->done = true; @@ -848,6 +854,8 @@ static int dmatest_add_threads(struct dmatest_info *info, thread->info = info; thread->chan = dtc->chan; thread->type = type; + thread->test_done.wait = &thread->done_wait; + init_waitqueue_head(&thread->done_wait); smp_wmb(); thread->task = kthread_create(dmatest_func, thread, "%s-%s%u", dma_chan_name(chan), op, i); diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index 6775f2c74e25..c7568869284e 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -863,11 +863,11 @@ static void fsl_edma_irq_exit( } } -static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma) +static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks) { int i; - for (i = 0; i < DMAMUX_NR; i++) + for (i = 0; i < nr_clocks; i++) clk_disable_unprepare(fsl_edma->muxclk[i]); } @@ -904,25 +904,25 @@ static int fsl_edma_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i); fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(fsl_edma->muxbase[i])) + if (IS_ERR(fsl_edma->muxbase[i])) { + /* on error: disable all previously enabled clks */ + fsl_disable_clocks(fsl_edma, i); return PTR_ERR(fsl_edma->muxbase[i]); + } sprintf(clkname, "dmamux%d", i); fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname); if (IS_ERR(fsl_edma->muxclk[i])) { dev_err(&pdev->dev, "Missing DMAMUX block clock.\n"); + /* on error: disable all previously enabled clks */ + fsl_disable_clocks(fsl_edma, i); return PTR_ERR(fsl_edma->muxclk[i]); } ret = clk_prepare_enable(fsl_edma->muxclk[i]); - if (ret) { - /* disable only clks which were enabled on error */ - for (; i >= 0; i--) - clk_disable_unprepare(fsl_edma->muxclk[i]); - - dev_err(&pdev->dev, "DMAMUX clk block failed.\n"); - return ret; - } + if (ret) + /* on error: disable all previously enabled clks */ + fsl_disable_clocks(fsl_edma, i); } @@ -976,7 +976,7 @@ static int fsl_edma_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Can't register Freescale eDMA engine. (%d)\n", ret); - fsl_disable_clocks(fsl_edma); + fsl_disable_clocks(fsl_edma, DMAMUX_NR); return ret; } @@ -985,7 +985,7 @@ static int fsl_edma_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma. 
(%d)\n", ret); dma_async_device_unregister(&fsl_edma->dma_dev); - fsl_disable_clocks(fsl_edma); + fsl_disable_clocks(fsl_edma, DMAMUX_NR); return ret; } @@ -1015,7 +1015,7 @@ static int fsl_edma_remove(struct platform_device *pdev) fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); of_dma_controller_free(np); dma_async_device_unregister(&fsl_edma->dma_dev); - fsl_disable_clocks(fsl_edma); + fsl_disable_clocks(fsl_edma, DMAMUX_NR); return 0; } diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 1953e57505f4..f5d9624a4f8c 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -496,6 +496,13 @@ static int idma64_terminate_all(struct dma_chan *chan) return 0; } +static void idma64_synchronize(struct dma_chan *chan) +{ + struct idma64_chan *idma64c = to_idma64_chan(chan); + + vchan_synchronize(&idma64c->vchan); +} + static int idma64_alloc_chan_resources(struct dma_chan *chan) { struct idma64_chan *idma64c = to_idma64_chan(chan); @@ -583,6 +590,7 @@ static int idma64_probe(struct idma64_chip *chip) idma64->dma.device_pause = idma64_pause; idma64->dma.device_resume = idma64_resume; idma64->dma.device_terminate_all = idma64_terminate_all; + idma64->dma.device_synchronize = idma64_synchronize; idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS; idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS; @@ -701,7 +709,17 @@ static struct platform_driver idma64_platform_driver = { }, }; -module_platform_driver(idma64_platform_driver); +static int __init idma64_platform_driver_init(void) +{ + return platform_driver_register(&idma64_platform_driver); +} +fs_initcall(idma64_platform_driver_init); + +static void __exit idma64_platform_driver_exit(void) +{ + platform_driver_unregister(&idma64_platform_driver); +} +module_exit(idma64_platform_driver_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("iDMA64 core driver"); diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 93e006c3441d..854deb0da07c 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -390,7 +390,7 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma) if (memcmp(src, dest, IOAT_TEST_SIZE)) { dev_err(dev, "Self-test copy failed compare, disabling\n"); err = -ENODEV; - goto free_resources; + goto unmap_dma; } unmap_dma: diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index f652a0e0f5a2..3548caa9e933 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -163,6 +163,7 @@ struct mv_xor_v2_device { void __iomem *dma_base; void __iomem *glob_base; struct clk *clk; + struct clk *reg_clk; struct tasklet_struct irq_tasklet; struct list_head free_sw_desc; struct dma_device dmadev; @@ -749,13 +750,26 @@ static int mv_xor_v2_probe(struct platform_device *pdev) if (ret) return ret; + xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg"); + if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) { + if (!IS_ERR(xor_dev->reg_clk)) { + ret = clk_prepare_enable(xor_dev->reg_clk); + if (ret) + return ret; + } else { + return PTR_ERR(xor_dev->reg_clk); + } + } + xor_dev->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) { + ret = EPROBE_DEFER; + goto disable_reg_clk; + } if (!IS_ERR(xor_dev->clk)) { ret = clk_prepare_enable(xor_dev->clk); if (ret) - return ret; + goto disable_reg_clk; } ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1, @@ -866,8 +880,9 @@ static int mv_xor_v2_probe(struct platform_device *pdev) free_msi_irqs: 
platform_msi_domain_free_irqs(&pdev->dev); disable_clk: - if (!IS_ERR(xor_dev->clk)) - clk_disable_unprepare(xor_dev->clk); + clk_disable_unprepare(xor_dev->clk); +disable_reg_clk: + clk_disable_unprepare(xor_dev->reg_clk); return ret; } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index f122c2a7b9f0..7432c8894e32 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1510,7 +1510,7 @@ static void pl330_dotask(unsigned long data) /* Returns 1 if state was updated, 0 otherwise */ static int pl330_update(struct pl330_dmac *pl330) { - struct dma_pl330_desc *descdone, *tmp; + struct dma_pl330_desc *descdone; unsigned long flags; void __iomem *regs; u32 val; @@ -1588,7 +1588,9 @@ static int pl330_update(struct pl330_dmac *pl330) } /* Now that we are in no hurry, do the callbacks */ - list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) { + while (!list_empty(&pl330->req_done)) { + descdone = list_first_entry(&pl330->req_done, + struct dma_pl330_desc, rqd); list_del(&descdone->rqd); spin_unlock_irqrestore(&pl330->lock, flags); dma_pl330_rqcb(descdone, PL330_ERR_NONE); diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 6d89fb6a6a92..8fbf175fdcc7 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -388,6 +388,7 @@ struct bam_device { struct device_dma_parameters dma_parms; struct bam_chan *channels; u32 num_channels; + u32 num_ees; /* execution environment ID, from DT */ u32 ee; @@ -1080,15 +1081,19 @@ static int bam_init(struct bam_device *bdev) u32 val; /* read revision and configuration information */ - val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT; - val &= NUM_EES_MASK; + if (!bdev->num_ees) { + val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)); + bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK; + } /* check that configured EE is within range */ - if (bdev->ee >= val) + if (bdev->ee >= bdev->num_ees) return -EINVAL; - val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); - bdev->num_channels = val & BAM_NUM_PIPES_MASK; + if (!bdev->num_channels) { + val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); + bdev->num_channels = val & BAM_NUM_PIPES_MASK; + } if (bdev->controlled_remotely) return 0; @@ -1183,6 +1188,18 @@ static int bam_dma_probe(struct platform_device *pdev) bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node, "qcom,controlled-remotely"); + if (bdev->controlled_remotely) { + ret = of_property_read_u32(pdev->dev.of_node, "num-channels", + &bdev->num_channels); + if (ret) + dev_err(bdev->dev, "num-channels unspecified in dt\n"); + + ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees", + &bdev->num_ees); + if (ret) + dev_err(bdev->dev, "num-ees unspecified in dt\n"); + } + bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk"); if (IS_ERR(bdev->bamclk)) return PTR_ERR(bdev->bamclk); diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c index 4999e266b2de..7c6e2ff212a2 100644 --- a/drivers/dma/qcom/hidma_ll.c +++ b/drivers/dma/qcom/hidma_ll.c @@ -393,6 +393,8 @@ static int hidma_ll_reset(struct hidma_lldev *lldev) */ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause) { + unsigned long irqflags; + if (cause & HIDMA_ERR_INT_MASK) { dev_err(lldev->dev, "error 0x%x, disabling...\n", cause); @@ -410,6 +412,10 @@ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause) return; } + spin_lock_irqsave(&lldev->lock, irqflags); + writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); + 
spin_unlock_irqrestore(&lldev->lock, irqflags);
+
 	/*
 	 * Fine tuned for this HW...
 	 *
@@ -421,9 +427,6 @@ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
 	 * Try to consume as many EVREs as possible.
 	 */
 	hidma_handle_tre_completion(lldev);
-
-	/* We consumed TREs or there are pending TREs or EVREs. */
-	writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
 }
 
 irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2b2c7db3e480..9d6ce5051d8f 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -880,7 +880,7 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
 
 	rcar_dmac_chan_configure_desc(chan, desc);
 
-	max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
+	max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
 
 	/*
 	 * Allocate and fill the transfer chunk descriptors. We own the only
@@ -1264,8 +1264,17 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 	 * If the cookie doesn't correspond to the currently running transfer
 	 * then the descriptor hasn't been processed yet, and the residue is
 	 * equal to the full descriptor size.
+	 * Also, a client driver may call this function before
+	 * rcar_dmac_isr_channel_thread() runs. In this case, "desc.running"
+	 * will already be the next descriptor, and the completed one will
+	 * sit on the done list. So, if the argument cookie matches a done
+	 * list entry's cookie, we can assume the residue is zero.
 	 */
 	if (cookie != desc->async_tx.cookie) {
+		list_for_each_entry(desc, &chan->desc.done, node) {
+			if (cookie == desc->async_tx.cookie)
+				return 0;
+		}
 		list_for_each_entry(desc, &chan->desc.pending, node) {
 			if (cookie == desc->async_tx.cookie)
 				return desc->size;
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index f1d04b70ee67..9272b173c746 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -49,12 +49,20 @@ struct ti_am335x_xbar_data {
 
 struct ti_am335x_xbar_map {
 	u16 dma_line;
-	u16 mux_val;
+	u8 mux_val;
};
 
-static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val)
+static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
 {
-	writeb_relaxed(val & 0x1f, iomem + event);
+	/*
+	 * The TPCC_EVT_MUX_60_63 register layout differs from the rest,
+	 * in the sense that event 63 is mapped to the lowest byte and
+	 * event 60 to the highest; handle it separately.
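+	 *
+	 * With the formula below, event 60 is written at byte offset 63,
+	 * event 61 at 62, 62 at 61 and 63 at 60, i.e. the four bytes of
+	 * TPCC_EVT_MUX_60_63 are laid out in reverse order compared to the
+	 * other TPCC_EVT_MUX registers.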
+ */ + if (event >= 60 && event <= 63) + writeb_relaxed(val, iomem + (63 - event % 4)); + else + writeb_relaxed(val, iomem + event); } static void ti_am335x_xbar_free(struct device *dev, void *route_data) @@ -105,7 +113,7 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec, } map->dma_line = (u16)dma_spec->args[0]; - map->mux_val = (u16)dma_spec->args[2]; + map->mux_val = (u8)dma_spec->args[2]; dma_spec->args[2] = 0; dma_spec->args_count = 2; diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 1ee1241ca797..5cc8ed31f26b 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -838,7 +838,8 @@ static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan) if (!chan) return; - devm_free_irq(chan->zdev->dev, chan->irq, chan); + if (chan->irq) + devm_free_irq(chan->zdev->dev, chan->irq, chan); tasklet_kill(&chan->tasklet); list_del(&chan->common.device_node); clk_disable_unprepare(chan->clk_apb); diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index 346c4987b284..38983f56ad0d 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -1106,7 +1106,7 @@ static void *ocram_alloc_mem(size_t size, void **other) static void ocram_free_mem(void *p, size_t size, void *other) { - gen_pool_free((struct gen_pool *)other, (u32)p, size); + gen_pool_free((struct gen_pool *)other, (unsigned long)p, size); } static const struct edac_device_prv_data ocramecc_data = { diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index ac2f30295efe..59ce32e405ac 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -3147,7 +3147,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) struct amd64_family_type *fam_type = NULL; pvt->ext_model = boot_cpu_data.x86_model >> 4; - pvt->stepping = boot_cpu_data.x86_mask; + pvt->stepping = boot_cpu_data.x86_stepping; pvt->model = boot_cpu_data.x86_model; pvt->fam = boot_cpu_data.x86; diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index e4fcfa84fbd3..c70ea82c815c 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -50,7 +50,7 @@ int edac_mc_get_poll_msec(void) return edac_mc_poll_msec; } -static int edac_set_poll_msec(const char *val, struct kernel_param *kp) +static int edac_set_poll_msec(const char *val, const struct kernel_param *kp) { unsigned long l; int ret; diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c index 172598a27d7d..32a931d0cb71 100644 --- a/drivers/edac/edac_module.c +++ b/drivers/edac/edac_module.c @@ -19,7 +19,8 @@ #ifdef CONFIG_EDAC_DEBUG -static int edac_set_debug_level(const char *buf, struct kernel_param *kp) +static int edac_set_debug_level(const char *buf, + const struct kernel_param *kp) { unsigned long val; int ret; diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index a11a671c7a38..2ab4d61ee47e 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -854,21 +854,24 @@ static void decode_mc6_mce(struct mce *m) static void decode_smca_error(struct mce *m) { struct smca_hwid *hwid; - unsigned int bank_type; + enum smca_bank_types bank_type; const char *ip_name; u8 xec = XEC(m->status, xec_mask); if (m->bank >= ARRAY_SIZE(smca_banks)) return; - if (x86_family(m->cpuid) >= 0x17 && m->bank == 4) - pr_emerg(HW_ERR "Bank 4 is reserved on Fam17h.\n"); - hwid = smca_banks[m->bank].hwid; if (!hwid) return; bank_type = hwid->bank_type; + + if (bank_type == SMCA_RESERVED) { + 
pr_emerg(HW_ERR "Bank %d is reserved.\n", m->bank); + return; + } + ip_name = smca_get_long_name(bank_type); pr_emerg(HW_ERR "%s Extended Error Code: %d\n", ip_name, xec); diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c index ec5d695bbb72..3c68bb525d5d 100644 --- a/drivers/edac/mv64x60_edac.c +++ b/drivers/edac/mv64x60_edac.c @@ -758,7 +758,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev) /* Non-ECC RAM? */ printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__); res = -ENODEV; - goto err2; + goto err; } edac_dbg(3, "init mci\n"); diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c index 9c1ffe3e912b..aeb222ca3ed1 100644 --- a/drivers/edac/octeon_edac-lmc.c +++ b/drivers/edac/octeon_edac-lmc.c @@ -78,6 +78,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci) if (!pvt->inject) int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx)); else { + int_reg.u64 = 0; if (pvt->error_type == 1) int_reg.s.sec_err = 1; if (pvt->error_type == 2) diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index dc0591654011..0dc0d595c47c 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = { * sbridge structs */ -#define NUM_CHANNELS 4 /* Max channels per MC */ +#define NUM_CHANNELS 6 /* Max channels per MC */ #define MAX_DIMMS 3 /* Max DIMMS per channel */ #define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */ #define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */ @@ -462,6 +462,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = { static const struct pci_id_descr pci_dev_descr_ibridge[] = { /* Processor Home Agent */ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0, IMC0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) }, /* Memory controller */ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0, IMC0) }, @@ -472,7 +473,6 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = { { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0, IMC0) }, /* Optional, mode 2HA */ - { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) }, { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1, IMC1) }, { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1, IMC1) }, { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1, IMC1) }, @@ -2291,6 +2291,13 @@ static int sbridge_get_onedevice(struct pci_dev **prev, next_imc: sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev); if (!sbridge_dev) { + /* If the HA1 wasn't found, don't create EDAC second memory controller */ + if (dev_descr->dom == IMC1 && devno != 1) { + edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n", + PCI_VENDOR_ID_INTEL, dev_descr->dev_id); + pci_dev_put(pdev); + return 0; + } if (dev_descr->dom == SOCK) goto out_imc; @@ -2491,6 +2498,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA: pvt->pci_ta = pdev; + break; case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS: case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS: pvt->pci_ras = pdev; diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index a7bca4207f44..1101eb3a2242 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig @@ -157,4 +157,21 @@ config EXTCON_USBC_CROS_EC Say Y here to enable USB Type C cable detection extcon support when using Chrome OS EC based USB Type-C ports. 
+config EXTCON_INTEL_USB + bool "Intel USB MUX support" + depends on X86 && USB + help + Intel SoCs and chipsets often have an internal USB mux that is used to + share one USB port between an USB Device Controller and xHCI. The mux + is by default controlled by BIOS/FW, but on some platforms that is not + possible and the OS has to configure the mux with this driver. + + The driver relies on events from some external source. The mux has no + means to detect a change in the cable connection status on its own. + That makes this driver useful only on platforms where a separate + component exists, for example PMIC, that can detect connection status + changes on the USB port behind the mux. + + If unsure, say N. + endif diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile index 0888fdeded72..1cb698882fa1 100644 --- a/drivers/extcon/Makefile +++ b/drivers/extcon/Makefile @@ -22,3 +22,4 @@ obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o obj-$(CONFIG_EXTCON_USB_GPIO) += extcon-usb-gpio.o obj-$(CONFIG_EXTCON_USBC_CROS_EC) += extcon-usbc-cros-ec.o +obj-$(CONFIG_EXTCON_INTEL_USB) += extcon-intel-usb.o diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c index 91a0023074af..60baaf693103 100644 --- a/drivers/extcon/extcon-intel-cht-wc.c +++ b/drivers/extcon/extcon-intel-cht-wc.c @@ -66,6 +66,8 @@ #define CHT_WC_VBUS_GPIO_CTLO 0x6e2d #define CHT_WC_VBUS_GPIO_CTLO_OUTPUT BIT(0) +#define CHT_WC_VBUS_GPIO_CTLO_DRV_OD BIT(4) +#define CHT_WC_VBUS_GPIO_CTLO_DIR_OUT BIT(5) enum cht_wc_usb_id { USB_ID_OTG, @@ -183,14 +185,15 @@ static void cht_wc_extcon_set_5v_boost(struct cht_wc_extcon_data *ext, { int ret, val; - val = enable ? CHT_WC_VBUS_GPIO_CTLO_OUTPUT : 0; - /* * The 5V boost converter is enabled through a gpio on the PMIC, since * there currently is no gpio driver we access the gpio reg directly. */ - ret = regmap_update_bits(ext->regmap, CHT_WC_VBUS_GPIO_CTLO, - CHT_WC_VBUS_GPIO_CTLO_OUTPUT, val); + val = CHT_WC_VBUS_GPIO_CTLO_DRV_OD | CHT_WC_VBUS_GPIO_CTLO_DIR_OUT; + if (enable) + val |= CHT_WC_VBUS_GPIO_CTLO_OUTPUT; + + ret = regmap_write(ext->regmap, CHT_WC_VBUS_GPIO_CTLO, val); if (ret) dev_err(ext->dev, "Error writing Vbus GPIO CTLO: %d\n", ret); } diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c index 1a45e745717d..a6661097b2f9 100644 --- a/drivers/extcon/extcon-intel-int3496.c +++ b/drivers/extcon/extcon-intel-int3496.c @@ -153,8 +153,9 @@ static int int3496_probe(struct platform_device *pdev) return ret; } - /* queue initial processing of id-pin */ + /* process id-pin so that we start with the right status */ queue_delayed_work(system_wq, &data->work, 0); + flush_delayed_work(&data->work); platform_set_drvdata(pdev, data); diff --git a/drivers/extcon/extcon-intel-usb.c b/drivers/extcon/extcon-intel-usb.c new file mode 100644 index 000000000000..26b043db0350 --- /dev/null +++ b/drivers/extcon/extcon-intel-usb.c @@ -0,0 +1,115 @@ +/** + * extcon-intel-usb.c - Driver for Intel USB mux + * + * Copyright (C) 2015 Intel Corporation + * Author: Heikki Krogerus + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include + +#include + +#include "extcon.h" + +#define INTEL_MUX_CFG0 0x00 +#define INTEL_MUX_CFG1 0x04 + +#define CFG0_SW_DRD_MODE_MASK 0x3 +#define CFG0_SW_DRD_DYN 0 +#define CFG0_SW_DRD_STATIC_HOST 1 +#define CFG0_SW_DRD_STATIC_DEV 2 +#define CFG0_SW_SYNC_SS_AND_HS BIT(2) +#define CFG0_SW_SWITCH_EN BIT(16) +#define CFG0_SW_IDPIN BIT(20) +#define CFG0_SW_IDPIN_EN BIT(21) +#define CFG0_SW_VBUS_VALID BIT(24) + +#define CFG1_MODE BIT(29) + +struct intel_usb_mux { + struct notifier_block nb; + struct extcon_dev edev; + void __iomem *regs; +}; + +static const int intel_mux_cable[] = { + EXTCON_USB_HOST, + EXTCON_NONE, +}; + +static int intel_usb_mux_notifier(struct notifier_block *nb, + unsigned long old, void *ptr) +{ + struct intel_usb_mux *mux = container_of(nb, struct intel_usb_mux, nb); + u32 val; + + if (mux->edev.state) + val = CFG0_SW_IDPIN_EN | CFG0_SW_DRD_STATIC_HOST | CFG0_SW_SWITCH_EN; + else + val = CFG0_SW_IDPIN_EN | CFG0_SW_IDPIN | CFG0_SW_VBUS_VALID | + CFG0_SW_DRD_STATIC_DEV | CFG0_SW_SWITCH_EN ; + + writel(val, mux->regs); + return NOTIFY_OK; +} + +struct intel_usb_mux *intel_usb_mux_register(struct device *dev, + struct resource *r) +{ + struct intel_usb_mux *mux; + int ret; + u32 val; + + mux = kzalloc(sizeof(*mux), GFP_KERNEL); + if (!mux) + return ERR_PTR(-ENOMEM); + + mux->regs = ioremap_nocache(r->start, resource_size(r)); + if (!mux->regs) { + kfree(mux); + return ERR_PTR(-ENOMEM); + } + + val = CFG0_SW_IDPIN_EN | CFG0_SW_IDPIN | CFG0_SW_VBUS_VALID | + CFG0_SW_DRD_STATIC_DEV | CFG0_SW_SWITCH_EN; + writel(val, mux->regs); + + mux->edev.dev.parent = dev; + mux->edev.supported_cable = intel_mux_cable; + + ret = extcon_dev_register(&mux->edev); + if (ret) + goto err; + + mux->edev.name = "intel_usb_mux"; + mux->edev.state = !!(readl(mux->regs + INTEL_MUX_CFG1) & CFG1_MODE); + + /* An external source needs to tell us what to do */ + mux->nb.notifier_call = intel_usb_mux_notifier; + ret = extcon_register_notifier(&mux->edev, EXTCON_USB_HOST, &mux->nb); + if (ret) { + dev_err(&mux->edev.dev, "failed to register notifier\n"); + extcon_dev_unregister(&mux->edev); + goto err; + } + return mux; +err: + iounmap(mux->regs); + kfree(mux); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(intel_usb_mux_register); + +void intel_usb_mux_unregister(struct intel_usb_mux *mux) +{ + extcon_unregister_notifier(&mux->edev, EXTCON_USB_HOST, &mux->nb); + extcon_dev_unregister(&mux->edev); + iounmap(mux->regs); + kfree(mux); +} +EXPORT_SYMBOL_GPL(intel_usb_mux_unregister); diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 8bf89267dc25..d731b413cb2c 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -1130,7 +1130,13 @@ static int context_add_buffer(struct context *ctx) return -ENOMEM; offset = (void *)&desc->buffer - (void *)desc; - desc->buffer_size = PAGE_SIZE - offset; + /* + * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads + * for descriptors, even 0x10-byte ones. This can cause page faults when + * an IOMMU is in use and the oversized read crosses a page boundary. + * Work around this by always leaving at least 0x10 bytes of padding. 
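+	 * For example, a 0x10-byte descriptor placed in the last 0x10 bytes
+	 * of a page would otherwise be fetched with a 0x20-byte read that
+	 * crosses into the next, possibly unmapped, page; with the padding,
+	 * even the oversized read stays inside the buffer's page.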
+ */ + desc->buffer_size = PAGE_SIZE - offset - 0x10; desc->buffer_bus = bus_addr + offset; desc->used = 0; diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 783041964439..fe0d30340e96 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -18,7 +18,7 @@ EXPORT_SYMBOL_GPL(dmi_kobj); * of and an antecedent to, SMBIOS, which stands for System * Management BIOS. See further: http://www.dmtf.org/standards */ -static const char dmi_empty_string[] = " "; +static const char dmi_empty_string[] = ""; static u32 dmi_ver __initdata; static u32 dmi_len; @@ -44,25 +44,21 @@ static int dmi_memdev_nr; static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s) { const u8 *bp = ((u8 *) dm) + dm->length; + const u8 *nsp; if (s) { - s--; - while (s > 0 && *bp) { + while (--s > 0 && *bp) bp += strlen(bp) + 1; - s--; - } - - if (*bp != 0) { - size_t len = strlen(bp)+1; - size_t cmp_len = len > 8 ? 8 : len; - if (!memcmp(bp, dmi_empty_string, cmp_len)) - return dmi_empty_string; + /* Strings containing only spaces are considered empty */ + nsp = bp; + while (*nsp == ' ') + nsp++; + if (*nsp != '\0') return bp; - } } - return ""; + return dmi_empty_string; } static const char * __init dmi_string(const struct dmi_header *dm, u8 s) @@ -195,7 +191,7 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, char *s; int is_ff = 1, is_00 = 1, i; - if (dmi_ident[slot] || dm->length <= index + 16) + if (dmi_ident[slot] || dm->length < index + 16) return; d = (u8 *) dm + index; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 2b4c39fdfa91..86210f75d233 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -159,7 +159,10 @@ config RESET_ATTACK_MITIGATION using the TCG Platform Reset Attack Mitigation specification. This protects against an attacker forcibly rebooting the system while it still contains secrets in RAM, booting another OS and extracting the - secrets. + secrets. This should only be enabled when userland is configured to + clear the MemoryOverwriteRequest flag on clean shutdown after secrets + have been evicted, since otherwise it will trigger even on clean + reboots. 
endmenu diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 1cc41c3d6315..86a1ad17a32e 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -54,6 +54,9 @@ static struct ptdump_info efi_ptdump_info = { static int __init ptdump_init(void) { + if (!efi_enabled(EFI_RUNTIME_SERVICES)) + return 0; + return ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables"); } device_initcall(ptdump_init); diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index ec8ac5c4dd84..055e2e8f985a 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -20,10 +20,6 @@ #define NO_FURTHER_WRITE_ACTION -1 -#ifndef phys_to_page -#define phys_to_page(x) pfn_to_page((x) >> PAGE_SHIFT) -#endif - /** * efi_free_all_buff_pages - free all previous allocated buffer pages * @cap_info: pointer to current instance of capsule_info structure @@ -35,7 +31,7 @@ static void efi_free_all_buff_pages(struct capsule_info *cap_info) { while (cap_info->index > 0) - __free_page(phys_to_page(cap_info->pages[--cap_info->index])); + __free_page(cap_info->pages[--cap_info->index]); cap_info->index = NO_FURTHER_WRITE_ACTION; } @@ -71,6 +67,14 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info) cap_info->pages = temp_page; + temp_page = krealloc(cap_info->phys, + pages_needed * sizeof(phys_addr_t *), + GFP_KERNEL | __GFP_ZERO); + if (!temp_page) + return -ENOMEM; + + cap_info->phys = temp_page; + return 0; } @@ -105,9 +109,24 @@ int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff, **/ static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info) { + bool do_vunmap = false; int ret; - ret = efi_capsule_update(&cap_info->header, cap_info->pages); + /* + * cap_info->capsule may have been assigned already by a quirk + * handler, so only overwrite it if it is NULL + */ + if (!cap_info->capsule) { + cap_info->capsule = vmap(cap_info->pages, cap_info->index, + VM_MAP, PAGE_KERNEL); + if (!cap_info->capsule) + return -ENOMEM; + do_vunmap = true; + } + + ret = efi_capsule_update(cap_info->capsule, cap_info->phys); + if (do_vunmap) + vunmap(cap_info->capsule); if (ret) { pr_err("capsule update failed\n"); return ret; @@ -165,10 +184,12 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff, goto failed; } - cap_info->pages[cap_info->index++] = page_to_phys(page); + cap_info->pages[cap_info->index] = page; + cap_info->phys[cap_info->index] = page_to_phys(page); cap_info->page_bytes_remain = PAGE_SIZE; + cap_info->index++; } else { - page = phys_to_page(cap_info->pages[cap_info->index - 1]); + page = cap_info->pages[cap_info->index - 1]; } kbuff = kmap(page); @@ -252,6 +273,7 @@ static int efi_capsule_release(struct inode *inode, struct file *file) struct capsule_info *cap_info = file->private_data; kfree(cap_info->pages); + kfree(cap_info->phys); kfree(file->private_data); file->private_data = NULL; return 0; @@ -281,6 +303,13 @@ static int efi_capsule_open(struct inode *inode, struct file *file) return -ENOMEM; } + cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL); + if (!cap_info->phys) { + kfree(cap_info->pages); + kfree(cap_info); + return -ENOMEM; + } + file->private_data = cap_info; return 0; diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index f70febf680c3..c3eefa126e3b 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -143,8 +143,7 @@ static ssize_t systab_show(struct 
kobject *kobj, return str - buf; } -static struct kobj_attribute efi_attr_systab = - __ATTR(systab, 0400, systab_show, NULL); +static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400); #define EFI_FIELD(var) efi.var diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index bd7ed3c1148a..c47e0c6ec00f 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -106,7 +106,7 @@ static const struct sysfs_ops esre_attr_ops = { }; /* Generic ESRT Entry ("ESRE") support. */ -static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf) +static ssize_t fw_class_show(struct esre_entry *entry, char *buf) { char *str = buf; @@ -117,18 +117,16 @@ static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf) return str - buf; } -static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400, - esre_fw_class_show, NULL); +static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400); #define esre_attr_decl(name, size, fmt) \ -static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \ +static ssize_t name##_show(struct esre_entry *entry, char *buf) \ { \ return sprintf(buf, fmt "\n", \ le##size##_to_cpu(entry->esre.esre1->name)); \ } \ \ -static struct esre_attribute esre_##name = __ATTR(name, 0400, \ - esre_##name##_show, NULL) +static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400) esre_attr_decl(fw_type, 32, "%u"); esre_attr_decl(fw_version, 32, "%u"); @@ -193,14 +191,13 @@ static int esre_create_sysfs_entry(void *esre, int entry_num) /* support for displaying ESRT fields at the top level */ #define esrt_attr_decl(name, size, fmt) \ -static ssize_t esrt_##name##_show(struct kobject *kobj, \ +static ssize_t name##_show(struct kobject *kobj, \ struct kobj_attribute *attr, char *buf)\ { \ return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \ } \ \ -static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \ - esrt_##name##_show, NULL) +static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400) esrt_attr_decl(fw_resource_count, 32, "%u"); esrt_attr_decl(fw_resource_count_max, 32, "%u"); @@ -431,7 +428,7 @@ static int __init esrt_sysfs_init(void) err_remove_esrt: kobject_put(esrt_kobj); err: - kfree(esrt); + memunmap(esrt); esrt = NULL; return error; } diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index adaa4a964f0c..69b3fbfb7f97 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -20,7 +20,8 @@ cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \ -D__NO_FORTIFY \ $(call cc-option,-ffreestanding) \ - $(call cc-option,-fno-stack-protector) + $(call cc-option,-fno-stack-protector) \ + $(DISABLE_LTO) GCOV_PROFILE := n KASAN_SANITIZE := n diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index b9bd827caa22..1b4d465cc5d9 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -97,6 +97,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg, u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ? 
(phys_seed >> 32) & mask : TEXT_OFFSET; + /* + * With CONFIG_RANDOMIZE_TEXT_OFFSET=y, TEXT_OFFSET may not + * be a multiple of EFI_KIMG_ALIGN, and we must ensure that + * we preserve the misalignment of 'offset' relative to + * EFI_KIMG_ALIGN so that statically allocated objects whose + * alignment exceeds PAGE_SIZE appear correctly aligned in + * memory. + */ + offset |= TEXT_OFFSET % EFI_KIMG_ALIGN; + /* * If KASLR is enabled, and we have some randomness available, * locate the kernel at a randomized offset in physical memory. diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c index 8e64b77aeac9..f377609ff141 100644 --- a/drivers/firmware/efi/runtime-map.c +++ b/drivers/firmware/efi/runtime-map.c @@ -63,11 +63,11 @@ static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr, return map_attr->show(entry, buf); } -static struct map_attribute map_type_attr = __ATTR_RO(type); -static struct map_attribute map_phys_addr_attr = __ATTR_RO(phys_addr); -static struct map_attribute map_virt_addr_attr = __ATTR_RO(virt_addr); -static struct map_attribute map_num_pages_attr = __ATTR_RO(num_pages); -static struct map_attribute map_attribute_attr = __ATTR_RO(attribute); +static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400); +static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400); +static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400); +static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400); +static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400); /* * These are default attributes that are added for every memmap entry. diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c index 35e553b3b190..e4b40f2b4627 100644 --- a/drivers/firmware/google/vpd.c +++ b/drivers/firmware/google/vpd.c @@ -295,38 +295,60 @@ static int vpd_probe(struct platform_device *pdev) if (ret) return ret; - return vpd_sections_init(entry.cbmem_addr); + vpd_kobj = kobject_create_and_add("vpd", firmware_kobj); + if (!vpd_kobj) + return -ENOMEM; + + ret = vpd_sections_init(entry.cbmem_addr); + if (ret) { + kobject_put(vpd_kobj); + return ret; + } + + return 0; +} + +static int vpd_remove(struct platform_device *pdev) +{ + vpd_section_destroy(&ro_vpd); + vpd_section_destroy(&rw_vpd); + + kobject_put(vpd_kobj); + + return 0; } static struct platform_driver vpd_driver = { .probe = vpd_probe, + .remove = vpd_remove, .driver = { .name = "vpd", }, }; +static struct platform_device *vpd_pdev; + static int __init vpd_platform_init(void) { - struct platform_device *pdev; - - pdev = platform_device_register_simple("vpd", -1, NULL, 0); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); + int ret; - vpd_kobj = kobject_create_and_add("vpd", firmware_kobj); - if (!vpd_kobj) - return -ENOMEM; + ret = platform_driver_register(&vpd_driver); + if (ret) + return ret; - platform_driver_register(&vpd_driver); + vpd_pdev = platform_device_register_simple("vpd", -1, NULL, 0); + if (IS_ERR(vpd_pdev)) { + platform_driver_unregister(&vpd_driver); + return PTR_ERR(vpd_pdev); + } return 0; } static void __exit vpd_platform_exit(void) { - vpd_section_destroy(&ro_vpd); - vpd_section_destroy(&rw_vpd); - kobject_put(vpd_kobj); + platform_device_unregister(vpd_pdev); + platform_driver_unregister(&vpd_driver); } module_init(vpd_platform_init); diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index d687ca3d5049..c80ec1d03274 100644 --- a/drivers/firmware/psci.c +++ 
b/drivers/firmware/psci.c @@ -59,7 +59,10 @@ bool psci_tos_resident_on(int cpu) return cpu == resident_cpu; } -struct psci_operations psci_ops; +struct psci_operations psci_ops = { + .conduit = PSCI_CONDUIT_NONE, + .smccc_version = SMCCC_VERSION_1_0, +}; typedef unsigned long (psci_fn)(unsigned long, unsigned long, unsigned long, unsigned long); @@ -210,6 +213,22 @@ static unsigned long psci_migrate_info_up_cpu(void) 0, 0, 0); } +static void set_conduit(enum psci_conduit conduit) +{ + switch (conduit) { + case PSCI_CONDUIT_HVC: + invoke_psci_fn = __invoke_psci_fn_hvc; + break; + case PSCI_CONDUIT_SMC: + invoke_psci_fn = __invoke_psci_fn_smc; + break; + default: + WARN(1, "Unexpected PSCI conduit %d\n", conduit); + } + + psci_ops.conduit = conduit; +} + static int get_set_conduit_method(struct device_node *np) { const char *method; @@ -222,9 +241,9 @@ static int get_set_conduit_method(struct device_node *np) } if (!strcmp("hvc", method)) { - invoke_psci_fn = __invoke_psci_fn_hvc; + set_conduit(PSCI_CONDUIT_HVC); } else if (!strcmp("smc", method)) { - invoke_psci_fn = __invoke_psci_fn_smc; + set_conduit(PSCI_CONDUIT_SMC); } else { pr_warn("invalid \"method\" property: %s\n", method); return -EINVAL; @@ -493,9 +512,36 @@ static void __init psci_init_migrate(void) pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid); } +static void __init psci_init_smccc(void) +{ + u32 ver = ARM_SMCCC_VERSION_1_0; + int feature; + + feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID); + + if (feature != PSCI_RET_NOT_SUPPORTED) { + u32 ret; + ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0); + if (ret == ARM_SMCCC_VERSION_1_1) { + psci_ops.smccc_version = SMCCC_VERSION_1_1; + ver = ret; + } + } + + /* + * Conveniently, the SMCCC and PSCI versions are encoded the + * same way. No, this isn't accidental. 
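+ * Both encode the major revision in bits [31:16] and the minor in bits [15:0], so the PSCI_VERSION_MAJOR()/PSCI_VERSION_MINOR() helpers can be applied to the SMCCC value below unchanged.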
+ */ + pr_info("SMC Calling Convention v%d.%d\n", + PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver)); + +} + static void __init psci_0_2_set_functions(void) { pr_info("Using standard PSCI v0.2 function IDs\n"); + psci_ops.get_version = psci_get_version; + psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_FN_NATIVE(0_2, CPU_SUSPEND); psci_ops.cpu_suspend = psci_cpu_suspend; @@ -539,6 +585,7 @@ static int __init psci_probe(void) psci_init_migrate(); if (PSCI_VERSION_MAJOR(ver) >= 1) { + psci_init_smccc(); psci_init_cpu_suspend(); psci_init_system_suspend(); } @@ -652,9 +699,9 @@ int __init psci_acpi_init(void) pr_info("probing for conduit method from ACPI.\n"); if (acpi_psci_use_hvc()) - invoke_psci_fn = __invoke_psci_fn_hvc; + set_conduit(PSCI_CONDUIT_HVC); else - invoke_psci_fn = __invoke_psci_fn_smc; + set_conduit(PSCI_CONDUIT_SMC); return psci_probe(); } diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 0e2011636fbb..c53c7ac992f8 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -582,9 +582,10 @@ static int fw_cfg_sysfs_remove(struct platform_device *pdev) { pr_debug("fw_cfg: unloading.\n"); fw_cfg_sysfs_cache_cleanup(); + sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr); + fw_cfg_io_cleanup(); fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset); fw_cfg_kobj_cleanup(fw_cfg_sel_ko); - fw_cfg_io_cleanup(); return 0; } diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c index 14f14efdf0d5..06d212a3d49d 100644 --- a/drivers/fpga/altera-ps-spi.c +++ b/drivers/fpga/altera-ps-spi.c @@ -249,7 +249,7 @@ static int altera_ps_probe(struct spi_device *spi) conf->data = of_id->data; conf->spi = spi; - conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_HIGH); + conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW); if (IS_ERR(conf->config)) { dev_err(&spi->dev, "Failed to get config gpio: %ld\n", PTR_ERR(conf->config)); diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c index d9ab7c75b14f..e0c73ceba2ed 100644 --- a/drivers/fpga/fpga-region.c +++ b/drivers/fpga/fpga-region.c @@ -147,6 +147,7 @@ static struct fpga_manager *fpga_region_get_manager(struct fpga_region *region) mgr_node = of_parse_phandle(np, "fpga-mgr", 0); if (mgr_node) { mgr = of_fpga_mgr_get(mgr_node); + of_node_put(mgr_node); of_node_put(np); return mgr; } @@ -192,10 +193,13 @@ static int fpga_region_get_bridges(struct fpga_region *region, parent_br = region_np->parent; /* If overlay has a list of bridges, use it. */ - if (of_parse_phandle(overlay, "fpga-bridges", 0)) + br = of_parse_phandle(overlay, "fpga-bridges", 0); + if (br) { + of_node_put(br); np = overlay; - else + } else { np = region_np; + } for (i = 0; ; i++) { br = of_parse_phandle(np, "fpga-bridges", i); @@ -203,12 +207,15 @@ static int fpga_region_get_bridges(struct fpga_region *region, break; /* If parent bridge is in list, skip it. 
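 * (of_parse_phandle() returns the node with an elevated refcount, so every exit path below, including this skip, must drop it with of_node_put(), which is what this change adds.)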
*/ - if (br == parent_br) + if (br == parent_br) { + of_node_put(br); continue; + } /* If node is a bridge, get it and add to list */ ret = fpga_bridge_get_to_list(br, region->info, &region->bridge_list); + of_node_put(br); /* If any of the bridges are in use, give up */ if (ret == -EBUSY) { diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c index 6b535ec858cc..15a1f4b348c4 100644 --- a/drivers/gpio/gpio-74x164.c +++ b/drivers/gpio/gpio-74x164.c @@ -23,6 +23,7 @@ struct gen_74x164_chip { struct gpio_chip gpio_chip; struct mutex lock; + struct gpio_desc *gpiod_oe; u32 registers; /* * Since the registers are chained, every byte sent will make * register at the end of the transfer. So, to have a logical * numbering, store the bytes in reverse order. */ - u8 buffer[0]; - struct gpio_desc *gpiod_oe; + u8 buffer[]; }; static int __gen_74x164_write_config(struct gen_74x164_chip *chip) diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index bfc53995064a..f03fe916eb9d 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c @@ -375,7 +375,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set) if (set) reg |= bit; else - reg &= bit; + reg &= ~bit; iowrite32(reg, addr); spin_unlock_irqrestore(&gpio->lock, flags); diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c index f33d4a5fe671..af0baf8da295 100644 --- a/drivers/gpio/gpio-ath79.c +++ b/drivers/gpio/gpio-ath79.c @@ -323,3 +323,6 @@ static struct platform_driver ath79_gpio_driver = { }; module_platform_driver(ath79_gpio_driver); + +MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index f75d8443ecaf..e4b3d7db68c9 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -383,7 +383,7 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger) u32 mask; d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data); - g = (struct davinci_gpio_regs __iomem *)d->regs; + g = (struct davinci_gpio_regs __iomem *)d->regs[0]; mask = __gpio_mask(data->irq - d->base_irq); if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c index 98c7ff2a76e7..8d62db447ec1 100644 --- a/drivers/gpio/gpio-iop.c +++ b/drivers/gpio/gpio-iop.c @@ -58,3 +58,7 @@ static int __init iop3xx_gpio_init(void) return platform_driver_register(&iop3xx_gpio_driver); } arch_initcall(iop3xx_gpio_init); + +MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors"); +MODULE_AUTHOR("Lennert Buytenhek "); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index 16cbc5702865..491b0974c0fe 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c @@ -190,6 +190,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d) }; int i, j; + /* + * STMPE1600: to be able to get IRQ from pins, + * a read must be done on GPMR register, or a write in + * GPSR or GPCR registers + */ + if (stmpe->partnum == STMPE1600) { + stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]); + stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]); + } + for (i = 0; i < CACHE_NR_REGS; i++) { /* STMPE801 and STMPE1600 don't have RE and FE registers */ if ((stmpe->partnum == STMPE801 || @@ -227,21 +237,11 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = 
irq_data_get_irq_chip_data(d); struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc); - struct stmpe *stmpe = stmpe_gpio->stmpe; int offset = d->hwirq; int regoffset = offset / 8; int mask = BIT(offset % 8); stmpe_gpio->regs[REG_IE][regoffset] |= mask; - - /* - * STMPE1600 workaround: to be able to get IRQ from pins, - * a read must be done on GPMR register, or a write in - * GPSR or GPCR registers - */ - if (stmpe->partnum == STMPE1600) - stmpe_reg_read(stmpe, - stmpe->regs[STMPE_IDX_GPMR_LSB + regoffset]); } static void stmpe_dbg_show_one(struct seq_file *s, diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c index 57efb251f9c4..10523ce00c38 100644 --- a/drivers/gpio/gpio-thunderx.c +++ b/drivers/gpio/gpio-thunderx.c @@ -566,8 +566,10 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain, 0, 0, of_node_to_fwnode(dev->of_node), &thunderx_gpio_irqd_ops, txgpio); - if (!txgpio->irqd) + if (!txgpio->irqd) { + err = -ENOMEM; goto out; + } /* Push on irq_data and the domain for each line. */ for (i = 0; i < ngpio; i++) { diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index eb4528c87c0b..d6f3d9ee1350 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -1074,7 +1074,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip) } if (!chip->names) - devprop_gpiochip_set_names(chip); + devprop_gpiochip_set_names(chip, dev_fwnode(chip->parent)); acpi_gpiochip_request_regions(acpi_gpio); acpi_gpiochip_scan_gpios(acpi_gpio); diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c index 27f383bda7d9..f748aa3e77f7 100644 --- a/drivers/gpio/gpiolib-devprop.c +++ b/drivers/gpio/gpiolib-devprop.c @@ -19,30 +19,27 @@ /** * devprop_gpiochip_set_names - Set GPIO line names using device properties * @chip: GPIO chip whose lines should be named, if possible + * @fwnode: Property Node containing the gpio-line-names property * * Looks for device property "gpio-line-names" and if it exists assigns * GPIO line names for the chip. The memory allocated for the assigned * names belong to the underlying firmware node and should not be released * by the caller. 
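 * * As an illustration, a firmware node might carry (values hypothetical): gpio-line-names = "reset", "cs-n", "", "sd-detect"; * The number of entries must match the chip's ngpio, and per the DT binding an empty string can be used for lines that should stay unnamed.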
*/ -void devprop_gpiochip_set_names(struct gpio_chip *chip) +void devprop_gpiochip_set_names(struct gpio_chip *chip, + const struct fwnode_handle *fwnode) { struct gpio_device *gdev = chip->gpiodev; const char **names; int ret, i; - if (!chip->parent) { - dev_warn(&gdev->dev, "GPIO chip parent is NULL\n"); - return; - } - - ret = device_property_read_string_array(chip->parent, "gpio-line-names", + ret = fwnode_property_read_string_array(fwnode, "gpio-line-names", NULL, 0); if (ret < 0) return; if (ret != gdev->ngpio) { - dev_warn(chip->parent, + dev_warn(&gdev->dev, "names %d do not match number of GPIOs %d\n", ret, gdev->ngpio); return; @@ -52,10 +49,10 @@ void devprop_gpiochip_set_names(struct gpio_chip *chip) if (!names) return; - ret = device_property_read_string_array(chip->parent, "gpio-line-names", + ret = fwnode_property_read_string_array(fwnode, "gpio-line-names", names, gdev->ngpio); if (ret < 0) { - dev_warn(chip->parent, "failed to read GPIO line names\n"); + dev_warn(&gdev->dev, "failed to read GPIO line names\n"); kfree(names); return; } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index bfcd20699ec8..ba38f530e403 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -493,7 +493,8 @@ int of_gpiochip_add(struct gpio_chip *chip) /* If the chip defines names itself, these take precedence */ if (!chip->names) - devprop_gpiochip_set_names(chip); + devprop_gpiochip_set_names(chip, + of_fwnode_handle(chip->of_node)); of_node_get(chip->of_node); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index eb80dac4e26a..7e0bfd7347f6 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -443,7 +443,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) struct gpiohandle_request handlereq; struct linehandle_state *lh; struct file *file; - int fd, i, ret; + int fd, i, count = 0, ret; if (copy_from_user(&handlereq, ip, sizeof(handlereq))) return -EFAULT; @@ -489,6 +489,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) if (ret) goto out_free_descs; lh->descs[i] = desc; + count = i; if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW) set_bit(FLAG_ACTIVE_LOW, &desc->flags); @@ -555,7 +556,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip) out_put_unused_fd: put_unused_fd(fd); out_free_descs: - for (; i >= 0; i--) + for (i = 0; i < count; i++) gpiod_free(lh->descs[i]); kfree(lh->label); out_free_lh: @@ -723,6 +724,9 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) struct gpioevent_data ge; int ret, level; + /* Do not leak kernel stack to userspace */ + memset(&ge, 0, sizeof(ge)); + ge.timestamp = ktime_get_real_ns(); level = gpiod_get_value_cansleep(le->desc); @@ -809,7 +813,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) desc = &gdev->descs[offset]; ret = gpiod_request(desc, le->label); if (ret) - goto out_free_desc; + goto out_free_label; le->desc = desc; le->eflags = eflags; @@ -3309,6 +3313,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, struct gpio_desc *desc = NULL; int status; enum gpio_lookup_flags lookupflags = 0; + /* Maybe we have a device name, maybe not */ + const char *devname = dev ? 
dev_name(dev) : "?"; dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id); @@ -3337,7 +3343,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, return desc; } - status = gpiod_request(desc, con_id); + /* + * If a connection label was passed use that, else attempt to use + * the device name as label + */ + status = gpiod_request(desc, con_id ? con_id : devname); if (status < 0) return ERR_PTR(status); diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index d003ccb12781..3d4d0634c9dd 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -224,7 +224,8 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc) return desc - &desc->gdev->descs[0]; } -void devprop_gpiochip_set_names(struct gpio_chip *chip); +void devprop_gpiochip_set_names(struct gpio_chip *chip, + const struct fwnode_handle *fwnode); /* With descriptor prefix */ diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 83cb2a88c204..b1d12a0c77a2 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -110,7 +110,7 @@ config DRM_FBDEV_OVERALLOC config DRM_LOAD_EDID_FIRMWARE bool "Allow to specify an EDID data set instead of probing for it" - depends on DRM_KMS_HELPER + depends on DRM help Say Y here, if you want to use EDID data to be loaded from the /lib/firmware directory or one of the provided built-in diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 8ce07039bb89..7e1442fa1755 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -29,6 +29,7 @@ drm-$(CONFIG_DRM_PANEL) += drm_panel.o drm-$(CONFIG_OF) += drm_of.o drm-$(CONFIG_AGP) += drm_agpsupport.o drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o +drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ @@ -37,7 +38,6 @@ drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ drm_scdc_helper.o drm_gem_framebuffer_helper.o drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o -drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 103635ab784c..712ad8c2bdc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -697,7 +697,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev, struct amdgpu_queue_mgr *mgr); int amdgpu_queue_mgr_map(struct amdgpu_device *adev, struct amdgpu_queue_mgr *mgr, - int hw_ip, int instance, int ring, + u32 hw_ip, u32 instance, u32 ring, struct amdgpu_ring **out_ring); /* @@ -1536,18 +1536,14 @@ struct amdgpu_device { /* sdma */ struct amdgpu_sdma sdma; - union { - struct { - /* uvd */ - struct amdgpu_uvd uvd; + /* uvd */ + struct amdgpu_uvd uvd; - /* vce */ - struct amdgpu_vce vce; - }; + /* vce */ + struct amdgpu_vce vce; - /* vcn */ - struct amdgpu_vcn vcn; - }; + /* vcn */ + struct amdgpu_vcn vcn; /* firmwares */ struct amdgpu_firmware firmware; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 57afad79f55d..8fa850a070e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -540,6 +540,9 @@ int 
amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, size_t size; u32 retry = 3; + if (amdgpu_acpi_pcie_notify_device_ready(adev)) + return -EINVAL; + /* Get the device handle */ handle = ACPI_HANDLE(&adev->pdev->dev); if (!handle) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 5432af39a674..f7fa7675215c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -265,6 +265,9 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - /* The sclk is in quantas of 10kHz */ - return adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100; + /* the sclk is in quantas of 10kHz */ + if (amdgpu_sriov_vf(adev)) + return adev->clock.default_sclk / 100; + + return amdgpu_dpm_get_sclk(adev, false) / 100; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 8d689ab7e429..1ef486b5d54b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -26,6 +26,7 @@ #define AMDGPU_AMDKFD_H_INCLUDED #include +#include #include #include diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index b9dbbf9cb8b0..bdabaa3399db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -369,29 +369,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; + unsigned long end_jiffies; uint32_t sdma_base_addr; + uint32_t data; m = get_sdma_mqd(mqd); sdma_base_addr = get_sdma_base_addr(m); - WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, - m->sdma_rlc_virtual_addr); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, - m->sdma_rlc_rb_base); + end_jiffies = msecs_to_jiffies(2000) + jiffies; + while (true) { + data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) + return -ETIME; + usleep_range(500, 1000); + } + if (m->sdma_engine_id) { + data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL); + data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL, + RESUME_CTX, 0); + WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data); + } else { + data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL); + data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, + RESUME_CTX, 0); + WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data); + } + WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, + m->sdma_rlc_doorbell); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0); + WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, + m->sdma_rlc_virtual_addr); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base); WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, m->sdma_rlc_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdma_rlc_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdma_rlc_rb_rptr_addr_hi); - - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, - m->sdma_rlc_doorbell); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, m->sdma_rlc_rb_cntl); @@ -564,9 +585,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, } WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - 
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index ce443586a0c7..cc4e18dcd8b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1766,34 +1766,32 @@ bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev) return true; } -/* Atom needs data in little endian format - * so swap as appropriate when copying data to - * or from atom. Note that atom operates on - * dw units. +/* Atom needs data in little endian format so swap as appropriate when copying + * data to or from atom. Note that atom operates on dw units. + * + * Use to_le=true when sending data to atom and provide at least + * ALIGN(num_bytes,4) bytes in the dst buffer. + * + * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4) + * bytes in the src buffer. */ void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) { #ifdef __BIG_ENDIAN - u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ - u32 *dst32, *src32; + u32 src_tmp[5], dst_tmp[5]; int i; + u8 align_num_bytes = ALIGN(num_bytes, 4); - memcpy(src_tmp, src, num_bytes); - src32 = (u32 *)src_tmp; - dst32 = (u32 *)dst_tmp; if (to_le) { - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = cpu_to_le32(src32[i]); - memcpy(dst, dst_tmp, num_bytes); + memcpy(src_tmp, src, num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = cpu_to_le32(src_tmp[i]); + memcpy(dst, dst_tmp, align_num_bytes); } else { - u8 dws = num_bytes & ~3; - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = le32_to_cpu(src32[i]); - memcpy(dst, dst_tmp, dws); - if (num_bytes % 4) { - for (i = 0; i < (num_bytes % 4); i++) - dst[dws+i] = dst_tmp[dws+i]; - } + memcpy(src_tmp, src, align_num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = le32_to_cpu(src_tmp[i]); + memcpy(dst, dst_tmp, num_bytes); } #else memcpy(dst, src, num_bytes); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index c13c51af0b68..1ae5ae8c45a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -14,6 +14,16 @@ #include "amd_acpi.h" +#define AMDGPU_PX_QUIRK_FORCE_ATPX (1 << 0) + +struct amdgpu_px_quirk { + u32 chip_vendor; + u32 chip_device; + u32 subsys_vendor; + u32 subsys_device; + u32 px_quirk_flags; +}; + struct amdgpu_atpx_functions { bool px_params; bool power_cntl; @@ -35,6 +45,7 @@ struct amdgpu_atpx { static struct amdgpu_atpx_priv { bool atpx_detected; bool bridge_pm_usable; + unsigned int quirks; /* handle for device - and atpx */ acpi_handle dhandle; acpi_handle other_handle; @@ -205,13 +216,19 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) atpx->is_hybrid = false; if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { - printk("ATPX Hybrid Graphics\n"); - /* - * Disable legacy PM methods only when pcie port PM is usable, - * otherwise the device might fail to power off or power on. 
- */ - atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable; - atpx->is_hybrid = true; + if (amdgpu_atpx_priv.quirks & AMDGPU_PX_QUIRK_FORCE_ATPX) { + printk("ATPX Hybrid Graphics, forcing to ATPX\n"); + atpx->functions.power_cntl = true; + atpx->is_hybrid = false; + } else { + printk("ATPX Hybrid Graphics\n"); + /* + * Disable legacy PM methods only when pcie port PM is usable, + * otherwise the device might fail to power off or power on. + */ + atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable; + atpx->is_hybrid = true; + } } atpx->dgpu_req_power_for_displays = false; @@ -547,6 +564,32 @@ static const struct vga_switcheroo_handler amdgpu_atpx_handler = { .get_client_id = amdgpu_atpx_get_client_id, }; +static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = { + /* HG _PR3 doesn't seem to work on this A+A weston board */ + { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0, 0, 0, 0, 0 }, +}; + +static void amdgpu_atpx_get_quirks(struct pci_dev *pdev) +{ + const struct amdgpu_px_quirk *p = amdgpu_px_quirk_list; + + /* Apply PX quirks */ + while (p && p->chip_device != 0) { + if (pdev->vendor == p->chip_vendor && + pdev->device == p->chip_device && + pdev->subsystem_vendor == p->subsys_vendor && + pdev->subsystem_device == p->subsys_device) { + amdgpu_atpx_priv.quirks |= p->px_quirk_flags; + break; + } + ++p; + } +} + /** * amdgpu_atpx_detect - detect whether we have PX * @@ -570,6 +613,7 @@ static bool amdgpu_atpx_detect(void) parent_pdev = pci_upstream_bridge(pdev); d3_supported |= parent_pdev && parent_pdev->bridge_d3; + amdgpu_atpx_get_quirks(pdev); } while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { @@ -579,6 +623,7 @@ static bool amdgpu_atpx_detect(void) parent_pdev = pci_upstream_bridge(pdev); d3_supported |= parent_pdev && parent_pdev->bridge_d3; + amdgpu_atpx_get_quirks(pdev); } if (has_atpx && vga_count == 2) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index c21adf60a7f2..057e1ecd83ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -59,12 +59,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size) return false; } - tmp = bios[0x18] | (bios[0x19] << 8); - if (bios[tmp + 0x14] != 0x0) { - DRM_INFO("Not an x86 BIOS ROM\n"); - return false; - } - bios_header_start = bios[0x48] | (bios[0x49] << 8); if (!bios_header_start) { DRM_INFO("Can't locate bios header\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 59089e027f4d..92be7f6de197 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -233,8 +233,10 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, for (i = 0; i < list->num_entries; i++) { unsigned priority = list->array[i].priority; - list_add_tail(&list->array[i].tv.head, - &bucket[priority]); + if (!list->array[i].robj->parent) + list_add_tail(&list->array[i].tv.head, + &bucket[priority]); + list->array[i].user_pages = NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 8d1cf2d3e663..1eff36a87595 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector) /* don't do anything if sink is not display port, i.e., * passive dp->(dvi|hdmi) adaptor */ - if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { - int saved_dpms = connector->dpms; - /* Only turn off the display if it's physically disconnected */ - if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { - /* Don't try to start link training before we - * have the dpcd */ - if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) - return; - - /* set it to OFF so that drm_helper_connector_dpms() - * won't return immediately since the current state - * is ON at this point. - */ - connector->dpms = DRM_MODE_DPMS_OFF; - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - } - connector->dpms = saved_dpms; + if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT && + amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) && + amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { + /* Don't start link training before we have the DPCD */ + if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) + return; + + /* Turn the connector off and back on immediately, which + * will trigger link training + */ + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); } } } @@ -739,9 +732,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (encoder) { struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); @@ -760,8 +755,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) /* check acpi lid status ??? 
*/ amdgpu_connector_update_scratch_regs(connector, ret); - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } + return ret; } @@ -871,9 +870,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } encoder = amdgpu_connector_best_single_encoder(connector); if (!encoder) @@ -927,8 +928,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } @@ -991,9 +994,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; bool dret = false, broken_edid = false; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { ret = connector->status; @@ -1118,8 +1123,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); exit: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } @@ -1362,9 +1369,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { ret = connector->status; @@ -1432,8 +1441,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 60d8bedb694d..5f892ad6476e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -403,6 +403,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, if (candidate->robj == validated) break; + /* We can't move pinned BOs here */ + if (bo->pin_count) + continue; + other = 
amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); /* Check if this BO is in one of the domains we need space for */ @@ -518,7 +522,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, INIT_LIST_HEAD(&duplicates); amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); - if (p->uf_entry.robj) + if (p->uf_entry.robj && !p->uf_entry.robj->parent) list_add(&p->uf_entry.tv.head, &p->validated); if (need_mmap_lock) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index e630d918fefc..bc746a6e0ecc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2076,8 +2076,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, * ignore it */ vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode); - if (amdgpu_runtime_pm == 1) - runtime = true; if (amdgpu_device_is_px(ddev)) runtime = true; if (!pci_is_thunderbolt_attached(adev->pdev)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7171968f261e..837332e84d78 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj) struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj); if (robj) { - if (robj->gem_base.import_attach) - drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); amdgpu_mn_unregister(robj); amdgpu_bo_unref(&robj); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 659997bfff30..cd84bd0b1eaf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -322,14 +322,45 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) { unsigned i; int r, ret = 0; + long tmo_gfx, tmo_mm; + + tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT; + if (amdgpu_sriov_vf(adev)) { + /* MM engines on the hypervisor side are not scheduled together with + * the CP and SDMA engines, so even in exclusive mode the MM engines + * could still be running on another VF; the IB test timeout for MM + * engines under SR-IOV should therefore be set to a long time. 8 sec + * should be enough for the MM engines to come back to this VF. 
+ */ + tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT; + } + + if (amdgpu_sriov_runtime(adev)) { + /* the CP and SDMA engines are scheduled together, so the timeout + * needs to be wide enough to cover the time spent waiting for them + * to come back under RUNTIME only + */ + tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT; + } for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; + long tmo; if (!ring || !ring->ready) continue; - r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT); + /* MM engines need more time */ + if (ring->funcs->type == AMDGPU_RING_TYPE_UVD || + ring->funcs->type == AMDGPU_RING_TYPE_VCE || + ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC || + ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC || + ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) + tmo = tmo_mm; + else + tmo = tmo_gfx; + + r = amdgpu_ring_test_ib(ring, tmo); if (r) { ring->ready = false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 9e495da0bb03..f08624f2f209 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -46,6 +46,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) amdgpu_bo_kunmap(bo); + if (bo->gem_base.import_attach) + drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg); drm_gem_object_release(&bo->gem_base); amdgpu_bo_unref(&bo->parent); if (!list_empty(&bo->shadow_list)) { @@ -391,6 +393,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, !kernel, NULL, acc_size, sg, resv, &amdgpu_ttm_bo_destroy); + if (unlikely(r != 0)) + return r; + bytes_moved = atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved; if (adev->mc.visible_vram_size < adev->mc.real_vram_size && @@ -400,9 +405,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, else amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0); - if (unlikely(r != 0)) - return r; - if (kernel) bo->tbo.priority = 1; @@ -681,8 +683,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return -EINVAL; /* A shared bo cannot be migrated to VRAM */ - if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM)) - return -EINVAL; + if (bo->prime_shared_count) { + if (domain & AMDGPU_GEM_DOMAIN_GTT) + domain = AMDGPU_GEM_DOMAIN_GTT; + else + return -EINVAL; + } if (bo->pin_count) { uint32_t mem_type = bo->tbo.mem.mem_type; @@ -745,8 +751,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, } if (domain == AMDGPU_GEM_DOMAIN_VRAM) { adev->vram_pin_size += amdgpu_bo_size(bo); - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) - adev->invisible_pin_size += amdgpu_bo_size(bo); + adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo); } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { adev->gart_pin_size += amdgpu_bo_size(bo); } @@ -784,8 +789,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { adev->vram_pin_size -= amdgpu_bo_size(bo); - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) - adev->invisible_pin_size -= amdgpu_bo_size(bo); + adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo); } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { adev->gart_pin_size -= amdgpu_bo_size(bo); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c index befc09b68543..b293380bd46c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c @@ -63,7 +63,7 @@ static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper, static int amdgpu_identity_map(struct amdgpu_device *adev, struct amdgpu_queue_mapper *mapper, - int ring, + u32 ring, struct amdgpu_ring **out_ring) { switch (mapper->hw_ip) { @@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip) static int amdgpu_lru_map(struct amdgpu_device *adev, struct amdgpu_queue_mapper *mapper, - int user_ring, + u32 user_ring, struct amdgpu_ring **out_ring) { int r, i, j; @@ -208,7 +208,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev, */ int amdgpu_queue_mgr_map(struct amdgpu_device *adev, struct amdgpu_queue_mgr *mgr, - int hw_ip, int instance, int ring, + u32 hw_ip, u32 instance, u32 ring, struct amdgpu_ring **out_ring) { int r, ip_num_rings; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 5ce65280b396..90adff83e489 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -136,7 +136,8 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) if (ring->funcs->end_use) ring->funcs->end_use(ring); - amdgpu_ring_lru_touch(ring->adev, ring); + if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) + amdgpu_ring_lru_touch(ring->adev, ring); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 43093bffa2cf..557829a84778 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -64,6 +64,7 @@ extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem); uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); +u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo); uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index e19928dae8e3..17deca0f6255 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -293,12 +293,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) if (adev->uvd.vcpu_bo == NULL) return 0; - for (i = 0; i < adev->uvd.max_handles; ++i) - if (atomic_read(&adev->uvd.handles[i])) - break; + /* only valid for physical mode */ + if (adev->asic_type < CHIP_POLARIS10) { + for (i = 0; i < adev->uvd.max_handles; ++i) + if (atomic_read(&adev->uvd.handles[i])) + break; - if (i == AMDGPU_MAX_UVD_HANDLES) - return 0; + if (i == adev->uvd.max_handles) + return 0; + } cancel_delayed_work_sync(&adev->uvd.idle_work); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index c855366521ab..9fc3d387eae3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -647,7 +647,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) uint32_t allocated = 0; uint32_t tmp, handle = 0; uint32_t *size = &tmp; - int i, r, idx = 0; + int i, r = 0, idx = 0; p->job->vm = NULL; ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 041e0121590c..308a9755eae3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -85,6 +85,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) } 
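/* ucode_version, as unpacked below, carries the major revision in bits [31:24], the minor in [15:8] and the family id in [7:0]; a value of 0x01002500, for instance, would decode as version 1.37, family 0x00. */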
hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); family_id = le32_to_cpu(hdr->ucode_version) & 0xff; version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index bd20ff018512..863c6dd0123a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1201,7 +1201,7 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent) int amdgpu_vm_update_directories(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - int r; + int r = 0; r = amdgpu_vm_update_level(adev, vm, &vm->root, 0); if (r) @@ -2586,7 +2586,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct amdgpu_bo_va_mapping *mapping, *tmp; bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt; - int i; + struct amdgpu_bo *root; + int i, r; amd_sched_entity_fini(vm->entity.sched, &vm->entity); @@ -2609,7 +2610,15 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_vm_free_mapping(adev, vm, mapping, NULL); } - amdgpu_vm_free_levels(&vm->root); + root = amdgpu_bo_ref(vm->root.bo); + r = amdgpu_bo_reserve(root, true); + if (r) { + dev_err(adev->dev, "Leaking page tables because BO reservation failed\n"); + } else { + amdgpu_vm_free_levels(&vm->root); + amdgpu_bo_unreserve(root); + } + amdgpu_bo_unref(&root); dma_fence_put(vm->last_dir_update); for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) amdgpu_vm_free_reserved_vmid(adev, vm, i); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 26e900627971..86d8a961518e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -101,6 +101,22 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, adev->mc.visible_vram_size : end) - start; } +/** + * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size + * + * @bo: &amdgpu_bo buffer object (must be in VRAM) + * + * Returns: + * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM. + */ +u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo) +{ + if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) + return amdgpu_bo_size(bo); + + return 0; +} + /** * amdgpu_vram_mgr_new - allocate new ranges * @@ -140,7 +156,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node); } - nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL); + nodes = kvmalloc_array(num_nodes, sizeof(*nodes), + GFP_KERNEL | __GFP_ZERO); if (!nodes) return -ENOMEM; @@ -195,7 +212,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, drm_mm_remove_node(&nodes[i]); spin_unlock(&mgr->lock); - kfree(nodes); + kvfree(nodes); return r == -ENOSPC ? 
0 : r; } @@ -234,7 +251,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man, atomic64_sub(usage, &mgr->usage); atomic64_sub(vis_usage, &mgr->vis_usage); - kfree(mem->mm_node); + kvfree(mem->mm_node); mem->mm_node = NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index f508f4d01e4a..11beef7c595f 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -866,7 +866,7 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, addr & 0xfffffffc); amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); amdgpu_ring_write(ring, seq); /* reference */ - amdgpu_ring_write(ring, 0xfffffff); /* mask */ + amdgpu_ring_write(ring, 0xffffffff); /* mask */ amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */ } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index b9ee9073cb0d..f3f93b6b51ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -437,6 +437,8 @@ static int dce_virtual_sw_fini(void *handle) drm_kms_helper_poll_fini(adev->ddev); drm_mode_config_cleanup(adev->ddev); + /* clear crtcs pointer to avoid dce irq finish routine access freed data */ + memset(adev->mode_info.crtcs, 0, sizeof(adev->mode_info.crtcs[0]) * AMDGPU_MAX_CRTCS); adev->mode_info.mode_config_initialized = false; return 0; } @@ -723,7 +725,7 @@ static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *ad int crtc, enum amdgpu_interrupt_state state) { - if (crtc >= adev->mode_info.num_crtc) { + if (crtc >= adev->mode_info.num_crtc || !adev->mode_info.crtcs[crtc]) { DRM_DEBUG("invalid crtc %d\n", crtc); return; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 00868764a0dd..6f76b2646465 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4387,34 +4387,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev) case CHIP_KAVERI: adev->gfx.config.max_shader_engines = 1; adev->gfx.config.max_tile_pipes = 4; - if ((adev->pdev->device == 0x1304) || - (adev->pdev->device == 0x1305) || - (adev->pdev->device == 0x130C) || - (adev->pdev->device == 0x130F) || - (adev->pdev->device == 0x1310) || - (adev->pdev->device == 0x1311) || - (adev->pdev->device == 0x131C)) { - adev->gfx.config.max_cu_per_sh = 8; - adev->gfx.config.max_backends_per_se = 2; - } else if ((adev->pdev->device == 0x1309) || - (adev->pdev->device == 0x130A) || - (adev->pdev->device == 0x130D) || - (adev->pdev->device == 0x1313) || - (adev->pdev->device == 0x131D)) { - adev->gfx.config.max_cu_per_sh = 6; - adev->gfx.config.max_backends_per_se = 2; - } else if ((adev->pdev->device == 0x1306) || - (adev->pdev->device == 0x1307) || - (adev->pdev->device == 0x130B) || - (adev->pdev->device == 0x130E) || - (adev->pdev->device == 0x1315) || - (adev->pdev->device == 0x131B)) { - adev->gfx.config.max_cu_per_sh = 4; - adev->gfx.config.max_backends_per_se = 1; - } else { - adev->gfx.config.max_cu_per_sh = 3; - adev->gfx.config.max_backends_per_se = 1; - } + adev->gfx.config.max_cu_per_sh = 8; + adev->gfx.config.max_backends_per_se = 2; adev->gfx.config.max_sh_per_se = 1; adev->gfx.config.max_texture_channel_caches = 4; adev->gfx.config.max_gprs = 256; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index fc260c13b1da..a7e54820a330 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1398,10 +1398,11 @@ static const u32 sgpr_init_compute_shader[] = static const u32 vgpr_init_regs[] = { mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff, - mmCOMPUTE_RESOURCE_LIMITS, 0, + mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */ mmCOMPUTE_NUM_THREAD_X, 256*4, mmCOMPUTE_NUM_THREAD_Y, 1, mmCOMPUTE_NUM_THREAD_Z, 1, + mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */ mmCOMPUTE_PGM_RSRC2, 20, mmCOMPUTE_USER_DATA_0, 0xedcedc00, mmCOMPUTE_USER_DATA_1, 0xedcedc01, @@ -1418,10 +1419,11 @@ static const u32 vgpr_init_regs[] = static const u32 sgpr1_init_regs[] = { mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f, - mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, + mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */ mmCOMPUTE_NUM_THREAD_X, 256*5, mmCOMPUTE_NUM_THREAD_Y, 1, mmCOMPUTE_NUM_THREAD_Z, 1, + mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */ mmCOMPUTE_PGM_RSRC2, 20, mmCOMPUTE_USER_DATA_0, 0xedcedc00, mmCOMPUTE_USER_DATA_1, 0xedcedc01, @@ -1442,6 +1444,7 @@ static const u32 sgpr2_init_regs[] = mmCOMPUTE_NUM_THREAD_X, 256*5, mmCOMPUTE_NUM_THREAD_Y, 1, mmCOMPUTE_NUM_THREAD_Z, 1, + mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */ mmCOMPUTE_PGM_RSRC2, 20, mmCOMPUTE_USER_DATA_0, 0xedcedc00, mmCOMPUTE_USER_DATA_1, 0xedcedc01, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 69182eeca264..1a30c54a0889 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2889,7 +2889,13 @@ static int gfx_v9_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); if (amdgpu_sriov_vf(adev)) { - pr_debug("For SRIOV client, shouldn't do anything.\n"); + gfx_v9_0_cp_gfx_enable(adev, false); + /* polling must be disabled for SRIOV when the hw has finished; otherwise + * the CPC engine may keep fetching the WB address, which is already + * invalid once the sw side has finished, and trigger a DMAR read error + * on the hypervisor side.
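+ * Editor's note (illustrative, not part of the original patch text): the
+ * WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0) call below is a
+ * read-modify-write helper, so it clears only the EN field of the
+ * polling control register and leaves the remaining bits untouched.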
+ */ + WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); return 0; } gfx_v9_0_cp_enable(adev, false); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index d04d0b123212..a1d71429fb72 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -395,7 +395,16 @@ static int gmc_v9_0_early_init(void *handle) static int gmc_v9_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 }; + /* + * The latest engine allocation on gfx9 is: + * Engine 0, 1: idle + * Engine 2, 3: firmware + * Engine 4~13: amdgpu ring, subject to change when ring number changes + * Engine 14~15: idle + * Engine 16: kfd tlb invalidation + * Engine 17: Gart flushes + */ + unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 }; unsigned i; for(i = 0; i < adev->num_rings; ++i) { @@ -408,9 +417,9 @@ static int gmc_v9_0_late_init(void *handle) ring->funcs->vmhub); } - /* Engine 17 is used for GART flushes */ + /* Engine 16 is used for KFD and 17 for GART flushes */ for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i) - BUG_ON(vm_inv_eng[i] > 17); + BUG_ON(vm_inv_eng[i] > 16); return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); } @@ -447,7 +456,10 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); if (!adev->mc.vram_width) { /* hbm memory channel size */ - chansize = 128; + if (adev->flags & AMD_IS_APU) + chansize = 64; + else + chansize = 128; tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0); tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 2812d88a8bdd..9b7b01333fc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -276,9 +276,17 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev, /* see what event we get */ r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION); - /* only handle FLR_NOTIFY now */ - if (!r) - schedule_work(&adev->virt.flr_work); + /* sometimes the interrupt is delayed before it is injected into the VM; + * in that case the IDH_FLR_NOTIFICATION is overwritten by the VF FLR from + * the GIM side and the receive above can fail, so schedule the flr_work + * anyway + */ + if (r) { + DRM_ERROR("FLR_NOTIFICATION is missed\n"); + xgpu_ai_mailbox_send_ack(adev); + } + + schedule_work(&adev->virt.flr_work); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index f2d0710258cb..9928473234a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -856,7 +856,7 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, addr & 0xfffffffc); amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); amdgpu_ring_write(ring, seq); /* reference */ - amdgpu_ring_write(ring, 0xfffffff); /* mask */ + amdgpu_ring_write(ring, 0xffffffff); /* mask */ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */ } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index b1de44f22824..f5db1fad3f05 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -1099,7 +1099,7 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
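/*
 * Editor's sketch of why the mask widening above (and in the hunks below for
 * the other SDMA generations) matters; the numbers are illustrative, the
 * packet semantics are as used by the surrounding code. POLL_REGMEM waits
 * until (*fence_addr & mask) matches the 32-bit sequence number, so a
 * 28-bit mask breaks once the counter grows past bit 27:
 *
 *   u32 seq = 0x10000000;                  // fence value actually written
 *   u32 with_old_mask = seq & 0xfffffff;   // == 0, bit 28 is dropped
 *   u32 with_new_mask = seq & 0xffffffff;  // == seq, full-width compare
 *
 * hence the change to 0xffffffff in cik_sdma and sdma_v2_4/v3_0/v4_0 alike.
 */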
amdgpu_ring_write(ring, addr & 0xfffffffc); amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); amdgpu_ring_write(ring, seq); /* reference */ - amdgpu_ring_write(ring, 0xfffffff); /* mask */ + amdgpu_ring_write(ring, 0xffffffff); /* mask */ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */ } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index fd7c72aaafa6..4e5fed7c66bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1136,7 +1136,7 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, addr & 0xfffffffc); amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); amdgpu_ring_write(ring, seq); /* reference */ - amdgpu_ring_write(ring, 0xfffffff); /* mask */ + amdgpu_ring_write(ring, 0xffffffff); /* mask */ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */ } diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 8284d5dbfc30..40520a968eac 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -31,6 +31,7 @@ #include "amdgpu_uvd.h" #include "amdgpu_vce.h" #include "atom.h" +#include "amd_pcie.h" #include "amdgpu_powerplay.h" #include "sid.h" #include "si_ih.h" @@ -1230,6 +1231,71 @@ static void si_detect_hw_virtualization(struct amdgpu_device *adev) adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } +static int si_get_pcie_lanes(struct amdgpu_device *adev) +{ + u32 link_width_cntl; + + if (adev->flags & AMD_IS_APU) + return 0; + + link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); + + switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) { + case LC_LINK_WIDTH_X1: + return 1; + case LC_LINK_WIDTH_X2: + return 2; + case LC_LINK_WIDTH_X4: + return 4; + case LC_LINK_WIDTH_X8: + return 8; + case LC_LINK_WIDTH_X0: + case LC_LINK_WIDTH_X16: + default: + return 16; + } +} + +static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes) +{ + u32 link_width_cntl, mask; + + if (adev->flags & AMD_IS_APU) + return; + + switch (lanes) { + case 0: + mask = LC_LINK_WIDTH_X0; + break; + case 1: + mask = LC_LINK_WIDTH_X1; + break; + case 2: + mask = LC_LINK_WIDTH_X2; + break; + case 4: + mask = LC_LINK_WIDTH_X4; + break; + case 8: + mask = LC_LINK_WIDTH_X8; + break; + case 16: + mask = LC_LINK_WIDTH_X16; + break; + default: + DRM_ERROR("invalid pcie lane request: %d\n", lanes); + return; + } + + link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); + link_width_cntl &= ~LC_LINK_WIDTH_MASK; + link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT; + link_width_cntl |= (LC_RECONFIG_NOW | + LC_RECONFIG_ARC_MISSING_ESCAPE); + + WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); +} + static const struct amdgpu_asic_funcs si_asic_funcs = { .read_disabled_bios = &si_read_disabled_bios, @@ -1240,6 +1306,8 @@ static const struct amdgpu_asic_funcs si_asic_funcs = .get_xclk = &si_get_xclk, .set_uvd_clocks = &si_set_uvd_clocks, .set_vce_clocks = NULL, + .get_pcie_lanes = &si_get_pcie_lanes, + .set_pcie_lanes = &si_set_pcie_lanes, .get_config_memsize = &si_get_config_memsize, }; @@ -1461,8 +1529,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) { struct pci_dev *root = adev->pdev->bus->self; int bridge_pos, gpu_pos; - u32 speed_cntl, mask, current_data_rate; - int ret, i; + u32 
speed_cntl, current_data_rate; + int i; u16 tmp16; if (pci_is_root_bus(adev->pdev->bus)) @@ -1474,23 +1542,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) return; - ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); - if (ret != 0) - return; - - if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80))) + if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | + CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))) return; speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >> LC_CURRENT_DATA_RATE_SHIFT; - if (mask & DRM_PCIE_SPEED_80) { + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) { if (current_data_rate == 2) { DRM_INFO("PCIE gen 3 link speeds already enabled\n"); return; } DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n"); - } else if (mask & DRM_PCIE_SPEED_50) { + } else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) { if (current_data_rate == 1) { DRM_INFO("PCIE gen 2 link speeds already enabled\n"); return; @@ -1506,7 +1571,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) if (!gpu_pos) return; - if (mask & DRM_PCIE_SPEED_80) { + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) { if (current_data_rate != 2) { u16 bridge_cfg, gpu_cfg; u16 bridge_cfg2, gpu_cfg2; @@ -1589,9 +1654,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev) pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); tmp16 &= ~0xf; - if (mask & DRM_PCIE_SPEED_80) + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) tmp16 |= 3; - else if (mask & DRM_PCIE_SPEED_50) + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) tmp16 |= 2; else tmp16 |= 1; diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index d63873f3f574..6f1dea157a77 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -26,6 +26,7 @@ #include "amdgpu_pm.h" #include "amdgpu_dpm.h" #include "amdgpu_atombios.h" +#include "amd_pcie.h" #include "sid.h" #include "r600_dpm.h" #include "si_dpm.h" @@ -3332,29 +3333,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev, } } -static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev, - u32 sys_mask, - enum amdgpu_pcie_gen asic_gen, - enum amdgpu_pcie_gen default_gen) -{ - switch (asic_gen) { - case AMDGPU_PCIE_GEN1: - return AMDGPU_PCIE_GEN1; - case AMDGPU_PCIE_GEN2: - return AMDGPU_PCIE_GEN2; - case AMDGPU_PCIE_GEN3: - return AMDGPU_PCIE_GEN3; - default: - if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3)) - return AMDGPU_PCIE_GEN3; - else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2)) - return AMDGPU_PCIE_GEN2; - else - return AMDGPU_PCIE_GEN1; - } - return AMDGPU_PCIE_GEN1; -} - static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, u32 *p, u32 *u) { @@ -3465,6 +3443,11 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, (adev->pdev->device == 0x6667)) { max_sclk = 75000; } + if ((adev->pdev->revision == 0xC3) || + (adev->pdev->device == 0x6665)) { + max_sclk = 60000; + max_mclk = 80000; + } } else if (adev->asic_type == CHIP_OLAND) { if ((adev->pdev->revision == 0xC7) || (adev->pdev->revision == 0x80) || @@ -5023,10 +5006,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev, table->ACPIState.levels[0].vddc.index, &table->ACPIState.levels[0].std_vddc); } - 
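/*
 * Editor's note on the migration visible in the si.c and si_dpm.c hunks
 * above (sketch; the CAIL_PCIE_LINK_SPEED_SUPPORT_* flags come from the
 * amd_pcie.h header the patch includes): instead of querying the DRM helper
 * at each call site, the precomputed adev->pm.pcie_gen_mask is tested
 * directly, e.g.:
 *
 *   if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
 *           tmp16 |= 3;   // request 8.0 GT/s in PCI_EXP_LNKCTL2
 *
 * which is exactly the test the rewritten si_pcie_gen3_enable() performs.
 */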
table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev, - si_pi->sys_pcie_mask, - si_pi->boot_pcie_gen, - AMDGPU_PCIE_GEN1); + table->ACPIState.levels[0].gen2PCIE = + (u8)amdgpu_get_pcie_gen_support(adev, + si_pi->sys_pcie_mask, + si_pi->boot_pcie_gen, + AMDGPU_PCIE_GEN1); if (si_pi->vddc_phase_shed_control) si_populate_phase_shedding_value(adev, @@ -6390,9 +6374,9 @@ static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev, { u32 lane_width; u32 new_lane_width = - (amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; + ((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; u32 current_lane_width = - (amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; + ((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; if (new_lane_width != current_lane_width) { amdgpu_set_pcie_lanes(adev, new_lane_width); @@ -7157,10 +7141,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev, pl->vddc = le16_to_cpu(clock_info->si.usVDDC); pl->vddci = le16_to_cpu(clock_info->si.usVDDCI); pl->flags = le32_to_cpu(clock_info->si.ulFlags); - pl->pcie_gen = r600_get_pcie_gen_support(adev, - si_pi->sys_pcie_mask, - si_pi->boot_pcie_gen, - clock_info->si.ucPCIEGen); + pl->pcie_gen = amdgpu_get_pcie_gen_support(adev, + si_pi->sys_pcie_mask, + si_pi->boot_pcie_gen, + clock_info->si.ucPCIEGen); /* patch up vddc if necessary */ ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc, @@ -7315,7 +7299,6 @@ static int si_dpm_init(struct amdgpu_device *adev) struct si_power_info *si_pi; struct atom_clock_dividers dividers; int ret; - u32 mask; si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL); if (si_pi == NULL) @@ -7325,11 +7308,9 @@ static int si_dpm_init(struct amdgpu_device *adev) eg_pi = &ni_pi->eg; pi = &eg_pi->rv7xx; - ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); - if (ret) - si_pi->sys_pcie_mask = 0; - else - si_pi->sys_pcie_mask = mask; + si_pi->sys_pcie_mask = + (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >> + CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT; si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index f2c3a49f73a0..ff7d4827385e 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -279,10 +279,7 @@ static void soc15_init_golden_registers(struct amdgpu_device *adev) } static u32 soc15_get_xclk(struct amdgpu_device *adev) { - if (adev->asic_type == CHIP_VEGA10) - return adev->clock.spll.reference_freq/4; - else - return adev->clock.spll.reference_freq; + return adev->clock.spll.reference_freq; } @@ -664,8 +661,8 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; - adev->pg_flags = AMD_PG_SUPPORT_SDMA | - AMD_PG_SUPPORT_MMHUB; + adev->pg_flags = AMD_PG_SUPPORT_SDMA; + adev->external_rev_id = 0x1; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index cf81065e3c5a..5183b46563f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -467,8 +467,8 @@ static int vce_v3_0_hw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; vce_v3_0_override_vce_clock_gating(adev, true); - if 
(!(adev->flags & AMD_IS_APU)) - amdgpu_asic_set_vce_clocks(adev, 10000, 10000); + + amdgpu_asic_set_vce_clocks(adev, 10000, 10000); for (i = 0; i < adev->vce.num_rings; i++) adev->vce.ring[i].ready = false; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 21e7b88401e1..a098712bdd2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1175,7 +1175,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = { static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1; + adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1; adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 9ff69b90df36..0327e0a6802b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -448,14 +448,19 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev, static void vi_detect_hw_virtualization(struct amdgpu_device *adev) { - uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); - /* bit0: 0 means pf and 1 means vf */ - /* bit31: 0 means disable IOV and 1 means enable */ - if (reg & 1) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; - - if (reg & 0x80000000) - adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; + uint32_t reg = 0; + + if (adev->asic_type == CHIP_TONGA || + adev->asic_type == CHIP_FIJI) { + reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); + /* bit0: 0 means pf and 1 means vf */ + /* bit31: 0 means disable IOV and 1 means enable */ + if (reg & 1) + adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; + + if (reg & 0x80000000) + adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; + } if (reg == 0) { if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */ @@ -724,33 +729,59 @@ static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock, return r; tmp = RREG32_SMC(cntl_reg); - tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK | - CG_DCLK_CNTL__DCLK_DIVIDER_MASK); + + if (adev->flags & AMD_IS_APU) + tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK; + else + tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK | - CG_DCLK_CNTL__DCLK_DIVIDER_MASK); tmp |= dividers.post_divider; WREG32_SMC(cntl_reg, tmp); for (i = 0; i < 100; i++) { - if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK) - break; + tmp = RREG32_SMC(status_reg); + if (adev->flags & AMD_IS_APU) { + if (tmp & 0x10000) + break; + } else { + if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK) + break; + } mdelay(10); } if (i == 100) return -ETIMEDOUT; - return 0; } +#define ixGNB_CLK1_DFS_CNTL 0xD82200F0 +#define ixGNB_CLK1_STATUS 0xD822010C +#define ixGNB_CLK2_DFS_CNTL 0xD8220110 +#define ixGNB_CLK2_STATUS 0xD822012C +#define ixGNB_CLK3_DFS_CNTL 0xD8220130 +#define ixGNB_CLK3_STATUS 0xD822014C + static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) { int r; - r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS); - if (r) - return r; + if (adev->flags & AMD_IS_APU) { + r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS); + if (r) + return r; - r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS); - if (r) - return r; + r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS); + if (r) + return r; + } else { + r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS); + if (r) + return r; + + r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS); + if (r) + return r; + } return 0; } @@ -760,6 +791,22
@@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) int r, i; struct atom_clock_dividers dividers; u32 tmp; + u32 reg_ctrl; + u32 reg_status; + u32 status_mask; + u32 reg_mask; + + if (adev->flags & AMD_IS_APU) { + reg_ctrl = ixGNB_CLK3_DFS_CNTL; + reg_status = ixGNB_CLK3_STATUS; + status_mask = 0x00010000; + reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK; + } else { + reg_ctrl = ixCG_ECLK_CNTL; + reg_status = ixCG_ECLK_STATUS; + status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK; + reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK; + } r = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, @@ -768,24 +815,25 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) return r; for (i = 0; i < 100; i++) { - if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK) + if (RREG32_SMC(reg_status) & status_mask) break; mdelay(10); } + if (i == 100) return -ETIMEDOUT; - tmp = RREG32_SMC(ixCG_ECLK_CNTL); - tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | - CG_ECLK_CNTL__ECLK_DIVIDER_MASK); + tmp = RREG32_SMC(reg_ctrl); + tmp &= ~reg_mask; tmp |= dividers.post_divider; - WREG32_SMC(ixCG_ECLK_CNTL, tmp); + WREG32_SMC(reg_ctrl, tmp); for (i = 0; i < 100; i++) { - if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK) + if (RREG32_SMC(reg_status) & status_mask) break; mdelay(10); } + if (i == 100) return -ETIMEDOUT; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 660b3fbade41..8a05efa7edf0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -716,12 +716,13 @@ static int kfd_ioctl_get_clock_counters(struct file *filep, struct timespec64 time; dev = kfd_device_by_id(args->gpu_id); - if (dev == NULL) - return -EINVAL; - - /* Reading GPU clock counter from KGD */ - args->gpu_clock_counter = - dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); + if (dev) + /* Reading GPU clock counter from KGD */ + args->gpu_clock_counter = + dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); + else + /* Node without GPU resource */ + args->gpu_clock_counter = 0; /* No access to rdtsc. 
Using raw monotonic time */ getrawmonotonic64(&time); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 44ffd23348fc..164fa4b1f9a9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -205,8 +205,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, struct cik_sdma_rlc_registers *m; m = get_sdma_mqd(mqd); - m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) << - SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | + m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1) + << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT | 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT | 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 1d312603de9f..308571b09c6b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -166,8 +166,7 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer, packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base; packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit; - /* TODO: scratch support */ - packet->sh_hidden_private_base_vmid = 0; + packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base; packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 03bec765b03d..f9a1a4db9be7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -184,6 +184,24 @@ int pqm_create_queue(struct process_queue_manager *pqm, switch (type) { case KFD_QUEUE_TYPE_SDMA: + if (dev->dqm->queue_count >= + CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) { + pr_err("Over-subscription is not allowed for SDMA.\n"); + retval = -EPERM; + goto err_create_queue; + } + + retval = create_cp_queue(pqm, dev, &q, properties, f, *qid); + if (retval != 0) + goto err_create_queue; + pqn->q = q; + pqn->kq = NULL; + retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, + &q->properties.vmid); + pr_debug("DQM returned %d for create_queue\n", retval); + print_queue(q); + break; + case KFD_QUEUE_TYPE_COMPUTE: /* check if there is over subscription */ if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) && diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 19ce59028d6b..e0b78fd9804d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -501,11 +501,17 @@ static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr, return ret; } +static void kfd_topology_kobj_release(struct kobject *kobj) +{ + kfree(kobj); +} + static const struct sysfs_ops sysprops_ops = { .show = sysprops_show, }; static struct kobj_type sysprops_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &sysprops_ops, }; @@ -541,6 +547,7 @@ static const struct sysfs_ops iolink_ops = { }; static struct kobj_type iolink_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &iolink_ops, }; @@ -568,6 +575,7 @@ static const struct sysfs_ops mem_ops = { }; static struct kobj_type mem_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = 
&mem_ops, }; @@ -607,6 +615,7 @@ static const struct sysfs_ops cache_ops = { }; static struct kobj_type cache_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &cache_ops, }; @@ -729,6 +738,7 @@ static const struct sysfs_ops node_ops = { }; static struct kobj_type node_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &node_ops, }; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index b33935fcf428..e6c6994e74ba 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -176,10 +176,10 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) cz_dpm_powerup_uvd(hwmgr); cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); + AMD_CG_STATE_UNGATE); cgs_set_powergating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); + AMD_PG_STATE_UNGATE); cz_dpm_update_uvd_dpm(hwmgr, false); } @@ -208,11 +208,11 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) cgs_set_clockgating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); + AMD_CG_STATE_UNGATE); cgs_set_powergating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); + AMD_PG_STATE_UNGATE); cz_dpm_update_vce_dpm(hwmgr); cz_enable_disable_vce_dpm(hwmgr, true); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index 84f01fd33aff..b50aa292d026 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -850,9 +850,9 @@ static int init_over_drive_limits( const ATOM_Tonga_POWERPLAYTABLE *powerplay_table) { hwmgr->platform_descriptor.overdriveLimit.engineClock = - le16_to_cpu(powerplay_table->ulMaxODEngineClock); + le32_to_cpu(powerplay_table->ulMaxODEngineClock); hwmgr->platform_descriptor.overdriveLimit.memoryClock = - le16_to_cpu(powerplay_table->ulMaxODMemoryClock); + le32_to_cpu(powerplay_table->ulMaxODMemoryClock); hwmgr->platform_descriptor.minOverdriveVDDC = 0; hwmgr->platform_descriptor.maxOverdriveVDDC = 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index 261b828ad590..2f3509be226f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c @@ -162,7 +162,7 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) AMD_CG_STATE_UNGATE); cgs_set_powergating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); + AMD_PG_STATE_UNGATE); smu7_update_uvd_dpm(hwmgr, false); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index b526f49be65d..336fdd8c7db0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -2788,10 +2788,13 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); - disable_mclk_switching = ((1 < info.display_count) || - disable_mclk_switching_for_frame_lock || - smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) || - (mode_info.refresh_rate > 120)); + if (info.display_count == 0) + disable_mclk_switching = false; + else + disable_mclk_switching = ((1 < info.display_count) || + 
disable_mclk_switching_for_frame_lock || + smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) || + (mode_info.refresh_rate > 120)); sclk = smu7_ps->performance_levels[0].engine_clock; mclk = smu7_ps->performance_levels[0].memory_clock; @@ -4576,13 +4579,6 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr, int tmp_result, result = 0; uint32_t sclk_mask = 0, mclk_mask = 0; - if (hwmgr->chip_id == CHIP_FIJI) { - if (request->type == AMD_PP_GFX_PROFILE) - smu7_enable_power_containment(hwmgr); - else if (request->type == AMD_PP_COMPUTE_PROFILE) - smu7_disable_power_containment(hwmgr); - } - if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index f8f02e70b8bc..ca232a9e2334 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -3243,10 +3243,13 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ForceMclkHigh); - disable_mclk_switching = (info.display_count > 1) || - disable_mclk_switching_for_frame_lock || - disable_mclk_switching_for_vr || - force_mclk_high; + if (info.display_count == 0) + disable_mclk_switching = false; + else + disable_mclk_switching = (info.display_count > 1) || + disable_mclk_switching_for_frame_lock || + disable_mclk_switching_for_vr || + force_mclk_high; sclk = vega10_ps->performance_levels[0].gfx_clock; mclk = vega10_ps->performance_levels[0].mem_clock; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h index 262c8ded87c0..dafc9c4b1e6f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h @@ -40,7 +40,7 @@ struct smu_table_entry { uint32_t table_addr_high; uint32_t table_addr_low; uint8_t *table; - uint32_t handle; + unsigned long handle; }; struct smu_table_array { diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 2a4d163ac76f..79ce877bf45f 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -1225,17 +1225,13 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc", dcrtc); - if (ret < 0) { - kfree(dcrtc); - return ret; - } + if (ret < 0) + goto err_crtc; if (dcrtc->variant->init) { ret = dcrtc->variant->init(dcrtc, dev); - if (ret) { - kfree(dcrtc); - return ret; - } + if (ret) + goto err_crtc; } /* Ensure AXI pipeline is enabled */ @@ -1246,13 +1242,15 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, dcrtc->crtc.port = port; primary = kzalloc(sizeof(*primary), GFP_KERNEL); - if (!primary) - return -ENOMEM; + if (!primary) { + ret = -ENOMEM; + goto err_crtc; + } ret = armada_drm_plane_init(primary); if (ret) { kfree(primary); - return ret; + goto err_crtc; } ret = drm_universal_plane_init(drm, &primary->base, 0, @@ -1263,7 +1261,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) { kfree(primary); - return ret; + goto err_crtc; } ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL, @@ -1282,6 +1280,9 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, err_crtc_init: 
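/*
 * Editor's note (inferred from the armada hunks above and below, no new
 * code): err_crtc_init destroys the primary plane and then falls through
 * to the new err_crtc label, which frees dcrtc, so every failure path
 * after the dcrtc allocation now releases it exactly once instead of
 * duplicating kfree() calls at each early return.
 */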
primary->base.funcs->destroy(&primary->base); +err_crtc: + kfree(dcrtc); + return ret; } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 6f3849ec0c1d..e9f1e6fe7b94 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -644,6 +644,7 @@ static void ast_crtc_commit(struct drm_crtc *crtc) { struct ast_private *ast = crtc->dev->dev_private; ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0); + ast_crtc_load_lut(crtc); } diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h index 5f4c2e833a65..d665dd5af5dd 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h @@ -97,7 +97,7 @@ static const struct ast_vbios_dclk_info dclk_table[] = { {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ - {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ + {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ @@ -127,7 +127,7 @@ static const struct ast_vbios_dclk_info dclk_table_ast2500[] = { {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ - {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ + {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 703c2d13603f..eb7c4cf19bf6 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -889,7 +889,7 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane, drm_object_attach_property(&plane->base.base, props->alpha, 255); - if (desc->layout.xstride && desc->layout.pstride) { + if (desc->layout.xstride[0] && desc->layout.pstride[0]) { int ret; ret = drm_plane_create_rotation_property(&plane->base, diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 5dd3f1cd074a..a8905049b9da 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -946,7 +946,9 @@ static int analogix_dp_get_modes(struct drm_connector *connector) return 0; } + pm_runtime_get_sync(dp->dev); edid = drm_get_edid(connector, &dp->aux.ddc); + pm_runtime_put(dp->dev); if (edid) { drm_mode_connector_update_edid_property(&dp->connector, edid); diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c index de5e7dee7ad6..2e6c61d9b8ea 100644 --- a/drivers/gpu/drm/bridge/dumb-vga-dac.c +++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c @@ -55,7 +55,9 @@ static int dumb_vga_get_modes(struct drm_connector *connector) } drm_mode_connector_update_edid_property(connector, edid); - return drm_add_edid_modes(connector, edid); + ret = drm_add_edid_modes(connector, edid); + kfree(edid); + return ret; fallback: /* diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c index 0903ba574f61..75b0d3f6e4de 100644 --- a/drivers/gpu/drm/bridge/lvds-encoder.c +++ b/drivers/gpu/drm/bridge/lvds-encoder.c @@ -13,13 +13,37 @@ #include +struct lvds_encoder { + struct drm_bridge bridge; + struct drm_bridge *panel_bridge; +}; + +static int lvds_encoder_attach(struct drm_bridge *bridge) +{ + struct 
lvds_encoder *lvds_encoder = container_of(bridge, + struct lvds_encoder, + bridge); + + return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge, + bridge); +} + +static struct drm_bridge_funcs funcs = { + .attach = lvds_encoder_attach, +}; + static int lvds_encoder_probe(struct platform_device *pdev) { struct device_node *port; struct device_node *endpoint; struct device_node *panel_node; struct drm_panel *panel; - struct drm_bridge *bridge; + struct lvds_encoder *lvds_encoder; + + lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder), + GFP_KERNEL); + if (!lvds_encoder) + return -ENOMEM; /* Locate the panel DT node. */ port = of_graph_get_port_by_id(pdev->dev.of_node, 1); @@ -49,20 +73,30 @@ static int lvds_encoder_probe(struct platform_device *pdev) return -EPROBE_DEFER; } - bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS); - if (IS_ERR(bridge)) - return PTR_ERR(bridge); + lvds_encoder->panel_bridge = + devm_drm_panel_bridge_add(&pdev->dev, + panel, DRM_MODE_CONNECTOR_LVDS); + if (IS_ERR(lvds_encoder->panel_bridge)) + return PTR_ERR(lvds_encoder->panel_bridge); + + /* The panel_bridge bridge is attached to the panel's of_node, + * but we need a bridge attached to our of_node for our user + * to look up. + */ + lvds_encoder->bridge.of_node = pdev->dev.of_node; + lvds_encoder->bridge.funcs = &funcs; + drm_bridge_add(&lvds_encoder->bridge); - platform_set_drvdata(pdev, bridge); + platform_set_drvdata(pdev, lvds_encoder); return 0; } static int lvds_encoder_remove(struct platform_device *pdev) { - struct drm_bridge *bridge = platform_get_drvdata(pdev); + struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev); - drm_bridge_remove(bridge); + drm_bridge_remove(&lvds_encoder->bridge); return 0; } diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index b1ab4ab09532..60373d7eb220 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -137,7 +137,9 @@ static int sii902x_get_modes(struct drm_connector *connector) struct sii902x *sii902x = connector_to_sii902x(connector); struct regmap *regmap = sii902x->regmap; u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; + struct device *dev = &sii902x->i2c->dev; unsigned long timeout; + unsigned int retries; unsigned int status; struct edid *edid; int num = 0; @@ -159,7 +161,7 @@ static int sii902x_get_modes(struct drm_connector *connector) time_before(jiffies, timeout)); if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) { - dev_err(&sii902x->i2c->dev, "failed to acquire the i2c bus\n"); + dev_err(dev, "failed to acquire the i2c bus\n"); return -ETIMEDOUT; } @@ -179,9 +181,19 @@ static int sii902x_get_modes(struct drm_connector *connector) if (ret) return ret; - ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status); + /* + * Sometimes the I2C bus can stall after failure to use the + * EDID channel. Retry a few times to see if things clear + * up, else continue anyway. 
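+ * Editor's addition for clarity (not in the original comment): the loop
+ * below makes up to five attempts; on persistent failure the error is
+ * only logged and probing continues, since a stalled status read does
+ * not necessarily invalidate the EDID that was already fetched.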
+ */ + retries = 5; + do { + ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, + &status); + retries--; + } while (ret && retries); if (ret) - return ret; + dev_err(dev, "failed to read status (%d)\n", ret); ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA, SII902X_SYS_CTRL_DDC_BUS_REQ | @@ -201,7 +213,7 @@ static int sii902x_get_modes(struct drm_connector *connector) if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ | SII902X_SYS_CTRL_DDC_BUS_GRTD)) { - dev_err(&sii902x->i2c->dev, "failed to release the i2c bus\n"); + dev_err(dev, "failed to release the i2c bus\n"); return -ETIMEDOUT; } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index bf14214fa464..4db31b89507c 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -1634,6 +1634,8 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi) * (and possibly on the platform). So far only i.MX6Q (v1.30a) and * i.MX6DL (v1.31a) have been identified as needing the workaround, with * 4 and 1 iterations respectively. + * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing + * the workaround with a single iteration. */ switch (hdmi->version) { @@ -1641,6 +1643,7 @@ count = 4; break; case 0x131a: + case 0x201a: count = 1; break; default: diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 8571cfd877c5..8636e7eeb731 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -97,7 +97,7 @@ #define DP0_ACTIVEVAL 0x0650 #define DP0_SYNCVAL 0x0654 #define DP0_MISC 0x0658 -#define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */ +#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ #define BPC_6 (0 << 5) #define BPC_8 (1 << 5) @@ -318,7 +318,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux, tmp = (tmp << 8) | buf[i]; i++; if (((i % 4) == 0) || (i == size)) { - tc_write(DP0_AUXWDATA(i >> 2), tmp); + tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp); tmp = 0; } } @@ -603,8 +603,15 @@ static int tc_get_display_props(struct tc_data *tc) ret = drm_dp_link_probe(&tc->aux, &tc->link.base); if (ret < 0) goto err_dpcd_read; - if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000)) - goto err_dpcd_inval; + if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) { + dev_dbg(tc->dev, "Falling back to 2.7 Gbps rate\n"); + tc->link.base.rate = 270000; + } + + if (tc->link.base.num_lanes > 2) { + dev_dbg(tc->dev, "Falling back to 2 lanes\n"); + tc->link.base.num_lanes = 2; + } ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp); if (ret < 0) @@ -637,9 +644,6 @@ static int tc_get_display_props(struct tc_data *tc) err_dpcd_read: dev_err(tc->dev, "failed to read DPCD: %d\n", ret); return ret; -err_dpcd_inval: - dev_err(tc->dev, "invalid DPCD\n"); - return -EINVAL; } static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) @@ -655,6 +659,14 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) int lower_margin = mode->vsync_start - mode->vdisplay; int vsync_len = mode->vsync_end - mode->vsync_start; + /* + * Recommended maximum number of symbols transferred in a transfer unit: + * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size, + * (output active video bandwidth in bytes)) + * Must be less than tu_size.
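+ * Worked example (editor's illustration with assumed figures): a
+ * 148.5 MHz RGB888 stream needs 148.5e6 * 3 bytes/s, while two lanes at
+ * 2.7 Gbps provide 2 * 270e6 bytes/s after 8b/10b coding, giving
+ * DIV_ROUND_UP(445.5 * 63, 540) = 52 symbols, safely below tu_size = 63.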
+ */ + max_tu_symbol = TU_SIZE_RECOMMENDED - 1; + dev_dbg(tc->dev, "set mode %dx%d\n", mode->hdisplay, mode->vdisplay); dev_dbg(tc->dev, "H margin %d,%d sync %d\n", @@ -664,13 +676,18 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal); - /* LCD Ctl Frame Size */ - tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ | + /* + * LCD Ctl Frame Size + * the datasheet is not clear about vsdelay in the DPI case; + * assume no delay is needed when DPI is the source of the + * sync signals + */ + tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ | OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED); - tc_write(HTIM01, (left_margin << 16) | /* H back porch */ - (hsync_len << 0)); /* Hsync */ - tc_write(HTIM02, (right_margin << 16) | /* H front porch */ - (mode->hdisplay << 0)); /* width */ + tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */ + (ALIGN(hsync_len, 2) << 0)); /* Hsync */ + tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) | /* H front porch */ + (ALIGN(mode->hdisplay, 2) << 0)); /* width */ tc_write(VTIM01, (upper_margin << 16) | /* V back porch */ (vsync_len << 0)); /* Vsync */ tc_write(VTIM02, (lower_margin << 16) | /* V front porch */ @@ -689,7 +706,7 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) /* DP Main Stream Attributes */ vid_sync_dly = hsync_len + left_margin + mode->hdisplay; tc_write(DP0_VIDSYNCDELAY, - (0x003e << 16) | /* thresh_dly */ + (max_tu_symbol << 16) | /* thresh_dly */ (vid_sync_dly << 0)); tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal)); @@ -705,14 +722,8 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); - /* - * Recommended maximum number of symbols transferred in a transfer unit: - * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size, - * (output active video bandwidth in bytes)) - * Must be less than tu_size.
- */ - max_tu_symbol = TU_SIZE_RECOMMENDED - 1; - tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8); + tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) + + BPC_8); return 0; err: @@ -808,8 +819,6 @@ static int tc_main_link_setup(struct tc_data *tc) unsigned int rate; u32 dp_phy_ctrl; int timeout; - bool aligned; - bool ready; u32 value; int ret; u8 tmp[8]; @@ -954,16 +963,15 @@ static int tc_main_link_setup(struct tc_data *tc) ret = drm_dp_dpcd_read_link_status(aux, tmp + 2); if (ret < 0) goto err_dpcd_read; - ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */ - DP_CHANNEL_EQ_BITS)); /* Lane0 */ - aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE; - } while ((--timeout) && !(ready && aligned)); + } while ((--timeout) && + !(drm_dp_channel_eq_ok(tmp + 2, tc->link.base.num_lanes))); if (timeout == 0) { /* Read DPCD 0x200-0x201 */ ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2); if (ret < 0) goto err_dpcd_read; + dev_err(dev, "channel(s) EQ not ok\n"); dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]); dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n", tmp[1]); @@ -974,10 +982,6 @@ static int tc_main_link_setup(struct tc_data *tc) dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n", tmp[6]); - if (!ready) - dev_err(dev, "Lane0/1 not ready\n"); - if (!aligned) - dev_err(dev, "Lane0/1 not aligned\n"); return -EAGAIN; } @@ -1099,7 +1103,10 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge, static int tc_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - /* Accept any mode */ + /* DPI interface clock limitation: up to 154 MHz */ + if (mode->clock > 154000) + return MODE_CLOCK_HIGH; + return MODE_OK; } diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index a4c4a465b385..130483f2cd7f 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -294,22 +294,7 @@ static void cirrus_crtc_prepare(struct drm_crtc *crtc) { } -/* - * This is called after a mode is programmed. It should reverse anything done - * by the prepare function - */ -static void cirrus_crtc_commit(struct drm_crtc *crtc) -{ -} - -/* - * The core can pass us a set of gamma values to program. We actually only - * use this for 8-bit mode so can't perform smooth fades on deeper modes, - * but it's a requirement that we provide the function - */ -static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t size, - struct drm_modeset_acquire_ctx *ctx) +static void cirrus_crtc_load_lut(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct cirrus_device *cdev = dev->dev_private; @@ -317,7 +302,7 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, int i; if (!crtc->enabled) - return 0; + return; r = crtc->gamma_store; g = r + crtc->gamma_size; @@ -330,6 +315,27 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, WREG8(PALETTE_DATA, *g++ >> 8); WREG8(PALETTE_DATA, *b++ >> 8); } +} + +/* + * This is called after a mode is programmed. It should reverse anything done + * by the prepare function + */ +static void cirrus_crtc_commit(struct drm_crtc *crtc) +{ + cirrus_crtc_load_lut(crtc); +} + +/* + * The core can pass us a set of gamma values to program.
We actually only + * use this for 8-bit mode so can't perform smooth fades on deeper modes, + * but it's a requirement that we provide the function + */ +static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size, + struct drm_modeset_acquire_ctx *ctx) +{ + cirrus_crtc_load_lut(crtc); return 0; } diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 2fd383d7253a..df954ff7721b 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -151,6 +151,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) state->connectors[i].state); state->connectors[i].ptr = NULL; state->connectors[i].state = NULL; + state->connectors[i].old_state = NULL; + state->connectors[i].new_state = NULL; drm_connector_put(connector); } @@ -172,6 +174,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) state->crtcs[i].commit = NULL; state->crtcs[i].ptr = NULL; state->crtcs[i].state = NULL; + state->crtcs[i].old_state = NULL; + state->crtcs[i].new_state = NULL; } for (i = 0; i < config->num_total_plane; i++) { @@ -184,6 +188,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) state->planes[i].state); state->planes[i].ptr = NULL; state->planes[i].state = NULL; + state->planes[i].old_state = NULL; + state->planes[i].new_state = NULL; } for (i = 0; i < state->num_private_objs; i++) { @@ -196,6 +202,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) state->private_objs[i].state); state->private_objs[i].ptr = NULL; state->private_objs[i].state = NULL; + state->private_objs[i].old_state = NULL; + state->private_objs[i].new_state = NULL; } state->num_private_objs = 0; @@ -463,6 +471,8 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, if (property == config->prop_active) state->active = val; + else if (property == config->prop_background_color) + state->background_color.v = val; else if (property == config->prop_mode_id) { struct drm_property_blob *mode = drm_property_lookup_blob(dev, val); @@ -485,6 +495,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, &replaced); state->color_mgmt_changed |= replaced; return ret; + } else if (property == config->ctm_post_offset_property) { + ret = drm_atomic_replace_property_blob_from_id(dev, + &state->ctm_post_offset, + val, + sizeof(struct drm_color_ctm_post_offset), + &replaced); + state->color_mgmt_changed |= replaced; + return ret; } else if (property == config->gamma_lut_property) { ret = drm_atomic_replace_property_blob_from_id(dev, &state->gamma_lut, @@ -543,10 +561,14 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc, *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0; else if (property == config->ctm_property) *val = (state->ctm) ? state->ctm->base.id : 0; + else if (property == config->ctm_post_offset_property) + *val = (state->ctm_post_offset) ? state->ctm_post_offset->base.id : 0; else if (property == config->gamma_lut_property) *val = (state->gamma_lut) ? 
state->gamma_lut->base.id : 0; else if (property == config->prop_out_fence_ptr) *val = 0; + else if (property == config->prop_background_color) + *val = state->background_color.v; else if (crtc->funcs->atomic_get_property) return crtc->funcs->atomic_get_property(crtc, state, property, val); else @@ -1202,6 +1224,12 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector, state->picture_aspect_ratio = val; } else if (property == connector->scaling_mode_property) { state->scaling_mode = val; + } else if (property == connector->content_protection_property) { + if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) { + DRM_DEBUG_KMS("only drivers can set CP Enabled\n"); + return -EINVAL; + } + state->content_protection = val; } else if (connector->funcs->atomic_set_property) { return connector->funcs->atomic_set_property(connector, state, property, val); @@ -1281,6 +1309,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector, *val = state->picture_aspect_ratio; } else if (property == connector->scaling_mode_property) { *val = state->scaling_mode; + } else if (property == connector->content_protection_property) { + *val = state->content_protection; } else if (connector->funcs->atomic_get_property) { return connector->funcs->atomic_get_property(connector, state, property, val); @@ -1347,7 +1377,9 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, { struct drm_plane *plane = plane_state->plane; struct drm_crtc_state *crtc_state; - + /* Nothing to do for the same crtc */ + if (plane_state->crtc == crtc) + return 0; if (plane_state->crtc) { crtc_state = drm_atomic_get_crtc_state(plane_state->state, plane_state->crtc); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 0028591f3f95..e8e2b83a02cd 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -951,7 +951,7 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, crtc->enabled = new_crtc_state->enable; new_plane_state = - drm_atomic_get_new_plane_state(old_state, primary); + primary ? drm_atomic_get_new_plane_state(old_state, primary) : NULL; if (new_plane_state && new_plane_state->crtc == crtc) { crtc->x = new_plane_state->src_x >> 16; @@ -2618,6 +2618,9 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, int hdisplay, vdisplay; int ret; + if (!crtc->primary) + return -EINVAL; + crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); @@ -2683,31 +2686,9 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, return 0; } -/** - * drm_atomic_helper_disable_all - disable all currently active outputs - * @dev: DRM device - * @ctx: lock acquisition context - * - * Loops through all connectors, finding those that aren't turned off and then - * turns them off by setting their DPMS mode to OFF and deactivating the CRTC - * that they are connected to. - * - * This is used for example in suspend/resume to disable all currently active - * functions when suspending. If you just want to shut down everything at e.g. - * driver unload, look at drm_atomic_helper_shutdown(). - * - * Note that if callers haven't already acquired all modeset locks this might - * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). - * - * Returns: - * 0 on success or a negative error code on failure. - * - * See also: - * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and - * drm_atomic_helper_shutdown().
- */ -int drm_atomic_helper_disable_all(struct drm_device *dev, - struct drm_modeset_acquire_ctx *ctx) +static int __drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx, + bool clean_old_fbs) { struct drm_atomic_state *state; struct drm_connector_state *conn_state; @@ -2759,8 +2740,11 @@ int drm_atomic_helper_disable_all(struct drm_device *dev, goto free; drm_atomic_set_fb_for_plane(plane_state, NULL); - plane_mask |= BIT(drm_plane_index(plane)); - plane->old_fb = plane->fb; + + if (clean_old_fbs) { + plane->old_fb = plane->fb; + plane_mask |= BIT(drm_plane_index(plane)); + } } ret = drm_atomic_commit(state); @@ -2771,6 +2755,34 @@ int drm_atomic_helper_disable_all(struct drm_device *dev, return ret; } +/** + * drm_atomic_helper_disable_all - disable all currently active outputs + * @dev: DRM device + * @ctx: lock acquisition context + * + * Loops through all connectors, finding those that aren't turned off and then + * turns them off by setting their DPMS mode to OFF and deactivating the CRTC + * that they are connected to. + * + * This is used for example in suspend/resume to disable all currently active + * functions when suspending. If you just want to shut down everything at e.g. + * driver unload, look at drm_atomic_helper_shutdown(). + * + * Note that if callers haven't already acquired all modeset locks this might + * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). + * + * Returns: + * 0 on success or a negative error code on failure. + * + * See also: + * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and + * drm_atomic_helper_shutdown(). + */ +int drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx) +{ + return __drm_atomic_helper_disable_all(dev, ctx, false); +} EXPORT_SYMBOL(drm_atomic_helper_disable_all); /** @@ -2793,7 +2805,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev) while (1) { ret = drm_modeset_lock_all_ctx(dev, &ctx); if (!ret) - ret = drm_atomic_helper_disable_all(dev, &ctx); + ret = __drm_atomic_helper_disable_all(dev, &ctx, true); if (ret != -EDEADLK) break; @@ -2897,16 +2909,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, struct drm_connector_state *new_conn_state; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state; - unsigned plane_mask = 0; - struct drm_device *dev = state->dev; - int ret; state->acquire_ctx = ctx; - for_each_new_plane_in_state(state, plane, new_plane_state, i) { - plane_mask |= BIT(drm_plane_index(plane)); + for_each_new_plane_in_state(state, plane, new_plane_state, i) state->planes[i].old_state = plane->state; - } for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) state->crtcs[i].old_state = crtc->state; @@ -2914,11 +2921,7 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, for_each_new_connector_in_state(state, connector, new_conn_state, i) state->connectors[i].old_state = connector->state; - ret = drm_atomic_commit(state); - if (plane_mask) - drm_atomic_clean_old_fb(dev, plane_mask, ret); - - return ret; + return drm_atomic_commit(state); } EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state); @@ -3179,6 +3182,8 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, drm_property_blob_get(state->degamma_lut); if (state->ctm) drm_property_blob_get(state->ctm); + if (state->ctm_post_offset) + drm_property_blob_get(state->ctm_post_offset); if (state->gamma_lut) drm_property_blob_get(state->gamma_lut); 
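/*
 * Editor's sketch of the blob lifecycle the two hunks around this point
 * establish (identifiers taken from the diff itself): every reference
 * taken in __drm_atomic_helper_crtc_duplicate_state() must be dropped in
 * __drm_atomic_helper_crtc_destroy_state(), and ctm_post_offset follows
 * the existing degamma_lut/ctm/gamma_lut pattern:
 *
 *   if (state->ctm_post_offset)                          // duplicate
 *           drm_property_blob_get(state->ctm_post_offset);
 *   ...
 *   drm_property_blob_put(state->ctm_post_offset);       // destroy
 */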
state->mode_changed = false; @@ -3228,6 +3233,7 @@ void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state) drm_property_blob_put(state->mode_blob); drm_property_blob_put(state->degamma_lut); drm_property_blob_put(state->ctm); + drm_property_blob_put(state->ctm_post_offset); drm_property_blob_put(state->gamma_lut); } EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state); @@ -3609,6 +3615,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, /* Reset DEGAMMA_LUT and CTM properties. */ replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL); replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); + replaced |= drm_property_replace_blob(&crtc_state->ctm_post_offset, NULL); replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob); crtc_state->color_mgmt_changed |= replaced; diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index 2e5e089dd912..d96609d22788 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -204,6 +204,21 @@ unsigned int drm_rotation_simplify(unsigned int rotation, } EXPORT_SYMBOL(drm_rotation_simplify); +/** + * drm_mode_create_background_color_property - create CRTC color property + * @dev: DRM device + * + * Creates a property to represent the CRTC background/canvas color. Called by + * a driver the first time it is needed; the property must then be attached to + * each desired CRTC. + */ +struct drm_property * +drm_mode_create_background_color_property(struct drm_device *dev) +{ + return drm_property_create_rgba(dev, DRM_MODE_PROP_ATOMIC, + "background_color"); +} +EXPORT_SYMBOL(drm_mode_create_background_color_property); + /** * drm_plane_create_zpos_property - create mutable zpos property * @plane: drm plane diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c index fe0982708e95..7f23bfc89248 100644 --- a/drivers/gpu/drm/drm_color_mgmt.c +++ b/drivers/gpu/drm/drm_color_mgmt.c @@ -64,6 +64,16 @@ * boot-up state too. Drivers can access the blob for the color conversion * matrix through &drm_crtc_state.ctm. + * + * “CTM_POST_OFFSET”: + * Blob property to set the post-offset vector used to convert colors after + * applying the CTM. The data is interpreted as a struct + * &drm_color_ctm_post_offset. + * + * Setting this to NULL (blob property value set to 0) means a pass-thru + * vector should be used. This is generally the driver boot-up state too. + * Drivers can access the blob for the post-offset vector through + * &drm_crtc_state.ctm_post_offset. + * * “GAMMA_LUT”: * Blob property to set the gamma lookup table (LUT) mapping pixel data * after the transformation matrix to data sent to the connector.
The @@ -148,9 +158,12 @@ void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, degamma_lut_size); } - if (has_ctm) + if (has_ctm) { drm_object_attach_property(&crtc->base, config->ctm_property, 0); + drm_object_attach_property(&crtc->base, + config->ctm_post_offset_property, 0); + } if (gamma_lut_size) { drm_object_attach_property(&crtc->base, diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index ba9f36cef68c..ac4a9047570a 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -699,6 +699,13 @@ static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = { DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, drm_tv_subconnector_enum_list) +static struct drm_prop_enum_list drm_cp_enum_list[] = { + { DRM_MODE_CONTENT_PROTECTION_UNDESIRED, "Undesired" }, + { DRM_MODE_CONTENT_PROTECTION_DESIRED, "Desired" }, + { DRM_MODE_CONTENT_PROTECTION_ENABLED, "Enabled" }, +}; +DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list) + /** * DOC: standard connector properties * @@ -741,6 +748,41 @@ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, * value of link-status is "GOOD". If something fails during or after modeset, * the kernel driver may set this to "BAD" and issue a hotplug uevent. Drivers * should update this value using drm_mode_connector_set_link_status_property(). + * Content Protection: + * This property is used by userspace to request the kernel protect future + * content communicated over the link. When requested, kernel will apply + * the appropriate means of protection (most often HDCP), and use the + * property to tell userspace the protection is active. + * + * Drivers can set this up by calling + * drm_connector_attach_content_protection_property() on initialization. + * + * The value of this property can be one of the following: + * + * - DRM_MODE_CONTENT_PROTECTION_UNDESIRED = 0 + * The link is not protected, content is transmitted in the clear. + * - DRM_MODE_CONTENT_PROTECTION_DESIRED = 1 + * Userspace has requested content protection, but the link is not + * currently protected. When in this state, kernel should enable + * Content Protection as soon as possible. + * - DRM_MODE_CONTENT_PROTECTION_ENABLED = 2 + * Userspace has requested content protection, and the link is + * protected. Only the driver can set the property to this value. + * If userspace attempts to set to ENABLED, kernel will return + * -EINVAL. + * + * A few guidelines: + * + * - DESIRED state should be preserved until userspace de-asserts it by + * setting the property to UNDESIRED. This means ENABLED should only + * transition to UNDESIRED when the user explicitly requests it. + * - If the state is DESIRED, kernel should attempt to re-authenticate the + * link whenever possible. This includes across disable/enable, dpms, + * hotplug, downstream device changes, link status failures, etc.. + * - Userspace is responsible for polling the property to determine when + * the value transitions from ENABLED to DESIRED. This signifies the link + * is no longer protected and userspace should take appropriate action + * (whatever that might be). * * Connectors also have one standardized atomic property: * @@ -1024,6 +1066,42 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, } EXPORT_SYMBOL(drm_connector_attach_scaling_mode_property); +/** + * drm_connector_attach_content_protection_property - attach content protection + * property + * + * @connector: connector to attach CP property on. 
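+ *
+ * A driver would typically wire this up once from its connector
+ * initialization path. An illustrative sketch (the surrounding init code
+ * and error handling are assumptions, not part of this patch):
+ *
+ *	ret = drm_connector_attach_content_protection_property(connector);
+ *	if (ret)
+ *		return ret;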
+ * + * This is used to add support for content protection on select connectors. + * Content Protection is intentionally vague to allow for different underlying + * technologies; however, it is most often implemented by HDCP. + * + * The requested content protection state is stored in + * &drm_connector_state.content_protection. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_connector_attach_content_protection_property( + struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_property *prop; + + prop = drm_property_create_enum(dev, 0, "Content Protection", + drm_cp_enum_list, + ARRAY_SIZE(drm_cp_enum_list)); + if (!prop) + return -ENOMEM; + + drm_object_attach_property(&connector->base, prop, + DRM_MODE_CONTENT_PROTECTION_UNDESIRED); + + connector->content_protection_property = prop; + + return 0; +} +EXPORT_SYMBOL(drm_connector_attach_content_protection_property); + /** * drm_mode_create_aspect_ratio_property - create aspect ratio property * @dev: DRM device diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 5af25ce5bf7c..a23f0fe95839 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -406,6 +406,9 @@ int drm_mode_getcrtc(struct drm_device *dev, if (!crtc) return -ENOENT; + if (!crtc->primary) + return -EINVAL; + crtc_resp->gamma_size = crtc->gamma_size; drm_modeset_lock(&crtc->primary->mutex, NULL); @@ -455,13 +458,19 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set, struct drm_crtc *tmp; int ret; + if (!crtc->primary) + return -EINVAL; + /* * NOTE: ->set_config can also disable other crtcs (if we steal all * connectors from it), hence we need to refcount the fbs across all * crtcs. Atomic modeset will have saner semantics ... */ - drm_for_each_crtc(tmp, crtc->dev) + drm_for_each_crtc(tmp, crtc->dev) { + if (!tmp->primary) + continue; tmp->primary->old_fb = tmp->primary->fb; + } fb = set->fb; @@ -472,6 +481,8 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set, } drm_for_each_crtc(tmp, crtc->dev) { + if (!tmp->primary) + continue; if (tmp->primary->fb) drm_framebuffer_get(tmp->primary->fb); if (tmp->primary->old_fb) @@ -582,6 +593,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx); if (ret) goto out; + if (!crtc->primary) { + ret = -EINVAL; + goto out; + } + if (crtc_req->mode_valid) { /* If we have a mode we need a framebuffer.
*/ /* If we pass -1, set the mode with the currently bound fb */ diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index eab36a460638..bd7c03038ba8 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -185,6 +185,8 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev) (*crtc_funcs->disable)(crtc); else (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF); + if (!crtc->primary) + continue; crtc->primary->fb = NULL; } } @@ -539,6 +541,9 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set, crtc_funcs = set->crtc->helper_private; + if (!set->crtc->primary) + return -EINVAL; + if (!set->mode) set->fb = NULL; @@ -950,6 +955,8 @@ void drm_helper_resume_force_mode(struct drm_device *dev) if (!crtc->enabled) continue; + if (!crtc->primary) + continue; ret = drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb); @@ -1072,6 +1079,9 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, struct drm_plane_state *plane_state; struct drm_plane *plane = crtc->primary; + if (!plane) + return -EINVAL; + if (plane->funcs->atomic_duplicate_state) plane_state = plane->funcs->atomic_duplicate_state(plane); else { diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c index 0ef9011a1856..e7f4fe2848a5 100644 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c @@ -350,19 +350,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, { uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE; ssize_t ret; + int retry; if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) return 0; - ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, - &tmds_oen, sizeof(tmds_oen)); - if (ret) { - DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n", - enable ? "enable" : "disable"); - return ret; + /* + * LSPCON adapters in low-power state may ignore the first write, so + * read back and verify the written value a few times. + */ + for (retry = 0; retry < 3; retry++) { + uint8_t tmp; + + ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, + &tmds_oen, sizeof(tmds_oen)); + if (ret) { + DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n", + enable ? "enable" : "disable", + retry + 1); + return ret; + } + + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN, + &tmp, sizeof(tmp)); + if (ret) { + DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n", + enable ? "enabling" : "disabling", + retry + 1); + return ret; + } + + if (tmp == tmds_oen) + return 0; } - return 0; + DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n", + enable ? 
"enabling" : "disabling"); + + return -EIO; } EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output); @@ -410,6 +435,7 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter, { u8 data; int ret = 0; + int retry; if (!mode) { DRM_ERROR("NULL input\n"); @@ -417,10 +443,19 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter, } /* Read Status: i2c over aux */ - ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_LSPCON_CURRENT_MODE, - &data, sizeof(data)); + for (retry = 0; retry < 6; retry++) { + if (retry) + usleep_range(500, 1000); + + ret = drm_dp_dual_mode_read(adapter, + DP_DUAL_MODE_LSPCON_CURRENT_MODE, + &data, sizeof(data)); + if (!ret) + break; + } + if (ret < 0) { - DRM_ERROR("LSPCON read(0x80, 0x41) failed\n"); + DRM_DEBUG_KMS("LSPCON read(0x80, 0x41) failed\n"); return -EFAULT; } diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 08af8d6b844b..493d8f56d14e 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -1139,6 +1139,7 @@ int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]) static const u16 psr_setup_time_us[] = { PSR_SETUP_TIME(330), PSR_SETUP_TIME(275), + PSR_SETUP_TIME(220), PSR_SETUP_TIME(165), PSR_SETUP_TIME(110), PSR_SETUP_TIME(55), diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 41b492f99955..c022ab6e84bd 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -2862,12 +2862,14 @@ static void drm_dp_mst_dump_mstb(struct seq_file *m, } } +#define DP_PAYLOAD_TABLE_SIZE 64 + static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, char *buf) { int i; - for (i = 0; i < 64; i += 16) { + for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) { if (drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + i, &buf[i], 16) != 16) @@ -2936,7 +2938,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m, mutex_lock(&mgr->lock); if (mgr->mst_primary) { - u8 buf[64]; + u8 buf[DP_PAYLOAD_TABLE_SIZE]; int ret; ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE); @@ -2954,8 +2956,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m, seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]); if (dump_dp_payload_table(mgr, buf)) - seq_printf(m, "payload table: %*ph\n", 63, buf); - + seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf); } mutex_unlock(&mgr->lock); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index be38ac7050d4..a7b6734bc3c3 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -749,7 +749,7 @@ static void remove_compat_control_link(struct drm_device *dev) if (!minor) return; - name = kasprintf(GFP_KERNEL, "controlD%d", minor->index); + name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64); if (!name) return; diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c index 39ac15ce4702..9e2ae02f31e0 100644 --- a/drivers/gpu/drm/drm_dumb_buffers.c +++ b/drivers/gpu/drm/drm_dumb_buffers.c @@ -65,12 +65,13 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev, return -EINVAL; /* overflow checks for 32bit size calculations */ - /* NOTE: DIV_ROUND_UP() can overflow */ + if (args->bpp > U32_MAX - 8) + return -EINVAL; cpp = DIV_ROUND_UP(args->bpp, 8); - if (!cpp || cpp > 0xffffffffU / args->width) + if (cpp > U32_MAX / args->width) return -EINVAL; stride = cpp * args->width; - if (args->height > 0xffffffffU / stride) + if (args->height > U32_MAX / 
stride) return -EINVAL; /* test for wrap-around */ diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 6bb6337be920..240e86a7f91e 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -111,6 +111,9 @@ static const struct edid_quirk { /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */ { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, + /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */ + { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC }, + /* Belinea 10 15 55 */ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, @@ -1533,6 +1536,10 @@ static void connector_bad_edid(struct drm_connector *connector, * level, drivers must make all reasonable efforts to expose it as an I2C * adapter and use drm_get_edid() instead of abusing this function. * + * The EDID may be overridden using debugfs override_edid or firmware EDID + * (drm_load_edid_firmware() and the drm.edid_firmware parameter), in this + * priority order. Having either of them bypasses actual EDID reads. + * * Return: Pointer to valid EDID or NULL if we couldn't find any. */ struct edid *drm_do_get_edid(struct drm_connector *connector, @@ -1542,6 +1549,17 @@ struct edid *drm_do_get_edid(struct drm_connector *connector, { int i, j = 0, valid_extensions = 0; u8 *edid, *new; + struct edid *override = NULL; + + if (connector->override_edid) + override = drm_edid_duplicate((const struct edid *) + connector->edid_blob_ptr->data); + + if (!override) + override = drm_load_edid_firmware(connector); + + if (!IS_ERR_OR_NULL(override)) + return override; if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) return NULL; @@ -3820,8 +3838,7 @@ EXPORT_SYMBOL(drm_edid_get_monitor_name); * @edid: EDID to parse * * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The - * Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to - * fill in. + * HDCP and Port_ID ELD fields are left for the graphics driver to fill in. */ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) { @@ -3902,6 +3919,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) } eld[5] |= total_sad_count << 4; + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector->connector_type == DRM_MODE_CONNECTOR_eDP) + eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_DP; + else + eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_HDMI; + eld[DRM_ELD_BASELINE_ELD_LEN] = DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4); @@ -4809,7 +4832,8 @@ void drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, const struct drm_display_mode *mode, enum hdmi_quantization_range rgb_quant_range, - bool rgb_quant_range_selectable) + bool rgb_quant_range_selectable, + bool is_hdmi2_sink) { /* * CEA-861: @@ -4833,8 +4857,15 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, * YQ-field to match the RGB Quantization Range being transmitted * (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB, * set YQ=1) and the Sink shall ignore the YQ-field." + * + * Unfortunately, certain sinks (e.g. VIZ Model 67/E261VA) get confused + * by non-zero YQ when receiving RGB. There doesn't seem to be any + * good way to tell which version of CEA-861 the sink supports, so + * we limit non-zero YQ to HDMI 2.0 sinks only as HDMI 2.0 is based + * on CEA-861-F.
*/ - if (rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED) + if (!is_hdmi2_sink || + rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED) frame->ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED; else diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 1b8f013ffa65..1692e78b7ebe 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -534,6 +534,8 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) drm_for_each_crtc(crtc, dev) { drm_modeset_lock(&crtc->mutex, NULL); + if (!crtc->primary) + continue; if (crtc->primary->fb) crtcs_bound++; if (crtc->primary->fb == fb_helper->fb) @@ -1350,6 +1352,7 @@ static int setcmap_atomic(struct fb_cmap *cmap, struct fb_info *info) replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL); replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); + replaced |= drm_property_replace_blob(&crtc_state->ctm_post_offset, NULL); replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, gamma_lut); crtc_state->color_mgmt_changed |= replaced; @@ -1809,6 +1812,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { DRM_INFO("Cannot find any crtc or sizes\n"); + + /* First time: disable all crtc's.. */ + if (!fb_helper->deferred_setup && !READ_ONCE(fb_helper->dev->master)) + restore_fbdev_mode(fb_helper); return -EAGAIN; } diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index b3c6e997ccdb..03244b3c985d 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -212,6 +212,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) return -ENOMEM; filp->private_data = priv; + filp->f_mode |= FMODE_UNSIGNED_OFFSET; priv->filp = filp; priv->pid = get_pid(task_pid(current)); priv->minor = minor; diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index af279844d7ce..2d7461c20db8 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -118,6 +118,10 @@ int drm_mode_addfb(struct drm_device *dev, r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); r.handles[0] = or->handle; + if (r.pixel_format == DRM_FORMAT_XRGB2101010 && + dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP) + r.pixel_format = DRM_FORMAT_XBGR2101010; + ret = drm_mode_addfb2(dev, &r, file_priv); if (ret) return ret; @@ -454,6 +458,12 @@ int drm_mode_getfb(struct drm_device *dev, if (!fb) return -ENOENT; + /* Multi-planar framebuffers need getfb2. 
*/ + if (fb->format->num_planes > 1) { + ret = -EINVAL; + goto out; + } + r->height = fb->height; r->width = fb->width; r->depth = fb->format->depth; @@ -477,6 +487,7 @@ int drm_mode_getfb(struct drm_device *dev, ret = -ENODEV; } +out: drm_framebuffer_put(fb); return ret; @@ -854,7 +865,7 @@ static void legacy_remove_fb(struct drm_framebuffer *fb) drm_modeset_lock_all(dev); /* remove from any CRTC */ drm_for_each_crtc(crtc, dev) { - if (crtc->primary->fb == fb) { + if (crtc->primary && crtc->primary->fb == fb) { /* should turn off the crtc */ if (drm_crtc_force_disable(crtc)) DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc); diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index fbc3f308fa19..5de4c6e7435e 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -46,6 +46,7 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr struct dma_buf *dma_buf); /* drm_drv.c */ +#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ struct drm_minor *drm_minor_acquire(unsigned int minor_id); void drm_minor_release(struct drm_minor *minor); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index a9ae6dd2d593..d01cf222f320 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -547,7 +547,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_UNLOCKED | DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -618,10 +618,10 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED), @@ -630,7 +630,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - 
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), @@ -642,7 +642,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 61a1c8ea74bc..1acf3b1479a1 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -834,9 +834,24 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan) if (!mm->color_adjust) return NULL; - hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack); - hole_start = __drm_mm_hole_node_start(hole); - hole_end = hole_start + hole->hole_size; + /* + * The hole found during scanning should ideally be the first element + * in the hole_stack list, but due to side-effects in the driver it + * may not be. 
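+ * Walk the stack and pick the hole that actually spans the range
+ * [scan->hit_start, scan->hit_end) recorded by the scan.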
+ */ + list_for_each_entry(hole, &mm->hole_stack, hole_stack) { + hole_start = __drm_mm_hole_node_start(hole); + hole_end = hole_start + hole->hole_size; + + if (hole_start <= scan->hit_start && + hole_end >= scan->hit_end) + break; + } + + /* We should only be called after we found the hole previously */ + DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack); + if (unlikely(&hole->hole_stack == &mm->hole_stack)) + return NULL; DRM_MM_BUG_ON(hole_start > scan->hit_start); DRM_MM_BUG_ON(hole_end < scan->hit_end); diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index 74f6ff5df656..47647bcef32c 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -323,6 +323,13 @@ static int drm_mode_create_standard_properties(struct drm_device *dev) return -ENOMEM; dev->mode_config.ctm_property = prop; + prop = drm_property_create(dev, + DRM_MODE_PROP_BLOB, + "CTM_POST_OFFSET", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.ctm_post_offset_property = prop; + prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "GAMMA_LUT", 0); diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index af4e906c630d..56b9f9b1c3ae 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -88,7 +88,7 @@ void drm_modeset_lock_all(struct drm_device *dev) struct drm_modeset_acquire_ctx *ctx; int ret; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL); if (WARN_ON(!ctx)) return; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 904966cde32b..7cf30d5683c6 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -353,8 +353,6 @@ EXPORT_SYMBOL(drm_helper_probe_detect); * drm_mode_probed_add(). New modes start their life with status as OK. * Modes are added from a single source using the following priority order. * - * - debugfs 'override_edid' (used for testing only) - * - firmware EDID (drm_load_edid_firmware()) * - &drm_connector_helper_funcs.get_modes vfunc * - if the connector status is connector_status_connected, standard * VESA DMT modes up to 1024x768 are automatically added @@ -483,22 +481,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, goto prune; } - if (connector->override_edid) { - struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; - - count = drm_add_edid_modes(connector, edid); - drm_edid_to_eld(connector, edid); - } else { - struct edid *edid = drm_load_edid_firmware(connector); - if (!IS_ERR_OR_NULL(edid)) { - drm_mode_connector_update_edid_property(connector, edid); - count = drm_add_edid_modes(connector, edid); - drm_edid_to_eld(connector, edid); - kfree(edid); - } - if (count == 0) - count = (*connector_funcs->get_modes)(connector); - } + count = (*connector_funcs->get_modes)(connector); if (count == 0 && connector->status == connector_status_connected) count = drm_add_modes_noedid(connector, 1024, 768); @@ -671,6 +654,26 @@ static void output_poll_execute(struct work_struct *work) schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD); } +/** + * drm_kms_helper_is_poll_worker - is %current task an output poll worker? + * + * Determine if %current task is an output poll worker. This can be used + * to select distinct code paths for output polling versus other contexts. 
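+ *
+ * For example, a driver's &drm_connector_helper_funcs.detect hook might
+ * take a runtime-PM reference only when not called from the poll worker
+ * (an illustrative sketch; the pm_runtime call site is an assumption,
+ * not part of this patch):
+ *
+ *	if (!drm_kms_helper_is_poll_worker())
+ *		pm_runtime_get_sync(dev->dev);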
+ * + * One use case is to avoid a deadlock between the output poll worker and + * the autosuspend worker wherein the latter waits for polling to finish + * upon calling drm_kms_helper_poll_disable(), while the former waits for + * runtime suspend to finish upon calling pm_runtime_get_sync() in a + * connector ->detect hook. + */ +bool drm_kms_helper_is_poll_worker(void) +{ + struct work_struct *work = current_work(); + + return work && work->func == output_poll_execute; +} +EXPORT_SYMBOL(drm_kms_helper_is_poll_worker); + /** * drm_kms_helper_poll_disable - disable output polling * @dev: drm_device diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index bc5128203056..f10ccbc4433f 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c @@ -354,6 +354,30 @@ struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags, } EXPORT_SYMBOL(drm_property_create_bool); +/** + * drm_property_create_rgba - create a new RGBA property type + * @dev: drm device + * @flags: flags specifying the property type + * @name: name of the property + * + * This creates a new generic drm property which can then be attached to a drm + * object with drm_object_attach_property. The returned property object must be + * freed with drm_property_destroy. + * + * Userspace should use the DRM_RGBA() macro to build values with the proper + * bit layout. + * + * Returns: + * A pointer to the newly created property on success, NULL on failure. + */ +struct drm_property *drm_property_create_rgba(struct drm_device *dev, int flags, + const char *name) +{ + return drm_property_create_range(dev, flags, name, + 0, GENMASK_ULL(63, 0)); +} +EXPORT_SYMBOL(drm_property_create_rgba); + /** * drm_property_add_enum - add a possible value to an enumeration property * @property: enumeration property to change @@ -516,7 +540,7 @@ static void drm_property_free_blob(struct kref *kref) drm_mode_object_unregister(blob->dev, &blob->base); - kfree(blob); + kvfree(blob); } /** @@ -543,7 +567,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) return ERR_PTR(-EINVAL); - blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); + blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); if (!blob) return ERR_PTR(-ENOMEM); @@ -559,7 +583,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length, ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB, true, drm_property_free_blob); if (ret) { - kfree(blob); + kvfree(blob); return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 0422b8c2c2e7..7bcf5702c91c 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -328,28 +328,11 @@ static const struct file_operations drm_syncobj_file_fops = { .release = drm_syncobj_file_release, }; -static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj) -{ - struct file *file = anon_inode_getfile("syncobj_file", - &drm_syncobj_file_fops, - syncobj, 0); - if (IS_ERR(file)) - return PTR_ERR(file); - - drm_syncobj_get(syncobj); - if (cmpxchg(&syncobj->file, NULL, file)) { - /* lost the race */ - fput(file); - } - - return 0; -} - static int drm_syncobj_handle_to_fd(struct drm_file *file_private, u32 handle, int *p_fd) { struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle); - int ret; + struct file *file; int fd; if (!syncobj) @@ -361,46 +344,40 @@ static int 
drm_syncobj_handle_to_fd(struct drm_file *file_private, return fd; } - if (!syncobj->file) { - ret = drm_syncobj_alloc_file(syncobj); - if (ret) - goto out_put_fd; + file = anon_inode_getfile("syncobj_file", + &drm_syncobj_file_fops, + syncobj, 0); + if (IS_ERR(file)) { + put_unused_fd(fd); + drm_syncobj_put(syncobj); + return PTR_ERR(file); } - fd_install(fd, syncobj->file); - drm_syncobj_put(syncobj); + + drm_syncobj_get(syncobj); + fd_install(fd, file); + *p_fd = fd; return 0; -out_put_fd: - put_unused_fd(fd); - drm_syncobj_put(syncobj); - return ret; } -static struct drm_syncobj *drm_syncobj_fdget(int fd) -{ - struct file *file = fget(fd); - - if (!file) - return NULL; - if (file->f_op != &drm_syncobj_file_fops) - goto err; - - return file->private_data; -err: - fput(file); - return NULL; -}; - static int drm_syncobj_fd_to_handle(struct drm_file *file_private, int fd, u32 *handle) { - struct drm_syncobj *syncobj = drm_syncobj_fdget(fd); + struct drm_syncobj *syncobj; + struct file *file; int ret; - if (!syncobj) + file = fget(fd); + if (!file) + return -EINVAL; + + if (file->f_op != &drm_syncobj_file_fops) { + fput(file); return -EINVAL; + } /* take a reference to put in the idr */ + syncobj = file->private_data; drm_syncobj_get(syncobj); idr_preload(GFP_KERNEL); @@ -409,12 +386,14 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private, spin_unlock(&file_private->syncobj_table_lock); idr_preload_end(); - if (ret < 0) { - fput(syncobj->file); - return ret; - } - *handle = ret; - return 0; + if (ret > 0) { + *handle = ret; + ret = 0; + } else + drm_syncobj_put(syncobj); + + fput(file); + return ret; } int drm_syncobj_import_sync_file_fence(struct drm_file *file_private, diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 70f2b9593edc..17e8ef9a1c11 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -311,8 +311,8 @@ u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc) u32 vblank; unsigned long flags; - WARN(!dev->driver->get_vblank_timestamp, - "This function requires support for accurate vblank timestamps."); + WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp, + "This function requires support for accurate vblank timestamps."); spin_lock_irqsave(&dev->vblank_time_lock, flags); @@ -869,7 +869,7 @@ void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, assert_spin_locked(&dev->event_lock); e->pipe = pipe; - e->event.sequence = drm_vblank_count(dev, pipe); + e->event.sequence = drm_crtc_accurate_vblank_count(crtc) + 1; e->event.crtc_id = crtc->base.id; list_add_tail(&e->base.link, &dev->vblank_event_list); } diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig index 38b477b5fbf9..4df3c48adcec 100644 --- a/drivers/gpu/drm/etnaviv/Kconfig +++ b/drivers/gpu/drm/etnaviv/Kconfig @@ -6,6 +6,7 @@ config DRM_ETNAVIV depends on MMU select SHMEM select SYNC_FILE + select THERMAL if DRM_ETNAVIV_THERMAL select TMPFS select IOMMU_API select IOMMU_SUPPORT @@ -15,6 +16,14 @@ config DRM_ETNAVIV help DRM driver for Vivante GPUs. +config DRM_ETNAVIV_THERMAL + bool "enable ETNAVIV thermal throttling" + depends on DRM_ETNAVIV + default y + help + Compile in support for thermal throttling. + Say Y unless you want to risk burning your SoC. 
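+
+# Note: when this option is disabled, etnaviv_gpu_bind()/unbind() compile
+# out the cooling device registration via
+# IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL); see the etnaviv_gpu.c hunks below.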
+ config DRM_ETNAVIV_REGISTER_LOGGING bool "enable ETNAVIV register logging" depends on DRM_ETNAVIV diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index fc9a6a83dfc7..a1562f89c3d7 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -1622,7 +1622,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master, struct etnaviv_gpu *gpu = dev_get_drvdata(dev); int ret; - if (IS_ENABLED(CONFIG_THERMAL)) { + if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) { gpu->cooling = thermal_of_cooling_device_register(dev->of_node, (char *)dev_name(dev), gpu, &cooling_ops); if (IS_ERR(gpu->cooling)) @@ -1635,7 +1635,8 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master, ret = etnaviv_gpu_clk_enable(gpu); #endif if (ret < 0) { - thermal_cooling_device_unregister(gpu->cooling); + if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) + thermal_cooling_device_unregister(gpu->cooling); return ret; } @@ -1692,7 +1693,8 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master, gpu->drm = NULL; - thermal_cooling_device_unregister(gpu->cooling); + if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) + thermal_cooling_device_unregister(gpu->cooling); gpu->cooling = NULL; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 2b8bf2dd6387..9effe40f5fa5 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -926,7 +926,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no) struct drm_device *drm_dev = g2d->subdrv.drm_dev; struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node; struct drm_exynos_pending_g2d_event *e; - struct timeval now; + struct timespec64 now; if (list_empty(&runqueue_node->event_list)) return; @@ -934,9 +934,9 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no) e = list_first_entry(&runqueue_node->event_list, struct drm_exynos_pending_g2d_event, base.link); - do_gettimeofday(&now); + ktime_get_ts64(&now); e->event.tv_sec = now.tv_sec; - e->event.tv_usec = now.tv_usec; + e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC; e->event.cmdlist_no = cmdlist_no; drm_send_event(drm_dev, &e->base); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 077de014d610..4400efe3974a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -247,6 +247,15 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev, if (IS_ERR(exynos_gem)) return exynos_gem; + if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) { + /* + * when no IOMMU is available, all allocated buffers are + * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag + */ + flags &= ~EXYNOS_BO_NONCONTIG; + DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n"); + } + /* set memory type and cache attribute from user side. 
*/ exynos_gem->flags = flags; diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h index 30496134a3d0..d7cbe53c4c01 100644 --- a/drivers/gpu/drm/exynos/regs-fimc.h +++ b/drivers/gpu/drm/exynos/regs-fimc.h @@ -569,7 +569,7 @@ #define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26) #define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26) #define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26) -#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff < 13) | (0xff < 0)) +#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0)) /* Real input DMA size register */ #define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31) diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 58e9e0601a61..faf17b83b910 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -210,7 +210,6 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev) return PTR_ERR(fsl_dev->state); } - clk_disable_unprepare(fsl_dev->pix_clk); clk_disable_unprepare(fsl_dev->clk); return 0; @@ -233,6 +232,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) if (fsl_dev->tcon) fsl_tcon_bypass_enable(fsl_dev->tcon); fsl_dcu_drm_init_planes(fsl_dev->drm); + enable_irq(fsl_dev->irq); drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state); console_lock(); @@ -240,7 +240,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) console_unlock(); drm_kms_helper_poll_enable(fsl_dev->drm); - enable_irq(fsl_dev->irq); return 0; } diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index edd7d8127d19..c54806d08dd7 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -102,7 +102,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev, { struct drm_encoder *encoder = &fsl_dev->encoder; struct drm_connector *connector = &fsl_dev->connector.base; - struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config; int ret; fsl_dev->connector.encoder = encoder; @@ -122,10 +121,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev, if (ret < 0) goto err_sysfs; - drm_object_property_set_value(&connector->base, - mode_config->dpms_property, - DRM_MODE_DPMS_OFF); - ret = drm_panel_attach(panel, connector); if (ret) { dev_err(fsl_dev->dev, "failed to attach panel\n"); diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index e8e4ea14b12b..e05e5399af2d 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -255,7 +255,7 @@ extern int intelfb_remove(struct drm_device *dev, extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); -extern int psb_intel_lvds_mode_valid(struct drm_connector *connector, +extern enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode); extern int psb_intel_lvds_set_property(struct drm_connector *connector, struct drm_property *property, diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index be3eefec5152..8baf6325c6e4 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -343,7 +343,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector) } } -int psb_intel_lvds_mode_valid(struct drm_connector *connector, +enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector 
*connector, struct drm_display_mode *mode) { struct drm_psb_private *dev_priv = connector->dev->dev_private; diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index 9823477b1855..2269be91f3e1 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -534,9 +534,12 @@ static void ade_crtc_atomic_begin(struct drm_crtc *crtc, { struct ade_crtc *acrtc = to_ade_crtc(crtc); struct ade_hw_ctx *ctx = acrtc->ctx; + struct drm_display_mode *mode = &crtc->state->mode; + struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode; if (!ctx->power_on) (void)ade_power_up(ctx); + ade_ldi_set_mode(acrtc, mode, adj_mode); } static void ade_crtc_atomic_flush(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index e9e64e8e9765..c4ef83863947 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -124,6 +124,22 @@ config DRM_I915_GVT_KVMGT help Choose this option if you want to enable KVMGT support for Intel GVT-g. +config DRM_I915_GVT_ACRN_GVT + bool "Enable ACRN support for Intel GVT-g" + depends on DRM_I915_GVT + depends on ACRN + default n + help + Choose this option if you want to enable ACRN_GVT support for + Intel GVT-g under ACRN hypervisor environment. + +config DRM_I915_LOAD_ASYNC_SUPPORT + bool "Async i915_driver_load support" + default n + depends on DRM_I915 + help + Choose this option to support async i915_driver_load for boot-up time saving + menu "drm/i915 Debugging" depends on DRM_I915 diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 2e034efc4d6d..2da001c019b2 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -45,12 +45,14 @@ i915-y += i915_cmd_parser.o \ i915_gem_request.o \ i915_gem_shrinker.o \ i915_gem_stolen.o \ + i915_gem_splash.o \ i915_gem_tiling.o \ i915_gem_timeline.o \ i915_gem_userptr.o \ i915_trace_points.o \ i915_vma.o \ intel_breadcrumbs.o \ + i915_gem_gvtbuffer.o \ intel_engine_cs.o \ intel_hangcheck.o \ intel_lrc.o \ @@ -85,6 +87,7 @@ i915-y += intel_audio.o \ intel_fbc.o \ intel_fifo_underrun.o \ intel_frontbuffer.o \ + intel_hdcp.o \ intel_hotplug.o \ intel_modes.o \ intel_overlay.o \ @@ -150,6 +153,9 @@ endif # LPE Audio for VLV and CHT i915-y += intel_lpe_audio.o +# initial modeset +i915-y += intel_initial_modeset.o + obj-$(CONFIG_DRM_I915) += i915.o CFLAGS_i915_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile index 2641ba510a61..50c1cc9f5005 100644 --- a/drivers/gpu/drm/i915/gvt/Makefile +++ b/drivers/gpu/drm/i915/gvt/Makefile @@ -2,8 +2,9 @@ GVT_DIR := gvt GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \ interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \ - execlist.o scheduler.o sched_policy.o render.o cmd_parser.o + execlist.o scheduler.o sched_policy.o render.o cmd_parser.o fb_decoder.o -ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) +ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE)) obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o +obj-$(CONFIG_DRM_I915_GVT_ACRN_GVT) += $(GVT_DIR)/acrn-gvt.o diff --git a/drivers/gpu/drm/i915/gvt/acrn-gvt.c b/drivers/gpu/drm/i915/gvt/acrn-gvt.c new file mode 100644 index 000000000000..73575af3eac9 --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/acrn-gvt.c @@ -0,0 +1,997 @@ +/* + * Interfaces coupled to ACRN + * + * Copyright(c) 
2011-2013 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of Version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. + * + */ + +/* + * NOTE: + * This file contains hypervisor specific interactions to + * implement the concept of mediated pass-through framework. + * What this file provides is actually a general abstraction + * of in-kernel device model, which is not gvt specific. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include "acrn-gvt.h" + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("ACRNGT mediated passthrough driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.1"); + +#define ASSERT(x) \ +do { if (x) break; \ + printk(KERN_EMERG "### ASSERTION FAILED %s: %s: %d: %s\n", \ + __FILE__, __func__, __LINE__, #x); dump_stack(); BUG(); \ +} while (0) + + +struct kobject *acrn_gvt_ctrl_kobj; +static struct kset *acrn_gvt_kset; +static DEFINE_MUTEX(acrn_gvt_sysfs_lock); + +struct gvt_acrngt acrngt_priv; +const struct intel_gvt_ops *intel_gvt_ops; + +static void disable_domu_plane(int pipe, int plane) +{ + struct drm_i915_private *dev_priv = acrngt_priv.gvt->dev_priv; + + I915_WRITE(PLANE_CTL(pipe, plane), 0); + + I915_WRITE(PLANE_SURF(pipe, plane), 0); + POSTING_READ(PLANE_SURF(pipe, plane)); +} + +void acrngt_instance_destroy(struct intel_vgpu *vgpu) +{ + int pipe, plane; + struct acrngt_hvm_dev *info = NULL; + struct intel_gvt *gvt = acrngt_priv.gvt; + + if (vgpu) { + info = (struct acrngt_hvm_dev *)vgpu->handle; + if (info && info->emulation_thread != NULL) + kthread_stop(info->emulation_thread); + + for_each_pipe(gvt->dev_priv, pipe) { + for_each_universal_plane(gvt->dev_priv, pipe, plane) { + if (gvt->pipe_info[pipe].plane_owner[plane] == + vgpu->id) { + disable_domu_plane(pipe, plane); + } + } + } + + intel_gvt_ops->vgpu_deactivate(vgpu); + intel_gvt_ops->vgpu_destroy(vgpu); + } + + if (info) { + gvt_dbg_core("destroy vgpu instance, vm id: %d, client %d", + info->vm_id, info->client); + + if (info->client != 0) + acrn_ioreq_destroy_client(info->client); + + if (info->vm) + put_vm(info->vm); + + kfree(info); + } +} + +static bool acrngt_write_cfg_space(struct intel_vgpu *vgpu, + unsigned int port, unsigned int bytes, unsigned long val) +{ + if (intel_gvt_ops->emulate_cfg_write(vgpu, port, &val, bytes)) { + gvt_err("failed to write config space port 0x%x\n", port); + return false; + } + return true; +} + +static bool acrngt_read_cfg_space(struct intel_vgpu *vgpu, + unsigned int port, unsigned int bytes, unsigned long *val) +{ + unsigned long data; + + if (intel_gvt_ops->emulate_cfg_read(vgpu, port, &data, bytes)) { + gvt_err("failed to read config space port 0x%x\n", port); + return false; + } + memcpy(val, &data, bytes); + return true; +} + +static int acrngt_hvm_pio_emulation(struct intel_vgpu *vgpu, + struct vhm_request *req) +{ + if (req->reqs.pci_request.direction == REQUEST_READ) { + /* PIO READ */ + gvt_dbg_core("handle pio read emulation at port 0x%x\n", + 
req->reqs.pci_request.reg); + if (!acrngt_read_cfg_space(vgpu, + req->reqs.pci_request.reg, + req->reqs.pci_request.size, + (unsigned long *)&req->reqs.pci_request.value)) { + gvt_err("failed to read pio at addr 0x%x\n", + req->reqs.pci_request.reg); + return -EINVAL; + } + } else if (req->reqs.pci_request.direction == REQUEST_WRITE) { + /* PIO WRITE */ + gvt_dbg_core("handle pio write emulation at address 0x%x, " + "value 0x%x\n", + req->reqs.pci_request.reg, req->reqs.pci_request.value); + if (!acrngt_write_cfg_space(vgpu, + req->reqs.pci_request.reg, + req->reqs.pci_request.size, + (unsigned long)req->reqs.pci_request.value)) { + gvt_err("failed to write pio at addr 0x%x\n", + req->reqs.pci_request.reg); + return -EINVAL; + } + } + return 0; +} + +static int acrngt_hvm_mmio_emulation(struct intel_vgpu *vgpu, + struct vhm_request *req) +{ + if (req->reqs.mmio_request.direction == REQUEST_READ) { + /* MMIO READ */ + gvt_dbg_core("handle mmio read emulation at address 0x%llx\n", + req->reqs.mmio_request.address); + if (intel_gvt_ops->emulate_mmio_read(vgpu, + req->reqs.mmio_request.address, + &req->reqs.mmio_request.value, + req->reqs.mmio_request.size)) { + gvt_err("failed to read mmio at addr 0x%llx\n", + req->reqs.mmio_request.address); + return -EINVAL; + } + } else if (req->reqs.mmio_request.direction == REQUEST_WRITE) { + /* MMIO Write */ + if (intel_gvt_ops->emulate_mmio_write(vgpu, + req->reqs.mmio_request.address, + &req->reqs.mmio_request.value, + req->reqs.mmio_request.size)) { + gvt_err("failed to write mmio at addr 0x%llx\n", + req->reqs.mmio_request.address); + return -EINVAL; + } + gvt_dbg_core("handle mmio write emulation at address 0x%llx, " + "value 0x%llx\n", + req->reqs.mmio_request.address, req->reqs.mmio_request.value); + } + + return 0; +} + +static void handle_request_error(struct intel_vgpu *vgpu) +{ + mutex_lock(&vgpu->gvt->lock); + if (vgpu->failsafe == false) { + vgpu->failsafe= true; + gvt_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id); + } + mutex_unlock(&vgpu->gvt->lock); +} + +static int acrngt_emulation_thread(void *priv) +{ + struct intel_vgpu *vgpu = (struct intel_vgpu *)priv; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)vgpu->handle; + struct vhm_request *req; + + int vcpu, ret; + int nr_vcpus = info->nr_vcpu; + + gvt_dbg_core("start kthread for VM%d\n", info->vm_id); + ASSERT(info->nr_vcpu <= MAX_HVM_VCPUS_SUPPORTED); + + set_freezable(); + while (1) { + acrn_ioreq_attach_client(info->client, 1); + + if (kthread_should_stop()) + return 0; + + for (vcpu = 0; vcpu < nr_vcpus; vcpu++) { + req = &info->req_buf[vcpu]; + if (atomic_read(&req->processed) == + REQ_STATE_PROCESSING && + req->client == info->client) { + gvt_dbg_core("handle ioreq type %d\n", + req->type); + switch (req->type) { + case REQ_PCICFG: + ret = acrngt_hvm_pio_emulation(vgpu, req); + break; + case REQ_MMIO: + case REQ_WP: + ret = acrngt_hvm_mmio_emulation(vgpu, req); + break; + default: + gvt_err("Unknown ioreq type %x\n", + req->type); + ret = -EINVAL; + break; + } + /* error handling */ + if (ret) + handle_request_error(vgpu); + + smp_mb(); + atomic_set(&req->processed, REQ_STATE_COMPLETE); + /* complete request */ + if (acrn_ioreq_complete_request(info->client, + vcpu)) + gvt_err("failed complete request\n"); + } + } + } + + BUG(); /* It's actually impossible to reach here */ + return 0; +} + +struct intel_vgpu *acrngt_instance_create(domid_t vm_id, + struct intel_vgpu_type *vgpu_type) +{ + struct acrngt_hvm_dev *info; + struct intel_vgpu *vgpu; + int ret = 0; + 
struct task_struct *thread; + struct vm_info vm_info; + + gvt_dbg_core("acrngt_instance_create enter\n"); + if (!intel_gvt_ops || !acrngt_priv.gvt) + return NULL; + + vgpu = intel_gvt_ops->vgpu_create(acrngt_priv.gvt, vgpu_type); + if (IS_ERR(vgpu)) { + gvt_err("failed to create vgpu\n"); + return NULL; + } + + info = kzalloc(sizeof(struct acrngt_hvm_dev), GFP_KERNEL); + if (info == NULL) { + gvt_err("failed to alloc acrngt_hvm_dev\n"); + goto err; + } + + info->vm_id = vm_id; + info->vgpu = vgpu; + vgpu->handle = (unsigned long)info; + + if ((info->vm = find_get_vm(vm_id)) == NULL) { + gvt_err("failed to get vm %d\n", vm_id); + acrngt_instance_destroy(vgpu); + return NULL; + } + if (info->vm->req_buf == NULL) { + gvt_err("failed to get req buf for vm %d\n", vm_id); + goto err; + } + gvt_dbg_core("get vm req_buf from vm_id %d\n", vm_id); + + /* create client: no handler -> handle request by itself */ + info->client = acrn_ioreq_create_client(vm_id, NULL, "ioreq gvt-g"); + if (info->client < 0) { + gvt_err("failed to create ioreq client for vm id %d\n", vm_id); + goto err; + } + + /* get vm info */ + ret = vhm_get_vm_info(vm_id, &vm_info); + if (ret < 0) { + gvt_err("failed to get vm info for vm id %d\n", vm_id); + goto err; + } + + info->nr_vcpu = vm_info.max_vcpu; + + /* get req buf */ + info->req_buf = acrn_ioreq_get_reqbuf(info->client); + if (info->req_buf == NULL) { + gvt_err("failed to get req_buf for client %d\n", info->client); + goto err; + } + + /* trap config space access */ + acrn_ioreq_intercept_bdf(info->client, 0, 2, 0); + + thread = kthread_run(acrngt_emulation_thread, vgpu, + "acrngt_emulation:%d", vm_id); + if (IS_ERR(thread)) { + gvt_err("failed to run emulation thread for vm %d\n", vm_id); + goto err; + } + info->emulation_thread = thread; + gvt_dbg_core("create vgpu instance success, vm_id %d, client %d," + " nr_vcpu %d\n", info->vm_id,info->client, info->nr_vcpu); + + return vgpu; + +err: + acrngt_instance_destroy(vgpu); + return NULL; +} + +static ssize_t kobj_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct kobj_attribute *kattr; + ssize_t ret = -EIO; + + kattr = container_of(attr, struct kobj_attribute, attr); + if (kattr->show) + ret = kattr->show(kobj, kattr, buf); + return ret; +} + +static ssize_t kobj_attr_store(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + struct kobj_attribute *kattr; + ssize_t ret = -EIO; + + kattr = container_of(attr, struct kobj_attribute, attr); + if (kattr->store) + ret = kattr->store(kobj, kattr, buf, count); + return ret; +} + +const struct sysfs_ops acrngt_kobj_sysfs_ops = { + .show = kobj_attr_show, + .store = kobj_attr_store, +}; + +static ssize_t acrngt_sysfs_vgpu_id(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int i; + + for (i = 0; i < GVT_MAX_VGPU_INSTANCE; i++) { + if (acrngt_priv.vgpus[i] && + (kobj == &((struct acrngt_hvm_dev *) + (acrngt_priv.vgpus[i]->handle))->kobj)) { + return sprintf(buf, "%d\n", acrngt_priv.vgpus[i]->id); + } + } + return 0; +} + +static struct kobj_attribute acrngt_vm_attr = +__ATTR(vgpu_id, 0440, acrngt_sysfs_vgpu_id, NULL); + + +static struct attribute *acrngt_vm_attrs[] = { + &acrngt_vm_attr.attr, + NULL, /* need to NULL terminate the list of attributes */ +}; + +static struct kobj_type acrngt_instance_ktype = { + .sysfs_ops = &acrngt_kobj_sysfs_ops, + .default_attrs = acrngt_vm_attrs, +}; + +static int acrngt_sysfs_add_instance(struct acrngt_hvm_params *vp) +{ + int ret = 0; + struct intel_vgpu *vgpu; + 
struct acrngt_hvm_dev *info;
+
+	struct intel_vgpu_type type = acrngt_priv.gvt->types[0];
+
+	type.low_gm_size = vp->aperture_sz * VMEM_1MB;
+	type.high_gm_size = (vp->gm_sz - vp->aperture_sz) * VMEM_1MB;
+	type.fence = vp->fence_sz;
+	mutex_lock(&acrn_gvt_sysfs_lock);
+	vgpu = acrngt_instance_create(vp->vm_id, &type);
+	mutex_unlock(&acrn_gvt_sysfs_lock);
+	if (vgpu == NULL) {
+		gvt_err("acrngt_sysfs_add_instance failed.\n");
+		ret = -EINVAL;
+	} else {
+		info = (struct acrngt_hvm_dev *) vgpu->handle;
+		info->vm_id = vp->vm_id;
+		acrngt_priv.vgpus[vgpu->id - 1] = vgpu;
+		gvt_dbg_core("add acrngt instance for vm-%d with vgpu-%d.\n",
+			vp->vm_id, vgpu->id);
+
+		kobject_init(&info->kobj, &acrngt_instance_ktype);
+		info->kobj.kset = acrn_gvt_kset;
+		/* add kobject, NULL parent indicates using kset as parent */
+		ret = kobject_add(&info->kobj, NULL, "vm%u", info->vm_id);
+		if (ret) {
+			gvt_err("%s: kobject add error: %d\n", __func__, ret);
+			kobject_put(&info->kobj);
+		}
+	}
+
+	return ret;
+}
+
+static struct intel_vgpu *vgpu_from_id(int vm_id)
+{
+	int i;
+	struct acrngt_hvm_dev *hvm_dev = NULL;
+
+	/* vm_id is negative in del_instance call */
+	if (vm_id < 0)
+		vm_id = -vm_id;
+	for (i = 0; i < GVT_MAX_VGPU_INSTANCE; i++)
+		if (acrngt_priv.vgpus[i]) {
+			hvm_dev = (struct acrngt_hvm_dev *)
+				acrngt_priv.vgpus[i]->handle;
+			if (hvm_dev && (vm_id == hvm_dev->vm_id))
+				return acrngt_priv.vgpus[i];
+		}
+	return NULL;
+}
+
+static int acrngt_sysfs_del_instance(struct acrngt_hvm_params *vp)
+{
+	int ret = 0;
+	struct intel_vgpu *vgpu = vgpu_from_id(vp->vm_id);
+	struct acrngt_hvm_dev *info = NULL;
+
+	if (vgpu) {
+		info = (struct acrngt_hvm_dev *) vgpu->handle;
+		gvt_dbg_core("remove vm-%d sysfs node.\n", vp->vm_id);
+		kobject_put(&info->kobj);
+
+		mutex_lock(&acrn_gvt_sysfs_lock);
+		acrngt_priv.vgpus[vgpu->id - 1] = NULL;
+		acrngt_instance_destroy(vgpu);
+		mutex_unlock(&acrn_gvt_sysfs_lock);
+	}
+
+	return ret;
+}
+
+static ssize_t acrngt_sysfs_instance_manage(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	struct acrngt_hvm_params vp;
+	int param_cnt;
+	char param_str[64];
+	int rc;
+	int high_gm_sz;
+	int low_gm_sz;
+
+	/* We expect param_str to be vmid,a,b,c (where the guest
+	 * wants a MB aperture and b MB gm, and c fence registers) or -vmid
+	 * (where we want to release the gvt instance), e.g. (values are
+	 * illustrative only):
+	 *   echo "1,64,448,4" > /sys/kernel/gvt/control/create_gvt_instance
+	 *   echo "-1" > /sys/kernel/gvt/control/create_gvt_instance
+	 */
+	(void)sscanf(buf, "%63s", param_str);
+	param_cnt = sscanf(param_str, "%d,%d,%d,%d", &vp.vm_id,
+			&low_gm_sz, &high_gm_sz, &vp.fence_sz);
+	gvt_dbg_core("create vm-%d sysfs node, low gm size %d,"
+		" high gm size %d, fence size %d\n",
+		vp.vm_id, low_gm_sz, high_gm_sz, vp.fence_sz);
+	vp.aperture_sz = low_gm_sz;
+	vp.gm_sz = high_gm_sz + low_gm_sz;
+	if (param_cnt == 1) {
+		if (vp.vm_id >= 0)
+			return -EINVAL;
+	} else if (param_cnt == 4) {
+		if (!(vp.vm_id > 0 && vp.aperture_sz > 0 &&
+			vp.aperture_sz <= vp.gm_sz && vp.fence_sz > 0))
+			return -EINVAL;
+	} else {
+		gvt_err("%s: parameter count incorrect\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = (vp.vm_id > 0) ? acrngt_sysfs_add_instance(&vp) :
+		acrngt_sysfs_del_instance(&vp);
+
+	return rc < 0 ?
rc : count; +} + +static ssize_t show_plane_owner(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "Planes:\nPipe A: %d %d %d %d\n" + "Pipe B: %d %d %d %d\nPipe C: %d %d %d\n", + acrngt_priv.gvt->pipe_info[PIPE_A].plane_owner[PLANE_PRIMARY], + acrngt_priv.gvt->pipe_info[PIPE_A].plane_owner[PLANE_SPRITE0], + acrngt_priv.gvt->pipe_info[PIPE_A].plane_owner[PLANE_SPRITE1], + acrngt_priv.gvt->pipe_info[PIPE_A].plane_owner[PLANE_SPRITE2], + acrngt_priv.gvt->pipe_info[PIPE_B].plane_owner[PLANE_PRIMARY], + acrngt_priv.gvt->pipe_info[PIPE_B].plane_owner[PLANE_SPRITE0], + acrngt_priv.gvt->pipe_info[PIPE_B].plane_owner[PLANE_SPRITE1], + acrngt_priv.gvt->pipe_info[PIPE_B].plane_owner[PLANE_SPRITE2], + acrngt_priv.gvt->pipe_info[PIPE_C].plane_owner[PLANE_PRIMARY], + acrngt_priv.gvt->pipe_info[PIPE_C].plane_owner[PLANE_SPRITE0], + acrngt_priv.gvt->pipe_info[PIPE_C].plane_owner[PLANE_SPRITE1]); +} + +static struct kobj_attribute acrngt_instance_attr = +__ATTR(create_gvt_instance, 0220, NULL, acrngt_sysfs_instance_manage); + +static struct kobj_attribute plane_owner_attr = +__ATTR(plane_owner_show, 0440, show_plane_owner, NULL); + +static struct attribute *acrngt_ctrl_attrs[] = { + &acrngt_instance_attr.attr, + &plane_owner_attr.attr, + NULL, /* need to NULL terminate the list of attributes */ +}; + +static struct kobj_type acrngt_ctrl_ktype = { + .sysfs_ops = &acrngt_kobj_sysfs_ops, + .default_attrs = acrngt_ctrl_attrs, +}; + +int acrngt_sysfs_init(struct intel_gvt *gvt) +{ + int ret; + + acrn_gvt_kset = kset_create_and_add("gvt", NULL, kernel_kobj); + if (!acrn_gvt_kset) { + ret = -ENOMEM; + goto kset_fail; + } + + acrn_gvt_ctrl_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL); + if (!acrn_gvt_ctrl_kobj) { + ret = -ENOMEM; + goto ctrl_fail; + } + + acrn_gvt_ctrl_kobj->kset = acrn_gvt_kset; + ret = kobject_init_and_add(acrn_gvt_ctrl_kobj, &acrngt_ctrl_ktype, + NULL, "control"); + if (ret) { + ret = -EINVAL; + goto kobj_fail; + } + + return 0; + +kobj_fail: + kobject_put(acrn_gvt_ctrl_kobj); +ctrl_fail: + kset_unregister(acrn_gvt_kset); +kset_fail: + return ret; +} + +void acrngt_sysfs_del(void) +{ + kobject_put(acrn_gvt_ctrl_kobj); + kset_unregister(acrn_gvt_kset); +} + +static int acrngt_host_init(struct device *dev, void *gvt, const void *ops) +{ + int ret = -EFAULT; + + if (!gvt || !ops) + return -EINVAL; + + acrngt_priv.gvt = (struct intel_gvt *)gvt; + intel_gvt_ops = (const struct intel_gvt_ops *)ops; + + ret = acrngt_sysfs_init(acrngt_priv.gvt); + if (ret) { + gvt_err("failed call acrngt_sysfs_init, error: %d\n", ret); + acrngt_priv.gvt = NULL; + intel_gvt_ops = NULL; + } + + return ret; +} + +static void acrngt_host_exit(struct device *dev, void *gvt) +{ + acrngt_sysfs_del(); + acrngt_priv.gvt = NULL; + intel_gvt_ops = NULL; +} + +static int acrngt_attach_vgpu(void *vgpu, unsigned long *handle) +{ + return 0; +} + +static void acrngt_detach_vgpu(unsigned long handle) +{ + return; +} + +static int acrngt_inject_msi(unsigned long handle, u32 addr_lo, u16 data) +{ + int ret; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("inject msi irq, addr 0x%x, data 0x%hx\n", addr_lo, data); + + ret = vhm_inject_msi(info->vm_id, addr_lo, data); + if (ret) + gvt_err("failed to inject msi for vm %d\n", info->vm_id); + return ret; +} + +static unsigned long acrngt_virt_to_mfn(void *addr) +{ + uint64_t gpa; + uint64_t hpa; + gvt_dbg_core("virt 0x%lx to mfn\n", (unsigned long)addr); + + gpa = virt_to_phys(addr); + hpa = vhm_vm_gpa2hpa(0, gpa); 
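+	/*
+	 * vm_id 0 refers to the service OS itself here: translate our own
+	 * GPA to a host physical address, since the hypervisor interfaces
+	 * expect frame numbers in host-physical terms.
+	 */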
+ + return (unsigned long) (hpa >> PAGE_SHIFT); +} + +static int acrngt_set_wp_page(unsigned long handle, u64 gfn) +{ + int ret; + unsigned long hpa; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("set wp page for gfn 0x%llx\n", gfn); + + hpa = vhm_vm_gpa2hpa(info->vm_id, gfn << PAGE_SHIFT); + ret = acrn_ioreq_add_iorange(info->client, REQ_WP, gfn << PAGE_SHIFT, + ((gfn + 1) << PAGE_SHIFT) - 1); + if (ret) { + gvt_err("failed acrn_ioreq_add_iorange for gfn 0x%llx\n", gfn); + return ret; + } + ret = write_protect_page(info->vm_id, gfn << PAGE_SHIFT, true); + if (ret) + gvt_err("failed set write protect for gfn 0x%llx\n", gfn); + return ret; +} + +static int acrngt_unset_wp_page(unsigned long handle, u64 gfn) +{ + int ret; + unsigned long hpa; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("unset wp page for gfx 0x%llx\n", gfn); + + hpa = vhm_vm_gpa2hpa(info->vm_id, gfn << PAGE_SHIFT); + ret = write_protect_page(info->vm_id, gfn << PAGE_SHIFT, false); + if (ret) { + gvt_err("failed unset write protect for gfn 0x%llx\n", gfn); + return ret; + } + ret = acrn_ioreq_del_iorange(info->client, REQ_WP, gfn << PAGE_SHIFT, + ((gfn + 1) << PAGE_SHIFT) - 1); + if (ret) + gvt_err("failed acrn_ioreq_del_iorange for gfn 0x%llx\n", gfn); + return ret; +} + +static int acrngt_read_gpa(unsigned long handle, unsigned long gpa, + void *buf, unsigned long len) +{ + void *va = NULL; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("read gpa 0x%lx with len 0x%lx\n", gpa, len); + + va = map_guest_phys(info->vm_id, gpa, len); + if (!va) { + gvt_err("GVT: can not read gpa = 0x%lx!!!\n", gpa); + return -EFAULT; + } + + switch (len) + { + case 1: + *((uint8_t *) buf) = *((uint8_t *) va); + break; + case 2: + *((uint16_t *) buf) = *((uint16_t *) va); + break; + case 4: + *((uint32_t *) buf) = *((uint32_t *) va); + break; + case 8: + *((uint64_t *) buf) = *((uint64_t *) va); + break; + default: + memcpy(buf, va, len); + } + return 0; +} + +static int acrngt_write_gpa(unsigned long handle, unsigned long gpa, + void *buf, unsigned long len) +{ + void *va = NULL; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("write gpa 0x%lx with len 0x%lx\n", gpa, len); + + va = map_guest_phys(info->vm_id, gpa, len); + if (!va) { + gvt_err("GVT: can not write gpa = 0x%lx!!!\n", gpa); + return -EFAULT; + } + + switch (len) + { + case 1: + *((uint8_t *) va) = *((uint8_t *) buf); + break; + case 2: + *((uint16_t *) va) = *((uint16_t *) buf); + break; + case 4: + *((uint32_t *) va) = *((uint32_t *) buf); + break; + case 8: + *((uint64_t *) va) = *((uint64_t *) buf); + break; + default: + memcpy(va, buf, len); + } + return 0; +} + +static bool is_identical_mmap(void) +{ + /* todo: need add hypercall to get such info from hypervisor */ + return true; +} + +static unsigned long acrngt_gfn_to_pfn(unsigned long handle, unsigned long gfn) +{ + unsigned long hpa; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + + gvt_dbg_core("convert gfn 0x%lx to pfn\n", gfn); + if (is_identical_mmap()) { + void *va = NULL; + + va = map_guest_phys(info->vm_id, gfn << PAGE_SHIFT, + 1 << PAGE_SHIFT); + if (!va) { + gvt_err("GVT: can not map gfn = 0x%lx!!!\n", gfn); + hpa = vhm_vm_gpa2hpa(info->vm_id, gfn << PAGE_SHIFT); + } else { + hpa = virt_to_phys(va); + } + } else { + hpa = vhm_vm_gpa2hpa(info->vm_id, gfn << PAGE_SHIFT); + } + + return hpa >> PAGE_SHIFT; +} + +static int acrngt_map_gfn_to_mfn(unsigned long handle, 
unsigned long gfn,
+	unsigned long mfn, unsigned int nr, bool map)
+{
+	int ret;
+	struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle;
+	gvt_dbg_core("map/unmap gfn 0x%lx to mfn 0x%lx with %u pages, map %d\n",
+		gfn, mfn, nr, map);
+
+	if (map)
+		ret = add_memory_region(info->vm_id, gfn << PAGE_SHIFT,
+				mfn << PAGE_SHIFT, nr << PAGE_SHIFT,
+				MEM_TYPE_UC, MEM_ACCESS_RWX);
+	else
+		ret = del_memory_region(info->vm_id, gfn << PAGE_SHIFT,
+				nr << PAGE_SHIFT);
+	if (ret)
+		gvt_err("failed map/unmap gfn 0x%lx to mfn 0x%lx with %u pages,"
+			" map %d\n", gfn, mfn, nr, map);
+	return ret;
+}
+
+static int acrngt_set_trap_area(unsigned long handle, u64 start,
+	u64 end, bool map)
+{
+	int ret;
+	struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle;
+	gvt_dbg_core("set trap area, start 0x%llx, end 0x%llx, map %d\n",
+		start, end, map);
+
+	if (map)
+		ret = acrn_ioreq_add_iorange(info->client, REQ_MMIO,
+				start, end);
+	else
+		ret = acrn_ioreq_del_iorange(info->client, REQ_MMIO,
+				start, end);
+	if (ret)
+		gvt_err("failed set trap, start 0x%llx, end 0x%llx, map %d\n",
+			start, end, map);
+	return ret;
+}
+
+static int acrngt_set_pvmmio(unsigned long handle, u64 start, u64 end, bool map)
+{
+	int rc, i;
+	unsigned long mfn, shared_mfn;
+	unsigned long pfn = start >> PAGE_SHIFT;
+	u32 mmio_size_fn = acrngt_priv.gvt->device_info.mmio_size >> PAGE_SHIFT;
+	struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle;
+
+	if (map) {
+		mfn = acrngt_virt_to_mfn(info->vgpu->mmio.vreg);
+		rc = acrngt_map_gfn_to_mfn(handle, pfn, mfn, mmio_size_fn, map);
+		if (rc) {
+			gvt_err("acrn-gvt: map pfn %lx to mfn %lx fail with ret %d\n",
+				pfn, mfn, rc);
+			return rc;
+		}
+
+		/* map the shared page to guest */
+		shared_mfn = acrngt_virt_to_mfn(info->vgpu->mmio.shared_page);
+		rc = acrngt_map_gfn_to_mfn(handle, pfn + mmio_size_fn, shared_mfn, 1, map);
+		if (rc) {
+			gvt_err("acrn-gvt: map shared page fail with ret %d\n", rc);
+			return rc;
+		}
+
+		/* mmio access is trapped like memory write protection */
+		rc = acrn_ioreq_add_iorange(info->client, REQ_WP, pfn << PAGE_SHIFT,
+			((pfn + mmio_size_fn) << PAGE_SHIFT) - 1);
+		if (rc) {
+			gvt_err("failed acrn_ioreq_add_iorange for pfn 0x%lx\n", pfn);
+			return rc;
+		}
+
+		for (i = 0; i < mmio_size_fn; i++) {
+			rc = write_protect_page(info->vm_id,
+				(pfn + i) << PAGE_SHIFT, true);
+			if (rc) {
+				gvt_err("failed set wp for pfn 0x%lx\n", pfn + i);
+				return rc;
+			}
+		}
+
+		/* scratch reg access is trapped like mmio access, 1 page */
+		rc = acrngt_map_gfn_to_mfn(handle, pfn + (VGT_PVINFO_PAGE >> PAGE_SHIFT),
+			mfn + (VGT_PVINFO_PAGE >> PAGE_SHIFT), 1, 0);
+		if (rc) {
+			gvt_err("acrn-gvt: map pfn %lx to mfn %lx fail with ret %d\n",
+				pfn, mfn, rc);
+			return rc;
+		}
+		rc = acrn_ioreq_add_iorange(info->client, REQ_MMIO,
+			(pfn << PAGE_SHIFT) + VGT_PVINFO_PAGE,
+			((pfn + 1) << PAGE_SHIFT) + VGT_PVINFO_PAGE - 1);
+		if (rc) {
+			gvt_err("failed acrn_ioreq_add_iorange for pfn 0x%lx\n",
+				(pfn << PAGE_SHIFT) + VGT_PVINFO_PAGE);
+			return rc;
+		}
+
+	} else {
+		mfn = acrngt_virt_to_mfn(info->vgpu->mmio.vreg);
+		rc = acrngt_map_gfn_to_mfn(handle, pfn, mfn, mmio_size_fn, map);
+		if (rc) {
+			gvt_err("acrn-gvt: unmap pfn %lx from mfn %lx fail with ret %d\n",
+				pfn, mfn, rc);
+			return rc;
+		}
+		rc = acrn_ioreq_del_iorange(info->client, REQ_WP, pfn << PAGE_SHIFT,
+			((pfn + mmio_size_fn) << PAGE_SHIFT) - 1);
+		if (rc) {
+			gvt_err("failed acrn_ioreq_del_iorange for pfn 0x%lx\n", pfn);
+			return rc;
+		}
+		rc = acrn_ioreq_add_iorange(info->client, REQ_MMIO, pfn << PAGE_SHIFT,
+			((pfn + mmio_size_fn) << PAGE_SHIFT) - 1);
+		if (rc) {
+			gvt_err("failed acrn_ioreq_add_iorange for pfn 0x%lx\n", pfn);
+			return rc;
+		}
+
+		/* unmap the shared page from the guest */
+		shared_mfn = acrngt_virt_to_mfn(info->vgpu->mmio.shared_page);
+		rc = acrngt_map_gfn_to_mfn(handle, pfn + mmio_size_fn, shared_mfn, 1, map);
+		if (rc) {
+			gvt_err("acrn-gvt: unmap shared page fail with ret %d\n", rc);
+			return rc;
+		}
+	}
+	return rc;
+}
+
+static int acrn_pause_domain(unsigned long handle)
+{
+	struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle;
+	if (info == NULL)
+		return 0;
+
+	/* TODO: should be implemented to work around a hw bug */
+	gvt_dbg_core("pause domain\n");
+	return 0;
+}
+
+static int acrn_unpause_domain(unsigned long handle)
+{
+	struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle;
+	if (info == NULL)
+		return 0;
+
+	/* TODO: should be implemented to work around a hw bug */
+	gvt_dbg_core("unpause domain\n");
+	return 0;
+}
+
+static int acrngt_dom0_ready(void)
+{
+	char *env[] = {"GVT_DOM0_READY=1", NULL};
+	if (!acrn_gvt_ctrl_kobj)
+		return 0;
+	gvt_dbg_core("acrngt: Dom 0 ready to accept Dom U guests\n");
+	return kobject_uevent_env(acrn_gvt_ctrl_kobj, KOBJ_ADD, env);
+}
+
+struct intel_gvt_mpt acrn_gvt_mpt = {
+	//.detect_host = acrngt_detect_host,
+	.host_init = acrngt_host_init,
+	.host_exit = acrngt_host_exit,
+	.attach_vgpu = acrngt_attach_vgpu,
+	.detach_vgpu = acrngt_detach_vgpu,
+	.inject_msi = acrngt_inject_msi,
+	.from_virt_to_mfn = acrngt_virt_to_mfn,
+	.set_wp_page = acrngt_set_wp_page,
+	.unset_wp_page = acrngt_unset_wp_page,
+	.read_gpa = acrngt_read_gpa,
+	.write_gpa = acrngt_write_gpa,
+	.gfn_to_mfn = acrngt_gfn_to_pfn,
+	.map_gfn_to_mfn = acrngt_map_gfn_to_mfn,
+	.set_trap_area = acrngt_set_trap_area,
+	.set_pvmmio = acrngt_set_pvmmio,
+	.pause_domain = acrn_pause_domain,
+	.unpause_domain = acrn_unpause_domain,
+	.dom0_ready = acrngt_dom0_ready,
+};
+EXPORT_SYMBOL_GPL(acrn_gvt_mpt);
+
+static int __init acrngt_init(void)
+{
+	/* TODO: to support this, need to implement check_gfx_iommu_enabled func */
+	gvt_dbg_core("acrngt loaded\n");
+	return 0;
+}
+
+static void __exit acrngt_exit(void)
+{
+	gvt_dbg_core("acrngt: unloaded\n");
+}
+
+module_init(acrngt_init);
+module_exit(acrngt_exit);
diff --git a/drivers/gpu/drm/i915/gvt/acrn-gvt.h b/drivers/gpu/drm/i915/gvt/acrn-gvt.h
new file mode 100644
index 000000000000..0799df2ec557
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/acrn-gvt.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef INTEL_GVT_ACRNGT_H
+#define INTEL_GVT_ACRNGT_H
+
+extern struct intel_gvt *gvt_instance;
+extern const struct intel_gvt_ops *acrn_intel_gvt_ops;
+
+#define MAX_HVM_VCPUS_SUPPORTED 127
+
+#define VMEM_1MB (1ULL << 20) /* the size of the first 1MB */
+
+typedef uint16_t domid_t;
+
+/*
+ * acrngt_hvm_dev is a wrapper of a vGPU instance, which is represented by an
+ * intel_vgpu structure. Under the ACRN hypervisor, an acrngt instance stands
+ * for an HVM device and owns the related resources.
+ */
+struct acrngt_hvm_dev {
+	domid_t vm_id;
+	struct kobject kobj;
+	struct intel_vgpu *vgpu;
+
+	int nr_vcpu;
+	struct task_struct *emulation_thread;
+
+	int client;
+	struct vhm_request *req_buf;
+	struct vhm_vm *vm;
+};
+
+struct acrngt_hvm_params {
+	int vm_id;
+	int aperture_sz; /* in MB */
+	int gm_sz; /* in MB */
+	int fence_sz;
+};
+
+/*
+ * struct gvt_acrngt should be a single instance to share global
+ * information for ACRNGT module.
+ */
+#define GVT_MAX_VGPU_INSTANCE 15
+struct gvt_acrngt {
+	struct intel_gvt *gvt;
+	struct intel_vgpu *vgpus[GVT_MAX_VGPU_INSTANCE];
+};
+
+static ssize_t acrngt_sysfs_instance_manage(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count);
+static ssize_t acrngt_sysfs_vgpu_id(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf);
+
+struct intel_vgpu *acrngt_instance_create(domid_t vm_id,
+	struct intel_vgpu_type *type);
+void acrngt_instance_destroy(struct intel_vgpu *vgpu);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index ca3d1925beda..93604b0e1cde 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -274,7 +274,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
 	avail = max - taken;
 	request = param->fence_sz;
-	if (request > avail)
+	if (request > avail || request > INTEL_GVT_MAX_NUM_FENCES)
 		goto no_enough_resource;
 	vgpu_fence_sz(vgpu) = request;
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index ff3154fe6588..106b11a13d01 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -56,6 +56,10 @@ static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
 /**
  * vgpu_pci_cfg_mem_write - write virtual cfg space memory
+ * @vgpu: a vGPU
+ * @off: offset into the PCI configuration space
+ * @src: data buffer to write into the vGPU's emulated configuration space
+ * @bytes: size of data to write in bytes
  *
  * Use this function to write virtual cfg space memory.
  * For standard cfg space, only RW bits can be changed,
@@ -92,6 +96,11 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
 /**
  * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
  *
+ * @vgpu: a vGPU
+ * @offset: offset into the PCI configuration space
+ * @p_data: buffer to hold data read from the vGPU's emulated configuration space
+ * @bytes: size of data to read in bytes
+ *
  * Returns:
  * Zero on success, negative error code if failed.
 */
@@ -263,6 +272,10 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
 /**
  * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space write
+ * @vgpu: a vGPU
+ * @offset: offset into the PCI configuration space
+ * @p_data: data buffer to write into the vGPU's emulated configuration space
+ * @bytes: size of data to write in bytes
  *
  * Returns:
  * Zero on success, negative error code if failed.
@@ -280,9 +293,22 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 	/* First check if it's PCI_COMMAND */
 	if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
-		if (WARN_ON(bytes > 2))
+		if (WARN_ON(bytes != 2 && bytes != 4))
 			return -EINVAL;
-		return emulate_pci_command_write(vgpu, offset, p_data, bytes);
+
+		ret = -EINVAL;
+		if (bytes == 2)
+			ret = emulate_pci_command_write(vgpu, offset,
+					p_data, bytes);
+		if (bytes == 4) {
+			ret = emulate_pci_command_write(vgpu, offset,
+					p_data, 2);
+			if (ret)
+				return ret;
+			vgpu_pci_cfg_mem_write(vgpu, offset + 2,
+					(u8 *)p_data + 2, 2);
+		}
+		return ret;
 	}
 	switch (rounddown(offset, 4)) {
@@ -300,6 +326,12 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 		break;
 	case INTEL_GVT_PCI_OPREGION:
+		/*
+		 * To support virtual display, we need to override the real VBT in the
+		 * OpRegion. So here we don't report OpRegion to guest.
+		 */
+		if (IS_KABYLAKE(vgpu->gvt->dev_priv))
+			return 0;
 		if (WARN_ON(!IS_ALIGNED(offset, 4)))
 			return -EINVAL;
 		ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index d4726a3358a4..d6b792c2ff63 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -173,6 +173,8 @@ struct decode_info {
 #define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3)
 #define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4)
+#define OP_MEDIA_POOL_STATE OP_3D_MEDIA(0x2, 0x0, 0x5)
+
 #define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0)
 #define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2)
 #define OP_MEDIA_OBJECT_WALKER OP_3D_MEDIA(0x2, 0x1, 0x3)
@@ -811,7 +813,7 @@ static bool is_shadowed_mmio(unsigned int offset)
 	return ret;
 }
-static inline bool is_force_nonpriv_mmio(unsigned int offset)
+bool is_force_nonpriv_mmio(unsigned int offset)
 {
 	return (offset >= 0x24d0 && offset < 0x2500);
 }
@@ -863,6 +865,15 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
 	}
+	/* Re-direct the non-context MMIO access to VGT_SCRATCH_REG; it
+	 * has no functional impact on the HW.
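+	 * The vreg copy is still kept in sync when the LRI is audited in
+	 * cmd_handler_lri() below, so the guest observes its write while the
+	 * physical register access is redirected to the scratch register.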
+	 */
+	if (!strcmp(cmd, "lri") || !strcmp(cmd, "lrr-dst")
+		|| !strcmp(cmd, "lrm") || !strcmp(cmd, "pipe_ctrl")) {
+		if (intel_gvt_mmio_is_non_context(gvt, offset))
+			patch_value(s, cmd_ptr(s, index), VGT_SCRATCH_REG);
+	}
+
 	/* TODO: Update the global mask if this MMIO is a masked-MMIO */
 	intel_gvt_mmio_set_cmd_accessed(gvt, offset);
 	return 0;
@@ -899,6 +910,34 @@ static int cmd_handler_lri(struct parser_exec_state *s)
 		if (ret)
 			break;
 		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
+
+		if (s->vgpu->entire_nonctxmmio_checked
+			&& intel_gvt_mmio_is_non_context(s->vgpu->gvt, cmd_reg(s, i))) {
+			int offset = cmd_reg(s, i);
+			int value = cmd_val(s, i + 1);
+			u32 *host_cache = s->vgpu->gvt->mmio.mmio_host_cache;
+
+			if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset)) {
+				u32 mask = value >> 16;
+
+				vgpu_vreg(s->vgpu, offset) =
+					(vgpu_vreg(s->vgpu, offset) & ~mask)
+					| (value & mask);
+			} else {
+				vgpu_vreg(s->vgpu, offset) = value;
+			}
+
+			if (host_cache[cmd_reg(s, i) >> 2] !=
+				vgpu_vreg(s->vgpu, offset)) {
+
+				gvt_err("vgpu%d unexpected non-context MMIO "
+					"access by cmd 0x%x:0x%x,0x%x\n",
+					s->vgpu->id,
+					(u32)cmd_reg(s, i),
+					cmd_val(s, i + 1),
+					host_cache[cmd_reg(s, i) >> 2]);
+			}
+		}
 	}
 	return ret;
 }
@@ -1050,6 +1089,7 @@ static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
 {
 	set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
 		s->workload->pending_events);
+	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
 	return 0;
 }
@@ -1209,7 +1249,8 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
 	if (!info->async_flip)
 		return 0;
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+		IS_BROXTON(dev_priv)) {
 		stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
 		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
 				GENMASK(12, 10)) >> 10;
@@ -1237,7 +1278,8 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 	set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
 		      info->surf_val << 12);
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+		IS_BROXTON(dev_priv)) {
 		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
 			      info->stride_val);
 		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1261,7 +1303,8 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
 	if (IS_BROADWELL(dev_priv))
 		return gen8_decode_mi_display_flip(s, info);
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+		IS_BROXTON(dev_priv))
 		return skl_decode_mi_display_flip(s, info);
 	return -ENODEV;
@@ -1274,6 +1317,7 @@ static int check_mi_display_flip(struct parser_exec_state *s,
 	if (IS_BROADWELL(dev_priv)
 		|| IS_SKYLAKE(dev_priv)
+		|| IS_BROXTON(dev_priv)
 		|| IS_KABYLAKE(dev_priv))
 		return gen8_check_mi_display_flip(s, info);
 	return -ENODEV;
@@ -1287,6 +1331,7 @@ static int update_plane_mmio_from_mi_display_flip(
 	if (IS_BROADWELL(dev_priv)
 		|| IS_SKYLAKE(dev_priv)
+		|| IS_BROXTON(dev_priv)
 		|| IS_KABYLAKE(dev_priv))
 		return gen8_update_plane_mmio_from_mi_display_flip(s, info);
 	return -ENODEV;
@@ -1568,6 +1613,7 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 	struct intel_gvt *gvt = s->vgpu->gvt;
 	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+		|| IS_BROXTON(gvt->dev_priv)
 		|| IS_KABYLAKE(gvt->dev_priv)) {
 		/* BDW decides privilege based on address space */
 		if (cmd_val(s, 0) & (1 << 8))
@@ -1742,6 +1788,8 @@ static int
cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) return ret; } +static int mi_noop_index; + static struct cmd_info cmd_info[] = { {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, @@ -2278,6 +2326,9 @@ static struct cmd_info cmd_info[] = { {"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, + {"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16, + NULL}, + {"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, @@ -2424,7 +2475,12 @@ static int cmd_parser_exec(struct parser_exec_state *s) cmd = cmd_val(s, 0); - info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); + /* fastpath for MI_NOOP */ + if (cmd == MI_NOOP) + info = &cmd_info[mi_noop_index]; + else + info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); + if (info == NULL) { gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", cmd, get_opcode(cmd, s->ring_id)); @@ -2599,11 +2655,39 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) return ret; } +#define GEN8_PDPES 4 +int gvt_emit_pdps(struct intel_vgpu_workload *workload) +{ + const int num_cmds = GEN8_PDPES * 2; + struct drm_i915_gem_request *req = workload->req; + struct intel_engine_cs *engine = req->engine; + u32 *cs; + u32 *pdps = (u32 *)workload->shadow_mm->shadow_page_table; + int i; + + cs = intel_ring_begin(req, num_cmds * 2 + 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(num_cmds); + for (i = 0; i < GEN8_PDPES; i++) { + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i)); + *cs++ = pdps[i * 2]; + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i)); + *cs++ = pdps[i * 2 + 1]; + } + *cs++ = MI_NOOP; + intel_ring_advance(req, cs); + + return 0; +} + static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; unsigned long gma_head, gma_tail, gma_top, guest_rb_size; - u32 *cs; + void *shadow_ring_buffer_va; + int ring_id = workload->ring_id; int ret; guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl); @@ -2616,34 +2700,42 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) gma_tail = workload->rb_start + workload->rb_tail; gma_top = workload->rb_start + guest_rb_size; - /* allocate shadow ring buffer */ - cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32)); - if (IS_ERR(cs)) - return PTR_ERR(cs); + if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) { + void *va = vgpu->reserve_ring_buffer_va[ring_id]; + /* realloc the new ring buffer if needed */ + vgpu->reserve_ring_buffer_va[ring_id] = + krealloc(va, workload->rb_len, GFP_KERNEL); + if (!vgpu->reserve_ring_buffer_va[ring_id]) { + gvt_vgpu_err("fail to alloc reserve ring buffer\n"); + return -ENOMEM; + } + vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len; + } + + shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id]; /* get shadow ring buffer va */ - workload->shadow_ring_buffer_va = cs; + workload->shadow_ring_buffer_va = shadow_ring_buffer_va; /* head > tail --> copy head <-> top */ if (gma_head > gma_tail) { ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, - gma_head, gma_top, cs); + gma_head, gma_top, shadow_ring_buffer_va); if (ret < 0) { gvt_vgpu_err("fail to copy guest ring buffer\n"); return ret; } - cs += ret / sizeof(u32); + shadow_ring_buffer_va += ret; gma_head = workload->rb_start; } /* copy head or start <-> tail */ - ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs); + ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, 
gma_head, gma_tail, + shadow_ring_buffer_va); if (ret < 0) { gvt_vgpu_err("fail to copy guest ring buffer\n"); return ret; } - cs += ret / sizeof(u32); - intel_ring_advance(workload->req, cs); return 0; } @@ -2804,6 +2896,8 @@ static int init_cmd_table(struct intel_gvt *gvt) info->name); return -EEXIST; } + if (cmd_info[i].opcode == OP_MI_NOOP) + mi_noop_index = i; INIT_HLIST_NODE(&e->hlist); add_cmd_entry(gvt, e); diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h index 286703643002..1356803a0586 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.h +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h @@ -46,4 +46,5 @@ int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload); int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx); +int gvt_emit_pdps(struct intel_vgpu_workload *workload); #endif diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 3c318439a659..a78266de7876 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -169,6 +169,23 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = { static void emulate_monitor_status_change(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + + if (IS_BROXTON(dev_priv)) { + vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA | + BXT_DE_PORT_HP_DDIB | + BXT_DE_PORT_HP_DDIC); + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) + vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |= BXT_DE_PORT_HP_DDIA; + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) + vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |= BXT_DE_PORT_HP_DDIB; + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) + vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |= BXT_DE_PORT_HP_DDIC; + return; + } + vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); @@ -203,7 +220,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |= PORT_CLK_SEL_LCPLL_810; } - vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_CTL_ENABLE; vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; } @@ -223,7 +240,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |= PORT_CLK_SEL_LCPLL_810; } - vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_CTL_ENABLE; vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; } @@ -243,7 +260,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |= PORT_CLK_SEL_LCPLL_810; } - vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_CTL_ENABLE; vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; } @@ -279,15 +296,20 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) port->dpcd = NULL; } -static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, - int type, unsigned int resolution) +static int setup_virtual_monitor(struct intel_vgpu *vgpu, int port_num, + int type, unsigned int resolution, void *edid, bool is_dp) { struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); + int valid_extensions = 1; + struct edid *tmp_edid = NULL; 
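+	/*
+	 * All bytes of an EDID block must sum to 0 modulo 256. When the
+	 * digital bit is force-set further down, the checksum byte is
+	 * decremented by the same amount so the block still validates:
+	 *   input += DRM_EDID_INPUT_DIGITAL; checksum -= DRM_EDID_INPUT_DIGITAL;
+	 */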
if (WARN_ON(resolution >= GVT_EDID_NUM)) return -EINVAL; - port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL); + if (edid) + valid_extensions += ((struct edid *)edid)->extensions; + port->edid = kzalloc(sizeof(*(port->edid)) + + valid_extensions * EDID_SIZE, GFP_KERNEL); if (!port->edid) return -ENOMEM; @@ -297,13 +319,35 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, return -ENOMEM; } - memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution], - EDID_SIZE); + if (edid) + memcpy(port->edid->edid_block, edid, + EDID_SIZE * valid_extensions); + else + memcpy(port->edid->edid_block, + virtual_dp_monitor_edid[resolution], + EDID_SIZE); + + /* Sometimes the physical display will report the EDID with no + * digital bit set, which will cause the guest fail to enumerate + * the virtual HDMI monitor. So here we will set the digital + * bit and re-calculate the checksum. + */ + tmp_edid = ((struct edid *)port->edid->edid_block); + if (!(tmp_edid->input & DRM_EDID_INPUT_DIGITAL)) { + tmp_edid->input += DRM_EDID_INPUT_DIGITAL; + tmp_edid->checksum -= DRM_EDID_INPUT_DIGITAL; + } + port->edid->data_valid = true; - memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE); - port->dpcd->data_valid = true; - port->dpcd->data[DPCD_SINK_COUNT] = 0x1; + if (is_dp) { + memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE); + port->dpcd->data_valid = true; + + + port->dpcd->data[DPCD_SINK_COUNT] = 0x1; + } + port->type = type; emulate_monitor_status_change(vgpu); @@ -403,6 +447,115 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt) emulate_vblank(vgpu); } +static void intel_gvt_vblank_work(struct work_struct *w) +{ + struct intel_gvt_pipe_info *pipe_info = container_of(w, + struct intel_gvt_pipe_info, vblank_work); + struct intel_gvt *gvt = pipe_info->gvt; + struct intel_vgpu *vgpu; + int id; + + mutex_lock(&gvt->lock); + for_each_active_vgpu(gvt, vgpu, id) + emulate_vblank_on_pipe(vgpu, pipe_info->pipe_num); + mutex_unlock(&gvt->lock); +} + +int bxt_check_planes(struct intel_vgpu *vgpu, int pipe) +{ + int plane = 0; + bool ret = false; + + for (plane = 0; + plane < ((INTEL_INFO(vgpu->gvt->dev_priv)->num_sprites[pipe]) + 1); + plane++) { + if (vgpu->gvt->pipe_info[pipe].plane_owner[plane] == vgpu->id) { + ret = true; + break; + } + } + return ret; +} + +#define BITS_PER_DOMAIN 4 +#define MAX_SCALERS_PER_DOMAIN 2 + +#define DOMAIN_SCALER_OWNER(owner, pipe, scaler) \ + ((((owner) >> (pipe) * BITS_PER_DOMAIN * MAX_SCALERS_PER_DOMAIN) >> \ + BITS_PER_DOMAIN * (scaler)) & 0xf) + +void intel_gvt_init_pipe_info(struct intel_gvt *gvt) +{ + enum pipe pipe; + unsigned int scaler; + unsigned int domain_scaler_owner = i915_modparams.domain_scaler_owner; + struct drm_i915_private *dev_priv = gvt->dev_priv; + + for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) { + gvt->pipe_info[pipe].pipe_num = pipe; + gvt->pipe_info[pipe].gvt = gvt; + INIT_WORK(&gvt->pipe_info[pipe].vblank_work, + intel_gvt_vblank_work); + /* Each nibble represents domain id + * ids can be from 0-F. 
0 for Dom0, 1,2,3...0xF for DomUs + * scaler_owner[i] holds the id of the domain that owns it, + * eg:0,1,2 etc + */ + for_each_universal_scaler(dev_priv, pipe, scaler) + gvt->pipe_info[pipe].scaler_owner[scaler] = + DOMAIN_SCALER_OWNER(domain_scaler_owner, pipe, scaler); + } +} + +int setup_virtual_monitors(struct intel_vgpu *vgpu) +{ + struct intel_connector *connector = NULL; + struct drm_connector_list_iter conn_iter; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + int pipe = 0; + int ret = 0; + int type = i915_modparams.gvt_emulate_hdmi ? GVT_HDMI_A : GVT_DP_A; + int port = PORT_B; + + /* BXT have to use port A for HDMI to support 3 HDMI monitors */ + if (IS_BROXTON(dev_priv)) + port = PORT_A; + + drm_connector_list_iter_begin(&vgpu->gvt->dev_priv->drm, &conn_iter); + for_each_intel_connector_iter(connector, &conn_iter) { + if (connector->encoder->get_hw_state(connector->encoder, &pipe) + && connector->detect_edid) { + /* if no planes are allocated for this pipe, skip it */ + if (i915_modparams.avail_planes_per_pipe && + !bxt_check_planes(vgpu, pipe)) + continue; + /* Get (Dom0) port associated with current pipe. */ + port = enc_to_dig_port( + &(connector->encoder->base))->port; + ret = setup_virtual_monitor(vgpu, port, + type, 0, connector->detect_edid, + !i915_modparams.gvt_emulate_hdmi); + if (ret) + return ret; + type++; + port++; + } + } + return 0; +} + +void clean_virtual_monitors(struct intel_vgpu *vgpu) +{ + int port = 0; + + for (port = PORT_A; port < INTEL_GVT_MAX_PORT; port++) { + struct intel_vgpu_port *p = intel_vgpu_port(vgpu, port); + + if (p->edid) + clean_virtual_dp_monitor(vgpu, port); + } +} + /** * intel_vgpu_clean_display - clean vGPU virtual display emulation * @vgpu: a vGPU @@ -414,7 +567,9 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + if (IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) + clean_virtual_monitors(vgpu); + else if (IS_SKYLAKE(dev_priv)) clean_virtual_dp_monitor(vgpu, PORT_D); else clean_virtual_dp_monitor(vgpu, PORT_B); @@ -436,12 +591,14 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution) intel_vgpu_init_i2c_edid(vgpu); - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) - return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D, - resolution); + if (IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) + return setup_virtual_monitors(vgpu); + else if (IS_SKYLAKE(dev_priv)) + return setup_virtual_monitor(vgpu, + PORT_D, GVT_DP_D, resolution, NULL, true); else - return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B, - resolution); + return setup_virtual_monitor(vgpu, + PORT_B, GVT_DP_B, resolution, NULL, true); } /** diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h index d73de22102e2..3b8c52cd61b2 100644 --- a/drivers/gpu/drm/i915/gvt/display.h +++ b/drivers/gpu/drm/i915/gvt/display.h @@ -140,6 +140,7 @@ enum intel_vgpu_port_type { GVT_DP_B, GVT_DP_C, GVT_DP_D, + GVT_HDMI_A, GVT_HDMI_B, GVT_HDMI_C, GVT_HDMI_D, diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index 42cd09ec63fa..8acf9373171d 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -55,10 +55,6 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu) gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n"); return 0; } - if (edid->current_edid_read >= EDID_SIZE) { - gvt_vgpu_err("edid_get_byte() exceeds the size of 
EDID!\n"); - return 0; - } if (!edid->edid_available) { gvt_vgpu_err("Reading EDID but EDID is not available!\n"); @@ -77,19 +73,30 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu) return chr; } -static inline int get_port_from_gmbus0(u32 gmbus0) +static inline int get_port_from_gmbus0(struct intel_vgpu *vgpu, u32 gmbus0) { + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK; int port = -EINVAL; - if (port_select == 2) - port = PORT_E; - else if (port_select == 4) - port = PORT_C; - else if (port_select == 5) - port = PORT_B; - else if (port_select == 6) - port = PORT_D; + if (IS_BROXTON(dev_priv)) { + if (port_select == 1) + port = PORT_B; + else if (port_select == 2) + port = PORT_C; + else if (port_select == 3) + port = PORT_A; + } else { + if (port_select == 2) + port = PORT_E; + else if (port_select == 4) + port = PORT_C; + else if (port_select == 5) + port = PORT_B; + else if (port_select == 6) + port = PORT_D; + } + return port; } @@ -116,7 +123,7 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu, if (pin_select == 0) return 0; - port = get_port_from_gmbus0(pin_select); + port = get_port_from_gmbus0(vgpu, pin_select); if (WARN_ON(port < 0)) return 0; @@ -434,6 +441,8 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, u32 value = *(u32 *)p_data; int aux_data_for_write = 0; int reg = get_aux_ch_reg(offset); + uint8_t rxbuf[20] = {0}; + size_t rxsize; if (reg != AUX_CH_CTL) { vgpu_vreg(vgpu, offset) = value; @@ -441,6 +450,12 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, } msg_length = AUX_CTL_MSG_LENGTH(value); + if (WARN_ON(msg_length <= 0 || msg_length > 20)) + return; + + for (rxsize = 0; rxsize < msg_length; rxsize += 4) + intel_dp_unpack_aux(vgpu_vreg(vgpu, offset + 4 + rxsize), + rxbuf + rxsize, msg_length - rxsize); // check the msg in DATA register. msg = vgpu_vreg(vgpu, offset + 4); addr = (msg >> 8) & 0xffff; @@ -480,12 +495,14 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, } } } else if ((op & 0x1) == GVT_AUX_I2C_WRITE) { - /* TODO - * We only support EDID reading from I2C_over_AUX. And - * we do not expect the index mode to be used. Right now - * the WRITE operation is ignored. It is good enough to - * support the gfx driver to do EDID access. + /* We only support EDID reading from I2C_over_AUX. 
+ * But if EDID has extension blocks, we use this write + * operation to set block starting address */ + if (addr == EDID_ADDR) { + if (msg_length > 4) + i2c_edid->current_edid_read = rxbuf[4]; + } } else { if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ)) return; diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h index f6dfc8b795ec..11a75d69062d 100644 --- a/drivers/gpu/drm/i915/gvt/edid.h +++ b/drivers/gpu/drm/i915/gvt/edid.h @@ -48,7 +48,7 @@ struct intel_vgpu_edid_data { bool data_valid; - unsigned char edid_block[EDID_SIZE]; + unsigned char edid_block[0]; }; enum gmbus_cycle_type { diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index e5320b4eb698..9d74080f32f1 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -46,6 +46,7 @@ #define same_context(a, b) (((a)->context_id == (b)->context_id) && \ ((a)->lrca == (b)->lrca)) +bool gvt_shadow_wa_ctx = false; static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask); static int context_switch_events[] = { @@ -368,7 +369,7 @@ static void free_workload(struct intel_vgpu_workload *workload) #define get_desc_from_elsp_dwords(ed, i) \ ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) -static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) +static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) { const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; struct intel_shadow_bb_entry *entry_obj; @@ -379,7 +380,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); if (IS_ERR(vma)) { - return; + return PTR_ERR(vma); } /* FIXME: we are not tracking our pinned VMA leaving it @@ -392,6 +393,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) if (gmadr_bytes == 8) entry_obj->bb_start_cmd_va[2] = 0; } + return 0; } static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) @@ -420,7 +422,7 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) return 0; } -static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) +static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) { struct i915_vma *vma; unsigned char *per_ctx_va = @@ -428,12 +430,12 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) wa_ctx->indirect_ctx.size; if (wa_ctx->indirect_ctx.size == 0) - return; + return 0; vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, 0, CACHELINE_BYTES, 0); if (IS_ERR(vma)) { - return; + return PTR_ERR(vma); } /* FIXME: we are not tracking our pinned VMA leaving it @@ -447,26 +449,7 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) memset(per_ctx_va, 0, CACHELINE_BYTES); update_wa_ctx_2_shadow_ctx(wa_ctx); -} - -static int prepare_execlist_workload(struct intel_vgpu_workload *workload) -{ - struct intel_vgpu *vgpu = workload->vgpu; - struct execlist_ctx_descriptor_format ctx[2]; - int ring_id = workload->ring_id; - - intel_vgpu_pin_mm(workload->shadow_mm); - intel_vgpu_sync_oos_pages(workload->vgpu); - intel_vgpu_flush_post_shadow(workload->vgpu); - prepare_shadow_batch_buffer(workload); - prepare_shadow_wa_ctx(&workload->wa_ctx); - if (!workload->emulate_schedule_in) - return 0; - - ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1); - ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0); - - return 
emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx); + return 0; } static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) @@ -489,13 +472,64 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) } } -static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) +static int prepare_execlist_workload(struct intel_vgpu_workload *workload) { - if (!wa_ctx->indirect_ctx.obj) - return; + struct intel_vgpu *vgpu = workload->vgpu; + struct execlist_ctx_descriptor_format ctx[2]; + int ring_id = workload->ring_id; + int ret; + + ret = intel_vgpu_pin_mm(workload->shadow_mm); + if (ret) { + gvt_vgpu_err("fail to vgpu pin mm\n"); + goto out; + } + + ret = intel_vgpu_sync_oos_pages(workload->vgpu); + if (ret) { + gvt_vgpu_err("fail to vgpu sync oos pages\n"); + goto err_unpin_mm; + } + + ret = intel_vgpu_flush_post_shadow(workload->vgpu); + if (ret) { + gvt_vgpu_err("fail to flush post shadow\n"); + goto err_unpin_mm; + } + + ret = prepare_shadow_batch_buffer(workload); + if (ret) { + gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n"); + goto err_unpin_mm; + } - i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); - i915_gem_object_put(wa_ctx->indirect_ctx.obj); + if (gvt_shadow_wa_ctx) + ret = prepare_shadow_wa_ctx(&workload->wa_ctx); + + if (ret) { + gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n"); + goto err_shadow_batch; + } + + if (!workload->emulate_schedule_in) + return 0; + + ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1); + ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0); + + ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx); + if (!ret) + goto out; + else + gvt_vgpu_err("fail to emulate execlist schedule in\n"); + + release_shadow_wa_ctx(&workload->wa_ctx); +err_shadow_batch: + release_shadow_batch_buffer(workload); +err_unpin_mm: + intel_vgpu_unpin_mm(workload->shadow_mm); +out: + return ret; } static int complete_execlist_workload(struct intel_vgpu_workload *workload) @@ -511,8 +545,10 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload) gvt_dbg_el("complete workload %p status %d\n", workload, workload->status); - release_shadow_batch_buffer(workload); - release_shadow_wa_ctx(&workload->wa_ctx); + if(!workload->status || gvt_shadow_wa_ctx) { + release_shadow_batch_buffer(workload); + release_shadow_wa_ctx(&workload->wa_ctx); + } if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { /* if workload->status is not successful means HW GPU @@ -569,7 +605,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu, u64 gpa; int i; - gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val); + gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val); for (i = 0; i < 8; i++) intel_gvt_hypervisor_read_gpa(vgpu, @@ -730,7 +766,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, intel_runtime_pm_put(dev_priv); } - queue_workload(workload); + intel_vgpu_queue_workload(workload); return 0; } @@ -738,7 +774,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) { struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id]; struct execlist_ctx_descriptor_format desc[2]; - int i, ret; + int i, ret = 0; desc[0] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1); desc[1] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0); @@ -757,6 +793,9 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) } } + mutex_unlock(&vgpu->gvt->lock); + mutex_lock(&vgpu->gvt->sched_lock); + mutex_lock(&vgpu->gvt->lock); /* 
submit workload */ for (i = 0; i < ARRAY_SIZE(desc); i++) { if (!desc[i].valid) @@ -764,11 +803,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) ret = submit_context(vgpu, ring_id, &desc[i], i == 0); if (ret) { gvt_vgpu_err("failed to submit desc %d\n", i); - return ret; + goto out; } } - return 0; +out: + mutex_unlock(&vgpu->gvt->sched_lock); + return ret; inv_desc: gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n", @@ -819,10 +860,21 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask) void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) { + enum intel_engine_id i; + struct intel_engine_cs *engine; + clean_workloads(vgpu, ALL_ENGINES); kmem_cache_destroy(vgpu->workloads); + + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + kfree(vgpu->reserve_ring_buffer_va[i]); + vgpu->reserve_ring_buffer_va[i] = NULL; + vgpu->reserve_ring_buffer_size[i] = 0; + } + } +#define RESERVE_RING_BUFFER_SIZE ((1 * PAGE_SIZE)/8) int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) { enum intel_engine_id i; @@ -842,7 +894,26 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) if (!vgpu->workloads) return -ENOMEM; + /* each ring has a shadow ring buffer until vgpu destroyed */ + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + vgpu->reserve_ring_buffer_va[i] = + kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL); + if (!vgpu->reserve_ring_buffer_va[i]) { + gvt_vgpu_err("fail to alloc reserve ring buffer\n"); + goto out; + } + vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE; + } return 0; +out: + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + if (vgpu->reserve_ring_buffer_size[i]) { + kfree(vgpu->reserve_ring_buffer_va[i]); + vgpu->reserve_ring_buffer_va[i] = NULL; + vgpu->reserve_ring_buffer_size[i] = 0; + } + } + return -ENOMEM; } void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h index 7eced40a1e30..d2348b419303 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.h +++ b/drivers/gpu/drm/i915/gvt/execlist.h @@ -146,14 +146,11 @@ struct execlist_ring_context { u32 nop4; u32 lri_cmd_2; struct execlist_mmio_pair ctx_timestamp; - struct execlist_mmio_pair pdp3_UDW; - struct execlist_mmio_pair pdp3_LDW; - struct execlist_mmio_pair pdp2_UDW; - struct execlist_mmio_pair pdp2_LDW; - struct execlist_mmio_pair pdp1_UDW; - struct execlist_mmio_pair pdp1_LDW; - struct execlist_mmio_pair pdp0_UDW; - struct execlist_mmio_pair pdp0_LDW; + /* + * pdps[8]={ pdp3_UDW, pdp3_LDW, pdp2_UDW, pdp2_LDW, + * pdp1_UDW, pdp1_LDW, pdp0_UDW, pdp0_LDW} + */ + struct execlist_mmio_pair pdps[8]; }; struct intel_vgpu_elsp_dwords { diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c new file mode 100644 index 000000000000..6f361ec9f70d --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -0,0 +1,427 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "../i915_drv.h" +#include +#include + +#define FORMAT_NUM 16 +struct pixel_format { + int drm_format; /* Pixel format in DRM definition */ + int bpp; /* Bits per pixel, 0 indicates invalid */ + char *desc; /* The description */ +}; + +/* non-supported format has bpp default to 0 */ +static struct pixel_format primary_pixel_formats[FORMAT_NUM] = { + [0b0010] = {DRM_FORMAT_C8, 8, "8-bit Indexed"}, + [0b0101] = {DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"}, + [0b0110] = {DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"}, + [0b1000] = {DRM_FORMAT_XBGR2101010, 32, "32-bit RGBX (2:10:10:10 MSB-X:B:G:R)"}, + [0b1010] = {DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"}, + [0b1100] = {DRM_FORMAT_XRGB161616_VGT, 64, + "64-bit RGBX Floating Point(16:16:16:16 MSB-X:B:G:R)"}, + [0b1110] = {DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"}, +}; + +/* non-supported format has bpp default to 0 */ +static struct pixel_format skl_pixel_formats[] = { + {DRM_FORMAT_YUYV, 16, "16-bit packed YUYV (8:8:8:8 MSB-V:Y2:U:Y1)"}, + {DRM_FORMAT_UYVY, 16, "16-bit packed UYVY (8:8:8:8 MSB-Y2:V:Y1:U)"}, + {DRM_FORMAT_YVYU, 16, "16-bit packed YVYU (8:8:8:8 MSB-U:Y2:V:Y1)"}, + {DRM_FORMAT_VYUY, 16, "16-bit packed VYUY (8:8:8:8 MSB-Y2:U:Y1:V)"}, + + {DRM_FORMAT_C8, 8, "8-bit Indexed"}, + {DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"}, + {DRM_FORMAT_ABGR8888, 32, "32-bit RGBA (8:8:8:8 MSB-A:B:G:R)"}, + {DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"}, + + {DRM_FORMAT_ARGB8888, 32, "32-bit BGRA (8:8:8:8 MSB-A:R:G:B)"}, + {DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"}, + {DRM_FORMAT_XBGR2101010, 32, "32-bit RGBX (2:10:10:10 MSB-X:B:G:R)"}, + {DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"}, + + {DRM_FORMAT_XRGB161616_VGT, 64, "64-bit XRGB (16:16:16:16 MSB-X:R:G:B)"}, + {DRM_FORMAT_XBGR161616_VGT, 64, "64-bit XBGR (16:16:16:16 MSB-X:B:G:R)"}, + + /* non-supported format has bpp default to 0 */ + {0, 0, NULL}, +}; + +static int skl_format_to_drm(int format, bool rgb_order, bool alpha, int yuv_order) +{ + int skl_pixel_formats_index = 14; + + switch (format) { + case PLANE_CTL_FORMAT_INDEXED: + skl_pixel_formats_index = 4; + break; + case PLANE_CTL_FORMAT_RGB_565: + skl_pixel_formats_index = 5; + break; + case PLANE_CTL_FORMAT_XRGB_8888: + if (rgb_order) + skl_pixel_formats_index = alpha ? 
6 : 7; + else + skl_pixel_formats_index = alpha ? 8 : 9; + break; + case PLANE_CTL_FORMAT_XRGB_2101010: + skl_pixel_formats_index = rgb_order ? 10 : 11; + break; + + case PLANE_CTL_FORMAT_XRGB_16161616F: + skl_pixel_formats_index = rgb_order ? 12 : 13; + break; + + case PLANE_CTL_FORMAT_YUV422: + skl_pixel_formats_index = yuv_order >> 16; + if (skl_pixel_formats_index > 3) + return -EINVAL; + break; + + default: + break; + } + + return skl_pixel_formats_index; +} + +static u32 gvt_get_stride(struct intel_vgpu *vgpu, int pipe, u32 tiled, + int stride_mask, int bpp) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + + u32 stride_reg = vgpu_vreg(vgpu, DSPSTRIDE(pipe)) & stride_mask; + u32 stride = stride_reg; + + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || + IS_KABYLAKE(dev_priv)) { + switch (tiled) { + case PLANE_CTL_TILED_LINEAR: + stride = stride_reg * 64; + break; + case PLANE_CTL_TILED_X: + stride = stride_reg * 512; + break; + case PLANE_CTL_TILED_Y: + stride = stride_reg * 128; + break; + case PLANE_CTL_TILED_YF: + if (bpp == 8) + stride = stride_reg * 64; + else if (bpp == 16 || bpp == 32 || bpp == 64) + stride = stride_reg * 128; + else + DRM_DEBUG_KMS("skl: unsupported bpp:%d\n", bpp); + break; + default: + DRM_DEBUG_KMS("skl: unsupported tile format:%x\n", tiled); + } + } + + return stride; +} + +static int gvt_decode_primary_plane_format(struct intel_vgpu *vgpu, + int pipe, struct gvt_primary_plane_format *plane) +{ + u32 val, fmt; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + + val = vgpu_vreg(vgpu, DSPCNTR(pipe)); + plane->enabled = !!(val & DISPLAY_PLANE_ENABLE); + if (!plane->enabled) + return 0; + + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || + IS_KABYLAKE(dev_priv)) { + plane->tiled = (val & PLANE_CTL_TILED_MASK) >> _PLANE_CTL_TILED_SHIFT; + fmt = skl_format_to_drm( + val & PLANE_CTL_FORMAT_MASK, + val & PLANE_CTL_ORDER_RGBX, + val & PLANE_CTL_ALPHA_MASK, + val & PLANE_CTL_YUV422_ORDER_MASK); + plane->bpp = skl_pixel_formats[fmt].bpp; + plane->drm_format = skl_pixel_formats[fmt].drm_format; + } else { + plane->tiled = !!(val & DISPPLANE_TILED); + fmt = (val & DISPPLANE_PIXFORMAT_MASK) >> _PRI_PLANE_FMT_SHIFT; + plane->bpp = primary_pixel_formats[fmt].bpp; + plane->drm_format = primary_pixel_formats[fmt].drm_format; + } + + if (((IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || + IS_KABYLAKE(dev_priv)) && !skl_pixel_formats[fmt].bpp) || + (!(IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || + IS_KABYLAKE(dev_priv)) && !primary_pixel_formats[fmt].bpp)) { + gvt_err("Non-supported pixel format (0x%x)\n", fmt); + return -EINVAL; + } + + plane->hw_format = fmt; + + plane->base = vgpu_vreg(vgpu, DSPSURF(pipe)) & GTT_PAGE_MASK; + + plane->stride = gvt_get_stride(vgpu, pipe, (plane->tiled << 10), + (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || + IS_KABYLAKE(dev_priv)) ? 
+			(_PRI_PLANE_STRIDE_MASK >> 6)
+			: _PRI_PLANE_STRIDE_MASK, plane->bpp);
+
+	plane->width = (vgpu_vreg(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
+		_PIPE_H_SRCSZ_SHIFT;
+	plane->width += 1;
+	plane->height = (vgpu_vreg(vgpu, PIPESRC(pipe)) &
+			_PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT;
+	plane->height += 1;	/* raw height is one minus the real value */
+
+	val = vgpu_vreg(vgpu, DSPTILEOFF(pipe));
+	plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >>
+		_PRI_PLANE_X_OFF_SHIFT;
+	plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >>
+		_PRI_PLANE_Y_OFF_SHIFT;
+
+	return 0;
+}
+
+#define CURSOR_MODE_NUM	(1 << 6)
+struct cursor_mode_format {
+	int	drm_format;	/* Pixel format in DRM definition */
+	u8	bpp;		/* Bits per pixel; 0 indicates invalid */
+	u32	width;		/* In pixels */
+	u32	height;		/* In lines */
+	char	*desc;		/* The description */
+};
+
+/* non-supported format has bpp default to 0 */
+static struct cursor_mode_format cursor_pixel_formats[CURSOR_MODE_NUM] = {
+	[0b100010] = {DRM_FORMAT_ARGB8888, 32, 128, 128, "128x128 32bpp ARGB"},
+	[0b100011] = {DRM_FORMAT_ARGB8888, 32, 256, 256, "256x256 32bpp ARGB"},
+	[0b100111] = {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},
+	/* FIXME: content for this mode is reportedly inverted; investigate */
+	[0b000111] = {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},
+};
+
+static int gvt_decode_cursor_plane_format(struct intel_vgpu *vgpu,
+	int pipe, struct gvt_cursor_plane_format *plane)
+{
+	u32 val, mode;
+	u32 alpha_plane, alpha_force;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+	val = vgpu_vreg(vgpu, CURCNTR(pipe));
+	mode = val & CURSOR_MODE;
+	plane->enabled = (mode != CURSOR_MODE_DISABLE);
+	if (!plane->enabled)
+		return 0;
+
+	if (!cursor_pixel_formats[mode].bpp) {
+		gvt_err("Non-supported cursor mode (0x%x)\n", mode);
+		return -EINVAL;
+	}
+	plane->mode = mode;
+	plane->bpp = cursor_pixel_formats[mode].bpp;
+	plane->drm_format = cursor_pixel_formats[mode].drm_format;
+	plane->width = cursor_pixel_formats[mode].width;
+	plane->height = cursor_pixel_formats[mode].height;
+
+	alpha_plane = (val & _CURSOR_ALPHA_PLANE_MASK) >>
+		_CURSOR_ALPHA_PLANE_SHIFT;
+	alpha_force = (val & _CURSOR_ALPHA_FORCE_MASK) >>
+		_CURSOR_ALPHA_FORCE_SHIFT;
+	if (alpha_plane || alpha_force)
+		gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n",
+			alpha_plane, alpha_force);
+
+	plane->base = vgpu_vreg(vgpu, CURBASE(pipe)) & GTT_PAGE_MASK;
+
+	val = vgpu_vreg(vgpu, CURPOS(pipe));
+	plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT;
+	plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT;
+	plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
+	plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;
+
+	return 0;
+}
+
+#define FORMAT_NUM_SPRITE (1 << 3)
+
+static struct pixel_format sprite_pixel_formats[FORMAT_NUM_SPRITE] = {
+	[0b000] = {DRM_FORMAT_YUV422, 16, "YUV 16-bit 4:2:2 packed"},
+	[0b001] = {DRM_FORMAT_XRGB2101010, 32, "RGB 32-bit 2:10:10:10"},
+	[0b010] = {DRM_FORMAT_XRGB8888, 32, "RGB 32-bit 8:8:8:8"},
+	[0b011] = {DRM_FORMAT_XRGB161616_VGT, 64,
+		    "RGB 64-bit 16:16:16:16 Floating Point"},
+	[0b100] = {DRM_FORMAT_AYUV, 32, "YUV 32-bit 4:4:4 packed (8:8:8:8 MSB-X:Y:U:V)"},
+};
+
+static int gvt_decode_sprite_plane_format(struct intel_vgpu *vgpu,
+	int pipe, struct gvt_sprite_plane_format *plane)
+{
+	u32 val, fmt;
+	u32 width;
+	u32 color_order, yuv_order;
+	int drm_format;
+
+	val = vgpu_vreg(vgpu, SPRCTL(pipe));
+	plane->enabled = !!(val & SPRITE_ENABLE);
+	if (!plane->enabled)
+		return 0;
+
+	plane->tiled = !!(val & SPRITE_TILED);
+	color_order = !!(val & SPRITE_RGB_ORDER_RGBX);
+	yuv_order = (val & SPRITE_YUV_BYTE_ORDER_MASK) >>
+		_SPRITE_YUV_ORDER_SHIFT;
+
+	fmt = (val & SPRITE_PIXFORMAT_MASK) >> _SPRITE_FMT_SHIFT;
+	if (!sprite_pixel_formats[fmt].bpp) {
+		gvt_err("Non-supported pixel format (0x%x)\n", fmt);
+		return -EINVAL;
+	}
+	plane->hw_format = fmt;
+	plane->bpp = sprite_pixel_formats[fmt].bpp;
+	drm_format = sprite_pixel_formats[fmt].drm_format;
+
+	/* Values in an RGBxxx buffer may be ordered RGB or BGR,
+	 * depending on the state of the color_order field
+	 */
+	if (!color_order) {
+		if (drm_format == DRM_FORMAT_XRGB2101010)
+			drm_format = DRM_FORMAT_XBGR2101010;
+		else if (drm_format == DRM_FORMAT_XRGB8888)
+			drm_format = DRM_FORMAT_XBGR8888;
+	}
+
+	if (drm_format == DRM_FORMAT_YUV422) {
+		switch (yuv_order) {
+		case 0:
+			drm_format = DRM_FORMAT_YUYV;
+			break;
+		case 1:
+			drm_format = DRM_FORMAT_UYVY;
+			break;
+		case 2:
+			drm_format = DRM_FORMAT_YVYU;
+			break;
+		case 3:
+			drm_format = DRM_FORMAT_VYUY;
+			break;
+		default:
+			/* yuv_order has only 2 bits */
+			BUG();
+			break;
+		}
+	}
+
+	plane->drm_format = drm_format;
+
+	plane->base = vgpu_vreg(vgpu, SPRSURF(pipe)) & GTT_PAGE_MASK;
+	plane->width = vgpu_vreg(vgpu, SPRSTRIDE(pipe)) &
+		_SPRITE_STRIDE_MASK;
+	plane->width /= plane->bpp / 8;	/* the stride is in bytes; convert to pixels */
+
+	val = vgpu_vreg(vgpu, SPRSIZE(pipe));
+	plane->height = (val & _SPRITE_SIZE_HEIGHT_MASK) >>
+		_SPRITE_SIZE_HEIGHT_SHIFT;
+	width = (val & _SPRITE_SIZE_WIDTH_MASK) >> _SPRITE_SIZE_WIDTH_SHIFT;
+	plane->height += 1;	/* raw height is one minus the real value */
+	width += 1;		/* raw width is one minus the real value */
+	if (plane->width != width)
+		gvt_dbg_core("sprite_plane: plane->width=%u, width=%u\n",
+			plane->width, width);
+
+	val = vgpu_vreg(vgpu, SPRPOS(pipe));
+	plane->x_pos = (val & _SPRITE_POS_X_MASK) >> _SPRITE_POS_X_SHIFT;
+	plane->y_pos = (val & _SPRITE_POS_Y_MASK) >> _SPRITE_POS_Y_SHIFT;
+
+	val = vgpu_vreg(vgpu, SPROFFSET(pipe));
+	plane->x_offset = (val & _SPRITE_OFFSET_START_X_MASK) >>
+		_SPRITE_OFFSET_START_X_SHIFT;
+	plane->y_offset = (val & _SPRITE_OFFSET_START_Y_MASK) >>
+		_SPRITE_OFFSET_START_Y_SHIFT;
+	return 0;
+}
+
+/**
+ * gvt_decode_fb_format - Decode framebuffer information from raw vMMIO
+ * @gvt: GVT device
+ * @id: guest domain ID
+ * @fb: frame buffer information of the guest
+ *
+ * This function is called to query the guest frame buffer format, so that
+ * GL can display the guest frame buffer in Dom0.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
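+ *
+ * A minimal caller sketch (hypothetical, for illustration only; vgpu_id
+ * stands in for a real guest domain ID):
+ *
+ *	struct gvt_fb_format fb;
+ *
+ *	if (!gvt_decode_fb_format(gvt, vgpu_id, &fb) &&
+ *	    fb.pipes[0].primary.enabled)
+ *		pr_info("guest fb: %ux%u, bpp %d\n",
+ *			fb.pipes[0].primary.width,
+ *			fb.pipes[0].primary.height,
+ *			fb.pipes[0].primary.bpp);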
+ */ +int gvt_decode_fb_format(struct intel_gvt *gvt, int id, struct gvt_fb_format *fb) +{ + int i; + struct intel_vgpu *vgpu = NULL; + int ret = 0; + struct drm_i915_private *dev_priv = gvt->dev_priv; + + if (!fb) + return -EINVAL; + + /* TODO: use fine-grained refcnt later */ + mutex_lock(&gvt->lock); + + for_each_active_vgpu(gvt, vgpu, i) + if (vgpu->id == id) + break; + + if (!vgpu) { + gvt_err("Invalid vgpu ID (%d)\n", id); + mutex_unlock(&gvt->lock); + return -ENODEV; + } + + for (i = 0; i < I915_MAX_PIPES; i++) { + struct gvt_pipe_format *pipe = &fb->pipes[i]; + u32 ddi_func_ctl = vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(i)); + + if (!(ddi_func_ctl & TRANS_DDI_FUNC_ENABLE)) { + pipe->ddi_port = DDI_PORT_NONE; + } else { + u32 port = (ddi_func_ctl & TRANS_DDI_PORT_MASK) >> + TRANS_DDI_PORT_SHIFT; + if (port <= DDI_PORT_E) + pipe->ddi_port = port; + else + pipe->ddi_port = DDI_PORT_NONE; + } + + ret |= gvt_decode_primary_plane_format(vgpu, i, &pipe->primary); + ret |= gvt_decode_sprite_plane_format(vgpu, i, &pipe->sprite); + ret |= gvt_decode_cursor_plane_format(vgpu, i, &pipe->cursor); + + if (ret) { + gvt_err("Decode format error for pipe(%d)\n", i); + ret = -EINVAL; + break; + } + } + + mutex_unlock(&gvt->lock); + + return ret; +} diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h new file mode 100644 index 000000000000..180d392a96d9 --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h @@ -0,0 +1,155 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _GVT_FB_DECODER_H_ +#define _GVT_FB_DECODER_H_ + +#define _PLANE_CTL_FORMAT_SHIFT 24 +#define _PLANE_CTL_TILED_SHIFT 10 +#define _PIPE_V_SRCSZ_SHIFT 0 +#define _PIPE_V_SRCSZ_MASK (0xfff << _PIPE_V_SRCSZ_SHIFT) +#define _PIPE_H_SRCSZ_SHIFT 16 +#define _PIPE_H_SRCSZ_MASK (0x1fff << _PIPE_H_SRCSZ_SHIFT) + +#define _PRI_PLANE_FMT_SHIFT 26 +#define _PRI_PLANE_STRIDE_MASK (0x3ff << 6) +#define _PRI_PLANE_X_OFF_SHIFT 0 +#define _PRI_PLANE_X_OFF_MASK (0x1fff << _PRI_PLANE_X_OFF_SHIFT) +#define _PRI_PLANE_Y_OFF_SHIFT 16 +#define _PRI_PLANE_Y_OFF_MASK (0xfff << _PRI_PLANE_Y_OFF_SHIFT) + +#define _CURSOR_MODE 0x3f +#define _CURSOR_ALPHA_FORCE_SHIFT 8 +#define _CURSOR_ALPHA_FORCE_MASK (0x3 << _CURSOR_ALPHA_FORCE_SHIFT) +#define _CURSOR_ALPHA_PLANE_SHIFT 10 +#define _CURSOR_ALPHA_PLANE_MASK (0x3 << _CURSOR_ALPHA_PLANE_SHIFT) +#define _CURSOR_POS_X_SHIFT 0 +#define _CURSOR_POS_X_MASK (0x1fff << _CURSOR_POS_X_SHIFT) +#define _CURSOR_SIGN_X_SHIFT 15 +#define _CURSOR_SIGN_X_MASK (1 << _CURSOR_SIGN_X_SHIFT) +#define _CURSOR_POS_Y_SHIFT 16 +#define _CURSOR_POS_Y_MASK (0xfff << _CURSOR_POS_Y_SHIFT) +#define _CURSOR_SIGN_Y_SHIFT 31 +#define _CURSOR_SIGN_Y_MASK (1 << _CURSOR_SIGN_Y_SHIFT) + +#define _SPRITE_FMT_SHIFT 25 +#define _SPRITE_COLOR_ORDER_SHIFT 20 +#define _SPRITE_YUV_ORDER_SHIFT 16 +#define _SPRITE_STRIDE_SHIFT 6 +#define _SPRITE_STRIDE_MASK (0x1ff << _SPRITE_STRIDE_SHIFT) +#define _SPRITE_SIZE_WIDTH_SHIFT 0 +#define _SPRITE_SIZE_HEIGHT_SHIFT 16 +#define _SPRITE_SIZE_WIDTH_MASK (0x1fff << _SPRITE_SIZE_WIDTH_SHIFT) +#define _SPRITE_SIZE_HEIGHT_MASK (0xfff << _SPRITE_SIZE_HEIGHT_SHIFT) +#define _SPRITE_POS_X_SHIFT 0 +#define _SPRITE_POS_Y_SHIFT 16 +#define _SPRITE_POS_X_MASK (0x1fff << _SPRITE_POS_X_SHIFT) +#define _SPRITE_POS_Y_MASK (0xfff << _SPRITE_POS_Y_SHIFT) +#define _SPRITE_OFFSET_START_X_SHIFT 0 +#define _SPRITE_OFFSET_START_Y_SHIFT 16 +#define _SPRITE_OFFSET_START_X_MASK (0x1fff << _SPRITE_OFFSET_START_X_SHIFT) +#define _SPRITE_OFFSET_START_Y_MASK (0xfff << _SPRITE_OFFSET_START_Y_SHIFT) + +typedef enum { + FB_MODE_SET_START = 1, + FB_MODE_SET_END, + FB_DISPLAY_FLIP, +}gvt_fb_event_t; + +typedef enum { + DDI_PORT_NONE = 0, + DDI_PORT_B = 1, + DDI_PORT_C = 2, + DDI_PORT_D = 3, + DDI_PORT_E = 4 +} ddi_port_t; + +struct intel_gvt; + +struct gvt_fb_notify_msg { + unsigned vm_id; + unsigned pipe_id; /* id starting from 0 */ + unsigned plane_id; /* primary, cursor, or sprite */ +}; + +/* color space conversion and gamma correction are not included */ +struct gvt_primary_plane_format { + u8 enabled; /* plane is enabled */ + u8 tiled; /* X-tiled */ + u8 bpp; /* bits per pixel */ + u32 hw_format; /* format field in the PRI_CTL register */ + u32 drm_format; /* format in DRM definition */ + u32 base; /* framebuffer base in graphics memory */ + u32 x_offset; /* in pixels */ + u32 y_offset; /* in lines */ + u32 width; /* in pixels */ + u32 height; /* in lines */ + u32 stride; /* in bytes */ +}; + +struct gvt_sprite_plane_format { + u8 enabled; /* plane is enabled */ + u8 tiled; /* X-tiled */ + u8 bpp; /* bits per pixel */ + u32 hw_format; /* format field in the SPR_CTL register */ + u32 drm_format; /* format in DRM definition */ + u32 base; /* sprite base in graphics memory */ + u32 x_pos; /* in pixels */ + u32 y_pos; /* in lines */ + u32 x_offset; /* in pixels */ + u32 y_offset; /* in lines */ + u32 width; /* in pixels */ + u32 height; /* in lines */ +}; + +struct gvt_cursor_plane_format { + u8 enabled; + u8 mode; /* cursor mode select */ + u8 bpp; /* bits per pixel */ + u32 drm_format; /* 
format in DRM definition */ + u32 base; /* cursor base in graphics memory */ + u32 x_pos; /* in pixels */ + u32 y_pos; /* in lines */ + u8 x_sign; /* X Position Sign */ + u8 y_sign; /* Y Position Sign */ + u32 width; /* in pixels */ + u32 height; /* in lines */ + u32 x_hot; /* in pixels */ + u32 y_hot; /* in pixels */ +}; + +struct gvt_pipe_format { + struct gvt_primary_plane_format primary; + struct gvt_sprite_plane_format sprite; + struct gvt_cursor_plane_format cursor; + ddi_port_t ddi_port; /* the DDI port that the pipe is connected to */ +}; + +struct gvt_fb_format{ + struct gvt_pipe_format pipes[4]; +}; + +extern int gvt_decode_fb_format(struct intel_gvt *pdev, int vmid, + struct gvt_fb_format *fb); + +#endif diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index a26c1705430e..be2868867280 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -220,27 +220,27 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) void *mem; int ret; - path = kmalloc(PATH_MAX, GFP_KERNEL); - if (!path) - return -ENOMEM; - mem = kmalloc(info->cfg_space_size, GFP_KERNEL); - if (!mem) { - kfree(path); + if (!mem) return -ENOMEM; - } firmware->cfg_space = mem; mem = kmalloc(info->mmio_size, GFP_KERNEL); if (!mem) { - kfree(path); kfree(firmware->cfg_space); return -ENOMEM; } firmware->mmio = mem; + if (i915_modparams.disable_gvt_fw_loading) + goto expose_firmware; + + path = kmalloc(PATH_MAX, GFP_KERNEL); + if (!path) + return -ENOMEM; + sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state", GVT_FIRMWARE_PATH, pdev->vendor, pdev->device, pdev->revision); @@ -248,6 +248,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) gvt_dbg_core("request hw state firmware %s...\n", path); ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev); + kfree(path); if (ret) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index e6dfc3331f4b..f15062858d68 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -33,12 +33,12 @@ * */ +#include #include "i915_drv.h" #include "gvt.h" #include "i915_pvinfo.h" #include "trace.h" -static bool enable_out_of_sync = false; static int preallocated_oos_pages = 8192; /* @@ -271,6 +271,17 @@ static inline int gtt_get_entry64(void *pt, return -EINVAL; if (hypervisor_access) { + if (vgpu->ge_cache_enable && vgpu->cached_guest_entry) { + if (index == 0) { + ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa, + vgpu->cached_guest_entry, GTT_PAGE_SIZE); + if (WARN_ON(ret)) + return ret; + } + e->val64 = *(vgpu->cached_guest_entry + index); + return 0; + + } ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa + (index << info->gtt_entry_size_shift), &e->val64, 8); @@ -284,6 +295,50 @@ static inline int gtt_get_entry64(void *pt, return 0; } +struct ggtt_entry64 { + void *pt; + struct intel_gvt_gtt_entry *e; + unsigned long index; + bool hypervisor_access; + unsigned long gpa; + struct intel_vgpu *vgpu; +}; + +#ifdef CONFIG_INTEL_IOMMU +static int gtt_get_entry64__cb(void *_arg) +{ + struct ggtt_entry64 *arg = _arg; + int ret = 0; + + gvt_pause_user_domains(arg->vgpu->gvt->dev_priv); + ret = gtt_get_entry64(arg->pt, arg->e, arg->index, + arg->hypervisor_access, arg->gpa, arg->vgpu); + gvt_unpause_user_domains(arg->vgpu->gvt->dev_priv); + + return ret; +} +#endif + +static inline int gtt_get_entry64__BKL(void *pt, + struct intel_gvt_gtt_entry *e, + unsigned long index, bool hypervisor_access, unsigned long gpa, + struct intel_vgpu *vgpu) +{ +#ifdef 
CONFIG_INTEL_IOMMU + struct ggtt_entry64 arg = { pt, e, index, hypervisor_access, gpa, vgpu }; + + if (!intel_iommu_gfx_mapped || !IS_BROXTON(vgpu->gvt->dev_priv) || + hypervisor_access || pt) { + return gtt_get_entry64(pt, e, index, hypervisor_access, gpa, vgpu); + } else { + stop_machine(gtt_get_entry64__cb, &arg, NULL); + return 0; + } +#else + return gtt_get_entry64(pt, e, index, hypervisor_access, gpa, vgpu); +#endif +} + static inline int gtt_set_entry64(void *pt, struct intel_gvt_gtt_entry *e, unsigned long index, bool hypervisor_access, unsigned long gpa, @@ -309,11 +364,46 @@ static inline int gtt_set_entry64(void *pt, return 0; } +#ifdef CONFIG_INTEL_IOMMU +static int gtt_set_entry64__cb(void *_arg) +{ + struct ggtt_entry64 *arg = _arg; + int ret; + + gvt_pause_user_domains(arg->vgpu->gvt->dev_priv); + ret = gtt_set_entry64(arg->pt, arg->e, arg->index, arg->hypervisor_access, + arg->gpa, arg->vgpu); + gvt_unpause_user_domains(arg->vgpu->gvt->dev_priv); + + return ret; +} +#endif + +static inline int gtt_set_entry64__BKL(void *pt, + struct intel_gvt_gtt_entry *e, + unsigned long index, bool hypervisor_access, unsigned long gpa, + struct intel_vgpu *vgpu) +{ +#ifdef CONFIG_INTEL_IOMMU + struct ggtt_entry64 arg = { pt, e, index, hypervisor_access, gpa, vgpu }; + + if (!intel_iommu_gfx_mapped || !IS_BROXTON(vgpu->gvt->dev_priv) || + hypervisor_access || pt) { + return gtt_set_entry64(pt, e, index, hypervisor_access, gpa, vgpu); + } else { + stop_machine(gtt_set_entry64__cb, &arg, NULL); + return 0; + } +#else + return gtt_set_entry64(pt, e, index, hypervisor_access, gpa, vgpu); +#endif +} + #define GTT_HAW 46 -#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30) -#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21) -#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12) +#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30) +#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21) +#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12) static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e) { @@ -403,8 +493,8 @@ DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff)); DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff)); static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = { - .get_entry = gtt_get_entry64, - .set_entry = gtt_set_entry64, + .get_entry = gtt_get_entry64__BKL, + .set_entry = gtt_set_entry64__BKL, .clear_present = gtt_entry_clear_present, .test_present = gen8_gtt_test_present, .test_pse = gen8_gtt_test_pse, @@ -426,6 +516,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p, { struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; unsigned long gfn, mfn; + static unsigned long saved_gfn, saved_mfn; *m = *p; @@ -434,13 +525,19 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p, gfn = ops->get_pfn(p); - mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn); + if (gfn != saved_gfn) + mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn); + else + mfn = saved_mfn; + if (mfn == INTEL_GVT_INVALID_ADDR) { gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn); return -ENXIO; } ops->set_pfn(m, mfn); + saved_gfn = gfn; + saved_mfn = mfn; return 0; } @@ -834,6 +931,9 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu, if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { cur_pt_type = get_next_pt_type(e->type) + 1; + if (unlikely(cur_pt_type <= GTT_TYPE_INVALID || + cur_pt_type >= GTT_TYPE_MAX)) 
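+			/* derived type fell outside the valid page-table
+			 * types; reject the entry
+			 */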
+ return -EINVAL; if (ops->get_pfn(e) == vgpu->gtt.scratch_pt[cur_pt_type].page_mfn) return 0; @@ -956,12 +1056,14 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) spt->guest_page.gfn, spt->shadow_page.type); if (gtt_type_is_pte_pt(spt->shadow_page.type)) { + vgpu->ge_cache_enable = true; for_each_present_guest_entry(spt, &ge, i) { ret = gtt_entry_p2m(vgpu, &ge, &se); if (ret) goto fail; ppgtt_set_shadow_entry(spt, &se, i); } + vgpu->ge_cache_enable = false; return 0; } @@ -1219,7 +1321,7 @@ int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu) struct intel_vgpu_oos_page *oos_page; int ret; - if (!enable_out_of_sync) + if (!i915_modparams.enable_gvt_oos) return 0; list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) { @@ -1281,7 +1383,7 @@ static int ppgtt_handle_guest_write_page_table( static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt) { - return enable_out_of_sync + return i915_modparams.enable_gvt_oos && gtt_type_is_pte_pt( guest_page_to_ppgtt_spt(gpt)->guest_page_type) && gpt->write_cnt >= 2; @@ -1349,6 +1451,8 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp, index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift; + /* Set guest ppgtt entry. Optional for KVMGT, but MUST for XENGT. */ + intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes); ppgtt_get_guest_entry(spt, &we, index); ops->test_pse(&we); @@ -1359,16 +1463,19 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp, return ret; } else { if (!test_bit(index, spt->post_shadow_bitmap)) { + int type = spt->shadow_page.type; + ppgtt_get_shadow_entry(spt, &se, index); ret = ppgtt_handle_guest_entry_removal(gpt, &se, index); if (ret) return ret; + ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn); + ppgtt_set_shadow_entry(spt, &se, index); } - ppgtt_set_post_shadow(spt, index); } - if (!enable_out_of_sync) + if (!i915_modparams.enable_gvt_oos) return 0; gpt->write_cnt++; @@ -1437,6 +1544,31 @@ static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm) mm->virtual_page_table = mm->shadow_page_table = NULL; } +static void invalidate_mm_pv(struct intel_vgpu_mm *mm) +{ + struct intel_vgpu *vgpu = mm->vgpu; + struct intel_gvt *gvt = vgpu->gvt; + struct intel_gvt_gtt *gtt = &gvt->gtt; + struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; + struct intel_gvt_gtt_entry se; + + if (WARN_ON(mm->page_table_level != 4)) + return; + + i915_ppgtt_close(&mm->ppgtt->base); + i915_ppgtt_put(mm->ppgtt); + + ppgtt_get_shadow_root_entry(mm, &se, 0); + if (!ops->test_present(&se)) + return; + trace_gpt_change(vgpu->id, "destroy root pointer", + NULL, se.type, se.val64, 0); + se.val64 = 0; + ppgtt_set_shadow_root_entry(mm, &se, 0); + + mm->shadowed = false; +} + static void invalidate_mm(struct intel_vgpu_mm *mm) { struct intel_vgpu *vgpu = mm->vgpu; @@ -1449,6 +1581,11 @@ static void invalidate_mm(struct intel_vgpu_mm *mm) if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed)) return; + if (VGPU_PVMMIO(mm->vgpu) & PVMMIO_PPGTT_UPDATE) { + invalidate_mm_pv(mm); + return; + } + for (i = 0; i < mm->page_table_entry_cnt; i++) { ppgtt_get_shadow_root_entry(mm, &se, i); if (!ops->test_present(&se)) @@ -1492,6 +1629,36 @@ void intel_vgpu_destroy_mm(struct kref *mm_ref) kfree(mm); } +static int shadow_mm_pv(struct intel_vgpu_mm *mm) +{ + struct intel_vgpu *vgpu = mm->vgpu; + struct intel_gvt *gvt = vgpu->gvt; + char name[16]; + struct intel_gvt_gtt_entry se; + + if (WARN_ON(mm->page_table_level != 4)) + return -EINVAL; + + snprintf(name, sizeof(name), "%p", mm); + + 
mm->ppgtt = i915_ppgtt_create(gvt->dev_priv, NULL, name); + if (IS_ERR(mm->ppgtt)) { + gvt_vgpu_err("fail to create ppgtt for pdp 0x%llx\n", + px_dma(&mm->ppgtt->pml4)); + return PTR_ERR(mm->ppgtt); + } + + se.type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY; + se.val64 = px_dma(&mm->ppgtt->pml4); + ppgtt_set_shadow_root_entry(mm, &se, 0); + + trace_gpt_change(vgpu->id, "populate root pointer", + NULL, se.type, se.val64, 0); + mm->shadowed = true; + + return 0; +} + static int shadow_mm(struct intel_vgpu_mm *mm) { struct intel_vgpu *vgpu = mm->vgpu; @@ -1506,6 +1673,9 @@ static int shadow_mm(struct intel_vgpu_mm *mm) if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed)) return 0; + if (VGPU_PVMMIO(mm->vgpu) & PVMMIO_PPGTT_UPDATE) + return shadow_mm_pv(mm); + mm->shadowed = true; for (i = 0; i < mm->page_table_entry_cnt; i++) { @@ -1960,7 +2130,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. */ - if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { + if (type > GTT_TYPE_PPGTT_PTE_PT) { struct intel_gvt_gtt_entry se; memset(&se, 0, sizeof(struct intel_gvt_gtt_entry)); @@ -2050,6 +2220,12 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) } gtt->ggtt_mm = ggtt_mm; + vgpu->cached_guest_entry = kzalloc(GTT_PAGE_SIZE, GFP_KERNEL); + if (!vgpu->cached_guest_entry) { + gvt_vgpu_err("fail to allocate cached_guest_entry page\n"); + return -ENOMEM; + } + vgpu->ge_cache_enable = false; return create_scratch_page_tree(vgpu); } @@ -2087,6 +2263,7 @@ void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu) intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT); intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT); + kfree(vgpu->cached_guest_entry); } static void clean_spt_oos(struct intel_gvt *gvt) @@ -2264,7 +2441,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) gvt_dbg_core("init gtt\n"); if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) - || IS_KABYLAKE(gvt->dev_priv)) { + || IS_KABYLAKE(gvt->dev_priv) || IS_BROXTON(gvt->dev_priv)) { gvt->gtt.pte_ops = &gen8_gtt_pte_ops; gvt->gtt.gma_ops = &gen8_gtt_gma_ops; gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table; @@ -2289,7 +2466,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) gvt->gtt.scratch_ggtt_page = virt_to_page(page); gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT); - if (enable_out_of_sync) { + if (i915_modparams.enable_gvt_oos) { ret = setup_spt_oos(gvt); if (ret) { gvt_err("fail to initialize SPT oos\n"); @@ -2320,7 +2497,7 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt) __free_page(gvt->gtt.scratch_ggtt_page); - if (enable_out_of_sync) + if (i915_modparams.enable_gvt_oos) clean_spt_oos(gvt); } @@ -2389,3 +2566,293 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) 0, PAGE_SIZE); } } + +int intel_vgpu_g2v_pv_ppgtt_alloc_4lvl(struct intel_vgpu *vgpu, + int page_table_level) +{ + struct pv_ppgtt_update *pv_ppgtt = &vgpu->mmio.shared_page->pv_ppgtt; + struct intel_vgpu_mm *mm; + u64 pdp = pv_ppgtt->pdp; + int ret = 0; + + if (WARN_ON(page_table_level != 4)) + return -EINVAL; + + gvt_dbg_mm("alloc_4lvl pdp=%llx start=%llx length=%llx\n", + pv_ppgtt->pdp, pv_ppgtt->start, + pv_ppgtt->length); + + mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, &pdp); + if (!mm) { + gvt_vgpu_err("failed to find mm for pdp 0x%llx\n", pdp); + ret = -EINVAL; + } else { + ret = mm->ppgtt->base.allocate_va_range(&mm->ppgtt->base, + pv_ppgtt->start, pv_ppgtt->length); + if (ret) + gvt_vgpu_err("failed to 
alloc for pdp %llx\n", pdp); + } + + return ret; +} + +int intel_vgpu_g2v_pv_ppgtt_clear_4lvl(struct intel_vgpu *vgpu, + int page_table_level) +{ + struct pv_ppgtt_update *pv_ppgtt = &vgpu->mmio.shared_page->pv_ppgtt; + struct intel_vgpu_mm *mm; + u64 pdp = pv_ppgtt->pdp; + int ret = 0; + + if (WARN_ON(page_table_level != 4)) + return -EINVAL; + + gvt_dbg_mm("clear_4lvl pdp=%llx start=%llx length=%llx\n", + pv_ppgtt->pdp, pv_ppgtt->start, + pv_ppgtt->length); + + mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, &pdp); + if (!mm) { + gvt_vgpu_err("failed to find mm for pdp 0x%llx\n", pdp); + ret = -EINVAL; + } else { + mm->ppgtt->base.clear_range(&mm->ppgtt->base, + pv_ppgtt->start, pv_ppgtt->length); + } + + return ret; +} + +#define GEN8_PML4E_SIZE (1UL << GEN8_PML4E_SHIFT) +#define GEN8_PML4E_SIZE_MASK (~(GEN8_PML4E_SIZE - 1)) +#define GEN8_PDPE_SIZE (1UL << GEN8_PDPE_SHIFT) +#define GEN8_PDPE_SIZE_MASK (~(GEN8_PDPE_SIZE - 1)) +#define GEN8_PDE_SIZE (1UL << GEN8_PDE_SHIFT) +#define GEN8_PDE_SIZE_MASK (~(GEN8_PDE_SIZE - 1)) + +#define pml4_addr_end(addr, end) \ +({ unsigned long __boundary = \ + ((addr) + GEN8_PML4E_SIZE) & GEN8_PML4E_SIZE_MASK; \ + (__boundary < (end)) ? __boundary : (end); \ +}) + +#define pdp_addr_end(addr, end) \ +({ unsigned long __boundary = \ + ((addr) + GEN8_PDPE_SIZE) & GEN8_PDPE_SIZE_MASK; \ + (__boundary < (end)) ? __boundary : (end); \ +}) + +#define pd_addr_end(addr, end) \ +({ unsigned long __boundary = \ + ((addr) + GEN8_PDE_SIZE) & GEN8_PDE_SIZE_MASK; \ + (__boundary < (end)) ? __boundary : (end); \ +}) + +struct ppgtt_walk { + unsigned long *mfns; + int mfn_index; + unsigned long *pt; +}; + +static int walk_pt_range(struct intel_vgpu *vgpu, u64 pt, + u64 start, u64 end, struct ppgtt_walk *walk) +{ + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + struct intel_gvt_gtt_gma_ops *gma_ops = vgpu->gvt->gtt.gma_ops; + unsigned long start_index, end_index; + int ret; + int i; + unsigned long mfn, gfn; + + start_index = gma_ops->gma_to_pte_index(start); + end_index = ((end - start) >> PAGE_SHIFT) + start_index; + + gvt_dbg_mm("%s: %llx start=%llx end=%llx start_index=%lx end_index=%lx mfn_index=%x\n", + __func__, pt, start, end, + start_index, end_index, walk->mfn_index); + ret = intel_gvt_hypervisor_read_gpa(vgpu, + (pt & PAGE_MASK) + (start_index << info->gtt_entry_size_shift), + walk->pt + start_index, + (end_index - start_index) << info->gtt_entry_size_shift); + if (ret) { + gvt_vgpu_err("fail to read gpa %llx\n", pt); + return ret; + } + + for (i = start_index; i < end_index; i++) { + gfn = walk->pt[i] >> PAGE_SHIFT; + mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn); + if (mfn == INTEL_GVT_INVALID_ADDR) { + gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn); + return -ENXIO; + } + walk->mfns[walk->mfn_index++] = mfn << PAGE_SHIFT; + } + + return 0; +} + + +static int walk_pd_range(struct intel_vgpu *vgpu, u64 pd, + u64 start, u64 end, struct ppgtt_walk *walk) +{ + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + struct intel_gvt_gtt_gma_ops *gma_ops = vgpu->gvt->gtt.gma_ops; + unsigned long index; + u64 pt, next; + int ret = 0; + + do { + index = gma_ops->gma_to_pde_index(start); + + ret = intel_gvt_hypervisor_read_gpa(vgpu, + (pd & PAGE_MASK) + (index << + info->gtt_entry_size_shift), &pt, 8); + if (ret) + return ret; + next = pd_addr_end(start, end); + gvt_dbg_mm("%s: %llx start=%llx end=%llx next=%llx\n", + __func__, pd, start, end, next); + walk_pt_range(vgpu, pt, start, next, walk); + + start = next; + } 
while (start != end); + + return ret; +} + + +static int walk_pdp_range(struct intel_vgpu *vgpu, u64 pdp, + u64 start, u64 end, struct ppgtt_walk *walk) +{ + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + struct intel_gvt_gtt_gma_ops *gma_ops = vgpu->gvt->gtt.gma_ops; + unsigned long index; + u64 pd, next; + int ret = 0; + + do { + index = gma_ops->gma_to_l4_pdp_index(start); + + ret = intel_gvt_hypervisor_read_gpa(vgpu, + (pdp & PAGE_MASK) + (index << + info->gtt_entry_size_shift), &pd, 8); + if (ret) + return ret; + next = pdp_addr_end(start, end); + gvt_dbg_mm("%s: %llx start=%llx end=%llx next=%llx\n", + __func__, pdp, start, end, next); + + walk_pd_range(vgpu, pd, start, next, walk); + start = next; + } while (start != end); + + return ret; +} + + +static int walk_pml4_range(struct intel_vgpu *vgpu, u64 pml4, + u64 start, u64 end, struct ppgtt_walk *walk) +{ + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + struct intel_gvt_gtt_gma_ops *gma_ops = vgpu->gvt->gtt.gma_ops; + unsigned long index; + u64 pdp, next; + int ret = 0; + + do { + index = gma_ops->gma_to_pml4_index(start); + ret = intel_gvt_hypervisor_read_gpa(vgpu, + (pml4 & PAGE_MASK) + (index << + info->gtt_entry_size_shift), &pdp, 8); + if (ret) + return ret; + next = pml4_addr_end(start, end); + gvt_dbg_mm("%s: %llx start=%llx end=%llx next=%llx\n", + __func__, pml4, start, end, next); + + walk_pdp_range(vgpu, pdp, start, next, walk); + start = next; + } while (start != end); + + return ret; +} + +int intel_vgpu_g2v_pv_ppgtt_insert_4lvl(struct intel_vgpu *vgpu, + int page_table_level) +{ + struct pv_ppgtt_update *pv_ppgtt = &vgpu->mmio.shared_page->pv_ppgtt; + struct intel_vgpu_mm *mm; + u64 pml4 = pv_ppgtt->pdp; + int ret = 0; + u64 start = pv_ppgtt->start; + u64 length = pv_ppgtt->length; + struct sg_table st; + struct scatterlist *sg = NULL; + int num_pages = length >> PAGE_SHIFT; + struct i915_vma vma; + struct ppgtt_walk walk; + int i; + + if (WARN_ON(page_table_level != 4)) + return -EINVAL; + + gvt_dbg_mm("insert_4lvl pml4=%llx start=%llx length=%llx cache=%x\n", + pv_ppgtt->pdp, start, length, pv_ppgtt->cache_level); + + mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, &pml4); + if (!mm) { + gvt_vgpu_err("fail to find mm for pml4 0x%llx\n", pml4); + return -EINVAL; + } + + walk.mfn_index = 0; + walk.mfns = NULL; + walk.pt = NULL; + + walk.mfns = kmalloc_array(num_pages, + sizeof(unsigned long), GFP_KERNEL); + if (!walk.mfns) { + ret = -ENOMEM; + goto fail; + } + + walk.pt = (unsigned long *)__get_free_pages(GFP_KERNEL, 0); + if (!walk.pt) { + ret = -ENOMEM; + goto fail; + } + + if (sg_alloc_table(&st, num_pages, GFP_KERNEL)) { + ret = -ENOMEM; + goto fail; + } + + ret = walk_pml4_range(vgpu, pml4, start, start + length, &walk); + if (ret) + goto fail_free_sg; + + WARN_ON(num_pages != walk.mfn_index); + + for_each_sg(st.sgl, sg, num_pages, i) { + sg->offset = 0; + sg->length = PAGE_SIZE; + sg_dma_address(sg) = walk.mfns[i]; + sg_dma_len(sg) = PAGE_SIZE; + } + + /* fake vma for insert call*/ + memset(&vma, 0, sizeof(vma)); + vma.node.start = start; + vma.pages = &st; + mm->ppgtt->base.insert_entries(&mm->ppgtt->base, &vma, + pv_ppgtt->cache_level, 0); + +fail_free_sg: + sg_free_table(&st); +fail: + kfree(walk.mfns); + free_page((unsigned long)walk.pt); + + return ret; +} diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 30a4c8d16026..d61e1f230d24 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -135,6 
+135,7 @@ struct intel_vgpu_mm {
 	u32 page_table_entry_cnt;
 	void *virtual_page_table;
 	void *shadow_page_table;
+	struct i915_hw_ppgtt *ppgtt;
 	int page_table_level;
 	bool has_shadow_page_table;
@@ -312,4 +313,12 @@ int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
 int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
 	unsigned int off, void *p_data, unsigned int bytes);
+int intel_vgpu_g2v_pv_ppgtt_alloc_4lvl(struct intel_vgpu *vgpu,
+	int page_table_level);
+
+int intel_vgpu_g2v_pv_ppgtt_clear_4lvl(struct intel_vgpu *vgpu,
+	int page_table_level);
+
+int intel_vgpu_g2v_pv_ppgtt_insert_4lvl(struct intel_vgpu *vgpu,
+	int page_table_level);
 #endif /* _GVT_GTT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index c27c6838eaca..7fecfa717a0e 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -42,6 +42,7 @@ struct intel_gvt_host intel_gvt_host;
 static const char * const supported_hypervisors[] = {
 	[INTEL_GVT_HYPERVISOR_XEN] = "XEN",
 	[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
+	[INTEL_GVT_HYPERVISOR_ACRN] = "ACRN",
 };
 
 static const struct intel_gvt_ops intel_gvt_ops = {
@@ -90,6 +91,11 @@ int intel_gvt_init_host(void)
 				symbol_get(kvmgt_mpt), "kvmgt");
 		intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
 #endif
+		/* not in Xen, try ACRN */
+		intel_gvt_host.mpt = try_then_request_module(
+				symbol_get(acrn_gvt_mpt), "acrn-gvt");
+		intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_ACRN;
+		pr_info("acrn-gvt %s\n", intel_gvt_host.mpt ? "found" : "not found");
 	}
 
 	/* Fail to load MPT modules - bail out */
@@ -109,10 +115,13 @@ static void init_device_info(struct intel_gvt *gvt)
 	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
 
 	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+		|| IS_BROXTON(gvt->dev_priv)
 		|| IS_KABYLAKE(gvt->dev_priv)) {
 		info->max_support_vgpus = 8;
 		info->cfg_space_size = 256;
 		info->mmio_size = 2 * 1024 * 1024;
+		/* order of mmio size in pages: (1 << order) pages == mmio_size */
+		info->mmio_size_order = 9;
 		info->mmio_bar = 0;
 		info->gtt_start_offset = 8 * 1024 * 1024;
 		info->gtt_entry_size = 8;
@@ -176,6 +185,44 @@ static int init_service_thread(struct intel_gvt *gvt)
 	return 0;
 }
 
+void intel_gvt_init_pipe_info(struct intel_gvt *gvt);
+
+/*
+ * When multi-plane is enabled in a DomU, the PLANE_BUF_CFG register
+ * cannot be updated dynamically, since Dom0 has no idea which planes
+ * a DomU will enable. The ddb entries for all the planes that could
+ * possibly be enabled are therefore allocated statically here.
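+ *
+ * Worked example (numbers are illustrative, not taken from any real
+ * part): with ddb_size = 896 blocks, num_pipes = 3 and 2 sprites plus
+ * one on each pipe, pipe_size = (896 - 4) / 3 = 297 blocks, plane_cnt
+ * = 3 and every plane entry covers 297 / 3 = 99 blocks.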
+ */ +static void intel_gvt_init_ddb(struct intel_gvt *gvt) +{ + struct drm_i915_private *dev_priv = gvt->dev_priv; + struct skl_ddb_allocation *ddb = &gvt->ddb; + unsigned int pipe_size, ddb_size, plane_size, plane_cnt; + u16 start, end; + enum pipe pipe; + enum plane_id plane; + + ddb_size = INTEL_INFO(dev_priv)->ddb_size; + ddb_size -= 4; /* 4 blocks for bypass path allocation */ + pipe_size = ddb_size / INTEL_INFO(dev_priv)->num_pipes; + + memset(ddb, 0, sizeof(*ddb)); + for_each_pipe(dev_priv, pipe) { + start = pipe * ddb_size / INTEL_INFO(dev_priv)->num_pipes; + end = start + pipe_size; + + plane_cnt = (INTEL_INFO(dev_priv)->num_sprites[pipe] + 1); + plane_size = pipe_size / plane_cnt; + + for_each_universal_plane(dev_priv, pipe, plane) { + ddb->plane[pipe][plane].start = start + + (plane * pipe_size / plane_cnt); + ddb->plane[pipe][plane].end = + ddb->plane[pipe][plane].start + plane_size; + } + } +} + /** * intel_gvt_clean_device - clean a GVT device * @gvt: intel gvt device @@ -212,6 +259,12 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) dev_priv->gvt = NULL; } +#define BITS_PER_DOMAIN 4 +#define MAX_PLANES_PER_DOMAIN 4 +#define DOMAIN_PLANE_OWNER(owner, pipe, plane) \ + ((((owner) >> (pipe) * BITS_PER_DOMAIN * MAX_PLANES_PER_DOMAIN) >> \ + BITS_PER_DOMAIN * (plane)) & 0xf) + /** * intel_gvt_init_device - initialize a GVT device * @dev_priv: drm i915 private data @@ -248,6 +301,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) idr_init(&gvt->vgpu_idr); spin_lock_init(&gvt->scheduler.mmio_context_lock); mutex_init(&gvt->lock); + mutex_init(&gvt->sched_lock); gvt->dev_priv = dev_priv; init_device_info(gvt); @@ -292,6 +346,9 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) if (ret) goto out_clean_thread; + intel_gvt_init_pipe_info(gvt); + intel_gvt_init_ddb(gvt); + ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt, &intel_gvt_ops); if (ret) { @@ -307,8 +364,28 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) } gvt->idle_vgpu = vgpu; - gvt_dbg_core("gvt device initialization is done\n"); dev_priv->gvt = gvt; + + if (i915_modparams.avail_planes_per_pipe) { + unsigned long long domain_plane_owners; + int plane; + enum pipe pipe; + + /* + * Each nibble represents domain id + * ids can be from 0-F. 
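+	 * (Worked example with an illustrative value: domain_plane_owners =
+	 *  0x00010021 places pipe A planes 0 and 1 in domains 1 and 2, and
+	 *  pipe B plane 0 in domain 1, since each pipe takes 16 bits and
+	 *  each plane 4 bits.)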
0 for Dom0, 1,2,3...0xF for DomUs + * plane_owner[i] holds the id of the domain that owns it,eg:0,1,2 etc + */ + domain_plane_owners = i915_modparams.domain_plane_owners; + for_each_pipe(dev_priv, pipe) { + for_each_universal_plane(dev_priv, pipe, plane) { + gvt->pipe_info[pipe].plane_owner[plane] = + DOMAIN_PLANE_OWNER(domain_plane_owners, pipe, plane); + } + } + } + + gvt_dbg_core("gvt device initialization is done\n"); return 0; out_clean_types: @@ -336,3 +413,41 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) kfree(gvt); return ret; } + +int gvt_pause_user_domains(struct drm_i915_private *dev_priv) +{ + struct intel_vgpu *vgpu; + int id, ret = 0; + + if (!intel_gvt_active(dev_priv)) + return 0; + + for_each_active_vgpu(dev_priv->gvt, vgpu, id) { + ret = intel_gvt_hypervisor_pause_domain(vgpu); + } + + return ret; +} + +int gvt_unpause_user_domains(struct drm_i915_private *dev_priv) +{ + struct intel_vgpu *vgpu; + int id, ret = 0; + + if (!intel_gvt_active(dev_priv)) + return 0; + + for_each_active_vgpu(dev_priv->gvt, vgpu, id) { + ret = intel_gvt_hypervisor_unpause_domain(vgpu); + } + + return ret; +} + +int gvt_dom0_ready(struct drm_i915_private *dev_priv) +{ + if (!intel_gvt_active(dev_priv)) + return 0; + + return intel_gvt_hypervisor_dom0_ready(); +} diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 44b719eda8c4..e76e081472bf 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -52,6 +52,7 @@ enum { INTEL_GVT_HYPERVISOR_XEN = 0, INTEL_GVT_HYPERVISOR_KVM, + INTEL_GVT_HYPERVISOR_ACRN, }; struct intel_gvt_host { @@ -67,6 +68,7 @@ struct intel_gvt_device_info { u32 max_support_vgpus; u32 cfg_space_size; u32 mmio_size; + u32 mmio_size_order; u32 mmio_bar; unsigned long msi_cap_offset; u32 gtt_start_offset; @@ -96,6 +98,7 @@ struct intel_vgpu_fence { struct intel_vgpu_mmio { void *vreg; void *sreg; + struct gvt_shared_page *shared_page; bool disable_warn_untrack; }; @@ -150,7 +153,7 @@ struct intel_vgpu { bool pv_notified; bool failsafe; unsigned int resetting_eng; - void *sched_data; + void *sched_data[I915_NUM_ENGINES]; struct vgpu_sched_ctl sched_ctl; struct intel_vgpu_fence fence; @@ -165,6 +168,9 @@ struct intel_vgpu { struct list_head workload_q_head[I915_NUM_ENGINES]; struct kmem_cache *workloads; atomic_t running_workload_num; + /* 1/2K for each reserve ring buffer */ + void *reserve_ring_buffer_va[I915_NUM_ENGINES]; + int reserve_ring_buffer_size[I915_NUM_ENGINES]; DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); struct i915_gem_context *shadow_ctx; DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES); @@ -185,6 +191,10 @@ struct intel_vgpu { atomic_t released; } vdev; #endif + + bool entire_nonctxmmio_checked; + unsigned long long *cached_guest_entry; + bool ge_cache_enable; }; struct intel_gvt_gm { @@ -223,10 +233,15 @@ struct intel_gvt_mmio { #define F_CMD_ACCESSED (1 << 5) /* This reg could be accessed by unaligned address */ #define F_UNALIGN (1 << 6) +/* This reg is not in the context */ +#define F_NON_CONTEXT (1 << 7) + struct gvt_mmio_block *mmio_block; unsigned int num_mmio_block; + void *mmio_host_cache; + bool host_cache_initialized; DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); unsigned int num_tracked_mmio; }; @@ -243,6 +258,7 @@ struct intel_gvt_opregion { }; #define NR_MAX_INTEL_VGPU_TYPES 20 + struct intel_vgpu_type { char name[16]; unsigned int avail_instance; @@ -253,8 +269,35 @@ struct intel_vgpu_type { enum intel_vgpu_edid resolution; }; +struct 
intel_dom0_plane_regs { + u32 plane_ctl; + u32 plane_stride; + u32 plane_pos; + u32 plane_size; + u32 plane_keyval; + u32 plane_keymsk; + u32 plane_keymax; + u32 plane_offset; + u32 plane_aux_dist; + u32 plane_aux_offset; + u32 plane_surf; + u32 plane_wm[8]; + u32 plane_wm_trans; +}; + +struct intel_gvt_pipe_info { + enum pipe pipe_num; + int owner; + struct intel_gvt *gvt; + struct work_struct vblank_work; + struct intel_dom0_plane_regs dom0_regs[I915_MAX_PLANES]; + int plane_owner[I915_MAX_PLANES]; + int scaler_owner[SKL_NUM_SCALERS]; +}; + struct intel_gvt { struct mutex lock; + struct mutex sched_lock; struct drm_i915_private *dev_priv; struct idr vgpu_idr; /* vGPU IDR pool */ @@ -276,6 +319,9 @@ struct intel_gvt { struct task_struct *service_thread; wait_queue_head_t service_thread_wq; unsigned long service_request; + struct intel_gvt_pipe_info pipe_info[I915_MAX_PIPES]; + + struct skl_ddb_allocation ddb; }; static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915) @@ -397,6 +443,11 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu, idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \ for_each_if(vgpu->active) +#define for_each_universal_scaler(__dev_priv, __pipe, __s) \ + for ((__s) = 0; \ + (__s) < INTEL_INFO(__dev_priv)->num_scalers[(__pipe)] + 1; \ + (__s)++) + static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu, u32 offset, u32 val, bool low) { @@ -482,6 +533,9 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); void populate_pvinfo_page(struct intel_vgpu *vgpu); +int gvt_pause_user_domains(struct drm_i915_private *dev_priv); +int gvt_unpause_user_domains(struct drm_i915_private *dev_priv); +int gvt_dom0_ready(struct drm_i915_private *dev_priv); int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload); @@ -581,7 +635,35 @@ static inline bool intel_gvt_mmio_has_mode_mask( return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK; } + /** + * intel_gvt_mmio_is_non_context - check a MMIO is non-context + * @gvt: a GVT device + * @offset: register offset + * + */ +static inline bool intel_gvt_mmio_is_non_context( + struct intel_gvt *gvt, unsigned int offset) +{ + return gvt->mmio.mmio_attribute[offset >> 2] & F_NON_CONTEXT; +} + +/** + * intel_gvt_mmio_set_non_context - mark a MMIO is non-context + * @gvt: a GVT device + * @offset: register offset + * + */ +static inline void intel_gvt_mmio_set_non_context( + struct intel_gvt *gvt, unsigned int offset) +{ + gvt->mmio.mmio_attribute[offset >> 2] |= F_NON_CONTEXT; +} + #include "trace.h" + +void intel_gvt_mark_noncontext_mmios(struct intel_gvt *gvt); +bool is_force_nonpriv_mmio(unsigned int offset); + #include "mpt.h" #endif diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index a5bed2e71b92..53ae0c6c8e0b 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -55,6 +55,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) return D_SKL; else if (IS_KABYLAKE(gvt->dev_priv)) return D_KBL; + else if (IS_BROXTON(gvt->dev_priv)) + return D_BXT; return 0; } @@ -243,6 +245,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, new = CALC_MODE_MASK_REG(old, *(u32 *)p_data); if (IS_SKYLAKE(vgpu->gvt->dev_priv) + || IS_BROXTON(vgpu->gvt->dev_priv) || IS_KABYLAKE(vgpu->gvt->dev_priv)) { switch (offset) { case FORCEWAKE_RENDER_GEN9_REG: @@ -304,7 +307,11 @@ static int gdrst_mmio_write(struct intel_vgpu 
*vgpu, unsigned int offset, } } + mutex_unlock(&vgpu->gvt->lock); + mutex_lock(&vgpu->gvt->sched_lock); + mutex_lock(&vgpu->gvt->lock); intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask); + mutex_unlock(&vgpu->gvt->sched_lock); /* sw will wait for the device to ack the reset request */ vgpu_vreg(vgpu, offset) = 0; @@ -372,27 +379,9 @@ static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, +static int mmio_write_empty(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - switch (offset) { - case 0xe651c: - case 0xe661c: - case 0xe671c: - case 0xe681c: - vgpu_vreg(vgpu, offset) = 1 << 17; - break; - case 0xe6c04: - vgpu_vreg(vgpu, offset) = 0x3; - break; - case 0xe6e1c: - vgpu_vreg(vgpu, offset) = 0x2f << 16; - break; - default: - return -EINVAL; - } - - read_vreg(vgpu, offset, p_data, bytes); return 0; } @@ -400,14 +389,25 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data; + struct drm_device *dev = &vgpu->gvt->dev_priv->drm; + unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset); + struct intel_crtc *crtc; + + crtc = intel_get_crtc_for_pipe(vgpu->gvt->dev_priv, pipe); + if (!crtc) { + DRM_ERROR("No CRTC for pipe=%d\n", pipe); + return 0; + } write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); - if (data & PIPECONF_ENABLE) + if (data & PIPECONF_ENABLE) { vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE; - else + dev->driver->enable_vblank(dev, drm_crtc_index(&crtc->base)); + } else { vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE; + } intel_gvt_check_vblank_emulation(vgpu->gvt); return 0; } @@ -480,6 +480,14 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu, return ret; } +static int pipe_dsl_mmio_read(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset)); + return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); +} + static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -735,6 +743,170 @@ static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } +static int skl_plane_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes); +static int skl_ps_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes); + +static void pvmmio_update_plane_register(struct intel_vgpu *vgpu, + unsigned int pipe, unsigned int plane) +{ + struct pv_plane_update *pv_plane = &vgpu->mmio.shared_page->pv_plane; + + /* null function for PLANE_COLOR_CTL, PLANE_AUX_DIST, PLANE_AUX_OFFSET, + * and SKL_PS_PWR_GATE register trap + */ + + if (pv_plane->flags & PLANE_KEY_BIT) { + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_KEYVAL(pipe, plane)), + &pv_plane->plane_key_val, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_KEYMAX(pipe, plane)), + &pv_plane->plane_key_max, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_KEYMSK(pipe, plane)), + &pv_plane->plane_key_msk, 4); + } + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_OFFSET(pipe, plane)), + &pv_plane->plane_offset, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_STRIDE(pipe, plane)), + &pv_plane->plane_stride, 4); + skl_plane_mmio_write(vgpu, + 
i915_mmio_reg_offset(PLANE_SIZE(pipe, plane)),
+		&pv_plane->plane_size, 4);
+	skl_plane_mmio_write(vgpu,
+		i915_mmio_reg_offset(PLANE_AUX_DIST(pipe, plane)),
+		&pv_plane->plane_aux_dist, 4);
+	skl_plane_mmio_write(vgpu,
+		i915_mmio_reg_offset(PLANE_AUX_OFFSET(pipe, plane)),
+		&pv_plane->plane_aux_offset, 4);
+
+	if (pv_plane->flags & PLANE_SCALER_BIT) {
+		skl_ps_mmio_write(vgpu,
+			i915_mmio_reg_offset(SKL_PS_CTRL(pipe, plane)),
+			&pv_plane->ps_ctrl, 4);
+		skl_ps_mmio_write(vgpu,
+			i915_mmio_reg_offset(SKL_PS_WIN_POS(pipe, plane)),
+			&pv_plane->ps_win_ps, 4);
+		skl_ps_mmio_write(vgpu,
+			i915_mmio_reg_offset(SKL_PS_WIN_SZ(pipe, plane)),
+			&pv_plane->ps_win_sz, 4);
+	}
+	skl_plane_mmio_write(vgpu,
+		i915_mmio_reg_offset(PLANE_POS(pipe, plane)),
+		&pv_plane->plane_pos, 4);
+	skl_plane_mmio_write(vgpu,
+		i915_mmio_reg_offset(PLANE_CTL(pipe, plane)),
+		&pv_plane->plane_ctl, 4);
+}
+
+static int skl_plane_surf_write(struct intel_vgpu *vgpu, unsigned int offset,
+	void *p_data, unsigned int bytes)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset);
+	unsigned int plane = SKL_PLANE_REG_TO_PLANE(offset);
+	i915_reg_t reg_1ac = _MMIO(_REG_701AC(pipe, plane));
+	int flip_event = SKL_FLIP_EVENT(pipe, plane);
+
+	/* a plane disable is not paravirtualized; it is indicated by value 0 */
+	if (*(u32 *)p_data != 0 && VGPU_PVMMIO(vgpu) & PVMMIO_PLANE_UPDATE)
+		pvmmio_update_plane_register(vgpu, pipe, plane);
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	vgpu_vreg(vgpu, reg_1ac) = vgpu_vreg(vgpu, offset);
+
+	if ((vgpu_vreg(vgpu, PIPECONF(pipe)) & I965_PIPECONF_ACTIVE) &&
+	    (vgpu->gvt->pipe_info[pipe].plane_owner[plane] == vgpu->id)) {
+		I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+	}
+
+	set_bit(flip_event, vgpu->irq.flip_done_event[pipe]);
+	return 0;
+}
+
+static int skl_ps_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+	void *p_data, unsigned int bytes)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	unsigned int pipe = SKL_PS_REG_TO_PIPE(offset);
+	unsigned int scaler = SKL_PS_REG_TO_SCALER(offset) - 1;
+
+	if (pipe >= I915_MAX_PIPES || scaler >= SKL_NUM_SCALERS ||
+	    vgpu->gvt->pipe_info[pipe].scaler_owner[scaler] != vgpu->id) {
+		gvt_vgpu_err("Unsupported pipe %d, scaler %d scaling\n",
+			pipe, scaler);
+		return 0;
+	}
+
+	if (!(vgpu_vreg(vgpu, PIPECONF(pipe)) & I965_PIPECONF_ACTIVE))
+		return 0;
+
+	if (offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
+	    offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
+	    offset == _PS_1C_CTRL) {
+		unsigned int plane;
+
+		if (SKL_PS_REG_VALUE_TO_PLANE(*(u32 *)p_data) == 0) {
+			gvt_vgpu_err("Unsupported CRTC scaling for UOS\n");
+			return 0;
+		}
+		plane = SKL_PS_REG_VALUE_TO_PLANE(*(u32 *)p_data) - 1;
+		if (plane >= I915_MAX_PLANES ||
+		    vgpu->gvt->pipe_info[pipe].plane_owner[plane] != vgpu->id) {
+			gvt_vgpu_err("Unsupported plane %d scaling\n", plane);
+			return 0;
+		}
+	}
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+	return 0;
+}
+
+static int skl_plane_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+	void *p_data, unsigned int bytes)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset);
+	unsigned int plane = SKL_PLANE_REG_TO_PLANE(offset);
+
+	if (WARN_ON_ONCE(pipe >= I915_MAX_PIPES))
+		return -EINVAL;
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	if ((vgpu_vreg(vgpu, PIPECONF(pipe)) & I965_PIPECONF_ACTIVE) &&
+	    (vgpu->gvt->pipe_info[pipe].plane_owner[plane] ==
vgpu->id)) { + I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); + } + return 0; +} + +static int pv_plane_wm_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset); + unsigned int plane = SKL_PLANE_REG_TO_PLANE(offset); + struct pv_plane_wm_update *pv_plane_wm = + &vgpu->mmio.shared_page->pv_plane_wm; + int level; + + if (VGPU_PVMMIO(vgpu) & PVMMIO_PLANE_WM_UPDATE) { + for (level = 0; level <= pv_plane_wm->max_wm_level; level++) + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_WM(pipe, plane, level)), + &pv_plane_wm->plane_wm_level[level], 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_WM_TRANS(pipe, plane)), + &pv_plane_wm->plane_trans_wm_level, 4); + /* null function for PLANE_BUF_CFG and PLANE_NV12_BUF_CFG */ + } + return 0; +} + static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu, unsigned int reg) { @@ -839,6 +1011,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, data = vgpu_vreg(vgpu, offset); if ((IS_SKYLAKE(vgpu->gvt->dev_priv) + || IS_BROXTON(vgpu->gvt->dev_priv) || IS_KABYLAKE(vgpu->gvt->dev_priv)) && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) { /* SKL DPB/C/D aux ctl register changed */ @@ -1096,6 +1269,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { bool invalid_read = false; + int ret = 0; read_vreg(vgpu, offset, p_data, bytes); @@ -1110,6 +1284,23 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, _vgtif_reg(avail_rs.fence_num) + 4) invalid_read = true; break; + case _vgtif_reg(pv_mmio): + /* a remap happens from guest mmio read operation, the target reg offset + * is in the first DWORD of shared_page. + */ + { + u32 reg = vgpu->mmio.shared_page->reg_addr; + struct intel_gvt_mmio_info *mmio; + + mmio = find_mmio_info(vgpu->gvt, rounddown(reg, 4)); + if (mmio) + ret = mmio->read(vgpu, reg, p_data, bytes); + else + ret = intel_vgpu_default_mmio_read(vgpu, reg, p_data, + bytes); + break; + } + case 0x78010: /* vgt_caps */ case 0x7881c: break; @@ -1121,7 +1312,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n", offset, bytes, *(u32 *)p_data); vgpu->pv_notified = true; - return 0; + return ret; } static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) @@ -1141,6 +1332,15 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY: ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4); break; + case VGT_G2V_PPGTT_L4_ALLOC: + ret = intel_vgpu_g2v_pv_ppgtt_alloc_4lvl(vgpu, 4); + break; + case VGT_G2V_PPGTT_L4_INSERT: + ret = intel_vgpu_g2v_pv_ppgtt_insert_4lvl(vgpu, 4); + break; + case VGT_G2V_PPGTT_L4_CLEAR: + ret = intel_vgpu_g2v_pv_ppgtt_clear_4lvl(vgpu, 4); + break; case VGT_G2V_EXECLIST_CONTEXT_CREATE: case VGT_G2V_EXECLIST_CONTEXT_DESTROY: case 1: /* Remove this in guest driver. 
*/ @@ -1168,6 +1368,26 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready) return kobject_uevent_env(kobj, KOBJ_ADD, env); } +#define INTEL_GVT_PCI_BAR_GTTMMIO 0 +static int set_pvmmio(struct intel_vgpu *vgpu, bool map) +{ + u64 start, end; + u64 val; + int ret; + + val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0]; + if (val & PCI_BASE_ADDRESS_MEM_TYPE_64) + start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); + else + start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); + + start &= ~GENMASK(3, 0); + end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1; + + ret = intel_gvt_hypervisor_set_pvmmio(vgpu, start, end, map); + return ret; +} + static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -1184,6 +1404,19 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, case _vgtif_reg(g2v_notify): ret = handle_g2v_notification(vgpu, data); break; + case _vgtif_reg(enable_pvmmio): + if (i915_modparams.enable_pvmmio) { + if (set_pvmmio(vgpu, !!data)) { + vgpu_vreg(vgpu, offset) = 0; + break; + } + vgpu_vreg(vgpu, offset) = data & i915_modparams.enable_pvmmio; + DRM_INFO("vgpu id=%d pvmmio=0x%x\n", + vgpu->id, VGPU_PVMMIO(vgpu)); + } else { + vgpu_vreg(vgpu, offset) = 0; + } + break; /* add xhot and yhot to handled list to avoid error log */ case 0x78830: case 0x78834: @@ -1209,22 +1442,6 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -static int pf_write(struct intel_vgpu *vgpu, - unsigned int offset, void *p_data, unsigned int bytes) -{ - u32 val = *(u32 *)p_data; - - if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL || - offset == _PS_1B_CTRL || offset == _PS_2B_CTRL || - offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) { - WARN_ONCE(true, "VM(%d): guest is trying to scaling a plane\n", - vgpu->id); - return 0; - } - - return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes); -} - static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -1345,6 +1562,16 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, *data0 = 0x1e1a1100; else *data0 = 0x61514b3d; + } else if(IS_BROXTON(vgpu->gvt->dev_priv)) { + /** + * "Read memory latency" command on gen9. + * Below memory latency values are read + * from broxton MRB. 
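+		 * Assuming the usual gen9 encoding (one watermark latency
+		 * level per byte, in microseconds; interpretation taken
+		 * from the i915 WM code, not confirmed for this part):
+		 * 0x16080707 decodes to levels 0-3 = 7, 7, 8, 22 us and
+		 * 0x16161616 to levels 4-7 = 22 us each.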
+ */ + if (!*data0) + *data0 = 0x16080707; + else + *data0 = 0x16161616; } break; case SKL_PCODE_CDCLK_CONTROL: @@ -1374,10 +1601,18 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu, { u32 v = *(u32 *)p_data; - v &= (1 << 31) | (1 << 29) | (1 << 9) | - (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1); + if (IS_BROXTON(vgpu->gvt->dev_priv)) + v &= (1 << 31) | (1 << 29); + else + v &= (1 << 31) | (1 << 29) | (1 << 9) | + (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1); v |= (v >> 1); + vgpu_vreg(vgpu, i915_mmio_reg_offset(SKL_FUSE_STATUS)) = + (SKL_FUSE_PG_DIST_STATUS(0) + | SKL_FUSE_PG_DIST_STATUS(1) + | SKL_FUSE_PG_DIST_STATUS(2)); + return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes); } @@ -1429,6 +1664,109 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } +static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + if (v & BXT_DE_PLL_PLL_ENABLE) + v |= BXT_DE_PLL_LOCK; + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + if (v & PORT_PLL_ENABLE) + v |= PORT_PLL_LOCK; + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_dbuf_ctl_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + if (v & DBUF_POWER_REQUEST) + v |= DBUF_POWER_STATE; + else + v &= ~DBUF_POWER_STATE; + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + u32 data = v & COMMON_RESET_DIS ? 
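The bxt_de_pll_enable_write() and bxt_port_pll_enable_write() handlers above follow a common vGPU idiom: no real PLL is programmed, so the write handler immediately reflects the lock bit into the vreg whenever the guest sets the enable bit, and the guest's lock-polling loop completes on the next read. A compact model, with invented bit positions:

#include <assert.h>
#include <stdint.h>

#define PLL_ENABLE (1u << 31)
#define PLL_LOCK   (1u << 30)

static uint32_t vreg;	/* stands in for vgpu_vreg(vgpu, offset) */

static void pll_enable_write(uint32_t v)
{
	if (v & PLL_ENABLE)
		v |= PLL_LOCK;	/* pretend the PLL locked instantly */
	vreg = v;
}

int main(void)
{
	pll_enable_write(PLL_ENABLE);
	assert(vreg & PLL_LOCK);	/* guest sees "locked" on readback */
	pll_enable_write(0);
	assert(!(vreg & PLL_LOCK));
	return 0;
}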
BXT_PHY_LANE_ENABLED : 0; + + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 v = vgpu_vreg(vgpu, offset); + v &= ~UNIQUE_TRANGE_EN_METHOD; + + vgpu_vreg(vgpu, offset) = v; + + return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); +} + +static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + + if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) { + vgpu_vreg(vgpu, offset - 0x600) = v; + vgpu_vreg(vgpu, offset - 0x800) = v; + } else { + vgpu_vreg(vgpu, offset - 0x400) = v; + vgpu_vreg(vgpu, offset - 0x600) = v; + } + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + + if (v & BIT(0)) { + vgpu_vreg(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= ~PHY_RESERVED; + vgpu_vreg(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |= PHY_POWER_GOOD; + } + + if (v & BIT(1)) { + vgpu_vreg(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &= ~PHY_RESERVED; + vgpu_vreg(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |= PHY_POWER_GOOD; + } + + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + static int mmio_read_from_hw(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -1446,6 +1784,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset); struct intel_vgpu_execlist *execlist; u32 data = *(u32 *)p_data; + u32 *elsp_data = vgpu->mmio.shared_page->elsp_data; int ret = 0; if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1)) @@ -1453,16 +1792,23 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, execlist = &vgpu->execlist[ring_id]; - execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data; - if (execlist->elsp_dwords.index == 3) { + if (VGPU_PVMMIO(vgpu) & PVMMIO_ELSP_SUBMIT) { + execlist->elsp_dwords.data[0] = elsp_data[0]; + execlist->elsp_dwords.data[1] = elsp_data[1]; + execlist->elsp_dwords.data[2] = elsp_data[2]; + execlist->elsp_dwords.data[3] = data; ret = intel_vgpu_submit_execlist(vgpu, ring_id); - if(ret) - gvt_vgpu_err("fail submit workload on ring %d\n", - ring_id); + } else { + execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data; + if (execlist->elsp_dwords.index == 3) + ret = intel_vgpu_submit_execlist(vgpu, ring_id); + ++execlist->elsp_dwords.index; + execlist->elsp_dwords.index &= 0x3; } - ++execlist->elsp_dwords.index; - execlist->elsp_dwords.index &= 0x3; + if (ret) + gvt_vgpu_err("fail submit workload on ring %d\n", ring_id); + return ret; } @@ -1493,8 +1839,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, (enable_execlist ? 
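The reworked elsp_mmio_write() above adds the PVMMIO_ELSP_SUBMIT fast path: instead of trapping all four ELSP dwords one by one, the guest deposits dwords 0-2 in the shared page and only the final trapped write (dword 3) triggers submission, cutting the traps per submission from four to one. An illustrative userspace model of both paths, not the driver code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct execlist {
	uint32_t data[4];
	unsigned int index;
};

static int submissions;

static int submit_execlist(const struct execlist *el)
{
	submissions++;
	return 0;
}

static void elsp_write(struct execlist *el, const uint32_t *shared_elsp,
		       bool pv, uint32_t data)
{
	if (pv) {
		/* one trap per submission: dwords 0-2 come from the page */
		el->data[0] = shared_elsp[0];
		el->data[1] = shared_elsp[1];
		el->data[2] = shared_elsp[2];
		el->data[3] = data;
		submit_execlist(el);
	} else {
		/* four traps per submission: accumulate by index */
		el->data[el->index] = data;
		if (el->index == 3)
			submit_execlist(el);
		el->index = (el->index + 1) & 0x3;
	}
}

int main(void)
{
	struct execlist el = {0};
	uint32_t shared[3] = {0x11, 0x22, 0x33};
	int i;

	for (i = 0; i < 4; i++)		/* legacy: 4 traps, 1 submission */
		elsp_write(&el, shared, false, i);
	elsp_write(&el, shared, true, 0x44); /* PV: 1 trap, 1 submission */
	printf("%d submissions\n", submissions);
	return 0;
}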
"enabling" : "disabling"), ring_id); - if (enable_execlist) + if (enable_execlist) { + mutex_unlock(&vgpu->gvt->lock); + mutex_lock(&vgpu->gvt->sched_lock); + mutex_lock(&vgpu->gvt->lock); intel_vgpu_start_schedule(vgpu); + mutex_unlock(&vgpu->gvt->sched_lock); + } } return 0; } @@ -1597,6 +1948,60 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu, #define MMIO_RING_RO(prefix, d, f, rm, r, w) \ MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w) +#define MMIO_PIPES_SDH(prefix, plane, s, d, r, w) do { \ + int pipe; \ + for_each_pipe(dev_priv, pipe) \ + MMIO_F(prefix(pipe, plane), s, 0, 0, 0, d, r, w); \ +} while (0) + +#define MMIO_PLANES_SDH(prefix, s, d, r, w) do { \ + int pipe, plane; \ + for_each_pipe(dev_priv, pipe) \ + for_each_universal_plane(dev_priv, pipe, plane) \ + MMIO_F(prefix(pipe, plane), s, 0, 0, 0, d, r, w); \ +} while (0) + +#define MMIO_PLANES_DH(prefix, d, r, w) \ + MMIO_PLANES_SDH(prefix, 4, d, r, w) + +#define MMIO_PORT_CL_REF(phy) \ + MMIO_D(BXT_PORT_CL1CM_DW0(phy), D_BXT); \ + MMIO_D(BXT_PORT_CL1CM_DW9(phy), D_BXT); \ + MMIO_D(BXT_PORT_CL1CM_DW10(phy), D_BXT); \ + MMIO_D(BXT_PORT_CL1CM_DW28(phy), D_BXT); \ + MMIO_D(BXT_PORT_CL1CM_DW30(phy), D_BXT); \ + MMIO_D(BXT_PORT_CL2CM_DW6(phy), D_BXT); \ + MMIO_D(BXT_PORT_REF_DW3(phy), D_BXT); \ + MMIO_D(BXT_PORT_REF_DW6(phy), D_BXT); \ + MMIO_D(BXT_PORT_REF_DW8(phy), D_BXT) + +#define MMIO_PORT_PCS_TX(phy, ch) \ + MMIO_D(BXT_PORT_PLL_EBB_0(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_PLL_EBB_4(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_PCS_DW10_LN01(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_PCS_DW10_GRP(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_PCS_DW12_LN01(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_PCS_DW12_LN23(phy, ch), D_BXT); \ + MMIO_DH(BXT_PORT_PCS_DW12_GRP(phy, ch), D_BXT, NULL, bxt_pcs_dw12_grp_write); \ + MMIO_D(BXT_PORT_TX_DW2_LN0(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_TX_DW2_GRP(phy, ch), D_BXT); \ + MMIO_DH(BXT_PORT_TX_DW3_LN0(phy, ch), D_BXT, bxt_port_tx_dw3_read, NULL); \ + MMIO_D(BXT_PORT_TX_DW3_GRP(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_TX_DW4_LN0(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_TX_DW4_GRP(phy, ch), D_BXT); \ + MMIO_D(BXT_PORT_TX_DW14_LN(phy, ch, 0), D_BXT); \ + MMIO_D(BXT_PORT_TX_DW14_LN(phy, ch, 1), D_BXT); \ + MMIO_D(BXT_PORT_TX_DW14_LN(phy, ch, 2), D_BXT); \ + MMIO_D(BXT_PORT_TX_DW14_LN(phy, ch, 3), D_BXT); \ + MMIO_D(BXT_PORT_PLL(phy, ch, 0), D_BXT); \ + MMIO_D(BXT_PORT_PLL(phy, ch, 1), D_BXT); \ + MMIO_D(BXT_PORT_PLL(phy, ch, 2), D_BXT); \ + MMIO_D(BXT_PORT_PLL(phy, ch, 3), D_BXT); \ + MMIO_D(BXT_PORT_PLL(phy, ch, 6), D_BXT); \ + MMIO_D(BXT_PORT_PLL(phy, ch, 8), D_BXT); \ + MMIO_D(BXT_PORT_PLL(phy, ch, 9), D_BXT); \ + MMIO_D(BXT_PORT_PLL(phy, ch, 10), D_BXT) + static int init_generic_mmio_info(struct intel_gvt *gvt) { struct drm_i915_private *dev_priv = gvt->dev_priv; @@ -1694,9 +2099,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(0xc4040, D_ALL); MMIO_D(DERRMR, D_ALL); - MMIO_D(PIPEDSL(PIPE_A), D_ALL); - MMIO_D(PIPEDSL(PIPE_B), D_ALL); - MMIO_D(PIPEDSL(PIPE_C), D_ALL); + MMIO_DH(PIPEDSL(PIPE_A), D_ALL, pipe_dsl_mmio_read, NULL); + MMIO_DH(PIPEDSL(PIPE_B), D_ALL, pipe_dsl_mmio_read, NULL); + MMIO_DH(PIPEDSL(PIPE_C), D_ALL, pipe_dsl_mmio_read, NULL); MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL); MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write); @@ -1740,71 +2145,71 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(0x70098, D_ALL); MMIO_D(0x7009c, D_ALL); - MMIO_D(DSPCNTR(PIPE_A), D_ALL); - MMIO_D(DSPADDR(PIPE_A), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_A), D_ALL); - MMIO_D(DSPPOS(PIPE_A), 
D_ALL); - MMIO_D(DSPSIZE(PIPE_A), D_ALL); - MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_A), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL); - - MMIO_D(DSPCNTR(PIPE_B), D_ALL); - MMIO_D(DSPADDR(PIPE_B), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_B), D_ALL); - MMIO_D(DSPPOS(PIPE_B), D_ALL); - MMIO_D(DSPSIZE(PIPE_B), D_ALL); - MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_B), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL); - - MMIO_D(DSPCNTR(PIPE_C), D_ALL); - MMIO_D(DSPADDR(PIPE_C), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_C), D_ALL); - MMIO_D(DSPPOS(PIPE_C), D_ALL); - MMIO_D(DSPSIZE(PIPE_C), D_ALL); - MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_C), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL); - - MMIO_D(SPRCTL(PIPE_A), D_ALL); - MMIO_D(SPRLINOFF(PIPE_A), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_A), D_ALL); - MMIO_D(SPRPOS(PIPE_A), D_ALL); - MMIO_D(SPRSIZE(PIPE_A), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_A), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_A), D_ALL); - MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_A), D_ALL); - MMIO_D(SPROFFSET(PIPE_A), D_ALL); - MMIO_D(SPRSCALE(PIPE_A), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL); - - MMIO_D(SPRCTL(PIPE_B), D_ALL); - MMIO_D(SPRLINOFF(PIPE_B), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_B), D_ALL); - MMIO_D(SPRPOS(PIPE_B), D_ALL); - MMIO_D(SPRSIZE(PIPE_B), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_B), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_B), D_ALL); - MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_B), D_ALL); - MMIO_D(SPROFFSET(PIPE_B), D_ALL); - MMIO_D(SPRSCALE(PIPE_B), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL); - - MMIO_D(SPRCTL(PIPE_C), D_ALL); - MMIO_D(SPRLINOFF(PIPE_C), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_C), D_ALL); - MMIO_D(SPRPOS(PIPE_C), D_ALL); - MMIO_D(SPRSIZE(PIPE_C), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_C), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_C), D_ALL); - MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_C), D_ALL); - MMIO_D(SPROFFSET(PIPE_C), D_ALL); - MMIO_D(SPRSCALE(PIPE_C), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL); + MMIO_D(DSPCNTR(PIPE_A), D_BDW); + MMIO_D(DSPADDR(PIPE_A), D_BDW); + MMIO_D(DSPSTRIDE(PIPE_A), D_BDW); + MMIO_D(DSPPOS(PIPE_A), D_BDW); + MMIO_D(DSPSIZE(PIPE_A), D_BDW); + MMIO_DH(DSPSURF(PIPE_A), D_BDW, NULL, pri_surf_mmio_write); + MMIO_D(DSPOFFSET(PIPE_A), D_BDW); + MMIO_D(DSPSURFLIVE(PIPE_A), D_BDW); + + MMIO_D(DSPCNTR(PIPE_B), D_BDW); + MMIO_D(DSPADDR(PIPE_B), D_BDW); + MMIO_D(DSPSTRIDE(PIPE_B), D_BDW); + MMIO_D(DSPPOS(PIPE_B), D_BDW); + MMIO_D(DSPSIZE(PIPE_B), D_BDW); + MMIO_DH(DSPSURF(PIPE_B), D_BDW, NULL, pri_surf_mmio_write); + MMIO_D(DSPOFFSET(PIPE_B), D_BDW); + MMIO_D(DSPSURFLIVE(PIPE_B), D_BDW); + + MMIO_D(DSPCNTR(PIPE_C), D_BDW); + MMIO_D(DSPADDR(PIPE_C), D_BDW); + MMIO_D(DSPSTRIDE(PIPE_C), D_BDW); + MMIO_D(DSPPOS(PIPE_C), D_BDW); + MMIO_D(DSPSIZE(PIPE_C), D_BDW); + MMIO_DH(DSPSURF(PIPE_C), D_BDW, NULL, pri_surf_mmio_write); + MMIO_D(DSPOFFSET(PIPE_C), D_BDW); + MMIO_D(DSPSURFLIVE(PIPE_C), D_BDW); + + MMIO_D(SPRCTL(PIPE_A), D_BDW); + MMIO_D(SPRLINOFF(PIPE_A), D_BDW); + MMIO_D(SPRSTRIDE(PIPE_A), D_BDW); + MMIO_D(SPRPOS(PIPE_A), D_BDW); + MMIO_D(SPRSIZE(PIPE_A), D_BDW); + MMIO_D(SPRKEYVAL(PIPE_A), D_BDW); + MMIO_D(SPRKEYMSK(PIPE_A), D_BDW); + MMIO_DH(SPRSURF(PIPE_A), D_BDW, NULL, spr_surf_mmio_write); + MMIO_D(SPRKEYMAX(PIPE_A), D_BDW); + MMIO_D(SPROFFSET(PIPE_A), D_BDW); + MMIO_D(SPRSCALE(PIPE_A), D_BDW); + MMIO_D(SPRSURFLIVE(PIPE_A), D_BDW); + + 
MMIO_D(SPRCTL(PIPE_B), D_BDW); + MMIO_D(SPRLINOFF(PIPE_B), D_BDW); + MMIO_D(SPRSTRIDE(PIPE_B), D_BDW); + MMIO_D(SPRPOS(PIPE_B), D_BDW); + MMIO_D(SPRSIZE(PIPE_B), D_BDW); + MMIO_D(SPRKEYVAL(PIPE_B), D_BDW); + MMIO_D(SPRKEYMSK(PIPE_B), D_BDW); + MMIO_DH(SPRSURF(PIPE_B), D_BDW, NULL, spr_surf_mmio_write); + MMIO_D(SPRKEYMAX(PIPE_B), D_BDW); + MMIO_D(SPROFFSET(PIPE_B), D_BDW); + MMIO_D(SPRSCALE(PIPE_B), D_BDW); + MMIO_D(SPRSURFLIVE(PIPE_B), D_BDW); + + MMIO_D(SPRCTL(PIPE_C), D_BDW); + MMIO_D(SPRLINOFF(PIPE_C), D_BDW); + MMIO_D(SPRSTRIDE(PIPE_C), D_BDW); + MMIO_D(SPRPOS(PIPE_C), D_BDW); + MMIO_D(SPRSIZE(PIPE_C), D_BDW); + MMIO_D(SPRKEYVAL(PIPE_C), D_BDW); + MMIO_D(SPRKEYMSK(PIPE_C), D_BDW); + MMIO_DH(SPRSURF(PIPE_C), D_BDW, NULL, spr_surf_mmio_write); + MMIO_D(SPRKEYMAX(PIPE_C), D_BDW); + MMIO_D(SPROFFSET(PIPE_C), D_BDW); + MMIO_D(SPRSCALE(PIPE_C), D_BDW); + MMIO_D(SPRSURFLIVE(PIPE_C), D_BDW); MMIO_D(HTOTAL(TRANSCODER_A), D_ALL); MMIO_D(HBLANK(TRANSCODER_A), D_ALL); @@ -1911,8 +2316,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(BLC_PWM_CPU_CTL2, D_ALL); MMIO_D(BLC_PWM_CPU_CTL, D_ALL); - MMIO_D(BLC_PWM_PCH_CTL1, D_ALL); - MMIO_D(BLC_PWM_PCH_CTL2, D_ALL); + MMIO_D(BLC_PWM_PCH_CTL1, D_ALL & ~D_BXT); + MMIO_D(BLC_PWM_PCH_CTL2, D_ALL & ~D_BXT); MMIO_D(0x48268, D_ALL); @@ -2010,12 +2415,12 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(PCH_PP_ON_DELAYS, D_ALL); MMIO_D(PCH_PP_OFF_DELAYS, D_ALL); - MMIO_DH(0xe651c, D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read, NULL); + MMIO_DH(0xe651c, D_ALL, NULL, mmio_write_empty); + MMIO_DH(0xe661c, D_ALL, NULL, mmio_write_empty); + MMIO_DH(0xe671c, D_ALL, NULL, mmio_write_empty); + MMIO_DH(0xe681c, D_ALL, NULL, mmio_write_empty); + MMIO_DH(0xe6c04, D_ALL, NULL, mmio_write_empty); + MMIO_DH(0xe6e1c, D_ALL, NULL, mmio_write_empty); MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0, PORTA_HOTPLUG_STATUS_MASK @@ -2632,127 +3037,59 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(0x6c05c, D_SKL | D_KBL); MMIO_DH(0X6c060, D_SKL | D_KBL, dpll_status_read, NULL); - MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); - - MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); - - MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); - - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); - 
MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); + MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + + MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + + MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); - 
MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL_PLUS, NULL, NULL); - - MMIO_D(0x70380, D_SKL_PLUS); - MMIO_D(0x71380, D_SKL_PLUS); - MMIO_D(0x72380, D_SKL_PLUS); - MMIO_D(0x7039c, D_SKL_PLUS); +// MMIO_PLANES_DH(PLANE_COLOR_CTL, D_SKL, NULL, NULL); + MMIO_PLANES_DH(PLANE_CTL, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_STRIDE, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_POS, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_SIZE, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_KEYVAL, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_KEYMSK, D_SKL_PLUS, NULL, skl_plane_mmio_write); + + MMIO_PLANES_DH(PLANE_SURF, D_SKL_PLUS, NULL, skl_plane_surf_write); + + MMIO_PLANES_DH(PLANE_KEYMAX, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_OFFSET, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(_REG_701C0, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(_REG_701C4, D_SKL_PLUS, NULL, skl_plane_mmio_write); + + MMIO_PLANES_SDH(_PLANE_WM_BASE, 4 * 8, D_SKL_PLUS, NULL, NULL); + MMIO_PLANES_DH(PLANE_WM_TRANS, D_SKL_PLUS, NULL, NULL); + MMIO_PLANES_DH(PLANE_NV12_BUF_CFG, 
D_SKL_PLUS, NULL, + pv_plane_wm_mmio_write); + MMIO_PLANES_DH(PLANE_BUF_CFG, D_SKL_PLUS, NULL, NULL); MMIO_D(0x8f074, D_SKL | D_KBL); MMIO_D(0x8f004, D_SKL | D_KBL); @@ -2804,14 +3141,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(0x71034, D_SKL_PLUS); MMIO_D(0x72034, D_SKL_PLUS); - MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL_PLUS); - MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL_PLUS); - MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL_PLUS); - MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL_PLUS); - MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL_PLUS); - MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL_PLUS); - MMIO_D(0x44500, D_SKL_PLUS); + MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); @@ -2819,6 +3150,241 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(0x4ab8, D_KBL); MMIO_D(0x2248, D_SKL_PLUS | D_KBL); + MMIO_D(HUC_STATUS2, D_GEN9PLUS); + + return 0; +} + +static int init_bxt_mmio_info(struct intel_gvt *gvt) +{ + struct drm_i915_private *dev_priv = gvt->dev_priv; + int ret; + + MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); + MMIO_DH(FORCEWAKE_ACK_RENDER_GEN9, D_SKL_PLUS, NULL, NULL); + MMIO_DH(FORCEWAKE_BLITTER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); + MMIO_DH(FORCEWAKE_ACK_BLITTER_GEN9, D_SKL_PLUS, NULL, NULL); + MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); + MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL); + + MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); + MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); + MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); + + MMIO_D(HSW_PWR_WELL_CTL_BIOS(SKL_DISP_PW_MISC_IO), D_SKL_PLUS); + MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL, + skl_power_well_ctl_write); + + MMIO_D(0xa210, D_SKL_PLUS); + MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); + MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); + MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DH(0x4ddc, D_BXT, NULL, skl_misc_ctl_write); + MMIO_DH(0x42080, D_BXT, NULL, skl_misc_ctl_write); + MMIO_D(0x45504, D_BXT); + MMIO_D(0x45520, D_BXT); + MMIO_D(0x46000, D_BXT); + MMIO_DH(0x46010, D_BXT, NULL, skl_lcpll_write); + MMIO_DH(0x46014, D_BXT, NULL, skl_lcpll_write); + MMIO_D(0x6C040, D_BXT); + MMIO_D(0x6C048, D_BXT); + MMIO_D(0x6C050, D_BXT); + MMIO_D(0x6C044, D_BXT); + MMIO_D(0x6C04C, D_BXT); + MMIO_D(0x6C054, D_BXT); + MMIO_D(0x6c058, D_BXT); + MMIO_D(0x6c05c, D_BXT); + MMIO_DH(0X6c060, D_BXT, dpll_status_read, NULL); + + MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_BXT, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_BXT, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_BXT, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_BXT, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_BXT, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_BXT, NULL, skl_ps_mmio_write); + + MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_BXT, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_BXT, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_BXT, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_BXT, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_BXT, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_BXT, NULL, skl_ps_mmio_write); + + MMIO_DH(SKL_PS_CTRL(PIPE_A, 
0), D_BXT, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_BXT, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_BXT, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_BXT, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_BXT, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_BXT, NULL, skl_ps_mmio_write); + + MMIO_DH(CUR_BUF_CFG(PIPE_A), D_BXT, NULL, NULL); + MMIO_DH(CUR_BUF_CFG(PIPE_B), D_BXT, NULL, NULL); + MMIO_DH(CUR_BUF_CFG(PIPE_C), D_BXT, NULL, NULL); + + MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_BXT, NULL, NULL); + MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_BXT, NULL, NULL); + MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_BXT, NULL, NULL); + + MMIO_DH(CUR_WM_TRANS(PIPE_A), D_BXT, NULL, NULL); + MMIO_DH(CUR_WM_TRANS(PIPE_B), D_BXT, NULL, NULL); + MMIO_DH(CUR_WM_TRANS(PIPE_C), D_BXT, NULL, NULL); + + MMIO_PLANES_DH(PLANE_CTL, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_STRIDE, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_POS, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_SIZE, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_KEYVAL, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_KEYMSK, D_BXT, NULL, skl_plane_mmio_write); + + MMIO_PLANES_DH(PLANE_SURF, D_BXT, NULL, skl_plane_surf_write); + + MMIO_PLANES_DH(PLANE_KEYMAX, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_OFFSET, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(_REG_701C0, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(_REG_701C4, D_BXT, NULL, skl_plane_mmio_write); + + if (i915_modparams.avail_planes_per_pipe) { + MMIO_PLANES_SDH(_PLANE_WM_BASE, 4 * 8, D_BXT, NULL, NULL); + MMIO_PLANES_DH(PLANE_WM_TRANS, D_BXT, NULL, NULL); + } else { + MMIO_PLANES_SDH(_PLANE_WM_BASE, 4 * 8, D_BXT, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_WM_TRANS, D_BXT, NULL, skl_plane_mmio_write); + } + + MMIO_PLANES_DH(PLANE_NV12_BUF_CFG, D_BXT, NULL, pv_plane_wm_mmio_write); + MMIO_PLANES_DH(PLANE_BUF_CFG, D_BXT, NULL, NULL); + + MMIO_F(0x80000, 0x3000, 0, 0, 0, D_BXT, NULL, NULL); + MMIO_D(0x8f074, D_BXT); + MMIO_D(0x8f004, D_BXT); + MMIO_D(0x8f034, D_BXT); + + MMIO_D(0xb11c, D_BXT); + + MMIO_D(0x51000, D_BXT); + MMIO_D(0x6c00c, D_BXT); + + MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); + MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_BXT, NULL, NULL); + + MMIO_D(0xd08, D_BXT); + MMIO_D(0x20e0, D_BXT); + MMIO_D(0x20ec, D_BXT); + + /* TRTT */ + MMIO_D(0x4de0, D_BXT); + MMIO_D(0x4de4, D_BXT); + MMIO_D(0x4de8, D_BXT); + MMIO_D(0x4dec, D_BXT); + MMIO_D(0x4df0, D_BXT); + MMIO_DH(0x4df4, D_BXT, NULL, gen9_trtte_write); + MMIO_DH(0x4dfc, D_BXT, NULL, gen9_trtt_chicken_write); + + MMIO_DH(0x45008, D_BXT, NULL, bxt_dbuf_ctl_write); + + MMIO_D(0x46430, D_BXT); + + MMIO_D(0x46520, D_BXT); + + MMIO_D(0xc403c, D_BXT); + MMIO_D(0xb004, D_BXT); + MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write); + + MMIO_D(0x65900, D_BXT); + MMIO_D(0x1082c0, D_BXT); + MMIO_D(0x4068, D_BXT); + MMIO_D(0x67054, D_BXT); + MMIO_D(0x6e560, D_BXT); + MMIO_D(0x6e554, D_BXT); + MMIO_D(0x2b20, D_BXT); + MMIO_D(0x65f00, D_BXT); + MMIO_D(0x65f08, D_BXT); + MMIO_D(0x320f0, D_BXT); + + MMIO_D(0x70034, D_BXT); + MMIO_D(0x71034, D_BXT); + MMIO_D(0x72034, D_BXT); + + MMIO_D(0x44500, D_BXT); + + MMIO_D(GEN8_GTCR, D_SKL_PLUS); + + MMIO_D(GEN7_SAMPLER_INSTDONE, D_SKL_PLUS); + MMIO_D(GEN7_ROW_INSTDONE, D_SKL_PLUS); + MMIO_D(GEN8_FAULT_TLB_DATA0, D_SKL_PLUS); + MMIO_D(GEN8_FAULT_TLB_DATA1, D_SKL_PLUS); + MMIO_D(ERROR_GEN6, 
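The avail_planes_per_pipe conditional above registers different watermark handlers depending on how the display is shared: with planes assigned directly to guests, the PLANE_WM registers get no write handler, otherwise writes are mediated through skl_plane_mmio_write(). A sketch of handler selection driven by a module parameter (the names and the 0x70240 offset are hypothetical):

#include <stdint.h>
#include <stdio.h>

typedef int (*write_fn)(uint32_t offset, uint32_t val);

static int mediated_wm_write(uint32_t offset, uint32_t val)
{
	printf("mediated WM write 0x%x <- 0x%x\n", offset, val);
	return 0;
}

struct mmio_entry {
	uint32_t offset;
	write_fn write;		/* NULL: writes only land in the vreg copy */
};

static struct mmio_entry wm_entry;

static void register_wm_reg(int avail_planes_per_pipe)
{
	wm_entry.offset = 0x70240;	/* hypothetical PLANE_WM offset */
	wm_entry.write = avail_planes_per_pipe ? NULL : mediated_wm_write;
}

int main(void)
{
	register_wm_reg(0);		/* shared display: mediate WM writes */
	if (wm_entry.write)
		wm_entry.write(wm_entry.offset, 0x80000008);
	return 0;
}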
D_SKL_PLUS); + MMIO_D(DONE_REG, D_SKL_PLUS); + MMIO_D(EIR, D_SKL_PLUS); + MMIO_D(PGTBL_ER, D_SKL_PLUS); + MMIO_D(0x4194, D_SKL_PLUS); + MMIO_D(0x4294, D_SKL_PLUS); + MMIO_D(0x4494, D_SKL_PLUS); + + MMIO_RING_D(RING_PSMI_CTL, D_SKL_PLUS); + MMIO_RING_D(RING_DMA_FADD, D_SKL_PLUS); + MMIO_RING_D(RING_DMA_FADD_UDW, D_SKL_PLUS); + MMIO_RING_D(RING_IPEHR, D_SKL_PLUS); + MMIO_RING_D(RING_INSTPS, D_SKL_PLUS); + MMIO_RING_D(RING_BBADDR_UDW, D_SKL_PLUS); + MMIO_RING_D(RING_BBSTATE, D_SKL_PLUS); + MMIO_RING_D(RING_IPEIR, D_SKL_PLUS); + + MMIO_D(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS); + MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_D(0xc4c8, D_SKL_PLUS); + MMIO_D(GUC_BCS_RCS_IER, D_SKL_PLUS); + MMIO_D(GUC_VCS2_VCS1_IER, D_SKL_PLUS); + MMIO_D(GUC_WD_VECS_IER, D_SKL_PLUS); + MMIO_D(GUC_MAX_IDLE_COUNT, D_SKL_PLUS); + + MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write); + MMIO_D(BXT_RP_STATE_CAP, D_BXT); + MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT, NULL, bxt_phy_ctl_family_write); + MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT, NULL, bxt_phy_ctl_family_write); + MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT); + MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT); + MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT); + MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT, NULL, bxt_port_pll_enable_write); + MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT, NULL, bxt_port_pll_enable_write); + MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL, bxt_port_pll_enable_write); + + MMIO_PORT_CL_REF(DPIO_PHY0); + MMIO_PORT_PCS_TX(DPIO_PHY0, DPIO_CH0); + MMIO_PORT_PCS_TX(DPIO_PHY0, DPIO_CH1); + MMIO_PORT_CL_REF(DPIO_PHY1); + MMIO_PORT_PCS_TX(DPIO_PHY1, DPIO_CH0); + + MMIO_D(BXT_DE_PLL_CTL, D_BXT); + MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write); + MMIO_D(BXT_DSI_PLL_CTL, D_BXT); + MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT); + + MMIO_D(BXT_BLC_PWM_CTL(0), D_BXT); + MMIO_D(BXT_BLC_PWM_FREQ(0), D_BXT); + MMIO_D(BXT_BLC_PWM_DUTY(0), D_BXT); + MMIO_D(BXT_BLC_PWM_CTL(1), D_BXT); + MMIO_D(BXT_BLC_PWM_FREQ(1), D_BXT); + MMIO_D(BXT_BLC_PWM_DUTY(1), D_BXT); + + MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT); + + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT); + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT); + + MMIO_D(RC6_LOCATION, D_BXT); + MMIO_D(RC6_CTX_BASE, D_BXT); + + MMIO_D(0xA248, D_SKL_PLUS); + MMIO_D(0xA250, D_SKL_PLUS); + MMIO_D(0xA25C, D_SKL_PLUS); + MMIO_D(0xA000, D_SKL_PLUS); + MMIO_D(0xB100, D_SKL_PLUS); + MMIO_D(0xD00, D_SKL_PLUS); + + MMIO_D(HUC_STATUS2, D_GEN9PLUS); + return 0; } @@ -2859,6 +3425,9 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt) vfree(gvt->mmio.mmio_attribute); gvt->mmio.mmio_attribute = NULL; + + vfree(gvt->mmio.mmio_host_cache); + gvt->mmio.mmio_host_cache = NULL; } /* Special MMIO blocks. 
*/ @@ -2893,6 +3462,12 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) if (!gvt->mmio.mmio_attribute) return -ENOMEM; + gvt->mmio.mmio_host_cache = vzalloc(info->mmio_size); + if (!gvt->mmio.mmio_host_cache) { + vfree(gvt->mmio.mmio_attribute); + return -ENOMEM; + } + ret = init_generic_mmio_info(gvt); if (ret) goto err; @@ -2909,6 +3484,13 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) ret = init_skl_mmio_info(gvt); if (ret) goto err; + } else if (IS_BROXTON(dev_priv)) { + ret = init_broadwell_mmio_info(gvt); + if (ret) + goto err; + ret = init_bxt_mmio_info(gvt); + if (ret) + goto err; } gvt->mmio.mmio_block = mmio_blocks; @@ -2916,13 +3498,15 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) gvt_dbg_mmio("traced %u virtual mmio registers\n", gvt->mmio.num_tracked_mmio); + + intel_gvt_mark_noncontext_mmios(gvt); + return 0; err: intel_gvt_clean_mmio_info(gvt); return ret; } - /** * intel_vgpu_default_mmio_read - default MMIO read handler * @vgpu: a vGPU diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index df7f33abd393..b15296456334 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -33,7 +33,30 @@ #ifndef _GVT_HYPERCALL_H_ #define _GVT_HYPERCALL_H_ -/* +/** + * struct intel_gvt_mpt - GVT module Mediate Pass-through Interface (MPT) + * + * @host_init: register intel_gvt_ops interface into hypervisor MPT module and + * init GVT-g host side + * @host_exit: exit GVT-g host side + * @attach_vgpu: call hypervisor to initialize vGPU related stuffs inside + * hypervisor + * @detach_vgpu: call hypervisor to release vGPU related stuffs inside + * hypervisor + * @inject_msi: inject a MSI interrupt into vGPU + * @from_virt_to_mfn: translate a host VA into MFN + * @set_wp_page: set a guest page to write-protected + * @unset_wp_page: remove the write-protection of a guest page + * @read_gpa: copy data from GPA to host data buffer + * @write_gpa: copy data from host data buffer to GPA + * @gfn_to_mfn: translate a GFN to MFN + * @map_gfn_to_mfn: map a GFN region to MFN + * @set_trap_area: Trap a guest PA region + * @set_pvmmio: Set the pvmmio area + * @pause_domain: pause a domain + * @unpause_domain: unpause a domain + * @dom0_ready: raise a uevent when Dom 0 is ready for Dom U + * * Specific GVT-g MPT modules function collections. Currently GVT-g supports * both Xen and KVM by providing dedicated hypervisor-related MPT modules. 
*/ @@ -55,9 +78,15 @@ struct intel_gvt_mpt { unsigned long mfn, unsigned int nr, bool map); int (*set_trap_area)(unsigned long handle, u64 start, u64 end, bool map); + int (*set_pvmmio)(unsigned long handle, u64 start, u64 end, + bool map); + int (*pause_domain)(unsigned long handle); + int (*unpause_domain)(unsigned long handle); + int (*dom0_ready)(void); }; extern struct intel_gvt_mpt xengt_mpt; extern struct intel_gvt_mpt kvmgt_mpt; +extern struct intel_gvt_mpt acrn_gvt_mpt; #endif /* _GVT_HYPERCALL_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 7a041b368f68..faa78870ef67 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -69,6 +69,7 @@ static const char * const irq_name[INTEL_GVT_EVENT_MAX] = { [VCS_PAGE_DIRECTORY_FAULT] = "Video page directory faults", [VCS_AS_CONTEXT_SWITCH] = "Video AS Context Switch Interrupt", [VCS2_MI_USER_INTERRUPT] = "VCS2 Video CS MI USER INTERRUPT", + [VCS2_CMD_STREAMER_ERR] = "VCS2 Video CS error interrupt", [VCS2_MI_FLUSH_DW] = "VCS2 Video MI FLUSH DW notify", [VCS2_AS_CONTEXT_SWITCH] = "VCS2 Context Switch Interrupt", @@ -350,7 +351,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu, clear_bits |= (1 << bit); } - WARN_ON(!up_irq_info); + if (WARN_ON(!up_irq_info)) + return; if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) { u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base); @@ -523,21 +525,26 @@ static void gen8_init_irq( /* GEN8 interrupt GT0 events */ SET_BIT_INFO(irq, 0, RCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0); + SET_BIT_INFO(irq, 3, RCS_CMD_STREAMER_ERR, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 4, RCS_PIPE_CONTROL, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 8, RCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 16, BCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0); + SET_BIT_INFO(irq, 19, BCS_CMD_STREAMER_ERR, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 20, BCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 24, BCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0); /* GEN8 interrupt GT1 events */ SET_BIT_INFO(irq, 0, VCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1); + SET_BIT_INFO(irq, 3, VCS_CMD_STREAMER_ERR, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1); if (HAS_BSD2(gvt->dev_priv)) { SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1); + SET_BIT_INFO(irq, 19, VCS2_CMD_STREAMER_ERR, + INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 24, VCS2_AS_CONTEXT_SWITCH, @@ -546,6 +553,7 @@ static void gen8_init_irq( /* GEN8 interrupt GT3 events */ SET_BIT_INFO(irq, 0, VECS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT3); + SET_BIT_INFO(irq, 3, VECS_CMD_STREAMER_ERR, INTEL_GVT_IRQ_INFO_GT3); SET_BIT_INFO(irq, 4, VECS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT3); SET_BIT_INFO(irq, 8, VECS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT3); @@ -580,7 +588,8 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); - } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) { + } else if (IS_SKYLAKE(gvt->dev_priv) || IS_BROXTON(gvt->dev_priv) + || IS_KABYLAKE(gvt->dev_priv)) { SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, 
INTEL_GVT_IRQ_INFO_DE_PORT); @@ -592,6 +601,10 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); SET_BIT_INFO(irq, 4, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); SET_BIT_INFO(irq, 4, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); + + SET_BIT_INFO(irq, 5, PLANE_3_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); + SET_BIT_INFO(irq, 5, PLANE_3_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); + SET_BIT_INFO(irq, 5, PLANE_3_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); } /* GEN8 interrupt PCU events */ @@ -651,7 +664,7 @@ static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data) irq = container_of(vblank_timer, struct intel_gvt_irq, vblank_timer); gvt = container_of(irq, struct intel_gvt, irq); - intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK); +// intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK); hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period); return HRTIMER_RESTART; } @@ -691,6 +704,7 @@ int intel_gvt_init_irq(struct intel_gvt *gvt) gvt_dbg_core("init irq framework\n"); if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) + || IS_BROXTON(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) { irq->ops = &gen8_irq_ops; irq->irq_map = gen8_irq_map; diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h index 5313fb1b33e1..6ec761a84557 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.h +++ b/drivers/gpu/drm/i915/gvt/interrupt.h @@ -53,6 +53,7 @@ enum intel_gvt_event_type { VCS_AS_CONTEXT_SWITCH, VCS2_MI_USER_INTERRUPT, + VCS2_CMD_STREAMER_ERR, VCS2_MI_FLUSH_DW, VCS2_AS_CONTEXT_SWITCH, @@ -64,6 +65,7 @@ enum intel_gvt_event_type { BCS_AS_CONTEXT_SWITCH, VECS_MI_USER_INTERRUPT, + VECS_CMD_STREAMER_ERR, VECS_MI_FLUSH_DW, VECS_AS_CONTEXT_SWITCH, @@ -92,6 +94,9 @@ enum intel_gvt_event_type { SPRITE_A_FLIP_DONE, SPRITE_B_FLIP_DONE, SPRITE_C_FLIP_DONE, + PLANE_3_A_FLIP_DONE, + PLANE_3_B_FLIP_DONE, + PLANE_3_C_FLIP_DONE, PCU_THERMAL, PCU_PCODE2DRIVER_MAILBOX, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 83e88c70272a..9bf4045cd679 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1153,7 +1153,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, return 0; } - return 0; + return -ENOTTY; } static ssize_t diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 980ec8906b1e..86f454173056 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -39,7 +39,7 @@ /** * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset * @vgpu: a vGPU - * + * @gpa: guest physical address * Returns: * Zero on success, negative error code if failed */ @@ -68,7 +68,6 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, return; gvt = vgpu->gvt; - mutex_lock(&gvt->lock); offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); if (reg_is_mmio(gvt, offset)) { if (read) @@ -106,32 +105,19 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, p_data, bytes); } } - mutex_unlock(&gvt->lock); } -/** - * intel_vgpu_emulate_mmio_read - emulate MMIO read - * @vgpu: a vGPU - * @pa: guest physical address - * @p_data: data return buffer - * @bytes: access data length - * - * Returns: - * Zero on success, negative error code if failed - */ -int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, +int intel_vgpu_emulate_mmio_read_locked(struct intel_vgpu *vgpu, uint64_t pa, 
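The kvmgt ioctl change above returns -ENOTTY instead of 0 for unhandled commands, the conventional errno for "this ioctl does not exist on this device", which lets callers distinguish unsupported requests from silent success. A trivial model of the dispatch policy, with invented command numbers:

#include <errno.h>
#include <stdio.h>

#define CMD_GET_INFO	1	/* invented command numbers */
#define CMD_RESET	2

static long vgpu_ioctl(unsigned int cmd)
{
	switch (cmd) {
	case CMD_GET_INFO:
	case CMD_RESET:
		return 0;	/* handled */
	default:
		return -ENOTTY;	/* unknown ioctl: no silent success */
	}
}

int main(void)
{
	printf("unknown cmd -> %ld (expected -%d)\n",
	       vgpu_ioctl(999), ENOTTY);
	return 0;
}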
void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; unsigned int offset = 0; int ret = -EINVAL; - if (vgpu->failsafe) { failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true); return 0; } - mutex_lock(&gvt->lock); if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { struct intel_vgpu_guest_page *gp; @@ -146,7 +132,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, ret, gp->gfn, pa, *(u32 *)p_data, bytes); } - mutex_unlock(&gvt->lock); return ret; } } @@ -168,13 +153,11 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, p_data, bytes); if (ret) goto err; - mutex_unlock(&gvt->lock); return ret; } if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) { ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes); - mutex_unlock(&gvt->lock); return ret; } @@ -191,26 +174,37 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, goto err; intel_gvt_mmio_set_accessed(gvt, offset); - mutex_unlock(&gvt->lock); return 0; err: gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n", offset, bytes); - mutex_unlock(&gvt->lock); return ret; } /** - * intel_vgpu_emulate_mmio_write - emulate MMIO write + * intel_vgpu_emulate_mmio_read - emulate MMIO read * @vgpu: a vGPU * @pa: guest physical address - * @p_data: write data buffer + * @p_data: data return buffer * @bytes: access data length * * Returns: * Zero on success, negative error code if failed */ -int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, +int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, + void *p_data, unsigned int bytes) +{ + int ret; + + mutex_lock(&vgpu->gvt->lock); + ret = intel_vgpu_emulate_mmio_read_locked(vgpu, pa, p_data, bytes); + mutex_unlock(&vgpu->gvt->lock); + + return ret; +} + + +int intel_vgpu_emulate_mmio_write_locked(struct intel_vgpu *vgpu, uint64_t pa, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; @@ -222,8 +216,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, return 0; } - mutex_lock(&gvt->lock); - if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { struct intel_vgpu_guest_page *gp; @@ -237,7 +229,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, ret, gp->gfn, pa, *(u32 *)p_data, bytes); } - mutex_unlock(&gvt->lock); return ret; } } @@ -259,13 +250,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, p_data, bytes); if (ret) goto err; - mutex_unlock(&gvt->lock); return ret; } if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) { ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes); - mutex_unlock(&gvt->lock); return ret; } @@ -273,27 +262,56 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, if (ret < 0) goto err; + if (vgpu->entire_nonctxmmio_checked + && intel_gvt_mmio_is_non_context(vgpu->gvt, offset) + && vgpu_vreg(vgpu, offset) + != *(u32 *)(vgpu->gvt->mmio.mmio_host_cache + offset)) { + gvt_err("vgpu%d unexpected non-context MMIO change at 0x%x:0x%x,0x%x\n", + vgpu->id, offset, vgpu_vreg(vgpu, offset), + *(u32 *)(vgpu->gvt->mmio.mmio_host_cache + offset)); + } + intel_gvt_mmio_set_accessed(gvt, offset); - mutex_unlock(&gvt->lock); return 0; err: gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset, bytes); - mutex_unlock(&gvt->lock); return ret; } +/** + * intel_vgpu_emulate_mmio_write - emulate MMIO write + * @vgpu: a vGPU + * @pa: guest physical address + * @p_data: write data buffer + * @bytes: access data length + * + * Returns: + * 
Zero on success, negative error code if failed + */ +int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, + void *p_data, unsigned int bytes) +{ + int ret; + + mutex_lock(&vgpu->gvt->lock); + ret = intel_vgpu_emulate_mmio_write_locked(vgpu, pa, p_data, bytes); + mutex_unlock(&vgpu->gvt->lock); + + return ret; +} /** * intel_vgpu_reset_mmio - reset virtual MMIO space * @vgpu: a vGPU - * + * @dmlr: vGPU Device Model Level Reset or GT Reset */ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) { struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_device_info *info = &gvt->device_info; void *mmio = gvt->firmware.mmio; + struct drm_i915_private *dev_priv = gvt->dev_priv; if (dmlr) { memcpy(vgpu->mmio.vreg, mmio, info->mmio_size); @@ -315,6 +333,24 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET); } + /* below vreg init value are got from handler.c, + * which won't change during vgpu life cycle + */ + vgpu_vreg(vgpu, 0xe651c) = 1 << 17; + vgpu_vreg(vgpu, 0xe661c) = 1 << 17; + vgpu_vreg(vgpu, 0xe671c) = 1 << 17; + vgpu_vreg(vgpu, 0xe681c) = 1 << 17; + vgpu_vreg(vgpu, 0xe6c04) = 3; + vgpu_vreg(vgpu, 0xe6e1c) = 0x2f << 16; + + if (HAS_HUC_UCODE(dev_priv)) { + mmio_hw_access_pre(dev_priv); + vgpu_vreg(vgpu, HUC_STATUS2) = I915_READ(HUC_STATUS2); + mmio_hw_access_post(dev_priv); + } + + /* Non-context MMIOs need entire check again if mmio/vgpu reset */ + vgpu->entire_nonctxmmio_checked = false; } /** @@ -328,11 +364,15 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) { const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; - vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); - if (!vgpu->mmio.vreg) - return -ENOMEM; + BUILD_BUG_ON(sizeof(struct gvt_shared_page) != PAGE_SIZE); - vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; + vgpu->mmio.sreg = vzalloc(info->mmio_size); + vgpu->mmio.vreg = (void *)__get_free_pages(GFP_KERNEL, + info->mmio_size_order); + vgpu->mmio.shared_page = (struct gvt_shared_page *) __get_free_pages( + GFP_KERNEL, 0); + if (!vgpu->mmio.vreg || !vgpu->mmio.sreg || !vgpu->mmio.shared_page) + return -ENOMEM; intel_vgpu_reset_mmio(vgpu, true); @@ -346,6 +386,10 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) */ void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu) { - vfree(vgpu->mmio.vreg); - vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + + vfree(vgpu->mmio.sreg); + free_pages((unsigned long) vgpu->mmio.vreg, info->mmio_size_order); + free_pages((unsigned long) vgpu->mmio.shared_page, 0); + vgpu->mmio.vreg = vgpu->mmio.sreg = vgpu->mmio.shared_page = NULL; } diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 32cd64ddad26..ad21866e4ffb 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -42,15 +42,16 @@ struct intel_vgpu; #define D_BDW (1 << 0) #define D_SKL (1 << 1) #define D_KBL (1 << 2) +#define D_BXT (1 << 3) -#define D_GEN9PLUS (D_SKL | D_KBL) -#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL) +#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT) +#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT) -#define D_SKL_PLUS (D_SKL | D_KBL) -#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL) +#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT) +#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT) #define D_PRE_SKL (D_BDW) -#define D_ALL (D_BDW | D_SKL | D_KBL) +#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT) typedef int (*gvt_mmio_func)(struct intel_vgpu *, 
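The BUILD_BUG_ON() added to intel_vgpu_init_mmio() above pins struct gvt_shared_page to exactly PAGE_SIZE, since the page is shared with the guest and both sides must agree on its size and layout. The same invariant expressed as a standalone C11 static assertion, with a made-up layout standing in for the real structure:

#include <stdint.h>

#define PAGE_SIZE 4096

struct shared_page {
	uint32_t reg_addr;			/* pv_mmio remap target */
	uint32_t elsp_data[4];			/* batched ELSP dwords */
	uint8_t  pad[PAGE_SIZE - 5 * sizeof(uint32_t)];	/* fill the page */
};

/* compilation fails if the structure ever grows past one page */
_Static_assert(sizeof(struct shared_page) == PAGE_SIZE,
	       "shared page must be exactly one page");

int main(void) { return 0; }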
unsigned int, void *, unsigned int); @@ -85,9 +86,12 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, void *p_data, unsigned int bytes); +int intel_vgpu_emulate_mmio_read_locked(struct intel_vgpu *vgpu, u64 pa, + void *p_data, unsigned int bytes); int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa, void *p_data, unsigned int bytes); - +int intel_vgpu_emulate_mmio_write_locked(struct intel_vgpu *vgpu, u64 pa, + void *p_data, unsigned int bytes); int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index f0e5487e6688..4d6a521ee47e 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -45,6 +45,9 @@ /** * intel_gvt_hypervisor_host_init - init GVT-g host side + * @dev: i915 device + * @gvt: GVT device + * @ops: intel_gvt_ops interface * * Returns: * Zero on success, negative error code if failed @@ -61,6 +64,8 @@ static inline int intel_gvt_hypervisor_host_init(struct device *dev, /** * intel_gvt_hypervisor_host_exit - exit GVT-g host side + * @dev: i915 device + * @gvt: GVT device */ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt) @@ -75,6 +80,7 @@ static inline void intel_gvt_hypervisor_host_exit(struct device *dev, /** * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU * related stuffs inside hypervisor. + * @vgpu: a vGPU * * Returns: * Zero on success, negative error code if failed. @@ -91,6 +97,7 @@ static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu) /** * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU * related stuffs inside hypervisor. + * @vgpu: a vGPU * * Returns: * Zero on success, negative error code if failed. @@ -111,6 +118,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu) /** * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU + * @vgpu: a vGPU * * Returns: * Zero on success, negative error code if failed. @@ -142,7 +150,7 @@ static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu) } /** - * intel_gvt_hypervisor_set_wp_page - translate a host VA into MFN + * intel_gvt_hypervisor_virt_to_mfn - translate a host VA into MFN * @p: host kernel virtual address * * Returns: @@ -237,7 +245,7 @@ static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu, /** * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN * @vgpu: a vGPU - * @gpfn: guest pfn + * @gfn: guest pfn * * Returns: * MFN on success, INTEL_GVT_INVALID_ADDR if failed. @@ -292,4 +300,70 @@ static inline int intel_gvt_hypervisor_set_trap_area( return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map); } +/** + * intel_gvt_hypervisor_set_pvmmio - Set the pvmmio area + * @vgpu: a vGPU + * @start: the beginning of the guest physical address region + * @end: the end of the guest physical address region + * @map: map or unmap + * + * Returns: + * Zero on success, negative error code if failed. 
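The mmio.h hunk above adds D_BXT to every composite device mask, so all existing D_SKL_PLUS/D_BDW_PLUS/D_ALL register entries automatically cover Broxton, while individual entries can still opt out the way BLC_PWM_PCH_CTL1 does with D_ALL & ~D_BXT earlier in the patch. A quick check of the mask algebra:

#include <assert.h>

#define D_BDW (1 << 0)
#define D_SKL (1 << 1)
#define D_KBL (1 << 2)
#define D_BXT (1 << 3)

#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT)
#define D_ALL      (D_BDW | D_SKL_PLUS)

static int entry_applies(unsigned int entry_mask, unsigned int platform)
{
	return (entry_mask & platform) != 0;
}

int main(void)
{
	assert(entry_applies(D_SKL_PLUS, D_BXT));      /* BXT inherits SKL+ */
	assert(!entry_applies(D_ALL & ~D_BXT, D_BXT)); /* explicit opt-out */
	assert(entry_applies(D_ALL & ~D_BXT, D_BDW));
	return 0;
}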
+ */ +static inline int intel_gvt_hypervisor_set_pvmmio( + struct intel_vgpu *vgpu, u64 start, u64 end, bool map) +{ + /* an MPT implementation could have MMIO trapped elsewhere */ + if (!intel_gvt_host.mpt->set_pvmmio) + return -ENOENT; + + return intel_gvt_host.mpt->set_pvmmio(vgpu->handle, start, end, map); +} + +/** + * intel_gvt_hypervisor_pause_domain - Pause a domain + * @vgpu: a vGPU + * + * Returns: + * Zero on success, negative error code if failed + */ +static inline int intel_gvt_hypervisor_pause_domain(struct intel_vgpu *vgpu) +{ + if (!intel_gvt_host.mpt || !intel_gvt_host.mpt->pause_domain) + return 0; + + return intel_gvt_host.mpt->pause_domain(vgpu->handle); +} + +/** + * intel_gvt_hypervisor_unpause_domain - Unpause a domain + * @vgpu: a vGPU + * + * Returns: + * Zero on success, negative error code if failed + */ +static inline int intel_gvt_hypervisor_unpause_domain(struct intel_vgpu *vgpu) +{ + if (!intel_gvt_host.mpt || !intel_gvt_host.mpt->unpause_domain) + return 0; + + return intel_gvt_host.mpt->unpause_domain(vgpu->handle); +} + +/** + * intel_gvt_hypervisor_dom0_ready - Signal Dom 0 is ready for Dom U + * + * It raises a uevent to notify that Dom 0 is ready to start a Dom U, so + * that a Dom U can be started as early as possible + * + * Returns: + * Zero on success, negative error code if failed + */ +static inline int intel_gvt_hypervisor_dom0_ready(void) +{ + if (!intel_gvt_host.mpt->dom0_ready) + return 0; + + return intel_gvt_host.mpt->dom0_ready(); +} #endif /* _GVT_MPT_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 311799136d7f..68484b002ce9 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -27,6 +27,7 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) { + void __iomem *host_va = vgpu->gvt->opregion.opregion_va; u8 *buf; int i; @@ -42,19 +43,20 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) if (!vgpu_opregion(vgpu)->va) return -ENOMEM; - memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va, - INTEL_GVT_OPREGION_SIZE); + if (vgpu->gvt->opregion.opregion_va) { + memcpy_fromio(vgpu_opregion(vgpu)->va, host_va, + INTEL_GVT_OPREGION_SIZE); + + /* for an unknown reason, the value in the LID field is incorrect, + * which blocks the Windows guest, so work around it by forcing the + * field to "OPEN" + */ + buf = (u8 *)vgpu_opregion(vgpu)->va; + buf[INTEL_GVT_OPREGION_CLID] = 0x3; + } for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; - - /* for unknown reason, the value in LID field is incorrect - * which block the windows guest, so workaround it by force - * setting it to "OPEN" - */ - buf = (u8 *)vgpu_opregion(vgpu)->va; - buf[INTEL_GVT_OPREGION_CLID] = 0x3; - return 0; } @@ -120,6 +122,12 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa) if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { gvt_dbg_core("emulate opregion from kernel\n"); + /* clean the opregion content before it can be reused.
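The MPT helpers above encode two policies for optional hypervisor hooks: set_pvmmio() is mandatory for the PV MMIO feature and fails with -ENOENT when absent, while pause_domain(), unpause_domain() and dom0_ready() degrade to no-op success so hypervisors that lack them keep working. A condensed model of that dispatch:

#include <errno.h>
#include <stdio.h>

struct mpt_ops {
	int (*set_pvmmio)(unsigned long handle);
	int (*pause_domain)(unsigned long handle);
};

static int call_set_pvmmio(const struct mpt_ops *mpt, unsigned long h)
{
	if (!mpt->set_pvmmio)
		return -ENOENT;		/* required for PV MMIO */
	return mpt->set_pvmmio(h);
}

static int call_pause_domain(const struct mpt_ops *mpt, unsigned long h)
{
	if (!mpt->pause_domain)
		return 0;		/* optional: silently succeed */
	return mpt->pause_domain(h);
}

int main(void)
{
	struct mpt_ops bare = {0};	/* hypervisor without either hook */

	printf("set_pvmmio: %d, pause: %d\n",
	       call_set_pvmmio(&bare, 1), call_pause_domain(&bare, 1));
	return 0;
}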
*/ + if (gpa == 0) + return 0; + if (vgpu_opregion(vgpu)->va) + intel_vgpu_clean_opregion(vgpu); + ret = init_vgpu_opregion(vgpu, gpa); if (ret) return ret; @@ -139,6 +147,9 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa) */ void intel_gvt_clean_opregion(struct intel_gvt *gvt) { + if (!gvt->opregion.opregion_va) + return; + memunmap(gvt->opregion.opregion_va); gvt->opregion.opregion_va = NULL; } @@ -157,6 +168,11 @@ int intel_gvt_init_opregion(struct intel_gvt *gvt) pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION, &gvt->opregion.opregion_pa); + if (gvt->opregion.opregion_pa == 0) { + gvt_err("host opregion doesn't exist\n"); + return 0; + } + gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa, INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB); if (!gvt->opregion.opregion_va) { diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 7d01c77a0f7a..db26f31d4928 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -54,8 +54,17 @@ #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) -#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100) -#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100) +#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + plane * 0x100) +#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + plane * 0x100) +#define _REG_701AC(pipe, plane) (0x701ac + pipe * 0x1000 + plane * 0x100) + +#define SKL_PS_REG_TO_PIPE(reg) (((reg) >> 11) & 0x3) +#define SKL_PS_REG_TO_SCALER(reg) (((reg) >> 8) & 0x3) +#define SKL_PS_REG_VALUE_TO_PLANE(val) (((val) >> 25) & 0x7) + +#define SKL_PLANE_REG_TO_PIPE(reg) (((reg) >> 12) & 0x3) +#define SKL_PLANE_REG_TO_PLANE(reg) ((((reg) & 0xFFF) - 0x180) >> 8) +#define SKL_FLIP_EVENT(pipe, plane) (PRIMARY_A_FLIP_DONE + (plane)*3 + pipe) #define GFX_MODE_BIT_SET_IN_MASK(val, bit) \ ((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16)))) diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c index 2ea542257f03..70226a43ed9b 100644 --- a/drivers/gpu/drm/i915/gvt/render.c +++ b/drivers/gpu/drm/i915/gvt/render.c @@ -141,6 +141,27 @@ static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = { {RCS, _MMIO(0x20e4), 0xffff, false}, }; +void intel_gvt_mark_noncontext_mmios(struct intel_gvt *gvt) +{ + struct render_mmio *mmio; + int i, array_size; + + if (IS_SKYLAKE(gvt->dev_priv) + || IS_BROXTON(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) { + mmio = gen9_render_mmio_list; + array_size = ARRAY_SIZE(gen9_render_mmio_list); + } else { + mmio = gen8_render_mmio_list; + array_size = ARRAY_SIZE(gen8_render_mmio_list); + } + + for (i = 0; i < array_size; i++, mmio++) { + if (mmio->in_context) + continue; + intel_gvt_mmio_set_non_context(gvt, mmio->reg.reg); + } +} + static u32 gen9_render_mocs[I915_NUM_ENGINES][64]; static u32 gen9_render_mocs_L3[32]; @@ -172,7 +193,8 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) */ fw = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ | FW_REG_WRITE); - if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) + if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv))) fw |= FORCEWAKE_RENDER; intel_uncore_forcewake_get(dev_priv, fw); @@ -271,6 +293,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id) i915_reg_t last_reg = _MMIO(0); if (IS_SKYLAKE(vgpu->gvt->dev_priv) + || IS_BROXTON(dev_priv) || 
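The SKL_PLANE_REG_TO_PIPE()/SKL_PLANE_REG_TO_PLANE() macros added to reg.h above recover pipe and plane indices arithmetically from a trapped offset, exploiting the 0x1000-per-pipe and 0x100-per-plane register stride. A standalone check (the 0x70180 base is the usual SKL plane-control base, used here only for illustration):

#include <assert.h>
#include <stdint.h>

#define SKL_PLANE_REG_TO_PIPE(reg)  (((reg) >> 12) & 0x3)
#define SKL_PLANE_REG_TO_PLANE(reg) ((((reg) & 0xFFF) - 0x180) >> 8)

int main(void)
{
	/* a plane register for pipe B, plane 2 would sit at 0x71380 */
	uint32_t reg = 0x70180 + 1 * 0x1000 + 2 * 0x100;

	assert(SKL_PLANE_REG_TO_PIPE(reg) == 1);
	assert(SKL_PLANE_REG_TO_PLANE(reg) == 2);
	return 0;
}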
IS_KABYLAKE(vgpu->gvt->dev_priv)) { mmio = gen9_render_mmio_list; array_size = ARRAY_SIZE(gen9_render_mmio_list); @@ -293,7 +316,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id) */ if (mmio->in_context && ((ctx_ctrl & inhibit_mask) != inhibit_mask) && - i915.enable_execlists) + i915_modparams.enable_execlists) continue; if (mmio->mask) @@ -325,7 +348,8 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id) u32 v; int i, array_size; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) { mmio = gen9_render_mmio_list; array_size = ARRAY_SIZE(gen9_render_mmio_list); restore_mocs(vgpu, ring_id); @@ -403,3 +427,110 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } + +#define gvt_host_reg(gvt, reg) \ + (*(u32 *)(gvt->mmio.mmio_host_cache + reg)) \ + +#define MMIO_COMPARE(vgpu, reg, mask) ({ \ + int ret; \ + u32 value = vgpu_vreg(vgpu, reg); \ + u32 host_value = gvt_host_reg(vgpu->gvt, reg); \ + \ + if (mask) { \ + value &= mask; \ + host_value &= mask; \ + } \ + if (host_value == value) { \ + ret = 0; \ + } else { \ + gvt_err("vgpu%d non-conformant mmio 0x%x:0x%x,0x%x\n", \ + vgpu->id, reg, \ + vgpu_vreg(vgpu, reg), \ + gvt_host_reg(vgpu->gvt, reg)); \ + ret = -EINVAL; \ + } \ + ret; \ + }) + +static int noncontext_mmio_compare(struct intel_vgpu *vgpu, int ring_id) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct render_mmio *mmio, *mmio_list; + int i, array_size; + struct intel_engine_cs *engine = dev_priv->engine[ring_id]; + + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || + IS_KABYLAKE(dev_priv)) { + mmio_list = gen9_render_mmio_list; + array_size = ARRAY_SIZE(gen9_render_mmio_list); + } else { + mmio_list = gen8_render_mmio_list; + array_size = ARRAY_SIZE(gen8_render_mmio_list); + } + + for (i = 0, mmio = mmio_list; i < array_size; i++, mmio++) { + if (mmio->ring_id != ring_id || mmio->in_context + || is_force_nonpriv_mmio(mmio->reg.reg) + || mmio->reg.reg == RING_MODE_GEN7(engine).reg) + continue; + + if (MMIO_COMPARE(vgpu, mmio->reg.reg, mmio->mask)) + return -EINVAL; + } + + return 0; +} + +static void get_host_mmio_snapshot(struct intel_gvt *gvt) +{ + struct drm_i915_private *dev_priv = gvt->dev_priv; + struct render_mmio *mmio, *mmio_list; + int i, array_size; + + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || + IS_KABYLAKE(dev_priv)) { + mmio_list = gen9_render_mmio_list; + array_size = ARRAY_SIZE(gen9_render_mmio_list); + } else { + mmio_list = gen8_render_mmio_list; + array_size = ARRAY_SIZE(gen8_render_mmio_list); + } + + if (!gvt->mmio.host_cache_initialized) { + /* Snapshot all the non-context MMIOs */ + for (i = 0, mmio = mmio_list; i < array_size; i++, mmio++) { + + if (mmio->in_context) + continue; + + gvt_host_reg(gvt, mmio->reg.reg) = + I915_READ_FW(mmio->reg); + if (mmio->mask) + gvt_host_reg(gvt, mmio->reg.reg) &= mmio->mask; + } + + gvt->mmio.host_cache_initialized = true; + } +} + +int intel_gvt_vgpu_conformance_check(struct intel_vgpu *vgpu, int ring_id) +{ + + int ret; + + /* The full non-context MMIO check only needs to run once */ + if (!vgpu->entire_nonctxmmio_checked) + vgpu->entire_nonctxmmio_checked = true; + else + return 0; + + get_host_mmio_snapshot(vgpu->gvt); + + ret = noncontext_mmio_compare(vgpu, ring_id); + if (ret) + goto err; + + return 0; +err: + return ret; +} diff --git a/drivers/gpu/drm/i915/gvt/render.h b/drivers/gpu/drm/i915/gvt/render.h index
91db1d39d28f..51471fd063a5 100644 --- a/drivers/gpu/drm/i915/gvt/render.h +++ b/drivers/gpu/drm/i915/gvt/render.h @@ -40,4 +40,6 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, struct intel_vgpu *next, int ring_id); +int intel_gvt_vgpu_conformance_check(struct intel_vgpu *vgpu, int ring_id); + #endif diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 03532dfc0cd5..4aef4177ad3b 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -34,15 +34,11 @@ #include "i915_drv.h" #include "gvt.h" -static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu) +static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu, + enum intel_engine_id ring_id) { - enum intel_engine_id i; - struct intel_engine_cs *engine; - - for_each_engine(engine, vgpu->gvt->dev_priv, i) { - if (!list_empty(workload_q_head(vgpu, i))) - return true; - } + if (!list_empty(workload_q_head(vgpu, ring_id))) + return true; return false; } @@ -64,29 +60,35 @@ struct gvt_sched_data { struct intel_gvt *gvt; struct hrtimer timer; unsigned long period; - struct list_head lru_runq_head; + struct list_head lru_runq_head[I915_NUM_ENGINES]; }; -static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu) +static void vgpu_update_timeslice(struct intel_vgpu *vgpu, + enum intel_engine_id ring_id, ktime_t cur_time) { ktime_t delta_ts; - struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data; + struct vgpu_sched_data *vgpu_data; - delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time; + if (vgpu == NULL || vgpu == vgpu->gvt->idle_vgpu) + return; - vgpu_data->sched_time += delta_ts; - vgpu_data->left_ts -= delta_ts; + vgpu_data = vgpu->sched_data[ring_id]; + delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time); + vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts); + vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts); + vgpu_data->sched_in_time = cur_time; } #define GVT_TS_BALANCE_PERIOD_MS 100 #define GVT_TS_BALANCE_STAGE_NUM 10 -static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) +static void gvt_balance_timeslice(struct gvt_sched_data *sched_data, + enum intel_engine_id ring_id) { struct vgpu_sched_data *vgpu_data; struct list_head *pos; - static uint64_t stage_check; - int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM; + static uint64_t stage_check[I915_NUM_ENGINES]; + int stage = stage_check[ring_id]++ % GVT_TS_BALANCE_STAGE_NUM; /* The timeslice accumulation reset at stage 0, which is * allocated again without adding previous debt. 
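The stage-0 rebalance that follows hands each runnable vGPU a share of the 100 ms period proportional to its scheduling weight, now tracked per engine. A minimal standalone sketch of that arithmetic (illustrative only; the two vGPUs and their weights are invented for the example, not part of the patch):

	/* Split a 100 ms period between two vGPUs with weights 4 and 2. */
	ktime_t period = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS);
	int weight[2] = { 4, 2 };
	int total_weight = weight[0] + weight[1];
	ktime_t slice0 = period * weight[0] / total_weight;	/* ~66.7 ms */
	ktime_t slice1 = period * weight[1] / total_weight;	/* ~33.3 ms */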
@@ -95,22 +97,24 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) int total_weight = 0; ktime_t fair_timeslice; - list_for_each(pos, &sched_data->lru_runq_head) { + list_for_each(pos, &sched_data->lru_runq_head[ring_id]) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); total_weight += vgpu_data->sched_ctl.weight; } - list_for_each(pos, &sched_data->lru_runq_head) { + list_for_each(pos, &sched_data->lru_runq_head[ring_id]) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); - fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) * - vgpu_data->sched_ctl.weight / - total_weight; - + if (WARN_ON_ONCE(total_weight == 0)) { + fair_timeslice = 0; + } else { + fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) * + vgpu_data->sched_ctl.weight / total_weight; + } vgpu_data->allocated_ts = fair_timeslice; vgpu_data->left_ts = vgpu_data->allocated_ts; } } else { - list_for_each(pos, &sched_data->lru_runq_head) { + list_for_each(pos, &sched_data->lru_runq_head[ring_id]) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); /* timeslice for next 100ms should add the left/debt @@ -121,73 +125,68 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) } } -static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) +static void try_to_schedule_next_vgpu(struct intel_gvt *gvt, + enum intel_engine_id ring_id) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - enum intel_engine_id i; - struct intel_engine_cs *engine; struct vgpu_sched_data *vgpu_data; ktime_t cur_time; /* no need to schedule if next_vgpu is the same with current_vgpu, * let scheduler chose next_vgpu again by setting it to NULL. */ - if (scheduler->next_vgpu == scheduler->current_vgpu) { - scheduler->next_vgpu = NULL; + if (scheduler->next_vgpu[ring_id] == scheduler->current_vgpu[ring_id]) { + scheduler->next_vgpu[ring_id] = NULL; return; } + /* no target to schedule */ + if (!scheduler->next_vgpu[ring_id]) + return; + /* * after the flag is set, workload dispatch thread will * stop dispatching workload for current vgpu */ - scheduler->need_reschedule = true; + scheduler->need_reschedule[ring_id] = true; /* still have uncompleted workload? 
*/ - for_each_engine(engine, gvt->dev_priv, i) { - if (scheduler->current_workload[i]) - return; - } + if (scheduler->current_workload[ring_id]) + return; cur_time = ktime_get(); - if (scheduler->current_vgpu) { - vgpu_data = scheduler->current_vgpu->sched_data; - vgpu_data->sched_out_time = cur_time; - vgpu_update_timeslice(scheduler->current_vgpu); - } - vgpu_data = scheduler->next_vgpu->sched_data; + vgpu_update_timeslice(scheduler->current_vgpu[ring_id], ring_id, cur_time); + vgpu_data = scheduler->next_vgpu[ring_id]->sched_data[ring_id]; vgpu_data->sched_in_time = cur_time; /* switch current vgpu */ - scheduler->current_vgpu = scheduler->next_vgpu; - scheduler->next_vgpu = NULL; + scheduler->current_vgpu[ring_id] = scheduler->next_vgpu[ring_id]; + scheduler->next_vgpu[ring_id] = NULL; - scheduler->need_reschedule = false; + scheduler->need_reschedule[ring_id] = false; /* wake up workload dispatch thread */ - for_each_engine(engine, gvt->dev_priv, i) - wake_up(&scheduler->waitq[i]); + wake_up(&scheduler->waitq[ring_id]); } -static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) +static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data, + enum intel_engine_id ring_id) { struct vgpu_sched_data *vgpu_data; struct intel_vgpu *vgpu = NULL; - struct list_head *head = &sched_data->lru_runq_head; + struct list_head *head = &sched_data->lru_runq_head[ring_id]; struct list_head *pos; /* search a vgpu with pending workload */ list_for_each(pos, head) { - vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); - if (!vgpu_has_pending_workload(vgpu_data->vgpu)) + if (!vgpu_has_pending_workload(vgpu_data->vgpu, ring_id)) continue; - - /* Return the vGPU only if it has time slice left */ - if (vgpu_data->left_ts > 0) { - vgpu = vgpu_data->vgpu; - break; - } + /* Return the vGPU only if it has time slice left */ + if (vgpu_data->left_ts > 0) { + vgpu = vgpu_data->vgpu; + break; + } } return vgpu; @@ -196,50 +195,67 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) /* in nanosecond */ #define GVT_DEFAULT_TIME_SLICE 1000000 -static void tbs_sched_func(struct gvt_sched_data *sched_data) +static void tbs_sched_func(struct gvt_sched_data *sched_data, enum intel_engine_id ring_id) { struct intel_gvt *gvt = sched_data->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct vgpu_sched_data *vgpu_data; struct intel_vgpu *vgpu = NULL; /* no active vgpu or has already had a target */ - if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) + if (list_empty(&sched_data->lru_runq_head[ring_id]) || scheduler->next_vgpu[ring_id]) goto out; - vgpu = find_busy_vgpu(sched_data); + vgpu = find_busy_vgpu(sched_data, ring_id); if (vgpu) { - scheduler->next_vgpu = vgpu; + scheduler->next_vgpu[ring_id] = vgpu; /* Move the last used vGPU to the tail of lru_list */ - vgpu_data = vgpu->sched_data; + vgpu_data = vgpu->sched_data[ring_id]; list_del_init(&vgpu_data->lru_list); list_add_tail(&vgpu_data->lru_list, - &sched_data->lru_runq_head); + &sched_data->lru_runq_head[ring_id]); + + gvt_dbg_sched("pick next vgpu %d\n", vgpu->id); } else { - scheduler->next_vgpu = gvt->idle_vgpu; + scheduler->next_vgpu[ring_id] = gvt->idle_vgpu; } out: - if (scheduler->next_vgpu) - try_to_schedule_next_vgpu(gvt); + if (scheduler->next_vgpu[ring_id]) { + gvt_dbg_sched("try to schedule next vgpu %d\n", + scheduler->next_vgpu[ring_id]->id); + try_to_schedule_next_vgpu(gvt, ring_id); + } } void intel_gvt_schedule(struct intel_gvt *gvt) { 
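	/* Per-engine tick: roughly every GVT_TS_BALANCE_PERIOD_MS rebalance the timeslice budgets, charge the vGPU currently running on each engine for the time elapsed since the last tick, then let tbs_sched_func() pick that engine's next vGPU. */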
struct gvt_sched_data *sched_data = gvt->scheduler.sched_data; - static uint64_t timer_check; + static ktime_t check_time; + enum intel_engine_id i; + struct intel_engine_cs *engine; + ktime_t cur_time; - mutex_lock(&gvt->lock); + mutex_lock(&gvt->sched_lock); + cur_time = ktime_get(); if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED, (void *)&gvt->service_request)) { - if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS)) - gvt_balance_timeslice(sched_data); + + if (ktime_sub(cur_time, check_time) >= + GVT_TS_BALANCE_PERIOD_MS * NSEC_PER_MSEC) { + check_time = cur_time; + for_each_engine(engine, gvt->dev_priv, i) + gvt_balance_timeslice(sched_data, i); + } } clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request); - tbs_sched_func(sched_data); + for_each_engine(engine, gvt->dev_priv, i) { + vgpu_update_timeslice(gvt->scheduler.current_vgpu[i], i, cur_time); + tbs_sched_func(sched_data, i); + } - mutex_unlock(&gvt->lock); + mutex_unlock(&gvt->sched_lock); } static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data) @@ -257,6 +273,9 @@ static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data) static int tbs_sched_init(struct intel_gvt *gvt) { + enum intel_engine_id i; + struct intel_engine_cs *engine; + struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; @@ -266,7 +285,9 @@ static int tbs_sched_init(struct intel_gvt *gvt) if (!data) return -ENOMEM; - INIT_LIST_HEAD(&data->lru_runq_head); + for_each_engine(engine, gvt->dev_priv, i) + INIT_LIST_HEAD(&data->lru_runq_head[i]); + hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); data->timer.function = tbs_timer_fn; data->period = GVT_DEFAULT_TIME_SLICE; @@ -292,35 +313,54 @@ static void tbs_sched_clean(struct intel_gvt *gvt) static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu) { struct vgpu_sched_data *data; + enum intel_engine_id i; + struct intel_engine_cs *engine; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - data->sched_ctl.weight = vgpu->sched_ctl.weight; - data->vgpu = vgpu; - INIT_LIST_HEAD(&data->lru_list); + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto err; - vgpu->sched_data = data; + data->sched_ctl.weight = vgpu->sched_ctl.weight; + data->vgpu = vgpu; + INIT_LIST_HEAD(&data->lru_list); + vgpu->sched_data[i] = data; + } return 0; + +err: + for (; i >= 0; i--) { + kfree(vgpu->sched_data[i]); + vgpu->sched_data[i] = NULL; + } + return -ENOMEM; } static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) { - kfree(vgpu->sched_data); - vgpu->sched_data = NULL; + enum intel_engine_id i; + struct intel_engine_cs *engine; + + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + kfree(vgpu->sched_data[i]); + vgpu->sched_data[i] = NULL; + } } static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) { struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; - struct vgpu_sched_data *vgpu_data = vgpu->sched_data; - - if (!list_empty(&vgpu_data->lru_list)) - return; + struct vgpu_sched_data *vgpu_data[I915_NUM_ENGINES]; + enum intel_engine_id i; + struct intel_engine_cs *engine; - list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head); + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + vgpu_data[i] = vgpu->sched_data[i]; + if (!list_empty(&vgpu_data[i]->lru_list)) + continue; + list_add_tail(&vgpu_data[i]->lru_list, &sched_data->lru_runq_head[i]); + } if (!hrtimer_active(&sched_data->timer)) hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), @@ 
-329,9 +369,14 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) { - struct vgpu_sched_data *vgpu_data = vgpu->sched_data; + struct vgpu_sched_data *vgpu_data[I915_NUM_ENGINES]; + enum intel_engine_id i; + struct intel_engine_cs *engine; - list_del_init(&vgpu_data->lru_list); + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + vgpu_data[i] = vgpu->sched_data[i]; + list_del_init(&vgpu_data[i]->lru_list); + } } static struct intel_gvt_sched_policy_ops tbs_schedule_ops = { @@ -372,23 +417,31 @@ void intel_vgpu_start_schedule(struct intel_vgpu *vgpu) vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu); } +void intel_gvt_kick_schedule(struct intel_gvt *gvt) +{ + intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED); +} + void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) { struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler; int ring_id; + enum intel_engine_id i; + struct intel_engine_cs *engine; gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); scheduler->sched_ops->stop_schedule(vgpu); - if (scheduler->next_vgpu == vgpu) - scheduler->next_vgpu = NULL; - - if (scheduler->current_vgpu == vgpu) { - /* stop workload dispatching */ - scheduler->need_reschedule = true; - scheduler->current_vgpu = NULL; + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + if (scheduler->next_vgpu[i] == vgpu) + scheduler->next_vgpu[i] = NULL; + if (scheduler->current_vgpu[i] == vgpu) { + /* stop workload dispatching */ + scheduler->need_reschedule[i] = true; + scheduler->current_vgpu[i] = NULL; + } } spin_lock_bh(&scheduler->mmio_context_lock); diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h index ba00a5f7455f..7b59e3e88b8b 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.h +++ b/drivers/gpu/drm/i915/gvt/sched_policy.h @@ -57,4 +57,6 @@ void intel_vgpu_start_schedule(struct intel_vgpu *vgpu); void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu); +void intel_gvt_kick_schedule(struct intel_gvt *gvt); + #endif diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 391800d2067b..be910c951e71 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -45,13 +45,61 @@ static void set_context_pdp_root_pointer( struct execlist_ring_context *ring_context, u32 pdp[8]) { - struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW; int i; for (i = 0; i < 8; i++) - pdp_pair[i].val = pdp[7 - i]; + ring_context->pdps[i].val = pdp[7 - i]; } +/* + * when populating the shadow ctx from the guest, we should not override + * OA-related registers, so that they will not be overwritten by guest OA + * configs. This makes it possible to capture OA data on the host for both + * host and guests.
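+ * + * The save/restore pair below is symmetric; roughly (illustrative): + * + *	sr_oa_regs(workload, reg_state, true);	save OA values from reg_state + *	...copy the guest context over reg_state... + *	sr_oa_regs(workload, reg_state, false);	write the saved values back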
+ */ +static void sr_oa_regs(struct intel_vgpu_workload *workload, + u32 *reg_state, bool save) +{ + struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; + u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset; + u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset; + int i = 0; + u32 flex_mmio[] = { + i915_mmio_reg_offset(EU_PERF_CNTL0), + i915_mmio_reg_offset(EU_PERF_CNTL1), + i915_mmio_reg_offset(EU_PERF_CNTL2), + i915_mmio_reg_offset(EU_PERF_CNTL3), + i915_mmio_reg_offset(EU_PERF_CNTL4), + i915_mmio_reg_offset(EU_PERF_CNTL5), + i915_mmio_reg_offset(EU_PERF_CNTL6), + }; + + if (!workload || !reg_state || workload->ring_id != RCS) + return; + + if (save) { + workload->oactxctrl = reg_state[ctx_oactxctrl + 1]; + + for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { + u32 state_offset = ctx_flexeu0 + i * 2; + + workload->flex_mmio[i] = reg_state[state_offset + 1]; + } + } else { + reg_state[ctx_oactxctrl] = + i915_mmio_reg_offset(GEN8_OACTXCONTROL); + reg_state[ctx_oactxctrl + 1] = workload->oactxctrl; + + for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { + u32 state_offset = ctx_flexeu0 + i * 2; + u32 mmio = flex_mmio[i]; + + reg_state[state_offset] = mmio; + reg_state[state_offset + 1] = workload->flex_mmio[i]; + } + } +} + +static bool enable_lazy_shadow_ctx = true; static int populate_shadow_context(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; @@ -64,6 +112,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) struct page *page; void *dst; unsigned long context_gpa, context_page_num; + struct drm_i915_private *dev_priv = gvt->dev_priv; + struct i915_ggtt *ggtt = &gvt->dev_priv->ggtt; + dma_addr_t addr; + gen8_pte_t __iomem *pte; int i; gvt_dbg_sched("ring id %d workload lrca %x", ring_id, @@ -77,6 +129,18 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) context_page_num = 19; i = 2; +#ifdef CONFIG_INTEL_IOMMU + /* + * If the IOMMU for graphics is turned on, we don't want to turn on + * the lazy shadow context feature, because it touches GGTT entries, + * which requires a BKL; since this is purely a performance + * enhancement feature, we would end up negating + * the performance.
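+ * + * With lazy shadowing enabled, instead of copying every guest context + * page into the shadow context, the shadow context's GGTT PTEs are + * pointed directly at the guest's machine pages (gfn -> mfn); see the + * writeq() of the translated mfn below.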
+ */ + if(intel_iommu_gfx_mapped) { + enable_lazy_shadow_ctx = false; + } +#endif while (i < context_page_num) { context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, @@ -87,17 +151,44 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) return -EINVAL; } - page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i); - dst = kmap(page); - intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, + if (!enable_lazy_shadow_ctx) { + page = i915_gem_object_get_page(ctx_obj, + LRC_HEADER_PAGES + i); + dst = kmap(page); + intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, GTT_PAGE_SIZE); - kunmap(page); + kunmap(page); + } else { + unsigned long mfn; + + addr = i915_ggtt_offset( + shadow_ctx->engine[ring_id].state) + + (LRC_PPHWSP_PN + i) * PAGE_SIZE; + pte = (gen8_pte_t __iomem *)ggtt->gsm + + (addr >> PAGE_SHIFT); + + mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, + context_gpa >> 12); + if (mfn == INTEL_GVT_INVALID_ADDR) { + gvt_vgpu_err("fail to translate gfn during context shadow\n"); + return -ENXIO; + } + + mfn <<= 12; + mfn |= _PAGE_PRESENT | _PAGE_RW | PPAT_CACHED_INDEX; + writeq(mfn, pte); + } + i++; } + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + POSTING_READ(GFX_FLSH_CNTL_GEN6); + page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); shadow_ring_context = kmap(page); + sr_oa_regs(workload, (u32 *)shadow_ring_context, true); #define COPY_REG(name) \ intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) @@ -122,6 +213,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) sizeof(*shadow_ring_context), GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); + sr_oa_regs(workload, (u32 *)shadow_ring_context, false); kunmap(page); return 0; } @@ -141,19 +233,8 @@ static int shadow_context_status_change(struct notifier_block *nb, enum intel_engine_id ring_id = req->engine->id; struct intel_vgpu_workload *workload; - if (!is_gvt_request(req)) { - spin_lock_bh(&scheduler->mmio_context_lock); - if (action == INTEL_CONTEXT_SCHEDULE_IN && - scheduler->engine_owner[ring_id]) { - /* Switch ring from vGPU to host. */ - intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], - NULL, ring_id); - scheduler->engine_owner[ring_id] = NULL; - } - spin_unlock_bh(&scheduler->mmio_context_lock); - + if (!is_gvt_request(req)) return NOTIFY_OK; - } workload = scheduler->current_workload[ring_id]; if (unlikely(!workload)) @@ -161,23 +242,12 @@ static int shadow_context_status_change(struct notifier_block *nb, switch (action) { case INTEL_CONTEXT_SCHEDULE_IN: - spin_lock_bh(&scheduler->mmio_context_lock); - if (workload->vgpu != scheduler->engine_owner[ring_id]) { - /* Switch ring from host to vGPU or vGPU to vGPU. 
 */ - intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], - workload->vgpu, ring_id); - scheduler->engine_owner[ring_id] = workload->vgpu; - } else - gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n", - ring_id, workload->vgpu->id); - spin_unlock_bh(&scheduler->mmio_context_lock); atomic_set(&workload->shadow_ctx_active, 1); break; case INTEL_CONTEXT_SCHEDULE_OUT: atomic_set(&workload->shadow_ctx_active, 0); break; default: - WARN_ON(1); return NOTIFY_OK; } wake_up(&workload->shadow_ctx_status_wq); @@ -201,6 +271,43 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx, ce->lrc_desc = desc; } +static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) +{ + struct intel_vgpu *vgpu = workload->vgpu; + void *shadow_ring_buffer_va; + u32 *cs; + + /* allocate shadow ring buffer */ + cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32)); + if (IS_ERR(cs)) { + gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n", + workload->rb_len); + return PTR_ERR(cs); + } + + shadow_ring_buffer_va = workload->shadow_ring_buffer_va; + + /* get shadow ring buffer va */ + workload->shadow_ring_buffer_va = cs; + + memcpy(cs, shadow_ring_buffer_va, + workload->rb_len); + + cs += workload->rb_len / sizeof(u32); + intel_ring_advance(workload->req, cs); + + return 0; +} + +void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) +{ + if (!wa_ctx->indirect_ctx.obj) + return; + + i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); + i915_gem_object_put(wa_ctx->indirect_ctx.obj); +} + /** * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and * shadow it as well, include ringbuffer,wa_ctx and ctx. @@ -214,8 +321,10 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) int ring_id = workload->ring_id; struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; + struct intel_engine_cs *engine = dev_priv->engine[ring_id]; struct drm_i915_gem_request *rq; struct intel_vgpu *vgpu = workload->vgpu; + struct intel_ring *ring; int ret; lockdep_assert_held(&dev_priv->drm.struct_mutex); @@ -231,46 +340,121 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) shadow_context_descriptor_update(shadow_ctx, dev_priv->engine[ring_id]); + ret = intel_gvt_scan_and_shadow_ringbuffer(workload); + if (ret) + goto err_scan; + + if ((workload->ring_id == RCS) && + (workload->wa_ctx.indirect_ctx.size != 0) + && gvt_shadow_wa_ctx) { + ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); + if (ret) + goto err_scan; + } + + /* pin the shadow context by gvt even though the shadow context will be + * pinned when i915 allocates a request. That is because gvt will update + * the guest context from the shadow context when the workload is + * completed, and at that moment i915 may have already unpinned the + * shadow context, making the shadow_ctx pages invalid. So gvt needs to + * pin it itself. After updating the guest context, gvt can unpin the + * shadow_ctx safely.
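+ * + * Lifetime sketch: context_pin() -> populate_shadow_context() -> + * request alloc and submit -> workload completes -> + * update_guest_context() -> context_unpin() in + * complete_current_workload().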
+ */ + ring = engine->context_pin(engine, shadow_ctx); + if (IS_ERR(ring)) { + ret = PTR_ERR(ring); + gvt_vgpu_err("fail to pin shadow context\n"); + goto err_shadow; + } + + ret = populate_shadow_context(workload); + if (ret) + goto err_unpin; + rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); if (IS_ERR(rq)) { gvt_vgpu_err("fail to allocate gem request\n"); ret = PTR_ERR(rq); - goto out; + goto err_unpin; } gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq); workload->req = i915_gem_request_get(rq); - ret = intel_gvt_scan_and_shadow_ringbuffer(workload); - if (ret) - goto out; - - if ((workload->ring_id == RCS) && - (workload->wa_ctx.indirect_ctx.size != 0)) { - ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); - if (ret) - goto out; + /* we consider this a workaround to avoid the situation where the PDPs + * are not updated; right now we only apply it to the BXT platform + * since the issue is not reported on the other platforms + */ + if (IS_BROXTON(vgpu->gvt->dev_priv)) { + ret = gvt_emit_pdps(workload); + if (ret) { + i915_gem_request_put(rq); + workload->req = NULL; + goto err_unpin; + } } - ret = populate_shadow_context(workload); + ret = copy_workload_to_ring_buffer(workload); if (ret) - goto out; - + goto err_unpin; workload->shadowed = true; + return 0; -out: +err_unpin: + engine->context_unpin(engine, shadow_ctx); +err_shadow: + release_shadow_wa_ctx(&workload->wa_ctx); +err_scan: return ret; } +static void gen8_shadow_pid_cid(struct intel_vgpu_workload *workload) +{ + int ring_id = workload->ring_id; + struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; + struct intel_engine_cs *engine = dev_priv->engine[ring_id]; + u32 *cs; + + /* Copy the PID and CID from the guest's HWS page to the host's one */ + cs = intel_ring_begin(workload->req, 16); + if (IS_ERR(cs)) /* no ring space; skip the PID/CID copy */ + return; + *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(NOPID); + *cs++ = (workload->ctx_desc.lrca << GTT_PAGE_SHIFT) + I915_GEM_HWS_PID_ADDR; + *cs++ = 0; + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(NOPID); + *cs++ = engine->status_page.ggtt_offset + I915_GEM_HWS_PID_ADDR + + (workload->vgpu->id << MI_STORE_DWORD_INDEX_SHIFT); + *cs++ = 0; + *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(NOPID); + *cs++ = (workload->ctx_desc.lrca << GTT_PAGE_SHIFT) + I915_GEM_HWS_CID_ADDR; + *cs++ = 0; + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(NOPID); + *cs++ = engine->status_page.ggtt_offset + I915_GEM_HWS_CID_ADDR + + (workload->vgpu->id << MI_STORE_DWORD_INDEX_SHIFT); + *cs++ = 0; + intel_ring_advance(workload->req, cs); +} + +static int sanitize_priority(int priority) +{ + if (priority > I915_CONTEXT_MAX_USER_PRIORITY) + return I915_CONTEXT_MAX_USER_PRIORITY; + else if (priority < I915_CONTEXT_MIN_USER_PRIORITY) + return I915_CONTEXT_MIN_USER_PRIORITY; + return priority; +} + static int dispatch_workload(struct intel_vgpu_workload *workload) { int ring_id = workload->ring_id; struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; struct intel_engine_cs *engine = dev_priv->engine[ring_id]; - struct intel_vgpu *vgpu = workload->vgpu; - struct intel_ring *ring; + struct intel_vgpu *vgpu = workload->vgpu; + struct intel_ring *ring; int ret = 0; gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", @@ -279,11 +463,22 @@ static int
dispatch_workload(struct intel_vgpu_workload *workload) mutex_lock(&dev_priv->drm.struct_mutex); ret = intel_gvt_scan_and_shadow_workload(workload); + + if (i915_modparams.enable_conformance_check + && intel_gvt_vgpu_conformance_check(vgpu, ring_id)) + gvt_err("vgpu%d non-conformant guest detected\n", vgpu->id); + if (ret) goto out; + gen8_shadow_pid_cid(workload); + if (workload->prepare) { + mutex_unlock(&dev_priv->drm.struct_mutex); + mutex_lock(&vgpu->gvt->lock); + mutex_lock(&dev_priv->drm.struct_mutex); ret = workload->prepare(workload); + mutex_unlock(&vgpu->gvt->lock); if (ret) goto out; } @@ -302,6 +497,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) goto out; } + workload->guilty_count = atomic_read(&workload->req->ctx->guilty_count); out: if (ret) workload->status = ret; @@ -309,6 +505,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) if (!IS_ERR_OR_NULL(workload->req)) { gvt_dbg_sched("ring id %d submit workload to i915 %p\n", ring_id, workload->req); + shadow_ctx->priority = i915_modparams.gvt_workload_priority = + sanitize_priority(i915_modparams.gvt_workload_priority); i915_add_request(workload->req); workload->dispatched = true; } @@ -323,24 +521,27 @@ static struct intel_vgpu_workload *pick_next_workload( struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload = NULL; - mutex_lock(&gvt->lock); + mutex_lock(&gvt->sched_lock); /* * no current vgpu / will be scheduled out / no workload * bail out */ - if (!scheduler->current_vgpu) { + if (!scheduler->current_vgpu[ring_id]) { gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id); goto out; } - if (scheduler->need_reschedule) { + if (scheduler->need_reschedule[ring_id]) { gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id); goto out; } - if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) + if (list_empty(workload_q_head(scheduler->current_vgpu[ring_id], ring_id))) { + gvt_dbg_sched("ring id %d stop - no available workload\n", + ring_id); goto out; + } /* * still have current workload, maybe the workload dispatcher @@ -360,7 +561,7 @@ static struct intel_vgpu_workload *pick_next_workload( * schedule out a vgpu.
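 * In other words, while current_workload[ring_id] is still set the pick * is deferred; complete_current_workload() clears it before the next try.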
*/ scheduler->current_workload[ring_id] = container_of( - workload_q_head(scheduler->current_vgpu, ring_id)->next, + workload_q_head(scheduler->current_vgpu[ring_id], ring_id)->next, struct intel_vgpu_workload, list); workload = scheduler->current_workload[ring_id]; @@ -369,7 +570,7 @@ static struct intel_vgpu_workload *pick_next_workload( atomic_inc(&workload->vgpu->running_workload_num); out: - mutex_unlock(&gvt->lock); + mutex_unlock(&gvt->sched_lock); return workload; } @@ -390,32 +591,33 @@ static void update_guest_context(struct intel_vgpu_workload *workload) gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id, workload->ctx_desc.lrca); - context_page_num = gvt->dev_priv->engine[ring_id]->context_size; - - context_page_num = context_page_num >> PAGE_SHIFT; + if (!enable_lazy_shadow_ctx) { + context_page_num = gvt->dev_priv->engine[ring_id]->context_size; + context_page_num = context_page_num >> PAGE_SHIFT; - if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS) - context_page_num = 19; + if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS) + context_page_num = 19; - i = 2; + i = 2; - while (i < context_page_num) { - context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, + while (i < context_page_num) { + context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, (u32)((workload->ctx_desc.lrca + i) << GTT_PAGE_SHIFT)); - if (context_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid guest context descriptor\n"); - return; + if (context_gpa == INTEL_GVT_INVALID_ADDR) { + gvt_vgpu_err("invalid guest context descriptor\n"); + return; + } + + page = i915_gem_object_get_page(ctx_obj, + LRC_HEADER_PAGES + i); + src = kmap(page); + intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, + GTT_PAGE_SIZE); + kunmap(page); + i++; } - - page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i); - src = kmap(page); - intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, - GTT_PAGE_SIZE); - kunmap(page); - i++; } - intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4); @@ -448,7 +650,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) struct intel_vgpu *vgpu; int event; - mutex_lock(&gvt->lock); + mutex_lock(&gvt->sched_lock); workload = scheduler->current_workload[ring_id]; vgpu = workload->vgpu; @@ -483,9 +685,11 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) ENGINE_MASK(ring_id))) { update_guest_context(workload); + mutex_lock(&gvt->lock); for_each_set_bit(event, workload->pending_events, INTEL_GVT_EVENT_MAX) intel_vgpu_trigger_virtual_event(vgpu, event); + mutex_unlock(&gvt->lock); } mutex_lock(&dev_priv->drm.struct_mutex); /* unpin shadow ctx as the shadow_ctx update is done */ @@ -498,7 +702,11 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) scheduler->current_workload[ring_id] = NULL; + mutex_lock(&gvt->lock); list_del_init(&workload->list); + if (workload->status == -EIO) + intel_vgpu_reset_execlist(vgpu, 1 << ring_id); + workload->complete(workload); atomic_dec(&vgpu->running_workload_num); @@ -508,6 +716,19 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED); mutex_unlock(&gvt->lock); + mutex_unlock(&gvt->sched_lock); +} + +static void inject_error_cs_irq(struct intel_vgpu *vgpu, int ring_id) +{ + enum intel_gvt_event_type events[] = { + RCS_CMD_STREAMER_ERR, + BCS_CMD_STREAMER_ERR, + VCS_CMD_STREAMER_ERR, + VCS2_CMD_STREAMER_ERR, + 
VECS_CMD_STREAMER_ERR, + }; + intel_vgpu_trigger_virtual_event(vgpu, events[ring_id]); } struct workload_thread_param { @@ -524,8 +745,11 @@ static int workload_thread(void *priv) struct intel_vgpu_workload *workload = NULL; struct intel_vgpu *vgpu = NULL; int ret; + long lret; bool need_force_wake = IS_SKYLAKE(gvt->dev_priv) + || IS_BROXTON(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv); + DEFINE_WAIT_FUNC(wait, woken_wake_function); kfree(p); @@ -559,9 +783,9 @@ static int workload_thread(void *priv) intel_uncore_forcewake_get(gvt->dev_priv, FORCEWAKE_ALL); - mutex_lock(&gvt->lock); + mutex_lock(&gvt->sched_lock); ret = dispatch_workload(workload); - mutex_unlock(&gvt->lock); + mutex_unlock(&gvt->sched_lock); if (ret) { vgpu = workload->vgpu; @@ -571,7 +795,24 @@ static int workload_thread(void *priv) gvt_dbg_sched("ring id %d wait workload %p\n", workload->ring_id, workload); - i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT); + lret = i915_wait_request(workload->req, 0, + MAX_SCHEDULE_TIMEOUT); + + gvt_dbg_sched("i915_wait_request %p returns %ld\n", + workload, lret); + if (lret >= 0 && workload->status == -EINPROGRESS) + workload->status = 0; + + /* + * an increased guilty_count means that this request triggered + * a GPU reset, so we need to notify the guest about the + * hang. + */ + if (workload->guilty_count < + atomic_read(&workload->req->ctx->guilty_count)) { + workload->status = -EIO; + inject_error_cs_irq(workload->vgpu, ring_id); + } complete: gvt_dbg_sched("will complete workload %p, status: %d\n", @@ -610,9 +851,6 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) gvt_dbg_core("clean workload scheduler\n"); for_each_engine(engine, gvt->dev_priv, i) { - atomic_notifier_chain_unregister( - &engine->context_status_notifier, - &gvt->shadow_ctx_notifier_block[i]); kthread_stop(scheduler->thread[i]); } } @@ -649,11 +887,13 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) goto err; } - gvt->shadow_ctx_notifier_block[i].notifier_call = - shadow_context_status_change; - atomic_notifier_chain_register(&engine->context_status_notifier, - &gvt->shadow_ctx_notifier_block[i]); + + gvt->shadow_ctx_notifier_block[i].notifier_call = + shadow_context_status_change; + atomic_notifier_chain_register(&engine->context_status_notifier, + &gvt->shadow_ctx_notifier_block[i]); } + return 0; err: intel_gvt_clean_workload_scheduler(gvt); @@ -676,9 +916,25 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu) if (IS_ERR(vgpu->shadow_ctx)) return PTR_ERR(vgpu->shadow_ctx); + if (!vgpu->shadow_ctx->name) { + vgpu->shadow_ctx->name = kasprintf(GFP_KERNEL, "Shadow Context %d", vgpu->id); + } + vgpu->shadow_ctx->engine[RCS].initialised = true; bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES); return 0; } + +/** + * intel_vgpu_queue_workload - Queue a vGPU workload + * @workload: the workload to queue + */ +void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload) +{ + list_add_tail(&workload->list, + workload_q_head(workload->vgpu, workload->ring_id)); + intel_gvt_kick_schedule(workload->vgpu->gvt); + wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]); +} diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 93a49eb0209e..e11862c9b99d 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -37,10 +37,10 @@ #define _GVT_SCHEDULER_H_ struct intel_gvt_workload_scheduler { - struct intel_vgpu *current_vgpu; - struct intel_vgpu *next_vgpu; + struct
intel_vgpu *current_vgpu[I915_NUM_ENGINES]; + struct intel_vgpu *next_vgpu[I915_NUM_ENGINES]; struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES]; - bool need_reschedule; + bool need_reschedule[I915_NUM_ENGINES]; spinlock_t mmio_context_lock; /* can be null when owner is host */ @@ -85,6 +85,7 @@ struct intel_vgpu_workload { bool dispatched; bool shadowed; int status; + unsigned int guilty_count; struct intel_vgpu_mm *shadow_mm; @@ -110,6 +111,10 @@ struct intel_vgpu_workload { /* shadow batch buffer */ struct list_head shadow_bb; struct intel_shadow_wa_ctx wa_ctx; + + /* oa registers */ + u32 oactxctrl; + u32 flex_mmio[7]; }; /* Intel shadow batch buffer is a i915 gem object */ @@ -124,12 +129,7 @@ struct intel_shadow_bb_entry { #define workload_q_head(vgpu, ring_id) \ (&(vgpu->workload_q_head[ring_id])) -#define queue_workload(workload) do { \ - list_add_tail(&workload->list, \ - workload_q_head(workload->vgpu, workload->ring_id)); \ - wake_up(&workload->vgpu->gvt-> \ - scheduler.waitq[workload->ring_id]); \ -} while (0) +void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload); int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt); @@ -141,4 +141,8 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu); void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu); +void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx); + +extern bool gvt_shadow_wa_ctx; + #endif diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 02c61a1ad56a..f5802413591a 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -37,6 +37,11 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) { + enum pipe pipe; + int scaler; + struct intel_gvt *gvt = vgpu->gvt; + struct drm_i915_private *dev_priv = gvt->dev_priv; + /* setup the ballooning information */ vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC; vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1; @@ -55,6 +60,16 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu); + vgpu_vreg(vgpu, vgtif_reg(enable_pvmmio)) = 0; + + vgpu_vreg(vgpu, vgtif_reg(scaler_owned)) = 0; + for_each_pipe(dev_priv, pipe) + for_each_universal_scaler(dev_priv, pipe, scaler) + if (gvt->pipe_info[pipe].scaler_owner[scaler] == + vgpu->id) + vgpu_vreg(vgpu, vgtif_reg(scaler_owned)) |= + 1 << (pipe * SKL_NUM_SCALERS + scaler); + gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id); gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n", vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu)); @@ -222,19 +237,25 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; + mutex_lock(&vgpu->gvt->sched_lock); mutex_lock(&gvt->lock); vgpu->active = false; + idr_remove(&gvt->vgpu_idr, vgpu->id); + if (atomic_read(&vgpu->running_workload_num)) { mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->gvt->sched_lock); intel_gvt_wait_vgpu_idle(vgpu); + mutex_lock(&vgpu->gvt->sched_lock); mutex_lock(&gvt->lock); } intel_vgpu_stop_schedule(vgpu); mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->gvt->sched_lock); } /** @@ -384,6 +405,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_clean_shadow_ctx; + vgpu->active = true; + mutex_unlock(&gvt->lock); return vgpu; @@ -448,6 +471,8 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, return vgpu; } +#define _vgtif_reg(x) \ + (VGT_PVINFO_PAGE + offsetof(struct vgt_if, x)) /** * 
intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset * @vgpu: virtual GPU @@ -482,6 +507,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask; + bool enable_pvmmio = vgpu_vreg(vgpu, _vgtif_reg(enable_pvmmio)); gvt_dbg_core("------------------------------------------\n"); gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n", @@ -494,9 +520,11 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, * The current_vgpu will set to NULL after stopping the * scheduler when the reset is triggered by current vgpu. */ - if (scheduler->current_vgpu == NULL) { + if (scheduler->current_vgpu[0] == NULL) { mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->gvt->sched_lock); intel_gvt_wait_vgpu_idle(vgpu); + mutex_lock(&vgpu->gvt->sched_lock); mutex_lock(&gvt->lock); } @@ -513,6 +541,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, intel_vgpu_reset_mmio(vgpu, dmlr); populate_pvinfo_page(vgpu); + vgpu_vreg(vgpu, _vgtif_reg(enable_pvmmio)) = enable_pvmmio; intel_vgpu_reset_display(vgpu); if (dmlr) { @@ -537,7 +566,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, */ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) { + mutex_lock(&vgpu->gvt->sched_lock); mutex_lock(&vgpu->gvt->lock); intel_gvt_reset_vgpu_locked(vgpu, true, 0); mutex_unlock(&vgpu->gvt->lock); + mutex_unlock(&vgpu->gvt->sched_lock); } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e4d4b6b41e26..fd52e6f6b936 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -42,13 +42,15 @@ static __always_inline void seq_print_param(struct seq_file *m, const void *x) { if (!__builtin_strcmp(type, "bool")) - seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x)); + seq_printf(m, "i915_modparams.%s=%s\n", name, yesno(*(const bool *)x)); else if (!__builtin_strcmp(type, "int")) - seq_printf(m, "i915.%s=%d\n", name, *(const int *)x); + seq_printf(m, "i915_modparams.%s=%d\n", name, *(const int *)x); else if (!__builtin_strcmp(type, "unsigned int")) - seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x); + seq_printf(m, "i915_modparams.%s=%u\n", name, *(const unsigned int *)x); + else if (!__builtin_strcmp(type, "unsigned long long")) + seq_printf(m, "i915_modparams.%s=%llu\n", name, *(const unsigned long long *)x); else if (!__builtin_strcmp(type, "char *")) - seq_printf(m, "i915.%s=%s\n", name, *(const char **)x); + seq_printf(m, "i915_modparams.%s=%s\n", name, *(const char **)x); else BUILD_BUG(); } @@ -67,7 +69,7 @@ static int i915_capabilities(struct seq_file *m, void *data) #undef PRINT_FLAG kernel_param_lock(THIS_MODULE); -#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x); +#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915_modparams.x); I915_PARAMS_FOR_EACH(PRINT_PARAM); #undef PRINT_PARAM kernel_param_unlock(THIS_MODULE); @@ -1043,7 +1045,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { u32 freq_sts; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); @@ -1066,7 +1068,7 @@ static int 
i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "efficient (RPe) frequency: %d MHz\n", intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } else if (INTEL_GEN(dev_priv) >= 6) { u32 rp_state_limits; u32 gt_perf_status; @@ -1267,7 +1269,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) if (waitqueue_active(&dev_priv->gpu_error.reset_queue)) seq_puts(m, "struct_mutex blocked for reset\n"); - if (!i915.enable_hangcheck) { + if (!i915_modparams.enable_hangcheck) { seq_puts(m, "Hangcheck disabled\n"); return 0; } @@ -1422,6 +1424,9 @@ static int i915_forcewake_domains(struct seq_file *m, void *data) struct intel_uncore_forcewake_domain *fw_domain; unsigned int tmp; + seq_printf(m, "user.bypass_count = %u\n", + i915->uncore.user_forcewake.count); + for_each_fw_domain(fw_domain, i915, tmp) seq_printf(m, "%s.wake_count = %u\n", intel_uncore_forcewake_domain_to_str(fw_domain->id), @@ -1502,9 +1507,9 @@ static int gen6_drpc_info(struct seq_file *m) gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS); } - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); seq_printf(m, "Video Turbo Mode: %s\n", yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); @@ -1699,7 +1704,7 @@ static int i915_ips_status(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); seq_printf(m, "Enabled by kernel parameter: %s\n", - yesno(i915.enable_ips)); + yesno(i915_modparams.enable_ips)); if (INTEL_GEN(dev_priv) >= 8) { seq_puts(m, "Currently: unknown\n"); @@ -1786,7 +1791,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev_priv->pcu_lock); if (ret) goto out; @@ -1817,7 +1822,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) ((ia_freq >> 8) & 0xff) * 100); } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); out: intel_runtime_pm_put(dev_priv); @@ -1910,6 +1915,49 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) ring->space, ring->head, ring->tail); } +bool is_shadow_context(struct i915_gem_context *ctx) +{ + if (ctx->name && !strncmp(ctx->name, "Shadow Context", 14)) + return true; + + return false; +} + +int get_vgt_id(struct i915_gem_context *ctx) +{ + int vgt_id; + + vgt_id = 0; + + if (is_shadow_context(ctx)) + sscanf(ctx->name, "Shadow Context %d", &vgt_id); + + return vgt_id; +} + +int get_pid_shadowed(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + int pid, vgt_id; + + sscanf(ctx->name, "Shadow Context %d", &vgt_id); + pid = intel_read_status_page(engine, I915_GEM_HWS_PID_INDEX + vgt_id); + return pid; +} + +static void describe_ctx_ring_shadowed(struct seq_file *m, + struct i915_gem_context *ctx, struct intel_ring *ring, + struct intel_engine_cs *engine) +{ + int pid, cid, vgt_id; + + sscanf(ctx->name, "Shadow Context %d", &vgt_id); + pid = intel_read_status_page(engine, I915_GEM_HWS_PID_INDEX + vgt_id); + cid = intel_read_status_page(engine, I915_GEM_HWS_CID_INDEX + vgt_id); + seq_printf(m, " (Current DomU Process PID: %d, CID: %d)", + pid, cid); +} + static int i915_context_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -1924,6 
+1972,7 @@ static int i915_context_status(struct seq_file *m, void *unused) return ret; list_for_each_entry(ctx, &dev_priv->contexts.list, link) { + bool is_shadow_context = false; seq_printf(m, "HW context %u ", ctx->hw_id); if (ctx->pid) { struct task_struct *task; @@ -1934,6 +1983,9 @@ static int i915_context_status(struct seq_file *m, void *unused) task->comm, task->pid); put_task_struct(task); } + } else if (ctx->name && !strncmp(ctx->name, "Shadow Context", 14)) { + seq_puts(m, "DomU Shadow Context "); + is_shadow_context = true; } else if (IS_ERR(ctx->file_priv)) { seq_puts(m, "(deleted) "); } else { @@ -1945,13 +1997,22 @@ static int i915_context_status(struct seq_file *m, void *unused) for_each_engine(engine, dev_priv, id) { struct intel_context *ce = &ctx->engine[engine->id]; + u64 lrc_desc = intel_lr_context_descriptor(ctx, + engine); seq_printf(m, "%s: ", engine->name); + seq_printf(m, "ctx id 0x%x ", (uint32_t)((lrc_desc >> 12) & + 0xFFFFF)); seq_putc(m, ce->initialised ? 'I' : 'i'); if (ce->state) describe_obj(m, ce->state->obj); - if (ce->ring) + if (ce->ring) { describe_ctx_ring(m, ce->ring); + if(is_shadow_context) + describe_ctx_ring_shadowed(m, ctx, + ce->ring, engine); + } + seq_putc(m, '\n'); } @@ -2014,7 +2075,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) enum intel_engine_id id; int ret; - if (!i915.enable_execlists) { + if (!i915_modparams.enable_execlists) { seq_printf(m, "Logical Ring Contexts are disabled\n"); return 0; } @@ -2443,12 +2504,8 @@ static void i915_guc_client_info(struct seq_file *m, seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n", client->priority, client->stage_id, client->proc_desc_offset); - seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n", - client->doorbell_id, client->doorbell_offset, client->doorbell_cookie); - seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n", - client->wq_size, client->wq_offset, client->wq_tail); - - seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space); + seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n", + client->doorbell_id, client->doorbell_offset); for_each_engine(engine, dev_priv, id) { u64 submissions = client->submissions[id]; @@ -2594,7 +2651,7 @@ static int i915_guc_log_control_get(void *data, u64 *val) if (!dev_priv->guc.log.vma) return -EINVAL; - *val = i915.guc_log_level; + *val = i915_modparams.guc_log_level; return 0; } @@ -2951,15 +3008,23 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) struct drm_device *dev = &dev_priv->drm; struct drm_crtc *crtc = &intel_crtc->base; struct intel_encoder *intel_encoder; - struct drm_plane_state *plane_state = crtc->primary->state; - struct drm_framebuffer *fb = plane_state->fb; + struct drm_plane_state *plane_state; + struct drm_framebuffer *fb; + + if (!crtc->primary) { + seq_puts(m, "\tno primary plane\n"); + } else { + plane_state = crtc->primary->state; + fb = plane_state->fb; - if (fb) - seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", + if (fb) + seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", fb->base.id, plane_state->src_x >> 16, plane_state->src_y >> 16, fb->width, fb->height); - else - seq_puts(m, "\tprimary plane disabled\n"); + else + seq_puts(m, "\tprimary plane disabled\n"); + } + for_each_encoder_on_crtc(dev, crtc, intel_encoder) intel_encoder_info(m, intel_crtc, intel_encoder); } @@ -3204,16 +3269,29 @@ static int i915_display_info(struct seq_file *m, void *unused) intel_crtc_info(m, crtc); - seq_printf(m, "\tcursor visible? 
%s, position (%d, %d), size %dx%d, addr 0x%08x\n", - yesno(cursor->base.state->visible), - cursor->base.state->crtc_x, - cursor->base.state->crtc_y, - cursor->base.state->crtc_w, - cursor->base.state->crtc_h, - cursor->cursor.base); + if (cursor) { + seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n", + yesno(cursor->base.state->visible), + cursor->base.state->crtc_x, + cursor->base.state->crtc_y, + cursor->base.state->crtc_w, + cursor->base.state->crtc_h, + cursor->cursor.base); + } else { + seq_puts(m, "\tNo cursor plane available on this platform\n"); + } + intel_scaler_info(m, crtc); intel_plane_info(m, crtc); } + if (INTEL_GEN(dev_priv) >= 9 && pipe_config->base.active) { + struct drm_rgba background = pipe_config->base.background_color; + + seq_printf(m, "\tbackground color (10bpc): r=%x g=%x b=%x\n", + DRM_RGBA_REDBITS(background, 10), + DRM_RGBA_GREENBITS(background, 10), + DRM_RGBA_BLUEBITS(background, 10)); + } seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", yesno(!crtc->cpu_fifo_underrun_disabled), @@ -3312,7 +3390,9 @@ static int i915_engine_info(struct seq_file *m, void *unused) seq_printf(m, "\tBBADDR: 0x%08x_%08x\n", upper_32_bits(addr), lower_32_bits(addr)); - if (i915.enable_execlists) { + if (i915_modparams.enable_execlists) { + const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; + struct intel_engine_execlists * const execlists = &engine->execlists; u32 ptr, read, write; unsigned int idx; @@ -3323,8 +3403,10 @@ static int i915_engine_info(struct seq_file *m, void *unused) ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); read = GEN8_CSB_READ_PTR(ptr); write = GEN8_CSB_WRITE_PTR(ptr); - seq_printf(m, "\tExeclist CSB read %d, write %d, interrupt posted? %s\n", - read, write, + seq_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? 
%s\n", + read, execlists->csb_head, + write, + intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)), yesno(test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))); if (read >= GEN8_CSB_ENTRIES) @@ -3335,18 +3417,19 @@ static int i915_engine_info(struct seq_file *m, void *unused) write += GEN8_CSB_ENTRIES; while (read < write) { idx = ++read % GEN8_CSB_ENTRIES; - seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n", + seq_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n", idx, I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)), - I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx))); + hws[idx * 2], + I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)), + hws[idx * 2 + 1]); } rcu_read_lock(); - for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) { + for (idx = 0; idx < execlists_num_ports(execlists); idx++) { unsigned int count; - rq = port_unpack(&engine->execlist_port[idx], - &count); + rq = port_unpack(&execlists->port[idx], &count); if (rq) { seq_printf(m, "\t\tELSP[%d] count=%d, ", idx, count); @@ -3359,7 +3442,7 @@ static int i915_engine_info(struct seq_file *m, void *unused) rcu_read_unlock(); spin_lock_irq(&engine->timeline->lock); - for (rb = engine->execlist_first; rb; rb = rb_next(rb)){ + for (rb = execlists->first; rb; rb = rb_next(rb)) { struct i915_priolist *p = rb_entry(rb, typeof(*p), node); @@ -3403,7 +3486,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) enum intel_engine_id id; int j, ret; - if (!i915.semaphores) { + if (!i915_modparams.semaphores) { seq_puts(m, "Semaphores are disabled\n"); return 0; } @@ -3549,11 +3632,18 @@ static int i915_ddb_info(struct seq_file *m, void *unused) seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, entry->start, entry->end, skl_ddb_entry_size(entry)); + entry = &ddb->y_plane[pipe][plane]; + seq_printf(m, " YPlane%-8d%8u%8u%8u\n", plane + 1, + entry->start, entry->end, + skl_ddb_entry_size(entry)); } entry = &ddb->plane[pipe][PLANE_CURSOR]; seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, entry->end, skl_ddb_entry_size(entry)); + entry = &ddb->y_plane[pipe][PLANE_CURSOR]; + seq_printf(m, " Y%-13s%8u%8u%8u\n", "Cursor", entry->start, + entry->end, skl_ddb_entry_size(entry)); } drm_modeset_unlock_all(dev); @@ -4153,6 +4243,9 @@ i915_wedged_set(void *data, u64 val) struct intel_engine_cs *engine; unsigned int tmp; + if (intel_vgpu_active(i915)) + return -EINVAL; + /* * There is no safeguard against this debugfs entry colliding * with the hangcheck calling same i915_handle_error() in @@ -4169,7 +4262,8 @@ i915_wedged_set(void *data, u64 val) engine->hangcheck.stalled = true; } - i915_handle_error(i915, val, "Manually setting wedged to %llu", val); + i915_handle_error(i915, val, I915_ERROR_CAPTURE, + "Manually set wedged engine mask = %llx", val); wait_on_bit(&i915->gpu_error.flags, I915_RESET_HANDOFF, @@ -4353,7 +4447,7 @@ i915_max_freq_set(void *data, u64 val) DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev_priv->pcu_lock); if (ret) return ret; @@ -4366,7 +4460,7 @@ i915_max_freq_set(void *data, u64 val) hw_min = dev_priv->rps.min_freq; if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); return -EINVAL; } @@ -4375,7 +4469,7 @@ i915_max_freq_set(void *data, u64 val) if (intel_set_rps(dev_priv, val)) DRM_DEBUG_DRIVER("failed to update RPS 
to new softlimit\n"); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); return 0; } @@ -4408,7 +4502,7 @@ i915_min_freq_set(void *data, u64 val) DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + ret = mutex_lock_interruptible(&dev_priv->pcu_lock); if (ret) return ret; @@ -4422,7 +4516,7 @@ i915_min_freq_set(void *data, u64 val) if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) { - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); return -EINVAL; } @@ -4431,7 +4525,7 @@ i915_min_freq_set(void *data, u64 val) if (intel_set_rps(dev_priv, val)) DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n"); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); return 0; } @@ -4674,26 +4768,26 @@ static int i915_sseu_status(struct seq_file *m, void *unused) static int i915_forcewake_open(struct inode *inode, struct file *file) { - struct drm_i915_private *dev_priv = inode->i_private; + struct drm_i915_private *i915 = inode->i_private; - if (INTEL_GEN(dev_priv) < 6) + if (INTEL_GEN(i915) < 6) return 0; - intel_runtime_pm_get(dev_priv); - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_runtime_pm_get(i915); + intel_uncore_forcewake_user_get(i915); return 0; } static int i915_forcewake_release(struct inode *inode, struct file *file) { - struct drm_i915_private *dev_priv = inode->i_private; + struct drm_i915_private *i915 = inode->i_private; - if (INTEL_GEN(dev_priv) < 6) + if (INTEL_GEN(i915) < 6) return 0; - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv); + intel_uncore_forcewake_user_put(i915); + intel_runtime_pm_put(i915); return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 9f45cfeae775..09a58ee253cc 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -49,6 +49,7 @@ #include "i915_drv.h" #include "i915_trace.h" #include "i915_vgpu.h" +#include "intel_uc.h" #include "intel_drv.h" #include "intel_uc.h" @@ -58,12 +59,12 @@ static unsigned int i915_load_fail_count; bool __i915_inject_load_failure(const char *func, int line) { - if (i915_load_fail_count >= i915.inject_load_failure) + if (i915_load_fail_count >= i915_modparams.inject_load_failure) return false; - if (++i915_load_fail_count == i915.inject_load_failure) { + if (++i915_load_fail_count == i915_modparams.inject_load_failure) { DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n", - i915.inject_load_failure, func, line); + i915_modparams.inject_load_failure, func, line); return true; } @@ -106,8 +107,8 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, static bool i915_error_injected(struct drm_i915_private *dev_priv) { - return i915.inject_load_failure && - i915_load_fail_count == i915.inject_load_failure; + return i915_modparams.inject_load_failure && + i915_load_fail_count == i915_modparams.inject_load_failure; } #define i915_load_error(dev_priv, fmt, ...) 
\
@@ -146,6 +147,9 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
 } else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
 ret = PCH_CNP;
 DRM_DEBUG_KMS("Assuming CannonPoint PCH\n");
+ } else if (IS_BROXTON(dev_priv)) {
+ ret = PCH_NONE;
+ DRM_DEBUG_KMS("Assuming None PCH for BXT\n");
+ }
 return ret;
@@ -320,7 +324,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 value = USES_PPGTT(dev_priv);
 break;
 case I915_PARAM_HAS_SEMAPHORES:
- value = i915.semaphores;
+ value = i915_modparams.semaphores;
 break;
 case I915_PARAM_HAS_SECURE_BATCHES:
 value = capable(CAP_SYS_ADMIN);
@@ -339,7 +343,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
 return -ENODEV;
 break;
 case I915_PARAM_HAS_GPU_RESET:
- value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
+ value = i915_modparams.enable_hangcheck &&
+ intel_has_gpu_reset(dev_priv);
 if (value && intel_has_reset_engine(dev_priv))
 value = 2;
 break;
@@ -365,9 +370,18 @@ static int i915_getparam(struct drm_device *dev, void *data,
 value = i915_gem_mmap_gtt_version();
 break;
 case I915_PARAM_HAS_SCHEDULER:
- value = dev_priv->engine[RCS] &&
- dev_priv->engine[RCS]->schedule;
+ value = 0;
+ if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) {
+ value |= I915_SCHEDULER_CAP_ENABLED;
+ value |= I915_SCHEDULER_CAP_PRIORITY;
+
+ if (INTEL_INFO(dev_priv)->has_logical_ring_preemption &&
+ i915_modparams.enable_execlists &&
+ !i915_modparams.enable_guc_submission)
+ value |= I915_SCHEDULER_CAP_PREEMPTION;
+ }
 break;
+
 case I915_PARAM_MMAP_VERSION:
 /* Remember to bump this if the version changes! */
 case I915_PARAM_HAS_GEM:
@@ -659,7 +673,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
 if (ret)
 goto cleanup_irq;
- intel_uc_init_fw(dev_priv);
+ /*
+ * ANDROID: we cannot attempt to load the fw here; the filesystem
+ * where our bin files are located won't be mounted until much
+ * later.
+ * intel_uc_init_fw(dev_priv);
+ */
 ret = i915_gem_init(dev_priv);
 if (ret)
@@ -670,9 +689,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
 if (INTEL_INFO(dev_priv)->num_pipes == 0)
 return 0;
- ret = intel_fbdev_init(dev);
- if (ret)
- goto cleanup_gem;
+ if (!i915_modparams.enable_initial_modeset) {
+ ret = intel_fbdev_init(dev);
+ if (ret)
+ goto cleanup_gem;
+ }
 /* Only enable hotplug handling once the fbdev is fully set up. 
*/
 intel_hpd_init(dev_priv);
@@ -872,6 +893,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 device_info->gen_mask = BIT(device_info->gen - 1);
 spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->shared_page_lock);
 spin_lock_init(&dev_priv->gpu_error.lock);
 mutex_init(&dev_priv->backlight_lock);
 spin_lock_init(&dev_priv->uncore.lock);
@@ -890,6 +912,14 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 if (ret < 0)
 goto err_engines;
+ if (IS_BROXTON(dev_priv)) {
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+
+ info->num_sprites[PIPE_A] = 2;
+ info->num_sprites[PIPE_B] = 2;
+ info->num_sprites[PIPE_C] = 1;
+ }
+
 /* This must be called before any calls to HAS_PCH_* */
 intel_detect_pch(dev_priv);
@@ -974,6 +1004,9 @@ static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
 intel_teardown_mchbar(dev_priv);
 pci_iounmap(pdev, dev_priv->regs);
+ if (intel_vgpu_active(dev_priv) && dev_priv->shared_page)
+ pci_iounmap(pdev, dev_priv->shared_page);
+
 }
 /**
@@ -1001,6 +1034,21 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 intel_uncore_init(dev_priv);
+ if (intel_vgpu_active(dev_priv) && i915_modparams.enable_pvmmio) {
+ u32 bar = 0;
+ u32 mmio_size = 2 * 1024 * 1024;
+
+ /* Map a shared page from the end of the 2M mmio region in bar0. */
+ dev_priv->shared_page = (struct gvt_shared_page *)
+ pci_iomap_range(dev_priv->drm.pdev, bar,
+ mmio_size, PAGE_SIZE);
+ if (dev_priv->shared_page == NULL) {
+ ret = -EIO;
+ DRM_ERROR("ivi: failed to map shared page.\n");
+ goto err_uncore;
+ }
+ }
+
 ret = intel_engines_init_mmio(dev_priv);
 if (ret)
 goto err_uncore;
@@ -1010,6 +1058,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 return 0;
 err_uncore:
+ if (intel_vgpu_active(dev_priv) && dev_priv->shared_page)
+ pci_iounmap(dev_priv->drm.pdev, dev_priv->shared_page);
 intel_uncore_fini(dev_priv);
 err_bridge:
 pci_dev_put(dev_priv->bridge_dev);
@@ -1030,22 +1080,25 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 {
- i915.enable_execlists =
+ i915_modparams.enable_execlists =
 intel_sanitize_enable_execlists(dev_priv,
- i915.enable_execlists);
+ i915_modparams.enable_execlists);
 /*
- * i915.enable_ppgtt is read-only, so do an early pass to validate the
+ * i915_modparams.enable_ppgtt is read-only, so do an early pass to validate the
 * user's requested state against the hardware/driver capabilities. We
 * do this now so that we can print out any log messages once rather
 * than every time we check intel_enable_ppgtt().
 */
- i915.enable_ppgtt =
- intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
- DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
+ i915_modparams.enable_ppgtt =
+ intel_sanitize_enable_ppgtt(dev_priv,
+ i915_modparams.enable_ppgtt);
+ DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
- i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
- DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
+ i915_modparams.semaphores =
+ intel_sanitize_semaphores(dev_priv, i915_modparams.semaphores);
+ DRM_DEBUG_DRIVER("use GPU semaphores? 
%s\n",
+ yesno(i915_modparams.semaphores));
 intel_uc_sanitize_options(dev_priv);
@@ -1206,7 +1259,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 /* Reveal our presence to userspace */
 if (drm_dev_register(dev, 0) == 0) {
 i915_debugfs_register(dev_priv);
- i915_guc_log_register(dev_priv);
+ /* ANDROID: we deferred the guc log registration */
+ if (dev_priv->contexts_ready)
+ i915_guc_log_register(dev_priv);
 i915_setup_sysfs(dev_priv);
 /* Depends on sysfs having been initialized */
@@ -1232,7 +1287,10 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 * irqs are fully enabled. We do it last so that the async config
 * cannot run before the connectors are registered.
 */
- intel_fbdev_initial_config_async(dev);
+ if (i915_modparams.enable_initial_modeset)
+ intel_initial_mode_config_init(dev);
+ else
+ intel_fbdev_initial_config_async(dev);
 }
 /**
@@ -1257,6 +1315,59 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 i915_gem_shrinker_cleanup(dev_priv);
 }
+static inline int get_max_avail_pipes(struct drm_i915_private *dev_priv)
+{
+ enum pipe pipe;
+ int index = 0;
+
+ if (!intel_vgpu_active(dev_priv) ||
+ !i915_modparams.avail_planes_per_pipe)
+ return INTEL_INFO(dev_priv)->num_pipes;
+
+ for_each_pipe(dev_priv, pipe) {
+ if (AVAIL_PLANE_PER_PIPE(dev_priv, i915_modparams.avail_planes_per_pipe,
+ pipe))
+ index++;
+ }
+
+ return index;
+}
+
+#ifdef CONFIG_DRM_I915_LOAD_ASYNC_SUPPORT
+
+static int i915_load_finished;
+static DECLARE_WAIT_QUEUE_HEAD(i915_load_queue);
+
+#include
+struct drm_i915_load_para {
+ struct pci_dev *dev;
+ const struct pci_device_id *ent;
+};
+
+static int drm_i915_load_fn(void *arg)
+{
+ struct drm_i915_load_para *para = (struct drm_i915_load_para *)arg;
+ struct pci_dev *dev = para->dev;
+ const struct pci_device_id *ent = para->ent;
+ int ret = i915_driver_load(dev, ent);
+ i915_load_finished = 1;
+ wake_up(&i915_load_queue);
+ return ret;
+}
+
+int i915_driver_load_async(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct drm_i915_load_para *para;
+ para = kmalloc(sizeof(struct drm_i915_load_para), GFP_KERNEL);
+ if (para == NULL)
+ return -ENOMEM;
+ para->dev = pdev;
+ para->ent = ent;
+ kthread_run(drm_i915_load_fn, (void *)para, "drm_i915_load_thread");
+ return 0;
+}
+#endif
+
 /**
 * i915_driver_load - setup chip and create an initial config
 * @pdev: PCI device
@@ -1274,9 +1385,10 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 (struct intel_device_info *)ent->driver_data;
 struct drm_i915_private *dev_priv;
 int ret;
+ int num_crtcs = 0;
 /* Enable nuclear pageflip on ILK+ */
- if (!i915.nuclear_pageflip && match_info->gen < 5)
+ if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
 driver.driver_features &= ~DRIVER_ATOMIC;
 ret = -ENOMEM;
@@ -1325,9 +1437,9 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 * of the i915_driver_init_/i915_driver_register functions according
 * to the role/effect of the given init step. 
*/ - if (INTEL_INFO(dev_priv)->num_pipes) { - ret = drm_vblank_init(&dev_priv->drm, - INTEL_INFO(dev_priv)->num_pipes); + num_crtcs = get_max_avail_pipes(dev_priv); + if (num_crtcs) { + ret = drm_vblank_init(&dev_priv->drm, num_crtcs); if (ret) goto out_cleanup_hw; } @@ -1340,7 +1452,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) intel_runtime_pm_enable(dev_priv); - dev_priv->ipc_enabled = false; + intel_init_ipc(dev_priv); if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) DRM_INFO("DRM_I915_DEBUG enabled\n"); @@ -1375,6 +1487,11 @@ void i915_driver_unload(struct drm_device *dev) i915_driver_unregister(dev_priv); + if (!i915_modparams.enable_initial_modeset) + intel_fbdev_fini(dev_priv); + else + intel_initial_mode_config_fini(dev); + if (i915_gem_suspend(dev_priv)) DRM_ERROR("failed to idle hardware; continuing to unload!\n"); @@ -1435,7 +1552,9 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file) { struct drm_i915_private *i915 = to_i915(dev); int ret; - +#ifdef CONFIG_DRM_I915_LOAD_ASYNC_SUPPORT + wait_event_interruptible(i915_load_queue, i915_load_finished == 1); +#endif ret = i915_gem_open(i915, file); if (ret) return ret; @@ -1457,7 +1576,8 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file) */ static void i915_driver_lastclose(struct drm_device *dev) { - intel_fbdev_restore_mode(dev); + if (!i915_modparams.enable_initial_modeset) + intel_fbdev_restore_mode(dev); vga_switcheroo_process_delayed_switch(); } @@ -1470,6 +1590,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) i915_gem_release(dev, file); mutex_unlock(&dev->struct_mutex); + kfree(file_priv->process_name); kfree(file_priv); } @@ -1693,6 +1814,7 @@ static int i915_drm_resume(struct drm_device *dev) intel_guc_resume(dev_priv); intel_modeset_init_hw(dev); + intel_init_clock_gating(dev_priv); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) @@ -1805,6 +1927,8 @@ static int i915_drm_resume_early(struct drm_device *dev) if (IS_GEN9_LP(dev_priv) || !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) intel_power_domains_init_hw(dev_priv, true); + else + intel_display_set_init_power(dev_priv, true); i915_gem_sanitize(dev_priv); @@ -1833,7 +1957,6 @@ static int i915_resume_switcheroo(struct drm_device *dev) /** * i915_reset - reset chip after a hang * @i915: #drm_i915_private to reset - * @flags: Instructions * * Reset the chip. Useful if a hang is detected. Marks the device as wedged * on failure. @@ -1848,7 +1971,7 @@ static int i915_resume_switcheroo(struct drm_device *dev) * - re-init interrupt state * - re-init display */ -void i915_reset(struct drm_i915_private *i915, unsigned int flags) +void i915_reset(struct drm_i915_private *i915) { struct i915_gpu_error *error = &i915->gpu_error; int ret; @@ -1863,8 +1986,9 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags) if (!i915_gem_unset_wedged(i915)) goto wakeup; - if (!(flags & I915_RESET_QUIET)) - dev_notice(i915->drm.dev, "Resetting chip after gpu hang\n"); + if (error->reason) + dev_notice(i915->drm.dev, + "Resetting chip for %s\n", error->reason); error->reset_count++; disable_irq(i915->drm.irq); @@ -1933,7 +2057,7 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags) /** * i915_reset_engine - reset GPU engine to recover from a hang * @engine: engine to reset - * @flags: options + * @msg: reason for GPU reset; or NULL for no dev_notice() * * Reset a specific GPU engine. Useful if a hang is detected. 
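+ * The caller must already own the engine's reset bit (I915_RESET_ENGINE +
+ * engine->id) in gpu_error.flags, as asserted by the GEM_BUG_ON below.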
* Returns zero on successful reset or otherwise an error code. @@ -1943,7 +2067,7 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags) * - reset engine (which will force the engine to idle) * - re-init/configure engine */ -int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags) +int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) { struct i915_gpu_error *error = &engine->i915->gpu_error; struct drm_i915_gem_request *active_request; @@ -1951,19 +2075,18 @@ int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags) GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); - if (!(flags & I915_RESET_QUIET)) { - dev_notice(engine->i915->drm.dev, - "Resetting %s after gpu hang\n", engine->name); - } - error->reset_engine_count[engine->id]++; - active_request = i915_gem_reset_prepare_engine(engine); - if (IS_ERR(active_request)) { - DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n"); + if (IS_ERR_OR_NULL(active_request)) { + /* Either the previous reset failed, or we pardon the reset. */ ret = PTR_ERR(active_request); goto out; } + if (msg) + dev_notice(engine->i915->drm.dev, + "Resetting %s for %s\n", engine->name, msg); + error->reset_engine_count[engine->id]++; + ret = intel_gpu_reset(engine->i915, intel_engine_flag(engine)); if (ret) { /* If we fail here, we expect to fallback to a global reset */ @@ -2591,6 +2714,8 @@ static int intel_runtime_resume(struct device *kdev) ret = vlv_resume_prepare(dev_priv, true); } + intel_uncore_runtime_resume(dev_priv); + /* * No point of rolling back things in case of an error, as the best * we can do is to hope that things will still work (and disable RPM). @@ -2608,6 +2733,8 @@ static int intel_runtime_resume(struct device *kdev) if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) intel_hpd_init(dev_priv); + intel_enable_ipc(dev_priv); + enable_rpm_wakeref_asserts(dev_priv); if (ret) @@ -2738,6 +2865,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GVTBUFFER, i915_gem_gvtbuffer_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_ACCESS_USERDATA, i915_gem_access_userdata_ioctl, DRM_RENDER_ALLOW), }; static struct drm_driver driver = { @@ -2752,6 +2881,7 @@ static struct drm_driver driver = { .lastclose = i915_driver_lastclose, .postclose = i915_driver_postclose, + .gem_open_object = i915_gem_open_object, .gem_close_object = i915_gem_close_object, .gem_free_object_unlocked = i915_gem_free_object, .gem_vm_ops = &i915_gem_vm_ops, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 18d9da53282b..6b950640c559 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -53,6 +53,7 @@ #include "i915_params.h" #include "i915_reg.h" +#include "i915_pvinfo.h" #include "i915_utils.h" #include "intel_uncore.h" @@ -93,7 +94,7 @@ #define I915_STATE_WARN(condition, format...) 
({ \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ - if (!WARN(i915.verbose_state_checks, format)) \ + if (!WARN(i915_modparams.verbose_state_checks, format)) \ DRM_ERROR(format); \ unlikely(__ret_warn_on); \ }) @@ -576,6 +577,8 @@ struct i915_mmu_object; struct drm_i915_file_private { struct drm_i915_private *dev_priv; struct drm_file *file; + char *process_name; + struct pid *tgid; struct { spinlock_t lock; @@ -595,6 +598,8 @@ struct drm_i915_file_private { unsigned int bsd_engine; + struct bin_attribute *obj_attr; + /* Client can have a maximum of 3 contexts banned before * it is denied of creating new contexts. As one context * ban needs 4 consecutive hangs, and more if there is @@ -767,6 +772,7 @@ struct intel_csr { func(has_l3_dpf); \ func(has_llc); \ func(has_logical_ring_contexts); \ + func(has_logical_ring_preemption); \ func(has_overlay); \ func(has_pipe_cxsr); \ func(has_pooled_eu); \ @@ -780,7 +786,8 @@ struct intel_csr { func(cursor_needs_physical); \ func(hws_needs_physical); \ func(overlay_needs_physical); \ - func(supports_tv); + func(supports_tv); \ + func(has_ipc); struct sseu_dev_info { u8 slice_mask; @@ -842,6 +849,7 @@ struct intel_device_info { u8 gen; u16 gen_mask; enum intel_platform platform; + u8 gt; /* GT number, 0 if undefined */ u8 ring_mask; /* Rings supported by the HW */ u8 num_rings; #define DEFINE_FLAG(name) u8 name:1 @@ -956,6 +964,7 @@ struct i915_gpu_state { pid_t pid; u32 handle; u32 hw_id; + int priority; int ban_score; int active; int guilty; @@ -978,11 +987,13 @@ struct i915_gpu_state { long jiffies; pid_t pid; u32 context; + int priority; int ban_score; u32 seqno; u32 head; u32 tail; - } *requests, execlist[2]; + } *requests, execlist[EXECLIST_MAX_PORTS]; + unsigned int num_ports; struct drm_i915_error_waiter { char comm[TASK_COMM_LEN]; @@ -1145,6 +1156,22 @@ struct i915_drrs { enum drrs_support_type type; }; +struct splash_screen_info { + struct list_head link; + const struct firmware *fw; + struct drm_i915_gem_object *obj; + char *connector_name; + char *image_name; + int width; + int height; + int pitch; + int crtc_x; + int crtc_y; + int crtc_w; + int crtc_h; + struct drm_framebuffer *fb; +}; + struct i915_psr { struct mutex lock; bool sink_support; @@ -1324,14 +1351,6 @@ struct intel_gen6_power_mgmt { /* manual wa residency calculations */ struct intel_rps_ei ei; - - /* - * Protects RPS/RC6 register access and PCU communication. - * Must be taken after struct_mutex if nested. Note that - * this lock may be held for long periods of time when - * talking to hw - so only take it when talking to hw! - */ - struct mutex hw_lock; }; /* defined intel_pm.c */ @@ -1499,6 +1518,8 @@ struct i915_gem_mm { spinlock_t object_stat_lock; u64 object_memory; u32 object_count; + + size_t phys_mem_total; }; struct drm_i915_error_state_buf { @@ -1597,6 +1618,9 @@ struct i915_gpu_error { /** Number of times an engine has been reset */ u32 reset_engine_count[I915_NUM_ENGINES]; + /** Reason for the current *global* reset */ + const char *reason; + /** * Waitqueue to signal when a hang is detected. Used to for waiters * to release the struct_mutex for the reset to procede. 
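/*
 * Illustrative sketch (not part of the patch): how a recovery path calls
 * the reworked reset API shown in this series. i915_reset_engine() now
 * takes a human-readable reason instead of I915_RESET_QUIET flags (NULL
 * suppresses the dev_notice()), and i915_handle_error() takes an explicit
 * I915_ERROR_CAPTURE flag; example_recover() itself is hypothetical.
 */
static void example_recover(struct intel_engine_cs *engine)
{
	/* Try the lightweight per-engine reset first... */
	if (i915_reset_engine(engine, "hangcheck timeout") == 0)
		return;

	/* ...then escalate to a full, captured GPU reset. */
	i915_handle_error(engine->i915, intel_engine_flag(engine),
			  I915_ERROR_CAPTURE, "engine reset failed");
}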
@@ -1907,6 +1931,7 @@ struct i915_workarounds {
 struct i915_virtual_gpu {
 bool active;
 u32 caps;
+ u32 scaler_owned;
 };
 /* used in computing the new watermarks state */
@@ -2164,6 +2189,8 @@ struct drm_i915_private {
 const struct intel_device_info info;
 void __iomem *regs;
+ struct gvt_shared_page *shared_page;
+ spinlock_t shared_page_lock;
 struct intel_uncore uncore;
@@ -2197,8 +2224,11 @@ struct drm_i915_private {
 wait_queue_head_t gmbus_wait_queue;
 struct pci_dev *bridge_dev;
- struct i915_gem_context *kernel_context;
 struct intel_engine_cs *engine[I915_NUM_ENGINES];
+ /* Context used internally to idle the GPU and setup initial state */
+ struct i915_gem_context *kernel_context;
+ /* Context only to be used for injecting preemption commands */
+ struct i915_gem_context *preempt_context;
 struct i915_vma *semaphore;
 struct drm_dma_handle *status_page_dmah;
@@ -2235,6 +2265,8 @@ struct drm_i915_private {
 bool preserve_bios_swizzle;
+ struct kobject memtrack_kobj;
+
 /* overlay */
 struct intel_overlay *overlay;
@@ -2316,6 +2348,8 @@ struct drm_i915_private {
 struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
 #endif
+ struct work_struct initial_modeset_work;
+
 /* dpll and cdclk state is protected by connection_mutex */
 int num_shared_dpll;
 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
@@ -2351,6 +2385,14 @@ struct drm_i915_private {
 /* Cannot be determined by PCIID. You must always read a register. */
 u32 edram_cap;
+ /*
+ * Protects RPS/RC6 register access and PCU communication.
+ * Must be taken after struct_mutex if nested. Note that
+ * this lock may be held for long periods of time when
+ * talking to hw - so only take it when talking to hw!
+ */
+ struct mutex pcu_lock;
+
 /* gen6+ rps state */
 struct intel_gen6_power_mgmt rps;
@@ -2370,6 +2412,8 @@ struct drm_i915_private {
 struct intel_fbdev *fbdev;
 struct work_struct fbdev_suspend_work;
+ struct list_head splash_list;
+
 struct drm_property *broadcast_rgb_property;
 struct drm_property *force_audio_property;
@@ -2392,7 +2436,13 @@ struct drm_i915_private {
 * This is limited in execlists to 21 bits.
 */
 struct ida hw_ida;
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+#ifdef CONFIG_DRM_I915_GVT
+ /* In case of virtualization, 3-bits of vgt-id will be added to hw_id */
+#define SIZE_CONTEXT_HW_ID (18)
+#else
+#define SIZE_CONTEXT_HW_ID (21)
+#endif
+#define MAX_CONTEXT_HW_ID (1 << SIZE_CONTEXT_HW_ID) /* exclusive */
...
 #define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
 #define GEN_FOREVER (0)
+
+#define INTEL_GEN_MASK(s, e) ( \
+ BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
+ BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
+ GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
+ (s) != GEN_FOREVER ? (s) - 1 : 0) \
+)
+
 /*
 * Returns true if Gen is in inclusive range [Start, End].
 *
 * Use GEN_FOREVER for unbound start and or end.
 */
-#define IS_GEN(dev_priv, s, e) ({ \
- unsigned int __s = (s), __e = (e); \
- BUILD_BUG_ON(!__builtin_constant_p(s)); \
- BUILD_BUG_ON(!__builtin_constant_p(e)); \
- if ((__s) != GEN_FOREVER) \
- __s = (s) - 1; \
- if ((__e) == GEN_FOREVER) \
- __e = BITS_PER_LONG - 1; \
- else \
- __e = (e) - 1; \
- !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
-})
+#define IS_GEN(dev_priv, s, e) \
+ (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
 /*
 * Return true if revision is in range [since,until] inclusive. 
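/*
 * Worked example for the rewritten IS_GEN()/INTEL_GEN_MASK() above (sketch,
 * not part of the patch). gen_mask stores BIT(gen - 1), so:
 *
 *   IS_GEN(dev_priv, 9, 9)            -> gen_mask & GENMASK(8, 8)
 *   IS_GEN(dev_priv, 6, GEN_FOREVER)  -> gen_mask & GENMASK(BITS_PER_LONG - 1, 5)
 *   IS_GEN(dev_priv, GEN_FOREVER, 8)  -> gen_mask & GENMASK(7, 0)
 *
 * The BUILD_BUG_ON_ZERO() terms contribute 0 for constant bounds, so the
 * whole expression folds to a compile-time constant mask.
 */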
@@ -3012,9 +3062,9 @@ intel_info(const struct drm_i915_private *dev_priv) #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \ ((dev_priv)->info.has_logical_ring_contexts) -#define USES_PPGTT(dev_priv) (i915.enable_ppgtt) -#define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2) -#define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3) +#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt) +#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2) +#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3) #define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay) #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \ @@ -3065,6 +3115,8 @@ intel_info(const struct drm_i915_private *dev_priv) #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm) #define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc) +#define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc) + /* * For now, anything with a GuC requires uCode loading, and then supports * command submission once loaded. But these are logically independent @@ -3127,6 +3179,11 @@ intel_info(const struct drm_i915_private *dev_priv) #define GT_FREQUENCY_MULTIPLIER 50 #define GEN9_FREQ_SCALER 3 +#define BITS_PER_PIPE 8 +#define AVAIL_PLANE_PER_PIPE(dev_priv, mask, pipe) \ + (((mask) >> (pipe) * BITS_PER_PIPE) & \ + ((1 << ((INTEL_INFO(dev_priv)->num_sprites[pipe]) + 1)) - 1)) + #include "i915_trace.h" static inline bool intel_vtd_active(void) @@ -3172,14 +3229,14 @@ extern const struct dev_pm_ops i915_pm_ops; extern int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent); +extern int i915_driver_load_async(struct pci_dev *pdev, + const struct pci_device_id *ent); extern void i915_driver_unload(struct drm_device *dev); extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); -#define I915_RESET_QUIET BIT(0) -extern void i915_reset(struct drm_i915_private *i915, unsigned int flags); -extern int i915_reset_engine(struct intel_engine_cs *engine, - unsigned int flags); +extern void i915_reset(struct drm_i915_private *i915); +extern int i915_reset_engine(struct intel_engine_cs *engine, const char *msg); extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv); extern int intel_guc_reset(struct drm_i915_private *dev_priv); @@ -3210,7 +3267,7 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) { unsigned long delay; - if (unlikely(!i915.enable_hangcheck)) + if (unlikely(!i915_modparams.enable_hangcheck)) return; /* Don't continually defer the hangcheck so that it is always run at @@ -3223,10 +3280,12 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) &dev_priv->gpu_error.hangcheck_work, delay); } -__printf(3, 4) +__printf(4, 5) void i915_handle_error(struct drm_i915_private *dev_priv, u32 engine_mask, + unsigned long flags, const char *fmt, ...); +#define I915_ERROR_CAPTURE BIT(0) extern void intel_irq_init(struct drm_i915_private *dev_priv); extern void intel_irq_fini(struct drm_i915_private *dev_priv); @@ -3238,7 +3297,7 @@ static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) return dev_priv->gvt; } -static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) +static inline bool intel_vgpu_active(const struct drm_i915_private *dev_priv) { return dev_priv->vgpu.active; } @@ -3270,18 +3329,18 @@ ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) ilk_update_display_irq(dev_priv, bits, 
0); } void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, + unsigned int crtc_index, uint32_t interrupt_mask, uint32_t enabled_irq_mask); static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, uint32_t bits) + unsigned int crtc_index, uint32_t bits) { - bdw_update_pipe_irq(dev_priv, pipe, bits, bits); + bdw_update_pipe_irq(dev_priv, crtc_index, bits, bits); } static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, uint32_t bits) + unsigned int crtc_index, uint32_t bits) { - bdw_update_pipe_irq(dev_priv, pipe, bits, 0); + bdw_update_pipe_irq(dev_priv, crtc_index, bits, 0); } void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, uint32_t interrupt_mask, @@ -3355,7 +3414,18 @@ struct drm_i915_gem_object * i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, const void *data, size_t size); void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); +struct drm_i915_gem_object *i915_gem_object_create_splash_pages( + struct drm_i915_private *dev_priv, + struct page **pages, u32 n_pages); +struct drm_i915_gem_object *i915_gem_object_create_splash( + struct drm_i915_private *dev_priv, + const u8 *ptr, u32 n_pages); void i915_gem_free_object(struct drm_gem_object *obj); +int i915_gem_open_object(struct drm_gem_object *gem_obj, + struct drm_file *file_priv); +void i915_gem_close_object(struct drm_gem_object *gem_obj, + struct drm_file *file_priv); + static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) { @@ -3598,6 +3668,7 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine, void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); +int __must_check i915_gem_init_hw_late(struct drm_i915_private *dev_priv); void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, @@ -3630,6 +3701,9 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); void i915_gem_release(struct drm_device *dev, struct drm_file *file); +int i915_gem_access_userdata_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); + int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level); @@ -3697,6 +3771,8 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, void i915_oa_init_reg_state(struct intel_engine_cs *engine, struct i915_gem_context *ctx, uint32_t *reg_state); +int i915_gem_gvtbuffer_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct i915_address_space *vm, @@ -3771,6 +3847,18 @@ u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size, u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size, unsigned int tiling, unsigned int stride); +int i915_get_pid_cmdline(struct task_struct *task, char *buffer); +int i915_gem_obj_insert_pid(struct drm_i915_gem_object *obj); +void i915_gem_obj_remove_pid(struct drm_i915_gem_object *obj); +void i915_gem_obj_remove_all_pids(struct drm_i915_gem_object *obj); +int i915_obj_insert_virt_addr(struct drm_i915_gem_object *obj, + unsigned long addr, bool is_map_gtt, + bool 
is_mutex_locked); +int i915_get_drm_clients_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev); +int i915_gem_get_obj_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev, struct pid *tgid); + /* i915_debugfs.c */ #ifdef CONFIG_DEBUG_FS int i915_debugfs_register(struct drm_i915_private *dev_priv); @@ -3788,11 +3876,16 @@ static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {} __printf(2, 3) void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); +void i915_error_puts(struct drm_i915_error_state_buf *e, + const char *str); +bool i915_error_ok(struct drm_i915_error_state_buf *e); int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, const struct i915_gpu_state *gpu); int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, struct drm_i915_private *i915, size_t count, loff_t pos); +int i915_obj_state_buf_init(struct drm_i915_error_state_buf *eb, + size_t count); static inline void i915_error_state_buf_release( struct drm_i915_error_state_buf *eb) { @@ -3867,6 +3960,10 @@ extern int i915_restore_state(struct drm_i915_private *dev_priv); /* i915_sysfs.c */ void i915_setup_sysfs(struct drm_i915_private *dev_priv); void i915_teardown_sysfs(struct drm_i915_private *dev_priv); +int i915_gem_create_sysfs_file_entry(struct drm_device *dev, + struct drm_file *file); +void i915_gem_remove_sysfs_file_entry(struct drm_device *dev, + struct drm_file *file); /* intel_lpe_audio.c */ int intel_lpe_audio_init(struct drm_i915_private *dev_priv); @@ -3881,6 +3978,7 @@ extern int intel_setup_gmbus(struct drm_i915_private *dev_priv); extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv); extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, unsigned int pin); +extern int intel_gmbus_output_aksv(struct i2c_adapter *adapter); extern struct i2c_adapter * intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); @@ -3994,7 +4092,11 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, struct intel_display_error_state *error); int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); -int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val); +int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox, + u32 val, int timeout_us); +#define sandybridge_pcode_write(dev_priv, mbox, val) \ + sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500) + int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, u32 reply_mask, u32 reply, int timeout_base_ms); @@ -4105,7 +4207,11 @@ u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \ i915_reg_t reg) \ { \ - return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \ + if (!intel_vgpu_active(dev_priv) || !i915_modparams.enable_pvmmio || \ + likely(!in_mmio_read_trap_list((reg).reg))) \ + return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \ + dev_priv->shared_page->reg_addr = i915_mmio_reg_offset(reg); \ + return read##s(dev_priv->regs + i915_mmio_reg_offset(vgtif_reg(pv_mmio))); \ } #define __raw_write(x, s) \ @@ -4333,11 +4439,12 @@ int remap_io_mapping(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, struct io_mapping *iomap); -static inline bool -intel_engine_can_store_dword(struct intel_engine_cs *engine) +static inline int intel_hws_csb_write_index(struct 
drm_i915_private *i915)
 {
- return __intel_engine_can_store_dword(INTEL_GEN(engine->i915),
- engine->class);
+ if (INTEL_GEN(i915) >= 10)
+ return CNL_HWS_CSB_WRITE_INDEX;
+ else
+ return I915_HWS_CSB_WRITE_INDEX;
 }
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dc1faa49687d..7e7c2fdd5cee 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -44,8 +44,964 @@
 #include
 #include
 #include
+#include
+#include
+#include
+#include "../drm_internal.h"
 static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
+
+struct per_file_obj_mem_info {
+ int num_obj;
+ int num_obj_shared;
+ int num_obj_private;
+ int num_obj_gtt_bound;
+ int num_obj_purged;
+ int num_obj_purgeable;
+ int num_obj_allocated;
+ int num_obj_fault_mappable;
+ int num_obj_stolen;
+ size_t gtt_space_allocated_shared;
+ size_t gtt_space_allocated_priv;
+ size_t phys_space_allocated_shared;
+ size_t phys_space_allocated_priv;
+ size_t phys_space_purgeable;
+ size_t phys_space_shared_proportion;
+ size_t fault_mappable_size;
+ size_t stolen_space_allocated;
+ char *process_name;
+};
+
+struct name_entry {
+ struct list_head head;
+ struct drm_hash_item hash_item;
+};
+
+struct pid_stat_entry {
+ struct list_head head;
+ struct list_head namefree;
+ struct drm_open_hash namelist;
+ struct per_file_obj_mem_info stats;
+ struct pid *tgid;
+ int pid_num;
+};
+
+struct drm_i915_obj_virt_addr {
+ struct list_head head;
+ unsigned long user_virt_addr;
+};
+
+struct drm_i915_obj_pid_info {
+ struct list_head head;
+ pid_t tgid;
+ int open_handle_count;
+ struct list_head virt_addr_head;
+};
+
+struct get_obj_stats_buf {
+ struct pid_stat_entry *entry;
+ struct drm_i915_error_state_buf *m;
+};
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+#define err_puts(e, s) i915_error_puts(e, s)
+
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
+{
+ switch (i915_gem_object_get_tiling(obj)) {
+ default:
+ case I915_TILING_NONE: return " ";
+ case I915_TILING_X: return "X";
+ case I915_TILING_Y: return "Y";
+ }
+}
+
+/*
+ * If this mmput call is the last one, it will tear down the mmaps of the
+ * process and call drm_gem_vm_close(), which leads to a deadlock on the
+ * i915 mutex. Instead, asynchronously schedule the mmput here, to avoid
+ * recursive calls to acquire i915_mutex. 
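+ * (async_schedule() runs async_mmput_func() from the async worker threads,
+ * so the final mmput() runs outside any context that holds struct_mutex,
+ * breaking the recursion described above.)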
+ */ +static void async_mmput_func(void *data, async_cookie_t cookie) +{ + struct mm_struct *mm = data; + mmput(mm); +} + +static void async_mmput(struct mm_struct *mm) +{ + async_schedule(async_mmput_func, mm); +} + +int i915_get_pid_cmdline(struct task_struct *task, char *buffer) +{ + int res = 0; + unsigned int len; + struct mm_struct *mm = get_task_mm(task); + + if (!mm) + goto out; + if (!mm->arg_end) + goto out_mm; + + len = mm->arg_end - mm->arg_start; + + if (len > PAGE_SIZE) + len = PAGE_SIZE; + + res = access_process_vm(task, mm->arg_start, buffer, len, 0); + if (res < 0) { + async_mmput(mm); + return res; + } + + if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) + buffer[res-1] = '\0'; +out_mm: + async_mmput(mm); +out: + return 0; +} + +static int i915_obj_get_shmem_pages_alloced(struct drm_i915_gem_object *obj) +{ + int ret; + + if (obj->base.filp) { + struct inode *inode = file_inode(obj->base.filp); + struct shmem_inode_info *info = SHMEM_I(inode); + + if (!inode) + return 0; + spin_lock(&info->lock); + ret = inode->i_mapping->nrpages; + spin_unlock(&info->lock); + return ret; + } + return 0; +} + +int i915_gem_obj_insert_pid(struct drm_i915_gem_object *obj) +{ + int found = 0; + struct drm_i915_obj_pid_info *entry; + pid_t current_tgid = task_tgid_nr(current); + + if (!i915_modparams.memtrack_debug) + return 0; + + mutex_lock(&obj->base.dev->struct_mutex); + + list_for_each_entry(entry, &obj->pid_info, head) { + if (entry->tgid == current_tgid) { + entry->open_handle_count++; + found = 1; + break; + } + } + if (found == 0) { + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (entry == NULL) { + DRM_ERROR("alloc failed\n"); + mutex_unlock(&obj->base.dev->struct_mutex); + return -ENOMEM; + } + entry->tgid = current_tgid; + entry->open_handle_count = 1; + INIT_LIST_HEAD(&entry->virt_addr_head); + list_add_tail(&entry->head, &obj->pid_info); + } + + mutex_unlock(&obj->base.dev->struct_mutex); + return 0; +} + +void i915_gem_obj_remove_pid(struct drm_i915_gem_object *obj) +{ + pid_t current_tgid = task_tgid_nr(current); + struct drm_i915_obj_pid_info *pid_entry, *pid_next; + struct drm_i915_obj_virt_addr *virt_entry, *virt_next; + int found = 0; + + if (!i915_modparams.memtrack_debug) + return; + + mutex_lock(&obj->base.dev->struct_mutex); + + list_for_each_entry_safe(pid_entry, pid_next, &obj->pid_info, head) { + if (pid_entry->tgid == current_tgid) { + pid_entry->open_handle_count--; + found = 1; + if (pid_entry->open_handle_count == 0) { + list_for_each_entry_safe(virt_entry, + virt_next, + &pid_entry->virt_addr_head, + head) { + list_del(&virt_entry->head); + kfree(virt_entry); + } + list_del(&pid_entry->head); + kfree(pid_entry); + } + break; + } + } + mutex_unlock(&obj->base.dev->struct_mutex); + + if (found == 0) + DRM_DEBUG("Couldn't find matching tgid %d for obj %p\n", + current_tgid, obj); +} + +void i915_gem_obj_remove_all_pids(struct drm_i915_gem_object *obj) +{ + struct drm_i915_obj_pid_info *pid_entry, *pid_next; + struct drm_i915_obj_virt_addr *virt_entry, *virt_next; + + list_for_each_entry_safe(pid_entry, pid_next, &obj->pid_info, head) { + list_for_each_entry_safe(virt_entry, + virt_next, + &pid_entry->virt_addr_head, + head) { + list_del(&virt_entry->head); + kfree(virt_entry); + } + list_del(&pid_entry->head); + kfree(pid_entry); + } +} + + int +i915_obj_insert_virt_addr(struct drm_i915_gem_object *obj, + unsigned long addr, + bool is_map_gtt, + bool is_mutex_locked) +{ + struct drm_i915_obj_pid_info *pid_entry; + pid_t current_tgid = 
task_tgid_nr(current);
+ int ret = 0, found = 0;
+
+ if (!i915_modparams.memtrack_debug)
+ return 0;
+
+ if (is_map_gtt)
+ addr |= 1;
+
+ if (!is_mutex_locked) {
+ ret = i915_mutex_lock_interruptible(obj->base.dev);
+ if (ret)
+ return ret;
+ }
+
+ list_for_each_entry(pid_entry, &obj->pid_info, head) {
+ if (pid_entry->tgid == current_tgid) {
+ struct drm_i915_obj_virt_addr *virt_entry, *new_entry;
+
+ list_for_each_entry(virt_entry,
+ &pid_entry->virt_addr_head,
+ head) {
+ if (virt_entry->user_virt_addr == addr) {
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ break;
+ new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+ if (new_entry == NULL) {
+ DRM_ERROR("alloc failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ new_entry->user_virt_addr = addr;
+ list_add_tail(&new_entry->head,
+ &pid_entry->virt_addr_head);
+ break;
+ }
+ }
+
+out:
+ if (!is_mutex_locked)
+ mutex_unlock(&obj->base.dev->struct_mutex);
+
+ return ret;
+}
+
+static int i915_obj_virt_addr_is_invalid(struct drm_gem_object *obj,
+ struct pid *tgid, unsigned long addr)
+{
+ struct task_struct *task;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ int locked, ret = 0;
+
+ task = get_pid_task(tgid, PIDTYPE_PID);
+ if (task == NULL) {
+ DRM_DEBUG("null task for tgid=%d\n", pid_nr(tgid));
+ return -EINVAL;
+ }
+
+ mm = get_task_mm(task);
+ if (mm == NULL) {
+ DRM_DEBUG("null mm for tgid=%d\n", pid_nr(tgid));
+ ret = -EINVAL;
+ goto out_task;
+ }
+
+ locked = down_read_trylock(&mm->mmap_sem);
+ if (!locked)
+ goto out_mm;
+
+ vma = find_vma(mm, addr);
+ if (vma) {
+ if (addr & 1) { /* mmap_gtt case */
+ if (vma->vm_pgoff*PAGE_SIZE == (unsigned long)
+ drm_vma_node_offset_addr(&obj->vma_node))
+ ret = 0;
+ else
+ ret = -EINVAL;
+ } else { /* mmap case */
+ if (vma->vm_file == obj->filp)
+ ret = 0;
+ else
+ ret = -EINVAL;
+ }
+ } else
+ ret = -EINVAL;
+
+ up_read(&mm->mmap_sem);
+
+out_mm:
+ async_mmput(mm);
+out_task:
+ put_task_struct(task);
+ return ret;
+}
+
+static void i915_obj_pidarray_validate(struct drm_gem_object *gem_obj)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ struct drm_device *dev = gem_obj->dev;
+ struct drm_i915_obj_virt_addr *virt_entry, *virt_next;
+ struct drm_i915_obj_pid_info *pid_entry, *pid_next;
+ struct drm_file *file;
+ struct drm_i915_file_private *file_priv;
+ struct pid *tgid;
+ int pid_num, present;
+
+ /*
+ * Run a sanity check on pid_array. All entries in pid_array should
+ * be a subset of the drm filelist pid entries.
+ */
+ list_for_each_entry_safe(pid_entry, pid_next, &obj->pid_info, head) {
+ if (pid_next == NULL) {
+ DRM_ERROR(
+ "Invalid pid info. obj:%p, size:%zdK, tiling:%s, userptr=%s, stolen:%s, name:%d, handle_count=%d\n",
+ &obj->base, obj->base.size/1024,
+ get_tiling_flag(obj),
+ (obj->userptr.mm != 0) ? "Y" : "N",
+ obj->stolen ? "Y" : "N", obj->base.name,
+ obj->base.handle_count);
+ break;
+ }
+
+ present = 0;
+ list_for_each_entry(file, &dev->filelist, lhead) {
+ file_priv = file->driver_priv;
+ tgid = file_priv->tgid;
+ pid_num = pid_nr(tgid);
+
+ if (pid_num == pid_entry->tgid) {
+ present = 1;
+ break;
+ }
+ }
+ if (present == 0) {
+ DRM_DEBUG("stale_tgid=%d\n", pid_entry->tgid);
+ list_for_each_entry_safe(virt_entry, virt_next,
+ &pid_entry->virt_addr_head,
+ head) {
+ list_del(&virt_entry->head);
+ kfree(virt_entry);
+ }
+ list_del(&pid_entry->head);
+ kfree(pid_entry);
+ } else {
+ /* Validate the virtual address list */
+ struct task_struct *task =
+ get_pid_task(tgid, PIDTYPE_PID);
+ if (task == NULL)
+ continue;
+
+ list_for_each_entry_safe(virt_entry, virt_next,
+ &pid_entry->virt_addr_head,
+ head) {
+ if (i915_obj_virt_addr_is_invalid(gem_obj, tgid,
+ virt_entry->user_virt_addr)) {
+ DRM_DEBUG("stale_addr=%ld\n",
+ virt_entry->user_virt_addr);
+ list_del(&virt_entry->head);
+ kfree(virt_entry);
+ }
+ }
+ put_task_struct(task);
+ }
+ }
+}
+
+static int i915_obj_find_insert_in_hash(struct drm_i915_gem_object *obj,
+ struct pid_stat_entry *pid_entry,
+ bool *found)
+{
+ struct drm_hash_item *hash_item;
+ int ret;
+
+ ret = drm_ht_find_item(&pid_entry->namelist,
+ (unsigned long)&obj->base, &hash_item);
+ /* Not found, insert in hash */
+ if (ret) {
+ struct name_entry *entry =
+ kzalloc(sizeof(*entry), GFP_NOWAIT);
+ if (entry == NULL) {
+ DRM_ERROR("alloc failed\n");
+ return -ENOMEM;
+ }
+ entry->hash_item.key = (unsigned long)&obj->base;
+ drm_ht_insert_item(&pid_entry->namelist,
+ &entry->hash_item);
+ list_add_tail(&entry->head, &pid_entry->namefree);
+ *found = false;
+ } else
+ *found = true;
+
+ return 0;
+}
+
+static int i915_obj_shared_count(struct drm_i915_gem_object *obj,
+ struct pid_stat_entry *pid_entry,
+ bool *discard)
+{
+ struct drm_i915_obj_pid_info *pid_info_entry;
+ int ret, obj_shared_count = 0;
+
+ /*
+ * The object can be shared among different processes by either the
+ * flink or the dma-buf mechanism, leading to a shared count greater
+ * than 1. For objects that are not shared, return the shared count
+ * as 1.
+ * In case of shared dma-buf objects, there's a possibility that these
+ * may be external to i915. Detect this condition through the
+ * 'import_attach' field. 
+ */ + if (!obj->base.name && !obj->base.dma_buf) + return 1; + else if(obj->base.import_attach) { + /* not our GEM obj */ + *discard = true; + return 0; + } + + ret = i915_obj_find_insert_in_hash(obj, pid_entry, discard); + if (ret) + return ret; + + list_for_each_entry(pid_info_entry, &obj->pid_info, head) + obj_shared_count++; + + if (WARN_ON(obj_shared_count == 0)) + return -EINVAL; + + return obj_shared_count; +} + + static int +i915_describe_obj(struct get_obj_stats_buf *obj_stat_buf, + struct drm_i915_gem_object *obj) +{ + struct i915_vma *vma; + struct drm_i915_obj_pid_info *pid_info_entry; + struct drm_i915_obj_virt_addr *virt_entry; + struct drm_i915_error_state_buf *m = obj_stat_buf->m; + struct pid_stat_entry *pid_entry = obj_stat_buf->entry; + struct per_file_obj_mem_info *stats = &pid_entry->stats; + int obj_shared_count = 0; + + bool discard = false; + + obj_shared_count = i915_obj_shared_count(obj, pid_entry, &discard); + if (obj_shared_count < 0) + return obj_shared_count; + + if (!discard && !obj->stolen && + (obj->mm.madv != __I915_MADV_PURGED) && + (i915_obj_get_shmem_pages_alloced(obj) != 0)) { + if (obj_shared_count > 1) + stats->phys_space_shared_proportion += + obj->base.size/obj_shared_count; + else + stats->phys_space_allocated_priv += + obj->base.size; + } + + err_printf(m, + "%p: %7zdK %s %s %s %s %s %s ", + &obj->base, + obj->base.size / 1024, + get_tiling_flag(obj), + obj->mm.dirty ? "Y" : "N", + (obj_shared_count > 1) ? "Y" : "N", + (obj->userptr.mm != 0) ? "Y" : "N", + obj->stolen ? "Y" : "N", + (obj->pin_display) ? "Y" : "N"); + + if (obj->mm.madv == __I915_MADV_PURGED) + err_puts(m, " purged "); + else if (obj->mm.madv == I915_MADV_DONTNEED) + err_puts(m, " purgeable "); + else if (i915_obj_get_shmem_pages_alloced(obj) != 0) + err_puts(m, " allocated "); + else + err_puts(m, " "); + + list_for_each_entry(vma, &obj->vma_list, obj_link) { + if (!i915_is_ggtt(vma->vm)) + err_puts(m, " PP "); + else + err_puts(m, " G "); + err_printf(m, " %08llx ", vma->node.start); + } + if (list_empty(&obj->vma_list)) + err_puts(m, " "); + + list_for_each_entry(pid_info_entry, &obj->pid_info, head) { + err_printf(m, " (%d: %d:", + pid_info_entry->tgid, + pid_info_entry->open_handle_count); + list_for_each_entry(virt_entry, + &pid_info_entry->virt_addr_head, head) { + if (virt_entry->user_virt_addr & 1) + err_printf(m, " %p", + (void *)(virt_entry->user_virt_addr & ~1)); + else + err_printf(m, " %p*", + (void *)virt_entry->user_virt_addr); + } + err_puts(m, ") "); + } + + err_puts(m, "\n"); + + if (m->bytes == 0 && m->err) + return m->err; + + return 0; +} + + static int +i915_drm_gem_obj_info(int id, void *ptr, void *data) +{ + struct drm_i915_gem_object *obj = ptr; + struct get_obj_stats_buf *obj_stat_buf = data; + int ret; + + if (obj->pid_info.next == NULL) { + DRM_ERROR( + "Invalid pid info. obj:%p, size:%zdK, tiling:%s, userptr=%s, stolen:%s, name:%d, handle_count=%d\n", + &obj->base, obj->base.size/1024, + get_tiling_flag(obj), + (obj->userptr.mm != 0) ? "Y" : "N", + obj->stolen ? 
"Y" : "N", obj->base.name, + obj->base.handle_count); + return 0; + } + i915_obj_pidarray_validate(&obj->base); + ret = i915_describe_obj(obj_stat_buf, obj); + + return ret; +} + +bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) +{ + struct i915_vma *vma; + + list_for_each_entry(vma, &o->vma_list, obj_link) + if (drm_mm_node_allocated(&vma->node)) + return true; + + return false; +} + + static int +i915_drm_gem_object_per_file_summary(int id, void *ptr, void *data) +{ + struct pid_stat_entry *pid_entry = data; + struct drm_i915_gem_object *obj = ptr; + struct per_file_obj_mem_info *stats = &pid_entry->stats; + int obj_shared_count = 0; + bool discard = false; + + if (obj->pid_info.next == NULL) { + DRM_ERROR( + "Invalid pid info. obj:%p, size:%zdK, tiling:%s, userptr=%s, stolen:%s, name:%d, handle_count=%d\n", + &obj->base, obj->base.size/1024, + get_tiling_flag(obj), + (obj->userptr.mm != 0) ? "Y" : "N", + obj->stolen ? "Y" : "N", obj->base.name, + obj->base.handle_count); + return 0; + } + + i915_obj_pidarray_validate(&obj->base); + + stats->num_obj++; + + obj_shared_count = i915_obj_shared_count(obj, pid_entry, &discard); + if (obj_shared_count < 0) + return obj_shared_count; + + if (discard) + return 0; + + if (obj_shared_count > 1) + stats->num_obj_shared++; + else + stats->num_obj_private++; + + if (i915_gem_obj_bound_any(obj)) { + stats->num_obj_gtt_bound++; + if (obj_shared_count > 1) + stats->gtt_space_allocated_shared += obj->base.size; + else + stats->gtt_space_allocated_priv += obj->base.size; + } + + if (obj->stolen) { + stats->num_obj_stolen++; + stats->stolen_space_allocated += obj->base.size; + } else if (obj->mm.madv == __I915_MADV_PURGED) { + stats->num_obj_purged++; + } else if (obj->mm.madv == I915_MADV_DONTNEED) { + stats->num_obj_purgeable++; + stats->num_obj_allocated++; + if (i915_obj_get_shmem_pages_alloced(obj) != 0) { + stats->phys_space_purgeable += obj->base.size; + if (obj_shared_count > 1) { + stats->phys_space_allocated_shared += + obj->base.size; + stats->phys_space_shared_proportion += + obj->base.size/obj_shared_count; + } else + stats->phys_space_allocated_priv += + obj->base.size; + } else + WARN_ON(1); + } else if (i915_obj_get_shmem_pages_alloced(obj) != 0) { + stats->num_obj_allocated++; + if (obj_shared_count > 1) { + stats->phys_space_allocated_shared += + obj->base.size; + stats->phys_space_shared_proportion += + obj->base.size/obj_shared_count; + } + else + stats->phys_space_allocated_priv += obj->base.size; + } + return 0; +} + + static int +__i915_get_drm_clients_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev) +{ + struct drm_file *file; + struct drm_i915_private *dev_priv = dev->dev_private; + + struct name_entry *entry, *next; + struct pid_stat_entry *pid_entry, *temp_entry; + struct pid_stat_entry *new_pid_entry, *new_temp_entry; + struct list_head per_pid_stats, sorted_pid_stats; + int ret = 0; + size_t total_shared_prop_space = 0, total_priv_space = 0; + + INIT_LIST_HEAD(&per_pid_stats); + INIT_LIST_HEAD(&sorted_pid_stats); + + err_puts(m, + "\n\n pid Total Shared Priv Purgeable Alloced SharedPHYsize SharedPHYprop PrivPHYsize PurgeablePHYsize process\n"); + + list_for_each_entry(file, &dev->filelist, lhead) { + struct pid *tgid; + struct drm_i915_file_private *file_priv = file->driver_priv; + int pid_num, found = 0; + + tgid = file_priv->tgid; + pid_num = pid_nr(tgid); + + list_for_each_entry(pid_entry, &per_pid_stats, head) { + if (pid_entry->pid_num == pid_num) { + found = 1; + break; + } + } + + if (!found) 
{ + struct pid_stat_entry *new_entry = + kzalloc(sizeof(*new_entry), GFP_KERNEL); + if (new_entry == NULL) { + DRM_ERROR("alloc failed\n"); + ret = -ENOMEM; + break; + } + new_entry->tgid = tgid; + new_entry->pid_num = pid_num; + ret = drm_ht_create(&new_entry->namelist, + DRM_MAGIC_HASH_ORDER); + if (ret) { + kfree(new_entry); + break; + } + + list_add_tail(&new_entry->head, &per_pid_stats); + INIT_LIST_HEAD(&new_entry->namefree); + new_entry->stats.process_name = file_priv->process_name; + pid_entry = new_entry; + } + + spin_lock(&file->table_lock); + ret = idr_for_each(&file->object_idr, + &i915_drm_gem_object_per_file_summary, pid_entry); + spin_unlock(&file->table_lock); + if (ret) + break; + } + + list_for_each_entry_safe(pid_entry, temp_entry, &per_pid_stats, head) { + if (list_empty(&sorted_pid_stats)) { + list_del(&pid_entry->head); + list_add_tail(&pid_entry->head, &sorted_pid_stats); + continue; + } + + list_for_each_entry_safe(new_pid_entry, new_temp_entry, + &sorted_pid_stats, head) { + int prev_space = + pid_entry->stats.phys_space_shared_proportion + + pid_entry->stats.phys_space_allocated_priv; + int new_space = + new_pid_entry-> + stats.phys_space_shared_proportion + + new_pid_entry->stats.phys_space_allocated_priv; + if (prev_space > new_space) { + list_del(&pid_entry->head); + list_add_tail(&pid_entry->head, + &new_pid_entry->head); + break; + } + if (list_is_last(&new_pid_entry->head, + &sorted_pid_stats)) { + list_del(&pid_entry->head); + list_add_tail(&pid_entry->head, + &sorted_pid_stats); + } + } + } + + list_for_each_entry_safe(pid_entry, temp_entry, + &sorted_pid_stats, head) { + struct task_struct *task = get_pid_task(pid_entry->tgid, + PIDTYPE_PID); + err_printf(m, + "%5d %6d %6d %6d %9d %8d %14zdK %14zdK %14zdK %14zdK %s", + pid_entry->pid_num, + pid_entry->stats.num_obj, + pid_entry->stats.num_obj_shared, + pid_entry->stats.num_obj_private, + pid_entry->stats.num_obj_purgeable, + pid_entry->stats.num_obj_allocated, + pid_entry->stats.phys_space_allocated_shared/1024, + pid_entry->stats.phys_space_shared_proportion/1024, + pid_entry->stats.phys_space_allocated_priv/1024, + pid_entry->stats.phys_space_purgeable/1024, + pid_entry->stats.process_name); + + if (task == NULL) + err_puts(m, "*\n"); + else + err_puts(m, "\n"); + + total_shared_prop_space += + pid_entry->stats.phys_space_shared_proportion/1024; + total_priv_space += + pid_entry->stats.phys_space_allocated_priv/1024; + list_del(&pid_entry->head); + + list_for_each_entry_safe(entry, next, + &pid_entry->namefree, head) { + list_del(&entry->head); + drm_ht_remove_item(&pid_entry->namelist, + &entry->hash_item); + kfree(entry); + } + drm_ht_remove(&pid_entry->namelist); + kfree(pid_entry); + if (task) + put_task_struct(task); + } + + err_puts(m, + "\t\t\t\t\t\t\t\t--------------\t-------------\t--------\n"); + err_printf(m, + "\t\t\t\t\t\t\t\t%13zdK\t%12zdK\tTotal\n", + total_shared_prop_space, total_priv_space); + + err_printf(m, "\nTotal used GFX Shmem Physical space %8zdK\n", + dev_priv->mm.phys_mem_total/1024); + + if (ret) + return ret; + if (m->bytes == 0 && m->err) + return m->err; + + return 0; +} + +#define NUM_SPACES 100 +#define INITIAL_SPACES_STR(x) #x +#define SPACES_STR(x) INITIAL_SPACES_STR(x) + + static int +__i915_gem_get_obj_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev, struct pid *tgid) +{ + struct drm_file *file; + struct drm_i915_file_private *file_priv_reqd = NULL; + int bytes_copy, ret = 0; + struct pid_stat_entry pid_entry; + struct name_entry *entry, *next; + + 
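/*
+ * Roll up a single tgid's usage: shared allocations are charged by their
+ * per-process proportion, private allocations in full; the sum is what is
+ * reported as GfxMem below.
+ */
+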
pid_entry.stats.phys_space_shared_proportion = 0; + pid_entry.stats.phys_space_allocated_priv = 0; + pid_entry.tgid = tgid; + pid_entry.pid_num = pid_nr(tgid); + ret = drm_ht_create(&pid_entry.namelist, DRM_MAGIC_HASH_ORDER); + if (ret) + return ret; + + INIT_LIST_HEAD(&pid_entry.namefree); + + /* + * Fill up initial few bytes with spaces, to insert summary data later + * on + */ + err_printf(m, "%"SPACES_STR(NUM_SPACES)"s\n", " "); + + list_for_each_entry(file, &dev->filelist, lhead) { + struct drm_i915_file_private *file_priv = file->driver_priv; + struct get_obj_stats_buf obj_stat_buf; + + obj_stat_buf.entry = &pid_entry; + obj_stat_buf.m = m; + + if (file_priv->tgid != tgid) + continue; + + file_priv_reqd = file_priv; + err_puts(m, + "\n Obj Identifier Size Tiling Dirty Shared Vmap Stolen Mappable AllocState Global/PP GttOffset (PID: handle count: user virt addrs)\n"); + spin_lock(&file->table_lock); + ret = idr_for_each(&file->object_idr, + &i915_drm_gem_obj_info, &obj_stat_buf); + spin_unlock(&file->table_lock); + if (ret) + break; + } + + if (file_priv_reqd) { + int space_remaining; + + /* Reset the bytes counter to buffer beginning */ + bytes_copy = m->bytes; + m->bytes = 0; + + err_printf(m, "\n PID GfxMem Process\n"); + err_printf(m, "%5d %8zdK ", pid_nr(file_priv_reqd->tgid), + (pid_entry.stats.phys_space_shared_proportion + + pid_entry.stats.phys_space_allocated_priv)/1024); + + space_remaining = NUM_SPACES - m->bytes - 1; + if (strlen(file_priv_reqd->process_name) > space_remaining) + file_priv_reqd->process_name[space_remaining] = '\0'; + + err_printf(m, "%s\n", file_priv_reqd->process_name); + + /* Reinstate the previous saved value of bytes counter */ + m->bytes = bytes_copy; + } else + WARN(1, "drm file corresponding to tgid:%d not found\n", + pid_nr(tgid)); + + list_for_each_entry_safe(entry, next, + &pid_entry.namefree, head) { + list_del(&entry->head); + drm_ht_remove_item(&pid_entry.namelist, + &entry->hash_item); + kfree(entry); + } + drm_ht_remove(&pid_entry.namelist); + + if (ret) + return ret; + if (m->bytes == 0 && m->err) + return m->err; + return 0; +} + +int i915_get_drm_clients_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev) +{ + int ret = 0; + + /* + * Protect the access to global drm resources such as filelist. Protect + * against their removal under our noses, while in use. + */ + mutex_lock(&drm_global_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) { + mutex_unlock(&drm_global_mutex); + return ret; + } + + ret = __i915_get_drm_clients_info(m, dev); + + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&drm_global_mutex); + + return ret; +} + +int i915_gem_get_obj_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev, struct pid *tgid) +{ + int ret = 0; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + ret = __i915_gem_get_obj_info(m, dev, tgid); + + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +static bool cpu_cache_is_coherent(struct drm_device *dev, + enum i915_cache_level level) +{ + return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE; +} static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) { @@ -325,17 +1281,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj) * must wait for all rendering to complete to the object (as unbinding * must anyway), and retire the requests. 
*/ - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | - I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT, - NULL); + ret = i915_gem_object_set_to_cpu_domain(obj, false); if (ret) return ret; - i915_gem_retire_requests(to_i915(obj->base.dev)); - while ((vma = list_first_entry_or_null(&obj->vma_list, struct i915_vma, obj_link))) { @@ -1675,6 +2624,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_mmap *args = data; struct drm_i915_gem_object *obj; unsigned long addr; + int ret; if (args->flags & ~(I915_MMAP_WC)) return -EINVAL; @@ -1720,6 +2670,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, if (IS_ERR((void *)addr)) return addr; + ret = i915_obj_insert_virt_addr(to_intel_bo(obj), addr, false, false); + if (ret) + return ret; + args->addr_ptr = (uint64_t) addr; return 0; @@ -1917,6 +2871,7 @@ int i915_gem_fault(struct vm_fault *vmf) (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT, min_t(u64, vma->size, area->vm_end - area->vm_start), &ggtt->mappable); + ret = i915_obj_insert_virt_addr(obj, (unsigned long)area->vm_start, true, true); err_unpin: __i915_vma_unpin(vma); @@ -2156,6 +3111,17 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj) shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); obj->mm.madv = __I915_MADV_PURGED; obj->mm.pages = ERR_PTR(-EFAULT); + + /* + * Mark the object as not having backing pages, as physical space + * returned back to kernel + */ + if (obj->has_backing_pages == 1) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + + dev_priv->mm.phys_mem_total -= obj->base.size; + obj->has_backing_pages = 0; + } } /* Try to discard unwanted pages */ @@ -2432,6 +3398,13 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_do_bit_17_swizzle(obj, st); + if (obj->has_backing_pages == 0) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + + dev_priv->mm.phys_mem_total += obj->base.size; + obj->has_backing_pages = 1; + } + return st; err_sg: @@ -2777,15 +3750,33 @@ i915_gem_find_active_request(struct intel_engine_cs *engine) return active; } -static bool engine_stalled(struct intel_engine_cs *engine) +static bool engine_stalled(struct intel_engine_cs *engine, + struct drm_i915_gem_request *request) { - if (!engine->hangcheck.stalled) - return false; + if (!intel_vgpu_active(engine->i915)) { + if (engine->fpreempt_stalled) { + /* Pardon the request if it managed to yield the + * engine by completing just prior to the reset. We + * could be even more sophisticated here and pardon + * the request if it preempted out (mid-batch) prior + * to the reset, but that's not so straight-forward + * to detect. Perhaps not worth splitting hairs when + * the request had clearly behaved badly to get here. 
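+			 *
+			 * Returning false here reports the engine as not
+			 * stalled, so i915_gem_reset_request() takes the
+			 * pardon path rather than marking the request's
+			 * context guilty.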
+ */ + if (i915_gem_request_completed(request)) + return false; - /* Check for possible seqno movement after hang declaration */ - if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) { - DRM_DEBUG_DRIVER("%s pardoned\n", engine->name); - return false; + return true; + } + + if (!engine->hangcheck.stalled) + return false; + + /* Check for possible seqno movement after hang declaration */ + if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) { + DRM_DEBUG_DRIVER("%s pardoned\n", engine->name); + return false; + } } return true; @@ -2798,7 +3789,7 @@ static bool engine_stalled(struct intel_engine_cs *engine) struct drm_i915_gem_request * i915_gem_reset_prepare_engine(struct intel_engine_cs *engine) { - struct drm_i915_gem_request *request = NULL; + struct drm_i915_gem_request *request; /* Prevent the signaler thread from updating the request * state (by calling dma_fence_signal) as we are processing @@ -2811,21 +3802,7 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine) */ kthread_park(engine->breadcrumbs.signaler); - /* Prevent request submission to the hardware until we have - * completed the reset in i915_gem_reset_finish(). If a request - * is completed by one engine, it may then queue a request - * to a second via its engine->irq_tasklet *just* as we are - * calling engine->init_hw() and also writing the ELSP. - * Turning off the engine->irq_tasklet until the reset is over - * prevents the race. - */ - tasklet_kill(&engine->irq_tasklet); - tasklet_disable(&engine->irq_tasklet); - - if (engine->irq_seqno_barrier) - engine->irq_seqno_barrier(engine); - - request = i915_gem_find_active_request(engine); + request = engine->reset.prepare(engine); if (request && request->fence.error == -EIO) request = ERR_PTR(-EIO); /* Previous reset failed! */ @@ -2922,7 +3899,7 @@ i915_gem_reset_request(struct intel_engine_cs *engine, * subsequent hangs. */ - if (engine_stalled(engine)) { + if (engine_stalled(engine, request)) { i915_gem_context_mark_guilty(request->ctx); skip_request(request); @@ -2930,6 +3907,13 @@ i915_gem_reset_request(struct intel_engine_cs *engine, if (i915_gem_context_is_banned(request->ctx)) engine_skip_context(request); } else { + /* If the request that we just pardoned was the target of a + * force preemption there is no possibility of the next + * request in line having started. + */ + if (engine->fpreempt_stalled) + return NULL; + /* * Since this is not the hung engine, it may have advanced * since the hang declaration. Double check by refinding @@ -2966,7 +3950,7 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine, } /* Setup the CS to resume from the breadcrumb of the hung request */ - engine->reset_hw(engine, request); + engine->reset.reset(engine, request); } void i915_gem_reset(struct drm_i915_private *dev_priv) @@ -2999,7 +3983,8 @@ void i915_gem_reset(struct drm_i915_private *dev_priv) void i915_gem_reset_finish_engine(struct intel_engine_cs *engine) { - tasklet_enable(&engine->irq_tasklet); + engine->reset.finish(engine); + kthread_unpark(engine->breadcrumbs.signaler); } @@ -3031,9 +4016,6 @@ static void nop_submit_request(struct drm_i915_gem_request *request) static void engine_set_wedged(struct intel_engine_cs *engine) { - struct drm_i915_gem_request *request; - unsigned long flags; - /* We need to be sure that no thread is running the old callback as * we install the nop handler (otherwise we would submit a request * to hardware that will never complete). 
In order to prevent this @@ -3043,40 +4025,7 @@ static void engine_set_wedged(struct intel_engine_cs *engine) engine->submit_request = nop_submit_request; /* Mark all executing requests as skipped */ - spin_lock_irqsave(&engine->timeline->lock, flags); - list_for_each_entry(request, &engine->timeline->requests, link) - if (!i915_gem_request_completed(request)) - dma_fence_set_error(&request->fence, -EIO); - spin_unlock_irqrestore(&engine->timeline->lock, flags); - - /* - * Clear the execlists queue up before freeing the requests, as those - * are the ones that keep the context and ringbuffer backing objects - * pinned in place. - */ - - if (i915.enable_execlists) { - struct execlist_port *port = engine->execlist_port; - unsigned long flags; - unsigned int n; - - spin_lock_irqsave(&engine->timeline->lock, flags); - - for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) - i915_gem_request_put(port_request(&port[n])); - memset(engine->execlist_port, 0, sizeof(engine->execlist_port)); - engine->execlist_queue = RB_ROOT; - engine->execlist_first = NULL; - - spin_unlock_irqrestore(&engine->timeline->lock, flags); - - /* The port is checked prior to scheduling a tasklet, but - * just in case we have suspended the tasklet to do the - * wedging make sure that when it wakes, it decides there - * is no work to do by clearing the irq_posted bit. - */ - clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - } + engine->cancel_requests(engine); /* Mark all pending requests as complete so that any concurrent * (lockless) lookup doesn't try and wait upon the request as we @@ -3385,24 +4334,12 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags) return 0; } -static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms) -{ - return wait_for(intel_engine_is_idle(engine), timeout_ms); -} - static int wait_for_engines(struct drm_i915_private *i915) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, i915, id) { - if (GEM_WARN_ON(wait_for_engine(engine, 50))) { - i915_gem_set_wedged(i915); - return -EIO; - } - - GEM_BUG_ON(intel_engine_get_seqno(engine) != - intel_engine_last_submit(engine)); + if (wait_for(intel_engines_are_idle(i915), 50)) { + DRM_ERROR("Failed to idle engines, declaring wedged!\n"); + i915_gem_set_wedged(i915); + return -EIO; } return 0; @@ -4295,6 +5232,13 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); mutex_init(&obj->mm.get_page.lock); + /* + * Mark the object as not having backing pages, as no allocation + * for it yet + */ + obj->has_backing_pages = 0; + INIT_LIST_HEAD(&obj->pid_info); + i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); } @@ -4404,6 +5348,26 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj) return atomic_long_read(&obj->base.filp->f_count) == 1; } +int +i915_gem_open_object(struct drm_gem_object *gem_obj, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); + + return i915_gem_obj_insert_pid(obj); +} + +#if 0 +void +i915_gem_close_object(struct drm_gem_object *gem_obj, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); + + i915_gem_obj_remove_pid(obj); +} +#endif + static void __i915_gem_free_objects(struct drm_i915_private *i915, struct llist_node *freed) { @@ -4448,6 +5412,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, if (obj->base.import_attach) 
drm_prime_gem_destroy(&obj->base, NULL); + if (!obj->stolen && (obj->has_backing_pages == 1)) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + + dev_priv->mm.phys_mem_total -= obj->base.size; + obj->has_backing_pages = 0; + } + i915_gem_obj_remove_all_pids(obj); + reservation_object_fini(&obj->__builtin_resv); drm_gem_object_release(&obj->base); i915_gem_info_remove_obj(i915, obj->base.size); @@ -4551,7 +5523,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915) * it may impact the display and we are uncertain about the stability * of the reset, so this could be applied to even earlier gen. */ - if (INTEL_GEN(i915) >= 5) { + if (INTEL_GEN(i915) >= 5 && !intel_vgpu_active(i915)) { int reset = intel_gpu_reset(i915, ALL_ENGINES); WARN_ON(reset && reset != -ENODEV); } @@ -4582,7 +5554,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED); - if (ret) + if (ret && ret != -EIO) goto err_unlock; assert_kernel_context_is_current(dev_priv); @@ -4626,11 +5598,12 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) * machine in an unusable condition. */ i915_gem_sanitize(dev_priv); - goto out_rpm_put; + + intel_runtime_pm_put(dev_priv); + return 0; err_unlock: mutex_unlock(&dev->struct_mutex); -out_rpm_put: intel_runtime_pm_put(dev_priv); return ret; } @@ -4717,6 +5690,28 @@ static int __i915_gem_restart_engines(void *data) return 0; } +int i915_gem_init_hw_late(struct drm_i915_private *dev_priv) +{ + int ret; + + /* + * Place for things that can be delayed until the first context + * is open. For example, fw loading in android. + */ + + /* fetch firmware */ + intel_uc_init_fw(dev_priv); + + /* We can't enable contexts until all firmware is loaded */ + ret = intel_uc_init_hw(dev_priv); + if (ret) + return ret; + + i915_guc_log_register(dev_priv); + + return 0; +} + int i915_gem_init_hw(struct drm_i915_private *dev_priv) { int ret; @@ -4770,10 +5765,18 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) intel_mocs_init_l3cc_table(dev_priv); - /* We can't enable contexts until all firmware is loaded */ - ret = intel_uc_init_hw(dev_priv); - if (ret) - goto out; + /* + * Don't call i915_gem_init_hw_late() the very first time (during + * driver load); it will get called during first open instead. + * It should only be called on subsequent (re-initialization) passes. 
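+	 * contexts_ready is set by i915_gem_context_first_open() once the
+	 * deferred initialization in i915_gem_init_hw_late() has succeeded.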
+ */ + if (dev_priv->contexts_ready) { + ret = i915_gem_init_hw_late(dev_priv); + if (ret) + goto out; + } else { + DRM_DEBUG_DRIVER("deferring late initialization\n"); + } out: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); @@ -4786,7 +5789,7 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value) return false; /* TODO: make semaphores and Execlists play nicely together */ - if (i915.enable_execlists) + if (i915_modparams.enable_execlists) return false; if (value >= 0) @@ -4807,7 +5810,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1); - if (!i915.enable_execlists) { + if (!i915_modparams.enable_execlists) { dev_priv->gt.resume = intel_legacy_submission_resume; dev_priv->gt.cleanup_engine = intel_engine_cleanup; } else { @@ -5053,6 +6056,9 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file) struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_request *request; + i915_gem_remove_sysfs_file_entry(dev, file); + put_pid(file_priv->tgid); + /* Clean up our request list when the client is going away, so that * later retire_requests won't dereference our soon-to-be-gone * file_priv. @@ -5068,8 +6074,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) struct drm_i915_file_private *file_priv; int ret; - DRM_DEBUG("\n"); - file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); if (!file_priv) return -ENOMEM; @@ -5078,14 +6082,47 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) file_priv->dev_priv = i915; file_priv->file = file; + rcu_read_lock(); + file_priv->tgid = get_pid(find_vpid(task_tgid_nr(current))); + rcu_read_unlock(); + + file_priv->process_name = kzalloc(PAGE_SIZE, GFP_ATOMIC); + if (!file_priv->process_name) { + ret = -ENOMEM; + goto out_free_file; + } + + ret = i915_get_pid_cmdline(current, file_priv->process_name); + if (ret) + goto out_free_name; + spin_lock_init(&file_priv->mm.lock); INIT_LIST_HEAD(&file_priv->mm.request_list); file_priv->bsd_engine = -1; + intel_runtime_pm_get(i915); ret = i915_gem_context_open(i915, file); - if (ret) - kfree(file_priv); + + if (ret) { + intel_runtime_pm_put(i915); + goto out_free_name; + } + intel_runtime_pm_put(i915); + + ret = i915_gem_create_sysfs_file_entry(&i915->drm, file); + if (ret) { + i915_gem_context_close(file); + goto out_free_name; + } + + return 0; + +out_free_name: + kfree(file_priv->process_name); +out_free_file: + put_pid(file_priv->tgid); + kfree(file_priv); return ret; } @@ -5174,6 +6211,39 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, return ERR_PTR(err); } +/** + * i915_gem_access_userdata_ioctl -Reads/writes userdata for the object + * @dev: DRM device + * @data: struct drm_i915_gem_access_userdata + * @file: GEM object info + * + * Set/Get 32-bit private user defined data stored with a given GEM object. 
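+ *
+ * A minimal usage sketch from userspace (illustrative only; the ioctl
+ * request macro name below is assumed, not defined in this hunk):
+ *
+ *	struct drm_i915_gem_access_userdata a = {
+ *		.handle = handle, .userdata = 42, .write = 1,
+ *	};
+ *	drmIoctl(fd, DRM_IOCTL_I915_GEM_ACCESS_USERDATA, &a);
+ *	a.write = 0;
+ *	drmIoctl(fd, DRM_IOCTL_I915_GEM_ACCESS_USERDATA, &a);
+ *	assert(a.userdata == 42);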
+ */ +int +i915_gem_access_userdata_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_access_userdata *args = data; + struct drm_i915_gem_object *obj; + + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) + return -ENOENT; + + mutex_lock(&dev->struct_mutex); + + if (args->write) + obj->userdata = args->userdata; + else + args->userdata = obj->userdata; + + mutex_unlock(&dev->struct_mutex); + + i915_gem_object_put(obj); + + return 0; +} + struct scatterlist * i915_gem_object_get_sg(struct drm_i915_gem_object *obj, unsigned int n, diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 8afd2ce59b8d..dfcbb2a97cc0 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -90,6 +90,7 @@ #include #include "i915_drv.h" #include "i915_trace.h" +#include "i915_vgpu.h" #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 @@ -107,14 +108,9 @@ static void lut_close(struct i915_gem_context *ctx) rcu_read_lock(); radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { struct i915_vma *vma = rcu_dereference_raw(*slot); - struct drm_i915_gem_object *obj = vma->obj; radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); - - if (!i915_vma_is_ggtt(vma)) - i915_vma_close(vma); - - __i915_gem_object_release_unless_active(obj); + __i915_gem_object_release_unless_active(vma->obj); } rcu_read_unlock(); } @@ -146,7 +142,11 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) list_del(&ctx->link); - ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id); + if (intel_vgpu_active(ctx->i915)) + ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id & ~(0x7 << SIZE_CONTEXT_HW_ID)); + else + ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id); + kfree_rcu(ctx, rcu); } @@ -200,6 +200,11 @@ static void context_close(struct i915_gem_context *ctx) { i915_gem_context_set_closed(ctx); + /* + * The LUT uses the VMA as a backpointer to unref the object, + * so we need to clear the LUT before we close all the VMA (inside + * the ppgtt). + */ lut_close(ctx); if (ctx->ppgtt) i915_ppgtt_close(&ctx->ppgtt->base); @@ -226,6 +231,12 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) return ret; } + if (intel_vgpu_active(dev_priv)) { + /* add vgpu_id to context hw_id */ + ret = ret | (I915_READ(vgtif_reg(vgt_id)) + << SIZE_CONTEXT_HW_ID); + } + *out = ret; return 0; } @@ -316,7 +327,7 @@ __create_hw_context(struct drm_i915_private *dev_priv, * present or not in use we still need a small bias as ring wraparound * at offset 0 sometimes hangs. No idea why. 
*/ - if (HAS_GUC(dev_priv) && i915.enable_guc_loading) + if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) ctx->ggtt_offset_bias = GUC_WOPCM_TOP; else ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE; @@ -409,7 +420,7 @@ i915_gem_context_create_gvt(struct drm_device *dev) i915_gem_context_set_closed(ctx); /* not user accessible */ i915_gem_context_clear_bannable(ctx); i915_gem_context_set_force_single_submission(ctx); - if (!i915.enable_guc_submission) + if (!i915_modparams.enable_guc_submission) ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */ GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); @@ -418,14 +429,43 @@ i915_gem_context_create_gvt(struct drm_device *dev) return ctx; } +static struct i915_gem_context * +create_kernel_context(struct drm_i915_private *i915, int prio) +{ + struct i915_gem_context *ctx; + + ctx = i915_gem_create_context(i915, NULL); + if (IS_ERR(ctx)) + return ctx; + + i915_gem_context_clear_bannable(ctx); + ctx->priority = prio; + ctx->ring_size = PAGE_SIZE; + + GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); + + return ctx; +} + +static void +destroy_kernel_context(struct i915_gem_context **ctxp) +{ + struct i915_gem_context *ctx; + + /* Keep the context ref so that we can free it immediately ourselves */ + ctx = i915_gem_context_get(fetch_and_zero(ctxp)); + GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); + + context_close(ctx); + i915_gem_context_free(ctx); +} + int i915_gem_contexts_init(struct drm_i915_private *dev_priv) { struct i915_gem_context *ctx; + int err; - /* Init should only be called once per module load. Eventually the - * restriction on the context_disabled check can be loosened. */ - if (WARN_ON(dev_priv->kernel_context)) - return 0; + GEM_BUG_ON(dev_priv->kernel_context); INIT_LIST_HEAD(&dev_priv->contexts.list); INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker); @@ -433,7 +473,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) if (intel_vgpu_active(dev_priv) && HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { - if (!i915.enable_execlists) { + if (!i915_modparams.enable_execlists) { DRM_INFO("Only EXECLIST mode is supported in vgpu.\n"); return -EINVAL; } @@ -443,28 +483,38 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX); ida_init(&dev_priv->contexts.hw_ida); - ctx = i915_gem_create_context(dev_priv, NULL); + /* lowest priority; idle task */ + ctx = create_kernel_context(dev_priv, I915_PRIORITY_MIN); if (IS_ERR(ctx)) { - DRM_ERROR("Failed to create default global context (error %ld)\n", - PTR_ERR(ctx)); - return PTR_ERR(ctx); + DRM_ERROR("Failed to create default global context\n"); + err = PTR_ERR(ctx); + goto err; } - - /* For easy recognisablity, we want the kernel context to be 0 and then + /* + * For easy recognisablity, we want the kernel context to be 0 and then * all user contexts will have non-zero hw_id. */ GEM_BUG_ON(ctx->hw_id); - - i915_gem_context_clear_bannable(ctx); - ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */ dev_priv->kernel_context = ctx; - GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); + /* highest priority; preempting task */ + ctx = create_kernel_context(dev_priv, INT_MAX); + if (IS_ERR(ctx)) { + DRM_ERROR("Failed to create default preempt context\n"); + err = PTR_ERR(ctx); + goto err_kernel_context; + } + dev_priv->preempt_context = ctx; DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->engine[RCS]->context_size ? 
"logical" : "fake"); return 0; + +err_kernel_context: + destroy_kernel_context(&dev_priv->kernel_context); +err: + return err; } void i915_gem_contexts_lost(struct drm_i915_private *dev_priv) @@ -485,7 +535,7 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv) } /* Force the GPU state to be restored on enabling */ - if (!i915.enable_execlists) { + if (!i915_modparams.enable_execlists) { struct i915_gem_context *ctx; list_for_each_entry(ctx, &dev_priv->contexts.list, link) { @@ -509,15 +559,10 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv) void i915_gem_contexts_fini(struct drm_i915_private *i915) { - struct i915_gem_context *ctx; - lockdep_assert_held(&i915->drm.struct_mutex); - /* Keep the context so that we can free it immediately ourselves */ - ctx = i915_gem_context_get(fetch_and_zero(&i915->kernel_context)); - GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); - context_close(ctx); - i915_gem_context_free(ctx); + destroy_kernel_context(&i915->preempt_context); + destroy_kernel_context(&i915->kernel_context); /* Must free all deferred contexts (via flush_workqueue) first */ ida_destroy(&i915->contexts.hw_ida); @@ -531,25 +576,57 @@ static int context_idr_cleanup(int id, void *p, void *data) return 0; } +int i915_gem_context_first_open(struct drm_i915_private *dev_priv) +{ + int ret; + + lockdep_assert_held(&dev_priv->drm.struct_mutex); + + DRM_DEBUG_DRIVER("late initialization starting\n"); + + intel_runtime_pm_get(dev_priv); + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + + ret = i915_gem_init_hw_late(dev_priv); + if (ret == 0) + dev_priv->contexts_ready = true; + else + DRM_ERROR("late initialization failed: %d\n", ret); + + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_runtime_pm_put(dev_priv); + + return ret; +} + int i915_gem_context_open(struct drm_i915_private *i915, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_gem_context *ctx; + int ret = 0; idr_init(&file_priv->context_idr); mutex_lock(&i915->drm.struct_mutex); - ctx = i915_gem_create_context(i915, file_priv); - mutex_unlock(&i915->drm.struct_mutex); - if (IS_ERR(ctx)) { - idr_destroy(&file_priv->context_idr); - return PTR_ERR(ctx); + + if (!(i915->contexts_ready)) + ret = i915_gem_context_first_open(i915); + + if (ret == 0) { + ctx = i915_gem_create_context(i915, file_priv); + if (IS_ERR(ctx)) + ret = PTR_ERR(ctx); + + GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); } - GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); + mutex_unlock(&i915->drm.struct_mutex); - return 0; + if (ret) + idr_destroy(&file_priv->context_idr); + + return ret; } void i915_gem_context_close(struct drm_file *file) @@ -570,7 +647,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 flags) enum intel_engine_id id; const int num_rings = /* Use an extended w/a on gen7 if signalling from other rings */ - (i915.semaphores && INTEL_GEN(dev_priv) == 7) ? + (i915_modparams.semaphores && INTEL_GEN(dev_priv) == 7) ? 
INTEL_INFO(dev_priv)->num_rings - 1 : 0; int len; @@ -839,7 +916,7 @@ int i915_switch_context(struct drm_i915_gem_request *req) struct intel_engine_cs *engine = req->engine; lockdep_assert_held(&req->i915->drm.struct_mutex); - if (i915.enable_execlists) + if (i915_modparams.enable_execlists) return 0; if (!req->ctx->engine[engine->id].state) { @@ -1038,6 +1115,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, case I915_CONTEXT_PARAM_BANNABLE: args->value = i915_gem_context_is_bannable(ctx); break; + case I915_CONTEXT_PARAM_PRIORITY: + args->value = ctx->priority; + break; default: ret = -EINVAL; break; @@ -1093,6 +1173,26 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, else i915_gem_context_clear_bannable(ctx); break; + + case I915_CONTEXT_PARAM_PRIORITY: + { + int priority = args->value; + + if (args->size) + ret = -EINVAL; + else if (!to_i915(dev)->engine[RCS]->schedule) + ret = -ENODEV; + else if (priority > I915_CONTEXT_MAX_USER_PRIORITY || + priority < I915_CONTEXT_MIN_USER_PRIORITY) + ret = -EINVAL; + else if (priority > I915_CONTEXT_DEFAULT_PRIORITY && + !capable(CAP_SYS_NICE)) + ret = -EPERM; + else + ctx->priority = priority; + } + break; + default: ret = -EINVAL; break; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 83876a1c8d98..f7771600b89b 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -268,6 +268,11 @@ static inline u64 gen8_noncanonical_addr(u64 address) return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0); } +static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) +{ + return eb->engine->needs_cmd_parser && eb->batch_len; +} + static int eb_create(struct i915_execbuffer *eb) { if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) { @@ -499,6 +504,8 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma) list_add_tail(&vma->exec_link, &eb->unbound); if (drm_mm_node_allocated(&vma->node)) err = i915_vma_unbind(vma); + if (unlikely(err)) + vma->exec_flags = NULL; } return err; } @@ -720,7 +727,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) err = radix_tree_insert(handles_vma, handle, vma); if (unlikely(err)) { - kfree(lut); + kmem_cache_free(eb->i915->luts, lut); goto err_obj; } @@ -1163,6 +1170,13 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb, if (unlikely(!cache->rq)) { int err; + /* If we need to copy for the cmdparser, we will stall anyway */ + if (eb_use_cmdparser(eb)) + return ERR_PTR(-EWOULDBLOCK); + + if (!intel_engine_can_store_dword(eb->engine)) + return ERR_PTR(-ENODEV); + err = __reloc_gpu_alloc(eb, vma, len); if (unlikely(err)) return ERR_PTR(err); @@ -1187,9 +1201,7 @@ relocate_entry(struct i915_vma *vma, if (!eb->reloc_cache.vaddr && (DBG_FORCE_RELOC == FORCE_GPU_RELOC || - !reservation_object_test_signaled_rcu(vma->resv, true)) && - __intel_engine_can_store_dword(eb->reloc_cache.gen, - eb->engine->class)) { + !reservation_object_test_signaled_rcu(vma->resv, true))) { const unsigned int gen = eb->reloc_cache.gen; unsigned int len; u32 *batch; @@ -1581,7 +1593,7 @@ static int eb_prefault_relocations(const struct i915_execbuffer *eb) const unsigned int count = eb->buffer_count; unsigned int i; - if (unlikely(i915.prefault_disable)) + if (unlikely(i915_modparams.prefault_disable)) return 0; for (i = 0; i < count; i++) { @@ -2303,7 +2315,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, goto err_vma; } - if (eb.engine->needs_cmd_parser && 
eb.batch_len) { + if (eb_use_cmdparser(&eb)) { struct i915_vma *vma; vma = eb_parse(&eb, drm_is_current_master(file)); @@ -2408,7 +2420,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, if (out_fence) { if (err == 0) { fd_install(out_fence_fd, out_fence->file); - args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */ + args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */ args->rsvd2 |= (u64)out_fence_fd << 32; out_fence_fd = -1; } else { diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index 5fe2cd8c8f28..181786d80386 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -63,6 +63,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence, i915_reg_t fence_reg_lo, fence_reg_hi; int fence_pitch_shift; u64 val; + struct drm_i915_private *dev_priv = fence->i915; if (INTEL_INFO(fence->i915)->gen >= 6) { fence_reg_lo = FENCE_REG_GEN6_LO(fence->id); @@ -92,9 +93,17 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence, val |= I965_FENCE_REG_VALID; } - if (!pipelined) { - struct drm_i915_private *dev_priv = fence->i915; - + if (intel_vgpu_active(dev_priv)) { + /* Use the 64-bit RW to write fence reg on VGPU mode. + * The GVT-g can trap the written val of VGPU to program the + * fence reg. And the fence write in gvt-g follows the + * sequence of off/read/double-write/read. This assures that + * the fence reg is configured as expected. + * At the same time the 64-bit op can help to reduce the num + * of VGPU trap for the fence reg. + */ + I915_WRITE64_FW(fence_reg_lo, val); + } else if (!pipelined) { /* To w/a incoherency with non-atomic 64-bit register updates, * we split the 64-bit update into two 32-bit writes. In order * for a partial fence not to be evaluated between writes, we diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index ad524cb0f6fc..1526ce50854c 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -42,6 +42,10 @@ #include "intel_drv.h" #include "intel_frontbuffer.h" +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + #define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM) /** @@ -180,7 +184,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, return 0; } - if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) { + if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) { if (has_full_48bit_ppgtt) return 3; @@ -817,6 +821,8 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, struct i915_pml4 *pml4 = &ppgtt->pml4; struct i915_page_directory_pointer *pdp; unsigned int pml4e; + u64 orig_start = start; + u64 orig_length = length; GEM_BUG_ON(!use_4lvl(vm)); @@ -830,6 +836,17 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, free_pdp(vm, pdp); } + + if (PVMMIO_LEVEL_ENABLE(vm->i915, PVMMIO_PPGTT_UPDATE)) { + struct drm_i915_private *dev_priv = vm->i915; + struct pv_ppgtt_update *pv_ppgtt = + &dev_priv->shared_page->pv_ppgtt; + + writeq(px_dma(pml4), &pv_ppgtt->pdp); + writeq(orig_start, &pv_ppgtt->start); + writeq(orig_length, &pv_ppgtt->length); + I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_PPGTT_L4_CLEAR); + } } static inline struct sgt_dma { @@ -940,6 +957,20 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter, &idx, cache_level)) GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4); + + if (PVMMIO_LEVEL_ENABLE(vm->i915, 
PVMMIO_PPGTT_UPDATE)) { + + struct drm_i915_private *dev_priv = vm->i915; + struct pv_ppgtt_update *pv_ppgtt = + &dev_priv->shared_page->pv_ppgtt; + + writeq(px_dma(&ppgtt->pml4), &pv_ppgtt->pdp); + writeq(vma->node.start, &pv_ppgtt->start); + writeq(vma->node.size, &pv_ppgtt->length); + writel(cache_level, &pv_ppgtt->cache_level); + I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_PPGTT_L4_INSERT); + } + } static void gen8_free_page_tables(struct i915_address_space *vm, @@ -1177,6 +1208,8 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, u64 from = start; u32 pml4e; int ret; + u64 orig_start = start; + u64 orig_length = length; gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { if (pml4->pdps[pml4e] == vm->scratch_pdp) { @@ -1193,6 +1226,17 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, goto unwind_pdp; } + if (PVMMIO_LEVEL_ENABLE(vm->i915, PVMMIO_PPGTT_UPDATE)) { + struct drm_i915_private *dev_priv = vm->i915; + struct pv_ppgtt_update *pv_ppgtt = + &dev_priv->shared_page->pv_ppgtt; + + writeq(px_dma(pml4), &pv_ppgtt->pdp); + writeq(orig_start, &pv_ppgtt->start); + writeq(orig_length, &pv_ppgtt->length); + I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_PPGTT_L4_ALLOC); + } + return 0; unwind_pdp: @@ -1878,12 +1922,12 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv) * called on driver load and after a GPU reset, so you can place * workarounds here even if they get overwritten by GPU reset. */ - /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl */ + /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl */ if (IS_BROADWELL(dev_priv)) I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); else if (IS_CHERRYVIEW(dev_priv)) I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); - else if (IS_GEN9_BC(dev_priv)) + else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv)) I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); else if (IS_GEN9_LP(dev_priv)) I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); @@ -1896,7 +1940,7 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv) /* In the case of execlists, PPGTT is enabled by the context descriptor * and the PDPs are contained within the context itself. We don't * need to do anything here. 
*/ - if (i915.enable_execlists) + if (i915_modparams.enable_execlists) return 0; if (!USES_PPGTT(dev_priv)) @@ -2202,7 +2246,15 @@ static int bxt_vtd_ggtt_insert_page__cb(void *_arg) { struct insert_page *arg = _arg; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (intel_gvt_active(arg->vm->i915)) + gvt_pause_user_domains(arg->vm->i915); +#endif gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (intel_gvt_active(arg->vm->i915)) + gvt_unpause_user_domains(arg->vm->i915); +#endif bxt_vtd_ggtt_wa(arg->vm); return 0; @@ -2229,7 +2281,15 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) { struct insert_entries *arg = _arg; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (intel_gvt_active(arg->vm->i915)) + gvt_pause_user_domains(arg->vm->i915); +#endif gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (intel_gvt_active(arg->vm->i915)) + gvt_unpause_user_domains(arg->vm->i915); +#endif bxt_vtd_ggtt_wa(arg->vm); return 0; @@ -2255,7 +2315,15 @@ static int bxt_vtd_ggtt_clear_range__cb(void *_arg) { struct clear_range *arg = _arg; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (intel_gvt_active(arg->vm->i915)) + gvt_pause_user_domains(arg->vm->i915); +#endif gen8_ggtt_clear_range(arg->vm, arg->start, arg->length); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (intel_gvt_active(arg->vm->i915)) + gvt_unpause_user_domains(arg->vm->i915); +#endif bxt_vtd_ggtt_wa(arg->vm); return 0; @@ -2558,17 +2626,20 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) if (ret) return ret; - /* Clear any non-preallocated blocks */ - drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) { - DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", - hole_start, hole_end); - ggtt->base.clear_range(&ggtt->base, hole_start, - hole_end - hole_start); - } + if (!intel_vgpu_active(dev_priv)) { + /* Clear any non-preallocated blocks */ + drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, + hole_end) { + DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", + hole_start, hole_end); + ggtt->base.clear_range(&ggtt->base, hole_start, + hole_end - hole_start); + } - /* And finally clear the reserved guard page */ - ggtt->base.clear_range(&ggtt->base, - ggtt->base.total - PAGE_SIZE, PAGE_SIZE); + /* And finally clear the reserved guard page */ + ggtt->base.clear_range(&ggtt->base, + ggtt->base.total - PAGE_SIZE, PAGE_SIZE); + } if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) { ret = i915_gem_init_aliasing_ppgtt(dev_priv); @@ -3014,7 +3085,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) * currently don't have any bits spare to pass in this upper * restriction! 
*/ - if (HAS_GUC(dev_priv) && i915.enable_guc_loading) { + if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) { ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP); ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total); } diff --git a/drivers/gpu/drm/i915/i915_gem_gvtbuffer.c b/drivers/gpu/drm/i915/i915_gem_gvtbuffer.c new file mode 100644 index 000000000000..a08c5caa9b99 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_gvtbuffer.c @@ -0,0 +1,292 @@ +/* + * Copyright © 2012 - 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include "i915_drv.h" +#include "i915_trace.h" +#include "intel_drv.h" +#include + +#include "gvt/fb_decoder.h" + +static struct sg_table * +i915_gem_gvtbuffer_get_pages(struct drm_i915_gem_object *obj) +{ + BUG(); + return ERR_PTR(-EINVAL); +} + +static void i915_gem_gvtbuffer_put_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ + /* like stolen memory, this should only be called during free + * after clearing pin count. 
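+ * The backing sg_table is built once at object creation by
+ * i915_create_sg_pages_for_gvtbuffer() and stays pinned until free,
+ * which is why i915_gem_gvtbuffer_get_pages() above simply BUG()s.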
+ */
+	sg_free_table(pages);
+	kfree(pages);
+}
+
+static void
+i915_gem_gvtbuffer_release(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_unpin_pages(obj);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_gvtbuffer_ops = {
+	.get_pages = i915_gem_gvtbuffer_get_pages,
+	.put_pages = i915_gem_gvtbuffer_put_pages,
+	.release = i915_gem_gvtbuffer_release,
+};
+
+#define GEN8_DECODE_PTE(pte) \
+	((dma_addr_t)(((((u64)pte) >> 12) & 0x7ffffffULL) << 12))
+
+#define GEN7_DECODE_PTE(pte) \
+	((dma_addr_t)(((((u64)pte) & 0x7f0) << 28) | (u64)(pte & 0xfffff000)))
+
+static struct sg_table *
+i915_create_sg_pages_for_gvtbuffer(struct drm_device *dev,
+				   u32 start, u32 num_pages)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct sg_table *st;
+	struct scatterlist *sg;
+	int i;
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL)
+		return NULL;
+
+	if (sg_alloc_table(st, num_pages, GFP_KERNEL)) {
+		kfree(st);
+		return NULL;
+	}
+
+	if (INTEL_INFO(dev_priv)->gen >= 8) {
+		gen8_pte_t __iomem *gtt_entries =
+			(gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
+			(start >> PAGE_SHIFT);
+		for_each_sg(st->sgl, sg, num_pages, i) {
+			sg->offset = 0;
+			sg->length = PAGE_SIZE;
+			sg_dma_address(sg) =
+				GEN8_DECODE_PTE(readq(&gtt_entries[i]));
+			sg_dma_len(sg) = PAGE_SIZE;
+		}
+	} else {
+		gen6_pte_t __iomem *gtt_entries =
+			(gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
+			(start >> PAGE_SHIFT);
+		for_each_sg(st->sgl, sg, num_pages, i) {
+			sg->offset = 0;
+			sg->length = PAGE_SIZE;
+			sg_dma_address(sg) =
+				GEN7_DECODE_PTE(readq(&gtt_entries[i]));
+			sg_dma_len(sg) = PAGE_SIZE;
+		}
+	}
+
+	return st;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_gvtbuffer(struct drm_device *dev,
+				 u32 start, u32 num_pages)
+{
+	struct drm_i915_gem_object *obj;
+
+	obj = i915_gem_object_alloc(to_i915(dev));
+	if (obj == NULL)
+		return NULL;
+
+	drm_gem_private_object_init(dev, &obj->base, num_pages << PAGE_SHIFT);
+	i915_gem_object_init(obj, &i915_gem_gvtbuffer_ops);
+
+	obj->mm.pages = i915_create_sg_pages_for_gvtbuffer(dev, start, num_pages);
+	if (obj->mm.pages == NULL) {
+		i915_gem_object_free(obj);
+		return NULL;
+	}
+
+	if (i915_gem_object_pin_pages(obj))
+		printk(KERN_ERR "%s:%d> Pin pages failed!\n", __func__, __LINE__);
+	obj->cache_level = I915_CACHE_L3_LLC;
+
+	DRM_DEBUG_DRIVER("GVT_GEM: backing store base = 0x%x pages = 0x%x\n",
+			 start, num_pages);
+	return obj;
+}
+
+static int gvt_decode_information(struct drm_device *dev,
+				  struct drm_i915_gem_gvtbuffer *args)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct gvt_fb_format fb;
+	struct gvt_primary_plane_format *p;
+	struct gvt_cursor_plane_format *c;
+	struct gvt_pipe_format *pipe;
+#if IS_ENABLED(CONFIG_DRM_I915_GVT)
+	u32 id = args->id;
+
+	if (gvt_decode_fb_format(dev_priv->gvt, id, &fb))
+		return -EINVAL;
+#else
+	return -EINVAL;
+#endif
+
+	pipe = ((args->pipe_id >= I915_MAX_PIPES) ?
+		NULL : &fb.pipes[args->pipe_id]);
+
+	if (!pipe || !pipe->primary.enabled) {
+		DRM_DEBUG_DRIVER("GVT_GEM: Invalid pipe_id: %d\n",
+				 args->pipe_id);
+		return -EINVAL;
+	}
+
+	if ((args->plane_id) == I915_GVT_PLANE_PRIMARY) {
+		p = &pipe->primary;
+		args->enabled = p->enabled;
+		args->x_offset = p->x_offset;
+		args->y_offset = p->y_offset;
+		args->start = p->base;
+		args->width = p->width;
+		args->height = p->height;
+		args->stride = p->stride;
+		args->bpp = p->bpp;
+		args->hw_format = p->hw_format;
+		args->drm_format = p->drm_format;
+		args->tiled = p->tiled;
+	} else if ((args->plane_id) == I915_GVT_PLANE_CURSOR) {
+		c = &pipe->cursor;
+		args->enabled = c->enabled;
+		args->x_offset = c->x_hot;
+		args->y_offset = c->y_hot;
+		args->x_pos = c->x_pos;
+		args->y_pos = c->y_pos;
+		args->start = c->base;
+		args->width = c->width;
+		args->height = c->height;
+		args->stride = c->width * (c->bpp / 8);
+		args->bpp = c->bpp;
+		args->tiled = 0;
+	} else {
+		DRM_DEBUG_DRIVER("GVT_GEM: Invalid plane_id: %d\n",
+				 args->plane_id);
+		return -EINVAL;
+	}
+
+	args->size = (((args->width * args->height * args->bpp) / 8) +
+		      (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+	if (args->start & (PAGE_SIZE - 1)) {
+		DRM_DEBUG_DRIVER("GVT_GEM: Unaligned fb start address: "
+				 "0x%x\n", args->start);
+		return -EINVAL;
+	}
+
+	if (((args->start >> PAGE_SHIFT) + args->size) >
+	    ggtt_total_entries(&dev_priv->ggtt)) {
+		DRM_DEBUG_DRIVER("GVT: Invalid GTT offset or size\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * i915_gem_gvtbuffer_ioctl - create a GEM object wrapping a guest framebuffer
+ *
+ * Decodes the guest plane (unless args->start already names a GGTT offset)
+ * and wraps the corresponding GGTT-mapped pages in a new GEM object.
+ */
+int
+i915_gem_gvtbuffer_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_gem_gvtbuffer *args = data;
+	struct drm_i915_gem_object *obj;
+	u32 handle;
+	int ret = 0;
+
+	if (INTEL_INFO(dev_priv)->gen < 7)
+		return -EPERM;
+
+	if (args->flags & I915_GVTBUFFER_CHECK_CAPABILITY)
+		return 0;
+#if 0
+	if (!gvt_check_host())
+		return -EPERM;
+#endif
+	/* if args->start != 0 do not decode, but use it as a ggtt offset */
+	if (args->start == 0) {
+		ret = gvt_decode_information(dev, args);
+		if (ret)
+			return ret;
+	}
+
+	if (args->flags & I915_GVTBUFFER_QUERY_ONLY)
+		return 0;
+
+	obj = i915_gem_object_create_gvtbuffer(dev, args->start, args->size);
+	if (!obj) {
+		DRM_DEBUG_DRIVER("GVT_GEM: Failed to create gem object"
+				 " for VM FB!\n");
+		return -EINVAL;
+	}
+
+	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
+	    IS_KABYLAKE(dev_priv)) {
+		unsigned int tiling_mode = I915_TILING_NONE;
+		unsigned int stride = 0;
+
+		switch (args->tiled << 10) {
+		case PLANE_CTL_TILED_LINEAR:
+			/* Default valid value */
+			break;
+		case PLANE_CTL_TILED_X:
+			tiling_mode = I915_TILING_X;
+			stride = args->stride;
+			break;
+		case PLANE_CTL_TILED_Y:
+			tiling_mode = I915_TILING_Y;
+			stride = args->stride;
+			break;
+		default:
+			DRM_ERROR("gvt: tiling mode %d not supported\n",
+				  args->tiled);
+		}
+		obj->tiling_and_stride = tiling_mode | stride;
+	} else {
+		obj->tiling_and_stride = (args->tiled ? I915_TILING_X : I915_TILING_NONE) |
+					 (args->tiled ?
args->stride : 0); + } + + ret = drm_gem_handle_create(file, &obj->base, &handle); + + /* drop reference from allocate - handle holds it now */ + i915_gem_object_put(obj); + + if (ret) + return ret; + + args->handle = handle; + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index c30d8f808185..367a578b83b6 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -147,6 +147,8 @@ struct drm_i915_gem_object { #define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1) unsigned int cache_dirty:1; + unsigned int has_backing_pages:1; + atomic_t frontbuffer_bits; unsigned int frontbuffer_ggtt_origin; /* write once */ struct i915_gem_active frontbuffer_write; @@ -213,6 +215,9 @@ struct drm_i915_gem_object { /** Record of address bit 17 of each page at last unbind. */ unsigned long *bit_17; + /** Object userdata */ + u32 userdata; + union { struct i915_gem_userptr { uintptr_t ptr; @@ -226,6 +231,8 @@ struct drm_i915_gem_object { unsigned long scratch; }; + struct list_head pid_info; + /** for phys allocated objects */ struct drm_dma_handle *phys_handle; diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 813a3b546d6e..0be277625841 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -186,7 +186,7 @@ i915_priotree_init(struct i915_priotree *pt) INIT_LIST_HEAD(&pt->signalers_list); INIT_LIST_HEAD(&pt->waiters_list); INIT_LIST_HEAD(&pt->link); - pt->priority = INT_MIN; + pt->priority = I915_PRIORITY_INVALID; } static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) @@ -587,6 +587,13 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, lockdep_assert_held(&dev_priv->drm.struct_mutex); + /* + * Preempt contexts are reserved for exclusive use to inject a + * preemption context switch. They are never to be used for any trivial + * request! + */ + GEM_BUG_ON(ctx == dev_priv->preempt_context); + /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report * EIO if the GPU is already wedged. 
*/ @@ -906,6 +913,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches) lockdep_assert_held(&request->i915->drm.struct_mutex); trace_i915_gem_request_add(request); + trace_i915_gem_request_add_domain(request); /* Make sure that no request gazumped us - if it was allocated after * our i915_gem_request_alloc() and called __i915_add_request() before @@ -1073,7 +1081,7 @@ static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *req return false; __set_current_state(TASK_RUNNING); - i915_reset(request->i915, 0); + i915_reset(request->i915); return true; } diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index 49a4c8994ff0..e1292025b501 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h @@ -30,6 +30,8 @@ #include "i915_gem.h" #include "i915_sw_fence.h" +#include + struct drm_file; struct drm_i915_gem_object; struct drm_i915_gem_request; @@ -69,9 +71,14 @@ struct i915_priotree { struct list_head waiters_list; /* those after us, they depend upon us */ struct list_head link; int priority; -#define I915_PRIORITY_MAX 1024 -#define I915_PRIORITY_NORMAL 0 -#define I915_PRIORITY_MIN (-I915_PRIORITY_MAX) +}; + +enum { + I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1, + I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY, + I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1, + + I915_PRIORITY_INVALID = INT_MIN }; struct i915_gem_capture_list { diff --git a/drivers/gpu/drm/i915/i915_gem_splash.c b/drivers/gpu/drm/i915/i915_gem_splash.c new file mode 100644 index 000000000000..317675aefee1 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_splash.c @@ -0,0 +1,144 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include +#include +#include "i915_drv.h" + + +/* + * The memory was allocated outside of the i915 driver and is non-pagable, + * it can not be migrated by either the i915 driver or the OS. + * + * The scatter/gather table has already been initialized when the gem obj + * was created, nothing more needs to be done here, return 0 to + * indicate sg is ready. + */ +static struct sg_table * +i915_gem_object_get_pages_splash(struct drm_i915_gem_object *obj) +{ + return NULL; +} + +/* The sg is going to be freed when the gem obj itself is released. 
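+ * put_pages is therefore a no-op; i915_gem_object_release_splash()
+ * below performs the dma_unmap_sg() and frees the table.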
*/ +static void i915_gem_object_put_pages_splash(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ +} + + + static void +i915_gem_object_release_splash(struct drm_i915_gem_object *obj) +{ + if (obj->mm.pages) { + dma_unmap_sg(&obj->base.dev->pdev->dev, + obj->mm.pages->sgl, obj->mm.pages->nents, + DMA_TO_DEVICE); + sg_free_table(obj->mm.pages); + kfree(obj->mm.pages); + obj->mm.pages = NULL; + } +} + +static const struct drm_i915_gem_object_ops i915_gem_object_splash_ops = { + .get_pages = i915_gem_object_get_pages_splash, + .put_pages = i915_gem_object_put_pages_splash, + .release = i915_gem_object_release_splash, +}; + +/* create a gem obj from a list of pages */ +struct drm_i915_gem_object * +i915_gem_object_create_splash_pages(struct drm_i915_private *dev_priv, + struct page **pages, u32 n_pages) +{ + struct drm_i915_gem_object *obj; + struct sg_table *st; + unsigned long size = n_pages << PAGE_SHIFT; + + if (n_pages == 0) + return NULL; + + obj = i915_gem_object_alloc(dev_priv); + if (obj == NULL) + return NULL; + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (st == NULL) + goto cleanup; + + drm_gem_private_object_init(&dev_priv->drm, &obj->base, size); + i915_gem_object_init(obj, &i915_gem_object_splash_ops); + + if (sg_alloc_table_from_pages(st, pages, n_pages, + 0, size, GFP_KERNEL)) + goto cleanup_st; + + obj->mm.pages = st; + obj->base.read_domains = I915_GEM_DOMAIN_GTT; + obj->cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE; + + if (!dma_map_sg(&obj->base.dev->pdev->dev, + obj->mm.pages->sgl, obj->mm.pages->nents, + DMA_TO_DEVICE)) { + sg_free_table(obj->mm.pages); + obj->mm.pages = NULL; + goto cleanup_st; + } + return obj; + +cleanup_st: + kfree(st); +cleanup: + i915_gem_object_free(obj); + return NULL; +} + +/* create a gem obj from a virtual address */ +struct drm_i915_gem_object * +i915_gem_object_create_splash(struct drm_i915_private *dev_priv, + const u8 *ptr, u32 n_pages) +{ + struct page **pvec; + u32 i; + struct drm_i915_gem_object *obj = NULL; + + if (ptr == NULL || n_pages == 0) + return NULL; + + WARN_ON (!PAGE_ALIGNED(ptr)); + + pvec = kmalloc(n_pages * sizeof(struct page *), + GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + if (pvec == NULL) + return NULL; + + for (i = 0; i < n_pages; i++) { + *(pvec+i) = vmalloc_to_page(ptr); + ptr += PAGE_SIZE; + } + + obj = i915_gem_object_create_splash_pages(dev_priv, pvec, n_pages); + kfree(pvec); + return obj; +} diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 0c779671fe2d..e9be07b48e38 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -78,6 +78,11 @@ static bool __i915_error_ok(struct drm_i915_error_state_buf *e) return true; } +bool i915_error_ok(struct drm_i915_error_state_buf *e) +{ + return __i915_error_ok(e); +} + static bool __i915_error_seek(struct drm_i915_error_state_buf *e, unsigned len) { @@ -149,7 +154,7 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e, __i915_error_advance(e, len); } -static void i915_error_puts(struct drm_i915_error_state_buf *e, +void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str) { unsigned len; @@ -377,9 +382,9 @@ static void error_print_request(struct drm_i915_error_state_buf *m, if (!erq->seqno) return; - err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n", + err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms ago, head %08x, tail %08x\n", prefix, 
erq->pid, erq->ban_score, - erq->context, erq->seqno, + erq->context, erq->seqno, erq->priority, jiffies_to_msecs(jiffies - erq->jiffies), erq->head, erq->tail); } @@ -388,14 +393,16 @@ static void error_print_context(struct drm_i915_error_state_buf *m, const char *header, const struct drm_i915_error_context *ctx) { - err_printf(m, "%s%s[%d] user_handle %d hw_id %d, ban score %d guilty %d active %d\n", + err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d guilty %d active %d\n", header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id, - ctx->ban_score, ctx->guilty, ctx->active); + ctx->priority, ctx->ban_score, ctx->guilty, ctx->active); } static void error_print_engine(struct drm_i915_error_state_buf *m, const struct drm_i915_error_engine *ee) { + int n; + err_printf(m, "%s command stream:\n", engine_str(ee->engine_id)); err_printf(m, " START: 0x%08x\n", ee->start); err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head); @@ -465,8 +472,11 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, jiffies_to_msecs(jiffies - ee->hangcheck_timestamp)); err_printf(m, " engine reset count: %u\n", ee->reset_count); - error_print_request(m, " ELSP[0]: ", &ee->execlist[0]); - error_print_request(m, " ELSP[1]: ", &ee->execlist[1]); + for (n = 0; n < ee->num_ports; n++) { + err_printf(m, " ELSP[%d]:", n); + error_print_request(m, " ", &ee->execlist[n]); + } + error_print_context(m, " Active context: ", &ee->context); } @@ -553,13 +563,15 @@ static __always_inline void err_print_param(struct drm_i915_error_state_buf *m, const void *x) { if (!__builtin_strcmp(type, "bool")) - err_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x)); + err_printf(m, "i915_modparams.%s=%s\n", name, yesno(*(const bool *)x)); else if (!__builtin_strcmp(type, "int")) - err_printf(m, "i915.%s=%d\n", name, *(const int *)x); + err_printf(m, "i915_modparams.%s=%d\n", name, *(const int *)x); else if (!__builtin_strcmp(type, "unsigned int")) - err_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x); + err_printf(m, "i915_modparams.%s=%u\n", name, *(const unsigned int *)x); + else if (!__builtin_strcmp(type, "unsigned long long")) + err_printf(m, "i915_modparams.%s=%llu\n", name, *(const unsigned long long *)x); else if (!__builtin_strcmp(type, "char *")) - err_printf(m, "i915.%s=%s\n", name, *(const char **)x); + err_printf(m, "i915_modparams.%s=%s\n", name, *(const char **)x); else BUILD_BUG(); } @@ -807,6 +819,20 @@ int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf, return 0; } +int i915_obj_state_buf_init(struct drm_i915_error_state_buf *ebuf, + size_t count) +{ + memset(ebuf, 0, sizeof(*ebuf)); + + ebuf->buf = kmalloc(count, GFP_KERNEL); + + if (ebuf->buf == NULL) + return -ENOMEM; + + ebuf->size = count; + return 0; +} + static void i915_error_object_free(struct drm_i915_error_object *obj) { int page; @@ -1266,6 +1292,7 @@ static void record_request(struct drm_i915_gem_request *request, struct drm_i915_error_request *erq) { erq->context = request->ctx->hw_id; + erq->priority = request->priotree.priority; erq->ban_score = atomic_read(&request->ctx->ban_score); erq->seqno = request->global_seqno; erq->jiffies = request->emitted_jiffies; @@ -1327,17 +1354,19 @@ static void engine_record_requests(struct intel_engine_cs *engine, static void error_record_engine_execlists(struct intel_engine_cs *engine, struct drm_i915_error_engine *ee) { - const struct execlist_port *port = engine->execlist_port; + const struct intel_engine_execlists * const execlists = 
&engine->execlists; unsigned int n; - for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) { - struct drm_i915_gem_request *rq = port_request(&port[n]); + for (n = 0; n < execlists_num_ports(execlists); n++) { + struct drm_i915_gem_request *rq = port_request(&execlists->port[n]); if (!rq) break; record_request(rq, &ee->execlist[n]); } + + ee->num_ports = n; } static void record_context(struct drm_i915_error_context *e, @@ -1357,6 +1386,7 @@ static void record_context(struct drm_i915_error_context *e, e->handle = ctx->user_handle; e->hw_id = ctx->hw_id; + e->priority = ctx->priority; e->ban_score = atomic_read(&ctx->ban_score); e->guilty = atomic_read(&ctx->guilty_count); e->active = atomic_read(&ctx->active_count); @@ -1554,7 +1584,7 @@ static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv, struct i915_gpu_state *error) { /* Capturing log buf contents won't be useful if logging was disabled */ - if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0)) + if (!dev_priv->guc.log.vma || (i915_modparams.guc_log_level < 0)) return; error->guc_log = i915_error_object_create(dev_priv, @@ -1696,7 +1726,7 @@ static int capture(void *data) ktime_to_timeval(ktime_sub(ktime_get(), error->i915->gt.last_init_time)); - error->params = i915; + error->params = i915_modparams; #define DUP(T, x) dup_param(#T, &error->params.x); I915_PARAMS_FOR_EACH(DUP); #undef DUP @@ -1751,7 +1781,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv, struct i915_gpu_state *error; unsigned long flags; - if (!i915.error_capture) + if (!i915_modparams.error_capture) return; if (READ_ONCE(dev_priv->gpu_error.first_error)) diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 48a1e9349a2c..3afb22c72158 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -192,13 +192,12 @@ static int __create_doorbell(struct i915_guc_client *client) doorbell = __get_doorbell(client); doorbell->db_status = GUC_DOORBELL_ENABLED; - doorbell->cookie = client->doorbell_cookie; + doorbell->cookie = 0; err = __guc_allocate_doorbell(client->guc, client->stage_id); - if (err) { + if (err) doorbell->db_status = GUC_DOORBELL_DISABLED; - doorbell->cookie = 0; - } + return err; } @@ -306,7 +305,7 @@ static void guc_proc_desc_init(struct intel_guc *guc, desc->db_base_addr = 0; desc->stage_id = client->stage_id; - desc->wq_size_bytes = client->wq_size; + desc->wq_size_bytes = GUC_WQ_SIZE; desc->wq_status = WQ_STATUS_ACTIVE; desc->priority = client->priority; } @@ -391,8 +390,8 @@ static void guc_stage_desc_init(struct intel_guc *guc, desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client); desc->db_trigger_uk = gfx_addr + client->doorbell_offset; desc->process_desc = gfx_addr + client->proc_desc_offset; - desc->wq_addr = gfx_addr + client->wq_offset; - desc->wq_size = client->wq_size; + desc->wq_addr = gfx_addr + GUC_DB_SIZE; + desc->wq_size = GUC_WQ_SIZE; desc->desc_private = (uintptr_t)client; } @@ -406,82 +405,23 @@ static void guc_stage_desc_fini(struct intel_guc *guc, memset(desc, 0, sizeof(*desc)); } -/** - * i915_guc_wq_reserve() - reserve space in the GuC's workqueue - * @request: request associated with the commands - * - * Return: 0 if space is available - * -EAGAIN if space is not currently available - * - * This function must be called (and must return 0) before a request - * is submitted to the GuC via i915_guc_submit() below. 
Once a result - * of 0 has been returned, it must be balanced by a corresponding - * call to submit(). - * - * Reservation allows the caller to determine in advance that space - * will be available for the next submission before committing resources - * to it, and helps avoid late failures with complicated recovery paths. - */ -int i915_guc_wq_reserve(struct drm_i915_gem_request *request) -{ - const size_t wqi_size = sizeof(struct guc_wq_item); - struct i915_guc_client *client = request->i915->guc.execbuf_client; - struct guc_process_desc *desc = __get_process_desc(client); - u32 freespace; - int ret; - - spin_lock_irq(&client->wq_lock); - freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size); - freespace -= client->wq_rsvd; - if (likely(freespace >= wqi_size)) { - client->wq_rsvd += wqi_size; - ret = 0; - } else { - client->no_wq_space++; - ret = -EAGAIN; - } - spin_unlock_irq(&client->wq_lock); - - return ret; -} - -static void guc_client_update_wq_rsvd(struct i915_guc_client *client, int size) -{ - unsigned long flags; - - spin_lock_irqsave(&client->wq_lock, flags); - client->wq_rsvd += size; - spin_unlock_irqrestore(&client->wq_lock, flags); -} - -void i915_guc_wq_unreserve(struct drm_i915_gem_request *request) -{ - const int wqi_size = sizeof(struct guc_wq_item); - struct i915_guc_client *client = request->i915->guc.execbuf_client; - - GEM_BUG_ON(READ_ONCE(client->wq_rsvd) < wqi_size); - guc_client_update_wq_rsvd(client, -wqi_size); -} - /* Construct a Work Item and append it to the GuC's Work Queue */ static void guc_wq_item_append(struct i915_guc_client *client, struct drm_i915_gem_request *rq) { /* wqi_len is in DWords, and does not include the one-word header */ const size_t wqi_size = sizeof(struct guc_wq_item); - const u32 wqi_len = wqi_size/sizeof(u32) - 1; + const u32 wqi_len = wqi_size / sizeof(u32) - 1; struct intel_engine_cs *engine = rq->engine; + struct i915_gem_context *ctx = rq->ctx; struct guc_process_desc *desc = __get_process_desc(client); struct guc_wq_item *wqi; - u32 freespace, tail, wq_off; + u32 ring_tail, wq_off; - /* Free space is guaranteed, see i915_guc_wq_reserve() above */ - freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size); - GEM_BUG_ON(freespace < wqi_size); + lockdep_assert_held(&client->wq_lock); - /* The GuC firmware wants the tail index in QWords, not bytes */ - tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3; - GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); + ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); + GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX); /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we * should not have the case where structure wqi is across page, neither @@ -491,29 +431,29 @@ static void guc_wq_item_append(struct i915_guc_client *client, * workqueue buffer dw by dw. */ BUILD_BUG_ON(wqi_size != 16); - GEM_BUG_ON(client->wq_rsvd < wqi_size); - /* postincrement WQ tail for next time */ - wq_off = client->wq_tail; + /* Free space is guaranteed. 
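+	 * One work item per execlist port per engine is the most that can
+	 * ever be outstanding (i915_guc_submission_enable() BUILD_BUG_ON()s
+	 * that this total fits in GUC_WQ_SIZE), and the GuC removes an item
+	 * from the queue before the request completes, so the tail cannot
+	 * catch up with the head here.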
*/ + wq_off = READ_ONCE(desc->tail); + GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head), + GUC_WQ_SIZE) < wqi_size); GEM_BUG_ON(wq_off & (wqi_size - 1)); - client->wq_tail += wqi_size; - client->wq_tail &= client->wq_size - 1; - client->wq_rsvd -= wqi_size; /* WQ starts from the page after doorbell / process_desc */ wqi = client->vaddr + wq_off + GUC_DB_SIZE; /* Now fill in the 4-word work queue item */ wqi->header = WQ_TYPE_INORDER | - (wqi_len << WQ_LEN_SHIFT) | - (engine->guc_id << WQ_TARGET_SHIFT) | - WQ_NO_WCFLUSH_WAIT; + (wqi_len << WQ_LEN_SHIFT) | + (engine->guc_id << WQ_TARGET_SHIFT) | + WQ_NO_WCFLUSH_WAIT; - /* The GuC wants only the low-order word of the context descriptor */ - wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine); + wqi->context_desc = lower_32_bits(intel_lr_context_descriptor(ctx, engine)); - wqi->submit_element_info = tail << WQ_RING_TAIL_SHIFT; + wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT; wqi->fence_id = rq->global_seqno; + + /* Postincrement WQ tail for next time. */ + WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1)); } static void guc_reset_wq(struct i915_guc_client *client) @@ -522,106 +462,64 @@ static void guc_reset_wq(struct i915_guc_client *client) desc->head = 0; desc->tail = 0; - - client->wq_tail = 0; } -static int guc_ring_doorbell(struct i915_guc_client *client) +static void guc_ring_doorbell(struct i915_guc_client *client) { - struct guc_process_desc *desc = __get_process_desc(client); - union guc_doorbell_qw db_cmp, db_exc, db_ret; - union guc_doorbell_qw *db; - int attempt = 2, ret = -EAGAIN; - - /* Update the tail so it is visible to GuC */ - desc->tail = client->wq_tail; - - /* current cookie */ - db_cmp.db_status = GUC_DOORBELL_ENABLED; - db_cmp.cookie = client->doorbell_cookie; + struct guc_doorbell_info *db; + u32 cookie; - /* cookie to be updated */ - db_exc.db_status = GUC_DOORBELL_ENABLED; - db_exc.cookie = client->doorbell_cookie + 1; - if (db_exc.cookie == 0) - db_exc.cookie = 1; + lockdep_assert_held(&client->wq_lock); /* pointer of current doorbell cacheline */ - db = (union guc_doorbell_qw *)__get_doorbell(client); - - while (attempt--) { - /* lets ring the doorbell */ - db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db, - db_cmp.value_qw, db_exc.value_qw); - - /* if the exchange was successfully executed */ - if (db_ret.value_qw == db_cmp.value_qw) { - /* db was successfully rung */ - client->doorbell_cookie = db_exc.cookie; - ret = 0; - break; - } - - /* XXX: doorbell was lost and need to acquire it again */ - if (db_ret.db_status == GUC_DOORBELL_DISABLED) - break; + db = __get_doorbell(client); - DRM_WARN("Cookie mismatch. Expected %d, found %d\n", - db_cmp.cookie, db_ret.cookie); - - /* update the cookie to newly read cookie from GuC */ - db_cmp.cookie = db_ret.cookie; - db_exc.cookie = db_ret.cookie + 1; - if (db_exc.cookie == 0) - db_exc.cookie = 1; - } + /* we're not expecting the doorbell cookie to change behind our back */ + cookie = READ_ONCE(db->cookie); + WARN_ON_ONCE(xchg(&db->cookie, cookie + 1) != cookie); - return ret; + /* XXX: doorbell was lost and need to acquire it again */ + GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED); } /** - * __i915_guc_submit() - Submit commands through GuC - * @rq: request associated with the commands - * - * The caller must have already called i915_guc_wq_reserve() above with - * a result of 0 (success), guaranteeing that there is space in the work - * queue for the new request, so enqueuing the item cannot fail. 
- *
- * Bad Things Will Happen if the caller violates this protocol e.g. calls
- * submit() when _reserve() says there's no space, or calls _submit()
- * a different number of times from (successful) calls to _reserve().
+ * i915_guc_submit() - Submit commands through GuC
+ * @engine: engine associated with the commands
  *
  * The only error here arises if the doorbell hardware isn't functioning
  * as expected, which really shouldn't happen.
  */
-static void __i915_guc_submit(struct drm_i915_gem_request *rq)
+static void i915_guc_submit(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = rq->i915;
-	struct intel_engine_cs *engine = rq->engine;
-	unsigned int engine_id = engine->id;
-	struct intel_guc *guc = &rq->i915->guc;
+	struct drm_i915_private *dev_priv = engine->i915;
+	struct intel_guc *guc = &dev_priv->guc;
	struct i915_guc_client *client = guc->execbuf_client;
-	unsigned long flags;
-	int b_ret;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct execlist_port *port = execlists->port;
+	const unsigned int engine_id = engine->id;
+	unsigned int n;

-	/* WA to flush out the pending GMADR writes to ring buffer. */
-	if (i915_vma_is_map_and_fenceable(rq->ring->vma))
-		POSTING_READ_FW(GUC_STATUS);
+	for (n = 0; n < ARRAY_SIZE(execlists->port); n++) {
+		struct drm_i915_gem_request *rq;
+		unsigned int count;

-	spin_lock_irqsave(&client->wq_lock, flags);
+		rq = port_unpack(&port[n], &count);
+		if (rq && count == 0) {
+			port_set(&port[n], port_pack(rq, ++count));

-	guc_wq_item_append(client, rq);
-	b_ret = guc_ring_doorbell(client);
+			if (i915_vma_is_map_and_fenceable(rq->ring->vma))
+				POSTING_READ_FW(GUC_STATUS);

-	client->submissions[engine_id] += 1;
+			spin_lock(&client->wq_lock);

-	spin_unlock_irqrestore(&client->wq_lock, flags);
-}
+			guc_wq_item_append(client, rq);
+			guc_ring_doorbell(client);

-static void i915_guc_submit(struct drm_i915_gem_request *rq)
-{
-	__i915_gem_request_submit(rq);
-	__i915_guc_submit(rq);
+			client->submissions[engine_id] += 1;
+
+			spin_unlock(&client->wq_lock);
+		}
+	}
 }

 static void nested_enable_signaling(struct drm_i915_gem_request *rq)
@@ -655,27 +553,33 @@ static void port_assign(struct execlist_port *port,
	if (port_isset(port))
		i915_gem_request_put(port_request(port));

-	port_set(port, i915_gem_request_get(rq));
+	port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
	nested_enable_signaling(rq);
 }

-static bool i915_guc_dequeue(struct intel_engine_cs *engine)
+static void i915_guc_dequeue(struct intel_engine_cs *engine)
 {
-	struct execlist_port *port = engine->execlist_port;
-	struct drm_i915_gem_request *last = port_request(port);
-	struct rb_node *rb;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct execlist_port *port = execlists->port;
+	struct drm_i915_gem_request *last = NULL;
+	const struct execlist_port * const last_port =
+		&execlists->port[execlists->port_mask];
	bool submit = false;
+	struct rb_node *rb;
+
+	if (port_isset(port))
+		port++;

	spin_lock_irq(&engine->timeline->lock);
-	rb = engine->execlist_first;
-	GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb);
+	rb = execlists->first;
+	GEM_BUG_ON(rb_first(&execlists->queue) != rb);
	while (rb) {
		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
		struct drm_i915_gem_request *rq, *rn;

		list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
			if (last && rq->ctx != last->ctx) {
-				if (port != engine->execlist_port) {
+				if (port == last_port) {
					__list_del_many(&p->requests,
							&rq->priotree.link);
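+					/*
+					 * Every port is in use and this
+					 * request needs a fresh context;
+					 * the requests taken so far were
+					 * unlinked above, the rest stay
+					 * queued for the next dequeue.
+					 */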
goto done; @@ -689,50 +593,51 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine) INIT_LIST_HEAD(&rq->priotree.link); rq->priotree.priority = INT_MAX; - i915_guc_submit(rq); - trace_i915_gem_request_in(rq, port_index(port, engine)); + __i915_gem_request_submit(rq); + trace_i915_gem_request_in(rq, port_index(port, execlists)); last = rq; submit = true; } rb = rb_next(rb); - rb_erase(&p->node, &engine->execlist_queue); + rb_erase(&p->node, &execlists->queue); INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); } done: - engine->execlist_first = rb; - if (submit) + execlists->first = rb; + if (submit) { port_assign(port, last); + execlists_set_active(execlists, EXECLISTS_ACTIVE_USER); + i915_guc_submit(engine); + } spin_unlock_irq(&engine->timeline->lock); - - return submit; } static void i915_guc_irq_handler(unsigned long data) { - struct intel_engine_cs *engine = (struct intel_engine_cs *)data; - struct execlist_port *port = engine->execlist_port; + struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port *port = execlists->port; + const struct execlist_port * const last_port = + &execlists->port[execlists->port_mask]; struct drm_i915_gem_request *rq; - bool submit; - do { - rq = port_request(&port[0]); - while (rq && i915_gem_request_completed(rq)) { - trace_i915_gem_request_out(rq); - i915_gem_request_put(rq); + rq = port_request(&port[0]); + while (rq && i915_gem_request_completed(rq)) { + trace_i915_gem_request_out(rq); + i915_gem_request_put(rq); - port[0] = port[1]; - memset(&port[1], 0, sizeof(port[1])); + execlists_port_complete(execlists, port); - rq = port_request(&port[0]); - } + rq = port_request(&port[0]); + } + if (!rq) + execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER); - submit = false; - if (!port_count(&port[1])) - submit = i915_guc_dequeue(engine); - } while (submit); + if (!port_isset(last_port)) + i915_guc_dequeue(engine); } /* @@ -913,8 +818,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv, client->engines = engines; client->priority = priority; client->doorbell_id = GUC_DOORBELL_INVALID; - client->wq_offset = GUC_DB_SIZE; - client->wq_size = GUC_WQ_SIZE; spin_lock_init(&client->wq_lock); ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS, @@ -996,28 +899,39 @@ static void guc_client_free(struct i915_guc_client *client) kfree(client); } +static void guc_policy_init(struct guc_policy *policy) +{ + policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US; + policy->preemption_time = POLICY_DEFAULT_PREEMPTION_TIME_US; + policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US; + policy->policy_flags = 0; +} + static void guc_policies_init(struct guc_policies *policies) { struct guc_policy *policy; u32 p, i; - policies->dpc_promote_time = 500000; + policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US; policies->max_num_work_items = POLICY_MAX_NUM_WI; for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) { for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) { policy = &policies->policy[p][i]; - policy->execution_quantum = 1000000; - policy->preemption_time = 500000; - policy->fault_time = 250000; - policy->policy_flags = 0; + guc_policy_init(policy); } } policies->is_valid = 1; } +/* + * The first 80 dwords of the register state context, containing the + * execlists and ppgtt registers. 
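+ * Together with the PPHWSP page, this is the portion of the context
+ * image that the GuC skips over when reading the golden context (see
+ * the eng_state_size calculation below).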
+ */ +#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32)) + static int guc_ads_create(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); @@ -1032,6 +946,8 @@ static int guc_ads_create(struct intel_guc *guc) } __packed *blob; struct intel_engine_cs *engine; enum intel_engine_id id; + const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE; + const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; u32 base; GEM_BUG_ON(guc->ads_vma); @@ -1062,13 +978,20 @@ static int guc_ads_create(struct intel_guc *guc) * engines after a reset. Here we use the Render ring default * context, which must already exist and be pinned in the GGTT, * so its address won't change after we've told the GuC where - * to find it. + * to find it. Note that we have to skip our header (1 page), + * because our GuC shared data is there. */ blob->ads.golden_context_lrca = - dev_priv->engine[RCS]->status_page.ggtt_offset; + guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) + skipped_offset; + /* + * The GuC expects us to exclude the portion of the context image that + * it skips from the size it is to read. It starts reading from after + * the execlist context (so skipping the first page [PPHWSP] and 80 + * dwords). Weird guc is weird. + */ for_each_engine(engine, dev_priv, id) - blob->ads.eng_state_size[engine->guc_id] = engine->context_size; + blob->ads.eng_state_size[engine->guc_id] = engine->context_size - skipped_size; base = guc_ggtt_offset(vma); blob->ads.scheduler_policies = base + ptr_offset(blob, policies); @@ -1221,6 +1144,19 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) enum intel_engine_id id; int err; + /* + * We're using GuC work items for submitting work through GuC. Since + * we're coalescing multiple requests from a single context into a + * single work item prior to assigning it to execlist_port, we can + * never have more work items than the total number of ports (for all + * engines). The GuC firmware is controlling the HEAD of work queue, + * and it is guaranteed that it will remove the work item from the + * queue before our request is completed. + */ + BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) * + sizeof(struct guc_wq_item) * + I915_NUM_ENGINES > GUC_WQ_SIZE); + if (!client) { client = guc_client_alloc(dev_priv, INTEL_INFO(dev_priv)->ring_mask, @@ -1248,24 +1184,15 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) guc_interrupts_capture(dev_priv); for_each_engine(engine, dev_priv, id) { - const int wqi_size = sizeof(struct guc_wq_item); - struct drm_i915_gem_request *rq; - + struct intel_engine_execlists * const execlists = &engine->execlists; /* The tasklet was initialised by execlists, and may be in * a state of flux (across a reset) and so we just want to * take over the callback without changing any other state * in the tasklet. 
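+	 * Scheduling the tasklet below also kicks submission of any
+	 * requests that were queued before GuC took over.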
	 */
-		engine->irq_tasklet.func = i915_guc_irq_handler;
+		execlists->irq_tasklet.func = i915_guc_irq_handler;
		clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-
-		/* Replay the current set of previously submitted requests */
-		spin_lock_irq(&engine->timeline->lock);
-		list_for_each_entry(rq, &engine->timeline->requests, link) {
-			guc_client_update_wq_rsvd(client, wqi_size);
-			__i915_guc_submit(rq);
-		}
-		spin_unlock_irq(&engine->timeline->lock);
+		tasklet_schedule(&execlists->irq_tasklet);
	}

	return 0;
@@ -1310,7 +1237,7 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv)
	/* any value greater than GUC_POWER_D0 */
	data[1] = GUC_POWER_D1;
	/* first page is shared data with GuC */
-	data[2] = guc_ggtt_offset(ctx->engine[RCS].state);
+	data[2] = guc_ggtt_offset(ctx->engine[RCS].state) + LRC_GUCSHR_PN * PAGE_SIZE;

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
 }
@@ -1328,7 +1255,7 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
	if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
		return 0;

-	if (i915.guc_log_level >= 0)
+	if (i915_modparams.guc_log_level >= 0)
		gen9_enable_guc_interrupts(dev_priv);

	ctx = dev_priv->kernel_context;

	data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
	data[1] = GUC_POWER_D0;
	/* first page is shared data with GuC */
-	data[2] = guc_ggtt_offset(ctx->engine[RCS].state);
+	data[2] = guc_ggtt_offset(ctx->engine[RCS].state) + LRC_GUCSHR_PN * PAGE_SIZE;

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b63893eeca73..c0cbbefa2f17 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -37,6 +37,10 @@
 #include "i915_trace.h"
 #include "intel_drv.h"

+#if IS_ENABLED(CONFIG_DRM_I915_GVT)
+#include "gvt.h"
+#endif
+
 /**
  * DOC: interrupt handling
  *
@@ -485,11 +489,12 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
  * @enabled_irq_mask: mask of interrupt bits to enable
  */
 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
-			 enum pipe pipe,
+			 unsigned int crtc_index,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
 {
	uint32_t new_val;
+	enum pipe pipe;

	lockdep_assert_held(&dev_priv->irq_lock);

@@ -498,6 +503,9 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

+	if (get_pipe_from_crtc_index(&dev_priv->drm, crtc_index, &pipe))
+		return;
+
	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);
@@ -765,9 +773,14 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
 }

-static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+static u32 g4x_get_vblank_counter(struct drm_device *dev,
+				  unsigned int crtc_index)
 {
	struct drm_i915_private *dev_priv = to_i915(dev);
+	enum pipe pipe;
+
+	if (get_pipe_from_crtc_index(dev, crtc_index, &pipe))
+		return 0;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
 }
@@ -829,18 +842,21 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
	return (position + crtc->scanline_offset) % vtotal;
 }

-static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
-				     bool in_vblank_irq, int *vpos, int *hpos,
-				     ktime_t *stime, ktime_t *etime,
-				     const struct drm_display_mode *mode)
+static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc_index,
+				     bool
in_vblank_irq, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode) { struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, - pipe); + struct intel_crtc *intel_crtc; + enum pipe pipe; int position; int vbl_start, vbl_end, hsync_start, htotal, vtotal; unsigned long irqflags; + intel_crtc = get_intel_crtc_from_index(dev, crtc_index); + pipe = intel_crtc->pipe; + if (WARN_ON(!mode->crtc_clock)) { DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " "pipe %c\n", pipe_name(pipe)); @@ -1005,6 +1021,8 @@ static void notify_ring(struct intel_engine_cs *engine) spin_lock(&engine->breadcrumbs.irq_lock); wait = engine->breadcrumbs.irq_wait; if (wait) { + bool wakeup = engine->irq_seqno_barrier; + /* We use a callback from the dma-fence to submit * requests after waiting on our own requests. To * ensure minimum delay in queuing the next request to @@ -1017,12 +1035,18 @@ static void notify_ring(struct intel_engine_cs *engine) * and many waiters. */ if (i915_seqno_passed(intel_engine_get_seqno(engine), - wait->seqno) && - !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &wait->request->fence.flags)) - rq = i915_gem_request_get(wait->request); + wait->seqno)) { + struct drm_i915_gem_request *waiter = wait->request; + + wakeup = true; + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &waiter->fence.flags) && + intel_wait_check_request(wait, waiter)) + rq = i915_gem_request_get(waiter); + } - wake_up_process(wait->tsk); + if (wakeup) + wake_up_process(wait->tsk); } else { __intel_engine_disarm_breadcrumbs(engine); } @@ -1108,7 +1132,7 @@ static void gen6_pm_rps_work(struct work_struct *work) if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) goto out; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); @@ -1162,7 +1186,7 @@ static void gen6_pm_rps_work(struct work_struct *work) dev_priv->rps.last_adj = 0; } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); out: /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ @@ -1305,55 +1329,49 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, static void gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) { + struct intel_engine_execlists * const execlists = &engine->execlists; bool tasklet = false; if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) { - if (port_count(&engine->execlist_port[0])) { - __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - tasklet = true; - } + if (READ_ONCE(engine->execlists.active)) + tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST, + &engine->irq_posted); } if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) { notify_ring(engine); - tasklet |= i915.enable_guc_submission; + tasklet |= i915_modparams.enable_guc_submission; + } + + if ((iir & (GT_RENDER_CS_MASTER_ERROR_INTERRUPT << test_shift)) && + intel_vgpu_active(engine->i915)) { + queue_work(system_highpri_wq, &engine->reset_work); + return; } if (tasklet) - tasklet_hi_schedule(&engine->irq_tasklet); + tasklet_hi_schedule(&execlists->irq_tasklet); } -static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, - u32 master_ctl, - u32 gt_iir[4]) +static void gen8_gt_irq_ack(struct drm_i915_private *dev_priv, + u32 master_ctl, u32 gt_iir[4]) { - irqreturn_t ret = IRQ_NONE; - if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0)); - if (gt_iir[0]) { + if (gt_iir[0]) 
I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]); - ret = IRQ_HANDLED; - } else - DRM_ERROR("The master control interrupt lied (GT0)!\n"); } if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1)); - if (gt_iir[1]) { + if (gt_iir[1]) I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]); - ret = IRQ_HANDLED; - } else - DRM_ERROR("The master control interrupt lied (GT1)!\n"); } if (master_ctl & GEN8_GT_VECS_IRQ) { gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3)); - if (gt_iir[3]) { + if (gt_iir[3]) I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]); - ret = IRQ_HANDLED; - } else - DRM_ERROR("The master control interrupt lied (GT3)!\n"); } if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { @@ -1363,12 +1381,8 @@ static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, I915_WRITE_FW(GEN8_GT_IIR(2), gt_iir[2] & (dev_priv->pm_rps_events | dev_priv->pm_guc_events)); - ret = IRQ_HANDLED; - } else - DRM_ERROR("The master control interrupt lied (PM)!\n"); + } } - - return ret; } static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv, @@ -1534,7 +1548,12 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, struct drm_driver *driver = dev_priv->drm.driver; uint32_t crcs[5]; int head, tail; + u32 frame; + if (!crtc) { + DRM_DEBUG_KMS("No CRTC available for pipe %d\n", pipe); + return; + } spin_lock(&pipe_crc->lock); if (pipe_crc->source) { if (!pipe_crc->entries) { @@ -1554,7 +1573,8 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, entry = &pipe_crc->entries[head]; - entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe); + entry->frame = driver->get_vblank_counter(&dev_priv->drm, + drm_crtc_index(&crtc->base)); entry->crc[0] = crc0; entry->crc[1] = crc1; entry->crc[2] = crc2; @@ -1588,9 +1608,9 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, crcs[2] = crc2; crcs[3] = crc3; crcs[4] = crc4; - drm_crtc_add_crc_entry(&crtc->base, true, - drm_crtc_accurate_vblank_count(&crtc->base), - crcs); + frame = driver->get_vblank_counter(&dev_priv->drm, + drm_crtc_index(&crtc->base)); + drm_crtc_add_crc_entry(&crtc->base, true, frame, crcs); } } #else @@ -1706,6 +1726,17 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) } } + +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +static inline void gvt_notify_vblank(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + if (dev_priv->gvt) + queue_work(system_highpri_wq, + &dev_priv->gvt->pipe_info[pipe].vblank_work); +} +#endif + static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { @@ -1786,10 +1817,38 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) { - u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); + u32 hotplug_status = 0, hotplug_status_mask; + int i; - if (hotplug_status) + if (IS_G4X(dev_priv) || + IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | + DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; + else + hotplug_status_mask = HOTPLUG_INT_STATUS_I915; + + /* + * We absolutely have to clear all the pending interrupt + * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port + * interrupt bit won't have an edge, and the i965/g4x + * edge triggered IIR will not notice that an interrupt + * is still pending. 
We can't use PORT_HOTPLUG_EN to + * guarantee the edge as the act of toggling the enable + * bits can itself generate a new hotplug interrupt :( + */ + for (i = 0; i < 10; i++) { + u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; + + if (tmp == 0) + return hotplug_status; + + hotplug_status |= tmp; I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + } + + WARN_ONCE(1, + "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", + I915_READ(PORT_HOTPLUG_STAT)); return hotplug_status; } @@ -2387,6 +2446,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) irqreturn_t ret = IRQ_NONE; u32 iir; enum pipe pipe; + struct intel_crtc *crtc; if (master_ctl & GEN8_DE_MISC_IRQ) { iir = I915_READ(GEN8_DE_MISC_IIR); @@ -2465,8 +2525,13 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) ret = IRQ_HANDLED; I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); - if (iir & GEN8_PIPE_VBLANK) - drm_handle_vblank(&dev_priv->drm, pipe); + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + if (iir & GEN8_PIPE_VBLANK) { + drm_handle_vblank(&dev_priv->drm, drm_crtc_index(&crtc->base)); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + gvt_notify_vblank(dev_priv, pipe); +#endif + } if (iir & GEN8_PIPE_CDCLK_CRC_DONE) hsw_pipe_crc_irq_handler(dev_priv, pipe); @@ -2521,7 +2586,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) struct drm_i915_private *dev_priv = to_i915(dev); u32 master_ctl; u32 gt_iir[4] = {}; - irqreturn_t ret; if (!intel_irqs_enabled(dev_priv)) return IRQ_NONE; @@ -2537,16 +2601,16 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) disable_rpm_wakeref_asserts(dev_priv); /* Find, clear, then process each source of interrupt */ - ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); + gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); gen8_gt_irq_handler(dev_priv, gt_iir); - ret |= gen8_de_irq_handler(dev_priv, master_ctl); + gen8_de_irq_handler(dev_priv, master_ctl); I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); POSTING_READ_FW(GEN8_MASTER_IRQ); enable_rpm_wakeref_asserts(dev_priv); - return ret; + return IRQ_HANDLED; } struct wedge_me { @@ -2589,15 +2653,10 @@ static void __fini_wedge(struct wedge_me *w) (W)->i915; \ __fini_wedge((W))) -/** - * i915_reset_device - do process context error handling work - * @dev_priv: i915 device private - * - * Fire an error uevent so userspace can see that a hang or error - * was detected. - */ -static void i915_reset_device(struct drm_i915_private *dev_priv) +static void i915_reset_device(struct drm_i915_private *dev_priv, + const char *msg) { + struct i915_gpu_error *error = &dev_priv->gpu_error; struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; @@ -2613,29 +2672,32 @@ static void i915_reset_device(struct drm_i915_private *dev_priv) i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { intel_prepare_reset(dev_priv); + error->reason = msg; + /* Signal that locked waiters should reset the GPU */ - set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags); - wake_up_all(&dev_priv->gpu_error.wait_queue); + set_bit(I915_RESET_HANDOFF, &error->flags); + wake_up_all(&error->wait_queue); /* Wait for anyone holding the lock to wakeup, without * blocking indefinitely on struct_mutex. 
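	 * A waiter that already holds struct_mutex may claim the
	 * I915_RESET_HANDOFF bit and perform the reset itself (see
	 * __i915_wait_request_check_and_reset()), which ends this loop.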
*/ do { if (mutex_trylock(&dev_priv->drm.struct_mutex)) { - i915_reset(dev_priv, 0); + i915_reset(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); } - } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags, + } while (wait_on_bit_timeout(&error->flags, I915_RESET_HANDOFF, TASK_UNINTERRUPTIBLE, 1)); + error->reason = NULL; + intel_finish_reset(dev_priv); } - if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) - kobject_uevent_env(kobj, - KOBJ_CHANGE, reset_done_event); + if (!test_bit(I915_WEDGED, &error->flags)) + kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); } static void i915_clear_error_registers(struct drm_i915_private *dev_priv) @@ -2667,6 +2729,7 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv) * i915_handle_error - handle a gpu error * @dev_priv: i915 device private * @engine_mask: mask representing engines that are hung + * @flags: control flags * @fmt: Error message format string * * Do some basic checking of register state at error time and @@ -2677,16 +2740,23 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv) */ void i915_handle_error(struct drm_i915_private *dev_priv, u32 engine_mask, + unsigned long flags, const char *fmt, ...) { struct intel_engine_cs *engine; unsigned int tmp; - va_list args; char error_msg[80]; + char *msg = NULL; + + if (fmt) { + va_list args; - va_start(args, fmt); - vscnprintf(error_msg, sizeof(error_msg), fmt, args); - va_end(args); + va_start(args, fmt); + vscnprintf(error_msg, sizeof(error_msg), fmt, args); + va_end(args); + + msg = error_msg; + } /* * In most cases it's guaranteed that we get here with an RPM @@ -2697,8 +2767,10 @@ void i915_handle_error(struct drm_i915_private *dev_priv, */ intel_runtime_pm_get(dev_priv); - i915_capture_error_state(dev_priv, engine_mask, error_msg); - i915_clear_error_registers(dev_priv); + if (flags & I915_ERROR_CAPTURE) { + i915_capture_error_state(dev_priv, engine_mask, error_msg); + i915_clear_error_registers(dev_priv); + } /* * Try engine reset when available. 
We fall back to full reset if
@@ -2711,7 +2783,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
				     &dev_priv->gpu_error.flags))
			continue;

-		if (i915_reset_engine(engine, 0) == 0)
+		if (i915_reset_engine(engine, msg) == 0)
			engine_mask &= ~intel_engine_flag(engine);

		clear_bit(I915_RESET_ENGINE + engine->id,
@@ -2741,7 +2813,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
					    TASK_UNINTERRUPTIBLE);
	}

-	i915_reset_device(dev_priv);
+	i915_reset_device(dev_priv, msg);

	for_each_engine(engine, dev_priv, tmp) {
		clear_bit(I915_RESET_ENGINE + engine->id,
@@ -2851,7 +2923,9 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
+	/* since the guest sees all the pipes, we don't want it to disable vblank */
+	if (!dev_priv->gvt)
+		bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }

@@ -3407,6 +3481,19 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

+	if (intel_vgpu_active(dev_priv)) {
+		gt_interrupts[0] |= GT_RENDER_CS_MASTER_ERROR_INTERRUPT <<
+				    GEN8_RCS_IRQ_SHIFT |
+				    GT_RENDER_CS_MASTER_ERROR_INTERRUPT <<
+				    GEN8_BCS_IRQ_SHIFT;
+		gt_interrupts[1] |= GT_RENDER_CS_MASTER_ERROR_INTERRUPT <<
+				    GEN8_VCS1_IRQ_SHIFT |
+				    GT_RENDER_CS_MASTER_ERROR_INTERRUPT <<
+				    GEN8_VCS2_IRQ_SHIFT;
+		gt_interrupts[3] |= GT_RENDER_CS_MASTER_ERROR_INTERRUPT <<
+				    GEN8_VECS_IRQ_SHIFT;
+	}
+
	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
@@ -3429,8 +3516,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 9) {
-		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
-				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 8ab003dca113..2faae2d21aed 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -24,8 +24,16 @@

 #include "i915_params.h"
 #include "i915_drv.h"
+#include "i915_pvinfo.h"

-struct i915_params i915 __read_mostly = {
+#define i915_param_named(name, T, perm, desc) \
+	module_param_named(name, i915_modparams.name, T, perm); \
+	MODULE_PARM_DESC(name, desc)
+#define i915_param_named_unsafe(name, T, perm, desc) \
+	module_param_named_unsafe(name, i915_modparams.name, T, perm); \
+	MODULE_PARM_DESC(name, desc)
+
+struct i915_params i915_modparams __read_mostly = {
	.modeset = -1,
	.panel_ignore_lid = 1,
	.semaphores = -1,
@@ -56,8 +64,8 @@ struct i915_params i915 __read_mostly = {
	.verbose_state_checks = 1,
	.nuclear_pageflip = 0,
	.edp_vswing = 0,
-	.enable_guc_loading = 0,
-	.enable_guc_submission = 0,
+	.enable_guc_loading = 1,
+	.enable_guc_submission = 1,
	.guc_log_level = -1,
	.guc_firmware_path = NULL,
	.huc_firmware_path = NULL,
@@ -65,195 +73,284 @@ struct i915_params i915 __read_mostly = {
	.inject_load_failure = 0,
	.enable_dpcd_backlight = false,
	.enable_gvt = false,
+	.enable_pvmmio = PVMMIO_ELSP_SUBMIT | PVMMIO_PLANE_UPDATE |
+			 PVMMIO_PLANE_WM_UPDATE | PVMMIO_PPGTT_UPDATE,
+	.enable_gvt_oos = 1,
+	.enable_conformance_check = true,
+	.disable_gvt_fw_loading = true,
+	.gvt_workload_priority = 0,
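+	/* gvt_workload_priority: range (-1023, 1023), more positive is higher priority */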
+ .enable_initial_modeset = false, + .splash = NULL, + .avail_planes_per_pipe = 0, + .domain_plane_owners = 0, + .bg_color = 0x00000000, + .gvt_emulate_hdmi = true, + .domain_scaler_owner = 0x21100, + .memtrack_debug = 1, }; -module_param_named(modeset, i915.modeset, int, 0400); -MODULE_PARM_DESC(modeset, +i915_param_named(modeset, int, 0400, "Use kernel modesetting [KMS] (0=disable, " "1=on, -1=force vga console preference [default])"); -module_param_named_unsafe(panel_ignore_lid, i915.panel_ignore_lid, int, 0600); -MODULE_PARM_DESC(panel_ignore_lid, +i915_param_named_unsafe(panel_ignore_lid, int, 0600, "Override lid status (0=autodetect, 1=autodetect disabled [default], " "-1=force lid closed, -2=force lid open)"); -module_param_named_unsafe(semaphores, i915.semaphores, int, 0400); -MODULE_PARM_DESC(semaphores, +i915_param_named_unsafe(semaphores, int, 0400, "Use semaphores for inter-ring sync " "(default: -1 (use per-chip defaults))"); -module_param_named_unsafe(enable_rc6, i915.enable_rc6, int, 0400); -MODULE_PARM_DESC(enable_rc6, +i915_param_named_unsafe(enable_rc6, int, 0400, "Enable power-saving render C-state 6. " "Different stages can be selected via bitmask values " "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). " "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. " "default: -1 (use per-chip default)"); -module_param_named_unsafe(enable_dc, i915.enable_dc, int, 0400); -MODULE_PARM_DESC(enable_dc, +i915_param_named_unsafe(enable_dc, int, 0400, "Enable power-saving display C-states. " "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)"); -module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600); -MODULE_PARM_DESC(enable_fbc, +i915_param_named_unsafe(enable_fbc, int, 0600, "Enable frame buffer compression for power savings " "(default: -1 (use per-chip default))"); -module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0400); -MODULE_PARM_DESC(lvds_channel_mode, +i915_param_named_unsafe(lvds_channel_mode, int, 0400, "Specify LVDS channel mode " "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); -module_param_named_unsafe(lvds_use_ssc, i915.panel_use_ssc, int, 0600); -MODULE_PARM_DESC(lvds_use_ssc, +i915_param_named_unsafe(panel_use_ssc, int, 0600, "Use Spread Spectrum Clock with panels [LVDS/eDP] " "(default: auto from VBT)"); -module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0400); -MODULE_PARM_DESC(vbt_sdvo_panel_type, +i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400, "Override/Ignore selection of SDVO panel mode in the VBT " "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); -module_param_named_unsafe(reset, i915.reset, int, 0600); -MODULE_PARM_DESC(reset, "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])"); +i915_param_named_unsafe(reset, int, 0600, + "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])"); -module_param_named_unsafe(vbt_firmware, i915.vbt_firmware, charp, 0400); -MODULE_PARM_DESC(vbt_firmware, - "Load VBT from specified file under /lib/firmware"); +i915_param_named_unsafe(vbt_firmware, charp, 0400, + "Load VBT from specified file under /lib/firmware"); #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) -module_param_named(error_capture, i915.error_capture, bool, 0600); -MODULE_PARM_DESC(error_capture, +i915_param_named(error_capture, bool, 0600, "Record the GPU state following a hang. 
" "This information in /sys/class/drm/card/error is vital for " "triaging and debugging hangs."); #endif -module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644); -MODULE_PARM_DESC(enable_hangcheck, +i915_param_named_unsafe(enable_hangcheck, bool, 0644, "Periodically check GPU activity for detecting hangs. " "WARNING: Disabling this can cause system wide hangs. " "(default: true)"); -module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400); -MODULE_PARM_DESC(enable_ppgtt, +i915_param_named_unsafe(enable_ppgtt, int, 0400, "Override PPGTT usage. " "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)"); -module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400); -MODULE_PARM_DESC(enable_execlists, +i915_param_named_unsafe(enable_execlists, int, 0400, "Override execlists usage. " "(-1=auto [default], 0=disabled, 1=enabled)"); -module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600); -MODULE_PARM_DESC(enable_psr, "Enable PSR " - "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) " - "Default: -1 (use per-chip default)"); +i915_param_named_unsafe(enable_psr, int, 0600, + "Enable PSR " + "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) " + "Default: -1 (use per-chip default)"); -module_param_named_unsafe(alpha_support, i915.alpha_support, bool, 0400); -MODULE_PARM_DESC(alpha_support, +i915_param_named_unsafe(alpha_support, bool, 0400, "Enable alpha quality driver support for latest hardware. " "See also CONFIG_DRM_I915_ALPHA_SUPPORT."); -module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400); -MODULE_PARM_DESC(disable_power_well, +i915_param_named_unsafe(disable_power_well, int, 0400, "Disable display power wells when possible " "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)"); -module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600); -MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); +i915_param_named_unsafe(enable_ips, int, 0600, "Enable IPS (default: true)"); -module_param_named(fastboot, i915.fastboot, bool, 0600); -MODULE_PARM_DESC(fastboot, +i915_param_named(fastboot, bool, 0600, "Try to skip unnecessary mode sets at boot time (default: false)"); -module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600); -MODULE_PARM_DESC(prefault_disable, +i915_param_named_unsafe(prefault_disable, bool, 0600, "Disable page prefaulting for pread/pwrite/reloc (default:false). " "For developers only."); -module_param_named_unsafe(load_detect_test, i915.load_detect_test, bool, 0600); -MODULE_PARM_DESC(load_detect_test, +i915_param_named_unsafe(load_detect_test, bool, 0600, "Force-enable the VGA load detect code for testing (default:false). " "For developers only."); -module_param_named_unsafe(force_reset_modeset_test, i915.force_reset_modeset_test, bool, 0600); -MODULE_PARM_DESC(force_reset_modeset_test, +i915_param_named_unsafe(force_reset_modeset_test, bool, 0600, "Force a modeset during gpu reset for testing (default:false). 
" "For developers only."); -module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600); -MODULE_PARM_DESC(invert_brightness, +i915_param_named_unsafe(invert_brightness, int, 0600, "Invert backlight brightness " "(-1 force normal, 0 machine defaults, 1 force inversion), please " "report PCI device ID, subsystem vendor and subsystem device ID " "to dri-devel@lists.freedesktop.org, if your machine needs it. " "It will then be included in an upcoming module version."); -module_param_named(disable_display, i915.disable_display, bool, 0400); -MODULE_PARM_DESC(disable_display, "Disable display (default: false)"); +i915_param_named(disable_display, bool, 0400, + "Disable display (default: false)"); -module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, bool, 0400); -MODULE_PARM_DESC(enable_cmd_parser, - "Enable command parsing (true=enabled [default], false=disabled)"); +i915_param_named_unsafe(enable_cmd_parser, bool, 0400, + "Enable command parsing (true=enabled [default], false=disabled)"); -module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600); -MODULE_PARM_DESC(use_mmio_flip, - "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); +i915_param_named_unsafe(use_mmio_flip, int, 0600, + "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); -module_param_named(mmio_debug, i915.mmio_debug, int, 0600); -MODULE_PARM_DESC(mmio_debug, +i915_param_named(mmio_debug, int, 0600, "Enable the MMIO debug code for the first N failures (default: off). " "This may negatively affect performance."); -module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600); -MODULE_PARM_DESC(verbose_state_checks, +i915_param_named(verbose_state_checks, bool, 0600, "Enable verbose logs (ie. 
WARN_ON()) in case of unexpected hw state conditions."); -module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0400); -MODULE_PARM_DESC(nuclear_pageflip, - "Force enable atomic functionality on platforms that don't have full support yet."); +i915_param_named_unsafe(nuclear_pageflip, bool, 0400, + "Force enable atomic functionality on platforms that don't have full support yet."); /* WA to get away with the default setting in VBT for early platforms.Will be removed */ -module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400); -MODULE_PARM_DESC(edp_vswing, - "Ignore/Override vswing pre-emph table selection from VBT " - "(0=use value from vbt [default], 1=low power swing(200mV)," - "2=default swing(400mV))"); - -module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400); -MODULE_PARM_DESC(enable_guc_loading, - "Enable GuC firmware loading " - "(-1=auto, 0=never [default], 1=if available, 2=required)"); - -module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400); -MODULE_PARM_DESC(enable_guc_submission, - "Enable GuC submission " - "(-1=auto, 0=never [default], 1=if available, 2=required)"); - -module_param_named(guc_log_level, i915.guc_log_level, int, 0400); -MODULE_PARM_DESC(guc_log_level, +i915_param_named_unsafe(edp_vswing, int, 0400, + "Ignore/Override vswing pre-emph table selection from VBT " + "(0=use value from vbt [default], 1=low power swing(200mV)," + "2=default swing(400mV))"); + +i915_param_named_unsafe(enable_guc_loading, int, 0400, + "Enable GuC firmware loading " + "(-1=auto, 0=never, 1=if available [default], 2=required)"); + +i915_param_named_unsafe(enable_guc_submission, int, 0400, + "Enable GuC submission " + "(-1=auto, 0=never, 1=if available [default], 2=required)"); + +i915_param_named(guc_log_level, int, 0400, "GuC firmware logging level (-1:disabled (default), 0-3:enabled)"); -module_param_named_unsafe(guc_firmware_path, i915.guc_firmware_path, charp, 0400); -MODULE_PARM_DESC(guc_firmware_path, +i915_param_named_unsafe(guc_firmware_path, charp, 0400, "GuC firmware path to use instead of the default one"); -module_param_named_unsafe(huc_firmware_path, i915.huc_firmware_path, charp, 0400); -MODULE_PARM_DESC(huc_firmware_path, +i915_param_named_unsafe(huc_firmware_path, charp, 0400, "HuC firmware path to use instead of the default one"); -module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600); -MODULE_PARM_DESC(enable_dp_mst, +i915_param_named_unsafe(enable_dp_mst, bool, 0600, "Enable multi-stream transport (MST) for new DisplayPort sinks. 
(default: true)"); -module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400); -MODULE_PARM_DESC(inject_load_failure, + +i915_param_named_unsafe(inject_load_failure, uint, 0400, "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); -module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600); -MODULE_PARM_DESC(enable_dpcd_backlight, + +i915_param_named(enable_dpcd_backlight, bool, 0600, "Enable support for DPCD backlight control (default:false)"); -module_param_named(enable_gvt, i915.enable_gvt, bool, 0400); -MODULE_PARM_DESC(enable_gvt, +i915_param_named(enable_gvt, bool, 0400, "Enable support for Intel GVT-g graphics virtualization host support(default:false)"); + +module_param_named(enable_pvmmio, i915_modparams.enable_pvmmio, uint, 0400); +MODULE_PARM_DESC(enable_pvmmio, + "Enable pv mmio feature and set pvmmio level, default 1. This parameter " + "could only set from host, guest value is set through vgt_if"); + +module_param_named(enable_gvt_oos, i915_modparams.enable_gvt_oos, bool, 0400); +MODULE_PARM_DESC(enable_gvt_oos, "To toggle the gvt ppgtt page table OOS (Out of Sync) feature."); + +module_param_named(enable_conformance_check, i915_modparams.enable_conformance_check, bool, 0400); +MODULE_PARM_DESC(enable_conformance_check, "To toggle the GVT guest conformance feature."); + +module_param_named(disable_gvt_fw_loading, i915_modparams.disable_gvt_fw_loading, bool, 0400); +MODULE_PARM_DESC(disable_gvt_fw_loading, "Disable GVT-g fw loading."); + +module_param_named(gvt_workload_priority, i915_modparams.gvt_workload_priority, int, 0600); +MODULE_PARM_DESC(gvt_workload_priority, + "Set GVT-g workload priority, (range: (-1023, 1023), default: 0, " + "more positive value means higher priority)."); +module_param_named_unsafe(enable_initial_modeset, i915_modparams.enable_initial_modeset, bool, 0400); +MODULE_PARM_DESC(enable_initial_modeset, + "Do initial modeset (default : false)"); + +i915_param_named_unsafe(splash, charp, 0400, + "Load a splash screen binary image for a specific display splash=::w,h,pitch,crtc_x,crtc_y,crtc_w,crtc_h"); + +module_param_named_unsafe(avail_planes_per_pipe, i915_modparams.avail_planes_per_pipe, uint, 0400); +/* pipeA = BITS 0-3, pipeB = BITS 8-11, pipeC = BITS 16-18 + * +----------+-------+---------+--------+--------+--------+--------+ + * |unused |unused | Pipe C | unused | Pipe B | unused | Pipe A | + * +----------+-------+---------+--------+--------+--------+--------+ + * 31 23 18 15 11 7 3 0 + * + * + * BITS 0,1,2,3 - needs to be set planes assigned for pipes A and B + * and BITs 0,1,2 - for pipe C + * eg: avail_planes_per_pipe = 0x3 - pipe A=2(planes 1 and 2) , pipeB=0 and pipeC=0 planes + * eg: avail_planes_per_pipe = 0x5 - pipe A=2(planes 1 and 3) , pipeB=0 and pipeC=0 planes + * avail_planes_per_pipe = 0x030701 - pipe A =1(plane 1, pipeB=3(planes 1,2 and 3), pipeC=2( planes 1 and 2) + * + */ +MODULE_PARM_DESC(avail_planes_per_pipe, "plane mask for each pipe: \ + set BITS 0-3:pipeA 8-11:pipeB 16-18:pipeC to specify the planes that \ + are available eg: 0x030701 : planes 1:pipeA 1,2,3:pipeB \ + 1,2:pipeC (0x0 - default value)"); +module_param_named_unsafe(domain_plane_owners, i915_modparams.domain_plane_owners, ullong, 0400); +/* pipeA = BITS 0-15 pipeB = 16-31, pipeC = 32-47 + * + * +----------+------------+-------------+------------+ + * |unused | Pipe C | Pipe B | Pipe A | + * 
+----------+------------+-------------+------------+ + * 63 47 31 15 0 + * + * Each nibble represents domain id. 0 for Dom0, 1,2,3...0xF for DomUs + * eg: domain_plane_owners = 0x022111000010 // 0x0221|1100|0010 + * plane domain + * plane_owner1A -0 + * plane_owner2A -1 + * plane_owner3A -0 + * plane_owner4A -0 + * plane_owner1B -0 + * plane_owner2B -0 + * plane_owner3B -1 + * plane_owner4B -1 + * plane_owner1C -1 + * plane_owner2C -2 + * plane_owner3C -2 + * + */ +MODULE_PARM_DESC(domain_plane_owners, "plane owner for each pipe; domain \ + ids can be from 0-F, eg: domain_plane_owners = 0x022111000010 \ + planes owner: 3C:2 2C:2 1C:1 4B:1 3B:1 2B:1 1B:0 4A:0 3A:0 2A:1 1A:0 \ + (0x0 - default value)"); + +module_param_named_unsafe(bg_color, i915_modparams.bg_color, uint, 0400); +MODULE_PARM_DESC(bg_color, "Set the background (canvas) color"); + +module_param_named(gvt_emulate_hdmi, i915_modparams.gvt_emulate_hdmi, bool, 0400); +MODULE_PARM_DESC(gvt_emulate_hdmi, "GVT-g emulate HDMI or DP port for Guest OS."); + +module_param_named_unsafe(domain_scaler_owner, + i915_modparams.domain_scaler_owner, int, 0400); +/* pipeA scaler = BITS 0-7 pipeB scaler = 8-15, pipeC = 16-19 + * + * +----------+------------+-------------+------------+ + * |unused | Pipe C | Pipe B | Pipe A | + * +----------+------------+-------------+------------+ + * 31 20 19 16 15 8 7 0 + * + * Each nibble represents domain id. 0 for Dom0, 1,2,3...0xF for DomUs + * eg: domain_scaler_owner = 0x00030210 // 0x000|3|02|10 + * scaler domain + * scaler_owner1A -0 + * scaler_owner2A -1 + * scaler_owner1B -2 + * scaler_owner2B -0 + * scaler_owner1C -3 + * + */ +MODULE_PARM_DESC(domain_scaler_owner, "scaler owner for each pipe; domain\n" + "ids can be from 0-F, eg domain_scaler_owner = 0x00030210\n" + "scaler owner: 1C:3 2B:0 1B:2 2A:1 1A:0 (0x0 - default value)\n"); + +i915_param_named(fpreempt_timeout, uint, 0600, + "Wait time in msecs before forcing a preemption with reset (0:never force [default])"); +module_param_named(memtrack_debug, i915_modparams.memtrack_debug, int, 0600); +MODULE_PARM_DESC(memtrack_debug, + "use Memtrack debug capability (0=never, 1=always)"); diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index ac844709c97e..68fe9a3b9378 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -54,6 +54,11 @@ func(int, edp_vswing); \ func(int, reset); \ func(unsigned int, inject_load_failure); \ + func(char *, splash); \ + func(unsigned int, avail_planes_per_pipe); \ + func(unsigned long long, domain_plane_owners); \ + func(unsigned int, bg_color); \ + func(unsigned int, fpreempt_timeout); \ /* leave bools at the end to not create holes */ \ func(bool, alpha_support); \ func(bool, enable_cmd_parser); \ @@ -68,7 +73,16 @@ func(bool, nuclear_pageflip); \ func(bool, enable_dp_mst); \ func(bool, enable_dpcd_backlight); \ - func(bool, enable_gvt) + func(bool, enable_gvt); \ + func(unsigned int, enable_pvmmio); \ + func(bool, enable_gvt_oos); \ + func(bool, enable_conformance_check); \ + func(bool, disable_gvt_fw_loading); \ + func(bool, enable_initial_modeset); \ + func(bool, gvt_emulate_hdmi); \ + func(int, domain_scaler_owner); \ + func(int, gvt_workload_priority); \ + func(int, memtrack_debug) #define MEMBER(T, member) T member struct i915_params { @@ -76,7 +90,7 @@ struct i915_params { }; #undef MEMBER -extern struct i915_params i915 __read_mostly; +extern struct i915_params i915_modparams __read_mostly;
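The i915_param_named()/i915_param_named_unsafe() wrappers used throughout the hunk above are not defined in this excerpt. As a minimal sketch (an assumption inferred from the call sites, not the patch's own definition shown here), they presumably fold module_param_named()/module_param_named_unsafe() and MODULE_PARM_DESC() into a single statement against the renamed i915_modparams struct:

#define i915_param_named(name, T, perm, desc) \
	module_param_named(name, i915_modparams.name, T, perm); \
	MODULE_PARM_DESC(name, desc)

#define i915_param_named_unsafe(name, T, perm, desc) \
	module_param_named_unsafe(name, i915_modparams.name, T, perm); \
	MODULE_PARM_DESC(name, desc)

Under that reading, i915_param_named_unsafe(enable_dp_mst, bool, 0600, "...") expands to exactly the module_param_named_unsafe() plus MODULE_PARM_DESC() pair that the removed open-coded form spelled out.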
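The nibble-per-owner encoding documented above for domain_plane_owners (pipe A in bits 0-15, pipe B in bits 16-31, pipe C in bits 32-47, one nibble per plane) can be decoded mechanically. A hypothetical helper, purely illustrative and not part of the patch:

/* Which domain owns plane 'plane' (1-based) on pipe 'pipe' (0=A, 1=B, 2=C)?
 * Each plane gets one nibble; value 0 is Dom0, 1..0xF are DomUs.
 */
static inline u32 plane_owner_of(u64 domain_plane_owners, int pipe, int plane)
{
	return (domain_plane_owners >> (pipe * 16 + (plane - 1) * 4)) & 0xf;
}

For the example value 0x022111000010 quoted above, plane_owner_of(v, 0, 2) returns 1 (plane 2A owned by Dom1) and plane_owner_of(v, 2, 1) returns 1 (plane 1C owned by Dom1), matching the table.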
#endif diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 09d97e0990b7..af10ffa9580b 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -54,6 +54,8 @@ .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 } #define CHV_COLORS \ .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 } +#define GLK_COLORS \ + .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 } /* Keep in gen based order, and chronological order within a gen */ #define GEN2_FEATURES \ @@ -66,19 +68,19 @@ GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS -static const struct intel_device_info intel_i830_info = { +static const struct intel_device_info intel_i830_info __initconst = { GEN2_FEATURES, .platform = INTEL_I830, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, /* legal, last one wins */ }; -static const struct intel_device_info intel_i845g_info = { +static const struct intel_device_info intel_i845g_info __initconst = { GEN2_FEATURES, .platform = INTEL_I845G, }; -static const struct intel_device_info intel_i85x_info = { +static const struct intel_device_info intel_i85x_info __initconst = { GEN2_FEATURES, .platform = INTEL_I85X, .is_mobile = 1, .num_pipes = 2, /* legal, last one wins */ @@ -86,7 +88,7 @@ static const struct intel_device_info intel_i85x_info = { .has_fbc = 1, }; -static const struct intel_device_info intel_i865g_info = { +static const struct intel_device_info intel_i865g_info __initconst = { GEN2_FEATURES, .platform = INTEL_I865G, }; @@ -98,7 +100,7 @@ static const struct intel_device_info intel_i865g_info = { GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS -static const struct intel_device_info intel_i915g_info = { +static const struct intel_device_info intel_i915g_info __initconst = { GEN3_FEATURES, .platform = INTEL_I915G, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, @@ -106,7 +108,7 @@ static const struct intel_device_info intel_i915g_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_i915gm_info = { +static const struct intel_device_info intel_i915gm_info __initconst = { GEN3_FEATURES, .platform = INTEL_I915GM, .is_mobile = 1, @@ -118,7 +120,7 @@ static const struct intel_device_info intel_i915gm_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_i945g_info = { +static const struct intel_device_info intel_i945g_info __initconst = { GEN3_FEATURES, .platform = INTEL_I945G, .has_hotplug = 1, .cursor_needs_physical = 1, @@ -127,7 +129,7 @@ static const struct intel_device_info intel_i945g_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_i945gm_info = { +static const struct intel_device_info intel_i945gm_info __initconst = { GEN3_FEATURES, .platform = INTEL_I945GM, .is_mobile = 1, .has_hotplug = 1, .cursor_needs_physical = 1, @@ -138,14 +140,14 @@ static const struct intel_device_info intel_i945gm_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_g33_info = { +static const struct intel_device_info intel_g33_info __initconst = { GEN3_FEATURES, .platform = INTEL_G33, .has_hotplug = 1, .has_overlay = 1, }; -static const struct intel_device_info intel_pineview_info = { +static const struct intel_device_info intel_pineview_info __initconst = { GEN3_FEATURES, .platform = INTEL_PINEVIEW, .is_mobile = 1, .has_hotplug = 1, @@ -160,14 +162,14 @@ static const struct intel_device_info intel_pineview_info = { GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS -static 
const struct intel_device_info intel_i965g_info = { +static const struct intel_device_info intel_i965g_info __initconst = { GEN4_FEATURES, .platform = INTEL_I965G, .has_overlay = 1, .hws_needs_physical = 1, }; -static const struct intel_device_info intel_i965gm_info = { +static const struct intel_device_info intel_i965gm_info __initconst = { GEN4_FEATURES, .platform = INTEL_I965GM, .is_mobile = 1, .has_fbc = 1, @@ -176,14 +178,14 @@ static const struct intel_device_info intel_i965gm_info = { .hws_needs_physical = 1, }; -static const struct intel_device_info intel_g45_info = { +static const struct intel_device_info intel_g45_info __initconst = { GEN4_FEATURES, .platform = INTEL_G45, .has_pipe_cxsr = 1, .ring_mask = RENDER_RING | BSD_RING, }; -static const struct intel_device_info intel_gm45_info = { +static const struct intel_device_info intel_gm45_info __initconst = { GEN4_FEATURES, .platform = INTEL_GM45, .is_mobile = 1, .has_fbc = 1, @@ -200,12 +202,12 @@ static const struct intel_device_info intel_gm45_info = { GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS -static const struct intel_device_info intel_ironlake_d_info = { +static const struct intel_device_info intel_ironlake_d_info __initconst = { GEN5_FEATURES, .platform = INTEL_IRONLAKE, }; -static const struct intel_device_info intel_ironlake_m_info = { +static const struct intel_device_info intel_ironlake_m_info __initconst = { GEN5_FEATURES, .platform = INTEL_IRONLAKE, .is_mobile = 1, .has_fbc = 1, @@ -224,15 +226,34 @@ static const struct intel_device_info intel_ironlake_m_info = { GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS -static const struct intel_device_info intel_sandybridge_d_info = { - GEN6_FEATURES, - .platform = INTEL_SANDYBRIDGE, +#define SNB_D_PLATFORM \ + GEN6_FEATURES, \ + .platform = INTEL_SANDYBRIDGE + +static const struct intel_device_info intel_sandybridge_d_gt1_info __initconst = { + SNB_D_PLATFORM, + .gt = 1, }; -static const struct intel_device_info intel_sandybridge_m_info = { - GEN6_FEATURES, - .platform = INTEL_SANDYBRIDGE, - .is_mobile = 1, +static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst = { + SNB_D_PLATFORM, + .gt = 2, +}; + +#define SNB_M_PLATFORM \ + GEN6_FEATURES, \ + .platform = INTEL_SANDYBRIDGE, \ + .is_mobile = 1 + + +static const struct intel_device_info intel_sandybridge_m_gt1_info __initconst = { + SNB_M_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst = { + SNB_M_PLATFORM, + .gt = 2, }; #define GEN7_FEATURES \ @@ -249,27 +270,46 @@ static const struct intel_device_info intel_sandybridge_m_info = { GEN_DEFAULT_PIPEOFFSETS, \ IVB_CURSOR_OFFSETS -static const struct intel_device_info intel_ivybridge_d_info = { - GEN7_FEATURES, - .platform = INTEL_IVYBRIDGE, - .has_l3_dpf = 1, +#define IVB_D_PLATFORM \ + GEN7_FEATURES, \ + .platform = INTEL_IVYBRIDGE, \ + .has_l3_dpf = 1 + +static const struct intel_device_info intel_ivybridge_d_gt1_info __initconst = { + IVB_D_PLATFORM, + .gt = 1, }; -static const struct intel_device_info intel_ivybridge_m_info = { - GEN7_FEATURES, - .platform = INTEL_IVYBRIDGE, - .is_mobile = 1, - .has_l3_dpf = 1, +static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = { + IVB_D_PLATFORM, + .gt = 2, +}; + +#define IVB_M_PLATFORM \ + GEN7_FEATURES, \ + .platform = INTEL_IVYBRIDGE, \ + .is_mobile = 1, \ + .has_l3_dpf = 1 + +static const struct intel_device_info intel_ivybridge_m_gt1_info __initconst = { + IVB_M_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info 
intel_ivybridge_m_gt2_info __initconst = { + IVB_M_PLATFORM, + .gt = 2, }; -static const struct intel_device_info intel_ivybridge_q_info = { +static const struct intel_device_info intel_ivybridge_q_info __initconst = { GEN7_FEATURES, .platform = INTEL_IVYBRIDGE, + .gt = 2, .num_pipes = 0, /* legal, last one wins */ .has_l3_dpf = 1, }; -static const struct intel_device_info intel_valleyview_info = { +static const struct intel_device_info intel_valleyview_info __initconst = { .platform = INTEL_VALLEYVIEW, .gen = 7, .is_lp = 1, @@ -288,7 +328,7 @@ static const struct intel_device_info intel_valleyview_info = { CURSOR_OFFSETS }; -#define HSW_FEATURES \ +#define G75_FEATURES \ GEN7_FEATURES, \ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ .has_ddi = 1, \ @@ -299,14 +339,28 @@ static const struct intel_device_info intel_valleyview_info = { .has_rc6p = 0 /* RC6p removed-by HSW */, \ .has_runtime_pm = 1 -static const struct intel_device_info intel_haswell_info = { - HSW_FEATURES, - .platform = INTEL_HASWELL, - .has_l3_dpf = 1, +#define HSW_PLATFORM \ + G75_FEATURES, \ + .platform = INTEL_HASWELL, \ + .has_l3_dpf = 1 + +static const struct intel_device_info intel_haswell_gt1_info __initconst = { + HSW_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_haswell_gt2_info __initconst = { + HSW_PLATFORM, + .gt = 2, +}; + +static const struct intel_device_info intel_haswell_gt3_info __initconst = { + HSW_PLATFORM, + .gt = 3, }; -#define BDW_FEATURES \ - HSW_FEATURES, \ +#define GEN8_FEATURES \ + G75_FEATURES, \ BDW_COLORS, \ .has_logical_ring_contexts = 1, \ .has_full_48bit_ppgtt = 1, \ @@ -314,20 +368,35 @@ static const struct intel_device_info intel_haswell_info = { .has_reset_engine = 1 #define BDW_PLATFORM \ - BDW_FEATURES, \ + GEN8_FEATURES, \ .gen = 8, \ .platform = INTEL_BROADWELL -static const struct intel_device_info intel_broadwell_info = { +static const struct intel_device_info intel_broadwell_gt1_info __initconst = { + BDW_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_broadwell_gt2_info __initconst = { BDW_PLATFORM, + .gt = 2, }; -static const struct intel_device_info intel_broadwell_gt3_info = { +static const struct intel_device_info intel_broadwell_rsvd_info __initconst = { BDW_PLATFORM, + .gt = 3, + /* According to the device ID those devices are GT3, they were + * previously treated as not GT3, keep it like that. 
+ */ +}; + +static const struct intel_device_info intel_broadwell_gt3_info __initconst = { + BDW_PLATFORM, + .gt = 3, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; -static const struct intel_device_info intel_cherryview_info = { +static const struct intel_device_info intel_cherryview_info __initconst = { .gen = 8, .num_pipes = 3, .has_hotplug = 1, .is_lp = 1, @@ -350,21 +419,42 @@ static const struct intel_device_info intel_cherryview_info = { CHV_COLORS, }; -#define SKL_PLATFORM \ - BDW_FEATURES, \ - .gen = 9, \ - .platform = INTEL_SKYLAKE, \ +#define GEN9_FEATURES \ + GEN8_FEATURES, \ + .has_logical_ring_preemption = 1, \ .has_csr = 1, \ .has_guc = 1, \ + .has_ipc = 1, \ .ddb_size = 896 -static const struct intel_device_info intel_skylake_info = { +#define SKL_PLATFORM \ + GEN9_FEATURES, \ + .gen = 9, \ + .platform = INTEL_SKYLAKE + +static const struct intel_device_info intel_skylake_gt1_info __initconst = { SKL_PLATFORM, + .gt = 1, }; -static const struct intel_device_info intel_skylake_gt3_info = { +static const struct intel_device_info intel_skylake_gt2_info __initconst = { SKL_PLATFORM, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, + .gt = 2, +}; + +#define SKL_GT3_PLUS_PLATFORM \ + SKL_PLATFORM, \ + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING + + +static const struct intel_device_info intel_skylake_gt3_info __initconst = { + SKL_GT3_PLUS_PLATFORM, + .gt = 3, +}; + +static const struct intel_device_info intel_skylake_gt4_info __initconst = { + SKL_GT3_PLUS_PLATFORM, + .gt = 4, }; #define GEN9_LP_FEATURES \ @@ -385,72 +475,84 @@ static const struct intel_device_info intel_skylake_gt3_info = { .has_dp_mst = 1, \ .has_gmbus_irq = 1, \ .has_logical_ring_contexts = 1, \ + .has_logical_ring_preemption = 1, \ .has_guc = 1, \ .has_aliasing_ppgtt = 1, \ .has_full_ppgtt = 1, \ .has_full_48bit_ppgtt = 1, \ .has_reset_engine = 1, \ + .has_snoop = true, \ + .has_ipc = 1, \ GEN_DEFAULT_PIPEOFFSETS, \ IVB_CURSOR_OFFSETS, \ BDW_COLORS -static const struct intel_device_info intel_broxton_info = { +static const struct intel_device_info intel_broxton_info __initconst = { GEN9_LP_FEATURES, .platform = INTEL_BROXTON, .ddb_size = 512, - .has_reset_engine = false, }; -static const struct intel_device_info intel_geminilake_info = { +static const struct intel_device_info intel_geminilake_info __initconst = { GEN9_LP_FEATURES, .platform = INTEL_GEMINILAKE, .ddb_size = 1024, - .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 } + GLK_COLORS, }; #define KBL_PLATFORM \ - BDW_FEATURES, \ + GEN9_FEATURES, \ .gen = 9, \ - .platform = INTEL_KABYLAKE, \ - .has_csr = 1, \ - .has_guc = 1, \ - .ddb_size = 896 + .platform = INTEL_KABYLAKE + +static const struct intel_device_info intel_kabylake_gt1_info __initconst = { + KBL_PLATFORM, + .gt = 1, +}; -static const struct intel_device_info intel_kabylake_info = { +static const struct intel_device_info intel_kabylake_gt2_info __initconst = { KBL_PLATFORM, + .gt = 2, }; -static const struct intel_device_info intel_kabylake_gt3_info = { +static const struct intel_device_info intel_kabylake_gt3_info __initconst = { KBL_PLATFORM, + .gt = 3, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; #define CFL_PLATFORM \ - .is_alpha_support = 1, \ - BDW_FEATURES, \ + GEN9_FEATURES, \ .gen = 9, \ - .platform = INTEL_COFFEELAKE, \ - .has_csr = 1, \ - .has_guc = 1, \ - .ddb_size = 896 + .platform = INTEL_COFFEELAKE + +static const struct intel_device_info 
intel_coffeelake_gt1_info __initconst = { + CFL_PLATFORM, + .gt = 1, +}; -static const struct intel_device_info intel_coffeelake_info = { +static const struct intel_device_info intel_coffeelake_gt2_info __initconst = { CFL_PLATFORM, + .gt = 2, }; -static const struct intel_device_info intel_coffeelake_gt3_info = { +static const struct intel_device_info intel_coffeelake_gt3_info __initconst = { CFL_PLATFORM, + .gt = 3, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; -static const struct intel_device_info intel_cannonlake_info = { - BDW_FEATURES, +#define GEN10_FEATURES \ + GEN9_FEATURES, \ + .ddb_size = 1024, \ + GLK_COLORS + +static const struct intel_device_info intel_cannonlake_gt2_info __initconst = { + GEN10_FEATURES, .is_alpha_support = 1, .platform = INTEL_CANNONLAKE, .gen = 10, - .ddb_size = 1024, - .has_csr = 1, - .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 } + .gt = 2, }; /* @@ -476,31 +578,40 @@ static const struct pci_device_id pciidlist[] = { INTEL_PINEVIEW_IDS(&intel_pineview_info), INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), - INTEL_SNB_D_IDS(&intel_sandybridge_d_info), - INTEL_SNB_M_IDS(&intel_sandybridge_m_info), + INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info), + INTEL_SNB_D_GT2_IDS(&intel_sandybridge_d_gt2_info), + INTEL_SNB_M_GT1_IDS(&intel_sandybridge_m_gt1_info), + INTEL_SNB_M_GT2_IDS(&intel_sandybridge_m_gt2_info), INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ - INTEL_IVB_M_IDS(&intel_ivybridge_m_info), - INTEL_IVB_D_IDS(&intel_ivybridge_d_info), - INTEL_HSW_IDS(&intel_haswell_info), + INTEL_IVB_M_GT1_IDS(&intel_ivybridge_m_gt1_info), + INTEL_IVB_M_GT2_IDS(&intel_ivybridge_m_gt2_info), + INTEL_IVB_D_GT1_IDS(&intel_ivybridge_d_gt1_info), + INTEL_IVB_D_GT2_IDS(&intel_ivybridge_d_gt2_info), + INTEL_HSW_GT1_IDS(&intel_haswell_gt1_info), + INTEL_HSW_GT2_IDS(&intel_haswell_gt2_info), + INTEL_HSW_GT3_IDS(&intel_haswell_gt3_info), INTEL_VLV_IDS(&intel_valleyview_info), - INTEL_BDW_GT12_IDS(&intel_broadwell_info), + INTEL_BDW_GT1_IDS(&intel_broadwell_gt1_info), + INTEL_BDW_GT2_IDS(&intel_broadwell_gt2_info), INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info), - INTEL_BDW_RSVD_IDS(&intel_broadwell_info), + INTEL_BDW_RSVD_IDS(&intel_broadwell_rsvd_info), INTEL_CHV_IDS(&intel_cherryview_info), - INTEL_SKL_GT1_IDS(&intel_skylake_info), - INTEL_SKL_GT2_IDS(&intel_skylake_info), + INTEL_SKL_GT1_IDS(&intel_skylake_gt1_info), + INTEL_SKL_GT2_IDS(&intel_skylake_gt2_info), INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), - INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info), + INTEL_SKL_GT4_IDS(&intel_skylake_gt4_info), INTEL_BXT_IDS(&intel_broxton_info), INTEL_GLK_IDS(&intel_geminilake_info), - INTEL_KBL_GT1_IDS(&intel_kabylake_info), - INTEL_KBL_GT2_IDS(&intel_kabylake_info), + INTEL_KBL_GT1_IDS(&intel_kabylake_gt1_info), + INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info), INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), - INTEL_CFL_S_IDS(&intel_coffeelake_info), - INTEL_CFL_H_IDS(&intel_coffeelake_info), - INTEL_CFL_U_IDS(&intel_coffeelake_gt3_info), - INTEL_CNL_IDS(&intel_cannonlake_info), + INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info), + INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info), + INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info), + INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info), + INTEL_CNL_U_GT2_IDS(&intel_cannonlake_gt2_info), + INTEL_CNL_Y_GT2_IDS(&intel_cannonlake_gt2_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); @@ -519,9 +630,9 
@@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) (struct intel_device_info *) ent->driver_data; int err; - if (IS_ALPHA_SUPPORT(intel_info) && !i915.alpha_support) { + if (IS_ALPHA_SUPPORT(intel_info) && !i915_modparams.alpha_support) { DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n" "See CONFIG_DRM_I915_ALPHA_SUPPORT or i915.alpha_support module parameter\n" "to enable support in this kernel version, or check for kernel updates.\n"); return -ENODEV; } @@ -541,6 +652,9 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; +#ifdef CONFIG_DRM_I915_LOAD_ASYNC_SUPPORT + i915_driver_load_async(pdev, ent); +#else err = i915_driver_load(pdev, ent); if (err) return err; @@ -550,7 +664,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i915_pci_remove(pdev); return err > 0 ? -ENOTTY : err; } - +#endif return 0; } @@ -577,10 +691,10 @@ static int __init i915_init(void) * vga_text_mode_force boot option. */ - if (i915.modeset == 0) + if (i915_modparams.modeset == 0) use_kms = false; - if (vgacon_text_force() && i915.modeset == -1) + if (vgacon_text_force() && i915_modparams.modeset == -1) use_kms = false; if (!use_kms) { diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 370b9d248fed..2017ec404827 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -42,7 +42,7 @@ * Streams representing a single context are accessible to applications with a * corresponding drm file descriptor, such that OpenGL can use the interface * without special privileges. Access to system-wide metrics requires root * privileges by default, unless changed via the dev.i915.perf_event_paranoid * sysctl option.
* */ @@ -265,7 +265,7 @@ #define POLL_FREQUENCY 200 #define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY) /* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */ static int zero; static int one = 1; static u32 i915_perf_stream_paranoid = true; @@ -1213,7 +1213,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) { struct drm_i915_private *dev_priv = stream->dev_priv; - if (i915.enable_execlists) + if (i915_modparams.enable_execlists) dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id; else { struct intel_engine_cs *engine = dev_priv->engine[RCS]; @@ -1259,7 +1259,7 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream) { struct drm_i915_private *dev_priv = stream->dev_priv; - if (i915.enable_execlists) { + if (i915_modparams.enable_execlists) { dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; } else { struct intel_engine_cs *engine = dev_priv->engine[RCS]; @@ -1300,9 +1300,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) */ mutex_lock(&dev_priv->drm.struct_mutex); dev_priv->perf.oa.exclusive_stream = NULL; - mutex_unlock(&dev_priv->drm.struct_mutex); - dev_priv->perf.oa.ops.disable_metric_set(dev_priv); + mutex_unlock(&dev_priv->drm.struct_mutex); free_oa_buffer(dev_priv); @@ -1754,22 +1753,13 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr * Note: it's only the RCS/Render context that has any OA state. */ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, - const struct i915_oa_config *oa_config, - bool interruptible) + const struct i915_oa_config *oa_config) { struct i915_gem_context *ctx; int ret; unsigned int wait_flags = I915_WAIT_LOCKED; - if (interruptible) { - ret = i915_mutex_lock_interruptible(&dev_priv->drm); - if (ret) - return ret; - - wait_flags |= I915_WAIT_INTERRUPTIBLE; - } else { - mutex_lock(&dev_priv->drm.struct_mutex); - } + lockdep_assert_held(&dev_priv->drm.struct_mutex); /* Switch away from any user context. */ ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config); @@ -1817,8 +1807,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, } out: - mutex_unlock(&dev_priv->drm.struct_mutex); - return ret; } @@ -1862,7 +1850,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv, * to make sure all slices/subslices are ON before writing to NOA * registers. */ - ret = gen8_configure_all_contexts(dev_priv, oa_config, true); + ret = gen8_configure_all_contexts(dev_priv, oa_config); if (ret) return ret; @@ -1877,7 +1865,7 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv) { /* Reset all contexts' slices/subslices configurations. */ - gen8_configure_all_contexts(dev_priv, NULL, false); + gen8_configure_all_contexts(dev_priv, NULL); I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & ~GT_NOA_ENABLE)); @@ -2127,6 +2115,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, if (ret) goto err_oa_buf_alloc; + ret = i915_mutex_lock_interruptible(&dev_priv->drm); + if (ret) + goto err_lock; + ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv, stream->oa_config); if (ret) @@ -2134,23 +2126,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, stream->ops = &i915_oa_stream_ops; - /* Lock device for exclusive_stream access late because - * enable_metric_set() might lock as well on gen8+.
- */ - ret = i915_mutex_lock_interruptible(&dev_priv->drm); - if (ret) - goto err_lock; - dev_priv->perf.oa.exclusive_stream = stream; mutex_unlock(&dev_priv->drm.struct_mutex); return 0; -err_lock: +err_enable: dev_priv->perf.oa.ops.disable_metric_set(dev_priv); + mutex_unlock(&dev_priv->drm.struct_mutex); -err_enable: +err_lock: free_oa_buffer(dev_priv); err_oa_buf_alloc: @@ -2612,7 +2598,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, privileged_op = false; /* Similar to perf's kernel.perf_paranoid_cpu sysctl option * we check a dev.i915.perf_stream_paranoid sysctl option * to determine if it's ok to access system wide OA counters * without CAP_SYS_ADMIN privileges. */ @@ -2801,7 +2787,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, if (oa_freq_hz > i915_oa_max_sample_rate && !capable(CAP_SYS_ADMIN)) { DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n", i915_oa_max_sample_rate); return -EACCES; } @@ -3409,7 +3395,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv) dev_priv->perf.oa.timestamp_frequency = 12500000; dev_priv->perf.oa.oa_formats = hsw_oa_formats; - } else if (i915.enable_execlists) { + } else if (i915_modparams.enable_execlists) { /* Note: that although we could theoretically also support the * legacy ringbuffer mode on BDW (and earlier iterations of * this driver, before upstreaming did this) it didn't seem diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h index 0679a58cdbae..59ccc8e3e75c 100644 --- a/drivers/gpu/drm/i915/i915_pvinfo.h +++ b/drivers/gpu/drm/i915/i915_pvinfo.h @@ -28,6 +28,12 @@ #define VGT_PVINFO_PAGE 0x78000 #define VGT_PVINFO_SIZE 0x1000 +/* Scratch reg used for redirecting command access to registers; any + * command access to the PVINFO page is discarded, so it has no HW + * impact. + */ +#define VGT_SCRATCH_REG VGT_PVINFO_PAGE + /* * The following structure pages are defined in GEN MMIO space * for virtualization. (One page for now) */ @@ -46,14 +52,81 @@ enum vgt_g2v_type { VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY, VGT_G2V_EXECLIST_CONTEXT_CREATE, VGT_G2V_EXECLIST_CONTEXT_DESTROY, + VGT_G2V_PPGTT_L4_ALLOC, + VGT_G2V_PPGTT_L4_CLEAR, + VGT_G2V_PPGTT_L4_INSERT, VGT_G2V_MAX, }; +#define PLANE_COLOR_CTL_BIT (1 << 0) +#define PLANE_KEY_BIT (1 << 1) +#define PLANE_SCALER_BIT (1 << 2) + +struct pv_plane_update { + u32 flags; + u32 plane_color_ctl; + u32 plane_key_val; + u32 plane_key_max; + u32 plane_key_msk; + u32 plane_offset; + u32 plane_stride; + u32 plane_size; + u32 plane_aux_dist; + u32 plane_aux_offset; + u32 ps_ctrl; + u32 ps_pwr_gate; + u32 ps_win_ps; + u32 ps_win_sz; + u32 plane_pos; + u32 plane_ctl; +}; + +struct pv_plane_wm_update { + u32 max_wm_level; + u32 plane_wm_level[8]; + u32 plane_trans_wm_level; + u32 plane_buf_cfg; +}; + +struct pv_ppgtt_update { + u64 pdp; + u64 start; + u64 length; + u32 cache_level; +}; + +/* shared page (4KB) between GVT and the VM, located at the first page next + * to the MMIO region (2MB size normally).
+ */ +struct gvt_shared_page { + u32 elsp_data[4]; + u32 reg_addr; + struct pv_plane_update pv_plane; + struct pv_plane_wm_update pv_plane_wm; + struct pv_ppgtt_update pv_ppgtt; + u32 rsvd2[0x400 - 40]; +}; + +#define VGPU_PVMMIO(vgpu) vgpu_vreg(vgpu, vgtif_reg(enable_pvmmio)) + /* * VGT capabilities type */ #define VGT_CAPS_FULL_48BIT_PPGTT BIT(2) +/* + * define different levels of PVMMIO optimization + */ +enum pvmmio_levels { + PVMMIO_ELSP_SUBMIT = 0x1, + PVMMIO_PLANE_UPDATE = 0x2, + PVMMIO_PLANE_WM_UPDATE = 0x4, + PVMMIO_PPGTT_UPDATE = 0x10, +}; + +#define PVMMIO_LEVEL_ENABLE(dev_priv, level) \ + (intel_vgpu_active(dev_priv) && (i915_modparams.enable_pvmmio & level)) + struct vgt_if { u64 magic; /* VGT_MAGIC */ u16 version_major; @@ -101,8 +174,11 @@ struct vgt_if { u32 execlist_context_descriptor_lo; u32 execlist_context_descriptor_hi; + u32 enable_pvmmio; + u32 pv_mmio; /* vgpu trapped mmio read will be redirected here */ + u32 scaler_owned; - u32 rsv7[0x200 - 24]; /* pad to one page */ + u32 rsv7[0x200 - 27]; /* pad to one page */ } __packed; #define vgtif_reg(x) \ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c9bcc6c45012..4194ddf37a32 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2373,6 +2373,7 @@ enum i915_power_well_id { #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28) +#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1<<24) #if 0 #define PRB0_TAIL _MMIO(0x2030) @@ -2484,13 +2485,19 @@ enum i915_power_well_id { #define _3D_CHICKEN _MMIO(0x2084) #define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) #define _3D_CHICKEN2 _MMIO(0x208c) + +#define FF_SLICE_CHICKEN _MMIO(0x2088) +#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1) + /* Disables pipelining of read flushes past the SF-WIZ interface. * Required on all Ironlake steppings according to the B-Spec, but the * particular danger of not doing so is not specified. 
*/ # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) #define _3D_CHICKEN3 _MMIO(0x2090) +#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12) #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) +#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5) #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) #define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */ #define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */ @@ -2993,6 +3000,7 @@ enum i915_power_well_id { # define GPIO_DATA_PULLUP_DISABLE (1 << 13) #define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */ +#define GMBUS_AKSV_SELECT (1<<11) #define GMBUS_RATE_100KHZ (0<<8) #define GMBUS_RATE_50KHZ (1<<8) #define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */ @@ -3806,6 +3814,12 @@ enum { #define PWM2_GATING_DIS (1 << 14) #define PWM1_GATING_DIS (1 << 13) +/* + * GEN10 clock gating regs + */ +#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4) +#define SARBUNIT_CLKGATE_DIS (1 << 5) + /* * Display engine regs */ @@ -5422,6 +5436,15 @@ enum { #define PIPEMISC_DITHER_TYPE_SP (0<<2) #define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A) +/* Skylake pipe bottom color */ +#define _PIPE_BOTTOM_COLOR_A 0x70034 +#define _PIPE_BOTTOM_COLOR_B 0x71034 +#define _PIPE_BOTTOM_COLOR_C 0x72034 +#define PIPE_BOTTOM_GAMMA_ENABLE (1 << 31) +#define PIPE_BOTTOM_CSC_ENABLE (1 << 30) +#define PIPE_BOTTOM_COLOR_MASK 0x3FFFFFFF +#define PIPE_BOTTOM_COLOR(pipe) _MMIO_PIPE(pipe, _PIPE_BOTTOM_COLOR_A, _PIPE_BOTTOM_COLOR_B) + #define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028) #define PIPEB_LINE_COMPARE_INT_EN (1<<29) #define PIPEB_HLINE_INT_EN (1<<28) @@ -6159,6 +6182,12 @@ enum { #define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4) #define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8) #define SP_CONST_ALPHA_ENABLE (1<<31) +#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0) +#define SP_CONTRAST(x) ((x) << 18) /* u3.6 */ +#define SP_BRIGHTNESS(x) ((x) & 0xff) /* s8 */ +#define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4) +#define SP_SH_SIN(x) (((x) & 0x7ff) << 16) /* s4.7 */ +#define SP_SH_COS(x) (x) /* u3.7 */ #define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4) #define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280) @@ -6172,6 +6201,8 @@ enum { #define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0) #define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4) #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) +#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0) +#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4) #define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4) #define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \ @@ -6188,6 +6219,8 @@ enum { #define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL) #define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF) #define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA) +#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0) +#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1) #define SPGAMC(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) /* @@ -6902,7 +6935,7 @@ enum { # define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) #define CHICKEN_PAR1_1 _MMIO(0x42080) -#define SKL_RC_HASH_OUTSIDE (1 << 15) +#define SKL_DE_COMPRESSED_HASH_MODE (1 << 15) #define DPA_MASK_VBLANK_SRD (1 << 15) #define FORCE_ARB_IDLE_PLANES (1 << 14) #define SKL_EDP_PSR_FIX_RDWRAP (1 << 3) @@ -6934,6 +6967,7 @@ enum { #define DISP_FBC_WM_DIS 
(1<<15) #define DISP_ARB_CTL2 _MMIO(0x45004) #define DISP_DATA_PARTITION_5_6 (1<<6) +#define DISP_IPC_ENABLE (1<<3) #define DBUF_CTL _MMIO(0x45008) #define DBUF_POWER_REQUEST (1<<31) #define DBUF_POWER_STATE (1<<30) @@ -6944,6 +6978,7 @@ enum { #define RESET_PCH_HANDSHAKE_ENABLE (1<<4) #define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) +#define SKL_SELECT_ALTERNATE_DC_EXIT (1<<30) #define MASK_WAKEMEM (1<<13) #define SKL_DFSM _MMIO(0x51000) @@ -6969,12 +7004,19 @@ enum { #define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec) #define GEN9_CTX_PREEMPT_REG _MMIO(0x2248) #define GEN8_CS_CHICKEN1 _MMIO(0x2580) +#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1<<0) +#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1)) +#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0) +#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1) +#define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(1, 0) +#define GEN9_PREEMPT_GPGPU_LEVEL_MASK GEN9_PREEMPT_GPGPU_LEVEL(1, 1) /* GEN7 chicken */ #define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010) # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) # define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14) #define COMMON_SLICE_CHICKEN2 _MMIO(0x7014) +# define GEN9_PBE_COMPRESSED_HASH_SELECTION (1<<13) # define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12) # define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8) # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) @@ -6986,6 +7028,8 @@ enum { #define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308) #define DISABLE_PIXEL_MASK_CAMMING (1<<14) +#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c) + #define GEN7_L3SQCREG1 _MMIO(0xB010) #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 @@ -7018,6 +7062,7 @@ enum { /* GEN8 chicken */ #define HDC_CHICKEN0 _MMIO(0x7300) +#define CNL_HDC_CHICKEN0 _MMIO(0xE5F0) #define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15) #define HDC_FENCE_DEST_SLM_DISABLE (1<<14) #define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11) @@ -7031,6 +7076,9 @@ enum { #define SLICE_ECO_CHICKEN0 _MMIO(0x7308) #define PIXEL_MASK_CAMMING_DISABLE (1 << 14) +#define GEN9_WM_CHICKEN3 _MMIO(0x5588) +#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9) + /* WaCatErrorRejectionIssue */ #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030) #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) @@ -7947,6 +7995,7 @@ enum { #define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8 #define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16 #define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24 +#define SKL_PCODE_LOAD_HDCP_KEYS 0x5 #define SKL_PCODE_CDCLK_CONTROL 0x7 #define SKL_CDCLK_PREPARE_FOR_CHANGE 0x3 #define SKL_CDCLK_READY_FOR_CHANGE 0x1 @@ -8049,6 +8098,7 @@ enum { #define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4) #define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4) #define DOP_CLOCK_GATING_DISABLE (1<<0) +#define PUSH_CONSTANT_DEREF_DISABLE (1<<8) #define HSW_ROW_CHICKEN3 _MMIO(0xe49c) #define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) @@ -8060,9 +8110,11 @@ enum { #define HSW_SAMPLE_C_PERFORMANCE (1<<9) #define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) #define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5) +#define CNL_FAST_ANISO_L1_BANKING_FIX (1<<4) #define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) #define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194) +#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1<<8) #define GEN9_ENABLE_YV12_BUGFIX (1<<4) #define GEN9_ENABLE_GPGPU_PREEMPTION (1<<2) @@ -8235,6 +8287,89 @@ enum skl_power_gate { #define SKL_PW_TO_PG(pw) ((pw) - SKL_DISP_PW_1 + SKL_PG1) #define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg))) + +/* HDCP Key Registers 
*/ +#define HDCP_KEY_CONF _MMIO(0x66c00) +#define HDCP_AKSV_SEND_TRIGGER BIT(31) +#define HDCP_CLEAR_KEYS_TRIGGER BIT(30) +#define HDCP_KEY_LOAD_TRIGGER BIT(8) +#define HDCP_KEY_STATUS _MMIO(0x66c04) +#define HDCP_FUSE_IN_PROGRESS BIT(7) +#define HDCP_FUSE_ERROR BIT(6) +#define HDCP_FUSE_DONE BIT(5) +#define HDCP_KEY_LOAD_STATUS BIT(1) +#define HDCP_KEY_LOAD_DONE BIT(0) +#define HDCP_AKSV_LO _MMIO(0x66c10) +#define HDCP_AKSV_HI _MMIO(0x66c14) + +/* HDCP Repeater Registers */ +#define HDCP_REP_CTL _MMIO(0x66d00) +#define HDCP_DDIB_REP_PRESENT BIT(30) +#define HDCP_DDIA_REP_PRESENT BIT(29) +#define HDCP_DDIC_REP_PRESENT BIT(28) +#define HDCP_DDID_REP_PRESENT BIT(27) +#define HDCP_DDIF_REP_PRESENT BIT(26) +#define HDCP_DDIE_REP_PRESENT BIT(25) +#define HDCP_DDIB_SHA1_M0 (1 << 20) +#define HDCP_DDIA_SHA1_M0 (2 << 20) +#define HDCP_DDIC_SHA1_M0 (3 << 20) +#define HDCP_DDID_SHA1_M0 (4 << 20) +#define HDCP_DDIF_SHA1_M0 (5 << 20) +#define HDCP_DDIE_SHA1_M0 (6 << 20) /* Bspec says 5? */ +#define HDCP_SHA1_BUSY BIT(16) +#define HDCP_SHA1_READY BIT(17) +#define HDCP_SHA1_COMPLETE BIT(18) +#define HDCP_SHA1_V_MATCH BIT(19) +#define HDCP_SHA1_TEXT_32 (1 << 1) +#define HDCP_SHA1_COMPLETE_HASH (2 << 1) +#define HDCP_SHA1_TEXT_24 (4 << 1) +#define HDCP_SHA1_TEXT_16 (5 << 1) +#define HDCP_SHA1_TEXT_8 (6 << 1) +#define HDCP_SHA1_TEXT_0 (7 << 1) +#define HDCP_SHA_V_PRIME_H0 _MMIO(0x66d04) +#define HDCP_SHA_V_PRIME_H1 _MMIO(0x66d08) +#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C) +#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10) +#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14) +#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + h * 4)) +#define HDCP_SHA_TEXT _MMIO(0x66d18) + +/* HDCP Auth Registers */ +#define _PORTA_HDCP_AUTHENC 0x66800 +#define _PORTB_HDCP_AUTHENC 0x66500 +#define _PORTC_HDCP_AUTHENC 0x66600 +#define _PORTD_HDCP_AUTHENC 0x66700 +#define _PORTE_HDCP_AUTHENC 0x66A00 +#define _PORTF_HDCP_AUTHENC 0x66900 +#define _PORT_HDCP_AUTHENC(port, x) _MMIO(_PICK(port, \ + _PORTA_HDCP_AUTHENC, \ + _PORTB_HDCP_AUTHENC, \ + _PORTC_HDCP_AUTHENC, \ + _PORTD_HDCP_AUTHENC, \ + _PORTE_HDCP_AUTHENC, \ + _PORTF_HDCP_AUTHENC) + x) +#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0) +#define HDCP_CONF_CAPTURE_AN BIT(0) +#define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0)) +#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4) +#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8) +#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC) +#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10) +#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14) +#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18) +#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C) +#define HDCP_STATUS_STREAM_A_ENC BIT(31) +#define HDCP_STATUS_STREAM_B_ENC BIT(30) +#define HDCP_STATUS_STREAM_C_ENC BIT(29) +#define HDCP_STATUS_STREAM_D_ENC BIT(28) +#define HDCP_STATUS_AUTH BIT(21) +#define HDCP_STATUS_ENC BIT(20) +#define HDCP_STATUS_RI_MATCH BIT(19) +#define HDCP_STATUS_R0_READY BIT(18) +#define HDCP_STATUS_AN_READY BIT(17) +#define HDCP_STATUS_CIPHER BIT(16) +#define HDCP_STATUS_FRAME_CNT(x) ((x >> 8) & 0xff) + /* Per-pipe DDI Function Control */ #define _TRANS_DDI_FUNC_CTL_A 0x60400 #define _TRANS_DDI_FUNC_CTL_B 0x61400 @@ -8266,6 +8401,7 @@ enum skl_power_gate { #define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12) #define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12) #define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12) +#define TRANS_DDI_HDCP_SIGNALLING (1<<9) #define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8) #define 
TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1<<7) #define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1<<6) @@ -8475,6 +8611,7 @@ enum skl_power_gate { #define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22) #define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22) #define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20) +#define CDCLK_DIVMUX_CD_OVERRIDE (1<<19) #define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3) #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) #define CDCLK_FREQ_DECIMAL_MASK (0x7ff) @@ -9363,4 +9500,35 @@ enum skl_power_gate { #define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */ #define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */ +#define MMCD_MISC_CTRL _MMIO(0x4ddc) /* skl+ */ +#define MMCD_PCLA (1 << 31) +#define MMCD_HOTSPOT_EN (1 << 27) + +/* GVT needs a special read process for some MMIO registers; reads of + * these must be trapped to GVT for complete emulation. There are not + * many such MMIOs, so a static list is used to cover them. + */ +static inline bool in_mmio_read_trap_list(u32 reg) +{ + if (unlikely(reg >= PCH_GMBUS0.reg && reg <= PCH_GMBUS5.reg)) + return true; + + if (unlikely(reg == RING_TIMESTAMP(RENDER_RING_BASE).reg || + reg == RING_TIMESTAMP(BLT_RING_BASE).reg || + reg == RING_TIMESTAMP(GEN6_BSD_RING_BASE).reg || + reg == RING_TIMESTAMP(VEBOX_RING_BASE).reg || + reg == RING_TIMESTAMP(GEN8_BSD2_RING_BASE).reg || + reg == RING_TIMESTAMP_UDW(RENDER_RING_BASE).reg || + reg == RING_TIMESTAMP_UDW(BLT_RING_BASE).reg || + reg == RING_TIMESTAMP_UDW(GEN6_BSD_RING_BASE).reg || + reg == RING_TIMESTAMP_UDW(VEBOX_RING_BASE).reg)) + return true; + + if (unlikely(reg == SBI_DATA.reg || reg == 0x6c060 || reg == 0x206c)) + return true; + + return false; +} + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index d61c8727f756..c7c4ff34b279 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -31,6 +31,7 @@ #include #include "intel_drv.h" #include "i915_drv.h" +#include "../drm_internal.h" static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev) { @@ -246,7 +247,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, intel_runtime_pm_get(dev_priv); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { u32 freq; freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); @@ -261,7 +262,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; ret = intel_gpu_freq(dev_priv, ret); } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); intel_runtime_pm_put(dev_priv); @@ -304,9 +305,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq) return -EINVAL; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); dev_priv->rps.boost_freq = val; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); return count; } @@ -344,14 +345,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, intel_runtime_pm_get(dev_priv); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); val = intel_freq_opcode(dev_priv, val); if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq || val < dev_priv->rps.min_freq_softlimit) { - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); intel_runtime_pm_put(dev_priv); return -EINVAL; } @@ -371,7 +372,7 @@ static ssize_t
gt_max_freq_mhz_store(struct device *kdev, * frequency request may be unchanged. */ ret = intel_set_rps(dev_priv, val); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); intel_runtime_pm_put(dev_priv); @@ -401,14 +402,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, intel_runtime_pm_get(dev_priv); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); val = intel_freq_opcode(dev_priv, val); if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq || val > dev_priv->rps.max_freq_softlimit) { - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); intel_runtime_pm_put(dev_priv); return -EINVAL; } @@ -424,7 +425,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, * frequency request may be unchanged. */ ret = intel_set_rps(dev_priv, val); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); intel_runtime_pm_put(dev_priv); @@ -532,7 +533,274 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj, return count; } -static const struct bin_attribute error_state_attr = { +#define dev_to_drm_minor(d) dev_get_drvdata((d)) + +static ssize_t i915_gem_clients_state_read(struct file *filp, + struct kobject *memtrack_kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct kobject *kobj = memtrack_kobj->parent; + struct device *kdev = container_of(kobj, struct device, kobj); + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_error_state_buf error_str; + ssize_t ret_count = 0; + int ret; + + ret = i915_error_state_buf_init(&error_str, dev_priv, count, off); + if (ret) + return ret; + + ret = i915_get_drm_clients_info(&error_str, dev); + if (ret) + goto out; + + ret_count = count < error_str.bytes ? count : error_str.bytes; + + memcpy(buf, error_str.buf, ret_count); +out: + i915_error_state_buf_release(&error_str); + + return ret ?: ret_count; +} + +#define GEM_OBJ_STAT_BUF_SIZE (4*1024) /* 4KB */ +#define GEM_OBJ_STAT_BUF_SIZE_MAX (1024*1024) /* 1MB */ + +struct i915_gem_file_attr_priv { + char tgid_str[16]; + struct pid *tgid; + struct drm_i915_error_state_buf buf; +}; + +static ssize_t i915_gem_read_objects(struct file *filp, + struct kobject *memtrack_kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct kobject *kobj = memtrack_kobj->parent; + struct device *kdev = container_of(kobj, struct device, kobj); + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct i915_gem_file_attr_priv *attr_priv; + struct pid *tgid; + ssize_t ret_count = 0; + long bytes_available; + int ret = 0, buf_size = GEM_OBJ_STAT_BUF_SIZE; + unsigned long timeout = msecs_to_jiffies(500) + 1; + + /* + * There may arise a scenario where a sysfs file entry is being removed, + * and may race against a sysfs read. The sysfs file remove function + * would have taken the drm_global_mutex and would wait for the read to + * finish, which is again waiting to acquire drm_global_mutex, leading + * to deadlock. To avoid this, use mutex_trylock here with a timeout.
+ */ + while (!mutex_trylock(&drm_global_mutex) && --timeout) + schedule_timeout_killable(1); + if (timeout == 0) { + DRM_DEBUG_DRIVER("Unable to acquire drm global mutex.\n"); + return -EBUSY; + } + + if (!attr || !attr->private) { + ret = -EINVAL; + DRM_ERROR("attr | attr->private pointer is NULL\n"); + goto out; + } + attr_priv = attr->private; + tgid = attr_priv->tgid; + + if (off && !attr_priv->buf.buf) { + ret = -EINVAL; + DRM_ERROR( + "Buf not allocated during read with non-zero offset\n"); + goto out; + } + + if (off == 0) { +retry: + if (!attr_priv->buf.buf) { + ret = i915_obj_state_buf_init(&attr_priv->buf, + buf_size); + if (ret) { + DRM_ERROR( + "obj state buf init failed. buf_size=%d\n", + buf_size); + goto out; + } + } else { + /* Reset the buf parameters before filling data */ + attr_priv->buf.pos = 0; + attr_priv->buf.bytes = 0; + } + + /* Read the gfx device stats */ + ret = i915_gem_get_obj_info(&attr_priv->buf, dev, tgid); + if (ret) + goto out; + + ret = i915_error_ok(&attr_priv->buf); + if (ret) { + ret = 0; + goto copy_data; + } + if (buf_size >= GEM_OBJ_STAT_BUF_SIZE_MAX) { + DRM_DEBUG_DRIVER("obj stat buf size limit reached\n"); + ret = -ENOMEM; + goto out; + } else { + /* Try to reallocate buf of larger size */ + i915_error_state_buf_release(&attr_priv->buf); + buf_size *= 2; + + ret = i915_obj_state_buf_init(&attr_priv->buf, + buf_size); + if (ret) { + DRM_ERROR( + "obj stat buf init failed. buf_size=%d\n", + buf_size); + goto out; + } + goto retry; + } + } +copy_data: + + bytes_available = (long)attr_priv->buf.bytes - (long)off; + + if (bytes_available > 0) { + ret_count = count < bytes_available ? count : bytes_available; + memcpy(buf, attr_priv->buf.buf + off, ret_count); + } else + ret_count = 0; + +out: + mutex_unlock(&drm_global_mutex); + + return ret ?: ret_count; +} + +int i915_gem_create_sysfs_file_entry(struct drm_device *dev, + struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_gem_file_attr_priv *attr_priv; + struct bin_attribute *obj_attr; + struct drm_file *file_local; + int ret; + + if (!i915_modparams.memtrack_debug) + return 0; + + /* + * Check for multiple drm files having same tgid. If found, copy the + * bin attribute into the new file priv. Otherwise allocate a new + * copy of bin attribute, and create its corresponding sysfs file. + */ + mutex_lock(&dev->struct_mutex); + list_for_each_entry(file_local, &dev->filelist, lhead) { + struct drm_i915_file_private *file_priv_local = + file_local->driver_priv; + + if (file_priv->tgid == file_priv_local->tgid) { + file_priv->obj_attr = file_priv_local->obj_attr; + mutex_unlock(&dev->struct_mutex); + return 0; + } + } + mutex_unlock(&dev->struct_mutex); + + obj_attr = kzalloc(sizeof(*obj_attr), GFP_KERNEL); + if (!obj_attr) { + DRM_ERROR("Alloc failed. Out of memory\n"); + ret = -ENOMEM; + goto out; + } + + attr_priv = kzalloc(sizeof(*attr_priv), GFP_KERNEL); + if (!attr_priv) { + DRM_ERROR("Alloc failed. 
Out of memory\n"); + ret = -ENOMEM; + goto out_obj_attr; + } + + snprintf(attr_priv->tgid_str, 16, "%d", task_tgid_nr(current)); + obj_attr->attr.name = attr_priv->tgid_str; + obj_attr->attr.mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; + obj_attr->size = 0; + obj_attr->read = i915_gem_read_objects; + + attr_priv->tgid = file_priv->tgid; + obj_attr->private = attr_priv; + + ret = sysfs_create_bin_file(&dev_priv->memtrack_kobj, + obj_attr); + if (ret) { + DRM_ERROR( + "sysfs tgid file setup failed. tgid=%d, process:%s, ret:%d\n", + pid_nr(file_priv->tgid), file_priv->process_name, ret); + + goto out_attr_priv; + } + + file_priv->obj_attr = obj_attr; + return 0; + +out_attr_priv: + kfree(attr_priv); +out_obj_attr: + kfree(obj_attr); +out: + return ret; +} + +void i915_gem_remove_sysfs_file_entry(struct drm_device *dev, + struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_file *file_local; + int open_count = 1; + + if (!i915_modparams.memtrack_debug) + return; + + /* + * The current drm file instance is already removed from filelist at + * this point. + * Check if this particular drm file being removed is the last one for + * that particular tgid, and no other instances for this tgid exist in + * the filelist. If so, remove the corresponding sysfs file entry also. + */ + list_for_each_entry(file_local, &dev->filelist, lhead) { + struct drm_i915_file_private *file_priv_local = + file_local->driver_priv; + + if (pid_nr(file_priv->tgid) == pid_nr(file_priv_local->tgid)) + open_count++; + } + + if (open_count == 1) { + struct i915_gem_file_attr_priv *attr_priv; + + if (WARN_ON(file_priv->obj_attr == NULL)) + return; + attr_priv = file_priv->obj_attr->private; + + sysfs_remove_bin_file(&dev_priv->memtrack_kobj, + file_priv->obj_attr); + + i915_error_state_buf_release(&attr_priv->buf); + kfree(file_priv->obj_attr->private); + kfree(file_priv->obj_attr); + } +} + +static struct bin_attribute error_state_attr = { .attr.name = "error", .attr.mode = S_IRUSR | S_IWUSR, .size = 0, @@ -540,6 +808,21 @@ static const struct bin_attribute error_state_attr = { .write = error_state_write, }; +static struct bin_attribute i915_gem_client_state_attr = { + .attr.name = "i915_gem_meminfo", + .attr.mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH, + .size = 0, + .read = i915_gem_clients_state_read, +}; + +static struct attribute *memtrack_kobj_attrs[] = {NULL}; + +static struct kobj_type memtrack_kobj_type = { + .release = NULL, + .sysfs_ops = NULL, + .default_attrs = memtrack_kobj_attrs, +}; + static void i915_setup_error_capture(struct device *kdev) { if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr)) @@ -602,6 +885,28 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv) DRM_ERROR("RPS sysfs setup failed\n"); i915_setup_error_capture(kdev); + + if (i915_modparams.memtrack_debug) { + /* + * Create the gfx_memtrack directory for memtrack sysfs files + */ + ret = kobject_init_and_add( + &dev_priv->memtrack_kobj, &memtrack_kobj_type, + &kdev->kobj, "gfx_memtrack"); + if (unlikely(ret != 0)) { + DRM_ERROR( + "i915 sysfs setup memtrack directory failed\n" + ); + kobject_put(&dev_priv->memtrack_kobj); + } else { + ret = sysfs_create_bin_file(&dev_priv->memtrack_kobj, + &i915_gem_client_state_attr); + if (ret) + DRM_ERROR( + "i915_gem_client_state sysfs setup failed\n" + ); + } + } } void i915_teardown_sysfs(struct drm_i915_private *dev_priv) @@ -620,4 +925,10 @@ void i915_teardown_sysfs(struct 
drm_i915_private *dev_priv) sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group); sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group); #endif + if (i915_modparams.memtrack_debug) { + sysfs_remove_bin_file(&dev_priv->memtrack_kobj, + &i915_gem_client_state_attr); + kobject_del(&dev_priv->memtrack_kobj); + kobject_put(&dev_priv->memtrack_kobj); + } } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index ef72da74b87f..13cfbab46327 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -281,7 +281,7 @@ TRACE_EVENT(i915_pipe_update_start, TP_fast_assign( __entry->pipe = crtc->pipe; __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + drm_crtc_index(&crtc->base)); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->min = crtc->debug.min_vbl; __entry->max = crtc->debug.max_vbl; @@ -673,6 +673,54 @@ TRACE_EVENT(i915_gem_ring_flush, __entry->invalidate, __entry->flush) ); +TRACE_EVENT(i915_gem_multi_domains, + TP_PROTO(struct drm_i915_gem_request *req), + TP_ARGS(req), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, ctx) + __field(u32, ring) + __field(u32, seqno) + __field(u32, global) + __field(int, prio_req) + __field(int, prio_ctx) + __field(bool, shadow_ctx) + __field(u32, hw_id) + __field(int, vgt_id) + __field(u32, pid) + ), + + TP_fast_assign( + __entry->dev = req->i915->drm.primary->index; + __entry->ring = req->engine->id; + __entry->ctx = req->fence.context; + __entry->seqno = req->fence.seqno; + __entry->global = req->global_seqno; + __entry->prio_req = req->priotree.priority; + __entry->prio_ctx = req->ctx->priority; + __entry->shadow_ctx = is_shadow_context(req->ctx); + __entry->hw_id = req->ctx->hw_id; + __entry->vgt_id = get_vgt_id(req->ctx); + __entry->pid = is_shadow_context(req->ctx) ? + get_pid_shadowed(req->ctx, req->engine) : + pid_nr(req->ctx->pid); + ), + + TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, " + "priority=%d (%d), is_shadow_ctx=%u, hw_id=%u, " + "vgt_id=%u, pid=%u", __entry->dev, __entry->ring, + __entry->ctx, __entry->seqno, __entry->global, + __entry->prio_req, __entry->prio_ctx, __entry->shadow_ctx, + __entry->hw_id, __entry->vgt_id, __entry->pid) +); + +DEFINE_EVENT(i915_gem_multi_domains, i915_gem_request_add_domain, + TP_PROTO(struct drm_i915_gem_request *req), + TP_ARGS(req) +); + + DECLARE_EVENT_CLASS(i915_gem_request, TP_PROTO(struct drm_i915_gem_request *req), TP_ARGS(req), diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 5fe9f3f39467..1ebddd934906 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -76,6 +76,17 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv) } dev_priv->vgpu.caps = __raw_i915_read32(dev_priv, vgtif_reg(vgt_caps)); + dev_priv->vgpu.scaler_owned = + __raw_i915_read32(dev_priv, vgtif_reg(scaler_owned)); + + /* If guest wants to enable pvmmio, it needs to enable it explicitly + * through vgt_if interface, and then read back the enable state from + * gvt layer. 
+	 */
+	__raw_i915_write32(dev_priv, vgtif_reg(enable_pvmmio),
+			   i915_modparams.enable_pvmmio);
+	i915_modparams.enable_pvmmio = __raw_i915_read32(dev_priv,
+			vgtif_reg(enable_pvmmio));
 
 	dev_priv->vgpu.active = true;
 	DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 36d4e635e4ce..34a6942c6e02 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -110,6 +110,8 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
 		to_intel_digital_connector_state(old_state);
 	struct drm_crtc_state *crtc_state;
 
+	intel_hdcp_atomic_check(conn, old_state, new_state);
+
 	if (!new_state->crtc)
 		return 0;
 
@@ -309,7 +311,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 		if (*scaler_id < 0) {
 			/* find a free scaler */
 			for (j = 0; j < intel_crtc->num_scalers; j++) {
-				if (!scaler_state->scalers[j].in_use) {
+				if (!scaler_state->scalers[j].in_use &&
+				    scaler_state->scalers[j].owned == 1) {
 					scaler_state->scalers[j].in_use = 1;
 					*scaler_id = j;
 					DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
@@ -333,10 +336,13 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 			 * scaler 0 operates in high quality (HQ) mode.
 			 * In this case use scaler 0 to take advantage of HQ mode
 			 */
-			*scaler_id = 0;
-			scaler_state->scalers[0].in_use = 1;
-			scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
-			scaler_state->scalers[1].in_use = 0;
+			if (scaler_state->scalers[0].owned == 1) {
+				*scaler_id = 0;
+				scaler_state->scalers[0].in_use = 1;
+				scaler_state->scalers[0].mode =
+					PS_SCALER_MODE_HQ;
+				scaler_state->scalers[1].in_use = 0;
+			}
 		} else {
 			scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
 		}
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 27743be5b768..9240fa79de7c 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -704,7 +704,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	u32 tmp;
 
-	if (!IS_GEN9_BC(dev_priv))
+	if (!IS_GEN9(dev_priv))
 		return;
 
 	i915_audio_component_get_power(kdev);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 5d4cd3d00564..b852befa7d5a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -356,7 +356,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
 	struct drm_display_mode *panel_fixed_mode;
 	int index;
 
-	index = i915.vbt_sdvo_panel_type;
+	index = i915_modparams.vbt_sdvo_panel_type;
 	if (index == -2) {
 		DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
 		return;
@@ -676,8 +676,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 	uint8_t vswing;
 
 	/* Don't read from VBT if module parameter has valid value*/
-	if (i915.edp_vswing) {
-		dev_priv->vbt.edp.low_vswing = i915.edp_vswing == 1;
+	if (i915_modparams.edp_vswing) {
+		dev_priv->vbt.edp.low_vswing =
+			i915_modparams.edp_vswing == 1;
 	} else {
 		vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
 		dev_priv->vbt.edp.low_vswing = vswing == 0;
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 4e00e5cb9fa1..5095c095da04 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -541,29 +541,16 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
 	spin_unlock_irq(&b->rb_lock);
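/*
 * Note on the intel_atomic.c hunk above: scaler allocation is now
 * ownership-aware, i.e. a first-fit search that skips scalers assigned
 * to another domain. A minimal sketch of that loop (illustrative, not
 * a verbatim copy of the driver code):
 *
 *	for (j = 0; j < intel_crtc->num_scalers; j++) {
 *		if (scaler_state->scalers[j].in_use ||
 *		    !scaler_state->scalers[j].owned)
 *			continue;
 *		scaler_state->scalers[j].in_use = 1;
 *		*scaler_id = j;
 *		break;
 *	}
 */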
} -static bool signal_valid(const struct drm_i915_gem_request *request) -{ - return intel_wait_check_request(&request->signaling.wait, request); -} - static bool signal_complete(const struct drm_i915_gem_request *request) { if (!request) return false; - /* If another process served as the bottom-half it may have already - * signalled that this wait is already completed. - */ - if (intel_wait_complete(&request->signaling.wait)) - return signal_valid(request); - - /* Carefully check if the request is complete, giving time for the + /* + * Carefully check if the request is complete, giving time for the * seqno to be visible or if the GPU hung. */ - if (__i915_request_irq_complete(request)) - return true; - - return false; + return __i915_request_irq_complete(request); } static struct drm_i915_gem_request *to_signaler(struct rb_node *rb) @@ -606,9 +593,13 @@ static int intel_breadcrumbs_signaler(void *arg) request = i915_gem_request_get_rcu(request); rcu_read_unlock(); if (signal_complete(request)) { - local_bh_disable(); - dma_fence_signal(&request->fence); - local_bh_enable(); /* kick start the tasklets */ + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &request->fence.flags)) { + local_bh_disable(); + dma_fence_signal(&request->fence); + GEM_BUG_ON(!i915_gem_request_completed(request)); + local_bh_enable(); /* kick start the tasklets */ + } spin_lock_irq(&b->rb_lock); diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 1241e5891b29..67bf84987dd9 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -506,7 +506,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, else cmd = 0; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); val &= ~DSPFREQGUAR_MASK; val |= (cmd << DSPFREQGUAR_SHIFT); @@ -516,7 +516,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, 50)) { DRM_ERROR("timed out waiting for CDclk change\n"); } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); mutex_lock(&dev_priv->sb_lock); @@ -593,7 +593,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, */ cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); val &= ~DSPFREQGUAR_MASK_CHV; val |= (cmd << DSPFREQGUAR_SHIFT_CHV); @@ -603,7 +603,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, 50)) { DRM_ERROR("timed out waiting for CDclk change\n"); } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); intel_update_cdclk(dev_priv); @@ -659,10 +659,10 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, "trying to change cdclk frequency with cdclk not enabled\n")) return; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); ret = sandybridge_pcode_write(dev_priv, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); if (ret) { DRM_ERROR("failed to inform pcode about cdclk change\n"); return; @@ -711,9 +711,9 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) DRM_ERROR("Switching back to LCPLL failed\n"); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data); - mutex_unlock(&dev_priv->rps.hw_lock); + 
mutex_unlock(&dev_priv->pcu_lock); I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1); @@ -859,16 +859,10 @@ static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) { - int min_cdclk = skl_calc_cdclk(0, vco); u32 val; WARN_ON(vco != 8100000 && vco != 8640000); - /* select the minimum CDCLK before enabling DPLL 0 */ - val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk); - I915_WRITE(CDCLK_CTL, val); - POSTING_READ(CDCLK_CTL); - /* * We always enable DPLL0 with the lowest link rate possible, but still * taking into account the VCO required to operate the eDP panel at the @@ -922,24 +916,24 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, { int cdclk = cdclk_state->cdclk; int vco = cdclk_state->vco; - u32 freq_select, pcu_ack; + u32 freq_select, pcu_ack, cdclk_ctl; int ret; WARN_ON((cdclk == 24000) != (vco == 0)); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, SKL_CDCLK_PREPARE_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, 3); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); if (ret) { DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", ret); return; } - /* set CDCLK_CTL */ + /* Choose frequency for this cdclk */ switch (cdclk) { case 450000: case 432000: @@ -967,16 +961,39 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, dev_priv->cdclk.hw.vco != vco) skl_dpll0_disable(dev_priv); + cdclk_ctl = I915_READ(CDCLK_CTL); + + if (dev_priv->cdclk.hw.vco != vco) { + /* Wa Display #1183: skl,kbl,cfl */ + cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); + cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); + I915_WRITE(CDCLK_CTL, cdclk_ctl); + } + + /* Wa Display #1183: skl,kbl,cfl */ + cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE; + I915_WRITE(CDCLK_CTL, cdclk_ctl); + POSTING_READ(CDCLK_CTL); + if (dev_priv->cdclk.hw.vco != vco) skl_dpll0_enable(dev_priv, vco); - I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk)); + /* Wa Display #1183: skl,kbl,cfl */ + cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); + I915_WRITE(CDCLK_CTL, cdclk_ctl); + + cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); + I915_WRITE(CDCLK_CTL, cdclk_ctl); + + /* Wa Display #1183: skl,kbl,cfl */ + cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE; + I915_WRITE(CDCLK_CTL, cdclk_ctl); POSTING_READ(CDCLK_CTL); /* inform PCU of the change */ - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); intel_update_cdclk(dev_priv); } @@ -1273,10 +1290,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, } /* Inform power controller of upcoming frequency change */ - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 0x80000000); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); if (ret) { DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n", @@ -1305,10 +1322,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; I915_WRITE(CDCLK_CTL, val); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 
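/*
 * Note on the skl_set_cdclk() hunk above (Wa Display #1183): the
 * divmux override brackets the whole transition, i.e. set
 * CDCLK_DIVMUX_CD_OVERRIDE before DPLL0 is re-enabled, rewrite the
 * frequency select/decimal fields, and only then clear the override.
 */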
DIV_ROUND_UP(cdclk, 25000)); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); if (ret) { DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", @@ -1523,12 +1540,12 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv, u32 val, divider, pcu_ack; int ret; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, SKL_CDCLK_PREPARE_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, 3); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); if (ret) { DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", ret); @@ -1580,9 +1597,9 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv, I915_WRITE(CDCLK_CTL, val); /* inform PCU of the change */ - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); intel_update_cdclk(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index b8315bca852b..d95aa81304cf 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -91,19 +91,16 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input) { int i; - for (i = 0; i < 9; i++) - result[i] = 0; - - for (i = 0; i < 3; i++) { - int64_t user_coeff = input[i * 3 + i]; + for (i = 0; i < 9; i++) { + int64_t user_coeff = input[i]; uint64_t limited_coeff = CTM_COEFF_LIMITED_RANGE >> 2; uint64_t abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff), 0, CTM_COEFF_4_0 - 1) >> 2; - result[i * 3 + i] = (limited_coeff * abs_coeff) >> 27; + result[i] = (limited_coeff * abs_coeff) >> 28; if (CTM_COEFF_NEGATIVE(user_coeff)) - result[i * 3 + i] |= CTM_COEFF_SIGN; + result[i] |= CTM_COEFF_SIGN; } } @@ -224,13 +221,34 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state) if (INTEL_GEN(dev_priv) > 6) { uint16_t postoff = 0; + uint16_t postoff_red = 0; + uint16_t postoff_green = 0; + uint16_t postoff_blue = 0; - if (intel_crtc_state->limited_color_range) + if (intel_crtc_state->limited_color_range) { postoff = (16 * (1 << 12) / 255) & 0x1fff; + postoff_red = postoff; + postoff_green = postoff; + postoff_blue = postoff; + } + + if (crtc_state->ctm_post_offset) { + struct drm_color_ctm_post_offset *ctm_post_offset = + (struct drm_color_ctm_post_offset *)crtc_state->ctm_post_offset->data; + + /* Convert to U0.12 format. 
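+ * The post-offset property values are U0.16, so dropping the low
+ * four bits yields U0.12 (e.g. 0x8000, i.e. 0.5, becomes 0x800);
+ * the clamps below then keep each channel between the limited-range
+ * offset and 0xfff.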
*/ + postoff_red = ctm_post_offset->red >> 4; + postoff_green = ctm_post_offset->green >> 4; + postoff_blue = ctm_post_offset->blue >> 4; + + postoff_red = clamp_val(postoff_red, postoff, 0xfff); + postoff_green = clamp_val(postoff_green, postoff, 0xfff); + postoff_blue = clamp_val(postoff_blue, postoff, 0xfff); + } - I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); - I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); - I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff); + I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff_red); + I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff_green); + I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff_blue); I915_WRITE(PIPE_CSC_MODE(pipe), 0); } else { diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 70e0ff41070c..70ccd79ec766 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -712,7 +712,7 @@ intel_crt_detect(struct drm_connector *connector, * broken monitor (without edid) to work behind a broken kvm (that fails * to have the right resistors for HP detection) needs to fix this up. * For now just bail out. */ - if (I915_HAS_HOTPLUG(dev_priv) && !i915.load_detect_test) { + if (I915_HAS_HOTPLUG(dev_priv) && !i915_modparams.load_detect_test) { status = connector_status_disconnected; goto out; } @@ -730,7 +730,7 @@ intel_crt_detect(struct drm_connector *connector, else if (INTEL_GEN(dev_priv) < 4) status = intel_crt_load_detect(crt, to_intel_crtc(connector->state->crtc)->pipe); - else if (i915.load_detect_test) + else if (i915_modparams.load_detect_test) status = connector_status_disconnected; else status = connector_status_unknown; diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 92c1f8e166dc..c83a2ff47ea5 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -440,7 +440,13 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) INIT_WORK(&dev_priv->csr.work, csr_load_work_fn); - if (!HAS_CSR(dev_priv)) + /* + * In a GVTg enabled environment, loading the CSR firmware for DomU doesn't + * make much sense since we don't allow it to control display power + * management settings. Furthermore, we can save some time for DomU bootup + * by skipping CSR loading. 
+	 */
+	if (!HAS_CSR(dev_priv) || intel_vgpu_active(dev_priv))
 		return;
 
 	if (IS_CANNONLAKE(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 5e5fe03b638c..41203a7854ad 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1533,6 +1533,35 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
 	I915_WRITE(reg, val);
 }
 
+int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
+				     bool enable)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	enum pipe pipe = 0;
+	int ret = 0;
+	uint32_t tmp;
+
+	if (WARN_ON(!intel_display_power_get_if_enabled(dev_priv,
+						intel_encoder->power_domain)))
+		return -ENXIO;
+
+	if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) {
+		ret = -EIO;
+		goto out;
+	}
+
+	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe));
+	if (enable)
+		tmp |= TRANS_DDI_HDCP_SIGNALLING;
+	else
+		tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
+	I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp);
+out:
+	intel_display_power_put(dev_priv, intel_encoder->power_domain);
+	return ret;
+}
+
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
 {
 	struct drm_device *dev = intel_connector->base.dev;
@@ -1554,7 +1583,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
 		goto out;
 	}
 
-	if (port == PORT_A)
+	if (port == PORT_A && !intel_vgpu_active(dev_priv))
 		cpu_transcoder = TRANSCODER_EDP;
 	else
 		cpu_transcoder = (enum transcoder) pipe;
@@ -2334,6 +2363,12 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
 
 	if (pipe_config->has_audio)
 		intel_audio_codec_enable(intel_encoder, pipe_config, conn_state);
+
+	/* Enable HDCP if it is desired */
+	if (conn_state->content_protection ==
+	    DRM_MODE_CONTENT_PROTECTION_DESIRED)
+		intel_hdcp_enable(to_intel_connector(conn_state->connector));
 }
 
 static void intel_disable_ddi(struct intel_encoder *intel_encoder,
@@ -2343,6 +2378,8 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder,
 	struct drm_encoder *encoder = &intel_encoder->base;
 	int type = intel_encoder->type;
 
+	intel_hdcp_disable(to_intel_connector(old_conn_state->connector));
+
 	if (old_crtc_state->has_audio)
 		intel_audio_codec_disable(intel_encoder);
 
@@ -2541,7 +2578,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
 	WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
 
-	if (port == PORT_A)
+	if (port == PORT_A && !intel_vgpu_active(dev_priv))
 		pipe_config->cpu_transcoder = TRANSCODER_EDP;
 
 	if (type == INTEL_OUTPUT_HDMI)
@@ -2632,11 +2669,18 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 		}
 	}
 
-	init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
+	/*
+	 * For port A, check whether the vGPU is active and a monitor is
+	 * attached to port A.
+	 */
+	init_hdmi = (intel_vgpu_active(dev_priv) && port == PORT_A &&
+		     (I915_READ(GEN8_DE_PORT_ISR) & BXT_DE_PORT_HP_DDIA)) ||
+		    (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
 		     dev_priv->vbt.ddi_port_info[port].supports_hdmi);
 	init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
 
-	if (intel_bios_is_lspcon_present(dev_priv, port)) {
+	if (!intel_vgpu_active(dev_priv) &&
+	    intel_bios_is_lspcon_present(dev_priv, port)) {
 		/*
 		 * Lspcon device needs to be driven with DP connector
 		 * with special detection sequence.
So make sure DP @@ -2739,7 +2783,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) /* In theory we don't need the encoder->type check, but leave it just in * case we have some really bad VBTs... */ - if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { + if ((intel_vgpu_active(dev_priv) && IS_BROXTON(dev_priv)) || + (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)) { if (!intel_ddi_init_hdmi_connector(intel_dig_port)) goto err; } diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 5f91ddc78c7a..870df613ddd9 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -343,7 +343,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) info->num_sprites[pipe] = 1; } - if (i915.disable_display) { + if (i915_modparams.disable_display) { DRM_INFO("Display disabled (module parameter)\n"); info->num_pipes = 0; } else if (info->num_pipes > 0 && diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5ebdb63330dd..01b432438bac 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -49,6 +49,10 @@ #include #include +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + /* Primary plane formats for gen <= 3 */ static const uint32_t i8xx_primary_formats[] = { DRM_FORMAT_C8, @@ -1000,7 +1004,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, return crtc->config->cpu_transcoder; } -static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe) +static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, + enum pipe pipe) { i915_reg_t reg = PIPEDSL(pipe); u32 line1, line2; @@ -1015,7 +1020,28 @@ static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe) msleep(5); line2 = I915_READ(reg) & line_mask; - return line1 == line2; + return line1 != line2; +} + +static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + /* Wait for the display line to settle/start moving */ + if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100)) + DRM_ERROR("pipe %c scanline %s wait timed out\n", + pipe_name(pipe), onoff(state)); +} + +static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc) +{ + wait_for_pipe_scanline_moving(crtc, false); +} + +static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc) +{ + wait_for_pipe_scanline_moving(crtc, true); } /* @@ -1038,7 +1064,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; - enum pipe pipe = crtc->pipe; if (INTEL_GEN(dev_priv) >= 4) { i915_reg_t reg = PIPECONF(cpu_transcoder); @@ -1049,9 +1074,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) 100)) WARN(1, "pipe_off wait timed out\n"); } else { - /* Wait for the display line to settle */ - if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100)) - WARN(1, "pipe_off wait timed out\n"); + intel_wait_for_pipe_scanline_stopped(crtc); } } @@ -1192,23 +1215,6 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) pipe_name(pipe)); } -static void assert_cursor(struct drm_i915_private *dev_priv, - enum pipe pipe, bool state) -{ - bool cur_state; - - if (IS_I845G(dev_priv) || 
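/*
 * Note on the pipe_scanline_is_moving() helpers above: the old
 * pipe_dsl_stopped() test is inverted, i.e. PIPEDSL is sampled twice,
 * 5 ms apart, and the pipe is considered moving when the two reads
 * differ. Usage sketch (illustrative):
 *
 *	intel_wait_for_pipe_scanline_stopped(crtc);  after pipe disable
 *	intel_wait_for_pipe_scanline_moving(crtc);   after pipe enable
 */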
IS_I865G(dev_priv)) - cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; - else - cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; - - I915_STATE_WARN(cur_state != state, - "cursor on pipe %c assertion failure (expected %s, current %s)\n", - pipe_name(pipe), onoff(state), onoff(cur_state)); -} -#define assert_cursor_enabled(d, p) assert_cursor(d, p, true) -#define assert_cursor_disabled(d, p) assert_cursor(d, p, false) - void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { @@ -1236,77 +1242,25 @@ void assert_pipe(struct drm_i915_private *dev_priv, pipe_name(pipe), onoff(state), onoff(cur_state)); } -static void assert_plane(struct drm_i915_private *dev_priv, - enum plane plane, bool state) +static void assert_plane(struct intel_plane *plane, bool state) { - u32 val; - bool cur_state; + bool cur_state = plane->get_hw_state(plane); - val = I915_READ(DSPCNTR(plane)); - cur_state = !!(val & DISPLAY_PLANE_ENABLE); I915_STATE_WARN(cur_state != state, - "plane %c assertion failure (expected %s, current %s)\n", - plane_name(plane), onoff(state), onoff(cur_state)); + "%s assertion failure (expected %s, current %s)\n", + plane->base.name, onoff(state), onoff(cur_state)); } -#define assert_plane_enabled(d, p) assert_plane(d, p, true) -#define assert_plane_disabled(d, p) assert_plane(d, p, false) - -static void assert_planes_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - int i; - - /* Primary planes are fixed to pipes on gen4+ */ - if (INTEL_GEN(dev_priv) >= 4) { - u32 val = I915_READ(DSPCNTR(pipe)); - I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE, - "plane %c assertion failure, should be disabled but not\n", - plane_name(pipe)); - return; - } +#define assert_plane_enabled(p) assert_plane(p, true) +#define assert_plane_disabled(p) assert_plane(p, false) - /* Need to check both planes against the pipe */ - for_each_pipe(dev_priv, i) { - u32 val = I915_READ(DSPCNTR(i)); - enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> - DISPPLANE_SEL_PIPE_SHIFT; - I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, - "plane %c assertion failure, should be off on pipe %c but is still active\n", - plane_name(i), pipe_name(pipe)); - } -} - -static void assert_sprites_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe) +static void assert_planes_disabled(struct intel_crtc *crtc) { - int sprite; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane; - if (INTEL_GEN(dev_priv) >= 9) { - for_each_sprite(dev_priv, pipe, sprite) { - u32 val = I915_READ(PLANE_CTL(pipe, sprite)); - I915_STATE_WARN(val & PLANE_CTL_ENABLE, - "plane %d assertion failure, should be off on pipe %c but is still active\n", - sprite, pipe_name(pipe)); - } - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - for_each_sprite(dev_priv, pipe, sprite) { - u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite)); - I915_STATE_WARN(val & SP_ENABLE, - "sprite %c assertion failure, should be off on pipe %c but is still active\n", - sprite_name(pipe, sprite), pipe_name(pipe)); - } - } else if (INTEL_GEN(dev_priv) >= 7) { - u32 val = I915_READ(SPRCTL(pipe)); - I915_STATE_WARN(val & SPRITE_ENABLE, - "sprite %c assertion failure, should be off on pipe %c but is still active\n", - plane_name(pipe), pipe_name(pipe)); - } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { - u32 val = I915_READ(DVSCNTR(pipe)); - I915_STATE_WARN(val & DVS_ENABLE, - "sprite %c assertion failure, should be off on pipe %c but is still active\n", - 
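/*
 * Note on the assert_plane() rework above: every plane type now
 * reports its own enable bit through plane->get_hw_state(), so the
 * per-generation register walks collapse into one loop, as in the
 * new assert_planes_disabled() below:
 *
 *	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
 *		assert_plane_disabled(plane);
 */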
plane_name(pipe), pipe_name(pipe)); - } + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) + assert_plane_disabled(plane); } static void assert_vblank_disabled(struct drm_crtc *crtc) @@ -1907,9 +1861,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc) DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); - assert_planes_disabled(dev_priv, pipe); - assert_cursor_disabled(dev_priv, pipe); - assert_sprites_disabled(dev_priv, pipe); + assert_planes_disabled(crtc); /* * A pipe without a PLL won't actually be able to drive bits from @@ -1944,15 +1896,14 @@ static void intel_enable_pipe(struct intel_crtc *crtc) POSTING_READ(reg); /* - * Until the pipe starts DSL will read as 0, which would cause - * an apparent vblank timestamp jump, which messes up also the - * frame count when it's derived from the timestamps. So let's - * wait for the pipe to start properly before we call - * drm_crtc_vblank_on() + * Until the pipe starts PIPEDSL reads will return a stale value, + * which causes an apparent vblank timestamp jump when PIPEDSL + * resets to its proper value. That also messes up the frame count + * when it's derived from the timestamps. So let's wait for the + * pipe to start properly before we call drm_crtc_vblank_on() */ - if (dev->max_vblank_count == 0 && - wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50)) - DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe)); + if (dev->max_vblank_count == 0) + intel_wait_for_pipe_scanline_moving(crtc); } /** @@ -1979,9 +1930,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc) * Make sure planes won't keep trying to pump pixels to us, * or we might hang the display. */ - assert_planes_disabled(dev_priv, pipe); - assert_cursor_disabled(dev_priv, pipe); - assert_sprites_disabled(dev_priv, pipe); + assert_planes_disabled(crtc); reg = PIPECONF(cpu_transcoder); val = I915_READ(reg); @@ -2811,6 +2760,23 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state, crtc_state->active_planes); } +static void intel_plane_disable_noatomic(struct intel_crtc *crtc, + struct intel_plane *plane) +{ + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + + intel_set_plane_visible(crtc_state, plane_state, false); + + if (plane->id == PLANE_PRIMARY) + intel_pre_disable_primary_noatomic(&crtc->base); + + trace_intel_disable_plane(&plane->base, crtc); + plane->disable_plane(plane, crtc); +} + static void intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, struct intel_initial_plane_config *plane_config) @@ -2868,12 +2834,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, * simplest solution is to just disable the primary plane now and * pretend the BIOS never had it enabled. 
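 * The teardown goes through intel_plane_disable_noatomic() (added
 * above), which marks the plane invisible in the atomic state, runs
 * the primary-plane pre-disable bookkeeping, and then calls
 * plane->disable_plane().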
*/ - intel_set_plane_visible(to_intel_crtc_state(crtc_state), - to_intel_plane_state(plane_state), - false); - intel_pre_disable_primary_noatomic(&intel_crtc->base); - trace_intel_disable_plane(primary, intel_crtc); - intel_plane->disable_plane(intel_plane, intel_crtc); + intel_plane_disable_noatomic(intel_crtc, intel_plane); return; @@ -3379,6 +3340,31 @@ static void i9xx_disable_primary_plane(struct intel_plane *primary, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +static bool i9xx_plane_get_hw_state(struct intel_plane *primary) +{ + + struct drm_i915_private *dev_priv = to_i915(primary->base.dev); + enum intel_display_power_domain power_domain; + enum plane plane = primary->plane; + enum pipe pipe = primary->pipe; + bool ret; + + /* + * Not 100% correct for planes that can move between pipes, + * but that's only the case for gen2-4 which don't have any + * display power wells. + */ + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static u32 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane) { @@ -3553,6 +3539,61 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, return plane_ctl; } +static void pv_update_primary_plane_reg(struct intel_plane *plane, + u32 stride, uint32_t src_w, uint32_t src_h, + uint32_t dst_w, uint32_t dst_h, u32 aux_stride, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + int i; + struct pv_plane_update tmp_plane; + int src_x = plane_state->main.x; + int src_y = plane_state->main.y; + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + u32 __iomem *pv_plane = (u32 *)&(dev_priv->shared_page->pv_plane); + + memset(&tmp_plane, 0, sizeof(struct pv_plane_update)); + if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { + tmp_plane.flags |= PLANE_COLOR_CTL_BIT; + tmp_plane.plane_color_ctl = PLANE_COLOR_PIPE_GAMMA_ENABLE | + PLANE_COLOR_PIPE_CSC_ENABLE | + PLANE_COLOR_PLANE_GAMMA_DISABLE; + } + + tmp_plane.plane_ctl = plane_state->ctl; + tmp_plane.plane_offset = (src_y << 16) | src_x; + tmp_plane.plane_stride = stride; + tmp_plane.plane_size = (src_h << 16) | src_w; + tmp_plane.plane_aux_dist = + (plane_state->aux.offset - plane_state->main.offset) | aux_stride; + tmp_plane.plane_aux_offset = + (plane_state->aux.y << 16) | plane_state->aux.x; + + /* program plane scaler */ + if (plane_state->scaler_id >= 0) { + WARN_ON(!dst_w || !dst_h); + + tmp_plane.flags |= PLANE_SCALER_BIT; + tmp_plane.ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane->id) | + crtc_state->scaler_state.scalers[plane_state->scaler_id].mode; + tmp_plane.ps_pwr_gate = 0; + tmp_plane.ps_win_ps = + (plane_state->base.dst.x1 << 16) | plane_state->base.dst.y1; + tmp_plane.ps_win_sz = (dst_w << 16) | dst_h; + tmp_plane.plane_pos = 0; + } else { + tmp_plane.plane_pos = + (plane_state->base.dst.y1 << 16) | plane_state->base.dst.x1; + } + + spin_lock(&dev_priv->shared_page_lock); + for (i = 0; i < sizeof(struct pv_plane_update) / 4; i++) + writel(*((u32 *)(&tmp_plane) + i), pv_plane + i); + I915_WRITE_FW(PLANE_SURF(plane->pipe, plane->id), + intel_plane_ggtt_offset(plane_state) + plane_state->main.offset); + spin_unlock(&dev_priv->shared_page_lock); +} + static void skylake_update_primary_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state 
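/*
 * Note on pv_update_primary_plane_reg() above: the pvmmio path
 * batches the whole plane update into one shared-page record so the
 * hypervisor traps a single PLANE_SURF write instead of a dozen MMIO
 * writes. Ordering sketch (illustrative):
 *
 *	spin_lock(&dev_priv->shared_page_lock);
 *	copy struct pv_plane_update into the shared page (writel loop);
 *	I915_WRITE_FW(PLANE_SURF(pipe, id), ...);  single trapped write
 *	spin_unlock(&dev_priv->shared_page_lock);
 */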
*plane_state) @@ -3577,6 +3618,9 @@ static void skylake_update_primary_plane(struct intel_plane *plane, int dst_w = drm_rect_width(&plane_state->base.dst); int dst_h = drm_rect_height(&plane_state->base.dst); unsigned long irqflags; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + struct intel_gvt *gvt = dev_priv->gvt; +#endif /* Sizes are 0 based */ src_w--; @@ -3589,6 +3633,13 @@ static void skylake_update_primary_plane(struct intel_plane *plane, crtc->adjusted_x = src_x; crtc->adjusted_y = src_y; + if (intel_vgpu_active(dev_priv) && + i915_modparams.enable_pvmmio & PVMMIO_PLANE_UPDATE) { + pv_update_primary_plane_reg(plane, stride, src_w, src_h, + dst_w, dst_h, aux_stride, crtc_state, plane_state); + return; + } + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { @@ -3598,6 +3649,30 @@ static void skylake_update_primary_plane(struct intel_plane *plane, PLANE_COLOR_PLANE_GAMMA_DISABLE); } +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (gvt && gvt->pipe_info[pipe].plane_owner[plane_id]) { + struct intel_dom0_plane_regs *dom0_regs = + &gvt->pipe_info[pipe].dom0_regs[plane_id]; + + dom0_regs->plane_ctl = plane_ctl; + dom0_regs->plane_offset = (src_y << 16) | src_x; + dom0_regs->plane_stride = stride; + dom0_regs->plane_size = (src_h << 16) | src_w; + dom0_regs->plane_aux_dist = + (plane_state->aux.offset - surf_addr) | aux_stride; + dom0_regs->plane_aux_offset = + (plane_state->aux.y << 16) | plane_state->aux.x; + dom0_regs->plane_pos = (dst_y << 16) | dst_x; + dom0_regs->plane_surf = intel_plane_ggtt_offset(plane_state) + + surf_addr; + /* TODO: to support plane scaling in gvt*/ + if (scaler_id >= 0) + DRM_ERROR("GVT not support plane scaling yet\n"); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + return; + } +#endif + I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl); I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x); I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride); @@ -3637,6 +3712,17 @@ static void skylake_disable_primary_plane(struct intel_plane *primary, enum plane_id plane_id = primary->id; enum pipe pipe = primary->pipe; unsigned long irqflags; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + struct intel_gvt *gvt = dev_priv->gvt; + + if (gvt && gvt->pipe_info[pipe].plane_owner[plane_id]) { + struct intel_dom0_plane_regs *dom0_regs = + &gvt->pipe_info[pipe].dom0_regs[plane_id]; + dom0_regs->plane_ctl = 0; + dom0_regs->plane_surf = 0; + return; + } +#endif spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); @@ -3701,7 +3787,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) /* reset doesn't touch the display */ - if (!i915.force_reset_modeset_test && + if (!i915_modparams.force_reset_modeset_test && !gpu_reset_clobbers_display(dev_priv)) return; @@ -3757,7 +3843,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) int ret; /* reset doesn't touch the display */ - if (!i915.force_reset_modeset_test && + if (!i915_modparams.force_reset_modeset_test && !gpu_reset_clobbers_display(dev_priv)) return; @@ -3782,6 +3868,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) intel_pps_unlock_regs_wa(dev_priv); intel_modeset_init_hw(dev); + intel_init_clock_gating(dev_priv); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) @@ -3841,6 +3928,26 @@ static void intel_update_pipe_config(struct intel_crtc *crtc, } } +static void intel_update_background_color(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = 
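/*
 * Note on the plane-owner path above: when GVT has assigned the plane
 * to a guest, Dom0's update is written into the dom0_regs shadow
 * rather than the hardware (presumably consumed by the mediator);
 * plane scaling is not mediated yet, hence the DRM_ERROR when
 * scaler_id >= 0.
 */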
dev->dev_private; + struct intel_crtc_state *pipe_config = + to_intel_crtc_state(crtc->base.state); + struct drm_rgba background = pipe_config->base.background_color; + uint32_t val; + + if (INTEL_GEN(dev_priv) >= 9) { + /* BGR 16bpc ==> RGB 10bpc */ + val = DRM_RGBA_REDBITS(background, 10) << 20 + | DRM_RGBA_GREENBITS(background, 10) << 10 + | DRM_RGBA_BLUEBITS(background, 10); + + + I915_WRITE(PIPE_BOTTOM_COLOR(crtc->pipe), val); + } +} + static void intel_fdi_normal_train(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; @@ -4954,11 +5061,12 @@ void hsw_enable_ips(struct intel_crtc *crtc) * a vblank wait. */ - assert_plane_enabled(dev_priv, crtc->plane); + assert_plane_enabled(to_intel_plane(crtc->base.primary)); + if (IS_BROADWELL(dev_priv)) { - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000)); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); /* Quoting Art Runyan: "its not safe to expect any particular * value in IPS_CTL bit 31 after enabling IPS through the * mailbox." Moreover, the mailbox may return a bogus state, @@ -4986,11 +5094,12 @@ void hsw_disable_ips(struct intel_crtc *crtc) if (!crtc->config->ips_enabled) return; - assert_plane_enabled(dev_priv, crtc->plane); + assert_plane_enabled(to_intel_plane(crtc->base.primary)); + if (IS_BROADWELL(dev_priv)) { - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); /* wait for pcode to finish disabling IPS, which may take up to 42ms */ if (intel_wait_for_register(dev_priv, IPS_CTL, IPS_ENABLE, 0, @@ -5120,8 +5229,8 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->base.state); struct drm_plane *primary = crtc->base.primary; - struct drm_plane_state *old_pri_state = - drm_atomic_get_existing_plane_state(old_state, primary); + struct drm_plane_state *old_pri_state = primary ? + drm_atomic_get_existing_plane_state(old_state, primary) : NULL; intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits); @@ -5151,8 +5260,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, struct drm_i915_private *dev_priv = to_i915(dev); struct drm_atomic_state *old_state = old_crtc_state->base.state; struct drm_plane *primary = crtc->base.primary; - struct drm_plane_state *old_pri_state = - drm_atomic_get_existing_plane_state(old_state, primary); + struct drm_plane_state *old_pri_state = primary ? 
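/*
 * Note on intel_update_background_color() above: the 16-bit-per-
 * channel property is truncated to the 10-bit fields of
 * PIPE_BOTTOM_COLOR, i.e. red at bits 29:20, green at 19:10 and blue
 * at 9:0; a full-scale red of 0xffff therefore becomes 0x3ff << 20.
 */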
+ drm_atomic_get_existing_plane_state(old_state, primary) : NULL; bool modeset = needs_modeset(&pipe_config->base); struct intel_atomic_state *old_intel_state = to_intel_atomic_state(old_state); @@ -5221,15 +5330,37 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, intel_update_watermarks(crtc); } +static void disable_primary_plane(struct drm_i915_private *dev_priv, int pipe) +{ + u32 val; + + val = I915_READ(PLANE_CTL(pipe, PLANE_PRIMARY)); + if (val & PLANE_CTL_ENABLE) { + I915_WRITE(PLANE_CTL(pipe, PLANE_PRIMARY), 0); + I915_WRITE(PLANE_SURF(pipe, PLANE_PRIMARY), 0); + POSTING_READ(PLANE_SURF(pipe, PLANE_PRIMARY)); + } +} + static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask) { struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_plane *p; int pipe = intel_crtc->pipe; intel_crtc_dpms_overlay_disable(intel_crtc); + /* + * On BIOS based systems, if Dom0 doesn't own Plane 0 (Primary Plane), + * then during modeset, it wouldn't be able to disable this plane and + * this can lead to unexpected behavior after the modeset. Therefore, + * disable the primary plane if it was enabled by the BIOS/GOP. + */ + if (dev_priv->gvt && i915_modparams.avail_planes_per_pipe) + disable_primary_plane(dev_priv, pipe); + drm_for_each_plane_mask(p, dev, plane_mask) to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc); @@ -5981,6 +6112,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum intel_display_power_domain domain; + struct intel_plane *plane; u64 domains; struct drm_atomic_state *state; struct intel_crtc_state *crtc_state; @@ -5989,11 +6121,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, if (!intel_crtc->active) return; - if (crtc->primary->state->visible) { - intel_pre_disable_primary_noatomic(crtc); + for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) { + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); - intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); - crtc->primary->state->visible = false; + if (plane_state->base.visible) + intel_plane_disable_noatomic(intel_crtc, plane); } state = drm_atomic_state_alloc(crtc->dev); @@ -6307,7 +6440,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - pipe_config->ips_enabled = i915.enable_ips && + pipe_config->ips_enabled = i915_modparams.enable_ips && hsw_crtc_supports_ips(crtc) && pipe_config_supports_ips(dev_priv, pipe_config); } @@ -6488,8 +6621,8 @@ intel_link_compute_m_n(int bits_per_pixel, int nlanes, static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) { - if (i915.panel_use_ssc >= 0) - return i915.panel_use_ssc != 0; + if (i915_modparams.panel_use_ssc >= 0) + return i915_modparams.panel_use_ssc != 0; return dev_priv->vbt.lvds_use_ssc && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); } @@ -8500,7 +8633,8 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc, /* find scaler attached to this pipe */ for (i = 0; i < crtc->num_scalers; i++) { ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i)); - if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { + if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK) && + 
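/*
 * Note on disable_primary_plane() above: under plane restriction Dom0
 * may not own PLANE_PRIMARY, so without this explicit disable a
 * GOP/BIOS-enabled primary plane could keep scanning out stale memory
 * across the first modeset.
 */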
scaler_state->scalers[i].owned) { id = i; pipe_config->pch_pfit.enabled = true; pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i)); @@ -8840,11 +8974,11 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) { if (IS_HASWELL(dev_priv)) { - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val)) DRM_DEBUG_KMS("Failed to write to D_COMP\n"); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } else { I915_WRITE(D_COMP_BDW, val); POSTING_READ(D_COMP_BDW); @@ -9558,6 +9692,23 @@ static void i845_disable_cursor(struct intel_plane *plane, i845_update_cursor(plane, NULL, NULL); } +static bool i845_cursor_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(PIPE_A); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -9751,6 +9902,28 @@ static void i9xx_disable_cursor(struct intel_plane *plane, i9xx_update_cursor(plane, NULL, NULL); } +static bool i9xx_cursor_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + enum pipe pipe = plane->pipe; + bool ret; + + /* + * Not 100% correct for planes that can move between pipes, + * but that's only the case for gen2-3 which don't have any + * display power wells. + */ + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} /* VESA 640x480x72Hz mode to set on the pipe */ static struct drm_display_mode load_detect_mode = { @@ -10565,6 +10738,11 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, pipe_config); } + if (crtc->state->background_color.v != crtc_state->background_color.v) { + pipe_config->update_pipe = true; + crtc_state->planes_changed = true; + } + return ret; } @@ -11024,7 +11202,8 @@ intel_modeset_update_crtc_state(struct drm_atomic_state *state) * Update legacy state to satisfy fbc code. This can * be removed when fbc uses the atomic state. 
*/ - if (drm_atomic_get_existing_plane_state(state, crtc->primary)) { + if (crtc->primary && + drm_atomic_get_existing_plane_state(state, crtc->primary)) { struct drm_plane_state *plane_state = crtc->primary->state; crtc->primary->fb = plane_state->fb; @@ -11382,7 +11561,8 @@ static void verify_wm_state(struct drm_crtc *crtc, const enum pipe pipe = intel_crtc->pipe; int plane, level, max_level = ilk_wm_max_level(dev_priv); - if (INTEL_GEN(dev_priv) < 9 || !new_state->active) + if (INTEL_GEN(dev_priv) < 9 || !new_state->active || + i915_modparams.avail_planes_per_pipe) return; skl_pipe_wm_get_hw_state(crtc, &hw_wm); @@ -11624,7 +11804,16 @@ verify_crtc_state(struct drm_crtc *crtc, intel_pipe_config_sanity_check(dev_priv, pipe_config); sw_config = to_intel_crtc_state(new_crtc_state); - if (!intel_pipe_config_compare(dev_priv, sw_config, + + /* + * Only check for pipe config if we are not in a GVT guest environment, + * because such a check in a GVT guest environment doesn't make any sense + * as we don't allow the guest to do a mode set, so there can very well + * be a difference between what it has programmed vs. what the host + * truly configured the HW pipe to be in. + */ + if (!intel_vgpu_active(dev_priv) && + !intel_pipe_config_compare(dev_priv, sw_config, pipe_config, false)) { I915_STATE_WARN(1, "pipe state doesn't match!\n"); intel_dump_pipe_config(intel_crtc, pipe_config, @@ -11673,11 +11862,13 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, if (new_state->active) I915_STATE_WARN(!(pll->active_mask & crtc_mask), "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", - pipe_name(drm_crtc_index(crtc)), pll->active_mask); + pipe_name(to_intel_crtc(crtc)->pipe), + pll->active_mask); else I915_STATE_WARN(pll->active_mask & crtc_mask, "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", - pipe_name(drm_crtc_index(crtc)), pll->active_mask); + pipe_name(to_intel_crtc(crtc)->pipe), + pll->active_mask); I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", @@ -11708,10 +11899,10 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc, I915_STATE_WARN(pll->active_mask & crtc_mask, "pll active mismatch (didn't expect pipe %c in active mask)\n", - pipe_name(drm_crtc_index(crtc))); + pipe_name(to_intel_crtc(crtc)->pipe)); I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, "pll enabled crtcs mismatch (found %x in enabled mask)\n", - pipe_name(drm_crtc_index(crtc))); + pipe_name(to_intel_crtc(crtc)->pipe)); } } @@ -12080,7 +12271,7 @@ static int intel_atomic_check(struct drm_device *dev, return ret; } - if (i915.fastboot && + if (i915_modparams.fastboot && intel_pipe_config_compare(dev_priv, to_intel_crtc_state(old_crtc_state), pipe_config, true)) { @@ -12130,7 +12321,8 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) if (!dev->max_vblank_count) return drm_crtc_accurate_vblank_count(&crtc->base); - return dev->driver->get_vblank_counter(dev, crtc->pipe); + return dev->driver->get_vblank_counter(dev, + drm_crtc_index(&crtc->base)); } static void intel_atomic_wait_for_vblanks(struct drm_device *dev, @@ -12140,42 +12332,44 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev, unsigned last_vblank_count[I915_MAX_PIPES]; enum pipe pipe; int ret; + struct drm_crtc *c; if (!crtc_mask) return; + for_each_crtc(dev, c) { + struct intel_crtc *crtc = to_intel_crtc(c); - for_each_pipe(dev_priv, pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, - 
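/*
 * Note on the vblank-wait rework above: drm_vblank state is keyed by
 * crtc index (c->index) while the saved counts stay keyed by hardware
 * pipe; with plane restriction the two can differ, e.g. a guest that
 * owns only pipe B registers a single crtc with index 0.
 */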
pipe); + pipe = crtc->pipe; - if (!((1 << pipe) & crtc_mask)) + if (!((1 << c->index) & crtc_mask)) continue; ret = drm_crtc_vblank_get(&crtc->base); if (WARN_ON(ret != 0)) { - crtc_mask &= ~(1 << pipe); + crtc_mask &= ~(1 << (c->index)); continue; } - - last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base); + last_vblank_count[pipe] = drm_crtc_vblank_count(c); } - for_each_pipe(dev_priv, pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, - pipe); + for_each_crtc(dev, c) { + struct intel_crtc *crtc = to_intel_crtc(c); long lret; - if (!((1 << pipe) & crtc_mask)) + pipe = crtc->pipe; + + if (!((1 << c->index) & crtc_mask)) continue; - lret = wait_event_timeout(dev->vblank[pipe].queue, + lret = wait_event_timeout(dev->vblank[c->index].queue, last_vblank_count[pipe] != - drm_crtc_vblank_count(&crtc->base), + drm_crtc_vblank_count(c), msecs_to_jiffies(50)); WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe)); - drm_crtc_vblank_put(&crtc->base); + drm_crtc_vblank_put(c); + } } @@ -12215,7 +12409,8 @@ static void intel_update_crtc(struct drm_crtc *crtc, pipe_config); } - if (drm_atomic_get_existing_plane_state(state, crtc->primary)) { + if (crtc->primary && + drm_atomic_get_existing_plane_state(state, crtc->primary)) { intel_fbc_enable( intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state)); @@ -12895,6 +13090,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, /* Perform vblank evasion around commit operation */ intel_pipe_update_start(intel_crtc); + if (to_intel_crtc_state(crtc->state)->update_pipe) + intel_update_background_color(intel_crtc); + if (modeset) goto out; @@ -13214,13 +13412,14 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); primary->check_plane = intel_check_primary_plane; - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 10) { intel_primary_formats = skl_primary_formats; num_formats = ARRAY_SIZE(skl_primary_formats); modifiers = skl_format_modifiers_ccs; primary->update_plane = skylake_update_primary_plane; primary->disable_plane = skylake_disable_primary_plane; + primary->get_hw_state = skl_plane_get_hw_state; } else if (INTEL_GEN(dev_priv) >= 9) { intel_primary_formats = skl_primary_formats; num_formats = ARRAY_SIZE(skl_primary_formats); @@ -13231,6 +13430,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->update_plane = skylake_update_primary_plane; primary->disable_plane = skylake_disable_primary_plane; + primary->get_hw_state = skl_plane_get_hw_state; } else if (INTEL_GEN(dev_priv) >= 4) { intel_primary_formats = i965_primary_formats; num_formats = ARRAY_SIZE(i965_primary_formats); @@ -13238,6 +13438,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->update_plane = i9xx_update_primary_plane; primary->disable_plane = i9xx_disable_primary_plane; + primary->get_hw_state = i9xx_plane_get_hw_state; } else { intel_primary_formats = i8xx_primary_formats; num_formats = ARRAY_SIZE(i8xx_primary_formats); @@ -13245,6 +13446,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->update_plane = i9xx_update_primary_plane; primary->disable_plane = i9xx_disable_primary_plane; + primary->get_hw_state = i9xx_plane_get_hw_state; } if (INTEL_GEN(dev_priv) >= 9) @@ -13302,6 +13504,109 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) return 
ERR_PTR(ret); } +static struct intel_plane * +intel_skl_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, + int plane, bool is_primary) +{ + struct intel_plane *intel_plane = NULL; + struct intel_plane_state *state = NULL; + unsigned long possible_crtcs; + const uint32_t *plane_formats; + unsigned int supported_rotations, plane_type; + unsigned int num_formats; + const uint64_t *modifiers; + int ret; + + intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL); + if (!intel_plane) { + ret = -ENOMEM; + goto fail; + } + + state = intel_create_plane_state(&intel_plane->base); + if (!state) { + ret = -ENOMEM; + goto fail; + } + + intel_plane->base.state = &state->base; + intel_plane->can_scale = false; + state->scaler_id = -1; + intel_plane->pipe = pipe; + + /* + * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS + * port is hooked to pipe B. Hence we want plane A feeding pipe B. + */ + if (is_primary) { + intel_plane->plane = (enum plane) pipe; + intel_plane->check_plane = intel_check_primary_plane; + plane_type = DRM_PLANE_TYPE_PRIMARY; + } else { + intel_plane->plane = plane; + intel_plane->check_plane = intel_check_sprite_plane; + plane_type = DRM_PLANE_TYPE_OVERLAY; + } + + if (plane == PLANE_PRIMARY) { + intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); + intel_plane->update_plane = skylake_update_primary_plane; + intel_plane->disable_plane = skylake_disable_primary_plane; + intel_plane->get_hw_state = skl_plane_get_hw_state; + } else { + intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane); + intel_plane->update_plane = skl_update_plane; + intel_plane->disable_plane = skl_disable_plane; + intel_plane->get_hw_state = skl_plane_get_hw_state; + } + + intel_plane->id = plane; + plane_formats = skl_primary_formats; + + if (pipe < PIPE_C) + modifiers = skl_format_modifiers_ccs; + else + modifiers = skl_format_modifiers_noccs; + + num_formats = ARRAY_SIZE(skl_primary_formats); + + /* + * Drop final format (NV12) for pipes or hardware steppings + * that don't support it. 
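+ * (NV12 appears to be the final entry of skl_primary_formats[] in
+ * this tree, so trimming num_formats by one hides exactly that
+ * format.)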
+ */ + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_C0) || pipe >= PIPE_C + || plane >= 2) + num_formats--; + + + possible_crtcs = (1 << dev_priv->drm.mode_config.num_crtc); + ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base, + possible_crtcs, &intel_plane_funcs, + plane_formats, num_formats, + modifiers, + plane_type, + "plane %d%c", plane+1, pipe_name(pipe)); + + if (ret) + goto fail; + + supported_rotations = DRM_MODE_ROTATE_0; + if (INTEL_GEN(dev_priv) >= 4) + drm_plane_create_rotation_property(&intel_plane->base, + DRM_MODE_ROTATE_0, + supported_rotations); + + drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs); + + return intel_plane; + +fail: + kfree(state); + kfree(intel_plane); + + return ERR_PTR(ret); +} + static struct intel_plane * intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) @@ -13334,10 +13639,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { cursor->update_plane = i845_update_cursor; cursor->disable_plane = i845_disable_cursor; + cursor->get_hw_state = i845_cursor_get_hw_state; cursor->check_plane = i845_check_cursor; } else { cursor->update_plane = i9xx_update_cursor; cursor->disable_plane = i9xx_disable_cursor; + cursor->get_hw_state = i9xx_cursor_get_hw_state; cursor->check_plane = i9xx_check_cursor; } @@ -13394,11 +13701,109 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc, scaler->in_use = 0; scaler->mode = PS_SCALER_MODE_DYN; + scaler->owned = 1; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (!intel_vgpu_active(dev_priv) && + intel_gvt_active(dev_priv) && + dev_priv->gvt->pipe_info[crtc->pipe].scaler_owner[i] != 0) + scaler->owned = 0; +#endif + if (intel_vgpu_active(dev_priv) && + !(1 << (crtc->pipe * SKL_NUM_SCALERS + i) & + dev_priv->vgpu.scaler_owned)) + scaler->owned = 0; } scaler_state->scaler_id = -1; } +static void intel_create_background_color_property(struct drm_device *dev, + struct intel_crtc *crtc) +{ + if (!dev->mode_config.prop_background_color) + dev->mode_config.prop_background_color = + drm_mode_create_background_color_property(dev); + if (!dev->mode_config.prop_background_color) + return; + + drm_object_attach_property(&crtc->base.base, + dev->mode_config.prop_background_color, + crtc->base.state->background_color.v); +} + +static int intel_crtc_init_restrict_planes(struct drm_i915_private *dev_priv, + enum pipe pipe, int planes_mask) +{ + struct intel_crtc *intel_crtc; + struct intel_crtc_state *crtc_state; + struct intel_plane *primary = NULL, *intel_plane = NULL; + bool is_primary = true; + int plane, ret; + + intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); + if (!intel_crtc) + return -ENOMEM; + + crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); + if (!crtc_state) { + ret = -ENOMEM; + goto fail; + } + intel_crtc->config = crtc_state; + intel_crtc->base.state = &crtc_state->base; + crtc_state->base.crtc = &intel_crtc->base; + + for_each_universal_plane(dev_priv, pipe, plane) { + if (planes_mask & BIT(plane)) { + intel_plane = intel_skl_plane_create(dev_priv, + pipe, plane, is_primary); + if (IS_ERR(intel_plane)) { + DRM_DEBUG_KMS(" plane %d failed for pipe %d\n", plane, pipe); + ret = PTR_ERR(intel_plane); + goto fail; + } + if (is_primary) { + primary = intel_plane; + is_primary = false; + } + DRM_DEBUG_KMS(" plane %d created for pipe %d\n", plane, pipe); + intel_crtc->plane_ids_mask |= BIT(intel_plane->id); + } + } + + ret = drm_crtc_init_with_planes(&dev_priv->drm, + &intel_crtc->base, + primary ? 
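/*
 * Note on intel_skl_plane_create() above: possible_crtcs is set to
 * (1 << num_crtc) because the planes are created before their crtc,
 * so the crtc registered next by drm_crtc_init_with_planes() takes
 * exactly that index; the WARN_ON(drm_crtc_index() != pipe) below can
 * fire if an earlier pipe was skipped.
 */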
&primary->base : NULL, NULL, + &intel_crtc_funcs, + "pipe %c", pipe_name(pipe)); + if (ret) + goto fail; + + intel_crtc->pipe = pipe; + intel_crtc->plane = primary ? primary->plane : 0; + + dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc; + dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc; + + drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); + + intel_color_init(&intel_crtc->base); + + WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); + + return 0; + +fail: + /* + * drm_mode_config_cleanup() will free up any + * crtcs/planes already initialized. + */ + kfree(crtc_state); + kfree(intel_crtc); + + return ret; +} + static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_crtc *intel_crtc; @@ -13469,6 +13874,12 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); + if (INTEL_GEN(dev_priv) >= 9) { + crtc_state->base.background_color = drm_rgba(16, 0, 0, 0, 0); + intel_create_background_color_property(&dev_priv->drm, + intel_crtc); + } + return 0; fail: @@ -13511,6 +13922,27 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, return 0; } +int get_pipe_from_crtc_index(struct drm_device *dev, unsigned int index, enum pipe *pipe) +{ + struct drm_crtc *c = drm_crtc_from_index(dev, index); + + if (WARN_ON(!c)) + return -ENOENT; + + *pipe = (to_intel_crtc(c)->pipe); + return 0; +} + +struct intel_crtc *get_intel_crtc_from_index(struct drm_device *dev, + unsigned int index) +{ + struct drm_crtc *c = drm_crtc_from_index(dev, index); + + WARN_ON(!c); + return to_intel_crtc(c); +} + + static int intel_encoder_clones(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; @@ -13783,6 +14215,15 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_encoder_clones(encoder); } +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + /* + * Encoders have been initialized. If we are in VGT mode, + * let's inform the HV that it can start Dom U as Dom 0 + * is ready to accept new Dom Us. 
+ */
+	gvt_dom0_ready(dev_priv);
+#endif
+
 	intel_init_pch_refclk(dev_priv);
 
 	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
@@ -14388,8 +14829,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
 	intel_update_cdclk(dev_priv);
 	dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
-
-	intel_init_clock_gating(dev_priv);
 }
 
 /*
@@ -14468,6 +14907,8 @@ static void sanitize_watermarks(struct drm_device *dev)
 		cs->wm.need_postvbl_update = true;
 		dev_priv->display.optimize_watermarks(intel_state, cs);
+
+		to_intel_crtc_state(crtc->state)->wm = cs->wm;
 	}
 
 put_state:
@@ -14477,12 +14918,28 @@ static void sanitize_watermarks(struct drm_device *dev)
 	drm_modeset_acquire_fini(&ctx);
 }
 
+static int intel_sanitize_plane_restriction(struct drm_i915_private *dev_priv)
+{
+	/* Plane restriction feature is only for APL and KBL for now */
+	if (!(IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) ||
+	    (!intel_vgpu_active(dev_priv) &&
+	     !i915_modparams.enable_initial_modeset)) {
+		i915_modparams.avail_planes_per_pipe = 0;
+		DRM_INFO("Turning off Plane Restrictions feature\n");
+	}
+
+	return i915_modparams.avail_planes_per_pipe;
+}
+
 int intel_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	enum pipe pipe;
 	struct intel_crtc *crtc;
+	unsigned int planes_mask[I915_MAX_PIPES];
+	unsigned int avail_plane_per_pipe_mask = 0;
 
 	drm_mode_config_init(dev);
@@ -14553,10 +15010,29 @@ int intel_modeset_init(struct drm_device *dev)
 		      INTEL_INFO(dev_priv)->num_pipes,
 		      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
 
+	avail_plane_per_pipe_mask = intel_sanitize_plane_restriction(dev_priv);
+	DRM_DEBUG_KMS("avail_planes_per_pipe = 0x%x\n", i915_modparams.avail_planes_per_pipe);
+	DRM_DEBUG_KMS("domain_plane_owners = 0x%llx\n", i915_modparams.domain_plane_owners);
+
 	for_each_pipe(dev_priv, pipe) {
-		int ret;
+		planes_mask[pipe] = AVAIL_PLANE_PER_PIPE(dev_priv,
+				avail_plane_per_pipe_mask, pipe);
+		DRM_DEBUG_KMS("for pipe %d planes_mask = 0x%x\n", pipe,
+			      planes_mask[pipe]);
+	}
 
-		ret = intel_crtc_init(dev_priv, pipe);
+	for_each_pipe(dev_priv, pipe) {
+		int ret = 0;
+
+		if (!i915_modparams.avail_planes_per_pipe) {
+			ret = intel_crtc_init(dev_priv, pipe);
+		} else {
+			if (!intel_vgpu_active(dev_priv) || planes_mask[pipe]) {
+				ret = intel_crtc_init_restrict_planes(dev_priv,
+								      pipe,
+								      planes_mask[pipe]);
+			}
+		}
 		if (ret) {
 			drm_mode_config_cleanup(dev);
 			return ret;
@@ -14682,38 +15158,56 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 
 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
 	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
 		      pipe_name(pipe));
 
-	assert_plane_disabled(dev_priv, PLANE_A);
-	assert_plane_disabled(dev_priv, PLANE_B);
+	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
+	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
+	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
+	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
+	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);
 
 	I915_WRITE(PIPECONF(pipe), 0);
 	POSTING_READ(PIPECONF(pipe));
 
-	if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
-		DRM_ERROR("pipe %c off wait timed out\n", pipe_name(pipe));
+	intel_wait_for_pipe_scanline_stopped(crtc);
 
 	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
 	POSTING_READ(DPLL(pipe));
}
 
-static bool
-intel_check_plane_mapping(struct intel_crtc
*crtc) +static bool intel_plane_mapping_ok(struct intel_crtc *crtc, + struct intel_plane *primary) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 val; + enum plane plane = primary->plane; + u32 val = I915_READ(DSPCNTR(plane)); - if (INTEL_INFO(dev_priv)->num_pipes == 1) - return true; + return (val & DISPLAY_PLANE_ENABLE) == 0 || + (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe); +} - val = I915_READ(DSPCNTR(!crtc->plane)); +static void +intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc; - if ((val & DISPLAY_PLANE_ENABLE) && - (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) - return false; + if (INTEL_GEN(dev_priv) >= 4) + return; - return true; + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_plane *plane = + to_intel_plane(crtc->base.primary); + + if (intel_plane_mapping_ok(crtc, plane)) + continue; + + DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n", + plane->base.name); + intel_plane_disable_noatomic(crtc, plane); + } } static bool intel_crtc_has_encoders(struct intel_crtc *crtc) @@ -14769,33 +15263,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, /* Disable everything but the primary plane */ for_each_intel_plane_on_crtc(dev, crtc, plane) { - if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) - continue; + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); - trace_intel_disable_plane(&plane->base, crtc); - plane->disable_plane(plane, crtc); + if (plane_state->base.visible && + plane->base.type != DRM_PLANE_TYPE_PRIMARY) + intel_plane_disable_noatomic(crtc, plane); } } - /* We need to sanitize the plane -> pipe mapping first because this will - * disable the crtc (and hence change the state) if it is wrong. Note - * that gen4+ has a fixed plane -> pipe mapping. */ - if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) { - bool plane; - - DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n", - crtc->base.base.id, crtc->base.name); - - /* Pipe has the wrong plane attached and the plane is active. - * Temporarily change the plane mapping and disable everything - * ... */ - plane = crtc->plane; - crtc->base.primary->state->visible = true; - crtc->plane = !plane; - intel_crtc_disable_noatomic(&crtc->base, ctx); - crtc->plane = plane; - } - /* Adjust the state of the output pipe according to whether we * have active connectors/encoders. 
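	 * A crtc that is still active but has no attached encoders is
	 * forced off below.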
*/ if (crtc->active && !intel_crtc_has_encoders(crtc)) @@ -14900,24 +15376,21 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv) intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); } -static bool primary_get_hw_state(struct intel_plane *plane) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - - return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE; -} - /* FIXME read out full plane state for all planes */ static void readout_plane_state(struct intel_crtc *crtc) { - struct intel_plane *primary = to_intel_plane(crtc->base.primary); - bool visible; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane *plane; - visible = crtc->active && primary_get_hw_state(primary); + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + bool visible = plane->get_hw_state(plane); - intel_set_plane_visible(to_intel_crtc_state(crtc->base.state), - to_intel_plane_state(primary->base.state), - visible); + intel_set_plane_visible(crtc_state, plane_state, visible); + } } static void intel_modeset_readout_hw_state(struct drm_device *dev) @@ -14947,9 +15420,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc->active = crtc_state->base.active; if (crtc_state->base.active) - dev_priv->active_crtcs |= 1 << crtc->pipe; + dev_priv->active_crtcs |= + 1 << drm_crtc_index(&crtc->base); - readout_plane_state(crtc); + if (crtc->base.primary) + readout_plane_state(crtc); DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", crtc->base.base.id, crtc->base.name, @@ -14968,7 +15443,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) if (crtc_state->base.active && crtc_state->shared_dpll == pll) - pll->state.crtc_mask |= 1 << crtc->pipe; + pll->state.crtc_mask |= + 1 << drm_crtc_index(&crtc->base); } pll->active_mask = pll->state.crtc_mask; @@ -14983,11 +15459,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct intel_crtc_state *crtc_state; crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - crtc_state = to_intel_crtc_state(crtc->base.state); - - encoder->base.crtc = &crtc->base; - crtc_state->output_types |= 1 << encoder->type; - encoder->get_config(encoder, crtc_state); + if (!crtc) { + encoder->base.crtc = NULL; + } else { + crtc_state = + to_intel_crtc_state(crtc->base.state); + + encoder->base.crtc = &crtc->base; + crtc_state->output_types |= 1 << encoder->type; + encoder->get_config(encoder, crtc_state); + } } else { encoder->base.crtc = NULL; } @@ -15105,11 +15586,22 @@ intel_modeset_setup_hw_state(struct drm_device *dev, struct intel_encoder *encoder; int i; + if (IS_HASWELL(dev_priv)) { + /* + * WaRsPkgCStateDisplayPMReq:hsw + * System hang if this isn't done before disabling all planes! + */ + I915_WRITE(CHICKEN_PAR1_1, + I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); + } + intel_modeset_readout_hw_state(dev); /* HW state is read out, now we need to sanitize this mess. 
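	 * Order matters here: the pre-gen4 plane->pipe mapping is fixed
	 * up first (a plane found attached to the wrong pipe is simply
	 * disabled), then the encoders, then the crtcs themselves.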
*/ get_encoder_power_domains(dev_priv); + intel_sanitize_plane_mapping(dev_priv); + for_each_intel_encoder(dev, encoder) { intel_sanitize_encoder(encoder); } @@ -15117,9 +15609,11 @@ intel_modeset_setup_hw_state(struct drm_device *dev, for_each_pipe(dev_priv, pipe) { crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - intel_sanitize_crtc(crtc, ctx); - intel_dump_pipe_config(crtc, crtc->config, - "[setup_hw_state]"); + if (crtc) { + intel_sanitize_crtc(crtc, ctx); + intel_dump_pipe_config(crtc, crtc->config, + "[setup_hw_state]"); + } } intel_modeset_update_connector_atomic_state(dev); @@ -15186,6 +15680,7 @@ void intel_display_resume(struct drm_device *dev) if (!ret) ret = __intel_display_resume(dev, state, &ctx); + intel_enable_ipc(dev_priv); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); @@ -15201,6 +15696,8 @@ void intel_modeset_gem_init(struct drm_device *dev) intel_init_gt_powersave(dev_priv); + intel_init_clock_gating(dev_priv); + intel_setup_overlay(dev_priv); } @@ -15240,6 +15737,10 @@ static void intel_hpd_poll_fini(struct drm_device *dev) for_each_intel_connector_iter(connector, &conn_iter) { if (connector->modeset_retry_work.func) cancel_work_sync(&connector->modeset_retry_work); + if (connector->hdcp_shim) { + cancel_delayed_work_sync(&connector->hdcp_check_work); + cancel_work_sync(&connector->hdcp_prop_work); + } } drm_connector_list_iter_end(&conn_iter); } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 09f274419eea..c2315e71bc43 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -36,7 +36,9 @@ #include #include #include +#include #include +#include #include "intel_drv.h" #include #include "i915_drv.h" @@ -428,7 +430,7 @@ uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes) return v; } -static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) +void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) { int i; if (dst_bytes > 4) @@ -640,19 +642,15 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); + int backlight_controller = dev_priv->vbt.backlight.controller; lockdep_assert_held(&dev_priv->pps_mutex); /* We should never land here with regular DP ports */ WARN_ON(!is_edp(intel_dp)); - /* - * TODO: BXT has 2 PPS instances. The correct port->PPS instance - * mapping needs to be retrieved from VBT, for now just hard-code to - * use instance #0 always. 
- */ if (!intel_dp->pps_reset) - return 0; + return backlight_controller; intel_dp->pps_reset = false; @@ -662,7 +660,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp) */ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false); - return 0; + return backlight_controller; } typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, @@ -1050,10 +1048,29 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); } +static uint32_t intel_dp_get_aux_send_ctl(struct intel_dp *intel_dp, + bool has_aux_irq, + int send_bytes, + uint32_t aux_clock_divider, + bool aksv_write) +{ + uint32_t val = 0; + + if (aksv_write) { + send_bytes += 5; + val |= DP_AUX_CH_CTL_AUX_AKSV_SELECT; + } + + return val | intel_dp->get_aux_send_ctl(intel_dp, + has_aux_irq, + send_bytes, + aux_clock_divider); +} + static int intel_dp_aux_ch(struct intel_dp *intel_dp, const uint8_t *send, int send_bytes, - uint8_t *recv, int recv_size) + uint8_t *recv, int recv_size, bool aksv_write) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = @@ -1113,10 +1130,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, } while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) { - u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp, - has_aux_irq, - send_bytes, - aux_clock_divider); + u32 send_ctl = intel_dp_get_aux_send_ctl(intel_dp, + has_aux_irq, + send_bytes, + aux_clock_divider, + aksv_write); /* Must try at least 3 times according to DP spec */ for (try = 0; try < 5; try++) { @@ -1253,7 +1271,8 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) if (msg->buffer) memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); - ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); + ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize, + false); if (ret > 0) { msg->reply = rxbuf[0] >> 4; @@ -1275,7 +1294,8 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) if (WARN_ON(rxsize > 20)) return -E2BIG; - ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); + ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize, + false); if (ret > 0) { msg->reply = rxbuf[0] >> 4; /* @@ -1977,7 +1997,12 @@ static void wait_panel_status(struct intel_dp *intel_dp, I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); - if (intel_wait_for_register(dev_priv, + /* + * Only wait for panel status if we are not in a GVT guest environment, + * because such a wait in a GVT guest environment doesn't make any sense + * as we are exposing virtual DP monitors to the guest. 
+ */ + if (!intel_vgpu_active(dev_priv) && intel_wait_for_register(dev_priv, pp_stat_reg, mask, value, 5000)) DRM_ERROR("Panel status timeout: status %08x control %08x\n", @@ -3835,7 +3860,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp) { u8 mstm_cap; - if (!i915.enable_dp_mst) + if (!i915_modparams.enable_dp_mst) return false; if (!intel_dp->can_mst) @@ -3853,7 +3878,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp) static void intel_dp_configure_mst(struct intel_dp *intel_dp) { - if (!i915.enable_dp_mst) + if (!i915_modparams.enable_dp_mst) return; if (!intel_dp->can_mst) @@ -4988,6 +5013,234 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) pps_unlock(intel_dp); } +static +int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, + u8 *an) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base); + uint8_t txbuf[4], rxbuf[2], reply = 0; + ssize_t dpcd_ret; + int ret; + + /* Output An first, that's easy */ + dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN, + an, DRM_HDCP_AN_LEN); + if (dpcd_ret != DRM_HDCP_AN_LEN) { + DRM_ERROR("Failed to write An over DP/AUX (%zd)\n", dpcd_ret); + return dpcd_ret >= 0 ? -EIO : dpcd_ret; + } + + /* + * Since Aksv is Oh-So-Secret, we can't access it in software. So in + * order to get it on the wire, we need to create the AUX header as if + * we were writing the data, and then tickle the hardware to output the + * data once the header is sent out. + */ + txbuf[0] = (DP_AUX_NATIVE_WRITE << 4) | + ((DP_AUX_HDCP_AKSV >> 16) & 0xf); + txbuf[1] = (DP_AUX_HDCP_AKSV >> 8) & 0xff; + txbuf[2] = DP_AUX_HDCP_AKSV & 0xff; + txbuf[3] = DRM_HDCP_KSV_LEN - 1; + + ret = intel_dp_aux_ch(intel_dp, txbuf, sizeof(txbuf), rxbuf, + sizeof(rxbuf), true); + if (ret < 0) { + DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret); + return ret; + } else if (ret == 0) { + DRM_ERROR("Aksv write over DP/AUX was empty\n"); + return -EIO; + } + + reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK; + return reply == DP_AUX_NATIVE_REPLY_ACK ? 0 : -EIO; +} + +static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, + u8 *bksv) +{ + ssize_t ret; + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, + DRM_HDCP_KSV_LEN); + if (ret != DRM_HDCP_KSV_LEN) { + DRM_ERROR("Read Bksv from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + return 0; +} + +static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, + u8 *bstatus) +{ + ssize_t ret; + /* + * For some reason the HDMI and DP HDCP specs call this register + * definition by different names. In the HDMI spec, it's called BSTATUS, + * but in DP it's called BINFO. + */ + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO, + bstatus, DRM_HDCP_BSTATUS_LEN); + if (ret != DRM_HDCP_BSTATUS_LEN) { + DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + return 0; +} + +static +int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port, + u8 *bcaps) +{ + ssize_t ret; + + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS, + bcaps, 1); + if (ret != 1) { + DRM_ERROR("Read bcaps from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? 
-EIO : ret;
+	}
+
+	return 0;
+}
+
+static
+int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
+				   bool *repeater_present)
+{
+	ssize_t ret;
+	u8 bcaps;
+
+	ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
+	if (ret)
+		return ret;
+
+	*repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
+	return 0;
+}
+
+static
+int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
+				u8 *ri_prime)
+{
+	ssize_t ret;
+	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
+			       ri_prime, DRM_HDCP_RI_LEN);
+	if (ret != DRM_HDCP_RI_LEN) {
+		DRM_ERROR("Read Ri' from DP/AUX failed (%zd)\n", ret);
+		return ret >= 0 ? -EIO : ret;
+	}
+	return 0;
+}
+
+static
+int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
+				 bool *ksv_ready)
+{
+	ssize_t ret;
+	u8 bstatus;
+	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+			       &bstatus, 1);
+	if (ret != 1) {
+		DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+		return ret >= 0 ? -EIO : ret;
+	}
+	*ksv_ready = bstatus & DP_BSTATUS_READY;
+	return 0;
+}
+
+static
+int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
+				int num_downstream, u8 *ksv_fifo)
+{
+	ssize_t ret;
+	int i;
+
+	/* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
+	for (i = 0; i < num_downstream; i += 3) {
+		size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
+		ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+				       DP_AUX_HDCP_KSV_FIFO,
+				       ksv_fifo + i * DRM_HDCP_KSV_LEN,
+				       len);
+		if (ret != len) {
+			DRM_ERROR("Read ksv[%d] from DP/AUX failed (%zd)\n", i,
+				  ret);
+			return ret >= 0 ? -EIO : ret;
+		}
+	}
+	return 0;
+}
+
+static
+int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
+				    int i, u32 *part)
+{
+	ssize_t ret;
+
+	if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
+		return -EINVAL;
+
+	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+			       DP_AUX_HDCP_V_PRIME(i), part,
+			       DRM_HDCP_V_PRIME_PART_LEN);
+	if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
+		DRM_ERROR("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
+		return ret >= 0 ? -EIO : ret;
+	}
+	return 0;
+}
+
+static
+int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
+				    bool enable)
+{
+	/* Not used for single stream DisplayPort setups */
+	return 0;
+}
+
+static
+bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
+{
+	ssize_t ret;
+	u8 bstatus;
+	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+			       &bstatus, 1);
+	if (ret != 1) {
+		DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+		/* returning a negative errno from a bool function would
+		 * read as "link OK"; report failure instead. */
+		return false;
+	}
+	return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
+}
+
+static
+int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
+			  bool *hdcp_capable)
+{
+	ssize_t ret;
+	u8 bcaps;
+
+	ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
+	if (ret)
+		return ret;
+
+	*hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
+	return 0;
+}
+
+static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
+	.write_an_aksv = intel_dp_hdcp_write_an_aksv,
+	.read_bksv = intel_dp_hdcp_read_bksv,
+	.read_bstatus = intel_dp_hdcp_read_bstatus,
+	.repeater_present = intel_dp_hdcp_repeater_present,
+	.read_ri_prime = intel_dp_hdcp_read_ri_prime,
+	.read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
+	.read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
+	.read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
+	.toggle_signalling = intel_dp_hdcp_toggle_signalling,
+	.check_link = intel_dp_hdcp_check_link,
+	.hdcp_capable = intel_dp_hdcp_capable,
+};
+
 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -5112,6 +5365,9 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 
 	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
 
+	/* Short pulse can signify loss of hdcp authentication */
+	intel_hdcp_check_link(intel_dp->attached_connector);
+
 	if (intel_dp->is_mst) {
 		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
 			/*
@@ -5183,7 +5439,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
 
 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
 {
-	intel_dp->panel_power_off_time = ktime_get_boottime();
+	intel_dp->panel_power_off_time = ktime_set(0, 0);
 	intel_dp->last_power_on = jiffies;
 	intel_dp->last_backlight_off = jiffies;
 }
@@ -5340,6 +5596,12 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 	 */
 	final->t8 = 1;
 	final->t9 = 1;
+
+	/*
+	 * HW has only a 100msec granularity for t11_t12 so round it up
+	 * accordingly.
+	 */
+	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
 }
 
 static void
@@ -6108,6 +6370,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 
 	intel_dp_add_properties(intel_dp, connector);
 
+	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(dev_priv, port)) {
+		int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
+		if (ret)
+			DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+	}
+
 	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
 	 * 0xd. Failure to do so will result in spurious interrupts being
 	 * generated on the port when a cable is not attached.
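Taken together, the DP HDCP wiring added above reduces to the following sketch (names as introduced in this patch; error handling elided):

	/* Connector init (intel_dp_init_connector() above): register the
	 * DP shim so the generic HDCP code can drive this connector. */
	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(dev_priv, port))
		intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);

	/* Short HPD pulse (intel_dp_hpd_pulse() above): the sink may be
	 * signalling loss of authentication, so re-check the link. */
	intel_hdcp_check_link(intel_dp->attached_connector);

Everything bus-specific then goes through the intel_hdcp_shim hooks (write_an_aksv, read_bksv, ..., check_link), so the generic HDCP code never issues DP AUX transactions itself.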
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c index d2830ba3162e..2bb2ceb9d463 100644 --- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c @@ -264,7 +264,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) { struct intel_panel *panel = &intel_connector->panel; - if (!i915.enable_dpcd_backlight) + if (!i915_modparams.enable_dpcd_backlight) return -ENODEV; if (!intel_dp_aux_display_control_capable(intel_connector)) diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index df808a94c511..49a696315b94 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -301,7 +301,7 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll, DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, pipe_name(crtc->pipe)); - shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe; + shared_dpll[pll->id].crtc_mask |= 1 << (drm_crtc_index(&crtc->base)); } /** @@ -2504,7 +2504,8 @@ void intel_release_shared_dpll(struct intel_shared_dpll *dpll, struct intel_shared_dpll_state *shared_dpll_state; shared_dpll_state = intel_atomic_get_shared_dpll_state(state); - shared_dpll_state[dpll->id].crtc_mask &= ~(1 << crtc->pipe); + shared_dpll_state[dpll->id].crtc_mask &= + ~(1 << drm_crtc_index(&crtc->base)); } /** diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 79fbaf78f604..2e6e18d73589 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -41,22 +41,21 @@ #include /** - * _wait_for - magic (register) wait macro + * __wait_for - magic wait macro * - * Does the right thing for modeset paths when run under kdgb or similar atomic - * contexts. Note that it's important that we check the condition again after - * having timed out, since the timeout could be due to preemption or similar and - * we've never had a chance to check the condition before the timeout. - * - * TODO: When modesetting has fully transitioned to atomic, the below - * drm_can_sleep() can be removed and in_atomic()/!in_atomic() asserts - * added. + * Macro to help avoid open coding check/wait/timeout patterns. Note that it's + * important that we check the condition again after having timed out, since the + * timeout could be due to preemption or similar and we've never had a chance to + * check the condition before the timeout. */ -#define _wait_for(COND, US, W) ({ \ +#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \ unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \ + long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \ int ret__; \ + might_sleep(); \ for (;;) { \ bool expired__ = time_after(jiffies, timeout__); \ + OP; \ if (COND) { \ ret__ = 0; \ break; \ @@ -65,16 +64,16 @@ ret__ = -ETIMEDOUT; \ break; \ } \ - if ((W) && drm_can_sleep()) { \ - usleep_range((W), (W)*2); \ - } else { \ - cpu_relax(); \ - } \ + usleep_range(wait__, wait__ * 2); \ + if (wait__ < (Wmax)) \ + wait__ <<= 1; \ } \ ret__; \ }) -#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000) +#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \ + (Wmax)) +#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000) /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. 
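 * The sleeping variants above instead back off exponentially:
 * wait_for(COND, 10) polls with usleep_range() delays of 10, 20,
 * 40, ... us, doubling towards the 1 ms Wmax bound.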
 */
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
@@ -123,7 +122,7 @@
 	int ret__; \
 	BUILD_BUG_ON(!__builtin_constant_p(US)); \
 	if ((US) > 10) \
-		ret__ = _wait_for((COND), (US), 10); \
+		ret__ = _wait_for((COND), (US), 10, 10); \
 	else \
 		ret__ = _wait_for_atomic((COND), (US), 0); \
 	ret__; \
})
@@ -299,6 +298,80 @@ struct intel_panel {
 	} backlight;
 };
 
+/*
+ * This structure serves as a translation layer between the generic HDCP code
+ * and the bus-specific code. What that means is that HDCP over HDMI differs
+ * from HDCP over DP, so to account for these differences, we need to
+ * communicate with the receiver through this shim.
+ *
+ * For completeness, the two buses differ in the following ways:
+ *	- DP AUX vs. DDC
+ *		HDCP registers on the receiver are set via DP AUX for DP, and
+ *		they are set via DDC for HDMI.
+ *	- Receiver register offsets
+ *		The offsets of the registers are different for DP vs. HDMI
+ *	- Receiver register masks/offsets
+ *		For instance, the ready bit for the KSV fifo is in a different
+ *		place on DP vs HDMI
+ *	- Receiver register names
+ *		Seriously. In the DP spec, the 16-bit register containing
+ *		downstream information is called BINFO, on HDMI it's called
+ *		BSTATUS. To confuse matters further, DP has a BSTATUS register
+ *		with a completely different definition.
+ *	- KSV FIFO
+ *		On HDMI, the ksv fifo is read all at once, whereas on DP it must
+ *		be read 3 keys at a time
+ *	- Aksv output
+ *		Since Aksv is hidden in hardware, there are different procedures
+ *		to send it over DP AUX vs DDC
+ */
+struct intel_hdcp_shim {
+	/* Outputs the transmitter's An and Aksv values to the receiver. */
+	int (*write_an_aksv)(struct intel_digital_port *intel_dig_port, u8 *an);
+
+	/* Reads the receiver's key selection vector */
+	int (*read_bksv)(struct intel_digital_port *intel_dig_port, u8 *bksv);
+
+	/*
+	 * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The
+	 * definitions are the same in the respective specs, but the names are
+	 * different. Call it BSTATUS since that's the name the HDMI spec
+	 * uses and it was there first.
+	 */
+	int (*read_bstatus)(struct intel_digital_port *intel_dig_port,
+			    u8 *bstatus);
+
+	/* Determines whether a repeater is present downstream */
+	int (*repeater_present)(struct intel_digital_port *intel_dig_port,
+				bool *repeater_present);
+
+	/* Reads the receiver's Ri' value */
+	int (*read_ri_prime)(struct intel_digital_port *intel_dig_port, u8 *ri);
+
+	/* Determines if the receiver's KSV FIFO is ready for consumption */
+	int (*read_ksv_ready)(struct intel_digital_port *intel_dig_port,
+			      bool *ksv_ready);
+
+	/* Reads the ksv fifo for num_downstream devices */
+	int (*read_ksv_fifo)(struct intel_digital_port *intel_dig_port,
+			     int num_downstream, u8 *ksv_fifo);
+
+	/* Reads a 32-bit part of V' from the receiver */
+	int (*read_v_prime_part)(struct intel_digital_port *intel_dig_port,
+				 int i, u32 *part);
+
+	/* Enables HDCP signalling on the port */
+	int (*toggle_signalling)(struct intel_digital_port *intel_dig_port,
+				 bool enable);
+
+	/* Ensures the link is still protected */
+	bool (*check_link)(struct intel_digital_port *intel_dig_port);
+
+	/* Detects panel's hdcp capability. This is optional for HDMI.
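+	 * When the hook is not implemented, the probe is simply skipped
+	 * and authentication is attempted directly.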
*/ + int (*hdcp_capable)(struct intel_digital_port *intel_dig_port, + bool *hdcp_capable); +}; + struct intel_connector { struct drm_connector base; /* @@ -330,6 +403,12 @@ struct intel_connector { /* Work struct to schedule a uevent on link train failure */ struct work_struct modeset_retry_work; + + const struct intel_hdcp_shim *hdcp_shim; + struct mutex hdcp_mutex; + uint64_t hdcp_value; /* protected by hdcp_mutex */ + struct delayed_work hdcp_check_work; + struct work_struct hdcp_prop_work; }; struct intel_digital_connector_state { @@ -461,6 +540,7 @@ struct intel_initial_plane_config { struct intel_scaler { int in_use; uint32_t mode; + int owned; }; struct intel_crtc_scaler_state { @@ -495,7 +575,7 @@ struct intel_crtc_scaler_state { #define I915_MODE_FLAG_INHERITED 1 struct intel_pipe_wm { - struct intel_wm_level wm[5]; + struct intel_wm_level wm[7]; uint32_t linetime; bool fbc_wm_enabled; bool pipe_enabled; @@ -863,6 +943,7 @@ struct intel_plane { const struct intel_plane_state *plane_state); void (*disable_plane)(struct intel_plane *plane, struct intel_crtc *crtc); + bool (*get_hw_state)(struct intel_plane *plane); int (*check_plane)(struct intel_plane *plane, struct intel_crtc_state *crtc_state, struct intel_plane_state *state); @@ -1230,6 +1311,11 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) return dev_priv->pm.irqs_enabled; } +bool is_shadow_context(struct i915_gem_context *ctx); +int get_vgt_id(struct i915_gem_context *ctx); +int get_pid_shadowed(struct i915_gem_context *ctx, + struct intel_engine_cs *engine); + int intel_get_crtc_scanline(struct intel_crtc *crtc); void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, u8 pipe_mask); @@ -1273,6 +1359,8 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, bool state); uint32_t ddi_signal_levels(struct intel_dp *intel_dp); u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder); +int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, + bool enable); unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, int plane, unsigned int height); @@ -1339,6 +1427,9 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, enum pipe intel_get_pipe_from_connector(struct intel_connector *connector); int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file_priv); +int get_pipe_from_crtc_index(struct drm_device *dev, unsigned int index, enum pipe *pipe); +struct intel_crtc *get_intel_crtc_from_index(struct drm_device *dev, + unsigned int index); enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe); static inline bool @@ -1358,7 +1449,12 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state) static inline void intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) { - drm_wait_one_vblank(&dev_priv->drm, pipe); + struct intel_crtc *crtc; + + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + if (crtc) + drm_wait_one_vblank(&dev_priv->drm, + drm_crtc_index(&crtc->base)); } static inline void intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe) @@ -1515,6 +1611,7 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); void intel_dp_hot_plug(struct intel_encoder *intel_encoder); void intel_power_sequencer_reset(struct drm_i915_private *dev_priv); uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); +void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes); void 
intel_plane_destroy(struct drm_plane *plane); void intel_edp_drrs_enable(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state); @@ -1610,6 +1707,10 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev) } #endif +/* initial modesetting support */ +extern void intel_initial_mode_config_init(struct drm_device *dev); +extern void intel_initial_mode_config_fini(struct drm_device *dev); + /* intel_fbc.c */ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, struct drm_atomic_state *state); @@ -1707,7 +1808,7 @@ extern struct drm_display_mode *intel_find_panel_downclock( int intel_backlight_device_register(struct intel_connector *connector); void intel_backlight_device_unregister(struct intel_connector *connector); #else /* CONFIG_BACKLIGHT_CLASS_DEVICE */ -static int intel_backlight_device_register(struct intel_connector *connector) +static inline int intel_backlight_device_register(struct intel_connector *connector) { return 0; } @@ -1716,6 +1817,17 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con } #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ +/* intel_hdcp.c */ +void intel_hdcp_atomic_check(struct drm_connector *connector, + struct drm_connector_state *old_state, + struct drm_connector_state *new_state); +int intel_hdcp_init(struct intel_connector *connector, + const struct intel_hdcp_shim *hdcp_shim); +int intel_hdcp_enable(struct intel_connector *connector); +int intel_hdcp_disable(struct intel_connector *connector); +int intel_hdcp_check_link(struct intel_connector *connector); +bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port); + /* intel_psr.c */ void intel_psr_enable(struct intel_dp *intel_dp); @@ -1866,9 +1978,11 @@ bool ilk_disable_lp_wm(struct drm_device *dev); int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6); int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, struct intel_crtc_state *cstate); +void intel_init_ipc(struct drm_i915_private *dev_priv); +void intel_enable_ipc(struct drm_i915_private *dev_priv); static inline int intel_enable_rc6(void) { - return i915.enable_rc6; + return i915_modparams.enable_rc6; } /* intel_sdvo.c */ @@ -1885,6 +1999,14 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv); void intel_pipe_update_start(struct intel_crtc *crtc); void intel_pipe_update_end(struct intel_crtc *crtc); +int intel_check_sprite_plane(struct intel_plane *plane, + struct intel_crtc_state *crtc_state, + struct intel_plane_state *state); +void skl_update_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc); +bool skl_plane_get_hw_state(struct intel_plane *plane); /* intel_tv.c */ void intel_tv_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 3c2d9cf22ed5..52166ff74b19 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -153,7 +153,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class) case 9: return GEN9_LR_CONTEXT_RENDER_SIZE; case 8: - return i915.enable_execlists ? + return i915_modparams.enable_execlists ? 
GEN8_LR_CONTEXT_RENDER_SIZE : GEN8_CXT_TOTAL_SIZE; case 7: @@ -301,7 +301,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv) &intel_engine_classes[engine->class]; int (*init)(struct intel_engine_cs *engine); - if (i915.enable_execlists) + if (i915_modparams.enable_execlists) init = class_info->init_execlists; else init = class_info->init_legacy; @@ -380,6 +380,76 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine) engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id]; } +static bool csb_force_mmio(struct drm_i915_private *i915) +{ + /* GVT emulation depends upon intercepting CSB mmio */ + if (intel_vgpu_active(i915)) + return true; + + /* + * IOMMU adds unpredictable latency causing the CSB write (from the + * GPU into the HWSP) to only be visible some time after the interrupt + * (missed breadcrumb syndrome). + */ + if (intel_vtd_active()) + return true; + + return false; +} + +static void intel_engine_init_execlist(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + + execlists->csb_use_mmio = csb_force_mmio(engine->i915); + + execlists->port_mask = 1; + BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists)); + GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); + + execlists->queue = RB_ROOT; + execlists->first = NULL; +} + +static enum hrtimer_restart +intel_engine_fpreempt_timer(struct hrtimer *hrtimer) +{ + struct intel_engine_cs *engine = + container_of(hrtimer, struct intel_engine_cs, + fpreempt_timer); + + if (execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT)) + queue_work(system_highpri_wq, &engine->fpreempt_work); + + return HRTIMER_NORESTART; +} + +static void intel_engine_fpreempt_work(struct work_struct *work) +{ + struct intel_engine_cs *engine = + container_of(work, struct intel_engine_cs, + fpreempt_work); + + tasklet_kill(&engine->execlists.irq_tasklet); + tasklet_disable(&engine->execlists.irq_tasklet); + + if (execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT)) { + engine->fpreempt_stalled = true; + i915_handle_error(engine->i915, intel_engine_flag(engine), + 0, "force preemption"); + } + + tasklet_enable(&engine->execlists.irq_tasklet); +} + +static void intel_engine_init_fpreempt(struct intel_engine_cs *engine) +{ + hrtimer_init(&engine->fpreempt_timer, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); + engine->fpreempt_timer.function = intel_engine_fpreempt_timer; + INIT_WORK(&engine->fpreempt_work, intel_engine_fpreempt_work); +} + /** * intel_engines_setup_common - setup engine state not requiring hw access * @engine: Engine to setup. 
@@ -391,11 +461,11 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)
  */
 void intel_engine_setup_common(struct intel_engine_cs *engine)
 {
-	engine->execlist_queue = RB_ROOT;
-	engine->execlist_first = NULL;
+	intel_engine_init_execlist(engine);
 
 	intel_engine_init_timeline(engine);
 	intel_engine_init_hangcheck(engine);
+	intel_engine_init_fpreempt(engine);
 	i915_gem_batch_pool_init(engine, &engine->batch_pool);
 
 	intel_engine_init_cmd_parser(engine);
@@ -442,6 +512,116 @@ static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
 	i915_vma_unpin_and_release(&engine->scratch);
 }
 
+static void cleanup_phys_status_page(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+
+	if (!dev_priv->status_page_dmah)
+		return;
+
+	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
+	engine->status_page.page_addr = NULL;
+}
+
+static void cleanup_status_page(struct intel_engine_cs *engine)
+{
+	struct i915_vma *vma;
+	struct drm_i915_gem_object *obj;
+
+	vma = fetch_and_zero(&engine->status_page.vma);
+	if (!vma)
+		return;
+
+	obj = vma->obj;
+
+	i915_vma_unpin(vma);
+	i915_vma_close(vma);
+
+	i915_gem_object_unpin_map(obj);
+	__i915_gem_object_release_unless_active(obj);
+}
+
+static int init_status_page(struct intel_engine_cs *engine)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	unsigned int flags;
+	void *vaddr;
+	int ret;
+
+	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
+	if (IS_ERR(obj)) {
+		DRM_ERROR("Failed to allocate status page\n");
+		return PTR_ERR(obj);
+	}
+
+	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	if (ret)
+		goto err;
+
+	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err;
+	}
+
+	flags = PIN_GLOBAL;
+	if (!HAS_LLC(engine->i915))
+		/* On g33, we cannot place HWS above 256MiB, so
+		 * restrict its pinning to the low mappable arena.
+		 * Though this restriction is not documented for
+		 * gen4, gen5, or byt, they also behave similarly
+		 * and hang if the HWS is placed at the top of the
+		 * GTT. To generalise, it appears that all !llc
+		 * platforms have issues with us placing the HWS
+		 * above the mappable region (even though we never
+		 * actually map it).
+		 */
+		flags |= PIN_MAPPABLE;
+	else
+		flags |= PIN_HIGH;
+	ret = i915_vma_pin(vma, 0, 4096, flags);
+	if (ret)
+		goto err;
+
+	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
+		goto err_unpin;
+	}
+
+	engine->status_page.vma = vma;
+	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
+	engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
+
+	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+			 engine->name, i915_ggtt_offset(vma));
+	return 0;
+
+err_unpin:
+	i915_vma_unpin(vma);
+err:
+	i915_gem_object_put(obj);
+	return ret;
+}
+
+static int init_phys_status_page(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+
+	GEM_BUG_ON(engine->id != RCS);
+
+	dev_priv->status_page_dmah =
+		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
+	if (!dev_priv->status_page_dmah)
+		return -ENOMEM;
+
+	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+	memset(engine->status_page.page_addr, 0, PAGE_SIZE);
+
+	return 0;
+}
+
 /**
  * intel_engine_init_common - initialize engine state which might require hw access
  * @engine: Engine to initialize.
@@ -471,17 +651,44 @@ int intel_engine_init_common(struct intel_engine_cs *engine) if (IS_ERR(ring)) return PTR_ERR(ring); + /* + * Similarly the preempt context must always be available so that + * we can interrupt the engine at any time. + */ + if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) { + ring = engine->context_pin(engine, + engine->i915->preempt_context); + if (IS_ERR(ring)) { + ret = PTR_ERR(ring); + goto err_unpin_kernel; + } + } + ret = intel_engine_init_breadcrumbs(engine); if (ret) - goto err_unpin; + goto err_unpin_preempt; ret = i915_gem_render_state_init(engine); if (ret) - goto err_unpin; + goto err_breadcrumbs; + + if (HWS_NEEDS_PHYSICAL(engine->i915)) + ret = init_phys_status_page(engine); + else + ret = init_status_page(engine); + if (ret) + goto err_rs_fini; return 0; -err_unpin: +err_rs_fini: + i915_gem_render_state_fini(engine); +err_breadcrumbs: + intel_engine_fini_breadcrumbs(engine); +err_unpin_preempt: + if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) + engine->context_unpin(engine, engine->i915->preempt_context); +err_unpin_kernel: engine->context_unpin(engine, engine->i915->kernel_context); return ret; } @@ -497,11 +704,18 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) { intel_engine_cleanup_scratch(engine); + if (HWS_NEEDS_PHYSICAL(engine->i915)) + cleanup_phys_status_page(engine); + else + cleanup_status_page(engine); + i915_gem_render_state_fini(engine); intel_engine_fini_breadcrumbs(engine); intel_engine_cleanup_cmd_parser(engine); i915_gem_batch_pool_fini(&engine->batch_pool); + if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) + engine->context_unpin(engine, engine->i915->preempt_context); engine->context_unpin(engine, engine->i915->kernel_context); } @@ -672,11 +886,6 @@ static int wa_add(struct drm_i915_private *dev_priv, #define WA_SET_FIELD_MASKED(addr, mask, value) \ WA_REG(addr, mask, _MASKED_FIELD(mask, value)) -#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask)) -#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask)) - -#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val) - static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, i915_reg_t reg) { @@ -687,8 +896,8 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS)) return -EINVAL; - WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index), - i915_mmio_reg_offset(reg)); + I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index), + i915_mmio_reg_offset(reg)); wa->hw_whitelist_count[engine->id]++; return 0; @@ -812,6 +1021,23 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | ECOCHK_DIS_TLB); + if (HAS_LLC(dev_priv)) { + /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl + * + * Must match Display Engine. See + * WaCompressedResourceDisplayNewHashMode. 
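+		 * (the sampler and the display engine must select the
+		 * same compressed hash mode, hence the paired chicken-bit
+		 * and MMCD writes below).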
+ */ + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN9_PBE_COMPRESSED_HASH_SELECTION); + WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, + GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR); + + I915_WRITE(MMCD_MISC_CTRL, + I915_READ(MMCD_MISC_CTRL) | + MMCD_PCLA | + MMCD_HOTSPOT_EN); + } + /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */ /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, @@ -900,13 +1126,37 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES)); + /* WaClearHIZ_WM_CHICKEN3:bxt,glk */ + if (IS_GEN9_LP(dev_priv)) + WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ); + + /* + * Supporting preemption with fine-granularity requires changes in the + * batch buffer programming. Since we can't break old userspace, we + * need to set our default preemption level to safe value. Userspace is + * still able to use more fine-grained preemption levels, since in + * WaEnablePreemptionGranularityControlByUMD we're whitelisting the + * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are + * not real HW workarounds, but merely a way to start using preemption + * while maintaining old contract with userspace. + */ + + /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */ + WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL); + + /* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */ + WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK, + GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); + /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */ ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG); if (ret) return ret; - /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl */ - ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1); + /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */ + I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, + _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); + ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1); if (ret) return ret; @@ -968,25 +1218,19 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) if (ret) return ret; - /* - * Actual WA is to disable percontext preemption granularity control - * until D0 which is the default case so this is equivalent to - * !WaDisablePerCtxtPreemptionGranularityControl:skl - */ - I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, - _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); - /* WaEnableGapsTsvCreditFix:skl */ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE)); /* WaDisableGafsUnitClkGating:skl */ - WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) | + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE)); /* WaInPlaceDecompressionHang:skl */ if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER)) - WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, + (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS)); /* WaDisableLSQCROPERFforOCL:skl */ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); @@ -1022,8 +1266,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) /* WaDisablePooledEuLoadBalancingFix:bxt */ if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) { - WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2, - GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); + I915_WRITE(FF_SLICE_CS_CHICKEN2, + 
_MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
 	}
 
 	/* WaDisableSbeCacheDispatchPortSharing:bxt */
@@ -1062,8 +1306,61 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 
 	/* WaInPlaceDecompressionHang:bxt */
 	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
-		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
-			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+		I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+			   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
+
+	return 0;
+}
+
+static int cnl_init_workarounds(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+	int ret;
+
+	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
+	if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+		I915_WRITE(GAMT_CHKN_BIT_REG,
+			   (I915_READ(GAMT_CHKN_BIT_REG) |
+			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT));
+
+	/* WaForceContextSaveRestoreNonCoherent:cnl */
+	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
+			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
+
+	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
+	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
+	if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
+		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
+
+	/* WaInPlaceDecompressionHang:cnl */
+	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
+
+	/* WaPushConstantDereferenceHoldDisable:cnl */
+	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
+
+	/* FtrEnableFastAnisoL1BankingFix:cnl */
+	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
+
+	/* WaDisable3DMidCmdPreemption:cnl */
+	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+
+	/* WaDisableGPGPUMidCmdPreemption:cnl */
+	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
+
+	/* WaEnablePreemptionGranularityControlByUMD:cnl */
+	I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+		   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+	if (ret)
+		return ret;
 
 	return 0;
 }
 
@@ -1083,8 +1380,9 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
 
 	/* WaDisableDynamicCreditSharing:kbl */
 	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
-		WA_SET_BIT(GAMT_CHKN_BIT_REG,
-			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+		I915_WRITE(GAMT_CHKN_BIT_REG,
+			   (I915_READ(GAMT_CHKN_BIT_REG) |
+			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING));
 
 	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
 	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
@@ -1097,7 +1395,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
 		GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
 	/* WaDisableGafsUnitClkGating:kbl */
-	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+	I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
+				  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
 
 	/* WaDisableSbeCacheDispatchPortSharing:kbl */
 	WA_SET_BIT_MASKED(
@@ -1105,8 +1404,9 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
 		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 
 	/* WaInPlaceDecompressionHang:kbl */
-	WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
-		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+	I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+		   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
 
 	/* WaDisableLSQCROPERFforOCL:kbl */
 	ret = wa_ring_whitelist_reg(engine,
GEN8_L3SQCREG4); @@ -1125,6 +1425,11 @@ static int glk_init_workarounds(struct intel_engine_cs *engine) if (ret) return ret; + /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */ + ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1); + if (ret) + return ret; + /* WaToEnableHwFixForPushConstHWBug:glk */ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); @@ -1150,7 +1455,8 @@ static int cfl_init_workarounds(struct intel_engine_cs *engine) GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); /* WaDisableGafsUnitClkGating:cfl */ - WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) | + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE)); /* WaDisableSbeCacheDispatchPortSharing:cfl */ WA_SET_BIT_MASKED( @@ -1158,8 +1464,9 @@ static int cfl_init_workarounds(struct intel_engine_cs *engine) GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); /* WaInPlaceDecompressionHang:cfl */ - WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, + (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS)); return 0; } @@ -1188,6 +1495,8 @@ int init_workarounds_ring(struct intel_engine_cs *engine) err = glk_init_workarounds(engine); else if (IS_COFFEELAKE(dev_priv)) err = cfl_init_workarounds(engine); + else if (IS_CANNONLAKE(dev_priv)) + err = cnl_init_workarounds(engine); else err = 0; if (err) @@ -1279,12 +1588,12 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) return false; - /* Both ports drained, no more ELSP submission? */ - if (port_request(&engine->execlist_port[0])) + /* Waiting to drain ELSP? */ + if (READ_ONCE(engine->execlists.active)) return false; /* ELSP is empty, but there are ready requests? */ - if (READ_ONCE(engine->execlist_first)) + if (READ_ONCE(engine->execlists.first)) return false; /* Ring stopped? */ @@ -1333,11 +1642,188 @@ void intel_engines_mark_idle(struct drm_i915_private *i915) for_each_engine(engine, i915, id) { intel_engine_disarm_breadcrumbs(engine); i915_gem_batch_pool_fini(&engine->batch_pool); - tasklet_kill(&engine->irq_tasklet); - engine->no_priolist = false; + tasklet_kill(&engine->execlists.irq_tasklet); + engine->execlists.no_priolist = false; } } +bool intel_engine_can_store_dword(struct intel_engine_cs *engine) +{ + switch (INTEL_GEN(engine->i915)) { + case 2: + return false; /* uses physical not virtual addresses */ + case 3: + /* maybe only uses physical not virtual addresses */ + return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915)); + case 6: + return engine->class != VIDEO_DECODE_CLASS; /* b0rked */ + default: + return true; + } +} + +static void print_request(struct drm_printer *m, + struct drm_i915_gem_request *rq, + const char *prefix) +{ + drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix, + rq->global_seqno, + i915_gem_request_completed(rq) ? "!" 
: "", + rq->ctx->hw_id, rq->fence.seqno, + rq->priotree.priority, + jiffies_to_msecs(jiffies - rq->emitted_jiffies), + rq->timeline->common->name); +} + +void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m) +{ + struct intel_breadcrumbs * const b = &engine->breadcrumbs; + const struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_gpu_error * const error = &engine->i915->gpu_error; + struct drm_i915_private *dev_priv = engine->i915; + struct drm_i915_gem_request *rq; + struct rb_node *rb; + u64 addr; + + drm_printf(m, "%s\n", engine->name); + drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n", + intel_engine_get_seqno(engine), + intel_engine_last_submit(engine), + engine->hangcheck.seqno, + jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp), + engine->timeline->inflight_seqnos); + drm_printf(m, "\tReset count: %d\n", + i915_reset_engine_count(error, engine)); + + rcu_read_lock(); + + drm_printf(m, "\tRequests:\n"); + + rq = list_first_entry(&engine->timeline->requests, + struct drm_i915_gem_request, link); + if (&rq->link != &engine->timeline->requests) + print_request(m, rq, "\t\tfirst "); + + rq = list_last_entry(&engine->timeline->requests, + struct drm_i915_gem_request, link); + if (&rq->link != &engine->timeline->requests) + print_request(m, rq, "\t\tlast "); + + rq = i915_gem_find_active_request(engine); + if (rq) { + print_request(m, rq, "\t\tactive "); + drm_printf(m, + "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n", + rq->head, rq->postfix, rq->tail, + rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, + rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u); + } + + drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n", + I915_READ(RING_START(engine->mmio_base)), + rq ? i915_ggtt_offset(rq->ring->vma) : 0); + drm_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n", + I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR, + rq ? rq->ring->head : 0); + drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n", + I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR, + rq ? rq->ring->tail : 0); + drm_printf(m, "\tRING_CTL: 0x%08x [%s]\n", + I915_READ(RING_CTL(engine->mmio_base)), + I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : ""); + + rcu_read_unlock(); + + addr = intel_engine_get_active_head(engine); + drm_printf(m, "\tACTHD: 0x%08x_%08x\n", + upper_32_bits(addr), lower_32_bits(addr)); + addr = intel_engine_get_last_batch_head(engine); + drm_printf(m, "\tBBADDR: 0x%08x_%08x\n", + upper_32_bits(addr), lower_32_bits(addr)); + + if (i915_modparams.enable_execlists) { + const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; + u32 ptr, read, write; + unsigned int idx; + + drm_printf(m, "\tExeclist status: 0x%08x %08x\n", + I915_READ(RING_EXECLIST_STATUS_LO(engine)), + I915_READ(RING_EXECLIST_STATUS_HI(engine))); + + ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); + read = GEN8_CSB_READ_PTR(ptr); + write = GEN8_CSB_WRITE_PTR(ptr); + drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? 
%s\n", + read, execlists->csb_head, + write, + intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)), + yesno(test_bit(ENGINE_IRQ_EXECLIST, + &engine->irq_posted))); + if (read >= GEN8_CSB_ENTRIES) + read = 0; + if (write >= GEN8_CSB_ENTRIES) + write = 0; + if (read > write) + write += GEN8_CSB_ENTRIES; + while (read < write) { + idx = ++read % GEN8_CSB_ENTRIES; + drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n", + idx, + I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)), + hws[idx * 2], + I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)), + hws[idx * 2 + 1]); + } + + rcu_read_lock(); + for (idx = 0; idx < execlists_num_ports(execlists); idx++) { + unsigned int count; + + rq = port_unpack(&execlists->port[idx], &count); + if (rq) { + drm_printf(m, "\t\tELSP[%d] count=%d, ", + idx, count); + print_request(m, rq, "rq: "); + } else { + drm_printf(m, "\t\tELSP[%d] idle\n", + idx); + } + } + drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active); + rcu_read_unlock(); + } else if (INTEL_GEN(dev_priv) > 6) { + drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", + I915_READ(RING_PP_DIR_BASE(engine))); + drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n", + I915_READ(RING_PP_DIR_BASE_READ(engine))); + drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n", + I915_READ(RING_PP_DIR_DCLV(engine))); + } + + spin_lock_irq(&engine->timeline->lock); + list_for_each_entry(rq, &engine->timeline->requests, link) + print_request(m, rq, "\t\tE "); + for (rb = execlists->first; rb; rb = rb_next(rb)) { + struct i915_priolist *p = + rb_entry(rb, typeof(*p), node); + + list_for_each_entry(rq, &p->requests, priotree.link) + print_request(m, rq, "\t\tQ "); + } + spin_unlock_irq(&engine->timeline->lock); + + spin_lock_irq(&b->rb_lock); + for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { + struct intel_wait *w = rb_entry(rb, typeof(*w), node); + + drm_printf(m, "\t%s [%d] waiting for %x\n", + w->tsk->comm, w->tsk->pid, w->seqno); + } + spin_unlock_irq(&b->rb_lock); + + drm_printf(m, "\n"); +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/mock_engine.c" #endif diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 8c8ead2276e0..20934f1ea29a 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -53,7 +53,7 @@ static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv) static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv) { - return INTEL_GEN(dev_priv) < 4; + return INTEL_GEN(dev_priv) < 4 || INTEL_GEN(dev_priv) >= 9; } static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv) @@ -846,7 +846,7 @@ static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) return false; } - if (!i915.enable_fbc) { + if (!i915_modparams.enable_fbc) { fbc->no_fbc_reason = "disabled per module param or by default"; return false; } @@ -1061,6 +1061,8 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, to_intel_plane_state(plane_state); struct intel_crtc_state *intel_crtc_state; struct intel_crtc *crtc = to_intel_crtc(plane_state->crtc); + struct drm_plane *primary = crtc ? crtc->base.primary : NULL; + struct intel_plane *intel_plane = primary ? 
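The CSB dump above walks the context-status buffer from the cached read pointer to the hardware write pointer; adding GEN8_CSB_ENTRIES to an already-wrapped write pointer lets a single loop cover the wraparound. The same arithmetic in isolation, as a sketch (CSB_ENTRIES stands in for GEN8_CSB_ENTRIES, which is 6 here, and consume() is a placeholder):

	#define CSB_ENTRIES 6

	static void walk_csb(unsigned int read, unsigned int write)
	{
		if (read >= CSB_ENTRIES)
			read = 0;
		if (write >= CSB_ENTRIES)
			write = 0;
		if (read > write)		/* writer wrapped: e.g. read 4, write 1 */
			write += CSB_ENTRIES;	/* write becomes 7 */
		while (read < write) {
			unsigned int idx = ++read % CSB_ENTRIES; /* visits 5, 0, 1 */
			consume(idx);
		}
	}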
to_intel_plane(primary) : NULL; if (!intel_plane_state->base.visible) continue; @@ -1068,7 +1070,8 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) continue; - if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) + if (fbc_on_plane_a_only(dev_priv) && (!primary || + intel_plane->id != PLANE_PRIMARY)) continue; intel_crtc_state = to_intel_crtc_state( @@ -1293,8 +1296,8 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv) */ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) { - if (i915.enable_fbc >= 0) - return !!i915.enable_fbc; + if (i915_modparams.enable_fbc >= 0) + return !!i915_modparams.enable_fbc; if (!HAS_FBC(dev_priv)) return 0; @@ -1338,8 +1341,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) if (need_fbc_vtd_wa(dev_priv)) mkwrite_device_info(dev_priv)->has_fbc = false; - i915.enable_fbc = intel_sanitize_fbc_option(dev_priv); - DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc); + i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv); + DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", + i915_modparams.enable_fbc); if (!HAS_FBC(dev_priv)) { fbc->no_fbc_reason = "unsupported by this chipset"; diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 262e75c00dd2..598d4e28178b 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -540,10 +540,17 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, /* Find the largest fb */ for_each_crtc(dev, crtc) { - struct drm_i915_gem_object *obj = - intel_fb_obj(crtc->primary->state->fb); + struct drm_i915_gem_object *obj; intel_crtc = to_intel_crtc(crtc); + if (!crtc->primary) { + DRM_DEBUG_KMS("pipe %c has no primary plane\n", + pipe_name(intel_crtc->pipe)); + continue; + } + + obj = intel_fb_obj(crtc->primary->state->fb); + if (!crtc->state->active || !obj) { DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n", pipe_name(intel_crtc->pipe)); @@ -694,10 +701,8 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie) /* Due to peculiar init order wrt to hpd handling this is separate. */ if (drm_fb_helper_initial_config(&ifbdev->helper, - ifbdev->preferred_bpp)) { + ifbdev->preferred_bpp)) intel_fbdev_unregister(to_i915(ifbdev->helper.dev)); - intel_fbdev_fini(to_i915(ifbdev->helper.dev)); - } } void intel_fbdev_initial_config_async(struct drm_device *dev) @@ -775,7 +780,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous WARN_ON(state != FBINFO_STATE_RUNNING); if (!console_trylock()) { /* Don't block our own workqueue as this can - * be run in parallel with other i915.ko tasks. + * be run in parallel with other i915_modparams.ko tasks.
*/ schedule_work(&dev_priv->fbdev_suspend_work); return; @@ -797,7 +802,11 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) { struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; - if (ifbdev) + if (!ifbdev) + return; + + intel_fbdev_sync(ifbdev); + if (ifbdev->vma) drm_fb_helper_hotplug_event(&ifbdev->helper); } diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c index 04689600e337..aeed15a93b87 100644 --- a/drivers/gpu/drm/i915/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c @@ -179,11 +179,18 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + if (!crtc) { + DRM_DEBUG("No crtc for pipe=%d\n", pipe); + return; + } if (enable) - bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); + bdw_enable_pipe_irq(dev_priv, drm_crtc_index(&crtc->base), + GEN8_PIPE_FIFO_UNDERRUN); else - bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); + bdw_disable_pipe_irq(dev_priv, drm_crtc_index(&crtc->base), + GEN8_PIPE_FIFO_UNDERRUN); } static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 5fa286074811..83bd401196eb 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -388,7 +388,11 @@ struct guc_ct_buffer_desc { /* Preempt to idle on quantum expiry */ #define POLICY_PREEMPT_TO_IDLE (1<<1) -#define POLICY_MAX_NUM_WI 15 +#define POLICY_MAX_NUM_WI 15 +#define POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000 +#define POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000 +#define POLICY_DEFAULT_PREEMPTION_TIME_US 500000 +#define POLICY_DEFAULT_FAULT_TIME_US 250000 struct guc_policy { /* Time for one workload to execute. 
(in micro seconds) */ diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 8b0ae7fce7f2..1aa89aaabe61 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -55,15 +55,12 @@ #define SKL_FW_MAJOR 6 #define SKL_FW_MINOR 1 -#define BXT_FW_MAJOR 8 -#define BXT_FW_MINOR 7 +#define BXT_FW_MAJOR 9 +#define BXT_FW_MINOR 29 #define KBL_FW_MAJOR 9 #define KBL_FW_MINOR 14 -#define GLK_FW_MAJOR 10 -#define GLK_FW_MINOR 56 - #define GUC_FW_PATH(platform, major, minor) \ "i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin" @@ -76,8 +73,6 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE); #define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR) MODULE_FIRMWARE(I915_KBL_GUC_UCODE); -#define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR) - static u32 get_gttype(struct drm_i915_private *dev_priv) { @@ -131,14 +126,14 @@ static void guc_params_init(struct drm_i915_private *dev_priv) params[GUC_CTL_LOG_PARAMS] = guc->log.flags; - if (i915.guc_log_level >= 0) { + if (i915_modparams.guc_log_level >= 0) { params[GUC_CTL_DEBUG] = - i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT; + i915_modparams.guc_log_level << GUC_LOG_VERBOSITY_SHIFT; } else params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED; /* If GuC submission is enabled, set up additional parameters here */ - if (i915.enable_guc_submission) { + if (i915_modparams.enable_guc_submission) { u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT; u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool); u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16; @@ -368,7 +363,8 @@ int intel_guc_init_hw(struct intel_guc *guc) guc->fw.load_status = INTEL_UC_FIRMWARE_SUCCESS; DRM_INFO("GuC %s (firmware %s [version %u.%u])\n", - i915.enable_guc_submission ? "submission enabled" : "loaded", + i915_modparams.enable_guc_submission ? 
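For reference, GUC_FW_PATH() above builds the firmware path purely by pasting stringified arguments between string literals (this walkthrough relies only on the kernel's standard __stringify()); the BXT bump to 9.29 therefore requests:

	GUC_FW_PATH(bxt, 9, 29)
		-> "i915/" "bxt" "_guc_ver" "9" "_" "29" ".bin"
		-> "i915/bxt_guc_ver9_29.bin"

and MODULE_FIRMWARE() must advertise exactly the same string for firmware packaging and initramfs tooling to pick the blob up.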
"submission enabled" : + "loaded", guc->fw.path, guc->fw.major_ver_found, guc->fw.minor_ver_found); @@ -390,8 +386,8 @@ int intel_guc_select_fw(struct intel_guc *guc) guc->fw.load_status = INTEL_UC_FIRMWARE_NONE; guc->fw.type = INTEL_UC_FW_TYPE_GUC; - if (i915.guc_firmware_path) { - guc->fw.path = i915.guc_firmware_path; + if (i915_modparams.guc_firmware_path) { + guc->fw.path = i915_modparams.guc_firmware_path; guc->fw.major_ver_wanted = 0; guc->fw.minor_ver_wanted = 0; } else if (IS_SKYLAKE(dev_priv)) { @@ -406,10 +402,6 @@ int intel_guc_select_fw(struct intel_guc *guc) guc->fw.path = I915_KBL_GUC_UCODE; guc->fw.major_ver_wanted = KBL_FW_MAJOR; guc->fw.minor_ver_wanted = KBL_FW_MINOR; - } else if (IS_GEMINILAKE(dev_priv)) { - guc->fw.path = I915_GLK_GUC_UCODE; - guc->fw.major_ver_wanted = GLK_FW_MAJOR; - guc->fw.minor_ver_wanted = GLK_FW_MINOR; } else { DRM_ERROR("No GuC firmware known for platform with GuC!\n"); return -ENOENT; diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index 16d3b8719cab..a6a8490bc985 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -144,7 +144,7 @@ static int guc_log_relay_file_create(struct intel_guc *guc) struct dentry *log_dir; int ret; - if (i915.guc_log_level < 0) + if (i915_modparams.guc_log_level < 0) return 0; /* For now create the log file in /sys/kernel/debug/dri/0 dir */ @@ -480,7 +480,7 @@ static int guc_log_late_setup(struct intel_guc *guc) guc_log_runtime_destroy(guc); err: /* logging will remain off */ - i915.guc_log_level = -1; + i915_modparams.guc_log_level = -1; return ret; } @@ -502,7 +502,8 @@ static void guc_flush_logs(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); - if (!i915.enable_guc_submission || (i915.guc_log_level < 0)) + if (!i915_modparams.enable_guc_submission || + (i915_modparams.guc_log_level < 0)) return; /* First disable the interrupts, will be renabled afterwards */ @@ -529,8 +530,8 @@ int intel_guc_log_create(struct intel_guc *guc) GEM_BUG_ON(guc->log.vma); - if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX) - i915.guc_log_level = GUC_LOG_VERBOSITY_MAX; + if (i915_modparams.guc_log_level > GUC_LOG_VERBOSITY_MAX) + i915_modparams.guc_log_level = GUC_LOG_VERBOSITY_MAX; /* The first page is to save log buffer state. 
Allocate one * extra page for others in case for overlap */ @@ -555,7 +556,7 @@ int intel_guc_log_create(struct intel_guc *guc) guc->log.vma = vma; - if (i915.guc_log_level >= 0) { + if (i915_modparams.guc_log_level >= 0) { ret = guc_log_runtime_create(guc); if (ret < 0) goto err_vma; @@ -576,7 +577,7 @@ int intel_guc_log_create(struct intel_guc *guc) i915_vma_unpin_and_release(&guc->log.vma); err: /* logging will be off */ - i915.guc_log_level = -1; + i915_modparams.guc_log_level = -1; return ret; } @@ -600,7 +601,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) return -EINVAL; /* This combination doesn't make sense & won't have any effect */ - if (!log_param.logging_enabled && (i915.guc_log_level < 0)) + if (!log_param.logging_enabled && (i915_modparams.guc_log_level < 0)) return 0; ret = guc_log_control(guc, log_param.value); @@ -610,7 +611,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) } if (log_param.logging_enabled) { - i915.guc_log_level = log_param.verbosity; + i915_modparams.guc_log_level = log_param.verbosity; /* If log_level was set as -1 at boot time, then the relay channel file * wouldn't have been created by now and interrupts also would not have @@ -633,7 +634,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) guc_flush_logs(guc); /* As logging is disabled, update log level to reflect that */ - i915.guc_log_level = -1; + i915_modparams.guc_log_level = -1; } return ret; @@ -641,17 +642,16 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) void i915_guc_log_register(struct drm_i915_private *dev_priv) { - if (!i915.enable_guc_submission || i915.guc_log_level < 0) + if (!i915_modparams.enable_guc_submission || + (i915_modparams.guc_log_level < 0)) return; - mutex_lock(&dev_priv->drm.struct_mutex); guc_log_late_setup(&dev_priv->guc); - mutex_unlock(&dev_priv->drm.struct_mutex); } void i915_guc_log_unregister(struct drm_i915_private *dev_priv) { - if (!i915.enable_guc_submission) + if (!i915_modparams.enable_guc_submission) return; mutex_lock(&dev_priv->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index c17ed0e62b67..f0575d4ca113 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -47,6 +47,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv) return true; if (IS_KABYLAKE(dev_priv)) return true; + if (IS_BROXTON(dev_priv)) + return true; return false; } @@ -58,7 +60,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv) */ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) { - if (!i915.enable_gvt) + if (!i915_modparams.enable_gvt) return; if (intel_vgpu_active(dev_priv)) { @@ -73,7 +75,7 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) return; bail: - i915.enable_gvt = 0; + i915_modparams.enable_gvt = 0; } /** @@ -90,17 +92,17 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) { int ret; - if (!i915.enable_gvt) { + if (!i915_modparams.enable_gvt) { DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n"); return 0; } - if (!i915.enable_execlists) { + if (!i915_modparams.enable_execlists) { DRM_ERROR("i915 GVT-g loading failed due to disabled execlists mode\n"); return -EIO; } - if (i915.enable_guc_submission) { + if (i915_modparams.enable_guc_submission) { DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n"); return -EIO; } @@ 
-123,7 +125,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) return 0; bail: - i915.enable_gvt = 0; + i915_modparams.enable_gvt = 0; return 0; } diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index d9d87d96fb69..870186cb4b55 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -266,9 +266,8 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) */ tmp = I915_READ_CTL(engine); if (tmp & RING_WAIT) { - i915_handle_error(dev_priv, 0, - "Kicking stuck wait on %s", - engine->name); + i915_handle_error(dev_priv, BIT(engine->id), 0, + "stuck wait on %s", engine->name); I915_WRITE_CTL(engine, tmp); return ENGINE_WAIT_KICK; } @@ -278,8 +277,8 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) default: return ENGINE_DEAD; case 1: - i915_handle_error(dev_priv, 0, - "Kicking stuck semaphore on %s", + i915_handle_error(dev_priv, ALL_ENGINES, 0, + "stuck semaphore on %s", engine->name); I915_WRITE_CTL(engine, tmp); return ENGINE_WAIT_KICK; @@ -401,13 +400,13 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915, if (stuck != hung) hung &= ~stuck; len = scnprintf(msg, sizeof(msg), - "%s on ", stuck == hung ? "No progress" : "Hang"); + "%s on ", stuck == hung ? "no progress" : "hang"); for_each_engine_masked(engine, i915, hung, tmp) len += scnprintf(msg + len, sizeof(msg) - len, "%s, ", engine->name); msg[len-2] = '\0'; - return i915_handle_error(i915, hung, "%s", msg); + return i915_handle_error(i915, hung, I915_ERROR_CAPTURE, "%s", msg); } /* @@ -428,7 +427,10 @@ static void i915_hangcheck_elapsed(struct work_struct *work) unsigned int hung = 0, stuck = 0; int busy_count = 0; - if (!i915.enable_hangcheck) + if (!i915_modparams.enable_hangcheck) + return; + + if (intel_vgpu_active(dev_priv)) return; if (!READ_ONCE(dev_priv->gt.awake)) diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c new file mode 100644 index 000000000000..253d22dd5f88 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_hdcp.c @@ -0,0 +1,837 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2017 Google, Inc. + * + * Authors: + * Sean Paul + */ + +#include +#include +#include +#include + +#include "intel_drv.h" +#include "i915_reg.h" + +#define KEY_LOAD_TRIES 5 + +static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port, + const struct intel_hdcp_shim *shim) +{ + int ret, read_ret; + bool ksv_ready; + + /* Poll for ksv list ready (spec says max time allowed is 5s) */ + ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port, + &ksv_ready), + read_ret || ksv_ready, 5 * 1000 * 1000, 1000, + 100 * 1000); + if (ret) + return ret; + if (read_ret) + return read_ret; + if (!ksv_ready) + return -ETIMEDOUT; + + return 0; +} + +static bool hdcp_key_loadable(struct drm_i915_private *dev_priv) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *power_well; + bool enabled = false; + + mutex_lock(&power_domains->lock); + + /* PG1 (power well #1) needs to be enabled */ + for_each_power_well(dev_priv, power_well) { + if (power_well->id == SKL_DISP_PW_1) { + enabled = power_well->ops->is_enabled(dev_priv, + power_well); + break; + } + } + mutex_unlock(&power_domains->lock); + + /* + * Another requirement for HDCP key loadability is that the PLL for + * cdclk is enabled. Without an active crtc we won't land here, so we + * assume that cdclk is already on.
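(For cross-reference: _intel_hdcp_enable() later in this file treats this check as a hard gate, failing with -ENXIO when it returns false; once it passes, intel_hdcp_load_keys() is retried up to KEY_LOAD_TRIES (5) times, with intel_hdcp_clear_keys() run between attempts.)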
+ */ + + return enabled; +} + +static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv) +{ + I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER); + I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | + HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE); +} + +static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) +{ + int ret; + u32 val; + + val = I915_READ(HDCP_KEY_STATUS); + if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS)) + return 0; + + /* + * On HSW and BDW, HW loads the HDCP1.4 key when the display comes + * out of reset. So if the key is not already loaded, it's an error state. + */ + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE)) + return -ENXIO; + + /* + * Initiate loading the HDCP key from fuses. + * + * On BXT+ platforms, the HDCP key needs to be loaded by SW. Only SKL and KBL + * differ in the key load trigger process from other platforms. + */ + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + mutex_lock(&dev_priv->pcu_lock); + ret = sandybridge_pcode_write(dev_priv, + SKL_PCODE_LOAD_HDCP_KEYS, 1); + mutex_unlock(&dev_priv->pcu_lock); + if (ret) { + DRM_ERROR("Failed to initiate HDCP key load (%d)\n", + ret); + return ret; + } + } else { + I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER); + } + + /* Wait for the keys to load (500us) */ + ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS, + HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE, + 10, 1, &val); + if (ret) + return ret; + else if (!(val & HDCP_KEY_LOAD_STATUS)) + return -ENXIO; + + /* Send Aksv over to PCH display for use in authentication */ + I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER); + + return 0; +} + +/* Returns updated SHA-1 index */ +static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text) +{ + I915_WRITE(HDCP_SHA_TEXT, sha_text); + if (intel_wait_for_register(dev_priv, HDCP_REP_CTL, + HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) { + DRM_ERROR("Timed out waiting for SHA1 ready\n"); + return -ETIMEDOUT; + } + return 0; +} + +static +u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port) +{ + enum port port = intel_dig_port->base.port; + switch (port) { + case PORT_A: + return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0; + case PORT_B: + return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0; + case PORT_C: + return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0; + case PORT_D: + return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0; + case PORT_E: + return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0; + default: + break; + } + DRM_ERROR("Unknown port %d\n", port); + return -EINVAL; +} + +static +bool intel_hdcp_is_ksv_valid(u8 *ksv) +{ + int i, ones = 0; + /* KSV has 20 1's and 20 0's */ + for (i = 0; i < DRM_HDCP_KSV_LEN; i++) + ones += hweight8(ksv[i]); + if (ones != 20) + return false; + return true; +} + +/* Implements Part 2 of the HDCP authorization procedure */ +static +int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port, + const struct intel_hdcp_shim *shim) +{ + struct drm_i915_private *dev_priv; + u32 vprime, sha_text, sha_leftovers, rep_ctl; + u8 bstatus[2], num_downstream, *ksv_fifo; + int ret, i, j, sha_idx; + + dev_priv = intel_dig_port->base.base.dev->dev_private; + + ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim); + if (ret) { + DRM_ERROR("KSV list failed to become ready (%d)\n", ret); + return ret; + } + + ret = shim->read_bstatus(intel_dig_port, bstatus); + if (ret) + return ret; + + if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
+ DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { + DRM_ERROR("Max Topology Limit Exceeded\n"); + return -EPERM; + } + + /* + * When repeater reports 0 device count, HDCP1.4 spec allows disabling + * the HDCP encryption. That implies that repeater can't have its own + * display. As there is no consumption of encrypted content in the + * repeater with 0 downstream devices, we are failing the + * authentication. + */ + num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); + if (num_downstream == 0) + return -EINVAL; + + ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL); + if (!ksv_fifo) + return -ENOMEM; + + ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo); + if (ret) + goto err; + + /* Process V' values from the receiver */ + for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) { + ret = shim->read_v_prime_part(intel_dig_port, i, &vprime); + if (ret) + goto err; + I915_WRITE(HDCP_SHA_V_PRIME(i), vprime); + } + + /* + * We need to write the concatenation of all device KSVs, BINFO (DP) || + * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte + * stream is written via the HDCP_SHA_TEXT register in 32-bit + * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This + * index will keep track of our progress through the 64 bytes as well as + * helping us work the 40-bit KSVs through our 32-bit register. + * + * NOTE: data passed via HDCP_SHA_TEXT should be big-endian + */ + sha_idx = 0; + sha_text = 0; + sha_leftovers = 0; + rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port); + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + for (i = 0; i < num_downstream; i++) { + unsigned int sha_empty; + u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN]; + + /* Fill up the empty slots in sha_text and write it out */ + sha_empty = sizeof(sha_text) - sha_leftovers; + for (j = 0; j < sha_empty; j++) + sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8); + + ret = intel_write_sha_text(dev_priv, sha_text); + if (ret < 0) + goto err; + + /* Programming guide writes this every 64 bytes */ + sha_idx += sizeof(sha_text); + if (!(sha_idx % 64)) + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + + /* Store the leftover bytes from the ksv in sha_text */ + sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty; + sha_text = 0; + for (j = 0; j < sha_leftovers; j++) + sha_text |= ksv[sha_empty + j] << + ((sizeof(sha_text) - j - 1) * 8); + + /* + * If we still have room in sha_text for more data, continue. + * Otherwise, write it out immediately. + */ + if (sizeof(sha_text) > sha_leftovers) + continue; + + ret = intel_write_sha_text(dev_priv, sha_text); + if (ret < 0) + goto err; + sha_leftovers = 0; + sha_text = 0; + sha_idx += sizeof(sha_text); + } + + /* + * We need to write BINFO/BSTATUS, and M0 now. Depending on how many + * bytes are leftover from the last ksv, we might be able to fit them + * all in sha_text (first 2 cases), or we might need to split them up + * into 2 writes (last 2 cases). 
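(A worked example of this bookkeeping: DRM_HDCP_KSV_LEN is 5 and sha_text is a 4-byte u32, so the first KSV fills one word and leaves sha_leftovers == 1; the second completes a word with three of its bytes and leaves 2; the leftover count then cycles 1, 2, 3, 0 as devices are consumed. The final length write further below follows from the same counts: for num_downstream == 2 the hashed stream is 2 * 5 + 10 = 20 bytes, i.e. (2 * 5 + 10) * 8 = 160 bits.)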
+ */ + if (sha_leftovers == 0) { + /* Write 16 bits of text, 16 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16); + ret = intel_write_sha_text(dev_priv, + bstatus[0] << 8 | bstatus[1]); + if (ret < 0) + goto err; + sha_idx += sizeof(sha_text); + + /* Write 32 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); + ret = intel_write_sha_text(dev_priv, 0); + if (ret < 0) + goto err; + sha_idx += sizeof(sha_text); + + /* Write 16 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16); + ret = intel_write_sha_text(dev_priv, 0); + if (ret < 0) + goto err; + sha_idx += sizeof(sha_text); + + } else if (sha_leftovers == 1) { + /* Write 24 bits of text, 8 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24); + sha_text |= bstatus[0] << 16 | bstatus[1] << 8; + /* Only 24-bits of data, must be in the LSB */ + sha_text = (sha_text & 0xffffff00) >> 8; + ret = intel_write_sha_text(dev_priv, sha_text); + if (ret < 0) + goto err; + sha_idx += sizeof(sha_text); + + /* Write 32 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); + ret = intel_write_sha_text(dev_priv, 0); + if (ret < 0) + goto err; + sha_idx += sizeof(sha_text); + + /* Write 24 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); + ret = intel_write_sha_text(dev_priv, 0); + if (ret < 0) + goto err; + sha_idx += sizeof(sha_text); + + } else if (sha_leftovers == 2) { + /* Write 32 bits of text */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + sha_text |= bstatus[0] << 24 | bstatus[1] << 16; + ret = intel_write_sha_text(dev_priv, sha_text); + if (ret < 0) + goto err; + sha_idx += sizeof(sha_text); + + /* Write 64 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); + for (i = 0; i < 2; i++) { + ret = intel_write_sha_text(dev_priv, 0); + if (ret < 0) + goto err; + sha_idx += sizeof(sha_text); + } + } else if (sha_leftovers == 3) { + /* Write 32 bits of text */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + sha_text |= bstatus[0] << 24; + ret = intel_write_sha_text(dev_priv, sha_text); + if (ret < 0) + goto err; + sha_idx += sizeof(sha_text); + + /* Write 8 bits of text, 24 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); + ret = intel_write_sha_text(dev_priv, bstatus[1]); + if (ret < 0) + goto err; + sha_idx += sizeof(sha_text); + + /* Write 32 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); + ret = intel_write_sha_text(dev_priv, 0); + if (ret < 0) + goto err; + sha_idx += sizeof(sha_text); + + /* Write 8 bits of M0 */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24); + ret = intel_write_sha_text(dev_priv, 0); + if (ret < 0) + goto err; + sha_idx += sizeof(sha_text); + } else { + DRM_ERROR("Invalid number of leftovers %d\n", sha_leftovers); + return -EINVAL; + } + + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + /* Fill up to 64-4 bytes with zeros (leave the last write for length) */ + while ((sha_idx % 64) < (64 - sizeof(sha_text))) { + ret = intel_write_sha_text(dev_priv, 0); + if (ret < 0) + goto err; + sha_idx += sizeof(sha_text); + } + + /* + * Last write gets the length of the concatenation in bits. 
That is: + * - 5 bytes per device + * - 10 bytes for BINFO/BSTATUS(2), M0(8) + */ + sha_text = (num_downstream * 5 + 10) * 8; + ret = intel_write_sha_text(dev_priv, sha_text); + if (ret < 0) + goto err; + + /* Tell the HW we're done with the hash and wait for it to ACK */ + I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH); + if (intel_wait_for_register(dev_priv, HDCP_REP_CTL, + HDCP_SHA1_COMPLETE, + HDCP_SHA1_COMPLETE, 1)) { + DRM_ERROR("Timed out waiting for SHA1 complete\n"); + return -ETIMEDOUT; + } + if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { + DRM_ERROR("SHA-1 mismatch, HDCP failed\n"); + return -ENXIO; + } + + DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n", + num_downstream); + ret = 0; +err: + kfree(ksv_fifo); + return ret; +} + +/* Implements Part 1 of the HDCP authorization procedure */ +static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, + const struct intel_hdcp_shim *shim) +{ + struct drm_i915_private *dev_priv; + enum port port; + unsigned long r0_prime_gen_start; + int ret, i, tries = 2; + union { + u32 reg[2]; + u8 shim[DRM_HDCP_AN_LEN]; + } an; + union { + u32 reg[2]; + u8 shim[DRM_HDCP_KSV_LEN]; + } bksv; + union { + u32 reg; + u8 shim[DRM_HDCP_RI_LEN]; + } ri; + bool repeater_present, hdcp_capable; + + dev_priv = intel_dig_port->base.base.dev->dev_private; + + port = intel_dig_port->base.port; + + /* + * Detects whether the display is HDCP capable. Although we check for + * valid Bksv below, the HDCP over DP spec requires that we check + * whether the display supports HDCP before we write An. For HDMI + * displays, this is not necessary. + */ + if (shim->hdcp_capable) { + ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable); + if (ret) + return ret; + if (!hdcp_capable) { + DRM_ERROR("Panel is not HDCP capable\n"); + return -EINVAL; + } + } + + /* Initialize An with 2 random values and acquire it */ + for (i = 0; i < 2; i++) + I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32()); + I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN); + + /* Wait for An to be acquired */ + if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), + HDCP_STATUS_AN_READY, + HDCP_STATUS_AN_READY, 1)) { + DRM_ERROR("Timed out waiting for An\n"); + return -ETIMEDOUT; + } + + an.reg[0] = I915_READ(PORT_HDCP_ANLO(port)); + an.reg[1] = I915_READ(PORT_HDCP_ANHI(port)); + ret = shim->write_an_aksv(intel_dig_port, an.shim); + if (ret) + return ret; + + r0_prime_gen_start = jiffies; + + memset(&bksv, 0, sizeof(bksv)); + + /* HDCP spec states that we must retry the bksv if it is invalid */ + for (i = 0; i < tries; i++) { + ret = shim->read_bksv(intel_dig_port, bksv.shim); + if (ret) + return ret; + if (intel_hdcp_is_ksv_valid(bksv.shim)) + break; + } + if (i == tries) { + DRM_ERROR("HDCP failed, Bksv is invalid\n"); + return -ENODEV; + } + + I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]); + I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]); + + ret = shim->repeater_present(intel_dig_port, &repeater_present); + if (ret) + return ret; + if (repeater_present) + I915_WRITE(HDCP_REP_CTL, + intel_hdcp_get_repeater_ctl(intel_dig_port)); + + ret = shim->toggle_signalling(intel_dig_port, true); + if (ret) + return ret; + + I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC); + + /* Wait for R0 ready */ + if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) & + (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) { + DRM_ERROR("Timed out waiting for R0 ready\n"); + return -ETIMEDOUT; + } + + /* + * Wait for R0' to become available. 
The spec says 100ms from Aksv, but + * some monitors can take longer than this. We'll set the timeout at + * 300ms just to be sure. + * + * On DP, there's an R0_READY bit available but no such bit + * exists on HDMI. Since the upper-bound is the same, we'll just do + * the stupid thing instead of polling on one and not the other. + */ + wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300); + + ri.reg = 0; + ret = shim->read_ri_prime(intel_dig_port, ri.shim); + if (ret) + return ret; + I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg); + + /* Wait for Ri prime match */ + if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) & + (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) { + DRM_ERROR("Timed out waiting for Ri prime match (%x)\n", + I915_READ(PORT_HDCP_STATUS(port))); + return -ETIMEDOUT; + } + + /* Wait for encryption confirmation */ + if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), + HDCP_STATUS_ENC, HDCP_STATUS_ENC, 20)) { + DRM_ERROR("Timed out waiting for encryption\n"); + return -ETIMEDOUT; + } + + /* + * XXX: If we have MST-connected devices, we need to enable encryption + * on those as well. + */ + + if (repeater_present) + return intel_hdcp_auth_downstream(intel_dig_port, shim); + + DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n"); + return 0; +} + +static +struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector) +{ + return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base); +} + +static int _intel_hdcp_disable(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = connector->base.dev->dev_private; + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + enum port port = intel_dig_port->base.port; + int ret; + + DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n", + connector->base.name, connector->base.base.id); + + I915_WRITE(PORT_HDCP_CONF(port), 0); + if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0, + 20)) { + DRM_ERROR("Failed to disable HDCP, timeout clearing status\n"); + return -ETIMEDOUT; + } + + ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false); + if (ret) { + DRM_ERROR("Failed to disable HDCP signalling\n"); + return ret; + } + + DRM_DEBUG_KMS("HDCP is disabled\n"); + return 0; +} + +static int _intel_hdcp_enable(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = connector->base.dev->dev_private; + int i, ret, tries = 3; + + DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n", + connector->base.name, connector->base.base.id); + + if (!hdcp_key_loadable(dev_priv)) { + DRM_ERROR("HDCP key Load is not possible\n"); + return -ENXIO; + } + + for (i = 0; i < KEY_LOAD_TRIES; i++) { + ret = intel_hdcp_load_keys(dev_priv); + if (!ret) + break; + intel_hdcp_clear_keys(dev_priv); + } + if (ret) { + DRM_ERROR("Could not load HDCP keys, (%d)\n", ret); + return ret; + } + + /* In case of authentication failures, the HDCP spec expects reauth. */ + for (i = 0; i < tries; i++) { + ret = intel_hdcp_auth(conn_to_dig_port(connector), + connector->hdcp_shim); + if (!ret) + return 0; + + DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret); + + /* Ensuring HDCP encryption and signalling are stopped.
*/ + _intel_hdcp_disable(connector); + } + + DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret); + return ret; +} + +static void intel_hdcp_check_work(struct work_struct *work) +{ + struct intel_connector *connector = container_of(to_delayed_work(work), + struct intel_connector, + hdcp_check_work); + if (!intel_hdcp_check_link(connector)) + schedule_delayed_work(&connector->hdcp_check_work, + DRM_HDCP_CHECK_PERIOD_MS); +} + +static void intel_hdcp_prop_work(struct work_struct *work) +{ + struct intel_connector *connector = container_of(work, + struct intel_connector, + hdcp_prop_work); + struct drm_device *dev = connector->base.dev; + struct drm_connector_state *state; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + mutex_lock(&connector->hdcp_mutex); + + /* + * This worker is only used to flip between ENABLED/DESIRED. Either of + * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED, + * we're running just after hdcp has been disabled, so just exit + */ + if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + state = connector->base.state; + state->content_protection = connector->hdcp_value; + } + + mutex_unlock(&connector->hdcp_mutex); + drm_modeset_unlock(&dev->mode_config.connection_mutex); +} + +bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) +{ + /* PORT E doesn't have HDCP, and PORT F is disabled */ + return ((INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) && + !IS_CHERRYVIEW(dev_priv) && port < PORT_E); +} + +int intel_hdcp_init(struct intel_connector *connector, + const struct intel_hdcp_shim *hdcp_shim) +{ + int ret; + + ret = drm_connector_attach_content_protection_property( + &connector->base); + if (ret) + return ret; + + connector->hdcp_shim = hdcp_shim; + mutex_init(&connector->hdcp_mutex); + INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work); + INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work); + return 0; +} + +int intel_hdcp_enable(struct intel_connector *connector) +{ + int ret; + + if (!connector->hdcp_shim) + return -ENOENT; + + mutex_lock(&connector->hdcp_mutex); + + ret = _intel_hdcp_enable(connector); + if (ret) + goto out; + + connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED; + schedule_work(&connector->hdcp_prop_work); + schedule_delayed_work(&connector->hdcp_check_work, + DRM_HDCP_CHECK_PERIOD_MS); +out: + mutex_unlock(&connector->hdcp_mutex); + return ret; +} + +int intel_hdcp_disable(struct intel_connector *connector) +{ + int ret = 0; + + if (!connector->hdcp_shim) + return -ENOENT; + + mutex_lock(&connector->hdcp_mutex); + + if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED; + ret = _intel_hdcp_disable(connector); + } + + mutex_unlock(&connector->hdcp_mutex); + cancel_delayed_work_sync(&connector->hdcp_check_work); + return ret; +} + +void intel_hdcp_atomic_check(struct drm_connector *connector, + struct drm_connector_state *old_state, + struct drm_connector_state *new_state) +{ + uint64_t old_cp = old_state->content_protection; + uint64_t new_cp = new_state->content_protection; + struct drm_crtc_state *crtc_state; + + if (!new_state->crtc) { + /* + * If the connector is being disabled with CP enabled, mark it + * desired so it's re-enabled when the connector is brought back + */ + if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) + new_state->content_protection = + DRM_MODE_CONTENT_PROTECTION_DESIRED; + return; + } + + /* + * Nothing to do if 
the state didn't change, or HDCP was activated since + * the last commit + */ + if (old_cp == new_cp || + (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED && + new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) + return; + + crtc_state = drm_atomic_get_new_crtc_state(new_state->state, + new_state->crtc); + crtc_state->mode_changed = true; +} + +/* Implements Part 3 of the HDCP authorization procedure */ +int intel_hdcp_check_link(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = connector->base.dev->dev_private; + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + enum port port = intel_dig_port->base.port; + int ret = 0; + + if (!connector->hdcp_shim) + return -ENOENT; + + mutex_lock(&connector->hdcp_mutex); + + if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + goto out; + + if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) { + DRM_ERROR("%s:%d HDCP check failed: link is not encrypted,%x\n", + connector->base.name, connector->base.base.id, + I915_READ(PORT_HDCP_STATUS(port))); + ret = -ENXIO; + connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&connector->hdcp_prop_work); + goto out; + } + + if (connector->hdcp_shim->check_link(intel_dig_port)) { + if (connector->hdcp_value != + DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + connector->hdcp_value = + DRM_MODE_CONTENT_PROTECTION_ENABLED; + schedule_work(&connector->hdcp_prop_work); + } + goto out; + } + + DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n", + connector->base.name, connector->base.base.id); + + ret = _intel_hdcp_disable(connector); + if (ret) { + DRM_ERROR("Failed to disable hdcp (%d)\n", ret); + connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&connector->hdcp_prop_work); + goto out; + } + + ret = _intel_hdcp_enable(connector); + if (ret) { + DRM_ERROR("Failed to enable hdcp (%d)\n", ret); + connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&connector->hdcp_prop_work); + goto out; + } + +out: + mutex_unlock(&connector->hdcp_mutex); + return ret; +} diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index e8abea7594ec..8e60071988ad 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include "intel_drv.h" #include @@ -481,7 +482,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, crtc_state->limited_color_range ? 
HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL, - intel_hdmi->rgb_quant_range_selectable); + intel_hdmi->rgb_quant_range_selectable, + is_hdmi2_sink); /* TODO: handle pixel repetition for YCBCR420 outputs */ intel_write_infoframe(encoder, crtc_state, &frame); @@ -867,6 +869,249 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) adapter, enable); } +static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port, + unsigned int offset, void *buffer, size_t size) +{ + struct intel_hdmi *hdmi = &intel_dig_port->hdmi; + struct drm_i915_private *dev_priv = + intel_dig_port->base.base.dev->dev_private; + struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, + hdmi->ddc_bus); + int ret; + u8 start = offset & 0xff; + struct i2c_msg msgs[] = { + { + .addr = DRM_HDCP_DDC_ADDR, + .flags = 0, + .len = 1, + .buf = &start, + }, + { + .addr = DRM_HDCP_DDC_ADDR, + .flags = I2C_M_RD, + .len = size, + .buf = buffer + } + }; + ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)); + if (ret == ARRAY_SIZE(msgs)) + return 0; + return ret >= 0 ? -EIO : ret; +} + +static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port, + unsigned int offset, void *buffer, size_t size) +{ + struct intel_hdmi *hdmi = &intel_dig_port->hdmi; + struct drm_i915_private *dev_priv = + intel_dig_port->base.base.dev->dev_private; + struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, + hdmi->ddc_bus); + int ret; + u8 *write_buf; + struct i2c_msg msg; + + write_buf = kzalloc(size + 1, GFP_KERNEL); + if (!write_buf) + return -ENOMEM; + + write_buf[0] = offset & 0xff; + memcpy(&write_buf[1], buffer, size); + + msg.addr = DRM_HDCP_DDC_ADDR; + msg.flags = 0, + msg.len = size + 1, + msg.buf = write_buf; + + ret = i2c_transfer(adapter, &msg, 1); + kfree(write_buf); + if (ret == 1) + return 0; + return ret >= 0 ? 
-EIO : ret; +} + +static +int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, + u8 *an) +{ + struct intel_hdmi *hdmi = &intel_dig_port->hdmi; + struct drm_i915_private *dev_priv = + intel_dig_port->base.base.dev->dev_private; + struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, + hdmi->ddc_bus); + int ret; + + ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an, + DRM_HDCP_AN_LEN); + if (ret) { + DRM_ERROR("Write An over DDC failed (%d)\n", ret); + return ret; + } + + ret = intel_gmbus_output_aksv(adapter); + if (ret < 0) { + DRM_ERROR("Failed to output aksv (%d)\n", ret); + return ret; + } + return 0; +} + +static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, + u8 *bksv) +{ + int ret; + ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv, + DRM_HDCP_KSV_LEN); + if (ret) + DRM_ERROR("Read Bksv over DDC failed (%d)\n", ret); + return ret; +} + +static +int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, + u8 *bstatus) +{ + int ret; + ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS, + bstatus, DRM_HDCP_BSTATUS_LEN); + if (ret) + DRM_ERROR("Read bstatus over DDC failed (%d)\n", ret); + return ret; +} + +static +int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, + bool *repeater_present) +{ + int ret; + u8 val; + + ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); + if (ret) { + DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret); + return ret; + } + *repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT; + return 0; +} + +static +int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, + u8 *ri_prime) +{ + int ret; + ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME, + ri_prime, DRM_HDCP_RI_LEN); + if (ret) + DRM_ERROR("Read Ri' over DDC failed (%d)\n", ret); + return ret; +} + +static +int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, + bool *ksv_ready) +{ + int ret; + u8 val; + + ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); + if (ret) { + DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret); + return ret; + } + *ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY; + return 0; +} + +static +int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, + int num_downstream, u8 *ksv_fifo) +{ + int ret; + ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO, + ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN); + if (ret) { + DRM_ERROR("Read ksv fifo over DDC failed (%d)\n", ret); + return ret; + } + return 0; +} + +static +int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, + int i, u32 *part) +{ + int ret; + + if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) + return -EINVAL; + + ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i), + part, DRM_HDCP_V_PRIME_PART_LEN); + if (ret) + DRM_ERROR("Read V'[%d] over DDC failed (%d)\n", i, ret); + return ret; +} + +static +int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, + bool enable) +{ + int ret; + + if (!enable) + usleep_range(6, 60); /* Bspec says >= 6us */ + + ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, enable); + if (ret) { + DRM_ERROR("%s HDCP signalling failed (%d)\n", + enable ? 
"Enable" : "Disable", ret); + return ret; + } + return 0; +} + +static +bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port) +{ + struct drm_i915_private *dev_priv = + intel_dig_port->base.base.dev->dev_private; + enum port port = intel_dig_port->base.port; + int ret; + union { + u32 reg; + u8 shim[DRM_HDCP_RI_LEN]; + } ri; + + ret = intel_hdmi_hdcp_read_ri_prime(intel_dig_port, ri.shim); + if (ret) + return false; + + I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg); + + /* Wait for Ri prime match */ + if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) & + (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) { + DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n", + I915_READ(PORT_HDCP_STATUS(port))); + return false; + } + return true; +} + +static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = { + .write_an_aksv = intel_hdmi_hdcp_write_an_aksv, + .read_bksv = intel_hdmi_hdcp_read_bksv, + .read_bstatus = intel_hdmi_hdcp_read_bstatus, + .repeater_present = intel_hdmi_hdcp_repeater_present, + .read_ri_prime = intel_hdmi_hdcp_read_ri_prime, + .read_ksv_ready = intel_hdmi_hdcp_read_ksv_ready, + .read_ksv_fifo = intel_hdmi_hdcp_read_ksv_fifo, + .read_v_prime_part = intel_hdmi_hdcp_read_v_prime_part, + .toggle_signalling = intel_hdmi_hdcp_toggle_signalling, + .check_link = intel_hdmi_hdcp_check_link, +}; + static void intel_hdmi_prepare(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { @@ -1562,12 +1807,20 @@ intel_hdmi_set_edid(struct drm_connector *connector) struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct edid *edid; bool connected = false; + struct i2c_adapter *i2c; intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); - edid = drm_get_edid(connector, - intel_gmbus_get_adapter(dev_priv, - intel_hdmi->ddc_bus)); + i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus); + + edid = drm_get_edid(connector, i2c); + + if (!edid && !intel_gmbus_is_forced_bit(i2c)) { + DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n"); + intel_gmbus_force_bit(i2c, true); + edid = drm_get_edid(connector, i2c); + intel_gmbus_force_bit(i2c, false); + } intel_hdmi_dp_dual_mode_detect(connector, edid != NULL); @@ -1602,7 +1855,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) if (intel_hdmi_set_edid(connector)) { struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); - hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI; status = connector_status_connected; } else @@ -1870,6 +2122,14 @@ static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) u8 ddc_pin; switch (port) { + case PORT_A: + if ((IS_GEN9_LP(dev_priv)) && (intel_vgpu_active(dev_priv))) + ddc_pin = GMBUS_PIN_3_BXT; + else { + MISSING_CASE(port); + ddc_pin = GMBUS_PIN_DPB; + } + break; case PORT_B: ddc_pin = GMBUS_PIN_1_BXT; break; @@ -1989,7 +2249,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port); - if (WARN_ON(port == PORT_A)) + if (!intel_vgpu_active(dev_priv) && + WARN_ON(port == PORT_A)) return; intel_encoder->hpd_pin = intel_hpd_pin(port); @@ -2022,6 +2283,13 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_hdmi_add_properties(intel_hdmi, connector); + if (is_hdcp_supported(dev_priv, port)) { + int ret = intel_hdcp_init(intel_connector, + &intel_hdmi_hdcp_shim); + if (ret) + DRM_DEBUG_KMS("HDCP init failed, skipping.\n"); + } + intel_connector_attach_encoder(intel_connector, 
intel_encoder); intel_hdmi->attached_connector = intel_connector; diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index 6145fa0d6773..637c8d5a7ac5 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -52,10 +52,6 @@ #define KBL_HUC_FW_MINOR 00 #define KBL_BLD_NUM 1810 -#define GLK_HUC_FW_MAJOR 02 -#define GLK_HUC_FW_MINOR 00 -#define GLK_BLD_NUM 1748 - #define HUC_FW_PATH(platform, major, minor, bld_num) \ "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \ __stringify(minor) "_" __stringify(bld_num) ".bin" @@ -72,9 +68,6 @@ MODULE_FIRMWARE(I915_BXT_HUC_UCODE); KBL_HUC_FW_MINOR, KBL_BLD_NUM) MODULE_FIRMWARE(I915_KBL_HUC_UCODE); -#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \ - GLK_HUC_FW_MINOR, GLK_BLD_NUM) - /** * huc_ucode_xfer() - DMA's the firmware * @dev_priv: the drm_i915_private device @@ -155,8 +148,8 @@ void intel_huc_select_fw(struct intel_huc *huc) huc->fw.load_status = INTEL_UC_FIRMWARE_NONE; huc->fw.type = INTEL_UC_FW_TYPE_HUC; - if (i915.huc_firmware_path) { - huc->fw.path = i915.huc_firmware_path; + if (i915_modparams.huc_firmware_path) { + huc->fw.path = i915_modparams.huc_firmware_path; huc->fw.major_ver_wanted = 0; huc->fw.minor_ver_wanted = 0; } else if (IS_SKYLAKE(dev_priv)) { @@ -171,10 +164,6 @@ void intel_huc_select_fw(struct intel_huc *huc) huc->fw.path = I915_KBL_HUC_UCODE; huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR; huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR; - } else if (IS_GEMINILAKE(dev_priv)) { - huc->fw.path = I915_GLK_HUC_UCODE; - huc->fw.major_ver_wanted = GLK_HUC_FW_MAJOR; - huc->fw.minor_ver_wanted = GLK_HUC_FW_MINOR; } else { DRM_ERROR("No HuC firmware known for platform with HuC!\n"); return; diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index eb5827110d8f..3466c501c9b5 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "intel_drv.h" #include #include "i915_drv.h" @@ -373,7 +374,8 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, static int gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, - unsigned short addr, u8 *buf, unsigned int len) + unsigned short addr, u8 *buf, unsigned int len, + u32 gmbus1_index) { unsigned int chunk_size = len; u32 val, loop; @@ -386,7 +388,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, I915_WRITE_FW(GMBUS3, val); I915_WRITE_FW(GMBUS1, - GMBUS_CYCLE_WAIT | + gmbus1_index | GMBUS_CYCLE_WAIT | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); @@ -409,7 +411,8 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, } static int -gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) +gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg, + u32 gmbus1_index) { u8 *buf = msg->buf; unsigned int tx_size = msg->len; @@ -419,7 +422,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) do { len = min(tx_size, GMBUS_BYTE_COUNT_MAX); - ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len); + ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len, + gmbus1_index); if (ret) return ret; @@ -431,19 +435,21 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg) } /* - * The gmbus controller can combine a 1 or 2 byte write with a read that - * immediately follows it by using an "INDEX" cycle. 
+ * The gmbus controller can combine a 1 or 2 byte write with another read/write + * that immediately follows it by using an "INDEX" cycle. */ static bool -gmbus_is_index_read(struct i2c_msg *msgs, int i, int num) +gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num) { return (i + 1 < num && - !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 && + msgs[i].addr == msgs[i + 1].addr && + !(msgs[i].flags & I2C_M_RD) && + (msgs[i].len == 1 || msgs[i].len == 2) && (msgs[i + 1].flags & I2C_M_RD)); } static int -gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) +gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) { u32 gmbus1_index = 0; u32 gmbus5 = 0; @@ -460,7 +466,10 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) if (gmbus5) I915_WRITE_FW(GMBUS5, gmbus5); - ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index); + if (msgs[1].flags & I2C_M_RD) + ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index); + else + ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index); /* Clear GMBUS5 after each index transfer */ if (gmbus5) @@ -470,7 +479,8 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) } static int -do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) +do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num, + u32 gmbus0_source) { struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus, @@ -480,17 +490,17 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) int ret = 0; retry: - I915_WRITE_FW(GMBUS0, bus->reg0); + I915_WRITE_FW(GMBUS0, gmbus0_source | bus->reg0); for (; i < num; i += inc) { inc = 1; - if (gmbus_is_index_read(msgs, i, num)) { - ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); - inc = 2; /* an index read is two msgs */ + if (gmbus_is_index_xfer(msgs, i, num)) { + ret = gmbus_index_xfer(dev_priv, &msgs[i]); + inc = 2; /* an index transmission is two msgs */ } else if (msgs[i].flags & I2C_M_RD) { ret = gmbus_xfer_read(dev_priv, &msgs[i], 0); } else { - ret = gmbus_xfer_write(dev_priv, &msgs[i]); + ret = gmbus_xfer_write(dev_priv, &msgs[i], 0); } if (!ret) @@ -598,7 +608,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) if (ret < 0) bus->force_bit &= ~GMBUS_FORCE_BIT_RETRY; } else { - ret = do_gmbus_xfer(adapter, msgs, num); + ret = do_gmbus_xfer(adapter, msgs, num, 0); if (ret == -EAGAIN) bus->force_bit |= GMBUS_FORCE_BIT_RETRY; } @@ -608,6 +618,45 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) return ret; } +int intel_gmbus_output_aksv(struct i2c_adapter *adapter) +{ + struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus, + adapter); + struct drm_i915_private *dev_priv = bus->dev_priv; + int ret; + u8 cmd = DRM_HDCP_DDC_AKSV; + u8 buf[DRM_HDCP_KSV_LEN] = { 0 }; + struct i2c_msg msgs[] = { + { + .addr = DRM_HDCP_DDC_ADDR, + .flags = 0, + .len = sizeof(cmd), + .buf = &cmd, + }, + { + .addr = DRM_HDCP_DDC_ADDR, + .flags = 0, + .len = sizeof(buf), + .buf = buf, + } + }; + + intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + mutex_lock(&dev_priv->gmbus_mutex); + + /* + * In order to output Aksv to the receiver, use an indexed write to + * pass the i2c command, and tell GMBUS to use the HW-provided value + * instead of sourcing GMBUS3 for the data. 
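As a concrete illustration of the message shape involved, here is a standalone sketch (slave address, offset value, and helper name are all hypothetical) of a pair that gmbus_is_index_xfer() above would fold into a single GMBUS "INDEX" cycle, the same two-message pattern intel_gmbus_output_aksv() uses:

    #include <linux/i2c.h>
    #include <linux/kernel.h>

    /* Hypothetical helper: a 1-byte "index" write immediately followed by
     * a read from the same slave address -- exactly the shape that
     * gmbus_is_index_xfer() folds into one GMBUS INDEX cycle.
     */
    static int indexed_read(struct i2c_adapter *adap, u8 offset, u8 *buf, u16 len)
    {
            struct i2c_msg msgs[] = {
                    {
                            .addr = 0x50,   /* example slave, e.g. an EDID EEPROM */
                            .flags = 0,     /* write; len <= 2, so it can be an index */
                            .len = 1,
                            .buf = &offset,
                    },
                    {
                            .addr = 0x50,   /* must match the first message */
                            .flags = I2C_M_RD,
                            .len = len,
                            .buf = buf,
                    },
            };

            return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
    }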
+ */ + ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT); + + mutex_unlock(&dev_priv->gmbus_mutex); + intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); + + return ret; +} + static u32 gmbus_func(struct i2c_adapter *adapter) { return i2c_bit_algo.functionality(adapter) & diff --git a/drivers/gpu/drm/i915/intel_initial_modeset.c b/drivers/gpu/drm/i915/intel_initial_modeset.c new file mode 100644 index 000000000000..986182435a2d --- /dev/null +++ b/drivers/gpu/drm/i915/intel_initial_modeset.c @@ -0,0 +1,756 @@ +/* + * + * Copyright (c) 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/** + * DOC: Boot-time mode setting. + * + * There exists a use case where the kernel graphics needs to be initialized + * with a valid display configuration with full display pipeline programming + * in place before user space is initialized and without an fbdev & fb console. + * + * The primary motivation is to allow early user space applications to + * display a frame (or frames) as soon as possible after user space starts. + * Eliminating the time it takes userspace to program the display configuration + * benefits this use case. + * + * By doing all the display programming in the kernel, it can be done in + * parallel with other kernel startup tasks without adding significant + * elapsed time before user space starts. + */ + +#include +#include "intel_drv.h" +#include "i915_drv.h" + +static inline struct drm_encoder *get_encoder(struct drm_connector *connector) +{ + struct intel_encoder *encoder; + + encoder = intel_attached_encoder(connector); + + return &encoder->base; +} + +/* + * This makes use of the video= kernel command line to determine what + * connectors to configure. See Documentation/fb/modedb.txt for details + * on the format. There are 3 specific cases that are used: + * + * 1) video=<connector> + * - assume monitor is connected, use EDID preferred mode + * 2) video=<connector>:e + * - use regardless of monitor connected, use EDID preferred mode + * 3) video=<connector>:<mode> + * - use the specified mode regardless of monitor connected + */
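For example (connector and mode values here are hypothetical, written in the Documentation/fb/modedb.txt syntax the driver parses): "video=HDMI-A-1" uses HDMI-A-1 with its EDID preferred mode only if a monitor is detected, "video=HDMI-A-1:e" forces the output on even with nothing attached, and "video=HDMI-A-1:1920x1080@60" forces that exact mode.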
+static bool use_connector(struct drm_connector *connector) +{ + char *option = NULL; + struct drm_cmdline_mode *cl_mode = &connector->cmdline_mode; + + fb_get_options(connector->name, &option); + if (option) { + switch (connector->force) { + + case DRM_FORCE_OFF: + return false; + case DRM_FORCE_ON: + case DRM_FORCE_ON_DIGITAL: + return true; + case DRM_FORCE_UNSPECIFIED: + break; + } + + connector->status = connector->funcs->detect(connector, true); + if (connector->status != connector_status_connected) { + connector->force = cl_mode->force; + connector->status = connector_status_connected; + } + return true; + } + + return false; +} + +static bool attach_crtc(struct drm_device *dev, struct drm_encoder *encoder, + uint32_t *used_crtcs) +{ + struct drm_crtc *possible_crtc; + + if (encoder->crtc != NULL && + !(*used_crtcs & drm_crtc_mask(encoder->crtc))) { + *used_crtcs |= drm_crtc_mask(encoder->crtc); + return true; + } + + drm_for_each_crtc(possible_crtc, dev) { + if (!(encoder->possible_crtcs & drm_crtc_mask(possible_crtc)) + || (*used_crtcs & drm_crtc_mask(possible_crtc))) + continue; + *used_crtcs |= drm_crtc_mask(possible_crtc); + encoder->crtc = possible_crtc; + return true; + } + + return false; +} + +static struct drm_framebuffer * +intel_splash_screen_fb(struct drm_device *dev, + struct splash_screen_info *splash_info) +{ + struct drm_framebuffer *fb; + struct drm_mode_fb_cmd2 mode_cmd = {0}; + + if (splash_info->obj == NULL) + return NULL; + + mode_cmd.width = splash_info->width; + mode_cmd.height = splash_info->height; + + mode_cmd.pitches[0] = splash_info->pitch; + mode_cmd.pixel_format = DRM_FORMAT_C8; + + mutex_lock(&dev->struct_mutex); + fb = intel_framebuffer_create(splash_info->obj, &mode_cmd); + mutex_unlock(&dev->struct_mutex); + + return fb; +} + +static bool shared_image(struct drm_i915_private *dev_priv, + char *image, + struct splash_screen_info *info) +{ + struct splash_screen_info *splash_info; + + list_for_each_entry(splash_info, &dev_priv->splash_list, link) { + if (strcmp(splash_info->image_name, image) == 0) { + info->image_name = NULL; + info->fw = NULL; + info->obj = splash_info->obj; + return true; + } + } + return false; +} + +static struct splash_screen_info *match_splash_info( + struct drm_i915_private *dev_priv, + char *name) +{ + struct splash_screen_info *splash_info, *info = NULL; + list_for_each_entry(splash_info, &dev_priv->splash_list, link) { + if (strcmp(splash_info->connector_name, name) == 0) + info = splash_info; + } + return info; +} + +static void intel_splash_screen_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct splash_screen_info *splash_info = NULL; + char *splash_dup = NULL; + char *splash_str = NULL; + char *sep; + u32 fw_npages; + char *splash = i915_modparams.splash; + + INIT_LIST_HEAD(&dev_priv->splash_list); + + if (!splash) + return; + + splash_dup = kstrdup(splash, GFP_KERNEL); + if (!splash_dup) + goto fail; + splash_str = splash_dup; + + /* + * The loop condition finds the connector name portion of the + * string. Once we have that, we parse the following fields + * from the string: + * Image data file name, image data width, image data height, + * crtc rectangle (x, y, w, h). + * Then the loop condition will execute again to get the next + * connector name. + */
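Putting the parsing below together, a syntactically valid value for the i915.splash parameter would look like this (connector names, file name, and geometry are hypothetical; the numeric fields are width, height, pitch, crtc_x, crtc_y, crtc_w, crtc_h, with additional connector entries chained by ':'):

    i915.splash=eDP-1:splash.bin:800,600,800,0,0,800,600:HDMI-A-1:splash.bin:1920,1080,1920,0,0,1920,1080

Because both entries name the same image file, the second entry would take the shared-object path through shared_image() above rather than calling request_firmware() a second time.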
+ while ((sep = strchr(splash_str, ':'))) { + splash_info = kzalloc(sizeof(struct splash_screen_info), + GFP_KERNEL); + if (splash_info == NULL) + goto fail; + + *sep = '\0'; + splash_info->connector_name = kstrdup(splash_str, GFP_KERNEL); + if (!splash_info->connector_name) + goto fail; + splash_str = sep + 1; + + /* + * Pull firmware file name from string and check to see + * if this image has been previously loaded. request_firmware() + * only needs to be called once for each file. + */ + sep = strchr(splash_str, ':'); + if (sep == NULL) + goto fail; + + *sep = '\0'; + + if (!shared_image(dev_priv, splash_str, splash_info)) { + splash_info->image_name = kstrdup(splash_str, + GFP_KERNEL); + if (!splash_info->image_name) + goto fail; + request_firmware(&splash_info->fw, splash_str, + &dev_priv->drm.pdev->dev); + if (splash_info->fw == NULL) + goto fail; + } + splash_str = sep + 1; + + /* Pull splash screen width, height, crtc */ + sscanf(splash_str, "%d,%d,%d,%d,%d,%d,%d", + &splash_info->width, + &splash_info->height, + &splash_info->pitch, + &splash_info->crtc_x, + &splash_info->crtc_y, + &splash_info->crtc_w, + &splash_info->crtc_h); + + /* Only do this if we haven't mapped this firmware image before */ + if (splash_info->fw) { + /* + * If splash image is baked into the kernel, we just get + * a pointer. Otherwise we'll get a list of pages. + */ + fw_npages = DIV_ROUND_UP_ULL(splash_info->fw->size, + PAGE_SIZE); + if (splash_info->fw->pages == NULL) + splash_info->obj = i915_gem_object_create_splash( + dev_priv, + splash_info->fw->data, + fw_npages); + else + splash_info->obj = i915_gem_object_create_splash_pages( + dev_priv, + splash_info->fw->pages, fw_npages); + } + + list_add_tail(&splash_info->link, &dev_priv->splash_list); + + /* move to the next entry, break if reaching the end */ + splash_str = strchr(splash_str, ':'); + if (splash_str != NULL) + splash_str += 1; + else + break; + } + + kfree(splash_dup); + return; + +fail: + /* Clean up failed entry data */ + if (splash_info) { + release_firmware(splash_info->fw); + kfree(splash_info->connector_name); + kfree(splash_info->image_name); + } + kfree(splash_info); + kfree(splash_dup); + return; +} + +static struct drm_display_mode *get_modeline(struct drm_i915_private *dev_priv, + struct drm_connector *connector, + int width, int height) +{ + struct drm_display_mode *mode; + struct drm_cmdline_mode *cl_mode = &connector->cmdline_mode; + + /* + * fill_modes() takes a bit of time but is necessary. + * It is reading the EDID (or loading the EDID firmware blob) + * and building the connector mode list. The time can be + * minimized by using a small EDID blob built into the kernel. + */ + + connector->funcs->fill_modes(connector, width, height); + + /* + * Search the mode list. If a mode was specified using the + * video= command line, use that. Otherwise look for the + * preferred mode.
+ * + * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd] + */ + list_for_each_entry(mode, &connector->modes, head) { + if (cl_mode && cl_mode->specified && + cl_mode->refresh_specified) { + if (mode->hdisplay == cl_mode->xres && + mode->vdisplay == cl_mode->yres && + mode->vrefresh == cl_mode->refresh) + return mode; + } else if (cl_mode && cl_mode->specified) { + if (mode->hdisplay == cl_mode->xres && + mode->vdisplay == cl_mode->yres) + return mode; + } else { + if (mode->type & DRM_MODE_TYPE_PREFERRED) + return mode; + } + } + + DRM_ERROR("Failed to find a valid mode.\n"); + return NULL; +} + +static int update_crtc_state(struct drm_atomic_state *state, + struct drm_display_mode *mode, + struct drm_crtc *crtc) +{ + struct drm_crtc_state *crtc_state; + struct drm_rgba bgcolor; + unsigned int bg_color = i915_modparams.bg_color; + int ret; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + ret = drm_atomic_set_mode_for_crtc(crtc_state, mode); + if (ret) { + crtc_state->active = false; + return ret; + } + + crtc_state->active = true; + + if (!IS_GEN9(to_i915(state->dev))) + return 0; + + /* Set the background color based on module parameter */ + bgcolor = drm_rgba(8, + (bg_color & 0x000000ff), + (bg_color & 0x0000ff00) >> 8, + (bg_color & 0x00ff0000) >> 16, + (bg_color & 0xff000000) >> 24); + + ret = drm_atomic_crtc_set_property(crtc, crtc_state, + state->dev->mode_config.prop_background_color, + bgcolor.v); + WARN_ON(ret); + + return 0; +} + +static int update_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector, + struct drm_crtc *crtc) +{ + struct drm_connector_state *conn_state; + int ret; + + conn_state = drm_atomic_get_connector_state(state, connector); + if (IS_ERR(conn_state)) { + DRM_DEBUG_KMS("failed to get connector %s state\n", + connector->name); + return PTR_ERR(conn_state); + } + + ret = drm_atomic_set_crtc_for_connector(conn_state, crtc); + if (ret) { + DRM_DEBUG_KMS("failed to set crtc for connector\n"); + return ret; + } + + return 0; +} + +static int update_primary_plane_state(struct drm_atomic_state *state, + struct splash_screen_info *splash_info, + struct drm_crtc *crtc, + struct drm_display_mode *mode) +{ + int hdisplay, vdisplay; + struct drm_plane_state *primary_state; + struct drm_property_blob *blob = NULL; + struct drm_color_lut *blob_data; + struct drm_crtc_state *crtc_state; + struct drm_device *dev = crtc->dev; + uint32_t i, palette_size; + const char *palette_data; + int ret; + + primary_state = drm_atomic_get_plane_state(state, crtc->primary); + ret = drm_atomic_set_crtc_for_plane(primary_state, crtc); + if (ret) + return ret; + drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay); + drm_atomic_set_fb_for_plane(primary_state, splash_info->fb); + + primary_state->crtc_x = splash_info->crtc_x; + primary_state->crtc_y = splash_info->crtc_y; + primary_state->crtc_w = (splash_info->crtc_w) ? + splash_info->crtc_w : hdisplay; + primary_state->crtc_h = (splash_info->crtc_h) ? + splash_info->crtc_h : vdisplay; + + primary_state->src_x = 0 << 16; + primary_state->src_y = 0 << 16; + primary_state->src_w = ((splash_info->width) ? + splash_info->width : hdisplay) << 16; + primary_state->src_h = ((splash_info->height) ? + splash_info->height : vdisplay) << 16; + primary_state->rotation = DRM_MODE_ROTATE_0;
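The src_* assignments above use the DRM core's Q16.16 fixed-point convention for plane source coordinates, which is why every whole-pixel value is shifted left by 16. A minimal sketch of the convention (helper name hypothetical):

    #include <stdint.h>

    /* drm_plane_state src_* fields are 16.16 fixed point: the high 16 bits
     * hold whole pixels, the low 16 bits a fractional part. */
    static inline uint32_t px_to_q16(uint32_t px)
    {
            return px << 16;        /* 1920 pixels -> 0x07800000 */
    }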
+ + crtc_state = drm_atomic_get_crtc_state(state, crtc); + + /* The color palette is appended after the image data; each palette + * entry uses 24 bits (one byte each for R, G, B) */ + palette_size = (splash_info->fw->size - (splash_info->width * splash_info->height)) / 3; + DRM_DEBUG_KMS("Splash size %zu, palette size %u\n", splash_info->fw->size, palette_size); + if (palette_size == 0) { + DRM_ERROR("Splash image does not contain color palette data\n"); + return -EINVAL; + } + + if (palette_size > 256) { + DRM_ERROR("Splash image color palette too big\n"); + return -EINVAL; + } + + /* i915 expects a full 256-entry palette */ + blob = drm_property_create_blob(dev, sizeof(struct drm_color_lut) * 256, NULL); + + if (IS_ERR(blob)) + return PTR_ERR(blob); + + palette_data = &splash_info->fw->data[splash_info->width * splash_info->height]; + + blob_data = (struct drm_color_lut *)blob->data; + for (i = 0; i < palette_size; i++) { + blob_data[i].red = (*palette_data++) << 8; + blob_data[i].green = (*palette_data++) << 8; + blob_data[i].blue = (*palette_data++) << 8; + } + + drm_atomic_crtc_set_property(crtc, crtc_state, state->dev->mode_config.gamma_lut_property, blob->base.id); + + return 0; +} + +static void create_splash_fb(struct drm_device *dev, + struct splash_screen_info *splash) +{ + struct splash_screen_info *splash_info; + struct drm_i915_private *dev_priv = dev->dev_private; + + splash->fb = intel_splash_screen_fb(dev, splash); + if (IS_ERR(splash->fb)) + splash->fb = NULL; + + if (splash->fb) + list_for_each_entry(splash_info, &dev_priv->splash_list, link) + if (splash->obj == splash_info->obj && + splash != splash_info) { + splash_info->fb = splash->fb; + drm_framebuffer_reference(splash_info->fb); + } +} + +static int update_atomic_state(struct drm_device *dev, + struct drm_atomic_state *state, + struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + int ret; + struct splash_screen_info *splash_info; + + if (get_encoder(connector)) + crtc = get_encoder(connector)->crtc; + else + return -EINVAL; + + ret = update_crtc_state(state, mode, crtc); + if (ret) + return ret; + + /* attach connector to atomic state */ + ret = update_connector_state(state, connector, crtc); + if (ret) + return ret; + + /* set up primary plane if a splash screen is requested */ + splash_info = match_splash_info(dev_priv, connector->name); + if (splash_info) { + if (splash_info->fb == NULL) + create_splash_fb(dev, splash_info); + if (splash_info->fb) { + ret = update_primary_plane_state(state, + splash_info, + crtc, mode); + if (ret) + return ret; + } + } + + return 0; +} + + +static int disable_planes(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_plane *plane; + int ret; + + drm_for_each_plane(plane, dev) { + struct drm_plane_state *plane_state; + + plane->old_fb = plane->fb; + + plane_state = drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) { + return PTR_ERR(plane_state); + } + + ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); + if (ret != 0) + return ret; + + drm_atomic_set_fb_for_plane(plane_state, NULL); + } + + return 0; +} + + +/* + * modeset_config_fn() is scheduled to run via an async + * schedule call from the main driver load. + */
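As a worked example of the palette sizing done in update_primary_plane_state() above (all dimensions hypothetical): an 800x600 C8 image with a full 256-entry palette appended as 3 bytes (R, G, B) per entry gives a firmware blob of 800*600 + 256*3 = 480768 bytes, from which the code recovers 256 palette entries. A standalone sketch of the same arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned int width = 800, height = 600;
            size_t fw_size = (size_t)width * height + 256 * 3;      /* 480768 */
            unsigned int palette_size =
                    (fw_size - (size_t)width * height) / 3;         /* = 256 */

            printf("blob %zu bytes, %u palette entries\n",
                   fw_size, palette_size);
            return 0;
    }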
+static void modeset_config_fn(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), initial_modeset_work); + struct drm_device *dev = &dev_priv->drm; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct drm_atomic_state *state; + struct drm_modeset_acquire_ctx ctx; + struct drm_plane *plane; + int ret; + bool found = false; + uint32_t used_crtcs = 0; + struct drm_display_mode *connector_mode[20]; + struct drm_encoder *encoder; + struct drm_display_mode *mode; + + intel_splash_screen_init(dev); + + memset(connector_mode, 0, sizeof(connector_mode)); + mutex_lock(&dev->mode_config.mutex); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (use_connector(connector)) { + if (!(encoder = get_encoder(connector))) + continue; + if (!attach_crtc(dev, encoder, &used_crtcs)) + continue; + mode = get_modeline(dev_priv, connector, + dev->mode_config.max_width, + dev->mode_config.max_height); + if (mode && !WARN_ON(connector->index >= 20)) { + found = true; + connector_mode[connector->index] = mode; + } + } + } + drm_connector_list_iter_end(&conn_iter); + if (!found) { + used_crtcs = 0; + /* Try to detect attached connectors */ + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + if (connector->funcs && connector->funcs->detect) + connector->status = connector->funcs->detect(connector, + true); + else if (connector->helper_private && connector->helper_private->detect_ctx) + connector->status = connector->helper_private->detect_ctx(connector, + NULL, true); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + if (connector->status == connector_status_connected) { + if (!(encoder = get_encoder(connector))) + continue; + if (!attach_crtc(dev, encoder, &used_crtcs)) + continue; + mode = get_modeline(dev_priv, connector, + dev->mode_config.max_width, + dev->mode_config.max_height); + if (mode && !WARN_ON(connector->index >= 20)) { + found = true; + connector_mode[connector->index] = mode; + } + } + } + drm_connector_list_iter_end(&conn_iter); + } + mutex_unlock(&dev->mode_config.mutex); + + if (!found) + return; + + state = drm_atomic_state_alloc(dev); + if (!state) + return; + + mutex_lock(&dev->mode_config.mutex); + + drm_modeset_acquire_init(&ctx, 0); + state->acquire_ctx = &ctx; +retry: + ret = drm_modeset_lock_all_ctx(dev, &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry; + } else if (ret) { + goto out; + } + + ret = disable_planes(dev, state); + if (ret) + goto fail; + + /* + * For each connector that we want to set up, update the atomic + * state to include the connector and crtc mode. 
+ */ + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector_mode[connector->index]) { + ret = update_atomic_state(dev, state, connector, + connector_mode[connector->index]); + if (ret) + goto fail; + } + } + drm_connector_list_iter_end(&conn_iter); + + ret = drm_atomic_commit(state); + if (ret) + goto fail; + goto out; + +fail: + if (ret == -EDEADLK) { + DRM_DEBUG_KMS("modeset commit deadlock, retry...\n"); + drm_modeset_backoff(&ctx); + drm_atomic_state_clear(state); + goto retry; + } + +out: + if (!ret) { + drm_for_each_plane(plane, dev) { + if (plane->old_fb) + drm_framebuffer_unreference(plane->old_fb); + } + } + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + drm_atomic_state_put(state); + + mutex_unlock(&dev->mode_config.mutex); +} + +void intel_initial_mode_config_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + INIT_WORK(&dev_priv->initial_modeset_work, modeset_config_fn); + schedule_work(&dev_priv->initial_modeset_work); +} + +static void initial_mode_destroy(struct drm_device *dev) +{ + struct drm_atomic_state *state; + struct drm_modeset_acquire_ctx ctx; + int ret; + + state = drm_atomic_state_alloc(dev); + if (!state) + return; + + drm_modeset_acquire_init(&ctx, 0); + state->acquire_ctx = &ctx; + drm_modeset_lock_all_ctx(dev, &ctx); + +retry: + ret = disable_planes(dev, state); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + drm_atomic_state_clear(state); + goto retry; + } + + ret = drm_atomic_commit(state); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + drm_atomic_state_clear(state); + goto retry; + } + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); +} + +void intel_initial_mode_config_fini(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct splash_screen_info *splash_info, *tmp; + + flush_work(&dev_priv->initial_modeset_work); + initial_mode_destroy(dev); + + list_for_each_entry_safe(splash_info, tmp, + &dev_priv->splash_list, link) { + if (splash_info->fb) + drm_framebuffer_unreference(splash_info->fb); + release_firmware(splash_info->fw); + kfree(splash_info->connector_name); + kfree(splash_info->image_name); + kfree(splash_info); + } +} diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index 3bf65288ffff..2fdf302ebdad 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c @@ -62,6 +62,7 @@ #include #include +#include #include #include diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 6f972e6ec663..f97fe79f78fe 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -208,8 +208,9 @@ /* Typical size of the average request (2 pipecontrols and a MI_BB) */ #define EXECLISTS_REQUEST_SIZE 64 /* bytes */ - #define WA_TAIL_DWORDS 2 +#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS) +#define PREEMPT_ID 0x1 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine); @@ -219,9 +220,9 @@ static void execlists_init_reg_state(u32 *reg_state, struct intel_ring *ring); /** - * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists + * intel_sanitize_enable_execlists() - sanitize i915_modparams.enable_execlists * @dev_priv: i915 device private - * @enable_execlists: value of i915.enable_execlists module parameter. 
+ * @enable_execlists: value of i915_modparams.enable_execlists module parameter. * * Only certain platforms support Execlists (the prerequisites being * support for Logical Ring Contexts and Aliasing PPGTT or better). @@ -244,7 +245,7 @@ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enabl if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && USES_PPGTT(dev_priv) && - i915.use_mmio_flip >= 0) + i915_modparams.use_mmio_flip >= 0) return 1; return 0; @@ -279,17 +280,110 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx, BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH)); desc = ctx->desc_template; /* bits 0-11 */ - desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE; + desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE; /* bits 12-31 */ desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ ce->lrc_desc = desc; } -uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) +static struct i915_priolist * +lookup_priolist(struct intel_engine_cs *engine, + struct i915_priotree *pt, + int prio) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_priolist *p; + struct rb_node **parent, *rb; + bool first = true; + + if (unlikely(execlists->no_priolist)) + prio = I915_PRIORITY_NORMAL; + +find_priolist: + /* most positive priority is scheduled first, equal priorities fifo */ + rb = NULL; + parent = &execlists->queue.rb_node; + while (*parent) { + rb = *parent; + p = rb_entry(rb, typeof(*p), node); + if (prio > p->priority) { + parent = &rb->rb_left; + } else if (prio < p->priority) { + parent = &rb->rb_right; + first = false; + } else { + return p; + } + } + + if (prio == I915_PRIORITY_NORMAL) { + p = &execlists->default_priolist; + } else { + p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC); + /* Convert an allocation failure to a priority bump */ + if (unlikely(!p)) { + prio = I915_PRIORITY_NORMAL; /* recurses just once */ + + /* To maintain ordering with all rendering, after an + * allocation failure we have to disable all scheduling. + * Requests will then be executed in fifo, and schedule + * will ensure that dependencies are emitted in fifo. + * There will still be some reordering with existing + * requests, so if userspace lied about their + * dependencies that reordering may be visible. 
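lookup_priolist() returns its result with a "first" flag packed into bit 0 of the pointer (via i915's ptr_pack_bits()); insert_request(), later in this hunk, strips and tests that bit with ptr_mask_bits()/ptr_unmask_bits() to decide whether to kick the tasklet. A standalone sketch of the low-bit packing technique itself (helper names hypothetical, not the i915 helpers):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Pack a 1-bit flag into the low bit of a sufficiently aligned pointer. */
    static void *pack_ptr_bit(void *ptr, unsigned int bit)
    {
            assert(((uintptr_t)ptr & 1) == 0);      /* needs 2-byte alignment */
            return (void *)((uintptr_t)ptr | bit);
    }

    static void *unpack_ptr(void *packed, unsigned int *bit)
    {
            *bit = (uintptr_t)packed & 1;
            return (void *)((uintptr_t)packed & ~(uintptr_t)1);
    }

    int main(void)
    {
            int value = 42;
            unsigned int first;
            int *p = unpack_ptr(pack_ptr_bit(&value, 1), &first);

            printf("value=%d first=%u\n", *p, first);  /* value=42 first=1 */
            return 0;
    }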
+ */ + execlists->no_priolist = true; + goto find_priolist; + } + } + + p->priority = prio; + INIT_LIST_HEAD(&p->requests); + rb_link_node(&p->node, rb, parent); + rb_insert_color(&p->node, &execlists->queue); + + if (first) + execlists->first = &p->node; + + return ptr_pack_bits(p, first, 1); +} + +static void unwind_wa_tail(struct drm_i915_gem_request *rq) +{ + rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); + assert_ring_tail_valid(rq->ring, rq->tail); +} + +static void unwind_incomplete_requests(struct intel_engine_cs *engine) { - return ctx->engine[engine->id].lrc_desc; + struct drm_i915_gem_request *rq, *rn; + struct i915_priolist *uninitialized_var(p); + int last_prio = I915_PRIORITY_INVALID; + + lockdep_assert_held(&engine->timeline->lock); + + list_for_each_entry_safe_reverse(rq, rn, + &engine->timeline->requests, + link) { + if (i915_gem_request_completed(rq)) + return; + + __i915_gem_request_unsubmit(rq); + unwind_wa_tail(rq); + + GEM_BUG_ON(rq->priotree.priority == I915_PRIORITY_INVALID); + if (rq->priotree.priority != last_prio) { + p = lookup_priolist(engine, + &rq->priotree, + rq->priotree.priority); + p = ptr_mask_bits(p, 1); + + last_prio = rq->priotree.priority; + } + + list_add(&rq->priotree.link, &p->requests); + } } static inline void @@ -336,14 +430,22 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq) return ce->lrc_desc; } +static inline void elsp_write(u64 desc, u32 __iomem *elsp) +{ + writel(upper_32_bits(desc), elsp); + writel(lower_32_bits(desc), elsp); +} + static void execlists_submit_ports(struct intel_engine_cs *engine) { - struct execlist_port *port = engine->execlist_port; + struct execlist_port *port = engine->execlists.port; u32 __iomem *elsp = engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine)); unsigned int n; + u32 descs[4]; + int i = 0; - for (n = ARRAY_SIZE(engine->execlist_port); n--; ) { + for (n = execlists_num_ports(&engine->execlists); n--; ) { struct drm_i915_gem_request *rq; unsigned int count; u64 desc; @@ -360,9 +462,26 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) GEM_BUG_ON(!n); desc = 0; } + if (intel_vgpu_active(engine->i915) && + i915_modparams.enable_pvmmio & PVMMIO_ELSP_SUBMIT) { + BUG_ON(i >= 4); + descs[i] = upper_32_bits(desc); + descs[i + 1] = lower_32_bits(desc); + i += 2; + } else { + elsp_write(desc, elsp); + } + } - writel(upper_32_bits(desc), elsp); - writel(lower_32_bits(desc), elsp); + if (intel_vgpu_active(engine->i915) && + i915_modparams.enable_pvmmio & PVMMIO_ELSP_SUBMIT) { + u32 __iomem *elsp_data = engine->i915->shared_page->elsp_data; + spin_lock(&engine->i915->shared_page_lock); + writel(descs[0], elsp_data); + writel(descs[1], elsp_data + 1); + writel(descs[2], elsp_data + 2); + writel(descs[3], elsp); + spin_unlock(&engine->i915->shared_page_lock); } } @@ -395,25 +514,63 @@ static void port_assign(struct execlist_port *port, port_set(port, port_pack(i915_gem_request_get(rq), port_count(port))); } +static void inject_preempt_context(struct intel_engine_cs *engine) +{ + struct intel_context *ce = + &engine->i915->preempt_context->engine[engine->id]; + u32 __iomem *elsp = + engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine)); + unsigned int n; + + GEM_BUG_ON(engine->i915->preempt_context->hw_id != PREEMPT_ID); + GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES)); + + memset(ce->ring->vaddr + ce->ring->tail, 0, WA_TAIL_BYTES); + ce->ring->tail += WA_TAIL_BYTES; + ce->ring->tail &= (ce->ring->size - 1); + 
ce->lrc_reg_state[CTX_RING_TAIL+1] = ce->ring->tail; + + if (intel_vgpu_active(engine->i915) && + i915_modparams.enable_pvmmio & PVMMIO_ELSP_SUBMIT) { + u32 __iomem *elsp_data = engine->i915->shared_page->elsp_data; + + spin_lock(&engine->i915->shared_page_lock); + writel(0, elsp_data); + writel(0, elsp_data + 1); + writel(upper_32_bits(ce->lrc_desc), elsp_data + 2); + writel(lower_32_bits(ce->lrc_desc), elsp); + spin_unlock(&engine->i915->shared_page_lock); + + return; + } + + for (n = execlists_num_ports(&engine->execlists); --n; ) + elsp_write(0, elsp); + + elsp_write(ce->lrc_desc, elsp); + + if (i915_modparams.fpreempt_timeout) + hrtimer_start(&engine->fpreempt_timer, + ms_to_ktime(i915_modparams.fpreempt_timeout), + HRTIMER_MODE_REL); +} + +static bool can_preempt(struct intel_engine_cs *engine) +{ + return !intel_vgpu_active(engine->i915) && + INTEL_INFO(engine->i915)->has_logical_ring_preemption; +} + static void execlists_dequeue(struct intel_engine_cs *engine) { - struct drm_i915_gem_request *last; - struct execlist_port *port = engine->execlist_port; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port *port = execlists->port; + const struct execlist_port * const last_port = + &execlists->port[execlists->port_mask]; + struct drm_i915_gem_request *last = port_request(port); struct rb_node *rb; bool submit = false; - last = port_request(port); - if (last) - /* WaIdleLiteRestore:bdw,skl - * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL - * as we resubmit the request. See gen8_emit_breadcrumb() - * for where we prepare the padding after the end of the - * request. - */ - last->tail = last->wa_tail; - - GEM_BUG_ON(port_isset(&port[1])); - /* Hardware submission is through 2 ports. Conceptually each port * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is * static for a context, and unique to each, so we only execute @@ -436,9 +593,68 @@ static void execlists_dequeue(struct intel_engine_cs *engine) */ spin_lock_irq(&engine->timeline->lock); - rb = engine->execlist_first; - GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb); - while (rb) { + rb = execlists->first; + GEM_BUG_ON(rb_first(&execlists->queue) != rb); + if (!rb) + goto unlock; + + if (last) { + /* + * Don't resubmit or switch until all outstanding + * preemptions (lite-restore) are seen. Then we + * know the next preemption status we see corresponds + * to this ELSP update. + */ + if (port_count(&port[0]) > 1) + goto unlock; + + if (can_preempt(engine) && + rb_entry(rb, struct i915_priolist, node)->priority > + max(last->priotree.priority, 0)) { + /* + * Switch to our empty preempt context so + * the state of the GPU is known (idle). + */ + inject_preempt_context(engine); + execlists_set_active(execlists, + EXECLISTS_ACTIVE_PREEMPT); + goto unlock; + } else { + /* + * In theory, we could coalesce more requests onto + * the second port (the first port is active, with + * no preemptions pending). However, that means we + * then have to deal with the possible lite-restore + * of the second port (as we submit the ELSP, there + * may be a context-switch) but also we may complete + * the resubmission before the context-switch. Ergo, + * coalescing onto the second port will cause a + * preemption event, but we cannot predict whether + * that will affect port[0] or port[1]. + * + * If the second port is already active, we can wait + * until the next context-switch before contemplating + * new requests. 
The GPU will be busy and we should be + * able to resubmit the new ELSP before it idles, + * avoiding pipeline bubbles (momentary pauses where + * the driver is unable to keep up the supply of new + * work). + */ + if (port_count(&port[1])) + goto unlock; + + /* WaIdleLiteRestore:bdw,skl + * Apply the wa NOOPs to prevent + * ring:HEAD == req:TAIL as we resubmit the + * request. See gen8_emit_breadcrumb() for + * where we prepare the padding after the + * end of the request. + */ + last->tail = last->wa_tail; + } + } + + do { struct i915_priolist *p = rb_entry(rb, typeof(*p), node); struct drm_i915_gem_request *rq, *rn; @@ -460,7 +676,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * combine this request with the last, then we * are done. */ - if (port != engine->execlist_port) { + if (port == last_port) { __list_del_many(&p->requests, &rq->priotree.link); goto done; @@ -485,212 +701,291 @@ static void execlists_dequeue(struct intel_engine_cs *engine) if (submit) port_assign(port, last); port++; + + GEM_BUG_ON(port_isset(port)); } INIT_LIST_HEAD(&rq->priotree.link); - rq->priotree.priority = INT_MAX; - __i915_gem_request_submit(rq); - trace_i915_gem_request_in(rq, port_index(port, engine)); + trace_i915_gem_request_in(rq, port_index(port, execlists)); last = rq; submit = true; } rb = rb_next(rb); - rb_erase(&p->node, &engine->execlist_queue); + rb_erase(&p->node, &execlists->queue); INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); - } + } while (rb); done: - engine->execlist_first = rb; + execlists->first = rb; if (submit) port_assign(port, last); +unlock: spin_unlock_irq(&engine->timeline->lock); - if (submit) + if (submit) { + execlists_set_active(execlists, EXECLISTS_ACTIVE_USER); execlists_submit_ports(engine); + } } -static bool execlists_elsp_ready(const struct intel_engine_cs *engine) +static void +execlist_cancel_port_requests(struct intel_engine_execlists *execlists) { - const struct execlist_port *port = engine->execlist_port; + struct execlist_port *port = execlists->port; + unsigned int num_ports = ARRAY_SIZE(execlists->port); - return port_count(&port[0]) + port_count(&port[1]) < 2; + while (num_ports-- && port_isset(port)) { + struct drm_i915_gem_request *rq = port_request(port); + + GEM_BUG_ON(!execlists->active); + + execlists_context_status_change(rq, + i915_gem_request_completed(rq) ? + INTEL_CONTEXT_SCHEDULE_OUT : + INTEL_CONTEXT_SCHEDULE_PREEMPTED); + + i915_gem_request_put(rq); + + memset(port, 0, sizeof(*port)); + port++; + } } -/* - * Check the unread Context Status Buffers and manage the submission of new - * contexts to the ELSP accordingly. - */ -static void intel_lrc_irq_handler(unsigned long data) +static void execlists_cancel_requests(struct intel_engine_cs *engine) { - struct intel_engine_cs *engine = (struct intel_engine_cs *)data; - struct execlist_port *port = engine->execlist_port; - struct drm_i915_private *dev_priv = engine->i915; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct drm_i915_gem_request *rq, *rn; + struct rb_node *rb; + unsigned long flags; - /* We can skip acquiring intel_runtime_pm_get() here as it was taken - * on our behalf by the request (see i915_gem_mark_busy()) and it will - * not be relinquished until the device is idle (see - * i915_gem_idle_work_handler()). As a precaution, we make sure - * that all ELSP are drained i.e. 
we have processed the CSB, - * before allowing ourselves to idle and calling intel_runtime_pm_put(). + spin_lock_irqsave(&engine->timeline->lock, flags); + + /* Cancel the requests on the HW and clear the ELSP tracker. */ + execlist_cancel_port_requests(execlists); + + /* Mark all executing requests as skipped. */ + list_for_each_entry(rq, &engine->timeline->requests, link) { + GEM_BUG_ON(!rq->global_seqno); + if (!i915_gem_request_completed(rq)) + dma_fence_set_error(&rq->fence, -EIO); + } + + /* Flush the queued requests to the timeline list (for retiring). */ + rb = execlists->first; + while (rb) { + struct i915_priolist *p = rb_entry(rb, typeof(*p), node); + + list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) { + INIT_LIST_HEAD(&rq->priotree.link); + + dma_fence_set_error(&rq->fence, -EIO); + __i915_gem_request_submit(rq); + } + + rb = rb_next(rb); + rb_erase(&p->node, &execlists->queue); + INIT_LIST_HEAD(&p->requests); + if (p->priority != I915_PRIORITY_NORMAL) + kmem_cache_free(engine->i915->priorities, p); + } + + /* Remaining _unready_ requests will be nop'ed when submitted */ + + + execlists->queue = RB_ROOT; + execlists->first = NULL; + GEM_BUG_ON(port_isset(execlists->port)); + + /* Mark all CS interrupts as complete */ + smp_store_mb(execlists->active, 0); + synchronize_irq(engine->i915->drm.irq); + + /* + * The port is checked prior to scheduling a tasklet, but + * just in case we have suspended the tasklet to do the + * wedging make sure that when it wakes, it decides there + * is no work to do by clearing the irq_posted bit. */ - GEM_BUG_ON(!dev_priv->gt.awake); + clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - intel_uncore_forcewake_get(dev_priv, engine->fw_domains); + spin_unlock_irqrestore(&engine->timeline->lock, flags); +} - /* Prefer doing test_and_clear_bit() as a two stage operation to avoid - * imposing the cost of a locked atomic transaction when submitting a - * new request (outside of the context-switch interrupt). +static void process_csb(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port * const port = execlists->port; + struct drm_i915_private *dev_priv = engine->i915; + unsigned int head, tail; + const u32 *buf; + + /* However GVT emulation depends upon intercepting CSB mmio */ + if (unlikely(execlists->csb_use_mmio)) { + buf = (u32 * __force) + (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0))); + execlists->csb_head = -1; /* force mmio read of CSB ptrs */ + } else { + /* The HWSP contains a (cacheable) mirror of the CSB */ + buf = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; + } + + /* The write will be ordered by the uncached read (itself + * a memory barrier), so we do not need another in the form + * of a locked instruction. The race between the interrupt + * handler and the split test/clear is harmless as we order + * our clear before the CSB read. If the interrupt arrived + * first between the test and the clear, we read the updated + * CSB and clear the bit. If the interrupt arrives as we read + * the CSB or later (i.e. after we had cleared the bit) the bit + * is set and we do a new loop. 
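process_csb() walks the context-status buffer as a small circular queue between the driver's read pointer (head) and the hardware write pointer (tail): the loop that follows consumes the entry after head each step and treats head == tail as empty. A simplified standalone sketch of that consumption pattern (one status word per entry here; the real CSB pairs each status dword with a context ID, and GEN8_CSB_ENTRIES is the hardware depth):

    #include <stdio.h>

    #define CSB_ENTRIES 6   /* illustrative depth */

    static void consume_csb(const unsigned int *buf, unsigned int head,
                            unsigned int tail)
    {
            while (head != tail) {
                    if (++head == CSB_ENTRIES)
                            head = 0;
                    printf("status[%u] = 0x%08x\n", head, buf[head]);
            }
    }

    int main(void)
    {
            unsigned int csb[CSB_ENTRIES] = { 0, 0x8002, 0x14, 0, 0, 0 };

            consume_csb(csb, 0, 2); /* consumes entries 1 and 2 */
            return 0;
    }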
*/ - while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) { - u32 __iomem *csb_mmio = - dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)); - u32 __iomem *buf = - dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)); - unsigned int head, tail; - - /* The write will be ordered by the uncached read (itself - * a memory barrier), so we do not need another in the form - * of a locked instruction. The race between the interrupt - * handler and the split test/clear is harmless as we order - * our clear before the CSB read. If the interrupt arrived - * first between the test and the clear, we read the updated - * CSB and clear the bit. If the interrupt arrives as we read - * the CSB or later (i.e. after we had cleared the bit) the bit - * is set and we do a new loop. - */ - __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - head = readl(csb_mmio); + __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); + if (unlikely(execlists->csb_head == -1)) { /* following a reset */ + head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine))); tail = GEN8_CSB_WRITE_PTR(head); head = GEN8_CSB_READ_PTR(head); - while (head != tail) { - struct drm_i915_gem_request *rq; - unsigned int status; - unsigned int count; - - if (++head == GEN8_CSB_ENTRIES) - head = 0; + execlists->csb_head = head; + } else { + const int write_idx = + intel_hws_csb_write_index(dev_priv) - + I915_HWS_CSB_BUF0_INDEX; - /* We are flying near dragons again. - * - * We hold a reference to the request in execlist_port[] - * but no more than that. We are operating in softirq - * context and so cannot hold any mutex or sleep. That - * prevents us stopping the requests we are processing - * in port[] from being retired simultaneously (the - * breadcrumb will be complete before we see the - * context-switch). As we only hold the reference to the - * request, any pointer chasing underneath the request - * is subject to a potential use-after-free. Thus we - * store all of the bookkeeping within port[] as - * required, and avoid using unguarded pointers beneath - * request itself. The same applies to the atomic - * status notifier. - */ + head = execlists->csb_head; + tail = READ_ONCE(buf[write_idx]); + } - status = readl(buf + 2 * head); - if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) - continue; + while (head != tail) { + struct drm_i915_gem_request *rq; + unsigned int status; + unsigned int count; - /* Check the context/desc id for this event matches */ - GEM_DEBUG_BUG_ON(readl(buf + 2 * head + 1) != - port->context_id); + if (++head == GEN8_CSB_ENTRIES) + head = 0; + + /* We are flying near dragons again. + * + * We hold a reference to the request in execlist_port[] + * but no more than that. We are operating in softirq + * context and so cannot hold any mutex or sleep. That + * prevents us stopping the requests we are processing + * in port[] from being retired simultaneously (the + * breadcrumb will be complete before we see the + * context-switch). As we only hold the reference to the + * request, any pointer chasing underneath the request + * is subject to a potential use-after-free. Thus we + * store all of the bookkeeping within port[] as + * required, and avoid using unguarded pointers beneath + * request itself. The same applies to the atomic + * status notifier. 
+ */ - rq = port_unpack(port, &count); - GEM_BUG_ON(count == 0); - if (--count == 0) { - GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED); - GEM_BUG_ON(!i915_gem_request_completed(rq)); - execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); + status = READ_ONCE(buf[2 * head]); /* maybe mmio! */ + if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) + continue; - trace_i915_gem_request_out(rq); - i915_gem_request_put(rq); + if (status & GEN8_CTX_STATUS_ACTIVE_IDLE && + buf[2*head + 1] == PREEMPT_ID) { + execlist_cancel_port_requests(execlists); - port[0] = port[1]; - memset(&port[1], 0, sizeof(port[1])); - } else { - port_set(port, port_pack(rq, count)); - } + spin_lock_irq(&engine->timeline->lock); + unwind_incomplete_requests(engine); + spin_unlock_irq(&engine->timeline->lock); - /* After the final element, the hw should be idle */ - GEM_BUG_ON(port_count(port) == 0 && - !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); + GEM_BUG_ON(!execlists_is_active(execlists, + EXECLISTS_ACTIVE_PREEMPT)); + execlists_clear_active(execlists, + EXECLISTS_ACTIVE_PREEMPT); + hrtimer_try_to_cancel(&engine->fpreempt_timer); + continue; } - writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8), - csb_mmio); - } + if (status & GEN8_CTX_STATUS_PREEMPTED && + execlists_is_active(execlists, + EXECLISTS_ACTIVE_PREEMPT)) + continue; - if (execlists_elsp_ready(engine)) - execlists_dequeue(engine); + GEM_BUG_ON(!execlists_is_active(execlists, + EXECLISTS_ACTIVE_USER)); - intel_uncore_forcewake_put(dev_priv, engine->fw_domains); -} + /* Check the context/desc id for this event matches */ + GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id); -static bool -insert_request(struct intel_engine_cs *engine, - struct i915_priotree *pt, - int prio) -{ - struct i915_priolist *p; - struct rb_node **parent, *rb; - bool first = true; + rq = port_unpack(port, &count); + GEM_BUG_ON(count == 0); + if (--count == 0) { + GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED); + GEM_BUG_ON(!i915_gem_request_completed(rq)); + execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); - if (unlikely(engine->no_priolist)) - prio = I915_PRIORITY_NORMAL; + trace_i915_gem_request_out(rq); + i915_gem_request_put(rq); -find_priolist: - /* most positive priority is scheduled first, equal priorities fifo */ - rb = NULL; - parent = &engine->execlist_queue.rb_node; - while (*parent) { - rb = *parent; - p = rb_entry(rb, typeof(*p), node); - if (prio > p->priority) { - parent = &rb->rb_left; - } else if (prio < p->priority) { - parent = &rb->rb_right; - first = false; + execlists_port_complete(execlists, port); } else { - list_add_tail(&pt->link, &p->requests); - return false; + port_set(port, port_pack(rq, count)); } - } - if (prio == I915_PRIORITY_NORMAL) { - p = &engine->default_priolist; - } else { - p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC); - /* Convert an allocation failure to a priority bump */ - if (unlikely(!p)) { - prio = I915_PRIORITY_NORMAL; /* recurses just once */ + /* After the final element, the hw should be idle */ + GEM_BUG_ON(port_count(port) == 0 && + !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); + if (port_count(port) == 0) + execlists_clear_active(execlists, + EXECLISTS_ACTIVE_USER); + } - /* To maintain ordering with all rendering, after an - * allocation failure we have to disable all scheduling. - * Requests will then be executed in fifo, and schedule - * will ensure that dependencies are emitted in fifo. 
- * There will be still some reordering with existing - * requests, so if userspace lied about their - * dependencies that reordering may be visible. - */ - engine->no_priolist = true; - goto find_priolist; - } + if (head != execlists->csb_head) { + execlists->csb_head = head; + writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8), + dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine))); } +} - p->priority = prio; - rb_link_node(&p->node, rb, parent); - rb_insert_color(&p->node, &engine->execlist_queue); +/* + * Check the unread Context Status Buffers and manage the submission of new + * contexts to the ELSP accordingly. + */ +static void intel_lrc_irq_handler(unsigned long data) +{ + struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct drm_i915_private *dev_priv = engine->i915; - INIT_LIST_HEAD(&p->requests); - list_add_tail(&pt->link, &p->requests); + /* We can skip acquiring intel_runtime_pm_get() here as it was taken + * on our behalf by the request (see i915_gem_mark_busy()) and it will + * not be relinquished until the device is idle (see + * i915_gem_idle_work_handler()). As a precaution, we make sure + * that all ELSP are drained i.e. we have processed the CSB, + * before allowing ourselves to idle and calling intel_runtime_pm_put(). + */ + GEM_BUG_ON(!engine->i915->gt.awake); - if (first) - engine->execlist_first = &p->node; + intel_uncore_forcewake_get(dev_priv, execlists->fw_domains); + + /* Prefer doing test_and_clear_bit() as a two stage operation to avoid + * imposing the cost of a locked atomic transaction when submitting a + * new request (outside of the context-switch interrupt). + */ + while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) + process_csb(engine); + + if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT)) + execlists_dequeue(engine); - return first; + intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); +} + +static void insert_request(struct intel_engine_cs *engine, + struct i915_priotree *pt, + int prio) +{ + struct i915_priolist *p = lookup_priolist(engine, pt, prio); + + list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests); + if (ptr_unmask_bits(p, 1)) + tasklet_hi_schedule(&engine->execlists.irq_tasklet); } static void execlists_submit_request(struct drm_i915_gem_request *request) @@ -701,24 +996,23 @@ static void execlists_submit_request(struct drm_i915_gem_request *request) /* Will be called from irq-context when using foreign fences. 
*/ spin_lock_irqsave(&engine->timeline->lock, flags); - if (insert_request(engine, - &request->priotree, - request->priotree.priority)) { - if (execlists_elsp_ready(engine)) - tasklet_hi_schedule(&engine->irq_tasklet); - } + insert_request(engine, &request->priotree, request->priotree.priority); - GEM_BUG_ON(!engine->execlist_first); + GEM_BUG_ON(!engine->execlists.first); GEM_BUG_ON(list_empty(&request->priotree.link)); spin_unlock_irqrestore(&engine->timeline->lock, flags); } +static struct drm_i915_gem_request *pt_to_request(struct i915_priotree *pt) +{ + return container_of(pt, struct drm_i915_gem_request, priotree); +} + static struct intel_engine_cs * pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked) { - struct intel_engine_cs *engine = - container_of(pt, struct drm_i915_gem_request, priotree)->engine; + struct intel_engine_cs *engine = pt_to_request(pt)->engine; GEM_BUG_ON(!locked); @@ -737,6 +1031,8 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) struct i915_dependency stack; LIST_HEAD(dfs); + GEM_BUG_ON(prio == I915_PRIORITY_INVALID); + if (prio <= READ_ONCE(request->priotree.priority)) return; @@ -772,6 +1068,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) * engines. */ list_for_each_entry(p, &pt->signalers_list, signal_link) { + if (i915_gem_request_completed(pt_to_request(p->signaler))) + continue; + GEM_BUG_ON(p->signaler->priority < pt->priority); if (prio > READ_ONCE(p->signaler->priority)) list_move_tail(&p->dfs_link, &dfs); @@ -785,7 +1084,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) * execlists_submit_request()), we can set our own priority and skip * acquiring the engine locks. */ - if (request->priotree.priority == INT_MIN) { + if (request->priotree.priority == I915_PRIORITY_INVALID) { GEM_BUG_ON(!list_empty(&request->priotree.link)); request->priotree.priority = prio; if (stack.dfs_link.next == stack.dfs_link.prev) @@ -815,8 +1114,6 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) } spin_unlock_irq(&engine->timeline->lock); - - /* XXX Do we need to preempt to make room for us and our deps? */ } static struct intel_ring * @@ -914,27 +1211,14 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request) */ request->reserved_space += EXECLISTS_REQUEST_SIZE; - if (i915.enable_guc_submission) { - /* - * Check that the GuC has space for the request before - * going any further, as the i915_add_request() call - * later on mustn't fail ... 
- */ - ret = i915_guc_wq_reserve(request); - if (ret) - goto err; - } - cs = intel_ring_begin(request, 0); - if (IS_ERR(cs)) { - ret = PTR_ERR(cs); - goto err_unreserve; - } + if (IS_ERR(cs)) + return PTR_ERR(cs); if (!ce->initialised) { ret = engine->init_context(request); if (ret) - goto err_unreserve; + return ret; ce->initialised = true; } @@ -948,12 +1232,6 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request) request->reserved_space -= EXECLISTS_REQUEST_SIZE; return 0; - -err_unreserve: - if (i915.enable_guc_submission) - i915_guc_wq_unreserve(request); -err: - return ret; } /* @@ -1031,6 +1309,8 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES); + *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + /* Pad to end of cacheline */ while ((unsigned long)batch % CACHELINE_BYTES) *batch++ = MI_NOOP; @@ -1044,34 +1324,28 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) return batch; } -/* - * This batch is started immediately after indirect_ctx batch. Since we ensure - * that indirect_ctx ends on a cacheline this batch is aligned automatically. - * - * The number of DWORDS written are returned using this field. - * - * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding - * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant. - */ -static u32 *gen8_init_perctx_bb(struct intel_engine_cs *engine, u32 *batch) -{ - /* WaDisableCtxRestoreArbitration:bdw,chv */ - *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; - *batch++ = MI_BATCH_BUFFER_END; - - return batch; -} - static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) { + *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ batch = gen8_emit_flush_coherentl3_wa(engine, batch); + *batch++ = MI_LOAD_REGISTER_IMM(3); + /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */ - *batch++ = MI_LOAD_REGISTER_IMM(1); *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2); *batch++ = _MASKED_BIT_DISABLE( GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE); + + /* BSpec: 11391 */ + *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN); + *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX); + + /* BSpec: 11299 */ + *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3); + *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX); + *batch++ = MI_NOOP; /* WaClearSlmSpaceAtContextSwitch:kbl */ @@ -1109,6 +1383,8 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) *batch++ = 0; } + *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + /* Pad to end of cacheline */ while ((unsigned long)batch % CACHELINE_BYTES) *batch++ = MI_NOOP; @@ -1175,13 +1451,15 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) return -EINVAL; switch (INTEL_GEN(engine->i915)) { + case 10: + return 0; case 9: wa_bb_fn[0] = gen9_init_indirectctx_bb; wa_bb_fn[1] = gen9_init_perctx_bb; break; case 8: wa_bb_fn[0] = gen8_init_indirectctx_bb; - wa_bb_fn[1] = gen8_init_perctx_bb; + wa_bb_fn[1] = NULL; break; default: MISSING_CASE(INTEL_GEN(engine->i915)); @@ -1232,9 +1510,7 @@ static u8 gtiir[] = { static int gen8_init_common_ring(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - struct execlist_port *port = engine->execlist_port; - unsigned int n; - bool submit; + struct intel_engine_execlists * const execlists = &engine->execlists; int 
ret; ret = intel_mocs_init_engine(engine); @@ -1255,36 +1531,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir)); - /* - * Clear any pending interrupt state. - * - * We do it twice out of paranoia that some of the IIR are double - * buffered, and if we only reset it once there may still be - * an interrupt pending. - */ - I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), - GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift); - I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), - GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift); - clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); + execlists->csb_head = -1; + execlists->active = 0; /* After a GPU reset, we may have requests to replay */ - submit = false; - for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) { - if (!port_isset(&port[n])) - break; - - DRM_DEBUG_DRIVER("Restarting %s:%d from 0x%x\n", - engine->name, n, - port_request(&port[n])->global_seqno); - - /* Discard the current inflight count */ - port_set(&port[n], port_request(&port[n])); - submit = true; - } - - if (submit && !i915.enable_guc_submission) - execlists_submit_ports(engine); + if (!i915_modparams.enable_guc_submission && execlists->first) + tasklet_schedule(&execlists->irq_tasklet); return 0; } @@ -1322,12 +1574,105 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine) return init_workarounds_ring(engine); } -static void reset_common_ring(struct intel_engine_cs *engine, - struct drm_i915_gem_request *request) +static struct drm_i915_gem_request * +execlists_reset_prepare(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct drm_i915_private *dev_priv = engine->i915; + struct drm_i915_gem_request *request, *active; + + /* Prevent request submission to the hardware until we have + * completed the reset in i915_gem_reset_finish(). If a request + * is completed by one engine, it may then queue a request + * to a second via its engine->irq_tasklet *just* as we are + * calling engine->init_hw() and also writing the ELSP. + * Turning off the engine->irq_tasklet until the reset is over + * prevents the race. + * + * Note that this needs to be a single atomic operation on the + * tasklet (flush existing tasks, prevent new tasks) to prevent + * a race between reset and set-wedged. It is not, so we do the best + * we can atm and make sure we don't lock the machine up in the more + * common case of recursively being called from set-wedged from inside + * i915_reset. + */ + if (!atomic_read(&engine->execlists.irq_tasklet.count)) + tasklet_kill(&engine->execlists.irq_tasklet); + tasklet_disable(&engine->execlists.irq_tasklet); + + if (engine->irq_seqno_barrier) + engine->irq_seqno_barrier(engine); + + /* + * We want to flush the pending context switches; having disabled + * the tasklet above, we can assume exclusive access to the execlists. + * This allows us to catch up with an inflight preemption event, + * and avoid blaming an innocent request if the stall was due to the + * preemption itself. + */ + intel_uncore_forcewake_get(dev_priv, execlists->fw_domains); + process_csb(engine); + intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); + + /* + * The last active request can then be no later than the last request + * now in ELSP[0]. So search backwards from there, so that if the GPU + * has advanced beyond the last CSB update, it will be pardoned.
+ */ + active = NULL; + request = port_request(execlists->port); + if (request) { + unsigned long flags; + + spin_lock_irqsave(&engine->timeline->lock, flags); + list_for_each_entry_from_reverse(request, + &engine->timeline->requests, + link) { + if (__i915_gem_request_completed(request, + request->global_seqno)) + break; + + active = request; + } + spin_unlock_irqrestore(&engine->timeline->lock, flags); + } + + return active; +} + +static void reset_irq(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + int i; + + /* + * Clear any pending interrupt state. + * + * We do it twice out of paranoia that some of the IIR are double + * buffered, and if we only reset it once there may still be + * an interrupt pending. + */ + for (i = 0; i < 2; i++) { + I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), + GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift); + POSTING_READ(GEN8_GT_IIR(gtiir[engine->id])); + } + GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) & + (GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift)); + + clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); +} + +static void execlists_reset(struct intel_engine_cs *engine, + struct drm_i915_gem_request *request) { - struct execlist_port *port = engine->execlist_port; + struct intel_engine_execlists * const execlists = &engine->execlists; struct intel_context *ce; - unsigned int n; + unsigned long flags; + + reset_irq(engine); + + spin_lock_irqsave(&engine->timeline->lock, flags); /* * Catch up with any missed context-switch interrupts. @@ -1338,20 +1683,31 @@ static void reset_common_ring(struct intel_engine_cs *engine, * guessing the missed context-switch events by looking at what * requests were completed. */ - if (!request) { - for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) - i915_gem_request_put(port_request(&port[n])); - memset(engine->execlist_port, 0, sizeof(engine->execlist_port)); - return; - } + execlist_cancel_port_requests(execlists); - if (request->ctx != port_request(port)->ctx) { - i915_gem_request_put(port_request(port)); - port[0] = port[1]; - memset(&port[1], 0, sizeof(port[1])); - } + /* Push back any incomplete requests for replay after the reset. */ + unwind_incomplete_requests(engine); + + spin_unlock_irqrestore(&engine->timeline->lock, flags); + + /* If a preemption was pending when the reset occurred, and no + * active request was found when the reset completed, it is + * possible that the preemption context was hit by the reset. + * We must assume that the context is corrupted so repair it. + */ + if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) && + !request) { + struct i915_gem_context *ctx = engine->i915->preempt_context; + ce = &ctx->engine[engine->id]; - GEM_BUG_ON(request->ctx != port_request(port)->ctx); + execlists_init_reg_state(ce->lrc_reg_state, + ctx, engine, ce->ring); + ce->lrc_reg_state[CTX_RING_BUFFER_START+1] = + i915_ggtt_offset(ce->ring->vma); + ce->lrc_reg_state[CTX_RING_HEAD+1] = ce->ring->tail; + + return; + } /* If the request was innocent, we leave the request in the ELSP * and will try to replay it on restarting. The context image may @@ -1363,7 +1719,7 @@ static void reset_common_ring(struct intel_engine_cs *engine, * and have to at least restore the RING register in the context * image back to the expected values to skip over the guilty request. */ - if (request->fence.error != -EIO) + if (!request || request->fence.error != -EIO) return; /* We want a simple context + ring to execute the breadcrumb update. 
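As a summary of the split above: reset.prepare quiesces the tasklet and returns the request to blame, reset.reset repairs context and ELSP state around that request, and reset.finish re-enables submission. The sketch below shows the intended calling order only; the i915_do_engine_reset() wrapper and do_hw_reset() stub are hypothetical stand-ins for the real reset entry points, which live elsewhere in the driver and do more bookkeeping than shown.

/*
 * Hypothetical wrapper illustrating the ordering of the new
 * engine->reset vfuncs introduced by this patch.
 */
static int do_hw_reset(struct intel_engine_cs *engine); /* hypothetical stub */

static int i915_do_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *active;
	int err;

	/* Phase 1: stop the tasklet, drain the CSB, find the culprit. */
	active = engine->reset.prepare(engine);

	/* The hardware reset itself is engine-specific (stubbed here). */
	err = do_hw_reset(engine);

	/* Phase 2: unwind incomplete requests, repair the context image. */
	engine->reset.reset(engine, active);

	/* Phase 3: re-enable the tasklet and kick any queued requests. */
	engine->reset.finish(engine);

	return err;
}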
@@ -1386,10 +1742,21 @@ static void reset_common_ring(struct intel_engine_cs *engine, intel_ring_update_space(request->ring); /* Reset WaIdleLiteRestore:bdw,skl as well */ - request->tail = - intel_ring_wrap(request->ring, - request->wa_tail - WA_TAIL_DWORDS*sizeof(u32)); - assert_ring_tail_valid(request->ring, request->tail); + unwind_wa_tail(request); +} + +static void execlists_reset_finish(struct intel_engine_cs *engine) +{ + /* Mark any force preemption as resolved */ + engine->fpreempt_stalled = false; + + /* Get things going again if we have queued requests. Needed + * if the reset was executed or aborted. + */ + if (engine->execlists.first) + tasklet_schedule(&engine->execlists.irq_tasklet); + + tasklet_enable(&engine->execlists.irq_tasklet); } static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) @@ -1448,13 +1815,15 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, if (IS_ERR(cs)) return PTR_ERR(cs); + /* WaDisableCtxRestoreArbitration:bdw,chv */ + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + /* FIXME(BDW): Address space and security selectors. */ *cs++ = MI_BATCH_BUFFER_START_GEN8 | (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)) | (flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0); *cs++ = lower_32_bits(offset); *cs++ = upper_32_bits(offset); - *cs++ = MI_NOOP; intel_ring_advance(req, cs); return 0; @@ -1583,7 +1952,8 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, */ static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs) { - *cs++ = MI_NOOP; + /* Ensure there's always at least one preemption point per-request. */ + *cs++ = MI_ARB_CHECK; *cs++ = MI_NOOP; request->wa_tail = intel_ring_offset(request, cs); } @@ -1604,7 +1974,6 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs) gen8_emit_wa_tail(request, cs); } - static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS; static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request, @@ -1632,7 +2001,6 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request, gen8_emit_wa_tail(request, cs); } - static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS; static int gen8_init_rcs_context(struct drm_i915_gem_request *req) @@ -1666,8 +2034,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) * Tasklet cannot be active at this point due intel_mark_active/idle * so this is just for documentation. 
*/ - if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state))) - tasklet_kill(&engine->irq_tasklet); + if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->execlists.irq_tasklet.state))) + tasklet_kill(&engine->execlists.irq_tasklet); dev_priv = engine->i915; @@ -1678,11 +2046,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) if (engine->cleanup) engine->cleanup(engine); - if (engine->status_page.vma) { - i915_gem_object_unpin_map(engine->status_page.vma->obj); - engine->status_page.vma = NULL; - } - intel_engine_cleanup_common(engine); lrc_destroy_wa_ctx(engine); @@ -1694,8 +2057,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) static void execlists_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = execlists_submit_request; + engine->cancel_requests = execlists_cancel_requests; engine->schedule = execlists_schedule; - engine->irq_tasklet.func = intel_lrc_irq_handler; + engine->execlists.irq_tasklet.func = intel_lrc_irq_handler; } static void @@ -1703,7 +2067,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) { /* Default vfuncs which can be overriden by each engine. */ engine->init_hw = gen8_init_common_ring; - engine->reset_hw = reset_common_ring; + + engine->reset.prepare = execlists_reset_prepare; + engine->reset.reset = execlists_reset; + engine->reset.finish = execlists_reset_finish; engine->context_pin = execlists_context_pin; engine->context_unpin = execlists_context_unpin; @@ -1729,22 +2096,13 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; } -static int -lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma) -{ - const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE; - void *hws; - - /* The HWSP is part of the default context object in LRC mode. */ - hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); - if (IS_ERR(hws)) - return PTR_ERR(hws); - - engine->status_page.page_addr = hws + hws_offset; - engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset; - engine->status_page.vma = vma; - - return 0; +static void i915_error_reset(struct work_struct *work) { + struct intel_engine_cs *engine = + container_of(work, struct intel_engine_cs, + reset_work); + i915_handle_error(engine->i915, 1 << engine->id, I915_ERROR_CAPTURE, + "Received error interrupt from engine %d", + engine->id); } static void @@ -1770,32 +2128,25 @@ logical_ring_setup(struct intel_engine_cs *engine) RING_CONTEXT_STATUS_BUF_BASE(engine), FW_REG_READ); - engine->fw_domains = fw_domains; + engine->execlists.fw_domains = fw_domains; - tasklet_init(&engine->irq_tasklet, + tasklet_init(&engine->execlists.irq_tasklet, intel_lrc_irq_handler, (unsigned long)engine); logical_ring_default_vfuncs(engine); logical_ring_default_irqs(engine); + + INIT_WORK(&engine->reset_work, i915_error_reset); } -static int -logical_ring_init(struct intel_engine_cs *engine) +static int logical_ring_init(struct intel_engine_cs *engine) { - struct i915_gem_context *dctx = engine->i915->kernel_context; int ret; ret = intel_engine_init_common(engine); if (ret) goto error; - /* And setup the hardware status page. 
*/ - ret = lrc_setup_hws(engine, dctx->engine[engine->id].state); - if (ret) { - DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret); - goto error; - } - return 0; error: @@ -2033,6 +2384,14 @@ populate_lr_context(struct i915_gem_context *ctx, execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE, ctx, engine, ring); + /* write the context's pid and hw_id/cid to the per-context HWS page */ + if (intel_vgpu_active(engine->i915) && pid_nr(ctx->pid)) { + *(u32 *)(vaddr + LRC_PPHWSP_PN * PAGE_SIZE + I915_GEM_HWS_PID_ADDR) + = pid_nr(ctx->pid) & 0x3fffff; + *(u32 *)(vaddr + LRC_PPHWSP_PN * PAGE_SIZE + I915_GEM_HWS_CID_ADDR) + = ctx->hw_id & 0x3fffff; + } + i915_gem_object_unpin_map(ctx_obj); return 0; @@ -2052,8 +2411,11 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE); - /* One extra page as the sharing data between driver and GuC */ - context_size += PAGE_SIZE * LRC_PPHWSP_PN; + /* + * Before the actual start of the context image, we insert a few pages + * for our own use and for sharing with the GuC. + */ + context_size += LRC_HEADER_PAGES * PAGE_SIZE; ctx_obj = i915_gem_object_create(ctx->i915, context_size); if (IS_ERR(ctx_obj)) { diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 57ef5833c427..689fde1a63a9 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -25,6 +25,7 @@ #define _INTEL_LRC_H_ #include "intel_ringbuffer.h" +#include "i915_gem_context.h" #define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT @@ -60,6 +61,7 @@ enum { INTEL_CONTEXT_SCHEDULE_IN = 0, INTEL_CONTEXT_SCHEDULE_OUT, + INTEL_CONTEXT_SCHEDULE_PREEMPTED, }; /* Logical Rings */ @@ -69,17 +71,42 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine); /* Logical Ring Contexts */ -/* One extra page is added before LRC for GuC as shared data */ +/* + * We allocate a header at the start of the context image for our own + * use, therefore the actual location of the logical state is offset + * from the start of the VMA. The layout is + * + * | [guc] | [hwsp] [logical state] | + * |<- our header ->|<- context image ->| + * + */ +/* The first page is used for sharing data with the GuC */ #define LRC_GUCSHR_PN (0) -#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1) -#define LRC_STATE_PN (LRC_PPHWSP_PN + 1) +#define LRC_GUCSHR_SZ (1) +/* At the start of the context image is its per-process HWS page */ +#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + LRC_GUCSHR_SZ) +#define LRC_PPHWSP_SZ (1) +/* Finally we have the logical state for the context */ +#define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ) + +/* + * Currently we include the PPHWSP in __intel_engine_context_size() so + * the size of the header is synonymous with the start of the PPHWSP.
+ */ +#define LRC_HEADER_PAGES LRC_PPHWSP_PN struct drm_i915_private; struct i915_gem_context; void intel_lr_context_resume(struct drm_i915_private *dev_priv); -uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, - struct intel_engine_cs *engine); + +static inline uint64_t +intel_lr_context_descriptor(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + return ctx->engine[engine->id].lrc_desc; +} + /* Execlists */ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 8e215777c7f4..5d21a89939b5 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -317,7 +317,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder, I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON); POSTING_READ(lvds_encoder->reg); - if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000)) + + if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000)) DRM_ERROR("timed out waiting for panel to power on\n"); intel_panel_enable_backlight(pipe_config, conn_state); @@ -564,6 +565,36 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, return NOTIFY_OK; } +static int +intel_lvds_connector_register(struct drm_connector *connector) +{ + struct intel_lvds_connector *lvds = to_lvds_connector(connector); + int ret; + + ret = intel_connector_register(connector); + if (ret) + return ret; + + lvds->lid_notifier.notifier_call = intel_lid_notify; + if (acpi_lid_notifier_register(&lvds->lid_notifier)) { + DRM_DEBUG_KMS("lid notifier registration failed\n"); + lvds->lid_notifier.notifier_call = NULL; + } + + return 0; +} + +static void +intel_lvds_connector_unregister(struct drm_connector *connector) +{ + struct intel_lvds_connector *lvds = to_lvds_connector(connector); + + if (lvds->lid_notifier.notifier_call) + acpi_lid_notifier_unregister(&lvds->lid_notifier); + + intel_connector_unregister(connector); +} + /** * intel_lvds_destroy - unregister and free LVDS structures * @connector: connector to free @@ -576,9 +607,6 @@ static void intel_lvds_destroy(struct drm_connector *connector) struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector); - if (lvds_connector->lid_notifier.notifier_call) - acpi_lid_notifier_unregister(&lvds_connector->lid_notifier); - if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) kfree(lvds_connector->base.edid); @@ -599,8 +627,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, - .late_register = intel_connector_register, - .early_unregister = intel_connector_unregister, + .late_register = intel_lvds_connector_register, + .early_unregister = intel_lvds_connector_unregister, .destroy = intel_lvds_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, @@ -817,6 +845,14 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Radiant P845", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"), + DMI_MATCH(DMI_PRODUCT_NAME, "P845"), + }, + }, { } /* terminating entry */ }; @@ -880,8 +916,8 @@ static bool compute_is_dual_link_lvds(struct 
intel_lvds_encoder *lvds_encoder) struct drm_i915_private *dev_priv = to_i915(dev); /* use the module option value if specified */ - if (i915.lvds_channel_mode > 0) - return i915.lvds_channel_mode == 2; + if (i915_modparams.lvds_channel_mode > 0) + return i915_modparams.lvds_channel_mode == 2; /* single channel LVDS is limited to 112 MHz */ if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock @@ -1148,12 +1184,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK; - lvds_connector->lid_notifier.notifier_call = intel_lid_notify; - if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { - DRM_DEBUG_KMS("lid notifier registration failed\n"); - lvds_connector->lid_notifier.notifier_call = NULL; - } - return; failed: diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 98154efcb2f4..1d946240e55f 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -921,7 +921,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = &dev_priv->opregion; const struct firmware *fw = NULL; - const char *name = i915.vbt_firmware; + const char *name = i915_modparams.vbt_firmware; int ret; if (!name || !*name) diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 3b1c5d783ee7..adc51e452e3e 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -379,13 +379,13 @@ enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv) { /* Assume that the BIOS does not lie through the OpRegion... */ - if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) { + if (!i915_modparams.panel_ignore_lid && dev_priv->opregion.lid_state) { return *dev_priv->opregion.lid_state & 0x1 ? connector_status_connected : connector_status_disconnected; } - switch (i915.panel_ignore_lid) { + switch (i915_modparams.panel_ignore_lid) { case -2: return connector_status_connected; case -1: @@ -465,10 +465,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector, WARN_ON(panel->backlight.max == 0); - if (i915.invert_brightness < 0) + if (i915_modparams.invert_brightness < 0) return val; - if (i915.invert_brightness > 0 || + if (i915_modparams.invert_brightness > 0 || dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { return panel->backlight.max - val + panel->backlight.min; } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index cb950752c346..b380c5daec63 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -33,6 +33,10 @@ #include #include +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + /** * DOC: RC6 * @@ -58,24 +62,23 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) { + if (HAS_LLC(dev_priv)) { + /* + * WaCompressedResourceDisplayNewHashMode:skl,kbl + * Display WA#0390: skl,kbl + * + * Must match Sampler, Pixel Back End, and Media. See + * WaCompressedResourceSamplerPbeMediaNewHashMode. + */ + I915_WRITE(CHICKEN_PAR1_1, + I915_READ(CHICKEN_PAR1_1) | + SKL_DE_COMPRESSED_HASH_MODE); + } + /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */ I915_WRITE(CHICKEN_PAR1_1, I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); - /* - * Display WA#0390: skl,bxt,kbl,glk - * - * Must match Sampler, Pixel Back End, and Media - * (0xE194 bit 8, 0x7014 bit 13, 0x4DDC bits 27 and 31). 
- * - * Including bits outside the page in the hash would - * require 2 (or 4?) MiB alignment of resources. Just - * assume the defaul hashing mode which only uses bits - * within the page. - */ - I915_WRITE(CHICKEN_PAR1_1, - I915_READ(CHICKEN_PAR1_1) & ~SKL_RC_HASH_OUTSIDE); - I915_WRITE(GEN8_CONFIG0, I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES); @@ -317,7 +320,7 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) { u32 val; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); if (enable) @@ -332,14 +335,14 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) DRM_ERROR("timed out waiting for Punit DDR DVFS request\n"); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) { u32 val; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); if (enable) @@ -348,7 +351,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) val &= ~DSP_MAXFIFO_PM5_ENABLE; vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } #define FW_WM(value, plane) \ @@ -807,11 +810,14 @@ static int intel_wm_num_levels(struct drm_i915_private *dev_priv) static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = plane_state ? to_intel_plane(plane_state->base.plane) : NULL; /* FIXME check the 'enable' instead */ if (!crtc_state->base.active) return false; + if (!plane_state && i915_modparams.avail_planes_per_pipe) { + return true; + } /* * Treat cursor with fb as always visible since cursor updates @@ -2785,11 +2791,11 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, /* read the first set of memory latencies[0:3] */ val = 0; /* data0 to be programmed to 0 for first set */ - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY, &val); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); if (ret) { DRM_ERROR("SKL Mailbox read error = %d\n", ret); @@ -2806,11 +2812,11 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, /* read the second set of memory latencies[4:7] */ val = 1; /* data0 to be programmed to 1 for second set */ - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY, &val); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); if (ret) { DRM_ERROR("SKL Mailbox read error = %d\n", ret); return; @@ -3594,13 +3600,13 @@ intel_enable_sagv(struct drm_i915_private *dev_priv) return 0; DRM_DEBUG_KMS("Enabling the SAGV\n"); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL, GEN9_SAGV_ENABLE); /* We don't need to wait for the SAGV when enabling */ - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); /* * Some skl systems, pre-release machines in particular, @@ -3631,14 +3637,14 @@ intel_disable_sagv(struct drm_i915_private *dev_priv) return 0; 
DRM_DEBUG_KMS("Disabling the SAGV\n"); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); /* bspec says to keep retrying for at least 1 ms */ ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL, GEN9_SAGV_DISABLE, GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, 1); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); /* * Some skl systems, pre-release machines in particular, @@ -3665,7 +3671,6 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) struct intel_crtc *crtc; struct intel_plane *plane; struct intel_crtc_state *cstate; - enum pipe pipe; int level, latency; int sagv_block_time_us = IS_GEN9(dev_priv) ? 30 : 20; @@ -3684,8 +3689,10 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) return false; /* Since we're now guaranteed to only have one active CRTC... */ - pipe = ffs(intel_state->active_crtcs) - 1; - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = get_intel_crtc_from_index(dev, + ffs(intel_state->active_crtcs) - 1); + if (!crtc) + return false; cstate = to_intel_crtc_state(crtc->base.state); if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) @@ -3707,6 +3714,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) latency = dev_priv->wm.skl_latency[level]; if (skl_needs_memory_bw_wa(intel_state) && + plane->base.state->fb && plane->base.state->fb->modifier == I915_FORMAT_MOD_X_TILED) latency += 15; @@ -3844,11 +3852,15 @@ static uint_fixed_16_16_t skl_plane_downscale_amount(const struct intel_crtc_state *cstate, const struct intel_plane_state *pstate) { - struct intel_plane *plane = to_intel_plane(pstate->base.plane); + struct intel_plane *plane = pstate ? to_intel_plane(pstate->base.plane) : NULL; uint32_t src_w, src_h, dst_w, dst_h; uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; uint_fixed_16_16_t downscale_h, downscale_w; + if (!pstate && i915_modparams.avail_planes_per_pipe) { + return mul_fixed16(u32_to_fixed16(1), u32_to_fixed16(1)); + } + if (WARN_ON(!intel_wm_plane_visible(cstate, pstate))) return u32_to_fixed16(0); @@ -4370,9 +4382,9 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, uint8_t *out_lines, /* out */ bool *enabled /* out */) { - struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane); - const struct drm_plane_state *pstate = &intel_pstate->base; - const struct drm_framebuffer *fb = pstate->fb; + struct intel_plane *plane = intel_pstate ? to_intel_plane(intel_pstate->base.plane) : NULL; + const struct drm_plane_state *pstate = intel_pstate ? &intel_pstate->base : NULL; + const struct drm_framebuffer *fb = pstate ? pstate->fb : NULL; uint32_t latency = dev_priv->wm.skl_latency[level]; uint_fixed_16_16_t method1, method2; uint_fixed_16_16_t plane_blocks_per_line; @@ -4396,6 +4408,17 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, return 0; } + if (!intel_pstate && i915_modparams.avail_planes_per_pipe) { + y_tiled = false; + x_tiled = true; + cpp = 4; + y_min_scanlines = 8; + latency += 15; + + width = cstate->pipe_src_w; + goto calculate_wm; + } + y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED || fb->modifier == I915_FORMAT_MOD_Yf_TILED || fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || @@ -4423,8 +4446,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, cpp = (fb->format->format == DRM_FORMAT_NV12) ? 
fb->format->cpp[1] : fb->format->cpp[0]; - plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); - if (drm_rotation_90_or_270(pstate->rotation)) { switch (cpp) { @@ -4448,7 +4469,10 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, if (apply_memory_bw_wa) y_min_scanlines *= 2; +calculate_wm: + plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); plane_bytes_per_line = width * cpp; + if (y_tiled) { interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512); @@ -4499,7 +4523,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, plane_blocks_per_line); /* Display WA #1125: skl,bxt,kbl,glk */ - if (level == 0 && + if (fb && level == 0 && (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)) res_blocks += fixed16_to_u32_round_up(y_tile_minimum); @@ -4524,12 +4548,16 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, if (level) { return 0; } else { - struct drm_plane *plane = pstate->plane; + struct drm_plane *plane = pstate ? pstate->plane : NULL; DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n"); - DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n", + + if (plane) { + DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n", plane->base.id, plane->name, res_blocks, ddb_allocation, res_lines); + } + return -EINVAL; } } @@ -4546,20 +4574,19 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv, struct skl_ddb_allocation *ddb, struct intel_crtc_state *cstate, const struct intel_plane_state *intel_pstate, + enum plane_id plane_id, struct skl_plane_wm *wm) { struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); - struct drm_plane *plane = intel_pstate->base.plane; - struct intel_plane *intel_plane = to_intel_plane(plane); uint16_t ddb_blocks; enum pipe pipe = intel_crtc->pipe; int level, max_level = ilk_wm_max_level(dev_priv); int ret; - if (WARN_ON(!intel_pstate->base.fb)) + if (WARN_ON(intel_pstate && !intel_pstate->base.fb)) return -EINVAL; - ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]); + ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]); for (level = 0; level <= max_level; level++) { struct skl_wm_level *result = &wm->wm[level]; @@ -4611,6 +4638,55 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate, trans_wm->plane_en = false; } +static int skl_build_pipe_all_plane_wm(struct intel_crtc_state *cstate, + struct skl_ddb_allocation *ddb, + struct skl_pipe_wm *pipe_wm) +{ + struct drm_device *dev = cstate->base.crtc->dev; + const struct drm_i915_private *dev_priv = to_i915(dev); + struct skl_plane_wm *wm; + struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc); + struct drm_crtc_state *crtc_state = &cstate->base; + struct drm_plane *plane; + const struct drm_plane_state *pstate; + struct intel_plane_state *intel_pstate; + int pipe = crtc->pipe; + int plane_id; + int ret; + + memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes)); + + /* + * Since Dom0 may not own all planes on this pipe, there will + * not be a valid intel_plane for the planes it doesn't own. + * Therefore, we have to pass NULL to skl_compute_wm_level() + * which will then know that this plane is not owned by Dom0 + * and hence will use width and height from the crtc and will + * also assume cpp = 4 and tiling = x_tiled. 
+ */ + for_each_universal_plane(dev_priv, pipe, plane_id) { + intel_pstate = NULL; + + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { + if (plane_id == to_intel_plane(plane)->id) { + intel_pstate = to_intel_plane_state(pstate); + break; + } + } + + wm = &pipe_wm->planes[plane_id]; + ret = skl_compute_wm_levels(dev_priv, ddb, cstate, + intel_pstate, plane_id, wm); + if (ret) + return ret; + + skl_compute_transition_wm(cstate, &wm->trans_wm); + } + pipe_wm->linetime = skl_compute_linetime_wm(cstate); + + return 0; +} + static int skl_build_pipe_wm(struct intel_crtc_state *cstate, struct skl_ddb_allocation *ddb, struct skl_pipe_wm *pipe_wm) @@ -4637,7 +4713,7 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate, wm = &pipe_wm->planes[plane_id]; ret = skl_compute_wm_levels(dev_priv, ddb, cstate, - intel_pstate, wm); + intel_pstate, plane_id, wm); if (ret) return ret; skl_compute_transition_wm(cstate, &wm->trans_wm); @@ -4657,21 +4733,61 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, I915_WRITE(reg, 0); } -static void skl_write_wm_level(struct drm_i915_private *dev_priv, - i915_reg_t reg, - const struct skl_wm_level *level) +static inline uint32_t skl_calc_wm_level(const struct skl_wm_level *level) { uint32_t val = 0; - if (level->plane_en) { val |= PLANE_WM_EN; val |= level->plane_res_b; val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; } + return val; +} +static void skl_write_wm_level(struct drm_i915_private *dev_priv, + i915_reg_t reg, + const struct skl_wm_level *level) +{ + uint32_t val = skl_calc_wm_level(level); I915_WRITE(reg, val); } +static void skl_pv_write_plane_wm(struct intel_crtc *intel_crtc, + const struct skl_plane_wm *wm, + const struct skl_ddb_allocation *ddb, + enum plane_id plane_id) +{ + int i, level; + struct pv_plane_wm_update tmp_plane_wm; + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); + int max_level = ilk_wm_max_level(dev_priv); + u32 __iomem *pv_plane_wm = (u32 *)&(dev_priv->shared_page->pv_plane_wm); + enum pipe pipe = intel_crtc->pipe; + const struct skl_ddb_entry *entry; + + memset(&tmp_plane_wm, 0, sizeof(struct pv_plane_wm_update)); + tmp_plane_wm.max_wm_level = max_level; + for (level = 0; level <= max_level; level++) { + tmp_plane_wm.plane_wm_level[level] = + skl_calc_wm_level(&wm->wm[level]); + } + tmp_plane_wm.plane_trans_wm_level = skl_calc_wm_level(&wm->trans_wm); + + entry = &ddb->plane[pipe][plane_id]; + if (entry->end) + tmp_plane_wm.plane_buf_cfg = + (entry->end - 1) << 16 | entry->start; + else + tmp_plane_wm.plane_buf_cfg = 0; + + spin_lock(&dev_priv->shared_page_lock); + for (i = 0; i < sizeof(struct pv_plane_wm_update) / 4; i++) + writel(*((u32 *)(&tmp_plane_wm) + i), pv_plane_wm + i); + skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane_id), + &ddb->y_plane[pipe][plane_id]); + spin_unlock(&dev_priv->shared_page_lock); +} + static void skl_write_plane_wm(struct intel_crtc *intel_crtc, const struct skl_plane_wm *wm, const struct skl_ddb_allocation *ddb, @@ -4682,13 +4798,54 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc, struct drm_i915_private *dev_priv = to_i915(dev); int level, max_level = ilk_wm_max_level(dev_priv); enum pipe pipe = intel_crtc->pipe; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + struct intel_gvt *gvt = dev_priv->gvt; + struct intel_dom0_plane_regs *dom0_regs = NULL; +#endif + + if (intel_vgpu_active(dev_priv)) { + /* + * when plane restriction feature is enabled, + * sos trap handlers for plane wm related registers are null + 
*/ + if (i915_modparams.avail_planes_per_pipe) + return; + + if (i915_modparams.enable_pvmmio & PVMMIO_PLANE_WM_UPDATE) + return skl_pv_write_plane_wm(intel_crtc, wm, + ddb, plane_id); + } for (level = 0; level <= max_level; level++) { +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (gvt && gvt->pipe_info[pipe].plane_owner[plane_id]) { + dom0_regs = &gvt->pipe_info[pipe].dom0_regs[plane_id]; + dom0_regs->plane_wm[level] = skl_calc_wm_level( + &wm->wm[level]); + } else { + skl_write_wm_level(dev_priv, + PLANE_WM(pipe, plane_id, level), + &wm->wm[level]); + } +#else skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), - &wm->wm[level]); + &wm->wm[level]); +#endif + } + +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (gvt && gvt->pipe_info[pipe].plane_owner[plane_id]) { + dom0_regs = &gvt->pipe_info[pipe].dom0_regs[plane_id]; + dom0_regs->plane_wm_trans = skl_calc_wm_level( + &wm->trans_wm); + } else { + skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), + &wm->trans_wm); } +#else skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), - &wm->trans_wm); + &wm->trans_wm); +#endif skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), &ddb->plane[pipe][plane_id]); @@ -4759,7 +4916,10 @@ static int skl_update_pipe_wm(struct drm_crtc_state *cstate, struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate); int ret; - ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm); + if (i915_modparams.avail_planes_per_pipe) + ret = skl_build_pipe_all_plane_wm(intel_cstate, ddb, pipe_wm); + else + ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm); if (ret) return ret; @@ -4877,6 +5037,14 @@ skl_compute_ddb(struct drm_atomic_state *state) */ memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb)); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + /* In a GVT environment, we only use the statically allocated ddb */ + if (dev_priv->gvt) { + memcpy(ddb, &dev_priv->gvt->ddb, sizeof(*ddb)); + return 0; + } +#endif + for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) { struct intel_crtc_state *cstate; @@ -4952,10 +5120,14 @@ skl_compute_wm(struct drm_atomic_state *state) struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct skl_wm_values *results = &intel_state->wm_results; struct drm_device *dev = state->dev; + struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); struct skl_pipe_wm *pipe_wm; bool changed = false; int ret, i; + if (intel_vgpu_active(dev_priv) && i915_modparams.avail_planes_per_pipe) + return 0; + /* * When we distrust bios wm we always need to recompute to set the * expected DDB allocations for each CRTC.
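Note the ordering in skl_pv_write_plane_wm() above: every watermark level is first computed into an on-stack pv_plane_wm_update, and only then copied dword-by-dword into the page shared with the hypervisor while shared_page_lock is held, so the other side never observes a partially written update. Below is a minimal sketch of that fill-then-publish pattern; the pv_update_example layout is a hypothetical stand-in, only the copy discipline is taken from the patch.

/* Hypothetical stand-in for the real pv_plane_wm_update layout. */
struct pv_update_example {
	u32 max_wm_level;
	u32 wm_level[8];
};

static void publish_pv_update(struct drm_i915_private *dev_priv,
			      u32 __iomem *dst,
			      const struct pv_update_example *src)
{
	int i;

	/*
	 * Publish under the shared-page lock so concurrent writers are
	 * serialised and the consumer never sees a torn update.
	 */
	spin_lock(&dev_priv->shared_page_lock);
	for (i = 0; i < sizeof(*src) / 4; i++)
		writel(*((const u32 *)src + i), dst + i);
	spin_unlock(&dev_priv->shared_page_lock);
}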
@@ -5031,11 +5203,23 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, enum pipe pipe = crtc->pipe; enum plane_id plane_id; + if (intel_vgpu_active(dev_priv) && i915_modparams.avail_planes_per_pipe) + return; + if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base))) return; I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime); + if (i915_modparams.avail_planes_per_pipe) { + for_each_universal_plane(dev_priv, pipe, plane_id) { + skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id], + ddb, plane_id); + } + + return; + } + for_each_plane_id_on_crtc(crtc, plane_id) { if (plane_id != PLANE_CURSOR) skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id], @@ -5535,7 +5719,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev) wm->level = VLV_WM_LEVEL_PM2; if (IS_CHERRYVIEW(dev_priv)) { - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); if (val & DSP_MAXFIFO_PM5_ENABLE) @@ -5565,7 +5749,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev) wm->level = VLV_WM_LEVEL_DDR_DVFS; } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } for_each_intel_crtc(dev, crtc) { @@ -5669,12 +5853,30 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->wm.wm_mutex); } +/* + * FIXME should probably kill this and improve + * the real watermark readout/sanitation instead + */ +static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) +{ + I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); + I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); + I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); + + /* + * Don't touch WM1S_LP_EN here. + * Doing so could cause underruns. + */ +} + void ilk_wm_get_hw_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct ilk_wm_values *hw = &dev_priv->wm.hw; struct drm_crtc *crtc; + ilk_init_lp_watermarks(dev_priv); + for_each_crtc(dev, crtc) ilk_pipe_wm_get_hw_state(crtc); @@ -5739,6 +5941,36 @@ void intel_update_watermarks(struct intel_crtc *crtc) dev_priv->display.update_wm(crtc); } +void intel_enable_ipc(struct drm_i915_private *dev_priv) +{ + u32 val; + + /* Display WA #0477 WaDisableIPC: skl */ + if (IS_SKYLAKE(dev_priv)) { + dev_priv->ipc_enabled = false; + return; + } + + val = I915_READ(DISP_ARB_CTL2); + + if (dev_priv->ipc_enabled) + val |= DISP_IPC_ENABLE; + else + val &= ~DISP_IPC_ENABLE; + + I915_WRITE(DISP_ARB_CTL2, val); +} + +void intel_init_ipc(struct drm_i915_private *dev_priv) +{ + dev_priv->ipc_enabled = false; + if (!HAS_IPC(dev_priv)) + return; + + dev_priv->ipc_enabled = true; + intel_enable_ipc(dev_priv); +} + /* * Lock protecting IPS related data structures */ @@ -6108,7 +6340,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) void gen6_rps_busy(struct drm_i915_private *dev_priv) { - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); if (dev_priv->rps.enabled) { u8 freq; @@ -6131,7 +6363,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv) dev_priv->rps.max_freq_softlimit))) DRM_DEBUG_DRIVER("Failed to set idle frequency\n"); } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } void gen6_rps_idle(struct drm_i915_private *dev_priv) @@ -6143,7 +6375,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) */ gen6_disable_rps_interrupts(dev_priv); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); if (dev_priv->rps.enabled) { if 
(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_set_rps_idle(dev_priv); @@ -6153,7 +6385,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0)); } - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } void gen6_rps_boost(struct drm_i915_gem_request *rq, @@ -6189,7 +6421,7 @@ int intel_set_rps(struct drm_i915_private *dev_priv, u8 val) { int err; - lockdep_assert_held(&dev_priv->rps.hw_lock); + lockdep_assert_held(&dev_priv->pcu_lock); GEM_BUG_ON(val > dev_priv->rps.max_freq); GEM_BUG_ON(val < dev_priv->rps.min_freq); @@ -6586,7 +6818,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv) int rc6_mode; int ret; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); /* Here begins a magic sequence of register writes to enable * auto-downclocking. @@ -6679,7 +6911,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) int scaling_factor = 180; struct cpufreq_policy *policy; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); policy = cpufreq_cpu_get(0); if (policy) { @@ -7072,7 +7304,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv) enum intel_engine_id id; u32 gtfifodbg, val, rc6_mode = 0, pcbr; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV | GT_FIFO_FREE_ENTRIES_CHV); @@ -7161,7 +7393,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv) enum intel_engine_id id; u32 gtfifodbg, val, rc6_mode = 0; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); valleyview_check_pctx(dev_priv); @@ -7716,13 +7948,13 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) * RPM depends on RC6 to save restore the GT HW context, so make RC6 a * requirement. 
*/ - if (!i915.enable_rc6) { + if (!i915_modparams.enable_rc6) { DRM_INFO("RC6 disabled, disabling runtime PM support\n"); intel_runtime_pm_get(dev_priv); } mutex_lock(&dev_priv->drm.struct_mutex); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); /* Initialize RPS limits (for userspace) */ if (IS_CHERRYVIEW(dev_priv)) @@ -7762,7 +7994,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) /* Finally allow us to boost to max by default */ dev_priv->rps.boost_freq = dev_priv->rps.max_freq; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); mutex_unlock(&dev_priv->drm.struct_mutex); intel_autoenable_gt_powersave(dev_priv); @@ -7773,7 +8005,7 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) if (IS_VALLEYVIEW(dev_priv)) valleyview_cleanup_gt_powersave(dev_priv); - if (!i915.enable_rc6) + if (!i915_modparams.enable_rc6) intel_runtime_pm_put(dev_priv); } @@ -7809,7 +8041,7 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) if (!READ_ONCE(dev_priv->rps.enabled)) return; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); if (INTEL_GEN(dev_priv) >= 9) { gen9_disable_rc6(dev_priv); @@ -7825,7 +8057,7 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) } dev_priv->rps.enabled = false; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) @@ -7840,7 +8072,7 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) if (intel_vgpu_active(dev_priv)) return; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); if (IS_CHERRYVIEW(dev_priv)) { cherryview_enable_rps(dev_priv); @@ -7869,7 +8101,7 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq); dev_priv->rps.enabled = true; - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } static void __intel_autoenable_gt_powersave(struct work_struct *work) @@ -7879,6 +8111,13 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work) struct intel_engine_cs *rcs; struct drm_i915_gem_request *req; + /* + * ANDROID: In deferred fw mode, we can't submit anything until we know + * we loaded and setup the guc and we're ready to handle submissions. + */ + if (!dev_priv->contexts_ready) + goto out; + if (READ_ONCE(dev_priv->rps.enabled)) goto out; @@ -7895,7 +8134,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work) if (IS_ERR(req)) goto unlock; - if (!i915.enable_execlists && i915_switch_context(req) == 0) + if (!i915_modparams.enable_execlists && i915_switch_context(req) == 0) rcs->init_context(req); /* Mark the device busy, calling intel_enable_gt_powersave() */ @@ -7959,18 +8198,6 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) } } -static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) -{ - I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); - I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); - I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); - - /* - * Don't touch WM1S_LP_EN here. - * Doing so could cause underruns. 
- */ -} - static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv) { uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; @@ -8004,8 +8231,6 @@ static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv) (I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS)); - ilk_init_lp_watermarks(dev_priv); - /* * Based on the document from hardware guys the following bits * should be set unconditionally in order to enable FBC. @@ -8118,8 +8343,6 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_GT_MODE, _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); - ilk_init_lp_watermarks(dev_priv); - I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); @@ -8257,6 +8480,27 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, I915_WRITE(GEN7_MISCCPCTL, misccpctl); } +static void cannonlake_init_clock_gating(struct drm_i915_private *dev_priv) +{ + /* This is not a Wa. Enable for better image quality */ + I915_WRITE(_3D_CHICKEN3, + _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); + + /* WaEnableChickenDCPR:cnl */ + I915_WRITE(GEN8_CHICKEN_DCPR_1, + I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); + + /* WaFbcWakeMemOn:cnl */ + I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | + DISP_FBC_MEMORY_WAKE); + + /* WaSarbUnitClockGatingDisable:cnl (pre-prod) */ + if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0)) + I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, + I915_READ(SLICE_UNIT_LEVEL_CLKGATE) | + SARBUNIT_CLKGATE_DIS); +} + static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv) { gen9_init_clock_gating(dev_priv); @@ -8293,8 +8537,6 @@ static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv) { enum pipe pipe; - ilk_init_lp_watermarks(dev_priv); - /* WaSwitchSolVfFArbitrationPriority:bdw */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); @@ -8349,8 +8591,6 @@ static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv) static void haswell_init_clock_gating(struct drm_i915_private *dev_priv) { - ilk_init_lp_watermarks(dev_priv); - /* L3 caching of data atomics doesn't work -- disable it.
*/ I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); I915_WRITE(HSW_ROW_CHICKEN3, @@ -8394,10 +8634,6 @@ static void haswell_init_clock_gating(struct drm_i915_private *dev_priv) /* WaSwitchSolVfFArbitrationPriority:hsw */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); - /* WaRsPkgCStateDisplayPMReq:hsw */ - I915_WRITE(CHICKEN_PAR1_1, - I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); - lpt_init_clock_gating(dev_priv); } @@ -8405,8 +8641,6 @@ static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv) { uint32_t snpcr; - ilk_init_lp_watermarks(dev_priv); - I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); /* WaDisableEarlyCull:ivb */ @@ -8737,7 +8971,9 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv) */ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) { - if (IS_SKYLAKE(dev_priv)) + if (IS_CANNONLAKE(dev_priv)) + dev_priv->display.init_clock_gating = cannonlake_init_clock_gating; + else if (IS_SKYLAKE(dev_priv)) dev_priv->display.init_clock_gating = skylake_init_clock_gating; else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) dev_priv->display.init_clock_gating = kabylake_init_clock_gating; @@ -8907,7 +9143,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val { int status; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); /* GEN6_PCODE_* are outside of the forcewake domain, we can * use te fw I915_READ variants to reduce the amount of work @@ -8949,12 +9185,12 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val return 0; } -int sandybridge_pcode_write(struct drm_i915_private *dev_priv, - u32 mbox, u32 val) +int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, + u32 mbox, u32 val, int timeout_us) { int status; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); /* GEN6_PCODE_* are outside of the forcewake domain, we can * use te fw I915_READ variants to reduce the amount of work @@ -8973,7 +9209,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, if (__intel_wait_for_register_fw(dev_priv, GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0, - 500, 0, NULL)) { + timeout_us, 0, NULL)) { DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n", val, mbox, __builtin_return_address(0)); return -ETIMEDOUT; @@ -9031,7 +9267,7 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, u32 status; int ret; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); #define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \ &status) @@ -9046,7 +9282,7 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, ret = 0; goto out; } - ret = _wait_for(COND, timeout_base_ms * 1000, 10); + ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10); if (!ret) goto out; @@ -9165,7 +9401,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req) void intel_pm_setup(struct drm_i915_private *dev_priv) { - mutex_init(&dev_priv->rps.hw_lock); + mutex_init(&dev_priv->pcu_lock); INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work, __intel_autoenable_gt_powersave); diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 1b31ab002dae..0c57a33a0347 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c 
@@ -397,7 +397,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) return false; } - if (!i915.enable_psr) { + if (!i915_modparams.enable_psr) { DRM_DEBUG_KMS("PSR disable by flag\n"); return false; } @@ -943,8 +943,8 @@ void intel_psr_init(struct drm_i915_private *dev_priv) HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; /* Per platform default: all disabled. */ - if (i915.enable_psr == -1) - i915.enable_psr = 0; + if (i915_modparams.enable_psr == -1) + i915_modparams.enable_psr = 0; /* Set link_standby x link_off defaults */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) @@ -958,11 +958,11 @@ void intel_psr_init(struct drm_i915_private *dev_priv) dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; /* Override link_standby x link_off defaults */ - if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) { + if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) { DRM_DEBUG_KMS("PSR: Forcing link standby\n"); dev_priv->psr.link_standby = true; } - if (i915.enable_psr == 3 && dev_priv->psr.link_standby) { + if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) { DRM_DEBUG_KMS("PSR: Forcing main link off\n"); dev_priv->psr.link_standby = false; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index cdf084ef5aae..0167692a96c2 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -572,8 +572,16 @@ static int init_ring_common(struct intel_engine_cs *engine) return ret; } -static void reset_ring_common(struct intel_engine_cs *engine, - struct drm_i915_gem_request *request) +static struct drm_i915_gem_request *reset_prepare(struct intel_engine_cs *engine) +{ + if (engine->irq_seqno_barrier) + engine->irq_seqno_barrier(engine); + + return i915_gem_find_active_request(engine); +} + +static void reset_ring(struct intel_engine_cs *engine, + struct drm_i915_gem_request *request) { /* Try to restore the logical GPU state to match the continuation * of the request queue. If we skip the context/PD restore, then @@ -629,6 +637,10 @@ static void reset_ring_common(struct intel_engine_cs *engine, } } +static void reset_finish(struct intel_engine_cs *engine) +{ +} + static int intel_rcs_ctx_init(struct drm_i915_gem_request *req) { int ret; @@ -778,6 +790,24 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs) return cs; } +static void cancel_requests(struct intel_engine_cs *engine) +{ + struct drm_i915_gem_request *request; + unsigned long flags; + + spin_lock_irqsave(&engine->timeline->lock, flags); + + /* Mark all submitted requests as skipped. 
*/ + list_for_each_entry(request, &engine->timeline->requests, link) { + GEM_BUG_ON(!request->global_seqno); + if (!i915_gem_request_completed(request)) + dma_fence_set_error(&request->fence, -EIO); + } + /* Remaining _unready_ requests will be nop'ed when submitted */ + + spin_unlock_irqrestore(&engine->timeline->lock, flags); +} + static void i9xx_submit_request(struct drm_i915_gem_request *request) { struct drm_i915_private *dev_priv = request->i915; @@ -1174,113 +1204,7 @@ i915_emit_bb_start(struct drm_i915_gem_request *req, return 0; } -static void cleanup_phys_status_page(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - - if (!dev_priv->status_page_dmah) - return; - drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah); - engine->status_page.page_addr = NULL; -} - -static void cleanup_status_page(struct intel_engine_cs *engine) -{ - struct i915_vma *vma; - struct drm_i915_gem_object *obj; - - vma = fetch_and_zero(&engine->status_page.vma); - if (!vma) - return; - - obj = vma->obj; - - i915_vma_unpin(vma); - i915_vma_close(vma); - - i915_gem_object_unpin_map(obj); - __i915_gem_object_release_unless_active(obj); -} - -static int init_status_page(struct intel_engine_cs *engine) -{ - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - unsigned int flags; - void *vaddr; - int ret; - - obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); - if (IS_ERR(obj)) { - DRM_ERROR("Failed to allocate status page\n"); - return PTR_ERR(obj); - } - - ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - if (ret) - goto err; - - vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto err; - } - - flags = PIN_GLOBAL; - if (!HAS_LLC(engine->i915)) - /* On g33, we cannot place HWS above 256MiB, so - * restrict its pinning to the low mappable arena. - * Though this restriction is not documented for - * gen4, gen5, or byt, they also behave similarly - * and hang if the HWS is placed at the top of the - * GTT. To generalise, it appears that all !llc - * platforms have issues with us placing the HWS - * above the mappable region (even though we never - * actualy map it). 
- */ - flags |= PIN_MAPPABLE; - ret = i915_vma_pin(vma, 0, 4096, flags); - if (ret) - goto err; - - vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - ret = PTR_ERR(vaddr); - goto err_unpin; - } - - engine->status_page.vma = vma; - engine->status_page.ggtt_offset = i915_ggtt_offset(vma); - engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE); - - DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", - engine->name, i915_ggtt_offset(vma)); - return 0; - -err_unpin: - i915_vma_unpin(vma); -err: - i915_gem_object_put(obj); - return ret; -} - -static int init_phys_status_page(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - - GEM_BUG_ON(engine->id != RCS); - - dev_priv->status_page_dmah = - drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); - if (!dev_priv->status_page_dmah) - return -ENOMEM; - - engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr; - memset(engine->status_page.page_addr, 0, PAGE_SIZE); - - return 0; -} int intel_ring_pin(struct intel_ring *ring, struct drm_i915_private *i915, @@ -1567,17 +1491,10 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) if (err) goto err; - if (HWS_NEEDS_PHYSICAL(engine->i915)) - err = init_phys_status_page(engine); - else - err = init_status_page(engine); - if (err) - goto err; - ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE); if (IS_ERR(ring)) { err = PTR_ERR(ring); - goto err_hws; + goto err; } /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ @@ -1592,11 +1509,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) err_ring: intel_ring_free(ring); -err_hws: - if (HWS_NEEDS_PHYSICAL(engine->i915)) - cleanup_phys_status_page(engine); - else - cleanup_status_page(engine); err: intel_engine_cleanup_common(engine); return err; @@ -1615,11 +1527,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine) if (engine->cleanup) engine->cleanup(engine); - if (HWS_NEEDS_PHYSICAL(dev_priv)) - cleanup_phys_status_page(engine); - else - cleanup_status_page(engine); - intel_engine_cleanup_common(engine); dev_priv->engine[engine->id] = NULL; @@ -1983,7 +1890,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, struct drm_i915_gem_object *obj; int ret, i; - if (!i915.semaphores) + if (!i915_modparams.semaphores) return; if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) { @@ -2083,7 +1990,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, i915_gem_object_put(obj); err: DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n"); - i915.semaphores = 0; + i915_modparams.semaphores = 0; } static void intel_ring_init_irq(struct drm_i915_private *dev_priv, @@ -2115,11 +2022,13 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv, static void i9xx_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = i9xx_submit_request; + engine->cancel_requests = cancel_requests; } static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = gen6_bsd_submit_request; + engine->cancel_requests = cancel_requests; } static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, @@ -2129,7 +2038,9 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, intel_ring_init_semaphores(dev_priv, engine); engine->init_hw = init_ring_common; - engine->reset_hw = reset_ring_common; + engine->reset.prepare = reset_prepare; + engine->reset.reset = reset_ring; + engine->reset.finish = 
reset_finish; engine->context_pin = intel_ring_context_pin; engine->context_unpin = intel_ring_context_unpin; @@ -2138,7 +2049,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, engine->emit_breadcrumb = i9xx_emit_breadcrumb; engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz; - if (i915.semaphores) { + if (i915_modparams.semaphores) { int num_rings; engine->emit_breadcrumb = gen6_sema_emit_breadcrumb; @@ -2182,7 +2093,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine) engine->emit_breadcrumb = gen8_render_emit_breadcrumb; engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz; engine->emit_flush = gen8_render_ring_flush; - if (i915.semaphores) { + if (i915_modparams.semaphores) { int num_rings; engine->semaphore.signal = gen8_rcs_signal; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 6b2067f10824..d2db454e5351 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -185,6 +185,104 @@ struct i915_priolist { int priority; }; +/** + * struct intel_engine_execlists - execlist submission queue and port state + * + * The struct intel_engine_execlists represents the combined logical state of + * driver and the hardware state for execlist mode of submission. + */ +struct intel_engine_execlists { + /** + * @irq_tasklet: softirq tasklet for bottom handler + */ + struct tasklet_struct irq_tasklet; + + /** + * @default_priolist: priority list for I915_PRIORITY_NORMAL + */ + struct i915_priolist default_priolist; + + /** + * @no_priolist: priority lists disabled + */ + bool no_priolist; + + /** + * @port: execlist port states + * + * For each hardware ELSP (ExecList Submission Port) we keep + * track of the last request and the number of times we submitted + * that port to hw. We then count the number of times the hw reports + * a context completion or preemption. As only one context can + * be active on hw, we limit resubmission of context to port[0]. This + * is called Lite Restore, of the context. + */ + struct execlist_port { + /** + * @request_count: combined request and submission count + */ + struct drm_i915_gem_request *request_count; +#define EXECLIST_COUNT_BITS 2 +#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS) +#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS) +#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS) +#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS) +#define port_set(p, packed) ((p)->request_count = (packed)) +#define port_isset(p) ((p)->request_count) +#define port_index(p, execlists) ((p) - (execlists)->port) + + /** + * @context_id: context ID for port + */ + GEM_DEBUG_DECL(u32 context_id); + +#define EXECLIST_MAX_PORTS 2 + } port[EXECLIST_MAX_PORTS]; + + /** + * @active: is the HW active? We consider the HW as active after + * submitting any context for execution and until we have seen the + * last context completion event. After that, we do not expect any + * more events until we submit, and so can park the HW. + * + * As we have a small number of different sources from which we feed + * the HW, we track the state of each inside a single bitfield. 
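+	 * As an illustration (sketch only, using the helpers declared
+	 * further down in this header), a submission path would mark
+	 * user work via:
+	 *   execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
+	 * and clear the bit again once the last completion event has
+	 * been processed:
+	 *   execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);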
+ */ + unsigned int active; +#define EXECLISTS_ACTIVE_USER 0 +#define EXECLISTS_ACTIVE_PREEMPT 1 + + /** + * @port_mask: number of execlist ports - 1 + */ + unsigned int port_mask; + + /** + * @queue: queue of requests, in priority lists + */ + struct rb_root queue; + + /** + * @first: leftmost level in priority @queue + */ + struct rb_node *first; + + /** + * @fw_domains: forcewake domains for irq tasklet + */ + unsigned int fw_domains; + + /** + * @csb_head: context status buffer head + */ + unsigned int csb_head; + + /** + * @csb_use_mmio: access csb through mmio, instead of hwsp + */ + bool csb_use_mmio; +}; + #define INTEL_ENGINE_CS_MAX_NAME 8 struct intel_engine_cs { @@ -266,6 +364,13 @@ struct intel_engine_cs { void (*reset_hw)(struct intel_engine_cs *engine, struct drm_i915_gem_request *req); + struct { + struct drm_i915_gem_request *(*prepare)(struct intel_engine_cs *engine); + void (*reset)(struct intel_engine_cs *engine, + struct drm_i915_gem_request *rq); + void (*finish)(struct intel_engine_cs *engine); + } reset; + void (*set_default_submission)(struct intel_engine_cs *engine); struct intel_ring *(*context_pin)(struct intel_engine_cs *engine, @@ -307,6 +412,14 @@ struct intel_engine_cs { void (*schedule)(struct drm_i915_gem_request *request, int priority); + /* + * Cancel all requests on the hardware, or queued for execution. + * This should only cancel the ready requests that have been + * submitted to the engine (via the engine->submit_request callback). + * This is called when marking the device as wedged. + */ + void (*cancel_requests)(struct intel_engine_cs *engine); + /* Some chipsets are not quite as coherent as advertised and need * an expensive kick to force a true read of the up-to-date seqno. * However, the up-to-date seqno is not always required and the last @@ -373,25 +486,8 @@ struct intel_engine_cs { u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs); } semaphore; - /* Execlists */ - struct tasklet_struct irq_tasklet; - struct i915_priolist default_priolist; - bool no_priolist; - struct execlist_port { - struct drm_i915_gem_request *request_count; -#define EXECLIST_COUNT_BITS 2 -#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS) -#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS) -#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS) -#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS) -#define port_set(p, packed) ((p)->request_count = (packed)) -#define port_isset(p) ((p)->request_count) -#define port_index(p, e) ((p) - (e)->execlist_port) - GEM_DEBUG_DECL(u32 context_id); - } execlist_port[2]; - struct rb_root execlist_queue; - struct rb_node *execlist_first; - unsigned int fw_domains; + struct work_struct reset_work; + struct intel_engine_execlists execlists; /* Contexts are pinned whilst they are active on the GPU. 
The last * context executed remains active whilst the GPU is idle - the @@ -417,6 +513,10 @@ struct intel_engine_cs { struct intel_engine_hangcheck hangcheck; + struct hrtimer fpreempt_timer; + struct work_struct fpreempt_work; + bool fpreempt_stalled; + bool needs_cmd_parser; /* @@ -444,6 +544,46 @@ struct intel_engine_cs { u32 (*get_cmd_length_mask)(u32 cmd_header); }; +static inline void +execlists_set_active(struct intel_engine_execlists *execlists, + unsigned int bit) +{ + __set_bit(bit, (unsigned long *)&execlists->active); +} + +static inline void +execlists_clear_active(struct intel_engine_execlists *execlists, + unsigned int bit) +{ + __clear_bit(bit, (unsigned long *)&execlists->active); +} + +static inline bool +execlists_is_active(const struct intel_engine_execlists *execlists, + unsigned int bit) +{ + return test_bit(bit, (unsigned long *)&execlists->active); +} + +static inline unsigned int +execlists_num_ports(const struct intel_engine_execlists * const execlists) +{ + return execlists->port_mask + 1; +} + +static inline void +execlists_port_complete(struct intel_engine_execlists * const execlists, + struct execlist_port * const port) +{ + const unsigned int m = execlists->port_mask; + + GEM_BUG_ON(port_index(port, execlists) != 0); + GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER)); + + memmove(port, port + 1, m * sizeof(struct execlist_port)); + memset(port + m, 0, sizeof(struct execlist_port)); +} + static inline unsigned int intel_engine_flag(const struct intel_engine_cs *engine) { @@ -494,9 +634,17 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) */ #define I915_GEM_HWS_INDEX 0x30 #define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT) -#define I915_GEM_HWS_SCRATCH_INDEX 0x40 +#define I915_GEM_HWS_PID_INDEX 0x40 +#define I915_GEM_HWS_PID_ADDR (I915_GEM_HWS_PID_INDEX << MI_STORE_DWORD_INDEX_SHIFT) +#define I915_GEM_HWS_CID_INDEX 0x48 +#define I915_GEM_HWS_CID_ADDR (I915_GEM_HWS_CID_INDEX << MI_STORE_DWORD_INDEX_SHIFT) +#define I915_GEM_HWS_SCRATCH_INDEX 0x50 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) +#define I915_HWS_CSB_BUF0_INDEX 0x10 +#define I915_HWS_CSB_WRITE_INDEX 0x1f +#define CNL_HWS_CSB_WRITE_INDEX 0x2f + struct intel_ring * intel_engine_create_ring(struct intel_engine_cs *engine, int size); int intel_ring_pin(struct intel_ring *ring, @@ -736,16 +884,6 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv); void intel_engines_mark_idle(struct drm_i915_private *i915); void intel_engines_reset_default_submission(struct drm_i915_private *i915); -static inline bool -__intel_engine_can_store_dword(unsigned int gen, unsigned int class) -{ - if (gen <= 2) - return false; /* uses physical not virtual addresses */ - - if (gen == 6 && class == VIDEO_DECODE_CLASS) - return false; /* b0rked */ - - return true; -} +bool intel_engine_can_store_dword(struct intel_engine_cs *engine); #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 49577eba8e7e..9c81dd3a71e3 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -598,6 +598,11 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("Enabling DC5\n"); + /* Wa Display #1183: skl,kbl,cfl */ + if (IS_GEN9_BC(dev_priv)) + I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | + SKL_SELECT_ALTERNATE_DC_EXIT); + gen9_set_dc_state(dev_priv, 
DC_STATE_EN_UPTO_DC5); } @@ -617,8 +622,12 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("Enabling DC6\n"); - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); + /* Wa Display #1183: skl,kbl,cfl */ + if (IS_GEN9_BC(dev_priv)) + I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | + SKL_SELECT_ALTERNATE_DC_EXIT); + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); } void skl_disable_dc6(struct drm_i915_private *dev_priv) @@ -785,7 +794,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv, state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) : PUNIT_PWRGT_PWR_GATE(power_well_id); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); #define COND \ ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) @@ -806,7 +815,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv, #undef COND out: - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } static void vlv_power_well_enable(struct drm_i915_private *dev_priv, @@ -833,7 +842,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, mask = PUNIT_PWRGT_MASK(power_well_id); ctrl = PUNIT_PWRGT_PWR_ON(power_well_id); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; /* @@ -852,7 +861,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; WARN_ON(ctrl != state); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); return enabled; } @@ -1364,7 +1373,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, bool enabled; u32 state, ctrl; - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe); /* @@ -1381,7 +1390,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe); WARN_ON(ctrl << 16 != state); - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); return enabled; } @@ -1396,7 +1405,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, state = enable ? 
DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); - mutex_lock(&dev_priv->rps.hw_lock); + mutex_lock(&dev_priv->pcu_lock); #define COND \ ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state) @@ -1417,7 +1426,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, #undef COND out: - mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->pcu_lock); } static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, @@ -1786,6 +1795,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_GMBUS) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ @@ -1833,6 +1843,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_GMBUS) | \ BIT_ULL(POWER_DOMAIN_INIT)) static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { @@ -2413,7 +2424,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, mask = 0; } - if (!i915.disable_power_well) + if (!i915_modparams.disable_power_well) max_dc = 0; if (enable_dc >= 0 && enable_dc <= max_dc) { @@ -2471,10 +2482,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->power_domains; - i915.disable_power_well = sanitize_disable_power_well_option(dev_priv, - i915.disable_power_well); - dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv, - i915.enable_dc); + i915_modparams.disable_power_well = + sanitize_disable_power_well_option(dev_priv, + i915_modparams.disable_power_well); + dev_priv->csr.allowed_dc_mask = + get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc); BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); @@ -2535,7 +2547,7 @@ void intel_power_domains_fini(struct drm_i915_private *dev_priv) intel_display_set_init_power(dev_priv, true); /* Remove the refcount we took to keep power well support disabled. */ - if (!i915.disable_power_well) + if (!i915_modparams.disable_power_well) intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); /* @@ -2975,7 +2987,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) /* For now, we need the power well to be always enabled. */ intel_display_set_init_power(dev_priv, true); /* Disable power support if the user asked so. */ - if (!i915.disable_power_well) + if (!i915_modparams.disable_power_well) intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); intel_power_domains_sync_hw(dev_priv); power_domains->initializing = false; @@ -2994,7 +3006,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv) * Even if power well support was disabled we still want to disable * power wells while we are system suspended. 
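	 * Concretely that means dropping the POWER_DOMAIN_INIT
	 * reference taken at init time (the power_put below).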
*/ - if (!i915.disable_power_well) + if (!i915_modparams.disable_power_well) intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); if (IS_CANNONLAKE(dev_priv)) diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index 7d971cb56116..75c872bb8cc9 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -81,7 +81,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr) { u32 val = 0; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); mutex_lock(&dev_priv->sb_lock); vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, @@ -95,7 +95,7 @@ int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val) { int err; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); mutex_lock(&dev_priv->sb_lock); err = vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, @@ -125,7 +125,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr) { u32 val = 0; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); mutex_lock(&dev_priv->sb_lock); vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC, diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 524933b01483..64dcd1d73652 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -41,6 +41,10 @@ #include #include "i915_drv.h" +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + static bool format_is_yuv(uint32_t format) { @@ -225,7 +229,69 @@ void intel_pipe_update_end(struct intel_crtc *crtc) #endif } -static void +static void pv_update_plane_reg(struct intel_plane *plane, + u32 stride, uint32_t src_w, uint32_t src_h, + uint32_t crtc_w, uint32_t crtc_h, u32 aux_stride, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + int i; + struct pv_plane_update tmp_plane; + uint32_t x = plane_state->main.x; + uint32_t y = plane_state->main.y; + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + u32 __iomem *pv_plane = (u32 *)&(dev_priv->shared_page->pv_plane); + + memset(&tmp_plane, 0, sizeof(struct pv_plane_update)); + if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { + tmp_plane.flags |= PLANE_COLOR_CTL_BIT; + tmp_plane.plane_color_ctl = PLANE_COLOR_PIPE_GAMMA_ENABLE | + PLANE_COLOR_PIPE_CSC_ENABLE | + PLANE_COLOR_PLANE_GAMMA_DISABLE; + } + + if (plane_state->ckey.flags) { + tmp_plane.flags |= PLANE_KEY_BIT; + tmp_plane.plane_key_val = plane_state->ckey.min_value; + tmp_plane.plane_key_max = plane_state->ckey.max_value; + tmp_plane.plane_key_msk = plane_state->ckey.channel_mask; + } + + tmp_plane.plane_offset = (y << 16) | x; + tmp_plane.plane_stride = stride; + tmp_plane.plane_size = (src_h << 16) | src_w; + tmp_plane.plane_aux_dist = + (plane_state->aux.offset - plane_state->main.offset) | + aux_stride; + tmp_plane.plane_aux_offset = + (plane_state->aux.y << 16) | plane_state->aux.x; + + /* program plane scaler */ + if (plane_state->scaler_id >= 0) { + tmp_plane.flags |= PLANE_SCALER_BIT; + tmp_plane.ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane->id) | + crtc_state->scaler_state.scalers[plane_state->scaler_id].mode; + tmp_plane.ps_pwr_gate = 0; + tmp_plane.ps_win_ps = + (plane_state->base.dst.x1 << 16) | plane_state->base.dst.y1; + tmp_plane.ps_win_sz = ((crtc_w + 1) << 16) | (crtc_h + 1); + tmp_plane.plane_pos = 0; + } else { + tmp_plane.plane_pos 
= + (plane_state->base.dst.y1 << 16) | plane_state->base.dst.x1; + } + + tmp_plane.plane_ctl = plane_state->ctl; + + spin_lock(&dev_priv->shared_page_lock); + for (i = 0; i < sizeof(struct pv_plane_update) / 4; i++) + writel(*((u32 *)(&tmp_plane) + i), pv_plane + i); + I915_WRITE_FW(PLANE_SURF(plane->pipe, plane->id), + intel_plane_ggtt_offset(plane_state) + plane_state->main.offset); + spin_unlock(&dev_priv->shared_page_lock); +} + +void skl_update_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) @@ -249,6 +315,11 @@ skl_update_plane(struct intel_plane *plane, uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; unsigned long irqflags; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + struct intel_gvt *gvt = dev_priv->gvt; + struct intel_dom0_plane_regs *dom0_regs = + &gvt->pipe_info[pipe].dom0_regs[plane_id]; +#endif /* Sizes are 0 based */ src_w--; @@ -256,8 +327,41 @@ skl_update_plane(struct intel_plane *plane, crtc_w--; crtc_h--; + if (intel_vgpu_active(dev_priv) && + i915_modparams.enable_pvmmio & PVMMIO_PLANE_UPDATE) { + pv_update_plane_reg(plane, stride, src_w, src_h, + crtc_w, crtc_h, aux_stride, crtc_state, plane_state); + return; + } + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (gvt && gvt->pipe_info[pipe].plane_owner[plane_id]) { + dom0_regs->plane_keyval = key->min_value; + dom0_regs->plane_keymax = key->max_value; + dom0_regs->plane_keymsk = key->channel_mask; + dom0_regs->plane_offset = (y << 16) | x; + dom0_regs->plane_stride = stride; + dom0_regs->plane_size = (src_h << 16) | src_w; + dom0_regs->plane_aux_dist = + (plane_state->aux.offset - surf_addr) | aux_stride; + dom0_regs->plane_aux_offset = + (plane_state->aux.y << 16) | plane_state->aux.x; + dom0_regs->plane_pos = (crtc_y << 16) | crtc_x; + dom0_regs->plane_ctl = plane_ctl; + dom0_regs->plane_surf = + intel_plane_ggtt_offset(plane_state) + + surf_addr; + + if (plane_state->scaler_id >= 0) + DRM_ERROR("GVT not support plane scaling yet\n"); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + return; + } +#endif + if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), PLANE_COLOR_PIPE_GAMMA_ENABLE | @@ -306,13 +410,24 @@ skl_update_plane(struct intel_plane *plane, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -static void +void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; unsigned long irqflags; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + struct intel_gvt *gvt = dev_priv->gvt; + + if (gvt && gvt->pipe_info[pipe].plane_owner[plane_id]) { + struct intel_dom0_plane_regs *dom0_regs = + &gvt->pipe_info[pipe].dom0_regs[plane_id]; + dom0_regs->plane_ctl = 0; + dom0_regs->plane_surf = 0; + return; + } +#endif spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); @@ -324,45 +439,108 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +bool +skl_plane_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(pipe); + if 
(!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static void -chv_update_csc(struct intel_plane *plane, uint32_t format) +chv_update_csc(const struct intel_plane_state *plane_state) { + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->base.fb; enum plane_id plane_id = plane->id; /* Seems RGB data bypasses the CSC always */ - if (!format_is_yuv(format)) + if (!format_is_yuv(fb->format->format)) return; /* - * BT.601 limited range YCbCr -> full range RGB + * BT.601 full range YCbCr -> full range RGB * - * |r| | 6537 4769 0| |cr | - * |g| = |-3330 4769 -1605| x |y-64| - * |b| | 0 4769 8263| |cb | + * |r| | 5743 4096 0| |cr| + * |g| = |-2925 4096 -1410| x |y | + * |b| | 0 4096 7258| |cb| * - * Cb and Cr apparently come in as signed already, so no - * need for any offset. For Y we need to remove the offset. + * Cb and Cr apparently come in as signed already, + * and we get full range data in on account of CLRC0/1 */ - I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(-64)); + I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); I915_WRITE_FW(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); I915_WRITE_FW(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); - I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(4769) | SPCSC_C0(6537)); - I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(-3330) | SPCSC_C0(0)); - I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(-1605) | SPCSC_C0(4769)); - I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(4769) | SPCSC_C0(0)); - I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(8263)); + I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(4096) | SPCSC_C0(5743)); + I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(-2925) | SPCSC_C0(0)); + I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(-1410) | SPCSC_C0(4096)); + I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(4096) | SPCSC_C0(0)); + I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(7258)); - I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(940) | SPCSC_IMIN(64)); - I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448)); - I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448)); + I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(1023) | SPCSC_IMIN(0)); + I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512)); + I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512)); I915_WRITE_FW(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); I915_WRITE_FW(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); } +#define SIN_0 0 +#define COS_0 1 + +static void +vlv_update_clrc(const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->base.fb; + enum pipe pipe = plane->pipe; + enum plane_id plane_id = plane->id; + int contrast, brightness, sh_scale, sh_sin, sh_cos; + + if (format_is_yuv(fb->format->format)) { + /* + * Expand limited range to full range: + * Contrast is applied first and is used to expand Y range. + * Brightness is applied second and is used to remove the + * offset from Y. Saturation/hue is used to expand CbCr range. 
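+	 * e.g. for 8bpc BT.601 limited range (Y in 16..235, CbCr in
+	 * 16..240) the values computed below work out to:
+	 *   contrast   = DIV_ROUND_CLOSEST(255 << 6, 219)  =  75 (~255/219 gain, .6 fixed point)
+	 *   brightness = -DIV_ROUND_CLOSEST(16 * 255, 219) = -19
+	 *   sh_scale   = DIV_ROUND_CLOSEST(128 << 7, 112)  = 146 (~128/112 gain, .7 fixed point)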
+ */ + contrast = DIV_ROUND_CLOSEST(255 << 6, 235 - 16); + brightness = -DIV_ROUND_CLOSEST(16 * 255, 235 - 16); + sh_scale = DIV_ROUND_CLOSEST(128 << 7, 240 - 128); + sh_sin = SIN_0 * sh_scale; + sh_cos = COS_0 * sh_scale; + } else { + /* Pass-through everything. */ + contrast = 1 << 6; + brightness = 0; + sh_scale = 1 << 7; + sh_sin = SIN_0 * sh_scale; + sh_cos = COS_0 * sh_scale; + } + + /* FIXME these register are single buffered :( */ + I915_WRITE_FW(SPCLRC0(pipe, plane_id), + SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness)); + I915_WRITE_FW(SPCLRC1(pipe, plane_id), + SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos)); +} + static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -456,8 +634,10 @@ vlv_update_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + vlv_update_clrc(plane_state); + if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) - chv_update_csc(plane, fb->format->format); + chv_update_csc(plane_state); if (key->flags) { I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value); @@ -501,6 +681,26 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +static bool +vlv_plane_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -641,6 +841,25 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +static bool +ivb_plane_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + enum pipe pipe = plane->pipe; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -772,7 +991,26 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -static int +static bool +g4x_plane_get_hw_state(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + enum pipe pipe = plane->pipe; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + + ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE; + + intel_display_power_put(dev_priv, power_domain); + + return ret; +} + +int intel_check_sprite_plane(struct intel_plane *plane, struct intel_crtc_state *crtc_state, struct intel_plane_state *state) @@ -1227,6 +1465,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, intel_plane->update_plane = skl_update_plane; intel_plane->disable_plane = 
skl_disable_plane; + intel_plane->get_hw_state = skl_plane_get_hw_state; plane_formats = skl_plane_formats; num_plane_formats = ARRAY_SIZE(skl_plane_formats); @@ -1237,6 +1476,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, intel_plane->update_plane = skl_update_plane; intel_plane->disable_plane = skl_disable_plane; + intel_plane->get_hw_state = skl_plane_get_hw_state; plane_formats = skl_plane_formats; num_plane_formats = ARRAY_SIZE(skl_plane_formats); @@ -1247,6 +1487,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, intel_plane->update_plane = vlv_update_plane; intel_plane->disable_plane = vlv_disable_plane; + intel_plane->get_hw_state = vlv_plane_get_hw_state; plane_formats = vlv_plane_formats; num_plane_formats = ARRAY_SIZE(vlv_plane_formats); @@ -1262,6 +1503,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, intel_plane->update_plane = ivb_update_plane; intel_plane->disable_plane = ivb_disable_plane; + intel_plane->get_hw_state = ivb_plane_get_hw_state; plane_formats = snb_plane_formats; num_plane_formats = ARRAY_SIZE(snb_plane_formats); @@ -1272,6 +1514,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, intel_plane->update_plane = g4x_update_plane; intel_plane->disable_plane = g4x_disable_plane; + intel_plane->get_hw_state = g4x_plane_get_hw_state; modifiers = i9xx_plane_format_modifiers; if (IS_GEN6(dev_priv)) { diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index 0178ba42a0e5..901854007664 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -63,35 +63,35 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv) void intel_uc_sanitize_options(struct drm_i915_private *dev_priv) { if (!HAS_GUC(dev_priv)) { - if (i915.enable_guc_loading > 0 || - i915.enable_guc_submission > 0) + if (i915_modparams.enable_guc_loading > 0 || + i915_modparams.enable_guc_submission > 0) DRM_INFO("Ignoring GuC options, no hardware\n"); - i915.enable_guc_loading = 0; - i915.enable_guc_submission = 0; + i915_modparams.enable_guc_loading = 0; + i915_modparams.enable_guc_submission = 0; return; } /* A negative value means "use platform default" */ - if (i915.enable_guc_loading < 0) - i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv); + if (i915_modparams.enable_guc_loading < 0) + i915_modparams.enable_guc_loading = HAS_GUC_UCODE(dev_priv); /* Verify firmware version */ - if (i915.enable_guc_loading) { + if (i915_modparams.enable_guc_loading) { if (HAS_HUC_UCODE(dev_priv)) intel_huc_select_fw(&dev_priv->huc); if (intel_guc_select_fw(&dev_priv->guc)) - i915.enable_guc_loading = 0; + i915_modparams.enable_guc_loading = 0; } /* Can't enable guc submission without guc loaded */ - if (!i915.enable_guc_loading) - i915.enable_guc_submission = 0; + if (!i915_modparams.enable_guc_loading) + i915_modparams.enable_guc_submission = 0; /* A negative value means "use platform default" */ - if (i915.enable_guc_submission < 0) - i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv); + if (i915_modparams.enable_guc_submission < 0) + i915_modparams.enable_guc_submission = HAS_GUC_SCHED(dev_priv); } static void gen8_guc_raise_irq(struct intel_guc *guc) @@ -290,7 +290,7 @@ static void guc_init_send_regs(struct intel_guc *guc) static void guc_capture_load_err_log(struct intel_guc *guc) { - if (!guc->log.vma || i915.guc_log_level < 0) + if (!guc->log.vma || i915_modparams.guc_log_level < 0) return; if (!guc->load_err_log) @@ -333,7 +333,7 @@ int intel_uc_init_hw(struct drm_i915_private 
*dev_priv) struct intel_guc *guc = &dev_priv->guc; int ret, attempts; - if (!i915.enable_guc_loading) + if (!i915_modparams.enable_guc_loading) return 0; guc_disable_communication(guc); @@ -342,7 +342,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) /* We need to notify the guc whenever we change the GGTT */ i915_ggtt_enable_guc(dev_priv); - if (i915.enable_guc_submission) { + if (i915_modparams.enable_guc_submission) { /* * This is stuff we need to have available at fw load time * if we are planning to enable submission later @@ -391,8 +391,8 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) goto err_log_capture; intel_guc_auth_huc(dev_priv); - if (i915.enable_guc_submission) { - if (i915.guc_log_level >= 0) + if (i915_modparams.enable_guc_submission) { + if (i915_modparams.guc_log_level >= 0) gen9_enable_guc_interrupts(dev_priv); ret = i915_guc_submission_enable(dev_priv); @@ -417,23 +417,24 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) err_log_capture: guc_capture_load_err_log(guc); err_submission: - if (i915.enable_guc_submission) + if (i915_modparams.enable_guc_submission) i915_guc_submission_fini(dev_priv); err_guc: i915_ggtt_disable_guc(dev_priv); DRM_ERROR("GuC init failed\n"); - if (i915.enable_guc_loading > 1 || i915.enable_guc_submission > 1) + if (i915_modparams.enable_guc_loading > 1 || + i915_modparams.enable_guc_submission > 1) ret = -EIO; else ret = 0; - if (i915.enable_guc_submission) { - i915.enable_guc_submission = 0; + if (i915_modparams.enable_guc_submission) { + i915_modparams.enable_guc_submission = 0; DRM_NOTE("Falling back from GuC submission to execlist mode\n"); } - i915.enable_guc_loading = 0; + i915_modparams.enable_guc_loading = 0; DRM_NOTE("GuC firmware loading disabled\n"); return ret; @@ -443,15 +444,15 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv) { guc_free_load_err_log(&dev_priv->guc); - if (!i915.enable_guc_loading) + if (!i915_modparams.enable_guc_loading) return; - if (i915.enable_guc_submission) + if (i915_modparams.enable_guc_submission) i915_guc_submission_disable(dev_priv); guc_disable_communication(&dev_priv->guc); - if (i915.enable_guc_submission) { + if (i915_modparams.enable_guc_submission) { gen9_disable_guc_interrupts(dev_priv); i915_guc_submission_fini(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h index 22ae52b17b0f..7703c9ad6511 100644 --- a/drivers/gpu/drm/i915/intel_uc.h +++ b/drivers/gpu/drm/i915/intel_uc.h @@ -52,17 +52,6 @@ struct drm_i915_gem_request; * GuC). The subsequent pages of the client object constitute the work * queue (a circular array of work items), again described in the process * descriptor. Work queue pages are mapped momentarily as required. - * - * We also keep a few statistics on failures. Ideally, these should all - * be zero! - * no_wq_space: times that the submission pre-check found no space was - * available in the work queue (note, the queue is shared, - * not per-engine). It is OK for this to be nonzero, but - * it should not be huge! - * b_fail: failed to ring the doorbell. This should never happen, unless - * somehow the hardware misbehaves, or maybe if the GuC firmware - * crashes? We probably need to reset the GPU to recover. 
- * retcode: errno from last guc_submit() */ struct i915_guc_client { struct i915_vma *vma; @@ -77,15 +66,8 @@ struct i915_guc_client { u16 doorbell_id; unsigned long doorbell_offset; - u32 doorbell_cookie; spinlock_t wq_lock; - uint32_t wq_offset; - uint32_t wq_size; - uint32_t wq_tail; - uint32_t wq_rsvd; - uint32_t no_wq_space; - /* Per-engine counts of GuC submissions */ uint64_t submissions[I915_NUM_ENGINES]; }; @@ -250,8 +232,6 @@ u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv); /* i915_guc_submission.c */ int i915_guc_submission_init(struct drm_i915_private *dev_priv); int i915_guc_submission_enable(struct drm_i915_private *dev_priv); -int i915_guc_wq_reserve(struct drm_i915_gem_request *rq); -void i915_guc_wq_unreserve(struct drm_i915_gem_request *request); void i915_guc_submission_disable(struct drm_i915_private *dev_priv); void i915_guc_submission_fini(struct drm_i915_private *dev_priv); struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 1d7b879cc68c..4653b0e1aa79 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -434,9 +434,16 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv) i915_check_and_clear_faults(dev_priv); } +void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv) +{ + iosf_mbi_register_pmic_bus_access_notifier( + &dev_priv->uncore.pmic_bus_access_nb); +} + void intel_uncore_sanitize(struct drm_i915_private *dev_priv) { - i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6); + i915_modparams.enable_rc6 = + sanitize_rc6_option(dev_priv, i915_modparams.enable_rc6); /* BIOS often leaves RC6 enabled, but disable it for hw init */ intel_sanitize_gt_powersave(dev_priv); @@ -489,6 +496,57 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +/** + * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace + * @dev_priv: i915 device instance + * + * This function is a wrapper around intel_uncore_forcewake_get() to acquire + * the GT powerwell and in the process disable our debugging for the + * duration of userspace's bypass. + */ +void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv) +{ + spin_lock_irq(&dev_priv->uncore.lock); + if (!dev_priv->uncore.user_forcewake.count++) { + intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL); + + /* Save and disable mmio debugging for the user bypass */ + dev_priv->uncore.user_forcewake.saved_mmio_check = + dev_priv->uncore.unclaimed_mmio_check; + dev_priv->uncore.user_forcewake.saved_mmio_debug = + i915_modparams.mmio_debug; + + dev_priv->uncore.unclaimed_mmio_check = 0; + i915_modparams.mmio_debug = 0; + } + spin_unlock_irq(&dev_priv->uncore.lock); +} + +/** + * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace + * @dev_priv: i915 device instance + * + * This function complements intel_uncore_forcewake_user_get() and releases + * the GT powerwell taken on behalf of the userspace bypass. 
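+ * Expected pairing (illustrative):
+ *
+ *	intel_uncore_forcewake_user_get(dev_priv);
+ *	... raw register access on behalf of userspace ...
+ *	intel_uncore_forcewake_user_put(dev_priv);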
+ */ +void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv) +{ + spin_lock_irq(&dev_priv->uncore.lock); + if (!--dev_priv->uncore.user_forcewake.count) { + if (intel_uncore_unclaimed_mmio(dev_priv)) + dev_info(dev_priv->drm.dev, + "Invalid mmio detected during user access\n"); + + dev_priv->uncore.unclaimed_mmio_check = + dev_priv->uncore.user_forcewake.saved_mmio_check; + i915_modparams.mmio_debug = + dev_priv->uncore.user_forcewake.saved_mmio_debug; + + intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL); + } + spin_unlock_irq(&dev_priv->uncore.lock); +} + /** * intel_uncore_forcewake_get__locked - grab forcewake domain references * @dev_priv: i915 device instance @@ -790,7 +848,8 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv, "Unclaimed %s register 0x%x\n", read ? "read from" : "write to", i915_mmio_reg_offset(reg))) - i915.mmio_debug--; /* Only report the first N failures */ + /* Only report the first N failures */ + i915_modparams.mmio_debug--; } static inline void @@ -799,7 +858,7 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv, const bool read, const bool before) { - if (likely(!i915.mmio_debug)) + if (likely(!i915_modparams.mmio_debug)) return; __unclaimed_reg_debug(dev_priv, reg, read, before); @@ -1171,8 +1230,15 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb, * bus, which will be busy after this notification, leading to: * "render: timed out waiting for forcewake ack request." * errors. + * + * The notifier is unregistered during intel_runtime_suspend(), + * so it's ok to access the HW here without holding a RPM + * wake reference -> disable wakeref asserts for the time of + * the access. */ + disable_rpm_wakeref_asserts(dev_priv); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + enable_rpm_wakeref_asserts(dev_priv); break; case MBI_PMIC_BUS_ACCESS_END: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); @@ -1241,72 +1307,65 @@ void intel_uncore_fini(struct drm_i915_private *dev_priv) intel_uncore_forcewake_reset(dev_priv, false); } -#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1) - -static const struct register_whitelist { - i915_reg_t offset_ldw, offset_udw; - uint32_t size; - /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. 
*/ - uint32_t gen_bitmask; -} whitelist[] = { - { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), - .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), - .size = 8, .gen_bitmask = GEN_RANGE(4, 9) }, -}; +static const struct reg_whitelist { + i915_reg_t offset_ldw; + i915_reg_t offset_udw; + u16 gen_mask; + u8 size; +} reg_read_whitelist[] = { { + .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), + .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), + .gen_mask = INTEL_GEN_MASK(4, 10), + .size = 8 +} }; int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_reg_read *reg = data; - struct register_whitelist const *entry = whitelist; - unsigned size; - i915_reg_t offset_ldw, offset_udw; - int i, ret = 0; - - for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { - if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) && - (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask)) + struct reg_whitelist const *entry; + unsigned int flags; + int remain; + int ret = 0; + + entry = reg_read_whitelist; + remain = ARRAY_SIZE(reg_read_whitelist); + while (remain) { + u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw); + + GEM_BUG_ON(!is_power_of_2(entry->size)); + GEM_BUG_ON(entry->size > 8); + GEM_BUG_ON(entry_offset & (entry->size - 1)); + + if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask && + entry_offset == (reg->offset & -entry->size)) break; + entry++; + remain--; } - if (i == ARRAY_SIZE(whitelist)) + if (!remain) return -EINVAL; - /* We use the low bits to encode extra flags as the register should - * be naturally aligned (and those that are not so aligned merely - * limit the available flags for that register). - */ - offset_ldw = entry->offset_ldw; - offset_udw = entry->offset_udw; - size = entry->size; - size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw); + flags = reg->offset & (entry->size - 1); intel_runtime_pm_get(dev_priv); - - switch (size) { - case 8 | 1: - reg->val = I915_READ64_2x32(offset_ldw, offset_udw); - break; - case 8: - reg->val = I915_READ64(offset_ldw); - break; - case 4: - reg->val = I915_READ(offset_ldw); - break; - case 2: - reg->val = I915_READ16(offset_ldw); - break; - case 1: - reg->val = I915_READ8(offset_ldw); - break; - default: + if (entry->size == 8 && flags == I915_REG_READ_8B_WA) + reg->val = I915_READ64_2x32(entry->offset_ldw, + entry->offset_udw); + else if (entry->size == 8 && flags == 0) + reg->val = I915_READ64(entry->offset_ldw); + else if (entry->size == 4 && flags == 0) + reg->val = I915_READ(entry->offset_ldw); + else if (entry->size == 2 && flags == 0) + reg->val = I915_READ16(entry->offset_ldw); + else if (entry->size == 1 && flags == 0) + reg->val = I915_READ8(entry->offset_ldw); + else ret = -EINVAL; - goto out; - } - -out: intel_runtime_pm_put(dev_priv); + return ret; } @@ -1567,12 +1626,14 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, } /** - * intel_wait_for_register - wait until register matches expected state + * __intel_wait_for_register - wait until register matches expected state * @dev_priv: the i915 device * @reg: the register to read * @mask: mask to apply to register value * @value: expected value - * @timeout_ms: timeout in millisecond + * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait + * @slow_timeout_ms: slow timeout in millisecond + * @out_value: optional placeholder to hold registry value * * This routine waits until the target register @reg contains the 
expected * @value after applying the @mask, i.e. it waits until :: @@ -1583,14 +1644,17 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, * * Returns 0 if the register matches the desired condition, or -ETIMEOUT. */ -int intel_wait_for_register(struct drm_i915_private *dev_priv, +int __intel_wait_for_register(struct drm_i915_private *dev_priv, i915_reg_t reg, u32 mask, u32 value, - unsigned int timeout_ms) + unsigned int fast_timeout_us, + unsigned int slow_timeout_ms, + u32 *out_value) { unsigned fw = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ); + u32 reg_value; int ret; might_sleep(); @@ -1600,14 +1664,18 @@ int intel_wait_for_register(struct drm_i915_private *dev_priv, ret = __intel_wait_for_register_fw(dev_priv, reg, mask, value, - 2, 0, NULL); + fast_timeout_us, 0, ®_value); intel_uncore_forcewake_put__locked(dev_priv, fw); spin_unlock_irq(&dev_priv->uncore.lock); if (ret) - ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value, - timeout_ms); + ret = __wait_for(reg_value = I915_READ_NOTRACE(reg), + (reg_value & mask) == value, + slow_timeout_ms * 1000, 10, 1000); + + if (out_value) + *out_value = reg_value; return ret; } @@ -1662,7 +1730,7 @@ typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask); static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) { - if (!i915.reset) + if (!i915_modparams.reset) return NULL; if (INTEL_INFO(dev_priv)->gen >= 8) @@ -1722,7 +1790,7 @@ bool intel_has_reset_engine(struct drm_i915_private *dev_priv) { return (dev_priv->info.has_reset_engine && !dev_priv->guc.execbuf_client && - i915.reset >= 2); + i915_modparams.reset >= 2); } int intel_guc_reset(struct drm_i915_private *dev_priv) @@ -1747,15 +1815,15 @@ bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv) bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) { - if (unlikely(i915.mmio_debug || + if (unlikely(i915_modparams.mmio_debug || dev_priv->uncore.unclaimed_mmio_check <= 0)) return false; if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) { DRM_DEBUG("Unclaimed register detected, " "enabling oneshot unclaimed register reporting. 
" - "Please use i915.mmio_debug=N for more information.\n"); - i915.mmio_debug++; + "Please use i915_modparams.mmio_debug=N for more information.\n"); + i915_modparams.mmio_debug++; dev_priv->uncore.unclaimed_mmio_check--; return true; } diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 5f90278da461..8ed41edc8d5b 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -102,6 +102,13 @@ struct intel_uncore { i915_reg_t reg_ack; } fw_domain[FW_DOMAIN_ID_COUNT]; + struct { + unsigned int count; + + int saved_mmio_check; + int saved_mmio_debug; + } user_forcewake; + int unclaimed_mmio_check; }; @@ -121,6 +128,7 @@ bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv void intel_uncore_fini(struct drm_i915_private *dev_priv); void intel_uncore_suspend(struct drm_i915_private *dev_priv); void intel_uncore_resume_early(struct drm_i915_private *dev_priv); +void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv); u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); @@ -144,11 +152,28 @@ void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, enum forcewake_domains domains); +void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv); +void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv); + + +int __intel_wait_for_register(struct drm_i915_private *dev_priv, + i915_reg_t reg, + u32 mask, + u32 value, + unsigned int fast_timeout_us, + unsigned int slow_timeout_ms, + u32 *out_value); + +static inline int intel_wait_for_register(struct drm_i915_private *dev_priv, i915_reg_t reg, u32 mask, u32 value, - unsigned int timeout_ms); + unsigned int timeout_ms) +{ + return __intel_wait_for_register(dev_priv, reg, mask, value, 2, + timeout_ms, NULL); +} int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, i915_reg_t reg, u32 mask, diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index 02e52a146ed8..1f7cad2a972f 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -342,7 +342,7 @@ static int igt_global_reset(void *arg) mutex_lock(&i915->drm.struct_mutex); reset_count = i915_reset_count(&i915->gpu_error); - i915_reset(i915, I915_RESET_QUIET); + i915_reset(i915); if (i915_reset_count(&i915->gpu_error) == reset_count) { pr_err("No GPU reset recorded!\n"); @@ -378,7 +378,7 @@ static int igt_reset_engine(void *arg) reset_engine_count = i915_reset_engine_count(&i915->gpu_error, engine); - err = i915_reset_engine(engine, I915_RESET_QUIET); + err = i915_reset_engine(engine, NULL); if (err) { pr_err("i915_reset_engine failed\n"); break; @@ -511,7 +511,7 @@ static int igt_reset_active_engines(void *arg) set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags); do { - err = i915_reset_engine(engine, I915_RESET_QUIET); + err = i915_reset_engine(engine, NULL); if (err) { pr_err("i915_reset_engine(%s) failed, err=%d\n", engine->name, err); @@ -718,7 +718,7 @@ static int igt_reset_queue(void *arg) reset_count = fake_hangcheck(prev); - i915_reset(i915, I915_RESET_QUIET); + i915_reset(i915); GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); @@ -819,7 +819,7 @@ static int igt_handle_error(void *arg) engine->hangcheck.stalled = true; 
engine->hangcheck.seqno = intel_engine_get_seqno(engine); - i915_handle_error(i915, intel_engine_flag(engine), "%s", __func__); + i915_handle_error(i915, intel_engine_flag(engine), 0, NULL); xchg(&i915->gpu_error.first_error, error); diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 53e0b24beda6..d976391dfa31 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -225,7 +225,11 @@ static void ipu_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { drm_crtc_vblank_on(crtc); +} +static void ipu_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ spin_lock_irq(&crtc->dev->event_lock); if (crtc->state->event) { WARN_ON(drm_crtc_vblank_get(crtc)); @@ -293,6 +297,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = { .mode_set_nofb = ipu_crtc_mode_set_nofb, .atomic_check = ipu_crtc_atomic_check, .atomic_begin = ipu_crtc_atomic_begin, + .atomic_flush = ipu_crtc_atomic_flush, .atomic_disable = ipu_crtc_atomic_disable, .atomic_enable = ipu_crtc_atomic_enable, }; diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 5155f0179b61..05520202c967 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c @@ -36,6 +36,7 @@ #include "meson_venc.h" #include "meson_vpp.h" #include "meson_viu.h" +#include "meson_canvas.h" #include "meson_registers.h" /* CRTC definition */ @@ -192,6 +193,11 @@ void meson_crtc_irq(struct meson_drm *priv) } else meson_vpp_disable_interlace_vscaler_osd1(priv); + meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, + priv->viu.osd1_addr, priv->viu.osd1_stride, + priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE, + MESON_CANVAS_BLKMODE_LINEAR); + /* Enable OSD1 */ writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND, priv->io_base + _REG(VPP_MISC)); diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 7742c7d81ed8..4ad8223c60ea 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -180,40 +180,51 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu"); regs = devm_ioremap_resource(dev, res); - if (IS_ERR(regs)) - return PTR_ERR(regs); + if (IS_ERR(regs)) { + ret = PTR_ERR(regs); + goto free_drm; + } priv->io_base = regs; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi"); /* Simply ioremap since it may be a shared register zone */ regs = devm_ioremap(dev, res->start, resource_size(res)); - if (!regs) - return -EADDRNOTAVAIL; + if (!regs) { + ret = -EADDRNOTAVAIL; + goto free_drm; + } priv->hhi = devm_regmap_init_mmio(dev, regs, &meson_regmap_config); if (IS_ERR(priv->hhi)) { dev_err(&pdev->dev, "Couldn't create the HHI regmap\n"); - return PTR_ERR(priv->hhi); + ret = PTR_ERR(priv->hhi); + goto free_drm; } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc"); /* Simply ioremap since it may be a shared register zone */ regs = devm_ioremap(dev, res->start, resource_size(res)); - if (!regs) - return -EADDRNOTAVAIL; + if (!regs) { + ret = -EADDRNOTAVAIL; + goto free_drm; + } priv->dmc = devm_regmap_init_mmio(dev, regs, &meson_regmap_config); if (IS_ERR(priv->dmc)) { dev_err(&pdev->dev, "Couldn't create the DMC regmap\n"); - return PTR_ERR(priv->dmc); + ret = PTR_ERR(priv->dmc); + goto free_drm; } priv->vsync_irq = platform_get_irq(pdev, 0); - drm_vblank_init(drm, 1); + ret = drm_vblank_init(drm, 1); + 
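+	/* propagate drm_vblank_init() failure instead of ignoring it */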
if (ret) + goto free_drm; + drm_mode_config_init(drm); drm->mode_config.max_width = 3840; drm->mode_config.max_height = 2160; diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h index 5e8b392b9d1f..8450d6ac8c9b 100644 --- a/drivers/gpu/drm/meson/meson_drv.h +++ b/drivers/gpu/drm/meson/meson_drv.h @@ -43,6 +43,9 @@ struct meson_drm { bool osd1_commit; uint32_t osd1_ctrl_stat; uint32_t osd1_blk0_cfg[5]; + uint32_t osd1_addr; + uint32_t osd1_stride; + uint32_t osd1_height; } viu; struct { diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index 17e96fa47868..0b6011b8d632 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c @@ -164,10 +164,9 @@ static void meson_plane_atomic_update(struct drm_plane *plane, /* Update Canvas with buffer address */ gem = drm_fb_cma_get_gem_obj(fb, 0); - meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, - gem->paddr, fb->pitches[0], - fb->height, MESON_CANVAS_WRAP_NONE, - MESON_CANVAS_BLKMODE_LINEAR); + priv->viu.osd1_addr = gem->paddr; + priv->viu.osd1_stride = fb->pitches[0]; + priv->viu.osd1_height = fb->height; spin_unlock_irqrestore(&priv->drm->event_lock, flags); } diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index deaf869374ea..a9a0b56f1fbc 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -740,7 +740,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt( switch (mipi_fmt) { case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888; case MIPI_DSI_FMT_RGB666_PACKED: - case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666; + case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666; case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565; default: return CMD_DST_FORMAT_RGB888; } diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c index fe15aa64086f..71fe60e5f01f 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c @@ -698,7 +698,7 @@ static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw, val &= div_mask(width); return divider_recalc_rate(hw, parent_rate, val, NULL, - postdiv->flags); + postdiv->flags, width); } static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw, diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index c178563fcd4d..456622b46335 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -92,8 +92,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, if (IS_ERR(fb)) { dev_err(dev->dev, "failed to allocate fb\n"); - ret = PTR_ERR(fb); - goto fail; + return PTR_ERR(fb); } bo = msm_framebuffer_bo(fb, 0); @@ -151,13 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, fail_unlock: mutex_unlock(&dev->struct_mutex); -fail: - - if (ret) { - if (fb) - drm_framebuffer_remove(fb); - } - + drm_framebuffer_remove(fb); return ret; } diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index ea5bb0e1632c..f2df718af370 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -93,14 +93,17 @@ static struct page **get_pages(struct drm_gem_object *obj) return p; } + msm_obj->pages = p; + msm_obj->sgt = drm_prime_pages_to_sg(p, npages); if (IS_ERR(msm_obj->sgt)) { + void *ptr = ERR_CAST(msm_obj->sgt); + dev_err(dev->dev, "failed to allocate sgt\n"); - return ERR_CAST(msm_obj->sgt); + msm_obj->sgt = NULL; + return ptr; } - 
msm_obj->pages = p; - /* For non-cached buffers, ensure the new pages are clean * because display controller, GPU, etc. are not coherent: */ @@ -129,14 +132,19 @@ static void put_pages(struct drm_gem_object *obj) struct msm_gem_object *msm_obj = to_msm_bo(obj); if (msm_obj->pages) { - /* For non-cached buffers, ensure the new pages are clean - * because display controller, GPU, etc. are not coherent: - */ - if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) - dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); - sg_free_table(msm_obj->sgt); - kfree(msm_obj->sgt); + if (msm_obj->sgt) { + /* For non-cached buffers, ensure the new + * pages are clean because display controller, + * GPU, etc. are not coherent: + */ + if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) + dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, + DMA_BIDIRECTIONAL); + + sg_free_table(msm_obj->sgt); + kfree(msm_obj->sgt); + } if (use_pages(obj)) drm_gem_put_pages(obj, msm_obj->pages, true, false); diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 5b9d549aa791..e7926da59214 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -55,6 +55,9 @@ nv04_display_create(struct drm_device *dev) nouveau_display(dev)->init = nv04_display_init; nouveau_display(dev)->fini = nv04_display_fini; + /* Pre-nv50 doesn't support atomic, so don't expose the ioctls */ + dev->driver->driver_features &= ~DRIVER_ATOMIC; + nouveau_hw_save_vga_fonts(dev, 1); nv04_crtc_create(dev, 0); diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 380f340204e8..408b955e5c39 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -134,7 +134,7 @@ nv50_get_intensity(struct backlight_device *bd) struct nouveau_encoder *nv_encoder = bl_get_data(bd); struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); struct nvif_object *device = &drm->client.device.object; - int or = nv_encoder->or; + int or = ffs(nv_encoder->dcb->or) - 1; u32 div = 1025; u32 val; @@ -149,7 +149,7 @@ nv50_set_intensity(struct backlight_device *bd) struct nouveau_encoder *nv_encoder = bl_get_data(bd); struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); struct nvif_object *device = &drm->client.device.object; - int or = nv_encoder->or; + int or = ffs(nv_encoder->dcb->or) - 1; u32 div = 1025; u32 val = (bd->props.brightness * div) / 100; @@ -170,7 +170,7 @@ nva3_get_intensity(struct backlight_device *bd) struct nouveau_encoder *nv_encoder = bl_get_data(bd); struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); struct nvif_object *device = &drm->client.device.object; - int or = nv_encoder->or; + int or = ffs(nv_encoder->dcb->or) - 1; u32 div, val; div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); @@ -188,7 +188,7 @@ nva3_set_intensity(struct backlight_device *bd) struct nouveau_encoder *nv_encoder = bl_get_data(bd); struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); struct nvif_object *device = &drm->client.device.object; - int or = nv_encoder->or; + int or = ffs(nv_encoder->dcb->or) - 1; u32 div, val; div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); @@ -228,7 +228,7 @@ nv50_backlight_init(struct drm_connector *connector) return -ENODEV; } - if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) + if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 
1))) return 0; if (drm->client.device.info.chipset <= 0xa0 || @@ -267,15 +267,17 @@ nouveau_backlight_init(struct drm_device *dev) struct nouveau_drm *drm = nouveau_drm(dev); struct nvif_device *device = &drm->client.device; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + + INIT_LIST_HEAD(&drm->bl_connectors); if (apple_gmux_present()) { NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n"); return 0; } - INIT_LIST_HEAD(&drm->bl_connectors); - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && connector->connector_type != DRM_MODE_CONNECTOR_eDP) continue; @@ -292,7 +294,7 @@ nouveau_backlight_init(struct drm_device *dev) break; } } - + drm_connector_list_iter_end(&conn_iter); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 70d8e0d69ad5..430830d63a33 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -570,9 +570,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) nv_connector->edid = NULL; } - ret = pm_runtime_get_sync(connector->dev->dev); - if (ret < 0 && ret != -EACCES) - return conn_status; + /* Outputs are only polled while runtime active, so acquiring a + * runtime PM ref here is unnecessary (and would deadlock upon + * runtime suspend because it waits for polling to finish). + */ + if (!drm_kms_helper_is_poll_worker()) { + ret = pm_runtime_get_sync(connector->dev->dev); + if (ret < 0 && ret != -EACCES) + return conn_status; + } nv_encoder = nouveau_connector_ddc_detect(connector); if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) { @@ -647,8 +653,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return conn_status; } @@ -1200,14 +1208,19 @@ nouveau_connector_create(struct drm_device *dev, int index) struct nouveau_display *disp = nouveau_display(dev); struct nouveau_connector *nv_connector = NULL; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; int type, ret = 0; bool dummy; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { nv_connector = nouveau_connector(connector); - if (nv_connector->index == index) + if (nv_connector->index == index) { + drm_connector_list_iter_end(&conn_iter); return connector; + } } + drm_connector_list_iter_end(&conn_iter); nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); if (!nv_connector) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index a4d1a059bd3d..dc7454e7f19a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -33,6 +33,7 @@ #include #include #include "nouveau_crtc.h" +#include "nouveau_encoder.h" struct nvkm_i2c_port; @@ -60,19 +61,46 @@ static inline struct nouveau_connector *nouveau_connector( return container_of(con, struct nouveau_connector, base); } +static inline bool 
+nouveau_connector_is_mst(struct drm_connector *connector) +{ + const struct nouveau_encoder *nv_encoder; + const struct drm_encoder *encoder; + + if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) + return false; + + nv_encoder = find_encoder(connector, DCB_OUTPUT_ANY); + if (!nv_encoder) + return false; + + encoder = &nv_encoder->base.base; + return encoder->encoder_type == DRM_MODE_ENCODER_DPMST; +} + +#define nouveau_for_each_non_mst_connector_iter(connector, iter) \ + drm_for_each_connector_iter(connector, iter) \ + for_each_if(!nouveau_connector_is_mst(connector)) + static inline struct nouveau_connector * nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc) { struct drm_device *dev = nv_crtc->base.dev; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct nouveau_connector *nv_connector = NULL; struct drm_crtc *crtc = to_drm_crtc(nv_crtc); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->encoder && connector->encoder->crtc == crtc) - return nouveau_connector(connector); + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { + if (connector->encoder && connector->encoder->crtc == crtc) { + nv_connector = nouveau_connector(connector); + break; + } } + drm_connector_list_iter_end(&conn_iter); - return NULL; + return nv_connector; } struct drm_connector * diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 2e7785f49e6d..caf53503c0f7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -405,6 +405,7 @@ nouveau_display_init(struct drm_device *dev) struct nouveau_display *disp = nouveau_display(dev); struct nouveau_drm *drm = nouveau_drm(dev); struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; int ret; ret = disp->init(dev); @@ -412,10 +413,12 @@ nouveau_display_init(struct drm_device *dev) return ret; /* enable hotplug interrupts */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { struct nouveau_connector *conn = nouveau_connector(connector); nvif_notify_get(&conn->hpd); } + drm_connector_list_iter_end(&conn_iter); /* enable flip completion events */ nvif_notify_get(&drm->flip); @@ -428,6 +431,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) struct nouveau_display *disp = nouveau_display(dev); struct nouveau_drm *drm = nouveau_drm(dev); struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; if (!suspend) { if (drm_drv_uses_atomic_modeset(dev)) @@ -440,10 +444,12 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) nvif_notify_put(&drm->flip); /* disable hotplug interrupts */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { struct nouveau_connector *conn = nouveau_connector(connector); nvif_notify_put(&conn->hpd); } + drm_connector_list_iter_end(&conn_iter); drm_kms_helper_poll_disable(dev); disp->fini(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 595630d1fb9e..362a34cb435d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -79,6 +79,10 @@ MODULE_PARM_DESC(modeset, "enable driver 
(default: auto, " int nouveau_modeset = -1; module_param_named(modeset, nouveau_modeset, int, 0400); +MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)"); +static int nouveau_atomic = 0; +module_param_named(atomic, nouveau_atomic, int, 0400); + MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)"); static int nouveau_runtime_pm = -1; module_param_named(runpm, nouveau_runtime_pm, int, 0400); @@ -383,6 +387,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev, pci_set_master(pdev); + if (nouveau_atomic) + driver_pci.driver_features |= DRIVER_ATOMIC; + ret = drm_get_pci_dev(pdev, pent, &driver_pci); if (ret) { nvkm_device_del(&device); diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index fb47d46050ec..926ec51ba5be 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -3216,10 +3216,11 @@ nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr, drm_connector_unregister(&mstc->connector); - drm_modeset_lock_all(drm->dev); drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector); + + drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL); mstc->port = NULL; - drm_modeset_unlock_all(drm->dev); + drm_modeset_unlock(&drm->dev->mode_config.connection_mutex); drm_connector_unreference(&mstc->connector); } @@ -3229,9 +3230,7 @@ nv50_mstm_register_connector(struct drm_connector *connector) { struct nouveau_drm *drm = nouveau_drm(connector->dev); - drm_modeset_lock_all(drm->dev); drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector); - drm_modeset_unlock_all(drm->dev); drm_connector_register(connector); } @@ -4151,7 +4150,7 @@ nv50_disp_atomic_commit(struct drm_device *dev, nv50_disp_atomic_commit_tail(state); drm_for_each_crtc(crtc, dev) { - if (crtc->state->enable) { + if (crtc->state->active) { if (!drm->have_disp_power_ref) { drm->have_disp_power_ref = true; return 0; @@ -4399,10 +4398,6 @@ nv50_display_destroy(struct drm_device *dev) kfree(disp); } -MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)"); -static int nouveau_atomic = 0; -module_param_named(atomic, nouveau_atomic, int, 0400); - int nv50_display_create(struct drm_device *dev) { @@ -4426,8 +4421,7 @@ nv50_display_create(struct drm_device *dev) nouveau_display(dev)->fini = nv50_display_fini; disp->disp = &nouveau_display(dev)->disp; dev->mode_config.funcs = &nv50_disp_func; - if (nouveau_atomic) - dev->driver->driver_features |= DRIVER_ATOMIC; + dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP; /* small shared memory area we use for notifiers and semaphores */ ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c index a2978a37b4f3..700fc754f28a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c @@ -174,6 +174,7 @@ gf119_sor = { .links = gf119_sor_dp_links, .power = g94_sor_dp_power, .pattern = gf119_sor_dp_pattern, + .drive = gf119_sor_dp_drive, .vcpi = gf119_sor_dp_vcpi, .audio = gf119_sor_dp_audio, .audio_sym = gf119_sor_dp_audio_sym, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c index a7e55c422501..0b632dc0cf7d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c @@ -155,10 +155,10 @@ 
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl) (target << 28)); nvkm_wr32(device, 0x002274, (runl << 20) | nr); - if (wait_event_timeout(fifo->runlist[runl].wait, - !(nvkm_rd32(device, 0x002284 + (runl * 0x08)) - & 0x00100000), - msecs_to_jiffies(2000)) == 0) + if (nvkm_msec(device, 2000, + if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000)) + break; + ) < 0) nvkm_error(subdev, "runlist %d update timeout\n", runl); unlock: mutex_unlock(&subdev->mutex); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c index a4cb82495cee..245c946ea661 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c @@ -136,6 +136,13 @@ nvkm_pci_init(struct nvkm_subdev *subdev) return ret; pci->irq = pdev->irq; + + /* Ensure MSI interrupts are armed, for the case where there are + * already interrupts pending (for whatever reason) at load time. + */ + if (pci->msi) + pci->func->msi_rearm(pci); + return ret; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h index 53d01fb00a8b..1dbe593e5960 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h @@ -47,8 +47,8 @@ static uint32_t gf100_pmu_data[] = { 0x00000000, 0x00000000, 0x584d454d, - 0x00000756, - 0x00000748, + 0x00000754, + 0x00000746, 0x00000000, 0x00000000, 0x00000000, @@ -69,8 +69,8 @@ static uint32_t gf100_pmu_data[] = { 0x00000000, 0x00000000, 0x46524550, - 0x0000075a, 0x00000758, + 0x00000756, 0x00000000, 0x00000000, 0x00000000, @@ -91,8 +91,8 @@ static uint32_t gf100_pmu_data[] = { 0x00000000, 0x00000000, 0x5f433249, - 0x00000b8a, - 0x00000a2d, + 0x00000b88, + 0x00000a2b, 0x00000000, 0x00000000, 0x00000000, @@ -113,8 +113,8 @@ static uint32_t gf100_pmu_data[] = { 0x00000000, 0x00000000, 0x54534554, - 0x00000bb3, - 0x00000b8c, + 0x00000bb1, + 0x00000b8a, 0x00000000, 0x00000000, 0x00000000, @@ -135,8 +135,8 @@ static uint32_t gf100_pmu_data[] = { 0x00000000, 0x00000000, 0x454c4449, - 0x00000bbf, 0x00000bbd, + 0x00000bbb, 0x00000000, 0x00000000, 0x00000000, @@ -237,19 +237,19 @@ static uint32_t gf100_pmu_data[] = { 0x000005d3, 0x00000003, 0x00000002, - 0x0000069d, + 0x0000069b, 0x00040004, 0x00000000, - 0x000006b9, + 0x000006b7, 0x00010005, 0x00000000, - 0x000006d6, + 0x000006d4, 0x00010006, 0x00000000, 0x0000065b, 0x00000007, 0x00000000, - 0x000006e1, + 0x000006df, /* 0x03c4: memx_func_tail */ /* 0x03c4: memx_ts_start */ 0x00000000, @@ -1373,432 +1373,432 @@ static uint32_t gf100_pmu_code[] = { /* 0x065b: memx_func_wait_vblank */ 0x9800f840, 0x66b00016, - 0x130bf400, + 0x120bf400, 0xf40166b0, 0x0ef4060b, /* 0x066d: memx_func_wait_vblank_head1 */ - 0x2077f12e, - 0x070ef400, -/* 0x0674: memx_func_wait_vblank_head0 */ - 0x000877f1, -/* 0x0678: memx_func_wait_vblank_0 */ - 0x07c467f1, - 0xcf0664b6, - 0x67fd0066, - 0xf31bf404, -/* 0x0688: memx_func_wait_vblank_1 */ - 0x07c467f1, - 0xcf0664b6, - 0x67fd0066, - 0xf30bf404, -/* 0x0698: memx_func_wait_vblank_fini */ - 0xf80410b6, -/* 0x069d: memx_func_wr32 */ - 0x00169800, - 0xb6011598, - 0x60f90810, - 0xd0fc50f9, - 0x21f4e0fc, - 0x0242b640, - 0xf8e91bf4, -/* 0x06b9: memx_func_wait */ - 0x2c87f000, - 0xcf0684b6, - 0x1e980088, - 0x011d9800, - 0x98021c98, - 0x10b6031b, - 0xa321f410, -/* 0x06d6: memx_func_delay */ - 0x1e9800f8, - 0x0410b600, - 0xf87e21f4, -/* 0x06e1: memx_func_train */ -/* 0x06e3: memx_exec */ - 0xf900f800, - 
0xb9d0f9e0, - 0xb2b902c1, -/* 0x06ed: memx_exec_next */ - 0x00139802, - 0xe70410b6, - 0xe701f034, - 0xb601e033, - 0x30f00132, - 0xde35980c, - 0x12b855f9, - 0xe41ef406, - 0x98f10b98, - 0xcbbbf20c, - 0xc4b7f102, - 0x06b4b607, - 0xfc00bbcf, - 0xf5e0fcd0, - 0xf8033621, -/* 0x0729: memx_info */ - 0x01c67000, -/* 0x072f: memx_info_data */ - 0xf10e0bf4, - 0xf103ccc7, - 0xf40800b7, -/* 0x073a: memx_info_train */ - 0xc7f10b0e, - 0xb7f10bcc, -/* 0x0742: memx_info_send */ - 0x21f50100, - 0x00f80336, -/* 0x0748: memx_recv */ - 0xf401d6b0, - 0xd6b0980b, - 0xd80bf400, -/* 0x0756: memx_init */ - 0x00f800f8, -/* 0x0758: perf_recv */ -/* 0x075a: perf_init */ + 0x2077f02c, +/* 0x0673: memx_func_wait_vblank_head0 */ + 0xf0060ef4, +/* 0x0676: memx_func_wait_vblank_0 */ + 0x67f10877, + 0x64b607c4, + 0x0066cf06, + 0xf40467fd, +/* 0x0686: memx_func_wait_vblank_1 */ + 0x67f1f31b, + 0x64b607c4, + 0x0066cf06, + 0xf40467fd, +/* 0x0696: memx_func_wait_vblank_fini */ + 0x10b6f30b, +/* 0x069b: memx_func_wr32 */ + 0x9800f804, + 0x15980016, + 0x0810b601, + 0x50f960f9, + 0xe0fcd0fc, + 0xb64021f4, + 0x1bf40242, +/* 0x06b7: memx_func_wait */ + 0xf000f8e9, + 0x84b62c87, + 0x0088cf06, + 0x98001e98, + 0x1c98011d, + 0x031b9802, + 0xf41010b6, + 0x00f8a321, +/* 0x06d4: memx_func_delay */ + 0xb6001e98, + 0x21f40410, +/* 0x06df: memx_func_train */ + 0xf800f87e, +/* 0x06e1: memx_exec */ + 0xf9e0f900, + 0x02c1b9d0, +/* 0x06eb: memx_exec_next */ + 0x9802b2b9, + 0x10b60013, + 0xf034e704, + 0xe033e701, + 0x0132b601, + 0x980c30f0, + 0x55f9de35, + 0xf40612b8, + 0x0b98e41e, + 0xf20c98f1, + 0xf102cbbb, + 0xb607c4b7, + 0xbbcf06b4, + 0xfcd0fc00, + 0x3621f5e0, +/* 0x0727: memx_info */ + 0x7000f803, + 0x0bf401c6, +/* 0x072d: memx_info_data */ + 0xccc7f10e, + 0x00b7f103, + 0x0b0ef408, +/* 0x0738: memx_info_train */ + 0x0bccc7f1, + 0x0100b7f1, +/* 0x0740: memx_info_send */ + 0x033621f5, +/* 0x0746: memx_recv */ + 0xd6b000f8, + 0x980bf401, + 0xf400d6b0, + 0x00f8d80b, +/* 0x0754: memx_init */ +/* 0x0756: perf_recv */ 0x00f800f8, -/* 0x075c: i2c_drive_scl */ - 0xf40036b0, - 0x07f1110b, - 0x04b607e0, - 0x0001d006, - 0x00f804bd, -/* 0x0770: i2c_drive_scl_lo */ - 0x07e407f1, - 0xd00604b6, - 0x04bd0001, -/* 0x077e: i2c_drive_sda */ +/* 0x0758: perf_init */ +/* 0x075a: i2c_drive_scl */ 0x36b000f8, 0x110bf400, 0x07e007f1, 0xd00604b6, - 0x04bd0002, -/* 0x0792: i2c_drive_sda_lo */ + 0x04bd0001, +/* 0x076e: i2c_drive_scl_lo */ 0x07f100f8, 0x04b607e4, + 0x0001d006, + 0x00f804bd, +/* 0x077c: i2c_drive_sda */ + 0xf40036b0, + 0x07f1110b, + 0x04b607e0, 0x0002d006, 0x00f804bd, -/* 0x07a0: i2c_sense_scl */ - 0xf10132f4, - 0xb607c437, - 0x33cf0634, - 0x0431fd00, - 0xf4060bf4, -/* 0x07b6: i2c_sense_scl_done */ - 0x00f80131, -/* 0x07b8: i2c_sense_sda */ - 0xf10132f4, - 0xb607c437, - 0x33cf0634, - 0x0432fd00, - 0xf4060bf4, -/* 0x07ce: i2c_sense_sda_done */ - 0x00f80131, -/* 0x07d0: i2c_raise_scl */ - 0x47f140f9, - 0x37f00898, - 0x5c21f501, -/* 0x07dd: i2c_raise_scl_wait */ - 0xe8e7f107, - 0x7e21f403, - 0x07a021f5, - 0xb60901f4, - 0x1bf40142, -/* 0x07f1: i2c_raise_scl_done */ - 0xf840fcef, -/* 0x07f5: i2c_start */ - 0xa021f500, - 0x0d11f407, - 0x07b821f5, - 0xf40611f4, -/* 0x0806: i2c_start_rep */ - 0x37f0300e, - 0x5c21f500, - 0x0137f007, - 0x077e21f5, - 0xb60076bb, - 0x50f90465, - 0xbb046594, - 0x50bd0256, - 0xfc0475fd, - 0xd021f550, - 0x0464b607, -/* 0x0833: i2c_start_send */ - 0xf01f11f4, +/* 0x0790: i2c_drive_sda_lo */ + 0x07e407f1, + 0xd00604b6, + 0x04bd0002, +/* 0x079e: i2c_sense_scl */ + 0x32f400f8, + 0xc437f101, + 0x0634b607, + 0xfd0033cf, + 0x0bf40431, + 
0x0131f406, +/* 0x07b4: i2c_sense_scl_done */ +/* 0x07b6: i2c_sense_sda */ + 0x32f400f8, + 0xc437f101, + 0x0634b607, + 0xfd0033cf, + 0x0bf40432, + 0x0131f406, +/* 0x07cc: i2c_sense_sda_done */ +/* 0x07ce: i2c_raise_scl */ + 0x40f900f8, + 0x089847f1, + 0xf50137f0, +/* 0x07db: i2c_raise_scl_wait */ + 0xf1075a21, + 0xf403e8e7, + 0x21f57e21, + 0x01f4079e, + 0x0142b609, +/* 0x07ef: i2c_raise_scl_done */ + 0xfcef1bf4, +/* 0x07f3: i2c_start */ + 0xf500f840, + 0xf4079e21, + 0x21f50d11, + 0x11f407b6, + 0x300ef406, +/* 0x0804: i2c_start_rep */ + 0xf50037f0, + 0xf0075a21, + 0x21f50137, + 0x76bb077c, + 0x0465b600, + 0x659450f9, + 0x0256bb04, + 0x75fd50bd, + 0xf550fc04, + 0xb607ce21, + 0x11f40464, +/* 0x0831: i2c_start_send */ + 0x0037f01f, + 0x077c21f5, + 0x1388e7f1, + 0xf07e21f4, 0x21f50037, - 0xe7f1077e, + 0xe7f1075a, 0x21f41388, - 0x0037f07e, - 0x075c21f5, - 0x1388e7f1, -/* 0x084f: i2c_start_out */ - 0xf87e21f4, -/* 0x0851: i2c_stop */ - 0x0037f000, - 0x075c21f5, - 0xf50037f0, - 0xf1077e21, - 0xf403e8e7, - 0x37f07e21, - 0x5c21f501, - 0x88e7f107, - 0x7e21f413, +/* 0x084d: i2c_start_out */ +/* 0x084f: i2c_stop */ + 0xf000f87e, + 0x21f50037, + 0x37f0075a, + 0x7c21f500, + 0xe8e7f107, + 0x7e21f403, 0xf50137f0, - 0xf1077e21, + 0xf1075a21, 0xf41388e7, - 0x00f87e21, -/* 0x0884: i2c_bitw */ - 0x077e21f5, - 0x03e8e7f1, - 0xbb7e21f4, - 0x65b60076, - 0x9450f904, - 0x56bb0465, - 0xfd50bd02, - 0x50fc0475, - 0x07d021f5, - 0xf40464b6, - 0xe7f11811, - 0x21f41388, - 0x0037f07e, - 0x075c21f5, - 0x1388e7f1, -/* 0x08c3: i2c_bitw_out */ - 0xf87e21f4, -/* 0x08c5: i2c_bitr */ - 0x0137f000, - 0x077e21f5, - 0x03e8e7f1, - 0xbb7e21f4, - 0x65b60076, - 0x9450f904, - 0x56bb0465, - 0xfd50bd02, - 0x50fc0475, - 0x07d021f5, - 0xf40464b6, - 0x21f51b11, - 0x37f007b8, - 0x5c21f500, + 0x37f07e21, + 0x7c21f501, 0x88e7f107, 0x7e21f413, - 0xf4013cf0, -/* 0x090a: i2c_bitr_done */ - 0x00f80131, -/* 0x090c: i2c_get_byte */ - 0xf00057f0, -/* 0x0912: i2c_get_byte_next */ - 0x54b60847, - 0x0076bb01, +/* 0x0882: i2c_bitw */ + 0x21f500f8, + 0xe7f1077c, + 0x21f403e8, + 0x0076bb7e, 0xf90465b6, 0x04659450, 0xbd0256bb, 0x0475fd50, 0x21f550fc, - 0x64b608c5, - 0x2b11f404, - 0xb60553fd, - 0x1bf40142, - 0x0137f0d8, - 0xb60076bb, - 0x50f90465, - 0xbb046594, - 0x50bd0256, - 0xfc0475fd, - 0x8421f550, - 0x0464b608, -/* 0x095c: i2c_get_byte_done */ -/* 0x095e: i2c_put_byte */ - 0x47f000f8, -/* 0x0961: i2c_put_byte_next */ - 0x0142b608, - 0xbb3854ff, + 0x64b607ce, + 0x1811f404, + 0x1388e7f1, + 0xf07e21f4, + 0x21f50037, + 0xe7f1075a, + 0x21f41388, +/* 0x08c1: i2c_bitw_out */ +/* 0x08c3: i2c_bitr */ + 0xf000f87e, + 0x21f50137, + 0xe7f1077c, + 0x21f403e8, + 0x0076bb7e, + 0xf90465b6, + 0x04659450, + 0xbd0256bb, + 0x0475fd50, + 0x21f550fc, + 0x64b607ce, + 0x1b11f404, + 0x07b621f5, + 0xf50037f0, + 0xf1075a21, + 0xf41388e7, + 0x3cf07e21, + 0x0131f401, +/* 0x0908: i2c_bitr_done */ +/* 0x090a: i2c_get_byte */ + 0x57f000f8, + 0x0847f000, +/* 0x0910: i2c_get_byte_next */ + 0xbb0154b6, 0x65b60076, 0x9450f904, 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x088421f5, + 0x08c321f5, 0xf40464b6, - 0x46b03411, - 0xd81bf400, - 0xb60076bb, - 0x50f90465, - 0xbb046594, - 0x50bd0256, - 0xfc0475fd, - 0xc521f550, - 0x0464b608, - 0xbb0f11f4, - 0x36b00076, - 0x061bf401, -/* 0x09b7: i2c_put_byte_done */ - 0xf80132f4, -/* 0x09b9: i2c_addr */ - 0x0076bb00, + 0x53fd2b11, + 0x0142b605, + 0xf0d81bf4, + 0x76bb0137, + 0x0465b600, + 0x659450f9, + 0x0256bb04, + 0x75fd50bd, + 0xf550fc04, + 0xb6088221, +/* 0x095a: i2c_get_byte_done */ + 0x00f80464, +/* 0x095c: i2c_put_byte */ +/* 0x095f: i2c_put_byte_next 
*/ + 0xb60847f0, + 0x54ff0142, + 0x0076bb38, 0xf90465b6, 0x04659450, 0xbd0256bb, 0x0475fd50, 0x21f550fc, - 0x64b607f5, - 0x2911f404, - 0x012ec3e7, - 0xfd0134b6, - 0x76bb0553, + 0x64b60882, + 0x3411f404, + 0xf40046b0, + 0x76bbd81b, 0x0465b600, 0x659450f9, 0x0256bb04, 0x75fd50bd, 0xf550fc04, - 0xb6095e21, -/* 0x09fe: i2c_addr_done */ - 0x00f80464, -/* 0x0a00: i2c_acquire_addr */ - 0xb6f8cec7, - 0xe0b702e4, - 0xee980d1c, -/* 0x0a0f: i2c_acquire */ - 0xf500f800, - 0xf40a0021, - 0xd9f00421, - 0x4021f403, -/* 0x0a1e: i2c_release */ - 0x21f500f8, - 0x21f40a00, - 0x03daf004, - 0xf84021f4, -/* 0x0a2d: i2c_recv */ - 0x0132f400, - 0xb6f8c1c7, - 0x16b00214, - 0x3a1ff528, - 0xf413a001, - 0x0032980c, - 0x0ccc13a0, - 0xf4003198, - 0xd0f90231, - 0xd0f9e0f9, - 0x000067f1, - 0x100063f1, - 0xbb016792, + 0xb608c321, + 0x11f40464, + 0x0076bb0f, + 0xf40136b0, + 0x32f4061b, +/* 0x09b5: i2c_put_byte_done */ +/* 0x09b7: i2c_addr */ + 0xbb00f801, 0x65b60076, 0x9450f904, 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x0a0f21f5, - 0xfc0464b6, - 0x00d6b0d0, - 0x00b31bf5, - 0xbb0057f0, - 0x65b60076, - 0x9450f904, - 0x56bb0465, - 0xfd50bd02, - 0x50fc0475, - 0x09b921f5, - 0xf50464b6, - 0xc700d011, - 0x76bbe0c5, - 0x0465b600, - 0x659450f9, - 0x0256bb04, - 0x75fd50bd, - 0xf550fc04, - 0xb6095e21, - 0x11f50464, - 0x57f000ad, + 0x07f321f5, + 0xf40464b6, + 0xc3e72911, + 0x34b6012e, + 0x0553fd01, + 0xb60076bb, + 0x50f90465, + 0xbb046594, + 0x50bd0256, + 0xfc0475fd, + 0x5c21f550, + 0x0464b609, +/* 0x09fc: i2c_addr_done */ +/* 0x09fe: i2c_acquire_addr */ + 0xcec700f8, + 0x02e4b6f8, + 0x0d1ce0b7, + 0xf800ee98, +/* 0x0a0d: i2c_acquire */ + 0xfe21f500, + 0x0421f409, + 0xf403d9f0, + 0x00f84021, +/* 0x0a1c: i2c_release */ + 0x09fe21f5, + 0xf00421f4, + 0x21f403da, +/* 0x0a2b: i2c_recv */ + 0xf400f840, + 0xc1c70132, + 0x0214b6f8, + 0xf52816b0, + 0xa0013a1f, + 0x980cf413, + 0x13a00032, + 0x31980ccc, + 0x0231f400, + 0xe0f9d0f9, + 0x67f1d0f9, + 0x63f10000, + 0x67921000, 0x0076bb01, 0xf90465b6, 0x04659450, 0xbd0256bb, 0x0475fd50, 0x21f550fc, - 0x64b609b9, - 0x8a11f504, + 0x64b60a0d, + 0xb0d0fc04, + 0x1bf500d6, + 0x57f000b3, 0x0076bb00, 0xf90465b6, 0x04659450, 0xbd0256bb, 0x0475fd50, 0x21f550fc, - 0x64b6090c, - 0x6a11f404, - 0xbbe05bcb, + 0x64b609b7, + 0xd011f504, + 0xe0c5c700, + 0xb60076bb, + 0x50f90465, + 0xbb046594, + 0x50bd0256, + 0xfc0475fd, + 0x5c21f550, + 0x0464b609, + 0x00ad11f5, + 0xbb0157f0, 0x65b60076, 0x9450f904, 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x085121f5, - 0xb90464b6, - 0x74bd025b, -/* 0x0b33: i2c_recv_not_rd08 */ - 0xb0430ef4, - 0x1bf401d6, - 0x0057f03d, - 0x09b921f5, - 0xc73311f4, - 0x21f5e0c5, - 0x11f4095e, - 0x0057f029, - 0x09b921f5, - 0xc71f11f4, - 0x21f5e0b5, - 0x11f4095e, - 0x5121f515, - 0xc774bd08, - 0x1bf408c5, - 0x0232f409, -/* 0x0b73: i2c_recv_not_wr08 */ -/* 0x0b73: i2c_recv_done */ - 0xc7030ef4, - 0x21f5f8ce, - 0xe0fc0a1e, - 0x12f4d0fc, - 0x027cb90a, - 0x033621f5, -/* 0x0b88: i2c_recv_exit */ -/* 0x0b8a: i2c_init */ - 0x00f800f8, -/* 0x0b8c: test_recv */ - 0x05d817f1, + 0x09b721f5, + 0xf50464b6, + 0xbb008a11, + 0x65b60076, + 0x9450f904, + 0x56bb0465, + 0xfd50bd02, + 0x50fc0475, + 0x090a21f5, + 0xf40464b6, + 0x5bcb6a11, + 0x0076bbe0, + 0xf90465b6, + 0x04659450, + 0xbd0256bb, + 0x0475fd50, + 0x21f550fc, + 0x64b6084f, + 0x025bb904, + 0x0ef474bd, +/* 0x0b31: i2c_recv_not_rd08 */ + 0x01d6b043, + 0xf03d1bf4, + 0x21f50057, + 0x11f409b7, + 0xe0c5c733, + 0x095c21f5, + 0xf02911f4, + 0x21f50057, + 0x11f409b7, + 0xe0b5c71f, + 0x095c21f5, + 0xf51511f4, + 0xbd084f21, + 0x08c5c774, + 0xf4091bf4, + 0x0ef40232, +/* 0x0b71: 
i2c_recv_not_wr08 */ +/* 0x0b71: i2c_recv_done */ + 0xf8cec703, + 0x0a1c21f5, + 0xd0fce0fc, + 0xb90a12f4, + 0x21f5027c, +/* 0x0b86: i2c_recv_exit */ + 0x00f80336, +/* 0x0b88: i2c_init */ +/* 0x0b8a: test_recv */ + 0x17f100f8, + 0x14b605d8, + 0x0011cf06, + 0xf10110b6, + 0xb605d807, + 0x01d00604, + 0xf104bd00, + 0xf1d900e7, + 0xf5134fe3, + 0xf8025621, +/* 0x0bb1: test_init */ + 0x00e7f100, + 0x5621f508, +/* 0x0bbb: idle_recv */ + 0xf800f802, +/* 0x0bbd: idle */ + 0x0031f400, + 0x05d417f1, 0xcf0614b6, 0x10b60011, - 0xd807f101, + 0xd407f101, 0x0604b605, 0xbd0001d0, - 0x00e7f104, - 0x4fe3f1d9, - 0x5621f513, -/* 0x0bb3: test_init */ - 0xf100f802, - 0xf50800e7, - 0xf8025621, -/* 0x0bbd: idle_recv */ -/* 0x0bbf: idle */ - 0xf400f800, - 0x17f10031, - 0x14b605d4, - 0x0011cf06, - 0xf10110b6, - 0xb605d407, - 0x01d00604, -/* 0x0bdb: idle_loop */ - 0xf004bd00, - 0x32f45817, -/* 0x0be1: idle_proc */ -/* 0x0be1: idle_proc_exec */ - 0xb910f902, - 0x21f5021e, - 0x10fc033f, - 0xf40911f4, - 0x0ef40231, -/* 0x0bf5: idle_proc_next */ - 0x5810b6ef, - 0xf4061fb8, - 0x02f4e61b, - 0x0028f4dd, - 0x00bb0ef4, +/* 0x0bd9: idle_loop */ + 0x5817f004, +/* 0x0bdf: idle_proc */ +/* 0x0bdf: idle_proc_exec */ + 0xf90232f4, + 0x021eb910, + 0x033f21f5, + 0x11f410fc, + 0x0231f409, +/* 0x0bf3: idle_proc_next */ + 0xb6ef0ef4, + 0x1fb85810, + 0xe61bf406, + 0xf4dd02f4, + 0x0ef40028, + 0x000000bb, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h index c4edbc79e41a..e0222cb832fb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h @@ -47,8 +47,8 @@ static uint32_t gk208_pmu_data[] = { 0x00000000, 0x00000000, 0x584d454d, - 0x000005f3, - 0x000005e5, + 0x000005ee, + 0x000005e0, 0x00000000, 0x00000000, 0x00000000, @@ -69,8 +69,8 @@ static uint32_t gk208_pmu_data[] = { 0x00000000, 0x00000000, 0x46524550, - 0x000005f7, - 0x000005f5, + 0x000005f2, + 0x000005f0, 0x00000000, 0x00000000, 0x00000000, @@ -91,8 +91,8 @@ static uint32_t gk208_pmu_data[] = { 0x00000000, 0x00000000, 0x5f433249, - 0x000009f8, - 0x000008a2, + 0x000009f3, + 0x0000089d, 0x00000000, 0x00000000, 0x00000000, @@ -113,8 +113,8 @@ static uint32_t gk208_pmu_data[] = { 0x00000000, 0x00000000, 0x54534554, - 0x00000a16, - 0x000009fa, + 0x00000a11, + 0x000009f5, 0x00000000, 0x00000000, 0x00000000, @@ -135,8 +135,8 @@ static uint32_t gk208_pmu_data[] = { 0x00000000, 0x00000000, 0x454c4449, - 0x00000a21, - 0x00000a1f, + 0x00000a1c, + 0x00000a1a, 0x00000000, 0x00000000, 0x00000000, @@ -234,22 +234,22 @@ static uint32_t gk208_pmu_data[] = { /* 0x037c: memx_func_next */ 0x00000002, 0x00000000, - 0x000004cf, + 0x000004cc, 0x00000003, 0x00000002, - 0x00000546, + 0x00000541, 0x00040004, 0x00000000, - 0x00000563, + 0x0000055e, 0x00010005, 0x00000000, - 0x0000057d, + 0x00000578, 0x00010006, 0x00000000, - 0x00000541, + 0x0000053c, 0x00000007, 0x00000000, - 0x00000589, + 0x00000584, /* 0x03c4: memx_func_tail */ /* 0x03c4: memx_ts_start */ 0x00000000, @@ -1239,454 +1239,454 @@ static uint32_t gk208_pmu_code[] = { 0x0001f604, 0x00f804bd, /* 0x045c: memx_func_enter */ - 0x162067f1, - 0xf55d77f1, - 0x047e6eb2, - 0xd8b20000, - 0xf90487fd, - 0xfc80f960, - 0x7ee0fcd0, - 0x0700002d, - 0x7e6eb2fe, + 0x47162046, + 0x6eb2f55d, + 0x0000047e, + 0x87fdd8b2, + 0xf960f904, + 0xfcd0fc80, + 0x002d7ee0, + 0xb2fe0700, + 0x00047e6e, + 0xfdd8b200, + 0x60f90487, + 0xd0fc80f9, + 0x2d7ee0fc, + 0xf0460000, + 0x7e6eb226, 
0xb2000004, 0x0487fdd8, 0x80f960f9, 0xe0fcd0fc, 0x00002d7e, - 0x26f067f1, - 0x047e6eb2, - 0xd8b20000, - 0xf90487fd, - 0xfc80f960, - 0x7ee0fcd0, - 0x0600002d, - 0x07e04004, - 0xbd0006f6, -/* 0x04b9: memx_func_enter_wait */ - 0x07c04604, - 0xf00066cf, - 0x0bf40464, - 0xcf2c06f7, - 0x06b50066, -/* 0x04cf: memx_func_leave */ - 0x0600f8f1, - 0x0066cf2c, - 0x06f206b5, - 0x07e44004, - 0xbd0006f6, -/* 0x04e1: memx_func_leave_wait */ - 0x07c04604, - 0xf00066cf, - 0x1bf40464, - 0xf067f1f7, + 0xe0400406, + 0x0006f607, +/* 0x04b6: memx_func_enter_wait */ + 0xc04604bd, + 0x0066cf07, + 0xf40464f0, + 0x2c06f70b, + 0xb50066cf, + 0x00f8f106, +/* 0x04cc: memx_func_leave */ + 0x66cf2c06, + 0xf206b500, + 0xe4400406, + 0x0006f607, +/* 0x04de: memx_func_leave_wait */ + 0xc04604bd, + 0x0066cf07, + 0xf40464f0, + 0xf046f71b, 0xb2010726, 0x00047e6e, 0xfdd8b200, 0x60f90587, 0xd0fc80f9, 0x2d7ee0fc, - 0x67f10000, - 0x6eb21620, - 0x0000047e, - 0x87fdd8b2, - 0xf960f905, - 0xfcd0fc80, - 0x002d7ee0, - 0x0aa24700, - 0x047e6eb2, - 0xd8b20000, - 0xf90587fd, - 0xfc80f960, - 0x7ee0fcd0, - 0xf800002d, -/* 0x0541: memx_func_wait_vblank */ + 0x20460000, + 0x7e6eb216, + 0xb2000004, + 0x0587fdd8, + 0x80f960f9, + 0xe0fcd0fc, + 0x00002d7e, + 0xb20aa247, + 0x00047e6e, + 0xfdd8b200, + 0x60f90587, + 0xd0fc80f9, + 0x2d7ee0fc, + 0x00f80000, +/* 0x053c: memx_func_wait_vblank */ + 0xf80410b6, +/* 0x0541: memx_func_wr32 */ + 0x00169800, + 0xb6011598, + 0x60f90810, + 0xd0fc50f9, + 0x2d7ee0fc, + 0x42b60000, + 0xe81bf402, +/* 0x055e: memx_func_wait */ + 0x2c0800f8, + 0x980088cf, + 0x1d98001e, + 0x021c9801, + 0xb6031b98, + 0x747e1010, + 0x00f80000, +/* 0x0578: memx_func_delay */ + 0xb6001e98, + 0x587e0410, + 0x00f80000, +/* 0x0584: memx_func_train */ +/* 0x0586: memx_exec */ + 0xe0f900f8, + 0xc1b2d0f9, +/* 0x058e: memx_exec_next */ + 0x1398b2b2, 0x0410b600, -/* 0x0546: memx_func_wr32 */ - 0x169800f8, - 0x01159800, - 0xf90810b6, - 0xfc50f960, + 0x01f034e7, + 0x01e033e7, + 0xf00132b6, + 0x35980c30, + 0xa655f9de, + 0xe51ef412, + 0x98f10b98, + 0xcbbbf20c, + 0x07c44b02, + 0xfc00bbcf, 0x7ee0fcd0, - 0xb600002d, - 0x1bf40242, -/* 0x0563: memx_func_wait */ - 0x0800f8e8, - 0x0088cf2c, - 0x98001e98, - 0x1c98011d, - 0x031b9802, - 0x7e1010b6, - 0xf8000074, -/* 0x057d: memx_func_delay */ - 0x001e9800, - 0x7e0410b6, - 0xf8000058, -/* 0x0589: memx_func_train */ -/* 0x058b: memx_exec */ - 0xf900f800, - 0xb2d0f9e0, -/* 0x0593: memx_exec_next */ - 0x98b2b2c1, - 0x10b60013, - 0xf034e704, - 0xe033e701, - 0x0132b601, - 0x980c30f0, - 0x55f9de35, - 0x1ef412a6, - 0xf10b98e5, - 0xbbf20c98, - 0xc44b02cb, - 0x00bbcf07, - 0xe0fcd0fc, - 0x00029f7e, -/* 0x05ca: memx_info */ - 0xc67000f8, - 0x0c0bf401, -/* 0x05d0: memx_info_data */ - 0x4b03cc4c, - 0x0ef40800, -/* 0x05d9: memx_info_train */ - 0x0bcc4c09, -/* 0x05df: memx_info_send */ - 0x7e01004b, 0xf800029f, -/* 0x05e5: memx_recv */ - 0x01d6b000, - 0xb0a30bf4, - 0x0bf400d6, -/* 0x05f3: memx_init */ - 0xf800f8dc, -/* 0x05f5: perf_recv */ -/* 0x05f7: perf_init */ - 0xf800f800, -/* 0x05f9: i2c_drive_scl */ - 0x0036b000, - 0x400d0bf4, - 0x01f607e0, - 0xf804bd00, -/* 0x0609: i2c_drive_scl_lo */ - 0x07e44000, - 0xbd0001f6, -/* 0x0613: i2c_drive_sda */ - 0xb000f804, - 0x0bf40036, - 0x07e0400d, - 0xbd0002f6, -/* 0x0623: i2c_drive_sda_lo */ - 0x4000f804, - 0x02f607e4, - 0xf804bd00, -/* 0x062d: i2c_sense_scl */ - 0x0132f400, - 0xcf07c443, - 0x31fd0033, - 0x060bf404, -/* 0x063f: i2c_sense_scl_done */ - 0xf80131f4, -/* 0x0641: i2c_sense_sda */ - 0x0132f400, - 0xcf07c443, - 0x32fd0033, - 0x060bf404, -/* 0x0653: i2c_sense_sda_done */ - 
0xf80131f4, -/* 0x0655: i2c_raise_scl */ - 0x4440f900, - 0x01030898, - 0x0005f97e, -/* 0x0660: i2c_raise_scl_wait */ - 0x7e03e84e, - 0x7e000058, - 0xf400062d, - 0x42b60901, - 0xef1bf401, -/* 0x0674: i2c_raise_scl_done */ - 0x00f840fc, -/* 0x0678: i2c_start */ - 0x00062d7e, - 0x7e0d11f4, - 0xf4000641, - 0x0ef40611, -/* 0x0689: i2c_start_rep */ - 0x7e00032e, - 0x030005f9, - 0x06137e01, +/* 0x05c5: memx_info */ + 0x01c67000, +/* 0x05cb: memx_info_data */ + 0x4c0c0bf4, + 0x004b03cc, + 0x090ef408, +/* 0x05d4: memx_info_train */ + 0x4b0bcc4c, +/* 0x05da: memx_info_send */ + 0x9f7e0100, + 0x00f80002, +/* 0x05e0: memx_recv */ + 0xf401d6b0, + 0xd6b0a30b, + 0xdc0bf400, +/* 0x05ee: memx_init */ + 0x00f800f8, +/* 0x05f0: perf_recv */ +/* 0x05f2: perf_init */ + 0x00f800f8, +/* 0x05f4: i2c_drive_scl */ + 0xf40036b0, + 0xe0400d0b, + 0x0001f607, + 0x00f804bd, +/* 0x0604: i2c_drive_scl_lo */ + 0xf607e440, + 0x04bd0001, +/* 0x060e: i2c_drive_sda */ + 0x36b000f8, + 0x0d0bf400, + 0xf607e040, + 0x04bd0002, +/* 0x061e: i2c_drive_sda_lo */ + 0xe44000f8, + 0x0002f607, + 0x00f804bd, +/* 0x0628: i2c_sense_scl */ + 0x430132f4, + 0x33cf07c4, + 0x0431fd00, + 0xf4060bf4, +/* 0x063a: i2c_sense_scl_done */ + 0x00f80131, +/* 0x063c: i2c_sense_sda */ + 0x430132f4, + 0x33cf07c4, + 0x0432fd00, + 0xf4060bf4, +/* 0x064e: i2c_sense_sda_done */ + 0x00f80131, +/* 0x0650: i2c_raise_scl */ + 0x984440f9, + 0x7e010308, +/* 0x065b: i2c_raise_scl_wait */ + 0x4e0005f4, + 0x587e03e8, + 0x287e0000, + 0x01f40006, + 0x0142b609, +/* 0x066f: i2c_raise_scl_done */ + 0xfcef1bf4, +/* 0x0673: i2c_start */ + 0x7e00f840, + 0xf4000628, + 0x3c7e0d11, + 0x11f40006, + 0x2e0ef406, +/* 0x0684: i2c_start_rep */ + 0xf47e0003, + 0x01030005, + 0x00060e7e, + 0xb60076bb, + 0x50f90465, + 0xbb046594, + 0x50bd0256, + 0xfc0475fd, + 0x06507e50, + 0x0464b600, +/* 0x06af: i2c_start_send */ + 0x031d11f4, + 0x060e7e00, + 0x13884e00, + 0x0000587e, + 0xf47e0003, + 0x884e0005, + 0x00587e13, +/* 0x06c9: i2c_start_out */ +/* 0x06cb: i2c_stop */ + 0x0300f800, + 0x05f47e00, + 0x7e000300, + 0x4e00060e, + 0x587e03e8, + 0x01030000, + 0x0005f47e, + 0x7e13884e, + 0x03000058, + 0x060e7e01, + 0x13884e00, + 0x0000587e, +/* 0x06fa: i2c_bitw */ + 0x0e7e00f8, + 0xe84e0006, + 0x00587e03, 0x0076bb00, 0xf90465b6, 0x04659450, 0xbd0256bb, 0x0475fd50, - 0x557e50fc, + 0x507e50fc, 0x64b60006, - 0x1d11f404, -/* 0x06b4: i2c_start_send */ - 0x137e0003, - 0x884e0006, - 0x00587e13, - 0x7e000300, - 0x4e0005f9, - 0x587e1388, -/* 0x06ce: i2c_start_out */ - 0x00f80000, -/* 0x06d0: i2c_stop */ - 0xf97e0003, - 0x00030005, - 0x0006137e, - 0x7e03e84e, + 0x1711f404, + 0x7e13884e, 0x03000058, - 0x05f97e01, + 0x05f47e00, 0x13884e00, 0x0000587e, - 0x137e0103, - 0x884e0006, - 0x00587e13, -/* 0x06ff: i2c_bitw */ - 0x7e00f800, - 0x4e000613, - 0x587e03e8, - 0x76bb0000, +/* 0x0738: i2c_bitw_out */ +/* 0x073a: i2c_bitr */ + 0x010300f8, + 0x00060e7e, + 0x7e03e84e, + 0xbb000058, + 0x65b60076, + 0x9450f904, + 0x56bb0465, + 0xfd50bd02, + 0x50fc0475, + 0x0006507e, + 0xf40464b6, + 0x3c7e1a11, + 0x00030006, + 0x0005f47e, + 0x7e13884e, + 0xf0000058, + 0x31f4013c, +/* 0x077d: i2c_bitr_done */ +/* 0x077f: i2c_get_byte */ + 0x0500f801, +/* 0x0783: i2c_get_byte_next */ + 0xb6080400, + 0x76bb0154, 0x0465b600, 0x659450f9, 0x0256bb04, 0x75fd50bd, 0x7e50fc04, - 0xb6000655, + 0xb600073a, 0x11f40464, - 0x13884e17, - 0x0000587e, - 0xf97e0003, - 0x884e0005, - 0x00587e13, -/* 0x073d: i2c_bitw_out */ -/* 0x073f: i2c_bitr */ - 0x0300f800, - 0x06137e01, - 0x03e84e00, - 0x0000587e, + 0x0553fd2a, + 0xf40142b6, + 0x0103d81b, 0xb60076bb, 
0x50f90465, 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0x06557e50, + 0x06fa7e50, 0x0464b600, - 0x7e1a11f4, - 0x03000641, - 0x05f97e00, - 0x13884e00, - 0x0000587e, - 0xf4013cf0, -/* 0x0782: i2c_bitr_done */ - 0x00f80131, -/* 0x0784: i2c_get_byte */ - 0x08040005, -/* 0x0788: i2c_get_byte_next */ - 0xbb0154b6, - 0x65b60076, - 0x9450f904, - 0x56bb0465, - 0xfd50bd02, - 0x50fc0475, - 0x00073f7e, - 0xf40464b6, - 0x53fd2a11, - 0x0142b605, - 0x03d81bf4, - 0x0076bb01, - 0xf90465b6, - 0x04659450, - 0xbd0256bb, - 0x0475fd50, - 0xff7e50fc, - 0x64b60006, -/* 0x07d1: i2c_get_byte_done */ -/* 0x07d3: i2c_put_byte */ - 0x0400f804, -/* 0x07d5: i2c_put_byte_next */ - 0x0142b608, - 0xbb3854ff, +/* 0x07cc: i2c_get_byte_done */ +/* 0x07ce: i2c_put_byte */ + 0x080400f8, +/* 0x07d0: i2c_put_byte_next */ + 0xff0142b6, + 0x76bb3854, + 0x0465b600, + 0x659450f9, + 0x0256bb04, + 0x75fd50bd, + 0x7e50fc04, + 0xb60006fa, + 0x11f40464, + 0x0046b034, + 0xbbd81bf4, 0x65b60076, 0x9450f904, 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x0006ff7e, + 0x00073a7e, 0xf40464b6, - 0x46b03411, - 0xd81bf400, + 0x76bb0f11, + 0x0136b000, + 0xf4061bf4, +/* 0x0826: i2c_put_byte_done */ + 0x00f80132, +/* 0x0828: i2c_addr */ 0xb60076bb, 0x50f90465, 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0x073f7e50, + 0x06737e50, 0x0464b600, - 0xbb0f11f4, - 0x36b00076, - 0x061bf401, -/* 0x082b: i2c_put_byte_done */ - 0xf80132f4, -/* 0x082d: i2c_addr */ - 0x0076bb00, + 0xe72911f4, + 0xb6012ec3, + 0x53fd0134, + 0x0076bb05, 0xf90465b6, 0x04659450, 0xbd0256bb, 0x0475fd50, - 0x787e50fc, - 0x64b60006, - 0x2911f404, - 0x012ec3e7, - 0xfd0134b6, - 0x76bb0553, - 0x0465b600, - 0x659450f9, - 0x0256bb04, - 0x75fd50bd, - 0x7e50fc04, - 0xb60007d3, -/* 0x0872: i2c_addr_done */ - 0x00f80464, -/* 0x0874: i2c_acquire_addr */ - 0xb6f8cec7, - 0xe0b705e4, - 0x00f8d014, -/* 0x0880: i2c_acquire */ - 0x0008747e, + 0xce7e50fc, + 0x64b60007, +/* 0x086d: i2c_addr_done */ +/* 0x086f: i2c_acquire_addr */ + 0xc700f804, + 0xe4b6f8ce, + 0x14e0b705, +/* 0x087b: i2c_acquire */ + 0x7e00f8d0, + 0x7e00086f, + 0xf0000004, + 0x2d7e03d9, + 0x00f80000, +/* 0x088c: i2c_release */ + 0x00086f7e, 0x0000047e, - 0x7e03d9f0, + 0x7e03daf0, 0xf800002d, -/* 0x0891: i2c_release */ - 0x08747e00, - 0x00047e00, - 0x03daf000, - 0x00002d7e, -/* 0x08a2: i2c_recv */ - 0x32f400f8, - 0xf8c1c701, - 0xb00214b6, - 0x1ff52816, - 0x13b80134, - 0x98000cf4, - 0x13b80032, - 0x98000ccc, - 0x31f40031, - 0xf9d0f902, - 0xd6d0f9e0, - 0x10000000, - 0xbb016792, - 0x65b60076, - 0x9450f904, - 0x56bb0465, - 0xfd50bd02, - 0x50fc0475, - 0x0008807e, - 0xfc0464b6, - 0x00d6b0d0, - 0x00b01bf5, - 0x76bb0005, +/* 0x089d: i2c_recv */ + 0x0132f400, + 0xb6f8c1c7, + 0x16b00214, + 0x341ff528, + 0xf413b801, + 0x3298000c, + 0xcc13b800, + 0x3198000c, + 0x0231f400, + 0xe0f9d0f9, + 0x00d6d0f9, + 0x92100000, + 0x76bb0167, 0x0465b600, 0x659450f9, 0x0256bb04, 0x75fd50bd, 0x7e50fc04, - 0xb600082d, - 0x11f50464, - 0xc5c700cc, - 0x0076bbe0, - 0xf90465b6, - 0x04659450, - 0xbd0256bb, - 0x0475fd50, - 0xd37e50fc, - 0x64b60007, - 0xa911f504, - 0xbb010500, - 0x65b60076, - 0x9450f904, - 0x56bb0465, - 0xfd50bd02, - 0x50fc0475, - 0x00082d7e, - 0xf50464b6, - 0xbb008711, - 0x65b60076, - 0x9450f904, - 0x56bb0465, - 0xfd50bd02, - 0x50fc0475, - 0x0007847e, - 0xf40464b6, - 0x5bcb6711, - 0x0076bbe0, + 0xb600087b, + 0xd0fc0464, + 0xf500d6b0, + 0x0500b01b, + 0x0076bb00, 0xf90465b6, 0x04659450, 0xbd0256bb, 0x0475fd50, - 0xd07e50fc, - 0x64b60006, - 0xbd5bb204, - 0x410ef474, -/* 0x09a4: i2c_recv_not_rd08 */ - 0xf401d6b0, - 0x00053b1b, - 0x00082d7e, - 0xc73211f4, - 0xd37ee0c5, - 0x11f40007, - 
0x7e000528, - 0xf400082d, - 0xb5c71f11, - 0x07d37ee0, - 0x1511f400, - 0x0006d07e, - 0xc5c774bd, - 0x091bf408, - 0xf40232f4, -/* 0x09e2: i2c_recv_not_wr08 */ -/* 0x09e2: i2c_recv_done */ - 0xcec7030e, - 0x08917ef8, - 0xfce0fc00, - 0x0912f4d0, - 0x9f7e7cb2, -/* 0x09f6: i2c_recv_exit */ - 0x00f80002, -/* 0x09f8: i2c_init */ -/* 0x09fa: test_recv */ - 0x584100f8, - 0x0011cf04, - 0x400110b6, - 0x01f60458, - 0xde04bd00, - 0x134fd900, - 0x0001de7e, -/* 0x0a16: test_init */ - 0x004e00f8, - 0x01de7e08, -/* 0x0a1f: idle_recv */ + 0x287e50fc, + 0x64b60008, + 0xcc11f504, + 0xe0c5c700, + 0xb60076bb, + 0x50f90465, + 0xbb046594, + 0x50bd0256, + 0xfc0475fd, + 0x07ce7e50, + 0x0464b600, + 0x00a911f5, + 0x76bb0105, + 0x0465b600, + 0x659450f9, + 0x0256bb04, + 0x75fd50bd, + 0x7e50fc04, + 0xb6000828, + 0x11f50464, + 0x76bb0087, + 0x0465b600, + 0x659450f9, + 0x0256bb04, + 0x75fd50bd, + 0x7e50fc04, + 0xb600077f, + 0x11f40464, + 0xe05bcb67, + 0xb60076bb, + 0x50f90465, + 0xbb046594, + 0x50bd0256, + 0xfc0475fd, + 0x06cb7e50, + 0x0464b600, + 0x74bd5bb2, +/* 0x099f: i2c_recv_not_rd08 */ + 0xb0410ef4, + 0x1bf401d6, + 0x7e00053b, + 0xf4000828, + 0xc5c73211, + 0x07ce7ee0, + 0x2811f400, + 0x287e0005, + 0x11f40008, + 0xe0b5c71f, + 0x0007ce7e, + 0x7e1511f4, + 0xbd0006cb, + 0x08c5c774, + 0xf4091bf4, + 0x0ef40232, +/* 0x09dd: i2c_recv_not_wr08 */ +/* 0x09dd: i2c_recv_done */ + 0xf8cec703, + 0x00088c7e, + 0xd0fce0fc, + 0xb20912f4, + 0x029f7e7c, +/* 0x09f1: i2c_recv_exit */ +/* 0x09f3: i2c_init */ 0xf800f800, -/* 0x0a21: idle */ - 0x0031f400, - 0xcf045441, - 0x10b60011, - 0x04544001, - 0xbd0001f6, -/* 0x0a35: idle_loop */ - 0xf4580104, -/* 0x0a3a: idle_proc */ -/* 0x0a3a: idle_proc_exec */ - 0x10f90232, - 0xa87e1eb2, - 0x10fc0002, - 0xf40911f4, - 0x0ef40231, -/* 0x0a4d: idle_proc_next */ - 0x5810b6f0, - 0x1bf41fa6, - 0xe002f4e8, - 0xf40028f4, - 0x0000c60e, +/* 0x09f5: test_recv */ + 0x04584100, + 0xb60011cf, + 0x58400110, + 0x0001f604, + 0x00de04bd, + 0x7e134fd9, + 0xf80001de, +/* 0x0a11: test_init */ + 0x08004e00, + 0x0001de7e, +/* 0x0a1a: idle_recv */ + 0x00f800f8, +/* 0x0a1c: idle */ + 0x410031f4, + 0x11cf0454, + 0x0110b600, + 0xf6045440, + 0x04bd0001, +/* 0x0a30: idle_loop */ + 0x32f45801, +/* 0x0a35: idle_proc */ +/* 0x0a35: idle_proc_exec */ + 0xb210f902, + 0x02a87e1e, + 0xf410fc00, + 0x31f40911, + 0xf00ef402, +/* 0x0a48: idle_proc_next */ + 0xa65810b6, + 0xe81bf41f, + 0xf4e002f4, + 0x0ef40028, + 0x000000c6, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h index 6a2572e8945a..defddf5957ee 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h @@ -47,8 +47,8 @@ static uint32_t gt215_pmu_data[] = { 0x00000000, 0x00000000, 0x584d454d, - 0x0000083a, - 0x0000082c, + 0x00000833, + 0x00000825, 0x00000000, 0x00000000, 0x00000000, @@ -69,8 +69,8 @@ static uint32_t gt215_pmu_data[] = { 0x00000000, 0x00000000, 0x46524550, - 0x0000083e, - 0x0000083c, + 0x00000837, + 0x00000835, 0x00000000, 0x00000000, 0x00000000, @@ -91,8 +91,8 @@ static uint32_t gt215_pmu_data[] = { 0x00000000, 0x00000000, 0x5f433249, - 0x00000c6e, - 0x00000b11, + 0x00000c67, + 0x00000b0a, 0x00000000, 0x00000000, 0x00000000, @@ -113,8 +113,8 @@ static uint32_t gt215_pmu_data[] = { 0x00000000, 0x00000000, 0x54534554, - 0x00000c97, - 0x00000c70, + 0x00000c90, + 0x00000c69, 0x00000000, 0x00000000, 0x00000000, @@ -135,8 +135,8 @@ static uint32_t gt215_pmu_data[] = { 
0x00000000, 0x00000000, 0x454c4449, - 0x00000ca3, - 0x00000ca1, + 0x00000c9c, + 0x00000c9a, 0x00000000, 0x00000000, 0x00000000, @@ -234,22 +234,22 @@ static uint32_t gt215_pmu_data[] = { /* 0x037c: memx_func_next */ 0x00000002, 0x00000000, - 0x000005a0, + 0x0000059f, 0x00000003, 0x00000002, - 0x00000632, + 0x0000062f, 0x00040004, 0x00000000, - 0x0000064e, + 0x0000064b, 0x00010005, 0x00000000, - 0x0000066b, + 0x00000668, 0x00010006, 0x00000000, - 0x000005f0, + 0x000005ef, 0x00000007, 0x00000000, - 0x00000676, + 0x00000673, /* 0x03c4: memx_func_tail */ /* 0x03c4: memx_ts_start */ 0x00000000, @@ -1305,560 +1305,560 @@ static uint32_t gt215_pmu_code[] = { 0x67f102d7, 0x63f1fffc, 0x76fdffff, - 0x0267f104, - 0x0576fd00, - 0x70f980f9, - 0xe0fcd0fc, - 0xf04021f4, + 0x0267f004, + 0xf90576fd, + 0xfc70f980, + 0xf4e0fcd0, + 0x67f04021, + 0xe007f104, + 0x0604b607, + 0xbd0006d0, +/* 0x0581: memx_func_enter_wait */ + 0xc067f104, + 0x0664b607, + 0xf00066cf, + 0x0bf40464, + 0x2c67f0f3, + 0xcf0664b6, + 0x06800066, +/* 0x059f: memx_func_leave */ + 0xf000f8f1, + 0x64b62c67, + 0x0066cf06, + 0xf0f20680, 0x07f10467, - 0x04b607e0, + 0x04b607e4, 0x0006d006, -/* 0x0582: memx_func_enter_wait */ +/* 0x05ba: memx_func_leave_wait */ 0x67f104bd, 0x64b607c0, 0x0066cf06, 0xf40464f0, - 0x67f0f30b, - 0x0664b62c, - 0x800066cf, - 0x00f8f106, -/* 0x05a0: memx_func_leave */ - 0xb62c67f0, - 0x66cf0664, - 0xf2068000, - 0xf10467f0, - 0xb607e407, - 0x06d00604, -/* 0x05bb: memx_func_leave_wait */ - 0xf104bd00, - 0xb607c067, - 0x66cf0664, - 0x0464f000, - 0xf1f31bf4, - 0xb9161087, - 0x21f4028e, - 0x02d7b904, - 0xffcc67f1, - 0xffff63f1, - 0xf90476fd, - 0xfc70f980, - 0xf4e0fcd0, - 0x00f84021, -/* 0x05f0: memx_func_wait_vblank */ - 0xb0001698, - 0x0bf40066, - 0x0166b013, - 0xf4060bf4, -/* 0x0602: memx_func_wait_vblank_head1 */ - 0x77f12e0e, - 0x0ef40020, -/* 0x0609: memx_func_wait_vblank_head0 */ - 0x0877f107, -/* 0x060d: memx_func_wait_vblank_0 */ - 0xc467f100, - 0x0664b607, - 0xfd0066cf, - 0x1bf40467, -/* 0x061d: memx_func_wait_vblank_1 */ - 0xc467f1f3, - 0x0664b607, - 0xfd0066cf, - 0x0bf40467, -/* 0x062d: memx_func_wait_vblank_fini */ - 0x0410b6f3, -/* 0x0632: memx_func_wr32 */ - 0x169800f8, - 0x01159800, - 0xf90810b6, - 0xfc50f960, - 0xf4e0fcd0, - 0x42b64021, - 0xe91bf402, -/* 0x064e: memx_func_wait */ - 0x87f000f8, - 0x0684b62c, - 0x980088cf, - 0x1d98001e, - 0x021c9801, - 0xb6031b98, - 0x21f41010, -/* 0x066b: memx_func_delay */ - 0x9800f8a3, - 0x10b6001e, - 0x7e21f404, -/* 0x0676: memx_func_train */ - 0x57f100f8, - 0x77f10003, - 0x97f10000, - 0x93f00000, - 0x029eb970, - 0xb90421f4, - 0xe7f102d8, - 0x21f42710, -/* 0x0695: memx_func_train_loop_outer */ - 0x0158e07e, - 0x0083f101, - 0xe097f102, - 0x1193f011, - 0x80f990f9, + 0x87f1f31b, + 0x8eb91610, + 0x0421f402, + 0xf102d7b9, + 0xf1ffcc67, + 0xfdffff63, + 0x80f90476, + 0xd0fc70f9, + 0x21f4e0fc, +/* 0x05ef: memx_func_wait_vblank */ + 0x9800f840, + 0x66b00016, + 0x120bf400, + 0xf40166b0, + 0x0ef4060b, +/* 0x0601: memx_func_wait_vblank_head1 */ + 0x2077f02c, +/* 0x0607: memx_func_wait_vblank_head0 */ + 0xf0060ef4, +/* 0x060a: memx_func_wait_vblank_0 */ + 0x67f10877, + 0x64b607c4, + 0x0066cf06, + 0xf40467fd, +/* 0x061a: memx_func_wait_vblank_1 */ + 0x67f1f31b, + 0x64b607c4, + 0x0066cf06, + 0xf40467fd, +/* 0x062a: memx_func_wait_vblank_fini */ + 0x10b6f30b, +/* 0x062f: memx_func_wr32 */ + 0x9800f804, + 0x15980016, + 0x0810b601, + 0x50f960f9, 0xe0fcd0fc, - 0xf94021f4, - 0x0067f150, -/* 0x06b5: memx_func_train_loop_inner */ - 0x1187f100, - 0x9068ff11, - 0xfd109894, - 0x97f10589, - 
0x93f00720, - 0xf990f910, - 0xfcd0fc80, - 0x4021f4e0, - 0x008097f1, - 0xb91093f0, - 0x21f4029e, - 0x02d8b904, - 0xf92088c5, + 0xb64021f4, + 0x1bf40242, +/* 0x064b: memx_func_wait */ + 0xf000f8e9, + 0x84b62c87, + 0x0088cf06, + 0x98001e98, + 0x1c98011d, + 0x031b9802, + 0xf41010b6, + 0x00f8a321, +/* 0x0668: memx_func_delay */ + 0xb6001e98, + 0x21f40410, +/* 0x0673: memx_func_train */ + 0xf000f87e, + 0x77f00357, + 0x0097f100, + 0x7093f000, + 0xf4029eb9, + 0xd8b90421, + 0x10e7f102, + 0x7e21f427, +/* 0x0690: memx_func_train_loop_outer */ + 0x010158e0, + 0x020083f1, + 0x11e097f1, + 0xf91193f0, + 0xfc80f990, + 0xf4e0fcd0, + 0x50f94021, +/* 0x06af: memx_func_train_loop_inner */ + 0xf10067f0, + 0xff111187, + 0x98949068, + 0x0589fd10, + 0x072097f1, + 0xf91093f0, 0xfc80f990, 0xf4e0fcd0, 0x97f14021, - 0x93f0053c, - 0x0287f110, - 0x0083f130, - 0xf990f980, + 0x93f00080, + 0x029eb910, + 0xb90421f4, + 0x88c502d8, + 0xf990f920, 0xfcd0fc80, 0x4021f4e0, - 0x0560e7f1, - 0xf110e3f0, - 0xf10000d7, - 0x908000d3, - 0xb7f100dc, - 0xb3f08480, - 0xa321f41e, - 0x000057f1, - 0xffff97f1, - 0x830093f1, -/* 0x0734: memx_func_train_loop_4x */ - 0x0080a7f1, - 0xb910a3f0, - 0x21f402ae, - 0x02d8b904, - 0xffdfb7f1, - 0xffffb3f1, - 0xf9048bfd, - 0xfc80f9a0, + 0x053c97f1, + 0xf11093f0, + 0xf1300287, + 0xf9800083, + 0xfc80f990, 0xf4e0fcd0, - 0xa7f14021, - 0xa3f0053c, - 0x0287f110, - 0x0083f130, - 0xf9a0f980, - 0xfcd0fc80, - 0x4021f4e0, - 0x0560e7f1, - 0xf110e3f0, - 0xf10000d7, - 0xb98000d3, - 0xb7f102dc, - 0xb3f02710, - 0xa321f400, - 0xf402eeb9, - 0xddb90421, - 0x949dff02, + 0xe7f14021, + 0xe3f00560, + 0x00d7f110, + 0x00d3f100, + 0x00dc9080, + 0x8480b7f1, + 0xf41eb3f0, + 0x57f0a321, + 0xff97f100, + 0x0093f1ff, +/* 0x072d: memx_func_train_loop_4x */ + 0x80a7f183, + 0x10a3f000, + 0xf402aeb9, + 0xd8b90421, + 0xdfb7f102, + 0xffb3f1ff, + 0x048bfdff, + 0x80f9a0f9, + 0xe0fcd0fc, + 0xf14021f4, + 0xf0053ca7, + 0x87f110a3, + 0x83f13002, + 0xa0f98000, + 0xd0fc80f9, + 0x21f4e0fc, + 0x60e7f140, + 0x10e3f005, + 0x0000d7f1, + 0x8000d3f1, + 0xf102dcb9, + 0xf02710b7, + 0x21f400b3, + 0x02eeb9a3, + 0xb90421f4, + 0x9dff02dd, + 0x0150b694, + 0xf4045670, + 0x7aa0921e, + 0xa9800bcc, + 0x0160b600, + 0x700470b6, + 0x1ef51066, + 0x50fcff01, 0x700150b6, - 0x1ef40456, - 0xcc7aa092, - 0x00a9800b, - 0xb60160b6, - 0x66700470, - 0x001ef510, - 0xb650fcff, - 0x56700150, - 0xd41ef507, -/* 0x07c7: memx_exec */ - 0xf900f8fe, - 0xb9d0f9e0, - 0xb2b902c1, -/* 0x07d1: memx_exec_next */ - 0x00139802, - 0xe70410b6, - 0xe701f034, - 0xb601e033, - 0x30f00132, - 0xde35980c, - 0x12b855f9, - 0xe41ef406, - 0x98f10b98, - 0xcbbbf20c, - 0xc4b7f102, - 0x06b4b607, - 0xfc00bbcf, - 0xf5e0fcd0, + 0x1ef50756, + 0x00f8fed6, +/* 0x07c0: memx_exec */ + 0xd0f9e0f9, + 0xb902c1b9, +/* 0x07ca: memx_exec_next */ + 0x139802b2, + 0x0410b600, + 0x01f034e7, + 0x01e033e7, + 0xf00132b6, + 0x35980c30, + 0xb855f9de, + 0x1ef40612, + 0xf10b98e4, + 0xbbf20c98, + 0xb7f102cb, + 0xb4b607c4, + 0x00bbcf06, + 0xe0fcd0fc, + 0x033621f5, +/* 0x0806: memx_info */ + 0xc67000f8, + 0x0e0bf401, +/* 0x080c: memx_info_data */ + 0x03ccc7f1, + 0x0800b7f1, +/* 0x0817: memx_info_train */ + 0xf10b0ef4, + 0xf10bccc7, +/* 0x081f: memx_info_send */ + 0xf50100b7, 0xf8033621, -/* 0x080d: memx_info */ - 0x01c67000, -/* 0x0813: memx_info_data */ - 0xf10e0bf4, - 0xf103ccc7, - 0xf40800b7, -/* 0x081e: memx_info_train */ - 0xc7f10b0e, - 0xb7f10bcc, -/* 0x0826: memx_info_send */ - 0x21f50100, - 0x00f80336, -/* 0x082c: memx_recv */ - 0xf401d6b0, - 0xd6b0980b, - 0xd80bf400, -/* 0x083a: memx_init */ - 0x00f800f8, -/* 0x083c: perf_recv */ -/* 
0x083e: perf_init */ - 0x00f800f8, -/* 0x0840: i2c_drive_scl */ - 0xf40036b0, - 0x07f1110b, - 0x04b607e0, - 0x0001d006, - 0x00f804bd, -/* 0x0854: i2c_drive_scl_lo */ - 0x07e407f1, - 0xd00604b6, - 0x04bd0001, -/* 0x0862: i2c_drive_sda */ - 0x36b000f8, - 0x110bf400, - 0x07e007f1, - 0xd00604b6, - 0x04bd0002, -/* 0x0876: i2c_drive_sda_lo */ - 0x07f100f8, - 0x04b607e4, - 0x0002d006, - 0x00f804bd, -/* 0x0884: i2c_sense_scl */ - 0xf10132f4, - 0xb607c437, - 0x33cf0634, - 0x0431fd00, - 0xf4060bf4, -/* 0x089a: i2c_sense_scl_done */ - 0x00f80131, -/* 0x089c: i2c_sense_sda */ - 0xf10132f4, - 0xb607c437, - 0x33cf0634, - 0x0432fd00, - 0xf4060bf4, -/* 0x08b2: i2c_sense_sda_done */ - 0x00f80131, -/* 0x08b4: i2c_raise_scl */ - 0x47f140f9, - 0x37f00898, - 0x4021f501, -/* 0x08c1: i2c_raise_scl_wait */ +/* 0x0825: memx_recv */ + 0x01d6b000, + 0xb0980bf4, + 0x0bf400d6, +/* 0x0833: memx_init */ + 0xf800f8d8, +/* 0x0835: perf_recv */ +/* 0x0837: perf_init */ + 0xf800f800, +/* 0x0839: i2c_drive_scl */ + 0x0036b000, + 0xf1110bf4, + 0xb607e007, + 0x01d00604, + 0xf804bd00, +/* 0x084d: i2c_drive_scl_lo */ + 0xe407f100, + 0x0604b607, + 0xbd0001d0, +/* 0x085b: i2c_drive_sda */ + 0xb000f804, + 0x0bf40036, + 0xe007f111, + 0x0604b607, + 0xbd0002d0, +/* 0x086f: i2c_drive_sda_lo */ + 0xf100f804, + 0xb607e407, + 0x02d00604, + 0xf804bd00, +/* 0x087d: i2c_sense_scl */ + 0x0132f400, + 0x07c437f1, + 0xcf0634b6, + 0x31fd0033, + 0x060bf404, +/* 0x0893: i2c_sense_scl_done */ + 0xf80131f4, +/* 0x0895: i2c_sense_sda */ + 0x0132f400, + 0x07c437f1, + 0xcf0634b6, + 0x32fd0033, + 0x060bf404, +/* 0x08ab: i2c_sense_sda_done */ + 0xf80131f4, +/* 0x08ad: i2c_raise_scl */ + 0xf140f900, + 0xf0089847, + 0x21f50137, +/* 0x08ba: i2c_raise_scl_wait */ + 0xe7f10839, + 0x21f403e8, + 0x7d21f57e, + 0x0901f408, + 0xf40142b6, +/* 0x08ce: i2c_raise_scl_done */ + 0x40fcef1b, +/* 0x08d2: i2c_start */ + 0x21f500f8, + 0x11f4087d, + 0x9521f50d, + 0x0611f408, +/* 0x08e3: i2c_start_rep */ + 0xf0300ef4, + 0x21f50037, + 0x37f00839, + 0x5b21f501, + 0x0076bb08, + 0xf90465b6, + 0x04659450, + 0xbd0256bb, + 0x0475fd50, + 0x21f550fc, + 0x64b608ad, + 0x1f11f404, +/* 0x0910: i2c_start_send */ + 0xf50037f0, + 0xf1085b21, + 0xf41388e7, + 0x37f07e21, + 0x3921f500, + 0x88e7f108, + 0x7e21f413, +/* 0x092c: i2c_start_out */ +/* 0x092e: i2c_stop */ + 0x37f000f8, + 0x3921f500, + 0x0037f008, + 0x085b21f5, + 0x03e8e7f1, + 0xf07e21f4, + 0x21f50137, + 0xe7f10839, + 0x21f41388, + 0x0137f07e, + 0x085b21f5, + 0x1388e7f1, + 0xf87e21f4, +/* 0x0961: i2c_bitw */ + 0x5b21f500, 0xe8e7f108, 0x7e21f403, - 0x088421f5, - 0xb60901f4, - 0x1bf40142, -/* 0x08d5: i2c_raise_scl_done */ - 0xf840fcef, -/* 0x08d9: i2c_start */ - 0x8421f500, - 0x0d11f408, - 0x089c21f5, - 0xf40611f4, -/* 0x08ea: i2c_start_rep */ - 0x37f0300e, - 0x4021f500, - 0x0137f008, - 0x086221f5, 0xb60076bb, 0x50f90465, 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0xb421f550, + 0xad21f550, 0x0464b608, -/* 0x0917: i2c_start_send */ - 0xf01f11f4, - 0x21f50037, - 0xe7f10862, - 0x21f41388, - 0x0037f07e, - 0x084021f5, - 0x1388e7f1, -/* 0x0933: i2c_start_out */ - 0xf87e21f4, -/* 0x0935: i2c_stop */ - 0x0037f000, - 0x084021f5, - 0xf50037f0, - 0xf1086221, - 0xf403e8e7, + 0xf11811f4, + 0xf41388e7, 0x37f07e21, - 0x4021f501, + 0x3921f500, 0x88e7f108, 0x7e21f413, - 0xf50137f0, - 0xf1086221, - 0xf41388e7, - 0x00f87e21, -/* 0x0968: i2c_bitw */ - 0x086221f5, - 0x03e8e7f1, - 0xbb7e21f4, - 0x65b60076, - 0x9450f904, - 0x56bb0465, - 0xfd50bd02, - 0x50fc0475, - 0x08b421f5, - 0xf40464b6, - 0xe7f11811, +/* 0x09a0: i2c_bitw_out */ +/* 0x09a2: i2c_bitr */ + 
0x37f000f8, + 0x5b21f501, + 0xe8e7f108, + 0x7e21f403, + 0xb60076bb, + 0x50f90465, + 0xbb046594, + 0x50bd0256, + 0xfc0475fd, + 0xad21f550, + 0x0464b608, + 0xf51b11f4, + 0xf0089521, + 0x21f50037, + 0xe7f10839, 0x21f41388, - 0x0037f07e, - 0x084021f5, - 0x1388e7f1, -/* 0x09a7: i2c_bitw_out */ - 0xf87e21f4, -/* 0x09a9: i2c_bitr */ - 0x0137f000, - 0x086221f5, - 0x03e8e7f1, - 0xbb7e21f4, - 0x65b60076, - 0x9450f904, - 0x56bb0465, - 0xfd50bd02, - 0x50fc0475, - 0x08b421f5, - 0xf40464b6, - 0x21f51b11, - 0x37f0089c, - 0x4021f500, - 0x88e7f108, - 0x7e21f413, - 0xf4013cf0, -/* 0x09ee: i2c_bitr_done */ - 0x00f80131, -/* 0x09f0: i2c_get_byte */ - 0xf00057f0, -/* 0x09f6: i2c_get_byte_next */ - 0x54b60847, + 0x013cf07e, +/* 0x09e7: i2c_bitr_done */ + 0xf80131f4, +/* 0x09e9: i2c_get_byte */ + 0x0057f000, +/* 0x09ef: i2c_get_byte_next */ + 0xb60847f0, + 0x76bb0154, + 0x0465b600, + 0x659450f9, + 0x0256bb04, + 0x75fd50bd, + 0xf550fc04, + 0xb609a221, + 0x11f40464, + 0x0553fd2b, + 0xf40142b6, + 0x37f0d81b, 0x0076bb01, 0xf90465b6, 0x04659450, 0xbd0256bb, 0x0475fd50, 0x21f550fc, - 0x64b609a9, - 0x2b11f404, - 0xb60553fd, - 0x1bf40142, - 0x0137f0d8, - 0xb60076bb, - 0x50f90465, - 0xbb046594, - 0x50bd0256, - 0xfc0475fd, - 0x6821f550, - 0x0464b609, -/* 0x0a40: i2c_get_byte_done */ -/* 0x0a42: i2c_put_byte */ - 0x47f000f8, -/* 0x0a45: i2c_put_byte_next */ - 0x0142b608, - 0xbb3854ff, - 0x65b60076, - 0x9450f904, - 0x56bb0465, - 0xfd50bd02, - 0x50fc0475, - 0x096821f5, - 0xf40464b6, - 0x46b03411, - 0xd81bf400, + 0x64b60961, +/* 0x0a39: i2c_get_byte_done */ +/* 0x0a3b: i2c_put_byte */ + 0xf000f804, +/* 0x0a3e: i2c_put_byte_next */ + 0x42b60847, + 0x3854ff01, 0xb60076bb, 0x50f90465, 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0xa921f550, + 0x6121f550, 0x0464b609, - 0xbb0f11f4, - 0x36b00076, - 0x061bf401, -/* 0x0a9b: i2c_put_byte_done */ - 0xf80132f4, -/* 0x0a9d: i2c_addr */ - 0x0076bb00, + 0xb03411f4, + 0x1bf40046, + 0x0076bbd8, 0xf90465b6, 0x04659450, 0xbd0256bb, 0x0475fd50, 0x21f550fc, - 0x64b608d9, - 0x2911f404, - 0x012ec3e7, - 0xfd0134b6, - 0x76bb0553, + 0x64b609a2, + 0x0f11f404, + 0xb00076bb, + 0x1bf40136, + 0x0132f406, +/* 0x0a94: i2c_put_byte_done */ +/* 0x0a96: i2c_addr */ + 0x76bb00f8, 0x0465b600, 0x659450f9, 0x0256bb04, 0x75fd50bd, 0xf550fc04, - 0xb60a4221, -/* 0x0ae2: i2c_addr_done */ - 0x00f80464, -/* 0x0ae4: i2c_acquire_addr */ - 0xb6f8cec7, - 0xe0b702e4, - 0xee980d1c, -/* 0x0af3: i2c_acquire */ - 0xf500f800, - 0xf40ae421, - 0xd9f00421, - 0x4021f403, -/* 0x0b02: i2c_release */ - 0x21f500f8, - 0x21f40ae4, - 0x03daf004, - 0xf84021f4, -/* 0x0b11: i2c_recv */ - 0x0132f400, - 0xb6f8c1c7, - 0x16b00214, - 0x3a1ff528, - 0xf413a001, - 0x0032980c, - 0x0ccc13a0, - 0xf4003198, - 0xd0f90231, - 0xd0f9e0f9, - 0x000067f1, - 0x100063f1, - 0xbb016792, + 0xb608d221, + 0x11f40464, + 0x2ec3e729, + 0x0134b601, + 0xbb0553fd, 0x65b60076, 0x9450f904, 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x0af321f5, - 0xfc0464b6, - 0x00d6b0d0, - 0x00b31bf5, - 0xbb0057f0, + 0x0a3b21f5, +/* 0x0adb: i2c_addr_done */ + 0xf80464b6, +/* 0x0add: i2c_acquire_addr */ + 0xf8cec700, + 0xb702e4b6, + 0x980d1ce0, + 0x00f800ee, +/* 0x0aec: i2c_acquire */ + 0x0add21f5, + 0xf00421f4, + 0x21f403d9, +/* 0x0afb: i2c_release */ + 0xf500f840, + 0xf40add21, + 0xdaf00421, + 0x4021f403, +/* 0x0b0a: i2c_recv */ + 0x32f400f8, + 0xf8c1c701, + 0xb00214b6, + 0x1ff52816, + 0x13a0013a, + 0x32980cf4, + 0xcc13a000, + 0x0031980c, + 0xf90231f4, + 0xf9e0f9d0, + 0x0067f1d0, + 0x0063f100, + 0x01679210, + 0xb60076bb, + 0x50f90465, + 0xbb046594, + 0x50bd0256, + 0xfc0475fd, + 0xec21f550, + 0x0464b60a, 
+ 0xd6b0d0fc, + 0xb31bf500, + 0x0057f000, + 0xb60076bb, + 0x50f90465, + 0xbb046594, + 0x50bd0256, + 0xfc0475fd, + 0x9621f550, + 0x0464b60a, + 0x00d011f5, + 0xbbe0c5c7, 0x65b60076, 0x9450f904, 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x0a9d21f5, + 0x0a3b21f5, 0xf50464b6, - 0xc700d011, - 0x76bbe0c5, + 0xf000ad11, + 0x76bb0157, 0x0465b600, 0x659450f9, 0x0256bb04, 0x75fd50bd, 0xf550fc04, - 0xb60a4221, + 0xb60a9621, 0x11f50464, - 0x57f000ad, - 0x0076bb01, - 0xf90465b6, - 0x04659450, - 0xbd0256bb, - 0x0475fd50, - 0x21f550fc, - 0x64b60a9d, - 0x8a11f504, - 0x0076bb00, - 0xf90465b6, - 0x04659450, - 0xbd0256bb, - 0x0475fd50, - 0x21f550fc, - 0x64b609f0, - 0x6a11f404, - 0xbbe05bcb, - 0x65b60076, - 0x9450f904, - 0x56bb0465, - 0xfd50bd02, - 0x50fc0475, - 0x093521f5, - 0xb90464b6, - 0x74bd025b, -/* 0x0c17: i2c_recv_not_rd08 */ - 0xb0430ef4, - 0x1bf401d6, - 0x0057f03d, - 0x0a9d21f5, - 0xc73311f4, - 0x21f5e0c5, - 0x11f40a42, - 0x0057f029, - 0x0a9d21f5, - 0xc71f11f4, - 0x21f5e0b5, - 0x11f40a42, - 0x3521f515, - 0xc774bd09, - 0x1bf408c5, - 0x0232f409, -/* 0x0c57: i2c_recv_not_wr08 */ -/* 0x0c57: i2c_recv_done */ - 0xc7030ef4, - 0x21f5f8ce, - 0xe0fc0b02, - 0x12f4d0fc, - 0x027cb90a, - 0x033621f5, -/* 0x0c6c: i2c_recv_exit */ -/* 0x0c6e: i2c_init */ + 0x76bb008a, + 0x0465b600, + 0x659450f9, + 0x0256bb04, + 0x75fd50bd, + 0xf550fc04, + 0xb609e921, + 0x11f40464, + 0xe05bcb6a, + 0xb60076bb, + 0x50f90465, + 0xbb046594, + 0x50bd0256, + 0xfc0475fd, + 0x2e21f550, + 0x0464b609, + 0xbd025bb9, + 0x430ef474, +/* 0x0c10: i2c_recv_not_rd08 */ + 0xf401d6b0, + 0x57f03d1b, + 0x9621f500, + 0x3311f40a, + 0xf5e0c5c7, + 0xf40a3b21, + 0x57f02911, + 0x9621f500, + 0x1f11f40a, + 0xf5e0b5c7, + 0xf40a3b21, + 0x21f51511, + 0x74bd092e, + 0xf408c5c7, + 0x32f4091b, + 0x030ef402, +/* 0x0c50: i2c_recv_not_wr08 */ +/* 0x0c50: i2c_recv_done */ + 0xf5f8cec7, + 0xfc0afb21, + 0xf4d0fce0, + 0x7cb90a12, + 0x3621f502, +/* 0x0c65: i2c_recv_exit */ +/* 0x0c67: i2c_init */ + 0xf800f803, +/* 0x0c69: test_recv */ + 0xd817f100, + 0x0614b605, + 0xb60011cf, + 0x07f10110, + 0x04b605d8, + 0x0001d006, + 0xe7f104bd, + 0xe3f1d900, + 0x21f5134f, + 0x00f80256, +/* 0x0c90: test_init */ + 0x0800e7f1, + 0x025621f5, +/* 0x0c9a: idle_recv */ 0x00f800f8, -/* 0x0c70: test_recv */ - 0x05d817f1, - 0xcf0614b6, - 0x10b60011, - 0xd807f101, - 0x0604b605, - 0xbd0001d0, - 0x00e7f104, - 0x4fe3f1d9, - 0x5621f513, -/* 0x0c97: test_init */ - 0xf100f802, - 0xf50800e7, - 0xf8025621, -/* 0x0ca1: idle_recv */ -/* 0x0ca3: idle */ - 0xf400f800, - 0x17f10031, - 0x14b605d4, - 0x0011cf06, - 0xf10110b6, - 0xb605d407, - 0x01d00604, -/* 0x0cbf: idle_loop */ - 0xf004bd00, - 0x32f45817, -/* 0x0cc5: idle_proc */ -/* 0x0cc5: idle_proc_exec */ - 0xb910f902, - 0x21f5021e, - 0x10fc033f, - 0xf40911f4, - 0x0ef40231, -/* 0x0cd9: idle_proc_next */ - 0x5810b6ef, - 0xf4061fb8, - 0x02f4e61b, - 0x0028f4dd, - 0x00bb0ef4, +/* 0x0c9c: idle */ + 0xf10031f4, + 0xb605d417, + 0x11cf0614, + 0x0110b600, + 0x05d407f1, + 0xd00604b6, + 0x04bd0001, +/* 0x0cb8: idle_loop */ + 0xf45817f0, +/* 0x0cbe: idle_proc */ +/* 0x0cbe: idle_proc_exec */ + 0x10f90232, + 0xf5021eb9, + 0xfc033f21, + 0x0911f410, + 0xf40231f4, +/* 0x0cd2: idle_proc_next */ + 0x10b6ef0e, + 0x061fb858, + 0xf4e61bf4, + 0x28f4dd02, + 0xbb0ef400, + 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc index ec03f9a4290b..1663bf943d77 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc @@ -82,15 +82,15 @@ memx_train_tail: // $r0 - zero memx_func_enter: #if NVKM_PPWR_CHIPSET == GT215 - movw $r8 0x1610 + mov $r8 0x1610 nv_rd32($r7, $r8) imm32($r6, 0xfffffffc) and $r7 $r6 - movw $r6 0x2 + mov $r6 0x2 or $r7 $r6 nv_wr32($r8, $r7) #else - movw $r6 0x001620 + mov $r6 0x001620 imm32($r7, ~0x00000aa2); nv_rd32($r8, $r6) and $r8 $r7 @@ -101,7 +101,7 @@ memx_func_enter: and $r8 $r7 nv_wr32($r6, $r8) - movw $r6 0x0026f0 + mov $r6 0x0026f0 nv_rd32($r8, $r6) and $r8 $r7 nv_wr32($r6, $r8) @@ -136,19 +136,19 @@ memx_func_leave: bra nz #memx_func_leave_wait #if NVKM_PPWR_CHIPSET == GT215 - movw $r8 0x1610 + mov $r8 0x1610 nv_rd32($r7, $r8) imm32($r6, 0xffffffcc) and $r7 $r6 nv_wr32($r8, $r7) #else - movw $r6 0x0026f0 + mov $r6 0x0026f0 imm32($r7, 0x00000001) nv_rd32($r8, $r6) or $r8 $r7 nv_wr32($r6, $r8) - movw $r6 0x001620 + mov $r6 0x001620 nv_rd32($r8, $r6) or $r8 $r7 nv_wr32($r6, $r8) @@ -177,11 +177,11 @@ memx_func_wait_vblank: bra #memx_func_wait_vblank_fini memx_func_wait_vblank_head1: - movw $r7 0x20 + mov $r7 0x20 bra #memx_func_wait_vblank_0 memx_func_wait_vblank_head0: - movw $r7 0x8 + mov $r7 0x8 memx_func_wait_vblank_0: nv_iord($r6, NV_PPWR_INPUT) @@ -273,13 +273,13 @@ memx_func_train: // $r5 - outer loop counter // $r6 - inner loop counter // $r7 - entry counter (#memx_train_head + $r7) - movw $r5 0x3 - movw $r7 0x0 + mov $r5 0x3 + mov $r7 0x0 // Read random memory to wake up... things imm32($r9, 0x700000) nv_rd32($r8,$r9) - movw $r14 0x2710 + mov $r14 0x2710 call(nsec) memx_func_train_loop_outer: @@ -289,9 +289,9 @@ memx_func_train: nv_wr32($r9, $r8) push $r5 - movw $r6 0x0 + mov $r6 0x0 memx_func_train_loop_inner: - movw $r8 0x1111 + mov $r8 0x1111 mulu $r9 $r6 $r8 shl b32 $r8 $r9 0x10 or $r8 $r9 @@ -315,7 +315,7 @@ memx_func_train: // $r5 - inner inner loop counter // $r9 - result - movw $r5 0 + mov $r5 0 imm32($r9, 0x8300ffff) memx_func_train_loop_4x: imm32($r10, 0x100080) diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig index c226da145fb3..a349cb61961e 100644 --- a/drivers/gpu/drm/omapdrm/displays/Kconfig +++ b/drivers/gpu/drm/omapdrm/displays/Kconfig @@ -35,6 +35,7 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV config DRM_OMAP_PANEL_DPI tristate "Generic DPI panel" + depends on BACKLIGHT_CLASS_DEVICE help Driver for generic DPI panels. diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index 0a38a0e8c925..a0dfa14f4fab 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -452,6 +452,8 @@ static int td028ttec1_panel_remove(struct spi_device *spi) } static const struct of_device_id td028ttec1_of_match[] = { + { .compatible = "omapdss,tpo,td028ttec1", }, + /* keep to not break older DTB */ { .compatible = "omapdss,toppoly,td028ttec1", }, {}, }; @@ -471,6 +473,7 @@ static struct spi_driver td028ttec1_spi_driver = { module_spi_driver(td028ttec1_spi_driver); +MODULE_ALIAS("spi:tpo,td028ttec1"); MODULE_ALIAS("spi:toppoly,td028ttec1"); MODULE_AUTHOR("H. 
Nikolaus Schaller "); MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver"); diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index daf286fc8a40..ca1e3b489540 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -566,8 +566,8 @@ static int dpi_verify_pll(struct dss_pll *pll) } static const struct soc_device_attribute dpi_soc_devices[] = { - { .family = "OMAP3[456]*" }, - { .family = "[AD]M37*" }, + { .machine = "OMAP3[456]*" }, + { .machine = "[AD]M37*" }, { /* sentinel */ } }; diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index d1755f12236b..41ebb37aaa79 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1299,88 +1299,18 @@ static const struct soc_device_attribute dss_soc_devices[] = { static int dss_bind(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct resource *dss_mem; - u32 rev; int r; - dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0); - dss.base = devm_ioremap_resource(&pdev->dev, dss_mem); - if (IS_ERR(dss.base)) - return PTR_ERR(dss.base); - - r = dss_get_clocks(); + r = component_bind_all(dev, NULL); if (r) return r; - r = dss_setup_default_clock(); - if (r) - goto err_setup_clocks; - - r = dss_video_pll_probe(pdev); - if (r) - goto err_pll_init; - - r = dss_init_ports(pdev); - if (r) - goto err_init_ports; - - pm_runtime_enable(&pdev->dev); - - r = dss_runtime_get(); - if (r) - goto err_runtime_get; - - dss.dss_clk_rate = clk_get_rate(dss.dss_clk); - - /* Select DPLL */ - REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); - - dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); - -#ifdef CONFIG_OMAP2_DSS_VENC - REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ - REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ - REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ -#endif - dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK; - dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK; - dss.dispc_clk_source = DSS_CLK_SRC_FCK; - dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK; - dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK; - - rev = dss_read_reg(DSS_REVISION); - pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); - - dss_runtime_put(); - - r = component_bind_all(&pdev->dev, NULL); - if (r) - goto err_component; - - dss_debugfs_create_file("dss", dss_dump_regs); - pm_set_vt_switch(0); omapdss_gather_components(dev); omapdss_set_is_initialized(true); return 0; - -err_component: -err_runtime_get: - pm_runtime_disable(&pdev->dev); - dss_uninit_ports(pdev); -err_init_ports: - if (dss.video1_pll) - dss_video_pll_uninit(dss.video1_pll); - - if (dss.video2_pll) - dss_video_pll_uninit(dss.video2_pll); -err_pll_init: -err_setup_clocks: - dss_put_clocks(); - return r; } static void dss_unbind(struct device *dev) @@ -1390,18 +1320,6 @@ static void dss_unbind(struct device *dev) omapdss_set_is_initialized(false); component_unbind_all(&pdev->dev, NULL); - - if (dss.video1_pll) - dss_video_pll_uninit(dss.video1_pll); - - if (dss.video2_pll) - dss_video_pll_uninit(dss.video2_pll); - - dss_uninit_ports(pdev); - - pm_runtime_disable(&pdev->dev); - - dss_put_clocks(); } static const struct component_master_ops dss_component_ops = { @@ -1433,10 +1351,46 @@ static int dss_add_child_component(struct device *dev, void *data) return 0; } +static int dss_probe_hardware(void) +{ + u32 rev; + int r; + + r = dss_runtime_get(); + if (r) + return r; + + dss.dss_clk_rate = clk_get_rate(dss.dss_clk); + + /* Select DPLL */ + 
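Note: the REG_FLD_MOD() calls that follow are read-modify-write updates of individual bit fields in DSS_CONTROL. A minimal sketch of that pattern in standalone C, with helper names invented for illustration (not the omapdss macros themselves):

    #include <stdint.h>

    /* Build a mask covering bit positions [low, high], inclusive. */
    static inline uint32_t field_mask(unsigned int high, unsigned int low)
    {
            return (~0u >> (31 - high)) & (~0u << low);
    }

    /* The shape of REG_FLD_MOD(reg, val, high, low): replace one bit
     * field, preserve every other bit of the register. */
    static inline uint32_t fld_mod(uint32_t orig, uint32_t val,
                                   unsigned int high, unsigned int low)
    {
            uint32_t mask = field_mask(high, low);

            return (orig & ~mask) | ((val << low) & mask);
    }

Under this reading, REG_FLD_MOD(DSS_CONTROL, 0, 0, 0) clears bit 0 to select the DPLL, as the comment above says, while leaving the rest of the register intact.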
REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); + + dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); + +#ifdef CONFIG_OMAP2_DSS_VENC + REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ + REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ + REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ +#endif + dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK; + dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK; + dss.dispc_clk_source = DSS_CLK_SRC_FCK; + dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK; + dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK; + + rev = dss_read_reg(DSS_REVISION); + pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); + + dss_runtime_put(); + + return 0; +} + static int dss_probe(struct platform_device *pdev) { const struct soc_device_attribute *soc; struct component_match *match = NULL; + struct resource *dss_mem; int r; dss.pdev = pdev; @@ -1451,20 +1405,69 @@ static int dss_probe(struct platform_device *pdev) else dss.feat = of_match_device(dss_of_match, &pdev->dev)->data; - r = dss_initialize_debugfs(); + /* Map I/O registers, get and setup clocks. */ + dss_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dss.base = devm_ioremap_resource(&pdev->dev, dss_mem); + if (IS_ERR(dss.base)) + return PTR_ERR(dss.base); + + r = dss_get_clocks(); if (r) return r; - /* add all the child devices as components */ + r = dss_setup_default_clock(); + if (r) + goto err_put_clocks; + + /* Setup the video PLLs and the DPI and SDI ports. */ + r = dss_video_pll_probe(pdev); + if (r) + goto err_put_clocks; + + r = dss_init_ports(pdev); + if (r) + goto err_uninit_plls; + + /* Enable runtime PM and probe the hardware. */ + pm_runtime_enable(&pdev->dev); + + r = dss_probe_hardware(); + if (r) + goto err_pm_runtime_disable; + + /* Initialize debugfs. */ + r = dss_initialize_debugfs(); + if (r) + goto err_pm_runtime_disable; + + dss_debugfs_create_file("dss", dss_dump_regs); + + /* Add all the child devices as components. 
*/ device_for_each_child(&pdev->dev, &match, dss_add_child_component); r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match); - if (r) { - dss_uninitialize_debugfs(); - return r; - } + if (r) + goto err_uninit_debugfs; return 0; + +err_uninit_debugfs: + dss_uninitialize_debugfs(); + +err_pm_runtime_disable: + pm_runtime_disable(&pdev->dev); + dss_uninit_ports(pdev); + +err_uninit_plls: + if (dss.video1_pll) + dss_video_pll_uninit(dss.video1_pll); + if (dss.video2_pll) + dss_video_pll_uninit(dss.video2_pll); + +err_put_clocks: + dss_put_clocks(); + + return r; } static int dss_remove(struct platform_device *pdev) @@ -1473,6 +1476,18 @@ static int dss_remove(struct platform_device *pdev) dss_uninitialize_debugfs(); + pm_runtime_disable(&pdev->dev); + + dss_uninit_ports(pdev); + + if (dss.video1_pll) + dss_video_pll_uninit(dss.video1_pll); + + if (dss.video2_pll) + dss_video_pll_uninit(dss.video2_pll); + + dss_put_clocks(); + return 0; } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index f169348da377..ef3731d2f2e7 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -634,7 +634,7 @@ static int hdmi_audio_config(struct device *dev, struct omap_dss_audio *dss_audio) { struct omap_hdmi *hd = dev_get_drvdata(dev); - int ret; + int ret = 0; mutex_lock(&hd->lock); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index 365cf07daa01..1359bf50598f 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c @@ -889,25 +889,36 @@ struct hdmi4_features { bool audio_use_mclk; }; -static const struct hdmi4_features hdmi4_es1_features = { +static const struct hdmi4_features hdmi4430_es1_features = { .cts_swmode = false, .audio_use_mclk = false, }; -static const struct hdmi4_features hdmi4_es2_features = { +static const struct hdmi4_features hdmi4430_es2_features = { .cts_swmode = true, .audio_use_mclk = false, }; -static const struct hdmi4_features hdmi4_es3_features = { +static const struct hdmi4_features hdmi4_features = { .cts_swmode = true, .audio_use_mclk = true, }; static const struct soc_device_attribute hdmi4_soc_devices[] = { - { .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features }, - { .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features }, - { .family = "OMAP4", .data = &hdmi4_es3_features }, + { + .machine = "OMAP4430", + .revision = "ES1.?", + .data = &hdmi4430_es1_features, + }, + { + .machine = "OMAP4430", + .revision = "ES2.?", + .data = &hdmi4430_es2_features, + }, + { + .family = "OMAP4", + .data = &hdmi4_features, + }, { /* sentinel */ } }; @@ -915,8 +926,13 @@ int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core) { const struct hdmi4_features *features; struct resource *res; + const struct soc_device_attribute *soc; - features = soc_device_match(hdmi4_soc_devices)->data; + soc = soc_device_match(hdmi4_soc_devices); + if (!soc) + return -ENODEV; + + features = soc->data; core->cts_swmode = features->cts_swmode; core->audio_use_mclk = features->audio_use_mclk; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index b3221ca5bcd8..26db0ce7a085 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -660,7 +660,7 @@ static int hdmi_audio_config(struct device *dev, struct omap_dss_audio *dss_audio) { struct omap_hdmi *hd = dev_get_drvdata(dev); - int ret; + int ret = 0; 
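Note: the int ret = 0 initializer added here and in the hdmi4 variant above guards the early-exit paths of hdmi_audio_config() that never assign ret. The bug class, reduced to a standalone hypothetical function:

    #include <stdbool.h>

    static int do_work(void)
    {
            return 0;       /* may also fail with a negative errno */
    }

    static int configure(bool enabled)
    {
            int ret = 0;    /* without the initializer, the early exit
                             * below returns an indeterminate value */

            if (!enabled)
                    goto out;       /* nothing to do; report success */

            ret = do_work();
    out:
            return ret;
    }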
mutex_lock(&hd->lock); diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index aa5ba9ae2191..556335ecb2b7 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -123,6 +123,9 @@ static int omap_connector_get_modes(struct drm_connector *connector) if (dssdrv->read_edid) { void *edid = kzalloc(MAX_EDID, GFP_KERNEL); + if (!edid) + return 0; + if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) && drm_edid_is_valid(edid)) { drm_mode_connector_update_edid_property( @@ -141,6 +144,9 @@ static int omap_connector_get_modes(struct drm_connector *connector) struct drm_display_mode *mode = drm_mode_create(dev); struct videomode vm = {0}; + if (!mode) + return 0; + dssdrv->get_timings(dssdev, &vm); drm_display_mode_from_videomode(&vm, mode); @@ -196,6 +202,10 @@ static int omap_connector_mode_valid(struct drm_connector *connector, if (!r) { /* check if vrefresh is still valid */ new_mode = drm_mode_duplicate(dev, mode); + + if (!new_mode) + return MODE_BAD; + new_mode->clock = vm.pixelclock / 1000; new_mode->vrefresh = 0; if (mode->vrefresh == drm_mode_vrefresh(new_mode)) diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index 1dd3dafc59af..df05fe53c399 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -298,7 +298,12 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait) msecs_to_jiffies(100))) { dev_err(dmm->dev, "timed out waiting for done\n"); ret = -ETIMEDOUT; + goto cleanup; } + + /* Check the engine status before continuing */ + ret = wait_status(engine, DMM_PATSTATUS_READY | + DMM_PATSTATUS_VALID | DMM_PATSTATUS_DONE); } cleanup: @@ -384,12 +389,16 @@ int tiler_unpin(struct tiler_block *block) struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, uint16_t h, uint16_t align) { - struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL); + struct tiler_block *block; u32 min_align = 128; int ret; unsigned long flags; u32 slot_bytes; + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return ERR_PTR(-ENOMEM); + BUG_ON(!validfmt(fmt)); /* convert width/height to slots */ @@ -638,7 +647,8 @@ static int omap_dmm_probe(struct platform_device *dev) match = of_match_node(dmm_of_match, dev->dev.of_node); if (!match) { dev_err(&dev->dev, "failed to find matching device node\n"); - return -ENODEV; + ret = -ENODEV; + goto fail; } omap_dmm->plat_data = match->data; diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c index c10fdfc0930f..1cd39507b634 100644 --- a/drivers/gpu/drm/omapdrm/tcm-sita.c +++ b/drivers/gpu/drm/omapdrm/tcm-sita.c @@ -92,7 +92,7 @@ static int l2r_t2b(uint16_t w, uint16_t h, uint16_t a, int16_t offset, { int i; unsigned long index; - bool area_free; + bool area_free = false; unsigned long slots_per_band = PAGE_SIZE / slot_bytes; unsigned long bit_offset = (offset > 0) ?
offset / slot_bytes : 0; unsigned long curr_bit = bit_offset; diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 474fa759e06e..fc56d033febe 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -369,6 +369,7 @@ static int panel_simple_remove(struct device *dev) drm_panel_remove(&panel->base); panel_simple_disable(&panel->base); + panel_simple_unprepare(&panel->base); if (panel->ddc) put_device(&panel->ddc->dev); @@ -384,6 +385,7 @@ static void panel_simple_shutdown(struct device *dev) struct panel_simple *panel = dev_get_drvdata(dev); panel_simple_disable(&panel->base); + panel_simple_unprepare(&panel->base); } static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = { @@ -1559,7 +1561,7 @@ static const struct panel_desc ontat_yx700wv03 = { .width = 154, .height = 83, }, - .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_format = MEDIA_BUS_FMT_RGB666_1X18, }; static const struct drm_display_mode ortustech_com43h4m85ulc_mode = { diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 74fc9362ecf9..3eb920851141 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -388,7 +388,11 @@ void qxl_io_create_primary(struct qxl_device *qdev, create->width = bo->surf.width; create->height = bo->surf.height; create->stride = bo->surf.stride; - create->mem = qxl_bo_physical_address(qdev, bo, offset); + if (bo->shadow) { + create->mem = qxl_bo_physical_address(qdev, bo->shadow, offset); + } else { + create->mem = qxl_bo_physical_address(qdev, bo, offset); + } QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem, bo->kptr); diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index afbf50d0c08f..573bab222123 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -289,6 +289,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc) { struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc); + qxl_bo_unref(&qxl_crtc->cursor_bo); drm_crtc_cleanup(crtc); kfree(qxl_crtc); } @@ -305,7 +306,9 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = { void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); + struct qxl_bo *bo = gem_to_qxl_bo(qxl_fb->obj); + WARN_ON(bo->shadow); drm_gem_object_unreference_unlocked(qxl_fb->obj); drm_framebuffer_cleanup(fb); kfree(qxl_fb); @@ -493,6 +496,53 @@ static int qxl_primary_atomic_check(struct drm_plane *plane, return 0; } +static int qxl_primary_apply_cursor(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct qxl_device *qdev = dev->dev_private; + struct drm_framebuffer *fb = plane->state->fb; + struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc); + struct qxl_cursor_cmd *cmd; + struct qxl_release *release; + int ret = 0; + + if (!qcrtc->cursor_bo) + return 0; + + ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), + QXL_RELEASE_CURSOR_CMD, + &release, NULL); + if (ret) + return ret; + + ret = qxl_release_list_add(release, qcrtc->cursor_bo); + if (ret) + goto out_free_release; + + ret = qxl_release_reserve_list(release, false); + if (ret) + goto out_free_release; + + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); + cmd->type = QXL_CURSOR_SET; + cmd->u.set.position.x = plane->state->crtc_x + fb->hot_x; + cmd->u.set.position.y = plane->state->crtc_y + fb->hot_y; + + cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0); + + 
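Note: qxl_primary_apply_cursor() above re-issues a QXL_CURSOR_SET command from the buffer object cached in qxl_crtc; that cache is maintained by the swap-and-deferred-unref added to qxl_cursor_atomic_update() further down. The ownership pattern, sketched with generic names rather than the qxl API:

    struct buf { int refcount; };

    static void buf_unref(struct buf **b)
    {
            if (*b && --(*b)->refcount == 0) {
                    /* free the buffer in a real implementation */
            }
            *b = NULL;
    }

    /* The slot takes over the caller's reference to new_buf; the old
     * buffer is dropped only after the command that stops scanning it
     * out has been pushed and fenced. */
    static void set_cursor_buf(struct buf **slot, struct buf *new_buf)
    {
            struct buf *old = *slot;

            *slot = new_buf;
            /* ... push the cursor command and fence it ... */
            buf_unref(&old);
    }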
cmd->u.set.visible = 1; + qxl_release_unmap(qdev, release, &cmd->release_info); + + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); + qxl_release_fence_buffer_objects(release); + + return ret; + +out_free_release: + qxl_release_free(qdev, release); + return ret; +} + static void qxl_primary_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { @@ -508,6 +558,8 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, .x2 = qfb->base.width, .y2 = qfb->base.height }; + int ret; + bool same_shadow = false; if (old_state->fb) { qfb_old = to_qxl_framebuffer(old_state->fb); @@ -519,15 +571,28 @@ static void qxl_primary_atomic_update(struct drm_plane *plane, if (bo == bo_old) return; + if (bo_old && bo_old->shadow && bo->shadow && + bo_old->shadow == bo->shadow) { + same_shadow = true; + } + if (bo_old && bo_old->is_primary) { - qxl_io_destroy_primary(qdev); + if (!same_shadow) + qxl_io_destroy_primary(qdev); bo_old->is_primary = false; + + ret = qxl_primary_apply_cursor(plane); + if (ret) + DRM_ERROR( + "could not set cursor after creating primary"); } if (!bo->is_primary) { - qxl_io_create_primary(qdev, 0, bo); + if (!same_shadow) + qxl_io_create_primary(qdev, 0, bo); bo->is_primary = true; } + qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1); } @@ -560,11 +625,12 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, struct drm_device *dev = plane->dev; struct qxl_device *qdev = dev->dev_private; struct drm_framebuffer *fb = plane->state->fb; + struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc); struct qxl_release *release; struct qxl_cursor_cmd *cmd; struct qxl_cursor *cursor; struct drm_gem_object *obj; - struct qxl_bo *cursor_bo, *user_bo = NULL; + struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL; int ret; void *user_ptr; int size = 64*64*4; @@ -617,6 +683,10 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); cmd->type = QXL_CURSOR_SET; + + old_cursor_bo = qcrtc->cursor_bo; + qcrtc->cursor_bo = cursor_bo; + cursor_bo = NULL; } else { ret = qxl_release_reserve_list(release, true); @@ -634,6 +704,11 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + if (old_cursor_bo) + qxl_bo_unref(&old_cursor_bo); + + qxl_bo_unref(&cursor_bo); + return; out_backoff: @@ -679,8 +754,9 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane, static int qxl_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { + struct qxl_device *qdev = plane->dev->dev_private; struct drm_gem_object *obj; - struct qxl_bo *user_bo; + struct qxl_bo *user_bo, *old_bo = NULL; int ret; if (!new_state->fb) @@ -689,6 +765,32 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane, obj = to_qxl_framebuffer(new_state->fb)->obj; user_bo = gem_to_qxl_bo(obj); + if (plane->type == DRM_PLANE_TYPE_PRIMARY && + user_bo->is_dumb && !user_bo->shadow) { + if (plane->state->fb) { + obj = to_qxl_framebuffer(plane->state->fb)->obj; + old_bo = gem_to_qxl_bo(obj); + } + if (old_bo && old_bo->shadow && + user_bo->gem_base.size == old_bo->gem_base.size && + plane->state->crtc == new_state->crtc && + plane->state->crtc_w == new_state->crtc_w && + plane->state->crtc_h == new_state->crtc_h && + plane->state->src_x == new_state->src_x && + plane->state->src_y == new_state->src_y && + plane->state->src_w == new_state->src_w && 
+ plane->state->src_h == new_state->src_h && + plane->state->rotation == new_state->rotation && + plane->state->zpos == new_state->zpos) { + drm_gem_object_get(&old_bo->shadow->gem_base); + user_bo->shadow = old_bo->shadow; + } else { + qxl_bo_create(qdev, user_bo->gem_base.size, + true, true, QXL_GEM_DOMAIN_VRAM, NULL, + &user_bo->shadow); + } + } + ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); if (ret) return ret; @@ -713,6 +815,11 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane, obj = to_qxl_framebuffer(old_state->fb)->obj; user_bo = gem_to_qxl_bo(obj); qxl_bo_unpin(user_bo); + + if (user_bo->shadow && !user_bo->is_primary) { + drm_gem_object_put_unlocked(&user_bo->shadow->gem_base); + user_bo->shadow = NULL; + } } static const uint32_t qxl_cursor_plane_formats[] = { diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 3397a1907336..c0a927efa653 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -113,6 +113,8 @@ struct qxl_bo { /* Constant after initialization */ struct drm_gem_object gem_base; bool is_primary; /* is this now a primary surface */ + bool is_dumb; + struct qxl_bo *shadow; bool hw_surf_alloc; struct qxl_surface surf; uint32_t surface_id; @@ -133,6 +135,8 @@ struct qxl_bo_list { struct qxl_crtc { struct drm_crtc base; int index; + + struct qxl_bo *cursor_bo; }; struct qxl_output { diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c index 5e65d5d2d937..11085ab01374 100644 --- a/drivers/gpu/drm/qxl/qxl_dumb.c +++ b/drivers/gpu/drm/qxl/qxl_dumb.c @@ -63,6 +63,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv, &handle); if (r) return r; + qobj->is_dumb = true; args->pitch = pitch; args->handle = handle; return 0; diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 432cb46f6a34..fd7682bf335d 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -45,34 +45,32 @@ static char *pre_emph_names[] = { /***** radeon AUX functions *****/ -/* Atom needs data in little endian format - * so swap as appropriate when copying data to - * or from atom. Note that atom operates on - * dw units. +/* Atom needs data in little endian format so swap as appropriate when copying + * data to or from atom. Note that atom operates on dw units. + * + * Use to_le=true when sending data to atom and provide at least + * ALIGN(num_bytes,4) bytes in the dst buffer. + * + * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4) + * bytes in the src buffer.
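Note: the rewritten radeon_atom_copy_swap() below drops the old partial-dword tail handling by always copying a whole number of dwords, which is why the comment requires ALIGN(num_bytes, 4) of room on the padded side. A standalone sketch of the to_le direction (generic C, not the driver function itself):

    #include <stdint.h>
    #include <string.h>

    #define ALIGN4(x) (((x) + 3u) & ~3u)

    /* On a big-endian host, cpu_to_le32() reduces to a byte swap. */
    static uint32_t bswap32(uint32_t v)
    {
            return ((v & 0xff) << 24) | ((v & 0xff00) << 8) |
                   ((v >> 8) & 0xff00) | (v >> 24);
    }

    static void copy_swap_to_le(uint8_t *dst, const uint8_t *src,
                                uint8_t num_bytes)
    {
            uint32_t tmp[5] = { 0 };        /* 20 bytes, the max used here;
                                             * zeroed so the pad bytes of
                                             * the last dword are defined */
            unsigned int i, n = ALIGN4(num_bytes);

            memcpy(tmp, src, num_bytes);
            for (i = 0; i < n / 4; i++)
                    tmp[i] = bswap32(tmp[i]);
            memcpy(dst, tmp, n);    /* writes ALIGN4(num_bytes) to dst */
    }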
*/ void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) { #ifdef __BIG_ENDIAN - u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ - u32 *dst32, *src32; + u32 src_tmp[5], dst_tmp[5]; int i; + u8 align_num_bytes = ALIGN(num_bytes, 4); - memcpy(src_tmp, src, num_bytes); - src32 = (u32 *)src_tmp; - dst32 = (u32 *)dst_tmp; if (to_le) { - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = cpu_to_le32(src32[i]); - memcpy(dst, dst_tmp, num_bytes); + memcpy(src_tmp, src, num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = cpu_to_le32(src_tmp[i]); + memcpy(dst, dst_tmp, align_num_bytes); } else { - u8 dws = num_bytes & ~3; - for (i = 0; i < ((num_bytes + 3) / 4); i++) - dst32[i] = le32_to_cpu(src32[i]); - memcpy(dst, dst_tmp, dws); - if (num_bytes % 4) { - for (i = 0; i < (num_bytes % 4); i++) - dst[dws+i] = dst_tmp[dws+i]; - } + memcpy(src_tmp, src, align_num_bytes); + for (i = 0; i < align_num_bytes / 4; i++) + dst_tmp[i] = le32_to_cpu(src_tmp[i]); + memcpy(dst, dst_tmp, num_bytes); } #else memcpy(dst, src, num_bytes); diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 3cb6c55b268d..ce8b353b5753 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3229,35 +3229,8 @@ static void cik_gpu_init(struct radeon_device *rdev) case CHIP_KAVERI: rdev->config.cik.max_shader_engines = 1; rdev->config.cik.max_tile_pipes = 4; - if ((rdev->pdev->device == 0x1304) || - (rdev->pdev->device == 0x1305) || - (rdev->pdev->device == 0x130C) || - (rdev->pdev->device == 0x130F) || - (rdev->pdev->device == 0x1310) || - (rdev->pdev->device == 0x1311) || - (rdev->pdev->device == 0x131C)) { - rdev->config.cik.max_cu_per_sh = 8; - rdev->config.cik.max_backends_per_se = 2; - } else if ((rdev->pdev->device == 0x1309) || - (rdev->pdev->device == 0x130A) || - (rdev->pdev->device == 0x130D) || - (rdev->pdev->device == 0x1313) || - (rdev->pdev->device == 0x131D)) { - rdev->config.cik.max_cu_per_sh = 6; - rdev->config.cik.max_backends_per_se = 2; - } else if ((rdev->pdev->device == 0x1306) || - (rdev->pdev->device == 0x1307) || - (rdev->pdev->device == 0x130B) || - (rdev->pdev->device == 0x130E) || - (rdev->pdev->device == 0x1315) || - (rdev->pdev->device == 0x1318) || - (rdev->pdev->device == 0x131B)) { - rdev->config.cik.max_cu_per_sh = 4; - rdev->config.cik.max_backends_per_se = 1; - } else { - rdev->config.cik.max_cu_per_sh = 3; - rdev->config.cik.max_backends_per_se = 1; - } + rdev->config.cik.max_cu_per_sh = 8; + rdev->config.cik.max_backends_per_se = 2; rdev->config.cik.max_sh_per_se = 1; rdev->config.cik.max_texture_channel_caches = 4; rdev->config.cik.max_gprs = 256; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 2f642cbefd8e..337d3a1c2a40 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -90,25 +90,18 @@ void radeon_connector_hotplug(struct drm_connector *connector) /* don't do anything if sink is not display port, i.e., * passive dp->(dvi|hdmi) adaptor */ - if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { - int saved_dpms = connector->dpms; - /* Only turn off the display if it's physically disconnected */ - if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - } else if (radeon_dp_needs_link_train(radeon_connector)) { - /* Don't try to start link training before we - * have the dpcd */ - if 
(!radeon_dp_getdpcd(radeon_connector)) - return; - - /* set it to OFF so that drm_helper_connector_dpms() - * won't return immediately since the current state - * is ON at this point. - */ - connector->dpms = DRM_MODE_DPMS_OFF; - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - } - connector->dpms = saved_dpms; + if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT && + radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && + radeon_dp_needs_link_train(radeon_connector)) { + /* Don't start link training before we have the DPCD */ + if (!radeon_dp_getdpcd(radeon_connector)) + return; + + /* Turn the connector off and back on immediately, which + * will trigger link training + */ + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); } } } @@ -860,7 +853,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector) return ret; } -static int radeon_lvds_mode_valid(struct drm_connector *connector, +static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_encoder *encoder = radeon_best_single_encoder(connector); @@ -900,9 +893,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); @@ -925,8 +920,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) /* check acpi lid status ??? 
*/ radeon_connector_update_scratch_regs(connector, ret); - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } + return ret; } @@ -1014,7 +1013,7 @@ static int radeon_vga_get_modes(struct drm_connector *connector) return ret; } -static int radeon_vga_mode_valid(struct drm_connector *connector, +static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; @@ -1040,9 +1039,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } encoder = radeon_best_single_encoder(connector); if (!encoder) @@ -1109,8 +1110,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force) radeon_connector_update_scratch_regs(connector, ret); out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } @@ -1154,7 +1157,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector) return 1; } -static int radeon_tv_mode_valid(struct drm_connector *connector, +static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { if ((mode->hdisplay > 1024) || (mode->vdisplay > 768)) @@ -1174,9 +1177,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force) if (!radeon_connector->dac_load_detect) return ret; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } encoder = radeon_best_single_encoder(connector); if (!encoder) @@ -1188,8 +1193,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force) if (ret == connector_status_connected) ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false); radeon_connector_update_scratch_regs(connector, ret); - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } + return ret; } @@ -1252,9 +1261,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; bool dret = false, broken_edid = false; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (radeon_connector->detected_hpd_without_ddc) { force = true; @@ -1437,8 +1448,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) } exit: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if 
(!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } @@ -1486,7 +1499,7 @@ static void radeon_dvi_force(struct drm_connector *connector) radeon_connector->use_digital = true; } -static int radeon_dvi_mode_valid(struct drm_connector *connector, +static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; @@ -1689,9 +1702,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (radeon_dig_connector->is_mst) return connector_status_disconnected; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (!force && radeon_check_hpd_status_unchanged(connector)) { ret = connector->status; @@ -1778,13 +1793,15 @@ radeon_dp_detect(struct drm_connector *connector, bool force) } out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } -static int radeon_dp_mode_valid(struct drm_connector *connector, +static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index ffc10cadcf34..58488eac8462 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -139,6 +139,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = { * https://bugs.freedesktop.org/show_bug.cgi?id=101491 */ { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, + /* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU + * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52 + */ + { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX }, { 0, 0, 0, 0, 0 }, }; @@ -1397,6 +1401,10 @@ int radeon_device_init(struct radeon_device *rdev, if ((rdev->flags & RADEON_IS_PCI) && (rdev->family <= CHIP_RS740)) rdev->need_dma32 = true; +#ifdef CONFIG_PPC64 + if (rdev->family == CHIP_CEDAR) + rdev->need_dma32 = true; +#endif dma_bits = rdev->need_dma32 ? 
32 : 40; r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index fd25361ac681..4ef967d1a9de 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -245,7 +245,6 @@ static int radeonfb_create(struct drm_fb_helper *helper, } info->par = rfbdev; - info->skip_vt_switch = true; ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); if (ret) { diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 3386452bd2f0..ac467b80edc7 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -34,8 +34,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj) struct radeon_bo *robj = gem_to_radeon_bo(gobj); if (robj) { - if (robj->gem_base.import_attach) - drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); radeon_mn_unregister(robj); radeon_bo_unref(&robj); } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 093594976126..b19a54dd18de 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -82,6 +82,8 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) mutex_unlock(&bo->rdev->gem.mutex); radeon_bo_clear_surface_reg(bo); WARN_ON_ONCE(!list_empty(&bo->va)); + if (bo->gem_base.import_attach) + drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg); drm_gem_object_release(&bo->gem_base); kfree(bo); } @@ -238,9 +240,10 @@ int radeon_bo_create(struct radeon_device *rdev, * may be slow * See https://bugs.freedesktop.org/show_bug.cgi?id=88758 */ - +#ifndef CONFIG_COMPILE_TEST #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ thanks to write-combining +#endif if (bo->flags & RADEON_GEM_GTT_WC) DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 326ad068c15a..4b6542538ff9 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -47,7 +47,6 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev); static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); static void radeon_pm_update_profile(struct radeon_device *rdev); static void radeon_pm_set_clocks(struct radeon_device *rdev); -static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev); int radeon_pm_get_type_index(struct radeon_device *rdev, enum radeon_pm_state_type ps_type, @@ -80,8 +79,6 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev) radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); } mutex_unlock(&rdev->pm.mutex); - /* allow new DPM state to be picked */ - radeon_pm_compute_clocks_dpm(rdev); } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { if (rdev->pm.profile == PM_PROFILE_AUTO) { mutex_lock(&rdev->pm.mutex); @@ -885,8 +882,7 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; /* balanced states don't exist at the moment */ if (dpm_state == POWER_STATE_TYPE_BALANCED) - dpm_state = rdev->pm.dpm.ac_power ? 
- POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY; + dpm_state = POWER_STATE_TYPE_PERFORMANCE; restart_search: /* Pick the best power state based on current conditions */ diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index d34d1cf33895..95f4db70dd22 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -995,7 +995,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev, /* calc dclk divider with current vco freq */ dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk, pd_min, pd_even); - if (vclk_div > pd_max) + if (dclk_div > pd_max) break; /* vco is too big, it has to stop */ /* calc score with current vco freq */ diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index ee3e74266a13..90d5b41007bf 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2984,6 +2984,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, (rdev->pdev->device == 0x6667)) { max_sclk = 75000; } + if ((rdev->pdev->revision == 0xC3) || + (rdev->pdev->device == 0x6665)) { + max_sclk = 60000; + max_mclk = 80000; + } } else if (rdev->family == CHIP_OLAND) { if ((rdev->pdev->revision == 0xC7) || (rdev->pdev->revision == 0x80) || @@ -5907,9 +5912,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev, { u32 lane_width; u32 new_lane_width = - (radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; + ((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; u32 current_lane_width = - (radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; + ((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; if (new_lane_width != current_lane_width) { radeon_set_pcie_lanes(rdev, new_lane_width); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c index 12d22f3db1af..6a4b8c98a719 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c @@ -59,11 +59,8 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds, rcar_lvds_write(lvds, LVDPLLCR, pllcr); - /* - * Select the input, hardcode mode 0, enable LVDS operation and turn - * bias circuitry on. - */ - lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_BEN | LVDCR0_LVEN; + /* Select the input and set the LVDS mode. */ + lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT; if (rcrtc->index == 2) lvdcr0 |= LVDCR0_DUSEL; rcar_lvds_write(lvds, LVDCR0, lvdcr0); @@ -74,6 +71,10 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds, LVDCR1_CHSTBY_GEN2(1) | LVDCR1_CHSTBY_GEN2(0) | LVDCR1_CLKSTBY_GEN2); + /* Enable LVDS operation and turn bias circuitry on. */ + lvdcr0 |= LVDCR0_BEN | LVDCR0_LVEN; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + /* * Turn the PLL on, wait for the startup delay, and turn the output * on. @@ -95,7 +96,7 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds, u32 lvdcr0; u32 pllcr; - /* PLL clock configuration */ + /* Set the PLL clock configuration and LVDS mode. 
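Note: both the gen2 and gen3 LVDS start sequences below are reworked so LVDCR0 is programmed in stages: select the input and mode first, bring up the channels, and only then OR in the enable and PLL bits. The staged-write pattern, with the register access and bit positions invented for the sketch (not the rcar-du register layout):

    #include <stdint.h>

    #define LVMD_SHIFT      1               /* hypothetical field offsets */
    #define BEN             (1u << 22)
    #define LVEN            (1u << 23)
    #define PLLON           (1u << 24)

    static void reg_write(uint32_t val)
    {
            /* iowrite32(val, base + LVDCR0) in a real driver */
            (void)val;
    }

    static void lvds_start(uint32_t mode)
    {
            uint32_t val = mode << LVMD_SHIFT;      /* 1: input and mode */

            reg_write(val);
            /* ... configure the channels via other registers ... */
            val |= BEN | LVEN;                      /* 2: bias and output */
            reg_write(val);
            val |= PLLON;                           /* 3: PLL, then delay */
            reg_write(val);
    }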
*/ if (freq < 42000) pllcr = LVDPLLCR_PLLDIVCNT_42M; else if (freq < 85000) @@ -107,6 +108,9 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds, rcar_lvds_write(lvds, LVDPLLCR, pllcr); + lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + /* Turn all the channels on. */ rcar_lvds_write(lvds, LVDCR1, LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) | @@ -117,7 +121,7 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds, * Turn the PLL on, set it to LVDS normal mode, wait for the startup * delay and turn the output on. */ - lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_PLLON; + lvdcr0 |= LVDCR0_PLLON; rcar_lvds_write(lvds, LVDCR0, lvdcr0); lvdcr0 |= LVDCR0_PWD; diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c index 9a20b9dc27c8..f7fc652b0027 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c @@ -1275,8 +1275,6 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master, goto err_pllref; } - pm_runtime_enable(dev); - dsi->dsi_host.ops = &dw_mipi_dsi_host_ops; dsi->dsi_host.dev = dev; ret = mipi_dsi_host_register(&dsi->dsi_host); @@ -1291,6 +1289,7 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master, } dev_set_drvdata(dev, dsi); + pm_runtime_enable(dev); return 0; err_mipi_dsi_host: diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 1869c8bb76c8..bde65186a3c3 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -262,7 +262,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). */ vma->vm_flags &= ~VM_PFNMAP; - vma->vm_pgoff = 0; if (rk_obj->pages) ret = rockchip_drm_gem_object_mmap_iommu(obj, vma); @@ -297,6 +296,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) if (ret) return ret; + /* + * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the + * whole buffer from the start. 
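Note for the rockchip hunks below: DRM encodes which GEM object an mmap() call targets in vma->vm_pgoff, and once the object has been looked up that fake offset must be cleared so the backing memory is mapped from its start; the change moves the reset out of the IOMMU-only path into the common one. A simplified sketch of the flow (assumed helpers, not the full rockchip implementation):

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <drm/drm_gem.h>

    static int gem_mmap_sketch(struct file *filp, struct vm_area_struct *vma)
    {
            int ret;

            /* drm_gem_mmap() resolves vma->vm_pgoff to a GEM object and
             * stores it in vma->vm_private_data. */
            ret = drm_gem_mmap(filp, vma);
            if (ret)
                    return ret;

            /* The fake offset has served its purpose: map the whole
             * buffer from offset zero. */
            vma->vm_pgoff = 0;

            /* ... hand off to the object-specific mmap here ... */
            return 0;
    }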
+ */ + vma->vm_pgoff = 0; + obj = vma->vm_private_data; return rockchip_drm_gem_object_mmap(obj, vma); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index bf9ed0e63973..f1fa8d5c9b52 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1413,6 +1413,9 @@ static int vop_initial(struct vop *vop) usleep_range(10, 20); reset_control_deassert(ahb_rst); + VOP_INTR_SET_TYPE(vop, clear, INTR_MASK, 1); + VOP_INTR_SET_TYPE(vop, enable, INTR_MASK, 0); + memcpy(vop->regsbak, vop->regs, vop->len); VOP_REG_SET(vop, misc, global_regdone_en, 1); @@ -1568,17 +1571,9 @@ static int vop_bind(struct device *dev, struct device *master, void *data) mutex_init(&vop->vsync_mutex); - ret = devm_request_irq(dev, vop->irq, vop_isr, - IRQF_SHARED, dev_name(dev), vop); - if (ret) - return ret; - - /* IRQ is initially disabled; it gets enabled in power_on */ - disable_irq(vop->irq); - ret = vop_create_crtc(vop); if (ret) - goto err_enable_irq; + return ret; pm_runtime_enable(&pdev->dev); @@ -1588,13 +1583,19 @@ static int vop_bind(struct device *dev, struct device *master, void *data) goto err_disable_pm_runtime; } + ret = devm_request_irq(dev, vop->irq, vop_isr, + IRQF_SHARED, dev_name(dev), vop); + if (ret) + goto err_disable_pm_runtime; + + /* IRQ is initially disabled; it gets enabled in power_on */ + disable_irq(vop->irq); + return 0; err_disable_pm_runtime: pm_runtime_disable(&pdev->dev); vop_destroy_crtc(vop); -err_enable_irq: - enable_irq(vop->irq); /* To balance out the disable_irq above */ return ret; } diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c index d401156490f3..4460ca46a350 100644 --- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c +++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c @@ -129,10 +129,13 @@ static int sun4i_dclk_get_phase(struct clk_hw *hw) static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees) { struct sun4i_dclk *dclk = hw_to_dclk(hw); + u32 val = degrees / 120; + + val <<= 28; regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG, GENMASK(29, 28), - degrees / 120); + val); return 0; } diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index d9791292553e..7b909d814d38 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -567,12 +567,12 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master, if (IS_ERR(tcon->crtc)) { dev_err(dev, "Couldn't create our CRTC\n"); ret = PTR_ERR(tcon->crtc); - goto err_free_clocks; + goto err_free_dotclock; } ret = sun4i_rgb_init(drm, tcon); if (ret < 0) - goto err_free_clocks; + goto err_free_dotclock; list_add_tail(&tcon->list, &drv->tcon_list); diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h index 4785ac090b8c..c142fbb8661e 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.h +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h @@ -80,7 +80,7 @@ #define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN BIT(0) #define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_MASK GENMASK(2, 1) -#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK GENMASK(11, 8) +#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK GENMASK(12, 8) #define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24) #define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_DEF (1 << 1) #define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_ARGB8888 (0 << 8) diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 597d563d636a..0598b4c18c25 100644 --- 
a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -250,6 +250,7 @@ static void tegra_drm_unload(struct drm_device *drm) drm_kms_helper_poll_fini(drm); tegra_drm_fb_exit(drm); + drm_atomic_helper_shutdown(drm); drm_mode_config_cleanup(drm); err = host1x_device_exit(device); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 406fe4544b83..06d6e785c920 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "tilcdc_drv.h" #include "tilcdc_regs.h" @@ -48,6 +49,7 @@ struct tilcdc_crtc { unsigned int lcd_fck_rate; ktime_t last_vblank; + unsigned int hvtotal_us; struct drm_framebuffer *curr_fb; struct drm_framebuffer *next_fb; @@ -292,6 +294,12 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc) LCDC_V2_CORE_CLK_EN); } +uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode) +{ + return (uint) div_u64(1000llu * mode->htotal * mode->vtotal, + mode->clock); +} + static void tilcdc_crtc_set_mode(struct drm_crtc *crtc) { struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); @@ -459,6 +467,9 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc) drm_framebuffer_reference(fb); crtc->hwmode = crtc->state->adjusted_mode; + + tilcdc_crtc->hvtotal_us = + tilcdc_mode_hvtotal(&crtc->hwmode); } static void tilcdc_crtc_enable(struct drm_crtc *crtc) @@ -648,7 +659,7 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc, spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, - 1000000 / crtc->hwmode.vrefresh); + tilcdc_crtc->hvtotal_us); tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get())); if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h index 9d528c0a67a4..5048ebb86835 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_regs.h +++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h @@ -133,7 +133,7 @@ static inline void tilcdc_write64(struct drm_device *dev, u32 reg, u64 data) struct tilcdc_drm_private *priv = dev->dev_private; volatile void __iomem *addr = priv->mmio + reg; -#ifdef iowrite64 +#if defined(iowrite64) && !defined(iowrite64_is_nonatomic) iowrite64(data, addr); #else __iowmb(); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 180ce6296416..68eed684dff5 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -150,8 +150,7 @@ static void ttm_bo_release_list(struct kref *list_kref) ttm_tt_destroy(bo->ttm); atomic_dec(&bo->glob->bo_count); dma_fence_put(bo->moving); - if (bo->resv == &bo->ttm_resv) - reservation_object_fini(&bo->ttm_resv); + reservation_object_fini(&bo->ttm_resv); mutex_destroy(&bo->wu_mutex); if (bo->destroy) bo->destroy(bo); @@ -176,7 +175,8 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) list_add_tail(&bo->lru, &man->lru[bo->priority]); kref_get(&bo->list_kref); - if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) { + if (bo->ttm && !(bo->ttm->page_flags & + (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) { list_add_tail(&bo->swap, &bo->glob->swap_lru[bo->priority]); kref_get(&bo->list_kref); @@ -402,14 +402,11 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo) if (bo->resv == &bo->ttm_resv) return 0; - reservation_object_init(&bo->ttm_resv); BUG_ON(!reservation_object_trylock(&bo->ttm_resv)); r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv); - if (r) { + if (r) 
reservation_object_unlock(&bo->ttm_resv); - reservation_object_fini(&bo->ttm_resv); - } return r; } @@ -440,28 +437,30 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) struct ttm_bo_global *glob = bo->glob; int ret; + ret = ttm_bo_individualize_resv(bo); + if (ret) { + /* Last resort, if we fail to allocate memory for the + * fences, block for the BO to become idle + */ + reservation_object_wait_timeout_rcu(bo->resv, true, false, + 30 * HZ); + spin_lock(&glob->lru_lock); + goto error; + } + spin_lock(&glob->lru_lock); ret = __ttm_bo_reserve(bo, false, true, NULL); - if (!ret) { - if (!ttm_bo_wait(bo, false, true)) { + if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) { ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); - ttm_bo_cleanup_memtype_use(bo); + if (bo->resv != &bo->ttm_resv) + reservation_object_unlock(&bo->ttm_resv); - return; - } - - ret = ttm_bo_individualize_resv(bo); - if (ret) { - /* Last resort, if we fail to allocate memory for the - * fences, block for the BO to become idle and free it. - */ - spin_unlock(&glob->lru_lock); - ttm_bo_wait(bo, true, true); ttm_bo_cleanup_memtype_use(bo); return; } + ttm_bo_flush_all_fences(bo); /* @@ -474,11 +473,12 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) ttm_bo_add_to_lru(bo); } - if (bo->resv != &bo->ttm_resv) - reservation_object_unlock(&bo->ttm_resv); __ttm_bo_unreserve(bo); } + if (bo->resv != &bo->ttm_resv) + reservation_object_unlock(&bo->ttm_resv); +error: kref_get(&bo->list_kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); spin_unlock(&glob->lru_lock); @@ -1203,8 +1203,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, lockdep_assert_held(&bo->resv->lock.base); } else { bo->resv = &bo->ttm_resv; - reservation_object_init(&bo->ttm_resv); } + reservation_object_init(&bo->ttm_resv); atomic_inc(&bo->glob->bo_count); drm_vma_node_reset(&bo->vma_node); bo->priority = 0; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index c934ad5b3903..7c2fbdbbd048 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -474,6 +474,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, INIT_LIST_HEAD(&fbo->lru); INIT_LIST_HEAD(&fbo->swap); INIT_LIST_HEAD(&fbo->io_reserve_lru); + mutex_init(&fbo->wu_mutex); fbo->moving = NULL; drm_vma_node_reset(&fbo->vma_node); atomic_set(&fbo->cpu_writers, 0); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index c8ebb757e36b..b17d0d38f290 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -299,7 +299,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma) static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, unsigned long offset, - void *buf, int len, int write) + uint8_t *buf, int len, int write) { unsigned long page = offset >> PAGE_SHIFT; unsigned long bytes_left = len; @@ -328,6 +328,7 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, ttm_bo_kunmap(&map); page++; + buf += bytes; bytes_left -= bytes; offset = 0; } while (bytes_left); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 871599826773..91f9263f3c3b 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -821,6 +821,8 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) pr_info("Initializing pool allocator\n"); _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); + if (!_manager) + return 
-ENOMEM; ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc"); diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index b5b335c9b2bb..d5583190f3e4 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -137,7 +137,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, if (cmd > (char *) urb->transfer_buffer) { /* Send partial buffer remaining before exiting */ - int len = cmd - (char *) urb->transfer_buffer; + int len; + if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length) + *cmd++ = 0xAF; + len = cmd - (char *) urb->transfer_buffer; ret = udl_submit_urb(dev, urb, len); bytes_sent += len; } else @@ -159,10 +162,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { unsigned long start = vma->vm_start; unsigned long size = vma->vm_end - vma->vm_start; - unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + unsigned long offset; unsigned long page, pos; - if (offset + size > info->fix.smem_len) + if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) + return -EINVAL; + + offset = vma->vm_pgoff << PAGE_SHIFT; + + if (offset > info->fix.smem_len || size > info->fix.smem_len - offset) return -EINVAL; pos = (unsigned long)info->fix.smem_start + offset; diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c index 0c87b1ac6b68..b992644c17e6 100644 --- a/drivers/gpu/drm/udl/udl_transfer.c +++ b/drivers/gpu/drm/udl/udl_transfer.c @@ -153,11 +153,11 @@ static void udl_compress_hline16( raw_pixels_count_byte = cmd++; /* we'll know this later */ raw_pixel_start = pixel; - cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1, - min((int)(pixel_end - pixel) / bpp, - (int)(cmd_buffer_end - cmd) / 2))) * bpp; + cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL, + (unsigned long)(pixel_end - pixel) / bpp, + (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp; - prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp); + prefetch_range((void *) pixel, cmd_pixel_end - pixel); pixel_val16 = get_pixel_val16(pixel, bpp); while (pixel < cmd_pixel_end) { @@ -193,6 +193,9 @@ static void udl_compress_hline16( if (pixel > raw_pixel_start) { /* finalize last RAW span */ *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF; + } else { + /* undo unused byte */ + cmd--; } *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF; diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 3afdbf4bc10b..eff0a8ece8bc 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -173,6 +173,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo) vc4_bo_set_label(obj, -1); if (bo->validated_shader) { + kfree(bo->validated_shader->uniform_addr_offsets); kfree(bo->validated_shader->texture_samples); kfree(bo->validated_shader); bo->validated_shader = NULL; @@ -432,6 +433,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo) } if (bo->validated_shader) { + kfree(bo->validated_shader->uniform_addr_offsets); kfree(bo->validated_shader->texture_samples); kfree(bo->validated_shader); bo->validated_shader = NULL; diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index d1e0dc908048..04796d7d0fdb 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -866,7 +866,8 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder, adjusted_mode->clock = pixel_clock_hz / 1000 + 1; /* Given the new pixel clock, adjust HFP to keep vrefresh the same. 
*/ - adjusted_mode->htotal = pixel_clock_hz / (mode->vrefresh * mode->vtotal); + adjusted_mode->htotal = adjusted_mode->clock * mode->htotal / + mode->clock; adjusted_mode->hsync_end += adjusted_mode->htotal - mode->htotal; adjusted_mode->hsync_start += adjusted_mode->htotal - mode->htotal; diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index d0c6bfb68c4e..d31b3d0c9955 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev) struct vc4_exec_info *exec[2]; struct vc4_bo *bo; unsigned long irqflags; - unsigned int i, j, unref_list_count, prev_idx; + unsigned int i, j, k, unref_list_count; kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL); if (!kernel_state) @@ -182,24 +182,24 @@ vc4_save_hang_state(struct drm_device *dev) return; } - prev_idx = 0; + k = 0; for (i = 0; i < 2; i++) { if (!exec[i]) continue; for (j = 0; j < exec[i]->bo_count; j++) { drm_gem_object_get(&exec[i]->bo[j]->base); - kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base; + kernel_state->bo[k++] = &exec[i]->bo[j]->base; } list_for_each_entry(bo, &exec[i]->unref_list, unref_head) { drm_gem_object_get(&bo->base.base); - kernel_state->bo[j + prev_idx] = &bo->base.base; - j++; + kernel_state->bo[k++] = &bo->base.base; } - prev_idx = j + 1; } + WARN_ON_ONCE(k != state->bo_count); + if (exec[0]) state->start_bin = exec[0]->ct0ca; if (exec[1]) @@ -829,8 +829,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) /* If we got force-completed because of GPU reset rather than * through our IRQ handler, signal the fence now. */ - if (exec->fence) + if (exec->fence) { dma_fence_signal(exec->fence); + dma_fence_put(exec->fence); + } if (exec->bo) { for (i = 0; i < exec->bo_count; i++) diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 937da8dd65b8..8f71157a2b06 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -433,7 +433,8 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder) vc4_encoder->limited_rgb_range ? HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL, - vc4_encoder->rgb_range_selectable); + vc4_encoder->rgb_range_selectable, + false); vc4_hdmi_write_infoframe(encoder, &frame); } diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c index 7d7af3a93d94..3dd62d75f531 100644 --- a/drivers/gpu/drm/vc4/vc4_irq.c +++ b/drivers/gpu/drm/vc4/vc4_irq.c @@ -139,6 +139,7 @@ vc4_irq_finish_render_job(struct drm_device *dev) list_move_tail(&exec->head, &vc4->job_done_list); if (exec->fence) { dma_fence_signal_locked(exec->fence); + dma_fence_put(exec->fence); exec->fence = NULL; } vc4_submit_next_render_job(dev); @@ -225,6 +226,9 @@ vc4_irq_uninstall(struct drm_device *dev) /* Clear any pending interrupts we might have left. */ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS); + /* Finish any interrupt handler still in flight. 
*/ + disable_irq(dev->irq); + cancel_work_sync(&vc4->overflow_mem_work); } diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 2968b3ebb895..17590cb2b80d 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -352,6 +352,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) vc4_state->x_scaling[0] = VC4_SCALING_TPZ; if (vc4_state->y_scaling[0] == VC4_SCALING_NONE) vc4_state->y_scaling[0] = VC4_SCALING_TPZ; + } else { + vc4_state->x_scaling[1] = VC4_SCALING_NONE; + vc4_state->y_scaling[1] = VC4_SCALING_NONE; } vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE && @@ -535,7 +538,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * the scl fields here. */ if (num_planes == 1) { - scl0 = vc4_get_scl_field(state, 1); + scl0 = vc4_get_scl_field(state, 0); scl1 = scl0; } else { scl0 = vc4_get_scl_field(state, 1); diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c index 622cd43840b8..493f392b3a0a 100644 --- a/drivers/gpu/drm/vc4/vc4_v3d.c +++ b/drivers/gpu/drm/vc4/vc4_v3d.c @@ -327,6 +327,9 @@ static int vc4_v3d_runtime_resume(struct device *dev) return ret; vc4_v3d_init_hw(vc4->dev); + + /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */ + enable_irq(vc4->dev->irq); vc4_irq_postinstall(vc4->dev); return 0; diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c index d3f15bf60900..7cf82b071de2 100644 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c @@ -942,6 +942,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) fail: kfree(validation_state.branch_targets); if (validated_shader) { + kfree(validated_shader->uniform_addr_offsets); kfree(validated_shader->texture_samples); kfree(validated_shader); } diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index b94bd5440e57..ed9c443bb8a1 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -196,6 +196,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data, case VIRTGPU_PARAM_3D_FEATURES: value = vgdev->has_virgl_3d == true ? 
1 : 0; break; + case VIRTGPU_PARAM_CAPSET_QUERY_FIX: + value = 1; + break; default: return -EINVAL; } @@ -471,7 +474,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, { struct virtio_gpu_device *vgdev = dev->dev_private; struct drm_virtgpu_get_caps *args = data; - int size; + unsigned size, host_caps_size; int i; int found_valid = -1; int ret; @@ -480,6 +483,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, if (vgdev->num_capsets == 0) return -ENOSYS; + /* don't allow userspace to pass 0 */ + if (args->size == 0) + return -EINVAL; + spin_lock(&vgdev->display_info_lock); for (i = 0; i < vgdev->num_capsets; i++) { if (vgdev->capsets[i].id == args->cap_set_id) { @@ -495,11 +502,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, return -EINVAL; } - size = vgdev->capsets[found_valid].max_size; - if (args->size > size) { - spin_unlock(&vgdev->display_info_lock); - return -EINVAL; - } + host_caps_size = vgdev->capsets[found_valid].max_size; + /* only copy to user the minimum of the host caps size or the guest caps size */ + size = min(args->size, host_caps_size); list_for_each_entry(cache_ent, &vgdev->cap_cache, head) { if (cache_ent->id == args->cap_set_id && diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 9eb96fb2c147..26a2da1f712d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -291,7 +291,7 @@ static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC); if (ret == -ENOSPC) { spin_unlock(&vgdev->ctrlq.qlock); - wait_event(vgdev->ctrlq.ack_queue, vq->num_free); + wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt); spin_lock(&vgdev->ctrlq.qlock); goto retry; } else { @@ -366,7 +366,7 @@ static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC); if (ret == -ENOSPC) { spin_unlock(&vgdev->cursorq.qlock); - wait_event(vgdev->cursorq.ack_queue, vq->num_free); + wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt); spin_lock(&vgdev->cursorq.qlock); goto retry; } else { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 184340d486c3..86d25f18aa99 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1337,6 +1337,19 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv) */ void vmw_svga_disable(struct vmw_private *dev_priv) { + /* + * Disabling SVGA will turn off device modesetting capabilities, so + * notify KMS about that so that it doesn't cache atomic state that + * isn't valid anymore, for example crtcs turned on. + * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex), + * but vmw_kms_lost_device() takes the reservation sem and thus we'll + * end up with lock order reversal. Thus, a master may actually perform + * a new modeset just after we call vmw_kms_lost_device() and race with + * vmw_svga_disable(), but that should at worst cause atomic KMS state + * to be inconsistent with the device, causing modesetting problems. 
+ * + */ + vmw_kms_lost_device(dev_priv->dev); ttm_write_lock(&dev_priv->reservation_sem, false); spin_lock(&dev_priv->svga_lock); if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 7e5f30e234b1..8c65cc3b0dda 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -938,6 +938,7 @@ int vmw_kms_present(struct vmw_private *dev_priv, int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv); +void vmw_kms_lost_device(struct drm_device *dev); int vmw_dumb_create(struct drm_file *file_priv, struct drm_device *dev, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 21c62a34e558..87e8af5776a3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -2731,6 +2731,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, } view_type = vmw_view_cmd_to_type(header->id); + if (view_type == vmw_view_max) + return -EINVAL; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, user_surface_converter, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index b850562fbdd6..11f1c30ead54 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -31,7 +31,6 @@ #include #include - /* Might need a hrtimer here? */ #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) @@ -697,7 +696,6 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane) vps->pinned = 0; /* Mapping is managed by prepare_fb/cleanup_fb */ - memset(&vps->guest_map, 0, sizeof(vps->guest_map)); memset(&vps->host_map, 0, sizeof(vps->host_map)); vps->cpp = 0; @@ -760,11 +758,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane, /* Should have been freed by cleanup_fb */ - if (vps->guest_map.virtual) { - DRM_ERROR("Guest mapping not freed\n"); - ttm_bo_kunmap(&vps->guest_map); - } - if (vps->host_map.virtual) { DRM_ERROR("Host mapping not freed\n"); ttm_bo_kunmap(&vps->host_map); @@ -2537,9 +2530,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, * Helper to be used if an error forces the caller to undo the actions of * vmw_kms_helper_resource_prepare. */ -void vmw_kms_helper_resource_revert(struct vmw_resource *res) +void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx) { - vmw_kms_helper_buffer_revert(res->backup); + struct vmw_resource *res = ctx->res; + + vmw_kms_helper_buffer_revert(ctx->buf); + vmw_dmabuf_unreference(&ctx->buf); vmw_resource_unreserve(res, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); } @@ -2556,10 +2552,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res) * interrupted by a signal. 
*/ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, - bool interruptible) + bool interruptible, + struct vmw_validation_ctx *ctx) { int ret = 0; + ctx->buf = NULL; + ctx->res = res; + if (interruptible) ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex); else @@ -2578,6 +2578,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, res->dev_priv->has_mob); if (ret) goto out_unreserve; + + ctx->buf = vmw_dmabuf_reference(res->backup); } ret = vmw_resource_validate(res); if (ret) @@ -2585,7 +2587,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, return 0; out_revert: - vmw_kms_helper_buffer_revert(res->backup); + vmw_kms_helper_buffer_revert(ctx->buf); out_unreserve: vmw_resource_unreserve(res, false, NULL, 0); out_unlock: @@ -2601,13 +2603,16 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, * @out_fence: Optional pointer to a fence pointer. If non-NULL, a * ref-counted fence pointer is returned here. */ -void vmw_kms_helper_resource_finish(struct vmw_resource *res, - struct vmw_fence_obj **out_fence) +void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, + struct vmw_fence_obj **out_fence) { - if (res->backup || out_fence) - vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup, + struct vmw_resource *res = ctx->res; + + if (ctx->buf || out_fence) + vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf, out_fence, NULL); + vmw_dmabuf_unreference(&ctx->buf); vmw_resource_unreserve(res, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); } @@ -2871,3 +2876,14 @@ int vmw_kms_set_config(struct drm_mode_set *set, return drm_atomic_helper_set_config(set, ctx); } + + +/** + * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost + * + * @dev: Pointer to the drm device + */ +void vmw_kms_lost_device(struct drm_device *dev) +{ + drm_atomic_helper_shutdown(dev); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index ff9c8389ff21..3d2ca280eaa7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -175,7 +175,7 @@ struct vmw_plane_state { int pinned; /* For CPU Blit */ - struct ttm_bo_kmap_obj host_map, guest_map; + struct ttm_bo_kmap_obj host_map; unsigned int cpp; }; @@ -240,6 +240,11 @@ struct vmw_display_unit { int set_gui_y; }; +struct vmw_validation_ctx { + struct vmw_resource *res; + struct vmw_dma_buffer *buf; +}; + #define vmw_crtc_to_du(x) \ container_of(x, struct vmw_display_unit, crtc) #define vmw_connector_to_du(x) \ @@ -296,9 +301,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, struct drm_vmw_fence_rep __user * user_fence_rep); int vmw_kms_helper_resource_prepare(struct vmw_resource *res, - bool interruptible); -void vmw_kms_helper_resource_revert(struct vmw_resource *res); -void vmw_kms_helper_resource_finish(struct vmw_resource *res, + bool interruptible, + struct vmw_validation_ctx *ctx); +void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx); +void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, struct vmw_fence_obj **out_fence); int vmw_kms_readback(struct vmw_private *dev_priv, struct drm_file *file_priv, @@ -439,5 +445,4 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv, int vmw_kms_set_config(struct drm_mode_set *set, struct drm_modeset_acquire_ctx *ctx); - #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index b8a09807c5de..3824595fece1 100644 --- 
a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -266,8 +266,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = { .set_property = vmw_du_connector_set_property, .destroy = vmw_ldu_connector_destroy, .reset = vmw_du_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = vmw_du_connector_duplicate_state, + .atomic_destroy_state = vmw_du_connector_destroy_state, .atomic_set_property = vmw_du_connector_atomic_set_property, .atomic_get_property = vmw_du_connector_atomic_get_property, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h index 557a033fb610..8545488aa0cf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h @@ -135,17 +135,24 @@ #else -/* In the 32-bit version of this macro, we use "m" because there is no - * more register left for bp +/* + * In the 32-bit version of this macro, we store bp in a memory location + * because we've run out of registers. + * Now we can't reference that memory location while we've modified + * %esp or %ebp, so we first push it on the stack, just before we push + * %ebp, and then when we need it we read it from the stack where we + * just pushed it. */ #define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \ port_num, magic, bp, \ eax, ebx, ecx, edx, si, di) \ ({ \ - asm volatile ("push %%ebp;" \ - "mov %12, %%ebp;" \ + asm volatile ("push %12;" \ + "push %%ebp;" \ + "mov 0x04(%%esp), %%ebp;" \ "rep outsb;" \ - "pop %%ebp;" : \ + "pop %%ebp;" \ + "add $0x04, %%esp;" : \ "=a"(eax), \ "=b"(ebx), \ "=c"(ecx), \ @@ -167,10 +174,12 @@ port_num, magic, bp, \ eax, ebx, ecx, edx, si, di) \ ({ \ - asm volatile ("push %%ebp;" \ - "mov %12, %%ebp;" \ + asm volatile ("push %12;" \ + "push %%ebp;" \ + "mov 0x04(%%esp), %%ebp;" \ "rep insb;" \ - "pop %%ebp" : \ + "pop %%ebp;" \ + "add $0x04, %%esp;" : \ "=a"(eax), \ "=b"(ebx), \ "=c"(ecx), \ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index d1552d3e0652..205a5f4b58f3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -420,8 +420,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = { .set_property = vmw_du_connector_set_property, .destroy = vmw_sou_connector_destroy, .reset = vmw_du_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = vmw_du_connector_duplicate_state, + .atomic_destroy_state = vmw_du_connector_destroy_state, .atomic_set_property = vmw_du_connector_atomic_set_property, .atomic_get_property = vmw_du_connector_atomic_get_property, }; @@ -453,7 +453,11 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); + struct drm_crtc *crtc = plane->state->crtc ? 
+ plane->state->crtc : old_state->crtc; + if (vps->dmabuf) + vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false); vmw_dmabuf_unreference(&vps->dmabuf); vps->dmabuf_size = 0; @@ -491,10 +495,17 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, } size = new_state->crtc_w * new_state->crtc_h * 4; + dev_priv = vmw_priv(crtc->dev); if (vps->dmabuf) { - if (vps->dmabuf_size == size) - return 0; + if (vps->dmabuf_size == size) { + /* + * Note that this might temporarily up the pin-count + * to 2, until cleanup_fb() is called. + */ + return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, + true); + } vmw_dmabuf_unreference(&vps->dmabuf); vps->dmabuf_size = 0; @@ -504,7 +515,6 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, if (!vps->dmabuf) return -ENOMEM; - dev_priv = vmw_priv(crtc->dev); vmw_svga_enable(dev_priv); /* After we have alloced the backing store might not be able to @@ -515,13 +525,18 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, &vmw_vram_ne_placement, false, &vmw_dmabuf_bo_free); vmw_overlay_resume_all(dev_priv); - - if (ret != 0) + if (ret) { vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */ - else - vps->dmabuf_size = size; + return ret; + } - return ret; + vps->dmabuf_size = size; + + /* + * TTM already thinks the buffer is pinned, but make sure the + * pin_count is upped. + */ + return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true); } @@ -909,12 +924,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, struct vmw_framebuffer_surface *vfbs = container_of(framebuffer, typeof(*vfbs), base); struct vmw_kms_sou_surface_dirty sdirty; + struct vmw_validation_ctx ctx; int ret; if (!srf) srf = &vfbs->surface->res; - ret = vmw_kms_helper_resource_prepare(srf, true); + ret = vmw_kms_helper_resource_prepare(srf, true, &ctx); if (ret) return ret; @@ -933,7 +949,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, dest_x, dest_y, num_clips, inc, &sdirty.base); - vmw_kms_helper_resource_finish(srf, out_fence); + vmw_kms_helper_resource_finish(&ctx, out_fence); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index ca3afae2db1f..6c576f8df4b2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -114,7 +114,7 @@ struct vmw_screen_target_display_unit { bool defined; /* For CPU Blit */ - struct ttm_bo_kmap_obj host_map, guest_map; + struct ttm_bo_kmap_obj host_map; unsigned int cpp; }; @@ -695,7 +695,8 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) s32 src_pitch, dst_pitch; u8 *src, *dst; bool not_used; - + struct ttm_bo_kmap_obj guest_map; + int ret; if (!dirty->num_hits) return; @@ -706,6 +707,13 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) if (width == 0 || height == 0) return; + ret = ttm_bo_kmap(&ddirty->buf->base, 0, ddirty->buf->base.num_pages, + &guest_map); + if (ret) { + DRM_ERROR("Failed mapping framebuffer for blit: %d\n", + ret); + goto out_cleanup; + } /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */ src_pitch = stdu->display_srf->base_size.width * stdu->cpp; @@ -713,7 +721,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) src += ddirty->top * src_pitch + ddirty->left * stdu->cpp; dst_pitch = ddirty->pitch; - dst = ttm_kmap_obj_virtual(&stdu->guest_map, ¬_used); + dst = ttm_kmap_obj_virtual(&guest_map, ¬_used); dst += ddirty->fb_top * dst_pitch + 
ddirty->fb_left * stdu->cpp; @@ -772,6 +780,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) vmw_fifo_commit(dev_priv, sizeof(*cmd)); } + ttm_bo_kunmap(&guest_map); out_cleanup: ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX; ddirty->right = ddirty->bottom = S32_MIN; @@ -971,12 +980,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, struct vmw_framebuffer_surface *vfbs = container_of(framebuffer, typeof(*vfbs), base); struct vmw_stdu_dirty sdirty; + struct vmw_validation_ctx ctx; int ret; if (!srf) srf = &vfbs->surface->res; - ret = vmw_kms_helper_resource_prepare(srf, true); + ret = vmw_kms_helper_resource_prepare(srf, true, &ctx); if (ret) return ret; @@ -999,7 +1009,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, dest_x, dest_y, num_clips, inc, &sdirty.base); out_finish: - vmw_kms_helper_resource_finish(srf, out_fence); + vmw_kms_helper_resource_finish(&ctx, out_fence); return ret; } @@ -1109,9 +1119,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane, { struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); - if (vps->guest_map.virtual) - ttm_bo_kunmap(&vps->guest_map); - if (vps->host_map.virtual) ttm_bo_kunmap(&vps->host_map); @@ -1277,33 +1284,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, */ if (vps->content_fb_type == SEPARATE_DMA && !(dev_priv->capabilities & SVGA_CAP_3D)) { - - struct vmw_framebuffer_dmabuf *new_vfbd; - - new_vfbd = vmw_framebuffer_to_vfbd(new_fb); - - ret = ttm_bo_reserve(&new_vfbd->buffer->base, false, false, - NULL); - if (ret) - goto out_srf_unpin; - - ret = ttm_bo_kmap(&new_vfbd->buffer->base, 0, - new_vfbd->buffer->base.num_pages, - &vps->guest_map); - - ttm_bo_unreserve(&new_vfbd->buffer->base); - - if (ret) { - DRM_ERROR("Failed to map content buffer to CPU\n"); - goto out_srf_unpin; - } - ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0, vps->surf->res.backup->base.num_pages, &vps->host_map); if (ret) { DRM_ERROR("Failed to map display buffer to CPU\n"); - ttm_bo_kunmap(&vps->guest_map); goto out_srf_unpin; } @@ -1350,7 +1335,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane, stdu->display_srf = vps->surf; stdu->content_fb_type = vps->content_fb_type; stdu->cpp = vps->cpp; - memcpy(&stdu->guest_map, &vps->guest_map, sizeof(vps->guest_map)); memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map)); if (!stdu->defined) diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index c860a7997cb5..1d1612e28854 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c @@ -125,11 +125,14 @@ ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index) if (pre_node == pre->dev->of_node) { mutex_unlock(&ipu_pre_list_mutex); device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE); + of_node_put(pre_node); return pre; } } mutex_unlock(&ipu_pre_list_mutex); + of_node_put(pre_node); + return NULL; } diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c index 0013ca9f72c8..1c36fa3a90e2 100644 --- a/drivers/gpu/ipu-v3/ipu-prg.c +++ b/drivers/gpu/ipu-v3/ipu-prg.c @@ -101,11 +101,14 @@ ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id) mutex_unlock(&ipu_prg_list_mutex); device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE); prg->id = ipu_id; + of_node_put(prg_node); return prg; } } mutex_unlock(&ipu_prg_list_mutex); + of_node_put(prg_node); + return NULL; } @@ -249,10 +252,14 @@ void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan) { int 
prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num); struct ipu_prg *prg = ipu_chan->ipu->prg_priv; - struct ipu_prg_channel *chan = &prg->chan[prg_chan]; + struct ipu_prg_channel *chan; u32 val; - if (!chan->enabled || prg_chan < 0) + if (prg_chan < 0) + return; + + chan = &prg->chan[prg_chan]; + if (!chan->enabled) return; clk_prepare_enable(prg->clk_ipg); @@ -279,13 +286,15 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan, { int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num); struct ipu_prg *prg = ipu_chan->ipu->prg_priv; - struct ipu_prg_channel *chan = &prg->chan[prg_chan]; + struct ipu_prg_channel *chan; u32 val; int ret; if (prg_chan < 0) return prg_chan; + chan = &prg->chan[prg_chan]; + if (chan->enabled) { ipu_pre_update(prg->pres[chan->used_pre], *eba); return 0; diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 374301fcbc86..eca4c9d97110 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -230,7 +230,7 @@ config HID_CMEDIA config HID_CP2112 tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support" - depends on USB_HID && I2C && GPIOLIB + depends on USB_HID && HIDRAW && I2C && GPIOLIB select GPIOLIB_IRQCHIP ---help--- Support for Silicon Labs CP2112 HID USB to SMBus Master Bridge. @@ -436,10 +436,11 @@ config HID_LENOVO select NEW_LEDS select LEDS_CLASS ---help--- - Support for Lenovo devices that are not fully compliant with HID standard. + Support for IBM/Lenovo devices that are not fully compliant with HID standard. - Say Y if you want support for the non-compliant features of the Lenovo - Thinkpad standalone keyboards, e.g: + Say Y if you want support for horizontal scrolling of the IBM/Lenovo + Scrollpoint mice or the non-compliant features of the Lenovo Thinkpad + standalone keyboards, e.g: - ThinkPad USB Keyboard with TrackPoint (supports extra LEDs and trackpoint configuration) - ThinkPad Compact Bluetooth Keyboard with TrackPoint (supports Fn keys) diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 330ca983828b..672b0be41d44 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1390,7 +1390,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags) * of implement() working on 8 byte chunks */ - int len = hid_report_len(report) + 7; + u32 len = hid_report_len(report) + 7; return kmalloc(len, flags); } @@ -1455,7 +1455,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report, { char *buf; int ret; - int len; + u32 len; buf = hid_alloc_report_buf(report, GFP_KERNEL); if (!buf) @@ -1481,14 +1481,14 @@ void __hid_request(struct hid_device *hid, struct hid_report *report, } EXPORT_SYMBOL_GPL(__hid_request); -int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, +int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt) { struct hid_report_enum *report_enum = hid->report_enum + type; struct hid_report *report; struct hid_driver *hdrv; unsigned int a; - int rsize, csize = size; + u32 rsize, csize = size; u8 *cdata = data; int ret = 0; @@ -1546,7 +1546,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event); * * This is data entry for lower layers. 
*/ -int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt) +int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt) { struct hid_report_enum *report_enum; struct hid_driver *hdrv; @@ -2638,7 +2638,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) }, { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, @@ -2717,6 +2716,9 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, @@ -2908,6 +2910,17 @@ bool hid_ignore(struct hid_device *hdev) strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0) return true; break; + case USB_VENDOR_ID_ELAN: + /* + * Many Elan devices have a product id of 0x0401 and are handled + * by the elan_i2c input driver. But the ACPI HID ELAN0800 dev + * is not (and cannot be) handled by that driver -> + * Ignore all 0x0401 devs except for the ELAN0800 dev. + */ + if (hdev->product == 0x0401 && + strncmp(hdev->name, "ELAN0800", 8) != 0) + return true; + break; } if (hdev->type == HID_TYPE_USBMOUSE && diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 078026f63b6f..4e940a096b2a 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -196,6 +196,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error requesting GPIO config: %d\n", ret); + if (ret >= 0) + ret = -EIO; goto exit; } @@ -205,8 +207,10 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); - if (ret < 0) { + if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error setting GPIO config: %d\n", ret); + if (ret >= 0) + ret = -EIO; goto exit; } @@ -214,7 +218,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) exit: mutex_unlock(&dev->lock); - return ret < 0 ? 
ret : -EIO; + return ret; } static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 5271db593478..ae8c8e66a6c4 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -1154,6 +1154,8 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, goto out; if (list->tail > list->head) { len = list->tail - list->head; + if (len > count) + len = count; if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { ret = -EFAULT; @@ -1163,6 +1165,8 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, list->head += len; } else { len = HID_DEBUG_BUFSIZE - list->head; + if (len > count) + len = count; if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { ret = -EFAULT; @@ -1170,7 +1174,9 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, } list->head = 0; ret += len; - goto copy_rest; + count -= len; + if (count > 0) + goto copy_rest; } } diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c index 0cd4f7216239..5eea6fe0d7bd 100644 --- a/drivers/hid/hid-elo.c +++ b/drivers/hid/hid-elo.c @@ -42,6 +42,12 @@ static int elo_input_configured(struct hid_device *hdev, { struct input_dev *input = hidinput->input; + /* + * ELO devices have one Button usage in GenDesk field, which makes + * hid-input map it to BTN_LEFT; that confuses userspace, which then + * considers the device to be a mouse/touchpad instead of touchscreen. + */ + clear_bit(BTN_LEFT, input->keybit); set_bit(BTN_TOUCH, input->keybit); set_bit(ABS_PRESSURE, input->absbit); input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index be2e005c3c51..9e478f03e845 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -532,6 +532,13 @@ #define USB_VENDOR_ID_HUION 0x256c #define USB_DEVICE_ID_HUION_TABLET 0x006e +#define USB_VENDOR_ID_IBM 0x04b3 +#define USB_DEVICE_ID_IBM_SCROLLPOINT_III 0x3100 +#define USB_DEVICE_ID_IBM_SCROLLPOINT_PRO 0x3103 +#define USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL 0x3105 +#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL 0x3108 +#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO 0x3109 + #define USB_VENDOR_ID_IDEACOM 0x1cb6 #define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650 #define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651 @@ -634,6 +641,9 @@ #define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 #define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 +#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040 +#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042 +#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043 #define USB_DEVICE_ID_LD_JWM 0x1080 #define USB_DEVICE_ID_LD_DMMP 0x1081 #define USB_DEVICE_ID_LD_UMIP 0x1090 @@ -661,6 +671,7 @@ #define USB_DEVICE_ID_LENOVO_TPKBD 0x6009 #define USB_DEVICE_ID_LENOVO_CUSBKBD 0x6047 #define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048 +#define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL 0x6049 #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 199f6a01fc62..bb984cc9753b 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -387,7 +387,8 @@ static int hidinput_get_battery_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CAPACITY: - if (dev->battery_report_type == HID_FEATURE_REPORT) { + if (dev->battery_status 
!= HID_BATTERY_REPORTED && + !dev->battery_avoid_query) { value = hidinput_query_battery_capacity(dev); if (value < 0) return value; @@ -403,17 +404,17 @@ static int hidinput_get_battery_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_STATUS: - if (!dev->battery_reported && - dev->battery_report_type == HID_FEATURE_REPORT) { + if (dev->battery_status != HID_BATTERY_REPORTED && + !dev->battery_avoid_query) { value = hidinput_query_battery_capacity(dev); if (value < 0) return value; dev->battery_capacity = value; - dev->battery_reported = true; + dev->battery_status = HID_BATTERY_QUERIED; } - if (!dev->battery_reported) + if (dev->battery_status == HID_BATTERY_UNKNOWN) val->intval = POWER_SUPPLY_STATUS_UNKNOWN; else if (dev->battery_capacity == 100) val->intval = POWER_SUPPLY_STATUS_FULL; @@ -486,6 +487,14 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, dev->battery_report_type = report_type; dev->battery_report_id = field->report->id; + /* + * Stylus is normally not connected to the device and thus we + * can't query the device and get meaningful battery strength. + * We have to wait for the device to report it on its own. + */ + dev->battery_avoid_query = report_type == HID_INPUT_REPORT && + field->physical == HID_DG_STYLUS; + dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg); if (IS_ERR(dev->battery)) { error = PTR_ERR(dev->battery); @@ -530,9 +539,10 @@ static void hidinput_update_battery(struct hid_device *dev, int value) capacity = hidinput_scale_battery_capacity(dev, value); - if (!dev->battery_reported || capacity != dev->battery_capacity) { + if (dev->battery_status != HID_BATTERY_REPORTED || + capacity != dev->battery_capacity) { dev->battery_capacity = capacity; - dev->battery_reported = true; + dev->battery_status = HID_BATTERY_REPORTED; power_supply_changed(dev->battery); } } @@ -1359,7 +1369,8 @@ static void hidinput_led_worker(struct work_struct *work) led_work); struct hid_field *field; struct hid_report *report; - int len, ret; + int ret; + u32 len; __u8 *buf; field = hidinput_get_led_field(hid); diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index 1ac4ff4d57a6..643b6eb54442 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -6,6 +6,17 @@ * * Copyright (c) 2012 Bernhard Seibold * Copyright (c) 2014 Jamie Lentin + * + * Linux IBM/Lenovo Scrollpoint mouse driver: + * - IBM Scrollpoint III + * - IBM Scrollpoint Pro + * - IBM Scrollpoint Optical + * - IBM Scrollpoint Optical 800dpi + * - IBM Scrollpoint Optical 800dpi Pro + * - Lenovo Scrollpoint Optical + * + * Copyright (c) 2012 Peter De Wachter + * Copyright (c) 2018 Peter Ganzhorn */ /* @@ -160,6 +171,17 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev, return 0; } +static int lenovo_input_mapping_scrollpoint(struct hid_device *hdev, + struct hid_input *hi, struct hid_field *field, + struct hid_usage *usage, unsigned long **bit, int *max) +{ + if (usage->hid == HID_GD_Z) { + hid_map_usage(hi, usage, bit, max, EV_REL, REL_HWHEEL); + return 1; + } + return 0; +} + static int lenovo_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) @@ -172,6 +194,14 @@ static int lenovo_input_mapping(struct hid_device *hdev, case USB_DEVICE_ID_LENOVO_CBTKBD: return lenovo_input_mapping_cptkbd(hdev, hi, field, usage, bit, max); + case USB_DEVICE_ID_IBM_SCROLLPOINT_III: + case USB_DEVICE_ID_IBM_SCROLLPOINT_PRO: + case 
USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL: + case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL: + case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO: + case USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL: + return lenovo_input_mapping_scrollpoint(hdev, hi, field, + usage, bit, max); default: return 0; } @@ -883,6 +913,12 @@ static const struct hid_device_id lenovo_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_III) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_PRO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL) }, { } }; diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 20b40ad26325..42ed887ba0be 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -34,7 +34,8 @@ module_param(emulate_scroll_wheel, bool, 0644); MODULE_PARM_DESC(emulate_scroll_wheel, "Emulate a scroll wheel"); static unsigned int scroll_speed = 32; -static int param_set_scroll_speed(const char *val, struct kernel_param *kp) { +static int param_set_scroll_speed(const char *val, + const struct kernel_param *kp) { unsigned long speed; if (!val || kstrtoul(val, 0, &speed) || speed > 63) return -EINVAL; diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 9e8c4d2ba11d..c3b9bd5dba75 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -354,7 +354,8 @@ static const struct attribute_group mt_attribute_group = { static void mt_get_feature(struct hid_device *hdev, struct hid_report *report) { struct mt_device *td = hid_get_drvdata(hdev); - int ret, size = hid_report_len(report); + int ret; + u32 size = hid_report_len(report); u8 *buf; /* @@ -738,9 +739,11 @@ static int mt_touch_event(struct hid_device *hid, struct hid_field *field, } static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field, - struct hid_usage *usage, __s32 value) + struct hid_usage *usage, __s32 value, + bool first_packet) { struct mt_device *td = hid_get_drvdata(hid); + __s32 cls = td->mtclass.name; __s32 quirks = td->mtclass.quirks; struct input_dev *input = field->hidinput->input; @@ -794,6 +797,15 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field, break; default: + /* + * For Win8 PTP touchpads we should only look at + * non finger/touch events in the first_packet of + * a (possible) multi-packet frame. 
+ */ + if ((cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL) && + !first_packet) + return; + if (usage->type) input_event(input, usage->type, usage->code, value); @@ -813,6 +825,7 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report) { struct mt_device *td = hid_get_drvdata(hid); struct hid_field *field; + bool first_packet; unsigned count; int r, n; @@ -831,6 +844,7 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report) td->num_expected = value; } + first_packet = td->num_received == 0; for (r = 0; r < report->maxfield; r++) { field = report->field[r]; count = field->report_count; @@ -840,7 +854,7 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report) for (n = 0; n < count; n++) mt_process_mt_event(hid, field, &field->usage[n], - field->value[n]); + field->value[n], first_packet); } if (td->num_received >= td->num_expected) @@ -1036,7 +1050,7 @@ static void mt_set_input_mode(struct hid_device *hdev) struct hid_report_enum *re; struct mt_class *cls = &td->mtclass; char *buf; - int report_len; + u32 report_len; if (td->inputmode < 0) return; diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c index febb21ee190e..584b10d3fc3d 100644 --- a/drivers/hid/hid-plantronics.c +++ b/drivers/hid/hid-plantronics.c @@ -2,7 +2,7 @@ * Plantronics USB HID Driver * * Copyright (c) 2014 JD Cole - * Copyright (c) 2015 Terry Junge + * Copyright (c) 2015-2018 Terry Junge */ /* @@ -48,6 +48,10 @@ static int plantronics_input_mapping(struct hid_device *hdev, unsigned short mapped_key; unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev); + /* special case for PTT products */ + if (field->application == HID_GD_JOYSTICK) + goto defaulted; + /* handle volume up/down mapping */ /* non-standard types or multi-HID interfaces - plt_type is PID */ if (!(plt_type & HID_USAGE_PAGE)) { diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c index ef241d66562e..cf5812188c37 100644 --- a/drivers/hid/hid-rmi.c +++ b/drivers/hid/hid-rmi.c @@ -89,8 +89,8 @@ struct rmi_data { u8 *writeReport; u8 *readReport; - int input_report_size; - int output_report_size; + u32 input_report_size; + u32 output_report_size; unsigned long flags; diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c index 43617fb28b87..317c9c2c0a7c 100644 --- a/drivers/hid/hid-roccat-kovaplus.c +++ b/drivers/hid/hid-roccat-kovaplus.c @@ -37,6 +37,8 @@ static uint kovaplus_convert_event_cpi(uint value) static void kovaplus_profile_activated(struct kovaplus_device *kovaplus, uint new_profile_index) { + if (new_profile_index >= ARRAY_SIZE(kovaplus->profile_settings)) + return; kovaplus->actual_profile = new_profile_index; kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level; kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x; diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 5fbe0f81ab2e..01b5a9f01814 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -192,6 +192,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t int ret = 0, len; unsigned char report_number; + if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { + ret = -ENODEV; + goto out; + } + dev = hidraw_table[minor]->hid; if (!dev->ll_driver->raw_request) { diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 9145c2129a96..136a34dc31b8 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ 
b/drivers/hid/i2c-hid/i2c-hid.c @@ -143,10 +143,10 @@ struct i2c_hid { * register of the HID * descriptor. */ unsigned int bufsize; /* i2c buffer size */ - char *inbuf; /* Input buffer */ - char *rawbuf; /* Raw Input buffer */ - char *cmdbuf; /* Command buffer */ - char *argsbuf; /* Command arguments buffer */ + u8 *inbuf; /* Input buffer */ + u8 *rawbuf; /* Raw Input buffer */ + u8 *cmdbuf; /* Command buffer */ + u8 *argsbuf; /* Command arguments buffer */ unsigned long flags; /* device flags */ unsigned long quirks; /* Various quirks */ @@ -450,7 +450,8 @@ static int i2c_hid_hwreset(struct i2c_client *client) static void i2c_hid_get_input(struct i2c_hid *ihid) { - int ret, ret_size; + int ret; + u32 ret_size; int size = le16_to_cpu(ihid->hdesc.wMaxInputLength); if (size > ihid->bufsize) @@ -475,7 +476,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid) return; } - if (ret_size > size) { + if ((ret_size > size) || (ret_size < 2)) { dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", __func__, size, ret_size); return; @@ -1035,6 +1036,14 @@ static int i2c_hid_probe(struct i2c_client *client, pm_runtime_enable(&client->dev); device_enable_async_suspend(&client->dev); + /* Make sure there is something at this address */ + ret = i2c_smbus_read_byte(client); + if (ret < 0) { + dev_dbg(&client->dev, "nothing at this address: %d\n", ret); + ret = -ENXIO; + goto err_pm; + } + ret = i2c_hid_fetch_hid_descriptor(ihid); if (ret < 0) goto err_pm; diff --git a/drivers/hid/intel-ish-hid/Kconfig b/drivers/hid/intel-ish-hid/Kconfig index 519e4c8b53c4..aef9967fdc1f 100644 --- a/drivers/hid/intel-ish-hid/Kconfig +++ b/drivers/hid/intel-ish-hid/Kconfig @@ -14,4 +14,16 @@ config INTEL_ISH_HID Broxton and Kaby Lake. Say Y here if you want to support Intel ISH. If unsure, say N. + +config INTEL_ISH_UART_INTERFACE + tristate "Intel Integrated Sensor Hub UART Interface" + help + The Integrated Sensor Hub (ISH) firmware has a built-in UART driver. + This UART driver is enumerated as one of the ISH firmware clients. + This driver uses the tty interface to allow user-space applications + to use this UART. + + One use case is to read/write/control a sensor connected via + UART. 
+ endmenu diff --git a/drivers/hid/intel-ish-hid/Makefile b/drivers/hid/intel-ish-hid/Makefile index 825b70af672f..ca9af9efcd3b 100644 --- a/drivers/hid/intel-ish-hid/Makefile +++ b/drivers/hid/intel-ish-hid/Makefile @@ -20,4 +20,6 @@ obj-$(CONFIG_INTEL_ISH_HID) += intel-ishtp-hid.o intel-ishtp-hid-objs := ishtp-hid.o intel-ishtp-hid-objs += ishtp-hid-client.o +obj-$(CONFIG_INTEL_ISH_UART_INTERFACE) += ishtp-tty-client.o + ccflags-y += -Idrivers/hid/intel-ish-hid/ishtp diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h index 2aac097c3f70..97869b7410eb 100644 --- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h +++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h @@ -28,6 +28,7 @@ #define SPT_Ax_DEVICE_ID 0x9D35 #define CNL_Ax_DEVICE_ID 0x9DFC #define GLK_Ax_DEVICE_ID 0x31A2 +#define CNL_H_DEVICE_ID 0xA37C #define REVISION_ID_CHT_A0 0x6 #define REVISION_ID_CHT_Ax_SI 0x0 diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 20d824f74f99..a2c53ea3b5ed 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -37,6 +37,7 @@ static const struct pci_device_id ish_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_Ax_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)}, {0, } }; MODULE_DEVICE_TABLE(pci, ish_pci_tbl); @@ -204,8 +205,7 @@ static void ish_remove(struct pci_dev *pdev) kfree(ishtp_dev); } -#ifdef CONFIG_PM -static struct device *ish_resume_device; +static struct device __maybe_unused *ish_resume_device; /* 50ms to get resume response */ #define WAIT_FOR_RESUME_ACK_MS 50 @@ -219,7 +219,7 @@ static struct device *ish_resume_device; * in that case a simple resume message is enough, otherwise we need * a reset sequence. 
*/ -static void ish_resume_handler(struct work_struct *work) +static void __maybe_unused ish_resume_handler(struct work_struct *work) { struct pci_dev *pdev = to_pci_dev(ish_resume_device); struct ishtp_device *dev = pci_get_drvdata(pdev); @@ -261,7 +261,7 @@ static void ish_resume_handler(struct work_struct *work) * * Return: 0 to the pm core */ -static int ish_suspend(struct device *device) +static int __maybe_unused ish_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct ishtp_device *dev = pci_get_drvdata(pdev); @@ -287,7 +287,7 @@ static int ish_suspend(struct device *device) return 0; } -static DECLARE_WORK(resume_work, ish_resume_handler); +static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler); /** * ish_resume() - ISH resume callback * @device: device pointer @@ -296,7 +296,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler); * * Return: 0 to the pm core */ -static int ish_resume(struct device *device) +static int __maybe_unused ish_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct ishtp_device *dev = pci_get_drvdata(pdev); @@ -310,21 +310,14 @@ static int ish_resume(struct device *device) return 0; } -static const struct dev_pm_ops ish_pm_ops = { - .suspend = ish_suspend, - .resume = ish_resume, -}; -#define ISHTP_ISH_PM_OPS (&ish_pm_ops) -#else -#define ISHTP_ISH_PM_OPS NULL -#endif /* CONFIG_PM */ +static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume); static struct pci_driver ish_driver = { .name = KBUILD_MODNAME, .id_table = ish_pci_tbl, .probe = ish_probe, .remove = ish_remove, - .driver.pm = ISHTP_ISH_PM_OPS, + .driver.pm = &ish_pm_ops, }; module_pci_driver(ish_driver); diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c index 157b44aacdff..3bd2683e89d7 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c +++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c @@ -320,23 +320,14 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf, */ static void ish_cl_event_cb(struct ishtp_cl_device *device) { - struct ishtp_cl *hid_ishtp_cl = device->driver_data; + struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(device); struct ishtp_cl_rb *rb_in_proc; size_t r_length; - unsigned long flags; if (!hid_ishtp_cl) return; - spin_lock_irqsave(&hid_ishtp_cl->in_process_spinlock, flags); - while (!list_empty(&hid_ishtp_cl->in_process_list.list)) { - rb_in_proc = list_entry( - hid_ishtp_cl->in_process_list.list.next, - struct ishtp_cl_rb, list); - list_del_init(&rb_in_proc->list); - spin_unlock_irqrestore(&hid_ishtp_cl->in_process_spinlock, - flags); - + while ((rb_in_proc = ishtp_cl_rx_get_rb(hid_ishtp_cl)) != NULL) { if (!rb_in_proc->buffer.data) return; @@ -346,9 +337,7 @@ static void ish_cl_event_cb(struct ishtp_cl_device *device) process_recv(hid_ishtp_cl, rb_in_proc->buffer.data, r_length); ishtp_cl_io_rb_recycle(rb_in_proc); - spin_lock_irqsave(&hid_ishtp_cl->in_process_spinlock, flags); } - spin_unlock_irqrestore(&hid_ishtp_cl->in_process_spinlock, flags); } /** @@ -641,8 +630,8 @@ static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl, static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset) { struct ishtp_device *dev; - unsigned long flags; struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data; + struct ishtp_fw_client *fw_client; int i; int rv; @@ -664,16 +653,14 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset) hid_ishtp_cl->rx_ring_size = HID_CL_RX_RING_SIZE; 
hid_ishtp_cl->tx_ring_size = HID_CL_TX_RING_SIZE; - spin_lock_irqsave(&dev->fw_clients_lock, flags); - i = ishtp_fw_cl_by_uuid(dev, &hid_ishtp_guid); - if (i < 0) { - spin_unlock_irqrestore(&dev->fw_clients_lock, flags); + fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_guid); + if (!fw_client) { dev_err(&client_data->cl_device->dev, "ish client uuid not found\n"); - return i; + return -ENOENT; } - hid_ishtp_cl->fw_client_id = dev->fw_clients[i].client_id; - spin_unlock_irqrestore(&dev->fw_clients_lock, flags); + + hid_ishtp_cl->fw_client_id = fw_client->client_id; hid_ishtp_cl->state = ISHTP_CL_CONNECTING; rv = ishtp_cl_connect(hid_ishtp_cl); @@ -769,7 +756,7 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work) if (!hid_ishtp_cl) return; - cl_device->driver_data = hid_ishtp_cl; + ishtp_set_drvdata(cl_device, hid_ishtp_cl); hid_ishtp_cl->client_data = client_data; client_data->hid_ishtp_cl = hid_ishtp_cl; @@ -818,7 +805,7 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device) if (!hid_ishtp_cl) return -ENOMEM; - cl_device->driver_data = hid_ishtp_cl; + ishtp_set_drvdata(cl_device, hid_ishtp_cl); hid_ishtp_cl->client_data = client_data; client_data->hid_ishtp_cl = hid_ishtp_cl; client_data->cl_device = cl_device; @@ -848,7 +835,7 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device) */ static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device) { - struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data; + struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device); struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data; hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__, @@ -878,7 +865,7 @@ static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device) */ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device) { - struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data; + struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device); struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data; hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__, @@ -902,7 +889,7 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device) static int hid_ishtp_cl_suspend(struct device *device) { struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device); - struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data; + struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device); struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data; hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__, @@ -923,7 +910,7 @@ static int hid_ishtp_cl_suspend(struct device *device) static int hid_ishtp_cl_resume(struct device *device) { struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device); - struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data; + struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device); struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data; hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__, diff --git a/drivers/hid/intel-ish-hid/ishtp-tty-client.c b/drivers/hid/intel-ish-hid/ishtp-tty-client.c new file mode 100644 index 000000000000..f483d37de676 --- /dev/null +++ b/drivers/hid/intel-ish-hid/ishtp-tty-client.c @@ -0,0 +1,890 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include + +#include "ishtp-dev.h" +#include "client.h" + +/* + * ISH TX/RX ring buffer pool size. ISH transport reserves the space for these + * during connect request. 
+ */
+#define TTY_CL_RX_RING_SIZE	32
+#define TTY_CL_TX_RING_SIZE	16
+
+/* Commands to send to ISH UART driver */
+enum ish_uart_command {
+	UART_GET_CONFIG = 1,
+	UART_SET_CONFIG,
+	UART_SEND_DATA,
+	UART_RECV_DATA,
+	UART_ABORT_WRITE,
+	UART_ABORT_READ,
+};
+
+#define CMD_MASK	GENMASK(6, 0)
+#define IS_RESPONSE	BIT(7)
+
+/**
+ * struct ishtp_tty_msg - Command header for ISH UART commands.
+ * @command: One of the ish_uart_command values. Bit 7 is the response bit.
+ * @status: Command response status. Non-zero indicates an error condition.
+ * @size: Size of the command excluding the header.
+ *
+ * This structure is used as the header for every command/data sent to ISH
+ * UART.
+ */
+struct ishtp_tty_msg {
+	u8 command;
+	u8 status;
+	u16 size;
+};
+
+/**
+ * struct ish_uart_config - UART configuration data
+ * @baud: Baud rate in bits per second.
+ * @bits_length: Number of data bits.
+ * @stop_bits: Number of stop bits.
+ * @flow_control: Flow control ON/OFF.
+ *
+ * This structure is used for every UART GET/SET configuration command.
+ */
+struct ish_uart_config {
+	u32 baud;
+	u8 bits_length:4;
+	u8 stop_bits:2;
+	u8 unused_0:1;
+	u8 unused_1:1;
+	u8 flow_control:1;
+	u8 reserved:7;
+};
+
+/**
+ * struct ishtp_cl_info - ISH transport client instance data
+ * @cl_device: ISH transport client device information.
+ * @ishtp_cl: ISH transport firmware client information.
+ * @get_report_done: Set once a command is sent and its response is received.
+ * @wait_send_report_done: Set when the send data response is received.
+ * @last_cmd_status: Last command status.
+ * @ishtp_response_wait: Wait queue to wait for a command response.
+ * @ishtp_send_wait: Wait queue to wait for a send response.
+ * @max_msg_size: Max message size of a single command.
+ * @max_ring_buffer_bytes: Max buffer size allocated by the ISH transport layer.
+ * @msg_buffer: Pre-allocated message buffer to copy and send.
+ *
+ * Encapsulates all information related to an ISH client. This information is
+ * necessary to connect, send and receive data from ISH UART via ISH transport.
+ */
+struct ishtp_cl_info {
+	struct ishtp_cl_device *cl_device;
+	struct ishtp_cl *ishtp_cl;
+	bool get_report_done;
+	bool wait_send_report_done;
+	int last_cmd_status;
+	wait_queue_head_t ishtp_response_wait;
+	wait_queue_head_t ishtp_send_wait;
+	int max_msg_size;
+	int max_ring_buffer_bytes;
+	u8 *msg_buffer;
+};
+
+/**
+ * struct ishtp_cl_tty - TTY client instance data
+ * @cl_info: ishtp client information related to this tty client.
+ * @port: Stores the port information.
+ * @baud: Stores the current baud rate.
+ * @bits_length: Stores the current number of data bits.
+ *
+ * Encapsulates all information related to a tty client.
+ */
+struct ishtp_cl_tty {
+	struct ishtp_cl_info *cl_info;
+	struct tty_port port;
+	u32 baud;
+	u8 bits_length;
+};
+
+#define cl_dev_info(cl_info)	(&(cl_info)->ishtp_cl->device->dev)
+
+/*
+ * Each ISH Transport client has a unique GUID. This defines the GUID of the
+ * tty client.
+ */
+static const guid_t tty_ishtp_guid = GUID_INIT(0x6f2647c7, 0x3e16, 0x4d79,
+					       0xb4, 0xff, 0x02, 0x89, 0x28,
+					       0xee, 0xeb, 0xca);
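The two structures above fully define the wire format: every message is an ishtp_tty_msg header followed immediately by ishtp_msg->size payload bytes. A standalone userspace sketch of how a UART_SET_CONFIG frame is laid out (stdint types stand in for the kernel's u8/u16/u32; all values are illustrative only, not driver API):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ishtp_tty_msg {
    	uint8_t command;
    	uint8_t status;
    	uint16_t size;		/* payload bytes, header excluded */
    };

    struct ish_uart_config {
    	uint32_t baud;
    	uint8_t bits_length:4;
    	uint8_t stop_bits:2;
    	uint8_t unused_0:1;
    	uint8_t unused_1:1;
    	uint8_t flow_control:1;
    	uint8_t reserved:7;
    };

    int main(void)
    {
    	uint8_t frame[sizeof(struct ishtp_tty_msg) +
    		      sizeof(struct ish_uart_config)];
    	struct ishtp_tty_msg hdr = { .command = 2 /* UART_SET_CONFIG */ };
    	struct ish_uart_config cfg = { .baud = 115200, .bits_length = 8 };

    	/* header first, then the payload it describes */
    	hdr.size = sizeof(cfg);
    	memcpy(frame, &hdr, sizeof(hdr));
    	memcpy(frame + sizeof(hdr), &cfg, sizeof(cfg));

    	printf("frame: %zu bytes total, %u payload after a %zu-byte header\n",
    	       sizeof(frame), hdr.size, sizeof(hdr));
    	return 0;
    }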
+/**
+ * ish_get_free_buffer_size() - Get free buffer size in the ring
+ * @tty: TTY instance of this client.
+ *
+ * Each request is queued in the ISH transport ring buffers to send via IPC.
+ * This function gets the available free space in these ring buffers.
+ *
+ * Return: Size of current free space in the ring buffer in bytes.
+ */
+static int ish_get_free_buffer_size(struct tty_struct *tty)
+{
+	struct ishtp_cl_tty *cl_tty = tty->driver_data;
+
+	return ishtp_cl_get_tx_free_rings(cl_tty->cl_info->ishtp_cl) *
+		(cl_tty->cl_info->max_msg_size - sizeof(struct ishtp_tty_msg));
+}
+
+/**
+ * ishtp_wait_for_response() - Wait for response for a command
+ * @cl_info: client instance info.
+ *
+ * If the sender is waiting for a command response, a call to this function
+ * will suspend the caller until the timeout expires or the response arrives
+ * from the firmware.
+ *
+ * Return: 0 for success and -ETIMEDOUT for timeout.
+ */
+static int ishtp_wait_for_response(struct ishtp_cl_info *cl_info)
+{
+	if (cl_info->get_report_done)
+		return 0;
+
+	/*
+	 * ISH firmware max delay for a single send failure is 1 second (HZ),
+	 * and the firmware will retry 2 times, so 3 * HZ is used for timeout.
+	 */
+	wait_event_interruptible_timeout(cl_info->ishtp_response_wait,
+					 cl_info->get_report_done, 3 * HZ);
+
+	if (!cl_info->get_report_done) {
+		dev_err(cl_dev_info(cl_info), "Timeout: ISHTP response\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
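ishtp_wait_for_response() is the usual completion-flag-plus-timed-wait shape: check the flag, wait with a timeout, re-check the flag. A userspace analogue using POSIX primitives, only to illustrate that shape (the kernel version uses a waitqueue and jiffies; nothing here is driver API):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool report_done;

    static int wait_for_response(int timeout_sec)
    {
    	struct timespec ts;
    	int err = 0;

    	clock_gettime(CLOCK_REALTIME, &ts);
    	ts.tv_sec += timeout_sec;

    	pthread_mutex_lock(&lock);
    	/* loop guards against spurious wakeups, like the flag re-check */
    	while (!report_done && err == 0)
    		err = pthread_cond_timedwait(&cond, &lock, &ts);
    	err = report_done ? 0 : -1;	/* -ETIMEDOUT in the driver */
    	pthread_mutex_unlock(&lock);
    	return err;
    }

    int main(void)
    {
    	printf("no response yet:     %d\n", wait_for_response(1));
    	report_done = true;
    	printf("response already in: %d\n", wait_for_response(1));
    	return 0;
    }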
+/**
+ * ish_tty_install() - tty operation callback for install
+ * @driver: tty driver registered to handle this device.
+ * @tty: tty structure passed from the core.
+ *
+ * During the install callback, create a new tty client instance for the
+ * registered ISH transport client device and initialize the port.
+ *
+ * Return: 0 for success and negative value for error.
+ */
+static int ish_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	struct ishtp_cl_tty *cl_tty;
+	struct ishtp_cl_device *cl_device;
+	static const struct tty_port_operations ish_port_ops;
+
+	cl_tty = kzalloc(sizeof(*cl_tty), GFP_KERNEL);
+	if (!cl_tty)
+		return -ENOMEM;
+
+	cl_device = container_of(tty->dev->parent, struct ishtp_cl_device,
+				 dev);
+
+	cl_tty->cl_info = ishtp_get_drvdata(cl_device);
+	cl_tty->cl_info->ishtp_cl->client_data = cl_tty;
+
+	tty_port_init(&cl_tty->port);
+	cl_tty->port.ops = &ish_port_ops;
+
+	tty->driver_data = cl_tty;
+
+	return tty_port_install(&cl_tty->port, driver, tty);
+}
+
+/**
+ * ish_tty_open() - tty operation callback for open
+ * @tty: tty structure passed from the core.
+ * @filp: file pointer passed from the tty core.
+ *
+ * Calls tty_port_open() when called from the tty core.
+ *
+ * Return: Return value of tty_port_open().
+ */
+static int ish_tty_open(struct tty_struct *tty, struct file *filp)
+{
+	struct ishtp_cl_tty *cl_tty = tty->driver_data;
+
+	return tty_port_open(&cl_tty->port, tty, filp);
+}
+
+/**
+ * ish_tty_close() - tty operation callback for close
+ * @tty: tty structure passed from the core.
+ * @filp: file pointer passed from the tty core.
+ *
+ * Calls tty_port_close() when called from the tty core and frees the
+ * tty instance data.
+ *
+ * Return: None.
+ */
+static void ish_tty_close(struct tty_struct *tty, struct file *filp)
+{
+	struct ishtp_cl_tty *cl_tty = tty->driver_data;
+
+	tty_port_close(&cl_tty->port, tty, filp);
+	kfree(cl_tty);
+}
+
+/**
+ * ish_tty_write() - tty operation callback for write
+ * @tty: tty structure passed from the core.
+ * @buf: Buffer to send.
+ * @count: Length of the buffer.
+ *
+ * This request simply queues the data, up to the available buffer space in
+ * the transport TX ring buffers.
+ * This function shouldn't call any function which sleeps, as the write
+ * callback can be called in interrupt context from the core.
+ *
+ * Return: Size of data copied to the ring buffers, or an error code.
+ */
+static int ish_tty_write(struct tty_struct *tty, const unsigned char *buf,
+			 int count)
+{
+	struct ishtp_cl_tty *cl_tty = tty->driver_data;
+	struct ishtp_cl_info *cl_info = cl_tty->cl_info;
+	struct ishtp_tty_msg *ishtp_msg;
+	unsigned char *msg_buf;
+	u32 max_payload_sz;
+	int c, sent = 0, ret;
+
+	ishtp_msg = (struct ishtp_tty_msg *)cl_info->msg_buffer;
+	ishtp_msg->command = UART_SEND_DATA;
+	msg_buf = (unsigned char *)&ishtp_msg[1];
+
+	max_payload_sz = cl_info->max_msg_size - sizeof(struct ishtp_tty_msg);
+
+	while (count > 0) {
+		if (ish_get_free_buffer_size(tty) < count)
+			break; /* return partially sent char count or 0 */
+
+		c = count;
+		if (c > max_payload_sz)
+			c = max_payload_sz;
+
+		ishtp_msg->size = c;
+		memcpy(msg_buf, buf, c);
+
+		ret = ishtp_cl_send(cl_info->ishtp_cl, cl_info->msg_buffer,
+				    sizeof(struct ishtp_tty_msg) + c);
+		if (ret)
+			return ret;
+
+		buf += c;
+		count -= c;
+		sent += c;
+	}
+
+	return sent;
+}
+
+/* List of supported baud rates */
+static u32 supported_rates[] = {
+	115200,
+	921600,
+	2000000,
+};
+
+/**
+ * ish_tty_match_baudrate() - Match the requested baud rate
+ * @baud: Requested baud rate.
+ *
+ * Look for a match in the supported baud rates. If there is no exact match,
+ * return the next lower supported baud rate.
+ *
+ * Return: Matched baud rate.
+ */
+static u32 ish_tty_match_baudrate(unsigned int baud)
+{
+	int count = ARRAY_SIZE(supported_rates);
+	int i;
+
+	for (i = 0; i < count; ++i) {
+		if (supported_rates[i] == baud)
+			return baud;
+		else if ((i != count - 1) && (baud < supported_rates[i + 1]))
+			return supported_rates[i];
+	}
+
+	return supported_rates[count - 1];
+}
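The rounding behaviour of ish_tty_match_baudrate() is easy to check in isolation. A userspace re-implementation of the exact loop above, with a few asserts; note the edge case that requests below the lowest table entry also come back as 115200:

    #include <assert.h>
    #include <stdio.h>

    static unsigned int supported_rates[] = { 115200, 921600, 2000000 };
    #define N_RATES (sizeof(supported_rates) / sizeof(supported_rates[0]))

    static unsigned int match_baudrate(unsigned int baud)
    {
    	unsigned int i;

    	for (i = 0; i < N_RATES; i++) {
    		if (supported_rates[i] == baud)
    			return baud;
    		else if (i != N_RATES - 1 && baud < supported_rates[i + 1])
    			return supported_rates[i];
    	}
    	return supported_rates[N_RATES - 1];
    }

    int main(void)
    {
    	assert(match_baudrate(921600) == 921600);   /* exact match kept */
    	assert(match_baudrate(500000) == 115200);   /* rounded down */
    	assert(match_baudrate(3000000) == 2000000); /* clamped to max */
    	assert(match_baudrate(9600) == 115200);     /* below-table edge case */
    	printf("baud matching behaves as described\n");
    	return 0;
    }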
+/**
+ * ish_tty_set_termios() - Set terminal parameters
+ * @tty: tty structure passed from the core.
+ * @old_termios: Previous termios settings.
+ *
+ * Sets terminal parameters. In case some parameters are not supported,
+ * this updates the tty termios with the acceptable parameters.
+ *
+ * Return: None.
+ */
+static void ish_tty_set_termios(struct tty_struct *tty,
+				struct ktermios *old_termios)
+{
+	struct ishtp_cl_tty *cl_tty = tty->driver_data;
+	struct ishtp_cl_info *cl_info = cl_tty->cl_info;
+	struct ishtp_tty_msg *ishtp_msg;
+	struct ktermios *termios;
+	struct ish_uart_config *cfg;
+	int ret;
+
+	if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
+		return;
+
+	termios = &tty->termios;
+	ishtp_msg = (struct ishtp_tty_msg *)cl_info->msg_buffer;
+
+	ishtp_msg->command = UART_SET_CONFIG;
+	cfg = (struct ish_uart_config *)&ishtp_msg[1];
+
+	switch (C_CSIZE(tty)) {
+	case CS5:
+		cfg->bits_length = 5;
+		break;
+	case CS6:
+		cfg->bits_length = 6;
+		break;
+	case CS7:
+		cfg->bits_length = 7;
+		break;
+	default:
+	case CS8:
+		cfg->bits_length = 8;
+		break;
+	}
+
+	if (C_CRTSCTS(tty))
+		cfg->flow_control = true;
+	else
+		cfg->flow_control = false;
+
+	cfg->baud = tty_termios_baud_rate(termios);
+	cfg->baud = ish_tty_match_baudrate(cfg->baud);
+
+	ishtp_msg->size = sizeof(struct ish_uart_config);
+
+	cl_info->get_report_done = false;
+	/* send the message out over ishtp */
+	ret = ishtp_cl_send(cl_info->ishtp_cl, cl_info->msg_buffer,
+			    sizeof(struct ishtp_tty_msg) + ishtp_msg->size);
+	if (ret)
+		return;
+
+	/* wait for the message to be sent completely */
+	if (ishtp_wait_for_response(cl_info))
+		return;
+
+	/* Update termios with the supported properties */
+	termios->c_cflag &= ~CMSPAR;
+	tty_termios_encode_baud_rate(termios, cfg->baud, cfg->baud);
+}
+
+/**
+ * ish_tty_write_room() - write_room() callback
+ * @tty: tty structure passed from the core.
+ *
+ * Returns the space available in the ISH transport TX ring buffers in bytes.
+ *
+ * Return: space available in bytes.
+ */
+static int ish_tty_write_room(struct tty_struct *tty)
+{
+	return ish_get_free_buffer_size(tty);
+}
+
+/**
+ * ish_tty_chars_in_buffer() - chars_in_buffer() callback
+ * @tty: tty structure passed from the core.
+ *
+ * Returns the number of characters in the ISH transport ring buffers which
+ * are not yet sent via IPC to ISH.
+ *
+ * Return: space used in bytes.
+ */
+static int ish_tty_chars_in_buffer(struct tty_struct *tty)
+{
+	struct ishtp_cl_tty *cl_tty = tty->driver_data;
+	struct ishtp_cl_info *cl_info = cl_tty->cl_info;
+
+	return cl_info->max_ring_buffer_bytes - ish_get_free_buffer_size(tty);
+}
+
+/**
+ * ish_tty_wait_until_sent() - wait_until_sent() callback
+ * @tty: tty structure passed from the core.
+ * @timeout: timeout in jiffies, 0 to wait indefinitely.
+ *
+ * Wait until all the bytes in the ISH transport ring buffers are sent, or
+ * until the timeout expires. This callback is allowed to sleep.
+ *
+ * Return: None.
+ */
+static void ish_tty_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+	struct ishtp_cl_tty *cl_tty = tty->driver_data;
+	struct ishtp_cl_info *cl_info = cl_tty->cl_info;
+	u64 begin, end, elapsed;
+
+	elapsed = 0;
+	begin = get_jiffies_64();
+	end = begin + timeout;
+	while (ishtp_cl_get_tx_free_buffer_size(cl_info->ishtp_cl) !=
+			cl_info->max_ring_buffer_bytes) {
+
+		cl_info->wait_send_report_done = true;
+		if (timeout) {
+			/* sleep only for the remaining time in the budget */
+			wait_event_interruptible_timeout(cl_info->ishtp_send_wait,
+					cl_info->wait_send_report_done,
+					end - (begin + elapsed));
+			elapsed = get_jiffies_64() - begin;
+			if (end <= (begin + elapsed))
+				break;
+		} else {
+			wait_event_interruptible(cl_info->ishtp_send_wait,
+					cl_info->wait_send_report_done);
+		}
+		cl_info->wait_send_report_done = false;
+	}
+}
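write_room() and chars_in_buffer() above are pure arithmetic over the ring accounting. A standalone sketch with illustrative numbers (not taken from real hardware); note the mild asymmetry that max_ring_buffer_bytes counts whole messages while the free-space formula excludes the 4-byte header, so chars_in_buffer() reports a small nonzero floor even when all rings are free:

    #include <stdio.h>

    #define HEADER_SZ 4			/* sizeof(struct ishtp_tty_msg) */

    int main(void)
    {
    	int max_msg_size = 4960;	/* fw_client->props.max_msg_length */
    	int total_rings = 16;		/* TTY_CL_TX_RING_SIZE */
    	int free_rings = 13;		/* rings not currently queued */

    	/* captured at connect time as max_ring_buffer_bytes */
    	int capacity = total_rings * max_msg_size;
    	/* ish_tty_write_room(): payload bytes, headers excluded */
    	int write_room = free_rings * (max_msg_size - HEADER_SZ);
    	/* ish_tty_chars_in_buffer(): capacity minus free payload space */
    	int in_buffer = capacity - write_room;

    	printf("write_room=%d in_buffer=%d\n", write_room, in_buffer);
    	return 0;
    }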
+/**
+ * process_recv() - Process received data callback
+ * @cl_info: client instance for which data was received.
+ * @recv_buf: Received buffer.
+ * @data_len: Received size.
+ *
+ * Process data received from the ISH UART driver. This can be a new RX data
+ * indication or a response to a previously sent command.
+ *
+ * Return: None.
+ */
+static void process_recv(struct ishtp_cl_info *cl_info, void *recv_buf,
+			 size_t data_len)
+{
+	struct ishtp_cl_device *cl_device = cl_info->cl_device;
+	struct ishtp_cl_tty *cl_tty = cl_info->ishtp_cl->client_data;
+	struct ishtp_tty_msg *ishtp_msg;
+	struct ish_uart_config *cfg;
+	unsigned char *payload;
+	size_t payload_len, total_len, cur_pos;
+
+	if (data_len < sizeof(struct ishtp_tty_msg)) {
+		dev_err(&cl_device->dev,
+			"Error, received %zu which is < min %zu\n", data_len,
+			sizeof(struct ishtp_tty_msg));
+		return;
+	}
+
+	payload = recv_buf + sizeof(struct ishtp_tty_msg);
+	total_len = data_len;
+	cur_pos = 0;
+
+	do {
+		ishtp_msg = (struct ishtp_tty_msg *)(recv_buf + cur_pos);
+		payload_len = ishtp_msg->size;
+
+		switch (ishtp_msg->command & CMD_MASK) {
+		case UART_GET_CONFIG:
+			cl_info->get_report_done = true;
+			if (!(ishtp_msg->command & IS_RESPONSE) ||
+			    (ishtp_msg->status)) {
+				dev_err(&cl_device->dev,
+					"Recv command with status error\n");
+				wake_up_interruptible(&cl_info->ishtp_response_wait);
+				break;
+			}
+
+			if (cl_tty) {
+				cfg = (struct ish_uart_config *)payload;
+				cl_tty->baud = cfg->baud;
+				cl_tty->bits_length = cfg->bits_length;
+				dev_dbg(&cl_device->dev,
+					"Command: get config: %u:%u\n",
+					cl_tty->baud, cl_tty->bits_length);
+			}
+			wake_up_interruptible(&cl_info->ishtp_response_wait);
+			break;
+		case UART_SET_CONFIG:
+			cl_info->get_report_done = true;
+			if (!(ishtp_msg->command & IS_RESPONSE) ||
+			    (ishtp_msg->status)) {
+				dev_err(&cl_device->dev,
+					"Recv command with status error\n");
+				wake_up_interruptible(&cl_info->ishtp_response_wait);
+				break;
+			}
+
+			cl_info->last_cmd_status = 0;
+			dev_dbg(&cl_device->dev,
+				"Command: set config success\n");
+			wake_up_interruptible(&cl_info->ishtp_response_wait);
+			break;
+		case UART_SEND_DATA:
+			if (!(ishtp_msg->command & IS_RESPONSE) ||
+			    (ishtp_msg->status)) {
+				cl_info->last_cmd_status = -EIO;
+				dev_err(&cl_device->dev,
+					"Recv command with status error\n");
+			} else {
+				dev_dbg(&cl_device->dev,
+					"Command: send data done\n");
+			}
+			if (cl_info->wait_send_report_done)
+				wake_up_interruptible(&cl_info->ishtp_send_wait);
+			break;
+		case UART_RECV_DATA:
+			dev_dbg(&cl_device->dev,
+				"Command: recv data: len=%zu\n",
+				payload_len);
+			print_hex_dump_bytes("", DUMP_PREFIX_NONE, payload,
+					     payload_len);
+			if (cl_tty) {
+				tty_insert_flip_string(&cl_tty->port, payload,
+						       payload_len);
+				tty_flip_buffer_push(&cl_tty->port);
+			}
+			break;
+		default:
+			break;
+		}
+
+		cur_pos += payload_len + sizeof(struct ishtp_tty_msg);
+		payload += payload_len + sizeof(struct ishtp_tty_msg);
+
+	} while (cur_pos < total_len);
+}
+
+/**
+ * tty_ishtp_cl_event_cb() - Receive data callback
+ * @cl_device: client device for which data was received.
+ *
+ * Extract data from the RX ring buffer and process the received data.
+ *
+ * Return: None.
+ */
+static void tty_ishtp_cl_event_cb(struct ishtp_cl_device *cl_device)
+{
+	struct ishtp_cl_info *cl_info = ishtp_get_drvdata(cl_device);
+	struct ishtp_cl *cl = cl_info->ishtp_cl;
+	struct ishtp_cl_rb *rb_in_proc;
+
+	while ((rb_in_proc = ishtp_cl_rx_get_rb(cl)) &&
+	       rb_in_proc->buffer.data) {
+		process_recv(cl_info, rb_in_proc->buffer.data,
+			     rb_in_proc->buf_idx);
+		ishtp_cl_io_rb_recycle(rb_in_proc);
+	}
+}
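process_recv() walks a receive buffer that may hold several back-to-back frames, advancing by header size plus payload size each step. A standalone sketch of the same walk over fabricated data (types mirror the patch; the payloads are fake):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ishtp_tty_msg {
    	uint8_t command;
    	uint8_t status;
    	uint16_t size;
    };

    int main(void)
    {
    	uint8_t buf[64];
    	size_t pos = 0, total = 0;
    	struct ishtp_tty_msg hdr;

    	/* two fake UART_RECV_DATA (4) frames, 3- and 5-byte payloads */
    	const uint8_t payloads[2][5] = { "abc", "hello" };
    	const uint16_t lens[2] = { 3, 5 };
    	for (int i = 0; i < 2; i++) {
    		hdr = (struct ishtp_tty_msg){ .command = 4, .size = lens[i] };
    		memcpy(buf + total, &hdr, sizeof(hdr));
    		memcpy(buf + total + sizeof(hdr), payloads[i], lens[i]);
    		total += sizeof(hdr) + lens[i];
    	}

    	/* the walk: same cur_pos advance as the driver's do-while loop */
    	while (pos + sizeof(hdr) <= total) {
    		memcpy(&hdr, buf + pos, sizeof(hdr));
    		printf("cmd=%u status=%u payload=%u bytes\n",
    		       hdr.command, hdr.status, hdr.size);
    		pos += sizeof(hdr) + hdr.size;
    	}
    	return 0;
    }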
+/**
+ * ishtp_cl_tty_connect() - Connect to the ISH UART driver client
+ * @cl_device: client device to connect to.
+ *
+ * Issue a connect request using the ISH transport service and register
+ * an RX callback.
+ *
+ * Return: 0 for success or error code for failure.
+ */
+static int ishtp_cl_tty_connect(struct ishtp_cl_device *cl_device)
+{
+	struct ishtp_cl *cl;
+	struct ishtp_fw_client *fw_client;
+	struct ishtp_cl_info *cl_info;
+	int ret;
+
+	cl_info = kzalloc(sizeof(*cl_info), GFP_KERNEL);
+	if (!cl_info)
+		return -ENOMEM;
+
+	cl_info->max_msg_size = cl_device->fw_client->props.max_msg_length;
+
+	cl_info->msg_buffer = kzalloc(sizeof(struct ishtp_tty_msg) +
+				      cl_info->max_msg_size, GFP_KERNEL);
+	if (!cl_info->msg_buffer) {
+		ret = -ENOMEM;
+		goto out_cl_info_free;
+	}
+
+	cl = ishtp_cl_allocate(cl_device->ishtp_dev);
+	if (!cl) {
+		ret = -ENOMEM;
+		goto out_msg_buff_free;
+	}
+
+	ret = ishtp_cl_link(cl, ISHTP_HOST_CLIENT_ID_ANY);
+	if (ret)
+		goto out_client_free;
+
+	fw_client = ishtp_fw_cl_get_client(cl->dev, &tty_ishtp_guid);
+	if (!fw_client) {
+		ret = -ENOENT;
+		goto out_client_free;
+	}
+
+	cl->fw_client_id = fw_client->client_id;
+	cl->state = ISHTP_CL_CONNECTING;
+	cl->rx_ring_size = TTY_CL_RX_RING_SIZE;
+	cl->tx_ring_size = TTY_CL_TX_RING_SIZE;
+	ret = ishtp_cl_connect(cl);
+	if (ret) {
+		dev_err(&cl_device->dev, "client connect failed\n");
+		goto out_client_free;
+	}
+
+	cl_info->cl_device = cl_device;
+	cl_info->ishtp_cl = cl;
+
+	init_waitqueue_head(&cl_info->ishtp_response_wait);
+	init_waitqueue_head(&cl_info->ishtp_send_wait);
+	cl_info->max_ring_buffer_bytes = ishtp_cl_get_tx_free_buffer_size(cl);
+
+	ishtp_set_drvdata(cl_device, cl_info);
+
+	/* Register read callback */
+	ishtp_register_event_cb(cl_device, tty_ishtp_cl_event_cb);
+
+	ishtp_get_device(cl_device);
+
+	return 0;
+
+out_client_free:
+	ishtp_cl_free(cl);
+out_msg_buff_free:
+	kfree(cl_info->msg_buffer);
+out_cl_info_free:
+	kfree(cl_info);
+
+	return ret;
+}
+
+/**
+ * ishtp_cl_tty_disconnect() - Disconnect from the ISH UART driver client
+ * @cl_device: client device to disconnect from.
+ *
+ * Issue a disconnect request and clean up the queues.
+ *
+ * Return: None.
+ */
+static void ishtp_cl_tty_disconnect(struct ishtp_cl_device *cl_device)
+{
+	struct ishtp_cl_info *cl_info = ishtp_get_drvdata(cl_device);
+	struct ishtp_cl *cl = cl_info->ishtp_cl;
+
+	cl->state = ISHTP_CL_DISCONNECTING;
+	ishtp_cl_disconnect(cl);
+	ishtp_put_device(cl_device);
+	ishtp_cl_unlink(cl);
+	ishtp_cl_flush_queues(cl);
+	ishtp_cl_free(cl);
+	kfree(cl_info->msg_buffer);
+	kfree(cl_info);
+}
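ishtp_cl_tty_connect() above uses the standard goto-unwind error handling: each failure label releases exactly what was acquired before it, in reverse order. A self-contained userspace sketch of that pattern (the xmalloc() fault-injection helper and all sizes are artificial, for the demo only):

    #include <stdio.h>
    #include <stdlib.h>

    static void *xmalloc(size_t n, int fail)
    {
    	return fail ? NULL : malloc(n);
    }

    static int connect_sketch(int fail_at)
    {
    	void *info, *buf, *client;
    	int ret;

    	info = xmalloc(16, fail_at == 1);	/* cl_info */
    	if (!info)
    		return -1;			/* nothing acquired yet */

    	buf = xmalloc(64, fail_at == 2);	/* msg_buffer */
    	if (!buf) {
    		ret = -1;
    		goto out_info_free;
    	}

    	client = xmalloc(32, fail_at == 3);	/* ishtp_cl */
    	if (!client) {
    		ret = -1;
    		goto out_buf_free;
    	}

    	free(client);		/* demo only: free everything on success too */
    	ret = 0;

    out_buf_free:
    	free(buf);
    out_info_free:
    	free(info);
    	return ret;
    }

    int main(void)
    {
    	for (int fail_at = 0; fail_at <= 3; fail_at++)
    		printf("fail at step %d -> %d\n", fail_at,
    		       connect_sketch(fail_at));
    	return 0;
    }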
+/**
+ * ish_tty_get_bootup_termios() - Get the ISH UART default baud rate
+ * @cl_info: client instance to probe.
+ * @cl_tty: A placeholder to receive the reported parameters.
+ *
+ * Issue a command to get the UART configuration during driver init.
+ * The output can be used to set the default baud rate when registering
+ * the tty driver.
+ *
+ * Return: None.
+ */
+static void ish_tty_get_bootup_termios(struct ishtp_cl_info *cl_info,
+				       struct ishtp_cl_tty *cl_tty)
+{
+	struct ishtp_tty_msg *ishtp_msg;
+
+	ishtp_msg = (struct ishtp_tty_msg *)cl_info->msg_buffer;
+
+	ishtp_msg->command = UART_GET_CONFIG;
+	ishtp_msg->size = 0;
+
+	cl_tty->baud = 0;
+
+	cl_info->ishtp_cl->client_data = cl_tty;
+	cl_info->get_report_done = false;
+	ishtp_cl_send(cl_info->ishtp_cl, cl_info->msg_buffer,
+		      sizeof(struct ishtp_tty_msg));
+	ishtp_wait_for_response(cl_info);
+	cl_info->ishtp_cl->client_data = NULL;
+}
+
+static const struct tty_operations ish_tty_ops = {
+	.install = ish_tty_install,
+	.open = ish_tty_open,
+	.close = ish_tty_close,
+	.write = ish_tty_write,
+	.set_termios = ish_tty_set_termios,
+	.write_room = ish_tty_write_room,
+	.chars_in_buffer = ish_tty_chars_in_buffer,
+	.wait_until_sent = ish_tty_wait_until_sent,
+};
+
+static struct tty_driver *ish_tty_driver;
+
+/**
+ * ishtp_cl_tty_init() - TTY driver init
+ * @cl_device: client device to initialize.
+ *
+ * Register a tty driver and device for ISH UART.
+ *
+ * Return: 0 for success or error code for failure.
+ */
+static int ishtp_cl_tty_init(struct ishtp_cl_device *cl_device)
+{
+	struct ishtp_cl_info *cl_info;
+	static struct ktermios termios;
+	struct ishtp_cl_tty cl_tty;
+	struct device *dev;
+	int ret;
+
+	ret = ishtp_cl_tty_connect(cl_device);
+	if (ret)
+		return ret;
+
+	/* drvdata is set by ishtp_cl_tty_connect(), so fetch it only now */
+	cl_info = ishtp_get_drvdata(cl_device);
+
+	termios.c_cflag = CS8 | HUPCL | CLOCAL;
+	ish_tty_get_bootup_termios(cl_info, &cl_tty);
+	if (cl_tty.baud)
+		tty_termios_encode_baud_rate(&termios, cl_tty.baud,
+					     cl_tty.baud);
+	else
+		termios.c_cflag |= B115200;
+
+	ish_tty_driver = alloc_tty_driver(1);
+	if (!ish_tty_driver) {
+		ret = -ENOMEM;
+		goto disconnect_tty_driver;
+	}
+
+	ish_tty_driver->owner = THIS_MODULE;
+	ish_tty_driver->driver_name = "ish-serial";
+	ish_tty_driver->name = "ttyISH";
+	ish_tty_driver->minor_start = 0;
+	ish_tty_driver->major = 0;
+	ish_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	ish_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+	ish_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+
+	ish_tty_driver->init_termios = tty_std_termios;
+	ish_tty_driver->init_termios.c_cflag = termios.c_cflag;
+	ish_tty_driver->init_termios.c_lflag = ISIG | ICANON | IEXTEN;
+	tty_set_operations(ish_tty_driver, &ish_tty_ops);
+
+	ret = tty_register_driver(ish_tty_driver);
+	if (ret)
+		goto free_tty_driver;
+
+	dev = tty_register_device(ish_tty_driver, 0, &cl_device->dev);
+	if (IS_ERR(dev)) {
+		ret = PTR_ERR(dev);
+		goto unreg_tty_driver;
+	}
+
+	return 0;
+
+unreg_tty_driver:
+	tty_unregister_driver(ish_tty_driver);
+free_tty_driver:
+	put_tty_driver(ish_tty_driver);
+disconnect_tty_driver:
+	ishtp_cl_tty_disconnect(cl_device);
+
+	return ret;
+}
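Once ishtp_cl_tty_init() succeeds, a single /dev/ttyISH0 node should appear (name and index follow the tty_driver fields above). A minimal userspace sketch that opens it and requests one of the supported rates, which ends up exercising ish_tty_set_termios() and ish_tty_write(); error handling is kept minimal:

    #include <fcntl.h>
    #include <stdio.h>
    #include <termios.h>
    #include <unistd.h>

    int main(void)
    {
    	struct termios tio;
    	int fd = open("/dev/ttyISH0", O_RDWR | O_NOCTTY);

    	if (fd < 0) {
    		perror("open /dev/ttyISH0");
    		return 1;
    	}

    	tcgetattr(fd, &tio);
    	cfmakeraw(&tio);		/* raw 8-bit path */
    	cfsetispeed(&tio, B115200);
    	cfsetospeed(&tio, B115200);	/* triggers ish_tty_set_termios() */
    	tcsetattr(fd, TCSANOW, &tio);

    	/* queued as one or more UART_SEND_DATA frames by ish_tty_write() */
    	if (write(fd, "hello\n", 6) < 0)
    		perror("write");

    	close(fd);
    	return 0;
    }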
+/**
+ * ishtp_cl_tty_deinit() - TTY driver deinit
+ * @cl_device: client device to tear down.
+ *
+ * Unregister the tty driver and device for ISH UART.
+ *
+ * Return: None.
+ */
+static void ishtp_cl_tty_deinit(struct ishtp_cl_device *cl_device)
+{
+	tty_unregister_device(ish_tty_driver, 0);
+	tty_unregister_driver(ish_tty_driver);
+	put_tty_driver(ish_tty_driver);
+	ishtp_cl_tty_disconnect(cl_device);
+	ishtp_set_drvdata(cl_device, NULL);
+}
+
+/**
+ * ishtp_cl_tty_probe() - ISH client driver probe
+ * @cl_device: client device to probe.
+ *
+ * Once the ISH core identifies an FW client, this probe is called to
+ * bind the handler by matching the GUID.
+ *
+ * Return: 0 for success or error code.
+ */
+static int ishtp_cl_tty_probe(struct ishtp_cl_device *cl_device)
+{
+	int ret;
+
+	if (!cl_device)
+		return -ENODEV;
+
+	if (uuid_le_cmp(tty_ishtp_guid,
+			cl_device->fw_client->props.protocol_name) != 0)
+		return -ENODEV;
+
+	ret = ishtp_cl_tty_init(cl_device);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/**
+ * ishtp_cl_tty_remove() - ISH client driver remove
+ * @cl_device: client device to remove.
+ *
+ * Called during client driver removal to clean up.
+ *
+ * Return: Always 0.
+ */
+static int ishtp_cl_tty_remove(struct ishtp_cl_device *cl_device)
+{
+	ishtp_cl_tty_deinit(cl_device);
+
+	return 0;
+}
+
+static struct ishtp_cl_driver ishtp_cl_tty_driver = {
+	.name = "ishtp-client",
+	.probe = ishtp_cl_tty_probe,
+	.remove = ishtp_cl_tty_remove,
+};
+
+static int __init ishtp_tty_client_init(void)
+{
+	return ishtp_cl_driver_register(&ishtp_cl_tty_driver);
+}
+module_init(ishtp_tty_client_init);
+
+static void __exit ishtp_tty_client_exit(void)
+{
+	ishtp_cl_driver_unregister(&ishtp_cl_tty_driver);
+}
+module_exit(ishtp_tty_client_exit);
+
+MODULE_DESCRIPTION("ISH ISHTP TTY client driver");
+MODULE_AUTHOR("Even Xu ");
+MODULE_AUTHOR("Srinivas Pandruvada ");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ishtp:*");
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index f272cdd9bd55..728dc6d4561a 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -148,6 +148,31 @@ int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const uuid_le *uuid)
 }
 EXPORT_SYMBOL(ishtp_fw_cl_by_uuid);
 
+/**
+ * ishtp_fw_cl_get_client() - return client information to client
+ * @dev: the ishtp device structure
+ * @uuid: uuid of the client to search
+ *
+ * Search for a firmware client by UUID and return the related client
+ * information.
+ *
+ * Return: pointer to client information on success, NULL on failure.
+ */
+struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
+					       const uuid_le *uuid)
+{
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->fw_clients_lock, flags);
+	i = ishtp_fw_cl_by_uuid(dev, uuid);
+	spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+	if (i < 0 || dev->fw_clients[i].props.fixed_address)
+		return NULL;
+
+	return &dev->fw_clients[i];
+}
+EXPORT_SYMBOL(ishtp_fw_cl_get_client);
+
 /**
  * ishtp_fw_cl_by_id() - return index to fw_clients for client_id
  * @dev: the ishtp device structure
@@ -418,7 +443,7 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
 		list_del(&device->device_link);
 		spin_unlock_irqrestore(&dev->device_list_lock, flags);
 		dev_err(dev->devc, "Failed to register ISHTP client device\n");
-		kfree(device);
+		put_device(&device->dev);
 		return NULL;
 	}
 
@@ -563,6 +588,33 @@ void ishtp_put_device(struct ishtp_cl_device *cl_device)
 }
 EXPORT_SYMBOL(ishtp_put_device);
 
+/**
+ * ishtp_set_drvdata() - set client driver data
+ * @cl_device: client device instance
+ * @data: driver data to be set
+ *
+ * Set client driver data in cl_device->driver_data.
+ */
+void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data)
+{
+	cl_device->driver_data = data;
+}
+EXPORT_SYMBOL(ishtp_set_drvdata);
+
+/**
+ * ishtp_get_drvdata() - get client driver data
+ * @cl_device: client device instance
+ *
+ * Get client driver data from cl_device->driver_data.
+ *
+ * Return: pointer to driver data
+ */
+void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device)
+{
+	return cl_device->driver_data;
+}
+EXPORT_SYMBOL(ishtp_get_drvdata);
+
 /**
  * ishtp_bus_new_client() - Create a new client
  * @dev: ISHTP device instance
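A sketch of how a client driver is expected to use the two new accessors instead of touching cl_device->driver_data directly; the my_* names and the state layout are hypothetical, not part of this patch:

    #include <linux/slab.h>

    #include "ishtp-dev.h"
    #include "client.h"

    struct my_client_state {
    	struct ishtp_cl *cl;
    };

    static int my_cl_probe(struct ishtp_cl_device *cl_device)
    {
    	struct my_client_state *state;

    	state = kzalloc(sizeof(*state), GFP_KERNEL);
    	if (!state)
    		return -ENOMEM;

    	/* stash per-device state for later callbacks */
    	ishtp_set_drvdata(cl_device, state);
    	return 0;
    }

    static int my_cl_remove(struct ishtp_cl_device *cl_device)
    {
    	struct my_client_state *state = ishtp_get_drvdata(cl_device);

    	ishtp_set_drvdata(cl_device, NULL);
    	kfree(state);
    	return 0;
    }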
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.h b/drivers/hid/intel-ish-hid/ishtp/bus.h
index a1ffae7f26ad..b8a5bcc82536 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.h
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.h
@@ -101,6 +101,9 @@ void ishtp_reset_compl_handler(struct ishtp_device *dev);
 void ishtp_put_device(struct ishtp_cl_device *);
 void ishtp_get_device(struct ishtp_cl_device *);
 
+void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data);
+void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device);
+
 int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
 			       struct module *owner);
 #define ishtp_cl_driver_register(driver) \
@@ -110,5 +113,7 @@ void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver);
 int ishtp_register_event_cb(struct ishtp_cl_device *device,
 			    void (*read_cb)(struct ishtp_cl_device *));
 int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const uuid_le *cuuid);
+struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
+					       const uuid_le *uuid);
 
 #endif /* _LINUX_ISHTP_CL_BUS_H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
index b9b917d2d50d..248651c35497 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
@@ -69,6 +69,8 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
 	int j;
 	unsigned long flags;
 
+	cl->tx_ring_free_size = 0;
+
 	/* Allocate pool to free Tx bufs */
 	for (j = 0; j < cl->tx_ring_size; ++j) {
 		struct ishtp_cl_tx_ring *tx_buf;
@@ -85,6 +87,7 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
 
 		spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
 		list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
+		++cl->tx_ring_free_size;
 		spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
 	}
 	return 0;
@@ -144,6 +147,7 @@ void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
 		tx_buf = list_entry(cl->tx_free_list.list.next,
 				    struct ishtp_cl_tx_ring, list);
 		list_del(&tx_buf->list);
+		--cl->tx_ring_free_size;
 		kfree(tx_buf->send_buf.data);
 		kfree(tx_buf);
 	}
@@ -255,3 +259,48 @@ int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
 	return rets;
 }
 EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
+
+/**
+ * ishtp_cl_tx_empty() - test whether the client device tx buffer is empty
+ * @cl: Pointer to client device instance
+ *
+ * Check whether the client device tx buffer list is empty.
+ *
+ * Return: true if the client tx buffer list is empty, else false
+ */
+bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
+{
+	int tx_list_empty;
+	unsigned long tx_flags;
+
+	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+	tx_list_empty = list_empty(&cl->tx_list.list);
+	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+	return !!tx_list_empty;
+}
+EXPORT_SYMBOL(ishtp_cl_tx_empty);
+
+/**
+ * ishtp_cl_rx_get_rb() - Get an rb from the client device rx buffer list
+ * @cl: Pointer to client device instance
+ *
+ * Check the client device in-process buffer list and get an rb from it.
+ * + * Return: rb pointer if buffer list isn't empty else NULL + */ +struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl) +{ + unsigned long rx_flags; + struct ishtp_cl_rb *rb; + + spin_lock_irqsave(&cl->in_process_spinlock, rx_flags); + rb = list_first_entry_or_null(&cl->in_process_list.list, + struct ishtp_cl_rb, list); + if (rb) + list_del_init(&rb->list); + spin_unlock_irqrestore(&cl->in_process_spinlock, rx_flags); + + return rb; +} +EXPORT_SYMBOL(ishtp_cl_rx_get_rb); diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c index 007443ef5fca..faeccdb1475b 100644 --- a/drivers/hid/intel-ish-hid/ishtp/client.c +++ b/drivers/hid/intel-ish-hid/ishtp/client.c @@ -22,6 +22,25 @@ #include "hbm.h" #include "client.h" +int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl) +{ + unsigned long tx_free_flags; + int size; + + spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); + size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length; + spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags); + + return size; +} +EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size); + +int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl) +{ + return cl->tx_ring_free_size; +} +EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings); + /** * ishtp_read_list_flush() - Flush read queue * @cl: ishtp client instance @@ -90,6 +109,7 @@ static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev) cl->rx_ring_size = CL_DEF_RX_RING_SIZE; cl->tx_ring_size = CL_DEF_TX_RING_SIZE; + cl->tx_ring_free_size = cl->tx_ring_size; /* dma */ cl->last_tx_path = CL_TX_PATH_IPC; @@ -577,6 +597,8 @@ int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length) * max ISHTP message size per client */ list_del_init(&cl_msg->list); + --cl->tx_ring_free_size; + spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags); memcpy(cl_msg->send_buf.data, buf, length); cl_msg->send_buf.size = length; @@ -685,6 +707,7 @@ static void ipc_tx_callback(void *prm) ishtp_write_message(dev, &ishtp_hdr, pmsg); spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); list_add_tail(&cl_msg->list, &cl->tx_free_list.list); + ++cl->tx_ring_free_size; spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags); } else { @@ -778,6 +801,7 @@ static void ishtp_cl_send_msg_dma(struct ishtp_device *dev, ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer); spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); list_add_tail(&cl_msg->list, &cl->tx_free_list.list); + ++cl->tx_ring_free_size; spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags); ++cl->send_msg_cnt_dma; } diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h index 79eade547f5d..042f4c4853b1 100644 --- a/drivers/hid/intel-ish-hid/ishtp/client.h +++ b/drivers/hid/intel-ish-hid/ishtp/client.h @@ -84,6 +84,7 @@ struct ishtp_cl { /* Client Tx buffers list */ unsigned int tx_ring_size; struct ishtp_cl_tx_ring tx_list, tx_free_list; + int tx_ring_free_size; spinlock_t tx_list_spinlock; spinlock_t tx_free_list_spinlock; size_t tx_offs; /* Offset in buffer at head of 'tx_list' */ @@ -137,6 +138,8 @@ int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl); int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl); void ishtp_cl_free_rx_ring(struct ishtp_cl *cl); void ishtp_cl_free_tx_ring(struct ishtp_cl *cl); +int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl); +int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl); /* DMA I/F functions */ void 
recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg, @@ -178,5 +181,7 @@ int ishtp_cl_flush_queues(struct ishtp_cl *cl); /* exported functions from ISHTP client buffer management scope */ int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb); +bool ishtp_cl_tx_empty(struct ishtp_cl *cl); +struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl); #endif /* _ISHTP_CLIENT_H_ */ diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 7d749b19c27c..cf307bdc3d53 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "usbhid.h" #ifdef CONFIG_USB_DYNAMIC_MINORS @@ -469,10 +470,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, if (uref->field_index >= report->maxfield) goto inval; + uref->field_index = array_index_nospec(uref->field_index, + report->maxfield); field = report->field[uref->field_index]; if (uref->usage_index >= field->maxusage) goto inval; + uref->usage_index = array_index_nospec(uref->usage_index, + field->maxusage); uref->usage_code = field->usage[uref->usage_index].hid; @@ -499,6 +504,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, if (uref->field_index >= report->maxfield) goto inval; + uref->field_index = array_index_nospec(uref->field_index, + report->maxfield); field = report->field[uref->field_index]; @@ -753,6 +760,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (finfo.field_index >= report->maxfield) break; + finfo.field_index = array_index_nospec(finfo.field_index, + report->maxfield); field = report->field[finfo.field_index]; memset(&finfo, 0, sizeof(finfo)); @@ -797,6 +806,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (cinfo.index >= hid->maxcollection) break; + cinfo.index = array_index_nospec(cinfo.index, + hid->maxcollection); cinfo.type = hid->collection[cinfo.index].type; cinfo.usage = hid->collection[cinfo.index].usage; diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 906e654fb0ba..18d5b99d13f1 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -115,7 +115,7 @@ static void wacom_feature_mapping(struct hid_device *hdev, unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid); u8 *data; int ret; - int n; + u32 n; switch (equivalent_usage) { case HID_DG_CONTACTMAX: @@ -284,6 +284,14 @@ static void wacom_usage_mapping(struct hid_device *hdev, } } + /* 2nd-generation Intuos Pro Large has incorrect Y maximum */ + if (hdev->vendor == USB_VENDOR_ID_WACOM && + hdev->product == 0x0358 && + WACOM_PEN_FIELD(field) && + wacom_equivalent_usage(usage->hid) == HID_GD_Y) { + field->logical_maximum = 43200; + } + switch (usage->hid) { case HID_GD_X: features->x_max = field->logical_maximum; @@ -408,7 +416,7 @@ static int wacom_set_device_mode(struct hid_device *hdev, u8 *rep_data; struct hid_report *r; struct hid_report_enum *re; - int length; + u32 length; int error = -ENOMEM, limit = 0; if (wacom_wac->mode_report < 0) @@ -1102,8 +1110,10 @@ static int __wacom_devm_sysfs_create_group(struct wacom *wacom, devres->root = root; error = sysfs_create_group(devres->root, group); - if (error) + if (error) { + devres_free(devres); return error; + } devres_add(&wacom->hdev->dev, devres); @@ -2340,23 +2350,23 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index) int i; unsigned long flags; - spin_lock_irqsave(&remote->remote_lock, flags); - 
remote->remotes[index].registered = false; - spin_unlock_irqrestore(&remote->remote_lock, flags); + for (i = 0; i < WACOM_MAX_REMOTES; i++) { + if (remote->remotes[i].serial == serial) { - if (remote->remotes[index].battery.battery) - devres_release_group(&wacom->hdev->dev, - &remote->remotes[index].battery.bat_desc); + spin_lock_irqsave(&remote->remote_lock, flags); + remote->remotes[i].registered = false; + spin_unlock_irqrestore(&remote->remote_lock, flags); - if (remote->remotes[index].group.name) - devres_release_group(&wacom->hdev->dev, - &remote->remotes[index]); + if (remote->remotes[i].battery.battery) + devres_release_group(&wacom->hdev->dev, + &remote->remotes[i].battery.bat_desc); + + if (remote->remotes[i].group.name) + devres_release_group(&wacom->hdev->dev, + &remote->remotes[i]); - for (i = 0; i < WACOM_MAX_REMOTES; i++) { - if (remote->remotes[i].serial == serial) { remote->remotes[i].serial = 0; remote->remotes[i].group.name = NULL; - remote->remotes[i].registered = false; remote->remotes[i].battery.battery = NULL; wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN; } diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index aa692e28b2cd..c401b5b63f4c 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -689,6 +689,45 @@ static int wacom_intuos_get_tool_type(int tool_id) return tool_type; } +static void wacom_exit_report(struct wacom_wac *wacom) +{ + struct input_dev *input = wacom->pen_input; + struct wacom_features *features = &wacom->features; + unsigned char *data = wacom->data; + int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0; + + /* + * Reset all states otherwise we lose the initial states + * when in-prox next time + */ + input_report_abs(input, ABS_X, 0); + input_report_abs(input, ABS_Y, 0); + input_report_abs(input, ABS_DISTANCE, 0); + input_report_abs(input, ABS_TILT_X, 0); + input_report_abs(input, ABS_TILT_Y, 0); + if (wacom->tool[idx] >= BTN_TOOL_MOUSE) { + input_report_key(input, BTN_LEFT, 0); + input_report_key(input, BTN_MIDDLE, 0); + input_report_key(input, BTN_RIGHT, 0); + input_report_key(input, BTN_SIDE, 0); + input_report_key(input, BTN_EXTRA, 0); + input_report_abs(input, ABS_THROTTLE, 0); + input_report_abs(input, ABS_RZ, 0); + } else { + input_report_abs(input, ABS_PRESSURE, 0); + input_report_key(input, BTN_STYLUS, 0); + input_report_key(input, BTN_STYLUS2, 0); + input_report_key(input, BTN_TOUCH, 0); + input_report_abs(input, ABS_WHEEL, 0); + if (features->type >= INTUOS3S) + input_report_abs(input, ABS_Z, 0); + } + input_report_key(input, wacom->tool[idx], 0); + input_report_abs(input, ABS_MISC, 0); /* reset tool id */ + input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]); + wacom->id[idx] = 0; +} + static int wacom_intuos_inout(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; @@ -741,36 +780,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) if (!wacom->id[idx]) return 1; - /* - * Reset all states otherwise we lose the initial states - * when in-prox next time - */ - input_report_abs(input, ABS_X, 0); - input_report_abs(input, ABS_Y, 0); - input_report_abs(input, ABS_DISTANCE, 0); - input_report_abs(input, ABS_TILT_X, 0); - input_report_abs(input, ABS_TILT_Y, 0); - if (wacom->tool[idx] >= BTN_TOOL_MOUSE) { - input_report_key(input, BTN_LEFT, 0); - input_report_key(input, BTN_MIDDLE, 0); - input_report_key(input, BTN_RIGHT, 0); - input_report_key(input, BTN_SIDE, 0); - input_report_key(input, BTN_EXTRA, 0); - input_report_abs(input, ABS_THROTTLE, 0); - 
input_report_abs(input, ABS_RZ, 0); - } else { - input_report_abs(input, ABS_PRESSURE, 0); - input_report_key(input, BTN_STYLUS, 0); - input_report_key(input, BTN_STYLUS2, 0); - input_report_key(input, BTN_TOUCH, 0); - input_report_abs(input, ABS_WHEEL, 0); - if (features->type >= INTUOS3S) - input_report_abs(input, ABS_Z, 0); - } - input_report_key(input, wacom->tool[idx], 0); - input_report_abs(input, ABS_MISC, 0); /* reset tool id */ - input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]); - wacom->id[idx] = 0; + wacom_exit_report(wacom); return 2; } @@ -1226,6 +1236,12 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) if (!valid) continue; + if (!prox) { + wacom->shared->stylus_in_proximity = false; + wacom_exit_report(wacom); + input_sync(pen_input); + return; + } if (range) { /* Fix rotation alignment: userspace expects zero at left */ int16_t rotation = (int16_t)get_unaligned_le16(&frame[9]); @@ -1924,7 +1940,6 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field struct wacom_features *features = &wacom_wac->features; unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); int i; - bool is_touch_on = value; bool do_report = false; /* @@ -1969,16 +1984,17 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field break; case WACOM_HID_WD_MUTE_DEVICE: - if (wacom_wac->shared->touch_input && value) { - wacom_wac->shared->is_touch_on = !wacom_wac->shared->is_touch_on; - is_touch_on = wacom_wac->shared->is_touch_on; - } - - /* fall through*/ case WACOM_HID_WD_TOUCHONOFF: if (wacom_wac->shared->touch_input) { + bool *is_touch_on = &wacom_wac->shared->is_touch_on; + + if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value) + *is_touch_on = !(*is_touch_on); + else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF) + *is_touch_on = value; + input_report_switch(wacom_wac->shared->touch_input, - SW_MUTE_DEVICE, !is_touch_on); + SW_MUTE_DEVICE, !(*is_touch_on)); input_sync(wacom_wac->shared->touch_input); } break; diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h index 8a03654048bf..feb62fd4dfc3 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -166,6 +166,7 @@ ((f)->physical == HID_DG_PEN) || \ ((f)->application == HID_DG_PEN) || \ ((f)->application == HID_DG_DIGITIZER) || \ + ((f)->application == WACOM_HID_WD_PEN) || \ ((f)->application == WACOM_HID_WD_DIGITIZER) || \ ((f)->application == WACOM_HID_G9_PEN) || \ ((f)->application == WACOM_HID_G11_PEN)) diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 894b67ac2cae..05964347008d 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -640,22 +640,28 @@ void vmbus_close(struct vmbus_channel *channel) */ return; } - mutex_lock(&vmbus_connection.channel_mutex); /* * Close all the sub-channels first and then close the * primary channel. */ list_for_each_safe(cur, tmp, &channel->sc_list) { cur_channel = list_entry(cur, struct vmbus_channel, sc_list); - vmbus_close_internal(cur_channel); if (cur_channel->rescind) { + wait_for_completion(&cur_channel->rescind_event); + mutex_lock(&vmbus_connection.channel_mutex); + vmbus_close_internal(cur_channel); hv_process_channel_removal( cur_channel->offermsg.child_relid); + } else { + mutex_lock(&vmbus_connection.channel_mutex); + vmbus_close_internal(cur_channel); } + mutex_unlock(&vmbus_connection.channel_mutex); } /* * Now close the primary. 
*/ + mutex_lock(&vmbus_connection.channel_mutex); vmbus_close_internal(channel); mutex_unlock(&vmbus_connection.channel_mutex); } diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 379b0df123be..1939c0ca3741 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -71,7 +71,7 @@ static const struct vmbus_device vmbus_devs[] = { /* PCIE */ { .dev_type = HV_PCIE, HV_PCIE_GUID, - .perf_device = true, + .perf_device = false, }, /* Synthetic Frame Buffer */ @@ -333,6 +333,7 @@ static struct vmbus_channel *alloc_channel(void) return NULL; spin_lock_init(&channel->lock); + init_completion(&channel->rescind_event); INIT_LIST_HEAD(&channel->sc_list); INIT_LIST_HEAD(&channel->percpu_list); @@ -883,6 +884,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) /* * Now wait for offer handling to complete. */ + vmbus_rescind_cleanup(channel); while (READ_ONCE(channel->probe_done) == false) { /* * We wait here until any channel offer is currently @@ -898,7 +900,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) if (channel->device_obj) { if (channel->chn_rescind_callback) { channel->chn_rescind_callback(channel); - vmbus_rescind_cleanup(channel); return; } /* @@ -907,7 +908,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) */ dev = get_device(&channel->device_obj->device); if (dev) { - vmbus_rescind_cleanup(channel); vmbus_device_unregister(channel->device_obj); put_device(dev); } @@ -921,13 +921,14 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) * 2. Then close the primary channel. */ mutex_lock(&vmbus_connection.channel_mutex); - vmbus_rescind_cleanup(channel); if (channel->state == CHANNEL_OPEN_STATE) { /* * The channel is currently not open; * it is safe for us to cleanup the channel. */ hv_process_channel_removal(rescind->child_relid); + } else { + complete(&channel->rescind_event); } mutex_unlock(&vmbus_connection.channel_mutex); } diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 12eb8caa4263..3f8dde8d59ba 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -394,13 +394,24 @@ __hv_pkt_iter_next(struct vmbus_channel *channel, } EXPORT_SYMBOL_GPL(__hv_pkt_iter_next); +/* How many bytes were read in this iterator cycle */ +static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi, + u32 start_read_index) +{ + if (rbi->priv_read_index >= start_read_index) + return rbi->priv_read_index - start_read_index; + else + return rbi->ring_datasize - start_read_index + + rbi->priv_read_index; +} + /* * Update host ring buffer after iterating over packets. */ void hv_pkt_iter_close(struct vmbus_channel *channel) { struct hv_ring_buffer_info *rbi = &channel->inbound; - u32 orig_write_sz = hv_get_bytes_to_write(rbi); + u32 curr_write_sz, pending_sz, bytes_read, start_read_index; /* * Make sure all reads are done before we update the read index since @@ -408,8 +419,12 @@ void hv_pkt_iter_close(struct vmbus_channel *channel) * is updated. */ virt_rmb(); + start_read_index = rbi->ring_buffer->read_index; rbi->ring_buffer->read_index = rbi->priv_read_index; + if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz) + return; + /* * Issue a full memory barrier before making the signaling decision. 
* Here is the reason for having this barrier: @@ -423,26 +438,29 @@ void hv_pkt_iter_close(struct vmbus_channel *channel) */ virt_mb(); - /* If host has disabled notifications then skip */ - if (rbi->ring_buffer->interrupt_mask) + pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); + if (!pending_sz) return; - if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) { - u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); + /* + * Ensure the read of write_index in hv_get_bytes_to_write() + * happens after the read of pending_send_sz. + */ + virt_rmb(); + curr_write_sz = hv_get_bytes_to_write(rbi); + bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index); - /* - * If there was space before we began iteration, - * then host was not blocked. Also handles case where - * pending_sz is zero then host has nothing pending - * and does not need to be signaled. - */ - if (orig_write_sz > pending_sz) - return; + /* + * If there was space before we began iteration, + * then host was not blocked. + */ - /* If pending write will not fit, don't give false hope. */ - if (hv_get_bytes_to_write(rbi) < pending_sz) - return; - } + if (curr_write_sz - bytes_read > pending_sz) + return; + + /* If pending write will not fit, don't give false hope. */ + if (curr_write_sz <= pending_sz) + return; vmbus_setevent(channel); } diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 937801ac2fe0..2cd134dd94d2 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1534,7 +1534,7 @@ static int __init hv_acpi_init(void) { int ret, t; - if (x86_hyper != &x86_hyper_ms_hyperv) + if (x86_hyper_type != X86_HYPER_MS_HYPERV) return -ENODEV; init_completion(&probe_event); diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index c13a4fd86b3c..a42744c7665b 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -268,13 +268,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) { const struct tjmax_model *tm = &tjmax_model_table[i]; if (c->x86_model == tm->model && - (tm->mask == ANY || c->x86_mask == tm->mask)) + (tm->mask == ANY || c->x86_stepping == tm->mask)) return tm->tjmax; } /* Early chips have no MSR for TjMax */ - if (c->x86_model == 0xf && c->x86_mask < 4) + if (c->x86_model == 0xf && c->x86_stepping < 4) usemsr_ee = 0; if (c->x86_model > 0xe && usemsr_ee) { @@ -425,7 +425,7 @@ static int chk_ucode_version(unsigned int cpu) * Readings might stop update when processor visited too deep sleep, * fixed for stepping D0 (6EC). 
*/ - if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) { + if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) { pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n"); return -ENODEV; } diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c index ef91b8a67549..84e91286fc4f 100644 --- a/drivers/hwmon/hwmon-vid.c +++ b/drivers/hwmon/hwmon-vid.c @@ -293,7 +293,7 @@ u8 vid_which_vrm(void) if (c->x86 < 6) /* Any CPU with family lower than 6 */ return 0; /* doesn't have VID */ - vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor); + vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor); if (vrm_ret == 134) vrm_ret = get_via_model_d_vrm(); if (vrm_ret == 0) diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index 62e38fa8cda2..e9e6aeabbf84 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c @@ -95,18 +95,20 @@ enum ina2xx_ids { ina219, ina226 }; struct ina2xx_config { u16 config_default; - int calibration_factor; + int calibration_value; int registers; int shunt_div; int bus_voltage_shift; int bus_voltage_lsb; /* uV */ - int power_lsb; /* uW */ + int power_lsb_factor; }; struct ina2xx_data { const struct ina2xx_config *config; long rshunt; + long current_lsb_uA; + long power_lsb_uW; struct mutex config_lock; struct regmap *regmap; @@ -116,21 +118,21 @@ struct ina2xx_data { static const struct ina2xx_config ina2xx_config[] = { [ina219] = { .config_default = INA219_CONFIG_DEFAULT, - .calibration_factor = 40960000, + .calibration_value = 4096, .registers = INA219_REGISTERS, .shunt_div = 100, .bus_voltage_shift = 3, .bus_voltage_lsb = 4000, - .power_lsb = 20000, + .power_lsb_factor = 20, }, [ina226] = { .config_default = INA226_CONFIG_DEFAULT, - .calibration_factor = 5120000, + .calibration_value = 2048, .registers = INA226_REGISTERS, .shunt_div = 400, .bus_voltage_shift = 0, .bus_voltage_lsb = 1250, - .power_lsb = 25000, + .power_lsb_factor = 25, }, }; @@ -169,12 +171,16 @@ static u16 ina226_interval_to_reg(int interval) return INA226_SHIFT_AVG(avg_bits); } +/* + * Calibration register is set to the best value, which eliminates + * truncation errors on calculating current register in hardware. + * According to datasheet (eq. 3) the best values are 2048 for + * ina226 and 4096 for ina219. They are hardcoded as calibration_value. + */ static int ina2xx_calibrate(struct ina2xx_data *data) { - u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor, - data->rshunt); - - return regmap_write(data->regmap, INA2XX_CALIBRATION, val); + return regmap_write(data->regmap, INA2XX_CALIBRATION, + data->config->calibration_value); } /* @@ -187,10 +193,6 @@ static int ina2xx_init(struct ina2xx_data *data) if (ret < 0) return ret; - /* - * Set current LSB to 1mA, shunt is in uOhms - * (equation 13 in datasheet). 
- */ return ina2xx_calibrate(data); } @@ -268,15 +270,15 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg, val = DIV_ROUND_CLOSEST(val, 1000); break; case INA2XX_POWER: - val = regval * data->config->power_lsb; + val = regval * data->power_lsb_uW; break; case INA2XX_CURRENT: - /* signed register, LSB=1mA (selected), in mA */ - val = (s16)regval; + /* signed register, result in mA */ + val = regval * data->current_lsb_uA; + val = DIV_ROUND_CLOSEST(val, 1000); break; case INA2XX_CALIBRATION: - val = DIV_ROUND_CLOSEST(data->config->calibration_factor, - regval); + val = regval; break; default: /* programmer goofed */ @@ -304,9 +306,32 @@ static ssize_t ina2xx_show_value(struct device *dev, ina2xx_get_value(data, attr->index, regval)); } -static ssize_t ina2xx_set_shunt(struct device *dev, - struct device_attribute *da, - const char *buf, size_t count) +/* + * In order to keep calibration register value fixed, the product + * of current_lsb and shunt_resistor should also be fixed and equal + * to shunt_voltage_lsb = 1 / shunt_div multiplied by 10^9 in order + * to keep the scale. + */ +static int ina2xx_set_shunt(struct ina2xx_data *data, long val) +{ + unsigned int dividend = DIV_ROUND_CLOSEST(1000000000, + data->config->shunt_div); + if (val <= 0 || val > dividend) + return -EINVAL; + + mutex_lock(&data->config_lock); + data->rshunt = val; + data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val); + data->power_lsb_uW = data->config->power_lsb_factor * + data->current_lsb_uA; + mutex_unlock(&data->config_lock); + + return 0; +} + +static ssize_t ina2xx_store_shunt(struct device *dev, + struct device_attribute *da, + const char *buf, size_t count) { unsigned long val; int status; @@ -316,18 +341,9 @@ static ssize_t ina2xx_set_shunt(struct device *dev, if (status < 0) return status; - if (val == 0 || - /* Values greater than the calibration factor make no sense. 
*/ - val > data->config->calibration_factor) return -EINVAL; - - mutex_lock(&data->config_lock); - data->rshunt = val; - status = ina2xx_calibrate(data); - mutex_unlock(&data->config_lock); + status = ina2xx_set_shunt(data, val); if (status < 0) return status; - return count; } @@ -387,7 +403,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, /* shunt resistance */ static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, - ina2xx_show_value, ina2xx_set_shunt, + ina2xx_show_value, ina2xx_store_shunt, INA2XX_CALIBRATION); /* update interval (ina226 only) */ @@ -438,6 +454,7 @@ static int ina2xx_probe(struct i2c_client *client, /* set the device type */ data->config = &ina2xx_config[chip]; + mutex_init(&data->config_lock); if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) { struct ina2xx_platform_data *pdata = dev_get_platdata(dev); @@ -448,10 +465,7 @@ static int ina2xx_probe(struct i2c_client *client, val = INA2XX_RSHUNT_DEFAULT; } - if (val <= 0 || val > data->config->calibration_factor) return -ENODEV; - - data->rshunt = val; + ina2xx_set_shunt(data, val); ina2xx_regmap_config.max_register = data->config->registers; @@ -467,8 +481,6 @@ static int ina2xx_probe(struct i2c_client *client, return -ENODEV; } - mutex_init(&data->config_lock); - data->groups[group++] = &ina2xx_group; if (id->driver_data == ina226) data->groups[group++] = &ina226_group; diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index 5f11dc014ed6..e5234f953a6d 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -22,6 +22,7 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#include <linux/bitops.h> #include #include #include @@ -45,6 +46,7 @@ static const unsigned short normal_i2c[] = { #define JC42_REG_TEMP 0x05 #define JC42_REG_MANID 0x06 #define JC42_REG_DEVICEID 0x07 +#define JC42_REG_SMBUS 0x22 /* NXP and Atmel, possibly others? */ /* Status bits in temperature register */ #define JC42_ALARM_CRIT_BIT 15 @@ -75,6 +77,9 @@ static const unsigned short normal_i2c[] = { #define GT_MANID 0x1c68 /* Giantec */ #define GT_MANID2 0x132d /* Giantec, 2nd mfg ID */ +/* SMBUS register */ +#define SMBUS_STMOUT BIT(7) /* SMBus time-out, active low */ /* Supported chips */ /* Analog Devices */ @@ -495,6 +500,22 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id) data->extended = !!(cap & JC42_CAP_RANGE); + if (device_property_read_bool(dev, "smbus-timeout-disable")) { + int smbus; + + /* + * Not all chips support this register, but from a + * quick read of various datasheets no chip appears + * incompatible with the below attempt to disable + * the timeout. And the whole thing is opt-in... + */ + smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS); + if (smbus < 0) + return smbus; + i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS, + smbus | SMBUS_STMOUT); + } + config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG); if (config < 0) return config; diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index ce3b91f22e30..5c740996aa62 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -179,7 +179,7 @@ static bool has_erratum_319(struct pci_dev *pdev) * and AM3 formats, but that's the best we can do.
*/ return boot_cpu_data.x86_model < 4 || - (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2); + (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2); } static int k10temp_probe(struct pci_dev *pdev, diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c index 5a632bcf869b..e59f9113fb93 100644 --- a/drivers/hwmon/k8temp.c +++ b/drivers/hwmon/k8temp.c @@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev, return -ENOMEM; model = boot_cpu_data.x86_model; - stepping = boot_cpu_data.x86_mask; + stepping = boot_cpu_data.x86_stepping; /* feature available since SH-C0, exclude older revisions */ if ((model == 4 && stepping == 0) || diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index c219e43b8f02..f5f3f8cf57ea 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -1469,7 +1469,7 @@ static void nct6775_update_pwm(struct device *dev) duty_is_dc = data->REG_PWM_MODE[i] && (nct6775_read_value(data, data->REG_PWM_MODE[i]) & data->PWM_MODE_MASK[i]); - data->pwm_mode[i] = duty_is_dc; + data->pwm_mode[i] = !duty_is_dc; fanmodecfg = nct6775_read_value(data, data->REG_FAN_MODE[i]); for (j = 0; j < ARRAY_SIZE(data->REG_PWM); j++) { @@ -2350,7 +2350,7 @@ show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf) struct nct6775_data *data = nct6775_update_device(dev); struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr); - return sprintf(buf, "%d\n", !data->pwm_mode[sattr->index]); + return sprintf(buf, "%d\n", data->pwm_mode[sattr->index]); } static ssize_t @@ -2371,9 +2371,9 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr, if (val > 1) return -EINVAL; - /* Setting DC mode is not supported for all chips/channels */ + /* Setting DC mode (0) is not supported for all chips/channels */ if (data->REG_PWM_MODE[nr] == 0) { - if (val) + if (!val) return -EINVAL; return count; } @@ -2382,7 +2382,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr, data->pwm_mode[nr] = val; reg = nct6775_read_value(data, data->REG_PWM_MODE[nr]); reg &= ~data->PWM_MODE_MASK[nr]; - if (val) + if (!val) reg |= data->PWM_MODE_MASK[nr]; nct6775_write_value(data, data->REG_PWM_MODE[nr], reg); mutex_unlock(&data->update_lock); diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c index 00d6995af4c2..8a44e94d5679 100644 --- a/drivers/hwmon/pmbus/adm1275.c +++ b/drivers/hwmon/pmbus/adm1275.c @@ -154,7 +154,7 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg) const struct adm1275_data *data = to_adm1275_data(info); int ret = 0; - if (page) + if (page > 0) return -ENXIO; switch (reg) { @@ -240,7 +240,7 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg, const struct adm1275_data *data = to_adm1275_data(info); int ret; - if (page) + if (page > 0) return -ENXIO; switch (reg) { diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c index dd4883a19045..e951f9b87abb 100644 --- a/drivers/hwmon/pmbus/max8688.c +++ b/drivers/hwmon/pmbus/max8688.c @@ -45,7 +45,7 @@ static int max8688_read_word_data(struct i2c_client *client, int page, int reg) { int ret; - if (page) + if (page > 0) return -ENXIO; switch (reg) { diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index 4efa2bd4f6d8..fa613bd209e3 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h @@ -404,9 +404,9 @@ extern const struct regulator_ops pmbus_regulator_ops; /* Function declarations */ void 
pmbus_clear_cache(struct i2c_client *client); -int pmbus_set_page(struct i2c_client *client, u8 page); -int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg); -int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word); +int pmbus_set_page(struct i2c_client *client, int page); +int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg); +int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, u16 word); int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg); int pmbus_write_byte(struct i2c_client *client, int page, u8 value); int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 302f0aef59de..a139940cd991 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -21,6 +21,7 @@ #include #include +#include <linux/math64.h> #include #include #include @@ -136,13 +137,13 @@ void pmbus_clear_cache(struct i2c_client *client) } EXPORT_SYMBOL_GPL(pmbus_clear_cache); -int pmbus_set_page(struct i2c_client *client, u8 page) +int pmbus_set_page(struct i2c_client *client, int page) { struct pmbus_data *data = i2c_get_clientdata(client); int rv = 0; int newpage; - if (page != data->currpage) { + if (page >= 0 && page != data->currpage) { rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page); newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE); if (newpage != page) @@ -158,11 +159,9 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value) { int rv; - if (page >= 0) { - rv = pmbus_set_page(client, page); - if (rv < 0) - return rv; - } + rv = pmbus_set_page(client, page); + if (rv < 0) + return rv; return i2c_smbus_write_byte(client, value); } @@ -186,7 +185,8 @@ static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value) return pmbus_write_byte(client, page, value); } -int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word) +int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, + u16 word) { int rv; @@ -219,7 +219,7 @@ static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg, return pmbus_write_word_data(client, page, reg, word); } -int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg) +int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg) { int rv; @@ -255,11 +255,9 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg) { int rv; - if (page >= 0) { - rv = pmbus_set_page(client, page); - if (rv < 0) - return rv; - } + rv = pmbus_set_page(client, page); + if (rv < 0) + return rv; return i2c_smbus_read_byte_data(client, reg); } @@ -502,8 +500,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data, static long pmbus_reg2data_direct(struct pmbus_data *data, struct pmbus_sensor *sensor) { - long val = (s16) sensor->data; - long m, b, R; + s64 b, val = (s16)sensor->data; + s32 m, R; m = data->info->m[sensor->class]; b = data->info->b[sensor->class]; @@ -531,11 +529,12 @@ static long pmbus_reg2data_direct(struct pmbus_data *data, R--; } while (R < 0) { - val = DIV_ROUND_CLOSEST(val, 10); + val = div_s64(val + 5LL, 10L); /* round closest */ R++; } - return (val - b) / m; + val = div_s64(val - b, m); + return clamp_val(val, LONG_MIN, LONG_MAX); } /* @@ -659,7 +658,8 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data, static u16 pmbus_data2reg_direct(struct pmbus_data *data, struct pmbus_sensor *sensor, long val) { - long m, b, R; + s64 b, val64 = val; + s32 m, R;
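+ + /* + * Direct-format encoding per the PMBus specification: Y = (m * X + b) * 10^R. + * With illustrative (hypothetical) coefficients m = 100, b = 0, R = -2, a + * reading X = 12 encodes to Y = (100 * 12 + 0) * 10^-2 = 12. The s64 + * intermediate below keeps m * X + b from overflowing long on 32-bit. + */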
m = data->info->m[sensor->class]; b = data->info->b[sensor->class]; @@ -676,18 +676,18 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data, R -= 3; /* Adjust R and b for data in milli-units */ b *= 1000; } - val = val * m + b; + val64 = val64 * m + b; while (R > 0) { - val *= 10; + val64 *= 10; R--; } while (R < 0) { - val = DIV_ROUND_CLOSEST(val, 10); + val64 = div_s64(val64 + 5LL, 10L); /* round closest */ R++; } - return val; + return (u16)clamp_val(val64, S16_MIN, S16_MAX); } static u16 pmbus_data2reg_vid(struct pmbus_data *data, diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c index 6ea62c62ff27..9cdb3fbc8c1f 100644 --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c @@ -315,7 +315,7 @@ static void debug_dump_regs(struct debug_drvdata *drvdata) } pc = debug_adjust_pc(drvdata); - dev_emerg(dev, " EDPCSR: [<%p>] %pS\n", (void *)pc, (void *)pc); + dev_emerg(dev, " EDPCSR: [<%px>] %pS\n", (void *)pc, (void *)pc); if (drvdata->edcidsr_present) dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr); diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index d7a3e453016d..735dca089389 100644 --- a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c @@ -46,8 +46,11 @@ #define TPIU_ITATBCTR0 0xef8 /** register definition **/ +/* FFSR - 0x300 */ +#define FFSR_FT_STOPPED BIT(1) /* FFCR - 0x304 */ #define FFCR_FON_MAN BIT(6) +#define FFCR_STOP_FI BIT(12) /** * @base: memory mapped base address for this component. @@ -85,10 +88,14 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata) { CS_UNLOCK(drvdata->base); - /* Clear formatter controle reg. */ - writel_relaxed(0x0, drvdata->base + TPIU_FFCR); + /* Clear formatter and stop on flush */ + writel_relaxed(FFCR_STOP_FI, drvdata->base + TPIU_FFCR); /* Generate manual flush */ - writel_relaxed(FFCR_FON_MAN, drvdata->base + TPIU_FFCR); + writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR); + /* Wait for flush to complete */ + coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN, 0); + /* Wait for formatter to stop */ + coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED, 1); CS_LOCK(drvdata->base); } diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig index 1b412f8a56b5..9aa257d79197 100644 --- a/drivers/hwtracing/intel_th/Kconfig +++ b/drivers/hwtracing/intel_th/Kconfig @@ -1,6 +1,6 @@ config INTEL_TH tristate "Intel(R) Trace Hub controller" - depends on HAS_DMA && HAS_IOMEM + depends on HAS_DMA && HAS_IOMEM && X86 help Intel(R) Trace Hub (TH) is a set of hardware blocks (subdevices) that produce, switch and output trace data from multiple hardware and @@ -55,6 +55,28 @@ config INTEL_TH_MSU Say Y here to enable MSU output device for Intel TH. +config INTEL_TH_MSU_DVC + tristate "Intel Trace Hub Memory Storage Unit to USB-dvc" + depends on DVC_TRACE_BUS + help + Memory Storage Unit (MSU) trace output device enables + storing STP traces to system memory. + This provides the means to route this data over USB, + using DvC-Trace. + + Say Y here to enable DvC-Trace output device for Intel TH. + +config INTEL_TH_MSU_DVC_DEBUG + tristate "Intel Trace Hub Memory Storage Unit to USB-dvc debug" + depends on INTEL_TH_MSU_DVC + help + Memory Storage Unit (MSU) trace output device enables + storing STP traces to system memory. 
+ This enables extensive logging and collection of + statistical data on MSU/DvC-Trace device performance. + + Say Y to enable extended debug features on MSU-DvC. + config INTEL_TH_PTI tristate "Intel(R) Trace Hub PTI output" help @@ -70,4 +92,15 @@ config INTEL_TH_DEBUG help Say Y here to enable debugging. +config INTEL_TH_EARLY_PRINTK + bool "Intel TH early printk console" + depends on INTEL_TH=y + default n + ---help--- + Enables early printk console. + When the early printk console is enabled in the kernel + command line, kernel log messages are sent to Intel TH + (hence they are aggregated with the other trace messages + from the platform). + endif diff --git a/drivers/hwtracing/intel_th/Makefile b/drivers/hwtracing/intel_th/Makefile index 880c9b5e8566..f33d016f3d80 100644 --- a/drivers/hwtracing/intel_th/Makefile +++ b/drivers/hwtracing/intel_th/Makefile @@ -15,5 +15,12 @@ intel_th_sth-y := sth.o obj-$(CONFIG_INTEL_TH_MSU) += intel_th_msu.o intel_th_msu-y := msu.o +obj-$(CONFIG_INTEL_TH_MSU_DVC) += intel_th_msu_dvc.o +intel_th_msu_dvc-y := msu-dvc.o +subdir-ccflags-$(CONFIG_INTEL_TH_MSU_DVC_DEBUG) += -DMDD_DEBUG + obj-$(CONFIG_INTEL_TH_PTI) += intel_th_pti.o intel_th_pti-y := pti.o + +obj-$(CONFIG_INTEL_TH_EARLY_PRINTK) += intel_th_early_printk.o +intel_th_early_printk-y := early_printk.o diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index 1a023e30488c..ef9be9b59c70 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -222,48 +222,75 @@ static ssize_t port_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RO(port); -static int intel_th_output_activate(struct intel_th_device *thdev) +/** + * intel_th_output_activate() - call output initialization procedure + * @output: output to activate + */ +int intel_th_output_activate(struct intel_th_output *output) { - struct intel_th_driver *thdrv = - to_intel_th_driver_or_null(thdev->dev.driver); - struct intel_th *th = to_intel_th(thdev); - int ret = 0; + struct intel_th_device *outdev = + container_of(output, struct intel_th_device, output); + struct intel_th_driver *outdrv = + to_intel_th_driver(outdev->dev.driver); - if (!thdrv) - return -ENODEV; - - if (!try_module_get(thdrv->driver.owner)) - return -ENODEV; + if (WARN_ON_ONCE(outdev->type != INTEL_TH_OUTPUT)) + return -EINVAL; - pm_runtime_get_sync(&thdev->dev); + if (outdrv->activate) + return outdrv->activate(outdev); - if (th->activate) - ret = th->activate(th); - if (ret) - goto fail_put; + return 0; +} +EXPORT_SYMBOL_GPL(intel_th_output_activate); - if (thdrv->activate) - ret = thdrv->activate(thdev); - else - intel_th_trace_enable(thdev); +/** + * intel_th_first_trace() - notification callback for first trace + * + * Notify each child device that the first capture is about to begin. + * This gives a chance to save the current data as the Trace Hub may have + * already been configured by the BIOS to trace to a given output. 
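+ * The notification fires at most once, gated by the atomic 'first' counter + * in intel_th_start_trace() below.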
+ * + * @dev: output device to notify + * @data: private data - unused + */ +static int intel_th_first_trace(struct device *dev, void *data) +{ + struct intel_th_device *thdev = + container_of(dev, struct intel_th_device, dev); + struct intel_th_driver *thdrv = + to_intel_th_driver(thdev->dev.driver); - if (ret) - goto fail_deactivate; + if (thdrv && thdrv->first_trace) + thdrv->first_trace(thdev); return 0; +} -fail_deactivate: - if (th->deactivate) - th->deactivate(th); +/** + * intel_th_start_trace() - start tracing to an output device + * @thdev: output device that requests tracing + */ +static int intel_th_start_trace(struct intel_th_device *thdev) +{ + struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent); + struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver); + static atomic_t first = { .counter = 1, }; -fail_put: - pm_runtime_put(&thdev->dev); - module_put(thdrv->driver.owner); + if (WARN_ON_ONCE(hub->type != INTEL_TH_SWITCH)) + return -EINVAL; - return ret; + if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT)) + return -EINVAL; + + if (atomic_dec_if_positive(&first) == 0) + device_for_each_child(&hub->dev, NULL, intel_th_first_trace); + + /* The hub has control over Intel Trace Hub. + * Let the hub start a trace if possible and activate the output. */ + return hubdrv->enable(hub, &thdev->output); } -static void intel_th_output_deactivate(struct intel_th_device *thdev) +static void intel_th_stop_trace(struct intel_th_device *thdev) { struct intel_th_driver *thdrv = to_intel_th_driver_or_null(thdev->dev.driver); @@ -305,9 +332,9 @@ static ssize_t active_store(struct device *dev, struct device_attribute *attr, if (!!val != thdev->output.active) { if (val) - ret = intel_th_output_activate(thdev); + ret = intel_th_start_trace(thdev); else - intel_th_output_deactivate(thdev); + intel_th_stop_trace(thdev); } return ret ? 
ret : size; @@ -376,6 +403,7 @@ intel_th_device_alloc(struct intel_th *th, unsigned int type, const char *name, thdev->id = id; thdev->type = type; + thdev->th = th; strcpy(thdev->name, name); device_initialize(&thdev->dev); @@ -807,10 +835,11 @@ static const struct file_operations intel_th_output_fops = { * @devres: parent's resources * @ndevres: number of resources * @irq: irq number + * @reset: parent's reset function */ struct intel_th * intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, - struct resource *devres, unsigned int ndevres, int irq) + struct resource *devres, unsigned int ndevres, int irq, void (*reset)(struct intel_th *th)) { struct intel_th *th; int err; @@ -837,6 +866,7 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, th->resource = devres; th->num_resources = ndevres; th->irq = irq; + th->reset = reset; dev_set_drvdata(dev, th); @@ -891,10 +921,26 @@ void intel_th_free(struct intel_th *th) EXPORT_SYMBOL_GPL(intel_th_free); /** - * intel_th_trace_enable() - enable tracing for an output device - * @thdev: output device that requests tracing be enabled + * intel_th_reset() - reset hardware registers + * @hub: hub requesting the reset + */ +void intel_th_reset(struct intel_th_device *hub) +{ + struct intel_th *th = hub->th; + + if (WARN_ON_ONCE(hub->type != INTEL_TH_SWITCH)) + return; + + if (th->reset) + th->reset(th); +} +EXPORT_SYMBOL_GPL(intel_th_reset); + +/** + * intel_th_trace_switch() - execute a switch sequence + * @thdev: output device that requests tracing switch */ -int intel_th_trace_enable(struct intel_th_device *thdev) +int intel_th_trace_switch(struct intel_th_device *thdev) { struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent); struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver); @@ -905,12 +951,11 @@ int intel_th_trace_enable(struct intel_th_device *thdev) if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT)) return -EINVAL; - pm_runtime_get_sync(&thdev->dev); - hubdrv->enable(hub, &thdev->output); + hubdrv->trig_switch(hub, &thdev->output); return 0; } -EXPORT_SYMBOL_GPL(intel_th_trace_enable); +EXPORT_SYMBOL_GPL(intel_th_trace_switch); /** * intel_th_trace_disable() - disable tracing for an output device @@ -935,7 +980,7 @@ EXPORT_SYMBOL_GPL(intel_th_trace_disable); int intel_th_set_output(struct intel_th_device *thdev, unsigned int master) { - struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent); + struct intel_th_device *hub = to_intel_th_hub(thdev); struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver); if (!hubdrv->set_output) diff --git a/drivers/hwtracing/intel_th/early_printk.c b/drivers/hwtracing/intel_th/early_printk.c new file mode 100644 index 000000000000..bbcb9f89161d --- /dev/null +++ b/drivers/hwtracing/intel_th/early_printk.c @@ -0,0 +1,98 @@ +#include +#include +#include +#include "sth.h" + +static unsigned long sth_phys_addr; + +void early_intel_th_init(const char *s) +{ + size_t n; + unsigned long addr, chan; + char buf[32] = {0, }; + char *match, *next; + + /* Expect ,0x<sw_bar>:<channel>[,keep] */ + if (*s == ',') + ++s; + if (strncmp(s, "0x", 2)) + goto fail; + + n = strcspn(s, ","); + if (n > sizeof(buf) - 1) + goto fail; + strncpy(buf, s, n); + next = buf; + + /* Get sw_bar */ + match = strsep(&next, ":"); + if (!match) + goto fail; + + if (kstrtoul(match, 16, &addr)) + goto fail; + + /* Get channel */ + if (kstrtoul(next, 0, &chan)) + goto fail; + + sth_phys_addr = addr + chan * sizeof(struct intel_th_channel); + return; + +fail: + pr_err("%s
invalid parameter %s", __func__, s); +} + +static void intel_th_early_write(struct console *con, const char *buf, + unsigned len) +{ + struct intel_th_channel *channel; + const u8 *p = buf; + const u32 sven_header = 0x01000242; + + if (WARN_ON_ONCE(!sth_phys_addr)) + return; + + /* Software can send messages to Intel TH by writing to an MMIO space + * that is divided in several Master/Channel regions. + * Write directly to the address provided through the cmdline. + */ + set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, sth_phys_addr); + channel = (struct intel_th_channel *) + (__fix_to_virt(FIX_EARLYCON_MEM_BASE) + + (sth_phys_addr & (PAGE_SIZE - 1))); + + /* Add hardcoded SVEN header + * type: DEBUG_STRING + * severity: SVEN_SEVERITY_NORMAL + * length: payload size + * subtype: SVEN_DEBUGSTR_Generic + */ + iowrite32(sven_header, &channel->DnTS); + iowrite16(len, &channel->Dn); + + while (len) { + if (len >= 4) { + iowrite32(*(u32 *)p, &channel->Dn); + p += 4; + len -= 4; + } else if (len >= 2) { + iowrite16(*(u16 *)p, &channel->Dn); + p += 2; + len -= 2; + } else { + iowrite8(*(u8 *)p, &channel->Dn); + p += 1; + len -= 1; + } + } + + iowrite32(0, &channel->FLAG); +} + +struct console intel_th_early_console = { + .name = "earlyintelth", + .write = intel_th_early_write, + .flags = CON_PRINTBUFFER, + .index = -1, +}; diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c index 018678ec3c13..571f1b6956ef 100644 --- a/drivers/hwtracing/intel_th/gth.c +++ b/drivers/hwtracing/intel_th/gth.c @@ -35,14 +35,16 @@ struct gth_device; * @output: link to output device's output descriptor * @index: output port number * @port_type: one of GTH_* port type values - * @master: bitmap of masters configured for this output + * @config: output configuration backup + * @smcfreq: maintenance packet frequency backup */ struct gth_output { struct gth_device *gth; struct intel_th_output *output; unsigned int index; unsigned int port_type; - DECLARE_BITMAP(master, TH_CONFIGURABLE_MASTERS + 1); + u32 config; + u32 smcfreq; }; /** @@ -73,6 +75,8 @@ static void gth_output_set(struct gth_device *gth, int port, u32 val; int shift = (port & 3) * 8; + gth->output[port].config = config; + val = ioread32(gth->base + reg); val &= ~(0xff << shift); val |= config << shift; @@ -99,6 +103,8 @@ static void gth_smcfreq_set(struct gth_device *gth, int port, int shift = (port & 1) * 16; u32 val; + gth->output[port].smcfreq = freq; + val = ioread32(gth->base + reg); val &= ~(0xffff << shift); val |= freq << shift; @@ -147,6 +153,24 @@ gth_master_set(struct gth_device *gth, unsigned int master, int port) iowrite32(val, gth->base + reg); } +static int gth_master_get(struct gth_device *gth, unsigned int master) +{ + unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u); + unsigned int shift = (master & 0x7) * 4; + u32 val; + + if (master >= 256) { + reg = REG_GTH_GSWTDEST; + shift = 0; + } + + val = ioread32(gth->base + reg); + val &= (0xf << shift); + val >>= shift; + + return val ? 
val & 0x7 : -1; +} + static ssize_t master_attr_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -190,14 +214,7 @@ static ssize_t master_attr_store(struct device *dev, old_port = gth->master[ma->master]; if (old_port >= 0) { gth->master[ma->master] = -1; - clear_bit(ma->master, gth->output[old_port].master); - - /* - * if the port is active, program this setting, - * implies that runtime PM is on - */ - if (gth->output[old_port].output->active) - gth_master_set(gth, ma->master, -1); + gth_master_set(gth, ma->master, -1); } /* connect to the new output port, if any */ @@ -208,11 +225,8 @@ static ssize_t master_attr_store(struct device *dev, goto unlock; } - set_bit(ma->master, gth->output[port].master); - - /* if the port is active, program this setting, see above */ - if (gth->output[port].output->active) - gth_master_set(gth, ma->master, port); + gth_master_set(gth, ma->master, port); + gth->master[ma->master] = port; } gth->master[ma->master] = port; @@ -280,45 +294,6 @@ gth_output_parm_get(struct gth_device *gth, int port, unsigned int parm) return config; } -/* - * Reset outputs and sources - */ -static int intel_th_gth_reset(struct gth_device *gth) -{ - u32 reg; - int port, i; - - reg = ioread32(gth->base + REG_GTH_SCRPD0); - if (reg & SCRPD_DEBUGGER_IN_USE) - return -EBUSY; - - /* Always save/restore STH and TU registers in S0ix entry/exit */ - reg |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED; - iowrite32(reg, gth->base + REG_GTH_SCRPD0); - - /* output ports */ - for (port = 0; port < 8; port++) { - if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) == - GTH_NONE) - continue; - - gth_output_set(gth, port, 0); - gth_smcfreq_set(gth, port, 16); - } - /* disable overrides */ - iowrite32(0, gth->base + REG_GTH_DESTOVR); - - /* masters swdest_0~31 and gswdest */ - for (i = 0; i < 33; i++) - iowrite32(0, gth->base + REG_GTH_SWDEST0 + i * 4); - - /* sources */ - iowrite32(0, gth->base + REG_GTH_SCR); - iowrite32(0xfc, gth->base + REG_GTH_SCR2); - - return 0; -} - /* * "outputs" attribute group */ @@ -464,6 +439,66 @@ static int intel_th_output_attributes(struct gth_device *gth) return sysfs_create_group(&gth->dev->kobj, &gth->output_group); } +/** + * intel_th_gth_stop() - stop tracing to an output device + * @gth: GTH device + * @output: output device's descriptor + * @capture_done: set when no more traces will be captured + * + * This will stop tracing using the force storeEn off signal and wait for the + * pipelines to be empty for the corresponding output port. + */ +static void intel_th_gth_stop(struct gth_device *gth, + struct intel_th_output *output, + bool capture_done) +{ + struct intel_th_device *outdev = + container_of(output, struct intel_th_device, output); + unsigned long count; + u32 reg; + u32 scr2 = 0xfc | (capture_done ?
1 : 0); + + iowrite32(0, gth->base + REG_GTH_SCR); + iowrite32(scr2, gth->base + REG_GTH_SCR2); + + /* wait on pipeline empty for the given port */ + for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH; + count && !(reg & BIT(output->port)); count--) { + reg = ioread32(gth->base + REG_GTH_STAT); + cpu_relax(); + } + + if (!count) + dev_dbg(gth->dev, "timeout waiting for GTH[%d] PLE\n", + output->port); + + /* wait on output pipeline empty */ + if (output->wait_empty) + output->wait_empty(outdev); + + /* clear force capture done for next captures */ + iowrite32(0xfc, gth->base + REG_GTH_SCR2); +} + +/** + * intel_th_gth_start() - start tracing to an output device + * @gth: GTH device + * @output: output device's descriptor + * + * This will start tracing using force storeEn signal. + */ +static void intel_th_gth_start(struct gth_device *gth, + struct intel_th_output *output) +{ + u32 scr = 0xfc0000; + + if (output->multiblock) + scr |= 0xff; + + iowrite32(scr, gth->base + REG_GTH_SCR); + iowrite32(0, gth->base + REG_GTH_SCR2); +} + /** * intel_th_gth_disable() - disable tracing to an output device * @thdev: GTH device @@ -477,48 +512,49 @@ static void intel_th_gth_disable(struct intel_th_device *thdev, struct intel_th_output *output) { struct gth_device *gth = dev_get_drvdata(&thdev->dev); - unsigned long count; - int master; + int i; u32 reg; spin_lock(&gth->gth_lock); output->active = false; - for_each_set_bit(master, gth->output[output->port].master, - TH_CONFIGURABLE_MASTERS) { - gth_master_set(gth, master, -1); - } - spin_unlock(&gth->gth_lock); - - iowrite32(0, gth->base + REG_GTH_SCR); - iowrite32(0xfd, gth->base + REG_GTH_SCR2); - - /* wait on pipeline empty for the given port */ - for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH; - count && !(reg & BIT(output->port)); count--) { - reg = ioread32(gth->base + REG_GTH_STAT); - cpu_relax(); - } + for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++) + if (gth->master[i] == output->port) + gth_master_set(gth, i, -1); - /* clear force capture done for next captures */ - iowrite32(0xfc, gth->base + REG_GTH_SCR2); + spin_unlock(&gth->gth_lock); - if (!count) - dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n", - output->port); + intel_th_gth_stop(gth, output, true); reg = ioread32(gth->base + REG_GTH_SCRPD0); reg &= ~output->scratchpad; iowrite32(reg, gth->base + REG_GTH_SCRPD0); + + /* Workaround for PTI pipeline empty not set by hardware */ + if (output->type == GTH_PTI && + !(BIT(output->port) & ioread32(gth->base + REG_GTH_STAT))) + intel_th_reset(thdev); } -static void gth_tscu_resync(struct gth_device *gth) +/* + * Set default configuration. + */ +static void intel_th_gth_reset(struct gth_device *gth) { u32 reg; - reg = ioread32(gth->base + REG_TSCU_TSUCTRL); - reg &= ~TSUCTRL_CTCRESYNC; - iowrite32(reg, gth->base + REG_TSCU_TSUCTRL); + /* Always save/restore STH and TU registers in S0ix entry/exit */ + reg = ioread32(gth->base + REG_GTH_SCRPD0); + reg |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED; + iowrite32(reg, gth->base + REG_GTH_SCRPD0); + + /* Force sources off */ + iowrite32(0, gth->base + REG_GTH_SCR); + iowrite32(0xfc, gth->base + REG_GTH_SCR2); + + /* Setup CTS for single trigger */ + iowrite32(0x80000000, gth->base + REG_CTS_C0S0_EN); + iowrite32(0x40000010, gth->base + REG_CTS_C0S0_ACT); } /** @@ -529,35 +565,91 @@ static void gth_tscu_resync(struct gth_device *gth) * This will configure all masters set to output to this device and * enable tracing using force storeEn signal.
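+ * + * Returns 0 on success, or a negative error code if a debugger owns the + * hardware or another output port is already active.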
*/ -static void intel_th_gth_enable(struct intel_th_device *thdev, - struct intel_th_output *output) +static int intel_th_gth_enable(struct intel_th_device *thdev, + struct intel_th_output *output) { struct gth_device *gth = dev_get_drvdata(&thdev->dev); - struct intel_th *th = to_intel_th(thdev); - u32 scr = 0xfc0000, scrpd; - int master; + u32 scrpd; + int i; + int ret = -EBUSY; + + /* No operation allowed while a debugger is connected */ + scrpd = ioread32(gth->base + REG_GTH_SCRPD0); + if (scrpd & SCRPD_DEBUGGER_IN_USE) + return ret; spin_lock(&gth->gth_lock); - for_each_set_bit(master, gth->output[output->port].master, - TH_CONFIGURABLE_MASTERS + 1) { - gth_master_set(gth, master, output->port); + + /* Only allow one output active at a time */ + for (i = 0; i < TH_POSSIBLE_OUTPUTS; i++) { + if (gth->output[i].output && + gth->output[i].output->active) { + spin_unlock(&gth->gth_lock); + return ret; + } } - if (output->multiblock) - scr |= 0xff; + intel_th_reset(thdev); + intel_th_gth_reset(gth); + + /* Re-configure output */ + gth_output_set(gth, output->port, gth->output[output->port].config); + gth_smcfreq_set(gth, output->port, gth->output[output->port].smcfreq); + + /* Enable masters for the output, disable others */ + for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++) + gth_master_set(gth, i, gth->master[i] == output->port ? + output->port : -1); output->active = true; spin_unlock(&gth->gth_lock); - if (INTEL_TH_CAP(th, tscu_enable)) - gth_tscu_resync(gth); + /* Setup the output */ + ret = intel_th_output_activate(output); + if (ret) + return ret; scrpd = ioread32(gth->base + REG_GTH_SCRPD0); scrpd |= output->scratchpad; iowrite32(scrpd, gth->base + REG_GTH_SCRPD0); - iowrite32(scr, gth->base + REG_GTH_SCR); - iowrite32(0, gth->base + REG_GTH_SCR2); + /* Enable sources */ + intel_th_gth_start(gth, output); + + return 0; +} + +/** + * intel_th_gth_switch() - execute a switch sequence + * @thdev: GTH device + * @output: output device's descriptor + * + * This will execute a switch sequence that will trigger a switch window + * when tracing to MSC in multi-block mode.
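+ * The trigger is pulsed through the CTS control register; once the trigger + * status asserts, the sources are stopped and restarted with local interrupts + * disabled, so the window switch appears atomic to the trace stream.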
+ */ +static void intel_th_gth_switch(struct intel_th_device *thdev, + struct intel_th_output *output) +{ + struct gth_device *gth = dev_get_drvdata(&thdev->dev); + unsigned long count, flags; + u32 reg; + + /* trigger */ + iowrite32(0, gth->base + REG_CTS_CTL); + iowrite32(1, gth->base + REG_CTS_CTL); + /* wait on trigger status */ + for (reg = 0, count = CTS_TRIG_WAITLOOP_DEPTH; + count && !(reg & BIT(4)); count--) { + reg = ioread32(gth->base + REG_CTS_STAT); + cpu_relax(); + } + if (!count) + dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n"); + + local_irq_save(flags); + intel_th_gth_stop(gth, output, false); + intel_th_gth_start(gth, output); + local_irq_restore(flags); } /** @@ -574,9 +666,16 @@ static void intel_th_gth_enable(struct intel_th_device *thdev, static int intel_th_gth_assign(struct intel_th_device *thdev, struct intel_th_device *othdev) { - struct gth_device *gth = dev_get_drvdata(&thdev->dev); + struct gth_device *gth; int i, id; + if (!thdev || !othdev) + return -EINVAL; + + gth = dev_get_drvdata(&thdev->dev); + if (!gth) + return -EINVAL; + if (thdev->host_mode) return -EBUSY; @@ -640,10 +739,9 @@ intel_th_gth_set_output(struct intel_th_device *thdev, unsigned int master) master = TH_CONFIGURABLE_MASTERS; spin_lock(&gth->gth_lock); - if (gth->master[master] == -1) { - set_bit(master, gth->output[port].master); + if (gth->master[master] == -1) gth->master[master] = port; - } + spin_unlock(&gth->gth_lock); return 0; @@ -674,29 +772,19 @@ static int intel_th_gth_probe(struct intel_th_device *thdev) gth->base = base; spin_lock_init(&gth->gth_lock); - dev_set_drvdata(dev, gth); - - /* - * Host mode can be signalled via SW means or via SCRPD_DEBUGGER_IN_USE - * bit. Either way, don't reset HW in this case, and don't export any - * capture configuration attributes. Also, refuse to assign output - * drivers to ports, see intel_th_gth_assign(). - */ - if (thdev->host_mode) - return 0; + dev_set_drvdata(dev, gth); - ret = intel_th_gth_reset(gth); - if (ret) { - if (ret != -EBUSY) - return ret; - - thdev->host_mode = true; - - return 0; - } + /* + * Host mode can be signalled via SW means or via SCRPD_DEBUGGER_IN_USE + * bit. Either way, don't reset HW in this case, and don't export any + * capture configuration attributes. Also, refuse to assign output + * drivers to ports, see intel_th_gth_assign().
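+ * While in host mode an external debugger owns the Trace Hub, hence the + * early return below.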
+ */ + if (thdev->host_mode) + return 0; for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++) - gth->master[i] = -1; + gth->master[i] = gth_master_get(gth, i); for (i = 0; i < TH_POSSIBLE_OUTPUTS; i++) { gth->output[i].gth = gth; @@ -739,6 +827,7 @@ static struct intel_th_driver intel_th_gth_driver = { .unassign = intel_th_gth_unassign, .set_output = intel_th_gth_set_output, .enable = intel_th_gth_enable, + .trig_switch = intel_th_gth_switch, .disable = intel_th_gth_disable, .driver = { .name = "gth", diff --git a/drivers/hwtracing/intel_th/gth.h b/drivers/hwtracing/intel_th/gth.h index f3d234251a12..cdfd5588a62a 100644 --- a/drivers/hwtracing/intel_th/gth.h +++ b/drivers/hwtracing/intel_th/gth.h @@ -57,6 +57,11 @@ enum { REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */ REG_TSCU_TSUCTRL = 0x2000, /* TSCU control register */ REG_TSCU_TSCUSTAT = 0x2004, /* TSCU status register */ + /* Common Capture Sequencer (CTS) registers */ + REG_CTS_C0S0_EN = 0x30c0, /* clause_event_enable_c0s0 */ + REG_CTS_C0S0_ACT = 0x3180, /* clause_action_control_c0s0 */ + REG_CTS_STAT = 0x32a0, /* cts_status */ + REG_CTS_CTL = 0x32a4, /* cts_control */ }; /* waiting for Pipeline Empty bit(s) to assert for GTH */ @@ -64,5 +69,7 @@ enum { #define TSUCTRL_CTCRESYNC BIT(0) #define TSCUSTAT_CTCSYNCING BIT(1) +/* waiting for Trigger status to assert for CTS */ +#define CTS_TRIG_WAITLOOP_DEPTH 10000 #endif /* __INTEL_TH_GTH_H__ */ diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h index 99ad563fc40d..d503fb8cc06d 100644 --- a/drivers/hwtracing/intel_th/intel_th.h +++ b/drivers/hwtracing/intel_th/intel_th.h @@ -26,6 +26,8 @@ enum { INTEL_TH_SWITCH, }; +struct intel_th_device; + /** * struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices * @port: output port number, assigned by the switch @@ -33,6 +35,7 @@ enum { * @scratchpad: scratchpad bits to flag when this output is enabled * @multiblock: true for multiblock output configuration * @active: true when this output is enabled + * @wait_empty: wait for device pipeline to be empty * * Output port descriptor, used by switch driver to tell which output * port this output device corresponds to. 
Filled in at output device's @@ -45,6 +48,7 @@ struct intel_th_output { unsigned int scratchpad; bool multiblock; bool active; + void (*wait_empty)(struct intel_th_device *); }; /** @@ -61,6 +65,7 @@ struct intel_th_drvdata { * struct intel_th_device - device on the intel_th bus * @dev: device * @drvdata: hardware capabilities/quirks + * @th: core device * @resource: array of resources available to this device * @num_resources: number of resources in @resource array * @type: INTEL_TH_{SOURCE,OUTPUT,SWITCH} @@ -70,12 +75,13 @@ struct intel_th_drvdata { * @name: device name to match the driver */ struct intel_th_device { - struct device dev; + struct device dev; struct intel_th_drvdata *drvdata; - struct resource *resource; - unsigned int num_resources; - unsigned int type; - int id; + struct intel_th *th; + struct resource *resource; + unsigned int num_resources; + unsigned int type; + int id; /* INTEL_TH_SWITCH specific */ bool host_mode; @@ -154,6 +160,7 @@ intel_th_output_assigned(struct intel_th_device *thdev) */ struct intel_th_driver { struct device_driver driver; + void (*first_trace)(struct intel_th_device *thdev); int (*probe)(struct intel_th_device *thdev); void (*remove)(struct intel_th_device *thdev); /* switch (GTH) ops */ @@ -161,8 +168,10 @@ struct intel_th_driver { struct intel_th_device *othdev); void (*unassign)(struct intel_th_device *thdev, struct intel_th_device *othdev); - void (*enable)(struct intel_th_device *thdev, + int (*enable)(struct intel_th_device *thdev, struct intel_th_output *output); + void (*trig_switch)(struct intel_th_device *thdev, + struct intel_th_output *output); void (*disable)(struct intel_th_device *thdev, struct intel_th_output *output); /* output ops */ @@ -219,13 +228,15 @@ static inline struct intel_th *to_intel_th(struct intel_th_device *thdev) struct intel_th * intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, - struct resource *devres, unsigned int ndevres, int irq); + struct resource *devres, unsigned int ndevres, int irq, void (*reset)(struct intel_th *th)); void intel_th_free(struct intel_th *th); int intel_th_driver_register(struct intel_th_driver *thdrv); void intel_th_driver_unregister(struct intel_th_driver *thdrv); -int intel_th_trace_enable(struct intel_th_device *thdev); +int intel_th_output_activate(struct intel_th_output *output); +void intel_th_reset(struct intel_th_device *hub); +int intel_th_trace_switch(struct intel_th_device *thdev); int intel_th_trace_disable(struct intel_th_device *thdev); int intel_th_set_output(struct intel_th_device *thdev, unsigned int master); @@ -252,6 +263,7 @@ enum { * @num_thdevs: number of devices in the @thdev array * @num_resources: number or resources in the @resource array * @irq: irq number + * @reset: reset function of the core device * @id: this Intel TH controller's device ID in the system * @major: device node major for output devices */ @@ -269,6 +281,8 @@ struct intel_th { unsigned int num_resources; int irq; + void (*reset)(struct intel_th *th); + int id; int major; #ifdef CONFIG_MODULES @@ -296,11 +310,11 @@ to_intel_th_hub(struct intel_th_device *thdev) enum { /* Global Trace Hub (GTH) */ REG_GTH_OFFSET = 0x0000, - REG_GTH_LENGTH = 0x2000, + REG_GTH_LENGTH = 0x4000, /* Timestamp counter unit (TSCU) */ REG_TSCU_OFFSET = 0x2000, - REG_TSCU_LENGTH = 0x1000, + REG_TSCU_LENGTH = 0x2000, /* Software Trace Hub (STH) [0x4000..0x4fff] */ REG_STH_OFFSET = 0x4000, diff --git a/drivers/hwtracing/intel_th/msu-dvc.c b/drivers/hwtracing/intel_th/msu-dvc.c new file mode 100755 
index 000000000000..91693c0f8175 --- /dev/null +++ b/drivers/hwtracing/intel_th/msu-dvc.c @@ -0,0 +1,1184 @@ +/* + * Intel Trace Hub to USB dvc-trace driver + * + * Copyright (C) 2015, Intel Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "msu.h" + +#ifdef MDD_DEBUG +#define MDD_F_DEBUG() pr_debug("\n") +#else +#define MDD_F_DEBUG() do {} while (0) +#endif + +#define DTC_DRV_NAME "dvcith" + +#define MDD_MIN_TRANSFER_DEF 2048 +#define MDD_RETRY_TIMEOUT_DEF 2 +#define MDD_MAX_RETRY_CNT_DEF 150 + +/* The DWC3 gadget is able to handle a maximum of 32 TRBs per-ep (an sg based + * request counts as the number of sg-s). + * This should be updated in case some other UDC has a lower threshold. */ +#define MDD_MAX_TRB_CNT 32 + +#define mdd_err(mdd, ...) dev_err(&(mdd)->ddev.device, ## __VA_ARGS__) +#define mdd_warn(mdd, ...) dev_warn(&(mdd)->ddev.device, ## __VA_ARGS__) +#define mdd_info(mdd, ...) dev_info(&(mdd)->ddev.device, ## __VA_ARGS__) +#define mdd_debug(mdd, ...) dev_dbg(&(mdd)->ddev.device, ## __VA_ARGS__) + +#ifdef MDD_DEBUG +struct msu_dvc_stats { + unsigned long work_start; + unsigned long work_end; + + unsigned long loop_count; + unsigned long hits; + + u64 full_block_size; + u64 valid_block_size; + u64 valid_data_size; + + u32 transfer_type:2; + u32 process_type:2; + + enum usb_device_speed speed; +}; +#endif + +enum { + MDD_TRANSFER_NO_CHANGE, + MDD_TRANSFER_AUTO, + MDD_TRANSFER_MIN = MDD_TRANSFER_AUTO, + MDD_TRANSFER_SINGLE, + MDD_TRANSFER_MULTI, + MDD_TRANSFER_MAX = MDD_TRANSFER_MULTI, +}; + +static const char *const transfer_type_name[] = { + [MDD_TRANSFER_AUTO] = "Auto", + [MDD_TRANSFER_SINGLE] = "Single", + [MDD_TRANSFER_MULTI] = "Multi", +}; + +enum { + MDD_PROC_NO_CHANGE, + MDD_PROC_NONE, + MDD_PROC_MIN = MDD_PROC_NONE, + MDD_PROC_REM_TAIL, + MDD_PROC_REM_ALL, + MDD_PROC_MAX = MDD_PROC_REM_ALL, +}; + +static const char *const process_type_name[] = { + [MDD_PROC_NONE] = "Full-Blocks", + [MDD_PROC_REM_TAIL] = "Trimmed-Blocks", + [MDD_PROC_REM_ALL] = "STP", +}; + +struct mdd_transfer_data { + u8 *buffer; + u8 *buffer_sg; + size_t buffer_sg_len; + dma_addr_t buffer_dma; + size_t buffer_len; + struct scatterlist *sg_raw; + struct scatterlist *sg_proc; + struct scatterlist *sg_trans; /* not separately allocated */ + size_t block_count; + size_t block_size; + spinlock_t lock; +}; + +#define mdd_lock_transfer(mdd) spin_lock(&mdd->tdata.lock) +#define mdd_unlock_transfer(mdd) spin_unlock(&mdd->tdata.lock) + +struct msu_dvc_dev { + struct dvct_source_device ddev; + atomic_t *dtc_status; + struct usb_ep *ep; + struct usb_function *func; + enum usb_device_speed speed; + struct intel_th_device *th_dev; + + struct workqueue_struct *wrq; + struct work_struct work; + struct usb_request *req; + wait_queue_head_t wq; + atomic_t req_ongoing; + + /* attributes */ + u32 retry_timeout; + u32 max_retry_count; + u32 transfer_type:2; + u32 process_type:2; + u32 min_transfer; + +#ifdef MDD_DEBUG + struct msu_dvc_stats
stats; +#endif + struct mdd_transfer_data tdata; + + struct list_head mdd_list; +}; + +static LIST_HEAD(mdd_devs); +static DEFINE_SPINLOCK(mdd_devs_lock); + +static inline struct usb_gadget *mdd_gadget(struct msu_dvc_dev *mdd) +{ + BUG_ON(!mdd->func); + BUG_ON(!mdd->func->config); + BUG_ON(!mdd->func->config->cdev); + BUG_ON(!mdd->func->config->cdev->gadget); + return mdd->func->config->cdev->gadget; +} + +/* Back-cast to msu_dvc_dev */ +static inline struct msu_dvc_dev *dtc_to_mdd(struct dvct_source_device *p_dtc) +{ + return container_of(p_dtc, struct msu_dvc_dev, ddev); +} + +static ssize_t mdd_min_transfer_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + return sprintf(buf, "%u\n", mdd->min_transfer); +} + +static ssize_t mdd_min_transfer_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + + if (mdd->dtc_status + && dvct_get_status(mdd->dtc_status, DVCT_MASK_TRANS)) + return -EBUSY; + + /* 48 represents the size of sync frames that are generated by + * a window switch, from this point on we have "real data" + * Going under this value could result in unneeded switching */ + if (!kstrtou32(buf, 10, &mdd->min_transfer)) { + if (mdd->min_transfer < 48) + mdd->min_transfer = 48; + return count; + } + + return -EINVAL; +} + +static DEVICE_ATTR_RW(mdd_min_transfer); + + +static ssize_t mdd_retry_timeout_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + return sprintf(buf, "%u\n", mdd->retry_timeout); +} + +static ssize_t mdd_retry_timeout_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + if (!kstrtou32(buf, 10, &mdd->retry_timeout)) + return count; + + return -EINVAL; +} + +static DEVICE_ATTR_RW(mdd_retry_timeout); + +static ssize_t mdd_max_retry_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + return sprintf(buf, "%u\n", mdd->max_retry_count); +} + +static ssize_t mdd_max_retry_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + if (!kstrtou32(buf, 10, &mdd->max_retry_count)) + return count; + + return -EINVAL; +} + +static DEVICE_ATTR_RW(mdd_max_retry); + +static ssize_t mdd_transfer_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + return sprintf(buf, "%d %s\n", mdd->transfer_type, + transfer_type_name[mdd->transfer_type]); +} + +static ssize_t mdd_transfer_type_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct msu_dvc_dev *mdd; + u8 tmp; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + + if (mdd->dtc_status + && dvct_get_status(mdd->dtc_status, DVCT_MASK_TRANS)) + return -EBUSY; + + if (!kstrtou8(buf, 10, &tmp) && tmp <= MDD_TRANSFER_MAX + 
&& tmp >= MDD_TRANSFER_MIN) { + mdd->transfer_type = tmp; + return count; + } + return -EINVAL; +} + +static DEVICE_ATTR_RW(mdd_transfer_type); + +static ssize_t mdd_proc_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + return sprintf(buf, "%d %s\n", mdd->process_type, + process_type_name[mdd->process_type]); +} + +static ssize_t mdd_proc_type_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct msu_dvc_dev *mdd; + u8 tmp; + + MDD_F_DEBUG(); + + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + + if (mdd->dtc_status + && dvct_get_status(mdd->dtc_status, DVCT_MASK_TRANS)) + return -EBUSY; + + if (!kstrtou8(buf, 10, &tmp) && tmp <= MDD_PROC_MAX + && tmp >= MDD_PROC_MIN) { + mdd->process_type = tmp; + return count; + } + return -EINVAL; +} + +static DEVICE_ATTR_RW(mdd_proc_type); + +#ifdef MDD_DEBUG + +static ssize_t mdd_stats_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + int len = 0; + + static const char *const u_speed_names[] = { + [USB_SPEED_UNKNOWN] = "?", + [USB_SPEED_LOW] = "LS", + [USB_SPEED_FULL] = "FS", + [USB_SPEED_HIGH] = "HS", + [USB_SPEED_WIRELESS] = "WR", + [USB_SPEED_SUPER] = "SS", + }; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + + len += snprintf(buf + len, PAGE_SIZE - len, "R.count\tR.hits\t"); + + len += snprintf(buf + len, PAGE_SIZE - len, "T.tot_j\tT.tot_ms\t"); + len += + snprintf(buf + len, PAGE_SIZE - len, "D.total\tD.block\tT.stp\t"); + len += snprintf(buf + len, PAGE_SIZE - len, "Tr.type\tProc.type\t"); + + len += snprintf(buf + len, PAGE_SIZE - len, "USB.speed\n"); + + /* Actual values starts here */ + len += + snprintf(buf + len, PAGE_SIZE - len, "%lu\t%lu\t", + mdd->stats.loop_count, mdd->stats.hits); + + len += + snprintf(buf + len, PAGE_SIZE - len, "%lu\t%u\t", + (mdd->stats.work_end - mdd->stats.work_start), + jiffies_to_msecs(mdd->stats.work_end - + mdd->stats.work_start)); + len += + snprintf(buf + len, PAGE_SIZE - len, "%llu\t%llu\t%llu\t", + mdd->stats.full_block_size, mdd->stats.valid_block_size, + mdd->stats.valid_data_size); + len += + snprintf(buf + len, PAGE_SIZE - len, "%s\t", + transfer_type_name[mdd->transfer_type]); + len += + snprintf(buf + len, PAGE_SIZE - len, "%s\t", + process_type_name[mdd->process_type]); + + len += + snprintf(buf + len, PAGE_SIZE - len, "%s\n", + u_speed_names[mdd->stats.speed]); + + return len; +} + +static DEVICE_ATTR_RO(mdd_stats); + +static void init_stats_start(struct msu_dvc_dev *mdd) +{ + mdd->stats.loop_count = 0; + mdd->stats.hits = 0; + + mdd->stats.work_start = jiffies; + + mdd->stats.full_block_size = 0; + mdd->stats.valid_block_size = 0; + mdd->stats.valid_data_size = 0; + + mdd->stats.process_type = mdd->process_type; + mdd->stats.transfer_type = mdd->transfer_type; + mdd->stats.speed = mdd->speed; +} + +#define stats_loop(mdd) ((mdd)->stats.loop_count++) +#define stats_hit(mdd) ((mdd)->stats.hits++) +#else +#define init_stats_start(n) do {} while (0) +#define stats_loop(mdd) do {} while (0) +#define stats_hit(mdd) do {} while (0) +#endif + +static void mdd_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct msu_dvc_dev *mdd = (struct msu_dvc_dev *)req->context; + + mdd_lock_transfer(mdd); + + if (req->status != 0) { + mdd_err(mdd, "Usb request error %d\n", req->status); + dvct_clr_status(mdd->dtc_status, 
DVCT_MASK_TRANS); + dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR); + } + atomic_set(&mdd->req_ongoing, 0); + wake_up(&mdd->wq); + mdd_unlock_transfer(mdd); +} + +static int mdd_setup_transfer_data(struct msu_dvc_dev *mdd) +{ + int ret = -EINVAL; + + MDD_F_DEBUG(); + + if (!mdd->ep || !mdd->req) { + mdd_err(mdd, "Invalid endpoint data\n"); + goto err; + } + + mdd->tdata.block_count = msc_max_blocks(mdd->th_dev); + if (mdd->tdata.block_count == 0) { + mdd_err(mdd, "Invalid block count %zu\n", + mdd->tdata.block_count); + goto err; + } + + mdd->tdata.block_size = msc_block_max_size(mdd->th_dev); + if (mdd->tdata.block_size == 0) { + mdd_err(mdd, "Invalid block size %zu\n", mdd->tdata.block_size); + goto err; + } + + mdd->tdata.sg_raw = kmalloc_array(mdd->tdata.block_count, + sizeof(*mdd->tdata.sg_raw), + GFP_KERNEL); + if (!mdd->tdata.sg_raw) { + mdd_err(mdd, "Cannot allocate sg memory %zu\n", + mdd->tdata.block_size); + goto err_sg_raw; + } + + if (mdd->process_type != MDD_PROC_NONE) { + mdd->tdata.sg_proc = kmalloc_array(mdd->tdata.block_count, + sizeof(*mdd->tdata.sg_proc), + GFP_KERNEL); + if (!mdd->tdata.sg_proc) { + mdd_err(mdd, "Cannot allocate sg memory %zu\n", + mdd->tdata.block_size); + goto err_sg_proc; + } + mdd->tdata.sg_trans = mdd->tdata.sg_proc; + } else { + mdd->tdata.sg_trans = mdd->tdata.sg_raw; + } + + if (mdd->transfer_type == MDD_TRANSFER_SINGLE) { + mdd->tdata.buffer_len = + mdd->tdata.block_count * mdd->tdata.block_size; + mdd->tdata.buffer = + dma_alloc_coherent(&(mdd_gadget(mdd)->dev), + mdd->tdata.buffer_len, + &mdd->tdata.buffer_dma, GFP_KERNEL); + if (!mdd->tdata.buffer) { + mdd_err(mdd, "Cannot allocate DMA memory\n"); + goto err_l_buf; + } + } else { + mdd->tdata.buffer_sg_len = + mdd->tdata.block_count * mdd->tdata.block_size; + mdd->tdata.buffer_sg = kmalloc(mdd->tdata.buffer_sg_len, GFP_KERNEL); + if(mdd->tdata.buffer_sg == NULL) + mdd->tdata.buffer_sg_len = 0; + + mdd->tdata.buffer = NULL; + mdd->tdata.buffer_dma = 0; + mdd->tdata.buffer_len = 0; + } + return 0; +err_l_buf: + kfree(mdd->tdata.sg_proc); + mdd->tdata.sg_proc = NULL; +err_sg_proc: + kfree(mdd->tdata.sg_raw); + mdd->tdata.sg_raw = NULL; +err_sg_raw: + ret = -ENOMEM; +err: + return ret; +} + +static void mdd_reset_transfer_data(struct msu_dvc_dev *mdd) +{ + MDD_F_DEBUG(); + kfree(mdd->tdata.sg_proc); + mdd->tdata.sg_proc = NULL; + kfree(mdd->tdata.sg_raw); + mdd->tdata.sg_raw = NULL; + if (mdd->tdata.buffer) { + dma_free_coherent(&(mdd_gadget(mdd)->dev), + mdd->tdata.buffer_len, mdd->tdata.buffer, + mdd->tdata.buffer_dma); + mdd->tdata.buffer = NULL; + mdd->tdata.buffer_dma = 0; + mdd->tdata.buffer_len = 0; + } + + if(mdd->tdata.buffer_sg != NULL) + { + mdd->tdata.buffer_sg_len = 0; + kfree(mdd->tdata.buffer_sg); + } +} + +static unsigned mdd_sg_len(struct scatterlist *sgl, int nents) +{ + int i; + struct scatterlist *sg; + unsigned ret = 0; + + /*MDD_F_DEBUG(); */ + for_each_sg(sgl, sg, nents, i) { + ret += sg->length; + } + return ret; +} + +static int mdd_send_sg_buffer(struct msu_dvc_dev *mdd, int nents) +{ + size_t transfer_len; + + /*MDD_F_DEBUG(); */ + mdd_lock_transfer(mdd); + transfer_len = + sg_copy_to_buffer(mdd->tdata.sg_trans, nents, mdd->tdata.buffer_sg, + mdd->tdata.buffer_sg_len); + + if (!transfer_len) { + mdd_err(mdd, "Cannot copy into nonsg memory\n"); + mdd_unlock_transfer(mdd); + return -EINVAL; + } + + mdd->req->buf = mdd->tdata.buffer_sg; + mdd->req->length = transfer_len; + mdd->req->dma = 0; + mdd->req->sg = NULL; + mdd->req->num_sgs = 0; + + mdd->req->context = mdd; + 
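/* on completion, mdd_complete() clears req_ongoing and wakes mdd->wq */ +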
mdd->req->complete = mdd_complete; + mdd->req->zero = 1; + + if (usb_ep_queue(mdd->ep, mdd->req, GFP_KERNEL)) { + mdd_err(mdd, "Cannot queue request\n"); + dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR); + mdd_unlock_transfer(mdd); + return -EINVAL; + } + + atomic_set(&mdd->req_ongoing, 1); + mdd_unlock_transfer(mdd); + /*wait for done stop or disable */ + wait_event(mdd->wq, (!atomic_read(&mdd->req_ongoing) || + (atomic_read(mdd->dtc_status) != + DVCT_MASK_ONLINE_TRANS))); + return 0; +} + +static int mdd_send_sg(struct msu_dvc_dev *mdd, int nents) +{ + struct scatterlist *sgl = mdd->tdata.sg_trans; + + if(mdd->tdata.buffer_sg != NULL) + { + return mdd_send_sg_buffer(mdd, nents); + } + + /*MDD_F_DEBUG(); */ + while (nents) { + int trans_ents; + + mdd_lock_transfer(mdd); + + if (nents > MDD_MAX_TRB_CNT) { + trans_ents = MDD_MAX_TRB_CNT; + sg_mark_end(&sgl[trans_ents - 1]); + } else { + trans_ents = nents; + } + + if (trans_ents == 1) { + mdd->req->buf = sg_virt(sgl); + mdd->req->length = sgl->length; + mdd->req->dma = 0; + mdd->req->sg = NULL; + mdd->req->num_sgs = 0; + } else { + mdd->req->buf = NULL; + mdd->req->length = mdd_sg_len(sgl, trans_ents); + mdd->req->dma = 0; + mdd->req->sg = sgl; + mdd->req->num_sgs = trans_ents; + } + + mdd->req->context = mdd; + mdd->req->complete = mdd_complete; + mdd->req->zero = 1; + + if (usb_ep_queue(mdd->ep, mdd->req, GFP_KERNEL)) { + mdd_err(mdd, "Cannot queue request\n"); + dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR); + mdd_unlock_transfer(mdd); + return -EINVAL; + } + + atomic_set(&mdd->req_ongoing, 1); + nents -= trans_ents; + sgl += trans_ents; + + mdd_unlock_transfer(mdd); + /*wait for done stop or disable */ + wait_event(mdd->wq, (!atomic_read(&mdd->req_ongoing) || + (atomic_read(mdd->dtc_status) != + DVCT_MASK_ONLINE_TRANS))); + } + return 0; +} + +static int mdd_send_buffer(struct msu_dvc_dev *mdd, int nents) +{ + size_t transfer_len; + + /*MDD_F_DEBUG(); */ + mdd_lock_transfer(mdd); + transfer_len = + sg_copy_to_buffer(mdd->tdata.sg_trans, nents, mdd->tdata.buffer, + mdd->tdata.buffer_len); + if (!transfer_len) { + mdd_err(mdd, "Cannot copy into nonsg memory\n"); + mdd_unlock_transfer(mdd); + return -EINVAL; + } + mdd->req->buf = mdd->tdata.buffer; + mdd->req->length = transfer_len; + mdd->req->dma = mdd->tdata.buffer_dma; + mdd->req->sg = NULL; + mdd->req->num_sgs = 0; + + mdd->req->context = mdd; + mdd->req->complete = mdd_complete; + mdd->req->zero = 1; + + if (usb_ep_queue(mdd->ep, mdd->req, GFP_KERNEL)) { + mdd_err(mdd, "Cannot queue request\n"); + dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR); + mdd_unlock_transfer(mdd); + return -EINVAL; + } + + atomic_set(&mdd->req_ongoing, 1); + mdd_unlock_transfer(mdd); + /*wait for done stop or disable */ + wait_event(mdd->wq, (!atomic_read(&mdd->req_ongoing) || + (atomic_read(mdd->dtc_status) != + DVCT_MASK_ONLINE_TRANS))); + return 0; +} + +static int mdd_send_auto(struct msu_dvc_dev *mdd, int nents) +{ + /*MDD_F_DEBUG(); */ + if (!mdd_gadget(mdd)->sg_supported) + return mdd_send_buffer(mdd, nents); + else + return mdd_send_sg(mdd, nents); +} + +static int (*send_funcs[])(struct msu_dvc_dev *, int) = { + [MDD_TRANSFER_AUTO] = mdd_send_auto, + [MDD_TRANSFER_SINGLE] = mdd_send_buffer, + [MDD_TRANSFER_MULTI] = mdd_send_sg, +}; + +#ifdef MDD_DEBUG +static int mdd_proc_add_stats(struct msu_dvc_dev *mdd, int nents) +{ + int i, count; + struct scatterlist *sg; + + /*MDD_F_DEBUG(); */ + for_each_sg(mdd->tdata.sg_raw, sg, nents, i) { + count = msc_data_sz((struct msc_block_desc *)sg_virt(sg)); + 
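/* full = whole block, valid = descriptor plus payload, data = payload only */ +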
+		mdd->stats.full_block_size += sg->length;
+		mdd->stats.valid_block_size += (count + MSC_BDESC);
+		mdd->stats.valid_data_size += count;
+	}
+
+	return i;
+}
+#else
+#define mdd_proc_add_stats(m, n) do {} while (0)
+#endif
+
+static int mdd_proc_trimmed_blocks(struct msu_dvc_dev *mdd, int nents)
+{
+	u8 *ptr;
+	size_t len;
+	int i, out_cnt = 0;
+	struct scatterlist *sg, *sg_dest = NULL;
+
+	/*MDD_F_DEBUG(); */
+	mdd_proc_add_stats(mdd, nents);
+
+	sg_init_table(mdd->tdata.sg_proc, nents);
+
+	for_each_sg(mdd->tdata.sg_raw, sg, nents, i) {
+		ptr = sg_virt(sg);
+		len = msc_data_sz((struct msc_block_desc *)ptr);
+		if (!len) {
+			mdd_err(mdd, "Zero length block\n");
+			continue;
+		}
+
+		len += MSC_BDESC;
+
+		if (!sg_dest)
+			sg_dest = mdd->tdata.sg_proc;
+		else
+			sg_dest = sg_next(sg_dest);
+		sg_set_buf(sg_dest, ptr, len);
+		out_cnt++;
+	}
+	if (sg_dest)
+		sg_mark_end(sg_dest);
+
+	return out_cnt;
+}
+
+static int mdd_proc_stp_only(struct msu_dvc_dev *mdd, int nents)
+{
+	u8 *ptr;
+	size_t len;
+	int i, out_cnt = 0;
+	struct scatterlist *sg, *sg_dest = NULL;
+
+	/*MDD_F_DEBUG(); */
+	mdd_proc_add_stats(mdd, nents);
+
+	sg_init_table(mdd->tdata.sg_proc, nents);
+
+	for_each_sg(mdd->tdata.sg_raw, sg, nents, i) {
+		ptr = sg_virt(sg);
+		len = msc_data_sz((struct msc_block_desc *)ptr);
+		ptr += MSC_BDESC;
+		if (!len) {
+			mdd_err(mdd, "Zero data length block\n");
+		} else {
+			if (!sg_dest)
+				sg_dest = mdd->tdata.sg_proc;
+			else
+				sg_dest = sg_next(sg_dest);
+			sg_set_buf(sg_dest, ptr, len);
+			out_cnt++;
+		}
+	}
+	if (sg_dest)
+		sg_mark_end(sg_dest);
+
+	return out_cnt;
+}
+
+static int (*proc_funcs[])(struct msu_dvc_dev *, int) = {
+#ifdef MDD_DEBUG
+	[MDD_PROC_NONE] = mdd_proc_add_stats,
+#endif
+	[MDD_PROC_REM_TAIL] = mdd_proc_trimmed_blocks,
+	[MDD_PROC_REM_ALL] = mdd_proc_stp_only,
+};
+
+static void mdd_work(struct work_struct *work)
+{
+	int nents, current_bytes, retry_cnt = 0;
+	struct msu_dvc_dev *mdd;
+
+	MDD_F_DEBUG();
+	mdd = container_of(work, struct msu_dvc_dev, work);
+	init_stats_start(mdd);
+
+	if (mdd_setup_transfer_data(mdd)) {
+		mdd_err(mdd, "Cannot set up transfer data\n");
+		return;
+	}
+	mdd_info(mdd, "Start transfer loop\n");
+	while (atomic_read(mdd->dtc_status) == DVCT_MASK_ONLINE_TRANS) {
+		sg_init_table(mdd->tdata.sg_raw, mdd->tdata.block_count);
+		/* It might be better if msc_sg_oldest_win() switched the
+		 * window itself when the "oldest" window contains data and
+		 * is also the current one. */
+
+		preempt_disable();
+		current_bytes = msc_current_win_bytes(mdd->th_dev);
+		if (current_bytes > mdd->min_transfer ||
+		    (current_bytes && retry_cnt >= mdd->max_retry_count)) {
+			msc_switch_window(mdd->th_dev);
+			nents = msc_sg_oldest_win(mdd->th_dev,
+						  mdd->tdata.sg_raw);
+			retry_cnt = 0;
+		} else {
+			if (unlikely(current_bytes < 0)) {
+				mdd_warn(mdd, "Unexpected state (%d), switch",
+					 current_bytes);
+				msc_switch_window(mdd->th_dev);
+			} else {
+				if (retry_cnt < mdd->max_retry_count)
+					retry_cnt++;
+			}
+			nents = 0;
+		}
+		preempt_enable();
+		stats_loop(mdd);
+
+		if (nents < 0) {
+			mdd_err(mdd, "Cannot get ith data\n");
+			dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR);
+			break;
+		}
+
+		if (nents && proc_funcs[mdd->process_type]) {
+			nents = proc_funcs[mdd->process_type](mdd, nents);
+			if (nents < 0) {
+				mdd_err(mdd, "Cannot process data\n");
+				dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR);
+				break;
+			}
+		}
+
+		if (nents) {
+			stats_hit(mdd);
+			if (send_funcs[mdd->transfer_type](mdd, nents))
+				break;
+		} else {
+			/* wait for stop or timeout */
+			wait_event_timeout(mdd->wq,
+					   (atomic_read(mdd->dtc_status) !=
+					    DVCT_MASK_ONLINE_TRANS),
+					   msecs_to_jiffies(mdd->retry_timeout));
+		}
+	}
+	mdd_info(mdd, "End transfer loop\n");
+	if (atomic_read(&mdd->req_ongoing)) {
+		usb_ep_dequeue(mdd->ep, mdd->req);
+		atomic_set(&mdd->req_ongoing, 0);
+	}
+
+#ifdef MDD_DEBUG
+	mdd->stats.work_end = jiffies;
+#endif
+	mdd_reset_transfer_data(mdd);
+}
+
+static int mdd_activate(struct dvct_source_device *client, atomic_t *status)
+{
+	struct msu_dvc_dev *mdd = dtc_to_mdd(client);
+
+	MDD_F_DEBUG();
+
+	mdd->dtc_status = status;
+
+	return 0;
+}
+
+static int mdd_binded(struct dvct_source_device *client, struct usb_ep *ep,
+		      struct usb_function *func)
+{
+	struct msu_dvc_dev *mdd = dtc_to_mdd(client);
+
+	MDD_F_DEBUG();
+	mdd->ep = ep;
+	mdd->func = func;
+
+	mdd->req = usb_ep_alloc_request(mdd->ep, GFP_KERNEL);
+	if (!mdd->req) {
+		mdd_err(mdd, "Cannot allocate usb request\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void mdd_connected(struct dvct_source_device *client,
+			  enum usb_device_speed speed)
+{
+	struct msu_dvc_dev *mdd = dtc_to_mdd(client);
+
+	MDD_F_DEBUG();
+	mdd->speed = speed;
+}
+
+union mdd_config {
+	u8 config;
+	struct {
+		u8 enable:1;	/* set when starting a transfer */
+		u8 tr_type:2;
+		u8 proc_type:2;
+	} params;
+};
+
+static int mdd_start_transfer(struct dvct_source_device *client, u8 config)
+{
+	struct msu_dvc_dev *mdd = dtc_to_mdd(client);
+	union mdd_config cfg;
+
+	MDD_F_DEBUG();
+	/* If these resources were ever shared with node-based reading,
+	 * this is where we would need to take a lock. */
+
+	cfg.config = config;
+
+	if (cfg.params.proc_type <= MDD_PROC_MAX
+	    && cfg.params.proc_type >= MDD_PROC_MIN) {
+		mdd_info(mdd, "Set process type %d\n", cfg.params.proc_type);
+		mdd->process_type = cfg.params.proc_type;
+	}
+
+	if (cfg.params.tr_type <= MDD_TRANSFER_MAX
+	    && cfg.params.tr_type >= MDD_TRANSFER_MIN) {
+		mdd_info(mdd, "Set transfer type %d\n", cfg.params.tr_type);
+		mdd->transfer_type = cfg.params.tr_type;
+	}
+
+	/* Force linear buffer transfer if the gadget does not support SG */
+	if (mdd->transfer_type != MDD_TRANSFER_SINGLE
+	    && !mdd_gadget(mdd)->sg_supported) {
+		mdd_info(mdd, "Force linear buffer transfer\n");
+		mdd->transfer_type = MDD_TRANSFER_SINGLE;
+	}
+
+	dvct_clr_status(mdd->dtc_status, DVCT_MASK_ERR);
+	dvct_set_status(mdd->dtc_status, DVCT_MASK_TRANS);
+	queue_work(mdd->wrq, &mdd->work);
+	return 0;
+}
+
+static int mdd_stop_transfer(struct dvct_source_device *client)
+{
+	struct msu_dvc_dev *mdd = dtc_to_mdd(client);
+
+	MDD_F_DEBUG();
+	dvct_clr_status(mdd->dtc_status, DVCT_MASK_TRANS);
+	wake_up(&mdd->wq);
+
+	return 0;
+}
+
+static void mdd_disconnected(struct dvct_source_device *client)
+{
+	struct msu_dvc_dev *mdd = dtc_to_mdd(client);
+
+	MDD_F_DEBUG();
+	mdd->speed = USB_SPEED_UNKNOWN;
+}
+
+static void mdd_unbinded(struct dvct_source_device *client)
+{
+	struct msu_dvc_dev *mdd = dtc_to_mdd(client);
+
+	MDD_F_DEBUG();
+
+	if (mdd->req) {
+		usb_ep_free_request(mdd->ep, mdd->req);
+		mdd->req = NULL;
+	}
+	mdd->ep = NULL;
+}
+
+static void mdd_deactivate(struct dvct_source_device *client)
+{
+	struct msu_dvc_dev *mdd = dtc_to_mdd(client);
+
+	MDD_F_DEBUG();
+	mdd->dtc_status = NULL;
+}
+
+/* the driver */
+static struct dvct_source_driver mdd_drv = {
+	.activate = mdd_activate,
+	.binded = mdd_binded,
+	.connected = mdd_connected,
+	.start_transfer = mdd_start_transfer,
+	.stop_transfer = mdd_stop_transfer,
+	.disconnected = mdd_disconnected,
+	.unbinded = mdd_unbinded,
+	.deactivate = mdd_deactivate,
+	.driver.name = DTC_DRV_NAME,
+};
+
+static struct msu_dvc_dev *mdd_alloc_device(const char *name)
+{
+	struct msu_dvc_dev *mdd;
+
+	mdd = kzalloc(sizeof(*mdd), GFP_KERNEL);
+
+	if (!mdd)
+		return ERR_PTR(-ENOMEM);
+
+	mdd->ddev.name_add = kstrdup(name, GFP_KERNEL);
+	if (!mdd->ddev.name_add) {
+		kfree(mdd);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* mdd->ddev.protocol = 0; */
+	/* mdd->ddev.desc = NULL; */
+	/* mdd->dtc_status = NULL; */
+	/* mdd->ep = NULL; */
+	mdd->speed = USB_SPEED_UNKNOWN;
+	/* mdd->msu_dev = NULL; */
+	/* mdd->wrq = NULL; */
+	mdd->retry_timeout = MDD_RETRY_TIMEOUT_DEF;
+	mdd->max_retry_count = MDD_MAX_RETRY_CNT_DEF;
+	/* mdd->req = NULL; */
+	atomic_set(&mdd->req_ongoing, 0);
+	/* mdd->tdata is all NULL */
+	mdd->transfer_type = MDD_TRANSFER_AUTO;
+	mdd->process_type = MDD_PROC_REM_ALL;
+	mdd->min_transfer = MDD_MIN_TRANSFER_DEF;
+
+	spin_lock_init(&mdd->tdata.lock);
+
+	return mdd;
+}
+
+static void mdd_free_device(struct msu_dvc_dev *mdd)
+{
+	kfree(mdd->ddev.name_add);
+	kfree(mdd);
+}
+
+static struct attribute *mdd_attrs[] = {
+	&dev_attr_mdd_min_transfer.attr,
+	&dev_attr_mdd_retry_timeout.attr,
+	&dev_attr_mdd_max_retry.attr,
+	&dev_attr_mdd_transfer_type.attr,
+	&dev_attr_mdd_proc_type.attr,
+#ifdef MDD_DEBUG
+	&dev_attr_mdd_stats.attr,
+#endif
+	NULL,
+};
+
+static struct attribute_group mdd_attrs_group = {
+	.attrs = mdd_attrs,
+};
+
+void mdd_msc_probe(struct intel_th_device *thdev)
+{
+	int ret;
+	struct msu_dvc_dev *mdd;
+	struct device *dev;
+
+	pr_info("New th-msc device %s\n", dev_name(&thdev->dev));
+	mdd = mdd_alloc_device(dev_name(&thdev->dev));
+
+	if (IS_ERR_OR_NULL(mdd)) {
+		pr_err("Cannot allocate device %s (%ld)\n",
+		       dev_name(&thdev->dev), PTR_ERR(mdd));
+		return;
+	}
+
+	ret = dvct_source_device_add(&mdd->ddev, &mdd_drv);
+	if (ret) {
+		pr_err("Cannot register dvc device %d\n", ret);
+		mdd_free_device(mdd);
+		return;
+	}
+
+	mdd->th_dev = thdev;
+	dev = &mdd->ddev.device;
+
+	mdd->wrq = alloc_workqueue("%s_workqueue", WQ_MEM_RECLAIM | WQ_HIGHPRI,
+				   1, dev_name(&mdd->ddev.device));
+	if (!mdd->wrq) {
+		mdd_err(mdd, "Cannot allocate work queue\n");
+		dvct_source_device_del(&mdd->ddev);
+		mdd_free_device(mdd);
+		return;
+	}
+
+	INIT_WORK(&mdd->work, mdd_work);
+
+	init_waitqueue_head(&mdd->wq);
+
+	/* Attributes */
+	ret = sysfs_create_group(&dev->kobj, &mdd_attrs_group);
+	if (ret)
+		mdd_warn(mdd, "Cannot add attribute group %d\n", ret);
+
+	ret = sysfs_create_link(&dev->kobj, &thdev->dev.kobj, "msc");
+	if (ret)
+		mdd_warn(mdd, "Cannot add msc link %d\n", ret);
+
+	spin_lock(&mdd_devs_lock);
+	list_add(&mdd->mdd_list, &mdd_devs);
+	spin_unlock(&mdd_devs_lock);
+}
+
+void mdd_msc_remove(struct intel_th_device *thdev)
+{
+	struct msu_dvc_dev *mdd = NULL;
+	struct msu_dvc_dev *mdd_iter = NULL;
+
+	spin_lock(&mdd_devs_lock);
+	list_for_each_entry(mdd_iter, &mdd_devs, mdd_list) {
+		if (mdd_iter->th_dev == thdev) {
+			mdd = mdd_iter;
+			break;
+		}
+	}
+
+	if (!mdd) {
+		pr_err("No such mdd device, %s\n", dev_name(&thdev->dev));
+		spin_unlock(&mdd_devs_lock);
+		return;
+	}
+	list_del(&mdd->mdd_list);
+
+	spin_unlock(&mdd_devs_lock);
+
+	flush_workqueue(mdd->wrq);
+	destroy_workqueue(mdd->wrq);
+
+	sysfs_remove_group(&mdd->ddev.device.kobj, &mdd_attrs_group);
+	sysfs_remove_link(&mdd->ddev.device.kobj, "msc");
+
+	mdd->wrq = NULL;
+
+	dvct_source_device_del(&mdd->ddev);
+	mdd_free_device(mdd);
+}
+
+struct msc_probe_rem_cb mdd_msc_cbs = {
+	.probe = mdd_msc_probe,
+	.remove = mdd_msc_remove,
+};
+
+static int __init msu_dvc_init(void)
+{
+	int ret;
+
+	MDD_F_DEBUG();
+	ret = dvct_source_driver_register(&mdd_drv);
+	if (ret) {
+		pr_err("Cannot register dvc driver %d\n", ret);
+		return ret;
+	}
+
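+	/* Hook into the MSU driver; probe also fires for MSCs that already exist */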
+	msc_register_callbacks(mdd_msc_cbs);
+	return 0;
+}
+
+static void __exit msu_dvc_exit(void)
+{
+	MDD_F_DEBUG();
+	msc_unregister_callbacks();
+	dvct_source_driver_unregister(&mdd_drv);
+}
+
+module_init(msu_dvc_init);
+module_exit(msu_dvc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Traian Schiau ");
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index dfb57eaa9f22..19b9e1eac35b 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -31,6 +31,9 @@
 #include
 #endif
+#include
+#include
+
 #include "intel_th.h"
 #include "msu.h"
@@ -97,6 +100,7 @@ struct msc_iter {
  * @single_wrap: single mode wrap occurred
  * @base: buffer's base pointer
  * @base_addr: buffer's base address
+ * @nwsa: next window start address backup
  * @user_count: number of users of the buffer
  * @mmap_count: number of mappings
  * @buf_mutex: mutex to serialize access to buffer-related bits
@@ -106,6 +110,8 @@ struct msc_iter {
  * @mode: MSC operating mode
  * @burst_len: write burst length
  * @index: number of this MSC in the MSU
+ *
+ * @max_blocks: maximum number of blocks in a window
  */
 struct msc {
 	void __iomem *reg_base;
@@ -117,6 +123,7 @@ struct msc {
 	unsigned int single_wrap : 1;
 	void *base;
 	dma_addr_t base_addr;
+	unsigned long nwsa;

 	/* <0: no buffer, 0: no users, >0: active users */
 	atomic_t user_count;
@@ -132,8 +139,99 @@ struct msc {
 	unsigned int mode;
 	unsigned int burst_len;
 	unsigned int index;
+	unsigned int max_blocks;
+};
+
+static struct msc_probe_rem_cb msc_probe_rem_cb;
+
+struct msc_device_instance {
+	struct list_head list;
+	struct intel_th_device *thdev;
 };

+static LIST_HEAD(msc_dev_instances);
+static DEFINE_MUTEX(msc_dev_reg_lock);
+
+/**
+ * msc_register_callbacks() - register MSC probe/remove callbacks
+ * @cbs: the probe/remove callback pair to install
+ *
+ * The probe callback is also invoked for MSC devices that already
+ * exist at registration time.
+ *
+ * Return: 0 on success.
+ */
+int msc_register_callbacks(struct msc_probe_rem_cb cbs)
+{
+	struct msc_device_instance *it;
+
+	mutex_lock(&msc_dev_reg_lock);
+
+	msc_probe_rem_cb.probe = cbs.probe;
+	msc_probe_rem_cb.remove = cbs.remove;
+	/* Call the probe callback for the already existing ones */
+	list_for_each_entry(it, &msc_dev_instances, list) {
+		cbs.probe(it->thdev);
+	}
+
+	mutex_unlock(&msc_dev_reg_lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(msc_register_callbacks);
+
+/**
+ * msc_unregister_callbacks() - remove the registered callbacks
+ */
+void msc_unregister_callbacks(void)
+{
+	mutex_lock(&msc_dev_reg_lock);
+
+	msc_probe_rem_cb.probe = NULL;
+	msc_probe_rem_cb.remove = NULL;
+
+	mutex_unlock(&msc_dev_reg_lock);
+}
+EXPORT_SYMBOL_GPL(msc_unregister_callbacks);
+
+static void msc_add_instance(struct intel_th_device *thdev)
+{
+	struct msc_device_instance *instance;
+
+	instance = kmalloc(sizeof(*instance), GFP_KERNEL);
+	if (!instance)
+		return;
+
+	mutex_lock(&msc_dev_reg_lock);
+
+	instance->thdev = thdev;
+	list_add(&instance->list, &msc_dev_instances);
+
+	if (msc_probe_rem_cb.probe)
+		msc_probe_rem_cb.probe(thdev);
+
+	mutex_unlock(&msc_dev_reg_lock);
+}
+
+static void msc_rm_instance(struct intel_th_device *thdev)
+{
+	struct msc_device_instance *instance = NULL, *it;
+
+	mutex_lock(&msc_dev_reg_lock);
+
+	if (msc_probe_rem_cb.remove)
+		msc_probe_rem_cb.remove(thdev);
+
+	list_for_each_entry(it, &msc_dev_instances, list) {
+		if (it->thdev == thdev) {
+			instance = it;
+			break;
+		}
+	}
+
+	if (instance) {
+		list_del(&instance->list);
+		kfree(instance);
+	} else {
+		pr_warn("msu: cannot remove %p (not found)\n", thdev);
+	}
+
+	mutex_unlock(&msc_dev_reg_lock);
+}
+
 static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
 {
 	/* header hasn't been written */
@@ -147,6 +245,37 @@ static inline bool
msc_block_is_empty(struct msc_block_desc *bdesc)
 	return false;
 }

+/**
+ * msc_current_window() - locate the window in use
+ * @msc: MSC device
+ *
+ * This should only be used in multiblock mode. Caller should hold the
+ * msc::user_count reference.
+ *
+ * Return: the current output window
+ */
+static struct msc_window *msc_current_window(struct msc *msc)
+{
+	struct msc_window *win, *prev = NULL;
+	/* The BAR never changes, so the current window is the one
+	 * before the next one */
+	u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA);
+	unsigned long win_addr = (unsigned long)reg << PAGE_SHIFT;
+
+	if (list_empty(&msc->win_list))
+		return NULL;
+
+	list_for_each_entry(win, &msc->win_list, entry) {
+		if (win->block[0].addr == win_addr)
+			break;
+		prev = win;
+	}
+	if (!prev)
+		prev = list_entry(msc->win_list.prev, struct msc_window, entry);
+
+	return prev;
+}
+
 /**
  * msc_oldest_window() - locate the window with oldest data
  * @msc: MSC device
@@ -159,20 +288,26 @@ static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
 static struct msc_window *msc_oldest_window(struct msc *msc)
 {
 	struct msc_window *win;
-	u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA);
-	unsigned long win_addr = (unsigned long)reg << PAGE_SHIFT;
 	unsigned int found = 0;
+	unsigned long nwsa;

 	if (list_empty(&msc->win_list))
 		return NULL;

+	if (msc->enabled) {
+		u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA);
+
+		nwsa = (unsigned long)reg << PAGE_SHIFT;
+	} else {
+		nwsa = msc->nwsa;
+	}
 	/*
 	 * we might need a radix tree for this, depending on how
 	 * many windows a typical user would allocate; ideally it's
 	 * something like 2, in which case we're good
 	 */
 	list_for_each_entry(win, &msc->win_list, entry) {
-		if (win->block[0].addr == win_addr)
+		if (win->block[0].addr == nwsa)
 			found++;

 		/* skip the empty ones */
@@ -215,6 +350,160 @@ static unsigned int msc_win_oldest_block(struct msc_window *win)
 	return 0;
 }

+/**
+ * msc_max_blocks() - get the maximum number of blocks
+ * @thdev: the sub-device
+ *
+ * Return: the maximum number of blocks per window
+ */
+unsigned int msc_max_blocks(struct intel_th_device *thdev)
+{
+	struct msc *msc = dev_get_drvdata(&thdev->dev);
+
+	return msc->max_blocks;
+}
+EXPORT_SYMBOL_GPL(msc_max_blocks);
+
+/**
+ * msc_block_max_size() - get the size of the biggest block
+ * @thdev: the sub-device
+ *
+ * Return: the size of the biggest block
+ */
+unsigned int msc_block_max_size(struct intel_th_device *thdev)
+{
+	return PAGE_SIZE;
+}
+EXPORT_SYMBOL_GPL(msc_block_max_size);
+
+/**
+ * msc_switch_window() - perform a window switch
+ * @thdev: the sub-device
+ */
+int msc_switch_window(struct intel_th_device *thdev)
+{
+	intel_th_trace_switch(thdev);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(msc_switch_window);
+
+/**
+ * msc_current_win_bytes() - get the current window data size
+ * @thdev: the sub-device
+ *
+ * Get the number of valid data bytes in the current window. Based on this,
+ * the dvc-trace source side can decide whether to request a window switch.
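+ *
+ * Example (illustrative sketch only; the watermark policy is entirely up
+ * to the caller, see mdd_work() in the dvc-trace source driver):
+ *
+ *	if (msc_current_win_bytes(thdev) > watermark) {
+ *		msc_switch_window(thdev);
+ *		nents = msc_sg_oldest_win(thdev, sg_array);
+ *	}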
+ */
+int msc_current_win_bytes(struct intel_th_device *thdev)
+{
+	struct msc *msc = dev_get_drvdata(&thdev->dev);
+	struct msc_window *win;
+	u32 reg_mwp, blk, offset, i;
+	int size = 0;

+	/* proceed only if actively storing in multi-window mode */
+	if (!msc->enabled ||
+	    (msc->mode != MSC_MODE_MULTI) ||
+	    !atomic_inc_unless_negative(&msc->user_count))
+		return -EINVAL;
+
+	win = msc_current_window(msc);
+	reg_mwp = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
+
+	if (!win) {
+		atomic_dec(&msc->user_count);
+		return -EINVAL;
+	}
+
+	blk = 0;
+	while (blk < win->nr_blocks) {
+		if (win->block[blk].addr == (reg_mwp & PAGE_MASK))
+			break;
+		blk++;
+	}
+
+	if (blk >= win->nr_blocks) {
+		atomic_dec(&msc->user_count);
+		return -EINVAL;
+	}
+
+	offset = (reg_mwp & (PAGE_SIZE - 1));
+
+	/* if the window wrapped, count the blocks after the current one */
+	if (msc_block_wrapped(win->block[blk].bdesc)) {
+		for (i = blk + 1; i < win->nr_blocks; i++)
+			size += msc_data_sz(win->block[i].bdesc);
+	}
+
+	for (i = 0; i < blk; i++)
+		size += msc_data_sz(win->block[i].bdesc);
+
+	/* finally, the current one */
+	size += (offset - MSC_BDESC);
+
+	atomic_dec(&msc->user_count);
+	return size;
+}
+EXPORT_SYMBOL_GPL(msc_current_win_bytes);
+
+/**
+ * msc_sg_oldest_win() - get the data from the oldest window
+ * @thdev: the sub-device
+ * @sg_array: destination sg array
+ *
+ * Return: sg count
+ */
+int msc_sg_oldest_win(struct intel_th_device *thdev,
+		      struct scatterlist *sg_array)
+{
+	struct msc *msc = dev_get_drvdata(&thdev->dev);
+	struct msc_window *win, *c_win;
+	struct msc_block_desc *bdesc;
+	unsigned int blk, sg = 0;
+
+	/* proceed only if actively storing in multi-window mode */
+	if (!msc->enabled ||
+	    (msc->mode != MSC_MODE_MULTI) ||
+	    !atomic_inc_unless_negative(&msc->user_count))
+		return -EINVAL;
+
+	win = msc_oldest_window(msc);
+	if (!win) {
+		atomic_dec(&msc->user_count);
+		return 0;
+	}
+
+	c_win = msc_current_window(msc);
+	if (win == c_win) {
+		atomic_dec(&msc->user_count);
+		return 0;
+	}
+
+	blk = msc_win_oldest_block(win);
+
+	/* start with the first block containing only oldest data */
+	if (msc_block_wrapped(win->block[blk].bdesc))
+		if (++blk == win->nr_blocks)
+			blk = 0;
+
+	do {
+		bdesc = win->block[blk].bdesc;
+		sg_set_buf(&sg_array[sg++], bdesc, PAGE_SIZE);
+
+		if (bdesc->hw_tag & MSC_HW_TAG_ENDBIT)
+			break;
+
+		if (++blk == win->nr_blocks)
+			blk = 0;
+
+	} while (sg <= win->nr_blocks);
+
+	sg_mark_end(&sg_array[sg - 1]);
+
+	atomic_dec(&msc->user_count);
+
+	return sg;
+}
+EXPORT_SYMBOL_GPL(msc_sg_oldest_win);
+
 /**
  * msc_is_last_win() - check if a window is the last one for a given MSC
  * @win: window
@@ -486,9 +775,9 @@ static void msc_buffer_clear_hw_header(struct msc *msc)
  * msc_configure() - set up MSC hardware
  * @msc: the MSC device to configure
  *
- * Program storage mode, wrapping, burst length and trace buffer address
- * into a given MSC. Then, enable tracing and set msc::enabled.
- * The latter is serialized on msc::buf_mutex, so make sure to hold it.
+ * Program all relevant registers for a given MSC.
+ * Programming registers must be delayed until this stage since the hardware
+ * will be reset before a capture is started.
*/ static int msc_configure(struct msc *msc) { @@ -523,10 +812,8 @@ static int msc_configure(struct msc *msc) iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI; - intel_th_trace_enable(msc->thdev); msc->enabled = 1; - return 0; } @@ -539,23 +826,14 @@ static int msc_configure(struct msc *msc) */ static void msc_disable(struct msc *msc) { - unsigned long count; u32 reg; lockdep_assert_held(&msc->buf_mutex); intel_th_trace_disable(msc->thdev); - for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH; - count && !(reg & MSCSTS_PLE); count--) { - reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); - cpu_relax(); - } - - if (!count) - dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n"); - if (msc->mode == MSC_MODE_SINGLE) { + reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT); reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP); @@ -564,6 +842,10 @@ static void msc_disable(struct msc *msc) reg, msc->single_sz, msc->single_wrap); } + /* Save next window start address before disabling */ + reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA); + msc->nwsa = (unsigned long)reg << PAGE_SHIFT; + reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); reg &= ~MSC_EN; iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); @@ -572,8 +854,7 @@ static void msc_disable(struct msc *msc) iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR); iowrite32(0, msc->reg_base + REG_MSU_MSC0SIZE); - dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n", - ioread32(msc->reg_base + REG_MSU_MSC0NWSA)); + dev_dbg(msc_dev(msc), "MSCnNWSA: %08lx\n", msc->nwsa); reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg); @@ -741,8 +1022,8 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks) /* Reset the page to write-back before releasing */ set_memory_wb((unsigned long)win->block[i].bdesc, 1); #endif - dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc, - win->block[i].addr); + dma_free_coherent(msc_dev(msc)->parent->parent, size, + win->block[i].bdesc, win->block[i].addr); } kfree(win); @@ -777,7 +1058,7 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win) /* Reset the page to write-back before releasing */ set_memory_wb((unsigned long)win->block[i].bdesc, 1); #endif - dma_free_coherent(msc_dev(win->msc), PAGE_SIZE, + dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, win->block[i].bdesc, win->block[i].addr); } @@ -1069,16 +1350,19 @@ static int intel_th_msc_release(struct inode *inode, struct file *file) } static ssize_t -msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len) +msc_single_to_user(void *in_buf, unsigned long in_pages, + unsigned long in_sz, bool wrapped, + char __user *buf, loff_t off, size_t len) { - unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len; + unsigned long size = in_pages << PAGE_SHIFT, rem = len; unsigned long start = off, tocopy = 0; - if (msc->single_wrap) { - start += msc->single_sz; + /* With wrapping, copy the end of the buffer first */ + if (wrapped) { + start += in_sz; if (start < size) { tocopy = min(rem, size - start); - if (copy_to_user(buf, msc->base + start, tocopy)) + if (copy_to_user(buf, in_buf + start, tocopy)) return -EFAULT; buf += tocopy; @@ -1087,21 +1371,17 @@ msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len) } start &= size - 1; - if (rem) { - tocopy = min(rem, msc->single_sz - start); - if (copy_to_user(buf, msc->base + start, tocopy)) - return -EFAULT; - - rem 
-= tocopy;
-		}
-
-		return len - rem;
 	}

+	/* Copy the beginning of the buffer */
+	if (rem) {
+		tocopy = min(rem, in_sz - start);
+		if (copy_to_user(buf, in_buf + start, tocopy))
+			return -EFAULT;
-	if (copy_to_user(buf, msc->base + start, rem))
-		return -EFAULT;
+		rem -= tocopy;
+	}
-	return len;
+	return len - rem;
 }

 static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
@@ -1131,8 +1411,10 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
 	len = size - off;

 	if (msc->mode == MSC_MODE_SINGLE) {
-		ret = msc_single_to_user(msc, buf, off, len);
-		if (ret >= 0)
+		ret = msc_single_to_user(msc->base, msc->nr_pages,
+					 msc->single_sz, msc->single_wrap,
+					 buf, off, len);
+		if (ret > 0)
 			*ppos += ret;
 	} else if (msc->mode == MSC_MODE_MULTI) {
 		struct msc_win_to_user_struct u = {
@@ -1258,6 +1540,283 @@ static const struct file_operations intel_th_msc_fops = {
 	.owner	= THIS_MODULE,
 };

+static void msc_wait_ple(struct intel_th_device *thdev)
+{
+	struct msc *msc = dev_get_drvdata(&thdev->dev);
+	unsigned long count;
+	u32 reg;
+
+	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
+	     count && !(reg & MSCSTS_PLE); count--) {
+		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
+		cpu_relax();
+	}
+
+	if (!count)
+		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
+}
+
+#ifdef CONFIG_ACPI
+#define ACPI_SIG_NPKT "NPKT"
+
+/* Buffers that may be handed through NPKT ACPI table */
+enum NPKT_BUF_TYPE {
+	NPKT_MTB = 0,
+	NPKT_MTB_REC,
+	NPKT_CSR,
+	NPKT_CSR_REC,
+	NPKT_NBUF
+};
+static const char * const npkt_buf_name[NPKT_NBUF] = {
+	[NPKT_MTB] = "mtb",
+	[NPKT_MTB_REC] = "mtb_rec",
+	[NPKT_CSR] = "csr",
+	[NPKT_CSR_REC] = "csr_rec"
+};
+
+/* CSR capture still active */
+#define NPKT_CSR_USED BIT(4)
+
+struct acpi_npkt_buf {
+	u64 addr;
+	u32 size;
+	u32 offset;
+};
+
+/* NPKT ACPI table */
+struct acpi_table_npkt {
+	struct acpi_table_header header;
+	struct acpi_npkt_buf buffers[NPKT_NBUF];
+	u8 flags;
+} __packed;
+
+/* Trace buffer obtained from NPKT table */
+struct npkt_buf {
+	dma_addr_t phy;
+	void *buf;
+	u32 size;
+	u32 offset;
+	bool wrapped;
+	atomic_t active;
+	struct msc *msc;
+};
+
+static struct npkt_buf *npkt_bufs;
+static struct dentry *npkt_dump_dir;
+static DEFINE_MUTEX(npkt_lock);
+
+/**
+ * Stop the current trace if a buffer was marked with a capture in progress.
+ *
+ * Update buffer write offset and wrap status after stopping the trace.
+ */
+static void stop_buffer_trace(struct npkt_buf *buf)
+{
+	u32 reg, mode;
+	struct msc *msc = buf->msc;
+
+	mutex_lock(&npkt_lock);
+	if (!atomic_read(&buf->active))
+		goto unlock;
+
+	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
+	mode = (reg & MSC_MODE) >> __ffs(MSC_MODE);
+	if (!(reg & MSC_EN) || mode != MSC_MODE_SINGLE) {
+		/* Assume full buffer */
+		pr_warn("NPKT reported CSR in use but not tracing to CSR\n");
+		buf->offset = 0;
+		buf->wrapped = true;
+		atomic_set(&buf->active, 0);
+		goto unlock;
+	}
+
+	/* The hub must be able to stop a capture not started by the driver */
+	intel_th_trace_disable(msc->thdev);
+
+	/* Update offset and wrap status */
+	reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
+	buf->offset = reg - (u32)buf->phy;
+	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
+	buf->wrapped = !!(reg & MSCSTS_WRAPSTAT);
+	atomic_set(&buf->active, 0);
+
+unlock:
+	mutex_unlock(&npkt_lock);
+}
+
+/**
+ * Copy re-ordered data from an NPKT buffer to a user buffer.
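+ *
+ * With the wrap flag set, the buffer layout is
+ *
+ *	[ newest data | write offset | oldest data ]
+ *
+ * so msc_single_to_user() copies the tail (oldest data) first, then the
+ * head, giving the user a chronologically ordered stream.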
+ */
+static ssize_t read_npkt_dump_buf(struct file *file, char __user *user_buf,
+				  size_t count, loff_t *ppos)
+{
+	struct npkt_buf *buf = file->private_data;
+	size_t size = buf->size;
+	loff_t off = *ppos;
+	ssize_t ret;
+
+	if (atomic_read(&buf->active))
+		stop_buffer_trace(buf);
+
+	if (off >= size)
+		return 0;
+
+	ret = msc_single_to_user(buf->buf, size >> PAGE_SHIFT,
+				 buf->offset, buf->wrapped,
+				 user_buf, off, count);
+	if (ret > 0)
+		*ppos += ret;
+
+	return ret;
+}
+
+static const struct file_operations npkt_dump_buf_fops = {
+	.read = read_npkt_dump_buf,
+	.open = simple_open,
+	.llseek = noop_llseek,
+};
+
+/**
+ * Prepare a buffer with remapped address for a given NPKT buffer and add
+ * an entry for it in debugfs.
+ */
+static void npkt_bind_buffer(enum NPKT_BUF_TYPE type,
+			     struct acpi_npkt_buf *abuf, u8 flags,
+			     struct npkt_buf *buf, struct msc *msc)
+{
+	const char *name = npkt_buf_name[type];
+
+	/* No buffer handed through ACPI */
+	if (!abuf->addr || !abuf->size)
+		return;
+
+	/* Only expect multiples of page size */
+	if (abuf->size & (PAGE_SIZE - 1)) {
+		pr_warn("invalid size 0x%x for buffer %s\n",
+			abuf->size, name);
+		return;
+	}
+
+	buf->size = abuf->size;
+	buf->offset = abuf->offset;
+	buf->wrapped = !!(flags & BIT(type));
+	/* CSR may still be active */
+	if (type == NPKT_CSR && (flags & NPKT_CSR_USED)) {
+		atomic_set(&buf->active, 1);
+		buf->msc = msc;
+	}
+
+	buf->phy = abuf->addr;
+	buf->buf = (__force void *)ioremap(buf->phy, buf->size);
+	if (!buf->buf) {
+		pr_err("ioremap failed for buffer %s 0x%llx size:0x%x\n",
+		       name, buf->phy, buf->size);
+		return;
+	}
+
+	debugfs_create_file(name, S_IRUGO, npkt_dump_dir, buf,
+			    &npkt_dump_buf_fops);
+}
+
+static void npkt_bind_buffers(struct acpi_table_npkt *npkt,
+			      struct npkt_buf *bufs, struct msc *msc)
+{
+	int i;
+
+	for (i = 0; i < NPKT_NBUF; i++)
+		npkt_bind_buffer(i, &npkt->buffers[i], npkt->flags,
+				 &bufs[i], msc);
+}
+
+static void npkt_unbind_buffers(struct npkt_buf *bufs)
+{
+	int i;
+
+	for (i = 0; i < NPKT_NBUF; i++)
+		if (bufs[i].buf)
+			iounmap((__force void __iomem *)bufs[i].buf);
+}
+
+/**
+ * Prepare debugfs access to NPKT buffers.
+ */
+static void intel_th_npkt_init(struct msc *msc)
+{
+	acpi_status status;
+	struct acpi_table_npkt *npkt;
+
+	/* Associate NPKT to msc0 */
+	if (npkt_bufs || msc->index != 0)
+		return;
+
+	status = acpi_get_table(ACPI_SIG_NPKT, 0,
+				(struct acpi_table_header **)&npkt);
+	if (ACPI_FAILURE(status)) {
+		pr_warn("Failed to get NPKT table, %s\n",
+			acpi_format_exception(status));
+		return;
+	}
+
+	npkt_bufs = kcalloc(NPKT_NBUF, sizeof(*npkt_bufs), GFP_KERNEL);
+	if (!npkt_bufs)
+		return;
+
+	npkt_dump_dir = debugfs_create_dir("npkt_dump", NULL);
+	if (!npkt_dump_dir) {
+		pr_err("npkt_dump debugfs create dir failed\n");
+		goto free_npkt_bufs;
+	}
+
+	npkt_bind_buffers(npkt, npkt_bufs, msc);
+
+	return;
+
+free_npkt_bufs:
+	kfree(npkt_bufs);
+	npkt_bufs = NULL;
+}
+
+/**
+ * Remove debugfs access to NPKT buffers and release resources.
+ */
+static void intel_th_npkt_remove(struct msc *msc)
+{
+	/* Only clean for msc 0 if necessary */
+	if (!npkt_bufs || msc->index != 0)
+		return;
+
+	npkt_unbind_buffers(npkt_bufs);
+	debugfs_remove_recursive(npkt_dump_dir);
+	kfree(npkt_bufs);
+	npkt_bufs = NULL;
+}
+
+/**
+ * First trace callback.
+ *
+ * If NPKT notified a CSR capture is in progress, stop it and update buffer
+ * write offset and wrap status.
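+ * This keeps the debugfs dump of the CSR buffer consistent with what the
+ * firmware captured before the driver took over the hardware.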
+ */ +static void intel_th_msc_first_trace(struct intel_th_device *thdev) +{ + struct device *dev = &thdev->dev; + struct msc *msc = dev_get_drvdata(dev); + struct npkt_buf *buf; + + if (!npkt_bufs || msc->index != 0) + return; + + buf = &npkt_bufs[NPKT_CSR]; + if (atomic_read(&buf->active)) + stop_buffer_trace(buf); +} + +#else /* !CONFIG_ACPI */ +static inline void intel_th_npkt_init(struct msc *msc) {} +static inline void intel_th_npkt_remove(struct msc *msc) {} +#define intel_th_msc_first_trace NULL +#endif /* !CONFIG_ACPI */ + static int intel_th_msc_init(struct msc *msc) { atomic_set(&msc->user_count, -1); @@ -1271,6 +1830,8 @@ static int intel_th_msc_init(struct msc *msc) (ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >> __ffs(MSC_LEN); + msc->thdev->output.wait_empty = msc_wait_ple; + return 0; } @@ -1394,6 +1955,8 @@ nr_pages_store(struct device *dev, struct device_attribute *attr, if (ret) return ret; + msc->max_blocks = 0; + /* scan the comma-separated list of allocation sizes */ end = memchr(buf, '\n', len); if (end) @@ -1428,6 +1991,9 @@ nr_pages_store(struct device *dev, struct device_attribute *attr, win = rewin; win[nr_wins - 1] = val; + msc->max_blocks = + (val > msc->max_blocks) ? val : msc->max_blocks; + if (!end) break; @@ -1447,10 +2013,32 @@ nr_pages_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RW(nr_pages); +static ssize_t +win_switch_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) +{ + struct msc *msc = dev_get_drvdata(dev); + unsigned long val; + int ret; + + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; + + if (val != 1) + return -EINVAL; + + intel_th_trace_switch(msc->thdev); + return size; +} + +static DEVICE_ATTR_WO(win_switch); + static struct attribute *msc_output_attrs[] = { &dev_attr_wrap.attr, &dev_attr_mode.attr, &dev_attr_nr_pages.attr, + &dev_attr_win_switch.attr, NULL, }; @@ -1487,28 +2075,25 @@ static int intel_th_msc_probe(struct intel_th_device *thdev) if (err) return err; + msc->max_blocks = 0; dev_set_drvdata(dev, msc); + intel_th_npkt_init(msc); + msc_add_instance(thdev); + return 0; } static void intel_th_msc_remove(struct intel_th_device *thdev) { struct msc *msc = dev_get_drvdata(&thdev->dev); - int ret; - - intel_th_msc_deactivate(thdev); - - /* - * Buffers should not be used at this point except if the - * output character device is still open and the parent - * device gets detached from its bus, which is a FIXME. 
-	 */
-	ret = msc_buffer_free_unless_used(msc);
-	WARN_ON_ONCE(ret);
+	intel_th_npkt_remove(msc);
+	msc_rm_instance(thdev);
+	sysfs_remove_group(&thdev->dev.kobj, &msc_output_group);
 }

 static struct intel_th_driver intel_th_msc_driver = {
+	.first_trace	= intel_th_msc_first_trace,
 	.probe	= intel_th_msc_probe,
 	.remove	= intel_th_msc_remove,
 	.activate	= intel_th_msc_activate,
diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h
index 9b710e4aa98a..bd6933b80e68 100644
--- a/drivers/hwtracing/intel_th/msu.h
+++ b/drivers/hwtracing/intel_th/msu.h
@@ -16,6 +16,8 @@
 #ifndef __INTEL_TH_MSU_H__
 #define __INTEL_TH_MSU_H__

+#include "intel_th.h"
+
 enum {
 	REG_MSU_MSUPARAMS	= 0x0000,
 	REG_MSU_MSUSTS		= 0x0008,
@@ -113,4 +115,19 @@ static inline bool msc_block_last_written(struct msc_block_desc *bdesc)
 /* waiting for Pipeline Empty bit(s) to assert for MSC */
 #define MSC_PLE_WAITLOOP_DEPTH	10000

+/* API */
+struct msc_probe_rem_cb {
+	void (*probe)(struct intel_th_device *thdev);
+	void (*remove)(struct intel_th_device *thdev);
+};
+
+int msc_register_callbacks(struct msc_probe_rem_cb cbs);
+void msc_unregister_callbacks(void);
+unsigned int msc_max_blocks(struct intel_th_device *thdev);
+unsigned int msc_block_max_size(struct intel_th_device *thdev);
+int msc_switch_window(struct intel_th_device *thdev);
+int msc_sg_oldest_win(struct intel_th_device *thdev,
+		      struct scatterlist *sg_array);
+int msc_current_win_bytes(struct intel_th_device *thdev);
+
 #endif /* __INTEL_TH_MSU_H__ */
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index c2a2ce8ee541..b48d54a75f51 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -69,6 +69,35 @@ static void intel_th_pci_deactivate(struct intel_th *th)
 	if (err)
 		dev_err(&pdev->dev, "failed to read NPKDSC register\n");
 }
+/*
+ * PCI Configuration Registers
+ */
+enum {
+	REG_PCI_NPKDSC	= 0x80, /* NPK Device Specific Control */
+	REG_PCI_NPKDSD	= 0x90, /* NPK Device Specific Defeature */
+};
+
+/* Trace Hub software reset */
+#define NPKDSC_RESET	BIT(1)
+
+/* Force On */
+#define NPKDSD_FON	BIT(0)
+
+static void intel_th_pci_reset(struct intel_th *th)
+{
+	struct pci_dev *pdev = container_of(th->dev, struct pci_dev, dev);
+	u32 val;
+
+	/* Software reset */
+	pci_read_config_dword(pdev, REG_PCI_NPKDSC, &val);
+	val |= NPKDSC_RESET;
+	pci_write_config_dword(pdev, REG_PCI_NPKDSC, val);
+
+	/* Always set FON for S0ix flow */
+	pci_read_config_dword(pdev, REG_PCI_NPKDSD, &val);
+	val |= NPKDSD_FON;
+	pci_write_config_dword(pdev, REG_PCI_NPKDSD, val);
+}

 static int intel_th_pci_probe(struct pci_dev *pdev,
 			      const struct pci_device_id *id)
@@ -86,7 +115,7 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
 		return err;

 	th = intel_th_alloc(&pdev->dev, drvdata, pdev->resource,
-			    DEVICE_COUNT_RESOURCE, pdev->irq);
+			    DEVICE_COUNT_RESOURCE, pdev->irq, intel_th_pci_reset);
 	if (IS_ERR(th))
 		return PTR_ERR(th);
@@ -173,11 +202,34 @@ static const struct pci_device_id intel_th_pci_id_table[] = {

 MODULE_DEVICE_TABLE(pci, intel_th_pci_id_table);

+static int intel_th_suspend(struct device *dev)
+{
+	/*
+	 * Stub the call to avoid disabling the device.
+	 * Suspend is fully handled by firmware.
+	 */
+	return 0;
+}
+
+static int intel_th_resume(struct device *dev)
+{
+	/* Firmware has already restored the device state.
*/ + return 0; +} + +static const struct dev_pm_ops intel_th_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(intel_th_suspend, + intel_th_resume) +}; + static struct pci_driver intel_th_pci_driver = { .name = DRIVER_NAME, .id_table = intel_th_pci_id_table, .probe = intel_th_pci_probe, .remove = intel_th_pci_remove, + .driver = { + .pm = &intel_th_pm_ops, + }, }; module_pci_driver(intel_th_pci_driver); diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c index e96a1fcb57b2..10f08f7b67d1 100644 --- a/drivers/hwtracing/intel_th/pti.c +++ b/drivers/hwtracing/intel_th/pti.c @@ -169,8 +169,6 @@ static int intel_th_pti_activate(struct intel_th_device *thdev) iowrite32(ctl, pti->base + REG_PTI_CTL); - intel_th_trace_enable(thdev); - return 0; } diff --git a/drivers/hwtracing/stm/console.c b/drivers/hwtracing/stm/console.c index c9d9a8d2ff52..00f7bbbeab79 100644 --- a/drivers/hwtracing/stm/console.c +++ b/drivers/hwtracing/stm/console.c @@ -39,8 +39,22 @@ static void stm_console_write(struct console *con, const char *buf, unsigned len) { struct stm_console *sc = container_of(con, struct stm_console, console); + static char svenbuf[1024]; + char *p = svenbuf; + unsigned int towrite; + u16 textlen; + const u32 sven_header = 0x01000242; - stm_source_write(&sc->data, 0, buf, len); + textlen = min_t(u16, len, 1024 - sizeof(sven_header) - sizeof(textlen)); + towrite = textlen + sizeof(sven_header) + sizeof(textlen); + + memcpy(p, &sven_header, sizeof(sven_header)); + p += sizeof(sven_header); + memcpy(p, &textlen, sizeof(textlen)); + p += sizeof(textlen); + memcpy(p, buf, textlen); + + stm_source_write(&sc->data, 0, svenbuf, towrite); } static int stm_console_link(struct stm_source_data *data) diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index f129869e05a9..736862967e32 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "stm.h" #include @@ -682,7 +683,7 @@ static void stm_device_release(struct device *dev) { struct stm_device *stm = to_stm_device(dev); - kfree(stm); + vfree(stm); } int stm_register_device(struct device *parent, struct stm_data *stm_data, @@ -699,7 +700,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, return -EINVAL; nmasters = stm_data->sw_end - stm_data->sw_start + 1; - stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL); + stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *)); if (!stm) return -ENOMEM; @@ -752,7 +753,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, /* matches device_initialize() above */ put_device(&stm->dev); err_free: - kfree(stm); + vfree(stm); return err; } diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index cd07a69e2e93..44deae78913e 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -50,6 +50,9 @@ #define BCM2835_I2C_S_CLKT BIT(9) #define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */ +#define BCM2835_I2C_FEDL_SHIFT 16 +#define BCM2835_I2C_REDL_SHIFT 0 + #define BCM2835_I2C_CDIV_MIN 0x0002 #define BCM2835_I2C_CDIV_MAX 0xFFFE @@ -81,7 +84,7 @@ static inline u32 bcm2835_i2c_readl(struct bcm2835_i2c_dev *i2c_dev, u32 reg) static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev) { - u32 divider; + u32 divider, redl, fedl; divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk), i2c_dev->bus_clk_rate); @@ -100,6 +103,22 @@ static int bcm2835_i2c_set_divider(struct 
bcm2835_i2c_dev *i2c_dev) bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider); + /* + * Number of core clocks to wait after falling edge before + * outputting the next data bit. Note that both FEDL and REDL + * can't be greater than CDIV/2. + */ + fedl = max(divider / 16, 1u); + + /* + * Number of core clocks to wait after rising edge before + * sampling the next incoming data bit. + */ + redl = max(divider / 4, 1u); + + bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DEL, + (fedl << BCM2835_I2C_FEDL_SHIFT) | + (redl << BCM2835_I2C_REDL_SHIFT)); return 0; } diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 418c233075d3..4915fa303a7e 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -209,6 +209,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) /* Enable the adapter */ __i2c_dw_enable(dev, true); + /* Dummy read to avoid the register getting stuck on Bay Trail */ + dw_readl(dev, DW_IC_ENABLE_STATUS); + /* Clear and enable interrupts */ dw_readl(dev, DW_IC_CLR_INTR); dw_writel(dev, DW_IC_INTR_MASTER_MASK, DW_IC_INTR_MASK); diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 9e12a53ef7b8..ba8df2fde1b2 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -965,8 +965,6 @@ static void i801_enable_host_notify(struct i2c_adapter *adapter) if (!(priv->features & FEATURE_HOST_NOTIFY)) return; - priv->original_slvcmd = inb_p(SMBSLVCMD(priv)); - if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd)) outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd, SMBSLVCMD(priv)); @@ -1614,9 +1612,16 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) outb_p(inb_p(SMBAUXCTL(priv)) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv)); + /* Remember original Host Notify setting */ + if (priv->features & FEATURE_HOST_NOTIFY) + priv->original_slvcmd = inb_p(SMBSLVCMD(priv)); + /* Default timeout in interrupt mode: 200 ms */ priv->adapter.timeout = HZ / 5; + if (dev->irq == IRQ_NOTCONNECTED) + priv->features &= ~FEATURE_IRQ; + if (priv->features & FEATURE_IRQ) { u16 pcictl, pcists; @@ -1695,6 +1700,15 @@ static void i801_remove(struct pci_dev *dev) */ } +static void i801_shutdown(struct pci_dev *dev) +{ + struct i801_priv *priv = pci_get_drvdata(dev); + + /* Restore config registers to avoid hard hang on some systems */ + i801_disable_host_notify(priv); + pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); +} + #ifdef CONFIG_PM static int i801_suspend(struct device *dev) { @@ -1724,6 +1738,7 @@ static struct pci_driver i801_driver = { .id_table = i801_ids, .probe = i801_probe, .remove = i801_remove, + .shutdown = i801_shutdown, .driver = { .pm = &i801_pm_ops, }, diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index f96830ffd9f1..75c6b98585ba 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -376,6 +376,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx, goto err_desc; } + reinit_completion(&dma->cmd_complete); txdesc->callback = i2c_imx_dma_callback; txdesc->callback_param = i2c_imx; if (dma_submit_error(dmaengine_submit(txdesc))) { @@ -619,7 +620,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx, * The first byte must be transmitted by the CPU. 
*/ imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR); - reinit_completion(&i2c_imx->dma->cmd_complete); time_left = wait_for_completion_timeout( &i2c_imx->dma->cmd_complete, msecs_to_jiffies(DMA_TIMEOUT)); @@ -678,7 +678,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, if (result) return result; - reinit_completion(&i2c_imx->dma->cmd_complete); time_left = wait_for_completion_timeout( &i2c_imx->dma->cmd_complete, msecs_to_jiffies(DMA_TIMEOUT)); diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index a832c45276a4..b0fb97823d6a 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -844,12 +844,16 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, */ if (of_device_is_compatible(np, "marvell,mv78230-i2c")) { drv_data->offload_enabled = true; - drv_data->errata_delay = true; + /* The delay is only needed in standard mode (100kHz) */ + if (bus_freq <= 100000) + drv_data->errata_delay = true; } if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) { drv_data->offload_enabled = false; - drv_data->errata_delay = true; + /* The delay is only needed in standard mode (100kHz) */ + if (bus_freq <= 100000) + drv_data->errata_delay = true; } if (of_device_is_compatible(np, "allwinner,sun6i-a31-i2c")) diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c index 2aa0e83174c5..dae8ac618a52 100644 --- a/drivers/i2c/busses/i2c-pmcmsp.c +++ b/drivers/i2c/busses/i2c-pmcmsp.c @@ -564,10 +564,10 @@ static int pmcmsptwi_master_xfer(struct i2c_adapter *adap, * TODO: We could potentially loop and retry in the case * of MSP_TWI_XFER_TIMEOUT. */ - return -1; + return -EIO; } - return 0; + return num; } static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter) diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c index 25fcc3c1e32b..4053259bccb8 100644 --- a/drivers/i2c/busses/i2c-sprd.c +++ b/drivers/i2c/busses/i2c-sprd.c @@ -86,6 +86,7 @@ struct sprd_i2c { u32 count; int irq; int err; + bool is_suspended; }; static void sprd_i2c_set_count(struct sprd_i2c *i2c_dev, u32 count) @@ -283,6 +284,9 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap, struct sprd_i2c *i2c_dev = i2c_adap->algo_data; int im, ret; + if (i2c_dev->is_suspended) + return -EBUSY; + ret = pm_runtime_get_sync(i2c_dev->dev); if (ret < 0) return ret; @@ -364,13 +368,12 @@ static irqreturn_t sprd_i2c_isr_thread(int irq, void *dev_id) struct sprd_i2c *i2c_dev = dev_id; struct i2c_msg *msg = i2c_dev->msg; bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK); - u32 i2c_count = readl(i2c_dev->base + I2C_COUNT); u32 i2c_tran; if (msg->flags & I2C_M_RD) i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD; else - i2c_tran = i2c_count; + i2c_tran = i2c_dev->count; /* * If we got one ACK from slave when writing data, and we did not @@ -408,14 +411,13 @@ static irqreturn_t sprd_i2c_isr(int irq, void *dev_id) { struct sprd_i2c *i2c_dev = dev_id; struct i2c_msg *msg = i2c_dev->msg; - u32 i2c_count = readl(i2c_dev->base + I2C_COUNT); bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK); u32 i2c_tran; if (msg->flags & I2C_M_RD) i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD; else - i2c_tran = i2c_count; + i2c_tran = i2c_dev->count; /* * If we did not get one ACK from slave when writing data, then we @@ -586,11 +588,23 @@ static int sprd_i2c_remove(struct platform_device *pdev) static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev) { + struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev); + 
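+	/* take the adapter lock so any in-flight transfer completes first */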
+ i2c_lock_adapter(&i2c_dev->adap); + i2c_dev->is_suspended = true; + i2c_unlock_adapter(&i2c_dev->adap); + return pm_runtime_force_suspend(pdev); } static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev) { + struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev); + + i2c_lock_adapter(&i2c_dev->adap); + i2c_dev->is_suspended = false; + i2c_unlock_adapter(&i2c_dev->adap); + return pm_runtime_force_resume(pdev); } diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index d4a6e9c2e9aa..124f9b1cf1b0 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -887,6 +887,11 @@ static int stm32f7_i2c_probe(struct platform_device *pdev) } setup = of_device_get_match_data(&pdev->dev); + if (!setup) { + dev_err(&pdev->dev, "Can't get device data\n"); + ret = -ENODEV; + goto clk_free; + } i2c_dev->setup = *setup; ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns", diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 60292d243e24..ec2d11af6c78 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -547,6 +547,14 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev) { u32 cnfg; + /* + * NACK interrupt is generated before the I2C controller generates + * the STOP condition on the bus. So wait for 2 clock periods + * before disabling the controller so that the STOP condition has + * been delivered properly. + */ + udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate)); + cnfg = i2c_readl(i2c_dev, I2C_CNFG); if (cnfg & I2C_CNFG_PACKET_MODE_EN) i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG); @@ -708,15 +716,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, if (likely(i2c_dev->msg_err == I2C_ERR_NONE)) return 0; - /* - * NACK interrupt is generated before the I2C controller generates - * the STOP condition on the bus. So wait for 2 clock periods - * before resetting the controller so that the STOP condition has - * been delivered properly. 
- */ - if (i2c_dev->msg_err == I2C_ERR_NO_ACK) - udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate)); - tegra_i2c_init(i2c_dev); if (i2c_dev->msg_err == I2C_ERR_NO_ACK) { if (msg->flags & I2C_M_IGNORE_NAK) diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c index e4be86b3de9a..7235c7302bb7 100644 --- a/drivers/i2c/busses/i2c-viperboard.c +++ b/drivers/i2c/busses/i2c-viperboard.c @@ -337,7 +337,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs, } mutex_unlock(&vb->lock); } - return 0; + return num; error: mutex_unlock(&vb->lock); return error; diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c index 31186ead5a40..509a6007cdf6 100644 --- a/drivers/i2c/i2c-boardinfo.c +++ b/drivers/i2c/i2c-boardinfo.c @@ -86,6 +86,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig property_entries_dup(info->properties); if (IS_ERR(devinfo->board_info.properties)) { status = PTR_ERR(devinfo->board_info.properties); + kfree(devinfo); break; } } @@ -98,6 +99,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig GFP_KERNEL); if (!devinfo->board_info.resources) { status = -ENOMEM; + kfree(devinfo); break; } } diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 56e46581b84b..6f2fe63e8f5a 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -808,8 +808,11 @@ EXPORT_SYMBOL_GPL(i2c_new_device); */ void i2c_unregister_device(struct i2c_client *client) { - if (client->dev.of_node) + if (client->dev.of_node) { of_node_clear_flag(client->dev.of_node, OF_POPULATED); + of_node_put(client->dev.of_node); + } + if (ACPI_COMPANION(&client->dev)) acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev)); device_unregister(&client->dev); diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c index 10f00a82ec9d..e54a9b835b62 100644 --- a/drivers/i2c/i2c-core-smbus.c +++ b/drivers/i2c/i2c-core-smbus.c @@ -396,16 +396,17 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, the underlying bus driver */ break; case I2C_SMBUS_I2C_BLOCK_DATA: + if (data->block[0] > I2C_SMBUS_BLOCK_MAX) { + dev_err(&adapter->dev, "Invalid block %s size %d\n", + read_write == I2C_SMBUS_READ ? "read" : "write", + data->block[0]); + return -EINVAL; + } + if (read_write == I2C_SMBUS_READ) { msg[1].len = data->block[0]; } else { msg[0].len = data->block[0] + 1; - if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) { - dev_err(&adapter->dev, - "Invalid block write size %d\n", - data->block[0]); - return -EINVAL; - } for (i = 1; i <= data->block[0]; i++) msgbuf0[i] = data->block[i]; } diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index 14d1e7d9a1d6..0e6bc631a1ca 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -282,7 +282,7 @@ int ide_cd_expiry(ide_drive_t *drive) struct request *rq = drive->hwif->rq; unsigned long wait = 0; - debug_log("%s: rq->cmd[0]: 0x%x\n", __func__, rq->cmd[0]); + debug_log("%s: scsi_req(rq)->cmd[0]: 0x%x\n", __func__, scsi_req(rq)->cmd[0]); /* * Some commands are *slow* and normally take a long time to complete. 
@@ -463,7 +463,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) return ide_do_reset(drive); } - debug_log("[cmd %x]: check condition\n", rq->cmd[0]); + debug_log("[cmd %x]: check condition\n", scsi_req(rq)->cmd[0]); /* Retry operation */ ide_retry_pc(drive); @@ -531,7 +531,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) ide_pad_transfer(drive, write, bcount); debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n", - rq->cmd[0], done, bcount, scsi_req(rq)->resid_len); + scsi_req(rq)->cmd[0], done, bcount, scsi_req(rq)->resid_len); /* And set the interrupt handler again */ ide_set_handler(drive, ide_pc_intr, timeout); diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 6ff0be8cbdc9..4de45db76756 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -1614,6 +1614,8 @@ static int idecd_open(struct block_device *bdev, fmode_t mode) struct cdrom_info *info; int rc = -ENXIO; + check_disk_change(bdev); + mutex_lock(&ide_cd_mutex); info = ide_cd_get(bdev->bd_disk); if (!info) diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index d127ace6aa57..6ee866fcc5dd 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c @@ -244,7 +244,7 @@ struct chs_geom { static unsigned int ide_disks; static struct chs_geom ide_disks_chs[MAX_HWIFS * MAX_DRIVES]; -static int ide_set_disk_chs(const char *str, struct kernel_param *kp) +static int ide_set_disk_chs(const char *str, const struct kernel_param *kp) { unsigned int a, b, c = 0, h = 0, s = 0, i, j = 1; @@ -328,7 +328,7 @@ static void ide_dev_apply_params(ide_drive_t *drive, u8 unit) static unsigned int ide_ignore_cable; -static int ide_set_ignore_cable(const char *s, struct kernel_param *kp) +static int ide_set_ignore_cable(const char *s, const struct kernel_param *kp) { int i, j = 1; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index f0b06b14e782..16249b0953ff 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1061,7 +1061,7 @@ static const struct idle_cpu idle_cpu_dnv = { }; #define ICPU(model, cpu) \ - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&cpu } static const struct x86_cpu_id intel_idle_ids[] __initconst = { ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem), @@ -1125,6 +1125,11 @@ static int __init intel_idle_probe(void) return -ENODEV; } + if (!boot_cpu_has(X86_FEATURE_MWAIT)) { + pr_debug("Please enable MWAIT in BIOS SETUP\n"); + return -ENODEV; + } + if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) return -ENODEV; diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c index 98fbb628d5bd..38411e1c155b 100644 --- a/drivers/iio/accel/kxsd9-i2c.c +++ b/drivers/iio/accel/kxsd9-i2c.c @@ -63,3 +63,6 @@ static struct i2c_driver kxsd9_i2c_driver = { .id_table = kxsd9_i2c_id, }; module_i2c_driver(kxsd9_i2c_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("KXSD9 accelerometer I2C interface"); diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c index 39ab210c44f6..565f7d8d3304 100644 --- a/drivers/iio/accel/sca3000.c +++ b/drivers/iio/accel/sca3000.c @@ -1277,7 +1277,7 @@ static int sca3000_configure_ring(struct iio_dev *indio_dev) { struct iio_buffer *buffer; - buffer = iio_kfifo_allocate(); + buffer = devm_iio_kfifo_allocate(&indio_dev->dev); if (!buffer) return -ENOMEM; @@ -1287,11 +1287,6 @@ static int sca3000_configure_ring(struct iio_dev *indio_dev) return 0; } -static void sca3000_unconfigure_ring(struct iio_dev 
*indio_dev) -{ - iio_kfifo_free(indio_dev->buffer); -} - static inline int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state) { @@ -1547,8 +1542,6 @@ static int sca3000_remove(struct spi_device *spi) if (spi->irq) free_irq(spi->irq, indio_dev); - sca3000_unconfigure_ring(indio_dev); - return 0; } diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 752856b3a849..bef1f96c177c 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -164,7 +164,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_int2 = 0x00, .addr_ihl = 0x25, .mask_ihl = 0x02, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x23, @@ -236,7 +239,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_ihl = 0x80, .addr_od = 0x22, .mask_od = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x23, @@ -318,7 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_int2 = 0x00, .addr_ihl = 0x23, .mask_ihl = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, .ig1 = { .en_addr = 0x23, .en_mask = 0x08, @@ -389,7 +398,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .drdy_irq = { .addr = 0x21, .mask_int1 = 0x04, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x21, @@ -451,7 +463,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_ihl = 0x80, .addr_od = 0x22, .mask_od = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x21, @@ -569,7 +584,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .drdy_irq = { .addr = 0x21, .mask_int1 = 0x04, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x21, @@ -640,7 +658,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_int2 = 0x00, .addr_ihl = 0x25, .mask_ihl = 0x02, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .sim = { .addr = 0x23, @@ -773,7 +794,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev) if (!pdata) pdata = (struct st_sensors_platform_data *)&default_accel_pdata; - err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data); + err = st_sensors_init_sensor(indio_dev, pdata); if (err < 0) goto st_accel_power_off; diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 1d13bf03c758..369a2c632e46 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -158,6 +158,7 @@ config AT91_SAMA5D2_ADC tristate "Atmel AT91 SAMA5D2 ADC" depends on ARCH_AT91 || COMPILE_TEST depends on HAS_IOMEM + select IIO_BUFFER select IIO_TRIGGERED_BUFFER help Say yes here to build support for Atmel SAMA5D2 ADC which is diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c index 34e353c43ac8..677f812f372a 100644 --- a/drivers/iio/adc/ad7791.c +++ b/drivers/iio/adc/ad7791.c @@ -244,58 +244,9 @@ static int 
ad7791_read_raw(struct iio_dev *indio_dev, return -EINVAL; } -static const char * const ad7791_sample_freq_avail[] = { - [AD7791_FILTER_RATE_120] = "120", - [AD7791_FILTER_RATE_100] = "100", - [AD7791_FILTER_RATE_33_3] = "33.3", - [AD7791_FILTER_RATE_20] = "20", - [AD7791_FILTER_RATE_16_6] = "16.6", - [AD7791_FILTER_RATE_16_7] = "16.7", - [AD7791_FILTER_RATE_13_3] = "13.3", - [AD7791_FILTER_RATE_9_5] = "9.5", -}; - -static ssize_t ad7791_read_frequency(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7791_state *st = iio_priv(indio_dev); - unsigned int rate = st->filter & AD7791_FILTER_RATE_MASK; - - return sprintf(buf, "%s\n", ad7791_sample_freq_avail[rate]); -} - -static ssize_t ad7791_write_frequency(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7791_state *st = iio_priv(indio_dev); - int i, ret; - - i = sysfs_match_string(ad7791_sample_freq_avail, buf); - if (i < 0) - return i; - - ret = iio_device_claim_direct_mode(indio_dev); - if (ret) - return ret; - st->filter &= ~AD7791_FILTER_RATE_MASK; - st->filter |= i; - ad_sd_write_reg(&st->sd, AD7791_REG_FILTER, sizeof(st->filter), - st->filter); - iio_device_release_direct_mode(indio_dev); - - return len; -} - -static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, - ad7791_read_frequency, - ad7791_write_frequency); - static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("120 100 33.3 20 16.7 16.6 13.3 9.5"); static struct attribute *ad7791_attributes[] = { - &iio_dev_attr_sampling_frequency.dev_attr.attr, &iio_const_attr_sampling_frequency_available.dev_attr.attr, NULL }; diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index 47c3d7f32900..07246a6037e3 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c @@ -348,55 +348,6 @@ static const u16 ad7793_sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39, static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0, 33, 0, 17, 16, 12, 10, 8, 6, 4}; -static ssize_t ad7793_read_frequency(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7793_state *st = iio_priv(indio_dev); - - return sprintf(buf, "%d\n", - st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]); -} - -static ssize_t ad7793_write_frequency(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7793_state *st = iio_priv(indio_dev); - long lval; - int i, ret; - - ret = kstrtol(buf, 10, &lval); - if (ret) - return ret; - - if (lval == 0) - return -EINVAL; - - for (i = 0; i < 16; i++) - if (lval == st->chip_info->sample_freq_avail[i]) - break; - if (i == 16) - return -EINVAL; - - ret = iio_device_claim_direct_mode(indio_dev); - if (ret) - return ret; - st->mode &= ~AD7793_MODE_RATE(-1); - st->mode |= AD7793_MODE_RATE(i); - ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode); - iio_device_release_direct_mode(indio_dev); - - return len; -} - -static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, - ad7793_read_frequency, - ad7793_write_frequency); - static IIO_CONST_ATTR_SAMP_FREQ_AVAIL( "470 242 123 62 50 39 33 19 17 16 12 10 8 6 4"); @@ -424,7 +375,6 @@ static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available, ad7793_show_scale_available, NULL, 0); static struct attribute *ad7793_attributes[] = { - 
&iio_dev_attr_sampling_frequency.dev_attr.attr, &iio_const_attr_sampling_frequency_available.dev_attr.attr, &iio_dev_attr_in_m_in_scale_available.dev_attr.attr, NULL @@ -435,7 +385,6 @@ static const struct attribute_group ad7793_attribute_group = { }; static struct attribute *ad7797_attributes[] = { - &iio_dev_attr_sampling_frequency.dev_attr.attr, &iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr, NULL }; @@ -505,6 +454,10 @@ static int ad7793_read_raw(struct iio_dev *indio_dev, *val -= offset; } return IIO_VAL_INT; + case IIO_CHAN_INFO_SAMP_FREQ: + *val = st->chip_info + ->sample_freq_avail[AD7793_MODE_RATE(st->mode)]; + return IIO_VAL_INT; } return -EINVAL; } @@ -542,6 +495,26 @@ static int ad7793_write_raw(struct iio_dev *indio_dev, break; } break; + case IIO_CHAN_INFO_SAMP_FREQ: + if (!val) { + ret = -EINVAL; + break; + } + + for (i = 0; i < 16; i++) + if (val == st->chip_info->sample_freq_avail[i]) + break; + + if (i == 16) { + ret = -EINVAL; + break; + } + + st->mode &= ~AD7793_MODE_RATE(-1); + st->mode |= AD7793_MODE_RATE(i); + ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), + st->mode); + break; default: ret = -EINVAL; } diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c index 6e419d5a7c14..f153e02686a0 100644 --- a/drivers/iio/adc/cpcap-adc.c +++ b/drivers/iio/adc/cpcap-adc.c @@ -1012,7 +1012,7 @@ static int cpcap_adc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, indio_dev); ddata->irq = platform_get_irq_byname(pdev, "adcdone"); - if (!ddata->irq) + if (ddata->irq < 0) return -ENODEV; error = devm_request_threaded_irq(&pdev->dev, ddata->irq, NULL, diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index f387b972e4f4..59f99b3a180d 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -44,7 +44,6 @@ #define INA226_MASK_ENABLE 0x06 #define INA226_CVRF BIT(3) -#define INA219_CNVR BIT(1) #define INA2XX_MAX_REGISTERS 8 @@ -79,6 +78,11 @@ #define INA226_ITS_MASK GENMASK(5, 3) #define INA226_SHIFT_ITS(val) ((val) << 3) +/* INA219 Bus voltage register, low bits are flags */ +#define INA219_OVF BIT(0) +#define INA219_CNVR BIT(1) +#define INA219_BUS_VOLTAGE_SHIFT 3 + /* Cosmetic macro giving the sampling period for a full P=UxI cycle */ #define SAMPLING_PERIOD(c) ((c->int_time_vbus + c->int_time_vshunt) \ * c->avg) @@ -112,7 +116,7 @@ struct ina2xx_config { u16 config_default; int calibration_factor; int shunt_div; - int bus_voltage_shift; + int bus_voltage_shift; /* position of lsb */ int bus_voltage_lsb; /* uV */ int power_lsb; /* uW */ enum ina2xx_ids chip_id; @@ -135,7 +139,7 @@ static const struct ina2xx_config ina2xx_config[] = { .config_default = INA219_CONFIG_DEFAULT, .calibration_factor = 40960000, .shunt_div = 100, - .bus_voltage_shift = 3, + .bus_voltage_shift = INA219_BUS_VOLTAGE_SHIFT, .bus_voltage_lsb = 4000, .power_lsb = 20000, .chip_id = ina219, @@ -170,6 +174,9 @@ static int ina2xx_read_raw(struct iio_dev *indio_dev, else *val = regval; + if (chan->address == INA2XX_BUS_VOLTAGE) + *val >>= chip->config->bus_voltage_shift; + return IIO_VAL_INT; case IIO_CHAN_INFO_OVERSAMPLING_RATIO: @@ -203,9 +210,9 @@ static int ina2xx_read_raw(struct iio_dev *indio_dev, return IIO_VAL_FRACTIONAL; case INA2XX_BUS_VOLTAGE: - /* processed (mV) = raw*lsb (uV) / (1000 << shift) */ + /* processed (mV) = raw * lsb (uV) / 1000 */ *val = chip->config->bus_voltage_lsb; - *val2 = 1000 << chip->config->bus_voltage_shift; + *val2 = 1000; return IIO_VAL_FRACTIONAL; case INA2XX_POWER: @@ 
-532,7 +539,7 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev, * Sampling Freq is a consequence of the integration times of * the Voltage channels. */ -#define INA219_CHAN_VOLTAGE(_index, _address) { \ +#define INA219_CHAN_VOLTAGE(_index, _address, _shift) { \ .type = IIO_VOLTAGE, \ .address = (_address), \ .indexed = 1, \ @@ -544,7 +551,8 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev, .scan_index = (_index), \ .scan_type = { \ .sign = 'u', \ - .realbits = 16, \ + .shift = _shift, \ + .realbits = 16 - _shift, \ .storagebits = 16, \ .endianness = IIO_LE, \ } \ @@ -579,8 +587,8 @@ static const struct iio_chan_spec ina226_channels[] = { }; static const struct iio_chan_spec ina219_channels[] = { - INA219_CHAN_VOLTAGE(0, INA2XX_SHUNT_VOLTAGE), - INA219_CHAN_VOLTAGE(1, INA2XX_BUS_VOLTAGE), + INA219_CHAN_VOLTAGE(0, INA2XX_SHUNT_VOLTAGE, 0), + INA219_CHAN_VOLTAGE(1, INA2XX_BUS_VOLTAGE, INA219_BUS_VOLTAGE_SHIFT), INA219_CHAN(IIO_POWER, 2, INA2XX_POWER), INA219_CHAN(IIO_CURRENT, 3, INA2XX_CURRENT), IIO_CHAN_SOFT_TIMESTAMP(4), diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index 2e8dbb89c8c9..11484cb38b84 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c @@ -221,8 +221,10 @@ enum meson_sar_adc_chan7_mux_sel { struct meson_sar_adc_data { bool has_bl30_integration; + u32 bandgap_reg; unsigned int resolution; const char *name; + const struct regmap_config *regmap_config; }; struct meson_sar_adc_priv { @@ -242,13 +244,20 @@ struct meson_sar_adc_priv { int calibscale; }; -static const struct regmap_config meson_sar_adc_regmap_config = { +static const struct regmap_config meson_sar_adc_regmap_config_gxbb = { .reg_bits = 8, .val_bits = 32, .reg_stride = 4, .max_register = MESON_SAR_ADC_REG13, }; +static const struct regmap_config meson_sar_adc_regmap_config_meson8 = { + .reg_bits = 8, + .val_bits = 32, + .reg_stride = 4, + .max_register = MESON_SAR_ADC_DELTA_10, +}; + static unsigned int meson_sar_adc_get_fifo_count(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); @@ -453,8 +462,10 @@ static int meson_sar_adc_lock(struct iio_dev *indio_dev) regmap_read(priv->regmap, MESON_SAR_ADC_DELAY, &val); } while (val & MESON_SAR_ADC_DELAY_BL30_BUSY && timeout--); - if (timeout < 0) + if (timeout < 0) { + mutex_unlock(&indio_dev->mlock); return -ETIMEDOUT; + } } return 0; @@ -600,7 +611,7 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev, init.num_parents = 1; priv->clk_gate.reg = base + MESON_SAR_ADC_REG3; - priv->clk_gate.bit_idx = fls(MESON_SAR_ADC_REG3_CLK_EN); + priv->clk_gate.bit_idx = __ffs(MESON_SAR_ADC_REG3_CLK_EN); priv->clk_gate.hw.init = &init; priv->adc_clk = devm_clk_register(&indio_dev->dev, &priv->clk_gate.hw); @@ -685,6 +696,20 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev) return 0; } +static void meson_sar_adc_set_bandgap(struct iio_dev *indio_dev, bool on_off) +{ + struct meson_sar_adc_priv *priv = iio_priv(indio_dev); + u32 enable_mask; + + if (priv->data->bandgap_reg == MESON_SAR_ADC_REG11) + enable_mask = MESON_SAR_ADC_REG11_BANDGAP_EN; + else + enable_mask = MESON_SAR_ADC_DELTA_10_TS_VBG_EN; + + regmap_update_bits(priv->regmap, priv->data->bandgap_reg, enable_mask, + on_off ? 
enable_mask : 0); +} + static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev) { struct meson_sar_adc_priv *priv = iio_priv(indio_dev); @@ -717,9 +742,9 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev) regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1); regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0, MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval); - regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11, - MESON_SAR_ADC_REG11_BANDGAP_EN, - MESON_SAR_ADC_REG11_BANDGAP_EN); + + meson_sar_adc_set_bandgap(indio_dev, true); + regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, MESON_SAR_ADC_REG3_ADC_EN, MESON_SAR_ADC_REG3_ADC_EN); @@ -739,8 +764,7 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev) err_adc_clk: regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, MESON_SAR_ADC_REG3_ADC_EN, 0); - regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11, - MESON_SAR_ADC_REG11_BANDGAP_EN, 0); + meson_sar_adc_set_bandgap(indio_dev, false); clk_disable_unprepare(priv->sana_clk); err_sana_clk: clk_disable_unprepare(priv->core_clk); @@ -765,8 +789,8 @@ static int meson_sar_adc_hw_disable(struct iio_dev *indio_dev) regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3, MESON_SAR_ADC_REG3_ADC_EN, 0); - regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11, - MESON_SAR_ADC_REG11_BANDGAP_EN, 0); + + meson_sar_adc_set_bandgap(indio_dev, false); clk_disable_unprepare(priv->sana_clk); clk_disable_unprepare(priv->core_clk); @@ -845,30 +869,40 @@ static const struct iio_info meson_sar_adc_iio_info = { static const struct meson_sar_adc_data meson_sar_adc_meson8_data = { .has_bl30_integration = false, + .bandgap_reg = MESON_SAR_ADC_DELTA_10, + .regmap_config = &meson_sar_adc_regmap_config_meson8, .resolution = 10, .name = "meson-meson8-saradc", }; static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = { .has_bl30_integration = false, + .bandgap_reg = MESON_SAR_ADC_DELTA_10, + .regmap_config = &meson_sar_adc_regmap_config_meson8, .resolution = 10, .name = "meson-meson8b-saradc", }; static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = { .has_bl30_integration = true, + .bandgap_reg = MESON_SAR_ADC_REG11, + .regmap_config = &meson_sar_adc_regmap_config_gxbb, .resolution = 10, .name = "meson-gxbb-saradc", }; static const struct meson_sar_adc_data meson_sar_adc_gxl_data = { .has_bl30_integration = true, + .bandgap_reg = MESON_SAR_ADC_REG11, + .regmap_config = &meson_sar_adc_regmap_config_gxbb, .resolution = 12, .name = "meson-gxl-saradc", }; static const struct meson_sar_adc_data meson_sar_adc_gxm_data = { .has_bl30_integration = true, + .bandgap_reg = MESON_SAR_ADC_REG11, + .regmap_config = &meson_sar_adc_regmap_config_gxbb, .resolution = 12, .name = "meson-gxm-saradc", }; @@ -946,7 +980,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev) return ret; priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, - &meson_sar_adc_regmap_config); + priv->data->regmap_config); if (IS_ERR(priv->regmap)) return PTR_ERR(priv->regmap); diff --git a/drivers/iio/adc/qcom-vadc-common.c b/drivers/iio/adc/qcom-vadc-common.c index 47d24ae5462f..fe3d7826783c 100644 --- a/drivers/iio/adc/qcom-vadc-common.c +++ b/drivers/iio/adc/qcom-vadc-common.c @@ -5,6 +5,7 @@ #include #include #include +#include <linux/module.h> #include "qcom-vadc-common.h" @@ -229,3 +230,6 @@ int qcom_vadc_decimation_from_dt(u32 value) return __ffs64(value / VADC_DECIMATION_MIN); } EXPORT_SYMBOL(qcom_vadc_decimation_from_dt); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Qualcomm ADC common
functionality"); diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 4df32cf1650e..04be8bd951be 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -764,8 +764,6 @@ static int stm32h7_adc_enable(struct stm32_adc *adc) int ret; u32 val; - /* Clear ADRDY by writing one, then enable ADC */ - stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY); stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN); /* Poll for ADRDY to be set (after adc startup time) */ @@ -773,8 +771,11 @@ static int stm32h7_adc_enable(struct stm32_adc *adc) val & STM32H7_ADRDY, 100, STM32_ADC_TIMEOUT_US); if (ret) { - stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN); + stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADDIS); dev_err(&indio_dev->dev, "Failed to enable ADC\n"); + } else { + /* Clear ADRDY by writing one */ + stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY); } return ret; @@ -1314,6 +1315,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val) { struct stm32_adc *adc = iio_priv(indio_dev); unsigned int watermark = STM32_DMA_BUFFER_SIZE / 2; + unsigned int rx_buf_sz = STM32_DMA_BUFFER_SIZE; /* * dma cyclic transfers are used, buffer is split into two periods. @@ -1322,7 +1324,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val) * - one buffer (period) driver can push with iio_trigger_poll(). */ watermark = min(watermark, val * (unsigned)(sizeof(u16))); - adc->rx_buf_sz = watermark * 2; + adc->rx_buf_sz = min(rx_buf_sz, watermark * 2 * adc->num_conv); return 0; } diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index e0dc20488335..9ac2fb032df6 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c @@ -369,6 +369,7 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val) conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]); conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]); + conv_time += conv_time / 10; /* 10% internal clock inaccuracy */ usleep_range(conv_time, conv_time + 1); data->conv_invalid = false; } diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c index ff03324dee13..0a7289571b68 100644 --- a/drivers/iio/buffer/industrialio-buffer-dma.c +++ b/drivers/iio/buffer/industrialio-buffer-dma.c @@ -587,7 +587,7 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum); * Should be used as the set_length callback for iio_buffer_access_ops * struct for DMA buffers. */ -int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length) +int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length) { /* Avoid an invalid state */ if (length < 2) diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c index 047fe757ab97..70c302a93d7f 100644 --- a/drivers/iio/buffer/kfifo_buf.c +++ b/drivers/iio/buffer/kfifo_buf.c @@ -22,11 +22,18 @@ struct iio_kfifo { #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer) static inline int __iio_allocate_kfifo(struct iio_kfifo *buf, - int bytes_per_datum, int length) + size_t bytes_per_datum, unsigned int length) { if ((length == 0) || (bytes_per_datum == 0)) return -EINVAL; + /* + * Make sure we don't overflow an unsigned int after kfifo rounds up to + * the next power of 2. 
+ */ + if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum) + return -EINVAL; + return __kfifo_alloc((struct __kfifo *)&buf->kf, length, bytes_per_datum, GFP_KERNEL); } @@ -67,7 +74,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd) return 0; } -static int iio_set_length_kfifo(struct iio_buffer *r, int length) +static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length) { /* Avoid an invalid state */ if (length < 2) diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c index 840a6cbd5f0f..9dd0e1cd93dd 100644 --- a/drivers/iio/chemical/ccs811.c +++ b/drivers/iio/chemical/ccs811.c @@ -91,7 +91,6 @@ static const struct iio_chan_spec ccs811_channels[] = { .channel2 = IIO_MOD_CO2, .modified = 1, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE), .scan_index = 0, .scan_type = { @@ -129,6 +128,9 @@ static int ccs811_start_sensor_application(struct i2c_client *client) if (ret < 0) return ret; + if ((ret & CCS811_STATUS_FW_MODE_APPLICATION)) + return 0; + if ((ret & CCS811_STATUS_APP_VALID_MASK) != CCS811_STATUS_APP_VALID_LOADED) return -EIO; @@ -245,24 +247,18 @@ static int ccs811_read_raw(struct iio_dev *indio_dev, switch (chan->channel2) { case IIO_MOD_CO2: *val = 0; - *val2 = 12834; + *val2 = 100; return IIO_VAL_INT_PLUS_MICRO; case IIO_MOD_VOC: *val = 0; - *val2 = 84246; - return IIO_VAL_INT_PLUS_MICRO; + *val2 = 100; + return IIO_VAL_INT_PLUS_NANO; default: return -EINVAL; } default: return -EINVAL; } - case IIO_CHAN_INFO_OFFSET: - if (!(chan->type == IIO_CONCENTRATION && - chan->channel2 == IIO_MOD_CO2)) - return -EINVAL; - *val = -400; - return IIO_VAL_INT; default: return -EINVAL; } diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index 0e4b379ada45..04a63fba41f8 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c @@ -178,10 +178,14 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state) #ifdef CONFIG_PM int ret; - atomic_set(&st->user_requested_state, state); - if (state) + if (atomic_add_unless(&st->runtime_pm_enable, 1, 1)) + pm_runtime_enable(&st->pdev->dev); + + if (state) { + atomic_inc(&st->user_requested_state); ret = pm_runtime_get_sync(&st->pdev->dev); - else { + } else { + atomic_dec(&st->user_requested_state); pm_runtime_mark_last_busy(&st->pdev->dev); pm_runtime_use_autosuspend(&st->pdev->dev); ret = pm_runtime_put_autosuspend(&st->pdev->dev); @@ -221,7 +225,8 @@ static void hid_sensor_set_power_work(struct work_struct *work) if (attrb->latency_ms > 0) hid_sensor_set_report_latency(attrb, attrb->latency_ms); - _hid_sensor_power_state(attrb, true); + if (atomic_read(&attrb->user_requested_state)) + _hid_sensor_power_state(attrb, true); } static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig, @@ -232,7 +237,9 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig, void hid_sensor_remove_trigger(struct hid_sensor_common *attrb) { - pm_runtime_disable(&attrb->pdev->dev); + if (atomic_read(&attrb->runtime_pm_enable)) + pm_runtime_disable(&attrb->pdev->dev); + pm_runtime_set_suspended(&attrb->pdev->dev); pm_runtime_put_noidle(&attrb->pdev->dev); @@ -283,7 +290,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, INIT_WORK(&attrb->work, hid_sensor_set_power_work); pm_suspend_ignore_children(&attrb->pdev->dev, true); - 
pm_runtime_enable(&attrb->pdev->dev); /* Default to 3 seconds, but can be changed from sysfs */ pm_runtime_set_autosuspend_delay(&attrb->pdev->dev, 3000); diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 02e833b14db0..34115f05d5c4 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -470,7 +470,7 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable) * different one. Take into account irq status register * to understand if irq trigger can be properly supported */ - if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) + if (sdata->sensor_settings->drdy_irq.stat_drdy.addr) sdata->hw_irq_trigger = enable; return 0; } diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c index fa73e6795359..fdcc5a891958 100644 --- a/drivers/iio/common/st_sensors/st_sensors_trigger.c +++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c @@ -31,7 +31,7 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev, int ret; /* How would I know if I can't check it? */ - if (!sdata->sensor_settings->drdy_irq.addr_stat_drdy) + if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) return -EINVAL; /* No scan mask, no interrupt */ @@ -39,23 +39,15 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev, return 0; ret = sdata->tf->read_byte(&sdata->tb, sdata->dev, - sdata->sensor_settings->drdy_irq.addr_stat_drdy, + sdata->sensor_settings->drdy_irq.stat_drdy.addr, &status); if (ret < 0) { dev_err(sdata->dev, "error checking samples available\n"); return ret; } - /* - * the lower bits of .active_scan_mask[0] is directly mapped - * to the channels on the sensor: either bit 0 for - * one-dimensional sensors, or e.g. x,y,z for accelerometers, - * gyroscopes or magnetometers. No sensor use more than 3 - * channels, so cut the other status bits here. - */ - status &= 0x07; - if (status & (u8)indio_dev->active_scan_mask[0]) + if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask) return 1; return 0; @@ -212,7 +204,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, * it was "our" interrupt. */ if (sdata->int_pin_open_drain && - sdata->sensor_settings->drdy_irq.addr_stat_drdy) + sdata->sensor_settings->drdy_irq.stat_drdy.addr) irq_trig |= IRQF_SHARED; err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev), diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c index e366422e8512..2536a8400c98 100644 --- a/drivers/iio/gyro/st_gyro_core.c +++ b/drivers/iio/gyro/st_gyro_core.c @@ -118,7 +118,10 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = { * drain settings, but only for INT1 and not * for the DRDY line on INT2. */ - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .multi_read_bit = true, .bootime = 2, @@ -188,7 +191,10 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = { * drain settings, but only for INT1 and not * for the DRDY line on INT2. */ - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .multi_read_bit = true, .bootime = 2, @@ -253,7 +259,10 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = { * drain settings, but only for INT1 and not * for the DRDY line on INT2. 
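* (The stat_drdy mask of 0x07 set in these hunks covers the X, Y and Z data-ready bits of the status register.)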
*/ - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .multi_read_bit = true, .bootime = 2, diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c index 839b875c29b9..3ac25b21cbfc 100644 --- a/drivers/iio/health/max30102.c +++ b/drivers/iio/health/max30102.c @@ -329,20 +329,31 @@ static int max30102_read_temp(struct max30102_data *data, int *val) return 0; } -static int max30102_get_temp(struct max30102_data *data, int *val) +static int max30102_get_temp(struct max30102_data *data, int *val, bool en) { int ret; + if (en) { + ret = max30102_set_powermode(data, true); + if (ret) + return ret; + } + /* start acquisition */ ret = regmap_update_bits(data->regmap, MAX30102_REG_TEMP_CONFIG, MAX30102_REG_TEMP_CONFIG_TEMP_EN, MAX30102_REG_TEMP_CONFIG_TEMP_EN); if (ret) - return ret; + goto out; msleep(35); + ret = max30102_read_temp(data, val); + +out: + if (en) + max30102_set_powermode(data, false); - return max30102_read_temp(data, val); + return ret; } static int max30102_read_raw(struct iio_dev *indio_dev, @@ -355,23 +366,22 @@ static int max30102_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: /* - * Temperature reading can only be acquired while engine - * is running + * Temperature reading can only be acquired when not in + * shutdown; leave shutdown briefly when buffer not running */ mutex_lock(&indio_dev->mlock); - if (!iio_buffer_enabled(indio_dev)) - ret = -EBUSY; - else { - ret = max30102_get_temp(data, val); - if (!ret) - ret = IIO_VAL_INT; - } - + if (!iio_buffer_enabled(indio_dev)) + ret = max30102_get_temp(data, val, true); + else + ret = max30102_get_temp(data, val, false); mutex_unlock(&indio_dev->mlock); + if (ret) + return ret; + + ret = IIO_VAL_INT; break; case IIO_CHAN_INFO_SCALE: - *val = 1; /* 0.0625 */ + *val = 1000; /* 62.5 */ *val2 = 16; ret = IIO_VAL_FRACTIONAL; break; diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c index f53e9a803a0e..93b99bd93738 100644 --- a/drivers/iio/imu/adis_trigger.c +++ b/drivers/iio/imu/adis_trigger.c @@ -47,6 +47,10 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev) if (adis->trig == NULL) return -ENOMEM; + adis->trig->dev.parent = &adis->spi->dev; + adis->trig->ops = &adis_trigger_ops; + iio_trigger_set_drvdata(adis->trig, adis); + ret = request_irq(adis->spi->irq, &iio_trigger_generic_data_rdy_poll, IRQF_TRIGGER_RISING, @@ -55,9 +59,6 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev) if (ret) goto error_free_trig; - adis->trig->dev.parent = &adis->spi->dev; - adis->trig->ops = &adis_trigger_ops; - iio_trigger_set_drvdata(adis->trig, adis); ret = iio_trigger_register(adis->trig); indio_dev->trig = iio_trigger_get(adis->trig); diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index b485540da89e..cce0c93accef 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -392,7 +392,7 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor, st_lsm6dsx_sensor_disable(sensor); - *val = (s16)data; + *val = (s16)le16_to_cpu(data); return IIO_VAL_INT; } diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index d2b465140a6b..78482d456c3b 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -175,7 +175,7 @@ unsigned int iio_buffer_poll(struct file *filp, struct iio_dev *indio_dev = filp->private_data; struct
iio_buffer *rb = indio_dev->buffer; - if (!indio_dev->info) + if (!indio_dev->info || rb == NULL) return 0; poll_wait(filp, &rb->pollq, wait); diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index a47428b4d31b..e565fd4fc414 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -631,7 +631,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type, * iio_format_value() - Formats a IIO value into its string representation * @buf: The buffer to which the formatted value gets written * which is assumed to be big enough (i.e. PAGE_SIZE). - * @type: One of the IIO_VAL_... constants. This decides how the val + * @type: One of the IIO_VAL_* constants. This decides how the val * and val2 parameters are formatted. * @size: Number of IIO value entries contained in vals * @vals: Pointer to the values, exact meaning depends on the @@ -639,7 +639,7 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type, * * Return: 0 by default, a negative number on failure or the * total number of characters written for a type that belongs - * to the IIO_VAL_... constant. + * to the IIO_VAL_* constant. */ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals) { diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index 08aafba4481c..19031a7bce23 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c @@ -317,7 +317,10 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = { }, .drdy_irq = { /* drdy line is routed to the drdy pin */ - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x07, + }, }, .multi_read_bit = true, .bootime = 2, @@ -361,7 +364,10 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = { .drdy_irq = { .addr = 0x62, .mask_int1 = 0x01, - .addr_stat_drdy = 0x67, + .stat_drdy = { + .addr = 0x67, + .mask = 0x07, + }, }, .multi_read_bit = false, .bootime = 2, diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c index 37ba007f8dca..74831fcd0313 100644 --- a/drivers/iio/multiplexer/iio-mux.c +++ b/drivers/iio/multiplexer/iio-mux.c @@ -285,6 +285,9 @@ static int mux_configure_channel(struct device *dev, struct mux *mux, child->ext_info_cache = devm_kzalloc(dev, sizeof(*child->ext_info_cache) * num_ext_info, GFP_KERNEL); + if (!child->ext_info_cache) + return -ENOMEM; + for (i = 0; i < num_ext_info; ++i) { child->ext_info_cache[i].size = -1; @@ -309,6 +312,9 @@ static int mux_configure_channel(struct device *dev, struct mux *mux, child->ext_info_cache[i].data = devm_kmemdup(dev, page, ret + 1, GFP_KERNEL); + if (!child->ext_info_cache[i].data) + return -ENOMEM; + child->ext_info_cache[i].data[ret] = 0; child->ext_info_cache[i].size = ret; } diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 34611a8ea2ce..ec5ca03529b5 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -287,7 +287,10 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .mask_ihl = 0x80, .addr_od = 0x22, .mask_od = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x03, + }, }, .multi_read_bit = true, .bootime = 2, @@ -395,7 +398,10 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .mask_ihl = 0x80,
.addr_od = 0x22, .mask_od = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x03, + }, }, .multi_read_bit = true, .bootime = 2, @@ -454,7 +460,10 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .mask_ihl = 0x80, .addr_od = 0x12, .mask_od = 0x40, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .stat_drdy = { + .addr = ST_SENSORS_DEFAULT_STAT_ADDR, + .mask = 0x03, + }, }, .multi_read_bit = false, .bootime = 2, @@ -608,7 +617,7 @@ int st_press_common_probe(struct iio_dev *indio_dev) if (!pdata && press_data->sensor_settings->drdy_irq.addr) pdata = (struct st_sensors_platform_data *)&default_press_pdata; - err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data); + err = st_sensors_init_sensor(indio_dev, pdata); if (err < 0) goto st_press_power_off; diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig index ae070950f920..c03de24d3c51 100644 --- a/drivers/iio/proximity/Kconfig +++ b/drivers/iio/proximity/Kconfig @@ -58,6 +58,8 @@ config SX9500 config SRF08 tristate "Devantech SRF02/SRF08/SRF10 ultrasonic ranger sensor" + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER depends on I2C help Say Y here to build a driver for Devantech SRF02/SRF08/SRF10 diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c index f42b3a1c75ff..dba796c06ba6 100644 --- a/drivers/iio/proximity/sx9500.c +++ b/drivers/iio/proximity/sx9500.c @@ -871,6 +871,7 @@ static int sx9500_init_device(struct iio_dev *indio_dev) static void sx9500_gpio_probe(struct i2c_client *client, struct sx9500_data *data) { + struct gpio_desc *gpiod_int; struct device *dev; if (!client) @@ -878,6 +879,14 @@ static void sx9500_gpio_probe(struct i2c_client *client, dev = &client->dev; + if (client->irq <= 0) { + gpiod_int = devm_gpiod_get(dev, SX9500_GPIO_INT, GPIOD_IN); + if (IS_ERR(gpiod_int)) + dev_err(dev, "gpio get irq failed\n"); + else + client->irq = gpiod_to_irq(gpiod_int); + } + data->gpiod_rst = devm_gpiod_get(dev, SX9500_GPIO_RESET, GPIOD_OUT_HIGH); if (IS_ERR(data->gpiod_rst)) { dev_warn(dev, "gpio get reset pin failed\n"); diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index 3726205c8704..27b3c39e586a 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -34,6 +34,18 @@ config INFINIBAND_USER_ACCESS libibverbs, libibcm and a hardware driver library from . +config INFINIBAND_USER_ACCESS_UCM + bool "Userspace CM (UCM, DEPRECATED)" + depends on BROKEN + depends on INFINIBAND_USER_ACCESS + help + The UCM module has known security flaws, which no one is + interested in fixing. The user-space part of this code was + dropped from upstream a long time ago. + + This option is DEPRECATED and planned to be removed. + + config INFINIBAND_EXP_USER_ACCESS bool "Allow experimental support for Infiniband ABI" depends on INFINIBAND_USER_ACCESS @@ -60,9 +72,12 @@ config INFINIBAND_ON_DEMAND_PAGING pages on demand instead. config INFINIBAND_ADDR_TRANS - bool + bool "RDMA/CM" depends on INFINIBAND default y + ---help--- + Support for RDMA communication manager (CM). + This allows for a generic connection abstraction over RDMA.
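As a purely illustrative sketch of that connection abstraction (not part of this patch; my_cm_handler and my_connect are hypothetical names, while rdma_create_id() and rdma_resolve_addr() are the CM entry points this option builds):

#include <linux/err.h>
#include <linux/printk.h>
#include <net/net_namespace.h>
#include <rdma/rdma_cm.h>

/* All CM events (address/route resolved, established, ...) arrive here. */
static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
{
	pr_info("rdma_cm event %d, status %d\n", ev->event, ev->status);
	return 0;
}

static int my_connect(struct sockaddr *dst)
{
	struct rdma_cm_id *id;

	id = rdma_create_id(&init_net, my_cm_handler, NULL,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);

	/* Kicks off asynchronous resolution; results reach the handler. */
	return rdma_resolve_addr(id, NULL, dst, 2000);
}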
config INFINIBAND_ADDR_TRANS_CONFIGFS bool diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index 9c0a2b5c834e..991c2522fb41 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile @@ -5,8 +5,8 @@ user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o obj-$(CONFIG_INFINIBAND) += ib_core.o ib_cm.o iw_cm.o \ $(infiniband-y) obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o -obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ - $(user_access-y) +obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o $(user_access-y) +obj-$(CONFIG_INFINIBAND_USER_ACCESS_UCM) += ib_ucm.o $(user_access-y) ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \ device.o fmr_pool.o cache.o netlink.o \ diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 12523f630b61..40475ebf3a61 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -207,6 +207,22 @@ int rdma_addr_size(struct sockaddr *addr) } EXPORT_SYMBOL(rdma_addr_size); +int rdma_addr_size_in6(struct sockaddr_in6 *addr) +{ + int ret = rdma_addr_size((struct sockaddr *) addr); + + return ret <= sizeof(*addr) ? ret : 0; +} +EXPORT_SYMBOL(rdma_addr_size_in6); + +int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr) +{ + int ret = rdma_addr_size((struct sockaddr *) addr); + + return ret <= sizeof(*addr) ? ret : 0; +} +EXPORT_SYMBOL(rdma_addr_size_kss); + static struct rdma_addr_client self; void rdma_addr_register_client(struct rdma_addr_client *client) @@ -597,6 +613,15 @@ static void process_one_req(struct work_struct *_work) list_del(&req->list); mutex_unlock(&lock); + /* + * Although the work will normally have been canceled by the + * workqueue, it can still be requeued as long as it is on the + * req_list, so it could have been requeued before we grabbed &lock. + * We need to cancel it after it is removed from req_list to really be + * sure it is safe to free. 
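+ * For example, the work could have been requeued (while req was
+ * still on req_list) just before we took &lock above; after the
+ * list_del() that queued work would still fire on a freed req if
+ * it were not cancelled at this point.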
+ */ + cancel_delayed_work(&req->work); + req->callback(req->status, (struct sockaddr *)&req->src_addr, req->addr, req->context); put_client(req->client); @@ -852,7 +877,7 @@ static struct notifier_block nb = { int addr_init(void) { - addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM); + addr_wq = alloc_ordered_workqueue("ib_addr", 0); if (!addr_wq) return -ENOMEM; diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 77515638c55c..896cfd9303b0 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -434,7 +434,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index, return -EINVAL; if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID) - return -EAGAIN; + return -EINVAL; memcpy(gid, &table->data_vec[index].gid, sizeof(*gid)); if (attr) { diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 4c4b46586af2..2af79e4f3235 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1575,7 +1575,7 @@ static void cm_format_req_event(struct cm_work *work, param->bth_pkey = cm_get_bth_pkey(work); param->port = cm_id_priv->av.port->port_num; param->primary_path = &work->path[0]; - if (req_msg->alt_local_lid) + if (cm_req_has_alt_path(req_msg)) param->alternate_path = &work->path[1]; else param->alternate_path = NULL; @@ -1856,7 +1856,8 @@ static int cm_req_handler(struct cm_work *work) cm_process_routed_req(req_msg, work->mad_recv_wc->wc); memset(&work->path[0], 0, sizeof(work->path[0])); - memset(&work->path[1], 0, sizeof(work->path[1])); + if (cm_req_has_alt_path(req_msg)) + memset(&work->path[1], 0, sizeof(work->path[1])); grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr); ret = ib_get_cached_gid(work->port->cm_dev->ib_device, work->port->port_num, @@ -3817,14 +3818,16 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, struct cm_port *port = mad_agent->context; struct cm_work *work; enum ib_cm_event_type event; + bool alt_path = false; u16 attr_id; int paths = 0; int going_down = 0; switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { case CM_REQ_ATTR_ID: - paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)-> - alt_local_lid != 0); + alt_path = cm_req_has_alt_path((struct cm_req_msg *) + mad_recv_wc->recv_buf.mad); + paths = 1 + (alt_path != 0); event = IB_CM_REQ_RECEIVED; break; case CM_MRA_ATTR_ID: diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 852c8fec8088..79843a3ca9dc 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -420,6 +420,8 @@ struct cma_hdr { #define CMA_VERSION 0x00 struct cma_req_info { + struct sockaddr_storage listen_addr_storage; + struct sockaddr_storage src_addr_storage; struct ib_device *device; int port; union ib_gid local_gid; @@ -624,11 +626,13 @@ static inline int cma_validate_port(struct ib_device *device, u8 port, if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port)) return ret; - if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) + if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) { ndev = dev_get_by_index(&init_net, bound_if_index); - else + if (!ndev) + return ret; + } else { gid_type = IB_GID_TYPE_IB; - + } ret = ib_find_cached_gid_by_port(device, gid, gid_type, port, ndev, NULL); @@ -801,6 +805,7 @@ struct rdma_cm_id *rdma_create_id(struct net *net, INIT_LIST_HEAD(&id_priv->mc_list); get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); id_priv->id.route.addr.dev_addr.net = 
get_net(net); + id_priv->seq_num &= 0x00ffffff; return &id_priv->id; } @@ -895,7 +900,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, { struct ib_qp_attr qp_attr; int qp_attr_mask, ret; - union ib_gid sgid; mutex_lock(&id_priv->qp_mutex); if (!id_priv->id.qp) { @@ -918,12 +922,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, if (ret) goto out; - ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num, - rdma_ah_read_grh(&qp_attr.ah_attr)->sgid_index, - &sgid, NULL); - if (ret) - goto out; - BUG_ON(id_priv->cma_dev->device != id_priv->id.device); if (conn_param) @@ -1369,11 +1367,11 @@ static bool validate_net_dev(struct net_device *net_dev, } static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event, - const struct cma_req_info *req) + struct cma_req_info *req) { - struct sockaddr_storage listen_addr_storage, src_addr_storage; - struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage, - *src_addr = (struct sockaddr *)&src_addr_storage; + struct sockaddr *listen_addr = + (struct sockaddr *)&req->listen_addr_storage; + struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage; struct net_device *net_dev; const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL; int err; @@ -1388,11 +1386,6 @@ static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event, if (!net_dev) return ERR_PTR(-ENODEV); - if (!validate_net_dev(net_dev, listen_addr, src_addr)) { - dev_put(net_dev); - return ERR_PTR(-EHOSTUNREACH); - } - return net_dev; } @@ -1528,19 +1521,55 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, } } + /* + * Net namespace might be getting deleted while route lookup, + * cm_id lookup is in progress. Therefore, perform netdevice + * validation, cm_id lookup under rcu lock. + * RCU lock along with netdevice state check, synchronizes with + * netdevice migrating to different net namespace and also avoids + * case where net namespace doesn't get deleted while lookup is in + * progress. + * If the device state is not IFF_UP, its properties such as ifindex + * and nd_net cannot be trusted to remain valid without rcu lock. + * net/core/dev.c change_net_namespace() ensures to synchronize with + * ongoing operations on net device after device is closed using + * synchronize_net(). + */ + rcu_read_lock(); + if (*net_dev) { + /* + * If netdevice is down, it is likely that it is administratively + * down or it might be migrating to different namespace. + * In that case avoid further processing, as the net namespace + * or ifindex may change. + */ + if (((*net_dev)->flags & IFF_UP) == 0) { + id_priv = ERR_PTR(-EHOSTUNREACH); + goto err; + } + + if (!validate_net_dev(*net_dev, + (struct sockaddr *)&req.listen_addr_storage, + (struct sockaddr *)&req.src_addr_storage)) { + id_priv = ERR_PTR(-EHOSTUNREACH); + goto err; + } + } + bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net, rdma_ps_from_service_id(req.service_id), cma_port_from_service_id(req.service_id)); id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); +err: + rcu_read_unlock(); if (IS_ERR(id_priv) && *net_dev) { dev_put(*net_dev); *net_dev = NULL; } - return id_priv; } -static inline int cma_user_data_offset(struct rdma_id_private *id_priv) +static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv) { return cma_family(id_priv) == AF_IB ? 
0 : sizeof(struct cma_hdr); } @@ -1942,7 +1971,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) struct rdma_id_private *listen_id, *conn_id = NULL; struct rdma_cm_event event; struct net_device *net_dev; - int offset, ret; + u8 offset; + int ret; listen_id = cma_id_from_event(cm_id, ib_event, &net_dev); if (IS_ERR(listen_id)) @@ -3015,7 +3045,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list, continue; /* different dest port -> unique */ - if (!cma_any_port(cur_daddr) && + if (!cma_any_port(daddr) && + !cma_any_port(cur_daddr) && (dport != cur_dport)) continue; @@ -3026,7 +3057,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list, continue; /* different dst address -> unique */ - if (!cma_any_addr(cur_daddr) && + if (!cma_any_addr(daddr) && + !cma_any_addr(cur_daddr) && cma_addr_cmp(daddr, cur_daddr)) continue; @@ -3324,13 +3356,13 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) } #endif } + daddr = cma_dst_addr(id_priv); + daddr->sa_family = addr->sa_family; + ret = cma_get_port(id_priv); if (ret) goto err2; - daddr = cma_dst_addr(id_priv); - daddr->sa_family = addr->sa_family; - return 0; err2: if (id_priv->cma_dev) @@ -3440,7 +3472,8 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, struct ib_cm_sidr_req_param req; struct ib_cm_id *id; void *private_data; - int offset, ret; + u8 offset; + int ret; memset(&req, 0, sizeof req); offset = cma_user_data_offset(id_priv); @@ -3497,7 +3530,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv, struct rdma_route *route; void *private_data; struct ib_cm_id *id; - int offset, ret; + u8 offset; + int ret; memset(&req, 0, sizeof req); offset = cma_user_data_offset(id_priv); @@ -4114,6 +4148,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, struct cma_multicast *mc; int ret; + if (!id->device) + return -EINVAL; + id_priv = container_of(id, struct rdma_id_private, id); if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) @@ -4432,7 +4469,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb) RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) goto out; if (ibnl_put_attr(skb, nlh, - rdma_addr_size(cma_src_addr(id_priv)), + rdma_addr_size(cma_dst_addr(id_priv)), cma_dst_addr(id_priv), RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) goto out; @@ -4444,6 +4481,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb) id_stats->qp_type = id->qp_type; i_id++; + nlmsg_end(skb, nlh); } cb->args[1] = 0; @@ -4458,7 +4496,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -static const struct rdma_nl_cbs cma_cb_table[] = { +static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = { [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats}, }; diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index a1d687a664f8..66f0268f37a6 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -314,7 +314,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map, } #endif -struct ib_device *__ib_device_get_by_index(u32 ifindex); +struct ib_device *ib_device_get_by_index(u32 ifindex); /* RDMA device netlink */ void nldev_init(void); void nldev_exit(void); diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index f2ae75fa3128..757d308bebe8 100644 --- a/drivers/infiniband/core/cq.c +++ 
b/drivers/infiniband/core/cq.c @@ -17,6 +17,7 @@ /* # of WCs to poll for with a single call to ib_poll_cq */ #define IB_POLL_BATCH 16 +#define IB_POLL_BATCH_DIRECT 8 /* # of WCs to iterate over before yielding */ #define IB_POLL_BUDGET_IRQ 256 @@ -25,7 +26,8 @@ #define IB_POLL_FLAGS \ (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) -static int __ib_process_cq(struct ib_cq *cq, int budget) +static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs, + int batch) { int i, n, completed = 0; @@ -34,10 +36,10 @@ static int __ib_process_cq(struct ib_cq *cq, int budget) * want to bound this call, thus we need unsigned * minimum here. */ - while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH, - budget - completed), cq->wc)) > 0) { + while ((n = ib_poll_cq(cq, min_t(u32, batch, + budget - completed), wcs)) > 0) { for (i = 0; i < n; i++) { - struct ib_wc *wc = &cq->wc[i]; + struct ib_wc *wc = &wcs[i]; if (wc->wr_cqe) wc->wr_cqe->done(cq, wc); @@ -47,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget) completed += n; - if (n != IB_POLL_BATCH || - (budget != -1 && completed >= budget)) + if (n != batch || (budget != -1 && completed >= budget)) break; } @@ -60,18 +61,20 @@ static int __ib_process_cq(struct ib_cq *cq, int budget) * @cq: CQ to process * @budget: number of CQEs to poll for * - * This function is used to process all outstanding CQ entries on a - * %IB_POLL_DIRECT CQ. It does not offload CQ processing to a different - * context and does not ask for completion interrupts from the HCA. + * This function is used to process all outstanding CQ entries. + * It does not offload CQ processing to a different context and does + * not ask for completion interrupts from the HCA. + * Using direct processing on a CQ whose type is not IB_POLL_DIRECT may + * trigger concurrent processing. * * Note: do not pass -1 as %budget unless it is guaranteed that the number * of completions that will be processed is small.
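*
* A hypothetical drain loop built on this helper (not part of this
* patch):
*
*	while (ib_process_cq_direct(cq, 16) > 0)
*		; /* keep reaping until the CQ is empty */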
*/ int ib_process_cq_direct(struct ib_cq *cq, int budget) { - WARN_ON_ONCE(cq->poll_ctx != IB_POLL_DIRECT); + struct ib_wc wcs[IB_POLL_BATCH_DIRECT]; - return __ib_process_cq(cq, budget); + return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT); } EXPORT_SYMBOL(ib_process_cq_direct); @@ -85,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget) struct ib_cq *cq = container_of(iop, struct ib_cq, iop); int completed; - completed = __ib_process_cq(cq, budget); + completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH); if (completed < budget) { irq_poll_complete(&cq->iop); if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) @@ -105,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work) struct ib_cq *cq = container_of(work, struct ib_cq, work); int completed; - completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE); + completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc, + IB_POLL_BATCH); if (completed >= IB_POLL_BUDGET_WORKQUEUE || ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) queue_work(ib_comp_wq, &cq->work); diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 84fc32a2c8b3..4dff06ab771e 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -134,7 +134,7 @@ static int ib_device_check_mandatory(struct ib_device *device) return 0; } -struct ib_device *__ib_device_get_by_index(u32 index) +static struct ib_device *__ib_device_get_by_index(u32 index) { struct ib_device *device; @@ -145,6 +145,22 @@ struct ib_device *__ib_device_get_by_index(u32 index) return NULL; } +/* + * Caller is responsible for returning the reference count by calling put_device() + */ +struct ib_device *ib_device_get_by_index(u32 index) +{ + struct ib_device *device; + + down_read(&lists_rwsem); + device = __ib_device_get_by_index(index); + if (device) + get_device(&device->dev); + + up_read(&lists_rwsem); + return device; +} + static struct ib_device *__ib_device_get_by_name(const char *name) { struct ib_device *device; @@ -446,7 +462,6 @@ int ib_register_device(struct ib_device *device, struct ib_udata uhw = {.outlen = 0, .inlen = 0}; struct device *parent = device->dev.parent; - WARN_ON_ONCE(!parent); WARN_ON_ONCE(device->dma_device); if (device->dev.dma_ops) { /* @@ -455,16 +470,25 @@ int ib_register_device(struct ib_device *device, * into device->dev. */ device->dma_device = &device->dev; - if (!device->dev.dma_mask) - device->dev.dma_mask = parent->dma_mask; - if (!device->dev.coherent_dma_mask) - device->dev.coherent_dma_mask = - parent->coherent_dma_mask; + if (!device->dev.dma_mask) { + if (parent) + device->dev.dma_mask = parent->dma_mask; + else + WARN_ON_ONCE(true); + } + if (!device->dev.coherent_dma_mask) { + if (parent) + device->dev.coherent_dma_mask = + parent->coherent_dma_mask; + else + WARN_ON_ONCE(true); + } } else { /* * The caller did not provide custom DMA operations. Use the * DMA mapping operations of the parent device.
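* A NULL parent is tolerated only in the dma_ops branch above; this
* fallback has nothing to inherit from, which is why it warns below.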
*/ + WARN_ON_ONCE(!parent); device->dma_device = parent; } @@ -510,14 +534,14 @@ int ib_register_device(struct ib_device *device, ret = device->query_device(device, &device->attrs, &uhw); if (ret) { pr_warn("Couldn't query the device attributes\n"); - goto cache_cleanup; + goto cg_cleanup; } ret = ib_device_register_sysfs(device, port_callback); if (ret) { pr_warn("Couldn't register device %s with driver model\n", device->name); - goto cache_cleanup; + goto cg_cleanup; } device->reg_state = IB_DEV_REGISTERED; @@ -533,6 +557,8 @@ int ib_register_device(struct ib_device *device, mutex_unlock(&device_mutex); return 0; +cg_cleanup: + ib_device_unregister_rdmacg(device); cache_cleanup: ib_cache_cleanup_one(device); ib_cache_release_one(device); @@ -1146,7 +1172,7 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, } EXPORT_SYMBOL(ib_get_net_dev_by_params); -static const struct rdma_nl_cbs ibnl_ls_cb_table[] = { +static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = { [RDMA_NL_LS_OP_RESOLVE] = { .doit = ib_nl_handle_resolve_resp, .flags = RDMA_NL_ADMIN_PERM, @@ -1253,5 +1279,5 @@ static void __exit ib_core_cleanup(void) MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4); -module_init(ib_core_init); +subsys_initcall(ib_core_init); module_exit(ib_core_cleanup); diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index fcf42f6bb82a..30d7277249b8 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_reject_msg(int reason) } EXPORT_SYMBOL(iwcm_reject_msg); -static struct rdma_nl_cbs iwcm_nl_cb_table[] = { +static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = { [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index 3c4faadb8cdd..cb0fecc958b5 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c @@ -114,7 +114,7 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, struct sockaddr_storage *mapped_sockaddr, u8 nl_client) { - struct hlist_head *hash_bucket_head; + struct hlist_head *hash_bucket_head = NULL; struct iwpm_mapping_info *map_info; unsigned long flags; int ret = -EINVAL; @@ -142,6 +142,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, } } spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); + + if (!hash_bucket_head) + kfree(map_info); return ret; } @@ -654,6 +657,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid) } skb_num++; spin_lock_irqsave(&iwpm_mapinfo_lock, flags); + ret = -EINVAL; for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) { hlist_for_each_entry(map_info, &iwpm_hash_bucket[i], hlist_node) { diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index f8f53bb90837..55252079faf6 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -60,7 +60,7 @@ module_param_named(recv_queue_size, mad_recvq_size, int, 0444); MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests"); static struct list_head ib_mad_port_list; -static u32 ib_mad_client_id = 0; +static atomic_t ib_mad_client_id = ATOMIC_INIT(0); /* Port list lock */ static DEFINE_SPINLOCK(ib_mad_port_list_lock); @@ -378,7 +378,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, } 
spin_lock_irqsave(&port_priv->reg_lock, flags); - mad_agent_priv->agent.hi_tid = ++ib_mad_client_id; + mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id); /* * Make sure MAD registration (if supplied) @@ -1558,7 +1558,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, mad_reg_req->oui, 3)) { method = &(*vendor_table)->vendor_class[ vclass]->method_table[i]; - BUG_ON(!*method); + if (!*method) + goto error3; goto check_in_use; } } @@ -1568,10 +1569,12 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, vclass]->oui[i])) { method = &(*vendor_table)->vendor_class[ vclass]->method_table[i]; - BUG_ON(*method); /* Allocate method table for this OUI */ - if ((ret = allocate_method_table(method))) - goto error3; + if (!*method) { + ret = allocate_method_table(method); + if (ret) + goto error3; + } memcpy((*vendor_table)->vendor_class[vclass]->oui[i], mad_reg_req->oui, 3); goto check_in_use; @@ -1974,14 +1977,15 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, unsigned long flags; int ret; + INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); ret = ib_mad_enforce_security(mad_agent_priv, mad_recv_wc->wc->pkey_index); if (ret) { ib_free_recv_mad(mad_recv_wc); deref_mad_agent(mad_agent_priv); + return; } - INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index 45f2f095f793..4eb72ff539fc 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -724,21 +724,19 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num, { int ret; u16 gid_index; - u8 p; - - if (rdma_protocol_roce(device, port_num)) { - ret = ib_find_cached_gid_by_port(device, &rec->port_gid, - gid_type, port_num, - ndev, - &gid_index); - } else if (rdma_protocol_ib(device, port_num)) { - ret = ib_find_cached_gid(device, &rec->port_gid, - IB_GID_TYPE_IB, NULL, &p, - &gid_index); - } else { - ret = -EINVAL; - } + /* GID table is not based on the netdevice for IB link layer, + * so ignore ndev during search. 
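+ * (RoCE GID table entries, by contrast, are derived from a netdevice,
+ * so ndev stays meaningful for rdma_protocol_roce() ports.)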
+ */ + if (rdma_protocol_ib(device, port_num)) + ndev = NULL; + else if (!rdma_protocol_roce(device, port_num)) + return -EINVAL; + + ret = ib_find_cached_gid_by_port(device, &rec->port_gid, + gid_type, port_num, + ndev, + &gid_index); if (ret) return ret; diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 2fae850a3eff..0dcd1aa6f683 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -142,27 +142,34 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); - device = __ib_device_get_by_index(index); + device = ib_device_get_by_index(index); if (!device) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) - return -ENOMEM; + if (!msg) { + err = -ENOMEM; + goto err; + } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, 0); err = fill_dev_info(msg, device); - if (err) { - nlmsg_free(msg); - return err; - } + if (err) + goto err_free; nlmsg_end(msg, nlh); + put_device(&device->dev); return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + +err_free: + nlmsg_free(msg); +err: + put_device(&device->dev); + return err; } static int _nldev_get_dumpit(struct ib_device *device, @@ -220,31 +227,40 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); - device = __ib_device_get_by_index(index); + device = ib_device_get_by_index(index); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); - if (!rdma_is_port_valid(device, port)) - return -EINVAL; + if (!rdma_is_port_valid(device, port)) { + err = -EINVAL; + goto err; + } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) - return -ENOMEM; + if (!msg) { + err = -ENOMEM; + goto err; + } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, 0); err = fill_port_info(msg, device, port); - if (err) { - nlmsg_free(msg); - return err; - } + if (err) + goto err_free; nlmsg_end(msg, nlh); + put_device(&device->dev); return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + +err_free: + nlmsg_free(msg); +err: + put_device(&device->dev); + return err; } static int nldev_port_get_dumpit(struct sk_buff *skb, @@ -265,7 +281,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb, return -EINVAL; ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); - device = __ib_device_get_by_index(ifindex); + device = ib_device_get_by_index(ifindex); if (!device) return -EINVAL; @@ -299,11 +315,13 @@ static int nldev_port_get_dumpit(struct sk_buff *skb, nlmsg_end(skb, nlh); } -out: cb->args[0] = idx; +out: + put_device(&device->dev); + cb->args[0] = idx; return skb->len; } -static const struct rdma_nl_cbs nldev_cb_table[] = { +static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { [RDMA_NLDEV_CMD_GET] = { .doit = nldev_get_doit, .dump = nldev_get_dumpit, diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 85b5ee4defa4..1984d6cee3e0 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -196,7 +196,15 @@ static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *t goto free; } - uverbs_uobject_get(uobj); + /* + * The idr_find is guaranteed to return a pointer to something that + * isn't freed yet, or NULL, as the free after idr_remove goes through + * 
kfree_rcu(). However the object may still have been released and + * kfree() could be called at any time. + */ + if (!kref_get_unless_zero(&uobj->ref)) + uobj = ERR_PTR(-ENOENT); + free: rcu_read_unlock(); return uobj; @@ -399,13 +407,13 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj, return ret; } -static void lockdep_check(struct ib_uobject *uobj, bool exclusive) +static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive) { #ifdef CONFIG_LOCKDEP if (exclusive) - WARN_ON(atomic_read(&uobj->usecnt) > 0); + WARN_ON(atomic_read(&uobj->usecnt) != -1); else - WARN_ON(atomic_read(&uobj->usecnt) == -1); + WARN_ON(atomic_read(&uobj->usecnt) <= 0); #endif } @@ -444,7 +452,7 @@ int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj) WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n"); return 0; } - lockdep_check(uobj, true); + assert_uverbs_usecnt(uobj, true); ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY); up_read(&ucontext->cleanup_rwsem); @@ -474,16 +482,17 @@ int rdma_explicit_destroy(struct ib_uobject *uobject) WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n"); return 0; } - lockdep_check(uobject, true); + assert_uverbs_usecnt(uobject, true); ret = uobject->type->type_class->remove_commit(uobject, RDMA_REMOVE_DESTROY); if (ret) - return ret; + goto out; uobject->type = &null_obj_type; +out: up_read(&ucontext->cleanup_rwsem); - return 0; + return ret; } static void alloc_commit_idr_uobject(struct ib_uobject *uobj) @@ -561,7 +570,7 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive) void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive) { - lockdep_check(uobj, exclusive); + assert_uverbs_usecnt(uobj, exclusive); uobj->type->type_class->lookup_put(uobj, exclusive); /* * In order to unlock an object, either decrease its usecnt for diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index ab5e1024fea9..b81d2597f563 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1291,10 +1291,9 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num, resolved_dev = dev_get_by_index(dev_addr.net, dev_addr.bound_dev_if); - if (resolved_dev->flags & IFF_LOOPBACK) { - dev_put(resolved_dev); - resolved_dev = idev; - dev_hold(resolved_dev); + if (!resolved_dev) { + dev_put(idev); + return -ENODEV; } ndev = ib_get_ndev_from_path(rec); rcu_read_lock(); diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index 88bdafb297f5..59b2f96d986a 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -87,16 +87,14 @@ static int enforce_qp_pkey_security(u16 pkey, if (ret) return ret; - if (qp_sec->qp == qp_sec->qp->real_qp) { - list_for_each_entry(shared_qp_sec, - &qp_sec->shared_qp_list, - shared_qp_list) { - ret = security_ib_pkey_access(shared_qp_sec->security, - subnet_prefix, - pkey); - if (ret) - return ret; - } + list_for_each_entry(shared_qp_sec, + &qp_sec->shared_qp_list, + shared_qp_list) { + ret = security_ib_pkey_access(shared_qp_sec->security, + subnet_prefix, + pkey); + if (ret) + return ret; } return 0; } @@ -388,6 +386,9 @@ int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev) if (ret) return ret; + if (!qp->qp_sec) + return 0; + mutex_lock(&real_qp->qp_sec->mutex); ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys, qp->qp_sec); @@ -419,8 +420,17 @@ void 
ib_close_shared_qp_security(struct ib_qp_security *sec) int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev) { + u8 i = rdma_start_port(dev); + bool is_ib = false; int ret; + while (i <= rdma_end_port(dev) && !is_ib) + is_ib = rdma_protocol_ib(dev, i++); + + /* If this isn't an IB device don't create the security context */ + if (!is_ib) + return 0; + qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL); if (!qp->qp_sec) return -ENOMEM; @@ -443,6 +453,10 @@ EXPORT_SYMBOL(ib_create_qp_security); void ib_destroy_qp_security_begin(struct ib_qp_security *sec) { + /* Return if not IB */ + if (!sec) + return; + mutex_lock(&sec->mutex); /* Remove the QP from the lists so it won't get added to @@ -472,6 +486,10 @@ void ib_destroy_qp_security_abort(struct ib_qp_security *sec) int ret; int i; + /* Return if not IB */ + if (!sec) + return; + /* If a concurrent cache update is in progress this * QP security could be marked for an error state * transition. Wait for this to complete. @@ -507,6 +525,10 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec) { int i; + /* Return if not IB */ + if (!sec) + return; + /* If a concurrent cache update is occurring we must * wait until this QP security structure is processed * in the QP to error flow before destroying it because @@ -559,19 +581,35 @@ int ib_security_modify_qp(struct ib_qp *qp, { int ret = 0; struct ib_ports_pkeys *tmp_pps; - struct ib_ports_pkeys *new_pps; - bool special_qp = (qp->qp_type == IB_QPT_SMI || - qp->qp_type == IB_QPT_GSI || - qp->qp_type >= IB_QPT_RESERVED1); + struct ib_ports_pkeys *new_pps = NULL; + struct ib_qp *real_qp = qp->real_qp; + bool special_qp = (real_qp->qp_type == IB_QPT_SMI || + real_qp->qp_type == IB_QPT_GSI || + real_qp->qp_type >= IB_QPT_RESERVED1); bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) || (qp_attr_mask & IB_QP_ALT_PATH)); - if (pps_change && !special_qp) { - mutex_lock(&qp->qp_sec->mutex); - new_pps = get_new_pps(qp, + WARN_ONCE((qp_attr_mask & IB_QP_PORT && + rdma_protocol_ib(real_qp->device, qp_attr->port_num) && + !real_qp->qp_sec), + "%s: QP security is not initialized for IB QP: %d\n", + __func__, real_qp->qp_num); + + /* The port/pkey settings are maintained only for the real QP. Open + * handles on the real QP will be in the shared_qp_list. When + * enforcing security on the real QP all the shared QPs will be + * checked as well. + */ + + if (pps_change && !special_qp && real_qp->qp_sec) { + mutex_lock(&real_qp->qp_sec->mutex); + new_pps = get_new_pps(real_qp, qp_attr, qp_attr_mask); - + if (!new_pps) { + mutex_unlock(&real_qp->qp_sec->mutex); + return -ENOMEM; + } /* Add this QP to the lists for the new port * and pkey settings before checking for permission * in case there is a concurrent cache update @@ -586,24 +624,24 @@ int ib_security_modify_qp(struct ib_qp *qp, if (!ret) ret = check_qp_port_pkey_settings(new_pps, - qp->qp_sec); + real_qp->qp_sec); } if (!ret) - ret = qp->device->modify_qp(qp->real_qp, - qp_attr, - qp_attr_mask, - udata); + ret = real_qp->device->modify_qp(real_qp, + qp_attr, + qp_attr_mask, + udata); - if (pps_change && !special_qp) { + if (new_pps) { /* Clean up the lists and free the appropriate * ports_pkeys structure. 
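The ib_security_modify_qp() rework above resolves qp->real_qp once and reads and writes port/pkey state only through it, because shared QP handles do not carry their own settings. A minimal userspace sketch of that delegation rule follows; the struct and fields are invented for illustration and are not the kernel's ib_qp.

#include <stdio.h>

struct qp {
        struct qp *real_qp;     /* points to itself for the real QP */
        int port;
};

static int qp_port(const struct qp *qp)
{
        /* Always dereference the canonical object, as
         * ib_security_modify_qp now does via qp->real_qp. */
        return qp->real_qp->port;
}

int main(void)
{
        struct qp real = { .real_qp = &real, .port = 1 };
        struct qp shared = { .real_qp = &real, .port = 0 /* stale */ };

        printf("port via shared handle: %d\n", qp_port(&shared));
        return 0;
}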
*/ if (ret) { tmp_pps = new_pps; } else { - tmp_pps = qp->qp_sec->ports_pkeys; - qp->qp_sec->ports_pkeys = new_pps; + tmp_pps = real_qp->qp_sec->ports_pkeys; + real_qp->qp_sec->ports_pkeys = new_pps; } if (tmp_pps) { @@ -611,7 +649,7 @@ int ib_security_modify_qp(struct ib_qp *qp, port_pkey_list_remove(&tmp_pps->alt); } kfree(tmp_pps); - mutex_unlock(&qp->qp_sec->mutex); + mutex_unlock(&real_qp->qp_sec->mutex); } return ret; } @@ -626,6 +664,9 @@ int ib_security_pkey_access(struct ib_device *dev, u16 pkey; int ret; + if (!rdma_protocol_ib(dev, port_num)) + return 0; + ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey); if (ret) return ret; @@ -660,6 +701,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent, { int ret; + if (!rdma_protocol_ib(agent->device, agent->port_num)) + return 0; + ret = security_ib_alloc_security(&agent->security); if (ret) return ret; @@ -685,6 +729,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent, void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) { + if (!rdma_protocol_ib(agent->device, agent->port_num)) + return; + security_ib_free_security(agent->security); if (agent->lsm_nb_reg) unregister_lsm_notifier(&agent->lsm_nb); @@ -692,20 +739,19 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index) { - int ret; - - if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed) - return -EACCES; - - ret = ib_security_pkey_access(map->agent.device, - map->agent.port_num, - pkey_index, - map->agent.security); + if (!rdma_protocol_ib(map->agent.device, map->agent.port_num)) + return 0; - if (ret) - return ret; + if (map->agent.qp->qp_type == IB_QPT_SMI) { + if (!map->agent.smp_allowed) + return -EACCES; + return 0; + } - return 0; + return ib_security_pkey_access(map->agent.device, + map->agent.port_num, + pkey_index, + map->agent.security); } #endif /* CONFIG_SECURITY_INFINIBAND */ diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index abc5ab581f82..0a1e96c25ca3 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -1262,7 +1262,6 @@ int ib_device_register_sysfs(struct ib_device *device, int ret; int i; - WARN_ON_ONCE(!device->dev.parent); ret = dev_set_name(class_dev, "%s", device->name); if (ret) return ret; diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index eb85b546e223..a22b992cde38 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -132,7 +132,7 @@ static inline struct ucma_context *_ucma_find_context(int id, ctx = idr_find(&ctx_idr, id); if (!ctx) ctx = ERR_PTR(-ENOENT); - else if (ctx->file != file) + else if (ctx->file != file || !ctx->cm_id) ctx = ERR_PTR(-EINVAL); return ctx; } @@ -218,7 +218,7 @@ static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx) return NULL; mutex_lock(&mut); - mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL); + mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL); mutex_unlock(&mut); if (mc->id < 0) goto error; @@ -456,6 +456,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, struct rdma_ucm_create_id cmd; struct rdma_ucm_create_id_resp resp; struct ucma_context *ctx; + struct rdma_cm_id *cm_id; enum ib_qp_type qp_type; int ret; @@ -476,10 +477,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, return -ENOMEM; ctx->uid = cmd.uid; - ctx->cm_id = 
rdma_create_id(current->nsproxy->net_ns, - ucma_event_handler, ctx, cmd.ps, qp_type); - if (IS_ERR(ctx->cm_id)) { - ret = PTR_ERR(ctx->cm_id); + cm_id = rdma_create_id(current->nsproxy->net_ns, + ucma_event_handler, ctx, cmd.ps, qp_type); + if (IS_ERR(cm_id)) { + ret = PTR_ERR(cm_id); goto err1; } @@ -489,14 +490,19 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, ret = -EFAULT; goto err2; } + + ctx->cm_id = cm_id; return 0; err2: - rdma_destroy_id(ctx->cm_id); + rdma_destroy_id(cm_id); err1: mutex_lock(&mut); idr_remove(&ctx_idr, ctx->id); mutex_unlock(&mut); + mutex_lock(&file->mut); + list_del(&ctx->list); + mutex_unlock(&file->mut); kfree(ctx); return ret; } @@ -626,6 +632,9 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (!rdma_addr_size_in6(&cmd.addr)) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -639,22 +648,21 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_bind cmd; - struct sockaddr *addr; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - addr = (struct sockaddr *) &cmd.addr; - if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr))) + if (cmd.reserved || !cmd.addr_size || + cmd.addr_size != rdma_addr_size_kss(&cmd.addr)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); - ret = rdma_bind_addr(ctx->cm_id, addr); + ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); ucma_put_ctx(ctx); return ret; } @@ -670,13 +678,16 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) || + !rdma_addr_size_in6(&cmd.dst_addr)) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, - (struct sockaddr *) &cmd.dst_addr, - cmd.timeout_ms); + (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } @@ -686,24 +697,23 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file, int in_len, int out_len) { struct rdma_ucm_resolve_addr cmd; - struct sockaddr *src, *dst; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - src = (struct sockaddr *) &cmd.src_addr; - dst = (struct sockaddr *) &cmd.dst_addr; - if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) || - !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst))) + if (cmd.reserved || + (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) || + !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr))) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); - ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms); + ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, + (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } @@ -904,13 +914,14 @@ static ssize_t ucma_query_path(struct ucma_context *ctx, resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY | IB_PATH_BIDIRECTIONAL; - if (rec->rec_type == SA_PATH_REC_TYPE_IB) { - ib_sa_pack_path(rec, &resp->path_data[i].path_rec); - } else { + if (rec->rec_type == 
SA_PATH_REC_TYPE_OPA) { struct sa_path_rec ib; sa_convert_path_opa_to_ib(&ib, rec); ib_sa_pack_path(&ib, &resp->path_data[i].path_rec); + + } else { + ib_sa_pack_path(rec, &resp->path_data[i].path_rec); } } @@ -1148,10 +1159,18 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (cmd.qp_state > IB_QPS_ERR) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); + if (!ctx->cm_id->device) { + ret = -EINVAL; + goto out; + } + resp.qp_attr_mask = 0; memset(&qp_attr, 0, sizeof qp_attr); qp_attr.qp_state = cmd.qp_state; @@ -1222,6 +1241,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx, if (!optlen) return -EINVAL; + if (!ctx->cm_id->device) + return -EINVAL; + memset(&sa_path, 0, sizeof(sa_path)); sa_path.rec_type = SA_PATH_REC_TYPE_IB; @@ -1293,6 +1315,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) + return -EINVAL; + optval = memdup_user((void __user *) (unsigned long) cmd.optval, cmd.optlen); if (IS_ERR(optval)) { @@ -1314,7 +1339,7 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, { struct rdma_ucm_notify cmd; struct ucma_context *ctx; - int ret; + int ret = -EINVAL; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; @@ -1323,7 +1348,9 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); - ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event); + if (ctx->cm_id->device) + ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event); + ucma_put_ctx(ctx); return ret; } @@ -1342,7 +1369,7 @@ static ssize_t ucma_process_join(struct ucma_file *file, return -ENOSPC; addr = (struct sockaddr *) &cmd->addr; - if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr))) + if (cmd->addr_size != rdma_addr_size(addr)) return -EINVAL; if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER) @@ -1377,6 +1404,10 @@ static ssize_t ucma_process_join(struct ucma_file *file, goto err3; } + mutex_lock(&mut); + idr_replace(&multicast_idr, mc, mc->id); + mutex_unlock(&mut); + mutex_unlock(&file->mut); ucma_put_ctx(ctx); return 0; @@ -1409,7 +1440,10 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file, join_cmd.response = cmd.response; join_cmd.uid = cmd.uid; join_cmd.id = cmd.id; - join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); + join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr); + if (!join_cmd.addr_size) + return -EINVAL; + join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER; memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); @@ -1425,6 +1459,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (!rdma_addr_size_kss(&cmd.addr)) + return -EINVAL; + return ucma_process_join(file, &cmd, out_len); } diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 21e60b1e2ff4..d76455edd292 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -119,20 +119,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, umem->length = size; umem->address = addr; umem->page_shift = PAGE_SHIFT; - umem->pid = get_task_pid(current, PIDTYPE_PID); - /* - * We ask for writable memory if any of the following - * access flags are set. 
"Local write" and "remote write" - * obviously require write access. "Remote atomic" can do - * things like fetch and add, which will modify memory, and - * "MW bind" can change permissions by binding a window. - */ - umem->writable = !!(access & - (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | - IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); + umem->writable = ib_access_writable(access); if (access & IB_ACCESS_ON_DEMAND) { - put_pid(umem->pid); ret = ib_umem_odp_get(context, umem, access); if (ret) { kfree(umem); @@ -148,7 +137,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, page_list = (struct page **) __get_free_page(GFP_KERNEL); if (!page_list) { - put_pid(umem->pid); kfree(umem); return ERR_PTR(-ENOMEM); } @@ -191,7 +179,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, sg_list_start = umem->sg_head.sgl; while (npages) { - ret = get_user_pages(cur_base, + ret = get_user_pages_longterm(cur_base, min_t(unsigned long, npages, PAGE_SIZE / sizeof (struct page *)), gup_flags, page_list, vma_list); @@ -231,7 +219,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, if (ret < 0) { if (need_release) __ib_umem_release(context->device, umem, 0); - put_pid(umem->pid); kfree(umem); } else current->mm->pinned_vm = locked; @@ -274,8 +261,7 @@ void ib_umem_release(struct ib_umem *umem) __ib_umem_release(umem->context->device, umem, 1); - task = get_pid_task(umem->pid, PIDTYPE_PID); - put_pid(umem->pid); + task = get_pid_task(umem->context->tgid, PIDTYPE_PID); if (!task) goto out; mm = get_task_mm(task); @@ -352,7 +338,7 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, return -EINVAL; } - ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length, + ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->npages, dst, length, offset + ib_umem_offset(umem)); if (ret < 0) diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index c1696e6084b2..6511cb21f6e2 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -229,7 +229,16 @@ static void recv_handler(struct ib_mad_agent *agent, packet->mad.hdr.status = 0; packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len; packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); - packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid); + /* + * On OPA devices it is okay to lose the upper 16 bits of LID as this + * information is obtained elsewhere. Mask off the upper 16 bits. 
+ */ + if (agent->device->port_immutable[agent->port_num].core_cap_flags & + RDMA_CORE_PORT_INTEL_OPA) + packet->mad.hdr.lid = ib_lid_be16(0xFFFF & + mad_recv_wc->wc->slid); + else + packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid); packet->mad.hdr.sl = mad_recv_wc->wc->sl; packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits; packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index; @@ -491,7 +500,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, } memset(&ah_attr, 0, sizeof ah_attr); - ah_attr.type = rdma_ah_find_type(file->port->ib_dev, + ah_attr.type = rdma_ah_find_type(agent->device, file->port->port_num); rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid)); rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 52a2cf2d83aa..f836ed1dd300 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -565,9 +565,10 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, if (f.file) fdput(f); + mutex_unlock(&file->device->xrcd_tree_mutex); + uobj_alloc_commit(&obj->uobject); - mutex_unlock(&file->device->xrcd_tree_mutex); return in_len; err_copy: @@ -606,10 +607,8 @@ ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file, uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle, file->ucontext); - if (IS_ERR(uobj)) { - mutex_unlock(&file->device->xrcd_tree_mutex); + if (IS_ERR(uobj)) return PTR_ERR(uobj); - } ret = uobj_remove_commit(uobj); return ret ?: in_len; @@ -1982,6 +1981,68 @@ static int modify_qp(struct ib_uverbs_file *file, goto release_qp; } + if ((cmd->base.attr_mask & IB_QP_AV)) { + if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) { + ret = -EINVAL; + goto release_qp; + } + + if (cmd->base.attr_mask & IB_QP_STATE && + cmd->base.qp_state == IB_QPS_RTR) { + /* We are in INIT->RTR TRANSITION (if we are not, + * this transition will be rejected in subsequent checks). + * In the INIT->RTR transition, we cannot have IB_QP_PORT set, + * but the IB_QP_STATE flag is required. + * + * Since kernel 3.14 (commit dbf727de7440), the uverbs driver, + * when IB_QP_AV is set, has required inclusion of a valid + * port number in the primary AV. (AVs are created and handled + * differently for InfiniBand and Ethernet (RoCE) ports). + * + * Check the port number included in the primary AV against + * the port number in the qp struct, which was set (and saved) + * in the RST->INIT transition. + */ + if (cmd->base.dest.port_num != qp->real_qp->port) { + ret = -EINVAL; + goto release_qp; + } + } else { + /* We are in SQD->SQD. (If we are not, this transition will + * be rejected later in the verbs layer checks). + * Check for both IB_QP_PORT and IB_QP_AV, these can be set + * together in the SQD->SQD transition. + * + * If only IB_QP_AV was set, add in IB_QP_PORT as well (the + * verbs layer driver does not track primary port changes + * resulting from path migration. Thus, in SQD, if the primary + * AV is modified, the primary port should also be modified). + * + * Note that in this transition, the IB_QP_STATE flag + * is not allowed.
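The port-validation logic being added to modify_qp() in the surrounding hunk can be summarized in a standalone sketch. All names below are invented for illustration; only the decision structure mirrors the hunk: in INIT->RTR the AV's port must equal the port saved at RST->INIT, while in SQD->SQD a combined AV+port modify must be self-consistent and a lone AV modify implies the corresponding port change.

#include <stdbool.h>
#include <stdio.h>

#define QP_STATE (1 << 0)
#define QP_AV    (1 << 1)
#define QP_PORT  (1 << 2)

/* Returns 0 on success, -1 for an invalid combination. */
static int check_av_port(unsigned *mask, int *port, int av_port,
                         bool to_rtr, int saved_port)
{
        if (!(*mask & QP_AV))
                return 0;
        if ((*mask & QP_STATE) && to_rtr)       /* INIT->RTR case */
                return av_port == saved_port ? 0 : -1;
        if ((*mask & QP_PORT) && *port != av_port)
                return -1;                      /* SQD->SQD mismatch */
        if (!(*mask & QP_PORT)) {               /* lone AV implies port */
                *mask |= QP_PORT;
                *port = av_port;
        }
        return 0;
}

int main(void)
{
        unsigned mask = QP_AV;
        int port = 0;

        if (check_av_port(&mask, &port, 2, false, 1) == 0)
                printf("mask now includes QP_PORT, port = %d\n", port);
        return 0;
}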
+ */ + if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT)) + == (IB_QP_AV | IB_QP_PORT)) && + cmd->base.port_num != cmd->base.dest.port_num) { + ret = -EINVAL; + goto release_qp; + } + if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT)) + == IB_QP_AV) { + cmd->base.attr_mask |= IB_QP_PORT; + cmd->base.port_num = cmd->base.dest.port_num; + } + } + } + + if ((cmd->base.attr_mask & IB_QP_ALT_PATH) && + (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) || + !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) || + cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) { + ret = -EINVAL; + goto release_qp; + } + attr->qp_state = cmd->base.qp_state; attr->cur_qp_state = cmd->base.cur_qp_state; attr->path_mtu = cmd->base.path_mtu; @@ -2079,8 +2140,8 @@ int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file, return -EOPNOTSUPP; if (ucore->inlen > sizeof(cmd)) { - if (ib_is_udata_cleared(ucore, sizeof(cmd), - ucore->inlen - sizeof(cmd))) + if (!ib_is_udata_cleared(ucore, sizeof(cmd), + ucore->inlen - sizeof(cmd))) return -EOPNOTSUPP; } @@ -3364,6 +3425,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, goto err_uobj; } + if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) { + err = -EINVAL; + goto err_put; + } + flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs * sizeof(union ib_flow_spec), GFP_KERNEL); if (!flow_attr) { diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index 5286ad57d903..5feb8bbeff18 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ -59,6 +59,9 @@ static int uverbs_process_attr(struct ib_device *ibdev, return 0; } + if (test_bit(attr_id, attr_bundle_h->valid_bitmap)) + return -EINVAL; + spec = &attr_spec_bucket->attrs[attr_id]; e = &elements[attr_id]; e->uattr = uattr_ptr; @@ -188,6 +191,15 @@ static int uverbs_validate_kernel_mandatory(const struct uverbs_method_spec *met return -EINVAL; } + for (; i < method_spec->num_buckets; i++) { + struct uverbs_attr_spec_hash *attr_spec_bucket = + method_spec->attr_buckets[i]; + + if (!bitmap_empty(attr_spec_bucket->mandatory_attrs_bitmask, + attr_spec_bucket->num_attrs)) + return -EINVAL; + } + return 0; } @@ -245,16 +257,13 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev, uintptr_t data[UVERBS_OPTIMIZE_USING_STACK_SZ / sizeof(uintptr_t)]; #endif - if (hdr->reserved) - return -EINVAL; - object_spec = uverbs_get_object(ib_dev, hdr->object_id); if (!object_spec) - return -EOPNOTSUPP; + return -EPROTONOSUPPORT; method_spec = uverbs_get_method(object_spec, hdr->method_id); if (!method_spec) - return -EOPNOTSUPP; + return -EPROTONOSUPPORT; if ((method_spec->flags & UVERBS_ACTION_FLAG_CREATE_ROOT) ^ !file->ucontext) return -EINVAL; @@ -310,6 +319,16 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev, err = uverbs_handle_method(buf, ctx->uattrs, hdr->num_attrs, ib_dev, file, method_spec, ctx->uverbs_attr_bundle); + + /* + * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can + * not invoke the method because the request is not supported. No + * other cases should return this code. 
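The uverbs_ioctl.c hunk above rejects a second occurrence of the same attribute id by consulting valid_bitmap before recording the attribute, instead of silently overwriting earlier state. A self-contained userspace sketch of that guard, using a hypothetical fixed-size bitmap:

#include <stdio.h>

#define MAX_ATTRS 64
#define BITS_PER_WORD (8 * sizeof(unsigned long))

static unsigned long seen[MAX_ATTRS / BITS_PER_WORD + 1];

static int process_attr(unsigned id)
{
        unsigned long bit = 1UL << (id % BITS_PER_WORD);
        unsigned long *word = &seen[id / BITS_PER_WORD];

        if (*word & bit)        /* like the test_bit() in the hunk */
                return -1;      /* -EINVAL: duplicate attribute */
        *word |= bit;
        return 0;
}

int main(void)
{
        printf("first id 7:  %d\n", process_attr(7));
        printf("second id 7: %d\n", process_attr(7));
        return 0;
}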
+ */ + if (unlikely(err == -EPROTONOSUPPORT)) { + WARN_ON_ONCE(err == -EPROTONOSUPPORT); + err = -EINVAL; + } out: #ifdef UVERBS_OPTIMIZE_USING_STACK_SZ if (ctx_size > UVERBS_OPTIMIZE_USING_STACK_SZ) @@ -348,7 +367,7 @@ long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } if (hdr.reserved) { - err = -EOPNOTSUPP; + err = -EPROTONOSUPPORT; goto out; } diff --git a/drivers/infiniband/core/uverbs_ioctl_merge.c b/drivers/infiniband/core/uverbs_ioctl_merge.c index 76ddb6564578..48a99dce976c 100644 --- a/drivers/infiniband/core/uverbs_ioctl_merge.c +++ b/drivers/infiniband/core/uverbs_ioctl_merge.c @@ -114,6 +114,7 @@ static size_t get_elements_above_id(const void **iters, short min = SHRT_MAX; const void *elem; int i, j, last_stored = -1; + unsigned int equal_min = 0; for_each_element(elem, i, j, elements, num_elements, num_offset, data_offset) { @@ -136,6 +137,10 @@ static size_t get_elements_above_id(const void **iters, */ iters[last_stored == i ? num_iters - 1 : num_iters++] = elem; last_stored = i; + if (min == GET_ID(id)) + equal_min++; + else + equal_min = 1; min = GET_ID(id); } @@ -146,15 +151,10 @@ static size_t get_elements_above_id(const void **iters, * Therefore, we need to clean the beginning of the array to make sure * all ids of final elements are equal to min. */ - for (i = num_iters - 1; i >= 0 && - GET_ID(*(u16 *)(iters[i] + id_offset)) == min; i--) - ; - - num_iters -= i + 1; - memmove(iters, iters + i + 1, sizeof(*iters) * num_iters); + memmove(iters, iters + num_iters - equal_min, sizeof(*iters) * equal_min); *min_id = min; - return num_iters; + return equal_min; } #define find_max_element_entry_id(num_elements, elements, num_objects_fld, \ @@ -322,7 +322,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me hash = kzalloc(sizeof(*hash) + ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1), sizeof(long)) + - BITS_TO_LONGS(attr_max_bucket) * sizeof(long), + BITS_TO_LONGS(attr_max_bucket + 1) * sizeof(long), GFP_KERNEL); if (!hash) { res = -ENOMEM; @@ -509,7 +509,7 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_ * first handler which != NULL. This also defines the * set of flags used for this handler. 
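The uverbs_ioctl_merge.c fix above replaces a fragile backwards re-scan with a counter (equal_min) maintained during the forward scan, then keeps exactly that many trailing entries with a single memmove. A small userspace model of the same idea, operating on plain ints rather than element iterators:

#include <stdio.h>
#include <string.h>

/* Keep only the entries equal to the running minimum, counting them
 * as they are stored (equal_min) instead of re-scanning afterwards. */
static size_t keep_min_run(int *vals, size_t n)
{
        size_t num = 0, equal_min = 0;
        int min = 0;

        for (size_t i = 0; i < n; i++) {
                if (num && vals[i] > min)
                        continue;               /* above current min */
                if (num && vals[i] == min) {
                        equal_min++;
                } else {                        /* new, smaller minimum */
                        min = vals[i];
                        equal_min = 1;
                }
                vals[num++] = vals[i];          /* stale entries linger */
        }
        /* One memmove keeps the trailing run of minima. */
        memmove(vals, vals + num - equal_min, equal_min * sizeof(*vals));
        return equal_min;
}

int main(void)
{
        int v[] = { 5, 3, 7, 3, 9, 3 };
        size_t k = keep_min_run(v, 6);

        for (size_t i = 0; i < k; i++)
                printf("%d ", v[i]);            /* prints: 3 3 3 */
        printf("\n");
        return 0;
}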
*/ - for (i = num_object_defs - 1; + for (i = num_method_defs - 1; i >= 0 && !method_defs[i]->handler; i--) ; hash->methods[min_id++] = method; diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index dc2aed6fb21b..0f70ff91276e 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -647,12 +647,21 @@ static int verify_command_mask(struct ib_device *ib_dev, __u32 command) return -1; } +static bool verify_command_idx(u32 command, bool extended) +{ + if (extended) + return command < ARRAY_SIZE(uverbs_ex_cmd_table); + + return command < ARRAY_SIZE(uverbs_cmd_table); +} + static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct ib_uverbs_file *file = filp->private_data; struct ib_device *ib_dev; struct ib_uverbs_cmd_hdr hdr; + bool extended_command; __u32 command; __u32 flags; int srcu_key; @@ -685,6 +694,15 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, } command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK; + flags = (hdr.command & + IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT; + + extended_command = flags & IB_USER_VERBS_CMD_FLAG_EXTENDED; + if (!verify_command_idx(command, extended_command)) { + ret = -EINVAL; + goto out; + } + if (verify_command_mask(ib_dev, command)) { ret = -EOPNOTSUPP; goto out; @@ -696,12 +714,8 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, goto out; } - flags = (hdr.command & - IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT; - if (!flags) { - if (command >= ARRAY_SIZE(uverbs_cmd_table) || - !uverbs_cmd_table[command]) { + if (!uverbs_cmd_table[command]) { ret = -EINVAL; goto out; } @@ -722,8 +736,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, struct ib_udata uhw; size_t written_count = count; - if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) || - !uverbs_ex_cmd_table[command]) { + if (!uverbs_ex_cmd_table[command]) { ret = -ENOSYS; goto out; } diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c index 0a98579700ec..5f9321eda1b7 100644 --- a/drivers/infiniband/core/uverbs_std_types.c +++ b/drivers/infiniband/core/uverbs_std_types.c @@ -315,7 +315,7 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev, cq->uobject = &obj->uobject; cq->comp_handler = ib_uverbs_comp_handler; cq->event_handler = ib_uverbs_cq_event_handler; - cq->cq_context = &ev_file->ev_queue; + cq->cq_context = ev_file ? 
&ev_file->ev_queue : NULL; obj->uobject.object = cq; obj->uobject.user_handle = user_handle; atomic_set(&cq->usecnt, 0); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index de57d6c11a25..feb80dbb5948 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1400,7 +1400,8 @@ int ib_close_qp(struct ib_qp *qp) spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); atomic_dec(&real_qp->usecnt); - ib_close_shared_qp_security(qp->qp_sec); + if (qp->qp_sec) + ib_close_shared_qp_security(qp->qp_sec); kfree(qp); return 0; @@ -2114,10 +2115,16 @@ static void __ib_drain_sq(struct ib_qp *qp) struct ib_cq *cq = qp->send_cq; struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; struct ib_drain_cqe sdrain; - struct ib_send_wr swr = {}, *bad_swr; + struct ib_send_wr *bad_swr; + struct ib_rdma_wr swr = { + .wr = { + .next = NULL, + { .wr_cqe = &sdrain.cqe, }, + .opcode = IB_WR_RDMA_WRITE, + }, + }; int ret; - swr.wr_cqe = &sdrain.cqe; sdrain.cqe.done = ib_drain_qp_done; init_completion(&sdrain.done); @@ -2127,7 +2134,7 @@ static void __ib_drain_sq(struct ib_qp *qp) return; } - ret = ib_post_send(qp, &swr, &bad_swr); + ret = ib_post_send(qp, &swr.wr, &bad_swr); if (ret) { WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); return; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 0d89621d9fe8..ef9135aa392c 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -394,6 +394,7 @@ int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num, ctx->idx = tbl_idx; ctx->refcnt = 1; ctx_tbl[tbl_idx] = ctx; + *context = ctx; return rc; } @@ -1179,7 +1180,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to create HW QP"); - goto fail; + goto free_umem; } } @@ -1207,6 +1208,13 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, return &qp->ib_qp; qp_destroy: bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); +free_umem: + if (udata) { + if (qp->rumem) + ib_umem_release(qp->rumem); + if (qp->sumem) + ib_umem_release(qp->sumem); + } fail: kfree(qp); return ERR_PTR(rc); @@ -1955,10 +1963,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr, wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; + /* Need unconditional fence for local invalidate + * opcode to work as expected. + */ + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + if (wr->send_flags & IB_SEND_SIGNALED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; - if (wr->send_flags & IB_SEND_FENCE) - wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; if (wr->send_flags & IB_SEND_SOLICITED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; @@ -1979,8 +1990,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr, wqe->frmr.levels = qplib_frpl->hwq.level + 1; wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; - if (wr->wr.send_flags & IB_SEND_FENCE) - wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + /* Need unconditional fence for reg_mr + * opcode to function as expected. 
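The __ib_drain_sq() change above matters because providers downcast a posted ib_send_wr to the opcode-specific wrapper; posting a bare ib_send_wr with IB_WR_RDMA_WRITE therefore lets a driver read past the end of the allocation. A userspace sketch of that container pattern, with stand-in types instead of the kernel structs:

#include <stddef.h>
#include <stdio.h>

struct send_wr { int opcode; };
struct rdma_wr { struct send_wr wr; unsigned long remote_addr; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void post(struct send_wr *wr)
{
        /* A driver seeing an RDMA opcode downcasts to the wrapper: */
        struct rdma_wr *rwr = container_of(wr, struct rdma_wr, wr);

        printf("remote_addr = %lu\n", rwr->remote_addr);
}

int main(void)
{
        struct rdma_wr swr = {
                .wr = { .opcode = 1 /* stands in for IB_WR_RDMA_WRITE */ },
                .remote_addr = 0,   /* valid storage for the downcast */
        };

        post(&swr.wr);
        return 0;
}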
+ */ + + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + if (wr->wr.send_flags & IB_SEND_SIGNALED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index e7450ea92aa9..bf811b23bc95 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1240,9 +1240,12 @@ static void bnxt_re_task(struct work_struct *work) switch (re_work->event) { case NETDEV_REGISTER: rc = bnxt_re_ib_reg(rdev); - if (rc) + if (rc) { dev_err(rdev_to_dev(rdev), "Failed to register with IB: %#x", rc); + bnxt_re_remove_one(rdev); + bnxt_re_dev_unreg(rdev); + } break; case NETDEV_UP: bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, @@ -1398,6 +1401,11 @@ static void __exit bnxt_re_mod_exit(void) list_for_each_entry(rdev, &to_be_deleted, list) { dev_info(rdev_to_dev(rdev), "Unregistering Device"); + /* + * Flush out any scheduled tasks before destroying the + * resources + */ + flush_workqueue(bnxt_re_wq); bnxt_re_dev_stop(rdev); bnxt_re_ib_unreg(rdev, true); bnxt_re_remove_one(rdev); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 2bdb1562bd21..8d91733009a4 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -457,7 +457,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, int rc; RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); - + /* Supply (log-base-2-of-host-page-size - base-page-shift) + * to bono to adjust the doorbell page sizes. + */ + req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT - + RCFW_DBR_BASE_PAGE_SHIFT); /* * VFs need not setup the HW context area, PF * shall setup this area for VF. Skipping the diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 85b16da287f9..7c85e3c4445b 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -49,6 +49,7 @@ #define RCFW_COMM_SIZE 0x104 #define RCFW_DBR_PCI_BAR_REGION 2 +#define RCFW_DBR_BASE_PAGE_SHIFT 12 #define RCFW_CMD_PREP(req, CMD, cmd_flags) \ do { \ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index e277e54a05eb..9536de8c5fb8 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -130,7 +130,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->max_pkey = le32_to_cpu(sb->max_pkeys); attr->max_inline_data = le32_to_cpu(sb->max_inline_data); - attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE; + attr->l2_db_size = (sb->l2_db_space_size + 1) * + (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); attr->max_sgid = le32_to_cpu(sb->max_gid); strlcpy(attr->fw_ver, "20.6.28.0", sizeof(attr->fw_ver)); diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h index eeb55b2db57e..480f592e5b4b 100644 --- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h +++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h @@ -1734,7 +1734,30 @@ struct cmdq_initialize_fw { #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4) #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4) #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4) - __le16 reserved16; + /* This value is (log-base-2-of-DBR-page-size - 12). + * 0 for 4KB. HW supported values are enumerated below. 
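The qplib_rcfw.c hunk above encodes the doorbell page size for firmware as log2(host page size) minus the 4 KiB base shift, so the value is 0 on 4 KiB-page hosts and grows with larger pages. A trivial sketch of the encoding, with the constant name borrowed from the patch:

#include <stdio.h>

#define DBR_BASE_PAGE_SHIFT 12  /* 4 KiB, as RCFW_DBR_BASE_PAGE_SHIFT */

static unsigned log2_dbr_pg_size(unsigned page_shift)
{
        return page_shift - DBR_BASE_PAGE_SHIFT;
}

int main(void)
{
        printf("4K pages  -> %u\n", log2_dbr_pg_size(12));  /* 0 */
        printf("64K pages -> %u\n", log2_dbr_pg_size(16));  /* 4 */
        return 0;
}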
+ */ + __le16 log2_dbr_pg_size; + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0 + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \ + CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M __le64 qpc_page_dir; __le64 mrw_page_dir; __le64 srq_page_dir; diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index be07da1997e6..6b15508ce17e 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -330,7 +330,7 @@ static void advance_oldest_read(struct t4_wq *wq) * Deal with out-of-order and/or completions that complete * prior unsignalled WRs. */ -void c4iw_flush_hw_cq(struct c4iw_cq *chp) +void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp) { struct t4_cqe *hw_cqe, *swcqe, read_cqe; struct c4iw_qp *qhp; @@ -354,6 +354,13 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp) if (qhp == NULL) goto next_cqe; + if (flush_qhp != qhp) { + spin_lock(&qhp->lock); + + if (qhp->wq.flushed == 1) + goto next_cqe; + } + if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) goto next_cqe; @@ -405,11 +412,18 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp) next_cqe: t4_hwcq_consume(&chp->cq); ret = t4_next_hw_cqe(&chp->cq, &hw_cqe); + if (qhp && flush_qhp != qhp) + spin_unlock(&qhp->lock); } } static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) { + if (DRAIN_CQE(cqe)) { + WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid); + return 0; + } + if (CQE_OPCODE(cqe) == FW_RI_TERMINATE) return 0; @@ -504,7 +518,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, /* * Special cqe for drain WR completions... 
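The cxgb4 drain-CQE rework that continues below keys off a dedicated bit in the CQE header rather than a fake opcode, using the _S (shift) / _M (mask) convention of t4.h, where V() packs a field and G() extracts it (see the t4.h hunk further down). A compilable miniature of that convention, reusing the drain-bit values from the patch:

#include <stdint.h>
#include <stdio.h>

#define CQE_DRAIN_S 10
#define CQE_DRAIN_M 0x1
#define CQE_DRAIN_V(x) ((x) << CQE_DRAIN_S)
#define CQE_DRAIN_G(x) (((x) >> CQE_DRAIN_S) & CQE_DRAIN_M)

int main(void)
{
        uint32_t header = CQE_DRAIN_V(1);   /* mark a sw drain cqe */

        printf("drain bit: %u\n", CQE_DRAIN_G(header));
        return 0;
}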
*/ - if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) { + if (DRAIN_CQE(hw_cqe)) { *cookie = CQE_DRAIN_COOKIE(hw_cqe); *cqe = *hw_cqe; goto skip_cqe; @@ -581,10 +595,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, ret = -EAGAIN; goto skip_cqe; } - if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) { + if (unlikely(!CQE_STATUS(hw_cqe) && + CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) { t4_set_wq_in_error(wq); - hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN)); - goto proc_cqe; + hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN)); } goto proc_cqe; } @@ -761,9 +775,6 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) c4iw_invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe)); break; - case C4IW_DRAIN_OPCODE: - wc->opcode = IB_WC_SEND; - break; default: pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n", CQE_OPCODE(&cqe), CQE_QPID(&cqe)); diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index fc886f81b885..99f232e3ea93 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -884,6 +884,11 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) rdev->status_page->db_off = 0; + init_completion(&rdev->rqt_compl); + init_completion(&rdev->pbl_compl); + kref_init(&rdev->rqt_kref); + kref_init(&rdev->pbl_kref); + return 0; err_free_status_page_and_wr_log: if (c4iw_wr_log && rdev->wr_log) @@ -902,13 +907,15 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) static void c4iw_rdev_close(struct c4iw_rdev *rdev) { - destroy_workqueue(rdev->free_workq); kfree(rdev->wr_log); c4iw_release_dev_ucontext(rdev, &rdev->uctx); free_page((unsigned long)rdev->status_page); c4iw_pblpool_destroy(rdev); c4iw_rqtpool_destroy(rdev); + wait_for_completion(&rdev->pbl_compl); + wait_for_completion(&rdev->rqt_compl); c4iw_ocqp_pool_destroy(rdev); + destroy_workqueue(rdev->free_workq); c4iw_destroy_resource(&rdev->resource); } diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c index 8f963df0bffc..9d25298d96fa 100644 --- a/drivers/infiniband/hw/cxgb4/ev.c +++ b/drivers/infiniband/hw/cxgb4/ev.c @@ -109,9 +109,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, if (qhp->ibqp.event_handler) (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); - spin_lock_irqsave(&chp->comp_handler_lock, flag); - (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); - spin_unlock_irqrestore(&chp->comp_handler_lock, flag); + if (t4_clear_cq_armed(&chp->cq)) { + spin_lock_irqsave(&chp->comp_handler_lock, flag); + (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); + spin_unlock_irqrestore(&chp->comp_handler_lock, flag); + } } void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 819a30635d53..f52779871d04 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -185,6 +185,10 @@ struct c4iw_rdev { struct wr_log_entry *wr_log; int wr_log_size; struct workqueue_struct *free_workq; + struct completion rqt_compl; + struct completion pbl_compl; + struct kref rqt_kref; + struct kref pbl_kref; }; static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) @@ -631,8 +635,6 @@ static inline int to_ib_qp_state(int c4iw_qp_state) return IB_QPS_ERR; } -#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN - static inline u32 c4iw_ib_to_tpt_access(int a) { return (a & IB_ACCESS_REMOTE_WRITE ? 
FW_RI_MEM_ACCESS_REM_WRITE : 0) | @@ -991,7 +993,7 @@ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size); u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size); void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size); int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb); -void c4iw_flush_hw_cq(struct c4iw_cq *chp); +void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp); void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count); int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp); int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count); diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index c2fba76becd4..b5784cb145f5 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -720,7 +720,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr) { struct c4iw_mr *mhp = to_c4iw_mr(ibmr); - if (unlikely(mhp->mpl_len == mhp->max_mpl_len)) + if (unlikely(mhp->mpl_len == mhp->attr.pbl_size)) return -ENOMEM; mhp->mpl[mhp->mpl_len++] = addr; diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index cb7fc0d35d1d..a8a8f65a1e51 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -794,21 +794,57 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc) return 0; } -static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) +static int ib_to_fw_opcode(int ib_opcode) +{ + int opcode; + + switch (ib_opcode) { + case IB_WR_SEND_WITH_INV: + opcode = FW_RI_SEND_WITH_INV; + break; + case IB_WR_SEND: + opcode = FW_RI_SEND; + break; + case IB_WR_RDMA_WRITE: + opcode = FW_RI_RDMA_WRITE; + break; + case IB_WR_RDMA_READ: + case IB_WR_RDMA_READ_WITH_INV: + opcode = FW_RI_READ_REQ; + break; + case IB_WR_REG_MR: + opcode = FW_RI_FAST_REGISTER; + break; + case IB_WR_LOCAL_INV: + opcode = FW_RI_LOCAL_INV; + break; + default: + opcode = -EINVAL; + } + return opcode; +} + +static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) { struct t4_cqe cqe = {}; struct c4iw_cq *schp; unsigned long flag; struct t4_cq *cq; + int opcode; schp = to_c4iw_cq(qhp->ibqp.send_cq); cq = &schp->cq; + opcode = ib_to_fw_opcode(wr->opcode); + if (opcode < 0) + return opcode; + cqe.u.drain_cookie = wr->wr_id; cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | - CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | + CQE_OPCODE_V(opcode) | CQE_TYPE_V(1) | CQE_SWCQE_V(1) | + CQE_DRAIN_V(1) | CQE_QPID_V(qhp->wq.sq.qid)); spin_lock_irqsave(&schp->lock, flag); @@ -817,10 +853,29 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) t4_swcq_produce(cq); spin_unlock_irqrestore(&schp->lock, flag); - spin_lock_irqsave(&schp->comp_handler_lock, flag); - (*schp->ibcq.comp_handler)(&schp->ibcq, - schp->ibcq.cq_context); - spin_unlock_irqrestore(&schp->comp_handler_lock, flag); + if (t4_clear_cq_armed(&schp->cq)) { + spin_lock_irqsave(&schp->comp_handler_lock, flag); + (*schp->ibcq.comp_handler)(&schp->ibcq, + schp->ibcq.cq_context); + spin_unlock_irqrestore(&schp->comp_handler_lock, flag); + } + return 0; +} + +static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + int ret = 0; + + while (wr) { + ret = complete_sq_drain_wr(qhp, wr); + if (ret) { + *bad_wr = wr; + break; + } + wr = wr->next; + } + return ret; } static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) @@ -835,9 +890,10 @@ static void 
complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) cqe.u.drain_cookie = wr->wr_id; cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | - CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | + CQE_OPCODE_V(FW_RI_SEND) | CQE_TYPE_V(0) | CQE_SWCQE_V(1) | + CQE_DRAIN_V(1) | CQE_QPID_V(qhp->wq.sq.qid)); spin_lock_irqsave(&rchp->lock, flag); @@ -846,10 +902,20 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) t4_swcq_produce(cq); spin_unlock_irqrestore(&rchp->lock, flag); - spin_lock_irqsave(&rchp->comp_handler_lock, flag); - (*rchp->ibcq.comp_handler)(&rchp->ibcq, - rchp->ibcq.cq_context); - spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); + if (t4_clear_cq_armed(&rchp->cq)) { + spin_lock_irqsave(&rchp->comp_handler_lock, flag); + (*rchp->ibcq.comp_handler)(&rchp->ibcq, + rchp->ibcq.cq_context); + spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); + } +} + +static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr) +{ + while (wr) { + complete_rq_drain_wr(qhp, wr); + wr = wr->next; + } } int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, @@ -868,9 +934,14 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, qhp = to_c4iw_qp(ibqp); spin_lock_irqsave(&qhp->lock, flag); - if (t4_wq_in_error(&qhp->wq)) { + + /* + * If the qp has been flushed, then just insert a special + * drain cqe. + */ + if (qhp->wq.flushed) { spin_unlock_irqrestore(&qhp->lock, flag); - complete_sq_drain_wr(qhp, wr); + err = complete_sq_drain_wrs(qhp, wr, bad_wr); return err; } num_wrs = t4_sq_avail(&qhp->wq); @@ -1012,9 +1083,14 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, qhp = to_c4iw_qp(ibqp); spin_lock_irqsave(&qhp->lock, flag); - if (t4_wq_in_error(&qhp->wq)) { + + /* + * If the qp has been flushed, then just insert a special + * drain cqe. + */ + if (qhp->wq.flushed) { spin_unlock_irqrestore(&qhp->lock, flag); - complete_rq_drain_wr(qhp, wr); + complete_rq_drain_wrs(qhp, wr); return err; } num_wrs = t4_rq_avail(&qhp->wq); @@ -1257,48 +1333,51 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); - /* locking hierarchy: cq lock first, then qp lock. */ + /* locking hierarchy: cqs lock first, then qp lock. */ spin_lock_irqsave(&rchp->lock, flag); + if (schp != rchp) + spin_lock(&schp->lock); spin_lock(&qhp->lock); if (qhp->wq.flushed) { spin_unlock(&qhp->lock); + if (schp != rchp) + spin_unlock(&schp->lock); spin_unlock_irqrestore(&rchp->lock, flag); return; } qhp->wq.flushed = 1; + t4_set_wq_in_error(&qhp->wq); - c4iw_flush_hw_cq(rchp); + c4iw_flush_hw_cq(rchp, qhp); c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); - spin_unlock(&qhp->lock); - spin_unlock_irqrestore(&rchp->lock, flag); - /* locking hierarchy: cq lock first, then qp lock. 
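The __flush_qp() rework in the surrounding hunks replaces the old take-one-CQ-lock-at-a-time scheme with a single ordering: both CQ locks first (skipping the second when send and receive share a CQ), then the QP lock, so concurrent flushers cannot deadlock. A userspace sketch of that ordering, with pthread mutexes standing in for the kernel spinlocks:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rcq = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t scq = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t qp  = PTHREAD_MUTEX_INITIALIZER;

static void flush_qp(pthread_mutex_t *r, pthread_mutex_t *s)
{
        pthread_mutex_lock(r);
        if (s != r)
                pthread_mutex_lock(s);  /* cqs first... */
        pthread_mutex_lock(&qp);        /* ...then the qp */

        /* flush work would happen here */

        pthread_mutex_unlock(&qp);
        if (s != r)
                pthread_mutex_unlock(s);
        pthread_mutex_unlock(r);
}

int main(void)
{
        flush_qp(&rcq, &scq);
        puts("flushed without deadlock");
        return 0;
}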
*/ - spin_lock_irqsave(&schp->lock, flag); - spin_lock(&qhp->lock); if (schp != rchp) - c4iw_flush_hw_cq(schp); + c4iw_flush_hw_cq(schp, qhp); sq_flushed = c4iw_flush_sq(qhp); + spin_unlock(&qhp->lock); - spin_unlock_irqrestore(&schp->lock, flag); + if (schp != rchp) + spin_unlock(&schp->lock); + spin_unlock_irqrestore(&rchp->lock, flag); if (schp == rchp) { - if (t4_clear_cq_armed(&rchp->cq) && - (rq_flushed || sq_flushed)) { + if ((rq_flushed || sq_flushed) && + t4_clear_cq_armed(&rchp->cq)) { spin_lock_irqsave(&rchp->comp_handler_lock, flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); } } else { - if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) { + if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) { spin_lock_irqsave(&rchp->comp_handler_lock, flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); } - if (t4_clear_cq_armed(&schp->cq) && sq_flushed) { + if (sq_flushed && t4_clear_cq_armed(&schp->cq)) { spin_lock_irqsave(&schp->comp_handler_lock, flag); (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); @@ -1315,8 +1394,8 @@ static void flush_qp(struct c4iw_qp *qhp) rchp = to_c4iw_cq(qhp->ibqp.recv_cq); schp = to_c4iw_cq(qhp->ibqp.send_cq); - t4_set_wq_in_error(&qhp->wq); if (qhp->ibqp.uobject) { + t4_set_wq_in_error(&qhp->wq); t4_set_cq_in_error(&rchp->cq); spin_lock_irqsave(&rchp->comp_handler_lock, flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c index 8ff0cbe5cb16..755a77a9178b 100644 --- a/drivers/infiniband/hw/cxgb4/resource.c +++ b/drivers/infiniband/hw/cxgb4/resource.c @@ -260,12 +260,22 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size) rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT); if (rdev->stats.pbl.cur > rdev->stats.pbl.max) rdev->stats.pbl.max = rdev->stats.pbl.cur; + kref_get(&rdev->pbl_kref); } else rdev->stats.pbl.fail++; mutex_unlock(&rdev->stats.lock); return (u32)addr; } +static void destroy_pblpool(struct kref *kref) +{ + struct c4iw_rdev *rdev; + + rdev = container_of(kref, struct c4iw_rdev, pbl_kref); + gen_pool_destroy(rdev->pbl_pool); + complete(&rdev->pbl_compl); +} + void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size) { pr_debug("%s addr 0x%x size %d\n", __func__, addr, size); @@ -273,6 +283,7 @@ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size) rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size); + kref_put(&rdev->pbl_kref, destroy_pblpool); } int c4iw_pblpool_create(struct c4iw_rdev *rdev) @@ -310,7 +321,7 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev) void c4iw_pblpool_destroy(struct c4iw_rdev *rdev) { - gen_pool_destroy(rdev->pbl_pool); + kref_put(&rdev->pbl_kref, destroy_pblpool); } /* @@ -331,12 +342,22 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT); if (rdev->stats.rqt.cur > rdev->stats.rqt.max) rdev->stats.rqt.max = rdev->stats.rqt.cur; + kref_get(&rdev->rqt_kref); } else rdev->stats.rqt.fail++; mutex_unlock(&rdev->stats.lock); return (u32)addr; } +static void destroy_rqtpool(struct kref *kref) +{ + struct c4iw_rdev *rdev; + + rdev = container_of(kref, struct c4iw_rdev, rqt_kref); + gen_pool_destroy(rdev->rqt_pool); + complete(&rdev->rqt_compl); +} + void 
c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size) { pr_debug("%s addr 0x%x size %d\n", __func__, addr, size << 6); @@ -344,6 +365,7 @@ void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size) rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6); + kref_put(&rdev->rqt_kref, destroy_rqtpool); } int c4iw_rqtpool_create(struct c4iw_rdev *rdev) @@ -380,7 +402,7 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev) { - gen_pool_destroy(rdev->rqt_pool); + kref_put(&rdev->rqt_kref, destroy_rqtpool); } /* diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index e765c00303cd..80b390e861dc 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h @@ -171,7 +171,7 @@ struct t4_cqe { __be32 msn; } rcqe; struct { - u32 stag; + __be32 stag; u16 nada2; u16 cidx; } scqe; @@ -197,6 +197,11 @@ struct t4_cqe { #define CQE_SWCQE_G(x) ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M) #define CQE_SWCQE_V(x) ((x)<<CQE_SWCQE_S) +#define CQE_DRAIN_S 10 +#define CQE_DRAIN_M 0x1 +#define CQE_DRAIN_G(x) ((((x) >> CQE_DRAIN_S)) & CQE_DRAIN_M) +#define CQE_DRAIN_V(x) ((x)<<CQE_DRAIN_S) + #define CQE_STATUS_S 5 #define CQE_STATUS_M 0x1F #define CQE_STATUS_G(x) ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M) @@ -213,6 +218,7 @@ struct t4_cqe { #define CQE_OPCODE_V(x) ((x)<<CQE_OPCODE_S) #define SW_CQE(x) (CQE_SWCQE_G(be32_to_cpu((x)->header))) +#define DRAIN_CQE(x) (CQE_DRAIN_G(be32_to_cpu((x)->header))) #define CQE_QPID(x) (CQE_QPID_G(be32_to_cpu((x)->header))) #define CQE_TYPE(x) (CQE_TYPE_G(be32_to_cpu((x)->header))) #define SQ_TYPE(x) (CQE_TYPE((x))) diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h index 010c709ba3bb..58c531db4f4a 100644 --- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h @@ -675,8 +675,8 @@ struct fw_ri_fr_nsmr_tpte_wr { __u16 wrid; __u8 r1[3]; __u8 len16; - __u32 r2; - __u32 stag; + __be32 r2; + __be32 stag; struct fw_ri_tpte tpte; __u64 pbl[2]; }; diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index a97055dd4fbd..b5fab55cc275 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -412,7 +412,6 @@ static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix) static int get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix) { - int ret; cpumask_var_t diff; struct hfi1_affinity_node *entry; struct cpu_mask_set *set = NULL; @@ -424,10 +423,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd, extra[0] = '\0'; cpumask_clear(&msix->mask); - ret = zalloc_cpumask_var(&diff, GFP_KERNEL); - if (!ret) - return -ENOMEM; - entry = node_affinity_lookup(dd->node); switch (msix->type) { @@ -458,6 +453,9 @@ static int get_irq_affinity(struct hfi1_devdata *dd, * finds its CPU here.
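 *
 * The scratch cpumask used for that search is now allocated only on
 * this path and released before leaving it, e.g. (mirroring the hunk
 * below):
 *
 *	if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_andnot(diff, &set->mask, &set->used);
 *	cpu = cpumask_first(diff);
 *	free_cpumask_var(diff);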
*/ if (cpu == -1 && set) { + if (!zalloc_cpumask_var(&diff, GFP_KERNEL)) + return -ENOMEM; + if (cpumask_equal(&set->mask, &set->used)) { /* * We've used up all the CPUs, bump up the generation @@ -469,6 +467,8 @@ static int get_irq_affinity(struct hfi1_devdata *dd, cpumask_andnot(diff, &set->mask, &set->used); cpu = cpumask_first(diff); cpumask_set_cpu(cpu, &set->used); + + free_cpumask_var(diff); } cpumask_set_cpu(cpu, &msix->mask); @@ -482,7 +482,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd, hfi1_setup_sdma_notifier(msix); } - free_cpumask_var(diff); return 0; } diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 0be42787759f..33cf1734c4e5 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -5945,6 +5945,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd, u64 status; u32 sw_index; int i = 0; + unsigned long irq_flags; sw_index = dd->hw_to_sw[hw_context]; if (sw_index >= dd->num_send_contexts) { @@ -5954,10 +5955,12 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd, return; } sci = &dd->send_contexts[sw_index]; + spin_lock_irqsave(&dd->sc_lock, irq_flags); sc = sci->sc; if (!sc) { dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__, sw_index, hw_context); + spin_unlock_irqrestore(&dd->sc_lock, irq_flags); return; } @@ -5979,6 +5982,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd, */ if (sc->type != SC_USER) queue_work(dd->pport->hfi1_wq, &sc->halt_work); + spin_unlock_irqrestore(&dd->sc_lock, irq_flags); /* * Update the counters for the corresponding status bits. @@ -6825,7 +6829,7 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd) } rcvmask = HFI1_RCVCTRL_CTXT_ENB; /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */ - rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ? + rcvmask |= rcd->rcvhdrtail_kvaddr ? 
HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS; hfi1_rcvctrl(dd, rcvmask, rcd); hfi1_rcd_put(rcd); @@ -8294,8 +8298,8 @@ static irqreturn_t sdma_interrupt(int irq, void *data) /* handle the interrupt(s) */ sdma_engine_interrupt(sde, status); } else { - dd_dev_err_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n", - sde->this_idx); + dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n", + sde->this_idx); } return IRQ_HANDLED; } @@ -8337,7 +8341,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd) u32 tail; int present; - if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) + if (!rcd->rcvhdrtail_kvaddr) present = (rcd->seq_cnt == rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd)))); else /* is RDMA rtail */ @@ -9952,7 +9956,7 @@ int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which) goto unimplemented; case HFI1_IB_CFG_OP_VLS: - val = ppd->vls_operational; + val = ppd->actual_vls_operational; break; case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */ val = VL_ARB_HIGH_PRIO_TABLE_SIZE; @@ -11809,7 +11813,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, /* reset the tail and hdr addresses, and sequence count */ write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR, rcd->rcvhdrq_dma); - if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) + if (rcd->rcvhdrtail_kvaddr) write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, rcd->rcvhdrqtailaddr_dma); rcd->seq_cnt = 1; @@ -11889,7 +11893,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK; if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK; - if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma) + if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr) rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK; if (op & HFI1_RCVCTRL_TAILUPD_DIS) { /* See comment on RcvCtxtCtrl.TailUpd above */ @@ -12967,7 +12971,14 @@ static void disable_intx(struct pci_dev *pdev) pci_intx(pdev, 0); } -static void clean_up_interrupts(struct hfi1_devdata *dd) +/** + * hfi1_clean_up_interrupts() - Free all IRQ resources + * @dd: valid device data structure + * + * Free the MSI or INTx IRQs and associated PCI resources, + * if they have been allocated.
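+ *
+ * For example, shutdown_device() later in this patch masks the
+ * interrupts and then releases them in one step:
+ *
+ *	set_intr_state(dd, 0);
+ *	hfi1_clean_up_interrupts(dd);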
+ */ +void hfi1_clean_up_interrupts(struct hfi1_devdata *dd) { int i; @@ -13074,7 +13085,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd) first_sdma = last_general; last_sdma = first_sdma + dd->num_sdma; first_rx = last_sdma; - last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT; + last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts; /* VNIC MSIx interrupts get mapped when VNIC contexts are created */ dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues; @@ -13294,8 +13305,9 @@ static int set_up_interrupts(struct hfi1_devdata *dd) * slow source, SDMACleanupDone) * N interrupts - one per used SDMA engine * M interrupt - one per kernel receive context + * V interrupt - one for each VNIC context */ - total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT; + total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts; /* ask for MSI-X interrupts */ request = request_msix(dd, total); @@ -13343,7 +13355,7 @@ static int set_up_interrupts(struct hfi1_devdata *dd) return 0; fail: - clean_up_interrupts(dd); + hfi1_clean_up_interrupts(dd); return ret; } @@ -13356,10 +13368,12 @@ static int set_up_interrupts(struct hfi1_devdata *dd) * in array of contexts * freectxts - number of free user contexts * num_send_contexts - number of PIO send contexts being used + * num_vnic_contexts - number of contexts reserved for VNIC */ static int set_up_context_variables(struct hfi1_devdata *dd) { unsigned long num_kernel_contexts; + u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT; int total_contexts; int ret; unsigned ngroups; @@ -13393,6 +13407,14 @@ static int set_up_context_variables(struct hfi1_devdata *dd) num_kernel_contexts); num_kernel_contexts = dd->chip_send_contexts - num_vls - 1; } + + /* Accommodate VNIC contexts if possible */ + if ((num_kernel_contexts + num_vnic_contexts) > dd->chip_rcv_contexts) { + dd_dev_err(dd, "No receive contexts available for VNIC\n"); + num_vnic_contexts = 0; + } + total_contexts = num_kernel_contexts + num_vnic_contexts; + /* * User contexts: * - default to 1 user context per real (non-HT) CPU core if @@ -13402,19 +13424,16 @@ static int set_up_context_variables(struct hfi1_devdata *dd) num_user_contexts = cpumask_weight(&node_affinity.real_cpu_mask); - total_contexts = num_kernel_contexts + num_user_contexts; - /* * Adjust the counts given a global max. 
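 *
 * For example, with (say) 160 chip receive contexts and 16 kernel plus
 * 2 VNIC contexts already reserved, at most 160 - (16 + 2) = 142 user
 * contexts can remain; a larger request is clamped here.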
*/ - if (total_contexts > dd->chip_rcv_contexts) { + if (total_contexts + num_user_contexts > dd->chip_rcv_contexts) { dd_dev_err(dd, "Reducing # user receive contexts to: %d, from %d\n", - (int)(dd->chip_rcv_contexts - num_kernel_contexts), + (int)(dd->chip_rcv_contexts - total_contexts), (int)num_user_contexts); - num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts; /* recalculate */ - total_contexts = num_kernel_contexts + num_user_contexts; + num_user_contexts = dd->chip_rcv_contexts - total_contexts; } /* each user context requires an entry in the RMT */ @@ -13427,25 +13446,24 @@ static int set_up_context_variables(struct hfi1_devdata *dd) user_rmt_reduced); /* recalculate */ num_user_contexts = user_rmt_reduced; - total_contexts = num_kernel_contexts + num_user_contexts; } - /* Accommodate VNIC contexts */ - if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts) - total_contexts += HFI1_NUM_VNIC_CTXT; + total_contexts += num_user_contexts; /* the first N are kernel contexts, the rest are user/vnic contexts */ dd->num_rcv_contexts = total_contexts; dd->n_krcv_queues = num_kernel_contexts; dd->first_dyn_alloc_ctxt = num_kernel_contexts; + dd->num_vnic_contexts = num_vnic_contexts; dd->num_user_contexts = num_user_contexts; dd->freectxts = num_user_contexts; dd_dev_info(dd, - "rcv contexts: chip %d, used %d (kernel %d, user %d)\n", + "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n", (int)dd->chip_rcv_contexts, (int)dd->num_rcv_contexts, (int)dd->n_krcv_queues, - (int)dd->num_rcv_contexts - dd->n_krcv_queues); + dd->num_vnic_contexts, + dd->num_user_contexts); /* * Receive array allocation: @@ -14763,7 +14781,6 @@ void hfi1_start_cleanup(struct hfi1_devdata *dd) aspm_exit(dd); free_cntrs(dd); free_rcverr(dd); - clean_up_interrupts(dd); finish_chip_resources(dd); } @@ -15222,7 +15239,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, bail_free_cntrs: free_cntrs(dd); bail_clear_intr: - clean_up_interrupts(dd); + hfi1_clean_up_interrupts(dd); bail_cleanup: hfi1_pcie_ddcleanup(dd); bail_free: diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c index 36ae1fd86502..f661b387e916 100644 --- a/drivers/infiniband/hw/hfi1/debugfs.c +++ b/drivers/infiniband/hw/hfi1/debugfs.c @@ -1179,7 +1179,8 @@ DEBUGFS_FILE_OPS(fault_stats); static void fault_exit_opcode_debugfs(struct hfi1_ibdev *ibd) { - debugfs_remove_recursive(ibd->fault_opcode->dir); + if (ibd->fault_opcode) + debugfs_remove_recursive(ibd->fault_opcode->dir); kfree(ibd->fault_opcode); ibd->fault_opcode = NULL; } @@ -1207,6 +1208,7 @@ static int fault_init_opcode_debugfs(struct hfi1_ibdev *ibd) &ibd->fault_opcode->attr); if (IS_ERR(ibd->fault_opcode->dir)) { kfree(ibd->fault_opcode); + ibd->fault_opcode = NULL; return -ENOENT; } @@ -1230,7 +1232,8 @@ static int fault_init_opcode_debugfs(struct hfi1_ibdev *ibd) static void fault_exit_packet_debugfs(struct hfi1_ibdev *ibd) { - debugfs_remove_recursive(ibd->fault_packet->dir); + if (ibd->fault_packet) + debugfs_remove_recursive(ibd->fault_packet->dir); kfree(ibd->fault_packet); ibd->fault_packet = NULL; } @@ -1256,6 +1259,7 @@ static int fault_init_packet_debugfs(struct hfi1_ibdev *ibd) &ibd->fault_opcode->attr); if (IS_ERR(ibd->fault_packet->dir)) { kfree(ibd->fault_packet); + ibd->fault_packet = NULL; return -ENOENT; } diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 7372cc00cb2d..72c836b826ca 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ 
b/drivers/infiniband/hw/hfi1/driver.c @@ -437,31 +437,43 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt, bool do_cnp) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); + struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct ib_other_headers *ohdr = pkt->ohdr; struct ib_grh *grh = pkt->grh; u32 rqpn = 0, bth1; - u16 pkey, rlid, dlid = ib_get_dlid(pkt->hdr); + u16 pkey; + u32 rlid, slid, dlid = 0; u8 hdr_type, sc, svc_type; bool is_mcast = false; + /* can be called from prescan */ if (pkt->etype == RHF_RCV_TYPE_BYPASS) { is_mcast = hfi1_is_16B_mcast(dlid); pkey = hfi1_16B_get_pkey(pkt->hdr); sc = hfi1_16B_get_sc(pkt->hdr); + dlid = hfi1_16B_get_dlid(pkt->hdr); + slid = hfi1_16B_get_slid(pkt->hdr); hdr_type = HFI1_PKT_TYPE_16B; } else { is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) && (dlid != be16_to_cpu(IB_LID_PERMISSIVE)); pkey = ib_bth_get_pkey(ohdr); sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf); + dlid = ib_get_dlid(pkt->hdr); + slid = ib_get_slid(pkt->hdr); hdr_type = HFI1_PKT_TYPE_9B; } switch (qp->ibqp.qp_type) { + case IB_QPT_UD: + dlid = ppd->lid; + rlid = slid; + rqpn = ib_get_sqpn(pkt->ohdr); + svc_type = IB_CC_SVCTYPE_UD; + break; case IB_QPT_SMI: case IB_QPT_GSI: - case IB_QPT_UD: - rlid = ib_get_slid(pkt->hdr); + rlid = slid; rqpn = ib_get_sqpn(pkt->ohdr); svc_type = IB_CC_SVCTYPE_UD; break; @@ -486,7 +498,6 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt, dlid, rlid, sc, grh); if (!is_mcast && (bth1 & IB_BECN_SMASK)) { - struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u32 lqpn = bth1 & RVT_QPN_MASK; u8 sl = ibp->sc_to_sl[sc]; diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index d9a1e9893136..9abc5a9c47a0 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -191,9 +191,6 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) if (!atomic_inc_not_zero(&dd->user_refcount)) return -ENXIO; - /* Just take a ref now. Not all opens result in a context assign */ - kobject_get(&dd->kobj); - /* The real work is performed later in assign_ctxt() */ fd = kzalloc(sizeof(*fd), GFP_KERNEL); @@ -203,6 +200,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) fd->mm = current->mm; mmgrab(fd->mm); fd->dd = dd; + kobject_get(&fd->dd->kobj); fp->private_data = fd; } else { fp->private_data = NULL; @@ -624,7 +622,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma) ret = -EINVAL; goto done; } - if (flags & VM_WRITE) { + if ((flags & VM_WRITE) || !uctxt->rcvhdrtail_kvaddr) { ret = -EPERM; goto done; } @@ -809,8 +807,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) * checks to default and disable the send context. 
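 *
 * The order below matters: the send context is disabled first, and only
 * then are its integrity checks reset:
 *
 *	sc_disable(uctxt->sc);
 *	set_pio_integrity(uctxt->sc);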
*/ if (uctxt->sc) { - set_pio_integrity(uctxt->sc); sc_disable(uctxt->sc); + set_pio_integrity(uctxt->sc); } hfi1_free_ctxt_rcv_groups(uctxt); @@ -881,11 +879,11 @@ static int complete_subctxt(struct hfi1_filedata *fd) } if (ret) { - hfi1_rcd_put(fd->uctxt); - fd->uctxt = NULL; spin_lock_irqsave(&fd->dd->uctxt_lock, flags); __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts); spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags); + hfi1_rcd_put(fd->uctxt); + fd->uctxt = NULL; } return ret; diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 3ac9c307a285..13a7bcaa58e6 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -1047,6 +1047,8 @@ struct hfi1_devdata { u64 z_send_schedule; u64 __percpu *send_schedule; + /* number of reserved contexts for VNIC usage */ + u16 num_vnic_contexts; /* number of receive contexts in use by the driver */ u32 num_rcv_contexts; /* number of pio send contexts in use by the driver */ @@ -1127,7 +1129,6 @@ struct hfi1_devdata { u16 pcie_lnkctl; u16 pcie_devctl2; u32 pci_msix0; - u32 pci_lnkctl3; u32 pci_tph2; /* @@ -1522,13 +1523,13 @@ void set_link_ipg(struct hfi1_pportdata *ppd); void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn, u32 rqpn, u8 svc_type); void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn, - u32 pkey, u32 slid, u32 dlid, u8 sc5, + u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh); void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp, - u32 remote_qpn, u32 pkey, u32 slid, u32 dlid, + u32 remote_qpn, u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh); typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp, - u32 remote_qpn, u32 pkey, u32 slid, u32 dlid, + u32 remote_qpn, u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh); /* We support only two types - 9B and 16B for now */ @@ -1850,6 +1851,7 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd) #define HFI1_HAS_SDMA_TIMEOUT 0x8 #define HFI1_HAS_SEND_DMA 0x10 /* Supports Send DMA */ #define HFI1_FORCED_FREEZE 0x80 /* driver forced freeze mode */ +#define HFI1_SHUTDOWN 0x100 /* device is shutting down */ /* IB dword length mask in PBC (lower 11 bits); same for all chips */ #define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1) @@ -1953,6 +1955,7 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd); int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len); int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent); +void hfi1_clean_up_interrupts(struct hfi1_devdata *dd); void hfi1_pcie_cleanup(struct pci_dev *pdev); int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev); void hfi1_pcie_ddcleanup(struct hfi1_devdata *); @@ -2429,7 +2432,7 @@ static inline void hfi1_make_16b_hdr(struct hfi1_16b_header *hdr, ((slid >> OPA_16B_SLID_SHIFT) << OPA_16B_SLID_HIGH_SHIFT); lrh2 = (lrh2 & ~OPA_16B_DLID_MASK) | ((dlid >> OPA_16B_DLID_SHIFT) << OPA_16B_DLID_HIGH_SHIFT); - lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | (pkey << OPA_16B_PKEY_SHIFT); + lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | ((u32)pkey << OPA_16B_PKEY_SHIFT); lrh2 = (lrh2 & ~OPA_16B_L4_MASK) | l4; hdr->lrh[0] = lrh0; diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index fba77001c3a7..ee5cbdfeb3ab 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -88,9 +88,9 @@ * pio buffers per ctxt, etc.) Zero means use one user context per CPU. 
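 *
 * A negative value (the default, -1) falls back to one user context per
 * real (non-HT) CPU core; set_up_context_variables() does essentially:
 *
 *	if (num_user_contexts < 0)
 *		num_user_contexts =
 *			cpumask_weight(&node_affinity.real_cpu_mask);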
*/ int num_user_contexts = -1; -module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO); +module_param_named(num_user_contexts, num_user_contexts, int, 0444); MODULE_PARM_DESC( - num_user_contexts, "Set max number of user contexts to use"); + num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)"); uint krcvqs[RXE_NUM_DATA_VL]; int krcvqsset; @@ -1029,6 +1029,10 @@ static void shutdown_device(struct hfi1_devdata *dd) unsigned pidx; int i; + if (dd->flags & HFI1_SHUTDOWN) + return; + dd->flags |= HFI1_SHUTDOWN; + for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; @@ -1039,8 +1043,9 @@ static void shutdown_device(struct hfi1_devdata *dd) } dd->flags &= ~HFI1_INITTED; - /* mask interrupts, but not errors */ + /* mask and clean up interrupts, but not errors */ set_intr_state(dd, 0); + hfi1_clean_up_interrupts(dd); for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; @@ -1233,6 +1238,8 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) return ERR_PTR(-ENOMEM); dd->num_pports = nports; dd->pport = (struct hfi1_pportdata *)(dd + 1); + dd->pcidev = pdev; + pci_set_drvdata(pdev, dd); INIT_LIST_HEAD(&dd->list); idr_preload(GFP_KERNEL); @@ -1350,6 +1357,7 @@ void hfi1_disable_after_error(struct hfi1_devdata *dd) static void remove_one(struct pci_dev *); static int init_one(struct pci_dev *, const struct pci_device_id *); +static void shutdown_one(struct pci_dev *); #define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: " #define PFX DRIVER_NAME ": " @@ -1366,6 +1374,7 @@ static struct pci_driver hfi1_pci_driver = { .name = DRIVER_NAME, .probe = init_one, .remove = remove_one, + .shutdown = shutdown_one, .id_table = hfi1_pci_tbl, .err_handler = &hfi1_pci_err_handler, }; @@ -1696,6 +1705,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j); if (initfail || ret) { + hfi1_clean_up_interrupts(dd); stop_timers(dd); flush_workqueue(ib_wq); for (pidx = 0; pidx < dd->num_pports; ++pidx) { @@ -1776,6 +1786,13 @@ static void remove_one(struct pci_dev *pdev) postinit_cleanup(dd); } +static void shutdown_one(struct pci_dev *pdev) +{ + struct hfi1_devdata *dd = pci_get_drvdata(pdev); + + shutdown_device(dd); +} + /** * hfi1_create_rcvhdrq - create a receive header queue * @dd: the hfi1_ib device @@ -1791,7 +1808,6 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) u64 reg; if (!rcd->rcvhdrq) { - dma_addr_t dma_hdrqtail; gfp_t gfp_flags; /* @@ -1817,13 +1833,13 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) goto bail; } - if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) { + if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || + HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) { rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent( - &dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail, - gfp_flags); + &dd->pcidev->dev, PAGE_SIZE, + &rcd->rcvhdrqtailaddr_dma, gfp_flags); if (!rcd->rcvhdrtail_kvaddr) goto bail_free; - rcd->rcvhdrqtailaddr_dma = dma_hdrqtail; } rcd->rcvhdrq_size = amt; diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index f4c0ffc040cc..07b80faf1675 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -4293,7 +4293,6 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp, const struct ib_wc *in_wc) { struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - u16 slid = 
ib_lid_cpu16(in_wc->slid); u16 pkey; if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys)) @@ -4320,7 +4319,11 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp, */ if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) return 0; - ingress_pkey_table_fail(ppd, pkey, slid); + /* + * On OPA devices it is okay to lose the upper 16 bits of LID as this + * information is obtained elsewhere. Mask off the upper 16 bits. + */ + ingress_pkey_table_fail(ppd, pkey, ib_lid_cpu16(0xFFFF & in_wc->slid)); return 1; } diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 09e50fd2a08f..51a5416b1da4 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -163,9 +163,6 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) resource_size_t addr; int ret = 0; - dd->pcidev = pdev; - pci_set_drvdata(pdev, dd); - addr = pci_resource_start(pdev, 0); len = pci_resource_len(pdev, 0); @@ -411,15 +408,12 @@ int restore_pci_variables(struct hfi1_devdata *dd) if (ret) goto error; - ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, - dd->pci_lnkctl3); - if (ret) - goto error; - - ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2); - if (ret) - goto error; - + if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) { + ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, + dd->pci_tph2); + if (ret) + goto error; + } return 0; error: @@ -469,15 +463,12 @@ int save_pci_variables(struct hfi1_devdata *dd) if (ret) goto error; - ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, - &dd->pci_lnkctl3); - if (ret) - goto error; - - ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2); - if (ret) - goto error; - + if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) { + ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, + &dd->pci_tph2); + if (ret) + goto error; + } return 0; error: diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 7108a4b5e94c..a95ac6246559 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -50,8 +50,6 @@ #include "qp.h" #include "trace.h" -#define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */ - #define SC(name) SEND_CTXT_##name /* * Send Context functions @@ -977,15 +975,40 @@ void sc_disable(struct send_context *sc) } /* return SendEgressCtxtStatus.PacketOccupancy */ -#define packet_occupancy(r) \ - (((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\ - >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT) +static u64 packet_occupancy(u64 reg) +{ + return (reg & + SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK) + >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT; +} /* is egress halted on the context? */ -#define egress_halted(r) \ - ((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK) +static bool egress_halted(u64 reg) +{ + return !!(reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK); +} -/* wait for packet egress, optionally pause for credit return */ +/* is the send context halted? */ +static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context) +{ + return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) & + SC(STATUS_CTXT_HALTED_SMASK)); +} + +/** + * sc_wait_for_packet_egress + * @sc: valid send context + * @pause: wait for credit return + * + * Wait for packet egress, optionally pause for credit return + * + * Egress halt and Context halt are not necessarily the same thing, so + * check for both. 
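+ *
+ * Concretely, the wait loop below gives up as soon as any of these
+ * conditions holds:
+ *
+ *	sc->flags & SCF_HALTED           (SW halt flag, set in the IRQ)
+ *	is_sc_halted(dd, sc->hw_context) (HW context halt)
+ *	egress_halted(reg)               (egress halt status)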
+ * + * NOTE: The context halt bit may not be set immediately. Because of this, + * it is necessary to check the SW SCF_HALTED bit (set in the IRQ) and the HW + * context bit to determine if the context is halted. + */ static void sc_wait_for_packet_egress(struct send_context *sc, int pause) { struct hfi1_devdata *dd = sc->dd; @@ -997,8 +1020,9 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause) reg_prev = reg; reg = read_csr(dd, sc->hw_context * 8 + SEND_EGRESS_CTXT_STATUS); - /* done if egress is stopped */ - if (egress_halted(reg)) + /* done if any halt bits, SW or HW, are set */ + if (sc->flags & SCF_HALTED || + is_sc_halted(dd, sc->hw_context) || egress_halted(reg)) break; reg = packet_occupancy(reg); if (reg == 0) diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index e1cf0c08ca6f..818bac1a4056 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -273,7 +273,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) lockdep_assert_held(&qp->s_lock); ps->s_txreq = get_txreq(ps->dev, qp); - if (IS_ERR(ps->s_txreq)) + if (!ps->s_txreq) goto bail_no_tx; ps->s_txreq->phdr.hdr.hdr_type = priv->hdr_type; @@ -815,7 +815,7 @@ static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp, struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct hfi1_16b_header *hdr = &opa_hdr->opah; struct ib_other_headers *ohdr; - u32 bth0, bth1; + u32 bth0, bth1 = 0; u16 len, pkey; u8 becn = !!is_fecn; u8 l4 = OPA_16B_L4_IB_LOCAL; diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c index b3291f0fde9a..5866ccc0fc21 100644 --- a/drivers/infiniband/hw/hfi1/ruc.c +++ b/drivers/infiniband/hw/hfi1/ruc.c @@ -745,6 +745,20 @@ static inline void hfi1_make_ruc_bth(struct rvt_qp *qp, ohdr->bth[2] = cpu_to_be32(bth2); } +/** + * hfi1_make_ruc_header_16B - build a 16B header + * @qp: the queue pair + * @ohdr: a pointer to the destination header memory + * @bth0: bth0 passed in from the RC/UC builder + * @bth2: bth2 passed in from the RC/UC builder + * @middle: non-zero indicates ahg "could" be used + * @ps: the current packet state + * + * This routine may disarm ahg under these situations: + * - packet needs a GRH + * - BECN needed + * - migration state not IB_MIG_MIGRATED + */ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth2, int middle, @@ -789,6 +803,12 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp, else middle = 0; + if (qp->s_flags & RVT_S_ECN) { + qp->s_flags &= ~RVT_S_ECN; + /* we recently received a FECN, so return a BECN */ + becn = true; + middle = 0; + } if (middle) build_ahg(qp, bth2); else @@ -796,11 +816,6 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp, bth0 |= pkey; bth0 |= extra_bytes << 20; - if (qp->s_flags & RVT_S_ECN) { - qp->s_flags &= ~RVT_S_ECN; - /* we recently received a FECN, so return a BECN */ - becn = 1; - } hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2); if (!ppd->lid) @@ -818,6 +833,20 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp, pkey, becn, 0, l4, priv->s_sc); } +/** + * hfi1_make_ruc_header_9B - build a 9B header + * @qp: the queue pair + * @ohdr: a pointer to the destination header memory + * @bth0: bth0 passed in from the RC/UC builder + * @bth2: bth2 passed in from the RC/UC builder + * @middle: non-zero indicates ahg "could" be used + * @ps: the current packet state + * + * This routine may disarm ahg under these situations: + *
- packet needs a GRH + * - BECN needed + * - migration state not IB_MIG_MIGRATED + */ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp, struct ib_other_headers *ohdr, u32 bth0, u32 bth2, int middle, @@ -853,6 +882,12 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp, else middle = 0; + if (qp->s_flags & RVT_S_ECN) { + qp->s_flags &= ~RVT_S_ECN; + /* we recently received a FECN, so return a BECN */ + bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT); + middle = 0; + } if (middle) build_ahg(qp, bth2); else @@ -860,11 +895,6 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp, bth0 |= pkey; bth0 |= extra_bytes << 20; - if (qp->s_flags & RVT_S_ECN) { - qp->s_flags &= ~RVT_S_ECN; - /* we recently received a FECN, so return a BECN */ - bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT); - } hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2); if (!ppd->lid) diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index 6d2702ef34ac..25e867393463 100644 --- a/drivers/infiniband/hw/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c @@ -543,7 +543,7 @@ static ssize_t show_nctxts(struct device *device, * give a more accurate picture of total contexts available. */ return scnprintf(buf, PAGE_SIZE, "%u\n", - min(dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt, + min(dd->num_user_contexts, (u32)dd->sc_sizes[SC_USER].count)); } diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c index 9938bb983ce6..9749ec9dd9f2 100644 --- a/drivers/infiniband/hw/hfi1/trace.c +++ b/drivers/infiniband/hw/hfi1/trace.c @@ -154,7 +154,7 @@ void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr, *opcode = ib_bth_get_opcode(ohdr); *tver = ib_bth_get_tver(ohdr); *pkey = ib_bth_get_pkey(ohdr); - *psn = ib_bth_get_psn(ohdr); + *psn = mask_psn(ib_bth_get_psn(ohdr)); *qpn = ib_bth_get_qpn(ohdr); } @@ -169,7 +169,7 @@ void hfi1_trace_parse_16b_bth(struct ib_other_headers *ohdr, *pad = ib_bth_get_pad(ohdr); *se = ib_bth_get_se(ohdr); *tver = ib_bth_get_tver(ohdr); - *psn = ib_bth_get_psn(ohdr); + *psn = mask_psn(ib_bth_get_psn(ohdr)); *qpn = ib_bth_get_qpn(ohdr); } diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c index 0b646173ca22..92e033fbb048 100644 --- a/drivers/infiniband/hw/hfi1/uc.c +++ b/drivers/infiniband/hw/hfi1/uc.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2015, 2016 Intel Corporation. + * Copyright(c) 2015 - 2018 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) int middle = 0; ps->s_txreq = get_txreq(ps->dev, qp); - if (IS_ERR(ps->s_txreq)) + if (!ps->s_txreq) goto bail_no_tx; if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index 2ba74fdd6f15..37abd150fad3 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2015, 2016 Intel Corporation. + * Copyright(c) 2015 - 2018 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
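The txreq conversion above (continued in ud.c below) replaces ERR_PTR(-EBUSY) with a plain NULL return, so callers test !ps->s_txreq rather than IS_ERR(). A self-contained userspace sketch of why the two conventions must not be mixed; ERR_PTR()/IS_ERR() here are minimal imitations of the kernel macros, and get_resource() is a hypothetical stand-in for the allocator:

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Minimal imitations of the kernel's err-pointer helpers. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Hypothetical allocator using the new convention: NULL when busy. */
static void *get_resource(int busy)
{
	static int slot;
	return busy ? NULL : &slot;
}

int main(void)
{
	void *tx = get_resource(1);

	/* IS_ERR(NULL) is false, so an IS_ERR()-only caller would treat
	 * a busy allocator as success and dereference NULL; the caller
	 * must use the same convention as the allocator. */
	printf("IS_ERR(tx)=%d  (tx == NULL)=%d\n", IS_ERR(tx), tx == NULL);
	return 0;
}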
@@ -479,7 +479,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) u32 lid; ps->s_txreq = get_txreq(ps->dev, qp); - if (IS_ERR(ps->s_txreq)) + if (!ps->s_txreq) goto bail_no_tx; if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) { @@ -630,7 +630,7 @@ int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey) } void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp, - u32 remote_qpn, u32 pkey, u32 slid, u32 dlid, + u32 remote_qpn, u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh) { u64 pbc, pbc_flags = 0; @@ -688,7 +688,7 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp, } void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn, - u32 pkey, u32 slid, u32 dlid, u8 sc5, + u16 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh) { u64 pbc, pbc_flags = 0; diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c index 873e48ea923f..c4ab2d5b4502 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.c +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c @@ -1,5 +1,5 @@ /* - * Copyright(c) 2016 - 2017 Intel Corporation. + * Copyright(c) 2016 - 2018 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, struct rvt_qp *qp) __must_hold(&qp->s_lock) { - struct verbs_txreq *tx = ERR_PTR(-EBUSY); + struct verbs_txreq *tx = NULL; write_seqlock(&dev->txwait_lock); if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h index 76216f2ef35a..22fc5ddf01ca 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.h +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2016 Intel Corporation. + * Copyright(c) 2016 - 2018 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
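The pkey parameters above narrow from u32 to u16 to match the wire format; the companion hfi.h hunk earlier adds a (u32) cast before the 16-bit shift so the value is widened before packing into lrh2. A small standalone illustration of that cast, where PKEY_SHIFT is an illustrative stand-in for OPA_16B_PKEY_SHIFT:

#include <stdint.h>
#include <stdio.h>

#define PKEY_SHIFT 16	/* illustrative stand-in for OPA_16B_PKEY_SHIFT */

int main(void)
{
	uint16_t pkey = 0xffff;	/* e.g. the full-membership default pkey */

	/* Without the cast, pkey promotes to (signed) int before the
	 * shift, and 0xffff << 16 overflows int; widening to a 32-bit
	 * unsigned type first keeps the operation well defined. */
	uint32_t lrh2 = (uint32_t)pkey << PKEY_SHIFT;

	printf("lrh2 = 0x%08x\n", lrh2);	/* prints 0xffff0000 */
	return 0;
}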
@@ -83,7 +83,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, if (unlikely(!tx)) { /* call slow path to get the lock */ tx = __get_txreq(dev, qp); - if (IS_ERR(tx)) + if (!tx) return tx; } tx->qp = qp; diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c index f419cbb05928..1a17708be46a 100644 --- a/drivers/infiniband/hw/hfi1/vnic_main.c +++ b/drivers/infiniband/hw/hfi1/vnic_main.c @@ -840,6 +840,9 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, struct rdma_netdev *rn; int i, size, rc; + if (!dd->num_vnic_contexts) + return ERR_PTR(-ENOMEM); + if (!port_num || (port_num > dd->num_pports)) return ERR_PTR(-EINVAL); @@ -848,7 +851,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo); netdev = alloc_netdev_mqs(size, name, name_assign_type, setup, - dd->chip_sdma_engines, HFI1_NUM_VNIC_CTXT); + dd->chip_sdma_engines, dd->num_vnic_contexts); if (!netdev) return ERR_PTR(-ENOMEM); @@ -856,7 +859,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device, vinfo = opa_vnic_dev_priv(netdev); vinfo->dd = dd; vinfo->num_tx_q = dd->chip_sdma_engines; - vinfo->num_rx_q = HFI1_NUM_VNIC_CTXT; + vinfo->num_rx_q = dd->num_vnic_contexts; vinfo->netdev = netdev; rn->free_rdma_netdev = hfi1_vnic_free_rn; rn->set_id = hfi1_vnic_set_vesw_id; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 747efd1ae5a6..8208c30f03c5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -1001,6 +1001,11 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) } } + if (!ne) { + dev_err(dev, "Reserved loop qp is absent!\n"); + goto free_work; + } + do { ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc); if (ret < 0) { diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 5230dd3c938c..b7f1ce5333cb 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -125,7 +125,8 @@ static u8 i40iw_derive_hw_ird_setting(u16 cm_ird) * @conn_ird: connection IRD * @conn_ord: connection ORD */ -static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord) +static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u32 conn_ird, + u32 conn_ord) { if (conn_ird > I40IW_MAX_IRD_SIZE) conn_ird = I40IW_MAX_IRD_SIZE; @@ -1043,7 +1044,7 @@ static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type, * i40iw_schedule_cm_timer * @@cm_node: connection's node * @sqbuf: buffer to send - * @type: if it es send ot close + * @type: if it is send or close * @send_retrans: if rexmits to be done * @close_when_complete: is cm_node to be removed * @@ -1067,7 +1068,8 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node, new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); if (!new_send) { - i40iw_free_sqbuf(vsi, (void *)sqbuf); + if (type != I40IW_TIMER_TYPE_CLOSE) + i40iw_free_sqbuf(vsi, (void *)sqbuf); return -ENOMEM; } new_send->retrycount = I40IW_DEFAULT_RETRYS; @@ -1082,7 +1084,6 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node, new_send->timetosend += (HZ / 10); if (cm_node->close_entry) { kfree(new_send); - i40iw_free_sqbuf(vsi, (void *)sqbuf); i40iw_pr_err("already close entry\n"); return -EINVAL; } @@ -3841,7 +3842,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) }
cm_node->apbvt_set = true; - i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord); + i40iw_record_ird_ord(cm_node, conn_param->ird, conn_param->ord); if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO && !cm_node->ord_size) cm_node->ord_size = 1; diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index 42ca5346777d..472ef4d6e858 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -506,7 +506,7 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp, ret_code = i40iw_allocate_dma_mem(cqp->dev->hw, &cqp->sdbuf, - 128, + I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size, I40IW_SD_BUF_ALIGNMENT); if (ret_code) @@ -589,14 +589,15 @@ void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp) } /** - * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq - * @cqp: struct for cqp hw - * @wqe_idx: we index of cqp ring + * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index + * @cqp: pointer to CQP structure + * @scratch: private data for CQP WQE + * @wqe_idx: WQE index for next WQE on CQP SQ */ -u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch) +static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp, + u64 scratch, u32 *wqe_idx) { u64 *wqe = NULL; - u32 wqe_idx; enum i40iw_status_code ret_code; if (I40IW_RING_FULL_ERR(cqp->sq_ring)) { @@ -609,20 +610,32 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch) cqp->sq_ring.size); return NULL; } - I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code); + I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code); cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++; if (ret_code) return NULL; - if (!wqe_idx) + if (!*wqe_idx) cqp->polarity = !cqp->polarity; - wqe = cqp->sq_base[wqe_idx].elem; - cqp->scratch_array[wqe_idx] = scratch; + wqe = cqp->sq_base[*wqe_idx].elem; + cqp->scratch_array[*wqe_idx] = scratch; I40IW_CQP_INIT_WQE(wqe); return wqe; } +/** + * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq + * @cqp: struct for cqp hw + * @scratch: private data for CQP WQE + */ +u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch) +{ + u32 wqe_idx; + + return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx); +} + /** * i40iw_sc_cqp_destroy - destroy cqp during close * @cqp: struct for cqp hw @@ -3534,8 +3547,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp, u64 *wqe; int mem_entries, wqe_entries; struct i40iw_dma_mem *sdbuf = &cqp->sdbuf; + u64 offset; + u32 wqe_idx; - wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx); if (!wqe) return I40IW_ERR_RING_FULL; @@ -3548,8 +3563,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp, LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT); if (mem_entries) { - memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4)); - data = sdbuf->pa; + offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE; + memcpy((char *)sdbuf->va + offset, &info->entry[3], + mem_entries << 4); + data = (u64)sdbuf->pa + offset; } else { data = 0; } @@ -3858,8 +3875,10 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_ hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1; hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted; - hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted; - hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * 
I40IW_MAX_IRD_SIZE * qpwanted; + hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = + roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted); + hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = + roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted); hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt = hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size; hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt = diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h index 2ebaadbed379..019ad3b939f9 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_d.h +++ b/drivers/infiniband/hw/i40iw/i40iw_d.h @@ -93,6 +93,7 @@ #define RDMA_OPCODE_MASK 0x0f #define RDMA_READ_REQ_OPCODE 1 #define Q2_BAD_FRAME_OFFSET 72 +#define Q2_FPSN_OFFSET 64 #define CQE_MAJOR_DRV 0x8000 #define I40IW_TERM_SENT 0x01 @@ -1109,7 +1110,7 @@ #define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT) #define I40IWQPC_ARPIDX_SHIFT 48 -#define I40IWQPC_ARPIDX_MASK (0xfffULL << I40IWQPC_ARPIDX_SHIFT) +#define I40IWQPC_ARPIDX_MASK (0xffffULL << I40IWQPC_ARPIDX_SHIFT) #define I40IWQPC_FLOWLABEL_SHIFT 0 #define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT) @@ -1516,7 +1517,7 @@ enum i40iw_alignment { I40IW_AEQ_ALIGNMENT = 0x100, I40IW_CEQ_ALIGNMENT = 0x100, I40IW_CQ0_ALIGNMENT = 0x100, - I40IW_SD_BUF_ALIGNMENT = 0x100 + I40IW_SD_BUF_ALIGNMENT = 0x80 }; #define I40IW_WQE_SIZE_64 64 @@ -1524,6 +1525,8 @@ enum i40iw_alignment { #define I40IW_QP_WQE_MIN_SIZE 32 #define I40IW_QP_WQE_MAX_SIZE 128 +#define I40IW_UPDATE_SD_BUF_SIZE 128 + #define I40IW_CQE_QTYPE_RQ 0 #define I40IW_CQE_QTYPE_SQ 1 diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c index 59f70676f0e0..27a2d782f6d9 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c @@ -48,7 +48,6 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid); static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx); static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc, bool initial); -static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp); /** * i40iw_puda_get_listbuf - get buffer from puda list * @list: list to use for buffers (ILQ or IEQ) @@ -1376,7 +1375,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq, u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx; u32 rcv_wnd = hw_host_ctx[23]; /* first partial seq # in q2 */ - u32 fps = qp->q2_buf[16]; + u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET); struct list_head *rxlist = &pfpdu->rxlist; struct list_head *plist; @@ -1480,7 +1479,7 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid) * @ieq: ieq resource * @qp: all pending fpdu buffers */ -static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp) +void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp) { struct i40iw_puda_buf *buf; struct i40iw_pfpdu *pfpdu = &qp->pfpdu; diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h index dba05ce7d392..ebe37f157d90 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_puda.h +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h @@ -186,4 +186,5 @@ enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev, struct enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq); void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp); void 
i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq); +void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp); #endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 62be0a41ad0b..b7961f21b555 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -428,6 +428,7 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev, { struct i40iw_pbl *iwpbl = &iwqp->iwpbl; + i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp); i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp); if (qp_num) i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num); @@ -1655,6 +1656,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, err_code = -EOVERFLOW; goto err; } + stag &= ~I40IW_CQPSQ_STAG_KEY_MASK; iwmr->stag = stag; iwmr->ibmr.rkey = stag; iwmr->ibmr.lkey = stag; diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index cab796341697..d92f639c287f 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -597,6 +597,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct wc->dlid_path_bits = 0; if (is_eth) { + wc->slid = 0; wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid); memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4); memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2); @@ -845,7 +846,6 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, } } - wc->slid = be16_to_cpu(cqe->rlid); g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); wc->src_qp = g_mlpath_rqpn & 0xffffff; wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; @@ -854,6 +854,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum) ? 
IB_WC_IP_CSUM_OK : 0; if (is_eth) { + wc->slid = 0; wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_CVLAN_PRESENT_MASK) { @@ -865,6 +866,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, memcpy(wc->smac, cqe->smac, ETH_ALEN); wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); } else { + wc->slid = be16_to_cpu(cqe->rlid); wc->sl = be16_to_cpu(cqe->sl_vid) >> 12; wc->vlan_id = 0xffff; } diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 0793a21d76f4..d604b3d5aa3e 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1934,7 +1934,6 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work) "buf:%lld\n", wc.wr_id); break; default: - BUG_ON(1); break; } } else { diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index c636842c5be0..e2beb182d54c 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids, gid_tbl[i].version = 2; if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid)) gid_tbl[i].type = 1; - else - memset(&gid_tbl[i].gid, 0, 12); } } @@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device, if (!gids) { ret = -ENOMEM; } else { - for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) - memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); + for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) { + memcpy(&gids[i].gid, + &port_gid_table->gids[i].gid, + sizeof(union ib_gid)); + gids[i].gid_type = + port_gid_table->gids[i].gid_type; + } } } spin_unlock_bh(&iboe->lock); @@ -2972,9 +2975,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) kfree(ibdev->ib_uc_qpns_bitmap); err_steer_qp_release: - if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) - mlx4_qp_release_range(dev, ibdev->steer_qpn_base, - ibdev->steer_qpn_count); + mlx4_qp_release_range(dev, ibdev->steer_qpn_base, + ibdev->steer_qpn_count); err_counter: for (i = 0; i < ibdev->num_ports; ++i) mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]); @@ -3079,11 +3081,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) ibdev->iboe.nb.notifier_call = NULL; } - if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) { - mlx4_qp_release_range(dev, ibdev->steer_qpn_base, - ibdev->steer_qpn_count); - kfree(ibdev->ib_uc_qpns_bitmap); - } + mlx4_qp_release_range(dev, ibdev->steer_qpn_base, + ibdev->steer_qpn_count); + kfree(ibdev->ib_uc_qpns_bitmap); iounmap(ibdev->uar_map); for (p = 0; p < ibdev->num_ports; ++p) diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index e6f77f63da75..1587cedee13e 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -131,6 +131,40 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, return err; } +static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start, + u64 length, u64 virt_addr, + int access_flags) +{ + /* + * Force registering the memory as writable if the underlying pages + * are writable. 
This is so rereg can change the access permissions + * from readable to writable without having to run through ib_umem_get + * again + */ + if (!ib_access_writable(access_flags)) { + struct vm_area_struct *vma; + + down_read(&current->mm->mmap_sem); + /* + * FIXME: Ideally this would iterate over all the vmas that + * cover the memory, but for now it requires a single vma to + * entirely cover the MR to support RO mappings. + */ + vma = find_vma(current->mm, start); + if (vma && vma->vm_end >= start + length && + vma->vm_start <= start) { + if (vma->vm_flags & VM_WRITE) + access_flags |= IB_ACCESS_LOCAL_WRITE; + } else { + access_flags |= IB_ACCESS_LOCAL_WRITE; + } + + up_read(&current->mm->mmap_sem); + } + + return ib_umem_get(context, start, length, access_flags, 0); +} + struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags, struct ib_udata *udata) @@ -145,10 +179,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (!mr) return ERR_PTR(-ENOMEM); - /* Force registering the memory as writable. */ - /* Used for memory re-registeration. HCA protects the access */ - mr->umem = ib_umem_get(pd->uobject->context, start, length, - access_flags | IB_ACCESS_LOCAL_WRITE, 0); + mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length, + virt_addr, access_flags); if (IS_ERR(mr->umem)) { err = PTR_ERR(mr->umem); goto err_free; @@ -215,6 +247,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, } if (flags & IB_MR_REREG_ACCESS) { + if (ib_access_writable(mr_access_flags) && !mmr->umem->writable) + return -EPERM; + err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry, convert_access(mr_access_flags)); @@ -228,10 +263,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr); ib_umem_release(mmr->umem); - mmr->umem = ib_umem_get(mr->uobject->context, start, length, - mr_access_flags | - IB_ACCESS_LOCAL_WRITE, - 0); + mmr->umem = + mlx4_get_umem_mr(mr->uobject->context, start, length, + virt_addr, mr_access_flags); if (IS_ERR(mmr->umem)) { err = PTR_ERR(mmr->umem); /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */ @@ -406,7 +440,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, goto err_free_mr; mr->max_pages = max_num_sg; - err = mlx4_mr_enable(dev->dev, &mr->mmr); if (err) goto err_free_pl; @@ -417,6 +450,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, return &mr->ibmr; err_free_pl: + mr->ibmr.device = pd->device; mlx4_free_priv_pages(mr); err_free_mr: (void) mlx4_mr_free(dev->dev, &mr->mmr); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index b6b33d99b0b4..9354fec8efe7 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -666,6 +666,19 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx, return (-EOPNOTSUPP); } + if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 | + MLX4_IB_RX_HASH_DST_IPV4 | + MLX4_IB_RX_HASH_SRC_IPV6 | + MLX4_IB_RX_HASH_DST_IPV6 | + MLX4_IB_RX_HASH_SRC_PORT_TCP | + MLX4_IB_RX_HASH_DST_PORT_TCP | + MLX4_IB_RX_HASH_SRC_PORT_UDP | + MLX4_IB_RX_HASH_DST_PORT_UDP)) { + pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n", + ucmd->rx_hash_fields_mask); + return (-EOPNOTSUPP); + } + if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) && (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) { rss_ctx->flags = MLX4_RSS_IPV4; @@ -691,11 +704,11 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
return (-EOPNOTSUPP); } - if (rss_ctx->flags & MLX4_RSS_IPV4) { + if (rss_ctx->flags & MLX4_RSS_IPV4) rss_ctx->flags |= MLX4_RSS_UDP_IPV4; - } else if (rss_ctx->flags & MLX4_RSS_IPV6) { + if (rss_ctx->flags & MLX4_RSS_IPV6) rss_ctx->flags |= MLX4_RSS_UDP_IPV6; - } else { + if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) { pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n"); return (-EOPNOTSUPP); } @@ -707,15 +720,14 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx, if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) && (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) { - if (rss_ctx->flags & MLX4_RSS_IPV4) { + if (rss_ctx->flags & MLX4_RSS_IPV4) rss_ctx->flags |= MLX4_RSS_TCP_IPV4; - } else if (rss_ctx->flags & MLX4_RSS_IPV6) { + if (rss_ctx->flags & MLX4_RSS_IPV6) rss_ctx->flags |= MLX4_RSS_TCP_IPV6; - } else { + if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) { pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n"); return (-EOPNOTSUPP); } - } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) || (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) { pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n"); @@ -2182,11 +2194,6 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) | (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16)); - if (rwq_ind_tbl) { - fill_qp_rss_context(context, qp); - context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET); - } - if (!(attr_mask & IB_QP_PATH_MIG_STATE)) context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11); else { @@ -2216,7 +2223,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, context->mtu_msgmax = (IB_MTU_4096 << 5) | ilog2(dev->dev->caps.max_gso_sz); else - context->mtu_msgmax = (IB_MTU_4096 << 5) | 12; + context->mtu_msgmax = (IB_MTU_4096 << 5) | 13; } else if (attr_mask & IB_QP_PATH_MTU) { if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) { pr_err("path MTU (%u) is invalid\n", @@ -2387,6 +2394,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, context->pd = cpu_to_be32(pd->pdn); if (!rwq_ind_tbl) { + context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); get_cqs(qp, src_type, &send_cq, &recv_cq); } else { /* Set dummy CQs to be compatible with HV and PRM */ send_cq = to_mcq(rwq_ind_tbl->ind_tbl[0]->cq); @@ -2394,7 +2402,6 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, } context->cqn_send = cpu_to_be32(send_cq->mcq.cqn); context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn); - context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); /* Set "fast registration enabled" for all kernel QPs */ if (!ibuobject) @@ -2513,7 +2520,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, MLX4_IB_LINK_TYPE_ETH; if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { /* set QP to receive both tunneled & non-tunneled packets */ - if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET))) + if (!rwq_ind_tbl) context->srqn = cpu_to_be32(7 << 28); } } @@ -2562,6 +2569,13 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, } } + if (rwq_ind_tbl && + cur_state == IB_QPS_RESET && + new_state == IB_QPS_INIT) { + fill_qp_rss_context(context, qp); + context->flags |= cpu_to_be32(1 << 
MLX4_RSS_QPC_FLAG_OFFSET); + } + err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), to_mlx4_state(new_state), context, optpar, sqd_event, &qp->mqp); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 2aa53f427685..be6612fc33ac 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -224,7 +224,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); break; } - wc->slid = be16_to_cpu(cqe->slid); wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; wc->dlid_path_bits = cqe->ml_path; g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; @@ -239,10 +238,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, } if (ll != IB_LINK_LAYER_ETHERNET) { + wc->slid = be16_to_cpu(cqe->slid); wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; return; } + wc->slid = 0; vlan_present = cqe->l4_l3_hdr_type & 0x1; roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3; if (vlan_present) { @@ -645,7 +646,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq, } static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries, - struct ib_wc *wc) + struct ib_wc *wc, bool is_fatal_err) { struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); struct mlx5_ib_wc *soft_wc, *next; @@ -658,6 +659,10 @@ static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries, mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n", cq->mcq.cqn); + if (unlikely(is_fatal_err)) { + soft_wc->wc.status = IB_WC_WR_FLUSH_ERR; + soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR; + } wc[npolled++] = soft_wc->wc; list_del(&soft_wc->list); kfree(soft_wc); @@ -678,12 +683,17 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) spin_lock_irqsave(&cq->lock, flags); if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { - mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled); + /* make sure no soft wqe's are waiting */ + if (unlikely(!list_empty(&cq->wc_list))) + soft_polled = poll_soft_wc(cq, num_entries, wc, true); + + mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled, + wc + soft_polled, &npolled); goto out; } if (unlikely(!list_empty(&cq->wc_list))) - soft_polled = poll_soft_wc(cq, num_entries, wc); + soft_polled = poll_soft_wc(cq, num_entries, wc, false); for (npolled = 0; npolled < num_entries - soft_polled; npolled++) { if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled)) @@ -1154,7 +1164,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, if (ucmd.reserved0 || ucmd.reserved1) return -EINVAL; - umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, + /* check multiplication overflow */ + if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1) + return -EINVAL; + + umem = ib_umem_get(context, ucmd.buf_addr, + (size_t)ucmd.cqe_size * entries, IB_ACCESS_LOCAL_WRITE, 1); if (IS_ERR(umem)) { err = PTR_ERR(umem); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 552f7bd4ecc3..ab70194a73db 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -270,6 +270,9 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, if (err) return err; + props->active_width = IB_WIDTH_4X; + props->active_speed = IB_SPEED_QDR; + translate_eth_proto_oper(eth_prot_oper, &props->active_speed, &props->active_width); @@ -1276,7 +1279,8 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn) 
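The resize_user() hunk above guards the cqe_size * entries multiplication before sizing the umem. The same guard in isolation, as a sketch with nothing mlx5-specific:

#include <stddef.h>
#include <stdint.h>

/* Store a * b in *out, or fail if the product would overflow size_t;
 * b > SIZE_MAX / a is the same test as SIZE_MAX / a <= b - 1 above. */
static int checked_mul(size_t a, size_t b, size_t *out)
{
	if (a != 0 && b > SIZE_MAX / a)
		return -1;
	*out = a * b;
	return 0;
}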
return err; if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || - !MLX5_CAP_GEN(dev->mdev, disable_local_lb)) + (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) && + !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) return err; mutex_lock(&dev->lb_mutex); @@ -1294,7 +1298,8 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn) mlx5_core_dealloc_transport_domain(dev->mdev, tdn); if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || - !MLX5_CAP_GEN(dev->mdev, disable_local_lb)) + (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) && + !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) return; mutex_lock(&dev->lb_mutex); @@ -1415,6 +1420,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, } INIT_LIST_HEAD(&context->vma_private_list); + mutex_init(&context->vma_private_list_mutex); INIT_LIST_HEAD(&context->db_page_list); mutex_init(&context->db_page_mutex); @@ -1576,7 +1582,9 @@ static void mlx5_ib_vma_close(struct vm_area_struct *area) * mlx5_ib_disassociate_ucontext(). */ mlx5_ib_vma_priv_data->vma = NULL; + mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex); list_del(&mlx5_ib_vma_priv_data->list); + mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex); kfree(mlx5_ib_vma_priv_data); } @@ -1596,10 +1604,13 @@ static int mlx5_ib_set_vma_data(struct vm_area_struct *vma, return -ENOMEM; vma_prv->vma = vma; + vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex; vma->vm_private_data = vma_prv; vma->vm_ops = &mlx5_ib_vm_ops; + mutex_lock(&ctx->vma_private_list_mutex); list_add(&vma_prv->list, vma_head); + mutex_unlock(&ctx->vma_private_list_mutex); return 0; } @@ -1642,6 +1653,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) * mlx5_ib_vma_close. 
*/ down_write(&owning_mm->mmap_sem); + mutex_lock(&context->vma_private_list_mutex); list_for_each_entry_safe(vma_private, n, &context->vma_private_list, list) { vma = vma_private->vma; @@ -1656,6 +1668,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) list_del(&vma_private->list); kfree(vma_private); } + mutex_unlock(&context->vma_private_list_mutex); up_write(&owning_mm->mmap_sem); mmput(owning_mm); put_task_struct(owning_process); @@ -3097,6 +3110,8 @@ static int create_umr_res(struct mlx5_ib_dev *dev) qp->real_qp = qp; qp->uobject = NULL; qp->qp_type = MLX5_IB_QPT_REG_UMR; + qp->send_cq = init_attr->send_cq; + qp->recv_cq = init_attr->recv_cq; attr->qp_state = IB_QPS_INIT; attr->port_num = 1; @@ -4151,7 +4166,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) } if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && - MLX5_CAP_GEN(mdev, disable_local_lb)) + (MLX5_CAP_GEN(mdev, disable_local_lb_uc) || + MLX5_CAP_GEN(mdev, disable_local_lb_mc))) mutex_init(&dev->lb_mutex); dev->ib_active = true; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 189e80cd6b2f..754103372faa 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -115,6 +115,8 @@ enum { struct mlx5_ib_vma_private_data { struct list_head list; struct vm_area_struct *vma; + /* protect vma_private_list add/del */ + struct mutex *vma_private_list_mutex; }; struct mlx5_ib_ucontext { @@ -129,6 +131,8 @@ struct mlx5_ib_ucontext { /* Transport Domain number */ u32 tdn; struct list_head vma_private_list; + /* protect vma_private_list add/del */ + struct mutex vma_private_list_mutex; unsigned long upd_xlt_page; /* protect ODP/KSM */ diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 37bbc543847a..9866c5d1b99f 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -833,24 +833,28 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length, int *order) { struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct ib_umem *u; int err; - *umem = ib_umem_get(pd->uobject->context, start, length, - access_flags, 0); - err = PTR_ERR_OR_ZERO(*umem); - if (err < 0) { - mlx5_ib_err(dev, "umem get failed (%d)\n", err); + *umem = NULL; + + u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0); + err = PTR_ERR_OR_ZERO(u); + if (err) { + mlx5_ib_dbg(dev, "umem get failed (%d)\n", err); return err; } - mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages, + mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages, page_shift, ncont, order); if (!*npages) { mlx5_ib_warn(dev, "avoid zero region\n"); - ib_umem_release(*umem); + ib_umem_release(u); return -EINVAL; } + *umem = u; + mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n", *npages, *ncont, *order, *page_shift); @@ -1206,6 +1210,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, int err; bool use_umr = true; + if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM)) + return ERR_PTR(-EINVAL); + mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", start, virt_addr, length, access_flags); @@ -1216,6 +1223,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, return ERR_PTR(-EINVAL); mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags); + if (IS_ERR(mr)) + return ERR_CAST(mr); return &mr->ibmr; } #endif @@ -1334,13 +1343,12 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int 
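The vma_private_list changes above, together with the new mutex in mlx5_ib.h, store a pointer to the guarding mutex in every list element, so mlx5_ib_vma_close(), which only sees the element, can still take the right lock before unlinking it. A generic sketch of that shape, assuming a pthread mutex and hypothetical types:

#include <pthread.h>
#include <stddef.h>

struct elem {
	struct elem *next, **pprev;	/* intrusive list linkage */
	pthread_mutex_t *list_mutex;	/* guards the list this sits on */
};

/* Called with nothing but the element, as in the vma close callback. */
static void elem_unlink(struct elem *e)
{
	pthread_mutex_lock(e->list_mutex);
	*e->pprev = e->next;
	if (e->next)
		e->next->pprev = e->pprev;
	pthread_mutex_unlock(e->list_mutex);
}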
flags, u64 start, int access_flags = flags & IB_MR_REREG_ACCESS ? new_access_flags : mr->access_flags; - u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address; - u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length; int page_shift = 0; int upd_flags = 0; int npages = 0; int ncont = 0; int order = 0; + u64 addr, len; int err; mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", @@ -1348,6 +1356,17 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); + if (!mr->umem) + return -EINVAL; + + if (flags & IB_MR_REREG_TRANS) { + addr = virt_addr; + len = length; + } else { + addr = mr->umem->address; + len = mr->umem->length; + } + if (flags != IB_MR_REREG_PD) { /* * Replace umem. This needs to be done whether or not UMR is @@ -1355,6 +1374,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, */ flags |= IB_MR_REREG_TRANS; ib_umem_release(mr->umem); + mr->umem = NULL; err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, &npages, &page_shift, &ncont, &order); if (err < 0) { @@ -1412,6 +1432,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, if (err) { mlx5_ib_warn(dev, "Failed to rereg UMR\n"); ib_umem_release(mr->umem); + mr->umem = NULL; clean_mr(dev, mr); return err; } @@ -1495,14 +1516,11 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) u32 key = mr->mmkey.key; err = destroy_mkey(dev, mr); - kfree(mr); if (err) { mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", key, err); return err; } - } else { - mlx5_mr_cache_free(dev, mr); } return 0; @@ -1545,6 +1563,11 @@ static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) atomic_sub(npages, &dev->mdev->priv.reg_pages); } + if (!mr->allocated_from_cache) + kfree(mr); + else + mlx5_mr_cache_free(dev, mr); + return 0; } @@ -1637,6 +1660,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, MLX5_SET(mkc, mkc, access_mode, mr->access_mode); MLX5_SET(mkc, mkc, umr_en, 1); + mr->ibmr.device = pd->device; err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); if (err) goto err_destroy_psv; @@ -1812,7 +1836,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, mr->ibmr.iova = sg_dma_address(sg) + sg_offset; mr->ibmr.length = 0; - mr->ndescs = sg_nents; for_each_sg(sgl, sg, sg_nents, i) { if (unlikely(i >= mr->max_descs)) @@ -1824,6 +1847,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, sg_offset = 0; } + mr->ndescs = i; if (sg_offset_p) *sg_offset_p = sg_offset; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index acb79d3a4f1d..ef9ee6c328a1 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -256,7 +256,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, } else { if (ucmd) { qp->rq.wqe_cnt = ucmd->rq_wqe_count; + if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift)) + return -EINVAL; qp->rq.wqe_shift = ucmd->rq_wqe_shift; + if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig) + return -EINVAL; qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; qp->rq.max_post = qp->rq.wqe_cnt; } else { @@ -1130,7 +1134,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev, ib_umem_release(sq->ubuffer.umem); } -static int get_rq_pas_size(void *qpc) +static size_t get_rq_pas_size(void *qpc) { u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12; u32 log_rq_stride = 
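The clean_mr()/dereg_mr() rework a few hunks up moves the final free out of the helper, so nothing dereferences the MR after it is released and the cache-versus-kfree decision happens exactly once at the end. A compressed sketch of that ownership rule (simplified types, not the driver's):

#include <stdlib.h>

struct fake_mr {
	int allocated_from_cache;
	/* ... hardware state ... */
};

/* Tear down device state only; the caller still owns @mr. */
static int clean_mr(struct fake_mr *mr)
{
	/* destroy the mkey etc.; deliberately no free here */
	return 0;
}

static int dereg_mr(struct fake_mr *mr)
{
	int err = clean_mr(mr);

	if (err)
		return err;
	/* last use of @mr: now it is safe to release it */
	if (!mr->allocated_from_cache)
		free(mr);
	/* else: a real driver returns it to its cache instead */
	return 0;
}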
MLX5_GET(qpc, qpc, log_rq_stride); @@ -1146,7 +1150,8 @@ static int get_rq_pas_size(void *qpc) } static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, - struct mlx5_ib_rq *rq, void *qpin) + struct mlx5_ib_rq *rq, void *qpin, + size_t qpinlen) { struct mlx5_ib_qp *mqp = rq->base.container_mibqp; __be64 *pas; @@ -1155,9 +1160,12 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, void *rqc; void *wq; void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc); - int inlen; + size_t rq_pas_size = get_rq_pas_size(qpc); + size_t inlen; int err; - u32 rq_pas_size = get_rq_pas_size(qpc); + + if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas)) + return -EINVAL; inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size; in = kvzalloc(inlen, GFP_KERNEL); @@ -1236,7 +1244,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev, } static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, - u32 *in, + u32 *in, size_t inlen, struct ib_pd *pd) { struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; @@ -1266,7 +1274,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, if (qp->flags & MLX5_IB_QP_CVLAN_STRIPPING) rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING; - err = create_raw_packet_qp_rq(dev, rq, in); + err = create_raw_packet_qp_rq(dev, rq, in, inlen); if (err) goto err_destroy_sq; @@ -1781,11 +1789,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, qp->flags |= MLX5_IB_QP_LSO; } + if (inlen < 0) { + err = -EINVAL; + goto err; + } + if (init_attr->qp_type == IB_QPT_RAW_PACKET || qp->flags & MLX5_IB_QP_UNDERLAY) { qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr; raw_packet_qp_copy_info(qp, &qp->raw_packet_qp); - err = create_raw_packet_qp(dev, qp, in, pd); + err = create_raw_packet_qp(dev, qp, in, inlen, pd); } else { err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen); } @@ -1825,6 +1838,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, else if (qp->create_type == MLX5_QP_KERNEL) destroy_qp_kernel(dev, qp); +err: kvfree(in); return err; } @@ -2185,18 +2199,18 @@ enum { static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) { - if (rate == IB_RATE_PORT_CURRENT) { + if (rate == IB_RATE_PORT_CURRENT) return 0; - } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) { + + if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) return -EINVAL; - } else { - while (rate != IB_RATE_2_5_GBPS && - !(1 << (rate + MLX5_STAT_RATE_OFFSET) & - MLX5_CAP_GEN(dev->mdev, stat_rate_support))) - --rate; - } - return rate + MLX5_STAT_RATE_OFFSET; + while (rate != IB_RATE_PORT_CURRENT && + !(1 << (rate + MLX5_STAT_RATE_OFFSET) & + MLX5_CAP_GEN(dev->mdev, stat_rate_support))) + --rate; + + return rate ? rate + MLX5_STAT_RATE_OFFSET : rate; } static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev, @@ -2867,8 +2881,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, goto out; if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || - !optab[mlx5_cur][mlx5_new]) + !optab[mlx5_cur][mlx5_new]) { + err = -EINVAL; goto out; + } op = optab[mlx5_cur][mlx5_new]; optpar = ib_mask_to_mlx5_opt(attr_mask); @@ -2913,7 +2929,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, * If we moved a kernel QP to RESET, clean up all old CQ * entries and reinitialize the QP. 
*/ - if (new_state == IB_QPS_RESET && !ibqp->uobject) { + if (new_state == IB_QPS_RESET && + !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) { mlx5_ib_cq_clean(recv_cq, base->mqp.qpn, ibqp->srq ? to_msrq(ibqp->srq) : NULL); if (send_cq != recv_cq) @@ -4303,12 +4320,11 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev, memset(ah_attr, 0, sizeof(*ah_attr)); - ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port); - rdma_ah_set_port_num(ah_attr, path->port); - if (rdma_ah_get_port_num(ah_attr) == 0 || - rdma_ah_get_port_num(ah_attr) > MLX5_CAP_GEN(dev, num_ports)) + if (!path->port || path->port > MLX5_CAP_GEN(dev, num_ports)) return; + ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port); + rdma_ah_set_port_num(ah_attr, path->port); rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf); @@ -4627,13 +4643,10 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd) int err; err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn); - if (err) { + if (err) mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn); - return err; - } kfree(xrcd); - return 0; } diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 6d5fadad9090..3c7522d025f2 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -241,8 +241,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_ib_srq *srq; - int desc_size; - int buf_size; + size_t desc_size; + size_t buf_size; int err; struct mlx5_srq_attr in = {0}; __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); @@ -266,15 +266,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); + if (desc_size == 0 || srq->msrq.max_gs > desc_size) + return ERR_PTR(-EINVAL); desc_size = roundup_pow_of_two(desc_size); - desc_size = max_t(int, 32, desc_size); + desc_size = max_t(size_t, 32, desc_size); + if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) + return ERR_PTR(-EINVAL); srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / sizeof(struct mlx5_wqe_data_seg); srq->msrq.wqe_shift = ilog2(desc_size); buf_size = srq->msrq.max * desc_size; - mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n", - desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, - srq->msrq.max_avail_gather); + if (buf_size < desc_size) + return ERR_PTR(-EINVAL); in.type = init_attr->srq_type; if (pd->uobject) diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c index 66056f9a9700..48a49f8a5014 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c @@ -834,7 +834,7 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev) dev->reset_stats.type = OCRDMA_RESET_STATS; dev->reset_stats.dev = dev; - if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir, + if (!debugfs_create_file("reset_stats", 0200, dev->dir, &dev->reset_stats, &ocrdma_dbg_ops)) goto err; diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 97d033f51dc9..ddb05b42e5e6 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -782,7 +782,8 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev); if (!dev->num_cnq) { - DP_ERR(dev, "not enough CNQ resources.\n"); + 
DP_ERR(dev, "Failed. At least one CNQ is required.\n"); + rc = -ENOMEM; goto init_err; } diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 769ac07c3c8e..7f4cc9336442 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1663,14 +1663,15 @@ static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph) static int qedr_update_qp_state(struct qedr_dev *dev, struct qedr_qp *qp, + enum qed_roce_qp_state cur_state, enum qed_roce_qp_state new_state) { int status = 0; - if (new_state == qp->state) + if (new_state == cur_state) return 0; - switch (qp->state) { + switch (cur_state) { case QED_ROCE_QP_STATE_RESET: switch (new_state) { case QED_ROCE_QP_STATE_INIT: @@ -1774,6 +1775,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev); const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); enum ib_qp_state old_qp_state, new_qp_state; + enum qed_roce_qp_state cur_state; int rc = 0; DP_DEBUG(dev, QEDR_MSG_QP, @@ -1903,18 +1905,23 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, SET_FIELD(qp_params.modify_flags, QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1); - qp_params.ack_timeout = attr->timeout; - if (attr->timeout) { - u32 temp; - - temp = 4096 * (1UL << attr->timeout) / 1000 / 1000; - /* FW requires [msec] */ - qp_params.ack_timeout = temp; - } else { - /* Infinite */ + /* The received timeout value is an exponent used like this: + * "12.7.34 LOCAL ACK TIMEOUT + * Value representing the transport (ACK) timeout for use by + * the remote, expressed as: 4.096 * 2^timeout [usec]" + * The FW expects timeout in msec so we need to divide the usec + * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2, + * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8). + * The value of zero means infinite so we use a 'max_t' to make + * sure that sub 1 msec values will be configured as 1 msec. + */ + if (attr->timeout) + qp_params.ack_timeout = + 1 << max_t(int, attr->timeout - 8, 0); + else qp_params.ack_timeout = 0; - } } + if (attr_mask & IB_QP_RETRY_CNT) { SET_FIELD(qp_params.modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1); @@ -1987,13 +1994,25 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, qp->dest_qp_num = attr->dest_qp_num; } + cur_state = qp->state; + + /* Update the QP state before the actual ramrod to prevent a race with + * fast path. Modifying the QP state to error will cause the device to + * flush the CQEs and while polling the flushed CQEs will considered as + * a potential issue if the QP isn't in error state. 
+ */ + if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI && + !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR) + qp->state = QED_ROCE_QP_STATE_ERR; + if (qp->qp_type != IB_QPT_GSI) rc = dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params); if (attr_mask & IB_QP_STATE) { if ((qp->qp_type != IB_QPT_GSI) && (!udata)) - rc = qedr_update_qp_state(dev, qp, qp_params.new_state); + rc = qedr_update_qp_state(dev, qp, cur_state, + qp_params.new_state); qp->state = qp_params.new_state; } @@ -2832,6 +2851,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, switch (wr->opcode) { case IB_WR_SEND_WITH_IMM: + if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { + rc = -EINVAL; + *bad_wr = wr; + break; + } wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM; swqe = (struct rdma_sq_send_wqe_1st *)wqe; swqe->wqe_size = 2; @@ -2873,6 +2897,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, break; case IB_WR_RDMA_WRITE_WITH_IMM: + if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { + rc = -EINVAL; + *bad_wr = wr; + break; + } wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM; rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; @@ -3518,7 +3547,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) { struct qedr_dev *dev = get_qedr_dev(ibcq->device); struct qedr_cq *cq = get_qedr_cq(ibcq); - union rdma_cqe *cqe = cq->latest_cqe; + union rdma_cqe *cqe; u32 old_cons, new_cons; unsigned long flags; int update = 0; @@ -3535,6 +3564,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) return qedr_gsi_poll_cq(ibcq, num_entries, wc); spin_lock_irqsave(&cq->cq_lock, flags); + cqe = cq->latest_cqe; old_cons = qed_chain_get_cons_idx_u32(&cq->pbl); while (num_entries && is_valid_cqe(cq, cqe)) { struct qedr_qp *qp; diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index f9e1c69603a5..1dda4a2623c9 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -1250,6 +1250,7 @@ static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port) #define QIB_BADINTR 0x8000 /* severe interrupt problems */ #define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */ #define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */ +#define QIB_SHUTDOWN 0x40000 /* device is shutting down */ /* * values for ppd->lflags (_ib_port_ related flags) @@ -1448,8 +1449,7 @@ u64 qib_sps_ints(void); /* * dma_addr wrappers - all 0's invalid for hw */ -dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long, - size_t, int); +int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr); const char *qib_get_unit_name(int unit); const char *qib_get_card_name(struct rvt_dev_info *rdi); struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi); diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 9396c1807cc3..40efc9151ec4 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -364,6 +364,8 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp, goto done; } for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) { + dma_addr_t daddr; + for (; ntids--; tid++) { if (tid == tidcnt) tid = 0; @@ -380,12 +382,14 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp, ret = -ENOMEM; break; } + ret = qib_map_page(dd->pcidev, pagep[i], &daddr); + if (ret) + break; + tidlist[i] = tid + tidoff; /* we "know" system pages and TID pages are 
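The ack-timeout comment in the qedr hunk above boils down to 2^(timeout - 8) msec, clamped at 1 msec, with 0 meaning infinite. A runnable check of that arithmetic, under the comment's own approximations:

#include <stdio.h>

/* 4.096 * 2^timeout usec is roughly 2^(timeout - 8) msec. */
static unsigned int ack_timeout_msec(unsigned int timeout)
{
	if (!timeout)
		return 0;	/* zero stays "infinite" */
	return timeout > 8 ? 1u << (timeout - 8) : 1;
}

int main(void)
{
	/* timeout=14: 4.096 * 2^14 usec ~ 67 msec; approximation gives 64 */
	printf("%u msec\n", ack_timeout_msec(14));
	return 0;
}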
same size */ dd->pageshadow[ctxttid + tid] = pagep[i]; - dd->physshadow[ctxttid + tid] = - qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + dd->physshadow[ctxttid + tid] = daddr; /* * don't need atomic or it's overhead */ diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 14cadf6d6214..a45e46098914 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -150,7 +150,7 @@ static struct kparam_string kp_txselect = { .string = txselect_list, .maxlen = MAX_ATTEN_LEN }; -static int setup_txselect(const char *, struct kernel_param *); +static int setup_txselect(const char *, const struct kernel_param *); module_param_call(txselect, setup_txselect, param_get_string, &kp_txselect, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(txselect, @@ -6169,7 +6169,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change) } /* handle the txselect parameter changing */ -static int setup_txselect(const char *str, struct kernel_param *kp) +static int setup_txselect(const char *str, const struct kernel_param *kp) { struct qib_devdata *dd; unsigned long val; diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index c5a4c65636d6..7ba7d2122f3b 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -850,6 +850,10 @@ static void qib_shutdown_device(struct qib_devdata *dd) struct qib_pportdata *ppd; unsigned pidx; + if (dd->flags & QIB_SHUTDOWN) + return; + dd->flags |= QIB_SHUTDOWN; + for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; @@ -1189,6 +1193,7 @@ void qib_disable_after_error(struct qib_devdata *dd) static void qib_remove_one(struct pci_dev *); static int qib_init_one(struct pci_dev *, const struct pci_device_id *); +static void qib_shutdown_one(struct pci_dev *); #define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: " #define PFX QIB_DRV_NAME ": " @@ -1206,6 +1211,7 @@ static struct pci_driver qib_driver = { .name = QIB_DRV_NAME, .probe = qib_init_one, .remove = qib_remove_one, + .shutdown = qib_shutdown_one, .id_table = qib_pci_tbl, .err_handler = &qib_pci_err_handler, }; @@ -1556,6 +1562,13 @@ static void qib_remove_one(struct pci_dev *pdev) qib_postinit_cleanup(dd); } +static void qib_shutdown_one(struct pci_dev *pdev) +{ + struct qib_devdata *dd = pci_get_drvdata(pdev); + + qib_shutdown_device(dd); +} + /** * qib_create_rcvhdrq - create a receive header queue * @dd: the qlogic_ib device diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index e9a91736b12d..d80b61a71eb8 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -434,13 +434,13 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags) qp->s_state = OP(COMPARE_SWAP); put_ib_ateth_swap(wqe->atomic_wr.swap, &ohdr->u.atomic_eth); - put_ib_ateth_swap(wqe->atomic_wr.compare_add, - &ohdr->u.atomic_eth); + put_ib_ateth_compare(wqe->atomic_wr.compare_add, + &ohdr->u.atomic_eth); } else { qp->s_state = OP(FETCH_ADD); put_ib_ateth_swap(wqe->atomic_wr.compare_add, &ohdr->u.atomic_eth); - put_ib_ateth_swap(0, &ohdr->u.atomic_eth); + put_ib_ateth_compare(0, &ohdr->u.atomic_eth); } put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr, &ohdr->u.atomic_eth); diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index ce83ba9a12ef..16543d5e80c3 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ 
b/drivers/infiniband/hw/qib/qib_user_pages.c @@ -99,23 +99,27 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, * * I'm sure we won't be so lucky with other iommu's, so FIXME. */ -dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page, - unsigned long offset, size_t size, int direction) +int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr) { dma_addr_t phys; - phys = pci_map_page(hwdev, page, offset, size, direction); + phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(hwdev, phys)) + return -ENOMEM; - if (phys == 0) { - pci_unmap_page(hwdev, phys, size, direction); - phys = pci_map_page(hwdev, page, offset, size, direction); + if (!phys) { + pci_unmap_page(hwdev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE); + phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(hwdev, phys)) + return -ENOMEM; /* * FIXME: If we get 0 again, we should keep this page, * map another, then free the 0 page. */ } - - return phys; + *daddr = phys; + return 0; } /** diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c index 3562c0c30492..6286b95d77ed 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c @@ -114,6 +114,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, union pvrdma_cmd_resp rsp; struct pvrdma_cmd_create_cq *cmd = &req.create_cq; struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp; + struct pvrdma_create_cq_resp cq_resp = {0}; struct pvrdma_create_cq ucmd; BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64); @@ -198,6 +199,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, cq->ibcq.cqe = resp->cqe; cq->cq_handle = resp->cq_handle; + cq_resp.cqn = resp->cq_handle; spin_lock_irqsave(&dev->cq_tbl_lock, flags); dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq; spin_unlock_irqrestore(&dev->cq_tbl_lock, flags); @@ -206,7 +208,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, cq->uar = &(to_vucontext(context)->uar); /* Copy udata back. 
*/ - if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) { + if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) { dev_warn(&dev->pdev->dev, "failed to copy back udata\n"); pvrdma_destroy_cq(&cq->ibcq); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index ed34d5a581fa..d7162f2b7979 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -406,6 +406,13 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp) atomic_dec(&qp->refcnt); wait_event(qp->wait, !atomic_read(&qp->refcnt)); + if (!qp->is_kernel) { + if (qp->rumem) + ib_umem_release(qp->rumem); + if (qp->sumem) + ib_umem_release(qp->sumem); + } + pvrdma_page_dir_cleanup(dev, &qp->pdir); kfree(qp); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c index 48776f5ffb0e..aa533f08e017 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c @@ -444,6 +444,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, union pvrdma_cmd_resp rsp; struct pvrdma_cmd_create_pd *cmd = &req.create_pd; struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp; + struct pvrdma_alloc_pd_resp pd_resp = {0}; int ret; void *ptr; @@ -472,9 +473,10 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, pd->privileged = !context; pd->pd_handle = resp->pd_handle; pd->pdn = resp->pd_handle; + pd_resp.pdn = resp->pd_handle; if (context) { - if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) { + if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) { dev_warn(&dev->pdev->dev, "failed to copy back protection domain\n"); pvrdma_dealloc_pd(&pd->ibpd); diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c index 97d71e49c092..76a86f805233 100644 --- a/drivers/infiniband/sw/rdmavt/cq.c +++ b/drivers/infiniband/sw/rdmavt/cq.c @@ -121,17 +121,20 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited) if (cq->notify == IB_CQ_NEXT_COMP || (cq->notify == IB_CQ_SOLICITED && (solicited || entry->status != IB_WC_SUCCESS))) { + struct kthread_worker *worker; + /* * This will cause send_complete() to be called in * another thread. */ - spin_lock(&cq->rdi->n_cqs_lock); - if (likely(cq->rdi->worker)) { + rcu_read_lock(); + worker = rcu_dereference(cq->rdi->worker); + if (likely(worker)) { cq->notify = RVT_CQ_NONE; cq->triggered++; - kthread_queue_work(cq->rdi->worker, &cq->comptask); + kthread_queue_work(worker, &cq->comptask); } - spin_unlock(&cq->rdi->n_cqs_lock); + rcu_read_unlock(); } spin_unlock_irqrestore(&cq->lock, flags); @@ -198,7 +201,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev, return ERR_PTR(-EINVAL); /* Allocate the completion queue structure. */ - cq = kzalloc(sizeof(*cq), GFP_KERNEL); + cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node); if (!cq) return ERR_PTR(-ENOMEM); @@ -214,7 +217,9 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev, sz += sizeof(struct ib_uverbs_wc) * (entries + 1); else sz += sizeof(struct ib_wc) * (entries + 1); - wc = vmalloc_user(sz); + wc = udata ? + vmalloc_user(sz) : + vzalloc_node(sz, rdi->dparms.node); if (!wc) { ret = ERR_PTR(-ENOMEM); goto bail_cq; @@ -369,7 +374,9 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) sz += sizeof(struct ib_uverbs_wc) * (cqe + 1); else sz += sizeof(struct ib_wc) * (cqe + 1); - wc = vmalloc_user(sz); + wc = udata ? 
+ vmalloc_user(sz) : + vzalloc_node(sz, rdi->dparms.node); if (!wc) return -ENOMEM; @@ -509,7 +516,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi) int cpu; struct kthread_worker *worker; - if (rdi->worker) + if (rcu_access_pointer(rdi->worker)) return 0; spin_lock_init(&rdi->n_cqs_lock); @@ -521,7 +528,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi) return PTR_ERR(worker); set_user_nice(worker->task, MIN_NICE); - rdi->worker = worker; + RCU_INIT_POINTER(rdi->worker, worker); return 0; } @@ -533,15 +540,19 @@ void rvt_cq_exit(struct rvt_dev_info *rdi) { struct kthread_worker *worker; - /* block future queuing from send_complete() */ - spin_lock_irq(&rdi->n_cqs_lock); - worker = rdi->worker; + if (!rcu_access_pointer(rdi->worker)) + return; + + spin_lock(&rdi->n_cqs_lock); + worker = rcu_dereference_protected(rdi->worker, + lockdep_is_held(&rdi->n_cqs_lock)); if (!worker) { - spin_unlock_irq(&rdi->n_cqs_lock); + spin_unlock(&rdi->n_cqs_lock); return; } - rdi->worker = NULL; - spin_unlock_irq(&rdi->n_cqs_lock); + RCU_INIT_POINTER(rdi->worker, NULL); + spin_unlock(&rdi->n_cqs_lock); + synchronize_rcu(); kthread_destroy_worker(worker); } diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index 42713511b53b..524e6134642e 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c @@ -489,11 +489,13 @@ static int rvt_check_refs(struct rvt_mregion *mr, const char *t) unsigned long timeout; struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device); - if (percpu_ref_is_zero(&mr->refcount)) - return 0; - /* avoid dma mr */ - if (mr->lkey) + if (mr->lkey) { + /* avoid dma mr */ rvt_dereg_clean_qps(mr); + /* @mr was indexed on rcu protected @lkey_table */ + synchronize_rcu(); + } + timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ); if (!timeout) { rvt_pr_err(rdi, diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h index 77b3ed0df936..7f945f65d8cd 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -237,7 +237,6 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, void rxe_release(struct kref *kref); -void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify); int rxe_completer(void *arg); int rxe_requester(void *arg); int rxe_responder(void *arg); diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c index 61927c165b59..4cf11063e0b5 100644 --- a/drivers/infiniband/sw/rxe/rxe_opcode.c +++ b/drivers/infiniband/sw/rxe/rxe_opcode.c @@ -390,7 +390,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { .name = "IB_OPCODE_RC_SEND_ONLY_INV", .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_END_MASK, + | RXE_END_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_IETH_BYTES, .offset = { [RXE_BTH] = 0, diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c index c1b5f38f31a5..3b4916680018 100644 --- a/drivers/infiniband/sw/rxe/rxe_pool.c +++ b/drivers/infiniband/sw/rxe/rxe_pool.c @@ -404,6 +404,8 @@ void *rxe_alloc(struct rxe_pool *pool) elem = kmem_cache_zalloc(pool_cache(pool), (pool->flags & RXE_POOL_ATOMIC) ? 
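The rdi->worker conversion above replaces a spinlock-guarded pointer with RCU: readers pick up the pointer under rcu_read_lock(), and teardown NULLs it, waits out readers with synchronize_rcu(), and only then destroys the worker. Illustration only, using C11 atomics for the publish/read ordering (they do not provide RCU's grace period, which is what makes the final destroy safe):

#include <stdatomic.h>
#include <stddef.h>

struct worker;	/* opaque here */

static _Atomic(struct worker *) g_worker;

/* Reader: a one-shot snapshot, like rcu_dereference(). */
static struct worker *worker_get(void)
{
	return atomic_load_explicit(&g_worker, memory_order_acquire);
}

/* Teardown: unpublish first; real RCU then waits for all readers
 * before freeing what they might still hold. */
static struct worker *worker_unpublish(void)
{
	return atomic_exchange_explicit(&g_worker, NULL,
					memory_order_acq_rel);
}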
GFP_ATOMIC : GFP_KERNEL); + if (!elem) + return NULL; elem->pool = pool; kref_init(&elem->ref_cnt); diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 00bda9380a2e..aeea994b04c4 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -824,9 +824,9 @@ void rxe_qp_destroy(struct rxe_qp *qp) } /* called when the last reference to the qp is dropped */ -void rxe_qp_cleanup(struct rxe_pool_entry *arg) +static void rxe_qp_do_cleanup(struct work_struct *work) { - struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem); + struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work); rxe_drop_all_mcast_groups(qp); @@ -859,3 +859,11 @@ void rxe_qp_cleanup(struct rxe_pool_entry *arg) kernel_sock_shutdown(qp->sk, SHUT_RDWR); sock_release(qp->sk); } + +/* called when the last reference to the qp is dropped */ +void rxe_qp_cleanup(struct rxe_pool_entry *arg) +{ + struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem); + + execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work); +} diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index d84222f9d5d2..54cc9cb1e3b7 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -594,15 +594,8 @@ int rxe_requester(void *arg) rxe_add_ref(qp); next_wqe: - if (unlikely(!qp->valid)) { - rxe_drain_req_pkts(qp, true); + if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR)) goto exit; - } - - if (unlikely(qp->req.state == QP_STATE_ERROR)) { - rxe_drain_req_pkts(qp, true); - goto exit; - } if (unlikely(qp->req.state == QP_STATE_RESET)) { qp->req.wqe_index = consumer_index(qp->sq.queue); @@ -735,7 +728,6 @@ int rxe_requester(void *arg) rollback_state(wqe, qp, &rollback_wqe, rollback_psn); if (ret == -EAGAIN) { - kfree_skb(skb); rxe_run_task(&qp->req.task, 1); goto exit; } diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 4240866a5331..bd43c1c7a42f 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -742,7 +742,6 @@ static enum resp_states read_reply(struct rxe_qp *qp, err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb); if (err) { pr_err("Failed sending RDMA reply.\n"); - kfree_skb(skb); return RESPST_ERR_RNR; } @@ -955,10 +954,8 @@ static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, } err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb); - if (err) { + if (err) pr_err_ratelimited("Failed sending ack\n"); - kfree_skb(skb); - } err1: return err; @@ -1151,7 +1148,6 @@ static enum resp_states duplicate_request(struct rxe_qp *qp, if (rc) { pr_err("Failed resending result. This flow is not handled - skb ignored\n"); rxe_drop_ref(qp); - kfree_skb(skb_copy); rc = RESPST_CLEANUP; goto out; } @@ -1210,7 +1206,7 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp) } } -void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify) +static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify) { struct sk_buff *skb; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 0b362f49a10a..1cbf4e407afa 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -712,9 +712,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr, memcpy(wqe->dma.sge, ibwr->sg_list, num_sge * sizeof(struct ib_sge)); - wqe->iova = (mask & WR_ATOMIC_MASK) ? 
- atomic_wr(ibwr)->remote_addr : - rdma_wr(ibwr)->remote_addr; + wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr : + mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0; wqe->mask = mask; wqe->dma.length = length; wqe->dma.resid = length; @@ -813,6 +812,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr, (queue_count(qp->sq.queue) > 1); rxe_run_task(&qp->req.task, must_sched); + if (unlikely(qp->req.state == QP_STATE_ERROR)) + rxe_run_task(&qp->comp.task, 1); return err; } @@ -1205,7 +1206,7 @@ int rxe_register_device(struct rxe_dev *rxe) rxe->ndev->dev_addr); dev->dev.dma_ops = &dma_virt_ops; dma_coerce_mask_and_coherent(&dev->dev, - dma_get_required_mask(dev->dev.parent)); + dma_get_required_mask(&dev->dev)); dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION; dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h index 0c2dbe45c729..1019f5e7dbdd 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h @@ -35,6 +35,7 @@ #define RXE_VERBS_H #include +#include #include #include "rxe_pool.h" #include "rxe_task.h" @@ -281,6 +282,8 @@ struct rxe_qp { struct timer_list rnr_nak_timer; spinlock_t state_lock; /* guard requester and completer */ + + struct execute_work cleanup_work; }; enum rxe_mem_state { diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 7774654c2ccb..7a5ed5a5391e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1447,8 +1447,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb, struct ipoib_dev_priv *priv = ipoib_priv(dev); int e = skb_queue_empty(&priv->cm.skb_queue); - if (skb_dst(skb)) - skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); + skb_dst_update_pmtu(skb, mtu); skb_queue_tail(&priv->cm.skb_queue, skb); if (e) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 6cd61638b441..c97384c914a4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -1203,10 +1203,15 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, ipoib_ib_dev_down(dev); if (level == IPOIB_FLUSH_HEAVY) { + rtnl_lock(); if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) ipoib_ib_dev_stop(dev); - if (ipoib_ib_dev_open(dev) != 0) + + result = ipoib_ib_dev_open(dev); + rtnl_unlock(); + if (result) return; + if (netif_queue_stopped(dev)) netif_start_queue(dev); } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index dcc77014018d..6bc9a768f721 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -776,6 +776,22 @@ static void path_rec_completion(int status, spin_lock_irqsave(&priv->lock, flags); if (!IS_ERR_OR_NULL(ah)) { + /* + * pathrec.dgid is used as the database key from the LLADDR, + * it must remain unchanged even if the SA returns a different + * GID to use in the AH. 
+ */ + if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw, + sizeof(union ib_gid))) { + ipoib_dbg( + priv, + "%s got PathRec for gid %pI6 while asked for %pI6\n", + dev->name, pathrec->dgid.raw, + path->pathrec.dgid.raw); + memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw, + sizeof(union ib_gid)); + } + path->pathrec = *pathrec; old_ah = path->ah; @@ -903,8 +919,8 @@ static int path_rec_start(struct net_device *dev, return 0; } -static void neigh_add_path(struct sk_buff *skb, u8 *daddr, - struct net_device *dev) +static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr, + struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); struct rdma_netdev *rn = netdev_priv(dev); @@ -918,7 +934,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, spin_unlock_irqrestore(&priv->lock, flags); ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); - return; + return NULL; + } + + /* To avoid race condition, make sure that the + * neigh will be added only once. + */ + if (unlikely(!list_empty(&neigh->list))) { + spin_unlock_irqrestore(&priv->lock, flags); + return neigh; } path = __path_find(dev, daddr + 4); @@ -957,7 +981,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, path->ah->last_send = rn->send(dev, skb, path->ah->ah, IPOIB_QPN(daddr)); ipoib_neigh_put(neigh); - return; + return NULL; } } else { neigh->ah = NULL; @@ -974,7 +998,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, spin_unlock_irqrestore(&priv->lock, flags); ipoib_neigh_put(neigh); - return; + return NULL; err_path: ipoib_neigh_free(neigh); @@ -984,6 +1008,8 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, spin_unlock_irqrestore(&priv->lock, flags); ipoib_neigh_put(neigh); + + return NULL; } static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, @@ -1092,8 +1118,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) case htons(ETH_P_TIPC): neigh = ipoib_neigh_get(dev, phdr->hwaddr); if (unlikely(!neigh)) { - neigh_add_path(skb, phdr->hwaddr, dev); - return NETDEV_TX_OK; + neigh = neigh_add_path(skb, phdr->hwaddr, dev); + if (likely(!neigh)) + return NETDEV_TX_OK; } break; case htons(ETH_P_ARP): @@ -2246,6 +2273,9 @@ static struct net_device *ipoib_add_port(const char *format, priv->ca, ipoib_event); ib_register_event_handler(&priv->event_handler); + /* call event handler to ensure pkey in sync */ + queue_work(ipoib_workqueue, &priv->flush_heavy); + result = register_netdev(priv->dev); if (result) { printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n", diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 93e149efc1f5..9b3f47ae2016 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -816,7 +816,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) spin_lock_irqsave(&priv->lock, flags); if (!neigh) { neigh = ipoib_neigh_alloc(daddr, dev); - if (neigh) { + /* Make sure that the neigh will be added only + * once to mcast list. 
+ */ + if (neigh && list_empty(&neigh->list)) { kref_get(&mcast->ah->ref); neigh->ah = mcast->ah; list_add_tail(&neigh->list, &mcast->neigh_list); diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index ceabdb85df8b..ee3f630c9217 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -741,6 +741,7 @@ isert_connect_error(struct rdma_cm_id *cma_id) { struct isert_conn *isert_conn = cma_id->qp->qp_context; + ib_drain_qp(isert_conn->qp); list_del_init(&isert_conn->node); isert_conn->cm_id = NULL; isert_put_conn(isert_conn); @@ -884,15 +885,9 @@ isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_des } static void -isert_create_send_desc(struct isert_conn *isert_conn, - struct isert_cmd *isert_cmd, - struct iser_tx_desc *tx_desc) +__isert_create_send_desc(struct isert_device *device, + struct iser_tx_desc *tx_desc) { - struct isert_device *device = isert_conn->device; - struct ib_device *ib_dev = device->ib_device; - - ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, - ISER_HEADERS_LEN, DMA_TO_DEVICE); memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); tx_desc->iser_header.flags = ISCSI_CTRL; @@ -905,6 +900,20 @@ isert_create_send_desc(struct isert_conn *isert_conn, } } +static void +isert_create_send_desc(struct isert_conn *isert_conn, + struct isert_cmd *isert_cmd, + struct iser_tx_desc *tx_desc) +{ + struct isert_device *device = isert_conn->device; + struct ib_device *ib_dev = device->ib_device; + + ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + + __isert_create_send_desc(device, tx_desc); +} + static int isert_init_tx_hdrs(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) @@ -992,7 +1001,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc; int ret; - isert_create_send_desc(isert_conn, NULL, tx_desc); + __isert_create_send_desc(device, tx_desc); memcpy(&tx_desc->iscsi_header, &login->rsp[0], sizeof(struct iscsi_hdr)); @@ -2107,7 +2116,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs) sig_attrs->check_mask = (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) | - (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) | + (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG ? 0x30 : 0) | (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 
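Both ipoib hunks above close the same race: a neigh can be looked up concurrently, so the decision to link it into a list has to be made under the lock, with list_empty() serving as the "not yet linked" test. A minimal sketch of that idiom, with a pthread mutex standing in for priv->lock and hypothetical types:

#include <pthread.h>
#include <stddef.h>

struct neigh {
	struct neigh *next;
	int linked;	/* stands in for !list_empty(&neigh->list) */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct neigh *neigh_list;

/* Link @n at most once; the test and the insertion happen under the
 * same lock, so two concurrent callers cannot both add it. */
static int neigh_link_once(struct neigh *n)
{
	int added = 0;

	pthread_mutex_lock(&list_lock);
	if (!n->linked) {
		n->next = neigh_list;
		neigh_list = n;
		n->linked = 1;
		added = 1;
	}
	pthread_mutex_unlock(&list_lock);
	return added;
}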
0x0f : 0); return 0; } @@ -2123,6 +2132,9 @@ isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn, u32 rkey, offset; int ret; + if (cmd->ctx_init_done) + goto rdma_ctx_post; + if (dir == DMA_FROM_DEVICE) { addr = cmd->write_va; rkey = cmd->write_stag; @@ -2150,11 +2162,15 @@ isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn, se_cmd->t_data_sg, se_cmd->t_data_nents, offset, addr, rkey, dir); } + if (ret < 0) { isert_err("Cmd: %p failed to prepare RDMA res\n", cmd); return ret; } + cmd->ctx_init_done = true; + +rdma_ctx_post: ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr); if (ret < 0) isert_err("Cmd: %p failed to post RDMA res\n", cmd); diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index d6fd248320ae..3b296bac4f60 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -126,6 +126,7 @@ struct isert_cmd { struct rdma_rw_ctx rw; struct work_struct comp_work; struct scatterlist sg; + bool ctx_init_done; }; static inline struct isert_cmd *tx_desc_to_cmd(struct iser_tx_desc *desc) diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c index afa938bd26d6..a72278e9cd27 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c @@ -139,6 +139,7 @@ void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter) rcu_assign_pointer(adapter->mactbl, NULL); synchronize_rcu(); opa_vnic_free_mac_tbl(mactbl); + adapter->info.vport.mac_tbl_digest = 0; mutex_unlock(&adapter->mactbl_lock); } diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c index c2733964379c..9655cc3aa3a0 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c @@ -348,7 +348,7 @@ void opa_vnic_query_mcast_macs(struct opa_vnic_adapter *adapter, void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter, struct opa_veswport_iface_macs *macs) { - u16 start_idx, tot_macs, num_macs, idx = 0, count = 0; + u16 start_idx, tot_macs, num_macs, idx = 0, count = 0, em_macs = 0; struct netdev_hw_addr *ha; start_idx = be16_to_cpu(macs->start_idx); @@ -359,8 +359,10 @@ void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter, /* Do not include EM specified MAC address */ if (!memcmp(adapter->info.vport.base_mac_addr, ha->addr, - ARRAY_SIZE(adapter->info.vport.base_mac_addr))) + ARRAY_SIZE(adapter->info.vport.base_mac_addr))) { + em_macs++; continue; + } if (start_idx > idx++) continue; @@ -383,7 +385,7 @@ void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter, } tot_macs = netdev_hw_addr_list_count(&adapter->netdev->dev_addrs) + - netdev_uc_count(adapter->netdev); + netdev_uc_count(adapter->netdev) - em_macs; macs->tot_macs_in_lst = cpu_to_be16(tot_macs); macs->num_macs_in_msg = cpu_to_be16(count); macs->gen_count = cpu_to_be16(adapter->info.vport.uc_macs_gen_count); diff --git a/drivers/infiniband/ulp/srp/Kconfig b/drivers/infiniband/ulp/srp/Kconfig index c74ee9633041..99db8fe5173a 100644 --- a/drivers/infiniband/ulp/srp/Kconfig +++ b/drivers/infiniband/ulp/srp/Kconfig @@ -1,6 +1,6 @@ config INFINIBAND_SRP tristate "InfiniBand SCSI RDMA Protocol" - depends on SCSI + depends on SCSI && INFINIBAND_ADDR_TRANS select SCSI_SRP_ATTRS ---help--- Support for the SCSI RDMA Protocol over InfiniBand. 
This diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index fa5ccdb3bb2a..299a97b7e17f 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -665,12 +665,19 @@ static void srp_path_rec_completion(int status, static int srp_lookup_path(struct srp_rdma_ch *ch) { struct srp_target_port *target = ch->target; - int ret; + int ret = -ENODEV; ch->path.numb_path = 1; init_completion(&ch->done); + /* + * Avoid that the SCSI host can be removed by srp_remove_target() + * before srp_path_rec_completion() is called. + */ + if (!scsi_host_get(target->scsi_host)) + goto out; + ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client, target->srp_host->srp_dev->dev, target->srp_host->port, @@ -684,18 +691,24 @@ static int srp_lookup_path(struct srp_rdma_ch *ch) GFP_KERNEL, srp_path_rec_completion, ch, &ch->path_query); - if (ch->path_query_id < 0) - return ch->path_query_id; + ret = ch->path_query_id; + if (ret < 0) + goto put; ret = wait_for_completion_interruptible(&ch->done); if (ret < 0) - return ret; + goto put; - if (ch->status < 0) + ret = ch->status; + if (ret < 0) shost_printk(KERN_WARNING, target->scsi_host, PFX "Path record query failed\n"); - return ch->status; +put: + scsi_host_put(target->scsi_host); + +out: + return ret; } static int srp_send_req(struct srp_rdma_ch *ch, bool multich) @@ -2643,9 +2656,11 @@ static int srp_abort(struct scsi_cmnd *scmnd) ret = FAST_IO_FAIL; else ret = FAILED; - srp_free_req(ch, req, scmnd, 0); - scmnd->result = DID_ABORT << 16; - scmnd->scsi_done(scmnd); + if (ret == SUCCESS) { + srp_free_req(ch, req, scmnd, 0); + scmnd->result = DID_ABORT << 16; + scmnd->scsi_done(scmnd); + } return ret; } @@ -3415,12 +3430,10 @@ static ssize_t srp_create_target(struct device *dev, num_online_nodes()); const int ch_end = ((node_idx + 1) * target->ch_count / num_online_nodes()); - const int cv_start = (node_idx * ibdev->num_comp_vectors / - num_online_nodes() + target->comp_vector) - % ibdev->num_comp_vectors; - const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors / - num_online_nodes() + target->comp_vector) - % ibdev->num_comp_vectors; + const int cv_start = node_idx * ibdev->num_comp_vectors / + num_online_nodes(); + const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors / + num_online_nodes(); int cpu_idx = 0; for_each_online_cpu(cpu) { diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig index 31ee83d528d9..fb8b7182f05e 100644 --- a/drivers/infiniband/ulp/srpt/Kconfig +++ b/drivers/infiniband/ulp/srpt/Kconfig @@ -1,6 +1,6 @@ config INFINIBAND_SRPT tristate "InfiniBand SCSI RDMA Protocol target support" - depends on INFINIBAND && TARGET_CORE + depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE ---help--- Support for the SCSI RDMA Protocol (SRP) Target driver. 
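srp_lookup_path() above pins the SCSI host before starting an asynchronous query whose completion touches it, and drops the reference on every exit path. The same take-before-async, put-on-all-exits bracketing, sketched with a plain C11 refcount (all names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

struct obj { atomic_int refs; };

/* Fails once teardown has already dropped the last reference. */
static bool obj_get(struct obj *o)
{
	int r = atomic_load(&o->refs);

	while (r > 0)
		if (atomic_compare_exchange_weak(&o->refs, &r, r + 1))
			return true;
	return false;
}

static void obj_put(struct obj *o)
{
	atomic_fetch_sub(&o->refs, 1);	/* the last put frees in real code */
}

static int kick_off_query(struct obj *o) { (void)o; return 0; }	/* stub */
static void wait_for_done(struct obj *o) { (void)o; }		/* stub */

static int start_and_wait(struct obj *o)
{
	int ret = -1;	/* "no device" default, as in the patch */

	if (!obj_get(o))
		return ret;
	ret = kick_off_query(o);
	if (ret == 0)
		wait_for_done(o);
	obj_put(o);	/* dropped on every path after the get */
	return ret;
}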
The diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 9e8e9220f816..2e7982042fe5 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -80,7 +80,7 @@ module_param(srpt_srq_size, int, 0444); MODULE_PARM_DESC(srpt_srq_size, "Shared receive queue (SRQ) size."); -static int srpt_get_u64_x(char *buffer, struct kernel_param *kp) +static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp) { return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg); } @@ -787,13 +787,17 @@ static int srpt_post_recv(struct srpt_device *sdev, */ static int srpt_zerolength_write(struct srpt_rdma_ch *ch) { - struct ib_send_wr wr, *bad_wr; + struct ib_send_wr *bad_wr; + struct ib_rdma_wr wr = { + .wr = { + .next = NULL, + { .wr_cqe = &ch->zw_cqe, }, + .opcode = IB_WR_RDMA_WRITE, + .send_flags = IB_SEND_SIGNALED, + } + }; - memset(&wr, 0, sizeof(wr)); - wr.opcode = IB_WR_RDMA_WRITE; - wr.wr_cqe = &ch->zw_cqe; - wr.send_flags = IB_SEND_SIGNALED; - return ib_post_send(ch->qp, &wr, &bad_wr); + return ib_post_send(ch->qp, &wr.wr, &bad_wr); } static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc) @@ -1000,8 +1004,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp) return -ENOMEM; attr->qp_state = IB_QPS_INIT; - attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | - IB_ACCESS_REMOTE_WRITE; + attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE; attr->port_num = ch->sport->port; attr->pkey_index = 0; @@ -1992,7 +1995,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, goto destroy_ib; } - guid = (__be16 *)&param->primary_path->sgid.global.interface_id; + guid = (__be16 *)&param->primary_path->dgid.global.interface_id; snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x", be16_to_cpu(guid[0]), be16_to_cpu(guid[1]), be16_to_cpu(guid[2]), be16_to_cpu(guid[3])); @@ -2777,7 +2780,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name) { const char *p; unsigned len, count, leading_zero_bytes; - int ret, rc; + int ret; p = name; if (strncasecmp(p, "0x", 2) == 0) @@ -2789,10 +2792,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name) count = min(len / 2, 16U); leading_zero_bytes = 16 - count; memset(i_port_id, 0, leading_zero_bytes); - rc = hex2bin(i_port_id + leading_zero_bytes, p, count); - if (rc < 0) - pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc); - ret = 0; + ret = hex2bin(i_port_id + leading_zero_bytes, p, count); + if (ret < 0) + pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret); out: return ret; } diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig index ff8037798779..724715e4f8bc 100644 --- a/drivers/input/Kconfig +++ b/drivers/input/Kconfig @@ -184,6 +184,19 @@ config INPUT_APMPOWER To compile this driver as a module, choose M here: the module will be called apm-power.
+config INPUT_KEYRESET + bool "Reset key" + depends on INPUT + select INPUT_KEYCOMBO + ---help--- + Say Y here if you want to reboot when some keys are pressed; + +config INPUT_KEYCOMBO + bool "Key combo" + depends on INPUT + ---help--- + Say Y here if you want to take action when some keys are pressed; + comment "Input Device Drivers" source "drivers/input/keyboard/Kconfig" diff --git a/drivers/input/Makefile b/drivers/input/Makefile index 40de6a7be641..f0351af763bd 100644 --- a/drivers/input/Makefile +++ b/drivers/input/Makefile @@ -27,5 +27,7 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/ obj-$(CONFIG_INPUT_MISC) += misc/ obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o +obj-$(CONFIG_INPUT_KEYRESET) += keyreset.o +obj-$(CONFIG_INPUT_KEYCOMBO) += keycombo.o obj-$(CONFIG_RMI4_CORE) += rmi4/ diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c index 766bf2660116..5f04b2d94635 100644 --- a/drivers/input/input-leds.c +++ b/drivers/input/input-leds.c @@ -88,6 +88,7 @@ static int input_leds_connect(struct input_handler *handler, const struct input_device_id *id) { struct input_leds *leds; + struct input_led *led; unsigned int num_leds; unsigned int led_code; int led_no; @@ -119,14 +120,13 @@ static int input_leds_connect(struct input_handler *handler, led_no = 0; for_each_set_bit(led_code, dev->ledbit, LED_CNT) { - struct input_led *led = &leds->leds[led_no]; + if (!input_led_info[led_code].name) + continue; + led = &leds->leds[led_no]; led->handle = &leds->handle; led->code = led_code; - if (!input_led_info[led_code].name) - continue; - led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s", dev_name(&dev->dev), input_led_info[led_code].name); diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index d86e59515b9c..53f775c41cd1 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -126,6 +126,7 @@ static const struct xpad_device { u8 mapping; u8 xtype; } xpad_device[] = { + { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 }, { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, @@ -229,6 +230,7 @@ static const struct xpad_device { { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, @@ -410,6 +412,7 @@ static const signed short xpad_abs_triggers[] = { static const struct usb_device_id xpad_table[] = { { USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */ + XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 Controller */ XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */ XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */ @@ -475,6 +478,22 @@ static const u8 xboxone_hori_init[] = { 0x00, 0x00, 0x00, 0x80, 0x00 }; +/* + * This packet is required for some of the PDP pads to start + * sending input reports. One of those pads is (0x0e6f:0x02ab). 
+ */ +static const u8 xboxone_pdp_init1[] = { + 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14 +}; + +/* + * This packet is required for some of the PDP pads to start + * sending input reports. One of those pads is (0x0e6f:0x02ab). + */ +static const u8 xboxone_pdp_init2[] = { + 0x06, 0x20, 0x00, 0x02, 0x01, 0x00 +}; + /* * A specific rumble packet is required for some PowerA pads to start * sending input reports. One of those pads is (0x24c6:0x543a). @@ -505,6 +524,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), + XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1), + XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c index f6e643b589b6..c877e56a9bd5 100644 --- a/drivers/input/keyboard/goldfish_events.c +++ b/drivers/input/keyboard/goldfish_events.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -24,6 +25,8 @@ #include #include +#define GOLDFISH_MAX_FINGERS 5 + enum { REG_READ = 0x00, REG_SET_PAGE = 0x00, @@ -52,7 +55,21 @@ static irqreturn_t events_interrupt(int irq, void *dev_id) value = __raw_readl(edev->addr + REG_READ); input_event(edev->input, type, code, value); - input_sync(edev->input); + // Send an extra (EV_SYN, SYN_REPORT, 0x0) event + // if a key was pressed. Some keyboard device + // drivers may only send the EV_KEY event and + // not EV_SYN. + // Note that sending an extra SYN_REPORT is not + // necessary nor correct protocol with other + // devices such as touchscreens, which will send + // their own SYN_REPORT's when sufficient event + // information has been collected (e.g., for + // touchscreens, when pressure and X/Y coordinates + // have been received). Hence, we will only send + // this extra SYN_REPORT if type == EV_KEY. + if (type == EV_KEY) { + input_sync(edev->input); + } return IRQ_HANDLED; } @@ -154,6 +171,15 @@ static int events_probe(struct platform_device *pdev) input_dev->name = edev->name; input_dev->id.bustype = BUS_HOST; + // Set the Goldfish Device to be multi-touch. + // In the Ranchu kernel, there is multi-touch-specific + // code for handling ABS_MT_SLOT events. + // See drivers/input/input.c:input_handle_abs_event. + // If we do not issue input_mt_init_slots, + // the kernel will filter out needed ABS_MT_SLOT + // events when we touch the screen in more than one place, + // preventing multi-touch with more than one finger from working. 
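+ // input_mt_init_slots() below allocates per-slot multi-touch state
+ // for up to GOLDFISH_MAX_FINGERS contacts; passing 0 for the flags
+ // argument requests plain slotted (protocol B) reporting with no
+ // pointer emulation.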
+ input_mt_init_slots(input_dev, GOLDFISH_MAX_FINGERS, 0); events_import_bits(edev, input_dev->evbit, EV_SYN, EV_MAX); events_import_bits(edev, input_dev->keybit, EV_KEY, KEY_MAX); diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 1f316d66e6f7..41614c185918 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c @@ -218,8 +218,10 @@ static void matrix_keypad_stop(struct input_dev *dev) { struct matrix_keypad *keypad = input_get_drvdata(dev); + spin_lock_irq(&keypad->lock); keypad->stopped = true; - mb(); + spin_unlock_irq(&keypad->lock); + flush_work(&keypad->work.work); /* * matrix_keypad_scan() will leave IRQs enabled; diff --git a/drivers/input/keycombo.c b/drivers/input/keycombo.c new file mode 100644 index 000000000000..2fba451b91d5 --- /dev/null +++ b/drivers/input/keycombo.c @@ -0,0 +1,261 @@ +/* drivers/input/keycombo.c + * + * Copyright (C) 2014 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +struct keycombo_state { + struct input_handler input_handler; + unsigned long keybit[BITS_TO_LONGS(KEY_CNT)]; + unsigned long upbit[BITS_TO_LONGS(KEY_CNT)]; + unsigned long key[BITS_TO_LONGS(KEY_CNT)]; + spinlock_t lock; + struct workqueue_struct *wq; + int key_down_target; + int key_down; + int key_up; + struct delayed_work key_down_work; + int delay; + struct work_struct key_up_work; + void (*key_up_fn)(void *); + void (*key_down_fn)(void *); + void *priv; + int key_is_down; + struct wakeup_source combo_held_wake_source; + struct wakeup_source combo_up_wake_source; +}; + +static void do_key_down(struct work_struct *work) +{ + struct delayed_work *dwork = container_of(work, struct delayed_work, + work); + struct keycombo_state *state = container_of(dwork, + struct keycombo_state, key_down_work); + if (state->key_down_fn) + state->key_down_fn(state->priv); +} + +static void do_key_up(struct work_struct *work) +{ + struct keycombo_state *state = container_of(work, struct keycombo_state, + key_up_work); + if (state->key_up_fn) + state->key_up_fn(state->priv); + __pm_relax(&state->combo_up_wake_source); +} + +static void keycombo_event(struct input_handle *handle, unsigned int type, + unsigned int code, int value) +{ + unsigned long flags; + struct keycombo_state *state = handle->private; + + if (type != EV_KEY) + return; + + if (code >= KEY_MAX) + return; + + if (!test_bit(code, state->keybit)) + return; + + spin_lock_irqsave(&state->lock, flags); + if (!test_bit(code, state->key) == !value) + goto done; + __change_bit(code, state->key); + if (test_bit(code, state->upbit)) { + if (value) + state->key_up++; + else + state->key_up--; + } else { + if (value) + state->key_down++; + else + state->key_down--; + } + if (state->key_down == state->key_down_target && state->key_up == 0) { + __pm_stay_awake(&state->combo_held_wake_source); + state->key_is_down = 1; + if (queue_delayed_work(state->wq, &state->key_down_work, + state->delay)) + pr_debug("Key down work already queued!"); + } else if 
(state->key_is_down) { + if (!cancel_delayed_work(&state->key_down_work)) { + __pm_stay_awake(&state->combo_up_wake_source); + queue_work(state->wq, &state->key_up_work); + } + __pm_relax(&state->combo_held_wake_source); + state->key_is_down = 0; + } +done: + spin_unlock_irqrestore(&state->lock, flags); +} + +static int keycombo_connect(struct input_handler *handler, + struct input_dev *dev, + const struct input_device_id *id) +{ + int i; + int ret; + struct input_handle *handle; + struct keycombo_state *state = + container_of(handler, struct keycombo_state, input_handler); + for (i = 0; i < KEY_MAX; i++) { + if (test_bit(i, state->keybit) && test_bit(i, dev->keybit)) + break; + } + if (i == KEY_MAX) + return -ENODEV; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = KEYCOMBO_NAME; + handle->private = state; + + ret = input_register_handle(handle); + if (ret) + goto err_input_register_handle; + + ret = input_open_device(handle); + if (ret) + goto err_input_open_device; + + return 0; + +err_input_open_device: + input_unregister_handle(handle); +err_input_register_handle: + kfree(handle); + return ret; +} + +static void keycombo_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +static const struct input_device_id keycombo_ids[] = { + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT, + .evbit = { BIT_MASK(EV_KEY) }, + }, + { }, +}; +MODULE_DEVICE_TABLE(input, keycombo_ids); + +static int keycombo_probe(struct platform_device *pdev) +{ + int ret; + int key, *keyp; + struct keycombo_state *state; + struct keycombo_platform_data *pdata = pdev->dev.platform_data; + + if (!pdata) + return -EINVAL; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + spin_lock_init(&state->lock); + keyp = pdata->keys_down; + while ((key = *keyp++)) { + if (key >= KEY_MAX) + continue; + state->key_down_target++; + __set_bit(key, state->keybit); + } + if (pdata->keys_up) { + keyp = pdata->keys_up; + while ((key = *keyp++)) { + if (key >= KEY_MAX) + continue; + __set_bit(key, state->keybit); + __set_bit(key, state->upbit); + } + } + + state->wq = alloc_ordered_workqueue("keycombo", 0); + if (!state->wq) + return -ENOMEM; + + state->priv = pdata->priv; + + if (pdata->key_down_fn) + state->key_down_fn = pdata->key_down_fn; + INIT_DELAYED_WORK(&state->key_down_work, do_key_down); + + if (pdata->key_up_fn) + state->key_up_fn = pdata->key_up_fn; + INIT_WORK(&state->key_up_work, do_key_up); + + wakeup_source_init(&state->combo_held_wake_source, "key combo"); + wakeup_source_init(&state->combo_up_wake_source, "key combo up"); + state->delay = msecs_to_jiffies(pdata->key_down_delay); + + state->input_handler.event = keycombo_event; + state->input_handler.connect = keycombo_connect; + state->input_handler.disconnect = keycombo_disconnect; + state->input_handler.name = KEYCOMBO_NAME; + state->input_handler.id_table = keycombo_ids; + ret = input_register_handler(&state->input_handler); + if (ret) { + kfree(state); + return ret; + } + platform_set_drvdata(pdev, state); + return 0; +} + +int keycombo_remove(struct platform_device *pdev) +{ + struct keycombo_state *state = platform_get_drvdata(pdev); + input_unregister_handler(&state->input_handler); + destroy_workqueue(state->wq); + kfree(state); + return 0; +} + + +struct platform_driver keycombo_driver = { + .driver.name = KEYCOMBO_NAME, + .probe = keycombo_probe, + .remove 
= keycombo_remove, +}; + +static int __init keycombo_init(void) +{ + return platform_driver_register(&keycombo_driver); +} + +static void __exit keycombo_exit(void) +{ + return platform_driver_unregister(&keycombo_driver); +} + +module_init(keycombo_init); +module_exit(keycombo_exit); diff --git a/drivers/input/keyreset.c b/drivers/input/keyreset.c new file mode 100644 index 000000000000..7e5222aec7c1 --- /dev/null +++ b/drivers/input/keyreset.c @@ -0,0 +1,144 @@ +/* drivers/input/keyreset.c + * + * Copyright (C) 2014 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct keyreset_state { + int restart_requested; + int (*reset_fn)(void); + struct platform_device *pdev_child; + struct work_struct restart_work; +}; + +static void do_restart(struct work_struct *unused) +{ + orderly_reboot(); +} + +static void do_reset_fn(void *priv) +{ + struct keyreset_state *state = priv; + if (state->restart_requested) + panic("keyboard reset failed, %d", state->restart_requested); + if (state->reset_fn) { + state->restart_requested = state->reset_fn(); + } else { + pr_info("keyboard reset\n"); + schedule_work(&state->restart_work); + state->restart_requested = 1; + } +} + +static int keyreset_probe(struct platform_device *pdev) +{ + int ret = -ENOMEM; + struct keycombo_platform_data *pdata_child; + struct keyreset_platform_data *pdata = pdev->dev.platform_data; + int up_size = 0, down_size = 0, size; + int key, *keyp; + struct keyreset_state *state; + + if (!pdata) + return -EINVAL; + state = devm_kzalloc(&pdev->dev, sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + state->pdev_child = platform_device_alloc(KEYCOMBO_NAME, + PLATFORM_DEVID_AUTO); + if (!state->pdev_child) + return -ENOMEM; + state->pdev_child->dev.parent = &pdev->dev; + INIT_WORK(&state->restart_work, do_restart); + + keyp = pdata->keys_down; + while ((key = *keyp++)) { + if (key >= KEY_MAX) + continue; + down_size++; + } + if (pdata->keys_up) { + keyp = pdata->keys_up; + while ((key = *keyp++)) { + if (key >= KEY_MAX) + continue; + up_size++; + } + } + size = sizeof(struct keycombo_platform_data) + + sizeof(int) * (down_size + 1); + pdata_child = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!pdata_child) + goto error; + memcpy(pdata_child->keys_down, pdata->keys_down, + sizeof(int) * down_size); + if (up_size > 0) { + pdata_child->keys_up = devm_kzalloc(&pdev->dev, up_size + 1, + GFP_KERNEL); + if (!pdata_child->keys_up) + goto error; + memcpy(pdata_child->keys_up, pdata->keys_up, + sizeof(int) * up_size); + if (!pdata_child->keys_up) + goto error; + } + state->reset_fn = pdata->reset_fn; + pdata_child->key_down_fn = do_reset_fn; + pdata_child->priv = state; + pdata_child->key_down_delay = pdata->key_down_delay; + ret = platform_device_add_data(state->pdev_child, pdata_child, size); + if (ret) + goto error; + platform_set_drvdata(pdev, state); + return platform_device_add(state->pdev_child); +error: + platform_device_put(state->pdev_child); + return ret; +} + +int keyreset_remove(struct 
platform_device *pdev) +{ + struct keyreset_state *state = platform_get_drvdata(pdev); + platform_device_put(state->pdev_child); + return 0; +} + + +struct platform_driver keyreset_driver = { + .driver.name = KEYRESET_NAME, + .probe = keyreset_probe, + .remove = keyreset_remove, +}; + +static int __init keyreset_init(void) +{ + return platform_driver_register(&keyreset_driver); +} + +static void __exit keyreset_exit(void) +{ + return platform_driver_unregister(&keyreset_driver); +} + +module_init(keyreset_init); +module_exit(keyreset_exit); diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 9f082a388388..4b269b332636 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -367,6 +367,17 @@ config INPUT_ATI_REMOTE2 To compile this driver as a module, choose M here: the module will be called ati_remote2. +config INPUT_KEYCHORD + tristate "Key chord input driver support" + help + Say Y here if you want to enable the key chord driver + accessible at /dev/keychord. This driver can be used + for receiving notifications when client specified key + combinations are pressed. + + To compile this driver as a module, choose M here: the + module will be called keychord. + config INPUT_KEYSPAN_REMOTE tristate "Keyspan DMR USB remote control" depends on USB_ARCH_HAS_HCD @@ -535,6 +546,11 @@ config INPUT_SGI_BTNS To compile this driver as a module, choose M here: the module will be called sgi_btns. +config INPUT_GPIO + tristate "GPIO driver support" + help + Say Y here if you want to support gpio based keys, wheels etc... + config HP_SDC_RTC tristate "HP SDC Real Time Clock" depends on (GSC || HP300) && SERIO diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 4b6118d313fe..2ecb6869e4b6 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -38,10 +38,12 @@ obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o obj-$(CONFIG_INPUT_GPIO_BEEPER) += gpio-beeper.o obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o obj-$(CONFIG_INPUT_GPIO_DECODER) += gpio_decoder.o +obj-$(CONFIG_INPUT_GPIO) += gpio_event.o gpio_matrix.o gpio_input.o gpio_output.o gpio_axis.o obj-$(CONFIG_INPUT_HISI_POWERKEY) += hisi_powerkey.o obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o +obj-$(CONFIG_INPUT_KEYCHORD) += keychord.o obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o diff --git a/drivers/input/misc/gpio_axis.c b/drivers/input/misc/gpio_axis.c new file mode 100644 index 000000000000..0acf4a576f53 --- /dev/null +++ b/drivers/input/misc/gpio_axis.c @@ -0,0 +1,192 @@ +/* drivers/input/misc/gpio_axis.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include + +struct gpio_axis_state { + struct gpio_event_input_devs *input_devs; + struct gpio_event_axis_info *info; + uint32_t pos; +}; + +uint16_t gpio_axis_4bit_gray_map_table[] = { + [0x0] = 0x0, [0x1] = 0x1, /* 0000 0001 */ + [0x3] = 0x2, [0x2] = 0x3, /* 0011 0010 */ + [0x6] = 0x4, [0x7] = 0x5, /* 0110 0111 */ + [0x5] = 0x6, [0x4] = 0x7, /* 0101 0100 */ + [0xc] = 0x8, [0xd] = 0x9, /* 1100 1101 */ + [0xf] = 0xa, [0xe] = 0xb, /* 1111 1110 */ + [0xa] = 0xc, [0xb] = 0xd, /* 1010 1011 */ + [0x9] = 0xe, [0x8] = 0xf, /* 1001 1000 */ +}; +uint16_t gpio_axis_4bit_gray_map(struct gpio_event_axis_info *info, uint16_t in) +{ + return gpio_axis_4bit_gray_map_table[in]; +} + +uint16_t gpio_axis_5bit_singletrack_map_table[] = { + [0x10] = 0x00, [0x14] = 0x01, [0x1c] = 0x02, /* 10000 10100 11100 */ + [0x1e] = 0x03, [0x1a] = 0x04, [0x18] = 0x05, /* 11110 11010 11000 */ + [0x08] = 0x06, [0x0a] = 0x07, [0x0e] = 0x08, /* 01000 01010 01110 */ + [0x0f] = 0x09, [0x0d] = 0x0a, [0x0c] = 0x0b, /* 01111 01101 01100 */ + [0x04] = 0x0c, [0x05] = 0x0d, [0x07] = 0x0e, /* 00100 00101 00111 */ + [0x17] = 0x0f, [0x16] = 0x10, [0x06] = 0x11, /* 10111 10110 00110 */ + [0x02] = 0x12, [0x12] = 0x13, [0x13] = 0x14, /* 00010 10010 10011 */ + [0x1b] = 0x15, [0x0b] = 0x16, [0x03] = 0x17, /* 11011 01011 00011 */ + [0x01] = 0x18, [0x09] = 0x19, [0x19] = 0x1a, /* 00001 01001 11001 */ + [0x1d] = 0x1b, [0x15] = 0x1c, [0x11] = 0x1d, /* 11101 10101 10001 */ +}; +uint16_t gpio_axis_5bit_singletrack_map( + struct gpio_event_axis_info *info, uint16_t in) +{ + return gpio_axis_5bit_singletrack_map_table[in]; +} + +static void gpio_event_update_axis(struct gpio_axis_state *as, int report) +{ + struct gpio_event_axis_info *ai = as->info; + int i; + int change; + uint16_t state = 0; + uint16_t pos; + uint16_t old_pos = as->pos; + for (i = ai->count - 1; i >= 0; i--) + state = (state << 1) | gpio_get_value(ai->gpio[i]); + pos = ai->map(ai, state); + if (ai->flags & GPIOEAF_PRINT_RAW) + pr_info("axis %d-%d raw %x, pos %d -> %d\n", + ai->type, ai->code, state, old_pos, pos); + if (report && pos != old_pos) { + if (ai->type == EV_REL) { + change = (ai->decoded_size + pos - old_pos) % + ai->decoded_size; + if (change > ai->decoded_size / 2) + change -= ai->decoded_size; + if (change == ai->decoded_size / 2) { + if (ai->flags & GPIOEAF_PRINT_EVENT) + pr_info("axis %d-%d unknown direction, " + "pos %d -> %d\n", ai->type, + ai->code, old_pos, pos); + change = 0; /* no closest direction */ + } + if (ai->flags & GPIOEAF_PRINT_EVENT) + pr_info("axis %d-%d change %d\n", + ai->type, ai->code, change); + input_report_rel(as->input_devs->dev[ai->dev], + ai->code, change); + } else { + if (ai->flags & GPIOEAF_PRINT_EVENT) + pr_info("axis %d-%d now %d\n", + ai->type, ai->code, pos); + input_event(as->input_devs->dev[ai->dev], + ai->type, ai->code, pos); + } + input_sync(as->input_devs->dev[ai->dev]); + } + as->pos = pos; +} + +static irqreturn_t gpio_axis_irq_handler(int irq, void *dev_id) +{ + struct gpio_axis_state *as = dev_id; + gpio_event_update_axis(as, 1); + return IRQ_HANDLED; +} + +int gpio_event_axis_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func) +{ + int ret; + int i; + int irq; + struct gpio_event_axis_info *ai; + struct gpio_axis_state *as; + + ai = container_of(info, struct gpio_event_axis_info, info); + if (func == GPIO_EVENT_FUNC_SUSPEND) { + for (i = 0; i < ai->count; i++) + disable_irq(gpio_to_irq(ai->gpio[i])); + return 0; + } + if 
(func == GPIO_EVENT_FUNC_RESUME) { + for (i = 0; i < ai->count; i++) + enable_irq(gpio_to_irq(ai->gpio[i])); + return 0; + } + + if (func == GPIO_EVENT_FUNC_INIT) { + *data = as = kmalloc(sizeof(*as), GFP_KERNEL); + if (as == NULL) { + ret = -ENOMEM; + goto err_alloc_axis_state_failed; + } + as->input_devs = input_devs; + as->info = ai; + if (ai->dev >= input_devs->count) { + pr_err("gpio_event_axis: bad device index %d >= %d " + "for %d:%d\n", ai->dev, input_devs->count, + ai->type, ai->code); + ret = -EINVAL; + goto err_bad_device_index; + } + + input_set_capability(input_devs->dev[ai->dev], + ai->type, ai->code); + if (ai->type == EV_ABS) { + input_set_abs_params(input_devs->dev[ai->dev], ai->code, + 0, ai->decoded_size - 1, 0, 0); + } + for (i = 0; i < ai->count; i++) { + ret = gpio_request(ai->gpio[i], "gpio_event_axis"); + if (ret < 0) + goto err_request_gpio_failed; + ret = gpio_direction_input(ai->gpio[i]); + if (ret < 0) + goto err_gpio_direction_input_failed; + ret = irq = gpio_to_irq(ai->gpio[i]); + if (ret < 0) + goto err_get_irq_num_failed; + ret = request_irq(irq, gpio_axis_irq_handler, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, + "gpio_event_axis", as); + if (ret < 0) + goto err_request_irq_failed; + } + gpio_event_update_axis(as, 0); + return 0; + } + + ret = 0; + as = *data; + for (i = ai->count - 1; i >= 0; i--) { + free_irq(gpio_to_irq(ai->gpio[i]), as); +err_request_irq_failed: +err_get_irq_num_failed: +err_gpio_direction_input_failed: + gpio_free(ai->gpio[i]); +err_request_gpio_failed: + ; + } +err_bad_device_index: + kfree(as); + *data = NULL; +err_alloc_axis_state_failed: + return ret; +} diff --git a/drivers/input/misc/gpio_event.c b/drivers/input/misc/gpio_event.c new file mode 100644 index 000000000000..90f07eba3ce9 --- /dev/null +++ b/drivers/input/misc/gpio_event.c @@ -0,0 +1,228 @@ +/* drivers/input/misc/gpio_event.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include + +struct gpio_event { + struct gpio_event_input_devs *input_devs; + const struct gpio_event_platform_data *info; + void *state[0]; +}; + +static int gpio_input_event( + struct input_dev *dev, unsigned int type, unsigned int code, int value) +{ + int i; + int devnr; + int ret = 0; + int tmp_ret; + struct gpio_event_info **ii; + struct gpio_event *ip = input_get_drvdata(dev); + + for (devnr = 0; devnr < ip->input_devs->count; devnr++) + if (ip->input_devs->dev[devnr] == dev) + break; + if (devnr == ip->input_devs->count) { + pr_err("gpio_input_event: unknown device %p\n", dev); + return -EIO; + } + + for (i = 0, ii = ip->info->info; i < ip->info->info_count; i++, ii++) { + if ((*ii)->event) { + tmp_ret = (*ii)->event(ip->input_devs, *ii, + &ip->state[i], + devnr, type, code, value); + if (tmp_ret) + ret = tmp_ret; + } + } + return ret; +} + +static int gpio_event_call_all_func(struct gpio_event *ip, int func) +{ + int i; + int ret; + struct gpio_event_info **ii; + + if (func == GPIO_EVENT_FUNC_INIT || func == GPIO_EVENT_FUNC_RESUME) { + ii = ip->info->info; + for (i = 0; i < ip->info->info_count; i++, ii++) { + if ((*ii)->func == NULL) { + ret = -ENODEV; + pr_err("gpio_event_probe: Incomplete pdata, " + "no function\n"); + goto err_no_func; + } + if (func == GPIO_EVENT_FUNC_RESUME && (*ii)->no_suspend) + continue; + ret = (*ii)->func(ip->input_devs, *ii, &ip->state[i], + func); + if (ret) { + pr_err("gpio_event_probe: function failed\n"); + goto err_func_failed; + } + } + return 0; + } + + ret = 0; + i = ip->info->info_count; + ii = ip->info->info + i; + while (i > 0) { + i--; + ii--; + if ((func & ~1) == GPIO_EVENT_FUNC_SUSPEND && (*ii)->no_suspend) + continue; + (*ii)->func(ip->input_devs, *ii, &ip->state[i], func & ~1); +err_func_failed: +err_no_func: + ; + } + return ret; +} + +static void __maybe_unused gpio_event_suspend(struct gpio_event *ip) +{ + gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_SUSPEND); + if (ip->info->power) + ip->info->power(ip->info, 0); +} + +static void __maybe_unused gpio_event_resume(struct gpio_event *ip) +{ + if (ip->info->power) + ip->info->power(ip->info, 1); + gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_RESUME); +} + +static int gpio_event_probe(struct platform_device *pdev) +{ + int err; + struct gpio_event *ip; + struct gpio_event_platform_data *event_info; + int dev_count = 1; + int i; + int registered = 0; + + event_info = pdev->dev.platform_data; + if (event_info == NULL) { + pr_err("gpio_event_probe: No pdata\n"); + return -ENODEV; + } + if ((!event_info->name && !event_info->names[0]) || + !event_info->info || !event_info->info_count) { + pr_err("gpio_event_probe: Incomplete pdata\n"); + return -ENODEV; + } + if (!event_info->name) + while (event_info->names[dev_count]) + dev_count++; + ip = kzalloc(sizeof(*ip) + + sizeof(ip->state[0]) * event_info->info_count + + sizeof(*ip->input_devs) + + sizeof(ip->input_devs->dev[0]) * dev_count, GFP_KERNEL); + if (ip == NULL) { + err = -ENOMEM; + pr_err("gpio_event_probe: Failed to allocate private data\n"); + goto err_kp_alloc_failed; + } + ip->input_devs = (void*)&ip->state[event_info->info_count]; + platform_set_drvdata(pdev, ip); + + for (i = 0; i < dev_count; i++) { + struct input_dev *input_dev = input_allocate_device(); + if (input_dev == NULL) { + err = -ENOMEM; + pr_err("gpio_event_probe: " + "Failed to allocate input device\n"); + goto err_input_dev_alloc_failed; + } + input_set_drvdata(input_dev, ip); + input_dev->name = 
event_info->name ? + event_info->name : event_info->names[i]; + input_dev->event = gpio_input_event; + ip->input_devs->dev[i] = input_dev; + } + ip->input_devs->count = dev_count; + ip->info = event_info; + if (event_info->power) + ip->info->power(ip->info, 1); + + err = gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_INIT); + if (err) + goto err_call_all_func_failed; + + for (i = 0; i < dev_count; i++) { + err = input_register_device(ip->input_devs->dev[i]); + if (err) { + pr_err("gpio_event_probe: Unable to register %s " + "input device\n", ip->input_devs->dev[i]->name); + goto err_input_register_device_failed; + } + registered++; + } + + return 0; + +err_input_register_device_failed: + gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT); +err_call_all_func_failed: + if (event_info->power) + ip->info->power(ip->info, 0); + for (i = 0; i < registered; i++) + input_unregister_device(ip->input_devs->dev[i]); + for (i = dev_count - 1; i >= registered; i--) { + input_free_device(ip->input_devs->dev[i]); +err_input_dev_alloc_failed: + ; + } + kfree(ip); +err_kp_alloc_failed: + return err; +} + +static int gpio_event_remove(struct platform_device *pdev) +{ + struct gpio_event *ip = platform_get_drvdata(pdev); + int i; + + gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT); + if (ip->info->power) + ip->info->power(ip->info, 0); + for (i = 0; i < ip->input_devs->count; i++) + input_unregister_device(ip->input_devs->dev[i]); + kfree(ip); + return 0; +} + +static struct platform_driver gpio_event_driver = { + .probe = gpio_event_probe, + .remove = gpio_event_remove, + .driver = { + .name = GPIO_EVENT_DEV_NAME, + }, +}; + +module_platform_driver(gpio_event_driver); + +MODULE_DESCRIPTION("GPIO Event Driver"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/input/misc/gpio_input.c b/drivers/input/misc/gpio_input.c new file mode 100644 index 000000000000..5875d739c550 --- /dev/null +++ b/drivers/input/misc/gpio_input.c @@ -0,0 +1,390 @@ +/* drivers/input/misc/gpio_input.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + DEBOUNCE_UNSTABLE = BIT(0), /* Got irq, while debouncing */ + DEBOUNCE_PRESSED = BIT(1), + DEBOUNCE_NOTPRESSED = BIT(2), + DEBOUNCE_WAIT_IRQ = BIT(3), /* Stable irq state */ + DEBOUNCE_POLL = BIT(4), /* Stable polling state */ + + DEBOUNCE_UNKNOWN = + DEBOUNCE_PRESSED | DEBOUNCE_NOTPRESSED, +}; + +struct gpio_key_state { + struct gpio_input_state *ds; + uint8_t debounce; +}; + +struct gpio_input_state { + struct gpio_event_input_devs *input_devs; + const struct gpio_event_input_info *info; + struct hrtimer timer; + int use_irq; + int debounce_count; + spinlock_t irq_lock; + struct wakeup_source *ws; + struct gpio_key_state key_state[0]; +}; + +static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer) +{ + int i; + int pressed; + struct gpio_input_state *ds = + container_of(timer, struct gpio_input_state, timer); + unsigned gpio_flags = ds->info->flags; + unsigned npolarity; + int nkeys = ds->info->keymap_size; + const struct gpio_event_direct_entry *key_entry; + struct gpio_key_state *key_state; + unsigned long irqflags; + uint8_t debounce; + bool sync_needed; + +#if 0 + key_entry = kp->keys_info->keymap; + key_state = kp->key_state; + for (i = 0; i < nkeys; i++, key_entry++, key_state++) + pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio, + gpio_read_detect_status(key_entry->gpio)); +#endif + key_entry = ds->info->keymap; + key_state = ds->key_state; + sync_needed = false; + spin_lock_irqsave(&ds->irq_lock, irqflags); + for (i = 0; i < nkeys; i++, key_entry++, key_state++) { + debounce = key_state->debounce; + if (debounce & DEBOUNCE_WAIT_IRQ) + continue; + if (key_state->debounce & DEBOUNCE_UNSTABLE) { + debounce = key_state->debounce = DEBOUNCE_UNKNOWN; + enable_irq(gpio_to_irq(key_entry->gpio)); + if (gpio_flags & GPIOEDF_PRINT_KEY_UNSTABLE) + pr_info("gpio_keys_scan_keys: key %x-%x, %d " + "(%d) continue debounce\n", + ds->info->type, key_entry->code, + i, key_entry->gpio); + } + npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH); + pressed = gpio_get_value(key_entry->gpio) ^ npolarity; + if (debounce & DEBOUNCE_POLL) { + if (pressed == !(debounce & DEBOUNCE_PRESSED)) { + ds->debounce_count++; + key_state->debounce = DEBOUNCE_UNKNOWN; + if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE) + pr_info("gpio_keys_scan_keys: key %x-" + "%x, %d (%d) start debounce\n", + ds->info->type, key_entry->code, + i, key_entry->gpio); + } + continue; + } + if (pressed && (debounce & DEBOUNCE_NOTPRESSED)) { + if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE) + pr_info("gpio_keys_scan_keys: key %x-%x, %d " + "(%d) debounce pressed 1\n", + ds->info->type, key_entry->code, + i, key_entry->gpio); + key_state->debounce = DEBOUNCE_PRESSED; + continue; + } + if (!pressed && (debounce & DEBOUNCE_PRESSED)) { + if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE) + pr_info("gpio_keys_scan_keys: key %x-%x, %d " + "(%d) debounce pressed 0\n", + ds->info->type, key_entry->code, + i, key_entry->gpio); + key_state->debounce = DEBOUNCE_NOTPRESSED; + continue; + } + /* key is stable */ + ds->debounce_count--; + if (ds->use_irq) + key_state->debounce |= DEBOUNCE_WAIT_IRQ; + else + key_state->debounce |= DEBOUNCE_POLL; + if (gpio_flags & GPIOEDF_PRINT_KEYS) + pr_info("gpio_keys_scan_keys: key %x-%x, %d (%d) " + "changed to %d\n", ds->info->type, + key_entry->code, i, key_entry->gpio, pressed); + input_event(ds->input_devs->dev[key_entry->dev], ds->info->type, + key_entry->code, pressed); + sync_needed = true; + 
} + if (sync_needed) { + for (i = 0; i < ds->input_devs->count; i++) + input_sync(ds->input_devs->dev[i]); + } + +#if 0 + key_entry = kp->keys_info->keymap; + key_state = kp->key_state; + for (i = 0; i < nkeys; i++, key_entry++, key_state++) { + pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio, + gpio_read_detect_status(key_entry->gpio)); + } +#endif + + if (ds->debounce_count) + hrtimer_start(timer, ds->info->debounce_time, HRTIMER_MODE_REL); + else if (!ds->use_irq) + hrtimer_start(timer, ds->info->poll_time, HRTIMER_MODE_REL); + else + __pm_relax(ds->ws); + + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + + return HRTIMER_NORESTART; +} + +static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id) +{ + struct gpio_key_state *ks = dev_id; + struct gpio_input_state *ds = ks->ds; + int keymap_index = ks - ds->key_state; + const struct gpio_event_direct_entry *key_entry; + unsigned long irqflags; + int pressed; + + if (!ds->use_irq) + return IRQ_HANDLED; + + key_entry = &ds->info->keymap[keymap_index]; + + if (ds->info->debounce_time) { + spin_lock_irqsave(&ds->irq_lock, irqflags); + if (ks->debounce & DEBOUNCE_WAIT_IRQ) { + ks->debounce = DEBOUNCE_UNKNOWN; + if (ds->debounce_count++ == 0) { + __pm_stay_awake(ds->ws); + hrtimer_start( + &ds->timer, ds->info->debounce_time, + HRTIMER_MODE_REL); + } + if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE) + pr_info("gpio_event_input_irq_handler: " + "key %x-%x, %d (%d) start debounce\n", + ds->info->type, key_entry->code, + keymap_index, key_entry->gpio); + } else { + disable_irq_nosync(irq); + ks->debounce = DEBOUNCE_UNSTABLE; + } + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + } else { + pressed = gpio_get_value(key_entry->gpio) ^ + !(ds->info->flags & GPIOEDF_ACTIVE_HIGH); + if (ds->info->flags & GPIOEDF_PRINT_KEYS) + pr_info("gpio_event_input_irq_handler: key %x-%x, %d " + "(%d) changed to %d\n", + ds->info->type, key_entry->code, keymap_index, + key_entry->gpio, pressed); + input_event(ds->input_devs->dev[key_entry->dev], ds->info->type, + key_entry->code, pressed); + input_sync(ds->input_devs->dev[key_entry->dev]); + } + return IRQ_HANDLED; +} + +static int gpio_event_input_request_irqs(struct gpio_input_state *ds) +{ + int i; + int err; + unsigned int irq; + unsigned long req_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; + + for (i = 0; i < ds->info->keymap_size; i++) { + err = irq = gpio_to_irq(ds->info->keymap[i].gpio); + if (err < 0) + goto err_gpio_get_irq_num_failed; + err = request_irq(irq, gpio_event_input_irq_handler, + req_flags, "gpio_keys", &ds->key_state[i]); + if (err) { + pr_err("gpio_event_input_request_irqs: request_irq " + "failed for input %d, irq %d\n", + ds->info->keymap[i].gpio, irq); + goto err_request_irq_failed; + } + if (ds->info->info.no_suspend) { + err = enable_irq_wake(irq); + if (err) { + pr_err("gpio_event_input_request_irqs: " + "enable_irq_wake failed for input %d, " + "irq %d\n", + ds->info->keymap[i].gpio, irq); + goto err_enable_irq_wake_failed; + } + } + } + return 0; + + for (i = ds->info->keymap_size - 1; i >= 0; i--) { + irq = gpio_to_irq(ds->info->keymap[i].gpio); + if (ds->info->info.no_suspend) + disable_irq_wake(irq); +err_enable_irq_wake_failed: + free_irq(irq, &ds->key_state[i]); +err_request_irq_failed: +err_gpio_get_irq_num_failed: + ; + } + return err; +} + +int gpio_event_input_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func) +{ + int ret; + int i; + unsigned long irqflags; + struct gpio_event_input_info 
*di; + struct gpio_input_state *ds = *data; + char *wlname; + + di = container_of(info, struct gpio_event_input_info, info); + + if (func == GPIO_EVENT_FUNC_SUSPEND) { + if (ds->use_irq) + for (i = 0; i < di->keymap_size; i++) + disable_irq(gpio_to_irq(di->keymap[i].gpio)); + hrtimer_cancel(&ds->timer); + return 0; + } + if (func == GPIO_EVENT_FUNC_RESUME) { + spin_lock_irqsave(&ds->irq_lock, irqflags); + if (ds->use_irq) + for (i = 0; i < di->keymap_size; i++) + enable_irq(gpio_to_irq(di->keymap[i].gpio)); + hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL); + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + return 0; + } + + if (func == GPIO_EVENT_FUNC_INIT) { + if (ktime_to_ns(di->poll_time) <= 0) + di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC); + + *data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) * + di->keymap_size, GFP_KERNEL); + if (ds == NULL) { + ret = -ENOMEM; + pr_err("gpio_event_input_func: " + "Failed to allocate private data\n"); + goto err_ds_alloc_failed; + } + ds->debounce_count = di->keymap_size; + ds->input_devs = input_devs; + ds->info = di; + wlname = kasprintf(GFP_KERNEL, "gpio_input:%s%s", + input_devs->dev[0]->name, + (input_devs->count > 1) ? "..." : ""); + + ds->ws = wakeup_source_register(wlname); + kfree(wlname); + if (!ds->ws) { + ret = -ENOMEM; + pr_err("gpio_event_input_func: " + "Failed to allocate wakeup source\n"); + goto err_ws_failed; + } + + spin_lock_init(&ds->irq_lock); + + for (i = 0; i < di->keymap_size; i++) { + int dev = di->keymap[i].dev; + if (dev >= input_devs->count) { + pr_err("gpio_event_input_func: bad device " + "index %d >= %d for key code %d\n", + dev, input_devs->count, + di->keymap[i].code); + ret = -EINVAL; + goto err_bad_keymap; + } + input_set_capability(input_devs->dev[dev], di->type, + di->keymap[i].code); + ds->key_state[i].ds = ds; + ds->key_state[i].debounce = DEBOUNCE_UNKNOWN; + } + + for (i = 0; i < di->keymap_size; i++) { + ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in"); + if (ret) { + pr_err("gpio_event_input_func: gpio_request " + "failed for %d\n", di->keymap[i].gpio); + goto err_gpio_request_failed; + } + ret = gpio_direction_input(di->keymap[i].gpio); + if (ret) { + pr_err("gpio_event_input_func: " + "gpio_direction_input failed for %d\n", + di->keymap[i].gpio); + goto err_gpio_configure_failed; + } + } + + ret = gpio_event_input_request_irqs(ds); + + spin_lock_irqsave(&ds->irq_lock, irqflags); + ds->use_irq = ret == 0; + + pr_info("GPIO Input Driver: Start gpio inputs for %s%s in %s " + "mode\n", input_devs->dev[0]->name, + (input_devs->count > 1) ? "..." : "", + ret == 0 ? 
"interrupt" : "polling"); + + hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + ds->timer.function = gpio_event_input_timer_func; + hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL); + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + return 0; + } + + ret = 0; + spin_lock_irqsave(&ds->irq_lock, irqflags); + hrtimer_cancel(&ds->timer); + if (ds->use_irq) { + for (i = di->keymap_size - 1; i >= 0; i--) { + int irq = gpio_to_irq(di->keymap[i].gpio); + if (ds->info->info.no_suspend) + disable_irq_wake(irq); + free_irq(irq, &ds->key_state[i]); + } + } + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + + for (i = di->keymap_size - 1; i >= 0; i--) { +err_gpio_configure_failed: + gpio_free(di->keymap[i].gpio); +err_gpio_request_failed: + ; + } +err_bad_keymap: + wakeup_source_unregister(ds->ws); +err_ws_failed: + kfree(ds); +err_ds_alloc_failed: + return ret; +} diff --git a/drivers/input/misc/gpio_matrix.c b/drivers/input/misc/gpio_matrix.c new file mode 100644 index 000000000000..08769dd88f56 --- /dev/null +++ b/drivers/input/misc/gpio_matrix.c @@ -0,0 +1,440 @@ +/* drivers/input/misc/gpio_matrix.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include + +struct gpio_kp { + struct gpio_event_input_devs *input_devs; + struct gpio_event_matrix_info *keypad_info; + struct hrtimer timer; + struct wakeup_source wake_src; + int current_output; + unsigned int use_irq:1; + unsigned int key_state_changed:1; + unsigned int last_key_state_changed:1; + unsigned int some_keys_pressed:2; + unsigned int disabled_irq:1; + unsigned long keys_pressed[0]; +}; + +static void clear_phantom_key(struct gpio_kp *kp, int out, int in) +{ + struct gpio_event_matrix_info *mi = kp->keypad_info; + int key_index = out * mi->ninputs + in; + unsigned short keyentry = mi->keymap[key_index]; + unsigned short keycode = keyentry & MATRIX_KEY_MASK; + unsigned short dev = keyentry >> MATRIX_CODE_BITS; + + if (!test_bit(keycode, kp->input_devs->dev[dev]->key)) { + if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS) + pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) " + "cleared\n", keycode, out, in, + mi->output_gpios[out], mi->input_gpios[in]); + __clear_bit(key_index, kp->keys_pressed); + } else { + if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS) + pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) " + "not cleared\n", keycode, out, in, + mi->output_gpios[out], mi->input_gpios[in]); + } +} + +static int restore_keys_for_input(struct gpio_kp *kp, int out, int in) +{ + int rv = 0; + int key_index; + + key_index = out * kp->keypad_info->ninputs + in; + while (out < kp->keypad_info->noutputs) { + if (test_bit(key_index, kp->keys_pressed)) { + rv = 1; + clear_phantom_key(kp, out, in); + } + key_index += kp->keypad_info->ninputs; + out++; + } + return rv; +} + +static void remove_phantom_keys(struct gpio_kp *kp) +{ + int out, in, inp; + int key_index; + + if (kp->some_keys_pressed < 3) + return; + + for (out = 0; out < kp->keypad_info->noutputs; out++) { + inp = -1; + key_index = out * 
kp->keypad_info->ninputs; + for (in = 0; in < kp->keypad_info->ninputs; in++, key_index++) { + if (test_bit(key_index, kp->keys_pressed)) { + if (inp == -1) { + inp = in; + continue; + } + if (inp >= 0) { + if (!restore_keys_for_input(kp, out + 1, + inp)) + break; + clear_phantom_key(kp, out, inp); + inp = -2; + } + restore_keys_for_input(kp, out, in); + } + } + } +} + +static void report_key(struct gpio_kp *kp, int key_index, int out, int in) +{ + struct gpio_event_matrix_info *mi = kp->keypad_info; + int pressed = test_bit(key_index, kp->keys_pressed); + unsigned short keyentry = mi->keymap[key_index]; + unsigned short keycode = keyentry & MATRIX_KEY_MASK; + unsigned short dev = keyentry >> MATRIX_CODE_BITS; + + if (pressed != test_bit(keycode, kp->input_devs->dev[dev]->key)) { + if (keycode == KEY_RESERVED) { + if (mi->flags & GPIOKPF_PRINT_UNMAPPED_KEYS) + pr_info("gpiomatrix: unmapped key, %d-%d " + "(%d-%d) changed to %d\n", + out, in, mi->output_gpios[out], + mi->input_gpios[in], pressed); + } else { + if (mi->flags & GPIOKPF_PRINT_MAPPED_KEYS) + pr_info("gpiomatrix: key %x, %d-%d (%d-%d) " + "changed to %d\n", keycode, + out, in, mi->output_gpios[out], + mi->input_gpios[in], pressed); + input_report_key(kp->input_devs->dev[dev], keycode, pressed); + } + } +} + +static void report_sync(struct gpio_kp *kp) +{ + int i; + + for (i = 0; i < kp->input_devs->count; i++) + input_sync(kp->input_devs->dev[i]); +} + +static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer) +{ + int out, in; + int key_index; + int gpio; + struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer); + struct gpio_event_matrix_info *mi = kp->keypad_info; + unsigned gpio_keypad_flags = mi->flags; + unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH); + + out = kp->current_output; + if (out == mi->noutputs) { + out = 0; + kp->last_key_state_changed = kp->key_state_changed; + kp->key_state_changed = 0; + kp->some_keys_pressed = 0; + } else { + key_index = out * mi->ninputs; + for (in = 0; in < mi->ninputs; in++, key_index++) { + gpio = mi->input_gpios[in]; + if (gpio_get_value(gpio) ^ !polarity) { + if (kp->some_keys_pressed < 3) + kp->some_keys_pressed++; + kp->key_state_changed |= !__test_and_set_bit( + key_index, kp->keys_pressed); + } else + kp->key_state_changed |= __test_and_clear_bit( + key_index, kp->keys_pressed); + } + gpio = mi->output_gpios[out]; + if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE) + gpio_set_value(gpio, !polarity); + else + gpio_direction_input(gpio); + out++; + } + kp->current_output = out; + if (out < mi->noutputs) { + gpio = mi->output_gpios[out]; + if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE) + gpio_set_value(gpio, polarity); + else + gpio_direction_output(gpio, polarity); + hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL); + return HRTIMER_NORESTART; + } + if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) { + if (kp->key_state_changed) { + hrtimer_start(&kp->timer, mi->debounce_delay, + HRTIMER_MODE_REL); + return HRTIMER_NORESTART; + } + kp->key_state_changed = kp->last_key_state_changed; + } + if (kp->key_state_changed) { + if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS) + remove_phantom_keys(kp); + key_index = 0; + for (out = 0; out < mi->noutputs; out++) + for (in = 0; in < mi->ninputs; in++, key_index++) + report_key(kp, key_index, out, in); + report_sync(kp); + } + if (!kp->use_irq || kp->some_keys_pressed) { + hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL); + return HRTIMER_NORESTART; + } + + /* No keys are pressed, reenable 
interrupt */ + for (out = 0; out < mi->noutputs; out++) { + if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE) + gpio_set_value(mi->output_gpios[out], polarity); + else + gpio_direction_output(mi->output_gpios[out], polarity); + } + for (in = 0; in < mi->ninputs; in++) + enable_irq(gpio_to_irq(mi->input_gpios[in])); + __pm_relax(&kp->wake_src); + return HRTIMER_NORESTART; +} + +static irqreturn_t gpio_keypad_irq_handler(int irq_in, void *dev_id) +{ + int i; + struct gpio_kp *kp = dev_id; + struct gpio_event_matrix_info *mi = kp->keypad_info; + unsigned gpio_keypad_flags = mi->flags; + + if (!kp->use_irq) { + /* ignore interrupt while registering the handler */ + kp->disabled_irq = 1; + disable_irq_nosync(irq_in); + return IRQ_HANDLED; + } + + for (i = 0; i < mi->ninputs; i++) + disable_irq_nosync(gpio_to_irq(mi->input_gpios[i])); + for (i = 0; i < mi->noutputs; i++) { + if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE) + gpio_set_value(mi->output_gpios[i], + !(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH)); + else + gpio_direction_input(mi->output_gpios[i]); + } + __pm_stay_awake(&kp->wake_src); + hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL); + return IRQ_HANDLED; +} + +static int gpio_keypad_request_irqs(struct gpio_kp *kp) +{ + int i; + int err; + unsigned int irq; + unsigned long request_flags; + struct gpio_event_matrix_info *mi = kp->keypad_info; + + switch (mi->flags & (GPIOKPF_ACTIVE_HIGH|GPIOKPF_LEVEL_TRIGGERED_IRQ)) { + default: + request_flags = IRQF_TRIGGER_FALLING; + break; + case GPIOKPF_ACTIVE_HIGH: + request_flags = IRQF_TRIGGER_RISING; + break; + case GPIOKPF_LEVEL_TRIGGERED_IRQ: + request_flags = IRQF_TRIGGER_LOW; + break; + case GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_ACTIVE_HIGH: + request_flags = IRQF_TRIGGER_HIGH; + break; + } + + for (i = 0; i < mi->ninputs; i++) { + err = irq = gpio_to_irq(mi->input_gpios[i]); + if (err < 0) + goto err_gpio_get_irq_num_failed; + err = request_irq(irq, gpio_keypad_irq_handler, request_flags, + "gpio_kp", kp); + if (err) { + pr_err("gpiomatrix: request_irq failed for input %d, " + "irq %d\n", mi->input_gpios[i], irq); + goto err_request_irq_failed; + } + err = enable_irq_wake(irq); + if (err) { + pr_err("gpiomatrix: set_irq_wake failed for input %d, " + "irq %d\n", mi->input_gpios[i], irq); + } + disable_irq(irq); + if (kp->disabled_irq) { + kp->disabled_irq = 0; + enable_irq(irq); + } + } + return 0; + + for (i = mi->noutputs - 1; i >= 0; i--) { + free_irq(gpio_to_irq(mi->input_gpios[i]), kp); +err_request_irq_failed: +err_gpio_get_irq_num_failed: + ; + } + return err; +} + +int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func) +{ + int i; + int err; + int key_count; + struct gpio_kp *kp; + struct gpio_event_matrix_info *mi; + + mi = container_of(info, struct gpio_event_matrix_info, info); + if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) { + /* TODO: disable scanning */ + return 0; + } + + if (func == GPIO_EVENT_FUNC_INIT) { + if (mi->keymap == NULL || + mi->input_gpios == NULL || + mi->output_gpios == NULL) { + err = -ENODEV; + pr_err("gpiomatrix: Incomplete pdata\n"); + goto err_invalid_platform_data; + } + key_count = mi->ninputs * mi->noutputs; + + *data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) * + BITS_TO_LONGS(key_count), GFP_KERNEL); + if (kp == NULL) { + err = -ENOMEM; + pr_err("gpiomatrix: Failed to allocate private data\n"); + goto err_kp_alloc_failed; + } + kp->input_devs = input_devs; + kp->keypad_info = mi; + 
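+ /*
+ * Each keymap entry packs the keycode into the low bits (masked by
+ * MATRIX_KEY_MASK) and the target input device index into the bits
+ * above MATRIX_CODE_BITS; the loop below rejects out-of-range device
+ * indices before advertising the keycode as a capability.
+ */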
for (i = 0; i < key_count; i++) { + unsigned short keyentry = mi->keymap[i]; + unsigned short keycode = keyentry & MATRIX_KEY_MASK; + unsigned short dev = keyentry >> MATRIX_CODE_BITS; + if (dev >= input_devs->count) { + pr_err("gpiomatrix: bad device index %d >= " + "%d for key code %d\n", + dev, input_devs->count, keycode); + err = -EINVAL; + goto err_bad_keymap; + } + if (keycode && keycode <= KEY_MAX) + input_set_capability(input_devs->dev[dev], + EV_KEY, keycode); + } + + for (i = 0; i < mi->noutputs; i++) { + err = gpio_request(mi->output_gpios[i], "gpio_kp_out"); + if (err) { + pr_err("gpiomatrix: gpio_request failed for " + "output %d\n", mi->output_gpios[i]); + goto err_request_output_gpio_failed; + } + if (gpio_cansleep(mi->output_gpios[i])) { + pr_err("gpiomatrix: unsupported output gpio %d," + " can sleep\n", mi->output_gpios[i]); + err = -EINVAL; + goto err_output_gpio_configure_failed; + } + if (mi->flags & GPIOKPF_DRIVE_INACTIVE) + err = gpio_direction_output(mi->output_gpios[i], + !(mi->flags & GPIOKPF_ACTIVE_HIGH)); + else + err = gpio_direction_input(mi->output_gpios[i]); + if (err) { + pr_err("gpiomatrix: gpio_configure failed for " + "output %d\n", mi->output_gpios[i]); + goto err_output_gpio_configure_failed; + } + } + for (i = 0; i < mi->ninputs; i++) { + err = gpio_request(mi->input_gpios[i], "gpio_kp_in"); + if (err) { + pr_err("gpiomatrix: gpio_request failed for " + "input %d\n", mi->input_gpios[i]); + goto err_request_input_gpio_failed; + } + err = gpio_direction_input(mi->input_gpios[i]); + if (err) { + pr_err("gpiomatrix: gpio_direction_input failed" + " for input %d\n", mi->input_gpios[i]); + goto err_gpio_direction_input_failed; + } + } + kp->current_output = mi->noutputs; + kp->key_state_changed = 1; + + hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + kp->timer.function = gpio_keypad_timer_func; + wakeup_source_init(&kp->wake_src, "gpio_kp"); + err = gpio_keypad_request_irqs(kp); + kp->use_irq = err == 0; + + pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for " + "%s%s in %s mode\n", input_devs->dev[0]->name, + (input_devs->count > 1) ? "..." : "", + kp->use_irq ? "interrupt" : "polling"); + + if (kp->use_irq) + __pm_stay_awake(&kp->wake_src); + hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL); + + return 0; + } + + err = 0; + kp = *data; + + if (kp->use_irq) + for (i = mi->noutputs - 1; i >= 0; i--) + free_irq(gpio_to_irq(mi->input_gpios[i]), kp); + + hrtimer_cancel(&kp->timer); + wakeup_source_trash(&kp->wake_src); + for (i = mi->noutputs - 1; i >= 0; i--) { +err_gpio_direction_input_failed: + gpio_free(mi->input_gpios[i]); +err_request_input_gpio_failed: + ; + } + for (i = mi->noutputs - 1; i >= 0; i--) { +err_output_gpio_configure_failed: + gpio_free(mi->output_gpios[i]); +err_request_output_gpio_failed: + ; + } +err_bad_keymap: + kfree(kp); +err_kp_alloc_failed: +err_invalid_platform_data: + return err; +} diff --git a/drivers/input/misc/gpio_output.c b/drivers/input/misc/gpio_output.c new file mode 100644 index 000000000000..2aac2fad0a17 --- /dev/null +++ b/drivers/input/misc/gpio_output.c @@ -0,0 +1,97 @@ +/* drivers/input/misc/gpio_output.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include + +int gpio_event_output_event( + struct gpio_event_input_devs *input_devs, struct gpio_event_info *info, + void **data, unsigned int dev, unsigned int type, + unsigned int code, int value) +{ + int i; + struct gpio_event_output_info *oi; + oi = container_of(info, struct gpio_event_output_info, info); + if (type != oi->type) + return 0; + if (!(oi->flags & GPIOEDF_ACTIVE_HIGH)) + value = !value; + for (i = 0; i < oi->keymap_size; i++) + if (dev == oi->keymap[i].dev && code == oi->keymap[i].code) + gpio_set_value(oi->keymap[i].gpio, value); + return 0; +} + +int gpio_event_output_func( + struct gpio_event_input_devs *input_devs, struct gpio_event_info *info, + void **data, int func) +{ + int ret; + int i; + struct gpio_event_output_info *oi; + oi = container_of(info, struct gpio_event_output_info, info); + + if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) + return 0; + + if (func == GPIO_EVENT_FUNC_INIT) { + int output_level = !(oi->flags & GPIOEDF_ACTIVE_HIGH); + + for (i = 0; i < oi->keymap_size; i++) { + int dev = oi->keymap[i].dev; + if (dev >= input_devs->count) { + pr_err("gpio_event_output_func: bad device " + "index %d >= %d for key code %d\n", + dev, input_devs->count, + oi->keymap[i].code); + ret = -EINVAL; + goto err_bad_keymap; + } + input_set_capability(input_devs->dev[dev], oi->type, + oi->keymap[i].code); + } + + for (i = 0; i < oi->keymap_size; i++) { + ret = gpio_request(oi->keymap[i].gpio, + "gpio_event_output"); + if (ret) { + pr_err("gpio_event_output_func: gpio_request " + "failed for %d\n", oi->keymap[i].gpio); + goto err_gpio_request_failed; + } + ret = gpio_direction_output(oi->keymap[i].gpio, + output_level); + if (ret) { + pr_err("gpio_event_output_func: " + "gpio_direction_output failed for %d\n", + oi->keymap[i].gpio); + goto err_gpio_direction_output_failed; + } + } + return 0; + } + + ret = 0; + for (i = oi->keymap_size - 1; i >= 0; i--) { +err_gpio_direction_output_failed: + gpio_free(oi->keymap[i].gpio); +err_gpio_request_failed: + ; + } +err_bad_keymap: + return ret; +} + diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c new file mode 100644 index 000000000000..791f285b0c13 --- /dev/null +++ b/drivers/input/misc/keychord.c @@ -0,0 +1,467 @@ +/* + * drivers/input/misc/keychord.c + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * +*/ + +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/input.h> +#include <linux/keychord.h> +#include <linux/sched.h> + +#define KEYCHORD_NAME "keychord" +#define BUFFER_SIZE 16 + +MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); +MODULE_DESCRIPTION("Key chord input driver"); +MODULE_SUPPORTED_DEVICE("keychord"); +MODULE_LICENSE("GPL"); + +#define NEXT_KEYCHORD(kc) ((struct input_keychord *) \ + ((char *)kc + sizeof(struct input_keychord) + \ + kc->count * sizeof(kc->keycodes[0]))) + +struct keychord_device { + struct input_handler input_handler; + int registered; + + /* list of keychords to monitor */ + struct input_keychord *keychords; + int keychord_count; + + /* bitmask of keys contained in our keychords */ + unsigned long keybit[BITS_TO_LONGS(KEY_CNT)]; + /* current state of the keys */ + unsigned long keystate[BITS_TO_LONGS(KEY_CNT)]; + /* number of keys that are currently pressed */ + int key_down; + + /* second input_device_id is needed for null termination */ + struct input_device_id device_ids[2]; + + spinlock_t lock; + wait_queue_head_t waitq; + unsigned char head; + unsigned char tail; + __u16 buff[BUFFER_SIZE]; + /* Bit to serialize writes to this device */ +#define KEYCHORD_BUSY 0x01 + unsigned long flags; + wait_queue_head_t write_waitq; +}; + +static int check_keychord(struct keychord_device *kdev, + struct input_keychord *keychord) +{ + int i; + + if (keychord->count != kdev->key_down) + return 0; + + for (i = 0; i < keychord->count; i++) { + if (!test_bit(keychord->keycodes[i], kdev->keystate)) + return 0; + } + + /* we have a match */ + return 1; +} + +static void keychord_event(struct input_handle *handle, unsigned int type, + unsigned int code, int value) +{ + struct keychord_device *kdev = handle->private; + struct input_keychord *keychord; + unsigned long flags; + int i, got_chord = 0; + + if (type != EV_KEY || code >= KEY_MAX) + return; + + spin_lock_irqsave(&kdev->lock, flags); + /* do nothing if key state did not change */ + if (!test_bit(code, kdev->keystate) == !value) + goto done; + __change_bit(code, kdev->keystate); + if (value) + kdev->key_down++; + else + kdev->key_down--; + + /* don't notify on key up */ + if (!value) + goto done; + /* ignore this event if it is not one of the keys we are monitoring */ + if (!test_bit(code, kdev->keybit)) + goto done; + + keychord = kdev->keychords; + if (!keychord) + goto done; + + /* check to see if the keyboard state matches any keychords */ + for (i = 0; i < kdev->keychord_count; i++) { + if (check_keychord(kdev, keychord)) { + kdev->buff[kdev->head] = keychord->id; + kdev->head = (kdev->head + 1) % BUFFER_SIZE; + got_chord = 1; + break; + } + /* skip to next keychord */ + keychord = NEXT_KEYCHORD(keychord); + } + +done: + spin_unlock_irqrestore(&kdev->lock, flags); + + if (got_chord) { + pr_info("keychord: got keychord id %d.
Any tasks: %d\n", + keychord->id, + !list_empty_careful(&kdev->waitq.head)); + wake_up_interruptible(&kdev->waitq); + } +} + +static int keychord_connect(struct input_handler *handler, + struct input_dev *dev, + const struct input_device_id *id) +{ + int i, ret; + struct input_handle *handle; + struct keychord_device *kdev = + container_of(handler, struct keychord_device, input_handler); + + /* + * ignore this input device if it does not contain any keycodes + * that we are monitoring + */ + for (i = 0; i < KEY_MAX; i++) { + if (test_bit(i, kdev->keybit) && test_bit(i, dev->keybit)) + break; + } + if (i == KEY_MAX) + return -ENODEV; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = KEYCHORD_NAME; + handle->private = kdev; + + ret = input_register_handle(handle); + if (ret) + goto err_input_register_handle; + + ret = input_open_device(handle); + if (ret) + goto err_input_open_device; + + pr_info("keychord: using input dev %s for fevent\n", dev->name); + return 0; + +err_input_open_device: + input_unregister_handle(handle); +err_input_register_handle: + kfree(handle); + return ret; +} + +static void keychord_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +/* + * keychord_read is used to read keychord events from the driver + */ +static ssize_t keychord_read(struct file *file, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct keychord_device *kdev = file->private_data; + __u16 id; + int retval; + unsigned long flags; + + if (count < sizeof(id)) + return -EINVAL; + count = sizeof(id); + + if (kdev->head == kdev->tail && (file->f_flags & O_NONBLOCK)) + return -EAGAIN; + + retval = wait_event_interruptible(kdev->waitq, + kdev->head != kdev->tail); + if (retval) + return retval; + + spin_lock_irqsave(&kdev->lock, flags); + /* pop a keychord ID off the queue */ + id = kdev->buff[kdev->tail]; + kdev->tail = (kdev->tail + 1) % BUFFER_SIZE; + spin_unlock_irqrestore(&kdev->lock, flags); + + if (copy_to_user(buffer, &id, count)) + return -EFAULT; + + return count; +} + +/* + * serializes writes on a device. can use mutex_lock_interruptible() + * for this particular use case as well - a matter of preference. 
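As context for the comment above: the buffer that keychord_write() parses below is a packed sequence of variable-length input_keychord records, which is exactly what NEXT_KEYCHORD() steps over. A minimal userspace sketch of that format, assuming the UAPI layout from <linux/keychord.h> (__u16 version, id, count, then a flexible keycodes[] array); this is an illustration only, not part of the diff:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/input.h>

	int main(void)
	{
		/* one chord: VOLUMEDOWN + POWER, reported back as id 42 */
		uint16_t cfg[] = {
			1,		/* version, must equal KEYCHORD_VERSION */
			42,		/* id that read() returns on a match */
			2,		/* number of keycodes that follow */
			KEY_VOLUMEDOWN, KEY_POWER,
		};
		uint16_t id;
		int fd = open("/dev/keychord", O_RDWR);

		if (fd < 0)
			return 1;
		/* install the chord table; more records may be concatenated */
		if (write(fd, cfg, sizeof(cfg)) != sizeof(cfg))
			return 1;
		/* blocks until all keys of some registered chord are down */
		if (read(fd, &id, sizeof(id)) == sizeof(id))
			printf("keychord %u fired\n", id);
		close(fd);
		return 0;
	}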
+ */ +static int +keychord_write_lock(struct keychord_device *kdev) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&kdev->lock, flags); + while (kdev->flags & KEYCHORD_BUSY) { + spin_unlock_irqrestore(&kdev->lock, flags); + ret = wait_event_interruptible(kdev->write_waitq, + ((kdev->flags & KEYCHORD_BUSY) == 0)); + if (ret) + return ret; + spin_lock_irqsave(&kdev->lock, flags); + } + kdev->flags |= KEYCHORD_BUSY; + spin_unlock_irqrestore(&kdev->lock, flags); + return 0; +} + +static void +keychord_write_unlock(struct keychord_device *kdev) +{ + unsigned long flags; + + spin_lock_irqsave(&kdev->lock, flags); + kdev->flags &= ~KEYCHORD_BUSY; + spin_unlock_irqrestore(&kdev->lock, flags); + wake_up_interruptible(&kdev->write_waitq); +} + +/* + * keychord_write is used to configure the driver + */ +static ssize_t keychord_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct keychord_device *kdev = file->private_data; + struct input_keychord *keychords = 0; + struct input_keychord *keychord; + int ret, i, key; + unsigned long flags; + size_t resid = count; + size_t key_bytes; + + if (count < sizeof(struct input_keychord) || count > PAGE_SIZE) + return -EINVAL; + keychords = kzalloc(count, GFP_KERNEL); + if (!keychords) + return -ENOMEM; + + /* read list of keychords from userspace */ + if (copy_from_user(keychords, buffer, count)) { + kfree(keychords); + return -EFAULT; + } + + /* + * Serialize writes to this device to prevent various races. + * 1) writers racing here could do duplicate input_unregister_handler() + * calls, resulting in attempting to unlink a node from a list that + * does not exist. + * 2) writers racing here could do duplicate input_register_handler() calls + * below, resulting in a duplicate insertion of a node into the list. + * 3) a double kfree of keychords can occur (in the event that + * input_register_handler() fails below). + */ + ret = keychord_write_lock(kdev); + if (ret) { + kfree(keychords); + return ret; + } + + /* unregister handler before changing configuration */ + if (kdev->registered) { + input_unregister_handler(&kdev->input_handler); + kdev->registered = 0; + } + + spin_lock_irqsave(&kdev->lock, flags); + /* clear any existing configuration */ + kfree(kdev->keychords); + kdev->keychords = 0; + kdev->keychord_count = 0; + kdev->key_down = 0; + memset(kdev->keybit, 0, sizeof(kdev->keybit)); + memset(kdev->keystate, 0, sizeof(kdev->keystate)); + kdev->head = kdev->tail = 0; + + keychord = keychords; + + while (resid > 0) { + /* Is the entire keychord entry header present ? */ + if (resid < sizeof(struct input_keychord)) { + pr_err("keychord: Insufficient bytes present for header %zu\n", + resid); + goto err_unlock_return; + } + resid -= sizeof(struct input_keychord); + if (keychord->count <= 0) { + pr_err("keychord: invalid keycode count %d\n", + keychord->count); + goto err_unlock_return; + } + key_bytes = keychord->count * sizeof(keychord->keycodes[0]); + /* Do we have all the expected keycodes ?
*/ + if (resid < key_bytes) { + pr_err("keychord: Insufficient bytes present for keycount %zu\n", + resid); + goto err_unlock_return; + } + resid -= key_bytes; + + if (keychord->version != KEYCHORD_VERSION) { + pr_err("keychord: unsupported version %d\n", + keychord->version); + goto err_unlock_return; + } + + /* keep track of the keys we are monitoring in keybit */ + for (i = 0; i < keychord->count; i++) { + key = keychord->keycodes[i]; + if (key < 0 || key >= KEY_CNT) { + pr_err("keychord: keycode %d out of range\n", + key); + goto err_unlock_return; + } + __set_bit(key, kdev->keybit); + } + + kdev->keychord_count++; + keychord = NEXT_KEYCHORD(keychord); + } + + kdev->keychords = keychords; + spin_unlock_irqrestore(&kdev->lock, flags); + + ret = input_register_handler(&kdev->input_handler); + if (ret) { + kfree(keychords); + kdev->keychords = 0; + keychord_write_unlock(kdev); + return ret; + } + kdev->registered = 1; + + keychord_write_unlock(kdev); + + return count; + +err_unlock_return: + spin_unlock_irqrestore(&kdev->lock, flags); + kfree(keychords); + keychord_write_unlock(kdev); + return -EINVAL; +} + +static unsigned int keychord_poll(struct file *file, poll_table *wait) +{ + struct keychord_device *kdev = file->private_data; + + poll_wait(file, &kdev->waitq, wait); + + if (kdev->head != kdev->tail) + return POLLIN | POLLRDNORM; + + return 0; +} + +static int keychord_open(struct inode *inode, struct file *file) +{ + struct keychord_device *kdev; + + kdev = kzalloc(sizeof(struct keychord_device), GFP_KERNEL); + if (!kdev) + return -ENOMEM; + + spin_lock_init(&kdev->lock); + init_waitqueue_head(&kdev->waitq); + init_waitqueue_head(&kdev->write_waitq); + + kdev->input_handler.event = keychord_event; + kdev->input_handler.connect = keychord_connect; + kdev->input_handler.disconnect = keychord_disconnect; + kdev->input_handler.name = KEYCHORD_NAME; + kdev->input_handler.id_table = kdev->device_ids; + + kdev->device_ids[0].flags = INPUT_DEVICE_ID_MATCH_EVBIT; + __set_bit(EV_KEY, kdev->device_ids[0].evbit); + + file->private_data = kdev; + + return 0; +} + +static int keychord_release(struct inode *inode, struct file *file) +{ + struct keychord_device *kdev = file->private_data; + + if (kdev->registered) + input_unregister_handler(&kdev->input_handler); + kfree(kdev->keychords); + kfree(kdev); + + return 0; +} + +static const struct file_operations keychord_fops = { + .owner = THIS_MODULE, + .open = keychord_open, + .release = keychord_release, + .read = keychord_read, + .write = keychord_write, + .poll = keychord_poll, +}; + +static struct miscdevice keychord_misc = { + .fops = &keychord_fops, + .name = KEYCHORD_NAME, + .minor = MISC_DYNAMIC_MINOR, +}; + +static int __init keychord_init(void) +{ + return misc_register(&keychord_misc); +} + +static void __exit keychord_exit(void) +{ + misc_deregister(&keychord_misc); +} + +module_init(keychord_init); +module_exit(keychord_exit); diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c index 6c51d404874b..c37aea9ac272 100644 --- a/drivers/input/misc/twl4030-vibra.c +++ b/drivers/input/misc/twl4030-vibra.c @@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops, twl4030_vibra_suspend, twl4030_vibra_resume); static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata, - struct device_node *node) + struct device_node *parent) { + struct device_node *node; + if (pdata && pdata->coexist) return true; - node = of_find_node_by_name(node, "codec"); + node = of_get_child_by_name(parent, 
"codec"); if (node) { of_node_put(node); return true; diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c index 5690eb7ff954..15e0d352c4cc 100644 --- a/drivers/input/misc/twl6040-vibra.c +++ b/drivers/input/misc/twl6040-vibra.c @@ -248,8 +248,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev) int vddvibr_uV = 0; int error; - of_node_get(twl6040_core_dev->of_node); - twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node, + twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node, "vibra"); if (!twl6040_core_node) { dev_err(&pdev->dev, "parent of node is missing?\n"); diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c index 6bf56bb5f8d9..d91f3b1c5375 100644 --- a/drivers/input/misc/xen-kbdfront.c +++ b/drivers/input/misc/xen-kbdfront.c @@ -326,8 +326,6 @@ static int xenkbd_probe(struct xenbus_device *dev, 0, width, 0, 0); input_set_abs_params(mtouch, ABS_MT_POSITION_Y, 0, height, 0, 0); - input_set_abs_params(mtouch, ABS_MT_PRESSURE, - 0, 255, 0, 0); ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT); if (ret) { diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 850b00e3ad8e..9a234da8cac2 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1250,29 +1250,32 @@ static int alps_decode_ss4_v2(struct alps_fields *f, case SS4_PACKET_ID_MULTI: if (priv->flags & ALPS_BUTTONPAD) { if (IS_SS4PLUS_DEV(priv->dev_id)) { - f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0); - f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1); + f->mt[2].x = SS4_PLUS_BTL_MF_X_V2(p, 0); + f->mt[3].x = SS4_PLUS_BTL_MF_X_V2(p, 1); + no_data_x = SS4_PLUS_MFPACKET_NO_AX_BL; } else { f->mt[2].x = SS4_BTL_MF_X_V2(p, 0); f->mt[3].x = SS4_BTL_MF_X_V2(p, 1); + no_data_x = SS4_MFPACKET_NO_AX_BL; } + no_data_y = SS4_MFPACKET_NO_AY_BL; f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0); f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1); - no_data_x = SS4_MFPACKET_NO_AX_BL; - no_data_y = SS4_MFPACKET_NO_AY_BL; } else { if (IS_SS4PLUS_DEV(priv->dev_id)) { - f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0); - f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1); + f->mt[2].x = SS4_PLUS_STD_MF_X_V2(p, 0); + f->mt[3].x = SS4_PLUS_STD_MF_X_V2(p, 1); + no_data_x = SS4_PLUS_MFPACKET_NO_AX; } else { - f->mt[0].x = SS4_STD_MF_X_V2(p, 0); - f->mt[1].x = SS4_STD_MF_X_V2(p, 1); + f->mt[2].x = SS4_STD_MF_X_V2(p, 0); + f->mt[3].x = SS4_STD_MF_X_V2(p, 1); + no_data_x = SS4_MFPACKET_NO_AX; } + no_data_y = SS4_MFPACKET_NO_AY; + f->mt[2].y = SS4_STD_MF_Y_V2(p, 0); f->mt[3].y = SS4_STD_MF_Y_V2(p, 1); - no_data_x = SS4_MFPACKET_NO_AX; - no_data_y = SS4_MFPACKET_NO_AY; } f->first_mp = 0; @@ -2541,13 +2544,31 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4], } static int alps_update_dual_info_ss4_v2(unsigned char otp[][4], - struct alps_data *priv) + struct alps_data *priv, + struct psmouse *psmouse) { bool is_dual = false; + int reg_val = 0; + struct ps2dev *ps2dev = &psmouse->ps2dev; - if (IS_SS4PLUS_DEV(priv->dev_id)) + if (IS_SS4PLUS_DEV(priv->dev_id)) { is_dual = (otp[0][0] >> 4) & 0x01; + if (!is_dual) { + /* For support TrackStick of Thinkpad L/E series */ + if (alps_exit_command_mode(psmouse) == 0 && + alps_enter_command_mode(psmouse) == 0) { + reg_val = alps_command_mode_read_reg(psmouse, + 0xD7); + } + alps_exit_command_mode(psmouse); + ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE); + + if (reg_val == 0x0C || reg_val == 0x1D) + is_dual = true; + } + } + if (is_dual) priv->flags |= ALPS_DUALPOINT | ALPS_DUALPOINT_WITH_PRESSURE; @@ -2570,7 +2591,7 
@@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, alps_update_btn_info_ss4_v2(otp, priv); - alps_update_dual_info_ss4_v2(otp, priv); + alps_update_dual_info_ss4_v2(otp, priv, psmouse); return 0; } diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index c80a7c76cb76..79b6d69d1486 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -141,10 +141,12 @@ enum SS4_PACKET_ID { #define SS4_TS_Z_V2(_b) (s8)(_b[4] & 0x7F) -#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */ -#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */ -#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coordinate value */ -#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coordinate value */ +#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */ +#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */ +#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coord value */ +#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coord value */ +#define SS4_PLUS_MFPACKET_NO_AX 4080 /* SS4 PLUS, X */ +#define SS4_PLUS_MFPACKET_NO_AX_BL 4088 /* Buttonless SS4 PLUS, X */ /* * enum V7_PACKET_ID - defines the packet type for V7 diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h index 599544c1a91c..243e0fa6e3e3 100644 --- a/drivers/input/mouse/elan_i2c.h +++ b/drivers/input/mouse/elan_i2c.h @@ -27,6 +27,8 @@ #define ETP_DISABLE_POWER 0x0001 #define ETP_PRESSURE_OFFSET 25 +#define ETP_CALIBRATE_MAX_LEN 3 + /* IAP Firmware handling */ #define ETP_PRODUCT_ID_FORMAT_STRING "%d.0" #define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin" diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index d6135900da64..696e540304fd 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -610,7 +610,7 @@ static ssize_t calibrate_store(struct device *dev, int tries = 20; int retval; int error; - u8 val[3]; + u8 val[ETP_CALIBRATE_MAX_LEN]; retval = mutex_lock_interruptible(&data->sysfs_mutex); if (retval) @@ -1260,6 +1260,10 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN060B", 0 }, { "ELAN060C", 0 }, { "ELAN0611", 0 }, + { "ELAN0612", 0 }, + { "ELAN0618", 0 }, + { "ELAN061D", 0 }, + { "ELAN0622", 0 }, { "ELAN1000", 0 }, { } }; diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index 29f99529b187..c060d270bc4d 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c @@ -56,7 +56,7 @@ static int elan_smbus_initialize(struct i2c_client *client) { u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 }; - u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 }; + u8 values[I2C_SMBUS_BLOCK_MAX] = {0}; int len, error; /* Get hello packet */ @@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client) static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val) { int error; + u8 buf[I2C_SMBUS_BLOCK_MAX] = {0}; + + BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf)); error = i2c_smbus_read_block_data(client, - ETP_SMBUS_CALIBRATE_QUERY, val); + ETP_SMBUS_CALIBRATE_QUERY, buf); if (error < 0) return error; + memcpy(val, buf, ETP_CALIBRATE_MAX_LEN); return 0; } @@ -130,7 +134,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client, bool max_baseline, u8 *value) { int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, max_baseline ? 
@@ -149,7 +153,7 @@ static int elan_smbus_get_version(struct i2c_client *client, bool iap, u8 *version) { int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, iap ? ETP_SMBUS_IAP_VERSION_CMD : @@ -170,7 +174,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, u8 *clickpad) { int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, ETP_SMBUS_SM_VERSION_CMD, val); @@ -188,7 +192,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id) { int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, ETP_SMBUS_UNIQUEID_CMD, val); @@ -205,7 +209,7 @@ static int elan_smbus_get_checksum(struct i2c_client *client, bool iap, u16 *csum) { int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, iap ? ETP_SMBUS_FW_CHECKSUM_CMD : @@ -226,7 +230,7 @@ static int elan_smbus_get_max(struct i2c_client *client, { int ret; int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val); if (ret != 3) { @@ -246,7 +250,7 @@ static int elan_smbus_get_resolution(struct i2c_client *client, { int ret; int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val); if (ret != 3) { @@ -267,7 +271,7 @@ static int elan_smbus_get_num_traces(struct i2c_client *client, { int ret; int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val); if (ret != 3) { @@ -294,7 +298,7 @@ static int elan_smbus_iap_get_mode(struct i2c_client *client, { int error; u16 constant; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val); if (error < 0) { @@ -345,7 +349,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client) int len; int error; enum tp_mode mode; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06}; u16 password; @@ -419,7 +423,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client, struct device *dev = &client->dev; int error; u16 result; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; /* * Due to the limitation of smbus protocol limiting @@ -472,6 +476,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report) { int len; + BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN); + len = i2c_smbus_read_block_data(client, ETP_SMBUS_PACKET_QUERY, &report[ETP_SMBUS_REPORT_OFFSET]); diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index b84cd978fce2..a250f433eb96 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -804,7 +804,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse) else if (ic_version == 7 && etd->samples[1] == 0x2A) sanity_check = ((packet[3] & 0x1c) == 0x10); else - sanity_check = ((packet[0] & 0x0c) == 0x04 && + sanity_check = ((packet[0] & 0x08) == 0x00 && (packet[3] & 0x1c) == 0x10); if (!sanity_check) @@ -1177,6 +1177,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { { } }; +static const char * const middle_button_pnp_ids[] = { + "LEN2131", /* ThinkPad P52 w/ NFC */ + "LEN2132", /* ThinkPad P52 */ + NULL +}; + /* * Set the appropriate event bits for the input subsystem */ @@ -1196,7 
+1202,8 @@ static int elantech_set_input_params(struct psmouse *psmouse) __clear_bit(EV_REL, dev->evbit); __set_bit(BTN_LEFT, dev->keybit); - if (dmi_check_system(elantech_dmi_has_middle_button)) + if (dmi_check_system(elantech_dmi_has_middle_button) || + psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids)) __set_bit(BTN_MIDDLE, dev->keybit); __set_bit(BTN_RIGHT, dev->keybit); @@ -1613,7 +1620,7 @@ static int elantech_set_properties(struct elantech_data *etd) case 5: etd->hw_version = 3; break; - case 6 ... 14: + case 6 ... 15: etd->hw_version = 4; break; default: diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 6a5649e52eed..8ac9e03c05b4 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -975,6 +975,21 @@ static void psmouse_apply_defaults(struct psmouse *psmouse) psmouse->pt_deactivate = NULL; } +static bool psmouse_do_detect(int (*detect)(struct psmouse *, bool), + struct psmouse *psmouse, bool allow_passthrough, + bool set_properties) +{ + if (psmouse->ps2dev.serio->id.type == SERIO_PS_PSTHRU && + !allow_passthrough) { + return false; + } + + if (set_properties) + psmouse_apply_defaults(psmouse); + + return detect(psmouse, set_properties) == 0; +} + static bool psmouse_try_protocol(struct psmouse *psmouse, enum psmouse_type type, unsigned int *max_proto, @@ -986,15 +1001,8 @@ static bool psmouse_try_protocol(struct psmouse *psmouse, if (!proto) return false; - if (psmouse->ps2dev.serio->id.type == SERIO_PS_PSTHRU && - !proto->try_passthru) { - return false; - } - - if (set_properties) - psmouse_apply_defaults(psmouse); - - if (proto->detect(psmouse, set_properties) != 0) + if (!psmouse_do_detect(proto->detect, psmouse, proto->try_passthru, + set_properties)) return false; if (set_properties && proto->init && init_allowed) { @@ -1027,8 +1035,8 @@ static int psmouse_extensions(struct psmouse *psmouse, * Always check for focaltech, this is safe as it uses pnp-id * matching. */ - if (psmouse_try_protocol(psmouse, PSMOUSE_FOCALTECH, - &max_proto, set_properties, false)) { + if (psmouse_do_detect(focaltech_detect, + psmouse, false, set_properties)) { if (max_proto > PSMOUSE_IMEX && IS_ENABLED(CONFIG_MOUSE_PS2_FOCALTECH) && (!set_properties || focaltech_init(psmouse) == 0)) { @@ -1074,8 +1082,8 @@ static int psmouse_extensions(struct psmouse *psmouse, * probing for IntelliMouse. */ if (max_proto > PSMOUSE_PS2 && - psmouse_try_protocol(psmouse, PSMOUSE_SYNAPTICS, &max_proto, - set_properties, false)) { + psmouse_do_detect(synaptics_detect, + psmouse, false, set_properties)) { synaptics_hardware = true; if (max_proto > PSMOUSE_IMEX) { diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index ee5466a374bf..6c4bbd38700e 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -172,6 +172,12 @@ static const char * const smbus_pnp_ids[] = { "LEN0048", /* X1 Carbon 3 */ "LEN0046", /* X250 */ "LEN004a", /* W541 */ + "LEN0071", /* T480 */ + "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ + "LEN0073", /* X1 Carbon G5 (Elantech) */ + "LEN0092", /* X1 Carbon 6 */ + "LEN0096", /* X280 */ + "LEN0097", /* X280 -> ALPS trackpoint */ "LEN200f", /* T450s */ NULL }; @@ -1280,6 +1286,16 @@ static void set_input_params(struct psmouse *psmouse, INPUT_MT_POINTER | (cr48_profile_sensor ? INPUT_MT_TRACK : INPUT_MT_SEMI_MT)); + + /* + * For semi-mt devices we send ABS_X/Y ourselves instead of + * input_mt_report_pointer_emulation. 
But + * input_mt_init_slots() resets the fuzz to 0, leading to a + * filtered ABS_MT_POSITION_X but an unfiltered ABS_X + * position. Let's re-initialize ABS_X/Y here. + */ + if (!cr48_profile_sensor) + set_abs_position_params(dev, &priv->info, ABS_X, ABS_Y); } if (SYN_CAP_PALMDETECT(info->capabilities)) diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 0871010f18d5..bbd29220dbe9 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c @@ -19,6 +19,13 @@ #include "psmouse.h" #include "trackpoint.h" +static const char * const trackpoint_variants[] = { + [TP_VARIANT_IBM] = "IBM", + [TP_VARIANT_ALPS] = "ALPS", + [TP_VARIANT_ELAN] = "Elan", + [TP_VARIANT_NXP] = "NXP", +}; + /* * Power-on Reset: Resets all trackpoint parameters, including RAM values, * to defaults. @@ -26,7 +33,7 @@ */ static int trackpoint_power_on_reset(struct ps2dev *ps2dev) { - unsigned char results[2]; + u8 results[2]; int tries = 0; /* Issue POR command, and repeat up to once if 0xFC00 received */ @@ -38,7 +45,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev) /* Check for success response -- 0xAA00 */ if (results[0] != 0xAA || results[1] != 0x00) - return -1; + return -ENODEV; return 0; } @@ -46,8 +53,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev) /* * Device IO: read, write and toggle bit */ -static int trackpoint_read(struct ps2dev *ps2dev, - unsigned char loc, unsigned char *results) +static int trackpoint_read(struct ps2dev *ps2dev, u8 loc, u8 *results) { if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) || ps2_command(ps2dev, results, MAKE_PS2_CMD(0, 1, loc))) { @@ -57,8 +63,7 @@ static int trackpoint_read(struct ps2dev *ps2dev, return 0; } -static int trackpoint_write(struct ps2dev *ps2dev, - unsigned char loc, unsigned char val) +static int trackpoint_write(struct ps2dev *ps2dev, u8 loc, u8 val) { if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) || ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_WRITE_MEM)) || @@ -70,8 +75,7 @@ static int trackpoint_write(struct ps2dev *ps2dev, return 0; } -static int trackpoint_toggle_bit(struct ps2dev *ps2dev, - unsigned char loc, unsigned char mask) +static int trackpoint_toggle_bit(struct ps2dev *ps2dev, u8 loc, u8 mask) { /* Bad things will happen if the loc param isn't in this range */ if (loc < 0x20 || loc >= 0x2F) @@ -87,11 +91,11 @@ static int trackpoint_toggle_bit(struct ps2dev *ps2dev, return 0; } -static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc, - unsigned char mask, unsigned char value) +static int trackpoint_update_bit(struct ps2dev *ps2dev, + u8 loc, u8 mask, u8 value) { int retval = 0; - unsigned char data; + u8 data; trackpoint_read(ps2dev, loc, &data); if (((data & mask) == mask) != !!value) @@ -105,17 +109,18 @@ static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc, */ struct trackpoint_attr_data { size_t field_offset; - unsigned char command; - unsigned char mask; - unsigned char inverted; - unsigned char power_on_default; + u8 command; + u8 mask; + bool inverted; + u8 power_on_default; }; -static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf) +static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, + void *data, char *buf) { struct trackpoint_data *tp = psmouse->private; struct trackpoint_attr_data *attr = data; - unsigned char value = *(unsigned char *)((char *)tp + attr->field_offset); + u8 value = *(u8 *)((void *)tp + attr->field_offset); if 
(attr->inverted) value = !value; @@ -128,8 +133,8 @@ static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data, { struct trackpoint_data *tp = psmouse->private; struct trackpoint_attr_data *attr = data; - unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset); - unsigned char value; + u8 *field = (void *)tp + attr->field_offset; + u8 value; int err; err = kstrtou8(buf, 10, &value); @@ -157,17 +162,14 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data, { struct trackpoint_data *tp = psmouse->private; struct trackpoint_attr_data *attr = data; - unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset); - unsigned int value; + bool *field = (void *)tp + attr->field_offset; + bool value; int err; - err = kstrtouint(buf, 10, &value); + err = kstrtobool(buf, &value); if (err) return err; - if (value > 1) - return -EINVAL; - if (attr->inverted) value = !value; @@ -193,30 +195,6 @@ PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO, \ &trackpoint_attr_##_name, \ trackpoint_show_int_attr, trackpoint_set_bit_attr) -#define TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name) \ -do { \ - struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \ - \ - trackpoint_update_bit(&_psmouse->ps2dev, \ - _attr->command, _attr->mask, _tp->_name); \ -} while (0) - -#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \ -do { \ - if (!_power_on || \ - _tp->_name != trackpoint_attr_##_name.power_on_default) { \ - if (!trackpoint_attr_##_name.mask) \ - trackpoint_write(&_psmouse->ps2dev, \ - trackpoint_attr_##_name.command, \ - _tp->_name); \ - else \ - TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name); \ - } \ -} while (0) - -#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \ - (_tp->_name = trackpoint_attr_##_name.power_on_default) - TRACKPOINT_INT_ATTR(sensitivity, TP_SENS, TP_DEF_SENS); TRACKPOINT_INT_ATTR(speed, TP_SPEED, TP_DEF_SPEED); TRACKPOINT_INT_ATTR(inertia, TP_INERTIA, TP_DEF_INERTIA); @@ -229,13 +207,33 @@ TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME); TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV); TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME); -TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0, +TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, false, TP_DEF_PTSON); -TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, 0, +TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, false, TP_DEF_SKIPBACK); -TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, 1, +TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, true, TP_DEF_EXT_DEV); +static bool trackpoint_is_attr_available(struct psmouse *psmouse, + struct attribute *attr) +{ + struct trackpoint_data *tp = psmouse->private; + + return tp->variant_id == TP_VARIANT_IBM || + attr == &psmouse_attr_sensitivity.dattr.attr || + attr == &psmouse_attr_press_to_select.dattr.attr; +} + +static umode_t trackpoint_is_attr_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct serio *serio = to_serio_port(dev); + struct psmouse *psmouse = serio_get_drvdata(serio); + + return trackpoint_is_attr_available(psmouse, attr) ? 
attr->mode : 0; +} + static struct attribute *trackpoint_attrs[] = { &psmouse_attr_sensitivity.dattr.attr, &psmouse_attr_speed.dattr.attr, @@ -255,24 +253,56 @@ static struct attribute *trackpoint_attrs[] = { }; static struct attribute_group trackpoint_attr_group = { - .attrs = trackpoint_attrs, + .is_visible = trackpoint_is_attr_visible, + .attrs = trackpoint_attrs, }; -static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *firmware_id) -{ - unsigned char param[2] = { 0 }; +#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \ +do { \ + struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \ + \ + if ((!_power_on || _tp->_name != _attr->power_on_default) && \ + trackpoint_is_attr_available(_psmouse, \ + &psmouse_attr_##_name.dattr.attr)) { \ + if (!_attr->mask) \ + trackpoint_write(&_psmouse->ps2dev, \ + _attr->command, _tp->_name); \ + else \ + trackpoint_update_bit(&_psmouse->ps2dev, \ + _attr->command, _attr->mask, \ + _tp->_name); \ + } \ +} while (0) - if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) - return -1; +#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \ +do { \ + _tp->_name = trackpoint_attr_##_name.power_on_default; \ +} while (0) - /* add new TP ID. */ - if (!(param[0] & TP_MAGIC_IDENT)) - return -1; +static int trackpoint_start_protocol(struct psmouse *psmouse, + u8 *variant_id, u8 *firmware_id) +{ + u8 param[2] = { 0 }; + int error; - if (firmware_id) - *firmware_id = param[1]; + error = ps2_command(&psmouse->ps2dev, + param, MAKE_PS2_CMD(0, 2, TP_READ_ID)); + if (error) + return error; + + switch (param[0]) { + case TP_VARIANT_IBM: + case TP_VARIANT_ALPS: + case TP_VARIANT_ELAN: + case TP_VARIANT_NXP: + if (variant_id) + *variant_id = param[0]; + if (firmware_id) + *firmware_id = param[1]; + return 0; + } - return 0; + return -ENODEV; } /* @@ -285,7 +315,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state) { struct trackpoint_data *tp = psmouse->private; - if (!in_power_on_state) { + if (!in_power_on_state && tp->variant_id == TP_VARIANT_IBM) { /* * Disable features that may make device unusable * with this driver. 
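To make the macro rework above easier to review: with the new definition, TRACKPOINT_UPDATE(power_on, psmouse, tp, sensitivity) expands to roughly the following (a hand-expanded sketch for one integer attribute; all names come from the patch itself, nothing new is introduced):

	struct trackpoint_attr_data *attr = &trackpoint_attr_sensitivity;

	if ((!power_on || tp->sensitivity != attr->power_on_default) &&
	    trackpoint_is_attr_available(psmouse,
					 &psmouse_attr_sensitivity.dattr.attr)) {
		if (!attr->mask)	/* integer attribute: plain register write */
			trackpoint_write(&psmouse->ps2dev, attr->command,
					 tp->sensitivity);
		else			/* bit attribute: flip via toggle mask */
			trackpoint_update_bit(&psmouse->ps2dev, attr->command,
					      attr->mask, tp->sensitivity);
	}

That is, a parameter is pushed to the stick only when it differs from the power-on default and only when the attribute actually exists on this variant — the same availability test that trackpoint_is_attr_visible() applies to the sysfs files.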
@@ -347,7 +377,8 @@ static void trackpoint_defaults(struct trackpoint_data *tp) static void trackpoint_disconnect(struct psmouse *psmouse) { - sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, &trackpoint_attr_group); + device_remove_group(&psmouse->ps2dev.serio->dev, + &trackpoint_attr_group); kfree(psmouse->private); psmouse->private = NULL; @@ -355,14 +386,20 @@ static void trackpoint_disconnect(struct psmouse *psmouse) static int trackpoint_reconnect(struct psmouse *psmouse) { - int reset_fail; + struct trackpoint_data *tp = psmouse->private; + int error; + bool was_reset; - if (trackpoint_start_protocol(psmouse, NULL)) - return -1; + error = trackpoint_start_protocol(psmouse, NULL, NULL); + if (error) + return error; - reset_fail = trackpoint_power_on_reset(&psmouse->ps2dev); - if (trackpoint_sync(psmouse, !reset_fail)) - return -1; + was_reset = tp->variant_id == TP_VARIANT_IBM && + trackpoint_power_on_reset(&psmouse->ps2dev) == 0; + + error = trackpoint_sync(psmouse, was_reset); + if (error) + return error; return 0; } @@ -370,46 +407,66 @@ static int trackpoint_reconnect(struct psmouse *psmouse) int trackpoint_detect(struct psmouse *psmouse, bool set_properties) { struct ps2dev *ps2dev = &psmouse->ps2dev; - unsigned char firmware_id; - unsigned char button_info; + struct trackpoint_data *tp; + u8 variant_id; + u8 firmware_id; + u8 button_info; int error; - if (trackpoint_start_protocol(psmouse, &firmware_id)) - return -1; + error = trackpoint_start_protocol(psmouse, &variant_id, &firmware_id); + if (error) + return error; if (!set_properties) return 0; - if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) { - psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n"); - button_info = 0x33; - } - - psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL); - if (!psmouse->private) + tp = kzalloc(sizeof(*tp), GFP_KERNEL); + if (!tp) return -ENOMEM; - psmouse->vendor = "IBM"; + trackpoint_defaults(tp); + tp->variant_id = variant_id; + tp->firmware_id = firmware_id; + + psmouse->private = tp; + + psmouse->vendor = trackpoint_variants[variant_id]; psmouse->name = "TrackPoint"; psmouse->reconnect = trackpoint_reconnect; psmouse->disconnect = trackpoint_disconnect; + if (variant_id != TP_VARIANT_IBM) { + /* Newer variants do not support extended button query. */ + button_info = 0x33; + } else { + error = trackpoint_read(ps2dev, TP_EXT_BTN, &button_info); + if (error) { + psmouse_warn(psmouse, + "failed to get extended button data, assuming 3 buttons\n"); + button_info = 0x33; + } else if (!button_info) { + psmouse_warn(psmouse, + "got 0 in extended button data, assuming 3 buttons\n"); + button_info = 0x33; + } + } + if ((button_info & 0x0f) >= 3) - __set_bit(BTN_MIDDLE, psmouse->dev->keybit); + input_set_capability(psmouse->dev, EV_KEY, BTN_MIDDLE); __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit); __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit); - trackpoint_defaults(psmouse->private); - - error = trackpoint_power_on_reset(ps2dev); - - /* Write defaults to TP only if reset fails. */ - if (error) + if (variant_id != TP_VARIANT_IBM || + trackpoint_power_on_reset(ps2dev) != 0) { + /* + * Write defaults to TP if we did not reset the trackpoint. 
+ */ trackpoint_sync(psmouse, false); + } - error = sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group); + error = device_add_group(&ps2dev->serio->dev, &trackpoint_attr_group); if (error) { psmouse_err(psmouse, "failed to create sysfs attributes, error: %d\n", @@ -420,8 +477,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties) } psmouse_info(psmouse, - "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n", - firmware_id, + "%s TrackPoint firmware: 0x%02x, buttons: %d/%d\n", + psmouse->vendor, firmware_id, (button_info & 0xf0) >> 4, button_info & 0x0f); return 0; diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h index 88055755f82e..10a039148234 100644 --- a/drivers/input/mouse/trackpoint.h +++ b/drivers/input/mouse/trackpoint.h @@ -21,10 +21,16 @@ #define TP_COMMAND 0xE2 /* Commands start with this */ #define TP_READ_ID 0xE1 /* Sent for device identification */ -#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */ - /* by the firmware ID */ - /* Firmware ID includes 0x1, 0x2, 0x3 */ +/* + * Valid first byte responses to the "Read Secondary ID" (0xE1) command. + * 0x01 was the original IBM trackpoint, others implement very limited + * subset of trackpoint features. + */ +#define TP_VARIANT_IBM 0x01 +#define TP_VARIANT_ALPS 0x02 +#define TP_VARIANT_ELAN 0x03 +#define TP_VARIANT_NXP 0x04 /* * Commands @@ -136,18 +142,20 @@ #define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd)) -struct trackpoint_data -{ - unsigned char sensitivity, speed, inertia, reach; - unsigned char draghys, mindrag; - unsigned char thresh, upthresh; - unsigned char ztime, jenks; - unsigned char drift_time; +struct trackpoint_data { + u8 variant_id; + u8 firmware_id; + + u8 sensitivity, speed, inertia, reach; + u8 draghys, mindrag; + u8 thresh, upthresh; + u8 ztime, jenks; + u8 drift_time; /* toggles */ - unsigned char press_to_select; - unsigned char skipback; - unsigned char ext_dev; + bool press_to_select; + bool skipback; + bool ext_dev; }; #ifdef CONFIG_MOUSE_PS2_TRACKPOINT diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c index 0f586780ceb4..1ae5c1ef3f5b 100644 --- a/drivers/input/mouse/vmmouse.c +++ b/drivers/input/mouse/vmmouse.c @@ -316,11 +316,9 @@ static int vmmouse_enable(struct psmouse *psmouse) /* * Array of supported hypervisors. 
*/ -static const struct hypervisor_x86 *vmmouse_supported_hypervisors[] = { - &x86_hyper_vmware, -#ifdef CONFIG_KVM_GUEST - &x86_hyper_kvm, -#endif +static enum x86_hypervisor_type vmmouse_supported_hypervisors[] = { + X86_HYPER_VMWARE, + X86_HYPER_KVM, }; /** @@ -331,7 +329,7 @@ static bool vmmouse_check_hypervisor(void) int i; for (i = 0; i < ARRAY_SIZE(vmmouse_supported_hypervisors); i++) - if (vmmouse_supported_hypervisors[i] == x86_hyper) + if (vmmouse_supported_hypervisors[i] == x86_hyper_type) return true; return false; diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c index 4f2bb5947a4e..f5954981e9ee 100644 --- a/drivers/input/rmi4/rmi_driver.c +++ b/drivers/input/rmi4/rmi_driver.c @@ -41,6 +41,13 @@ void rmi_free_function_list(struct rmi_device *rmi_dev) rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n"); + /* Doing it in the reverse order so F01 will be removed last */ + list_for_each_entry_safe_reverse(fn, tmp, + &data->function_list, node) { + list_del(&fn->node); + rmi_unregister_function(fn); + } + devm_kfree(&rmi_dev->dev, data->irq_memory); data->irq_memory = NULL; data->irq_status = NULL; @@ -50,13 +57,6 @@ void rmi_free_function_list(struct rmi_device *rmi_dev) data->f01_container = NULL; data->f34_container = NULL; - - /* Doing it in the reverse order so F01 will be removed last */ - list_for_each_entry_safe_reverse(fn, tmp, - &data->function_list, node) { - list_del(&fn->node); - rmi_unregister_function(fn); - } } static int reset_one_function(struct rmi_function *fn) @@ -230,8 +230,10 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id) rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Failed to process interrupt request: %d\n", ret); - if (count) + if (count) { kfree(attn_data.data); + attn_data.data = NULL; + } if (!kfifo_is_empty(&drvdata->attn_fifo)) return rmi_irq_fn(irq, dev_id); diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c index ad71a5e768dc..7ccbb370a9a8 100644 --- a/drivers/input/rmi4/rmi_f03.c +++ b/drivers/input/rmi4/rmi_f03.c @@ -32,6 +32,7 @@ struct f03_data { struct rmi_function *fn; struct serio *serio; + bool serio_registered; unsigned int overwrite_buttons; @@ -138,6 +139,37 @@ static int rmi_f03_initialize(struct f03_data *f03) return 0; } +static int rmi_f03_pt_open(struct serio *serio) +{ + struct f03_data *f03 = serio->port_data; + struct rmi_function *fn = f03->fn; + const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE; + const u16 data_addr = fn->fd.data_base_addr + RMI_F03_OB_OFFSET; + u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE]; + int error; + + /* + * Consume any pending data. Some devices like to spam with + * 0xaa 0x00 announcements which may confuse us as we try to + * probe the device. 
+ */ + error = rmi_read_block(fn->rmi_dev, data_addr, &obs, ob_len); + if (!error) + rmi_dbg(RMI_DEBUG_FN, &fn->dev, + "%s: Consumed %*ph (%d) from PS2 guest\n", + __func__, ob_len, obs, ob_len); + + return fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask); +} + +static void rmi_f03_pt_close(struct serio *serio) +{ + struct f03_data *f03 = serio->port_data; + struct rmi_function *fn = f03->fn; + + fn->rmi_dev->driver->clear_irq_bits(fn->rmi_dev, fn->irq_mask); +} + static int rmi_f03_register_pt(struct f03_data *f03) { struct serio *serio; @@ -148,6 +180,8 @@ static int rmi_f03_register_pt(struct f03_data *f03) serio->id.type = SERIO_PS_PSTHRU; serio->write = rmi_f03_pt_write; + serio->open = rmi_f03_pt_open; + serio->close = rmi_f03_pt_close; serio->port_data = f03; strlcpy(serio->name, "Synaptics RMI4 PS/2 pass-through", @@ -184,17 +218,27 @@ static int rmi_f03_probe(struct rmi_function *fn) f03->device_count); dev_set_drvdata(dev, f03); - - error = rmi_f03_register_pt(f03); - if (error) - return error; - return 0; } static int rmi_f03_config(struct rmi_function *fn) { - fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask); + struct f03_data *f03 = dev_get_drvdata(&fn->dev); + int error; + + if (!f03->serio_registered) { + error = rmi_f03_register_pt(f03); + if (error) + return error; + + f03->serio_registered = true; + } else { + /* + * We must be re-configuring the sensor, just enable + * interrupts for this function. + */ + fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask); + } return 0; } @@ -204,7 +248,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits) struct rmi_device *rmi_dev = fn->rmi_dev; struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); struct f03_data *f03 = dev_get_drvdata(&fn->dev); - u16 data_addr = fn->fd.data_base_addr; + const u16 data_addr = fn->fd.data_base_addr + RMI_F03_OB_OFFSET; const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE; u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE]; u8 ob_status; @@ -226,8 +270,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits) drvdata->attn_data.size -= ob_len; } else { /* Grab all of the data registers, and check them for data */ - error = rmi_read_block(fn->rmi_dev, data_addr + RMI_F03_OB_OFFSET, - &obs, ob_len); + error = rmi_read_block(fn->rmi_dev, data_addr, &obs, ob_len); if (error) { dev_err(&fn->dev, "%s: Failed to read F03 output buffers: %d\n", @@ -266,7 +309,8 @@ static void rmi_f03_remove(struct rmi_function *fn) { struct f03_data *f03 = dev_get_drvdata(&fn->dev); - serio_unregister_port(f03->serio); + if (f03->serio_registered) + serio_unregister_port(f03->serio); } struct rmi_function_handler rmi_f03_handler = { diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c index d97a85907ed6..d0c3d275bf9f 100644 --- a/drivers/input/rmi4/rmi_spi.c +++ b/drivers/input/rmi4/rmi_spi.c @@ -147,8 +147,11 @@ static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi, if (len > RMI_SPI_XFER_SIZE_LIMIT) return -EINVAL; - if (rmi_spi->xfer_buf_size < len) - rmi_spi_manage_pools(rmi_spi, len); + if (rmi_spi->xfer_buf_size < len) { + ret = rmi_spi_manage_pools(rmi_spi, len); + if (ret < 0) + return ret; + } if (addr == 0) /* diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 6cbbdc6e9687..136f6e7bf797 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -527,6 +527,27 @@ static const struct dmi_system_id __initconst 
i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"), }, }, + { + /* Lenovo LaVie Z */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"), + }, + }, + { } +}; + +static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = { + { + /* + * Sony Vaio VGN-CS series require MUX or the touch sensor + * buttons will disturb touchpad operation + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"), + }, + }, { } }; @@ -620,6 +641,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "20046"), }, }, + { + /* Lenovo ThinkPad L460 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"), + }, + }, { /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */ .matches = { @@ -1163,6 +1191,9 @@ static int __init i8042_platform_init(void) if (dmi_check_system(i8042_dmi_nomux_table)) i8042_nomux = true; + if (dmi_check_system(i8042_dmi_forcemux_table)) + i8042_nomux = false; + if (dmi_check_system(i8042_dmi_notimeout_table)) i8042_notimeout = true; diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c index 7ed828a51f4c..3486d9403805 100644 --- a/drivers/input/touchscreen/88pm860x-ts.c +++ b/drivers/input/touchscreen/88pm860x-ts.c @@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev, int data, n, ret; if (!np) return -ENODEV; - np = of_find_node_by_name(np, "touch"); + np = of_get_child_by_name(np, "touch"); if (!np) { dev_err(&pdev->dev, "Can't find touch node\n"); return -EINVAL; @@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev, if (data) { ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data); if (ret < 0) - return -EINVAL; + goto err_put_node; } /* set tsi prebias time */ if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) { ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data); if (ret < 0) - return -EINVAL; + goto err_put_node; } /* set prebias & prechg time of pen detect */ data = 0; @@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev, if (data) { ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data); if (ret < 0) - return -EINVAL; + goto err_put_node; } of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x); + + of_node_put(np); + return 0; + +err_put_node: + of_node_put(np); + + return -EINVAL; } #else #define pm860x_touch_dt_init(x, y, z) (-1) diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 7659bc48f1db..fc149ea64be7 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -275,7 +275,8 @@ struct mxt_data { char phys[64]; /* device physical location */ const struct mxt_platform_data *pdata; struct mxt_object *object_table; - struct mxt_info info; + struct mxt_info *info; + void *raw_info_block; unsigned int irq; unsigned int max_x; unsigned int max_y; @@ -450,12 +451,13 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry) { u8 appmode = data->client->addr; u8 bootloader; + u8 family_id = data->info ? 
data->info->family_id : 0; switch (appmode) { case 0x4a: case 0x4b: /* Chips after 1664S use different scheme */ - if (retry || data->info.family_id >= 0xa2) { + if (retry || family_id >= 0xa2) { bootloader = appmode - 0x24; break; } @@ -682,7 +684,7 @@ mxt_get_object(struct mxt_data *data, u8 type) struct mxt_object *object; int i; - for (i = 0; i < data->info.object_num; i++) { + for (i = 0; i < data->info->object_num; i++) { object = data->object_table + i; if (object->type == type) return object; @@ -1453,12 +1455,12 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg) data_pos += offset; } - if (cfg_info.family_id != data->info.family_id) { + if (cfg_info.family_id != data->info->family_id) { dev_err(dev, "Family ID mismatch!\n"); return -EINVAL; } - if (cfg_info.variant_id != data->info.variant_id) { + if (cfg_info.variant_id != data->info->variant_id) { dev_err(dev, "Variant ID mismatch!\n"); return -EINVAL; } @@ -1503,7 +1505,7 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg) /* Malloc memory to store configuration */ cfg_start_ofs = MXT_OBJECT_START + - data->info.object_num * sizeof(struct mxt_object) + + data->info->object_num * sizeof(struct mxt_object) + MXT_INFO_CHECKSUM_SIZE; config_mem_size = data->mem_size - cfg_start_ofs; config_mem = kzalloc(config_mem_size, GFP_KERNEL); @@ -1554,20 +1556,6 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg) return ret; } -static int mxt_get_info(struct mxt_data *data) -{ - struct i2c_client *client = data->client; - struct mxt_info *info = &data->info; - int error; - - /* Read 7-byte info block starting at address 0 */ - error = __mxt_read_reg(client, 0, sizeof(*info), info); - if (error) - return error; - - return 0; -} - static void mxt_free_input_device(struct mxt_data *data) { if (data->input_dev) { @@ -1582,9 +1570,10 @@ static void mxt_free_object_table(struct mxt_data *data) video_unregister_device(&data->dbg.vdev); v4l2_device_unregister(&data->dbg.v4l2); #endif - - kfree(data->object_table); data->object_table = NULL; + data->info = NULL; + kfree(data->raw_info_block); + data->raw_info_block = NULL; kfree(data->msg_buf); data->msg_buf = NULL; data->T5_address = 0; @@ -1600,34 +1589,18 @@ static void mxt_free_object_table(struct mxt_data *data) data->max_reportid = 0; } -static int mxt_get_object_table(struct mxt_data *data) +static int mxt_parse_object_table(struct mxt_data *data, + struct mxt_object *object_table) { struct i2c_client *client = data->client; - size_t table_size; - struct mxt_object *object_table; - int error; int i; u8 reportid; u16 end_address; - table_size = data->info.object_num * sizeof(struct mxt_object); - object_table = kzalloc(table_size, GFP_KERNEL); - if (!object_table) { - dev_err(&data->client->dev, "Failed to allocate memory\n"); - return -ENOMEM; - } - - error = __mxt_read_reg(client, MXT_OBJECT_START, table_size, - object_table); - if (error) { - kfree(object_table); - return error; - } - /* Valid Report IDs start counting from 1 */ reportid = 1; data->mem_size = 0; - for (i = 0; i < data->info.object_num; i++) { + for (i = 0; i < data->info->object_num; i++) { struct mxt_object *object = object_table + i; u8 min_id, max_id; @@ -1651,8 +1624,8 @@ static int mxt_get_object_table(struct mxt_data *data) switch (object->type) { case MXT_GEN_MESSAGE_T5: - if (data->info.family_id == 0x80 && - data->info.version < 0x20) { + if (data->info->family_id == 0x80 && + data->info->version < 0x20) { /* * On mXT224 firmware 
versions prior to V2.0 * read and discard unused CRC byte otherwise @@ -1707,24 +1680,102 @@ static int mxt_get_object_table(struct mxt_data *data) /* If T44 exists, T5 position has to be directly after */ if (data->T44_address && (data->T5_address != data->T44_address + 1)) { dev_err(&client->dev, "Invalid T44 position\n"); - error = -EINVAL; - goto free_object_table; + return -EINVAL; } data->msg_buf = kcalloc(data->max_reportid, data->T5_msg_size, GFP_KERNEL); - if (!data->msg_buf) { - dev_err(&client->dev, "Failed to allocate message buffer\n"); + if (!data->msg_buf) + return -ENOMEM; + + return 0; +} + +static int mxt_read_info_block(struct mxt_data *data) +{ + struct i2c_client *client = data->client; + int error; + size_t size; + void *id_buf, *buf; + uint8_t num_objects; + u32 calculated_crc; + u8 *crc_ptr; + + /* If info block already allocated, free it */ + if (data->raw_info_block) + mxt_free_object_table(data); + + /* Read 7-byte ID information block starting at address 0 */ + size = sizeof(struct mxt_info); + id_buf = kzalloc(size, GFP_KERNEL); + if (!id_buf) + return -ENOMEM; + + error = __mxt_read_reg(client, 0, size, id_buf); + if (error) + goto err_free_mem; + + /* Resize buffer to give space for rest of info block */ + num_objects = ((struct mxt_info *)id_buf)->object_num; + size += (num_objects * sizeof(struct mxt_object)) + + MXT_INFO_CHECKSUM_SIZE; + + buf = krealloc(id_buf, size, GFP_KERNEL); + if (!buf) { error = -ENOMEM; - goto free_object_table; + goto err_free_mem; + } + id_buf = buf; + + /* Read rest of info block */ + error = __mxt_read_reg(client, MXT_OBJECT_START, + size - MXT_OBJECT_START, + id_buf + MXT_OBJECT_START); + if (error) + goto err_free_mem; + + /* Extract & calculate checksum */ + crc_ptr = id_buf + size - MXT_INFO_CHECKSUM_SIZE; + data->info_crc = crc_ptr[0] | (crc_ptr[1] << 8) | (crc_ptr[2] << 16); + + calculated_crc = mxt_calculate_crc(id_buf, 0, + size - MXT_INFO_CHECKSUM_SIZE); + + /* + * CRC mismatch can be caused by data corruption due to I2C comms + * issue or else device is not using Object Based Protocol (eg i2c-hid) + */ + if ((data->info_crc == 0) || (data->info_crc != calculated_crc)) { + dev_err(&client->dev, + "Info Block CRC error calculated=0x%06X read=0x%06X\n", + calculated_crc, data->info_crc); + error = -EIO; + goto err_free_mem; + } + + data->raw_info_block = id_buf; + data->info = (struct mxt_info *)id_buf; + + dev_info(&client->dev, + "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n", + data->info->family_id, data->info->variant_id, + data->info->version >> 4, data->info->version & 0xf, + data->info->build, data->info->object_num); + + /* Parse object table information */ + error = mxt_parse_object_table(data, id_buf + MXT_OBJECT_START); + if (error) { + dev_err(&client->dev, "Error %d parsing object table\n", error); + mxt_free_object_table(data); + goto err_free_mem; } - data->object_table = object_table; + data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START); return 0; -free_object_table: - mxt_free_object_table(data); +err_free_mem: + kfree(id_buf); return error; } @@ -2039,7 +2090,7 @@ static int mxt_initialize(struct mxt_data *data) int error; while (1) { - error = mxt_get_info(data); + error = mxt_read_info_block(data); if (!error) break; @@ -2070,16 +2121,9 @@ static int mxt_initialize(struct mxt_data *data) msleep(MXT_FW_RESET_TIME); } - /* Get object table information */ - error = mxt_get_object_table(data); - if (error) { - dev_err(&client->dev, "Error %d reading object table\n", 
error); - return error; - } - error = mxt_acquire_irq(data); if (error) - goto err_free_object_table; + return error; error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME, &client->dev, GFP_KERNEL, data, @@ -2087,14 +2131,10 @@ static int mxt_initialize(struct mxt_data *data) if (error) { dev_err(&client->dev, "Failed to invoke firmware loader: %d\n", error); - goto err_free_object_table; + return error; } return 0; - -err_free_object_table: - mxt_free_object_table(data); - return error; } static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep) @@ -2155,7 +2195,7 @@ static int mxt_init_t7_power_cfg(struct mxt_data *data) static u16 mxt_get_debug_value(struct mxt_data *data, unsigned int x, unsigned int y) { - struct mxt_info *info = &data->info; + struct mxt_info *info = data->info; struct mxt_dbg *dbg = &data->dbg; unsigned int ofs, page; unsigned int col = 0; @@ -2483,7 +2523,7 @@ static const struct video_device mxt_video_device = { static void mxt_debug_init(struct mxt_data *data) { - struct mxt_info *info = &data->info; + struct mxt_info *info = data->info; struct mxt_dbg *dbg = &data->dbg; struct mxt_object *object; int error; @@ -2569,7 +2609,6 @@ static int mxt_configure_objects(struct mxt_data *data, const struct firmware *cfg) { struct device *dev = &data->client->dev; - struct mxt_info *info = &data->info; int error; error = mxt_init_t7_power_cfg(data); @@ -2594,11 +2633,6 @@ static int mxt_configure_objects(struct mxt_data *data, mxt_debug_init(data); - dev_info(dev, - "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n", - info->family_id, info->variant_id, info->version >> 4, - info->version & 0xf, info->build, info->object_num); - return 0; } @@ -2607,7 +2641,7 @@ static ssize_t mxt_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mxt_data *data = dev_get_drvdata(dev); - struct mxt_info *info = &data->info; + struct mxt_info *info = data->info; return scnprintf(buf, PAGE_SIZE, "%u.%u.%02X\n", info->version >> 4, info->version & 0xf, info->build); } @@ -2617,7 +2651,7 @@ static ssize_t mxt_hw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mxt_data *data = dev_get_drvdata(dev); - struct mxt_info *info = &data->info; + struct mxt_info *info = data->info; return scnprintf(buf, PAGE_SIZE, "%u.%u\n", info->family_id, info->variant_id); } @@ -2656,7 +2690,7 @@ static ssize_t mxt_object_show(struct device *dev, return -ENOMEM; error = 0; - for (i = 0; i < data->info.object_num; i++) { + for (i = 0; i < data->info->object_num; i++) { object = data->object_table + i; if (!mxt_object_readable(object->type)) @@ -3030,6 +3064,15 @@ static const struct dmi_system_id mxt_dmi_table[] = { }, .driver_data = samus_platform_data, }, + { + /* Samsung Chromebook Pro */ + .ident = "Samsung Chromebook Pro", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_PRODUCT_NAME, "Caroline"), + }, + .driver_data = samus_platform_data, + }, { /* Other Google Chromebooks */ .ident = "Chromebook", diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index b3bbad7d2282..2bfa89ec552c 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -808,8 +808,10 @@ static int __maybe_unused goodix_suspend(struct device *dev) int error; /* We need gpio pins to suspend/resume */ - if (!ts->gpiod_int || !ts->gpiod_rst) + if (!ts->gpiod_int || !ts->gpiod_rst) { + disable_irq(client->irq); return 0; + } 
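The goodix hunks in this suspend path and the resume path just below are a matched pair: when the driver has no interrupt/reset GPIOs, suspend now disables the client IRQ before returning early and resume re-enables it, keeping the IRQ core's nesting count balanced. A kernel-style sketch of that pattern, with illustrative function names that are not the driver's own:

#include <linux/i2c.h>
#include <linux/interrupt.h>

static int __maybe_unused sketch_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	/*
	 * Without reset/interrupt GPIOs the controller cannot be put to
	 * sleep, so just stop it from raising interrupts while suspended.
	 */
	disable_irq(client->irq);
	return 0;
}

static int __maybe_unused sketch_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	/*
	 * Must pair 1:1 with the disable_irq() in suspend, because the
	 * IRQ core keeps a nesting depth count.
	 */
	enable_irq(client->irq);
	return 0;
}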
wait_for_completion(&ts->firmware_loading_complete); @@ -849,8 +851,10 @@ static int __maybe_unused goodix_resume(struct device *dev) struct goodix_ts_data *ts = i2c_get_clientdata(client); int error; - if (!ts->gpiod_int || !ts->gpiod_rst) + if (!ts->gpiod_int || !ts->gpiod_rst) { + enable_irq(client->irq); return 0; + } /* * Exit sleep mode by outputting HIGH level to INT pin @@ -884,6 +888,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id); #ifdef CONFIG_ACPI static const struct acpi_device_id goodix_acpi_match[] = { { "GDIX1001", 0 }, + { "GDIX1002", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, goodix_acpi_match); diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c index 8c6c6178ec12..025bae3853cc 100644 --- a/drivers/input/touchscreen/stmfts.c +++ b/drivers/input/touchscreen/stmfts.c @@ -687,6 +687,14 @@ static int stmfts_probe(struct i2c_client *client, input_set_drvdata(sdata->input, sdata); + /* + * stmfts_power_on expects interrupt to be disabled, but + * at this point the device is still off and I do not trust + * the status of the irq line that can generate some spurious + * interrupts. To be on the safe side it's better to not enable + * the interrupts during their request. + */ + irq_set_status_flags(client->irq, IRQ_NOAUTOEN); err = devm_request_threaded_irq(&client->dev, client->irq, NULL, stmfts_irq_handler, IRQF_ONESHOT, @@ -694,9 +702,6 @@ static int stmfts_probe(struct i2c_client *client, if (err) return err; - /* stmfts_power_on expects interrupt to be disabled */ - disable_irq(client->irq); - dev_dbg(&client->dev, "initializing ST-Microelectronics FTS...\n"); err = stmfts_power_on(sdata); diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 8e8874d23717..10190e361a13 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -311,6 +311,8 @@ static struct iommu_dev_data *find_dev_data(u16 devid) if (dev_data == NULL) { dev_data = alloc_dev_data(devid); + if (!dev_data) + return NULL; if (translation_pre_enabled(iommu)) dev_data->defer_attach = true; @@ -3155,7 +3157,7 @@ static void amd_iommu_apply_resv_region(struct device *dev, unsigned long start, end; start = IOVA_PFN(region->start); - end = IOVA_PFN(region->start + region->length); + end = IOVA_PFN(region->start + region->length - 1); WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL); } diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index e67ba6c40faf..8f7a3c00b6cf 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1611,13 +1611,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; domain->geometry.aperture_end = (1UL << ias) - 1; domain->geometry.force_aperture = true; - smmu_domain->pgtbl_ops = pgtbl_ops; ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); - if (ret < 0) + if (ret < 0) { free_io_pgtable_ops(pgtbl_ops); + return ret; + } - return ret; + smmu_domain->pgtbl_ops = pgtbl_ops; + return 0; } static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) @@ -1644,7 +1646,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) { - int i; + int i, j; struct arm_smmu_master_data *master = fwspec->iommu_priv; struct arm_smmu_device *smmu = master->smmu; @@ -1652,6 +1654,13 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) u32 sid = fwspec->ids[i]; __le64 *step = 
arm_smmu_get_step_for_sid(smmu, sid); + /* Bridged PCI devices may end up with duplicated IDs */ + for (j = 0; j < i; j++) + if (fwspec->ids[j] == sid) + break; + if (j < i) + continue; + arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste); } } diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 57c920c1372d..e3dbb6101b4a 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -1342,7 +1342,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, struct qi_desc desc; if (mask) { - BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1)); + BUG_ON(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1)); addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1; desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE; } else diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 25c2c75f5332..13485a40dd46 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1344,8 +1344,15 @@ static const struct iommu_ops exynos_iommu_ops = { static int __init exynos_iommu_init(void) { + struct device_node *np; int ret; + np = of_find_matching_node(NULL, sysmmu_of_match); + if (!np) + return 0; + + of_node_put(np); + lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); if (!lv2table_kmem_cache) { diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 6784a05dd6b2..e8414bcf8390 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1603,8 +1603,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, * flush. However, device IOTLB doesn't need to be flushed in this case. */ if (!cap_caching_mode(iommu->cap) || !map) - iommu_flush_dev_iotlb(get_iommu_domain(iommu, did), - addr, mask); + iommu_flush_dev_iotlb(domain, addr, mask); } static void iommu_flush_iova(struct iova_domain *iovad) @@ -2254,10 +2253,12 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, uint64_t tmp; if (!sg_res) { + unsigned int pgoff = sg->offset & ~PAGE_MASK; + sg_res = aligned_nrpages(sg->offset, sg->length); - sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; + sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff; sg->dma_length = sg->length; - pteval = page_to_phys(sg_page(sg)) | prot; + pteval = (sg_phys(sg) - pgoff) | prot; phys_pfn = pteval >> VTD_PAGE_SHIFT; } @@ -3790,7 +3791,7 @@ static int intel_nontranslate_map_sg(struct device *hddev, for_each_sg(sglist, sg, nelems, i) { BUG_ON(!sg_page(sg)); - sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset; + sg->dma_address = sg_phys(sg); sg->dma_length = sg->length; } return nelems; diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index f6697e55c2d4..d7def26ccf79 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -129,6 +129,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu) pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n", iommu->name); dmar_free_hwirq(irq); + iommu->pr_irq = 0; goto err; } dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); @@ -144,9 +145,11 @@ int intel_svm_finish_prq(struct intel_iommu *iommu) dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); - free_irq(iommu->pr_irq, iommu); - dmar_free_hwirq(iommu->pr_irq); - iommu->pr_irq = 0; + if (iommu->pr_irq) { + free_irq(iommu->pr_irq, iommu); + dmar_free_hwirq(iommu->pr_irq); + iommu->pr_irq = 0; + } free_pages((unsigned long)iommu->prq, PRQ_ORDER); iommu->prq = NULL; @@ 
-379,6 +382,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ pasid_max - 1, GFP_KERNEL); if (ret < 0) { kfree(svm); + kfree(sdev); goto out; } svm->pasid = ret; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 16d33ac19db0..c30f62700431 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -60,7 +60,7 @@ (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data)) #define REG_MMU_IVRP_PADDR 0x114 -#define F_MMU_IVRP_PA_SET(pa, ext) (((pa) >> 1) | ((!!(ext)) << 31)) + #define REG_MMU_VLD_PA_RNG 0x118 #define F_MMU_VLD_PA_RNG(EA, SA) (((EA) << 8) | (SA)) @@ -532,8 +532,13 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) F_INT_PRETETCH_TRANSATION_FIFO_FAULT; writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL); - writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB), - data->base + REG_MMU_IVRP_PADDR); + if (data->m4u_plat == M4U_MT8173) + regval = (data->protect_base >> 1) | (data->enable_4GB << 31); + else + regval = lower_32_bits(data->protect_base) | + upper_32_bits(data->protect_base); + writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR); + if (data->enable_4GB && data->m4u_plat != M4U_MT8173) { /* * If 4GB mode is enabled, the validate PA range is from @@ -688,6 +693,7 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev) reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG); reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0); reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL); + reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR); clk_disable_unprepare(data->bclk); return 0; } @@ -710,8 +716,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev) writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG); writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0); writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL); - writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB), - base + REG_MMU_IVRP_PADDR); + writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR); if (data->m4u_dom) writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0], base + REG_MMU_PT_BASE_ADDR); diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index b4451a1c7c2f..778498b8633f 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -32,6 +32,7 @@ struct mtk_iommu_suspend_reg { u32 ctrl_reg; u32 int_control0; u32 int_main_control; + u32 ivrp_paddr; }; enum mtk_iommu_plat { diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index bc1efbfb9ddf..542930cd183d 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -708,7 +708,7 @@ static struct platform_driver mtk_iommu_driver = { .probe = mtk_iommu_probe, .remove = mtk_iommu_remove, .driver = { - .name = "mtk-iommu", + .name = "mtk-iommu-v1", .of_match_table = mtk_iommu_of_ids, .pm = &mtk_iommu_pm_ops, } diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c index 9ae71804b5dd..1c2ca8d51a70 100644 --- a/drivers/irqchip/irq-gic-common.c +++ b/drivers/irqchip/irq-gic-common.c @@ -21,6 +21,8 @@ #include "irq-gic-common.h" +static DEFINE_RAW_SPINLOCK(irq_controller_lock); + static const struct gic_kvm_info *gic_kvm_info; const struct gic_kvm_info *gic_get_kvm_info(void) @@ -52,11 +54,13 @@ int gic_configure_irq(unsigned int irq, unsigned int type, u32 confoff = (irq / 16) * 4; u32 val, oldval; int ret = 0; + unsigned long flags; /* * Read current 
configuration register, and insert the config * for "irq", depending on "type". */ + raw_spin_lock_irqsave(&irq_controller_lock, flags); val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff); if (type & IRQ_TYPE_LEVEL_MASK) val &= ~confmask; @@ -64,8 +68,10 @@ int gic_configure_irq(unsigned int irq, unsigned int type, val |= confmask; /* If the current configuration is the same, then we are done */ - if (val == oldval) + if (val == oldval) { + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); return 0; + } /* * Write back the new configuration, and possibly re-enable @@ -83,6 +89,7 @@ int gic_configure_irq(unsigned int irq, unsigned int type, pr_warn("GIC: PPI%d is secure or misconfigured\n", irq - 16); } + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); if (sync_access) sync_access(); diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index 14a8c0a7e095..25a98de5cfb2 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c @@ -132,6 +132,8 @@ static int __init its_pci_of_msi_init(void) for (np = of_find_matching_node(NULL, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np)) + continue; if (!of_property_read_bool(np, "msi-controller")) continue; diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c index 833a90fe33ae..8881a053c173 100644 --- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c @@ -154,6 +154,8 @@ static void __init its_pmsi_of_init(void) for (np = of_find_matching_node(NULL, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np)) + continue; if (!of_property_read_bool(np, "msi-controller")) continue; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index e88395605e32..2ea39a83737f 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1310,7 +1310,7 @@ static struct irq_chip its_irq_chip = { * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. */ #define IRQS_PER_CHUNK_SHIFT 5 -#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) +#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT) #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ static unsigned long *lpi_bitmap; @@ -2026,11 +2026,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, dev = kzalloc(sizeof(*dev), GFP_KERNEL); /* - * At least one bit of EventID is being used, hence a minimum - * of two entries. No, the architecture doesn't let you - * express an ITT with a single entry. + * We allocate at least one chunk worth of LPIs per device, + * and thus that many ITEs. The device may require less though. */
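For a sense of what this chunk-based sizing works out to, the stand-alone sketch below mirrors the arithmetic in the next hunk. The 1UL in the new IRQS_PER_CHUNK definition keeps both arguments of the kernel's type-checked max() unsigned long, matching the return type of roundup_pow_of_two(). The ite_size and 256-byte ITS_ITT_ALIGN values here are assumptions for illustration; the real ones come from the ITS hardware:

#include <stdio.h>

#define IRQS_PER_CHUNK_SHIFT 5
#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT)
#define ITS_ITT_ALIGN 256UL	/* assumed for illustration */

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned long ite_size = 8;	/* assumed bytes per ITT entry */
	unsigned long nvecs;

	for (nvecs = 1; nvecs <= 64; nvecs *= 4) {
		unsigned long nr_ites = roundup_pow_of_two(nvecs);
		unsigned long sz;

		if (nr_ites < IRQS_PER_CHUNK)	/* never below one chunk */
			nr_ites = IRQS_PER_CHUNK;
		sz = nr_ites * ite_size;
		if (sz < ITS_ITT_ALIGN)
			sz = ITS_ITT_ALIGN;
		sz += ITS_ITT_ALIGN - 1;	/* slack for manual alignment */
		printf("nvecs=%2lu -> nr_ites=%2lu, alloc=%3lu bytes\n",
		       nvecs, nr_ites, sz);
	}
	return 0;
}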
- nr_ites = max(2UL, roundup_pow_of_two(nvecs)); + nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs)); sz = nr_ites * its->ite_size; sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; itt = kzalloc(sz, GFP_KERNEL); @@ -2222,7 +2221,14 @@ static void its_irq_domain_activate(struct irq_domain *domain, cpu_mask = cpumask_of_node(its_dev->its->numa_node); /* Bind the LPI to the first possible CPU */ - cpu = cpumask_first(cpu_mask); + cpu = cpumask_first_and(cpu_mask, cpu_online_mask); + if (cpu >= nr_cpu_ids) { + if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) + return; + + cpu = cpumask_first(cpu_online_mask); + } + its_dev->event_map.col_map[event] = cpu; irq_data_update_effective_affinity(d, cpumask_of(cpu)); @@ -3084,6 +3090,8 @@ static int __init its_of_probe(struct device_node *node) for (np = of_find_matching_node(node, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np)) + continue; if (!of_property_read_bool(np, "msi-controller")) { pr_warn("%pOF: no msi-controller property, ITS ignored\n", np); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index b5df99c6f680..3d7374655587 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -645,7 +645,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | tlist << ICC_SGI1R_TARGET_LIST_SHIFT); - pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); + pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); gic_write_sgi1r(val); } @@ -660,7 +660,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) * Ensure that stores to Normal memory are visible to the * other CPUs before issuing the IPI. */ - smp_wmb(); + wmb(); for_each_cpu(cpu, mask) { unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL; @@ -1071,18 +1071,18 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) int nr_parts; struct partition_affinity *parts; - parts_node = of_find_node_by_name(gic_node, "ppi-partitions"); + parts_node = of_get_child_by_name(gic_node, "ppi-partitions"); if (!parts_node) return; nr_parts = of_get_child_count(parts_node); if (!nr_parts) - return; + goto out_put_node; parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL); if (WARN_ON(!parts)) - return; + goto out_put_node; for_each_child_of_node(parts_node, child_part) { struct partition_affinity *part; @@ -1149,6 +1149,9 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) gic_data.ppi_descs[i] = desc; } + +out_put_node: + of_node_put(parts_node); } static void __init gic_of_setup_kvm_info(struct device_node *node) @@ -1294,6 +1297,10 @@ gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header, u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2; void __iomem *redist_base; + /* A GICC entry without ACPI_MADT_ENABLED set is not usable, so skip it */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + redist_base = ioremap(gicc->gicr_base_address, size); if (!redist_base) return -ENOMEM; @@ -1343,6 +1350,13 @@ static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header, if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) return 0; + /* + * It is perfectly valid for firmware to pass a disabled GICC entry; + * the driver should not treat that as an error. Skip the entry + * instead of failing the probe. + */
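A condensed, self-contained sketch of the contract the two MADT GICC callbacks now share: disabled entries are silently skipped and never turned into probe failures. The struct below is reduced to the fields this logic cares about, and -1 stands in for -ENODEV:

#include <stdio.h>

#define ACPI_MADT_ENABLED 0x1U

struct gicc_entry {
	unsigned int flags;
	unsigned long long gicr_base_address;
};

/* parse-style callback: returning 0 means "continue with the table" */
static int parse_gicc(const struct gicc_entry *e)
{
	if (!(e->flags & ACPI_MADT_ENABLED))
		return 0;	/* skip the disabled CPU entry */
	printf("map redistributor at %#llx\n", e->gicr_base_address);
	return 0;
}

/*
 * match-style callback: only an enabled entry without a usable
 * redistributor address should make the whole probe fail
 */
static int match_gicc(const struct gicc_entry *e)
{
	if ((e->flags & ACPI_MADT_ENABLED) && e->gicr_base_address)
		return 0;
	if (!(e->flags & ACPI_MADT_ENABLED))
		return 0;	/* disabled: skip, not an error */
	return -1;		/* stands in for -ENODEV */
}

int main(void)
{
	struct gicc_entry cpus[] = {
		{ ACPI_MADT_ENABLED, 0x80000000ULL },
		{ 0, 0 },	/* hot-pluggable CPU, currently disabled */
	};
	unsigned int i;

	for (i = 0; i < 2; i++)
		if (parse_gicc(&cpus[i]) || match_gicc(&cpus[i]))
			return 1;
	return 0;
}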
+ */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + return -ENODEV; } diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c index 119f4ef0d421..b7f943f96068 100644 --- a/drivers/irqchip/irq-ls-scfg-msi.c +++ b/drivers/irqchip/irq-ls-scfg-msi.c @@ -21,6 +21,7 @@ #include #include #include +#include #define MSI_IRQS_PER_MSIR 32 #define MSI_MSIR_OFFSET 4 @@ -94,6 +95,8 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) if (msi_affinity_flag) msg->data |= cpumask_first(data->common->affinity); + + iommu_dma_map_msi_msg(data->irq, msg); } static int ls_scfg_msi_set_affinity(struct irq_data *irq_data, diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index c90976d7e53c..a9f300efce54 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -427,8 +427,6 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, spin_lock_irqsave(&gic_lock, flags); write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); - gic_clear_pcpu_masks(intr); - set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); irq_data_update_effective_affinity(data, cpumask_of(cpu)); spin_unlock_irqrestore(&gic_lock, flags); diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c index 6aa3ea479214..7f0c0be322e0 100644 --- a/drivers/irqchip/qcom-irq-combiner.c +++ b/drivers/irqchip/qcom-irq-combiner.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -68,7 +68,7 @@ static void combiner_handle_irq(struct irq_desc *desc) bit = readl_relaxed(combiner->regs[reg].addr); status = bit & combiner->regs[reg].enabled; - if (!status) + if (bit && !status) pr_warn_ratelimited("Unexpected IRQ on CPU%d: (%08x %08lx %p)\n", smp_processor_id(), bit, combiner->regs[reg].enabled, @@ -238,7 +238,7 @@ static int __init combiner_probe(struct platform_device *pdev) { struct combiner *combiner; size_t alloc_sz; - u32 nregs; + int nregs; int err; nregs = count_registers(pdev); diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c index 944a7f338099..1b25d8bc153a 100644 --- a/drivers/isdn/hardware/eicon/diva.c +++ b/drivers/isdn/hardware/eicon/diva.c @@ -388,10 +388,10 @@ void divasa_xdi_driver_unload(void) ** Receive and process command from user mode utility */ void *diva_xdi_open_adapter(void *os_handle, const void __user *src, - int length, + int length, void *mptr, divas_xdi_copy_from_user_fn_t cp_fn) { - diva_xdi_um_cfg_cmd_t msg; + diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr; diva_os_xdi_adapter_t *a = NULL; diva_os_spin_lock_magic_t old_irql; struct list_head *tmp; @@ -401,21 +401,21 @@ void *diva_xdi_open_adapter(void *os_handle, const void __user *src, length, sizeof(diva_xdi_um_cfg_cmd_t))) return NULL; } - if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) { + if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) { DBG_ERR(("A: A(?) 
open, write error")) return NULL; } diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter"); list_for_each(tmp, &adapter_queue) { a = list_entry(tmp, diva_os_xdi_adapter_t, link); - if (a->controller == (int)msg.adapter) + if (a->controller == (int)msg->adapter) break; a = NULL; } diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter"); if (!a) { - DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter)) + DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter)) } return (a); @@ -437,8 +437,10 @@ void diva_xdi_close_adapter(void *adapter, void *os_handle) int diva_xdi_write(void *adapter, void *os_handle, const void __user *src, - int length, divas_xdi_copy_from_user_fn_t cp_fn) + int length, void *mptr, + divas_xdi_copy_from_user_fn_t cp_fn) { + diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr; diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter; void *data; @@ -459,7 +461,13 @@ diva_xdi_write(void *adapter, void *os_handle, const void __user *src, return (-2); } - length = (*cp_fn) (os_handle, data, src, length); + if (msg) { + *(diva_xdi_um_cfg_cmd_t *)data = *msg; + length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg), + src + sizeof(*msg), length - sizeof(*msg)); + } else { + length = (*cp_fn) (os_handle, data, src, length); + } if (length > 0) { if ((*(a->interface.cmd_proc)) (a, (diva_xdi_um_cfg_cmd_t *) data, length)) { diff --git a/drivers/isdn/hardware/eicon/diva.h b/drivers/isdn/hardware/eicon/diva.h index b067032093a8..1ad76650fbf9 100644 --- a/drivers/isdn/hardware/eicon/diva.h +++ b/drivers/isdn/hardware/eicon/diva.h @@ -20,10 +20,11 @@ int diva_xdi_read(void *adapter, void *os_handle, void __user *dst, int max_length, divas_xdi_copy_to_user_fn_t cp_fn); int diva_xdi_write(void *adapter, void *os_handle, const void __user *src, - int length, divas_xdi_copy_from_user_fn_t cp_fn); + int length, void *msg, + divas_xdi_copy_from_user_fn_t cp_fn); void *diva_xdi_open_adapter(void *os_handle, const void __user *src, - int length, + int length, void *msg, divas_xdi_copy_from_user_fn_t cp_fn); void diva_xdi_close_adapter(void *adapter, void *os_handle); diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c index b2023e08dcd2..932e98d0d901 100644 --- a/drivers/isdn/hardware/eicon/divasmain.c +++ b/drivers/isdn/hardware/eicon/divasmain.c @@ -591,19 +591,22 @@ static int divas_release(struct inode *inode, struct file *file) static ssize_t divas_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { + diva_xdi_um_cfg_cmd_t msg; int ret = -EINVAL; if (!file->private_data) { file->private_data = diva_xdi_open_adapter(file, buf, - count, + count, &msg, xdi_copy_from_user); - } - if (!file->private_data) { - return (-ENODEV); + if (!file->private_data) + return (-ENODEV); + ret = diva_xdi_write(file->private_data, file, + buf, count, &msg, xdi_copy_from_user); + } else { + ret = diva_xdi_write(file->private_data, file, + buf, count, NULL, xdi_copy_from_user); } - ret = diva_xdi_write(file->private_data, file, - buf, count, xdi_copy_from_user); switch (ret) { case -1: /* Message should be removed from rx mailbox first */ ret = -EBUSY; @@ -622,11 +625,12 @@ static ssize_t divas_write(struct file *file, const char __user *buf, static ssize_t divas_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { + diva_xdi_um_cfg_cmd_t msg; int ret = -EINVAL; if (!file->private_data) { file->private_data = diva_xdi_open_adapter(file, buf, - count, + count, &msg, 
xdi_copy_from_user); } if (!file->private_data) { diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c index dce6632daae1..ae2b2669af1b 100644 --- a/drivers/isdn/hardware/mISDN/avmfritz.c +++ b/drivers/isdn/hardware/mISDN/avmfritz.c @@ -156,7 +156,7 @@ _set_debug(struct fritzcard *card) } static int -set_debug(const char *val, struct kernel_param *kp) +set_debug(const char *val, const struct kernel_param *kp) { int ret; struct fritzcard *card; diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c index d5bdbaf93a1a..1fc290659e94 100644 --- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c +++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c @@ -244,7 +244,7 @@ _set_debug(struct inf_hw *card) } static int -set_debug(const char *val, struct kernel_param *kp) +set_debug(const char *val, const struct kernel_param *kp) { int ret; struct inf_hw *card; diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c index 6a6d848bd18e..89d9ba8ed535 100644 --- a/drivers/isdn/hardware/mISDN/netjet.c +++ b/drivers/isdn/hardware/mISDN/netjet.c @@ -111,7 +111,7 @@ _set_debug(struct tiger_hw *card) } static int -set_debug(const char *val, struct kernel_param *kp) +set_debug(const char *val, const struct kernel_param *kp) { int ret; struct tiger_hw *card; diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c index 9815bb4eec9c..1f1446ed8d5f 100644 --- a/drivers/isdn/hardware/mISDN/speedfax.c +++ b/drivers/isdn/hardware/mISDN/speedfax.c @@ -94,7 +94,7 @@ _set_debug(struct sfax_hw *card) } static int -set_debug(const char *val, struct kernel_param *kp) +set_debug(const char *val, const struct kernel_param *kp) { int ret; struct sfax_hw *card; diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c index d80072fef434..209036a4af3a 100644 --- a/drivers/isdn/hardware/mISDN/w6692.c +++ b/drivers/isdn/hardware/mISDN/w6692.c @@ -101,7 +101,7 @@ _set_debug(struct w6692_hw *card) } static int -set_debug(const char *val, struct kernel_param *kp) +set_debug(const char *val, const struct kernel_param *kp) { int ret; struct w6692_hw *card; diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 38a5bb764c7b..598724ffde4e 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -1640,13 +1640,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) } else return -EINVAL; case IIOCDBGVAR: - if (arg) { - if (copy_to_user(argp, &dev, sizeof(ulong))) - return -EFAULT; - return 0; - } else - return -EINVAL; - break; + return -EINVAL; default: if ((cmd & IIOCDRVCTL) == IIOCDRVCTL) cmd = ((cmd >> _IOC_NRSHIFT) & _IOC_NRMASK) & ISDN_DRVIOCTL_MASK; diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index ef1360445413..9ce6b32f52a1 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c @@ -189,6 +189,7 @@ void led_blink_set(struct led_classdev *led_cdev, { del_timer_sync(&led_cdev->blink_timer); + clear_bit(LED_BLINK_SW, &led_cdev->work_flags); clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags); clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c index 905729191d3e..78183f90820e 100644 --- a/drivers/leds/leds-pca955x.c +++ b/drivers/leds/leds-pca955x.c @@ -61,6 +61,10 @@ #define PCA955X_LS_BLINK0 0x2 /* Blink at PWM0 rate */ #define PCA955X_LS_BLINK1 0x3 /* Blink at PWM1 rate */ +#define 
PCA955X_GPIO_INPUT LED_OFF +#define PCA955X_GPIO_HIGH LED_OFF +#define PCA955X_GPIO_LOW LED_FULL + enum pca955x_type { pca9550, pca9551, @@ -329,9 +333,9 @@ static int pca955x_set_value(struct gpio_chip *gc, unsigned int offset, struct pca955x_led *led = &pca955x->leds[offset]; if (val) - return pca955x_led_set(&led->led_cdev, LED_FULL); - else - return pca955x_led_set(&led->led_cdev, LED_OFF); + return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_HIGH); + + return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_LOW); } static void pca955x_gpio_set_value(struct gpio_chip *gc, unsigned int offset, @@ -355,8 +359,11 @@ static int pca955x_gpio_get_value(struct gpio_chip *gc, unsigned int offset) static int pca955x_gpio_direction_input(struct gpio_chip *gc, unsigned int offset) { - /* To use as input ensure pin is not driven */ - return pca955x_set_value(gc, offset, 0); + struct pca955x *pca955x = gpiochip_get_data(gc); + struct pca955x_led *led = &pca955x->leds[offset]; + + /* To use as input ensure pin is not driven. */ + return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_INPUT); } static int pca955x_gpio_direction_output(struct gpio_chip *gc, diff --git a/drivers/leds/leds-pm8058.c b/drivers/leds/leds-pm8058.c index a52674327857..8988ba3b2d65 100644 --- a/drivers/leds/leds-pm8058.c +++ b/drivers/leds/leds-pm8058.c @@ -106,7 +106,7 @@ static int pm8058_led_probe(struct platform_device *pdev) if (!led) return -ENOMEM; - led->ledtype = (u32)of_device_get_match_data(&pdev->dev); + led->ledtype = (u32)(unsigned long)of_device_get_match_data(&pdev->dev); map = dev_get_regmap(pdev->dev.parent, NULL); if (!map) { diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index 81501644fb15..3f0ddc0d7393 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -193,7 +193,7 @@ void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off, bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE); for (i = off; i < nr_pages + off; i++) { bv = bio->bi_io_vec[i]; - mempool_free(bv.bv_page, pblk->page_pool); + mempool_free(bv.bv_page, pblk->page_bio_pool); } } @@ -205,14 +205,14 @@ int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags, int i, ret; for (i = 0; i < nr_pages; i++) { - page = mempool_alloc(pblk->page_pool, flags); + page = mempool_alloc(pblk->page_bio_pool, flags); if (!page) goto err; ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0); if (ret != PBLK_EXPOSED_PAGE_SIZE) { pr_err("pblk: could not add page to bio\n"); - mempool_free(page, pblk->page_pool); + mempool_free(page, pblk->page_bio_pool); goto err; } } @@ -486,12 +486,14 @@ void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs) u64 addr; int i; + spin_lock(&line->lock); addr = find_next_zero_bit(line->map_bitmap, pblk->lm.sec_per_line, line->cur_sec); line->cur_sec = addr - nr_secs; for (i = 0; i < nr_secs; i++, line->cur_sec--) WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap)); + spin_unlock(&line->lock); } u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs) diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c index 6090d28f7995..d6bae085e1d2 100644 --- a/drivers/lightnvm/pblk-gc.c +++ b/drivers/lightnvm/pblk-gc.c @@ -486,10 +486,10 @@ void pblk_gc_should_start(struct pblk *pblk) { struct pblk_gc *gc = &pblk->gc; - if (gc->gc_enabled && !gc->gc_active) + if (gc->gc_enabled && !gc->gc_active) { pblk_gc_start(pblk); - - pblk_gc_kick(pblk); + pblk_gc_kick(pblk); + } } /* @@ -628,7 +628,8 @@ void 
pblk_gc_exit(struct pblk *pblk) flush_workqueue(gc->gc_reader_wq); flush_workqueue(gc->gc_line_reader_wq); - del_timer(&gc->gc_timer); + gc->gc_enabled = 0; + del_timer_sync(&gc->gc_timer); pblk_gc_stop(pblk, 1); if (gc->gc_ts) diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 1b0f61233c21..1b75675ee67b 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -132,7 +132,6 @@ static int pblk_rwb_init(struct pblk *pblk) } /* Minimum pages needed within a lun */ -#define PAGE_POOL_SIZE 16 #define ADDR_POOL_SIZE 64 static int pblk_set_ppaf(struct pblk *pblk) @@ -247,14 +246,16 @@ static int pblk_core_init(struct pblk *pblk) if (pblk_init_global_caches(pblk)) return -ENOMEM; - pblk->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0); - if (!pblk->page_pool) + /* internal bios can be at most the sectors signaled by the device. */ + pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev), + 0); + if (!pblk->page_bio_pool) return -ENOMEM; pblk->line_ws_pool = mempool_create_slab_pool(PBLK_WS_POOL_SIZE, pblk_blk_ws_cache); if (!pblk->line_ws_pool) - goto free_page_pool; + goto free_page_bio_pool; pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache); if (!pblk->rec_pool) @@ -309,8 +310,8 @@ static int pblk_core_init(struct pblk *pblk) mempool_destroy(pblk->rec_pool); free_blk_ws_pool: mempool_destroy(pblk->line_ws_pool); -free_page_pool: - mempool_destroy(pblk->page_pool); +free_page_bio_pool: + mempool_destroy(pblk->page_bio_pool); return -ENOMEM; } @@ -322,7 +323,7 @@ static void pblk_core_free(struct pblk *pblk) if (pblk->bb_wq) destroy_workqueue(pblk->bb_wq); - mempool_destroy(pblk->page_pool); + mempool_destroy(pblk->page_bio_pool); mempool_destroy(pblk->line_ws_pool); mempool_destroy(pblk->rec_pool); mempool_destroy(pblk->g_rq_pool); @@ -681,8 +682,8 @@ static int pblk_lines_init(struct pblk *pblk) lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long); lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long); lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long); - lm->high_thrs = lm->sec_per_line / 2; - lm->mid_thrs = lm->sec_per_line / 4; + lm->mid_thrs = lm->sec_per_line / 2; + lm->high_thrs = lm->sec_per_line / 4; lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs; /* Calculate necessary pages for smeta. See comment over struct @@ -923,6 +924,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, pblk->dev = dev; pblk->disk = tdisk; pblk->state = PBLK_STATE_RUNNING; + pblk->gc.gc_enabled = 0; spin_lock_init(&pblk->trans_lock); spin_lock_init(&pblk->lock); @@ -944,6 +946,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, atomic_long_set(&pblk->recov_writes, 0); atomic_long_set(&pblk->recov_writes, 0); atomic_long_set(&pblk->recov_gc_writes, 0); + atomic_long_set(&pblk->recov_gc_reads, 0); #endif atomic_long_set(&pblk->read_failed, 0); diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c index 9bc32578a766..c0dd17a82170 100644 --- a/drivers/lightnvm/pblk-rb.c +++ b/drivers/lightnvm/pblk-rb.c @@ -142,10 +142,9 @@ static void clean_wctx(struct pblk_w_ctx *w_ctx) { int flags; -try: flags = READ_ONCE(w_ctx->flags); - if (!(flags & PBLK_SUBMITTED_ENTRY)) - goto try; + WARN_ONCE(!(flags & PBLK_SUBMITTED_ENTRY), + "pblk: overwriting unsubmitted data\n"); /* Release flags on context. 
Protect from writes and reads */ smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY); diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c index d682e89e6493..402c732f0970 100644 --- a/drivers/lightnvm/pblk-read.c +++ b/drivers/lightnvm/pblk-read.c @@ -238,7 +238,7 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd, kunmap_atomic(src_p); kunmap_atomic(dst_p); - mempool_free(src_bv.bv_page, pblk->page_pool); + mempool_free(src_bv.bv_page, pblk->page_bio_pool); hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1); } while (hole < nr_secs); @@ -499,7 +499,7 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data, data_len = (*secs_to_gc) * geo->sec_size; bio = pblk_bio_map_addr(pblk, data, *secs_to_gc, data_len, - PBLK_KMALLOC_META, GFP_KERNEL); + PBLK_VMALLOC_META, GFP_KERNEL); if (IS_ERR(bio)) { pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio)); goto err_free_dma; @@ -519,7 +519,7 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data, if (ret) { bio_endio(bio); pr_err("pblk: GC read request failed\n"); - goto err_free_dma; + goto err_free_bio; } if (!wait_for_completion_io_timeout(&wait, @@ -541,10 +541,13 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data, atomic_long_sub(*secs_to_gc, &pblk->inflight_reads); #endif + bio_put(bio); out: nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list); return NVM_IO_OK; +err_free_bio: + bio_put(bio); err_free_dma: nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list); return NVM_IO_ERR; diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index 67e623bd5c2d..053164deb072 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -618,7 +618,7 @@ struct pblk { struct list_head compl_list; - mempool_t *page_pool; + mempool_t *page_bio_pool; mempool_t *line_ws_pool; mempool_t *rec_pool; mempool_t *g_rq_pool; diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index 910b5b6f96b1..eb65b6e78d57 100644 --- a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c @@ -154,8 +154,8 @@ static void rackmeter_do_pause(struct rackmeter *rm, int pause) DBDMA_DO_STOP(rm->dma_regs); return; } - memset(rdma->buf1, 0, ARRAY_SIZE(rdma->buf1)); - memset(rdma->buf2, 0, ARRAY_SIZE(rdma->buf2)); + memset(rdma->buf1, 0, sizeof(rdma->buf1)); + memset(rdma->buf2, 0, sizeof(rdma->buf2)); rm->dma_buf_v->mark = 0; diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index ae6146311934..f052a3eb2098 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c @@ -1365,8 +1365,8 @@ static void flexrm_shutdown(struct mbox_chan *chan) /* Disable/inactivate ring */ writel_relaxed(0x0, ring->regs + RING_CONTROL); - /* Flush ring with timeout of 1s */ - timeout = 1000; + /* Set ring flush state */ + timeout = 1000; /* timeout of 1s */ writel_relaxed(BIT(CONTROL_FLUSH_SHIFT), ring->regs + RING_CONTROL); do { @@ -1374,7 +1374,23 @@ FLUSH_DONE_MASK) break; mdelay(1); - } while (timeout--); + } while (--timeout); + if (!timeout) + dev_err(ring->mbox->dev, + "setting ring%d flush state timed out\n", ring->num); + + /* Clear ring flush state */ + timeout = 1000; /* timeout of 1s */ + writel_relaxed(0x0, ring->regs + RING_CONTROL); + do { + if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) & + FLUSH_DONE_MASK)) + break; + mdelay(1); + } while (--timeout); + if (!timeout) + dev_err(ring->mbox->dev, + "clearing ring%d flush state timed out\n", ring->num);
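The change from } while (timeout--) to } while (--timeout) is what makes the if (!timeout) tests above meaningful: with post-decrement the counter underflows to -1 once the loop expires, so the test would never fire. A plain user-space demonstration, nothing driver-specific:

#include <stdio.h>

int main(void)
{
	int timeout;

	timeout = 3;
	while (timeout--)
		;	/* polling, condition never met */
	printf("post-decrement leaves timeout = %d\n", timeout);	/* -1 */

	timeout = 3;
	do {
		/* poll hardware here */
	} while (--timeout);
	printf("pre-decrement leaves timeout = %d\n", timeout);	/* 0 */

	return 0;
}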
/* Abort all in-flight requests */ for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) { diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c index 97fb956bb6e0..93f3d4d61fa7 100644 --- a/drivers/mailbox/mailbox-test.c +++ b/drivers/mailbox/mailbox-test.c @@ -30,6 +30,7 @@ #define MBOX_HEXDUMP_MAX_LEN (MBOX_HEXDUMP_LINE_LEN * \ (MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE)) +static bool mbox_data_ready; static struct dentry *root_debugfs_dir; struct mbox_test_device { @@ -152,16 +153,14 @@ static ssize_t mbox_test_message_write(struct file *filp, static bool mbox_test_message_data_ready(struct mbox_test_device *tdev) { - unsigned char data; + bool data_ready; unsigned long flags; spin_lock_irqsave(&tdev->lock, flags); - data = tdev->rx_buffer[0]; + data_ready = mbox_data_ready; spin_unlock_irqrestore(&tdev->lock, flags); - if (data != '\0') - return true; - return false; + return data_ready; } static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf, @@ -223,6 +222,7 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf, *(touser + l) = '\0'; memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN); + mbox_data_ready = false; spin_unlock_irqrestore(&tdev->lock, flags); @@ -292,6 +292,7 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message) message, MBOX_MAX_MSG_LEN); memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN); } + mbox_data_ready = true; spin_unlock_irqrestore(&tdev->lock, flags); wake_up_interruptible(&tdev->waitq); diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 4a249ee86364..d87c4725ed5b 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -460,6 +460,21 @@ config DM_VERITY If unsure, say N. +config DM_VERITY_HASH_PREFETCH_MIN_SIZE_128 + bool "Prefetch size 128" + +config DM_VERITY_HASH_PREFETCH_MIN_SIZE + int "Verity hash prefetch minimum size" + depends on DM_VERITY + range 1 4096 + default 128 if DM_VERITY_HASH_PREFETCH_MIN_SIZE_128 + default 1 + ---help--- + This sets the minimum number of hash blocks to prefetch for dm-verity. + For devices like eMMC, a larger prefetch size such as 128 can improve + performance, at the cost of the extra memory used to keep more hashes + in RAM. + config DM_VERITY_FEC bool "Verity forward error correction support" depends on DM_VERITY @@ -540,4 +555,42 @@ config DM_ZONED If unsure, say N. +config DM_ANDROID_VERITY + bool "Android verity target support" + depends on BLK_DEV_DM=y + depends on DM_VERITY=y + depends on X509_CERTIFICATE_PARSER + depends on SYSTEM_TRUSTED_KEYRING + depends on CRYPTO_RSA + depends on KEYS + depends on ASYMMETRIC_KEY_TYPE + depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE + select DM_VERITY_HASH_PREFETCH_MIN_SIZE_128 + ---help--- + This device-mapper target is virtually a VERITY target. The + target is set up by reading the metadata contents piggybacked + onto the actual data blocks in the block device. The signature + of the metadata contents is verified against the key included + in the system keyring. Upon success, the underlying verity + target is set up. + +config DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED + bool "Verity will validate blocks at most once" + depends on DM_VERITY + ---help--- + Enables the at_most_once option for dm-verity by default. + + Verify data blocks only the first time they are read from the + data device, rather than every time.
This reduces the overhead + of dm-verity so that it can be used on systems that are memory + and/or CPU constrained. However, it provides a reduced level + of security because only offline tampering of the data device's + content will be detected, not online tampering. + + Hash blocks are still verified each time they are read from the + hash device, since verification of hash blocks is less performance + critical than that of data blocks, and a hash block will not be + verified any more once all the data blocks it covers have been + verified anyway. + + If unsure, say N. endif # MD diff --git a/drivers/md/Makefile b/drivers/md/Makefile index e94b6f9be941..83109ad30a48 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -71,3 +71,7 @@ endif ifeq ($(CONFIG_DM_VERITY_FEC),y) dm-verity-objs += dm-verity-fec.o endif + +ifeq ($(CONFIG_DM_ANDROID_VERITY),y) +dm-verity-objs += dm-android-verity.o +endif diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 08035634795c..8c13a9036d07 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -287,8 +287,10 @@ do { \ break; \ \ mutex_unlock(&(ca)->set->bucket_lock); \ - if (kthread_should_stop()) \ + if (kthread_should_stop()) { \ + set_current_state(TASK_RUNNING); \ return 0; \ + } \ \ schedule(); \ mutex_lock(&(ca)->set->bucket_lock); \ @@ -407,7 +409,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait) finish_wait(&ca->set->bucket_wait, &w); out: - wake_up_process(ca->alloc_thread); + if (ca->alloc_thread) + wake_up_process(ca->alloc_thread); trace_bcache_alloc(ca, reserve); @@ -479,7 +482,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, if (b == -1) goto err; - k->ptr[i] = PTR(ca->buckets[b].gen, + k->ptr[i] = MAKE_PTR(ca->buckets[b].gen, bucket_to_sector(c, b), ca->sb.nr_this_dev); @@ -514,15 +517,21 @@ struct open_bucket { /* * We keep multiple buckets open for writes, and try to segregate different - * write streams for better cache utilization: first we look for a bucket where - * the last write to it was sequential with the current write, and failing that - * we look for a bucket that was last used by the same task. + * write streams for better cache utilization: first we try to segregate flash + * only volume write streams from cached devices, secondly we look for a bucket + * where the last write to it was sequential with the current write, and + * failing that we look for a bucket that was last used by the same task. * * The idea is if you've got multiple tasks pulling data into the cache at the * same time, you'll get better cache utilization if you try to segregate their * data and preserve locality. * - * For example, say you've starting Firefox at the same time you're copying a + * For example, dirty sectors of a flash-only volume are not reclaimable; if + * they are mixed with dirty sectors of a cached device, such buckets stay + * marked as dirty and won't be reclaimed, even though the cached device's + * dirty data has already been written back to the backing device. + * + * And say you're starting Firefox at the same time you're copying a * bunch of files. Firefox will likely end up being fairly hot and stay in the * cache awhile, but the data you copied might not be; if you wrote all that * data to the same buckets it'd get invalidated at the same time.
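A reduced sketch of the selection order described in the comment above, as implemented by pick_data_bucket() in the next hunk; the fields are stand-ins for bcache's key comparison and UUID_FLASH_ONLY() checks:

#include <stddef.h>

struct open_bucket_sketch {
	int owner_is_flash_only;	/* from the last key written */
	int key_matches_search;		/* last write was sequential */
	int same_write_point;		/* last used by the same task */
};

static struct open_bucket_sketch *pick(struct open_bucket_sketch *b, int n,
				       int search_is_flash_only)
{
	struct open_bucket_sketch *same_task = NULL;
	int i;

	for (i = n - 1; i >= 0; i--) {
		struct open_bucket_sketch *ob = &b[i];

		/* 1) never mix flash-only and cached-device streams */
		if (ob->owner_is_flash_only != search_is_flash_only)
			continue;
		/* 2) prefer a bucket the current write is sequential with */
		if (ob->key_matches_search)
			return ob;
		/* 3) remember a bucket last used by the same task */
		if (ob->same_write_point)
			same_task = ob;
	}
	return same_task;	/* may be NULL: caller allocates a new bucket */
}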
@@ -539,7 +548,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c, struct open_bucket *ret, *ret_task = NULL; list_for_each_entry_reverse(ret, &c->data_buckets, list) - if (!bkey_cmp(&ret->key, search)) + if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) != + UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)])) + continue; + else if (!bkey_cmp(&ret->key, search)) goto found; else if (ret->last_write_point == write_point) ret_task = ret; diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index abd31e847f96..e4a3f692057b 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -906,7 +906,7 @@ void bcache_write_super(struct cache_set *); int bch_flash_dev_create(struct cache_set *c, uint64_t size); -int bch_cached_dev_attach(struct cached_dev *, struct cache_set *); +int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *); void bch_cached_dev_detach(struct cached_dev *); void bch_cached_dev_run(struct cached_dev *); void bcache_device_stop(struct bcache_device *); diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 658c54b3b07a..89d088cf95d9 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -807,7 +807,10 @@ int bch_btree_cache_alloc(struct cache_set *c) c->shrink.scan_objects = bch_mca_scan; c->shrink.seeks = 4; c->shrink.batch = c->btree_pages * 2; - register_shrinker(&c->shrink); + + if (register_shrinker(&c->shrink)) + pr_warn("bcache: %s: could not register shrinker", + __func__); return 0; } @@ -1865,14 +1868,17 @@ void bch_initial_gc_finish(struct cache_set *c) */ for_each_cache(ca, c, i) { for_each_bucket(b, ca) { - if (fifo_full(&ca->free[RESERVE_PRIO])) + if (fifo_full(&ca->free[RESERVE_PRIO]) && + fifo_full(&ca->free[RESERVE_BTREE])) break; if (bch_can_invalidate_bucket(ca, b) && !GC_MARK(b)) { __bch_invalidate_one_bucket(ca, b); - fifo_push(&ca->free[RESERVE_PRIO], - b - ca->buckets); + if (!fifo_push(&ca->free[RESERVE_PRIO], + b - ca->buckets)) + fifo_push(&ca->free[RESERVE_BTREE], + b - ca->buckets); } } } diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index 41c238fc3733..f9d391711595 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -585,7 +585,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey return false; for (i = 0; i < KEY_PTRS(l); i++) - if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] || + if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] || PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i)) return false; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 02a98ddb592d..03cc0722ae48 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -507,7 +507,7 @@ static void journal_reclaim(struct cache_set *c) continue; ja->cur_idx = next; - k->ptr[n++] = PTR(0, + k->ptr[n++] = MAKE_PTR(0, bucket_to_sector(c, ca->sb.d[ja->cur_idx]), ca->sb.nr_this_dev); } diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 3475d6628e21..5b63afff46d5 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -463,6 +463,7 @@ struct search { unsigned recoverable:1; unsigned write:1; unsigned read_dirty_data:1; + unsigned cache_missed:1; unsigned long start_time; @@ -567,6 +568,7 @@ static void cache_lookup(struct closure *cl) { struct search *s = container_of(cl, struct search, iop.cl); struct bio *bio = &s->bio.bio; + struct cached_dev *dc; int ret; bch_btree_op_init(&s->op, -1); @@ -579,6 
+581,27 @@ static void cache_lookup(struct closure *cl) return; } + /* + * We might hit an error when searching the btree; if that happens we + * get a negative ret. In that case we should not recover data from the + * backing device (when the cache device is dirty), because we don't + * know whether the bkeys the read request covered are all clean. + * + * Note that when this happens, s->iop.status still holds its initial + * value from before we submitted s->bio.bio. + */ + if (ret < 0) { + BUG_ON(ret == -EINTR); + if (s->d && s->d->c && + !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) { + dc = container_of(s->d, struct cached_dev, disk); + if (dc && atomic_read(&dc->has_dirty)) + s->recoverable = false; + } + if (!s->iop.status) + s->iop.status = BLK_STS_IOERR; + } + closure_return(cl); } @@ -628,11 +651,11 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio) static void search_free(struct closure *cl) { struct search *s = container_of(cl, struct search, cl); - bio_complete(s); if (s->iop.bio) bio_put(s->iop.bio); + bio_complete(s); closure_debug_destroy(cl); mempool_free(s, s->d->c->search); } @@ -649,6 +672,7 @@ static inline struct search *search_alloc(struct bio *bio, s->orig_bio = bio; s->cache_miss = NULL; + s->cache_missed = 0; s->d = d; s->recoverable = 1; s->write = op_is_write(bio_op(bio)); @@ -699,7 +723,14 @@ static void cached_dev_read_error(struct closure *cl) struct search *s = container_of(cl, struct search, cl); struct bio *bio = &s->bio.bio; - if (s->recoverable) { + /* + * If the read request hit dirty data (s->read_dirty_data is true), + * then retrying a failed read from the backing device may return + * stale data. So read-failure recovery is only permitted when the + * read request hit clean data in the cache device, or when a cache + * read race happened. + */ + if (s->recoverable && !s->read_dirty_data) { /* Retry from the backing device: */ trace_bcache_read_retry(s->orig_bio); @@ -760,7 +791,7 @@ static void cached_dev_read_done_bh(struct closure *cl) struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); bch_mark_cache_accounting(s->iop.c, s->d, - !s->cache_miss, s->iop.bypass); + !s->cache_missed, s->iop.bypass); trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass); if (s->iop.status) @@ -779,6 +810,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); struct bio *miss, *cache_bio; + s->cache_missed = 1; + if (s->cache_miss || s->iop.bypass) { miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split); ret = miss == bio ?
MAP_DONE : MAP_CONTINUE; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index fc0a31b13ac4..fe6e4c319b7c 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -893,6 +893,12 @@ static void cached_dev_detach_finish(struct work_struct *w) mutex_lock(&bch_register_lock); + cancel_delayed_work_sync(&dc->writeback_rate_update); + if (!IS_ERR_OR_NULL(dc->writeback_thread)) { + kthread_stop(dc->writeback_thread); + dc->writeback_thread = NULL; + } + memset(&dc->sb.set_uuid, 0, 16); SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); @@ -933,15 +939,18 @@ void bch_cached_dev_detach(struct cached_dev *dc) cached_dev_put(dc); } -int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) +int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, + uint8_t *set_uuid) { uint32_t rtime = cpu_to_le32(get_seconds()); struct uuid_entry *u; char buf[BDEVNAME_SIZE]; + struct cached_dev *exist_dc, *t; bdevname(dc->bdev, buf); - if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)) + if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) || + (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))) return -ENOENT; if (dc->disk.c) { @@ -961,6 +970,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) return -EINVAL; } + /* Check whether already attached */ + list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { + if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { + pr_err("Tried to attach %s but duplicate UUID already attached", + buf); + + return -EINVAL; + } + } + u = uuid_find(c, dc->sb.uuid); if (u && @@ -1173,7 +1192,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, list_add(&dc->list, &uncached_devices); list_for_each_entry(c, &bch_cache_sets, list) - bch_cached_dev_attach(dc, c); + bch_cached_dev_attach(dc, c, NULL); if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE || BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) @@ -1181,7 +1200,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, return; err: - pr_notice("error opening %s: %s", bdevname(bdev, name), err); + pr_notice("error %s: %s", bdevname(bdev, name), err); bcache_device_stop(&dc->disk); } @@ -1695,7 +1714,7 @@ static void run_cache_set(struct cache_set *c) bcache_write_super(c); list_for_each_entry_safe(dc, t, &uncached_devices, list) - bch_cached_dev_attach(dc, c); + bch_cached_dev_attach(dc, c, NULL); flash_devs_run(c); @@ -1812,6 +1831,7 @@ void bch_cache_release(struct kobject *kobj) static int cache_alloc(struct cache *ca) { size_t free; + size_t btree_buckets; struct bucket *b; __module_get(THIS_MODULE); @@ -1819,9 +1839,19 @@ static int cache_alloc(struct cache *ca) bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8); + /* + * when ca->sb.njournal_buckets is not zero, journal exists, + * and in bch_journal_replay(), tree node may split, + * so bucket of RESERVE_BTREE type is needed, + * the worst situation is all journal buckets are valid journal, + * and all the keys need to replay, + * so the number of RESERVE_BTREE type buckets should be as much + * as journal buckets + */ + btree_buckets = ca->sb.njournal_buckets ?: 8; free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; - if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) || + if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) || !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || @@ -1849,6 +1879,8 @@ 
static int register_cache(struct cache_sb *sb, struct page *sb_page, const char *err = NULL; /* must be set for any error case */ int ret = 0; + bdevname(bdev, name); + memcpy(&ca->sb, sb, sizeof(struct cache_sb)); ca->bdev = bdev; ca->bdev->bd_holder = ca; @@ -1857,11 +1889,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, ca->sb_bio.bi_io_vec[0].bv_page = sb_page; get_page(sb_page); - if (blk_queue_discard(bdev_get_queue(ca->bdev))) + if (blk_queue_discard(bdev_get_queue(bdev))) ca->discard = CACHE_DISCARD(&ca->sb); ret = cache_alloc(ca); if (ret != 0) { + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); if (ret == -ENOMEM) err = "cache_alloc(): -ENOMEM"; else @@ -1884,14 +1917,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, goto out; } - pr_info("registered cache device %s", bdevname(bdev, name)); + pr_info("registered cache device %s", name); out: kobject_put(&ca->kobj); err: if (err) - pr_notice("error opening %s: %s", bdevname(bdev, name), err); + pr_notice("error %s: %s", name, err); return ret; } @@ -1980,6 +2013,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (err) goto err_close; + err = "failed to register device"; if (SB_IS_BDEV(sb)) { struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); if (!dc) @@ -1994,7 +2028,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, goto err_close; if (register_cache(sb, sb_page, bdev, ca) != 0) - goto err_close; + goto err; } out: if (sb_page) @@ -2007,7 +2041,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, err_close: blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); err: - pr_info("error opening %s: %s", path, err); + pr_info("error %s: %s", path, err); ret = -EINVAL; goto out; } @@ -2085,6 +2119,7 @@ static void bcache_exit(void) if (bcache_major) unregister_blkdev(bcache_major, "bcache"); unregister_reboot_notifier(&reboot); + mutex_destroy(&bch_register_lock); } static int __init bcache_init(void) @@ -2103,14 +2138,15 @@ static int __init bcache_init(void) bcache_major = register_blkdev(0, "bcache"); if (bcache_major < 0) { unregister_reboot_notifier(&reboot); + mutex_destroy(&bch_register_lock); return bcache_major; } if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) || !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || - sysfs_create_files(bcache_kobj, files) || bch_request_init() || - bch_debug_init(bcache_kobj)) + bch_debug_init(bcache_kobj) || + sysfs_create_files(bcache_kobj, files)) goto err; return 0; diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 234b2f5b286d..5d81cd06af00 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -193,7 +193,7 @@ STORE(__cached_dev) { struct cached_dev *dc = container_of(kobj, struct cached_dev, disk.kobj); - ssize_t v = size; + ssize_t v; struct cache_set *c; struct kobj_uevent_env *env; @@ -265,17 +265,20 @@ STORE(__cached_dev) } if (attr == &sysfs_attach) { - if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16) + uint8_t set_uuid[16]; + + if (bch_parse_uuid(buf, set_uuid) < 16) return -EINVAL; + v = -ENOENT; list_for_each_entry(c, &bch_cache_sets, list) { - v = bch_cached_dev_attach(dc, c); + v = bch_cached_dev_attach(dc, c, set_uuid); if (!v) return size; } pr_err("Can't attach %s: cache set not found", buf); - size = v; + return v; } if (attr == &sysfs_detach && dc->disk.c) diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 
70454f2ad2fa..930b00f6a3a2 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -420,18 +420,27 @@ static int bch_writeback_thread(void *arg) while (!kthread_should_stop()) { down_write(&dc->writeback_lock); - if (!atomic_read(&dc->has_dirty) || - (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && - !dc->writeback_running)) { + set_current_state(TASK_INTERRUPTIBLE); + /* + * If the bcache device is detaching, skip here and continue + * to perform writeback. Otherwise, if there is no dirty data + * in the cache, or there is dirty data but writeback is + * disabled, the writeback thread should sleep here and wait + * for others to wake it up. + */ + if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && + (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) { up_write(&dc->writeback_lock); - set_current_state(TASK_INTERRUPTIBLE); - if (kthread_should_stop()) + if (kthread_should_stop()) { + set_current_state(TASK_RUNNING); return 0; + } schedule(); continue; } + set_current_state(TASK_RUNNING); searched_full_index = refill_dirty(dc); @@ -441,6 +450,14 @@ static int bch_writeback_thread(void *arg) cached_dev_put(dc); SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); bch_write_bdev_super(dc, NULL); + /* + * If the bcache device is detaching via the sysfs interface, + * the writeback thread should stop once there is no dirty + * data left in the cache. The BCACHE_DEV_DETACHING flag is + * set in bch_cached_dev_detach(). + */ + if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) + break; } up_write(&dc->writeback_lock); diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index d2121637b4ab..0cabf31fb163 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -625,7 +625,7 @@ static int bitmap_read_sb(struct bitmap *bitmap) err = read_sb_page(bitmap->mddev, offset, sb_page, - 0, PAGE_SIZE); + 0, sizeof(bitmap_super_t)); } if (err) return err; @@ -1816,6 +1816,12 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot) BUG_ON(file && mddev->bitmap_info.offset); + if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { + pr_notice("md/raid:%s: array with journal cannot have bitmap\n", + mdname(mddev)); + return ERR_PTR(-EBUSY); + } + bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL); if (!bitmap) return ERR_PTR(-ENOMEM); @@ -2123,7 +2129,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, if (store.sb_page && bitmap->storage.sb_page) memcpy(page_address(store.sb_page), page_address(bitmap->storage.sb_page), - PAGE_SIZE); + sizeof(bitmap_super_t)); bitmap_file_unmap(&bitmap->storage); bitmap->storage = store; @@ -2152,6 +2158,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, for (k = 0; k < page; k++) { kfree(new_bp[k].map); } + kfree(new_bp); /* restore some fields from old_counts */ bitmap->counts.bp = old_counts.bp; @@ -2202,6 +2209,14 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, block += old_blocks; } + if (bitmap->counts.bp != old_counts.bp) { + unsigned long k; + for (k = 0; k < old_counts.pages; k++) + if (!old_counts.bp[k].hijacked) + kfree(old_counts.bp[k].map); + kfree(old_counts.bp); + } + if (!init) { int i; while (block < (chunks << chunkshift)) { diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c new file mode 100644 index 000000000000..20e05936551f --- /dev/null +++ b/drivers/md/dm-android-verity.c @@ -0,0 +1,925 @@ +/* + * Copyright (C) 2015 Google, Inc.
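Stepping back to the writeback-thread hunk above: moving set_current_state(TASK_INTERRUPTIBLE) in front of the condition test closes a lost-wakeup window, since a wakeup arriving between the test and schedule() now simply puts the task back to runnable. Userspace condition variables enforce the same ordering discipline; a small pthread analogue (illustrative only, not kernel code):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool has_dirty, should_stop;

void writeback_wait(void)
{
        pthread_mutex_lock(&lock);
        /* Re-test the condition under the lock before every sleep, so a
         * wakeup between "check" and "wait" cannot be lost. */
        while (!has_dirty && !should_stop)
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
}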
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "dm-verity.h" +#include "dm-android-verity.h" + +static char verifiedbootstate[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char veritymode[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char veritykeyid[VERITY_DEFAULT_KEY_ID_LENGTH]; +static char buildvariant[BUILD_VARIANT]; + +static bool target_added; +static bool verity_enabled = true; +struct dentry *debug_dir; +static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv); + +static struct target_type android_verity_target = { + .name = "android-verity", + .version = {1, 0, 0}, + .module = THIS_MODULE, + .ctr = android_verity_ctr, + .dtr = verity_dtr, + .map = verity_map, + .status = verity_status, + .prepare_ioctl = verity_prepare_ioctl, + .iterate_devices = verity_iterate_devices, + .io_hints = verity_io_hints, +}; + +static int __init verified_boot_state_param(char *line) +{ + strlcpy(verifiedbootstate, line, sizeof(verifiedbootstate)); + return 1; +} + +__setup("androidboot.verifiedbootstate=", verified_boot_state_param); + +static int __init verity_mode_param(char *line) +{ + strlcpy(veritymode, line, sizeof(veritymode)); + return 1; +} + +__setup("androidboot.veritymode=", verity_mode_param); + +static int __init verity_keyid_param(char *line) +{ + strlcpy(veritykeyid, line, sizeof(veritykeyid)); + return 1; +} + +__setup("veritykeyid=", verity_keyid_param); + +static int __init verity_buildvariant(char *line) +{ + strlcpy(buildvariant, line, sizeof(buildvariant)); + return 1; +} + +__setup("buildvariant=", verity_buildvariant); + +static inline bool default_verity_key_id(void) +{ + return veritykeyid[0] != '\0'; +} + +static inline bool is_eng(void) +{ + static const char typeeng[] = "eng"; + + return !strncmp(buildvariant, typeeng, sizeof(typeeng)); +} + +static inline bool is_userdebug(void) +{ + static const char typeuserdebug[] = "userdebug"; + + return !strncmp(buildvariant, typeuserdebug, sizeof(typeuserdebug)); +} + +static inline bool is_unlocked(void) +{ + static const char unlocked[] = "orange"; + + return !strncmp(verifiedbootstate, unlocked, sizeof(unlocked)); +} + +static int read_block_dev(struct bio_read *payload, struct block_device *bdev, + sector_t offset, int length) +{ + struct bio *bio; + int err = 0, i; + + payload->number_of_pages = DIV_ROUND_UP(length, PAGE_SIZE); + + bio = bio_alloc(GFP_KERNEL, payload->number_of_pages); + if (!bio) { + DMERR("Error while allocating bio"); + return -ENOMEM; + } + + bio_set_dev(bio, bdev); + bio->bi_iter.bi_sector = offset; + bio_set_op_attrs(bio, REQ_OP_READ, 0); + + payload->page_io = kzalloc(sizeof(struct page *) * + payload->number_of_pages, GFP_KERNEL); + if (!payload->page_io) { + DMERR("page_io array alloc failed"); + err = -ENOMEM; + goto free_bio; + } + + for (i = 0; i < payload->number_of_pages; i++) { + payload->page_io[i] 
= alloc_page(GFP_KERNEL); + if (!payload->page_io[i]) { + DMERR("alloc_page failed"); + err = -ENOMEM; + goto free_pages; + } + if (!bio_add_page(bio, payload->page_io[i], PAGE_SIZE, 0)) { + DMERR("bio_add_page error"); + err = -EIO; + goto free_pages; + } + } + + if (!submit_bio_wait(bio)) + /* success */ + goto free_bio; + DMERR("bio read failed"); + err = -EIO; + +free_pages: + for (i = 0; i < payload->number_of_pages; i++) + if (payload->page_io[i]) + __free_page(payload->page_io[i]); + kfree(payload->page_io); +free_bio: + bio_put(bio); + return err; +} + +static inline u64 fec_div_round_up(u64 x, u64 y) +{ + u64 remainder; + + return div64_u64_rem(x, y, &remainder) + + (remainder > 0 ? 1 : 0); +} + +static inline void populate_fec_metadata(struct fec_header *header, + struct fec_ecc_metadata *ecc) +{ + ecc->blocks = fec_div_round_up(le64_to_cpu(header->inp_size), + FEC_BLOCK_SIZE); + ecc->roots = le32_to_cpu(header->roots); + ecc->start = le64_to_cpu(header->inp_size); +} + +static inline int validate_fec_header(struct fec_header *header, u64 offset) +{ + /* move offset to make the sanity check work for backup header + * as well. */ + offset -= offset % FEC_BLOCK_SIZE; + if (le32_to_cpu(header->magic) != FEC_MAGIC || + le32_to_cpu(header->version) != FEC_VERSION || + le32_to_cpu(header->size) != sizeof(struct fec_header) || + le32_to_cpu(header->roots) == 0 || + le32_to_cpu(header->roots) >= FEC_RSM) + return -EINVAL; + + return 0; +} + +static int extract_fec_header(dev_t dev, struct fec_header *fec, + struct fec_ecc_metadata *ecc) +{ + u64 device_size; + struct bio_read payload; + int i, err = 0; + struct block_device *bdev; + + bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL); + + if (IS_ERR_OR_NULL(bdev)) { + DMERR("bdev get error"); + return PTR_ERR(bdev); + } + + device_size = i_size_read(bdev->bd_inode); + + /* fec metadata size is a power of 2 and PAGE_SIZE + * is a power of 2 as well. 
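fec_div_round_up() above is ordinary round-up division; it goes through div64_u64_rem() because a direct 64-by-64 division is not natively available on all 32-bit targets. A self-contained userspace model with a worked value (standard C only):

#include <inttypes.h>
#include <stdio.h>

/* Round-up division via an explicit remainder, mirroring the
 * div64_u64_rem()-based helper above. */
static uint64_t div_round_up_u64(uint64_t x, uint64_t y)
{
        uint64_t rem = x % y;

        return x / y + (rem > 0 ? 1 : 0);
}

int main(void)
{
        /* e.g. 10000 bytes of input span 3 FEC blocks of 4096 bytes */
        printf("%" PRIu64 "\n", div_round_up_u64(10000, 4096)); /* prints 3 */
        return 0;
}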
+ */ + BUG_ON(FEC_BLOCK_SIZE > PAGE_SIZE); + /* 512 byte sector alignment */ + BUG_ON(((device_size - FEC_BLOCK_SIZE) % (1 << SECTOR_SHIFT)) != 0); + + err = read_block_dev(&payload, bdev, (device_size - + FEC_BLOCK_SIZE) / (1 << SECTOR_SHIFT), FEC_BLOCK_SIZE); + if (err) { + DMERR("Error while reading verity metadata"); + goto error; + } + + BUG_ON(sizeof(struct fec_header) > PAGE_SIZE); + memcpy(fec, page_address(payload.page_io[0]), + sizeof(*fec)); + + ecc->valid = true; + if (validate_fec_header(fec, device_size - FEC_BLOCK_SIZE)) { + /* Try the backup header */ + memcpy(fec, page_address(payload.page_io[0]) + FEC_BLOCK_SIZE + - sizeof(*fec) , + sizeof(*fec)); + if (validate_fec_header(fec, device_size - + sizeof(struct fec_header))) + ecc->valid = false; + } + + if (ecc->valid) + populate_fec_metadata(fec, ecc); + + for (i = 0; i < payload.number_of_pages; i++) + __free_page(payload.page_io[i]); + kfree(payload.page_io); + +error: + blkdev_put(bdev, FMODE_READ); + return err; +} +static void find_metadata_offset(struct fec_header *fec, + struct block_device *bdev, u64 *metadata_offset) +{ + u64 device_size; + + device_size = i_size_read(bdev->bd_inode); + + if (le32_to_cpu(fec->magic) == FEC_MAGIC) + *metadata_offset = le64_to_cpu(fec->inp_size) - + VERITY_METADATA_SIZE; + else + *metadata_offset = device_size - VERITY_METADATA_SIZE; +} + +static int find_size(dev_t dev, u64 *device_size) +{ + struct block_device *bdev; + + bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL); + if (IS_ERR_OR_NULL(bdev)) { + DMERR("blkdev_get_by_dev failed"); + return PTR_ERR(bdev); + } + + *device_size = i_size_read(bdev->bd_inode); + *device_size >>= SECTOR_SHIFT; + + DMINFO("blkdev size in sectors: %llu", *device_size); + blkdev_put(bdev, FMODE_READ); + return 0; +} + +static int verify_header(struct android_metadata_header *header) +{ + int retval = -EINVAL; + + if (is_userdebug() && le32_to_cpu(header->magic_number) == + VERITY_METADATA_MAGIC_DISABLE) + return VERITY_STATE_DISABLE; + + if (!(le32_to_cpu(header->magic_number) == + VERITY_METADATA_MAGIC_NUMBER) || + (le32_to_cpu(header->magic_number) == + VERITY_METADATA_MAGIC_DISABLE)) { + DMERR("Incorrect magic number"); + return retval; + } + + if (le32_to_cpu(header->protocol_version) != + VERITY_METADATA_VERSION) { + DMERR("Unsupported version %u", + le32_to_cpu(header->protocol_version)); + return retval; + } + + return 0; +} + +static int extract_metadata(dev_t dev, struct fec_header *fec, + struct android_metadata **metadata, + bool *verity_enabled) +{ + struct block_device *bdev; + struct android_metadata_header *header; + int i; + u32 table_length, copy_length, offset; + u64 metadata_offset; + struct bio_read payload; + int err = 0; + + bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL); + + if (IS_ERR_OR_NULL(bdev)) { + DMERR("blkdev_get_by_dev failed"); + return -ENODEV; + } + + find_metadata_offset(fec, bdev, &metadata_offset); + + /* Verity metadata size is a power of 2 and PAGE_SIZE + * is a power of 2 as well. + * PAGE_SIZE is also a multiple of 512 bytes. 
+ */ + if (VERITY_METADATA_SIZE > PAGE_SIZE) + BUG_ON(VERITY_METADATA_SIZE % PAGE_SIZE != 0); + /* 512 byte sector alignment */ + BUG_ON(metadata_offset % (1 << SECTOR_SHIFT) != 0); + + err = read_block_dev(&payload, bdev, metadata_offset / + (1 << SECTOR_SHIFT), VERITY_METADATA_SIZE); + if (err) { + DMERR("Error while reading verity metadata"); + goto blkdev_release; + } + + header = kzalloc(sizeof(*header), GFP_KERNEL); + if (!header) { + DMERR("kzalloc failed for header"); + err = -ENOMEM; + goto free_payload; + } + + memcpy(header, page_address(payload.page_io[0]), + sizeof(*header)); + + DMINFO("bio magic_number:%u protocol_version:%d table_length:%u", + le32_to_cpu(header->magic_number), + le32_to_cpu(header->protocol_version), + le32_to_cpu(header->table_length)); + + err = verify_header(header); + + if (err == VERITY_STATE_DISABLE) { + DMERR("Mounting root with verity disabled"); + *verity_enabled = false; + /* We would still have to read the metadata to figure out + * the data block size. Or maybe we could map the entire + * partition, similar to mounting the device. + * + * Reset the error as well, since the verity_enabled flag + * has been changed. + */ + err = 0; + } else if (err) + goto free_header; + + *metadata = kzalloc(sizeof(**metadata), GFP_KERNEL); + if (!*metadata) { + DMERR("kzalloc for metadata failed"); + err = -ENOMEM; + goto free_header; + } + + (*metadata)->header = header; + table_length = le32_to_cpu(header->table_length); + + if (table_length == 0 || + table_length > (VERITY_METADATA_SIZE - + sizeof(struct android_metadata_header))) { + DMERR("table_length too long"); + err = -EINVAL; + goto free_metadata; + } + + (*metadata)->verity_table = kzalloc(table_length + 1, GFP_KERNEL); + + if (!(*metadata)->verity_table) { + DMERR("kzalloc verity_table failed"); + err = -ENOMEM; + goto free_metadata; + } + + if (sizeof(struct android_metadata_header) + + table_length <= PAGE_SIZE) { + memcpy((*metadata)->verity_table, + page_address(payload.page_io[0]) + + sizeof(struct android_metadata_header), + table_length); + } else { + copy_length = PAGE_SIZE - + sizeof(struct android_metadata_header); + memcpy((*metadata)->verity_table, + page_address(payload.page_io[0]) + + sizeof(struct android_metadata_header), + copy_length); + table_length -= copy_length; + offset = copy_length; + i = 1; + while (table_length != 0) { + if (table_length > PAGE_SIZE) { + memcpy((*metadata)->verity_table + offset, + page_address(payload.page_io[i]), + PAGE_SIZE); + offset += PAGE_SIZE; + table_length -= PAGE_SIZE; + } else { + memcpy((*metadata)->verity_table + offset, + page_address(payload.page_io[i]), + table_length); + table_length = 0; + } + i++; + } + } + (*metadata)->verity_table[table_length] = '\0'; + + DMINFO("verity_table: %s", (*metadata)->verity_table); + goto free_payload; + +free_metadata: + kfree(*metadata); +free_header: + kfree(header); +free_payload: + for (i = 0; i < payload.number_of_pages; i++) + if (payload.page_io[i]) + __free_page(payload.page_io[i]); + kfree(payload.page_io); +blkdev_release: + blkdev_put(bdev, FMODE_READ); + return err; +} + +/* helper functions to extract properties from dts */ +const char *find_dt_value(const char *name) +{ + struct device_node *firmware; + const char *value; + + firmware = of_find_node_by_path("/firmware/android"); + if (!firmware) + return NULL; + value = of_get_property(firmware, name, NULL); + of_node_put(firmware); + + return value; +} + +static int verity_mode(void) +{ + static const char enforcing[] = "enforcing"; + static const char
verified_mode_prop[] = "veritymode"; + const char *value; + + value = find_dt_value(verified_mode_prop); + if (!value) + value = veritymode; + if (!strncmp(value, enforcing, sizeof(enforcing) - 1)) + return DM_VERITY_MODE_RESTART; + + return DM_VERITY_MODE_EIO; +} + +static void handle_error(void) +{ + int mode = verity_mode(); + if (mode == DM_VERITY_MODE_RESTART) { + DMERR("triggering restart"); + kernel_restart("dm-verity device corrupted"); + } else { + DMERR("Mounting verity root failed"); + } +} + +static struct public_key_signature *table_make_digest( + enum hash_algo hash, + const void *table, + unsigned long table_len) +{ + struct public_key_signature *pks = NULL; + struct crypto_shash *tfm; + struct shash_desc *desc; + size_t digest_size, desc_size; + int ret; + + /* Allocate the hashing algorithm we're going to need and find out how + * big the hash operational data will be. + */ + tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0); + if (IS_ERR(tfm)) + return ERR_CAST(tfm); + + desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); + digest_size = crypto_shash_digestsize(tfm); + + /* We allocate the hash operational data storage on the end of our + * context data and the digest output buffer on the end of that. + */ + ret = -ENOMEM; + pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL); + if (!pks) + goto error; + + pks->pkey_algo = "rsa"; + pks->hash_algo = hash_algo_name[hash]; + pks->digest = (u8 *)pks + sizeof(*pks) + desc_size; + pks->digest_size = digest_size; + + desc = (struct shash_desc *)(pks + 1); + desc->tfm = tfm; + desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + ret = crypto_shash_init(desc); + if (ret < 0) + goto error; + + ret = crypto_shash_finup(desc, table, table_len, pks->digest); + if (ret < 0) + goto error; + + crypto_free_shash(tfm); + return pks; + +error: + kfree(pks); + crypto_free_shash(tfm); + return ERR_PTR(ret); +} + + +static int verify_verity_signature(char *key_id, + struct android_metadata *metadata) +{ + struct public_key_signature *pks = NULL; + int retval = -EINVAL; + + if (!key_id) + goto error; + + pks = table_make_digest(HASH_ALGO_SHA256, + (const void *)metadata->verity_table, + le32_to_cpu(metadata->header->table_length)); + if (IS_ERR(pks)) { + DMERR("hashing failed"); + retval = PTR_ERR(pks); + pks = NULL; + goto error; + } + + pks->s = kmemdup(&metadata->header->signature[0], RSANUMBYTES, GFP_KERNEL); + if (!pks->s) { + DMERR("Error allocating memory for signature"); + goto error; + } + pks->s_size = RSANUMBYTES; + + retval = verify_signature_one(pks, NULL, key_id); + kfree(pks->s); +error: + kfree(pks); + return retval; +} + +static inline bool test_mult_overflow(sector_t a, u32 b) +{ + sector_t r = (sector_t)~0ULL; + + sector_div(r, b); + return a > r; +} + +static int add_as_linear_device(struct dm_target *ti, char *dev) +{ + /* Move to linear mapping defines */ + char *linear_table_args[DM_LINEAR_ARGS] = {dev, + DM_LINEAR_TARGET_OFFSET}; + int err = 0; + + android_verity_target.dtr = dm_linear_dtr, + android_verity_target.map = dm_linear_map, + android_verity_target.status = dm_linear_status, + android_verity_target.end_io = dm_linear_end_io, + android_verity_target.prepare_ioctl = dm_linear_prepare_ioctl, + android_verity_target.iterate_devices = dm_linear_iterate_devices, + android_verity_target.direct_access = dm_linear_dax_direct_access, + android_verity_target.dax_copy_from_iter = dm_linear_dax_copy_from_iter, + android_verity_target.io_hints = NULL; + + set_disk_ro(dm_disk(dm_table_get_md(ti->table)), 0); + + err
= dm_linear_ctr(ti, DM_LINEAR_ARGS, linear_table_args); + + if (!err) { + DMINFO("Added android-verity as a linear target"); + target_added = true; + } else + DMERR("Failed to add android-verity as linear target"); + + return err; +} + +static int create_linear_device(struct dm_target *ti, dev_t dev, + char *target_device) +{ + u64 device_size = 0; + int err = find_size(dev, &device_size); + + if (err) { + DMERR("error finding bdev size"); + handle_error(); + return err; + } + + ti->len = device_size; + err = add_as_linear_device(ti, target_device); + if (err) { + handle_error(); + return err; + } + verity_enabled = false; + return 0; +} + +/* + * Target parameters: + * <key id> Key id of the public key in the system keyring. + * Verity metadata's signature would be verified against + * this. If the key id contains spaces, replace them + * with '#'. + * <block device> The block device for which dm-verity is being set up. + */ +static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv) +{ + dev_t uninitialized_var(dev); + struct android_metadata *metadata = NULL; + int err = 0, i, mode; + char *key_id = NULL, *table_ptr, dummy, *target_device; + char *verity_table_args[VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS]; + /* One for specifying number of opt args and one for mode */ + sector_t data_sectors; + u32 data_block_size; + unsigned int no_of_args = VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS; + struct fec_header uninitialized_var(fec); + struct fec_ecc_metadata uninitialized_var(ecc); + char buf[FEC_ARG_LENGTH], *buf_ptr; + unsigned long long tmpll; + + if (argc == 1) { + /* Use the default keyid */ + if (default_verity_key_id()) + key_id = veritykeyid; + else if (!is_eng()) { + DMERR("veritykeyid= is not set"); + handle_error(); + return -EINVAL; + } + target_device = argv[0]; + } else if (argc == 2) { + key_id = argv[0]; + target_device = argv[1]; + } else { + DMERR("Incorrect number of arguments"); + handle_error(); + return -EINVAL; + } + + dev = name_to_dev_t(target_device); + if (!dev) { + DMERR("no dev found for %s", target_device); + handle_error(); + return -EINVAL; + } + + if (is_eng()) + return create_linear_device(ti, dev, target_device); + + strreplace(key_id, '#', ' '); + + DMINFO("key:%s dev:%s", key_id, target_device); + + if (extract_fec_header(dev, &fec, &ecc)) { + DMERR("Error while extracting fec header"); + handle_error(); + return -EINVAL; + } + + err = extract_metadata(dev, &fec, &metadata, &verity_enabled); + + if (err) { + /* Allow invalid metadata when the device is unlocked */ + if (is_unlocked()) { + DMWARN("Allow invalid metadata when unlocked"); + return create_linear_device(ti, dev, target_device); + } + DMERR("Error while extracting metadata"); + handle_error(); + goto free_metadata; + } + + if (verity_enabled) { + err = verify_verity_signature(key_id, metadata); + + if (err) { + DMERR("Signature verification failed"); + handle_error(); + goto free_metadata; + } else + DMINFO("Signature verification success"); + } + + table_ptr = metadata->verity_table; + + for (i = 0; i < VERITY_TABLE_ARGS; i++) { + verity_table_args[i] = strsep(&table_ptr, " "); + if (verity_table_args[i] == NULL) + break; + } + + if (i != VERITY_TABLE_ARGS) { + DMERR("Verity table not in the expected format"); + err = -EINVAL; + handle_error(); + goto free_metadata; + } + + if (sscanf(verity_table_args[5], "%llu%c", &tmpll, &dummy) + != 1) { + DMERR("Verity table not in the expected format"); + handle_error(); + err = -EINVAL; + goto free_metadata; + } + + if (tmpll > ULONG_MAX)
{ + DMERR(" too large. Forgot to turn on CONFIG_LBDAF?"); + handle_error(); + err = -EINVAL; + goto free_metadata; + } + + data_sectors = tmpll; + + if (sscanf(verity_table_args[3], "%u%c", &data_block_size, &dummy) + != 1) { + DMERR("Verity table not in the expected format"); + handle_error(); + err = -EINVAL; + goto free_metadata; + } + + if (test_mult_overflow(data_sectors, data_block_size >> + SECTOR_SHIFT)) { + DMERR("data_sectors too large"); + handle_error(); + err = -EOVERFLOW; + goto free_metadata; + } + + data_sectors *= data_block_size >> SECTOR_SHIFT; + DMINFO("Data sectors %llu", (unsigned long long)data_sectors); + + /* update target length */ + ti->len = data_sectors; + + /* Setup linear target and free */ + if (!verity_enabled) { + err = add_as_linear_device(ti, target_device); + goto free_metadata; + } + + /*substitute data_dev and hash_dev*/ + verity_table_args[1] = target_device; + verity_table_args[2] = target_device; + + mode = verity_mode(); + + if (ecc.valid && IS_BUILTIN(CONFIG_DM_VERITY_FEC)) { + if (mode) { + err = snprintf(buf, FEC_ARG_LENGTH, + "%u %s " VERITY_TABLE_OPT_FEC_FORMAT, + 1 + VERITY_TABLE_OPT_FEC_ARGS, + mode == DM_VERITY_MODE_RESTART ? + VERITY_TABLE_OPT_RESTART : + VERITY_TABLE_OPT_LOGGING, + target_device, + ecc.start / FEC_BLOCK_SIZE, ecc.blocks, + ecc.roots); + } else { + err = snprintf(buf, FEC_ARG_LENGTH, + "%u " VERITY_TABLE_OPT_FEC_FORMAT, + VERITY_TABLE_OPT_FEC_ARGS, target_device, + ecc.start / FEC_BLOCK_SIZE, ecc.blocks, + ecc.roots); + } + } else if (mode) { + err = snprintf(buf, FEC_ARG_LENGTH, + "2 " VERITY_TABLE_OPT_IGNZERO " %s", + mode == DM_VERITY_MODE_RESTART ? + VERITY_TABLE_OPT_RESTART : VERITY_TABLE_OPT_LOGGING); + } else { + err = snprintf(buf, FEC_ARG_LENGTH, "1 %s", + "ignore_zero_blocks"); + } + + if (err < 0 || err >= FEC_ARG_LENGTH) + goto free_metadata; + + buf_ptr = buf; + + for (i = VERITY_TABLE_ARGS; i < (VERITY_TABLE_ARGS + + VERITY_TABLE_OPT_FEC_ARGS + 2); i++) { + verity_table_args[i] = strsep(&buf_ptr, " "); + if (verity_table_args[i] == NULL) { + no_of_args = i; + break; + } + } + + err = verity_ctr(ti, no_of_args, verity_table_args); + if (err) { + DMERR("android-verity failed to create a verity target"); + } else { + target_added = true; + DMINFO("android-verity created as verity target"); + } + +free_metadata: + if (metadata) { + kfree(metadata->header); + kfree(metadata->verity_table); + } + kfree(metadata); + return err; +} + +static int __init dm_android_verity_init(void) +{ + int r; + struct dentry *file; + + r = dm_register_target(&android_verity_target); + if (r < 0) + DMERR("register failed %d", r); + + /* Tracks the status of the last added target */ + debug_dir = debugfs_create_dir("android_verity", NULL); + + if (IS_ERR_OR_NULL(debug_dir)) { + DMERR("Cannot create android_verity debugfs directory: %ld", + PTR_ERR(debug_dir)); + goto end; + } + + file = debugfs_create_bool("target_added", S_IRUGO, debug_dir, + &target_added); + + if (IS_ERR_OR_NULL(file)) { + DMERR("Cannot create android_verity debugfs directory: %ld", + PTR_ERR(debug_dir)); + debugfs_remove_recursive(debug_dir); + goto end; + } + + file = debugfs_create_bool("verity_enabled", S_IRUGO, debug_dir, + &verity_enabled); + + if (IS_ERR_OR_NULL(file)) { + DMERR("Cannot create android_verity debugfs directory: %ld", + PTR_ERR(debug_dir)); + debugfs_remove_recursive(debug_dir); + } + +end: + return r; +} + +static void __exit dm_android_verity_exit(void) +{ + if (!IS_ERR_OR_NULL(debug_dir)) + debugfs_remove_recursive(debug_dir); + + 
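For context on the strsep() loop in android_verity_ctr() above: the signed metadata stores the dm-verity table as one space-separated line, which the constructor splits in place into VERITY_TABLE_ARGS tokens before substituting the device names. A small userspace model of that split (the sample table line is made up for illustration and is not from a real device):

#include <stdio.h>
#include <string.h>

#define VERITY_TABLE_ARGS 10

int main(void)
{
        /* version data_dev hash_dev data_blk_sz hash_blk_sz #blocks hash_start alg digest salt */
        char table[] = "1 /dev/xx /dev/xx 4096 4096 262144 262145 sha256 deadbeef cafef00d";
        char *args[VERITY_TABLE_ARGS], *p = table;
        int i;

        for (i = 0; i < VERITY_TABLE_ARGS; i++) {
                args[i] = strsep(&p, " ");
                if (!args[i])
                        break;
        }
        /* As in the constructor: args[3] is the data block size and
         * args[5] the number of data blocks. */
        printf("%d tokens, block size %s, %s blocks\n", i, args[3], args[5]);
        return 0;
}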
dm_unregister_target(&android_verity_target); +} + +module_init(dm_android_verity_init); +module_exit(dm_android_verity_exit); diff --git a/drivers/md/dm-android-verity.h b/drivers/md/dm-android-verity.h new file mode 100644 index 000000000000..ef406c136fcd --- /dev/null +++ b/drivers/md/dm-android-verity.h @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef DM_ANDROID_VERITY_H +#define DM_ANDROID_VERITY_H + +#include + +#define RSANUMBYTES 256 +#define VERITY_METADATA_MAGIC_NUMBER 0xb001b001 +#define VERITY_METADATA_MAGIC_DISABLE 0x46464f56 +#define VERITY_METADATA_VERSION 0 +#define VERITY_STATE_DISABLE 1 +#define DATA_BLOCK_SIZE (4 * 1024) +#define VERITY_METADATA_SIZE (8 * DATA_BLOCK_SIZE) +#define VERITY_TABLE_ARGS 10 +#define VERITY_COMMANDLINE_PARAM_LENGTH 20 +#define BUILD_VARIANT 20 + +/* + * <subject>:<sha1-id> is the format for the identifier. + * subject can either be the Common Name(CN) + Organization Name(O) or + * just the CN if it is prefixed with O + * From https://tools.ietf.org/html/rfc5280#appendix-A + * ub-organization-name-length INTEGER ::= 64 + * ub-common-name-length INTEGER ::= 64 + * + * http://lxr.free-electrons.com/source/crypto/asymmetric_keys/x509_cert_parser.c?v=3.9#L278 + * ctx->o_size + 2 + ctx->cn_size + 1 + * + 41 characters for ":" and sha1 id + * 64 + 2 + 64 + 1 + 1 + 40 (172) + * setting VERITY_DEFAULT_KEY_ID_LENGTH to 200 characters. + */ +#define VERITY_DEFAULT_KEY_ID_LENGTH 200 + +#define FEC_MAGIC 0xFECFECFE +#define FEC_BLOCK_SIZE (4 * 1024) +#define FEC_VERSION 0 +#define FEC_RSM 255 +#define FEC_ARG_LENGTH 300 + +#define VERITY_TABLE_OPT_RESTART "restart_on_corruption" +#define VERITY_TABLE_OPT_LOGGING "ignore_corruption" +#define VERITY_TABLE_OPT_IGNZERO "ignore_zero_blocks" + +#define VERITY_TABLE_OPT_FEC_FORMAT \ + "use_fec_from_device %s fec_start %llu fec_blocks %llu fec_roots %u ignore_zero_blocks" +#define VERITY_TABLE_OPT_FEC_ARGS 9 + +#define VERITY_DEBUG 0 + +#define DM_MSG_PREFIX "android-verity" + +#define DM_LINEAR_ARGS 2 +#define DM_LINEAR_TARGET_OFFSET "0" + +/* + * There can be two formats.
+ * if fec is present + * <data_blocks> <verity_tree> <verity_metadata_32K><fec_data><fec_data_4K> + * if fec is not present + * <data_blocks> <verity_tree> <verity_metadata_32K> + */ +struct fec_header { + __le32 magic; + __le32 version; + __le32 size; + __le32 roots; + __le32 fec_size; + __le64 inp_size; + u8 hash[SHA256_DIGEST_SIZE]; +} __attribute__((packed)); + +struct android_metadata_header { + __le32 magic_number; + __le32 protocol_version; + char signature[RSANUMBYTES]; + __le32 table_length; +}; + +struct android_metadata { + struct android_metadata_header *header; + char *verity_table; +}; + +struct fec_ecc_metadata { + bool valid; + u32 roots; + u64 blocks; + u64 rounds; + u64 start; +}; + +struct bio_read { + struct page **page_io; + int number_of_pages; +}; + +extern struct target_type linear_target; + +extern void dm_linear_dtr(struct dm_target *ti); +extern int dm_linear_map(struct dm_target *ti, struct bio *bio); +extern int dm_linear_end_io(struct dm_target *ti, struct bio *bio, + blk_status_t *error); +extern void dm_linear_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen); +extern int dm_linear_prepare_ioctl(struct dm_target *ti, + struct block_device **bdev, fmode_t *mode); +extern int dm_linear_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data); +extern int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv); +extern long dm_linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, + long nr_pages, void **kaddr, + pfn_t *pfn); +extern size_t dm_linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, + void *addr, size_t bytes, struct iov_iter *i); +#endif /* DM_ANDROID_VERITY_H */ diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index d216a8f7bc22..1e17e6421da3 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -386,9 +386,6 @@ static void __cache_size_refresh(void) static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, enum data_mode *data_mode) { - unsigned noio_flag; - void *ptr; - if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) { *data_mode = DATA_MODE_SLAB; return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); @@ -412,16 +409,15 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, * all allocations done by this process (including pagetables) are done * as if GFP_NOIO was specified.
*/ + if (gfp_mask & __GFP_NORETRY) { + unsigned noio_flag = memalloc_noio_save(); + void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); - if (gfp_mask & __GFP_NORETRY) - noio_flag = memalloc_noio_save(); - - ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); - - if (gfp_mask & __GFP_NORETRY) memalloc_noio_restore(noio_flag); + return ptr; + } - return ptr; + return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); } /* @@ -974,7 +970,8 @@ static void __get_memory_limit(struct dm_bufio_client *c, buffers = c->minimum_buffers; *limit_buffers = buffers; - *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100; + *threshold_buffers = mult_frac(buffers, + DM_BUFIO_WRITEBACK_PERCENT, 100); } /* @@ -1610,7 +1607,8 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, int l; struct dm_buffer *b, *tmp; unsigned long freed = 0; - unsigned long count = nr_to_scan; + unsigned long count = c->n_buffers[LIST_CLEAN] + + c->n_buffers[LIST_DIRTY]; unsigned long retain_target = get_retain_buffers(c); for (l = 0; l < LIST_SIZE; l++) { @@ -1646,8 +1644,11 @@ static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); + unsigned long count = ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]); + unsigned long retain_target = get_retain_buffers(c); - return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]); + return (count < retain_target) ? 0 : (count - retain_target); } /* @@ -1910,19 +1911,15 @@ static int __init dm_bufio_init(void) memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches); memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names); - mem = (__u64)((totalram_pages - totalhigh_pages) * - DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT; + mem = (__u64)mult_frac(totalram_pages - totalhigh_pages, + DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT; if (mem > ULONG_MAX) mem = ULONG_MAX; #ifdef CONFIG_MMU - /* - * Get the size of vmalloc space the same way as VMALLOC_TOTAL - * in fs/proc/internal.h - */ - if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100) - mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100; + if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100)) + mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100); #endif dm_bufio_default_cache_size = mem; diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 8785134c9f1f..71c3507df9a0 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -1201,6 +1201,18 @@ static void background_work_end(struct cache *cache) /*----------------------------------------------------------------*/ +static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) +{ + return (bio_data_dir(bio) == WRITE) && + (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); +} + +static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block) +{ + return writeback_mode(&cache->features) && + (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio)); +} + static void quiesce(struct dm_cache_migration *mg, void (*continuation)(struct work_struct *)) { @@ -1474,12 +1486,50 @@ static void mg_upgrade_lock(struct work_struct *ws) } } +static void mg_full_copy(struct work_struct *ws) +{ + struct dm_cache_migration *mg = ws_to_mg(ws); + struct cache *cache = mg->cache; + struct policy_work *op = 
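On the mult_frac() conversions above: mult_frac(x, n, d) computes x * n / d without letting the intermediate product overflow, by dividing first and carrying the remainder. A userspace rendition of the same identity (modeled on the kernel macro, simplified to single-evaluation operands):

#include <inttypes.h>
#include <stdio.h>

/* x*n/d == (x/d)*n + ((x%d)*n)/d, with no x*n intermediate. */
static uint64_t mult_frac_u64(uint64_t x, uint64_t n, uint64_t d)
{
        return (x / d) * n + ((x % d) * n) / d;
}

int main(void)
{
        uint64_t x = UINT64_C(3000000000000000000); /* ~3e18 */

        /* A naive x * 25 / 100 would overflow 64 bits; this does not. */
        printf("%" PRIu64 "\n", mult_frac_u64(x, 25, 100));
        return 0;
}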
mg->op; + bool is_policy_promote = (op->op == POLICY_PROMOTE); + + if ((!is_policy_promote && !is_dirty(cache, op->cblock)) || + is_discarded_oblock(cache, op->oblock)) { + mg_upgrade_lock(ws); + return; + } + + init_continuation(&mg->k, mg_upgrade_lock); + + if (copy(mg, is_policy_promote)) { + DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache)); + mg->k.input = BLK_STS_IOERR; + mg_complete(mg, false); + } +} + static void mg_copy(struct work_struct *ws) { - int r; struct dm_cache_migration *mg = ws_to_mg(ws); if (mg->overwrite_bio) { + /* + * No exclusive lock was held when we last checked if the bio + * was optimisable. So we have to check again in case things + * have changed (e.g., the block may no longer be discarded). + */ + if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) { + /* + * Fallback to a real full copy after doing some tidying up. + */ + bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio); + BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */ + mg->overwrite_bio = NULL; + inc_io_migrations(mg->cache); + mg_full_copy(ws); + return; + } + /* * It's safe to do this here, even though it's new data * because all IO has been locked out of the block. @@ -1489,26 +1539,8 @@ static void mg_copy(struct work_struct *ws) */ overwrite(mg, mg_update_metadata_after_copy); - } else { - struct cache *cache = mg->cache; - struct policy_work *op = mg->op; - bool is_policy_promote = (op->op == POLICY_PROMOTE); - - if ((!is_policy_promote && !is_dirty(cache, op->cblock)) || - is_discarded_oblock(cache, op->oblock)) { - mg_upgrade_lock(ws); - return; - } - - init_continuation(&mg->k, mg_upgrade_lock); - - r = copy(mg, is_policy_promote); - if (r) { - DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache)); - mg->k.input = BLK_STS_IOERR; - mg_complete(mg, false); - } - } + } else + mg_full_copy(ws); } static int mg_lock_writes(struct dm_cache_migration *mg) @@ -1748,18 +1780,6 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio) /*----------------------------------------------------------------*/ -static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) -{ - return (bio_data_dir(bio) == WRITE) && - (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); -} - -static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block) -{ - return writeback_mode(&cache->features) && - (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio)); -} - static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block, bool *commit_needed) { @@ -3534,18 +3554,18 @@ static int __init dm_cache_init(void) { int r; - r = dm_register_target(&cache_target); - if (r) { - DMERR("cache target registration failed: %d", r); - return r; - } - migration_cache = KMEM_CACHE(dm_cache_migration, 0); if (!migration_cache) { dm_unregister_target(&cache_target); return -ENOMEM; } + r = dm_register_target(&cache_target); + if (r) { + DMERR("cache target registration failed: %d", r); + return r; + } + return 0; } diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 203144762f36..6a14f945783c 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -29,7 +29,6 @@ struct dm_kobject_holder { * DM targets must _not_ dereference a mapped_device to directly access its members!
*/ struct mapped_device { - struct srcu_struct io_barrier; struct mutex suspend_lock; /* @@ -127,6 +126,8 @@ struct mapped_device { struct blk_mq_tag_set *tag_set; bool use_blk_mq:1; bool init_tio_pdu:1; + + struct srcu_struct io_barrier; }; void dm_init_md_queue(struct mapped_device *md); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 96ab46512e1f..f575110454b6 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -148,6 +148,8 @@ struct crypt_config { mempool_t *tag_pool; unsigned tag_pool_max_sectors; + struct percpu_counter n_allocated_pages; + struct bio_set *bs; struct mutex bio_alloc_lock; @@ -219,6 +221,12 @@ struct crypt_config { #define MAX_TAG_SIZE 480 #define POOL_ENTRY_SIZE 512 +static DEFINE_SPINLOCK(dm_crypt_clients_lock); +static unsigned dm_crypt_clients_n = 0; +static volatile unsigned long dm_crypt_pages_per_client; +#define DM_CRYPT_MEMORY_PERCENT 2 +#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_PAGES * 16) + static void clone_init(struct dm_crypt_io *, struct bio *); static void kcryptd_queue_crypt(struct dm_crypt_io *io); static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc, @@ -1075,7 +1083,7 @@ static int crypt_convert_block_aead(struct crypt_config *cc, BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size); /* Reject unexpected unaligned bio. */ - if (unlikely(bv_in.bv_offset & (cc->sector_size - 1))) + if (unlikely(bv_in.bv_len & (cc->sector_size - 1))) return -EIO; dmreq = dmreq_of_req(cc, req); @@ -1168,7 +1176,7 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc, int r = 0; /* Reject unexpected unaligned bio. */ - if (unlikely(bv_in.bv_offset & (cc->sector_size - 1))) + if (unlikely(bv_in.bv_len & (cc->sector_size - 1))) return -EIO; dmreq = dmreq_of_req(cc, req); @@ -1954,10 +1962,15 @@ static int crypt_setkey(struct crypt_config *cc) /* Ignore extra keys (which are used for IV etc) */ subkey_size = crypt_subkey_size(cc); - if (crypt_integrity_hmac(cc)) + if (crypt_integrity_hmac(cc)) { + if (subkey_size < cc->key_mac_size) + return -EINVAL; + crypt_copy_authenckey(cc->authenc_key, cc->key, subkey_size - cc->key_mac_size, cc->key_mac_size); + } + for (i = 0; i < cc->tfms_count; i++) { if (crypt_integrity_hmac(cc)) r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i], @@ -2053,9 +2066,6 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string ret = crypt_setkey(cc); - /* wipe the kernel key payload copy in each case */ - memset(cc->key, 0, cc->key_size * sizeof(u8)); - if (!ret) { set_bit(DM_CRYPT_KEY_VALID, &cc->flags); kzfree(cc->key_string); @@ -2154,6 +2164,43 @@ static int crypt_wipe_key(struct crypt_config *cc) return r; } +static void crypt_calculate_pages_per_client(void) +{ + unsigned long pages = (totalram_pages - totalhigh_pages) * DM_CRYPT_MEMORY_PERCENT / 100; + + if (!dm_crypt_clients_n) + return; + + pages /= dm_crypt_clients_n; + if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT) + pages = DM_CRYPT_MIN_PAGES_PER_CLIENT; + dm_crypt_pages_per_client = pages; +} + +static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data) +{ + struct crypt_config *cc = pool_data; + struct page *page; + + if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) && + likely(gfp_mask & __GFP_NORETRY)) + return NULL; + + page = alloc_page(gfp_mask); + if (likely(page != NULL)) + percpu_counter_add(&cc->n_allocated_pages, 1); + + return page; +} + +static void crypt_page_free(void *page, void *pool_data) +{ + struct crypt_config *cc = 
pool_data; + + __free_page(page); + percpu_counter_sub(&cc->n_allocated_pages, 1); +} + static void crypt_dtr(struct dm_target *ti) { struct crypt_config *cc = ti->private; @@ -2180,6 +2227,10 @@ static void crypt_dtr(struct dm_target *ti) mempool_destroy(cc->req_pool); mempool_destroy(cc->tag_pool); + if (cc->page_pool) + WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0); + percpu_counter_destroy(&cc->n_allocated_pages); + if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) cc->iv_gen_ops->dtr(cc); @@ -2194,6 +2245,12 @@ static void crypt_dtr(struct dm_target *ti) /* Must zero key material before freeing */ kzfree(cc); + + spin_lock(&dm_crypt_clients_lock); + WARN_ON(!dm_crypt_clients_n); + dm_crypt_clients_n--; + crypt_calculate_pages_per_client(); + spin_unlock(&dm_crypt_clients_lock); } static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode) @@ -2523,6 +2580,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key) } } + /* wipe the kernel key payload copy */ + if (cc->key_string) + memset(cc->key, 0, cc->key_size * sizeof(u8)); + return ret; } @@ -2637,6 +2698,15 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->private = cc; + spin_lock(&dm_crypt_clients_lock); + dm_crypt_clients_n++; + crypt_calculate_pages_per_client(); + spin_unlock(&dm_crypt_clients_lock); + + ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL); + if (ret < 0) + goto bad; + /* Optional parameters need to be read before cipher constructor */ if (argc > 5) { ret = crypt_ctr_optional(ti, argc - 5, &argv[5]); @@ -2691,7 +2761,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size, ARCH_KMALLOC_MINALIGN); - cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0); + cc->page_pool = mempool_create(BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc); if (!cc->page_pool) { ti->error = "Cannot allocate page mempool"; goto bad; @@ -2740,6 +2810,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) cc->tag_pool_max_sectors * cc->on_disk_tag_size); if (!cc->tag_pool) { ti->error = "Cannot allocate integrity tags mempool"; + ret = -ENOMEM; goto bad; } @@ -2961,6 +3032,9 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) return ret; if (cc->iv_gen_ops && cc->iv_gen_ops->init) ret = cc->iv_gen_ops->init(cc); + /* wipe the kernel key payload copy */ + if (cc->key_string) + memset(cc->key, 0, cc->key_size * sizeof(u8)); return ret; } if (argc == 2 && !strcasecmp(argv[1], "wipe")) { @@ -3007,7 +3081,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type crypt_target = { .name = "crypt", - .version = {1, 18, 0}, + .version = {1, 18, 1}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 096fe9b66c50..cbc56372ff97 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -1376,7 +1376,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio) struct bvec_iter iter; struct bio_vec bv; bio_for_each_segment(bv, bio, iter) { - if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) { + if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) { DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary", bv.bv_offset, bv.bv_len, ic->sectors_per_block); return DM_MAPIO_KILL; @@ -2439,7 
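The custom page pool above replaces the stock mempool page allocator so that each dm-crypt client is held to a share of memory: opportunistic (__GFP_NORETRY) allocations fail once the per-client budget is spent, while must-succeed allocations may still exceed it and fall back to the mempool's reserve. A userspace sketch of that accounting pattern (names and the 4096-byte "page" are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdlib.h>

static atomic_long n_allocated_pages;
static long pages_per_client = 16384; /* stand-in for the 2%-of-RAM share */

static void *client_page_alloc(int may_fail)
{
        void *page;

        /* Opportunistic callers back off once the budget is exhausted. */
        if (may_fail && atomic_load(&n_allocated_pages) >= pages_per_client)
                return NULL;

        page = malloc(4096);
        if (page)
                atomic_fetch_add(&n_allocated_pages, 1);
        return page;
}

static void client_page_free(void *page)
{
        free(page);
        atomic_fetch_sub(&n_allocated_pages, 1);
}

int main(void)
{
        void *p = client_page_alloc(1);

        if (p)
                client_page_free(p);
        return 0;
}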
+2439,7 @@ static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, str unsigned i; for (i = 0; i < ic->journal_sections; i++) kvfree(sl[i]); - kfree(sl); + kvfree(sl); } static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl) @@ -2558,7 +2558,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error) int r = 0; unsigned i; __u64 journal_pages, journal_desc_size, journal_tree_size; - unsigned char *crypt_data = NULL; + unsigned char *crypt_data = NULL, *crypt_iv = NULL; + struct skcipher_request *req = NULL; ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL); ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL); @@ -2616,9 +2617,20 @@ static int create_journal(struct dm_integrity_c *ic, char **error) if (blocksize == 1) { struct scatterlist *sg; - SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt); - unsigned char iv[ivsize]; - skcipher_request_set_tfm(req, ic->journal_crypt); + + req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); + if (!req) { + *error = "Could not allocate crypt request"; + r = -ENOMEM; + goto bad; + } + + crypt_iv = kmalloc(ivsize, GFP_KERNEL); + if (!crypt_iv) { + *error = "Could not allocate iv"; + r = -ENOMEM; + goto bad; + } ic->journal_xor = dm_integrity_alloc_page_list(ic); if (!ic->journal_xor) { @@ -2640,9 +2652,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error) sg_set_buf(&sg[i], va, PAGE_SIZE); } sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids); - memset(iv, 0x00, ivsize); + memset(crypt_iv, 0x00, ivsize); - skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv); + skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv); init_completion(&comp.comp); comp.in_flight = (atomic_t)ATOMIC_INIT(1); if (do_crypt(true, req, &comp)) @@ -2658,10 +2670,22 @@ static int create_journal(struct dm_integrity_c *ic, char **error) crypto_free_skcipher(ic->journal_crypt); ic->journal_crypt = NULL; } else { - SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt); - unsigned char iv[ivsize]; unsigned crypt_len = roundup(ivsize, blocksize); + req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); + if (!req) { + *error = "Could not allocate crypt request"; + r = -ENOMEM; + goto bad; + } + + crypt_iv = kmalloc(ivsize, GFP_KERNEL); + if (!crypt_iv) { + *error = "Could not allocate iv"; + r = -ENOMEM; + goto bad; + } + crypt_data = kmalloc(crypt_len, GFP_KERNEL); if (!crypt_data) { *error = "Unable to allocate crypt data"; @@ -2669,8 +2693,6 @@ static int create_journal(struct dm_integrity_c *ic, char **error) goto bad; } - skcipher_request_set_tfm(req, ic->journal_crypt); - ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal); if (!ic->journal_scatterlist) { *error = "Unable to allocate sg list"; @@ -2694,12 +2716,12 @@ static int create_journal(struct dm_integrity_c *ic, char **error) struct skcipher_request *section_req; __u32 section_le = cpu_to_le32(i); - memset(iv, 0x00, ivsize); + memset(crypt_iv, 0x00, ivsize); memset(crypt_data, 0x00, crypt_len); memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le))); sg_init_one(&sg, crypt_data, crypt_len); - skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv); + skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv); init_completion(&comp.comp); comp.in_flight = (atomic_t)ATOMIC_INIT(1); if (do_crypt(true, req, &comp)) @@ -2757,6 +2779,9 @@ static int
create_journal(struct dm_integrity_c *ic, char **error) } bad: kfree(crypt_data); + kfree(crypt_iv); + skcipher_request_free(req); + return r; } diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index e52676fa9832..4a94d510aeff 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1992,6 +1992,45 @@ void dm_interface_exit(void) dm_hash_exit(); } + +/** + * dm_ioctl_export - Permanently export a mapped device via the ioctl interface + * @md: Pointer to mapped_device + * @name: Buffer (size DM_NAME_LEN) for name + * @uuid: Buffer (size DM_UUID_LEN) for uuid or NULL if not desired + */ +int dm_ioctl_export(struct mapped_device *md, const char *name, + const char *uuid) +{ + int r = 0; + struct hash_cell *hc; + + if (!md) { + r = -ENXIO; + goto out; + } + + /* The name and uuid can only be set once. */ + mutex_lock(&dm_hash_cells_mutex); + hc = dm_get_mdptr(md); + mutex_unlock(&dm_hash_cells_mutex); + if (hc) { + DMERR("%s: already exported", dm_device_name(md)); + r = -ENXIO; + goto out; + } + + r = dm_hash_insert(name, uuid, md); + if (r) { + DMERR("%s: could not bind to '%s'", dm_device_name(md), name); + goto out; + } + + /* Let udev know we've changed. */ + dm_kobject_uevent(md, KOBJ_CHANGE, dm_get_event_nr(md)); +out: + return r; +} /** * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers * @md: Pointer to mapped_device diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index d5f8eff7c11d..e6fd31b03c38 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -26,7 +26,7 @@ struct linear_c { /* * Construct a linear mapping: */ -static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) +int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct linear_c *lc; unsigned long long tmp; @@ -69,7 +69,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) return ret; } -static void linear_dtr(struct dm_target *ti) +void dm_linear_dtr(struct dm_target *ti) { struct linear_c *lc = (struct linear_c *) ti->private; @@ -94,14 +94,14 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio) linear_map_sector(ti, bio->bi_iter.bi_sector); } -static int linear_map(struct dm_target *ti, struct bio *bio) +int dm_linear_map(struct dm_target *ti, struct bio *bio) { linear_map_bio(ti, bio); return DM_MAPIO_REMAPPED; } -static int linear_end_io(struct dm_target *ti, struct bio *bio, +int dm_linear_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error) { struct linear_c *lc = ti->private; @@ -111,8 +111,9 @@ static int linear_end_io(struct dm_target *ti, struct bio *bio, return DM_ENDIO_DONE; } +EXPORT_SYMBOL_GPL(dm_linear_end_io); -static void linear_status(struct dm_target *ti, status_type_t type, +void dm_linear_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen) { struct linear_c *lc = (struct linear_c *) ti->private; @@ -129,7 +130,7 @@ static void linear_status(struct dm_target *ti, status_type_t type, } } -static int linear_prepare_ioctl(struct dm_target *ti, +int dm_linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, fmode_t *mode) { struct linear_c *lc = (struct linear_c *) ti->private; @@ -146,7 +147,7 @@ static int linear_prepare_ioctl(struct dm_target *ti, return 0; } -static int linear_iterate_devices(struct dm_target *ti, +int dm_linear_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct linear_c *lc = ti->private; @@ -154,7 +155,7 @@ 
static int linear_iterate_devices(struct dm_target *ti, return fn(ti, lc->dev, lc->start, ti->len, data); } -static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, +long dm_linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn) { long ret; @@ -169,8 +170,9 @@ static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, return ret; return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn); } +EXPORT_SYMBOL_GPL(dm_linear_dax_direct_access); -static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, +size_t dm_linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) { struct linear_c *lc = ti->private; @@ -183,21 +185,22 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, return 0; return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); } +EXPORT_SYMBOL_GPL(dm_linear_dax_copy_from_iter); static struct target_type linear_target = { .name = "linear", .version = {1, 4, 0}, .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM, .module = THIS_MODULE, - .ctr = linear_ctr, - .dtr = linear_dtr, - .map = linear_map, - .end_io = linear_end_io, - .status = linear_status, - .prepare_ioctl = linear_prepare_ioctl, - .iterate_devices = linear_iterate_devices, - .direct_access = linear_dax_direct_access, - .dax_copy_from_iter = linear_dax_copy_from_iter, + .ctr = dm_linear_ctr, + .dtr = dm_linear_dtr, + .map = dm_linear_map, + .status = dm_linear_status, + .end_io = dm_linear_end_io, + .prepare_ioctl = dm_linear_prepare_ioctl, + .iterate_devices = dm_linear_iterate_devices, + .direct_access = dm_linear_dax_direct_access, + .dax_copy_from_iter = dm_linear_dax_copy_from_iter, }; int __init dm_linear_init(void) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 11f273d2f018..8b7328666eaa 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m, pgpath = path_to_pgpath(path); - if (unlikely(lockless_dereference(m->current_pg) != pg)) { + if (unlikely(READ_ONCE(m->current_pg) != pg)) { /* Only update current_pgpath if pg changed */ spin_lock_irqsave(&m->lock, flags); m->current_pgpath = pgpath; @@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) } /* Were we instructed to switch PG? */ - if (lockless_dereference(m->next_pg)) { + if (READ_ONCE(m->next_pg)) { spin_lock_irqsave(&m->lock, flags); pg = m->next_pg; if (!pg) { @@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) /* Don't change PG until it has no remaining paths */ check_current_pg: - pg = lockless_dereference(m->current_pg); + pg = READ_ONCE(m->current_pg); if (pg) { pgpath = choose_path_in_pg(m, pg, nr_bytes); if (!IS_ERR_OR_NULL(pgpath)) @@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, struct request *clone; /* Do we need to select a new pgpath? */ - pgpath = lockless_dereference(m->current_pgpath); + pgpath = READ_ONCE(m->current_pgpath); if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) pgpath = choose_pgpath(m, nr_bytes); @@ -499,13 +499,23 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, if (IS_ERR(clone)) { /* EBUSY, ENODEV or EWOULDBLOCK: requeue */ bool queue_dying = blk_queue_dying(q); - DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing", - PTR_ERR(clone), queue_dying ? 
" (path offline)" : ""); if (queue_dying) { atomic_inc(&m->pg_init_in_progress); activate_or_offline_path(pgpath); + return DM_MAPIO_DELAY_REQUEUE; } - return DM_MAPIO_DELAY_REQUEUE; + + /* + * blk-mq's SCHED_RESTART can cover this requeue, so we + * needn't deal with it by DELAY_REQUEUE. More importantly, + * we have to return DM_MAPIO_REQUEUE so that blk-mq can + * get the queue busy feedback (via BLK_STS_RESOURCE), + * otherwise I/O merging can suffer. + */ + if (q->mq_ops) + return DM_MAPIO_REQUEUE; + else + return DM_MAPIO_DELAY_REQUEUE; } clone->bio = clone->biotail = NULL; clone->rq_disk = bdev->bd_disk; @@ -535,7 +545,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m bool queue_io; /* Do we need to select a new pgpath? */ - pgpath = lockless_dereference(m->current_pgpath); + pgpath = READ_ONCE(m->current_pgpath); queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); if (!pgpath || !queue_io) pgpath = choose_pgpath(m, nr_bytes); @@ -1804,7 +1814,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, struct pgpath *current_pgpath; int r; - current_pgpath = lockless_dereference(m->current_pgpath); + current_pgpath = READ_ONCE(m->current_pgpath); if (!current_pgpath) current_pgpath = choose_pgpath(m, 0); @@ -1826,7 +1836,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, } if (r == -ENOTCONN) { - if (!lockless_dereference(m->current_pg)) { + if (!READ_ONCE(m->current_pg)) { /* Path status changed, redo selection */ (void) choose_pgpath(m, 0); } @@ -1895,9 +1905,9 @@ static int multipath_busy(struct dm_target *ti) return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED); /* Guess which priority_group will be used at next mapping time */ - pg = lockless_dereference(m->current_pg); - next_pg = lockless_dereference(m->next_pg); - if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg)) + pg = READ_ONCE(m->current_pg); + next_pg = READ_ONCE(m->next_pg); + if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg)) pg = next_pg; if (!pg) { @@ -1943,8 +1953,9 @@ static int multipath_busy(struct dm_target *ti) *---------------------------------------------------------------*/ static struct target_type multipath_target = { .name = "multipath", - .version = {1, 12, 0}, - .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE, + .version = {1, 13, 0}, + .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE | + DM_TARGET_PASSES_INTEGRITY, .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, @@ -1967,13 +1978,6 @@ static int __init dm_multipath_init(void) { int r; - r = dm_register_target(&multipath_target); - if (r < 0) { - DMERR("request-based register failed %d", r); - r = -EINVAL; - goto bad_register_target; - } - kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0); if (!kmultipathd) { DMERR("failed to create workqueue kmpathd"); @@ -1995,13 +1999,20 @@ static int __init dm_multipath_init(void) goto bad_alloc_kmpath_handlerd; } + r = dm_register_target(&multipath_target); + if (r < 0) { + DMERR("request-based register failed %d", r); + r = -EINVAL; + goto bad_register_target; + } + return 0; +bad_register_target: + destroy_workqueue(kmpath_handlerd); bad_alloc_kmpath_handlerd: destroy_workqueue(kmultipathd); bad_alloc_kmultipathd: - dm_unregister_target(&multipath_target); -bad_register_target: return r; } diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 2245d06d2045..38a2ac24428e 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -675,15 +675,11 @@ static struct raid_type *get_raid_type_by_ll(const 
int level, const int layout) return NULL; } -/* - * Conditionally change bdev capacity of @rs - * in case of a disk add/remove reshape - */ -static void rs_set_capacity(struct raid_set *rs) +/* Adjust rdev sectors */ +static void rs_set_rdev_sectors(struct raid_set *rs) { struct mddev *mddev = &rs->md; struct md_rdev *rdev; - struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table)); /* * raid10 sets rdev->sector to the device size, which @@ -692,8 +688,16 @@ static void rs_set_capacity(struct raid_set *rs) rdev_for_each(rdev, mddev) if (!test_bit(Journal, &rdev->flags)) rdev->sectors = mddev->dev_sectors; +} - set_capacity(gendisk, mddev->array_sectors); +/* + * Change bdev capacity of @rs in case of a disk add/remove reshape + */ +static void rs_set_capacity(struct raid_set *rs) +{ + struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table)); + + set_capacity(gendisk, rs->md.array_sectors); revalidate_disk(gendisk); } @@ -1674,8 +1678,11 @@ static void do_table_event(struct work_struct *ws) struct raid_set *rs = container_of(ws, struct raid_set, md.event_work); smp_rmb(); /* Make sure we access most actual mddev properties */ - if (!rs_is_reshaping(rs)) + if (!rs_is_reshaping(rs)) { + if (rs_is_raid10(rs)) + rs_set_rdev_sectors(rs); rs_set_capacity(rs); + } dm_table_event(rs->ti->table); } @@ -2143,13 +2150,6 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) struct dm_raid_superblock *refsb; uint64_t events_sb, events_refsb; - rdev->sb_start = 0; - rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); - if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) { - DMERR("superblock size of a logical block is no longer valid"); - return -EINVAL; - } - r = read_disk_sb(rdev, rdev->sb_size, false); if (r) return r; @@ -2494,6 +2494,17 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) if (test_bit(Journal, &rdev->flags)) continue; + if (!rdev->meta_bdev) + continue; + + /* Set superblock offset/size for metadata device. */ + rdev->sb_start = 0; + rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); + if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) { + DMERR("superblock size of a logical block is no longer valid"); + return -EINVAL; + } + /* * Skipping super_load due to CTR_FLAG_SYNC will cause * the array to undergo initialization again as @@ -2506,9 +2517,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) continue; - if (!rdev->meta_bdev) - continue; - r = super_load(rdev, freshest); switch (r) { @@ -3629,8 +3637,11 @@ static void raid_postsuspend(struct dm_target *ti) { struct raid_set *rs = ti->private; - if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) + if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) { + mddev_lock_nointr(&rs->md); mddev_suspend(&rs->md); + mddev_unlock(&rs->md); + } rs->md.ro = 1; } @@ -3844,11 +3855,10 @@ static int raid_preresume(struct dm_target *ti) mddev->resync_min = mddev->recovery_cp; } - rs_set_capacity(rs); - /* Check for any reshape request unless new raid set */ if (test_and_clear_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) { /* Initiate a reshape. */ + rs_set_rdev_sectors(rs); mddev_lock_nointr(mddev); r = rs_start_reshape(rs); mddev_unlock(mddev); @@ -3877,6 +3887,10 @@ static void raid_resume(struct dm_target *ti) mddev->ro = 0; mddev->in_sync = 0; + /* Only reduce raid set size before running a disk removing reshape. 
*/ + if (mddev->delta_disks < 0) + rs_set_capacity(rs); + /* * Keep the RAID set frozen if reshape/rebuild flags are set. * The RAID set is unfrozen once the next table load/resume, @@ -3887,8 +3901,11 @@ static void raid_resume(struct dm_target *ti) if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS)) clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) + if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) { + mddev_lock_nointr(mddev); mddev_resume(mddev); + mddev_unlock(mddev); + } } static struct target_type raid_target = { diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 1113b42e1eda..a0613bd8ed00 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -2411,24 +2411,6 @@ static int __init dm_snapshot_init(void) return r; } - r = dm_register_target(&snapshot_target); - if (r < 0) { - DMERR("snapshot target register failed %d", r); - goto bad_register_snapshot_target; - } - - r = dm_register_target(&origin_target); - if (r < 0) { - DMERR("Origin target register failed %d", r); - goto bad_register_origin_target; - } - - r = dm_register_target(&merge_target); - if (r < 0) { - DMERR("Merge target register failed %d", r); - goto bad_register_merge_target; - } - r = init_origin_hash(); if (r) { DMERR("init_origin_hash failed."); @@ -2449,19 +2431,37 @@ static int __init dm_snapshot_init(void) goto bad_pending_cache; } + r = dm_register_target(&snapshot_target); + if (r < 0) { + DMERR("snapshot target register failed %d", r); + goto bad_register_snapshot_target; + } + + r = dm_register_target(&origin_target); + if (r < 0) { + DMERR("Origin target register failed %d", r); + goto bad_register_origin_target; + } + + r = dm_register_target(&merge_target); + if (r < 0) { + DMERR("Merge target register failed %d", r); + goto bad_register_merge_target; + } + return 0; -bad_pending_cache: - kmem_cache_destroy(exception_cache); -bad_exception_cache: - exit_origin_hash(); -bad_origin_hash: - dm_unregister_target(&merge_target); bad_register_merge_target: dm_unregister_target(&origin_target); bad_register_origin_target: dm_unregister_target(&snapshot_target); bad_register_snapshot_target: + kmem_cache_destroy(pending_cache); +bad_pending_cache: + kmem_cache_destroy(exception_cache); +bad_exception_cache: + exit_origin_hash(); +bad_origin_hash: dm_exception_store_exit(); return r; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index ef7b8f201f73..23e8bde4c500 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -883,9 +884,7 @@ EXPORT_SYMBOL_GPL(dm_table_set_type); static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { - struct request_queue *q = bdev_get_queue(dev->bdev); - - return q && blk_queue_dax(q); + return bdev_dax_supported(dev->bdev, PAGE_SIZE); } static bool dm_table_supports_dax(struct dm_table *t) @@ -1758,13 +1757,12 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t) return true; } - -static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); - return q && blk_queue_discard(q); + return q && !blk_queue_discard(q); } static bool dm_table_supports_discards(struct dm_table *t) @@ 
-1772,28 +1770,24 @@ static bool dm_table_supports_discards(struct dm_table *t) struct dm_target *ti; unsigned i; - /* - * Unless any target used by the table set discards_supported, - * require at least one underlying device to support discards. - * t->devices includes internal dm devices such as mirror logs - * so we need to use iterate_devices here, which targets - * supporting discard selectively must provide. - */ for (i = 0; i < dm_table_get_num_targets(t); i++) { ti = dm_table_get_target(t, i); if (!ti->num_discard_bios) - continue; - - if (ti->discards_supported) - return true; + return false; - if (ti->type->iterate_devices && - ti->type->iterate_devices(ti, device_discard_capable, NULL)) - return true; + /* + * Either the target provides discard support (as implied by setting + * 'discards_supported') or it relies on _all_ data devices having + * discard support. + */ + if (!ti->discards_supported && + (!ti->type->iterate_devices || + ti->type->iterate_devices(ti, device_not_discard_capable, NULL))) + return false; } - return false; + return true; } void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, @@ -1818,6 +1812,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, } blk_queue_write_cache(q, wc, fua); + if (dm_table_supports_dax(t)) + queue_flag_set_unlocked(QUEUE_FLAG_DAX, q); + else + queue_flag_clear_unlocked(QUEUE_FLAG_DAX, q); + if (dm_table_supports_dax_write_cache(t)) dax_write_cache(t->md->dax_dev, true); diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index d31d18d9727c..36ef284ad086 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -80,10 +80,14 @@ #define SECTOR_TO_BLOCK_SHIFT 3 /* + * For btree insert: * 3 for btree insert + * 2 for btree lookup used within space map + * For btree remove: + * 2 for shadow spine + + * 4 for rebalancing 3 child nodes */ -#define THIN_MAX_CONCURRENT_LOCKS 5 +#define THIN_MAX_CONCURRENT_LOCKS 6 /* This should be plenty */ #define SPACE_MAP_ROOT_SIZE 128 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 1e25705209c2..72ae5dc50532 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1380,6 +1380,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); +static void requeue_bios(struct pool *pool); + static void check_for_space(struct pool *pool) { int r; @@ -1392,8 +1394,10 @@ static void check_for_space(struct pool *pool) if (r) return; - if (nr_free) + if (nr_free) { set_pool_mode(pool, PM_WRITE); + requeue_bios(pool); + } } /* @@ -1470,7 +1474,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) r = dm_pool_alloc_data_block(pool->pmd, result); if (r) { - metadata_operation_failed(pool, "dm_pool_alloc_data_block", r); + if (r == -ENOSPC) + set_pool_mode(pool, PM_OUT_OF_DATA_SPACE); + else + metadata_operation_failed(pool, "dm_pool_alloc_data_block", r); return r; } @@ -4355,30 +4362,28 @@ static struct target_type thin_target = { static int __init dm_thin_init(void) { - int r; + int r = -ENOMEM; pool_table_init(); + _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0); + if (!_new_mapping_cache) + return r; + r = dm_register_target(&thin_target); if (r) - return r; + goto bad_new_mapping_cache; r = dm_register_target(&pool_target); if (r) - goto bad_pool_target; - - r = -ENOMEM; - - _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0); - if (!_new_mapping_cache) - goto
bad_new_mapping_cache; + goto bad_thin_target; return 0; -bad_new_mapping_cache: - dm_unregister_target(&pool_target); -bad_pool_target: +bad_thin_target: dm_unregister_target(&thin_target); +bad_new_mapping_cache: + kmem_cache_destroy(_new_mapping_cache); return r; } diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index e13f90832b6b..776a4f77f76c 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -11,6 +11,7 @@ #include "dm-verity-fec.h" #include +#include #define DM_MSG_PREFIX "verity-fec" @@ -175,9 +176,11 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, if (r < 0 && neras) DMERR_LIMIT("%s: FEC %llu: failed to correct: %d", v->data_dev->name, (unsigned long long)rsb, r); - else if (r > 0) + else if (r > 0) { DMWARN_LIMIT("%s: FEC %llu: corrected %d errors", v->data_dev->name, (unsigned long long)rsb, r); + atomic_add_unless(&v->fec->corrected, 1, INT_MAX); + } return r; } @@ -545,6 +548,7 @@ unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz, void verity_fec_dtr(struct dm_verity *v) { struct dm_verity_fec *f = v->fec; + struct kobject *kobj = &f->kobj_holder.kobj; if (!verity_fec_is_enabled(v)) goto out; @@ -561,6 +565,12 @@ void verity_fec_dtr(struct dm_verity *v) if (f->dev) dm_put_device(v->ti, f->dev); + + if (kobj->state_initialized) { + kobject_put(kobj); + wait_for_completion(dm_get_completion_from_kobject(kobj)); + } + out: kfree(f); v->fec = NULL; @@ -649,6 +659,28 @@ int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v, return 0; } +static ssize_t corrected_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct dm_verity_fec *f = container_of(kobj, struct dm_verity_fec, + kobj_holder.kobj); + + return sprintf(buf, "%d\n", atomic_read(&f->corrected)); +} + +static struct kobj_attribute attr_corrected = __ATTR_RO(corrected); + +static struct attribute *fec_attrs[] = { + &attr_corrected.attr, + NULL +}; + +static struct kobj_type fec_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .default_attrs = fec_attrs, + .release = dm_kobject_release +}; + /* * Allocate dm_verity_fec for v->fec. Must be called before verity_fec_ctr. */ @@ -672,8 +704,10 @@ int verity_fec_ctr_alloc(struct dm_verity *v) */ int verity_fec_ctr(struct dm_verity *v) { + int r; struct dm_verity_fec *f = v->fec; struct dm_target *ti = v->ti; + struct mapped_device *md = dm_table_get_md(ti->table); u64 hash_blocks; if (!verity_fec_is_enabled(v)) { @@ -681,6 +715,16 @@ int verity_fec_ctr(struct dm_verity *v) return 0; } + /* Create a kobject and sysfs attributes */ + init_completion(&f->kobj_holder.completion); + + r = kobject_init_and_add(&f->kobj_holder.kobj, &fec_ktype, + &disk_to_dev(dm_disk(md))->kobj, "%s", "fec"); + if (r) { + ti->error = "Cannot create kobject"; + return r; + } + /* * FEC is computed over data blocks, possible metadata, and * hash blocks. 
In other words, FEC covers total of fec_blocks diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h index bb31ce87a933..4db0cae262eb 100644 --- a/drivers/md/dm-verity-fec.h +++ b/drivers/md/dm-verity-fec.h @@ -12,6 +12,8 @@ #ifndef DM_VERITY_FEC_H #define DM_VERITY_FEC_H +#include "dm.h" +#include "dm-core.h" #include "dm-verity.h" #include @@ -51,6 +53,8 @@ struct dm_verity_fec { mempool_t *extra_pool; /* mempool for extra buffers */ mempool_t *output_pool; /* mempool for output */ struct kmem_cache *cache; /* cache for buffers */ + atomic_t corrected; /* corrected errors */ + struct dm_kobject_holder kobj_holder; /* for sysfs attributes */ }; /* per-bio data */ diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index bda3caca23ca..ecb506b6923d 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -32,6 +32,7 @@ #define DM_VERITY_OPT_LOGGING "ignore_corruption" #define DM_VERITY_OPT_RESTART "restart_on_corruption" #define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks" +#define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once" #define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC) @@ -473,6 +474,18 @@ static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io, return 0; } +/* + * Moves the bio iter one data block forward. + */ +static inline void verity_bv_skip_block(struct dm_verity *v, + struct dm_verity_io *io, + struct bvec_iter *iter) +{ + struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); + + bio_advance_iter(bio, iter, 1 << v->data_dev_block_bits); +} + /* * Verify one "dm_verity_io" structure. */ @@ -486,9 +499,16 @@ static int verity_verify_io(struct dm_verity_io *io) for (b = 0; b < io->n_blocks; b++) { int r; + sector_t cur_block = io->block + b; struct ahash_request *req = verity_io_hash_req(v, io); - r = verity_hash_for_block(v, io, io->block + b, + if (v->validated_blocks && + likely(test_bit(cur_block, v->validated_blocks))) { + verity_bv_skip_block(v, io, &io->iter); + continue; + } + + r = verity_hash_for_block(v, io, cur_block, verity_io_want_digest(v, io), &is_zero); if (unlikely(r < 0)) @@ -522,13 +542,16 @@ static int verity_verify_io(struct dm_verity_io *io) return r; if (likely(memcmp(verity_io_real_digest(v, io), - verity_io_want_digest(v, io), v->digest_size) == 0)) + verity_io_want_digest(v, io), v->digest_size) == 0)) { + if (v->validated_blocks) + set_bit(cur_block, v->validated_blocks); continue; + } else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA, - io->block + b, NULL, &start) == 0) + cur_block, NULL, &start) == 0) continue; else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA, - io->block + b)) + cur_block)) return -EIO; } @@ -582,6 +605,7 @@ static void verity_prefetch_io(struct work_struct *work) container_of(work, struct dm_verity_prefetch_work, work); struct dm_verity *v = pw->v; int i; + sector_t prefetch_size; for (i = v->levels - 2; i >= 0; i--) { sector_t hash_block_start; @@ -604,8 +628,14 @@ static void verity_prefetch_io(struct work_struct *work) hash_block_end = v->hash_blocks - 1; } no_prefetch_cluster: + /* For eMMC, it is more efficient to send a bigger read. */ + prefetch_size = max((sector_t)CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE, + hash_block_end - hash_block_start + 1); + if ((hash_block_start + prefetch_size) >= (v->hash_start + v->hash_blocks)) { + prefetch_size = hash_block_end - hash_block_start + 1; + } dm_bufio_prefetch(v->bufio, hash_block_start, - hash_block_end - hash_block_start + 1); + prefetch_size); } kfree(pw); @@ -632,7
+662,7 @@ static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io) * Bio map function. It allocates dm_verity_io structure and bio vector and * fills them. Then it issues prefetches and the I/O. */ -static int verity_map(struct dm_target *ti, struct bio *bio) +int verity_map(struct dm_target *ti, struct bio *bio) { struct dm_verity *v = ti->private; struct dm_verity_io *io; @@ -677,7 +707,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio) /* * Status: V (valid) or C (corruption found) */ -static void verity_status(struct dm_target *ti, status_type_t type, +void verity_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen) { struct dm_verity *v = ti->private; @@ -714,6 +744,8 @@ static void verity_status(struct dm_target *ti, status_type_t type, args += DM_VERITY_OPTS_FEC; if (v->zero_digest) args++; + if (v->validated_blocks) + args++; if (!args) return; DMEMIT(" %u", args); @@ -732,12 +764,14 @@ static void verity_status(struct dm_target *ti, status_type_t type, } if (v->zero_digest) DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES); + if (v->validated_blocks) + DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE); sz = verity_fec_status_table(v, sz, result, maxlen); break; } } -static int verity_prepare_ioctl(struct dm_target *ti, +int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, fmode_t *mode) { struct dm_verity *v = ti->private; @@ -750,7 +784,7 @@ static int verity_prepare_ioctl(struct dm_target *ti, return 0; } -static int verity_iterate_devices(struct dm_target *ti, +int verity_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct dm_verity *v = ti->private; @@ -758,7 +792,7 @@ static int verity_iterate_devices(struct dm_target *ti, return fn(ti, v->data_dev, v->data_start, ti->len, data); } -static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits) +void verity_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct dm_verity *v = ti->private; @@ -771,7 +805,7 @@ static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits) blk_limits_io_min(limits, limits->logical_block_size); } -static void verity_dtr(struct dm_target *ti) +void verity_dtr(struct dm_target *ti) { struct dm_verity *v = ti->private; @@ -781,6 +815,7 @@ static void verity_dtr(struct dm_target *ti) if (v->bufio) dm_bufio_client_destroy(v->bufio); + kvfree(v->validated_blocks); kfree(v->salt); kfree(v->root_digest); kfree(v->zero_digest); @@ -801,6 +836,26 @@ static void verity_dtr(struct dm_target *ti) kfree(v); } +static int verity_alloc_most_once(struct dm_verity *v) +{ + struct dm_target *ti = v->ti; + + /* the bitset can only handle INT_MAX blocks */ + if (v->data_blocks > INT_MAX) { + ti->error = "device too large to use check_at_most_once"; + return -E2BIG; + } + + v->validated_blocks = kvzalloc(BITS_TO_LONGS(v->data_blocks) * + sizeof(unsigned long), GFP_KERNEL); + if (!v->validated_blocks) { + ti->error = "failed to allocate bitset for check_at_most_once"; + return -ENOMEM; + } + + return 0; +} + static int verity_alloc_zero_digest(struct dm_verity *v) { int r = -ENOMEM; @@ -870,6 +925,12 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v) } continue; + } else if (!strcasecmp(arg_name, DM_VERITY_OPT_AT_MOST_ONCE)) { + r = verity_alloc_most_once(v); + if (r) + return r; + continue; + } else if (verity_is_fec_opt_arg(arg_name)) { r = verity_fec_parse_opt_args(as, v, &argc, arg_name); if (r) @@ -898,7 +959,7 @@ 
static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v) * * Hex string or "-" if no salt. */ -static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) +int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) { struct dm_verity *v; struct dm_arg_set as; @@ -1062,6 +1123,14 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } +#ifdef CONFIG_DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED + if (!v->validated_blocks) { + r = verity_alloc_most_once(v); + if (r) + goto bad; + } +#endif + v->hash_per_block_bits = __fls((1 << v->hash_dev_block_bits) / v->digest_size); @@ -1137,7 +1206,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) static struct target_type verity_target = { .name = "verity", - .version = {1, 3, 0}, + .version = {1, 4, 0}, .module = THIS_MODULE, .ctr = verity_ctr, .dtr = verity_dtr, diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index a59e0ada6fd3..e80e06aa5ec6 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -63,6 +63,7 @@ struct dm_verity { sector_t hash_level_block[DM_VERITY_MAX_LEVELS]; struct dm_verity_fec *fec; /* forward error correction */ + unsigned long *validated_blocks; /* bitset blocks validated */ }; struct dm_verity_io { @@ -131,4 +132,14 @@ extern int verity_hash(struct dm_verity *v, struct ahash_request *req, extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io, sector_t block, u8 *digest, bool *is_zero); +extern void verity_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen); +extern int verity_prepare_ioctl(struct dm_target *ti, + struct block_device **bdev, fmode_t *mode); +extern int verity_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data); +extern void verity_io_hints(struct dm_target *ti, struct queue_limits *limits); +extern void verity_dtr(struct dm_target *ti); +extern int verity_ctr(struct dm_target *ti, unsigned argc, char **argv); +extern int verity_map(struct dm_target *ti, struct bio *bio); #endif /* DM_VERITY_H */ diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index b87c1741da4b..ba6b0a90ecfb 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -660,6 +660,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path) struct dmz_target *dmz = ti->private; struct request_queue *q; struct dmz_dev *dev; + sector_t aligned_capacity; int ret; /* Get the target device */ @@ -685,15 +686,17 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path) goto err; } + q = bdev_get_queue(dev->bdev); dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT; - if (ti->begin || (ti->len != dev->capacity)) { + aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1); + if (ti->begin || + ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) { ti->error = "Partial mapping not supported"; ret = -EINVAL; goto err; } - q = bdev_get_queue(dev->bdev); - dev->zone_nr_sectors = q->limits.chunk_sectors; + dev->zone_nr_sectors = blk_queue_zone_sectors(q); dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors); dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors); @@ -785,7 +788,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv) /* Chunk BIO work */ mutex_init(&dmz->chunk_lock); - INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL); + INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO); dmz->chunk_wq = 
alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 0, dev->name); if (!dmz->chunk_wq) { @@ -929,8 +932,10 @@ static int dmz_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct dmz_target *dmz = ti->private; + struct dmz_dev *dev = dmz->dev; + sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1); - return fn(ti, dmz->ddev, 0, dmz->dev->capacity, data); + return fn(ti, dmz->ddev, 0, capacity, data); } static struct target_type dmz_type = { diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 4be85324f44d..24ec6e039448 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -815,7 +815,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error) queue_io(md, bio); } else { /* done with normal IO or empty flush */ - bio->bi_status = io_error; + if (io_error) + bio->bi_status = io_error; bio_endio(bio); } } @@ -960,8 +961,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, if (len < 1) goto out; nr_pages = min(len, nr_pages); - if (ti->type->direct_access) - ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); + ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); out: dm_put_live_table(md, srcu_idx); @@ -1695,7 +1695,7 @@ static struct mapped_device *alloc_dev(int minor) struct mapped_device *md; void *old_md; - md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); + md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); if (!md) { DMWARN("unable to allocate device, out of memory."); return NULL; @@ -1795,7 +1795,7 @@ static struct mapped_device *alloc_dev(int minor) bad_minor: module_put(THIS_MODULE); bad_module_get: - kfree(md); + kvfree(md); return NULL; } @@ -1814,7 +1814,7 @@ static void free_dev(struct mapped_device *md) free_minor(minor); module_put(THIS_MODULE); - kfree(md); + kvfree(md); } static void __bind_mempools(struct mapped_device *md, struct dm_table *t) @@ -2049,9 +2049,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) */ bioset_free(md->queue->bio_split); md->queue->bio_split = NULL; - - if (type == DM_TYPE_DAX_BIO_BASED) - queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue); break; case DM_TYPE_NONE: WARN_ON_ONCE(true); @@ -2709,11 +2706,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj) md = container_of(kobj, struct mapped_device, kobj_holder.kobj); - if (test_bit(DMF_FREEING, &md->flags) || - dm_deleting_md(md)) - return NULL; - + spin_lock(&_minor_lock); + if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { + md = NULL; + goto out; + } dm_get(md); +out: + spin_unlock(&_minor_lock); + return md; } diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 38c84c0a35d4..ab289ce9c3cd 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -80,8 +80,6 @@ void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type); enum dm_queue_mode dm_get_md_type(struct mapped_device *md); struct target_type *dm_get_immutable_target_type(struct mapped_device *md); -int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t); - /* * To check the return value from dm_table_find_target(). 
*/ diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 03082e17c65c..72ce0bccc865 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -442,10 +442,11 @@ static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot) static void remove_suspend_info(struct mddev *mddev, int slot) { struct md_cluster_info *cinfo = mddev->cluster_info; + mddev->pers->quiesce(mddev, 1); spin_lock_irq(&cinfo->suspend_lock); __remove_suspend_info(cinfo, slot); spin_unlock_irq(&cinfo->suspend_lock); - mddev->pers->quiesce(mddev, 2); + mddev->pers->quiesce(mddev, 0); } @@ -492,13 +493,12 @@ static void process_suspend_info(struct mddev *mddev, s->lo = lo; s->hi = hi; mddev->pers->quiesce(mddev, 1); - mddev->pers->quiesce(mddev, 0); spin_lock_irq(&cinfo->suspend_lock); /* Remove existing entry (if exists) before adding */ __remove_suspend_info(cinfo, slot); list_add(&s->list, &cinfo->suspend_list); spin_unlock_irq(&cinfo->suspend_lock); - mddev->pers->quiesce(mddev, 2); + mddev->pers->quiesce(mddev, 0); } static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg) diff --git a/drivers/md/md.c b/drivers/md/md.c index 0ff1bbf6c90e..24f0f7c0d5be 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -266,16 +266,31 @@ static DEFINE_SPINLOCK(all_mddevs_lock); * call has finished, the bio has been linked into some internal structure * and so is visible to ->quiesce(), so we don't need the refcount any more. */ +static bool is_suspended(struct mddev *mddev, struct bio *bio) +{ + if (mddev->suspended) + return true; + if (bio_data_dir(bio) != WRITE) + return false; + if (mddev->suspend_lo >= mddev->suspend_hi) + return false; + if (bio->bi_iter.bi_sector >= mddev->suspend_hi) + return false; + if (bio_end_sector(bio) < mddev->suspend_lo) + return false; + return true; +} + void md_handle_request(struct mddev *mddev, struct bio *bio) { check_suspended: rcu_read_lock(); - if (mddev->suspended) { + if (is_suspended(mddev, bio)) { DEFINE_WAIT(__wait); for (;;) { prepare_to_wait(&mddev->sb_wait, &__wait, TASK_UNINTERRUPTIBLE); - if (!mddev->suspended) + if (!is_suspended(mddev, bio)) break; rcu_read_unlock(); schedule(); @@ -344,12 +359,17 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) void mddev_suspend(struct mddev *mddev) { WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk); + lockdep_assert_held(&mddev->reconfig_mutex); if (mddev->suspended++) return; synchronize_rcu(); wake_up(&mddev->sb_wait); + set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags); + smp_mb__after_atomic(); wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); mddev->pers->quiesce(mddev, 1); + clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags); + wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags)); del_timer_sync(&mddev->safemode_timer); } @@ -357,6 +377,7 @@ EXPORT_SYMBOL_GPL(mddev_suspend); void mddev_resume(struct mddev *mddev) { + lockdep_assert_held(&mddev->reconfig_mutex); if (--mddev->suspended) return; wake_up(&mddev->sb_wait); @@ -663,6 +684,7 @@ void mddev_unlock(struct mddev *mddev) */ spin_lock(&pers_lock); md_wakeup_thread(mddev->thread); + wake_up(&mddev->sb_wait); spin_unlock(&pers_lock); } EXPORT_SYMBOL_GPL(mddev_unlock); @@ -779,6 +801,9 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, struct bio *bio; int ff = 0; + if (!page) + return; + if (test_bit(Faulty, &rdev->flags)) return; @@ -2820,7 +2845,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) err = 0; } } else if 
(cmd_match(buf, "re-add")) { - if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) { + if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) && + rdev->saved_raid_disk >= 0) { /* clear_bit is performed _after_ all the devices * have their local Faulty bit cleared. If any writes * happen in the meantime in the local node, they @@ -4824,7 +4850,7 @@ suspend_lo_show(struct mddev *mddev, char *page) static ssize_t suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) { - unsigned long long old, new; + unsigned long long new; int err; err = kstrtoull(buf, 10, &new); @@ -4840,16 +4866,10 @@ suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) if (mddev->pers == NULL || mddev->pers->quiesce == NULL) goto unlock; - old = mddev->suspend_lo; + mddev_suspend(mddev); mddev->suspend_lo = new; - if (new >= old) - /* Shrinking suspended region */ - mddev->pers->quiesce(mddev, 2); - else { - /* Expanding suspended region - need to wait */ - mddev->pers->quiesce(mddev, 1); - mddev->pers->quiesce(mddev, 0); - } + mddev_resume(mddev); + err = 0; unlock: mddev_unlock(mddev); @@ -4867,7 +4887,7 @@ suspend_hi_show(struct mddev *mddev, char *page) static ssize_t suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) { - unsigned long long old, new; + unsigned long long new; int err; err = kstrtoull(buf, 10, &new); @@ -4880,19 +4900,13 @@ suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) if (err) return err; err = -EINVAL; - if (mddev->pers == NULL || - mddev->pers->quiesce == NULL) + if (mddev->pers == NULL) goto unlock; - old = mddev->suspend_hi; + + mddev_suspend(mddev); mddev->suspend_hi = new; - if (new <= old) - /* Shrinking suspended region */ - mddev->pers->quiesce(mddev, 2); - else { - /* Expanding suspended region - need to wait */ - mddev->pers->quiesce(mddev, 1); - mddev->pers->quiesce(mddev, 0); - } + mddev_resume(mddev); + err = 0; unlock: mddev_unlock(mddev); @@ -5357,7 +5371,7 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data) return NULL; } -static int add_named_array(const char *val, struct kernel_param *kp) +static int add_named_array(const char *val, const struct kernel_param *kp) { /* * val must be "md_*" or "mdNNN". @@ -5434,6 +5448,7 @@ int md_run(struct mddev *mddev) * the only valid external interface is through the md * device. */ + mddev->has_superblocks = false; rdev_for_each(rdev, mddev) { if (test_bit(Faulty, &rdev->flags)) continue; @@ -5447,6 +5462,9 @@ int md_run(struct mddev *mddev) set_disk_ro(mddev->gendisk, 1); } + if (rdev->sb_page) + mddev->has_superblocks = true; + /* perform some consistency tests on the device. * We don't want the data to overlap the metadata, * Internal Bitmap issues have been handled elsewhere. @@ -5479,8 +5497,10 @@ int md_run(struct mddev *mddev) } if (mddev->sync_set == NULL) { mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); - if (!mddev->sync_set) - return -ENOMEM; + if (!mddev->sync_set) { + err = -ENOMEM; + goto abort; + } } spin_lock(&pers_lock); @@ -5493,7 +5513,8 @@ int md_run(struct mddev *mddev) else pr_warn("md: personality for level %s is not loaded!\n", mddev->clevel); - return -EINVAL; + err = -EINVAL; + goto abort; } spin_unlock(&pers_lock); if (mddev->level != pers->level) { @@ -5506,7 +5527,8 @@ int md_run(struct mddev *mddev) pers->start_reshape == NULL) { /* This personality cannot handle reshaping... 
*/ module_put(pers->owner); - return -EINVAL; + err = -EINVAL; + goto abort; } if (pers->sync_request) { @@ -5580,7 +5602,7 @@ int md_run(struct mddev *mddev) mddev->private = NULL; module_put(pers->owner); bitmap_destroy(mddev); - return err; + goto abort; } if (mddev->queue) { bool nonrot = true; @@ -5642,6 +5664,18 @@ int md_run(struct mddev *mddev) sysfs_notify_dirent_safe(mddev->sysfs_action); sysfs_notify(&mddev->kobj, NULL, "degraded"); return 0; + +abort: + if (mddev->bio_set) { + bioset_free(mddev->bio_set); + mddev->bio_set = NULL; + } + if (mddev->sync_set) { + bioset_free(mddev->sync_set); + mddev->sync_set = NULL; + } + + return err; } EXPORT_SYMBOL_GPL(md_run); @@ -6362,7 +6396,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info) break; } } - if (has_journal) { + if (has_journal || mddev->bitmap) { export_rdev(rdev); return -EBUSY; } @@ -6464,6 +6498,9 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev) char b[BDEVNAME_SIZE]; struct md_rdev *rdev; + if (!mddev->pers) + return -ENODEV; + rdev = find_rdev(mddev, dev); if (!rdev) return -ENXIO; @@ -6618,22 +6655,26 @@ static int set_bitmap_file(struct mddev *mddev, int fd) return -ENOENT; /* cannot remove what isn't there */ err = 0; if (mddev->pers) { - mddev->pers->quiesce(mddev, 1); if (fd >= 0) { struct bitmap *bitmap; bitmap = bitmap_create(mddev, -1); + mddev_suspend(mddev); if (!IS_ERR(bitmap)) { mddev->bitmap = bitmap; err = bitmap_load(mddev); } else err = PTR_ERR(bitmap); - } - if (fd < 0 || err) { + if (err) { + bitmap_destroy(mddev); + fd = -1; + } + mddev_resume(mddev); + } else if (fd < 0) { + mddev_suspend(mddev); bitmap_destroy(mddev); - fd = -1; /* make sure to put the file */ + mddev_resume(mddev); } - mddev->pers->quiesce(mddev, 0); } if (fd < 0) { struct file *f = mddev->bitmap_info.file; @@ -6917,8 +6958,8 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) mddev->bitmap_info.default_offset; mddev->bitmap_info.space = mddev->bitmap_info.default_space; - mddev->pers->quiesce(mddev, 1); bitmap = bitmap_create(mddev, -1); + mddev_suspend(mddev); if (!IS_ERR(bitmap)) { mddev->bitmap = bitmap; rv = bitmap_load(mddev); @@ -6926,7 +6967,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) rv = PTR_ERR(bitmap); if (rv) bitmap_destroy(mddev); - mddev->pers->quiesce(mddev, 0); + mddev_resume(mddev); } else { /* remove the bitmap */ if (!mddev->bitmap) { @@ -6949,9 +6990,9 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) mddev->bitmap_info.nodes = 0; md_cluster_ops->leave(mddev); } - mddev->pers->quiesce(mddev, 1); + mddev_suspend(mddev); bitmap_destroy(mddev); - mddev->pers->quiesce(mddev, 0); + mddev_resume(mddev); mddev->bitmap_info.offset = 0; } } @@ -7468,8 +7509,8 @@ void md_wakeup_thread(struct md_thread *thread) { if (thread) { pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); - if (!test_and_set_bit(THREAD_WAKEUP, &thread->flags)) - wake_up(&thread->wqueue); + set_bit(THREAD_WAKEUP, &thread->flags); + wake_up(&thread->wqueue); } } EXPORT_SYMBOL(md_wakeup_thread); @@ -8006,6 +8047,7 @@ EXPORT_SYMBOL(md_done_sync); bool md_write_start(struct mddev *mddev, struct bio *bi) { int did_change = 0; + if (bio_data_dir(bi) != WRITE) return true; @@ -8038,8 +8080,11 @@ bool md_write_start(struct mddev *mddev, struct bio *bi) rcu_read_unlock(); if (did_change) sysfs_notify_dirent_safe(mddev->sysfs_state); + if (!mddev->has_superblocks) + return true; wait_event(mddev->sb_wait, - 
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && !mddev->suspended); + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || + mddev->suspended); if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { percpu_ref_put(&mddev->writes_pending); return false; @@ -8110,7 +8155,6 @@ void md_allow_write(struct mddev *mddev) sysfs_notify_dirent_safe(mddev->sysfs_state); /* wait for the dirty state to be recorded in the metadata */ wait_event(mddev->sb_wait, - !test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) && !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); } else spin_unlock(&mddev->lock); @@ -8496,6 +8540,19 @@ void md_do_sync(struct md_thread *thread) set_mask_bits(&mddev->sb_flags, 0, BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && + !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && + mddev->delta_disks > 0 && + mddev->pers->finish_reshape && + mddev->pers->size && + mddev->queue) { + mddev_lock_nointr(mddev); + md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); + mddev_unlock(mddev); + set_capacity(mddev->gendisk, mddev->array_sectors); + revalidate_disk(mddev->gendisk); + } + spin_lock(&mddev->lock); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { /* We completed so min/max setting can be forgotten if used. */ @@ -8522,6 +8579,10 @@ static int remove_and_add_spares(struct mddev *mddev, int removed = 0; bool remove_some = false; + if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + /* Mustn't remove devices when resync thread is running */ + return 0; + rdev_for_each(rdev, mddev) { if ((this == NULL || rdev == this) && rdev->raid_disk >= 0 && @@ -8551,6 +8612,7 @@ static int remove_and_add_spares(struct mddev *mddev, if (mddev->pers->hot_remove_disk( mddev, rdev) == 0) { sysfs_unlink_rdev(mddev, rdev); + rdev->saved_raid_disk = rdev->raid_disk; rdev->raid_disk = -1; removed++; } @@ -8813,6 +8875,16 @@ void md_check_recovery(struct mddev *mddev) unlock: wake_up(&mddev->sb_wait); mddev_unlock(mddev); + } else if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) { + /* Write superblock - thread that called mddev_suspend() + * holds reconfig_mutex for us. + */ + set_bit(MD_UPDATING_SB, &mddev->flags); + smp_mb__after_atomic(); + if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags)) + md_update_sb(mddev, 0); + clear_bit_unlock(MD_UPDATING_SB, &mddev->flags); + wake_up(&mddev->sb_wait); } } EXPORT_SYMBOL(md_check_recovery); @@ -9274,11 +9346,11 @@ static __exit void md_exit(void) subsys_initcall(md_init); module_exit(md_exit) -static int get_ro(char *buffer, struct kernel_param *kp) +static int get_ro(char *buffer, const struct kernel_param *kp) { return sprintf(buffer, "%d", start_readonly); } -static int set_ro(const char *val, struct kernel_param *kp) +static int set_ro(const char *val, const struct kernel_param *kp) { return kstrtouint(val, 10, (unsigned int *)&start_readonly); } diff --git a/drivers/md/md.h b/drivers/md/md.h index d8287d3cd1bf..11696aba94e3 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -237,6 +237,12 @@ enum mddev_flags { */ MD_HAS_PPL, /* The raid array has PPL feature set */ MD_HAS_MULTIPLE_PPLS, /* The raid array has multiple PPLs feature set */ + MD_ALLOW_SB_UPDATE, /* md_check_recovery is allowed to update + * the metadata without taking reconfig_mutex. + */ + MD_UPDATING_SB, /* md_check_recovery is updating the metadata + * without explicitly holding reconfig_mutex. 
+ */ }; enum mddev_sb_flags { @@ -462,6 +468,8 @@ struct mddev { void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); struct md_cluster_info *cluster_info; unsigned int good_device_nr; /* good device num within cluster raid */ + + bool has_superblocks:1; }; enum recovery_flags { @@ -538,12 +546,11 @@ struct md_personality int (*check_reshape) (struct mddev *mddev); int (*start_reshape) (struct mddev *mddev); void (*finish_reshape) (struct mddev *mddev); - /* quiesce moves between quiescence states - * 0 - fully active - * 1 - no new requests allowed - * others - reserved + /* quiesce suspends or resumes internal processing. + * 1 - stop new actions and wait for action io to complete + * 0 - return to normal behaviour */ - void (*quiesce) (struct mddev *mddev, int state); + void (*quiesce) (struct mddev *mddev, int quiesce); /* takeover is used to transition an array from one * personality to another. The new personality must be able * to handle the data in the current layout. diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index f21ce6a3d4cf..58b319757b1e 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -683,23 +683,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key) pn->keys[1] = rn->keys[0]; memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64)); - /* - * rejig the spine. This is ugly, since it knows too - * much about the spine - */ - if (s->nodes[0] != new_parent) { - unlock_block(s->info, s->nodes[0]); - s->nodes[0] = new_parent; - } - if (key < le64_to_cpu(rn->keys[0])) { - unlock_block(s->info, right); - s->nodes[1] = left; - } else { - unlock_block(s->info, left); - s->nodes[1] = right; - } - s->count = 2; - + unlock_block(s->info, left); + unlock_block(s->info, right); return 0; } diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 5a00fc118470..5ecba9eef441 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -768,7 +768,7 @@ static void *raid0_takeover(struct mddev *mddev) return ERR_PTR(-EINVAL); } -static void raid0_quiesce(struct mddev *mddev, int state) +static void raid0_quiesce(struct mddev *mddev, int quiesce) { } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f3f3e40dc9d8..78d830763704 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -810,11 +810,15 @@ static void flush_pending_writes(struct r1conf *conf) spin_lock_irq(&conf->device_lock); if (conf->pending_bio_list.head) { + struct blk_plug plug; struct bio *bio; + bio = bio_list_get(&conf->pending_bio_list); conf->pending_count = 0; spin_unlock_irq(&conf->device_lock); + blk_start_plug(&plug); flush_bio_list(conf, bio); + blk_finish_plug(&plug); } else spin_unlock_irq(&conf->device_lock); } @@ -990,14 +994,6 @@ static void wait_barrier(struct r1conf *conf, sector_t sector_nr) _wait_barrier(conf, idx); } -static void wait_all_barriers(struct r1conf *conf) -{ - int idx; - - for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) - _wait_barrier(conf, idx); -} - static void _allow_barrier(struct r1conf *conf, int idx) { atomic_dec(&conf->nr_pending[idx]); @@ -1011,14 +1007,6 @@ static void allow_barrier(struct r1conf *conf, sector_t sector_nr) _allow_barrier(conf, idx); } -static void allow_all_barriers(struct r1conf *conf) -{ - int idx; - - for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) - _allow_barrier(conf, idx); -} - /* conf->resync_lock should be held */ static int get_unqueued_pending(struct r1conf *conf) { @@ -1310,11 +1298,9 @@ static void raid1_write_request(struct 
mddev *mddev, struct bio *bio, */ - if ((bio_end_sector(bio) > mddev->suspend_lo && - bio->bi_iter.bi_sector < mddev->suspend_hi) || - (mddev_is_clustered(mddev) && + if (mddev_is_clustered(mddev) && md_cluster_ops->area_resyncing(mddev, WRITE, - bio->bi_iter.bi_sector, bio_end_sector(bio)))) { + bio->bi_iter.bi_sector, bio_end_sector(bio))) { /* * As the suspend_* range is controlled by userspace, we want @@ -1325,12 +1311,10 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, sigset_t full, old; prepare_to_wait(&conf->wait_barrier, &w, TASK_INTERRUPTIBLE); - if (bio_end_sector(bio) <= mddev->suspend_lo || - bio->bi_iter.bi_sector >= mddev->suspend_hi || - (mddev_is_clustered(mddev) && - !md_cluster_ops->area_resyncing(mddev, WRITE, - bio->bi_iter.bi_sector, - bio_end_sector(bio)))) + if (!mddev_is_clustered(mddev) || + !md_cluster_ops->area_resyncing(mddev, WRITE, + bio->bi_iter.bi_sector, + bio_end_sector(bio))) break; sigfillset(&full); sigprocmask(SIG_BLOCK, &full, &old); @@ -1654,8 +1638,12 @@ static void print_conf(struct r1conf *conf) static void close_sync(struct r1conf *conf) { - wait_all_barriers(conf); - allow_all_barriers(conf); + int idx; + + for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) { + _wait_barrier(conf, idx); + _allow_barrier(conf, idx); + } mempool_destroy(conf->r1buf_pool); conf->r1buf_pool = NULL; @@ -1821,6 +1809,17 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) struct md_rdev *repl = conf->mirrors[conf->raid_disks + number].rdev; freeze_array(conf, 0); + if (atomic_read(&repl->nr_pending)) { + /* It means that some queued IO of retry_list + * hold repl. Thus, we cannot set replacement + * as NULL, avoiding rdev NULL pointer + * dereference in sync_request_write and + * handle_write_finished. 
+ */ + err = -EBUSY; + unfreeze_array(conf); + goto abort; + } clear_bit(Replacement, &repl->flags); p->rdev = repl; conf->mirrors[conf->raid_disks + number].rdev = NULL; @@ -2463,6 +2462,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) fix_read_error(conf, r1_bio->read_disk, r1_bio->sector, r1_bio->sectors); unfreeze_array(conf); + } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) { + md_error(mddev, rdev); } else { r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; } @@ -3277,21 +3278,14 @@ static int raid1_reshape(struct mddev *mddev) return 0; } -static void raid1_quiesce(struct mddev *mddev, int state) +static void raid1_quiesce(struct mddev *mddev, int quiesce) { struct r1conf *conf = mddev->private; - switch(state) { - case 2: /* wake for suspend */ - wake_up(&conf->wait_barrier); - break; - case 1: + if (quiesce) freeze_array(conf, 0); - break; - case 0: + else unfreeze_array(conf); - break; - } } static void *raid1_takeover(struct mddev *mddev) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 374df5796649..b20c23f970f4 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -890,10 +890,13 @@ static void flush_pending_writes(struct r10conf *conf) spin_lock_irq(&conf->device_lock); if (conf->pending_bio_list.head) { + struct blk_plug plug; struct bio *bio; + bio = bio_list_get(&conf->pending_bio_list); conf->pending_count = 0; spin_unlock_irq(&conf->device_lock); + blk_start_plug(&plug); /* flush any pending bitmap writes to disk * before proceeding w/ I/O */ bitmap_unplug(conf->mddev->bitmap); @@ -914,6 +917,7 @@ static void flush_pending_writes(struct r10conf *conf) generic_make_request(bio); bio = next; } + blk_finish_plug(&plug); } else spin_unlock_irq(&conf->device_lock); } @@ -2621,7 +2625,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) for (m = 0; m < conf->copies; m++) { int dev = r10_bio->devs[m].devnum; rdev = conf->mirrors[dev].rdev; - if (r10_bio->devs[m].bio == NULL) + if (r10_bio->devs[m].bio == NULL || + r10_bio->devs[m].bio->bi_end_io == NULL) continue; if (!r10_bio->devs[m].bio->bi_status) { rdev_clear_badblocks( @@ -2636,7 +2641,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) md_error(conf->mddev, rdev); } rdev = conf->mirrors[dev].replacement; - if (r10_bio->devs[m].repl_bio == NULL) + if (r10_bio->devs[m].repl_bio == NULL || + r10_bio->devs[m].repl_bio->bi_end_io == NULL) continue; if (!r10_bio->devs[m].repl_bio->bi_status) { @@ -3832,18 +3838,14 @@ static void raid10_free(struct mddev *mddev, void *priv) kfree(conf); } -static void raid10_quiesce(struct mddev *mddev, int state) +static void raid10_quiesce(struct mddev *mddev, int quiesce) { struct r10conf *conf = mddev->private; - switch(state) { - case 1: + if (quiesce) raise_barrier(conf, 0); - break; - case 0: + else lower_barrier(conf); - break; - } } static int raid10_resize(struct mddev *mddev, sector_t sectors) @@ -4687,17 +4689,11 @@ static void raid10_finish_reshape(struct mddev *mddev) return; if (mddev->delta_disks > 0) { - sector_t size = raid10_size(mddev, 0, 0); - md_set_array_sectors(mddev, size); if (mddev->recovery_cp > mddev->resync_max_sectors) { mddev->recovery_cp = mddev->resync_max_sectors; set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } - mddev->resync_max_sectors = size; - if (mddev->queue) { - set_capacity(mddev->gendisk, mddev->array_sectors); - revalidate_disk(mddev->gendisk); - } + mddev->resync_max_sectors = mddev->array_sectors; } else { int d; 
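/*
 * The raid1_write_request() hunk above drops the open-coded
 * suspend_lo/suspend_hi wait from the personality; that check now lives
 * in md.c's new is_suspended() helper (see the md.c hunk earlier), and
 * the personalities keep only a boolean quiesce. A minimal, self-contained
 * userspace sketch of the overlap test follows -- the toy_* names are
 * illustrative, not kernel API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* A write must wait iff the array is suspended or it overlaps [lo, hi). */
static bool toy_is_suspended(bool array_suspended, bool is_write,
			     sector_t lo, sector_t hi,
			     sector_t start, sector_t end /* exclusive */)
{
	if (array_suspended)
		return true;
	if (!is_write)
		return false;	/* reads are never held back */
	if (lo >= hi)
		return false;	/* empty suspend window */
	if (start >= hi)
		return false;	/* bio entirely above the window */
	if (end < lo)
		return false;	/* bio entirely below the window */
	return true;		/* note: end == lo still counts as overlap, as in the hunk */
}

int main(void)
{
	/* Window [100, 200): a write covering [150, 160) must wait ... */
	printf("%d\n", toy_is_suspended(false, true, 100, 200, 150, 160)); /* 1 */
	/* ... while one covering [200, 210) proceeds. */
	printf("%d\n", toy_is_suspended(false, true, 100, 200, 200, 210)); /* 0 */
	return 0;
}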
rcu_read_lock(); diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 0b7406ac8ce1..0d535b40cb3b 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -693,6 +693,8 @@ static void r5c_disable_writeback_async(struct work_struct *work) struct r5l_log *log = container_of(work, struct r5l_log, disable_writeback_work); struct mddev *mddev = log->rdev->mddev; + struct r5conf *conf = mddev->private; + int locked = 0; if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) return; @@ -701,11 +703,15 @@ static void r5c_disable_writeback_async(struct work_struct *work) /* wait superblock change before suspend */ wait_event(mddev->sb_wait, - !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); - - mddev_suspend(mddev); - log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; - mddev_resume(mddev); + conf->log == NULL || + (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && + (locked = mddev_trylock(mddev)))); + if (locked) { + mddev_suspend(mddev); + log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; + mddev_resume(mddev); + mddev_unlock(mddev); + } } static void r5l_submit_current_io(struct r5l_log *log) @@ -1583,21 +1589,21 @@ void r5l_wake_reclaim(struct r5l_log *log, sector_t space) md_wakeup_thread(log->reclaim_thread); } -void r5l_quiesce(struct r5l_log *log, int state) +void r5l_quiesce(struct r5l_log *log, int quiesce) { struct mddev *mddev; - if (!log || state == 2) + if (!log) return; - if (state == 0) - kthread_unpark(log->reclaim_thread->tsk); - else if (state == 1) { + + if (quiesce) { /* make sure r5l_write_super_and_discard_space exits */ mddev = log->rdev->mddev; wake_up(&mddev->sb_wait); kthread_park(log->reclaim_thread->tsk); r5l_wake_reclaim(log, MaxSector); r5l_do_reclaim(log); - } + } else + kthread_unpark(log->reclaim_thread->tsk); } bool r5l_log_disk_error(struct r5conf *conf) @@ -2571,31 +2577,22 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page) int r5c_journal_mode_set(struct mddev *mddev, int mode) { struct r5conf *conf; - int err; if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH || mode > R5C_JOURNAL_MODE_WRITE_BACK) return -EINVAL; - err = mddev_lock(mddev); - if (err) - return err; conf = mddev->private; - if (!conf || !conf->log) { - mddev_unlock(mddev); + if (!conf || !conf->log) return -ENODEV; - } if (raid5_calc_degraded(conf) > 0 && - mode == R5C_JOURNAL_MODE_WRITE_BACK) { - mddev_unlock(mddev); + mode == R5C_JOURNAL_MODE_WRITE_BACK) return -EINVAL; - } mddev_suspend(mddev); conf->log->r5c_journal_mode = mode; mddev_resume(mddev); - mddev_unlock(mddev); pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n", mdname(mddev), mode, r5c_journal_mode_str[mode]); @@ -2608,6 +2605,7 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev, { int mode = ARRAY_SIZE(r5c_journal_mode_str); size_t len = length; + int ret; if (len < 2) return -EINVAL; @@ -2619,8 +2617,12 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev, if (strlen(r5c_journal_mode_str[mode]) == len && !strncmp(page, r5c_journal_mode_str[mode], len)) break; - - return r5c_journal_mode_set(mddev, mode) ?: length; + ret = mddev_lock(mddev); + if (ret) + return ret; + ret = r5c_journal_mode_set(mddev, mode); + mddev_unlock(mddev); + return ret ?: length; } struct md_sysfs_entry @@ -3165,6 +3167,8 @@ void r5l_exit_log(struct r5conf *conf) conf->log = NULL; synchronize_rcu(); + /* Ensure disable_writeback_work wakes up and exits */ + wake_up(&conf->mddev->sb_wait); flush_work(&log->disable_writeback_work); 
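/*
 * The r5c_disable_writeback_async() hunk above embeds mddev_trylock() in
 * the wait_event() condition: the worker proceeds only once the pending
 * superblock update has cleared AND it wins the reconfig mutex, recording
 * the outcome in 'locked' so it knows whether to unlock afterwards. A
 * compact userspace sketch of that pattern, assuming illustrative toy_*
 * names rather than the kernel API:
 */
#include <stdbool.h>
#include <stdio.h>

static bool sb_change_pending;	/* stands in for MD_SB_CHANGE_PENDING */
static bool reconfig_locked;	/* stands in for mddev->reconfig_mutex */

static bool toy_trylock(void)
{
	if (reconfig_locked)
		return false;
	reconfig_locked = true;
	return true;
}

/* One evaluation of the wait_event() condition. */
static bool toy_condition(bool log_gone, int *locked)
{
	return log_gone ||
	       (!sb_change_pending && (*locked = toy_trylock()));
}

int main(void)
{
	int locked = 0;

	/* The kernel re-evaluates this inside wait_event(); here it is polled. */
	while (!toy_condition(false, &locked))
		;	/* would sleep on mddev->sb_wait between evaluations */

	if (locked) {
		printf("locked: safe to switch journal to write-through\n");
		reconfig_locked = false;	/* i.e. mddev_unlock() */
	}
	return 0;
}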
md_unregister_thread(&log->reclaim_thread); mempool_destroy(log->meta_pool); diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h index 7f9ad5f7cda0..284578b0a349 100644 --- a/drivers/md/raid5-log.h +++ b/drivers/md/raid5-log.h @@ -9,7 +9,7 @@ extern void r5l_write_stripe_run(struct r5l_log *log); extern void r5l_flush_stripe_to_raid(struct r5l_log *log); extern void r5l_stripe_write_finished(struct stripe_head *sh); extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio); -extern void r5l_quiesce(struct r5l_log *log, int state); +extern void r5l_quiesce(struct r5l_log *log, int quiesce); extern bool r5l_log_disk_error(struct r5conf *conf); extern bool r5c_is_writeback(struct r5l_log *log); extern int diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index cd026c88f7ef..702b76008886 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -758,7 +758,8 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e, (unsigned long long)sector); rdev = conf->disks[dd_idx].rdev; - if (!rdev) { + if (!rdev || (!test_bit(In_sync, &rdev->flags) && + sector >= rdev->recovery_offset)) { pr_debug("%s:%*s data member disk %d missing\n", __func__, indent, "", dd_idx); update_parity = false; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 928e24a07133..07ca2fd10189 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1818,8 +1818,11 @@ static void ops_complete_reconstruct(void *stripe_head_ref) struct r5dev *dev = &sh->dev[i]; if (dev->written || i == pd_idx || i == qd_idx) { - if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) + if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) { set_bit(R5_UPTODATE, &dev->flags); + if (test_bit(STRIPE_EXPAND_READY, &sh->state)) + set_bit(R5_Expanded, &dev->flags); + } if (fua) set_bit(R5_WantFUA, &dev->flags); if (sync) @@ -2194,15 +2197,16 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) static int grow_stripes(struct r5conf *conf, int num) { struct kmem_cache *sc; + size_t namelen = sizeof(conf->cache_name[0]); int devs = max(conf->raid_disks, conf->previous_raid_disks); if (conf->mddev->gendisk) - sprintf(conf->cache_name[0], + snprintf(conf->cache_name[0], namelen, "raid%d-%s", conf->level, mdname(conf->mddev)); else - sprintf(conf->cache_name[0], + snprintf(conf->cache_name[0], namelen, "raid%d-%p", conf->level, conf->mddev); - sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); + snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]); conf->active_name = 0; sc = kmem_cache_create(conf->cache_name[conf->active_name], @@ -2675,13 +2679,13 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) pr_debug("raid456: error called\n"); spin_lock_irqsave(&conf->device_lock, flags); + set_bit(Faulty, &rdev->flags); clear_bit(In_sync, &rdev->flags); mddev->degraded = raid5_calc_degraded(conf); spin_unlock_irqrestore(&conf->device_lock, flags); set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(Blocked, &rdev->flags); - set_bit(Faulty, &rdev->flags); set_mask_bits(&mddev->sb_flags, 0, BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n" @@ -5682,28 +5686,6 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi) goto retry; } - if (rw == WRITE && - logical_sector >= mddev->suspend_lo && - logical_sector < mddev->suspend_hi) { - raid5_release_stripe(sh); - /* As the suspend_* range is controlled by - * userspace, we want an interruptible - * wait. 
- */ - prepare_to_wait(&conf->wait_for_overlap, - &w, TASK_INTERRUPTIBLE); - if (logical_sector >= mddev->suspend_lo && - logical_sector < mddev->suspend_hi) { - sigset_t full, old; - sigfillset(&full); - sigprocmask(SIG_BLOCK, &full, &old); - schedule(); - sigprocmask(SIG_SETMASK, &old, NULL); - do_prepare = true; - } - goto retry; - } - if (test_bit(STRIPE_EXPANDING, &sh->state) || !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { /* Stripe is busy expanding or @@ -7156,6 +7138,13 @@ static int raid5_run(struct mddev *mddev) min_offset_diff = diff; } + if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) && + (mddev->bitmap_info.offset || mddev->bitmap_info.file)) { + pr_notice("md/raid:%s: array cannot have both journal and bitmap\n", + mdname(mddev)); + return -EINVAL; + } + if (mddev->reshape_position != MaxSector) { /* Check that we can continue the reshape. * Difficulties arise if the stripe we would write to @@ -7990,13 +7979,7 @@ static void raid5_finish_reshape(struct mddev *mddev) if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { - if (mddev->delta_disks > 0) { - md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); - if (mddev->queue) { - set_capacity(mddev->gendisk, mddev->array_sectors); - revalidate_disk(mddev->gendisk); - } - } else { + if (mddev->delta_disks <= 0) { int d; spin_lock_irq(&conf->device_lock); mddev->degraded = raid5_calc_degraded(conf); @@ -8020,16 +8003,12 @@ static void raid5_finish_reshape(struct mddev *mddev) } } -static void raid5_quiesce(struct mddev *mddev, int state) +static void raid5_quiesce(struct mddev *mddev, int quiesce) { struct r5conf *conf = mddev->private; - switch(state) { - case 2: /* resume for a suspend */ - wake_up(&conf->wait_for_overlap); - break; - - case 1: /* stop all writes */ + if (quiesce) { + /* stop all writes */ lock_all_device_hash_locks_irq(conf); /* '2' tells resync/reshape to pause so that all * active stripes can drain @@ -8045,17 +8024,15 @@ static void raid5_quiesce(struct mddev *mddev, int state) unlock_all_device_hash_locks_irq(conf); /* allow reshape to continue */ wake_up(&conf->wait_for_overlap); - break; - - case 0: /* re-enable writes */ + } else { + /* re-enable writes */ lock_all_device_hash_locks_irq(conf); conf->quiesce = 0; wake_up(&conf->wait_for_quiescent); wake_up(&conf->wait_for_overlap); unlock_all_device_hash_locks_irq(conf); - break; } - r5l_quiesce(conf->log, state); + r5l_quiesce(conf->log, quiesce); } static void *raid45_takeover_raid0(struct mddev *mddev, int level) diff --git a/drivers/media/common/siano/smsendian.c b/drivers/media/common/siano/smsendian.c index bfe831c10b1c..b95a631f23f9 100644 --- a/drivers/media/common/siano/smsendian.c +++ b/drivers/media/common/siano/smsendian.c @@ -35,7 +35,7 @@ void smsendian_handle_tx_message(void *buffer) switch (msg->x_msg_header.msg_type) { case MSG_SMS_DATA_DOWNLOAD_REQ: { - msg->msg_data[0] = le32_to_cpu(msg->msg_data[0]); + msg->msg_data[0] = le32_to_cpu((__force __le32)(msg->msg_data[0])); break; } @@ -44,7 +44,7 @@ void smsendian_handle_tx_message(void *buffer) sizeof(struct sms_msg_hdr))/4; for (i = 0; i < msg_words; i++) - msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]); + msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]); break; } @@ -64,7 +64,7 @@ void smsendian_handle_rx_message(void *buffer) { struct sms_version_res *ver = (struct sms_version_res *) msg; - ver->chip_model = le16_to_cpu(ver->chip_model); + ver->chip_model = le16_to_cpu((__force __le16)ver->chip_model); break; } @@ -81,7 +81,7 @@ void 
smsendian_handle_rx_message(void *buffer) sizeof(struct sms_msg_hdr))/4; for (i = 0; i < msg_words; i++) - msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]); + msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]); break; } @@ -95,9 +95,9 @@ void smsendian_handle_message_header(void *msg) #ifdef __BIG_ENDIAN struct sms_msg_hdr *phdr = (struct sms_msg_hdr *)msg; - phdr->msg_type = le16_to_cpu(phdr->msg_type); - phdr->msg_length = le16_to_cpu(phdr->msg_length); - phdr->msg_flags = le16_to_cpu(phdr->msg_flags); + phdr->msg_type = le16_to_cpu((__force __le16)phdr->msg_type); + phdr->msg_length = le16_to_cpu((__force __le16)phdr->msg_length); + phdr->msg_flags = le16_to_cpu((__force __le16)phdr->msg_flags); #endif /* __BIG_ENDIAN */ } EXPORT_SYMBOL_GPL(smsendian_handle_message_header); diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c index 18e4230865be..51009b2718a3 100644 --- a/drivers/media/dvb-core/dmxdev.c +++ b/drivers/media/dvb-core/dmxdev.c @@ -1055,7 +1055,7 @@ static int dvb_demux_do_ioctl(struct file *file, break; default: - ret = -EINVAL; + ret = -ENOTTY; break; } mutex_unlock(&dmxdev->mutex); diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 9139d01ba7ed..f7d4ec37fdbc 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -145,15 +145,13 @@ static void __dvb_frontend_free(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; - if (!fepriv) - return; - - dvb_free_device(fepriv->dvbdev); + if (fepriv) + dvb_free_device(fepriv->dvbdev); dvb_frontend_invoke_release(fe, fe->ops.release); - kfree(fepriv); - fe->frontend_priv = NULL; + if (fepriv) + kfree(fepriv); } static void dvb_frontend_free(struct kref *ref) @@ -277,8 +275,20 @@ static void dvb_frontend_add_event(struct dvb_frontend *fe, wake_up_interruptible (&events->wait_queue); } +static int dvb_frontend_test_event(struct dvb_frontend_private *fepriv, + struct dvb_fe_events *events) +{ + int ret; + + up(&fepriv->sem); + ret = events->eventw != events->eventr; + down(&fepriv->sem); + + return ret; +} + static int dvb_frontend_get_event(struct dvb_frontend *fe, - struct dvb_frontend_event *event, int flags) + struct dvb_frontend_event *event, int flags) { struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dvb_fe_events *events = &fepriv->events; @@ -296,13 +306,8 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe, if (flags & O_NONBLOCK) return -EWOULDBLOCK; - up(&fepriv->sem); - - ret = wait_event_interruptible (events->wait_queue, - events->eventw != events->eventr); - - if (down_interruptible (&fepriv->sem)) - return -ERESTARTSYS; + ret = wait_event_interruptible(events->wait_queue, + dvb_frontend_test_event(fepriv, events)); if (ret < 0) return ret; diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c index 0ee0df53b91b..79d5d89bc95e 100644 --- a/drivers/media/dvb-frontends/ascot2e.c +++ b/drivers/media/dvb-frontends/ascot2e.c @@ -155,7 +155,9 @@ static int ascot2e_write_regs(struct ascot2e_priv *priv, static int ascot2e_write_reg(struct ascot2e_priv *priv, u8 reg, u8 val) { - return ascot2e_write_regs(priv, reg, &val, 1); + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return ascot2e_write_regs(priv, reg, &tmp, 1); } static int ascot2e_read_regs(struct ascot2e_priv *priv, diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c index 
48ee9bc00c06..ccbd84fd6428 100644 --- a/drivers/media/dvb-frontends/cxd2841er.c +++ b/drivers/media/dvb-frontends/cxd2841er.c @@ -257,7 +257,9 @@ static int cxd2841er_write_regs(struct cxd2841er_priv *priv, static int cxd2841er_write_reg(struct cxd2841er_priv *priv, u8 addr, u8 reg, u8 val) { - return cxd2841er_write_regs(priv, addr, reg, &val, 1); + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return cxd2841er_write_regs(priv, addr, reg, &tmp, 1); } static int cxd2841er_read_regs(struct cxd2841er_priv *priv, diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c index 4bf5a551ba40..2ab8d83e5576 100644 --- a/drivers/media/dvb-frontends/helene.c +++ b/drivers/media/dvb-frontends/helene.c @@ -331,7 +331,9 @@ static int helene_write_regs(struct helene_priv *priv, static int helene_write_reg(struct helene_priv *priv, u8 reg, u8 val) { - return helene_write_regs(priv, reg, &val, 1); + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return helene_write_regs(priv, reg, &tmp, 1); } static int helene_read_regs(struct helene_priv *priv, diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c index 68d759c4c52e..5c8b405f2ddc 100644 --- a/drivers/media/dvb-frontends/horus3a.c +++ b/drivers/media/dvb-frontends/horus3a.c @@ -89,7 +89,9 @@ static int horus3a_write_regs(struct horus3a_priv *priv, static int horus3a_write_reg(struct horus3a_priv *priv, u8 reg, u8 val) { - return horus3a_write_regs(priv, reg, &val, 1); + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return horus3a_write_regs(priv, reg, &tmp, 1); } static int horus3a_enter_power_save(struct horus3a_priv *priv) diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c index 5bb1e73a10b4..ce7c443d3eac 100644 --- a/drivers/media/dvb-frontends/itd1000.c +++ b/drivers/media/dvb-frontends/itd1000.c @@ -95,8 +95,9 @@ static int itd1000_read_reg(struct itd1000_state *state, u8 reg) static inline int itd1000_write_reg(struct itd1000_state *state, u8 r, u8 v) { - int ret = itd1000_write_regs(state, r, &v, 1); - state->shadow[r] = v; + u8 tmp = v; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + int ret = itd1000_write_regs(state, r, &tmp, 1); + state->shadow[r] = tmp; return ret; } diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c index c9b1eb38444e..fbb3b2f49d2d 100644 --- a/drivers/media/dvb-frontends/lgdt3306a.c +++ b/drivers/media/dvb-frontends/lgdt3306a.c @@ -1767,7 +1767,13 @@ static void lgdt3306a_release(struct dvb_frontend *fe) struct lgdt3306a_state *state = fe->demodulator_priv; dbg_info("\n"); - kfree(state); + + /* + * If state->muxc is not NULL, then we are an i2c device + * and lgdt3306a_remove will clean up state + */ + if (!state->muxc) + kfree(state); } static const struct dvb_frontend_ops lgdt3306a_ops; @@ -2168,7 +2174,7 @@ static int lgdt3306a_probe(struct i2c_client *client, sizeof(struct lgdt3306a_config)); config->i2c_addr = client->addr; - fe = lgdt3306a_attach(config, client->adapter); + fe = dvb_attach(lgdt3306a_attach, config, client->adapter); if (fe == NULL) { ret = -ENODEV; goto err_fe; diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c index 50bce68ffd66..65d157fe76d1 100644 --- a/drivers/media/dvb-frontends/m88ds3103.c +++ b/drivers/media/dvb-frontends/m88ds3103.c @@ -1262,11 +1262,12 @@ static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan) * 
New users must use I2C client binding directly! */ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg, - struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter) + struct i2c_adapter *i2c, + struct i2c_adapter **tuner_i2c_adapter) { struct i2c_client *client; struct i2c_board_info board_info; - struct m88ds3103_platform_data pdata; + struct m88ds3103_platform_data pdata = {}; pdata.clk = cfg->clock; pdata.i2c_wr_max = cfg->i2c_wr_max; @@ -1409,6 +1410,8 @@ static int m88ds3103_probe(struct i2c_client *client, case M88DS3103_CHIP_ID: break; default: + ret = -ENODEV; + dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id); goto err_kfree; } diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c index 961b9a2508e0..0b23cbc021b8 100644 --- a/drivers/media/dvb-frontends/mt312.c +++ b/drivers/media/dvb-frontends/mt312.c @@ -142,7 +142,10 @@ static inline int mt312_readreg(struct mt312_state *state, static inline int mt312_writereg(struct mt312_state *state, const enum mt312_reg_addr reg, const u8 val) { - return mt312_write(state, reg, &val, 1); + u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + + return mt312_write(state, reg, &tmp, 1); } static inline u32 mt312_div(u32 a, u32 b) diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c index 172fc367ccaa..24840a2e5a75 100644 --- a/drivers/media/dvb-frontends/si2168.c +++ b/drivers/media/dvb-frontends/si2168.c @@ -14,6 +14,8 @@ * GNU General Public License for more details. */ +#include + #include "si2168_priv.h" static const struct dvb_frontend_ops si2168_ops; @@ -435,6 +437,7 @@ static int si2168_init(struct dvb_frontend *fe) if (ret) goto err; + udelay(100); memcpy(cmd.args, "\x85", 1); cmd.wlen = 1; cmd.rlen = 1; diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c index 02347598277a..db5dde3215f0 100644 --- a/drivers/media/dvb-frontends/stb0899_drv.c +++ b/drivers/media/dvb-frontends/stb0899_drv.c @@ -539,7 +539,8 @@ int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data, int stb0899_write_reg(struct stb0899_state *state, unsigned int reg, u8 data) { - return stb0899_write_regs(state, reg, &data, 1); + u8 tmp = data; + return stb0899_write_regs(state, reg, &tmp, 1); } /* diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c index 17a955d0031b..75509bec66e4 100644 --- a/drivers/media/dvb-frontends/stb6100.c +++ b/drivers/media/dvb-frontends/stb6100.c @@ -226,12 +226,14 @@ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int st static int stb6100_write_reg(struct stb6100_state *state, u8 reg, u8 data) { + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + if (unlikely(reg >= STB6100_NUMREGS)) { dprintk(verbose, FE_ERROR, 1, "Invalid register offset 0x%x", reg); return -EREMOTEIO; } - data = (data & stb6100_template[reg].mask) | stb6100_template[reg].set; - return stb6100_write_reg_range(state, &data, reg, 1); + tmp = (tmp & stb6100_template[reg].mask) | stb6100_template[reg].set; + return stb6100_write_reg_range(state, &tmp, reg, 1); } diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c index f3529df8211d..1a726196c126 100644 --- a/drivers/media/dvb-frontends/stv0367.c +++ b/drivers/media/dvb-frontends/stv0367.c @@ -166,7 +166,9 @@ int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len) static int 
stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data) { - return stv0367_writeregs(state, reg, &data, 1); + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return stv0367_writeregs(state, reg, &tmp, 1); } static u8 stv0367_readreg(struct stv0367_state *state, u16 reg) diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c index 7ef469c0c866..2695e1eb6d9c 100644 --- a/drivers/media/dvb-frontends/stv090x.c +++ b/drivers/media/dvb-frontends/stv090x.c @@ -755,7 +755,9 @@ static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8 static int stv090x_write_reg(struct stv090x_state *state, unsigned int reg, u8 data) { - return stv090x_write_regs(state, reg, &data, 1); + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return stv090x_write_regs(state, reg, &tmp, 1); } static int stv090x_i2c_gate_ctrl(struct stv090x_state *state, int enable) diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c index 66eba38f1014..7e8e01389c55 100644 --- a/drivers/media/dvb-frontends/stv6110x.c +++ b/drivers/media/dvb-frontends/stv6110x.c @@ -97,7 +97,9 @@ static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 da static int stv6110x_write_reg(struct stv6110x_state *stv6110x, u8 reg, u8 data) { - return stv6110x_write_regs(stv6110x, reg, &data, 1); + u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return stv6110x_write_regs(stv6110x, reg, &tmp, 1); } static int stv6110x_init(struct dvb_frontend *fe) diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c index 931e5c98da8a..b879e1571469 100644 --- a/drivers/media/dvb-frontends/ts2020.c +++ b/drivers/media/dvb-frontends/ts2020.c @@ -368,7 +368,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc, gain2 = clamp_t(long, gain2, 0, 13); v_agc = clamp_t(long, v_agc, 400, 1100); - *_gain = -(gain1 * 2330 + + *_gain = -((__s64)gain1 * 2330 + gain2 * 3500 + v_agc * 24 / 10 * 10 + 10000); @@ -386,7 +386,7 @@ static int ts2020_read_tuner_gain(struct dvb_frontend *fe, unsigned v_agc, gain3 = clamp_t(long, gain3, 0, 6); v_agc = clamp_t(long, v_agc, 600, 1600); - *_gain = -(gain1 * 2650 + + *_gain = -((__s64)gain1 * 2650 + gain2 * 3380 + gain3 * 2850 + v_agc * 176 / 100 * 10 - diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c index 623355fc2666..3208b866d1cb 100644 --- a/drivers/media/dvb-frontends/zl10039.c +++ b/drivers/media/dvb-frontends/zl10039.c @@ -134,7 +134,9 @@ static inline int zl10039_writereg(struct zl10039_state *state, const enum zl10039_reg_addr reg, const u8 val) { - return zl10039_write(state, reg, &val, 1); + const u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ + + return zl10039_write(state, reg, &tmp, 1); } static int zl10039_init(struct dvb_frontend *fe) diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 94153895fcd4..6fbb72174684 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -660,10 +660,17 @@ config VIDEO_OV13858 tristate "OmniVision OV13858 sensor support" depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API depends on MEDIA_CAMERA_SUPPORT + select V4L2_FWNODE ---help--- This is a Video4Linux2 sensor-level driver for the OmniVision OV13858 camera. 
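The string of near-identical write_reg hunks above (ascot2e, cxd2841er, helene, horus3a, itd1000, mt312, stb0899, stb6100, stv0367, stv090x, stv6110x, zl10039) all apply one idiom: copy the by-value register argument into a named local before handing its address to the bulk-write helper. Taking the address of the parameter directly makes every inlined call site materialise its own stack slot, which is what appears to inflate frame sizes under KASAN (the gcc report cited in the comments, PR81715). The ts2020 hunk is a separate fix in the same family of quiet bugs, widening the gain arithmetic to 64 bits before the multiplications can overflow a 32-bit int. The idiom reduced to a hedged template (the chip_* names are placeholders, not a real driver):

        static int chip_write_reg(struct chip_state *state, u8 reg, u8 val)
        {
                u8 tmp = val;   /* ordinary local: safe to take &tmp */

                return chip_write_regs(state, reg, &tmp, 1);
        }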
+config VIDEO_MAX9286 + tristate "MAX96705/MAX9286 Serializer/Deserializer" + depends on I2C && VIDEO_V4L2 && REGMAP_I2C + ---help--- + This is a MAXIM 96705 Serializer and MAXIM 9286 CSI-2 Deserializer driver. + config VIDEO_VS6624 tristate "ST VS6624 sensor support" depends on VIDEO_V4L2 && I2C @@ -777,6 +784,9 @@ config VIDEO_S5K5BAF source "drivers/media/i2c/smiapp/Kconfig" source "drivers/media/i2c/et8ek8/Kconfig" +source "drivers/media/i2c/crlmodule/Kconfig" +source "drivers/media/i2c/crlmodule-lite/Kconfig" +source "drivers/media/i2c/lc898122/Kconfig" config VIDEO_S5C73M3 tristate "Samsung S5C73M3 sensor support" @@ -901,4 +911,39 @@ endif endmenu +config VIDEO_LM3643 + tristate "LM3643 led flash controller support" + select REGMAP_I2C + +config VIDEO_BU64295 + tristate "bu64295 voice coil support" + depends on I2C && VIDEO_V4L2 + ---help--- + This is a driver for the ROHM BU64295 camera lens voice coil. + +config VIDEO_AD5816G + tristate "ad5816g voice coil support" + depends on I2C && VIDEO_V4L2 + ---help--- + This driver is for ad5816g camera lens voice coil. + +config VIDEO_VCM_STUB + tristate "stub vcm support" + depends on I2C && VIDEO_V4L2 + ---help--- + This driver is a VCM stub driver for verification. + +config VIDEO_TI964 + tristate "TI964 driver support" + depends on I2C && VIDEO_V4L2 + ---help--- + This is a driver for TI964 camera. + +config VIDEO_AS3638 + tristate "AS3638 flash driver support" + depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER + depends on MEDIA_CAMERA_SUPPORT + ---help--- + This is a driver for the AS3638 triple flash controller. + endif diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index f104650d6000..90a213dc0b78 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -70,6 +70,7 @@ obj-$(CONFIG_VIDEO_OV7640) += ov7640.o obj-$(CONFIG_VIDEO_OV7670) += ov7670.o obj-$(CONFIG_VIDEO_OV9650) += ov9650.o obj-$(CONFIG_VIDEO_OV13858) += ov13858.o +obj-$(CONFIG_VIDEO_MAX9286) += max9286.o obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o obj-$(CONFIG_VIDEO_MT9M111) += mt9m111.o obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o @@ -93,5 +94,13 @@ obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o obj-$(CONFIG_VIDEO_OV2659) += ov2659.o obj-$(CONFIG_VIDEO_TC358743) += tc358743.o - obj-$(CONFIG_SDR_MAX2175) += max2175.o +obj-$(CONFIG_VIDEO_LM3643) += lm3643.o +obj-$(CONFIG_VIDEO_BU64295) += bu64295.o +obj-$(CONFIG_VIDEO_AD5816G) += ad5816g.o +obj-$(CONFIG_VIDEO_VCM_STUB) += vcm_stub.o +obj-$(CONFIG_VIDEO_CRLMODULE) += crlmodule/ +obj-$(CONFIG_VIDEO_CRLMODULE_LITE) += crlmodule-lite/ +obj-$(CONFIG_VIDEO_TI964) += ti964.o +obj-$(CONFIG_VIDEO_LC898122) += lc898122/ +obj-$(CONFIG_VIDEO_AS3638) += as3638.o diff --git a/drivers/media/i2c/ad5816g.c b/drivers/media/i2c/ad5816g.c new file mode 100644 index 000000000000..a1d23f26a184 --- /dev/null +++ b/drivers/media/i2c/ad5816g.c @@ -0,0 +1,312 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2016 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ad5816g.h" + +struct ad5816g_device { + struct i2c_client *client; + struct v4l2_ctrl_handler ctrls_vcm; + struct v4l2_subdev subdev; +}; + +#define ctrl_to_ad5816g_dev(_ctrl) \ + container_of(_ctrl->handler, struct ad5816g_device, ctrls_vcm) +#define subdev_to_ad5816g_dev(_sd) \ + container_of(_sd, struct ad5816g_device, subdev) + +static int ad5816g_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val) +{ + struct i2c_msg 
msg[2]; + int ret; + u8 buf[2]; + + buf[0] = reg; + buf[1] = 0; + + msg[0].addr = client->addr; + msg[0].flags = 0; + msg[0].len = 1; + msg[0].buf = &buf[0]; + + msg[1].addr = client->addr; + msg[1].flags = I2C_M_RD; + msg[1].len = 1; + msg[1].buf = &buf[1]; + *val = 0; + + ret = i2c_transfer(client->adapter, msg, 2); + if (ret != 2) { + dev_err(&client->dev, "i2c_rd8 failed, num of read:%d\n", ret); + return -EIO; + } + + *val = buf[1]; + + return 0; +} + +static int ad5816g_i2c_wr8(struct i2c_client *client, u8 reg, u8 val) +{ + struct i2c_msg msg; + int ret; + u8 buf[2]; + + buf[0] = reg; + buf[1] = val; + msg.addr = client->addr; + msg.flags = 0; + msg.len = 2; + msg.buf = &buf[0]; + + ret = i2c_transfer(client->adapter, &msg, 1); + if (ret != 1) { + dev_err(&client->dev, "i2c_wr8 failed, num of write:%d\n", ret); + return -EIO; + } + + return 0; +} + +static int ad5816g_i2c_wr16(struct i2c_client *client, u8 reg, u16 val) +{ + struct i2c_msg msg; + int ret; + u8 buf[3]; + + buf[0] = reg; + buf[1] = (u8)(val >> 8); + buf[2] = (u8)(val & 0xff); + msg.addr = client->addr; + msg.flags = 0; + msg.len = 3; + msg.buf = &buf[0]; + + ret = i2c_transfer(client->adapter, &msg, 1); + if (ret != 1) { + dev_err(&client->dev, + "i2c_wr16 failed, num of write:%d\n", ret); + return -EIO; + } + + return 0; +} + +static int ad5816g_set_arc_mode(struct i2c_client *client) +{ + int ret; + + ret = ad5816g_i2c_wr8(client, AD5816G_CONTROL, AD5816G_ARC_EN); + if (ret) + return ret; + + ret = ad5816g_i2c_wr8(client, AD5816G_MODE, + AD5816G_MODE_2_5M_SWITCH_CLOCK); + if (ret) + return ret; + + ret = ad5816g_i2c_wr8(client, AD5816G_VCM_FREQ, AD5816G_DEF_FREQ); + return ret; +} + +static int ad5816g_vcm_init(struct v4l2_subdev *sd) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + int ret; + u8 vcm_id; + + /* Detect device */ + ret = ad5816g_i2c_rd8(client, AD5816G_IC_INFO, &vcm_id); + if (ret < 0) { + dev_err(&client->dev, "Failed to detect ad5816g, rd:%d\n", ret); + return -ENXIO; + } + + if (vcm_id != AD5816G_ID) { + dev_err(&client->dev, "Wrong VCM ID:0x%x, Correct ID:0x%x", + vcm_id, AD5816G_ID); + return -ENXIO; + } + + /* Software reset */ + ret = ad5816g_i2c_wr8(client, AD5816G_CONTROL, AD5816G_RESET); + usleep_range(100, 110); + + /* set VCM ARC mode */ + ret = ad5816g_set_arc_mode(client); + if (ret) { + dev_err(&client->dev, "Failed to set arc mode, ret:%d\n", ret); + return ret; + } + + /* set the VCM_THRESHOLD */ + ret = ad5816g_i2c_wr8(client, AD5816G_VCM_THRESHOLD, + AD5816G_DEF_THRESHOLD); + if (ret) { + dev_err(&client->dev, "Failed to set threshold, ret:%d\n", ret); + return ret; + } + + return 0; +} + +/* + * VCM will drop down to power-down mode, + * if vcm_code_msb and vcm_code_lsb are set to zero + * A valid value is a integer between 0 ~ 1023 + */ +static int ad5816g_t_focus_vcm(struct v4l2_subdev *sd, s32 val) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + int ret; + u16 data; + + data = clamp(val, 0, AD5816G_MAX_FOCUS_POS) & VCM_CODE_MASK; + + ret = ad5816g_i2c_wr16(client, AD5816G_VCM_CODE_MSB, data); + if (ret) { + dev_err(&client->dev, "Failed to set vcm pos:%d, ret:%d\n", + data, ret); + return ret; + } + + return 0; +} + +static int ad5816g_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct ad5816g_device *ad5816g_dev = ctrl_to_ad5816g_dev(ctrl); + + if (ctrl->id == V4L2_CID_FOCUS_ABSOLUTE) + return ad5816g_t_focus_vcm(&ad5816g_dev->subdev, ctrl->val); + else + return -EINVAL; +} + +static const struct v4l2_ctrl_ops ad5816g_vcm_ctrl_ops = { + .s_ctrl = 
ad5816g_set_ctrl, +}; + +static int ad5816g_init_controls(struct ad5816g_device *ad5816g_dev) +{ + struct v4l2_ctrl_handler *hnd = &ad5816g_dev->ctrls_vcm; + const struct v4l2_ctrl_ops *ops = &ad5816g_vcm_ctrl_ops; + struct i2c_client *client = ad5816g_dev->client; + + v4l2_ctrl_handler_init(hnd, 1); + + v4l2_ctrl_new_std(hnd, ops, V4L2_CID_FOCUS_ABSOLUTE, + 0, AD5816G_MAX_FOCUS_POS, 1, 0); + + if (hnd->error) + dev_err(&client->dev, "ad5816g_init_controls fail\n"); + + ad5816g_dev->subdev.ctrl_handler = hnd; + + return hnd->error; +} + +static void ad5816g_subdev_cleanup(struct ad5816g_device *ad5816g_dev) +{ + v4l2_ctrl_handler_free(&ad5816g_dev->ctrls_vcm); + v4l2_device_unregister_subdev(&ad5816g_dev->subdev); + media_entity_cleanup(&ad5816g_dev->subdev.entity); +} + +static const struct v4l2_subdev_ops ad5816g_ops = { }; + +static int ad5816g_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct ad5816g_device *ad5816g_dev; + int ret; + + ad5816g_dev = devm_kzalloc(&client->dev, sizeof(*ad5816g_dev), + GFP_KERNEL); + if (ad5816g_dev == NULL) + return -ENOMEM; + + i2c_set_clientdata(client, ad5816g_dev); + ad5816g_dev->client = client; + + v4l2_i2c_subdev_init(&ad5816g_dev->subdev, client, &ad5816g_ops); + ad5816g_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + + snprintf(ad5816g_dev->subdev.name, + sizeof(ad5816g_dev->subdev.name), + AD5816G_NAME " %d-%4.4x", i2c_adapter_id(client->adapter), + client->addr); + + ret = ad5816g_init_controls(ad5816g_dev); + if (ret) { + dev_err(&client->dev, "Initial controls failed: %d\n", ret); + goto err_cleanup; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + ret = media_entity_init(&ad5816g_dev->subdev.entity, 0, NULL, 0); +#else + ret = media_entity_pads_init(&ad5816g_dev->subdev.entity, 0, NULL); +#endif + if (ret < 0) + goto err_cleanup; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + ad5816g_dev->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_LENS; +#else + ad5816g_dev->subdev.entity.function = MEDIA_ENT_F_LENS; +#endif + + ret = ad5816g_vcm_init(&ad5816g_dev->subdev); + if (ret) { + dev_err(&client->dev, "ad5816g init failed\n"); + goto err_cleanup; + } + + return 0; + +err_cleanup: + ad5816g_subdev_cleanup(ad5816g_dev); + dev_err(&client->dev, "Probe failed: %d\n", ret); + + return ret; +} + +static int ad5816g_remove(struct i2c_client *client) +{ + struct ad5816g_device *ad5816g_dev = i2c_get_clientdata(client); + + ad5816g_subdev_cleanup(ad5816g_dev); + return 0; +} + +static const struct i2c_device_id ad5816g_id_table[] = { + { AD5816G_NAME, 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ad5816g_id_table); + +static struct i2c_driver ad5816g_i2c_driver = { + .driver = { + .name = AD5816G_NAME, + }, + .probe = ad5816g_probe, + .remove = ad5816g_remove, + .id_table = ad5816g_id_table, +}; + +module_i2c_driver(ad5816g_i2c_driver); + +MODULE_AUTHOR("Mingda Xu "); +MODULE_AUTHOR("Zaikuo Wang "); +MODULE_DESCRIPTION("AD5816G VCM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/i2c/ad5816g.h b/drivers/media/i2c/ad5816g.h new file mode 100644 index 000000000000..9c417a820e57 --- /dev/null +++ b/drivers/media/i2c/ad5816g.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation */ +#ifndef __AD5816G_H__ +#define __AD5816G_H__ + +#include + +#define AD5816G_ADDR 0x0e +#define AD5816G_NAME "ad5816g" + +#define AD5816G_MAX_FOCUS_POS 1023 + +/* Register Definitions */ +#define AD5816G_IC_INFO 0x00 +#define AD5816G_IC_VERSION 0x01 +#define AD5816G_CONTROL 
0x02 +#define AD5816G_VCM_CODE_MSB 0x03 +#define AD5816G_VCM_CODE_LSB 0x04 +#define AD5816G_STATUS 0x05 +#define AD5816G_MODE 0x06 +#define AD5816G_VCM_FREQ 0x07 +#define AD5816G_VCM_THRESHOLD 0x08 + +/* ARC MODE ENABLE */ +#define AD5816G_ARC_EN 0x02 +/* ARC RES2 MODE */ +#define AD5816G_ARC_RES2 0x01 +/* ARC VCM FREQ - 78.1Hz */ +#define AD5816G_DEF_FREQ 0x7a +/* ARC VCM THRESHOLD - 0x08 << 1 */ +#define AD5816G_DEF_THRESHOLD 0x64 +#define AD5816G_ID 0x24 +#define VCM_CODE_MASK 0x03ff +#define AD5816G_MODE_2_5M_SWITCH_CLOCK 0x14 +/* VCM SW RESET */ +#define AD5816G_RESET 0x01 + +#endif diff --git a/drivers/media/i2c/adv748x/adv748x-hdmi.c b/drivers/media/i2c/adv748x/adv748x-hdmi.c index 4da4253553fc..10d229a4f088 100644 --- a/drivers/media/i2c/adv748x/adv748x-hdmi.c +++ b/drivers/media/i2c/adv748x/adv748x-hdmi.c @@ -105,6 +105,9 @@ static void adv748x_hdmi_fill_format(struct adv748x_hdmi *hdmi, fmt->width = hdmi->timings.bt.width; fmt->height = hdmi->timings.bt.height; + + if (fmt->field == V4L2_FIELD_ALTERNATE) + fmt->height /= 2; } static void adv748x_fill_optional_dv_timings(struct v4l2_dv_timings *timings) diff --git a/drivers/media/i2c/ak7375.c b/drivers/media/i2c/ak7375.c new file mode 100644 index 000000000000..9150f770badd --- /dev/null +++ b/drivers/media/i2c/ak7375.c @@ -0,0 +1,300 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include + +#define AK7375_NAME "ak7375" +#define AK7375_MAX_FOCUS_POS 4095 +/* + * This sets the minimum granularity for the focus positions. + * A value of 1 gives maximum accuracy for a desired focus position + */ +#define AK7375_FOCUS_STEPS 1 +/* + * This acts as the minimum granularity of lens movement. + * Keep this value power of 2, so the control steps can be + * uniformly adjusted for gradual lens movement, with desired + * number of control steps. 
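+ * Because the step is a power of two, the suspend handler further down
+ * can round the current position to a step boundary with a plain mask,
+ * val = current_val & ~(AK7375_CTRL_STEPS - 1); for example 517 & ~63
+ * rounds down to 512.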
+ */ +#define AK7375_CTRL_STEPS 64 +#define AK7375_CTRL_DELAY_US 1000 +#define AK7375_FOCUS_VALUE(x) cpu_to_be16((x) << 4) + +#define AK7375_REG_POSITION 0x0 +#define AK7375_REG_CONT 0x2 +#define AK7375_MODE_ACTIVE 0x0 +#define AK7375_MODE_STANDBY 0x40 + +/* ak7375 device structure */ +struct ak7375_device { + struct v4l2_ctrl_handler ctrls_vcm; + struct v4l2_subdev sd; + u16 current_val; +}; + +static inline struct ak7375_device *to_ak7375_vcm(struct v4l2_ctrl *ctrl) +{ + return container_of(ctrl->handler, struct ak7375_device, ctrls_vcm); +} + +static inline struct ak7375_device *sd_to_ak7375_vcm(struct v4l2_subdev *subdev) +{ + return container_of(subdev, struct ak7375_device, sd); +} + +static int ak7375_i2c_write(struct ak7375_device *ak7375, + u8 addr, u16 data, int size) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ak7375->sd); + int ret; + u8 buf[3]; + + if (size != 1 && size != 2) + return -EINVAL; + buf[0] = addr; + buf[1] = data & 0xff; + buf[2] = data >> 8; + ret = i2c_master_send(client, (const char *)buf, size + 1); + if (ret < 0) + return ret; + if (ret != size + 1) + return -EIO; + return 0; +} + +static int ak7375_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct ak7375_device *dev_vcm = to_ak7375_vcm(ctrl); + + if (ctrl->id == V4L2_CID_FOCUS_ABSOLUTE) + return ak7375_i2c_write(dev_vcm, AK7375_REG_POSITION, + AK7375_FOCUS_VALUE(ctrl->val), 2); + + return -EINVAL; +} + +static const struct v4l2_ctrl_ops ak7375_vcm_ctrl_ops = { + .s_ctrl = ak7375_set_ctrl, +}; + +static int ak7375_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + int rval; + + rval = pm_runtime_get_sync(sd->dev); + if (rval < 0) { + pm_runtime_put_noidle(sd->dev); + return rval; + } + + return 0; +} + +static int ak7375_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + pm_runtime_put(sd->dev); + + return 0; +} + +static const struct v4l2_subdev_internal_ops ak7375_int_ops = { + .open = ak7375_open, + .close = ak7375_close, +}; + +static const struct v4l2_subdev_core_ops ak7375_subdev_core_ops = { + .subscribe_event = v4l2_ctrl_subdev_subscribe_event, + .unsubscribe_event = v4l2_event_subdev_unsubscribe, +}; + +static const struct v4l2_subdev_ops ak7375_ops = { + .core = &ak7375_subdev_core_ops, +}; + +static void ak7375_subdev_cleanup(struct ak7375_device *ak7375_dev) +{ + v4l2_async_unregister_subdev(&ak7375_dev->sd); + v4l2_ctrl_handler_free(&ak7375_dev->ctrls_vcm); + media_entity_cleanup(&ak7375_dev->sd.entity); +} + +static int ak7375_init_controls(struct ak7375_device *dev_vcm) +{ + struct v4l2_ctrl_handler *hdl = &dev_vcm->ctrls_vcm; + const struct v4l2_ctrl_ops *ops = &ak7375_vcm_ctrl_ops; + + v4l2_ctrl_handler_init(hdl, 1); + + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FOCUS_ABSOLUTE, + 0, AK7375_MAX_FOCUS_POS, AK7375_FOCUS_STEPS, 0); + + if (hdl->error) + dev_err(dev_vcm->sd.dev, "%s fail error: 0x%x\n", + __func__, hdl->error); + dev_vcm->sd.ctrl_handler = hdl; + return hdl->error; +} + +static int ak7375_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct ak7375_device *ak7375_dev; + int rval; + + ak7375_dev = devm_kzalloc(&client->dev, sizeof(*ak7375_dev), + GFP_KERNEL); + if (ak7375_dev == NULL) + return -ENOMEM; + + v4l2_i2c_subdev_init(&ak7375_dev->sd, client, &ak7375_ops); + ak7375_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | + V4L2_SUBDEV_FL_HAS_EVENTS; + ak7375_dev->sd.internal_ops = &ak7375_int_ops; + + rval = ak7375_init_controls(ak7375_dev); + if (rval) + goto err_cleanup; + + ak7375_dev->sd.entity.function = MEDIA_ENT_F_LENS; + + rval 
= v4l2_async_register_subdev(&ak7375_dev->sd); + if (rval < 0) + goto err_cleanup; + + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + pm_runtime_idle(&client->dev); + + return 0; + +err_cleanup: + ak7375_subdev_cleanup(ak7375_dev); + dev_err(&client->dev, "Probe failed: %d\n", rval); + return rval; +} + +static int ak7375_remove(struct i2c_client *client) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct ak7375_device *ak7375_dev = sd_to_ak7375_vcm(sd); + + pm_runtime_disable(&client->dev); + ak7375_subdev_cleanup(ak7375_dev); + + return 0; +} + +/* + * This function sets the vcm position, so it consumes least current + * The lens position is gradually moved in units of AK7375_CTRL_STEPS, + * to make the movements smoothly. + */ +static int __maybe_unused ak7375_vcm_suspend(struct device *dev) +{ + + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct ak7375_device *ak7375_dev = sd_to_ak7375_vcm(sd); + int ret, val; + + for (val = ak7375_dev->current_val & ~(AK7375_CTRL_STEPS - 1); + val >= 0; val -= AK7375_CTRL_STEPS) { + ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_POSITION, + AK7375_FOCUS_VALUE(val), 2); + if (ret) + dev_err_once(dev, "%s I2C failure: %d\n", __func__, ret); + usleep_range(AK7375_CTRL_DELAY_US, AK7375_CTRL_DELAY_US + 10); + } + + ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_CONT, AK7375_MODE_STANDBY, 1); + if (ret) { + dev_err(dev, "%s I2C failure: %d\n", __func__, ret); + return ret; + } + return ret; +} + +/* + * This function sets the vcm position to the value set by the user + * through v4l2_ctrl_ops s_ctrl handler + * The lens position is gradually moved in units of AK7375_CTRL_STEPS, + * to make the movements smoothly. + */ +static int __maybe_unused ak7375_vcm_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct ak7375_device *ak7375_dev = sd_to_ak7375_vcm(sd); + int ret, val; + + ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_CONT, + AK7375_MODE_ACTIVE, 1); + if (ret) { + dev_err(dev, "%s I2C failure: %d\n", __func__, ret); + return ret; + } + + for (val = ak7375_dev->current_val % AK7375_CTRL_STEPS; + val < ak7375_dev->current_val + AK7375_CTRL_STEPS - 1; + val += AK7375_CTRL_STEPS) { + ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_POSITION, + AK7375_FOCUS_VALUE(val), 2); + if (ret) + dev_err_ratelimited(dev, "%s I2C failure: %d\n", + __func__, ret); + usleep_range(AK7375_CTRL_DELAY_US, AK7375_CTRL_DELAY_US + 10); + } + return 0; +} + +#ifdef CONFIG_ACPI +static const struct acpi_device_id ak7375_acpi_match[] = { + {"AK7375", 0}, + {}, +}; +MODULE_DEVICE_TABLE(acpi, ak7375_acpi_match); +#endif + +static const struct i2c_device_id ak7375_id_table[] = { + {AK7375_NAME, 0}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, ak7375_id_table); + +static const struct of_device_id ak7375_of_table[] = { + { .compatible = "ak7375" }, + { "" } +}; +MODULE_DEVICE_TABLE(of, ak7375_of_table); + +static const struct dev_pm_ops ak7375_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ak7375_vcm_suspend, ak7375_vcm_resume) + SET_RUNTIME_PM_OPS(ak7375_vcm_suspend, ak7375_vcm_resume, NULL) +}; + +static struct i2c_driver ak7375_i2c_driver = { + .driver = { + .name = AK7375_NAME, + .pm = &ak7375_pm_ops, + .acpi_match_table = ACPI_PTR(ak7375_acpi_match), + .of_match_table = ak7375_of_table, + }, + .probe = ak7375_probe, + .remove = ak7375_remove, + .id_table = ak7375_id_table, +}; + 
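+/*
+ * The wire format, worked through once: AK7375_FOCUS_VALUE(x) is
+ * cpu_to_be16((x) << 4), putting the 12-bit position in the top bits
+ * of a big-endian word. For x = 512: 512 << 4 = 0x2000, which a
+ * little-endian host stores swapped, so ak7375_i2c_write() emits
+ * 0x20 0x00 right after the AK7375_REG_POSITION address byte. The
+ * suspend/resume handlers above lean on this helper to walk the lens
+ * in AK7375_CTRL_STEPS increments with a ~1 ms pause per step, so the
+ * lens parks and restores smoothly instead of snapping.
+ */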
+module_i2c_driver(ak7375_i2c_driver); + +MODULE_AUTHOR("Tianshu Qiu "); +MODULE_DESCRIPTION("AK7375 VCM driver"); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/media/i2c/as3638.c b/drivers/media/i2c/as3638.c new file mode 100644 index 000000000000..7ccdab91c053 --- /dev/null +++ b/drivers/media/i2c/as3638.c @@ -0,0 +1,1078 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2016 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include "../../../include/media/as3638.h" +#include +#include + +/* Registers */ +#define REG_DESIGN_INFO 0x00 +#define REG_VERSION_CONTROL 0x01 +#define REG_CURRENT_SET1 0x02 +#define REG_CURRENT_SET2 0x03 +#define REG_CURRENT_SET3 0x04 +#define REG_CONFIG 0x05 +#define REG_LOW_VOLTAGE 0x06 +#define REG_TIMER 0x07 +#define REG_CONTROL 0x08 +#define REG_FAULT_AND_INFO 0x09 +#define REG_FLASH_CURRENT_REACHED 0x20 +#define REG_PROTECT 0x21 +#define REG_HIG_OVERVOLTAGE_PROTECTION 0x22 + +/* Register fields */ +#define VERSION_SHIFT 0 +#define VERSION_MASK 0x0F + +#define FLASH_CURRENT_SHIFT 0 +#define FLASH_CURRENT_MASK 0x1F +#define TORCH_CURRENT_SHIFT 5 +#define TORCH_CURRENT_MASK 0xE0 + +#define CONFIG_ASSIST_STROBE_SHIFT 1 +#define CONFIG_ASSIST_STROBE_MASK 0x02 +#define CONFIG_EXT_TORCH_ON_SHIFT 2 +#define CONFIG_EXT_TORCH_ON_MASK 0x04 +#define CONFIG_COIL_PEAK_SHIFT 3 +#define CONFIG_COIL_PEAK_MASK 0x18 +#define CONFIG_MUTE_POLARITY_SHIFT 5 +#define CONFIG_MUTE_POLARITY_MASK 0x20 +#define CONFIG_STROBE_TYPE_SHIFT 6 +#define CONFIG_STROBE_TYPE_MASK 0x40 +#define CONFIG_STROBE_ON_SHIFT 7 +#define CONFIG_STROBE_ON_MASK 0x80 + +#define LOW_VOLTAGE_RESET_SHIFT 0 +#define LOW_VOLTAGE_RESET_MASK 0x01 +#define LOW_VOLTAGE_LOWV_RED_CURR_SHIFT 1 +#define LOW_VOLTAGE_LOWV_RED_CURR_MASK 0x06 +#define LOW_VOLTAGE_LOWV_SEL_SHIFT 3 +#define LOW_VOLTAGE_LOWV_SEL_MASK 0x38 +#define LOW_VOLTAGE_LOWV_ON_SHIFT 6 +#define LOW_VOLTAGE_LOWV_ON_MASK 0x40 +#define LOW_VOLTAGE_BOOST_SHIFT 7 +#define LOW_VOLTAGE_BOOST_MASK 0x80 + +#define TIMER_FLASH_TIMEOUT_SHIFT 0 +#define TIMER_FLASH_TIMEOUT_MASK 0x1F +#define TIMER_MUTE_THRESHOLD_SHIFT 5 +#define TIMER_MUTE_THRESHOLD_MASK 0xE0 + +#define CONTROL_MODE_SETTING_SHIFT 0 +#define CONTROL_MODE_SETTING_MASK 0x03 +#define CONTROL_OUT_ON_SHIFT 2 +#define CONTROL_OUT_ON_MASK 0x04 +#define CONTROL_DCDC_SKIP_ENABLE_SHIFT 4 +#define CONTROL_DCDC_SKIP_ENABLE_MASK 0x10 +#define CONTROL_TXMASK_RED_CURR_SHIFT 5 +#define CONTROL_TXMASK_RED_CURR_MASK 0x60 +#define CONTROL_TXMASK_EN_SHIFT 7 +#define CONTROL_TXMASK_EN_MASK 0x80 + +#define FAULT_UNDER_VOLTAGE_LO_SHIFT 0 +#define FAULT_UNDER_VOLTAGE_LO_MASK 0x01 +#define FAULT_LOW_VOLTAGE_SHIFT 1 +#define FAULT_LOW_VOLTAGE_MASK 0x02 +#define INFO_TORCH_DETECTED_SHIFT 2 +#define INFO_TORCH_DETECTED_MASK 0x04 +#define INFO_TXMASK_EVENT_SHIFT 3 +#define INFO_TXMASK_EVENT_MASK 0x08 +#define FAULT_TIMEOUT_SHIFT 4 +#define FAULT_TIMEOUT_MASK 0x10 +#define FAULT_OVERTEMP_SHIFT 5 +#define FAULT_OVERTEMP_MASK 0x20 +#define FAULT_LED_SHORT_SHIFT 6 +#define FAULT_LED_SHORT_MASK 0x40 +#define FAULT_LED_OPEN_SHIFT 7 +#define FAULT_LED_OPEN_MASK 0x80 + +#define HOVP_SEL_HIGH_OVP_SHIFT 0 +#define HOVP_SEL_HIGH_OVP_MASK 0x01 +#define HOVP_LED_STATUS_ON_SHIFT 1 +#define HOVP_LED_STATUS_ON_MASK 0x02 +#define HOVP_LED_STATUS_1_SHIFT 2 +#define HOVP_LED_STATUS_1_MASK 0x04 +#define HOVP_LED_STATUS_2_SHIFT 3 +#define HOVP_LED_STATUS_2_MASK 0x08 +#define HOVP_LED_STATUS_3_SHIFT 4 +#define HOVP_LED_STATUS_3_MASK 0x10 + +#define DESIGN_INFO_FIXED_ID 0x16 +#define 
CONFIG_COIL_PEAK_1_3_AMP 0x0 +#define CONFIG_COIL_PEAK_1_6_AMP 0x1 +#define CONFIG_COIL_PEAK_1_95_AMP 0x2 +#define CONFIG_COIL_PEAK_2_4_AMP 0x3 +#define CONTROL_RED_CURR_100_MILLI_AMP 0x0 +#define CONTROL_RED_CURR_200_MILLI_AMP 0x1 +#define CONTROL_RED_CURR_300_MILLI_AMP 0x2 +#define CONTROL_RED_CURR_400_MILLI_AMP 0x3 +#define ALL_BITS_MASK 0xFF + +#define AS3638_FLASH_TOUT_MIN 0 +#define AS3638_FLASH_TOUT_STEP 1 /* Each step is 4ms */ +#define AS3638_FLASH_TOUT_MAX 31 /* Max timeout is 128 ms */ +#define AS3638_FLASH_TOUT_DEF 0x0F /* 64 ms timeout default */ + +#define AS3638_FLASH_INT_STEP 50000 /* uA */ +#define AS3638_FLASH_INT_MILLI_A_TO_REG(a) \ + (((a) * 1000) / AS3638_FLASH_INT_STEP) + +#define AS3638_TORCH_INT_STEP 12500 /* uA */ +#define AS3638_TORCH_INT_MILLI_A_TO_REG(a) \ + (((a) * 1000) / AS3638_TORCH_INT_STEP) + +const int torch_led1_intensity_table[] = { 0, 9400, 14100, 18100, + 23500, 32900, 51800, 98800}; /* uA */ + +enum mode_setting { + MODE_EXT_TORCH = 0x0, + MODE_MEM_INTERFACE = 0x1, /* Not supported */ + MODE_TORCH = 0x2, + MODE_FLASH = 0x3, +}; + +struct as3638_subdev { + struct v4l2_subdev subdev; + enum as3638_led_id led; +}; + +struct as3638_handler { + struct v4l2_ctrl_handler handler; + enum as3638_led_id led; +}; + +/* + * struct as3638_flash + * + * @dev: Device structure + * @subdev_led: V4L2 subdevices for each LED + * @pdata: Platform data + * @regmap: Register map for I2C accesses + * @current_led: The LED for which all HW registers currently is configured + * @ctrls_led: V4L2 control handlers for each LED + * @led_mode: V4L2 LED mode for each LED + * @strobe_source: V4L2 strobe source (SW or HW GPIO) for each LED + * @timeout: V4L2 timeout in steps of 4ms for each LED + * @flash_intensity: Current flash intensity in steps of 50 mA for each LED + * @torch_intensity: Current torch intensity in steps of 12.5 mA for each LED + */ +struct as3638_flash { + struct device *dev; + struct as3638_subdev subdev_led[AS3638_LED_MAX]; + struct as3638_platform_data *pdata; + struct regmap *regmap; + struct mutex lock; + bool open[AS3638_LED_MAX]; + + int current_led; + + struct as3638_handler ctrls_led[AS3638_LED_MAX]; + struct v4l2_ctrl *led_mode[AS3638_LED_MAX]; + struct v4l2_ctrl *strobe_source[AS3638_LED_MAX]; + struct v4l2_ctrl *timeout[AS3638_LED_MAX]; + struct v4l2_ctrl *flash_intensity[AS3638_LED_MAX]; + struct v4l2_ctrl *torch_intensity[AS3638_LED_MAX]; +}; + +#define ctrl_to_as3638_handler(_ctrl) \ + container_of(_ctrl->handler, struct as3638_handler, handler) + +#define ctrls_led_to_as3638_flash(_ctrls_led, _no) \ + container_of(_ctrls_led, struct as3638_flash, ctrls_led[_no]) + +#define subdev_to_as3638_subdev_led(_subdev) \ + container_of(_subdev, struct as3638_subdev, subdev) + +#define subdev_led_to_as3638_flash(_subdev_led, _no) \ + container_of(_subdev_led, struct as3638_flash, subdev_led[_no]) + +static int as3638_dump_registers(struct as3638_flash *flash) +{ + int i; + int reg[22]; + + memset(®, 0, sizeof(reg)); + for (i = 0; i <= 9; i++) + regmap_read(flash->regmap, i, ®[i]); + + regmap_read(flash->regmap, 0x20, ®[10]); + regmap_read(flash->regmap, 0x21, ®[11]); + regmap_read(flash->regmap, 0x22, ®[12]); + + dev_dbg(flash->dev, + "Addr: 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x20 0x21 0x22\n"); + dev_dbg(flash->dev, + "Val: 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X\n", + reg[0], reg[1], reg[2], reg[3], reg[4], reg[5], reg[6], + reg[7], reg[8], reg[9], reg[10], reg[11], reg[12]); + return 0; 
+} + +static int as3638_disable_outputs(struct as3638_flash *flash) +{ + int rval; + + dev_dbg(flash->dev, "disable outputs\n"); + + rval = regmap_update_bits(flash->regmap, REG_CONTROL, + CONTROL_OUT_ON_MASK, 0); + if (rval < 0) + dev_err(flash->dev, "Register write fail\n"); + + return rval; +} + +static int as3638_mode_ctrl(struct as3638_flash *flash, + enum as3638_led_id led_no) +{ + int rval; + + switch (flash->led_mode[led_no]->val) { + case V4L2_FLASH_LED_MODE_NONE: + dev_dbg(flash->dev, "LED%d: FLASH_LED_MODE_NONE\n", led_no + 1); + rval = regmap_update_bits( + flash->regmap, REG_CONTROL, CONTROL_MODE_SETTING_MASK, + MODE_EXT_TORCH << CONTROL_MODE_SETTING_SHIFT); + break; + case V4L2_FLASH_LED_MODE_TORCH: + dev_dbg(flash->dev, "LED%d: FLASH_LED_MODE_TORCH\n", + led_no + 1); + rval = regmap_update_bits( + flash->regmap, REG_CONTROL, CONTROL_MODE_SETTING_MASK, + MODE_TORCH << CONTROL_MODE_SETTING_SHIFT); + break; + case V4L2_FLASH_LED_MODE_FLASH: + dev_dbg(flash->dev, "LED%d: FLASH_LED_MODE_FLASH\n", + led_no + 1); + rval = regmap_update_bits( + flash->regmap, REG_CONTROL, CONTROL_MODE_SETTING_MASK, + MODE_FLASH << CONTROL_MODE_SETTING_SHIFT); + break; + default: + dev_err(flash->dev, "LED%d: Invalid mode %d\n", + led_no + 1, flash->led_mode[led_no]->val); + return -EINVAL; + } + return rval; +} + +/* + * The Infra-Red LED is connected to the LED1 output. + * + * For IR LED safety, be absolutely 100% sure that the HW current reduction + * feature is enabled and setup correctly for using the maximum 400mA reduction. + * + * LED2 and LED3 currents must be set to zero to ensure the IR LED (LED1) is the + * one with the highest current setting. Otherwise the reduction current will + * be smaller than 400 mA for the IR LED. + */ +static int as3638_force_reduction_current(struct as3638_flash *flash) +{ + int rval; + unsigned int reg_val; + unsigned int reg_mask; + + reg_mask = CONTROL_TXMASK_RED_CURR_MASK | CONTROL_TXMASK_EN_MASK; + + reg_val = CONTROL_RED_CURR_400_MILLI_AMP << + CONTROL_TXMASK_RED_CURR_SHIFT; + reg_val = reg_val | 1 << CONTROL_TXMASK_EN_SHIFT; + + rval = regmap_update_bits(flash->regmap, REG_CONTROL, + reg_mask, reg_val); + if (rval < 0) + return rval; + + rval = regmap_read(flash->regmap, REG_CONTROL, ®_val); + if (rval < 0) { + dev_err(flash->dev, "Device read failed\n"); + return rval; + } + dev_dbg(flash->dev, "Control register = 0x%X\n", reg_val); + + if (!(reg_val & CONTROL_TXMASK_EN_MASK)) { + dev_err(flash->dev, "Reduction enable fail\n"); + return -EINVAL; + } + + if ((reg_val & CONTROL_TXMASK_RED_CURR_MASK) != + (CONTROL_RED_CURR_400_MILLI_AMP << CONTROL_TXMASK_RED_CURR_SHIFT)) { + dev_err(flash->dev, "Reduction init fail\n"); + return -EINVAL; + } + + rval = regmap_update_bits(flash->regmap, REG_CURRENT_SET2, + ALL_BITS_MASK, 0); /* Clear reg */ + if (rval < 0) + return rval; + + rval = regmap_update_bits(flash->regmap, REG_CURRENT_SET3, + ALL_BITS_MASK, 0); /* Clear reg */ + if (rval < 0) + return rval; + return 0; +} + +/* The torch values for LED1 are non-linear, so use a table look-up instead. 
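+ * The helper below walks the table from the top and returns the index
+ * of the largest entry not above the request; a 20 mA request
+ * (20000 uA) maps to index 3 (18100 uA), for instance.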
*/ +static int as3638_torch_intensity_to_register_led1(int value) +{ + int i; + + for (i = ARRAY_SIZE(torch_led1_intensity_table) - 1; i >= 0; i--) { + if (torch_led1_intensity_table[i] <= value * 1000) + break; + } + return i; +} + +static int as3638_set_intensity(struct as3638_flash *flash, + enum as3638_led_id led_no) +{ + int rval; + unsigned int reg_val; + unsigned int reg_mask; + + /* + * The HW is configured with an IR LED on the LED1 output and two + * visible light LEDs on LED2 and LED3. + * LED1 is always fired alone. LED2 and LED3 are always fired together. + */ + reg_mask = FLASH_CURRENT_MASK | TORCH_CURRENT_MASK; + switch (led_no) { + case AS3638_LED1: + dev_dbg(flash->dev, "LED1: flash = %d mA, torch = %d mA\n", + flash->flash_intensity[AS3638_LED1]->val, + flash->torch_intensity[AS3638_LED1]->val); + + reg_val = as3638_torch_intensity_to_register_led1( + flash->torch_intensity[AS3638_LED1]->val); + reg_val = reg_val << TORCH_CURRENT_SHIFT; + reg_val = reg_val | AS3638_FLASH_INT_MILLI_A_TO_REG( + flash->flash_intensity[AS3638_LED1]->val); + + rval = regmap_update_bits(flash->regmap, REG_CURRENT_SET1, + reg_mask, reg_val); + if (rval < 0) + return rval; + + rval = regmap_update_bits(flash->regmap, REG_CURRENT_SET2, + ALL_BITS_MASK, 0); /* Clear reg */ + if (rval < 0) + return rval; + + rval = regmap_update_bits(flash->regmap, REG_CURRENT_SET3, + ALL_BITS_MASK, 0); /* Clear reg */ + if (rval < 0) + return rval; + break; + + case AS3638_LED2: + case AS3638_LED3: + rval = regmap_update_bits(flash->regmap, REG_CURRENT_SET1, + ALL_BITS_MASK, 0); /* Clear reg */ + if (rval < 0) + return rval; + + dev_dbg(flash->dev, "LED2: flash = %d mA, torch = %d mA\n", + flash->flash_intensity[AS3638_LED2]->val, + flash->torch_intensity[AS3638_LED2]->val); + + reg_val = AS3638_TORCH_INT_MILLI_A_TO_REG( + flash->torch_intensity[AS3638_LED2]->val); + reg_val = reg_val << TORCH_CURRENT_SHIFT; + reg_val = reg_val | AS3638_FLASH_INT_MILLI_A_TO_REG( + flash->flash_intensity[AS3638_LED2]->val); + + rval = regmap_update_bits(flash->regmap, REG_CURRENT_SET2, + reg_mask, reg_val); + if (rval < 0) + return rval; + + dev_dbg(flash->dev, "LED3: flash = %d mA, torch = %d mA\n", + flash->flash_intensity[AS3638_LED3]->val, + flash->torch_intensity[AS3638_LED3]->val); + + reg_val = AS3638_TORCH_INT_MILLI_A_TO_REG( + flash->torch_intensity[AS3638_LED3]->val); + reg_val = reg_val << TORCH_CURRENT_SHIFT; + reg_val = reg_val | AS3638_FLASH_INT_MILLI_A_TO_REG( + flash->flash_intensity[AS3638_LED3]->val); + + rval = regmap_update_bits(flash->regmap, REG_CURRENT_SET3, + reg_mask, reg_val); + if (rval < 0) + return rval; + break; + + default: + dev_warn(flash->dev, "Invalid led_no %d\n", led_no + 1); + return -EINVAL; + } + return rval; +} + +static int as3638_turn_on_leds(struct as3638_flash *flash, + enum as3638_led_id led_no) +{ + int rval; + + dev_dbg(flash->dev, "turn on LED%d\n", led_no + 1); + + if (led_no == AS3638_LED1 && + flash->led_mode[led_no]->val == V4L2_FLASH_LED_MODE_FLASH) { + /* For safety reasons ensure current reduction is enabled */ + rval = as3638_force_reduction_current(flash); + if (rval < 0) + return rval; + } + + dev_dbg(flash->dev, "%s\n", + as3638_dump_registers(flash) ? 
" " : " "); + + return regmap_update_bits(flash->regmap, REG_CONTROL, + CONTROL_OUT_ON_MASK, + 1 << CONTROL_OUT_ON_SHIFT); +} + +/* + * This init function must be called everytime the controller is powered on or + * when the driver detects that a different LED is being configured/used than + * what the controller is currently setup for. + */ +static int as3638_init_device(struct as3638_flash *flash, + enum as3638_led_id led_no) +{ + int rval; + unsigned int reg_val; + unsigned int reg_mask; + + dev_dbg(flash->dev, "LED%d\n", led_no + 1); + + rval = as3638_set_intensity(flash, led_no); + if (rval < 0) + return rval; + + /* + * Max coil peak current set to 2.4A to match the HW design. + * Initialize also the strobe mode (SW or HW). + */ + reg_val = CONFIG_COIL_PEAK_2_4_AMP << CONFIG_COIL_PEAK_SHIFT | + flash->strobe_source[led_no]->val << CONFIG_STROBE_ON_SHIFT; + reg_mask = CONFIG_COIL_PEAK_MASK | CONFIG_STROBE_ON_MASK; + rval = regmap_update_bits(flash->regmap, REG_CONFIG, + reg_mask, reg_val); + if (rval < 0) + return rval; + + rval = regmap_update_bits(flash->regmap, REG_TIMER, + TIMER_FLASH_TIMEOUT_MASK, + flash->timeout[led_no]->val); + if (rval < 0) + return rval; + + rval = as3638_mode_ctrl(flash, led_no); + if (rval < 0) + return rval; + + flash->current_led = led_no; + + dev_dbg(flash->dev, "Resetting fault flags\n"); + return regmap_read(flash->regmap, REG_FAULT_AND_INFO, ®_val); +} + +static int as3638_get_ctrl(struct v4l2_ctrl *ctrl) +{ + struct as3638_handler *ctrls_led = ctrl_to_as3638_handler(ctrl); + enum as3638_led_id led_no = ctrls_led->led; + struct as3638_flash *flash = ctrls_led_to_as3638_flash(ctrls_led, + led_no); + unsigned int fault_val; + int rval = -EINVAL; + + dev_dbg(flash->dev, "LED%d: ctrl->id = 0x%X\n", led_no + 1, ctrl->id); + + if (ctrl->id != V4L2_CID_FLASH_FAULT) { + dev_warn(flash->dev, "Invalid control\n"); + return rval; + } + + rval = regmap_read(flash->regmap, REG_FAULT_AND_INFO, &fault_val); + if (rval < 0) { + dev_err(flash->dev, "Register read fail\n"); + return rval; + } + dev_dbg(flash->dev, "fault_and_info = 0x%X\n", fault_val); + + ctrl->val = 0; + if (fault_val & FAULT_UNDER_VOLTAGE_LO_MASK) + ctrl->val |= V4L2_FLASH_FAULT_UNDER_VOLTAGE; + if (fault_val & FAULT_LOW_VOLTAGE_MASK) + ctrl->val |= V4L2_FLASH_FAULT_INPUT_VOLTAGE; + if (fault_val & FAULT_TIMEOUT_MASK) + ctrl->val |= V4L2_FLASH_FAULT_TIMEOUT; + if (fault_val & FAULT_OVERTEMP_MASK) + ctrl->val |= V4L2_FLASH_FAULT_OVER_TEMPERATURE; + if (fault_val & FAULT_LED_SHORT_MASK) + ctrl->val |= V4L2_FLASH_FAULT_SHORT_CIRCUIT; + if (fault_val & FAULT_LED_OPEN_MASK) + ctrl->val |= V4L2_CID_FLASH_FAULT; + + return rval; +} + +static int as3638_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct as3638_handler *ctrls_led = ctrl_to_as3638_handler(ctrl); + enum as3638_led_id led_no = ctrls_led->led; + struct as3638_flash *flash = ctrls_led_to_as3638_flash(ctrls_led, + led_no); + int rval = 0; + + mutex_lock(&flash->lock); + + if (flash->current_led != led_no) { + rval = as3638_init_device(flash, led_no); + if (rval < 0) { + dev_err(flash->dev, "Init device fail\n"); + goto leave; + } + } + + switch (ctrl->id) { + case V4L2_CID_FLASH_LED_MODE: + dev_dbg(flash->dev, + "LED%d: V4L2_CID_FLASH_LED_MODE, val = %d\n", + led_no + 1, ctrl->val); + rval = as3638_mode_ctrl(flash, led_no); + break; + + case V4L2_CID_FLASH_STROBE: + dev_dbg(flash->dev, + "LED%d: V4L2_CID_FLASH_STROBE, val = %d\n", + led_no + 1, ctrl->val); + rval = as3638_turn_on_leds(flash, led_no); + break; + + case V4L2_CID_FLASH_STROBE_SOURCE: + 
+		dev_dbg(flash->dev,
+			"LED%d: V4L2_CID_FLASH_STROBE_SOURCE, val = %d\n",
+			led_no + 1, ctrl->val);
+		rval = regmap_update_bits(
+			flash->regmap, REG_CONFIG, CONFIG_STROBE_ON_MASK,
+			flash->strobe_source[led_no]->val <<
+			CONFIG_STROBE_ON_SHIFT);
+		break;
+
+	case V4L2_CID_FLASH_STROBE_STOP:
+		dev_dbg(flash->dev,
+			"LED%d: V4L2_CID_FLASH_STROBE_STOP, val = %d\n",
+			led_no + 1, ctrl->val);
+		rval = as3638_disable_outputs(flash);
+		break;
+
+	case V4L2_CID_FLASH_TIMEOUT:
+		dev_dbg(flash->dev,
+			"LED%d: V4L2_CID_FLASH_TIMEOUT, val = %d\n",
+			led_no + 1, ctrl->val);
+		rval = regmap_update_bits(flash->regmap, REG_TIMER,
+					  TIMER_FLASH_TIMEOUT_MASK,
+					  flash->timeout[led_no]->val);
+		break;
+
+	case V4L2_CID_FLASH_INTENSITY:
+		dev_dbg(flash->dev,
+			"LED%d: V4L2_CID_FLASH_INTENSITY, val = %d\n",
+			led_no + 1, ctrl->val);
+		rval = as3638_disable_outputs(flash);
+		if (rval < 0)
+			goto leave;
+		rval = as3638_set_intensity(flash, led_no);
+		if (rval < 0)
+			goto leave;
+		break;
+
+	case V4L2_CID_FLASH_TORCH_INTENSITY:
+		dev_dbg(flash->dev,
+			"LED%d: V4L2_CID_FLASH_TORCH_INTENSITY, val = %d\n",
+			led_no + 1, ctrl->val);
+		rval = as3638_set_intensity(flash, led_no);
+		if (rval < 0)
+			goto leave;
+		break;
+
+	case V4L2_CID_FLASH_FAULT:
+		dev_dbg(flash->dev,
+			"LED%d: V4L2_CID_FLASH_FAULT, val = %d\n",
+			led_no + 1, ctrl->val);
+		rval = 0;
+		break;
+	default:
+		dev_warn(flash->dev,
+			 "LED%d: Invalid control, id = 0x%X, val = %d\n",
+			 led_no + 1, ctrl->id, ctrl->val);
+		rval = -EINVAL;
+		break;
+	}
+leave:
+	mutex_unlock(&flash->lock);
+	return rval;
+}
+
+static const struct v4l2_ctrl_ops as3638_led_ctrl_ops = {
+	.g_volatile_ctrl = as3638_get_ctrl,
+	.s_ctrl = as3638_set_ctrl,
+};
+
+static int as3638_init_controls(struct as3638_flash *flash,
+				enum as3638_led_id led_no)
+{
+	struct v4l2_ctrl *fault;
+	u32 max_flash_brt = flash->pdata->flash_max_brightness[led_no];
+	u32 max_torch_brt = flash->pdata->torch_max_brightness[led_no];
+	struct v4l2_ctrl_handler *hdl = &flash->ctrls_led[led_no].handler;
+	const struct v4l2_ctrl_ops *ops = &as3638_led_ctrl_ops;
+
+	dev_dbg(flash->dev, "Init controls: LED%d\n", led_no + 1);
+
+	flash->ctrls_led[led_no].led = led_no;
+	v4l2_ctrl_handler_init(hdl, 8);
+
+	flash->led_mode[led_no] = v4l2_ctrl_new_std_menu(
+		hdl, ops, V4L2_CID_FLASH_LED_MODE,
+		V4L2_FLASH_LED_MODE_TORCH, ~0x7,
+		V4L2_FLASH_LED_MODE_NONE);
+
+	flash->strobe_source[led_no] = v4l2_ctrl_new_std_menu(
+		hdl, ops, V4L2_CID_FLASH_STROBE_SOURCE,
+		0x1, ~0x3, V4L2_FLASH_STROBE_SOURCE_SOFTWARE);
+
+	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_STROBE, 0, 0, 0, 0);
+
+	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0);
+
+	flash->timeout[led_no] = v4l2_ctrl_new_std(
+		hdl, ops, V4L2_CID_FLASH_TIMEOUT,
+		AS3638_FLASH_TOUT_MIN, AS3638_FLASH_TOUT_MAX,
+		AS3638_FLASH_TOUT_STEP, AS3638_FLASH_TOUT_DEF);
+
+	flash->flash_intensity[led_no] = v4l2_ctrl_new_std(
+		hdl, ops, V4L2_CID_FLASH_INTENSITY,
+		0, max_flash_brt, 1, 0);
+
+	flash->torch_intensity[led_no] = v4l2_ctrl_new_std(
+		hdl, ops, V4L2_CID_FLASH_TORCH_INTENSITY,
+		0, max_torch_brt, 1, 0);
+
+	fault = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_FAULT, 0,
+				  V4L2_FLASH_FAULT_UNDER_VOLTAGE
+				  | V4L2_FLASH_FAULT_INPUT_VOLTAGE
+				  | V4L2_FLASH_FAULT_TIMEOUT
+				  | V4L2_FLASH_FAULT_OVER_TEMPERATURE
+				  | V4L2_FLASH_FAULT_SHORT_CIRCUIT
+				  | V4L2_FLASH_FAULT_OVER_CURRENT, 0, 0);
+	if (fault)
+		fault->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+	if (hdl->error) {
+		dev_err(flash->dev, "Fail, LED = %d\n", led_no + 1);
+		return hdl->error;
+	}
+
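+	/*
+	 * Note: the fault control is volatile, so every userspace read is
+	 * served by as3638_get_ctrl() and reflects the live
+	 * REG_FAULT_AND_INFO contents rather than a cached value.
+	 */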
flash->subdev_led[led_no].subdev.ctrl_handler = hdl; + return 0; +} + +static const struct v4l2_subdev_ops as3638_ops = { + .core = NULL, +}; + +static const struct regmap_config as3638_regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = REG_HIG_OVERVOLTAGE_PROTECTION, +}; + +static const +struct v4l2_subdev_internal_ops as3638_internal_ops; + +static int as3638_subdev_init(struct as3638_flash *flash, + enum as3638_led_id led_no, char *led_name) +{ + struct i2c_client *client = to_i2c_client(flash->dev); + int rval = -ENODEV; + + dev_dbg(flash->dev, "LED = %d, led_name = %s\n", led_no + 1, led_name); + + v4l2_subdev_init(&flash->subdev_led[led_no].subdev, &as3638_ops); + flash->subdev_led[led_no].subdev.internal_ops = &as3638_internal_ops; + flash->subdev_led[led_no].subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + snprintf(flash->subdev_led[led_no].subdev.name, + sizeof(flash->subdev_led[led_no].subdev.name), + AS3638_NAME " LED%d %d-%4.4x", + led_no + 1, + i2c_adapter_id(client->adapter), + client->addr); + flash->subdev_led[led_no].led = led_no; + + if (flash->subdev_led[AS3638_LED1].subdev.v4l2_dev) + rval = v4l2_device_register_subdev( + flash->subdev_led[AS3638_LED1].subdev.v4l2_dev, + &flash->subdev_led[led_no].subdev); + if (rval) { + dev_err(flash->dev, "Register subdev fail LED%d\n", led_no + 1); + goto err_out; + } + + rval = as3638_init_controls(flash, led_no); + if (rval) { + dev_err(flash->dev, "Init controls fail LED%d\n", led_no + 1); + goto err_out; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = media_entity_init(&flash->subdev_led[led_no].subdev.entity, + 0, NULL, 0); +#else + rval = media_entity_pads_init(&flash->subdev_led[led_no].subdev.entity, + 0, NULL); +#endif + if (rval < 0) { + dev_err(flash->dev, "Media init fail LED%d\n", led_no + 1); + goto err_out; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + flash->subdev_led[led_no].subdev.entity.type = + MEDIA_ENT_T_V4L2_SUBDEV_FLASH; +#else + flash->subdev_led[led_no].subdev.entity.function = + MEDIA_ENT_F_FLASH; +#endif +err_out: + return rval; +} + +static int as3638_registered(struct v4l2_subdev *subdev) +{ + struct as3638_subdev *subdev_led = subdev_to_as3638_subdev_led(subdev); + enum as3638_led_id led_no = subdev_led->led; + struct as3638_flash *flash = subdev_led_to_as3638_flash(subdev_led, + led_no); + int rval; + + /* + * Only initialize the additional subdevices for LED2 and LED3 as a + * result of the registration of the subdevice for LED1. The + * registration for LED2 and LED3 will also end up in this function. 
+ */
+	if (led_no != AS3638_LED1)
+		return 0;
+
+	dev_dbg(flash->dev, "register LED%d\n", led_no + 1);
+
+	/* The LED1 subdevice was already initialized during the probe call */
+
+	rval = as3638_subdev_init(flash, AS3638_LED2, "as3638-led2");
+	if (rval < 0) {
+		dev_err(flash->dev, "Subdev init LED2 fail\n");
+		return rval;
+	}
+	rval = as3638_subdev_init(flash, AS3638_LED3, "as3638-led3");
+	if (rval < 0) {
+		dev_err(flash->dev, "Subdev init LED3 fail\n");
+		return rval;
+	}
+	return rval;
+}
+
+static void as3638_unregistered(struct v4l2_subdev *subdev)
+{
+	struct as3638_subdev *subdev_led = subdev_to_as3638_subdev_led(subdev);
+	enum as3638_led_id led_no = subdev_led->led;
+	struct as3638_flash *flash = subdev_led_to_as3638_flash(subdev_led,
+								led_no);
+	dev_dbg(flash->dev, "Unregistered\n");
+}
+
+static int as3638_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+	struct as3638_subdev *subdev_led = subdev_to_as3638_subdev_led(subdev);
+	enum as3638_led_id led_no = subdev_led->led;
+	struct as3638_flash *flash = subdev_led_to_as3638_flash(subdev_led,
+								led_no);
+	int rval = 0;
+
+	dev_dbg(flash->dev, "open LED%d\n", led_no + 1);
+
+	pm_runtime_get_sync(flash->dev);
+
+	mutex_lock(&flash->lock);
+
+	if ((led_no == AS3638_LED1 && (flash->open[AS3638_LED2] ||
+				       flash->open[AS3638_LED3])) ||
+	    (led_no != AS3638_LED1 && (flash->open[AS3638_LED1]))) {
+		dev_info(flash->dev,
+			 "led 1 and leds 2&3 can't be controlled in parallel\n");
+		rval = -EBUSY;
+		goto error;
+	}
+
+	rval = as3638_init_device(flash, led_no);
+	if (rval < 0) {
+		dev_err(flash->dev, "Init device fail\n");
+		goto error;
+	}
+
+	flash->open[led_no] = true;
+	mutex_unlock(&flash->lock);
+	return rval;
+error:
+	mutex_unlock(&flash->lock);
+	pm_runtime_put(flash->dev);
+	return rval;
+}
+
+static int as3638_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+	struct as3638_subdev *subdev_led = subdev_to_as3638_subdev_led(subdev);
+	enum as3638_led_id led_no = subdev_led->led;
+	struct as3638_flash *flash = subdev_led_to_as3638_flash(subdev_led,
+								led_no);
+	dev_dbg(flash->dev, "close LED%d\n", led_no + 1);
+
+	mutex_lock(&flash->lock);
+	flash->open[led_no] = false;
+	mutex_unlock(&flash->lock);
+
+	pm_runtime_put(flash->dev);
+
+	return 0;
+}
+
+static const struct v4l2_subdev_internal_ops as3638_internal_ops = {
+	.registered = as3638_registered,
+	.unregistered = as3638_unregistered,
+	.open = as3638_open,
+	.close = as3638_close,
+};
+
+static int as3638_i2c_subdev_init(struct as3638_flash *flash,
+				  enum as3638_led_id led_no, char *led_name)
+{
+	struct i2c_client *client = to_i2c_client(flash->dev);
+	int rval;
+
+	dev_dbg(flash->dev, "subdev init LED = %d, led_name = %s\n",
+		led_no + 1, led_name);
+
+	v4l2_i2c_subdev_init(&flash->subdev_led[led_no].subdev,
+			     client, &as3638_ops);
+	flash->subdev_led[led_no].subdev.internal_ops = &as3638_internal_ops;
+	flash->subdev_led[led_no].subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	snprintf(flash->subdev_led[led_no].subdev.name,
+		 sizeof(flash->subdev_led[led_no].subdev.name),
+		 AS3638_NAME " LED%d %d-%4.4x",
+		 led_no + 1,
+		 i2c_adapter_id(client->adapter),
+		 client->addr);
+	flash->subdev_led[led_no].led = led_no;
+
+	rval = as3638_init_controls(flash, led_no);
+	if (rval) {
+		dev_err(flash->dev, "Init controls fail LED%d\n", led_no + 1);
+		return rval;
+	}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+	rval = media_entity_init(&flash->subdev_led[led_no].subdev.entity,
+				 0, NULL, 0);
+#else
+	rval = media_entity_pads_init(&flash->subdev_led[led_no].subdev.entity,
+				      0, NULL);
+#endif
+	if (rval < 0) {
+		dev_err(flash->dev, "Media init fail LED%d\n", led_no + 1);
+		goto err_out;
+	}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+	flash->subdev_led[led_no].subdev.entity.type =
+		MEDIA_ENT_T_V4L2_SUBDEV_FLASH;
+#else
+	flash->subdev_led[led_no].subdev.entity.function =
+		MEDIA_ENT_F_FLASH;
+#endif
+err_out:
+	return rval;
+}
+
+static int as3638_probe(struct i2c_client *client,
+			const struct i2c_device_id *devid)
+{
+	struct as3638_flash *flash;
+	struct as3638_platform_data *pdata = dev_get_platdata(&client->dev);
+	unsigned int reg_val;
+	int rval;
+
+	flash = devm_kzalloc(&client->dev, sizeof(*flash), GFP_KERNEL);
+	if (!flash)
+		return -ENOMEM;
+
+	/* Set the device pointer first so the error paths below can log */
+	flash->dev = &client->dev;
+
+	mutex_init(&flash->lock);
+
+	flash->regmap = devm_regmap_init_i2c(client, &as3638_regmap);
+	if (IS_ERR(flash->regmap))
+		return PTR_ERR(flash->regmap);
+
+	if (!pdata) {
+		dev_err(flash->dev, "Missing platform data\n");
+		return -ENXIO;
+	}
+	flash->pdata = pdata;
+
+	rval = gpio_request(flash->pdata->gpio_reset, "flash reset");
+	if (rval < 0) {
+		dev_err(flash->dev, "Request reset GPIO fail\n");
+		/* Nothing to free: the GPIO was never requested */
+		return rval;
+	}
+	rval = gpio_direction_output(flash->pdata->gpio_reset, 1);
+	if (rval < 0) {
+		dev_err(flash->dev, "Setting reset GPIO fail\n");
+		goto error;
+	}
+
+	rval = regmap_read(flash->regmap, REG_DESIGN_INFO, &reg_val);
+	if (rval < 0) {
+		dev_err(flash->dev, "Device read failed\n");
+		goto error;
+	}
+	if (reg_val != DESIGN_INFO_FIXED_ID) {
+		dev_err(flash->dev,
+			"Wrong ID returned (0x%X). Must be 0x%X\n",
+			reg_val, DESIGN_INFO_FIXED_ID);
+		rval = -ENODEV;
+		goto error;
+	}
+
+	rval = regmap_read(flash->regmap, REG_VERSION_CONTROL, &reg_val);
+	if (rval < 0) {
+		dev_err(flash->dev, "Device read failed\n");
+		goto error;
+	}
+	dev_dbg(flash->dev, "AS3638 chip version 0x%4X\n",
+		reg_val & VERSION_MASK);
+
+	rval = as3638_i2c_subdev_init(flash, AS3638_LED1, "as3638-led1");
+	if (rval < 0) {
+		dev_err(flash->dev, "Subdev init LED1 fail\n");
+		goto error2;
+	}
+
+	gpio_set_value(flash->pdata->gpio_reset, 0);
+
+	flash->current_led = AS3638_NO_LED;
+	pm_runtime_enable(flash->dev);
+
+	dev_dbg(flash->dev, "Success\n");
+	return 0;
+error2:
+	v4l2_device_unregister_subdev(&flash->subdev_led[AS3638_LED1].subdev);
+	media_entity_cleanup(&flash->subdev_led[AS3638_LED1].subdev.entity);
+error:
+	gpio_free(flash->pdata->gpio_reset);
+	return rval;
+}
+
+static void as3638_subdev_cleanup(struct as3638_flash *flash)
+{
+	int i;
+
+	dev_dbg(flash->dev, "Clean up\n");
+
+	for (i = AS3638_LED1; i < AS3638_LED_MAX; i++) {
+		v4l2_device_unregister_subdev(&flash->subdev_led[i].subdev);
+		v4l2_ctrl_handler_free(&flash->ctrls_led[i].handler);
+		media_entity_cleanup(&flash->subdev_led[i].subdev.entity);
+	}
+}
+
+static int as3638_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct as3638_flash *flash =
+		container_of(sd, struct as3638_flash,
+			     subdev_led[AS3638_LED1].subdev);
+
+	dev_dbg(flash->dev, "remove\n");
+
+	as3638_subdev_cleanup(flash);
+	gpio_free(flash->pdata->gpio_reset);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int as3638_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct as3638_subdev *sd_led = subdev_to_as3638_subdev_led(sd);
+	struct as3638_flash *flash = subdev_led_to_as3638_flash(sd_led,
+								sd_led->led);
+	gpio_set_value(flash->pdata->gpio_reset, 0);
+	return 0;
+}
+
+static int as3638_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct as3638_subdev *sd_led = subdev_to_as3638_subdev_led(sd);
+	struct as3638_flash *flash = subdev_led_to_as3638_flash(sd_led,
+								sd_led->led);
+	int rval;
+
+	gpio_set_value(flash->pdata->gpio_reset, 1);
+
+	if (flash->current_led == AS3638_NO_LED)
+		return 0;
+
+	/* Re-apply the cached control values to the freshly reset chip */
+	rval = v4l2_ctrl_handler_setup(
+		&flash->ctrls_led[flash->current_led].handler);
+
+	return rval;
+}
+#else
+#define as3638_suspend NULL
+#define as3638_resume NULL
+#endif
+
+static const struct i2c_device_id as3638_id_table[] = {
+	{AS3638_NAME, 0},
+	{}
+};
+
+static const struct dev_pm_ops as3638_pm_ops = {
+	.suspend = as3638_suspend,
+	.resume = as3638_resume,
+	.runtime_suspend = as3638_suspend,
+	.runtime_resume = as3638_resume,
+};
+
+MODULE_DEVICE_TABLE(i2c, as3638_id_table);
+
+static struct i2c_driver as3638_i2c_driver = {
+	.driver = {
+		.name = AS3638_NAME,
+		.pm = &as3638_pm_ops,
+	},
+	.probe = as3638_probe,
+	.remove = as3638_remove,
+	.id_table = as3638_id_table,
+};
+
+module_i2c_driver(as3638_i2c_driver);
+
+MODULE_AUTHOR("Soren Friis ");
+MODULE_DESCRIPTION("AMS AS3638 Triple Flash LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/bu64295.c b/drivers/media/i2c/bu64295.c
new file mode 100644
index 000000000000..6d1c4c0de566
--- /dev/null
+++ b/drivers/media/i2c/bu64295.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2016 - 2018 Intel Corporation
+
+/* Header list reconstructed; the original include targets were garbled */
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include "bu64295.h"
+
+/* bu64295 device structure */
+struct bu64295_device {
+	enum bu64295_vcm_mode vcm_mode;
+	struct i2c_client *client;
+	struct v4l2_ctrl_handler ctrls_vcm;
+	struct v4l2_subdev subdev_vcm;
+};
+
+#define to_bu64295_vcm_via_ctrl(_ctrl) \
+	container_of(_ctrl->handler, struct bu64295_device, ctrls_vcm)
+
+#define to_bu64295_vcm_via_subdev(_subdev) \
+	container_of(_subdev, struct bu64295_device, subdev_vcm)
+
+static int bu64295_i2c_write(struct i2c_client *client, u16 data)
+{
+	const int num_msg = 1;
+	int ret;
+	int retry = 1;
+
+	struct i2c_msg msg = {
+		.addr = client->addr,
+		.flags = 0,
+		.len = sizeof(data),
+		.buf = (u8 *)&data,
+	};
+
+	/* One immediate retry on a failed transfer */
+	do {
+		ret = i2c_transfer(client->adapter, &msg, num_msg);
+		if (ret == num_msg)
+			break;
+	} while (retry--);
+
+	if (ret != num_msg) {
+		dev_err(&client->dev, "I2C write(0x%4.4x) failed\n", msg.addr);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int bu64295_init_vcm_params(struct bu64295_device *bu64295_device)
+{
+	struct i2c_client *client = bu64295_device->client;
+	int ret;
+
+	ret = bu64295_i2c_write(client,
+				VCM_VAL(BU64295_PS_OFF,
+					0,
+					BU64295_ISRC_ADDR,
+					BU64295_ISRC,
+					BU64295_SR_P8));
+	if (ret)
+		return ret;
+
+	ret = bu64295_i2c_write(client,
+				VCM_VAL(BU64295_PS_OFF,
+					0,
+					BU64295_RF_ADDR,
+					BU64295_ISRC,
+					BU64295_RF_81HZ));
+	return ret;
+}
+
+static int bu64295_open(struct v4l2_subdev *subdev_vcm,
+			struct v4l2_subdev_fh *fh)
+{
+	struct bu64295_device *bu64295_device =
+		to_bu64295_vcm_via_subdev(subdev_vcm);
+	struct i2c_client *client = bu64295_device->client;
+	int ret;
+
+	ret = bu64295_init_vcm_params(bu64295_device);
+	if (ret)
+		dev_err(&client->dev, "bu64295_open failed\n");
+
+	return ret;
+}
+
+static int bu64295_close(struct v4l2_subdev *subdev_vcm,
+			 struct v4l2_subdev_fh *fh)
+{
+	return 0;
+}
+
+static int bu64295_t_focus_vcm(struct bu64295_device *bu64295_dev, u16 val)
+{
+	struct i2c_client *client =
bu64295_dev->client; + int ret = -EINVAL; + + dev_dbg(&client->dev, "Setting new value VCM: %d\n", val); + switch (bu64295_dev->vcm_mode) { + case BU64295_DIRECT: + ret = bu64295_i2c_write(client, + VCM_VAL(BU64295_PS_OFF, + 1, + BU64295_TDAC_ADDR, + BU64295_DIRECT, + val)); + break; + case BU64295_ISRC: + ret = bu64295_i2c_write(client, + VCM_VAL(BU64295_PS_OFF, + 1, + BU64295_TDAC_ADDR, + BU64295_ISRC, + val)); + break; + default: + break; + } + + return ret; +} + +static int bu64295_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct bu64295_device *dev_vcm = to_bu64295_vcm_via_ctrl(ctrl); + + if (ctrl->id == V4L2_CID_FOCUS_ABSOLUTE) + return bu64295_t_focus_vcm(dev_vcm, ctrl->val); + + return -EINVAL; +} + +static const struct v4l2_ctrl_ops bu64295_vcm_ctrl_ops = { + .s_ctrl = bu64295_set_ctrl, +}; + +static const struct v4l2_subdev_internal_ops bu64295_internal_ops = { + .open = bu64295_open, + .close = bu64295_close, +}; + +static int bu64295_init_controls(struct bu64295_device *dev_vcm) +{ + struct v4l2_ctrl_handler *hdl = &dev_vcm->ctrls_vcm; + const struct v4l2_ctrl_ops *ops = &bu64295_vcm_ctrl_ops; + struct i2c_client *client = dev_vcm->client; + + v4l2_ctrl_handler_init(hdl, 1); + + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FOCUS_ABSOLUTE, + 0, BU64295_MAX_FOCUS_POS, 1, 0); + + if (hdl->error) + dev_err(&client->dev, "bu64295_init_controls failed\n"); + dev_vcm->subdev_vcm.ctrl_handler = hdl; + + return hdl->error; +} + +static void bu64295_subdev_cleanup(struct bu64295_device *bu64295_dev) +{ + v4l2_ctrl_handler_free(&bu64295_dev->ctrls_vcm); + v4l2_device_unregister_subdev(&bu64295_dev->subdev_vcm); + media_entity_cleanup(&bu64295_dev->subdev_vcm.entity); +} + +static const struct v4l2_subdev_ops bu64295_ops = { }; + +static int bu64295_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct bu64295_device *bu64295_dev; + int rval; + + bu64295_dev = devm_kzalloc(&client->dev, + sizeof(*bu64295_dev), GFP_KERNEL); + if (bu64295_dev == NULL) + return -ENOMEM; + + i2c_set_clientdata(client, bu64295_dev); + bu64295_dev->client = client; + + rval = bu64295_init_vcm_params(bu64295_dev); + if (rval) { + dev_err(&client->dev, "bu64295 init failed\n"); + return -ENODEV; + } + + v4l2_i2c_subdev_init(&bu64295_dev->subdev_vcm, client, &bu64295_ops); + bu64295_dev->subdev_vcm.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + bu64295_dev->subdev_vcm.internal_ops = &bu64295_internal_ops; + snprintf(bu64295_dev->subdev_vcm.name, + sizeof(bu64295_dev->subdev_vcm.name), + BU64295_NAME " %d-%4.4x", i2c_adapter_id(client->adapter), + client->addr); + + rval = bu64295_init_controls(bu64295_dev); + if (rval) + goto err_cleanup; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = media_entity_init(&bu64295_dev->subdev_vcm.entity, 0, NULL, 0); +#else + rval = media_entity_pads_init(&bu64295_dev->subdev_vcm.entity, 0, + NULL); +#endif + if (rval) + goto err_cleanup; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + bu64295_dev->subdev_vcm.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_LENS; +#else + bu64295_dev->subdev_vcm.entity.function = MEDIA_ENT_F_LENS; +#endif + bu64295_dev->vcm_mode = BU64295_DIRECT; + + return 0; + +err_cleanup: + bu64295_subdev_cleanup(bu64295_dev); + dev_err(&client->dev, "Probe failed: %d\n", rval); + return rval; +} + +static int bu64295_remove(struct i2c_client *client) +{ + struct bu64295_device *bu64295_dev = i2c_get_clientdata(client); + + bu64295_subdev_cleanup(bu64295_dev); + return 0; +} + +static const struct i2c_device_id bu64295_id_table[] = { + { 
BU64295_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, bu64295_id_table);
+
+static struct i2c_driver bu64295_i2c_driver = {
+	.driver = {
+		.name = BU64295_NAME,
+	},
+	.probe = bu64295_probe,
+	.remove = bu64295_remove,
+	.id_table = bu64295_id_table,
+};
+
+module_i2c_driver(bu64295_i2c_driver);
+
+MODULE_AUTHOR("Kamal Ramamoorthy ");
+MODULE_DESCRIPTION("BU64295 VCM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/bu64295.h b/drivers/media/i2c/bu64295.h
new file mode 100644
index 000000000000..36b873d2adde
--- /dev/null
+++ b/drivers/media/i2c/bu64295.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2016 - 2018 Intel Corporation */
+#ifndef __BU64295_H__
+#define __BU64295_H__
+
+#include <linux/types.h>	/* assumed: provides u16; original target lost */
+
+#define BU64295_VCM_ADDR	0x0c
+#define BU64295_NAME		"bu64295"
+
+enum bu64295_vcm_mode {
+	BU64295_DIRECT = 0x0,	/* direct control */
+	BU64295_ISRC = 0x1,	/* intelligent slew rate control */
+};
+
+#define BU64295_MAX_FOCUS_POS	1023
+
+#define BU64295_TDAC_ADDR	0x0
+#define BU64295_ISRC_ADDR	0x1
+#define BU64295_RF_ADDR		0x2
+#define BU64295_ST_ADDR		0x3
+#define BU64295_SR_ADDR		0x4
+#define BU64295_T1_ADDR		0x5
+#define BU64295_T2_ADDR		0x6
+
+#define BU64295_PS_ON		0x0
+#define BU64295_PS_OFF		0x1
+
+#define BU64295_SR_P8		0x1
+#define BU64295_RF_81HZ		0x7F
+
+/* Command word layout: PS(15) | EN(14) | ADDR(13:11) | MODE(10) | DATA(9:0) */
+#define VCM_VAL(ps, en, addr, mode, data) (u16)((ps << 15) | \
+	(en << 14) | (addr << 11) | (mode << 10) | (data & 0x3FF))
+
+#endif
diff --git a/drivers/media/i2c/crlmodule-lite/Kconfig b/drivers/media/i2c/crlmodule-lite/Kconfig
new file mode 100644
index 000000000000..5f6b506ae749
--- /dev/null
+++ b/drivers/media/i2c/crlmodule-lite/Kconfig
@@ -0,0 +1,7 @@
+config VIDEO_CRLMODULE_LITE
+	tristate "CRL Module sensor support for ICI driver"
+	depends on I2C
+	depends on VIDEO_INTEL_ICI
+	depends on !VIDEO_CRLMODULE
+	---help---
+	  This is a generic driver for CRL based camera modules.
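+	  The "lite" variant is built for the Intel ICI camera stack
+	  (VIDEO_INTEL_ICI) and is mutually exclusive with the V4L2-based
+	  VIDEO_CRLMODULE driver.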
diff --git a/drivers/media/i2c/crlmodule-lite/Makefile b/drivers/media/i2c/crlmodule-lite/Makefile new file mode 100644 index 000000000000..de5f5e4d3ccb --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + +crlmodule-lite-objs += crlmodule-core.o crlmodule-data.o \ + crlmodule-regs.o crlmodule-nvm.o \ + crl_adv7481_hdmi_configuration.o \ + crlmodule-msrlist.o +obj-$(CONFIG_VIDEO_CRLMODULE_LITE) += crlmodule-lite.o + +ccflags-y += -Idrivers/media/i2c diff --git a/drivers/media/i2c/crlmodule-lite/crl_adv7481_configuration.h b/drivers/media/i2c/crlmodule-lite/crl_adv7481_configuration.h new file mode 100644 index 000000000000..130dc91e4942 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crl_adv7481_configuration.h @@ -0,0 +1,707 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_ADV7481_CONFIGURATION_H_ +#define __CRLMODULE_ADV7481_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep adv7481_powerup_regset[] = { + {0xFF, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* SW reset */ + {0x00, CRL_REG_LEN_DELAY, 0x05, 0x00}, /* Delay 5ms */ + {0x01, CRL_REG_LEN_08BIT, 0x76, 0xE0}, /* ADI recommended setting */ + {0xF2, CRL_REG_LEN_08BIT, 0x01, 0xE0}, /* I2C Rd Auto-Increment=1 */ + {0xF3, CRL_REG_LEN_08BIT, 0x4C, 0xE0}, /* DPLL Map Address */ + {0xF4, CRL_REG_LEN_08BIT, 0x44, 0xE0}, /* CP Map Address */ + {0xF5, CRL_REG_LEN_08BIT, 0x68, 0xE0}, /* HDMI RX Map Address */ + {0xF6, CRL_REG_LEN_08BIT, 0x6C, 0xE0}, /* EDID Map Address */ + {0xF7, CRL_REG_LEN_08BIT, 0x64, 0xE0}, /* HDMI RX Repeater Map Addr */ + {0xF8, CRL_REG_LEN_08BIT, 0x62, 0xE0}, /* HDMI RX Infoframe Map Addr */ + {0xF9, CRL_REG_LEN_08BIT, 0xF0, 0xE0}, /* CBUS Map Address Set */ + {0xFA, CRL_REG_LEN_08BIT, 0x82, 0xE0}, /* CEC Map Address Set */ + {0xFB, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* SDP Main Map Address */ + {0xFC, CRL_REG_LEN_08BIT, 0x90, 0xE0}, /* CSI-TXB Map Address */ + {0xFD, CRL_REG_LEN_08BIT, 0x94, 0xE0}, /* CSI-TXA Map Address */ + {0x00, CRL_REG_LEN_08BIT, 0x50, 0xE0}, /* Disable Chip Powerdown & + HDMI Rx Block */ + {0x40, CRL_REG_LEN_08BIT, 0x83, 0x64}, /* Enable HDCP 1.1 */ + {0x00, CRL_REG_LEN_08BIT, 0x08, 0x68}, /* ADI recommended setting */ + {0x3D, CRL_REG_LEN_08BIT, 0x10, 0x68}, /* ADI recommended setting */ + {0x3E, CRL_REG_LEN_08BIT, 0x69, 0x68}, /* ADI recommended setting */ + {0x3F, CRL_REG_LEN_08BIT, 0x46, 0x68}, /* ADI recommended setting */ + {0x4E, CRL_REG_LEN_08BIT, 0xFE, 0x68}, /* ADI recommended setting */ + {0x4F, CRL_REG_LEN_08BIT, 0x08, 0x68}, /* ADI recommended setting */ + {0x57, CRL_REG_LEN_08BIT, 0xA3, 0x68}, /* ADI recommended setting */ + {0x58, CRL_REG_LEN_08BIT, 0x04, 0x68}, /* ADI recommended setting */ + {0x85, CRL_REG_LEN_08BIT, 0x10, 0x68}, /* ADI recommended setting */ + {0x83, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* Enable All Terminations */ + {0xBE, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* ADI recommended setting */ + {0x6C, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* HPA Manual Enable */ + {0xF8, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* HPA Asserted */ + {0x0F, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* Audio Mute Speed = + Fastest Smallest Step Size */ + {0x70, CRL_REG_LEN_08BIT, 0xA0, 0x64}, /* Write primary edid size */ + {0x74, CRL_REG_LEN_08BIT, 0x01, 0x64}, /* Enable manual edid */ + {0x7A, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Write edid sram select */ + {0xF6, CRL_REG_LEN_08BIT, 0x6C, 0xE0}, /* Write edid map bus 
address */ + + {0x00*4, CRL_REG_LEN_32BIT, 0x00FFFFFF, 0x6C}, /* EDID programming */ + {0x01*4, CRL_REG_LEN_32BIT, 0xFFFFFF00, 0x6C}, /* EDID programming */ + {0x02*4, CRL_REG_LEN_32BIT, 0x4DD90100, 0x6C}, /* EDID programming */ + {0x03*4, CRL_REG_LEN_32BIT, 0x00000000, 0x6C}, /* EDID programming */ + {0x04*4, CRL_REG_LEN_32BIT, 0x00110103, 0x6C}, /* EDID programming */ + {0x05*4, CRL_REG_LEN_32BIT, 0x80000078, 0x6C}, /* EDID programming */ + {0x06*4, CRL_REG_LEN_32BIT, 0x0A0DC9A0, 0x6C}, /* EDID programming */ + {0x07*4, CRL_REG_LEN_32BIT, 0x57479827, 0x6C}, /* EDID programming */ + {0x08*4, CRL_REG_LEN_32BIT, 0x12484C00, 0x6C}, /* EDID programming */ + {0x09*4, CRL_REG_LEN_32BIT, 0x00000101, 0x6C}, /* EDID programming */ + {0x0A*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0B*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0C*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0D*4, CRL_REG_LEN_32BIT, 0x0101011D, 0x6C}, /* EDID programming */ + {0x0E*4, CRL_REG_LEN_32BIT, 0x80D0721C, 0x6C}, /* EDID programming */ + {0x0F*4, CRL_REG_LEN_32BIT, 0x1620102C, 0x6C}, /* EDID programming */ + {0x10*4, CRL_REG_LEN_32BIT, 0x2580C48E, 0x6C}, /* EDID programming */ + {0x11*4, CRL_REG_LEN_32BIT, 0x2100009E, 0x6C}, /* EDID programming */ + {0x12*4, CRL_REG_LEN_32BIT, 0x011D8018, 0x6C}, /* EDID programming */ + {0x13*4, CRL_REG_LEN_32BIT, 0x711C1620, 0x6C}, /* EDID programming */ + {0x14*4, CRL_REG_LEN_32BIT, 0x582C2500, 0x6C}, /* EDID programming */ + {0x15*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x16*4, CRL_REG_LEN_32BIT, 0x009E0000, 0x6C}, /* EDID programming */ + {0x17*4, CRL_REG_LEN_32BIT, 0x00FC0048, 0x6C}, /* EDID programming */ + {0x18*4, CRL_REG_LEN_32BIT, 0x444D4920, 0x6C}, /* EDID programming */ + {0x19*4, CRL_REG_LEN_32BIT, 0x4C4C430A, 0x6C}, /* EDID programming */ + {0x1A*4, CRL_REG_LEN_32BIT, 0x20202020, 0x6C}, /* EDID programming */ + {0x1B*4, CRL_REG_LEN_32BIT, 0x000000FD, 0x6C}, /* EDID programming */ + {0x1C*4, CRL_REG_LEN_32BIT, 0x003B3D0F, 0x6C}, /* EDID programming */ + {0x1D*4, CRL_REG_LEN_32BIT, 0x2D08000A, 0x6C}, /* EDID programming */ + {0x1E*4, CRL_REG_LEN_32BIT, 0x20202020, 0x6C}, /* EDID programming */ + {0x1F*4, CRL_REG_LEN_32BIT, 0x202001C1, 0x6C}, /* EDID programming */ + {0x20*4, CRL_REG_LEN_32BIT, 0x02031E77, 0x6C}, /* EDID programming */ + {0x21*4, CRL_REG_LEN_32BIT, 0x4F941305, 0x6C}, /* EDID programming */ + {0x22*4, CRL_REG_LEN_32BIT, 0x03040201, 0x6C}, /* EDID programming */ + {0x23*4, CRL_REG_LEN_32BIT, 0x16150706, 0x6C}, /* EDID programming */ + {0x24*4, CRL_REG_LEN_32BIT, 0x1110121F, 0x6C}, /* EDID programming */ + {0x25*4, CRL_REG_LEN_32BIT, 0x23090701, 0x6C}, /* EDID programming */ + {0x26*4, CRL_REG_LEN_32BIT, 0x65030C00, 0x6C}, /* EDID programming */ + {0x27*4, CRL_REG_LEN_32BIT, 0x10008C0A, 0x6C}, /* EDID programming */ + {0x28*4, CRL_REG_LEN_32BIT, 0xD0902040, 0x6C}, /* EDID programming */ + {0x29*4, CRL_REG_LEN_32BIT, 0x31200C40, 0x6C}, /* EDID programming */ + {0x2A*4, CRL_REG_LEN_32BIT, 0x5500138E, 0x6C}, /* EDID programming */ + {0x2B*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x2C*4, CRL_REG_LEN_32BIT, 0x011D00BC, 0x6C}, /* EDID programming */ + {0x2D*4, CRL_REG_LEN_32BIT, 0x52D01E20, 0x6C}, /* EDID programming */ + {0x2E*4, CRL_REG_LEN_32BIT, 0xB8285540, 0x6C}, /* EDID programming */ + {0x2F*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x30*4, CRL_REG_LEN_32BIT, 0x001E8C0A, 0x6C}, /* EDID programming */ + {0x31*4, 
CRL_REG_LEN_32BIT, 0xD08A20E0, 0x6C}, /* EDID programming */ + {0x32*4, CRL_REG_LEN_32BIT, 0x2D10103E, 0x6C}, /* EDID programming */ + {0x33*4, CRL_REG_LEN_32BIT, 0x9600C48E, 0x6C}, /* EDID programming */ + {0x34*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x35*4, CRL_REG_LEN_32BIT, 0x011D0072, 0x6C}, /* EDID programming */ + {0x36*4, CRL_REG_LEN_32BIT, 0x51D01E20, 0x6C}, /* EDID programming */ + {0x37*4, CRL_REG_LEN_32BIT, 0x6E285500, 0x6C}, /* EDID programming */ + {0x38*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x39*4, CRL_REG_LEN_32BIT, 0x001E8C0A, 0x6C}, /* EDID programming */ + {0x3A*4, CRL_REG_LEN_32BIT, 0xD08A20E0, 0x6C}, /* EDID programming */ + {0x3B*4, CRL_REG_LEN_32BIT, 0x2D10103E, 0x6C}, /* EDID programming */ + {0x3C*4, CRL_REG_LEN_32BIT, 0x9600138E, 0x6C}, /* EDID programming */ + {0x3D*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x3E*4, CRL_REG_LEN_32BIT, 0x00000000, 0x6C}, /* EDID programming */ + {0x3F*4, CRL_REG_LEN_32BIT, 0x000000CB, 0x6C}, /* EDID programming */ + + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, /* No MIPI frame start */ + {0x26, CRL_REG_LEN_08BIT, 0x55, 0x94}, /* Disable sleep mode */ + {0x27, CRL_REG_LEN_08BIT, 0x55, 0x94}, /* Disable escape mode */ + {0x7E, CRL_REG_LEN_08BIT, 0xA0, 0x94}, /* ADI recommended setting */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x90}, /* ADI recommended setting */ + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x90}, /* ADI recommended setting */ + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, /* ADI recommended setting */ + {0x34, CRL_REG_LEN_08BIT, 0x55, 0x94}, /* ADI recommended setting */ + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, /* ADI recommended setting */ + {0xCA, CRL_REG_LEN_08BIT, 0x02, 0x94}, /* ADI recommended setting */ + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, /* ADI recommended setting */ + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, /* ADI recommended setting */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* Power up DPHY */ + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, /* ADI recommended setting */ + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, /* ADI recommended setting */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_1080p[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x5E, 0xE0}, /* Select Resolution 1080P */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* 1080P shift left 44 pixel */ + {0x8C, CRL_REG_LEN_08BIT, 0xD4, 0x44}, /* 1080P shift left 44 pixel */ + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, /* 1080P shift left 44 pixel */ + {0x8D, CRL_REG_LEN_08BIT, 0xD4, 0x44}, /* 1080P shift left 44 pixel */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_720p[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x53, 0xE0}, /* Select Resolution 720P */ + 
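+	/*
+	 * Note: each regset entry is {register, length, value, I2C map
+	 * address}; the map addresses used here (0xE0 main IO, 0x44 CP,
+	 * 0x94 CSI-TXA) are the sub-maps programmed by the powerup regset
+	 * above.
+	 */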
{0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* 720P shift left 40 pixel */ + {0x8C, CRL_REG_LEN_08BIT, 0xD8, 0x44}, /* 720P shift left 40 pixel */ + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, /* 720P shift left 40 pixel */ + {0x8D, CRL_REG_LEN_08BIT, 0xD8, 0x44}, /* 720P shift left 40 pixel */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_VGA[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x88, 0xE0}, /* Select Resolution VGA */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_1080i[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x54, 0xE0}, /* Select Resolution 1080i*/ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* 1080i shift left 44 pixel */ + {0x8C, CRL_REG_LEN_08BIT, 0xD4, 0x44}, /* 1080i shift left 44 pixel */ + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, /* 1080i shift left 44 pixel */ + {0x8D, CRL_REG_LEN_08BIT, 0xD4, 0x44}, /* 1080i shift left 44 pixel */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_480i[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x40, 0xE0}, /* Select Resolution 480i */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, 
CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_576p[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x4B, 0xE0}, /* Select Resolution 576p*/ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_576i[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x41, 0xE0}, /* Select Resolution 576i*/ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_streamon_regs[] = { + {0x00, CRL_REG_LEN_DELAY, 0x02, 0x00}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, /* Power-up CSI-TX */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, /* ADI recommended setting */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_streamoff_regs[] = { + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, /* ADI Recommended Write */ + {0x1E, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* Reset the clock Lane */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* i2c_mipi_pll_en - 1'b0 Disable MIPI PLL */ + {0xC1, CRL_REG_LEN_08BIT, 0x3B, 0x94}, +}; + +static struct crl_sensor_detect_config adv7481_sensor_detect_regset[] = { + { + .reg = { 0x0019, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 5, + }, + { + .reg = { 0x0016, CRL_REG_LEN_16BIT, 0x0000ffff }, + .width = 7, + }, +}; + +static const s64 adv7481_op_sys_clock[] = {400000000, }; + +static struct crl_pll_configuration adv7481_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 16, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + +}; + +static struct crl_subdev_rect_rep adv7481_1080p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 
1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep adv7481_720p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_subdev_rect_rep adv7481_VGA_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 640, + .out_rect.height = 480, + }, +}; + +static struct crl_subdev_rect_rep adv7481_1080i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 540, + }, +}; + +static struct crl_subdev_rect_rep adv7481_480i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, +}; + +static struct crl_subdev_rect_rep adv7481_576p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 576, + }, +}; + +static struct crl_subdev_rect_rep adv7481_576i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 288, + }, +}; +static struct crl_mode_rep adv7481_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(adv7481_1080p_rects), + .sd_rects = adv7481_1080p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .comp_items 
= 0,
+		.ctrl_data = 0,
+		.mode_regs_items = ARRAY_SIZE(adv7481_mode_1080p),
+		.mode_regs = adv7481_mode_1080p,
+	},
+	{
+		.sd_rects_items = ARRAY_SIZE(adv7481_720p_rects),
+		.sd_rects = adv7481_720p_rects,
+		.binn_hor = 1,
+		.binn_vert = 1,
+		.scale_m = 1,
+		.width = 1280,
+		.height = 720,
+		.comp_items = 0,
+		.ctrl_data = 0,
+		.mode_regs_items = ARRAY_SIZE(adv7481_mode_720p),
+		.mode_regs = adv7481_mode_720p,
+	},
+	{
+		.sd_rects_items = ARRAY_SIZE(adv7481_VGA_rects),
+		.sd_rects = adv7481_VGA_rects,
+		.binn_hor = 3,
+		.binn_vert = 2,
+		.scale_m = 1,
+		.width = 640,
+		.height = 480,
+		.comp_items = 0,
+		.ctrl_data = 0,
+		.mode_regs_items = ARRAY_SIZE(adv7481_mode_VGA),
+		.mode_regs = adv7481_mode_VGA,
+	},
+	{
+		.sd_rects_items = ARRAY_SIZE(adv7481_1080i_rects),
+		.sd_rects = adv7481_1080i_rects,
+		.binn_hor = 1,
+		.binn_vert = 2,
+		.scale_m = 1,
+		.width = 1920,
+		.height = 540,
+		.comp_items = 0,
+		.ctrl_data = 0,
+		.mode_regs_items = ARRAY_SIZE(adv7481_mode_1080i),
+		.mode_regs = adv7481_mode_1080i,
+	},
+	{
+		.sd_rects_items = ARRAY_SIZE(adv7481_480i_rects),
+		.sd_rects = adv7481_480i_rects,
+		.binn_hor = 2,
+		.binn_vert = 4,
+		.scale_m = 1,
+		.width = 720,
+		.height = 240,
+		.comp_items = 0,
+		.ctrl_data = 0,
+		.mode_regs_items = ARRAY_SIZE(adv7481_mode_480i),
+		.mode_regs = adv7481_mode_480i,
+	},
+	{
+		.sd_rects_items = ARRAY_SIZE(adv7481_576p_rects),
+		.sd_rects = adv7481_576p_rects,
+		.binn_hor = 2,
+		.binn_vert = 1,
+		.scale_m = 1,
+		.width = 720,
+		.height = 576,
+		.comp_items = 0,
+		.ctrl_data = 0,
+		.mode_regs_items = ARRAY_SIZE(adv7481_mode_576p),
+		.mode_regs = adv7481_mode_576p,
+	},
+	{
+		.sd_rects_items = ARRAY_SIZE(adv7481_576i_rects),
+		.sd_rects = adv7481_576i_rects,
+		.binn_hor = 2,
+		.binn_vert = 3,
+		.scale_m = 1,
+		.width = 720,
+		.height = 288,
+		.comp_items = 0,
+		.ctrl_data = 0,
+		.mode_regs_items = ARRAY_SIZE(adv7481_mode_576i),
+		.mode_regs = adv7481_mode_576i,
+	},
+};
+
+static struct crl_sensor_subdev_config adv7481_sensor_subdevs[] = {
+	{
+		.subdev_type = CRL_SUBDEV_TYPE_BINNER,
+		.name = "adv7481 binner",
+	},
+	{
+		.subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY,
+		.name = "adv7481 pixel array",
+	},
+};
+
+static struct crl_sensor_limits adv7481_sensor_limits = {
+	.x_addr_min = 0,
+	.y_addr_min = 0,
+	.x_addr_max = 1920,
+	.y_addr_max = 1080,
+	.min_frame_length_lines = 160,
+	.max_frame_length_lines = 65535,
+	.min_line_length_pixels = 6024,
+	.max_line_length_pixels = 32752,
+	.scaler_m_min = 1,
+	.scaler_m_max = 1,
+	.scaler_n_min = 1,
+	.scaler_n_max = 1,
+	.min_even_inc = 1,
+	.max_even_inc = 1,
+	.min_odd_inc = 1,
+	.max_odd_inc = 1,
+};
+
+static struct crl_csi_data_fmt adv7481_crl_csi_data_fmt[] = {
+	{
+		.code = ICI_FORMAT_UYVY,
+		.pixel_order = CRL_PIXEL_ORDER_GRBG,
+		.bits_per_pixel = 16,
+		.regs_items = ARRAY_SIZE(adv7481_mode_1080p),
+		.regs = adv7481_mode_1080p, /* default yuv422 format */
+	},
+};
+
+static struct crl_ctrl_data adv7481_ctrls[] = {
+	{
+		.sd_type = CRL_SUBDEV_TYPE_BINNER,
+		.op_type = CRL_CTRL_SET_OP,
+		.context = SENSOR_IDLE,
+		.ctrl_id = ICI_EXT_SD_PARAM_ID_LINK_FREQ,
+		.name = "CTRL_ID_LINK_FREQ",
+		.type = CRL_CTRL_TYPE_MENU_INT,
+		.data.int_menu.def = 0,
+		.data.int_menu.max = ARRAY_SIZE(adv7481_pll_configurations) - 1,
+		.data.int_menu.menu = adv7481_op_sys_clock,
+		.flags = 0,
+		.impact = CRL_IMPACTS_NO_IMPACT,
+		.regs_items = 0,
+		.regs = 0,
+		.dep_items = 0,
+		.dep_ctrls = 0,
+	},
+	{
+		.sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY,
+		.op_type = CRL_CTRL_GET_OP,
+		.context = SENSOR_POWERED_ON,
+		.ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE,
+		.name =
"CTRL_ID_PIXEL_RATE_PA", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_CSI", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity adv7481_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + }, +}; + +static struct crl_sensor_configuration adv7481_crl_configuration = { + + .power_items = ARRAY_SIZE(adv7481_power_items), + .power_entities = adv7481_power_items, + + .powerup_regs_items = ARRAY_SIZE(adv7481_powerup_regset), + .powerup_regs = adv7481_powerup_regset, + + .poweroff_regs_items = ARRAY_SIZE(adv7481_streamoff_regs), + .poweroff_regs = adv7481_streamoff_regs, + + .id_reg_items = ARRAY_SIZE(adv7481_sensor_detect_regset), + .id_regs = adv7481_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(adv7481_sensor_subdevs), + .subdevs = adv7481_sensor_subdevs, + + .sensor_limits = &adv7481_sensor_limits, + + .pll_config_items = ARRAY_SIZE(adv7481_pll_configurations), + .pll_configs = adv7481_pll_configurations, + .op_sys_clk = adv7481_op_sys_clock, + + .modes_items = ARRAY_SIZE(adv7481_modes), + .modes = adv7481_modes, + + .streamon_regs_items = ARRAY_SIZE(adv7481_streamon_regs), + .streamon_regs = adv7481_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(adv7481_streamoff_regs), + .streamoff_regs = adv7481_streamoff_regs, + + .ctrl_items = ARRAY_SIZE(adv7481_ctrls), + .ctrl_bank = adv7481_ctrls, + + .csi_fmts_items = ARRAY_SIZE(adv7481_crl_csi_data_fmt), + .csi_fmts = adv7481_crl_csi_data_fmt, +}; + +#endif /* __CRLMODULE_ADV7481_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule-lite/crl_adv7481_cvbs_configuration.h b/drivers/media/i2c/crlmodule-lite/crl_adv7481_cvbs_configuration.h new file mode 100644 index 000000000000..7e00727496a3 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crl_adv7481_cvbs_configuration.h @@ -0,0 +1,268 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_ADV7481_CVBS_CONFIGURATION_H_ +#define __CRLMODULE_ADV7481_CVBS_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep adv7481_cvbs_powerup_regset[] = { + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, //LLC/PIX/AUD/SPI PINS TRISTATED + {0x0F, CRL_REG_LEN_08BIT, 0x00, 0xF2}, //Exit Power Down Mode + {0x52, CRL_REG_LEN_08BIT, 0xCD, 0xF2}, //ADI Required Write + {0x00, CRL_REG_LEN_08BIT, 0x00, 0xF2}, //INSEL = CVBS in on Ain 1 + {0x0E, CRL_REG_LEN_08BIT, 0x80, 0xF2}, //ADI Required Write + {0x9C, CRL_REG_LEN_08BIT, 0x00, 0xF2}, //ADI Required Write + {0x9C, CRL_REG_LEN_08BIT, 0xFF, 0xF2}, //ADI Required Write + {0x0E, CRL_REG_LEN_08BIT, 0x00, 0xF2}, //ADI Required Write + {0x80, CRL_REG_LEN_08BIT, 0x51, 0xF2}, //ADI Required Write + {0x81, CRL_REG_LEN_08BIT, 0x51, 0xF2}, //ADI Required Write + 
{0x82, CRL_REG_LEN_08BIT, 0x68, 0xF2}, //ADI Required Write + {0x03, CRL_REG_LEN_08BIT, 0x42, 0xF2}, //Tri-S Output Drivers, PwrDwn 656 pads + {0x04, CRL_REG_LEN_08BIT, 0x07, 0xF2}, //Power-up INTRQ pad, & Enable SFL + {0x13, CRL_REG_LEN_08BIT, 0x00, 0xF2}, //ADI Required Write + {0x17, CRL_REG_LEN_08BIT, 0x41, 0xF2}, //Select SH1 + {0x31, CRL_REG_LEN_08BIT, 0x12, 0xF2}, //ADI Required Write + {0x10, CRL_REG_LEN_08BIT, 0xC0, 0xE0}, //Enable 1-Lane MIPI Tx, enable pixel output and route SD through Pixel port + {0x00, CRL_REG_LEN_08BIT, 0x81, 0x90}, //Enable 1-lane MIPI + {0x00, CRL_REG_LEN_08BIT, 0xA1, 0x90}, //Set Auto DPHY Timing + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, //ADI Required Write + {0xD2, CRL_REG_LEN_08BIT, 0x40, 0x90}, //ADI Required Write + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x90}, //ADI Required Write + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x90}, //ADI Required Write + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x90}, //ADI Required Write + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x90}, //i2c_dphy_pwdn - 1'b0 + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x90}, //ADI Required Write + {0x1E, CRL_REG_LEN_08BIT, 0xC0, 0x90}, //ADI Required Write +}; + + +static struct crl_register_write_rep adv7481_cvbs_streamon_regs[] = { + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x90}, //ADI Required Write + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x90}, //i2c_mipi_pll_en - 1'b1 + {0x00, CRL_REG_LEN_DELAY, 0x02, 0x00}, + {0x00, CRL_REG_LEN_08BIT, 0x21, 0x90}, //Power-up CSI-TX 21 + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x90}, //ADI Required Write +}; + +static struct crl_register_write_rep adv7481_cvbs_streamoff_regs[] = { + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x90}, /* ADI Recommended Write */ + {0x1E, CRL_REG_LEN_08BIT, 0x00, 0x90}, /* Reset the clock Lane */ + {0x00, CRL_REG_LEN_08BIT, 0x81, 0x90}, + {0xDA, CRL_REG_LEN_08BIT, 0x00, 0x90}, /* i2c_mipi_pll_en - 1'b0 Disable MIPI PLL */ + {0xC1, CRL_REG_LEN_08BIT, 0x3B, 0x90}, +}; + +static const s64 adv7481_cvbs_op_sys_clock[] = {130000000}; + +static struct crl_pll_configuration adv7481_cvbs_pll_configurations[] = { + { + .input_clk = 286363636, + .op_sys_clk = 216000000, + .bitsperpixel = 16, + .pixel_rate_csi = 130000000, + .pixel_rate_pa = 130000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 130000000, + .bitsperpixel = 16, + .pixel_rate_csi = 130000000, + .pixel_rate_pa = 130000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + +}; + +static struct crl_subdev_rect_rep adv7481_cvbs_ntsc_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 720, + .in_rect.height = 240, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 720, + .in_rect.height = 240, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, +}; + +static struct crl_mode_rep adv7481_cvbs_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(adv7481_cvbs_ntsc_rects), + .sd_rects = adv7481_cvbs_ntsc_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 720, + .height = 240, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = 0, + }, +}; + +static struct crl_sensor_subdev_config adv7481_cvbs_sensor_subdevs[] = { + { + .subdev_type = 
CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481 cvbs binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481 cvbs pixel array", + }, +}; + +static struct crl_sensor_limits adv7481_cvbs_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 720, + .y_addr_max = 240, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 1, + .scaler_m_max = 1, + .scaler_n_min = 1, + .scaler_n_max = 1, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 1, +}; + +static struct crl_csi_data_fmt adv7481_cvbs_crl_csi_data_fmt[] = { + { + .code = ICI_FORMAT_UYVY, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = 0, + .regs = NULL, + }, +}; + +static struct crl_ctrl_data adv7481_cvbs_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = ICI_EXT_SD_PARAM_ID_LINK_FREQ, + .name = "CTRL_ID_LINK_FREQ", + .type = CRL_CTRL_TYPE_MENU_INT, + .data.int_menu.def = 0, + .data.int_menu.max = ARRAY_SIZE(adv7481_cvbs_pll_configurations) - 1, + .data.int_menu.menu = adv7481_cvbs_op_sys_clock, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_PA", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_CSI", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +static struct crl_sensor_configuration adv7481_cvbs_crl_configuration = { + .sensor_init = NULL, + .sensor_cleanup = NULL, + + .onetime_init_regs_items = 0, //one time initialization is done by HDMI part + .onetime_init_regs = NULL, + + .powerup_regs_items = ARRAY_SIZE(adv7481_cvbs_powerup_regset), + .powerup_regs = adv7481_cvbs_powerup_regset, + + .poweroff_regs_items = ARRAY_SIZE(adv7481_cvbs_streamoff_regs), + .poweroff_regs = adv7481_cvbs_streamoff_regs, + + .id_reg_items = 0, + .id_regs = NULL, + + .subdev_items = ARRAY_SIZE(adv7481_cvbs_sensor_subdevs), + .subdevs = adv7481_cvbs_sensor_subdevs, + + .sensor_limits = &adv7481_cvbs_sensor_limits, + + .pll_config_items = ARRAY_SIZE(adv7481_cvbs_pll_configurations), + .pll_configs = adv7481_cvbs_pll_configurations, + .op_sys_clk = adv7481_cvbs_op_sys_clock, + + .modes_items = ARRAY_SIZE(adv7481_cvbs_modes), + .modes = adv7481_cvbs_modes, + + .streamon_regs_items = ARRAY_SIZE(adv7481_cvbs_streamon_regs), + .streamon_regs = adv7481_cvbs_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(adv7481_cvbs_streamoff_regs), + .streamoff_regs = adv7481_cvbs_streamoff_regs, + + .ctrl_items = ARRAY_SIZE(adv7481_cvbs_ctrls), + .ctrl_bank = adv7481_cvbs_ctrls, + + .csi_fmts_items = ARRAY_SIZE(adv7481_cvbs_crl_csi_data_fmt), + .csi_fmts = 
adv7481_cvbs_crl_csi_data_fmt, + + .addr_len = CRL_ADDR_7BIT, +}; + +#endif /* __CRLMODULE_ADV7481_CVBS_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule-lite/crl_adv7481_eval_configuration.h b/drivers/media/i2c/crlmodule-lite/crl_adv7481_eval_configuration.h new file mode 100644 index 000000000000..a0cd825afb4a --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crl_adv7481_eval_configuration.h @@ -0,0 +1,535 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_ADV7481_EVAL_CONFIGURATION_H_ +#define __CRLMODULE_ADV7481_EVAL_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static const s64 adv7481_eval_op_sys_clock[] = {400000000, }; + +struct crl_ctrl_data_pair ctrl_data_lanes[] = { + { + .ctrl_id = ICI_EXT_SD_PARAM_ID_MIPI_LANES, + .data = 4, + }, + { + .ctrl_id = ICI_EXT_SD_PARAM_ID_MIPI_LANES, + .data = 2, + }, +}; +static struct crl_pll_configuration adv7481_eval_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 16, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 24, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_1080p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_720p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_VGA_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 640, + .out_rect.height = 480, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_1080i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 
1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 540, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_480i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_576p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 576, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_576i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 288, + }, +}; +static struct crl_mode_rep adv7481_eval_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_1080p_rects), + .sd_rects = adv7481_eval_1080p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[0], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_720p_rects), + .sd_rects = adv7481_eval_720p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[0], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_VGA_rects), + .sd_rects = adv7481_eval_VGA_rects, + .binn_hor = 3, + .binn_vert = 2, + .scale_m = 1, + .width = 640, + .height = 480, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[1], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_1080i_rects), + .sd_rects = adv7481_eval_1080i_rects, + .binn_hor = 1, + .binn_vert = 2, + .scale_m = 1, + .width = 1920, + .height = 540, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[1], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_480i_rects), + .sd_rects = adv7481_eval_480i_rects, + .binn_hor = 2, + .binn_vert = 4, + .scale_m = 1, + .width = 720, + .height = 240, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[1], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_576p_rects), + .sd_rects = adv7481_eval_576p_rects, + .binn_hor = 2, + .binn_vert = 1, + .scale_m = 1, + .width = 720, + .height = 576, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[1], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + 
{ + .sd_rects_items = ARRAY_SIZE(adv7481_eval_576i_rects), + .sd_rects = adv7481_eval_576i_rects, + .binn_hor = 2, + .binn_vert = 3, + .scale_m = 1, + .width = 720, + .height = 288, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[1], + .mode_regs_items = 0, + .mode_regs = NULL, + }, +}; + +static struct crl_sensor_subdev_config adv7481_eval_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481 pixel array", + }, +}; + +static struct crl_sensor_subdev_config adv7481b_eval_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481b binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481b pixel array", + }, +}; + +static struct crl_sensor_limits adv7481_eval_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1920, + .y_addr_max = 1080, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 1, + .scaler_m_max = 1, + .scaler_n_min = 1, + .scaler_n_max = 1, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 1, +}; + +static struct crl_csi_data_fmt adv7481_eval_crl_csi_data_fmt[] = { + { + .code = ICI_FORMAT_YUYV, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = 0, + .regs = NULL, + }, + { + .code = ICI_FORMAT_UYVY, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = 0, + .regs = NULL, + }, + { + .code = ICI_FORMAT_RGB565, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = 0, + .regs = NULL, + }, + { + .code = ICI_FORMAT_RGB888, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 24, + .regs_items = 0, + .regs = NULL, + }, +}; + +static struct crl_ctrl_data adv7481_eval_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = ICI_EXT_SD_PARAM_ID_LINK_FREQ, + .name = "CTRL_ID_LINK_FREQ", + .type = CRL_CTRL_TYPE_MENU_INT, + .data.int_menu.def = 0, + .data.int_menu.max = ARRAY_SIZE(adv7481_eval_pll_configurations) - 1, + .data.int_menu.menu = adv7481_eval_op_sys_clock, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_PA", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_CSI", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_MIPI_LANES, + .name = "CTRL_ID_MIPI_LANES", + .type = CRL_CTRL_TYPE_CUSTOM, + .data.std_data.min = 2, + .data.std_data.max = 4, + 
.data.std_data.step = 2,
+		.data.std_data.def = 4,
+		.flags = 0,
+		.impact = CRL_IMPACTS_NO_IMPACT,
+		.regs_items = 0,
+		.regs = 0,
+		.dep_items = 0,
+		.dep_ctrls = 0,
+		.param.type = ICI_EXT_SD_PARAM_TYPE_INT32,
+	},
+};
+
+static struct crl_sensor_configuration adv7481_eval_crl_configuration = {
+
+	.powerup_regs_items = 0,
+	.powerup_regs = NULL,
+
+	.poweroff_regs_items = 0,
+	.poweroff_regs = NULL,
+
+	.id_reg_items = 0,
+	.id_regs = NULL,
+
+	.subdev_items = ARRAY_SIZE(adv7481_eval_sensor_subdevs),
+	.subdevs = adv7481_eval_sensor_subdevs,
+
+	.sensor_limits = &adv7481_eval_sensor_limits,
+
+	.pll_config_items = ARRAY_SIZE(adv7481_eval_pll_configurations),
+	.pll_configs = adv7481_eval_pll_configurations,
+	.op_sys_clk = adv7481_eval_op_sys_clock,
+
+	.modes_items = ARRAY_SIZE(adv7481_eval_modes),
+	.modes = adv7481_eval_modes,
+
+	.streamon_regs_items = 0,
+	.streamon_regs = NULL,
+
+	.streamoff_regs_items = 0,
+	.streamoff_regs = NULL,
+
+	.ctrl_items = ARRAY_SIZE(adv7481_eval_ctrls),
+	.ctrl_bank = adv7481_eval_ctrls,
+
+	.csi_fmts_items = ARRAY_SIZE(adv7481_eval_crl_csi_data_fmt),
+	.csi_fmts = adv7481_eval_crl_csi_data_fmt,
+};
+
+static struct crl_sensor_configuration adv7481b_eval_crl_configuration = {
+
+	.powerup_regs_items = 0,
+	.powerup_regs = NULL,
+
+	.poweroff_regs_items = 0,
+	.poweroff_regs = NULL,
+
+	.id_reg_items = 0,
+	.id_regs = NULL,
+
+	.subdev_items = ARRAY_SIZE(adv7481b_eval_sensor_subdevs),
+	.subdevs = adv7481b_eval_sensor_subdevs,
+
+	.sensor_limits = &adv7481_eval_sensor_limits,
+
+	.pll_config_items = ARRAY_SIZE(adv7481_eval_pll_configurations),
+	.pll_configs = adv7481_eval_pll_configurations,
+	.op_sys_clk = adv7481_eval_op_sys_clock,
+
+	.modes_items = ARRAY_SIZE(adv7481_eval_modes),
+	.modes = adv7481_eval_modes,
+
+	.streamon_regs_items = 0,
+	.streamon_regs = NULL,
+
+	.streamoff_regs_items = 0,
+	.streamoff_regs = NULL,
+
+	.ctrl_items = ARRAY_SIZE(adv7481_eval_ctrls),
+	.ctrl_bank = adv7481_eval_ctrls,
+
+	.csi_fmts_items = ARRAY_SIZE(adv7481_eval_crl_csi_data_fmt),
+	.csi_fmts = adv7481_eval_crl_csi_data_fmt,
+};
+
+#endif /* __CRLMODULE_ADV7481_EVAL_CONFIGURATION_H_ */
diff --git a/drivers/media/i2c/crlmodule-lite/crl_adv7481_hdmi_configuration.c b/drivers/media/i2c/crlmodule-lite/crl_adv7481_hdmi_configuration.c
new file mode 100644
index 000000000000..d38b9d29c8c2
--- /dev/null
+++ b/drivers/media/i2c/crlmodule-lite/crl_adv7481_hdmi_configuration.c
@@ -0,0 +1,606 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include "crlmodule.h"
+#include "crlmodule-regs.h"
+
+#define CREATE_ATTRIBUTE(attr) \
+	if (device_create_file(&client->dev, &attr) != 0) { \
+		dev_err(&client->dev, "ADV7481 couldn't register %s sysfs entry.\n", \
+			#attr); \
+	}
+
+#define REMOVE_ATTRIBUTE(attr) \
+	device_remove_file(&client->dev, &attr);
+
+/* Size of the ADV7481 KSV buffer in bytes */
+#define ADV7481_KSV_BUFFER_SIZE 0x80
+/* Size of a single KSV */
+#define ADV7481_KSV_SIZE 0x05
+/* Max number of devices (ADV7481_KSV_BUFFER_SIZE / ADV7481_KSV_SIZE) */
+#define ADV7481_MAX_DEVICES 0x19
+
+#define ADV7481_AKSV_UPDATE_A_ST 0x08
+#define ADV7481_CABLE_DET_A_ST 0x40
+#define ADV7481_V_LOCKED_A_ST 0x02
+#define ADV7481_DE_REGEN_A_ST 0x01
+
+#define ADV7481_GPIO 456
+
+/*
+ * Prevents executing another hot plug reset until the current one has
+ * finished.
+ */
+static unsigned int in_hot_plug_reset = 0;
+
+/*
+ * When a hot plug reset is executed, the HPA bit is deasserted for 2 seconds.
+ * This timer is used to assert the HPA bit again after that time without
+ * blocking.
+ */
+static struct timer_list hot_plug_reset_timer;
+
+static struct workqueue_struct *irq_workqueue = NULL;
+static int hdmi_res_width;
+static int hdmi_res_height;
+static int hdmi_res_interlaced;
+
+static DEFINE_MUTEX(hot_plug_reset_lock);
+
+typedef struct {
+	struct work_struct work;
+	struct i2c_client *client;
+} irq_task_t;
+
+/* ADV7481 HDCP B-status register */
+struct adv7481_bstatus {
+	union {
+		__u8 bstatus[2];
+		struct {
+			__u8 device_count:7;
+			__u8 max_devs_exceeded:1;
+			__u8 depth:3;
+			__u8 max_cascade_exceeded:1;
+			__u8 hdmi_mode:1;
+			__u8 hdmi_reserved_2:1;
+			__u8 rsvd:2;
+		};
+	};
+};
+
+struct adv7481_dev_info {
+	struct adv7481_bstatus bstatus;
+	__u8 ksv[ADV7481_KSV_BUFFER_SIZE];
+};
+
+struct adv7481_bcaps {
+	union {
+		__u8 bcaps;
+		struct {
+			__u8 fast_reauth:1;
+			__u8 features:1;
+			__u8 reserved:2;
+			__u8 fast:1;
+			__u8 ksv_fifo_ready:1;
+			__u8 repeater:1;
+			__u8 hdmi_reserved:1;
+		};
+	};
+};
+
+static int adv_i2c_write(struct i2c_client *client, u16 i2c_addr, u16 reg, u8 val)
+{
+	struct ici_ext_subdev *subdev = i2c_get_clientdata(client);
+	struct crl_sensor *sensor = to_crlmodule_sensor(subdev);
+
+	return crlmodule_write_reg(sensor, i2c_addr, reg, 1, 0xFF, val);
+}
+
+static int adv_i2c_read(struct i2c_client *client, u16 i2c_addr, u16 reg, u32 *val)
+{
+	struct ici_ext_subdev *subdev = i2c_get_clientdata(client);
+	struct crl_sensor *sensor = to_crlmodule_sensor(subdev);
+	struct crl_register_read_rep read_reg;
+
+	read_reg.address = reg;
+	read_reg.len = CRL_REG_LEN_08BIT;
+	read_reg.mask = 0xFF;
+	read_reg.dev_i2c_addr = i2c_addr;
+	return crlmodule_read_reg(sensor, read_reg, val);
+}
+
+/*
+ * Writes the HDCP BKSV list & status when the system acts
+ * as an HDCP 1.4 repeater.
+ */
+static long adv_write_bksv(struct i2c_client *client,
+			   struct adv7481_dev_info *dev_info)
+{
+	unsigned int k = 0;
+	int ret = 0;
+	u32 reg;
+	struct ici_ext_subdev *subdev = i2c_get_clientdata(client);
+	struct crl_sensor *sensor = to_crlmodule_sensor(subdev);
+
+	dev_dbg(&client->dev, "%s: Writing ADV7481 BKSV list.\n", __func__);
+
+	/* Clear BCAPS KSV list ready */
+	ret = adv_i2c_write(client, 0x64, 0x78, 0x01);
+	if (ret) {
+		dev_err(&client->dev, "%s: Error clearing BCAPS KSV list ready!\n", __func__);
+		return ret;
+	}
+
+	/* KSV_LIST_READY_PORT_A KSV list not ready */
+	ret = adv_i2c_write(client, 0x64, 0x69, 0x00);
+	if (ret) {
+		dev_err(&client->dev, "%s: Error clearing KSV_LIST_READY_PORT_A register!\n", __func__);
+		return ret;
+	}
+
+	/* Write the BKSV list, one device at a time */
+	/* Writing the entire list in one call exceeds the I2C frame size */
+	for (k = 0; k < ADV7481_MAX_DEVICES; ++k) {
+		unsigned int j = k * ADV7481_KSV_SIZE;
+		struct crl_register_write_rep adv_ksv_cmd[] = {
+			{0x80 + j, CRL_REG_LEN_08BIT, dev_info->ksv[j + 0], 0x64},
+			{0x81 + j, CRL_REG_LEN_08BIT, dev_info->ksv[j + 1], 0x64},
+			{0x82 + j, CRL_REG_LEN_08BIT, dev_info->ksv[j + 2], 0x64},
+			{0x83 + j, CRL_REG_LEN_08BIT, dev_info->ksv[j + 3], 0x64},
+			{0x84 + j, CRL_REG_LEN_08BIT, dev_info->ksv[j + 4], 0x64},
+		};
+
+		ret = crlmodule_write_regs(sensor, adv_ksv_cmd, ARRAY_SIZE(adv_ksv_cmd));
+		if (ret) {
+			dev_err(&client->dev, "%s: Error while writing BKSV list!\n", __func__);
+			return ret;
+		}
+	}
+
+	/* Finally update the bstatus registers */
+	ret = adv_i2c_read(client, 0x64, 0x42, &reg);
+	if (ret) {
+		dev_err(&client->dev, "%s: Error reading bstatus register!\n", __func__);
+		return ret;
+	}
+
+	/* ADI recommendation: only update bits [11:0]. */
+	/* Take the lower nibble (bits [11:8]) from the input bstatus */
+	/* and the upper nibble (bits [15:12]) from the current register. */
+	dev_info->bstatus.bstatus[1] =
+		(dev_info->bstatus.bstatus[1] & 0x0F) | (reg & 0xF0);
+	{
+		struct crl_register_write_rep adv_cmd[] = {
+			{0x41, CRL_REG_LEN_08BIT, dev_info->bstatus.bstatus[0], 0x64},
+			{0x42, CRL_REG_LEN_08BIT, dev_info->bstatus.bstatus[1], 0x64},
+			{0x69, CRL_REG_LEN_08BIT, 0x01, 0x64}, /* KSV_LIST_READY_PORT_A */
+		};
+
+		ret = crlmodule_write_regs(sensor, adv_cmd, ARRAY_SIZE(adv_cmd));
+	}
+
+	return ret;
+}
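+
+/*
+ * Every regset table in the companion configuration headers is applied
+ * through the same table-driven writer used above (crlmodule_write_regs()).
+ * The helper itself is not part of this patch; a minimal sketch of the
+ * pattern, assuming the {address, len, val, dev_i2c_addr} field layout of
+ * struct crl_register_write_rep and the crlmodule_write_reg() signature
+ * used by adv_i2c_write():
+ *
+ *   static int write_regs_sketch(struct crl_sensor *sensor,
+ *                                const struct crl_register_write_rep *regs,
+ *                                int len)
+ *   {
+ *       int i, ret;
+ *
+ *       for (i = 0; i < len; i++) {
+ *           if (regs[i].len == CRL_REG_LEN_DELAY) {
+ *               // e.g. {0x00, CRL_REG_LEN_DELAY, 0x02, 0x00} sleeps 2 ms
+ *               msleep(regs[i].val);
+ *               continue;
+ *           }
+ *           ret = crlmodule_write_reg(sensor, regs[i].dev_i2c_addr,
+ *                                     regs[i].address, 1, 0xFF, regs[i].val);
+ *           if (ret)
+ *               return ret;
+ *       }
+ *       return 0;
+ *   }
+ */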
+
+static ssize_t adv_bcaps_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	u32 val;
+	int ret;
+	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+
+	ret = adv_i2c_read(client, 0x64, 0x40, &val);
+	if (ret != 0)
+		return -EIO;
+
+	val = val & 0xFF;
+	*buf = val;
+	return 1;
+}
+
+/* Declares the bcaps attribute that is exposed to user space via sysfs */
+static DEVICE_ATTR(bcaps, S_IRUGO, adv_bcaps_show, NULL);
+
+/*
+ * Writes a BKSV list provided by user space to the chip.
+ * The buffer must be formatted as a struct adv7481_dev_info; the only
+ * validation performed is a check that the provided buffer size matches
+ * sizeof(struct adv7481_dev_info). Returns -EIO on error.
+ */
+static ssize_t adv_bksv_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret;
+	struct adv7481_dev_info dev_info;
+	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+	if (count != sizeof(struct adv7481_dev_info))
+		return -EIO;
+
+	dev_info = *((struct adv7481_dev_info *)buf);
+
+	ret = adv_write_bksv(client, &dev_info);
+	if (ret != 0)
+		return -EIO;
+
+	return count;
+}
+
+/* Declares the bksv attribute that is exposed to user space via sysfs */
+static DEVICE_ATTR(bksv, S_IWUSR | S_IWGRP, NULL, adv_bksv_store);
+
+/*
+ * Asserts HPA_MAN_VALUE_PORT_A to re-enable hot plug detection.
+ */
+static void adv_hpa_assert(struct work_struct *work)
+{
+	irq_task_t *task = (irq_task_t *)work;
+	struct i2c_client *client = task->client;
+
+	adv_i2c_write(client, 0x68, 0xF8, 0x01);
+	in_hot_plug_reset = 0;
+	kfree(work);
+}
+
+/*
+ * Handles the HPA timer expiry; defers re-asserting HPA to adv_hpa_assert().
+ */
+static void adv_hpa_reset_callback(unsigned long data)
+{
+	irq_task_t *task = NULL;
+
+	task = kmalloc(sizeof(irq_task_t), GFP_ATOMIC);
+	if (task) {
+		INIT_WORK((struct work_struct *)task, adv_hpa_assert);
+		task->client = (struct i2c_client *)data;
+		queue_work(irq_workqueue, (struct work_struct *)task);
+	}
+}
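+
+/*
+ * The timer callback above runs in atomic (softirq) context, where the
+ * blocking I2C transfer needed to re-assert HPA must not be issued; the
+ * callback therefore only allocates an irq_task_t with GFP_ATOMIC and
+ * defers the actual register write to adv_hpa_assert() on irq_workqueue,
+ * which runs in process context and may sleep.
+ */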
+
+/*
+ * Reauthenticates HDCP by deasserting hot plug detection for 2 seconds,
+ * after which the connected source automatically requests HDCP
+ * authentication again. It is triggered from user space by writing any
+ * value to the "reauthenticate" attribute. To avoid sleeping, a timer is
+ * used to delay re-enabling hot plug by 2 seconds.
+ * Returns -EBUSY while a previous reauthentication is still in progress
+ * and -EIO on error.
+ */
+static ssize_t adv_reauthenticate_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret;
+	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+
+	mutex_lock(&hot_plug_reset_lock);
+
+	if (in_hot_plug_reset) {
+		mutex_unlock(&hot_plug_reset_lock);
+		return -EBUSY;
+	}
+
+	/* Clear BCAPS KSV list ready */
+	ret = adv_i2c_write(client, 0x64, 0x78, 0x01);
+	if (ret != 0) {
+		dev_err(&client->dev, "%s: Error clearing BCAPS KSV list ready!\n", __func__);
+		mutex_unlock(&hot_plug_reset_lock);
+		return -EIO;
+	}
+
+	/* KSV_LIST_READY_PORT_A KSV list not ready */
+	ret = adv_i2c_write(client, 0x64, 0x69, 0x00);
+	if (ret != 0) {
+		dev_err(&client->dev, "%s: Error clearing KSV_LIST_READY_PORT_A register!\n", __func__);
+		mutex_unlock(&hot_plug_reset_lock);
+		return -EIO;
+	}
+
+	ret = adv_i2c_write(client, 0x68, 0xF8, 0x00);
+	if (ret != 0) {
+		mutex_unlock(&hot_plug_reset_lock);
+		return -EIO;
+	}
+
+	in_hot_plug_reset = 1;
+	mod_timer(&hot_plug_reset_timer, jiffies + msecs_to_jiffies(2000));
+
+	mutex_unlock(&hot_plug_reset_lock);
+	return count;
+}
+
+/* Declares the reauthenticate attribute that is exposed to user space via sysfs */
+static DEVICE_ATTR(reauthenticate, S_IWUSR | S_IWGRP, NULL, adv_reauthenticate_store);
+
+/* Dummy show callback to prevent a WARN when registering the aksv attribute */
+static ssize_t adv_aksv_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	(void)dev;
+	(void)attr;
+	(void)buf;
+
+	return -EIO;
+}
+
+/* Declares the aksv attribute exposed to user space via sysfs, used to signal AKSV events */
+static DEVICE_ATTR(aksv, S_IRUGO, adv_aksv_show, NULL);
+
+static ssize_t adv_hdmi_cable_connected_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	char interlaced = 'p';
+
+	if (hdmi_res_interlaced)
+		interlaced = 'i';
+
+	return snprintf(buf, 20, "%dx%d%c", hdmi_res_width, hdmi_res_height, interlaced);
+}
+static DEVICE_ATTR(hdmi_cable_connected, S_IRUGO, adv_hdmi_cable_connected_show, NULL);
+
+static ssize_t adv_bstatus_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	u32 b0, b1;
+	int ret;
+	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+
+	dev_dbg(&client->dev, "Getting bstatus\n");
+	ret = adv_i2c_read(client, 0x64, 0x41, &b0);
+	if (ret != 0) {
+		dev_err(&client->dev, "Error getting bstatus(0)\n");
+		return -EIO;
+	}
+	dev_dbg(&client->dev, "bstatus(0): 0x%x\n", b0 & 0xff);
+	ret = adv_i2c_read(client, 0x64, 0x42, &b1);
+	if (ret != 0) {
+		dev_err(&client->dev, "Error getting bstatus(1)\n");
+		return -EIO;
+	}
+	dev_dbg(&client->dev, "bstatus(1): 0x%x\n", b1 & 0xff);
+	*buf = b0 & 0xff;
+	buf++;
+	*buf = b1 & 0xff;
+	return 2;
+}
+static DEVICE_ATTR(bstatus, S_IRUGO, adv_bstatus_show, NULL);
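+
+/*
+ * User space consumes these attributes with poll(2): the sysfs_notify()
+ * calls in the interrupt bottom half below wake POLLPRI waiters on "aksv"
+ * and "hdmi_cable_connected". A minimal sketch of a consumer; the sysfs
+ * path is hypothetical and depends on which I2C bus and address the chip
+ * enumerates at:
+ *
+ *   char buf[20];
+ *   int fd = open("/sys/bus/i2c/devices/4-0070/hdmi_cable_connected",
+ *                 O_RDONLY);
+ *   struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };
+ *
+ *   read(fd, buf, sizeof(buf));   // initial read arms the notification
+ *   poll(&pfd, 1, -1);            // blocks until sysfs_notify()
+ *   lseek(fd, 0, SEEK_SET);       // rewind and read the new mode string
+ *   read(fd, buf, sizeof(buf));
+ */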
+
+// irq GPIO pin unavailable on ACRN UOS
+#if (!IS_ENABLED(CONFIG_VIDEO_INTEL_UOS))
+static void adv_isr_bh(struct work_struct *work)
+{
+	irq_task_t *task = (irq_task_t *)work;
+	struct i2c_client *client = task->client;
+	u32 interrupt_st;
+	u32 raw_value;
+	u32 temp[3];
+	int ret = 0;
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+
+	/* AKSV_UPDATE_A_ST: check interrupt status */
+	ret = adv_i2c_read(client, 0xE0, 0x90, &interrupt_st);
+
+	if (interrupt_st & ADV7481_AKSV_UPDATE_A_ST) {
+		dev_dbg(&client->dev, "%s: ADV7481 ISR: AKSV_UPDATE_A_ST: 0x%x\n",
+			__func__, interrupt_st);
+
+		/* Notify user space about the AKSV event */
+		sysfs_notify(&client->dev.kobj, NULL, "aksv");
+
+		/* Clear the interrupt bit */
+		ret = adv_i2c_write(client, 0xE0, 0x91, ADV7481_AKSV_UPDATE_A_ST);
+	}
+
+	/* Check interrupt status for CABLE_DET_A_ST, V_LOCKED_A_ST and DE_REGEN_LCK_A_ST */
+	ret = adv_i2c_read(client, 0xE0, 0x72, &interrupt_st);
+
+	/*
+	 * If any of the CABLE_DET_A_ST, V_LOCKED_A_ST and DE_REGEN_LCK_A_ST
+	 * interrupts was set, get the updated values of CABLE_DET_RAW,
+	 * V_LOCKED_RAW and DE_REGEN_LCK_RAW.
+	 */
+	if (interrupt_st)
+		ret = adv_i2c_read(client, 0xE0, 0x71, &raw_value);
+
+	/* Check the CABLE_DET_A_ST interrupt */
+	if (interrupt_st & ADV7481_CABLE_DET_A_ST) {
+		/* Clear the interrupt bit */
+		ret = adv_i2c_write(client, 0xE0, 0x73, ADV7481_CABLE_DET_A_ST);
+
+		/* HDMI cable is connected */
+		if (raw_value & ADV7481_CABLE_DET_A_ST) {
+			dev_dbg(&client->dev, "%s: ADV7481 ISR: HDMI cable connected\n", __func__);
+			ret = adv_i2c_write(client, 0xE0, 0x10, 0xA1);
+		} else {
+			dev_dbg(&client->dev, "%s: ADV7481 ISR: HDMI cable disconnected\n", __func__);
+		}
+	}
+
+	/* Check the V_LOCKED_A_ST interrupt */
+	if (interrupt_st & ADV7481_V_LOCKED_A_ST) {
+		/* Clear the interrupt bit */
+		ret = adv_i2c_write(client, 0xE0, 0x73, ADV7481_V_LOCKED_A_ST);
+		/* The vertical sync filter has locked; the resolution height can be read */
+		if (raw_value & ADV7481_V_LOCKED_A_ST) {
+			dev_dbg(&client->dev, "%s: ADV7481 ISR: Vertical Sync Filter Locked\n", __func__);
+			/* Field height registers in the HDMI RX map (0x68) */
+			adv_i2c_read(client, 0x68, 0x09, &temp[0]);
+			adv_i2c_read(client, 0x68, 0x0A, &temp[1]);
+			adv_i2c_read(client, 0x68, 0x0B, &temp[2]);
+
+			temp[0] = temp[0] & 0x1F;
+			hdmi_res_height = (temp[0] << 8) + temp[1];
+			if (temp[2] & 0x20) {
+				hdmi_res_height = hdmi_res_height << 1;
+				hdmi_res_interlaced = 1;
+			} else {
+				hdmi_res_interlaced = 0;
+			}
+
+			/* If the width was already read, notify user space about the new resolution */
+			if (hdmi_res_width)
+				sysfs_notify(&client->dev.kobj, NULL, "hdmi_cable_connected");
+		} else {
+			dev_dbg(&client->dev, "%s: ADV7481 ISR: Vertical Sync Filter Lost\n", __func__);
+			hdmi_res_height = 0;
+			/* Notify user space about losing the resolution */
+			if (!hdmi_res_width)
+				sysfs_notify(&client->dev.kobj, NULL, "hdmi_cable_connected");
+		}
+	}
+
+	/* Check the DE_REGEN_A_ST interrupt */
+	if (interrupt_st & ADV7481_DE_REGEN_A_ST) {
+		/* Clear the interrupt bit */
+		ret = adv_i2c_write(client, 0xE0, 0x73, ADV7481_DE_REGEN_A_ST);
+
+		/* DE regeneration has locked; the resolution width can be read */
+		if (raw_value & ADV7481_DE_REGEN_A_ST) {
+			dev_dbg(&client->dev, "%s: ADV7481 ISR: DE Regeneration Locked\n", __func__);
+			/* Line width registers in the HDMI RX map (0x68) */
+			adv_i2c_read(client, 0x68, 0x07, &temp[0]);
+			adv_i2c_read(client, 0x68, 0x08, &temp[1]);
+
+			temp[0] = temp[0] & 0x1F;
+			hdmi_res_width = (temp[0] << 8) + temp[1];
+
+			/* If the height was already read, notify user space about the new resolution */
+			if (hdmi_res_height)
+				sysfs_notify(&client->dev.kobj, NULL, "hdmi_cable_connected");
+		} else {
+			dev_dbg(&client->dev, "%s: ADV7481 ISR: DE Regeneration Lost\n", __func__);
+			hdmi_res_width = 0;
+			/* Notify user space about losing the resolution */
+			if (!hdmi_res_height)
+				sysfs_notify(&client->dev.kobj, NULL, "hdmi_cable_connected");
+		}
+	}
+}
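+
+/*
+ * Worked example of the decode above: with a 1080i source the HDMI RX map
+ * typically returns 0x09/0x0A = 0x02/0x1C, giving a field height of
+ * (0x02 << 8) + 0x1C = 540 lines; bit 5 of register 0x0B is set, so the
+ * height is doubled to 1080 and the mode is reported as interlaced once
+ * registers 0x07/0x08 have also yielded the 1920-pixel width.
+ */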
+
+static irqreturn_t adv7481_irq_handler(int irq, void *dev_id)
+{
+	irq_task_t *task = NULL;
+	struct i2c_client *client = (struct i2c_client *)dev_id;
+
+	dev_dbg(&client->dev, "%s: Interrupt in ADV7481\n", __func__);
+
+	task = kmalloc(sizeof(irq_task_t), GFP_ATOMIC);
+	if (task) {
+		INIT_WORK((struct work_struct *)task, adv_isr_bh);
+		task->client = client;
+		queue_work(irq_workqueue, (struct work_struct *)task);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int unregister_gpio_irq(void)
+{
+	gpio_free(ADV7481_GPIO);
+	return 0;
+}
+
+static int register_gpio_irq(struct i2c_client *client)
+{
+	int res = 0;
+	unsigned int irq;
+
+	if (!gpio_is_valid(ADV7481_GPIO)) {
+		dev_err(&client->dev, "%s: ADV7481 GPIO pin %d is invalid!\n",
+			__func__, ADV7481_GPIO);
+		return -ENODEV;
+	}
+	dev_dbg(&client->dev, "%s: GPIO %d is valid.\n", __func__, ADV7481_GPIO);
+
+	res = gpio_request(ADV7481_GPIO, "ADV7481 Interrupt");
+	if (res) {
+		dev_err(&client->dev, "%s: ADV7481 GPIO pin request failed!\n", __func__);
+		return -ENODEV;
+	}
+
+	gpio_direction_input(ADV7481_GPIO);
+	irq = gpio_to_irq(ADV7481_GPIO);
+	res = request_irq(irq, adv7481_irq_handler, IRQF_TRIGGER_RISING,
+			  "adv7481_irq_handler", client);
+
+	dev_dbg(&client->dev, "%s: GPIO IRQ registration result: %d\n", __func__, res);
+
+	return res;
+}
+#endif
+
+int adv7481_sensor_init(struct i2c_client *client)
+{
+	dev_dbg(&client->dev, "%s: ADV7481_sensor_init\n", __func__);
+	irq_workqueue = create_workqueue("adv7481_irq_workqueue");
+// irq GPIO pin unavailable on ACRN UOS
+#if (!IS_ENABLED(CONFIG_VIDEO_INTEL_UOS))
+	register_gpio_irq(client);
+#endif
+	setup_timer(&hot_plug_reset_timer, adv_hpa_reset_callback, (unsigned long)client);
+
+	CREATE_ATTRIBUTE(dev_attr_hdmi_cable_connected);
+	CREATE_ATTRIBUTE(dev_attr_bcaps);
+	CREATE_ATTRIBUTE(dev_attr_aksv);
+	CREATE_ATTRIBUTE(dev_attr_bksv);
+	CREATE_ATTRIBUTE(dev_attr_reauthenticate);
+	CREATE_ATTRIBUTE(dev_attr_bstatus);
+
+	return 0;
+}
+
+int adv7481_sensor_cleanup(struct i2c_client *client)
+{
+	dev_dbg(&client->dev, "%s: ADV7481_sensor_cleanup\n", __func__);
+	if (irq_workqueue != NULL) {
+// irq GPIO pin unavailable on ACRN UOS, so no IRQ was requested there
+#if (!IS_ENABLED(CONFIG_VIDEO_INTEL_UOS))
+		free_irq(gpio_to_irq(ADV7481_GPIO), client);
+		unregister_gpio_irq();
+#endif
+		del_timer(&hot_plug_reset_timer);
+		flush_workqueue(irq_workqueue);
+		destroy_workqueue(irq_workqueue);
+		irq_workqueue = NULL;
+	}
+	REMOVE_ATTRIBUTE(dev_attr_bstatus);
+	REMOVE_ATTRIBUTE(dev_attr_reauthenticate);
+	REMOVE_ATTRIBUTE(dev_attr_bksv);
+	REMOVE_ATTRIBUTE(dev_attr_aksv);
+	REMOVE_ATTRIBUTE(dev_attr_bcaps);
+	REMOVE_ATTRIBUTE(dev_attr_hdmi_cable_connected);
+	return 0;
+}
diff --git a/drivers/media/i2c/crlmodule-lite/crl_adv7481_hdmi_configuration.h b/drivers/media/i2c/crlmodule-lite/crl_adv7481_hdmi_configuration.h
new file mode 100644
index 000000000000..599ed55a8d35
--- /dev/null
+++ b/drivers/media/i2c/crlmodule-lite/crl_adv7481_hdmi_configuration.h
@@ -0,0 +1,686 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef __CRLMODULE_ADV7481_HDMI_CONFIGURATION_H_
+#define __CRLMODULE_ADV7481_HDMI_CONFIGURATION_H_
+
+#include "crlmodule-sensor-ds.h"
+
+static struct crl_register_write_rep adv7481_hdmi_onetime_init_regset[] = {
+	{0xFF, CRL_REG_LEN_08BIT, 0xFF, 0xE0},
+	{0x00, CRL_REG_LEN_DELAY, 0x05, 0x00},
+	{0x01, CRL_REG_LEN_08BIT, 0x76, 0xE0}, //ADI Required Write
+	{0x05, CRL_REG_LEN_08BIT, 0x96, 0xE0}, //Setting Vid_Std to 1600x1200(UXGA)@60
+	{0xF2, CRL_REG_LEN_08BIT, 0x01, 0xE0}, //Enable I2C Read Auto-Increment
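+	/*
+	 * Registers 0xF3..0xFD in the IO map (I2C address 0xE0) program the
+	 * 8-bit I2C addresses of the secondary register maps; the fourth
+	 * column of every entry in these tables selects the map the write is
+	 * routed to (0x68 = HDMI RX, 0x64 = RX repeater, 0x6C = EDID,
+	 * 0x94 = CSI-TXA, 0x90 = CSI-TXB). The values are 8-bit write
+	 * addresses, so e.g. 0x94 corresponds to 7-bit address 0x4A.
+	 */
+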
{0xF3, CRL_REG_LEN_08BIT, 0x4C, 0xE0}, //DPLL Map Address Set to 0x4C + {0xF4, CRL_REG_LEN_08BIT, 0x44, 0xE0}, //CP Map Address Set to 0x44 + {0xF5, CRL_REG_LEN_08BIT, 0x68, 0xE0}, //HDMI RX Map Address Set to 0x68 + {0xF6, CRL_REG_LEN_08BIT, 0x6C, 0xE0}, //EDID Map Address Set to 0x6C + {0xF7, CRL_REG_LEN_08BIT, 0x64, 0xE0}, //HDMI RX Repeater Map Address Set to 0x64 + {0xF8, CRL_REG_LEN_08BIT, 0x62, 0xE0}, //HDMI RX Infoframe Map Address Set to 0x62 + {0xF9, CRL_REG_LEN_08BIT, 0xF0, 0xE0}, //CBUS Map Address Set to 0xF0 + {0xFA, CRL_REG_LEN_08BIT, 0x82, 0xE0}, //CEC Map Address Set to 0x82 + {0xFB, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, //SDP Main Map Address Set to 0xF2 + {0xFC, CRL_REG_LEN_08BIT, 0x90, 0xE0}, //CSI-TXB Map Address Set to 0x90 + {0xFD, CRL_REG_LEN_08BIT, 0x94, 0xE0}, //CSI-TXA Map Address Set to 0x94 + {0x00, CRL_REG_LEN_08BIT, 0x40, 0xE0}, //Disable chip powerdown & Enable HDMI Rx block + + {0x40, CRL_REG_LEN_08BIT, 0xC3, 0x64}, //Enable HDCP 1.1 Repeater + {0x69, CRL_REG_LEN_08BIT, 0x00, 0x64}, //KSV List not ready port A + {0x77, CRL_REG_LEN_08BIT, 0x08, 0x64}, //Clear KSV List + {0x78, CRL_REG_LEN_08BIT, 0x01, 0x64}, //KSV_LIST_READY_CLR_A: Clears the BCAPS ready bit + {0x68, CRL_REG_LEN_08BIT, 0x00, 0x64}, //Disable dual ksv list for port A + {0x41, CRL_REG_LEN_08BIT, 0x00, 0x64}, //Reset b-status (1) + {0x42, CRL_REG_LEN_08BIT, 0x00, 0x64}, //Reset b-status (2) + {0x91, CRL_REG_LEN_08BIT, 0x08, 0xE0}, //AKSV Update Clear + + {0x00, CRL_REG_LEN_08BIT, 0x08, 0x68}, //Foreground Channel = A + {0x98, CRL_REG_LEN_08BIT, 0xFF, 0x68}, //ADI Required Write + {0x99, CRL_REG_LEN_08BIT, 0xA3, 0x68}, //ADI Required Write + {0x9A, CRL_REG_LEN_08BIT, 0x00, 0x68}, //ADI Required Write + {0x9B, CRL_REG_LEN_08BIT, 0x0A, 0x68}, //ADI Required Write + {0x9D, CRL_REG_LEN_08BIT, 0x40, 0x68}, //ADI Required Write + {0xCB, CRL_REG_LEN_08BIT, 0x09, 0x68}, //ADI Required Write + {0x3D, CRL_REG_LEN_08BIT, 0x10, 0x68}, //ADI Required Write + {0x3E, CRL_REG_LEN_08BIT, 0x7B, 0x68}, //ADI Required Write + {0x3F, CRL_REG_LEN_08BIT, 0x5E, 0x68}, //ADI Required Write + {0x4E, CRL_REG_LEN_08BIT, 0xFE, 0x68}, //ADI Required Write + {0x4F, CRL_REG_LEN_08BIT, 0x18, 0x68}, //ADI Required Write + {0x57, CRL_REG_LEN_08BIT, 0xA3, 0x68}, //ADI Required Write + {0x58, CRL_REG_LEN_08BIT, 0x04, 0x68}, //ADI Required Write + {0x85, CRL_REG_LEN_08BIT, 0x10, 0x68}, //ADI Required Write + {0x83, CRL_REG_LEN_08BIT, 0x00, 0x68}, //Enable All Terminations + {0xA3, CRL_REG_LEN_08BIT, 0x01, 0x68}, //ADI Required Write + {0xBE, CRL_REG_LEN_08BIT, 0x00, 0x68}, //ADI Required Write + {0x6C, CRL_REG_LEN_08BIT, 0x01, 0x68}, //HPA Manual Enable + {0xF8, CRL_REG_LEN_08BIT, 0x01, 0x68}, //HPA Asserted + {0x0F, CRL_REG_LEN_08BIT, 0x00, 0x68}, //Audio Mute Speed Set to Fastest (Smallest Step Size) + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, //LLC/PIX/AUD/SPI PINS TRISTATED + + {0x74, CRL_REG_LEN_08BIT, 0x43, 0xE0}, //Enable interrupts + {0x75, CRL_REG_LEN_08BIT, 0x43, 0xE0}, + + {0x70, CRL_REG_LEN_08BIT, 0xA0, 0x64}, /* Write primary edid size */ + {0x74, CRL_REG_LEN_08BIT, 0x01, 0x64}, /* Enable manual edid */ + {0x7A, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Write edid sram select */ + {0xF6, CRL_REG_LEN_08BIT, 0x6C, 0xE0}, /* Write edid map bus address */ + + {0x00*4, CRL_REG_LEN_32BIT, 0x00FFFFFF, 0x6C}, /* EDID programming */ + {0x01*4, CRL_REG_LEN_32BIT, 0xFFFFFF00, 0x6C}, /* EDID programming */ + {0x02*4, CRL_REG_LEN_32BIT, 0x4DD90100, 0x6C}, /* EDID programming */ + {0x03*4, CRL_REG_LEN_32BIT, 0x00000000, 0x6C}, /* EDID programming */ 
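+	/*
+	 * The 0x6C writes in this table load a 256-byte EDID as 64 32-bit
+	 * words, most significant byte first (words 0x00-0x01 form the
+	 * 00 FF FF FF FF FF FF 00 header). Byte 127 of each 128-byte block
+	 * is a checksum that makes the block sum to zero mod 256 (0xC1 ends
+	 * the base block at word 0x1F, 0xCB the CEA extension at word 0x3F).
+	 * A sketch for regenerating it after editing the table:
+	 *
+	 *   u8 sum = 0;
+	 *   for (i = 0; i < 127; i++)
+	 *       sum += edid[i];
+	 *   edid[127] = (u8)(0x100 - sum);
+	 */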
+ {0x04*4, CRL_REG_LEN_32BIT, 0x00110103, 0x6C}, /* EDID programming */ + {0x05*4, CRL_REG_LEN_32BIT, 0x80000078, 0x6C}, /* EDID programming */ + {0x06*4, CRL_REG_LEN_32BIT, 0x0A0DC9A0, 0x6C}, /* EDID programming */ + {0x07*4, CRL_REG_LEN_32BIT, 0x57479827, 0x6C}, /* EDID programming */ + {0x08*4, CRL_REG_LEN_32BIT, 0x12484C00, 0x6C}, /* EDID programming */ + {0x09*4, CRL_REG_LEN_32BIT, 0x00000101, 0x6C}, /* EDID programming */ + {0x0A*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0B*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0C*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0D*4, CRL_REG_LEN_32BIT, 0x0101011D, 0x6C}, /* EDID programming */ + {0x0E*4, CRL_REG_LEN_32BIT, 0x80D0721C, 0x6C}, /* EDID programming */ + {0x0F*4, CRL_REG_LEN_32BIT, 0x1620102C, 0x6C}, /* EDID programming */ + {0x10*4, CRL_REG_LEN_32BIT, 0x2580C48E, 0x6C}, /* EDID programming */ + {0x11*4, CRL_REG_LEN_32BIT, 0x2100009E, 0x6C}, /* EDID programming */ + {0x12*4, CRL_REG_LEN_32BIT, 0x011D8018, 0x6C}, /* EDID programming */ + {0x13*4, CRL_REG_LEN_32BIT, 0x711C1620, 0x6C}, /* EDID programming */ + {0x14*4, CRL_REG_LEN_32BIT, 0x582C2500, 0x6C}, /* EDID programming */ + {0x15*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x16*4, CRL_REG_LEN_32BIT, 0x009E0000, 0x6C}, /* EDID programming */ + {0x17*4, CRL_REG_LEN_32BIT, 0x00FC0048, 0x6C}, /* EDID programming */ + {0x18*4, CRL_REG_LEN_32BIT, 0x444D4920, 0x6C}, /* EDID programming */ + {0x19*4, CRL_REG_LEN_32BIT, 0x4C4C430A, 0x6C}, /* EDID programming */ + {0x1A*4, CRL_REG_LEN_32BIT, 0x20202020, 0x6C}, /* EDID programming */ + {0x1B*4, CRL_REG_LEN_32BIT, 0x000000FD, 0x6C}, /* EDID programming */ + {0x1C*4, CRL_REG_LEN_32BIT, 0x003B3D0F, 0x6C}, /* EDID programming */ + {0x1D*4, CRL_REG_LEN_32BIT, 0x2D08000A, 0x6C}, /* EDID programming */ + {0x1E*4, CRL_REG_LEN_32BIT, 0x20202020, 0x6C}, /* EDID programming */ + {0x1F*4, CRL_REG_LEN_32BIT, 0x202001C1, 0x6C}, /* EDID programming */ + {0x20*4, CRL_REG_LEN_32BIT, 0x02031E77, 0x6C}, /* EDID programming */ + {0x21*4, CRL_REG_LEN_32BIT, 0x4F941305, 0x6C}, /* EDID programming */ + {0x22*4, CRL_REG_LEN_32BIT, 0x03040201, 0x6C}, /* EDID programming */ + {0x23*4, CRL_REG_LEN_32BIT, 0x16150706, 0x6C}, /* EDID programming */ + {0x24*4, CRL_REG_LEN_32BIT, 0x1110121F, 0x6C}, /* EDID programming */ + {0x25*4, CRL_REG_LEN_32BIT, 0x23090701, 0x6C}, /* EDID programming */ + {0x26*4, CRL_REG_LEN_32BIT, 0x65030C00, 0x6C}, /* EDID programming */ + {0x27*4, CRL_REG_LEN_32BIT, 0x10008C0A, 0x6C}, /* EDID programming */ + {0x28*4, CRL_REG_LEN_32BIT, 0xD0902040, 0x6C}, /* EDID programming */ + {0x29*4, CRL_REG_LEN_32BIT, 0x31200C40, 0x6C}, /* EDID programming */ + {0x2A*4, CRL_REG_LEN_32BIT, 0x5500138E, 0x6C}, /* EDID programming */ + {0x2B*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x2C*4, CRL_REG_LEN_32BIT, 0x011D00BC, 0x6C}, /* EDID programming */ + {0x2D*4, CRL_REG_LEN_32BIT, 0x52D01E20, 0x6C}, /* EDID programming */ + {0x2E*4, CRL_REG_LEN_32BIT, 0xB8285540, 0x6C}, /* EDID programming */ + {0x2F*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x30*4, CRL_REG_LEN_32BIT, 0x001E8C0A, 0x6C}, /* EDID programming */ + {0x31*4, CRL_REG_LEN_32BIT, 0xD08A20E0, 0x6C}, /* EDID programming */ + {0x32*4, CRL_REG_LEN_32BIT, 0x2D10103E, 0x6C}, /* EDID programming */ + {0x33*4, CRL_REG_LEN_32BIT, 0x9600C48E, 0x6C}, /* EDID programming */ + {0x34*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x35*4, 
CRL_REG_LEN_32BIT, 0x011D0072, 0x6C}, /* EDID programming */ + {0x36*4, CRL_REG_LEN_32BIT, 0x51D01E20, 0x6C}, /* EDID programming */ + {0x37*4, CRL_REG_LEN_32BIT, 0x6E285500, 0x6C}, /* EDID programming */ + {0x38*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x39*4, CRL_REG_LEN_32BIT, 0x001E8C0A, 0x6C}, /* EDID programming */ + {0x3A*4, CRL_REG_LEN_32BIT, 0xD08A20E0, 0x6C}, /* EDID programming */ + {0x3B*4, CRL_REG_LEN_32BIT, 0x2D10103E, 0x6C}, /* EDID programming */ + {0x3C*4, CRL_REG_LEN_32BIT, 0x9600138E, 0x6C}, /* EDID programming */ + {0x3D*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x3E*4, CRL_REG_LEN_32BIT, 0x00000000, 0x6C}, /* EDID programming */ + {0x3F*4, CRL_REG_LEN_32BIT, 0x000000CB, 0x6C}, /* EDID programming */ +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_rgb565[] = { + {0x04, CRL_REG_LEN_08BIT, 0x02, 0xE0}, //RGB Out of CP + {0x12, CRL_REG_LEN_08BIT, 0xF0, 0xE0}, //CSC Depends on ip Packets - SDR 444 + {0x17, CRL_REG_LEN_08BIT, 0xB8, 0xE0}, //Configure for RGB565 & Luma & Chroma Values Can Reach 254d + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, //CP-Insert_AV_Code + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, //ADI Required Write + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, //Enable LLC_DLL & Double LLC Timing + {0x0E, CRL_REG_LEN_08BIT, 0xDD, 0xE0}, //LLC/PIX/SPI PINS TRISTATED AUD Outputs Enabled + {0x10, CRL_REG_LEN_08BIT, 0xC0, 0xE0}, //Enable 4-lane CSI Tx & Pixel Port + {0x7E, CRL_REG_LEN_08BIT, 0x98, 0x94}, //ADI Required Write +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_rgb888[] = { + {0x04, CRL_REG_LEN_08BIT, 0x02, 0xE0}, //RGB Out of CP + {0x12, CRL_REG_LEN_08BIT, 0xF0, 0xE0}, //CSC Depends on ip Packets - SDR 444 + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, //Luma & Chroma Values Can Reach 254d + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, //CP-Insert_AV_Code + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, //ADI Required Write + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, //Enable LLC_DLL & Double LLC Timing + {0x0E, CRL_REG_LEN_08BIT, 0xDD, 0xE0}, //LLC/PIX/SPI PINS TRISTATED AUD Outputs Enabled + {0x10, CRL_REG_LEN_08BIT, 0xC0, 0xE0}, //Enable 4-lane CSI Tx & Pixel Port + {0x7E, CRL_REG_LEN_08BIT, 0x00, 0x94}, //ADI Required Write +}; + + +static struct crl_register_write_rep adv7481_hdmi_mode_uyvy[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, //YCrCb output + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, //CSC Depends on ip Packets - SDR422 set + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, //Luma & Chroma Values Can Reach 254d + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, //CP-Insert_AV_Code + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, //ADI Required Write + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, //Enable LLC_DLL & Double LLC Timing + {0x0E, CRL_REG_LEN_08BIT, 0xDD, 0xE0}, //LLC/PIX/SPI PINS TRISTATED AUD Outputs Enabled + {0x10, CRL_REG_LEN_08BIT, 0xC0, 0xE0}, //Enable 4-lane CSI Tx & Pixel Port + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, //Enable 4-lane MIPI + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, //Set Auto DPHY Timing + {0xDB, CRL_REG_LEN_08BIT, 0x10, 0x94}, //ADI Required Write + {0x7E, CRL_REG_LEN_08BIT, 0x00, 0x94}, //ADI Required Write +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_yuyv[] = { + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* Enable Interrupt*/ + {0x04, CRL_REG_LEN_08BIT, 0x40, 0xE0}, /* YCrCb output good=0xE0*/ + /* CSC Depends on ip Packets - SDR422 set */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, + /* Luma & Chroma Values Can Reach 254d */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, + {0x7C, 
CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI Required Write */ + {0x3E, CRL_REG_LEN_08BIT, 0x08, 0x44}, /* Invert order of Cb and Cr*/ + /* Enable LLC_DLL & Double LLC Timing */ + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, + /* LLC/PIX/SPI PINS TRISTATED AUD Outputs Enabled */ + {0x0E, CRL_REG_LEN_08BIT, 0xDD, 0xE0}, + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0}, + /* Enable 4-lane CSI TXB & Pixel Port */ + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0xDB, CRL_REG_LEN_08BIT, 0x10, 0x94}, /* ADI Required Write */ + {0x7E, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* ADI Required Write */ +}; + +static struct crl_register_write_rep adv7481_hdmi_powerup_regset[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, //Enable 4-lane MIPI + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, //Set Auto DPHY Timing + {0xDB, CRL_REG_LEN_08BIT, 0x10, 0x94}, //ADI Required Write + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, //ADI Required Write + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, //ADI Required Write + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, //ADI Required Write + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, //ADI Required Write + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, //i2c_dphy_pwdn - 1'b0 + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, //ADI Required Write + {0x1E, CRL_REG_LEN_08BIT, 0xC0, 0x94}, //ADI Required Write, transmit only Frame Start/End packets + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, //i2c_mipi_pll_en - 1'b1 +}; + +static struct crl_register_write_rep adv7481_hdmi_streamon_regs[] = { + {0x00, CRL_REG_LEN_DELAY, 0x02, 0x00}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, /* Power-up CSI-TX */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, /* ADI recommended setting */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_hdmi_streamoff_regs[] = { + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, /* ADI Recommended Write */ + {0x1E, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* Reset the clock Lane */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* i2c_mipi_pll_en - 1'b0 Disable MIPI PLL */ + {0xC1, CRL_REG_LEN_08BIT, 0x3B, 0x94}, +}; + +static const s64 adv7481_hdmi_op_sys_clock[] = {297000000, 445500000}; + +static struct crl_pll_configuration adv7481_hdmi_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 297000000, + .bitsperpixel = 16, + .pixel_rate_csi = 594000000, + .pixel_rate_pa = 594000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 445500000, + .bitsperpixel = 24, + .pixel_rate_csi = 891000000, + .pixel_rate_pa = 891000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_1080p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_720p_rects[] = { + { + .subdev_type = 
CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_VGA_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 640, + .out_rect.height = 480, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_1080i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 540, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_480p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 480, + }, +}; +static struct crl_subdev_rect_rep adv7481_hdmi_576p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 576, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_576i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 288, + }, +}; +static struct crl_mode_rep adv7481_hdmi_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_1080p_rects), + .sd_rects = adv7481_hdmi_1080p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = 0, + }, + { + 
.sd_rects_items = ARRAY_SIZE(adv7481_hdmi_720p_rects), + .sd_rects = adv7481_hdmi_720p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = 0, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_VGA_rects), + .sd_rects = adv7481_hdmi_VGA_rects, + .binn_hor = 3, + .binn_vert = 2, + .scale_m = 1, + .width = 640, + .height = 480, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = 0, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_1080i_rects), + .sd_rects = adv7481_hdmi_1080i_rects, + .binn_hor = 1, + .binn_vert = 2, + .scale_m = 1, + .width = 1920, + .height = 540, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = 0, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_480p_rects), + .sd_rects = adv7481_hdmi_480p_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 720, + .height = 480, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = 0, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_576p_rects), + .sd_rects = adv7481_hdmi_576p_rects, + .binn_hor = 2, + .binn_vert = 1, + .scale_m = 1, + .width = 720, + .height = 576, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = 0, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_576i_rects), + .sd_rects = adv7481_hdmi_576i_rects, + .binn_hor = 2, + .binn_vert = 3, + .scale_m = 1, + .width = 720, + .height = 288, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, +}; + +static struct crl_sensor_subdev_config adv7481_hdmi_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481 hdmi binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481 hdmi pixel array", + }, +}; + +static struct crl_sensor_limits adv7481_hdmi_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1920, + .y_addr_max = 1080, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 1, + .scaler_m_max = 1, + .scaler_n_min = 1, + .scaler_n_max = 1, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 1, +}; + +static struct crl_csi_data_fmt adv7481_hdmi_crl_csi_data_fmt[] = { + { + .code = ICI_FORMAT_RGB565, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(adv7481_hdmi_mode_rgb565), + .regs = adv7481_hdmi_mode_rgb565, + }, + { + .code = ICI_FORMAT_UYVY, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(adv7481_hdmi_mode_uyvy), + .regs = adv7481_hdmi_mode_uyvy, + }, + { + .code = ICI_FORMAT_YUYV, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(adv7481_hdmi_mode_yuyv), + .regs = adv7481_hdmi_mode_yuyv, + }, + { + .code = ICI_FORMAT_RGB888, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 24, + .regs_items = ARRAY_SIZE(adv7481_hdmi_mode_rgb888), + .regs = adv7481_hdmi_mode_rgb888, + }, +}; + +static struct crl_ctrl_data adv7481_hdmi_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = ICI_EXT_SD_PARAM_ID_LINK_FREQ, + .name = "CTRL_ID_LINK_FREQ", + .type = CRL_CTRL_TYPE_MENU_INT, + .data.int_menu.def = 0, + .data.int_menu.max = ARRAY_SIZE(adv7481_hdmi_pll_configurations) - 1, + .data.int_menu.menu = 
adv7481_hdmi_op_sys_clock, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_PA", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_CSI", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +int adv7481_sensor_init(struct i2c_client*); +int adv7481_sensor_cleanup(struct i2c_client*); + +static struct crl_sensor_configuration adv7481_hdmi_crl_configuration = { + + .sensor_init = adv7481_sensor_init, + .sensor_cleanup = adv7481_sensor_cleanup, + + .onetime_init_regs_items = ARRAY_SIZE(adv7481_hdmi_onetime_init_regset), + .onetime_init_regs = adv7481_hdmi_onetime_init_regset, + + .powerup_regs_items = ARRAY_SIZE(adv7481_hdmi_powerup_regset), + .powerup_regs = adv7481_hdmi_powerup_regset, + + .poweroff_regs_items = ARRAY_SIZE(adv7481_hdmi_streamoff_regs), + .poweroff_regs = adv7481_hdmi_streamoff_regs, + + .id_reg_items = 0, + .id_regs = NULL, + + .subdev_items = ARRAY_SIZE(adv7481_hdmi_sensor_subdevs), + .subdevs = adv7481_hdmi_sensor_subdevs, + + .sensor_limits = &adv7481_hdmi_sensor_limits, + + .pll_config_items = ARRAY_SIZE(adv7481_hdmi_pll_configurations), + .pll_configs = adv7481_hdmi_pll_configurations, + .op_sys_clk = adv7481_hdmi_op_sys_clock, + + .modes_items = ARRAY_SIZE(adv7481_hdmi_modes), + .modes = adv7481_hdmi_modes, + + .streamon_regs_items = ARRAY_SIZE(adv7481_hdmi_streamon_regs), + .streamon_regs = adv7481_hdmi_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(adv7481_hdmi_streamoff_regs), + .streamoff_regs = adv7481_hdmi_streamoff_regs, + + .ctrl_items = ARRAY_SIZE(adv7481_hdmi_ctrls), + .ctrl_bank = adv7481_hdmi_ctrls, + + .csi_fmts_items = ARRAY_SIZE(adv7481_hdmi_crl_csi_data_fmt), + .csi_fmts = adv7481_hdmi_crl_csi_data_fmt, + + .addr_len = CRL_ADDR_7BIT, +}; + +#endif /* __CRLMODULE_ADV7481_HDMI_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule-lite/crl_magna_configuration_ti964.h b/drivers/media/i2c/crlmodule-lite/crl_magna_configuration_ti964.h new file mode 100644 index 000000000000..c8d0d7b3550d --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crl_magna_configuration_ti964.h @@ -0,0 +1,297 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_MAGNA_TI964_CONFIGURATION_H_ +#define __CRLMODULE_MAGNA_TI964_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +#define TI964_I2C_PHY_ADDR 0x3d + +static struct crl_pll_configuration magna_ti964_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 16, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = 
NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 10, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 20, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + } +}; + +static struct crl_subdev_rect_rep magna_ti964_1280_720_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 720, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 720, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_register_write_rep magna_ti964_powerup_regs[] = { + {0x4c, CRL_REG_LEN_08BIT, 0x1, TI964_I2C_PHY_ADDR}, /* Select RX port 0 */ +}; + +static struct crl_register_write_rep magna_ti964_poweroff_regs[] = { + {0x1, CRL_REG_LEN_08BIT, 0x20, TI964_I2C_PHY_ADDR}, +}; + +static struct crl_mode_rep magna_ti964_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(magna_ti964_1280_720_rects), + .sd_rects = magna_ti964_1280_720_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .min_llp = 2250, + .min_fll = 1320, + }, +}; + +static struct crl_sensor_subdev_config magna_ti964_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ti964", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "magna binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "magna pixel array", + } +}; + +static struct crl_sensor_limits magna_ti964_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1280, + .y_addr_max = 720, + .min_frame_length_lines = 240, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 320, + .max_line_length_pixels = 32752, +}; + +static struct crl_csi_data_fmt magna_ti964_crl_csi_data_fmt[] = { + { + .code = ICI_FORMAT_YUYV, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + }, + { + .code = ICI_FORMAT_UYVY, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + }, +}; + +static struct crl_ctrl_data magna_ti964_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = ICI_EXT_SD_PARAM_ID_LINK_FREQ, + .name = "CTRL_ID_LINK_FREQ", + .type = CRL_CTRL_TYPE_MENU_INT, + .data.int_menu.def = 0, + .data.int_menu.max = 0, + .data.int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_PA", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_CTRL_GET_OP, + .context = 
SENSOR_POWERED_ON, + .ctrl_id = ICI_EXT_SD_PARAM_ID_PIXEL_RATE, + .name = "CTRL_ID_PIXEL_RATE_CSI", + .type = CRL_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +struct crl_register_write_rep magna_ti964_streamon_regs[] = { + {0x1f, CRL_REG_LEN_08BIT, 0x2, TI964_I2C_PHY_ADDR}, + {0x33, CRL_REG_LEN_08BIT, 0x1, TI964_I2C_PHY_ADDR}, + {0x6d, CRL_REG_LEN_08BIT, 0x7f, TI964_I2C_PHY_ADDR}, + {0x7c, CRL_REG_LEN_08BIT, 0x80, TI964_I2C_PHY_ADDR}, + {0x20, CRL_REG_LEN_08BIT, 0xe0, TI964_I2C_PHY_ADDR}, +}; + +struct crl_register_write_rep magna_ti964_streamoff_regs[] = { + {0x6d, CRL_REG_LEN_08BIT, 0x7f, TI964_I2C_PHY_ADDR}, + {0x7c, CRL_REG_LEN_08BIT, 0x81, TI964_I2C_PHY_ADDR}, + {0x20, CRL_REG_LEN_08BIT, 0xf0, TI964_I2C_PHY_ADDR}, +}; + +struct crl_register_write_rep magna_ti964_onetime_init_regs[] = { + {0x8, CRL_REG_LEN_08BIT, 0x1c, TI964_I2C_PHY_ADDR}, + {0xa, CRL_REG_LEN_08BIT, 0x79, TI964_I2C_PHY_ADDR}, + {0xb, CRL_REG_LEN_08BIT, 0x79, TI964_I2C_PHY_ADDR}, + {0xd, CRL_REG_LEN_08BIT, 0xb9, TI964_I2C_PHY_ADDR}, + {0x10, CRL_REG_LEN_08BIT, 0x91, TI964_I2C_PHY_ADDR}, + {0x11, CRL_REG_LEN_08BIT, 0x85, TI964_I2C_PHY_ADDR}, + {0x12, CRL_REG_LEN_08BIT, 0x89, TI964_I2C_PHY_ADDR}, + {0x13, CRL_REG_LEN_08BIT, 0xc1, TI964_I2C_PHY_ADDR}, + {0x17, CRL_REG_LEN_08BIT, 0xe1, TI964_I2C_PHY_ADDR}, + {0x18, CRL_REG_LEN_08BIT, 0x0, TI964_I2C_PHY_ADDR}, /* Disable frame sync. */ + {0x19, CRL_REG_LEN_08BIT, 0x0, TI964_I2C_PHY_ADDR}, /* Frame sync high time. */ + {0x1a, CRL_REG_LEN_08BIT, 0x2, TI964_I2C_PHY_ADDR}, + {0x1b, CRL_REG_LEN_08BIT, 0xa, TI964_I2C_PHY_ADDR}, /* Frame sync low time. */ + {0x1c, CRL_REG_LEN_08BIT, 0xd3, TI964_I2C_PHY_ADDR}, + {0x21, CRL_REG_LEN_08BIT, 0x43, TI964_I2C_PHY_ADDR}, /* Enable best effort mode. 
*/ + {0xb0, CRL_REG_LEN_08BIT, 0x10, TI964_I2C_PHY_ADDR}, + {0xb1, CRL_REG_LEN_08BIT, 0x14, TI964_I2C_PHY_ADDR}, + {0xb2, CRL_REG_LEN_08BIT, 0x1f, TI964_I2C_PHY_ADDR}, + {0xb3, CRL_REG_LEN_08BIT, 0x8, TI964_I2C_PHY_ADDR}, + {0x32, CRL_REG_LEN_08BIT, 0x1, TI964_I2C_PHY_ADDR}, /* Select CSI port 0 */ + {0x4c, CRL_REG_LEN_08BIT, 0x1, TI964_I2C_PHY_ADDR}, /* Select RX port 0 */ + {0x58, CRL_REG_LEN_08BIT, 0x58, TI964_I2C_PHY_ADDR}, + {0x5c, CRL_REG_LEN_08BIT, 0x18, TI964_I2C_PHY_ADDR}, /* TI913 alias addr 0xc */ + {0x6d, CRL_REG_LEN_08BIT, 0x7f, TI964_I2C_PHY_ADDR}, + {0x70, CRL_REG_LEN_08BIT, 0x1e, TI964_I2C_PHY_ADDR}, /* YUV422_8 */ + {0x7c, CRL_REG_LEN_08BIT, 0x81, TI964_I2C_PHY_ADDR}, /* Use RAW10 8bit mode */ + {0xd2, CRL_REG_LEN_08BIT, 0x84, TI964_I2C_PHY_ADDR}, + {0x4c, CRL_REG_LEN_08BIT, 0x12, TI964_I2C_PHY_ADDR}, /* Select RX port 1 */ + {0x58, CRL_REG_LEN_08BIT, 0x58, TI964_I2C_PHY_ADDR}, + {0x5c, CRL_REG_LEN_08BIT, 0x1a, TI964_I2C_PHY_ADDR}, /* TI913 alias addr 0xd */ + {0x6d, CRL_REG_LEN_08BIT, 0x7f, TI964_I2C_PHY_ADDR}, + {0x70, CRL_REG_LEN_08BIT, 0x5e, TI964_I2C_PHY_ADDR}, /* YUV422_8 */ + {0x7c, CRL_REG_LEN_08BIT, 0x81, TI964_I2C_PHY_ADDR}, /* Use RAW10 8bit mode */ + {0xd2, CRL_REG_LEN_08BIT, 0x84, TI964_I2C_PHY_ADDR}, + {0x4c, CRL_REG_LEN_08BIT, 0x24, TI964_I2C_PHY_ADDR}, /* Select RX port 2*/ + {0x58, CRL_REG_LEN_08BIT, 0x58, TI964_I2C_PHY_ADDR}, + {0x5c, CRL_REG_LEN_08BIT, 0x1c, TI964_I2C_PHY_ADDR}, /* TI913 alias addr 0xe */ + {0x6d, CRL_REG_LEN_08BIT, 0x7f, TI964_I2C_PHY_ADDR}, + {0x70, CRL_REG_LEN_08BIT, 0x9e, TI964_I2C_PHY_ADDR}, /* YUV422_8 */ + {0x7c, CRL_REG_LEN_08BIT, 0x81, TI964_I2C_PHY_ADDR}, /* Use RAW10 8bit mode */ + {0xd2, CRL_REG_LEN_08BIT, 0x84, TI964_I2C_PHY_ADDR}, + {0x4c, CRL_REG_LEN_08BIT, 0x38, TI964_I2C_PHY_ADDR}, /* Select RX port3 */ + {0x58, CRL_REG_LEN_08BIT, 0x58, TI964_I2C_PHY_ADDR}, + {0x5c, CRL_REG_LEN_08BIT, 0x1e, TI964_I2C_PHY_ADDR}, /* TI913 alias addr 0xf */ + {0x6d, CRL_REG_LEN_08BIT, 0x7f, TI964_I2C_PHY_ADDR}, + {0x70, CRL_REG_LEN_08BIT, 0xde, TI964_I2C_PHY_ADDR}, /* YUV422_8 */ + {0x7c, CRL_REG_LEN_08BIT, 0x81, TI964_I2C_PHY_ADDR}, /* Use RAW10 8bit mode */ + {0xd2, CRL_REG_LEN_08BIT, 0x84, TI964_I2C_PHY_ADDR}, + {0x6e, CRL_REG_LEN_08BIT, 0x89, TI964_I2C_PHY_ADDR}, +}; + +struct crl_sensor_configuration magna_ti964_crl_configuration = { + + .powerup_regs_items = ARRAY_SIZE(magna_ti964_powerup_regs), + .powerup_regs = magna_ti964_powerup_regs, + + .poweroff_regs_items = ARRAY_SIZE(magna_ti964_poweroff_regs), + .poweroff_regs = magna_ti964_poweroff_regs, + + .onetime_init_regs_items = ARRAY_SIZE(magna_ti964_onetime_init_regs), + .onetime_init_regs = magna_ti964_onetime_init_regs, + + .subdev_items = ARRAY_SIZE(magna_ti964_subdevs), + .subdevs = magna_ti964_subdevs, + + .pll_config_items = ARRAY_SIZE(magna_ti964_pll_configurations), + .pll_configs = magna_ti964_pll_configurations, + + .sensor_limits = &magna_ti964_limits, + + .modes_items = ARRAY_SIZE(magna_ti964_modes), + .modes = magna_ti964_modes, + + .streamon_regs_items = ARRAY_SIZE(magna_ti964_streamon_regs), + .streamon_regs = magna_ti964_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(magna_ti964_streamoff_regs), + .streamoff_regs = magna_ti964_streamoff_regs, + + .ctrl_items = ARRAY_SIZE(magna_ti964_ctrls), + .ctrl_bank = magna_ti964_ctrls, + + .csi_fmts_items = ARRAY_SIZE(magna_ti964_crl_csi_data_fmt), + .csi_fmts = magna_ti964_crl_csi_data_fmt, + + .addr_len = CRL_ADDR_8BIT, +}; + +#endif /* __CRLMODULE_MAGNA_TI964_CONFIGURATION_H_ */ diff --git 
a/drivers/media/i2c/crlmodule-lite/crlmodule-core.c b/drivers/media/i2c/crlmodule-lite/crlmodule-core.c new file mode 100644 index 000000000000..59896f45b0db --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-core.c @@ -0,0 +1,2696 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/i2c.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> + +#include + +#include "crlmodule.h" +#include "crlmodule-nvm.h" +#include "crlmodule-regs.h" +#include "crlmodule-msrlist.h" + +static int init_ext_sd(struct i2c_client *client, + struct crl_subdev *ssd, int idx); +static void crlmodule_update_current_mode(struct crl_sensor *sensor); + +static int __crlmodule_get_variable_ref(struct crl_sensor *sensor, + enum crl_member_data_reference_ids ref, + u32 *val) +{ + switch (ref) { + case CRL_VAR_REF_OUTPUT_WIDTH: + *val = sensor->src->crop[CRL_PAD_SRC].width; + break; + case CRL_VAR_REF_OUTPUT_HEIGHT: + *val = sensor->src->crop[CRL_PAD_SRC].height; + break; + case CRL_VAR_REF_BITSPERPIXEL: + *val = sensor->sensor_ds->csi_fmts[ + sensor->fmt_index].bits_per_pixel; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * Get the data format index from the configuration definition data + */ +static int __crlmodule_get_data_fmt_index(struct crl_sensor *sensor, + u32 code) +{ + unsigned int i; + + for (i = 0; i < sensor->sensor_ds->csi_fmts_items; i++) { + if (sensor->sensor_ds->csi_fmts[i].code == code) + return i; + } + + return -EINVAL; +} + +/* + * Find the index of the ctrl pointer from the array of ctrls + * maintained by the CRL module based on the ctrl id. + */ +static int __crlmodule_get_crl_ctrl_index(struct crl_sensor *sensor, + u32 id, unsigned int *index) +{ + unsigned int i; + + for (i = 0; i < sensor->sensor_ds->ctrl_items; i++) + if (sensor->ctrl_bank[i].ctrl_id == id) + break; + + if (i >= sensor->sensor_ds->ctrl_items) + return -EINVAL; + + *index = i; + return 0; +} + +/* + * Finds the value of a specific ctrl based on the ctrl-id + */ +static int __crlmodule_get_param_value(struct crl_sensor *sensor, + u32 id, u32 *val) +{ + struct i2c_client *client = sensor->src->sd.client; + unsigned int i; + int ret; + struct ici_ext_sd_param *param; + + ret = __crlmodule_get_crl_ctrl_index(sensor, id, &i); + if (ret) + return ret; + + /* If no corresponding ctrl created, return */ + if (sensor->ctrl_bank[i].param.id != id) { + dev_dbg(&client->dev, + "%s ctrl_id: 0x%x desc: %s not ready\n", __func__, id, + sensor->ctrl_bank[i].name); + return -ENODATA; + } + + param = &sensor->ctrl_bank[i].param; + switch (sensor->ctrl_bank[i].type) { + case CRL_CTRL_TYPE_MENU_INT: + if (param->val <= sensor->ctrl_bank[i].data.int_menu.max) + *val = sensor->ctrl_bank[i].data.int_menu.menu[param->val]; + else + *val = 0; + break; + case CRL_CTRL_TYPE_INTEGER: + default: + *val = param->val; + break; + } + + dev_dbg(&client->dev, "%s ctrl_id: 0x%x desc: %s val: %d\n", + __func__, id, + sensor->ctrl_bank[i].name, *val); + return 0; +} + +/* + * Finds the crl ctrl data based on the control id + */ +static struct crl_ctrl_data *__crlmodule_get_ctrl( + struct crl_sensor *sensor, + u32 id) +{ + unsigned int i; + + if (__crlmodule_get_crl_ctrl_index(sensor, id, &i)) + return NULL; + + return &sensor->ctrl_bank[i]; +} + +/* + * Grab / Release controls based on the ctrl update context + */ +static void __crlmodule_enable_param(struct crl_sensor *sensor, + enum crl_ctrl_update_context ctxt, + bool enable) +{
+ struct crl_ctrl_data *crl_ctrl; + unsigned int i; + + for (i = 0; i < sensor->sensor_ds->ctrl_items; i++) { + crl_ctrl = &sensor->ctrl_bank[i]; + + if (crl_ctrl->context == ctxt) + crl_ctrl->enabled = enable; + } +} + +/* + * Checks if the ctrl specific data is satisfied in the mode and PLL + * selection logic. + */ +static bool __crlmodule_compare_ctrl_specific_data( + struct crl_sensor *sensor, + unsigned int items, + struct crl_ctrl_data_pair *ctrl_val) +{ + struct i2c_client *client = sensor->src->sd.client; + unsigned int i; + u32 val; + int ret; + + /* Go through all the controls associated with this config */ + for (i = 0; i < items; i++) { + /* Get the value set for the control */ + ret = __crlmodule_get_param_value(sensor, ctrl_val[i].ctrl_id, + &val); + if (ret) { + dev_err(&client->dev, "%s ctrl_id: 0x%x not found\n", + __func__, ctrl_val[i].ctrl_id); + return false; + } + + /* Compare the value from the sensor definition file config */ + if (val != ctrl_val[i].data) { + dev_err(&client->dev, + "%s ctrl_id: 0x%x value does not match %d != %d\n", + __func__, ctrl_val[i].ctrl_id, val, + ctrl_val[i].data); + return false; + } + } + + dev_dbg(&client->dev, "%s success\n", __func__); + return true; +} + +/* + * Finds the correct PLL settings index based on the parameters + */ +static int __crlmodule_update_pll_index(struct crl_sensor *sensor, + struct crl_ctrl_data *crl_ctrl) +{ + struct i2c_client *client = sensor->src->sd.client; + const struct crl_pll_configuration *pll_config; + const struct crl_csi_data_fmt *fmts = + &sensor->sensor_ds->csi_fmts[sensor->fmt_index]; + unsigned int i; + u32 link_freq = 0; + + if (!sensor->link_freq || + sensor->link_freq->type != CRL_CTRL_TYPE_MENU_INT) { + dev_err(&client->dev, "%s Invalid link freq ctrl\n", + __func__); + return -EINVAL; + } + + sensor->link_freq->param.val = crl_ctrl->param.val; + if (crl_ctrl->param.val <= + sensor->link_freq->data.int_menu.max) { + link_freq = sensor->link_freq->data.int_menu.menu[ + crl_ctrl->param.val]; + } + + dev_dbg(&client->dev, "%s PLL Items: %d link_freq: %d\n", + __func__, sensor->sensor_ds->pll_config_items, + link_freq); + + for (i = 0; i < sensor->sensor_ds->pll_config_items; i++) { + pll_config = &sensor->sensor_ds->pll_configs[i]; + + if (pll_config->op_sys_clk != link_freq) + continue; + + if (pll_config->input_clk != sensor->platform_data->ext_clk) + continue; + + /* if pll_config->csi_lanes == 0, lanes do not matter */ + if (pll_config->csi_lanes) + if (sensor->platform_data->lanes != pll_config->csi_lanes) + continue; + + /* PLL config must match the format's bits per pixel */ + if (fmts->bits_per_pixel != pll_config->bitsperpixel) + continue; + + /* Check if there are any dynamic compare items */ + if (sensor->ext_ctrl_impacts_pll_selection && + !__crlmodule_compare_ctrl_specific_data(sensor, + pll_config->comp_items, + pll_config->ctrl_data)) + continue; + + /* Found PLL index */ + dev_dbg(&client->dev, "%s Found PLL index: %d for freq: %d\n", + __func__, i, link_freq); + + sensor->pll_index = i; + + /* Update the control values for pixelrate_pa and csi */ + sensor->pixel_rate_pa->param.s64val = pll_config->pixel_rate_pa; + sensor->pixel_rate_csi->param.s64val = pll_config->pixel_rate_csi; + return 0; + } + + dev_err(&client->dev, "%s no configuration found for freq: %d\n", + __func__, link_freq); + return -EINVAL; +} + +/* + * Perform the action for the dependency control + */ +static void __crlmodule_dep_ctrl_perform_action( + struct crl_sensor *sensor, + struct crl_dep_ctrl_provision *prov, + u32 *val, u32 *dep_val) +{
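+ /* + * Illustrative walk-through (hypothetical values, not from any shipped + * configuration): setting exposure with *val = 1100 while the dependency + * control vblank reads *dep_val = 1000 gives CRL_DEP_CTRL_CONDITION_GREATER; + * if the configuration maps that condition to CRL_DEP_CTRL_CONDITION_ADD + * with action_value = 4, the code below computes *val = 1000 + 4 = 1004. + */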
+ enum crl_dep_ctrl_condition cond; + unsigned int i; + u32 temp; + + if (*val > *dep_val) + cond = CRL_DEP_CTRL_CONDITION_GREATER; + else if (*val < *dep_val) + cond = CRL_DEP_CTRL_CONDITION_LESSER; + else + cond = CRL_DEP_CTRL_CONDITION_EQUAL; + + for (i = 0; i < prov->action_items; i++) { + if (prov->action[i].cond == cond) + break; + } + + /* No handler found. Nothing to do. */ + if (i >= prov->action_items) + return; + + /* if this is a dependency control, switch val and dep val */ + if (prov->action_type == CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL) { + temp = *val; + *val = *dep_val; + *dep_val = temp; + } + + switch (prov->action[i].action) { + case CRL_DEP_CTRL_CONDITION_ADD: + *val = *dep_val + prov->action[i].action_value; + break; + case CRL_DEP_CTRL_CONDITION_SUBTRACT: + *val = *dep_val - prov->action[i].action_value; + break; + case CRL_DEP_CTRL_CONDITION_MULTIPLY: + *val = *dep_val * prov->action[i].action_value; + break; + case CRL_DEP_CTRL_CONDITION_DIVIDE: + *val = *dep_val / prov->action[i].action_value; + break; + } + + /* if this is a dependency control, switch val and dep val back */ + if (prov->action_type == CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL) { + temp = *val; + *val = *dep_val; + *dep_val = temp; + } + + return; +} + +/* + * Parse the dynamic entity based on the operand type + */ +static int __crlmodule_parse_dynamic_entity(struct crl_sensor *sensor, + struct crl_dynamic_entity entity, + u32 *val) +{ + switch (entity.entity_type) { + case CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST: + *val = entity.entity_val; + return 0; + case CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF: + return __crlmodule_get_variable_ref(sensor, + entity.entity_val, val); + case CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL: + return __crlmodule_get_param_value(sensor, + entity.entity_val, val); + case CRL_DYNAMIC_VAL_OPERAND_TYPE_REG_VAL: { + struct crl_register_read_rep reg; + + /* Note: Only 8bit registers are supported. */ + reg.address = entity.entity_val; + reg.len = CRL_REG_LEN_08BIT; + reg.mask = 0xff; + reg.dev_i2c_addr = CRL_I2C_ADDRESS_NO_OVERRIDE; + return crlmodule_read_reg(sensor, reg, val); + } + default: + break; + } + + return -EINVAL; +} + +static int __crlmodule_calc_dynamic_entity_values( + struct crl_sensor *sensor, + unsigned int ops_items, + struct crl_arithmetic_ops *ops_arr, + unsigned int *val) +{ + struct i2c_client *client = sensor->src->sd.client; + unsigned int i; + + /* perform the arithmetic operations on val one by one */ + for (i = 0; i < ops_items; i++) { + struct crl_arithmetic_ops *ops = &ops_arr[i]; + u32 operand; + int ret = __crlmodule_parse_dynamic_entity(sensor, ops->operand, + &operand); + if (ret) { + dev_dbg(&client->dev, + "%s failed to parse dynamic entity: %d %d\n", + __func__, ops->operand.entity_type, + ops->operand.entity_val); + return ret; + } + + switch (ops->op) { + case CRL_BITWISE_AND: + *val &= operand; + break; + case CRL_BITWISE_OR: + *val |= operand; + break; + case CRL_BITWISE_LSHIFT: + *val <<= operand; + break; + case CRL_BITWISE_RSHIFT: + *val >>= operand; + break; + case CRL_BITWISE_XOR: + *val ^= operand; + break; + case CRL_BITWISE_COMPLEMENT: + *val = ~(*val); + break; + case CRL_ADD: + *val += operand; + break; + case CRL_SUBTRACT:
+ *val = *val > operand ? *val - operand : operand - *val; + break; + case CRL_MULTIPLY: + *val *= operand; + break; + case CRL_DIV: + if (operand == 0) { + dev_err(&client->dev, "CRL_DIV error: operand is zero.\n"); + return -EINVAL; + } + *val /= operand; + break; + case CRL_ASSIGNMENT: + *val = operand; + break; + default: + return -EINVAL; + } + } + + return 0; +} + +/* + * Dynamic registers' value is not direct but depends on a reference value. + * Registers of this kind are mainly used in crlmodule's ctrl logic. + * + * This is to handle cases like the below examples, where multiple registers + * need to be modified based on the input value "val" + * R3000 = val & 0xff and R3001 = val >> 8 & 0xff and R3002 = val >> 16 & 0xff + * R4001 = val and R4002 = val or + * R2800 = FLL - val and R2802 = LLP - val + */ +static int __crlmodule_update_dynamic_regs(struct crl_sensor *sensor, + struct crl_ctrl_data *crl_ctrl, + unsigned int val) +{ + unsigned int i; + + for (i = 0; i < crl_ctrl->regs_items; i++) { + struct crl_dynamic_register_access *reg = &crl_ctrl->regs[i]; + /* + * Each register group must start from the initial value, not + * as a continuation of the previous calculations. The sensor + * configurations must take care of this restriction. + */ + u32 val_t = val; + int ret; + + /* Get the value associated with the dynamic entity */ + ret = __crlmodule_calc_dynamic_entity_values(sensor, + reg->ops_items, + reg->ops, &val_t); + if (ret) + return ret; + + /* Now ready to write the value */ + ret = crlmodule_write_reg(sensor, reg->dev_i2c_addr, + reg->address, reg->len, + reg->mask, val_t); + if (ret) + return ret; + } + + return 0; +} + +/* + * Handles the dependency control actions. A dependency control is a control + * whose value depends on the current control. This information is encoded in + * the sensor configuration file. + */ +static int __crlmodule_handle_dependency_ctrl( + struct crl_sensor *sensor, + struct crl_ctrl_data *crl_ctrl, + unsigned int *val, + enum crl_dep_ctrl_action_type type) +{ + struct i2c_client *client = sensor->src->sd.client; + struct crl_ctrl_data *dep_crl_ctrl; + struct crl_dep_ctrl_provision *dep_prov; + unsigned int i, idx; + u32 dep_val; + int ret; + + dev_dbg(&client->dev, "%s ctrl_id: 0x%x dependency controls: %d\n", + __func__, crl_ctrl->ctrl_id, + crl_ctrl->dep_items); + + for (i = 0; i < crl_ctrl->dep_items; i++) { + dep_prov = &crl_ctrl->dep_ctrls[i]; + + /* If not the type, continue */ + if (dep_prov->action_type != type) + continue; + + /* Get the value from the dependency ctrl */ + ret = __crlmodule_get_param_value(sensor, dep_prov->ctrl_id, + &dep_val); + if (ret) { + dev_err(&client->dev, "%s ctrl_id: 0x%x not found\n", + __func__, dep_prov->ctrl_id); + /* TODO! Should we continue?
*/ + continue; + } + + /* Perform the action */ + __crlmodule_dep_ctrl_perform_action(sensor, dep_prov, val, + &dep_val); + + /* if this is dependency control, update the register */ + if (dep_prov->action_type == + CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL) { + ret = __crlmodule_get_crl_ctrl_index(sensor, + dep_prov->ctrl_id, &idx); + if (ret) + continue; + + dep_crl_ctrl = &sensor->ctrl_bank[idx]; + dev_dbg(&client->dev, + "%s crl_ctrl: 0x%p 0x%p\n", __func__, + &sensor->ctrl_bank[idx], + dep_crl_ctrl); + + ret = __crlmodule_update_dynamic_regs(sensor, + dep_crl_ctrl, dep_val); + if (ret) + continue; + } + } + return 0; +} + + +static int crlmodule_get_fmt_index(struct crl_sensor *sensor, + u8 pixel_order, u8 bpp) +{ + struct i2c_client *client = sensor->src->sd.client; + const struct crl_csi_data_fmt *f; + int i; + + /* + * Go through the fmt list and check if this format with matching bpp + * is supported by this module definition file + */ + for (i = 0; i < sensor->sensor_ds->csi_fmts_items; i++) { + f = &sensor->sensor_ds->csi_fmts[i]; + + if (f->pixel_order == pixel_order && f->bits_per_pixel == bpp) + return i; + } + + dev_err(&client->dev, "%s no supported format for order: %d bpp: %d\n", + __func__, pixel_order, bpp); + + return -EINVAL; +} + +static int __crlmodule_update_flip_info(struct crl_sensor *sensor, + struct crl_ctrl_data *crl_ctrl, + struct ici_ext_sd_param *param) +{ + struct i2c_client *client = sensor->src->sd.client; + const struct crl_csi_data_fmt *fmt = + &sensor->sensor_ds->csi_fmts[sensor->fmt_index]; + u8 bpp = fmt->bits_per_pixel; + u8 flip_info = sensor->flip_info; + u8 new_order = 0; + int i, ret; + + dev_dbg(&client->dev, "%s current flip_info: %d curr index: %d\n", + __func__, flip_info, sensor->fmt_index); + + switch (param->id) { + case ICI_EXT_SD_PARAM_ID_HFLIP: + flip_info &= CRL_FLIP_HFLIP_MASK; + flip_info |= param->val > 0 ? CRL_FLIP_HFLIP : 0; + break; + case ICI_EXT_SD_PARAM_ID_VFLIP: + flip_info &= CRL_FLIP_VFLIP_MASK; + flip_info |= param->val > 0 ? CRL_FLIP_VFLIP : 0; + break; + } + + dev_dbg(&client->dev, "%s flip success new flip_info: %d\n", + __func__, flip_info); + + /* First check if the module actually supports any pixelorder changes */ + for (i = 0; i < sensor->sensor_ds->flip_items; i++) { + if (flip_info == sensor->sensor_ds->flip_data[i].flip) { + new_order = sensor->sensor_ds->flip_data[i].pixel_order; + break; + } + } + + if (i >= sensor->sensor_ds->flip_items) { + dev_err(&client->dev, "%s flip not supported %d\n", + __func__, flip_info); + return -EINVAL; + } + + /* + * Flip changes only pixel order. So check if the supported format list + * has any format with new pixel order and current bits per pixel + */ + i = crlmodule_get_fmt_index(sensor, new_order, bpp); + if (i < 0) { + dev_err(&client->dev, "%s no format found order: %d bpp: %d\n", + __func__, new_order, bpp); + return -EINVAL; + } + + ret = __crlmodule_update_dynamic_regs(sensor, crl_ctrl, param->val); + if (ret) { + dev_err(&client->dev, "%s register access failed\n", __func__); + return ret; + } + + /* New format found. 
Update info */ + sensor->fmt_index = i; + sensor->flip_info = flip_info; + + dev_dbg(&client->dev, "%s flip success flip: %d new fmt index: %d\n", + __func__, flip_info, i); + + return 0; +} +static int __crlmodule_update_framesize(struct crl_sensor *sensor, + struct crl_ctrl_data *crl_ctrl, + struct ici_ext_sd_param *param) +{ + const struct crl_mode_rep *mode = sensor->current_mode; + unsigned int val; + + switch (param->id) { + case ICI_EXT_SD_PARAM_ID_FRAME_LENGTH_LINES: + val = max(param->val, mode->min_fll); + break; + case ICI_EXT_SD_PARAM_ID_LINE_LENGTH_PIXELS: + val = max(param->val, mode->min_llp); + break; + default: + return -EINVAL; + } + + return __crlmodule_update_dynamic_regs(sensor, crl_ctrl, val); +} +static int __crlmodule_update_blanking(struct crl_sensor *sensor, + struct crl_ctrl_data *crl_ctrl, + struct ici_ext_sd_param *param) +{ + unsigned int val; + + switch (param->id) { + case ICI_EXT_SD_PARAM_ID_HBLANK: + val = sensor->pixel_array->crop[CRL_PA_PAD_SRC].width + + param->val; + break; + case ICI_EXT_SD_PARAM_ID_VBLANK: + val = sensor->pixel_array->crop[CRL_PA_PAD_SRC].height + + param->val; + break; + default: + return -EINVAL; + } + + return __crlmodule_update_dynamic_regs(sensor, crl_ctrl, val); +} + +static void __crlmodule_update_selection_impact_flags( + struct crl_sensor *sensor, + struct crl_ctrl_data *crl_ctrl) +{ + if (crl_ctrl->impact & CRL_IMPACTS_PLL_SELECTION) + sensor->ext_ctrl_impacts_pll_selection = true; + + if (crl_ctrl->impact & CRL_IMPACTS_MODE_SELECTION) + sensor->ext_ctrl_impacts_mode_selection = true; +} + +static struct crl_ctrl_data *__crlmodule_find_crlctrl( + struct crl_sensor *sensor, + struct ici_ext_sd_param *param) +{ + struct crl_ctrl_data *crl_ctrl; + unsigned int i; + + for (i = 0; i < sensor->sensor_ds->ctrl_items; i++) { + crl_ctrl = &sensor->ctrl_bank[i]; + if (crl_ctrl->param.sd == param->sd && + crl_ctrl->ctrl_id == param->id) + return crl_ctrl; + } + + return NULL; +} + +static int crlmodule_set_param(struct ici_ext_sd_param *param) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(param->sd); + struct i2c_client *client = sensor->src->sd.client; + struct crl_ctrl_data *crl_ctrl = NULL; + int ret = 0; + + dev_dbg(&client->dev, "%s id:%d val:%d\n", __func__, param->id, + param->val); + + /* + * Need to find the corresponding crlmodule wrapper for this param. + */ + crl_ctrl = __crlmodule_find_crlctrl(sensor, param); + if (!crl_ctrl) { + dev_err(&client->dev, "%s ctrl :0x%x not supported\n", + __func__, param->id); + return -EINVAL; + } + + dev_dbg(&client->dev, "%s id:0x%x name:%s\n", __func__, param->id, + crl_ctrl->name); + + if (!crl_ctrl->enabled || + crl_ctrl->flags & CRL_CTRL_FLAG_READ_ONLY) { + dev_err(&client->dev, "%s Control id:0x%x is not writeable\n", + __func__, param->id); + return -EINVAL; + } + + if (param->type != ICI_EXT_SD_PARAM_TYPE_INT32) { + dev_err(&client->dev, "%s Control id:0x%x only INT32 is supported\n", + __func__, param->id); + return -EINVAL; + } + + crl_ctrl->param.val = param->val; + + /* Then go through the mandatory controls */ + switch (param->id) { + case ICI_EXT_SD_PARAM_ID_LINK_FREQ: + /* Go through the supported list and compare the values */ + ret = __crlmodule_update_pll_index(sensor, crl_ctrl); + goto out; + }; + + /* update the selection impacts flags */ + __crlmodule_update_selection_impact_flags(sensor, crl_ctrl); + + /* + * Dependency control is a control whose value is affected by the value + * for the current control. 
For example, vblank can be a dependency + * control for exposure. Whenever exposure changes, the sensor can + * automatically adjust the vblank or rely on manual adjustment. In + * case of manual adjustment the sensor configuration file needs to + * specify the dependency control, the condition for an action and + * the type of action. + * + * Now check if there are any dependency controls for this. If there + * are, we need to split the action in two. First, if the current + * control needs to be changed, do it before updating the register. + * If some other control is affected, do it after writing the + * current values. + * + * Now check in the dependency control list, if the action type is + * "self" and update the value accordingly now + */ + __crlmodule_handle_dependency_ctrl(sensor, crl_ctrl, &param->val, + CRL_DEP_CTRL_ACTION_TYPE_SELF); + + /* Handle specific controls */ + switch (param->id) { + case ICI_EXT_SD_PARAM_ID_HFLIP: + case ICI_EXT_SD_PARAM_ID_VFLIP: + ret = __crlmodule_update_flip_info(sensor, crl_ctrl, param); + goto out; + + case ICI_EXT_SD_PARAM_ID_VBLANK: + case ICI_EXT_SD_PARAM_ID_HBLANK: + if (sensor->blanking_ctrl_not_use) { + dev_info(&client->dev, "%s Blanking controls are not used in this configuration, setting them has no effect\n", __func__); + /* Disable control */ + crl_ctrl->enabled = false; + + } else { + ret = __crlmodule_update_blanking(sensor, crl_ctrl, param); + } + goto out; + + case ICI_EXT_SD_PARAM_ID_FRAME_LENGTH_LINES: + case ICI_EXT_SD_PARAM_ID_LINE_LENGTH_PIXELS: + ret = __crlmodule_update_framesize(sensor, crl_ctrl, param); + goto out; + + case ICI_EXT_SD_PARAM_ID_SENSOR_MODE: + sensor->sensor_mode = param->val; + crlmodule_update_current_mode(sensor); + goto out; + } + + ret = __crlmodule_update_dynamic_regs(sensor, crl_ctrl, param->val); + +out: + /* + * Now check in the dependency control list, if the action type is + * "dependency control" and update the value accordingly now + */ + if (!ret && crl_ctrl) + __crlmodule_handle_dependency_ctrl(sensor, crl_ctrl, &param->val, + CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL); + + return ret; +} + +static int crlmodule_get_param(struct ici_ext_sd_param *param) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(param->sd); + struct i2c_client *client = sensor->src->sd.client; + struct crl_ctrl_data *crl_ctrl; + struct crl_dynamic_register_access *reg; + + /* + * Need to find the corresponding crlmodule wrapper for this param. + */ + crl_ctrl = __crlmodule_find_crlctrl(sensor, param); + if (!crl_ctrl) { + dev_err(&client->dev, "%s ctrl: 0x%x not supported\n", + __func__, param->id); + return -EINVAL; + } + + dev_dbg(&client->dev, "%s id:0x%x name:%s\n", __func__, param->id, + crl_ctrl->name); + + if (crl_ctrl->flags & CRL_CTRL_FLAG_WRITE_ONLY) { + dev_err(&client->dev, "%s Control id:0x%x is not readable\n", + __func__, param->id); + return -EINVAL; + } + + param->type = ICI_EXT_SD_PARAM_TYPE_INT32; + if (!(crl_ctrl->flags & CRL_CTRL_FLAG_READ_ONLY)) { + param->val = crl_ctrl->param.val; + return 0; + } + + /* + * Found the crl control wrapper. Use the dynamic entity information + * to calculate the value for this control. For get control, there + * could be only one item in the crl_dynamic_register_access. ctrl-> + * regs_items must be 1. Also the crl_dynamic_register_access.address + * and crl_dynamic_register_access.len are not used. + * Instead the values to be found or calculated need to be encoded into
+ * crl_dynamic_register_access.crl_arithmetic_ops. It can read + * registers and existing control values and apply simple arithmetic + * operations to them, etc. + */ + if (!crl_ctrl->regs || !crl_ctrl->regs_items) { + dev_err(&client->dev, "%s no dynamic entities found\n", + __func__); + return -EINVAL; + } + if (crl_ctrl->regs_items > 1) + dev_warn(&client->dev, + "%s multiple dynamic entities, will skip the rest\n", + __func__); + reg = &crl_ctrl->regs[0]; + + /* Get the value associated with the dynamic entity */ + return __crlmodule_calc_dynamic_entity_values(sensor, reg->ops_items, + reg->ops, &param->val); +} + +static int crlmodule_get_menu_item( + struct ici_ext_sd_param *param, u32 idx) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(param->sd); + struct i2c_client *client = sensor->src->sd.client; + struct crl_ctrl_data *crl_ctrl; + + crl_ctrl = __crlmodule_find_crlctrl(sensor, param); + if (!crl_ctrl) { + dev_err(&client->dev, "%s ctrl: 0x%x not supported\n", + __func__, param->id); + return -EINVAL; + } + + if (idx > crl_ctrl->max) { + dev_err(&client->dev, "%s Control id:0x%x has invalid index %u\n", + __func__, param->id, idx); + return -EINVAL; + } + switch (crl_ctrl->type) { + case CRL_CTRL_TYPE_MENU_INT: + param->type = ICI_EXT_SD_PARAM_TYPE_INT64; + param->s64val = crl_ctrl->data.int_menu.menu[idx]; + break; + case CRL_CTRL_TYPE_MENU_ITEMS: + if (!param->custom.size || !param->custom.data) { + dev_err(&client->dev, "%s Control id:0x%x param->custom.data must be preallocated by caller\n", + __func__, param->id); + return -EINVAL; + } + param->type = ICI_EXT_SD_PARAM_TYPE_STR; + strncpy(param->custom.data, + crl_ctrl->data.menu_items.menu[idx], + param->custom.size - 1); + param->custom.data[param->custom.size - 1] = '\0'; + break; + default: + dev_err(&client->dev, "%s Control id:0x%x does not have a menu\n", + __func__, param->id); + return -EINVAL; + } + return 0; +} + +static int __crlmodule_init_link_freq_ctrl_menu( + struct crl_sensor *sensor, + struct crl_ctrl_data *crl_ctrl) +{ + struct i2c_client *client = sensor->src->sd.client; + unsigned int items = 0; + unsigned int i; + + /* Cannot handle if the control type is not integer menu */ + if (crl_ctrl->type != CRL_CTRL_TYPE_MENU_INT) + return 0; + + /* If the menu contents exist, skip filling it dynamically */ + if (crl_ctrl->data.int_menu.menu) + return 0; + + sensor->link_freq_menu = devm_kzalloc(&client->dev, sizeof(s64) * + sensor->sensor_ds->pll_config_items, + GFP_KERNEL); + if (!sensor->link_freq_menu) + return -ENOMEM; + + for (i = 0; i < sensor->sensor_ds->pll_config_items; i++) { + bool dup = false; + unsigned int j; + + /* + * Skip the duplicate entries.
+ * We match on the value, + * not the index. + */ + for (j = 0; j < items && !dup; j++) + dup = (sensor->link_freq_menu[j] == + sensor->sensor_ds->pll_configs[i].op_sys_clk); + if (dup) + continue; + + sensor->link_freq_menu[items] = + sensor->sensor_ds->pll_configs[i].op_sys_clk; + items++; + } + + crl_ctrl->data.int_menu.menu = sensor->link_freq_menu; + + /* items will not be 0 as there will be at least one pll_config_item */ + crl_ctrl->data.int_menu.max = items - 1; + + return 0; +} + +static int crlmodule_init_controls(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + unsigned int pa_ctrls = 0; + unsigned int src_ctrls = 0; + struct crl_ctrl_data *crl_ctrl; + unsigned int i; + int rval; + + sensor->ctrl_bank = devm_kzalloc(&client->dev, + sizeof(struct crl_ctrl_data) * + sensor->sensor_ds->ctrl_items, + GFP_KERNEL); + if (!sensor->ctrl_bank) + return -ENOMEM; + + /* Prepare to initialise the ctrls from the crl wrapper */ + for (i = 0; i < sensor->sensor_ds->ctrl_items; i++) { + /* + * First copy the ctrls to the sensor, as there could be + * more than one similar sensor in a product, which could share + * the same configuration file + */ + sensor->ctrl_bank[i] = + sensor->sensor_ds->ctrl_bank[i]; + + crl_ctrl = &sensor->ctrl_bank[i]; + crl_ctrl->param.id = crl_ctrl->ctrl_id; + if (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_PIXEL_ARRAY) { + if (sensor->pixel_array) { + crl_ctrl->param.sd = + &sensor->pixel_array->sd; + } + pa_ctrls++; + } + + if (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_SCALER) { + if (sensor->scaler) { + crl_ctrl->param.sd = + &sensor->scaler->sd; + } + src_ctrls++; + } + if (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_BINNER) { + if (sensor->binner) { + crl_ctrl->param.sd = + &sensor->binner->sd; + } + src_ctrls++; + } + + /* populate the ctrl for the Link_freq dynamically */ + if (crl_ctrl->ctrl_id == ICI_EXT_SD_PARAM_ID_LINK_FREQ && + (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_SCALER || + crl_ctrl->sd_type == CRL_SUBDEV_TYPE_BINNER)) { + rval = __crlmodule_init_link_freq_ctrl_menu(sensor, + crl_ctrl); + if (rval) + return rval; + } + } + dev_dbg(&client->dev, "%s pa_ctrls: %d src_ctrls: %d\n", __func__, + pa_ctrls, src_ctrls); + for (i = 0; i < sensor->sensor_ds->ctrl_items; i++) { + crl_ctrl = &sensor->ctrl_bank[i]; + switch (crl_ctrl->type) { + case CRL_CTRL_TYPE_MENU_ITEMS: + crl_ctrl->max = crl_ctrl->data.menu_items.size - 1; + break; + case CRL_CTRL_TYPE_MENU_INT: + crl_ctrl->max = crl_ctrl->data.int_menu.max; + crl_ctrl->def = crl_ctrl->data.int_menu.def; + break; + case CRL_CTRL_TYPE_INTEGER64: + case CRL_CTRL_TYPE_INTEGER: + case CRL_CTRL_TYPE_CUSTOM: + crl_ctrl->min = crl_ctrl->data.std_data.min; + crl_ctrl->max = crl_ctrl->data.std_data.max; + crl_ctrl->step = crl_ctrl->data.std_data.step; + crl_ctrl->def = crl_ctrl->data.std_data.def; + break; + case CRL_CTRL_TYPE_BOOLEAN: + case CRL_CTRL_TYPE_BUTTON: + case CRL_CTRL_TYPE_CTRL_CLASS: + default: + dev_err(&client->dev, + "%s Invalid control type\n", __func__); + continue; + } + + /* + * Blanking and framesize controls access the same registers; + * blanking controls are disabled if framesize controls exist.
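+ * e.g. a configuration that declares FRAME_LENGTH_LINES or + * LINE_LENGTH_PIXELS controls sets blanking_ctrl_not_use below, and + * crlmodule_set_param() will then disable a VBLANK/HBLANK control + * instead of writing it.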
+ */ + if (crl_ctrl->ctrl_id == ICI_EXT_SD_PARAM_ID_FRAME_LENGTH_LINES || + crl_ctrl->ctrl_id == ICI_EXT_SD_PARAM_ID_LINE_LENGTH_PIXELS) + sensor->blanking_ctrl_not_use = 1; + + if (crl_ctrl->ctrl_id == ICI_EXT_SD_PARAM_ID_SENSOR_MODE) + sensor->direct_mode_in_use = 1; + + /* Save mandatory control references - link_freq in src sd */ + if (crl_ctrl->ctrl_id == ICI_EXT_SD_PARAM_ID_LINK_FREQ && + (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_SCALER || + crl_ctrl->sd_type == CRL_SUBDEV_TYPE_BINNER)) + sensor->link_freq = crl_ctrl; + + /* Save mandatory control references - pixel_rate_pa PA sd */ + if (crl_ctrl->ctrl_id == ICI_EXT_SD_PARAM_ID_PIXEL_RATE && + crl_ctrl->sd_type == CRL_SUBDEV_TYPE_PIXEL_ARRAY) + sensor->pixel_rate_pa = crl_ctrl; + + /* Save mandatory control references - pixel_rate_csi src sd */ + if (crl_ctrl->ctrl_id == ICI_EXT_SD_PARAM_ID_PIXEL_RATE && + (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_SCALER || + crl_ctrl->sd_type == CRL_SUBDEV_TYPE_BINNER)) + sensor->pixel_rate_csi = crl_ctrl; + + dev_dbg(&client->dev, + "%s idx: %d ctrl_id: 0x%x ctrl_name: %s\n", + __func__, i, crl_ctrl->ctrl_id, crl_ctrl->name); + } + + return 0; +} + +static bool __crlmodule_rect_matches(struct i2c_client *client, + const struct ici_rect *const rect1, + const struct ici_rect *const rect2) +{ + dev_dbg(&client->dev, "%s rect1 l:%d t:%d w:%d h:%d\n", __func__, + rect1->left, rect1->top, rect1->width, rect1->height); + dev_dbg(&client->dev, "%s rect2 l:%d t:%d w:%d h:%d\n", __func__, + rect2->left, rect2->top, rect2->width, rect2->height); + + return (rect1->left == rect2->left && + rect1->top == rect2->top && + rect1->width == rect2->width && + rect1->height == rect2->height); +} + +static int __crlmodule_update_hblank(struct crl_sensor *sensor, + struct crl_ctrl_data *hblank) +{ + const struct crl_mode_rep *mode = sensor->current_mode; + const struct crl_sensor_limits *limits = sensor->sensor_ds->sensor_limits; + unsigned int width = sensor->pixel_array->crop[CRL_PA_PAD_SRC].width; + unsigned int min_llp, max_llp; + + if (mode->min_llp) + min_llp = mode->min_llp; /* mode specific limit */ + else if (limits->min_line_length_pixels) + min_llp = limits->min_line_length_pixels; /* sensor limit */ + else /* No restrictions */ + min_llp = width; + + if (mode->max_llp) + max_llp = mode->max_llp; /* mode specific limit */ + else if (limits->max_line_length_pixels) + max_llp = limits->max_line_length_pixels; /* sensor limit */ + else /* No restrictions */ + max_llp = USHRT_MAX; + + hblank->min = min_llp - width; + hblank->max = max_llp - width; + hblank->def = hblank->min; + return 0; +} + +static int __crlmodule_update_vblank(struct crl_sensor *sensor, + struct crl_ctrl_data *vblank) +{ + const struct crl_mode_rep *mode = sensor->current_mode; + const struct crl_sensor_limits *limits = sensor->sensor_ds->sensor_limits; + unsigned int height = sensor->pixel_array->crop[CRL_PA_PAD_SRC].height; + unsigned int min_fll, max_fll; + + if (mode->min_fll) + min_fll = mode->min_fll; /* mode specific limit */ + else if (limits->min_frame_length_lines) + min_fll = limits->min_frame_length_lines; /* sensor limit */ + else /* No restrictions */ + min_fll = height; + + if (mode->max_fll) + max_fll = mode->max_fll; /* mode specific limit */ + else if (limits->max_frame_length_lines) + max_fll = limits->max_frame_length_lines; /* sensor limit */ + else /* No restrictions */ + max_fll = USHRT_MAX; + + vblank->min = min_fll - height; + vblank->max = max_fll - height; + vblank->def = vblank->min; + return 0; +} + +static void
crlmodule_update_framesize(struct crl_sensor *sensor) +{ + const struct crl_mode_rep *mode = sensor->current_mode; + struct crl_ctrl_data *llength; + struct crl_ctrl_data *flength; + + llength = __crlmodule_get_ctrl(sensor, ICI_EXT_SD_PARAM_ID_LINE_LENGTH_PIXELS); + flength = __crlmodule_get_ctrl(sensor, ICI_EXT_SD_PARAM_ID_FRAME_LENGTH_LINES); + + if (llength) { + llength->min = mode->min_llp; + llength->def = llength->min; + } + + if (flength) { + flength->min = mode->min_fll; + flength->def = flength->min; + } +} + +static int crlmodule_update_frame_blanking(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + struct crl_ctrl_data *vblank; + struct crl_ctrl_data *hblank; + int ret; + + vblank = __crlmodule_get_ctrl(sensor, ICI_EXT_SD_PARAM_ID_VBLANK); + hblank = __crlmodule_get_ctrl(sensor, ICI_EXT_SD_PARAM_ID_HBLANK); + + if (hblank) { + ret = __crlmodule_update_hblank(sensor, hblank); + if (ret) + return ret; + dev_dbg(&client->dev, "%s hblank:%d\n", __func__, hblank->param.val); + } + + if (vblank) { + ret = __crlmodule_update_vblank(sensor, vblank); + if (ret) + return ret; + dev_dbg(&client->dev, "%s vblank:%d\n", __func__, vblank->param.val); + } + + return 0; +} + +static void crlmodule_update_mode_bysel(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + const struct crl_mode_rep *this; + unsigned int i; + + dev_dbg(&client->dev, "%s look for w: %d, h: %d, in [%d] modes\n", + __func__, sensor->src->crop[CRL_PAD_SRC].width, + sensor->src->crop[CRL_PAD_SRC].height, + sensor->sensor_ds->modes_items); + + for (i = 0; i < sensor->sensor_ds->modes_items; i++) { + this = &sensor->sensor_ds->modes[i]; + + dev_dbg(&client->dev, "%s check mode list[%d] w: %d, h: %d\n", + __func__, i, this->width, this->height); + if (this->width != sensor->src->crop[CRL_PAD_SRC].width || + this->height != sensor->src->crop[CRL_PAD_SRC].height) + continue; + + if (sensor->pixel_array) { + dev_dbg(&client->dev, "%s Compare PA out rect\n", __func__); + if (!__crlmodule_rect_matches(client, + &sensor->pixel_array->crop[CRL_PA_PAD_SRC], + &this->sd_rects[CRL_SD_PA_INDEX].out_rect)) + continue; + } + if (sensor->binner) { + dev_dbg(&client->dev, "%s binning hor: %d vs. %d\n", + __func__, + sensor->binning_horizontal, + this->binn_hor); + if (sensor->binning_horizontal != this->binn_hor) + continue; + + dev_dbg(&client->dev, "%s binning vert: %d vs. %d\n", + __func__, + sensor->binning_vertical, + this->binn_vert); + if (sensor->binning_vertical != this->binn_vert) + continue; + + dev_dbg(&client->dev, "%s binner in rect\n", __func__); + if (!__crlmodule_rect_matches(client, + &sensor->binner->crop[CRL_PAD_SINK], + &this->sd_rects[CRL_SD_BINNER_INDEX].in_rect)) + continue; + + dev_dbg(&client->dev, "%s binner out rect\n", __func__); + if (!__crlmodule_rect_matches(client, + &sensor->binner->crop[CRL_PAD_SRC], + &this->sd_rects[CRL_SD_BINNER_INDEX].out_rect)) + continue; + } + + if (sensor->scaler) { + dev_dbg(&client->dev, "%s scaler scale_m %d vs. 
%d\n", + __func__, sensor->scale_m, + this->scale_m); + if (sensor->scale_m != this->scale_m) + continue; + + dev_dbg(&client->dev, "%s scaler in rect\n", __func__); + if (!__crlmodule_rect_matches(client, + &sensor->scaler->crop[CRL_PAD_SINK], + &this->sd_rects[CRL_SD_SCALER_INDEX].in_rect)) + continue; + + dev_dbg(&client->dev, "%s scaler out rect\n", __func__); + if (!__crlmodule_rect_matches(client, + &sensor->scaler->crop[CRL_PAD_SRC], + &this->sd_rects[CRL_SD_SCALER_INDEX].out_rect)) + continue; + } + + /* Check if there are any dynamic compare items */ + if (sensor->ext_ctrl_impacts_mode_selection && + !__crlmodule_compare_ctrl_specific_data(sensor, + this->comp_items, + this->ctrl_data)) + continue; + + /* Found a perfect match! */ + dev_dbg(&client->dev, "%s found mode. idx: %d\n", __func__, i); + break; + } + + /* If no modes found, fall back to the fail safe mode index */ + if (i >= sensor->sensor_ds->modes_items) { + i = sensor->sensor_ds->fail_safe_mode_index; + this = &sensor->sensor_ds->modes[i]; + dev_info(&client->dev, + "%s no matching mode, set to default: %d\n", + __func__, i); + } + + sensor->current_mode = this; +} + +static void crlmodule_update_mode_ctrl(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + const struct crl_mode_rep *this; + int i; + + dev_dbg(&client->dev, "%s Sensor Mode :%d\n", + __func__, sensor->sensor_mode); + /* point to selected mode */ + this = &sensor->sensor_ds->modes[sensor->sensor_mode]; + sensor->current_mode = this; + + for (i = 0; i < this->sd_rects_items; i++) { + + if (CRL_SUBDEV_TYPE_PIXEL_ARRAY == + this->sd_rects[i].subdev_type) { + sensor->pixel_array->crop[CRL_PA_PAD_SRC] = + this->sd_rects[CRL_SD_PA_INDEX].out_rect; + } + + if (CRL_SUBDEV_TYPE_BINNER == + this->sd_rects[i].subdev_type) { + sensor->binner->sink_fmt = + this->sd_rects[i].in_rect; + sensor->binner->crop[CRL_PAD_SINK] = + this->sd_rects[i].in_rect; + sensor->binner->crop[CRL_PAD_SRC] = + this->sd_rects[i].out_rect; + sensor->binning_vertical = this->binn_vert; + sensor->binning_horizontal = this->binn_hor; + if (this->binn_vert > 1) + sensor->binner->compose = + this->sd_rects[i].out_rect; + } + + if (CRL_SUBDEV_TYPE_SCALER == + this->sd_rects[i].subdev_type) { + sensor->scaler->crop[CRL_PAD_SINK] = + this->sd_rects[i].in_rect; + sensor->scaler->crop[CRL_PAD_SRC] = + this->sd_rects[i].out_rect; + sensor->scaler->sink_fmt = + this->sd_rects[i].in_rect; + sensor->scale_m = this->scale_m; + if (this->scale_m != 1) + sensor->scaler->compose = + this->sd_rects[i].out_rect; + } + } + + /* Set source */ + sensor->src->crop[CRL_PAD_SRC].width = this->width; + sensor->src->crop[CRL_PAD_SRC].height = this->height; +} + +static void crlmodule_update_current_mode(struct crl_sensor *sensor) +{ + const struct crl_mode_rep *this; + int i; + + if (sensor->direct_mode_in_use) + crlmodule_update_mode_ctrl(sensor); + else + crlmodule_update_mode_bysel(sensor); + + /* + * We have a valid mode now. If there are any mode specific "get" + * controls defined in the configuration it could be queried by the + * user space for any mode specific information. So go through the + * mode specific ctrls and update its value from the selected mode. 
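+ * e.g. a mode entry whose ctrl_data pairs carry a (hypothetical) WDR-mode + * GET control would have that control's param.val refreshed from the + * pair's data field in the loop below.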
+ */ + + this = sensor->current_mode; + + for (i = 0; i < this->comp_items; i++) { + struct crl_ctrl_data_pair *ctrl_comp = &this->ctrl_data[i]; + unsigned int idx; + + /* Get the crl_ctrl index corresponding to the ctrl id */ + if (__crlmodule_get_crl_ctrl_index(sensor, ctrl_comp->ctrl_id, + &idx)) + /* If not found, move to the next ctrl */ + continue; + + /* No need to update this control, if this is a set op ctrl */ + if (sensor->ctrl_bank[idx].op_type == CRL_CTRL_SET_OP) + continue; + + /* Update the control value */ + sensor->ctrl_bank[idx].param.val = ctrl_comp->data; + } + + if (sensor->blanking_ctrl_not_use) + crlmodule_update_framesize(sensor); + else + crlmodule_update_frame_blanking(sensor); +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int __crlmodule_get_format( + struct ici_ext_subdev *subdev, + struct ici_pad_framefmt *pff) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct ici_rect *r; + + if (pff->pad.pad_idx == ssd->source_pad) + r = &ssd->crop[ssd->source_pad]; + else + r = &ssd->sink_fmt; + + pff->ffmt.width = r->width; + pff->ffmt.height = r->height; + pff->ffmt.pixelformat = + sensor->sensor_ds->csi_fmts[sensor->fmt_index].code; + pff->ffmt.field = + ((ssd->field == ICI_FIELD_ANY) ? + ICI_FIELD_NONE : ssd->field); + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_enum_pixelformat( + struct ici_isys_node *node, + struct ici_pad_supported_format_desc *psfd) +{ + struct ici_ext_subdev *subdev = node->sd; + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + + if (psfd->idx >= sensor->sensor_ds->csi_fmts_items) + return -EINVAL; + + psfd->color_format = + sensor->sensor_ds->csi_fmts[psfd->idx].code; + psfd->min_width = sensor->sensor_ds->sensor_limits->x_addr_min; + psfd->max_width = sensor->sensor_ds->sensor_limits->x_addr_max; + psfd->min_height = sensor->sensor_ds->sensor_limits->y_addr_min; + psfd->max_height = sensor->sensor_ds->sensor_limits->y_addr_max; + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_get_format( + struct ici_isys_node *node, + struct ici_pad_framefmt *pff) +{ + struct ici_ext_subdev *subdev = node->sd; + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + int rval; + + mutex_lock(&sensor->mutex); + rval = __crlmodule_get_format(subdev, pff); + mutex_unlock(&sensor->mutex); + + return rval; +} + +static int __crlmodule_sel_supported( + struct ici_ext_subdev *subdev, + u32 pad, + u32 type) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + + if (ssd == sensor->pixel_array && pad == CRL_PA_PAD_SRC) { + switch (type) { + case ICI_EXT_SEL_TYPE_NATIVE: + case ICI_EXT_SEL_TYPE_CROP: + case ICI_EXT_SEL_TYPE_CROP_BOUNDS: + return 0; + } + } + if (ssd == sensor->binner) { + switch (type) { + case ICI_EXT_SEL_TYPE_COMPOSE: + case ICI_EXT_SEL_TYPE_COMPOSE_BOUNDS: + if (pad == CRL_PAD_SINK) + return 0; + break; + } + } + if (ssd == sensor->scaler) { + switch (type) { + case ICI_EXT_SEL_TYPE_CROP: + case ICI_EXT_SEL_TYPE_CROP_BOUNDS: + if (pad == CRL_PAD_SRC) + return 0; + break; + case ICI_EXT_SEL_TYPE_COMPOSE: + case
ICI_EXT_SEL_TYPE_COMPOSE_BOUNDS: + if (pad == CRL_PAD_SINK) + return 0; + break; + } + } + return -EINVAL; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static void crlmodule_get_crop_compose( + struct ici_ext_subdev *subdev, + struct ici_rect **crops, + struct ici_rect **comps) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + unsigned int i; + + /* Currently we support only 2 pads */ + BUG_ON(subdev->num_pads > CRL_PADS); + + if (crops) + for (i = 0; i < subdev->num_pads; i++) + crops[i] = &ssd->crop[i]; + if (comps) + *comps = &ssd->compose; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_get_selection( + struct ici_isys_node *node, + struct ici_pad_selection* ps) +{ + struct ici_ext_subdev *subdev = node->sd; + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct ici_rect *comp, *crops[CRL_PADS]; + struct ici_rect sink_fmt; + int ret; + + ret = __crlmodule_sel_supported(subdev, ps->pad.pad_idx, + ps->sel_type); + if (ret) + return ret; + + crlmodule_get_crop_compose(subdev, crops, &comp); + + sink_fmt = ssd->sink_fmt; + + switch (ps->sel_type) { + case ICI_EXT_SEL_TYPE_CROP_BOUNDS: + case ICI_EXT_SEL_TYPE_NATIVE: + if (ssd == sensor->pixel_array) { + ps->rect.left = ps->rect.top = 0; + ps->rect.width = + sensor->sensor_ds->sensor_limits->x_addr_max; + ps->rect.height = + sensor->sensor_ds->sensor_limits->y_addr_max; + } else if (ps->pad.pad_idx == ssd->sink_pad) { + ps->rect = sink_fmt; + } else { + ps->rect = *comp; + } + break; + case ICI_EXT_SEL_TYPE_CROP: + case ICI_EXT_SEL_TYPE_COMPOSE_BOUNDS: + ps->rect = *crops[ps->pad.pad_idx]; + break; + case ICI_EXT_SEL_TYPE_COMPOSE: + ps->rect = *comp; + break; + } + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static void crlmodule_propagate( + struct ici_ext_subdev *subdev, + u32 type) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct ici_rect *comp, *crops[CRL_PADS]; + + crlmodule_get_crop_compose(subdev, crops, &comp); + + switch (type) { + case ICI_EXT_SEL_TYPE_CROP: + comp->width = crops[CRL_PAD_SINK]->width; + comp->height = crops[CRL_PAD_SINK]->height; + if (ssd == sensor->scaler) { + sensor->scale_m = 1; + } else if (ssd == sensor->binner) { + sensor->binning_horizontal = 1; + sensor->binning_vertical = 1; + } + /* Fall through */ + case ICI_EXT_SEL_TYPE_COMPOSE: + *crops[CRL_PAD_SRC] = *comp; + break; + default: + BUG(); + } +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_set_compose( + struct ici_ext_subdev *subdev, + struct ici_rect *r) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct ici_rect *comp, *crops[CRL_PADS]; + + crlmodule_get_crop_compose(subdev, crops, &comp); + + r->top = 0; + r->left = 0; + + if (ssd == sensor->binner) { + sensor->binning_horizontal = crops[CRL_PAD_SINK]->width / + r->width; + sensor->binning_vertical = crops[CRL_PAD_SINK]->height / + r->height; + } else { + sensor->scale_m = crops[CRL_PAD_SINK]->width * + 
sensor->sensor_ds->sensor_limits->scaler_m_min / + r->width; + } + + *comp = *r; + + crlmodule_propagate(subdev, + ICI_EXT_SEL_TYPE_COMPOSE); + + crlmodule_update_current_mode(sensor); + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_set_crop( + struct ici_ext_subdev *subdev, + u32 pad, + struct ici_rect *r) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct ici_rect *src_size, *crops[CRL_PADS]; + + crlmodule_get_crop_compose(subdev, crops, NULL); + + if (pad == ssd->sink_pad) + src_size = &ssd->sink_fmt; + else + src_size = &ssd->compose; + + if (ssd == sensor->src && pad == CRL_PAD_SRC) { + r->left = 0; + r->top = 0; + } + + r->width = min(r->width, src_size->width); + r->height = min(r->height, src_size->height); + + r->left = min_t(s32, r->left, src_size->width - r->width); + r->top = min_t(s32, r->top, src_size->height - r->height); + + *crops[pad] = *r; + + if (ssd != sensor->pixel_array && pad == CRL_PAD_SINK) + crlmodule_propagate(subdev, + ICI_EXT_SEL_TYPE_CROP); + + /* TODO! Should we short list supported mode? */ + + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Modified based on the CRL Module changes + */ +static int crlmodule_set_format( + struct ici_isys_node *node, + struct ici_pad_framefmt *pff) +{ + struct ici_ext_subdev *subdev = node->sd; + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct i2c_client *client = sensor->src->sd.client; + struct ici_rect *crops[CRL_PADS]; + + dev_dbg(&client->dev, "%s sd_name: %s pad: %d w: %d, h: %d code: 0x%x", + __func__, node->name, pff->pad.pad_idx, + pff->ffmt.width, + pff->ffmt.height, + pff->ffmt.pixelformat); + + mutex_lock(&sensor->mutex); + + /* Currently we only support ALTERNATE interlaced mode. */ + if (pff->ffmt.field != ICI_FIELD_ALTERNATE) + pff->ffmt.field = ICI_FIELD_NONE; + pff->ffmt.colorspace = 0; + memset(pff->ffmt.reserved, 0, sizeof(pff->ffmt.reserved)); + ssd->field = pff->ffmt.field; + + if (pff->pad.pad_idx == ssd->source_pad) { + u32 code = pff->ffmt.pixelformat; + int rval = __crlmodule_get_format(subdev, pff); + + if (!rval && subdev == &sensor->src->sd) { + /* Check if this code is supported, if yes get index */ + int idx = __crlmodule_get_data_fmt_index(sensor, code); + + if (idx < 0) { + dev_err(&client->dev, "%s invalid format\n", + __func__); + mutex_unlock(&sensor->mutex); + return -EINVAL; + } + + sensor->fmt_index = idx; + rval = __crlmodule_get_format(subdev, pff); + /* TODO! validate PLL? 
*/ + } + mutex_unlock(&sensor->mutex); + return rval; + } + + pff->ffmt.width = + clamp_t(uint32_t, pff->ffmt.width, + sensor->sensor_ds->sensor_limits->x_addr_min, + sensor->sensor_ds->sensor_limits->x_addr_max); + pff->ffmt.height = + clamp_t(uint32_t, pff->ffmt.height, + sensor->sensor_ds->sensor_limits->y_addr_min, + sensor->sensor_ds->sensor_limits->y_addr_max); + + crlmodule_get_crop_compose(subdev, crops, NULL); + + crops[ssd->sink_pad]->left = 0; + crops[ssd->sink_pad]->top = 0; + crops[ssd->sink_pad]->width = pff->ffmt.width; + crops[ssd->sink_pad]->height = pff->ffmt.height; + ssd->sink_fmt = *crops[ssd->sink_pad]; + + crlmodule_propagate(subdev, ICI_EXT_SEL_TYPE_CROP); + + crlmodule_update_current_mode(sensor); + + mutex_unlock(&sensor->mutex); + + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_set_selection( + struct ici_isys_node *node, + struct ici_pad_selection* ps) +{ + struct ici_ext_subdev *subdev = node->sd; + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = sensor->src->sd.client; + int ret; + + dev_dbg(&client->dev, "%s sd_name: %s sel w: %d, h: %d", + __func__, node->name, ps->rect.width, + ps->rect.height); + + ret = __crlmodule_sel_supported(subdev, ps->pad.pad_idx, + ps->sel_type); + if (ret) { + dev_dbg(&client->dev, + "%s sd_name: %s w: %d, h: %d not supported", + __func__, node->name, ps->rect.width, + ps->rect.height); + return ret; + } + + mutex_lock(&sensor->mutex); + + ps->rect.width = max_t(unsigned int, + sensor->sensor_ds->sensor_limits->x_addr_min, + ps->rect.width); + ps->rect.height = max_t(unsigned int, + sensor->sensor_ds->sensor_limits->y_addr_min, + ps->rect.height); + switch (ps->sel_type) { + case ICI_EXT_SEL_TYPE_CROP: + ret = crlmodule_set_crop(subdev, ps->pad.pad_idx, + &ps->rect); + break; + case ICI_EXT_SEL_TYPE_COMPOSE: + ret = crlmodule_set_compose(subdev, &ps->rect); + break; + default: + ret = -EINVAL; + } + + crlmodule_update_current_mode(sensor); + + mutex_unlock(&sensor->mutex); + return ret; +} + +static int crlmodule_start_streaming(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + const struct crl_pll_configuration *pll; + const struct crl_csi_data_fmt *fmt; + int rval; + + dev_dbg(&client->dev, "%s start streaming pll_idx: %d fmt_idx: %d\n", + __func__, sensor->pll_index, + sensor->fmt_index); + + pll = &sensor->sensor_ds->pll_configs[sensor->pll_index]; + fmt = &sensor->sensor_ds->csi_fmts[sensor->fmt_index]; + + crlmodule_update_current_mode(sensor); + + rval = crlmodule_write_regs(sensor, fmt->regs, fmt->regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set format\n", __func__); + return rval; + } + + rval = crlmodule_write_regs(sensor, pll->pll_regs, pll->pll_regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set plls\n", __func__); + return rval; + } + + /* Write mode list */ + rval = crlmodule_write_regs(sensor, + sensor->current_mode->mode_regs, + sensor->current_mode->mode_regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set mode\n", __func__); + return rval; + } + + /* Write stream on list */ + rval = crlmodule_write_regs(sensor, + sensor->sensor_ds->streamon_regs, + sensor->sensor_ds->streamon_regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set stream\n", __func__); + return rval; + } + + return 0; +} + +static int crlmodule_stop_streaming(struct crl_sensor 
*sensor) +{ + return crlmodule_write_regs(sensor, + sensor->sensor_ds->streamoff_regs, + sensor->sensor_ds->streamoff_regs_items); +} + +static int crlmodule_set_stream( + struct ici_isys_node* node, + void* ip, + int enable) +{ + struct ici_ext_subdev *subdev = node->sd; + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = sensor->src->sd.client; + int rval = 0; + + mutex_lock(&sensor->mutex); + + if (sensor->streaming == enable) + goto out; + + if (enable) { + + if (sensor->msr_list) { + rval = crlmodule_apply_msrlist(client, + sensor->msr_list); + if (rval) + dev_warn(&client->dev, "msrlist write error %d\n", + rval); + } + rval = crlmodule_start_streaming(sensor); + if (!rval) + sensor->streaming = 1; + } else { + rval = crlmodule_stop_streaming(sensor); + sensor->streaming = 0; + } + +out: + mutex_unlock(&sensor->mutex); + + /* SENSOR_IDLE control cannot be set when streaming */ + __crlmodule_enable_param(sensor, SENSOR_IDLE, enable); + + /* SENSOR_STREAMING controls cannot be set when not streaming */ + __crlmodule_enable_param(sensor, SENSOR_STREAMING, !enable); + + /* SENSOR_POWERED_ON controls do not depend on the streaming state. */ + __crlmodule_enable_param(sensor, SENSOR_POWERED_ON, false); + + return rval; +} + +static int crlmodule_identify_module( + struct ici_ext_subdev *subdev) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = sensor->src->sd.client; + unsigned int size = 0; + char *id_string; + char *temp; + int i, ret; + u32 val; + + for (i = 0; i < sensor->sensor_ds->id_reg_items; i++) + size += sensor->sensor_ds->id_regs[i].width + 1; + + /* TODO! If no ID! return success? */ + if (!size) + return 0; + + /* Create a string variable to append the module ID to */ + id_string = kzalloc(size, GFP_KERNEL); + if (!id_string) + return -ENOMEM; + *id_string = '\0'; + + /* Go through each reg in the list and append its value to id_string */ + for (i = 0; i < sensor->sensor_ds->id_reg_items; i++) { + ret = crlmodule_read_reg(sensor, + sensor->sensor_ds->id_regs[i].reg, + &val); + if (ret) + goto out; + + temp = kzalloc(sensor->sensor_ds->id_regs[i].width, GFP_KERNEL); + if (!temp) { + ret = -ENOMEM; + goto out; + } + snprintf(temp, sensor->sensor_ds->id_regs[i].width, "0x%x ", + val); + strcat(id_string, temp); + + kfree(temp); + } + + /* TODO! Check here if this module is in the supported list. + * Ideally the module manufacturer and id should be in platform + * data or ACPI and here the driver should read the value from the + * register and check if this matches any in the supported + * platform data */ + +out: + dev_dbg(&client->dev, "%s module: %s", __func__, id_string); + kfree(id_string); + if (ret) + dev_err(&client->dev, "sensor detection failed\n"); + return ret; +} + +/* + * This function executes the initialisation routines after the power on + * is successfully completed.
The following operations are done: + * + * Initialises registers after sensor power up - if any such list is configured + * Ctrl handler framework initialisation + */ +static int crlmodule_run_poweron_init(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + int rval; + + dev_dbg(&client->dev, "%s set power up registers: %d\n", + __func__, sensor->sensor_ds->powerup_regs_items); + + /* Write the power up registers */ + rval = crlmodule_write_regs(sensor, sensor->sensor_ds->powerup_regs, + sensor->sensor_ds->powerup_regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set powerup registers\n", + __func__); + return rval; + } + + /* Are we still initialising...? If yes, return here. */ + if (!sensor->pixel_array) + return 0; + + dev_dbg(&client->dev, "%s init controls", __func__); + + + /* SENSOR_IDLE control can be set only when not streaming */ + __crlmodule_enable_param(sensor, SENSOR_IDLE, false); + + /* SENSOR_STREAMING controls can be set only when streaming */ + __crlmodule_enable_param(sensor, SENSOR_STREAMING, true); + + /* SENSOR_POWERED_ON controls can be set after power on */ + __crlmodule_enable_param(sensor, SENSOR_POWERED_ON, false); + + mutex_lock(&sensor->mutex); + crlmodule_update_current_mode(sensor); + mutex_unlock(&sensor->mutex); + + return rval; +} + + +/* + * This function handles a sensor power up routine failure caused by any + * failed step in the routine. The index "rev_idx" is the index of the last + * successfully completed power sequence entity. This function executes the + * power sequence entities in reverse order with their undo values. + */ +static void crlmodule_undo_poweron_entities( + struct crl_sensor *sensor, + int rev_idx) +{ + struct i2c_client *client = sensor->src->sd.client; + struct crl_power_seq_entity *entity; + int idx; + + for (idx = rev_idx; idx >= 0; idx--) { + entity = &sensor->pwr_entity[idx]; + dev_dbg(&client->dev, "%s power type %d index %d\n", + __func__, entity->type, idx); + + switch (entity->type) { + case CRL_POWER_ETY_GPIO_FROM_PDATA: + gpio_set_value(sensor->platform_data->xshutdown, + entity->undo_val); + break; + case CRL_POWER_ETY_GPIO_CUSTOM: + gpio_set_value(entity->ent_number, entity->undo_val); + break; + case CRL_POWER_ETY_REGULATOR_FRAMEWORK: + regulator_disable(entity->regulator_priv); + break; + case CRL_POWER_ETY_CLK_FRAMEWORK: + clk_disable_unprepare(sensor->xclk); + break; + default: + dev_err(&client->dev, "%s Invalid power type\n", __func__); + break; + } + + if (entity->delay) + usleep_range(entity->delay, entity->delay + 10); + } +} + +static int __crlmodule_powerup_sequence(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + struct crl_power_seq_entity *entity; + unsigned idx; + int rval; + + for (idx = 0; idx < sensor->sensor_ds->power_items; idx++) { + entity = &sensor->pwr_entity[idx]; + dev_dbg(&client->dev, "%s power type %d index %d\n", + __func__, entity->type, idx); + + switch (entity->type) { + case CRL_POWER_ETY_GPIO_FROM_PDATA: + gpio_set_value(sensor->platform_data->xshutdown, entity->val); + break; + case CRL_POWER_ETY_GPIO_CUSTOM: + gpio_set_value(entity->ent_number, entity->val); + break; + case CRL_POWER_ETY_REGULATOR_FRAMEWORK: + rval = regulator_enable(entity->regulator_priv); + if (rval) { + dev_err(&client->dev, "Failed to enable regulator: %d\n", + rval); + devm_regulator_put(entity->regulator_priv); + entity->regulator_priv = NULL; + goto error; + } + break; + case CRL_POWER_ETY_CLK_FRAMEWORK: + rval =
clk_set_rate(sensor->xclk, sensor->platform_data->ext_clk); + if (rval < 0) { + dev_err(&client->dev, + "unable to set clock freq to %u\n", + sensor->platform_data->ext_clk); + goto error; + } + if (clk_get_rate(sensor->xclk) != sensor->platform_data->ext_clk) + dev_warn(&client->dev, + "warning: unable to set accurate clock freq %u\n", + sensor->platform_data->ext_clk); + rval = clk_prepare_enable(sensor->xclk); + if (rval) { + dev_err(&client->dev, "Failed to enable clock: %d\n", rval); + goto error; + } + break; + default: + dev_err(&client->dev, "Invalid power type\n"); + rval = -ENODEV; + goto error; + break; + } + + if (entity->delay) + usleep_range(entity->delay, entity->delay + 10); + } + return 0; +error: + dev_err(&client->dev, "Error: Power sequence failed\n"); + if (idx > 0) + crlmodule_undo_poweron_entities(sensor, idx-1); + return rval; +} + +static int crlmodule_set_power( + struct ici_isys_node* node, + int on) +{ + struct ici_ext_subdev *subdev = node->sd; + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = sensor->src->sd.client; + int ret = 0; + + pr_err("crlmodule_set_power %d\n", on); + if (on) { + ret = pm_runtime_get_sync(&client->dev); + pr_err("crlmodule_set_power val %d\n", ret); + if (ret < 0) { + pm_runtime_put(&client->dev); + return ret; + } + } + + mutex_lock(&sensor->power_mutex); + if (on && !sensor->power_count) { + usleep_range(2000, 3000); + ret = crlmodule_run_poweron_init(sensor); + if (ret < 0) { + pr_err("crlmodule_set_power err (2) %d\n", ret); + pm_runtime_put(&client->dev); + goto out; + } + } + + /* Update the power count. */ + sensor->power_count += on ? 1 : -1; + WARN_ON(sensor->power_count < 0); + +out: + mutex_unlock(&sensor->power_mutex); + + if (!on) + pm_runtime_put(&client->dev); + + pr_err("crlmodule_set_power ret %d\n", ret); + return ret; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Modified based on the CRL Module changes + */ +static int crlmodule_init_subdevs( + struct ici_ext_subdev *subdev) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = sensor->src->sd.client; + struct crl_subdev *prev_sd = NULL; + int i = 0; + struct crl_subdev *sd; + int rval = 0; + + dev_dbg(&client->dev, "%s\n", __func__); + + /* + * The scaler, binner and PA order matters. The sensor configuration + * file must maintain this order. The PA subdev is a must; binner and + * scaler can be omitted depending on the sensor, but if a scaler is + * present it must be the first sd. + */ + if (sensor->sensor_ds->subdevs[i].subdev_type + == CRL_SUBDEV_TYPE_SCALER) { + sensor->scaler = &sensor->ssds[sensor->ssds_used]; + sensor->ssds_used++; + i++; + } + + if (sensor->sensor_ds->subdevs[i].subdev_type + == CRL_SUBDEV_TYPE_BINNER) { + sensor->binner = &sensor->ssds[sensor->ssds_used]; + sensor->ssds_used++; + i++; + } + + if (sensor->sensor_ds->subdevs[i].subdev_type + == CRL_SUBDEV_TYPE_PIXEL_ARRAY) { + sensor->pixel_array = &sensor->ssds[sensor->ssds_used]; + sensor->ssds_used++; + i++; + } + + /* The CRL MediaCTL IF driver can't cope if none of these subdevs is present!
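+ * For example, a configuration that omits both scaler and binner + * exposes only the mandatory pixel-array subdev.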
*/ + if (!sensor->ssds_used) { + dev_err(&client->dev, "%s no subdevs present\n", __func__); + return -ENODEV; + } + + if (!sensor->sensor_ds->pll_config_items) { + dev_err(&client->dev, "%s no pll configurations\n", __func__); + return -ENODEV; + } + + /* TODO validate rest of the settings from the sensor definition file */ + + dev_dbg(&client->dev, "%s subdevs: %d\n", __func__, i); + + for (i = 0; i < sensor->ssds_used; i++) { + sd = &sensor->ssds[i]; + + sd->sensor = sensor; + if (sd == sensor->pixel_array) { + sd->npads = 1; + } else { + sd->npads = 2; + sd->source_pad = 1; + } + + sd->sink_fmt.width = + sensor->sensor_ds->sensor_limits->x_addr_max; + sd->sink_fmt.height = + sensor->sensor_ds->sensor_limits->y_addr_max; + sd->compose.width = sd->sink_fmt.width; + sd->compose.height = sd->sink_fmt.height; + sd->crop[sd->source_pad] = sd->compose; + //sd->pads[sd->source_pad].flags = ICI_PAD_FLAGS_SOURCE; + if (sd != sensor->pixel_array) { + sd->crop[sd->sink_pad] = sd->compose; + //sd->pads[sd->sink_pad].flags = ICI_PAD_FLAGS_SINK; + } + + rval = init_ext_sd(client, sd, i); + if (rval) + return rval; + + if (prev_sd == NULL) { + prev_sd = sd; + continue; + } + + if (sensor->reg.create_link) { + rval = sensor->reg.create_link(&sd->sd.node, + sd->source_pad, + &prev_sd->sd.node, + prev_sd->sink_pad, + 0); + if (rval) + return rval; + } + prev_sd = sd; + } + + return rval; +} + +static int __init_power_resources( + struct ici_ext_subdev *subdev) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = sensor->src->sd.client; + struct crl_power_seq_entity *entity; + unsigned idx; + + sensor->pwr_entity = devm_kcalloc(&client->dev, + sensor->sensor_ds->power_items, + sizeof(struct crl_power_seq_entity), GFP_KERNEL); + + if (!sensor->pwr_entity) + return -ENOMEM; + + for (idx = 0; idx < sensor->sensor_ds->power_items; idx++) + sensor->pwr_entity[idx] = + sensor->sensor_ds->power_entities[idx]; + + dev_dbg(&client->dev, "%s\n", __func__); + + for (idx = 0; idx < sensor->sensor_ds->power_items; idx++) { + int rval; + entity = &sensor->pwr_entity[idx]; + + switch (entity->type) { + case CRL_POWER_ETY_GPIO_FROM_PDATA: + if (devm_gpio_request_one(&client->dev, + sensor->platform_data->xshutdown, 0, + "CRL xshutdown") != 0) { + dev_err(&client->dev, "unable to acquire xshutdown %d\n", + sensor->platform_data->xshutdown); + return -ENODEV; + } + break; + case CRL_POWER_ETY_GPIO_CUSTOM: + if (devm_gpio_request_one(&client->dev, + entity->ent_number, 0, + "CRL Custom") != 0) { + dev_err(&client->dev, "unable to acquire custom gpio %d\n", + entity->ent_number); + return -ENODEV; + } + break; + case CRL_POWER_ETY_REGULATOR_FRAMEWORK: + entity->regulator_priv = devm_regulator_get(&client->dev, + entity->ent_name); + if (IS_ERR(entity->regulator_priv)) { + dev_err(&client->dev, "Failed to get regulator: %s\n", + entity->ent_name); + entity->regulator_priv = NULL; + return -ENODEV; + } + rval = regulator_set_voltage(entity->regulator_priv, + entity->val, + entity->val); + /* Not all regulators support voltage changes */ + if (rval < 0) + dev_info(&client->dev, + "Failed to set voltage %s %d\n", + entity->ent_name, entity->val); + break; + case CRL_POWER_ETY_CLK_FRAMEWORK: + sensor->xclk = devm_clk_get(&client->dev, NULL); + if (IS_ERR(sensor->xclk)) { + dev_err(&client->dev, "Cannot get sensor clk\n"); + return -ENODEV; + } + break; + default: + dev_err(&client->dev, "Invalid Power item\n"); + return -ENODEV; + } + } + return 0; +} + +static int crlmodule_registered( +
struct ici_ext_subdev_register *reg) +{ + struct ici_ext_subdev* subdev = reg->sd; + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = sensor->src->sd.client; + + int rval; + + if (!reg->sd || !reg->setup_node || !reg->create_link) + return -EINVAL; + + rval = __init_power_resources(subdev); + if (rval) + return -ENODEV; + + + /* Power up the sensor */ + if (pm_runtime_get_sync(&client->dev) < 0) { + pm_runtime_put(&client->dev); + return -ENODEV; + } + + /* One-time init */ + rval = crlmodule_write_regs(sensor, sensor->sensor_ds->onetime_init_regs, + sensor->sensor_ds->onetime_init_regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set one-time init registers\n", + __func__); + rval = -ENODEV; + goto out; + } + + /* Sensor specific init */ + if (sensor->sensor_ds->sensor_init) { + rval = sensor->sensor_ds->sensor_init(client); + + if (rval) { + dev_err(&client->dev, "%s failed to run sensor specific init\n", + __func__); + rval = -ENODEV; + goto out; + } + } + /* Identify the module */ + rval = crlmodule_identify_module(subdev); + if (rval) { + rval = -ENODEV; + goto out; + } + + sensor->reg = *reg; + + rval = crlmodule_init_subdevs(subdev); + if (rval) + goto out; + + sensor->binning_horizontal = 1; + sensor->binning_vertical = 1; + sensor->scale_m = 1; + sensor->flip_info = CRL_FLIP_DEFAULT_NONE; + sensor->ext_ctrl_impacts_pll_selection = false; + sensor->ext_ctrl_impacts_mode_selection = false; + + rval = crlmodule_init_controls(sensor); + if (rval) + goto out; + + mutex_lock(&sensor->mutex); + crlmodule_update_current_mode(sensor); + mutex_unlock(&sensor->mutex); + rval = crlmodule_nvm_init(sensor); + +out: + dev_dbg(&client->dev, "%s rval: %d\n", __func__, rval); + /* crlmodule_power_off(sensor); */ + pm_runtime_put(&client->dev); + + return rval; +} + +static void crlmodule_unregistered( + struct ici_ext_subdev *subdev) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = sensor->src->sd.client; + dev_dbg(&client->dev, "%s\n", __func__); +} + +static int init_ext_sd(struct i2c_client *client, + struct crl_subdev *ssd, int idx) +{ + int rval; + struct ici_ext_subdev* sd = &ssd->sd; + struct crl_sensor *sensor = ssd->sensor; + char name[ICI_MAX_NODE_NAME]; + if (sensor->platform_data->suffix) + snprintf(name, + sizeof(name), "%s %c", + sensor->sensor_ds->subdevs[idx].name, + sensor->platform_data->suffix); + else + snprintf(name, + sizeof(name), "%s", + sensor->sensor_ds->subdevs[idx].name); + + sd->client = client; + sd->num_pads = ssd->npads; + sd->src_pad = ssd->source_pad; + sd->set_param = crlmodule_set_param; + sd->get_param = crlmodule_get_param; + sd->get_menu_item = crlmodule_get_menu_item; + if (sensor->reg.setup_node) { + rval = sensor->reg.setup_node(sensor->reg.ipu_data, + sd, name); + if (rval) + return rval; + } + sd->node.node_set_power = crlmodule_set_power; + sd->node.node_set_streaming = crlmodule_set_stream; + sd->node.node_get_pad_supported_format = + crlmodule_enum_pixelformat; + sd->node.node_set_pad_ffmt = crlmodule_set_format; + sd->node.node_get_pad_ffmt = crlmodule_get_format; + sd->node.node_set_pad_sel = crlmodule_set_selection; + sd->node.node_get_pad_sel = crlmodule_get_selection; + + return 0; +} + +#ifdef CONFIG_PM + +static int crlmodule_runtime_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ici_ext_subdev *sd = + i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(sd); + + crlmodule_undo_poweron_entities(sensor, +
sensor->sensor_ds->power_items - 1); + return 0; +} + +static int crlmodule_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ici_ext_subdev *sd = + i2c_get_clientdata(client); + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + + if (sensor->streaming) + crlmodule_stop_streaming(sensor); + + crlmodule_undo_poweron_entities(sensor, + sensor->sensor_ds->power_items - 1); + return 0; +} + +static int crlmodule_runtime_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ici_ext_subdev *sd = + i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(sd); + + return __crlmodule_powerup_sequence(sensor); +} + +static int crlmodule_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ici_ext_subdev *sd = + i2c_get_clientdata(client); + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + int rval; + + rval = __crlmodule_powerup_sequence(sensor); + if (!rval && sensor->power_count) + rval = crlmodule_run_poweron_init(sensor); + if (!rval && sensor->streaming) + rval = crlmodule_start_streaming(sensor); + + return rval; +} + +#else + +#define crlmodule_runtime_suspend NULL +#define crlmodule_runtime_resume NULL +#define crlmodule_suspend NULL +#define crlmodule_resume NULL + +#endif /* CONFIG_PM */ + + +static int crlmodule_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct crl_sensor *sensor; + int ret; + + if (client->dev.platform_data == NULL) + return -ENODEV; + + /* TODO! Create the sensor based on the interface */ + sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL); + if (sensor == NULL) + return -ENOMEM; + + sensor->platform_data = client->dev.platform_data; + mutex_init(&sensor->mutex); + mutex_init(&sensor->power_mutex); + + ret = crlmodule_populate_ds(sensor, &client->dev); + if (ret) + return -ENODEV; + + sensor->src = &sensor->ssds[sensor->ssds_used]; + sensor->src->sensor = sensor; + + sensor->src->sd.client = client; + sensor->src->sd.do_register = crlmodule_registered; + sensor->src->sd.do_unregister = crlmodule_unregistered; + i2c_set_clientdata(client, &sensor->src->sd); + + pm_runtime_enable(&client->dev); + + /* Load IQ tuning registers from drvb file*/ + if (sensor->sensor_ds->msr_file_name) { + ret = crlmodule_load_msrlist(client, + sensor->sensor_ds->msr_file_name, + &sensor->msr_list); + if (ret) + dev_warn(&client->dev, + "msrlist loading failed. 
Ignore, move on\n"); + } else { + /* sensor will still continue streaming */ + dev_warn(&client->dev, "No msrlists associated with sensor\n"); + } + + return 0; +} + +static int crlmodule_remove(struct i2c_client *client) +{ + struct ici_ext_subdev *subdev = + i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + unsigned int i; + + if (sensor->sensor_ds->sensor_cleanup) + sensor->sensor_ds->sensor_cleanup(client); + + for (i = 0; i < sensor->ssds_used; i++) { + struct ici_ext_subdev *sd = + &sensor->ssds[i].sd; + if (sd->do_unregister) + sd->do_unregister(sd); + } + + i2c_set_clientdata(client, NULL); + + crlmodule_nvm_deinit(sensor); + crlmodule_release_ds(sensor); + crlmodule_release_msrlist(&sensor->msr_list); + + pm_runtime_disable(&client->dev); + + return 0; +} + + +static const struct i2c_device_id crlmodule_id_table[] = { + { CRLMODULE_LITE_NAME, 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, crlmodule_id_table); + +static const struct dev_pm_ops crlmodule_pm_ops = { + .runtime_suspend = crlmodule_runtime_suspend, + .runtime_resume = crlmodule_runtime_resume, + .suspend = crlmodule_suspend, + .resume = crlmodule_resume, +}; + +static struct i2c_driver crlmodule_i2c_driver = { + .driver = { + .name = CRLMODULE_LITE_NAME, + .pm = &crlmodule_pm_ops, + }, + .probe = crlmodule_probe, + .remove = crlmodule_remove, + .id_table = crlmodule_id_table, +}; + +module_i2c_driver(crlmodule_i2c_driver); + +MODULE_AUTHOR("Vinod Govindapillai "); +MODULE_AUTHOR("Jouni Ukkonen "); +MODULE_AUTHOR("Tommi Franttila "); +MODULE_DESCRIPTION("Generic driver for common register list based camera sensor modules"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule-data.c b/drivers/media/i2c/crlmodule-lite/crlmodule-data.c new file mode 100644 index 000000000000..c22dc1fbf87a --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-data.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include "crlmodule.h" +#include "crl_adv7481_cvbs_configuration.h" +#include "crl_adv7481_hdmi_configuration.h" +#include "crl_adv7481_eval_configuration.h" +#include "crl_magna_configuration_ti964.h" + +static const struct crlmodule_sensors supported_sensors[] = { + { "ADV7481 CVBS", "adv7481_cvbs", &adv7481_cvbs_crl_configuration }, + { "ADV7481 HDMI", "adv7481_hdmi", &adv7481_hdmi_crl_configuration }, + { "ADV7481_EVAL", "adv7481_eval", &adv7481_eval_crl_configuration }, + { "ADV7481B_EVAL", "adv7481b_eval", &adv7481b_eval_crl_configuration }, + { "MAGNA_TI964", "magna_ti964", &magna_ti964_crl_configuration }, + { "i2c-ADV7481A:00", "adv7481_hdmi", &adv7481_hdmi_crl_configuration }, + { "i2c-ADV7481B:00", "adv7481_cvbs", &adv7481_cvbs_crl_configuration }, +}; + +/* + * Function to populate the CRL data structure from the sensor configuration + * definition file + */ +int crlmodule_populate_ds(struct crl_sensor *sensor, struct device *dev) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(supported_sensors); i++) { + /* Check the ACPI supported modules */ + if (!strcmp(dev_name(dev), supported_sensors[i].pname)) { + sensor->sensor_ds = supported_sensors[i].ds; + dev_info(dev, "%s %s selected\n", + __func__, supported_sensors[i].name); + return 0; + }; + + /* Check the non ACPI modules */ + if (!strcmp(sensor->platform_data->module_name, + supported_sensors[i].pname)) { + sensor->sensor_ds = supported_sensors[i].ds; + dev_info(dev, "%s %s selected\n", + __func__, 
supported_sensors[i].name); + return 0; + }; + } + + dev_err(dev, "%s No suitable configuration found for %s\n", + __func__, dev_name(dev)); + return -EINVAL; +} + +/* + * Function to validate the contents of the CRL data structure, checking that + * all the required fields are filled and within the limits. + */ +int crlmodule_validate_ds(struct crl_sensor *sensor) +{ + /* TODO! Revisit this. */ + return 0; +} + +/* Function to free all resources allocated for the CRL data structure */ +void crlmodule_release_ds(struct crl_sensor *sensor) +{ + /* + * TODO! Revisit this. + * Place for cleaning all the resources used for the generation + * of CRL data structure. + */ +} + diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule-msrlist.c b/drivers/media/i2c/crlmodule-lite/crlmodule-msrlist.c new file mode 100644 index 000000000000..c2ad74e59be9 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-msrlist.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include "crlmodule-msrlist.h" +#include "crlmodule.h" + +/* + * + * The DRVB file is part of the old structure of the tagged + * binary container, which is used as such in crlmodule. + * Changes need to be made in cameralibs to remove the + * tagged structure and convert to the untagged drvb format. + * Below are the tagged binary data container structure + * definitions. Most of it is copied from libmsrlisthelper.c, + * with some changes for crlmodule. + * + */ + +static int crlmodule_write_msrlist(struct i2c_client *client, u8 *bufptr, + unsigned int size) +{ + /* + * + * The configuration data contains any number of sequences. The + * first byte (a u8) gives the number of bytes in the sequence + * to follow, and is followed by exactly that many bytes of + * actual data to be written to the sensor. + * By convention, the first two bytes of the actual data are an + * address in the sensor address space (hibyte followed by + * lobyte) where the remaining data in the sequence + * will be written.
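+ * + * For example, the (hypothetical) sequence 03 30 0A 12 has + * length 3 and writes the single data byte 0x12 to sensor + * register 0x300A (hibyte 0x30, lobyte 0x0A).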
+ * + */ + + u8 *ptr = bufptr; + int ret; + + while (ptr < bufptr + size) { + struct i2c_msg msg = { + .addr = client->addr, + .flags = 0, + }; + + msg.len = *ptr++; + msg.buf = ptr; + ptr += msg.len; + + if (ptr > bufptr + size) + return -EINVAL; + + ret = i2c_transfer(client->adapter, &msg, 1); + if (ret < 0) { + dev_err(&client->dev, "i2c write error: %d\n", ret); + return ret; + } + } + return 0; +} + +static int crlmodule_parse_msrlist(struct i2c_client *client, u8 *buffer, + unsigned int size) +{ + u8 *endptr8 = buffer + size; + int ret; + unsigned int dataset = 0; + struct tbd_data_record_header *header = + (struct tbd_data_record_header *)buffer; + + do { + + if ((u8 *)header + sizeof(*header) > endptr8) + return -EINVAL; + + if ((u8 *)header + header->data_offset + + header->data_size > endptr8) + return -EINVAL; + + dataset++; + + if (header->data_size && (header->flags & 1)) { + + ret = crlmodule_write_msrlist(client, + buffer + header->data_offset, + header->data_size); + if (ret) + return ret; + } + header = (struct tbd_data_record_header *)(buffer + + header->next_offset); + } while (header->next_offset); + + return 0; +} + + +int crlmodule_apply_msrlist(struct i2c_client *client, + const struct firmware *fw) +{ + struct tbd_header *header; + struct tbd_record_header *record; + + header = (struct tbd_header *)fw->data; + record = (struct tbd_record_header *)(header + 1); + + if (record->size && record->class_id != TBD_CLASS_DRV_ID) + return -EINVAL; + + return crlmodule_parse_msrlist(client, (u8 *)(record + 1), + record->size); +} + + +int crlmodule_load_msrlist(struct i2c_client *client, char *name, + const struct firmware **fw) +{ + + struct tbd_header *header; + struct tbd_record_header *record; + int ret = -ENOENT; + + ret = request_firmware(fw, name, &client->dev); + if (ret) { + dev_err(&client->dev, + "Error %d while requesting firmware %s\n", + ret, name); + return ret; + } + header = (struct tbd_header *)(*fw)->data; + + /* From here on a malformed container is invalid, not missing */ + ret = -EINVAL; + + if (sizeof(*header) > (*fw)->size) + goto out; + + /* Check that we have a drvb block.
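The 4-byte + * tag also serves as an endianness check, per the tbd_header + * definition.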
*/ + if (memcmp(&header->tag, "DRVB", 4)) + goto out; + + if (header->size != (*fw)->size) + goto out; + + if (sizeof(*header) + sizeof(*record) > (*fw)->size) + goto out; + + + return 0; + +out: + crlmodule_release_msrlist(fw); + return ret; +} + + +void crlmodule_release_msrlist(const struct firmware **fw) +{ + release_firmware(*fw); + *fw = NULL; +} diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule-msrlist.h b/drivers/media/i2c/crlmodule-lite/crlmodule-msrlist.h new file mode 100644 index 000000000000..2b296c9f9d74 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-msrlist.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_MSRLIST_H__ +#define __CRLMODULE_MSRLIST_H__ + +#define TBD_CLASS_DRV_ID 2 + +struct i2c_client; +struct firmware; + +struct tbd_header { + /* Tag identifier, also checks endianness */ + u32 tag; + /* Container size including this header */ + u32 size; + /* Version, format 0xYYMMDDVV */ + u32 version; + /* Revision, format 0xYYMMDDVV */ + u32 revision; + /* Configuration flag bits set */ + u32 config_bits; + /* Global checksum, header included */ + u32 checksum; +} __packed; + +struct tbd_record_header { + /* Size of record including header */ + u32 size; + /* tbd_format_t enumeration values used */ + u8 format_id; + /* Packing method; 0 = no packing */ + u8 packing_key; + /* tbd_class_t enumeration values used */ + u16 class_id; +} __packed; + +struct tbd_data_record_header { + u16 next_offset; + u16 flags; + u16 data_offset; + u16 data_size; +} __packed; + +int crlmodule_load_msrlist(struct i2c_client *client, char *name, + const struct firmware **fw); +int crlmodule_apply_msrlist(struct i2c_client *client, + const struct firmware *fw); +void crlmodule_release_msrlist(const struct firmware **fw); + +#endif /* ifndef __CRLMODULE_MSRLIST_H__ */ diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule-nvm.c b/drivers/media/i2c/crlmodule-lite/crlmodule-nvm.c new file mode 100644 index 000000000000..935a967a525a --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-nvm.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include "crlmodule.h" +#include "crlmodule-nvm.h" +#include "crlmodule-regs.h" + +static ssize_t crlmodule_sysfs_nvm_read(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ici_ext_subdev *subdev = + i2c_get_clientdata(to_i2c_client(dev)); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + + memcpy(buf, sensor->nvm_data, sensor->nvm_size); + return sensor->nvm_size; +} + +DEVICE_ATTR(nvm, S_IRUGO, crlmodule_sysfs_nvm_read, NULL); + +static unsigned int crlmodule_get_nvm_size(struct crl_sensor *sensor) +{ + + struct i2c_client *client = sensor->src->sd.client; + unsigned int i, size = 0; + + for (i = 0; i < sensor->sensor_ds->crl_nvm_info.nvm_blobs_items; i++) + size += sensor->sensor_ds->crl_nvm_info.nvm_config[i].size; + + if (size > PAGE_SIZE) { + dev_err(&client->dev, "nvm size too big\n"); + size = 0; + } + return size; +} + +static int crlmodule_get_nvm_data(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + int i; + int rval = 0; + + u8 *nvm_data = sensor->nvm_data; + + if (sensor->sensor_ds->crl_nvm_info.nvm_preop_regs_items) { + dev_dbg(&client->dev, + "%s perform pre-operations\n", __func__); + + rval = crlmodule_write_regs( + sensor, + 
sensor->sensor_ds->crl_nvm_info.nvm_preop_regs, + sensor->sensor_ds->crl_nvm_info.nvm_preop_regs_items); + if (rval) { + dev_err(&client->dev, + "failed to perform nvm pre-operations\n"); + return rval; + } + } + + for (i = 0; i < sensor->sensor_ds->crl_nvm_info.nvm_blobs_items; i++) { + /* Index the blob array instead of advancing the shared + * pointer, which would corrupt the configuration for any + * later reader. */ + struct crl_nvm_blob *blob = + &sensor->sensor_ds->crl_nvm_info.nvm_config[i]; + + dev_dbg(&client->dev, + "%s read blob %d dev_addr: 0x%x start_addr: 0x%x size: %d\n", + __func__, i, + blob->dev_addr, + blob->start_addr, + blob->size); + + crlmodule_block_read(sensor, + blob->dev_addr, + blob->start_addr, + sensor->sensor_ds->crl_nvm_info.nvm_flags + & CRL_NVM_ADDR_MODE_MASK, + blob->size, + nvm_data); + + nvm_data += blob->size; + } + + if (sensor->sensor_ds->crl_nvm_info.nvm_postop_regs_items) { + dev_dbg(&client->dev, "%s perform post-operations\n", + __func__); + rval = crlmodule_write_regs( + sensor, + sensor->sensor_ds->crl_nvm_info.nvm_postop_regs, + sensor->sensor_ds->crl_nvm_info.nvm_postop_regs_items); + if (rval) { + dev_err(&client->dev, + "failed to perform nvm post-operations\n"); + return rval; + } + } + return rval; +} + +int crlmodule_nvm_init(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + unsigned int size = crlmodule_get_nvm_size(sensor); + int rval; + + if (size) { + sensor->nvm_data = devm_kzalloc(&client->dev, size, GFP_KERNEL); + if (sensor->nvm_data == NULL) { + dev_err(&client->dev, "nvm buf allocation failed\n"); + return -ENOMEM; + } + sensor->nvm_size = size; + + rval = crlmodule_get_nvm_data(sensor); + if (rval) + goto err; + if (device_create_file(&client->dev, &dev_attr_nvm) != 0) { + dev_err(&client->dev, "sysfs nvm entry failed\n"); + rval = -EBUSY; + goto err; + } + } + + return 0; +err: + sensor->nvm_size = 0; + return rval; +} + +void crlmodule_nvm_deinit(struct crl_sensor *sensor) +{ + struct i2c_client *client = sensor->src->sd.client; + + if (sensor->nvm_size) { + device_remove_file(&client->dev, &dev_attr_nvm); + sensor->nvm_size = 0; + } +} diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule-nvm.h b/drivers/media/i2c/crlmodule-lite/crlmodule-nvm.h new file mode 100644 index 000000000000..9cbabfa950bd --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-nvm.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_NVM_H_ +#define __CRLMODULE_NVM_H_ + +#include "crlmodule.h" + +#define CRL_NVM_ADDR_MODE_8BIT 0x00000001 +#define CRL_NVM_ADDR_MODE_16BIT 0x00000002 + +#define CRL_NVM_ADDR_MODE_MASK (CRL_NVM_ADDR_MODE_8BIT | \ + CRL_NVM_ADDR_MODE_16BIT) + + +int crlmodule_nvm_init(struct crl_sensor *sensor); +void crlmodule_nvm_deinit(struct crl_sensor *sensor); + +#endif /* __CRLMODULE_NVM_H_ */ diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule-regs.c b/drivers/media/i2c/crlmodule-lite/crlmodule-regs.c new file mode 100644 index 000000000000..d7b6d0181410 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-regs.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include + +#include "crlmodule.h" +#include "crlmodule-nvm.h" +#include "crlmodule-regs.h" + +static int crlmodule_i2c_read(struct crl_sensor
*sensor, u16 dev_i2c_addr, u16 reg, + u8 len, u32 *val) +{ + struct i2c_client *client = sensor->src->sd.client; + struct i2c_msg msg[2]; + unsigned char data[4]; + int r; + + dev_dbg(&client->dev, "%s reg, len: [0x%04x, %d]", __func__, reg, len); + + if (len != CRL_REG_LEN_08BIT && len != CRL_REG_LEN_16BIT && + len != CRL_REG_LEN_24BIT && len != CRL_REG_LEN_32BIT) + return -EINVAL; + + if (dev_i2c_addr == CRL_I2C_ADDRESS_NO_OVERRIDE) + msg[0].addr = client->addr; + else + msg[0].addr = dev_i2c_addr; + + msg[1].addr = msg[0].addr; + + msg[0].flags = 0; + msg[0].buf = data; + + if (sensor->sensor_ds->addr_len == CRL_ADDR_7BIT) { + msg[0].addr = msg[0].addr>>1; + msg[1].addr = msg[1].addr>>1; + } + + if ((sensor->sensor_ds->addr_len == CRL_ADDR_8BIT) || + (sensor->sensor_ds->addr_len == CRL_ADDR_7BIT)) { + data[0] = (u8) (reg & 0xff); + msg[0].len = 1; + } else { + /* high byte goes out first */ + data[0] = (u8) (reg >> 8); + data[1] = (u8) (reg & 0xff); + msg[0].len = 2; + } + + msg[1].flags = I2C_M_RD; + msg[1].buf = data; + msg[1].len = len; + + r = i2c_transfer(client->adapter, msg, 2); + + if (r < 0) { + goto err; + } + + *val = 0; + /* high byte comes first */ + switch (len) { + case CRL_REG_LEN_32BIT: + *val = (data[0] << 24) + (data[1] << 16) + (data[2] << 8) + + data[3]; + break; + case CRL_REG_LEN_24BIT: + *val = (data[0] << 16) + (data[1] << 8) + data[2]; + break; + case CRL_REG_LEN_16BIT: + *val = (data[0] << 8) + data[1]; + break; + case CRL_REG_LEN_08BIT: + *val = data[0]; + break; + } + + return 0; + +err: + dev_err(&client->dev, "read from offset 0x%x error %d\n", reg, r); + + return r; +} + +static int crlmodule_i2c_write(struct crl_sensor *sensor, u16 dev_i2c_addr, + u16 reg, u8 len, u32 val) +{ + struct i2c_client *client = sensor->src->sd.client; + struct i2c_msg msg; + unsigned char data[6]; + unsigned int retries; + int r; + unsigned char *data_offset; + + if (len != CRL_REG_LEN_08BIT && len != CRL_REG_LEN_16BIT && + len != CRL_REG_LEN_24BIT && len != CRL_REG_LEN_32BIT) + return -EINVAL; + + if (dev_i2c_addr == CRL_I2C_ADDRESS_NO_OVERRIDE) + msg.addr = client->addr; + else + msg.addr = dev_i2c_addr; + + msg.flags = 0; /* Write */ + msg.buf = data; + + if (sensor->sensor_ds->addr_len == CRL_ADDR_7BIT) + msg.addr = msg.addr>>1; + if ((sensor->sensor_ds->addr_len == CRL_ADDR_8BIT) || + (sensor->sensor_ds->addr_len == CRL_ADDR_7BIT)) { + data[0] = (u8) (reg & 0xff); + msg.len = 1 + len; + data_offset = &data[1]; + } else { + /* high byte goes out first */ + data[0] = (u8) (reg >> 8); + data[1] = (u8) (reg & 0xff); + msg.len = 2 + len; + data_offset = &data[2]; + } + + dev_dbg(&client->dev, "%s len reg, val: [%d, 0x%04x, 0x%04x]", + __func__, len, reg, val); + + switch (len) { + case CRL_REG_LEN_08BIT: + data_offset[0] = val; + break; + case CRL_REG_LEN_16BIT: + data_offset[0] = val >> 8; + data_offset[1] = val; + break; + case CRL_REG_LEN_24BIT: + data_offset[0] = val >> 16; + data_offset[1] = val >> 8; + data_offset[2] = val; + break; + case CRL_REG_LEN_32BIT: + data_offset[0] = val >> 24; + data_offset[1] = val >> 16; + data_offset[2] = val >> 8; + data_offset[3] = val; + break; + } + + for (retries = 0; retries < 5; retries++) { + /* + * For an unknown reason the sensor sometimes stops + * responding. This retry loop is a temporary solution + * until the root cause is found. + */ + r = i2c_transfer(client->adapter, &msg, 1); + if (r == 1) { + if (retries) + dev_err(&client->dev, + "sensor i2c stall encountered.
retries: %d\n", + retries); + return 0; + } + + usleep_range(2000, 2000); + } + + dev_err(&client->dev, + "wrote 0x%x to offset 0x%x error %d\n", val, reg, r); + + return r; +} + +int crlmodule_read_reg(struct crl_sensor *sensor, + const struct crl_register_read_rep reg, u32 *val) +{ + return crlmodule_i2c_read(sensor, reg.dev_i2c_addr, reg.address, + reg.len, val); +} + +int crlmodule_write_regs(struct crl_sensor *sensor, + const struct crl_register_write_rep *regs, int len) +{ + struct i2c_client *client = sensor->src->sd.client; + unsigned int i; + int ret; + u32 val; + + for (i = 0; i < len; i++) { + /* + * Sensor setting sequence may need some delay. + * delay value is specified by reg.val field + */ + if (regs[i].len == CRL_REG_LEN_DELAY) { + msleep(regs[i].val); + continue; + } + /* + * If the same register is being used for two settings, updating + * one value should not overwrite the other one. Such registers + * must be marked as CRL_REG_READ_AND_UPDATE. For such registers + * first read the register and update it + */ + val = regs[i].val; + if (regs[i].len & CRL_REG_READ_AND_UPDATE) { + ret = crlmodule_i2c_read(sensor, regs[i].dev_i2c_addr, + regs[i].address, + regs[i].len & CRL_REG_LEN_READ_MASK, &val); + if (ret) + return ret; + val |= regs[i].val; + } + + ret = crlmodule_i2c_write(sensor, regs[i].dev_i2c_addr, + regs[i].address, + regs[i].len & CRL_REG_LEN_READ_MASK, + val); + if (ret < 0) { + dev_err(&client->dev, + "error %d writing reg 0x%4.4x, val 0x%2.2x", + ret, regs[i].address, regs[i].val); + return ret; + } + }; + + return 0; +} + +int crlmodule_write_reg(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 reg, + u8 len, u32 mask, u32 val) +{ + struct i2c_client *client = sensor->src->sd.client; + int ret; + u32 val2; + + /* + * Sensor setting sequence may need some delay. + * delay value is specified by reg.val field + */ + if (len == CRL_REG_LEN_DELAY) { + msleep(val); + return 0; + } + + /* + * If the same register is being used for two settings, updating + * one value should not overwrite the other one. Such registers + * must be marked as CRL_REG_READ_AND_UPDATE. 
For such registers + * first read the register and update it + */ + if (len & CRL_REG_READ_AND_UPDATE) { + u32 tmp; + + ret = crlmodule_i2c_read(sensor, dev_i2c_addr, reg, + len & CRL_REG_LEN_READ_MASK, &val2); + if (ret) + return ret; + + tmp = val2 & ~mask; + tmp |= val & mask; + val = tmp; + } else { + val &= mask; + } + + ret = crlmodule_i2c_write(sensor, dev_i2c_addr, reg, + len & CRL_REG_LEN_READ_MASK, val); + if (ret < 0) { + dev_err(&client->dev, + "error %d writing reg 0x%4.4x, val 0x%2.2x", + ret, reg, val); + return ret; + } + + return 0; +} + +int crlmodule_block_read(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 addr, + u8 addr_mode, u16 len, u8 *buf) +{ + struct i2c_client *client = sensor->src->sd.client; + struct i2c_msg msg[2]; + u8 data[2]; + u16 offset = 0; + int r; + + memset(msg, 0, sizeof(msg)); + + if (dev_i2c_addr == CRL_I2C_ADDRESS_NO_OVERRIDE) { + msg[0].addr = client->addr; + msg[1].addr = client->addr; + } else { + msg[0].addr = dev_i2c_addr; + msg[1].addr = dev_i2c_addr; + } + + if (addr_mode & CRL_NVM_ADDR_MODE_8BIT) + msg[0].len = 1; + else if (addr_mode & CRL_NVM_ADDR_MODE_16BIT) + msg[0].len = 2; + else + return -EINVAL; + + msg[0].flags = 0; + msg[1].flags = I2C_M_RD; + + while (offset < len) { + if (addr_mode & CRL_NVM_ADDR_MODE_8BIT) { + data[0] = addr & 0xff; + } else { + data[0] = (addr >> 8) & 0xff; + data[1] = addr & 0xff; + } + + msg[0].buf = data; + msg[1].len = min(CRLMODULE_I2C_BLOCK_SIZE, len - offset); + msg[1].buf = &buf[offset]; + r = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); + if (r != ARRAY_SIZE(msg)) { + if (r >= 0) + r = -EIO; + goto err; + } + addr += msg[1].len; + offset += msg[1].len; + } + return 0; +err: + dev_err(&client->dev, "read from offset 0x%x error %d\n", offset, r); + return r; +} diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule-regs.h b/drivers/media/i2c/crlmodule-lite/crlmodule-regs.h new file mode 100644 index 000000000000..45341a16025d --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-regs.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_REGS_H_ +#define __CRLMODULE_REGS_H_ + +struct crl_sensor; +struct crl_register_read_rep; +struct crl_register_write_rep; + +#define CRLMODULE_I2C_BLOCK_SIZE 0x20 + +int crlmodule_read_reg(struct crl_sensor *sensor, + const struct crl_register_read_rep reg, u32 *val); +int crlmodule_write_regs(struct crl_sensor *sensor, + const struct crl_register_write_rep *regs, int len); +int crlmodule_write_reg(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 reg, + u8 len, u32 mask, u32 val); +int crlmodule_block_read(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 addr, + u8 addr_mode, u16 len, u8 *buf); + +#endif /* __CRLMODULE_REGS_H_ */ diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule-sensor-ds.h b/drivers/media/i2c/crlmodule-lite/crlmodule-sensor-ds.h new file mode 100644 index 000000000000..b2021ae1bb40 --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule-sensor-ds.h @@ -0,0 +1,552 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_SENSOR_DS_H_ +#define __CRLMODULE_SENSOR_DS_H_ + +#include "crlmodule.h" + +#define CRL_SUBDEVS 3 + +/* Index for subdevs in any structure with multiple SDs */ +#define CRL_SD_PA_INDEX 0 +#define CRL_SD_BINNER_INDEX 1 +#define CRL_SD_SCALER_INDEX 2 + +#define CRL_REG_LEN_08BIT 1 +#define CRL_REG_LEN_16BIT 2 +#define CRL_REG_LEN_24BIT 
3 +#define CRL_REG_LEN_32BIT 4 + +#define CRL_REG_READ_AND_UPDATE (1 << 3) +#define CRL_REG_LEN_READ_MASK 0x07 +#define CRL_REG_LEN_DELAY 0x10 + +#define CRL_FLIP_DEFAULT_NONE 0 +#define CRL_FLIP_HFLIP 1 +#define CRL_FLIP_VFLIP 2 +#define CRL_FLIP_HFLIP_VFLIP 3 + +#define CRL_FLIP_HFLIP_MASK 0xfe +#define CRL_FLIP_VFLIP_MASK 0xfd + +#define CRL_PIXEL_ORDER_GRBG 0 +#define CRL_PIXEL_ORDER_RGGB 1 +#define CRL_PIXEL_ORDER_BGGR 2 +#define CRL_PIXEL_ORDER_GBRG 3 +#define CRL_PIXEL_ORDER_IGNORE 255 + +/* Flags to notify configuration selection impact from Ctrls */ +#define CRL_IMPACTS_NO_IMPACT 0 +#define CRL_IMPACTS_PLL_SELECTION (1 << 1) +#define CRL_IMPACTS_MODE_SELECTION (1 << 2) + +/* + * crl_dynamic_entity::entity_type is denoted by bits 6 and 7: + * 0 -> crl_dynamic_entity::entity_val is a constant + * 1 -> crl_dynamic_entity::entity_val is a reference to a variable + * 2 -> crl_dynamic_entity::entity_val is a ctrl value + * 3 -> crl_dynamic_entity::entity_val is an 8-bit register address + */ +enum crl_dynamic_entity_type { + CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST = 0, + CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + CRL_DYNAMIC_VAL_OPERAND_TYPE_REG_VAL, /* Only 8-bit registers */ +}; + +/* + * Some combo devices contain sub-devices with different I2C addresses, so + * a flag specifies whether the current device needs an I2C address + * override. + * For backward compatibility the flag defaults to 0, so existing sensor + * configurations do not need to be modified. + */ +#define CRL_I2C_ADDRESS_NO_OVERRIDE 0 + +struct crl_sensor; +struct i2c_client; + +enum crl_subdev_type { + CRL_SUBDEV_TYPE_SCALER, + CRL_SUBDEV_TYPE_BINNER, + CRL_SUBDEV_TYPE_PIXEL_ARRAY, +}; + +enum crl_ctrl_op_type { + CRL_CTRL_SET_OP, + CRL_CTRL_GET_OP, +}; + +enum crl_ctrl_update_context { + SENSOR_IDLE, /* Powered on
but not streaming */ + SENSOR_STREAMING, /* Sensor streaming */ + SENSOR_POWERED_ON, /* streaming or idle */ +}; + +enum crl_operators { + CRL_BITWISE_AND = 0, + CRL_BITWISE_OR, + CRL_BITWISE_LSHIFT, + CRL_BITWISE_RSHIFT, + CRL_BITWISE_XOR, + CRL_BITWISE_COMPLEMENT, + CRL_ADD, + CRL_SUBTRACT, + CRL_MULTIPLY, + CRL_DIV, + CRL_ASSIGNMENT, +}; + +/* Replicated from videodev2.h */ +enum crl_ctrl_type { + CRL_CTRL_TYPE_INTEGER = 1, + CRL_CTRL_TYPE_BOOLEAN, + CRL_CTRL_TYPE_MENU_INT, + CRL_CTRL_TYPE_MENU_ITEMS, + CRL_CTRL_TYPE_BUTTON, + CRL_CTRL_TYPE_INTEGER64, + CRL_CTRL_TYPE_CTRL_CLASS, + CRL_CTRL_TYPE_CUSTOM, +}; + +enum crl_addr_len { + CRL_ADDR_16BIT = 0, + CRL_ADDR_8BIT, + CRL_ADDR_7BIT, +}; + +enum crl_operands { + CRL_CONSTANT = 0, + CRL_VARIABLE, + CRL_CONTROL, +}; + +/* References to the CRL driver member variables */ +enum crl_member_data_reference_ids { + CRL_VAR_REF_OUTPUT_WIDTH = 1, + CRL_VAR_REF_OUTPUT_HEIGHT, + CRL_VAR_REF_PA_CROP_WIDTH, + CRL_VAR_REF_PA_CROP_HEIGHT, + CRL_VAR_REF_FRAME_TIMING_WIDTH, + CRL_VAR_REF_FRAME_TIMING_HEIGHT, + CRL_VAR_REF_BINNER_WIDTH, + CRL_VAR_REF_BINNER_HEIGHT, + CRL_VAR_REF_H_BINN_FACTOR, + CRL_VAR_REF_V_BINN_FACTOR, + CRL_VAR_REF_SCALE_FACTOR, + CRL_VAR_REF_BITSPERPIXEL, + CRL_VAR_REF_PIXELRATE_PA, + CRL_VAR_REF_PIXELRATE_CSI, + CRL_VAR_REF_PIXELRATE_LINK_FREQ, +}; + +enum crl_frame_desc_type { + CRL_MBUS_FRAME_DESC_TYPE_PLATFORM, + CRL_MBUS_FRAME_DESC_TYPE_PARALLEL, + CRL_MBUS_FRAME_DESC_TYPE_CCP2, + CRL_MBUS_FRAME_DESC_TYPE_CSI2, +}; + +enum crl_pwr_ent_type { + CRL_POWER_ETY_GPIO_FROM_PDATA = 1, + CRL_POWER_ETY_GPIO_CUSTOM, + CRL_POWER_ETY_REGULATOR_FRAMEWORK, + CRL_POWER_ETY_CLK_FRAMEWORK, +}; + +struct crl_dynamic_entity { + enum crl_dynamic_entity_type entity_type; + u32 entity_val; +}; + +struct crl_arithmetic_ops { + enum crl_operators op; + struct crl_dynamic_entity operand; +}; + +struct crl_dynamic_calculated_entity { + u8 ops_items; + struct crl_arithmetic_ops *ops; +}; + +struct crl_register_write_rep { + u16 address; + u8 len; + u32 val; + u16 dev_i2c_addr; +}; + +struct crl_register_read_rep { + u16 address; + u8 len; + u32 mask; + u16 dev_i2c_addr; +}; + +/* + * crl_dynamic_register_access is used mainly in the ctrl context. + * It is intended to provide some generic arithmetic operations on the values + * to be written to a control's register or on the values read from a register. + * These arithmetic operations are controlled using struct crl_arithmetic_ops. + * + * An important note is that this structure behaves differently for + * set controls and volatile get controls. + * + * For the set control operation, the usage of the members is straightforward. + * A set control can result in multiple register write operations, so + * there can be more than one crl_dynamic_register_access entry associated + * with a control, resulting in separate register writes. + * + * But for the volatile get control operation, where a control is used + * to query read-only information from the sensor, there can be only one + * crl_dynamic_register_access entry, because the result of a get control is + * a single value. The crl_dynamic_register_access.address, len and mask + * values are + * not used in the volatile get control context.
Instead, all the needed information + * must be encoded in the ops member (struct crl_arithmetic_ops). + */ +struct crl_dynamic_register_access { + u16 address; + u8 len; + u32 mask; + u8 ops_items; + struct crl_arithmetic_ops *ops; + u16 dev_i2c_addr; +}; + +struct crl_sensor_detect_config { + struct crl_register_read_rep reg; /* Register to read */ + unsigned int width; /* width of the value in chars */ +}; + +struct crl_sensor_subdev_config { + enum crl_subdev_type subdev_type; + char name[32]; +}; + +enum crl_ctrl_flag { + CRL_CTRL_FLAG_UPDATE = 1, + CRL_CTRL_FLAG_READ_ONLY = 2, + CRL_CTRL_FLAG_WRITE_ONLY = 4, +}; + +/* + * The ctrl id/value pair which should be compared when selecting a + * configuration. This gives the flexibility to provide any data through set + * ctrl and a selection mechanism for a particular configuration. + */ +struct crl_ctrl_data_pair { + u32 ctrl_id; + u32 data; +}; + +enum crl_dep_ctrl_action_type { + CRL_DEP_CTRL_ACTION_TYPE_SELF = 0, + CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL, +}; + +enum crl_dep_ctrl_condition { + CRL_DEP_CTRL_CONDITION_GREATER = 0, + CRL_DEP_CTRL_CONDITION_LESSER, + CRL_DEP_CTRL_CONDITION_EQUAL, +}; + +enum crl_dep_ctrl_action { + CRL_DEP_CTRL_CONDITION_ADD = 0, + CRL_DEP_CTRL_CONDITION_SUBTRACT, + CRL_DEP_CTRL_CONDITION_MULTIPLY, + CRL_DEP_CTRL_CONDITION_DIVIDE, +}; + +struct crl_dep_ctrl_cond_action { + enum crl_dep_ctrl_condition cond; + u32 cond_value; + enum crl_dep_ctrl_action action; + u32 action_value; +}; + +/* Dependency control provision */ +struct crl_dep_ctrl_provision { + u32 ctrl_id; + enum crl_dep_ctrl_action_type action_type; + unsigned int action_items; + struct crl_dep_ctrl_cond_action *action; +}; + +struct crl_sensor_limits { + unsigned int x_addr_max; + unsigned int y_addr_max; + unsigned int x_addr_min; + unsigned int y_addr_min; + unsigned int min_frame_length_lines; + unsigned int max_frame_length_lines; + unsigned int min_line_length_pixels; + unsigned int max_line_length_pixels; + u8 scaler_m_min; + u8 scaler_m_max; + u8 scaler_n_min; + u8 scaler_n_max; + u8 min_even_inc; + u8 max_even_inc; + u8 min_odd_inc; + u8 max_odd_inc; +}; + +struct crl_ctrl_data_std { + s64 min; + s64 max; + u64 step; + s64 def; +}; + +struct crl_ctrl_data_menu_items { + const char *const *menu; + unsigned int size; +}; + +struct crl_ctrl_data_int_menu { + const s64 *menu; + s64 max; + s64 def; +}; + +union crl_ctrl_data_types { + struct crl_ctrl_data_std std_data; + struct crl_ctrl_data_menu_items menu_items; + struct crl_ctrl_data_int_menu int_menu; +}; + +/* + * Please note a difference in the usage of the "regs" member in the case of + * a volatile get control used for read-only purposes. Please check the + * "struct crl_dynamic_register_access" declaration comments for more details. + * + * Read-only controls must have the CRL_CTRL_FLAG_READ_ONLY flag set.
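+ * + * As a hypothetical sketch, a volatile read-only control entry would use + * op_type = CRL_CTRL_GET_OP, flags = CRL_CTRL_FLAG_READ_ONLY and a single + * crl_dynamic_register_access entry in regs, whose ops encode the + * register to read.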
*/ +struct crl_ctrl_data { + enum crl_subdev_type sd_type; + enum crl_ctrl_op_type op_type; + enum crl_ctrl_update_context context; + char name[32]; + u32 ctrl_id; + enum crl_ctrl_type type; + union crl_ctrl_data_types data; + unsigned long flags; + u32 impact; /* If this control impacts any config selection */ + struct ici_ext_sd_param param; + bool enabled; + unsigned int regs_items; + struct crl_dynamic_register_access *regs; + unsigned int dep_items; + struct crl_dep_ctrl_provision *dep_ctrls; + s64 min; + s64 max; + u64 step; + s64 def; +}; + +struct crl_pll_configuration { + s64 input_clk; + s64 op_sys_clk; + u8 bitsperpixel; + u32 pixel_rate_csi; + u32 pixel_rate_pa; + u8 csi_lanes; + unsigned int comp_items; + struct crl_ctrl_data_pair *ctrl_data; + unsigned int pll_regs_items; + const struct crl_register_write_rep *pll_regs; +}; + +struct crl_subdev_rect_rep { + enum crl_subdev_type subdev_type; + struct ici_rect in_rect; + struct ici_rect out_rect; +}; + +struct crl_mode_rep { + unsigned int sd_rects_items; + const struct crl_subdev_rect_rep *sd_rects; + u8 binn_hor; + u8 binn_vert; + u8 scale_m; + s32 width; + s32 height; + unsigned int comp_items; + struct crl_ctrl_data_pair *ctrl_data; + unsigned int mode_regs_items; + const struct crl_register_write_rep *mode_regs; + + /* + * Minimum and maximum values for line length pixels and frame length + * lines are added for modes. This facilitates easy handling of modes + * with binning or skipping, which affect the calculation of the + * vblank and hblank values. + * + * The blank values are limited based on the following logic: + * + * If mode specific limits are available: + * hblank = clamp(min_llp - PA_width, max_llp - PA_width) + * vblank = clamp(min_fll - PA_height, max_fll - PA_height) + * + * If mode specific blanking limits are not available, then the sensor + * limits will be used in the same manner. + * + * If sensor mode limits are not available, then the values will be + * written directly to the associated control registers.
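+ * + * For example, with a hypothetical mode of PA_width = 1920, + * min_llp = 2200 and max_llp = 4000, hblank would be limited to the + * range [280, 2080].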
+ */ + s32 min_llp; /* minimum/maximum value for line length pixels */ + s32 max_llp; + s32 min_fll; + s32 max_fll; /* minimum/maximum value for frame length lines */ +}; + +struct crl_csi_data_fmt { + u32 code; + u8 pixel_order; + u8 bits_per_pixel; + unsigned int regs_items; + const struct crl_register_write_rep *regs; +}; + +struct crl_flip_data { + u8 flip; + u8 pixel_order; +}; + +struct crl_power_seq_entity { + enum crl_pwr_ent_type type; + char ent_name[12]; + int ent_number; + u16 address; + unsigned int val; + unsigned int undo_val; /* Undo value if any previous step failed */ + unsigned int delay; /* delay in micro seconds */ + struct regulator *regulator_priv; /* R/W */ +}; + +struct crl_nvm_blob { + u8 dev_addr; + u16 start_addr; + u16 size; +}; + +struct crl_nvm { + unsigned int nvm_preop_regs_items; + const struct crl_register_write_rep *nvm_preop_regs; + + unsigned int nvm_postop_regs_items; + const struct crl_register_write_rep *nvm_postop_regs; + + unsigned int nvm_blobs_items; + struct crl_nvm_blob *nvm_config; + u32 nvm_flags; +}; + +/* Representation for v4l2_mbus_frame_desc_entry */ +struct crl_frame_desc { + struct crl_dynamic_entity flags; + struct crl_dynamic_entity bpp; + struct crl_dynamic_entity pixelcode; + struct crl_dynamic_entity start_line; + struct crl_dynamic_entity start_pixel; + struct crl_dynamic_calculated_entity width; + struct crl_dynamic_calculated_entity height; + struct crl_dynamic_entity length; + struct crl_dynamic_entity csi2_channel; + struct crl_dynamic_entity csi2_data_type; +}; + +typedef int (*sensor_specific_init)(struct i2c_client*); +typedef int (*sensor_specific_cleanup)(struct i2c_client*); + +struct crl_sensor_configuration { + + const struct crl_clock_entity *clock_entity; + + const unsigned int power_items; + const struct crl_power_seq_entity *power_entities; + const unsigned int power_delay; /* in micro seconds */ + + const unsigned int onetime_init_regs_items; + const struct crl_register_write_rep *onetime_init_regs; + + const unsigned int powerup_regs_items; + const struct crl_register_write_rep *powerup_regs; + + const unsigned int poweroff_regs_items; + const struct crl_register_write_rep *poweroff_regs; + + const unsigned int id_reg_items; + const struct crl_sensor_detect_config *id_regs; + + const unsigned int subdev_items; + const struct crl_sensor_subdev_config *subdevs; + + const struct crl_sensor_limits *sensor_limits; + + const unsigned int pll_config_items; + const struct crl_pll_configuration *pll_configs; + const s64 *op_sys_clk; + + const unsigned int modes_items; + const struct crl_mode_rep *modes; + /* + * Fail safe mode should be the largest resolution available in the + * mode list. If none of the mode parameters are matched, the driver + * will select this mode for streaming. 
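+ * For example, if a hypothetical modes[] listed its full-resolution + * entry first, fail_safe_mode_index would be 0.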
*/ + const unsigned int fail_safe_mode_index; + + const unsigned int streamon_regs_items; + const struct crl_register_write_rep *streamon_regs; + + const unsigned int streamoff_regs_items; + const struct crl_register_write_rep *streamoff_regs; + + const unsigned int ctrl_items; + const struct crl_ctrl_data *ctrl_bank; + + const unsigned int csi_fmts_items; + const struct crl_csi_data_fmt *csi_fmts; + + const unsigned int flip_items; + const struct crl_flip_data *flip_data; + + struct crl_nvm crl_nvm_info; + + enum crl_addr_len addr_len; + + unsigned int frame_desc_entries; + enum crl_frame_desc_type frame_desc_type; + struct crl_frame_desc *frame_desc; + char *msr_file_name; + + sensor_specific_init sensor_init; + sensor_specific_cleanup sensor_cleanup; +}; + +struct crlmodule_sensors { + char *pname; + char *name; + struct crl_sensor_configuration *ds; +}; + +/* + * Function to populate the CRL data structure from the sensor configuration + * definition file + */ +int crlmodule_populate_ds(struct crl_sensor *sensor, struct device *dev); + +/* + * Function to validate the contents of the CRL data structure, checking that + * all the required fields are filled and within the limits. + */ +int crlmodule_validate_ds(struct crl_sensor *sensor); + +/* Function to free all resources allocated for the CRL data structure */ +void crlmodule_release_ds(struct crl_sensor *sensor); + +#endif /* __CRLMODULE_SENSOR_DS_H_ */ diff --git a/drivers/media/i2c/crlmodule-lite/crlmodule.h b/drivers/media/i2c/crlmodule-lite/crlmodule.h new file mode 100644 index 000000000000..f522409cb22e --- /dev/null +++ b/drivers/media/i2c/crlmodule-lite/crlmodule.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __CRLMODULE_PRIV_H_ +#define __CRLMODULE_PRIV_H_ + +#include +#include +#include +#include +#include "../../../../include/media/crlmodule-lite.h" +#include +#include "crlmodule-sensor-ds.h" + +#define CRL_SUBDEVS 3 + +#define CRL_PA_PAD_SRC 0 +#define CRL_PAD_SINK 0 +#define CRL_PAD_SRC 1 +#define CRL_PADS 2 + +struct crl_subdev { + struct ici_ext_subdev sd; + struct ici_rect sink_fmt; + struct ici_rect crop[2]; + struct ici_rect compose; /* compose on sink */ + unsigned short sink_pad; + unsigned short source_pad; + int npads; + struct crl_sensor *sensor; + unsigned int field; +}; + +struct crl_sensor { + /* + * "mutex" is used to serialise access to all fields here + * except ctrls at the end of the struct. "mutex" is also + * used to serialise access to file handle specific + * information. The exception to this rule is the power_mutex + * below. + */ + struct mutex mutex; + /* + * The power mutex became a necessity because v4l2_ctrl_handler_setup + * is called from the power-on function, which needs to be serialised, + * but v4l2_ctrl_handler_setup takes "mutex", so that lock cannot be + * used here. + */ + struct mutex power_mutex; + + struct crl_subdev ssds[CRL_SUBDEVS]; + u32 ssds_used; + struct crl_subdev *src; + struct crl_subdev *binner; + struct crl_subdev *scaler; + struct crl_subdev *pixel_array; + + struct crlmodule_lite_platform_data *platform_data; + + u8 binning_horizontal; + u8 binning_vertical; + + u8 sensor_mode; + u8 scale_m; + u8 fmt_index; + u8 flip_info; + u8 pll_index; + + + int power_count; + + bool streaming; + + struct crl_sensor_configuration *sensor_ds; + struct crl_ctrl_data *ctrl_bank; + + /* These are mandatory controls,
+ struct crl_ctrl_data *pixel_rate_pa;
+ struct crl_ctrl_data *link_freq;
+ struct crl_ctrl_data *pixel_rate_csi;
+
+ s64 *link_freq_menu;
+
+ /* Set if an extra v4l2 control has an impact on PLL selection */
+ bool ext_ctrl_impacts_pll_selection;
+ bool ext_ctrl_impacts_mode_selection;
+ bool blanking_ctrl_not_use;
+ bool direct_mode_in_use;
+ const struct crl_mode_rep *current_mode;
+
+ struct clk *xclk;
+ struct crl_power_seq_entity *pwr_entity;
+
+ u8 *nvm_data;
+ u16 nvm_size;
+
+ /* Pointer to the binary file which contains
+ * tunable IQ parameters such as NR, DPC and BLC.
+ * Not all MSRs have been moved to the binary
+ * at the moment.
+ */
+ const struct firmware *msr_list;
+
+ struct ici_ext_subdev_register reg;
+};
+
+#define to_crlmodule_subdev(_sd) \
+ container_of(_sd, struct crl_subdev, sd)
+
+#define to_crlmodule_sensor(_sd) \
+ (to_crlmodule_subdev(_sd)->sensor)
+
+#endif /* __CRLMODULE_PRIV_H_ */
diff --git a/drivers/media/i2c/crlmodule/Kconfig b/drivers/media/i2c/crlmodule/Kconfig
new file mode 100755
index 000000000000..13ad4307cc17
--- /dev/null
+++ b/drivers/media/i2c/crlmodule/Kconfig
@@ -0,0 +1,12 @@
+config VIDEO_CRLMODULE
+ tristate "CRL Module sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ ---help---
+ This is a generic driver for CRL based camera modules.
+config CRLMODULE_RD_NVM_TO_VCM
+ bool "CRL Module sensor passes NVM value to VCM"
+ depends on VIDEO_CRLMODULE
+ ---help---
+ Enable this for sensors that pass an NVM value to
+ control the VCM.
diff --git a/drivers/media/i2c/crlmodule/Makefile b/drivers/media/i2c/crlmodule/Makefile
new file mode 100644
index 000000000000..c3a1fed9c6bb
--- /dev/null
+++ b/drivers/media/i2c/crlmodule/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2010 - 2018, Intel Corporation.
+
+# Force compile warnings to be checked so that the build stays warning-free.
+# Note: the build may break when GCC is upgraded and introduces new warnings.
+ccflags-y := -Wall -Wextra +ccflags-y += $(call cc-disable-warning, unused-parameter) +ccflags-y += $(call cc-disable-warning, implicit-fallthrough) +ccflags-y += $(call cc-disable-warning, missing-field-initializers) +ccflags-$(CONFIG_VIDEO_INTEL_IPU_WERROR) += -Werror + +crlmodule-objs += crlmodule-core.o crlmodule-data.o \ + crlmodule-regs.o crlmodule-nvm.o \ + crl_adv7481_hdmi_configuration.o \ + crlmodule-msrlist.o +obj-$(CONFIG_VIDEO_CRLMODULE) += crlmodule.o + +ccflags-y += -Idrivers/media/i2c diff --git a/drivers/media/i2c/crlmodule/crl_adv7481_configuration.h b/drivers/media/i2c/crlmodule/crl_adv7481_configuration.h new file mode 100644 index 000000000000..9cf6e37074a8 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_adv7481_configuration.h @@ -0,0 +1,706 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation + * + * Author: Jianxu Zheng + * + */ + +#ifndef __CRLMODULE_ADV7481_CONFIGURATION_H_ +#define __CRLMODULE_ADV7481_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep adv7481_powerup_regset[] = { + {0xFF, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* SW reset */ + {0x00, CRL_REG_LEN_DELAY, 0x05, 0x00}, /* Delay 5ms */ + {0x01, CRL_REG_LEN_08BIT, 0x76, 0xE0}, /* ADI recommended setting */ + {0xF2, CRL_REG_LEN_08BIT, 0x01, 0xE0}, /* I2C Rd Auto-Increment=1 */ + {0xF3, CRL_REG_LEN_08BIT, 0x4C, 0xE0}, /* DPLL Map Address */ + {0xF4, CRL_REG_LEN_08BIT, 0x44, 0xE0}, /* CP Map Address */ + {0xF5, CRL_REG_LEN_08BIT, 0x68, 0xE0}, /* HDMI RX Map Address */ + {0xF6, CRL_REG_LEN_08BIT, 0x6C, 0xE0}, /* EDID Map Address */ + {0xF7, CRL_REG_LEN_08BIT, 0x64, 0xE0}, /* HDMI RX Repeater Map Addr */ + {0xF8, CRL_REG_LEN_08BIT, 0x62, 0xE0}, /* HDMI RX Infoframe Map Addr */ + {0xF9, CRL_REG_LEN_08BIT, 0xF0, 0xE0}, /* CBUS Map Address Set */ + {0xFA, CRL_REG_LEN_08BIT, 0x82, 0xE0}, /* CEC Map Address Set */ + {0xFB, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* SDP Main Map Address */ + {0xFC, CRL_REG_LEN_08BIT, 0x90, 0xE0}, /* CSI-TXB Map Address */ + {0xFD, CRL_REG_LEN_08BIT, 0x94, 0xE0}, /* CSI-TXA Map Address */ + {0x00, CRL_REG_LEN_08BIT, 0x50, 0xE0}, /* Disable Chip Powerdown & + HDMI Rx Block */ + {0x40, CRL_REG_LEN_08BIT, 0x83, 0x64}, /* Enable HDCP 1.1 */ + {0x00, CRL_REG_LEN_08BIT, 0x08, 0x68}, /* ADI recommended setting */ + {0x3D, CRL_REG_LEN_08BIT, 0x10, 0x68}, /* ADI recommended setting */ + {0x3E, CRL_REG_LEN_08BIT, 0x69, 0x68}, /* ADI recommended setting */ + {0x3F, CRL_REG_LEN_08BIT, 0x46, 0x68}, /* ADI recommended setting */ + {0x4E, CRL_REG_LEN_08BIT, 0xFE, 0x68}, /* ADI recommended setting */ + {0x4F, CRL_REG_LEN_08BIT, 0x08, 0x68}, /* ADI recommended setting */ + {0x57, CRL_REG_LEN_08BIT, 0xA3, 0x68}, /* ADI recommended setting */ + {0x58, CRL_REG_LEN_08BIT, 0x04, 0x68}, /* ADI recommended setting */ + {0x85, CRL_REG_LEN_08BIT, 0x10, 0x68}, /* ADI recommended setting */ + {0x83, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* Enable All Terminations */ + {0xBE, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* ADI recommended setting */ + {0x6C, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* HPA Manual Enable */ + {0xF8, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* HPA Asserted */ + {0x0F, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* Audio Mute Speed = + Fastest Smallest Step Size */ + {0x70, CRL_REG_LEN_08BIT, 0xA0, 0x64}, /* Write primary edid size */ + {0x74, CRL_REG_LEN_08BIT, 0x01, 0x64}, /* Enable manual edid */ + {0x7A, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Write edid sram select */ + {0xF6, CRL_REG_LEN_08BIT, 0x6C, 0xE0}, /* Write edid map bus address */ + + {0x00*4, 
CRL_REG_LEN_32BIT, 0x00FFFFFF, 0x6C}, /* EDID programming */ + {0x01*4, CRL_REG_LEN_32BIT, 0xFFFFFF00, 0x6C}, /* EDID programming */ + {0x02*4, CRL_REG_LEN_32BIT, 0x4DD90100, 0x6C}, /* EDID programming */ + {0x03*4, CRL_REG_LEN_32BIT, 0x00000000, 0x6C}, /* EDID programming */ + {0x04*4, CRL_REG_LEN_32BIT, 0x00110103, 0x6C}, /* EDID programming */ + {0x05*4, CRL_REG_LEN_32BIT, 0x80000078, 0x6C}, /* EDID programming */ + {0x06*4, CRL_REG_LEN_32BIT, 0x0A0DC9A0, 0x6C}, /* EDID programming */ + {0x07*4, CRL_REG_LEN_32BIT, 0x57479827, 0x6C}, /* EDID programming */ + {0x08*4, CRL_REG_LEN_32BIT, 0x12484C00, 0x6C}, /* EDID programming */ + {0x09*4, CRL_REG_LEN_32BIT, 0x00000101, 0x6C}, /* EDID programming */ + {0x0A*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0B*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0C*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */ + {0x0D*4, CRL_REG_LEN_32BIT, 0x0101011D, 0x6C}, /* EDID programming */ + {0x0E*4, CRL_REG_LEN_32BIT, 0x80D0721C, 0x6C}, /* EDID programming */ + {0x0F*4, CRL_REG_LEN_32BIT, 0x1620102C, 0x6C}, /* EDID programming */ + {0x10*4, CRL_REG_LEN_32BIT, 0x2580C48E, 0x6C}, /* EDID programming */ + {0x11*4, CRL_REG_LEN_32BIT, 0x2100009E, 0x6C}, /* EDID programming */ + {0x12*4, CRL_REG_LEN_32BIT, 0x011D8018, 0x6C}, /* EDID programming */ + {0x13*4, CRL_REG_LEN_32BIT, 0x711C1620, 0x6C}, /* EDID programming */ + {0x14*4, CRL_REG_LEN_32BIT, 0x582C2500, 0x6C}, /* EDID programming */ + {0x15*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x16*4, CRL_REG_LEN_32BIT, 0x009E0000, 0x6C}, /* EDID programming */ + {0x17*4, CRL_REG_LEN_32BIT, 0x00FC0048, 0x6C}, /* EDID programming */ + {0x18*4, CRL_REG_LEN_32BIT, 0x444D4920, 0x6C}, /* EDID programming */ + {0x19*4, CRL_REG_LEN_32BIT, 0x4C4C430A, 0x6C}, /* EDID programming */ + {0x1A*4, CRL_REG_LEN_32BIT, 0x20202020, 0x6C}, /* EDID programming */ + {0x1B*4, CRL_REG_LEN_32BIT, 0x000000FD, 0x6C}, /* EDID programming */ + {0x1C*4, CRL_REG_LEN_32BIT, 0x003B3D0F, 0x6C}, /* EDID programming */ + {0x1D*4, CRL_REG_LEN_32BIT, 0x2D08000A, 0x6C}, /* EDID programming */ + {0x1E*4, CRL_REG_LEN_32BIT, 0x20202020, 0x6C}, /* EDID programming */ + {0x1F*4, CRL_REG_LEN_32BIT, 0x202001C1, 0x6C}, /* EDID programming */ + {0x20*4, CRL_REG_LEN_32BIT, 0x02031E77, 0x6C}, /* EDID programming */ + {0x21*4, CRL_REG_LEN_32BIT, 0x4F941305, 0x6C}, /* EDID programming */ + {0x22*4, CRL_REG_LEN_32BIT, 0x03040201, 0x6C}, /* EDID programming */ + {0x23*4, CRL_REG_LEN_32BIT, 0x16150706, 0x6C}, /* EDID programming */ + {0x24*4, CRL_REG_LEN_32BIT, 0x1110121F, 0x6C}, /* EDID programming */ + {0x25*4, CRL_REG_LEN_32BIT, 0x23090701, 0x6C}, /* EDID programming */ + {0x26*4, CRL_REG_LEN_32BIT, 0x65030C00, 0x6C}, /* EDID programming */ + {0x27*4, CRL_REG_LEN_32BIT, 0x10008C0A, 0x6C}, /* EDID programming */ + {0x28*4, CRL_REG_LEN_32BIT, 0xD0902040, 0x6C}, /* EDID programming */ + {0x29*4, CRL_REG_LEN_32BIT, 0x31200C40, 0x6C}, /* EDID programming */ + {0x2A*4, CRL_REG_LEN_32BIT, 0x5500138E, 0x6C}, /* EDID programming */ + {0x2B*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x2C*4, CRL_REG_LEN_32BIT, 0x011D00BC, 0x6C}, /* EDID programming */ + {0x2D*4, CRL_REG_LEN_32BIT, 0x52D01E20, 0x6C}, /* EDID programming */ + {0x2E*4, CRL_REG_LEN_32BIT, 0xB8285540, 0x6C}, /* EDID programming */ + {0x2F*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x30*4, CRL_REG_LEN_32BIT, 0x001E8C0A, 0x6C}, /* EDID programming */ + {0x31*4, CRL_REG_LEN_32BIT, 
0xD08A20E0, 0x6C}, /* EDID programming */ + {0x32*4, CRL_REG_LEN_32BIT, 0x2D10103E, 0x6C}, /* EDID programming */ + {0x33*4, CRL_REG_LEN_32BIT, 0x9600C48E, 0x6C}, /* EDID programming */ + {0x34*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x35*4, CRL_REG_LEN_32BIT, 0x011D0072, 0x6C}, /* EDID programming */ + {0x36*4, CRL_REG_LEN_32BIT, 0x51D01E20, 0x6C}, /* EDID programming */ + {0x37*4, CRL_REG_LEN_32BIT, 0x6E285500, 0x6C}, /* EDID programming */ + {0x38*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x39*4, CRL_REG_LEN_32BIT, 0x001E8C0A, 0x6C}, /* EDID programming */ + {0x3A*4, CRL_REG_LEN_32BIT, 0xD08A20E0, 0x6C}, /* EDID programming */ + {0x3B*4, CRL_REG_LEN_32BIT, 0x2D10103E, 0x6C}, /* EDID programming */ + {0x3C*4, CRL_REG_LEN_32BIT, 0x9600138E, 0x6C}, /* EDID programming */ + {0x3D*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x3E*4, CRL_REG_LEN_32BIT, 0x00000000, 0x6C}, /* EDID programming */ + {0x3F*4, CRL_REG_LEN_32BIT, 0x000000CB, 0x6C}, /* EDID programming */ + + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, /* No MIPI frame start */ + {0x26, CRL_REG_LEN_08BIT, 0x55, 0x94}, /* Disable sleep mode */ + {0x27, CRL_REG_LEN_08BIT, 0x55, 0x94}, /* Disable escape mode */ + {0x7E, CRL_REG_LEN_08BIT, 0xA0, 0x94}, /* ADI recommended setting */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x90}, /* ADI recommended setting */ + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x90}, /* ADI recommended setting */ + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, /* ADI recommended setting */ + {0x34, CRL_REG_LEN_08BIT, 0x55, 0x94}, /* ADI recommended setting */ + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, /* ADI recommended setting */ + {0xCA, CRL_REG_LEN_08BIT, 0x02, 0x94}, /* ADI recommended setting */ + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, /* ADI recommended setting */ + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, /* ADI recommended setting */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* Power up DPHY */ + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, /* ADI recommended setting */ + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, /* ADI recommended setting */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_1080p[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x5E, 0xE0}, /* Select Resolution 1080P */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* 1080P shift left 44 pixel */ + {0x8C, CRL_REG_LEN_08BIT, 0xD4, 0x44}, /* 1080P shift left 44 pixel */ + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, /* 1080P shift left 44 pixel */ + {0x8D, CRL_REG_LEN_08BIT, 0xD4, 0x44}, /* 1080P shift left 44 pixel */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_720p[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x53, 0xE0}, /* Select Resolution 720P */ + {0x12, 
CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* 720P shift left 40 pixel */ + {0x8C, CRL_REG_LEN_08BIT, 0xD8, 0x44}, /* 720P shift left 40 pixel */ + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, /* 720P shift left 40 pixel */ + {0x8D, CRL_REG_LEN_08BIT, 0xD8, 0x44}, /* 720P shift left 40 pixel */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_VGA[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x88, 0xE0}, /* Select Resolution VGA */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_1080i[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x54, 0xE0}, /* Select Resolution 1080i*/ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x8B, CRL_REG_LEN_08BIT, 0x43, 0x44}, /* 1080i shift left 44 pixel */ + {0x8C, CRL_REG_LEN_08BIT, 0xD4, 0x44}, /* 1080i shift left 44 pixel */ + {0x8B, CRL_REG_LEN_08BIT, 0x4F, 0x44}, /* 1080i shift left 44 pixel */ + {0x8D, CRL_REG_LEN_08BIT, 0xD4, 0x44}, /* 1080i shift left 44 pixel */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_480i[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x40, 0xE0}, /* Select Resolution 480i */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 
0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_576p[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x4B, 0xE0}, /* Select Resolution 576p*/ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_mode_576i[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x05, CRL_REG_LEN_08BIT, 0x41, 0xE0}, /* Select Resolution 576i*/ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* ADI recommended setting */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI recommended setting */ + + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS TRISTATED */ + {0x10, CRL_REG_LEN_08BIT, 0xA0, 0xE0}, /* Enable 4-lane CSI Tx & Pixel Port */ + {0x1C, CRL_REG_LEN_08BIT, 0x3A, 0xE0}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_streamon_regs[] = { + {0x00, CRL_REG_LEN_DELAY, 0x02, 0x00}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, /* Power-up CSI-TX */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, /* ADI recommended setting */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_streamoff_regs[] = { + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, /* ADI Recommended Write */ + {0x1E, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* Reset the clock Lane */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* i2c_mipi_pll_en - 1'b0 Disable MIPI PLL */ + {0xC1, CRL_REG_LEN_08BIT, 0x3B, 0x94}, +}; + +static struct crl_sensor_detect_config adv7481_sensor_detect_regset[] = { + { + .reg = { 0x0019, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 5, + }, + { + .reg = { 0x0016, CRL_REG_LEN_16BIT, 0x0000ffff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration adv7481_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 16, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + +}; + +static struct crl_subdev_rect_rep adv7481_1080p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep 
adv7481_720p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_subdev_rect_rep adv7481_VGA_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 640, + .out_rect.height = 480, + }, +}; + +static struct crl_subdev_rect_rep adv7481_1080i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 540, + }, +}; + +static struct crl_subdev_rect_rep adv7481_480i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, +}; + +static struct crl_subdev_rect_rep adv7481_576p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 576, + }, +}; + +static struct crl_subdev_rect_rep adv7481_576i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 288, + }, +}; +static struct crl_mode_rep adv7481_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(adv7481_1080p_rects), + .sd_rects = adv7481_1080p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(adv7481_mode_1080p), + 
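+ /* Each mode entry pairs its rectangle chain (sd_rects) with the
+ * register list written when the mode is selected (mode_regs);
+ * mode_regs_items must match the length of that register table. */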
.mode_regs = adv7481_mode_1080p,
+ },
+ {
+ .sd_rects_items = ARRAY_SIZE(adv7481_720p_rects),
+ .sd_rects = adv7481_720p_rects,
+ .binn_hor = 1,
+ .binn_vert = 1,
+ .scale_m = 1,
+ .width = 1280,
+ .height = 720,
+ .comp_items = 0,
+ .ctrl_data = 0,
+ .mode_regs_items = ARRAY_SIZE(adv7481_mode_720p),
+ .mode_regs = adv7481_mode_720p,
+ },
+ {
+ .sd_rects_items = ARRAY_SIZE(adv7481_VGA_rects),
+ .sd_rects = adv7481_VGA_rects,
+ .binn_hor = 3,
+ .binn_vert = 2,
+ .scale_m = 1,
+ .width = 640,
+ .height = 480,
+ .comp_items = 0,
+ .ctrl_data = 0,
+ .mode_regs_items = ARRAY_SIZE(adv7481_mode_VGA),
+ .mode_regs = adv7481_mode_VGA,
+ },
+ {
+ .sd_rects_items = ARRAY_SIZE(adv7481_1080i_rects),
+ .sd_rects = adv7481_1080i_rects,
+ .binn_hor = 1,
+ .binn_vert = 2,
+ .scale_m = 1,
+ .width = 1920,
+ .height = 540,
+ .comp_items = 0,
+ .ctrl_data = 0,
+ .mode_regs_items = ARRAY_SIZE(adv7481_mode_1080i),
+ .mode_regs = adv7481_mode_1080i,
+ },
+ {
+ .sd_rects_items = ARRAY_SIZE(adv7481_480i_rects),
+ .sd_rects = adv7481_480i_rects,
+ .binn_hor = 2,
+ .binn_vert = 4,
+ .scale_m = 1,
+ .width = 720,
+ .height = 240,
+ .comp_items = 0,
+ .ctrl_data = 0,
+ .mode_regs_items = ARRAY_SIZE(adv7481_mode_480i),
+ .mode_regs = adv7481_mode_480i,
+ },
+ {
+ .sd_rects_items = ARRAY_SIZE(adv7481_576p_rects),
+ .sd_rects = adv7481_576p_rects,
+ .binn_hor = 2,
+ .binn_vert = 1,
+ .scale_m = 1,
+ .width = 720,
+ .height = 576,
+ .comp_items = 0,
+ .ctrl_data = 0,
+ .mode_regs_items = ARRAY_SIZE(adv7481_mode_576p),
+ .mode_regs = adv7481_mode_576p,
+ },
+ {
+ .sd_rects_items = ARRAY_SIZE(adv7481_576i_rects),
+ .sd_rects = adv7481_576i_rects,
+ .binn_hor = 2,
+ .binn_vert = 3,
+ .scale_m = 1,
+ .width = 720,
+ .height = 288,
+ .comp_items = 0,
+ .ctrl_data = 0,
+ .mode_regs_items = ARRAY_SIZE(adv7481_mode_576i),
+ .mode_regs = adv7481_mode_576i,
+ },
+};
+
+static struct crl_sensor_subdev_config adv7481_sensor_subdevs[] = {
+ {
+ .subdev_type = CRL_SUBDEV_TYPE_BINNER,
+ .name = "adv7481 binner",
+ },
+ {
+ .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY,
+ .name = "adv7481 pixel array",
+ },
+};
+
+static struct crl_sensor_limits adv7481_sensor_limits = {
+ .x_addr_min = 0,
+ .y_addr_min = 0,
+ .x_addr_max = 1920,
+ .y_addr_max = 1080,
+ .min_frame_length_lines = 160,
+ .max_frame_length_lines = 65535,
+ .min_line_length_pixels = 6024,
+ .max_line_length_pixels = 32752,
+ .scaler_m_min = 1,
+ .scaler_m_max = 1,
+ .scaler_n_min = 1,
+ .scaler_n_max = 1,
+ .min_even_inc = 1,
+ .max_even_inc = 1,
+ .min_odd_inc = 1,
+ .max_odd_inc = 1,
+};
+
+static struct crl_csi_data_fmt adv7481_crl_csi_data_fmt[] = {
+ {
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .pixel_order = CRL_PIXEL_ORDER_GRBG,
+ .bits_per_pixel = 16,
+ .regs_items = ARRAY_SIZE(adv7481_mode_1080p),
+ .regs = adv7481_mode_1080p, /* default yuv422 format */
+ },
+};
+
+static struct crl_v4l2_ctrl adv7481_v4l2_ctrls[] = {
+ {
+ .sd_type = CRL_SUBDEV_TYPE_BINNER,
+ .op_type = CRL_V4L2_CTRL_SET_OP,
+ .context = SENSOR_IDLE,
+ .ctrl_id = V4L2_CID_LINK_FREQ,
+ .name = "V4L2_CID_LINK_FREQ",
+ .type = CRL_V4L2_CTRL_TYPE_MENU_INT,
+ .data.v4l2_int_menu.def = 0,
+ .data.v4l2_int_menu.max = 0,
+ .data.v4l2_int_menu.menu = 0,
+ .flags = 0,
+ .impact = CRL_IMPACTS_NO_IMPACT,
+ .regs_items = 0,
+ .regs = 0,
+ .dep_items = 0,
+ .dep_ctrls = 0,
+ },
+ {
+ .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY,
+ .op_type = CRL_V4L2_CTRL_GET_OP,
+ .context = SENSOR_POWERED_ON,
+ .ctrl_id = V4L2_CID_PIXEL_RATE,
+ .name = "V4L2_CID_PIXEL_RATE_PA",
+ .type = CRL_V4L2_CTRL_TYPE_INTEGER,
+ .data.std_data.min = 0,
+ .data.std_data.max = 0,
+ .data.std_data.step = 1,
+ .data.std_data.def = 0,
+ .flags = 0,
+ .impact = CRL_IMPACTS_NO_IMPACT,
+ .regs_items = 0,
+ .regs = 0,
+ .dep_items = 0,
+ .dep_ctrls = 0,
+ },
+ {
+ .sd_type = CRL_SUBDEV_TYPE_BINNER,
+ .op_type = CRL_V4L2_CTRL_GET_OP,
+ .context = SENSOR_POWERED_ON,
+ .ctrl_id = V4L2_CID_PIXEL_RATE,
+ .name = "V4L2_CID_PIXEL_RATE_CSI",
+ .type = CRL_V4L2_CTRL_TYPE_INTEGER,
+ .data.std_data.min = 0,
+ .data.std_data.max = 0,
+ .data.std_data.step = 1,
+ .data.std_data.def = 0,
+ .flags = 0,
+ .impact = CRL_IMPACTS_NO_IMPACT,
+ .regs_items = 0,
+ .regs = 0,
+ .dep_items = 0,
+ .dep_ctrls = 0,
+ },
+};
+
+/* Power items; they are enabled in the order they are listed here */
+static struct crl_power_seq_entity adv7481_power_items[] = {
+ {
+ .type = CRL_POWER_ETY_CLK_FRAMEWORK,
+ .val = 24000000,
+ },
+ {
+ .type = CRL_POWER_ETY_GPIO_FROM_PDATA,
+ .val = 1,
+ },
+};
+
+static struct crl_sensor_configuration adv7481_crl_configuration = {
+
+ .power_items = ARRAY_SIZE(adv7481_power_items),
+ .power_entities = adv7481_power_items,
+
+ .powerup_regs_items = ARRAY_SIZE(adv7481_powerup_regset),
+ .powerup_regs = adv7481_powerup_regset,
+
+ .poweroff_regs_items = ARRAY_SIZE(adv7481_streamoff_regs),
+ .poweroff_regs = adv7481_streamoff_regs,
+
+ .id_reg_items = ARRAY_SIZE(adv7481_sensor_detect_regset),
+ .id_regs = adv7481_sensor_detect_regset,
+
+ .subdev_items = ARRAY_SIZE(adv7481_sensor_subdevs),
+ .subdevs = adv7481_sensor_subdevs,
+
+ .sensor_limits = &adv7481_sensor_limits,
+
+ .pll_config_items = ARRAY_SIZE(adv7481_pll_configurations),
+ .pll_configs = adv7481_pll_configurations,
+
+ .modes_items = ARRAY_SIZE(adv7481_modes),
+ .modes = adv7481_modes,
+
+ .streamon_regs_items = ARRAY_SIZE(adv7481_streamon_regs),
+ .streamon_regs = adv7481_streamon_regs,
+
+ .streamoff_regs_items = ARRAY_SIZE(adv7481_streamoff_regs),
+ .streamoff_regs = adv7481_streamoff_regs,
+
+ .v4l2_ctrls_items = ARRAY_SIZE(adv7481_v4l2_ctrls),
+ .v4l2_ctrl_bank = adv7481_v4l2_ctrls,
+
+ .csi_fmts_items = ARRAY_SIZE(adv7481_crl_csi_data_fmt),
+ .csi_fmts = adv7481_crl_csi_data_fmt,
+};
+
+#endif /* __CRLMODULE_ADV7481_CONFIGURATION_H_ */
diff --git a/drivers/media/i2c/crlmodule/crl_adv7481_cvbs_configuration.h b/drivers/media/i2c/crlmodule/crl_adv7481_cvbs_configuration.h
new file mode 100644
index 000000000000..34f33ac8762c
--- /dev/null
+++ b/drivers/media/i2c/crlmodule/crl_adv7481_cvbs_configuration.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2015 - 2018 Intel Corporation
+ *
+ * Author: Jianxu Zheng
+ *
+ */
+
+#ifndef __CRLMODULE_ADV7481_CVBS_CONFIGURATION_H_
+#define __CRLMODULE_ADV7481_CVBS_CONFIGURATION_H_
+
+#include "crlmodule-sensor-ds.h"
+
+static struct crl_register_write_rep adv7481_cvbs_powerup_regset[] = {
+ {0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/
+ SPI PINS TRISTATED */
+ {0x0F, CRL_REG_LEN_08BIT, 0x00, 0xF2}, /* Exit Power Down Mode */
+ {0x52, CRL_REG_LEN_08BIT, 0xC0, 0xF2}, /* ADI Required Write */
+ {0x00, CRL_REG_LEN_08BIT, 0x0E, 0xF2}, /* INSEL = CVBS in on Ain 1 */
+ {0x0E, CRL_REG_LEN_08BIT, 0x80, 0xF2}, /* ADI Required Write */
+ {0x9C, CRL_REG_LEN_08BIT, 0x00, 0xF2}, /* ADI Required Write */
+ {0x9C, CRL_REG_LEN_08BIT, 0xFF, 0xF2}, /* ADI Required Write */
+ {0x0E, CRL_REG_LEN_08BIT, 0x00, 0xF2}, /* ADI Required Write */
+ {0x5A, CRL_REG_LEN_08BIT, 0x90, 0xF2}, /* ADI Required Write */
+ {0x60, CRL_REG_LEN_08BIT, 0xA0, 0xF2}, /* ADI Required Write */
+ {0x00, CRL_REG_LEN_DELAY, 0x19, 0x00}, /* Delay 25 ms */
+ {0x60, CRL_REG_LEN_08BIT, 0xB0,
0xF2}, /* ADI Required Write */ + {0x5F, CRL_REG_LEN_08BIT, 0xA8, 0xF2}, + {0x0E, CRL_REG_LEN_08BIT, 0x80, 0xF2}, /* ADI Required Write */ + {0xB6, CRL_REG_LEN_08BIT, 0x08, 0xF2}, /* ADI Required Write */ + {0xC0, CRL_REG_LEN_08BIT, 0xA0, 0xF2}, /* ADI Required Write */ + {0xD9, CRL_REG_LEN_08BIT, 0x44, 0xF2}, + {0x0E, CRL_REG_LEN_08BIT, 0x40, 0xF2}, + {0xE0, CRL_REG_LEN_08BIT, 0x01, 0xF2}, /* Fast Lock enable*/ + {0x0E, CRL_REG_LEN_08BIT, 0x00, 0xF2}, /* ADI Required Write */ + {0x80, CRL_REG_LEN_08BIT, 0x51, 0xF2}, /* ADI Required Write */ + {0x81, CRL_REG_LEN_08BIT, 0x51, 0xF2}, /* ADI Required Write */ + {0x82, CRL_REG_LEN_08BIT, 0x68, 0xF2}, /* ADI Required Write */ + {0x03, CRL_REG_LEN_08BIT, 0x42, 0xF2}, /* Tri-S Output Drivers, + PwrDwn 656 pads */ + {0x04, CRL_REG_LEN_08BIT, 0x07, 0xF2}, /* Power-up INTRQ pad, + & Enable SFL */ + {0x13, CRL_REG_LEN_08BIT, 0x00, 0xF2}, /* ADI Required Write */ + {0x17, CRL_REG_LEN_08BIT, 0x41, 0xF2}, /* Select SH1 */ + {0x31, CRL_REG_LEN_08BIT, 0x12, 0xF2}, /* ADI Required Write */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x70, 0xE0, 0x70 }, + /* Enable 1-Lane MIPI Tx, + enable pixel output and route + SD through Pixel port */ + {0x00, CRL_REG_LEN_08BIT, 0x81, 0x90}, /* Enable 1-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA1, 0x90}, /* Set Auto DPHY Timing */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* ADI Required Write */ + {0xD2, CRL_REG_LEN_08BIT, 0x40, 0x90}, /* ADI Required Write */ + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x90}, /* ADI Required Write */ + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x90}, /* ADI Required Write */ + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x90}, /* ADI Required Write */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x90}, /* i2c_dphy_pwdn - 1'b0 */ + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x90}, /* ADI Required Write */ + {0x1E, CRL_REG_LEN_08BIT, 0xC0, 0x90}, /* ADI Required Write */ +}; + + +static struct crl_register_write_rep adv7481_cvbs_streamon_regs[] = { + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x90}, /* ADI Required Write */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x90}, /* i2c_mipi_pll_en - 1'b1 */ + {0x00, CRL_REG_LEN_DELAY, 0x02, 0x00}, + {0x00, CRL_REG_LEN_08BIT, 0x21, 0x90}, /* Power-up CSI-TX 21 */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x90}, /* ADI Required Write */ +}; + +static struct crl_register_write_rep adv7481_cvbs_streamoff_regs[] = { + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x90}, /* ADI Recommended Write */ + {0x1E, CRL_REG_LEN_08BIT, 0x00, 0x90}, /* Reset the clock Lane */ + {0x00, CRL_REG_LEN_08BIT, 0x81, 0x90}, + {0xDA, CRL_REG_LEN_08BIT, 0x00, 0x90}, /* i2c_mipi_pll_en - + 1'b0 Disable MIPI PLL */ + {0xC1, CRL_REG_LEN_08BIT, 0x3B, 0x90}, +}; + + +static struct crl_pll_configuration adv7481_cvbs_pll_configurations[] = { + { + .input_clk = 286363636, + .op_sys_clk = 216000000, + .bitsperpixel = 16, + .pixel_rate_csi = 27000000, + .pixel_rate_pa = 27000000, + .csi_lanes = 1, + }, + { + .input_clk = 24000000, + .op_sys_clk = 130000000, + .bitsperpixel = 16, + .pixel_rate_csi = 130000000, + .pixel_rate_pa = 130000000, + .csi_lanes = 1, + }, +}; + +static struct crl_subdev_rect_rep adv7481_cvbs_ntsc_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 720, + .in_rect.height = 240, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 720, + 
.in_rect.height = 240, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, +}; + +static struct crl_mode_rep adv7481_cvbs_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(adv7481_cvbs_ntsc_rects), + .sd_rects = adv7481_cvbs_ntsc_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 720, + .height = 240, + }, +}; + +static struct crl_sensor_subdev_config adv7481_cvbs_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481-cvbs binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481-cvbs pixel array", + }, +}; + +static struct crl_sensor_limits adv7481_cvbs_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 720, + .y_addr_max = 240, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 1, + .scaler_m_max = 1, + .scaler_n_min = 1, + .scaler_n_max = 1, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 1, +}; + +static struct crl_csi_data_fmt adv7481_cvbs_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + }, +}; + +static struct crl_v4l2_ctrl adv7481_cvbs_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .impact = CRL_IMPACTS_NO_IMPACT, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + }, +}; + +static struct crl_sensor_configuration adv7481_cvbs_crl_configuration = { + + /* one time initialization is done by HDMI part */ + + .powerup_regs_items = ARRAY_SIZE(adv7481_cvbs_powerup_regset), + .powerup_regs = adv7481_cvbs_powerup_regset, + + .poweroff_regs_items = ARRAY_SIZE(adv7481_cvbs_streamoff_regs), + .poweroff_regs = adv7481_cvbs_streamoff_regs, + + .subdev_items = ARRAY_SIZE(adv7481_cvbs_sensor_subdevs), + .subdevs = adv7481_cvbs_sensor_subdevs, + + .sensor_limits = &adv7481_cvbs_sensor_limits, + + .pll_config_items = ARRAY_SIZE(adv7481_cvbs_pll_configurations), + .pll_configs = adv7481_cvbs_pll_configurations, + + .modes_items = ARRAY_SIZE(adv7481_cvbs_modes), + .modes = adv7481_cvbs_modes, + + .streamon_regs_items = ARRAY_SIZE(adv7481_cvbs_streamon_regs), + .streamon_regs = adv7481_cvbs_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(adv7481_cvbs_streamoff_regs), + .streamoff_regs = adv7481_cvbs_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(adv7481_cvbs_v4l2_ctrls), + .v4l2_ctrl_bank = adv7481_cvbs_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(adv7481_cvbs_crl_csi_data_fmt), + .csi_fmts = adv7481_cvbs_crl_csi_data_fmt, + + .addr_len = CRL_ADDR_7BIT, + .i2c_mutex_in_use = true, +}; + +#endif /* 
__CRLMODULE_ADV7481_CVBS_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_adv7481_eval_configuration.h b/drivers/media/i2c/crlmodule/crl_adv7481_eval_configuration.h new file mode 100644 index 000000000000..c7781706d1f3 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_adv7481_eval_configuration.h @@ -0,0 +1,577 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation + * + * Author: Jianxu Zheng + * + */ + +#ifndef __CRLMODULE_ADV7481_EVAL_CONFIGURATION_H_ +#define __CRLMODULE_ADV7481_EVAL_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + + +struct crl_ctrl_data_pair ctrl_data_lanes[] = { + { + .ctrl_id = V4L2_CID_MIPI_LANES, + .data = 4, + }, + { + .ctrl_id = V4L2_CID_MIPI_LANES, + .data = 2, + }, + { + .ctrl_id = V4L2_CID_MIPI_LANES, + .data = 1, + }, +}; + +static struct crl_pll_configuration adv7481_eval_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 16, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 24, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_1080p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_720p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_VGA_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 640, + .out_rect.height = 480, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_WVGA_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 800, + .out_rect.height = 480, + }, 
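+ /* In each rect table the PIXEL_ARRAY entry passes the full 1920x1080
+ * input through unchanged, while the BINNER entry downscales it to the
+ * mode output, here 800x480 (WVGA). */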
+}; + +static struct crl_subdev_rect_rep adv7481_eval_1080i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 540, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_480i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_576p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 576, + }, +}; + +static struct crl_subdev_rect_rep adv7481_eval_576i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 288, + }, +}; +static struct crl_mode_rep adv7481_eval_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_1080p_rects), + .sd_rects = adv7481_eval_1080p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[0], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_720p_rects), + .sd_rects = adv7481_eval_720p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[0], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_WVGA_rects), + .sd_rects = adv7481_eval_WVGA_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 800, + .height = 480, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[2], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_VGA_rects), + .sd_rects = adv7481_eval_VGA_rects, + .binn_hor = 3, + .binn_vert = 2, + .scale_m = 1, + .width = 640, + .height = 480, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[2], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_1080i_rects), + .sd_rects = adv7481_eval_1080i_rects, + .binn_hor = 1, + .binn_vert = 2, 
+ .scale_m = 1, + .width = 1920, + .height = 540, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[1], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_480i_rects), + .sd_rects = adv7481_eval_480i_rects, + .binn_hor = 2, + .binn_vert = 4, + .scale_m = 1, + .width = 720, + .height = 240, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[2], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_576p_rects), + .sd_rects = adv7481_eval_576p_rects, + .binn_hor = 2, + .binn_vert = 1, + .scale_m = 1, + .width = 720, + .height = 576, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[2], + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_eval_576i_rects), + .sd_rects = adv7481_eval_576i_rects, + .binn_hor = 2, + .binn_vert = 3, + .scale_m = 1, + .width = 720, + .height = 288, + .comp_items = 1, + .ctrl_data = &ctrl_data_lanes[2], + .mode_regs_items = 0, + .mode_regs = NULL, + }, +}; + +static struct crl_sensor_subdev_config adv7481_eval_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481 pixel array", + }, +}; + +static struct crl_sensor_subdev_config adv7481b_eval_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481b binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481b pixel array", + }, +}; + +static struct crl_sensor_limits adv7481_eval_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1920, + .y_addr_max = 1080, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 1, + .scaler_m_max = 1, + .scaler_n_min = 1, + .scaler_n_max = 1, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 1, +}; + +static struct crl_csi_data_fmt adv7481_eval_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_YUYV8_1X16, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = 0, + .regs = NULL, + }, + { + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = 0, + .regs = NULL, + }, + { + .code = MEDIA_BUS_FMT_RGB565_1X16, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = 0, + .regs = NULL, + }, + { + .code = MEDIA_BUS_FMT_RGB888_1X24, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 24, + .regs_items = 0, + .regs = NULL, + }, +}; + +static struct crl_v4l2_ctrl adv7481_eval_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, 
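+ /* V4L2_CID_PIXEL_RATE is registered once per subdev: the pixel-array
+ * instance above reports the pixel_rate_pa value and the binner
+ * instance below reports the pixel_rate_csi value of the selected PLL
+ * configuration (pairing inferred from the PLL tables above). */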
+ {
+ .sd_type = CRL_SUBDEV_TYPE_BINNER,
+ .op_type = CRL_V4L2_CTRL_GET_OP,
+ .context = SENSOR_POWERED_ON,
+ .ctrl_id = V4L2_CID_PIXEL_RATE,
+ .name = "V4L2_CID_PIXEL_RATE_CSI",
+ .type = CRL_V4L2_CTRL_TYPE_INTEGER,
+ .data.std_data.min = 0,
+ .data.std_data.max = 0,
+ .data.std_data.step = 1,
+ .data.std_data.def = 0,
+ .flags = 0,
+ .impact = CRL_IMPACTS_NO_IMPACT,
+ .regs_items = 0,
+ .regs = 0,
+ .dep_items = 0,
+ .dep_ctrls = 0,
+ },
+ {
+ .sd_type = CRL_SUBDEV_TYPE_BINNER,
+ .op_type = CRL_V4L2_CTRL_GET_OP,
+ .context = SENSOR_POWERED_ON,
+ .ctrl_id = V4L2_CID_MIPI_LANES,
+ .name = "V4L2_CID_MIPI_LANES",
+ .type = CRL_V4L2_CTRL_TYPE_CUSTOM,
+ .data.std_data.min = 1,
+ .data.std_data.max = 4,
+ .data.std_data.step = 1,
+ .data.std_data.def = 4,
+ .flags = 0,
+ .impact = CRL_IMPACTS_NO_IMPACT,
+ .regs_items = 0,
+ .regs = 0,
+ .dep_items = 0,
+ .dep_ctrls = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_INTEGER,
+ },
+};
+
+static struct crl_sensor_configuration adv7481_eval_crl_configuration = {
+
+ .powerup_regs_items = 0,
+ .powerup_regs = NULL,
+
+ .poweroff_regs_items = 0,
+ .poweroff_regs = NULL,
+
+ .id_reg_items = 0,
+ .id_regs = NULL,
+
+ .subdev_items = ARRAY_SIZE(adv7481_eval_sensor_subdevs),
+ .subdevs = adv7481_eval_sensor_subdevs,
+
+ .sensor_limits = &adv7481_eval_sensor_limits,
+
+ .pll_config_items = ARRAY_SIZE(adv7481_eval_pll_configurations),
+ .pll_configs = adv7481_eval_pll_configurations,
+
+ .modes_items = ARRAY_SIZE(adv7481_eval_modes),
+ .modes = adv7481_eval_modes,
+
+ .streamon_regs_items = 0,
+ .streamon_regs = NULL,
+
+ .streamoff_regs_items = 0,
+ .streamoff_regs = NULL,
+
+ .v4l2_ctrls_items = ARRAY_SIZE(adv7481_eval_v4l2_ctrls),
+ .v4l2_ctrl_bank = adv7481_eval_v4l2_ctrls,
+
+ .csi_fmts_items = ARRAY_SIZE(adv7481_eval_crl_csi_data_fmt),
+ .csi_fmts = adv7481_eval_crl_csi_data_fmt,
+};
+
+static struct crl_sensor_configuration adv7481b_eval_crl_configuration = {
+
+ .powerup_regs_items = 0,
+ .powerup_regs = NULL,
+
+ .poweroff_regs_items = 0,
+ .poweroff_regs = NULL,
+
+ .id_reg_items = 0,
+ .id_regs = NULL,
+
+ .subdev_items = ARRAY_SIZE(adv7481b_eval_sensor_subdevs),
+ .subdevs = adv7481b_eval_sensor_subdevs,
+
+ .sensor_limits = &adv7481_eval_sensor_limits,
+
+ .pll_config_items = ARRAY_SIZE(adv7481_eval_pll_configurations),
+ .pll_configs = adv7481_eval_pll_configurations,
+
+ .modes_items = ARRAY_SIZE(adv7481_eval_modes),
+ .modes = adv7481_eval_modes,
+
+ .streamon_regs_items = 0,
+ .streamon_regs = NULL,
+
+ .streamoff_regs_items = 0,
+ .streamoff_regs = NULL,
+
+ .v4l2_ctrls_items = ARRAY_SIZE(adv7481_eval_v4l2_ctrls),
+ .v4l2_ctrl_bank = adv7481_eval_v4l2_ctrls,
+
+ .csi_fmts_items = ARRAY_SIZE(adv7481_eval_crl_csi_data_fmt),
+ .csi_fmts = adv7481_eval_crl_csi_data_fmt,
+};
+
+#endif /* __CRLMODULE_ADV7481_EVAL_CONFIGURATION_H_ */
diff --git a/drivers/media/i2c/crlmodule/crl_adv7481_hdmi_configuration.c b/drivers/media/i2c/crlmodule/crl_adv7481_hdmi_configuration.c
new file mode 100644
index 000000000000..d8d9c7b930a8
--- /dev/null
+++ b/drivers/media/i2c/crlmodule/crl_adv7481_hdmi_configuration.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2016 - 2018 Intel Corporation
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "crlmodule.h"
+#include "crlmodule-regs.h"
+
+/* Size of the Mondello KSV buffer in bytes */
+#define ADV7481_KSV_BUFFER_SIZE 0x80
+/* Size of a single KSV */
+#define ADV7481_KSV_SIZE 0x05
+/* Max number of devices (ADV7481_KSV_BUFFER_SIZE / ADV7481_KSV_SIZE) */
+#define ADV7481_MAX_DEVICES 0x19
+#define ADV7481_AKSV_UPDATE_A_ST 0x08
+#define ADV7481_CABLE_DET_A_ST 0x40
+#define ADV7481_V_LOCKED_A_ST 0x02
+#define ADV7481_DE_REGEN_A_ST 0x01
+
+struct crl_adv7481_hdmi {
+ unsigned int in_hot_plug_reset;
+ int hdmi_res_width;
+ int hdmi_res_height;
+ int hdmi_res_interlaced;
+ int hdmi_cable_connected;
+ struct delayed_work work;
+ struct mutex hot_plug_reset_lock;
+ struct i2c_client *client;
+};
+
+/* ADV7481 HDCP B-status register */
+struct v4l2_adv7481_bstatus {
+ union {
+ __u8 bstatus[2];
+ struct {
+ __u8 device_count:7;
+ __u8 max_devs_exceeded:1;
+ __u8 depth:3;
+ __u8 max_cascade_exceeded:1;
+ __u8 hdmi_mode:1;
+ __u8 hdmi_reserved_2:1;
+ __u8 rsvd:2;
+ };
+ };
+};
+
+struct v4l2_adv7481_dev_info {
+ struct v4l2_adv7481_bstatus bstatus;
+ __u8 ksv[ADV7481_KSV_BUFFER_SIZE];
+};
+
+struct v4l2_adv7481_bcaps {
+ union {
+ __u8 bcaps;
+ struct {
+ __u8 fast_reauth:1;
+ __u8 features:1;
+ __u8 reserved:2;
+ __u8 fast:1;
+ __u8 ksv_fifo_ready:1;
+ __u8 repeater:1;
+ __u8 hdmi_reserved:1;
+ };
+ };
+};
+
+static int adv_i2c_write(struct i2c_client *client,
+ u16 i2c_addr,
+ u16 reg,
+ u8 val)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct crl_sensor *sensor = to_crlmodule_sensor(subdev);
+
+ return crlmodule_write_reg(sensor, i2c_addr, reg,
+ CRL_REG_LEN_08BIT, 0xFF, val);
+}
+
+static int adv_i2c_read(struct i2c_client *client,
+ u16 i2c_addr,
+ u16 reg,
+ u32 *val)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct crl_sensor *sensor = to_crlmodule_sensor(subdev);
+ struct crl_register_read_rep read_reg;
+
+ read_reg.address = reg;
+ read_reg.len = CRL_REG_LEN_08BIT;
+ read_reg.dev_i2c_addr = i2c_addr;
+ return crlmodule_read_reg(sensor, read_reg, val);
+}
+
+/*
+ * Writes the HDCP BKSV list & status when the system acts
+ * as an HDCP 1.4 repeater
+ */
+static long adv_write_bksv(struct i2c_client *client,
+ struct v4l2_adv7481_dev_info *dev_info)
+{
+ unsigned int k = 0;
+ int ret = 0;
+ u32 reg;
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct crl_sensor *sensor = to_crlmodule_sensor(subdev);
+
+ dev_dbg(&client->dev, "%s: Writing ADV7481 BKSV list.\n", __func__);
+
+ /* Clear BCAPS KSV list ready */
+ ret = adv_i2c_write(client, 0x64, 0x78, 0x01);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s: Error clearing BCAPS KSV list ready!\n",
+ __func__);
+ return ret;
+ }
+
+ /* KSV_LIST_READY_PORT_A KSV list not ready */
+ ret = adv_i2c_write(client, 0x64, 0x69, 0x00);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s: Error clearing KSV_LIST_READY_PORT_A register!\n",
+ __func__);
+ return ret;
+ }
+
+ /* Write the BKSV list, one device at a time */
+ /* Writing the entire list in one call exceeds frame size */
+ for (k = 0; k < ADV7481_MAX_DEVICES; ++k) {
+ unsigned int j = k * ADV7481_KSV_SIZE;
+ struct crl_register_write_rep adv_ksv_cmd[] = {
+ {0x80 + j, CRL_REG_LEN_08BIT,
+ dev_info->ksv[j + 0], 0x64},
+ {0x81 + j, CRL_REG_LEN_08BIT,
+ dev_info->ksv[j + 1], 0x64},
+ {0x82 + j, CRL_REG_LEN_08BIT,
+ dev_info->ksv[j + 2], 0x64},
+ {0x83 + j, CRL_REG_LEN_08BIT,
+ dev_info->ksv[j + 3], 0x64},
+ {0x84 + j, CRL_REG_LEN_08BIT,
+ dev_info->ksv[j + 4], 0x64},
+ };
+ ret = crlmodule_write_regs(sensor, adv_ksv_cmd,
+ ARRAY_SIZE(adv_ksv_cmd));
+
+ if (ret) {
+ dev_err(&client->dev,
+ "%s: Error while writing BKSV list!\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ /* Finally update the bstatus registers */
+ ret = adv_i2c_read(client, 0x64, 0x42, &reg);
+
+ if (ret) {
+ dev_err(&client->dev,
+ "%s: Error reading bstatus register!\n",
+ __func__);
+ return ret;
+ }
+
+ /* ADV recommendation: only update bits [0:11] */
+ /* Take the lower nibble (bits [11:8]) of the input bstatus */
+ /* Take the upper nibble (bits [15:12]) of the current register */
+ dev_info->bstatus.bstatus[1] =
+ (dev_info->bstatus.bstatus[1] & 0x0F) | (reg & 0xF0);
+ {
+ struct crl_register_write_rep adv_cmd[] = {
+ {0x41, CRL_REG_LEN_08BIT,
+ dev_info->bstatus.bstatus[0], 0x64},
+ {0x42, CRL_REG_LEN_08BIT,
+ dev_info->bstatus.bstatus[1], 0x64},
+ /* KSV_LIST_READY_PORT_A */
+ {0x69, CRL_REG_LEN_08BIT, 0x01, 0x64},
+ };
+
+ ret = crlmodule_write_regs(sensor, adv_cmd,
+ ARRAY_SIZE(adv_cmd));
+ }
+
+ return ret;
+}
+
+static ssize_t adv_bcaps_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 val;
+ int ret;
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+
+ ret = adv_i2c_read(client, 0x64, 0x40, &val);
+
+ if (ret != 0)
+ return -EIO;
+
+ val = val & 0xFF;
+ *buf = val;
+ return 1;
+}
+
+/* Declares bcaps attribute that will be exposed to user space via sysfs */
+static DEVICE_ATTR(bcaps, S_IRUGO, adv_bcaps_show, NULL);
+
+/*
+ * Writes the BKSV value provided from user space to the chip.
+ * The BKSV must be formatted as a struct v4l2_adv7481_dev_info;
+ * the function does basic validation by checking that the provided
+ * buffer size matches the size of that struct. In case of error,
+ * returns -EIO.
+ */
+static ssize_t adv_bksv_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret;
+ struct v4l2_adv7481_dev_info dev_info;
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+ if (count != sizeof(struct v4l2_adv7481_dev_info))
+ return -EIO;
+
+ dev_info = *((struct v4l2_adv7481_dev_info *) buf);
+
+ ret = adv_write_bksv(client, &dev_info);
+
+ if (ret != 0)
+ return -EIO;
+
+ return count;
+}
+
+/* Declares bksv attribute that will be exposed to user space via sysfs */
+static DEVICE_ATTR(bksv, S_IWUSR | S_IWGRP, NULL, adv_bksv_store);
+
+/*
+ * Asserts HPA_MAN_VALUE_PORT_A to enable hot plug detection.
+ */
+static void adv_hpa_assert(struct work_struct *work)
+{
+ struct crl_adv7481_hdmi *adv7481_hdmi
+ = container_of(work, struct crl_adv7481_hdmi, work.work);
+ struct i2c_client *client = adv7481_hdmi->client;
+
+ adv_i2c_write(client, 0x68, 0xF8, 0x01);
+ adv7481_hdmi->in_hot_plug_reset = 0;
+}
+
+/*
+ * Reauthenticates HDCP by disabling hot plug detection for 2 seconds.
+ * It can be triggered from user space by writing any value to the
+ * "reauthenticate" attribute. After that time the connected source will
+ * automatically ask for HDCP authentication once again. To avoid sleeping,
+ * delayed work is used to re-enable hot plug after 2 seconds.
+ * If a previous reauthentication has not yet completed, returns -EBUSY.
+ * In case of error, returns -EIO.
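+ *
+ * Illustrative usage from user space (the exact sysfs path depends on
+ * the I2C bus number and address the device is probed on):
+ *
+ * echo 1 > /sys/bus/i2c/devices/<bus>-<addr>/reauthenticate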
+ */ +static ssize_t adv_reauthenticate_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + struct crl_adv7481_hdmi *adv7481_hdmi; + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + + adv7481_hdmi = sensor->sensor_specific_data; + + dev_dbg(&client->dev, "%s\n", __func__); + + mutex_lock(&adv7481_hdmi->hot_plug_reset_lock); + + if (adv7481_hdmi->in_hot_plug_reset) { + mutex_unlock(&adv7481_hdmi->hot_plug_reset_lock); + return -EBUSY; + } + + /* Clear BCAPS KSV list ready */ + ret = adv_i2c_write(client, 0x64, 0x78, 0x01); + if (ret != 0) { + dev_err(&client->dev, + "%s: Error clearing BCAPS KSV list ready!\n", __func__); + mutex_unlock(&adv7481_hdmi->hot_plug_reset_lock); + return -EIO; + } + + /* KSV_LIST_READY_PORT_A KSV list not ready */ + ret = adv_i2c_write(client, 0x64, 0x69, 0x00); + if (ret != 0) { + dev_err(&client->dev, + "%s: Error clearing KSV_LIST_READY_PORT_A register!\n", + __func__); + mutex_unlock(&adv7481_hdmi->hot_plug_reset_lock); + return -EIO; + } + + ret = adv_i2c_write(client, 0x68, 0xF8, 0x00); + + if (ret != 0) { + mutex_unlock(&adv7481_hdmi->hot_plug_reset_lock); + return -EIO; + } + + adv7481_hdmi->in_hot_plug_reset = 1; + schedule_delayed_work(&adv7481_hdmi->work, msecs_to_jiffies(2000)); + + mutex_unlock(&adv7481_hdmi->hot_plug_reset_lock); + return count; +} + +/* Declares reauthenticate attribute that will be exposed + * to user space via sysfs + */ +static DEVICE_ATTR(reauthenticate, S_IWUSR | S_IWGRP, NULL, + adv_reauthenticate_store); + +/* Dummy show to prevent WARN when registering aksv attribute */ +static ssize_t adv_aksv_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + (void) dev; + (void) attr; + (void) buf; + + return -EIO; +} + +/* Declares aksv attribute that will be exposed to user space via sysfs, + * to notify about AKSV events. 
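+ * The threaded interrupt handler below calls sysfs_notify() on "aksv"
+ * when AKSV_UPDATE_A_ST fires, so user space can poll() this attribute
+ * and, for example, react by reading "bcaps"/"bstatus" and writing an
+ * updated KSV list through "bksv".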
+ */
+static DEVICE_ATTR(aksv, S_IRUGO, adv_aksv_show, NULL);
+
+static ssize_t adv_hdmi_cable_connected_show(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	struct crl_adv7481_hdmi *adv7481_hdmi;
+	struct i2c_client *client = to_i2c_client(dev);
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct crl_subdev *ssd = to_crlmodule_subdev(sd);
+	struct crl_sensor *sensor = ssd->sensor;
+	char interlaced = 'p';
+
+	adv7481_hdmi = sensor->sensor_specific_data;
+
+	if (adv7481_hdmi->hdmi_res_interlaced)
+		interlaced = 'i';
+
+	return snprintf(buf, PAGE_SIZE, "%dx%d%c",
+			adv7481_hdmi->hdmi_res_width,
+			adv7481_hdmi->hdmi_res_height, interlaced);
+}
+static DEVICE_ATTR(hdmi_cable_connected, S_IRUGO,
+		   adv_hdmi_cable_connected_show, NULL);
+
+static ssize_t adv_bstatus_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	u32 b0, b1;
+	int ret;
+	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+
+	dev_dbg(&client->dev, "Getting bstatus\n");
+	ret = adv_i2c_read(client, 0x64, 0x41, &b0);
+	if (ret != 0) {
+		dev_err(&client->dev, "Error getting bstatus(0)\n");
+		return -EIO;
+	}
+	dev_dbg(&client->dev, "bstatus(0): 0x%x\n", b0 & 0xff);
+	ret = adv_i2c_read(client, 0x64, 0x42, &b1);
+	if (ret != 0) {
+		dev_err(&client->dev, "Error getting bstatus(1)\n");
+		return -EIO;
+	}
+	dev_dbg(&client->dev, "bstatus(1): 0x%x\n", b1 & 0xff);
+	*buf = b0 & 0xff;
+	buf++;
+	*buf = b1 & 0xff;
+	return 2;
+}
+static DEVICE_ATTR(bstatus, S_IRUGO, adv_bstatus_show, NULL);
+
+irqreturn_t crl_adv7481_threaded_irq_fn(int irq, void *sensor_struct)
+{
+	struct crl_sensor *sensor = sensor_struct;
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	u32 interrupt_st;
+	u32 raw_value;
+	u32 temp[3];
+	int ret = 0;
+	struct crl_register_read_rep reg;
+	struct crl_adv7481_hdmi *adv7481_hdmi;
+
+	adv7481_hdmi = sensor->sensor_specific_data;
+	reg.address = 0x90;
+	reg.len = CRL_REG_LEN_08BIT;
+	reg.mask = 0xFF;
+	reg.dev_i2c_addr = 0xE0;
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+
+	if (!adv7481_hdmi)
+		return IRQ_HANDLED;
+
+	/* AKSV_UPDATE_A_ST: check interrupt status */
+	ret = adv_i2c_read(client, 0xE0, 0x90, &interrupt_st);
+
+	if (interrupt_st & ADV7481_AKSV_UPDATE_A_ST) {
+		dev_dbg(&client->dev,
+			"%s: ADV7481 ISR: AKSV_UPDATE_A_ST: 0x%x\n",
+			__func__, interrupt_st);
+
+		/* Notify user space about AKSV event */
+		sysfs_notify(&client->dev.kobj, NULL, "aksv");
+
+		/* Clear interrupt bit */
+		ret = adv_i2c_write(client, 0xE0, 0x91, 0x08);
+	}
+
+	/*
+	 * Check interrupt status for: CABLE_DET_A_ST,
+	 * V_LOCKED_A_ST and DE_REGEN_LCK_A_ST
+	 */
+	ret = adv_i2c_read(client, 0xE0, 0x72, &interrupt_st);
+
+	/*
+	 * If any of the CABLE_DET_A_ST, V_LOCKED_A_ST and DE_REGEN_LCK_A_ST
+	 * interrupts was set, get updated values of CABLE_DET_RAW,
+	 * V_LOCKED_RAW and DE_REGEN_LCK_RAW
+	 */
+	if (interrupt_st)
+		ret = adv_i2c_read(client, 0xE0, 0x71, &raw_value);
+
+	/* Check CABLE_DET_A_ST interrupt */
+	if (interrupt_st & ADV7481_CABLE_DET_A_ST) {
+		/* Clear interrupt bit */
+		ret = adv_i2c_write(client, 0xE0, 0x73, 0x40);
+
+		/* HDMI cable is connected */
+		if (raw_value & ADV7481_CABLE_DET_A_ST) {
+			dev_dbg(&client->dev,
+				"%s: ADV7481 ISR: HDMI cable connected\n",
+				__func__);
+			ret = adv_i2c_write(client, 0xE0, 0x10, 0xA1);
+		} else {
+			dev_dbg(&client->dev,
+				"%s: ADV7481 ISR: HDMI cable disconnected\n",
+				__func__);
+		}
+	}
+
+	/* Check V_LOCKED_A_ST interrupt */
+	if (interrupt_st & ADV7481_V_LOCKED_A_ST) {
+		/* Clear interrupt bit */
+		ret = adv_i2c_write(client, 0xE0, 0x73, 0x02);
+		/* Vertical sync filter has been locked,
+		 * resolution height can be read
+		 */
+		if (raw_value & ADV7481_V_LOCKED_A_ST) {
+			dev_dbg(&client->dev,
+				"%s: ADV7481 ISR: Vertical Sync Filter Locked\n",
+				__func__);
+			reg.dev_i2c_addr = 0x68; /* HDMI_RX_MAP; */
+			reg.address = 0x09;
+			adv_i2c_read(client, 0x68, 0x09, &temp[0]);
+			adv_i2c_read(client, 0x68, 0x0A, &temp[1]);
+			adv_i2c_read(client, 0x68, 0x0B, &temp[2]);
+
+			temp[0] = temp[0] & 0x1F;
+			adv7481_hdmi->hdmi_res_height =
+				(temp[0] << 8) + temp[1];
+			if (temp[2] & 0x20) {
+				adv7481_hdmi->hdmi_res_height =
+					adv7481_hdmi->hdmi_res_height << 1;
+				adv7481_hdmi->hdmi_res_interlaced = 1;
+			} else {
+				adv7481_hdmi->hdmi_res_interlaced = 0;
+			}
+
+			/*
+			 * If resolution width was already read,
+			 * notify user space about new resolution
+			 */
+			if (adv7481_hdmi->hdmi_res_width) {
+				sysfs_notify(&client->dev.kobj, NULL,
+					     "hdmi_cable_connected");
+			}
+		} else {
+			dev_dbg(&client->dev,
+				"%s: ADV7481 ISR: Vertical Sync Filter Lost\n",
+				__func__);
+			adv7481_hdmi->hdmi_res_height = 0;
+			/* Notify user space about losing resolution */
+			if (!adv7481_hdmi->hdmi_res_width) {
+				sysfs_notify(&client->dev.kobj, NULL,
+					     "hdmi_cable_connected");
+			}
+		}
+	}
+
+	/* Check DE_REGEN_A_ST interrupt */
+	if (interrupt_st & ADV7481_DE_REGEN_A_ST) {
+		/* Clear interrupt bit */
+		ret = adv_i2c_write(client, 0xE0, 0x73, 0x01);
+
+		/* DE regeneration has been locked,
+		 * resolution width can be read
+		 */
+		if (raw_value & ADV7481_DE_REGEN_A_ST) {
+			dev_dbg(&client->dev,
+				"%s: ADV7481 ISR: DE Regeneration Locked\n",
+				__func__);
+			reg.dev_i2c_addr = 0x68; /* HDMI_RX_MAP; */
+			reg.address = 0x07;
+			adv_i2c_read(client, 0x68, 0x07, &temp[0]);
+			adv_i2c_read(client, 0x68, 0x08, &temp[1]);
+
+			temp[0] = temp[0] & 0x1F;
+			adv7481_hdmi->hdmi_res_width = (temp[0] << 8) + temp[1];
+
+			/*
+			 * If resolution height was already read back,
+			 * notify user space about new resolution
+			 */
+			if (adv7481_hdmi->hdmi_res_height) {
+				sysfs_notify(&client->dev.kobj, NULL,
+					     "hdmi_cable_connected");
+			}
+		} else {
+			dev_dbg(&client->dev,
+				"%s: ADV7481 ISR: DE Regeneration Lost\n",
+				__func__);
+			adv7481_hdmi->hdmi_res_width = 0;
+			/* Notify user space about losing resolution */
+			if (!adv7481_hdmi->hdmi_res_height) {
+				sysfs_notify(&client->dev.kobj, NULL,
+					     "hdmi_cable_connected");
+			}
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+static struct attribute *adv7481_attributes[] = {
+	&dev_attr_bstatus.attr,
+	&dev_attr_hdmi_cable_connected.attr,
+	&dev_attr_aksv.attr,
+	&dev_attr_reauthenticate.attr,
+	&dev_attr_bksv.attr,
+	&dev_attr_bcaps.attr,
+	NULL
+};
+
+static const struct attribute_group adv7481_attr_group = {
+	.attrs = adv7481_attributes,
+};
+
+int adv7481_sensor_init(struct i2c_client *client)
+{
+	struct crl_adv7481_hdmi *adv7481_hdmi;
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct crl_subdev *ssd = to_crlmodule_subdev(sd);
+	struct crl_sensor *sensor = ssd->sensor;
+
+	adv7481_hdmi = devm_kzalloc(&client->dev,
+				    sizeof(*adv7481_hdmi), GFP_KERNEL);
+
+	if (!adv7481_hdmi)
+		return -ENOMEM;
+
+	sensor->sensor_specific_data = adv7481_hdmi;
+	adv7481_hdmi->client = client;
+	mutex_init(&adv7481_hdmi->hot_plug_reset_lock);
+	INIT_DELAYED_WORK(&adv7481_hdmi->work, adv_hpa_assert);
+	dev_dbg(&client->dev, "%s ADV7481_sensor_init\n", __func__);
+
+	return sysfs_create_group(&client->dev.kobj, &adv7481_attr_group);
+}
+
+int adv7481_sensor_cleanup(struct i2c_client *client)
+{
+	struct crl_adv7481_hdmi
*adv7481_hdmi; + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + + adv7481_hdmi = sensor->sensor_specific_data; + + /* + * This can be NULL if crlmodule_registered call failed before + * sensor_init call. + */ + if (!adv7481_hdmi) + return 0; + + dev_dbg(&client->dev, "%s: ADV7481_sensor_cleanup\n", __func__); + cancel_delayed_work_sync(&adv7481_hdmi->work); + + sysfs_remove_group(&client->dev.kobj, &adv7481_attr_group); + return 0; +} diff --git a/drivers/media/i2c/crlmodule/crl_adv7481_hdmi_configuration.h b/drivers/media/i2c/crlmodule/crl_adv7481_hdmi_configuration.h new file mode 100644 index 000000000000..aa13b0e8af03 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_adv7481_hdmi_configuration.h @@ -0,0 +1,951 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation */ + +#ifndef __CRLMODULE_ADV7481_HDMI_CONFIGURATION_H_ +#define __CRLMODULE_ADV7481_HDMI_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" +irqreturn_t crl_adv7481_threaded_irq_fn(int irq, void *sensor_struct); + +struct crl_ctrl_data_pair hdmi_ctrl_data_lanes[] = { + { + .ctrl_id = V4L2_CID_MIPI_LANES, + .data = 4, + }, + { + .ctrl_id = V4L2_CID_MIPI_LANES, + .data = 2, + }, + { + .ctrl_id = V4L2_CID_MIPI_LANES, + .data = 1, + }, +}; + + +static struct crl_register_write_rep adv7481_hdmi_onetime_init_regset[] = { + {0xFF, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, + {0x00, CRL_REG_LEN_DELAY, 0x05, 0x00}, + {0x01, CRL_REG_LEN_08BIT, 0x76, 0xE0}, /* ADI Required Write */ + {0x05, CRL_REG_LEN_08BIT, 0x96, 0xE0}, /* Setting Vid_Std to + 1600x1200(UXGA)@60 */ + {0xF2, CRL_REG_LEN_08BIT, 0x01, 0xE0}, /* Enable I2C Read + Auto-Increment */ + {0xF3, CRL_REG_LEN_08BIT, 0x4C, 0xE0}, /* DPLL Map Address + Set to 0x4C */ + {0xF4, CRL_REG_LEN_08BIT, 0x44, 0xE0}, /* CP Map Address + Set to 0x44 */ + {0xF5, CRL_REG_LEN_08BIT, 0x68, 0xE0}, /* HDMI RX Map Address + Set to 0x68 */ + {0xF6, CRL_REG_LEN_08BIT, 0x6C, 0xE0}, /* EDID Map Address + Set to 0x6C */ + {0xF7, CRL_REG_LEN_08BIT, 0x64, 0xE0}, /* HDMI RX Repeater Map Address + Set to 0x64 */ + {0xF8, CRL_REG_LEN_08BIT, 0x62, 0xE0}, /* HDMI RX Infoframe Map Address + Set to 0x62 */ + {0xF9, CRL_REG_LEN_08BIT, 0xF0, 0xE0}, /* CBUS Map Address + Set to 0xF0 */ + {0xFA, CRL_REG_LEN_08BIT, 0x82, 0xE0}, /* CEC Map Address + Set to 0x82 */ + {0xFB, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* SDP Main Map Address + Set to 0xF2 */ + {0xFC, CRL_REG_LEN_08BIT, 0x90, 0xE0}, /* CSI-TXB Map Address + Set to 0x90 */ + {0xFD, CRL_REG_LEN_08BIT, 0x94, 0xE0}, /* CSI-TXA Map Address + Set to 0x94 */ + {0x00, CRL_REG_LEN_08BIT, 0x40, 0xE0}, /* Disable chip powerdown & + Enable HDMI Rx block */ + + {0x40, CRL_REG_LEN_08BIT, 0xC3, 0x64}, /* Enable HDCP 1.1 Repeater */ + {0x69, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* KSV List not ready port A */ + {0x77, CRL_REG_LEN_08BIT, 0x08, 0x64}, /* Clear KSV List */ + {0x78, CRL_REG_LEN_08BIT, 0x01, 0x64}, /* KSV_LIST_READY_CLR_A: + Clears the BCAPS ready bit */ + {0x68, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Disable dual ksv list + for port A */ + {0x41, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Reset b-status (1) */ + {0x42, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Reset b-status (2) */ + {0x91, CRL_REG_LEN_08BIT, 0x08, 0xE0}, /* AKSV Update Clear */ + + {0x00, CRL_REG_LEN_08BIT, 0x08, 0x68}, /* Foreground Channel = A */ + {0x98, CRL_REG_LEN_08BIT, 0xFF, 0x68}, /* ADI Required Write */ + {0x99, CRL_REG_LEN_08BIT, 0xA3, 0x68}, /* ADI Required Write */ + {0x9A, 
CRL_REG_LEN_08BIT, 0x00, 0x68}, /* ADI Required Write */
+	{0x9B, CRL_REG_LEN_08BIT, 0x0A, 0x68}, /* ADI Required Write */
+	{0x9D, CRL_REG_LEN_08BIT, 0x40, 0x68}, /* ADI Required Write */
+	{0xCB, CRL_REG_LEN_08BIT, 0x09, 0x68}, /* ADI Required Write */
+	{0x3D, CRL_REG_LEN_08BIT, 0x10, 0x68}, /* ADI Required Write */
+	{0x3E, CRL_REG_LEN_08BIT, 0x7B, 0x68}, /* ADI Required Write */
+	{0x3F, CRL_REG_LEN_08BIT, 0x5E, 0x68}, /* ADI Required Write */
+	{0x4E, CRL_REG_LEN_08BIT, 0xFE, 0x68}, /* ADI Required Write */
+	{0x4F, CRL_REG_LEN_08BIT, 0x18, 0x68}, /* ADI Required Write */
+	{0x57, CRL_REG_LEN_08BIT, 0xA3, 0x68}, /* ADI Required Write */
+	{0x58, CRL_REG_LEN_08BIT, 0x04, 0x68}, /* ADI Required Write */
+	{0x85, CRL_REG_LEN_08BIT, 0x10, 0x68}, /* ADI Required Write */
+	{0x83, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* Enable All Terminations */
+	{0xA3, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* ADI Required Write */
+	{0xBE, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* ADI Required Write */
+	{0x6C, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* HPA Manual Enable */
+	{0xF8, CRL_REG_LEN_08BIT, 0x01, 0x68}, /* HPA Asserted */
+	{0x0F, CRL_REG_LEN_08BIT, 0x00, 0x68}, /* Audio Mute Speed
+						  Set to Fastest (Smallest Step Size) */
+	{0x0E, CRL_REG_LEN_08BIT, 0xFF, 0xE0}, /* LLC/PIX/AUD/SPI PINS
+						  TRISTATED */
+
+	{0x74, CRL_REG_LEN_08BIT, 0x43, 0xE0}, /* Enable interrupts */
+	{0x75, CRL_REG_LEN_08BIT, 0x43, 0xE0},
+
+	{0x70, CRL_REG_LEN_08BIT, 0xA0, 0x64}, /* Write primary edid size */
+	{0x74, CRL_REG_LEN_08BIT, 0x01, 0x64}, /* Enable manual edid */
+	{0x7A, CRL_REG_LEN_08BIT, 0x00, 0x64}, /* Write edid sram select */
+	{0xF6, CRL_REG_LEN_08BIT, 0x6C, 0xE0}, /* Write edid map bus address */
+
+	{0x00*4, CRL_REG_LEN_32BIT, 0x00FFFFFF, 0x6C}, /* EDID programming */
+	{0x01*4, CRL_REG_LEN_32BIT, 0xFFFFFF00, 0x6C}, /* EDID programming */
+	{0x02*4, CRL_REG_LEN_32BIT, 0x4DD90100, 0x6C}, /* EDID programming */
+	{0x03*4, CRL_REG_LEN_32BIT, 0x00000000, 0x6C}, /* EDID programming */
+	{0x04*4, CRL_REG_LEN_32BIT, 0x00110103, 0x6C}, /* EDID programming */
+	{0x05*4, CRL_REG_LEN_32BIT, 0x80000078, 0x6C}, /* EDID programming */
+	{0x06*4, CRL_REG_LEN_32BIT, 0x0A0DC9A0, 0x6C}, /* EDID programming */
+	{0x07*4, CRL_REG_LEN_32BIT, 0x57479827, 0x6C}, /* EDID programming */
+	{0x08*4, CRL_REG_LEN_32BIT, 0x12484C00, 0x6C}, /* EDID programming */
+	{0x09*4, CRL_REG_LEN_32BIT, 0x00000101, 0x6C}, /* EDID programming */
+	{0x0A*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */
+	{0x0B*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */
+	{0x0C*4, CRL_REG_LEN_32BIT, 0x01010101, 0x6C}, /* EDID programming */
+	{0x0D*4, CRL_REG_LEN_32BIT, 0x0101011D, 0x6C}, /* EDID programming */
+	{0x0E*4, CRL_REG_LEN_32BIT, 0x80D0721C, 0x6C}, /* EDID programming */
+	{0x0F*4, CRL_REG_LEN_32BIT, 0x1620102C, 0x6C}, /* EDID programming */
+	{0x10*4, CRL_REG_LEN_32BIT, 0x2580C48E, 0x6C}, /* EDID programming */
+	{0x11*4, CRL_REG_LEN_32BIT, 0x2100009E, 0x6C}, /* EDID programming */
+	{0x12*4, CRL_REG_LEN_32BIT, 0x011D8018, 0x6C}, /* EDID programming */
+	{0x13*4, CRL_REG_LEN_32BIT, 0x711C1620, 0x6C}, /* EDID programming */
+	{0x14*4, CRL_REG_LEN_32BIT, 0x582C2500, 0x6C}, /* EDID programming */
+	{0x15*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */
+	{0x16*4, CRL_REG_LEN_32BIT, 0x009E0000, 0x6C}, /* EDID programming */
+	{0x17*4, CRL_REG_LEN_32BIT, 0x00FC0048, 0x6C}, /* EDID programming */
+	{0x18*4, CRL_REG_LEN_32BIT, 0x444D4920, 0x6C}, /* EDID programming */
+	{0x19*4, CRL_REG_LEN_32BIT, 0x4C4C430A, 0x6C}, /* EDID
programming */ + {0x1A*4, CRL_REG_LEN_32BIT, 0x20202020, 0x6C}, /* EDID programming */ + {0x1B*4, CRL_REG_LEN_32BIT, 0x000000FD, 0x6C}, /* EDID programming */ + {0x1C*4, CRL_REG_LEN_32BIT, 0x003B3D0F, 0x6C}, /* EDID programming */ + {0x1D*4, CRL_REG_LEN_32BIT, 0x2D08000A, 0x6C}, /* EDID programming */ + {0x1E*4, CRL_REG_LEN_32BIT, 0x20202020, 0x6C}, /* EDID programming */ + {0x1F*4, CRL_REG_LEN_32BIT, 0x202001C1, 0x6C}, /* EDID programming */ + {0x20*4, CRL_REG_LEN_32BIT, 0x02031E77, 0x6C}, /* EDID programming */ + {0x21*4, CRL_REG_LEN_32BIT, 0x4F941305, 0x6C}, /* EDID programming */ + {0x22*4, CRL_REG_LEN_32BIT, 0x03040201, 0x6C}, /* EDID programming */ + {0x23*4, CRL_REG_LEN_32BIT, 0x16150706, 0x6C}, /* EDID programming */ + {0x24*4, CRL_REG_LEN_32BIT, 0x1110121F, 0x6C}, /* EDID programming */ + {0x25*4, CRL_REG_LEN_32BIT, 0x23090701, 0x6C}, /* EDID programming */ + {0x26*4, CRL_REG_LEN_32BIT, 0x65030C00, 0x6C}, /* EDID programming */ + {0x27*4, CRL_REG_LEN_32BIT, 0x10008C0A, 0x6C}, /* EDID programming */ + {0x28*4, CRL_REG_LEN_32BIT, 0xD0902040, 0x6C}, /* EDID programming */ + {0x29*4, CRL_REG_LEN_32BIT, 0x31200C40, 0x6C}, /* EDID programming */ + {0x2A*4, CRL_REG_LEN_32BIT, 0x5500138E, 0x6C}, /* EDID programming */ + {0x2B*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x2C*4, CRL_REG_LEN_32BIT, 0x011D00BC, 0x6C}, /* EDID programming */ + {0x2D*4, CRL_REG_LEN_32BIT, 0x52D01E20, 0x6C}, /* EDID programming */ + {0x2E*4, CRL_REG_LEN_32BIT, 0xB8285540, 0x6C}, /* EDID programming */ + {0x2F*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x30*4, CRL_REG_LEN_32BIT, 0x001E8C0A, 0x6C}, /* EDID programming */ + {0x31*4, CRL_REG_LEN_32BIT, 0xD08A20E0, 0x6C}, /* EDID programming */ + {0x32*4, CRL_REG_LEN_32BIT, 0x2D10103E, 0x6C}, /* EDID programming */ + {0x33*4, CRL_REG_LEN_32BIT, 0x9600C48E, 0x6C}, /* EDID programming */ + {0x34*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x35*4, CRL_REG_LEN_32BIT, 0x011D0072, 0x6C}, /* EDID programming */ + {0x36*4, CRL_REG_LEN_32BIT, 0x51D01E20, 0x6C}, /* EDID programming */ + {0x37*4, CRL_REG_LEN_32BIT, 0x6E285500, 0x6C}, /* EDID programming */ + {0x38*4, CRL_REG_LEN_32BIT, 0xC48E2100, 0x6C}, /* EDID programming */ + {0x39*4, CRL_REG_LEN_32BIT, 0x001E8C0A, 0x6C}, /* EDID programming */ + {0x3A*4, CRL_REG_LEN_32BIT, 0xD08A20E0, 0x6C}, /* EDID programming */ + {0x3B*4, CRL_REG_LEN_32BIT, 0x2D10103E, 0x6C}, /* EDID programming */ + {0x3C*4, CRL_REG_LEN_32BIT, 0x9600138E, 0x6C}, /* EDID programming */ + {0x3D*4, CRL_REG_LEN_32BIT, 0x21000018, 0x6C}, /* EDID programming */ + {0x3E*4, CRL_REG_LEN_32BIT, 0x00000000, 0x6C}, /* EDID programming */ + {0x3F*4, CRL_REG_LEN_32BIT, 0x000000CB, 0x6C}, /* EDID programming */ +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_rgb565[] = { + {0x04, CRL_REG_LEN_08BIT, 0x02, 0xE0}, /* RGB Out of CP */ + {0x12, CRL_REG_LEN_08BIT, 0xF0, 0xE0}, /* CSC Depends on ip Packets - + SDR 444 */ + {0x17, CRL_REG_LEN_08BIT, 0xB8, 0xE0}, /* Configure for RGB565 & Luma & + Chroma Values Can Reach 254d */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI Required Write */ + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & + Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xDD, 0xE0}, /* LLC/PIX/SPI PINS TRISTATED + AUD Outputs Enabled */ + {0xDB, CRL_REG_LEN_08BIT, 0x10, 0x94}, /* ADI Required Write */ + /* Enable 4-lane CSI TXB & Pixel Port */ + {0x7E, CRL_REG_LEN_08BIT, 0x98, 0x94}, /* ADI 
Required Write */ +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_rgb888[] = { + {0x04, CRL_REG_LEN_08BIT, 0x02, 0xE0}, /* RGB Out of CP */ + {0x12, CRL_REG_LEN_08BIT, 0xF0, 0xE0}, /* CSC Depends on ip Packets - + SDR 444 */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* Luma & Chroma Values Can + Reach 254d */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI Required Write */ + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & + Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xDD, 0xE0}, /* LLC/PIX/SPI PINS TRISTATED + AUD Outputs Enabled */ + {0xDB, CRL_REG_LEN_08BIT, 0x10, 0x94}, /* ADI Required Write */ + {0x7E, CRL_REG_LEN_08BIT, 0x1B, 0x94}, /* ADI Required Write */ +}; + + +static struct crl_register_write_rep adv7481_hdmi_mode_yuv[] = { + {0x04, CRL_REG_LEN_08BIT, 0x00, 0xE0}, /* YCrCb output */ + {0x12, CRL_REG_LEN_08BIT, 0xF2, 0xE0}, /* CSC Depends on ip Packets - + SDR422 set */ + {0x17, CRL_REG_LEN_08BIT, 0x80, 0xE0}, /* Luma & Chroma Values Can + Reach 254d */ + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, /* CP-Insert_AV_Code */ + {0x7C, CRL_REG_LEN_08BIT, 0x00, 0x44}, /* ADI Required Write */ + {0x0C, CRL_REG_LEN_08BIT, 0xE0, 0xE0}, /* Enable LLC_DLL & + Double LLC Timing */ + {0x0E, CRL_REG_LEN_08BIT, 0xDD, 0xE0}, /* LLC/PIX/SPI PINS TRISTATED + AUD Outputs Enabled */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + /* Enable 4-lane CSI TXB & Pixel Port */ + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0xDB, CRL_REG_LEN_08BIT, 0x10, 0x94}, /* ADI Required Write */ + {0x7E, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* ADI Required Write */ +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_1080p[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x5E, 0xE0}, + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, + {0x00, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x80, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x81, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_1080i[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + 
{0x05, CRL_REG_LEN_08BIT, 0x54, 0xE0}, + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, + {0x00, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x80, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x81, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_480p[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x4A, 0xE0}, + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, + {0x00, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x80, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x81, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_720p[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x53, 0xE0}, + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, + {0x00, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x80, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x81, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_576p[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x4B, 0xE0}, + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, + {0x00, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x80, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x81, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_576i[] = { + {0x00, CRL_REG_LEN_08BIT, 0x81, 0x94}, /* Enable 1-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA1, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, 
CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x21, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x41, 0xE0}, + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, + {0x00, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x80, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x81, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_480i[] = { + {0x00, CRL_REG_LEN_08BIT, 0x81, 0x94}, /* Enable 1-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA1, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x21, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x40, 0xE0}, + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, + {0x00, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x80, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x81, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_mode_vga[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0x10, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0xA0, 0xE0, 0xA0}, + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, + {0x1E, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x01, 0x94}, + {0x00, CRL_REG_LEN_08BIT, 0x24, 0x94}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, + {0xC9, CRL_REG_LEN_08BIT, 0x2D, 0x44}, + {0x05, CRL_REG_LEN_08BIT, 0x88, 0xE0}, + {0x03, CRL_REG_LEN_08BIT, 0x86, 0xE0}, + {0x00, CRL_REG_LEN_08BIT, 0x00, 0xE0}, + {0x04, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x80, 0xE0, 0xFD}, + {0x37, CRL_REG_LEN_08BIT, 0x81, 0x44}, +}; + +static struct crl_register_write_rep adv7481_hdmi_powerup_regset[] = { + {0x00, CRL_REG_LEN_08BIT, 0x84, 0x94}, /* Enable 4-lane MIPI */ + {0x00, CRL_REG_LEN_08BIT, 0xA4, 0x94}, /* Set Auto DPHY Timing */ + {0xDB, CRL_REG_LEN_08BIT, 0x10, 0x94}, /* ADI Required Write */ + {0xD6, CRL_REG_LEN_08BIT, 0x07, 0x94}, /* ADI Required Write */ + {0xC4, CRL_REG_LEN_08BIT, 0x0A, 0x94}, /* ADI Required Write */ + {0x71, CRL_REG_LEN_08BIT, 0x33, 0x94}, /* ADI Required Write */ + {0x72, CRL_REG_LEN_08BIT, 0x11, 0x94}, /* ADI Required Write */ + {0xF0, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* i2c_dphy_pwdn - 1'b0 */ + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, /* ADI Required Write */ + {0x1E, CRL_REG_LEN_08BIT, 0xC0, 0x94}, + /* ADI Required Write, transmit only Frame Start/End packets */ + {0xDA, CRL_REG_LEN_08BIT, 0x01, 
0x94}, /* i2c_mipi_pll_en - 1'b1 */ +}; + +static struct crl_register_write_rep adv7481_hdmi_streamon_regs[] = { + {0x00, CRL_REG_LEN_DELAY, 0x02, 0x00}, + {0x00, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x21, 0x94, 0xF8}, + /* Power-up CSI-TX */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0xC1, CRL_REG_LEN_08BIT, 0x2B, 0x94}, /* ADI recommended setting */ + {0x00, CRL_REG_LEN_DELAY, 0x01, 0x00}, + {0x31, CRL_REG_LEN_08BIT, 0x80, 0x94}, /* ADI recommended setting */ +}; + +static struct crl_register_write_rep adv7481_hdmi_streamoff_regs[] = { + {0x31, CRL_REG_LEN_08BIT, 0x82, 0x94}, /* ADI Recommended Write */ + {0x1E, CRL_REG_LEN_08BIT, 0x00, 0x94}, /* Reset the clock Lane */ + {0x00, CRL_REG_LEN_08BIT, 0xA1, 0x94}, + {0xDA, CRL_REG_LEN_08BIT, 0x00, 0x94}, + /* i2c_mipi_pll_en -1'b0 Disable MIPI PLL */ + {0xC1, CRL_REG_LEN_08BIT, 0x3B, 0x94}, +}; + +static struct crl_pll_configuration adv7481_hdmi_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 297000000, + .bitsperpixel = 16, + .pixel_rate_csi = 594000000, + .pixel_rate_pa = 594000000, + }, + { + .input_clk = 24000000, + .op_sys_clk = 445500000, + .bitsperpixel = 24, + .pixel_rate_csi = 891000000, + .pixel_rate_pa = 891000000, + }, + /* 28.636 input clock */ + { + .input_clk = 286363636, + .op_sys_clk = 297000000, + .bitsperpixel = 16, + .pixel_rate_csi = 148500000, + .pixel_rate_pa = 297000000, + }, + { + .input_clk = 286363636, + .op_sys_clk = 297000000, + .bitsperpixel = 24, + .pixel_rate_csi = 148500000, + .pixel_rate_pa = 297000000, + }, + { + .input_clk = 286363636, + .op_sys_clk = 148500000, + .bitsperpixel = 16, + .pixel_rate_csi = 74250000, + .pixel_rate_pa = 148500000, + .csi_lanes = 4, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_1080p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_720p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_VGA_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 640, + .out_rect.height = 480, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_1080i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + 
.in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 540, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_480p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 480, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_480i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 240, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_576p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 576, + }, +}; + +static struct crl_subdev_rect_rep adv7481_hdmi_576i_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 720, + .out_rect.height = 288, + }, +}; +static struct crl_mode_rep adv7481_hdmi_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_1080p_rects), + .sd_rects = adv7481_hdmi_1080p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_1080p), + .mode_regs = adv7481_hdmi_mode_1080p, + .comp_items = 1, + .ctrl_data = &hdmi_ctrl_data_lanes[0], + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_720p_rects), + .sd_rects = adv7481_hdmi_720p_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_720p), + .mode_regs = adv7481_hdmi_mode_720p, + .comp_items = 1, + .ctrl_data = &hdmi_ctrl_data_lanes[0], + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_VGA_rects), + .sd_rects = adv7481_hdmi_VGA_rects, + .binn_hor = 3, + .binn_vert = 2, + .scale_m = 1, + .width = 640, + .height = 480, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_vga), 
+ .mode_regs = adv7481_hdmi_mode_vga, + .comp_items = 1, + .ctrl_data = &hdmi_ctrl_data_lanes[0], + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_1080i_rects), + .sd_rects = adv7481_hdmi_1080i_rects, + .binn_hor = 1, + .binn_vert = 2, + .scale_m = 1, + .width = 1920, + .height = 540, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_1080i), + .mode_regs = adv7481_hdmi_mode_1080i, + .comp_items = 1, + .ctrl_data = &hdmi_ctrl_data_lanes[0], + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_480p_rects), + .sd_rects = adv7481_hdmi_480p_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 720, + .height = 480, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_480p), + .mode_regs = adv7481_hdmi_mode_480p, + .comp_items = 1, + .ctrl_data = &hdmi_ctrl_data_lanes[0], + }, + + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_480i_rects), + .sd_rects = adv7481_hdmi_480i_rects, + .binn_hor = 2, + .binn_vert = 4, + .scale_m = 1, + .width = 720, + .height = 240, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_480i), + .mode_regs = adv7481_hdmi_mode_480i, + .comp_items = 1, + .ctrl_data = &hdmi_ctrl_data_lanes[2], + }, + + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_576p_rects), + .sd_rects = adv7481_hdmi_576p_rects, + .binn_hor = 2, + .binn_vert = 1, + .scale_m = 1, + .width = 720, + .height = 576, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_576p), + .mode_regs = adv7481_hdmi_mode_576p, + .comp_items = 1, + .ctrl_data = &hdmi_ctrl_data_lanes[0], + }, + { + .sd_rects_items = ARRAY_SIZE(adv7481_hdmi_576i_rects), + .sd_rects = adv7481_hdmi_576i_rects, + .binn_hor = 2, + .binn_vert = 3, + .scale_m = 1, + .width = 720, + .height = 288, + .mode_regs_items = ARRAY_SIZE(adv7481_hdmi_mode_576i), + .mode_regs = adv7481_hdmi_mode_576i, + .comp_items = 1, + .ctrl_data = &hdmi_ctrl_data_lanes[2], + }, +}; + +static struct crl_sensor_subdev_config adv7481_hdmi_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "adv7481-hdmi binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "adv7481-hdmi pixel array", + }, +}; + +static struct crl_sensor_limits adv7481_hdmi_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1920, + .y_addr_max = 1080, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 1, + .scaler_m_max = 1, + .scaler_n_min = 1, + .scaler_n_max = 1, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 1, +}; + +static struct crl_csi_data_fmt adv7481_hdmi_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_RGB565_1X16, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(adv7481_hdmi_mode_rgb565), + .regs = adv7481_hdmi_mode_rgb565, + }, + { + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + .regs_items = ARRAY_SIZE(adv7481_hdmi_mode_yuv), + .regs = adv7481_hdmi_mode_yuv, + }, + { + .code = MEDIA_BUS_FMT_RGB888_1X24, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 24, + .regs_items = ARRAY_SIZE(adv7481_hdmi_mode_rgb888), + .regs = adv7481_hdmi_mode_rgb888, + }, +}; + +static struct crl_v4l2_ctrl adv7481_hdmi_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .impact = CRL_IMPACTS_NO_IMPACT, + }, + { + .sd_type = 
CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_MIPI_LANES, + .name = "V4L2_CID_MIPI_LANES", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1, + .data.std_data.max = 4, + .data.std_data.step = 1, + .data.std_data.def = 4, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + }, +}; + +int adv7481_sensor_init(struct i2c_client *); +int adv7481_sensor_cleanup(struct i2c_client *); + +static struct crl_sensor_configuration adv7481_hdmi_crl_configuration = { + + .sensor_init = adv7481_sensor_init, + .sensor_cleanup = adv7481_sensor_cleanup, + + .onetime_init_regs_items = + ARRAY_SIZE(adv7481_hdmi_onetime_init_regset), + .onetime_init_regs = adv7481_hdmi_onetime_init_regset, + + .powerup_regs_items = ARRAY_SIZE(adv7481_hdmi_powerup_regset), + .powerup_regs = adv7481_hdmi_powerup_regset, + + .poweroff_regs_items = ARRAY_SIZE(adv7481_hdmi_streamoff_regs), + .poweroff_regs = adv7481_hdmi_streamoff_regs, + + .subdev_items = ARRAY_SIZE(adv7481_hdmi_sensor_subdevs), + .subdevs = adv7481_hdmi_sensor_subdevs, + + .sensor_limits = &adv7481_hdmi_sensor_limits, + + .pll_config_items = ARRAY_SIZE(adv7481_hdmi_pll_configurations), + .pll_configs = adv7481_hdmi_pll_configurations, + + .modes_items = ARRAY_SIZE(adv7481_hdmi_modes), + .modes = adv7481_hdmi_modes, + + .streamon_regs_items = ARRAY_SIZE(adv7481_hdmi_streamon_regs), + .streamon_regs = adv7481_hdmi_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(adv7481_hdmi_streamoff_regs), + .streamoff_regs = adv7481_hdmi_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(adv7481_hdmi_v4l2_ctrls), + .v4l2_ctrl_bank = adv7481_hdmi_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(adv7481_hdmi_crl_csi_data_fmt), + .csi_fmts = adv7481_hdmi_crl_csi_data_fmt, + + .irq_in_use = false, + .crl_irq_fn = NULL, + .crl_threaded_irq_fn = crl_adv7481_threaded_irq_fn, + + .addr_len = CRL_ADDR_7BIT, + .i2c_mutex_in_use = true, +}; + +#endif /* __CRLMODULE_ADV7481_HDMI_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ar0231at_configuration.h b/drivers/media/i2c/crlmodule/crl_ar0231at_configuration.h new file mode 100644 index 000000000000..fc639c060b50 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ar0231at_configuration.h @@ -0,0 +1,1958 @@ +/* + * Copyright (c) 2018 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __CRLMODULE_AR0231AT_CONFIGURATION_H_ +#define __CRLMODULE_AR0231AT_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +struct crl_pll_configuration ar0231at_pll_configurations[] = { + { + .input_clk = 27000000, + .op_sys_clk = 87750000, + .bitsperpixel = 12, + .pixel_rate_csi = 176000000, + .pixel_rate_pa = 176000000, /* pixel_rate = op_sys_clk*2 *csi_lanes/bitsperpixel */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = 0, + }, +}; + +struct crl_sensor_subdev_config ar0231at_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ar0231at binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ar0231at pixel array", + }, +}; + +struct crl_subdev_rect_rep ar0231at_1920_1088_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1088, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1088, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1088, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1088, + } +}; + +/* + * Exposure mode: + * 0: Linear mode + * 1: 2-HDR mode + * 2: 3-HDR mode + * 3: 4-HDR mode + */ +struct crl_ctrl_data_pair ar0231at_ctrl_data_modes[] = { + { + .ctrl_id = CRL_CID_EXPOSURE_MODE, + .data = 0, + }, + { + .ctrl_id = CRL_CID_EXPOSURE_MODE, + .data = 1, + }, + { + .ctrl_id = CRL_CID_EXPOSURE_MODE, + .data = 2, + }, + { + .ctrl_id = CRL_CID_EXPOSURE_MODE, + .data = 3, + }, +}; + +static struct crl_register_write_rep ar0231at_1920_1088_linear_mode[] = { + { 0x301A, CRL_REG_LEN_16BIT, 0x1058, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x3092, CRL_REG_LEN_16BIT, 0x0C24, 0x10 }, + { 0x337A, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3520, CRL_REG_LEN_16BIT, 0x1288, 0x10 }, + { 0x3522, CRL_REG_LEN_16BIT, 0x880C, 0x10 }, + { 0x3524, CRL_REG_LEN_16BIT, 0x0C12, 0x10 }, + { 0x352C, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x354A, CRL_REG_LEN_16BIT, 0x007F, 0x10 }, + { 0x350C, CRL_REG_LEN_16BIT, 0x055C, 0x10 }, + { 0x3506, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3508, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3100, CRL_REG_LEN_16BIT, 0x4000, 0x10 }, + { 0x3280, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3282, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3284, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3286, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3288, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3290, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3292, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3294, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3296, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3298, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x2512, CRL_REG_LEN_16BIT, 0x8000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 
0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3350, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1578, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7B24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xEA24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1022, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2410, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x155A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24EA, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2324, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x647A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2404, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x052C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x400A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3851, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0801, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0408, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1180, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2652, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1518, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0906, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1348, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1002, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1016, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1181, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1189, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0D09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1413, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2B15, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0311, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1409, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0110, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDD11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x9B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0F11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1A12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xE609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4012, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x6009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x290B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0904, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0923, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x092C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1388, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C14, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1112, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBF11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB10, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xB812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xA012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3053, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8111, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x010D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0815, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1313, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0515, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0213, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0411, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0814, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD908, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x091A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0903, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10D6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DD, 
0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11D9, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0917, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0913, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x121A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1250, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1076, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10E6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1240, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0925, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13AD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0902, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0907, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x138D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0914, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B13, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1C0C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0920, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1262, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1066, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x090A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x093B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1263, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1508, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11B8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x12A0, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1200, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1026, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1300, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1100, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x437A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B05, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0708, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4137, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x502C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2CFE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15FE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C2C, 0x10 }, + { 0x32E6, CRL_REG_LEN_16BIT, 0x00E0, 0x10 }, + { 0x1008, CRL_REG_LEN_16BIT, 0x036F, 0x10 }, + { 0x100C, CRL_REG_LEN_16BIT, 0x058F, 0x10 }, + { 0x100E, CRL_REG_LEN_16BIT, 0x07AF, 0x10 }, + { 0x1010, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x3230, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x3232, CRL_REG_LEN_16BIT, 0x0532, 0x10 }, + { 0x3234, CRL_REG_LEN_16BIT, 0x0752, 0x10 }, + { 0x3236, CRL_REG_LEN_16BIT, 0x00F2, 0x10 }, + { 0x3566, CRL_REG_LEN_16BIT, 0x3328, 0x10 }, + { 0x32D0, CRL_REG_LEN_16BIT, 0x3A02, 0x10 }, + { 0x32D2, CRL_REG_LEN_16BIT, 0x3508, 0x10 }, + { 0x32D4, 
CRL_REG_LEN_16BIT, 0x3702, 0x10 }, + { 0x32D6, CRL_REG_LEN_16BIT, 0x3C04, 0x10 }, + { 0x32DC, CRL_REG_LEN_16BIT, 0x370A, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x302A, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x302C, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x302E, CRL_REG_LEN_16BIT, 0x0003, 0x10 }, + { 0x3030, CRL_REG_LEN_16BIT, 0x004E, 0x10 }, + { 0x3036, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3038, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x30A2, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30A6, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3180, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E4, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3004, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x3008, CRL_REG_LEN_16BIT, 0x0783, 0x10 }, + { 0x3002, CRL_REG_LEN_16BIT, 0x003C, 0x10 }, + { 0x3006, CRL_REG_LEN_16BIT, 0x047B, 0x10 }, + { 0x3032, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3400, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0F10, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x0970, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F1, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F0, 0x10 }, + { 0x300C, CRL_REG_LEN_16BIT, 0x0872, 0x10 }, + { 0x300A, CRL_REG_LEN_16BIT, 0x054A, 0x10 }, + { 0x3042, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3012, CRL_REG_LEN_16BIT, 0x0163, 0x10 }, + { 0x3014, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C08, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x31D0, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AC, CRL_REG_LEN_16BIT, 0x0C0C, 0x10 }, + /* try sync mode */ + { 0x340A, CRL_REG_LEN_16BIT, 0x0077, 0x10 }, + { 0x340C, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x30CE, CRL_REG_LEN_16BIT, 0x0120, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x19DC, 0x10 }, +}; + +static struct crl_register_write_rep ar0231at_1920_1088_2hdr_mode[] = { + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 100, 0x10 }, + { 0x3092, CRL_REG_LEN_16BIT, 0x0C24, 0x10 }, + { 0x337A, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3520, CRL_REG_LEN_16BIT, 0x1288, 0x10 }, + { 0x3522, CRL_REG_LEN_16BIT, 0x880C, 0x10 }, + { 0x3524, CRL_REG_LEN_16BIT, 0x0C12, 0x10 }, + { 0x352C, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x354A, CRL_REG_LEN_16BIT, 0x007F, 0x10 }, + { 0x350C, CRL_REG_LEN_16BIT, 0x055C, 0x10 }, + { 0x3506, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3508, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3100, CRL_REG_LEN_16BIT, 0x4000, 0x10 }, + { 0x3280, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3282, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3284, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3286, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3288, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3290, CRL_REG_LEN_16BIT, 0x0FA0, 
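+ /*
+  * Editorial note on the row format, inferred from this patch rather
+  * than stated in it: each entry is { register, length/flags, value,
+  * 7-bit I2C address }. The 0x10 in the last column is the sensor's
+  * own address (the ar023z table later in this patch carries named
+  * I2C addresses in the same field), and CRL_REG_LEN_DELAY rows are
+  * sleeps instead of writes, with the third field presumably a delay
+  * in milliseconds.
+  */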
0x10 }, + { 0x3292, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3294, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3296, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3298, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x2512, CRL_REG_LEN_16BIT, 0x8000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3350, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1578, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7B24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xEA24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1022, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2410, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x155A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24EA, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2324, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x647A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2404, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x052C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x400A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3851, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0801, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0408, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1180, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2652, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1518, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0906, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1348, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1002, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1016, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1181, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1189, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0D09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1413, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2B15, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0311, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1409, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0110, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDD11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x9B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0F11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1A12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xE609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x290B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0904, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0923, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x092C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1388, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C14, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1112, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBF11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB10, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xB812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xA012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3053, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8111, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x010D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0815, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1313, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0515, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0213, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0411, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0814, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD908, 
0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x091A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0903, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10D6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11D9, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0917, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0913, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x121A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1250, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1076, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10E6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1240, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0925, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13AD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0902, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0907, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x138D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0914, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B13, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1C0C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0920, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1262, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1066, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x090A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x093B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1263, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1508, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11B8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x12A0, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1200, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1026, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1300, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1100, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x437A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B05, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0708, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4137, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x502C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2CFE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15FE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C2C, 0x10 }, + { 0x32E6, CRL_REG_LEN_16BIT, 0x00E0, 0x10 }, + { 0x1008, 
CRL_REG_LEN_16BIT, 0x036F, 0x10 }, + { 0x100C, CRL_REG_LEN_16BIT, 0x058F, 0x10 }, + { 0x100E, CRL_REG_LEN_16BIT, 0x07AF, 0x10 }, + { 0x1010, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x3230, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x3232, CRL_REG_LEN_16BIT, 0x0532, 0x10 }, + { 0x3234, CRL_REG_LEN_16BIT, 0x0752, 0x10 }, + { 0x3236, CRL_REG_LEN_16BIT, 0x00F2, 0x10 }, + { 0x3566, CRL_REG_LEN_16BIT, 0x3328, 0x10 }, + { 0x32D0, CRL_REG_LEN_16BIT, 0x3A02, 0x10 }, + { 0x32D2, CRL_REG_LEN_16BIT, 0x3508, 0x10 }, + { 0x32D4, CRL_REG_LEN_16BIT, 0x3702, 0x10 }, + { 0x32D6, CRL_REG_LEN_16BIT, 0x3C04, 0x10 }, + { 0x32DC, CRL_REG_LEN_16BIT, 0x370A, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x302A, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x302C, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x302E, CRL_REG_LEN_16BIT, 0x0003, 0x10 }, + { 0x3030, CRL_REG_LEN_16BIT, 0x004E, 0x10 }, + { 0x3036, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3038, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x30A2, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30A6, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F1, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3180, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E4, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3004, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x3008, CRL_REG_LEN_16BIT, 0x0783, 0x10 }, + { 0x3002, CRL_REG_LEN_16BIT, 0x003C, 0x10 }, + { 0x3006, CRL_REG_LEN_16BIT, 0x047B, 0x10 }, + { 0x3032, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3400, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0788, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0F10, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x04B8, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x0970, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F1, 0x10 }, + { 0x300C, CRL_REG_LEN_16BIT, 0x0872, 0x10 }, + { 0x300A, CRL_REG_LEN_16BIT, 0x054A, 0x10 }, + { 0x3042, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3012, CRL_REG_LEN_16BIT, 0x0163, 0x10 }, + { 0x3014, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x321E, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x3222, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x31D0, CRL_REG_LEN_16BIT, 0x0001, 
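+ /*
+  * Editorial note, an assumption from related AR0xxx register maps and
+  * not verified against the AR0231AT datasheet: the closing writes of
+  * each mode table look like output-format setup. R0x31D0 is commonly
+  * COMPANDING (written 0 in the linear table, 1 in the HDR tables),
+  * R0x31AE SERIAL_FORMAT, and R0x31AC DATA_FORMAT_BITS, where 0x140C
+  * would select 20-bit HDR pixels companded to a 12-bit output versus
+  * 0x0C0C (12-bit in, 12-bit out) in the linear mode.
+  */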
0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0201, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AC, CRL_REG_LEN_16BIT, 0x140C, 0x10 }, + { 0x340A, CRL_REG_LEN_16BIT, 0x0077, 0x10 }, + { 0x340C, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x30CE, CRL_REG_LEN_16BIT, 0x0120, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x19DC, 0x10 }, +}; + +static struct crl_register_write_rep ar0231at_1920_1088_3hdr_mode[] = { + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 100, 0x10 }, + { 0x3092, CRL_REG_LEN_16BIT, 0x0C24, 0x10 }, + { 0x337A, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3520, CRL_REG_LEN_16BIT, 0x1288, 0x10 }, + { 0x3522, CRL_REG_LEN_16BIT, 0x880C, 0x10 }, + { 0x3524, CRL_REG_LEN_16BIT, 0x0C12, 0x10 }, + { 0x352C, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x354A, CRL_REG_LEN_16BIT, 0x007F, 0x10 }, + { 0x350C, CRL_REG_LEN_16BIT, 0x055C, 0x10 }, + { 0x3506, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3508, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3100, CRL_REG_LEN_16BIT, 0x4000, 0x10 }, + { 0x3280, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3282, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3284, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3286, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3288, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3290, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3292, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3294, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3296, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3298, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x2512, CRL_REG_LEN_16BIT, 0x8000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3350, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1578, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7B24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xEA24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1022, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2410, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x155A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24EA, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2324, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x647A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2404, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x052C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x400A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3851, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0801, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0408, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1180, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2652, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1518, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0906, 
0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1348, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1002, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1016, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1181, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1189, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0D09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1413, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2B15, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0311, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1409, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0110, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDD11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x9B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0F11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1A12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xE609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x290B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0904, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0923, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x092C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1388, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C14, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1112, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBF11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB10, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xB812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xA012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3053, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x4215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8111, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x010D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0815, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1313, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0515, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0213, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0411, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0814, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD908, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x091A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0903, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10D6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11D9, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0917, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0913, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x121A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1250, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1076, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10E6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1240, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0925, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13AD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0902, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0907, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x138D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0914, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B13, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1C0C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0920, 
0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1262, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1066, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x090A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x093B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1263, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1508, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11B8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x12A0, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1200, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1026, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1300, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1100, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x437A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B05, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0708, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4137, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x502C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2CFE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15FE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C2C, 0x10 }, + { 0x32E6, CRL_REG_LEN_16BIT, 0x00E0, 0x10 }, + { 0x1008, CRL_REG_LEN_16BIT, 0x036F, 0x10 }, + { 0x100C, CRL_REG_LEN_16BIT, 0x058F, 0x10 }, + { 0x100E, CRL_REG_LEN_16BIT, 0x07AF, 0x10 }, + { 0x1010, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x3230, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x3232, CRL_REG_LEN_16BIT, 0x0532, 0x10 }, + { 0x3234, CRL_REG_LEN_16BIT, 0x0752, 0x10 }, + { 0x3236, CRL_REG_LEN_16BIT, 0x00F2, 0x10 }, + { 0x3566, CRL_REG_LEN_16BIT, 0x3328, 0x10 }, + { 0x32D0, CRL_REG_LEN_16BIT, 0x3A02, 0x10 }, + { 0x32D2, CRL_REG_LEN_16BIT, 0x3508, 0x10 }, + { 0x32D4, CRL_REG_LEN_16BIT, 0x3702, 0x10 }, + { 0x32D6, CRL_REG_LEN_16BIT, 0x3C04, 0x10 }, + { 0x32DC, CRL_REG_LEN_16BIT, 0x370A, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x302A, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x302C, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x302E, CRL_REG_LEN_16BIT, 0x0003, 0x10 }, + { 0x3030, CRL_REG_LEN_16BIT, 0x004E, 0x10 }, + { 0x3036, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3038, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x30A2, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30A6, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3180, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E4, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E0, 
CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3004, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x3008, CRL_REG_LEN_16BIT, 0x0783, 0x10 }, + { 0x3002, CRL_REG_LEN_16BIT, 0x003C, 0x10 }, + { 0x3006, CRL_REG_LEN_16BIT, 0x047B, 0x10 }, + { 0x3032, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3400, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0788, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0F10, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x04B8, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x0970, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x300C, CRL_REG_LEN_16BIT, 0x0872, 0x10 }, + { 0x300A, CRL_REG_LEN_16BIT, 0x054A, 0x10 }, + { 0x3042, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3012, CRL_REG_LEN_16BIT, 0x0163, 0x10 }, + { 0x3014, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x321E, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x3222, CRL_REG_LEN_16BIT, 0x0882, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x31D0, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0201, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AC, CRL_REG_LEN_16BIT, 0x140C, 0x10 }, + { 0x340A, CRL_REG_LEN_16BIT, 0x0077, 0x10 }, + { 0x340C, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x30CE, CRL_REG_LEN_16BIT, 0x0120, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x19DC, 0x10 }, +}; + +static struct crl_register_write_rep ar0231at_1920_1088_4hdr_mode[] = { + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 100, 0x10 }, + { 0x3092, CRL_REG_LEN_16BIT, 0x0C24, 0x10 }, + { 0x337A, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3520, CRL_REG_LEN_16BIT, 0x1288, 0x10 }, + { 0x3522, CRL_REG_LEN_16BIT, 0x880C, 0x10 }, + { 0x3524, CRL_REG_LEN_16BIT, 0x0C12, 0x10 }, + { 0x352C, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x354A, CRL_REG_LEN_16BIT, 0x007F, 0x10 }, + { 0x350C, CRL_REG_LEN_16BIT, 0x055C, 0x10 }, + { 0x3506, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3508, CRL_REG_LEN_16BIT, 0x3333, 0x10 }, + { 0x3100, CRL_REG_LEN_16BIT, 0x4000, 0x10 }, + { 0x3280, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3282, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3284, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3286, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3288, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x328E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3290, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3292, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3294, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3296, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x3298, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329A, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329C, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x329E, CRL_REG_LEN_16BIT, 0x0FA0, 0x10 }, + { 0x301A, CRL_REG_LEN_16BIT, 0x10D8, 0x10 }, + { 0x0000, CRL_REG_LEN_DELAY, 200, 0x10 }, + { 0x2512, 
CRL_REG_LEN_16BIT, 0x8000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3350, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1578, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7B24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xEA24, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1022, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2410, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x155A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24FF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x24EA, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2324, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x647A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2404, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x052C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x400A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFF0A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3851, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0801, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0408, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1180, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2652, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1518, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0906, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1348, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1002, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1016, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1181, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1189, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0D09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1413, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2B15, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0311, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1409, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0110, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDD11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xDB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x9B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0F11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1A12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x7610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xE609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0812, 
0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x290B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0904, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0923, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13C8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x092C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1388, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C14, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1112, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6212, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBF11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB10, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xFB09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3511, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xBB12, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6312, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x6014, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xB812, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xA012, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2610, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0011, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x3053, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4010, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1611, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8111, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8910, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5612, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x010D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0815, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD013, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x5009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1313, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0215, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC015, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0515, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8813, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0213, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0411, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xC909, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0814, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0109, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B11, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0xD908, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x091A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1440, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0903, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1214, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10D6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1212, 0x10 }, + { 0x2510, 
CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11D9, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1056, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0917, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11DB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0913, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0905, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x121A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1210, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1460, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1250, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1076, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x10E6, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13A8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1240, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0925, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x13AD, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0902, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0907, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1588, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0901, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x138D, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B09, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0914, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4009, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B13, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x8809, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1C0C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0920, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1262, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BF, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1066, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x090A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11FB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x093B, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11BB, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1263, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1260, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1400, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1508, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x11B8, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x12A0, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1200, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1026, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1000, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1300, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x1100, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x437A, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0609, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0B05, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0708, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x4137, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x502C, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x2CFE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x15FE, 0x10 }, + { 0x2510, CRL_REG_LEN_16BIT, 0x0C2C, 0x10 }, + { 0x32E6, CRL_REG_LEN_16BIT, 0x00E0, 0x10 }, + { 0x1008, CRL_REG_LEN_16BIT, 0x036F, 0x10 }, + { 0x100C, CRL_REG_LEN_16BIT, 0x058F, 0x10 }, + { 0x100E, CRL_REG_LEN_16BIT, 0x07AF, 0x10 }, + { 0x1010, CRL_REG_LEN_16BIT, 0x014F, 0x10 }, + { 0x3230, CRL_REG_LEN_16BIT, 0x0312, 0x10 }, + { 0x3232, CRL_REG_LEN_16BIT, 0x0532, 0x10 }, + { 0x3234, CRL_REG_LEN_16BIT, 0x0752, 0x10 }, + { 0x3236, CRL_REG_LEN_16BIT, 0x00F2, 0x10 }, + { 0x3566, CRL_REG_LEN_16BIT, 0x3328, 0x10 }, + { 0x32D0, CRL_REG_LEN_16BIT, 0x3A02, 
0x10 }, + { 0x32D2, CRL_REG_LEN_16BIT, 0x3508, 0x10 }, + { 0x32D4, CRL_REG_LEN_16BIT, 0x3702, 0x10 }, + { 0x32D6, CRL_REG_LEN_16BIT, 0x3C04, 0x10 }, + { 0x32DC, CRL_REG_LEN_16BIT, 0x370A, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x302A, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x302C, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x302E, CRL_REG_LEN_16BIT, 0x0003, 0x10 }, + { 0x3030, CRL_REG_LEN_16BIT, 0x004E, 0x10 }, + { 0x3036, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3038, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x30A2, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x30A6, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3040, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x0008, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F2, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3044, CRL_REG_LEN_16BIT, 0x0400, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x3064, CRL_REG_LEN_16BIT, 0x1802, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3180, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E4, CRL_REG_LEN_16BIT, 0x0080, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x33E0, CRL_REG_LEN_16BIT, 0x0C80, 0x10 }, + { 0x3004, CRL_REG_LEN_16BIT, 0x0004, 0x10 }, + { 0x3008, CRL_REG_LEN_16BIT, 0x0783, 0x10 }, + { 0x3002, CRL_REG_LEN_16BIT, 0x003C, 0x10 }, + { 0x3006, CRL_REG_LEN_16BIT, 0x047B, 0x10 }, + { 0x3032, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3400, CRL_REG_LEN_16BIT, 0x0010, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0788, 0x10 }, + { 0x3402, CRL_REG_LEN_16BIT, 0x0F10, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x04B8, 0x10 }, + { 0x3404, CRL_REG_LEN_16BIT, 0x0970, 0x10 }, + { 0x3082, CRL_REG_LEN_16BIT, 0x000C, 0x10 }, + { 0x30BA, CRL_REG_LEN_16BIT, 0x11F3, 0x10 }, + { 0x300C, CRL_REG_LEN_16BIT, 0x09B8, 0x10 }, + { 0x300A, CRL_REG_LEN_16BIT, 0x0498, 0x10 }, + { 0x3042, CRL_REG_LEN_16BIT, 0x0000, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3238, CRL_REG_LEN_16BIT, 0x0222, 0x10 }, + { 0x3012, CRL_REG_LEN_16BIT, 0x0131, 0x10 }, + { 0x3014, CRL_REG_LEN_16BIT, 0x098E, 0x10 }, + { 0x321E, CRL_REG_LEN_16BIT, 0x098E, 0x10 }, + { 0x3222, CRL_REG_LEN_16BIT, 0x098E, 0x10 }, + { 0x3226, CRL_REG_LEN_16BIT, 0x098E, 0x10 }, + { 0x30B0, CRL_REG_LEN_16BIT, 0x0800, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EA, CRL_REG_LEN_16BIT, 0x3C0E, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x32EC, CRL_REG_LEN_16BIT, 0x72A1, 0x10 }, + { 0x31D0, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0201, 0x10 }, + { 0x31AE, CRL_REG_LEN_16BIT, 0x0001, 0x10 }, + { 0x31AC, CRL_REG_LEN_16BIT, 0x140C, 0x10 }, + { 0x340A, 
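+ /*
+  * Editorial note: the per-mode writes just above correspond to the
+  * dynamic-access definitions further down in this file. R0x300C and
+  * R0x300A are the line-length and frame-length registers behind the
+  * LLP and FLL controls, R0x3012 is the T1 coarse integration time,
+  * and R0x3238 is written with 0x0222, the default of the HDR exposure
+  * ratio control. The 0x3014/0x321E/0x3222/0x3226 writes are presumed
+  * to be the matching fine integration times; they are not named here.
+  */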
CRL_REG_LEN_16BIT, 0x0077, 0x10 },
+ { 0x340C, CRL_REG_LEN_16BIT, 0x0080, 0x10 },
+ { 0x30CE, CRL_REG_LEN_16BIT, 0x0120, 0x10 },
+ { 0x301A, CRL_REG_LEN_16BIT, 0x19DC, 0x10 },
+};
+
+struct crl_mode_rep ar0231at_modes[] = {
+ {
+ .sd_rects_items = ARRAY_SIZE(ar0231at_1920_1088_rects),
+ .sd_rects = ar0231at_1920_1088_rects,
+ .binn_hor = 1,
+ .binn_vert = 1,
+ .scale_m = 1,
+ .width = 1920,
+ .height = 1088,
+ .min_llp = 2162,
+ .min_fll = 1354,
+ .comp_items = 1,
+ .ctrl_data = &ar0231at_ctrl_data_modes[0],
+ .mode_regs_items = ARRAY_SIZE(ar0231at_1920_1088_linear_mode),
+ .mode_regs = ar0231at_1920_1088_linear_mode,
+ },
+ {
+ .sd_rects_items = ARRAY_SIZE(ar0231at_1920_1088_rects),
+ .sd_rects = ar0231at_1920_1088_rects,
+ .binn_hor = 1,
+ .binn_vert = 1,
+ .scale_m = 1,
+ .width = 1920,
+ .height = 1088,
+ .min_llp = 1978,
+ .min_fll = 1480,
+ .comp_items = 1,
+ .ctrl_data = &ar0231at_ctrl_data_modes[1],
+ .mode_regs_items = ARRAY_SIZE(ar0231at_1920_1088_2hdr_mode),
+ .mode_regs = ar0231at_1920_1088_2hdr_mode,
+ },
+ {
+ .sd_rects_items = ARRAY_SIZE(ar0231at_1920_1088_rects),
+ .sd_rects = ar0231at_1920_1088_rects,
+ .binn_hor = 1,
+ .binn_vert = 1,
+ .scale_m = 1,
+ .width = 1920,
+ .height = 1088,
+ .min_llp = 1978,
+ .min_fll = 1480,
+ .comp_items = 1,
+ .ctrl_data = &ar0231at_ctrl_data_modes[2],
+ .mode_regs_items = ARRAY_SIZE(ar0231at_1920_1088_3hdr_mode),
+ .mode_regs = ar0231at_1920_1088_3hdr_mode,
+ },
+ {
+ .sd_rects_items = ARRAY_SIZE(ar0231at_1920_1088_rects),
+ .sd_rects = ar0231at_1920_1088_rects,
+ .binn_hor = 1,
+ .binn_vert = 1,
+ .scale_m = 1,
+ .width = 1920,
+ .height = 1088,
+ .min_llp = 2246,
+ .min_fll = 1304,
+ .comp_items = 1,
+ .ctrl_data = &ar0231at_ctrl_data_modes[3],
+ .mode_regs_items = ARRAY_SIZE(ar0231at_1920_1088_4hdr_mode),
+ .mode_regs = ar0231at_1920_1088_4hdr_mode,
+ },
+};
+
+struct crl_csi_data_fmt ar0231at_crl_csi_data_fmt[] = {
+ {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .pixel_order = CRL_PIXEL_ORDER_GRBG,
+ .bits_per_pixel = 12,
+ .regs_items = 0,
+ .regs = 0,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .pixel_order = CRL_PIXEL_ORDER_RGGB,
+ .bits_per_pixel = 12,
+ .regs_items = 0,
+ .regs = 0,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .pixel_order = CRL_PIXEL_ORDER_BGGR,
+ .bits_per_pixel = 12,
+ .regs_items = 0,
+ .regs = 0,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .pixel_order = CRL_PIXEL_ORDER_GBRG,
+ .bits_per_pixel = 12,
+ .regs_items = 0,
+ .regs = 0,
+ }
+};
+
+static struct crl_arithmetic_ops ar0231at_ls2_ops[] = {
+ {
+ .op = CRL_BITWISE_LSHIFT,
+ .operand.entity_val = 2,
+ }
+};
+
+/* Line length in pixels */
+static struct crl_dynamic_register_access ar0231at_llp_regs[] = {
+ {
+ .address = 0x300C,
+ .len = CRL_REG_LEN_16BIT,
+ .ops_items = 0,
+ .ops = 0,
+ .mask = 0xffff,
+ },
+};
+
+/* Frame length in lines */
+static struct crl_dynamic_register_access ar0231at_fll_regs[] = {
+ {
+ .address = 0x300A,
+ .len = CRL_REG_LEN_16BIT,
+ .ops_items = 0,
+ .ops = 0,
+ .mask = 0xffff,
+ },
+};
+
+/* Analog gain register, also used in linear (non-HDR) mode */
+static struct crl_dynamic_register_access ar0231at_ana_gain_regs[] = {
+ {
+ .address = 0x3366, /* analog gain */
+ .len = CRL_REG_LEN_16BIT,
+ .ops_items = 0,
+ .ops = 0,
+ .mask = 0xffff,
+ },
+};
+
+/* Digital gain register */
+static struct crl_dynamic_register_access ar0231at_gl_regs[] = {
+ {
+ .address = 0x305E, /* global digital gain */
+ .len = CRL_REG_LEN_16BIT,
+ .ops_items = 0,
+ .ops = 0,
+ .mask = 0x07ff,
+ },
+};
+
+/*
+ * Exposure mode:
+ * 0: Linear mode
+ * 1: 2-HDR mode
+ * 2: 3-HDR mode
+ * 3: 4-HDR mode
+ * The control value is left-shifted by two (ar0231at_ls2_ops) into
+ * the 0x000C field of R0x3082 via a read-modify-write.
+ */
+static struct crl_dynamic_register_access ar0231at_exposure_mode_regs[] = {
+ {
+ .address = 0x3082,
+ .len = CRL_REG_LEN_16BIT | CRL_REG_READ_AND_UPDATE,
+ .ops_items = ARRAY_SIZE(ar0231at_ls2_ops),
+ .ops = ar0231at_ls2_ops,
+ .mask = 0x000C,
+ },
+};
+
+/*
+ * Exposure ratio in HDR mode (R0x3238):
+ * 0x8000:
+ * selects between exposure-ratio mode and configuring the
+ * exposure time of each HDR sub-exposure individually.
+ * 0x0222:
+ * exposure-ratio mode selected, with a 4x ratio between
+ * successive exposures; each ratio can also be 2x, 8x or 16x.
+ */
+static struct crl_dynamic_register_access ar0231at_hdr_exposure_ratio_regs[] = {
+ {
+ .address = 0x3238,
+ .len = CRL_REG_LEN_16BIT | CRL_REG_READ_AND_UPDATE,
+ .ops_items = 0,
+ .ops = 0,
+ .mask = 0x8777,
+ },
+};
+
+/* T1 exposure register, also used in linear (non-HDR) mode */
+static struct crl_dynamic_register_access ar0231at_t1expotime_regs[] = {
+ {
+ .address = 0x3012, /* coarse integration time T1 */
+ .len = CRL_REG_LEN_16BIT,
+ .ops_items = 0,
+ .ops = 0,
+ .mask = 0xffff,
+ },
+};
+
+/* T2 exposure register, only used in HDR mode */
+static struct crl_dynamic_register_access ar0231at_t2expotime_regs[] = {
+ {
+ .address = 0x3212, /* coarse integration time T2 */
+ .len = CRL_REG_LEN_16BIT,
+ .ops_items = 0,
+ .ops = 0,
+ .mask = 0xffff,
+ },
+};
+
+/* T3 exposure register, only used in HDR mode */
+static struct crl_dynamic_register_access ar0231at_t3expotime_regs[] = {
+ {
+ .address = 0x3216, /* coarse integration time T3 */
+ .len = CRL_REG_LEN_16BIT,
+ .ops_items = 0,
+ .ops = 0,
+ .mask = 0xffff,
+ },
+};
+
+/* T4 exposure register, only used in HDR mode */
+static struct crl_dynamic_register_access ar0231at_t4expotime_regs[] = {
+ {
+ .address = 0x321A, /* coarse integration time T4 */
+ .len = CRL_REG_LEN_16BIT,
+ .ops_items = 0,
+ .ops = 0,
+ .mask = 0xffff,
+ },
+};
+
+struct crl_v4l2_ctrl ar0231at_v4l2_ctrls[] = {
+ {
+ .sd_type = CRL_SUBDEV_TYPE_BINNER,
+ .op_type = CRL_V4L2_CTRL_SET_OP,
+ .context = SENSOR_IDLE,
+ .ctrl_id = V4L2_CID_LINK_FREQ,
+ .name = "V4L2_CID_LINK_FREQ",
+ .type = CRL_V4L2_CTRL_TYPE_MENU_INT,
+ .data.v4l2_int_menu.def = 0,
+ .data.v4l2_int_menu.max = 0,
+ .data.v4l2_int_menu.menu = 0,
+ .flags = 0,
+ .impact = CRL_IMPACTS_NO_IMPACT,
+ .regs_items = 0,
+ .regs = 0,
+ .dep_items = 0,
+ .dep_ctrls = 0,
+ },
+ {
+ .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY,
+ .op_type = CRL_V4L2_CTRL_GET_OP,
+ .context = SENSOR_POWERED_ON,
+ .ctrl_id = V4L2_CID_PIXEL_RATE,
+ .name = "V4L2_CID_PIXEL_RATE_PA",
+ .type = CRL_V4L2_CTRL_TYPE_INTEGER,
+ .data.std_data.min = 0,
+ .data.std_data.max = INT_MAX,
+ .data.std_data.step = 1,
+ .data.std_data.def = 0,
+ .flags = 0,
+ .impact = CRL_IMPACTS_NO_IMPACT,
+ .regs_items = 0,
+ .regs = 0,
+ .dep_items = 0,
+ .dep_ctrls = 0,
+ },
+ {
+ .sd_type = CRL_SUBDEV_TYPE_BINNER,
+ .op_type = CRL_V4L2_CTRL_GET_OP,
+ .context = SENSOR_POWERED_ON,
+ .ctrl_id = V4L2_CID_PIXEL_RATE,
+ .name = "V4L2_CID_PIXEL_RATE_CSI",
+ .type = CRL_V4L2_CTRL_TYPE_INTEGER,
+ .data.std_data.min = 0,
+ .data.std_data.max = INT_MAX,
+ .data.std_data.step = 1,
+ .data.std_data.def = 0,
+ .flags = 0,
+ .impact = CRL_IMPACTS_NO_IMPACT,
+ .regs_items = 0,
+ .regs = 0,
+ .dep_items = 0,
+ .dep_ctrls = 0,
+ },
+ {
+ .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY,
+ .op_type = CRL_V4L2_CTRL_SET_OP,
+ .context = SENSOR_POWERED_ON,
+ .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS,
+ .name = "Line Length Pixels",
+ .type = CRL_V4L2_CTRL_TYPE_CUSTOM,
+ .data.std_data.min = 1920,
+ .data.std_data.max = 65535,
+ .data.std_data.step =
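+ /*
+  * Editorial note: under the standard rolling-shutter timing model,
+  * which this file does not spell out, frame rate = pixel_rate /
+  * (line_length_pixels * frame_length_lines), so raising either of
+  * these two controls lengthens the frame period. The defaults below,
+  * 1978 and 1480, equal the min_llp/min_fll of the 2-HDR and 3-HDR
+  * modes above.
+  */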
1, + .data.std_data.def = 1978, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_llp_regs), + .regs = ar0231at_llp_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame Length Lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1088, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 1480, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_fll_regs), + .regs = ar0231at_fll_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0x0000, + .data.std_data.max = 0xFFFF, + .data.std_data.step = 1, + .data.std_data.def = 0xAAAA, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_ana_gain_regs), + .regs = ar0231at_ana_gain_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_GAIN, + .name = "Digital Gain", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0x0080, + .data.std_data.max = 0x07FF, + .data.std_data.step = 1, + .data.std_data.def = 0x0080, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_gl_regs), + .regs = ar0231at_gl_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_MODE, + .name = "CRL_CID_EXPOSURE_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 3, + .data.std_data.step = 1, + .data.std_data.def = 0x0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_exposure_mode_regs), + .regs = ar0231at_exposure_mode_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_HDR_RATIO, + .name = "CRL_CID_EXPOSURE_HDR_RATIO", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 0x0222, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_hdr_exposure_ratio_regs), + .regs = ar0231at_hdr_exposure_ratio_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "T1_COARSE_EXPOSURE_TIME", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0x0, + .data.std_data.max = 0xFFFF, + .data.std_data.step = 1, + .data.std_data.def = 0x0163, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ar0231at_t1expotime_regs), + .regs = 
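+ /*
+  * Editorial note, inferred from the R0x3238 comment above: in the
+  * default exposure-ratio mode (0x0222) the T2-T4 exposures follow
+  * from T1 by the configured per-stage ratios, so V4L2_CID_EXPOSURE
+  * (T1) is normally the only exposure time an application updates.
+  * The SHS1-SHS3 custom controls below write T2-T4 directly for the
+  * individual-exposure configuration.
+  */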
+struct crl_sensor_detect_config ar0231at_sensor_detect_regset[] = {
+	{
+		.reg = { 0x3000, CRL_REG_LEN_16BIT, 0xFFFF },
+		.width = 15,
+	},
+};
+
+static struct crl_sensor_limits ar0231at_maxim_sensor_limits = {
+	.x_addr_min = 0,
+	.y_addr_min = 0,
+	.x_addr_max = 1920,
+	.y_addr_max = 1088,
+	.min_frame_length_lines = 240,
+	.max_frame_length_lines = 65535,
+	.min_line_length_pixels = 320,
+	.max_line_length_pixels = 32752,
+};
+
+struct crl_sensor_configuration ar0231at_crl_configuration = {
+	.powerup_regs_items = 0,
+	.powerup_regs = 0,
+
+	.poweroff_regs_items = 0,
+	.poweroff_regs = 0,
+
+	.power_items = 0,
+	.power_entities = 0,
+
+	.pll_config_items = ARRAY_SIZE(ar0231at_pll_configurations),
+	.pll_configs = ar0231at_pll_configurations,
+
+	.id_reg_items = ARRAY_SIZE(ar0231at_sensor_detect_regset),
+	.id_regs = ar0231at_sensor_detect_regset,
+
+	.subdev_items = ARRAY_SIZE(ar0231at_sensor_subdevs),
+	.subdevs = ar0231at_sensor_subdevs,
+
+	.modes_items = ARRAY_SIZE(ar0231at_modes),
+	.modes = ar0231at_modes,
+
+	.csi_fmts_items = ARRAY_SIZE(ar0231at_crl_csi_data_fmt),
+	.csi_fmts = ar0231at_crl_csi_data_fmt,
+
+	.v4l2_ctrls_items = ARRAY_SIZE(ar0231at_v4l2_ctrls),
+	.v4l2_ctrl_bank = ar0231at_v4l2_ctrls,
+
+	.streamon_regs_items = 0,
+	.streamon_regs = 0,
+	.streamoff_regs_items = 0,
+	.streamoff_regs = 0,
+
+	.sensor_limits = &ar0231at_maxim_sensor_limits,
+};
+
+#endif /* __CRLMODULE_AR0231AT_CONFIGURATION_H_ */
diff --git a/drivers/media/i2c/crlmodule/crl_ar023z_configuration.h b/drivers/media/i2c/crlmodule/crl_ar023z_configuration.h
new file mode 100644
index 000000000000..2bd2b0f06b18
--- /dev/null
+++ b/drivers/media/i2c/crlmodule/crl_ar023z_configuration.h
@@
-0,0 +1,1903 @@ +/* + * Copyright (c) 2018 Intel Corporation. + * + * Author: Alexei Zavjalov + * Author: Kiran Kumar + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __CRLMODULE_AR023Z_CONFIGURATION_H_ +#define __CRLMODULE_AR023Z_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +#define TC358778_I2C_ADDRESS 0x0E /* Toshiba TC358778 Parallel-MIPI Bridge */ +#define AR023Z_I2C_ADDRESS 0x48 /* OnSemi AP0202AT ISP */ + +static struct crl_register_write_rep ar023z_1920_1080[] = { + { 0x0004, CRL_REG_LEN_16BIT, 0x0004, TC358778_I2C_ADDRESS }, + { 0x0002, CRL_REG_LEN_16BIT, 0x0001, TC358778_I2C_ADDRESS }, + { 0x0002, CRL_REG_LEN_16BIT, 0x0000, TC358778_I2C_ADDRESS }, + { 0x0016, CRL_REG_LEN_16BIT, 0x50cd, TC358778_I2C_ADDRESS }, + { 0x0018, CRL_REG_LEN_16BIT, 0x0213, TC358778_I2C_ADDRESS }, + + { 0x0006, CRL_REG_LEN_16BIT, 0x0040, TC358778_I2C_ADDRESS }, + { 0x0008, CRL_REG_LEN_16BIT, 0x0060, TC358778_I2C_ADDRESS }, + { 0x0022, CRL_REG_LEN_16BIT, 0x0F00, TC358778_I2C_ADDRESS }, + + { 0x0140, CRL_REG_LEN_32BIT, 0x00000000, TC358778_I2C_ADDRESS }, + { 0x0144, CRL_REG_LEN_32BIT, 0x00000000, TC358778_I2C_ADDRESS }, + { 0x0148, CRL_REG_LEN_32BIT, 0x00000000, TC358778_I2C_ADDRESS }, + { 0x014C, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS }, + { 0x0150, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS }, + + { 0x0210, CRL_REG_LEN_32BIT, 0x21000000, TC358778_I2C_ADDRESS }, + { 0x0214, CRL_REG_LEN_32BIT, 0x00040000, TC358778_I2C_ADDRESS }, + { 0x0218, CRL_REG_LEN_32BIT, 0x17050000, TC358778_I2C_ADDRESS }, + { 0x021C, CRL_REG_LEN_32BIT, 0x00020000, TC358778_I2C_ADDRESS }, + { 0x0220, CRL_REG_LEN_32BIT, 0x0a070000, TC358778_I2C_ADDRESS }, + { 0x0224, CRL_REG_LEN_32BIT, 0x41880000, TC358778_I2C_ADDRESS }, + { 0x0228, CRL_REG_LEN_32BIT, 0x00080000, TC358778_I2C_ADDRESS }, + { 0x022C, CRL_REG_LEN_32BIT, 0x00020000, TC358778_I2C_ADDRESS }, + { 0x0234, CRL_REG_LEN_32BIT, 0x00070000, TC358778_I2C_ADDRESS }, + { 0x0238, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS }, + { 0x0204, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS }, + + { 0x0518, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS }, + { 0x0500, CRL_REG_LEN_32BIT, 0x80A3A300, TC358778_I2C_ADDRESS }, + + { 0x0004, CRL_REG_LEN_16BIT, 0x0245, TC358778_I2C_ADDRESS }, + + { 0x0040, CRL_REG_LEN_16BIT, 0x8E00, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x098E, CRL_REG_LEN_16BIT, 0x7C00, AR023Z_I2C_ADDRESS }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0054, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8706, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0982, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0x098A, CRL_REG_LEN_16BIT, 0x4750, AR023Z_I2C_ADDRESS }, + { 0xC750, CRL_REG_LEN_16BIT, 0xC0F1, AR023Z_I2C_ADDRESS }, + { 0xC752, CRL_REG_LEN_16BIT, 0x0CEA, AR023Z_I2C_ADDRESS }, + { 0xC754, CRL_REG_LEN_16BIT, 0x1340, AR023Z_I2C_ADDRESS }, + { 0xC756, CRL_REG_LEN_16BIT, 0x75CF, AR023Z_I2C_ADDRESS }, + { 0xC758, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC75A, CRL_REG_LEN_16BIT, 0xA1A8, 
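+/*
+ * Readability note (added; not part of the vendor blob): the entries above
+ * addressed to TC358778_I2C_ADDRESS configure the Toshiba parallel-to-MIPI
+ * bridge (PLL, lane timing, CSI-TX start), while the AR023Z_I2C_ADDRESS
+ * entries program the AP0202AT ISP. The 0x098A/0xC7xx pairs appear to load
+ * a patch into the ISP's patch RAM, and writes to 0xFC00..0xFC0E followed
+ * by a write to host command register 0x0040 appear to pass parameters to,
+ * and then invoke, an ISP host command.
+ */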
AR023Z_I2C_ADDRESS }, + { 0xC75C, CRL_REG_LEN_16BIT, 0x8DC4, AR023Z_I2C_ADDRESS }, + { 0xC75E, CRL_REG_LEN_16BIT, 0x0E0B, AR023Z_I2C_ADDRESS }, + { 0xC760, CRL_REG_LEN_16BIT, 0x10D1, AR023Z_I2C_ADDRESS }, + { 0xC762, CRL_REG_LEN_16BIT, 0xD804, AR023Z_I2C_ADDRESS }, + { 0xC764, CRL_REG_LEN_16BIT, 0xAD04, AR023Z_I2C_ADDRESS }, + { 0xC766, CRL_REG_LEN_16BIT, 0x70CF, AR023Z_I2C_ADDRESS }, + { 0xC768, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xC76A, CRL_REG_LEN_16BIT, 0x7C06, AR023Z_I2C_ADDRESS }, + { 0xC76C, CRL_REG_LEN_16BIT, 0x7840, AR023Z_I2C_ADDRESS }, + { 0xC76E, CRL_REG_LEN_16BIT, 0x0E0F, AR023Z_I2C_ADDRESS }, + { 0xC770, CRL_REG_LEN_16BIT, 0x1111, AR023Z_I2C_ADDRESS }, + { 0xC772, CRL_REG_LEN_16BIT, 0xD800, AR023Z_I2C_ADDRESS }, + { 0xC774, CRL_REG_LEN_16BIT, 0x0CEE, AR023Z_I2C_ADDRESS }, + { 0xC776, CRL_REG_LEN_16BIT, 0x0760, AR023Z_I2C_ADDRESS }, + { 0xC778, CRL_REG_LEN_16BIT, 0xAD04, AR023Z_I2C_ADDRESS }, + { 0xC77A, CRL_REG_LEN_16BIT, 0x0531, AR023Z_I2C_ADDRESS }, + { 0xC77C, CRL_REG_LEN_16BIT, 0x1340, AR023Z_I2C_ADDRESS }, + { 0xC77E, CRL_REG_LEN_16BIT, 0x78E0, AR023Z_I2C_ADDRESS }, + { 0xC780, CRL_REG_LEN_16BIT, 0xD900, AR023Z_I2C_ADDRESS }, + { 0xC782, CRL_REG_LEN_16BIT, 0xF00A, AR023Z_I2C_ADDRESS }, + { 0xC784, CRL_REG_LEN_16BIT, 0x70CF, AR023Z_I2C_ADDRESS }, + { 0xC786, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC788, CRL_REG_LEN_16BIT, 0xC79C, AR023Z_I2C_ADDRESS }, + { 0xC78A, CRL_REG_LEN_16BIT, 0x7835, AR023Z_I2C_ADDRESS }, + { 0xC78C, CRL_REG_LEN_16BIT, 0x8041, AR023Z_I2C_ADDRESS }, + { 0xC78E, CRL_REG_LEN_16BIT, 0x8000, AR023Z_I2C_ADDRESS }, + { 0xC790, CRL_REG_LEN_16BIT, 0xE102, AR023Z_I2C_ADDRESS }, + { 0xC792, CRL_REG_LEN_16BIT, 0xA040, AR023Z_I2C_ADDRESS }, + { 0xC794, CRL_REG_LEN_16BIT, 0x09F1, AR023Z_I2C_ADDRESS }, + { 0xC796, CRL_REG_LEN_16BIT, 0x8094, AR023Z_I2C_ADDRESS }, + { 0xC798, CRL_REG_LEN_16BIT, 0x7FE0, AR023Z_I2C_ADDRESS }, + { 0xC79A, CRL_REG_LEN_16BIT, 0xD800, AR023Z_I2C_ADDRESS }, + { 0xC79C, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC79E, CRL_REG_LEN_16BIT, 0xC160, AR023Z_I2C_ADDRESS }, + { 0xC7A0, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC7A2, CRL_REG_LEN_16BIT, 0xC750, AR023Z_I2C_ADDRESS }, + { 0x098E, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x0030, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0140, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0xA103, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x0204, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x0054, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8702, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8701, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x0054, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x01CC, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8706, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0982, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0x098A, CRL_REG_LEN_16BIT, 0x47A4, AR023Z_I2C_ADDRESS }, + { 0xC7A4, CRL_REG_LEN_16BIT, 0xC0F1, AR023Z_I2C_ADDRESS }, + { 0xC7A6, CRL_REG_LEN_16BIT, 0x0C96, AR023Z_I2C_ADDRESS }, + { 0xC7A8, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC7AA, CRL_REG_LEN_16BIT, 0xD900, AR023Z_I2C_ADDRESS }, + { 0xC7AC, CRL_REG_LEN_16BIT, 0xC1A1, AR023Z_I2C_ADDRESS }, + { 0xC7AE, CRL_REG_LEN_16BIT, 0x75CF, AR023Z_I2C_ADDRESS }, + { 0xC7B0, CRL_REG_LEN_16BIT, 0xFFFF, 
AR023Z_I2C_ADDRESS }, + { 0xC7B2, CRL_REG_LEN_16BIT, 0x82A4, AR023Z_I2C_ADDRESS }, + { 0xC7B4, CRL_REG_LEN_16BIT, 0x8DC0, AR023Z_I2C_ADDRESS }, + { 0xC7B6, CRL_REG_LEN_16BIT, 0x0BEE, AR023Z_I2C_ADDRESS }, + { 0xC7B8, CRL_REG_LEN_16BIT, 0x03E0, AR023Z_I2C_ADDRESS }, + { 0xC7BA, CRL_REG_LEN_16BIT, 0x708B, AR023Z_I2C_ADDRESS }, + { 0xC7BC, CRL_REG_LEN_16BIT, 0x71CF, AR023Z_I2C_ADDRESS }, + { 0xC7BE, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xC7C0, CRL_REG_LEN_16BIT, 0x7E2A, AR023Z_I2C_ADDRESS }, + { 0xC7C2, CRL_REG_LEN_16BIT, 0x081B, AR023Z_I2C_ADDRESS }, + { 0xC7C4, CRL_REG_LEN_16BIT, 0x0051, AR023Z_I2C_ADDRESS }, + { 0xC7C6, CRL_REG_LEN_16BIT, 0xC020, AR023Z_I2C_ADDRESS }, + { 0xC7C8, CRL_REG_LEN_16BIT, 0xE080, AR023Z_I2C_ADDRESS }, + { 0xC7CA, CRL_REG_LEN_16BIT, 0x20CC, AR023Z_I2C_ADDRESS }, + { 0xC7CC, CRL_REG_LEN_16BIT, 0x8062, AR023Z_I2C_ADDRESS }, + { 0xC7CE, CRL_REG_LEN_16BIT, 0xF407, AR023Z_I2C_ADDRESS }, + { 0xC7D0, CRL_REG_LEN_16BIT, 0xD802, AR023Z_I2C_ADDRESS }, + { 0xC7D2, CRL_REG_LEN_16BIT, 0x7960, AR023Z_I2C_ADDRESS }, + { 0xC7D4, CRL_REG_LEN_16BIT, 0xAD00, AR023Z_I2C_ADDRESS }, + { 0xC7D6, CRL_REG_LEN_16BIT, 0xADC0, AR023Z_I2C_ADDRESS }, + { 0xC7D8, CRL_REG_LEN_16BIT, 0xF002, AR023Z_I2C_ADDRESS }, + { 0xC7DA, CRL_REG_LEN_16BIT, 0x7940, AR023Z_I2C_ADDRESS }, + { 0xC7DC, CRL_REG_LEN_16BIT, 0x04CD, AR023Z_I2C_ADDRESS }, + { 0xC7DE, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC7E0, CRL_REG_LEN_16BIT, 0xC0A1, AR023Z_I2C_ADDRESS }, + { 0xC7E2, CRL_REG_LEN_16BIT, 0x78E0, AR023Z_I2C_ADDRESS }, + { 0xC7E4, CRL_REG_LEN_16BIT, 0xC0F1, AR023Z_I2C_ADDRESS }, + { 0xC7E6, CRL_REG_LEN_16BIT, 0x0C4E, AR023Z_I2C_ADDRESS }, + { 0xC7E8, CRL_REG_LEN_16BIT, 0x1340, AR023Z_I2C_ADDRESS }, + { 0xC7EA, CRL_REG_LEN_16BIT, 0x0CE6, AR023Z_I2C_ADDRESS }, + { 0xC7EC, CRL_REG_LEN_16BIT, 0x03C0, AR023Z_I2C_ADDRESS }, + { 0xC7EE, CRL_REG_LEN_16BIT, 0x701A, AR023Z_I2C_ADDRESS }, + { 0xC7F0, CRL_REG_LEN_16BIT, 0x0D0A, AR023Z_I2C_ADDRESS }, + { 0xC7F2, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC7F4, CRL_REG_LEN_16BIT, 0x218A, AR023Z_I2C_ADDRESS }, + { 0xC7F6, CRL_REG_LEN_16BIT, 0x0A0F, AR023Z_I2C_ADDRESS }, + { 0xC7F8, CRL_REG_LEN_16BIT, 0x7708, AR023Z_I2C_ADDRESS }, + { 0xC7FA, CRL_REG_LEN_16BIT, 0x75CF, AR023Z_I2C_ADDRESS }, + { 0xC7FC, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC7FE, CRL_REG_LEN_16BIT, 0xA168, AR023Z_I2C_ADDRESS }, + { 0xC800, CRL_REG_LEN_16BIT, 0x70CF, AR023Z_I2C_ADDRESS }, + { 0xC802, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xC804, CRL_REG_LEN_16BIT, 0x8712, AR023Z_I2C_ADDRESS }, + { 0xC806, CRL_REG_LEN_16BIT, 0x7840, AR023Z_I2C_ADDRESS }, + { 0xC808, CRL_REG_LEN_16BIT, 0x1524, AR023Z_I2C_ADDRESS }, + { 0xC80A, CRL_REG_LEN_16BIT, 0x1080, AR023Z_I2C_ADDRESS }, + { 0xC80C, CRL_REG_LEN_16BIT, 0xE82D, AR023Z_I2C_ADDRESS }, + { 0xC80E, CRL_REG_LEN_16BIT, 0x76CF, AR023Z_I2C_ADDRESS }, + { 0xC810, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC812, CRL_REG_LEN_16BIT, 0xB530, AR023Z_I2C_ADDRESS }, + { 0xC814, CRL_REG_LEN_16BIT, 0x9623, AR023Z_I2C_ADDRESS }, + { 0xC816, CRL_REG_LEN_16BIT, 0x75CF, AR023Z_I2C_ADDRESS }, + { 0xC818, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC81A, CRL_REG_LEN_16BIT, 0xB5B0, AR023Z_I2C_ADDRESS }, + { 0xC81C, CRL_REG_LEN_16BIT, 0x9516, AR023Z_I2C_ADDRESS }, + { 0xC81E, CRL_REG_LEN_16BIT, 0xDB7D, AR023Z_I2C_ADDRESS }, + { 0xC820, CRL_REG_LEN_16BIT, 0xBB0A, AR023Z_I2C_ADDRESS }, + { 0xC822, CRL_REG_LEN_16BIT, 0x782C, AR023Z_I2C_ADDRESS }, + { 0xC824, CRL_REG_LEN_16BIT, 0x2942, 
AR023Z_I2C_ADDRESS }, + { 0xC826, CRL_REG_LEN_16BIT, 0x77C0, AR023Z_I2C_ADDRESS }, + { 0xC828, CRL_REG_LEN_16BIT, 0x712F, AR023Z_I2C_ADDRESS }, + { 0xC82A, CRL_REG_LEN_16BIT, 0x0EFE, AR023Z_I2C_ADDRESS }, + { 0xC82C, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC82E, CRL_REG_LEN_16BIT, 0xDA00, AR023Z_I2C_ADDRESS }, + { 0xC830, CRL_REG_LEN_16BIT, 0x730A, AR023Z_I2C_ADDRESS }, + { 0xC832, CRL_REG_LEN_16BIT, 0x0E1A, AR023Z_I2C_ADDRESS }, + { 0xC834, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC836, CRL_REG_LEN_16BIT, 0xDA00, AR023Z_I2C_ADDRESS }, + { 0xC838, CRL_REG_LEN_16BIT, 0x72CF, AR023Z_I2C_ADDRESS }, + { 0xC83A, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC83C, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC83E, CRL_REG_LEN_16BIT, 0x7150, AR023Z_I2C_ADDRESS }, + { 0xC840, CRL_REG_LEN_16BIT, 0x22CA, AR023Z_I2C_ADDRESS }, + { 0xC842, CRL_REG_LEN_16BIT, 0x0045, AR023Z_I2C_ADDRESS }, + { 0xC844, CRL_REG_LEN_16BIT, 0x71CF, AR023Z_I2C_ADDRESS }, + { 0xC846, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC848, CRL_REG_LEN_16BIT, 0xADB4, AR023Z_I2C_ADDRESS }, + { 0xC84A, CRL_REG_LEN_16BIT, 0x9122, AR023Z_I2C_ADDRESS }, + { 0xC84C, CRL_REG_LEN_16BIT, 0x1EC0, AR023Z_I2C_ADDRESS }, + { 0xC84E, CRL_REG_LEN_16BIT, 0x1084, AR023Z_I2C_ADDRESS }, + { 0xC850, CRL_REG_LEN_16BIT, 0x854A, AR023Z_I2C_ADDRESS }, + { 0xC852, CRL_REG_LEN_16BIT, 0x7230, AR023Z_I2C_ADDRESS }, + { 0xC854, CRL_REG_LEN_16BIT, 0x21CA, AR023Z_I2C_ADDRESS }, + { 0xC856, CRL_REG_LEN_16BIT, 0x008D, AR023Z_I2C_ADDRESS }, + { 0xC858, CRL_REG_LEN_16BIT, 0xB907, AR023Z_I2C_ADDRESS }, + { 0xC85A, CRL_REG_LEN_16BIT, 0x61F8, AR023Z_I2C_ADDRESS }, + { 0xC85C, CRL_REG_LEN_16BIT, 0xB861, AR023Z_I2C_ADDRESS }, + { 0xC85E, CRL_REG_LEN_16BIT, 0x0C9E, AR023Z_I2C_ADDRESS }, + { 0xC860, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC862, CRL_REG_LEN_16BIT, 0x71E9, AR023Z_I2C_ADDRESS }, + { 0xC864, CRL_REG_LEN_16BIT, 0xB51F, AR023Z_I2C_ADDRESS }, + { 0xC866, CRL_REG_LEN_16BIT, 0x0435, AR023Z_I2C_ADDRESS }, + { 0xC868, CRL_REG_LEN_16BIT, 0x1340, AR023Z_I2C_ADDRESS }, + { 0xC86A, CRL_REG_LEN_16BIT, 0x78E0, AR023Z_I2C_ADDRESS }, + { 0xC86C, CRL_REG_LEN_16BIT, 0x8850, AR023Z_I2C_ADDRESS }, + { 0xC86E, CRL_REG_LEN_16BIT, 0xD980, AR023Z_I2C_ADDRESS }, + { 0xC870, CRL_REG_LEN_16BIT, 0xEA08, AR023Z_I2C_ADDRESS }, + { 0xC872, CRL_REG_LEN_16BIT, 0x71CF, AR023Z_I2C_ADDRESS }, + { 0xC874, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC876, CRL_REG_LEN_16BIT, 0xAD10, AR023Z_I2C_ADDRESS }, + { 0xC878, CRL_REG_LEN_16BIT, 0x21F4, AR023Z_I2C_ADDRESS }, + { 0xC87A, CRL_REG_LEN_16BIT, 0x0081, AR023Z_I2C_ADDRESS }, + { 0xC87C, CRL_REG_LEN_16BIT, 0xB907, AR023Z_I2C_ADDRESS }, + { 0xC87E, CRL_REG_LEN_16BIT, 0xB925, AR023Z_I2C_ADDRESS }, + { 0xC880, CRL_REG_LEN_16BIT, 0x8851, AR023Z_I2C_ADDRESS }, + { 0xC882, CRL_REG_LEN_16BIT, 0xEA09, AR023Z_I2C_ADDRESS }, + { 0xC884, CRL_REG_LEN_16BIT, 0x72CF, AR023Z_I2C_ADDRESS }, + { 0xC886, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC888, CRL_REG_LEN_16BIT, 0xACF4, AR023Z_I2C_ADDRESS }, + { 0xC88A, CRL_REG_LEN_16BIT, 0x9254, AR023Z_I2C_ADDRESS }, + { 0xC88C, CRL_REG_LEN_16BIT, 0x7A2C, AR023Z_I2C_ADDRESS }, + { 0xC88E, CRL_REG_LEN_16BIT, 0x2941, AR023Z_I2C_ADDRESS }, + { 0xC890, CRL_REG_LEN_16BIT, 0x7141, AR023Z_I2C_ADDRESS }, + { 0xC892, CRL_REG_LEN_16BIT, 0x9043, AR023Z_I2C_ADDRESS }, + { 0xC894, CRL_REG_LEN_16BIT, 0x7A2C, AR023Z_I2C_ADDRESS }, + { 0xC896, CRL_REG_LEN_16BIT, 0x9011, AR023Z_I2C_ADDRESS }, + { 0xC898, CRL_REG_LEN_16BIT, 0x2941, 
AR023Z_I2C_ADDRESS }, + { 0xC89A, CRL_REG_LEN_16BIT, 0x7141, AR023Z_I2C_ADDRESS }, + { 0xC89C, CRL_REG_LEN_16BIT, 0x782C, AR023Z_I2C_ADDRESS }, + { 0xC89E, CRL_REG_LEN_16BIT, 0x7FE0, AR023Z_I2C_ADDRESS }, + { 0xC8A0, CRL_REG_LEN_16BIT, 0x2941, AR023Z_I2C_ADDRESS }, + { 0xC8A2, CRL_REG_LEN_16BIT, 0x71C0, AR023Z_I2C_ADDRESS }, + { 0xC8A4, CRL_REG_LEN_16BIT, 0xC0F1, AR023Z_I2C_ADDRESS }, + { 0xC8A6, CRL_REG_LEN_16BIT, 0x0B92, AR023Z_I2C_ADDRESS }, + { 0xC8A8, CRL_REG_LEN_16BIT, 0x1340, AR023Z_I2C_ADDRESS }, + { 0xC8AA, CRL_REG_LEN_16BIT, 0x7508, AR023Z_I2C_ADDRESS }, + { 0xC8AC, CRL_REG_LEN_16BIT, 0xFFF0, AR023Z_I2C_ADDRESS }, + { 0xC8AE, CRL_REG_LEN_16BIT, 0xB807, AR023Z_I2C_ADDRESS }, + { 0xC8B0, CRL_REG_LEN_16BIT, 0x71CF, AR023Z_I2C_ADDRESS }, + { 0xC8B2, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC8B4, CRL_REG_LEN_16BIT, 0xB5B0, AR023Z_I2C_ADDRESS }, + { 0xC8B6, CRL_REG_LEN_16BIT, 0x0C46, AR023Z_I2C_ADDRESS }, + { 0xC8B8, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC8BA, CRL_REG_LEN_16BIT, 0x913D, AR023Z_I2C_ADDRESS }, + { 0xC8BC, CRL_REG_LEN_16BIT, 0x7708, AR023Z_I2C_ADDRESS }, + { 0xC8BE, CRL_REG_LEN_16BIT, 0x9500, AR023Z_I2C_ADDRESS }, + { 0xC8C0, CRL_REG_LEN_16BIT, 0x9521, AR023Z_I2C_ADDRESS }, + { 0xC8C2, CRL_REG_LEN_16BIT, 0x0A16, AR023Z_I2C_ADDRESS }, + { 0xC8C4, CRL_REG_LEN_16BIT, 0x13E0, AR023Z_I2C_ADDRESS }, + { 0xC8C6, CRL_REG_LEN_16BIT, 0x9547, AR023Z_I2C_ADDRESS }, + { 0xC8C8, CRL_REG_LEN_16BIT, 0x7608, AR023Z_I2C_ADDRESS }, + { 0xC8CA, CRL_REG_LEN_16BIT, 0x70E9, AR023Z_I2C_ADDRESS }, + { 0xC8CC, CRL_REG_LEN_16BIT, 0x0A56, AR023Z_I2C_ADDRESS }, + { 0xC8CE, CRL_REG_LEN_16BIT, 0x10E0, AR023Z_I2C_ADDRESS }, + { 0xC8D0, CRL_REG_LEN_16BIT, 0xD908, AR023Z_I2C_ADDRESS }, + { 0xC8D2, CRL_REG_LEN_16BIT, 0x7508, AR023Z_I2C_ADDRESS }, + { 0xC8D4, CRL_REG_LEN_16BIT, 0x2582, AR023Z_I2C_ADDRESS }, + { 0xC8D6, CRL_REG_LEN_16BIT, 0x101C, AR023Z_I2C_ADDRESS }, + { 0xC8D8, CRL_REG_LEN_16BIT, 0x70C9, AR023Z_I2C_ADDRESS }, + { 0xC8DA, CRL_REG_LEN_16BIT, 0x0A4A, AR023Z_I2C_ADDRESS }, + { 0xC8DC, CRL_REG_LEN_16BIT, 0x10E0, AR023Z_I2C_ADDRESS }, + { 0xC8DE, CRL_REG_LEN_16BIT, 0xD908, AR023Z_I2C_ADDRESS }, + { 0xC8E0, CRL_REG_LEN_16BIT, 0x03C1, AR023Z_I2C_ADDRESS }, + { 0xC8E2, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC8E4, CRL_REG_LEN_16BIT, 0x60B8, AR023Z_I2C_ADDRESS }, + { 0xC8E6, CRL_REG_LEN_16BIT, 0x78E0, AR023Z_I2C_ADDRESS }, + { 0xC8E8, CRL_REG_LEN_16BIT, 0xC0F1, AR023Z_I2C_ADDRESS }, + { 0xC8EA, CRL_REG_LEN_16BIT, 0x0B4E, AR023Z_I2C_ADDRESS }, + { 0xC8EC, CRL_REG_LEN_16BIT, 0x1340, AR023Z_I2C_ADDRESS }, + { 0xC8EE, CRL_REG_LEN_16BIT, 0x77CF, AR023Z_I2C_ADDRESS }, + { 0xC8F0, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC8F2, CRL_REG_LEN_16BIT, 0x8004, AR023Z_I2C_ADDRESS }, + { 0xC8F4, CRL_REG_LEN_16BIT, 0x0BC2, AR023Z_I2C_ADDRESS }, + { 0xC8F6, CRL_REG_LEN_16BIT, 0x03C0, AR023Z_I2C_ADDRESS }, + { 0xC8F8, CRL_REG_LEN_16BIT, 0x75CF, AR023Z_I2C_ADDRESS }, + { 0xC8FA, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC8FC, CRL_REG_LEN_16BIT, 0xAC00, AR023Z_I2C_ADDRESS }, + { 0xC8FE, CRL_REG_LEN_16BIT, 0x7608, AR023Z_I2C_ADDRESS }, + { 0xC900, CRL_REG_LEN_16BIT, 0x8F05, AR023Z_I2C_ADDRESS }, + { 0xC902, CRL_REG_LEN_16BIT, 0x9522, AR023Z_I2C_ADDRESS }, + { 0xC904, CRL_REG_LEN_16BIT, 0x7610, AR023Z_I2C_ADDRESS }, + { 0xC906, CRL_REG_LEN_16BIT, 0x21D1, AR023Z_I2C_ADDRESS }, + { 0xC908, CRL_REG_LEN_16BIT, 0x80A2, AR023Z_I2C_ADDRESS }, + { 0xC90A, CRL_REG_LEN_16BIT, 0xF213, AR023Z_I2C_ADDRESS }, + { 0xC90C, CRL_REG_LEN_16BIT, 0xE680, 
AR023Z_I2C_ADDRESS }, + { 0xC90E, CRL_REG_LEN_16BIT, 0x26CC, AR023Z_I2C_ADDRESS }, + { 0xC910, CRL_REG_LEN_16BIT, 0x9062, AR023Z_I2C_ADDRESS }, + { 0xC912, CRL_REG_LEN_16BIT, 0xF40F, AR023Z_I2C_ADDRESS }, + { 0xC914, CRL_REG_LEN_16BIT, 0x70CF, AR023Z_I2C_ADDRESS }, + { 0xC916, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC918, CRL_REG_LEN_16BIT, 0xB960, AR023Z_I2C_ADDRESS }, + { 0xC91A, CRL_REG_LEN_16BIT, 0xFFE3, AR023Z_I2C_ADDRESS }, + { 0xC91C, CRL_REG_LEN_16BIT, 0xB504, AR023Z_I2C_ADDRESS }, + { 0xC91E, CRL_REG_LEN_16BIT, 0x08DE, AR023Z_I2C_ADDRESS }, + { 0xC920, CRL_REG_LEN_16BIT, 0x0220, AR023Z_I2C_ADDRESS }, + { 0xC922, CRL_REG_LEN_16BIT, 0xD800, AR023Z_I2C_ADDRESS }, + { 0xC924, CRL_REG_LEN_16BIT, 0xD801, AR023Z_I2C_ADDRESS }, + { 0xC926, CRL_REG_LEN_16BIT, 0xAD0E, AR023Z_I2C_ADDRESS }, + { 0xC928, CRL_REG_LEN_16BIT, 0xAFC5, AR023Z_I2C_ADDRESS }, + { 0xC92A, CRL_REG_LEN_16BIT, 0xD800, AR023Z_I2C_ADDRESS }, + { 0xC92C, CRL_REG_LEN_16BIT, 0xF005, AR023Z_I2C_ADDRESS }, + { 0xC92E, CRL_REG_LEN_16BIT, 0x70CF, AR023Z_I2C_ADDRESS }, + { 0xC930, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC932, CRL_REG_LEN_16BIT, 0x0F7E, AR023Z_I2C_ADDRESS }, + { 0xC934, CRL_REG_LEN_16BIT, 0x7840, AR023Z_I2C_ADDRESS }, + { 0xC936, CRL_REG_LEN_16BIT, 0x036D, AR023Z_I2C_ADDRESS }, + { 0xC938, CRL_REG_LEN_16BIT, 0x1340, AR023Z_I2C_ADDRESS }, + { 0xC93A, CRL_REG_LEN_16BIT, 0x78E0, AR023Z_I2C_ADDRESS }, + { 0xC93C, CRL_REG_LEN_16BIT, 0xD900, AR023Z_I2C_ADDRESS }, + { 0xC93E, CRL_REG_LEN_16BIT, 0xF00A, AR023Z_I2C_ADDRESS }, + { 0xC940, CRL_REG_LEN_16BIT, 0x70CF, AR023Z_I2C_ADDRESS }, + { 0xC942, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC944, CRL_REG_LEN_16BIT, 0xC958, AR023Z_I2C_ADDRESS }, + { 0xC946, CRL_REG_LEN_16BIT, 0x7835, AR023Z_I2C_ADDRESS }, + { 0xC948, CRL_REG_LEN_16BIT, 0x8041, AR023Z_I2C_ADDRESS }, + { 0xC94A, CRL_REG_LEN_16BIT, 0x8000, AR023Z_I2C_ADDRESS }, + { 0xC94C, CRL_REG_LEN_16BIT, 0xE102, AR023Z_I2C_ADDRESS }, + { 0xC94E, CRL_REG_LEN_16BIT, 0xA040, AR023Z_I2C_ADDRESS }, + { 0xC950, CRL_REG_LEN_16BIT, 0x09F1, AR023Z_I2C_ADDRESS }, + { 0xC952, CRL_REG_LEN_16BIT, 0x8194, AR023Z_I2C_ADDRESS }, + { 0xC954, CRL_REG_LEN_16BIT, 0x7FE0, AR023Z_I2C_ADDRESS }, + { 0xC956, CRL_REG_LEN_16BIT, 0xD800, AR023Z_I2C_ADDRESS }, + { 0xC958, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC95A, CRL_REG_LEN_16BIT, 0xC164, AR023Z_I2C_ADDRESS }, + { 0xC95C, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC95E, CRL_REG_LEN_16BIT, 0xC7A4, AR023Z_I2C_ADDRESS }, + { 0xC960, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC962, CRL_REG_LEN_16BIT, 0xC198, AR023Z_I2C_ADDRESS }, + { 0xC964, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC966, CRL_REG_LEN_16BIT, 0xC7E4, AR023Z_I2C_ADDRESS }, + { 0xC968, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC96A, CRL_REG_LEN_16BIT, 0xBB6C, AR023Z_I2C_ADDRESS }, + { 0xC96C, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC96E, CRL_REG_LEN_16BIT, 0xC8E8, AR023Z_I2C_ADDRESS }, + { 0x098E, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x01EC, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0240, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0xA103, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x0204, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x01CC, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8702, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8701, AR023Z_I2C_ADDRESS }, + { 0x00, 
CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x0220, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x005C, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8706, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0982, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0x098A, CRL_REG_LEN_16BIT, 0x4970, AR023Z_I2C_ADDRESS }, + { 0xC970, CRL_REG_LEN_16BIT, 0xC0F1, AR023Z_I2C_ADDRESS }, + { 0xC972, CRL_REG_LEN_16BIT, 0x0ACA, AR023Z_I2C_ADDRESS }, + { 0xC974, CRL_REG_LEN_16BIT, 0x1340, AR023Z_I2C_ADDRESS }, + { 0xC976, CRL_REG_LEN_16BIT, 0x71CF, AR023Z_I2C_ADDRESS }, + { 0xC978, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xC97A, CRL_REG_LEN_16BIT, 0x2896, AR023Z_I2C_ADDRESS }, + { 0xC97C, CRL_REG_LEN_16BIT, 0x7940, AR023Z_I2C_ADDRESS }, + { 0xC97E, CRL_REG_LEN_16BIT, 0x250A, AR023Z_I2C_ADDRESS }, + { 0xC980, CRL_REG_LEN_16BIT, 0x9000, AR023Z_I2C_ADDRESS }, + { 0xC982, CRL_REG_LEN_16BIT, 0x76CF, AR023Z_I2C_ADDRESS }, + { 0xC984, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC986, CRL_REG_LEN_16BIT, 0xB51C, AR023Z_I2C_ADDRESS }, + { 0xC988, CRL_REG_LEN_16BIT, 0xF407, AR023Z_I2C_ADDRESS }, + { 0xC98A, CRL_REG_LEN_16BIT, 0x0D4A, AR023Z_I2C_ADDRESS }, + { 0xC98C, CRL_REG_LEN_16BIT, 0x0B20, AR023Z_I2C_ADDRESS }, + { 0xC98E, CRL_REG_LEN_16BIT, 0x8E12, AR023Z_I2C_ADDRESS }, + { 0xC990, CRL_REG_LEN_16BIT, 0x0C6A, AR023Z_I2C_ADDRESS }, + { 0xC992, CRL_REG_LEN_16BIT, 0x0AE0, AR023Z_I2C_ADDRESS }, + { 0xC994, CRL_REG_LEN_16BIT, 0xD801, AR023Z_I2C_ADDRESS }, + { 0xC996, CRL_REG_LEN_16BIT, 0x0315, AR023Z_I2C_ADDRESS }, + { 0xC998, CRL_REG_LEN_16BIT, 0x1360, AR023Z_I2C_ADDRESS }, + { 0xC99A, CRL_REG_LEN_16BIT, 0x70A9, AR023Z_I2C_ADDRESS }, + { 0xC99C, CRL_REG_LEN_16BIT, 0xD900, AR023Z_I2C_ADDRESS }, + { 0xC99E, CRL_REG_LEN_16BIT, 0xF00A, AR023Z_I2C_ADDRESS }, + { 0xC9A0, CRL_REG_LEN_16BIT, 0x70CF, AR023Z_I2C_ADDRESS }, + { 0xC9A2, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC9A4, CRL_REG_LEN_16BIT, 0xC9C4, AR023Z_I2C_ADDRESS }, + { 0xC9A6, CRL_REG_LEN_16BIT, 0x7835, AR023Z_I2C_ADDRESS }, + { 0xC9A8, CRL_REG_LEN_16BIT, 0x8041, AR023Z_I2C_ADDRESS }, + { 0xC9AA, CRL_REG_LEN_16BIT, 0x8000, AR023Z_I2C_ADDRESS }, + { 0xC9AC, CRL_REG_LEN_16BIT, 0xE102, AR023Z_I2C_ADDRESS }, + { 0xC9AE, CRL_REG_LEN_16BIT, 0xA040, AR023Z_I2C_ADDRESS }, + { 0xC9B0, CRL_REG_LEN_16BIT, 0x09F1, AR023Z_I2C_ADDRESS }, + { 0xC9B2, CRL_REG_LEN_16BIT, 0x8094, AR023Z_I2C_ADDRESS }, + { 0xC9B4, CRL_REG_LEN_16BIT, 0x71CF, AR023Z_I2C_ADDRESS }, + { 0xC9B6, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC9B8, CRL_REG_LEN_16BIT, 0xB51C, AR023Z_I2C_ADDRESS }, + { 0xC9BA, CRL_REG_LEN_16BIT, 0xD808, AR023Z_I2C_ADDRESS }, + { 0xC9BC, CRL_REG_LEN_16BIT, 0xA912, AR023Z_I2C_ADDRESS }, + { 0xC9BE, CRL_REG_LEN_16BIT, 0x7FE0, AR023Z_I2C_ADDRESS }, + { 0xC9C0, CRL_REG_LEN_16BIT, 0xD800, AR023Z_I2C_ADDRESS }, + { 0xC9C2, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC9C4, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC9C6, CRL_REG_LEN_16BIT, 0xBFE4, AR023Z_I2C_ADDRESS }, + { 0xC9C8, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xC9CA, CRL_REG_LEN_16BIT, 0xC970, AR023Z_I2C_ADDRESS }, + { 0x098E, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x024C, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0340, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0xA103, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x0204, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x005C, AR023Z_I2C_ADDRESS }, + { 
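+/*
+ * Readability note on the recurring pattern from here on (an interpretation
+ * of the blob, not an authoritative description): 0xFC00 is loaded with a
+ * sensor register address (0x3086 is the sequencer data port on this sensor
+ * family), 0xFC02..0xFC0E carry up to seven payload words, and the two
+ * writes of 0x8D06/0x8D08 to command register 0x0040, each followed by a
+ * 20 ms delay entry, issue the host command that forwards the payload to
+ * the attached sensor.
+ */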
0x0040, CRL_REG_LEN_16BIT, 0x8702, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8701, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xCC02, CRL_REG_LEN_16BIT, 0x0493, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D00, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D01, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3088, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0280, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C45, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x5872, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x9B4A, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x3143, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x428E, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x032A, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1400, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C45, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x787B, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x3DFF, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x3DFF, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x3DEA, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x2A04, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x3D00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C10, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2A05, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x2A15, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x352A, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x053D, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x1045, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x5800, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C2A, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x042A, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x143D, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0xFF3D, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0xFF3D, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0xEA2A, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x0400, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, 
CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C62, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2A28, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x8E00, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x362A, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x083D, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x647A, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x3D00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C04, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x442C, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x4B8F, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x0043, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x0C2D, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x6343, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1600, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C8E, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x032A, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0xFC5C, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x1D57, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x5449, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x5F53, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x0500, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C53, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x074D, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x2BF8, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x1016, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x4C08, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x5556, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x2B00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0CB8, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2B98, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x4E11, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2904, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x2984, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x2994, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x6000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C5C, 
AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x195C, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x1B45, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x4845, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x0845, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x8829, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0xB600, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C8E, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x012A, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0xF83E, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x022A, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0xFA3F, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x095C, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1B00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C29, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0xB23F, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x0C3E, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x023E, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x135C, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x133F, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1100, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C3E, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0B5F, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x2B90, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2AF2, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x2B80, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x3E04, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x3F00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C06, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x6029, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0xA229, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0xA35F, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x4D19, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x2AFA, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x2900, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C83, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x45A8, AR023Z_I2C_ADDRESS }, + { 0xFC06, 
CRL_REG_LEN_16BIT, 0x3E07, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2AFB, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x3E29, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x4588, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x2100, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C3E, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x082A, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0xFA5D, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2992, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x8810, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x2B04, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x8B00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C16, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x858D, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x484D, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x4E2B, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x804C, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x0B60, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x3F00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C28, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2AF2, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x3F0F, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2982, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x2983, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x2943, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x5C00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C15, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x5F4D, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x192A, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0xFA45, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x588E, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x002A, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x9800, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C3F, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0612, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x444A, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x0443, 
AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x1605, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x4316, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x5800, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C43, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x165A, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x4316, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x0643, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x1607, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x4316, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x8E00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C03, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2A9C, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x4578, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x7B3F, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x072A, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x9D3E, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x2E00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C45, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x5825, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x3E06, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x8E01, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x2A98, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x8E00, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1200, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C44, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x4B03, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x432D, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x4643, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x16A3, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x4316, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x5D00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C0D, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2944, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x8810, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2B04, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x530D, AR023Z_I2C_ADDRESS }, + { 0xFC0C, 
CRL_REG_LEN_16BIT, 0x8B16, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x8500, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C44, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x8E03, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x2AFC, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x5C1D, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x8D60, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x5754, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x4900, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C5F, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x5305, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x5307, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x4D2B, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0xF810, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x164C, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x0800, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C55, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x562B, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0xB82B, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x984E, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x1129, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x0429, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x8400, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C29, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x9460, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x5C19, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x5C1B, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x4548, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x4508, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x4500, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C88, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x29B6, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x8E01, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2AF8, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x3E02, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x2AFA, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x3F00, 
AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C09, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x5C1B, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x29B2, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x3F0C, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x3E02, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x3E13, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x5C00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C13, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x3F11, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x3E0B, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x5F2B, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x902A, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0xF22B, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x8000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C3E, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x043F, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x0660, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x29A2, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x29A3, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x5F4D, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1C00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C2A, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0xFA29, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x8345, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0xA83E, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x072A, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0xFB3E, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x2900, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C45, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x8824, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x3E08, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2AFA, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x5D29, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x9288, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, 
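+/*
+ * Entries of the form { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 } are not
+ * register writes: CRL_REG_LEN_DELAY turns the entry into a fixed delay
+ * (20 ms here), presumably giving the ISP time to complete the preceding
+ * host command before the next access.
+ */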
CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C2B, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x048B, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x1686, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x8D48, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x4D4E, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x2B80, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x4C00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C0B, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x603F, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x282A, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0xF23F, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x0F29, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x8229, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x8300, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C29, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x435C, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x155F, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x4D1C, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x2AFA, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x4558, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x8E00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C00, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2A98, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x3F06, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x4A73, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x9D0A, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x4316, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x0B00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C43, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x168E, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x032A, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x9C45, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x783F, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x072A, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x9D00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + 
{ 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C3E, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x1245, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x583F, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x048E, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x012A, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x988E, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C91, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x769C, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x779C, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x4644, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x1616, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x907A, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1200, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C44, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x4B4A, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x0043, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x1663, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x4316, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x0843, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1600, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C50, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x4316, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x6543, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x1666, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x4316, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x8E03, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x2A00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C9C, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x4578, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x3F07, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2A9D, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x5D0C, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x2944, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x8800, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, 
AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C10, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2B04, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x530D, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x8B16, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x863E, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x1F45, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x5800, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0C28, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x3E06, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x8E01, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2A98, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x8E00, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x8D60, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1200, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3086, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0444, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x4B2C, AR023Z_I2C_ADDRESS }, + { 0xFC06, CRL_REG_LEN_16BIT, 0x2C00, AR023Z_I2C_ADDRESS }, + { 0xFC08, CRL_REG_LEN_16BIT, 0x2A98, AR023Z_I2C_ADDRESS }, + { 0xFC0A, CRL_REG_LEN_16BIT, 0x8E00, AR023Z_I2C_ADDRESS }, + { 0xFC0C, CRL_REG_LEN_16BIT, 0x8D60, AR023Z_I2C_ADDRESS }, + { 0xFC0E, CRL_REG_LEN_16BIT, 0x1200, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D02, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8E01, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xCCCC, CRL_REG_LEN_08BIT, 0x69, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D00, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D01, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3ED6, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0234, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0xB300, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x2436, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0200, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0E00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x320C, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0201, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x8000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 
0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x320E, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0203, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3210, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0205, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3204, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x020B, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x6D00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x30FE, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0200, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x8000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3ED8, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x027B, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x9900, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3EDC, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x029B, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0xA800, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3EDA, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x029B, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x9B00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3092, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0200, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x6F00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3EEC, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x021C, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0400, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + 
{ 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x30BA, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0277, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x9C00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3EF6, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x02A7, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0F00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3044, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0204, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x1000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3ED0, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x02FF, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x4400, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3ED4, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0203, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x1F00, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x30FE, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0200, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x8000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3EE2, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0288, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x6600, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3EE4, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0266, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2300, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3EE6, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0222, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x6300, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 
0x30E0, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0242, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x8300, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x30F0, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0212, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x8300, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D02, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xCAC8, CRL_REG_LEN_08BIT, 0x41, AR023Z_I2C_ADDRESS }, + { 0xCACA, CRL_REG_LEN_16BIT, 0x022F, AR023Z_I2C_ADDRESS }, + { 0xCACE, CRL_REG_LEN_16BIT, 0x010E, AR023Z_I2C_ADDRESS }, + { 0xCAD0, CRL_REG_LEN_16BIT, 0x0033, AR023Z_I2C_ADDRESS }, + { 0xCAD4, CRL_REG_LEN_16BIT, 0x001F, AR023Z_I2C_ADDRESS }, + { 0xCAD4, CRL_REG_LEN_16BIT, 0x001F, AR023Z_I2C_ADDRESS }, + { 0xCAD8, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xCADA, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC806, CRL_REG_LEN_16BIT, 0x000C, AR023Z_I2C_ADDRESS }, + { 0xC80A, CRL_REG_LEN_16BIT, 0x078B, AR023Z_I2C_ADDRESS }, + { 0xC804, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC808, CRL_REG_LEN_16BIT, 0x0437, AR023Z_I2C_ADDRESS }, + { 0xC838, CRL_REG_LEN_16BIT, 0x0302, AR023Z_I2C_ADDRESS }, + { 0xC83A, CRL_REG_LEN_16BIT, 0x211B, AR023Z_I2C_ADDRESS }, + { 0xC840, CRL_REG_LEN_16BIT, 0x010C, AR023Z_I2C_ADDRESS }, + { 0xC844, CRL_REG_LEN_16BIT, 0x0802, AR023Z_I2C_ADDRESS }, + { 0xC844, CRL_REG_LEN_16BIT, 0x0801, AR023Z_I2C_ADDRESS }, + { 0xC80C, CRL_REG_LEN_16BIT, 0x04BA, AR023Z_I2C_ADDRESS }, + { 0xC80E, CRL_REG_LEN_16BIT, 0x3674, AR023Z_I2C_ADDRESS }, + { 0xC814, CRL_REG_LEN_16BIT, 0x049E, AR023Z_I2C_ADDRESS }, + { 0xC816, CRL_REG_LEN_16BIT, 0x08BC, AR023Z_I2C_ADDRESS }, + { 0xC846, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC890, CRL_REG_LEN_08BIT, 0x00, AR023Z_I2C_ADDRESS }, + { 0xC8A0, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC8A2, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC8A4, CRL_REG_LEN_16BIT, 0x0780, AR023Z_I2C_ADDRESS }, + { 0xC8A6, CRL_REG_LEN_16BIT, 0x0438, AR023Z_I2C_ADDRESS }, + { 0xC9F8, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC9FA, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC9FC, CRL_REG_LEN_16BIT, 0x0780, AR023Z_I2C_ADDRESS }, + { 0xC9FE, CRL_REG_LEN_16BIT, 0x0438, AR023Z_I2C_ADDRESS }, + { 0xCA00, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xCA02, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xCA04, CRL_REG_LEN_16BIT, 0x0780, AR023Z_I2C_ADDRESS }, + { 0xCA06, CRL_REG_LEN_16BIT, 0x0438, AR023Z_I2C_ADDRESS }, + { 0xCAE4, CRL_REG_LEN_16BIT, 0x0780, AR023Z_I2C_ADDRESS }, + { 0xCAE6, CRL_REG_LEN_16BIT, 0x0438, AR023Z_I2C_ADDRESS }, + { 0xCAE8, CRL_REG_LEN_16BIT, 0x0011, AR023Z_I2C_ADDRESS }, + { 0xCAE8, CRL_REG_LEN_16BIT, 0x0011, AR023Z_I2C_ADDRESS }, + { 0xCAEA, CRL_REG_LEN_08BIT, 0x00, AR023Z_I2C_ADDRESS }, + { 0xCAEB, CRL_REG_LEN_08BIT, 0x00, AR023Z_I2C_ADDRESS }, + { 0xCAF4, CRL_REG_LEN_16BIT, 0x249F, AR023Z_I2C_ADDRESS }, + { 0xCAF8, CRL_REG_LEN_08BIT, 0x0E, AR023Z_I2C_ADDRESS }, + { 0xCAFC, CRL_REG_LEN_16BIT, 0x4201, AR023Z_I2C_ADDRESS }, + { 0xCAFE, CRL_REG_LEN_16BIT, 
0x08BC, AR023Z_I2C_ADDRESS }, + { 0xCB00, CRL_REG_LEN_16BIT, 0x0800, AR023Z_I2C_ADDRESS }, + { 0x8C16, CRL_REG_LEN_08BIT, 0x19, AR023Z_I2C_ADDRESS }, + { 0xCAC4, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0x3600, CRL_REG_LEN_16BIT, 0x00F0, AR023Z_I2C_ADDRESS }, + { 0x3602, CRL_REG_LEN_16BIT, 0xD789, AR023Z_I2C_ADDRESS }, + { 0x3604, CRL_REG_LEN_16BIT, 0x4D70, AR023Z_I2C_ADDRESS }, + { 0x3606, CRL_REG_LEN_16BIT, 0x6A8D, AR023Z_I2C_ADDRESS }, + { 0x3608, CRL_REG_LEN_16BIT, 0x7CEE, AR023Z_I2C_ADDRESS }, + { 0x360A, CRL_REG_LEN_16BIT, 0x00D0, AR023Z_I2C_ADDRESS }, + { 0x360C, CRL_REG_LEN_16BIT, 0x8F0B, AR023Z_I2C_ADDRESS }, + { 0x360E, CRL_REG_LEN_16BIT, 0x58B0, AR023Z_I2C_ADDRESS }, + { 0x3610, CRL_REG_LEN_16BIT, 0x2E2D, AR023Z_I2C_ADDRESS }, + { 0x3612, CRL_REG_LEN_16BIT, 0x0BCF, AR023Z_I2C_ADDRESS }, + { 0x3614, CRL_REG_LEN_16BIT, 0x00B0, AR023Z_I2C_ADDRESS }, + { 0x3616, CRL_REG_LEN_16BIT, 0xC149, AR023Z_I2C_ADDRESS }, + { 0x3618, CRL_REG_LEN_16BIT, 0x4950, AR023Z_I2C_ADDRESS }, + { 0x361A, CRL_REG_LEN_16BIT, 0x024E, AR023Z_I2C_ADDRESS }, + { 0x361C, CRL_REG_LEN_16BIT, 0x0B4E, AR023Z_I2C_ADDRESS }, + { 0x361E, CRL_REG_LEN_16BIT, 0x00D0, AR023Z_I2C_ADDRESS }, + { 0x3620, CRL_REG_LEN_16BIT, 0xD2E9, AR023Z_I2C_ADDRESS }, + { 0x3622, CRL_REG_LEN_16BIT, 0x4D10, AR023Z_I2C_ADDRESS }, + { 0x3624, CRL_REG_LEN_16BIT, 0x67ED, AR023Z_I2C_ADDRESS }, + { 0x3626, CRL_REG_LEN_16BIT, 0x1ACF, AR023Z_I2C_ADDRESS }, + { 0x3628, CRL_REG_LEN_16BIT, 0x406B, AR023Z_I2C_ADDRESS }, + { 0x362A, CRL_REG_LEN_16BIT, 0x1FC9, AR023Z_I2C_ADDRESS }, + { 0x362C, CRL_REG_LEN_16BIT, 0x6750, AR023Z_I2C_ADDRESS }, + { 0x362E, CRL_REG_LEN_16BIT, 0x4E0F, AR023Z_I2C_ADDRESS }, + { 0x3630, CRL_REG_LEN_16BIT, 0xBCF3, AR023Z_I2C_ADDRESS }, + { 0x3632, CRL_REG_LEN_16BIT, 0x138C, AR023Z_I2C_ADDRESS }, + { 0x3634, CRL_REG_LEN_16BIT, 0x366A, AR023Z_I2C_ADDRESS }, + { 0x3636, CRL_REG_LEN_16BIT, 0x6390, AR023Z_I2C_ADDRESS }, + { 0x3638, CRL_REG_LEN_16BIT, 0x2E2F, AR023Z_I2C_ADDRESS }, + { 0x363A, CRL_REG_LEN_16BIT, 0xB9D3, AR023Z_I2C_ADDRESS }, + { 0x363C, CRL_REG_LEN_16BIT, 0x2B4A, AR023Z_I2C_ADDRESS }, + { 0x363E, CRL_REG_LEN_16BIT, 0x008B, AR023Z_I2C_ADDRESS }, + { 0x3640, CRL_REG_LEN_16BIT, 0x6B30, AR023Z_I2C_ADDRESS }, + { 0x3642, CRL_REG_LEN_16BIT, 0x710F, AR023Z_I2C_ADDRESS }, + { 0x3644, CRL_REG_LEN_16BIT, 0xC413, AR023Z_I2C_ADDRESS }, + { 0x3646, CRL_REG_LEN_16BIT, 0x2A4B, AR023Z_I2C_ADDRESS }, + { 0x3648, CRL_REG_LEN_16BIT, 0x080A, AR023Z_I2C_ADDRESS }, + { 0x364A, CRL_REG_LEN_16BIT, 0x6BD0, AR023Z_I2C_ADDRESS }, + { 0x364C, CRL_REG_LEN_16BIT, 0x0050, AR023Z_I2C_ADDRESS }, + { 0x364E, CRL_REG_LEN_16BIT, 0xC4D3, AR023Z_I2C_ADDRESS }, + { 0x3650, CRL_REG_LEN_16BIT, 0x6F90, AR023Z_I2C_ADDRESS }, + { 0x3652, CRL_REG_LEN_16BIT, 0x5A2F, AR023Z_I2C_ADDRESS }, + { 0x3654, CRL_REG_LEN_16BIT, 0xE631, AR023Z_I2C_ADDRESS }, + { 0x3656, CRL_REG_LEN_16BIT, 0x8812, AR023Z_I2C_ADDRESS }, + { 0x3658, CRL_REG_LEN_16BIT, 0x2155, AR023Z_I2C_ADDRESS }, + { 0x365A, CRL_REG_LEN_16BIT, 0x6A30, AR023Z_I2C_ADDRESS }, + { 0x365C, CRL_REG_LEN_16BIT, 0x7FCF, AR023Z_I2C_ADDRESS }, + { 0x365E, CRL_REG_LEN_16BIT, 0xE291, AR023Z_I2C_ADDRESS }, + { 0x3660, CRL_REG_LEN_16BIT, 0x9C92, AR023Z_I2C_ADDRESS }, + { 0x3662, CRL_REG_LEN_16BIT, 0x2C75, AR023Z_I2C_ADDRESS }, + { 0x3664, CRL_REG_LEN_16BIT, 0x5F90, AR023Z_I2C_ADDRESS }, + { 0x3666, CRL_REG_LEN_16BIT, 0x618F, AR023Z_I2C_ADDRESS }, + { 0x3668, CRL_REG_LEN_16BIT, 0xED91, AR023Z_I2C_ADDRESS }, + { 0x366A, CRL_REG_LEN_16BIT, 0x9FB2, AR023Z_I2C_ADDRESS }, + { 0x366C, CRL_REG_LEN_16BIT, 0x1915, 
AR023Z_I2C_ADDRESS }, + { 0x366E, CRL_REG_LEN_16BIT, 0x6E90, AR023Z_I2C_ADDRESS }, + { 0x3670, CRL_REG_LEN_16BIT, 0x5C0F, AR023Z_I2C_ADDRESS }, + { 0x3672, CRL_REG_LEN_16BIT, 0xE111, AR023Z_I2C_ADDRESS }, + { 0x3674, CRL_REG_LEN_16BIT, 0x9352, AR023Z_I2C_ADDRESS }, + { 0x3676, CRL_REG_LEN_16BIT, 0x2135, AR023Z_I2C_ADDRESS }, + { 0x3678, CRL_REG_LEN_16BIT, 0x7230, AR023Z_I2C_ADDRESS }, + { 0x367A, CRL_REG_LEN_16BIT, 0x2D92, AR023Z_I2C_ADDRESS }, + { 0x367C, CRL_REG_LEN_16BIT, 0xEEB5, AR023Z_I2C_ADDRESS }, + { 0x367E, CRL_REG_LEN_16BIT, 0x8495, AR023Z_I2C_ADDRESS }, + { 0x3680, CRL_REG_LEN_16BIT, 0x3C38, AR023Z_I2C_ADDRESS }, + { 0x3682, CRL_REG_LEN_16BIT, 0x7B50, AR023Z_I2C_ADDRESS }, + { 0x3684, CRL_REG_LEN_16BIT, 0x2332, AR023Z_I2C_ADDRESS }, + { 0x3686, CRL_REG_LEN_16BIT, 0xED55, AR023Z_I2C_ADDRESS }, + { 0x3688, CRL_REG_LEN_16BIT, 0x8355, AR023Z_I2C_ADDRESS }, + { 0x368A, CRL_REG_LEN_16BIT, 0x3978, AR023Z_I2C_ADDRESS }, + { 0x368C, CRL_REG_LEN_16BIT, 0x74F0, AR023Z_I2C_ADDRESS }, + { 0x368E, CRL_REG_LEN_16BIT, 0x4032, AR023Z_I2C_ADDRESS }, + { 0x3690, CRL_REG_LEN_16BIT, 0xF9B5, AR023Z_I2C_ADDRESS }, + { 0x3692, CRL_REG_LEN_16BIT, 0x8D75, AR023Z_I2C_ADDRESS }, + { 0x3694, CRL_REG_LEN_16BIT, 0x4338, AR023Z_I2C_ADDRESS }, + { 0x3696, CRL_REG_LEN_16BIT, 0x7550, AR023Z_I2C_ADDRESS }, + { 0x3698, CRL_REG_LEN_16BIT, 0x2CB2, AR023Z_I2C_ADDRESS }, + { 0x369A, CRL_REG_LEN_16BIT, 0xF135, AR023Z_I2C_ADDRESS }, + { 0x369C, CRL_REG_LEN_16BIT, 0x80F5, AR023Z_I2C_ADDRESS }, + { 0x369E, CRL_REG_LEN_16BIT, 0x3B98, AR023Z_I2C_ADDRESS }, + { 0x36A0, CRL_REG_LEN_16BIT, 0x90F2, AR023Z_I2C_ADDRESS }, + { 0x36A2, CRL_REG_LEN_16BIT, 0xD4D2, AR023Z_I2C_ADDRESS }, + { 0x36A4, CRL_REG_LEN_16BIT, 0x35B7, AR023Z_I2C_ADDRESS }, + { 0x36A6, CRL_REG_LEN_16BIT, 0x1A75, AR023Z_I2C_ADDRESS }, + { 0x36A8, CRL_REG_LEN_16BIT, 0x9B5A, AR023Z_I2C_ADDRESS }, + { 0x36AA, CRL_REG_LEN_16BIT, 0xFF71, AR023Z_I2C_ADDRESS }, + { 0x36AC, CRL_REG_LEN_16BIT, 0xC832, AR023Z_I2C_ADDRESS }, + { 0x36AE, CRL_REG_LEN_16BIT, 0x3277, AR023Z_I2C_ADDRESS }, + { 0x36B0, CRL_REG_LEN_16BIT, 0x16F5, AR023Z_I2C_ADDRESS }, + { 0x36B2, CRL_REG_LEN_16BIT, 0x97BA, AR023Z_I2C_ADDRESS }, + { 0x36B4, CRL_REG_LEN_16BIT, 0x95B2, AR023Z_I2C_ADDRESS }, + { 0x36B6, CRL_REG_LEN_16BIT, 0x9373, AR023Z_I2C_ADDRESS }, + { 0x36B8, CRL_REG_LEN_16BIT, 0x3C77, AR023Z_I2C_ADDRESS }, + { 0x36BA, CRL_REG_LEN_16BIT, 0x6115, AR023Z_I2C_ADDRESS }, + { 0x36BC, CRL_REG_LEN_16BIT, 0xA0BA, AR023Z_I2C_ADDRESS }, + { 0x36BE, CRL_REG_LEN_16BIT, 0x95B2, AR023Z_I2C_ADDRESS }, + { 0x36C0, CRL_REG_LEN_16BIT, 0xC492, AR023Z_I2C_ADDRESS }, + { 0x36C2, CRL_REG_LEN_16BIT, 0x3517, AR023Z_I2C_ADDRESS }, + { 0x36C4, CRL_REG_LEN_16BIT, 0x15B5, AR023Z_I2C_ADDRESS }, + { 0x36C6, CRL_REG_LEN_16BIT, 0x9A9A, AR023Z_I2C_ADDRESS }, + { 0x36C8, CRL_REG_LEN_16BIT, 0x018A, AR023Z_I2C_ADDRESS }, + { 0x36CA, CRL_REG_LEN_16BIT, 0x03BE, AR023Z_I2C_ADDRESS }, + { 0xCAC4, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xC91E, CRL_REG_LEN_16BIT, 0x0A8C, AR023Z_I2C_ADDRESS }, + { 0xC920, CRL_REG_LEN_16BIT, 0x0FA0, AR023Z_I2C_ADDRESS }, + { 0xC922, CRL_REG_LEN_16BIT, 0x1964, AR023Z_I2C_ADDRESS }, + { 0xC924, CRL_REG_LEN_16BIT, 0x09C4, AR023Z_I2C_ADDRESS }, + { 0xC926, CRL_REG_LEN_16BIT, 0x1964, AR023Z_I2C_ADDRESS }, + { 0xC912, CRL_REG_LEN_16BIT, 0x005F, AR023Z_I2C_ADDRESS }, + { 0xC914, CRL_REG_LEN_16BIT, 0x016D, AR023Z_I2C_ADDRESS }, + { 0xC916, CRL_REG_LEN_16BIT, 0x00AF, AR023Z_I2C_ADDRESS }, + { 0xC918, CRL_REG_LEN_16BIT, 0x0148, AR023Z_I2C_ADDRESS }, + { 0xC91A, CRL_REG_LEN_16BIT, 0x0096, 
AR023Z_I2C_ADDRESS }, + { 0xC91C, CRL_REG_LEN_16BIT, 0x00B4, AR023Z_I2C_ADDRESS }, + { 0xC982, CRL_REG_LEN_08BIT, 0x82, AR023Z_I2C_ADDRESS }, + { 0xC983, CRL_REG_LEN_08BIT, 0x80, AR023Z_I2C_ADDRESS }, + { 0xC984, CRL_REG_LEN_08BIT, 0x86, AR023Z_I2C_ADDRESS }, + { 0xC985, CRL_REG_LEN_08BIT, 0x84, AR023Z_I2C_ADDRESS }, + { 0xC986, CRL_REG_LEN_08BIT, 0x82, AR023Z_I2C_ADDRESS }, + { 0xC987, CRL_REG_LEN_08BIT, 0x80, AR023Z_I2C_ADDRESS }, + { 0xC980, CRL_REG_LEN_16BIT, 0x1450, AR023Z_I2C_ADDRESS }, + { 0xC8DC, CRL_REG_LEN_16BIT, 0x013E, AR023Z_I2C_ADDRESS }, + { 0xC8DE, CRL_REG_LEN_16BIT, 0xFFDB, AR023Z_I2C_ADDRESS }, + { 0xC8E0, CRL_REG_LEN_16BIT, 0xFFE7, AR023Z_I2C_ADDRESS }, + { 0xC8E2, CRL_REG_LEN_16BIT, 0xFF75, AR023Z_I2C_ADDRESS }, + { 0xC8E4, CRL_REG_LEN_16BIT, 0x01B8, AR023Z_I2C_ADDRESS }, + { 0xC8E6, CRL_REG_LEN_16BIT, 0xFFD2, AR023Z_I2C_ADDRESS }, + { 0xC8E8, CRL_REG_LEN_16BIT, 0xFF52, AR023Z_I2C_ADDRESS }, + { 0xC8EA, CRL_REG_LEN_16BIT, 0xFF1A, AR023Z_I2C_ADDRESS }, + { 0xC8EC, CRL_REG_LEN_16BIT, 0x0295, AR023Z_I2C_ADDRESS }, + { 0xC8EE, CRL_REG_LEN_16BIT, 0x01B0, AR023Z_I2C_ADDRESS }, + { 0xC8F0, CRL_REG_LEN_16BIT, 0xFF40, AR023Z_I2C_ADDRESS }, + { 0xC8F2, CRL_REG_LEN_16BIT, 0x0010, AR023Z_I2C_ADDRESS }, + { 0xC8F4, CRL_REG_LEN_16BIT, 0xFF87, AR023Z_I2C_ADDRESS }, + { 0xC8F6, CRL_REG_LEN_16BIT, 0x01A2, AR023Z_I2C_ADDRESS }, + { 0xC8F8, CRL_REG_LEN_16BIT, 0xFFD7, AR023Z_I2C_ADDRESS }, + { 0xC8FA, CRL_REG_LEN_16BIT, 0xFFD3, AR023Z_I2C_ADDRESS }, + { 0xC8FC, CRL_REG_LEN_16BIT, 0xFF63, AR023Z_I2C_ADDRESS }, + { 0xC8FE, CRL_REG_LEN_16BIT, 0x01CB, AR023Z_I2C_ADDRESS }, + { 0xC900, CRL_REG_LEN_16BIT, 0x0154, AR023Z_I2C_ADDRESS }, + { 0xC902, CRL_REG_LEN_16BIT, 0xFFCD, AR023Z_I2C_ADDRESS }, + { 0xC904, CRL_REG_LEN_16BIT, 0xFFDE, AR023Z_I2C_ADDRESS }, + { 0xC906, CRL_REG_LEN_16BIT, 0xFFB1, AR023Z_I2C_ADDRESS }, + { 0xC908, CRL_REG_LEN_16BIT, 0x013B, AR023Z_I2C_ADDRESS }, + { 0xC90A, CRL_REG_LEN_16BIT, 0xFFEC, AR023Z_I2C_ADDRESS }, + { 0xC90C, CRL_REG_LEN_16BIT, 0xFFD9, AR023Z_I2C_ADDRESS }, + { 0xC90E, CRL_REG_LEN_16BIT, 0xFF9C, AR023Z_I2C_ADDRESS }, + { 0xC910, CRL_REG_LEN_16BIT, 0x018B, AR023Z_I2C_ADDRESS }, + { 0xC97D, CRL_REG_LEN_08BIT, 0x10, AR023Z_I2C_ADDRESS }, + { 0xC92A, CRL_REG_LEN_16BIT, 0x0020, AR023Z_I2C_ADDRESS }, + { 0xC92C, CRL_REG_LEN_16BIT, 0x0018, AR023Z_I2C_ADDRESS }, + { 0xC92E, CRL_REG_LEN_16BIT, 0x0080, AR023Z_I2C_ADDRESS }, + { 0xC930, CRL_REG_LEN_16BIT, 0x0080, AR023Z_I2C_ADDRESS }, + { 0xC932, CRL_REG_LEN_16BIT, 0x0005, AR023Z_I2C_ADDRESS }, + { 0xC934, CRL_REG_LEN_16BIT, 0xFFE0, AR023Z_I2C_ADDRESS }, + { 0xC936, CRL_REG_LEN_08BIT, 0x33, AR023Z_I2C_ADDRESS }, + { 0xC937, CRL_REG_LEN_08BIT, 0x26, AR023Z_I2C_ADDRESS }, + { 0xC938, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC93A, CRL_REG_LEN_16BIT, 0x0047, AR023Z_I2C_ADDRESS }, + { 0xC93C, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC93E, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC93E, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC940, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC942, CRL_REG_LEN_16BIT, 0x0022, AR023Z_I2C_ADDRESS }, + { 0xC944, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC946, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC948, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC94A, CRL_REG_LEN_16BIT, 0x0002, AR023Z_I2C_ADDRESS }, + { 0xC94C, CRL_REG_LEN_16BIT, 0x3000, AR023Z_I2C_ADDRESS }, + { 0xC94E, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC950, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC952, 
CRL_REG_LEN_16BIT, 0x0123, AR023Z_I2C_ADDRESS }, + { 0xC954, CRL_REG_LEN_16BIT, 0x2000, AR023Z_I2C_ADDRESS }, + { 0xC956, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC958, CRL_REG_LEN_16BIT, 0x0150, AR023Z_I2C_ADDRESS }, + { 0xC95A, CRL_REG_LEN_16BIT, 0x5300, AR023Z_I2C_ADDRESS }, + { 0xC95C, CRL_REG_LEN_16BIT, 0x1112, AR023Z_I2C_ADDRESS }, + { 0xC95E, CRL_REG_LEN_16BIT, 0x2010, AR023Z_I2C_ADDRESS }, + { 0xC960, CRL_REG_LEN_16BIT, 0x5574, AR023Z_I2C_ADDRESS }, + { 0xC962, CRL_REG_LEN_16BIT, 0x5000, AR023Z_I2C_ADDRESS }, + { 0xC964, CRL_REG_LEN_16BIT, 0x0202, AR023Z_I2C_ADDRESS }, + { 0xC966, CRL_REG_LEN_16BIT, 0x5300, AR023Z_I2C_ADDRESS }, + { 0xC968, CRL_REG_LEN_16BIT, 0x0371, AR023Z_I2C_ADDRESS }, + { 0xC96A, CRL_REG_LEN_16BIT, 0x0400, AR023Z_I2C_ADDRESS }, + { 0xC96C, CRL_REG_LEN_16BIT, 0x0002, AR023Z_I2C_ADDRESS }, + { 0xC96E, CRL_REG_LEN_16BIT, 0x2000, AR023Z_I2C_ADDRESS }, + { 0xC970, CRL_REG_LEN_16BIT, 0x0023, AR023Z_I2C_ADDRESS }, + { 0xC972, CRL_REG_LEN_16BIT, 0x0330, AR023Z_I2C_ADDRESS }, + { 0xC974, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xC976, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xCC02, CRL_REG_LEN_16BIT, 0x0083, AR023Z_I2C_ADDRESS }, + { 0xC88C, CRL_REG_LEN_16BIT, 0x0080, AR023Z_I2C_ADDRESS }, + { 0xC84A, CRL_REG_LEN_16BIT, 0x0BA0, AR023Z_I2C_ADDRESS }, + { 0xC84C, CRL_REG_LEN_16BIT, 0x0FA0, AR023Z_I2C_ADDRESS }, + { 0xC84E, CRL_REG_LEN_16BIT, 0x0800, AR023Z_I2C_ADDRESS }, + { 0xCA0C, CRL_REG_LEN_16BIT, 0xF8C0, AR023Z_I2C_ADDRESS }, + { 0xC846, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xCAE8, CRL_REG_LEN_16BIT, 0x0010, AR023Z_I2C_ADDRESS }, + { 0x3210, CRL_REG_LEN_16BIT, 0x0EB0, AR023Z_I2C_ADDRESS }, + { 0xBC02, CRL_REG_LEN_16BIT, 0x03C5, AR023Z_I2C_ADDRESS }, + { 0xA802, CRL_REG_LEN_16BIT, 0x001C, AR023Z_I2C_ADDRESS }, + { 0xA812, CRL_REG_LEN_08BIT, 0x08, AR023Z_I2C_ADDRESS }, + { 0xA81C, CRL_REG_LEN_08BIT, 0x8C, AR023Z_I2C_ADDRESS }, + { 0xC8CE, CRL_REG_LEN_16BIT, 0x0035, AR023Z_I2C_ADDRESS }, + { 0xC8CA, CRL_REG_LEN_16BIT, 0x0030, AR023Z_I2C_ADDRESS }, + { 0xC8CC, CRL_REG_LEN_16BIT, 0x0180, AR023Z_I2C_ADDRESS }, + { 0xC8C6, CRL_REG_LEN_16BIT, 0x008C, AR023Z_I2C_ADDRESS }, + { 0xC8C8, CRL_REG_LEN_16BIT, 0x03FF, AR023Z_I2C_ADDRESS }, + { 0xC8BE, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xA83C, CRL_REG_LEN_16BIT, 0x03E6, AR023Z_I2C_ADDRESS }, + { 0xA83E, CRL_REG_LEN_16BIT, 0x0300, AR023Z_I2C_ADDRESS }, + { 0xA840, CRL_REG_LEN_16BIT, 0x0133, AR023Z_I2C_ADDRESS }, + { 0xC988, CRL_REG_LEN_16BIT, 0x0E17, AR023Z_I2C_ADDRESS }, + { 0x2402, CRL_REG_LEN_16BIT, 0x0008, AR023Z_I2C_ADDRESS }, + { 0xBCBE, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + { 0xBCC0, CRL_REG_LEN_16BIT, 0x00C8, AR023Z_I2C_ADDRESS }, + { 0xBCBA, CRL_REG_LEN_16BIT, 0x0010, AR023Z_I2C_ADDRESS }, + { 0xBCBC, CRL_REG_LEN_16BIT, 0x0017, AR023Z_I2C_ADDRESS }, + { 0xBCC2, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xBCC4, CRL_REG_LEN_16BIT, 0x003B, AR023Z_I2C_ADDRESS }, + { 0xC9CC, CRL_REG_LEN_16BIT, 0xFD00, AR023Z_I2C_ADDRESS }, + { 0xC9CE, CRL_REG_LEN_16BIT, 0x0100, AR023Z_I2C_ADDRESS }, + { 0xC99A, CRL_REG_LEN_16BIT, 0x0600, AR023Z_I2C_ADDRESS }, + { 0xC99C, CRL_REG_LEN_16BIT, 0x0B00, AR023Z_I2C_ADDRESS }, + { 0xC9A0, CRL_REG_LEN_16BIT, 0x00C8, AR023Z_I2C_ADDRESS }, + { 0xC9A2, CRL_REG_LEN_16BIT, 0x0B54, AR023Z_I2C_ADDRESS }, + { 0x2414, CRL_REG_LEN_16BIT, 0x0BA0, AR023Z_I2C_ADDRESS }, + { 0x2416, CRL_REG_LEN_16BIT, 0x0FA0, AR023Z_I2C_ADDRESS }, + { 0x2418, CRL_REG_LEN_16BIT, 0xC350, AR023Z_I2C_ADDRESS }, + { 0x241A, CRL_REG_LEN_16BIT, 
0xFA00, AR023Z_I2C_ADDRESS }, + { 0x241C, CRL_REG_LEN_16BIT, 0x0005, AR023Z_I2C_ADDRESS }, + { 0x241E, CRL_REG_LEN_16BIT, 0x0050, AR023Z_I2C_ADDRESS }, + { 0x2420, CRL_REG_LEN_16BIT, 0x00A5, AR023Z_I2C_ADDRESS }, + { 0x2422, CRL_REG_LEN_16BIT, 0x00A5, AR023Z_I2C_ADDRESS }, + { 0x2424, CRL_REG_LEN_16BIT, 0x00A5, AR023Z_I2C_ADDRESS }, + { 0x2426, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xC996, CRL_REG_LEN_16BIT, 0x03E8, AR023Z_I2C_ADDRESS }, + { 0xC998, CRL_REG_LEN_16BIT, 0x03E8, AR023Z_I2C_ADDRESS }, + { 0xC98A, CRL_REG_LEN_16BIT, 0x000F, AR023Z_I2C_ADDRESS }, + { 0xC9E6, CRL_REG_LEN_16BIT, 0x0AF0, AR023Z_I2C_ADDRESS }, + { 0xCA2A, CRL_REG_LEN_08BIT, 0x32, AR023Z_I2C_ADDRESS }, + { 0xCA2B, CRL_REG_LEN_08BIT, 0x05, AR023Z_I2C_ADDRESS }, + { 0xCA2E, CRL_REG_LEN_08BIT, 0x32, AR023Z_I2C_ADDRESS }, + { 0xCA2F, CRL_REG_LEN_08BIT, 0x0A, AR023Z_I2C_ADDRESS }, + { 0x3222, CRL_REG_LEN_16BIT, 0x0912, AR023Z_I2C_ADDRESS }, + { 0x3224, CRL_REG_LEN_16BIT, 0x0612, AR023Z_I2C_ADDRESS }, + { 0xCAB4, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xCAAE, CRL_REG_LEN_16BIT, 0x0022, AR023Z_I2C_ADDRESS }, + { 0x3414, CRL_REG_LEN_16BIT, 0x3700, AR023Z_I2C_ADDRESS }, + { 0x3408, CRL_REG_LEN_16BIT, 0x3700, AR023Z_I2C_ADDRESS }, + { 0x340C, CRL_REG_LEN_16BIT, 0x2A00, AR023Z_I2C_ADDRESS }, + { 0x3412, CRL_REG_LEN_16BIT, 0x0400, AR023Z_I2C_ADDRESS }, + { 0x3416, CRL_REG_LEN_16BIT, 0x0036, AR023Z_I2C_ADDRESS }, + { 0x341E, CRL_REG_LEN_16BIT, 0x0004, AR023Z_I2C_ADDRESS }, + { 0x3420, CRL_REG_LEN_16BIT, 0x2A3B, AR023Z_I2C_ADDRESS }, + { 0x341A, CRL_REG_LEN_16BIT, 0x0A00, AR023Z_I2C_ADDRESS }, + { 0x3400, CRL_REG_LEN_16BIT, 0x0800, AR023Z_I2C_ADDRESS }, + { 0x3402, CRL_REG_LEN_16BIT, 0x073B, AR023Z_I2C_ADDRESS }, + { 0x3406, CRL_REG_LEN_16BIT, 0x0500, AR023Z_I2C_ADDRESS }, + { 0x3404, CRL_REG_LEN_16BIT, 0x3E1E, AR023Z_I2C_ADDRESS }, + { 0x3454, CRL_REG_LEN_16BIT, 0x0004, AR023Z_I2C_ADDRESS }, + { 0x3432, CRL_REG_LEN_16BIT, 0x000B, AR023Z_I2C_ADDRESS }, + { 0x3452, CRL_REG_LEN_16BIT, 0x000B, AR023Z_I2C_ADDRESS }, + { 0x345A, CRL_REG_LEN_16BIT, 0x000B, AR023Z_I2C_ADDRESS }, + { 0x3462, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0x344A, CRL_REG_LEN_16BIT, 0x0007, AR023Z_I2C_ADDRESS }, + { 0x342E, CRL_REG_LEN_16BIT, 0x0006, AR023Z_I2C_ADDRESS }, + { 0xCA20, CRL_REG_LEN_16BIT, 0x0100, AR023Z_I2C_ADDRESS }, + { 0xCA22, CRL_REG_LEN_16BIT, 0x0800, AR023Z_I2C_ADDRESS }, + { 0xCA24, CRL_REG_LEN_16BIT, 0x0C80, AR023Z_I2C_ADDRESS }, + { 0xCA26, CRL_REG_LEN_16BIT, 0x002D, AR023Z_I2C_ADDRESS }, + { 0xCA78, CRL_REG_LEN_16BIT, 0x0030, AR023Z_I2C_ADDRESS }, + { 0xCA80, CRL_REG_LEN_16BIT, 0x0056, AR023Z_I2C_ADDRESS }, + { 0xCA88, CRL_REG_LEN_16BIT, 0x0100, AR023Z_I2C_ADDRESS }, + { 0xCA90, CRL_REG_LEN_16BIT, 0x0200, AR023Z_I2C_ADDRESS }, + { 0xCA7A, CRL_REG_LEN_16BIT, 0x002D, AR023Z_I2C_ADDRESS }, + { 0xCA7C, CRL_REG_LEN_16BIT, 0x007D, AR023Z_I2C_ADDRESS }, + { 0xCA82, CRL_REG_LEN_16BIT, 0x0050, AR023Z_I2C_ADDRESS }, + { 0xCA84, CRL_REG_LEN_16BIT, 0x007D, AR023Z_I2C_ADDRESS }, + { 0xCA8A, CRL_REG_LEN_16BIT, 0x00B8, AR023Z_I2C_ADDRESS }, + { 0xCA8C, CRL_REG_LEN_16BIT, 0x007D, AR023Z_I2C_ADDRESS }, + { 0xCA92, CRL_REG_LEN_16BIT, 0x0173, AR023Z_I2C_ADDRESS }, + { 0xCA94, CRL_REG_LEN_16BIT, 0x007D, AR023Z_I2C_ADDRESS }, + { 0xCB20, CRL_REG_LEN_16BIT, 0x002D, AR023Z_I2C_ADDRESS }, + { 0xCB22, CRL_REG_LEN_16BIT, 0x007D, AR023Z_I2C_ADDRESS }, + { 0xCB24, CRL_REG_LEN_16BIT, 0x0050, AR023Z_I2C_ADDRESS }, + { 0xCB26, CRL_REG_LEN_16BIT, 0x007D, AR023Z_I2C_ADDRESS }, + { 0xCB28, CRL_REG_LEN_16BIT, 0x00B8, 
AR023Z_I2C_ADDRESS }, + { 0xCB2A, CRL_REG_LEN_16BIT, 0x007D, AR023Z_I2C_ADDRESS }, + { 0xCB2C, CRL_REG_LEN_16BIT, 0x0180, AR023Z_I2C_ADDRESS }, + { 0xCB2E, CRL_REG_LEN_16BIT, 0x007D, AR023Z_I2C_ADDRESS }, + { 0xCB40, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xCB42, CRL_REG_LEN_16BIT, 0x07D0, AR023Z_I2C_ADDRESS }, + { 0xCB44, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xCB46, CRL_REG_LEN_16BIT, 0x0DAC, AR023Z_I2C_ADDRESS }, + { 0xCA70, CRL_REG_LEN_16BIT, 0x0003, AR023Z_I2C_ADDRESS }, + { 0xCA72, CRL_REG_LEN_16BIT, 0x0003, AR023Z_I2C_ADDRESS }, + { 0xCA74, CRL_REG_LEN_16BIT, 0x03E8, AR023Z_I2C_ADDRESS }, + { 0xCA76, CRL_REG_LEN_16BIT, 0x0D00, AR023Z_I2C_ADDRESS }, + { 0xCA42, CRL_REG_LEN_08BIT, 0x02, AR023Z_I2C_ADDRESS }, + { 0xCA43, CRL_REG_LEN_08BIT, 0x16, AR023Z_I2C_ADDRESS }, + { 0xCA48, CRL_REG_LEN_08BIT, 0x02, AR023Z_I2C_ADDRESS }, + { 0xCA49, CRL_REG_LEN_08BIT, 0x16, AR023Z_I2C_ADDRESS }, + { 0xCA4E, CRL_REG_LEN_08BIT, 0x14, AR023Z_I2C_ADDRESS }, + { 0xCA4F, CRL_REG_LEN_08BIT, 0x04, AR023Z_I2C_ADDRESS }, + { 0xCA5E, CRL_REG_LEN_08BIT, 0x01, AR023Z_I2C_ADDRESS }, + { 0xCA5F, CRL_REG_LEN_08BIT, 0x16, AR023Z_I2C_ADDRESS }, + { 0xCA64, CRL_REG_LEN_08BIT, 0x01, AR023Z_I2C_ADDRESS }, + { 0xCA65, CRL_REG_LEN_08BIT, 0x16, AR023Z_I2C_ADDRESS }, + { 0xCA6A, CRL_REG_LEN_08BIT, 0x1E, AR023Z_I2C_ADDRESS }, + { 0xCA6B, CRL_REG_LEN_08BIT, 0x05, AR023Z_I2C_ADDRESS }, + { 0xBC0A, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xBC0C, CRL_REG_LEN_16BIT, 0x000A, AR023Z_I2C_ADDRESS }, + { 0xBC0E, CRL_REG_LEN_16BIT, 0x000B, AR023Z_I2C_ADDRESS }, + { 0xBC10, CRL_REG_LEN_16BIT, 0x001A, AR023Z_I2C_ADDRESS }, + { 0xBC12, CRL_REG_LEN_16BIT, 0x0027, AR023Z_I2C_ADDRESS }, + { 0xBC14, CRL_REG_LEN_16BIT, 0x0032, AR023Z_I2C_ADDRESS }, + { 0xBC16, CRL_REG_LEN_16BIT, 0x003D, AR023Z_I2C_ADDRESS }, + { 0xBC18, CRL_REG_LEN_16BIT, 0x0046, AR023Z_I2C_ADDRESS }, + { 0xBC1A, CRL_REG_LEN_16BIT, 0x004F, AR023Z_I2C_ADDRESS }, + { 0xBC1C, CRL_REG_LEN_16BIT, 0x005F, AR023Z_I2C_ADDRESS }, + { 0xBC1E, CRL_REG_LEN_16BIT, 0x006D, AR023Z_I2C_ADDRESS }, + { 0xBC20, CRL_REG_LEN_16BIT, 0x007A, AR023Z_I2C_ADDRESS }, + { 0xBC22, CRL_REG_LEN_16BIT, 0x0087, AR023Z_I2C_ADDRESS }, + { 0xBC24, CRL_REG_LEN_16BIT, 0x009D, AR023Z_I2C_ADDRESS }, + { 0xBC26, CRL_REG_LEN_16BIT, 0x00B1, AR023Z_I2C_ADDRESS }, + { 0xBC28, CRL_REG_LEN_16BIT, 0x00C4, AR023Z_I2C_ADDRESS }, + { 0xBC2A, CRL_REG_LEN_16BIT, 0x00D6, AR023Z_I2C_ADDRESS }, + { 0xBC2C, CRL_REG_LEN_16BIT, 0x00F5, AR023Z_I2C_ADDRESS }, + { 0xBC2E, CRL_REG_LEN_16BIT, 0x0112, AR023Z_I2C_ADDRESS }, + { 0xBC30, CRL_REG_LEN_16BIT, 0x012D, AR023Z_I2C_ADDRESS }, + { 0xBC32, CRL_REG_LEN_16BIT, 0x0145, AR023Z_I2C_ADDRESS }, + { 0xBC34, CRL_REG_LEN_16BIT, 0x0172, AR023Z_I2C_ADDRESS }, + { 0xBC36, CRL_REG_LEN_16BIT, 0x019B, AR023Z_I2C_ADDRESS }, + { 0xBC38, CRL_REG_LEN_16BIT, 0x01C1, AR023Z_I2C_ADDRESS }, + { 0xBC3A, CRL_REG_LEN_16BIT, 0x01E3, AR023Z_I2C_ADDRESS }, + { 0xBC3C, CRL_REG_LEN_16BIT, 0x0223, AR023Z_I2C_ADDRESS }, + { 0xBC3E, CRL_REG_LEN_16BIT, 0x025D, AR023Z_I2C_ADDRESS }, + { 0xBC40, CRL_REG_LEN_16BIT, 0x0292, AR023Z_I2C_ADDRESS }, + { 0xBC42, CRL_REG_LEN_16BIT, 0x02C3, AR023Z_I2C_ADDRESS }, + { 0xBC44, CRL_REG_LEN_16BIT, 0x031D, AR023Z_I2C_ADDRESS }, + { 0xBC46, CRL_REG_LEN_16BIT, 0x036F, AR023Z_I2C_ADDRESS }, + { 0xBC48, CRL_REG_LEN_16BIT, 0x03B9, AR023Z_I2C_ADDRESS }, + { 0xBC4A, CRL_REG_LEN_16BIT, 0x03FF, AR023Z_I2C_ADDRESS }, + { 0xBC4C, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xBC4E, CRL_REG_LEN_16BIT, 0x0002, AR023Z_I2C_ADDRESS }, + { 0xBC50, 
CRL_REG_LEN_16BIT, 0x0004, AR023Z_I2C_ADDRESS }, + { 0xBC52, CRL_REG_LEN_16BIT, 0x0007, AR023Z_I2C_ADDRESS }, + { 0xBC54, CRL_REG_LEN_16BIT, 0x0009, AR023Z_I2C_ADDRESS }, + { 0xBC56, CRL_REG_LEN_16BIT, 0x000B, AR023Z_I2C_ADDRESS }, + { 0xBC58, CRL_REG_LEN_16BIT, 0x000D, AR023Z_I2C_ADDRESS }, + { 0xBC5A, CRL_REG_LEN_16BIT, 0x000F, AR023Z_I2C_ADDRESS }, + { 0xBC5C, CRL_REG_LEN_16BIT, 0x0011, AR023Z_I2C_ADDRESS }, + { 0xBC5E, CRL_REG_LEN_16BIT, 0x0016, AR023Z_I2C_ADDRESS }, + { 0xBC60, CRL_REG_LEN_16BIT, 0x001A, AR023Z_I2C_ADDRESS }, + { 0xBC62, CRL_REG_LEN_16BIT, 0x001F, AR023Z_I2C_ADDRESS }, + { 0xBC64, CRL_REG_LEN_16BIT, 0x0023, AR023Z_I2C_ADDRESS }, + { 0xBC66, CRL_REG_LEN_16BIT, 0x002C, AR023Z_I2C_ADDRESS }, + { 0xBC68, CRL_REG_LEN_16BIT, 0x0034, AR023Z_I2C_ADDRESS }, + { 0xBC6A, CRL_REG_LEN_16BIT, 0x003D, AR023Z_I2C_ADDRESS }, + { 0xBC6C, CRL_REG_LEN_16BIT, 0x0046, AR023Z_I2C_ADDRESS }, + { 0xBC6E, CRL_REG_LEN_16BIT, 0x0057, AR023Z_I2C_ADDRESS }, + { 0xBC70, CRL_REG_LEN_16BIT, 0x0069, AR023Z_I2C_ADDRESS }, + { 0xBC72, CRL_REG_LEN_16BIT, 0x007A, AR023Z_I2C_ADDRESS }, + { 0xBC74, CRL_REG_LEN_16BIT, 0x008C, AR023Z_I2C_ADDRESS }, + { 0xBC76, CRL_REG_LEN_16BIT, 0x00AF, AR023Z_I2C_ADDRESS }, + { 0xBC78, CRL_REG_LEN_16BIT, 0x00D2, AR023Z_I2C_ADDRESS }, + { 0xBC7A, CRL_REG_LEN_16BIT, 0x00F5, AR023Z_I2C_ADDRESS }, + { 0xBC7C, CRL_REG_LEN_16BIT, 0x0118, AR023Z_I2C_ADDRESS }, + { 0xBC7E, CRL_REG_LEN_16BIT, 0x015E, AR023Z_I2C_ADDRESS }, + { 0xBC80, CRL_REG_LEN_16BIT, 0x01A4, AR023Z_I2C_ADDRESS }, + { 0xBC82, CRL_REG_LEN_16BIT, 0x01EA, AR023Z_I2C_ADDRESS }, + { 0xBC84, CRL_REG_LEN_16BIT, 0x022F, AR023Z_I2C_ADDRESS }, + { 0xBC86, CRL_REG_LEN_16BIT, 0x02B4, AR023Z_I2C_ADDRESS }, + { 0xBC88, CRL_REG_LEN_16BIT, 0x032B, AR023Z_I2C_ADDRESS }, + { 0xBC8A, CRL_REG_LEN_16BIT, 0x0399, AR023Z_I2C_ADDRESS }, + { 0xBC8C, CRL_REG_LEN_16BIT, 0x03FF, AR023Z_I2C_ADDRESS }, + { 0xCA30, CRL_REG_LEN_16BIT, 0x0B00, AR023Z_I2C_ADDRESS }, + { 0xCA32, CRL_REG_LEN_16BIT, 0x0100, AR023Z_I2C_ADDRESS }, + { 0xCA08, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xC9C0, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xC9C2, CRL_REG_LEN_16BIT, 0x0800, AR023Z_I2C_ADDRESS }, + { 0xC9C8, CRL_REG_LEN_16BIT, 0x0080, AR023Z_I2C_ADDRESS }, + { 0xC9CA, CRL_REG_LEN_16BIT, 0x0800, AR023Z_I2C_ADDRESS }, + { 0xC9BC, CRL_REG_LEN_16BIT, 0x0028, AR023Z_I2C_ADDRESS }, + { 0xC9BE, CRL_REG_LEN_16BIT, 0x0023, AR023Z_I2C_ADDRESS }, + { 0xC9C4, CRL_REG_LEN_16BIT, 0x0046, AR023Z_I2C_ADDRESS }, + { 0xC9C6, CRL_REG_LEN_16BIT, 0x0046, AR023Z_I2C_ADDRESS }, + { 0xC9A4, CRL_REG_LEN_16BIT, 0x0002, AR023Z_I2C_ADDRESS }, + { 0xC9A6, CRL_REG_LEN_16BIT, 0x001E, AR023Z_I2C_ADDRESS }, + { 0xCA2C, CRL_REG_LEN_08BIT, 0x01, AR023Z_I2C_ADDRESS }, + { 0xCA2D, CRL_REG_LEN_08BIT, 0x03, AR023Z_I2C_ADDRESS }, + { 0xCA9C, CRL_REG_LEN_16BIT, 0x0700, AR023Z_I2C_ADDRESS }, + { 0xCAA8, CRL_REG_LEN_16BIT, 0x0100, AR023Z_I2C_ADDRESS }, + { 0xCAA4, CRL_REG_LEN_16BIT, 0x01C0, AR023Z_I2C_ADDRESS }, + { 0xCAB0, CRL_REG_LEN_16BIT, 0x00B3, AR023Z_I2C_ADDRESS }, + { 0xCA28, CRL_REG_LEN_08BIT, 0x5A, AR023Z_I2C_ADDRESS }, + { 0xA82C, CRL_REG_LEN_16BIT, 0x0880, AR023Z_I2C_ADDRESS }, + { 0xA82E, CRL_REG_LEN_16BIT, 0x095A, AR023Z_I2C_ADDRESS }, + { 0xA830, CRL_REG_LEN_16BIT, 0x0980, AR023Z_I2C_ADDRESS }, + { 0xA832, CRL_REG_LEN_16BIT, 0x0980, AR023Z_I2C_ADDRESS }, + { 0xA834, CRL_REG_LEN_16BIT, 0x0980, AR023Z_I2C_ADDRESS }, + { 0xA836, CRL_REG_LEN_16BIT, 0x0980, AR023Z_I2C_ADDRESS }, + { 0xA838, CRL_REG_LEN_16BIT, 0x0980, AR023Z_I2C_ADDRESS }, + { 0xA83A, CRL_REG_LEN_16BIT, 
0x0980, AR023Z_I2C_ADDRESS }, + { 0xC88C, CRL_REG_LEN_16BIT, 0x0080, AR023Z_I2C_ADDRESS }, + { 0xB00C, CRL_REG_LEN_08BIT, 0x00, AR023Z_I2C_ADDRESS }, + { 0xC8BE, CRL_REG_LEN_16BIT, 0x0000, AR023Z_I2C_ADDRESS }, + { 0xB00D, CRL_REG_LEN_08BIT, 0x1E, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D00, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D01, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x3028, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0200, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x2000, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D06, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D08, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8D02, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x0001, AR023Z_I2C_ADDRESS }, + { 0xFC02, CRL_REG_LEN_16BIT, 0x0101, AR023Z_I2C_ADDRESS }, + { 0xFC04, CRL_REG_LEN_16BIT, 0x0101, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8102, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, + { 0xFC00, CRL_REG_LEN_16BIT, 0x2800, AR023Z_I2C_ADDRESS }, + { 0x0040, CRL_REG_LEN_16BIT, 0x8100, AR023Z_I2C_ADDRESS }, + { 0x00, CRL_REG_LEN_DELAY, 20, 0x00 }, +}; + +struct crl_sensor_detect_config ar023z_sensor_detect_regset[] = { + { + .reg = { 0x0000, CRL_REG_LEN_16BIT, 0xFFFF, TC358778_I2C_ADDRESS }, + .width = 15, + }, + { + .reg = { 0x0000, CRL_REG_LEN_16BIT, 0xFFFF, AR023Z_I2C_ADDRESS }, + .width = 16, + }, +}; + +struct crl_pll_configuration ar023z_pll_configurations[] = { + { + .input_clk = 27000000, + .op_sys_clk = 317250000, + .bitsperpixel = 16, + .pixel_rate_csi = 79312500, + .pixel_rate_pa = 79312500, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, +}; + +struct crl_sensor_subdev_config ar023z_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ar023z binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ar023z pixel array", + }, +}; + +struct crl_register_write_rep ar023z_poweroff_regset[] = { + {0xFC00, CRL_REG_LEN_16BIT, 0x5000, AR023Z_I2C_ADDRESS}, + {0x0040, CRL_REG_LEN_16BIT, 0x8100, AR023Z_I2C_ADDRESS}, + {0x0002, CRL_REG_LEN_16BIT, 0x0001, TC358778_I2C_ADDRESS}, +}; + +struct crl_register_write_rep ar023z_streamon_regs[] = { + /* Turn on the D-PHY clock and enable MIPI lanes 0 and 1; lanes 2 and 3 stay disabled, matching the 2-lane link (csi_lanes = 2) above */ + {0x0140, CRL_REG_LEN_32BIT, 0x00000000, TC358778_I2C_ADDRESS}, /* CLK On */ + {0x0144, CRL_REG_LEN_32BIT, 0x00000000, TC358778_I2C_ADDRESS}, /* lane 0 */ + {0x0148, CRL_REG_LEN_32BIT, 0x00000000, TC358778_I2C_ADDRESS}, /* lane 1 */ + {0x014C, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS}, /* lane 2 */ + {0x0150, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS}, /* lane 3 */ +}; + +struct crl_register_write_rep ar023z_streamoff_regs[] = { + {0x0140, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS}, /* CLK Off */ + {0x0144, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS}, /* lane 0 */ + {0x0148, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS}, /* lane 1 */ + {0x014C, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS}, /* lane 2 */ + {0x0150, CRL_REG_LEN_32BIT, 0x00010000, TC358778_I2C_ADDRESS}, /* lane 3 */ +}; + +struct crl_subdev_rect_rep ar023z_1920_1080_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 
0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1920, + .in_rect.height = 1080, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + } +}; + +struct crl_mode_rep ar023z_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ar023z_1920_1080_rects), + .sd_rects = ar023z_1920_1080_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .min_llp = 2350, + .min_fll = 1320, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ar023z_1920_1080), + .mode_regs = ar023z_1920_1080, + }, +}; + +struct crl_csi_data_fmt ar023z_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + .regs_items = 0, + .regs = NULL, + }, +}; + +struct crl_sensor_limits ar023z_mipi_bridge_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1920, + .y_addr_max = 1080, + .min_frame_length_lines = 320, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 380, + .max_line_length_pixels = 32752, +}; + +/* Power items, they are enabled in the order they are listed here */ +struct crl_power_seq_entity ar023z_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 27000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + }, +}; + +struct crl_v4l2_ctrl ar023z_v4l2_ctrls[] ={ + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +struct crl_sensor_configuration ar023z_crl_configuration = { + .power_items = ARRAY_SIZE(ar023z_power_items), + .power_entities = ar023z_power_items, + + .poweroff_regs_items = ARRAY_SIZE(ar023z_poweroff_regset), + .poweroff_regs = ar023z_poweroff_regset, + + .id_reg_items = ARRAY_SIZE(ar023z_sensor_detect_regset), + .id_regs = ar023z_sensor_detect_regset, + + .onetime_init_regs_items = 0, + .onetime_init_regs = NULL, + + .subdev_items = ARRAY_SIZE(ar023z_sensor_subdevs), + .subdevs = ar023z_sensor_subdevs, + + .sensor_limits = &ar023z_mipi_bridge_limits, + + .pll_config_items = 
ARRAY_SIZE(ar023z_pll_configurations), + .pll_configs = ar023z_pll_configurations, + + .modes_items = ARRAY_SIZE(ar023z_modes), + .modes = ar023z_modes, + + .streamon_regs_items = ARRAY_SIZE(ar023z_streamon_regs), + .streamon_regs = ar023z_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(ar023z_streamoff_regs), + .streamoff_regs = ar023z_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ar023z_v4l2_ctrls), + .v4l2_ctrl_bank = ar023z_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ar023z_crl_csi_data_fmt), + .csi_fmts = ar023z_crl_csi_data_fmt, + + .flip_items = 0, + .flip_data = NULL, + + .frame_desc_entries = 0, + .frame_desc_type = 0, + .frame_desc = 0, +}; + +#endif /* __CRLMODULE_AR023Z_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx132_configuration.h b/drivers/media/i2c/crlmodule/crl_imx132_configuration.h new file mode 100644 index 000000000000..128ccb50e4f8 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx132_configuration.h @@ -0,0 +1,699 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation */ + +#ifndef __CRLMODULE_IMX132_CONFIGURATION_H_ +#define __CRLMODULE_IMX132_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +struct crl_register_write_rep imx132_powerup_regset[] = { + { 0x3087, CRL_REG_LEN_08BIT, 0x53 }, + { 0x308B, CRL_REG_LEN_08BIT, 0x5A }, + { 0x3094, CRL_REG_LEN_08BIT, 0x11 }, + { 0x309D, CRL_REG_LEN_08BIT, 0xA4 }, + { 0x30AA, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30C6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30C7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3118, CRL_REG_LEN_08BIT, 0x2F }, + { 0x312A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x312B, CRL_REG_LEN_08BIT, 0x0B }, + { 0x312C, CRL_REG_LEN_08BIT, 0x0B }, + { 0x312D, CRL_REG_LEN_08BIT, 0x13 }, + { 0x303D, CRL_REG_LEN_08BIT, 0x10 }, + { 0x303E, CRL_REG_LEN_08BIT, 0x5A }, + { 0x3040, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3041, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3048, CRL_REG_LEN_08BIT, 0x00 }, + { 0x304C, CRL_REG_LEN_08BIT, 0x2F }, + { 0x304D, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3064, CRL_REG_LEN_08BIT, 0x92 }, + { 0x306A, CRL_REG_LEN_08BIT, 0x10 }, + { 0x309B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x309E, CRL_REG_LEN_08BIT, 0x41 }, + { 0x30A0, CRL_REG_LEN_08BIT, 0x10 }, + { 0x30A1, CRL_REG_LEN_08BIT, 0x0B }, + { 0x30B2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30D5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30D6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30D7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30D8, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30D9, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30DA, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30DB, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30DC, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30DD, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30DE, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3102, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3103, CRL_REG_LEN_08BIT, 0x33 }, + { 0x3104, CRL_REG_LEN_08BIT, 0x18 }, + { 0x3105, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3106, CRL_REG_LEN_08BIT, 0x65 }, + { 0x3107, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3108, CRL_REG_LEN_08BIT, 0x06 }, + { 0x3109, CRL_REG_LEN_08BIT, 0x04 }, + { 0x310A, CRL_REG_LEN_08BIT, 0x04 }, + { 0x315C, CRL_REG_LEN_08BIT, 0x3D }, + { 0x315D, CRL_REG_LEN_08BIT, 0x3C }, + { 0x316E, CRL_REG_LEN_08BIT, 0x3E }, + { 0x316F, CRL_REG_LEN_08BIT, 0x3D }, + { 0x020e, CRL_REG_LEN_16BIT, 0x0100 }, + { 0x0210, CRL_REG_LEN_16BIT, 0x01a0 }, + { 0x0212, CRL_REG_LEN_16BIT, 0x0200 }, + { 0x0214, CRL_REG_LEN_16BIT, 0x0100 }, + { 0x0204, CRL_REG_LEN_16BIT, 0x0000 }, + { 0x0202, CRL_REG_LEN_16BIT, 0x0000 }, + { 0x0600, CRL_REG_LEN_16BIT, 0x0000 }, + { 0x0602, CRL_REG_LEN_16BIT, 0x03ff }, + { 0x0604, CRL_REG_LEN_16BIT, 0x03ff }, 
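+ /* 0x0600-0x0608 form the standard SMIA test-pattern block (mode plus solid-colour data); together with 0x0202 (coarse exposure) and 0x0204 (analogue gain) above, these are the same registers the imx132_*_regs control tables later in this file drive, and 0x0100 = 0 below keeps the sensor in software standby until stream-on. Register roles are inferred from those tables, not documented here. */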
+ { 0x0606, CRL_REG_LEN_16BIT, 0x03ff }, + { 0x0608, CRL_REG_LEN_16BIT, 0x03ff }, + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, +}; + +/* + .input_clk = 24000000, + .op_sys_clk = 405000000, + .bitsperpixel = 10, + .pixel_rate_csi = 810000000, + .pixel_rate_pa = 810000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx132_pll_405), + .pll_regs = imx132_pll_405, +*/ +struct crl_register_write_rep imx132_pll_405[] = { + /* PLL setting */ + { 0x0305, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x87 }, + { 0x30A4, CRL_REG_LEN_08BIT, 0x01 }, + { 0x303C, CRL_REG_LEN_08BIT, 0x4B }, + /* Global timing */ + { 0x3304, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3305, CRL_REG_LEN_08BIT, 0x06 }, + { 0x3306, CRL_REG_LEN_08BIT, 0x19 }, + { 0x3307, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3308, CRL_REG_LEN_08BIT, 0x0F }, + { 0x3309, CRL_REG_LEN_08BIT, 0x07 }, + { 0x330A, CRL_REG_LEN_08BIT, 0x0C }, + { 0x330B, CRL_REG_LEN_08BIT, 0x06 }, + { 0x330C, CRL_REG_LEN_08BIT, 0x0B }, + { 0x330D, CRL_REG_LEN_08BIT, 0x07 }, + { 0x330E, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3318, CRL_REG_LEN_08BIT, 0x62 }, + { 0x3322, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3342, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3348, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3301, CRL_REG_LEN_08BIT, 0x00 }, /* Lanes = 2 */ +}; + +struct crl_register_write_rep imx132_pll_312[] = { + /* PLL setting */ + { 0x0305, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x0D }, + { 0x30A4, CRL_REG_LEN_08BIT, 0x02 }, + { 0x303C, CRL_REG_LEN_08BIT, 0x4B }, + /* Global timing */ + { 0x3304, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3305, CRL_REG_LEN_08BIT, 0x06 }, + { 0x3306, CRL_REG_LEN_08BIT, 0x19 }, + { 0x3307, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3308, CRL_REG_LEN_08BIT, 0x0F }, + { 0x3309, CRL_REG_LEN_08BIT, 0x07 }, + { 0x330A, CRL_REG_LEN_08BIT, 0x0C }, + { 0x330B, CRL_REG_LEN_08BIT, 0x06 }, + { 0x330C, CRL_REG_LEN_08BIT, 0x0B }, + { 0x330D, CRL_REG_LEN_08BIT, 0x07 }, + { 0x330E, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3318, CRL_REG_LEN_08BIT, 0x62 }, + { 0x3322, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3342, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3348, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3301, CRL_REG_LEN_08BIT, 0x01 }, /* Lanes = 1 */ +}; + +struct crl_register_write_rep imx132_mode_1080P[] = { + {0x0344, CRL_REG_LEN_08BIT, 0x00}, + {0x0345, CRL_REG_LEN_08BIT, 0x14}, + {0x0346, CRL_REG_LEN_08BIT, 0x00}, + {0x0347, CRL_REG_LEN_08BIT, 0x32}, + {0x0348, CRL_REG_LEN_08BIT, 0x07}, + {0x0349, CRL_REG_LEN_08BIT, 0xA3}, + {0x034A, CRL_REG_LEN_08BIT, 0x04}, + {0x034B, CRL_REG_LEN_08BIT, 0x79}, + {0x034C, CRL_REG_LEN_08BIT, 0x07}, + {0x034D, CRL_REG_LEN_08BIT, 0x90}, + {0x034E, CRL_REG_LEN_08BIT, 0x04}, + {0x034F, CRL_REG_LEN_08BIT, 0x48}, + {0x0381, CRL_REG_LEN_08BIT, 0x01}, + {0x0383, CRL_REG_LEN_08BIT, 0x01}, + {0x0385, CRL_REG_LEN_08BIT, 0x01}, + {0x0387, CRL_REG_LEN_08BIT, 0x01}, +}; + +struct crl_register_write_rep imx132_mode_1636x1096[] = { + {0x0344, CRL_REG_LEN_08BIT, 0x00}, + {0x0345, CRL_REG_LEN_08BIT, 0xAA}, + {0x0346, CRL_REG_LEN_08BIT, 0x00}, + {0x0347, CRL_REG_LEN_08BIT, 0x32}, + {0x0348, CRL_REG_LEN_08BIT, 0x07}, + {0x0349, CRL_REG_LEN_08BIT, 0x0D}, + {0x034A, CRL_REG_LEN_08BIT, 0x04}, + {0x034B, CRL_REG_LEN_08BIT, 0x79}, + {0x034C, CRL_REG_LEN_08BIT, 0x06}, + {0x034D, CRL_REG_LEN_08BIT, 0x64}, + {0x034E, CRL_REG_LEN_08BIT, 0x04}, + {0x034F, CRL_REG_LEN_08BIT, 0x48}, + {0x0381, CRL_REG_LEN_08BIT, 0x01}, + {0x0383, CRL_REG_LEN_08BIT, 0x01}, + {0x0385, CRL_REG_LEN_08BIT, 0x01}, + {0x0387, CRL_REG_LEN_08BIT, 0x01}, +}; + +struct crl_register_write_rep imx132_fll_regs[] = { + { 0x0340, 
CRL_REG_LEN_16BIT, 0x045c }, /* FLL (frame length lines) */ +}; + +struct crl_register_write_rep imx132_llp_regs[] = { + { 0x0342, CRL_REG_LEN_16BIT, 0x08fc }, /* LLP (line length pixels) */ +}; + +struct crl_register_write_rep imx132_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +struct crl_register_write_rep imx132_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +struct crl_register_write_rep imx132_data_fmt_width10[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0a0a } +}; + +struct crl_register_write_rep imx132_data_fmt_width8[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0808 } +}; + +struct crl_subdev_rect_rep imx132_1080P_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1976, + .in_rect.height = 1200, + .out_rect.left = 20, + .out_rect.top = 50, + .out_rect.width = 1936, + .out_rect.height = 1096, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1936, + .in_rect.height = 1096, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1936, + .out_rect.height = 1096, + }, +}; + +struct crl_subdev_rect_rep imx132_1636x1096_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1976, + .in_rect.height = 1200, + .out_rect.left = 170, + .out_rect.top = 50, + .out_rect.width = 1636, + .out_rect.height = 1096, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1636, + .in_rect.height = 1096, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1636, + .out_rect.height = 1096, + }, +}; + +struct crl_mode_rep imx132_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(imx132_1636x1096_rects), + .sd_rects = imx132_1636x1096_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1636, + .height = 1096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx132_mode_1636x1096), + .mode_regs = imx132_mode_1636x1096, + }, + { + .sd_rects_items = ARRAY_SIZE(imx132_1080P_rects), + .sd_rects = imx132_1080P_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1936, + .height = 1096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx132_mode_1080P), + .mode_regs = imx132_mode_1080P, + }, +}; + +struct crl_register_write_rep imx132_poweroff_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, +}; + +struct crl_sensor_detect_config imx132_sensor_detect_regset[] = { + { + .reg = { 0x0003, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 5, + }, + { + .reg = { 0x0000, CRL_REG_LEN_16BIT, 0x0000ffff }, + .width = 7, + }, +}; + +struct crl_sensor_subdev_config imx132_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx132 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx132 pixel array", + }, +}; + +struct crl_pll_configuration imx132_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 312000000, + .bitsperpixel = 8, + .pixel_rate_csi = 624000000, + .pixel_rate_pa = 624000000, + .csi_lanes = 1, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx132_pll_312), + .pll_regs = imx132_pll_312, + }, + { + .input_clk = 24000000, + .op_sys_clk = 405000000, + .bitsperpixel = 10, + .pixel_rate_csi = 810000000, + .pixel_rate_pa = 810000000, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx132_pll_405), + .pll_regs = imx132_pll_405, + }, +}; + +struct crl_sensor_limits 
imx132_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1976, + .y_addr_max = 1200, + .min_frame_length_lines = 202, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 560, + .max_line_length_pixels = 65520, +}; + +struct crl_flip_data imx132_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, +}; + +struct crl_csi_data_fmt imx132_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 1, + .regs = imx132_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx132_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx132_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx132_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx132_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx132_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx132_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx132_data_fmt_width8, + }, +}; + +struct crl_dynamic_register_access imx132_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + }, +}; + + +struct crl_dynamic_register_access imx132_ana_gain_global_regs[] = { + { + .address = 0x0204, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + }, +}; + +struct crl_dynamic_register_access imx132_exposure_regs[] = { + { + .address = 0x0202, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + } +}; + +struct crl_dynamic_register_access imx132_vblank_regs[] = { + { + .address = 0x0340, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + }, +}; + +struct crl_dynamic_register_access imx132_hblank_regs[] = { + { + .address = 0x0342, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + }, +}; + +static struct crl_dynamic_register_access imx132_test_pattern_regs[] = { + { + .address = 0x0600, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + }, +}; + +static const char * const imx132_test_patterns[] = { + "Disabled", + "Solid Colour", + "Eight Vertical Colour Bars", + "Fade to Gray", + "PN9", +}; + +struct crl_v4l2_ctrl imx132_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + 
.dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 220, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx132_ana_gain_global_regs), + .regs = imx132_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx132_exposure_regs), + .regs = imx132_exposure_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx132_flip_regs), + .regs = imx132_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx132_flip_regs), + .regs = imx132_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VBLANK, + .name = "V4L2_CID_VBLANK", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = -65535, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx132_vblank_regs), + .regs = imx132_vblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_HBLANK, + .name = 
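+/*
+ * Note on this control and V4L2_CID_VBLANK above: both are backed by
+ * the SMIA-style frame-length (0x0340) and line-length (0x0342)
+ * registers via imx132_vblank_regs/imx132_hblank_regs, and the 65520
+ * maximum below matches max_line_length_pixels in
+ * imx132_sensor_limits; how the framework folds the mode height into
+ * the negative VBLANK range is left to crlmodule itself.
+ */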
"V4L2_CID_HBLANK", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx132_hblank_regs), + .regs = imx132_hblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_TEST_PATTERN, + .name = "V4L2_CID_TEST_PATTERN", + .type = CRL_V4L2_CTRL_TYPE_MENU_ITEMS, + .data.v4l2_menu_items.menu = imx132_test_patterns, + .data.v4l2_menu_items.size = ARRAY_SIZE(imx132_test_patterns), + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx132_test_pattern_regs), + .regs = imx132_test_pattern_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +struct crl_sensor_configuration imx132_crl_configuration = { + + .powerup_regs_items = ARRAY_SIZE(imx132_powerup_regset), + .powerup_regs = imx132_powerup_regset, + + .poweroff_regs_items = ARRAY_SIZE(imx132_poweroff_regset), + .poweroff_regs = imx132_poweroff_regset, + + .id_reg_items = ARRAY_SIZE(imx132_sensor_detect_regset), + .id_regs = imx132_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx132_sensor_subdevs), + .subdevs = imx132_sensor_subdevs, + + .sensor_limits = &imx132_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx132_pll_configurations), + .pll_configs = imx132_pll_configurations, + + .modes_items = ARRAY_SIZE(imx132_modes), + .modes = imx132_modes, + + .streamon_regs_items = ARRAY_SIZE(imx132_streamon_regs), + .streamon_regs = imx132_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx132_streamoff_regs), + .streamoff_regs = imx132_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx132_v4l2_ctrls), + .v4l2_ctrl_bank = imx132_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx132_crl_csi_data_fmt), + .csi_fmts = imx132_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx132_flip_configurations), + .flip_data = imx132_flip_configurations, +}; + +#endif /* __CRLMODULE_IMX132_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx135_configuration.h b/drivers/media/i2c/crlmodule/crl_imx135_configuration.h new file mode 100644 index 000000000000..82efac824e73 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx135_configuration.h @@ -0,0 +1,781 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation */ + +#ifndef __CRLMODULE_IMX135_CONFIGURATION_H_ +#define __CRLMODULE_IMX135_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +/* MIPI 451.2MHz 902.4mbps PIXCLK: 360.96MHz */ +static struct crl_register_write_rep imx135_pll_451[] = { + { 0x011e, CRL_REG_LEN_08BIT, 0x13 }, /* This is not correct for 24MHz* */ + { 0x011f, CRL_REG_LEN_08BIT, 0x33 }, /* But it is that way in vendor sheets */ + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x0f }, + { 0x0309, CRL_REG_LEN_08BIT, 0x05 }, + { 0x030b, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030c, CRL_REG_LEN_08BIT, 0x02 }, + { 0x030d, CRL_REG_LEN_08BIT, 0x34 }, + { 0x030e, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3a06, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0830, CRL_REG_LEN_08BIT, 0x87 }, + { 0x0831, CRL_REG_LEN_08BIT, 0x3f }, + { 0x0832, CRL_REG_LEN_08BIT, 0x67 }, + { 0x0833, CRL_REG_LEN_08BIT, 0x3f }, + { 0x0834, CRL_REG_LEN_08BIT, 0x3f }, + { 0x0835, CRL_REG_LEN_08BIT, 0x4f }, + { 0x0836, CRL_REG_LEN_08BIT, 0xdf }, + { 0x0837, 
CRL_REG_LEN_08BIT, 0x47 }, + { 0x0839, CRL_REG_LEN_08BIT, 0x1f }, + { 0x083a, CRL_REG_LEN_08BIT, 0x17 }, + { 0x083b, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0108, CRL_REG_LEN_08BIT, 0x03 }, /* CSI lane */ +}; + + +static struct crl_register_write_rep imx135_powerup_regset[] = { + { 0x0101, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0105, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0110, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0220, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3302, CRL_REG_LEN_08BIT, 0x11 }, + { 0x3833, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3893, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3906, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3907, CRL_REG_LEN_08BIT, 0x01 }, + { 0x391B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3C09, CRL_REG_LEN_08BIT, 0x01 }, + { 0x600A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3008, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x320A, CRL_REG_LEN_08BIT, 0x01 }, + { 0x320D, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3216, CRL_REG_LEN_08BIT, 0x2E }, + { 0x322C, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3409, CRL_REG_LEN_08BIT, 0x0C }, + { 0x340C, CRL_REG_LEN_08BIT, 0x2D }, + { 0x3411, CRL_REG_LEN_08BIT, 0x39 }, + { 0x3414, CRL_REG_LEN_08BIT, 0x1E }, + { 0x3427, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3480, CRL_REG_LEN_08BIT, 0x1E }, + { 0x3484, CRL_REG_LEN_08BIT, 0x1E }, + { 0x3488, CRL_REG_LEN_08BIT, 0x1E }, + { 0x348C, CRL_REG_LEN_08BIT, 0x1E }, + { 0x3490, CRL_REG_LEN_08BIT, 0x1E }, + { 0x3494, CRL_REG_LEN_08BIT, 0x1E }, + { 0x3511, CRL_REG_LEN_08BIT, 0x8F }, + { 0x364F, CRL_REG_LEN_08BIT, 0x2D }, + { 0x0700, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3a63, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4100, CRL_REG_LEN_08BIT, 0xf8 }, + { 0x4203, CRL_REG_LEN_08BIT, 0xff }, + { 0x4344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4100, CRL_REG_LEN_08BIT, 0xf8 }, + { 0x441c, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020e, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020f, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0210, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0211, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0212, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0213, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0214, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0215, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx135_mode_13M[] = { + { 0x0108, CRL_REG_LEN_08BIT, 0x03 }, /* lanes */ + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0390, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0391, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0392, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4082, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4083, CRL_REG_LEN_08BIT, 0x11 }, /* Sony settings do not work */ + { 0x4203, CRL_REG_LEN_08BIT, 0xFF }, + { 0x7006, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034B, CRL_REG_LEN_08BIT, 0x2F }, + { 0x034C, CRL_REG_LEN_08BIT, 0x10 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034F, CRL_REG_LEN_08BIT, 0x30 }, + { 0x0350, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0351, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0352, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0353, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0354, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0355, CRL_REG_LEN_08BIT, 0x70 }, + { 0x0356, CRL_REG_LEN_08BIT, 0x0C }, + { 0x0357, CRL_REG_LEN_08BIT, 0x30 }, + { 0x301D, CRL_REG_LEN_08BIT, 0x30 }, + { 0x3310, 
CRL_REG_LEN_08BIT, 0x10 }, + { 0x3311, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3312, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3313, CRL_REG_LEN_08BIT, 0x30 }, + { 0x331C, CRL_REG_LEN_08BIT, 0x00 }, + { 0x331D, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4084, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4085, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4086, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4087, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4400, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx135_mode_1936M_binn_scale[] = { + + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0390, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0391, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0392, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x11 }, + { 0x4082, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4083, CRL_REG_LEN_08BIT, 0x00 }, + { 0x7006, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x2E }, + { 0x0346, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x8C }, + { 0x0348, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x41 }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0A }, + { 0x034B, CRL_REG_LEN_08BIT, 0xA7 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x07 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x90 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x48 }, + { 0x0350, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0351, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0352, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0353, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0354, CRL_REG_LEN_08BIT, 0x08 }, + { 0x0355, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0356, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0357, CRL_REG_LEN_08BIT, 0x8E }, + { 0x301D, CRL_REG_LEN_08BIT, 0x30 }, + { 0x3310, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3311, CRL_REG_LEN_08BIT, 0x90 }, + { 0x3312, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3313, CRL_REG_LEN_08BIT, 0x48 }, + { 0x331C, CRL_REG_LEN_08BIT, 0x04 }, + { 0x331D, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x4084, CRL_REG_LEN_08BIT, 0x07 }, + { 0x4085, CRL_REG_LEN_08BIT, 0x90 }, + { 0x4086, CRL_REG_LEN_08BIT, 0x04 }, + { 0x4087, CRL_REG_LEN_08BIT, 0x48 }, + { 0x4400, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx135_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep imx135_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_register_write_rep imx135_data_fmt_width10[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0a0a } +}; + +static struct crl_register_write_rep imx135_data_fmt_width8[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0808 } +}; + +static struct crl_arithmetic_ops imx135_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_dynamic_register_access imx135_h_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x1, + }, +}; + +static struct crl_dynamic_register_access imx135_v_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(imx135_vflip_ops), + .ops = imx135_vflip_ops, + .mask = 0x2, + }, +}; + + +static struct crl_dynamic_register_access imx135_ana_gain_global_regs[] = { + { + .address = 0x0205, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access imx135_exposure_regs[] = { + { + .address 
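+/*
+ * The flip accesses above are read-modify-write updates
+ * (CRL_REG_READ_AND_UPDATE) of the orientation register 0x0101:
+ * HFLIP lands in bit 0 (mask 0x1), while VFLIP is shifted left by
+ * one (1 << 1 = 0x2) so it lands in bit 1 (mask 0x2).
+ */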
= 0x0202, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + } +}; + +static struct crl_dynamic_register_access imx135_vblank_regs[] = { + { + .address = 0x0340, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx135_hblank_regs[] = { + { + .address = 0x0342, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; +static struct crl_sensor_detect_config imx135_sensor_detect_regset[] = { + { + .reg = { 0x0019, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x0016, CRL_REG_LEN_16BIT, 0x0000ffff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration imx135_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 451200000, + .pixel_rate_csi = 360960000, + .pixel_rate_pa = 360960000, + .bitsperpixel = 10, + .comp_items = 0, + .ctrl_data = 0, + .csi_lanes = 4, + .pll_regs_items = ARRAY_SIZE(imx135_pll_451), + .pll_regs = imx135_pll_451, + }, +}; + +static struct crl_subdev_rect_rep imx135_13M_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 3120, + }, +}; + +static struct crl_subdev_rect_rep imx135_mode_1936M_binn_scale_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 46, + .out_rect.top = 396, + .out_rect.width = 4116, + .out_rect.height = 2332, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4116, + .in_rect.height = 2332, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2058, + .out_rect.height = 1166, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 2058, + .in_rect.height = 1166, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1936, + .out_rect.height = 1096, + }, +}; + +static struct crl_mode_rep imx135_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(imx135_13M_rects), + .sd_rects = imx135_13M_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4208, + .height = 3120, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx135_mode_13M), + .mode_regs = imx135_mode_13M, + }, + { + .sd_rects_items = + ARRAY_SIZE(imx135_mode_1936M_binn_scale_rects), + .sd_rects = imx135_mode_1936M_binn_scale_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 17, + .width = 1936, + .height = 1096, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx135_mode_1936M_binn_scale), + .mode_regs = imx135_mode_1936M_binn_scale, + }, +}; + +static struct crl_sensor_subdev_config imx135_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "imx135 scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx135 binner", + }, 
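+/*
+ * The subdev list runs in the opposite order to the rect tables
+ * above; for the 1936x1096 mode the pipeline crops 4116x2332 from
+ * the pixel array, bins 2x2 down to 2058x1166, then scales by
+ * roughly 16/scale_m = 16/17 (2058 * 16 / 17 ~= 1936), assuming the
+ * usual SMIA-style scaler ratio.
+ */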
+ { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx135 pixel array", + }, +}; + +static struct crl_sensor_limits imx135_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 4208, + .y_addr_max = 3120, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 4572, + .max_line_length_pixels = 32752, + .scaler_m_min = 16, + .scaler_m_max = 255, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data imx135_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, +}; + +static struct crl_csi_data_fmt imx135_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 1, + .regs = imx135_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx135_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx135_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx135_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx135_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx135_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx135_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx135_data_fmt_width8, + }, +}; + +static struct crl_dynamic_register_access imx135_test_pattern_regs[] = { + { + .address = 0x0600, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static const char * const imx135_test_patterns[] = { + "Disabled", + "Solid Colour", + "Eight Vertical Colour Bars", +}; + +static const s64 imx135_op_sys_clock[] = { 451200000 }; + +static struct crl_v4l2_ctrl imx135_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = + ARRAY_SIZE(imx135_pll_configurations) - 1, + .data.v4l2_int_menu.menu = imx135_op_sys_clock, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + 
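+/*
+ * Unlike the imx132 header above, V4L2_CID_LINK_FREQ here is a
+ * populated integer menu: max = ARRAY_SIZE(imx135_pll_configurations)
+ * - 1 = 0 and menu[0] = 451200000, so userspace can enumerate the
+ * single supported op_sys_clk with VIDIOC_QUERYMENU.
+ */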
.regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .name = "V4L2_CID_ANALOGUE_GAIN", + .data.std_data.min = 0, + .data.std_data.max = 224, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx135_ana_gain_global_regs), + .regs = imx135_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .name = "V4L2_CID_EXPOSURE", + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 4500, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx135_exposure_regs), + .regs = imx135_exposure_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .name = "V4L2_CID_HFLIP", + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx135_h_flip_regs), + .regs = imx135_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .name = "V4L2_CID_VFLIP", + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx135_v_flip_regs), + .regs = imx135_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_TEST_PATTERN, + .name = "V4L2_CID_TEST_PATTERN", + .type = CRL_V4L2_CTRL_TYPE_MENU_ITEMS, + .data.v4l2_menu_items.menu = imx135_test_patterns, + .data.v4l2_menu_items.size = ARRAY_SIZE(imx135_test_patterns), + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx135_test_pattern_regs), + .regs = imx135_test_pattern_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 3800, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx135_vblank_regs), 
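+/*
+ * Rough frame-rate check for the defaults here and in the line-length
+ * control below, assuming the usual rate = pixel_rate / (FLL * LLP)
+ * relation: 360960000 / (3800 * 4600) ~= 20.6 fps; shrinking either
+ * length raises the rate toward the sensor limits.
+ */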
+ .regs = imx135_vblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 4280, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 4600, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx135_hblank_regs), + .regs = imx135_hblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity imx135_power_items[] = { + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VANA", + .val = 2700000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VDIG", + .val = 1100000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + .delay = 2000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .delay = 250, + }, +}; + + +static struct crl_sensor_configuration imx135_crl_configuration = { + + .powerup_regs_items = ARRAY_SIZE(imx135_powerup_regset), + .powerup_regs = imx135_powerup_regset, + + .power_items = ARRAY_SIZE(imx135_power_items), + .power_entities = imx135_power_items, + + .id_reg_items = ARRAY_SIZE(imx135_sensor_detect_regset), + .id_regs = imx135_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx135_sensor_subdevs), + .subdevs = imx135_sensor_subdevs, + + .sensor_limits = &imx135_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx135_pll_configurations), + .pll_configs = imx135_pll_configurations, + + .modes_items = ARRAY_SIZE(imx135_modes), + .modes = imx135_modes, + + .streamon_regs_items = ARRAY_SIZE(imx135_streamon_regs), + .streamon_regs = imx135_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx135_streamoff_regs), + .streamoff_regs = imx135_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx135_v4l2_ctrls), + .v4l2_ctrl_bank = imx135_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx135_crl_csi_data_fmt), + .csi_fmts = imx135_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx135_flip_configurations), + .flip_data = imx135_flip_configurations, +}; + + +#endif /* __CRLMODULE_IMX135_CONFIGURATION_H_ */ + + diff --git a/drivers/media/i2c/crlmodule/crl_imx185_configuration.h b/drivers/media/i2c/crlmodule/crl_imx185_configuration.h new file mode 100644 index 000000000000..168455b63d20 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx185_configuration.h @@ -0,0 +1,1772 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Shuguang Gong + * + */ + +#ifndef __CRLMODULE_IMX185_CONFIGURATION_H_ +#define __CRLMODULE_IMX185_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +#define IMX185_REG_STANDBY 0x3000 +#define IMX185_REG_XMSTA 0x3002 +#define IMX185_REG_SW_RESET 0x3003 + +#define IMX185_HMAX 65535 +#define IMX185_VMAX 131071 +#define IMX185_MAX_SHS1 (IMX185_VMAX - 2) + +struct crl_ctrl_data_pair ctrl_data_modes[] = { + { + .ctrl_id = V4L2_CID_WDR_MODE, + .data = 0, + }, + { + .ctrl_id = V4L2_CID_WDR_MODE, + .data = 1, + }, +}; + +/* 111Mbps for imx185 720p 30fps */ +static struct crl_register_write_rep imx185_pll_111mbps[] = { + {0x3009, CRL_REG_LEN_08BIT, 0x02}, /* frame speed */ + {0x300A, CRL_REG_LEN_08BIT, 0x3C}, + {0x300C, 
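+/*
+ * Worked example of the pixel_rate comment in the PLL table further
+ * below (pixel_rate = MIPICLK * 2 * 4 / 10): 56.25 MHz * 2 * 4 / 10
+ * = 45 Mpixel/s, i.e. the pixel_rate_csi/_pa of 45000000 advertised
+ * for this setting.
+ */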
CRL_REG_LEN_08BIT, 0x00}, + {0x3018, CRL_REG_LEN_08BIT, 0xee}, + {0x3019, CRL_REG_LEN_08BIT, 0x02}, + {0x301b, CRL_REG_LEN_08BIT, 0xe4}, + {0x301c, CRL_REG_LEN_08BIT, 0x0C}, + {0x300F, CRL_REG_LEN_08BIT, 0x01}, + {0x3010, CRL_REG_LEN_08BIT, 0x39}, + {0x3012, CRL_REG_LEN_08BIT, 0x50}, + {0x3056, CRL_REG_LEN_08BIT, 0xC9}, + {0x3057, CRL_REG_LEN_08BIT, 0x64}, + {0x3065, CRL_REG_LEN_08BIT, 0x00}, + {0x3084, CRL_REG_LEN_08BIT, 0x0F}, + {0x3086, CRL_REG_LEN_08BIT, 0x10}, + {0x30CF, CRL_REG_LEN_08BIT, 0xE1}, + {0x30D0, CRL_REG_LEN_08BIT, 0x29}, + {0x30D2, CRL_REG_LEN_08BIT, 0x9B}, + {0x30D3, CRL_REG_LEN_08BIT, 0x01}, + {0x30E1, CRL_REG_LEN_08BIT, 0xFF}, + {0x3303, CRL_REG_LEN_08BIT, 0x20}, /* repetation */ + {0x3305, CRL_REG_LEN_08BIT, 0x03}, /* 1: 2lanes, 3: 4lanes */ + {0x332C, CRL_REG_LEN_08BIT, 0x28}, /* mipi timing */ + {0x332D, CRL_REG_LEN_08BIT, 0x20}, + {0x3341, CRL_REG_LEN_08BIT, 0x00}, + {0x3342, CRL_REG_LEN_08BIT, 0x1B}, + {0x3343, CRL_REG_LEN_08BIT, 0x58}, + {0x3344, CRL_REG_LEN_08BIT, 0x0C}, + {0x3345, CRL_REG_LEN_08BIT, 0x24}, + {0x3346, CRL_REG_LEN_08BIT, 0x10}, + {0x3347, CRL_REG_LEN_08BIT, 0x0B}, + {0x3348, CRL_REG_LEN_08BIT, 0x08}, + {0x3349, CRL_REG_LEN_08BIT, 0x30}, + {0x334A, CRL_REG_LEN_08BIT, 0x20}, +}; + +/* 222Mbps for imx185 1080p 30fps */ +static struct crl_register_write_rep imx185_pll_222mbps[] = { + {0x3009, CRL_REG_LEN_08BIT, 0x02}, /* frame speed */ + {0x300A, CRL_REG_LEN_08BIT, 0x3C}, + {0x300C, CRL_REG_LEN_08BIT, 0x00}, + {0x301b, CRL_REG_LEN_08BIT, 0x98}, + {0x301c, CRL_REG_LEN_08BIT, 0x08}, + {0x300F, CRL_REG_LEN_08BIT, 0x01}, + {0x3010, CRL_REG_LEN_08BIT, 0x39}, + {0x3012, CRL_REG_LEN_08BIT, 0x50}, + {0x3056, CRL_REG_LEN_08BIT, 0xC9}, + {0x3057, CRL_REG_LEN_08BIT, 0x64}, + {0x3065, CRL_REG_LEN_08BIT, 0x00}, + {0x3084, CRL_REG_LEN_08BIT, 0x00}, + {0x3086, CRL_REG_LEN_08BIT, 0x01}, + {0x30CF, CRL_REG_LEN_08BIT, 0xD1}, + {0x30D0, CRL_REG_LEN_08BIT, 0x1B}, + {0x30D2, CRL_REG_LEN_08BIT, 0x5F}, + {0x30D3, CRL_REG_LEN_08BIT, 0x00}, + {0x30E1, CRL_REG_LEN_08BIT, 0xFF}, + {0x3303, CRL_REG_LEN_08BIT, 0x10}, /* repetation */ + {0x3305, CRL_REG_LEN_08BIT, 0x03}, /* 1: 2lanes, 3: 4lanes */ + {0x332C, CRL_REG_LEN_08BIT, 0x30}, /* mipi timing */ + {0x332D, CRL_REG_LEN_08BIT, 0x20}, + {0x3341, CRL_REG_LEN_08BIT, 0x00}, + {0x3342, CRL_REG_LEN_08BIT, 0x1B}, + {0x3343, CRL_REG_LEN_08BIT, 0x58}, + {0x3344, CRL_REG_LEN_08BIT, 0x10}, + {0x3345, CRL_REG_LEN_08BIT, 0x30}, + {0x3346, CRL_REG_LEN_08BIT, 0x18}, + {0x3347, CRL_REG_LEN_08BIT, 0x10}, + {0x3348, CRL_REG_LEN_08BIT, 0x10}, + {0x3349, CRL_REG_LEN_08BIT, 0x48}, + {0x334A, CRL_REG_LEN_08BIT, 0x28}, +}; + +/* 445Mbps for imx185 1080p 60fps */ +static struct crl_register_write_rep imx185_pll_445mbps[] = { + {0x3009, CRL_REG_LEN_08BIT, 0x01}, /* frame speed */ + {0x300A, CRL_REG_LEN_08BIT, 0x3C}, /* BLK */ + {0x300C, CRL_REG_LEN_08BIT, 0x00}, /* fixed settings */ + {0x3018, CRL_REG_LEN_08BIT, 0x65}, + {0x3019, CRL_REG_LEN_08BIT, 0x04}, + {0x301B, CRL_REG_LEN_08BIT, 0x4C}, + {0x301C, CRL_REG_LEN_08BIT, 0x04}, + {0x300F, CRL_REG_LEN_08BIT, 0x01}, + {0x3010, CRL_REG_LEN_08BIT, 0x39}, + {0x3012, CRL_REG_LEN_08BIT, 0x50}, + {0x3056, CRL_REG_LEN_08BIT, 0xC9}, + {0x3057, CRL_REG_LEN_08BIT, 0x64}, + {0x3065, CRL_REG_LEN_08BIT, 0x20}, + {0x3084, CRL_REG_LEN_08BIT, 0x00}, + {0x3086, CRL_REG_LEN_08BIT, 0x01}, + {0x30CF, CRL_REG_LEN_08BIT, 0xD1}, + {0x30D0, CRL_REG_LEN_08BIT, 0x1B}, + {0x30D2, CRL_REG_LEN_08BIT, 0x5F}, + {0x30D3, CRL_REG_LEN_08BIT, 0x00}, + {0x30E1, CRL_REG_LEN_08BIT, 0xFF}, + {0x3303, CRL_REG_LEN_08BIT, 0x00}, /* repetation 
*/ + {0x3305, CRL_REG_LEN_08BIT, 0x03}, + {0x332C, CRL_REG_LEN_08BIT, 0x40}, /* mipi timing */ + {0x332D, CRL_REG_LEN_08BIT, 0x20}, + {0x3341, CRL_REG_LEN_08BIT, 0x00}, + {0x3342, CRL_REG_LEN_08BIT, 0x1B}, + {0x3343, CRL_REG_LEN_08BIT, 0x68}, + {0x3344, CRL_REG_LEN_08BIT, 0x20}, + {0x3345, CRL_REG_LEN_08BIT, 0x40}, + {0x3346, CRL_REG_LEN_08BIT, 0x28}, + {0x3347, CRL_REG_LEN_08BIT, 0x20}, + {0x3348, CRL_REG_LEN_08BIT, 0x18}, + {0x3349, CRL_REG_LEN_08BIT, 0x78}, + {0x334A, CRL_REG_LEN_08BIT, 0x28}, +}; + +static struct crl_register_write_rep imx185_fmt_raw10[] = { + {0x333E, CRL_REG_LEN_08BIT, 0x0a}, /* FMT RAW10 */ + {0x333F, CRL_REG_LEN_08BIT, 0x0a}, +}; + +static struct crl_register_write_rep imx185_fmt_raw12[] = { + {0x333E, CRL_REG_LEN_08BIT, 0x0c}, /* FMT RAW12 */ + {0x333F, CRL_REG_LEN_08BIT, 0x0c}, +}; + +static struct crl_register_write_rep imx185_powerup_standby[] = { + {0x3000, CRL_REG_LEN_08BIT, 0x01}, + {0x00, CRL_REG_LEN_DELAY, 50, 0x00}, + {0x3002, CRL_REG_LEN_08BIT, 0x01}, + {0x00, CRL_REG_LEN_DELAY, 200, 0x00}, +}; + +static struct crl_register_write_rep imx185_1312_728_27MHZ_CROPPING[] = { + /* 0x02h */ + {0x3005, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT: 10/12 ADBIT: + 10/12 , raw 10 */ + {0x3007, CRL_REG_LEN_08BIT, 0x60}, /* mode selection */ + {0x301D, CRL_REG_LEN_08BIT, 0x08}, + {0x301E, CRL_REG_LEN_08BIT, 0x02}, + {0x3044, CRL_REG_LEN_08BIT, 0xE1}, + {0x3048, CRL_REG_LEN_08BIT, 0x33}, + {0x305C, CRL_REG_LEN_08BIT, 0x2c}, + {0x305E, CRL_REG_LEN_08BIT, 0x21}, + {0x3063, CRL_REG_LEN_08BIT, 0x54}, + /* Crop settings */ + {0x3038, CRL_REG_LEN_08BIT, 0x00}, /* WPV = 0 */ + {0x3039, CRL_REG_LEN_08BIT, 0x00}, + {0x303A, CRL_REG_LEN_08BIT, 0xE0}, /* WV = PIC_SIZE + 8 */ + {0x303B, CRL_REG_LEN_08BIT, 0x02}, + {0x303C, CRL_REG_LEN_08BIT, 0x04}, /* WPH = 4 */ + {0x303D, CRL_REG_LEN_08BIT, 0x00}, + {0x303E, CRL_REG_LEN_08BIT, 0x1C}, /* Effective size = 1308*/ + {0x303F, CRL_REG_LEN_08BIT, 0x05}, + /* 0x03h */ + {0x311D, CRL_REG_LEN_08BIT, 0x0A}, + {0x3123, CRL_REG_LEN_08BIT, 0x0F}, + {0x3147, CRL_REG_LEN_08BIT, 0x87}, + {0x31E1, CRL_REG_LEN_08BIT, 0x9E}, + {0x31E2, CRL_REG_LEN_08BIT, 0x01}, + {0x31E5, CRL_REG_LEN_08BIT, 0x05}, + {0x31E6, CRL_REG_LEN_08BIT, 0x05}, + {0x31E7, CRL_REG_LEN_08BIT, 0x3A}, + {0x31E8, CRL_REG_LEN_08BIT, 0x3A}, + /* 0x04h */ + {0x3203, CRL_REG_LEN_08BIT, 0xC8}, + {0x3207, CRL_REG_LEN_08BIT, 0x54}, + {0x3213, CRL_REG_LEN_08BIT, 0x16}, + {0x3215, CRL_REG_LEN_08BIT, 0xF6}, + {0x321A, CRL_REG_LEN_08BIT, 0x14}, + {0x321B, CRL_REG_LEN_08BIT, 0x51}, + {0x3229, CRL_REG_LEN_08BIT, 0xE7}, + {0x322A, CRL_REG_LEN_08BIT, 0xF0}, + {0x322B, CRL_REG_LEN_08BIT, 0x10}, + {0x3231, CRL_REG_LEN_08BIT, 0xE7}, + {0x3232, CRL_REG_LEN_08BIT, 0xF0}, + {0x3233, CRL_REG_LEN_08BIT, 0x10}, + {0x323C, CRL_REG_LEN_08BIT, 0xE8}, + {0x323D, CRL_REG_LEN_08BIT, 0x70}, + {0x3243, CRL_REG_LEN_08BIT, 0x08}, + {0x3244, CRL_REG_LEN_08BIT, 0xE1}, + {0x3245, CRL_REG_LEN_08BIT, 0x10}, + {0x3247, CRL_REG_LEN_08BIT, 0xE7}, + {0x3248, CRL_REG_LEN_08BIT, 0x60}, + {0x3249, CRL_REG_LEN_08BIT, 0x1E}, + {0x324B, CRL_REG_LEN_08BIT, 0x00}, + {0x324C, CRL_REG_LEN_08BIT, 0x41}, + {0x3250, CRL_REG_LEN_08BIT, 0x30}, + {0x3251, CRL_REG_LEN_08BIT, 0x0A}, + {0x3252, CRL_REG_LEN_08BIT, 0xFF}, + {0x3253, CRL_REG_LEN_08BIT, 0xFF}, + {0x3254, CRL_REG_LEN_08BIT, 0xFF}, + {0x3255, CRL_REG_LEN_08BIT, 0x02}, + {0x3257, CRL_REG_LEN_08BIT, 0xF0}, + {0x325A, CRL_REG_LEN_08BIT, 0xA6}, + {0x325D, CRL_REG_LEN_08BIT, 0x14}, + {0x325E, CRL_REG_LEN_08BIT, 0x51}, + {0x3261, CRL_REG_LEN_08BIT, 0x61}, + {0x3266, CRL_REG_LEN_08BIT, 0x30}, + 
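+/*
+ * The crop block earlier in this table decodes as little-endian
+ * pairs: WV (0x303A/0x303B) = 0x02E0 = 736 = PIC_SIZE(728) + 8, and
+ * the effective horizontal size (0x303E/0x303F) = 0x051C = 1308,
+ * matching the in-line comments for this 1312x728 mode.
+ */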
{0x3267, CRL_REG_LEN_08BIT, 0x05}, + {0x3275, CRL_REG_LEN_08BIT, 0xE7}, + {0x3281, CRL_REG_LEN_08BIT, 0xEA}, + {0x3282, CRL_REG_LEN_08BIT, 0x70}, + {0x3285, CRL_REG_LEN_08BIT, 0xFF}, + {0x328A, CRL_REG_LEN_08BIT, 0xF0}, + {0x328D, CRL_REG_LEN_08BIT, 0xB6}, + {0x328E, CRL_REG_LEN_08BIT, 0x40}, + {0x3290, CRL_REG_LEN_08BIT, 0x42}, + {0x3291, CRL_REG_LEN_08BIT, 0x51}, + {0x3292, CRL_REG_LEN_08BIT, 0x1E}, + {0x3294, CRL_REG_LEN_08BIT, 0xC4}, + {0x3295, CRL_REG_LEN_08BIT, 0x20}, + {0x3297, CRL_REG_LEN_08BIT, 0x50}, + {0x3298, CRL_REG_LEN_08BIT, 0x31}, + {0x3299, CRL_REG_LEN_08BIT, 0x1F}, + {0x329B, CRL_REG_LEN_08BIT, 0xC0}, + {0x329C, CRL_REG_LEN_08BIT, 0x60}, + {0x329E, CRL_REG_LEN_08BIT, 0x4C}, + {0x329F, CRL_REG_LEN_08BIT, 0x71}, + {0x32A0, CRL_REG_LEN_08BIT, 0x1F}, + {0x32A2, CRL_REG_LEN_08BIT, 0xB6}, + {0x32A3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32A4, CRL_REG_LEN_08BIT, 0x0B}, + {0x32A9, CRL_REG_LEN_08BIT, 0x24}, + {0x32AA, CRL_REG_LEN_08BIT, 0x41}, + {0x32B0, CRL_REG_LEN_08BIT, 0x25}, + {0x32B1, CRL_REG_LEN_08BIT, 0x51}, + {0x32B7, CRL_REG_LEN_08BIT, 0x1C}, + {0x32B8, CRL_REG_LEN_08BIT, 0xC1}, + {0x32B9, CRL_REG_LEN_08BIT, 0x12}, + {0x32BE, CRL_REG_LEN_08BIT, 0x1D}, + {0x32BF, CRL_REG_LEN_08BIT, 0xD1}, + {0x32C0, CRL_REG_LEN_08BIT, 0x12}, + {0x32C2, CRL_REG_LEN_08BIT, 0xA8}, + {0x32C3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32C4, CRL_REG_LEN_08BIT, 0x0A}, + {0x32C5, CRL_REG_LEN_08BIT, 0x1E}, + {0x32C6, CRL_REG_LEN_08BIT, 0x21}, + {0x32C9, CRL_REG_LEN_08BIT, 0xB0}, + {0x32CA, CRL_REG_LEN_08BIT, 0x40}, + {0x32CC, CRL_REG_LEN_08BIT, 0x26}, + {0x32CD, CRL_REG_LEN_08BIT, 0xA1}, + {0x32D0, CRL_REG_LEN_08BIT, 0xB6}, + {0x32D1, CRL_REG_LEN_08BIT, 0xC0}, + {0x32D2, CRL_REG_LEN_08BIT, 0x0B}, + {0x32D4, CRL_REG_LEN_08BIT, 0xE2}, + {0x32D5, CRL_REG_LEN_08BIT, 0x40}, + {0x32D8, CRL_REG_LEN_08BIT, 0x4E}, + {0x32D9, CRL_REG_LEN_08BIT, 0xA1}, + {0x32EC, CRL_REG_LEN_08BIT, 0xF0}, + /* 0x05h */ + {0x3316, CRL_REG_LEN_08BIT, 0x02}, + {0x3317, CRL_REG_LEN_08BIT, 0x02}, + {0x3318, CRL_REG_LEN_08BIT, 0xD8}, /* PIC_SIZE = 728 */ + {0x3319, CRL_REG_LEN_08BIT, 0x02}, + {0x334E, CRL_REG_LEN_08BIT, 0x3D}, /* INCL selection 27MHz */ + {0x334F, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_register_write_rep imx185_1952_1096_27MHZ[] = { + /* 0x02h */ + {0x3005, CRL_REG_LEN_08BIT, 0x01}, /* ADBIT: 10/12 */ + {0x3007, CRL_REG_LEN_08BIT, 0x10}, /* 1080p mode */ + {0x300A, CRL_REG_LEN_08BIT, 0xF0}, + {0x301D, CRL_REG_LEN_08BIT, 0x08}, + {0x301E, CRL_REG_LEN_08BIT, 0x02}, + {0x3048, CRL_REG_LEN_08BIT, 0x33}, + {0x305C, CRL_REG_LEN_08BIT, 0x2c}, /* INCLKSEL default */ + {0x305E, CRL_REG_LEN_08BIT, 0x21}, + {0x3063, CRL_REG_LEN_08BIT, 0x54}, + /* Crop settings */ + {0x3038, CRL_REG_LEN_08BIT, 0x00}, /* WPV = 0 */ + {0x3039, CRL_REG_LEN_08BIT, 0x00}, + {0x303A, CRL_REG_LEN_08BIT, 0xC0}, /* WV = PIC_SIZE + 8 */ + {0x303B, CRL_REG_LEN_08BIT, 0x04}, + {0x303C, CRL_REG_LEN_08BIT, 0x00}, /* WPH = 0 */ + {0x303D, CRL_REG_LEN_08BIT, 0x00}, + {0x303E, CRL_REG_LEN_08BIT, 0x9C}, + {0x303F, CRL_REG_LEN_08BIT, 0x07}, + /* 0x03h */ + {0x311D, CRL_REG_LEN_08BIT, 0x0A}, + {0x3123, CRL_REG_LEN_08BIT, 0x0F}, + {0x3126, CRL_REG_LEN_08BIT, 0xDF}, + {0x3147, CRL_REG_LEN_08BIT, 0x87}, + {0x31E0, CRL_REG_LEN_08BIT, 0x01}, + {0x31E1, CRL_REG_LEN_08BIT, 0x9E}, + {0x31E2, CRL_REG_LEN_08BIT, 0x01}, + {0x31E5, CRL_REG_LEN_08BIT, 0x05}, + {0x31E6, CRL_REG_LEN_08BIT, 0x05}, + {0x31E7, CRL_REG_LEN_08BIT, 0x3A}, + {0x31E8, CRL_REG_LEN_08BIT, 0x3A}, + /* 0x04h */ + {0x3203, CRL_REG_LEN_08BIT, 0xC8}, + {0x3207, CRL_REG_LEN_08BIT, 0x54}, + {0x3213, 
CRL_REG_LEN_08BIT, 0x16}, + {0x3215, CRL_REG_LEN_08BIT, 0xF6}, + {0x321A, CRL_REG_LEN_08BIT, 0x14}, + {0x321B, CRL_REG_LEN_08BIT, 0x51}, + {0x3229, CRL_REG_LEN_08BIT, 0xE7}, + {0x322A, CRL_REG_LEN_08BIT, 0xF0}, + {0x322B, CRL_REG_LEN_08BIT, 0x10}, + {0x3231, CRL_REG_LEN_08BIT, 0xE7}, + {0x3232, CRL_REG_LEN_08BIT, 0xF0}, + {0x3233, CRL_REG_LEN_08BIT, 0x10}, + {0x323C, CRL_REG_LEN_08BIT, 0xE8}, + {0x323D, CRL_REG_LEN_08BIT, 0x70}, + {0x3243, CRL_REG_LEN_08BIT, 0x08}, + {0x3244, CRL_REG_LEN_08BIT, 0xE1}, + {0x3245, CRL_REG_LEN_08BIT, 0x10}, + {0x3247, CRL_REG_LEN_08BIT, 0xE7}, + {0x3248, CRL_REG_LEN_08BIT, 0x60}, + {0x3249, CRL_REG_LEN_08BIT, 0x1E}, + {0x324B, CRL_REG_LEN_08BIT, 0x00}, + {0x324C, CRL_REG_LEN_08BIT, 0x41}, + {0x3250, CRL_REG_LEN_08BIT, 0x30}, + {0x3251, CRL_REG_LEN_08BIT, 0x0A}, + {0x3252, CRL_REG_LEN_08BIT, 0xFF}, + {0x3253, CRL_REG_LEN_08BIT, 0xFF}, + {0x3254, CRL_REG_LEN_08BIT, 0xFF}, + {0x3255, CRL_REG_LEN_08BIT, 0x02}, + {0x3257, CRL_REG_LEN_08BIT, 0xF0}, + {0x325A, CRL_REG_LEN_08BIT, 0xA6}, + {0x325D, CRL_REG_LEN_08BIT, 0x14}, + {0x325E, CRL_REG_LEN_08BIT, 0x51}, + {0x3261, CRL_REG_LEN_08BIT, 0x61}, + {0x3266, CRL_REG_LEN_08BIT, 0x30}, + {0x3267, CRL_REG_LEN_08BIT, 0x05}, + {0x3275, CRL_REG_LEN_08BIT, 0xE7}, + {0x3281, CRL_REG_LEN_08BIT, 0xEA}, + {0x3282, CRL_REG_LEN_08BIT, 0x70}, + {0x3285, CRL_REG_LEN_08BIT, 0xFF}, + {0x328A, CRL_REG_LEN_08BIT, 0xF0}, + {0x328D, CRL_REG_LEN_08BIT, 0xB6}, + {0x328E, CRL_REG_LEN_08BIT, 0x40}, + {0x3290, CRL_REG_LEN_08BIT, 0x42}, + {0x3291, CRL_REG_LEN_08BIT, 0x51}, + {0x3292, CRL_REG_LEN_08BIT, 0x1E}, + {0x3294, CRL_REG_LEN_08BIT, 0xC4}, + {0x3295, CRL_REG_LEN_08BIT, 0x20}, + {0x3297, CRL_REG_LEN_08BIT, 0x50}, + {0x3298, CRL_REG_LEN_08BIT, 0x31}, + {0x3299, CRL_REG_LEN_08BIT, 0x1F}, + {0x329B, CRL_REG_LEN_08BIT, 0xC0}, + {0x329C, CRL_REG_LEN_08BIT, 0x60}, + {0x329E, CRL_REG_LEN_08BIT, 0x4C}, + {0x329F, CRL_REG_LEN_08BIT, 0x71}, + {0x32A0, CRL_REG_LEN_08BIT, 0x1F}, + {0x32A2, CRL_REG_LEN_08BIT, 0xB6}, + {0x32A3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32A4, CRL_REG_LEN_08BIT, 0x0B}, + {0x32A9, CRL_REG_LEN_08BIT, 0x24}, + {0x32AA, CRL_REG_LEN_08BIT, 0x41}, + {0x32B0, CRL_REG_LEN_08BIT, 0x25}, + {0x32B1, CRL_REG_LEN_08BIT, 0x51}, + {0x32B7, CRL_REG_LEN_08BIT, 0x1C}, + {0x32B8, CRL_REG_LEN_08BIT, 0xC1}, + {0x32B9, CRL_REG_LEN_08BIT, 0x12}, + {0x32BE, CRL_REG_LEN_08BIT, 0x1D}, + {0x32BF, CRL_REG_LEN_08BIT, 0xD1}, + {0x32C0, CRL_REG_LEN_08BIT, 0x12}, + {0x32C2, CRL_REG_LEN_08BIT, 0xA8}, + {0x32C3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32C4, CRL_REG_LEN_08BIT, 0x0A}, + {0x32C5, CRL_REG_LEN_08BIT, 0x1E}, + {0x32C6, CRL_REG_LEN_08BIT, 0x21}, + {0x32C9, CRL_REG_LEN_08BIT, 0xB0}, + {0x32CA, CRL_REG_LEN_08BIT, 0x40}, + {0x32CC, CRL_REG_LEN_08BIT, 0x26}, + {0x32CD, CRL_REG_LEN_08BIT, 0xA1}, + {0x32D0, CRL_REG_LEN_08BIT, 0xB6}, + {0x32D1, CRL_REG_LEN_08BIT, 0xC0}, + {0x32D2, CRL_REG_LEN_08BIT, 0x0B}, + {0x32D4, CRL_REG_LEN_08BIT, 0xE2}, + {0x32D5, CRL_REG_LEN_08BIT, 0x40}, + {0x32D8, CRL_REG_LEN_08BIT, 0x4E}, + {0x32D9, CRL_REG_LEN_08BIT, 0xA1}, + {0x32EC, CRL_REG_LEN_08BIT, 0xF0}, + /* 0x05h */ + {0x3316, CRL_REG_LEN_08BIT, 0x04}, + {0x3317, CRL_REG_LEN_08BIT, 0x04}, + {0x3318, CRL_REG_LEN_08BIT, 0x48}, /* PIC_SIZE = 1096 */ + {0x3319, CRL_REG_LEN_08BIT, 0x04}, + {0x334E, CRL_REG_LEN_08BIT, 0x3D}, /* INCL selection 27MHz */ + {0x334F, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_register_write_rep imx185_1952_1096_BUILD_IN_WDR_27MHZ[] = { + /* 0x02h */ + {0x3005, CRL_REG_LEN_08BIT, 0x01}, /* ADBIT: 10/12 */ + {0x3007, CRL_REG_LEN_08BIT, 0x10}, /* mode 
selection */ + {0x300A, CRL_REG_LEN_08BIT, 0xF0}, + {0x300C, CRL_REG_LEN_08BIT, 0x02}, + {0x300F, CRL_REG_LEN_08BIT, 0x05}, + {0x3010, CRL_REG_LEN_08BIT, 0x38}, + {0x3012, CRL_REG_LEN_08BIT, 0x0F}, + {0x301B, CRL_REG_LEN_08BIT, 0x98}, + {0x301C, CRL_REG_LEN_08BIT, 0x08}, + {0x301D, CRL_REG_LEN_08BIT, 0x08}, + {0x301E, CRL_REG_LEN_08BIT, 0x02}, + {0x3048, CRL_REG_LEN_08BIT, 0x33}, + {0x3056, CRL_REG_LEN_08BIT, 0xC9}, + {0x3057, CRL_REG_LEN_08BIT, 0x64}, + {0x305C, CRL_REG_LEN_08BIT, 0x2c}, /* INCLKSEL default */ + {0x305E, CRL_REG_LEN_08BIT, 0x21}, + {0x3063, CRL_REG_LEN_08BIT, 0x54}, + /* Crop settings */ + {0x3038, CRL_REG_LEN_08BIT, 0x00}, /* WPV = 0 */ + {0x3039, CRL_REG_LEN_08BIT, 0x00}, + {0x303A, CRL_REG_LEN_08BIT, 0x4C}, /* WV = PIC_SIZE + 8 */ + {0x303B, CRL_REG_LEN_08BIT, 0x04}, + {0x303C, CRL_REG_LEN_08BIT, 0x00}, /* WPH = 0 */ + {0x303D, CRL_REG_LEN_08BIT, 0x00}, + {0x303E, CRL_REG_LEN_08BIT, 0x9C}, /* Effective size = 1948*/ + {0x303F, CRL_REG_LEN_08BIT, 0x07}, + /* 0x03h */ + {0x311D, CRL_REG_LEN_08BIT, 0x0A}, + {0x3123, CRL_REG_LEN_08BIT, 0x0F}, + {0x3126, CRL_REG_LEN_08BIT, 0xDF}, + {0x3147, CRL_REG_LEN_08BIT, 0x87}, + {0x31E0, CRL_REG_LEN_08BIT, 0x01}, + {0x31E1, CRL_REG_LEN_08BIT, 0x9E}, + {0x31E2, CRL_REG_LEN_08BIT, 0x01}, + {0x31E5, CRL_REG_LEN_08BIT, 0x05}, + {0x31E6, CRL_REG_LEN_08BIT, 0x05}, + {0x31E7, CRL_REG_LEN_08BIT, 0x3A}, + {0x31E8, CRL_REG_LEN_08BIT, 0x3A}, + /* 0x04h */ + {0x3203, CRL_REG_LEN_08BIT, 0xC8}, + {0x3207, CRL_REG_LEN_08BIT, 0x54}, + {0x3213, CRL_REG_LEN_08BIT, 0x16}, + {0x3215, CRL_REG_LEN_08BIT, 0xF6}, + {0x321A, CRL_REG_LEN_08BIT, 0x14}, + {0x321B, CRL_REG_LEN_08BIT, 0x51}, + {0x3229, CRL_REG_LEN_08BIT, 0xE7}, + {0x322A, CRL_REG_LEN_08BIT, 0xF0}, + {0x322B, CRL_REG_LEN_08BIT, 0x10}, + {0x3231, CRL_REG_LEN_08BIT, 0xE7}, + {0x3232, CRL_REG_LEN_08BIT, 0xF0}, + {0x3233, CRL_REG_LEN_08BIT, 0x10}, + {0x323C, CRL_REG_LEN_08BIT, 0xE8}, + {0x323D, CRL_REG_LEN_08BIT, 0x70}, + {0x3243, CRL_REG_LEN_08BIT, 0x08}, + {0x3244, CRL_REG_LEN_08BIT, 0xE1}, + {0x3245, CRL_REG_LEN_08BIT, 0x10}, + {0x3247, CRL_REG_LEN_08BIT, 0xE7}, + {0x3248, CRL_REG_LEN_08BIT, 0x60}, + {0x3249, CRL_REG_LEN_08BIT, 0x1E}, + {0x324B, CRL_REG_LEN_08BIT, 0x00}, + {0x324C, CRL_REG_LEN_08BIT, 0x41}, + {0x3250, CRL_REG_LEN_08BIT, 0x30}, + {0x3251, CRL_REG_LEN_08BIT, 0x0A}, + {0x3252, CRL_REG_LEN_08BIT, 0xFF}, + {0x3253, CRL_REG_LEN_08BIT, 0xFF}, + {0x3254, CRL_REG_LEN_08BIT, 0xFF}, + {0x3255, CRL_REG_LEN_08BIT, 0x02}, + {0x3257, CRL_REG_LEN_08BIT, 0xF0}, + {0x325A, CRL_REG_LEN_08BIT, 0xA6}, + {0x325D, CRL_REG_LEN_08BIT, 0x14}, + {0x325E, CRL_REG_LEN_08BIT, 0x51}, + {0x3261, CRL_REG_LEN_08BIT, 0x61}, + {0x3266, CRL_REG_LEN_08BIT, 0x30}, + {0x3267, CRL_REG_LEN_08BIT, 0x05}, + {0x3275, CRL_REG_LEN_08BIT, 0xE7}, + {0x3281, CRL_REG_LEN_08BIT, 0xEA}, + {0x3282, CRL_REG_LEN_08BIT, 0x70}, + {0x3285, CRL_REG_LEN_08BIT, 0xFF}, + {0x328A, CRL_REG_LEN_08BIT, 0xF0}, + {0x328D, CRL_REG_LEN_08BIT, 0xB6}, + {0x328E, CRL_REG_LEN_08BIT, 0x40}, + {0x3290, CRL_REG_LEN_08BIT, 0x42}, + {0x3291, CRL_REG_LEN_08BIT, 0x51}, + {0x3292, CRL_REG_LEN_08BIT, 0x1E}, + {0x3294, CRL_REG_LEN_08BIT, 0xC4}, + {0x3295, CRL_REG_LEN_08BIT, 0x20}, + {0x3297, CRL_REG_LEN_08BIT, 0x50}, + {0x3298, CRL_REG_LEN_08BIT, 0x31}, + {0x3299, CRL_REG_LEN_08BIT, 0x1F}, + {0x329B, CRL_REG_LEN_08BIT, 0xC0}, + {0x329C, CRL_REG_LEN_08BIT, 0x60}, + {0x329E, CRL_REG_LEN_08BIT, 0x4C}, + {0x329F, CRL_REG_LEN_08BIT, 0x71}, + {0x32A0, CRL_REG_LEN_08BIT, 0x1F}, + {0x32A2, CRL_REG_LEN_08BIT, 0xB6}, + {0x32A3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32A4, 
CRL_REG_LEN_08BIT, 0x0B}, + {0x32A9, CRL_REG_LEN_08BIT, 0x24}, + {0x32AA, CRL_REG_LEN_08BIT, 0x41}, + {0x32B0, CRL_REG_LEN_08BIT, 0x25}, + {0x32B1, CRL_REG_LEN_08BIT, 0x51}, + {0x32B7, CRL_REG_LEN_08BIT, 0x1C}, + {0x32B8, CRL_REG_LEN_08BIT, 0xC1}, + {0x32B9, CRL_REG_LEN_08BIT, 0x12}, + {0x32BE, CRL_REG_LEN_08BIT, 0x1D}, + {0x32BF, CRL_REG_LEN_08BIT, 0xD1}, + {0x32C0, CRL_REG_LEN_08BIT, 0x12}, + {0x32C2, CRL_REG_LEN_08BIT, 0xA8}, + {0x32C3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32C4, CRL_REG_LEN_08BIT, 0x0A}, + {0x32C5, CRL_REG_LEN_08BIT, 0x1E}, + {0x32C6, CRL_REG_LEN_08BIT, 0x21}, + {0x32C9, CRL_REG_LEN_08BIT, 0xB0}, + {0x32CA, CRL_REG_LEN_08BIT, 0x40}, + {0x32CC, CRL_REG_LEN_08BIT, 0x26}, + {0x32CD, CRL_REG_LEN_08BIT, 0xA1}, + {0x32D0, CRL_REG_LEN_08BIT, 0xB6}, + {0x32D1, CRL_REG_LEN_08BIT, 0xC0}, + {0x32D2, CRL_REG_LEN_08BIT, 0x0B}, + {0x32D4, CRL_REG_LEN_08BIT, 0xE2}, + {0x32D5, CRL_REG_LEN_08BIT, 0x40}, + {0x32D8, CRL_REG_LEN_08BIT, 0x4E}, + {0x32D9, CRL_REG_LEN_08BIT, 0xA1}, + {0x32EC, CRL_REG_LEN_08BIT, 0xF0}, + /* 0x05h */ + {0x3303, CRL_REG_LEN_08BIT, 0x10}, /* repetation wdr */ + {0x3314, CRL_REG_LEN_08BIT, 0x08}, + {0x3316, CRL_REG_LEN_08BIT, 0x04}, + {0x3317, CRL_REG_LEN_08BIT, 0x04}, + {0x3318, CRL_REG_LEN_08BIT, 0x48}, /* PIC_SIZE = 1096 */ + {0x3319, CRL_REG_LEN_08BIT, 0x04}, + {0x334E, CRL_REG_LEN_08BIT, 0x3D}, /* INCL selection 27MHz */ + {0x334F, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_register_write_rep imx185_1952_1208_27MHZ[] = { + /* 0x02h */ + {0x3005, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT: 10/12 */ + {0x3007, CRL_REG_LEN_08BIT, 0x00}, /* WUXGA cropping */ + {0x3018, CRL_REG_LEN_08BIT, 0x28}, + {0x3019, CRL_REG_LEN_08BIT, 0x05}, + {0x301B, CRL_REG_LEN_08BIT, 0x53}, + {0x301C, CRL_REG_LEN_08BIT, 0x07}, + {0x301D, CRL_REG_LEN_08BIT, 0x08}, + {0x301E, CRL_REG_LEN_08BIT, 0x02}, + {0x3048, CRL_REG_LEN_08BIT, 0x33}, + {0x305C, CRL_REG_LEN_08BIT, 0x2c}, /* INCLKSEL default */ + {0x305E, CRL_REG_LEN_08BIT, 0x21}, + {0x3063, CRL_REG_LEN_08BIT, 0x54}, + /* Crop settings */ + {0x3038, CRL_REG_LEN_08BIT, 0x00}, /* WPV = 0 */ + {0x3039, CRL_REG_LEN_08BIT, 0x00}, + {0x303A, CRL_REG_LEN_08BIT, 0xC0}, /* WV = PIC_SIZE + 8 */ + {0x303B, CRL_REG_LEN_08BIT, 0x04}, + {0x303C, CRL_REG_LEN_08BIT, 0x00}, /* WPH = 0 */ + {0x303D, CRL_REG_LEN_08BIT, 0x00}, + {0x303E, CRL_REG_LEN_08BIT, 0x9C}, + {0x303F, CRL_REG_LEN_08BIT, 0x07}, + /* 0x03h */ + {0x311D, CRL_REG_LEN_08BIT, 0x0A}, + {0x3123, CRL_REG_LEN_08BIT, 0x0F}, + {0x3126, CRL_REG_LEN_08BIT, 0x00}, + {0x3147, CRL_REG_LEN_08BIT, 0x87}, + {0x31E0, CRL_REG_LEN_08BIT, 0x00}, + {0x31E1, CRL_REG_LEN_08BIT, 0x9E}, + {0x31E2, CRL_REG_LEN_08BIT, 0x01}, + {0x31E5, CRL_REG_LEN_08BIT, 0x05}, + {0x31E6, CRL_REG_LEN_08BIT, 0x05}, + {0x31E7, CRL_REG_LEN_08BIT, 0x3A}, + {0x31E8, CRL_REG_LEN_08BIT, 0x3A}, + /* 0x04h */ + {0x3203, CRL_REG_LEN_08BIT, 0xC8}, + {0x3207, CRL_REG_LEN_08BIT, 0x54}, + {0x3213, CRL_REG_LEN_08BIT, 0x16}, + {0x3215, CRL_REG_LEN_08BIT, 0xF6}, + {0x321A, CRL_REG_LEN_08BIT, 0x14}, + {0x321B, CRL_REG_LEN_08BIT, 0x51}, + {0x3229, CRL_REG_LEN_08BIT, 0xE7}, + {0x322A, CRL_REG_LEN_08BIT, 0xF0}, + {0x322B, CRL_REG_LEN_08BIT, 0x10}, + {0x3231, CRL_REG_LEN_08BIT, 0xE7}, + {0x3232, CRL_REG_LEN_08BIT, 0xF0}, + {0x3233, CRL_REG_LEN_08BIT, 0x10}, + {0x323C, CRL_REG_LEN_08BIT, 0xE8}, + {0x323D, CRL_REG_LEN_08BIT, 0x70}, + {0x3243, CRL_REG_LEN_08BIT, 0x08}, + {0x3244, CRL_REG_LEN_08BIT, 0xE1}, + {0x3245, CRL_REG_LEN_08BIT, 0x10}, + {0x3247, CRL_REG_LEN_08BIT, 0xE7}, + {0x3248, CRL_REG_LEN_08BIT, 0x60}, + {0x3249, CRL_REG_LEN_08BIT, 0x1E}, + {0x324B, 
CRL_REG_LEN_08BIT, 0x00}, + {0x324C, CRL_REG_LEN_08BIT, 0x41}, + {0x3250, CRL_REG_LEN_08BIT, 0x30}, + {0x3251, CRL_REG_LEN_08BIT, 0x0A}, + {0x3252, CRL_REG_LEN_08BIT, 0xFF}, + {0x3253, CRL_REG_LEN_08BIT, 0xFF}, + {0x3254, CRL_REG_LEN_08BIT, 0xFF}, + {0x3255, CRL_REG_LEN_08BIT, 0x02}, + {0x3257, CRL_REG_LEN_08BIT, 0xF0}, + {0x325A, CRL_REG_LEN_08BIT, 0xA6}, + {0x325D, CRL_REG_LEN_08BIT, 0x14}, + {0x325E, CRL_REG_LEN_08BIT, 0x51}, + {0x3261, CRL_REG_LEN_08BIT, 0x61}, + {0x3266, CRL_REG_LEN_08BIT, 0x30}, + {0x3267, CRL_REG_LEN_08BIT, 0x05}, + {0x3275, CRL_REG_LEN_08BIT, 0xE7}, + {0x3281, CRL_REG_LEN_08BIT, 0xEA}, + {0x3282, CRL_REG_LEN_08BIT, 0x70}, + {0x3285, CRL_REG_LEN_08BIT, 0xFF}, + {0x328A, CRL_REG_LEN_08BIT, 0xF0}, + {0x328D, CRL_REG_LEN_08BIT, 0xB6}, + {0x328E, CRL_REG_LEN_08BIT, 0x40}, + {0x3290, CRL_REG_LEN_08BIT, 0x42}, + {0x3291, CRL_REG_LEN_08BIT, 0x51}, + {0x3292, CRL_REG_LEN_08BIT, 0x1E}, + {0x3294, CRL_REG_LEN_08BIT, 0xC4}, + {0x3295, CRL_REG_LEN_08BIT, 0x20}, + {0x3297, CRL_REG_LEN_08BIT, 0x50}, + {0x3298, CRL_REG_LEN_08BIT, 0x31}, + {0x3299, CRL_REG_LEN_08BIT, 0x1F}, + {0x329B, CRL_REG_LEN_08BIT, 0xC0}, + {0x329C, CRL_REG_LEN_08BIT, 0x60}, + {0x329E, CRL_REG_LEN_08BIT, 0x4C}, + {0x329F, CRL_REG_LEN_08BIT, 0x71}, + {0x32A0, CRL_REG_LEN_08BIT, 0x1F}, + {0x32A2, CRL_REG_LEN_08BIT, 0xB6}, + {0x32A3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32A4, CRL_REG_LEN_08BIT, 0x0B}, + {0x32A9, CRL_REG_LEN_08BIT, 0x24}, + {0x32AA, CRL_REG_LEN_08BIT, 0x41}, + {0x32B0, CRL_REG_LEN_08BIT, 0x25}, + {0x32B1, CRL_REG_LEN_08BIT, 0x51}, + {0x32B7, CRL_REG_LEN_08BIT, 0x1C}, + {0x32B8, CRL_REG_LEN_08BIT, 0xC1}, + {0x32B9, CRL_REG_LEN_08BIT, 0x12}, + {0x32BE, CRL_REG_LEN_08BIT, 0x1D}, + {0x32BF, CRL_REG_LEN_08BIT, 0xD1}, + {0x32C0, CRL_REG_LEN_08BIT, 0x12}, + {0x32C2, CRL_REG_LEN_08BIT, 0xA8}, + {0x32C3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32C4, CRL_REG_LEN_08BIT, 0x0A}, + {0x32C5, CRL_REG_LEN_08BIT, 0x1E}, + {0x32C6, CRL_REG_LEN_08BIT, 0x21}, + {0x32C9, CRL_REG_LEN_08BIT, 0xB0}, + {0x32CA, CRL_REG_LEN_08BIT, 0x40}, + {0x32CC, CRL_REG_LEN_08BIT, 0x26}, + {0x32CD, CRL_REG_LEN_08BIT, 0xA1}, + {0x32D0, CRL_REG_LEN_08BIT, 0xB6}, + {0x32D1, CRL_REG_LEN_08BIT, 0xC0}, + {0x32D2, CRL_REG_LEN_08BIT, 0x0B}, + {0x32D4, CRL_REG_LEN_08BIT, 0xE2}, + {0x32D5, CRL_REG_LEN_08BIT, 0x40}, + {0x32D8, CRL_REG_LEN_08BIT, 0x4E}, + {0x32D9, CRL_REG_LEN_08BIT, 0xA1}, + {0x32EC, CRL_REG_LEN_08BIT, 0xF0}, + /* 0x05h */ + {0x3316, CRL_REG_LEN_08BIT, 0x04}, + {0x3317, CRL_REG_LEN_08BIT, 0x04}, + {0x3318, CRL_REG_LEN_08BIT, 0xB8}, /* PIC_SIZE = 1208 */ + {0x3319, CRL_REG_LEN_08BIT, 0x04}, + {0x334E, CRL_REG_LEN_08BIT, 0x3D}, /* INCL selection 27MHz */ + {0x334F, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_register_write_rep imx185_1952_1208_BUILD_IN_WDR_27MHZ[] = { + /* 0x02h */ + {0x3005, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT: 10/12 */ + {0x3007, CRL_REG_LEN_08BIT, 0x00}, /* WUXGA cropping */ + {0x300C, CRL_REG_LEN_08BIT, 0x02}, + {0x300F, CRL_REG_LEN_08BIT, 0x05}, + {0x3010, CRL_REG_LEN_08BIT, 0x38}, + {0x3012, CRL_REG_LEN_08BIT, 0x0F}, + {0x3018, CRL_REG_LEN_08BIT, 0x98}, + {0x3019, CRL_REG_LEN_08BIT, 0x08}, + {0x301B, CRL_REG_LEN_08BIT, 0x65}, + {0x301C, CRL_REG_LEN_08BIT, 0x04}, + {0x301D, CRL_REG_LEN_08BIT, 0x08}, + {0x301E, CRL_REG_LEN_08BIT, 0x02}, + {0x3048, CRL_REG_LEN_08BIT, 0x33}, + {0x3056, CRL_REG_LEN_08BIT, 0xC9}, + {0x3057, CRL_REG_LEN_08BIT, 0x33}, + {0x305C, CRL_REG_LEN_08BIT, 0x2c}, /* INCLKSEL default */ + {0x305E, CRL_REG_LEN_08BIT, 0x21}, + {0x3063, CRL_REG_LEN_08BIT, 0x54}, + {0x30E1, CRL_REG_LEN_08BIT, 0xE1}, + /* Crop 
settings */ + {0x3038, CRL_REG_LEN_08BIT, 0x00}, /* WPV = 0 */ + {0x3039, CRL_REG_LEN_08BIT, 0x00}, + {0x303A, CRL_REG_LEN_08BIT, 0xC9}, + {0x303B, CRL_REG_LEN_08BIT, 0x04}, + {0x303C, CRL_REG_LEN_08BIT, 0x00}, + {0x303D, CRL_REG_LEN_08BIT, 0x00}, + {0x303E, CRL_REG_LEN_08BIT, 0x9C}, + {0x303F, CRL_REG_LEN_08BIT, 0x07}, + /* 0x03h */ + {0x311D, CRL_REG_LEN_08BIT, 0x0A}, + {0x3123, CRL_REG_LEN_08BIT, 0x0F}, + {0x3126, CRL_REG_LEN_08BIT, 0xDF}, + {0x3147, CRL_REG_LEN_08BIT, 0x87}, + {0x31E0, CRL_REG_LEN_08BIT, 0x01}, + {0x31E1, CRL_REG_LEN_08BIT, 0x9E}, + {0x31E2, CRL_REG_LEN_08BIT, 0x01}, + {0x31E5, CRL_REG_LEN_08BIT, 0x05}, + {0x31E6, CRL_REG_LEN_08BIT, 0x05}, + {0x31E7, CRL_REG_LEN_08BIT, 0x3A}, + {0x31E8, CRL_REG_LEN_08BIT, 0x3A}, + /* 0x04h */ + {0x3203, CRL_REG_LEN_08BIT, 0xC8}, + {0x3207, CRL_REG_LEN_08BIT, 0x54}, + {0x3213, CRL_REG_LEN_08BIT, 0x16}, + {0x3215, CRL_REG_LEN_08BIT, 0xF6}, + {0x321A, CRL_REG_LEN_08BIT, 0x14}, + {0x321B, CRL_REG_LEN_08BIT, 0x51}, + {0x3229, CRL_REG_LEN_08BIT, 0xE7}, + {0x322A, CRL_REG_LEN_08BIT, 0xF0}, + {0x322B, CRL_REG_LEN_08BIT, 0x10}, + {0x3231, CRL_REG_LEN_08BIT, 0xE7}, + {0x3232, CRL_REG_LEN_08BIT, 0xF0}, + {0x3233, CRL_REG_LEN_08BIT, 0x10}, + {0x323C, CRL_REG_LEN_08BIT, 0xE8}, + {0x323D, CRL_REG_LEN_08BIT, 0x70}, + {0x3243, CRL_REG_LEN_08BIT, 0x08}, + {0x3244, CRL_REG_LEN_08BIT, 0xE1}, + {0x3245, CRL_REG_LEN_08BIT, 0x10}, + {0x3247, CRL_REG_LEN_08BIT, 0xE7}, + {0x3248, CRL_REG_LEN_08BIT, 0x60}, + {0x3249, CRL_REG_LEN_08BIT, 0x1E}, + {0x324B, CRL_REG_LEN_08BIT, 0x00}, + {0x324C, CRL_REG_LEN_08BIT, 0x41}, + {0x3250, CRL_REG_LEN_08BIT, 0x30}, + {0x3251, CRL_REG_LEN_08BIT, 0x0A}, + {0x3252, CRL_REG_LEN_08BIT, 0xFF}, + {0x3253, CRL_REG_LEN_08BIT, 0xFF}, + {0x3254, CRL_REG_LEN_08BIT, 0xFF}, + {0x3255, CRL_REG_LEN_08BIT, 0x02}, + {0x3257, CRL_REG_LEN_08BIT, 0xF0}, + {0x325A, CRL_REG_LEN_08BIT, 0xA6}, + {0x325D, CRL_REG_LEN_08BIT, 0x14}, + {0x325E, CRL_REG_LEN_08BIT, 0x51}, + {0x3261, CRL_REG_LEN_08BIT, 0x61}, + {0x3266, CRL_REG_LEN_08BIT, 0x30}, + {0x3267, CRL_REG_LEN_08BIT, 0x05}, + {0x3275, CRL_REG_LEN_08BIT, 0xE7}, + {0x3281, CRL_REG_LEN_08BIT, 0xEA}, + {0x3282, CRL_REG_LEN_08BIT, 0x70}, + {0x3285, CRL_REG_LEN_08BIT, 0xFF}, + {0x328A, CRL_REG_LEN_08BIT, 0xF0}, + {0x328D, CRL_REG_LEN_08BIT, 0xB6}, + {0x328E, CRL_REG_LEN_08BIT, 0x40}, + {0x3290, CRL_REG_LEN_08BIT, 0x42}, + {0x3291, CRL_REG_LEN_08BIT, 0x51}, + {0x3292, CRL_REG_LEN_08BIT, 0x1E}, + {0x3294, CRL_REG_LEN_08BIT, 0xC4}, + {0x3295, CRL_REG_LEN_08BIT, 0x20}, + {0x3297, CRL_REG_LEN_08BIT, 0x50}, + {0x3298, CRL_REG_LEN_08BIT, 0x31}, + {0x3299, CRL_REG_LEN_08BIT, 0x1F}, + {0x329B, CRL_REG_LEN_08BIT, 0xC0}, + {0x329C, CRL_REG_LEN_08BIT, 0x60}, + {0x329E, CRL_REG_LEN_08BIT, 0x4C}, + {0x329F, CRL_REG_LEN_08BIT, 0x71}, + {0x32A0, CRL_REG_LEN_08BIT, 0x1F}, + {0x32A2, CRL_REG_LEN_08BIT, 0xB6}, + {0x32A3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32A4, CRL_REG_LEN_08BIT, 0x0B}, + {0x32A9, CRL_REG_LEN_08BIT, 0x24}, + {0x32AA, CRL_REG_LEN_08BIT, 0x41}, + {0x32B0, CRL_REG_LEN_08BIT, 0x25}, + {0x32B1, CRL_REG_LEN_08BIT, 0x51}, + {0x32B7, CRL_REG_LEN_08BIT, 0x1C}, + {0x32B8, CRL_REG_LEN_08BIT, 0xC1}, + {0x32B9, CRL_REG_LEN_08BIT, 0x12}, + {0x32BE, CRL_REG_LEN_08BIT, 0x1D}, + {0x32BF, CRL_REG_LEN_08BIT, 0xD1}, + {0x32C0, CRL_REG_LEN_08BIT, 0x12}, + {0x32C2, CRL_REG_LEN_08BIT, 0xA8}, + {0x32C3, CRL_REG_LEN_08BIT, 0xC0}, + {0x32C4, CRL_REG_LEN_08BIT, 0x0A}, + {0x32C5, CRL_REG_LEN_08BIT, 0x1E}, + {0x32C6, CRL_REG_LEN_08BIT, 0x21}, + {0x32C9, CRL_REG_LEN_08BIT, 0xB0}, + {0x32CA, CRL_REG_LEN_08BIT, 0x40}, + {0x32CC, 
CRL_REG_LEN_08BIT, 0x26}, + {0x32CD, CRL_REG_LEN_08BIT, 0xA1}, + {0x32D0, CRL_REG_LEN_08BIT, 0xB6}, + {0x32D1, CRL_REG_LEN_08BIT, 0xC0}, + {0x32D2, CRL_REG_LEN_08BIT, 0x0B}, + {0x32D4, CRL_REG_LEN_08BIT, 0xE2}, + {0x32D5, CRL_REG_LEN_08BIT, 0x40}, + {0x32D8, CRL_REG_LEN_08BIT, 0x4E}, + {0x32D9, CRL_REG_LEN_08BIT, 0xA1}, + {0x32EC, CRL_REG_LEN_08BIT, 0xF0}, + /* 0x05h */ + {0x3303, CRL_REG_LEN_08BIT, 0x00}, + {0x3314, CRL_REG_LEN_08BIT, 0x08}, + {0x3316, CRL_REG_LEN_08BIT, 0x04}, + {0x3317, CRL_REG_LEN_08BIT, 0x04}, + {0x3318, CRL_REG_LEN_08BIT, 0xB8}, /* PIC_SIZE = 1208 */ + {0x3319, CRL_REG_LEN_08BIT, 0x04}, + {0x334E, CRL_REG_LEN_08BIT, 0x3D}, /* INCL selection 27MHz */ + {0x334F, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_register_write_rep imx185_streamon_regs[] = { + {IMX185_REG_STANDBY, CRL_REG_LEN_08BIT, 0x00}, + {0x00, CRL_REG_LEN_DELAY, 30, 0x00}, /* Delay 30ms */ + {IMX185_REG_XMSTA, CRL_REG_LEN_08BIT, 0x00}, + {0x00, CRL_REG_LEN_DELAY, 30, 0x00}, /* Delay 30ms */ +}; + +static struct crl_register_write_rep imx185_streamoff_regs[] = { + {IMX185_REG_STANDBY, CRL_REG_LEN_08BIT, 0x01}, + {0x00, CRL_REG_LEN_DELAY, 30, 0x00}, /* Delay 30ms */ + {IMX185_REG_XMSTA, CRL_REG_LEN_08BIT, 0x01}, + {0x00, CRL_REG_LEN_DELAY, 30, 0x00}, /* Delay 30ms */ +}; + +static struct crl_arithmetic_ops imx185_hflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + } +}; + +/* shs1 = fll - exposure -1 */ +static struct crl_arithmetic_ops imx185_shs1_lsb_ops[] = { + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + } +}; + +static struct crl_arithmetic_ops imx185_shs1_msb_ops[] = { + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 8, + } +}; + +static struct crl_arithmetic_ops imx185_shs1_hsb_ops[] = { + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 16, + } +}; + +/* shs2 = fll - exposure * 16 -1 */ +static struct crl_arithmetic_ops imx185_shs2_lsb_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 4, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + } +}; + +static struct crl_arithmetic_ops imx185_shs2_msb_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 4, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + 
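+/*
+ * Worked example of the SHS formulas noted above, with fll = 1125
+ * and exposure = 1000 lines: shs1 = 1125 - 1000 - 1 = 124 = 0x00007C,
+ * split LSB/MSB/HSB across registers 0x3020..0x3022 below; shs2
+ * first scales exposure by 16 (the left-shift of 4), so it pairs
+ * with the much shorter WDR exposure.
+ */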
.operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 8, + } +}; + +static struct crl_arithmetic_ops imx185_shs2_hsb_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 4, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 16, + } +}; + +static struct crl_arithmetic_ops imx185_fll_msb_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + } +}; + +static struct crl_arithmetic_ops imx185_llp_msb_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + } +}; + +static struct crl_arithmetic_ops imx185_fll_hsb_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 16, + } +}; + +static struct crl_dynamic_register_access imx185_h_flip_regs[] = { + { + .address = 0x3007, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(imx185_hflip_ops), + .ops = imx185_hflip_ops, + .mask = 0x2, + } +}; + +static struct crl_dynamic_register_access imx185_v_flip_regs[] = { + { + .address = 0x3007, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x1, + } +}; + +static struct crl_dynamic_register_access imx185_ana_gain_global_regs[] = { + { + .address = 0x3014, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + } +}; + +static struct crl_dynamic_register_access imx185_shs_regs[] = { + /* + * Use 8bits access since 24bits or 32bits access will fail + * TODO: root cause the 24bits and 32bits access issues + */ + { + .address = 0x3020, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_shs1_lsb_ops), + .ops = imx185_shs1_lsb_ops, + .mask = 0xff, + }, + { + .address = 0x3021, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_shs1_msb_ops), + .ops = imx185_shs1_msb_ops, + .mask = 0xff, + }, + { + .address = 0x3022, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_shs1_hsb_ops), + .ops = imx185_shs1_hsb_ops, + .mask = 0x1, + }, + { + .address = 0x3023, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_shs2_lsb_ops), + .ops = imx185_shs2_lsb_ops, + .mask = 0xff, + }, + { + .address = 0x3024, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_shs2_msb_ops), + .ops = imx185_shs2_msb_ops, + .mask = 0xff, + }, + { + .address = 0x3025, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_shs2_hsb_ops), + .ops = imx185_shs2_hsb_ops, + .mask = 0x1, + } +}; + +static struct crl_dynamic_register_access imx185_fll_regs[] = { + /* + * Use 8bits access since 24bits or 32bits access will fail + * TODO: root cause the 24bits and 32bits access issues + */ + { + .address = 0x3018, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, + { + .address = 0x3019, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_fll_msb_ops), + .ops = imx185_fll_msb_ops, + .mask = 0xff, + }, + { + .address = 0x301a, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_fll_hsb_ops), + .ops = imx185_fll_hsb_ops, + .mask = 0x1, + }, +}; + +static struct 
crl_dynamic_register_access imx185_llp_regs[] = { + { + .address = 0x301b, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, + { + .address = 0x301c, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx185_llp_msb_ops), + .ops = imx185_llp_msb_ops, + .mask = 0xff, + }, +}; + +/* ctrl-val == 1 ? 1 * 0x02 : 0 * 0x02 -> 2 and 0 */ +static struct crl_arithmetic_ops imx185_wdr_switch_r300c_ops[] = { + { + .op = CRL_MULTIPLY, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x02, + } +}; + +/* ctrl-val == 1 ? (1 * 0x04 + 0x1) : (0 * 0x04 + 0x1) -> 0x05 and 0x01 */ +static struct crl_arithmetic_ops imx185_wdr_switch_r300f_ops[] = { + { + .op = CRL_MULTIPLY, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x04, + }, + { + .op = CRL_ADD, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x01, + } +}; + +/* ctrl-val == 1 ? (0x39 - 1 * 0x01) : (0x39 - 0 * 0x01) -> 0x38 and 0x39 */ +static struct crl_arithmetic_ops imx185_wdr_switch_r3010_ops[] = { + { + .op = CRL_MULTIPLY, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x01, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x39, + } +}; + +/* ctrl-val == 1 ? (0x50 - 1 * 0x41) : (0x50 - 0 * 0x41) -> 0x0f and 0x50 */ +static struct crl_arithmetic_ops imx185_wdr_switch_r3012_ops[] = { + { + .op = CRL_MULTIPLY, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x41, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x50, + } +}; + +static struct crl_dynamic_register_access imx185_wdr_switch_regs[] = { + { 0x300c, CRL_REG_LEN_08BIT, 0xff, + ARRAY_SIZE(imx185_wdr_switch_r300c_ops), + imx185_wdr_switch_r300c_ops, 0 }, + { 0x300f, CRL_REG_LEN_08BIT, 0xff, + ARRAY_SIZE(imx185_wdr_switch_r300f_ops), + imx185_wdr_switch_r300f_ops, 0 }, + { 0x3010, CRL_REG_LEN_08BIT, 0xff, + ARRAY_SIZE(imx185_wdr_switch_r3010_ops), + imx185_wdr_switch_r3010_ops, 0 }, + { 0x3012, CRL_REG_LEN_08BIT, 0xff, + ARRAY_SIZE(imx185_wdr_switch_r3012_ops), + imx185_wdr_switch_r3012_ops, 0 }, +}; + +/* Needed for acpi support for runtime detection */ +static struct crl_sensor_detect_config imx185_sensor_detect_regset[] = { + { + .reg = { 0x3385, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x3384, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + } +}; + +static struct crl_pll_configuration imx185_pll_configurations[] = { + { + .input_clk = 27000000, + .op_sys_clk = 56250000, + .bitsperpixel = 10, + .pixel_rate_csi = 45000000, + .pixel_rate_pa = 45000000, /* pixel_rate = MIPICLK*2 *4/10 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx185_pll_111mbps), + .pll_regs = imx185_pll_111mbps, + }, + { + .input_clk = 27000000, + .op_sys_clk = 112500000, + .bitsperpixel = 10, + .pixel_rate_csi = 90000000, + .pixel_rate_pa = 90000000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx185_pll_222mbps), + .pll_regs = imx185_pll_222mbps, + }, + { + .input_clk = 27000000, + .op_sys_clk = 112500000, + .bitsperpixel = 12, + .pixel_rate_csi = 75000000, + .pixel_rate_pa = 75000000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx185_pll_222mbps), + .pll_regs = imx185_pll_222mbps, + }, + { + .input_clk = 27000000, + 
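Each WDR ops chain above is a linear function of the 0/1 control value, so the inline comments pin down both register settings exactly. A sketch of the values the mode switch ends up writing, assuming the core substitutes the V4L2_CID_WDR_MODE value into each chain (this table is derived from the comments, it is not driver code):

    /* addr, value for ctrl-val 0 (linear), value for ctrl-val 1 (built-in WDR) */
    struct imx185_wdr_val { unsigned short addr; unsigned char off, on; };

    static const struct imx185_wdr_val imx185_wdr_vals[] = {
            { 0x300c, 0x00, 0x02 }, /* ctrl * 0x02        */
            { 0x300f, 0x01, 0x05 }, /* ctrl * 0x04 + 0x01 */
            { 0x3010, 0x39, 0x38 }, /* 0x39 - ctrl * 0x01 */
            { 0x3012, 0x50, 0x0f }, /* 0x50 - ctrl * 0x41 */
    };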
.op_sys_clk = 225000000, + .bitsperpixel = 12, + .pixel_rate_csi = 150000000, + .pixel_rate_pa = 150000000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx185_pll_445mbps), + .pll_regs = imx185_pll_445mbps, + } +}; + +static struct crl_subdev_rect_rep imx185_1952_1208_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 1208, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 1208, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 1208, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 1208, + } +}; + +static struct crl_subdev_rect_rep imx185_1952_1096_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 1208, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 1208, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 1208, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 1096, + } +}; + +static struct crl_subdev_rect_rep imx185_1312_728_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 1208, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 1208, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 1208, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1312, + .out_rect.height = 728, + } +}; + +static struct crl_mode_rep imx185_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(imx185_1952_1208_rects), + .sd_rects = imx185_1952_1208_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1952, + .height = 1208, + .min_llp = 2250, + .min_fll = 1333, + .comp_items = 1, + .ctrl_data = &ctrl_data_modes[0], + .mode_regs_items = ARRAY_SIZE(imx185_1952_1208_27MHZ), + .mode_regs = imx185_1952_1208_27MHZ, + }, + { + .sd_rects_items = ARRAY_SIZE(imx185_1952_1208_rects), + .sd_rects = imx185_1952_1208_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1952, + .height = 1208, + .min_llp = 2250, + .min_fll = 1333, + .comp_items = 1, + .ctrl_data = &ctrl_data_modes[1], + .mode_regs_items = + ARRAY_SIZE(imx185_1952_1208_BUILD_IN_WDR_27MHZ), + .mode_regs = imx185_1952_1208_BUILD_IN_WDR_27MHZ, + }, + { + .sd_rects_items = ARRAY_SIZE(imx185_1952_1096_rects), + .sd_rects = imx185_1952_1096_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1952, + .height = 1096, + .min_llp = 2200, + .min_fll = 1135, + .comp_items = 1, + .ctrl_data = &ctrl_data_modes[0], + .mode_regs_items = ARRAY_SIZE(imx185_1952_1096_27MHZ), + .mode_regs = imx185_1952_1096_27MHZ, + }, + { + .sd_rects_items = ARRAY_SIZE(imx185_1952_1096_rects), + .sd_rects = imx185_1952_1096_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1952, + .height = 1096, + .min_llp = 2200, + .min_fll = 1135, + .comp_items = 1, + .ctrl_data = &ctrl_data_modes[1], + .mode_regs_items = + ARRAY_SIZE(imx185_1952_1096_BUILD_IN_WDR_27MHZ), + .mode_regs = imx185_1952_1096_BUILD_IN_WDR_27MHZ, + }, + { + .sd_rects_items = 
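All four PLL entries satisfy the relation noted inline at the first one: the pixel rate is the MIPI clock (op_sys_clk) times two for double data rate, times the lane count, divided by bits per pixel. A sketch of that arithmetic (the helper is illustrative; the driver stores the precomputed values above):

    static unsigned long imx185_pixel_rate(unsigned long op_sys_clk,
                                           unsigned int lanes, unsigned int bpp)
    {
            /* 56.25e6*2*4/10 = 45 MHz, 112.5e6*2*4/10 = 90 MHz,
             * 112.5e6*2*4/12 = 75 MHz, 225e6*2*4/12 = 150 MHz */
            return op_sys_clk * 2 * lanes / bpp;
    }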
ARRAY_SIZE(imx185_1312_728_rects), + .sd_rects = imx185_1312_728_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1312, + .height = 728, + .min_llp = 1300, + .min_fll = 787, + .comp_items = 1, + .ctrl_data = &ctrl_data_modes[0], + .mode_regs_items = ARRAY_SIZE(imx185_1312_728_27MHZ_CROPPING), + .mode_regs = imx185_1312_728_27MHZ_CROPPING, + } +}; + +static struct crl_sensor_subdev_config imx185_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx185 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx185 pixel array", + } +}; + +static struct crl_sensor_limits imx185_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1952, + .y_addr_max = 1208, + .min_frame_length_lines = 320, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 380, + .max_line_length_pixels = 32752, +}; + +static struct crl_flip_data imx185_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + } +}; + +static struct crl_csi_data_fmt imx185_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx185_fmt_raw10), + .regs = imx185_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx185_fmt_raw10), + .regs = imx185_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx185_fmt_raw10), + .regs = imx185_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx185_fmt_raw10), + .regs = imx185_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx185_fmt_raw12), + .regs = imx185_fmt_raw12, + }, + { + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx185_fmt_raw12), + .regs = imx185_fmt_raw12, + }, + { + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx185_fmt_raw12), + .regs = imx185_fmt_raw12, + }, + { + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx185_fmt_raw12), + .regs = imx185_fmt_raw12, + } +}; + +static struct crl_v4l2_ctrl imx185_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 
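The flip table above records which Bayer order each flip combination produces: HFLIP swaps the column phase (RGGB becomes GRBG), VFLIP the row phase (RGGB becomes GBRG), and both together give BGGR, which is why the format list carries all four orders at both 10 and 12 bits. A sketch of the lookup, assuming the core indexes by the two flip control bits (the array itself is illustrative):

    /* Bayer order by flip state, matching imx185_flip_configurations */
    static const int imx185_bayer_order[2][2] = {
            /* [vflip][hflip] */
            { CRL_PIXEL_ORDER_RGGB, CRL_PIXEL_ORDER_GRBG },
            { CRL_PIXEL_ORDER_GBRG, CRL_PIXEL_ORDER_BGGR },
    };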
INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx185_h_flip_regs), + .regs = imx185_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx185_v_flip_regs), + .regs = imx185_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 160, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx185_ana_gain_global_regs), + .regs = imx185_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = IMX185_MAX_SHS1, + .data.std_data.step = 1, + .data.std_data.def = 0x47, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx185_shs_regs), + .regs = imx185_shs_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 720, + .data.std_data.max = IMX185_VMAX, + .data.std_data.step = 1, + .data.std_data.def = 0x465, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx185_fll_regs), + .regs = imx185_fll_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x898, + .data.std_data.max = IMX185_HMAX, + .data.std_data.step 
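The frame length lines and line length pixels controls here set the frame timing in the conventional way for a raw sensor of this kind: one frame occupies llp * fll pixel-rate clocks, so the mode minima above imply the nominal frame rates. A sketch under that assumption (the relation is conventional rather than spelled out in this patch; the helper is illustrative):

    static unsigned int imx185_fps(unsigned long pixel_rate_pa,
                                   unsigned int llp, unsigned int fll)
    {
            /* 1952x1208 mode: 45000000 / (2250 * 1333) ~= 15 fps,
             * and ~30 fps at the 90 MHz pixel rate (result truncates) */
            return pixel_rate_pa / ((unsigned long)llp * fll);
    }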
= 1, + .data.std_data.def = 0x898, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx185_llp_regs), + .regs = imx185_llp_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_WDR_MODE, + .name = "V4L2_CID_WDR_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx185_wdr_switch_regs), + .regs = imx185_wdr_switch_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +static struct crl_arithmetic_ops imx185_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops imx185_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc imx185_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(imx185_frame_desc_width_ops), + .ops = imx185_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(imx185_frame_desc_height_ops), + .ops = imx185_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity imx185_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 27000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + }, +}; + +struct crl_sensor_configuration imx185_crl_configuration = { + + .power_items = ARRAY_SIZE(imx185_power_items), + .power_entities = imx185_power_items, + + .powerup_regs_items = ARRAY_SIZE(imx185_powerup_standby), + .powerup_regs = imx185_powerup_standby, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(imx185_sensor_detect_regset), + .id_regs = imx185_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx185_sensor_subdevs), + .subdevs = imx185_sensor_subdevs, + + .sensor_limits = &imx185_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx185_pll_configurations), + .pll_configs = imx185_pll_configurations, + + .modes_items = ARRAY_SIZE(imx185_modes), + .modes = imx185_modes, + + .streamon_regs_items = ARRAY_SIZE(imx185_streamon_regs), + .streamon_regs = imx185_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx185_streamoff_regs), + .streamoff_regs = imx185_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx185_v4l2_ctrls), + .v4l2_ctrl_bank = imx185_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx185_crl_csi_data_fmt), + .csi_fmts = imx185_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx185_flip_configurations), + .flip_data = imx185_flip_configurations, + + .frame_desc_entries = ARRAY_SIZE(imx185_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = imx185_frame_desc, +}; + +#endif /* 
__CRLMODULE_IMX185_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx214_configuration.h b/drivers/media/i2c/crlmodule/crl_imx214_configuration.h new file mode 100644 index 000000000000..8dc536fe6280 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx214_configuration.h @@ -0,0 +1,1430 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation + * + * Author: Vinod Govindapillai + * + */ + +#ifndef __CRLMODULE_imx214_CONFIGURATION_H_ +#define __CRLMODULE_imx214_CONFIGURATION_H_ + +#include "crlmodule-nvm.h" +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep imx214_pll_1080mbps[] = { + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x87 }, + { 0x0309, CRL_REG_LEN_08BIT, 0x0a }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0310, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0820, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0821, CRL_REG_LEN_08BIT, 0xe0 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x66 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x66 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_pll_8_1080mbps[] = { + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x87 }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0309, CRL_REG_LEN_08BIT, 0x08 }, + { 0x0310, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0820, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0821, CRL_REG_LEN_08BIT, 0xe0 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_pll_1200mbps[] = { + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x96 }, + { 0x0309, CRL_REG_LEN_08BIT, 0x0a }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0310, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0820, CRL_REG_LEN_08BIT, 0x12}, + { 0x0821, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x66 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x66 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_pll_8_1200mbps[] = { + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x96 }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0309, CRL_REG_LEN_08BIT, 0x08 }, + { 0x0310, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0820, CRL_REG_LEN_08BIT, 0x12}, + { 0x0821, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_powerup_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /*24Mhz*/ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0101, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0105, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0138, 
CRL_REG_LEN_08BIT, 0x01 }, + { 0x0106, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4550, CRL_REG_LEN_08BIT, 0x02 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4642, CRL_REG_LEN_08BIT, 0x05 }, + { 0x6227, CRL_REG_LEN_08BIT, 0x11 }, + { 0x6276, CRL_REG_LEN_08BIT, 0x00 }, + { 0x900E, CRL_REG_LEN_08BIT, 0x06 }, + { 0xA802, CRL_REG_LEN_08BIT, 0x90 }, + { 0xA803, CRL_REG_LEN_08BIT, 0x11 }, + { 0xA804, CRL_REG_LEN_08BIT, 0x62 }, + { 0xA805, CRL_REG_LEN_08BIT, 0x77 }, + { 0xA806, CRL_REG_LEN_08BIT, 0xAE }, + { 0xA807, CRL_REG_LEN_08BIT, 0x34 }, + { 0xA808, CRL_REG_LEN_08BIT, 0xAE }, + { 0xA809, CRL_REG_LEN_08BIT, 0x35 }, + { 0xA80A, CRL_REG_LEN_08BIT, 0x62 }, + { 0xA80B, CRL_REG_LEN_08BIT, 0x83 }, + { 0xAE33, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4174, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4175, CRL_REG_LEN_08BIT, 0x11 }, + { 0x4612, CRL_REG_LEN_08BIT, 0x29 }, + { 0x461B, CRL_REG_LEN_08BIT, 0x12 }, + { 0x461F, CRL_REG_LEN_08BIT, 0x06 }, + { 0x4635, CRL_REG_LEN_08BIT, 0x07 }, + { 0x4637, CRL_REG_LEN_08BIT, 0x30 }, + { 0x463F, CRL_REG_LEN_08BIT, 0x18 }, + { 0x4641, CRL_REG_LEN_08BIT, 0x0D }, + { 0x465B, CRL_REG_LEN_08BIT, 0x12 }, + { 0x465F, CRL_REG_LEN_08BIT, 0x11 }, + { 0x4663, CRL_REG_LEN_08BIT, 0x11 }, + { 0x4667, CRL_REG_LEN_08BIT, 0x0F }, + { 0x466F, CRL_REG_LEN_08BIT, 0x0F }, + { 0x470E, CRL_REG_LEN_08BIT, 0x09 }, + { 0x4909, CRL_REG_LEN_08BIT, 0xAB }, + { 0x490B, CRL_REG_LEN_08BIT, 0x95 }, + { 0x4915, CRL_REG_LEN_08BIT, 0x5D }, + { 0x4A5F, CRL_REG_LEN_08BIT, 0xFF }, + { 0x4A61, CRL_REG_LEN_08BIT, 0xFF }, + { 0x4A73, CRL_REG_LEN_08BIT, 0x62 }, + { 0x4A85, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4A87, CRL_REG_LEN_08BIT, 0xFF }, + { 0x583C, CRL_REG_LEN_08BIT, 0x04 }, + { 0x620E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x6EB2, CRL_REG_LEN_08BIT, 0x01 }, + { 0x6EB3, CRL_REG_LEN_08BIT, 0x00 }, + { 0x9300, CRL_REG_LEN_08BIT, 0x02 }, +}; + +/* + * 0, 4207, 0, 3119 + * 4208, 3120 + * 4208x3120 + */ +static struct crl_register_write_rep imx214_mode_13m[] = { + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034B, CRL_REG_LEN_08BIT, 0x2F }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x35 }, + { 0x3054, CRL_REG_LEN_08BIT, 0x01 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x10 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034F, CRL_REG_LEN_08BIT, 0x30 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x10 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x0C }, + { 0x040F, CRL_REG_LEN_08BIT, 0x30 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0B06, CRL_REG_LEN_08BIT, 0x01 }, + { 
0x30A2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30B4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A02, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3011, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3013, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x020E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0210, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0211, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0212, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0213, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0214, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0215, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0216, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0217, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4170, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4171, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4176, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4177, CRL_REG_LEN_08BIT, 0x3C }, + { 0xAE20, CRL_REG_LEN_08BIT, 0x04 }, + { 0xAE21, CRL_REG_LEN_08BIT, 0x5C }, + { 0x0138, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_mode_2k[] = { + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034B, CRL_REG_LEN_08BIT, 0x2F }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x35 }, + { 0x3054, CRL_REG_LEN_08BIT, 0x01 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x08 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x38 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x06 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x18 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x08 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x38 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x06 }, + { 0x040F, CRL_REG_LEN_08BIT, 0x18 }, + { 0x0B06, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30A2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30B4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A02, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3011, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3013, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x020E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0210, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0211, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0212, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0213, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0214, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0215, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0216, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0217, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4170, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4171, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4176, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4177, CRL_REG_LEN_08BIT, 0x3C }, + { 0xAE20, CRL_REG_LEN_08BIT, 0x04 }, + { 0xAE21, CRL_REG_LEN_08BIT, 0x5C }, + { 0x0138, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_mode_4k2k[] = { + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0220, CRL_REG_LEN_08BIT, 
0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x78 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0A }, + { 0x034B, CRL_REG_LEN_08BIT, 0xB8 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x35 }, + { 0x3054, CRL_REG_LEN_08BIT, 0x01 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x10 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x09 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x40 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x10 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x09 }, + { 0x040F, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0B06, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30A2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30B4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A02, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3011, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3013, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x020E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0210, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0211, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0212, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0213, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0214, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0215, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0216, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0217, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4170, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4171, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4176, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4177, CRL_REG_LEN_08BIT, 0x3C }, + { 0xAE20, CRL_REG_LEN_08BIT, 0x04 }, + { 0xAE21, CRL_REG_LEN_08BIT, 0x5C }, + { 0x0138, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_mode_1120[] = { + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x38 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0347, CRL_REG_LEN_08BIT, 0xB8 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0A }, + { 0x034B, CRL_REG_LEN_08BIT, 0xB8 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x35 }, + { 0x3054, CRL_REG_LEN_08BIT, 0x01 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x08 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x60 }, + { 
0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x08 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x040F, CRL_REG_LEN_08BIT, 0x60 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0x68 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0B06, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30A2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30B4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A02, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3011, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3013, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x020E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0210, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0211, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0212, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0213, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0214, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0215, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0216, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0217, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4170, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4171, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4176, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4177, CRL_REG_LEN_08BIT, 0x3C }, + { 0xAE20, CRL_REG_LEN_08BIT, 0x04 }, + { 0xAE21, CRL_REG_LEN_08BIT, 0x5C }, + { 0x0138, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_mode_1080[] = { + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x78 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0A }, + { 0x034B, CRL_REG_LEN_08BIT, 0xB8 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x35 }, + { 0x3054, CRL_REG_LEN_08BIT, 0x01 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x07 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x80 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x38 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x07 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x80 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x040F, CRL_REG_LEN_08BIT, 0x38 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0x68 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0B06, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30A2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30B4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A02, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3011, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3013, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x020E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0210, 
CRL_REG_LEN_08BIT, 0x01 }, + { 0x0211, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0212, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0213, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0214, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0215, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0216, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0217, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4170, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4171, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4176, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4177, CRL_REG_LEN_08BIT, 0x3C }, + { 0xAE20, CRL_REG_LEN_08BIT, 0x04 }, + { 0xAE21, CRL_REG_LEN_08BIT, 0x5C }, + { 0x0138, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_mode_720[] = { + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0344, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x78 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x8E }, + { 0x0348, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0349, CRL_REG_LEN_08BIT, 0xF7 }, + { 0x034A, CRL_REG_LEN_08BIT, 0x07 }, + { 0x034B, CRL_REG_LEN_08BIT, 0xA1 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x35 }, + { 0x3054, CRL_REG_LEN_08BIT, 0x01 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x05 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x02 }, + { 0x034F, CRL_REG_LEN_08BIT, 0xD0 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x05 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x80 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x03 }, + { 0x040F, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A03, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3A04, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x3A05, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0B06, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30A2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30B4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A02, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3011, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3013, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x020E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0210, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0211, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0212, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0213, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0214, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0215, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0216, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0217, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4170, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4171, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4176, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4177, CRL_REG_LEN_08BIT, 0x3C }, + { 0xAE20, CRL_REG_LEN_08BIT, 0x04 }, + { 0xAE21, CRL_REG_LEN_08BIT, 0x5C }, + { 0x0138, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx214_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep imx214_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_register_write_rep imx214_data_fmt_width10[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0a0a } +}; + +static struct crl_register_write_rep 
imx214_data_fmt_width8[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0808 } +}; + +static struct crl_arithmetic_ops imx214_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_dynamic_register_access imx214_h_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x1, + }, +}; + +static struct crl_dynamic_register_access imx214_v_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(imx214_vflip_ops), + .ops = imx214_vflip_ops, + .mask = 0x2, + }, +}; + +struct crl_register_write_rep imx214_poweroff_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, +}; + + +static struct crl_dynamic_register_access imx214_ana_gain_global_regs[] = { + { + .address = 0x0204, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx214_exposure_regs[] = { + { + .address = 0x0202, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + } +}; + +static struct crl_dynamic_register_access imx214_vblank_regs[] = { + { + .address = 0x0340, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx214_hblank_regs[] = { + { + .address = 0x0342, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx214_test_pattern_regs[] = { + { + .address = 0x0600, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff + }, +}; + +static struct crl_sensor_detect_config imx214_sensor_detect_regset[] = { + { + .reg = { 0x0019, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 5, + }, + { + .reg = { 0x0016, CRL_REG_LEN_16BIT, 0x0000ffff }, + .width = 7, + }, +}; + +const s64 imx214_op_sys_clock[] = { 504000000, 504000000, 600000000, + 600000000}; + +static struct crl_pll_configuration imx214_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 504000000, + .bitsperpixel = 8, + .pixel_rate_csi = 432000000, + .pixel_rate_pa = 432000000, + .comp_items = 0, + .ctrl_data = 0, + .csi_lanes = 4, + .pll_regs_items = ARRAY_SIZE(imx214_pll_8_1080mbps), + .pll_regs = imx214_pll_8_1080mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 504000000, + .bitsperpixel = 10, + .pixel_rate_csi = 432000000, + .pixel_rate_pa = 432000000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx214_pll_1080mbps), + .pll_regs = imx214_pll_1080mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 600000000, + .bitsperpixel = 8, + .pixel_rate_csi = 480000000, + .pixel_rate_pa = 480000000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx214_pll_8_1200mbps), + .pll_regs = imx214_pll_8_1200mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 600000000, + .bitsperpixel = 10, + .pixel_rate_csi = 480000000, + .pixel_rate_pa = 480000000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx214_pll_1200mbps), + .pll_regs = imx214_pll_1200mbps, + }, + +}; + +/* + * 0,5343,448,3567 + * 5344, 3120 + * Dig Crop: (568,0)->4208x3120 + * Scale_m 16 + * 4208x3120 + */ + +static struct crl_subdev_rect_rep imx214_13m_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + 
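Both imx214 flip controls target the same register, 0x0101, so the CRL_REG_READ_AND_UPDATE flag is what keeps them from clobbering each other: HFLIP owns bit 0 (mask 0x1) and VFLIP is shifted into bit 1 by its lshift op (mask 0x2). A sketch of the masked read-modify-write this implies, assuming the core applies it on every such access (helper name illustrative):

    /* keep all bits outside 'mask' intact, update only the masked field */
    static unsigned char crl_masked_update(unsigned char old, unsigned char mask,
                                           unsigned char val)
    {
            return (old & ~mask) | (val & mask);
    }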
.out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 3120, + }, +}; + +static struct crl_subdev_rect_rep imx214_4k2k_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 376, + .out_rect.width = 4208, + .out_rect.height = 2368, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 2368, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 2368, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 2368, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 2368, + }, +}; + +static struct crl_subdev_rect_rep imx214_2k_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2104, + .out_rect.height = 1560, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 2104, + .in_rect.height = 1560, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2104, + .out_rect.height = 1560, + }, +}; + +static struct crl_subdev_rect_rep imx214_1120_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 56, + .out_rect.top = 440, + .out_rect.width = 4096, + .out_rect.height = 2240, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4096, + .in_rect.height = 2240, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2048, + .out_rect.height = 1120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 2048, + .in_rect.height = 1120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2048, + .out_rect.height = 1120, + }, +}; + +static struct crl_subdev_rect_rep imx214_1080_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 376, + .out_rect.width = 4208, + .out_rect.height = 2368, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 2368, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2104, + .out_rect.height = 1184, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + 
.in_rect.width = 2104, + .in_rect.height = 1184, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep imx214_720_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4208, + .in_rect.height = 3120, + .out_rect.left = 1400, + .out_rect.top = 1166, + .out_rect.width = 1408, + .out_rect.height = 788, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1408, + .in_rect.height = 788, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1408, + .out_rect.height = 788, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1408, + .in_rect.height = 788, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_mode_rep imx214_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(imx214_13m_rects), + .sd_rects = imx214_13m_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4208, + .height = 3120, + .min_llp = 5008, + .min_fll = 3180, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx214_mode_13m), + .mode_regs = imx214_mode_13m, + }, + { + .sd_rects_items = ARRAY_SIZE(imx214_4k2k_rects), + .sd_rects = imx214_4k2k_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4208, + .height = 2368, + .min_llp = 5008, + .min_fll = 2408, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx214_mode_4k2k), + .mode_regs = imx214_mode_4k2k, + }, + { + .sd_rects_items = ARRAY_SIZE(imx214_2k_rects), + .sd_rects = imx214_2k_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 2104, + .height = 1560, + .min_llp = 5008, + .min_fll = 1700, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx214_mode_2k), + .mode_regs = imx214_mode_2k, + }, + { + .sd_rects_items = ARRAY_SIZE(imx214_1120_rects), + .sd_rects = imx214_1120_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 2048, + .height = 1120, + .min_llp = 5008, + .min_fll = 1600, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx214_mode_1120), + .mode_regs = imx214_mode_1120, + }, + { + .sd_rects_items = ARRAY_SIZE(imx214_1080_rects), + .sd_rects = imx214_1080_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 1920, + .height = 1080, + .min_llp = 5008, + .min_fll = 1200, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx214_mode_1080), + .mode_regs = imx214_mode_1080, + }, + { + .sd_rects_items = ARRAY_SIZE(imx214_720_rects), + .sd_rects = imx214_720_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .min_llp = 5008, + .min_fll = 828, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx214_mode_720), + .mode_regs = imx214_mode_720, + }, +}; + +static struct crl_sensor_subdev_config imx214_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "imx214 scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx214 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx214 pixel array", + }, +}; + +static struct crl_sensor_limits imx214_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 4208, + .y_addr_max = 3120, + .min_frame_length_lines = 184, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 
5008, + .max_line_length_pixels = 32752, + .scaler_m_min = 16, + .scaler_m_max = 255, + .scaler_n_min = 16, + .scaler_n_max = 16, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data imx214_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, +}; + +static struct crl_csi_data_fmt imx214_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx214_data_fmt_width10), + .regs = imx214_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = ARRAY_SIZE(imx214_data_fmt_width10), + .bits_per_pixel = 10, + .regs = imx214_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = ARRAY_SIZE(imx214_data_fmt_width10), + .bits_per_pixel = 10, + .regs = imx214_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = ARRAY_SIZE(imx214_data_fmt_width10), + .bits_per_pixel = 10, + .regs = imx214_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .regs_items = ARRAY_SIZE(imx214_data_fmt_width8), + .bits_per_pixel = 8, + .regs = imx214_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = ARRAY_SIZE(imx214_data_fmt_width8), + .bits_per_pixel = 8, + .regs = imx214_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = ARRAY_SIZE(imx214_data_fmt_width8), + .bits_per_pixel = 8, + .regs = imx214_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = ARRAY_SIZE(imx214_data_fmt_width8), + .bits_per_pixel = 8, + .regs = imx214_data_fmt_width8, + }, +}; + + +static const char * const imx214_test_patterns[] = { + "Disabled", + "Solid Colour", + "Eight Vertical Colour Bars", +}; + + +static struct crl_v4l2_ctrl imx214_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = + ARRAY_SIZE(imx214_pll_configurations) - 1, + .data.v4l2_int_menu.menu = imx214_op_sys_clock, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = 
CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 480, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx214_ana_gain_global_regs), + .regs = imx214_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 1700, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx214_exposure_regs), + .regs = imx214_exposure_regs, + .dep_items = 0, /* FLL is changed automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx214_h_flip_regs), + .regs = imx214_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx214_v_flip_regs), + .regs = imx214_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 4130, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx214_vblank_regs), + .regs = imx214_vblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 5008, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 5008, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx214_hblank_regs), + .regs = imx214_hblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = 
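Once registered, these are ordinary V4L2 controls, so the exposure defined above (range 0-65500 lines, default 1700) can be set from user space with the standard control ioctl. A minimal sketch; the subdev node path is hypothetical and depends on how the pipeline enumerates on the target:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* set the coarse integration time (in lines) on the sensor subdev */
    static int imx214_set_exposure(const char *subdev, int lines)
    {
            struct v4l2_control ctrl = { .id = V4L2_CID_EXPOSURE, .value = lines };
            int fd = open(subdev, O_RDWR); /* e.g. "/dev/v4l-subdev2" (hypothetical) */
            int ret;

            if (fd < 0)
                    return -1;
            ret = ioctl(fd, VIDIOC_S_CTRL, &ctrl);
            close(fd);
            return ret;
    }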
CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_TEST_PATTERN, + .name = "V4L2_CID_TEST_PATTERN", + .type = CRL_V4L2_CTRL_TYPE_MENU_ITEMS, + .data.v4l2_menu_items.menu = imx214_test_patterns, + .data.v4l2_menu_items.size = ARRAY_SIZE(imx214_test_patterns), + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx214_test_pattern_regs), + .regs = imx214_test_pattern_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity imx214_power_items[] = { + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VANA", + .val = 2700000, + .delay = 30000, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VDIG", + .val = 1100000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .undo_val = 0, + }, +}; + +static struct crl_nvm_blob imx214_nvm_blobs[] = { + { 0x50, 0x00, 0x100 }, + { 0x51, 0x00, 0x100 }, + { 0x52, 0x00, 0x20 }, +}; + +struct crl_sensor_configuration imx214_crl_configuration = { + + .power_items = ARRAY_SIZE(imx214_power_items), + .power_entities = imx214_power_items, + + .powerup_regs_items = ARRAY_SIZE(imx214_powerup_regset), + .powerup_regs = imx214_powerup_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + + .id_reg_items = ARRAY_SIZE(imx214_sensor_detect_regset), + .id_regs = imx214_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx214_sensor_subdevs), + .subdevs = imx214_sensor_subdevs, + + .sensor_limits = &imx214_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx214_pll_configurations), + .pll_configs = imx214_pll_configurations, + + .modes_items = ARRAY_SIZE(imx214_modes), + .modes = imx214_modes, + .fail_safe_mode_index = 3, + + .streamon_regs_items = ARRAY_SIZE(imx214_streamon_regs), + .streamon_regs = imx214_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx214_streamoff_regs), + .streamoff_regs = imx214_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx214_v4l2_ctrls), + .v4l2_ctrl_bank = imx214_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx214_crl_csi_data_fmt), + .csi_fmts = imx214_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx214_flip_configurations), + .flip_data = imx214_flip_configurations, + + .crl_nvm_info.nvm_flags = CRL_NVM_ADDR_MODE_8BIT, + .crl_nvm_info.nvm_preop_regs_items = 0, + .crl_nvm_info.nvm_postop_regs_items = 0, + .crl_nvm_info.nvm_blobs_items = ARRAY_SIZE(imx214_nvm_blobs), + .crl_nvm_info.nvm_config = imx214_nvm_blobs, +}; + + + + + + +#endif /* __CRLMODULE_DUMMY_imx230_CONFIGURATION_H_ */ + + diff --git a/drivers/media/i2c/crlmodule/crl_imx230_configuration.h b/drivers/media/i2c/crlmodule/crl_imx230_configuration.h new file mode 100644 index 000000000000..c19afad589e5 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx230_configuration.h @@ -0,0 +1,2367 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation + * + * Author: Vinod Govindapillai + * + */ + +#ifndef __CRLMODULE_imx230_CONFIGURATION_H_ +#define __CRLMODULE_imx230_CONFIGURATION_H_ + +#include "crlmodule-nvm.h" +#include "crlmodule-sensor-ds.h" + + +static struct crl_register_write_rep imx230_pll_1500mbps[] = { + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /* EXT clock 24 MHz*/ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, 
CRL_REG_LEN_08BIT, 0x04 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0307, CRL_REG_LEN_08BIT, 0xc8 }, + { 0x0309, CRL_REG_LEN_08BIT, 0x0A }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x0F }, + { 0x030E, CRL_REG_LEN_08BIT, 0x03 }, + { 0x030F, CRL_REG_LEN_08BIT, 0xa9 }, + { 0x0310, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, /* Mipi settings, 4 lane */ + { 0x0820, CRL_REG_LEN_08BIT, 0x17 }, /*Data rate setting*/ + { 0x0821, CRL_REG_LEN_08BIT, 0x6c }, + { 0x0822, CRL_REG_LEN_08BIT, 0xcc }, + { 0x0823, CRL_REG_LEN_08BIT, 0xcc }, + { 0x0808, CRL_REG_LEN_08BIT, 0x01 }, +}; + +/* PLL settings for CSI lanes: 4, RAW14 output */ +static struct crl_register_write_rep imx230_pll_4_14_1500mbps[] = { + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /* EXT clock 24 MHz*/ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0307, CRL_REG_LEN_08BIT, 0xbf }, + { 0x0309, CRL_REG_LEN_08BIT, 0x0e }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x04 }, + { 0x030E, CRL_REG_LEN_08BIT, 0x00 }, + { 0x030F, CRL_REG_LEN_08BIT, 0xfa }, + { 0x0310, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, /* Mipi settings, 4 lane */ + { 0x0820, CRL_REG_LEN_08BIT, 0x17 }, /*Data rate setting*/ + { 0x0821, CRL_REG_LEN_08BIT, 0x70 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0808, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx230_pll_2_10_1500mbps[] = { + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /* EXT clock 24 MHz*/ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0307, CRL_REG_LEN_08BIT, 0xc8 }, + { 0x0309, CRL_REG_LEN_08BIT, 0x0A }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x0F }, + { 0x030E, CRL_REG_LEN_08BIT, 0x03 }, + { 0x030F, CRL_REG_LEN_08BIT, 0xa9 }, + { 0x0310, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0114, CRL_REG_LEN_08BIT, 0x01 }, /* Mipi settings, 2 lane */ + { 0x0820, CRL_REG_LEN_08BIT, 0x09 }, /*Data rate setting*/ + { 0x0821, CRL_REG_LEN_08BIT, 0x60 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0808, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx230_pll_2_8_1500mbps[] = { + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /* EXT clock 24 MHz*/ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0307, CRL_REG_LEN_08BIT, 0xc8 }, + { 0x0309, CRL_REG_LEN_08BIT, 0x08 }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x0F }, + { 0x030E, CRL_REG_LEN_08BIT, 0x03 }, + { 0x030F, CRL_REG_LEN_08BIT, 0xa9 }, + { 0x0310, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0114, CRL_REG_LEN_08BIT, 0x01 }, /* Mipi settings, 2 lane */ + { 0x0820, CRL_REG_LEN_08BIT, 0x09 }, /*Data rate setting*/ + { 0x0821, CRL_REG_LEN_08BIT, 0x60 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0808, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_register_write_rep imx230_powerup_regset[] = { + { 0x4800, CRL_REG_LEN_08BIT, 0x0E }, + { 0x4890, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4D1E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4D1F, 
CRL_REG_LEN_08BIT, 0xFF }, + { 0x4FA0, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4FA1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4FA2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4FA3, CRL_REG_LEN_08BIT, 0x83 }, + { 0x6153, CRL_REG_LEN_08BIT, 0x01 }, + { 0x6156, CRL_REG_LEN_08BIT, 0x01 }, + { 0x69BB, CRL_REG_LEN_08BIT, 0x01 }, + { 0x69BC, CRL_REG_LEN_08BIT, 0x05 }, + { 0x69BD, CRL_REG_LEN_08BIT, 0x05 }, + { 0x69C1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x69C4, CRL_REG_LEN_08BIT, 0x01 }, + { 0x69C6, CRL_REG_LEN_08BIT, 0x01 }, + { 0x7300, CRL_REG_LEN_08BIT, 0x00 }, + { 0x9009, CRL_REG_LEN_08BIT, 0x1A }, + { 0xB040, CRL_REG_LEN_08BIT, 0x90 }, + { 0xB041, CRL_REG_LEN_08BIT, 0x14 }, + { 0xB042, CRL_REG_LEN_08BIT, 0x6B }, + { 0xB043, CRL_REG_LEN_08BIT, 0x43 }, + { 0xB044, CRL_REG_LEN_08BIT, 0x63 }, + { 0xB045, CRL_REG_LEN_08BIT, 0x2A }, + { 0xB046, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB047, CRL_REG_LEN_08BIT, 0x06 }, + { 0xB048, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB049, CRL_REG_LEN_08BIT, 0x07 }, + { 0xB04A, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB04B, CRL_REG_LEN_08BIT, 0x04 }, + { 0xB04C, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB04D, CRL_REG_LEN_08BIT, 0x05 }, + { 0xB04E, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB04F, CRL_REG_LEN_08BIT, 0x16 }, + { 0xB050, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB051, CRL_REG_LEN_08BIT, 0x17 }, + { 0xB052, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB053, CRL_REG_LEN_08BIT, 0x74 }, + { 0xB054, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB055, CRL_REG_LEN_08BIT, 0x75 }, + { 0xB056, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB057, CRL_REG_LEN_08BIT, 0x76 }, + { 0xB058, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB059, CRL_REG_LEN_08BIT, 0x77 }, + { 0xB05A, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB05B, CRL_REG_LEN_08BIT, 0x7A }, + { 0xB05C, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB05D, CRL_REG_LEN_08BIT, 0x7B }, + { 0xB05E, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB05F, CRL_REG_LEN_08BIT, 0x0A }, + { 0xB060, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB061, CRL_REG_LEN_08BIT, 0x0B }, + { 0xB062, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB063, CRL_REG_LEN_08BIT, 0x08 }, + { 0xB064, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB065, CRL_REG_LEN_08BIT, 0x09 }, + { 0xB066, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB067, CRL_REG_LEN_08BIT, 0x0E }, + { 0xB068, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB069, CRL_REG_LEN_08BIT, 0x0F }, + { 0xB06A, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB06B, CRL_REG_LEN_08BIT, 0x0C }, + { 0xB06C, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB06D, CRL_REG_LEN_08BIT, 0x0D }, + { 0xB06E, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB06F, CRL_REG_LEN_08BIT, 0x13 }, + { 0xB070, CRL_REG_LEN_08BIT, 0x68 }, + { 0xB071, CRL_REG_LEN_08BIT, 0x12 }, + { 0xB072, CRL_REG_LEN_08BIT, 0x90 }, + { 0xB073, CRL_REG_LEN_08BIT, 0x0E }, + { 0xD000, CRL_REG_LEN_08BIT, 0xDA }, + { 0xD001, CRL_REG_LEN_08BIT, 0xDA }, + { 0xD002, CRL_REG_LEN_08BIT, 0xAF }, + { 0xD003, CRL_REG_LEN_08BIT, 0xE1 }, + { 0xD004, CRL_REG_LEN_08BIT, 0x55 }, + { 0xD005, CRL_REG_LEN_08BIT, 0x34 }, + { 0xD006, CRL_REG_LEN_08BIT, 0x21 }, + { 0xD007, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD008, CRL_REG_LEN_08BIT, 0x1C }, + { 0xD009, CRL_REG_LEN_08BIT, 0x80 }, + { 0xD00A, CRL_REG_LEN_08BIT, 0xFE }, + { 0xD00B, CRL_REG_LEN_08BIT, 0xC5 }, + { 0xD00C, CRL_REG_LEN_08BIT, 0x55 }, + { 0xD00D, CRL_REG_LEN_08BIT, 0xDC }, + { 0xD00E, CRL_REG_LEN_08BIT, 0xB6 }, + { 0xD00F, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD010, CRL_REG_LEN_08BIT, 0x31 }, + { 0xD011, CRL_REG_LEN_08BIT, 0x02 }, + { 0xD012, CRL_REG_LEN_08BIT, 0x4A }, + { 0xD013, CRL_REG_LEN_08BIT, 0x0E }, + { 0xD014, CRL_REG_LEN_08BIT, 0x55 }, + { 0xD015, CRL_REG_LEN_08BIT, 0xF0 }, + { 0xD016, CRL_REG_LEN_08BIT, 0x1B }, + { 0xD017, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD018, 
CRL_REG_LEN_08BIT, 0xFA }, + { 0xD019, CRL_REG_LEN_08BIT, 0x2C }, + { 0xD01A, CRL_REG_LEN_08BIT, 0xF1 }, + { 0xD01B, CRL_REG_LEN_08BIT, 0x7E }, + { 0xD01C, CRL_REG_LEN_08BIT, 0x55 }, + { 0xD01D, CRL_REG_LEN_08BIT, 0x1C }, + { 0xD01E, CRL_REG_LEN_08BIT, 0xD8 }, + { 0xD01F, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD020, CRL_REG_LEN_08BIT, 0x76 }, + { 0xD021, CRL_REG_LEN_08BIT, 0xC1 }, + { 0xD022, CRL_REG_LEN_08BIT, 0xBF }, + { 0xD044, CRL_REG_LEN_08BIT, 0x40 }, + { 0xD045, CRL_REG_LEN_08BIT, 0xBA }, + { 0xD046, CRL_REG_LEN_08BIT, 0x70 }, + { 0xD047, CRL_REG_LEN_08BIT, 0x47 }, + { 0xD048, CRL_REG_LEN_08BIT, 0xC0 }, + { 0xD049, CRL_REG_LEN_08BIT, 0xBA }, + { 0xD04A, CRL_REG_LEN_08BIT, 0x70 }, + { 0xD04B, CRL_REG_LEN_08BIT, 0x47 }, + { 0xD04C, CRL_REG_LEN_08BIT, 0x82 }, + { 0xD04D, CRL_REG_LEN_08BIT, 0xF6 }, + { 0xD04E, CRL_REG_LEN_08BIT, 0xDA }, + { 0xD04F, CRL_REG_LEN_08BIT, 0xFA }, + { 0xD050, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD051, CRL_REG_LEN_08BIT, 0xF0 }, + { 0xD052, CRL_REG_LEN_08BIT, 0x02 }, + { 0xD053, CRL_REG_LEN_08BIT, 0xF8 }, + { 0xD054, CRL_REG_LEN_08BIT, 0x81 }, + { 0xD055, CRL_REG_LEN_08BIT, 0xF6 }, + { 0xD056, CRL_REG_LEN_08BIT, 0xCE }, + { 0xD057, CRL_REG_LEN_08BIT, 0xFD }, + { 0xD058, CRL_REG_LEN_08BIT, 0x10 }, + { 0xD059, CRL_REG_LEN_08BIT, 0xB5 }, + { 0xD05A, CRL_REG_LEN_08BIT, 0x0D }, + { 0xD05B, CRL_REG_LEN_08BIT, 0x48 }, + { 0xD05C, CRL_REG_LEN_08BIT, 0x40 }, + { 0xD05D, CRL_REG_LEN_08BIT, 0x7A }, + { 0xD05E, CRL_REG_LEN_08BIT, 0x01 }, + { 0xD05F, CRL_REG_LEN_08BIT, 0x28 }, + { 0xD060, CRL_REG_LEN_08BIT, 0x15 }, + { 0xD061, CRL_REG_LEN_08BIT, 0xD1 }, + { 0xD062, CRL_REG_LEN_08BIT, 0x0C }, + { 0xD063, CRL_REG_LEN_08BIT, 0x49 }, + { 0xD064, CRL_REG_LEN_08BIT, 0x0C }, + { 0xD065, CRL_REG_LEN_08BIT, 0x46 }, + { 0xD066, CRL_REG_LEN_08BIT, 0x40 }, + { 0xD067, CRL_REG_LEN_08BIT, 0x3C }, + { 0xD068, CRL_REG_LEN_08BIT, 0x48 }, + { 0xD069, CRL_REG_LEN_08BIT, 0x8A }, + { 0xD06A, CRL_REG_LEN_08BIT, 0x62 }, + { 0xD06B, CRL_REG_LEN_08BIT, 0x8A }, + { 0xD06C, CRL_REG_LEN_08BIT, 0x80 }, + { 0xD06D, CRL_REG_LEN_08BIT, 0x1A }, + { 0xD06E, CRL_REG_LEN_08BIT, 0x8A }, + { 0xD06F, CRL_REG_LEN_08BIT, 0x89 }, + { 0xD070, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD071, CRL_REG_LEN_08BIT, 0xB2 }, + { 0xD072, CRL_REG_LEN_08BIT, 0x10 }, + { 0xD073, CRL_REG_LEN_08BIT, 0x18 }, + { 0xD074, CRL_REG_LEN_08BIT, 0x0A }, + { 0xD075, CRL_REG_LEN_08BIT, 0x46 }, + { 0xD076, CRL_REG_LEN_08BIT, 0x20 }, + { 0xD077, CRL_REG_LEN_08BIT, 0x32 }, + { 0xD078, CRL_REG_LEN_08BIT, 0x12 }, + { 0xD079, CRL_REG_LEN_08BIT, 0x88 }, + { 0xD07A, CRL_REG_LEN_08BIT, 0x90 }, + { 0xD07B, CRL_REG_LEN_08BIT, 0x42 }, + { 0xD07C, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD07D, CRL_REG_LEN_08BIT, 0xDA }, + { 0xD07E, CRL_REG_LEN_08BIT, 0x10 }, + { 0xD07F, CRL_REG_LEN_08BIT, 0x46 }, + { 0xD080, CRL_REG_LEN_08BIT, 0x80 }, + { 0xD081, CRL_REG_LEN_08BIT, 0xB2 }, + { 0xD082, CRL_REG_LEN_08BIT, 0x88 }, + { 0xD083, CRL_REG_LEN_08BIT, 0x81 }, + { 0xD084, CRL_REG_LEN_08BIT, 0x84 }, + { 0xD085, CRL_REG_LEN_08BIT, 0xF6 }, + { 0xD086, CRL_REG_LEN_08BIT, 0x06 }, + { 0xD087, CRL_REG_LEN_08BIT, 0xF8 }, + { 0xD088, CRL_REG_LEN_08BIT, 0xE0 }, + { 0xD089, CRL_REG_LEN_08BIT, 0x67 }, + { 0xD08A, CRL_REG_LEN_08BIT, 0x85 }, + { 0xD08B, CRL_REG_LEN_08BIT, 0xF6 }, + { 0xD08C, CRL_REG_LEN_08BIT, 0x4B }, + { 0xD08D, CRL_REG_LEN_08BIT, 0xFC }, + { 0xD08E, CRL_REG_LEN_08BIT, 0x10 }, + { 0xD08F, CRL_REG_LEN_08BIT, 0xBD }, + { 0xD090, CRL_REG_LEN_08BIT, 0x00 }, + { 0xD091, CRL_REG_LEN_08BIT, 0x18 }, + { 0xD092, CRL_REG_LEN_08BIT, 0x1E }, + { 0xD093, CRL_REG_LEN_08BIT, 0x78 }, + { 0xD094, 
CRL_REG_LEN_08BIT, 0x00 }, + { 0xD095, CRL_REG_LEN_08BIT, 0x18 }, + { 0xD096, CRL_REG_LEN_08BIT, 0x17 }, + { 0xD097, CRL_REG_LEN_08BIT, 0x98 }, + { 0x5869, CRL_REG_LEN_08BIT, 0x01 }, /*Global settings done*/ + { 0x0216, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0217, CRL_REG_LEN_08BIT, 0x00 }, + { 0x020E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x020F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0210, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0211, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0212, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0213, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0214, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0215, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A21, CRL_REG_LEN_08BIT, 0x00 }, /* LSC setting */ + { 0x3011, CRL_REG_LEN_08BIT, 0x00 }, /* STATS Calc enable/disable */ + { 0x3013, CRL_REG_LEN_08BIT, 0x00 }, /*stats output enable/disable */ + { 0x5041, CRL_REG_LEN_08BIT, 0x04 }, /*embedded data on/off, 4 lines */ + { 0x0138, CRL_REG_LEN_08BIT, 0x01 }, /* Temperature control enable */ +}; + +static struct crl_register_write_rep imx230_mode_2k2k[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034B, CRL_REG_LEN_08BIT, 0xAF }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0A }, /*Output*/ + { 0x034D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x07 }, + { 0x034F, CRL_REG_LEN_08BIT, 0xD7 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x0A }, + { 0x040D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x07 }, + { 0x040F, CRL_REG_LEN_08BIT, 0xD7 }, + { 0x697D, CRL_REG_LEN_08BIT, 0x02 }, /* PAF settings */ + { 0x6985, CRL_REG_LEN_08BIT, 0x02 }, + { 0x698D, CRL_REG_LEN_08BIT, 0x0B }, + { 0x6995, CRL_REG_LEN_08BIT, 0x0B }, + { 0x699D, CRL_REG_LEN_08BIT, 0x16 }, + { 0x69A5, CRL_REG_LEN_08BIT, 0x16 }, + { 0x69AD, CRL_REG_LEN_08BIT, 0x1F }, + { 0x69B5, CRL_REG_LEN_08BIT, 0x1F }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xD8 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0F }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xAF }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + +/* UHD Scale */ +static struct crl_register_write_rep imx230_mode_4k2k[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0347, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, 
CRL_REG_LEN_08BIT, 0x0D }, + { 0x034B, CRL_REG_LEN_08BIT, 0xB7 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034D, CRL_REG_LEN_08BIT, 0x2E }, + { 0x034E, CRL_REG_LEN_08BIT, 0x08 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x88 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x16 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x040D, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x0B }, + { 0x040F, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x0B }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0D }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xB7 }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + + +/* UHD crop*/ +static struct crl_register_write_rep imx230_mode_uhd_crop[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0347, CRL_REG_LEN_08BIT, 0xA0 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034B, CRL_REG_LEN_08BIT, 0x0F }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x08 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x70 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x0F }, + { 0x040D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x08 }, + { 0x040F, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3A26, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0xA0 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3A36, CRL_REG_LEN_08BIT, 0x0F }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; 
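Every mode table in this file programs its 16-bit geometry values (analog crop at 0x0344-0x034B, output size at 0x034C-0x034F, digital crop at 0x0408-0x040F) as big-endian pairs of 8-bit writes; the UHD crop table above, for instance, sets the output size to 0x0F00 x 0x0870 = 3840 x 2160. A minimal sketch of that split, assuming a hypothetical REG16 helper that is not part of crlmodule:

/* Illustrative only: expand one 16-bit value into the high/low 8-bit
 * register pair used throughout these mode tables.
 */
#define REG16(addr, val) \
	{ (addr), CRL_REG_LEN_08BIT, ((val) >> 8) & 0xff }, \
	{ (addr) + 1, CRL_REG_LEN_08BIT, (val) & 0xff }

/* The UHD crop output size above could then be written as: */
static struct crl_register_write_rep uhd_out_size_sketch[] = {
	REG16(0x034C, 3840),	/* x_output_size = 0x0F00 */
	REG16(0x034E, 2160),	/* y_output_size = 0x0870 */
};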
+ + +/*5344 x 4016*/ +static struct crl_register_write_rep imx230_mode_full_4_3[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034B, CRL_REG_LEN_08BIT, 0xAF }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x034D, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034F, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x040D, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x0F }, + { 0x040F, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x00 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xAF }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A21, CRL_REG_LEN_08BIT, 0x02 }, +}; + +/*5344 x 3008*/ +static struct crl_register_write_rep imx230_mode_full_16_9[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0347, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0D }, + { 0x034B, CRL_REG_LEN_08BIT, 0xB7 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x034D, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x0B }, + { 0x034F, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x040D, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x0B }, + { 0x040F, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x0B }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, 
CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0D }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xB7 }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx230_mode_3264x2448_crop[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034B, CRL_REG_LEN_08BIT, 0x9F }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034D, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x09 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x90 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x10 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x0C }, + { 0x040D, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x09 }, + { 0x040F, CRL_REG_LEN_08BIT, 0x90 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x09 }, + { 0x3A26, CRL_REG_LEN_08BIT, 0x90 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3A36, CRL_REG_LEN_08BIT, 0x9F }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx230_mode_3264x2448_scale[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034B, CRL_REG_LEN_08BIT, 0xAF }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034D, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x09 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x90 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x1A }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x040D, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x0F 
}, /*dig crop y*/ + { 0x040F, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xAF }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx230_mode_3280x2460_scale[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034B, CRL_REG_LEN_08BIT, 0xAF }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0C }, + { 0x034D, CRL_REG_LEN_08BIT, 0xD0 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x09 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x9C }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x1A }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x040D, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x0F }, + { 0x040F, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xAF }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx230_mode_3336x2502_scale[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034B, CRL_REG_LEN_08BIT, 0xAF }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0D }, + { 0x034D, CRL_REG_LEN_08BIT, 0x08 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x09 }, + { 0x034F, CRL_REG_LEN_08BIT, 0xC6 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, 
CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x19 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x040D, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x0F }, + { 0x040F, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xAF }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx230_mode_2672x1504[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0347, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0D }, + { 0x034B, CRL_REG_LEN_08BIT, 0xB7 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x74 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0A }, + { 0x034D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x05 }, + { 0x034F, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x0A }, + { 0x040D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x05 }, + { 0x040F, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0D }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xB7 }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx230_mode_1940x1092[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0347, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0D }, + { 0x034B, CRL_REG_LEN_08BIT, 0xB7 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, 
CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x07 } /*1920 x 1080*/, + { 0x034D, CRL_REG_LEN_08BIT, 0x94 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x44 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x16 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x0A }, + { 0x040D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x05 }, + { 0x040F, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0D }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xB7 }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep imx230_mode_1440[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x08 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0A }, + { 0x034B, CRL_REG_LEN_08BIT, 0xA7 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0A }, /* 2560 x 1440 */ + { 0x034D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x05 }, + { 0x034F, CRL_REG_LEN_08BIT, 0xA0 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x70 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x0A }, + { 0x040D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x05 }, + { 0x040F, CRL_REG_LEN_08BIT, 0xA0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xA0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0A }, + { 0x3A36, CRL_REG_LEN_08BIT, 0xA7 }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + + +static struct crl_register_write_rep imx230_mode_720[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x18 }, + 
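/* 0x0344-0x034B: analog crop window as 16-bit big-endian pairs (X/Y start above, X/Y end next); here Y spans 0x0218-0x0D97, i.e. rows 536-3479 = 2944 lines, which the 4x binning reduces to the 736-line output */ +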
{ 0x0348, CRL_REG_LEN_08BIT, 0x14 }, + { 0x0349, CRL_REG_LEN_08BIT, 0xDF }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0D }, + { 0x034B, CRL_REG_LEN_08BIT, 0x97 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x44 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x305C, CRL_REG_LEN_08BIT, 0x11 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x05 } /* 1296 x 736 */, + { 0x034D, CRL_REG_LEN_08BIT, 0x10 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x02 }, + { 0x034F, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x14 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x05 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x10 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x02 }, + { 0x040F, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A22, CRL_REG_LEN_08BIT, 0x20 }, /* DPC2D settings */ + { 0x3A23, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A24, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A25, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3A26, CRL_REG_LEN_08BIT, 0xE0 }, + { 0x3A2F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A30, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A31, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3A32, CRL_REG_LEN_08BIT, 0x18 }, + { 0x3A33, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3A34, CRL_REG_LEN_08BIT, 0xDF }, + { 0x3A35, CRL_REG_LEN_08BIT, 0x0D }, + { 0x3A36, CRL_REG_LEN_08BIT, 0x97 }, + { 0x3A37, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3A38, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3A39, CRL_REG_LEN_08BIT, 0x00 }, +}; + + +static struct crl_register_write_rep imx230_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep imx230_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_register_write_rep imx230_data_fmt_width10[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0a0a } +}; + +static struct crl_register_write_rep imx230_data_fmt_width8[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0808 } +}; + +static struct crl_register_write_rep imx230_data_fmt_width14[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0e0e } +}; + +static struct crl_arithmetic_ops imx230_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_dynamic_register_access imx230_h_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x1, + }, +}; + +static struct crl_dynamic_register_access imx230_v_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(imx230_vflip_ops), + .ops = imx230_vflip_ops, + .mask = 0x2, + }, +}; + + +static struct crl_dynamic_register_access imx230_ana_gain_global_regs[] = { + { + .address = 0x0204, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx230_dig_gain_regs[] = { + { + .address = 0x020e, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xfff, + }, + { + .address = 0x0210, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xfff, + }, + { + .address = 0x0212, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xfff, + }, + { + .address = 0x0214, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xfff, + }, +}; + + + +static 
struct crl_dynamic_register_access imx230_exposure_regs[] = { + { + .address = 0x0202, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + } +}; + +static struct crl_dynamic_register_access imx230_fll_regs[] = { + { + .address = 0x0340, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx230_llp_regs[] = { + { + .address = 0x0342, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx230_hdr_et_ratio_regs[] = { + { 0x0222, CRL_REG_LEN_08BIT, 0xff, 0, NULL, 0 }, +}; + +static struct crl_register_write_rep imx230_hdr_mode_off[] = { + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x74 }, /* HDR output control */ + { 0x3001, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3006, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3007, CRL_REG_LEN_08BIT, 0x02 }, + { 0x31e0, CRL_REG_LEN_08BIT, 0x03 }, + { 0x31e1, CRL_REG_LEN_08BIT, 0xff }, + { 0x31e4, CRL_REG_LEN_08BIT, 0x02 }, + { 0x30b4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b8, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b9, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30ba, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30bb, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30bc, CRL_REG_LEN_08BIT, 0x00 }, +}; + +/* HDR Type3 ZIGZAG */ +static struct crl_register_write_rep imx230_hdr_mode_type3[] = { + /* + * 0x220 HDR control register + * bit 0: 0:HDR Disable 1:HDR enable *1-> below + * bit 1: 0:Combined gain 1:separate gain *0-> below + * bit 5: 0:Use ET Ratio 1:Short exposure by direct control *0-> below + */ + { 0x0220, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + /* Enable ATR 0x3000 bit 0 */ + { 0x3000, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3001, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3006, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3007, CRL_REG_LEN_08BIT, 0x02 }, + { 0x31e0, CRL_REG_LEN_08BIT, 0x03 }, + { 0x31e4, CRL_REG_LEN_08BIT, 0x02 }, + { 0x30b4, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30b5, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30b6, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30b7, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30b8, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30b9, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30ba, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30bb, CRL_REG_LEN_08BIT, 0x01 }, + { 0x30bc, CRL_REG_LEN_08BIT, 0x01 }, +}; + +/* HDR Type2 */ +static struct crl_register_write_rep imx230_hdr_mode_type2[] = { + /* + * 0x220 HDR control register + * bit 0: 0:HDR Disable 1:HDR enable *1-> below + * bit 1: 0:Combined gain 1:separate gain *0-> below + * bit 5: 0:Use ET Ratio 1:Short exposure by direct control *0-> below + */ + { 0x0220, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + + /* Disable ATR for Type 2 0x3000 bit 0 */ + { 0x3000, CRL_REG_LEN_08BIT, 0x64 }, + { 0x3001, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3006, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3007, CRL_REG_LEN_08BIT, 0x01 }, + { 0x31e0, CRL_REG_LEN_08BIT, 0x3f }, + { 0x31e4, CRL_REG_LEN_08BIT, 0x02 }, + { 0x30b4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b8, CRL_REG_LEN_08BIT, 0x00 }, + { 
0x30b9, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30ba, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30bb, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30bc, CRL_REG_LEN_08BIT, 0x00 }, +}; + +/* HDR Type1 */ +static struct crl_register_write_rep imx230_hdr_mode_type1[] = { + /* + * 0x220 HDR control register + * bit 0: 0:HDR Disable 1:HDR enable *1-> below + * bit 1: 0:Combined gain 1:separate gain *0-> below + * bit 5: 0:Use ET Ratio 1:Short exposure by direct control *0-> below + */ + { 0x0220, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0224, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0225, CRL_REG_LEN_08BIT, 0xF4 }, + /* ATR is enabled 0x3000 bit 0 */ + { 0x3000, CRL_REG_LEN_08BIT, 0x75 }, + { 0x3001, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3006, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3007, CRL_REG_LEN_08BIT, 0x01 }, + { 0x31e0, CRL_REG_LEN_08BIT, 0x3f }, + { 0x31e4, CRL_REG_LEN_08BIT, 0x02 }, + { 0x30b4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b8, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30b9, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30ba, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30bb, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30bc, CRL_REG_LEN_08BIT, 0x00 }, +}; + +/* + * IMX230 HDR types + * Type 1 10bit output after HDR and ATR blocks + * Type 2 14bit RAW after HDR block + * Type 3 10bit ZIGZAG pattern + */ +static struct crl_dep_reg_list imx230_hdr_types_regs[] = { + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 0 }, + ARRAY_SIZE(imx230_hdr_mode_off), imx230_hdr_mode_off, 0, 0 }, + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 1 }, + ARRAY_SIZE(imx230_hdr_mode_type1), imx230_hdr_mode_type1, 0, 0 }, + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 2 }, + ARRAY_SIZE(imx230_hdr_mode_type2), imx230_hdr_mode_type2, 0, 0 }, + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 3 }, + ARRAY_SIZE(imx230_hdr_mode_type3), imx230_hdr_mode_type3, 0, 0 }, +}; + +/* PDAF ON -> 0x3121 = 1 when HDR is off and 0x3121 = 0 when HDR is on */ +static struct crl_arithmetic_ops imx230_reg3121_pdaf_on[] = { + { CRL_ASSIGNMENT, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, CRL_CID_IMX230_HDR_MODE } }, + { CRL_BITWISE_COMPLEMENT, { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 0 } }, + { CRL_BITWISE_AND, { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 1 } }, +}; + +/* PDAF ON -> 0x3001 = 0 when HDR is off and 0x3001 = 1 when HDR is on */ +static struct crl_arithmetic_ops imx230_reg3001_pdaf_on[] = { + { CRL_ASSIGNMENT, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, CRL_CID_IMX230_HDR_MODE } }, + { CRL_BITWISE_AND, { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 1 } }, +}; + +/* 0x3001 and 0x3121 behave differently when HDR is ON or OFF */ +static struct crl_dynamic_register_access imx230_pdaf_on[] = { + { 0x3121, CRL_REG_LEN_08BIT, 0xff, + ARRAY_SIZE(imx230_reg3121_pdaf_on), imx230_reg3121_pdaf_on, 0 }, + { 0x3001, CRL_REG_LEN_08BIT, 0xff, + ARRAY_SIZE(imx230_reg3001_pdaf_on), imx230_reg3001_pdaf_on, 0 }, + { 0x3123, CRL_REG_LEN_08BIT, 0xff, 0, 0, 0 }, +}; + +/* All the following registers are set to 0 when PDAF is Off */ +static struct crl_dynamic_register_access imx230_pdaf_off[] = { + { 0x3121, CRL_REG_LEN_08BIT, 0xff, 0, 0, 0 }, + { 0x3001, CRL_REG_LEN_08BIT, 0xff, 0, 0, 0 }, + { 0x3123, CRL_REG_LEN_08BIT, 0xff, 0, 0, 0 }, +}; + +/* + * There are two different registers to enable/disable PDAF with HDR On and Off + * + * PDAF On, HDR Off -> 0x3121: 1, 0x3001: 0, 0x3123: 1 + * PDAF Off, HDR 
Off -> 0x3121: 0, 0x3001: 0, 0x3123: 0 + * PDAF Off, HDR On -> 0x3121: 0, 0x3001: 0, 0x3123: 0 + * PDAF On, HDR On -> 0x3121: 0, 0x3001: 1, 0x3123: 1 + */ +static struct crl_dep_reg_list imx230_pdaf_ctrl_regs[] = { + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 1 }, 0, 0, + ARRAY_SIZE(imx230_pdaf_on), imx230_pdaf_on }, + { CRL_DEP_CTRL_CONDITION_EQUAL, + { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 0 }, 0, 0, + ARRAY_SIZE(imx230_pdaf_off), imx230_pdaf_off }, +}; + +/* PDAF enable controls are dependent on HDR being on or off */ +struct crl_dep_ctrl_provision imx230_hdr_dep_controls[] = { + /* Self-update PDAF settings after a change in HDR settings */ + { CRL_CID_SENSOR_PDAF, CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL, 0, 0 }, +}; + +static struct crl_sensor_detect_config imx230_sensor_detect_regset[] = { + { + .reg = { 0x0019, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 5, + }, + { + .reg = { 0x0018, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 5, + }, + { + .reg = { 0x0016, CRL_REG_LEN_16BIT, 0x0000ffff }, + .width = 7, + }, +}; + +static struct crl_arithmetic_ops imx230_thermal_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_REG_VAL, + .operand.entity_val = 0x013a, + }, +}; + +static struct crl_dynamic_register_access imx230_thermal_regs[] = { + { + .address = 0x013a, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx230_thermal_ops), + .ops = imx230_thermal_ops, + .mask = 0xff, + }, +}; + +static struct crl_pll_configuration imx230_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 749600000, + .bitsperpixel = 10, + .pixel_rate_csi = 599680000, + .pixel_rate_pa = 600000000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx230_pll_1500mbps), + .pll_regs = imx230_pll_1500mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 749600000, /* Actual value is 750000000 */ + .bitsperpixel = 14, + .pixel_rate_csi = 428570000, + .pixel_rate_pa = 573000000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx230_pll_4_14_1500mbps), + .pll_regs = imx230_pll_4_14_1500mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 749600000, + .bitsperpixel = 8, + .pixel_rate_csi = 374800000, + .pixel_rate_pa = 600000000, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx230_pll_2_8_1500mbps), + .pll_regs = imx230_pll_2_8_1500mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 749600000, + .bitsperpixel = 10, + .pixel_rate_csi = 299840000, + .pixel_rate_pa = 600000000, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx230_pll_2_10_1500mbps), + .pll_regs = imx230_pll_2_10_1500mbps, + }, +}; + +static struct crl_subdev_rect_rep imx230_full_4_3_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_3280x2460_s_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect 
= { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 3280, 2460 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_3264x2448_s_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 3264, 2448 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_3336x2502_s_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 3336, 2502 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_3264x2448_c_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 504, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 3264, 2448 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_full_16_9_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 504, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 5344, 3008 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_4k2k_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 504, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 3886, 2184 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_uhd_crop_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 928, 5344, 2160 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 2160 }, + .out_rect = { 0, 0, 5344, 2160 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 2160 }, + .out_rect = { 0, 0, 3840, 2160 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_2k2k_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 4016 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 2672, 2008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 2672, 2008 }, + .out_rect = { 0, 0, 2672, 2008 }, + }, +}; + + +static struct crl_subdev_rect_rep imx230_1940x1092_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 504, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 2672, 1504 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 2672, 1504 }, + .out_rect = { 0, 
0, 1940, 1092 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_2672x1504_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 504, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 2672, 1504 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 2672, 1504 }, + .out_rect = { 0, 0, 2672, 1504 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_1440_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 1288, 5344, 1440 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 1440 }, + .out_rect = { 0, 0, 5344, 1440 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5344, 1440 }, + .out_rect = { 0, 0, 2560, 1440 }, + }, +}; + +static struct crl_subdev_rect_rep imx230_720_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5344, 4016 }, + .out_rect = { 0, 0, 5344, 3008 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5344, 3008 }, + .out_rect = { 0, 0, 1336, 752 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 1336, 752 }, + .out_rect = { 0, 0, 1296, 736 }, + }, +}; + +static struct crl_mode_rep imx230_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(imx230_full_4_3_rects), + .sd_rects = imx230_full_4_3_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 5344, + .height = 4016, + .min_llp = 6024, + .min_fll = 4106, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_full_4_3), + .mode_regs = imx230_mode_full_4_3, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_3280x2460_s_rects), + .sd_rects = imx230_3280x2460_s_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 26, + .width = 3280, + .height = 2460, + .min_llp = 6024, + .min_fll = 4106, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_3280x2460_scale), + .mode_regs = imx230_mode_3280x2460_scale, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_3264x2448_s_rects), + .sd_rects = imx230_3264x2448_s_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 26, + .width = 3264, + .height = 2448, + .min_llp = 6024, + .min_fll = 4106, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_3264x2448_scale), + .mode_regs = imx230_mode_3264x2448_scale, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_3336x2502_s_rects), + .sd_rects = imx230_3336x2502_s_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 25, + .width = 3336, + .height = 2502, + .min_llp = 6024, + .min_fll = 4106, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_3336x2502_scale), + .mode_regs = imx230_mode_3336x2502_scale, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_3264x2448_c_rects), + .sd_rects = imx230_3264x2448_c_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3264, + .height = 2448, + .min_llp = 6024, + .min_fll = 2538, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_3264x2448_crop), + .mode_regs = imx230_mode_3264x2448_crop, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_full_16_9_rects), + .sd_rects = imx230_full_16_9_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 5344, + .height = 3008, + .min_llp = 6024, + .min_fll = 3098, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_full_16_9), + 
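/* with the 10-bit 4-lane PLL (pixel_rate_pa = 600 MHz), the shortest frame for this mode works out to min_llp * min_fll / pixel_rate_pa = 6024 * 3098 / 600 MHz, roughly 31 ms, i.e. about 32 fps */ +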
.mode_regs = imx230_mode_full_16_9, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_4k2k_rects), + .sd_rects = imx230_4k2k_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 22, + .width = 3886, + .height = 2184, + .min_llp = 6024, + .min_fll = 3300, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_4k2k), + .mode_regs = imx230_mode_4k2k, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_uhd_crop_rects), + .sd_rects = imx230_uhd_crop_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3840, + .height = 2160, + .min_llp = 6024, + .min_fll = 2250, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_uhd_crop), + .mode_regs = imx230_mode_uhd_crop, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_2k2k_rects), + .sd_rects = imx230_2k2k_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 2672, + .height = 2008, + .min_llp = 6024, + .min_fll = 2108, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_2k2k), + .mode_regs = imx230_mode_2k2k, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_2672x1504_rects), + .sd_rects = imx230_2672x1504_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 2672, + .height = 1504, + .min_llp = 6024, + .min_fll = 1660, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_2672x1504), + .mode_regs = imx230_mode_2672x1504, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_1940x1092_rects), + .sd_rects = imx230_1940x1092_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 22, + .width = 1940, + .height = 1092, + .min_llp = 6024, + .min_fll = 1660, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_1940x1092), + .mode_regs = imx230_mode_1940x1092, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_1440_rects), + .sd_rects = imx230_1440_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 2560, + .height = 1440, + .min_llp = 6024, + .min_fll = 1530, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_1440), + .mode_regs = imx230_mode_1440, + }, + { + .sd_rects_items = ARRAY_SIZE(imx230_720_rects), + .sd_rects = imx230_720_rects, + .binn_hor = 4, + .binn_vert = 4, + .scale_m = 1, + .width = 1296, + .height = 736, + .min_llp = 6024, + .min_fll = 826, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx230_mode_720), + .mode_regs = imx230_mode_720, + }, +}; + +static struct crl_sensor_subdev_config imx230_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "imx230 scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx230 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx230 pixel array", + }, +}; + +static struct crl_sensor_limits imx230_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 5344, + .y_addr_max = 4016, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 16, + .scaler_m_max = 255, + .scaler_n_min = 16, + .scaler_n_max = 16, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data imx230_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = 
CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, +}; + +static struct crl_csi_data_fmt imx230_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 1, + .regs = imx230_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx230_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx230_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx230_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx230_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx230_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx230_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx230_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SGRBG14_1X14, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .regs_items = 1, + .bits_per_pixel = 14, + .regs = imx230_data_fmt_width14, + }, + { + .code = MEDIA_BUS_FMT_SRGGB14_1X14, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 14, + .regs = imx230_data_fmt_width14, + }, + { + .code = MEDIA_BUS_FMT_SBGGR14_1X14, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 14, + .regs = imx230_data_fmt_width14, + }, + { + .code = MEDIA_BUS_FMT_SGBRG14_1X14, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 14, + .regs = imx230_data_fmt_width14, + }, +}; + +static const char * const imx230_hdr_types[] = { + "HDR Off", + "HDR Type1", + "HDR Type2", + "HDRC Type3", +}; + +static struct crl_v4l2_ctrl imx230_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, +
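/* GET-only rate control: nothing is written to the sensor; the reported rate presumably comes from the active PLL configuration */ +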
.dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 448, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_ana_gain_global_regs), + .regs = imx230_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_exposure_regs), + .regs = imx230_exposure_regs, + .dep_items = 0, /* FLL is changed automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_h_flip_regs), + .regs = imx230_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_v_flip_regs), + .regs = imx230_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 4130, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_fll_regs), + .regs = imx230_fll_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 6024, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 6024, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_llp_regs), + .regs = imx230_llp_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_GAIN, + .name = "Digital Gain", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4095, + .data.std_data.step = 1, + .data.std_data.def =
256, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_dig_gain_regs), + .regs = imx230_dig_gain_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_STREAMING, + .ctrl_id = CRL_CID_SENSOR_THERMAL_DATA, + .name = "Sensor Thermal Data", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_thermal_regs), + .regs = imx230_thermal_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, /* Cannot be set when streaming? */ + .ctrl_id = CRL_CID_IMX230_HDR_ET_RATIO, + .name = "imx230 HDR ET Ratio", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1, + .data.std_data.max = 16, + .data.std_data.step = 1, + .data.std_data.def = 1, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx230_hdr_et_ratio_regs), + .regs = imx230_hdr_et_ratio_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = CRL_CID_IMX230_HDR_MODE, + .name = "imx230 HDR mode", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.v4l2_menu_items.menu = imx230_hdr_types, + .data.v4l2_menu_items.size = ARRAY_SIZE(imx230_hdr_types), + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = ARRAY_SIZE(imx230_hdr_dep_controls), + .dep_ctrls = imx230_hdr_dep_controls, + .v4l2_type = V4L2_CTRL_TYPE_MENU, + .crl_ctrl_dep_reg_list = ARRAY_SIZE(imx230_hdr_types_regs), + .dep_regs = imx230_hdr_types_regs, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_SENSOR_PDAF, + .name = "CRL_CID_SENSOR_PDAF", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = NULL, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + .crl_ctrl_dep_reg_list = ARRAY_SIZE(imx230_pdaf_ctrl_regs), + .dep_regs = imx230_pdaf_ctrl_regs, + }, +}; + +/* Power items; they are enabled in the order listed here */ +static struct crl_power_seq_entity imx230_power_items[] = { + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VANA", + .val = 2500000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VDIG", + .val = 1100000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VIO", + .val = 1800000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VAF", + .val = 3000000, + .delay = 2000, + }, + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .delay = 10700, + }, +}; + +static struct crl_nvm_blob imx230_nvm_blobs[] = { + { 0x54, 0x00, 0x100 }, + { 0x55, 0x00, 0x100 }, + { 0x56, 0x00, 0x021 }, +}; + +static struct 
crl_arithmetic_ops imx230_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops imx230_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 4, + }, +}; + +static struct crl_frame_desc imx230_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(imx230_frame_desc_width_ops), + .ops = imx230_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(imx230_frame_desc_height_ops), + .ops = imx230_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, +}; + +struct crl_sensor_configuration imx230_crl_configuration = { + + .power_items = ARRAY_SIZE(imx230_power_items), + .power_entities = imx230_power_items, + + .powerup_regs_items = ARRAY_SIZE(imx230_powerup_regset), + .powerup_regs = imx230_powerup_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(imx230_sensor_detect_regset), + .id_regs = imx230_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx230_sensor_subdevs), + .subdevs = imx230_sensor_subdevs, + + .sensor_limits = &imx230_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx230_pll_configurations), + .pll_configs = imx230_pll_configurations, + + .modes_items = ARRAY_SIZE(imx230_modes), + .modes = imx230_modes, + .fail_safe_mode_index = 3, + + .streamon_regs_items = ARRAY_SIZE(imx230_streamon_regs), + .streamon_regs = imx230_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx230_streamoff_regs), + .streamoff_regs = imx230_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx230_v4l2_ctrls), + .v4l2_ctrl_bank = imx230_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx230_crl_csi_data_fmt), + .csi_fmts = imx230_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx230_flip_configurations), + .flip_data = imx230_flip_configurations, + + .crl_nvm_info.nvm_flags = CRL_NVM_ADDR_MODE_8BIT, + .crl_nvm_info.nvm_preop_regs_items = 0, + .crl_nvm_info.nvm_postop_regs_items = 0, + .crl_nvm_info.nvm_blobs_items = ARRAY_SIZE(imx230_nvm_blobs), + .crl_nvm_info.nvm_config = imx230_nvm_blobs, + + .frame_desc_entries = ARRAY_SIZE(imx230_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = imx230_frame_desc, + + .msr_file_name = "00imx230.bxt_rvp.drvb", +}; + +#endif /* __CRLMODULE_IMX230_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx274_configuration.h b/drivers/media/i2c/crlmodule/crl_imx274_configuration.h new file mode 100644 index 000000000000..6ec84fb42a32 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx274_configuration.h @@ -0,0 +1,1272 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation + * + * Author: Yuning Pu + * + */ + +#ifndef __CRLMODULE_IMX274_CONFIGURATION_H_ +#define __CRLMODULE_IMX274_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +#define IMX274_REG_STANDBY 0x3000 /* STBLOGIC STBMIPI STBDV */ + +#define IMX274_HMAX 65535 +#define IMX274_VMAX 1048575 +#define IMX274_MAX_SHS1 65535 +#define IMX274_MAX_SHS2 65535 +#define IMX274_MAX_RHS1 65535 + +/* imx274 mode standby
cancel sequence */ +static struct crl_register_write_rep imx274_powerup_standby[] = { + {IMX274_REG_STANDBY, CRL_REG_LEN_08BIT, 0x12}, +}; + +/* 1440Mbps for imx274 4K 30fps 1080p 60fps */ +static struct crl_register_write_rep imx274_pll_1440mbps[] = { + {0x3120, CRL_REG_LEN_08BIT, 0xF0}, + {0x3121, CRL_REG_LEN_08BIT, 0x00}, + {0x3122, CRL_REG_LEN_08BIT, 0x02}, + {0x3129, CRL_REG_LEN_08BIT, 0x9C}, + {0x312A, CRL_REG_LEN_08BIT, 0x02}, + {0x312D, CRL_REG_LEN_08BIT, 0x02}, + {0x310B, CRL_REG_LEN_08BIT, 0x00}, /* PLL standby */ + {0x304C, CRL_REG_LEN_08BIT, 0x00}, /* PLSTMG01 */ + {0x304D, CRL_REG_LEN_08BIT, 0x03}, + {0x331C, CRL_REG_LEN_08BIT, 0x1A}, + {0x331D, CRL_REG_LEN_08BIT, 0x00}, + {0x3502, CRL_REG_LEN_08BIT, 0x02}, + {0x3529, CRL_REG_LEN_08BIT, 0x0E}, + {0x352A, CRL_REG_LEN_08BIT, 0x0E}, + {0x352B, CRL_REG_LEN_08BIT, 0x0E}, + {0x3538, CRL_REG_LEN_08BIT, 0x0E}, + {0x3539, CRL_REG_LEN_08BIT, 0x0E}, + {0x3553, CRL_REG_LEN_08BIT, 0x00}, + {0x357D, CRL_REG_LEN_08BIT, 0x05}, + {0x357F, CRL_REG_LEN_08BIT, 0x05}, + {0x3581, CRL_REG_LEN_08BIT, 0x04}, + {0x3583, CRL_REG_LEN_08BIT, 0x76}, + {0x3587, CRL_REG_LEN_08BIT, 0x01}, + {0x35BB, CRL_REG_LEN_08BIT, 0x0E}, + {0x35BC, CRL_REG_LEN_08BIT, 0x0E}, + {0x35BD, CRL_REG_LEN_08BIT, 0x0E}, + {0x35BE, CRL_REG_LEN_08BIT, 0x0E}, + {0x35BF, CRL_REG_LEN_08BIT, 0x0E}, + {0x366E, CRL_REG_LEN_08BIT, 0x00}, + {0x366F, CRL_REG_LEN_08BIT, 0x00}, + {0x3670, CRL_REG_LEN_08BIT, 0x00}, + {0x3671, CRL_REG_LEN_08BIT, 0x00}, /* PLSTMG01 */ + {0x30EE, CRL_REG_LEN_08BIT, 0x01}, + {0x3304, CRL_REG_LEN_08BIT, 0x32}, /* For Mipi */ + {0x3305, CRL_REG_LEN_08BIT, 0x00}, + {0x3306, CRL_REG_LEN_08BIT, 0x32}, + {0x3307, CRL_REG_LEN_08BIT, 0x00}, + {0x3590, CRL_REG_LEN_08BIT, 0x32}, + {0x3591, CRL_REG_LEN_08BIT, 0x00}, + {0x3686, CRL_REG_LEN_08BIT, 0x32}, + {0x3687, CRL_REG_LEN_08BIT, 0x00}, +}; + +static struct crl_register_write_rep imx274_3864_2202_RAW12_NORMAL[] = { + {0x30E2, CRL_REG_LEN_08BIT, 0x00}, /* VCUTMODE */ + {0x3130, CRL_REG_LEN_08BIT, 0xAA}, /* WRITE_VSIZE */ + {0x3131, CRL_REG_LEN_08BIT, 0x08}, + {0x3132, CRL_REG_LEN_08BIT, 0x9A}, /* Y_OUT_SIZE */ + {0x3133, CRL_REG_LEN_08BIT, 0x08}, + {0x3004, CRL_REG_LEN_08BIT, 0x01}, /* MDSEL */ + {0x3005, CRL_REG_LEN_08BIT, 0x07}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x02}, + {0x3A41, CRL_REG_LEN_08BIT, 0x10}, /* MDSEL5 */ + {0x3342, CRL_REG_LEN_08BIT, 0xFF}, /* MDPLS01 */ + {0x3343, CRL_REG_LEN_08BIT, 0x01}, + {0x3344, CRL_REG_LEN_08BIT, 0xFF}, + {0x3345, CRL_REG_LEN_08BIT, 0x01}, + {0x3528, CRL_REG_LEN_08BIT, 0x0F}, /* MDPLS03 */ + {0x3A54, CRL_REG_LEN_08BIT, 0x18}, /* Metadata Size */ + {0x3A55, CRL_REG_LEN_08BIT, 0x0F}, + {0x3554, CRL_REG_LEN_08BIT, 0x00}, + {0x3555, CRL_REG_LEN_08BIT, 0x00}, + {0x3556, CRL_REG_LEN_08BIT, 0x00}, + {0x3557, CRL_REG_LEN_08BIT, 0x00}, + {0x3558, CRL_REG_LEN_08BIT, 0x00}, + {0x3559, CRL_REG_LEN_08BIT, 0x1F}, + {0x355A, CRL_REG_LEN_08BIT, 0x1F}, + {0x35BA, CRL_REG_LEN_08BIT, 0x0F}, + {0x366A, CRL_REG_LEN_08BIT, 0x00}, + {0x366B, CRL_REG_LEN_08BIT, 0x00}, + {0x366C, CRL_REG_LEN_08BIT, 0x00}, + {0x366D, CRL_REG_LEN_08BIT, 0x00}, + {0x33A6, CRL_REG_LEN_08BIT, 0x01}, + {0x306B, CRL_REG_LEN_08BIT, 0x07}, /* MDPLS17 */ + {0x3019, CRL_REG_LEN_08BIT, 0x00}, /* Disable DOL */ +}; + +static struct crl_register_write_rep imx274_3864_2174_RAW10_NORMAL[] = { + {0x30E2, CRL_REG_LEN_08BIT, 0x01}, /* VCUTMODE */ + {0x3130, CRL_REG_LEN_08BIT, 0x86}, /* WRITE_VSIZE */ + {0x3131, CRL_REG_LEN_08BIT, 0x08}, + {0x3132, CRL_REG_LEN_08BIT, 0x7E}, /* Y_OUT_SIZE */ + {0x3133, 
CRL_REG_LEN_08BIT, 0x08}, + {0x3004, CRL_REG_LEN_08BIT, 0x01}, /* MDSEL */ + {0x3005, CRL_REG_LEN_08BIT, 0x01}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x02}, + {0x3A41, CRL_REG_LEN_08BIT, 0x08}, /* MDSEL5 */ + {0x3342, CRL_REG_LEN_08BIT, 0x0A}, /* MDPLS01 */ + {0x3343, CRL_REG_LEN_08BIT, 0x00}, + {0x3344, CRL_REG_LEN_08BIT, 0x16}, + {0x3345, CRL_REG_LEN_08BIT, 0x00}, + {0x3528, CRL_REG_LEN_08BIT, 0x0E}, /* MDPLS03 */ + {0x3A54, CRL_REG_LEN_08BIT, 0x18}, /* Metadata Size */ + {0x3A55, CRL_REG_LEN_08BIT, 0x0F}, + {0x3554, CRL_REG_LEN_08BIT, 0x1F}, + {0x3555, CRL_REG_LEN_08BIT, 0x01}, + {0x3556, CRL_REG_LEN_08BIT, 0x01}, + {0x3557, CRL_REG_LEN_08BIT, 0x01}, + {0x3558, CRL_REG_LEN_08BIT, 0x01}, + {0x3559, CRL_REG_LEN_08BIT, 0x00}, + {0x355A, CRL_REG_LEN_08BIT, 0x00}, + {0x35BA, CRL_REG_LEN_08BIT, 0x0E}, + {0x366A, CRL_REG_LEN_08BIT, 0x1B}, + {0x366B, CRL_REG_LEN_08BIT, 0x1A}, + {0x366C, CRL_REG_LEN_08BIT, 0x19}, + {0x366D, CRL_REG_LEN_08BIT, 0x17}, + {0x33A6, CRL_REG_LEN_08BIT, 0x01}, + {0x306B, CRL_REG_LEN_08BIT, 0x05}, /* MDPLS17 */ + {0x3019, CRL_REG_LEN_08BIT, 0x00}, /* Disable DOL */ +}; + +static struct crl_register_write_rep imx274_3868_4536_RAW10_DOL[] = { + {0x30E2, CRL_REG_LEN_08BIT, 0x01}, /* VCUTMODE */ + {0x3130, CRL_REG_LEN_08BIT, 0x86}, /* WRITE_VSIZE */ + {0x3131, CRL_REG_LEN_08BIT, 0x08}, + {0x3132, CRL_REG_LEN_08BIT, 0x8E}, /* Y_OUT_SIZE */ + {0x3133, CRL_REG_LEN_08BIT, 0x08}, + {0x3004, CRL_REG_LEN_08BIT, 0x06}, /* MDSEL */ + {0x3005, CRL_REG_LEN_08BIT, 0x01}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x02}, + {0x3A41, CRL_REG_LEN_08BIT, 0x00}, /* MDSEL5 */ + {0x3342, CRL_REG_LEN_08BIT, 0x0A}, /* MDPLS01 */ + {0x3343, CRL_REG_LEN_08BIT, 0x00}, + {0x3344, CRL_REG_LEN_08BIT, 0x16}, + {0x3345, CRL_REG_LEN_08BIT, 0x00}, + {0x3528, CRL_REG_LEN_08BIT, 0x0E}, /* MDPLS03 */ + {0x3A54, CRL_REG_LEN_08BIT, 0x1C}, /* Metadata Size */ + {0x3A55, CRL_REG_LEN_08BIT, 0x0F}, + {0x3554, CRL_REG_LEN_08BIT, 0x1F}, + {0x3555, CRL_REG_LEN_08BIT, 0x01}, + {0x3556, CRL_REG_LEN_08BIT, 0x01}, + {0x3557, CRL_REG_LEN_08BIT, 0x01}, + {0x3558, CRL_REG_LEN_08BIT, 0x01}, + {0x3559, CRL_REG_LEN_08BIT, 0x00}, + {0x355A, CRL_REG_LEN_08BIT, 0x00}, + {0x35BA, CRL_REG_LEN_08BIT, 0x0E}, + {0x366A, CRL_REG_LEN_08BIT, 0x1B}, + {0x366B, CRL_REG_LEN_08BIT, 0x1A}, + {0x366C, CRL_REG_LEN_08BIT, 0x19}, + {0x366D, CRL_REG_LEN_08BIT, 0x17}, + {0x33A6, CRL_REG_LEN_08BIT, 0x01}, + {0x306B, CRL_REG_LEN_08BIT, 0x05}, /* MDPLS17 */ + /* DOL mode settings */ + {0x3019, CRL_REG_LEN_08BIT, 0x01}, /* DOLMODE,DOLSCDEN,HINFOEN */ + {0x3041, CRL_REG_LEN_08BIT, 0x31}, /* DOLSET1 */ + {0x3042, CRL_REG_LEN_08BIT, 0x04}, /* HCYCLE */ + {0x3043, CRL_REG_LEN_08BIT, 0x01}, + {0x30E9, CRL_REG_LEN_08BIT, 0x01}, /* DOLSET2 */ +}; + +static struct crl_register_write_rep imx274_1932_1094_RAW10_NORMAL[] = { + {0x30E2, CRL_REG_LEN_08BIT, 0x02}, /* VCUTMODE */ + {0x3130, CRL_REG_LEN_08BIT, 0x4E}, /* WRITE_VSIZE */ + {0x3131, CRL_REG_LEN_08BIT, 0x04}, + {0x3132, CRL_REG_LEN_08BIT, 0x46}, /* Y_OUT_SIZE */ + {0x3133, CRL_REG_LEN_08BIT, 0x04}, + {0x3004, CRL_REG_LEN_08BIT, 0x02}, /* MDSEL */ + {0x3005, CRL_REG_LEN_08BIT, 0x21}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x11}, + {0x3A41, CRL_REG_LEN_08BIT, 0x08}, /* MDSEL5 */ + {0x3342, CRL_REG_LEN_08BIT, 0x0A}, /* MDPLS01 */ + {0x3343, CRL_REG_LEN_08BIT, 0x00}, + {0x3344, CRL_REG_LEN_08BIT, 0x1A}, + {0x3345, CRL_REG_LEN_08BIT, 0x00}, + {0x3528, CRL_REG_LEN_08BIT, 0x0E}, /* MDPLS03 */ + {0x3A54, CRL_REG_LEN_08BIT, 0x8C}, /* 
Metadata Size */ + {0x3A55, CRL_REG_LEN_08BIT, 0x07}, + {0x3554, CRL_REG_LEN_08BIT, 0x00}, + {0x3555, CRL_REG_LEN_08BIT, 0x01}, + {0x3556, CRL_REG_LEN_08BIT, 0x01}, + {0x3557, CRL_REG_LEN_08BIT, 0x01}, + {0x3558, CRL_REG_LEN_08BIT, 0x01}, + {0x3559, CRL_REG_LEN_08BIT, 0x00}, + {0x355A, CRL_REG_LEN_08BIT, 0x00}, + {0x35BA, CRL_REG_LEN_08BIT, 0x0E}, + {0x366A, CRL_REG_LEN_08BIT, 0x1B}, + {0x366B, CRL_REG_LEN_08BIT, 0x1A}, + {0x366C, CRL_REG_LEN_08BIT, 0x19}, + {0x366D, CRL_REG_LEN_08BIT, 0x17}, + {0x33A6, CRL_REG_LEN_08BIT, 0x01}, + {0x306B, CRL_REG_LEN_08BIT, 0x05}, /* MDPLS17 */ + {0x3019, CRL_REG_LEN_08BIT, 0x00}, /* Disable DOL */ +}; + +static struct crl_register_write_rep imx274_1932_1094_RAW12_NORMAL[] = { + {0x30E2, CRL_REG_LEN_08BIT, 0x02}, /* VCUTMODE */ + {0x3130, CRL_REG_LEN_08BIT, 0x4E}, /* WRITE_VSIZE */ + {0x3131, CRL_REG_LEN_08BIT, 0x04}, + {0x3132, CRL_REG_LEN_08BIT, 0x46}, /* Y_OUT_SIZE */ + {0x3133, CRL_REG_LEN_08BIT, 0x04}, + {0x3004, CRL_REG_LEN_08BIT, 0x02}, /* MDSEL */ + {0x3005, CRL_REG_LEN_08BIT, 0x27}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x11}, + {0x3A41, CRL_REG_LEN_08BIT, 0x08}, /* MDSEL5 */ + {0x3342, CRL_REG_LEN_08BIT, 0xFF}, /* MDPLS01 */ + {0x3343, CRL_REG_LEN_08BIT, 0x01}, + {0x3344, CRL_REG_LEN_08BIT, 0xFF}, + {0x3345, CRL_REG_LEN_08BIT, 0x01}, + {0x3528, CRL_REG_LEN_08BIT, 0x0F}, /* MDPLS03 */ + {0x3A54, CRL_REG_LEN_08BIT, 0x8C}, /* Metadata Size */ + {0x3A55, CRL_REG_LEN_08BIT, 0x07}, + {0x3554, CRL_REG_LEN_08BIT, 0x00}, + {0x3555, CRL_REG_LEN_08BIT, 0x00}, + {0x3556, CRL_REG_LEN_08BIT, 0x00}, + {0x3557, CRL_REG_LEN_08BIT, 0x00}, + {0x3558, CRL_REG_LEN_08BIT, 0x00}, + {0x3559, CRL_REG_LEN_08BIT, 0x1F}, + {0x355A, CRL_REG_LEN_08BIT, 0x1F}, + {0x35BA, CRL_REG_LEN_08BIT, 0x0F}, + {0x366A, CRL_REG_LEN_08BIT, 0x00}, + {0x366B, CRL_REG_LEN_08BIT, 0x00}, + {0x366C, CRL_REG_LEN_08BIT, 0x00}, + {0x366D, CRL_REG_LEN_08BIT, 0x00}, + {0x33A6, CRL_REG_LEN_08BIT, 0x01}, + {0x306B, CRL_REG_LEN_08BIT, 0x07}, /* MDPLS17 */ + {0x3019, CRL_REG_LEN_08BIT, 0x00}, /* Disable DOL */ +}; + +static struct crl_register_write_rep imx274_1936_2376_RAW10_DOL[] = { + {0x30E2, CRL_REG_LEN_08BIT, 0x02}, /* VCUTMODE */ + {0x3130, CRL_REG_LEN_08BIT, 0x4E}, /* WRITE_VSIZE */ + {0x3131, CRL_REG_LEN_08BIT, 0x04}, + {0x3132, CRL_REG_LEN_08BIT, 0x54}, /* Y_OUT_SIZE */ + {0x3133, CRL_REG_LEN_08BIT, 0x04}, + {0x3004, CRL_REG_LEN_08BIT, 0x07}, /* MDSEL */ + {0x3005, CRL_REG_LEN_08BIT, 0x21}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x11}, + {0x3A41, CRL_REG_LEN_08BIT, 0x08}, /* MDSEL5 */ + {0x3342, CRL_REG_LEN_08BIT, 0x0A}, /* MDPLS01 */ + {0x3343, CRL_REG_LEN_08BIT, 0x00}, + {0x3344, CRL_REG_LEN_08BIT, 0x1A}, + {0x3345, CRL_REG_LEN_08BIT, 0x00}, + {0x3528, CRL_REG_LEN_08BIT, 0x0E}, /* MDPLS03 */ + {0x3A54, CRL_REG_LEN_08BIT, 0x90}, /* Metadata Size */ + {0x3A55, CRL_REG_LEN_08BIT, 0x07}, + {0x3554, CRL_REG_LEN_08BIT, 0x00}, + {0x3555, CRL_REG_LEN_08BIT, 0x01}, + {0x3556, CRL_REG_LEN_08BIT, 0x01}, + {0x3557, CRL_REG_LEN_08BIT, 0x01}, + {0x3558, CRL_REG_LEN_08BIT, 0x01}, + {0x3559, CRL_REG_LEN_08BIT, 0x00}, + {0x355A, CRL_REG_LEN_08BIT, 0x00}, + {0x35BA, CRL_REG_LEN_08BIT, 0x0E}, + {0x366A, CRL_REG_LEN_08BIT, 0x1B}, + {0x366B, CRL_REG_LEN_08BIT, 0x1A}, + {0x366C, CRL_REG_LEN_08BIT, 0x19}, + {0x366D, CRL_REG_LEN_08BIT, 0x17}, + {0x33A6, CRL_REG_LEN_08BIT, 0x01}, + {0x306B, CRL_REG_LEN_08BIT, 0x05}, /* MDPLS17 */ + /* DOL mode settings */ + {0x3019, CRL_REG_LEN_08BIT, 0x01}, /* DOLMODE,DOLSCDEN,HINFOEN */ + {0x3041, CRL_REG_LEN_08BIT, 0x31}, /* 
DOLSET1 */ + {0x3042, CRL_REG_LEN_08BIT, 0x04}, /* HCYCLE */ + {0x3043, CRL_REG_LEN_08BIT, 0x01}, + {0x30E9, CRL_REG_LEN_08BIT, 0x01}, /* DOLSET2 */ +}; + +static struct crl_register_write_rep imx274_1288_738_RAW10_NORMAL[] = { + {0x30E2, CRL_REG_LEN_08BIT, 0x03}, /* VCUTMODE */ + {0x3130, CRL_REG_LEN_08BIT, 0xE2}, /* WRITE_VSIZE */ + {0x3131, CRL_REG_LEN_08BIT, 0x02}, + {0x3132, CRL_REG_LEN_08BIT, 0xDE}, /* Y_OUT_SIZE */ + {0x3133, CRL_REG_LEN_08BIT, 0x02}, + {0x3004, CRL_REG_LEN_08BIT, 0x03}, /* MDSEL */ + {0x3005, CRL_REG_LEN_08BIT, 0x31}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x09}, + {0x3A41, CRL_REG_LEN_08BIT, 0x04}, /* MDSEL5 */ + {0x3342, CRL_REG_LEN_08BIT, 0x0A}, /* MDPLS01 */ + {0x3343, CRL_REG_LEN_08BIT, 0x00}, + {0x3344, CRL_REG_LEN_08BIT, 0x1B}, + {0x3345, CRL_REG_LEN_08BIT, 0x00}, + {0x3528, CRL_REG_LEN_08BIT, 0x0E}, /* MDPLS03 */ + {0x3A54, CRL_REG_LEN_08BIT, 0x8C}, /* Metadata Size */ + {0x3A55, CRL_REG_LEN_08BIT, 0x00}, + {0x3554, CRL_REG_LEN_08BIT, 0x00}, + {0x3555, CRL_REG_LEN_08BIT, 0x01}, + {0x3556, CRL_REG_LEN_08BIT, 0x01}, + {0x3557, CRL_REG_LEN_08BIT, 0x01}, + {0x3558, CRL_REG_LEN_08BIT, 0x01}, + {0x3559, CRL_REG_LEN_08BIT, 0x00}, + {0x355A, CRL_REG_LEN_08BIT, 0x00}, + {0x35BA, CRL_REG_LEN_08BIT, 0x0E}, + {0x366A, CRL_REG_LEN_08BIT, 0x1B}, + {0x366B, CRL_REG_LEN_08BIT, 0x19}, + {0x366C, CRL_REG_LEN_08BIT, 0x17}, + {0x366D, CRL_REG_LEN_08BIT, 0x17}, + {0x33A6, CRL_REG_LEN_08BIT, 0x01}, + {0x306B, CRL_REG_LEN_08BIT, 0x05}, /* MDPLS17 */ + {0x3019, CRL_REG_LEN_08BIT, 0x00}, /* Disable DOL */ +}; + +static struct crl_register_write_rep imx274_streamon_regs[] = { + {0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ + {IMX274_REG_STANDBY, CRL_REG_LEN_08BIT, 0x00}, + {0x303E, CRL_REG_LEN_08BIT, 0x02}, + {0x00, CRL_REG_LEN_DELAY, 7, 0x00}, /* Add a 7ms delay */ + {0x30F4, CRL_REG_LEN_08BIT, 0x00}, + {0x3018, CRL_REG_LEN_08BIT, 0x02}, +}; + +static struct crl_register_write_rep imx274_streamoff_regs[] = { + {0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ + {IMX274_REG_STANDBY, CRL_REG_LEN_08BIT, 0x01}, + {0x303E, CRL_REG_LEN_08BIT, 0x02}, + {0x00, CRL_REG_LEN_DELAY, 7, 0x00}, /* Add a 7ms delay */ + {0x30F4, CRL_REG_LEN_08BIT, 0x01}, + {0x3018, CRL_REG_LEN_08BIT, 0x02}, +}; + +static struct crl_arithmetic_ops imx274_rshift8_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + } +}; + +static struct crl_arithmetic_ops imx274_rshift16_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 16, + } +}; + +static struct crl_arithmetic_ops imx274_ana_gain_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + }, + { + .op = CRL_BITWISE_AND, + .operand.entity_val = 0x07, + } +}; + +/* imx274 uses registers PGC[10:0] (0x300A, 0x300B) to set analog gain */ +static struct crl_dynamic_register_access imx274_ana_gain_global_regs[] = { + { + .address = 0x300A, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + }, + { + .address = 0x300B, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_ana_gain_ops), + .ops = imx274_ana_gain_ops, + }, +}; + +static struct crl_dynamic_register_access imx274_dig_gain_regs[] = { + { + .address = 0x3012, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xf, + }, +}; + +/* shr = fll - exposure */ +static struct crl_arithmetic_ops imx274_shr_lsb_ops[] = { + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + } +}; + +static struct 
crl_arithmetic_ops imx274_shr_msb_ops[] = { + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 8, + } +}; + +static struct crl_dynamic_register_access imx274_shr_regs[] = { + { + .address = 0x300C, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_shr_lsb_ops), + .ops = imx274_shr_lsb_ops, + .mask = 0xff, + }, + { + .address = 0x300D, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_shr_msb_ops), + .ops = imx274_shr_msb_ops, + .mask = 0xff, + }, +}; + +/* Short exposure for DOL */ +static struct crl_dynamic_register_access imx274_shs1_regs[] = { + { + .address = 0x302E, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, + { + .address = 0x302F, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_rshift8_ops), + .ops = imx274_rshift8_ops, + .mask = 0xff, + }, +}; + +/* Long exposure for DOL */ +static struct crl_dynamic_register_access imx274_shs2_regs[] = { + { + .address = 0x3030, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, + { + .address = 0x3031, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_rshift8_ops), + .ops = imx274_rshift8_ops, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access imx274_rhs1_regs[] = { + { + .address = 0x3032, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, + { + .address = 0x3033, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_rshift8_ops), + .ops = imx274_rshift8_ops, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access imx274_fll_regs[] = { + /* + * Use 8bits access since 24bits or 32bits access will fail + * TODO: root cause the 24bits and 32bits access issues + */ + { + .address = 0x30F8, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, + { + .address = 0x30F9, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_rshift8_ops), + .ops = imx274_rshift8_ops, + .mask = 0xff, + }, + { + .address = 0x30FA, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_rshift16_ops), + .ops = imx274_rshift16_ops, + .mask = 0xf, + }, +}; + +static struct crl_dynamic_register_access imx274_llp_regs[] = { + { + .address = 0x30F6, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, + { + .address = 0x30F7, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx274_rshift8_ops), + .ops = imx274_rshift8_ops, + .mask = 0xff, + }, +}; + +static struct crl_sensor_detect_config imx274_sensor_detect_regset[] = { + { + .reg = { 0x30F8, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x30F9, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration imx274_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 720000000, /* 1440000000/2 */ + .bitsperpixel = 10, + .pixel_rate_csi = 72000000, + .pixel_rate_pa = 72000000, /* 72MHz */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx274_pll_1440mbps), + .pll_regs = imx274_pll_1440mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 720000000, /* 1440000000/2 */ + .bitsperpixel = 12, + .pixel_rate_csi = 72000000, + .pixel_rate_pa = 72000000, /* 72MHz */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx274_pll_1440mbps), + 
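/* The 12-bit link setting reuses the 10-bit entry's 1440 Mbps PLL register table; only bitsperpixel differs */ +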
.pll_regs = imx274_pll_1440mbps, + } +}; + +static struct crl_subdev_rect_rep imx274_3864_2202_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3868, + .out_rect.height = 4536, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3864, + .out_rect.height = 2202, + } +}; + +static struct crl_subdev_rect_rep imx274_3864_2174_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3868, + .out_rect.height = 4536, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3864, + .out_rect.height = 2174, + } +}; + +/* DOL pixel array includes 4 pixel sync code each line */ +static struct crl_subdev_rect_rep imx274_3868_4536_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3868, + .out_rect.height = 4536, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3868, + .out_rect.height = 4536, + } +}; + +static struct crl_subdev_rect_rep imx274_1932_1094_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3868, + .out_rect.height = 4536, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1932, + .out_rect.height = 1094, + } +}; + +/* DOL pixel array includes 4 pixel sync code each line */ +static struct crl_subdev_rect_rep imx274_1936_2376_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3868, + .out_rect.height = 4536, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1936, + .out_rect.height = 2376, + } +}; + +static struct crl_subdev_rect_rep imx274_1288_738_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3868, + .out_rect.height = 4536, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3868, + .in_rect.height = 4536, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1288, + .out_rect.height = 738, + } +}; + +static struct crl_mode_rep imx274_modes[] = { + { + /* mode 0 12bit all pixel scan per 
datasheet */ + .sd_rects_items = ARRAY_SIZE(imx274_3864_2202_rects), + .sd_rects = imx274_3864_2202_rects, + .binn_hor = 1, + .binn_vert = 2, + .scale_m = 1, + .width = 3864, + .height = 2202, + .min_llp = 493, /* 01EDh */ + .min_fll = 4868, /* default 30fps */ + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = + ARRAY_SIZE(imx274_3864_2202_RAW12_NORMAL), + .mode_regs = imx274_3864_2202_RAW12_NORMAL, + }, + { + /* mode 1 10bit all pixel scan per datasheet */ + .sd_rects_items = ARRAY_SIZE(imx274_3864_2174_rects), + .sd_rects = imx274_3864_2174_rects, + .binn_hor = 1, + .binn_vert = 2, + .scale_m = 1, + .width = 3864, + .height = 2174, + .min_llp = 493, /* 01EDh */ + .min_fll = 4868, /* default 30fps */ + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = + ARRAY_SIZE(imx274_3864_2174_RAW10_NORMAL), + .mode_regs = imx274_3864_2174_RAW10_NORMAL, + }, + { + /* mode 1 DOL 10bit per datasheet */ + .sd_rects_items = ARRAY_SIZE(imx274_3868_4536_rects), + .sd_rects = imx274_3868_4536_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3868, + .height = 4536, /* 2*(2160+22+VBP) */ + .min_llp = 1052, /* 041Ch */ + .min_fll = 2281, /* 30fps */ + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = + ARRAY_SIZE(imx274_3868_4536_RAW10_DOL), + .mode_regs = imx274_3868_4536_RAW10_DOL, + }, + { + /* mode 3 10bit all pixel scan per datasheet */ + .sd_rects_items = ARRAY_SIZE(imx274_1932_1094_rects), + .sd_rects = imx274_1932_1094_rects, + .binn_hor = 2, + .binn_vert = 4, + .scale_m = 1, + .width = 1932, + .height = 1094, + .min_llp = 493, /* 01EDh */ + .min_fll = 4868, /* default 30fps */ + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE( + imx274_1932_1094_RAW10_NORMAL), + .mode_regs = imx274_1932_1094_RAW10_NORMAL, + }, + { + /* mode 3 12bit all pixel scan per datasheet */ + .sd_rects_items = ARRAY_SIZE(imx274_1932_1094_rects), + .sd_rects = imx274_1932_1094_rects, + .binn_hor = 2, + .binn_vert = 4, + .scale_m = 1, + .width = 1932, + .height = 1094, + .min_llp = 493, /* 01EDh */ + .min_fll = 4868, /* default 30fps */ + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE( + imx274_1932_1094_RAW12_NORMAL), + .mode_regs = imx274_1932_1094_RAW12_NORMAL, + }, + { + /* mode 3 DOL 10bit per datasheet */ + .sd_rects_items = ARRAY_SIZE(imx274_1936_2376_rects), + .sd_rects = imx274_1936_2376_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1936, + .height = 2376, + .min_llp = 1052, /* 041Ch */ + .min_fll = 2281, /* 30fps */ + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE( + imx274_1936_2376_RAW10_DOL), + .mode_regs = imx274_1936_2376_RAW10_DOL, + }, + { + /* mode 5 10bit per datasheet */ + .sd_rects_items = ARRAY_SIZE(imx274_1288_738_rects), + .sd_rects = imx274_1288_738_rects, + .binn_hor = 3, + .binn_vert = 6, + .scale_m = 1, + .width = 1288, + .height = 738, + .min_llp = 260, + .min_fll = 2310, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE( + imx274_1288_738_RAW10_NORMAL), + .mode_regs = imx274_1288_738_RAW10_NORMAL, + }, +}; + +static struct crl_sensor_subdev_config imx274_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx274 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx274 pixel array", + } +}; + +static struct crl_sensor_limits imx274_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 3868, /* pixel area length and width */ + .y_addr_max = 4536, + .min_frame_length_lines = 1111, + .max_frame_length_lines
= 65535, + .min_line_length_pixels = 260, + .max_line_length_pixels = 32752, +}; + +static struct crl_flip_data imx274_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + } +}; + +static struct crl_csi_data_fmt imx274_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, /* default order */ + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .pixel_order = CRL_PIXEL_ORDER_RGGB, /* default order */ + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + } +}; + +static struct crl_v4l2_ctrl imx274_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1111, + .data.std_data.max = IMX274_VMAX, + .data.std_data.step = 1, + .data.std_data.def = 1111, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx274_fll_regs), + 
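/* VMAX spans up to 20 bits (IMX274_VMAX), so it is written as three 8-bit accesses; see imx274_fll_regs above */ +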
.regs = imx274_fll_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 260, + .data.std_data.max = IMX274_HMAX, + .data.std_data.step = 1, + .data.std_data.def = 260, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx274_llp_regs), + .regs = imx274_llp_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_GAIN, + .name = "Digital Gain", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 6, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx274_dig_gain_regs), + .regs = imx274_dig_gain_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0x7A5, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx274_ana_gain_global_regs), + .regs = imx274_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 4, + .data.std_data.max = IMX274_MAX_SHS2, + .data.std_data.step = 1, + .data.std_data.def = 0x400, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx274_shr_regs), + .regs = imx274_shr_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS1, + .name = "CRL_CID_EXPOSURE_SHS1", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 4, + .data.std_data.max = IMX274_MAX_SHS1, + .data.std_data.step = 1, + .data.std_data.def = 0x06, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx274_shs1_regs), + .regs = imx274_shs1_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS2, + .name = "CRL_CID_EXPOSURE_SHS2", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 10, + .data.std_data.max = IMX274_MAX_SHS2, + .data.std_data.step = 1, + .data.std_data.def = 0x2d, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx274_shs2_regs), + .regs = imx274_shs2_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_RHS1, + .name = 
"CRL_CID_EXPOSURE_RHS1", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 6, + .data.std_data.max = IMX274_MAX_RHS1, + .data.std_data.step = 1, + .data.std_data.def = 0x56, /* Fixed to 86 by default */ + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx274_rhs1_regs), + .regs = imx274_rhs1_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_SENSOR_MODE, + .name = "CRL_CID_SENSOR_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = ARRAY_SIZE(imx274_modes) - 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +static struct crl_arithmetic_ops imx274_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops imx274_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc imx274_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(imx274_frame_desc_width_ops), + .ops = imx274_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(imx274_frame_desc_height_ops), + .ops = imx274_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, +}; + +static struct crl_power_seq_entity imx274_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + }, +}; + +struct crl_sensor_configuration imx274_crl_configuration = { + + .power_items = ARRAY_SIZE(imx274_power_items), + .power_entities = imx274_power_items, + + .powerup_regs_items = ARRAY_SIZE(imx274_powerup_standby), + .powerup_regs = imx274_powerup_standby, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(imx274_sensor_detect_regset), + .id_regs = imx274_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx274_sensor_subdevs), + .subdevs = imx274_sensor_subdevs, + + .sensor_limits = &imx274_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx274_pll_configurations), + .pll_configs = imx274_pll_configurations, + + .modes_items = ARRAY_SIZE(imx274_modes), + .modes = imx274_modes, + + .streamon_regs_items = ARRAY_SIZE(imx274_streamon_regs), + .streamon_regs = imx274_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx274_streamoff_regs), + .streamoff_regs = imx274_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx274_v4l2_ctrls), + .v4l2_ctrl_bank = imx274_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx274_crl_csi_data_fmt), + .csi_fmts = imx274_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx274_flip_configurations), + .flip_data = imx274_flip_configurations, + + .frame_desc_entries = ARRAY_SIZE(imx274_frame_desc), + .frame_desc_type = 
CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = imx274_frame_desc, + +}; + +#endif /* __CRLMODULE_IMX274_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx290_configuration.h b/drivers/media/i2c/crlmodule/crl_imx290_configuration.h new file mode 100644 index 000000000000..6a3561bea27f --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx290_configuration.h @@ -0,0 +1,1078 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Yuning Pu + * + */ + +#ifndef __CRLMODULE_IMX290_CONFIGURATION_H_ +#define __CRLMODULE_IMX290_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +#define IMX290_REG_STANDBY 0x3000 +#define IMX290_REG_XMSTA 0x3002 + +#define IMX290_HMAX 65535 +#define IMX290_VMAX 131071 +#define IMX290_MAX_SHS1 (IMX290_VMAX - 2) + +static struct crl_register_write_rep imx290_pll_891mbps[] = { + {0x3405, CRL_REG_LEN_08BIT, 0x00}, /* repetition */ + {0x3407, CRL_REG_LEN_08BIT, 0x03}, /* physical lane num(fixed) */ + {0x3009, CRL_REG_LEN_08BIT, 0x00}, /* FRSEL FDG_SEL */ + {0x300F, CRL_REG_LEN_08BIT, 0x00}, /* fixed setting */ + {0x3010, CRL_REG_LEN_08BIT, 0x21}, + {0x3012, CRL_REG_LEN_08BIT, 0x64}, + {0x3414, CRL_REG_LEN_08BIT, 0x0A}, + {0x3415, CRL_REG_LEN_08BIT, 0x00}, + {0x3016, CRL_REG_LEN_08BIT, 0x09}, /* changed */ + {0x3119, CRL_REG_LEN_08BIT, 0x9E}, + {0x311C, CRL_REG_LEN_08BIT, 0x1E}, + {0x311E, CRL_REG_LEN_08BIT, 0x08}, + {0x3128, CRL_REG_LEN_08BIT, 0x05}, + {0x332C, CRL_REG_LEN_08BIT, 0xD3}, + {0x332D, CRL_REG_LEN_08BIT, 0x10}, + {0x332E, CRL_REG_LEN_08BIT, 0x0D}, + {0x313D, CRL_REG_LEN_08BIT, 0x83}, + {0x3443, CRL_REG_LEN_08BIT, 0x03}, /* csi_lane_mode(fixed) */ + {0x3444, CRL_REG_LEN_08BIT, 0x20}, /* extck_freq */ + {0x3445, CRL_REG_LEN_08BIT, 0x25}, + {0x3446, CRL_REG_LEN_08BIT, 0x77}, /* tclkpost */ + {0x3447, CRL_REG_LEN_08BIT, 0x00}, + {0x3448, CRL_REG_LEN_08BIT, 0x67}, /* thszero */ + {0x3449, CRL_REG_LEN_08BIT, 0x00}, + {0x344A, CRL_REG_LEN_08BIT, 0x47}, /* thsprepare */ + {0x344B, CRL_REG_LEN_08BIT, 0x00}, + {0x344C, CRL_REG_LEN_08BIT, 0x37}, /* thstrail */ + {0x344D, CRL_REG_LEN_08BIT, 0x00}, + {0x344E, CRL_REG_LEN_08BIT, 0x3F}, /* thstrail */ + {0x344F, CRL_REG_LEN_08BIT, 0x00}, + {0x3150, CRL_REG_LEN_08BIT, 0x03}, + {0x3450, CRL_REG_LEN_08BIT, 0xFF}, /* tclkzero */ + {0x3451, CRL_REG_LEN_08BIT, 0x00}, + {0x3452, CRL_REG_LEN_08BIT, 0x3F}, /* tclkprepare */ + {0x3453, CRL_REG_LEN_08BIT, 0x00}, + {0x3454, CRL_REG_LEN_08BIT, 0x37}, /* tlpx */ + {0x3455, CRL_REG_LEN_08BIT, 0x00}, + {0x3358, CRL_REG_LEN_08BIT, 0x06}, /* fixed setting */ + {0x3359, CRL_REG_LEN_08BIT, 0xE1}, + {0x335A, CRL_REG_LEN_08BIT, 0x11}, + {0x305C, CRL_REG_LEN_08BIT, 0x18}, /* incksel1 */ + {0x305D, CRL_REG_LEN_08BIT, 0x03}, /* incksel2 */ + {0x305E, CRL_REG_LEN_08BIT, 0x20}, /* incksel3 */ + {0x315E, CRL_REG_LEN_08BIT, 0x1A}, /* incksel5 */ + {0x305F, CRL_REG_LEN_08BIT, 0x01}, /* incksel4 */ + {0x3360, CRL_REG_LEN_08BIT, 0x1E}, + {0x3361, CRL_REG_LEN_08BIT, 0x61}, + {0x3362, CRL_REG_LEN_08BIT, 0x10}, + {0x3164, CRL_REG_LEN_08BIT, 0x1A}, /* incksel6 */ + {0x3070, CRL_REG_LEN_08BIT, 0x02}, + {0x3071, CRL_REG_LEN_08BIT, 0x11}, + {0x317E, CRL_REG_LEN_08BIT, 0x00}, + {0x3480, CRL_REG_LEN_08BIT, 0x49}, /* inclsel7 */ + {0x309B, CRL_REG_LEN_08BIT, 0x10}, + {0x309C, CRL_REG_LEN_08BIT, 0x22}, + {0x30A2, CRL_REG_LEN_08BIT, 0x02}, + {0x30A6, CRL_REG_LEN_08BIT, 0x20}, + {0x30A8, CRL_REG_LEN_08BIT, 0x20}, + {0x30AA, CRL_REG_LEN_08BIT, 0x20}, + {0x30AC, CRL_REG_LEN_08BIT, 0x20}, + {0x30B0, CRL_REG_LEN_08BIT, 0x43}, + {0x33B0, 
CRL_REG_LEN_08BIT, 0x50}, + {0x33B2, CRL_REG_LEN_08BIT, 0x1A}, + {0x33B3, CRL_REG_LEN_08BIT, 0x04}, + {0x32B8, CRL_REG_LEN_08BIT, 0x50}, + {0x32B9, CRL_REG_LEN_08BIT, 0x10}, + {0x32BA, CRL_REG_LEN_08BIT, 0x00}, + {0x32BB, CRL_REG_LEN_08BIT, 0x04}, + {0x32C8, CRL_REG_LEN_08BIT, 0x50}, + {0x32C9, CRL_REG_LEN_08BIT, 0x10}, + {0x32CA, CRL_REG_LEN_08BIT, 0x00}, + {0x32CB, CRL_REG_LEN_08BIT, 0x04}, +}; + +/* 445Mbps for imx290 1080p 30fps */ +static struct crl_register_write_rep imx290_pll_445mbps[] = { + {0x3405, CRL_REG_LEN_08BIT, 0x20}, /* repetition */ + {0x3407, CRL_REG_LEN_08BIT, 0x03}, /* physical lane num(fixed) */ + {0x3009, CRL_REG_LEN_08BIT, 0x02}, /* FRSEL FDG_SEL */ + {0x300F, CRL_REG_LEN_08BIT, 0x00}, /* fixed setting */ + {0x3010, CRL_REG_LEN_08BIT, 0x21}, + {0x3012, CRL_REG_LEN_08BIT, 0x64}, + {0x3414, CRL_REG_LEN_08BIT, 0x0A}, + {0x3016, CRL_REG_LEN_08BIT, 0x09}, /* changed */ + {0x3119, CRL_REG_LEN_08BIT, 0x9E}, + {0x311C, CRL_REG_LEN_08BIT, 0x1E}, + {0x311E, CRL_REG_LEN_08BIT, 0x08}, + {0x3128, CRL_REG_LEN_08BIT, 0x05}, + {0x332C, CRL_REG_LEN_08BIT, 0xD3}, + {0x332D, CRL_REG_LEN_08BIT, 0x10}, + {0x332E, CRL_REG_LEN_08BIT, 0x0D}, + {0x313D, CRL_REG_LEN_08BIT, 0x83}, + {0x3443, CRL_REG_LEN_08BIT, 0x03}, /* csi_lane_mode(fixed) */ + {0x3444, CRL_REG_LEN_08BIT, 0x20}, /* extck_freq */ + {0x3445, CRL_REG_LEN_08BIT, 0x25}, + {0x3446, CRL_REG_LEN_08BIT, 0x47}, /* tclkpost */ + {0x3447, CRL_REG_LEN_08BIT, 0x00}, + {0x3448, CRL_REG_LEN_08BIT, 0x1F}, /* thszero */ + {0x3449, CRL_REG_LEN_08BIT, 0x00}, + {0x344A, CRL_REG_LEN_08BIT, 0x17}, /* thsprepare */ + {0x344B, CRL_REG_LEN_08BIT, 0x00}, + {0x344C, CRL_REG_LEN_08BIT, 0x0F}, /* thstrail */ + {0x344D, CRL_REG_LEN_08BIT, 0x00}, + {0x344E, CRL_REG_LEN_08BIT, 0x17}, /* thstrail */ + {0x344F, CRL_REG_LEN_08BIT, 0x00}, + {0x3150, CRL_REG_LEN_08BIT, 0x03}, + {0x3450, CRL_REG_LEN_08BIT, 0x47}, /* tclkzero */ + {0x3451, CRL_REG_LEN_08BIT, 0x00}, + {0x3452, CRL_REG_LEN_08BIT, 0x0F}, /* tclkprepare */ + {0x3453, CRL_REG_LEN_08BIT, 0x00}, + {0x3454, CRL_REG_LEN_08BIT, 0x0F}, /* tlpx */ + {0x3455, CRL_REG_LEN_08BIT, 0x00}, + {0x3358, CRL_REG_LEN_08BIT, 0x06}, /* fixed setting */ + {0x3359, CRL_REG_LEN_08BIT, 0xE1}, + {0x335A, CRL_REG_LEN_08BIT, 0x11}, + {0x305C, CRL_REG_LEN_08BIT, 0x18}, /* incksel1 */ + {0x305D, CRL_REG_LEN_08BIT, 0x03}, /* incksel2 */ + {0x305E, CRL_REG_LEN_08BIT, 0x20}, /* incksel3 */ + {0x315E, CRL_REG_LEN_08BIT, 0x1A}, /* incksel5 */ + {0x305F, CRL_REG_LEN_08BIT, 0x01}, /* incksel4 */ + {0x3360, CRL_REG_LEN_08BIT, 0x1E}, + {0x3361, CRL_REG_LEN_08BIT, 0x61}, + {0x3362, CRL_REG_LEN_08BIT, 0x10}, + {0x3164, CRL_REG_LEN_08BIT, 0x1A}, /* incksel6 */ + {0x3070, CRL_REG_LEN_08BIT, 0x02}, + {0x3071, CRL_REG_LEN_08BIT, 0x11}, + {0x317E, CRL_REG_LEN_08BIT, 0x00}, + {0x3480, CRL_REG_LEN_08BIT, 0x49}, /* inclsel7 */ + {0x309B, CRL_REG_LEN_08BIT, 0x10}, + {0x309C, CRL_REG_LEN_08BIT, 0x22}, + {0x30A2, CRL_REG_LEN_08BIT, 0x02}, + {0x30A6, CRL_REG_LEN_08BIT, 0x20}, + {0x30A8, CRL_REG_LEN_08BIT, 0x20}, + {0x30AA, CRL_REG_LEN_08BIT, 0x20}, + {0x30AC, CRL_REG_LEN_08BIT, 0x20}, + {0x30B0, CRL_REG_LEN_08BIT, 0x43}, + {0x33B0, CRL_REG_LEN_08BIT, 0x50}, + {0x33B2, CRL_REG_LEN_08BIT, 0x1A}, + {0x33B3, CRL_REG_LEN_08BIT, 0x04}, + {0x32B8, CRL_REG_LEN_08BIT, 0x50}, + {0x32B9, CRL_REG_LEN_08BIT, 0x10}, + {0x32BA, CRL_REG_LEN_08BIT, 0x00}, + {0x32BB, CRL_REG_LEN_08BIT, 0x04}, + {0x32C8, CRL_REG_LEN_08BIT, 0x50}, + {0x32C9, CRL_REG_LEN_08BIT, 0x10}, + {0x32CA, CRL_REG_LEN_08BIT, 0x00}, + {0x32CB, CRL_REG_LEN_08BIT, 0x04}, +}; + +static struct 
crl_register_write_rep imx290_fmt_raw10[] = { + {0x3005, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT */ + {0x300A, CRL_REG_LEN_08BIT, 0x3C}, /* BLKLEVEL */ + {0x3129, CRL_REG_LEN_08BIT, 0x1D}, /* ADBIT1 */ + {0x3441, CRL_REG_LEN_08BIT, 0x0A}, /* CSI_DT_FMT */ + {0x3442, CRL_REG_LEN_08BIT, 0x0A}, + {0x3046, CRL_REG_LEN_08BIT, 0x00}, /* ODBIT OPORTSEL */ + {0x317C, CRL_REG_LEN_08BIT, 0x12}, /* ADBIT2 */ + {0x31EC, CRL_REG_LEN_08BIT, 0x37}, /* ADBIT3 */ +}; + +static struct crl_register_write_rep imx290_fmt_raw12[] = { + {0x3005, CRL_REG_LEN_08BIT, 0x01}, /* ADBIT */ + {0x300A, CRL_REG_LEN_08BIT, 0xF0}, /* BLKLEVEL */ + {0x3129, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT1 */ + {0x3441, CRL_REG_LEN_08BIT, 0x0C}, /* CSI_DT_FMT */ + {0x3442, CRL_REG_LEN_08BIT, 0x0C}, + {0x3046, CRL_REG_LEN_08BIT, 0x01}, /* ODBIT OPORTSEL */ + {0x317C, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT2 */ + {0x31EC, CRL_REG_LEN_08BIT, 0x0E}, /* ADBIT3 */ +}; + +static struct crl_register_write_rep imx290_powerup_standby[] = { + {IMX290_REG_STANDBY, CRL_REG_LEN_08BIT, 0x01}, + {0x00, CRL_REG_LEN_DELAY, 20, 0x00}, + {IMX290_REG_XMSTA, CRL_REG_LEN_08BIT, 0x01}, +}; + +/* Horizontal dumpy added 1097(1094+3) */ +static struct crl_register_write_rep imx290_1948_1096_37MHZ_CROPPING[] = { + /*TODO need a test if necessary to open XMSTA*/ + {0x3000, CRL_REG_LEN_08BIT, 0x01}, /* reset to standby mode */ + {0x3002, CRL_REG_LEN_08BIT, 0x01}, /* default:reset slave mode */ + {0x3005, CRL_REG_LEN_08BIT, 0x01}, /* ADBIT */ + {0x3405, CRL_REG_LEN_08BIT, 0x20}, /* repetition */ + {0x3007, CRL_REG_LEN_08BIT, 0x04}, /* H/V verse and WINMODE */ + {0x3407, CRL_REG_LEN_08BIT, 0x03}, /* physical lane num(fixed) */ + {0x3009, CRL_REG_LEN_08BIT, 0x02}, /* FRSEL FDG_SEL */ + {0x300A, CRL_REG_LEN_08BIT, 0xF0}, /* BLKLEVEL */ + {0x300F, CRL_REG_LEN_08BIT, 0x00}, /* fixed setting */ + {0x3010, CRL_REG_LEN_08BIT, 0x21}, + {0x3012, CRL_REG_LEN_08BIT, 0x64}, + {0x3414, CRL_REG_LEN_08BIT, 0x0A}, /* OPB_SIZE_V */ + {0x3016, CRL_REG_LEN_08BIT, 0x09}, + {0x3018, CRL_REG_LEN_08BIT, 0x65}, /* VMAX */ + {0x3019, CRL_REG_LEN_08BIT, 0x04}, + {0x3418, CRL_REG_LEN_08BIT, 0x49}, /* Y_OUT_SIZE */ + {0x3419, CRL_REG_LEN_08BIT, 0x04}, + {0x3119, CRL_REG_LEN_08BIT, 0x9E}, + {0x301C, CRL_REG_LEN_08BIT, 0x30}, /* HMAX */ + {0x301D, CRL_REG_LEN_08BIT, 0x11}, + {0x311C, CRL_REG_LEN_08BIT, 0x1E}, + {0x311E, CRL_REG_LEN_08BIT, 0x08}, + {0x3128, CRL_REG_LEN_08BIT, 0x05}, + {0x3129, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT1 */ + {0x332C, CRL_REG_LEN_08BIT, 0xD3}, + {0x332D, CRL_REG_LEN_08BIT, 0x10}, + {0x332E, CRL_REG_LEN_08BIT, 0x0D}, + {0x313D, CRL_REG_LEN_08BIT, 0x83}, + {0x3441, CRL_REG_LEN_08BIT, 0x0C}, /* CSI_DT_FMT */ + {0x3442, CRL_REG_LEN_08BIT, 0x0C}, + {0x3443, CRL_REG_LEN_08BIT, 0x03}, /* csi_lane_mode(fixed) */ + {0x3444, CRL_REG_LEN_08BIT, 0x20}, /* extck_freq */ + {0x3445, CRL_REG_LEN_08BIT, 0x25}, + {0x3046, CRL_REG_LEN_08BIT, 0x01}, /* ODBIT OPORTSEL */ + {0x3446, CRL_REG_LEN_08BIT, 0x47}, /* tclkpost */ + {0x3447, CRL_REG_LEN_08BIT, 0x00}, + {0x3448, CRL_REG_LEN_08BIT, 0x1F}, /* thszero */ + {0x3449, CRL_REG_LEN_08BIT, 0x00}, + {0x304B, CRL_REG_LEN_08BIT, 0x0A}, /* XH/VS OUTSEL */ + {0x344A, CRL_REG_LEN_08BIT, 0x17}, /* thsprepare */ + {0x344B, CRL_REG_LEN_08BIT, 0x00}, + {0x344C, CRL_REG_LEN_08BIT, 0x0F}, /* thstrail */ + {0x344D, CRL_REG_LEN_08BIT, 0x00}, + {0x344E, CRL_REG_LEN_08BIT, 0x17}, /* thstrail */ + {0x344F, CRL_REG_LEN_08BIT, 0x00}, + {0x3150, CRL_REG_LEN_08BIT, 0x03}, + {0x3450, CRL_REG_LEN_08BIT, 0x47}, /* tclkzero */ + {0x3451, CRL_REG_LEN_08BIT, 0x00}, + {0x3452, 
CRL_REG_LEN_08BIT, 0x0F}, /* tclkprepare */ + {0x3453, CRL_REG_LEN_08BIT, 0x00}, + {0x3454, CRL_REG_LEN_08BIT, 0x0F}, /* tlpx */ + {0x3455, CRL_REG_LEN_08BIT, 0x00}, + {0x3358, CRL_REG_LEN_08BIT, 0x06}, /* fixed setting */ + {0x3359, CRL_REG_LEN_08BIT, 0xE1}, + {0x335A, CRL_REG_LEN_08BIT, 0x11}, + {0x305C, CRL_REG_LEN_08BIT, 0x18}, /* incksel1 */ + {0x305D, CRL_REG_LEN_08BIT, 0x03}, /* incksel2 */ + {0x305E, CRL_REG_LEN_08BIT, 0x20}, /* incksel3 */ + {0x315E, CRL_REG_LEN_08BIT, 0x1A}, /* incksel5 */ + {0x305F, CRL_REG_LEN_08BIT, 0x01}, /* incksel4 */ + {0x3360, CRL_REG_LEN_08BIT, 0x1E}, + {0x3361, CRL_REG_LEN_08BIT, 0x61}, + {0x3362, CRL_REG_LEN_08BIT, 0x10}, + {0x3164, CRL_REG_LEN_08BIT, 0x1A}, /* incksel6 */ + {0x3070, CRL_REG_LEN_08BIT, 0x02}, + {0x3071, CRL_REG_LEN_08BIT, 0x11}, + {0x3472, CRL_REG_LEN_08BIT, 0x9C}, /* X_OUT_SIZE */ + {0x3473, CRL_REG_LEN_08BIT, 0x07}, + {0x317C, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT2 */ + {0x317E, CRL_REG_LEN_08BIT, 0x00}, + {0x3480, CRL_REG_LEN_08BIT, 0x49}, /* inclsel7 */ + {0x309B, CRL_REG_LEN_08BIT, 0x10}, + {0x309C, CRL_REG_LEN_08BIT, 0x22}, + {0x30A2, CRL_REG_LEN_08BIT, 0x02}, + {0x30A6, CRL_REG_LEN_08BIT, 0x20}, + {0x30A8, CRL_REG_LEN_08BIT, 0x20}, + {0x30AA, CRL_REG_LEN_08BIT, 0x20}, + {0x30AC, CRL_REG_LEN_08BIT, 0x20}, + {0x30B0, CRL_REG_LEN_08BIT, 0x43}, + {0x33B0, CRL_REG_LEN_08BIT, 0x50}, + {0x33B2, CRL_REG_LEN_08BIT, 0x1A}, + {0x33B3, CRL_REG_LEN_08BIT, 0x04}, + {0x32B8, CRL_REG_LEN_08BIT, 0x50}, + {0x32B9, CRL_REG_LEN_08BIT, 0x10}, + {0x32BA, CRL_REG_LEN_08BIT, 0x00}, + {0x32BB, CRL_REG_LEN_08BIT, 0x04}, + {0x32C8, CRL_REG_LEN_08BIT, 0x50}, + {0x32C9, CRL_REG_LEN_08BIT, 0x10}, + {0x32CA, CRL_REG_LEN_08BIT, 0x00}, + {0x32CB, CRL_REG_LEN_08BIT, 0x04}, + {0x31EC, CRL_REG_LEN_08BIT, 0x0E}, /* ADBIT3 */ + /* WINDOW CROPPING */ + {0x303C, CRL_REG_LEN_08BIT, 0x01}, + {0x303D, CRL_REG_LEN_08BIT, 0x00}, + {0x303E, CRL_REG_LEN_08BIT, 0x48}, + {0x303F, CRL_REG_LEN_08BIT, 0x04}, +}; + +static struct crl_register_write_rep imx290_1952_3435_37MHZ_CROPPING[] = { + /*TODO need a test if necessary to open XMSTA*/ + {0x3000, CRL_REG_LEN_08BIT, 0x01}, /* reset to standby mode */ + {0x3002, CRL_REG_LEN_08BIT, 0x01}, /* default:reset to slave mode */ + {0x3005, CRL_REG_LEN_08BIT, 0x00}, /* ADBIT */ + {0x3405, CRL_REG_LEN_08BIT, 0x00}, /* repetition */ + {0x3106, CRL_REG_LEN_08BIT, 0x33}, + {0x3007, CRL_REG_LEN_08BIT, 0x00}, /* H/V verse and WINMODE */ + {0x3407, CRL_REG_LEN_08BIT, 0x03}, /* physical lane num(fixed) */ + {0x3009, CRL_REG_LEN_08BIT, 0x00}, /* FRSEL FDG_SEL */ + {0x300A, CRL_REG_LEN_08BIT, 0x3C}, /* BLKLEVEL */ + {0x300C, CRL_REG_LEN_08BIT, 0x21}, + {0x300F, CRL_REG_LEN_08BIT, 0x00}, /* fixed setting */ + {0x3010, CRL_REG_LEN_08BIT, 0x21}, + {0x3012, CRL_REG_LEN_08BIT, 0x64}, + {0x3414, CRL_REG_LEN_08BIT, 0x0A}, /* OPB_SIZE_V */ + {0x3415, CRL_REG_LEN_08BIT, 0x00}, + {0x3016, CRL_REG_LEN_08BIT, 0x09}, + {0x3018, CRL_REG_LEN_08BIT, 0x65}, /* VMAX */ + {0x3019, CRL_REG_LEN_08BIT, 0x04}, + {0x3418, CRL_REG_LEN_08BIT, 0x55}, /* Y_OUT_SIZE */ + {0x3419, CRL_REG_LEN_08BIT, 0x11}, + {0x3119, CRL_REG_LEN_08BIT, 0x9E}, + {0x301C, CRL_REG_LEN_08BIT, 0x4C}, /* HMAX */ + {0x301D, CRL_REG_LEN_08BIT, 0x04}, + {0x311C, CRL_REG_LEN_08BIT, 0x1E}, + {0x311E, CRL_REG_LEN_08BIT, 0x08}, + {0x3020, CRL_REG_LEN_08BIT, 0x04}, /* SHS1 */ + {0x3021, CRL_REG_LEN_08BIT, 0x00}, + {0x3024, CRL_REG_LEN_08BIT, 0x89}, /* SHS2 */ + {0x3025, CRL_REG_LEN_08BIT, 0x00}, + {0x3028, CRL_REG_LEN_08BIT, 0x93}, /* SHS3 */ + {0x3029, CRL_REG_LEN_08BIT, 0x01}, + {0x3128, CRL_REG_LEN_08BIT, 
0x05}, + {0x3129, CRL_REG_LEN_08BIT, 0x1D}, /* ADBIT1 */ + {0x332C, CRL_REG_LEN_08BIT, 0xD3}, + {0x332D, CRL_REG_LEN_08BIT, 0x10}, + {0x332E, CRL_REG_LEN_08BIT, 0x0D}, + {0x3030, CRL_REG_LEN_08BIT, 0x85}, /* RHS1 */ + {0x3031, CRL_REG_LEN_08BIT, 0x00}, + {0x3034, CRL_REG_LEN_08BIT, 0x92}, /* RHS2 */ + {0x3035, CRL_REG_LEN_08BIT, 0x00}, + {0x313D, CRL_REG_LEN_08BIT, 0x83}, + {0x3441, CRL_REG_LEN_08BIT, 0x0A}, /* CSI_DT_FMT */ + {0x3442, CRL_REG_LEN_08BIT, 0x0A}, + {0x3443, CRL_REG_LEN_08BIT, 0x03}, /* csi_lane_mode(fixed) */ + {0x3444, CRL_REG_LEN_08BIT, 0x20}, /* extck_freq */ + {0x3045, CRL_REG_LEN_08BIT, 0x05}, /* DOL sp */ + {0x3445, CRL_REG_LEN_08BIT, 0x25}, + {0x3046, CRL_REG_LEN_08BIT, 0x00}, /* ODBIT OPORTSEL */ + {0x3446, CRL_REG_LEN_08BIT, 0x77}, /* tclkpost */ + {0x3447, CRL_REG_LEN_08BIT, 0x00}, + {0x3448, CRL_REG_LEN_08BIT, 0x67}, /* thszero */ + {0x3449, CRL_REG_LEN_08BIT, 0x00}, + {0x304B, CRL_REG_LEN_08BIT, 0x0A}, /* XH/VS OUTSEL */ + {0x344A, CRL_REG_LEN_08BIT, 0x47}, /* thsprepare */ + {0x344B, CRL_REG_LEN_08BIT, 0x00}, + {0x344C, CRL_REG_LEN_08BIT, 0x37}, /* thstrail */ + {0x344D, CRL_REG_LEN_08BIT, 0x00}, + {0x344E, CRL_REG_LEN_08BIT, 0x3F}, /* thstrail */ + {0x344F, CRL_REG_LEN_08BIT, 0x00}, + {0x3150, CRL_REG_LEN_08BIT, 0x03}, + {0x3450, CRL_REG_LEN_08BIT, 0xFF}, /* tclkzero */ + {0x3451, CRL_REG_LEN_08BIT, 0x00}, + {0x3452, CRL_REG_LEN_08BIT, 0x3F}, /* tclkprepare */ + {0x3453, CRL_REG_LEN_08BIT, 0x00}, + {0x3454, CRL_REG_LEN_08BIT, 0x37}, /* tlpx */ + {0x3455, CRL_REG_LEN_08BIT, 0x00}, + {0x3358, CRL_REG_LEN_08BIT, 0x06}, /* fixed setting */ + {0x3359, CRL_REG_LEN_08BIT, 0xE1}, + {0x335A, CRL_REG_LEN_08BIT, 0x11}, + {0x305C, CRL_REG_LEN_08BIT, 0x18}, /* incksel1 */ + {0x305D, CRL_REG_LEN_08BIT, 0x03}, /* incksel2 */ + {0x305E, CRL_REG_LEN_08BIT, 0x20}, /* incksel3 */ + {0x315E, CRL_REG_LEN_08BIT, 0x1A}, /* incksel5 */ + {0x305F, CRL_REG_LEN_08BIT, 0x01}, /* incksel4 */ + {0x3360, CRL_REG_LEN_08BIT, 0x1E}, + {0x3361, CRL_REG_LEN_08BIT, 0x61}, + {0x3362, CRL_REG_LEN_08BIT, 0x10}, + {0x3164, CRL_REG_LEN_08BIT, 0x1A}, /* incksel6 */ + {0x3070, CRL_REG_LEN_08BIT, 0x02}, + {0x3071, CRL_REG_LEN_08BIT, 0x11}, + {0x3472, CRL_REG_LEN_08BIT, 0xA0}, /* X_OUT_SIZE */ + {0x3473, CRL_REG_LEN_08BIT, 0x07}, + {0x347B, CRL_REG_LEN_08BIT, 0x23}, + {0x317C, CRL_REG_LEN_08BIT, 0x12}, /* ADBIT2 */ + {0x317E, CRL_REG_LEN_08BIT, 0x00}, + {0x3480, CRL_REG_LEN_08BIT, 0x49}, /* inclsel7 */ + {0x309B, CRL_REG_LEN_08BIT, 0x10}, + {0x309C, CRL_REG_LEN_08BIT, 0x22}, + {0x30A2, CRL_REG_LEN_08BIT, 0x02}, + {0x30A6, CRL_REG_LEN_08BIT, 0x20}, + {0x30A8, CRL_REG_LEN_08BIT, 0x20}, + {0x30AA, CRL_REG_LEN_08BIT, 0x20}, + {0x30AC, CRL_REG_LEN_08BIT, 0x20}, + {0x30B0, CRL_REG_LEN_08BIT, 0x43}, + {0x33B0, CRL_REG_LEN_08BIT, 0x50}, + {0x33B2, CRL_REG_LEN_08BIT, 0x1A}, + {0x33B3, CRL_REG_LEN_08BIT, 0x04}, + {0x32B8, CRL_REG_LEN_08BIT, 0x50}, + {0x32B9, CRL_REG_LEN_08BIT, 0x10}, + {0x32BA, CRL_REG_LEN_08BIT, 0x00}, + {0x32BB, CRL_REG_LEN_08BIT, 0x04}, + {0x32C8, CRL_REG_LEN_08BIT, 0x50}, + {0x32C9, CRL_REG_LEN_08BIT, 0x10}, + {0x32CA, CRL_REG_LEN_08BIT, 0x00}, + {0x32CB, CRL_REG_LEN_08BIT, 0x04}, + {0x31EC, CRL_REG_LEN_08BIT, 0x37}, /* ADBIT3 */ +}; + +static struct crl_register_write_rep imx290_streamon_regs[] = { + {IMX290_REG_STANDBY, CRL_REG_LEN_08BIT, 0x00}, + {0x00, CRL_REG_LEN_DELAY, 50, 0x00}, /* Add a 50ms delay */ + {IMX290_REG_XMSTA, CRL_REG_LEN_08BIT, 0x00}, +}; + +static struct crl_register_write_rep imx290_streamoff_regs[] = { + {IMX290_REG_STANDBY, CRL_REG_LEN_08BIT, 0x01}, + {IMX290_REG_XMSTA, 
CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_arithmetic_ops imx290_hflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + } +}; + +static struct crl_dynamic_register_access imx290_h_flip_regs[] = { + { + .address = 0x3007, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(imx290_hflip_ops), + .ops = imx290_hflip_ops, + .mask = 0x2, + } +}; + +static struct crl_dynamic_register_access imx290_v_flip_regs[] = { + { + .address = 0x3007, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x1, + } +}; + +static struct crl_dynamic_register_access imx290_ana_gain_global_regs[] = { + { + .address = 0x3014, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +/* shs1[17:0] = fll - exposure - 1 */ +static struct crl_arithmetic_ops imx290_shs1_lsb_ops[] = { + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + } +}; + +static struct crl_arithmetic_ops imx290_shs1_msb0_ops[] = { + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 8, + } +}; + +static struct crl_arithmetic_ops imx290_shs1_msb1_ops[] = { + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + .operand.entity_val = V4L2_CID_FRAME_LENGTH_LINES, + }, + { + .op = CRL_SUBTRACT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 16, + }, + { + .op = CRL_BITWISE_AND, + .operand.entity_val = 0x03, + } +}; + +static struct crl_dynamic_register_access imx290_shs1_regs[] = { + { + .address = 0x3020, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx290_shs1_lsb_ops), + .ops = imx290_shs1_lsb_ops, + .mask = 0xff, + }, + { + .address = 0x3021, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx290_shs1_msb0_ops), + .ops = imx290_shs1_msb0_ops, + .mask = 0xff, + }, + { + .address = 0x3022, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx290_shs1_msb1_ops), + .ops = imx290_shs1_msb1_ops, + .mask = 0xff, + }, +}; + +static struct crl_arithmetic_ops imx290_fll_msb_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + } +}; + +static struct crl_arithmetic_ops imx290_fll_hsb_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 16, + } +}; + +static struct crl_dynamic_register_access imx290_fll_regs[] = { + /* + * Use 8bits access since 24bits or 32bits access will fail + * TODO: root cause the 24bits and 32bits access issues + */ + { + .address = 0x3018, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, + { + .address = 0x3019, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx290_fll_msb_ops), + .ops = imx290_fll_msb_ops, + .mask = 0xff, + }, + { + .address = 0x301A, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(imx290_fll_hsb_ops), + .ops = imx290_fll_hsb_ops, + .mask = 0x3, + }, +}; + +static struct 
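/* + * Worked example for the shs1 ops above (illustration only, assuming + * the ops evaluate the commented formula): with frame_length_lines = + * 1112 and a requested exposure of 1000 lines, shs1 = 1112 - 1000 - 1 + * = 111 = 0x00006F, so 0x3020 = 0x6F, 0x3021 = 0x00 and 0x3022 = 0x00 + * (bits [17:16] masked with 0x03). + */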
crl_dynamic_register_access imx290_llp_regs[] = { + { + .address = 0x301C, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + } +}; + +static struct crl_sensor_detect_config imx290_sensor_detect_regset[] = { + { + .reg = { 0x348F, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x348E, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration imx290_pll_configurations[] = { + /* + * IMX290 supports only 37.125MHz and 74.25MHz input clocks. + * IPU4 supports up to 38.4MHz, however the sensor module we use + * has its own oscillator. + * The "input_clk" value is specified here for reference. + */ + { + .input_clk = 37125000, + .op_sys_clk = 222750000,/* 445500000/2 */ + .bitsperpixel = 12, + .pixel_rate_csi = 148500000, + .pixel_rate_pa = 148500000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx290_pll_445mbps), + .pll_regs = imx290_pll_445mbps, + }, + { + .input_clk = 37125000, + .op_sys_clk = 445500000,/* 891000000/2 */ + .bitsperpixel = 10, + .pixel_rate_csi = 356400000, + .pixel_rate_pa = 356400000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx290_pll_891mbps), + .pll_regs = imx290_pll_891mbps, + } +}; + +/* Temporarily use a single rect range */ +static struct crl_subdev_rect_rep imx290_1948_1096_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 3435, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 3435, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 3435, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1948, + .out_rect.height = 1096, + } +}; + +static struct crl_subdev_rect_rep imx290_1952_3435_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 3435, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 3435, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1952, + .in_rect.height = 3435, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 3435, + } +}; + +static struct crl_mode_rep imx290_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(imx290_1948_1096_rects), + .sd_rects = imx290_1948_1096_rects, + .binn_hor = 1, + .binn_vert = 3, + .scale_m = 1, + .width = 1948, + .height = 1096, + .min_llp = 2220, + .min_fll = 1112, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx290_1948_1096_37MHZ_CROPPING), + .mode_regs = imx290_1948_1096_37MHZ_CROPPING, + }, + { + .sd_rects_items = ARRAY_SIZE(imx290_1952_3435_rects), + .sd_rects = imx290_1952_3435_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1952, + .height = 3435, + .min_llp = 2220, + .min_fll = 1112, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx290_1952_3435_37MHZ_CROPPING), + .mode_regs = imx290_1952_3435_37MHZ_CROPPING, + }, +}; + +static struct crl_sensor_subdev_config imx290_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx290 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx290 pixel array", + } +}; + +static struct crl_sensor_limits imx290_sensor_limits = { + .x_addr_min = 0,
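/* + * Informal cross-check of the PLL table above: pixel_rate_csi matches + * link rate * lanes / bits per pixel, i.e. 445.5 Mbps * 4 / 12 = + * 148.5 Mpix/s and 891 Mbps * 4 / 10 = 356.4 Mpix/s. + */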
+ .y_addr_min = 0, + .x_addr_max = 1952, + .y_addr_max = 3435, + .min_frame_length_lines = 320, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 380, + .max_line_length_pixels = 32752, +}; + +static struct crl_flip_data imx290_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + } +}; + +static struct crl_csi_data_fmt imx290_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx290_fmt_raw10), + .regs = imx290_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx290_fmt_raw10), + .regs = imx290_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx290_fmt_raw10), + .regs = imx290_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx290_fmt_raw10), + .regs = imx290_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx290_fmt_raw12), + .regs = imx290_fmt_raw12, + }, + { + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .pixel_order = CRL_PIXEL_ORDER_RGGB, /*default pixel order*/ + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx290_fmt_raw12), + .regs = imx290_fmt_raw12, + }, + { + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx290_fmt_raw12), + .regs = imx290_fmt_raw12, + }, + { + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx290_fmt_raw12), + .regs = imx290_fmt_raw12, + } +}; + +static struct crl_v4l2_ctrl imx290_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = 
CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx290_h_flip_regs), + .regs = imx290_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx290_v_flip_regs), + .regs = imx290_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 240, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx290_ana_gain_global_regs), + .regs = imx290_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = IMX290_MAX_SHS1, + .data.std_data.step = 1, + .data.std_data.def = 0x264, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx290_shs1_regs), + .regs = imx290_shs1_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 720, + .data.std_data.max = IMX290_VMAX, + .data.std_data.step = 1, + .data.std_data.def = 1097, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx290_fll_regs), + .regs = imx290_fll_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1948, + .data.std_data.max = IMX290_HMAX, + .data.std_data.step = 1, + .data.std_data.def = 1948, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx290_llp_regs), + .regs = imx290_llp_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +static struct crl_arithmetic_ops imx290_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops imx290_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = 
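/* + * Informal timing cross-check for the FLL/LLP controls above: frame + * period = LLP * FLL / pixel rate; the 1948x1096 mode programs + * HMAX = 0x1130 (4400) and VMAX = 0x0465 (1125), giving + * 4400 * 1125 / 148.5 MHz ~= 33.3 ms, i.e. the 30 fps noted for the + * 445 Mbps PLL setting. + */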
CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc imx290_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(imx290_frame_desc_width_ops), + .ops = imx290_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(imx290_frame_desc_height_ops), + .ops = imx290_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, +}; + +static struct crl_power_seq_entity imx290_power_items[] = { + /* If your sensor uses IPU reference clock, make sure it's enabled here. */ + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + }, +}; + +struct crl_sensor_configuration imx290_crl_configuration = { + + .power_items = ARRAY_SIZE(imx290_power_items), + .power_entities = imx290_power_items, + + .powerup_regs_items = ARRAY_SIZE(imx290_powerup_standby), + .powerup_regs = imx290_powerup_standby, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(imx290_sensor_detect_regset), + .id_regs = imx290_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx290_sensor_subdevs), + .subdevs = imx290_sensor_subdevs, + + .sensor_limits = &imx290_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx290_pll_configurations), + .pll_configs = imx290_pll_configurations, + + .modes_items = ARRAY_SIZE(imx290_modes), + .modes = imx290_modes, + + .streamon_regs_items = ARRAY_SIZE(imx290_streamon_regs), + .streamon_regs = imx290_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx290_streamoff_regs), + .streamoff_regs = imx290_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx290_v4l2_ctrls), + .v4l2_ctrl_bank = imx290_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx290_crl_csi_data_fmt), + .csi_fmts = imx290_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx290_flip_configurations), + .flip_data = imx290_flip_configurations, + + .frame_desc_entries = ARRAY_SIZE(imx290_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = imx290_frame_desc, +}; + +#endif /* __CRLMODULE_IMX290_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx318_configuration.h b/drivers/media/i2c/crlmodule/crl_imx318_configuration.h new file mode 100644 index 000000000000..631a6d10400f --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx318_configuration.h @@ -0,0 +1,1050 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation + * + * Author: Jouni Ukkonen + * + */ +#ifndef __CRLMODULE_IMX318_CONFIGURATION_H_ +#define __CRLMODULE_IMX318_CONFIGURATION_H_ + +#include "crlmodule-nvm.h" +#include "crlmodule-sensor-ds.h" + +static const struct crl_register_write_rep imx318_pll_1164mbps[] = { + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /* 24 Mhz */ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0111, CRL_REG_LEN_08BIT, 0x02 }, /* 2 = DPHY, 3 = CPHY */ + { 0x0112, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0113, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x4D }, + { 0x0309, CRL_REG_LEN_08BIT, 0x0A }, + { 0x030B, CRL_REG_LEN_08BIT, 0x02 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x04 }, + {
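/* + * Informal cross-check: 0x0820/0x0821 appear to hold the total link + * rate in Mbps (0x1230 = 4656 = 4 lanes * 1164 Mbps here; the + * 1920 Mbps tables below use 0x1E00 = 7680 = 4 * 1920). + */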
0x030E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030F, CRL_REG_LEN_08BIT, 0x84 }, + { 0x0820, CRL_REG_LEN_08BIT, 0x12 }, + { 0x0821, CRL_REG_LEN_08BIT, 0x30 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static const struct crl_register_write_rep imx318_pll_8_1164mbps[] = { + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /* 24 Mhz */ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0111, CRL_REG_LEN_08BIT, 0x02 }, /* 2 = DPHY, 3 = CPHY */ + { 0x0112, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0113, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x4D }, + { 0x0309, CRL_REG_LEN_08BIT, 0x08 }, + { 0x030B, CRL_REG_LEN_08BIT, 0x02 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x04 }, + { 0x030E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030F, CRL_REG_LEN_08BIT, 0x84 }, + { 0x0820, CRL_REG_LEN_08BIT, 0x12 }, + { 0x0821, CRL_REG_LEN_08BIT, 0x30 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static const struct crl_register_write_rep imx318_pll_1920mbps[] = { + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /* 24 Mhz */ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0111, CRL_REG_LEN_08BIT, 0x02 }, /* 2 = DPHY, 3 = CPHY */ + { 0x0112, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0113, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x4D }, + { 0x0309, CRL_REG_LEN_08BIT, 0x0A }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x04 }, + { 0x030E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030F, CRL_REG_LEN_08BIT, 0x40 }, + { 0x0820, CRL_REG_LEN_08BIT, 0x1E }, + { 0x0821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static const struct crl_register_write_rep imx318_pll_8_1920mbps[] = { + { 0x0136, CRL_REG_LEN_08BIT, 0x18 }, /* 24 Mhz */ + { 0x0137, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0111, CRL_REG_LEN_08BIT, 0x02 }, /* 2 = DPHY, 3 = CPHY */ + { 0x0112, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0113, CRL_REG_LEN_08BIT, 0x0A }, + { 0x0114, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x05 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0307, CRL_REG_LEN_08BIT, 0x4D }, + { 0x0309, CRL_REG_LEN_08BIT, 0x08 }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x04 }, + { 0x030E, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030F, CRL_REG_LEN_08BIT, 0x40 }, + { 0x0820, CRL_REG_LEN_08BIT, 0x1E }, + { 0x0821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0822, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0823, CRL_REG_LEN_08BIT, 0x00 }, +}; + + +static const struct crl_register_write_rep imx318_powerup_regset[] = { + { 0x3067, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x1B }, + { 0x46C2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4877, CRL_REG_LEN_08BIT, 0x11 }, + { 0x487B, CRL_REG_LEN_08BIT, 0x4D }, + { 0x487F, CRL_REG_LEN_08BIT, 0x3B }, + { 0x4883, CRL_REG_LEN_08BIT, 0xB4 }, + { 0x4C6F, CRL_REG_LEN_08BIT, 0x5E }, + { 0x5113, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x5115, CRL_REG_LEN_08BIT, 0xF6 }, + { 0x5125, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x5127, CRL_REG_LEN_08BIT, 0xF8 }, + { 0x51CF, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x51E9, CRL_REG_LEN_08BIT, 0xF4 }, + { 0x5483, 
CRL_REG_LEN_08BIT, 0x7A }, + { 0x5485, CRL_REG_LEN_08BIT, 0x7C }, + { 0x5495, CRL_REG_LEN_08BIT, 0x7A }, + { 0x5497, CRL_REG_LEN_08BIT, 0x7F }, + { 0x5515, CRL_REG_LEN_08BIT, 0xC3 }, + { 0x5517, CRL_REG_LEN_08BIT, 0xC7 }, + { 0x552B, CRL_REG_LEN_08BIT, 0x7A }, + { 0x5535, CRL_REG_LEN_08BIT, 0x7A }, + { 0x5A35, CRL_REG_LEN_08BIT, 0x1B }, + { 0x5C13, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5D89, CRL_REG_LEN_08BIT, 0xB1 }, + { 0x5D8B, CRL_REG_LEN_08BIT, 0x2C }, + { 0x5D8D, CRL_REG_LEN_08BIT, 0x61 }, + { 0x5D8F, CRL_REG_LEN_08BIT, 0xE1 }, + { 0x5D91, CRL_REG_LEN_08BIT, 0x4D }, + { 0x5D93, CRL_REG_LEN_08BIT, 0xB4 }, + { 0x5D95, CRL_REG_LEN_08BIT, 0x41 }, + { 0x5D97, CRL_REG_LEN_08BIT, 0x96 }, + { 0x5D99, CRL_REG_LEN_08BIT, 0x37 }, + { 0x5D9B, CRL_REG_LEN_08BIT, 0x81 }, + { 0x5D9D, CRL_REG_LEN_08BIT, 0x31 }, + { 0x5D9F, CRL_REG_LEN_08BIT, 0x71 }, + { 0x5DA1, CRL_REG_LEN_08BIT, 0x2B }, + { 0x5DA3, CRL_REG_LEN_08BIT, 0x64 }, + { 0x5DA5, CRL_REG_LEN_08BIT, 0x27 }, + { 0x5DA7, CRL_REG_LEN_08BIT, 0x5A }, + { 0x6009, CRL_REG_LEN_08BIT, 0x03 }, + { 0x613A, CRL_REG_LEN_08BIT, 0x05 }, + { 0x613C, CRL_REG_LEN_08BIT, 0x23 }, + { 0x6142, CRL_REG_LEN_08BIT, 0x02 }, + { 0x6143, CRL_REG_LEN_08BIT, 0x62 }, + { 0x6144, CRL_REG_LEN_08BIT, 0x89 }, + { 0x6145, CRL_REG_LEN_08BIT, 0x0A }, + { 0x6146, CRL_REG_LEN_08BIT, 0x24 }, + { 0x6147, CRL_REG_LEN_08BIT, 0x28 }, + { 0x6148, CRL_REG_LEN_08BIT, 0x90 }, + { 0x6149, CRL_REG_LEN_08BIT, 0xA2 }, + { 0x614A, CRL_REG_LEN_08BIT, 0x40 }, + { 0x614B, CRL_REG_LEN_08BIT, 0x8A }, + { 0x614C, CRL_REG_LEN_08BIT, 0x01 }, + { 0x614D, CRL_REG_LEN_08BIT, 0x12 }, + { 0x614E, CRL_REG_LEN_08BIT, 0x2C }, + { 0x614F, CRL_REG_LEN_08BIT, 0x98 }, + { 0x6150, CRL_REG_LEN_08BIT, 0xA2 }, + { 0x615D, CRL_REG_LEN_08BIT, 0x37 }, + { 0x615E, CRL_REG_LEN_08BIT, 0xE6 }, + { 0x615F, CRL_REG_LEN_08BIT, 0x4B }, + { 0x616C, CRL_REG_LEN_08BIT, 0x41 }, + { 0x616D, CRL_REG_LEN_08BIT, 0x05 }, + { 0x616E, CRL_REG_LEN_08BIT, 0x48 }, + { 0x616F, CRL_REG_LEN_08BIT, 0xC5 }, + { 0x6174, CRL_REG_LEN_08BIT, 0xB9 }, + { 0x6175, CRL_REG_LEN_08BIT, 0x42 }, + { 0x6176, CRL_REG_LEN_08BIT, 0x44 }, + { 0x6177, CRL_REG_LEN_08BIT, 0xC3 }, + { 0x6178, CRL_REG_LEN_08BIT, 0x81 }, + { 0x6179, CRL_REG_LEN_08BIT, 0x78 }, + { 0x6182, CRL_REG_LEN_08BIT, 0x15 }, + { 0x6A5F, CRL_REG_LEN_08BIT, 0x03 }, + { 0x9302, CRL_REG_LEN_08BIT, 0xFF }, +}; + +static const struct crl_register_write_rep imx318_mode_full[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x15 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x10 }, + { 0x034B, CRL_REG_LEN_08BIT, 0x0F }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3010, CRL_REG_LEN_08BIT, 0x65 }, + { 0x3011, CRL_REG_LEN_08BIT, 0x11 }, + { 0x30FC, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30FD, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3194, CRL_REG_LEN_08BIT, 0x01 }, + { 0x31A0, CRL_REG_LEN_08BIT, 0x00 }, + { 0x31A1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4711, CRL_REG_LEN_08BIT, 0x00 }, + { 0x6669, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, 
CRL_REG_LEN_08BIT, 0x10 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x15 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x10 }, + { 0x040F, CRL_REG_LEN_08BIT, 0x10 }, + { 0x034C, CRL_REG_LEN_08BIT, 0x15 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x70 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x10 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3031, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3033, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3035, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3037, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3039, CRL_REG_LEN_08BIT, 0x00 }, + { 0x303B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x306C, CRL_REG_LEN_08BIT, 0x00 }, + { 0x306E, CRL_REG_LEN_08BIT, 0x0D }, + { 0x306F, CRL_REG_LEN_08BIT, 0x56 }, + { 0x6636, CRL_REG_LEN_08BIT, 0x00 }, + { 0x6637, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3066, CRL_REG_LEN_08BIT, 0x00 }, + { 0x7B63, CRL_REG_LEN_08BIT, 0x00 }, + { 0x56FB, CRL_REG_LEN_08BIT, 0x50 }, + { 0x56FF, CRL_REG_LEN_08BIT, 0x50 }, + { 0x9323, CRL_REG_LEN_08BIT, 0x10 }, +}; + + +static const struct crl_register_write_rep imx318_mode_uhd[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x15 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0E }, + { 0x034B, CRL_REG_LEN_08BIT, 0x0F }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3010, CRL_REG_LEN_08BIT, 0x65 }, + { 0x3011, CRL_REG_LEN_08BIT, 0x11 }, + { 0x30FC, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30FD, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3194, CRL_REG_LEN_08BIT, 0x00 }, + { 0x31A0, CRL_REG_LEN_08BIT, 0x00 }, + { 0x31A1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4711, CRL_REG_LEN_08BIT, 0x00 }, + { 0x6669, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x16 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x68 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x3A }, + { 0x040C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x040D, CRL_REG_LEN_08BIT, 0xA0 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x0B }, + { 0x040F, CRL_REG_LEN_08BIT, 0x9C }, + { 0x034C, CRL_REG_LEN_08BIT, 0x0F }, + { 0x034D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x08 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x70 }, + { 0x3031, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3033, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3035, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3037, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3039, CRL_REG_LEN_08BIT, 0x00 }, + { 0x303B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x306C, CRL_REG_LEN_08BIT, 0x00 }, + { 0x306E, CRL_REG_LEN_08BIT, 0x0D }, + { 0x306F, CRL_REG_LEN_08BIT, 0x56 }, + { 0x6636, CRL_REG_LEN_08BIT, 0x00 }, + { 0x6637, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3066, CRL_REG_LEN_08BIT, 0x00 }, + { 0x7B63, CRL_REG_LEN_08BIT, 0x00 }, + { 0x56FB, CRL_REG_LEN_08BIT, 0x33 }, + { 0x56FF, CRL_REG_LEN_08BIT, 0x33 }, + { 0x9323, CRL_REG_LEN_08BIT, 0x16 }, +}; + +static const struct crl_register_write_rep imx318_mode_1080[] = { + { 0x0344, 
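/* + * Scaling note (informal): 0x0401 = 0x02 enables the scaler and + * 0x0405 holds the scale factor scale_m with ratio scale_m/16; in the + * UHD mode above 0x0405 = 0x16 (22), so the 5280-pixel crop maps to + * 5280 * 16 / 22 = 3840 output pixels, matching scale_m = 22 in the + * mode table. + */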
CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x15 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0E }, + { 0x034B, CRL_REG_LEN_08BIT, 0x0F }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x22 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3010, CRL_REG_LEN_08BIT, 0x65 }, + { 0x3011, CRL_REG_LEN_08BIT, 0x11 }, + { 0x30FC, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30FD, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3194, CRL_REG_LEN_08BIT, 0x00 }, + { 0x31A0, CRL_REG_LEN_08BIT, 0x00 }, + { 0x31A1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4711, CRL_REG_LEN_08BIT, 0x00 }, + { 0x6669, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x16 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, CRL_REG_LEN_08BIT, 0x34 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x1C }, + { 0x040C, CRL_REG_LEN_08BIT, 0x0A }, + { 0x040D, CRL_REG_LEN_08BIT, 0x50 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x05 }, + { 0x040F, CRL_REG_LEN_08BIT, 0xCE }, + { 0x034C, CRL_REG_LEN_08BIT, 0x07 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x80 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x034F, CRL_REG_LEN_08BIT, 0x38 }, + { 0x3031, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3033, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3035, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3037, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3039, CRL_REG_LEN_08BIT, 0x00 }, + { 0x303B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x306C, CRL_REG_LEN_08BIT, 0x00 }, + { 0x306E, CRL_REG_LEN_08BIT, 0x0D }, + { 0x306F, CRL_REG_LEN_08BIT, 0x56 }, + { 0x6636, CRL_REG_LEN_08BIT, 0x00 }, + { 0x6637, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3066, CRL_REG_LEN_08BIT, 0x00 }, + { 0x7B63, CRL_REG_LEN_08BIT, 0x00 }, + { 0x56FB, CRL_REG_LEN_08BIT, 0x33 }, + { 0x56FF, CRL_REG_LEN_08BIT, 0x33 }, + { 0x9323, CRL_REG_LEN_08BIT, 0x16 }, +}; + +static const struct crl_register_write_rep imx318_mode_720[] = { + { 0x0344, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0345, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0346, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0347, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0348, CRL_REG_LEN_08BIT, 0x15 }, + { 0x0349, CRL_REG_LEN_08BIT, 0x6F }, + { 0x034A, CRL_REG_LEN_08BIT, 0x0E }, + { 0x034B, CRL_REG_LEN_08BIT, 0x13 }, + { 0x0220, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0221, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0222, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0381, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0383, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0385, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0387, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0900, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0901, CRL_REG_LEN_08BIT, 0x44 }, + { 0x0902, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3010, CRL_REG_LEN_08BIT, 0x65 }, + { 0x3011, CRL_REG_LEN_08BIT, 0x11 }, + { 0x30FC, CRL_REG_LEN_08BIT, 0x00 }, + { 0x30FD, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3194, CRL_REG_LEN_08BIT, 0x00 }, + { 0x31A0, CRL_REG_LEN_08BIT, 0x00 }, + { 0x31A1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4711, CRL_REG_LEN_08BIT, 0x00 }, + { 0x6669, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0401, CRL_REG_LEN_08BIT, 0x02 }, + { 0x0404, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0405, CRL_REG_LEN_08BIT, 0x11 }, + { 0x0408, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0409, 
CRL_REG_LEN_08BIT, 0x04 }, + { 0x040A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x040B, CRL_REG_LEN_08BIT, 0x02 }, + { 0x040C, CRL_REG_LEN_08BIT, 0x05 }, + { 0x040D, CRL_REG_LEN_08BIT, 0x52 }, + { 0x040E, CRL_REG_LEN_08BIT, 0x02 }, + { 0x040F, CRL_REG_LEN_08BIT, 0xFE }, + { 0x034C, CRL_REG_LEN_08BIT, 0x05 }, + { 0x034D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x034E, CRL_REG_LEN_08BIT, 0x02 }, + { 0x034F, CRL_REG_LEN_08BIT, 0xD0 }, + { 0x3031, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3033, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3035, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3037, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3039, CRL_REG_LEN_08BIT, 0x00 }, + { 0x303B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x306C, CRL_REG_LEN_08BIT, 0x00 }, + { 0x306E, CRL_REG_LEN_08BIT, 0x0D }, + { 0x306F, CRL_REG_LEN_08BIT, 0x56 }, + { 0x6636, CRL_REG_LEN_08BIT, 0x00 }, + { 0x6637, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3066, CRL_REG_LEN_08BIT, 0x00 }, + { 0x7B63, CRL_REG_LEN_08BIT, 0x00 }, + { 0x56FB, CRL_REG_LEN_08BIT, 0x33 }, + { 0x56FF, CRL_REG_LEN_08BIT, 0x33 }, + { 0x9323, CRL_REG_LEN_08BIT, 0x16 }, +}; + +static struct crl_register_write_rep imx318_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep imx318_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_register_write_rep imx318_data_fmt_width10[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0a0a } +}; + +static struct crl_register_write_rep imx318_data_fmt_width8[] = { + { 0x0112, CRL_REG_LEN_16BIT, 0x0808 } +}; + +static struct crl_arithmetic_ops imx318_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_dynamic_register_access imx318_h_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x1, + }, +}; + +static struct crl_dynamic_register_access imx318_v_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(imx318_vflip_ops), + .ops = imx318_vflip_ops, + .mask = 0x2, + }, +}; + +static struct crl_dynamic_register_access imx318_ana_gain_global_regs[] = { + { + .address = 0x0204, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx318_exposure_regs[] = { + { + .address = 0x0202, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + } +}; + +static struct crl_dynamic_register_access imx318_fll_regs[] = { + { + .address = 0x0340, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx318_llp_regs[] = { + { + .address = 0x0342, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_sensor_detect_config imx318_sensor_detect_regset[] = { + { + .reg = { 0x0019, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 5, + }, + { + .reg = { 0x0018, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 5, + }, + { + .reg = { 0x0016, CRL_REG_LEN_16BIT, 0x0000ffff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration imx318_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 582000000, + .bitsperpixel = 10, + .pixel_rate_csi = 465600000, + .pixel_rate_pa = 799206000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx318_pll_1164mbps), + .pll_regs = imx318_pll_1164mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 582000000, + .bitsperpixel = 8, + .pixel_rate_csi = 
465600000, + .pixel_rate_pa = 799206000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx318_pll_8_1164mbps), + .pll_regs = imx318_pll_8_1164mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 960000000, + .bitsperpixel = 10, + .pixel_rate_csi = 768000000, + .pixel_rate_pa = 799206000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx318_pll_1920mbps), + .pll_regs = imx318_pll_1920mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 960000000, + .bitsperpixel = 8, + .pixel_rate_csi = 960000000, + .pixel_rate_pa = 799206000, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx318_pll_8_1920mbps), + .pll_regs = imx318_pll_8_1920mbps, + }, + +}; + +static struct crl_subdev_rect_rep imx318_full_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5488, 4112 }, + .out_rect = { 0, 0, 5488, 4112 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5488, 4112 }, + .out_rect = { 0, 0, 5488, 4112 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5488, 4112 }, + .out_rect = { 0, 0, 5488, 4112 }, + }, +}; + + +static struct crl_subdev_rect_rep imx318_uhd_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5488, 4112 }, + .out_rect = { 0, 512, 5280, 3088 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5280, 3088 }, + .out_rect = { 0, 0, 5280, 3088 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 5280, 3088 }, + .out_rect = { 0, 0, 3840, 2160 }, + }, +}; + + +static struct crl_subdev_rect_rep imx318_1080_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5488, 4112 }, + .out_rect = { 0, 512, 5488, 3088 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5488, 3088 }, + .out_rect = { 0, 0, 2744, 1544 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 2744, 1544 }, + .out_rect = { 0, 0, 1920, 1080 }, + }, +}; + +static struct crl_subdev_rect_rep imx318_720_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 5488, 4112 }, + .out_rect = { 0, 516, 5488, 3088 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 5488, 3088 }, + .out_rect = { 0, 0, 1372, 772 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 1372, 772 }, + .out_rect = { 0, 0, 1280, 720 }, + }, +}; + +static struct crl_mode_rep imx318_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(imx318_full_rects), + .sd_rects = imx318_full_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 5488, + .height = 4112, + .min_llp = 6224, + .min_fll = 4280, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx318_mode_full), + .mode_regs = imx318_mode_full, + }, + { + .sd_rects_items = ARRAY_SIZE(imx318_uhd_rects), + .sd_rects = imx318_uhd_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 22, + .width = 3840, + .height = 2160, + .min_llp = 6224, + .min_fll = 3622, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx318_mode_uhd), + .mode_regs = imx318_mode_uhd, + }, + { + .sd_rects_items = ARRAY_SIZE(imx318_1080_rects), + .sd_rects = imx318_1080_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 22, + .width = 1920, + .height = 1080, + .min_llp = 6224, + .min_fll = 1600, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx318_mode_1080), + .mode_regs = 
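/* + * Binning note (informal): 0x0901 appears to encode H/V binning in its + * two nibbles -- 0x11 (none) for the full and UHD modes, 0x22 (2x2) + * for 1080p and 0x44 (4x4) for 720p -- matching the binn_hor/binn_vert + * fields of these modes. + */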
imx318_mode_1080, + }, + { + .sd_rects_items = ARRAY_SIZE(imx318_720_rects), + .sd_rects = imx318_720_rects, + .binn_hor = 4, + .binn_vert = 4, + .scale_m = 17, + .width = 1280, + .height = 720, + .min_llp = 6224, + .min_fll = 904, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx318_mode_720), + .mode_regs = imx318_mode_720, + }, +}; + +static struct crl_sensor_subdev_config imx318_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "imx318 scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx318 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx318 pixel array", + }, +}; + +static struct crl_sensor_limits imx318_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 5488, + .y_addr_max = 4112, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6224, /*TBD*/ + .max_line_length_pixels = 32752, + .scaler_m_min = 16, + .scaler_m_max = 255, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data imx318_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, +}; + +static struct crl_csi_data_fmt imx318_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 1, + .regs = imx318_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx318_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx318_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = imx318_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx318_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx318_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx318_data_fmt_width8, + }, + { + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 8, + .regs = imx318_data_fmt_width8, + }, +}; + +static const s64 imx318_op_sys_clock[] = { 582000000, + 582000000, + 960000000, + 960000000, }; + +static struct crl_v4l2_ctrl imx318_vl42_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = + ARRAY_SIZE(imx318_pll_configurations) - 1, + .data.v4l2_int_menu.menu = imx318_op_sys_clock, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + 
.name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + }, + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 480, + .data.std_data.step = 1, + .data.std_data.def = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx318_ana_gain_global_regs), + .regs = imx318_ana_gain_global_regs, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx318_exposure_regs), + .regs = imx318_exposure_regs, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx318_h_flip_regs), + .regs = imx318_h_flip_regs, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx318_v_flip_regs), + .regs = imx318_v_flip_regs, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 4130, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx318_fll_regs), + .regs = imx318_fll_regs, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 6024, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 6024, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = ARRAY_SIZE(imx318_llp_regs), + .regs = imx318_llp_regs, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity 
imx318_power_items[] = { + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VANA", + .val = 2800000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VDIG", + .val = 1050000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VIO", + .val = 1800000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VAF", + .val = 3000000, + .delay = 2000, + }, + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .delay = 10700, + }, +}; + + +struct crl_sensor_configuration imx318_crl_configuration = { + + .power_items = ARRAY_SIZE(imx318_power_items), + .power_entities = imx318_power_items, + + .powerup_regs_items = ARRAY_SIZE(imx318_powerup_regset), + .powerup_regs = imx318_powerup_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(imx318_sensor_detect_regset), + .id_regs = imx318_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx318_sensor_subdevs), + .subdevs = imx318_sensor_subdevs, + + .sensor_limits = &imx318_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx318_pll_configurations), + .pll_configs = imx318_pll_configurations, + + .modes_items = ARRAY_SIZE(imx318_modes), + .modes = imx318_modes, + .fail_safe_mode_index = 0, + + .streamon_regs_items = ARRAY_SIZE(imx318_streamon_regs), + .streamon_regs = imx318_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx318_streamoff_regs), + .streamoff_regs = imx318_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx318_vl42_ctrls), + .v4l2_ctrl_bank = imx318_vl42_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx318_crl_csi_data_fmt), + .csi_fmts = imx318_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx318_flip_configurations), + .flip_data = imx318_flip_configurations, + +}; + +#endif /* __CRLMODULE_IMX318_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx477_common_regs.h b/drivers/media/i2c/crlmodule/crl_imx477_common_regs.h new file mode 100644 index 000000000000..eebb884df8c8 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx477_common_regs.h @@ -0,0 +1,1096 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Alexei Zavjalov + * + */ + +#ifndef __CRLMODULE_IMX477_COMMON_REGS_H_ +#define __CRLMODULE_IMX477_COMMON_REGS_H_ + +#include "crlmodule-sensor-ds.h" + +#define IMX477_CAPTURE_MODE_MAX 10 + +static struct crl_dynamic_register_access imx477_fll_regs[] = { + { + .address = 0x0340, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx477_llp_regs[] = { + { + .address = 0x0342, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access imx477_exposure_regs[] = { + { + .address = 0x0202, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + } +}; + +static struct crl_dynamic_register_access imx477_ana_gain_global_regs[] = { + { + .address = 0x0204, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xfff, + }, +}; + +static struct crl_dynamic_register_access imx477_wdr_switch_regs[] = { +}; + +static struct crl_arithmetic_ops imx477_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_dynamic_register_access imx477_h_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT |
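/* + * CRL_REG_READ_AND_UPDATE makes these accesses read-modify-write: only + * the bits in .mask are replaced, so HFLIP (bit 0) and VFLIP (bit 1, + * via the left-shift op) can be toggled in 0x0101 without clobbering + * each other. + */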
CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x1, + }, +}; + +static struct crl_dynamic_register_access imx477_v_flip_regs[] = { + { + .address = 0x0101, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(imx477_vflip_ops), + .ops = imx477_vflip_ops, + .mask = 0x2, + }, +}; + +static struct crl_dynamic_register_access imx477_test_pattern_regs[] = { + { + .address = 0x0600, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* 1st exposure for DOL */ +static struct crl_dynamic_register_access imx477_shs1_regs[] = { + { + .address = 0x00EA, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* 2nd exposure for DOL */ +static struct crl_dynamic_register_access imx477_shs2_regs[] = { + { + .address = 0x00EC, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* 3rd exposure for DOL */ +static struct crl_dynamic_register_access imx477_shs3_regs[] = { + { + .address = 0x00EE, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* Line number of 2nd frame readout start from XVS for DOL */ +static struct crl_dynamic_register_access imx477_rhs1_regs[] = { + { + .address = 0x00E6, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* Line number of 3rd frame readout start from XVS for DOL */ +static struct crl_dynamic_register_access imx477_rhs2_regs[] = { + { + .address = 0x00E8, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* set analog gain for 1st HDR frame */ +static struct crl_dynamic_register_access imx477_ana_gain_1st_regs[] = { + { + .address = 0x00F0, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* set analog gain for 2nd HDR frames */ +static struct crl_dynamic_register_access imx477_ana_gain_2nd_regs[] = { + { + .address = 0x00F2, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* set analog gain for 3rd HDR frames */ +static struct crl_dynamic_register_access imx477_ana_gain_3rd_regs[] = { + { + .address = 0x00F4, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* set digital gain for 1st HDR frames */ +static struct crl_dynamic_register_access imx477_dig_gain_1st_regs[] = { + { + .address = 0x00F6, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* set digital gain for 2nd HDR frames */ +static struct crl_dynamic_register_access imx477_dig_gain_2nd_regs[] = { + { + .address = 0x00F8, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +/* set digital gain for 3rd HDR frames */ +static struct crl_dynamic_register_access imx477_dig_gain_3rd_regs[] = { + { + .address = 0x00FA, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static const char * const imx477_test_patterns[] = { + "Disabled", + "Solid Colour", + "Eight Vertical Color Bars", + "Fade to Grey Color Bars", + "PN9", +}; + +static struct crl_v4l2_ctrl imx477_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + 
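/* No menu entries or registers are attached to LINK_FREQ here; the integer menu is presumably filled in by the CRL framework from imx477_pll_configurations at probe time. */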
.regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 720, + .data.std_data.max = 131071, + .data.std_data.step = 1, + .data.std_data.def = 8209, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_fll_regs), + .regs = imx477_fll_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1280, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 14612, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_llp_regs), + .regs = imx477_llp_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .name = "V4L2_CID_HFLIP", + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_h_flip_regs), + .regs = imx477_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .name = "V4L2_CID_VFLIP", + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_v_flip_regs), + .regs = imx477_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_TEST_PATTERN, + .name = "V4L2_CID_TEST_PATTERN", + .type = CRL_V4L2_CTRL_TYPE_MENU_ITEMS, + .data.v4l2_menu_items.menu = imx477_test_patterns, + .data.v4l2_menu_items.size = ARRAY_SIZE(imx477_test_patterns), + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 
ARRAY_SIZE(imx477_test_pattern_regs), + .regs = imx477_test_pattern_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .name = "V4L2_CID_ANALOGUE_GAIN", + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_ana_gain_global_regs), + .regs = imx477_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .name = "V4L2_CID_EXPOSURE", + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 5500, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_exposure_regs), + .regs = imx477_exposure_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_WDR_MODE, + .name = "V4L2_CID_WDR_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_wdr_switch_regs), + .regs = imx477_wdr_switch_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS1, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .name = "CRL_CID_EXPOSURE_SHS1", + .data.std_data.min = 4, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0X5500, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_shs1_regs), + .regs = imx477_shs1_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS2, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .name = "CRL_CID_EXPOSURE_SHS2", + .data.std_data.min = 4, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0X500, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_shs2_regs), + .regs = imx477_shs2_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS3, + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .name = "CRL_CID_EXPOSURE_SHS3", + .data.std_data.min = 4, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0X1000, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_shs3_regs), + .regs = imx477_shs3_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = 
CRL_CID_SENSOR_MODE, + .name = "CRL_CID_SENSOR_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = IMX477_CAPTURE_MODE_MAX - 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_RHS1, + .name = "CRL_CID_EXPOSURE_RHS1", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 6, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 0x1000, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_rhs1_regs), + .regs = imx477_rhs1_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_RHS2, + .name = "CRL_CID_EXPOSURE_RHS2", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 6, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 0x1500, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_rhs2_regs), + .regs = imx477_rhs2_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_ANALOG_GAIN_L, + .name = "CRL_CID_ANALOG_GAIN_L", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_ana_gain_1st_regs), + .regs = imx477_ana_gain_1st_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_ANALOG_GAIN_S, + .name = "CRL_CID_ANALOG_GAIN_S", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_ana_gain_2nd_regs), + .regs = imx477_ana_gain_2nd_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_ANALOG_GAIN_VS, + .name = "CRL_CID_ANALOG_GAIN_VS", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_ana_gain_3rd_regs), + .regs = imx477_ana_gain_3rd_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_L, + .name = "CRL_CID_DIGITAL_GAIN_L", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + 
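/* The _L/_S/_VS control suffixes follow the usual DOL naming (Long, Short and Very Short exposures), i.e. the 1st, 2nd and 3rd HDR frames programmed through the 0x00F0-0x00FA gain registers defined earlier. */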
.data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 64, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_dig_gain_1st_regs), + .regs = imx477_dig_gain_1st_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_S, + .name = "CRL_CID_DIGITAL_GAIN_S", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 64, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_dig_gain_2nd_regs), + .regs = imx477_dig_gain_2nd_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_VS, + .name = "CRL_CID_DIGITAL_GAIN_VS", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 0x978, + .data.std_data.step = 1, + .data.std_data.def = 64, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(imx477_dig_gain_3rd_regs), + .regs = imx477_dig_gain_3rd_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +static struct crl_register_write_rep imx477_streamon_regs[] = { + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x00, CRL_REG_LEN_DELAY, 20, 0x00}, /* Delay 20ms */ +}; + +static struct crl_register_write_rep imx477_streamoff_regs[] = { + {0x0100, CRL_REG_LEN_08BIT, 0x00}, + {0x00, CRL_REG_LEN_DELAY, 20, 0x00}, /* Delay 20ms */ +}; + +static struct crl_register_write_rep imx477_fmt_raw10[] = { + {0x0112, CRL_REG_LEN_08BIT, 0x0a}, /* FMT RAW10 */ + {0x0113, CRL_REG_LEN_08BIT, 0x0a}, + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, + {0x00FC, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FD, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FE, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FF, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ +}; + +static struct crl_register_write_rep imx477_fmt_raw12[] = { + {0x0112, CRL_REG_LEN_08BIT, 0x0c}, /* FMT RAW12 */ + {0x0113, CRL_REG_LEN_08BIT, 0x0c}, + {0x3F0D, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_csi_data_fmt imx477_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx477_fmt_raw10), + .regs = imx477_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx477_fmt_raw10), + .regs = imx477_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx477_fmt_raw10), + .regs = imx477_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 10, + .regs_items = ARRAY_SIZE(imx477_fmt_raw10), + .regs = imx477_fmt_raw10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 12, + .regs_items = ARRAY_SIZE(imx477_fmt_raw12), + .regs = imx477_fmt_raw12, + }, +}; + +static struct 
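/* Per-mode rectangle chains: the pixel array always reads out the full 4056x3040 area, and the binner stage then crops, bins or scales it to the mode's output size. */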
crl_subdev_rect_rep imx477_4056_3040_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4056, + .out_rect.height = 3040, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4056, + .out_rect.height = 3040, + } +}; + +static struct crl_subdev_rect_rep imx477_4056_2288_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4056, + .out_rect.height = 3040, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4056, + .out_rect.height = 2288, + } +}; + +static struct crl_subdev_rect_rep imx477_2832_1632_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4056, + .out_rect.height = 3040, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2832, + .out_rect.height = 1632, + } +}; + +static struct crl_subdev_rect_rep imx477_2028_1128_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4056, + .out_rect.height = 3040, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2028, + .out_rect.height = 1128, + } +}; + +static struct crl_subdev_rect_rep imx477_1296_768_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4056, + .out_rect.height = 3040, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1296, + .out_rect.height = 768, + } +}; + +static struct crl_subdev_rect_rep imx477_656_512_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4056, + .out_rect.height = 3040, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4056, + .in_rect.height = 3040, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 656, + .out_rect.height = 512, + } +}; + +static struct crl_register_write_rep imx477_pll_1200mbps[] = { + /* MIPI Settings */ + {0x0114, CRL_REG_LEN_08BIT, 0x01}, /* 2-lane Mode */ + + /* Clock Setting */ + {0x0301, CRL_REG_LEN_08BIT, 0x05}, /* The Pixel Clock Divider for IVTS */ + {0x0303, CRL_REG_LEN_08BIT, 
0x02}, /* The System Clock Divider for IVTS */ + {0x0305, CRL_REG_LEN_08BIT, 0x03}, /* The pre-PLL Clock Divider for IVTS */ + {0x0306, CRL_REG_LEN_08BIT, 0x01}, /* The PLL multiplier for IVTS [10:8] */ + {0x0307, CRL_REG_LEN_08BIT, 0x48}, /* The PLL multiplier for IVTS [7:0] */ + {0x0309, CRL_REG_LEN_08BIT, 0x0A}, /* The Pixel Clock Divider for IOPS */ + {0x030B, CRL_REG_LEN_08BIT, 0x01}, /* The System Clock Divider for IOPS */ + {0x030D, CRL_REG_LEN_08BIT, 0x02}, /* The pre-PLL Clock Divider for IOPS */ + {0x030E, CRL_REG_LEN_08BIT, 0x00}, /* The PLL multiplier for IOPS [10:8] */ + {0x030F, CRL_REG_LEN_08BIT, 0x7D}, /* The PLL multiplier for IOPS [7:0] */ + {0x0310, CRL_REG_LEN_08BIT, 0x01}, /* PLL mode select: Dual Mode */ + {0x0820, CRL_REG_LEN_08BIT, 0x09}, /* Output Data Rate, Mbps [31:24] */ + {0x0821, CRL_REG_LEN_08BIT, 0x60}, /* Output Data Rate, Mbps [23:16] */ + {0x0822, CRL_REG_LEN_08BIT, 0x00}, /* Output Data Rate, Mbps [15:8] */ + {0x0823, CRL_REG_LEN_08BIT, 0x00}, /* Output Data Rate, Mbps [7:0] */ + + /* Global Timing Setting */ + {0x080A, CRL_REG_LEN_08BIT, 0x00}, /* MIPI Global Timing (Tclk) [9:8] */ + {0x080B, CRL_REG_LEN_08BIT, 0x87}, /* MIPI Global Timing (Tclk) [7:0] */ + {0x080C, CRL_REG_LEN_08BIT, 0x00}, /* MIPI Global Timing (ths_prepare) */ + {0x080D, CRL_REG_LEN_08BIT, 0x4F}, /* MIPI Global Timing (ths_prepare) */ + {0x080E, CRL_REG_LEN_08BIT, 0x00}, /* MIPI Global Timing (ths_zero_min) */ + {0x080F, CRL_REG_LEN_08BIT, 0x87}, /* MIPI Global Timing (ths_zero_min) */ + {0x0810, CRL_REG_LEN_08BIT, 0x00}, /* MIPI Global Timing (ths_trail) */ + {0x0811, CRL_REG_LEN_08BIT, 0x5F}, /* MIPI Global Timing (ths_trail) */ + {0x0812, CRL_REG_LEN_08BIT, 0x00}, /* MIPI Global Timing (Tclk_trail_min)*/ + {0x0813, CRL_REG_LEN_08BIT, 0x5F}, /* MIPI Global Timing (Tclk_trail_min)*/ + {0x0814, CRL_REG_LEN_08BIT, 0x00}, /* MIPI Global Timing (Tclk_prepare) */ + {0x0815, CRL_REG_LEN_08BIT, 0x4F}, /* MIPI Global Timing (Tclk_prepare) */ + {0x0816, CRL_REG_LEN_08BIT, 0x01}, /* MIPI Global Timing (Tclk_zero) */ + {0x0817, CRL_REG_LEN_08BIT, 0x3F}, /* MIPI Global Timing (Tclk_zero) */ + {0x0818, CRL_REG_LEN_08BIT, 0x00}, /* MIPI Global Timing (Tlpx) */ + {0x0819, CRL_REG_LEN_08BIT, 0x3F}, /* MIPI Global Timing (Tlpx) */ + {0xE04C, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xE04D, CRL_REG_LEN_08BIT, 0x87}, /* Undocumented */ + {0xE04E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xE04F, CRL_REG_LEN_08BIT, 0x1F}, /* Undocumented */ + + /* Output Data Select Setting */ + {0x3E20, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x3E37, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* PowerSave Setting */ + {0x3F50, CRL_REG_LEN_08BIT, 0x00}, /* Power save: Disable */ + {0x3F56, CRL_REG_LEN_08BIT, 0x01}, + {0x3F57, CRL_REG_LEN_08BIT, 0x4F}, +}; + +static struct crl_pll_configuration imx477_pll_configurations[] = { + { + .input_clk = 19200000, + .op_sys_clk = 600000000, /* 1200mbps / 2 */ + .bitsperpixel = 10, + .pixel_rate_csi = 240000000, + /* pixel_rate = (MIPICLK*2 * CSILANES)/10 */ + .pixel_rate_pa = 240000000, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx477_pll_1200mbps), + .pll_regs = imx477_pll_1200mbps, + }, + { + .input_clk = 19200000, + .op_sys_clk = 600000000, /* 1200mbps / 2 */ + .bitsperpixel = 12, + .pixel_rate_csi = 240000000, + /* pixel_rate = (MIPICLK*2 * CSILANES)/10 */ + .pixel_rate_pa = 240000000, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(imx477_pll_1200mbps), + .pll_regs = 
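/* Worked example of the rate formula above: at 1200 Mbps per lane, MIPICLK = 600 MHz, so (600 MHz * 2 * 2 lanes) / 10 = 240 Mpix/s, matching pixel_rate_csi and pixel_rate_pa. */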
imx477_pll_1200mbps, + }, +}; + +static struct crl_sensor_subdev_config imx477_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "imx477 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "imx477 pixel array", + } +}; + +static struct crl_sensor_limits imx477_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 4056, + .y_addr_max = 3040, + .min_frame_length_lines = 320, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 380, + .max_line_length_pixels = 32752, +}; + +static struct crl_sensor_detect_config imx477_sensor_detect_regset[] = { + { + .reg = { 0x0016, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x0017, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + } +}; + +static struct crl_register_write_rep imx477_powerup_standby[] = { + {0x0100, CRL_REG_LEN_08BIT, 0x00}, + {0x00, CRL_REG_LEN_DELAY, 20, 0x00}, /* Delay 20ms */ +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity imx477_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 19200000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .undo_val = 1, + }, +}; + +static struct crl_arithmetic_ops imx477_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops imx477_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc imx477_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(imx477_frame_desc_width_ops), + .ops = imx477_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(imx477_frame_desc_height_ops), + .ops = imx477_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(imx477_frame_desc_width_ops), + .ops = imx477_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(imx477_frame_desc_height_ops), + .ops = imx477_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 1, + .csi2_data_type.entity_val = 0x12, + }, + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(imx477_frame_desc_width_ops), + .ops = imx477_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(imx477_frame_desc_height_ops), + .ops = imx477_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 2, + .csi2_data_type.entity_val = 0x12, + }, +}; + +#endif /* __CRLMODULE_IMX477_COMMON_REGS_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx477_master_configuration.h 
b/drivers/media/i2c/crlmodule/crl_imx477_master_configuration.h new file mode 100644 index 000000000000..10be93b07215 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx477_master_configuration.h @@ -0,0 +1,1375 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Alexei Zavjalov + * + */ + +#ifndef __CRLMODULE_IMX477_MASTER_CONFIGURATION_H_ +#define __CRLMODULE_IMX477_MASTER_CONFIGURATION_H_ + +#include "crl_imx477_common_regs.h" + +static struct crl_register_write_rep imx477_onetime_init_regset_master[] = { + {0x0103, CRL_REG_LEN_08BIT, 0x01}, /* Software reset */ + + {0x3010, CRL_REG_LEN_08BIT, 0x01}, /* SLAVE_ADD_EN_2ND */ + {0x3011, CRL_REG_LEN_08BIT, 0x01}, /* SLAVE_ADD_ACKEN_2ND */ + + {0x3F0B, CRL_REG_LEN_08BIT, 0x01}, /* Multi camera mode: on */ + + {0x3041, CRL_REG_LEN_08BIT, 0x01}, /* Mode: Master */ + {0x3040, CRL_REG_LEN_08BIT, 0x01}, /* XVS pin: out */ + {0x4B81, CRL_REG_LEN_08BIT, 0x01}, /* Mode: Master */ + + {0x3042, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC Delay in lines [15:8] */ + {0x3043, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC Delay in lines [7:0] */ + {0x3044, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC Delay in clocks [15:8] */ + {0x3045, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC Delay in clocks [7:0] */ + {0x3045, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC thin down setting */ + + /* External Clock Setting */ + {0x0136, CRL_REG_LEN_08BIT, 0x13}, /* External clock freq (dec) [15:8] */ + {0x0137, CRL_REG_LEN_08BIT, 0x33}, /* External clock freq (dec) [7:0] */ + + /* Global Setting */ + {0x0808, CRL_REG_LEN_08BIT, 0x02}, /* MIPI Global Timing: Register Control */ + {0xE07A, CRL_REG_LEN_08BIT, 0x01}, + {0xE000, CRL_REG_LEN_08BIT, 0x00}, /* RUN/STOP of CSI2 during Frame Blanking: HS */ + {0x4AE9, CRL_REG_LEN_08BIT, 0x18}, + {0x4AEA, CRL_REG_LEN_08BIT, 0x08}, + {0xF61C, CRL_REG_LEN_08BIT, 0x04}, + {0xF61E, CRL_REG_LEN_08BIT, 0x04}, + {0x4AE9, CRL_REG_LEN_08BIT, 0x21}, + {0x4AEA, CRL_REG_LEN_08BIT, 0x80}, + {0x38A8, CRL_REG_LEN_08BIT, 0x1F}, + {0x38A9, CRL_REG_LEN_08BIT, 0xFF}, + {0x38AA, CRL_REG_LEN_08BIT, 0x1F}, + {0x38AB, CRL_REG_LEN_08BIT, 0xFF}, + {0x420B, CRL_REG_LEN_08BIT, 0x01}, + {0x55D4, CRL_REG_LEN_08BIT, 0x00}, + {0x55D5, CRL_REG_LEN_08BIT, 0x00}, + {0x55D6, CRL_REG_LEN_08BIT, 0x07}, + {0x55D7, CRL_REG_LEN_08BIT, 0xFF}, + {0x55E8, CRL_REG_LEN_08BIT, 0x07}, + {0x55E9, CRL_REG_LEN_08BIT, 0xFF}, + {0x55EA, CRL_REG_LEN_08BIT, 0x00}, + {0x55EB, CRL_REG_LEN_08BIT, 0x00}, + {0x574C, CRL_REG_LEN_08BIT, 0x07}, + {0x574D, CRL_REG_LEN_08BIT, 0xFF}, + {0x574E, CRL_REG_LEN_08BIT, 0x00}, + {0x574F, CRL_REG_LEN_08BIT, 0x00}, + {0x5754, CRL_REG_LEN_08BIT, 0x00}, + {0x5755, CRL_REG_LEN_08BIT, 0x00}, + {0x5756, CRL_REG_LEN_08BIT, 0x07}, + {0x5757, CRL_REG_LEN_08BIT, 0xFF}, + {0x5973, CRL_REG_LEN_08BIT, 0x04}, + {0x5974, CRL_REG_LEN_08BIT, 0x01}, + {0x5D13, CRL_REG_LEN_08BIT, 0xC3}, + {0x5D14, CRL_REG_LEN_08BIT, 0x58}, + {0x5D15, CRL_REG_LEN_08BIT, 0xA3}, + {0x5D16, CRL_REG_LEN_08BIT, 0x1D}, + {0x5D17, CRL_REG_LEN_08BIT, 0x65}, + {0x5D18, CRL_REG_LEN_08BIT, 0x8C}, + {0x5D1A, CRL_REG_LEN_08BIT, 0x06}, + {0x5D1B, CRL_REG_LEN_08BIT, 0xA9}, + {0x5D1C, CRL_REG_LEN_08BIT, 0x45}, + {0x5D1D, CRL_REG_LEN_08BIT, 0x3A}, + {0x5D1E, CRL_REG_LEN_08BIT, 0xAB}, + {0x5D1F, CRL_REG_LEN_08BIT, 0x15}, + {0x5D21, CRL_REG_LEN_08BIT, 0x0E}, + {0x5D22, CRL_REG_LEN_08BIT, 0x52}, + {0x5D23, CRL_REG_LEN_08BIT, 0xAA}, + {0x5D24, CRL_REG_LEN_08BIT, 0x7D}, + {0x5D25, CRL_REG_LEN_08BIT, 0x57}, + {0x5D26, CRL_REG_LEN_08BIT, 0xA8}, + {0x5D37, CRL_REG_LEN_08BIT, 0x5A}, + {0x5D38, 
CRL_REG_LEN_08BIT, 0x5A}, + {0x5D77, CRL_REG_LEN_08BIT, 0x7F}, + {0x7B7C, CRL_REG_LEN_08BIT, 0x00}, + {0x7B7D, CRL_REG_LEN_08BIT, 0x00}, + {0x8D1F, CRL_REG_LEN_08BIT, 0x00}, + {0x8D27, CRL_REG_LEN_08BIT, 0x00}, + {0x9004, CRL_REG_LEN_08BIT, 0x03}, + {0x9200, CRL_REG_LEN_08BIT, 0x50}, + {0x9201, CRL_REG_LEN_08BIT, 0x6C}, + {0x9202, CRL_REG_LEN_08BIT, 0x71}, + {0x9203, CRL_REG_LEN_08BIT, 0x00}, + {0x9204, CRL_REG_LEN_08BIT, 0x71}, + {0x9205, CRL_REG_LEN_08BIT, 0x01}, + {0x9371, CRL_REG_LEN_08BIT, 0x6A}, + {0x9373, CRL_REG_LEN_08BIT, 0x6A}, + {0x9375, CRL_REG_LEN_08BIT, 0x64}, + {0x990C, CRL_REG_LEN_08BIT, 0x00}, + {0x990D, CRL_REG_LEN_08BIT, 0x08}, + {0x9956, CRL_REG_LEN_08BIT, 0x8C}, + {0x9957, CRL_REG_LEN_08BIT, 0x64}, + {0x9958, CRL_REG_LEN_08BIT, 0x50}, + {0x9A48, CRL_REG_LEN_08BIT, 0x06}, + {0x9A49, CRL_REG_LEN_08BIT, 0x06}, + {0x9A4A, CRL_REG_LEN_08BIT, 0x06}, + {0x9A4B, CRL_REG_LEN_08BIT, 0x06}, + {0x9A4C, CRL_REG_LEN_08BIT, 0x06}, + {0x9A4D, CRL_REG_LEN_08BIT, 0x06}, + {0xA001, CRL_REG_LEN_08BIT, 0x0A}, + {0xA003, CRL_REG_LEN_08BIT, 0x0A}, + {0xA005, CRL_REG_LEN_08BIT, 0x0A}, + {0xA006, CRL_REG_LEN_08BIT, 0x01}, + {0xA007, CRL_REG_LEN_08BIT, 0xC0}, + {0xA009, CRL_REG_LEN_08BIT, 0xC0}, + + /* Image Tuning */ + {0x3D8A, CRL_REG_LEN_08BIT, 0x01}, + {0x7B3B, CRL_REG_LEN_08BIT, 0x01}, + {0x7B4C, CRL_REG_LEN_08BIT, 0x00}, + {0x9905, CRL_REG_LEN_08BIT, 0x00}, + {0x9907, CRL_REG_LEN_08BIT, 0x00}, + {0x9909, CRL_REG_LEN_08BIT, 0x00}, + {0x990B, CRL_REG_LEN_08BIT, 0x00}, + {0x9944, CRL_REG_LEN_08BIT, 0x3C}, + {0x9947, CRL_REG_LEN_08BIT, 0x3C}, + {0x994A, CRL_REG_LEN_08BIT, 0x8C}, + {0x994B, CRL_REG_LEN_08BIT, 0x50}, + {0x994C, CRL_REG_LEN_08BIT, 0x1B}, + {0x994D, CRL_REG_LEN_08BIT, 0x8C}, + {0x994E, CRL_REG_LEN_08BIT, 0x50}, + {0x994F, CRL_REG_LEN_08BIT, 0x1B}, + {0x9950, CRL_REG_LEN_08BIT, 0x8C}, + {0x9951, CRL_REG_LEN_08BIT, 0x1B}, + {0x9952, CRL_REG_LEN_08BIT, 0x0A}, + {0x9953, CRL_REG_LEN_08BIT, 0x8C}, + {0x9954, CRL_REG_LEN_08BIT, 0x1B}, + {0x9955, CRL_REG_LEN_08BIT, 0x0A}, + {0x9A13, CRL_REG_LEN_08BIT, 0x04}, + {0x9A14, CRL_REG_LEN_08BIT, 0x04}, + {0x9A19, CRL_REG_LEN_08BIT, 0x00}, + {0x9A1C, CRL_REG_LEN_08BIT, 0x04}, + {0x9A1D, CRL_REG_LEN_08BIT, 0x04}, + {0x9A26, CRL_REG_LEN_08BIT, 0x05}, + {0x9A27, CRL_REG_LEN_08BIT, 0x05}, + {0x9A2C, CRL_REG_LEN_08BIT, 0x01}, + {0x9A2D, CRL_REG_LEN_08BIT, 0x03}, + {0x9A2F, CRL_REG_LEN_08BIT, 0x05}, + {0x9A30, CRL_REG_LEN_08BIT, 0x05}, + {0x9A41, CRL_REG_LEN_08BIT, 0x00}, + {0x9A46, CRL_REG_LEN_08BIT, 0x00}, + {0x9A47, CRL_REG_LEN_08BIT, 0x00}, + {0x9C17, CRL_REG_LEN_08BIT, 0x35}, + {0x9C1D, CRL_REG_LEN_08BIT, 0x31}, + {0x9C29, CRL_REG_LEN_08BIT, 0x50}, + {0x9C3B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9C41, CRL_REG_LEN_08BIT, 0x6B}, + {0x9C47, CRL_REG_LEN_08BIT, 0x2D}, + {0x9C4D, CRL_REG_LEN_08BIT, 0x40}, + {0x9C6B, CRL_REG_LEN_08BIT, 0x00}, + {0x9C71, CRL_REG_LEN_08BIT, 0xC8}, + {0x9C73, CRL_REG_LEN_08BIT, 0x32}, + {0x9C75, CRL_REG_LEN_08BIT, 0x04}, + {0x9C7D, CRL_REG_LEN_08BIT, 0x2D}, + {0x9C83, CRL_REG_LEN_08BIT, 0x40}, + {0x9C94, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C95, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C96, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C97, CRL_REG_LEN_08BIT, 0x00}, + {0x9C98, CRL_REG_LEN_08BIT, 0x00}, + {0x9C99, CRL_REG_LEN_08BIT, 0x00}, + {0x9C9A, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C9B, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C9C, CRL_REG_LEN_08BIT, 0x3F}, + {0x9CA0, CRL_REG_LEN_08BIT, 0x0F}, + {0x9CA1, CRL_REG_LEN_08BIT, 0x0F}, + {0x9CA2, CRL_REG_LEN_08BIT, 0x0F}, + {0x9CA3, CRL_REG_LEN_08BIT, 0x00}, + {0x9CA4, CRL_REG_LEN_08BIT, 0x00}, + {0x9CA5, 
CRL_REG_LEN_08BIT, 0x00}, + {0x9CA6, CRL_REG_LEN_08BIT, 0x1E}, + {0x9CA7, CRL_REG_LEN_08BIT, 0x1E}, + {0x9CA8, CRL_REG_LEN_08BIT, 0x1E}, + {0x9CA9, CRL_REG_LEN_08BIT, 0x00}, + {0x9CAA, CRL_REG_LEN_08BIT, 0x00}, + {0x9CAB, CRL_REG_LEN_08BIT, 0x00}, + {0x9CAC, CRL_REG_LEN_08BIT, 0x09}, + {0x9CAD, CRL_REG_LEN_08BIT, 0x09}, + {0x9CAE, CRL_REG_LEN_08BIT, 0x09}, + {0x9CBD, CRL_REG_LEN_08BIT, 0x50}, + {0x9CBF, CRL_REG_LEN_08BIT, 0x50}, + {0x9CC1, CRL_REG_LEN_08BIT, 0x50}, + {0x9CC3, CRL_REG_LEN_08BIT, 0x40}, + {0x9CC5, CRL_REG_LEN_08BIT, 0x40}, + {0x9CC7, CRL_REG_LEN_08BIT, 0x40}, + {0x9CC9, CRL_REG_LEN_08BIT, 0x0A}, + {0x9CCB, CRL_REG_LEN_08BIT, 0x0A}, + {0x9CCD, CRL_REG_LEN_08BIT, 0x0A}, + {0x9D17, CRL_REG_LEN_08BIT, 0x35}, + {0x9D1D, CRL_REG_LEN_08BIT, 0x31}, + {0x9D29, CRL_REG_LEN_08BIT, 0x50}, + {0x9D3B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9D41, CRL_REG_LEN_08BIT, 0x6B}, + {0x9D47, CRL_REG_LEN_08BIT, 0x42}, + {0x9D4D, CRL_REG_LEN_08BIT, 0x5A}, + {0x9D6B, CRL_REG_LEN_08BIT, 0x00}, + {0x9D71, CRL_REG_LEN_08BIT, 0xC8}, + {0x9D73, CRL_REG_LEN_08BIT, 0x32}, + {0x9D75, CRL_REG_LEN_08BIT, 0x04}, + {0x9D7D, CRL_REG_LEN_08BIT, 0x42}, + {0x9D83, CRL_REG_LEN_08BIT, 0x5A}, + {0x9D94, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D95, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D96, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D97, CRL_REG_LEN_08BIT, 0x00}, + {0x9D98, CRL_REG_LEN_08BIT, 0x00}, + {0x9D99, CRL_REG_LEN_08BIT, 0x00}, + {0x9D9A, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D9B, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D9C, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D9D, CRL_REG_LEN_08BIT, 0x1F}, + {0x9D9E, CRL_REG_LEN_08BIT, 0x1F}, + {0x9D9F, CRL_REG_LEN_08BIT, 0x1F}, + {0x9DA0, CRL_REG_LEN_08BIT, 0x0F}, + {0x9DA1, CRL_REG_LEN_08BIT, 0x0F}, + {0x9DA2, CRL_REG_LEN_08BIT, 0x0F}, + {0x9DA3, CRL_REG_LEN_08BIT, 0x00}, + {0x9DA4, CRL_REG_LEN_08BIT, 0x00}, + {0x9DA5, CRL_REG_LEN_08BIT, 0x00}, + {0x9DA6, CRL_REG_LEN_08BIT, 0x1E}, + {0x9DA7, CRL_REG_LEN_08BIT, 0x1E}, + {0x9DA8, CRL_REG_LEN_08BIT, 0x1E}, + {0x9DA9, CRL_REG_LEN_08BIT, 0x00}, + {0x9DAA, CRL_REG_LEN_08BIT, 0x00}, + {0x9DAB, CRL_REG_LEN_08BIT, 0x00}, + {0x9DAC, CRL_REG_LEN_08BIT, 0x09}, + {0x9DAD, CRL_REG_LEN_08BIT, 0x09}, + {0x9DAE, CRL_REG_LEN_08BIT, 0x09}, + {0x9DC9, CRL_REG_LEN_08BIT, 0x0A}, + {0x9DCB, CRL_REG_LEN_08BIT, 0x0A}, + {0x9DCD, CRL_REG_LEN_08BIT, 0x0A}, + {0x9E17, CRL_REG_LEN_08BIT, 0x35}, + {0x9E1D, CRL_REG_LEN_08BIT, 0x31}, + {0x9E29, CRL_REG_LEN_08BIT, 0x50}, + {0x9E3B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9E41, CRL_REG_LEN_08BIT, 0x6B}, + {0x9E47, CRL_REG_LEN_08BIT, 0x2D}, + {0x9E4D, CRL_REG_LEN_08BIT, 0x40}, + {0x9E6B, CRL_REG_LEN_08BIT, 0x00}, + {0x9E71, CRL_REG_LEN_08BIT, 0xC8}, + {0x9E73, CRL_REG_LEN_08BIT, 0x32}, + {0x9E75, CRL_REG_LEN_08BIT, 0x04}, + {0x9E94, CRL_REG_LEN_08BIT, 0x0F}, + {0x9E95, CRL_REG_LEN_08BIT, 0x0F}, + {0x9E96, CRL_REG_LEN_08BIT, 0x0F}, + {0x9E97, CRL_REG_LEN_08BIT, 0x00}, + {0x9E98, CRL_REG_LEN_08BIT, 0x00}, + {0x9E99, CRL_REG_LEN_08BIT, 0x00}, + {0x9EA0, CRL_REG_LEN_08BIT, 0x0F}, + {0x9EA1, CRL_REG_LEN_08BIT, 0x0F}, + {0x9EA2, CRL_REG_LEN_08BIT, 0x0F}, + {0x9EA3, CRL_REG_LEN_08BIT, 0x00}, + {0x9EA4, CRL_REG_LEN_08BIT, 0x00}, + {0x9EA5, CRL_REG_LEN_08BIT, 0x00}, + {0x9EA6, CRL_REG_LEN_08BIT, 0x3F}, + {0x9EA7, CRL_REG_LEN_08BIT, 0x3F}, + {0x9EA8, CRL_REG_LEN_08BIT, 0x3F}, + {0x9EA9, CRL_REG_LEN_08BIT, 0x00}, + {0x9EAA, CRL_REG_LEN_08BIT, 0x00}, + {0x9EAB, CRL_REG_LEN_08BIT, 0x00}, + {0x9EAC, CRL_REG_LEN_08BIT, 0x09}, + {0x9EAD, CRL_REG_LEN_08BIT, 0x09}, + {0x9EAE, CRL_REG_LEN_08BIT, 0x09}, + {0x9EC9, CRL_REG_LEN_08BIT, 0x0A}, + {0x9ECB, CRL_REG_LEN_08BIT, 0x0A}, + {0x9ECD, 
CRL_REG_LEN_08BIT, 0x0A}, + {0x9F17, CRL_REG_LEN_08BIT, 0x35}, + {0x9F1D, CRL_REG_LEN_08BIT, 0x31}, + {0x9F29, CRL_REG_LEN_08BIT, 0x50}, + {0x9F3B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9F41, CRL_REG_LEN_08BIT, 0x6B}, + {0x9F47, CRL_REG_LEN_08BIT, 0x42}, + {0x9F4D, CRL_REG_LEN_08BIT, 0x5A}, + {0x9F6B, CRL_REG_LEN_08BIT, 0x00}, + {0x9F71, CRL_REG_LEN_08BIT, 0xC8}, + {0x9F73, CRL_REG_LEN_08BIT, 0x32}, + {0x9F75, CRL_REG_LEN_08BIT, 0x04}, + {0x9F94, CRL_REG_LEN_08BIT, 0x0F}, + {0x9F95, CRL_REG_LEN_08BIT, 0x0F}, + {0x9F96, CRL_REG_LEN_08BIT, 0x0F}, + {0x9F97, CRL_REG_LEN_08BIT, 0x00}, + {0x9F98, CRL_REG_LEN_08BIT, 0x00}, + {0x9F99, CRL_REG_LEN_08BIT, 0x00}, + {0x9F9A, CRL_REG_LEN_08BIT, 0x2F}, + {0x9F9B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9F9C, CRL_REG_LEN_08BIT, 0x2F}, + {0x9F9D, CRL_REG_LEN_08BIT, 0x00}, + {0x9F9E, CRL_REG_LEN_08BIT, 0x00}, + {0x9F9F, CRL_REG_LEN_08BIT, 0x00}, + {0x9FA0, CRL_REG_LEN_08BIT, 0x0F}, + {0x9FA1, CRL_REG_LEN_08BIT, 0x0F}, + {0x9FA2, CRL_REG_LEN_08BIT, 0x0F}, + {0x9FA3, CRL_REG_LEN_08BIT, 0x00}, + {0x9FA4, CRL_REG_LEN_08BIT, 0x00}, + {0x9FA5, CRL_REG_LEN_08BIT, 0x00}, + {0x9FA6, CRL_REG_LEN_08BIT, 0x1E}, + {0x9FA7, CRL_REG_LEN_08BIT, 0x1E}, + {0x9FA8, CRL_REG_LEN_08BIT, 0x1E}, + {0x9FA9, CRL_REG_LEN_08BIT, 0x00}, + {0x9FAA, CRL_REG_LEN_08BIT, 0x00}, + {0x9FAB, CRL_REG_LEN_08BIT, 0x00}, + {0x9FAC, CRL_REG_LEN_08BIT, 0x09}, + {0x9FAD, CRL_REG_LEN_08BIT, 0x09}, + {0x9FAE, CRL_REG_LEN_08BIT, 0x09}, + {0x9FC9, CRL_REG_LEN_08BIT, 0x0A}, + {0x9FCB, CRL_REG_LEN_08BIT, 0x0A}, + {0x9FCD, CRL_REG_LEN_08BIT, 0x0A}, + {0xA14B, CRL_REG_LEN_08BIT, 0xFF}, + {0xA151, CRL_REG_LEN_08BIT, 0x0C}, + {0xA153, CRL_REG_LEN_08BIT, 0x50}, + {0xA155, CRL_REG_LEN_08BIT, 0x02}, + {0xA157, CRL_REG_LEN_08BIT, 0x00}, + {0xA1AD, CRL_REG_LEN_08BIT, 0xFF}, + {0xA1B3, CRL_REG_LEN_08BIT, 0x0C}, + {0xA1B5, CRL_REG_LEN_08BIT, 0x50}, + {0xA1B9, CRL_REG_LEN_08BIT, 0x00}, + {0xA24B, CRL_REG_LEN_08BIT, 0xFF}, + {0xA257, CRL_REG_LEN_08BIT, 0x00}, + {0xA2AD, CRL_REG_LEN_08BIT, 0xFF}, + {0xA2B9, CRL_REG_LEN_08BIT, 0x00}, + {0xB21F, CRL_REG_LEN_08BIT, 0x04}, + {0xB35C, CRL_REG_LEN_08BIT, 0x00}, + {0xB35E, CRL_REG_LEN_08BIT, 0x08}, +}; + +static struct crl_register_write_rep imx477_4056_3040_19MHZ_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0B}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0xDF}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x00}, /* DOL-HDR Disable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x00}, /* DOL Mode: DOL-HDR Disable */ + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> 
even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + {0x0401, CRL_REG_LEN_08BIT, 0x00}, /* Scaling mode: No Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x10}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0xD8}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x0B}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xE0}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x0F}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0xD8}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x0B}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0xE0}, /* Y output size [7:0] */ +}; + +static struct crl_register_write_rep imx477_4056_3040_19MHZ_DOL_2f_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + /* Visible Size */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 
0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0B}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0xDF}, /* Analog cropping end Y [7:0] */ + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x01}, /* DOL-HDR Enable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x01}, /* DOL Mode: 2 frames in DOL-HDR */ + /* virtual channel ID of visible line and embedded line of DOL 2nd frame */ + {0x3E10, CRL_REG_LEN_08BIT, 0x01}, + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + /* Digital Crop & Scaling */ + {0x0401, CRL_REG_LEN_08BIT, 0x00}, /* Scaling mode: No Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x10}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, 
CRL_REG_LEN_08BIT, 0xD8}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x0B}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xE0}, /* Height after cropping [7:0] */ + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x0F}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0xD8}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x0B}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0xE0}, /* Y output size [7:0] */ +}; + +static struct crl_register_write_rep imx477_4056_3040_19MHZ_DOL_3f_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + /* Visible Size */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0B}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0xDF}, /* Analog cropping end Y [7:0] */ + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x01}, /* DOL-HDR Enable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x02}, /* DOL Mode: 3 frames in DOL-HDR */ + /* virtual channel ID of visible line and embedded line of DOL 2nd frame */ + {0x3E10, CRL_REG_LEN_08BIT, 0x01}, + /* virtual channel ID of visible line and embedded line of DOL 3rd frame */ + {0x3E11, CRL_REG_LEN_08BIT, 0x02}, + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /*
Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + /* Digital Crop & Scaling */ + {0x0401, CRL_REG_LEN_08BIT, 0x00}, /* Scaling mode: No Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x10}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0xD8}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x0B}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xE0}, /* Height after cropping [7:0] */ + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x0F}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0xD8}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x0B}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0xE0}, /* Y output size [7:0] */ +}; + +static struct crl_register_write_rep imx477_4056_2288_19MHZ_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + /* (0,376) to (4055, 2664) */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x01}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x78}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0A}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0x68}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x00}, /* DOL-HDR Disable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x00}, /* DOL Mode: DOL-HDR Disable */ + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, 
/* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + {0x0401, CRL_REG_LEN_08BIT, 0x00}, /* Scaling mode: No Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x10}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0xD8}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x08}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xF0}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x0F}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0xD8}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x08}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0xF0}, /* Y output size [7:0] */ +}; + + +static struct crl_register_write_rep imx477_2832_1632_19MHZ_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0B}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0xDF}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, 
CRL_REG_LEN_08BIT, 0x00}, /* DOL-HDR Disable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x00}, /* DOL Mode: DOL-HDR Disable */ + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + /* scale factor 16/22, 3894x2244 to 2832x1632 */ + {0x0401, CRL_REG_LEN_08BIT, 0x02}, /* Scaling mode: Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x16}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x52}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x01}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x8E}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0x36}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x08}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xC4}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x0B}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0x10}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x06}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0x60}, /* Y output size [7:0] */ +}; + + +static struct crl_register_write_rep imx477_2028_1128_19MHZ_master[] = { + /* Frame Horizontal Clock 
Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x01}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x88}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0A}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0x58}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x00}, /* DOL-HDR Disable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x00}, /* DOL Mode: DOL-HDR Disable */ + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x01}, /* Binning mode: Enable */ + {0x0901, CRL_REG_LEN_08BIT, 0x22}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + {0x0401, CRL_REG_LEN_08BIT, 0x00}, /* Scaling mode: No Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x10}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X
[12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x07}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0xEC}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x04}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0x68}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x07}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0xEC}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x04}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0x68}, /* Y output size [7:0] */ +}; + +static struct crl_register_write_rep imx477_1296_768_19MHZ_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0B}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0xDF}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x00}, /* DOL-HDR Disable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x00}, /* DOL Mode: DOL-HDR Disable */ + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, 
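+	/*
+	 * For reference: the scaler programmed further down in this table
+	 * applies the advertised 16/50 ratio to the cropped 4050x2400
+	 * input: 4050 * 16 / 50 = 1296 and 2400 * 16 / 50 = 768, matching
+	 * the 1296x768 output size in 0x034C-0x034F.
+	 */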
/* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + /* scale factor 16/50, 4050x2400 to 1296x768 */ + {0x0401, CRL_REG_LEN_08BIT, 0x02}, /* Scaling mode: Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x32}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x04}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x01}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x40}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0xD2}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x09}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0x60}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x05}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0x10}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x03}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0x00}, /* Y output size [7:0] */ +}; + +static struct crl_register_write_rep imx477_656_512_19MHZ_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0B}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0xDF}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x00}, /* DOL-HDR Disable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x00}, /* DOL Mode: DOL-HDR Disable */ + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, 
CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + /* scale factor 16/95, 3895x3040 to 656x512 */ + {0x0401, CRL_REG_LEN_08BIT, 0x02}, /* Scaling mode: Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x5F}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x50}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0x37}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x0B}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xE0}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x02}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0x90}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x02}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0x00}, /* Y output size [7:0] */ +}; + +static struct crl_register_write_rep imx477_4056_2288_19MHZ_DOL_2f_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + /* (0,376) to (4055, 2664) */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x01}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x78}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, 
CRL_REG_LEN_08BIT, 0x0A}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0x68}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x01}, /* DOL-HDR enabled */ + {0x00E4, CRL_REG_LEN_08BIT, 0x01}, /* DOL Mode: DOL2 */ + {0x00FC, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FD, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x3E10, CRL_REG_LEN_08BIT, 0x01}, /* VC ID of DOL 2nd frame */ + + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + {0x0401, CRL_REG_LEN_08BIT, 0x00}, /* Scaling mode: No Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x10}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0xD8}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x08}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xF0}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 
0x0F}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0xD8}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x08}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0xF0}, /* Y output size [7:0] */ +}; + +static struct crl_register_write_rep imx477_4056_2288_19MHZ_DOL_3f_master[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + /* (0,376) to (4055, 2664) */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x01}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x78}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0A}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0x68}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, CRL_REG_LEN_08BIT, 0x01}, /* DOL-HDR enabled */ + {0x00E4, CRL_REG_LEN_08BIT, 0x02}, /* DOL Mode: DOL3 */ + {0x00FC, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FD, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FE, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FF, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x3E10, CRL_REG_LEN_08BIT, 0x01}, /* VC ID of DOL 2nd frame */ + {0x3E11, CRL_REG_LEN_08BIT, 0x02}, /* VC ID of DOL 3rd frame */ + + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ +
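+	/*
+	 * In this DOL3 mode the sensor emits three exposures per frame on
+	 * separate CSI-2 virtual channels: the 1st frame stays on VC 0,
+	 * while 0x3E10/0x3E11 above route the 2nd and 3rd frames to VC 1
+	 * and VC 2, all carried as RAW10 (0x00FC-0x00FF).
+	 */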
{0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + {0x0401, CRL_REG_LEN_08BIT, 0x00}, /* Scaling mode: No Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x10}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0xD8}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x08}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xF0}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x0F}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0xD8}, /* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x08}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0xF0}, /* Y output size [7:0] */ +}; + +static struct crl_mode_rep imx477_modes_master[] = { + { + .sd_rects_items = ARRAY_SIZE(imx477_4056_3040_rects), + .sd_rects = imx477_4056_3040_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4056, + .height = 3040, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_4056_3040_19MHZ_master), + .mode_regs = imx477_4056_3040_19MHZ_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_4056_3040_rects), + .sd_rects = imx477_4056_3040_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4056, + .height = 3040, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_4056_3040_19MHZ_DOL_2f_master), + .mode_regs = imx477_4056_3040_19MHZ_DOL_2f_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_4056_3040_rects), + .sd_rects = imx477_4056_3040_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4056, + .height = 3040, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_4056_3040_19MHZ_DOL_3f_master), + .mode_regs = imx477_4056_3040_19MHZ_DOL_3f_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_4056_2288_rects), + .sd_rects = imx477_4056_2288_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4056, + .height = 2288, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_4056_2288_19MHZ_master), + .mode_regs = imx477_4056_2288_19MHZ_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_2832_1632_rects), + .sd_rects = imx477_2832_1632_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 2832, + .height = 1632, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_2832_1632_19MHZ_master), + .mode_regs = imx477_2832_1632_19MHZ_master, + }, + { + 
.sd_rects_items = ARRAY_SIZE(imx477_2028_1128_rects), + .sd_rects = imx477_2028_1128_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 2028, + .height = 1128, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_2028_1128_19MHZ_master), + .mode_regs = imx477_2028_1128_19MHZ_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_1296_768_rects), + .sd_rects = imx477_1296_768_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1296, + .height = 768, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_1296_768_19MHZ_master), + .mode_regs = imx477_1296_768_19MHZ_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_656_512_rects), + .sd_rects = imx477_656_512_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 656, + .height = 512, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_656_512_19MHZ_master), + .mode_regs = imx477_656_512_19MHZ_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_4056_2288_rects), + .sd_rects = imx477_4056_2288_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4056, + .height = 2288, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_4056_2288_19MHZ_DOL_2f_master), + .mode_regs = imx477_4056_2288_19MHZ_DOL_2f_master, + }, + { + .sd_rects_items = ARRAY_SIZE(imx477_4056_2288_rects), + .sd_rects = imx477_4056_2288_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4056, + .height = 2288, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_4056_2288_19MHZ_DOL_3f_master), + .mode_regs = imx477_4056_2288_19MHZ_DOL_3f_master, + }, +}; + +static struct crl_flip_data imx477_flip_configurations_master[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + } +}; + +struct crl_sensor_configuration imx477_master_crl_configuration = { + + .power_items = ARRAY_SIZE(imx477_power_items), + .power_entities = imx477_power_items, + + .onetime_init_regs_items = ARRAY_SIZE(imx477_onetime_init_regset_master), + .onetime_init_regs = imx477_onetime_init_regset_master, + + .powerup_regs_items = ARRAY_SIZE(imx477_powerup_standby), + .powerup_regs = imx477_powerup_standby, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(imx477_sensor_detect_regset), + .id_regs = imx477_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx477_sensor_subdevs), + .subdevs = imx477_sensor_subdevs, + + .sensor_limits = &imx477_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx477_pll_configurations), + .pll_configs = imx477_pll_configurations, + + .modes_items = ARRAY_SIZE(imx477_modes_master), + .modes = imx477_modes_master, + + .streamon_regs_items = ARRAY_SIZE(imx477_streamon_regs), + .streamon_regs = imx477_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx477_streamoff_regs), + .streamoff_regs = imx477_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx477_v4l2_ctrls), + .v4l2_ctrl_bank = imx477_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx477_crl_csi_data_fmt), + .csi_fmts = imx477_crl_csi_data_fmt, + 
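	/*
	 * Each readout flip changes which Bayer sample is clocked out
	 * first, so the table below pairs every flip combination with the
	 * pixel order it produces: RGGB by default, GRBG with HFLIP, GBRG
	 * with VFLIP and BGGR when both are applied.
	 */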
+ .flip_items = ARRAY_SIZE(imx477_flip_configurations_master), + .flip_data = imx477_flip_configurations_master, + + .frame_desc_entries = ARRAY_SIZE(imx477_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = imx477_frame_desc, +}; + +#endif /* __CRLMODULE_IMX477_MASTER_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_imx477_slave_configuration.h b/drivers/media/i2c/crlmodule/crl_imx477_slave_configuration.h new file mode 100644 index 000000000000..b8dc15c0f1f8 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_imx477_slave_configuration.h @@ -0,0 +1,509 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Alexei Zavjalov + * + */ + +#ifndef __CRLMODULE_IMX477_SLAVE_CONFIGURATION_H_ +#define __CRLMODULE_IMX477_SLAVE_CONFIGURATION_H_ + +#include "crl_imx477_common_regs.h" + +static struct crl_register_write_rep imx477_onetime_init_regset_slave[] = { + {0x0103, CRL_REG_LEN_08BIT, 0x01}, /* Software reset */ + + {0x3010, CRL_REG_LEN_08BIT, 0x01}, /* SLAVE_ADD_EN_2ND */ + {0x3011, CRL_REG_LEN_08BIT, 0x01}, /* SLAVE_ADD_ACKEN_2ND */ + + {0x3F0B, CRL_REG_LEN_08BIT, 0x01}, /* Multi camera mode: on */ + + {0x3041, CRL_REG_LEN_08BIT, 0x00}, /* Mode: Slave */ + {0x3040, CRL_REG_LEN_08BIT, 0x00}, /* XVS pin: in */ + {0x4B81, CRL_REG_LEN_08BIT, 0x00}, /* Mode: Slave */ + + {0x3042, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC Delay in lines [15:8] */ + {0x3043, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC Delay in lines [7:0] */ + {0x3044, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC Delay in clocks [15:8] */ + {0x3045, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC Delay in clocks [7:0] */ + {0x3045, CRL_REG_LEN_08BIT, 0x00}, /* VSYNC thin down setting */ + + /* External Clock Setting */ + {0x0136, CRL_REG_LEN_08BIT, 0x13}, /* External clock freq (dec) [15:8] */ + {0x0137, CRL_REG_LEN_08BIT, 0x33}, /* External clock freq (dec) [7:0] */ + + /* Global Setting */ + {0x0808, CRL_REG_LEN_08BIT, 0x02}, /* MIPI Global Timing: Register Control */ + {0xE07A, CRL_REG_LEN_08BIT, 0x01}, + {0xE000, CRL_REG_LEN_08BIT, 0x00}, /* RUN/STOP of CSI2 during Frame Blanking: HS */ + {0x4AE9, CRL_REG_LEN_08BIT, 0x18}, + {0x4AEA, CRL_REG_LEN_08BIT, 0x08}, + {0xF61C, CRL_REG_LEN_08BIT, 0x04}, + {0xF61E, CRL_REG_LEN_08BIT, 0x04}, + {0x4AE9, CRL_REG_LEN_08BIT, 0x21}, + {0x4AEA, CRL_REG_LEN_08BIT, 0x80}, + {0x38A8, CRL_REG_LEN_08BIT, 0x1F}, + {0x38A9, CRL_REG_LEN_08BIT, 0xFF}, + {0x38AA, CRL_REG_LEN_08BIT, 0x1F}, + {0x38AB, CRL_REG_LEN_08BIT, 0xFF}, + {0x420B, CRL_REG_LEN_08BIT, 0x01}, + {0x55D4, CRL_REG_LEN_08BIT, 0x00}, + {0x55D5, CRL_REG_LEN_08BIT, 0x00}, + {0x55D6, CRL_REG_LEN_08BIT, 0x07}, + {0x55D7, CRL_REG_LEN_08BIT, 0xFF}, + {0x55E8, CRL_REG_LEN_08BIT, 0x07}, + {0x55E9, CRL_REG_LEN_08BIT, 0xFF}, + {0x55EA, CRL_REG_LEN_08BIT, 0x00}, + {0x55EB, CRL_REG_LEN_08BIT, 0x00}, + {0x574C, CRL_REG_LEN_08BIT, 0x07}, + {0x574D, CRL_REG_LEN_08BIT, 0xFF}, + {0x574E, CRL_REG_LEN_08BIT, 0x00}, + {0x574F, CRL_REG_LEN_08BIT, 0x00}, + {0x5754, CRL_REG_LEN_08BIT, 0x00}, + {0x5755, CRL_REG_LEN_08BIT, 0x00}, + {0x5756, CRL_REG_LEN_08BIT, 0x07}, + {0x5757, CRL_REG_LEN_08BIT, 0xFF}, + {0x5973, CRL_REG_LEN_08BIT, 0x04}, + {0x5974, CRL_REG_LEN_08BIT, 0x01}, + {0x5D13, CRL_REG_LEN_08BIT, 0xC3}, + {0x5D14, CRL_REG_LEN_08BIT, 0x58}, + {0x5D15, CRL_REG_LEN_08BIT, 0xA3}, + {0x5D16, CRL_REG_LEN_08BIT, 0x1D}, + {0x5D17, CRL_REG_LEN_08BIT, 0x65}, + {0x5D18, CRL_REG_LEN_08BIT, 0x8C}, + {0x5D1A, CRL_REG_LEN_08BIT, 0x06}, + {0x5D1B, CRL_REG_LEN_08BIT, 0xA9}, + {0x5D1C, CRL_REG_LEN_08BIT, 0x45}, 
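+	/*
+	 * The slave-specific writes near the top of this table make the
+	 * sensor follow an external master: 0x3010/0x3011 enable the 2nd
+	 * I2C slave address, 0x3F0B turns on multi camera mode,
+	 * 0x3041/0x4B81 select slave operation, 0x3040 switches the XVS
+	 * pin to input and 0x3042-0x3045 zero the VSYNC delay so exposure
+	 * starts in step with the master. XXX: 0x3045 is written twice
+	 * above with different comments; one of the two entries may be
+	 * meant for a neighbouring register.
+	 */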
+ {0x5D1D, CRL_REG_LEN_08BIT, 0x3A}, + {0x5D1E, CRL_REG_LEN_08BIT, 0xAB}, + {0x5D1F, CRL_REG_LEN_08BIT, 0x15}, + {0x5D21, CRL_REG_LEN_08BIT, 0x0E}, + {0x5D22, CRL_REG_LEN_08BIT, 0x52}, + {0x5D23, CRL_REG_LEN_08BIT, 0xAA}, + {0x5D24, CRL_REG_LEN_08BIT, 0x7D}, + {0x5D25, CRL_REG_LEN_08BIT, 0x57}, + {0x5D26, CRL_REG_LEN_08BIT, 0xA8}, + {0x5D37, CRL_REG_LEN_08BIT, 0x5A}, + {0x5D38, CRL_REG_LEN_08BIT, 0x5A}, + {0x5D77, CRL_REG_LEN_08BIT, 0x7F}, + {0x7B7C, CRL_REG_LEN_08BIT, 0x00}, + {0x7B7D, CRL_REG_LEN_08BIT, 0x00}, + {0x8D1F, CRL_REG_LEN_08BIT, 0x00}, + {0x8D27, CRL_REG_LEN_08BIT, 0x00}, + {0x9004, CRL_REG_LEN_08BIT, 0x03}, + {0x9200, CRL_REG_LEN_08BIT, 0x50}, + {0x9201, CRL_REG_LEN_08BIT, 0x6C}, + {0x9202, CRL_REG_LEN_08BIT, 0x71}, + {0x9203, CRL_REG_LEN_08BIT, 0x00}, + {0x9204, CRL_REG_LEN_08BIT, 0x71}, + {0x9205, CRL_REG_LEN_08BIT, 0x01}, + {0x9371, CRL_REG_LEN_08BIT, 0x6A}, + {0x9373, CRL_REG_LEN_08BIT, 0x6A}, + {0x9375, CRL_REG_LEN_08BIT, 0x64}, + {0x990C, CRL_REG_LEN_08BIT, 0x00}, + {0x990D, CRL_REG_LEN_08BIT, 0x08}, + {0x9956, CRL_REG_LEN_08BIT, 0x8C}, + {0x9957, CRL_REG_LEN_08BIT, 0x64}, + {0x9958, CRL_REG_LEN_08BIT, 0x50}, + {0x9A48, CRL_REG_LEN_08BIT, 0x06}, + {0x9A49, CRL_REG_LEN_08BIT, 0x06}, + {0x9A4A, CRL_REG_LEN_08BIT, 0x06}, + {0x9A4B, CRL_REG_LEN_08BIT, 0x06}, + {0x9A4C, CRL_REG_LEN_08BIT, 0x06}, + {0x9A4D, CRL_REG_LEN_08BIT, 0x06}, + {0xA001, CRL_REG_LEN_08BIT, 0x0A}, + {0xA003, CRL_REG_LEN_08BIT, 0x0A}, + {0xA005, CRL_REG_LEN_08BIT, 0x0A}, + {0xA006, CRL_REG_LEN_08BIT, 0x01}, + {0xA007, CRL_REG_LEN_08BIT, 0xC0}, + {0xA009, CRL_REG_LEN_08BIT, 0xC0}, + + /* Image Tuning */ + {0x3D8A, CRL_REG_LEN_08BIT, 0x01}, + {0x7B3B, CRL_REG_LEN_08BIT, 0x01}, + {0x7B4C, CRL_REG_LEN_08BIT, 0x00}, + {0x9905, CRL_REG_LEN_08BIT, 0x00}, + {0x9907, CRL_REG_LEN_08BIT, 0x00}, + {0x9909, CRL_REG_LEN_08BIT, 0x00}, + {0x990B, CRL_REG_LEN_08BIT, 0x00}, + {0x9944, CRL_REG_LEN_08BIT, 0x3C}, + {0x9947, CRL_REG_LEN_08BIT, 0x3C}, + {0x994A, CRL_REG_LEN_08BIT, 0x8C}, + {0x994B, CRL_REG_LEN_08BIT, 0x50}, + {0x994C, CRL_REG_LEN_08BIT, 0x1B}, + {0x994D, CRL_REG_LEN_08BIT, 0x8C}, + {0x994E, CRL_REG_LEN_08BIT, 0x50}, + {0x994F, CRL_REG_LEN_08BIT, 0x1B}, + {0x9950, CRL_REG_LEN_08BIT, 0x8C}, + {0x9951, CRL_REG_LEN_08BIT, 0x1B}, + {0x9952, CRL_REG_LEN_08BIT, 0x0A}, + {0x9953, CRL_REG_LEN_08BIT, 0x8C}, + {0x9954, CRL_REG_LEN_08BIT, 0x1B}, + {0x9955, CRL_REG_LEN_08BIT, 0x0A}, + {0x9A13, CRL_REG_LEN_08BIT, 0x04}, + {0x9A14, CRL_REG_LEN_08BIT, 0x04}, + {0x9A19, CRL_REG_LEN_08BIT, 0x00}, + {0x9A1C, CRL_REG_LEN_08BIT, 0x04}, + {0x9A1D, CRL_REG_LEN_08BIT, 0x04}, + {0x9A26, CRL_REG_LEN_08BIT, 0x05}, + {0x9A27, CRL_REG_LEN_08BIT, 0x05}, + {0x9A2C, CRL_REG_LEN_08BIT, 0x01}, + {0x9A2D, CRL_REG_LEN_08BIT, 0x03}, + {0x9A2F, CRL_REG_LEN_08BIT, 0x05}, + {0x9A30, CRL_REG_LEN_08BIT, 0x05}, + {0x9A41, CRL_REG_LEN_08BIT, 0x00}, + {0x9A46, CRL_REG_LEN_08BIT, 0x00}, + {0x9A47, CRL_REG_LEN_08BIT, 0x00}, + {0x9C17, CRL_REG_LEN_08BIT, 0x35}, + {0x9C1D, CRL_REG_LEN_08BIT, 0x31}, + {0x9C29, CRL_REG_LEN_08BIT, 0x50}, + {0x9C3B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9C41, CRL_REG_LEN_08BIT, 0x6B}, + {0x9C47, CRL_REG_LEN_08BIT, 0x2D}, + {0x9C4D, CRL_REG_LEN_08BIT, 0x40}, + {0x9C6B, CRL_REG_LEN_08BIT, 0x00}, + {0x9C71, CRL_REG_LEN_08BIT, 0xC8}, + {0x9C73, CRL_REG_LEN_08BIT, 0x32}, + {0x9C75, CRL_REG_LEN_08BIT, 0x04}, + {0x9C7D, CRL_REG_LEN_08BIT, 0x2D}, + {0x9C83, CRL_REG_LEN_08BIT, 0x40}, + {0x9C94, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C95, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C96, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C97, CRL_REG_LEN_08BIT, 0x00}, + {0x9C98, 
CRL_REG_LEN_08BIT, 0x00}, + {0x9C99, CRL_REG_LEN_08BIT, 0x00}, + {0x9C9A, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C9B, CRL_REG_LEN_08BIT, 0x3F}, + {0x9C9C, CRL_REG_LEN_08BIT, 0x3F}, + {0x9CA0, CRL_REG_LEN_08BIT, 0x0F}, + {0x9CA1, CRL_REG_LEN_08BIT, 0x0F}, + {0x9CA2, CRL_REG_LEN_08BIT, 0x0F}, + {0x9CA3, CRL_REG_LEN_08BIT, 0x00}, + {0x9CA4, CRL_REG_LEN_08BIT, 0x00}, + {0x9CA5, CRL_REG_LEN_08BIT, 0x00}, + {0x9CA6, CRL_REG_LEN_08BIT, 0x1E}, + {0x9CA7, CRL_REG_LEN_08BIT, 0x1E}, + {0x9CA8, CRL_REG_LEN_08BIT, 0x1E}, + {0x9CA9, CRL_REG_LEN_08BIT, 0x00}, + {0x9CAA, CRL_REG_LEN_08BIT, 0x00}, + {0x9CAB, CRL_REG_LEN_08BIT, 0x00}, + {0x9CAC, CRL_REG_LEN_08BIT, 0x09}, + {0x9CAD, CRL_REG_LEN_08BIT, 0x09}, + {0x9CAE, CRL_REG_LEN_08BIT, 0x09}, + {0x9CBD, CRL_REG_LEN_08BIT, 0x50}, + {0x9CBF, CRL_REG_LEN_08BIT, 0x50}, + {0x9CC1, CRL_REG_LEN_08BIT, 0x50}, + {0x9CC3, CRL_REG_LEN_08BIT, 0x40}, + {0x9CC5, CRL_REG_LEN_08BIT, 0x40}, + {0x9CC7, CRL_REG_LEN_08BIT, 0x40}, + {0x9CC9, CRL_REG_LEN_08BIT, 0x0A}, + {0x9CCB, CRL_REG_LEN_08BIT, 0x0A}, + {0x9CCD, CRL_REG_LEN_08BIT, 0x0A}, + {0x9D17, CRL_REG_LEN_08BIT, 0x35}, + {0x9D1D, CRL_REG_LEN_08BIT, 0x31}, + {0x9D29, CRL_REG_LEN_08BIT, 0x50}, + {0x9D3B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9D41, CRL_REG_LEN_08BIT, 0x6B}, + {0x9D47, CRL_REG_LEN_08BIT, 0x42}, + {0x9D4D, CRL_REG_LEN_08BIT, 0x5A}, + {0x9D6B, CRL_REG_LEN_08BIT, 0x00}, + {0x9D71, CRL_REG_LEN_08BIT, 0xC8}, + {0x9D73, CRL_REG_LEN_08BIT, 0x32}, + {0x9D75, CRL_REG_LEN_08BIT, 0x04}, + {0x9D7D, CRL_REG_LEN_08BIT, 0x42}, + {0x9D83, CRL_REG_LEN_08BIT, 0x5A}, + {0x9D94, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D95, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D96, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D97, CRL_REG_LEN_08BIT, 0x00}, + {0x9D98, CRL_REG_LEN_08BIT, 0x00}, + {0x9D99, CRL_REG_LEN_08BIT, 0x00}, + {0x9D9A, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D9B, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D9C, CRL_REG_LEN_08BIT, 0x3F}, + {0x9D9D, CRL_REG_LEN_08BIT, 0x1F}, + {0x9D9E, CRL_REG_LEN_08BIT, 0x1F}, + {0x9D9F, CRL_REG_LEN_08BIT, 0x1F}, + {0x9DA0, CRL_REG_LEN_08BIT, 0x0F}, + {0x9DA1, CRL_REG_LEN_08BIT, 0x0F}, + {0x9DA2, CRL_REG_LEN_08BIT, 0x0F}, + {0x9DA3, CRL_REG_LEN_08BIT, 0x00}, + {0x9DA4, CRL_REG_LEN_08BIT, 0x00}, + {0x9DA5, CRL_REG_LEN_08BIT, 0x00}, + {0x9DA6, CRL_REG_LEN_08BIT, 0x1E}, + {0x9DA7, CRL_REG_LEN_08BIT, 0x1E}, + {0x9DA8, CRL_REG_LEN_08BIT, 0x1E}, + {0x9DA9, CRL_REG_LEN_08BIT, 0x00}, + {0x9DAA, CRL_REG_LEN_08BIT, 0x00}, + {0x9DAB, CRL_REG_LEN_08BIT, 0x00}, + {0x9DAC, CRL_REG_LEN_08BIT, 0x09}, + {0x9DAD, CRL_REG_LEN_08BIT, 0x09}, + {0x9DAE, CRL_REG_LEN_08BIT, 0x09}, + {0x9DC9, CRL_REG_LEN_08BIT, 0x0A}, + {0x9DCB, CRL_REG_LEN_08BIT, 0x0A}, + {0x9DCD, CRL_REG_LEN_08BIT, 0x0A}, + {0x9E17, CRL_REG_LEN_08BIT, 0x35}, + {0x9E1D, CRL_REG_LEN_08BIT, 0x31}, + {0x9E29, CRL_REG_LEN_08BIT, 0x50}, + {0x9E3B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9E41, CRL_REG_LEN_08BIT, 0x6B}, + {0x9E47, CRL_REG_LEN_08BIT, 0x2D}, + {0x9E4D, CRL_REG_LEN_08BIT, 0x40}, + {0x9E6B, CRL_REG_LEN_08BIT, 0x00}, + {0x9E71, CRL_REG_LEN_08BIT, 0xC8}, + {0x9E73, CRL_REG_LEN_08BIT, 0x32}, + {0x9E75, CRL_REG_LEN_08BIT, 0x04}, + {0x9E94, CRL_REG_LEN_08BIT, 0x0F}, + {0x9E95, CRL_REG_LEN_08BIT, 0x0F}, + {0x9E96, CRL_REG_LEN_08BIT, 0x0F}, + {0x9E97, CRL_REG_LEN_08BIT, 0x00}, + {0x9E98, CRL_REG_LEN_08BIT, 0x00}, + {0x9E99, CRL_REG_LEN_08BIT, 0x00}, + {0x9EA0, CRL_REG_LEN_08BIT, 0x0F}, + {0x9EA1, CRL_REG_LEN_08BIT, 0x0F}, + {0x9EA2, CRL_REG_LEN_08BIT, 0x0F}, + {0x9EA3, CRL_REG_LEN_08BIT, 0x00}, + {0x9EA4, CRL_REG_LEN_08BIT, 0x00}, + {0x9EA5, CRL_REG_LEN_08BIT, 0x00}, + {0x9EA6, CRL_REG_LEN_08BIT, 0x3F}, + {0x9EA7, 
CRL_REG_LEN_08BIT, 0x3F}, + {0x9EA8, CRL_REG_LEN_08BIT, 0x3F}, + {0x9EA9, CRL_REG_LEN_08BIT, 0x00}, + {0x9EAA, CRL_REG_LEN_08BIT, 0x00}, + {0x9EAB, CRL_REG_LEN_08BIT, 0x00}, + {0x9EAC, CRL_REG_LEN_08BIT, 0x09}, + {0x9EAD, CRL_REG_LEN_08BIT, 0x09}, + {0x9EAE, CRL_REG_LEN_08BIT, 0x09}, + {0x9EC9, CRL_REG_LEN_08BIT, 0x0A}, + {0x9ECB, CRL_REG_LEN_08BIT, 0x0A}, + {0x9ECD, CRL_REG_LEN_08BIT, 0x0A}, + {0x9F17, CRL_REG_LEN_08BIT, 0x35}, + {0x9F1D, CRL_REG_LEN_08BIT, 0x31}, + {0x9F29, CRL_REG_LEN_08BIT, 0x50}, + {0x9F3B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9F41, CRL_REG_LEN_08BIT, 0x6B}, + {0x9F47, CRL_REG_LEN_08BIT, 0x42}, + {0x9F4D, CRL_REG_LEN_08BIT, 0x5A}, + {0x9F6B, CRL_REG_LEN_08BIT, 0x00}, + {0x9F71, CRL_REG_LEN_08BIT, 0xC8}, + {0x9F73, CRL_REG_LEN_08BIT, 0x32}, + {0x9F75, CRL_REG_LEN_08BIT, 0x04}, + {0x9F94, CRL_REG_LEN_08BIT, 0x0F}, + {0x9F95, CRL_REG_LEN_08BIT, 0x0F}, + {0x9F96, CRL_REG_LEN_08BIT, 0x0F}, + {0x9F97, CRL_REG_LEN_08BIT, 0x00}, + {0x9F98, CRL_REG_LEN_08BIT, 0x00}, + {0x9F99, CRL_REG_LEN_08BIT, 0x00}, + {0x9F9A, CRL_REG_LEN_08BIT, 0x2F}, + {0x9F9B, CRL_REG_LEN_08BIT, 0x2F}, + {0x9F9C, CRL_REG_LEN_08BIT, 0x2F}, + {0x9F9D, CRL_REG_LEN_08BIT, 0x00}, + {0x9F9E, CRL_REG_LEN_08BIT, 0x00}, + {0x9F9F, CRL_REG_LEN_08BIT, 0x00}, + {0x9FA0, CRL_REG_LEN_08BIT, 0x0F}, + {0x9FA1, CRL_REG_LEN_08BIT, 0x0F}, + {0x9FA2, CRL_REG_LEN_08BIT, 0x0F}, + {0x9FA3, CRL_REG_LEN_08BIT, 0x00}, + {0x9FA4, CRL_REG_LEN_08BIT, 0x00}, + {0x9FA5, CRL_REG_LEN_08BIT, 0x00}, + {0x9FA6, CRL_REG_LEN_08BIT, 0x1E}, + {0x9FA7, CRL_REG_LEN_08BIT, 0x1E}, + {0x9FA8, CRL_REG_LEN_08BIT, 0x1E}, + {0x9FA9, CRL_REG_LEN_08BIT, 0x00}, + {0x9FAA, CRL_REG_LEN_08BIT, 0x00}, + {0x9FAB, CRL_REG_LEN_08BIT, 0x00}, + {0x9FAC, CRL_REG_LEN_08BIT, 0x09}, + {0x9FAD, CRL_REG_LEN_08BIT, 0x09}, + {0x9FAE, CRL_REG_LEN_08BIT, 0x09}, + {0x9FC9, CRL_REG_LEN_08BIT, 0x0A}, + {0x9FCB, CRL_REG_LEN_08BIT, 0x0A}, + {0x9FCD, CRL_REG_LEN_08BIT, 0x0A}, + {0xA14B, CRL_REG_LEN_08BIT, 0xFF}, + {0xA151, CRL_REG_LEN_08BIT, 0x0C}, + {0xA153, CRL_REG_LEN_08BIT, 0x50}, + {0xA155, CRL_REG_LEN_08BIT, 0x02}, + {0xA157, CRL_REG_LEN_08BIT, 0x00}, + {0xA1AD, CRL_REG_LEN_08BIT, 0xFF}, + {0xA1B3, CRL_REG_LEN_08BIT, 0x0C}, + {0xA1B5, CRL_REG_LEN_08BIT, 0x50}, + {0xA1B9, CRL_REG_LEN_08BIT, 0x00}, + {0xA24B, CRL_REG_LEN_08BIT, 0xFF}, + {0xA257, CRL_REG_LEN_08BIT, 0x00}, + {0xA2AD, CRL_REG_LEN_08BIT, 0xFF}, + {0xA2B9, CRL_REG_LEN_08BIT, 0x00}, + {0xB21F, CRL_REG_LEN_08BIT, 0x04}, + {0xB35C, CRL_REG_LEN_08BIT, 0x00}, + {0xB35E, CRL_REG_LEN_08BIT, 0x08}, +}; + +static struct crl_register_write_rep imx477_4056_3040_19MHZ_slave[] = { + /* Frame Horizontal Clock Count */ + {0x0342, CRL_REG_LEN_08BIT, 0x39}, /* Line length [15:8] */ + {0x0343, CRL_REG_LEN_08BIT, 0x14}, /* Line length [7:0] */ + + /* Frame Vertical Clock Count */ + {0x0340, CRL_REG_LEN_08BIT, 0x20}, /* Frame length [15:8] */ + {0x0341, CRL_REG_LEN_08BIT, 0x11}, /* Frame length [7:0] */ + + /* Visible Size */ + {0x0344, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [12:8] */ + {0x0345, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start X [7:0] */ + {0x0346, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [12:8] */ + {0x0347, CRL_REG_LEN_08BIT, 0x00}, /* Analog cropping start Y [7:0] */ + {0x0348, CRL_REG_LEN_08BIT, 0x0F}, /* Analog cropping end X [12:8] */ + {0x0349, CRL_REG_LEN_08BIT, 0xD7}, /* Analog cropping end X [7:0] */ + {0x034A, CRL_REG_LEN_08BIT, 0x0B}, /* Analog cropping end Y [12:8] */ + {0x034B, CRL_REG_LEN_08BIT, 0xDF}, /* Analog cropping end Y [7:0] */ + + /* Mode Setting */ + {0x00E3, 
CRL_REG_LEN_08BIT, 0x00}, /* DOL-HDR Disable */ + {0x00E4, CRL_REG_LEN_08BIT, 0x00}, /* DOL Mode: DOL-HDR Disable */ + {0x00FC, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FD, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FE, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x00FF, CRL_REG_LEN_08BIT, 0x0A}, /* The output data fmt for CSI: RAW10 */ + {0x0220, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x0221, CRL_REG_LEN_08BIT, 0x11}, /* Undocumented */ + {0x0381, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, even -> odd */ + {0x0383, CRL_REG_LEN_08BIT, 0x01}, /* Num of pixels skipped, odd -> even */ + {0x0385, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, even -> odd */ + {0x0387, CRL_REG_LEN_08BIT, 0x01}, /* Num of lines skipped, odd -> even */ + {0x0900, CRL_REG_LEN_08BIT, 0x00}, /* Binning mode: Disable */ + {0x0901, CRL_REG_LEN_08BIT, 0x11}, /* Binning Type for Horizontal */ + {0x0902, CRL_REG_LEN_08BIT, 0x02}, /* Binning Type for Vertical */ + {0x3140, CRL_REG_LEN_08BIT, 0x02}, /* Undocumented */ + {0x3C00, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x3C01, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x3C02, CRL_REG_LEN_08BIT, 0xDC}, /* Undocumented */ + {0x3F0D, CRL_REG_LEN_08BIT, 0x00}, /* AD converter: 10 bit */ + {0x5748, CRL_REG_LEN_08BIT, 0x07}, /* Undocumented */ + {0x5749, CRL_REG_LEN_08BIT, 0xFF}, /* Undocumented */ + {0x574A, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x574B, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x7B75, CRL_REG_LEN_08BIT, 0x0E}, /* Undocumented */ + {0x7B76, CRL_REG_LEN_08BIT, 0x09}, /* Undocumented */ + {0x7B77, CRL_REG_LEN_08BIT, 0x0C}, /* Undocumented */ + {0x7B78, CRL_REG_LEN_08BIT, 0x06}, /* Undocumented */ + {0x7B79, CRL_REG_LEN_08BIT, 0x3B}, /* Undocumented */ + {0x7B53, CRL_REG_LEN_08BIT, 0x01}, /* Undocumented */ + {0x9369, CRL_REG_LEN_08BIT, 0x5A}, /* Undocumented */ + {0x936B, CRL_REG_LEN_08BIT, 0x55}, /* Undocumented */ + {0x936D, CRL_REG_LEN_08BIT, 0x28}, /* Undocumented */ + {0x9304, CRL_REG_LEN_08BIT, 0x03}, /* Undocumented */ + {0x9305, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9A, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9B, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9C, CRL_REG_LEN_08BIT, 0x2F}, /* Undocumented */ + {0x9E9D, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9E, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0x9E9F, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + {0xA2A9, CRL_REG_LEN_08BIT, 0x60}, /* Undocumented */ + {0xA2B7, CRL_REG_LEN_08BIT, 0x00}, /* Undocumented */ + + /* Digital Crop & Scaling */ + {0x0401, CRL_REG_LEN_08BIT, 0x00}, /* Scaling mode: No Scaling */ + {0x0404, CRL_REG_LEN_08BIT, 0x00}, /* Down Scaling Factor M [8] */ + {0x0405, CRL_REG_LEN_08BIT, 0x10}, /* Down Scaling Factor M [7:0] */ + {0x0408, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [12:8] */ + {0x0409, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from X [7:0] */ + {0x040A, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [12:8] */ + {0x040B, CRL_REG_LEN_08BIT, 0x00}, /* Crop Offset from Y [7:0] */ + {0x040C, CRL_REG_LEN_08BIT, 0x0F}, /* Width after cropping [12:8] */ + {0x040D, CRL_REG_LEN_08BIT, 0xD8}, /* Width after cropping [7:0] */ + {0x040E, CRL_REG_LEN_08BIT, 0x0B}, /* Height after cropping [12:8] */ + {0x040F, CRL_REG_LEN_08BIT, 0xE0}, /* Height after cropping [7:0] */ + + /* Output Crop */ + {0x034C, CRL_REG_LEN_08BIT, 0x0F}, /* X output size [12:8] */ + {0x034D, CRL_REG_LEN_08BIT, 0xD8}, 
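+	/*
+	 * Output size check: X = (0x0F << 8) | 0xD8 = 4056 and
+	 * Y = (0x0B << 8) | 0xE0 = 3040. Unlike the master configuration,
+	 * the slave side exposes only this single full-resolution mode
+	 * (see imx477_modes_slave below).
+	 */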
/* X output size [7:0] */ + {0x034E, CRL_REG_LEN_08BIT, 0x0B}, /* Y output size [12:8] */ + {0x034F, CRL_REG_LEN_08BIT, 0xE0}, /* Y output size [7:0] */ +}; + +static struct crl_mode_rep imx477_modes_slave[] = { + { + .sd_rects_items = ARRAY_SIZE(imx477_4056_3040_rects), + .sd_rects = imx477_4056_3040_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4056, + .height = 3040, + .min_llp = 14612, + .min_fll = 8209, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(imx477_4056_3040_19MHZ_slave), + .mode_regs = imx477_4056_3040_19MHZ_slave, + }, +}; + +static struct crl_flip_data imx477_flip_configurations_slave[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + } +}; + +struct crl_sensor_configuration imx477_slave_crl_configuration = { + + .power_items = ARRAY_SIZE(imx477_power_items), + .power_entities = imx477_power_items, + + .onetime_init_regs_items = ARRAY_SIZE(imx477_onetime_init_regset_slave), + .onetime_init_regs = imx477_onetime_init_regset_slave, + + .powerup_regs_items = ARRAY_SIZE(imx477_powerup_standby), + .powerup_regs = imx477_powerup_standby, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(imx477_sensor_detect_regset), + .id_regs = imx477_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(imx477_sensor_subdevs), + .subdevs = imx477_sensor_subdevs, + + .sensor_limits = &imx477_sensor_limits, + + .pll_config_items = ARRAY_SIZE(imx477_pll_configurations), + .pll_configs = imx477_pll_configurations, + + .modes_items = ARRAY_SIZE(imx477_modes_slave), + .modes = imx477_modes_slave, + + .streamon_regs_items = ARRAY_SIZE(imx477_streamon_regs), + .streamon_regs = imx477_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(imx477_streamoff_regs), + .streamoff_regs = imx477_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(imx477_v4l2_ctrls), + .v4l2_ctrl_bank = imx477_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(imx477_crl_csi_data_fmt), + .csi_fmts = imx477_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(imx477_flip_configurations_slave), + .flip_data = imx477_flip_configurations_slave, + + .frame_desc_entries = ARRAY_SIZE(imx477_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = imx477_frame_desc, +}; + +#endif /* __CRLMODULE_IMX477_SLAVE_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_magna_configuration.h b/drivers/media/i2c/crlmodule/crl_magna_configuration.h new file mode 100644 index 000000000000..cd1e316a2cab --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_magna_configuration.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Kishore Bodke + * + */ + +#ifndef __CRLMODULE_MAGNA_CONFIGURATION_H_ +#define __CRLMODULE_MAGNA_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static struct crl_pll_configuration magna_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 16, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 10, + .pixel_rate_csi = 529000000, + 
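+	/*
+	 * The three PLL entries in this table differ only in bitsperpixel
+	 * (16, 10 and 20); the link and pixel clocks stay fixed, so the
+	 * entry is presumably selected purely by the bits-per-pixel of
+	 * the active CSI format.
+	 */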
.pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 20, + .pixel_rate_csi = 529000000, + .pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + } +}; + +static struct crl_subdev_rect_rep magna_1280_720_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 720, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 720, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_mode_rep magna_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(magna_1280_720_rects), + .sd_rects = magna_1280_720_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 720, + .min_llp = 2250, + .min_fll = 1320, + }, +}; + +static struct crl_sensor_subdev_config magna_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "magna binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "magna pixel array", + } +}; + +static struct crl_sensor_limits magna_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1280, + .y_addr_max = 720, + .min_frame_length_lines = 240, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 320, + .max_line_length_pixels = 32752, +}; + +static struct crl_csi_data_fmt magna_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_YUYV8_1X16, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + }, + { + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .pixel_order = CRL_PIXEL_ORDER_IGNORE, + .bits_per_pixel = 16, + }, +}; + +static struct crl_v4l2_ctrl magna_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +struct crl_sensor_configuration magna_crl_configuration = { + + .subdev_items = 
ARRAY_SIZE(magna_sensor_subdevs), + .subdevs = magna_sensor_subdevs, + + .pll_config_items = ARRAY_SIZE(magna_pll_configurations), + .pll_configs = magna_pll_configurations, + + .sensor_limits = &magna_sensor_limits, + + .modes_items = ARRAY_SIZE(magna_modes), + .modes = magna_modes, + + .streamon_regs_items = 0, + .streamon_regs = 0, + + .streamoff_regs_items = 0, + .streamoff_regs = 0, + + .v4l2_ctrls_items = ARRAY_SIZE(magna_v4l2_ctrls), + .v4l2_ctrl_bank = magna_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(magna_crl_csi_data_fmt), + .csi_fmts = magna_crl_csi_data_fmt, + +}; + +#endif /* __CRLMODULE_MAGNA_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov10635_configuration.h b/drivers/media/i2c/crlmodule/crl_ov10635_configuration.h new file mode 100644 index 000000000000..d771953a2fbf --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov10635_configuration.h @@ -0,0 +1,6368 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation + * + * Author: Yunliang Ding + * + */ + +#ifndef __CRLMODULE_OV10635_CONFIGURATION_H_ +#define __CRLMODULE_OV10635_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +#define OV10635_REG_RESET 0x0103 + +static struct crl_register_write_rep ov10635_1280_800_YUV_HDR[] = { + {0x301b, CRL_REG_LEN_08BIT, 0xff}, + {0x301c, CRL_REG_LEN_08BIT, 0xff}, + {0x301a, CRL_REG_LEN_08BIT, 0xff}, + {0x3011, CRL_REG_LEN_08BIT, 0x42}, + {0x6900, CRL_REG_LEN_08BIT, 0x0c}, + {0x6901, CRL_REG_LEN_08BIT, 0x11}, + {0x3503, CRL_REG_LEN_08BIT, 0x10}, + {0x3025, CRL_REG_LEN_08BIT, 0x03}, + {0x3003, CRL_REG_LEN_08BIT, 0x20}, + {0x3004, CRL_REG_LEN_08BIT, 0x21}, + {0x3005, CRL_REG_LEN_08BIT, 0x20}, + {0x3006, CRL_REG_LEN_08BIT, 0x91}, + {0x3600, CRL_REG_LEN_08BIT, 0x74}, + {0x3601, CRL_REG_LEN_08BIT, 0x2b}, + {0x3612, CRL_REG_LEN_08BIT, 0x00}, + {0x3611, CRL_REG_LEN_08BIT, 0x67}, + {0x3633, CRL_REG_LEN_08BIT, 0xca}, + {0x3602, CRL_REG_LEN_08BIT, 0x2f}, + {0x3603, CRL_REG_LEN_08BIT, 0x00}, + {0x3630, CRL_REG_LEN_08BIT, 0x28}, + {0x3631, CRL_REG_LEN_08BIT, 0x16}, + {0x3714, CRL_REG_LEN_08BIT, 0x10}, + {0x371d, CRL_REG_LEN_08BIT, 0x01}, + {0x3007, CRL_REG_LEN_08BIT, 0x01}, + {0x3024, CRL_REG_LEN_08BIT, 0x01}, + {0x3020, CRL_REG_LEN_08BIT, 0x0b}, + {0x3702, CRL_REG_LEN_08BIT, 0x20}, + {0x3703, CRL_REG_LEN_08BIT, 0x48}, + {0x3704, CRL_REG_LEN_08BIT, 0x32}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x370c, CRL_REG_LEN_08BIT, 0xc7}, + {0x370d, CRL_REG_LEN_08BIT, 0x80}, + {0x3712, CRL_REG_LEN_08BIT, 0x00}, + {0x3713, CRL_REG_LEN_08BIT, 0x20}, + {0x3715, CRL_REG_LEN_08BIT, 0x04}, + {0x381d, CRL_REG_LEN_08BIT, 0x40}, + {0x381c, CRL_REG_LEN_08BIT, 0x00}, + {0x3822, CRL_REG_LEN_08BIT, 0x50}, + {0x3824, CRL_REG_LEN_08BIT, 0x50}, + {0x3815, CRL_REG_LEN_08BIT, 0x8c}, + {0x3804, CRL_REG_LEN_08BIT, 0x05}, + {0x3805, CRL_REG_LEN_08BIT, 0x1f}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3806, CRL_REG_LEN_08BIT, 0x03}, + {0x3807, CRL_REG_LEN_08BIT, 0x29}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0x04}, + {0x3808, CRL_REG_LEN_08BIT, 0x05}, + {0x3809, CRL_REG_LEN_08BIT, 0x00}, + {0x380a, CRL_REG_LEN_08BIT, 0x03}, + {0x380b, CRL_REG_LEN_08BIT, 0x20}, + {0x380c, CRL_REG_LEN_08BIT, 0x07}, + {0x380d, CRL_REG_LEN_08BIT, 0x71}, + {0x6e42, CRL_REG_LEN_08BIT, 0x03}, + {0x6e43, CRL_REG_LEN_08BIT, 0x48}, + {0x380e, CRL_REG_LEN_08BIT, 0x03}, + {0x380f, CRL_REG_LEN_08BIT, 0x48}, + {0x3813, CRL_REG_LEN_08BIT, 0x02}, + {0x3811, CRL_REG_LEN_08BIT, 0x10}, + {0x381f, 
CRL_REG_LEN_08BIT, 0x0c}, + {0x3828, CRL_REG_LEN_08BIT, 0x03}, + {0x3829, CRL_REG_LEN_08BIT, 0x10}, + {0x382a, CRL_REG_LEN_08BIT, 0x10}, + {0x382b, CRL_REG_LEN_08BIT, 0x10}, + {0x3621, CRL_REG_LEN_08BIT, 0x64}, + {0x5005, CRL_REG_LEN_08BIT, 0x08}, + {0x56d5, CRL_REG_LEN_08BIT, 0x00}, + {0x56d6, CRL_REG_LEN_08BIT, 0x80}, + {0x56d7, CRL_REG_LEN_08BIT, 0x00}, + {0x56d8, CRL_REG_LEN_08BIT, 0x00}, + {0x56d9, CRL_REG_LEN_08BIT, 0x00}, + {0x56da, CRL_REG_LEN_08BIT, 0x80}, + {0x56db, CRL_REG_LEN_08BIT, 0x00}, + {0x56dc, CRL_REG_LEN_08BIT, 0x00}, + {0x56e8, CRL_REG_LEN_08BIT, 0x00}, + {0x56e9, CRL_REG_LEN_08BIT, 0x7f}, + {0x56ea, CRL_REG_LEN_08BIT, 0x00}, + {0x56eb, CRL_REG_LEN_08BIT, 0x7f}, + {0x5100, CRL_REG_LEN_08BIT, 0x00}, + {0x5101, CRL_REG_LEN_08BIT, 0x80}, + {0x5102, CRL_REG_LEN_08BIT, 0x00}, + {0x5103, CRL_REG_LEN_08BIT, 0x80}, + {0x5104, CRL_REG_LEN_08BIT, 0x00}, + {0x5105, CRL_REG_LEN_08BIT, 0x80}, + {0x5106, CRL_REG_LEN_08BIT, 0x00}, + {0x5107, CRL_REG_LEN_08BIT, 0x80}, + {0x5108, CRL_REG_LEN_08BIT, 0x00}, + {0x5109, CRL_REG_LEN_08BIT, 0x00}, + {0x510a, CRL_REG_LEN_08BIT, 0x00}, + {0x510b, CRL_REG_LEN_08BIT, 0x00}, + {0x510c, CRL_REG_LEN_08BIT, 0x00}, + {0x510d, CRL_REG_LEN_08BIT, 0x00}, + {0x510e, CRL_REG_LEN_08BIT, 0x00}, + {0x510f, CRL_REG_LEN_08BIT, 0x00}, + {0x5110, CRL_REG_LEN_08BIT, 0x00}, + {0x5111, CRL_REG_LEN_08BIT, 0x80}, + {0x5112, CRL_REG_LEN_08BIT, 0x00}, + {0x5113, CRL_REG_LEN_08BIT, 0x80}, + {0x5114, CRL_REG_LEN_08BIT, 0x00}, + {0x5115, CRL_REG_LEN_08BIT, 0x80}, + {0x5116, CRL_REG_LEN_08BIT, 0x00}, + {0x5117, CRL_REG_LEN_08BIT, 0x80}, + {0x5118, CRL_REG_LEN_08BIT, 0x00}, + {0x5119, CRL_REG_LEN_08BIT, 0x00}, + {0x511a, CRL_REG_LEN_08BIT, 0x00}, + {0x511b, CRL_REG_LEN_08BIT, 0x00}, + {0x511c, CRL_REG_LEN_08BIT, 0x00}, + {0x511d, CRL_REG_LEN_08BIT, 0x00}, + {0x511e, CRL_REG_LEN_08BIT, 0x00}, + {0x511f, CRL_REG_LEN_08BIT, 0x00}, + {0x56d0, CRL_REG_LEN_08BIT, 0x00}, + {0x5006, CRL_REG_LEN_08BIT, 0x24}, + {0x5608, CRL_REG_LEN_08BIT, 0x0d}, + {0x52d7, CRL_REG_LEN_08BIT, 0x06}, + {0x528d, CRL_REG_LEN_08BIT, 0x08}, + {0x5293, CRL_REG_LEN_08BIT, 0x12}, + {0x52d3, CRL_REG_LEN_08BIT, 0x12}, + {0x5288, CRL_REG_LEN_08BIT, 0x06}, + {0x5289, CRL_REG_LEN_08BIT, 0x20}, + {0x52c8, CRL_REG_LEN_08BIT, 0x06}, + {0x52c9, CRL_REG_LEN_08BIT, 0x20}, + {0x52cd, CRL_REG_LEN_08BIT, 0x04}, + {0x5381, CRL_REG_LEN_08BIT, 0x00}, + {0x5382, CRL_REG_LEN_08BIT, 0xff}, + {0x5589, CRL_REG_LEN_08BIT, 0x76}, + {0x558a, CRL_REG_LEN_08BIT, 0x47}, + {0x558b, CRL_REG_LEN_08BIT, 0xef}, + {0x558c, CRL_REG_LEN_08BIT, 0xc9}, + {0x558d, CRL_REG_LEN_08BIT, 0x49}, + {0x558e, CRL_REG_LEN_08BIT, 0x30}, + {0x558f, CRL_REG_LEN_08BIT, 0x67}, + {0x5590, CRL_REG_LEN_08BIT, 0x3f}, + {0x5591, CRL_REG_LEN_08BIT, 0xf0}, + {0x5592, CRL_REG_LEN_08BIT, 0x10}, + {0x55a2, CRL_REG_LEN_08BIT, 0x6d}, + {0x55a3, CRL_REG_LEN_08BIT, 0x55}, + {0x55a4, CRL_REG_LEN_08BIT, 0xc3}, + {0x55a5, CRL_REG_LEN_08BIT, 0xb5}, + {0x55a6, CRL_REG_LEN_08BIT, 0x43}, + {0x55a7, CRL_REG_LEN_08BIT, 0x38}, + {0x55a8, CRL_REG_LEN_08BIT, 0x5f}, + {0x55a9, CRL_REG_LEN_08BIT, 0x4b}, + {0x55aa, CRL_REG_LEN_08BIT, 0xf0}, + {0x55ab, CRL_REG_LEN_08BIT, 0x10}, + {0x5581, CRL_REG_LEN_08BIT, 0x52}, + {0x5300, CRL_REG_LEN_08BIT, 0x01}, + {0x5301, CRL_REG_LEN_08BIT, 0x00}, + {0x5302, CRL_REG_LEN_08BIT, 0x00}, + {0x5303, CRL_REG_LEN_08BIT, 0x0e}, + {0x5304, CRL_REG_LEN_08BIT, 0x00}, + {0x5305, CRL_REG_LEN_08BIT, 0x0e}, + {0x5306, CRL_REG_LEN_08BIT, 0x00}, + {0x5307, CRL_REG_LEN_08BIT, 0x36}, + {0x5308, CRL_REG_LEN_08BIT, 0x00}, + {0x5309, CRL_REG_LEN_08BIT, 0xd9}, + {0x530a, 
CRL_REG_LEN_08BIT, 0x00}, + {0x530b, CRL_REG_LEN_08BIT, 0x0f}, + {0x530c, CRL_REG_LEN_08BIT, 0x00}, + {0x530d, CRL_REG_LEN_08BIT, 0x2c}, + {0x530e, CRL_REG_LEN_08BIT, 0x00}, + {0x530f, CRL_REG_LEN_08BIT, 0x59}, + {0x5310, CRL_REG_LEN_08BIT, 0x00}, + {0x5311, CRL_REG_LEN_08BIT, 0x7b}, + {0x5312, CRL_REG_LEN_08BIT, 0x00}, + {0x5313, CRL_REG_LEN_08BIT, 0x22}, + {0x5314, CRL_REG_LEN_08BIT, 0x00}, + {0x5315, CRL_REG_LEN_08BIT, 0xd5}, + {0x5316, CRL_REG_LEN_08BIT, 0x00}, + {0x5317, CRL_REG_LEN_08BIT, 0x13}, + {0x5318, CRL_REG_LEN_08BIT, 0x00}, + {0x5319, CRL_REG_LEN_08BIT, 0x18}, + {0x531a, CRL_REG_LEN_08BIT, 0x00}, + {0x531b, CRL_REG_LEN_08BIT, 0x26}, + {0x531c, CRL_REG_LEN_08BIT, 0x00}, + {0x531d, CRL_REG_LEN_08BIT, 0xdc}, + {0x531e, CRL_REG_LEN_08BIT, 0x00}, + {0x531f, CRL_REG_LEN_08BIT, 0x02}, + {0x5320, CRL_REG_LEN_08BIT, 0x00}, + {0x5321, CRL_REG_LEN_08BIT, 0x24}, + {0x5322, CRL_REG_LEN_08BIT, 0x00}, + {0x5323, CRL_REG_LEN_08BIT, 0x56}, + {0x5324, CRL_REG_LEN_08BIT, 0x00}, + {0x5325, CRL_REG_LEN_08BIT, 0x85}, + {0x5326, CRL_REG_LEN_08BIT, 0x00}, + {0x5327, CRL_REG_LEN_08BIT, 0x20}, + {0x5609, CRL_REG_LEN_08BIT, 0x01}, + {0x560a, CRL_REG_LEN_08BIT, 0x40}, + {0x560b, CRL_REG_LEN_08BIT, 0x01}, + {0x560c, CRL_REG_LEN_08BIT, 0x40}, + {0x560d, CRL_REG_LEN_08BIT, 0x00}, + {0x560e, CRL_REG_LEN_08BIT, 0xfa}, + {0x560f, CRL_REG_LEN_08BIT, 0x00}, + {0x5610, CRL_REG_LEN_08BIT, 0xfa}, + {0x5611, CRL_REG_LEN_08BIT, 0x02}, + {0x5612, CRL_REG_LEN_08BIT, 0x80}, + {0x5613, CRL_REG_LEN_08BIT, 0x02}, + {0x5614, CRL_REG_LEN_08BIT, 0x80}, + {0x5615, CRL_REG_LEN_08BIT, 0x01}, + {0x5616, CRL_REG_LEN_08BIT, 0x2c}, + {0x5617, CRL_REG_LEN_08BIT, 0x01}, + {0x5618, CRL_REG_LEN_08BIT, 0x2c}, + {0x563b, CRL_REG_LEN_08BIT, 0x01}, + {0x563c, CRL_REG_LEN_08BIT, 0x01}, + {0x563d, CRL_REG_LEN_08BIT, 0x01}, + {0x563e, CRL_REG_LEN_08BIT, 0x01}, + {0x563f, CRL_REG_LEN_08BIT, 0x03}, + {0x5640, CRL_REG_LEN_08BIT, 0x03}, + {0x5641, CRL_REG_LEN_08BIT, 0x03}, + {0x5642, CRL_REG_LEN_08BIT, 0x05}, + {0x5643, CRL_REG_LEN_08BIT, 0x09}, + {0x5644, CRL_REG_LEN_08BIT, 0x05}, + {0x5645, CRL_REG_LEN_08BIT, 0x05}, + {0x5646, CRL_REG_LEN_08BIT, 0x05}, + {0x5647, CRL_REG_LEN_08BIT, 0x05}, + {0x5651, CRL_REG_LEN_08BIT, 0x00}, + {0x5652, CRL_REG_LEN_08BIT, 0x80}, + {0x521a, CRL_REG_LEN_08BIT, 0x01}, + {0x521b, CRL_REG_LEN_08BIT, 0x03}, + {0x521c, CRL_REG_LEN_08BIT, 0x06}, + {0x521d, CRL_REG_LEN_08BIT, 0x0a}, + {0x521e, CRL_REG_LEN_08BIT, 0x0e}, + {0x521f, CRL_REG_LEN_08BIT, 0x12}, + {0x5220, CRL_REG_LEN_08BIT, 0x16}, + {0x5223, CRL_REG_LEN_08BIT, 0x02}, + {0x5225, CRL_REG_LEN_08BIT, 0x04}, + {0x5227, CRL_REG_LEN_08BIT, 0x08}, + {0x5229, CRL_REG_LEN_08BIT, 0x0c}, + {0x522b, CRL_REG_LEN_08BIT, 0x12}, + {0x522d, CRL_REG_LEN_08BIT, 0x18}, + {0x522f, CRL_REG_LEN_08BIT, 0x1e}, + {0x5241, CRL_REG_LEN_08BIT, 0x04}, + {0x5242, CRL_REG_LEN_08BIT, 0x01}, + {0x5243, CRL_REG_LEN_08BIT, 0x03}, + {0x5244, CRL_REG_LEN_08BIT, 0x06}, + {0x5245, CRL_REG_LEN_08BIT, 0x0a}, + {0x5246, CRL_REG_LEN_08BIT, 0x0e}, + {0x5247, CRL_REG_LEN_08BIT, 0x12}, + {0x5248, CRL_REG_LEN_08BIT, 0x16}, + {0x524a, CRL_REG_LEN_08BIT, 0x03}, + {0x524c, CRL_REG_LEN_08BIT, 0x04}, + {0x524e, CRL_REG_LEN_08BIT, 0x08}, + {0x5250, CRL_REG_LEN_08BIT, 0x0c}, + {0x5252, CRL_REG_LEN_08BIT, 0x12}, + {0x5254, CRL_REG_LEN_08BIT, 0x18}, + {0x5256, CRL_REG_LEN_08BIT, 0x1e}, + {0x4606, CRL_REG_LEN_08BIT, 0x07}, + {0x4607, CRL_REG_LEN_08BIT, 0x71}, + {0x460a, CRL_REG_LEN_08BIT, 0x02}, + {0x460b, CRL_REG_LEN_08BIT, 0x70}, + {0x460c, CRL_REG_LEN_08BIT, 0x00}, + {0x4620, CRL_REG_LEN_08BIT, 0x0e}, + {0x4700, 
CRL_REG_LEN_08BIT, 0x04}, + {0x4701, CRL_REG_LEN_08BIT, 0x00}, + {0x4702, CRL_REG_LEN_08BIT, 0x01}, + {0x4004, CRL_REG_LEN_08BIT, 0x04}, + {0x4005, CRL_REG_LEN_08BIT, 0x18}, + {0x4001, CRL_REG_LEN_08BIT, 0x06}, + {0x4050, CRL_REG_LEN_08BIT, 0x22}, + {0x4051, CRL_REG_LEN_08BIT, 0x24}, + {0x4052, CRL_REG_LEN_08BIT, 0x02}, + {0x4057, CRL_REG_LEN_08BIT, 0x9c}, + {0x405a, CRL_REG_LEN_08BIT, 0x00}, + {0x3832, CRL_REG_LEN_08BIT, 0x00}, + {0x3833, CRL_REG_LEN_08BIT, 0x02}, + {0x3834, CRL_REG_LEN_08BIT, 0x03}, + {0x3835, CRL_REG_LEN_08BIT, 0x48}, + {0x302e, CRL_REG_LEN_08BIT, 0x00}, + {0x4202, CRL_REG_LEN_08BIT, 0x02}, + {0x3023, CRL_REG_LEN_08BIT, 0x10}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x6f10, CRL_REG_LEN_08BIT, 0x07}, + {0x6f11, CRL_REG_LEN_08BIT, 0x82}, + {0x6f12, CRL_REG_LEN_08BIT, 0x04}, + {0x6f13, CRL_REG_LEN_08BIT, 0x00}, + {0x6f14, CRL_REG_LEN_08BIT, 0x1f}, + {0x6f15, CRL_REG_LEN_08BIT, 0xdd}, + {0x6f16, CRL_REG_LEN_08BIT, 0x04}, + {0x6f17, CRL_REG_LEN_08BIT, 0x04}, + {0x6f18, CRL_REG_LEN_08BIT, 0x36}, + {0x6f19, CRL_REG_LEN_08BIT, 0x66}, + {0x6f1a, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1b, CRL_REG_LEN_08BIT, 0x08}, + {0x6f1c, CRL_REG_LEN_08BIT, 0x0c}, + {0x6f1d, CRL_REG_LEN_08BIT, 0xe7}, + {0x6f1e, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1f, CRL_REG_LEN_08BIT, 0x0c}, + {0xd000, CRL_REG_LEN_08BIT, 0x19}, + {0xd001, CRL_REG_LEN_08BIT, 0xa0}, + {0xd002, CRL_REG_LEN_08BIT, 0x00}, + {0xd003, CRL_REG_LEN_08BIT, 0x01}, + {0xd004, CRL_REG_LEN_08BIT, 0xa9}, + {0xd005, CRL_REG_LEN_08BIT, 0xad}, + {0xd006, CRL_REG_LEN_08BIT, 0x10}, + {0xd007, CRL_REG_LEN_08BIT, 0x40}, + {0xd008, CRL_REG_LEN_08BIT, 0x44}, + {0xd009, CRL_REG_LEN_08BIT, 0x00}, + {0xd00a, CRL_REG_LEN_08BIT, 0x68}, + {0xd00b, CRL_REG_LEN_08BIT, 0x00}, + {0xd00c, CRL_REG_LEN_08BIT, 0x15}, + {0xd00d, CRL_REG_LEN_08BIT, 0x00}, + {0xd00e, CRL_REG_LEN_08BIT, 0x00}, + {0xd00f, CRL_REG_LEN_08BIT, 0x00}, + {0xd010, CRL_REG_LEN_08BIT, 0x19}, + {0xd011, CRL_REG_LEN_08BIT, 0xa0}, + {0xd012, CRL_REG_LEN_08BIT, 0x00}, + {0xd013, CRL_REG_LEN_08BIT, 0x01}, + {0xd014, CRL_REG_LEN_08BIT, 0xa9}, + {0xd015, CRL_REG_LEN_08BIT, 0xad}, + {0xd016, CRL_REG_LEN_08BIT, 0x13}, + {0xd017, CRL_REG_LEN_08BIT, 0xd0}, + {0xd018, CRL_REG_LEN_08BIT, 0x44}, + {0xd019, CRL_REG_LEN_08BIT, 0x00}, + {0xd01a, CRL_REG_LEN_08BIT, 0x68}, + {0xd01b, CRL_REG_LEN_08BIT, 0x00}, + {0xd01c, CRL_REG_LEN_08BIT, 0x15}, + {0xd01d, CRL_REG_LEN_08BIT, 0x00}, + {0xd01e, CRL_REG_LEN_08BIT, 0x00}, + {0xd01f, CRL_REG_LEN_08BIT, 0x00}, + {0xd020, CRL_REG_LEN_08BIT, 0x19}, + {0xd021, CRL_REG_LEN_08BIT, 0xa0}, + {0xd022, CRL_REG_LEN_08BIT, 0x00}, + {0xd023, CRL_REG_LEN_08BIT, 0x01}, + {0xd024, CRL_REG_LEN_08BIT, 0xa9}, + {0xd025, CRL_REG_LEN_08BIT, 0xad}, + {0xd026, CRL_REG_LEN_08BIT, 0x14}, + {0xd027, CRL_REG_LEN_08BIT, 0xb8}, + {0xd028, CRL_REG_LEN_08BIT, 0x44}, + {0xd029, CRL_REG_LEN_08BIT, 0x00}, + {0xd02a, CRL_REG_LEN_08BIT, 0x68}, + {0xd02b, CRL_REG_LEN_08BIT, 0x00}, + {0xd02c, CRL_REG_LEN_08BIT, 0x15}, + {0xd02d, CRL_REG_LEN_08BIT, 0x00}, + {0xd02e, CRL_REG_LEN_08BIT, 0x00}, + {0xd02f, CRL_REG_LEN_08BIT, 0x00}, + {0xd030, CRL_REG_LEN_08BIT, 0x19}, + {0xd031, CRL_REG_LEN_08BIT, 0xa0}, + {0xd032, CRL_REG_LEN_08BIT, 0x00}, + {0xd033, CRL_REG_LEN_08BIT, 0x01}, + {0xd034, CRL_REG_LEN_08BIT, 0xa9}, + {0xd035, CRL_REG_LEN_08BIT, 0xad}, + {0xd036, CRL_REG_LEN_08BIT, 0x14}, + {0xd037, CRL_REG_LEN_08BIT, 0xdc}, + {0xd038, CRL_REG_LEN_08BIT, 0x44}, + {0xd039, CRL_REG_LEN_08BIT, 0x00}, + {0xd03a, CRL_REG_LEN_08BIT, 0x68}, + {0xd03b, CRL_REG_LEN_08BIT, 0x00}, + {0xd03c, 
CRL_REG_LEN_08BIT, 0x15}, + {0xd03d, CRL_REG_LEN_08BIT, 0x00}, + {0xd03e, CRL_REG_LEN_08BIT, 0x00}, + {0xd03f, CRL_REG_LEN_08BIT, 0x00}, + {0xd040, CRL_REG_LEN_08BIT, 0x9c}, + {0xd041, CRL_REG_LEN_08BIT, 0x21}, + {0xd042, CRL_REG_LEN_08BIT, 0xff}, + {0xd043, CRL_REG_LEN_08BIT, 0xe4}, + {0xd044, CRL_REG_LEN_08BIT, 0xd4}, + {0xd045, CRL_REG_LEN_08BIT, 0x01}, + {0xd046, CRL_REG_LEN_08BIT, 0x48}, + {0xd047, CRL_REG_LEN_08BIT, 0x00}, + {0xd048, CRL_REG_LEN_08BIT, 0xd4}, + {0xd049, CRL_REG_LEN_08BIT, 0x01}, + {0xd04a, CRL_REG_LEN_08BIT, 0x50}, + {0xd04b, CRL_REG_LEN_08BIT, 0x04}, + {0xd04c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd04d, CRL_REG_LEN_08BIT, 0x01}, + {0xd04e, CRL_REG_LEN_08BIT, 0x60}, + {0xd04f, CRL_REG_LEN_08BIT, 0x08}, + {0xd050, CRL_REG_LEN_08BIT, 0xd4}, + {0xd051, CRL_REG_LEN_08BIT, 0x01}, + {0xd052, CRL_REG_LEN_08BIT, 0x70}, + {0xd053, CRL_REG_LEN_08BIT, 0x0c}, + {0xd054, CRL_REG_LEN_08BIT, 0xd4}, + {0xd055, CRL_REG_LEN_08BIT, 0x01}, + {0xd056, CRL_REG_LEN_08BIT, 0x80}, + {0xd057, CRL_REG_LEN_08BIT, 0x10}, + {0xd058, CRL_REG_LEN_08BIT, 0x19}, + {0xd059, CRL_REG_LEN_08BIT, 0xc0}, + {0xd05a, CRL_REG_LEN_08BIT, 0x00}, + {0xd05b, CRL_REG_LEN_08BIT, 0x01}, + {0xd05c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd05d, CRL_REG_LEN_08BIT, 0xce}, + {0xd05e, CRL_REG_LEN_08BIT, 0x02}, + {0xd05f, CRL_REG_LEN_08BIT, 0xa4}, + {0xd060, CRL_REG_LEN_08BIT, 0x9c}, + {0xd061, CRL_REG_LEN_08BIT, 0xa0}, + {0xd062, CRL_REG_LEN_08BIT, 0x00}, + {0xd063, CRL_REG_LEN_08BIT, 0x00}, + {0xd064, CRL_REG_LEN_08BIT, 0x84}, + {0xd065, CRL_REG_LEN_08BIT, 0x6e}, + {0xd066, CRL_REG_LEN_08BIT, 0x00}, + {0xd067, CRL_REG_LEN_08BIT, 0x00}, + {0xd068, CRL_REG_LEN_08BIT, 0xd8}, + {0xd069, CRL_REG_LEN_08BIT, 0x03}, + {0xd06a, CRL_REG_LEN_08BIT, 0x28}, + {0xd06b, CRL_REG_LEN_08BIT, 0x76}, + {0xd06c, CRL_REG_LEN_08BIT, 0x1a}, + {0xd06d, CRL_REG_LEN_08BIT, 0x00}, + {0xd06e, CRL_REG_LEN_08BIT, 0x00}, + {0xd06f, CRL_REG_LEN_08BIT, 0x01}, + {0xd070, CRL_REG_LEN_08BIT, 0xaa}, + {0xd071, CRL_REG_LEN_08BIT, 0x10}, + {0xd072, CRL_REG_LEN_08BIT, 0x03}, + {0xd073, CRL_REG_LEN_08BIT, 0xf0}, + {0xd074, CRL_REG_LEN_08BIT, 0x18}, + {0xd075, CRL_REG_LEN_08BIT, 0x60}, + {0xd076, CRL_REG_LEN_08BIT, 0x00}, + {0xd077, CRL_REG_LEN_08BIT, 0x01}, + {0xd078, CRL_REG_LEN_08BIT, 0xa8}, + {0xd079, CRL_REG_LEN_08BIT, 0x63}, + {0xd07a, CRL_REG_LEN_08BIT, 0x07}, + {0xd07b, CRL_REG_LEN_08BIT, 0x80}, + {0xd07c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd07d, CRL_REG_LEN_08BIT, 0xa0}, + {0xd07e, CRL_REG_LEN_08BIT, 0x00}, + {0xd07f, CRL_REG_LEN_08BIT, 0x04}, + {0xd080, CRL_REG_LEN_08BIT, 0x18}, + {0xd081, CRL_REG_LEN_08BIT, 0xc0}, + {0xd082, CRL_REG_LEN_08BIT, 0x00}, + {0xd083, CRL_REG_LEN_08BIT, 0x00}, + {0xd084, CRL_REG_LEN_08BIT, 0xa8}, + {0xd085, CRL_REG_LEN_08BIT, 0xc6}, + {0xd086, CRL_REG_LEN_08BIT, 0x00}, + {0xd087, CRL_REG_LEN_08BIT, 0x00}, + {0xd088, CRL_REG_LEN_08BIT, 0x8c}, + {0xd089, CRL_REG_LEN_08BIT, 0x63}, + {0xd08a, CRL_REG_LEN_08BIT, 0x00}, + {0xd08b, CRL_REG_LEN_08BIT, 0x00}, + {0xd08c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd08d, CRL_REG_LEN_08BIT, 0x01}, + {0xd08e, CRL_REG_LEN_08BIT, 0x28}, + {0xd08f, CRL_REG_LEN_08BIT, 0x14}, + {0xd090, CRL_REG_LEN_08BIT, 0xd4}, + {0xd091, CRL_REG_LEN_08BIT, 0x01}, + {0xd092, CRL_REG_LEN_08BIT, 0x30}, + {0xd093, CRL_REG_LEN_08BIT, 0x18}, + {0xd094, CRL_REG_LEN_08BIT, 0x07}, + {0xd095, CRL_REG_LEN_08BIT, 0xff}, + {0xd096, CRL_REG_LEN_08BIT, 0xf8}, + {0xd097, CRL_REG_LEN_08BIT, 0xfd}, + {0xd098, CRL_REG_LEN_08BIT, 0x9c}, + {0xd099, CRL_REG_LEN_08BIT, 0x80}, + {0xd09a, CRL_REG_LEN_08BIT, 0x00}, + {0xd09b, CRL_REG_LEN_08BIT, 0x03}, + {0xd09c, 
CRL_REG_LEN_08BIT, 0xa5}, + {0xd09d, CRL_REG_LEN_08BIT, 0x6b}, + {0xd09e, CRL_REG_LEN_08BIT, 0x00}, + {0xd09f, CRL_REG_LEN_08BIT, 0xff}, + {0xd0a0, CRL_REG_LEN_08BIT, 0x18}, + {0xd0a1, CRL_REG_LEN_08BIT, 0xc0}, + {0xd0a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0a3, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a4, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0a5, CRL_REG_LEN_08BIT, 0xc6}, + {0xd0a6, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a7, CRL_REG_LEN_08BIT, 0x02}, + {0xd0a8, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0a9, CRL_REG_LEN_08BIT, 0x6b}, + {0xd0aa, CRL_REG_LEN_08BIT, 0x58}, + {0xd0ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ac, CRL_REG_LEN_08BIT, 0x84}, + {0xd0ad, CRL_REG_LEN_08BIT, 0x8e}, + {0xd0ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd0af, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0b1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd0b2, CRL_REG_LEN_08BIT, 0x30}, + {0xd0b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b4, CRL_REG_LEN_08BIT, 0x98}, + {0xd0b5, CRL_REG_LEN_08BIT, 0xb0}, + {0xd0b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0b9, CRL_REG_LEN_08BIT, 0x64}, + {0xd0ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd0bb, CRL_REG_LEN_08BIT, 0x6e}, + {0xd0bc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0bd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd0be, CRL_REG_LEN_08BIT, 0x18}, + {0xd0bf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c0, CRL_REG_LEN_08BIT, 0x10}, + {0xd0c1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c3, CRL_REG_LEN_08BIT, 0x06}, + {0xd0c4, CRL_REG_LEN_08BIT, 0x95}, + {0xd0c5, CRL_REG_LEN_08BIT, 0x8b}, + {0xd0c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c8, CRL_REG_LEN_08BIT, 0x94}, + {0xd0c9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd0ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd0cb, CRL_REG_LEN_08BIT, 0x70}, + {0xd0cc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0cd, CRL_REG_LEN_08BIT, 0x65}, + {0xd0ce, CRL_REG_LEN_08BIT, 0x60}, + {0xd0cf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd0d1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d3, CRL_REG_LEN_08BIT, 0x62}, + {0xd0d4, CRL_REG_LEN_08BIT, 0x15}, + {0xd0d5, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d8, CRL_REG_LEN_08BIT, 0x18}, + {0xd0d9, CRL_REG_LEN_08BIT, 0x60}, + {0xd0da, CRL_REG_LEN_08BIT, 0x80}, + {0xd0db, CRL_REG_LEN_08BIT, 0x06}, + {0xd0dc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0dd, CRL_REG_LEN_08BIT, 0x83}, + {0xd0de, CRL_REG_LEN_08BIT, 0x38}, + {0xd0df, CRL_REG_LEN_08BIT, 0x29}, + {0xd0e0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e1, CRL_REG_LEN_08BIT, 0xe3}, + {0xd0e2, CRL_REG_LEN_08BIT, 0x40}, + {0xd0e3, CRL_REG_LEN_08BIT, 0x08}, + {0xd0e4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0e5, CRL_REG_LEN_08BIT, 0x84}, + {0xd0e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e9, CRL_REG_LEN_08BIT, 0xa3}, + {0xd0ea, CRL_REG_LEN_08BIT, 0x40}, + {0xd0eb, CRL_REG_LEN_08BIT, 0x09}, + {0xd0ec, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0ed, CRL_REG_LEN_08BIT, 0xc3}, + {0xd0ee, CRL_REG_LEN_08BIT, 0x38}, + {0xd0ef, CRL_REG_LEN_08BIT, 0x2a}, + {0xd0f0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f1, CRL_REG_LEN_08BIT, 0x07}, + {0xd0f2, CRL_REG_LEN_08BIT, 0x20}, + {0xd0f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0f5, CRL_REG_LEN_08BIT, 0x66}, + {0xd0f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f8, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f9, CRL_REG_LEN_08BIT, 0x05}, + {0xd0fa, CRL_REG_LEN_08BIT, 0x18}, + {0xd0fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd0fc, 
CRL_REG_LEN_08BIT, 0x18}, + {0xd0fd, CRL_REG_LEN_08BIT, 0x60}, + {0xd0fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ff, CRL_REG_LEN_08BIT, 0x01}, + {0xd100, CRL_REG_LEN_08BIT, 0x98}, + {0xd101, CRL_REG_LEN_08BIT, 0x90}, + {0xd102, CRL_REG_LEN_08BIT, 0x00}, + {0xd103, CRL_REG_LEN_08BIT, 0x00}, + {0xd104, CRL_REG_LEN_08BIT, 0x84}, + {0xd105, CRL_REG_LEN_08BIT, 0xae}, + {0xd106, CRL_REG_LEN_08BIT, 0x00}, + {0xd107, CRL_REG_LEN_08BIT, 0x00}, + {0xd108, CRL_REG_LEN_08BIT, 0xa8}, + {0xd109, CRL_REG_LEN_08BIT, 0x63}, + {0xd10a, CRL_REG_LEN_08BIT, 0x06}, + {0xd10b, CRL_REG_LEN_08BIT, 0x4c}, + {0xd10c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd10d, CRL_REG_LEN_08BIT, 0xc0}, + {0xd10e, CRL_REG_LEN_08BIT, 0x00}, + {0xd10f, CRL_REG_LEN_08BIT, 0x00}, + {0xd110, CRL_REG_LEN_08BIT, 0xd8}, + {0xd111, CRL_REG_LEN_08BIT, 0x03}, + {0xd112, CRL_REG_LEN_08BIT, 0x30}, + {0xd113, CRL_REG_LEN_08BIT, 0x00}, + {0xd114, CRL_REG_LEN_08BIT, 0x8c}, + {0xd115, CRL_REG_LEN_08BIT, 0x65}, + {0xd116, CRL_REG_LEN_08BIT, 0x00}, + {0xd117, CRL_REG_LEN_08BIT, 0x6e}, + {0xd118, CRL_REG_LEN_08BIT, 0xe5}, + {0xd119, CRL_REG_LEN_08BIT, 0x84}, + {0xd11a, CRL_REG_LEN_08BIT, 0x18}, + {0xd11b, CRL_REG_LEN_08BIT, 0x00}, + {0xd11c, CRL_REG_LEN_08BIT, 0x10}, + {0xd11d, CRL_REG_LEN_08BIT, 0x00}, + {0xd11e, CRL_REG_LEN_08BIT, 0x00}, + {0xd11f, CRL_REG_LEN_08BIT, 0x07}, + {0xd120, CRL_REG_LEN_08BIT, 0x18}, + {0xd121, CRL_REG_LEN_08BIT, 0x80}, + {0xd122, CRL_REG_LEN_08BIT, 0x80}, + {0xd123, CRL_REG_LEN_08BIT, 0x06}, + {0xd124, CRL_REG_LEN_08BIT, 0x94}, + {0xd125, CRL_REG_LEN_08BIT, 0x65}, + {0xd126, CRL_REG_LEN_08BIT, 0x00}, + {0xd127, CRL_REG_LEN_08BIT, 0x70}, + {0xd128, CRL_REG_LEN_08BIT, 0xe5}, + {0xd129, CRL_REG_LEN_08BIT, 0x43}, + {0xd12a, CRL_REG_LEN_08BIT, 0x60}, + {0xd12b, CRL_REG_LEN_08BIT, 0x00}, + {0xd12c, CRL_REG_LEN_08BIT, 0x0c}, + {0xd12d, CRL_REG_LEN_08BIT, 0x00}, + {0xd12e, CRL_REG_LEN_08BIT, 0x00}, + {0xd12f, CRL_REG_LEN_08BIT, 0x3e}, + {0xd130, CRL_REG_LEN_08BIT, 0xa8}, + {0xd131, CRL_REG_LEN_08BIT, 0x64}, + {0xd132, CRL_REG_LEN_08BIT, 0x38}, + {0xd133, CRL_REG_LEN_08BIT, 0x24}, + {0xd134, CRL_REG_LEN_08BIT, 0x18}, + {0xd135, CRL_REG_LEN_08BIT, 0x80}, + {0xd136, CRL_REG_LEN_08BIT, 0x80}, + {0xd137, CRL_REG_LEN_08BIT, 0x06}, + {0xd138, CRL_REG_LEN_08BIT, 0xa8}, + {0xd139, CRL_REG_LEN_08BIT, 0x64}, + {0xd13a, CRL_REG_LEN_08BIT, 0x38}, + {0xd13b, CRL_REG_LEN_08BIT, 0x24}, + {0xd13c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd13d, CRL_REG_LEN_08BIT, 0x63}, + {0xd13e, CRL_REG_LEN_08BIT, 0x00}, + {0xd13f, CRL_REG_LEN_08BIT, 0x00}, + {0xd140, CRL_REG_LEN_08BIT, 0xa4}, + {0xd141, CRL_REG_LEN_08BIT, 0x63}, + {0xd142, CRL_REG_LEN_08BIT, 0x00}, + {0xd143, CRL_REG_LEN_08BIT, 0x40}, + {0xd144, CRL_REG_LEN_08BIT, 0xbc}, + {0xd145, CRL_REG_LEN_08BIT, 0x23}, + {0xd146, CRL_REG_LEN_08BIT, 0x00}, + {0xd147, CRL_REG_LEN_08BIT, 0x00}, + {0xd148, CRL_REG_LEN_08BIT, 0x0c}, + {0xd149, CRL_REG_LEN_08BIT, 0x00}, + {0xd14a, CRL_REG_LEN_08BIT, 0x00}, + {0xd14b, CRL_REG_LEN_08BIT, 0x2a}, + {0xd14c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd14d, CRL_REG_LEN_08BIT, 0x64}, + {0xd14e, CRL_REG_LEN_08BIT, 0x6e}, + {0xd14f, CRL_REG_LEN_08BIT, 0x44}, + {0xd150, CRL_REG_LEN_08BIT, 0x19}, + {0xd151, CRL_REG_LEN_08BIT, 0x00}, + {0xd152, CRL_REG_LEN_08BIT, 0x80}, + {0xd153, CRL_REG_LEN_08BIT, 0x06}, + {0xd154, CRL_REG_LEN_08BIT, 0xa8}, + {0xd155, CRL_REG_LEN_08BIT, 0xe8}, + {0xd156, CRL_REG_LEN_08BIT, 0x3d}, + {0xd157, CRL_REG_LEN_08BIT, 0x05}, + {0xd158, CRL_REG_LEN_08BIT, 0x8c}, + {0xd159, CRL_REG_LEN_08BIT, 0x67}, + {0xd15a, CRL_REG_LEN_08BIT, 0x00}, + {0xd15b, CRL_REG_LEN_08BIT, 0x00}, + {0xd15c, 
CRL_REG_LEN_08BIT, 0xb8}, + {0xd15d, CRL_REG_LEN_08BIT, 0x63}, + {0xd15e, CRL_REG_LEN_08BIT, 0x00}, + {0xd15f, CRL_REG_LEN_08BIT, 0x18}, + {0xd160, CRL_REG_LEN_08BIT, 0xb8}, + {0xd161, CRL_REG_LEN_08BIT, 0x63}, + {0xd162, CRL_REG_LEN_08BIT, 0x00}, + {0xd163, CRL_REG_LEN_08BIT, 0x98}, + {0xd164, CRL_REG_LEN_08BIT, 0xbc}, + {0xd165, CRL_REG_LEN_08BIT, 0x03}, + {0xd166, CRL_REG_LEN_08BIT, 0x00}, + {0xd167, CRL_REG_LEN_08BIT, 0x00}, + {0xd168, CRL_REG_LEN_08BIT, 0x10}, + {0xd169, CRL_REG_LEN_08BIT, 0x00}, + {0xd16a, CRL_REG_LEN_08BIT, 0x00}, + {0xd16b, CRL_REG_LEN_08BIT, 0x10}, + {0xd16c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd16d, CRL_REG_LEN_08BIT, 0x48}, + {0xd16e, CRL_REG_LEN_08BIT, 0x67}, + {0xd16f, CRL_REG_LEN_08BIT, 0x02}, + {0xd170, CRL_REG_LEN_08BIT, 0xb8}, + {0xd171, CRL_REG_LEN_08BIT, 0xa3}, + {0xd172, CRL_REG_LEN_08BIT, 0x00}, + {0xd173, CRL_REG_LEN_08BIT, 0x19}, + {0xd174, CRL_REG_LEN_08BIT, 0x8c}, + {0xd175, CRL_REG_LEN_08BIT, 0x8a}, + {0xd176, CRL_REG_LEN_08BIT, 0x00}, + {0xd177, CRL_REG_LEN_08BIT, 0x00}, + {0xd178, CRL_REG_LEN_08BIT, 0xa9}, + {0xd179, CRL_REG_LEN_08BIT, 0x68}, + {0xd17a, CRL_REG_LEN_08BIT, 0x67}, + {0xd17b, CRL_REG_LEN_08BIT, 0x03}, + {0xd17c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd17d, CRL_REG_LEN_08BIT, 0xc4}, + {0xd17e, CRL_REG_LEN_08BIT, 0x00}, + {0xd17f, CRL_REG_LEN_08BIT, 0x08}, + {0xd180, CRL_REG_LEN_08BIT, 0x8c}, + {0xd181, CRL_REG_LEN_08BIT, 0x6b}, + {0xd182, CRL_REG_LEN_08BIT, 0x00}, + {0xd183, CRL_REG_LEN_08BIT, 0x00}, + {0xd184, CRL_REG_LEN_08BIT, 0xb8}, + {0xd185, CRL_REG_LEN_08BIT, 0x85}, + {0xd186, CRL_REG_LEN_08BIT, 0x00}, + {0xd187, CRL_REG_LEN_08BIT, 0x98}, + {0xd188, CRL_REG_LEN_08BIT, 0xe0}, + {0xd189, CRL_REG_LEN_08BIT, 0x63}, + {0xd18a, CRL_REG_LEN_08BIT, 0x30}, + {0xd18b, CRL_REG_LEN_08BIT, 0x04}, + {0xd18c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd18d, CRL_REG_LEN_08BIT, 0x64}, + {0xd18e, CRL_REG_LEN_08BIT, 0x18}, + {0xd18f, CRL_REG_LEN_08BIT, 0x00}, + {0xd190, CRL_REG_LEN_08BIT, 0xa4}, + {0xd191, CRL_REG_LEN_08BIT, 0x83}, + {0xd192, CRL_REG_LEN_08BIT, 0xff}, + {0xd193, CRL_REG_LEN_08BIT, 0xff}, + {0xd194, CRL_REG_LEN_08BIT, 0xb8}, + {0xd195, CRL_REG_LEN_08BIT, 0x64}, + {0xd196, CRL_REG_LEN_08BIT, 0x00}, + {0xd197, CRL_REG_LEN_08BIT, 0x48}, + {0xd198, CRL_REG_LEN_08BIT, 0xd8}, + {0xd199, CRL_REG_LEN_08BIT, 0x0a}, + {0xd19a, CRL_REG_LEN_08BIT, 0x18}, + {0xd19b, CRL_REG_LEN_08BIT, 0x00}, + {0xd19c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd19d, CRL_REG_LEN_08BIT, 0x0b}, + {0xd19e, CRL_REG_LEN_08BIT, 0x20}, + {0xd19f, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a0, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1a1, CRL_REG_LEN_08BIT, 0x60}, + {0xd1a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1a5, CRL_REG_LEN_08BIT, 0x07}, + {0xd1a6, CRL_REG_LEN_08BIT, 0x18}, + {0xd1a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1a9, CRL_REG_LEN_08BIT, 0x68}, + {0xd1aa, CRL_REG_LEN_08BIT, 0x38}, + {0xd1ab, CRL_REG_LEN_08BIT, 0x22}, + {0xd1ac, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ad, CRL_REG_LEN_08BIT, 0x80}, + {0xd1ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd1af, CRL_REG_LEN_08BIT, 0x70}, + {0xd1b0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1b1, CRL_REG_LEN_08BIT, 0xe8}, + {0xd1b2, CRL_REG_LEN_08BIT, 0x38}, + {0xd1b3, CRL_REG_LEN_08BIT, 0x43}, + {0xd1b4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1b5, CRL_REG_LEN_08BIT, 0x03}, + {0xd1b6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1b8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1b9, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bb, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bc, 
CRL_REG_LEN_08BIT, 0xa8}, + {0xd1bd, CRL_REG_LEN_08BIT, 0xc8}, + {0xd1be, CRL_REG_LEN_08BIT, 0x38}, + {0xd1bf, CRL_REG_LEN_08BIT, 0x42}, + {0xd1c0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1c1, CRL_REG_LEN_08BIT, 0x66}, + {0xd1c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1c5, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c7, CRL_REG_LEN_08BIT, 0x01}, + {0xd1c8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd1c9, CRL_REG_LEN_08BIT, 0x83}, + {0xd1ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cb, CRL_REG_LEN_08BIT, 0x08}, + {0xd1cc, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1cd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cf, CRL_REG_LEN_08BIT, 0xff}, + {0xd1d0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1d1, CRL_REG_LEN_08BIT, 0x67}, + {0xd1d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd1d5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1d6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d8, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1d9, CRL_REG_LEN_08BIT, 0x63}, + {0xd1da, CRL_REG_LEN_08BIT, 0xff}, + {0xd1db, CRL_REG_LEN_08BIT, 0xff}, + {0xd1dc, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1dd, CRL_REG_LEN_08BIT, 0x43}, + {0xd1de, CRL_REG_LEN_08BIT, 0x00}, + {0xd1df, CRL_REG_LEN_08BIT, 0x07}, + {0xd1e0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd1e1, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e3, CRL_REG_LEN_08BIT, 0x5b}, + {0xd1e4, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1e5, CRL_REG_LEN_08BIT, 0x05}, + {0xd1e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e7, CRL_REG_LEN_08BIT, 0x02}, + {0xd1e8, CRL_REG_LEN_08BIT, 0x03}, + {0xd1e9, CRL_REG_LEN_08BIT, 0xff}, + {0xd1ea, CRL_REG_LEN_08BIT, 0xff}, + {0xd1eb, CRL_REG_LEN_08BIT, 0xf6}, + {0xd1ec, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd1ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f1, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1f2, CRL_REG_LEN_08BIT, 0x55}, + {0xd1f3, CRL_REG_LEN_08BIT, 0x86}, + {0xd1f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1f5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f9, CRL_REG_LEN_08BIT, 0xc4}, + {0xd1fa, CRL_REG_LEN_08BIT, 0x6e}, + {0xd1fb, CRL_REG_LEN_08BIT, 0x45}, + {0xd1fc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1fd, CRL_REG_LEN_08BIT, 0xe4}, + {0xd1fe, CRL_REG_LEN_08BIT, 0x55}, + {0xd1ff, CRL_REG_LEN_08BIT, 0x87}, + {0xd200, CRL_REG_LEN_08BIT, 0xd8}, + {0xd201, CRL_REG_LEN_08BIT, 0x05}, + {0xd202, CRL_REG_LEN_08BIT, 0x18}, + {0xd203, CRL_REG_LEN_08BIT, 0x00}, + {0xd204, CRL_REG_LEN_08BIT, 0x8c}, + {0xd205, CRL_REG_LEN_08BIT, 0x66}, + {0xd206, CRL_REG_LEN_08BIT, 0x00}, + {0xd207, CRL_REG_LEN_08BIT, 0x00}, + {0xd208, CRL_REG_LEN_08BIT, 0xa8}, + {0xd209, CRL_REG_LEN_08BIT, 0xa4}, + {0xd20a, CRL_REG_LEN_08BIT, 0x6e}, + {0xd20b, CRL_REG_LEN_08BIT, 0x46}, + {0xd20c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd20d, CRL_REG_LEN_08BIT, 0x07}, + {0xd20e, CRL_REG_LEN_08BIT, 0x18}, + {0xd20f, CRL_REG_LEN_08BIT, 0x00}, + {0xd210, CRL_REG_LEN_08BIT, 0xa8}, + {0xd211, CRL_REG_LEN_08BIT, 0x84}, + {0xd212, CRL_REG_LEN_08BIT, 0x55}, + {0xd213, CRL_REG_LEN_08BIT, 0x88}, + {0xd214, CRL_REG_LEN_08BIT, 0x8c}, + {0xd215, CRL_REG_LEN_08BIT, 0x65}, + {0xd216, CRL_REG_LEN_08BIT, 0x00}, + {0xd217, CRL_REG_LEN_08BIT, 0x00}, + {0xd218, CRL_REG_LEN_08BIT, 0xd8}, + {0xd219, CRL_REG_LEN_08BIT, 0x04}, + {0xd21a, CRL_REG_LEN_08BIT, 0x18}, + {0xd21b, CRL_REG_LEN_08BIT, 0x00}, + {0xd21c, 
CRL_REG_LEN_08BIT, 0x03}, + {0xd21d, CRL_REG_LEN_08BIT, 0xff}, + {0xd21e, CRL_REG_LEN_08BIT, 0xff}, + {0xd21f, CRL_REG_LEN_08BIT, 0xce}, + {0xd220, CRL_REG_LEN_08BIT, 0x19}, + {0xd221, CRL_REG_LEN_08BIT, 0x00}, + {0xd222, CRL_REG_LEN_08BIT, 0x80}, + {0xd223, CRL_REG_LEN_08BIT, 0x06}, + {0xd224, CRL_REG_LEN_08BIT, 0x8c}, + {0xd225, CRL_REG_LEN_08BIT, 0x63}, + {0xd226, CRL_REG_LEN_08BIT, 0x00}, + {0xd227, CRL_REG_LEN_08BIT, 0x00}, + {0xd228, CRL_REG_LEN_08BIT, 0xa4}, + {0xd229, CRL_REG_LEN_08BIT, 0x63}, + {0xd22a, CRL_REG_LEN_08BIT, 0x00}, + {0xd22b, CRL_REG_LEN_08BIT, 0x40}, + {0xd22c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd22d, CRL_REG_LEN_08BIT, 0x23}, + {0xd22e, CRL_REG_LEN_08BIT, 0x00}, + {0xd22f, CRL_REG_LEN_08BIT, 0x00}, + {0xd230, CRL_REG_LEN_08BIT, 0x13}, + {0xd231, CRL_REG_LEN_08BIT, 0xff}, + {0xd232, CRL_REG_LEN_08BIT, 0xff}, + {0xd233, CRL_REG_LEN_08BIT, 0xc8}, + {0xd234, CRL_REG_LEN_08BIT, 0x9d}, + {0xd235, CRL_REG_LEN_08BIT, 0x00}, + {0xd236, CRL_REG_LEN_08BIT, 0x00}, + {0xd237, CRL_REG_LEN_08BIT, 0x40}, + {0xd238, CRL_REG_LEN_08BIT, 0xa8}, + {0xd239, CRL_REG_LEN_08BIT, 0x64}, + {0xd23a, CRL_REG_LEN_08BIT, 0x55}, + {0xd23b, CRL_REG_LEN_08BIT, 0x86}, + {0xd23c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd23d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd23e, CRL_REG_LEN_08BIT, 0x55}, + {0xd23f, CRL_REG_LEN_08BIT, 0x87}, + {0xd240, CRL_REG_LEN_08BIT, 0xd8}, + {0xd241, CRL_REG_LEN_08BIT, 0x03}, + {0xd242, CRL_REG_LEN_08BIT, 0x40}, + {0xd243, CRL_REG_LEN_08BIT, 0x00}, + {0xd244, CRL_REG_LEN_08BIT, 0xa8}, + {0xd245, CRL_REG_LEN_08BIT, 0x64}, + {0xd246, CRL_REG_LEN_08BIT, 0x55}, + {0xd247, CRL_REG_LEN_08BIT, 0x88}, + {0xd248, CRL_REG_LEN_08BIT, 0xd8}, + {0xd249, CRL_REG_LEN_08BIT, 0x05}, + {0xd24a, CRL_REG_LEN_08BIT, 0x40}, + {0xd24b, CRL_REG_LEN_08BIT, 0x00}, + {0xd24c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd24d, CRL_REG_LEN_08BIT, 0x03}, + {0xd24e, CRL_REG_LEN_08BIT, 0x40}, + {0xd24f, CRL_REG_LEN_08BIT, 0x00}, + {0xd250, CRL_REG_LEN_08BIT, 0x03}, + {0xd251, CRL_REG_LEN_08BIT, 0xff}, + {0xd252, CRL_REG_LEN_08BIT, 0xff}, + {0xd253, CRL_REG_LEN_08BIT, 0xc1}, + {0xd254, CRL_REG_LEN_08BIT, 0x19}, + {0xd255, CRL_REG_LEN_08BIT, 0x00}, + {0xd256, CRL_REG_LEN_08BIT, 0x80}, + {0xd257, CRL_REG_LEN_08BIT, 0x06}, + {0xd258, CRL_REG_LEN_08BIT, 0x94}, + {0xd259, CRL_REG_LEN_08BIT, 0x84}, + {0xd25a, CRL_REG_LEN_08BIT, 0x00}, + {0xd25b, CRL_REG_LEN_08BIT, 0x72}, + {0xd25c, CRL_REG_LEN_08BIT, 0xe5}, + {0xd25d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd25e, CRL_REG_LEN_08BIT, 0x60}, + {0xd25f, CRL_REG_LEN_08BIT, 0x00}, + {0xd260, CRL_REG_LEN_08BIT, 0x0c}, + {0xd261, CRL_REG_LEN_08BIT, 0x00}, + {0xd262, CRL_REG_LEN_08BIT, 0x00}, + {0xd263, CRL_REG_LEN_08BIT, 0x3f}, + {0xd264, CRL_REG_LEN_08BIT, 0x9d}, + {0xd265, CRL_REG_LEN_08BIT, 0x60}, + {0xd266, CRL_REG_LEN_08BIT, 0x01}, + {0xd267, CRL_REG_LEN_08BIT, 0x00}, + {0xd268, CRL_REG_LEN_08BIT, 0x85}, + {0xd269, CRL_REG_LEN_08BIT, 0x4e}, + {0xd26a, CRL_REG_LEN_08BIT, 0x00}, + {0xd26b, CRL_REG_LEN_08BIT, 0x00}, + {0xd26c, CRL_REG_LEN_08BIT, 0x98}, + {0xd26d, CRL_REG_LEN_08BIT, 0x70}, + {0xd26e, CRL_REG_LEN_08BIT, 0x00}, + {0xd26f, CRL_REG_LEN_08BIT, 0x00}, + {0xd270, CRL_REG_LEN_08BIT, 0x8c}, + {0xd271, CRL_REG_LEN_08BIT, 0x8a}, + {0xd272, CRL_REG_LEN_08BIT, 0x00}, + {0xd273, CRL_REG_LEN_08BIT, 0x6f}, + {0xd274, CRL_REG_LEN_08BIT, 0xe5}, + {0xd275, CRL_REG_LEN_08BIT, 0x63}, + {0xd276, CRL_REG_LEN_08BIT, 0x20}, + {0xd277, CRL_REG_LEN_08BIT, 0x00}, + {0xd278, CRL_REG_LEN_08BIT, 0x10}, + {0xd279, CRL_REG_LEN_08BIT, 0x00}, + {0xd27a, CRL_REG_LEN_08BIT, 0x00}, + {0xd27b, CRL_REG_LEN_08BIT, 0x07}, + {0xd27c, 
CRL_REG_LEN_08BIT, 0x15}, + {0xd27d, CRL_REG_LEN_08BIT, 0x00}, + {0xd27e, CRL_REG_LEN_08BIT, 0x00}, + {0xd27f, CRL_REG_LEN_08BIT, 0x00}, + {0xd280, CRL_REG_LEN_08BIT, 0x8c}, + {0xd281, CRL_REG_LEN_08BIT, 0xaa}, + {0xd282, CRL_REG_LEN_08BIT, 0x00}, + {0xd283, CRL_REG_LEN_08BIT, 0x6e}, + {0xd284, CRL_REG_LEN_08BIT, 0xe0}, + {0xd285, CRL_REG_LEN_08BIT, 0x63}, + {0xd286, CRL_REG_LEN_08BIT, 0x28}, + {0xd287, CRL_REG_LEN_08BIT, 0x02}, + {0xd288, CRL_REG_LEN_08BIT, 0xe0}, + {0xd289, CRL_REG_LEN_08BIT, 0x84}, + {0xd28a, CRL_REG_LEN_08BIT, 0x28}, + {0xd28b, CRL_REG_LEN_08BIT, 0x02}, + {0xd28c, CRL_REG_LEN_08BIT, 0x07}, + {0xd28d, CRL_REG_LEN_08BIT, 0xff}, + {0xd28e, CRL_REG_LEN_08BIT, 0xf8}, + {0xd28f, CRL_REG_LEN_08BIT, 0x66}, + {0xd290, CRL_REG_LEN_08BIT, 0xe0}, + {0xd291, CRL_REG_LEN_08BIT, 0x63}, + {0xd292, CRL_REG_LEN_08BIT, 0x5b}, + {0xd293, CRL_REG_LEN_08BIT, 0x06}, + {0xd294, CRL_REG_LEN_08BIT, 0x8c}, + {0xd295, CRL_REG_LEN_08BIT, 0x6a}, + {0xd296, CRL_REG_LEN_08BIT, 0x00}, + {0xd297, CRL_REG_LEN_08BIT, 0x77}, + {0xd298, CRL_REG_LEN_08BIT, 0xe0}, + {0xd299, CRL_REG_LEN_08BIT, 0x63}, + {0xd29a, CRL_REG_LEN_08BIT, 0x5b}, + {0xd29b, CRL_REG_LEN_08BIT, 0x06}, + {0xd29c, CRL_REG_LEN_08BIT, 0xbd}, + {0xd29d, CRL_REG_LEN_08BIT, 0x63}, + {0xd29e, CRL_REG_LEN_08BIT, 0x00}, + {0xd29f, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2a1, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a3, CRL_REG_LEN_08BIT, 0x3c}, + {0xd2a4, CRL_REG_LEN_08BIT, 0x15}, + {0xd2a5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd2a9, CRL_REG_LEN_08BIT, 0x8a}, + {0xd2aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ab, CRL_REG_LEN_08BIT, 0x78}, + {0xd2ac, CRL_REG_LEN_08BIT, 0xb8}, + {0xd2ad, CRL_REG_LEN_08BIT, 0x63}, + {0xd2ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd2af, CRL_REG_LEN_08BIT, 0x88}, + {0xd2b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd2b1, CRL_REG_LEN_08BIT, 0x64}, + {0xd2b2, CRL_REG_LEN_08BIT, 0x5b}, + {0xd2b3, CRL_REG_LEN_08BIT, 0x06}, + {0xd2b4, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2b5, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b8, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2b9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd2bb, CRL_REG_LEN_08BIT, 0x34}, + {0xd2bc, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2bd, CRL_REG_LEN_08BIT, 0x01}, + {0xd2be, CRL_REG_LEN_08BIT, 0x18}, + {0xd2bf, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c0, CRL_REG_LEN_08BIT, 0xb9}, + {0xd2c1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c3, CRL_REG_LEN_08BIT, 0x88}, + {0xd2c4, CRL_REG_LEN_08BIT, 0x85}, + {0xd2c5, CRL_REG_LEN_08BIT, 0x01}, + {0xd2c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2c9, CRL_REG_LEN_08BIT, 0x68}, + {0xd2ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cc, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2cd, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cf, CRL_REG_LEN_08BIT, 0x2c}, + {0xd2d0, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2d1, CRL_REG_LEN_08BIT, 0x01}, + {0xd2d2, CRL_REG_LEN_08BIT, 0x58}, + {0xd2d3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2d4, CRL_REG_LEN_08BIT, 0x84}, + {0xd2d5, CRL_REG_LEN_08BIT, 0x81}, + {0xd2d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2d7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2d8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2d9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd2da, CRL_REG_LEN_08BIT, 0x01}, + {0xd2db, CRL_REG_LEN_08BIT, 0x00}, + {0xd2dc, 
CRL_REG_LEN_08BIT, 0x10}, + {0xd2dd, CRL_REG_LEN_08BIT, 0x00}, + {0xd2de, CRL_REG_LEN_08BIT, 0x00}, + {0xd2df, CRL_REG_LEN_08BIT, 0x05}, + {0xd2e0, CRL_REG_LEN_08BIT, 0x84}, + {0xd2e1, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2e4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd2e5, CRL_REG_LEN_08BIT, 0xa0}, + {0xd2e6, CRL_REG_LEN_08BIT, 0x01}, + {0xd2e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e8, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xd2ea, CRL_REG_LEN_08BIT, 0x28}, + {0xd2eb, CRL_REG_LEN_08BIT, 0x14}, + {0xd2ec, CRL_REG_LEN_08BIT, 0x84}, + {0xd2ed, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ef, CRL_REG_LEN_08BIT, 0x18}, + {0xd2f0, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2f1, CRL_REG_LEN_08BIT, 0x66}, + {0xd2f2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f4, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f7, CRL_REG_LEN_08BIT, 0x20}, + {0xd2f8, CRL_REG_LEN_08BIT, 0x9d}, + {0xd2f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fc, CRL_REG_LEN_08BIT, 0x84}, + {0xd2fd, CRL_REG_LEN_08BIT, 0x61}, + {0xd2fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ff, CRL_REG_LEN_08BIT, 0x18}, + {0xd300, CRL_REG_LEN_08BIT, 0xbd}, + {0xd301, CRL_REG_LEN_08BIT, 0xa3}, + {0xd302, CRL_REG_LEN_08BIT, 0x01}, + {0xd303, CRL_REG_LEN_08BIT, 0x00}, + {0xd304, CRL_REG_LEN_08BIT, 0x10}, + {0xd305, CRL_REG_LEN_08BIT, 0x00}, + {0xd306, CRL_REG_LEN_08BIT, 0x00}, + {0xd307, CRL_REG_LEN_08BIT, 0x03}, + {0xd308, CRL_REG_LEN_08BIT, 0x9c}, + {0xd309, CRL_REG_LEN_08BIT, 0x80}, + {0xd30a, CRL_REG_LEN_08BIT, 0x01}, + {0xd30b, CRL_REG_LEN_08BIT, 0x00}, + {0xd30c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd30d, CRL_REG_LEN_08BIT, 0x01}, + {0xd30e, CRL_REG_LEN_08BIT, 0x20}, + {0xd30f, CRL_REG_LEN_08BIT, 0x18}, + {0xd310, CRL_REG_LEN_08BIT, 0x18}, + {0xd311, CRL_REG_LEN_08BIT, 0x60}, + {0xd312, CRL_REG_LEN_08BIT, 0x80}, + {0xd313, CRL_REG_LEN_08BIT, 0x06}, + {0xd314, CRL_REG_LEN_08BIT, 0x85}, + {0xd315, CRL_REG_LEN_08BIT, 0x01}, + {0xd316, CRL_REG_LEN_08BIT, 0x00}, + {0xd317, CRL_REG_LEN_08BIT, 0x14}, + {0xd318, CRL_REG_LEN_08BIT, 0xa8}, + {0xd319, CRL_REG_LEN_08BIT, 0x83}, + {0xd31a, CRL_REG_LEN_08BIT, 0x38}, + {0xd31b, CRL_REG_LEN_08BIT, 0x29}, + {0xd31c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd31d, CRL_REG_LEN_08BIT, 0xc3}, + {0xd31e, CRL_REG_LEN_08BIT, 0x40}, + {0xd31f, CRL_REG_LEN_08BIT, 0x08}, + {0xd320, CRL_REG_LEN_08BIT, 0x8c}, + {0xd321, CRL_REG_LEN_08BIT, 0x84}, + {0xd322, CRL_REG_LEN_08BIT, 0x00}, + {0xd323, CRL_REG_LEN_08BIT, 0x00}, + {0xd324, CRL_REG_LEN_08BIT, 0xa8}, + {0xd325, CRL_REG_LEN_08BIT, 0xa3}, + {0xd326, CRL_REG_LEN_08BIT, 0x38}, + {0xd327, CRL_REG_LEN_08BIT, 0x2a}, + {0xd328, CRL_REG_LEN_08BIT, 0xa8}, + {0xd329, CRL_REG_LEN_08BIT, 0xe3}, + {0xd32a, CRL_REG_LEN_08BIT, 0x40}, + {0xd32b, CRL_REG_LEN_08BIT, 0x09}, + {0xd32c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd32d, CRL_REG_LEN_08BIT, 0x64}, + {0xd32e, CRL_REG_LEN_08BIT, 0x40}, + {0xd32f, CRL_REG_LEN_08BIT, 0x00}, + {0xd330, CRL_REG_LEN_08BIT, 0xd8}, + {0xd331, CRL_REG_LEN_08BIT, 0x06}, + {0xd332, CRL_REG_LEN_08BIT, 0x18}, + {0xd333, CRL_REG_LEN_08BIT, 0x00}, + {0xd334, CRL_REG_LEN_08BIT, 0x8c}, + {0xd335, CRL_REG_LEN_08BIT, 0x65}, + {0xd336, CRL_REG_LEN_08BIT, 0x00}, + {0xd337, CRL_REG_LEN_08BIT, 0x00}, + {0xd338, CRL_REG_LEN_08BIT, 0x84}, + {0xd339, CRL_REG_LEN_08BIT, 0x81}, + {0xd33a, CRL_REG_LEN_08BIT, 0x00}, + {0xd33b, CRL_REG_LEN_08BIT, 0x18}, + {0xd33c, 
CRL_REG_LEN_08BIT, 0xe3}, + {0xd33d, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33e, CRL_REG_LEN_08BIT, 0x20}, + {0xd33f, CRL_REG_LEN_08BIT, 0x00}, + {0xd340, CRL_REG_LEN_08BIT, 0xd8}, + {0xd341, CRL_REG_LEN_08BIT, 0x07}, + {0xd342, CRL_REG_LEN_08BIT, 0xf8}, + {0xd343, CRL_REG_LEN_08BIT, 0x00}, + {0xd344, CRL_REG_LEN_08BIT, 0x03}, + {0xd345, CRL_REG_LEN_08BIT, 0xff}, + {0xd346, CRL_REG_LEN_08BIT, 0xff}, + {0xd347, CRL_REG_LEN_08BIT, 0x6f}, + {0xd348, CRL_REG_LEN_08BIT, 0x18}, + {0xd349, CRL_REG_LEN_08BIT, 0x60}, + {0xd34a, CRL_REG_LEN_08BIT, 0x00}, + {0xd34b, CRL_REG_LEN_08BIT, 0x01}, + {0xd34c, CRL_REG_LEN_08BIT, 0x0f}, + {0xd34d, CRL_REG_LEN_08BIT, 0xff}, + {0xd34e, CRL_REG_LEN_08BIT, 0xff}, + {0xd34f, CRL_REG_LEN_08BIT, 0x9d}, + {0xd350, CRL_REG_LEN_08BIT, 0x18}, + {0xd351, CRL_REG_LEN_08BIT, 0x60}, + {0xd352, CRL_REG_LEN_08BIT, 0x80}, + {0xd353, CRL_REG_LEN_08BIT, 0x06}, + {0xd354, CRL_REG_LEN_08BIT, 0x00}, + {0xd355, CRL_REG_LEN_08BIT, 0x00}, + {0xd356, CRL_REG_LEN_08BIT, 0x00}, + {0xd357, CRL_REG_LEN_08BIT, 0x11}, + {0xd358, CRL_REG_LEN_08BIT, 0xa8}, + {0xd359, CRL_REG_LEN_08BIT, 0x83}, + {0xd35a, CRL_REG_LEN_08BIT, 0x6e}, + {0xd35b, CRL_REG_LEN_08BIT, 0x43}, + {0xd35c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd35d, CRL_REG_LEN_08BIT, 0x6c}, + {0xd35e, CRL_REG_LEN_08BIT, 0x28}, + {0xd35f, CRL_REG_LEN_08BIT, 0x02}, + {0xd360, CRL_REG_LEN_08BIT, 0xe0}, + {0xd361, CRL_REG_LEN_08BIT, 0x84}, + {0xd362, CRL_REG_LEN_08BIT, 0x28}, + {0xd363, CRL_REG_LEN_08BIT, 0x02}, + {0xd364, CRL_REG_LEN_08BIT, 0x07}, + {0xd365, CRL_REG_LEN_08BIT, 0xff}, + {0xd366, CRL_REG_LEN_08BIT, 0xf8}, + {0xd367, CRL_REG_LEN_08BIT, 0x30}, + {0xd368, CRL_REG_LEN_08BIT, 0xb8}, + {0xd369, CRL_REG_LEN_08BIT, 0x63}, + {0xd36a, CRL_REG_LEN_08BIT, 0x00}, + {0xd36b, CRL_REG_LEN_08BIT, 0x08}, + {0xd36c, CRL_REG_LEN_08BIT, 0x03}, + {0xd36d, CRL_REG_LEN_08BIT, 0xff}, + {0xd36e, CRL_REG_LEN_08BIT, 0xff}, + {0xd36f, CRL_REG_LEN_08BIT, 0xc0}, + {0xd370, CRL_REG_LEN_08BIT, 0x85}, + {0xd371, CRL_REG_LEN_08BIT, 0x4e}, + {0xd372, CRL_REG_LEN_08BIT, 0x00}, + {0xd373, CRL_REG_LEN_08BIT, 0x00}, + {0xd374, CRL_REG_LEN_08BIT, 0x03}, + {0xd375, CRL_REG_LEN_08BIT, 0xff}, + {0xd376, CRL_REG_LEN_08BIT, 0xff}, + {0xd377, CRL_REG_LEN_08BIT, 0xe7}, + {0xd378, CRL_REG_LEN_08BIT, 0xd4}, + {0xd379, CRL_REG_LEN_08BIT, 0x01}, + {0xd37a, CRL_REG_LEN_08BIT, 0x40}, + {0xd37b, CRL_REG_LEN_08BIT, 0x18}, + {0xd37c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd37d, CRL_REG_LEN_08BIT, 0x60}, + {0xd37e, CRL_REG_LEN_08BIT, 0x00}, + {0xd37f, CRL_REG_LEN_08BIT, 0x00}, + {0xd380, CRL_REG_LEN_08BIT, 0x03}, + {0xd381, CRL_REG_LEN_08BIT, 0xff}, + {0xd382, CRL_REG_LEN_08BIT, 0xff}, + {0xd383, CRL_REG_LEN_08BIT, 0xdb}, + {0xd384, CRL_REG_LEN_08BIT, 0xd4}, + {0xd385, CRL_REG_LEN_08BIT, 0x01}, + {0xd386, CRL_REG_LEN_08BIT, 0x18}, + {0xd387, CRL_REG_LEN_08BIT, 0x14}, + {0xd388, CRL_REG_LEN_08BIT, 0x03}, + {0xd389, CRL_REG_LEN_08BIT, 0xff}, + {0xd38a, CRL_REG_LEN_08BIT, 0xff}, + {0xd38b, CRL_REG_LEN_08BIT, 0xce}, + {0xd38c, CRL_REG_LEN_08BIT, 0x9d}, + {0xd38d, CRL_REG_LEN_08BIT, 0x6b}, + {0xd38e, CRL_REG_LEN_08BIT, 0x00}, + {0xd38f, CRL_REG_LEN_08BIT, 0xff}, + {0xd390, CRL_REG_LEN_08BIT, 0x03}, + {0xd391, CRL_REG_LEN_08BIT, 0xff}, + {0xd392, CRL_REG_LEN_08BIT, 0xff}, + {0xd393, CRL_REG_LEN_08BIT, 0xc6}, + {0xd394, CRL_REG_LEN_08BIT, 0x9c}, + {0xd395, CRL_REG_LEN_08BIT, 0x63}, + {0xd396, CRL_REG_LEN_08BIT, 0x00}, + {0xd397, CRL_REG_LEN_08BIT, 0xff}, + {0xd398, CRL_REG_LEN_08BIT, 0xa8}, + {0xd399, CRL_REG_LEN_08BIT, 0xe3}, + {0xd39a, CRL_REG_LEN_08BIT, 0x38}, + {0xd39b, CRL_REG_LEN_08BIT, 0x0f}, + {0xd39c, 
CRL_REG_LEN_08BIT, 0x8c}, + {0xd39d, CRL_REG_LEN_08BIT, 0x84}, + {0xd39e, CRL_REG_LEN_08BIT, 0x00}, + {0xd39f, CRL_REG_LEN_08BIT, 0x00}, + {0xd3a0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3a1, CRL_REG_LEN_08BIT, 0xa3}, + {0xd3a2, CRL_REG_LEN_08BIT, 0x38}, + {0xd3a3, CRL_REG_LEN_08BIT, 0x0e}, + {0xd3a4, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3a5, CRL_REG_LEN_08BIT, 0xc3}, + {0xd3a6, CRL_REG_LEN_08BIT, 0x6e}, + {0xd3a7, CRL_REG_LEN_08BIT, 0x42}, + {0xd3a8, CRL_REG_LEN_08BIT, 0xd8}, + {0xd3a9, CRL_REG_LEN_08BIT, 0x07}, + {0xd3aa, CRL_REG_LEN_08BIT, 0x20}, + {0xd3ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ac, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3ad, CRL_REG_LEN_08BIT, 0x66}, + {0xd3ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd3af, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd3b1, CRL_REG_LEN_08BIT, 0x05}, + {0xd3b2, CRL_REG_LEN_08BIT, 0x18}, + {0xd3b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b4, CRL_REG_LEN_08BIT, 0x85}, + {0xd3b5, CRL_REG_LEN_08BIT, 0x21}, + {0xd3b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b8, CRL_REG_LEN_08BIT, 0x85}, + {0xd3b9, CRL_REG_LEN_08BIT, 0x41}, + {0xd3ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd3bb, CRL_REG_LEN_08BIT, 0x04}, + {0xd3bc, CRL_REG_LEN_08BIT, 0x85}, + {0xd3bd, CRL_REG_LEN_08BIT, 0x81}, + {0xd3be, CRL_REG_LEN_08BIT, 0x00}, + {0xd3bf, CRL_REG_LEN_08BIT, 0x08}, + {0xd3c0, CRL_REG_LEN_08BIT, 0x85}, + {0xd3c1, CRL_REG_LEN_08BIT, 0xc1}, + {0xd3c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd3c3, CRL_REG_LEN_08BIT, 0x0c}, + {0xd3c4, CRL_REG_LEN_08BIT, 0x86}, + {0xd3c5, CRL_REG_LEN_08BIT, 0x01}, + {0xd3c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3c7, CRL_REG_LEN_08BIT, 0x10}, + {0xd3c8, CRL_REG_LEN_08BIT, 0x44}, + {0xd3c9, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ca, CRL_REG_LEN_08BIT, 0x48}, + {0xd3cb, CRL_REG_LEN_08BIT, 0x00}, + {0xd3cc, CRL_REG_LEN_08BIT, 0x9c}, + {0xd3cd, CRL_REG_LEN_08BIT, 0x21}, + {0xd3ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd3cf, CRL_REG_LEN_08BIT, 0x1c}, + {0xd3d0, CRL_REG_LEN_08BIT, 0x9c}, + {0xd3d1, CRL_REG_LEN_08BIT, 0x21}, + {0xd3d2, CRL_REG_LEN_08BIT, 0xff}, + {0xd3d3, CRL_REG_LEN_08BIT, 0xfc}, + {0xd3d4, CRL_REG_LEN_08BIT, 0xd4}, + {0xd3d5, CRL_REG_LEN_08BIT, 0x01}, + {0xd3d6, CRL_REG_LEN_08BIT, 0x48}, + {0xd3d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3d8, CRL_REG_LEN_08BIT, 0x18}, + {0xd3d9, CRL_REG_LEN_08BIT, 0x60}, + {0xd3da, CRL_REG_LEN_08BIT, 0x00}, + {0xd3db, CRL_REG_LEN_08BIT, 0x01}, + {0xd3dc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3dd, CRL_REG_LEN_08BIT, 0x63}, + {0xd3de, CRL_REG_LEN_08BIT, 0x07}, + {0xd3df, CRL_REG_LEN_08BIT, 0x80}, + {0xd3e0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3e1, CRL_REG_LEN_08BIT, 0x63}, + {0xd3e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e3, CRL_REG_LEN_08BIT, 0x68}, + {0xd3e4, CRL_REG_LEN_08BIT, 0xbc}, + {0xd3e5, CRL_REG_LEN_08BIT, 0x03}, + {0xd3e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e8, CRL_REG_LEN_08BIT, 0x10}, + {0xd3e9, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd3eb, CRL_REG_LEN_08BIT, 0x0c}, + {0xd3ec, CRL_REG_LEN_08BIT, 0x15}, + {0xd3ed, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f0, CRL_REG_LEN_08BIT, 0x07}, + {0xd3f1, CRL_REG_LEN_08BIT, 0xff}, + {0xd3f2, CRL_REG_LEN_08BIT, 0xd9}, + {0xd3f3, CRL_REG_LEN_08BIT, 0x98}, + {0xd3f4, CRL_REG_LEN_08BIT, 0x15}, + {0xd3f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f8, CRL_REG_LEN_08BIT, 0x18}, + {0xd3f9, CRL_REG_LEN_08BIT, 0x60}, + {0xd3fa, CRL_REG_LEN_08BIT, 0x80}, + {0xd3fb, CRL_REG_LEN_08BIT, 0x06}, + {0xd3fc, 
CRL_REG_LEN_08BIT, 0xa8}, + {0xd3fd, CRL_REG_LEN_08BIT, 0x63}, + {0xd3fe, CRL_REG_LEN_08BIT, 0xc4}, + {0xd3ff, CRL_REG_LEN_08BIT, 0xb8}, + {0xd400, CRL_REG_LEN_08BIT, 0x8c}, + {0xd401, CRL_REG_LEN_08BIT, 0x63}, + {0xd402, CRL_REG_LEN_08BIT, 0x00}, + {0xd403, CRL_REG_LEN_08BIT, 0x00}, + {0xd404, CRL_REG_LEN_08BIT, 0xbc}, + {0xd405, CRL_REG_LEN_08BIT, 0x23}, + {0xd406, CRL_REG_LEN_08BIT, 0x00}, + {0xd407, CRL_REG_LEN_08BIT, 0x01}, + {0xd408, CRL_REG_LEN_08BIT, 0x10}, + {0xd409, CRL_REG_LEN_08BIT, 0x00}, + {0xd40a, CRL_REG_LEN_08BIT, 0x00}, + {0xd40b, CRL_REG_LEN_08BIT, 0x25}, + {0xd40c, CRL_REG_LEN_08BIT, 0x9d}, + {0xd40d, CRL_REG_LEN_08BIT, 0x00}, + {0xd40e, CRL_REG_LEN_08BIT, 0x00}, + {0xd40f, CRL_REG_LEN_08BIT, 0x00}, + {0xd410, CRL_REG_LEN_08BIT, 0x00}, + {0xd411, CRL_REG_LEN_08BIT, 0x00}, + {0xd412, CRL_REG_LEN_08BIT, 0x00}, + {0xd413, CRL_REG_LEN_08BIT, 0x0b}, + {0xd414, CRL_REG_LEN_08BIT, 0xb8}, + {0xd415, CRL_REG_LEN_08BIT, 0xe8}, + {0xd416, CRL_REG_LEN_08BIT, 0x00}, + {0xd417, CRL_REG_LEN_08BIT, 0x02}, + {0xd418, CRL_REG_LEN_08BIT, 0x07}, + {0xd419, CRL_REG_LEN_08BIT, 0xff}, + {0xd41a, CRL_REG_LEN_08BIT, 0xd6}, + {0xd41b, CRL_REG_LEN_08BIT, 0x24}, + {0xd41c, CRL_REG_LEN_08BIT, 0x15}, + {0xd41d, CRL_REG_LEN_08BIT, 0x00}, + {0xd41e, CRL_REG_LEN_08BIT, 0x00}, + {0xd41f, CRL_REG_LEN_08BIT, 0x00}, + {0xd420, CRL_REG_LEN_08BIT, 0x18}, + {0xd421, CRL_REG_LEN_08BIT, 0x60}, + {0xd422, CRL_REG_LEN_08BIT, 0x80}, + {0xd423, CRL_REG_LEN_08BIT, 0x06}, + {0xd424, CRL_REG_LEN_08BIT, 0xa8}, + {0xd425, CRL_REG_LEN_08BIT, 0x63}, + {0xd426, CRL_REG_LEN_08BIT, 0xc4}, + {0xd427, CRL_REG_LEN_08BIT, 0xb8}, + {0xd428, CRL_REG_LEN_08BIT, 0x8c}, + {0xd429, CRL_REG_LEN_08BIT, 0x63}, + {0xd42a, CRL_REG_LEN_08BIT, 0x00}, + {0xd42b, CRL_REG_LEN_08BIT, 0x00}, + {0xd42c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd42d, CRL_REG_LEN_08BIT, 0x23}, + {0xd42e, CRL_REG_LEN_08BIT, 0x00}, + {0xd42f, CRL_REG_LEN_08BIT, 0x01}, + {0xd430, CRL_REG_LEN_08BIT, 0x10}, + {0xd431, CRL_REG_LEN_08BIT, 0x00}, + {0xd432, CRL_REG_LEN_08BIT, 0x00}, + {0xd433, CRL_REG_LEN_08BIT, 0x1b}, + {0xd434, CRL_REG_LEN_08BIT, 0x9d}, + {0xd435, CRL_REG_LEN_08BIT, 0x00}, + {0xd436, CRL_REG_LEN_08BIT, 0x00}, + {0xd437, CRL_REG_LEN_08BIT, 0x00}, + {0xd438, CRL_REG_LEN_08BIT, 0xb8}, + {0xd439, CRL_REG_LEN_08BIT, 0xe8}, + {0xd43a, CRL_REG_LEN_08BIT, 0x00}, + {0xd43b, CRL_REG_LEN_08BIT, 0x02}, + {0xd43c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd43d, CRL_REG_LEN_08BIT, 0xc0}, + {0xd43e, CRL_REG_LEN_08BIT, 0x00}, + {0xd43f, CRL_REG_LEN_08BIT, 0x00}, + {0xd440, CRL_REG_LEN_08BIT, 0x18}, + {0xd441, CRL_REG_LEN_08BIT, 0xa0}, + {0xd442, CRL_REG_LEN_08BIT, 0x80}, + {0xd443, CRL_REG_LEN_08BIT, 0x06}, + {0xd444, CRL_REG_LEN_08BIT, 0xe0}, + {0xd445, CRL_REG_LEN_08BIT, 0x67}, + {0xd446, CRL_REG_LEN_08BIT, 0x30}, + {0xd447, CRL_REG_LEN_08BIT, 0x00}, + {0xd448, CRL_REG_LEN_08BIT, 0xa8}, + {0xd449, CRL_REG_LEN_08BIT, 0xa5}, + {0xd44a, CRL_REG_LEN_08BIT, 0xce}, + {0xd44b, CRL_REG_LEN_08BIT, 0xb0}, + {0xd44c, CRL_REG_LEN_08BIT, 0x19}, + {0xd44d, CRL_REG_LEN_08BIT, 0x60}, + {0xd44e, CRL_REG_LEN_08BIT, 0x00}, + {0xd44f, CRL_REG_LEN_08BIT, 0x01}, + {0xd450, CRL_REG_LEN_08BIT, 0xa9}, + {0xd451, CRL_REG_LEN_08BIT, 0x6b}, + {0xd452, CRL_REG_LEN_08BIT, 0x06}, + {0xd453, CRL_REG_LEN_08BIT, 0x14}, + {0xd454, CRL_REG_LEN_08BIT, 0xe0}, + {0xd455, CRL_REG_LEN_08BIT, 0x83}, + {0xd456, CRL_REG_LEN_08BIT, 0x28}, + {0xd457, CRL_REG_LEN_08BIT, 0x00}, + {0xd458, CRL_REG_LEN_08BIT, 0x9c}, + {0xd459, CRL_REG_LEN_08BIT, 0xc6}, + {0xd45a, CRL_REG_LEN_08BIT, 0x00}, + {0xd45b, CRL_REG_LEN_08BIT, 0x01}, + {0xd45c, 
CRL_REG_LEN_08BIT, 0xe0}, + {0xd45d, CRL_REG_LEN_08BIT, 0x63}, + {0xd45e, CRL_REG_LEN_08BIT, 0x18}, + {0xd45f, CRL_REG_LEN_08BIT, 0x00}, + {0xd460, CRL_REG_LEN_08BIT, 0x8c}, + {0xd461, CRL_REG_LEN_08BIT, 0x84}, + {0xd462, CRL_REG_LEN_08BIT, 0x00}, + {0xd463, CRL_REG_LEN_08BIT, 0x00}, + {0xd464, CRL_REG_LEN_08BIT, 0xe0}, + {0xd465, CRL_REG_LEN_08BIT, 0xa3}, + {0xd466, CRL_REG_LEN_08BIT, 0x58}, + {0xd467, CRL_REG_LEN_08BIT, 0x00}, + {0xd468, CRL_REG_LEN_08BIT, 0xa4}, + {0xd469, CRL_REG_LEN_08BIT, 0xc6}, + {0xd46a, CRL_REG_LEN_08BIT, 0x00}, + {0xd46b, CRL_REG_LEN_08BIT, 0xff}, + {0xd46c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd46d, CRL_REG_LEN_08BIT, 0x64}, + {0xd46e, CRL_REG_LEN_08BIT, 0x00}, + {0xd46f, CRL_REG_LEN_08BIT, 0x18}, + {0xd470, CRL_REG_LEN_08BIT, 0xbc}, + {0xd471, CRL_REG_LEN_08BIT, 0x46}, + {0xd472, CRL_REG_LEN_08BIT, 0x00}, + {0xd473, CRL_REG_LEN_08BIT, 0x03}, + {0xd474, CRL_REG_LEN_08BIT, 0x94}, + {0xd475, CRL_REG_LEN_08BIT, 0x85}, + {0xd476, CRL_REG_LEN_08BIT, 0x00}, + {0xd477, CRL_REG_LEN_08BIT, 0x00}, + {0xd478, CRL_REG_LEN_08BIT, 0xb8}, + {0xd479, CRL_REG_LEN_08BIT, 0x63}, + {0xd47a, CRL_REG_LEN_08BIT, 0x00}, + {0xd47b, CRL_REG_LEN_08BIT, 0x98}, + {0xd47c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd47d, CRL_REG_LEN_08BIT, 0x64}, + {0xd47e, CRL_REG_LEN_08BIT, 0x18}, + {0xd47f, CRL_REG_LEN_08BIT, 0x00}, + {0xd480, CRL_REG_LEN_08BIT, 0x0f}, + {0xd481, CRL_REG_LEN_08BIT, 0xff}, + {0xd482, CRL_REG_LEN_08BIT, 0xff}, + {0xd483, CRL_REG_LEN_08BIT, 0xf0}, + {0xd484, CRL_REG_LEN_08BIT, 0xdc}, + {0xd485, CRL_REG_LEN_08BIT, 0x05}, + {0xd486, CRL_REG_LEN_08BIT, 0x18}, + {0xd487, CRL_REG_LEN_08BIT, 0x00}, + {0xd488, CRL_REG_LEN_08BIT, 0x9c}, + {0xd489, CRL_REG_LEN_08BIT, 0x68}, + {0xd48a, CRL_REG_LEN_08BIT, 0x00}, + {0xd48b, CRL_REG_LEN_08BIT, 0x01}, + {0xd48c, CRL_REG_LEN_08BIT, 0xa5}, + {0xd48d, CRL_REG_LEN_08BIT, 0x03}, + {0xd48e, CRL_REG_LEN_08BIT, 0x00}, + {0xd48f, CRL_REG_LEN_08BIT, 0xff}, + {0xd490, CRL_REG_LEN_08BIT, 0xbc}, + {0xd491, CRL_REG_LEN_08BIT, 0x48}, + {0xd492, CRL_REG_LEN_08BIT, 0x00}, + {0xd493, CRL_REG_LEN_08BIT, 0x01}, + {0xd494, CRL_REG_LEN_08BIT, 0x0f}, + {0xd495, CRL_REG_LEN_08BIT, 0xff}, + {0xd496, CRL_REG_LEN_08BIT, 0xff}, + {0xd497, CRL_REG_LEN_08BIT, 0xea}, + {0xd498, CRL_REG_LEN_08BIT, 0xb8}, + {0xd499, CRL_REG_LEN_08BIT, 0xe8}, + {0xd49a, CRL_REG_LEN_08BIT, 0x00}, + {0xd49b, CRL_REG_LEN_08BIT, 0x02}, + {0xd49c, CRL_REG_LEN_08BIT, 0x18}, + {0xd49d, CRL_REG_LEN_08BIT, 0x60}, + {0xd49e, CRL_REG_LEN_08BIT, 0x00}, + {0xd49f, CRL_REG_LEN_08BIT, 0x01}, + {0xd4a0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4a1, CRL_REG_LEN_08BIT, 0x63}, + {0xd4a2, CRL_REG_LEN_08BIT, 0x06}, + {0xd4a3, CRL_REG_LEN_08BIT, 0x14}, + {0xd4a4, CRL_REG_LEN_08BIT, 0x07}, + {0xd4a5, CRL_REG_LEN_08BIT, 0xff}, + {0xd4a6, CRL_REG_LEN_08BIT, 0xe4}, + {0xd4a7, CRL_REG_LEN_08BIT, 0x05}, + {0xd4a8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4a9, CRL_REG_LEN_08BIT, 0x83}, + {0xd4aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ab, CRL_REG_LEN_08BIT, 0x10}, + {0xd4ac, CRL_REG_LEN_08BIT, 0x85}, + {0xd4ad, CRL_REG_LEN_08BIT, 0x21}, + {0xd4ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd4af, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b0, CRL_REG_LEN_08BIT, 0x44}, + {0xd4b1, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b2, CRL_REG_LEN_08BIT, 0x48}, + {0xd4b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4b5, CRL_REG_LEN_08BIT, 0x21}, + {0xd4b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b7, CRL_REG_LEN_08BIT, 0x04}, + {0xd4b8, CRL_REG_LEN_08BIT, 0x18}, + {0xd4b9, CRL_REG_LEN_08BIT, 0x60}, + {0xd4ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd4bb, CRL_REG_LEN_08BIT, 0x01}, + {0xd4bc, 
CRL_REG_LEN_08BIT, 0x9c}, + {0xd4bd, CRL_REG_LEN_08BIT, 0x80}, + {0xd4be, CRL_REG_LEN_08BIT, 0xff}, + {0xd4bf, CRL_REG_LEN_08BIT, 0xff}, + {0xd4c0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4c1, CRL_REG_LEN_08BIT, 0x63}, + {0xd4c2, CRL_REG_LEN_08BIT, 0x09}, + {0xd4c3, CRL_REG_LEN_08BIT, 0xef}, + {0xd4c4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd4c5, CRL_REG_LEN_08BIT, 0x03}, + {0xd4c6, CRL_REG_LEN_08BIT, 0x20}, + {0xd4c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4c8, CRL_REG_LEN_08BIT, 0x18}, + {0xd4c9, CRL_REG_LEN_08BIT, 0x60}, + {0xd4ca, CRL_REG_LEN_08BIT, 0x80}, + {0xd4cb, CRL_REG_LEN_08BIT, 0x06}, + {0xd4cc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4cd, CRL_REG_LEN_08BIT, 0x63}, + {0xd4ce, CRL_REG_LEN_08BIT, 0xc9}, + {0xd4cf, CRL_REG_LEN_08BIT, 0xef}, + {0xd4d0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd4d1, CRL_REG_LEN_08BIT, 0x03}, + {0xd4d2, CRL_REG_LEN_08BIT, 0x20}, + {0xd4d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d4, CRL_REG_LEN_08BIT, 0x44}, + {0xd4d5, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d6, CRL_REG_LEN_08BIT, 0x48}, + {0xd4d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d8, CRL_REG_LEN_08BIT, 0x15}, + {0xd4d9, CRL_REG_LEN_08BIT, 0x00}, + {0xd4da, CRL_REG_LEN_08BIT, 0x00}, + {0xd4db, CRL_REG_LEN_08BIT, 0x00}, + {0xd4dc, CRL_REG_LEN_08BIT, 0x18}, + {0xd4dd, CRL_REG_LEN_08BIT, 0x80}, + {0xd4de, CRL_REG_LEN_08BIT, 0x00}, + {0xd4df, CRL_REG_LEN_08BIT, 0x01}, + {0xd4e0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4e1, CRL_REG_LEN_08BIT, 0x84}, + {0xd4e2, CRL_REG_LEN_08BIT, 0x0a}, + {0xd4e3, CRL_REG_LEN_08BIT, 0x12}, + {0xd4e4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd4e5, CRL_REG_LEN_08BIT, 0x64}, + {0xd4e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e8, CRL_REG_LEN_08BIT, 0xbc}, + {0xd4e9, CRL_REG_LEN_08BIT, 0x03}, + {0xd4ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd4eb, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ec, CRL_REG_LEN_08BIT, 0x13}, + {0xd4ed, CRL_REG_LEN_08BIT, 0xff}, + {0xd4ee, CRL_REG_LEN_08BIT, 0xff}, + {0xd4ef, CRL_REG_LEN_08BIT, 0xfe}, + {0xd4f0, CRL_REG_LEN_08BIT, 0x15}, + {0xd4f1, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f2, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f4, CRL_REG_LEN_08BIT, 0x44}, + {0xd4f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f6, CRL_REG_LEN_08BIT, 0x48}, + {0xd4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f8, CRL_REG_LEN_08BIT, 0x15}, + {0xd4f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fc, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fd, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ff, CRL_REG_LEN_08BIT, 0x00}, + {0xd500, CRL_REG_LEN_08BIT, 0x00}, + {0xd501, CRL_REG_LEN_08BIT, 0x00}, + {0xd502, CRL_REG_LEN_08BIT, 0x00}, + {0xd503, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0e, CRL_REG_LEN_08BIT, 0x33}, + {0x6f0f, CRL_REG_LEN_08BIT, 0x33}, + {0x460e, CRL_REG_LEN_08BIT, 0x08}, + {0x460f, CRL_REG_LEN_08BIT, 0x01}, + {0x4610, CRL_REG_LEN_08BIT, 0x00}, + {0x4611, CRL_REG_LEN_08BIT, 0x01}, + {0x4612, CRL_REG_LEN_08BIT, 0x00}, + {0x4613, CRL_REG_LEN_08BIT, 0x01}, + {0x4605, CRL_REG_LEN_08BIT, 0x08}, + {0x4608, CRL_REG_LEN_08BIT, 0x00}, + {0x4609, CRL_REG_LEN_08BIT, 0x08}, + {0x6804, CRL_REG_LEN_08BIT, 0x00}, + {0x6805, CRL_REG_LEN_08BIT, 0x06}, + {0x6806, CRL_REG_LEN_08BIT, 0x00}, + {0x5120, CRL_REG_LEN_08BIT, 0x00}, + {0x3510, CRL_REG_LEN_08BIT, 0x00}, + {0x3504, CRL_REG_LEN_08BIT, 0x00}, + {0x6800, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0d, CRL_REG_LEN_08BIT, 0x0f}, + {0x5000, CRL_REG_LEN_08BIT, 0xff}, + {0x5001, CRL_REG_LEN_08BIT, 0xbf}, + {0x5002, CRL_REG_LEN_08BIT, 0x7e}, + {0x5003, CRL_REG_LEN_08BIT, 0x0c}, + {0x503d, CRL_REG_LEN_08BIT, 0x00}, + {0xc450, 
CRL_REG_LEN_08BIT, 0x01}, + {0xc452, CRL_REG_LEN_08BIT, 0x04}, + {0xc453, CRL_REG_LEN_08BIT, 0x00}, + {0xc454, CRL_REG_LEN_08BIT, 0x01}, + {0xc455, CRL_REG_LEN_08BIT, 0x00}, + {0xc456, CRL_REG_LEN_08BIT, 0x00}, + {0xc457, CRL_REG_LEN_08BIT, 0x00}, + {0xc458, CRL_REG_LEN_08BIT, 0x00}, + {0xc459, CRL_REG_LEN_08BIT, 0x00}, + {0xc45b, CRL_REG_LEN_08BIT, 0x00}, + {0xc45c, CRL_REG_LEN_08BIT, 0x00}, + {0xc45d, CRL_REG_LEN_08BIT, 0x00}, + {0xc45e, CRL_REG_LEN_08BIT, 0x02}, + {0xc45f, CRL_REG_LEN_08BIT, 0x01}, + {0xc460, CRL_REG_LEN_08BIT, 0x01}, + {0xc461, CRL_REG_LEN_08BIT, 0x01}, + {0xc462, CRL_REG_LEN_08BIT, 0x01}, + {0xc464, CRL_REG_LEN_08BIT, 0x88}, + {0xc465, CRL_REG_LEN_08BIT, 0x00}, + {0xc466, CRL_REG_LEN_08BIT, 0x8a}, + {0xc467, CRL_REG_LEN_08BIT, 0x00}, + {0xc468, CRL_REG_LEN_08BIT, 0x86}, + {0xc469, CRL_REG_LEN_08BIT, 0x00}, + {0xc46a, CRL_REG_LEN_08BIT, 0x40}, + {0xc46b, CRL_REG_LEN_08BIT, 0x50}, + {0xc46c, CRL_REG_LEN_08BIT, 0x30}, + {0xc46d, CRL_REG_LEN_08BIT, 0x28}, + {0xc46e, CRL_REG_LEN_08BIT, 0x60}, + {0xc46f, CRL_REG_LEN_08BIT, 0x40}, + {0xc47c, CRL_REG_LEN_08BIT, 0x01}, + {0xc47d, CRL_REG_LEN_08BIT, 0x38}, + {0xc47e, CRL_REG_LEN_08BIT, 0x00}, + {0xc47f, CRL_REG_LEN_08BIT, 0x00}, + {0xc480, CRL_REG_LEN_08BIT, 0x00}, + {0xc481, CRL_REG_LEN_08BIT, 0xff}, + {0xc482, CRL_REG_LEN_08BIT, 0x00}, + {0xc483, CRL_REG_LEN_08BIT, 0x40}, + {0xc484, CRL_REG_LEN_08BIT, 0x00}, + {0xc485, CRL_REG_LEN_08BIT, 0x18}, + {0xc486, CRL_REG_LEN_08BIT, 0x00}, + {0xc487, CRL_REG_LEN_08BIT, 0x18}, + {0xc488, CRL_REG_LEN_08BIT, 0x34}, + {0xc489, CRL_REG_LEN_08BIT, 0x00}, + {0xc48a, CRL_REG_LEN_08BIT, 0x34}, + {0xc48b, CRL_REG_LEN_08BIT, 0x00}, + {0xc48c, CRL_REG_LEN_08BIT, 0x00}, + {0xc48d, CRL_REG_LEN_08BIT, 0x04}, + {0xc48e, CRL_REG_LEN_08BIT, 0x00}, + {0xc48f, CRL_REG_LEN_08BIT, 0x04}, + {0xc490, CRL_REG_LEN_08BIT, 0x07}, + {0xc492, CRL_REG_LEN_08BIT, 0x20}, + {0xc493, CRL_REG_LEN_08BIT, 0x08}, + {0xc498, CRL_REG_LEN_08BIT, 0x02}, + {0xc499, CRL_REG_LEN_08BIT, 0x00}, + {0xc49a, CRL_REG_LEN_08BIT, 0x02}, + {0xc49b, CRL_REG_LEN_08BIT, 0x00}, + {0xc49c, CRL_REG_LEN_08BIT, 0x02}, + {0xc49d, CRL_REG_LEN_08BIT, 0x00}, + {0xc49e, CRL_REG_LEN_08BIT, 0x02}, + {0xc49f, CRL_REG_LEN_08BIT, 0x60}, + {0xc4a0, CRL_REG_LEN_08BIT, 0x03}, + {0xc4a1, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a2, CRL_REG_LEN_08BIT, 0x04}, + {0xc4a3, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a5, CRL_REG_LEN_08BIT, 0x10}, + {0xc4a6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4a8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a9, CRL_REG_LEN_08BIT, 0x80}, + {0xc4aa, CRL_REG_LEN_08BIT, 0x0d}, + {0xc4ab, CRL_REG_LEN_08BIT, 0x00}, + {0xc4ac, CRL_REG_LEN_08BIT, 0x03}, + {0xc4ad, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4b4, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b5, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b7, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b9, CRL_REG_LEN_08BIT, 0x01}, + {0xc4ba, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, + {0xc4be, CRL_REG_LEN_08BIT, 0x02}, + {0xc4bf, CRL_REG_LEN_08BIT, 0x33}, + {0xc4c8, CRL_REG_LEN_08BIT, 0x03}, + {0xc4c9, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4ca, CRL_REG_LEN_08BIT, 0x0e}, + {0xc4cb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4cc, CRL_REG_LEN_08BIT, 0x10}, + {0xc4cd, CRL_REG_LEN_08BIT, 0x18}, + {0xc4ce, CRL_REG_LEN_08BIT, 0x10}, + {0xc4cf, CRL_REG_LEN_08BIT, 0x18}, + {0xc4d0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4d1, CRL_REG_LEN_08BIT, 0x80}, + {0xc4e0, 
CRL_REG_LEN_08BIT, 0x04}, + {0xc4e1, CRL_REG_LEN_08BIT, 0x02}, + {0xc4e2, CRL_REG_LEN_08BIT, 0x01}, + {0xc4e4, CRL_REG_LEN_08BIT, 0x10}, + {0xc4e5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4e6, CRL_REG_LEN_08BIT, 0x30}, + {0xc4e7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4e8, CRL_REG_LEN_08BIT, 0x50}, + {0xc4e9, CRL_REG_LEN_08BIT, 0x60}, + {0xc4ea, CRL_REG_LEN_08BIT, 0x70}, + {0xc4eb, CRL_REG_LEN_08BIT, 0x80}, + {0xc4ec, CRL_REG_LEN_08BIT, 0x90}, + {0xc4ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xc4ee, CRL_REG_LEN_08BIT, 0xb0}, + {0xc4ef, CRL_REG_LEN_08BIT, 0xc0}, + {0xc4f0, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4f1, CRL_REG_LEN_08BIT, 0xe0}, + {0xc4f2, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4f3, CRL_REG_LEN_08BIT, 0x80}, + {0xc4f4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4f6, CRL_REG_LEN_08BIT, 0x02}, + {0xc4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f8, CRL_REG_LEN_08BIT, 0x04}, + {0xc4f9, CRL_REG_LEN_08BIT, 0x0b}, + {0xc4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4fd, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fe, CRL_REG_LEN_08BIT, 0x04}, + {0xc4ff, CRL_REG_LEN_08BIT, 0x02}, + {0xc500, CRL_REG_LEN_08BIT, 0x48}, + {0xc501, CRL_REG_LEN_08BIT, 0x74}, + {0xc502, CRL_REG_LEN_08BIT, 0x58}, + {0xc503, CRL_REG_LEN_08BIT, 0x80}, + {0xc504, CRL_REG_LEN_08BIT, 0x05}, + {0xc505, CRL_REG_LEN_08BIT, 0x80}, + {0xc506, CRL_REG_LEN_08BIT, 0x03}, + {0xc507, CRL_REG_LEN_08BIT, 0x80}, + {0xc508, CRL_REG_LEN_08BIT, 0x01}, + {0xc509, CRL_REG_LEN_08BIT, 0xc0}, + {0xc50a, CRL_REG_LEN_08BIT, 0x01}, + {0xc50b, CRL_REG_LEN_08BIT, 0xa0}, + {0xc50c, CRL_REG_LEN_08BIT, 0x01}, + {0xc50d, CRL_REG_LEN_08BIT, 0x2c}, + {0xc50e, CRL_REG_LEN_08BIT, 0x01}, + {0xc50f, CRL_REG_LEN_08BIT, 0x0a}, + {0xc510, CRL_REG_LEN_08BIT, 0x00}, + {0xc511, CRL_REG_LEN_08BIT, 0x01}, + {0xc512, CRL_REG_LEN_08BIT, 0x01}, + {0xc513, CRL_REG_LEN_08BIT, 0x80}, + {0xc514, CRL_REG_LEN_08BIT, 0x04}, + {0xc515, CRL_REG_LEN_08BIT, 0x00}, + {0xc518, CRL_REG_LEN_08BIT, 0x03}, + {0xc519, CRL_REG_LEN_08BIT, 0x48}, + {0xc51a, CRL_REG_LEN_08BIT, 0x07}, + {0xc51b, CRL_REG_LEN_08BIT, 0x70}, + {0xc2e0, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e1, CRL_REG_LEN_08BIT, 0x51}, + {0xc2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e3, CRL_REG_LEN_08BIT, 0xd6}, + {0xc2e4, CRL_REG_LEN_08BIT, 0x01}, + {0xc2e5, CRL_REG_LEN_08BIT, 0x5e}, + {0xc2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xc2ea, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2eb, CRL_REG_LEN_08BIT, 0x90}, + {0xc2ed, CRL_REG_LEN_08BIT, 0x00}, + {0xc2ee, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2ef, CRL_REG_LEN_08BIT, 0x64}, + {0xc308, CRL_REG_LEN_08BIT, 0x00}, + {0xc309, CRL_REG_LEN_08BIT, 0x00}, + {0xc30a, CRL_REG_LEN_08BIT, 0x00}, + {0xc30c, CRL_REG_LEN_08BIT, 0x00}, + {0xc30d, CRL_REG_LEN_08BIT, 0x01}, + {0xc30e, CRL_REG_LEN_08BIT, 0x00}, + {0xc30f, CRL_REG_LEN_08BIT, 0x00}, + {0xc310, CRL_REG_LEN_08BIT, 0x01}, + {0xc311, CRL_REG_LEN_08BIT, 0x60}, + {0xc312, CRL_REG_LEN_08BIT, 0xff}, + {0xc313, CRL_REG_LEN_08BIT, 0x08}, + {0xc314, CRL_REG_LEN_08BIT, 0x01}, + {0xc315, CRL_REG_LEN_08BIT, 0x7f}, + {0xc316, CRL_REG_LEN_08BIT, 0xff}, + {0xc317, CRL_REG_LEN_08BIT, 0x0b}, + {0xc318, CRL_REG_LEN_08BIT, 0x00}, + {0xc319, CRL_REG_LEN_08BIT, 0x0c}, + {0xc31a, CRL_REG_LEN_08BIT, 0x00}, + {0xc31b, CRL_REG_LEN_08BIT, 0xe0}, + {0xc31c, CRL_REG_LEN_08BIT, 0x00}, + {0xc31d, CRL_REG_LEN_08BIT, 0x14}, + {0xc31e, CRL_REG_LEN_08BIT, 0x00}, + {0xc31f, CRL_REG_LEN_08BIT, 0xc5}, + {0xc320, CRL_REG_LEN_08BIT, 0xff}, + {0xc321, CRL_REG_LEN_08BIT, 0x4b}, + {0xc322, CRL_REG_LEN_08BIT, 0xff}, + {0xc323, CRL_REG_LEN_08BIT, 0xf0}, + {0xc324, 
CRL_REG_LEN_08BIT, 0xff}, + {0xc325, CRL_REG_LEN_08BIT, 0xe8}, + {0xc326, CRL_REG_LEN_08BIT, 0x00}, + {0xc327, CRL_REG_LEN_08BIT, 0x46}, + {0xc328, CRL_REG_LEN_08BIT, 0xff}, + {0xc329, CRL_REG_LEN_08BIT, 0xd2}, + {0xc32a, CRL_REG_LEN_08BIT, 0xff}, + {0xc32b, CRL_REG_LEN_08BIT, 0xe4}, + {0xc32c, CRL_REG_LEN_08BIT, 0xff}, + {0xc32d, CRL_REG_LEN_08BIT, 0xbb}, + {0xc32e, CRL_REG_LEN_08BIT, 0x00}, + {0xc32f, CRL_REG_LEN_08BIT, 0x61}, + {0xc330, CRL_REG_LEN_08BIT, 0xff}, + {0xc331, CRL_REG_LEN_08BIT, 0xf9}, + {0xc332, CRL_REG_LEN_08BIT, 0x00}, + {0xc333, CRL_REG_LEN_08BIT, 0xd9}, + {0xc334, CRL_REG_LEN_08BIT, 0x00}, + {0xc335, CRL_REG_LEN_08BIT, 0x2e}, + {0xc336, CRL_REG_LEN_08BIT, 0x00}, + {0xc337, CRL_REG_LEN_08BIT, 0xb1}, + {0xc338, CRL_REG_LEN_08BIT, 0xff}, + {0xc339, CRL_REG_LEN_08BIT, 0x64}, + {0xc33a, CRL_REG_LEN_08BIT, 0xff}, + {0xc33b, CRL_REG_LEN_08BIT, 0xeb}, + {0xc33c, CRL_REG_LEN_08BIT, 0xff}, + {0xc33d, CRL_REG_LEN_08BIT, 0xe8}, + {0xc33e, CRL_REG_LEN_08BIT, 0x00}, + {0xc33f, CRL_REG_LEN_08BIT, 0x48}, + {0xc340, CRL_REG_LEN_08BIT, 0xff}, + {0xc341, CRL_REG_LEN_08BIT, 0xd0}, + {0xc342, CRL_REG_LEN_08BIT, 0xff}, + {0xc343, CRL_REG_LEN_08BIT, 0xed}, + {0xc344, CRL_REG_LEN_08BIT, 0xff}, + {0xc345, CRL_REG_LEN_08BIT, 0xad}, + {0xc346, CRL_REG_LEN_08BIT, 0x00}, + {0xc347, CRL_REG_LEN_08BIT, 0x66}, + {0xc348, CRL_REG_LEN_08BIT, 0x01}, + {0xc349, CRL_REG_LEN_08BIT, 0x00}, + {0x6700, CRL_REG_LEN_08BIT, 0x04}, + {0x6701, CRL_REG_LEN_08BIT, 0x7b}, + {0x6702, CRL_REG_LEN_08BIT, 0xfd}, + {0x6703, CRL_REG_LEN_08BIT, 0xf9}, + {0x6704, CRL_REG_LEN_08BIT, 0x3d}, + {0x6705, CRL_REG_LEN_08BIT, 0x71}, + {0x6706, CRL_REG_LEN_08BIT, 0x78}, + {0x6708, CRL_REG_LEN_08BIT, 0x05}, + {0x6f06, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f07, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0a, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f0b, CRL_REG_LEN_08BIT, 0x00}, + {0x6f00, CRL_REG_LEN_08BIT, 0x03}, + {0xc34c, CRL_REG_LEN_08BIT, 0x01}, + {0xc34d, CRL_REG_LEN_08BIT, 0x00}, + {0xc34e, CRL_REG_LEN_08BIT, 0x46}, + {0xc34f, CRL_REG_LEN_08BIT, 0x55}, + {0xc350, CRL_REG_LEN_08BIT, 0x00}, + {0xc351, CRL_REG_LEN_08BIT, 0x40}, + {0xc352, CRL_REG_LEN_08BIT, 0x00}, + {0xc353, CRL_REG_LEN_08BIT, 0xff}, + {0xc354, CRL_REG_LEN_08BIT, 0x04}, + {0xc355, CRL_REG_LEN_08BIT, 0x08}, + {0xc356, CRL_REG_LEN_08BIT, 0x01}, + {0xc357, CRL_REG_LEN_08BIT, 0xef}, + {0xc358, CRL_REG_LEN_08BIT, 0x30}, + {0xc359, CRL_REG_LEN_08BIT, 0x01}, + {0xc35a, CRL_REG_LEN_08BIT, 0x64}, + {0xc35b, CRL_REG_LEN_08BIT, 0x46}, + {0xc35c, CRL_REG_LEN_08BIT, 0x00}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x302e, CRL_REG_LEN_08BIT, 0x00}, + {0x301b, CRL_REG_LEN_08BIT, 0xf0}, + {0x301c, 
CRL_REG_LEN_08BIT, 0xf0}, + {0x301a, CRL_REG_LEN_08BIT, 0xf0}, + {0xceb0, CRL_REG_LEN_08BIT, 0x00}, + {0xceb1, CRL_REG_LEN_08BIT, 0x00}, + {0xceb2, CRL_REG_LEN_08BIT, 0x00}, + {0xceb3, CRL_REG_LEN_08BIT, 0x00}, + {0xceb4, CRL_REG_LEN_08BIT, 0x00}, + {0xceb5, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xceb6, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xceb7, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c},
+};
+
+static struct crl_register_write_rep ov10635_1280_720_YUV_HDR_BT656[] = {
+ {0x0103, CRL_REG_LEN_08BIT, 0x01}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x300c, CRL_REG_LEN_08BIT, 0x61}, + {0x301b, CRL_REG_LEN_08BIT, 0xff}, + {0x301c, CRL_REG_LEN_08BIT, 0xff}, + {0x301a, CRL_REG_LEN_08BIT, 0xff}, + {0x3011, CRL_REG_LEN_08BIT, 0x42}, + {0x6900, CRL_REG_LEN_08BIT, 0x0c}, + {0x6901, CRL_REG_LEN_08BIT, 0x11}, + {0x3503, CRL_REG_LEN_08BIT, 0x10}, + {0x3025, CRL_REG_LEN_08BIT, 0x03}, + {0x3003, CRL_REG_LEN_08BIT, 0x14}, + {0x3004, CRL_REG_LEN_08BIT, 0x11}, + {0x3005, CRL_REG_LEN_08BIT, 0x20}, + {0x3006, CRL_REG_LEN_08BIT, 0x91}, + {0x3600, CRL_REG_LEN_08BIT, 0x74}, + {0x3601, CRL_REG_LEN_08BIT, 0x2b}, + {0x3612, CRL_REG_LEN_08BIT, 0x00}, + {0x3611, CRL_REG_LEN_08BIT, 0x67}, + {0x3633, CRL_REG_LEN_08BIT, 0xca}, + {0x3602, CRL_REG_LEN_08BIT, 0x2f}, + {0x3603, CRL_REG_LEN_08BIT, 0x00}, + {0x3630, CRL_REG_LEN_08BIT, 0x28}, + {0x3631, CRL_REG_LEN_08BIT, 0x16}, + {0x3714, CRL_REG_LEN_08BIT, 0x10}, + {0x371d, CRL_REG_LEN_08BIT, 0x01}, + {0x4300, CRL_REG_LEN_08BIT, 0x3a}, + {0x3007, CRL_REG_LEN_08BIT, 0x01}, + {0x3024, CRL_REG_LEN_08BIT, 0x01}, + {0x3020, CRL_REG_LEN_08BIT, 0x0b}, + {0x3702, CRL_REG_LEN_08BIT, 0x1a}, + {0x3703, CRL_REG_LEN_08BIT, 0x40}, + {0x3704, CRL_REG_LEN_08BIT, 0x2a}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x370c, CRL_REG_LEN_08BIT, 0xc7}, + {0x370d, CRL_REG_LEN_08BIT, 0x80}, + {0x3712, CRL_REG_LEN_08BIT, 0x00}, + {0x3713, CRL_REG_LEN_08BIT, 0x20}, + {0x3715, CRL_REG_LEN_08BIT, 0x04}, + {0x381d, CRL_REG_LEN_08BIT, 0x40}, + {0x381c, CRL_REG_LEN_08BIT, 0x00}, + {0x3822, CRL_REG_LEN_08BIT, 0x50}, + {0x3824, CRL_REG_LEN_08BIT, 0x50}, + {0x3815, CRL_REG_LEN_08BIT, 0x8c}, + {0x3804, CRL_REG_LEN_08BIT, 0x05}, + {0x3805, CRL_REG_LEN_08BIT, 0x1f}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3806, CRL_REG_LEN_08BIT, 0x02}, + {0x3807, CRL_REG_LEN_08BIT, 0xfd}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0x2c}, + {0x3808, CRL_REG_LEN_08BIT, 0x05}, + {0x3809, CRL_REG_LEN_08BIT, 0x00}, + {0x380a, CRL_REG_LEN_08BIT, 0x02}, + 
{0x380b, CRL_REG_LEN_08BIT, 0xd0}, + {0x380c, CRL_REG_LEN_08BIT, 0x06}, + {0x380d, CRL_REG_LEN_08BIT, 0xf6}, + {0x6e42, CRL_REG_LEN_08BIT, 0x02}, + {0x6e43, CRL_REG_LEN_08BIT, 0xec}, + {0x380e, CRL_REG_LEN_08BIT, 0x02}, + {0x380f, CRL_REG_LEN_08BIT, 0xec}, + {0x3813, CRL_REG_LEN_08BIT, 0x02}, + {0x3811, CRL_REG_LEN_08BIT, 0x10}, + {0x381f, CRL_REG_LEN_08BIT, 0x0c}, + {0x3828, CRL_REG_LEN_08BIT, 0x03}, + {0x3829, CRL_REG_LEN_08BIT, 0x10}, + {0x382a, CRL_REG_LEN_08BIT, 0x10}, + {0x382b, CRL_REG_LEN_08BIT, 0x10}, + {0x3621, CRL_REG_LEN_08BIT, 0x64}, + {0x5005, CRL_REG_LEN_08BIT, 0x08}, + {0x56d5, CRL_REG_LEN_08BIT, 0x00}, + {0x56d6, CRL_REG_LEN_08BIT, 0x80}, + {0x56d7, CRL_REG_LEN_08BIT, 0x00}, + {0x56d8, CRL_REG_LEN_08BIT, 0x00}, + {0x56d9, CRL_REG_LEN_08BIT, 0x00}, + {0x56da, CRL_REG_LEN_08BIT, 0x80}, + {0x56db, CRL_REG_LEN_08BIT, 0x00}, + {0x56dc, CRL_REG_LEN_08BIT, 0x00}, + {0x56e8, CRL_REG_LEN_08BIT, 0x00}, + {0x56e9, CRL_REG_LEN_08BIT, 0x7f}, + {0x56ea, CRL_REG_LEN_08BIT, 0x00}, + {0x56eb, CRL_REG_LEN_08BIT, 0x7f}, + {0x5100, CRL_REG_LEN_08BIT, 0x00}, + {0x5101, CRL_REG_LEN_08BIT, 0x80}, + {0x5102, CRL_REG_LEN_08BIT, 0x00}, + {0x5103, CRL_REG_LEN_08BIT, 0x80}, + {0x5104, CRL_REG_LEN_08BIT, 0x00}, + {0x5105, CRL_REG_LEN_08BIT, 0x80}, + {0x5106, CRL_REG_LEN_08BIT, 0x00}, + {0x5107, CRL_REG_LEN_08BIT, 0x80}, + {0x5108, CRL_REG_LEN_08BIT, 0x00}, + {0x5109, CRL_REG_LEN_08BIT, 0x00}, + {0x510a, CRL_REG_LEN_08BIT, 0x00}, + {0x510b, CRL_REG_LEN_08BIT, 0x00}, + {0x510c, CRL_REG_LEN_08BIT, 0x00}, + {0x510d, CRL_REG_LEN_08BIT, 0x00}, + {0x510e, CRL_REG_LEN_08BIT, 0x00}, + {0x510f, CRL_REG_LEN_08BIT, 0x00}, + {0x5110, CRL_REG_LEN_08BIT, 0x00}, + {0x5111, CRL_REG_LEN_08BIT, 0x80}, + {0x5112, CRL_REG_LEN_08BIT, 0x00}, + {0x5113, CRL_REG_LEN_08BIT, 0x80}, + {0x5114, CRL_REG_LEN_08BIT, 0x00}, + {0x5115, CRL_REG_LEN_08BIT, 0x80}, + {0x5116, CRL_REG_LEN_08BIT, 0x00}, + {0x5117, CRL_REG_LEN_08BIT, 0x80}, + {0x5118, CRL_REG_LEN_08BIT, 0x00}, + {0x5119, CRL_REG_LEN_08BIT, 0x00}, + {0x511a, CRL_REG_LEN_08BIT, 0x00}, + {0x511b, CRL_REG_LEN_08BIT, 0x00}, + {0x511c, CRL_REG_LEN_08BIT, 0x00}, + {0x511d, CRL_REG_LEN_08BIT, 0x00}, + {0x511e, CRL_REG_LEN_08BIT, 0x00}, + {0x511f, CRL_REG_LEN_08BIT, 0x00}, + {0x56d0, CRL_REG_LEN_08BIT, 0x00}, + {0x5006, CRL_REG_LEN_08BIT, 0x24}, + {0x5608, CRL_REG_LEN_08BIT, 0x0e}, + {0x52d7, CRL_REG_LEN_08BIT, 0x06}, + {0x528d, CRL_REG_LEN_08BIT, 0x08}, + {0x5293, CRL_REG_LEN_08BIT, 0x12}, + {0x52d3, CRL_REG_LEN_08BIT, 0x12}, + {0x5288, CRL_REG_LEN_08BIT, 0x06}, + {0x5289, CRL_REG_LEN_08BIT, 0x20}, + {0x52c8, CRL_REG_LEN_08BIT, 0x06}, + {0x52c9, CRL_REG_LEN_08BIT, 0x20}, + {0x52cd, CRL_REG_LEN_08BIT, 0x04}, + {0x5381, CRL_REG_LEN_08BIT, 0x00}, + {0x5382, CRL_REG_LEN_08BIT, 0xff}, + {0x5589, CRL_REG_LEN_08BIT, 0x76}, + {0x558a, CRL_REG_LEN_08BIT, 0x47}, + {0x558b, CRL_REG_LEN_08BIT, 0xef}, + {0x558c, CRL_REG_LEN_08BIT, 0xc9}, + {0x558d, CRL_REG_LEN_08BIT, 0x49}, + {0x558e, CRL_REG_LEN_08BIT, 0x30}, + {0x558f, CRL_REG_LEN_08BIT, 0x67}, + {0x5590, CRL_REG_LEN_08BIT, 0x3f}, + {0x5591, CRL_REG_LEN_08BIT, 0xf0}, + {0x5592, CRL_REG_LEN_08BIT, 0x10}, + {0x55a2, CRL_REG_LEN_08BIT, 0x6d}, + {0x55a3, CRL_REG_LEN_08BIT, 0x55}, + {0x55a4, CRL_REG_LEN_08BIT, 0xc3}, + {0x55a5, CRL_REG_LEN_08BIT, 0xb5}, + {0x55a6, CRL_REG_LEN_08BIT, 0x43}, + {0x55a7, CRL_REG_LEN_08BIT, 0x38}, + {0x55a8, CRL_REG_LEN_08BIT, 0x5f}, + {0x55a9, CRL_REG_LEN_08BIT, 0x4b}, + {0x55aa, CRL_REG_LEN_08BIT, 0xf0}, + {0x55ab, CRL_REG_LEN_08BIT, 0x10}, + {0x5581, CRL_REG_LEN_08BIT, 0x52}, + {0x5300, CRL_REG_LEN_08BIT, 0x01}, + 
{0x5301, CRL_REG_LEN_08BIT, 0x00}, + {0x5302, CRL_REG_LEN_08BIT, 0x00}, + {0x5303, CRL_REG_LEN_08BIT, 0x0e}, + {0x5304, CRL_REG_LEN_08BIT, 0x00}, + {0x5305, CRL_REG_LEN_08BIT, 0x0e}, + {0x5306, CRL_REG_LEN_08BIT, 0x00}, + {0x5307, CRL_REG_LEN_08BIT, 0x36}, + {0x5308, CRL_REG_LEN_08BIT, 0x00}, + {0x5309, CRL_REG_LEN_08BIT, 0xd9}, + {0x530a, CRL_REG_LEN_08BIT, 0x00}, + {0x530b, CRL_REG_LEN_08BIT, 0x0f}, + {0x530c, CRL_REG_LEN_08BIT, 0x00}, + {0x530d, CRL_REG_LEN_08BIT, 0x2c}, + {0x530e, CRL_REG_LEN_08BIT, 0x00}, + {0x530f, CRL_REG_LEN_08BIT, 0x59}, + {0x5310, CRL_REG_LEN_08BIT, 0x00}, + {0x5311, CRL_REG_LEN_08BIT, 0x7b}, + {0x5312, CRL_REG_LEN_08BIT, 0x00}, + {0x5313, CRL_REG_LEN_08BIT, 0x22}, + {0x5314, CRL_REG_LEN_08BIT, 0x00}, + {0x5315, CRL_REG_LEN_08BIT, 0xd5}, + {0x5316, CRL_REG_LEN_08BIT, 0x00}, + {0x5317, CRL_REG_LEN_08BIT, 0x13}, + {0x5318, CRL_REG_LEN_08BIT, 0x00}, + {0x5319, CRL_REG_LEN_08BIT, 0x18}, + {0x531a, CRL_REG_LEN_08BIT, 0x00}, + {0x531b, CRL_REG_LEN_08BIT, 0x26}, + {0x531c, CRL_REG_LEN_08BIT, 0x00}, + {0x531d, CRL_REG_LEN_08BIT, 0xdc}, + {0x531e, CRL_REG_LEN_08BIT, 0x00}, + {0x531f, CRL_REG_LEN_08BIT, 0x02}, + {0x5320, CRL_REG_LEN_08BIT, 0x00}, + {0x5321, CRL_REG_LEN_08BIT, 0x24}, + {0x5322, CRL_REG_LEN_08BIT, 0x00}, + {0x5323, CRL_REG_LEN_08BIT, 0x56}, + {0x5324, CRL_REG_LEN_08BIT, 0x00}, + {0x5325, CRL_REG_LEN_08BIT, 0x85}, + {0x5326, CRL_REG_LEN_08BIT, 0x00}, + {0x5327, CRL_REG_LEN_08BIT, 0x20}, + {0x5609, CRL_REG_LEN_08BIT, 0x01}, + {0x560a, CRL_REG_LEN_08BIT, 0x40}, + {0x560b, CRL_REG_LEN_08BIT, 0x01}, + {0x560c, CRL_REG_LEN_08BIT, 0x40}, + {0x560d, CRL_REG_LEN_08BIT, 0x00}, + {0x560e, CRL_REG_LEN_08BIT, 0xfa}, + {0x560f, CRL_REG_LEN_08BIT, 0x00}, + {0x5610, CRL_REG_LEN_08BIT, 0xfa}, + {0x5611, CRL_REG_LEN_08BIT, 0x02}, + {0x5612, CRL_REG_LEN_08BIT, 0x80}, + {0x5613, CRL_REG_LEN_08BIT, 0x02}, + {0x5614, CRL_REG_LEN_08BIT, 0x80}, + {0x5615, CRL_REG_LEN_08BIT, 0x01}, + {0x5616, CRL_REG_LEN_08BIT, 0x2c}, + {0x5617, CRL_REG_LEN_08BIT, 0x01}, + {0x5618, CRL_REG_LEN_08BIT, 0x2c}, + {0x563b, CRL_REG_LEN_08BIT, 0x01}, + {0x563c, CRL_REG_LEN_08BIT, 0x01}, + {0x563d, CRL_REG_LEN_08BIT, 0x01}, + {0x563e, CRL_REG_LEN_08BIT, 0x01}, + {0x563f, CRL_REG_LEN_08BIT, 0x03}, + {0x5640, CRL_REG_LEN_08BIT, 0x03}, + {0x5641, CRL_REG_LEN_08BIT, 0x03}, + {0x5642, CRL_REG_LEN_08BIT, 0x05}, + {0x5643, CRL_REG_LEN_08BIT, 0x09}, + {0x5644, CRL_REG_LEN_08BIT, 0x05}, + {0x5645, CRL_REG_LEN_08BIT, 0x05}, + {0x5646, CRL_REG_LEN_08BIT, 0x05}, + {0x5647, CRL_REG_LEN_08BIT, 0x05}, + {0x5651, CRL_REG_LEN_08BIT, 0x00}, + {0x5652, CRL_REG_LEN_08BIT, 0x80}, + {0x521a, CRL_REG_LEN_08BIT, 0x01}, + {0x521b, CRL_REG_LEN_08BIT, 0x03}, + {0x521c, CRL_REG_LEN_08BIT, 0x06}, + {0x521d, CRL_REG_LEN_08BIT, 0x0a}, + {0x521e, CRL_REG_LEN_08BIT, 0x0e}, + {0x521f, CRL_REG_LEN_08BIT, 0x12}, + {0x5220, CRL_REG_LEN_08BIT, 0x16}, + {0x5223, CRL_REG_LEN_08BIT, 0x02}, + {0x5225, CRL_REG_LEN_08BIT, 0x04}, + {0x5227, CRL_REG_LEN_08BIT, 0x08}, + {0x5229, CRL_REG_LEN_08BIT, 0x0c}, + {0x522b, CRL_REG_LEN_08BIT, 0x12}, + {0x522d, CRL_REG_LEN_08BIT, 0x18}, + {0x522f, CRL_REG_LEN_08BIT, 0x1e}, + {0x5241, CRL_REG_LEN_08BIT, 0x04}, + {0x5242, CRL_REG_LEN_08BIT, 0x01}, + {0x5243, CRL_REG_LEN_08BIT, 0x03}, + {0x5244, CRL_REG_LEN_08BIT, 0x06}, + {0x5245, CRL_REG_LEN_08BIT, 0x0a}, + {0x5246, CRL_REG_LEN_08BIT, 0x0e}, + {0x5247, CRL_REG_LEN_08BIT, 0x12}, + {0x5248, CRL_REG_LEN_08BIT, 0x16}, + {0x524a, CRL_REG_LEN_08BIT, 0x03}, + {0x524c, CRL_REG_LEN_08BIT, 0x04}, + {0x524e, CRL_REG_LEN_08BIT, 0x08}, + {0x5250, CRL_REG_LEN_08BIT, 0x0c}, + 
{0x5252, CRL_REG_LEN_08BIT, 0x12}, + {0x5254, CRL_REG_LEN_08BIT, 0x18}, + {0x5256, CRL_REG_LEN_08BIT, 0x1e}, + {0x4606, CRL_REG_LEN_08BIT, 0x07}, + {0x4607, CRL_REG_LEN_08BIT, 0x71}, + {0x460a, CRL_REG_LEN_08BIT, 0x03}, + {0x460b, CRL_REG_LEN_08BIT, 0xe7}, + {0x460c, CRL_REG_LEN_08BIT, 0x40}, + {0x4620, CRL_REG_LEN_08BIT, 0x0e}, + {0x4700, CRL_REG_LEN_08BIT, 0x06}, + {0x4701, CRL_REG_LEN_08BIT, 0x00}, + {0x4702, CRL_REG_LEN_08BIT, 0x01}, + {0x4004, CRL_REG_LEN_08BIT, 0x04}, + {0x4005, CRL_REG_LEN_08BIT, 0x18}, + {0x4001, CRL_REG_LEN_08BIT, 0x06}, + {0x4050, CRL_REG_LEN_08BIT, 0x22}, + {0x4051, CRL_REG_LEN_08BIT, 0x24}, + {0x4052, CRL_REG_LEN_08BIT, 0x02}, + {0x4057, CRL_REG_LEN_08BIT, 0x9c}, + {0x405a, CRL_REG_LEN_08BIT, 0x00}, + {0x4302, CRL_REG_LEN_08BIT, 0x03}, + {0x4303, CRL_REG_LEN_08BIT, 0xff}, + {0x4304, CRL_REG_LEN_08BIT, 0x00}, + {0x4305, CRL_REG_LEN_08BIT, 0x10}, + {0x4306, CRL_REG_LEN_08BIT, 0x03}, + {0x4307, CRL_REG_LEN_08BIT, 0xff}, + {0x4308, CRL_REG_LEN_08BIT, 0x00}, + {0x4309, CRL_REG_LEN_08BIT, 0x10}, + {0x4202, CRL_REG_LEN_08BIT, 0x02}, + {0x3023, CRL_REG_LEN_08BIT, 0x10}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x6f10, CRL_REG_LEN_08BIT, 0x07}, + {0x6f11, CRL_REG_LEN_08BIT, 0x82}, + {0x6f12, CRL_REG_LEN_08BIT, 0x04}, + {0x6f13, CRL_REG_LEN_08BIT, 0x00}, + {0x6f14, CRL_REG_LEN_08BIT, 0x1f}, + {0x6f15, CRL_REG_LEN_08BIT, 0xdd}, + {0x6f16, CRL_REG_LEN_08BIT, 0x04}, + {0x6f17, CRL_REG_LEN_08BIT, 0x04}, + {0x6f18, CRL_REG_LEN_08BIT, 0x36}, + {0x6f19, CRL_REG_LEN_08BIT, 0x66}, + {0x6f1a, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1b, CRL_REG_LEN_08BIT, 0x08}, + {0x6f1c, CRL_REG_LEN_08BIT, 0x0c}, + {0x6f1d, CRL_REG_LEN_08BIT, 0xe7}, + {0x6f1e, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1f, CRL_REG_LEN_08BIT, 0x0c}, + {0xd000, CRL_REG_LEN_08BIT, 0x19}, + {0xd001, CRL_REG_LEN_08BIT, 0xa0}, + {0xd002, CRL_REG_LEN_08BIT, 0x00}, + {0xd003, CRL_REG_LEN_08BIT, 0x01}, + {0xd004, CRL_REG_LEN_08BIT, 0xa9}, + {0xd005, CRL_REG_LEN_08BIT, 0xad}, + {0xd006, CRL_REG_LEN_08BIT, 0x10}, + {0xd007, CRL_REG_LEN_08BIT, 0x40}, + {0xd008, CRL_REG_LEN_08BIT, 0x44}, + {0xd009, CRL_REG_LEN_08BIT, 0x00}, + {0xd00a, CRL_REG_LEN_08BIT, 0x68}, + {0xd00b, CRL_REG_LEN_08BIT, 0x00}, + {0xd00c, CRL_REG_LEN_08BIT, 0x15}, + {0xd00d, CRL_REG_LEN_08BIT, 0x00}, + {0xd00e, CRL_REG_LEN_08BIT, 0x00}, + {0xd00f, CRL_REG_LEN_08BIT, 0x00}, + {0xd010, CRL_REG_LEN_08BIT, 0x19}, + {0xd011, CRL_REG_LEN_08BIT, 0xa0}, + {0xd012, CRL_REG_LEN_08BIT, 0x00}, + {0xd013, CRL_REG_LEN_08BIT, 0x01}, + {0xd014, CRL_REG_LEN_08BIT, 0xa9}, + {0xd015, CRL_REG_LEN_08BIT, 0xad}, + {0xd016, CRL_REG_LEN_08BIT, 0x14}, + {0xd017, CRL_REG_LEN_08BIT, 0x40}, + {0xd018, CRL_REG_LEN_08BIT, 0x44}, + {0xd019, CRL_REG_LEN_08BIT, 0x00}, + {0xd01a, CRL_REG_LEN_08BIT, 0x68}, + {0xd01b, CRL_REG_LEN_08BIT, 0x00}, + {0xd01c, CRL_REG_LEN_08BIT, 0x15}, + {0xd01d, CRL_REG_LEN_08BIT, 0x00}, + {0xd01e, CRL_REG_LEN_08BIT, 0x00}, + {0xd01f, CRL_REG_LEN_08BIT, 0x00}, + {0xd020, CRL_REG_LEN_08BIT, 0x19}, + {0xd021, CRL_REG_LEN_08BIT, 0xa0}, + {0xd022, CRL_REG_LEN_08BIT, 0x00}, + {0xd023, CRL_REG_LEN_08BIT, 0x01}, + {0xd024, CRL_REG_LEN_08BIT, 0xa9}, + {0xd025, CRL_REG_LEN_08BIT, 0xad}, + {0xd026, CRL_REG_LEN_08BIT, 0x15}, + {0xd027, CRL_REG_LEN_08BIT, 0x28}, + {0xd028, CRL_REG_LEN_08BIT, 0x44}, + {0xd029, CRL_REG_LEN_08BIT, 0x00}, + {0xd02a, CRL_REG_LEN_08BIT, 0x68}, + {0xd02b, CRL_REG_LEN_08BIT, 0x00}, + {0xd02c, CRL_REG_LEN_08BIT, 0x15}, + {0xd02d, CRL_REG_LEN_08BIT, 0x00}, + {0xd02e, CRL_REG_LEN_08BIT, 0x00}, + {0xd02f, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd030, CRL_REG_LEN_08BIT, 0x19}, + {0xd031, CRL_REG_LEN_08BIT, 0xa0}, + {0xd032, CRL_REG_LEN_08BIT, 0x00}, + {0xd033, CRL_REG_LEN_08BIT, 0x01}, + {0xd034, CRL_REG_LEN_08BIT, 0xa9}, + {0xd035, CRL_REG_LEN_08BIT, 0xad}, + {0xd036, CRL_REG_LEN_08BIT, 0x15}, + {0xd037, CRL_REG_LEN_08BIT, 0x4c}, + {0xd038, CRL_REG_LEN_08BIT, 0x44}, + {0xd039, CRL_REG_LEN_08BIT, 0x00}, + {0xd03a, CRL_REG_LEN_08BIT, 0x68}, + {0xd03b, CRL_REG_LEN_08BIT, 0x00}, + {0xd03c, CRL_REG_LEN_08BIT, 0x15}, + {0xd03d, CRL_REG_LEN_08BIT, 0x00}, + {0xd03e, CRL_REG_LEN_08BIT, 0x00}, + {0xd03f, CRL_REG_LEN_08BIT, 0x00}, + {0xd040, CRL_REG_LEN_08BIT, 0x9c}, + {0xd041, CRL_REG_LEN_08BIT, 0x21}, + {0xd042, CRL_REG_LEN_08BIT, 0xff}, + {0xd043, CRL_REG_LEN_08BIT, 0xe4}, + {0xd044, CRL_REG_LEN_08BIT, 0xd4}, + {0xd045, CRL_REG_LEN_08BIT, 0x01}, + {0xd046, CRL_REG_LEN_08BIT, 0x48}, + {0xd047, CRL_REG_LEN_08BIT, 0x00}, + {0xd048, CRL_REG_LEN_08BIT, 0xd4}, + {0xd049, CRL_REG_LEN_08BIT, 0x01}, + {0xd04a, CRL_REG_LEN_08BIT, 0x50}, + {0xd04b, CRL_REG_LEN_08BIT, 0x04}, + {0xd04c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd04d, CRL_REG_LEN_08BIT, 0x01}, + {0xd04e, CRL_REG_LEN_08BIT, 0x60}, + {0xd04f, CRL_REG_LEN_08BIT, 0x08}, + {0xd050, CRL_REG_LEN_08BIT, 0xd4}, + {0xd051, CRL_REG_LEN_08BIT, 0x01}, + {0xd052, CRL_REG_LEN_08BIT, 0x70}, + {0xd053, CRL_REG_LEN_08BIT, 0x0c}, + {0xd054, CRL_REG_LEN_08BIT, 0xd4}, + {0xd055, CRL_REG_LEN_08BIT, 0x01}, + {0xd056, CRL_REG_LEN_08BIT, 0x80}, + {0xd057, CRL_REG_LEN_08BIT, 0x10}, + {0xd058, CRL_REG_LEN_08BIT, 0x19}, + {0xd059, CRL_REG_LEN_08BIT, 0xc0}, + {0xd05a, CRL_REG_LEN_08BIT, 0x00}, + {0xd05b, CRL_REG_LEN_08BIT, 0x01}, + {0xd05c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd05d, CRL_REG_LEN_08BIT, 0xce}, + {0xd05e, CRL_REG_LEN_08BIT, 0x02}, + {0xd05f, CRL_REG_LEN_08BIT, 0xa4}, + {0xd060, CRL_REG_LEN_08BIT, 0x9c}, + {0xd061, CRL_REG_LEN_08BIT, 0xa0}, + {0xd062, CRL_REG_LEN_08BIT, 0x00}, + {0xd063, CRL_REG_LEN_08BIT, 0x00}, + {0xd064, CRL_REG_LEN_08BIT, 0x84}, + {0xd065, CRL_REG_LEN_08BIT, 0x6e}, + {0xd066, CRL_REG_LEN_08BIT, 0x00}, + {0xd067, CRL_REG_LEN_08BIT, 0x00}, + {0xd068, CRL_REG_LEN_08BIT, 0xd8}, + {0xd069, CRL_REG_LEN_08BIT, 0x03}, + {0xd06a, CRL_REG_LEN_08BIT, 0x28}, + {0xd06b, CRL_REG_LEN_08BIT, 0x76}, + {0xd06c, CRL_REG_LEN_08BIT, 0x1a}, + {0xd06d, CRL_REG_LEN_08BIT, 0x00}, + {0xd06e, CRL_REG_LEN_08BIT, 0x00}, + {0xd06f, CRL_REG_LEN_08BIT, 0x01}, + {0xd070, CRL_REG_LEN_08BIT, 0xaa}, + {0xd071, CRL_REG_LEN_08BIT, 0x10}, + {0xd072, CRL_REG_LEN_08BIT, 0x03}, + {0xd073, CRL_REG_LEN_08BIT, 0xf0}, + {0xd074, CRL_REG_LEN_08BIT, 0x18}, + {0xd075, CRL_REG_LEN_08BIT, 0x60}, + {0xd076, CRL_REG_LEN_08BIT, 0x00}, + {0xd077, CRL_REG_LEN_08BIT, 0x01}, + {0xd078, CRL_REG_LEN_08BIT, 0xa8}, + {0xd079, CRL_REG_LEN_08BIT, 0x63}, + {0xd07a, CRL_REG_LEN_08BIT, 0x07}, + {0xd07b, CRL_REG_LEN_08BIT, 0x80}, + {0xd07c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd07d, CRL_REG_LEN_08BIT, 0xa0}, + {0xd07e, CRL_REG_LEN_08BIT, 0x00}, + {0xd07f, CRL_REG_LEN_08BIT, 0x04}, + {0xd080, CRL_REG_LEN_08BIT, 0x18}, + {0xd081, CRL_REG_LEN_08BIT, 0xc0}, + {0xd082, CRL_REG_LEN_08BIT, 0x00}, + {0xd083, CRL_REG_LEN_08BIT, 0x00}, + {0xd084, CRL_REG_LEN_08BIT, 0xa8}, + {0xd085, CRL_REG_LEN_08BIT, 0xc6}, + {0xd086, CRL_REG_LEN_08BIT, 0x00}, + {0xd087, CRL_REG_LEN_08BIT, 0x00}, + {0xd088, CRL_REG_LEN_08BIT, 0x8c}, + {0xd089, CRL_REG_LEN_08BIT, 0x63}, + {0xd08a, CRL_REG_LEN_08BIT, 0x00}, + {0xd08b, CRL_REG_LEN_08BIT, 0x00}, + {0xd08c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd08d, CRL_REG_LEN_08BIT, 0x01}, + {0xd08e, CRL_REG_LEN_08BIT, 0x28}, + {0xd08f, CRL_REG_LEN_08BIT, 0x14}, + 
{0xd090, CRL_REG_LEN_08BIT, 0xd4}, + {0xd091, CRL_REG_LEN_08BIT, 0x01}, + {0xd092, CRL_REG_LEN_08BIT, 0x30}, + {0xd093, CRL_REG_LEN_08BIT, 0x18}, + {0xd094, CRL_REG_LEN_08BIT, 0x07}, + {0xd095, CRL_REG_LEN_08BIT, 0xff}, + {0xd096, CRL_REG_LEN_08BIT, 0xf8}, + {0xd097, CRL_REG_LEN_08BIT, 0xfd}, + {0xd098, CRL_REG_LEN_08BIT, 0x9c}, + {0xd099, CRL_REG_LEN_08BIT, 0x80}, + {0xd09a, CRL_REG_LEN_08BIT, 0x00}, + {0xd09b, CRL_REG_LEN_08BIT, 0x03}, + {0xd09c, CRL_REG_LEN_08BIT, 0xa5}, + {0xd09d, CRL_REG_LEN_08BIT, 0x6b}, + {0xd09e, CRL_REG_LEN_08BIT, 0x00}, + {0xd09f, CRL_REG_LEN_08BIT, 0xff}, + {0xd0a0, CRL_REG_LEN_08BIT, 0x18}, + {0xd0a1, CRL_REG_LEN_08BIT, 0xc0}, + {0xd0a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0a3, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a4, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0a5, CRL_REG_LEN_08BIT, 0xc6}, + {0xd0a6, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a7, CRL_REG_LEN_08BIT, 0x02}, + {0xd0a8, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0a9, CRL_REG_LEN_08BIT, 0x6b}, + {0xd0aa, CRL_REG_LEN_08BIT, 0x58}, + {0xd0ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ac, CRL_REG_LEN_08BIT, 0x84}, + {0xd0ad, CRL_REG_LEN_08BIT, 0x8e}, + {0xd0ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd0af, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0b1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd0b2, CRL_REG_LEN_08BIT, 0x30}, + {0xd0b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b4, CRL_REG_LEN_08BIT, 0x98}, + {0xd0b5, CRL_REG_LEN_08BIT, 0xb0}, + {0xd0b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0b9, CRL_REG_LEN_08BIT, 0x64}, + {0xd0ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd0bb, CRL_REG_LEN_08BIT, 0x6e}, + {0xd0bc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0bd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd0be, CRL_REG_LEN_08BIT, 0x18}, + {0xd0bf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c0, CRL_REG_LEN_08BIT, 0x10}, + {0xd0c1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c3, CRL_REG_LEN_08BIT, 0x06}, + {0xd0c4, CRL_REG_LEN_08BIT, 0x95}, + {0xd0c5, CRL_REG_LEN_08BIT, 0x8b}, + {0xd0c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c8, CRL_REG_LEN_08BIT, 0x94}, + {0xd0c9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd0ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd0cb, CRL_REG_LEN_08BIT, 0x70}, + {0xd0cc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0cd, CRL_REG_LEN_08BIT, 0x65}, + {0xd0ce, CRL_REG_LEN_08BIT, 0x60}, + {0xd0cf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd0d1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d3, CRL_REG_LEN_08BIT, 0x62}, + {0xd0d4, CRL_REG_LEN_08BIT, 0x15}, + {0xd0d5, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d8, CRL_REG_LEN_08BIT, 0x18}, + {0xd0d9, CRL_REG_LEN_08BIT, 0x60}, + {0xd0da, CRL_REG_LEN_08BIT, 0x80}, + {0xd0db, CRL_REG_LEN_08BIT, 0x06}, + {0xd0dc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0dd, CRL_REG_LEN_08BIT, 0x83}, + {0xd0de, CRL_REG_LEN_08BIT, 0x38}, + {0xd0df, CRL_REG_LEN_08BIT, 0x29}, + {0xd0e0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e1, CRL_REG_LEN_08BIT, 0xe3}, + {0xd0e2, CRL_REG_LEN_08BIT, 0x40}, + {0xd0e3, CRL_REG_LEN_08BIT, 0x08}, + {0xd0e4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0e5, CRL_REG_LEN_08BIT, 0x84}, + {0xd0e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e9, CRL_REG_LEN_08BIT, 0xa3}, + {0xd0ea, CRL_REG_LEN_08BIT, 0x40}, + {0xd0eb, CRL_REG_LEN_08BIT, 0x09}, + {0xd0ec, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0ed, CRL_REG_LEN_08BIT, 0xc3}, + {0xd0ee, CRL_REG_LEN_08BIT, 0x38}, + {0xd0ef, CRL_REG_LEN_08BIT, 0x2a}, + 
{0xd0f0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f1, CRL_REG_LEN_08BIT, 0x07}, + {0xd0f2, CRL_REG_LEN_08BIT, 0x20}, + {0xd0f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0f5, CRL_REG_LEN_08BIT, 0x66}, + {0xd0f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f8, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f9, CRL_REG_LEN_08BIT, 0x05}, + {0xd0fa, CRL_REG_LEN_08BIT, 0x18}, + {0xd0fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd0fc, CRL_REG_LEN_08BIT, 0x18}, + {0xd0fd, CRL_REG_LEN_08BIT, 0x60}, + {0xd0fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ff, CRL_REG_LEN_08BIT, 0x01}, + {0xd100, CRL_REG_LEN_08BIT, 0x98}, + {0xd101, CRL_REG_LEN_08BIT, 0x90}, + {0xd102, CRL_REG_LEN_08BIT, 0x00}, + {0xd103, CRL_REG_LEN_08BIT, 0x00}, + {0xd104, CRL_REG_LEN_08BIT, 0x84}, + {0xd105, CRL_REG_LEN_08BIT, 0xae}, + {0xd106, CRL_REG_LEN_08BIT, 0x00}, + {0xd107, CRL_REG_LEN_08BIT, 0x00}, + {0xd108, CRL_REG_LEN_08BIT, 0xa8}, + {0xd109, CRL_REG_LEN_08BIT, 0x63}, + {0xd10a, CRL_REG_LEN_08BIT, 0x06}, + {0xd10b, CRL_REG_LEN_08BIT, 0x4c}, + {0xd10c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd10d, CRL_REG_LEN_08BIT, 0xc0}, + {0xd10e, CRL_REG_LEN_08BIT, 0x00}, + {0xd10f, CRL_REG_LEN_08BIT, 0x00}, + {0xd110, CRL_REG_LEN_08BIT, 0xd8}, + {0xd111, CRL_REG_LEN_08BIT, 0x03}, + {0xd112, CRL_REG_LEN_08BIT, 0x30}, + {0xd113, CRL_REG_LEN_08BIT, 0x00}, + {0xd114, CRL_REG_LEN_08BIT, 0x8c}, + {0xd115, CRL_REG_LEN_08BIT, 0x65}, + {0xd116, CRL_REG_LEN_08BIT, 0x00}, + {0xd117, CRL_REG_LEN_08BIT, 0x6e}, + {0xd118, CRL_REG_LEN_08BIT, 0xe5}, + {0xd119, CRL_REG_LEN_08BIT, 0x84}, + {0xd11a, CRL_REG_LEN_08BIT, 0x18}, + {0xd11b, CRL_REG_LEN_08BIT, 0x00}, + {0xd11c, CRL_REG_LEN_08BIT, 0x10}, + {0xd11d, CRL_REG_LEN_08BIT, 0x00}, + {0xd11e, CRL_REG_LEN_08BIT, 0x00}, + {0xd11f, CRL_REG_LEN_08BIT, 0x07}, + {0xd120, CRL_REG_LEN_08BIT, 0x18}, + {0xd121, CRL_REG_LEN_08BIT, 0x80}, + {0xd122, CRL_REG_LEN_08BIT, 0x80}, + {0xd123, CRL_REG_LEN_08BIT, 0x06}, + {0xd124, CRL_REG_LEN_08BIT, 0x94}, + {0xd125, CRL_REG_LEN_08BIT, 0x65}, + {0xd126, CRL_REG_LEN_08BIT, 0x00}, + {0xd127, CRL_REG_LEN_08BIT, 0x70}, + {0xd128, CRL_REG_LEN_08BIT, 0xe5}, + {0xd129, CRL_REG_LEN_08BIT, 0x43}, + {0xd12a, CRL_REG_LEN_08BIT, 0x60}, + {0xd12b, CRL_REG_LEN_08BIT, 0x00}, + {0xd12c, CRL_REG_LEN_08BIT, 0x0c}, + {0xd12d, CRL_REG_LEN_08BIT, 0x00}, + {0xd12e, CRL_REG_LEN_08BIT, 0x00}, + {0xd12f, CRL_REG_LEN_08BIT, 0x3e}, + {0xd130, CRL_REG_LEN_08BIT, 0xa8}, + {0xd131, CRL_REG_LEN_08BIT, 0x64}, + {0xd132, CRL_REG_LEN_08BIT, 0x38}, + {0xd133, CRL_REG_LEN_08BIT, 0x24}, + {0xd134, CRL_REG_LEN_08BIT, 0x18}, + {0xd135, CRL_REG_LEN_08BIT, 0x80}, + {0xd136, CRL_REG_LEN_08BIT, 0x80}, + {0xd137, CRL_REG_LEN_08BIT, 0x06}, + {0xd138, CRL_REG_LEN_08BIT, 0xa8}, + {0xd139, CRL_REG_LEN_08BIT, 0x64}, + {0xd13a, CRL_REG_LEN_08BIT, 0x38}, + {0xd13b, CRL_REG_LEN_08BIT, 0x24}, + {0xd13c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd13d, CRL_REG_LEN_08BIT, 0x63}, + {0xd13e, CRL_REG_LEN_08BIT, 0x00}, + {0xd13f, CRL_REG_LEN_08BIT, 0x00}, + {0xd140, CRL_REG_LEN_08BIT, 0xa4}, + {0xd141, CRL_REG_LEN_08BIT, 0x63}, + {0xd142, CRL_REG_LEN_08BIT, 0x00}, + {0xd143, CRL_REG_LEN_08BIT, 0x40}, + {0xd144, CRL_REG_LEN_08BIT, 0xbc}, + {0xd145, CRL_REG_LEN_08BIT, 0x23}, + {0xd146, CRL_REG_LEN_08BIT, 0x00}, + {0xd147, CRL_REG_LEN_08BIT, 0x00}, + {0xd148, CRL_REG_LEN_08BIT, 0x0c}, + {0xd149, CRL_REG_LEN_08BIT, 0x00}, + {0xd14a, CRL_REG_LEN_08BIT, 0x00}, + {0xd14b, CRL_REG_LEN_08BIT, 0x2a}, + {0xd14c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd14d, CRL_REG_LEN_08BIT, 0x64}, + {0xd14e, CRL_REG_LEN_08BIT, 0x6e}, + {0xd14f, CRL_REG_LEN_08BIT, 0x44}, + 
{0xd150, CRL_REG_LEN_08BIT, 0x19}, + {0xd151, CRL_REG_LEN_08BIT, 0x00}, + {0xd152, CRL_REG_LEN_08BIT, 0x80}, + {0xd153, CRL_REG_LEN_08BIT, 0x06}, + {0xd154, CRL_REG_LEN_08BIT, 0xa8}, + {0xd155, CRL_REG_LEN_08BIT, 0xe8}, + {0xd156, CRL_REG_LEN_08BIT, 0x3d}, + {0xd157, CRL_REG_LEN_08BIT, 0x05}, + {0xd158, CRL_REG_LEN_08BIT, 0x8c}, + {0xd159, CRL_REG_LEN_08BIT, 0x67}, + {0xd15a, CRL_REG_LEN_08BIT, 0x00}, + {0xd15b, CRL_REG_LEN_08BIT, 0x00}, + {0xd15c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd15d, CRL_REG_LEN_08BIT, 0x63}, + {0xd15e, CRL_REG_LEN_08BIT, 0x00}, + {0xd15f, CRL_REG_LEN_08BIT, 0x18}, + {0xd160, CRL_REG_LEN_08BIT, 0xb8}, + {0xd161, CRL_REG_LEN_08BIT, 0x63}, + {0xd162, CRL_REG_LEN_08BIT, 0x00}, + {0xd163, CRL_REG_LEN_08BIT, 0x98}, + {0xd164, CRL_REG_LEN_08BIT, 0xbc}, + {0xd165, CRL_REG_LEN_08BIT, 0x03}, + {0xd166, CRL_REG_LEN_08BIT, 0x00}, + {0xd167, CRL_REG_LEN_08BIT, 0x00}, + {0xd168, CRL_REG_LEN_08BIT, 0x10}, + {0xd169, CRL_REG_LEN_08BIT, 0x00}, + {0xd16a, CRL_REG_LEN_08BIT, 0x00}, + {0xd16b, CRL_REG_LEN_08BIT, 0x10}, + {0xd16c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd16d, CRL_REG_LEN_08BIT, 0x48}, + {0xd16e, CRL_REG_LEN_08BIT, 0x67}, + {0xd16f, CRL_REG_LEN_08BIT, 0x02}, + {0xd170, CRL_REG_LEN_08BIT, 0xb8}, + {0xd171, CRL_REG_LEN_08BIT, 0xa3}, + {0xd172, CRL_REG_LEN_08BIT, 0x00}, + {0xd173, CRL_REG_LEN_08BIT, 0x19}, + {0xd174, CRL_REG_LEN_08BIT, 0x8c}, + {0xd175, CRL_REG_LEN_08BIT, 0x8a}, + {0xd176, CRL_REG_LEN_08BIT, 0x00}, + {0xd177, CRL_REG_LEN_08BIT, 0x00}, + {0xd178, CRL_REG_LEN_08BIT, 0xa9}, + {0xd179, CRL_REG_LEN_08BIT, 0x68}, + {0xd17a, CRL_REG_LEN_08BIT, 0x67}, + {0xd17b, CRL_REG_LEN_08BIT, 0x03}, + {0xd17c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd17d, CRL_REG_LEN_08BIT, 0xc4}, + {0xd17e, CRL_REG_LEN_08BIT, 0x00}, + {0xd17f, CRL_REG_LEN_08BIT, 0x08}, + {0xd180, CRL_REG_LEN_08BIT, 0x8c}, + {0xd181, CRL_REG_LEN_08BIT, 0x6b}, + {0xd182, CRL_REG_LEN_08BIT, 0x00}, + {0xd183, CRL_REG_LEN_08BIT, 0x00}, + {0xd184, CRL_REG_LEN_08BIT, 0xb8}, + {0xd185, CRL_REG_LEN_08BIT, 0x85}, + {0xd186, CRL_REG_LEN_08BIT, 0x00}, + {0xd187, CRL_REG_LEN_08BIT, 0x98}, + {0xd188, CRL_REG_LEN_08BIT, 0xe0}, + {0xd189, CRL_REG_LEN_08BIT, 0x63}, + {0xd18a, CRL_REG_LEN_08BIT, 0x30}, + {0xd18b, CRL_REG_LEN_08BIT, 0x04}, + {0xd18c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd18d, CRL_REG_LEN_08BIT, 0x64}, + {0xd18e, CRL_REG_LEN_08BIT, 0x18}, + {0xd18f, CRL_REG_LEN_08BIT, 0x00}, + {0xd190, CRL_REG_LEN_08BIT, 0xa4}, + {0xd191, CRL_REG_LEN_08BIT, 0x83}, + {0xd192, CRL_REG_LEN_08BIT, 0xff}, + {0xd193, CRL_REG_LEN_08BIT, 0xff}, + {0xd194, CRL_REG_LEN_08BIT, 0xb8}, + {0xd195, CRL_REG_LEN_08BIT, 0x64}, + {0xd196, CRL_REG_LEN_08BIT, 0x00}, + {0xd197, CRL_REG_LEN_08BIT, 0x48}, + {0xd198, CRL_REG_LEN_08BIT, 0xd8}, + {0xd199, CRL_REG_LEN_08BIT, 0x0a}, + {0xd19a, CRL_REG_LEN_08BIT, 0x18}, + {0xd19b, CRL_REG_LEN_08BIT, 0x00}, + {0xd19c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd19d, CRL_REG_LEN_08BIT, 0x0b}, + {0xd19e, CRL_REG_LEN_08BIT, 0x20}, + {0xd19f, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a0, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1a1, CRL_REG_LEN_08BIT, 0x60}, + {0xd1a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1a5, CRL_REG_LEN_08BIT, 0x07}, + {0xd1a6, CRL_REG_LEN_08BIT, 0x18}, + {0xd1a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1a9, CRL_REG_LEN_08BIT, 0x68}, + {0xd1aa, CRL_REG_LEN_08BIT, 0x38}, + {0xd1ab, CRL_REG_LEN_08BIT, 0x22}, + {0xd1ac, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ad, CRL_REG_LEN_08BIT, 0x80}, + {0xd1ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd1af, CRL_REG_LEN_08BIT, 0x70}, + 
{0xd1b0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1b1, CRL_REG_LEN_08BIT, 0xe8}, + {0xd1b2, CRL_REG_LEN_08BIT, 0x38}, + {0xd1b3, CRL_REG_LEN_08BIT, 0x43}, + {0xd1b4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1b5, CRL_REG_LEN_08BIT, 0x03}, + {0xd1b6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1b8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1b9, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bb, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1bd, CRL_REG_LEN_08BIT, 0xc8}, + {0xd1be, CRL_REG_LEN_08BIT, 0x38}, + {0xd1bf, CRL_REG_LEN_08BIT, 0x42}, + {0xd1c0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1c1, CRL_REG_LEN_08BIT, 0x66}, + {0xd1c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1c5, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c7, CRL_REG_LEN_08BIT, 0x01}, + {0xd1c8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd1c9, CRL_REG_LEN_08BIT, 0x83}, + {0xd1ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cb, CRL_REG_LEN_08BIT, 0x08}, + {0xd1cc, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1cd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cf, CRL_REG_LEN_08BIT, 0xff}, + {0xd1d0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1d1, CRL_REG_LEN_08BIT, 0x67}, + {0xd1d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd1d5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1d6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d8, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1d9, CRL_REG_LEN_08BIT, 0x63}, + {0xd1da, CRL_REG_LEN_08BIT, 0xff}, + {0xd1db, CRL_REG_LEN_08BIT, 0xff}, + {0xd1dc, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1dd, CRL_REG_LEN_08BIT, 0x43}, + {0xd1de, CRL_REG_LEN_08BIT, 0x00}, + {0xd1df, CRL_REG_LEN_08BIT, 0x07}, + {0xd1e0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd1e1, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e3, CRL_REG_LEN_08BIT, 0x5b}, + {0xd1e4, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1e5, CRL_REG_LEN_08BIT, 0x05}, + {0xd1e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e7, CRL_REG_LEN_08BIT, 0x02}, + {0xd1e8, CRL_REG_LEN_08BIT, 0x03}, + {0xd1e9, CRL_REG_LEN_08BIT, 0xff}, + {0xd1ea, CRL_REG_LEN_08BIT, 0xff}, + {0xd1eb, CRL_REG_LEN_08BIT, 0xf6}, + {0xd1ec, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd1ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f1, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1f2, CRL_REG_LEN_08BIT, 0x55}, + {0xd1f3, CRL_REG_LEN_08BIT, 0x86}, + {0xd1f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1f5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f9, CRL_REG_LEN_08BIT, 0xc4}, + {0xd1fa, CRL_REG_LEN_08BIT, 0x6e}, + {0xd1fb, CRL_REG_LEN_08BIT, 0x45}, + {0xd1fc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1fd, CRL_REG_LEN_08BIT, 0xe4}, + {0xd1fe, CRL_REG_LEN_08BIT, 0x55}, + {0xd1ff, CRL_REG_LEN_08BIT, 0x87}, + {0xd200, CRL_REG_LEN_08BIT, 0xd8}, + {0xd201, CRL_REG_LEN_08BIT, 0x05}, + {0xd202, CRL_REG_LEN_08BIT, 0x18}, + {0xd203, CRL_REG_LEN_08BIT, 0x00}, + {0xd204, CRL_REG_LEN_08BIT, 0x8c}, + {0xd205, CRL_REG_LEN_08BIT, 0x66}, + {0xd206, CRL_REG_LEN_08BIT, 0x00}, + {0xd207, CRL_REG_LEN_08BIT, 0x00}, + {0xd208, CRL_REG_LEN_08BIT, 0xa8}, + {0xd209, CRL_REG_LEN_08BIT, 0xa4}, + {0xd20a, CRL_REG_LEN_08BIT, 0x6e}, + {0xd20b, CRL_REG_LEN_08BIT, 0x46}, + {0xd20c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd20d, CRL_REG_LEN_08BIT, 0x07}, + {0xd20e, CRL_REG_LEN_08BIT, 0x18}, + {0xd20f, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd210, CRL_REG_LEN_08BIT, 0xa8}, + {0xd211, CRL_REG_LEN_08BIT, 0x84}, + {0xd212, CRL_REG_LEN_08BIT, 0x55}, + {0xd213, CRL_REG_LEN_08BIT, 0x88}, + {0xd214, CRL_REG_LEN_08BIT, 0x8c}, + {0xd215, CRL_REG_LEN_08BIT, 0x65}, + {0xd216, CRL_REG_LEN_08BIT, 0x00}, + {0xd217, CRL_REG_LEN_08BIT, 0x00}, + {0xd218, CRL_REG_LEN_08BIT, 0xd8}, + {0xd219, CRL_REG_LEN_08BIT, 0x04}, + {0xd21a, CRL_REG_LEN_08BIT, 0x18}, + {0xd21b, CRL_REG_LEN_08BIT, 0x00}, + {0xd21c, CRL_REG_LEN_08BIT, 0x03}, + {0xd21d, CRL_REG_LEN_08BIT, 0xff}, + {0xd21e, CRL_REG_LEN_08BIT, 0xff}, + {0xd21f, CRL_REG_LEN_08BIT, 0xce}, + {0xd220, CRL_REG_LEN_08BIT, 0x19}, + {0xd221, CRL_REG_LEN_08BIT, 0x00}, + {0xd222, CRL_REG_LEN_08BIT, 0x80}, + {0xd223, CRL_REG_LEN_08BIT, 0x06}, + {0xd224, CRL_REG_LEN_08BIT, 0x8c}, + {0xd225, CRL_REG_LEN_08BIT, 0x63}, + {0xd226, CRL_REG_LEN_08BIT, 0x00}, + {0xd227, CRL_REG_LEN_08BIT, 0x00}, + {0xd228, CRL_REG_LEN_08BIT, 0xa4}, + {0xd229, CRL_REG_LEN_08BIT, 0x63}, + {0xd22a, CRL_REG_LEN_08BIT, 0x00}, + {0xd22b, CRL_REG_LEN_08BIT, 0x40}, + {0xd22c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd22d, CRL_REG_LEN_08BIT, 0x23}, + {0xd22e, CRL_REG_LEN_08BIT, 0x00}, + {0xd22f, CRL_REG_LEN_08BIT, 0x00}, + {0xd230, CRL_REG_LEN_08BIT, 0x13}, + {0xd231, CRL_REG_LEN_08BIT, 0xff}, + {0xd232, CRL_REG_LEN_08BIT, 0xff}, + {0xd233, CRL_REG_LEN_08BIT, 0xc8}, + {0xd234, CRL_REG_LEN_08BIT, 0x9d}, + {0xd235, CRL_REG_LEN_08BIT, 0x00}, + {0xd236, CRL_REG_LEN_08BIT, 0x00}, + {0xd237, CRL_REG_LEN_08BIT, 0x40}, + {0xd238, CRL_REG_LEN_08BIT, 0xa8}, + {0xd239, CRL_REG_LEN_08BIT, 0x64}, + {0xd23a, CRL_REG_LEN_08BIT, 0x55}, + {0xd23b, CRL_REG_LEN_08BIT, 0x86}, + {0xd23c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd23d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd23e, CRL_REG_LEN_08BIT, 0x55}, + {0xd23f, CRL_REG_LEN_08BIT, 0x87}, + {0xd240, CRL_REG_LEN_08BIT, 0xd8}, + {0xd241, CRL_REG_LEN_08BIT, 0x03}, + {0xd242, CRL_REG_LEN_08BIT, 0x40}, + {0xd243, CRL_REG_LEN_08BIT, 0x00}, + {0xd244, CRL_REG_LEN_08BIT, 0xa8}, + {0xd245, CRL_REG_LEN_08BIT, 0x64}, + {0xd246, CRL_REG_LEN_08BIT, 0x55}, + {0xd247, CRL_REG_LEN_08BIT, 0x88}, + {0xd248, CRL_REG_LEN_08BIT, 0xd8}, + {0xd249, CRL_REG_LEN_08BIT, 0x05}, + {0xd24a, CRL_REG_LEN_08BIT, 0x40}, + {0xd24b, CRL_REG_LEN_08BIT, 0x00}, + {0xd24c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd24d, CRL_REG_LEN_08BIT, 0x03}, + {0xd24e, CRL_REG_LEN_08BIT, 0x40}, + {0xd24f, CRL_REG_LEN_08BIT, 0x00}, + {0xd250, CRL_REG_LEN_08BIT, 0x03}, + {0xd251, CRL_REG_LEN_08BIT, 0xff}, + {0xd252, CRL_REG_LEN_08BIT, 0xff}, + {0xd253, CRL_REG_LEN_08BIT, 0xc1}, + {0xd254, CRL_REG_LEN_08BIT, 0x19}, + {0xd255, CRL_REG_LEN_08BIT, 0x00}, + {0xd256, CRL_REG_LEN_08BIT, 0x80}, + {0xd257, CRL_REG_LEN_08BIT, 0x06}, + {0xd258, CRL_REG_LEN_08BIT, 0x94}, + {0xd259, CRL_REG_LEN_08BIT, 0x84}, + {0xd25a, CRL_REG_LEN_08BIT, 0x00}, + {0xd25b, CRL_REG_LEN_08BIT, 0x72}, + {0xd25c, CRL_REG_LEN_08BIT, 0xe5}, + {0xd25d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd25e, CRL_REG_LEN_08BIT, 0x60}, + {0xd25f, CRL_REG_LEN_08BIT, 0x00}, + {0xd260, CRL_REG_LEN_08BIT, 0x0c}, + {0xd261, CRL_REG_LEN_08BIT, 0x00}, + {0xd262, CRL_REG_LEN_08BIT, 0x00}, + {0xd263, CRL_REG_LEN_08BIT, 0x4d}, + {0xd264, CRL_REG_LEN_08BIT, 0x9d}, + {0xd265, CRL_REG_LEN_08BIT, 0x60}, + {0xd266, CRL_REG_LEN_08BIT, 0x01}, + {0xd267, CRL_REG_LEN_08BIT, 0x00}, + {0xd268, CRL_REG_LEN_08BIT, 0x85}, + {0xd269, CRL_REG_LEN_08BIT, 0x4e}, + {0xd26a, CRL_REG_LEN_08BIT, 0x00}, + {0xd26b, CRL_REG_LEN_08BIT, 0x00}, + {0xd26c, CRL_REG_LEN_08BIT, 0x98}, + {0xd26d, CRL_REG_LEN_08BIT, 0x70}, + {0xd26e, CRL_REG_LEN_08BIT, 0x00}, + {0xd26f, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd270, CRL_REG_LEN_08BIT, 0x8c}, + {0xd271, CRL_REG_LEN_08BIT, 0x8a}, + {0xd272, CRL_REG_LEN_08BIT, 0x00}, + {0xd273, CRL_REG_LEN_08BIT, 0x6f}, + {0xd274, CRL_REG_LEN_08BIT, 0xe5}, + {0xd275, CRL_REG_LEN_08BIT, 0x63}, + {0xd276, CRL_REG_LEN_08BIT, 0x20}, + {0xd277, CRL_REG_LEN_08BIT, 0x00}, + {0xd278, CRL_REG_LEN_08BIT, 0x10}, + {0xd279, CRL_REG_LEN_08BIT, 0x00}, + {0xd27a, CRL_REG_LEN_08BIT, 0x00}, + {0xd27b, CRL_REG_LEN_08BIT, 0x07}, + {0xd27c, CRL_REG_LEN_08BIT, 0x15}, + {0xd27d, CRL_REG_LEN_08BIT, 0x00}, + {0xd27e, CRL_REG_LEN_08BIT, 0x00}, + {0xd27f, CRL_REG_LEN_08BIT, 0x00}, + {0xd280, CRL_REG_LEN_08BIT, 0x8c}, + {0xd281, CRL_REG_LEN_08BIT, 0xaa}, + {0xd282, CRL_REG_LEN_08BIT, 0x00}, + {0xd283, CRL_REG_LEN_08BIT, 0x6e}, + {0xd284, CRL_REG_LEN_08BIT, 0xe0}, + {0xd285, CRL_REG_LEN_08BIT, 0x63}, + {0xd286, CRL_REG_LEN_08BIT, 0x28}, + {0xd287, CRL_REG_LEN_08BIT, 0x02}, + {0xd288, CRL_REG_LEN_08BIT, 0xe0}, + {0xd289, CRL_REG_LEN_08BIT, 0x84}, + {0xd28a, CRL_REG_LEN_08BIT, 0x28}, + {0xd28b, CRL_REG_LEN_08BIT, 0x02}, + {0xd28c, CRL_REG_LEN_08BIT, 0x07}, + {0xd28d, CRL_REG_LEN_08BIT, 0xff}, + {0xd28e, CRL_REG_LEN_08BIT, 0xf8}, + {0xd28f, CRL_REG_LEN_08BIT, 0x66}, + {0xd290, CRL_REG_LEN_08BIT, 0xe0}, + {0xd291, CRL_REG_LEN_08BIT, 0x63}, + {0xd292, CRL_REG_LEN_08BIT, 0x5b}, + {0xd293, CRL_REG_LEN_08BIT, 0x06}, + {0xd294, CRL_REG_LEN_08BIT, 0x8c}, + {0xd295, CRL_REG_LEN_08BIT, 0x6a}, + {0xd296, CRL_REG_LEN_08BIT, 0x00}, + {0xd297, CRL_REG_LEN_08BIT, 0x77}, + {0xd298, CRL_REG_LEN_08BIT, 0xe0}, + {0xd299, CRL_REG_LEN_08BIT, 0x63}, + {0xd29a, CRL_REG_LEN_08BIT, 0x5b}, + {0xd29b, CRL_REG_LEN_08BIT, 0x06}, + {0xd29c, CRL_REG_LEN_08BIT, 0xbd}, + {0xd29d, CRL_REG_LEN_08BIT, 0x63}, + {0xd29e, CRL_REG_LEN_08BIT, 0x00}, + {0xd29f, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2a1, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a3, CRL_REG_LEN_08BIT, 0x5a}, + {0xd2a4, CRL_REG_LEN_08BIT, 0x15}, + {0xd2a5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd2a9, CRL_REG_LEN_08BIT, 0x8a}, + {0xd2aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ab, CRL_REG_LEN_08BIT, 0x78}, + {0xd2ac, CRL_REG_LEN_08BIT, 0xb8}, + {0xd2ad, CRL_REG_LEN_08BIT, 0x63}, + {0xd2ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd2af, CRL_REG_LEN_08BIT, 0x88}, + {0xd2b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd2b1, CRL_REG_LEN_08BIT, 0x64}, + {0xd2b2, CRL_REG_LEN_08BIT, 0x5b}, + {0xd2b3, CRL_REG_LEN_08BIT, 0x06}, + {0xd2b4, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2b5, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b8, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2b9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd2bb, CRL_REG_LEN_08BIT, 0x59}, + {0xd2bc, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2bd, CRL_REG_LEN_08BIT, 0x01}, + {0xd2be, CRL_REG_LEN_08BIT, 0x18}, + {0xd2bf, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c0, CRL_REG_LEN_08BIT, 0xb9}, + {0xd2c1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c3, CRL_REG_LEN_08BIT, 0x88}, + {0xd2c4, CRL_REG_LEN_08BIT, 0x85}, + {0xd2c5, CRL_REG_LEN_08BIT, 0x01}, + {0xd2c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2c9, CRL_REG_LEN_08BIT, 0x68}, + {0xd2ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cc, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2cd, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cf, CRL_REG_LEN_08BIT, 0x51}, + 
{0xd2d0, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2d1, CRL_REG_LEN_08BIT, 0x01}, + {0xd2d2, CRL_REG_LEN_08BIT, 0x58}, + {0xd2d3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2d4, CRL_REG_LEN_08BIT, 0x84}, + {0xd2d5, CRL_REG_LEN_08BIT, 0x81}, + {0xd2d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2d7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2d8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2d9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd2da, CRL_REG_LEN_08BIT, 0x01}, + {0xd2db, CRL_REG_LEN_08BIT, 0x00}, + {0xd2dc, CRL_REG_LEN_08BIT, 0x10}, + {0xd2dd, CRL_REG_LEN_08BIT, 0x00}, + {0xd2de, CRL_REG_LEN_08BIT, 0x00}, + {0xd2df, CRL_REG_LEN_08BIT, 0x05}, + {0xd2e0, CRL_REG_LEN_08BIT, 0x84}, + {0xd2e1, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2e4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd2e5, CRL_REG_LEN_08BIT, 0xa0}, + {0xd2e6, CRL_REG_LEN_08BIT, 0x01}, + {0xd2e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e8, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xd2ea, CRL_REG_LEN_08BIT, 0x28}, + {0xd2eb, CRL_REG_LEN_08BIT, 0x14}, + {0xd2ec, CRL_REG_LEN_08BIT, 0x84}, + {0xd2ed, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ef, CRL_REG_LEN_08BIT, 0x18}, + {0xd2f0, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2f1, CRL_REG_LEN_08BIT, 0x66}, + {0xd2f2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f4, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f7, CRL_REG_LEN_08BIT, 0x43}, + {0xd2f8, CRL_REG_LEN_08BIT, 0x9d}, + {0xd2f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fc, CRL_REG_LEN_08BIT, 0x84}, + {0xd2fd, CRL_REG_LEN_08BIT, 0x61}, + {0xd2fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ff, CRL_REG_LEN_08BIT, 0x18}, + {0xd300, CRL_REG_LEN_08BIT, 0xbd}, + {0xd301, CRL_REG_LEN_08BIT, 0xa3}, + {0xd302, CRL_REG_LEN_08BIT, 0x01}, + {0xd303, CRL_REG_LEN_08BIT, 0x00}, + {0xd304, CRL_REG_LEN_08BIT, 0x10}, + {0xd305, CRL_REG_LEN_08BIT, 0x00}, + {0xd306, CRL_REG_LEN_08BIT, 0x00}, + {0xd307, CRL_REG_LEN_08BIT, 0x03}, + {0xd308, CRL_REG_LEN_08BIT, 0x9c}, + {0xd309, CRL_REG_LEN_08BIT, 0x80}, + {0xd30a, CRL_REG_LEN_08BIT, 0x01}, + {0xd30b, CRL_REG_LEN_08BIT, 0x00}, + {0xd30c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd30d, CRL_REG_LEN_08BIT, 0x01}, + {0xd30e, CRL_REG_LEN_08BIT, 0x20}, + {0xd30f, CRL_REG_LEN_08BIT, 0x18}, + {0xd310, CRL_REG_LEN_08BIT, 0x18}, + {0xd311, CRL_REG_LEN_08BIT, 0x60}, + {0xd312, CRL_REG_LEN_08BIT, 0x80}, + {0xd313, CRL_REG_LEN_08BIT, 0x06}, + {0xd314, CRL_REG_LEN_08BIT, 0x85}, + {0xd315, CRL_REG_LEN_08BIT, 0x01}, + {0xd316, CRL_REG_LEN_08BIT, 0x00}, + {0xd317, CRL_REG_LEN_08BIT, 0x14}, + {0xd318, CRL_REG_LEN_08BIT, 0xa8}, + {0xd319, CRL_REG_LEN_08BIT, 0x83}, + {0xd31a, CRL_REG_LEN_08BIT, 0x38}, + {0xd31b, CRL_REG_LEN_08BIT, 0x29}, + {0xd31c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd31d, CRL_REG_LEN_08BIT, 0xc3}, + {0xd31e, CRL_REG_LEN_08BIT, 0x40}, + {0xd31f, CRL_REG_LEN_08BIT, 0x08}, + {0xd320, CRL_REG_LEN_08BIT, 0x8c}, + {0xd321, CRL_REG_LEN_08BIT, 0x84}, + {0xd322, CRL_REG_LEN_08BIT, 0x00}, + {0xd323, CRL_REG_LEN_08BIT, 0x00}, + {0xd324, CRL_REG_LEN_08BIT, 0xa8}, + {0xd325, CRL_REG_LEN_08BIT, 0xa3}, + {0xd326, CRL_REG_LEN_08BIT, 0x38}, + {0xd327, CRL_REG_LEN_08BIT, 0x2a}, + {0xd328, CRL_REG_LEN_08BIT, 0xa8}, + {0xd329, CRL_REG_LEN_08BIT, 0xe3}, + {0xd32a, CRL_REG_LEN_08BIT, 0x40}, + {0xd32b, CRL_REG_LEN_08BIT, 0x09}, + {0xd32c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd32d, CRL_REG_LEN_08BIT, 0x64}, + {0xd32e, CRL_REG_LEN_08BIT, 0x40}, + {0xd32f, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd330, CRL_REG_LEN_08BIT, 0xd8}, + {0xd331, CRL_REG_LEN_08BIT, 0x06}, + {0xd332, CRL_REG_LEN_08BIT, 0x18}, + {0xd333, CRL_REG_LEN_08BIT, 0x00}, + {0xd334, CRL_REG_LEN_08BIT, 0x8c}, + {0xd335, CRL_REG_LEN_08BIT, 0x65}, + {0xd336, CRL_REG_LEN_08BIT, 0x00}, + {0xd337, CRL_REG_LEN_08BIT, 0x00}, + {0xd338, CRL_REG_LEN_08BIT, 0x84}, + {0xd339, CRL_REG_LEN_08BIT, 0x81}, + {0xd33a, CRL_REG_LEN_08BIT, 0x00}, + {0xd33b, CRL_REG_LEN_08BIT, 0x18}, + {0xd33c, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33d, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33e, CRL_REG_LEN_08BIT, 0x20}, + {0xd33f, CRL_REG_LEN_08BIT, 0x00}, + {0xd340, CRL_REG_LEN_08BIT, 0xd8}, + {0xd341, CRL_REG_LEN_08BIT, 0x07}, + {0xd342, CRL_REG_LEN_08BIT, 0xf8}, + {0xd343, CRL_REG_LEN_08BIT, 0x00}, + {0xd344, CRL_REG_LEN_08BIT, 0x03}, + {0xd345, CRL_REG_LEN_08BIT, 0xff}, + {0xd346, CRL_REG_LEN_08BIT, 0xff}, + {0xd347, CRL_REG_LEN_08BIT, 0x6f}, + {0xd348, CRL_REG_LEN_08BIT, 0x18}, + {0xd349, CRL_REG_LEN_08BIT, 0x60}, + {0xd34a, CRL_REG_LEN_08BIT, 0x00}, + {0xd34b, CRL_REG_LEN_08BIT, 0x01}, + {0xd34c, CRL_REG_LEN_08BIT, 0x0f}, + {0xd34d, CRL_REG_LEN_08BIT, 0xff}, + {0xd34e, CRL_REG_LEN_08BIT, 0xff}, + {0xd34f, CRL_REG_LEN_08BIT, 0x9d}, + {0xd350, CRL_REG_LEN_08BIT, 0x18}, + {0xd351, CRL_REG_LEN_08BIT, 0x60}, + {0xd352, CRL_REG_LEN_08BIT, 0x80}, + {0xd353, CRL_REG_LEN_08BIT, 0x06}, + {0xd354, CRL_REG_LEN_08BIT, 0xa8}, + {0xd355, CRL_REG_LEN_08BIT, 0x83}, + {0xd356, CRL_REG_LEN_08BIT, 0x6e}, + {0xd357, CRL_REG_LEN_08BIT, 0x43}, + {0xd358, CRL_REG_LEN_08BIT, 0xa8}, + {0xd359, CRL_REG_LEN_08BIT, 0xa3}, + {0xd35a, CRL_REG_LEN_08BIT, 0x38}, + {0xd35b, CRL_REG_LEN_08BIT, 0x0f}, + {0xd35c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd35d, CRL_REG_LEN_08BIT, 0x84}, + {0xd35e, CRL_REG_LEN_08BIT, 0x00}, + {0xd35f, CRL_REG_LEN_08BIT, 0x00}, + {0xd360, CRL_REG_LEN_08BIT, 0xa8}, + {0xd361, CRL_REG_LEN_08BIT, 0xc3}, + {0xd362, CRL_REG_LEN_08BIT, 0x38}, + {0xd363, CRL_REG_LEN_08BIT, 0x0e}, + {0xd364, CRL_REG_LEN_08BIT, 0xa8}, + {0xd365, CRL_REG_LEN_08BIT, 0xe3}, + {0xd366, CRL_REG_LEN_08BIT, 0x6e}, + {0xd367, CRL_REG_LEN_08BIT, 0x42}, + {0xd368, CRL_REG_LEN_08BIT, 0xd8}, + {0xd369, CRL_REG_LEN_08BIT, 0x05}, + {0xd36a, CRL_REG_LEN_08BIT, 0x20}, + {0xd36b, CRL_REG_LEN_08BIT, 0x00}, + {0xd36c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd36d, CRL_REG_LEN_08BIT, 0x67}, + {0xd36e, CRL_REG_LEN_08BIT, 0x00}, + {0xd36f, CRL_REG_LEN_08BIT, 0x00}, + {0xd370, CRL_REG_LEN_08BIT, 0xd8}, + {0xd371, CRL_REG_LEN_08BIT, 0x06}, + {0xd372, CRL_REG_LEN_08BIT, 0x18}, + {0xd373, CRL_REG_LEN_08BIT, 0x00}, + {0xd374, CRL_REG_LEN_08BIT, 0x18}, + {0xd375, CRL_REG_LEN_08BIT, 0x60}, + {0xd376, CRL_REG_LEN_08BIT, 0x80}, + {0xd377, CRL_REG_LEN_08BIT, 0x01}, + {0xd378, CRL_REG_LEN_08BIT, 0xa8}, + {0xd379, CRL_REG_LEN_08BIT, 0x63}, + {0xd37a, CRL_REG_LEN_08BIT, 0x00}, + {0xd37b, CRL_REG_LEN_08BIT, 0xc8}, + {0xd37c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd37d, CRL_REG_LEN_08BIT, 0x63}, + {0xd37e, CRL_REG_LEN_08BIT, 0x00}, + {0xd37f, CRL_REG_LEN_08BIT, 0x00}, + {0xd380, CRL_REG_LEN_08BIT, 0xbc}, + {0xd381, CRL_REG_LEN_08BIT, 0x23}, + {0xd382, CRL_REG_LEN_08BIT, 0x00}, + {0xd383, CRL_REG_LEN_08BIT, 0x01}, + {0xd384, CRL_REG_LEN_08BIT, 0x10}, + {0xd385, CRL_REG_LEN_08BIT, 0x00}, + {0xd386, CRL_REG_LEN_08BIT, 0x00}, + {0xd387, CRL_REG_LEN_08BIT, 0x28}, + {0xd388, CRL_REG_LEN_08BIT, 0x9c}, + {0xd389, CRL_REG_LEN_08BIT, 0xa0}, + {0xd38a, CRL_REG_LEN_08BIT, 0x00}, + {0xd38b, CRL_REG_LEN_08BIT, 0x00}, + {0xd38c, CRL_REG_LEN_08BIT, 0x00}, + {0xd38d, CRL_REG_LEN_08BIT, 0x00}, + {0xd38e, CRL_REG_LEN_08BIT, 0x00}, + {0xd38f, CRL_REG_LEN_08BIT, 0x08}, + 
{0xd390, CRL_REG_LEN_08BIT, 0x15}, + {0xd391, CRL_REG_LEN_08BIT, 0x00}, + {0xd392, CRL_REG_LEN_08BIT, 0x00}, + {0xd393, CRL_REG_LEN_08BIT, 0x00}, + {0xd394, CRL_REG_LEN_08BIT, 0xe0}, + {0xd395, CRL_REG_LEN_08BIT, 0x6c}, + {0xd396, CRL_REG_LEN_08BIT, 0x28}, + {0xd397, CRL_REG_LEN_08BIT, 0x02}, + {0xd398, CRL_REG_LEN_08BIT, 0xe0}, + {0xd399, CRL_REG_LEN_08BIT, 0x84}, + {0xd39a, CRL_REG_LEN_08BIT, 0x28}, + {0xd39b, CRL_REG_LEN_08BIT, 0x02}, + {0xd39c, CRL_REG_LEN_08BIT, 0x07}, + {0xd39d, CRL_REG_LEN_08BIT, 0xff}, + {0xd39e, CRL_REG_LEN_08BIT, 0xf8}, + {0xd39f, CRL_REG_LEN_08BIT, 0x22}, + {0xd3a0, CRL_REG_LEN_08BIT, 0xb8}, + {0xd3a1, CRL_REG_LEN_08BIT, 0x63}, + {0xd3a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd3a3, CRL_REG_LEN_08BIT, 0x08}, + {0xd3a4, CRL_REG_LEN_08BIT, 0x03}, + {0xd3a5, CRL_REG_LEN_08BIT, 0xff}, + {0xd3a6, CRL_REG_LEN_08BIT, 0xff}, + {0xd3a7, CRL_REG_LEN_08BIT, 0xb2}, + {0xd3a8, CRL_REG_LEN_08BIT, 0x85}, + {0xd3a9, CRL_REG_LEN_08BIT, 0x4e}, + {0xd3aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ac, CRL_REG_LEN_08BIT, 0x18}, + {0xd3ad, CRL_REG_LEN_08BIT, 0xe0}, + {0xd3ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd3af, CRL_REG_LEN_08BIT, 0x01}, + {0xd3b0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3b1, CRL_REG_LEN_08BIT, 0xe7}, + {0xd3b2, CRL_REG_LEN_08BIT, 0x06}, + {0xd3b3, CRL_REG_LEN_08BIT, 0x55}, + {0xd3b4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3b5, CRL_REG_LEN_08BIT, 0x87}, + {0xd3b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd3b9, CRL_REG_LEN_08BIT, 0x64}, + {0xd3ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd3bb, CRL_REG_LEN_08BIT, 0x02}, + {0xd3bc, CRL_REG_LEN_08BIT, 0x19}, + {0xd3bd, CRL_REG_LEN_08BIT, 0x00}, + {0xd3be, CRL_REG_LEN_08BIT, 0x80}, + {0xd3bf, CRL_REG_LEN_08BIT, 0x06}, + {0xd3c0, CRL_REG_LEN_08BIT, 0xe0}, + {0xd3c1, CRL_REG_LEN_08BIT, 0x63}, + {0xd3c2, CRL_REG_LEN_08BIT, 0x20}, + {0xd3c3, CRL_REG_LEN_08BIT, 0x00}, + {0xd3c4, CRL_REG_LEN_08BIT, 0xa9}, + {0xd3c5, CRL_REG_LEN_08BIT, 0x08}, + {0xd3c6, CRL_REG_LEN_08BIT, 0x56}, + {0xd3c7, CRL_REG_LEN_08BIT, 0x01}, + {0xd3c8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd3c9, CRL_REG_LEN_08BIT, 0x63}, + {0xd3ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd3cb, CRL_REG_LEN_08BIT, 0x04}, + {0xd3cc, CRL_REG_LEN_08BIT, 0x18}, + {0xd3cd, CRL_REG_LEN_08BIT, 0x80}, + {0xd3ce, CRL_REG_LEN_08BIT, 0x80}, + {0xd3cf, CRL_REG_LEN_08BIT, 0x01}, + {0xd3d0, CRL_REG_LEN_08BIT, 0xe0}, + {0xd3d1, CRL_REG_LEN_08BIT, 0xc5}, + {0xd3d2, CRL_REG_LEN_08BIT, 0x40}, + {0xd3d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd3d4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd3d5, CRL_REG_LEN_08BIT, 0x63}, + {0xd3d6, CRL_REG_LEN_08BIT, 0x28}, + {0xd3d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3d8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3d9, CRL_REG_LEN_08BIT, 0x84}, + {0xd3da, CRL_REG_LEN_08BIT, 0x1d}, + {0xd3db, CRL_REG_LEN_08BIT, 0x00}, + {0xd3dc, CRL_REG_LEN_08BIT, 0x9c}, + {0xd3dd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd3de, CRL_REG_LEN_08BIT, 0x00}, + {0xd3df, CRL_REG_LEN_08BIT, 0x01}, + {0xd3e0, CRL_REG_LEN_08BIT, 0xe0}, + {0xd3e1, CRL_REG_LEN_08BIT, 0x63}, + {0xd3e2, CRL_REG_LEN_08BIT, 0x20}, + {0xd3e3, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e4, CRL_REG_LEN_08BIT, 0xbd}, + {0xd3e5, CRL_REG_LEN_08BIT, 0x45}, + {0xd3e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e7, CRL_REG_LEN_08BIT, 0x48}, + {0xd3e8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3e9, CRL_REG_LEN_08BIT, 0x63}, + {0xd3ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd3eb, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ec, CRL_REG_LEN_08BIT, 0xd8}, + {0xd3ed, CRL_REG_LEN_08BIT, 0x06}, + {0xd3ee, CRL_REG_LEN_08BIT, 0x18}, + {0xd3ef, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd3f0, CRL_REG_LEN_08BIT, 0x0f}, + {0xd3f1, CRL_REG_LEN_08BIT, 0xff}, + {0xd3f2, CRL_REG_LEN_08BIT, 0xff}, + {0xd3f3, CRL_REG_LEN_08BIT, 0xf1}, + {0xd3f4, CRL_REG_LEN_08BIT, 0x15}, + {0xd3f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f8, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd3fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd3fb, CRL_REG_LEN_08BIT, 0x0b}, + {0xd3fc, CRL_REG_LEN_08BIT, 0x15}, + {0xd3fd, CRL_REG_LEN_08BIT, 0x00}, + {0xd3fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ff, CRL_REG_LEN_08BIT, 0x00}, + {0xd400, CRL_REG_LEN_08BIT, 0x03}, + {0xd401, CRL_REG_LEN_08BIT, 0xff}, + {0xd402, CRL_REG_LEN_08BIT, 0xff}, + {0xd403, CRL_REG_LEN_08BIT, 0xc4}, + {0xd404, CRL_REG_LEN_08BIT, 0xd4}, + {0xd405, CRL_REG_LEN_08BIT, 0x01}, + {0xd406, CRL_REG_LEN_08BIT, 0x40}, + {0xd407, CRL_REG_LEN_08BIT, 0x18}, + {0xd408, CRL_REG_LEN_08BIT, 0x03}, + {0xd409, CRL_REG_LEN_08BIT, 0xff}, + {0xd40a, CRL_REG_LEN_08BIT, 0xff}, + {0xd40b, CRL_REG_LEN_08BIT, 0xa8}, + {0xd40c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd40d, CRL_REG_LEN_08BIT, 0x63}, + {0xd40e, CRL_REG_LEN_08BIT, 0x00}, + {0xd40f, CRL_REG_LEN_08BIT, 0xff}, + {0xd410, CRL_REG_LEN_08BIT, 0x9c}, + {0xd411, CRL_REG_LEN_08BIT, 0x60}, + {0xd412, CRL_REG_LEN_08BIT, 0x00}, + {0xd413, CRL_REG_LEN_08BIT, 0x00}, + {0xd414, CRL_REG_LEN_08BIT, 0x03}, + {0xd415, CRL_REG_LEN_08BIT, 0xff}, + {0xd416, CRL_REG_LEN_08BIT, 0xff}, + {0xd417, CRL_REG_LEN_08BIT, 0xb6}, + {0xd418, CRL_REG_LEN_08BIT, 0xd4}, + {0xd419, CRL_REG_LEN_08BIT, 0x01}, + {0xd41a, CRL_REG_LEN_08BIT, 0x18}, + {0xd41b, CRL_REG_LEN_08BIT, 0x14}, + {0xd41c, CRL_REG_LEN_08BIT, 0x03}, + {0xd41d, CRL_REG_LEN_08BIT, 0xff}, + {0xd41e, CRL_REG_LEN_08BIT, 0xff}, + {0xd41f, CRL_REG_LEN_08BIT, 0xa9}, + {0xd420, CRL_REG_LEN_08BIT, 0x9d}, + {0xd421, CRL_REG_LEN_08BIT, 0x6b}, + {0xd422, CRL_REG_LEN_08BIT, 0x00}, + {0xd423, CRL_REG_LEN_08BIT, 0xff}, + {0xd424, CRL_REG_LEN_08BIT, 0x85}, + {0xd425, CRL_REG_LEN_08BIT, 0x21}, + {0xd426, CRL_REG_LEN_08BIT, 0x00}, + {0xd427, CRL_REG_LEN_08BIT, 0x00}, + {0xd428, CRL_REG_LEN_08BIT, 0x85}, + {0xd429, CRL_REG_LEN_08BIT, 0x41}, + {0xd42a, CRL_REG_LEN_08BIT, 0x00}, + {0xd42b, CRL_REG_LEN_08BIT, 0x04}, + {0xd42c, CRL_REG_LEN_08BIT, 0x85}, + {0xd42d, CRL_REG_LEN_08BIT, 0x81}, + {0xd42e, CRL_REG_LEN_08BIT, 0x00}, + {0xd42f, CRL_REG_LEN_08BIT, 0x08}, + {0xd430, CRL_REG_LEN_08BIT, 0x85}, + {0xd431, CRL_REG_LEN_08BIT, 0xc1}, + {0xd432, CRL_REG_LEN_08BIT, 0x00}, + {0xd433, CRL_REG_LEN_08BIT, 0x0c}, + {0xd434, CRL_REG_LEN_08BIT, 0x86}, + {0xd435, CRL_REG_LEN_08BIT, 0x01}, + {0xd436, CRL_REG_LEN_08BIT, 0x00}, + {0xd437, CRL_REG_LEN_08BIT, 0x10}, + {0xd438, CRL_REG_LEN_08BIT, 0x44}, + {0xd439, CRL_REG_LEN_08BIT, 0x00}, + {0xd43a, CRL_REG_LEN_08BIT, 0x48}, + {0xd43b, CRL_REG_LEN_08BIT, 0x00}, + {0xd43c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd43d, CRL_REG_LEN_08BIT, 0x21}, + {0xd43e, CRL_REG_LEN_08BIT, 0x00}, + {0xd43f, CRL_REG_LEN_08BIT, 0x1c}, + {0xd440, CRL_REG_LEN_08BIT, 0x9c}, + {0xd441, CRL_REG_LEN_08BIT, 0x21}, + {0xd442, CRL_REG_LEN_08BIT, 0xff}, + {0xd443, CRL_REG_LEN_08BIT, 0xfc}, + {0xd444, CRL_REG_LEN_08BIT, 0xd4}, + {0xd445, CRL_REG_LEN_08BIT, 0x01}, + {0xd446, CRL_REG_LEN_08BIT, 0x48}, + {0xd447, CRL_REG_LEN_08BIT, 0x00}, + {0xd448, CRL_REG_LEN_08BIT, 0x18}, + {0xd449, CRL_REG_LEN_08BIT, 0x60}, + {0xd44a, CRL_REG_LEN_08BIT, 0x00}, + {0xd44b, CRL_REG_LEN_08BIT, 0x01}, + {0xd44c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd44d, CRL_REG_LEN_08BIT, 0x63}, + {0xd44e, CRL_REG_LEN_08BIT, 0x07}, + {0xd44f, CRL_REG_LEN_08BIT, 0x80}, + 
{0xd450, CRL_REG_LEN_08BIT, 0x8c}, + {0xd451, CRL_REG_LEN_08BIT, 0x63}, + {0xd452, CRL_REG_LEN_08BIT, 0x00}, + {0xd453, CRL_REG_LEN_08BIT, 0x68}, + {0xd454, CRL_REG_LEN_08BIT, 0xbc}, + {0xd455, CRL_REG_LEN_08BIT, 0x03}, + {0xd456, CRL_REG_LEN_08BIT, 0x00}, + {0xd457, CRL_REG_LEN_08BIT, 0x00}, + {0xd458, CRL_REG_LEN_08BIT, 0x10}, + {0xd459, CRL_REG_LEN_08BIT, 0x00}, + {0xd45a, CRL_REG_LEN_08BIT, 0x00}, + {0xd45b, CRL_REG_LEN_08BIT, 0x0c}, + {0xd45c, CRL_REG_LEN_08BIT, 0x15}, + {0xd45d, CRL_REG_LEN_08BIT, 0x00}, + {0xd45e, CRL_REG_LEN_08BIT, 0x00}, + {0xd45f, CRL_REG_LEN_08BIT, 0x00}, + {0xd460, CRL_REG_LEN_08BIT, 0x07}, + {0xd461, CRL_REG_LEN_08BIT, 0xff}, + {0xd462, CRL_REG_LEN_08BIT, 0xd9}, + {0xd463, CRL_REG_LEN_08BIT, 0x7c}, + {0xd464, CRL_REG_LEN_08BIT, 0x15}, + {0xd465, CRL_REG_LEN_08BIT, 0x00}, + {0xd466, CRL_REG_LEN_08BIT, 0x00}, + {0xd467, CRL_REG_LEN_08BIT, 0x00}, + {0xd468, CRL_REG_LEN_08BIT, 0x18}, + {0xd469, CRL_REG_LEN_08BIT, 0x60}, + {0xd46a, CRL_REG_LEN_08BIT, 0x80}, + {0xd46b, CRL_REG_LEN_08BIT, 0x06}, + {0xd46c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd46d, CRL_REG_LEN_08BIT, 0x63}, + {0xd46e, CRL_REG_LEN_08BIT, 0xc4}, + {0xd46f, CRL_REG_LEN_08BIT, 0xb8}, + {0xd470, CRL_REG_LEN_08BIT, 0x8c}, + {0xd471, CRL_REG_LEN_08BIT, 0x63}, + {0xd472, CRL_REG_LEN_08BIT, 0x00}, + {0xd473, CRL_REG_LEN_08BIT, 0x00}, + {0xd474, CRL_REG_LEN_08BIT, 0xbc}, + {0xd475, CRL_REG_LEN_08BIT, 0x23}, + {0xd476, CRL_REG_LEN_08BIT, 0x00}, + {0xd477, CRL_REG_LEN_08BIT, 0x01}, + {0xd478, CRL_REG_LEN_08BIT, 0x10}, + {0xd479, CRL_REG_LEN_08BIT, 0x00}, + {0xd47a, CRL_REG_LEN_08BIT, 0x00}, + {0xd47b, CRL_REG_LEN_08BIT, 0x25}, + {0xd47c, CRL_REG_LEN_08BIT, 0x9d}, + {0xd47d, CRL_REG_LEN_08BIT, 0x00}, + {0xd47e, CRL_REG_LEN_08BIT, 0x00}, + {0xd47f, CRL_REG_LEN_08BIT, 0x00}, + {0xd480, CRL_REG_LEN_08BIT, 0x00}, + {0xd481, CRL_REG_LEN_08BIT, 0x00}, + {0xd482, CRL_REG_LEN_08BIT, 0x00}, + {0xd483, CRL_REG_LEN_08BIT, 0x0b}, + {0xd484, CRL_REG_LEN_08BIT, 0xb8}, + {0xd485, CRL_REG_LEN_08BIT, 0xe8}, + {0xd486, CRL_REG_LEN_08BIT, 0x00}, + {0xd487, CRL_REG_LEN_08BIT, 0x02}, + {0xd488, CRL_REG_LEN_08BIT, 0x07}, + {0xd489, CRL_REG_LEN_08BIT, 0xff}, + {0xd48a, CRL_REG_LEN_08BIT, 0xd6}, + {0xd48b, CRL_REG_LEN_08BIT, 0x08}, + {0xd48c, CRL_REG_LEN_08BIT, 0x15}, + {0xd48d, CRL_REG_LEN_08BIT, 0x00}, + {0xd48e, CRL_REG_LEN_08BIT, 0x00}, + {0xd48f, CRL_REG_LEN_08BIT, 0x00}, + {0xd490, CRL_REG_LEN_08BIT, 0x18}, + {0xd491, CRL_REG_LEN_08BIT, 0x60}, + {0xd492, CRL_REG_LEN_08BIT, 0x80}, + {0xd493, CRL_REG_LEN_08BIT, 0x06}, + {0xd494, CRL_REG_LEN_08BIT, 0xa8}, + {0xd495, CRL_REG_LEN_08BIT, 0x63}, + {0xd496, CRL_REG_LEN_08BIT, 0xc4}, + {0xd497, CRL_REG_LEN_08BIT, 0xb8}, + {0xd498, CRL_REG_LEN_08BIT, 0x8c}, + {0xd499, CRL_REG_LEN_08BIT, 0x63}, + {0xd49a, CRL_REG_LEN_08BIT, 0x00}, + {0xd49b, CRL_REG_LEN_08BIT, 0x00}, + {0xd49c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd49d, CRL_REG_LEN_08BIT, 0x23}, + {0xd49e, CRL_REG_LEN_08BIT, 0x00}, + {0xd49f, CRL_REG_LEN_08BIT, 0x01}, + {0xd4a0, CRL_REG_LEN_08BIT, 0x10}, + {0xd4a1, CRL_REG_LEN_08BIT, 0x00}, + {0xd4a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd4a3, CRL_REG_LEN_08BIT, 0x1b}, + {0xd4a4, CRL_REG_LEN_08BIT, 0x9d}, + {0xd4a5, CRL_REG_LEN_08BIT, 0x00}, + {0xd4a6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4a8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd4a9, CRL_REG_LEN_08BIT, 0xe8}, + {0xd4aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ab, CRL_REG_LEN_08BIT, 0x02}, + {0xd4ac, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4ad, CRL_REG_LEN_08BIT, 0xc0}, + {0xd4ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd4af, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd4b0, CRL_REG_LEN_08BIT, 0x18}, + {0xd4b1, CRL_REG_LEN_08BIT, 0xa0}, + {0xd4b2, CRL_REG_LEN_08BIT, 0x80}, + {0xd4b3, CRL_REG_LEN_08BIT, 0x06}, + {0xd4b4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd4b5, CRL_REG_LEN_08BIT, 0x67}, + {0xd4b6, CRL_REG_LEN_08BIT, 0x30}, + {0xd4b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4b9, CRL_REG_LEN_08BIT, 0xa5}, + {0xd4ba, CRL_REG_LEN_08BIT, 0xce}, + {0xd4bb, CRL_REG_LEN_08BIT, 0xb0}, + {0xd4bc, CRL_REG_LEN_08BIT, 0x19}, + {0xd4bd, CRL_REG_LEN_08BIT, 0x60}, + {0xd4be, CRL_REG_LEN_08BIT, 0x00}, + {0xd4bf, CRL_REG_LEN_08BIT, 0x01}, + {0xd4c0, CRL_REG_LEN_08BIT, 0xa9}, + {0xd4c1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd4c2, CRL_REG_LEN_08BIT, 0x06}, + {0xd4c3, CRL_REG_LEN_08BIT, 0x14}, + {0xd4c4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd4c5, CRL_REG_LEN_08BIT, 0x83}, + {0xd4c6, CRL_REG_LEN_08BIT, 0x28}, + {0xd4c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4c8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4c9, CRL_REG_LEN_08BIT, 0xc6}, + {0xd4ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd4cb, CRL_REG_LEN_08BIT, 0x01}, + {0xd4cc, CRL_REG_LEN_08BIT, 0xe0}, + {0xd4cd, CRL_REG_LEN_08BIT, 0x63}, + {0xd4ce, CRL_REG_LEN_08BIT, 0x18}, + {0xd4cf, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd4d1, CRL_REG_LEN_08BIT, 0x84}, + {0xd4d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd4d5, CRL_REG_LEN_08BIT, 0xa3}, + {0xd4d6, CRL_REG_LEN_08BIT, 0x58}, + {0xd4d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d8, CRL_REG_LEN_08BIT, 0xa4}, + {0xd4d9, CRL_REG_LEN_08BIT, 0xc6}, + {0xd4da, CRL_REG_LEN_08BIT, 0x00}, + {0xd4db, CRL_REG_LEN_08BIT, 0xff}, + {0xd4dc, CRL_REG_LEN_08BIT, 0xb8}, + {0xd4dd, CRL_REG_LEN_08BIT, 0x64}, + {0xd4de, CRL_REG_LEN_08BIT, 0x00}, + {0xd4df, CRL_REG_LEN_08BIT, 0x18}, + {0xd4e0, CRL_REG_LEN_08BIT, 0xbc}, + {0xd4e1, CRL_REG_LEN_08BIT, 0x46}, + {0xd4e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e3, CRL_REG_LEN_08BIT, 0x03}, + {0xd4e4, CRL_REG_LEN_08BIT, 0x94}, + {0xd4e5, CRL_REG_LEN_08BIT, 0x85}, + {0xd4e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd4e9, CRL_REG_LEN_08BIT, 0x63}, + {0xd4ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd4eb, CRL_REG_LEN_08BIT, 0x98}, + {0xd4ec, CRL_REG_LEN_08BIT, 0xe0}, + {0xd4ed, CRL_REG_LEN_08BIT, 0x64}, + {0xd4ee, CRL_REG_LEN_08BIT, 0x18}, + {0xd4ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f0, CRL_REG_LEN_08BIT, 0x0f}, + {0xd4f1, CRL_REG_LEN_08BIT, 0xff}, + {0xd4f2, CRL_REG_LEN_08BIT, 0xff}, + {0xd4f3, CRL_REG_LEN_08BIT, 0xf0}, + {0xd4f4, CRL_REG_LEN_08BIT, 0xdc}, + {0xd4f5, CRL_REG_LEN_08BIT, 0x05}, + {0xd4f6, CRL_REG_LEN_08BIT, 0x18}, + {0xd4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4f9, CRL_REG_LEN_08BIT, 0x68}, + {0xd4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fb, CRL_REG_LEN_08BIT, 0x01}, + {0xd4fc, CRL_REG_LEN_08BIT, 0xa5}, + {0xd4fd, CRL_REG_LEN_08BIT, 0x03}, + {0xd4fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ff, CRL_REG_LEN_08BIT, 0xff}, + {0xd500, CRL_REG_LEN_08BIT, 0xbc}, + {0xd501, CRL_REG_LEN_08BIT, 0x48}, + {0xd502, CRL_REG_LEN_08BIT, 0x00}, + {0xd503, CRL_REG_LEN_08BIT, 0x01}, + {0xd504, CRL_REG_LEN_08BIT, 0x0f}, + {0xd505, CRL_REG_LEN_08BIT, 0xff}, + {0xd506, CRL_REG_LEN_08BIT, 0xff}, + {0xd507, CRL_REG_LEN_08BIT, 0xea}, + {0xd508, CRL_REG_LEN_08BIT, 0xb8}, + {0xd509, CRL_REG_LEN_08BIT, 0xe8}, + {0xd50a, CRL_REG_LEN_08BIT, 0x00}, + {0xd50b, CRL_REG_LEN_08BIT, 0x02}, + {0xd50c, CRL_REG_LEN_08BIT, 0x18}, + {0xd50d, CRL_REG_LEN_08BIT, 0x60}, + {0xd50e, CRL_REG_LEN_08BIT, 0x00}, + {0xd50f, CRL_REG_LEN_08BIT, 0x01}, + 
{0xd510, CRL_REG_LEN_08BIT, 0xa8}, + {0xd511, CRL_REG_LEN_08BIT, 0x63}, + {0xd512, CRL_REG_LEN_08BIT, 0x06}, + {0xd513, CRL_REG_LEN_08BIT, 0x14}, + {0xd514, CRL_REG_LEN_08BIT, 0x07}, + {0xd515, CRL_REG_LEN_08BIT, 0xff}, + {0xd516, CRL_REG_LEN_08BIT, 0xe3}, + {0xd517, CRL_REG_LEN_08BIT, 0xe9}, + {0xd518, CRL_REG_LEN_08BIT, 0x9c}, + {0xd519, CRL_REG_LEN_08BIT, 0x83}, + {0xd51a, CRL_REG_LEN_08BIT, 0x00}, + {0xd51b, CRL_REG_LEN_08BIT, 0x10}, + {0xd51c, CRL_REG_LEN_08BIT, 0x85}, + {0xd51d, CRL_REG_LEN_08BIT, 0x21}, + {0xd51e, CRL_REG_LEN_08BIT, 0x00}, + {0xd51f, CRL_REG_LEN_08BIT, 0x00}, + {0xd520, CRL_REG_LEN_08BIT, 0x44}, + {0xd521, CRL_REG_LEN_08BIT, 0x00}, + {0xd522, CRL_REG_LEN_08BIT, 0x48}, + {0xd523, CRL_REG_LEN_08BIT, 0x00}, + {0xd524, CRL_REG_LEN_08BIT, 0x9c}, + {0xd525, CRL_REG_LEN_08BIT, 0x21}, + {0xd526, CRL_REG_LEN_08BIT, 0x00}, + {0xd527, CRL_REG_LEN_08BIT, 0x04}, + {0xd528, CRL_REG_LEN_08BIT, 0x18}, + {0xd529, CRL_REG_LEN_08BIT, 0x60}, + {0xd52a, CRL_REG_LEN_08BIT, 0x00}, + {0xd52b, CRL_REG_LEN_08BIT, 0x01}, + {0xd52c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd52d, CRL_REG_LEN_08BIT, 0x80}, + {0xd52e, CRL_REG_LEN_08BIT, 0xff}, + {0xd52f, CRL_REG_LEN_08BIT, 0xff}, + {0xd530, CRL_REG_LEN_08BIT, 0xa8}, + {0xd531, CRL_REG_LEN_08BIT, 0x63}, + {0xd532, CRL_REG_LEN_08BIT, 0x09}, + {0xd533, CRL_REG_LEN_08BIT, 0xef}, + {0xd534, CRL_REG_LEN_08BIT, 0xd8}, + {0xd535, CRL_REG_LEN_08BIT, 0x03}, + {0xd536, CRL_REG_LEN_08BIT, 0x20}, + {0xd537, CRL_REG_LEN_08BIT, 0x00}, + {0xd538, CRL_REG_LEN_08BIT, 0x18}, + {0xd539, CRL_REG_LEN_08BIT, 0x60}, + {0xd53a, CRL_REG_LEN_08BIT, 0x80}, + {0xd53b, CRL_REG_LEN_08BIT, 0x06}, + {0xd53c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd53d, CRL_REG_LEN_08BIT, 0x63}, + {0xd53e, CRL_REG_LEN_08BIT, 0xc9}, + {0xd53f, CRL_REG_LEN_08BIT, 0xef}, + {0xd540, CRL_REG_LEN_08BIT, 0xd8}, + {0xd541, CRL_REG_LEN_08BIT, 0x03}, + {0xd542, CRL_REG_LEN_08BIT, 0x20}, + {0xd543, CRL_REG_LEN_08BIT, 0x00}, + {0xd544, CRL_REG_LEN_08BIT, 0x44}, + {0xd545, CRL_REG_LEN_08BIT, 0x00}, + {0xd546, CRL_REG_LEN_08BIT, 0x48}, + {0xd547, CRL_REG_LEN_08BIT, 0x00}, + {0xd548, CRL_REG_LEN_08BIT, 0x15}, + {0xd549, CRL_REG_LEN_08BIT, 0x00}, + {0xd54a, CRL_REG_LEN_08BIT, 0x00}, + {0xd54b, CRL_REG_LEN_08BIT, 0x00}, + {0xd54c, CRL_REG_LEN_08BIT, 0x18}, + {0xd54d, CRL_REG_LEN_08BIT, 0x80}, + {0xd54e, CRL_REG_LEN_08BIT, 0x00}, + {0xd54f, CRL_REG_LEN_08BIT, 0x01}, + {0xd550, CRL_REG_LEN_08BIT, 0xa8}, + {0xd551, CRL_REG_LEN_08BIT, 0x84}, + {0xd552, CRL_REG_LEN_08BIT, 0x0a}, + {0xd553, CRL_REG_LEN_08BIT, 0x12}, + {0xd554, CRL_REG_LEN_08BIT, 0x8c}, + {0xd555, CRL_REG_LEN_08BIT, 0x64}, + {0xd556, CRL_REG_LEN_08BIT, 0x00}, + {0xd557, CRL_REG_LEN_08BIT, 0x00}, + {0xd558, CRL_REG_LEN_08BIT, 0xbc}, + {0xd559, CRL_REG_LEN_08BIT, 0x03}, + {0xd55a, CRL_REG_LEN_08BIT, 0x00}, + {0xd55b, CRL_REG_LEN_08BIT, 0x00}, + {0xd55c, CRL_REG_LEN_08BIT, 0x13}, + {0xd55d, CRL_REG_LEN_08BIT, 0xff}, + {0xd55e, CRL_REG_LEN_08BIT, 0xff}, + {0xd55f, CRL_REG_LEN_08BIT, 0xfe}, + {0xd560, CRL_REG_LEN_08BIT, 0x15}, + {0xd561, CRL_REG_LEN_08BIT, 0x00}, + {0xd562, CRL_REG_LEN_08BIT, 0x00}, + {0xd563, CRL_REG_LEN_08BIT, 0x00}, + {0xd564, CRL_REG_LEN_08BIT, 0x44}, + {0xd565, CRL_REG_LEN_08BIT, 0x00}, + {0xd566, CRL_REG_LEN_08BIT, 0x48}, + {0xd567, CRL_REG_LEN_08BIT, 0x00}, + {0xd568, CRL_REG_LEN_08BIT, 0x15}, + {0xd569, CRL_REG_LEN_08BIT, 0x00}, + {0xd56a, CRL_REG_LEN_08BIT, 0x00}, + {0xd56b, CRL_REG_LEN_08BIT, 0x00}, + {0xd56c, CRL_REG_LEN_08BIT, 0x00}, + {0xd56d, CRL_REG_LEN_08BIT, 0x00}, + {0xd56e, CRL_REG_LEN_08BIT, 0x00}, + {0xd56f, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd570, CRL_REG_LEN_08BIT, 0x00}, + {0xd571, CRL_REG_LEN_08BIT, 0x00}, + {0xd572, CRL_REG_LEN_08BIT, 0x00}, + {0xd573, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0e, CRL_REG_LEN_08BIT, 0x33}, + {0x6f0f, CRL_REG_LEN_08BIT, 0x33}, + {0x460e, CRL_REG_LEN_08BIT, 0x08}, + {0x460f, CRL_REG_LEN_08BIT, 0x01}, + {0x4610, CRL_REG_LEN_08BIT, 0x00}, + {0x4611, CRL_REG_LEN_08BIT, 0x01}, + {0x4612, CRL_REG_LEN_08BIT, 0x00}, + {0x4613, CRL_REG_LEN_08BIT, 0x01}, + {0x4605, CRL_REG_LEN_08BIT, 0x0b}, + {0x4608, CRL_REG_LEN_08BIT, 0x00}, + {0x4609, CRL_REG_LEN_08BIT, 0x08}, + {0x4602, CRL_REG_LEN_08BIT, 0x02}, + {0x4603, CRL_REG_LEN_08BIT, 0xd8}, + {0x6804, CRL_REG_LEN_08BIT, 0x00}, + {0x6805, CRL_REG_LEN_08BIT, 0x06}, + {0x6806, CRL_REG_LEN_08BIT, 0x00}, + {0x5120, CRL_REG_LEN_08BIT, 0x00}, + {0x3510, CRL_REG_LEN_08BIT, 0x00}, + {0x3504, CRL_REG_LEN_08BIT, 0x00}, + {0x6800, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0d, CRL_REG_LEN_08BIT, 0x0f}, + {0x5000, CRL_REG_LEN_08BIT, 0xff}, + {0x5001, CRL_REG_LEN_08BIT, 0xbf}, + {0x5002, CRL_REG_LEN_08BIT, 0x7e}, + {0x5003, CRL_REG_LEN_08BIT, 0x0c}, + {0x503d, CRL_REG_LEN_08BIT, 0x00}, + {0xc450, CRL_REG_LEN_08BIT, 0x01}, + {0xc452, CRL_REG_LEN_08BIT, 0x04}, + {0xc453, CRL_REG_LEN_08BIT, 0x00}, + {0xc454, CRL_REG_LEN_08BIT, 0x00}, + {0xc455, CRL_REG_LEN_08BIT, 0x00}, + {0xc456, CRL_REG_LEN_08BIT, 0x00}, + {0xc457, CRL_REG_LEN_08BIT, 0x00}, + {0xc458, CRL_REG_LEN_08BIT, 0x00}, + {0xc459, CRL_REG_LEN_08BIT, 0x00}, + {0xc45b, CRL_REG_LEN_08BIT, 0x00}, + {0xc45c, CRL_REG_LEN_08BIT, 0x00}, + {0xc45d, CRL_REG_LEN_08BIT, 0x00}, + {0xc45e, CRL_REG_LEN_08BIT, 0x02}, + {0xc45f, CRL_REG_LEN_08BIT, 0x01}, + {0xc460, CRL_REG_LEN_08BIT, 0x01}, + {0xc461, CRL_REG_LEN_08BIT, 0x01}, + {0xc462, CRL_REG_LEN_08BIT, 0x01}, + {0xc464, CRL_REG_LEN_08BIT, 0x88}, + {0xc465, CRL_REG_LEN_08BIT, 0x00}, + {0xc466, CRL_REG_LEN_08BIT, 0x8a}, + {0xc467, CRL_REG_LEN_08BIT, 0x00}, + {0xc468, CRL_REG_LEN_08BIT, 0x86}, + {0xc469, CRL_REG_LEN_08BIT, 0x00}, + {0xc46a, CRL_REG_LEN_08BIT, 0x40}, + {0xc46b, CRL_REG_LEN_08BIT, 0x50}, + {0xc46c, CRL_REG_LEN_08BIT, 0x30}, + {0xc46d, CRL_REG_LEN_08BIT, 0x28}, + {0xc46e, CRL_REG_LEN_08BIT, 0x60}, + {0xc46f, CRL_REG_LEN_08BIT, 0x40}, + {0xc47c, CRL_REG_LEN_08BIT, 0x01}, + {0xc47d, CRL_REG_LEN_08BIT, 0x38}, + {0xc47e, CRL_REG_LEN_08BIT, 0x00}, + {0xc47f, CRL_REG_LEN_08BIT, 0x00}, + {0xc480, CRL_REG_LEN_08BIT, 0x00}, + {0xc481, CRL_REG_LEN_08BIT, 0xff}, + {0xc482, CRL_REG_LEN_08BIT, 0x00}, + {0xc483, CRL_REG_LEN_08BIT, 0x40}, + {0xc484, CRL_REG_LEN_08BIT, 0x00}, + {0xc485, CRL_REG_LEN_08BIT, 0x18}, + {0xc486, CRL_REG_LEN_08BIT, 0x00}, + {0xc487, CRL_REG_LEN_08BIT, 0x18}, + {0xc488, CRL_REG_LEN_08BIT, 0x2e}, + {0xc489, CRL_REG_LEN_08BIT, 0x40}, + {0xc48a, CRL_REG_LEN_08BIT, 0x2e}, + {0xc48b, CRL_REG_LEN_08BIT, 0x40}, + {0xc48c, CRL_REG_LEN_08BIT, 0x00}, + {0xc48d, CRL_REG_LEN_08BIT, 0x04}, + {0xc48e, CRL_REG_LEN_08BIT, 0x00}, + {0xc48f, CRL_REG_LEN_08BIT, 0x04}, + {0xc490, CRL_REG_LEN_08BIT, 0x07}, + {0xc492, CRL_REG_LEN_08BIT, 0x20}, + {0xc493, CRL_REG_LEN_08BIT, 0x08}, + {0xc498, CRL_REG_LEN_08BIT, 0x02}, + {0xc499, CRL_REG_LEN_08BIT, 0x00}, + {0xc49a, CRL_REG_LEN_08BIT, 0x02}, + {0xc49b, CRL_REG_LEN_08BIT, 0x00}, + {0xc49c, CRL_REG_LEN_08BIT, 0x02}, + {0xc49d, CRL_REG_LEN_08BIT, 0x00}, + {0xc49e, CRL_REG_LEN_08BIT, 0x02}, + {0xc49f, CRL_REG_LEN_08BIT, 0x60}, + {0xc4a0, CRL_REG_LEN_08BIT, 0x03}, + {0xc4a1, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a2, CRL_REG_LEN_08BIT, 0x04}, + {0xc4a3, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a5, CRL_REG_LEN_08BIT, 0x10}, + 
{0xc4a6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4a8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a9, CRL_REG_LEN_08BIT, 0x80}, + {0xc4aa, CRL_REG_LEN_08BIT, 0x0d}, + {0xc4ab, CRL_REG_LEN_08BIT, 0x00}, + {0xc4ac, CRL_REG_LEN_08BIT, 0x03}, + {0xc4ad, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4b4, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b5, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b7, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b9, CRL_REG_LEN_08BIT, 0x01}, + {0xc4ba, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, + {0xc4be, CRL_REG_LEN_08BIT, 0x02}, + {0xc4bf, CRL_REG_LEN_08BIT, 0x33}, + {0xc4c8, CRL_REG_LEN_08BIT, 0x03}, + {0xc4c9, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4ca, CRL_REG_LEN_08BIT, 0x0e}, + {0xc4cb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4cc, CRL_REG_LEN_08BIT, 0x0e}, + {0xc4cd, CRL_REG_LEN_08BIT, 0x51}, + {0xc4ce, CRL_REG_LEN_08BIT, 0x0e}, + {0xc4cf, CRL_REG_LEN_08BIT, 0x51}, + {0xc4d0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4d1, CRL_REG_LEN_08BIT, 0x80}, + {0xc4e0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4e1, CRL_REG_LEN_08BIT, 0x02}, + {0xc4e2, CRL_REG_LEN_08BIT, 0x01}, + {0xc4e4, CRL_REG_LEN_08BIT, 0x10}, + {0xc4e5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4e6, CRL_REG_LEN_08BIT, 0x30}, + {0xc4e7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4e8, CRL_REG_LEN_08BIT, 0x50}, + {0xc4e9, CRL_REG_LEN_08BIT, 0x60}, + {0xc4ea, CRL_REG_LEN_08BIT, 0x70}, + {0xc4eb, CRL_REG_LEN_08BIT, 0x80}, + {0xc4ec, CRL_REG_LEN_08BIT, 0x90}, + {0xc4ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xc4ee, CRL_REG_LEN_08BIT, 0xb0}, + {0xc4ef, CRL_REG_LEN_08BIT, 0xc0}, + {0xc4f0, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4f1, CRL_REG_LEN_08BIT, 0xe0}, + {0xc4f2, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4f3, CRL_REG_LEN_08BIT, 0x80}, + {0xc4f4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4f6, CRL_REG_LEN_08BIT, 0x02}, + {0xc4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f8, CRL_REG_LEN_08BIT, 0x04}, + {0xc4f9, CRL_REG_LEN_08BIT, 0x0b}, + {0xc4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4fd, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fe, CRL_REG_LEN_08BIT, 0x04}, + {0xc4ff, CRL_REG_LEN_08BIT, 0x02}, + {0xc500, CRL_REG_LEN_08BIT, 0x48}, + {0xc501, CRL_REG_LEN_08BIT, 0x74}, + {0xc502, CRL_REG_LEN_08BIT, 0x58}, + {0xc503, CRL_REG_LEN_08BIT, 0x80}, + {0xc504, CRL_REG_LEN_08BIT, 0x05}, + {0xc505, CRL_REG_LEN_08BIT, 0x80}, + {0xc506, CRL_REG_LEN_08BIT, 0x03}, + {0xc507, CRL_REG_LEN_08BIT, 0x80}, + {0xc508, CRL_REG_LEN_08BIT, 0x01}, + {0xc509, CRL_REG_LEN_08BIT, 0xc0}, + {0xc50a, CRL_REG_LEN_08BIT, 0x01}, + {0xc50b, CRL_REG_LEN_08BIT, 0xa0}, + {0xc50c, CRL_REG_LEN_08BIT, 0x01}, + {0xc50d, CRL_REG_LEN_08BIT, 0x2c}, + {0xc50e, CRL_REG_LEN_08BIT, 0x01}, + {0xc50f, CRL_REG_LEN_08BIT, 0x0a}, + {0xc510, CRL_REG_LEN_08BIT, 0x00}, + {0xc511, CRL_REG_LEN_08BIT, 0x00}, + {0xc512, CRL_REG_LEN_08BIT, 0xe5}, + {0xc513, CRL_REG_LEN_08BIT, 0x14}, + {0xc514, CRL_REG_LEN_08BIT, 0x04}, + {0xc515, CRL_REG_LEN_08BIT, 0x00}, + {0xc518, CRL_REG_LEN_08BIT, 0x03}, + {0xc519, CRL_REG_LEN_08BIT, 0x48}, + {0xc51a, CRL_REG_LEN_08BIT, 0x07}, + {0xc51b, CRL_REG_LEN_08BIT, 0x70}, + {0xc2e0, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e1, CRL_REG_LEN_08BIT, 0x51}, + {0xc2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e3, CRL_REG_LEN_08BIT, 0xd6}, + {0xc2e4, CRL_REG_LEN_08BIT, 0x01}, + {0xc2e5, CRL_REG_LEN_08BIT, 0x5e}, + {0xc2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xc2ea, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2eb, CRL_REG_LEN_08BIT, 0x90}, + 
{0xc2ed, CRL_REG_LEN_08BIT, 0x00}, + {0xc2ee, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2ef, CRL_REG_LEN_08BIT, 0x64}, + {0xc308, CRL_REG_LEN_08BIT, 0x00}, + {0xc309, CRL_REG_LEN_08BIT, 0x00}, + {0xc30a, CRL_REG_LEN_08BIT, 0x00}, + {0xc30c, CRL_REG_LEN_08BIT, 0x00}, + {0xc30d, CRL_REG_LEN_08BIT, 0x01}, + {0xc30e, CRL_REG_LEN_08BIT, 0x00}, + {0xc30f, CRL_REG_LEN_08BIT, 0x00}, + {0xc310, CRL_REG_LEN_08BIT, 0x01}, + {0xc311, CRL_REG_LEN_08BIT, 0x60}, + {0xc312, CRL_REG_LEN_08BIT, 0xff}, + {0xc313, CRL_REG_LEN_08BIT, 0x08}, + {0xc314, CRL_REG_LEN_08BIT, 0x01}, + {0xc315, CRL_REG_LEN_08BIT, 0x7f}, + {0xc316, CRL_REG_LEN_08BIT, 0xff}, + {0xc317, CRL_REG_LEN_08BIT, 0x0b}, + {0xc318, CRL_REG_LEN_08BIT, 0x00}, + {0xc319, CRL_REG_LEN_08BIT, 0x0c}, + {0xc31a, CRL_REG_LEN_08BIT, 0x00}, + {0xc31b, CRL_REG_LEN_08BIT, 0xe0}, + {0xc31c, CRL_REG_LEN_08BIT, 0x00}, + {0xc31d, CRL_REG_LEN_08BIT, 0x14}, + {0xc31e, CRL_REG_LEN_08BIT, 0x00}, + {0xc31f, CRL_REG_LEN_08BIT, 0xc5}, + {0xc320, CRL_REG_LEN_08BIT, 0xff}, + {0xc321, CRL_REG_LEN_08BIT, 0x4b}, + {0xc322, CRL_REG_LEN_08BIT, 0xff}, + {0xc323, CRL_REG_LEN_08BIT, 0xf0}, + {0xc324, CRL_REG_LEN_08BIT, 0xff}, + {0xc325, CRL_REG_LEN_08BIT, 0xe8}, + {0xc326, CRL_REG_LEN_08BIT, 0x00}, + {0xc327, CRL_REG_LEN_08BIT, 0x46}, + {0xc328, CRL_REG_LEN_08BIT, 0xff}, + {0xc329, CRL_REG_LEN_08BIT, 0xd2}, + {0xc32a, CRL_REG_LEN_08BIT, 0xff}, + {0xc32b, CRL_REG_LEN_08BIT, 0xe4}, + {0xc32c, CRL_REG_LEN_08BIT, 0xff}, + {0xc32d, CRL_REG_LEN_08BIT, 0xbb}, + {0xc32e, CRL_REG_LEN_08BIT, 0x00}, + {0xc32f, CRL_REG_LEN_08BIT, 0x61}, + {0xc330, CRL_REG_LEN_08BIT, 0xff}, + {0xc331, CRL_REG_LEN_08BIT, 0xf9}, + {0xc332, CRL_REG_LEN_08BIT, 0x00}, + {0xc333, CRL_REG_LEN_08BIT, 0xd9}, + {0xc334, CRL_REG_LEN_08BIT, 0x00}, + {0xc335, CRL_REG_LEN_08BIT, 0x2e}, + {0xc336, CRL_REG_LEN_08BIT, 0x00}, + {0xc337, CRL_REG_LEN_08BIT, 0xb1}, + {0xc338, CRL_REG_LEN_08BIT, 0xff}, + {0xc339, CRL_REG_LEN_08BIT, 0x64}, + {0xc33a, CRL_REG_LEN_08BIT, 0xff}, + {0xc33b, CRL_REG_LEN_08BIT, 0xeb}, + {0xc33c, CRL_REG_LEN_08BIT, 0xff}, + {0xc33d, CRL_REG_LEN_08BIT, 0xe8}, + {0xc33e, CRL_REG_LEN_08BIT, 0x00}, + {0xc33f, CRL_REG_LEN_08BIT, 0x48}, + {0xc340, CRL_REG_LEN_08BIT, 0xff}, + {0xc341, CRL_REG_LEN_08BIT, 0xd0}, + {0xc342, CRL_REG_LEN_08BIT, 0xff}, + {0xc343, CRL_REG_LEN_08BIT, 0xed}, + {0xc344, CRL_REG_LEN_08BIT, 0xff}, + {0xc345, CRL_REG_LEN_08BIT, 0xad}, + {0xc346, CRL_REG_LEN_08BIT, 0x00}, + {0xc347, CRL_REG_LEN_08BIT, 0x66}, + {0xc348, CRL_REG_LEN_08BIT, 0x01}, + {0xc349, CRL_REG_LEN_08BIT, 0x00}, + {0x6700, CRL_REG_LEN_08BIT, 0x04}, + {0x6701, CRL_REG_LEN_08BIT, 0x7b}, + {0x6702, CRL_REG_LEN_08BIT, 0xfd}, + {0x6703, CRL_REG_LEN_08BIT, 0xf9}, + {0x6704, CRL_REG_LEN_08BIT, 0x3d}, + {0x6705, CRL_REG_LEN_08BIT, 0x71}, + {0x6706, CRL_REG_LEN_08BIT, 0x78}, + {0x6708, CRL_REG_LEN_08BIT, 0x05}, + {0x6f06, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f07, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0a, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f0b, CRL_REG_LEN_08BIT, 0x00}, + {0x6f00, CRL_REG_LEN_08BIT, 0x03}, + {0xc34c, CRL_REG_LEN_08BIT, 0x01}, + {0xc34d, CRL_REG_LEN_08BIT, 0x00}, + {0xc34e, CRL_REG_LEN_08BIT, 0x46}, + {0xc34f, CRL_REG_LEN_08BIT, 0x55}, + {0xc350, CRL_REG_LEN_08BIT, 0x00}, + {0xc351, CRL_REG_LEN_08BIT, 0x40}, + {0xc352, CRL_REG_LEN_08BIT, 0x00}, + {0xc353, CRL_REG_LEN_08BIT, 0xff}, + {0xc354, CRL_REG_LEN_08BIT, 0x04}, + {0xc355, CRL_REG_LEN_08BIT, 0x08}, + {0xc356, CRL_REG_LEN_08BIT, 0x01}, + {0xc357, CRL_REG_LEN_08BIT, 0xef}, + {0xc358, CRL_REG_LEN_08BIT, 0x30}, + {0xc359, CRL_REG_LEN_08BIT, 0x01}, + {0xc35a, CRL_REG_LEN_08BIT, 0x64}, + 
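/* + * The block below follows the vendor initialization sequence verbatim: + * the back-to-back writes of 0xf0 to 0x3042 are presumably an + * intentional settling idiom, and the {0x0000, CRL_REG_LEN_DELAY, 0x0c} + * entries appear to encode a delay (third field = duration) rather + * than a register write, per the crlmodule register-list convention. + */ +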
{0xc35b, CRL_REG_LEN_08BIT, 0x46}, + {0xc35c, CRL_REG_LEN_08BIT, 0x00}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x301b, CRL_REG_LEN_08BIT, 0xf0}, + {0x301c, CRL_REG_LEN_08BIT, 0xf0}, + {0x301a, CRL_REG_LEN_08BIT, 0xf0}, + {0xceb0, CRL_REG_LEN_08BIT, 0x00}, + {0xceb1, CRL_REG_LEN_08BIT, 0x00}, + {0xceb2, CRL_REG_LEN_08BIT, 0x00}, + {0xceb3, CRL_REG_LEN_08BIT, 0x00}, + {0xceb4, CRL_REG_LEN_08BIT, 0x00}, + {0xceb5, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xceb6, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xceb7, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xc4a0, CRL_REG_LEN_08BIT, 0x03}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xc4a2, CRL_REG_LEN_08BIT, 0x04}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0x3011, CRL_REG_LEN_08BIT, 0x42}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0x5608, CRL_REG_LEN_08BIT, 0x0d}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, +}; + +static struct crl_register_write_rep ov10635_640_480_YUV_HDR[] = { + {0x301b, CRL_REG_LEN_08BIT, 0xff}, + {0x301c, CRL_REG_LEN_08BIT, 0xff}, + {0x301a, CRL_REG_LEN_08BIT, 0xff}, + {0x3011, CRL_REG_LEN_08BIT, 0x42}, + {0x6900, CRL_REG_LEN_08BIT, 0x0c}, + {0x6901, CRL_REG_LEN_08BIT, 0x11}, + {0x3503, CRL_REG_LEN_08BIT, 0x10}, + {0x3025, CRL_REG_LEN_08BIT, 0x03}, + {0x3003, CRL_REG_LEN_08BIT, 0x14}, + {0x3004, CRL_REG_LEN_08BIT, 0x23}, + {0x3005, CRL_REG_LEN_08BIT, 0x20}, + {0x3006, CRL_REG_LEN_08BIT, 0x91}, + {0x3600, CRL_REG_LEN_08BIT, 0x74}, + {0x3601, CRL_REG_LEN_08BIT, 0x2b}, + {0x3612, CRL_REG_LEN_08BIT, 0x00}, + {0x3611, CRL_REG_LEN_08BIT, 0x67}, + {0x3633, CRL_REG_LEN_08BIT, 0xca}, + {0x3602, CRL_REG_LEN_08BIT, 0x2f}, + {0x3603, CRL_REG_LEN_08BIT, 0x00}, + {0x3630, CRL_REG_LEN_08BIT, 0x28}, + {0x3631, CRL_REG_LEN_08BIT, 0x16}, + {0x3714, CRL_REG_LEN_08BIT, 0x10}, + {0x371d, CRL_REG_LEN_08BIT, 0x01}, + {0x3007, CRL_REG_LEN_08BIT, 0x01}, + {0x3024, CRL_REG_LEN_08BIT, 0x01}, + {0x3020, CRL_REG_LEN_08BIT, 0x0b}, + {0x3702, CRL_REG_LEN_08BIT, 0x0a}, + {0x3703, CRL_REG_LEN_08BIT, 0x17}, + {0x3704, CRL_REG_LEN_08BIT, 0x0f}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x3709, CRL_REG_LEN_08BIT, 0xa8}, + {0x370c, CRL_REG_LEN_08BIT, 0xc7}, + {0x370d, CRL_REG_LEN_08BIT, 0x80}, + {0x3712, CRL_REG_LEN_08BIT, 0x00}, + {0x3713, CRL_REG_LEN_08BIT, 0x20}, + {0x3715, CRL_REG_LEN_08BIT, 0x04}, + {0x381d, CRL_REG_LEN_08BIT, 0x40}, + {0x381c, CRL_REG_LEN_08BIT, 0x00}, + {0x3822, CRL_REG_LEN_08BIT, 0x50}, + {0x3824, CRL_REG_LEN_08BIT, 0x50}, + 
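/* + * Output window/timing for this mode, read as standard OmniVision + * 0x38xx registers: 0x3808/0x3809 = 0x0280 (640) and 0x380a/0x380b = + * 0x01e0 (480) match the 640x480 mode name; 0x380c/0x380d (0x03c0) and + * 0x380e/0x380f (0x0208) would then be the line and frame lengths + * (HTS/VTS). + */ +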
{0x3815, CRL_REG_LEN_08BIT, 0x8c}, + {0x3804, CRL_REG_LEN_08BIT, 0x05}, + {0x3805, CRL_REG_LEN_08BIT, 0x1f}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3806, CRL_REG_LEN_08BIT, 0x02}, + {0x3807, CRL_REG_LEN_08BIT, 0x89}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0xa4}, + {0x3808, CRL_REG_LEN_08BIT, 0x02}, + {0x3809, CRL_REG_LEN_08BIT, 0x80}, + {0x380a, CRL_REG_LEN_08BIT, 0x01}, + {0x380b, CRL_REG_LEN_08BIT, 0xe0}, + {0x380c, CRL_REG_LEN_08BIT, 0x03}, + {0x380d, CRL_REG_LEN_08BIT, 0xc0}, + {0x6e42, CRL_REG_LEN_08BIT, 0x02}, + {0x6e43, CRL_REG_LEN_08BIT, 0x08}, + {0x380e, CRL_REG_LEN_08BIT, 0x02}, + {0x380f, CRL_REG_LEN_08BIT, 0x08}, + {0x3813, CRL_REG_LEN_08BIT, 0x02}, + {0x3811, CRL_REG_LEN_08BIT, 0x08}, + {0x381f, CRL_REG_LEN_08BIT, 0x0c}, + {0x3828, CRL_REG_LEN_08BIT, 0x03}, + {0x3829, CRL_REG_LEN_08BIT, 0x10}, + {0x382a, CRL_REG_LEN_08BIT, 0x10}, + {0x382b, CRL_REG_LEN_08BIT, 0x10}, + {0x3621, CRL_REG_LEN_08BIT, 0x74}, + {0x5005, CRL_REG_LEN_08BIT, 0x08}, + {0x56d5, CRL_REG_LEN_08BIT, 0x00}, + {0x56d6, CRL_REG_LEN_08BIT, 0x80}, + {0x56d7, CRL_REG_LEN_08BIT, 0x00}, + {0x56d8, CRL_REG_LEN_08BIT, 0x00}, + {0x56d9, CRL_REG_LEN_08BIT, 0x00}, + {0x56da, CRL_REG_LEN_08BIT, 0x80}, + {0x56db, CRL_REG_LEN_08BIT, 0x00}, + {0x56dc, CRL_REG_LEN_08BIT, 0x00}, + {0x56e8, CRL_REG_LEN_08BIT, 0x00}, + {0x56e9, CRL_REG_LEN_08BIT, 0x7f}, + {0x56ea, CRL_REG_LEN_08BIT, 0x00}, + {0x56eb, CRL_REG_LEN_08BIT, 0x7f}, + {0x5100, CRL_REG_LEN_08BIT, 0x00}, + {0x5101, CRL_REG_LEN_08BIT, 0x80}, + {0x5102, CRL_REG_LEN_08BIT, 0x00}, + {0x5103, CRL_REG_LEN_08BIT, 0x80}, + {0x5104, CRL_REG_LEN_08BIT, 0x00}, + {0x5105, CRL_REG_LEN_08BIT, 0x80}, + {0x5106, CRL_REG_LEN_08BIT, 0x00}, + {0x5107, CRL_REG_LEN_08BIT, 0x80}, + {0x5108, CRL_REG_LEN_08BIT, 0x00}, + {0x5109, CRL_REG_LEN_08BIT, 0x00}, + {0x510a, CRL_REG_LEN_08BIT, 0x00}, + {0x510b, CRL_REG_LEN_08BIT, 0x00}, + {0x510c, CRL_REG_LEN_08BIT, 0x00}, + {0x510d, CRL_REG_LEN_08BIT, 0x00}, + {0x510e, CRL_REG_LEN_08BIT, 0x00}, + {0x510f, CRL_REG_LEN_08BIT, 0x00}, + {0x5110, CRL_REG_LEN_08BIT, 0x00}, + {0x5111, CRL_REG_LEN_08BIT, 0x80}, + {0x5112, CRL_REG_LEN_08BIT, 0x00}, + {0x5113, CRL_REG_LEN_08BIT, 0x80}, + {0x5114, CRL_REG_LEN_08BIT, 0x00}, + {0x5115, CRL_REG_LEN_08BIT, 0x80}, + {0x5116, CRL_REG_LEN_08BIT, 0x00}, + {0x5117, CRL_REG_LEN_08BIT, 0x80}, + {0x5118, CRL_REG_LEN_08BIT, 0x00}, + {0x5119, CRL_REG_LEN_08BIT, 0x00}, + {0x511a, CRL_REG_LEN_08BIT, 0x00}, + {0x511b, CRL_REG_LEN_08BIT, 0x00}, + {0x511c, CRL_REG_LEN_08BIT, 0x00}, + {0x511d, CRL_REG_LEN_08BIT, 0x00}, + {0x511e, CRL_REG_LEN_08BIT, 0x00}, + {0x511f, CRL_REG_LEN_08BIT, 0x00}, + {0x56d0, CRL_REG_LEN_08BIT, 0x00}, + {0x5006, CRL_REG_LEN_08BIT, 0x24}, + {0x5608, CRL_REG_LEN_08BIT, 0x19}, + {0x52d7, CRL_REG_LEN_08BIT, 0x06}, + {0x528d, CRL_REG_LEN_08BIT, 0x08}, + {0x5293, CRL_REG_LEN_08BIT, 0x12}, + {0x52d3, CRL_REG_LEN_08BIT, 0x12}, + {0x5288, CRL_REG_LEN_08BIT, 0x06}, + {0x5289, CRL_REG_LEN_08BIT, 0x20}, + {0x52c8, CRL_REG_LEN_08BIT, 0x06}, + {0x52c9, CRL_REG_LEN_08BIT, 0x20}, + {0x52cd, CRL_REG_LEN_08BIT, 0x04}, + {0x5381, CRL_REG_LEN_08BIT, 0x00}, + {0x5382, CRL_REG_LEN_08BIT, 0xff}, + {0x5589, CRL_REG_LEN_08BIT, 0x76}, + {0x558a, CRL_REG_LEN_08BIT, 0x47}, + {0x558b, CRL_REG_LEN_08BIT, 0xef}, + {0x558c, CRL_REG_LEN_08BIT, 0xc9}, + {0x558d, CRL_REG_LEN_08BIT, 0x49}, + {0x558e, CRL_REG_LEN_08BIT, 0x30}, + {0x558f, CRL_REG_LEN_08BIT, 0x67}, + {0x5590, CRL_REG_LEN_08BIT, 0x3f}, + {0x5591, CRL_REG_LEN_08BIT, 0xf0}, + {0x5592, CRL_REG_LEN_08BIT, 0x10}, + 
{0x55a2, CRL_REG_LEN_08BIT, 0x6d}, + {0x55a3, CRL_REG_LEN_08BIT, 0x55}, + {0x55a4, CRL_REG_LEN_08BIT, 0xc3}, + {0x55a5, CRL_REG_LEN_08BIT, 0xb5}, + {0x55a6, CRL_REG_LEN_08BIT, 0x43}, + {0x55a7, CRL_REG_LEN_08BIT, 0x38}, + {0x55a8, CRL_REG_LEN_08BIT, 0x5f}, + {0x55a9, CRL_REG_LEN_08BIT, 0x4b}, + {0x55aa, CRL_REG_LEN_08BIT, 0xf0}, + {0x55ab, CRL_REG_LEN_08BIT, 0x10}, + {0x5581, CRL_REG_LEN_08BIT, 0x52}, + {0x5300, CRL_REG_LEN_08BIT, 0x01}, + {0x5301, CRL_REG_LEN_08BIT, 0x00}, + {0x5302, CRL_REG_LEN_08BIT, 0x00}, + {0x5303, CRL_REG_LEN_08BIT, 0x0e}, + {0x5304, CRL_REG_LEN_08BIT, 0x00}, + {0x5305, CRL_REG_LEN_08BIT, 0x0e}, + {0x5306, CRL_REG_LEN_08BIT, 0x00}, + {0x5307, CRL_REG_LEN_08BIT, 0x36}, + {0x5308, CRL_REG_LEN_08BIT, 0x00}, + {0x5309, CRL_REG_LEN_08BIT, 0xd9}, + {0x530a, CRL_REG_LEN_08BIT, 0x00}, + {0x530b, CRL_REG_LEN_08BIT, 0x0f}, + {0x530c, CRL_REG_LEN_08BIT, 0x00}, + {0x530d, CRL_REG_LEN_08BIT, 0x2c}, + {0x530e, CRL_REG_LEN_08BIT, 0x00}, + {0x530f, CRL_REG_LEN_08BIT, 0x59}, + {0x5310, CRL_REG_LEN_08BIT, 0x00}, + {0x5311, CRL_REG_LEN_08BIT, 0x7b}, + {0x5312, CRL_REG_LEN_08BIT, 0x00}, + {0x5313, CRL_REG_LEN_08BIT, 0x22}, + {0x5314, CRL_REG_LEN_08BIT, 0x00}, + {0x5315, CRL_REG_LEN_08BIT, 0xd5}, + {0x5316, CRL_REG_LEN_08BIT, 0x00}, + {0x5317, CRL_REG_LEN_08BIT, 0x13}, + {0x5318, CRL_REG_LEN_08BIT, 0x00}, + {0x5319, CRL_REG_LEN_08BIT, 0x18}, + {0x531a, CRL_REG_LEN_08BIT, 0x00}, + {0x531b, CRL_REG_LEN_08BIT, 0x26}, + {0x531c, CRL_REG_LEN_08BIT, 0x00}, + {0x531d, CRL_REG_LEN_08BIT, 0xdc}, + {0x531e, CRL_REG_LEN_08BIT, 0x00}, + {0x531f, CRL_REG_LEN_08BIT, 0x02}, + {0x5320, CRL_REG_LEN_08BIT, 0x00}, + {0x5321, CRL_REG_LEN_08BIT, 0x24}, + {0x5322, CRL_REG_LEN_08BIT, 0x00}, + {0x5323, CRL_REG_LEN_08BIT, 0x56}, + {0x5324, CRL_REG_LEN_08BIT, 0x00}, + {0x5325, CRL_REG_LEN_08BIT, 0x85}, + {0x5326, CRL_REG_LEN_08BIT, 0x00}, + {0x5327, CRL_REG_LEN_08BIT, 0x20}, + {0x5609, CRL_REG_LEN_08BIT, 0x01}, + {0x560a, CRL_REG_LEN_08BIT, 0x40}, + {0x560b, CRL_REG_LEN_08BIT, 0x01}, + {0x560c, CRL_REG_LEN_08BIT, 0x40}, + {0x560d, CRL_REG_LEN_08BIT, 0x00}, + {0x560e, CRL_REG_LEN_08BIT, 0xfa}, + {0x560f, CRL_REG_LEN_08BIT, 0x00}, + {0x5610, CRL_REG_LEN_08BIT, 0xfa}, + {0x5611, CRL_REG_LEN_08BIT, 0x02}, + {0x5612, CRL_REG_LEN_08BIT, 0x80}, + {0x5613, CRL_REG_LEN_08BIT, 0x02}, + {0x5614, CRL_REG_LEN_08BIT, 0x80}, + {0x5615, CRL_REG_LEN_08BIT, 0x01}, + {0x5616, CRL_REG_LEN_08BIT, 0x2c}, + {0x5617, CRL_REG_LEN_08BIT, 0x01}, + {0x5618, CRL_REG_LEN_08BIT, 0x2c}, + {0x563b, CRL_REG_LEN_08BIT, 0x01}, + {0x563c, CRL_REG_LEN_08BIT, 0x01}, + {0x563d, CRL_REG_LEN_08BIT, 0x01}, + {0x563e, CRL_REG_LEN_08BIT, 0x01}, + {0x563f, CRL_REG_LEN_08BIT, 0x03}, + {0x5640, CRL_REG_LEN_08BIT, 0x03}, + {0x5641, CRL_REG_LEN_08BIT, 0x03}, + {0x5642, CRL_REG_LEN_08BIT, 0x05}, + {0x5643, CRL_REG_LEN_08BIT, 0x09}, + {0x5644, CRL_REG_LEN_08BIT, 0x05}, + {0x5645, CRL_REG_LEN_08BIT, 0x05}, + {0x5646, CRL_REG_LEN_08BIT, 0x05}, + {0x5647, CRL_REG_LEN_08BIT, 0x05}, + {0x5651, CRL_REG_LEN_08BIT, 0x00}, + {0x5652, CRL_REG_LEN_08BIT, 0x80}, + {0x521a, CRL_REG_LEN_08BIT, 0x01}, + {0x521b, CRL_REG_LEN_08BIT, 0x03}, + {0x521c, CRL_REG_LEN_08BIT, 0x06}, + {0x521d, CRL_REG_LEN_08BIT, 0x0a}, + {0x521e, CRL_REG_LEN_08BIT, 0x0e}, + {0x521f, CRL_REG_LEN_08BIT, 0x12}, + {0x5220, CRL_REG_LEN_08BIT, 0x16}, + {0x5223, CRL_REG_LEN_08BIT, 0x02}, + {0x5225, CRL_REG_LEN_08BIT, 0x04}, + {0x5227, CRL_REG_LEN_08BIT, 0x08}, + {0x5229, CRL_REG_LEN_08BIT, 0x0c}, + {0x522b, CRL_REG_LEN_08BIT, 0x12}, + {0x522d, CRL_REG_LEN_08BIT, 0x18}, + {0x522f, CRL_REG_LEN_08BIT, 0x1e}, + 
{0x5241, CRL_REG_LEN_08BIT, 0x04}, + {0x5242, CRL_REG_LEN_08BIT, 0x01}, + {0x5243, CRL_REG_LEN_08BIT, 0x03}, + {0x5244, CRL_REG_LEN_08BIT, 0x06}, + {0x5245, CRL_REG_LEN_08BIT, 0x0a}, + {0x5246, CRL_REG_LEN_08BIT, 0x0e}, + {0x5247, CRL_REG_LEN_08BIT, 0x12}, + {0x5248, CRL_REG_LEN_08BIT, 0x16}, + {0x524a, CRL_REG_LEN_08BIT, 0x03}, + {0x524c, CRL_REG_LEN_08BIT, 0x04}, + {0x524e, CRL_REG_LEN_08BIT, 0x08}, + {0x5250, CRL_REG_LEN_08BIT, 0x0c}, + {0x5252, CRL_REG_LEN_08BIT, 0x12}, + {0x5254, CRL_REG_LEN_08BIT, 0x18}, + {0x5256, CRL_REG_LEN_08BIT, 0x1e}, + {0x4606, CRL_REG_LEN_08BIT, 0x07}, + {0x4607, CRL_REG_LEN_08BIT, 0x71}, + {0x460a, CRL_REG_LEN_08BIT, 0x02}, + {0x460b, CRL_REG_LEN_08BIT, 0x70}, + {0x460c, CRL_REG_LEN_08BIT, 0x00}, + {0x4620, CRL_REG_LEN_08BIT, 0x0e}, + {0x4700, CRL_REG_LEN_08BIT, 0x04}, + {0x4701, CRL_REG_LEN_08BIT, 0x00}, + {0x4702, CRL_REG_LEN_08BIT, 0x01}, + {0x4004, CRL_REG_LEN_08BIT, 0x04}, + {0x4005, CRL_REG_LEN_08BIT, 0x18}, + {0x4001, CRL_REG_LEN_08BIT, 0x06}, + {0x4050, CRL_REG_LEN_08BIT, 0x22}, + {0x4051, CRL_REG_LEN_08BIT, 0x24}, + {0x4052, CRL_REG_LEN_08BIT, 0x02}, + {0x4057, CRL_REG_LEN_08BIT, 0x9c}, + {0x405a, CRL_REG_LEN_08BIT, 0x00}, + /*FSIN enable*/ + {0x3832, CRL_REG_LEN_08BIT, 0x00}, + {0x3833, CRL_REG_LEN_08BIT, 0x02}, + {0x3834, CRL_REG_LEN_08BIT, 0x02}, + {0x3835, CRL_REG_LEN_08BIT, 0x08}, + {0x302e, CRL_REG_LEN_08BIT, 0x00}, + /*FSIN end*/ + {0x4202, CRL_REG_LEN_08BIT, 0x02}, + {0x3023, CRL_REG_LEN_08BIT, 0x10}, + {0x3003, CRL_REG_LEN_08BIT, 0x20}, + {0x3004, CRL_REG_LEN_08BIT, 0x21}, + {0x3005, CRL_REG_LEN_08BIT, 0x14}, + {0x3006, CRL_REG_LEN_08BIT, 0x11}, + {0x3024, CRL_REG_LEN_08BIT, 0x01}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, + {0x6f10, CRL_REG_LEN_08BIT, 0x07}, + {0x6f11, CRL_REG_LEN_08BIT, 0x82}, + {0x6f12, CRL_REG_LEN_08BIT, 0x04}, + {0x6f13, CRL_REG_LEN_08BIT, 0x00}, + {0x6f14, CRL_REG_LEN_08BIT, 0x1f}, + {0x6f15, CRL_REG_LEN_08BIT, 0xdd}, + {0x6f16, CRL_REG_LEN_08BIT, 0x04}, + {0x6f17, CRL_REG_LEN_08BIT, 0x04}, + {0x6f18, CRL_REG_LEN_08BIT, 0x36}, + {0x6f19, CRL_REG_LEN_08BIT, 0x66}, + {0x6f1a, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1b, CRL_REG_LEN_08BIT, 0x08}, + {0x6f1c, CRL_REG_LEN_08BIT, 0x0c}, + {0x6f1d, CRL_REG_LEN_08BIT, 0xe7}, + {0x6f1e, CRL_REG_LEN_08BIT, 0x04}, + {0x6f1f, CRL_REG_LEN_08BIT, 0x0c}, + {0xd000, CRL_REG_LEN_08BIT, 0x19}, + {0xd001, CRL_REG_LEN_08BIT, 0xa0}, + {0xd002, CRL_REG_LEN_08BIT, 0x00}, + {0xd003, CRL_REG_LEN_08BIT, 0x01}, + {0xd004, CRL_REG_LEN_08BIT, 0xa9}, + {0xd005, CRL_REG_LEN_08BIT, 0xad}, + {0xd006, CRL_REG_LEN_08BIT, 0x10}, + {0xd007, CRL_REG_LEN_08BIT, 0x40}, + {0xd008, CRL_REG_LEN_08BIT, 0x44}, + {0xd009, CRL_REG_LEN_08BIT, 0x00}, + {0xd00a, CRL_REG_LEN_08BIT, 0x68}, + {0xd00b, CRL_REG_LEN_08BIT, 0x00}, + {0xd00c, CRL_REG_LEN_08BIT, 0x15}, + {0xd00d, CRL_REG_LEN_08BIT, 0x00}, + {0xd00e, CRL_REG_LEN_08BIT, 0x00}, + {0xd00f, CRL_REG_LEN_08BIT, 0x00}, + {0xd010, CRL_REG_LEN_08BIT, 0x19}, + {0xd011, CRL_REG_LEN_08BIT, 0xa0}, + {0xd012, CRL_REG_LEN_08BIT, 0x00}, + {0xd013, CRL_REG_LEN_08BIT, 0x01}, + {0xd014, CRL_REG_LEN_08BIT, 0xa9}, + {0xd015, CRL_REG_LEN_08BIT, 0xad}, + {0xd016, CRL_REG_LEN_08BIT, 0x13}, + {0xd017, CRL_REG_LEN_08BIT, 0xd0}, + {0xd018, CRL_REG_LEN_08BIT, 0x44}, + {0xd019, CRL_REG_LEN_08BIT, 0x00}, + {0xd01a, CRL_REG_LEN_08BIT, 0x68}, + {0xd01b, CRL_REG_LEN_08BIT, 0x00}, + {0xd01c, CRL_REG_LEN_08BIT, 0x15}, + {0xd01d, CRL_REG_LEN_08BIT, 0x00}, + {0xd01e, CRL_REG_LEN_08BIT, 0x00}, + {0xd01f, CRL_REG_LEN_08BIT, 0x00}, + {0xd020, CRL_REG_LEN_08BIT, 0x19}, + 
{0xd021, CRL_REG_LEN_08BIT, 0xa0}, + {0xd022, CRL_REG_LEN_08BIT, 0x00}, + {0xd023, CRL_REG_LEN_08BIT, 0x01}, + {0xd024, CRL_REG_LEN_08BIT, 0xa9}, + {0xd025, CRL_REG_LEN_08BIT, 0xad}, + {0xd026, CRL_REG_LEN_08BIT, 0x14}, + {0xd027, CRL_REG_LEN_08BIT, 0xb8}, + {0xd028, CRL_REG_LEN_08BIT, 0x44}, + {0xd029, CRL_REG_LEN_08BIT, 0x00}, + {0xd02a, CRL_REG_LEN_08BIT, 0x68}, + {0xd02b, CRL_REG_LEN_08BIT, 0x00}, + {0xd02c, CRL_REG_LEN_08BIT, 0x15}, + {0xd02d, CRL_REG_LEN_08BIT, 0x00}, + {0xd02e, CRL_REG_LEN_08BIT, 0x00}, + {0xd02f, CRL_REG_LEN_08BIT, 0x00}, + {0xd030, CRL_REG_LEN_08BIT, 0x19}, + {0xd031, CRL_REG_LEN_08BIT, 0xa0}, + {0xd032, CRL_REG_LEN_08BIT, 0x00}, + {0xd033, CRL_REG_LEN_08BIT, 0x01}, + {0xd034, CRL_REG_LEN_08BIT, 0xa9}, + {0xd035, CRL_REG_LEN_08BIT, 0xad}, + {0xd036, CRL_REG_LEN_08BIT, 0x14}, + {0xd037, CRL_REG_LEN_08BIT, 0xdc}, + {0xd038, CRL_REG_LEN_08BIT, 0x44}, + {0xd039, CRL_REG_LEN_08BIT, 0x00}, + {0xd03a, CRL_REG_LEN_08BIT, 0x68}, + {0xd03b, CRL_REG_LEN_08BIT, 0x00}, + {0xd03c, CRL_REG_LEN_08BIT, 0x15}, + {0xd03d, CRL_REG_LEN_08BIT, 0x00}, + {0xd03e, CRL_REG_LEN_08BIT, 0x00}, + {0xd03f, CRL_REG_LEN_08BIT, 0x00}, + {0xd040, CRL_REG_LEN_08BIT, 0x9c}, + {0xd041, CRL_REG_LEN_08BIT, 0x21}, + {0xd042, CRL_REG_LEN_08BIT, 0xff}, + {0xd043, CRL_REG_LEN_08BIT, 0xe4}, + {0xd044, CRL_REG_LEN_08BIT, 0xd4}, + {0xd045, CRL_REG_LEN_08BIT, 0x01}, + {0xd046, CRL_REG_LEN_08BIT, 0x48}, + {0xd047, CRL_REG_LEN_08BIT, 0x00}, + {0xd048, CRL_REG_LEN_08BIT, 0xd4}, + {0xd049, CRL_REG_LEN_08BIT, 0x01}, + {0xd04a, CRL_REG_LEN_08BIT, 0x50}, + {0xd04b, CRL_REG_LEN_08BIT, 0x04}, + {0xd04c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd04d, CRL_REG_LEN_08BIT, 0x01}, + {0xd04e, CRL_REG_LEN_08BIT, 0x60}, + {0xd04f, CRL_REG_LEN_08BIT, 0x08}, + {0xd050, CRL_REG_LEN_08BIT, 0xd4}, + {0xd051, CRL_REG_LEN_08BIT, 0x01}, + {0xd052, CRL_REG_LEN_08BIT, 0x70}, + {0xd053, CRL_REG_LEN_08BIT, 0x0c}, + {0xd054, CRL_REG_LEN_08BIT, 0xd4}, + {0xd055, CRL_REG_LEN_08BIT, 0x01}, + {0xd056, CRL_REG_LEN_08BIT, 0x80}, + {0xd057, CRL_REG_LEN_08BIT, 0x10}, + {0xd058, CRL_REG_LEN_08BIT, 0x19}, + {0xd059, CRL_REG_LEN_08BIT, 0xc0}, + {0xd05a, CRL_REG_LEN_08BIT, 0x00}, + {0xd05b, CRL_REG_LEN_08BIT, 0x01}, + {0xd05c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd05d, CRL_REG_LEN_08BIT, 0xce}, + {0xd05e, CRL_REG_LEN_08BIT, 0x02}, + {0xd05f, CRL_REG_LEN_08BIT, 0xa4}, + {0xd060, CRL_REG_LEN_08BIT, 0x9c}, + {0xd061, CRL_REG_LEN_08BIT, 0xa0}, + {0xd062, CRL_REG_LEN_08BIT, 0x00}, + {0xd063, CRL_REG_LEN_08BIT, 0x00}, + {0xd064, CRL_REG_LEN_08BIT, 0x84}, + {0xd065, CRL_REG_LEN_08BIT, 0x6e}, + {0xd066, CRL_REG_LEN_08BIT, 0x00}, + {0xd067, CRL_REG_LEN_08BIT, 0x00}, + {0xd068, CRL_REG_LEN_08BIT, 0xd8}, + {0xd069, CRL_REG_LEN_08BIT, 0x03}, + {0xd06a, CRL_REG_LEN_08BIT, 0x28}, + {0xd06b, CRL_REG_LEN_08BIT, 0x76}, + {0xd06c, CRL_REG_LEN_08BIT, 0x1a}, + {0xd06d, CRL_REG_LEN_08BIT, 0x00}, + {0xd06e, CRL_REG_LEN_08BIT, 0x00}, + {0xd06f, CRL_REG_LEN_08BIT, 0x01}, + {0xd070, CRL_REG_LEN_08BIT, 0xaa}, + {0xd071, CRL_REG_LEN_08BIT, 0x10}, + {0xd072, CRL_REG_LEN_08BIT, 0x03}, + {0xd073, CRL_REG_LEN_08BIT, 0xf0}, + {0xd074, CRL_REG_LEN_08BIT, 0x18}, + {0xd075, CRL_REG_LEN_08BIT, 0x60}, + {0xd076, CRL_REG_LEN_08BIT, 0x00}, + {0xd077, CRL_REG_LEN_08BIT, 0x01}, + {0xd078, CRL_REG_LEN_08BIT, 0xa8}, + {0xd079, CRL_REG_LEN_08BIT, 0x63}, + {0xd07a, CRL_REG_LEN_08BIT, 0x07}, + {0xd07b, CRL_REG_LEN_08BIT, 0x80}, + {0xd07c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd07d, CRL_REG_LEN_08BIT, 0xa0}, + {0xd07e, CRL_REG_LEN_08BIT, 0x00}, + {0xd07f, CRL_REG_LEN_08BIT, 0x04}, + {0xd080, CRL_REG_LEN_08BIT, 0x18}, + 
{0xd081, CRL_REG_LEN_08BIT, 0xc0}, + {0xd082, CRL_REG_LEN_08BIT, 0x00}, + {0xd083, CRL_REG_LEN_08BIT, 0x00}, + {0xd084, CRL_REG_LEN_08BIT, 0xa8}, + {0xd085, CRL_REG_LEN_08BIT, 0xc6}, + {0xd086, CRL_REG_LEN_08BIT, 0x00}, + {0xd087, CRL_REG_LEN_08BIT, 0x00}, + {0xd088, CRL_REG_LEN_08BIT, 0x8c}, + {0xd089, CRL_REG_LEN_08BIT, 0x63}, + {0xd08a, CRL_REG_LEN_08BIT, 0x00}, + {0xd08b, CRL_REG_LEN_08BIT, 0x00}, + {0xd08c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd08d, CRL_REG_LEN_08BIT, 0x01}, + {0xd08e, CRL_REG_LEN_08BIT, 0x28}, + {0xd08f, CRL_REG_LEN_08BIT, 0x14}, + {0xd090, CRL_REG_LEN_08BIT, 0xd4}, + {0xd091, CRL_REG_LEN_08BIT, 0x01}, + {0xd092, CRL_REG_LEN_08BIT, 0x30}, + {0xd093, CRL_REG_LEN_08BIT, 0x18}, + {0xd094, CRL_REG_LEN_08BIT, 0x07}, + {0xd095, CRL_REG_LEN_08BIT, 0xff}, + {0xd096, CRL_REG_LEN_08BIT, 0xf8}, + {0xd097, CRL_REG_LEN_08BIT, 0xfd}, + {0xd098, CRL_REG_LEN_08BIT, 0x9c}, + {0xd099, CRL_REG_LEN_08BIT, 0x80}, + {0xd09a, CRL_REG_LEN_08BIT, 0x00}, + {0xd09b, CRL_REG_LEN_08BIT, 0x03}, + {0xd09c, CRL_REG_LEN_08BIT, 0xa5}, + {0xd09d, CRL_REG_LEN_08BIT, 0x6b}, + {0xd09e, CRL_REG_LEN_08BIT, 0x00}, + {0xd09f, CRL_REG_LEN_08BIT, 0xff}, + {0xd0a0, CRL_REG_LEN_08BIT, 0x18}, + {0xd0a1, CRL_REG_LEN_08BIT, 0xc0}, + {0xd0a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0a3, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a4, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0a5, CRL_REG_LEN_08BIT, 0xc6}, + {0xd0a6, CRL_REG_LEN_08BIT, 0x01}, + {0xd0a7, CRL_REG_LEN_08BIT, 0x02}, + {0xd0a8, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0a9, CRL_REG_LEN_08BIT, 0x6b}, + {0xd0aa, CRL_REG_LEN_08BIT, 0x58}, + {0xd0ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ac, CRL_REG_LEN_08BIT, 0x84}, + {0xd0ad, CRL_REG_LEN_08BIT, 0x8e}, + {0xd0ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd0af, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd0b1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd0b2, CRL_REG_LEN_08BIT, 0x30}, + {0xd0b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b4, CRL_REG_LEN_08BIT, 0x98}, + {0xd0b5, CRL_REG_LEN_08BIT, 0xb0}, + {0xd0b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0b8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0b9, CRL_REG_LEN_08BIT, 0x64}, + {0xd0ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd0bb, CRL_REG_LEN_08BIT, 0x6e}, + {0xd0bc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0bd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd0be, CRL_REG_LEN_08BIT, 0x18}, + {0xd0bf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c0, CRL_REG_LEN_08BIT, 0x10}, + {0xd0c1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c3, CRL_REG_LEN_08BIT, 0x06}, + {0xd0c4, CRL_REG_LEN_08BIT, 0x95}, + {0xd0c5, CRL_REG_LEN_08BIT, 0x8b}, + {0xd0c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0c8, CRL_REG_LEN_08BIT, 0x94}, + {0xd0c9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd0ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd0cb, CRL_REG_LEN_08BIT, 0x70}, + {0xd0cc, CRL_REG_LEN_08BIT, 0xe5}, + {0xd0cd, CRL_REG_LEN_08BIT, 0x65}, + {0xd0ce, CRL_REG_LEN_08BIT, 0x60}, + {0xd0cf, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd0d1, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d3, CRL_REG_LEN_08BIT, 0x62}, + {0xd0d4, CRL_REG_LEN_08BIT, 0x15}, + {0xd0d5, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0d8, CRL_REG_LEN_08BIT, 0x18}, + {0xd0d9, CRL_REG_LEN_08BIT, 0x60}, + {0xd0da, CRL_REG_LEN_08BIT, 0x80}, + {0xd0db, CRL_REG_LEN_08BIT, 0x06}, + {0xd0dc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0dd, CRL_REG_LEN_08BIT, 0x83}, + {0xd0de, CRL_REG_LEN_08BIT, 0x38}, + {0xd0df, CRL_REG_LEN_08BIT, 0x29}, + {0xd0e0, CRL_REG_LEN_08BIT, 0xa8}, + 
{0xd0e1, CRL_REG_LEN_08BIT, 0xe3}, + {0xd0e2, CRL_REG_LEN_08BIT, 0x40}, + {0xd0e3, CRL_REG_LEN_08BIT, 0x08}, + {0xd0e4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0e5, CRL_REG_LEN_08BIT, 0x84}, + {0xd0e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0e8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0e9, CRL_REG_LEN_08BIT, 0xa3}, + {0xd0ea, CRL_REG_LEN_08BIT, 0x40}, + {0xd0eb, CRL_REG_LEN_08BIT, 0x09}, + {0xd0ec, CRL_REG_LEN_08BIT, 0xa8}, + {0xd0ed, CRL_REG_LEN_08BIT, 0xc3}, + {0xd0ee, CRL_REG_LEN_08BIT, 0x38}, + {0xd0ef, CRL_REG_LEN_08BIT, 0x2a}, + {0xd0f0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f1, CRL_REG_LEN_08BIT, 0x07}, + {0xd0f2, CRL_REG_LEN_08BIT, 0x20}, + {0xd0f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd0f5, CRL_REG_LEN_08BIT, 0x66}, + {0xd0f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd0f8, CRL_REG_LEN_08BIT, 0xd8}, + {0xd0f9, CRL_REG_LEN_08BIT, 0x05}, + {0xd0fa, CRL_REG_LEN_08BIT, 0x18}, + {0xd0fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd0fc, CRL_REG_LEN_08BIT, 0x18}, + {0xd0fd, CRL_REG_LEN_08BIT, 0x60}, + {0xd0fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd0ff, CRL_REG_LEN_08BIT, 0x01}, + {0xd100, CRL_REG_LEN_08BIT, 0x98}, + {0xd101, CRL_REG_LEN_08BIT, 0x90}, + {0xd102, CRL_REG_LEN_08BIT, 0x00}, + {0xd103, CRL_REG_LEN_08BIT, 0x00}, + {0xd104, CRL_REG_LEN_08BIT, 0x84}, + {0xd105, CRL_REG_LEN_08BIT, 0xae}, + {0xd106, CRL_REG_LEN_08BIT, 0x00}, + {0xd107, CRL_REG_LEN_08BIT, 0x00}, + {0xd108, CRL_REG_LEN_08BIT, 0xa8}, + {0xd109, CRL_REG_LEN_08BIT, 0x63}, + {0xd10a, CRL_REG_LEN_08BIT, 0x06}, + {0xd10b, CRL_REG_LEN_08BIT, 0x4c}, + {0xd10c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd10d, CRL_REG_LEN_08BIT, 0xc0}, + {0xd10e, CRL_REG_LEN_08BIT, 0x00}, + {0xd10f, CRL_REG_LEN_08BIT, 0x00}, + {0xd110, CRL_REG_LEN_08BIT, 0xd8}, + {0xd111, CRL_REG_LEN_08BIT, 0x03}, + {0xd112, CRL_REG_LEN_08BIT, 0x30}, + {0xd113, CRL_REG_LEN_08BIT, 0x00}, + {0xd114, CRL_REG_LEN_08BIT, 0x8c}, + {0xd115, CRL_REG_LEN_08BIT, 0x65}, + {0xd116, CRL_REG_LEN_08BIT, 0x00}, + {0xd117, CRL_REG_LEN_08BIT, 0x6e}, + {0xd118, CRL_REG_LEN_08BIT, 0xe5}, + {0xd119, CRL_REG_LEN_08BIT, 0x84}, + {0xd11a, CRL_REG_LEN_08BIT, 0x18}, + {0xd11b, CRL_REG_LEN_08BIT, 0x00}, + {0xd11c, CRL_REG_LEN_08BIT, 0x10}, + {0xd11d, CRL_REG_LEN_08BIT, 0x00}, + {0xd11e, CRL_REG_LEN_08BIT, 0x00}, + {0xd11f, CRL_REG_LEN_08BIT, 0x07}, + {0xd120, CRL_REG_LEN_08BIT, 0x18}, + {0xd121, CRL_REG_LEN_08BIT, 0x80}, + {0xd122, CRL_REG_LEN_08BIT, 0x80}, + {0xd123, CRL_REG_LEN_08BIT, 0x06}, + {0xd124, CRL_REG_LEN_08BIT, 0x94}, + {0xd125, CRL_REG_LEN_08BIT, 0x65}, + {0xd126, CRL_REG_LEN_08BIT, 0x00}, + {0xd127, CRL_REG_LEN_08BIT, 0x70}, + {0xd128, CRL_REG_LEN_08BIT, 0xe5}, + {0xd129, CRL_REG_LEN_08BIT, 0x43}, + {0xd12a, CRL_REG_LEN_08BIT, 0x60}, + {0xd12b, CRL_REG_LEN_08BIT, 0x00}, + {0xd12c, CRL_REG_LEN_08BIT, 0x0c}, + {0xd12d, CRL_REG_LEN_08BIT, 0x00}, + {0xd12e, CRL_REG_LEN_08BIT, 0x00}, + {0xd12f, CRL_REG_LEN_08BIT, 0x3e}, + {0xd130, CRL_REG_LEN_08BIT, 0xa8}, + {0xd131, CRL_REG_LEN_08BIT, 0x64}, + {0xd132, CRL_REG_LEN_08BIT, 0x38}, + {0xd133, CRL_REG_LEN_08BIT, 0x24}, + {0xd134, CRL_REG_LEN_08BIT, 0x18}, + {0xd135, CRL_REG_LEN_08BIT, 0x80}, + {0xd136, CRL_REG_LEN_08BIT, 0x80}, + {0xd137, CRL_REG_LEN_08BIT, 0x06}, + {0xd138, CRL_REG_LEN_08BIT, 0xa8}, + {0xd139, CRL_REG_LEN_08BIT, 0x64}, + {0xd13a, CRL_REG_LEN_08BIT, 0x38}, + {0xd13b, CRL_REG_LEN_08BIT, 0x24}, + {0xd13c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd13d, CRL_REG_LEN_08BIT, 0x63}, + {0xd13e, CRL_REG_LEN_08BIT, 0x00}, + {0xd13f, CRL_REG_LEN_08BIT, 0x00}, + {0xd140, CRL_REG_LEN_08BIT, 0xa4}, + 
{0xd141, CRL_REG_LEN_08BIT, 0x63}, + {0xd142, CRL_REG_LEN_08BIT, 0x00}, + {0xd143, CRL_REG_LEN_08BIT, 0x40}, + {0xd144, CRL_REG_LEN_08BIT, 0xbc}, + {0xd145, CRL_REG_LEN_08BIT, 0x23}, + {0xd146, CRL_REG_LEN_08BIT, 0x00}, + {0xd147, CRL_REG_LEN_08BIT, 0x00}, + {0xd148, CRL_REG_LEN_08BIT, 0x0c}, + {0xd149, CRL_REG_LEN_08BIT, 0x00}, + {0xd14a, CRL_REG_LEN_08BIT, 0x00}, + {0xd14b, CRL_REG_LEN_08BIT, 0x2a}, + {0xd14c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd14d, CRL_REG_LEN_08BIT, 0x64}, + {0xd14e, CRL_REG_LEN_08BIT, 0x6e}, + {0xd14f, CRL_REG_LEN_08BIT, 0x44}, + {0xd150, CRL_REG_LEN_08BIT, 0x19}, + {0xd151, CRL_REG_LEN_08BIT, 0x00}, + {0xd152, CRL_REG_LEN_08BIT, 0x80}, + {0xd153, CRL_REG_LEN_08BIT, 0x06}, + {0xd154, CRL_REG_LEN_08BIT, 0xa8}, + {0xd155, CRL_REG_LEN_08BIT, 0xe8}, + {0xd156, CRL_REG_LEN_08BIT, 0x3d}, + {0xd157, CRL_REG_LEN_08BIT, 0x05}, + {0xd158, CRL_REG_LEN_08BIT, 0x8c}, + {0xd159, CRL_REG_LEN_08BIT, 0x67}, + {0xd15a, CRL_REG_LEN_08BIT, 0x00}, + {0xd15b, CRL_REG_LEN_08BIT, 0x00}, + {0xd15c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd15d, CRL_REG_LEN_08BIT, 0x63}, + {0xd15e, CRL_REG_LEN_08BIT, 0x00}, + {0xd15f, CRL_REG_LEN_08BIT, 0x18}, + {0xd160, CRL_REG_LEN_08BIT, 0xb8}, + {0xd161, CRL_REG_LEN_08BIT, 0x63}, + {0xd162, CRL_REG_LEN_08BIT, 0x00}, + {0xd163, CRL_REG_LEN_08BIT, 0x98}, + {0xd164, CRL_REG_LEN_08BIT, 0xbc}, + {0xd165, CRL_REG_LEN_08BIT, 0x03}, + {0xd166, CRL_REG_LEN_08BIT, 0x00}, + {0xd167, CRL_REG_LEN_08BIT, 0x00}, + {0xd168, CRL_REG_LEN_08BIT, 0x10}, + {0xd169, CRL_REG_LEN_08BIT, 0x00}, + {0xd16a, CRL_REG_LEN_08BIT, 0x00}, + {0xd16b, CRL_REG_LEN_08BIT, 0x10}, + {0xd16c, CRL_REG_LEN_08BIT, 0xa9}, + {0xd16d, CRL_REG_LEN_08BIT, 0x48}, + {0xd16e, CRL_REG_LEN_08BIT, 0x67}, + {0xd16f, CRL_REG_LEN_08BIT, 0x02}, + {0xd170, CRL_REG_LEN_08BIT, 0xb8}, + {0xd171, CRL_REG_LEN_08BIT, 0xa3}, + {0xd172, CRL_REG_LEN_08BIT, 0x00}, + {0xd173, CRL_REG_LEN_08BIT, 0x19}, + {0xd174, CRL_REG_LEN_08BIT, 0x8c}, + {0xd175, CRL_REG_LEN_08BIT, 0x8a}, + {0xd176, CRL_REG_LEN_08BIT, 0x00}, + {0xd177, CRL_REG_LEN_08BIT, 0x00}, + {0xd178, CRL_REG_LEN_08BIT, 0xa9}, + {0xd179, CRL_REG_LEN_08BIT, 0x68}, + {0xd17a, CRL_REG_LEN_08BIT, 0x67}, + {0xd17b, CRL_REG_LEN_08BIT, 0x03}, + {0xd17c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd17d, CRL_REG_LEN_08BIT, 0xc4}, + {0xd17e, CRL_REG_LEN_08BIT, 0x00}, + {0xd17f, CRL_REG_LEN_08BIT, 0x08}, + {0xd180, CRL_REG_LEN_08BIT, 0x8c}, + {0xd181, CRL_REG_LEN_08BIT, 0x6b}, + {0xd182, CRL_REG_LEN_08BIT, 0x00}, + {0xd183, CRL_REG_LEN_08BIT, 0x00}, + {0xd184, CRL_REG_LEN_08BIT, 0xb8}, + {0xd185, CRL_REG_LEN_08BIT, 0x85}, + {0xd186, CRL_REG_LEN_08BIT, 0x00}, + {0xd187, CRL_REG_LEN_08BIT, 0x98}, + {0xd188, CRL_REG_LEN_08BIT, 0xe0}, + {0xd189, CRL_REG_LEN_08BIT, 0x63}, + {0xd18a, CRL_REG_LEN_08BIT, 0x30}, + {0xd18b, CRL_REG_LEN_08BIT, 0x04}, + {0xd18c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd18d, CRL_REG_LEN_08BIT, 0x64}, + {0xd18e, CRL_REG_LEN_08BIT, 0x18}, + {0xd18f, CRL_REG_LEN_08BIT, 0x00}, + {0xd190, CRL_REG_LEN_08BIT, 0xa4}, + {0xd191, CRL_REG_LEN_08BIT, 0x83}, + {0xd192, CRL_REG_LEN_08BIT, 0xff}, + {0xd193, CRL_REG_LEN_08BIT, 0xff}, + {0xd194, CRL_REG_LEN_08BIT, 0xb8}, + {0xd195, CRL_REG_LEN_08BIT, 0x64}, + {0xd196, CRL_REG_LEN_08BIT, 0x00}, + {0xd197, CRL_REG_LEN_08BIT, 0x48}, + {0xd198, CRL_REG_LEN_08BIT, 0xd8}, + {0xd199, CRL_REG_LEN_08BIT, 0x0a}, + {0xd19a, CRL_REG_LEN_08BIT, 0x18}, + {0xd19b, CRL_REG_LEN_08BIT, 0x00}, + {0xd19c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd19d, CRL_REG_LEN_08BIT, 0x0b}, + {0xd19e, CRL_REG_LEN_08BIT, 0x20}, + {0xd19f, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a0, CRL_REG_LEN_08BIT, 0x9c}, + 
{0xd1a1, CRL_REG_LEN_08BIT, 0x60}, + {0xd1a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1a5, CRL_REG_LEN_08BIT, 0x07}, + {0xd1a6, CRL_REG_LEN_08BIT, 0x18}, + {0xd1a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1a8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1a9, CRL_REG_LEN_08BIT, 0x68}, + {0xd1aa, CRL_REG_LEN_08BIT, 0x38}, + {0xd1ab, CRL_REG_LEN_08BIT, 0x22}, + {0xd1ac, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ad, CRL_REG_LEN_08BIT, 0x80}, + {0xd1ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd1af, CRL_REG_LEN_08BIT, 0x70}, + {0xd1b0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1b1, CRL_REG_LEN_08BIT, 0xe8}, + {0xd1b2, CRL_REG_LEN_08BIT, 0x38}, + {0xd1b3, CRL_REG_LEN_08BIT, 0x43}, + {0xd1b4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd1b5, CRL_REG_LEN_08BIT, 0x03}, + {0xd1b6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1b8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1b9, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bb, CRL_REG_LEN_08BIT, 0x00}, + {0xd1bc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1bd, CRL_REG_LEN_08BIT, 0xc8}, + {0xd1be, CRL_REG_LEN_08BIT, 0x38}, + {0xd1bf, CRL_REG_LEN_08BIT, 0x42}, + {0xd1c0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1c1, CRL_REG_LEN_08BIT, 0x66}, + {0xd1c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1c5, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1c7, CRL_REG_LEN_08BIT, 0x01}, + {0xd1c8, CRL_REG_LEN_08BIT, 0xb8}, + {0xd1c9, CRL_REG_LEN_08BIT, 0x83}, + {0xd1ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cb, CRL_REG_LEN_08BIT, 0x08}, + {0xd1cc, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1cd, CRL_REG_LEN_08BIT, 0xa5}, + {0xd1ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd1cf, CRL_REG_LEN_08BIT, 0xff}, + {0xd1d0, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1d1, CRL_REG_LEN_08BIT, 0x67}, + {0xd1d2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d4, CRL_REG_LEN_08BIT, 0xe0}, + {0xd1d5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1d6, CRL_REG_LEN_08BIT, 0x20}, + {0xd1d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1d8, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1d9, CRL_REG_LEN_08BIT, 0x63}, + {0xd1da, CRL_REG_LEN_08BIT, 0xff}, + {0xd1db, CRL_REG_LEN_08BIT, 0xff}, + {0xd1dc, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1dd, CRL_REG_LEN_08BIT, 0x43}, + {0xd1de, CRL_REG_LEN_08BIT, 0x00}, + {0xd1df, CRL_REG_LEN_08BIT, 0x07}, + {0xd1e0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd1e1, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e3, CRL_REG_LEN_08BIT, 0x5b}, + {0xd1e4, CRL_REG_LEN_08BIT, 0xbc}, + {0xd1e5, CRL_REG_LEN_08BIT, 0x05}, + {0xd1e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1e7, CRL_REG_LEN_08BIT, 0x02}, + {0xd1e8, CRL_REG_LEN_08BIT, 0x03}, + {0xd1e9, CRL_REG_LEN_08BIT, 0xff}, + {0xd1ea, CRL_REG_LEN_08BIT, 0xff}, + {0xd1eb, CRL_REG_LEN_08BIT, 0xf6}, + {0xd1ec, CRL_REG_LEN_08BIT, 0x9c}, + {0xd1ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xd1ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd1ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f1, CRL_REG_LEN_08BIT, 0xa4}, + {0xd1f2, CRL_REG_LEN_08BIT, 0x55}, + {0xd1f3, CRL_REG_LEN_08BIT, 0x86}, + {0xd1f4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd1f5, CRL_REG_LEN_08BIT, 0x63}, + {0xd1f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd1f8, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1f9, CRL_REG_LEN_08BIT, 0xc4}, + {0xd1fa, CRL_REG_LEN_08BIT, 0x6e}, + {0xd1fb, CRL_REG_LEN_08BIT, 0x45}, + {0xd1fc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd1fd, CRL_REG_LEN_08BIT, 0xe4}, + {0xd1fe, CRL_REG_LEN_08BIT, 0x55}, + {0xd1ff, CRL_REG_LEN_08BIT, 0x87}, + {0xd200, CRL_REG_LEN_08BIT, 0xd8}, + 
{0xd201, CRL_REG_LEN_08BIT, 0x05}, + {0xd202, CRL_REG_LEN_08BIT, 0x18}, + {0xd203, CRL_REG_LEN_08BIT, 0x00}, + {0xd204, CRL_REG_LEN_08BIT, 0x8c}, + {0xd205, CRL_REG_LEN_08BIT, 0x66}, + {0xd206, CRL_REG_LEN_08BIT, 0x00}, + {0xd207, CRL_REG_LEN_08BIT, 0x00}, + {0xd208, CRL_REG_LEN_08BIT, 0xa8}, + {0xd209, CRL_REG_LEN_08BIT, 0xa4}, + {0xd20a, CRL_REG_LEN_08BIT, 0x6e}, + {0xd20b, CRL_REG_LEN_08BIT, 0x46}, + {0xd20c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd20d, CRL_REG_LEN_08BIT, 0x07}, + {0xd20e, CRL_REG_LEN_08BIT, 0x18}, + {0xd20f, CRL_REG_LEN_08BIT, 0x00}, + {0xd210, CRL_REG_LEN_08BIT, 0xa8}, + {0xd211, CRL_REG_LEN_08BIT, 0x84}, + {0xd212, CRL_REG_LEN_08BIT, 0x55}, + {0xd213, CRL_REG_LEN_08BIT, 0x88}, + {0xd214, CRL_REG_LEN_08BIT, 0x8c}, + {0xd215, CRL_REG_LEN_08BIT, 0x65}, + {0xd216, CRL_REG_LEN_08BIT, 0x00}, + {0xd217, CRL_REG_LEN_08BIT, 0x00}, + {0xd218, CRL_REG_LEN_08BIT, 0xd8}, + {0xd219, CRL_REG_LEN_08BIT, 0x04}, + {0xd21a, CRL_REG_LEN_08BIT, 0x18}, + {0xd21b, CRL_REG_LEN_08BIT, 0x00}, + {0xd21c, CRL_REG_LEN_08BIT, 0x03}, + {0xd21d, CRL_REG_LEN_08BIT, 0xff}, + {0xd21e, CRL_REG_LEN_08BIT, 0xff}, + {0xd21f, CRL_REG_LEN_08BIT, 0xce}, + {0xd220, CRL_REG_LEN_08BIT, 0x19}, + {0xd221, CRL_REG_LEN_08BIT, 0x00}, + {0xd222, CRL_REG_LEN_08BIT, 0x80}, + {0xd223, CRL_REG_LEN_08BIT, 0x06}, + {0xd224, CRL_REG_LEN_08BIT, 0x8c}, + {0xd225, CRL_REG_LEN_08BIT, 0x63}, + {0xd226, CRL_REG_LEN_08BIT, 0x00}, + {0xd227, CRL_REG_LEN_08BIT, 0x00}, + {0xd228, CRL_REG_LEN_08BIT, 0xa4}, + {0xd229, CRL_REG_LEN_08BIT, 0x63}, + {0xd22a, CRL_REG_LEN_08BIT, 0x00}, + {0xd22b, CRL_REG_LEN_08BIT, 0x40}, + {0xd22c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd22d, CRL_REG_LEN_08BIT, 0x23}, + {0xd22e, CRL_REG_LEN_08BIT, 0x00}, + {0xd22f, CRL_REG_LEN_08BIT, 0x00}, + {0xd230, CRL_REG_LEN_08BIT, 0x13}, + {0xd231, CRL_REG_LEN_08BIT, 0xff}, + {0xd232, CRL_REG_LEN_08BIT, 0xff}, + {0xd233, CRL_REG_LEN_08BIT, 0xc8}, + {0xd234, CRL_REG_LEN_08BIT, 0x9d}, + {0xd235, CRL_REG_LEN_08BIT, 0x00}, + {0xd236, CRL_REG_LEN_08BIT, 0x00}, + {0xd237, CRL_REG_LEN_08BIT, 0x40}, + {0xd238, CRL_REG_LEN_08BIT, 0xa8}, + {0xd239, CRL_REG_LEN_08BIT, 0x64}, + {0xd23a, CRL_REG_LEN_08BIT, 0x55}, + {0xd23b, CRL_REG_LEN_08BIT, 0x86}, + {0xd23c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd23d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd23e, CRL_REG_LEN_08BIT, 0x55}, + {0xd23f, CRL_REG_LEN_08BIT, 0x87}, + {0xd240, CRL_REG_LEN_08BIT, 0xd8}, + {0xd241, CRL_REG_LEN_08BIT, 0x03}, + {0xd242, CRL_REG_LEN_08BIT, 0x40}, + {0xd243, CRL_REG_LEN_08BIT, 0x00}, + {0xd244, CRL_REG_LEN_08BIT, 0xa8}, + {0xd245, CRL_REG_LEN_08BIT, 0x64}, + {0xd246, CRL_REG_LEN_08BIT, 0x55}, + {0xd247, CRL_REG_LEN_08BIT, 0x88}, + {0xd248, CRL_REG_LEN_08BIT, 0xd8}, + {0xd249, CRL_REG_LEN_08BIT, 0x05}, + {0xd24a, CRL_REG_LEN_08BIT, 0x40}, + {0xd24b, CRL_REG_LEN_08BIT, 0x00}, + {0xd24c, CRL_REG_LEN_08BIT, 0xd8}, + {0xd24d, CRL_REG_LEN_08BIT, 0x03}, + {0xd24e, CRL_REG_LEN_08BIT, 0x40}, + {0xd24f, CRL_REG_LEN_08BIT, 0x00}, + {0xd250, CRL_REG_LEN_08BIT, 0x03}, + {0xd251, CRL_REG_LEN_08BIT, 0xff}, + {0xd252, CRL_REG_LEN_08BIT, 0xff}, + {0xd253, CRL_REG_LEN_08BIT, 0xc1}, + {0xd254, CRL_REG_LEN_08BIT, 0x19}, + {0xd255, CRL_REG_LEN_08BIT, 0x00}, + {0xd256, CRL_REG_LEN_08BIT, 0x80}, + {0xd257, CRL_REG_LEN_08BIT, 0x06}, + {0xd258, CRL_REG_LEN_08BIT, 0x94}, + {0xd259, CRL_REG_LEN_08BIT, 0x84}, + {0xd25a, CRL_REG_LEN_08BIT, 0x00}, + {0xd25b, CRL_REG_LEN_08BIT, 0x72}, + {0xd25c, CRL_REG_LEN_08BIT, 0xe5}, + {0xd25d, CRL_REG_LEN_08BIT, 0xa4}, + {0xd25e, CRL_REG_LEN_08BIT, 0x60}, + {0xd25f, CRL_REG_LEN_08BIT, 0x00}, + {0xd260, CRL_REG_LEN_08BIT, 0x0c}, + 
{0xd261, CRL_REG_LEN_08BIT, 0x00}, + {0xd262, CRL_REG_LEN_08BIT, 0x00}, + {0xd263, CRL_REG_LEN_08BIT, 0x3f}, + {0xd264, CRL_REG_LEN_08BIT, 0x9d}, + {0xd265, CRL_REG_LEN_08BIT, 0x60}, + {0xd266, CRL_REG_LEN_08BIT, 0x01}, + {0xd267, CRL_REG_LEN_08BIT, 0x00}, + {0xd268, CRL_REG_LEN_08BIT, 0x85}, + {0xd269, CRL_REG_LEN_08BIT, 0x4e}, + {0xd26a, CRL_REG_LEN_08BIT, 0x00}, + {0xd26b, CRL_REG_LEN_08BIT, 0x00}, + {0xd26c, CRL_REG_LEN_08BIT, 0x98}, + {0xd26d, CRL_REG_LEN_08BIT, 0x70}, + {0xd26e, CRL_REG_LEN_08BIT, 0x00}, + {0xd26f, CRL_REG_LEN_08BIT, 0x00}, + {0xd270, CRL_REG_LEN_08BIT, 0x8c}, + {0xd271, CRL_REG_LEN_08BIT, 0x8a}, + {0xd272, CRL_REG_LEN_08BIT, 0x00}, + {0xd273, CRL_REG_LEN_08BIT, 0x6f}, + {0xd274, CRL_REG_LEN_08BIT, 0xe5}, + {0xd275, CRL_REG_LEN_08BIT, 0x63}, + {0xd276, CRL_REG_LEN_08BIT, 0x20}, + {0xd277, CRL_REG_LEN_08BIT, 0x00}, + {0xd278, CRL_REG_LEN_08BIT, 0x10}, + {0xd279, CRL_REG_LEN_08BIT, 0x00}, + {0xd27a, CRL_REG_LEN_08BIT, 0x00}, + {0xd27b, CRL_REG_LEN_08BIT, 0x07}, + {0xd27c, CRL_REG_LEN_08BIT, 0x15}, + {0xd27d, CRL_REG_LEN_08BIT, 0x00}, + {0xd27e, CRL_REG_LEN_08BIT, 0x00}, + {0xd27f, CRL_REG_LEN_08BIT, 0x00}, + {0xd280, CRL_REG_LEN_08BIT, 0x8c}, + {0xd281, CRL_REG_LEN_08BIT, 0xaa}, + {0xd282, CRL_REG_LEN_08BIT, 0x00}, + {0xd283, CRL_REG_LEN_08BIT, 0x6e}, + {0xd284, CRL_REG_LEN_08BIT, 0xe0}, + {0xd285, CRL_REG_LEN_08BIT, 0x63}, + {0xd286, CRL_REG_LEN_08BIT, 0x28}, + {0xd287, CRL_REG_LEN_08BIT, 0x02}, + {0xd288, CRL_REG_LEN_08BIT, 0xe0}, + {0xd289, CRL_REG_LEN_08BIT, 0x84}, + {0xd28a, CRL_REG_LEN_08BIT, 0x28}, + {0xd28b, CRL_REG_LEN_08BIT, 0x02}, + {0xd28c, CRL_REG_LEN_08BIT, 0x07}, + {0xd28d, CRL_REG_LEN_08BIT, 0xff}, + {0xd28e, CRL_REG_LEN_08BIT, 0xf8}, + {0xd28f, CRL_REG_LEN_08BIT, 0x66}, + {0xd290, CRL_REG_LEN_08BIT, 0xe0}, + {0xd291, CRL_REG_LEN_08BIT, 0x63}, + {0xd292, CRL_REG_LEN_08BIT, 0x5b}, + {0xd293, CRL_REG_LEN_08BIT, 0x06}, + {0xd294, CRL_REG_LEN_08BIT, 0x8c}, + {0xd295, CRL_REG_LEN_08BIT, 0x6a}, + {0xd296, CRL_REG_LEN_08BIT, 0x00}, + {0xd297, CRL_REG_LEN_08BIT, 0x77}, + {0xd298, CRL_REG_LEN_08BIT, 0xe0}, + {0xd299, CRL_REG_LEN_08BIT, 0x63}, + {0xd29a, CRL_REG_LEN_08BIT, 0x5b}, + {0xd29b, CRL_REG_LEN_08BIT, 0x06}, + {0xd29c, CRL_REG_LEN_08BIT, 0xbd}, + {0xd29d, CRL_REG_LEN_08BIT, 0x63}, + {0xd29e, CRL_REG_LEN_08BIT, 0x00}, + {0xd29f, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a0, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2a1, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a3, CRL_REG_LEN_08BIT, 0x3c}, + {0xd2a4, CRL_REG_LEN_08BIT, 0x15}, + {0xd2a5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2a8, CRL_REG_LEN_08BIT, 0x8c}, + {0xd2a9, CRL_REG_LEN_08BIT, 0x8a}, + {0xd2aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ab, CRL_REG_LEN_08BIT, 0x78}, + {0xd2ac, CRL_REG_LEN_08BIT, 0xb8}, + {0xd2ad, CRL_REG_LEN_08BIT, 0x63}, + {0xd2ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd2af, CRL_REG_LEN_08BIT, 0x88}, + {0xd2b0, CRL_REG_LEN_08BIT, 0xe1}, + {0xd2b1, CRL_REG_LEN_08BIT, 0x64}, + {0xd2b2, CRL_REG_LEN_08BIT, 0x5b}, + {0xd2b3, CRL_REG_LEN_08BIT, 0x06}, + {0xd2b4, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2b5, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2b8, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2b9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd2bb, CRL_REG_LEN_08BIT, 0x34}, + {0xd2bc, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2bd, CRL_REG_LEN_08BIT, 0x01}, + {0xd2be, CRL_REG_LEN_08BIT, 0x18}, + {0xd2bf, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c0, CRL_REG_LEN_08BIT, 0xb9}, + 
{0xd2c1, CRL_REG_LEN_08BIT, 0x6b}, + {0xd2c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c3, CRL_REG_LEN_08BIT, 0x88}, + {0xd2c4, CRL_REG_LEN_08BIT, 0x85}, + {0xd2c5, CRL_REG_LEN_08BIT, 0x01}, + {0xd2c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2c7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2c8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2c9, CRL_REG_LEN_08BIT, 0x68}, + {0xd2ca, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cc, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2cd, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd2cf, CRL_REG_LEN_08BIT, 0x2c}, + {0xd2d0, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2d1, CRL_REG_LEN_08BIT, 0x01}, + {0xd2d2, CRL_REG_LEN_08BIT, 0x58}, + {0xd2d3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2d4, CRL_REG_LEN_08BIT, 0x84}, + {0xd2d5, CRL_REG_LEN_08BIT, 0x81}, + {0xd2d6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2d7, CRL_REG_LEN_08BIT, 0x14}, + {0xd2d8, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2d9, CRL_REG_LEN_08BIT, 0xa4}, + {0xd2da, CRL_REG_LEN_08BIT, 0x01}, + {0xd2db, CRL_REG_LEN_08BIT, 0x00}, + {0xd2dc, CRL_REG_LEN_08BIT, 0x10}, + {0xd2dd, CRL_REG_LEN_08BIT, 0x00}, + {0xd2de, CRL_REG_LEN_08BIT, 0x00}, + {0xd2df, CRL_REG_LEN_08BIT, 0x05}, + {0xd2e0, CRL_REG_LEN_08BIT, 0x84}, + {0xd2e1, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e3, CRL_REG_LEN_08BIT, 0x18}, + {0xd2e4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd2e5, CRL_REG_LEN_08BIT, 0xa0}, + {0xd2e6, CRL_REG_LEN_08BIT, 0x01}, + {0xd2e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd2e8, CRL_REG_LEN_08BIT, 0xd4}, + {0xd2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xd2ea, CRL_REG_LEN_08BIT, 0x28}, + {0xd2eb, CRL_REG_LEN_08BIT, 0x14}, + {0xd2ec, CRL_REG_LEN_08BIT, 0x84}, + {0xd2ed, CRL_REG_LEN_08BIT, 0xc1}, + {0xd2ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ef, CRL_REG_LEN_08BIT, 0x18}, + {0xd2f0, CRL_REG_LEN_08BIT, 0xbd}, + {0xd2f1, CRL_REG_LEN_08BIT, 0x66}, + {0xd2f2, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f4, CRL_REG_LEN_08BIT, 0x0c}, + {0xd2f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd2f7, CRL_REG_LEN_08BIT, 0x20}, + {0xd2f8, CRL_REG_LEN_08BIT, 0x9d}, + {0xd2f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd2fc, CRL_REG_LEN_08BIT, 0x84}, + {0xd2fd, CRL_REG_LEN_08BIT, 0x61}, + {0xd2fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd2ff, CRL_REG_LEN_08BIT, 0x18}, + {0xd300, CRL_REG_LEN_08BIT, 0xbd}, + {0xd301, CRL_REG_LEN_08BIT, 0xa3}, + {0xd302, CRL_REG_LEN_08BIT, 0x01}, + {0xd303, CRL_REG_LEN_08BIT, 0x00}, + {0xd304, CRL_REG_LEN_08BIT, 0x10}, + {0xd305, CRL_REG_LEN_08BIT, 0x00}, + {0xd306, CRL_REG_LEN_08BIT, 0x00}, + {0xd307, CRL_REG_LEN_08BIT, 0x03}, + {0xd308, CRL_REG_LEN_08BIT, 0x9c}, + {0xd309, CRL_REG_LEN_08BIT, 0x80}, + {0xd30a, CRL_REG_LEN_08BIT, 0x01}, + {0xd30b, CRL_REG_LEN_08BIT, 0x00}, + {0xd30c, CRL_REG_LEN_08BIT, 0xd4}, + {0xd30d, CRL_REG_LEN_08BIT, 0x01}, + {0xd30e, CRL_REG_LEN_08BIT, 0x20}, + {0xd30f, CRL_REG_LEN_08BIT, 0x18}, + {0xd310, CRL_REG_LEN_08BIT, 0x18}, + {0xd311, CRL_REG_LEN_08BIT, 0x60}, + {0xd312, CRL_REG_LEN_08BIT, 0x80}, + {0xd313, CRL_REG_LEN_08BIT, 0x06}, + {0xd314, CRL_REG_LEN_08BIT, 0x85}, + {0xd315, CRL_REG_LEN_08BIT, 0x01}, + {0xd316, CRL_REG_LEN_08BIT, 0x00}, + {0xd317, CRL_REG_LEN_08BIT, 0x14}, + {0xd318, CRL_REG_LEN_08BIT, 0xa8}, + {0xd319, CRL_REG_LEN_08BIT, 0x83}, + {0xd31a, CRL_REG_LEN_08BIT, 0x38}, + {0xd31b, CRL_REG_LEN_08BIT, 0x29}, + {0xd31c, CRL_REG_LEN_08BIT, 0xa8}, + {0xd31d, CRL_REG_LEN_08BIT, 0xc3}, + {0xd31e, CRL_REG_LEN_08BIT, 0x40}, + {0xd31f, CRL_REG_LEN_08BIT, 0x08}, + {0xd320, CRL_REG_LEN_08BIT, 0x8c}, + 
{0xd321, CRL_REG_LEN_08BIT, 0x84}, + {0xd322, CRL_REG_LEN_08BIT, 0x00}, + {0xd323, CRL_REG_LEN_08BIT, 0x00}, + {0xd324, CRL_REG_LEN_08BIT, 0xa8}, + {0xd325, CRL_REG_LEN_08BIT, 0xa3}, + {0xd326, CRL_REG_LEN_08BIT, 0x38}, + {0xd327, CRL_REG_LEN_08BIT, 0x2a}, + {0xd328, CRL_REG_LEN_08BIT, 0xa8}, + {0xd329, CRL_REG_LEN_08BIT, 0xe3}, + {0xd32a, CRL_REG_LEN_08BIT, 0x40}, + {0xd32b, CRL_REG_LEN_08BIT, 0x09}, + {0xd32c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd32d, CRL_REG_LEN_08BIT, 0x64}, + {0xd32e, CRL_REG_LEN_08BIT, 0x40}, + {0xd32f, CRL_REG_LEN_08BIT, 0x00}, + {0xd330, CRL_REG_LEN_08BIT, 0xd8}, + {0xd331, CRL_REG_LEN_08BIT, 0x06}, + {0xd332, CRL_REG_LEN_08BIT, 0x18}, + {0xd333, CRL_REG_LEN_08BIT, 0x00}, + {0xd334, CRL_REG_LEN_08BIT, 0x8c}, + {0xd335, CRL_REG_LEN_08BIT, 0x65}, + {0xd336, CRL_REG_LEN_08BIT, 0x00}, + {0xd337, CRL_REG_LEN_08BIT, 0x00}, + {0xd338, CRL_REG_LEN_08BIT, 0x84}, + {0xd339, CRL_REG_LEN_08BIT, 0x81}, + {0xd33a, CRL_REG_LEN_08BIT, 0x00}, + {0xd33b, CRL_REG_LEN_08BIT, 0x18}, + {0xd33c, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33d, CRL_REG_LEN_08BIT, 0xe3}, + {0xd33e, CRL_REG_LEN_08BIT, 0x20}, + {0xd33f, CRL_REG_LEN_08BIT, 0x00}, + {0xd340, CRL_REG_LEN_08BIT, 0xd8}, + {0xd341, CRL_REG_LEN_08BIT, 0x07}, + {0xd342, CRL_REG_LEN_08BIT, 0xf8}, + {0xd343, CRL_REG_LEN_08BIT, 0x00}, + {0xd344, CRL_REG_LEN_08BIT, 0x03}, + {0xd345, CRL_REG_LEN_08BIT, 0xff}, + {0xd346, CRL_REG_LEN_08BIT, 0xff}, + {0xd347, CRL_REG_LEN_08BIT, 0x6f}, + {0xd348, CRL_REG_LEN_08BIT, 0x18}, + {0xd349, CRL_REG_LEN_08BIT, 0x60}, + {0xd34a, CRL_REG_LEN_08BIT, 0x00}, + {0xd34b, CRL_REG_LEN_08BIT, 0x01}, + {0xd34c, CRL_REG_LEN_08BIT, 0x0f}, + {0xd34d, CRL_REG_LEN_08BIT, 0xff}, + {0xd34e, CRL_REG_LEN_08BIT, 0xff}, + {0xd34f, CRL_REG_LEN_08BIT, 0x9d}, + {0xd350, CRL_REG_LEN_08BIT, 0x18}, + {0xd351, CRL_REG_LEN_08BIT, 0x60}, + {0xd352, CRL_REG_LEN_08BIT, 0x80}, + {0xd353, CRL_REG_LEN_08BIT, 0x06}, + {0xd354, CRL_REG_LEN_08BIT, 0x00}, + {0xd355, CRL_REG_LEN_08BIT, 0x00}, + {0xd356, CRL_REG_LEN_08BIT, 0x00}, + {0xd357, CRL_REG_LEN_08BIT, 0x11}, + {0xd358, CRL_REG_LEN_08BIT, 0xa8}, + {0xd359, CRL_REG_LEN_08BIT, 0x83}, + {0xd35a, CRL_REG_LEN_08BIT, 0x6e}, + {0xd35b, CRL_REG_LEN_08BIT, 0x43}, + {0xd35c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd35d, CRL_REG_LEN_08BIT, 0x6c}, + {0xd35e, CRL_REG_LEN_08BIT, 0x28}, + {0xd35f, CRL_REG_LEN_08BIT, 0x02}, + {0xd360, CRL_REG_LEN_08BIT, 0xe0}, + {0xd361, CRL_REG_LEN_08BIT, 0x84}, + {0xd362, CRL_REG_LEN_08BIT, 0x28}, + {0xd363, CRL_REG_LEN_08BIT, 0x02}, + {0xd364, CRL_REG_LEN_08BIT, 0x07}, + {0xd365, CRL_REG_LEN_08BIT, 0xff}, + {0xd366, CRL_REG_LEN_08BIT, 0xf8}, + {0xd367, CRL_REG_LEN_08BIT, 0x30}, + {0xd368, CRL_REG_LEN_08BIT, 0xb8}, + {0xd369, CRL_REG_LEN_08BIT, 0x63}, + {0xd36a, CRL_REG_LEN_08BIT, 0x00}, + {0xd36b, CRL_REG_LEN_08BIT, 0x08}, + {0xd36c, CRL_REG_LEN_08BIT, 0x03}, + {0xd36d, CRL_REG_LEN_08BIT, 0xff}, + {0xd36e, CRL_REG_LEN_08BIT, 0xff}, + {0xd36f, CRL_REG_LEN_08BIT, 0xc0}, + {0xd370, CRL_REG_LEN_08BIT, 0x85}, + {0xd371, CRL_REG_LEN_08BIT, 0x4e}, + {0xd372, CRL_REG_LEN_08BIT, 0x00}, + {0xd373, CRL_REG_LEN_08BIT, 0x00}, + {0xd374, CRL_REG_LEN_08BIT, 0x03}, + {0xd375, CRL_REG_LEN_08BIT, 0xff}, + {0xd376, CRL_REG_LEN_08BIT, 0xff}, + {0xd377, CRL_REG_LEN_08BIT, 0xe7}, + {0xd378, CRL_REG_LEN_08BIT, 0xd4}, + {0xd379, CRL_REG_LEN_08BIT, 0x01}, + {0xd37a, CRL_REG_LEN_08BIT, 0x40}, + {0xd37b, CRL_REG_LEN_08BIT, 0x18}, + {0xd37c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd37d, CRL_REG_LEN_08BIT, 0x60}, + {0xd37e, CRL_REG_LEN_08BIT, 0x00}, + {0xd37f, CRL_REG_LEN_08BIT, 0x00}, + {0xd380, CRL_REG_LEN_08BIT, 0x03}, + 
{0xd381, CRL_REG_LEN_08BIT, 0xff}, + {0xd382, CRL_REG_LEN_08BIT, 0xff}, + {0xd383, CRL_REG_LEN_08BIT, 0xdb}, + {0xd384, CRL_REG_LEN_08BIT, 0xd4}, + {0xd385, CRL_REG_LEN_08BIT, 0x01}, + {0xd386, CRL_REG_LEN_08BIT, 0x18}, + {0xd387, CRL_REG_LEN_08BIT, 0x14}, + {0xd388, CRL_REG_LEN_08BIT, 0x03}, + {0xd389, CRL_REG_LEN_08BIT, 0xff}, + {0xd38a, CRL_REG_LEN_08BIT, 0xff}, + {0xd38b, CRL_REG_LEN_08BIT, 0xce}, + {0xd38c, CRL_REG_LEN_08BIT, 0x9d}, + {0xd38d, CRL_REG_LEN_08BIT, 0x6b}, + {0xd38e, CRL_REG_LEN_08BIT, 0x00}, + {0xd38f, CRL_REG_LEN_08BIT, 0xff}, + {0xd390, CRL_REG_LEN_08BIT, 0x03}, + {0xd391, CRL_REG_LEN_08BIT, 0xff}, + {0xd392, CRL_REG_LEN_08BIT, 0xff}, + {0xd393, CRL_REG_LEN_08BIT, 0xc6}, + {0xd394, CRL_REG_LEN_08BIT, 0x9c}, + {0xd395, CRL_REG_LEN_08BIT, 0x63}, + {0xd396, CRL_REG_LEN_08BIT, 0x00}, + {0xd397, CRL_REG_LEN_08BIT, 0xff}, + {0xd398, CRL_REG_LEN_08BIT, 0xa8}, + {0xd399, CRL_REG_LEN_08BIT, 0xe3}, + {0xd39a, CRL_REG_LEN_08BIT, 0x38}, + {0xd39b, CRL_REG_LEN_08BIT, 0x0f}, + {0xd39c, CRL_REG_LEN_08BIT, 0x8c}, + {0xd39d, CRL_REG_LEN_08BIT, 0x84}, + {0xd39e, CRL_REG_LEN_08BIT, 0x00}, + {0xd39f, CRL_REG_LEN_08BIT, 0x00}, + {0xd3a0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3a1, CRL_REG_LEN_08BIT, 0xa3}, + {0xd3a2, CRL_REG_LEN_08BIT, 0x38}, + {0xd3a3, CRL_REG_LEN_08BIT, 0x0e}, + {0xd3a4, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3a5, CRL_REG_LEN_08BIT, 0xc3}, + {0xd3a6, CRL_REG_LEN_08BIT, 0x6e}, + {0xd3a7, CRL_REG_LEN_08BIT, 0x42}, + {0xd3a8, CRL_REG_LEN_08BIT, 0xd8}, + {0xd3a9, CRL_REG_LEN_08BIT, 0x07}, + {0xd3aa, CRL_REG_LEN_08BIT, 0x20}, + {0xd3ab, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ac, CRL_REG_LEN_08BIT, 0x8c}, + {0xd3ad, CRL_REG_LEN_08BIT, 0x66}, + {0xd3ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd3af, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd3b1, CRL_REG_LEN_08BIT, 0x05}, + {0xd3b2, CRL_REG_LEN_08BIT, 0x18}, + {0xd3b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b4, CRL_REG_LEN_08BIT, 0x85}, + {0xd3b5, CRL_REG_LEN_08BIT, 0x21}, + {0xd3b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3b8, CRL_REG_LEN_08BIT, 0x85}, + {0xd3b9, CRL_REG_LEN_08BIT, 0x41}, + {0xd3ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd3bb, CRL_REG_LEN_08BIT, 0x04}, + {0xd3bc, CRL_REG_LEN_08BIT, 0x85}, + {0xd3bd, CRL_REG_LEN_08BIT, 0x81}, + {0xd3be, CRL_REG_LEN_08BIT, 0x00}, + {0xd3bf, CRL_REG_LEN_08BIT, 0x08}, + {0xd3c0, CRL_REG_LEN_08BIT, 0x85}, + {0xd3c1, CRL_REG_LEN_08BIT, 0xc1}, + {0xd3c2, CRL_REG_LEN_08BIT, 0x00}, + {0xd3c3, CRL_REG_LEN_08BIT, 0x0c}, + {0xd3c4, CRL_REG_LEN_08BIT, 0x86}, + {0xd3c5, CRL_REG_LEN_08BIT, 0x01}, + {0xd3c6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3c7, CRL_REG_LEN_08BIT, 0x10}, + {0xd3c8, CRL_REG_LEN_08BIT, 0x44}, + {0xd3c9, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ca, CRL_REG_LEN_08BIT, 0x48}, + {0xd3cb, CRL_REG_LEN_08BIT, 0x00}, + {0xd3cc, CRL_REG_LEN_08BIT, 0x9c}, + {0xd3cd, CRL_REG_LEN_08BIT, 0x21}, + {0xd3ce, CRL_REG_LEN_08BIT, 0x00}, + {0xd3cf, CRL_REG_LEN_08BIT, 0x1c}, + {0xd3d0, CRL_REG_LEN_08BIT, 0x9c}, + {0xd3d1, CRL_REG_LEN_08BIT, 0x21}, + {0xd3d2, CRL_REG_LEN_08BIT, 0xff}, + {0xd3d3, CRL_REG_LEN_08BIT, 0xfc}, + {0xd3d4, CRL_REG_LEN_08BIT, 0xd4}, + {0xd3d5, CRL_REG_LEN_08BIT, 0x01}, + {0xd3d6, CRL_REG_LEN_08BIT, 0x48}, + {0xd3d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3d8, CRL_REG_LEN_08BIT, 0x18}, + {0xd3d9, CRL_REG_LEN_08BIT, 0x60}, + {0xd3da, CRL_REG_LEN_08BIT, 0x00}, + {0xd3db, CRL_REG_LEN_08BIT, 0x01}, + {0xd3dc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3dd, CRL_REG_LEN_08BIT, 0x63}, + {0xd3de, CRL_REG_LEN_08BIT, 0x07}, + {0xd3df, CRL_REG_LEN_08BIT, 0x80}, + {0xd3e0, CRL_REG_LEN_08BIT, 0x8c}, + 
{0xd3e1, CRL_REG_LEN_08BIT, 0x63}, + {0xd3e2, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e3, CRL_REG_LEN_08BIT, 0x68}, + {0xd3e4, CRL_REG_LEN_08BIT, 0xbc}, + {0xd3e5, CRL_REG_LEN_08BIT, 0x03}, + {0xd3e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3e8, CRL_REG_LEN_08BIT, 0x10}, + {0xd3e9, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd3eb, CRL_REG_LEN_08BIT, 0x0c}, + {0xd3ec, CRL_REG_LEN_08BIT, 0x15}, + {0xd3ed, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ee, CRL_REG_LEN_08BIT, 0x00}, + {0xd3ef, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f0, CRL_REG_LEN_08BIT, 0x07}, + {0xd3f1, CRL_REG_LEN_08BIT, 0xff}, + {0xd3f2, CRL_REG_LEN_08BIT, 0xd9}, + {0xd3f3, CRL_REG_LEN_08BIT, 0x98}, + {0xd3f4, CRL_REG_LEN_08BIT, 0x15}, + {0xd3f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f6, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd3f8, CRL_REG_LEN_08BIT, 0x18}, + {0xd3f9, CRL_REG_LEN_08BIT, 0x60}, + {0xd3fa, CRL_REG_LEN_08BIT, 0x80}, + {0xd3fb, CRL_REG_LEN_08BIT, 0x06}, + {0xd3fc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd3fd, CRL_REG_LEN_08BIT, 0x63}, + {0xd3fe, CRL_REG_LEN_08BIT, 0xc4}, + {0xd3ff, CRL_REG_LEN_08BIT, 0xb8}, + {0xd400, CRL_REG_LEN_08BIT, 0x8c}, + {0xd401, CRL_REG_LEN_08BIT, 0x63}, + {0xd402, CRL_REG_LEN_08BIT, 0x00}, + {0xd403, CRL_REG_LEN_08BIT, 0x00}, + {0xd404, CRL_REG_LEN_08BIT, 0xbc}, + {0xd405, CRL_REG_LEN_08BIT, 0x23}, + {0xd406, CRL_REG_LEN_08BIT, 0x00}, + {0xd407, CRL_REG_LEN_08BIT, 0x01}, + {0xd408, CRL_REG_LEN_08BIT, 0x10}, + {0xd409, CRL_REG_LEN_08BIT, 0x00}, + {0xd40a, CRL_REG_LEN_08BIT, 0x00}, + {0xd40b, CRL_REG_LEN_08BIT, 0x25}, + {0xd40c, CRL_REG_LEN_08BIT, 0x9d}, + {0xd40d, CRL_REG_LEN_08BIT, 0x00}, + {0xd40e, CRL_REG_LEN_08BIT, 0x00}, + {0xd40f, CRL_REG_LEN_08BIT, 0x00}, + {0xd410, CRL_REG_LEN_08BIT, 0x00}, + {0xd411, CRL_REG_LEN_08BIT, 0x00}, + {0xd412, CRL_REG_LEN_08BIT, 0x00}, + {0xd413, CRL_REG_LEN_08BIT, 0x0b}, + {0xd414, CRL_REG_LEN_08BIT, 0xb8}, + {0xd415, CRL_REG_LEN_08BIT, 0xe8}, + {0xd416, CRL_REG_LEN_08BIT, 0x00}, + {0xd417, CRL_REG_LEN_08BIT, 0x02}, + {0xd418, CRL_REG_LEN_08BIT, 0x07}, + {0xd419, CRL_REG_LEN_08BIT, 0xff}, + {0xd41a, CRL_REG_LEN_08BIT, 0xd6}, + {0xd41b, CRL_REG_LEN_08BIT, 0x24}, + {0xd41c, CRL_REG_LEN_08BIT, 0x15}, + {0xd41d, CRL_REG_LEN_08BIT, 0x00}, + {0xd41e, CRL_REG_LEN_08BIT, 0x00}, + {0xd41f, CRL_REG_LEN_08BIT, 0x00}, + {0xd420, CRL_REG_LEN_08BIT, 0x18}, + {0xd421, CRL_REG_LEN_08BIT, 0x60}, + {0xd422, CRL_REG_LEN_08BIT, 0x80}, + {0xd423, CRL_REG_LEN_08BIT, 0x06}, + {0xd424, CRL_REG_LEN_08BIT, 0xa8}, + {0xd425, CRL_REG_LEN_08BIT, 0x63}, + {0xd426, CRL_REG_LEN_08BIT, 0xc4}, + {0xd427, CRL_REG_LEN_08BIT, 0xb8}, + {0xd428, CRL_REG_LEN_08BIT, 0x8c}, + {0xd429, CRL_REG_LEN_08BIT, 0x63}, + {0xd42a, CRL_REG_LEN_08BIT, 0x00}, + {0xd42b, CRL_REG_LEN_08BIT, 0x00}, + {0xd42c, CRL_REG_LEN_08BIT, 0xbc}, + {0xd42d, CRL_REG_LEN_08BIT, 0x23}, + {0xd42e, CRL_REG_LEN_08BIT, 0x00}, + {0xd42f, CRL_REG_LEN_08BIT, 0x01}, + {0xd430, CRL_REG_LEN_08BIT, 0x10}, + {0xd431, CRL_REG_LEN_08BIT, 0x00}, + {0xd432, CRL_REG_LEN_08BIT, 0x00}, + {0xd433, CRL_REG_LEN_08BIT, 0x1b}, + {0xd434, CRL_REG_LEN_08BIT, 0x9d}, + {0xd435, CRL_REG_LEN_08BIT, 0x00}, + {0xd436, CRL_REG_LEN_08BIT, 0x00}, + {0xd437, CRL_REG_LEN_08BIT, 0x00}, + {0xd438, CRL_REG_LEN_08BIT, 0xb8}, + {0xd439, CRL_REG_LEN_08BIT, 0xe8}, + {0xd43a, CRL_REG_LEN_08BIT, 0x00}, + {0xd43b, CRL_REG_LEN_08BIT, 0x02}, + {0xd43c, CRL_REG_LEN_08BIT, 0x9c}, + {0xd43d, CRL_REG_LEN_08BIT, 0xc0}, + {0xd43e, CRL_REG_LEN_08BIT, 0x00}, + {0xd43f, CRL_REG_LEN_08BIT, 0x00}, + {0xd440, CRL_REG_LEN_08BIT, 0x18}, + 
{0xd441, CRL_REG_LEN_08BIT, 0xa0}, + {0xd442, CRL_REG_LEN_08BIT, 0x80}, + {0xd443, CRL_REG_LEN_08BIT, 0x06}, + {0xd444, CRL_REG_LEN_08BIT, 0xe0}, + {0xd445, CRL_REG_LEN_08BIT, 0x67}, + {0xd446, CRL_REG_LEN_08BIT, 0x30}, + {0xd447, CRL_REG_LEN_08BIT, 0x00}, + {0xd448, CRL_REG_LEN_08BIT, 0xa8}, + {0xd449, CRL_REG_LEN_08BIT, 0xa5}, + {0xd44a, CRL_REG_LEN_08BIT, 0xce}, + {0xd44b, CRL_REG_LEN_08BIT, 0xb0}, + {0xd44c, CRL_REG_LEN_08BIT, 0x19}, + {0xd44d, CRL_REG_LEN_08BIT, 0x60}, + {0xd44e, CRL_REG_LEN_08BIT, 0x00}, + {0xd44f, CRL_REG_LEN_08BIT, 0x01}, + {0xd450, CRL_REG_LEN_08BIT, 0xa9}, + {0xd451, CRL_REG_LEN_08BIT, 0x6b}, + {0xd452, CRL_REG_LEN_08BIT, 0x06}, + {0xd453, CRL_REG_LEN_08BIT, 0x14}, + {0xd454, CRL_REG_LEN_08BIT, 0xe0}, + {0xd455, CRL_REG_LEN_08BIT, 0x83}, + {0xd456, CRL_REG_LEN_08BIT, 0x28}, + {0xd457, CRL_REG_LEN_08BIT, 0x00}, + {0xd458, CRL_REG_LEN_08BIT, 0x9c}, + {0xd459, CRL_REG_LEN_08BIT, 0xc6}, + {0xd45a, CRL_REG_LEN_08BIT, 0x00}, + {0xd45b, CRL_REG_LEN_08BIT, 0x01}, + {0xd45c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd45d, CRL_REG_LEN_08BIT, 0x63}, + {0xd45e, CRL_REG_LEN_08BIT, 0x18}, + {0xd45f, CRL_REG_LEN_08BIT, 0x00}, + {0xd460, CRL_REG_LEN_08BIT, 0x8c}, + {0xd461, CRL_REG_LEN_08BIT, 0x84}, + {0xd462, CRL_REG_LEN_08BIT, 0x00}, + {0xd463, CRL_REG_LEN_08BIT, 0x00}, + {0xd464, CRL_REG_LEN_08BIT, 0xe0}, + {0xd465, CRL_REG_LEN_08BIT, 0xa3}, + {0xd466, CRL_REG_LEN_08BIT, 0x58}, + {0xd467, CRL_REG_LEN_08BIT, 0x00}, + {0xd468, CRL_REG_LEN_08BIT, 0xa4}, + {0xd469, CRL_REG_LEN_08BIT, 0xc6}, + {0xd46a, CRL_REG_LEN_08BIT, 0x00}, + {0xd46b, CRL_REG_LEN_08BIT, 0xff}, + {0xd46c, CRL_REG_LEN_08BIT, 0xb8}, + {0xd46d, CRL_REG_LEN_08BIT, 0x64}, + {0xd46e, CRL_REG_LEN_08BIT, 0x00}, + {0xd46f, CRL_REG_LEN_08BIT, 0x18}, + {0xd470, CRL_REG_LEN_08BIT, 0xbc}, + {0xd471, CRL_REG_LEN_08BIT, 0x46}, + {0xd472, CRL_REG_LEN_08BIT, 0x00}, + {0xd473, CRL_REG_LEN_08BIT, 0x03}, + {0xd474, CRL_REG_LEN_08BIT, 0x94}, + {0xd475, CRL_REG_LEN_08BIT, 0x85}, + {0xd476, CRL_REG_LEN_08BIT, 0x00}, + {0xd477, CRL_REG_LEN_08BIT, 0x00}, + {0xd478, CRL_REG_LEN_08BIT, 0xb8}, + {0xd479, CRL_REG_LEN_08BIT, 0x63}, + {0xd47a, CRL_REG_LEN_08BIT, 0x00}, + {0xd47b, CRL_REG_LEN_08BIT, 0x98}, + {0xd47c, CRL_REG_LEN_08BIT, 0xe0}, + {0xd47d, CRL_REG_LEN_08BIT, 0x64}, + {0xd47e, CRL_REG_LEN_08BIT, 0x18}, + {0xd47f, CRL_REG_LEN_08BIT, 0x00}, + {0xd480, CRL_REG_LEN_08BIT, 0x0f}, + {0xd481, CRL_REG_LEN_08BIT, 0xff}, + {0xd482, CRL_REG_LEN_08BIT, 0xff}, + {0xd483, CRL_REG_LEN_08BIT, 0xf0}, + {0xd484, CRL_REG_LEN_08BIT, 0xdc}, + {0xd485, CRL_REG_LEN_08BIT, 0x05}, + {0xd486, CRL_REG_LEN_08BIT, 0x18}, + {0xd487, CRL_REG_LEN_08BIT, 0x00}, + {0xd488, CRL_REG_LEN_08BIT, 0x9c}, + {0xd489, CRL_REG_LEN_08BIT, 0x68}, + {0xd48a, CRL_REG_LEN_08BIT, 0x00}, + {0xd48b, CRL_REG_LEN_08BIT, 0x01}, + {0xd48c, CRL_REG_LEN_08BIT, 0xa5}, + {0xd48d, CRL_REG_LEN_08BIT, 0x03}, + {0xd48e, CRL_REG_LEN_08BIT, 0x00}, + {0xd48f, CRL_REG_LEN_08BIT, 0xff}, + {0xd490, CRL_REG_LEN_08BIT, 0xbc}, + {0xd491, CRL_REG_LEN_08BIT, 0x48}, + {0xd492, CRL_REG_LEN_08BIT, 0x00}, + {0xd493, CRL_REG_LEN_08BIT, 0x01}, + {0xd494, CRL_REG_LEN_08BIT, 0x0f}, + {0xd495, CRL_REG_LEN_08BIT, 0xff}, + {0xd496, CRL_REG_LEN_08BIT, 0xff}, + {0xd497, CRL_REG_LEN_08BIT, 0xea}, + {0xd498, CRL_REG_LEN_08BIT, 0xb8}, + {0xd499, CRL_REG_LEN_08BIT, 0xe8}, + {0xd49a, CRL_REG_LEN_08BIT, 0x00}, + {0xd49b, CRL_REG_LEN_08BIT, 0x02}, + {0xd49c, CRL_REG_LEN_08BIT, 0x18}, + {0xd49d, CRL_REG_LEN_08BIT, 0x60}, + {0xd49e, CRL_REG_LEN_08BIT, 0x00}, + {0xd49f, CRL_REG_LEN_08BIT, 0x01}, + {0xd4a0, CRL_REG_LEN_08BIT, 0xa8}, + 
{0xd4a1, CRL_REG_LEN_08BIT, 0x63}, + {0xd4a2, CRL_REG_LEN_08BIT, 0x06}, + {0xd4a3, CRL_REG_LEN_08BIT, 0x14}, + {0xd4a4, CRL_REG_LEN_08BIT, 0x07}, + {0xd4a5, CRL_REG_LEN_08BIT, 0xff}, + {0xd4a6, CRL_REG_LEN_08BIT, 0xe4}, + {0xd4a7, CRL_REG_LEN_08BIT, 0x05}, + {0xd4a8, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4a9, CRL_REG_LEN_08BIT, 0x83}, + {0xd4aa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ab, CRL_REG_LEN_08BIT, 0x10}, + {0xd4ac, CRL_REG_LEN_08BIT, 0x85}, + {0xd4ad, CRL_REG_LEN_08BIT, 0x21}, + {0xd4ae, CRL_REG_LEN_08BIT, 0x00}, + {0xd4af, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b0, CRL_REG_LEN_08BIT, 0x44}, + {0xd4b1, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b2, CRL_REG_LEN_08BIT, 0x48}, + {0xd4b3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b4, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4b5, CRL_REG_LEN_08BIT, 0x21}, + {0xd4b6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4b7, CRL_REG_LEN_08BIT, 0x04}, + {0xd4b8, CRL_REG_LEN_08BIT, 0x18}, + {0xd4b9, CRL_REG_LEN_08BIT, 0x60}, + {0xd4ba, CRL_REG_LEN_08BIT, 0x00}, + {0xd4bb, CRL_REG_LEN_08BIT, 0x01}, + {0xd4bc, CRL_REG_LEN_08BIT, 0x9c}, + {0xd4bd, CRL_REG_LEN_08BIT, 0x80}, + {0xd4be, CRL_REG_LEN_08BIT, 0xff}, + {0xd4bf, CRL_REG_LEN_08BIT, 0xff}, + {0xd4c0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4c1, CRL_REG_LEN_08BIT, 0x63}, + {0xd4c2, CRL_REG_LEN_08BIT, 0x09}, + {0xd4c3, CRL_REG_LEN_08BIT, 0xef}, + {0xd4c4, CRL_REG_LEN_08BIT, 0xd8}, + {0xd4c5, CRL_REG_LEN_08BIT, 0x03}, + {0xd4c6, CRL_REG_LEN_08BIT, 0x20}, + {0xd4c7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4c8, CRL_REG_LEN_08BIT, 0x18}, + {0xd4c9, CRL_REG_LEN_08BIT, 0x60}, + {0xd4ca, CRL_REG_LEN_08BIT, 0x80}, + {0xd4cb, CRL_REG_LEN_08BIT, 0x06}, + {0xd4cc, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4cd, CRL_REG_LEN_08BIT, 0x63}, + {0xd4ce, CRL_REG_LEN_08BIT, 0xc9}, + {0xd4cf, CRL_REG_LEN_08BIT, 0xef}, + {0xd4d0, CRL_REG_LEN_08BIT, 0xd8}, + {0xd4d1, CRL_REG_LEN_08BIT, 0x03}, + {0xd4d2, CRL_REG_LEN_08BIT, 0x20}, + {0xd4d3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d4, CRL_REG_LEN_08BIT, 0x44}, + {0xd4d5, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d6, CRL_REG_LEN_08BIT, 0x48}, + {0xd4d7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4d8, CRL_REG_LEN_08BIT, 0x15}, + {0xd4d9, CRL_REG_LEN_08BIT, 0x00}, + {0xd4da, CRL_REG_LEN_08BIT, 0x00}, + {0xd4db, CRL_REG_LEN_08BIT, 0x00}, + {0xd4dc, CRL_REG_LEN_08BIT, 0x18}, + {0xd4dd, CRL_REG_LEN_08BIT, 0x80}, + {0xd4de, CRL_REG_LEN_08BIT, 0x00}, + {0xd4df, CRL_REG_LEN_08BIT, 0x01}, + {0xd4e0, CRL_REG_LEN_08BIT, 0xa8}, + {0xd4e1, CRL_REG_LEN_08BIT, 0x84}, + {0xd4e2, CRL_REG_LEN_08BIT, 0x0a}, + {0xd4e3, CRL_REG_LEN_08BIT, 0x12}, + {0xd4e4, CRL_REG_LEN_08BIT, 0x8c}, + {0xd4e5, CRL_REG_LEN_08BIT, 0x64}, + {0xd4e6, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4e8, CRL_REG_LEN_08BIT, 0xbc}, + {0xd4e9, CRL_REG_LEN_08BIT, 0x03}, + {0xd4ea, CRL_REG_LEN_08BIT, 0x00}, + {0xd4eb, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ec, CRL_REG_LEN_08BIT, 0x13}, + {0xd4ed, CRL_REG_LEN_08BIT, 0xff}, + {0xd4ee, CRL_REG_LEN_08BIT, 0xff}, + {0xd4ef, CRL_REG_LEN_08BIT, 0xfe}, + {0xd4f0, CRL_REG_LEN_08BIT, 0x15}, + {0xd4f1, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f2, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f3, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f4, CRL_REG_LEN_08BIT, 0x44}, + {0xd4f5, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f6, CRL_REG_LEN_08BIT, 0x48}, + {0xd4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xd4f8, CRL_REG_LEN_08BIT, 0x15}, + {0xd4f9, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fb, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fc, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fd, CRL_REG_LEN_08BIT, 0x00}, + {0xd4fe, CRL_REG_LEN_08BIT, 0x00}, + {0xd4ff, CRL_REG_LEN_08BIT, 0x00}, + {0xd500, CRL_REG_LEN_08BIT, 0x00}, + 
{0xd501, CRL_REG_LEN_08BIT, 0x00}, + {0xd502, CRL_REG_LEN_08BIT, 0x00}, + {0xd503, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0e, CRL_REG_LEN_08BIT, 0x33}, + {0x6f0f, CRL_REG_LEN_08BIT, 0x33}, + {0x460e, CRL_REG_LEN_08BIT, 0x08}, + {0x460f, CRL_REG_LEN_08BIT, 0x01}, + {0x4610, CRL_REG_LEN_08BIT, 0x00}, + {0x4611, CRL_REG_LEN_08BIT, 0x01}, + {0x4612, CRL_REG_LEN_08BIT, 0x00}, + {0x4613, CRL_REG_LEN_08BIT, 0x01}, + {0x4605, CRL_REG_LEN_08BIT, 0x08},/*YUV 8bit*/ + {0x4608, CRL_REG_LEN_08BIT, 0x00}, + {0x4609, CRL_REG_LEN_08BIT, 0x08}, + {0x6804, CRL_REG_LEN_08BIT, 0x00}, + {0x6805, CRL_REG_LEN_08BIT, 0x06}, + {0x6806, CRL_REG_LEN_08BIT, 0x00}, + {0x5120, CRL_REG_LEN_08BIT, 0x00}, + {0x3510, CRL_REG_LEN_08BIT, 0x00}, + {0x3504, CRL_REG_LEN_08BIT, 0x00}, + {0x6800, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0d, CRL_REG_LEN_08BIT, 0x0f}, + {0x5000, CRL_REG_LEN_08BIT, 0xff}, + {0x5001, CRL_REG_LEN_08BIT, 0xbf}, + {0x5002, CRL_REG_LEN_08BIT, 0x7e}, + {0x5003, CRL_REG_LEN_08BIT, 0x0c}, + {0x503d, CRL_REG_LEN_08BIT, 0x00}, + {0xc450, CRL_REG_LEN_08BIT, 0x01}, + {0xc452, CRL_REG_LEN_08BIT, 0x04}, + {0xc453, CRL_REG_LEN_08BIT, 0x00}, + {0xc454, CRL_REG_LEN_08BIT, 0x01}, + {0xc455, CRL_REG_LEN_08BIT, 0x00}, + {0xc456, CRL_REG_LEN_08BIT, 0x00}, + {0xc457, CRL_REG_LEN_08BIT, 0x00}, + {0xc458, CRL_REG_LEN_08BIT, 0x00}, + {0xc459, CRL_REG_LEN_08BIT, 0x00}, + {0xc45b, CRL_REG_LEN_08BIT, 0x00}, + {0xc45c, CRL_REG_LEN_08BIT, 0x00}, + {0xc45d, CRL_REG_LEN_08BIT, 0x00}, + {0xc45e, CRL_REG_LEN_08BIT, 0x02}, + {0xc45f, CRL_REG_LEN_08BIT, 0x01}, + {0xc460, CRL_REG_LEN_08BIT, 0x01}, + {0xc461, CRL_REG_LEN_08BIT, 0x01}, + {0xc462, CRL_REG_LEN_08BIT, 0x01}, + {0xc464, CRL_REG_LEN_08BIT, 0x88}, + {0xc465, CRL_REG_LEN_08BIT, 0x00}, + {0xc466, CRL_REG_LEN_08BIT, 0x8a}, + {0xc467, CRL_REG_LEN_08BIT, 0x00}, + {0xc468, CRL_REG_LEN_08BIT, 0x86}, + {0xc469, CRL_REG_LEN_08BIT, 0x00}, + {0xc46a, CRL_REG_LEN_08BIT, 0x40}, + {0xc46b, CRL_REG_LEN_08BIT, 0x50}, + {0xc46c, CRL_REG_LEN_08BIT, 0x30}, + {0xc46d, CRL_REG_LEN_08BIT, 0x28}, + {0xc46e, CRL_REG_LEN_08BIT, 0x60}, + {0xc46f, CRL_REG_LEN_08BIT, 0x40}, + {0xc47c, CRL_REG_LEN_08BIT, 0x01}, + {0xc47d, CRL_REG_LEN_08BIT, 0x38}, + {0xc47e, CRL_REG_LEN_08BIT, 0x00}, + {0xc47f, CRL_REG_LEN_08BIT, 0x00}, + {0xc480, CRL_REG_LEN_08BIT, 0x00}, + {0xc481, CRL_REG_LEN_08BIT, 0xff}, + {0xc482, CRL_REG_LEN_08BIT, 0x00}, + {0xc483, CRL_REG_LEN_08BIT, 0x40}, + {0xc484, CRL_REG_LEN_08BIT, 0x00}, + {0xc485, CRL_REG_LEN_08BIT, 0x18}, + {0xc486, CRL_REG_LEN_08BIT, 0x00}, + {0xc487, CRL_REG_LEN_08BIT, 0x18}, + {0xc488, CRL_REG_LEN_08BIT, 0x20}, + {0xc489, CRL_REG_LEN_08BIT, 0x00}, + {0xc48a, CRL_REG_LEN_08BIT, 0x20}, + {0xc48b, CRL_REG_LEN_08BIT, 0x00}, + {0xc48c, CRL_REG_LEN_08BIT, 0x00}, + {0xc48d, CRL_REG_LEN_08BIT, 0x04}, + {0xc48e, CRL_REG_LEN_08BIT, 0x00}, + {0xc48f, CRL_REG_LEN_08BIT, 0x04}, + {0xc490, CRL_REG_LEN_08BIT, 0x07}, + {0xc492, CRL_REG_LEN_08BIT, 0x20}, + {0xc493, CRL_REG_LEN_08BIT, 0x08}, + {0xc498, CRL_REG_LEN_08BIT, 0x02}, + {0xc499, CRL_REG_LEN_08BIT, 0x00}, + {0xc49a, CRL_REG_LEN_08BIT, 0x02}, + {0xc49b, CRL_REG_LEN_08BIT, 0x00}, + {0xc49c, CRL_REG_LEN_08BIT, 0x02}, + {0xc49d, CRL_REG_LEN_08BIT, 0x00}, + {0xc49e, CRL_REG_LEN_08BIT, 0x02}, + {0xc49f, CRL_REG_LEN_08BIT, 0x60}, + {0xc4a0, CRL_REG_LEN_08BIT, 0x03}, + {0xc4a1, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a2, CRL_REG_LEN_08BIT, 0x04}, + {0xc4a3, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a5, CRL_REG_LEN_08BIT, 0x10}, + {0xc4a6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4a7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4a8, CRL_REG_LEN_08BIT, 
0x00}, + {0xc4a9, CRL_REG_LEN_08BIT, 0x80}, + {0xc4aa, CRL_REG_LEN_08BIT, 0x0d}, + {0xc4ab, CRL_REG_LEN_08BIT, 0x00}, + {0xc4ac, CRL_REG_LEN_08BIT, 0x03}, + {0xc4ad, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4b4, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b5, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b6, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b7, CRL_REG_LEN_08BIT, 0x01}, + {0xc4b8, CRL_REG_LEN_08BIT, 0x00}, + {0xc4b9, CRL_REG_LEN_08BIT, 0x01}, + {0xc4ba, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4bd, CRL_REG_LEN_08BIT, 0x60}, + {0xc4be, CRL_REG_LEN_08BIT, 0x02}, + {0xc4bf, CRL_REG_LEN_08BIT, 0x33}, + {0xc4c8, CRL_REG_LEN_08BIT, 0x03}, + {0xc4c9, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4ca, CRL_REG_LEN_08BIT, 0x0e}, + {0xc4cb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4cc, CRL_REG_LEN_08BIT, 0x04}, + {0xc4cd, CRL_REG_LEN_08BIT, 0xd8}, + {0xc4ce, CRL_REG_LEN_08BIT, 0x04}, + {0xc4cf, CRL_REG_LEN_08BIT, 0xd8}, + {0xc4d0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4d1, CRL_REG_LEN_08BIT, 0x80}, + {0xc4e0, CRL_REG_LEN_08BIT, 0x04}, + {0xc4e1, CRL_REG_LEN_08BIT, 0x02}, + {0xc4e2, CRL_REG_LEN_08BIT, 0x01}, + {0xc4e4, CRL_REG_LEN_08BIT, 0x10}, + {0xc4e5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4e6, CRL_REG_LEN_08BIT, 0x30}, + {0xc4e7, CRL_REG_LEN_08BIT, 0x40}, + {0xc4e8, CRL_REG_LEN_08BIT, 0x50}, + {0xc4e9, CRL_REG_LEN_08BIT, 0x60}, + {0xc4ea, CRL_REG_LEN_08BIT, 0x70}, + {0xc4eb, CRL_REG_LEN_08BIT, 0x80}, + {0xc4ec, CRL_REG_LEN_08BIT, 0x90}, + {0xc4ed, CRL_REG_LEN_08BIT, 0xa0}, + {0xc4ee, CRL_REG_LEN_08BIT, 0xb0}, + {0xc4ef, CRL_REG_LEN_08BIT, 0xc0}, + {0xc4f0, CRL_REG_LEN_08BIT, 0xd0}, + {0xc4f1, CRL_REG_LEN_08BIT, 0xe0}, + {0xc4f2, CRL_REG_LEN_08BIT, 0xf0}, + {0xc4f3, CRL_REG_LEN_08BIT, 0x80}, + {0xc4f4, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f5, CRL_REG_LEN_08BIT, 0x20}, + {0xc4f6, CRL_REG_LEN_08BIT, 0x02}, + {0xc4f7, CRL_REG_LEN_08BIT, 0x00}, + {0xc4f8, CRL_REG_LEN_08BIT, 0x04}, + {0xc4f9, CRL_REG_LEN_08BIT, 0x0b}, + {0xc4fa, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fb, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fc, CRL_REG_LEN_08BIT, 0x01}, + {0xc4fd, CRL_REG_LEN_08BIT, 0x00}, + {0xc4fe, CRL_REG_LEN_08BIT, 0x04}, + {0xc4ff, CRL_REG_LEN_08BIT, 0x02}, + {0xc500, CRL_REG_LEN_08BIT, 0x48}, + {0xc501, CRL_REG_LEN_08BIT, 0x74}, + {0xc502, CRL_REG_LEN_08BIT, 0x58}, + {0xc503, CRL_REG_LEN_08BIT, 0x80}, + {0xc504, CRL_REG_LEN_08BIT, 0x05}, + {0xc505, CRL_REG_LEN_08BIT, 0x80}, + {0xc506, CRL_REG_LEN_08BIT, 0x03}, + {0xc507, CRL_REG_LEN_08BIT, 0x80}, + {0xc508, CRL_REG_LEN_08BIT, 0x01}, + {0xc509, CRL_REG_LEN_08BIT, 0xc0}, + {0xc50a, CRL_REG_LEN_08BIT, 0x01}, + {0xc50b, CRL_REG_LEN_08BIT, 0xa0}, + {0xc50c, CRL_REG_LEN_08BIT, 0x01}, + {0xc50d, CRL_REG_LEN_08BIT, 0x2c}, + {0xc50e, CRL_REG_LEN_08BIT, 0x01}, + {0xc50f, CRL_REG_LEN_08BIT, 0x0a}, + {0xc510, CRL_REG_LEN_08BIT, 0x00}, + {0xc511, CRL_REG_LEN_08BIT, 0x00}, + {0xc512, CRL_REG_LEN_08BIT, 0x4d}, + {0xc513, CRL_REG_LEN_08BIT, 0x84}, + {0xc514, CRL_REG_LEN_08BIT, 0x04}, + {0xc515, CRL_REG_LEN_08BIT, 0x00}, + {0xc518, CRL_REG_LEN_08BIT, 0x03}, + {0xc519, CRL_REG_LEN_08BIT, 0x48}, + {0xc51a, CRL_REG_LEN_08BIT, 0x07}, + {0xc51b, CRL_REG_LEN_08BIT, 0x70}, + {0xc2e0, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e1, CRL_REG_LEN_08BIT, 0x51}, + {0xc2e2, CRL_REG_LEN_08BIT, 0x00}, + {0xc2e3, CRL_REG_LEN_08BIT, 0xd6}, + {0xc2e4, CRL_REG_LEN_08BIT, 0x01}, + {0xc2e5, CRL_REG_LEN_08BIT, 0x5e}, + {0xc2e9, CRL_REG_LEN_08BIT, 0x01}, + {0xc2ea, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2eb, CRL_REG_LEN_08BIT, 0x90}, + {0xc2ed, CRL_REG_LEN_08BIT, 0x00}, + {0xc2ee, CRL_REG_LEN_08BIT, 0x7a}, + {0xc2ef, CRL_REG_LEN_08BIT, 
0x64}, + {0xc308, CRL_REG_LEN_08BIT, 0x00}, + {0xc309, CRL_REG_LEN_08BIT, 0x00}, + {0xc30a, CRL_REG_LEN_08BIT, 0x00}, + {0xc30c, CRL_REG_LEN_08BIT, 0x00}, + {0xc30d, CRL_REG_LEN_08BIT, 0x01}, + {0xc30e, CRL_REG_LEN_08BIT, 0x00}, + {0xc30f, CRL_REG_LEN_08BIT, 0x00}, + {0xc310, CRL_REG_LEN_08BIT, 0x01}, + {0xc311, CRL_REG_LEN_08BIT, 0x60}, + {0xc312, CRL_REG_LEN_08BIT, 0xff}, + {0xc313, CRL_REG_LEN_08BIT, 0x08}, + {0xc314, CRL_REG_LEN_08BIT, 0x01}, + {0xc315, CRL_REG_LEN_08BIT, 0x7f}, + {0xc316, CRL_REG_LEN_08BIT, 0xff}, + {0xc317, CRL_REG_LEN_08BIT, 0x0b}, + {0xc318, CRL_REG_LEN_08BIT, 0x00}, + {0xc319, CRL_REG_LEN_08BIT, 0x0c}, + {0xc31a, CRL_REG_LEN_08BIT, 0x00}, + {0xc31b, CRL_REG_LEN_08BIT, 0xe0}, + {0xc31c, CRL_REG_LEN_08BIT, 0x00}, + {0xc31d, CRL_REG_LEN_08BIT, 0x14}, + {0xc31e, CRL_REG_LEN_08BIT, 0x00}, + {0xc31f, CRL_REG_LEN_08BIT, 0xc5}, + {0xc320, CRL_REG_LEN_08BIT, 0xff}, + {0xc321, CRL_REG_LEN_08BIT, 0x4b}, + {0xc322, CRL_REG_LEN_08BIT, 0xff}, + {0xc323, CRL_REG_LEN_08BIT, 0xf0}, + {0xc324, CRL_REG_LEN_08BIT, 0xff}, + {0xc325, CRL_REG_LEN_08BIT, 0xe8}, + {0xc326, CRL_REG_LEN_08BIT, 0x00}, + {0xc327, CRL_REG_LEN_08BIT, 0x46}, + {0xc328, CRL_REG_LEN_08BIT, 0xff}, + {0xc329, CRL_REG_LEN_08BIT, 0xd2}, + {0xc32a, CRL_REG_LEN_08BIT, 0xff}, + {0xc32b, CRL_REG_LEN_08BIT, 0xe4}, + {0xc32c, CRL_REG_LEN_08BIT, 0xff}, + {0xc32d, CRL_REG_LEN_08BIT, 0xbb}, + {0xc32e, CRL_REG_LEN_08BIT, 0x00}, + {0xc32f, CRL_REG_LEN_08BIT, 0x61}, + {0xc330, CRL_REG_LEN_08BIT, 0xff}, + {0xc331, CRL_REG_LEN_08BIT, 0xf9}, + {0xc332, CRL_REG_LEN_08BIT, 0x00}, + {0xc333, CRL_REG_LEN_08BIT, 0xd9}, + {0xc334, CRL_REG_LEN_08BIT, 0x00}, + {0xc335, CRL_REG_LEN_08BIT, 0x2e}, + {0xc336, CRL_REG_LEN_08BIT, 0x00}, + {0xc337, CRL_REG_LEN_08BIT, 0xb1}, + {0xc338, CRL_REG_LEN_08BIT, 0xff}, + {0xc339, CRL_REG_LEN_08BIT, 0x64}, + {0xc33a, CRL_REG_LEN_08BIT, 0xff}, + {0xc33b, CRL_REG_LEN_08BIT, 0xeb}, + {0xc33c, CRL_REG_LEN_08BIT, 0xff}, + {0xc33d, CRL_REG_LEN_08BIT, 0xe8}, + {0xc33e, CRL_REG_LEN_08BIT, 0x00}, + {0xc33f, CRL_REG_LEN_08BIT, 0x48}, + {0xc340, CRL_REG_LEN_08BIT, 0xff}, + {0xc341, CRL_REG_LEN_08BIT, 0xd0}, + {0xc342, CRL_REG_LEN_08BIT, 0xff}, + {0xc343, CRL_REG_LEN_08BIT, 0xed}, + {0xc344, CRL_REG_LEN_08BIT, 0xff}, + {0xc345, CRL_REG_LEN_08BIT, 0xad}, + {0xc346, CRL_REG_LEN_08BIT, 0x00}, + {0xc347, CRL_REG_LEN_08BIT, 0x66}, + {0xc348, CRL_REG_LEN_08BIT, 0x01}, + {0xc349, CRL_REG_LEN_08BIT, 0x00}, + {0x6700, CRL_REG_LEN_08BIT, 0x04}, + {0x6701, CRL_REG_LEN_08BIT, 0x7b}, + {0x6702, CRL_REG_LEN_08BIT, 0xfd}, + {0x6703, CRL_REG_LEN_08BIT, 0xf9}, + {0x6704, CRL_REG_LEN_08BIT, 0x3d}, + {0x6705, CRL_REG_LEN_08BIT, 0x71}, + {0x6706, CRL_REG_LEN_08BIT, 0x78}, + {0x6708, CRL_REG_LEN_08BIT, 0x05}, + {0x6f06, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f07, CRL_REG_LEN_08BIT, 0x00}, + {0x6f0a, CRL_REG_LEN_08BIT, 0x6f}, + {0x6f0b, CRL_REG_LEN_08BIT, 0x00}, + {0x6f00, CRL_REG_LEN_08BIT, 0x03}, + {0xc34c, CRL_REG_LEN_08BIT, 0x01}, + {0xc34d, CRL_REG_LEN_08BIT, 0x00}, + {0xc34e, CRL_REG_LEN_08BIT, 0x46}, + {0xc34f, CRL_REG_LEN_08BIT, 0x55}, + {0xc350, CRL_REG_LEN_08BIT, 0x00}, + {0xc351, CRL_REG_LEN_08BIT, 0x40}, + {0xc352, CRL_REG_LEN_08BIT, 0x00}, + {0xc353, CRL_REG_LEN_08BIT, 0xff}, + {0xc354, CRL_REG_LEN_08BIT, 0x04}, + {0xc355, CRL_REG_LEN_08BIT, 0x08}, + {0xc356, CRL_REG_LEN_08BIT, 0x01}, + {0xc357, CRL_REG_LEN_08BIT, 0xef}, + {0xc358, CRL_REG_LEN_08BIT, 0x30}, + {0xc359, CRL_REG_LEN_08BIT, 0x01}, + {0xc35a, CRL_REG_LEN_08BIT, 0x64}, + {0xc35b, CRL_REG_LEN_08BIT, 0x46}, + {0xc35c, CRL_REG_LEN_08BIT, 0x00}, + {0x3621, CRL_REG_LEN_08BIT, 
0x73}, + {0x3702, CRL_REG_LEN_08BIT, 0x20}, + {0x3703, CRL_REG_LEN_08BIT, 0x48}, + {0x3704, CRL_REG_LEN_08BIT, 0x32}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0xA4}, + {0x3804, CRL_REG_LEN_08BIT, 0x00}, + {0x3805, CRL_REG_LEN_08BIT, 0xFF}, + {0x3806, CRL_REG_LEN_08BIT, 0x02}, + {0x3807, CRL_REG_LEN_08BIT, 0x89}, + {0x3808, CRL_REG_LEN_08BIT, 0x02}, + {0x3809, CRL_REG_LEN_08BIT, 0x80}, + {0x380a, CRL_REG_LEN_08BIT, 0x01}, + {0x380b, CRL_REG_LEN_08BIT, 0xE0}, + {0x380c, CRL_REG_LEN_08BIT, 0x04}, + {0x380d, CRL_REG_LEN_08BIT, 0xAC}, + {0x6e42, CRL_REG_LEN_08BIT, 0x05}, + {0x6e43, CRL_REG_LEN_08BIT, 0x3A}, + {0x3810, CRL_REG_LEN_08BIT, 0x00}, + {0x3811, CRL_REG_LEN_08BIT, 0x08}, + {0x3812, CRL_REG_LEN_08BIT, 0x00}, + {0x3813, CRL_REG_LEN_08BIT, 0x02}, + {0x381c, CRL_REG_LEN_08BIT, 0x00}, + {0x381e, CRL_REG_LEN_08BIT, 0x00}, + {0x381f, CRL_REG_LEN_08BIT, 0x0C}, + {0x4001, CRL_REG_LEN_08BIT, 0x06}, + {0x4004, CRL_REG_LEN_08BIT, 0x04}, + {0x4050, CRL_REG_LEN_08BIT, 0x22}, + {0x4051, CRL_REG_LEN_08BIT, 0x24}, + {0x4605, CRL_REG_LEN_08BIT, 0x08}, + {0x4606, CRL_REG_LEN_08BIT, 0x09}, + {0x4607, CRL_REG_LEN_08BIT, 0x58}, + {0xc488, CRL_REG_LEN_08BIT, 0x53}, + {0xc489, CRL_REG_LEN_08BIT, 0x20}, + {0xc48a, CRL_REG_LEN_08BIT, 0x53}, + {0xc48b, CRL_REG_LEN_08BIT, 0x20}, + {0xc4cc, CRL_REG_LEN_08BIT, 0x04}, + {0xc4cd, CRL_REG_LEN_08BIT, 0xD8}, + {0xc4ce, CRL_REG_LEN_08BIT, 0x04}, + {0xc4cf, CRL_REG_LEN_08BIT, 0xD8}, + {0xc510, CRL_REG_LEN_08BIT, 0x00}, + {0xc511, CRL_REG_LEN_08BIT, 0x00}, + {0xc512, CRL_REG_LEN_08BIT, 0x4D}, + {0xc513, CRL_REG_LEN_08BIT, 0x84}, + {0x5005, CRL_REG_LEN_08BIT, 0x08}, + {0x3007, CRL_REG_LEN_08BIT, 0x01}, + {0xc518, CRL_REG_LEN_08BIT, 0x05}, + {0xc519, CRL_REG_LEN_08BIT, 0x3A}, + {0xc51a, CRL_REG_LEN_08BIT, 0x04}, + {0xc51b, CRL_REG_LEN_08BIT, 0xAC}, + {0x5608, CRL_REG_LEN_08BIT, 0x15}, + {0x3815, CRL_REG_LEN_08BIT, 0x8C}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x3042, CRL_REG_LEN_08BIT, 0xf0}, + {0x302e, CRL_REG_LEN_08BIT, 0x00}, + {0x301b, CRL_REG_LEN_08BIT, 0xf0}, + {0x301c, CRL_REG_LEN_08BIT, 0xf0}, + {0x301a, CRL_REG_LEN_08BIT, 0xf0}, + {0xceb0, CRL_REG_LEN_08BIT, 0x00}, + {0xceb1, CRL_REG_LEN_08BIT, 0x00}, + {0xceb2, CRL_REG_LEN_08BIT, 0x00}, + {0xceb3, CRL_REG_LEN_08BIT, 0x00}, + {0xceb4, CRL_REG_LEN_08BIT, 0x00}, + {0xceb5, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xceb6, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xceb7, CRL_REG_LEN_08BIT, 0x00}, + {0x0000, CRL_REG_LEN_DELAY, 0x0c}, + {0xc4bc, CRL_REG_LEN_08BIT, 0x01}, + {0x0000, CRL_REG_LEN_DELAY, 
0x0c},
+	{0xc4bd, CRL_REG_LEN_08BIT, 0x60},
+	{0x0000, CRL_REG_LEN_DELAY, 0x0c},
+};
+
+static struct crl_dynamic_register_access ov10635_h_flip_regs[] = {
+	{
+		.address = 0x381d,
+		.len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE,
+		.ops_items = 0,
+		.ops = 0,
+		.mask = 0x3,
+	}
+};
+
+static struct crl_dynamic_register_access ov10635_v_flip_regs[] = {
+	{
+		.address = 0x381c,
+		.len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE,
+		.ops_items = 0,
+		.ops = 0,
+		.mask = 0xc0,
+	}
+};
+
+/* Needed for ACPI-based runtime detection */
+static struct crl_sensor_detect_config ov10635_sensor_detect_regset[] = {
+	{
+		.reg = { 0x300A, CRL_REG_LEN_08BIT, 0x000000ff },
+		.width = 8,
+	},
+	{
+		.reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff },
+		.width = 8,
+	}
+};
+
+static struct crl_pll_configuration ov10635_pll_configurations[] = {
+	{
+		.input_clk = 24000000,
+		.op_sys_clk = 400000000,
+		.bitsperpixel = 16,
+		.pixel_rate_csi = 529000000,
+		.pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK * 2 * 4 / 12 */
+		.csi_lanes = 4,
+		.comp_items = 0,
+		.ctrl_data = 0,
+		.pll_regs_items = 0,
+		.pll_regs = NULL,
+	},
+	{
+		.input_clk = 24000000,
+		.op_sys_clk = 400000000,
+		.bitsperpixel = 10,
+		.pixel_rate_csi = 529000000,
+		.pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK * 2 * 4 / 12 */
+		.csi_lanes = 4,
+		.comp_items = 0,
+		.ctrl_data = 0,
+		.pll_regs_items = 0,
+		.pll_regs = NULL,
+	},
+	{
+		.input_clk = 24000000,
+		.op_sys_clk = 400000000,
+		.bitsperpixel = 20,
+		.pixel_rate_csi = 529000000,
+		.pixel_rate_pa = 529000000, /* pixel_rate = MIPICLK * 2 * 4 / 12 */
+		.csi_lanes = 4,
+		.comp_items = 0,
+		.ctrl_data = 0,
+		.pll_regs_items = 0,
+		.pll_regs = NULL,
+	}
+};
+
+static struct crl_subdev_rect_rep ov10635_1280_800_rects[] = {
+	{
+		.subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY,
+		.in_rect.left = 0,
+		.in_rect.top = 0,
+		.in_rect.width = 1280,
+		.in_rect.height = 800,
+		.out_rect.left = 0,
+		.out_rect.top = 0,
+		.out_rect.width = 1280,
+		.out_rect.height = 800,
+	},
+	{
+		.subdev_type = CRL_SUBDEV_TYPE_BINNER,
+		.in_rect.left = 0,
+		.in_rect.top = 0,
+		.in_rect.width = 1280,
+		.in_rect.height = 800,
+		.out_rect.left = 0,
+		.out_rect.top = 0,
+		.out_rect.width = 1280,
+		.out_rect.height = 800,
+	},
+};
+
+static struct crl_subdev_rect_rep ov10635_1280_720_rects_BT656[] = {
+	{
+		.subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY,
+		.in_rect.left = 0,
+		.in_rect.top = 0,
+		.in_rect.width = 1280,
+		.in_rect.height = 720,
+		.out_rect.left = 0,
+		.out_rect.top = 0,
+		.out_rect.width = 1280,
+		.out_rect.height = 720,
+	},
+	{
+		.subdev_type = CRL_SUBDEV_TYPE_BINNER,
+		.in_rect.left = 0,
+		.in_rect.top = 0,
+		.in_rect.width = 1280,
+		.in_rect.height = 720,
+		.out_rect.left = 0,
+		.out_rect.top = 0,
+		.out_rect.width = 1280,
+		.out_rect.height = 720,
+	},
+};
+
+static struct crl_subdev_rect_rep ov10635_640_480_rects[] = {
+	{
+		.subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY,
+		.in_rect.left = 0,
+		.in_rect.top = 0,
+		.in_rect.width = 1280,
+		.in_rect.height = 800,
+		.out_rect.left = 0,
+		.out_rect.top = 0,
+		.out_rect.width = 1280,
+		.out_rect.height = 800,
+	},
+	{
+		.subdev_type = CRL_SUBDEV_TYPE_BINNER,
+		.in_rect.left = 0,
+		.in_rect.top = 0,
+		.in_rect.width = 1280,
+		.in_rect.height = 800,
+		.out_rect.left = 0,
+		.out_rect.top = 0,
+		.out_rect.width = 640,
+		.out_rect.height = 480,
+	},
+};
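The ov10635_h_flip_regs and ov10635_v_flip_regs tables above tag their registers with CRL_REG_READ_AND_UPDATE plus a .mask, which indicates the flip controls are applied as a masked read-modify-write rather than a plain write. A minimal sketch of that update pattern, assuming reg_read()/reg_write() as hypothetical stand-ins for the driver's real I2C register ops:

#include <linux/types.h>

/* Hypothetical 8-bit accessors; the crlmodule's actual helpers differ. */
int reg_read(u16 address, u8 *val);
int reg_write(u16 address, u8 val);

/* Masked read-modify-write, as CRL_REG_READ_AND_UPDATE implies. */
static int apply_flip(u16 address, u8 mask, bool enable)
{
	u8 val;
	int ret;

	ret = reg_read(address, &val);	/* fetch the current register value */
	if (ret)
		return ret;

	val &= ~mask;			/* clear only the masked flip bits */
	if (enable)
		val |= mask;

	return reg_write(address, val);
}

With the tables above, enabling horizontal flip would amount to apply_flip(0x381d, 0x3, true), leaving the remaining bits of 0x381d untouched.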
+static struct crl_register_write_rep ov10635_powerup_regs[] = {
+	{OV10635_REG_RESET, CRL_REG_LEN_08BIT, 0x01},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+	{0x300c, CRL_REG_LEN_08BIT, 0x61},
+};
+
+static struct crl_register_write_rep ov10635_poweroff_regs[] = {
+	{OV10635_REG_RESET, CRL_REG_LEN_08BIT, 0x01},
+};
+
+static struct crl_power_seq_entity ov10635_power_items[] = {
+	{
+		.type = CRL_POWER_ETY_GPIO_FROM_PDATA,
+		.val = 0,
+		.undo_val = 1,
+	},
+};
+
+static struct crl_mode_rep ov10635_modes[] = {
+	{
+		.sd_rects_items = ARRAY_SIZE(ov10635_1280_800_rects),
+		.sd_rects = ov10635_1280_800_rects,
+		.binn_hor = 1,
+		.binn_vert = 1,
+		.scale_m = 1,
+		.width = 1280,
+		.height = 800,
+		.min_llp = 2250,
+		.min_fll = 1320,
+		.mode_regs_items = ARRAY_SIZE(ov10635_1280_800_YUV_HDR),
+		.mode_regs = ov10635_1280_800_YUV_HDR,
+	},
+	{
+		.sd_rects_items = ARRAY_SIZE(ov10635_1280_720_rects_BT656),
+		.sd_rects = ov10635_1280_720_rects_BT656,
+		.binn_hor = 1,
+		.binn_vert = 1,
+		.scale_m = 1,
+		.width = 1280,
+		.height = 720,
+		.min_llp = 2250,
+		.min_fll = 1320,
+		.mode_regs_items = ARRAY_SIZE(ov10635_1280_720_YUV_HDR_BT656),
+		.mode_regs = ov10635_1280_720_YUV_HDR_BT656,
+	},
+	{
+		.sd_rects_items = ARRAY_SIZE(ov10635_640_480_rects),
+		.sd_rects = ov10635_640_480_rects,
+		.binn_hor = 2,
+		.binn_vert = 1,
+		.scale_m = 1,
+		.width = 640,
+		.height = 480,
+		.min_llp = 2250,
+		.min_fll = 1320,
+		.mode_regs_items = ARRAY_SIZE(ov10635_640_480_YUV_HDR),
+		.mode_regs = ov10635_640_480_YUV_HDR,
+	},
+};
+
+static struct crl_sensor_subdev_config ov10635_sensor_subdevs[] = {
+	{
+		.subdev_type = CRL_SUBDEV_TYPE_BINNER,
+		.name = "ov10635 binner",
+	},
+	{
+		.subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY,
+		.name = "ov10635 pixel array",
+	}
+};
+
+static struct crl_sensor_limits ov10635_sensor_limits = {
+	.x_addr_min = 0,
+	.y_addr_min = 0,
+	.x_addr_max = 1280,
+	.y_addr_max = 800,
+	.min_frame_length_lines = 240,
+	.max_frame_length_lines = 65535,
+	.min_line_length_pixels = 320,
+	.max_line_length_pixels = 32752,
+};
+
+static struct crl_flip_data ov10635_flip_configurations[] = {
+	{
+		.flip = CRL_FLIP_DEFAULT_NONE,
+		.pixel_order = CRL_PIXEL_ORDER_IGNORE,
+	},
+	{
+		.flip = CRL_FLIP_HFLIP,
+		.pixel_order = CRL_PIXEL_ORDER_IGNORE,
+	},
+	{
+		.flip = CRL_FLIP_VFLIP,
+		.pixel_order = CRL_PIXEL_ORDER_IGNORE,
+	},
+	{
+		.flip = CRL_FLIP_HFLIP_VFLIP,
+		.pixel_order = CRL_PIXEL_ORDER_IGNORE,
+	}
+};
+
+static struct crl_register_write_rep ov10635_yuyv_regs[] = {
+	{0x4300, CRL_REG_LEN_08BIT, 0x38},
+};
+
+static struct crl_register_write_rep ov10635_uyvy_regs[] = {
+	{0x4300, CRL_REG_LEN_08BIT, 0x3a},
+};
+
+static struct crl_csi_data_fmt ov10635_crl_csi_data_fmt[] = {
+	{
+		.code = MEDIA_BUS_FMT_YUYV8_1X16,
+		.pixel_order = CRL_PIXEL_ORDER_IGNORE,
+		.bits_per_pixel = 16,
+		.regs_items = ARRAY_SIZE(ov10635_yuyv_regs),
+		.regs = ov10635_yuyv_regs,
+	},
+	{
+		.code = MEDIA_BUS_FMT_UYVY8_1X16,
+		.pixel_order = CRL_PIXEL_ORDER_IGNORE,
+		.bits_per_pixel = 16,
+		.regs_items = ARRAY_SIZE(ov10635_uyvy_regs),
+		.regs = ov10635_uyvy_regs,
+	},
+};
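The ov10635_crl_csi_data_fmt table above binds each media-bus code to the register write that selects its byte order (0x4300 = 0x38 for YUYV, 0x3a for UYVY). A sketch of the lookup a set-format path would plausibly run over such a table; find_csi_fmt() is illustrative only, not the crlmodule's real helper, and struct crl_csi_data_fmt comes from crlmodule-sensor-ds.h:

#include <linux/types.h>

/* Return the table entry matching a media-bus code, or NULL. */
static const struct crl_csi_data_fmt *
find_csi_fmt(const struct crl_csi_data_fmt *fmts, unsigned int n, u32 code)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (fmts[i].code == code)	/* e.g. MEDIA_BUS_FMT_UYVY8_1X16 */
			return &fmts[i];

	return NULL;	/* media-bus code not supported by this sensor */
}

The matching entry's .regs would then be written out (0x3a to 0x4300 for UYVY), and its .bits_per_pixel used to pick the corresponding bitsperpixel entry in ov10635_pll_configurations.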
+static struct crl_v4l2_ctrl ov10635_v4l2_ctrls[] = {
+	{
+		.sd_type = CRL_SUBDEV_TYPE_BINNER,
+		.op_type = CRL_V4L2_CTRL_SET_OP,
+		.context = SENSOR_IDLE,
+		.ctrl_id = V4L2_CID_LINK_FREQ,
+		.name = "V4L2_CID_LINK_FREQ",
+		.type = CRL_V4L2_CTRL_TYPE_MENU_INT,
+		.data.v4l2_int_menu.def = 0,
+		.data.v4l2_int_menu.max = 0,
+		.data.v4l2_int_menu.menu = 0,
+		.flags = 0,
+		.impact = CRL_IMPACTS_NO_IMPACT,
+		.regs_items = 0,
+		.regs = 0,
+		.dep_items = 0,
+		.dep_ctrls = 0,
+	},
+	{
+		.sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY,
+		.op_type = CRL_V4L2_CTRL_GET_OP,
+		.context = SENSOR_POWERED_ON,
+		.ctrl_id = V4L2_CID_PIXEL_RATE,
+		.name = "V4L2_CID_PIXEL_RATE_PA",
+		.type = CRL_V4L2_CTRL_TYPE_INTEGER,
+		.data.std_data.min = 0,
+		.data.std_data.max = INT_MAX,
+		.data.std_data.step = 1,
+		.data.std_data.def = 0,
+		.flags = 0,
+		.impact = CRL_IMPACTS_NO_IMPACT,
+		.regs_items = 0,
+		.regs = 0,
+		.dep_items = 0,
+		.dep_ctrls = 0,
+	},
+	{
+		.sd_type = CRL_SUBDEV_TYPE_BINNER,
+		.op_type = CRL_V4L2_CTRL_GET_OP,
+		.context = SENSOR_POWERED_ON,
+		.ctrl_id = V4L2_CID_PIXEL_RATE,
+		.name = "V4L2_CID_PIXEL_RATE_CSI",
+		.type = CRL_V4L2_CTRL_TYPE_INTEGER,
+		.data.std_data.min = 0,
+		.data.std_data.max = INT_MAX,
+		.data.std_data.step = 1,
+		.data.std_data.def = 0,
+		.flags = 0,
+		.impact = CRL_IMPACTS_NO_IMPACT,
+		.regs_items = 0,
+		.regs = 0,
+		.dep_items = 0,
+		.dep_ctrls = 0,
+	},
+	{
+		.sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY,
+		.op_type = CRL_V4L2_CTRL_SET_OP,
+		.context = SENSOR_POWERED_ON,
+		.ctrl_id = V4L2_CID_HFLIP,
+		.name = "V4L2_CID_HFLIP",
+		.type = CRL_V4L2_CTRL_TYPE_INTEGER,
+		.data.std_data.min = 0,
+		.data.std_data.max = 1,
+		.data.std_data.step = 1,
+		.data.std_data.def = 0,
+		.flags = 0,
+		.impact = CRL_IMPACTS_NO_IMPACT,
+		.ctrl = 0,
+		.regs_items = ARRAY_SIZE(ov10635_h_flip_regs),
+		.regs = ov10635_h_flip_regs,
+		.dep_items = 0,
+		.dep_ctrls = 0,
+	},
+	{
+		.sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY,
+		.op_type = CRL_V4L2_CTRL_SET_OP,
+		.context = SENSOR_POWERED_ON,
+		.ctrl_id = V4L2_CID_VFLIP,
+		.name = "V4L2_CID_VFLIP",
+		.type = CRL_V4L2_CTRL_TYPE_INTEGER,
+		.data.std_data.min = 0,
+		.data.std_data.max = 1,
+		.data.std_data.step = 1,
+		.data.std_data.def = 0,
+		.flags = 0,
+		.impact = CRL_IMPACTS_NO_IMPACT,
+		.ctrl = 0,
+		.regs_items = ARRAY_SIZE(ov10635_v_flip_regs),
+		.regs = ov10635_v_flip_regs,
+		.dep_items = 0,
+		.dep_ctrls = 0,
+	},
+};
+
+struct crl_sensor_configuration ov10635_crl_configuration = {
+
+	.powerup_regs_items = ARRAY_SIZE(ov10635_powerup_regs),
+	.powerup_regs = ov10635_powerup_regs,
+
+	.poweroff_regs_items = ARRAY_SIZE(ov10635_poweroff_regs),
+	.poweroff_regs = ov10635_poweroff_regs,
+
+	.power_items = ARRAY_SIZE(ov10635_power_items),
+	.power_entities = ov10635_power_items,
+
+	.id_reg_items = ARRAY_SIZE(ov10635_sensor_detect_regset),
+	.id_regs = ov10635_sensor_detect_regset,
+
+	.subdev_items = ARRAY_SIZE(ov10635_sensor_subdevs),
+	.subdevs = ov10635_sensor_subdevs,
+
+	.pll_config_items = ARRAY_SIZE(ov10635_pll_configurations),
+	.pll_configs = ov10635_pll_configurations,
+
+	.sensor_limits = &ov10635_sensor_limits,
+
+	.modes_items = ARRAY_SIZE(ov10635_modes),
+	.modes = ov10635_modes,
+
+	.streamon_regs_items = 0,
+	.streamon_regs = 0,
+
+	.streamoff_regs_items = 0,
+	.streamoff_regs = 0,
+
+	.v4l2_ctrls_items = ARRAY_SIZE(ov10635_v4l2_ctrls),
+	.v4l2_ctrl_bank = ov10635_v4l2_ctrls,
+
+	.csi_fmts_items = ARRAY_SIZE(ov10635_crl_csi_data_fmt),
+	.csi_fmts = ov10635_crl_csi_data_fmt,
+
+	.flip_items =
ARRAY_SIZE(ov10635_flip_configurations), + .flip_data = ov10635_flip_configurations, +}; + +#endif /* __CRLMODULE_OV10635_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov10640_configuration.h b/drivers/media/i2c/crlmodule/crl_ov10640_configuration.h new file mode 100644 index 000000000000..ab8378bc0988 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov10640_configuration.h @@ -0,0 +1,3235 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Shuguang Gong + * + */ + +#ifndef __CRLMODULE_OV10640_CONFIGURATION_H_ +#define __CRLMODULE_OV10640_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +#define OV10640_REG_STREAM 0x3012 +#define OV10640_REG_RESET 0x3013 + +#define OV10640_HMAX 65535 +#define OV10640_VMAX 65535 +#define OV10640_MAX_SHS1 (OV10640_VMAX - 6) +#define OV10640_MAX_SHS3 0x7F +#define OV10640_MAX_DGAIN 0x3FFF + +/* 800Mbps for ov10640 1280x1080 30fps */ +static struct crl_register_write_rep ov10640_pll_800mbps[] = { + {0x3000, CRL_REG_LEN_08BIT, 0x03}, + {0x3001, CRL_REG_LEN_08BIT, 0x48}, + {0x3002, CRL_REG_LEN_08BIT, 0x07}, + {0x3004, CRL_REG_LEN_08BIT, 0x03}, + {0x3005, CRL_REG_LEN_08BIT, 0x48}, + {0x3006, CRL_REG_LEN_08BIT, 0x07}, + {0x3007, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_register_write_rep ov10640_powerup_standby[] = { + {OV10640_REG_RESET, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_power_seq_entity ov10640_power_items[] = { + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .undo_val = 0, + }, +}; + +static struct crl_register_write_rep ov10640_1280_1080_LONG_RAW[] = { + {0x328a, CRL_REG_LEN_08BIT, 0x11}, + {0x313f, CRL_REG_LEN_08BIT, 0x80}, + {0x3132, CRL_REG_LEN_08BIT, 0x24}, + {0x3014, CRL_REG_LEN_08BIT, 0x03}, + {0x3023, CRL_REG_LEN_08BIT, 0x05}, + {0x3032, CRL_REG_LEN_08BIT, 0x35}, + {0x3033, CRL_REG_LEN_08BIT, 0x04}, + {0x3054, CRL_REG_LEN_08BIT, 0x00}, + {0x3055, CRL_REG_LEN_08BIT, 0x08}, + {0x3056, CRL_REG_LEN_08BIT, 0x01}, + {0x3057, CRL_REG_LEN_08BIT, 0xff}, + {0x3058, CRL_REG_LEN_08BIT, 0xaf}, + {0x3059, CRL_REG_LEN_08BIT, 0x44}, + {0x305a, CRL_REG_LEN_08BIT, 0x02}, + {0x305b, CRL_REG_LEN_08BIT, 0x00}, + {0x305c, CRL_REG_LEN_08BIT, 0x30}, + {0x305d, CRL_REG_LEN_08BIT, 0x9e}, + {0x305e, CRL_REG_LEN_08BIT, 0x19}, + {0x305f, CRL_REG_LEN_08BIT, 0x18}, + {0x3060, CRL_REG_LEN_08BIT, 0xf9}, + {0x3061, CRL_REG_LEN_08BIT, 0xf0}, + {0x308c, CRL_REG_LEN_08BIT, 0xB3}, + {0x308f, CRL_REG_LEN_08BIT, 0x10}, + {0x3091, CRL_REG_LEN_08BIT, 0x00}, + {0x3093, CRL_REG_LEN_08BIT, 0x01}, + {0x30a3, CRL_REG_LEN_08BIT, 0x08}, + {0x30ad, CRL_REG_LEN_08BIT, 0x03}, + {0x30ae, CRL_REG_LEN_08BIT, 0x80}, + {0x30af, CRL_REG_LEN_08BIT, 0x80}, + {0x30b0, CRL_REG_LEN_08BIT, 0xff}, + {0x30b1, CRL_REG_LEN_08BIT, 0x3f}, + {0x30b2, CRL_REG_LEN_08BIT, 0x22}, + {0x30b9, CRL_REG_LEN_08BIT, 0x22}, + {0x30bb, CRL_REG_LEN_08BIT, 0x00}, + {0x30bc, CRL_REG_LEN_08BIT, 0x00}, + {0x30bd, CRL_REG_LEN_08BIT, 0x00}, + {0x30be, CRL_REG_LEN_08BIT, 0x00}, + {0x30bf, CRL_REG_LEN_08BIT, 0x00}, + {0x30c0, CRL_REG_LEN_08BIT, 0x00}, + {0x30c1, CRL_REG_LEN_08BIT, 0x00}, + {0x30c2, CRL_REG_LEN_08BIT, 0x00}, + {0x30c3, CRL_REG_LEN_08BIT, 0x00}, + {0x30c4, CRL_REG_LEN_08BIT, 0x80}, + {0x30c5, CRL_REG_LEN_08BIT, 0x00}, + {0x30c6, CRL_REG_LEN_08BIT, 0x80}, + {0x30c7, CRL_REG_LEN_08BIT, 0x00}, + {0x30c8, CRL_REG_LEN_08BIT, 0x80}, + {0x3119, CRL_REG_LEN_08BIT, 0x45}, + {0x311a, CRL_REG_LEN_08BIT, 0x01}, + {0x311b, CRL_REG_LEN_08BIT, 0x4a}, + {0x3074, CRL_REG_LEN_08BIT, 0x00}, + {0x3075, CRL_REG_LEN_08BIT, 0x00}, + 
{0x3076, CRL_REG_LEN_08BIT, 0x00}, + {0x3077, CRL_REG_LEN_08BIT, 0x02}, + {0x3078, CRL_REG_LEN_08BIT, 0x05}, + {0x3079, CRL_REG_LEN_08BIT, 0x07}, + {0x307a, CRL_REG_LEN_08BIT, 0x04}, + {0x307b, CRL_REG_LEN_08BIT, 0x41}, + {0x307c, CRL_REG_LEN_08BIT, 0x05}, + {0x307d, CRL_REG_LEN_08BIT, 0x00}, + {0x307e, CRL_REG_LEN_08BIT, 0x04}, + {0x307f, CRL_REG_LEN_08BIT, 0x38}, + {0x3084, CRL_REG_LEN_08BIT, 0x00}, + {0x3085, CRL_REG_LEN_08BIT, 0x04}, + {0x3086, CRL_REG_LEN_08BIT, 0x00}, + {0x3087, CRL_REG_LEN_08BIT, 0x04}, + {0x3088, CRL_REG_LEN_08BIT, 0x00}, + {0x3089, CRL_REG_LEN_08BIT, 0x40}, + {0x308d, CRL_REG_LEN_08BIT, 0x92}, + {0x3094, CRL_REG_LEN_08BIT, 0xa5}, + {0x30fa, CRL_REG_LEN_08BIT, 0x06}, + {0x3120, CRL_REG_LEN_08BIT, 0x00}, + {0x3121, CRL_REG_LEN_08BIT, 0x01}, + {0x3122, CRL_REG_LEN_08BIT, 0x00}, + {0x3127, CRL_REG_LEN_08BIT, 0x63}, + {0x3128, CRL_REG_LEN_08BIT, 0xc0}, + {0x3129, CRL_REG_LEN_08BIT, 0x00}, + {0x31be, CRL_REG_LEN_08BIT, 0x01}, + {0x30a5, CRL_REG_LEN_08BIT, 0x78}, + {0x30a6, CRL_REG_LEN_08BIT, 0x40}, + {0x30a7, CRL_REG_LEN_08BIT, 0x78}, + {0x30a8, CRL_REG_LEN_08BIT, 0x80}, + {0x30a9, CRL_REG_LEN_08BIT, 0x78}, + {0x30aa, CRL_REG_LEN_08BIT, 0xe0}, + {0x30ab, CRL_REG_LEN_08BIT, 0xf9}, + {0x30ac, CRL_REG_LEN_08BIT, 0xc0}, + {0x3440, CRL_REG_LEN_08BIT, 0x04}, + {0x3444, CRL_REG_LEN_08BIT, 0x28}, + {0x344e, CRL_REG_LEN_08BIT, 0x2c}, + {0x3457, CRL_REG_LEN_08BIT, 0x33}, + {0x345e, CRL_REG_LEN_08BIT, 0x38}, + {0x3461, CRL_REG_LEN_08BIT, 0xa8}, + {0x7002, CRL_REG_LEN_08BIT, 0xaa}, + {0x7001, CRL_REG_LEN_08BIT, 0xdf}, + {0x7048, CRL_REG_LEN_08BIT, 0x00}, + {0x7049, CRL_REG_LEN_08BIT, 0x02}, + {0x704a, CRL_REG_LEN_08BIT, 0x02}, + {0x704b, CRL_REG_LEN_08BIT, 0x00}, + {0x704c, CRL_REG_LEN_08BIT, 0x01}, + {0x704d, CRL_REG_LEN_08BIT, 0x00}, + {0x7043, CRL_REG_LEN_08BIT, 0x04}, + {0x7040, CRL_REG_LEN_08BIT, 0x3c}, + {0x7047, CRL_REG_LEN_08BIT, 0x00}, + {0x7044, CRL_REG_LEN_08BIT, 0x01}, + {0x7000, CRL_REG_LEN_08BIT, 0x1f}, + {0x7084, CRL_REG_LEN_08BIT, 0x01}, + {0x7085, CRL_REG_LEN_08BIT, 0x03}, + {0x7086, CRL_REG_LEN_08BIT, 0x02}, + {0x7087, CRL_REG_LEN_08BIT, 0x40}, + {0x7088, CRL_REG_LEN_08BIT, 0x01}, + {0x7089, CRL_REG_LEN_08BIT, 0x20}, + {0x707f, CRL_REG_LEN_08BIT, 0x04}, + {0x707c, CRL_REG_LEN_08BIT, 0x3c}, + {0x7083, CRL_REG_LEN_08BIT, 0x00}, + {0x7080, CRL_REG_LEN_08BIT, 0x01}, + {0x7003, CRL_REG_LEN_08BIT, 0xdf}, + {0x70c0, CRL_REG_LEN_08BIT, 0x00}, + {0x70c1, CRL_REG_LEN_08BIT, 0x02}, + {0x70c2, CRL_REG_LEN_08BIT, 0x02}, + {0x70c3, CRL_REG_LEN_08BIT, 0x00}, + {0x70c4, CRL_REG_LEN_08BIT, 0x01}, + {0x70c5, CRL_REG_LEN_08BIT, 0x00}, + {0x70b8, CRL_REG_LEN_08BIT, 0x03}, + {0x70b9, CRL_REG_LEN_08BIT, 0x98}, + {0x70bc, CRL_REG_LEN_08BIT, 0x00}, + {0x70bd, CRL_REG_LEN_08BIT, 0x80}, + {0x7004, CRL_REG_LEN_08BIT, 0x02}, + {0x7005, CRL_REG_LEN_08BIT, 0x00}, + {0x7006, CRL_REG_LEN_08BIT, 0x01}, + {0x7007, CRL_REG_LEN_08BIT, 0x80}, + {0x7008, CRL_REG_LEN_08BIT, 0x02}, + {0x7009, CRL_REG_LEN_08BIT, 0x00}, + {0x700a, CRL_REG_LEN_08BIT, 0x04}, + {0x700b, CRL_REG_LEN_08BIT, 0x00}, + {0x700e, CRL_REG_LEN_08BIT, 0x00}, + {0x700f, CRL_REG_LEN_08BIT, 0x60}, + {0x701a, CRL_REG_LEN_08BIT, 0x02}, + {0x701b, CRL_REG_LEN_08BIT, 0x00}, + {0x701c, CRL_REG_LEN_08BIT, 0x01}, + {0x701d, CRL_REG_LEN_08BIT, 0x80}, + {0x701e, CRL_REG_LEN_08BIT, 0x02}, + {0x701f, CRL_REG_LEN_08BIT, 0x00}, + {0x7020, CRL_REG_LEN_08BIT, 0x04}, + {0x7021, CRL_REG_LEN_08BIT, 0x00}, + {0x7024, CRL_REG_LEN_08BIT, 0x00}, + {0x7025, CRL_REG_LEN_08BIT, 0x60}, + {0x70e7, CRL_REG_LEN_08BIT, 0x00}, + {0x70e4, CRL_REG_LEN_08BIT, 0x10}, + 
{0x70e5, CRL_REG_LEN_08BIT, 0x00}, + {0x70e6, CRL_REG_LEN_08BIT, 0x00}, + {0x70eb, CRL_REG_LEN_08BIT, 0x00}, + {0x70e8, CRL_REG_LEN_08BIT, 0x10}, + {0x70e9, CRL_REG_LEN_08BIT, 0x00}, + {0x70ea, CRL_REG_LEN_08BIT, 0x00}, + {0x70ef, CRL_REG_LEN_08BIT, 0x00}, + {0x70ec, CRL_REG_LEN_08BIT, 0xfd}, + {0x70ed, CRL_REG_LEN_08BIT, 0x00}, + {0x70ee, CRL_REG_LEN_08BIT, 0x00}, + {0x70eb, CRL_REG_LEN_08BIT, 0x00}, + {0x70f0, CRL_REG_LEN_08BIT, 0xfd}, + {0x70f1, CRL_REG_LEN_08BIT, 0x00}, + {0x70f2, CRL_REG_LEN_08BIT, 0x00}, + {0x30fb, CRL_REG_LEN_08BIT, 0x06}, + {0x30fc, CRL_REG_LEN_08BIT, 0x80}, + {0x30fd, CRL_REG_LEN_08BIT, 0x02}, + {0x30fe, CRL_REG_LEN_08BIT, 0x93}, + {0x6000, CRL_REG_LEN_08BIT, 0xc1}, + {0x6001, CRL_REG_LEN_08BIT, 0xb9}, + {0x6002, CRL_REG_LEN_08BIT, 0xba}, + {0x6003, CRL_REG_LEN_08BIT, 0xa4}, + {0x6004, CRL_REG_LEN_08BIT, 0xb5}, + {0x6005, CRL_REG_LEN_08BIT, 0xa0}, + {0x6006, CRL_REG_LEN_08BIT, 0x82}, + {0x6007, CRL_REG_LEN_08BIT, 0xa7}, + {0x6008, CRL_REG_LEN_08BIT, 0xb7}, + {0x6009, CRL_REG_LEN_08BIT, 0x5c}, + {0x600a, CRL_REG_LEN_08BIT, 0x9e}, + {0x600b, CRL_REG_LEN_08BIT, 0xc0}, + {0x600c, CRL_REG_LEN_08BIT, 0xd2}, + {0x600d, CRL_REG_LEN_08BIT, 0x33}, + {0x600e, CRL_REG_LEN_08BIT, 0xcc}, + {0x600f, CRL_REG_LEN_08BIT, 0xe2}, + {0x6010, CRL_REG_LEN_08BIT, 0xc1}, + {0x6011, CRL_REG_LEN_08BIT, 0xab}, + {0x6012, CRL_REG_LEN_08BIT, 0xb7}, + {0x6013, CRL_REG_LEN_08BIT, 0x00}, + {0x6014, CRL_REG_LEN_08BIT, 0x00}, + {0x6015, CRL_REG_LEN_08BIT, 0x00}, + {0x6016, CRL_REG_LEN_08BIT, 0x00}, + {0x6017, CRL_REG_LEN_08BIT, 0x00}, + {0x6018, CRL_REG_LEN_08BIT, 0x00}, + {0x6019, CRL_REG_LEN_08BIT, 0x00}, + {0x601a, CRL_REG_LEN_08BIT, 0x00}, + {0x601b, CRL_REG_LEN_08BIT, 0x00}, + {0x601c, CRL_REG_LEN_08BIT, 0x00}, + {0x601d, CRL_REG_LEN_08BIT, 0x00}, + {0x601e, CRL_REG_LEN_08BIT, 0x9c}, + {0x601f, CRL_REG_LEN_08BIT, 0x94}, + {0x6020, CRL_REG_LEN_08BIT, 0x90}, + {0x6021, CRL_REG_LEN_08BIT, 0xc5}, + {0x6022, CRL_REG_LEN_08BIT, 0x01}, + {0x6023, CRL_REG_LEN_08BIT, 0x54}, + {0x6024, CRL_REG_LEN_08BIT, 0x2a}, + {0x6025, CRL_REG_LEN_08BIT, 0x61}, + {0x6026, CRL_REG_LEN_08BIT, 0xd2}, + {0x6027, CRL_REG_LEN_08BIT, 0xcc}, + {0x6028, CRL_REG_LEN_08BIT, 0x04}, + {0x6029, CRL_REG_LEN_08BIT, 0x35}, + {0x602a, CRL_REG_LEN_08BIT, 0xb1}, + {0x602b, CRL_REG_LEN_08BIT, 0xb2}, + {0x602c, CRL_REG_LEN_08BIT, 0xb3}, + {0x602d, CRL_REG_LEN_08BIT, 0xd2}, + {0x602e, CRL_REG_LEN_08BIT, 0xd3}, + {0x602f, CRL_REG_LEN_08BIT, 0x12}, + {0x6030, CRL_REG_LEN_08BIT, 0x31}, + {0x6031, CRL_REG_LEN_08BIT, 0xcc}, + {0x6032, CRL_REG_LEN_08BIT, 0x06}, + {0x6033, CRL_REG_LEN_08BIT, 0xd2}, + {0x6034, CRL_REG_LEN_08BIT, 0xc4}, + {0x6035, CRL_REG_LEN_08BIT, 0xce}, + {0x6036, CRL_REG_LEN_08BIT, 0x18}, + {0x6037, CRL_REG_LEN_08BIT, 0xcf}, + {0x6038, CRL_REG_LEN_08BIT, 0x1e}, + {0x6039, CRL_REG_LEN_08BIT, 0xd0}, + {0x603a, CRL_REG_LEN_08BIT, 0x24}, + {0x603b, CRL_REG_LEN_08BIT, 0xc5}, + {0x603c, CRL_REG_LEN_08BIT, 0xd2}, + {0x603d, CRL_REG_LEN_08BIT, 0xbc}, + {0x603e, CRL_REG_LEN_08BIT, 0xcc}, + {0x603f, CRL_REG_LEN_08BIT, 0x52}, + {0x6040, CRL_REG_LEN_08BIT, 0x2b}, + {0x6041, CRL_REG_LEN_08BIT, 0xd2}, + {0x6042, CRL_REG_LEN_08BIT, 0xd3}, + {0x6043, CRL_REG_LEN_08BIT, 0x02}, + {0x6044, CRL_REG_LEN_08BIT, 0xcc}, + {0x6045, CRL_REG_LEN_08BIT, 0x0a}, + {0x6046, CRL_REG_LEN_08BIT, 0xd2}, + {0x6047, CRL_REG_LEN_08BIT, 0xd3}, + {0x6048, CRL_REG_LEN_08BIT, 0x0f}, + {0x6049, CRL_REG_LEN_08BIT, 0x1a}, + {0x604a, CRL_REG_LEN_08BIT, 0x2a}, + {0x604b, CRL_REG_LEN_08BIT, 0xd4}, + {0x604c, CRL_REG_LEN_08BIT, 0xf6}, + {0x604d, CRL_REG_LEN_08BIT, 0xba}, + 
{0x604e, CRL_REG_LEN_08BIT, 0x56}, + {0x604f, CRL_REG_LEN_08BIT, 0xd3}, + {0x6050, CRL_REG_LEN_08BIT, 0x2e}, + {0x6051, CRL_REG_LEN_08BIT, 0x54}, + {0x6052, CRL_REG_LEN_08BIT, 0x26}, + {0x6053, CRL_REG_LEN_08BIT, 0xd2}, + {0x6054, CRL_REG_LEN_08BIT, 0xcc}, + {0x6055, CRL_REG_LEN_08BIT, 0x60}, + {0x6056, CRL_REG_LEN_08BIT, 0xd2}, + {0x6057, CRL_REG_LEN_08BIT, 0xd3}, + {0x6058, CRL_REG_LEN_08BIT, 0x27}, + {0x6059, CRL_REG_LEN_08BIT, 0x27}, + {0x605a, CRL_REG_LEN_08BIT, 0x08}, + {0x605b, CRL_REG_LEN_08BIT, 0x1a}, + {0x605c, CRL_REG_LEN_08BIT, 0xcc}, + {0x605d, CRL_REG_LEN_08BIT, 0x88}, + {0x605e, CRL_REG_LEN_08BIT, 0x00}, + {0x605f, CRL_REG_LEN_08BIT, 0x12}, + {0x6060, CRL_REG_LEN_08BIT, 0x2c}, + {0x6061, CRL_REG_LEN_08BIT, 0x60}, + {0x6062, CRL_REG_LEN_08BIT, 0xc2}, + {0x6063, CRL_REG_LEN_08BIT, 0xb9}, + {0x6064, CRL_REG_LEN_08BIT, 0xa5}, + {0x6065, CRL_REG_LEN_08BIT, 0xb5}, + {0x6066, CRL_REG_LEN_08BIT, 0xa0}, + {0x6067, CRL_REG_LEN_08BIT, 0x82}, + {0x6068, CRL_REG_LEN_08BIT, 0x5c}, + {0x6069, CRL_REG_LEN_08BIT, 0xd4}, + {0x606a, CRL_REG_LEN_08BIT, 0xbe}, + {0x606b, CRL_REG_LEN_08BIT, 0xd4}, + {0x606c, CRL_REG_LEN_08BIT, 0xbe}, + {0x606d, CRL_REG_LEN_08BIT, 0xd3}, + {0x606e, CRL_REG_LEN_08BIT, 0x01}, + {0x606f, CRL_REG_LEN_08BIT, 0x7c}, + {0x6070, CRL_REG_LEN_08BIT, 0x74}, + {0x6071, CRL_REG_LEN_08BIT, 0x00}, + {0x6072, CRL_REG_LEN_08BIT, 0x61}, + {0x6073, CRL_REG_LEN_08BIT, 0x2a}, + {0x6074, CRL_REG_LEN_08BIT, 0xd2}, + {0x6075, CRL_REG_LEN_08BIT, 0xcc}, + {0x6076, CRL_REG_LEN_08BIT, 0xdf}, + {0x6077, CRL_REG_LEN_08BIT, 0xc6}, + {0x6078, CRL_REG_LEN_08BIT, 0x35}, + {0x6079, CRL_REG_LEN_08BIT, 0xd2}, + {0x607a, CRL_REG_LEN_08BIT, 0xcc}, + {0x607b, CRL_REG_LEN_08BIT, 0x06}, + {0x607c, CRL_REG_LEN_08BIT, 0x31}, + {0x607d, CRL_REG_LEN_08BIT, 0xd2}, + {0x607e, CRL_REG_LEN_08BIT, 0xc5}, + {0x607f, CRL_REG_LEN_08BIT, 0xbb}, + {0x6080, CRL_REG_LEN_08BIT, 0xcc}, + {0x6081, CRL_REG_LEN_08BIT, 0x18}, + {0x6082, CRL_REG_LEN_08BIT, 0xc6}, + {0x6083, CRL_REG_LEN_08BIT, 0xd2}, + {0x6084, CRL_REG_LEN_08BIT, 0xbd}, + {0x6085, CRL_REG_LEN_08BIT, 0xcc}, + {0x6086, CRL_REG_LEN_08BIT, 0x52}, + {0x6087, CRL_REG_LEN_08BIT, 0x2b}, + {0x6088, CRL_REG_LEN_08BIT, 0xd2}, + {0x6089, CRL_REG_LEN_08BIT, 0xd3}, + {0x608a, CRL_REG_LEN_08BIT, 0x01}, + {0x608b, CRL_REG_LEN_08BIT, 0xcc}, + {0x608c, CRL_REG_LEN_08BIT, 0x0a}, + {0x608d, CRL_REG_LEN_08BIT, 0xd2}, + {0x608e, CRL_REG_LEN_08BIT, 0xd3}, + {0x608f, CRL_REG_LEN_08BIT, 0x0f}, + {0x6090, CRL_REG_LEN_08BIT, 0x1a}, + {0x6091, CRL_REG_LEN_08BIT, 0x71}, + {0x6092, CRL_REG_LEN_08BIT, 0x2a}, + {0x6093, CRL_REG_LEN_08BIT, 0xd4}, + {0x6094, CRL_REG_LEN_08BIT, 0xf6}, + {0x6095, CRL_REG_LEN_08BIT, 0xd3}, + {0x6096, CRL_REG_LEN_08BIT, 0x22}, + {0x6097, CRL_REG_LEN_08BIT, 0x70}, + {0x6098, CRL_REG_LEN_08BIT, 0xca}, + {0x6099, CRL_REG_LEN_08BIT, 0x26}, + {0x609a, CRL_REG_LEN_08BIT, 0xd2}, + {0x609b, CRL_REG_LEN_08BIT, 0xcc}, + {0x609c, CRL_REG_LEN_08BIT, 0x60}, + {0x609d, CRL_REG_LEN_08BIT, 0xd2}, + {0x609e, CRL_REG_LEN_08BIT, 0xd3}, + {0x609f, CRL_REG_LEN_08BIT, 0x27}, + {0x60a0, CRL_REG_LEN_08BIT, 0x27}, + {0x60a1, CRL_REG_LEN_08BIT, 0x08}, + {0x60a2, CRL_REG_LEN_08BIT, 0x1a}, + {0x60a3, CRL_REG_LEN_08BIT, 0xcc}, + {0x60a4, CRL_REG_LEN_08BIT, 0x88}, + {0x60a5, CRL_REG_LEN_08BIT, 0x12}, + {0x60a6, CRL_REG_LEN_08BIT, 0x2c}, + {0x60a7, CRL_REG_LEN_08BIT, 0x60}, + {0x60a8, CRL_REG_LEN_08BIT, 0x00}, + {0x60a9, CRL_REG_LEN_08BIT, 0x00}, + {0x60aa, CRL_REG_LEN_08BIT, 0xc0}, + {0x60ab, CRL_REG_LEN_08BIT, 0xb9}, + {0x60ac, CRL_REG_LEN_08BIT, 0xa3}, + {0x60ad, CRL_REG_LEN_08BIT, 0xb5}, + 
{0x60ae, CRL_REG_LEN_08BIT, 0x00}, + {0x60af, CRL_REG_LEN_08BIT, 0xa0}, + {0x60b0, CRL_REG_LEN_08BIT, 0x82}, + {0x60b1, CRL_REG_LEN_08BIT, 0x5c}, + {0x60b2, CRL_REG_LEN_08BIT, 0xd4}, + {0x60b3, CRL_REG_LEN_08BIT, 0xa0}, + {0x60b4, CRL_REG_LEN_08BIT, 0x9d}, + {0x60b5, CRL_REG_LEN_08BIT, 0xd3}, + {0x60b6, CRL_REG_LEN_08BIT, 0x26}, + {0x60b7, CRL_REG_LEN_08BIT, 0xb0}, + {0x60b8, CRL_REG_LEN_08BIT, 0xb7}, + {0x60b9, CRL_REG_LEN_08BIT, 0x00}, + {0x60ba, CRL_REG_LEN_08BIT, 0xd3}, + {0x60bb, CRL_REG_LEN_08BIT, 0x0a}, + {0x60bc, CRL_REG_LEN_08BIT, 0xd3}, + {0x60bd, CRL_REG_LEN_08BIT, 0x10}, + {0x60be, CRL_REG_LEN_08BIT, 0x9c}, + {0x60bf, CRL_REG_LEN_08BIT, 0x94}, + {0x60c0, CRL_REG_LEN_08BIT, 0x90}, + {0x60c1, CRL_REG_LEN_08BIT, 0xc8}, + {0x60c2, CRL_REG_LEN_08BIT, 0xba}, + {0x60c3, CRL_REG_LEN_08BIT, 0x7c}, + {0x60c4, CRL_REG_LEN_08BIT, 0x74}, + {0x60c5, CRL_REG_LEN_08BIT, 0x00}, + {0x60c6, CRL_REG_LEN_08BIT, 0x61}, + {0x60c7, CRL_REG_LEN_08BIT, 0x2a}, + {0x60c8, CRL_REG_LEN_08BIT, 0x00}, + {0x60c9, CRL_REG_LEN_08BIT, 0xd2}, + {0x60ca, CRL_REG_LEN_08BIT, 0xcc}, + {0x60cb, CRL_REG_LEN_08BIT, 0xdf}, + {0x60cc, CRL_REG_LEN_08BIT, 0xc4}, + {0x60cd, CRL_REG_LEN_08BIT, 0x35}, + {0x60ce, CRL_REG_LEN_08BIT, 0xd2}, + {0x60cf, CRL_REG_LEN_08BIT, 0xcc}, + {0x60d0, CRL_REG_LEN_08BIT, 0x06}, + {0x60d1, CRL_REG_LEN_08BIT, 0x31}, + {0x60d2, CRL_REG_LEN_08BIT, 0xd2}, + {0x60d3, CRL_REG_LEN_08BIT, 0xcc}, + {0x60d4, CRL_REG_LEN_08BIT, 0x15}, + {0x60d5, CRL_REG_LEN_08BIT, 0xd2}, + {0x60d6, CRL_REG_LEN_08BIT, 0xbb}, + {0x60d7, CRL_REG_LEN_08BIT, 0xcc}, + {0x60d8, CRL_REG_LEN_08BIT, 0x1a}, + {0x60d9, CRL_REG_LEN_08BIT, 0xd2}, + {0x60da, CRL_REG_LEN_08BIT, 0xbe}, + {0x60db, CRL_REG_LEN_08BIT, 0xce}, + {0x60dc, CRL_REG_LEN_08BIT, 0x52}, + {0x60dd, CRL_REG_LEN_08BIT, 0xcf}, + {0x60de, CRL_REG_LEN_08BIT, 0x56}, + {0x60df, CRL_REG_LEN_08BIT, 0xd0}, + {0x60e0, CRL_REG_LEN_08BIT, 0x5b}, + {0x60e1, CRL_REG_LEN_08BIT, 0x2b}, + {0x60e2, CRL_REG_LEN_08BIT, 0xd2}, + {0x60e3, CRL_REG_LEN_08BIT, 0xd3}, + {0x60e4, CRL_REG_LEN_08BIT, 0x01}, + {0x60e5, CRL_REG_LEN_08BIT, 0xcc}, + {0x60e6, CRL_REG_LEN_08BIT, 0x0a}, + {0x60e7, CRL_REG_LEN_08BIT, 0xd2}, + {0x60e8, CRL_REG_LEN_08BIT, 0xd3}, + {0x60e9, CRL_REG_LEN_08BIT, 0x0f}, + {0x60ea, CRL_REG_LEN_08BIT, 0xd9}, + {0x60eb, CRL_REG_LEN_08BIT, 0xc7}, + {0x60ec, CRL_REG_LEN_08BIT, 0xda}, + {0x60ed, CRL_REG_LEN_08BIT, 0xce}, + {0x60ee, CRL_REG_LEN_08BIT, 0x1a}, + {0x60ef, CRL_REG_LEN_08BIT, 0xd4}, + {0x60f0, CRL_REG_LEN_08BIT, 0xf6}, + {0x60f1, CRL_REG_LEN_08BIT, 0xd4}, + {0x60f2, CRL_REG_LEN_08BIT, 0xa9}, + {0x60f3, CRL_REG_LEN_08BIT, 0x27}, + {0x60f4, CRL_REG_LEN_08BIT, 0x00}, + {0x60f5, CRL_REG_LEN_08BIT, 0xd2}, + {0x60f6, CRL_REG_LEN_08BIT, 0xcc}, + {0x60f7, CRL_REG_LEN_08BIT, 0x60}, + {0x60f8, CRL_REG_LEN_08BIT, 0xd2}, + {0x60f9, CRL_REG_LEN_08BIT, 0xd3}, + {0x60fa, CRL_REG_LEN_08BIT, 0x2d}, + {0x60fb, CRL_REG_LEN_08BIT, 0xd9}, + {0x60fc, CRL_REG_LEN_08BIT, 0xdf}, + {0x60fd, CRL_REG_LEN_08BIT, 0xda}, + {0x60fe, CRL_REG_LEN_08BIT, 0xe5}, + {0x60ff, CRL_REG_LEN_08BIT, 0x1a}, + {0x6100, CRL_REG_LEN_08BIT, 0x12}, + {0x6101, CRL_REG_LEN_08BIT, 0xcc}, + {0x6102, CRL_REG_LEN_08BIT, 0x88}, + {0x6103, CRL_REG_LEN_08BIT, 0xd6}, + {0x6104, CRL_REG_LEN_08BIT, 0xb1}, + {0x6105, CRL_REG_LEN_08BIT, 0xb9}, + {0x6106, CRL_REG_LEN_08BIT, 0xba}, + {0x6107, CRL_REG_LEN_08BIT, 0xaf}, + {0x6108, CRL_REG_LEN_08BIT, 0xdc}, + {0x6109, CRL_REG_LEN_08BIT, 0x00}, + {0x610a, CRL_REG_LEN_08BIT, 0xcb}, + {0x610b, CRL_REG_LEN_08BIT, 0xc3}, + {0x610c, CRL_REG_LEN_08BIT, 0xb9}, + {0x610d, CRL_REG_LEN_08BIT, 0xa4}, + 
{0x610e, CRL_REG_LEN_08BIT, 0xb5}, + {0x610f, CRL_REG_LEN_08BIT, 0x5c}, + {0x6110, CRL_REG_LEN_08BIT, 0x12}, + {0x6111, CRL_REG_LEN_08BIT, 0x2a}, + {0x6112, CRL_REG_LEN_08BIT, 0x61}, + {0x6113, CRL_REG_LEN_08BIT, 0xd2}, + {0x6114, CRL_REG_LEN_08BIT, 0xcc}, + {0x6115, CRL_REG_LEN_08BIT, 0xdf}, + {0x6116, CRL_REG_LEN_08BIT, 0xc7}, + {0x6117, CRL_REG_LEN_08BIT, 0x35}, + {0x6118, CRL_REG_LEN_08BIT, 0xd2}, + {0x6119, CRL_REG_LEN_08BIT, 0xcc}, + {0x611a, CRL_REG_LEN_08BIT, 0x06}, + {0x611b, CRL_REG_LEN_08BIT, 0x31}, + {0x611c, CRL_REG_LEN_08BIT, 0xc6}, + {0x611d, CRL_REG_LEN_08BIT, 0xbb}, + {0x611e, CRL_REG_LEN_08BIT, 0xd2}, + {0x611f, CRL_REG_LEN_08BIT, 0xcc}, + {0x6120, CRL_REG_LEN_08BIT, 0x18}, + {0x6121, CRL_REG_LEN_08BIT, 0xd2}, + {0x6122, CRL_REG_LEN_08BIT, 0xbe}, + {0x6123, CRL_REG_LEN_08BIT, 0xcc}, + {0x6124, CRL_REG_LEN_08BIT, 0x52}, + {0x6125, CRL_REG_LEN_08BIT, 0xc7}, + {0x6126, CRL_REG_LEN_08BIT, 0xd2}, + {0x6127, CRL_REG_LEN_08BIT, 0xcc}, + {0x6128, CRL_REG_LEN_08BIT, 0x0a}, + {0x6129, CRL_REG_LEN_08BIT, 0xb4}, + {0x612a, CRL_REG_LEN_08BIT, 0xb7}, + {0x612b, CRL_REG_LEN_08BIT, 0x94}, + {0x612c, CRL_REG_LEN_08BIT, 0xd2}, + {0x612d, CRL_REG_LEN_08BIT, 0x12}, + {0x612e, CRL_REG_LEN_08BIT, 0x26}, + {0x612f, CRL_REG_LEN_08BIT, 0x42}, + {0x6130, CRL_REG_LEN_08BIT, 0x46}, + {0x6131, CRL_REG_LEN_08BIT, 0x42}, + {0x6132, CRL_REG_LEN_08BIT, 0xd3}, + {0x6133, CRL_REG_LEN_08BIT, 0x20}, + {0x6134, CRL_REG_LEN_08BIT, 0x27}, + {0x6135, CRL_REG_LEN_08BIT, 0x00}, + {0x6136, CRL_REG_LEN_08BIT, 0x1a}, + {0x6137, CRL_REG_LEN_08BIT, 0xcc}, + {0x6138, CRL_REG_LEN_08BIT, 0x88}, + {0x6139, CRL_REG_LEN_08BIT, 0x60}, + {0x613a, CRL_REG_LEN_08BIT, 0x2c}, + {0x613b, CRL_REG_LEN_08BIT, 0x12}, + {0x613c, CRL_REG_LEN_08BIT, 0x40}, + {0x613d, CRL_REG_LEN_08BIT, 0xb8}, + {0x613e, CRL_REG_LEN_08BIT, 0x90}, + {0x613f, CRL_REG_LEN_08BIT, 0xd5}, + {0x6140, CRL_REG_LEN_08BIT, 0xba}, + {0x6141, CRL_REG_LEN_08BIT, 0x00}, + {0x6142, CRL_REG_LEN_08BIT, 0x00}, + {0x6143, CRL_REG_LEN_08BIT, 0x00}, + {0x6144, CRL_REG_LEN_08BIT, 0x00}, + {0x6145, CRL_REG_LEN_08BIT, 0x00}, + {0x6146, CRL_REG_LEN_08BIT, 0x00}, + {0x6147, CRL_REG_LEN_08BIT, 0xaa}, + {0x6148, CRL_REG_LEN_08BIT, 0xb7}, + {0x6149, CRL_REG_LEN_08BIT, 0x00}, + {0x614a, CRL_REG_LEN_08BIT, 0x00}, + {0x614b, CRL_REG_LEN_08BIT, 0x00}, + {0x614c, CRL_REG_LEN_08BIT, 0x00}, + {0x614d, CRL_REG_LEN_08BIT, 0xa6}, + {0x614e, CRL_REG_LEN_08BIT, 0xb7}, + {0x614f, CRL_REG_LEN_08BIT, 0x00}, + {0x6150, CRL_REG_LEN_08BIT, 0xd5}, + {0x6151, CRL_REG_LEN_08BIT, 0x00}, + {0x6152, CRL_REG_LEN_08BIT, 0x71}, + {0x6153, CRL_REG_LEN_08BIT, 0xd3}, + {0x6154, CRL_REG_LEN_08BIT, 0x30}, + {0x6155, CRL_REG_LEN_08BIT, 0xba}, + {0x6156, CRL_REG_LEN_08BIT, 0x00}, + {0x6157, CRL_REG_LEN_08BIT, 0x00}, + {0x6158, CRL_REG_LEN_08BIT, 0x00}, + {0x6159, CRL_REG_LEN_08BIT, 0x00}, + {0x615a, CRL_REG_LEN_08BIT, 0xd3}, + {0x615b, CRL_REG_LEN_08BIT, 0x10}, + {0x615c, CRL_REG_LEN_08BIT, 0x70}, + {0x615d, CRL_REG_LEN_08BIT, 0x00}, + {0x615e, CRL_REG_LEN_08BIT, 0x00}, + {0x615f, CRL_REG_LEN_08BIT, 0x00}, + {0x6160, CRL_REG_LEN_08BIT, 0x00}, + {0x6161, CRL_REG_LEN_08BIT, 0xd5}, + {0x6162, CRL_REG_LEN_08BIT, 0xba}, + {0x6163, CRL_REG_LEN_08BIT, 0xb0}, + {0x6164, CRL_REG_LEN_08BIT, 0xb7}, + {0x6165, CRL_REG_LEN_08BIT, 0x00}, + {0x6166, CRL_REG_LEN_08BIT, 0x9d}, + {0x6167, CRL_REG_LEN_08BIT, 0xd3}, + {0x6168, CRL_REG_LEN_08BIT, 0x0a}, + {0x6169, CRL_REG_LEN_08BIT, 0x9d}, + {0x616a, CRL_REG_LEN_08BIT, 0x9d}, + {0x616b, CRL_REG_LEN_08BIT, 0xd3}, + {0x616c, CRL_REG_LEN_08BIT, 0x10}, + {0x616d, CRL_REG_LEN_08BIT, 0x9c}, + 
{0x616e, CRL_REG_LEN_08BIT, 0x94}, + {0x616f, CRL_REG_LEN_08BIT, 0x90}, + {0x6170, CRL_REG_LEN_08BIT, 0xc8}, + {0x6171, CRL_REG_LEN_08BIT, 0xba}, + {0x6172, CRL_REG_LEN_08BIT, 0xd2}, + {0x6173, CRL_REG_LEN_08BIT, 0x60}, + {0x6174, CRL_REG_LEN_08BIT, 0x2c}, + {0x6175, CRL_REG_LEN_08BIT, 0x50}, + {0x6176, CRL_REG_LEN_08BIT, 0x11}, + {0x6177, CRL_REG_LEN_08BIT, 0xcc}, + {0x6178, CRL_REG_LEN_08BIT, 0x00}, + {0x6179, CRL_REG_LEN_08BIT, 0x30}, + {0x617a, CRL_REG_LEN_08BIT, 0xd5}, + {0x617b, CRL_REG_LEN_08BIT, 0x00}, + {0x617c, CRL_REG_LEN_08BIT, 0xba}, + {0x617d, CRL_REG_LEN_08BIT, 0xb0}, + {0x617e, CRL_REG_LEN_08BIT, 0xb7}, + {0x617f, CRL_REG_LEN_08BIT, 0x00}, + {0x6180, CRL_REG_LEN_08BIT, 0x9d}, + {0x6181, CRL_REG_LEN_08BIT, 0xd3}, + {0x6182, CRL_REG_LEN_08BIT, 0x0a}, + {0x6183, CRL_REG_LEN_08BIT, 0x9d}, + {0x6184, CRL_REG_LEN_08BIT, 0x9d}, + {0x6185, CRL_REG_LEN_08BIT, 0xd3}, + {0x6186, CRL_REG_LEN_08BIT, 0x10}, + {0x6187, CRL_REG_LEN_08BIT, 0x9c}, + {0x6188, CRL_REG_LEN_08BIT, 0x94}, + {0x6189, CRL_REG_LEN_08BIT, 0x90}, + {0x618a, CRL_REG_LEN_08BIT, 0xc8}, + {0x618b, CRL_REG_LEN_08BIT, 0xba}, + {0x618c, CRL_REG_LEN_08BIT, 0xd5}, + {0x618d, CRL_REG_LEN_08BIT, 0x00}, + {0x618e, CRL_REG_LEN_08BIT, 0x01}, + {0x618f, CRL_REG_LEN_08BIT, 0x1a}, + {0x6190, CRL_REG_LEN_08BIT, 0xcc}, + {0x6191, CRL_REG_LEN_08BIT, 0x12}, + {0x6192, CRL_REG_LEN_08BIT, 0x12}, + {0x6193, CRL_REG_LEN_08BIT, 0x00}, + {0x6194, CRL_REG_LEN_08BIT, 0xcc}, + {0x6195, CRL_REG_LEN_08BIT, 0x9c}, + {0x6196, CRL_REG_LEN_08BIT, 0xd2}, + {0x6197, CRL_REG_LEN_08BIT, 0xcc}, + {0x6198, CRL_REG_LEN_08BIT, 0x60}, + {0x6199, CRL_REG_LEN_08BIT, 0xd2}, + {0x619a, CRL_REG_LEN_08BIT, 0x04}, + {0x619b, CRL_REG_LEN_08BIT, 0xd5}, + {0x619c, CRL_REG_LEN_08BIT, 0x1a}, + {0x619d, CRL_REG_LEN_08BIT, 0xcc}, + {0x619e, CRL_REG_LEN_08BIT, 0x12}, + {0x619f, CRL_REG_LEN_08BIT, 0x00}, + {0x61a0, CRL_REG_LEN_08BIT, 0x12}, + {0x61a1, CRL_REG_LEN_08BIT, 0xcc}, + {0x61a2, CRL_REG_LEN_08BIT, 0x9c}, + {0x61a3, CRL_REG_LEN_08BIT, 0xd2}, + {0x61a4, CRL_REG_LEN_08BIT, 0xcc}, + {0x61a5, CRL_REG_LEN_08BIT, 0x60}, + {0x61a6, CRL_REG_LEN_08BIT, 0xd2}, + {0x61a7, CRL_REG_LEN_08BIT, 0x1a}, + {0x61a8, CRL_REG_LEN_08BIT, 0xcc}, + {0x61a9, CRL_REG_LEN_08BIT, 0x12}, + {0x61aa, CRL_REG_LEN_08BIT, 0x00}, + {0x61ab, CRL_REG_LEN_08BIT, 0x12}, + {0x61ac, CRL_REG_LEN_08BIT, 0xcc}, + {0x61ad, CRL_REG_LEN_08BIT, 0x9c}, + {0x61ae, CRL_REG_LEN_08BIT, 0xd2}, + {0x61af, CRL_REG_LEN_08BIT, 0xcc}, + {0x61b0, CRL_REG_LEN_08BIT, 0x60}, + {0x61b1, CRL_REG_LEN_08BIT, 0xd2}, + {0x61b2, CRL_REG_LEN_08BIT, 0x1a}, + {0x61b3, CRL_REG_LEN_08BIT, 0xcc}, + {0x61b4, CRL_REG_LEN_08BIT, 0x12}, + {0x61b5, CRL_REG_LEN_08BIT, 0x00}, + {0x61b6, CRL_REG_LEN_08BIT, 0x12}, + {0x61b7, CRL_REG_LEN_08BIT, 0xcc}, + {0x61b8, CRL_REG_LEN_08BIT, 0x9c}, + {0x61b9, CRL_REG_LEN_08BIT, 0xd2}, + {0x61ba, CRL_REG_LEN_08BIT, 0xcc}, + {0x61bb, CRL_REG_LEN_08BIT, 0x60}, + {0x61bc, CRL_REG_LEN_08BIT, 0xd2}, + {0x61bd, CRL_REG_LEN_08BIT, 0xd5}, + {0x61be, CRL_REG_LEN_08BIT, 0x1a}, + {0x61bf, CRL_REG_LEN_08BIT, 0xcc}, + {0x61c0, CRL_REG_LEN_08BIT, 0x12}, + {0x61c1, CRL_REG_LEN_08BIT, 0x12}, + {0x61c2, CRL_REG_LEN_08BIT, 0x00}, + {0x61c3, CRL_REG_LEN_08BIT, 0xcc}, + {0x61c4, CRL_REG_LEN_08BIT, 0x8a}, + {0x61c5, CRL_REG_LEN_08BIT, 0xd2}, + {0x61c6, CRL_REG_LEN_08BIT, 0xcc}, + {0x61c7, CRL_REG_LEN_08BIT, 0x74}, + {0x61c8, CRL_REG_LEN_08BIT, 0xd2}, + {0x61c9, CRL_REG_LEN_08BIT, 0xd5}, + {0x61ca, CRL_REG_LEN_08BIT, 0x1a}, + {0x61cb, CRL_REG_LEN_08BIT, 0xcc}, + {0x61cc, CRL_REG_LEN_08BIT, 0x12}, + {0x61cd, CRL_REG_LEN_08BIT, 0x00}, + 
{0x61ce, CRL_REG_LEN_08BIT, 0x12}, + {0x61cf, CRL_REG_LEN_08BIT, 0xcc}, + {0x61d0, CRL_REG_LEN_08BIT, 0x8a}, + {0x61d1, CRL_REG_LEN_08BIT, 0xd2}, + {0x61d2, CRL_REG_LEN_08BIT, 0xcc}, + {0x61d3, CRL_REG_LEN_08BIT, 0x74}, + {0x61d4, CRL_REG_LEN_08BIT, 0xd2}, + {0x61d5, CRL_REG_LEN_08BIT, 0x1a}, + {0x61d6, CRL_REG_LEN_08BIT, 0xcc}, + {0x61d7, CRL_REG_LEN_08BIT, 0x12}, + {0x61d8, CRL_REG_LEN_08BIT, 0x00}, + {0x61d9, CRL_REG_LEN_08BIT, 0x12}, + {0x61da, CRL_REG_LEN_08BIT, 0xcc}, + {0x61db, CRL_REG_LEN_08BIT, 0x8a}, + {0x61dc, CRL_REG_LEN_08BIT, 0xd2}, + {0x61dd, CRL_REG_LEN_08BIT, 0xcc}, + {0x61de, CRL_REG_LEN_08BIT, 0x74}, + {0x61df, CRL_REG_LEN_08BIT, 0xd2}, + {0x61e0, CRL_REG_LEN_08BIT, 0x1a}, + {0x61e1, CRL_REG_LEN_08BIT, 0xcc}, + {0x61e2, CRL_REG_LEN_08BIT, 0x12}, + {0x61e3, CRL_REG_LEN_08BIT, 0x00}, + {0x61e4, CRL_REG_LEN_08BIT, 0x12}, + {0x61e5, CRL_REG_LEN_08BIT, 0xcc}, + {0x61e6, CRL_REG_LEN_08BIT, 0x8a}, + {0x61e7, CRL_REG_LEN_08BIT, 0xd2}, + {0x61e8, CRL_REG_LEN_08BIT, 0xcc}, + {0x61e9, CRL_REG_LEN_08BIT, 0x74}, + {0x61ea, CRL_REG_LEN_08BIT, 0xd2}, + {0x61eb, CRL_REG_LEN_08BIT, 0xd5}, + {0x61ec, CRL_REG_LEN_08BIT, 0xcc}, + {0x61ed, CRL_REG_LEN_08BIT, 0x12}, + {0x61ee, CRL_REG_LEN_08BIT, 0x00}, + {0x61ef, CRL_REG_LEN_08BIT, 0x12}, + {0x61f0, CRL_REG_LEN_08BIT, 0xcc}, + {0x61f1, CRL_REG_LEN_08BIT, 0x9c}, + {0x61f2, CRL_REG_LEN_08BIT, 0xd5}, + {0x6400, CRL_REG_LEN_08BIT, 0x04}, + {0x6401, CRL_REG_LEN_08BIT, 0x04}, + {0x6402, CRL_REG_LEN_08BIT, 0x00}, + {0x6403, CRL_REG_LEN_08BIT, 0xff}, + {0x6404, CRL_REG_LEN_08BIT, 0x00}, + {0x6405, CRL_REG_LEN_08BIT, 0x08}, + {0x6406, CRL_REG_LEN_08BIT, 0x00}, + {0x6407, CRL_REG_LEN_08BIT, 0xff}, + {0x6408, CRL_REG_LEN_08BIT, 0x04}, + {0x6409, CRL_REG_LEN_08BIT, 0x70}, + {0x640a, CRL_REG_LEN_08BIT, 0x00}, + {0x640b, CRL_REG_LEN_08BIT, 0xff}, + {0x640c, CRL_REG_LEN_08BIT, 0x05}, + {0x640d, CRL_REG_LEN_08BIT, 0x14}, + {0x640e, CRL_REG_LEN_08BIT, 0x04}, + {0x640f, CRL_REG_LEN_08BIT, 0x71}, + {0x6410, CRL_REG_LEN_08BIT, 0x05}, + {0x6411, CRL_REG_LEN_08BIT, 0x74}, + {0x6412, CRL_REG_LEN_08BIT, 0x00}, + {0x6413, CRL_REG_LEN_08BIT, 0xff}, + {0x6414, CRL_REG_LEN_08BIT, 0x05}, + {0x6415, CRL_REG_LEN_08BIT, 0x54}, + {0x6416, CRL_REG_LEN_08BIT, 0x05}, + {0x6417, CRL_REG_LEN_08BIT, 0x44}, + {0x6418, CRL_REG_LEN_08BIT, 0x04}, + {0x6419, CRL_REG_LEN_08BIT, 0x30}, + {0x641a, CRL_REG_LEN_08BIT, 0x05}, + {0x641b, CRL_REG_LEN_08BIT, 0x46}, + {0x641c, CRL_REG_LEN_08BIT, 0x00}, + {0x641d, CRL_REG_LEN_08BIT, 0xff}, + {0x641e, CRL_REG_LEN_08BIT, 0x04}, + {0x641f, CRL_REG_LEN_08BIT, 0x31}, + {0x6420, CRL_REG_LEN_08BIT, 0x04}, + {0x6421, CRL_REG_LEN_08BIT, 0x30}, + {0x6422, CRL_REG_LEN_08BIT, 0x00}, + {0x6423, CRL_REG_LEN_08BIT, 0xff}, + {0x6424, CRL_REG_LEN_08BIT, 0x04}, + {0x6425, CRL_REG_LEN_08BIT, 0x20}, + {0x6426, CRL_REG_LEN_08BIT, 0x05}, + {0x6427, CRL_REG_LEN_08BIT, 0x06}, + {0x6428, CRL_REG_LEN_08BIT, 0x00}, + {0x6429, CRL_REG_LEN_08BIT, 0xff}, + {0x642a, CRL_REG_LEN_08BIT, 0x08}, + {0x642b, CRL_REG_LEN_08BIT, 0x2a}, + {0x642c, CRL_REG_LEN_08BIT, 0x08}, + {0x642d, CRL_REG_LEN_08BIT, 0x31}, + {0x642e, CRL_REG_LEN_08BIT, 0x00}, + {0x642f, CRL_REG_LEN_08BIT, 0xff}, + {0x6430, CRL_REG_LEN_08BIT, 0x08}, + {0x6431, CRL_REG_LEN_08BIT, 0x2a}, + {0x6432, CRL_REG_LEN_08BIT, 0x08}, + {0x6433, CRL_REG_LEN_08BIT, 0x31}, + {0x6434, CRL_REG_LEN_08BIT, 0x06}, + {0x6435, CRL_REG_LEN_08BIT, 0x20}, + {0x6436, CRL_REG_LEN_08BIT, 0x07}, + {0x6437, CRL_REG_LEN_08BIT, 0x00}, + {0x6438, CRL_REG_LEN_08BIT, 0x08}, + {0x6439, CRL_REG_LEN_08BIT, 0x40}, + {0x643a, CRL_REG_LEN_08BIT, 0x00}, + 
{0x643b, CRL_REG_LEN_08BIT, 0xff}, + {0x643c, CRL_REG_LEN_08BIT, 0x08}, + {0x643d, CRL_REG_LEN_08BIT, 0x2a}, + {0x643e, CRL_REG_LEN_08BIT, 0x08}, + {0x643f, CRL_REG_LEN_08BIT, 0x36}, + {0x6440, CRL_REG_LEN_08BIT, 0x06}, + {0x6441, CRL_REG_LEN_08BIT, 0x10}, + {0x6442, CRL_REG_LEN_08BIT, 0x07}, + {0x6443, CRL_REG_LEN_08BIT, 0x00}, + {0x6444, CRL_REG_LEN_08BIT, 0x08}, + {0x6445, CRL_REG_LEN_08BIT, 0x40}, + {0x6446, CRL_REG_LEN_08BIT, 0x00}, + {0x6447, CRL_REG_LEN_08BIT, 0xff}, + {0x6448, CRL_REG_LEN_08BIT, 0x08}, + {0x6449, CRL_REG_LEN_08BIT, 0x2a}, + {0x644a, CRL_REG_LEN_08BIT, 0x08}, + {0x644b, CRL_REG_LEN_08BIT, 0x3b}, + {0x644c, CRL_REG_LEN_08BIT, 0x06}, + {0x644d, CRL_REG_LEN_08BIT, 0x00}, + {0x644e, CRL_REG_LEN_08BIT, 0x07}, + {0x644f, CRL_REG_LEN_08BIT, 0x00}, + {0x6450, CRL_REG_LEN_08BIT, 0x08}, + {0x6451, CRL_REG_LEN_08BIT, 0x40}, + {0x6452, CRL_REG_LEN_08BIT, 0x00}, + {0x6453, CRL_REG_LEN_08BIT, 0xff}, + {0x6454, CRL_REG_LEN_08BIT, 0x06}, + {0x6455, CRL_REG_LEN_08BIT, 0x00}, + {0x6456, CRL_REG_LEN_08BIT, 0x07}, + {0x6457, CRL_REG_LEN_08BIT, 0x05}, + {0x6458, CRL_REG_LEN_08BIT, 0x01}, + {0x6459, CRL_REG_LEN_08BIT, 0xaf}, + {0x645a, CRL_REG_LEN_08BIT, 0x01}, + {0x645b, CRL_REG_LEN_08BIT, 0x0f}, + {0x645c, CRL_REG_LEN_08BIT, 0x01}, + {0x645d, CRL_REG_LEN_08BIT, 0x90}, + {0x645e, CRL_REG_LEN_08BIT, 0x01}, + {0x645f, CRL_REG_LEN_08BIT, 0xc8}, + {0x6460, CRL_REG_LEN_08BIT, 0x00}, + {0x6461, CRL_REG_LEN_08BIT, 0xff}, + {0x6462, CRL_REG_LEN_08BIT, 0x01}, + {0x6463, CRL_REG_LEN_08BIT, 0xac}, + {0x6464, CRL_REG_LEN_08BIT, 0x01}, + {0x6465, CRL_REG_LEN_08BIT, 0x0c}, + {0x6466, CRL_REG_LEN_08BIT, 0x01}, + {0x6467, CRL_REG_LEN_08BIT, 0x90}, + {0x6468, CRL_REG_LEN_08BIT, 0x01}, + {0x6469, CRL_REG_LEN_08BIT, 0xe8}, + {0x646a, CRL_REG_LEN_08BIT, 0x00}, + {0x646b, CRL_REG_LEN_08BIT, 0xff}, + {0x646c, CRL_REG_LEN_08BIT, 0x01}, + {0x646d, CRL_REG_LEN_08BIT, 0xad}, + {0x646e, CRL_REG_LEN_08BIT, 0x01}, + {0x646f, CRL_REG_LEN_08BIT, 0x0d}, + {0x6470, CRL_REG_LEN_08BIT, 0x01}, + {0x6471, CRL_REG_LEN_08BIT, 0x90}, + {0x6472, CRL_REG_LEN_08BIT, 0x01}, + {0x6473, CRL_REG_LEN_08BIT, 0xe8}, + {0x6474, CRL_REG_LEN_08BIT, 0x00}, + {0x6475, CRL_REG_LEN_08BIT, 0xff}, + {0x6476, CRL_REG_LEN_08BIT, 0x01}, + {0x6477, CRL_REG_LEN_08BIT, 0xae}, + {0x6478, CRL_REG_LEN_08BIT, 0x01}, + {0x6479, CRL_REG_LEN_08BIT, 0x0e}, + {0x647a, CRL_REG_LEN_08BIT, 0x01}, + {0x647b, CRL_REG_LEN_08BIT, 0x90}, + {0x647c, CRL_REG_LEN_08BIT, 0x01}, + {0x647d, CRL_REG_LEN_08BIT, 0xe8}, + {0x647e, CRL_REG_LEN_08BIT, 0x00}, + {0x647f, CRL_REG_LEN_08BIT, 0xff}, + {0x6480, CRL_REG_LEN_08BIT, 0x01}, + {0x6481, CRL_REG_LEN_08BIT, 0xb0}, + {0x6482, CRL_REG_LEN_08BIT, 0x01}, + {0x6483, CRL_REG_LEN_08BIT, 0xb1}, + {0x6484, CRL_REG_LEN_08BIT, 0x01}, + {0x6485, CRL_REG_LEN_08BIT, 0xb2}, + {0x6486, CRL_REG_LEN_08BIT, 0x01}, + {0x6487, CRL_REG_LEN_08BIT, 0xb3}, + {0x6488, CRL_REG_LEN_08BIT, 0x01}, + {0x6489, CRL_REG_LEN_08BIT, 0xb4}, + {0x648a, CRL_REG_LEN_08BIT, 0x01}, + {0x648b, CRL_REG_LEN_08BIT, 0xb5}, + {0x648c, CRL_REG_LEN_08BIT, 0x01}, + {0x648d, CRL_REG_LEN_08BIT, 0xb6}, + {0x648e, CRL_REG_LEN_08BIT, 0x01}, + {0x648f, CRL_REG_LEN_08BIT, 0xb7}, + {0x6490, CRL_REG_LEN_08BIT, 0x01}, + {0x6491, CRL_REG_LEN_08BIT, 0xb8}, + {0x6492, CRL_REG_LEN_08BIT, 0x01}, + {0x6493, CRL_REG_LEN_08BIT, 0xb9}, + {0x6494, CRL_REG_LEN_08BIT, 0x01}, + {0x6495, CRL_REG_LEN_08BIT, 0xba}, + {0x6496, CRL_REG_LEN_08BIT, 0x01}, + {0x6497, CRL_REG_LEN_08BIT, 0xbb}, + {0x6498, CRL_REG_LEN_08BIT, 0x01}, + {0x6499, CRL_REG_LEN_08BIT, 0xbc}, + {0x649a, CRL_REG_LEN_08BIT, 0x01}, + 
{0x649b, CRL_REG_LEN_08BIT, 0xbd}, + {0x649c, CRL_REG_LEN_08BIT, 0x01}, + {0x649d, CRL_REG_LEN_08BIT, 0xbe}, + {0x649e, CRL_REG_LEN_08BIT, 0x01}, + {0x649f, CRL_REG_LEN_08BIT, 0xbf}, + {0x64a0, CRL_REG_LEN_08BIT, 0x01}, + {0x64a1, CRL_REG_LEN_08BIT, 0xc0}, + {0x64a2, CRL_REG_LEN_08BIT, 0x00}, + {0x64a3, CRL_REG_LEN_08BIT, 0xff}, + {0x64a4, CRL_REG_LEN_08BIT, 0x06}, + {0x64a5, CRL_REG_LEN_08BIT, 0x00}, + {0x64a6, CRL_REG_LEN_08BIT, 0x01}, + {0x64a7, CRL_REG_LEN_08BIT, 0xf6}, + {0x64a8, CRL_REG_LEN_08BIT, 0x04}, + {0x64a9, CRL_REG_LEN_08BIT, 0x30}, + {0x64aa, CRL_REG_LEN_08BIT, 0x00}, + {0x64ab, CRL_REG_LEN_08BIT, 0xff}, + {0x64ac, CRL_REG_LEN_08BIT, 0x06}, + {0x64ad, CRL_REG_LEN_08BIT, 0x10}, + {0x64ae, CRL_REG_LEN_08BIT, 0x01}, + {0x64af, CRL_REG_LEN_08BIT, 0xf6}, + {0x64b0, CRL_REG_LEN_08BIT, 0x04}, + {0x64b1, CRL_REG_LEN_08BIT, 0x30}, + {0x64b2, CRL_REG_LEN_08BIT, 0x06}, + {0x64b3, CRL_REG_LEN_08BIT, 0x00}, + {0x64b4, CRL_REG_LEN_08BIT, 0x00}, + {0x64b5, CRL_REG_LEN_08BIT, 0xff}, + {0x64b6, CRL_REG_LEN_08BIT, 0x06}, + {0x64b7, CRL_REG_LEN_08BIT, 0x20}, + {0x64b8, CRL_REG_LEN_08BIT, 0x01}, + {0x64b9, CRL_REG_LEN_08BIT, 0xf6}, + {0x64ba, CRL_REG_LEN_08BIT, 0x04}, + {0x64bb, CRL_REG_LEN_08BIT, 0x30}, + {0x64bc, CRL_REG_LEN_08BIT, 0x06}, + {0x64bd, CRL_REG_LEN_08BIT, 0x00}, + {0x64be, CRL_REG_LEN_08BIT, 0x00}, + {0x64bf, CRL_REG_LEN_08BIT, 0xff}, + {0x64c0, CRL_REG_LEN_08BIT, 0x04}, + {0x64c1, CRL_REG_LEN_08BIT, 0x31}, + {0x64c2, CRL_REG_LEN_08BIT, 0x04}, + {0x64c3, CRL_REG_LEN_08BIT, 0x30}, + {0x64c4, CRL_REG_LEN_08BIT, 0x01}, + {0x64c5, CRL_REG_LEN_08BIT, 0x20}, + {0x64c6, CRL_REG_LEN_08BIT, 0x01}, + {0x64c7, CRL_REG_LEN_08BIT, 0x31}, + {0x64c8, CRL_REG_LEN_08BIT, 0x01}, + {0x64c9, CRL_REG_LEN_08BIT, 0x32}, + {0x64ca, CRL_REG_LEN_08BIT, 0x01}, + {0x64cb, CRL_REG_LEN_08BIT, 0x33}, + {0x64cc, CRL_REG_LEN_08BIT, 0x01}, + {0x64cd, CRL_REG_LEN_08BIT, 0x34}, + {0x64ce, CRL_REG_LEN_08BIT, 0x01}, + {0x64cf, CRL_REG_LEN_08BIT, 0x35}, + {0x64d0, CRL_REG_LEN_08BIT, 0x01}, + {0x64d1, CRL_REG_LEN_08BIT, 0x36}, + {0x64d2, CRL_REG_LEN_08BIT, 0x01}, + {0x64d3, CRL_REG_LEN_08BIT, 0x37}, + {0x64d4, CRL_REG_LEN_08BIT, 0x01}, + {0x64d5, CRL_REG_LEN_08BIT, 0x38}, + {0x64d6, CRL_REG_LEN_08BIT, 0x01}, + {0x64d7, CRL_REG_LEN_08BIT, 0x39}, + {0x64d8, CRL_REG_LEN_08BIT, 0x01}, + {0x64d9, CRL_REG_LEN_08BIT, 0x3a}, + {0x64da, CRL_REG_LEN_08BIT, 0x01}, + {0x64db, CRL_REG_LEN_08BIT, 0x3b}, + {0x64dc, CRL_REG_LEN_08BIT, 0x01}, + {0x64dd, CRL_REG_LEN_08BIT, 0x3c}, + {0x64de, CRL_REG_LEN_08BIT, 0x01}, + {0x64df, CRL_REG_LEN_08BIT, 0x3d}, + {0x64e0, CRL_REG_LEN_08BIT, 0x01}, + {0x64e1, CRL_REG_LEN_08BIT, 0x3e}, + {0x64e2, CRL_REG_LEN_08BIT, 0x01}, + {0x64e3, CRL_REG_LEN_08BIT, 0x3f}, + {0x64e4, CRL_REG_LEN_08BIT, 0x02}, + {0x64e5, CRL_REG_LEN_08BIT, 0xa0}, + {0x64e6, CRL_REG_LEN_08BIT, 0x00}, + {0x64e7, CRL_REG_LEN_08BIT, 0xff}, + {0x64e8, CRL_REG_LEN_08BIT, 0x04}, + {0x64e9, CRL_REG_LEN_08BIT, 0x31}, + {0x64ea, CRL_REG_LEN_08BIT, 0x04}, + {0x64eb, CRL_REG_LEN_08BIT, 0x30}, + {0x64ec, CRL_REG_LEN_08BIT, 0x01}, + {0x64ed, CRL_REG_LEN_08BIT, 0x00}, + {0x64ee, CRL_REG_LEN_08BIT, 0x01}, + {0x64ef, CRL_REG_LEN_08BIT, 0x11}, + {0x64f0, CRL_REG_LEN_08BIT, 0x01}, + {0x64f1, CRL_REG_LEN_08BIT, 0x12}, + {0x64f2, CRL_REG_LEN_08BIT, 0x01}, + {0x64f3, CRL_REG_LEN_08BIT, 0x13}, + {0x64f4, CRL_REG_LEN_08BIT, 0x01}, + {0x64f5, CRL_REG_LEN_08BIT, 0x14}, + {0x64f6, CRL_REG_LEN_08BIT, 0x01}, + {0x64f7, CRL_REG_LEN_08BIT, 0x15}, + {0x64f8, CRL_REG_LEN_08BIT, 0x01}, + {0x64f9, CRL_REG_LEN_08BIT, 0x16}, + {0x64fa, CRL_REG_LEN_08BIT, 0x01}, + 
{0x64fb, CRL_REG_LEN_08BIT, 0x17}, + {0x64fc, CRL_REG_LEN_08BIT, 0x01}, + {0x64fd, CRL_REG_LEN_08BIT, 0x18}, + {0x64fe, CRL_REG_LEN_08BIT, 0x01}, + {0x64ff, CRL_REG_LEN_08BIT, 0x19}, + {0x6500, CRL_REG_LEN_08BIT, 0x01}, + {0x6501, CRL_REG_LEN_08BIT, 0x1a}, + {0x6502, CRL_REG_LEN_08BIT, 0x01}, + {0x6503, CRL_REG_LEN_08BIT, 0x1b}, + {0x6504, CRL_REG_LEN_08BIT, 0x01}, + {0x6505, CRL_REG_LEN_08BIT, 0x1c}, + {0x6506, CRL_REG_LEN_08BIT, 0x01}, + {0x6507, CRL_REG_LEN_08BIT, 0x1d}, + {0x6508, CRL_REG_LEN_08BIT, 0x01}, + {0x6509, CRL_REG_LEN_08BIT, 0x1e}, + {0x650a, CRL_REG_LEN_08BIT, 0x01}, + {0x650b, CRL_REG_LEN_08BIT, 0x1f}, + {0x650c, CRL_REG_LEN_08BIT, 0x02}, + {0x650d, CRL_REG_LEN_08BIT, 0xa0}, + {0x650e, CRL_REG_LEN_08BIT, 0x00}, + {0x650f, CRL_REG_LEN_08BIT, 0xff}, + {0x6510, CRL_REG_LEN_08BIT, 0x04}, + {0x6511, CRL_REG_LEN_08BIT, 0x20}, + {0x6512, CRL_REG_LEN_08BIT, 0x05}, + {0x6513, CRL_REG_LEN_08BIT, 0x86}, + {0x6514, CRL_REG_LEN_08BIT, 0x03}, + {0x6515, CRL_REG_LEN_08BIT, 0x0b}, + {0x6516, CRL_REG_LEN_08BIT, 0x05}, + {0x6517, CRL_REG_LEN_08BIT, 0x86}, + {0x6518, CRL_REG_LEN_08BIT, 0x00}, + {0x6519, CRL_REG_LEN_08BIT, 0x00}, + {0x651a, CRL_REG_LEN_08BIT, 0x05}, + {0x651b, CRL_REG_LEN_08BIT, 0x06}, + {0x651c, CRL_REG_LEN_08BIT, 0x00}, + {0x651d, CRL_REG_LEN_08BIT, 0x04}, + {0x651e, CRL_REG_LEN_08BIT, 0x05}, + {0x651f, CRL_REG_LEN_08BIT, 0x04}, + {0x6520, CRL_REG_LEN_08BIT, 0x00}, + {0x6521, CRL_REG_LEN_08BIT, 0x04}, + {0x6522, CRL_REG_LEN_08BIT, 0x05}, + {0x6523, CRL_REG_LEN_08BIT, 0x00}, + {0x6524, CRL_REG_LEN_08BIT, 0x05}, + {0x6525, CRL_REG_LEN_08BIT, 0x0a}, + {0x6526, CRL_REG_LEN_08BIT, 0x03}, + {0x6527, CRL_REG_LEN_08BIT, 0x9a}, + {0x6528, CRL_REG_LEN_08BIT, 0x05}, + {0x6529, CRL_REG_LEN_08BIT, 0x86}, + {0x652a, CRL_REG_LEN_08BIT, 0x00}, + {0x652b, CRL_REG_LEN_08BIT, 0x00}, + {0x652c, CRL_REG_LEN_08BIT, 0x05}, + {0x652d, CRL_REG_LEN_08BIT, 0x06}, + {0x652e, CRL_REG_LEN_08BIT, 0x00}, + {0x652f, CRL_REG_LEN_08BIT, 0x01}, + {0x6530, CRL_REG_LEN_08BIT, 0x05}, + {0x6531, CRL_REG_LEN_08BIT, 0x04}, + {0x6532, CRL_REG_LEN_08BIT, 0x00}, + {0x6533, CRL_REG_LEN_08BIT, 0x04}, + {0x6534, CRL_REG_LEN_08BIT, 0x05}, + {0x6535, CRL_REG_LEN_08BIT, 0x00}, + {0x6536, CRL_REG_LEN_08BIT, 0x05}, + {0x6537, CRL_REG_LEN_08BIT, 0x0a}, + {0x6538, CRL_REG_LEN_08BIT, 0x03}, + {0x6539, CRL_REG_LEN_08BIT, 0x99}, + {0x653a, CRL_REG_LEN_08BIT, 0x05}, + {0x653b, CRL_REG_LEN_08BIT, 0x06}, + {0x653c, CRL_REG_LEN_08BIT, 0x00}, + {0x653d, CRL_REG_LEN_08BIT, 0x00}, + {0x653e, CRL_REG_LEN_08BIT, 0x05}, + {0x653f, CRL_REG_LEN_08BIT, 0x04}, + {0x6540, CRL_REG_LEN_08BIT, 0x00}, + {0x6541, CRL_REG_LEN_08BIT, 0x04}, + {0x6542, CRL_REG_LEN_08BIT, 0x05}, + {0x6543, CRL_REG_LEN_08BIT, 0x00}, + {0x6544, CRL_REG_LEN_08BIT, 0x05}, + {0x6545, CRL_REG_LEN_08BIT, 0x0a}, + {0x6546, CRL_REG_LEN_08BIT, 0x03}, + {0x6547, CRL_REG_LEN_08BIT, 0x98}, + {0x6548, CRL_REG_LEN_08BIT, 0x05}, + {0x6549, CRL_REG_LEN_08BIT, 0x06}, + {0x654a, CRL_REG_LEN_08BIT, 0x00}, + {0x654b, CRL_REG_LEN_08BIT, 0x00}, + {0x654c, CRL_REG_LEN_08BIT, 0x05}, + {0x654d, CRL_REG_LEN_08BIT, 0x04}, + {0x654e, CRL_REG_LEN_08BIT, 0x00}, + {0x654f, CRL_REG_LEN_08BIT, 0x04}, + {0x6550, CRL_REG_LEN_08BIT, 0x05}, + {0x6551, CRL_REG_LEN_08BIT, 0x00}, + {0x6552, CRL_REG_LEN_08BIT, 0x05}, + {0x6553, CRL_REG_LEN_08BIT, 0x0a}, + {0x6554, CRL_REG_LEN_08BIT, 0x03}, + {0x6555, CRL_REG_LEN_08BIT, 0x97}, + {0x6556, CRL_REG_LEN_08BIT, 0x05}, + {0x6557, CRL_REG_LEN_08BIT, 0x06}, + {0x6558, CRL_REG_LEN_08BIT, 0x05}, + {0x6559, CRL_REG_LEN_08BIT, 0x04}, + {0x655a, CRL_REG_LEN_08BIT, 0x00}, + 
{0x655b, CRL_REG_LEN_08BIT, 0x04}, + {0x655c, CRL_REG_LEN_08BIT, 0x05}, + {0x655d, CRL_REG_LEN_08BIT, 0x00}, + {0x655e, CRL_REG_LEN_08BIT, 0x05}, + {0x655f, CRL_REG_LEN_08BIT, 0x0a}, + {0x6560, CRL_REG_LEN_08BIT, 0x03}, + {0x6561, CRL_REG_LEN_08BIT, 0x96}, + {0x6562, CRL_REG_LEN_08BIT, 0x05}, + {0x6563, CRL_REG_LEN_08BIT, 0x06}, + {0x6564, CRL_REG_LEN_08BIT, 0x05}, + {0x6565, CRL_REG_LEN_08BIT, 0x04}, + {0x6566, CRL_REG_LEN_08BIT, 0x00}, + {0x6567, CRL_REG_LEN_08BIT, 0x04}, + {0x6568, CRL_REG_LEN_08BIT, 0x05}, + {0x6569, CRL_REG_LEN_08BIT, 0x00}, + {0x656a, CRL_REG_LEN_08BIT, 0x05}, + {0x656b, CRL_REG_LEN_08BIT, 0x0a}, + {0x656c, CRL_REG_LEN_08BIT, 0x03}, + {0x656d, CRL_REG_LEN_08BIT, 0x95}, + {0x656e, CRL_REG_LEN_08BIT, 0x05}, + {0x656f, CRL_REG_LEN_08BIT, 0x06}, + {0x6570, CRL_REG_LEN_08BIT, 0x05}, + {0x6571, CRL_REG_LEN_08BIT, 0x04}, + {0x6572, CRL_REG_LEN_08BIT, 0x00}, + {0x6573, CRL_REG_LEN_08BIT, 0x04}, + {0x6574, CRL_REG_LEN_08BIT, 0x05}, + {0x6575, CRL_REG_LEN_08BIT, 0x00}, + {0x6576, CRL_REG_LEN_08BIT, 0x05}, + {0x6577, CRL_REG_LEN_08BIT, 0x0a}, + {0x6578, CRL_REG_LEN_08BIT, 0x03}, + {0x6579, CRL_REG_LEN_08BIT, 0x94}, + {0x657a, CRL_REG_LEN_08BIT, 0x05}, + {0x657b, CRL_REG_LEN_08BIT, 0x06}, + {0x657c, CRL_REG_LEN_08BIT, 0x00}, + {0x657d, CRL_REG_LEN_08BIT, 0x00}, + {0x657e, CRL_REG_LEN_08BIT, 0x05}, + {0x657f, CRL_REG_LEN_08BIT, 0x04}, + {0x6580, CRL_REG_LEN_08BIT, 0x00}, + {0x6581, CRL_REG_LEN_08BIT, 0x04}, + {0x6582, CRL_REG_LEN_08BIT, 0x05}, + {0x6583, CRL_REG_LEN_08BIT, 0x00}, + {0x6584, CRL_REG_LEN_08BIT, 0x05}, + {0x6585, CRL_REG_LEN_08BIT, 0x0a}, + {0x6586, CRL_REG_LEN_08BIT, 0x03}, + {0x6587, CRL_REG_LEN_08BIT, 0x93}, + {0x6588, CRL_REG_LEN_08BIT, 0x05}, + {0x6589, CRL_REG_LEN_08BIT, 0x06}, + {0x658a, CRL_REG_LEN_08BIT, 0x00}, + {0x658b, CRL_REG_LEN_08BIT, 0x00}, + {0x658c, CRL_REG_LEN_08BIT, 0x05}, + {0x658d, CRL_REG_LEN_08BIT, 0x04}, + {0x658e, CRL_REG_LEN_08BIT, 0x00}, + {0x658f, CRL_REG_LEN_08BIT, 0x04}, + {0x6590, CRL_REG_LEN_08BIT, 0x05}, + {0x6591, CRL_REG_LEN_08BIT, 0x00}, + {0x6592, CRL_REG_LEN_08BIT, 0x05}, + {0x6593, CRL_REG_LEN_08BIT, 0x0a}, + {0x6594, CRL_REG_LEN_08BIT, 0x03}, + {0x6595, CRL_REG_LEN_08BIT, 0x92}, + {0x6596, CRL_REG_LEN_08BIT, 0x05}, + {0x6597, CRL_REG_LEN_08BIT, 0x06}, + {0x6598, CRL_REG_LEN_08BIT, 0x05}, + {0x6599, CRL_REG_LEN_08BIT, 0x04}, + {0x659a, CRL_REG_LEN_08BIT, 0x00}, + {0x659b, CRL_REG_LEN_08BIT, 0x04}, + {0x659c, CRL_REG_LEN_08BIT, 0x05}, + {0x659d, CRL_REG_LEN_08BIT, 0x00}, + {0x659e, CRL_REG_LEN_08BIT, 0x05}, + {0x659f, CRL_REG_LEN_08BIT, 0x0a}, + {0x65a0, CRL_REG_LEN_08BIT, 0x03}, + {0x65a1, CRL_REG_LEN_08BIT, 0x91}, + {0x65a2, CRL_REG_LEN_08BIT, 0x05}, + {0x65a3, CRL_REG_LEN_08BIT, 0x06}, + {0x65a4, CRL_REG_LEN_08BIT, 0x05}, + {0x65a5, CRL_REG_LEN_08BIT, 0x04}, + {0x65a6, CRL_REG_LEN_08BIT, 0x00}, + {0x65a7, CRL_REG_LEN_08BIT, 0x04}, + {0x65a8, CRL_REG_LEN_08BIT, 0x05}, + {0x65a9, CRL_REG_LEN_08BIT, 0x00}, + {0x65aa, CRL_REG_LEN_08BIT, 0x05}, + {0x65ab, CRL_REG_LEN_08BIT, 0x0a}, + {0x65ac, CRL_REG_LEN_08BIT, 0x03}, + {0x65ad, CRL_REG_LEN_08BIT, 0x90}, + {0x65ae, CRL_REG_LEN_08BIT, 0x05}, + {0x65af, CRL_REG_LEN_08BIT, 0x06}, + {0x65b0, CRL_REG_LEN_08BIT, 0x05}, + {0x65b1, CRL_REG_LEN_08BIT, 0x04}, + {0x65b2, CRL_REG_LEN_08BIT, 0x00}, + {0x65b3, CRL_REG_LEN_08BIT, 0x04}, + {0x65b4, CRL_REG_LEN_08BIT, 0x05}, + {0x65b5, CRL_REG_LEN_08BIT, 0x00}, + {0x65b6, CRL_REG_LEN_08BIT, 0x05}, + {0x65b7, CRL_REG_LEN_08BIT, 0x0a}, + {0x65b8, CRL_REG_LEN_08BIT, 0x02}, + {0x65b9, CRL_REG_LEN_08BIT, 0x90}, + {0x65ba, CRL_REG_LEN_08BIT, 0x05}, + 
{0x65bb, CRL_REG_LEN_08BIT, 0x06}, + {0x65bc, CRL_REG_LEN_08BIT, 0x00}, + {0x65bd, CRL_REG_LEN_08BIT, 0xff}, + {0x65be, CRL_REG_LEN_08BIT, 0x04}, + {0x65bf, CRL_REG_LEN_08BIT, 0x70}, + {0x65c0, CRL_REG_LEN_08BIT, 0x08}, + {0x65c1, CRL_REG_LEN_08BIT, 0x76}, + {0x65c2, CRL_REG_LEN_08BIT, 0x00}, + {0x65c3, CRL_REG_LEN_08BIT, 0xff}, + {0x65c4, CRL_REG_LEN_08BIT, 0x08}, + {0x65c5, CRL_REG_LEN_08BIT, 0x76}, + {0x65c6, CRL_REG_LEN_08BIT, 0x04}, + {0x65c7, CRL_REG_LEN_08BIT, 0x0c}, + {0x65c8, CRL_REG_LEN_08BIT, 0x05}, + {0x65c9, CRL_REG_LEN_08BIT, 0x07}, + {0x65ca, CRL_REG_LEN_08BIT, 0x04}, + {0x65cb, CRL_REG_LEN_08BIT, 0x04}, + {0x65cc, CRL_REG_LEN_08BIT, 0x00}, + {0x65cd, CRL_REG_LEN_08BIT, 0xff}, + {0x65ce, CRL_REG_LEN_08BIT, 0x00}, + {0x65cf, CRL_REG_LEN_08BIT, 0xff}, + {0x65d0, CRL_REG_LEN_08BIT, 0x00}, + {0x65d1, CRL_REG_LEN_08BIT, 0xff}, + {0x303a, CRL_REG_LEN_08BIT, 0x04}, + {0x303b, CRL_REG_LEN_08BIT, 0x7f}, + {0x303c, CRL_REG_LEN_08BIT, 0xfe}, + {0x303d, CRL_REG_LEN_08BIT, 0x19}, + {0x303e, CRL_REG_LEN_08BIT, 0xd7}, + {0x303f, CRL_REG_LEN_08BIT, 0x09}, + {0x3040, CRL_REG_LEN_08BIT, 0x78}, + {0x3042, CRL_REG_LEN_08BIT, 0x05}, + {0x328a, CRL_REG_LEN_08BIT, 0x10},
+};
+
+static struct crl_register_write_rep ov10640_1280_1088_LONG_RAW[] = {
+ {0x328a, CRL_REG_LEN_08BIT, 0x11}, + {0x313f, CRL_REG_LEN_08BIT, 0x80}, + {0x3132, CRL_REG_LEN_08BIT, 0x24}, + {0x3014, CRL_REG_LEN_08BIT, 0x03}, + {0x3023, CRL_REG_LEN_08BIT, 0x05}, + {0x3032, CRL_REG_LEN_08BIT, 0x35}, + {0x3033, CRL_REG_LEN_08BIT, 0x04}, + {0x3054, CRL_REG_LEN_08BIT, 0x00}, + {0x3055, CRL_REG_LEN_08BIT, 0x08}, + {0x3056, CRL_REG_LEN_08BIT, 0x01}, + {0x3057, CRL_REG_LEN_08BIT, 0xff}, + {0x3058, CRL_REG_LEN_08BIT, 0xaf}, + {0x3059, CRL_REG_LEN_08BIT, 0x44}, + {0x305a, CRL_REG_LEN_08BIT, 0x02}, + {0x305b, CRL_REG_LEN_08BIT, 0x00}, + {0x305c, CRL_REG_LEN_08BIT, 0x30}, + {0x305d, CRL_REG_LEN_08BIT, 0x9e}, + {0x305e, CRL_REG_LEN_08BIT, 0x19}, + {0x305f, CRL_REG_LEN_08BIT, 0x18}, + {0x3060, CRL_REG_LEN_08BIT, 0xf9}, + {0x3061, CRL_REG_LEN_08BIT, 0xf0}, + {0x308c, CRL_REG_LEN_08BIT, 0xB3}, + {0x308f, CRL_REG_LEN_08BIT, 0x10}, + {0x3091, CRL_REG_LEN_08BIT, 0x00}, + {0x3093, CRL_REG_LEN_08BIT, 0x01}, + {0x30a3, CRL_REG_LEN_08BIT, 0x08}, + {0x30ad, CRL_REG_LEN_08BIT, 0x03}, + {0x30ae, CRL_REG_LEN_08BIT, 0x80}, + {0x30af, CRL_REG_LEN_08BIT, 0x80}, + {0x30b0, CRL_REG_LEN_08BIT, 0xff}, + {0x30b1, CRL_REG_LEN_08BIT, 0x3f}, + {0x30b2, CRL_REG_LEN_08BIT, 0x22}, + {0x30b9, CRL_REG_LEN_08BIT, 0x22}, + {0x30bb, CRL_REG_LEN_08BIT, 0x00}, + {0x30bc, CRL_REG_LEN_08BIT, 0x00}, + {0x30bd, CRL_REG_LEN_08BIT, 0x00}, + {0x30be, CRL_REG_LEN_08BIT, 0x00}, + {0x30bf, CRL_REG_LEN_08BIT, 0x00}, + {0x30c0, CRL_REG_LEN_08BIT, 0x00}, + {0x30c1, CRL_REG_LEN_08BIT, 0x00}, + {0x30c2, CRL_REG_LEN_08BIT, 0x00}, + {0x30c3, CRL_REG_LEN_08BIT, 0x00}, + {0x30c4, CRL_REG_LEN_08BIT, 0x80}, + {0x30c5, CRL_REG_LEN_08BIT, 0x00}, + {0x30c6, CRL_REG_LEN_08BIT, 0x80}, + {0x30c7, CRL_REG_LEN_08BIT, 0x00}, + {0x30c8, CRL_REG_LEN_08BIT, 0x80}, + {0x3119, CRL_REG_LEN_08BIT, 0x45}, + {0x311a, CRL_REG_LEN_08BIT, 0x01}, + {0x311b, CRL_REG_LEN_08BIT, 0x4a}, + {0x3074, CRL_REG_LEN_08BIT, 0x00}, + {0x3075, CRL_REG_LEN_08BIT, 0x00}, + {0x3076, CRL_REG_LEN_08BIT, 0x00}, + {0x3077, CRL_REG_LEN_08BIT, 0x02}, + {0x3078, CRL_REG_LEN_08BIT, 0x05}, + {0x3079, CRL_REG_LEN_08BIT, 0x07}, + {0x307a, CRL_REG_LEN_08BIT, 0x04}, + {0x307b, CRL_REG_LEN_08BIT, 0x45}, + {0x307c, CRL_REG_LEN_08BIT, 0x05}, + {0x307d, CRL_REG_LEN_08BIT, 0x00}, + {0x307e, CRL_REG_LEN_08BIT, 0x04}, + {0x307f, CRL_REG_LEN_08BIT, 0x40}, + 
{0x3084, CRL_REG_LEN_08BIT, 0x00}, + {0x3085, CRL_REG_LEN_08BIT, 0x04}, + {0x3086, CRL_REG_LEN_08BIT, 0x00}, + {0x3087, CRL_REG_LEN_08BIT, 0x04}, + {0x3088, CRL_REG_LEN_08BIT, 0x00}, + {0x3089, CRL_REG_LEN_08BIT, 0x40}, + {0x308d, CRL_REG_LEN_08BIT, 0x92}, + {0x3094, CRL_REG_LEN_08BIT, 0xa5}, + {0x30fa, CRL_REG_LEN_08BIT, 0x06}, + {0x3120, CRL_REG_LEN_08BIT, 0x00}, + {0x3121, CRL_REG_LEN_08BIT, 0x01}, + {0x3122, CRL_REG_LEN_08BIT, 0x00}, + {0x3127, CRL_REG_LEN_08BIT, 0x63}, + {0x3128, CRL_REG_LEN_08BIT, 0xc0}, + {0x3129, CRL_REG_LEN_08BIT, 0x00}, + {0x31be, CRL_REG_LEN_08BIT, 0x01}, + {0x30a5, CRL_REG_LEN_08BIT, 0x78}, + {0x30a6, CRL_REG_LEN_08BIT, 0x40}, + {0x30a7, CRL_REG_LEN_08BIT, 0x78}, + {0x30a8, CRL_REG_LEN_08BIT, 0x80}, + {0x30a9, CRL_REG_LEN_08BIT, 0x78}, + {0x30aa, CRL_REG_LEN_08BIT, 0xe0}, + {0x30ab, CRL_REG_LEN_08BIT, 0xf9}, + {0x30ac, CRL_REG_LEN_08BIT, 0xc0}, + {0x3440, CRL_REG_LEN_08BIT, 0x04}, + {0x3444, CRL_REG_LEN_08BIT, 0x28}, + {0x344e, CRL_REG_LEN_08BIT, 0x2c}, + {0x3457, CRL_REG_LEN_08BIT, 0x33}, + {0x345e, CRL_REG_LEN_08BIT, 0x38}, + {0x3461, CRL_REG_LEN_08BIT, 0xa8}, + {0x7002, CRL_REG_LEN_08BIT, 0xaa}, + {0x7001, CRL_REG_LEN_08BIT, 0xdf}, + {0x7048, CRL_REG_LEN_08BIT, 0x00}, + {0x7049, CRL_REG_LEN_08BIT, 0x02}, + {0x704a, CRL_REG_LEN_08BIT, 0x02}, + {0x704b, CRL_REG_LEN_08BIT, 0x00}, + {0x704c, CRL_REG_LEN_08BIT, 0x01}, + {0x704d, CRL_REG_LEN_08BIT, 0x00}, + {0x7043, CRL_REG_LEN_08BIT, 0x04}, + {0x7040, CRL_REG_LEN_08BIT, 0x3c}, + {0x7047, CRL_REG_LEN_08BIT, 0x00}, + {0x7044, CRL_REG_LEN_08BIT, 0x01}, + {0x7000, CRL_REG_LEN_08BIT, 0x1f}, + {0x7084, CRL_REG_LEN_08BIT, 0x01}, + {0x7085, CRL_REG_LEN_08BIT, 0x03}, + {0x7086, CRL_REG_LEN_08BIT, 0x02}, + {0x7087, CRL_REG_LEN_08BIT, 0x40}, + {0x7088, CRL_REG_LEN_08BIT, 0x01}, + {0x7089, CRL_REG_LEN_08BIT, 0x20}, + {0x707f, CRL_REG_LEN_08BIT, 0x04}, + {0x707c, CRL_REG_LEN_08BIT, 0x3c}, + {0x7083, CRL_REG_LEN_08BIT, 0x00}, + {0x7080, CRL_REG_LEN_08BIT, 0x01}, + {0x7003, CRL_REG_LEN_08BIT, 0xdf}, + {0x70c0, CRL_REG_LEN_08BIT, 0x00}, + {0x70c1, CRL_REG_LEN_08BIT, 0x02}, + {0x70c2, CRL_REG_LEN_08BIT, 0x02}, + {0x70c3, CRL_REG_LEN_08BIT, 0x00}, + {0x70c4, CRL_REG_LEN_08BIT, 0x01}, + {0x70c5, CRL_REG_LEN_08BIT, 0x00}, + {0x70b8, CRL_REG_LEN_08BIT, 0x03}, + {0x70b9, CRL_REG_LEN_08BIT, 0x98}, + {0x70bc, CRL_REG_LEN_08BIT, 0x00}, + {0x70bd, CRL_REG_LEN_08BIT, 0x80}, + {0x7004, CRL_REG_LEN_08BIT, 0x02}, + {0x7005, CRL_REG_LEN_08BIT, 0x00}, + {0x7006, CRL_REG_LEN_08BIT, 0x01}, + {0x7007, CRL_REG_LEN_08BIT, 0x80}, + {0x7008, CRL_REG_LEN_08BIT, 0x02}, + {0x7009, CRL_REG_LEN_08BIT, 0x00}, + {0x700a, CRL_REG_LEN_08BIT, 0x04}, + {0x700b, CRL_REG_LEN_08BIT, 0x00}, + {0x700e, CRL_REG_LEN_08BIT, 0x00}, + {0x700f, CRL_REG_LEN_08BIT, 0x60}, + {0x701a, CRL_REG_LEN_08BIT, 0x02}, + {0x701b, CRL_REG_LEN_08BIT, 0x00}, + {0x701c, CRL_REG_LEN_08BIT, 0x01}, + {0x701d, CRL_REG_LEN_08BIT, 0x80}, + {0x701e, CRL_REG_LEN_08BIT, 0x02}, + {0x701f, CRL_REG_LEN_08BIT, 0x00}, + {0x7020, CRL_REG_LEN_08BIT, 0x04}, + {0x7021, CRL_REG_LEN_08BIT, 0x00}, + {0x7024, CRL_REG_LEN_08BIT, 0x00}, + {0x7025, CRL_REG_LEN_08BIT, 0x60}, + {0x70e7, CRL_REG_LEN_08BIT, 0x00}, + {0x70e4, CRL_REG_LEN_08BIT, 0x10}, + {0x70e5, CRL_REG_LEN_08BIT, 0x00}, + {0x70e6, CRL_REG_LEN_08BIT, 0x00}, + {0x70eb, CRL_REG_LEN_08BIT, 0x00}, + {0x70e8, CRL_REG_LEN_08BIT, 0x10}, + {0x70e9, CRL_REG_LEN_08BIT, 0x00}, + {0x70ea, CRL_REG_LEN_08BIT, 0x00}, + {0x70ef, CRL_REG_LEN_08BIT, 0x00}, + {0x70ec, CRL_REG_LEN_08BIT, 0xfd}, + {0x70ed, CRL_REG_LEN_08BIT, 0x00}, + {0x70ee, CRL_REG_LEN_08BIT, 0x00}, + 
{0x70eb, CRL_REG_LEN_08BIT, 0x00}, + {0x70f0, CRL_REG_LEN_08BIT, 0xfd}, + {0x70f1, CRL_REG_LEN_08BIT, 0x00}, + {0x70f2, CRL_REG_LEN_08BIT, 0x00}, + {0x30fb, CRL_REG_LEN_08BIT, 0x06}, + {0x30fc, CRL_REG_LEN_08BIT, 0x80}, + {0x30fd, CRL_REG_LEN_08BIT, 0x02}, + {0x30fe, CRL_REG_LEN_08BIT, 0x93}, + {0x6000, CRL_REG_LEN_08BIT, 0xc1}, + {0x6001, CRL_REG_LEN_08BIT, 0xb9}, + {0x6002, CRL_REG_LEN_08BIT, 0xba}, + {0x6003, CRL_REG_LEN_08BIT, 0xa4}, + {0x6004, CRL_REG_LEN_08BIT, 0xb5}, + {0x6005, CRL_REG_LEN_08BIT, 0xa0}, + {0x6006, CRL_REG_LEN_08BIT, 0x82}, + {0x6007, CRL_REG_LEN_08BIT, 0xa7}, + {0x6008, CRL_REG_LEN_08BIT, 0xb7}, + {0x6009, CRL_REG_LEN_08BIT, 0x5c}, + {0x600a, CRL_REG_LEN_08BIT, 0x9e}, + {0x600b, CRL_REG_LEN_08BIT, 0xc0}, + {0x600c, CRL_REG_LEN_08BIT, 0xd2}, + {0x600d, CRL_REG_LEN_08BIT, 0x33}, + {0x600e, CRL_REG_LEN_08BIT, 0xcc}, + {0x600f, CRL_REG_LEN_08BIT, 0xe2}, + {0x6010, CRL_REG_LEN_08BIT, 0xc1}, + {0x6011, CRL_REG_LEN_08BIT, 0xab}, + {0x6012, CRL_REG_LEN_08BIT, 0xb7}, + {0x6013, CRL_REG_LEN_08BIT, 0x00}, + {0x6014, CRL_REG_LEN_08BIT, 0x00}, + {0x6015, CRL_REG_LEN_08BIT, 0x00}, + {0x6016, CRL_REG_LEN_08BIT, 0x00}, + {0x6017, CRL_REG_LEN_08BIT, 0x00}, + {0x6018, CRL_REG_LEN_08BIT, 0x00}, + {0x6019, CRL_REG_LEN_08BIT, 0x00}, + {0x601a, CRL_REG_LEN_08BIT, 0x00}, + {0x601b, CRL_REG_LEN_08BIT, 0x00}, + {0x601c, CRL_REG_LEN_08BIT, 0x00}, + {0x601d, CRL_REG_LEN_08BIT, 0x00}, + {0x601e, CRL_REG_LEN_08BIT, 0x9c}, + {0x601f, CRL_REG_LEN_08BIT, 0x94}, + {0x6020, CRL_REG_LEN_08BIT, 0x90}, + {0x6021, CRL_REG_LEN_08BIT, 0xc5}, + {0x6022, CRL_REG_LEN_08BIT, 0x01}, + {0x6023, CRL_REG_LEN_08BIT, 0x54}, + {0x6024, CRL_REG_LEN_08BIT, 0x2a}, + {0x6025, CRL_REG_LEN_08BIT, 0x61}, + {0x6026, CRL_REG_LEN_08BIT, 0xd2}, + {0x6027, CRL_REG_LEN_08BIT, 0xcc}, + {0x6028, CRL_REG_LEN_08BIT, 0x04}, + {0x6029, CRL_REG_LEN_08BIT, 0x35}, + {0x602a, CRL_REG_LEN_08BIT, 0xb1}, + {0x602b, CRL_REG_LEN_08BIT, 0xb2}, + {0x602c, CRL_REG_LEN_08BIT, 0xb3}, + {0x602d, CRL_REG_LEN_08BIT, 0xd2}, + {0x602e, CRL_REG_LEN_08BIT, 0xd3}, + {0x602f, CRL_REG_LEN_08BIT, 0x12}, + {0x6030, CRL_REG_LEN_08BIT, 0x31}, + {0x6031, CRL_REG_LEN_08BIT, 0xcc}, + {0x6032, CRL_REG_LEN_08BIT, 0x06}, + {0x6033, CRL_REG_LEN_08BIT, 0xd2}, + {0x6034, CRL_REG_LEN_08BIT, 0xc4}, + {0x6035, CRL_REG_LEN_08BIT, 0xce}, + {0x6036, CRL_REG_LEN_08BIT, 0x18}, + {0x6037, CRL_REG_LEN_08BIT, 0xcf}, + {0x6038, CRL_REG_LEN_08BIT, 0x1e}, + {0x6039, CRL_REG_LEN_08BIT, 0xd0}, + {0x603a, CRL_REG_LEN_08BIT, 0x24}, + {0x603b, CRL_REG_LEN_08BIT, 0xc5}, + {0x603c, CRL_REG_LEN_08BIT, 0xd2}, + {0x603d, CRL_REG_LEN_08BIT, 0xbc}, + {0x603e, CRL_REG_LEN_08BIT, 0xcc}, + {0x603f, CRL_REG_LEN_08BIT, 0x52}, + {0x6040, CRL_REG_LEN_08BIT, 0x2b}, + {0x6041, CRL_REG_LEN_08BIT, 0xd2}, + {0x6042, CRL_REG_LEN_08BIT, 0xd3}, + {0x6043, CRL_REG_LEN_08BIT, 0x02}, + {0x6044, CRL_REG_LEN_08BIT, 0xcc}, + {0x6045, CRL_REG_LEN_08BIT, 0x0a}, + {0x6046, CRL_REG_LEN_08BIT, 0xd2}, + {0x6047, CRL_REG_LEN_08BIT, 0xd3}, + {0x6048, CRL_REG_LEN_08BIT, 0x0f}, + {0x6049, CRL_REG_LEN_08BIT, 0x1a}, + {0x604a, CRL_REG_LEN_08BIT, 0x2a}, + {0x604b, CRL_REG_LEN_08BIT, 0xd4}, + {0x604c, CRL_REG_LEN_08BIT, 0xf6}, + {0x604d, CRL_REG_LEN_08BIT, 0xba}, + {0x604e, CRL_REG_LEN_08BIT, 0x56}, + {0x604f, CRL_REG_LEN_08BIT, 0xd3}, + {0x6050, CRL_REG_LEN_08BIT, 0x2e}, + {0x6051, CRL_REG_LEN_08BIT, 0x54}, + {0x6052, CRL_REG_LEN_08BIT, 0x26}, + {0x6053, CRL_REG_LEN_08BIT, 0xd2}, + {0x6054, CRL_REG_LEN_08BIT, 0xcc}, + {0x6055, CRL_REG_LEN_08BIT, 0x60}, + {0x6056, CRL_REG_LEN_08BIT, 0xd2}, + {0x6057, CRL_REG_LEN_08BIT, 0xd3}, + 
{0x6058, CRL_REG_LEN_08BIT, 0x27}, + {0x6059, CRL_REG_LEN_08BIT, 0x27}, + {0x605a, CRL_REG_LEN_08BIT, 0x08}, + {0x605b, CRL_REG_LEN_08BIT, 0x1a}, + {0x605c, CRL_REG_LEN_08BIT, 0xcc}, + {0x605d, CRL_REG_LEN_08BIT, 0x88}, + {0x605e, CRL_REG_LEN_08BIT, 0x00}, + {0x605f, CRL_REG_LEN_08BIT, 0x12}, + {0x6060, CRL_REG_LEN_08BIT, 0x2c}, + {0x6061, CRL_REG_LEN_08BIT, 0x60}, + {0x6062, CRL_REG_LEN_08BIT, 0xc2}, + {0x6063, CRL_REG_LEN_08BIT, 0xb9}, + {0x6064, CRL_REG_LEN_08BIT, 0xa5}, + {0x6065, CRL_REG_LEN_08BIT, 0xb5}, + {0x6066, CRL_REG_LEN_08BIT, 0xa0}, + {0x6067, CRL_REG_LEN_08BIT, 0x82}, + {0x6068, CRL_REG_LEN_08BIT, 0x5c}, + {0x6069, CRL_REG_LEN_08BIT, 0xd4}, + {0x606a, CRL_REG_LEN_08BIT, 0xbe}, + {0x606b, CRL_REG_LEN_08BIT, 0xd4}, + {0x606c, CRL_REG_LEN_08BIT, 0xbe}, + {0x606d, CRL_REG_LEN_08BIT, 0xd3}, + {0x606e, CRL_REG_LEN_08BIT, 0x01}, + {0x606f, CRL_REG_LEN_08BIT, 0x7c}, + {0x6070, CRL_REG_LEN_08BIT, 0x74}, + {0x6071, CRL_REG_LEN_08BIT, 0x00}, + {0x6072, CRL_REG_LEN_08BIT, 0x61}, + {0x6073, CRL_REG_LEN_08BIT, 0x2a}, + {0x6074, CRL_REG_LEN_08BIT, 0xd2}, + {0x6075, CRL_REG_LEN_08BIT, 0xcc}, + {0x6076, CRL_REG_LEN_08BIT, 0xdf}, + {0x6077, CRL_REG_LEN_08BIT, 0xc6}, + {0x6078, CRL_REG_LEN_08BIT, 0x35}, + {0x6079, CRL_REG_LEN_08BIT, 0xd2}, + {0x607a, CRL_REG_LEN_08BIT, 0xcc}, + {0x607b, CRL_REG_LEN_08BIT, 0x06}, + {0x607c, CRL_REG_LEN_08BIT, 0x31}, + {0x607d, CRL_REG_LEN_08BIT, 0xd2}, + {0x607e, CRL_REG_LEN_08BIT, 0xc5}, + {0x607f, CRL_REG_LEN_08BIT, 0xbb}, + {0x6080, CRL_REG_LEN_08BIT, 0xcc}, + {0x6081, CRL_REG_LEN_08BIT, 0x18}, + {0x6082, CRL_REG_LEN_08BIT, 0xc6}, + {0x6083, CRL_REG_LEN_08BIT, 0xd2}, + {0x6084, CRL_REG_LEN_08BIT, 0xbd}, + {0x6085, CRL_REG_LEN_08BIT, 0xcc}, + {0x6086, CRL_REG_LEN_08BIT, 0x52}, + {0x6087, CRL_REG_LEN_08BIT, 0x2b}, + {0x6088, CRL_REG_LEN_08BIT, 0xd2}, + {0x6089, CRL_REG_LEN_08BIT, 0xd3}, + {0x608a, CRL_REG_LEN_08BIT, 0x01}, + {0x608b, CRL_REG_LEN_08BIT, 0xcc}, + {0x608c, CRL_REG_LEN_08BIT, 0x0a}, + {0x608d, CRL_REG_LEN_08BIT, 0xd2}, + {0x608e, CRL_REG_LEN_08BIT, 0xd3}, + {0x608f, CRL_REG_LEN_08BIT, 0x0f}, + {0x6090, CRL_REG_LEN_08BIT, 0x1a}, + {0x6091, CRL_REG_LEN_08BIT, 0x71}, + {0x6092, CRL_REG_LEN_08BIT, 0x2a}, + {0x6093, CRL_REG_LEN_08BIT, 0xd4}, + {0x6094, CRL_REG_LEN_08BIT, 0xf6}, + {0x6095, CRL_REG_LEN_08BIT, 0xd3}, + {0x6096, CRL_REG_LEN_08BIT, 0x22}, + {0x6097, CRL_REG_LEN_08BIT, 0x70}, + {0x6098, CRL_REG_LEN_08BIT, 0xca}, + {0x6099, CRL_REG_LEN_08BIT, 0x26}, + {0x609a, CRL_REG_LEN_08BIT, 0xd2}, + {0x609b, CRL_REG_LEN_08BIT, 0xcc}, + {0x609c, CRL_REG_LEN_08BIT, 0x60}, + {0x609d, CRL_REG_LEN_08BIT, 0xd2}, + {0x609e, CRL_REG_LEN_08BIT, 0xd3}, + {0x609f, CRL_REG_LEN_08BIT, 0x27}, + {0x60a0, CRL_REG_LEN_08BIT, 0x27}, + {0x60a1, CRL_REG_LEN_08BIT, 0x08}, + {0x60a2, CRL_REG_LEN_08BIT, 0x1a}, + {0x60a3, CRL_REG_LEN_08BIT, 0xcc}, + {0x60a4, CRL_REG_LEN_08BIT, 0x88}, + {0x60a5, CRL_REG_LEN_08BIT, 0x12}, + {0x60a6, CRL_REG_LEN_08BIT, 0x2c}, + {0x60a7, CRL_REG_LEN_08BIT, 0x60}, + {0x60a8, CRL_REG_LEN_08BIT, 0x00}, + {0x60a9, CRL_REG_LEN_08BIT, 0x00}, + {0x60aa, CRL_REG_LEN_08BIT, 0xc0}, + {0x60ab, CRL_REG_LEN_08BIT, 0xb9}, + {0x60ac, CRL_REG_LEN_08BIT, 0xa3}, + {0x60ad, CRL_REG_LEN_08BIT, 0xb5}, + {0x60ae, CRL_REG_LEN_08BIT, 0x00}, + {0x60af, CRL_REG_LEN_08BIT, 0xa0}, + {0x60b0, CRL_REG_LEN_08BIT, 0x82}, + {0x60b1, CRL_REG_LEN_08BIT, 0x5c}, + {0x60b2, CRL_REG_LEN_08BIT, 0xd4}, + {0x60b3, CRL_REG_LEN_08BIT, 0xa0}, + {0x60b4, CRL_REG_LEN_08BIT, 0x9d}, + {0x60b5, CRL_REG_LEN_08BIT, 0xd3}, + {0x60b6, CRL_REG_LEN_08BIT, 0x26}, + {0x60b7, CRL_REG_LEN_08BIT, 0xb0}, + 
{0x60b8, CRL_REG_LEN_08BIT, 0xb7}, + {0x60b9, CRL_REG_LEN_08BIT, 0x00}, + {0x60ba, CRL_REG_LEN_08BIT, 0xd3}, + {0x60bb, CRL_REG_LEN_08BIT, 0x0a}, + {0x60bc, CRL_REG_LEN_08BIT, 0xd3}, + {0x60bd, CRL_REG_LEN_08BIT, 0x10}, + {0x60be, CRL_REG_LEN_08BIT, 0x9c}, + {0x60bf, CRL_REG_LEN_08BIT, 0x94}, + {0x60c0, CRL_REG_LEN_08BIT, 0x90}, + {0x60c1, CRL_REG_LEN_08BIT, 0xc8}, + {0x60c2, CRL_REG_LEN_08BIT, 0xba}, + {0x60c3, CRL_REG_LEN_08BIT, 0x7c}, + {0x60c4, CRL_REG_LEN_08BIT, 0x74}, + {0x60c5, CRL_REG_LEN_08BIT, 0x00}, + {0x60c6, CRL_REG_LEN_08BIT, 0x61}, + {0x60c7, CRL_REG_LEN_08BIT, 0x2a}, + {0x60c8, CRL_REG_LEN_08BIT, 0x00}, + {0x60c9, CRL_REG_LEN_08BIT, 0xd2}, + {0x60ca, CRL_REG_LEN_08BIT, 0xcc}, + {0x60cb, CRL_REG_LEN_08BIT, 0xdf}, + {0x60cc, CRL_REG_LEN_08BIT, 0xc4}, + {0x60cd, CRL_REG_LEN_08BIT, 0x35}, + {0x60ce, CRL_REG_LEN_08BIT, 0xd2}, + {0x60cf, CRL_REG_LEN_08BIT, 0xcc}, + {0x60d0, CRL_REG_LEN_08BIT, 0x06}, + {0x60d1, CRL_REG_LEN_08BIT, 0x31}, + {0x60d2, CRL_REG_LEN_08BIT, 0xd2}, + {0x60d3, CRL_REG_LEN_08BIT, 0xcc}, + {0x60d4, CRL_REG_LEN_08BIT, 0x15}, + {0x60d5, CRL_REG_LEN_08BIT, 0xd2}, + {0x60d6, CRL_REG_LEN_08BIT, 0xbb}, + {0x60d7, CRL_REG_LEN_08BIT, 0xcc}, + {0x60d8, CRL_REG_LEN_08BIT, 0x1a}, + {0x60d9, CRL_REG_LEN_08BIT, 0xd2}, + {0x60da, CRL_REG_LEN_08BIT, 0xbe}, + {0x60db, CRL_REG_LEN_08BIT, 0xce}, + {0x60dc, CRL_REG_LEN_08BIT, 0x52}, + {0x60dd, CRL_REG_LEN_08BIT, 0xcf}, + {0x60de, CRL_REG_LEN_08BIT, 0x56}, + {0x60df, CRL_REG_LEN_08BIT, 0xd0}, + {0x60e0, CRL_REG_LEN_08BIT, 0x5b}, + {0x60e1, CRL_REG_LEN_08BIT, 0x2b}, + {0x60e2, CRL_REG_LEN_08BIT, 0xd2}, + {0x60e3, CRL_REG_LEN_08BIT, 0xd3}, + {0x60e4, CRL_REG_LEN_08BIT, 0x01}, + {0x60e5, CRL_REG_LEN_08BIT, 0xcc}, + {0x60e6, CRL_REG_LEN_08BIT, 0x0a}, + {0x60e7, CRL_REG_LEN_08BIT, 0xd2}, + {0x60e8, CRL_REG_LEN_08BIT, 0xd3}, + {0x60e9, CRL_REG_LEN_08BIT, 0x0f}, + {0x60ea, CRL_REG_LEN_08BIT, 0xd9}, + {0x60eb, CRL_REG_LEN_08BIT, 0xc7}, + {0x60ec, CRL_REG_LEN_08BIT, 0xda}, + {0x60ed, CRL_REG_LEN_08BIT, 0xce}, + {0x60ee, CRL_REG_LEN_08BIT, 0x1a}, + {0x60ef, CRL_REG_LEN_08BIT, 0xd4}, + {0x60f0, CRL_REG_LEN_08BIT, 0xf6}, + {0x60f1, CRL_REG_LEN_08BIT, 0xd4}, + {0x60f2, CRL_REG_LEN_08BIT, 0xa9}, + {0x60f3, CRL_REG_LEN_08BIT, 0x27}, + {0x60f4, CRL_REG_LEN_08BIT, 0x00}, + {0x60f5, CRL_REG_LEN_08BIT, 0xd2}, + {0x60f6, CRL_REG_LEN_08BIT, 0xcc}, + {0x60f7, CRL_REG_LEN_08BIT, 0x60}, + {0x60f8, CRL_REG_LEN_08BIT, 0xd2}, + {0x60f9, CRL_REG_LEN_08BIT, 0xd3}, + {0x60fa, CRL_REG_LEN_08BIT, 0x2d}, + {0x60fb, CRL_REG_LEN_08BIT, 0xd9}, + {0x60fc, CRL_REG_LEN_08BIT, 0xdf}, + {0x60fd, CRL_REG_LEN_08BIT, 0xda}, + {0x60fe, CRL_REG_LEN_08BIT, 0xe5}, + {0x60ff, CRL_REG_LEN_08BIT, 0x1a}, + {0x6100, CRL_REG_LEN_08BIT, 0x12}, + {0x6101, CRL_REG_LEN_08BIT, 0xcc}, + {0x6102, CRL_REG_LEN_08BIT, 0x88}, + {0x6103, CRL_REG_LEN_08BIT, 0xd6}, + {0x6104, CRL_REG_LEN_08BIT, 0xb1}, + {0x6105, CRL_REG_LEN_08BIT, 0xb9}, + {0x6106, CRL_REG_LEN_08BIT, 0xba}, + {0x6107, CRL_REG_LEN_08BIT, 0xaf}, + {0x6108, CRL_REG_LEN_08BIT, 0xdc}, + {0x6109, CRL_REG_LEN_08BIT, 0x00}, + {0x610a, CRL_REG_LEN_08BIT, 0xcb}, + {0x610b, CRL_REG_LEN_08BIT, 0xc3}, + {0x610c, CRL_REG_LEN_08BIT, 0xb9}, + {0x610d, CRL_REG_LEN_08BIT, 0xa4}, + {0x610e, CRL_REG_LEN_08BIT, 0xb5}, + {0x610f, CRL_REG_LEN_08BIT, 0x5c}, + {0x6110, CRL_REG_LEN_08BIT, 0x12}, + {0x6111, CRL_REG_LEN_08BIT, 0x2a}, + {0x6112, CRL_REG_LEN_08BIT, 0x61}, + {0x6113, CRL_REG_LEN_08BIT, 0xd2}, + {0x6114, CRL_REG_LEN_08BIT, 0xcc}, + {0x6115, CRL_REG_LEN_08BIT, 0xdf}, + {0x6116, CRL_REG_LEN_08BIT, 0xc7}, + {0x6117, CRL_REG_LEN_08BIT, 0x35}, + 
{0x6118, CRL_REG_LEN_08BIT, 0xd2}, + {0x6119, CRL_REG_LEN_08BIT, 0xcc}, + {0x611a, CRL_REG_LEN_08BIT, 0x06}, + {0x611b, CRL_REG_LEN_08BIT, 0x31}, + {0x611c, CRL_REG_LEN_08BIT, 0xc6}, + {0x611d, CRL_REG_LEN_08BIT, 0xbb}, + {0x611e, CRL_REG_LEN_08BIT, 0xd2}, + {0x611f, CRL_REG_LEN_08BIT, 0xcc}, + {0x6120, CRL_REG_LEN_08BIT, 0x18}, + {0x6121, CRL_REG_LEN_08BIT, 0xd2}, + {0x6122, CRL_REG_LEN_08BIT, 0xbe}, + {0x6123, CRL_REG_LEN_08BIT, 0xcc}, + {0x6124, CRL_REG_LEN_08BIT, 0x52}, + {0x6125, CRL_REG_LEN_08BIT, 0xc7}, + {0x6126, CRL_REG_LEN_08BIT, 0xd2}, + {0x6127, CRL_REG_LEN_08BIT, 0xcc}, + {0x6128, CRL_REG_LEN_08BIT, 0x0a}, + {0x6129, CRL_REG_LEN_08BIT, 0xb4}, + {0x612a, CRL_REG_LEN_08BIT, 0xb7}, + {0x612b, CRL_REG_LEN_08BIT, 0x94}, + {0x612c, CRL_REG_LEN_08BIT, 0xd2}, + {0x612d, CRL_REG_LEN_08BIT, 0x12}, + {0x612e, CRL_REG_LEN_08BIT, 0x26}, + {0x612f, CRL_REG_LEN_08BIT, 0x42}, + {0x6130, CRL_REG_LEN_08BIT, 0x46}, + {0x6131, CRL_REG_LEN_08BIT, 0x42}, + {0x6132, CRL_REG_LEN_08BIT, 0xd3}, + {0x6133, CRL_REG_LEN_08BIT, 0x20}, + {0x6134, CRL_REG_LEN_08BIT, 0x27}, + {0x6135, CRL_REG_LEN_08BIT, 0x00}, + {0x6136, CRL_REG_LEN_08BIT, 0x1a}, + {0x6137, CRL_REG_LEN_08BIT, 0xcc}, + {0x6138, CRL_REG_LEN_08BIT, 0x88}, + {0x6139, CRL_REG_LEN_08BIT, 0x60}, + {0x613a, CRL_REG_LEN_08BIT, 0x2c}, + {0x613b, CRL_REG_LEN_08BIT, 0x12}, + {0x613c, CRL_REG_LEN_08BIT, 0x40}, + {0x613d, CRL_REG_LEN_08BIT, 0xb8}, + {0x613e, CRL_REG_LEN_08BIT, 0x90}, + {0x613f, CRL_REG_LEN_08BIT, 0xd5}, + {0x6140, CRL_REG_LEN_08BIT, 0xba}, + {0x6141, CRL_REG_LEN_08BIT, 0x00}, + {0x6142, CRL_REG_LEN_08BIT, 0x00}, + {0x6143, CRL_REG_LEN_08BIT, 0x00}, + {0x6144, CRL_REG_LEN_08BIT, 0x00}, + {0x6145, CRL_REG_LEN_08BIT, 0x00}, + {0x6146, CRL_REG_LEN_08BIT, 0x00}, + {0x6147, CRL_REG_LEN_08BIT, 0xaa}, + {0x6148, CRL_REG_LEN_08BIT, 0xb7}, + {0x6149, CRL_REG_LEN_08BIT, 0x00}, + {0x614a, CRL_REG_LEN_08BIT, 0x00}, + {0x614b, CRL_REG_LEN_08BIT, 0x00}, + {0x614c, CRL_REG_LEN_08BIT, 0x00}, + {0x614d, CRL_REG_LEN_08BIT, 0xa6}, + {0x614e, CRL_REG_LEN_08BIT, 0xb7}, + {0x614f, CRL_REG_LEN_08BIT, 0x00}, + {0x6150, CRL_REG_LEN_08BIT, 0xd5}, + {0x6151, CRL_REG_LEN_08BIT, 0x00}, + {0x6152, CRL_REG_LEN_08BIT, 0x71}, + {0x6153, CRL_REG_LEN_08BIT, 0xd3}, + {0x6154, CRL_REG_LEN_08BIT, 0x30}, + {0x6155, CRL_REG_LEN_08BIT, 0xba}, + {0x6156, CRL_REG_LEN_08BIT, 0x00}, + {0x6157, CRL_REG_LEN_08BIT, 0x00}, + {0x6158, CRL_REG_LEN_08BIT, 0x00}, + {0x6159, CRL_REG_LEN_08BIT, 0x00}, + {0x615a, CRL_REG_LEN_08BIT, 0xd3}, + {0x615b, CRL_REG_LEN_08BIT, 0x10}, + {0x615c, CRL_REG_LEN_08BIT, 0x70}, + {0x615d, CRL_REG_LEN_08BIT, 0x00}, + {0x615e, CRL_REG_LEN_08BIT, 0x00}, + {0x615f, CRL_REG_LEN_08BIT, 0x00}, + {0x6160, CRL_REG_LEN_08BIT, 0x00}, + {0x6161, CRL_REG_LEN_08BIT, 0xd5}, + {0x6162, CRL_REG_LEN_08BIT, 0xba}, + {0x6163, CRL_REG_LEN_08BIT, 0xb0}, + {0x6164, CRL_REG_LEN_08BIT, 0xb7}, + {0x6165, CRL_REG_LEN_08BIT, 0x00}, + {0x6166, CRL_REG_LEN_08BIT, 0x9d}, + {0x6167, CRL_REG_LEN_08BIT, 0xd3}, + {0x6168, CRL_REG_LEN_08BIT, 0x0a}, + {0x6169, CRL_REG_LEN_08BIT, 0x9d}, + {0x616a, CRL_REG_LEN_08BIT, 0x9d}, + {0x616b, CRL_REG_LEN_08BIT, 0xd3}, + {0x616c, CRL_REG_LEN_08BIT, 0x10}, + {0x616d, CRL_REG_LEN_08BIT, 0x9c}, + {0x616e, CRL_REG_LEN_08BIT, 0x94}, + {0x616f, CRL_REG_LEN_08BIT, 0x90}, + {0x6170, CRL_REG_LEN_08BIT, 0xc8}, + {0x6171, CRL_REG_LEN_08BIT, 0xba}, + {0x6172, CRL_REG_LEN_08BIT, 0xd2}, + {0x6173, CRL_REG_LEN_08BIT, 0x60}, + {0x6174, CRL_REG_LEN_08BIT, 0x2c}, + {0x6175, CRL_REG_LEN_08BIT, 0x50}, + {0x6176, CRL_REG_LEN_08BIT, 0x11}, + {0x6177, CRL_REG_LEN_08BIT, 0xcc}, + 
{0x6178, CRL_REG_LEN_08BIT, 0x00}, + {0x6179, CRL_REG_LEN_08BIT, 0x30}, + {0x617a, CRL_REG_LEN_08BIT, 0xd5}, + {0x617b, CRL_REG_LEN_08BIT, 0x00}, + {0x617c, CRL_REG_LEN_08BIT, 0xba}, + {0x617d, CRL_REG_LEN_08BIT, 0xb0}, + {0x617e, CRL_REG_LEN_08BIT, 0xb7}, + {0x617f, CRL_REG_LEN_08BIT, 0x00}, + {0x6180, CRL_REG_LEN_08BIT, 0x9d}, + {0x6181, CRL_REG_LEN_08BIT, 0xd3}, + {0x6182, CRL_REG_LEN_08BIT, 0x0a}, + {0x6183, CRL_REG_LEN_08BIT, 0x9d}, + {0x6184, CRL_REG_LEN_08BIT, 0x9d}, + {0x6185, CRL_REG_LEN_08BIT, 0xd3}, + {0x6186, CRL_REG_LEN_08BIT, 0x10}, + {0x6187, CRL_REG_LEN_08BIT, 0x9c}, + {0x6188, CRL_REG_LEN_08BIT, 0x94}, + {0x6189, CRL_REG_LEN_08BIT, 0x90}, + {0x618a, CRL_REG_LEN_08BIT, 0xc8}, + {0x618b, CRL_REG_LEN_08BIT, 0xba}, + {0x618c, CRL_REG_LEN_08BIT, 0xd5}, + {0x618d, CRL_REG_LEN_08BIT, 0x00}, + {0x618e, CRL_REG_LEN_08BIT, 0x01}, + {0x618f, CRL_REG_LEN_08BIT, 0x1a}, + {0x6190, CRL_REG_LEN_08BIT, 0xcc}, + {0x6191, CRL_REG_LEN_08BIT, 0x12}, + {0x6192, CRL_REG_LEN_08BIT, 0x12}, + {0x6193, CRL_REG_LEN_08BIT, 0x00}, + {0x6194, CRL_REG_LEN_08BIT, 0xcc}, + {0x6195, CRL_REG_LEN_08BIT, 0x9c}, + {0x6196, CRL_REG_LEN_08BIT, 0xd2}, + {0x6197, CRL_REG_LEN_08BIT, 0xcc}, + {0x6198, CRL_REG_LEN_08BIT, 0x60}, + {0x6199, CRL_REG_LEN_08BIT, 0xd2}, + {0x619a, CRL_REG_LEN_08BIT, 0x04}, + {0x619b, CRL_REG_LEN_08BIT, 0xd5}, + {0x619c, CRL_REG_LEN_08BIT, 0x1a}, + {0x619d, CRL_REG_LEN_08BIT, 0xcc}, + {0x619e, CRL_REG_LEN_08BIT, 0x12}, + {0x619f, CRL_REG_LEN_08BIT, 0x00}, + {0x61a0, CRL_REG_LEN_08BIT, 0x12}, + {0x61a1, CRL_REG_LEN_08BIT, 0xcc}, + {0x61a2, CRL_REG_LEN_08BIT, 0x9c}, + {0x61a3, CRL_REG_LEN_08BIT, 0xd2}, + {0x61a4, CRL_REG_LEN_08BIT, 0xcc}, + {0x61a5, CRL_REG_LEN_08BIT, 0x60}, + {0x61a6, CRL_REG_LEN_08BIT, 0xd2}, + {0x61a7, CRL_REG_LEN_08BIT, 0x1a}, + {0x61a8, CRL_REG_LEN_08BIT, 0xcc}, + {0x61a9, CRL_REG_LEN_08BIT, 0x12}, + {0x61aa, CRL_REG_LEN_08BIT, 0x00}, + {0x61ab, CRL_REG_LEN_08BIT, 0x12}, + {0x61ac, CRL_REG_LEN_08BIT, 0xcc}, + {0x61ad, CRL_REG_LEN_08BIT, 0x9c}, + {0x61ae, CRL_REG_LEN_08BIT, 0xd2}, + {0x61af, CRL_REG_LEN_08BIT, 0xcc}, + {0x61b0, CRL_REG_LEN_08BIT, 0x60}, + {0x61b1, CRL_REG_LEN_08BIT, 0xd2}, + {0x61b2, CRL_REG_LEN_08BIT, 0x1a}, + {0x61b3, CRL_REG_LEN_08BIT, 0xcc}, + {0x61b4, CRL_REG_LEN_08BIT, 0x12}, + {0x61b5, CRL_REG_LEN_08BIT, 0x00}, + {0x61b6, CRL_REG_LEN_08BIT, 0x12}, + {0x61b7, CRL_REG_LEN_08BIT, 0xcc}, + {0x61b8, CRL_REG_LEN_08BIT, 0x9c}, + {0x61b9, CRL_REG_LEN_08BIT, 0xd2}, + {0x61ba, CRL_REG_LEN_08BIT, 0xcc}, + {0x61bb, CRL_REG_LEN_08BIT, 0x60}, + {0x61bc, CRL_REG_LEN_08BIT, 0xd2}, + {0x61bd, CRL_REG_LEN_08BIT, 0xd5}, + {0x61be, CRL_REG_LEN_08BIT, 0x1a}, + {0x61bf, CRL_REG_LEN_08BIT, 0xcc}, + {0x61c0, CRL_REG_LEN_08BIT, 0x12}, + {0x61c1, CRL_REG_LEN_08BIT, 0x12}, + {0x61c2, CRL_REG_LEN_08BIT, 0x00}, + {0x61c3, CRL_REG_LEN_08BIT, 0xcc}, + {0x61c4, CRL_REG_LEN_08BIT, 0x8a}, + {0x61c5, CRL_REG_LEN_08BIT, 0xd2}, + {0x61c6, CRL_REG_LEN_08BIT, 0xcc}, + {0x61c7, CRL_REG_LEN_08BIT, 0x74}, + {0x61c8, CRL_REG_LEN_08BIT, 0xd2}, + {0x61c9, CRL_REG_LEN_08BIT, 0xd5}, + {0x61ca, CRL_REG_LEN_08BIT, 0x1a}, + {0x61cb, CRL_REG_LEN_08BIT, 0xcc}, + {0x61cc, CRL_REG_LEN_08BIT, 0x12}, + {0x61cd, CRL_REG_LEN_08BIT, 0x00}, + {0x61ce, CRL_REG_LEN_08BIT, 0x12}, + {0x61cf, CRL_REG_LEN_08BIT, 0xcc}, + {0x61d0, CRL_REG_LEN_08BIT, 0x8a}, + {0x61d1, CRL_REG_LEN_08BIT, 0xd2}, + {0x61d2, CRL_REG_LEN_08BIT, 0xcc}, + {0x61d3, CRL_REG_LEN_08BIT, 0x74}, + {0x61d4, CRL_REG_LEN_08BIT, 0xd2}, + {0x61d5, CRL_REG_LEN_08BIT, 0x1a}, + {0x61d6, CRL_REG_LEN_08BIT, 0xcc}, + {0x61d7, CRL_REG_LEN_08BIT, 0x12}, + 
{0x61d8, CRL_REG_LEN_08BIT, 0x00}, + {0x61d9, CRL_REG_LEN_08BIT, 0x12}, + {0x61da, CRL_REG_LEN_08BIT, 0xcc}, + {0x61db, CRL_REG_LEN_08BIT, 0x8a}, + {0x61dc, CRL_REG_LEN_08BIT, 0xd2}, + {0x61dd, CRL_REG_LEN_08BIT, 0xcc}, + {0x61de, CRL_REG_LEN_08BIT, 0x74}, + {0x61df, CRL_REG_LEN_08BIT, 0xd2}, + {0x61e0, CRL_REG_LEN_08BIT, 0x1a}, + {0x61e1, CRL_REG_LEN_08BIT, 0xcc}, + {0x61e2, CRL_REG_LEN_08BIT, 0x12}, + {0x61e3, CRL_REG_LEN_08BIT, 0x00}, + {0x61e4, CRL_REG_LEN_08BIT, 0x12}, + {0x61e5, CRL_REG_LEN_08BIT, 0xcc}, + {0x61e6, CRL_REG_LEN_08BIT, 0x8a}, + {0x61e7, CRL_REG_LEN_08BIT, 0xd2}, + {0x61e8, CRL_REG_LEN_08BIT, 0xcc}, + {0x61e9, CRL_REG_LEN_08BIT, 0x74}, + {0x61ea, CRL_REG_LEN_08BIT, 0xd2}, + {0x61eb, CRL_REG_LEN_08BIT, 0xd5}, + {0x61ec, CRL_REG_LEN_08BIT, 0xcc}, + {0x61ed, CRL_REG_LEN_08BIT, 0x12}, + {0x61ee, CRL_REG_LEN_08BIT, 0x00}, + {0x61ef, CRL_REG_LEN_08BIT, 0x12}, + {0x61f0, CRL_REG_LEN_08BIT, 0xcc}, + {0x61f1, CRL_REG_LEN_08BIT, 0x9c}, + {0x61f2, CRL_REG_LEN_08BIT, 0xd5}, + {0x6400, CRL_REG_LEN_08BIT, 0x04}, + {0x6401, CRL_REG_LEN_08BIT, 0x04}, + {0x6402, CRL_REG_LEN_08BIT, 0x00}, + {0x6403, CRL_REG_LEN_08BIT, 0xff}, + {0x6404, CRL_REG_LEN_08BIT, 0x00}, + {0x6405, CRL_REG_LEN_08BIT, 0x08}, + {0x6406, CRL_REG_LEN_08BIT, 0x00}, + {0x6407, CRL_REG_LEN_08BIT, 0xff}, + {0x6408, CRL_REG_LEN_08BIT, 0x04}, + {0x6409, CRL_REG_LEN_08BIT, 0x70}, + {0x640a, CRL_REG_LEN_08BIT, 0x00}, + {0x640b, CRL_REG_LEN_08BIT, 0xff}, + {0x640c, CRL_REG_LEN_08BIT, 0x05}, + {0x640d, CRL_REG_LEN_08BIT, 0x14}, + {0x640e, CRL_REG_LEN_08BIT, 0x04}, + {0x640f, CRL_REG_LEN_08BIT, 0x71}, + {0x6410, CRL_REG_LEN_08BIT, 0x05}, + {0x6411, CRL_REG_LEN_08BIT, 0x74}, + {0x6412, CRL_REG_LEN_08BIT, 0x00}, + {0x6413, CRL_REG_LEN_08BIT, 0xff}, + {0x6414, CRL_REG_LEN_08BIT, 0x05}, + {0x6415, CRL_REG_LEN_08BIT, 0x54}, + {0x6416, CRL_REG_LEN_08BIT, 0x05}, + {0x6417, CRL_REG_LEN_08BIT, 0x44}, + {0x6418, CRL_REG_LEN_08BIT, 0x04}, + {0x6419, CRL_REG_LEN_08BIT, 0x30}, + {0x641a, CRL_REG_LEN_08BIT, 0x05}, + {0x641b, CRL_REG_LEN_08BIT, 0x46}, + {0x641c, CRL_REG_LEN_08BIT, 0x00}, + {0x641d, CRL_REG_LEN_08BIT, 0xff}, + {0x641e, CRL_REG_LEN_08BIT, 0x04}, + {0x641f, CRL_REG_LEN_08BIT, 0x31}, + {0x6420, CRL_REG_LEN_08BIT, 0x04}, + {0x6421, CRL_REG_LEN_08BIT, 0x30}, + {0x6422, CRL_REG_LEN_08BIT, 0x00}, + {0x6423, CRL_REG_LEN_08BIT, 0xff}, + {0x6424, CRL_REG_LEN_08BIT, 0x04}, + {0x6425, CRL_REG_LEN_08BIT, 0x20}, + {0x6426, CRL_REG_LEN_08BIT, 0x05}, + {0x6427, CRL_REG_LEN_08BIT, 0x06}, + {0x6428, CRL_REG_LEN_08BIT, 0x00}, + {0x6429, CRL_REG_LEN_08BIT, 0xff}, + {0x642a, CRL_REG_LEN_08BIT, 0x08}, + {0x642b, CRL_REG_LEN_08BIT, 0x2a}, + {0x642c, CRL_REG_LEN_08BIT, 0x08}, + {0x642d, CRL_REG_LEN_08BIT, 0x31}, + {0x642e, CRL_REG_LEN_08BIT, 0x00}, + {0x642f, CRL_REG_LEN_08BIT, 0xff}, + {0x6430, CRL_REG_LEN_08BIT, 0x08}, + {0x6431, CRL_REG_LEN_08BIT, 0x2a}, + {0x6432, CRL_REG_LEN_08BIT, 0x08}, + {0x6433, CRL_REG_LEN_08BIT, 0x31}, + {0x6434, CRL_REG_LEN_08BIT, 0x06}, + {0x6435, CRL_REG_LEN_08BIT, 0x20}, + {0x6436, CRL_REG_LEN_08BIT, 0x07}, + {0x6437, CRL_REG_LEN_08BIT, 0x00}, + {0x6438, CRL_REG_LEN_08BIT, 0x08}, + {0x6439, CRL_REG_LEN_08BIT, 0x40}, + {0x643a, CRL_REG_LEN_08BIT, 0x00}, + {0x643b, CRL_REG_LEN_08BIT, 0xff}, + {0x643c, CRL_REG_LEN_08BIT, 0x08}, + {0x643d, CRL_REG_LEN_08BIT, 0x2a}, + {0x643e, CRL_REG_LEN_08BIT, 0x08}, + {0x643f, CRL_REG_LEN_08BIT, 0x36}, + {0x6440, CRL_REG_LEN_08BIT, 0x06}, + {0x6441, CRL_REG_LEN_08BIT, 0x10}, + {0x6442, CRL_REG_LEN_08BIT, 0x07}, + {0x6443, CRL_REG_LEN_08BIT, 0x00}, + {0x6444, CRL_REG_LEN_08BIT, 0x08}, + 
{0x6445, CRL_REG_LEN_08BIT, 0x40}, + {0x6446, CRL_REG_LEN_08BIT, 0x00}, + {0x6447, CRL_REG_LEN_08BIT, 0xff}, + {0x6448, CRL_REG_LEN_08BIT, 0x08}, + {0x6449, CRL_REG_LEN_08BIT, 0x2a}, + {0x644a, CRL_REG_LEN_08BIT, 0x08}, + {0x644b, CRL_REG_LEN_08BIT, 0x3b}, + {0x644c, CRL_REG_LEN_08BIT, 0x06}, + {0x644d, CRL_REG_LEN_08BIT, 0x00}, + {0x644e, CRL_REG_LEN_08BIT, 0x07}, + {0x644f, CRL_REG_LEN_08BIT, 0x00}, + {0x6450, CRL_REG_LEN_08BIT, 0x08}, + {0x6451, CRL_REG_LEN_08BIT, 0x40}, + {0x6452, CRL_REG_LEN_08BIT, 0x00}, + {0x6453, CRL_REG_LEN_08BIT, 0xff}, + {0x6454, CRL_REG_LEN_08BIT, 0x06}, + {0x6455, CRL_REG_LEN_08BIT, 0x00}, + {0x6456, CRL_REG_LEN_08BIT, 0x07}, + {0x6457, CRL_REG_LEN_08BIT, 0x05}, + {0x6458, CRL_REG_LEN_08BIT, 0x01}, + {0x6459, CRL_REG_LEN_08BIT, 0xaf}, + {0x645a, CRL_REG_LEN_08BIT, 0x01}, + {0x645b, CRL_REG_LEN_08BIT, 0x0f}, + {0x645c, CRL_REG_LEN_08BIT, 0x01}, + {0x645d, CRL_REG_LEN_08BIT, 0x90}, + {0x645e, CRL_REG_LEN_08BIT, 0x01}, + {0x645f, CRL_REG_LEN_08BIT, 0xc8}, + {0x6460, CRL_REG_LEN_08BIT, 0x00}, + {0x6461, CRL_REG_LEN_08BIT, 0xff}, + {0x6462, CRL_REG_LEN_08BIT, 0x01}, + {0x6463, CRL_REG_LEN_08BIT, 0xac}, + {0x6464, CRL_REG_LEN_08BIT, 0x01}, + {0x6465, CRL_REG_LEN_08BIT, 0x0c}, + {0x6466, CRL_REG_LEN_08BIT, 0x01}, + {0x6467, CRL_REG_LEN_08BIT, 0x90}, + {0x6468, CRL_REG_LEN_08BIT, 0x01}, + {0x6469, CRL_REG_LEN_08BIT, 0xe8}, + {0x646a, CRL_REG_LEN_08BIT, 0x00}, + {0x646b, CRL_REG_LEN_08BIT, 0xff}, + {0x646c, CRL_REG_LEN_08BIT, 0x01}, + {0x646d, CRL_REG_LEN_08BIT, 0xad}, + {0x646e, CRL_REG_LEN_08BIT, 0x01}, + {0x646f, CRL_REG_LEN_08BIT, 0x0d}, + {0x6470, CRL_REG_LEN_08BIT, 0x01}, + {0x6471, CRL_REG_LEN_08BIT, 0x90}, + {0x6472, CRL_REG_LEN_08BIT, 0x01}, + {0x6473, CRL_REG_LEN_08BIT, 0xe8}, + {0x6474, CRL_REG_LEN_08BIT, 0x00}, + {0x6475, CRL_REG_LEN_08BIT, 0xff}, + {0x6476, CRL_REG_LEN_08BIT, 0x01}, + {0x6477, CRL_REG_LEN_08BIT, 0xae}, + {0x6478, CRL_REG_LEN_08BIT, 0x01}, + {0x6479, CRL_REG_LEN_08BIT, 0x0e}, + {0x647a, CRL_REG_LEN_08BIT, 0x01}, + {0x647b, CRL_REG_LEN_08BIT, 0x90}, + {0x647c, CRL_REG_LEN_08BIT, 0x01}, + {0x647d, CRL_REG_LEN_08BIT, 0xe8}, + {0x647e, CRL_REG_LEN_08BIT, 0x00}, + {0x647f, CRL_REG_LEN_08BIT, 0xff}, + {0x6480, CRL_REG_LEN_08BIT, 0x01}, + {0x6481, CRL_REG_LEN_08BIT, 0xb0}, + {0x6482, CRL_REG_LEN_08BIT, 0x01}, + {0x6483, CRL_REG_LEN_08BIT, 0xb1}, + {0x6484, CRL_REG_LEN_08BIT, 0x01}, + {0x6485, CRL_REG_LEN_08BIT, 0xb2}, + {0x6486, CRL_REG_LEN_08BIT, 0x01}, + {0x6487, CRL_REG_LEN_08BIT, 0xb3}, + {0x6488, CRL_REG_LEN_08BIT, 0x01}, + {0x6489, CRL_REG_LEN_08BIT, 0xb4}, + {0x648a, CRL_REG_LEN_08BIT, 0x01}, + {0x648b, CRL_REG_LEN_08BIT, 0xb5}, + {0x648c, CRL_REG_LEN_08BIT, 0x01}, + {0x648d, CRL_REG_LEN_08BIT, 0xb6}, + {0x648e, CRL_REG_LEN_08BIT, 0x01}, + {0x648f, CRL_REG_LEN_08BIT, 0xb7}, + {0x6490, CRL_REG_LEN_08BIT, 0x01}, + {0x6491, CRL_REG_LEN_08BIT, 0xb8}, + {0x6492, CRL_REG_LEN_08BIT, 0x01}, + {0x6493, CRL_REG_LEN_08BIT, 0xb9}, + {0x6494, CRL_REG_LEN_08BIT, 0x01}, + {0x6495, CRL_REG_LEN_08BIT, 0xba}, + {0x6496, CRL_REG_LEN_08BIT, 0x01}, + {0x6497, CRL_REG_LEN_08BIT, 0xbb}, + {0x6498, CRL_REG_LEN_08BIT, 0x01}, + {0x6499, CRL_REG_LEN_08BIT, 0xbc}, + {0x649a, CRL_REG_LEN_08BIT, 0x01}, + {0x649b, CRL_REG_LEN_08BIT, 0xbd}, + {0x649c, CRL_REG_LEN_08BIT, 0x01}, + {0x649d, CRL_REG_LEN_08BIT, 0xbe}, + {0x649e, CRL_REG_LEN_08BIT, 0x01}, + {0x649f, CRL_REG_LEN_08BIT, 0xbf}, + {0x64a0, CRL_REG_LEN_08BIT, 0x01}, + {0x64a1, CRL_REG_LEN_08BIT, 0xc0}, + {0x64a2, CRL_REG_LEN_08BIT, 0x00}, + {0x64a3, CRL_REG_LEN_08BIT, 0xff}, + {0x64a4, CRL_REG_LEN_08BIT, 0x06}, + 
{0x64a5, CRL_REG_LEN_08BIT, 0x00}, + {0x64a6, CRL_REG_LEN_08BIT, 0x01}, + {0x64a7, CRL_REG_LEN_08BIT, 0xf6}, + {0x64a8, CRL_REG_LEN_08BIT, 0x04}, + {0x64a9, CRL_REG_LEN_08BIT, 0x30}, + {0x64aa, CRL_REG_LEN_08BIT, 0x00}, + {0x64ab, CRL_REG_LEN_08BIT, 0xff}, + {0x64ac, CRL_REG_LEN_08BIT, 0x06}, + {0x64ad, CRL_REG_LEN_08BIT, 0x10}, + {0x64ae, CRL_REG_LEN_08BIT, 0x01}, + {0x64af, CRL_REG_LEN_08BIT, 0xf6}, + {0x64b0, CRL_REG_LEN_08BIT, 0x04}, + {0x64b1, CRL_REG_LEN_08BIT, 0x30}, + {0x64b2, CRL_REG_LEN_08BIT, 0x06}, + {0x64b3, CRL_REG_LEN_08BIT, 0x00}, + {0x64b4, CRL_REG_LEN_08BIT, 0x00}, + {0x64b5, CRL_REG_LEN_08BIT, 0xff}, + {0x64b6, CRL_REG_LEN_08BIT, 0x06}, + {0x64b7, CRL_REG_LEN_08BIT, 0x20}, + {0x64b8, CRL_REG_LEN_08BIT, 0x01}, + {0x64b9, CRL_REG_LEN_08BIT, 0xf6}, + {0x64ba, CRL_REG_LEN_08BIT, 0x04}, + {0x64bb, CRL_REG_LEN_08BIT, 0x30}, + {0x64bc, CRL_REG_LEN_08BIT, 0x06}, + {0x64bd, CRL_REG_LEN_08BIT, 0x00}, + {0x64be, CRL_REG_LEN_08BIT, 0x00}, + {0x64bf, CRL_REG_LEN_08BIT, 0xff}, + {0x64c0, CRL_REG_LEN_08BIT, 0x04}, + {0x64c1, CRL_REG_LEN_08BIT, 0x31}, + {0x64c2, CRL_REG_LEN_08BIT, 0x04}, + {0x64c3, CRL_REG_LEN_08BIT, 0x30}, + {0x64c4, CRL_REG_LEN_08BIT, 0x01}, + {0x64c5, CRL_REG_LEN_08BIT, 0x20}, + {0x64c6, CRL_REG_LEN_08BIT, 0x01}, + {0x64c7, CRL_REG_LEN_08BIT, 0x31}, + {0x64c8, CRL_REG_LEN_08BIT, 0x01}, + {0x64c9, CRL_REG_LEN_08BIT, 0x32}, + {0x64ca, CRL_REG_LEN_08BIT, 0x01}, + {0x64cb, CRL_REG_LEN_08BIT, 0x33}, + {0x64cc, CRL_REG_LEN_08BIT, 0x01}, + {0x64cd, CRL_REG_LEN_08BIT, 0x34}, + {0x64ce, CRL_REG_LEN_08BIT, 0x01}, + {0x64cf, CRL_REG_LEN_08BIT, 0x35}, + {0x64d0, CRL_REG_LEN_08BIT, 0x01}, + {0x64d1, CRL_REG_LEN_08BIT, 0x36}, + {0x64d2, CRL_REG_LEN_08BIT, 0x01}, + {0x64d3, CRL_REG_LEN_08BIT, 0x37}, + {0x64d4, CRL_REG_LEN_08BIT, 0x01}, + {0x64d5, CRL_REG_LEN_08BIT, 0x38}, + {0x64d6, CRL_REG_LEN_08BIT, 0x01}, + {0x64d7, CRL_REG_LEN_08BIT, 0x39}, + {0x64d8, CRL_REG_LEN_08BIT, 0x01}, + {0x64d9, CRL_REG_LEN_08BIT, 0x3a}, + {0x64da, CRL_REG_LEN_08BIT, 0x01}, + {0x64db, CRL_REG_LEN_08BIT, 0x3b}, + {0x64dc, CRL_REG_LEN_08BIT, 0x01}, + {0x64dd, CRL_REG_LEN_08BIT, 0x3c}, + {0x64de, CRL_REG_LEN_08BIT, 0x01}, + {0x64df, CRL_REG_LEN_08BIT, 0x3d}, + {0x64e0, CRL_REG_LEN_08BIT, 0x01}, + {0x64e1, CRL_REG_LEN_08BIT, 0x3e}, + {0x64e2, CRL_REG_LEN_08BIT, 0x01}, + {0x64e3, CRL_REG_LEN_08BIT, 0x3f}, + {0x64e4, CRL_REG_LEN_08BIT, 0x02}, + {0x64e5, CRL_REG_LEN_08BIT, 0xa0}, + {0x64e6, CRL_REG_LEN_08BIT, 0x00}, + {0x64e7, CRL_REG_LEN_08BIT, 0xff}, + {0x64e8, CRL_REG_LEN_08BIT, 0x04}, + {0x64e9, CRL_REG_LEN_08BIT, 0x31}, + {0x64ea, CRL_REG_LEN_08BIT, 0x04}, + {0x64eb, CRL_REG_LEN_08BIT, 0x30}, + {0x64ec, CRL_REG_LEN_08BIT, 0x01}, + {0x64ed, CRL_REG_LEN_08BIT, 0x00}, + {0x64ee, CRL_REG_LEN_08BIT, 0x01}, + {0x64ef, CRL_REG_LEN_08BIT, 0x11}, + {0x64f0, CRL_REG_LEN_08BIT, 0x01}, + {0x64f1, CRL_REG_LEN_08BIT, 0x12}, + {0x64f2, CRL_REG_LEN_08BIT, 0x01}, + {0x64f3, CRL_REG_LEN_08BIT, 0x13}, + {0x64f4, CRL_REG_LEN_08BIT, 0x01}, + {0x64f5, CRL_REG_LEN_08BIT, 0x14}, + {0x64f6, CRL_REG_LEN_08BIT, 0x01}, + {0x64f7, CRL_REG_LEN_08BIT, 0x15}, + {0x64f8, CRL_REG_LEN_08BIT, 0x01}, + {0x64f9, CRL_REG_LEN_08BIT, 0x16}, + {0x64fa, CRL_REG_LEN_08BIT, 0x01}, + {0x64fb, CRL_REG_LEN_08BIT, 0x17}, + {0x64fc, CRL_REG_LEN_08BIT, 0x01}, + {0x64fd, CRL_REG_LEN_08BIT, 0x18}, + {0x64fe, CRL_REG_LEN_08BIT, 0x01}, + {0x64ff, CRL_REG_LEN_08BIT, 0x19}, + {0x6500, CRL_REG_LEN_08BIT, 0x01}, + {0x6501, CRL_REG_LEN_08BIT, 0x1a}, + {0x6502, CRL_REG_LEN_08BIT, 0x01}, + {0x6503, CRL_REG_LEN_08BIT, 0x1b}, + {0x6504, CRL_REG_LEN_08BIT, 0x01}, + 
{0x6505, CRL_REG_LEN_08BIT, 0x1c}, + {0x6506, CRL_REG_LEN_08BIT, 0x01}, + {0x6507, CRL_REG_LEN_08BIT, 0x1d}, + {0x6508, CRL_REG_LEN_08BIT, 0x01}, + {0x6509, CRL_REG_LEN_08BIT, 0x1e}, + {0x650a, CRL_REG_LEN_08BIT, 0x01}, + {0x650b, CRL_REG_LEN_08BIT, 0x1f}, + {0x650c, CRL_REG_LEN_08BIT, 0x02}, + {0x650d, CRL_REG_LEN_08BIT, 0xa0}, + {0x650e, CRL_REG_LEN_08BIT, 0x00}, + {0x650f, CRL_REG_LEN_08BIT, 0xff}, + {0x6510, CRL_REG_LEN_08BIT, 0x04}, + {0x6511, CRL_REG_LEN_08BIT, 0x20}, + {0x6512, CRL_REG_LEN_08BIT, 0x05}, + {0x6513, CRL_REG_LEN_08BIT, 0x86}, + {0x6514, CRL_REG_LEN_08BIT, 0x03}, + {0x6515, CRL_REG_LEN_08BIT, 0x0b}, + {0x6516, CRL_REG_LEN_08BIT, 0x05}, + {0x6517, CRL_REG_LEN_08BIT, 0x86}, + {0x6518, CRL_REG_LEN_08BIT, 0x00}, + {0x6519, CRL_REG_LEN_08BIT, 0x00}, + {0x651a, CRL_REG_LEN_08BIT, 0x05}, + {0x651b, CRL_REG_LEN_08BIT, 0x06}, + {0x651c, CRL_REG_LEN_08BIT, 0x00}, + {0x651d, CRL_REG_LEN_08BIT, 0x04}, + {0x651e, CRL_REG_LEN_08BIT, 0x05}, + {0x651f, CRL_REG_LEN_08BIT, 0x04}, + {0x6520, CRL_REG_LEN_08BIT, 0x00}, + {0x6521, CRL_REG_LEN_08BIT, 0x04}, + {0x6522, CRL_REG_LEN_08BIT, 0x05}, + {0x6523, CRL_REG_LEN_08BIT, 0x00}, + {0x6524, CRL_REG_LEN_08BIT, 0x05}, + {0x6525, CRL_REG_LEN_08BIT, 0x0a}, + {0x6526, CRL_REG_LEN_08BIT, 0x03}, + {0x6527, CRL_REG_LEN_08BIT, 0x9a}, + {0x6528, CRL_REG_LEN_08BIT, 0x05}, + {0x6529, CRL_REG_LEN_08BIT, 0x86}, + {0x652a, CRL_REG_LEN_08BIT, 0x00}, + {0x652b, CRL_REG_LEN_08BIT, 0x00}, + {0x652c, CRL_REG_LEN_08BIT, 0x05}, + {0x652d, CRL_REG_LEN_08BIT, 0x06}, + {0x652e, CRL_REG_LEN_08BIT, 0x00}, + {0x652f, CRL_REG_LEN_08BIT, 0x01}, + {0x6530, CRL_REG_LEN_08BIT, 0x05}, + {0x6531, CRL_REG_LEN_08BIT, 0x04}, + {0x6532, CRL_REG_LEN_08BIT, 0x00}, + {0x6533, CRL_REG_LEN_08BIT, 0x04}, + {0x6534, CRL_REG_LEN_08BIT, 0x05}, + {0x6535, CRL_REG_LEN_08BIT, 0x00}, + {0x6536, CRL_REG_LEN_08BIT, 0x05}, + {0x6537, CRL_REG_LEN_08BIT, 0x0a}, + {0x6538, CRL_REG_LEN_08BIT, 0x03}, + {0x6539, CRL_REG_LEN_08BIT, 0x99}, + {0x653a, CRL_REG_LEN_08BIT, 0x05}, + {0x653b, CRL_REG_LEN_08BIT, 0x06}, + {0x653c, CRL_REG_LEN_08BIT, 0x00}, + {0x653d, CRL_REG_LEN_08BIT, 0x00}, + {0x653e, CRL_REG_LEN_08BIT, 0x05}, + {0x653f, CRL_REG_LEN_08BIT, 0x04}, + {0x6540, CRL_REG_LEN_08BIT, 0x00}, + {0x6541, CRL_REG_LEN_08BIT, 0x04}, + {0x6542, CRL_REG_LEN_08BIT, 0x05}, + {0x6543, CRL_REG_LEN_08BIT, 0x00}, + {0x6544, CRL_REG_LEN_08BIT, 0x05}, + {0x6545, CRL_REG_LEN_08BIT, 0x0a}, + {0x6546, CRL_REG_LEN_08BIT, 0x03}, + {0x6547, CRL_REG_LEN_08BIT, 0x98}, + {0x6548, CRL_REG_LEN_08BIT, 0x05}, + {0x6549, CRL_REG_LEN_08BIT, 0x06}, + {0x654a, CRL_REG_LEN_08BIT, 0x00}, + {0x654b, CRL_REG_LEN_08BIT, 0x00}, + {0x654c, CRL_REG_LEN_08BIT, 0x05}, + {0x654d, CRL_REG_LEN_08BIT, 0x04}, + {0x654e, CRL_REG_LEN_08BIT, 0x00}, + {0x654f, CRL_REG_LEN_08BIT, 0x04}, + {0x6550, CRL_REG_LEN_08BIT, 0x05}, + {0x6551, CRL_REG_LEN_08BIT, 0x00}, + {0x6552, CRL_REG_LEN_08BIT, 0x05}, + {0x6553, CRL_REG_LEN_08BIT, 0x0a}, + {0x6554, CRL_REG_LEN_08BIT, 0x03}, + {0x6555, CRL_REG_LEN_08BIT, 0x97}, + {0x6556, CRL_REG_LEN_08BIT, 0x05}, + {0x6557, CRL_REG_LEN_08BIT, 0x06}, + {0x6558, CRL_REG_LEN_08BIT, 0x05}, + {0x6559, CRL_REG_LEN_08BIT, 0x04}, + {0x655a, CRL_REG_LEN_08BIT, 0x00}, + {0x655b, CRL_REG_LEN_08BIT, 0x04}, + {0x655c, CRL_REG_LEN_08BIT, 0x05}, + {0x655d, CRL_REG_LEN_08BIT, 0x00}, + {0x655e, CRL_REG_LEN_08BIT, 0x05}, + {0x655f, CRL_REG_LEN_08BIT, 0x0a}, + {0x6560, CRL_REG_LEN_08BIT, 0x03}, + {0x6561, CRL_REG_LEN_08BIT, 0x96}, + {0x6562, CRL_REG_LEN_08BIT, 0x05}, + {0x6563, CRL_REG_LEN_08BIT, 0x06}, + {0x6564, CRL_REG_LEN_08BIT, 0x05}, + 
{0x6565, CRL_REG_LEN_08BIT, 0x04}, + {0x6566, CRL_REG_LEN_08BIT, 0x00}, + {0x6567, CRL_REG_LEN_08BIT, 0x04}, + {0x6568, CRL_REG_LEN_08BIT, 0x05}, + {0x6569, CRL_REG_LEN_08BIT, 0x00}, + {0x656a, CRL_REG_LEN_08BIT, 0x05}, + {0x656b, CRL_REG_LEN_08BIT, 0x0a}, + {0x656c, CRL_REG_LEN_08BIT, 0x03}, + {0x656d, CRL_REG_LEN_08BIT, 0x95}, + {0x656e, CRL_REG_LEN_08BIT, 0x05}, + {0x656f, CRL_REG_LEN_08BIT, 0x06}, + {0x6570, CRL_REG_LEN_08BIT, 0x05}, + {0x6571, CRL_REG_LEN_08BIT, 0x04}, + {0x6572, CRL_REG_LEN_08BIT, 0x00}, + {0x6573, CRL_REG_LEN_08BIT, 0x04}, + {0x6574, CRL_REG_LEN_08BIT, 0x05}, + {0x6575, CRL_REG_LEN_08BIT, 0x00}, + {0x6576, CRL_REG_LEN_08BIT, 0x05}, + {0x6577, CRL_REG_LEN_08BIT, 0x0a}, + {0x6578, CRL_REG_LEN_08BIT, 0x03}, + {0x6579, CRL_REG_LEN_08BIT, 0x94}, + {0x657a, CRL_REG_LEN_08BIT, 0x05}, + {0x657b, CRL_REG_LEN_08BIT, 0x06}, + {0x657c, CRL_REG_LEN_08BIT, 0x00}, + {0x657d, CRL_REG_LEN_08BIT, 0x00}, + {0x657e, CRL_REG_LEN_08BIT, 0x05}, + {0x657f, CRL_REG_LEN_08BIT, 0x04}, + {0x6580, CRL_REG_LEN_08BIT, 0x00}, + {0x6581, CRL_REG_LEN_08BIT, 0x04}, + {0x6582, CRL_REG_LEN_08BIT, 0x05}, + {0x6583, CRL_REG_LEN_08BIT, 0x00}, + {0x6584, CRL_REG_LEN_08BIT, 0x05}, + {0x6585, CRL_REG_LEN_08BIT, 0x0a}, + {0x6586, CRL_REG_LEN_08BIT, 0x03}, + {0x6587, CRL_REG_LEN_08BIT, 0x93}, + {0x6588, CRL_REG_LEN_08BIT, 0x05}, + {0x6589, CRL_REG_LEN_08BIT, 0x06}, + {0x658a, CRL_REG_LEN_08BIT, 0x00}, + {0x658b, CRL_REG_LEN_08BIT, 0x00}, + {0x658c, CRL_REG_LEN_08BIT, 0x05}, + {0x658d, CRL_REG_LEN_08BIT, 0x04}, + {0x658e, CRL_REG_LEN_08BIT, 0x00}, + {0x658f, CRL_REG_LEN_08BIT, 0x04}, + {0x6590, CRL_REG_LEN_08BIT, 0x05}, + {0x6591, CRL_REG_LEN_08BIT, 0x00}, + {0x6592, CRL_REG_LEN_08BIT, 0x05}, + {0x6593, CRL_REG_LEN_08BIT, 0x0a}, + {0x6594, CRL_REG_LEN_08BIT, 0x03}, + {0x6595, CRL_REG_LEN_08BIT, 0x92}, + {0x6596, CRL_REG_LEN_08BIT, 0x05}, + {0x6597, CRL_REG_LEN_08BIT, 0x06}, + {0x6598, CRL_REG_LEN_08BIT, 0x05}, + {0x6599, CRL_REG_LEN_08BIT, 0x04}, + {0x659a, CRL_REG_LEN_08BIT, 0x00}, + {0x659b, CRL_REG_LEN_08BIT, 0x04}, + {0x659c, CRL_REG_LEN_08BIT, 0x05}, + {0x659d, CRL_REG_LEN_08BIT, 0x00}, + {0x659e, CRL_REG_LEN_08BIT, 0x05}, + {0x659f, CRL_REG_LEN_08BIT, 0x0a}, + {0x65a0, CRL_REG_LEN_08BIT, 0x03}, + {0x65a1, CRL_REG_LEN_08BIT, 0x91}, + {0x65a2, CRL_REG_LEN_08BIT, 0x05}, + {0x65a3, CRL_REG_LEN_08BIT, 0x06}, + {0x65a4, CRL_REG_LEN_08BIT, 0x05}, + {0x65a5, CRL_REG_LEN_08BIT, 0x04}, + {0x65a6, CRL_REG_LEN_08BIT, 0x00}, + {0x65a7, CRL_REG_LEN_08BIT, 0x04}, + {0x65a8, CRL_REG_LEN_08BIT, 0x05}, + {0x65a9, CRL_REG_LEN_08BIT, 0x00}, + {0x65aa, CRL_REG_LEN_08BIT, 0x05}, + {0x65ab, CRL_REG_LEN_08BIT, 0x0a}, + {0x65ac, CRL_REG_LEN_08BIT, 0x03}, + {0x65ad, CRL_REG_LEN_08BIT, 0x90}, + {0x65ae, CRL_REG_LEN_08BIT, 0x05}, + {0x65af, CRL_REG_LEN_08BIT, 0x06}, + {0x65b0, CRL_REG_LEN_08BIT, 0x05}, + {0x65b1, CRL_REG_LEN_08BIT, 0x04}, + {0x65b2, CRL_REG_LEN_08BIT, 0x00}, + {0x65b3, CRL_REG_LEN_08BIT, 0x04}, + {0x65b4, CRL_REG_LEN_08BIT, 0x05}, + {0x65b5, CRL_REG_LEN_08BIT, 0x00}, + {0x65b6, CRL_REG_LEN_08BIT, 0x05}, + {0x65b7, CRL_REG_LEN_08BIT, 0x0a}, + {0x65b8, CRL_REG_LEN_08BIT, 0x02}, + {0x65b9, CRL_REG_LEN_08BIT, 0x90}, + {0x65ba, CRL_REG_LEN_08BIT, 0x05}, + {0x65bb, CRL_REG_LEN_08BIT, 0x06}, + {0x65bc, CRL_REG_LEN_08BIT, 0x00}, + {0x65bd, CRL_REG_LEN_08BIT, 0xff}, + {0x65be, CRL_REG_LEN_08BIT, 0x04}, + {0x65bf, CRL_REG_LEN_08BIT, 0x70}, + {0x65c0, CRL_REG_LEN_08BIT, 0x08}, + {0x65c1, CRL_REG_LEN_08BIT, 0x76}, + {0x65c2, CRL_REG_LEN_08BIT, 0x00}, + {0x65c3, CRL_REG_LEN_08BIT, 0xff}, + {0x65c4, CRL_REG_LEN_08BIT, 0x08}, + 
{0x65c5, CRL_REG_LEN_08BIT, 0x76}, + {0x65c6, CRL_REG_LEN_08BIT, 0x04}, + {0x65c7, CRL_REG_LEN_08BIT, 0x0c}, + {0x65c8, CRL_REG_LEN_08BIT, 0x05}, + {0x65c9, CRL_REG_LEN_08BIT, 0x07}, + {0x65ca, CRL_REG_LEN_08BIT, 0x04}, + {0x65cb, CRL_REG_LEN_08BIT, 0x04}, + {0x65cc, CRL_REG_LEN_08BIT, 0x00}, + {0x65cd, CRL_REG_LEN_08BIT, 0xff}, + {0x65ce, CRL_REG_LEN_08BIT, 0x00}, + {0x65cf, CRL_REG_LEN_08BIT, 0xff}, + {0x65d0, CRL_REG_LEN_08BIT, 0x00}, + {0x65d1, CRL_REG_LEN_08BIT, 0xff}, + {0x303a, CRL_REG_LEN_08BIT, 0x04}, + {0x303b, CRL_REG_LEN_08BIT, 0x7f}, + {0x303c, CRL_REG_LEN_08BIT, 0xfe}, + {0x303d, CRL_REG_LEN_08BIT, 0x19}, + {0x303e, CRL_REG_LEN_08BIT, 0xd7}, + {0x303f, CRL_REG_LEN_08BIT, 0x09}, + {0x3040, CRL_REG_LEN_08BIT, 0x78}, + {0x3042, CRL_REG_LEN_08BIT, 0x05}, + {0x328a, CRL_REG_LEN_08BIT, 0x10}, +}; + +static struct crl_register_write_rep ov10640_streamon_regs[] = { + {OV10640_REG_STREAM, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_register_write_rep ov10640_streamoff_regs[] = { + {OV10640_REG_STREAM, CRL_REG_LEN_08BIT, 0x00}, +}; + +static struct crl_arithmetic_ops ov10640_ls2_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 2, + } +}; + +static struct crl_dynamic_register_access ov10640_h_flip_regs[] = { + { + .address = 0x3090, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov10640_ls2_ops), + .ops = ov10640_ls2_ops, + .mask = 0x04, + } +}; + +static struct crl_arithmetic_ops ov10640_ls3_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 3, + } +}; + +static struct crl_dynamic_register_access ov10640_v_flip_regs[] = { + { + .address = 0x3090, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov10640_ls3_ops), + .ops = ov10640_ls3_ops, + .mask = 0x08, + } +}; + +static struct crl_arithmetic_ops ov10640_hsb_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + } +}; + +static struct crl_dynamic_register_access ov10640_llp_regs[] = { + { + .address = 0x3080, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov10640_hsb_ops), + .ops = ov10640_hsb_ops, + .mask = 0xff, + }, + { + .address = 0x3081, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access ov10640_fll_regs[] = { + { + .address = 0x3082, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov10640_hsb_ops), + .ops = ov10640_hsb_ops, + .mask = 0xff, + }, + { + .address = 0x3083, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access ov10640_ana_gain_regs[] = { + { + .address = 0x30EB, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + } +}; + +/* Long digital gain register */ +static struct crl_dynamic_register_access ov10640_gl_regs[] = { + { + .address = 0x30EC, /* High Byte */ + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov10640_hsb_ops), + .ops = ov10640_hsb_ops, + .mask = 0x3f, + }, + { + .address = 0x30ED, /* Low Byte */ + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_arithmetic_ops ov10640_ls1_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + } +}; + +static struct crl_arithmetic_ops ov10640_ls5_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 5, + } +}; + +/* enable ae debug */ +static struct crl_dynamic_register_access ov10640_ae_debug_regs[] = { + { + .address = 0x30FA, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + 
.ops_items = ARRAY_SIZE(ov10640_ls5_ops), + .ops = ov10640_ls5_ops, + .mask = 0x60, + }, +}; + +/* Short digital gain register */ +static struct crl_dynamic_register_access ov10640_gs_regs[] = { + { + .address = 0x30EE, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov10640_hsb_ops), + .ops = ov10640_hsb_ops, + .mask = 0x3f, + }, + { + .address = 0x30EF, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +/* Very short digital gain register */ +static struct crl_dynamic_register_access ov10640_gvs_regs[] = { + { + .address = 0x30F0, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov10640_hsb_ops), + .ops = ov10640_hsb_ops, + .mask = 0x3f, + }, + { + .address = 0x30F1, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +/* Long exposure register, also used in linear(non-HDR) mode */ +static struct crl_dynamic_register_access ov10640_el_regs[] = { + { + .address = 0x30E6, /* High Byte */ + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov10640_hsb_ops), + .ops = ov10640_hsb_ops, + .mask = 0xff, + }, + { + .address = 0x30E7, /* Low Byte */ + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +/* Short exposure register */ +static struct crl_dynamic_register_access ov10640_es_regs[] = { + { + .address = 0x30E8, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov10640_hsb_ops), + .ops = ov10640_hsb_ops, + .mask = 0xff, + }, + { + .address = 0x30E9, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +/* Very short exposure register */ +static struct crl_dynamic_register_access ov10640_evs_regs[] = { + { + .address = 0x30EA, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +/* MSB register */ +static struct crl_dynamic_register_access ov10640_msb_regs[] = { + { + .address = 0x328a, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov10640_ls1_ops), + .ops = ov10640_ls1_ops, + .mask = 0x02, + }, +}; + +/* Needed for acpi support for runtime detection */ +static struct crl_sensor_detect_config ov10640_sensor_detect_regset[] = { + { + .reg = { 0x300A, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 8, + }, + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 8, + }, +}; + +/* ctrl-val == 1 ? 
(1 * 0x0F + 0x45) : (0 * 0x0F + 0x45) -> 0x54 and 0x45 */ +static struct crl_arithmetic_ops ov10640_wdr_ops[] = { + { + .op = CRL_MULTIPLY, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x0F, + }, + { + .op = CRL_ADD, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x45, + } +}; + +static struct crl_dynamic_register_access ov10640_wdr_regs[] = { + { 0x3119, CRL_REG_LEN_08BIT, 0xff, + ARRAY_SIZE(ov10640_wdr_ops), + ov10640_wdr_ops, 0 }, +}; + +static struct crl_arithmetic_ops ov10640_linear_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_REG_VAL, + .operand.entity_val = 0x31BE, + }, + { + .op = CRL_BITWISE_AND, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 0x01, + }, +}; + +static struct crl_dynamic_register_access ov10640_linear_regs[] = { + { + .address = 0x31BE, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov10640_linear_ops), + .ops = ov10640_linear_ops, + .mask = 0xff, + }, +}; + +static struct crl_pll_configuration ov10640_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 12, + .pixel_rate_csi = 72000000, /* Ignore the value here, no use */ + .pixel_rate_pa = 72000000, /* pixel_rate = MIPICLK*2 *4/12 */ + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(ov10640_pll_800mbps), + .pll_regs = ov10640_pll_800mbps, + } +}; + +static struct crl_subdev_rect_rep ov10640_1280_1080_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = {0, 0, 1280, 1080}, + .out_rect = {0, 0, 1280, 1080}, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = {0, 0, 1280, 1080}, + .out_rect = {0, 0, 1280, 1080}, + } +}; + +static struct crl_subdev_rect_rep ov10640_1280_1088_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = {0, 0, 1280, 1088}, + .out_rect = {0, 0, 1280, 1088}, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = {0, 0, 1280, 1088}, + .out_rect = {0, 0, 1280, 1088}, + } +}; + +static struct crl_mode_rep ov10640_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov10640_1280_1080_rects), + .sd_rects = ov10640_1280_1080_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 1080, + .min_llp = 2000, + .min_fll = 1200, + .mode_regs_items = ARRAY_SIZE(ov10640_1280_1080_LONG_RAW), + .mode_regs = ov10640_1280_1080_LONG_RAW, + }, + { + .sd_rects_items = ARRAY_SIZE(ov10640_1280_1088_rects), + .sd_rects = ov10640_1280_1088_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 1088, + .min_llp = 2000, + .min_fll = 1200, + .mode_regs_items = ARRAY_SIZE(ov10640_1280_1088_LONG_RAW), + .mode_regs = ov10640_1280_1088_LONG_RAW, + }, +}; + +static struct crl_sensor_subdev_config ov10640_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov10640 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov10640 pixel array", + } +}; + +static struct crl_sensor_limits ov10640_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1280, + .y_addr_max = 1088, + .min_frame_length_lines = 320, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 380, + .max_line_length_pixels = 32752, +}; + +static struct crl_flip_data ov10640_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = 
CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + } +}; + +static struct crl_csi_data_fmt ov10640_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + } +}; + +static struct crl_v4l2_ctrl ov10640_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_h_flip_regs), + .regs = ov10640_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 1, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_v_flip_regs), + .regs = ov10640_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1280, + .data.std_data.max = OV10640_HMAX, + .data.std_data.step = 1, + .data.std_data.def = 2000, + .flags = 
V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_llp_regs), + .regs = ov10640_llp_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame Length Lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1088, + .data.std_data.max = OV10640_VMAX, + .data.std_data.step = 1, + .data.std_data.def = 1200, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_fll_regs), + .regs = ov10640_fll_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 160, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_ana_gain_regs), + .regs = ov10640_ana_gain_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_AUTO_EXPOSURE_DEBUG, + .name = "CRL_CID_AUTO_EXPOSURE_DEBUG", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 3, + .data.std_data.step = 1, + .data.std_data.def = 0x0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_ae_debug_regs), + .regs = ov10640_ae_debug_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 1, + .data.std_data.max = OV10640_MAX_SHS1, + .data.std_data.step = 1, + .data.std_data.def = 0x040, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_el_regs), + .regs = ov10640_el_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_GAIN, + .name = "Digital Gain", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = OV10640_MAX_DGAIN, + .data.std_data.step = 1, + .data.std_data.def = 0x100, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_gl_regs), + .regs = ov10640_gl_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_S, + .name = "CRL_CID_DIGITAL_GAIN_S", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = OV10640_MAX_DGAIN, + .data.std_data.step = 1, + .data.std_data.def = 0x100, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_gs_regs), + .regs = ov10640_gs_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = 
CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_VS, + .name = "CRL_CID_DIGITAL_GAIN_VS", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = OV10640_MAX_DGAIN, + .data.std_data.step = 1, + .data.std_data.def = 0x100, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_gvs_regs), + .regs = ov10640_gvs_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_STREAMING, + .ctrl_id = CRL_CID_SENSOR_BIT_LINEAR, + .name = "Sensor bit linear", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_linear_regs), + .regs = ov10640_linear_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_MSB_ALIGN, + .name = "CRL_CID_MSB_ALIGN", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 1, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_msb_regs), + .regs = ov10640_msb_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS1, + .name = "CRL_CID_EXPOSURE_SHS1", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1, + .data.std_data.max = OV10640_MAX_SHS1, + .data.std_data.step = 1, + .data.std_data.def = 0x40, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_es_regs), + .regs = ov10640_es_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS3, + .name = "CRL_CID_EXPOSURE_SHS3", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0x11, + .data.std_data.max = OV10640_MAX_SHS3, + .data.std_data.step = 1, + .data.std_data.def = 0x20, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_evs_regs), + .regs = ov10640_evs_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_WDR_MODE, + .name = "V4L2_CID_WDR_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov10640_wdr_regs), + .regs = ov10640_wdr_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +#define OV10640_OTP_BLANK0_START_ADDR 0x349E +#define OV10640_OTP_BLANK0_END_ADDR 0x34AD +#define 
OV10640_OTP_BLANK1_START_ADDR 0x34AE +#define OV10640_OTP_BLANK1_END_ADDR 0x34BD +#define OV10640_OTP_BLANK0_LEN (OV10640_OTP_BLANK0_END_ADDR - \ + OV10640_OTP_BLANK0_START_ADDR + 1) +#define OV10640_OTP_BLANK1_LEN (OV10640_OTP_BLANK1_END_ADDR - \ + OV10640_OTP_BLANK1_START_ADDR + 1) + +static struct crl_register_write_rep ov10640_nvm_preop_regset[] = { + /* Start streaming */ + {OV10640_REG_STREAM, CRL_REG_LEN_08BIT, 0x01}, + /* clear blank 0 data registers buffer */ + { 0x349E, CRL_REG_LEN_08BIT, 0x00 }, + { 0x349F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A0, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A3, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A8, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34A9, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34AA, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34AB, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34AC, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34AD, CRL_REG_LEN_08BIT, 0x00 }, + /* set registers buffer range */ + { 0x3496, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3497, CRL_REG_LEN_08BIT, 0x0F }, + /* select blank 0 */ + { 0x3495, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x00, 0x00, 0x01 }, + /* enable read strobe */ + { 0x349C, CRL_REG_LEN_08BIT, 0x01 }, + /* Wait for the data to load into the buffer */ + { 0x0000, CRL_REG_LEN_DELAY, 25 }, + + /* clear blank 1 data registers buffer */ + { 0x34AE, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34AF, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B0, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B3, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B8, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34B9, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34BA, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34BB, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34BC, CRL_REG_LEN_08BIT, 0x00 }, + { 0x34BD, CRL_REG_LEN_08BIT, 0x00 }, + /* set registers buffer range */ + { 0x3496, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3497, CRL_REG_LEN_08BIT, 0x0F }, + /* select blank 1 */ + { 0x3495, CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 0x01, 0x00, 0x01 }, + /* enable read strobe */ + { 0x349C, CRL_REG_LEN_08BIT, 0x01 }, + /* Wait for the data to load into the buffer */ + { 0x0000, CRL_REG_LEN_DELAY, 25 }, +}; + +static struct crl_register_write_rep ov10640_nvm_postop_regset[] = { + {OV10640_REG_STREAM, CRL_REG_LEN_08BIT, 0x00} /* Stop streaming */ +}; + +static struct crl_nvm_blob ov10640_nvm_blobs[] = { + {CRL_I2C_ADDRESS_NO_OVERRIDE, OV10640_OTP_BLANK0_START_ADDR, OV10640_OTP_BLANK0_LEN}, + {CRL_I2C_ADDRESS_NO_OVERRIDE, OV10640_OTP_BLANK1_START_ADDR, OV10640_OTP_BLANK1_LEN}, +}; + +struct crl_sensor_configuration ov10640_crl_configuration = { + .powerup_regs_items = ARRAY_SIZE(ov10640_powerup_standby), + .powerup_regs = ov10640_powerup_standby, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .power_items = ARRAY_SIZE(ov10640_power_items), + .power_entities = ov10640_power_items, + + .id_reg_items = ARRAY_SIZE(ov10640_sensor_detect_regset), + .id_regs = ov10640_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov10640_sensor_subdevs), + .subdevs = ov10640_sensor_subdevs, + + .sensor_limits = &ov10640_sensor_limits, + + .pll_config_items = ARRAY_SIZE(ov10640_pll_configurations), + .pll_configs = ov10640_pll_configurations, + + 
.modes_items = ARRAY_SIZE(ov10640_modes), + .modes = ov10640_modes, + + .streamon_regs_items = ARRAY_SIZE(ov10640_streamon_regs), + .streamon_regs = ov10640_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(ov10640_streamoff_regs), + .streamoff_regs = ov10640_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ov10640_v4l2_ctrls), + .v4l2_ctrl_bank = ov10640_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov10640_crl_csi_data_fmt), + .csi_fmts = ov10640_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov10640_flip_configurations), + .flip_data = ov10640_flip_configurations, + + .crl_nvm_info.nvm_flags = CRL_NVM_ADDR_MODE_8BIT, + .crl_nvm_info.nvm_preop_regs_items = + ARRAY_SIZE(ov10640_nvm_preop_regset), + .crl_nvm_info.nvm_preop_regs = ov10640_nvm_preop_regset, + .crl_nvm_info.nvm_postop_regs_items = + ARRAY_SIZE(ov10640_nvm_postop_regset), + .crl_nvm_info.nvm_postop_regs = ov10640_nvm_postop_regset, + .crl_nvm_info.nvm_blobs_items = ARRAY_SIZE(ov10640_nvm_blobs), + .crl_nvm_info.nvm_config = ov10640_nvm_blobs, +}; + +#endif /* __CRLMODULE_OV10640_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov13858_configuration.h b/drivers/media/i2c/crlmodule/crl_ov13858_configuration.h new file mode 100644 index 000000000000..7a98ce7e18f6 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov13858_configuration.h @@ -0,0 +1,965 @@ +/* + * Copyright (c) 2017 Intel Corporation. + * + * Author: Lance Hou + + * + */ + +#ifndef __CRLMODULE_OV13858_CONFIGURATION_H_ +#define __CRLMODULE_OV13858_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" +#ifdef CONFIG_CRLMODULE_RD_NVM_TO_VCM +#include "crlmodule-nvm.h" +#endif + +static struct crl_register_write_rep ov13858_powerup_regset[] = { + {0x0103, CRL_REG_LEN_08BIT, 0x01}, + {0x0300, CRL_REG_LEN_08BIT, 0x07}, + {0x0301, CRL_REG_LEN_08BIT, 0x01}, + {0x0302, CRL_REG_LEN_08BIT, 0xc2}, + {0x0303, CRL_REG_LEN_08BIT, 0x00}, + {0x0304, CRL_REG_LEN_08BIT, 0x00}, + {0x0305, CRL_REG_LEN_08BIT, 0x01}, + {0x030b, CRL_REG_LEN_08BIT, 0x05}, + {0x030c, CRL_REG_LEN_08BIT, 0x01}, + {0x030d, CRL_REG_LEN_08BIT, 0x0e}, + {0x3022, CRL_REG_LEN_08BIT, 0x01}, + {0x3013, CRL_REG_LEN_08BIT, 0x32}, + {0x3016, CRL_REG_LEN_08BIT, 0x72}, + {0x301b, CRL_REG_LEN_08BIT, 0xF0}, + {0x301f, CRL_REG_LEN_08BIT, 0xd0}, + {0x3106, CRL_REG_LEN_08BIT, 0x15}, + {0x3107, CRL_REG_LEN_08BIT, 0x23}, + {0x3500, CRL_REG_LEN_08BIT, 0x00}, + {0x3501, CRL_REG_LEN_08BIT, 0x80}, + {0x3502, CRL_REG_LEN_08BIT, 0x00}, + {0x3508, CRL_REG_LEN_08BIT, 0x02}, + {0x3509, CRL_REG_LEN_08BIT, 0x00}, + {0x350a, CRL_REG_LEN_08BIT, 0x00}, + {0x350e, CRL_REG_LEN_08BIT, 0x00}, + {0x3510, CRL_REG_LEN_08BIT, 0x00}, + {0x3511, CRL_REG_LEN_08BIT, 0x02}, + {0x3512, CRL_REG_LEN_08BIT, 0x00}, + {0x3600, CRL_REG_LEN_08BIT, 0x2b}, + {0x3601, CRL_REG_LEN_08BIT, 0x52}, + {0x3602, CRL_REG_LEN_08BIT, 0x60}, + {0x3612, CRL_REG_LEN_08BIT, 0x05}, + {0x3613, CRL_REG_LEN_08BIT, 0xa4}, + {0x3620, CRL_REG_LEN_08BIT, 0x80}, + {0x3621, CRL_REG_LEN_08BIT, 0x08}, + {0x3622, CRL_REG_LEN_08BIT, 0x30}, + {0x3624, CRL_REG_LEN_08BIT, 0x1c}, + {0x3640, CRL_REG_LEN_08BIT, 0x08}, + {0x3641, CRL_REG_LEN_08BIT, 0x70}, + {0x3661, CRL_REG_LEN_08BIT, 0x80}, + {0x3662, CRL_REG_LEN_08BIT, 0x12}, + {0x3664, CRL_REG_LEN_08BIT, 0x73}, + {0x3665, CRL_REG_LEN_08BIT, 0xa7}, + {0x366e, CRL_REG_LEN_08BIT, 0xff}, + {0x366f, CRL_REG_LEN_08BIT, 0xf4}, + {0x3674, CRL_REG_LEN_08BIT, 0x00}, + {0x3679, CRL_REG_LEN_08BIT, 0x0c}, + {0x367f, CRL_REG_LEN_08BIT, 0x01}, + {0x3680, CRL_REG_LEN_08BIT, 0x0c}, + {0x3681, CRL_REG_LEN_08BIT, 0x60}, + {0x3682, 
CRL_REG_LEN_08BIT, 0x17}, + {0x3683, CRL_REG_LEN_08BIT, 0xa9}, + {0x3684, CRL_REG_LEN_08BIT, 0x9a}, + {0x3709, CRL_REG_LEN_08BIT, 0x68}, + {0x3714, CRL_REG_LEN_08BIT, 0x24}, + {0x371a, CRL_REG_LEN_08BIT, 0x3e}, + {0x3737, CRL_REG_LEN_08BIT, 0x04}, + {0x3738, CRL_REG_LEN_08BIT, 0xcc}, + {0x3739, CRL_REG_LEN_08BIT, 0x12}, + {0x373d, CRL_REG_LEN_08BIT, 0x26}, + {0x3764, CRL_REG_LEN_08BIT, 0x20}, + {0x3765, CRL_REG_LEN_08BIT, 0x20}, + {0x37a1, CRL_REG_LEN_08BIT, 0x36}, + {0x37a8, CRL_REG_LEN_08BIT, 0x3b}, + {0x37ab, CRL_REG_LEN_08BIT, 0x31}, + {0x37c2, CRL_REG_LEN_08BIT, 0x04}, + {0x37c3, CRL_REG_LEN_08BIT, 0xf1}, + {0x37c5, CRL_REG_LEN_08BIT, 0x00}, + {0x37d8, CRL_REG_LEN_08BIT, 0x03}, + {0x37d9, CRL_REG_LEN_08BIT, 0x0c}, + {0x37da, CRL_REG_LEN_08BIT, 0xc2}, + {0x37dc, CRL_REG_LEN_08BIT, 0x02}, + {0x37e0, CRL_REG_LEN_08BIT, 0x00}, + {0x37e1, CRL_REG_LEN_08BIT, 0x0a}, + {0x37e2, CRL_REG_LEN_08BIT, 0x14}, + {0x37e3, CRL_REG_LEN_08BIT, 0x04}, + {0x37e4, CRL_REG_LEN_08BIT, 0x2A}, + {0x37e5, CRL_REG_LEN_08BIT, 0x03}, + {0x37e6, CRL_REG_LEN_08BIT, 0x04}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0x08}, + {0x3804, CRL_REG_LEN_08BIT, 0x10}, + {0x3805, CRL_REG_LEN_08BIT, 0x9f}, + {0x3806, CRL_REG_LEN_08BIT, 0x0c}, + {0x3807, CRL_REG_LEN_08BIT, 0x57}, + {0x3808, CRL_REG_LEN_08BIT, 0x10}, + {0x3809, CRL_REG_LEN_08BIT, 0x80}, + {0x380a, CRL_REG_LEN_08BIT, 0x0c}, + {0x380b, CRL_REG_LEN_08BIT, 0x40}, + {0x380c, CRL_REG_LEN_08BIT, 0x04}, + {0x380d, CRL_REG_LEN_08BIT, 0x62}, + {0x380e, CRL_REG_LEN_08BIT, 0x0c}, + {0x380f, CRL_REG_LEN_08BIT, 0x8e}, + {0x3811, CRL_REG_LEN_08BIT, 0x10}, + {0x3813, CRL_REG_LEN_08BIT, 0x08}, + {0x3814, CRL_REG_LEN_08BIT, 0x01}, + {0x3815, CRL_REG_LEN_08BIT, 0x01}, + {0x3816, CRL_REG_LEN_08BIT, 0x01}, + {0x3817, CRL_REG_LEN_08BIT, 0x01}, + {0x3820, CRL_REG_LEN_08BIT, 0xa8}, + {0x3821, CRL_REG_LEN_08BIT, 0x00}, + {0x3822, CRL_REG_LEN_08BIT, 0xc2}, + {0x3823, CRL_REG_LEN_08BIT, 0x18}, + {0x3826, CRL_REG_LEN_08BIT, 0x11}, + {0x3827, CRL_REG_LEN_08BIT, 0x1c}, + {0x3829, CRL_REG_LEN_08BIT, 0x03}, + {0x3832, CRL_REG_LEN_08BIT, 0x00}, + {0x3c80, CRL_REG_LEN_08BIT, 0x00}, + {0x3c87, CRL_REG_LEN_08BIT, 0x01}, + {0x3c8c, CRL_REG_LEN_08BIT, 0x19}, + {0x3c8d, CRL_REG_LEN_08BIT, 0x1c}, + {0x3c90, CRL_REG_LEN_08BIT, 0x00}, + {0x3c91, CRL_REG_LEN_08BIT, 0x00}, + {0x3c92, CRL_REG_LEN_08BIT, 0x00}, + {0x3c93, CRL_REG_LEN_08BIT, 0x00}, + {0x3c94, CRL_REG_LEN_08BIT, 0x40}, + {0x3c95, CRL_REG_LEN_08BIT, 0x54}, + {0x3c96, CRL_REG_LEN_08BIT, 0x34}, + {0x3c97, CRL_REG_LEN_08BIT, 0x04}, + {0x3c98, CRL_REG_LEN_08BIT, 0x00}, + {0x3d8c, CRL_REG_LEN_08BIT, 0x73}, + {0x3d8d, CRL_REG_LEN_08BIT, 0xc0}, + {0x3f00, CRL_REG_LEN_08BIT, 0x0b}, + {0x3f03, CRL_REG_LEN_08BIT, 0x00}, + {0x4001, CRL_REG_LEN_08BIT, 0xe0}, + {0x4008, CRL_REG_LEN_08BIT, 0x00}, + {0x4009, CRL_REG_LEN_08BIT, 0x0f}, + {0x4011, CRL_REG_LEN_08BIT, 0xf0}, + {0x4017, CRL_REG_LEN_08BIT, 0x08}, + {0x4050, CRL_REG_LEN_08BIT, 0x04}, + {0x4051, CRL_REG_LEN_08BIT, 0x0b}, + {0x4052, CRL_REG_LEN_08BIT, 0x00}, + {0x4053, CRL_REG_LEN_08BIT, 0x80}, + {0x4054, CRL_REG_LEN_08BIT, 0x00}, + {0x4055, CRL_REG_LEN_08BIT, 0x80}, + {0x4056, CRL_REG_LEN_08BIT, 0x00}, + {0x4057, CRL_REG_LEN_08BIT, 0x80}, + {0x4058, CRL_REG_LEN_08BIT, 0x00}, + {0x4059, CRL_REG_LEN_08BIT, 0x80}, + {0x405e, CRL_REG_LEN_08BIT, 0x20}, + {0x4500, CRL_REG_LEN_08BIT, 0x07}, + {0x4503, CRL_REG_LEN_08BIT, 0x00}, + {0x450a, CRL_REG_LEN_08BIT, 0x04}, + {0x4809, CRL_REG_LEN_08BIT, 0x04}, + {0x480c, 
CRL_REG_LEN_08BIT, 0x12}, + {0x481f, CRL_REG_LEN_08BIT, 0x30}, + {0x4833, CRL_REG_LEN_08BIT, 0x10}, + {0x4837, CRL_REG_LEN_08BIT, 0x0e}, + {0x4902, CRL_REG_LEN_08BIT, 0x01}, + {0x4d00, CRL_REG_LEN_08BIT, 0x03}, + {0x4d01, CRL_REG_LEN_08BIT, 0xc9}, + {0x4d02, CRL_REG_LEN_08BIT, 0xbc}, + {0x4d03, CRL_REG_LEN_08BIT, 0xd7}, + {0x4d04, CRL_REG_LEN_08BIT, 0xf0}, + {0x4d05, CRL_REG_LEN_08BIT, 0xa2}, + {0x5000, CRL_REG_LEN_08BIT, 0xfD}, + {0x5001, CRL_REG_LEN_08BIT, 0x01}, + {0x5040, CRL_REG_LEN_08BIT, 0x39}, + {0x5041, CRL_REG_LEN_08BIT, 0x10}, + {0x5042, CRL_REG_LEN_08BIT, 0x10}, + {0x5043, CRL_REG_LEN_08BIT, 0x84}, + {0x5044, CRL_REG_LEN_08BIT, 0x62}, + {0x5180, CRL_REG_LEN_08BIT, 0x00}, + {0x5181, CRL_REG_LEN_08BIT, 0x10}, + {0x5182, CRL_REG_LEN_08BIT, 0x02}, + {0x5183, CRL_REG_LEN_08BIT, 0x0f}, + {0x5200, CRL_REG_LEN_08BIT, 0x1b}, + {0x520b, CRL_REG_LEN_08BIT, 0x07}, + {0x520c, CRL_REG_LEN_08BIT, 0x0f}, + {0x5300, CRL_REG_LEN_08BIT, 0x04}, + {0x5301, CRL_REG_LEN_08BIT, 0x0C}, + {0x5302, CRL_REG_LEN_08BIT, 0x0C}, + {0x5303, CRL_REG_LEN_08BIT, 0x0f}, + {0x5304, CRL_REG_LEN_08BIT, 0x00}, + {0x5305, CRL_REG_LEN_08BIT, 0x70}, + {0x5306, CRL_REG_LEN_08BIT, 0x00}, + {0x5307, CRL_REG_LEN_08BIT, 0x80}, + {0x5308, CRL_REG_LEN_08BIT, 0x00}, + {0x5309, CRL_REG_LEN_08BIT, 0xa5}, + {0x530a, CRL_REG_LEN_08BIT, 0x00}, + {0x530b, CRL_REG_LEN_08BIT, 0xd3}, + {0x530c, CRL_REG_LEN_08BIT, 0x00}, + {0x530d, CRL_REG_LEN_08BIT, 0xf0}, + {0x530e, CRL_REG_LEN_08BIT, 0x01}, + {0x530f, CRL_REG_LEN_08BIT, 0x10}, + {0x5310, CRL_REG_LEN_08BIT, 0x01}, + {0x5311, CRL_REG_LEN_08BIT, 0x20}, + {0x5312, CRL_REG_LEN_08BIT, 0x01}, + {0x5313, CRL_REG_LEN_08BIT, 0x20}, + {0x5314, CRL_REG_LEN_08BIT, 0x01}, + {0x5315, CRL_REG_LEN_08BIT, 0x20}, + {0x5316, CRL_REG_LEN_08BIT, 0x08}, + {0x5317, CRL_REG_LEN_08BIT, 0x08}, + {0x5318, CRL_REG_LEN_08BIT, 0x10}, + {0x5319, CRL_REG_LEN_08BIT, 0x88}, + {0x531a, CRL_REG_LEN_08BIT, 0x88}, + {0x531b, CRL_REG_LEN_08BIT, 0xa9}, + {0x531c, CRL_REG_LEN_08BIT, 0xaa}, + {0x531d, CRL_REG_LEN_08BIT, 0x0a}, + {0x5405, CRL_REG_LEN_08BIT, 0x02}, + {0x5406, CRL_REG_LEN_08BIT, 0x67}, + {0x5407, CRL_REG_LEN_08BIT, 0x01}, + {0x5408, CRL_REG_LEN_08BIT, 0x4a}, +}; + +static struct crl_register_write_rep ov13858_3864x2202_RAW10_NORMAL[] = { + {0x0100, CRL_REG_LEN_08BIT, 0x00}, + {0x0300, CRL_REG_LEN_08BIT, 0x07}, + {0x0301, CRL_REG_LEN_08BIT, 0x01}, + {0x0302, CRL_REG_LEN_08BIT, 0xc2}, + {0x0303, CRL_REG_LEN_08BIT, 0x00}, + {0x030b, CRL_REG_LEN_08BIT, 0x05}, + {0x030c, CRL_REG_LEN_08BIT, 0x01}, + {0x030d, CRL_REG_LEN_08BIT, 0x0e}, + {0x0312, CRL_REG_LEN_08BIT, 0x01}, + {0x3662, CRL_REG_LEN_08BIT, 0x12}, + {0x3714, CRL_REG_LEN_08BIT, 0x24}, + {0x3737, CRL_REG_LEN_08BIT, 0x04}, + {0x3739, CRL_REG_LEN_08BIT, 0x12}, + {0x37c2, CRL_REG_LEN_08BIT, 0x04}, + {0x37d9, CRL_REG_LEN_08BIT, 0x0c}, + {0x37e3, CRL_REG_LEN_08BIT, 0x04}, + {0x37e4, CRL_REG_LEN_08BIT, 0x2a}, + {0x37e6, CRL_REG_LEN_08BIT, 0x04}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0x08}, + {0x3804, CRL_REG_LEN_08BIT, 0x10}, + {0x3805, CRL_REG_LEN_08BIT, 0x9f}, + {0x3806, CRL_REG_LEN_08BIT, 0x0c}, + {0x3807, CRL_REG_LEN_08BIT, 0x57}, + {0x3808, CRL_REG_LEN_08BIT, 0x0f}, + {0x3809, CRL_REG_LEN_08BIT, 0x18}, + {0x380a, CRL_REG_LEN_08BIT, 0x08}, + {0x380b, CRL_REG_LEN_08BIT, 0x9a}, + {0x380c, CRL_REG_LEN_08BIT, 0x04}, + {0x380d, CRL_REG_LEN_08BIT, 0x62}, + {0x380e, CRL_REG_LEN_08BIT, 0x0c}, + {0x380f, CRL_REG_LEN_08BIT, 0x8e}, + {0x3810, CRL_REG_LEN_08BIT, 0x00}, + {0x3811, CRL_REG_LEN_08BIT, 0xc5}, /* default C4 for BGGR X-axis */ + {0x3812, CRL_REG_LEN_08BIT, 0x01}, + {0x3813, CRL_REG_LEN_08BIT, 0xdb}, /* default da for BGGR Y-axis */ + {0x3814, CRL_REG_LEN_08BIT, 0x01}, + {0x3815, CRL_REG_LEN_08BIT, 0x01}, + {0x3816, CRL_REG_LEN_08BIT, 0x01}, + {0x3817, CRL_REG_LEN_08BIT, 0x01}, + {0x3820, CRL_REG_LEN_08BIT, 0xa8}, + {0x3821, CRL_REG_LEN_08BIT, 0x00}, + {0x3826, CRL_REG_LEN_08BIT, 0x11}, + {0x3827, CRL_REG_LEN_08BIT, 0x1c}, + {0x3829, CRL_REG_LEN_08BIT, 0x03}, + {0x4009, CRL_REG_LEN_08BIT, 0x0f}, + {0x4837, CRL_REG_LEN_08BIT, 0x0e}, + {0x4050, CRL_REG_LEN_08BIT, 0x04}, + {0x4051, CRL_REG_LEN_08BIT, 0x0b}, + {0x4902, CRL_REG_LEN_08BIT, 0x01}, + {0x5000, CRL_REG_LEN_08BIT, 0xfd}, + {0x5001, CRL_REG_LEN_08BIT, 0x01}, + {0x0100, CRL_REG_LEN_08BIT, 0x01}, +}; + +static struct crl_register_write_rep ov13858_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep ov13858_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_arithmetic_ops ov13858_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov13858_hflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov13858_exposure_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 4, + } +}; + +static struct crl_dynamic_register_access ov13858_v_flip_regs[] = { + { + .address = 0x3820, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov13858_vflip_ops), + .ops = ov13858_vflip_ops, + .mask = 0x2, + }, +}; + +static struct crl_dynamic_register_access ov13858_h_flip_regs[] = { + { + .address = 0x3821, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov13858_hflip_ops), + .ops = ov13858_hflip_ops, + .mask = 0x2, + }, +}; + +struct crl_register_write_rep ov13858_poweroff_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, +}; + +#ifdef CONFIG_CRLMODULE_RD_NVM_TO_VCM +static struct crl_nvm_blob ov13858_nvm_blobs[] = { + { 0x50, 0x00, 0x100 }, +}; +#endif + +static struct crl_dynamic_register_access ov13858_ana_gain_global_regs[] = { + { + .address = 0x3508, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0x7ff, + }, +}; + +static struct crl_dynamic_register_access ov13858_exposure_regs[] = { + { + .address = 0x3500, + .len = CRL_REG_LEN_24BIT, + .ops_items = ARRAY_SIZE(ov13858_exposure_ops), + .ops = ov13858_exposure_ops, + .mask = 0x0ffff0, + } +}; + +static struct crl_dynamic_register_access ov13858_dig_gain_regs[] = { + { + .address = 0x5100, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0x7fff, + }, + { + .address = 0x5102, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0x7fff, + }, + { + .address = 0x5104, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0x7fff, + } +}; + +static struct crl_dynamic_register_access ov13858_vblank_regs[] = { + { + .address = 0x380E, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access ov13858_hblank_regs[] = { + { + .address = 0x380C, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_sensor_detect_config ov13858_sensor_detect_regset[] = { + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x300C, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration 
ov13858_pll_configurations[] = { + { + .input_clk = 19200000, + .op_sys_clk = 54000000, + .bitsperpixel = 10, + .pixel_rate_csi = 43200000, + .pixel_rate_pa = 43200000, + .csi_lanes= 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = 0, + }, +}; + +static struct crl_subdev_rect_rep ov13858_13m_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3136, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3136, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3136, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3136, + }, +}; + +static struct crl_subdev_rect_rep ov13858_2202_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3136, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3136, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3136, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3864, + .out_rect.height = 2202, + }, +}; + + +static struct crl_mode_rep ov13858_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov13858_13m_rects), + .sd_rects = ov13858_13m_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4224, + .height = 3136, + .min_llp = 1122, + .min_fll = 3214, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = 0, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13858_2202_rects), + .sd_rects = ov13858_2202_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3864, + .height = 2202, + .min_llp = 1122, + .min_fll = 3214, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13858_3864x2202_RAW10_NORMAL), + .mode_regs = ov13858_3864x2202_RAW10_NORMAL, + }, +}; + +static struct crl_sensor_subdev_config ov13858_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov13858 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov13858 pixel array", + }, +}; + +static struct crl_sensor_limits ov13858_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 4224, + .y_addr_max = 3136, + .min_frame_length_lines = 3214, + .max_frame_length_lines = 32767, + .min_line_length_pixels = 1122, + .max_line_length_pixels = 65535, + .scaler_m_min = 16, + .scaler_m_max = 16, + .scaler_n_min = 16, + .scaler_n_max = 16, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data ov13858_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, +}; + +static struct crl_csi_data_fmt ov13858_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + 
.pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, +}; + +static struct crl_v4l2_ctrl ov13858_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13858_ana_gain_global_regs), + .regs = ov13858_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13858_exposure_regs), + .regs = ov13858_exposure_regs, + .dep_items = 0, /* FLL changes automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13858_h_flip_regs), + .regs = ov13858_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = 
CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13858_v_flip_regs), + .regs = ov13858_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame Length Lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 3214, + .data.std_data.max = 32767, + .data.std_data.step = 1, + .data.std_data.def = 3214, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13858_vblank_regs), + .regs = ov13858_vblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1122, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 1122, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13858_hblank_regs), + .regs = ov13858_hblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_GAIN, + .name = "Digital Gain", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 16384, + .data.std_data.step = 1, + .data.std_data.def = 1024, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13858_dig_gain_regs), + .regs = ov13858_dig_gain_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_WDR_MODE, + .name = "V4L2_CID_WDR_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +#ifdef CONFIG_VIDEO_CRLMODULE_OTP_VALIDATE +static struct crl_register_write_rep ov13858_otp_preop_regset[] = { + /*sensor OTP module check*/ + { 0x5000, CRL_REG_LEN_08BIT, 0x5f }, + /* Manual mode, program disable */ + { 0x3D84, CRL_REG_LEN_08BIT, 0xC0 }, + /* Manual OTP start address for access */ + { 0x3D88, CRL_REG_LEN_08BIT, 0x72}, + { 0x3D89, CRL_REG_LEN_08BIT, 0x20}, + /* Manual OTP end address for access */ + { 0x3D8A, CRL_REG_LEN_08BIT, 0x72}, + { 0x3D8B, CRL_REG_LEN_08BIT, 0x21}, + /*streaming on*/ + { 0x0100, CRL_REG_LEN_08BIT, 0x01 }, + /* OTP load enable */ + { 0x3D81, CRL_REG_LEN_08BIT, 0x31 }, + /* Wait for the data to load into the buffer */ + { 0x0000, CRL_REG_LEN_DELAY, 0x14 }, +}; + +static struct crl_register_write_rep ov13858_otp_postop_regset[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, /* Stop streaming */ +}; + +static struct crl_register_write_rep ov13858_otp_mode_regset[] = { + { 0x5000, CRL_REG_LEN_08BIT, 0x7F }, /*ISP Control*/ + { 0x5040, CRL_REG_LEN_08BIT, 0xA8 }, /*Set Test Mode*/ +}; + +struct crl_register_read_rep ov13858_sensor_otp_read_regset[] = { + { 0x7220, CRL_REG_LEN_16BIT, 0x0000ffff }, +}; +#endif + +#if 0 +static struct crl_arithmetic_ops 
ov13858_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops ov13858_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc ov13858_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(ov13858_frame_desc_width_ops), + .ops = ov13858_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(ov13858_frame_desc_height_ops), + .ops = ov13858_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, +}; +#endif + +/* Power items; they are enabled in the order they are listed here */ +static struct crl_power_seq_entity ov13858_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 19200000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .undo_val = 0, + .delay = 1000, + }, +}; + +struct crl_sensor_configuration ov13858_crl_configuration = { + + .power_items = ARRAY_SIZE(ov13858_power_items), + .power_entities = ov13858_power_items, + + .powerup_regs_items = ARRAY_SIZE(ov13858_powerup_regset), + .powerup_regs = ov13858_powerup_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(ov13858_sensor_detect_regset), + .id_regs = ov13858_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov13858_sensor_subdevs), + .subdevs = ov13858_sensor_subdevs, + + .sensor_limits = &ov13858_sensor_limits, + + .pll_config_items = ARRAY_SIZE(ov13858_pll_configurations), + .pll_configs = ov13858_pll_configurations, + + .modes_items = ARRAY_SIZE(ov13858_modes), + .modes = ov13858_modes, + + .streamon_regs_items = ARRAY_SIZE(ov13858_streamon_regs), + .streamon_regs = ov13858_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(ov13858_streamoff_regs), + .streamoff_regs = ov13858_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ov13858_v4l2_ctrls), + .v4l2_ctrl_bank = ov13858_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov13858_crl_csi_data_fmt), + .csi_fmts = ov13858_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov13858_flip_configurations), + .flip_data = ov13858_flip_configurations, + +#ifdef CONFIG_CRLMODULE_RD_NVM_TO_VCM + .crl_nvm_info.nvm_flags = CRL_NVM_ADDR_MODE_8BIT, + .crl_nvm_info.nvm_preop_regs_items = 0, + .crl_nvm_info.nvm_postop_regs_items = 0, + .crl_nvm_info.nvm_blobs_items = ARRAY_SIZE(ov13858_nvm_blobs), + .crl_nvm_info.nvm_config = ov13858_nvm_blobs, +#endif + +#ifdef CONFIG_VIDEO_CRLMODULE_OTP_VALIDATE + .crl_otp_info.otp_preop_regs_items = + ARRAY_SIZE(ov13858_otp_preop_regset), + .crl_otp_info.otp_preop_regs = ov13858_otp_preop_regset, + .crl_otp_info.otp_postop_regs_items = + ARRAY_SIZE(ov13858_otp_postop_regset), + .crl_otp_info.otp_postop_regs = ov13858_otp_postop_regset, + + .crl_otp_info.otp_mode_regs_items = + ARRAY_SIZE(ov13858_otp_mode_regset), + .crl_otp_info.otp_mode_regs = ov13858_otp_mode_regset, + + .crl_otp_info.otp_read_regs_items = + ARRAY_SIZE(ov13858_sensor_otp_read_regset), + .crl_otp_info.otp_read_regs = ov13858_sensor_otp_read_regset, + + .crl_otp_info.otp_id = 0x5168, /* Chicony module */ +#endif
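+
+	/*
+	 * Editor's sketch (assumption, not part of this patch): the CRL core
+	 * is expected to walk each table through its paired _items count,
+	 * roughly
+	 *
+	 *	for (i = 0; i < cfg->power_items; i++)
+	 *		crl_power_up(&cfg->power_entities[i]);
+	 *
+	 * where crl_power_up() is a hypothetical helper name. This is why
+	 * every table pointer in this structure is paired with an
+	 * ARRAY_SIZE() count taken from the same array.
+	 */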
+}; + +#endif /* __CRLMODULE_OV13858_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov13860_configuration.h b/drivers/media/i2c/crlmodule/crl_ov13860_configuration.h new file mode 100644 index 000000000000..4f8e7c651fdd --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov13860_configuration.h @@ -0,0 +1,1539 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation + * + * Author: Kamal Ramamoorthy + * + */ + +#ifndef __CRLMODULE_OV13860_CONFIGURATION_H_ +#define __CRLMODULE_OV13860_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep ov13860_pll_600mbps[] = { + { 0x0300, CRL_REG_LEN_08BIT, 0x00 },/* pll1_pre_div = default */ + { 0x0301, CRL_REG_LEN_08BIT, 0x00 },/* pll1_multiplier Bit[8-9] = default */ + { 0x0302, CRL_REG_LEN_08BIT, 0x32 },/* pll1_multiplier Bit[0-7] = default */ + { 0x0303, CRL_REG_LEN_08BIT, 0x01 },/* pll1_divm = /(1 + 1) */ + { 0x0304, CRL_REG_LEN_08BIT, 0x07 },/* pll1_div_mipi = default */ + { 0x0305, CRL_REG_LEN_08BIT, 0x01 },/* pll1 pix clock div */ + { 0x0306, CRL_REG_LEN_08BIT, 0x01 },/* pll1 sys clock div */ + { 0x0308, CRL_REG_LEN_08BIT, 0x00 },/* pll1 bypass = default */ + { 0x0309, CRL_REG_LEN_08BIT, 0x01 },/* pll1 cp = default */ + { 0x030A, CRL_REG_LEN_08BIT, 0x00 },/* pll1 ctr = default */ + { 0x030B, CRL_REG_LEN_08BIT, 0x00 },/* pll2_pre_div = default */ + { 0x030C, CRL_REG_LEN_08BIT, 0x00 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x28 },/* pll2_r_divp = default */ + { 0x030E, CRL_REG_LEN_08BIT, 0x02 },/* pll2_r_divs = default */ + { 0x030F, CRL_REG_LEN_08BIT, 0x07 },/* pll2_r_divsp = default */ + { 0x0310, CRL_REG_LEN_08BIT, 0x01 },/* pll2_cp = default */ + { 0x0311, CRL_REG_LEN_08BIT, 0x00 },/* pll2_cp = default */ + { 0x0312, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0313, CRL_REG_LEN_08BIT, 0x03 }, + { 0x031B, CRL_REG_LEN_08BIT, 0x00 },/* pll1 rst = default */ + { 0x031C, CRL_REG_LEN_08BIT, 0x00 },/* pll2 rst = default */ + { 0x031E, CRL_REG_LEN_08BIT, 0x01 },/* pll ctr::mipi_bitsel_man = 1 */ + { 0x4837, CRL_REG_LEN_08BIT, 0x1a },/* pclk period */ +}; + +static struct crl_register_write_rep ov13860_pll_1200mbps[] = { + { 0x0300, CRL_REG_LEN_08BIT, 0x00 },/* pll1_pre_div = default */ + { 0x0301, CRL_REG_LEN_08BIT, 0x00 },/* pll1_multiplier Bit[8-9] = default */ + { 0x0302, CRL_REG_LEN_08BIT, 0x32 },/* pll1_multiplier Bit[0-7] = default */ + { 0x0303, CRL_REG_LEN_08BIT, 0x00 },/* pll1_divm = /(1 + 0) */ + { 0x0304, CRL_REG_LEN_08BIT, 0x07 },/* pll1_div_mipi = default */ + { 0x0305, CRL_REG_LEN_08BIT, 0x01 },/* pll1 pix clock div */ + { 0x0306, CRL_REG_LEN_08BIT, 0x01 },/* pll1 sys clock div */ + { 0x0308, CRL_REG_LEN_08BIT, 0x00 },/* pll1 bypass = default */ + { 0x0309, CRL_REG_LEN_08BIT, 0x01 },/* pll1 cp = default */ + { 0x030A, CRL_REG_LEN_08BIT, 0x00 },/* pll1 ctr = default */ + { 0x030B, CRL_REG_LEN_08BIT, 0x00 },/* pll2_pre_div = default */ + { 0x030C, CRL_REG_LEN_08BIT, 0x00 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x28 },/* pll2_r_divp = default */ + { 0x030E, CRL_REG_LEN_08BIT, 0x02 },/* pll2_r_divs = default */ + { 0x030F, CRL_REG_LEN_08BIT, 0x07 },/* pll2_r_divsp = default */ + { 0x0310, CRL_REG_LEN_08BIT, 0x01 },/* pll2_cp = default */ + { 0x0311, CRL_REG_LEN_08BIT, 0x00 },/* pll2_cp = default */ + { 0x0312, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0313, CRL_REG_LEN_08BIT, 0x03 }, + { 0x031B, CRL_REG_LEN_08BIT, 0x00 },/* pll1 rst = default */ + { 0x031C, CRL_REG_LEN_08BIT, 0x00 },/* pll2 rst = default */ + { 0x031E, CRL_REG_LEN_08BIT, 0x01 },/* pll ctr::mipi_bitsel_man = 1 */ + {
0x4837, CRL_REG_LEN_08BIT, 0x0d },/* pclk period */ +}; + +static struct crl_register_write_rep ov13860_powerup_regset[] = { + { 0x3010, CRL_REG_LEN_08BIT, 0x01 }, /* MIPI PHY1 = 1 */ + + /* + * MIPI sc ctrl = 1 + * Bit [7:4] num lane + * Bit [0] phy pad enable + */ + { 0x3012, CRL_REG_LEN_08BIT, 0x21 }, + + { 0x340C, CRL_REG_LEN_08BIT, 0xff }, + { 0x340D, CRL_REG_LEN_08BIT, 0xff }, + + /* + * R Manual + * Bit 0:aec_manual, Bit 1:agc_manual, Bit 2:vts_manual + * Bit 4:delay option, Bit 5:gain delay option + */ + { 0x3503, CRL_REG_LEN_08BIT, 0x00 }, + + { 0x3507, CRL_REG_LEN_08BIT, 0x00 },/* MEC Median Exposure Bit[15:8] */ + { 0x3508, CRL_REG_LEN_08BIT, 0x00 },/* MEC Median Exposure Bit[7:0] */ + + { 0x3509, CRL_REG_LEN_08BIT, 0x12 },/* R CTRL9 = default */ + + { 0x350A, CRL_REG_LEN_08BIT, 0x00 },/* MEC Long gain [10:8] */ + { 0x350B, CRL_REG_LEN_08BIT, 0xff },/* MEC Long gain [7:0] */ + + { 0x350F, CRL_REG_LEN_08BIT, 0x10 },/* Median gain [7:0] */ + + { 0x3541, CRL_REG_LEN_08BIT, 0x02 },/* MEC Short exposure [15:8] */ + { 0x3542, CRL_REG_LEN_08BIT, 0x00 },/* MEC Short exposure [7:0] */ + { 0x3543, CRL_REG_LEN_08BIT, 0x00 },/* Magic */ + + /* + * HDR related setting + */ + { 0x3547, CRL_REG_LEN_08BIT, 0x00 },/* Very short exposure */ + { 0x3548, CRL_REG_LEN_08BIT, 0x00 },/* Very short exposure */ + { 0x3549, CRL_REG_LEN_08BIT, 0x12 },/* Magic */ + { 0x354B, CRL_REG_LEN_08BIT, 0x10 },/* MEC short gain [7:0] */ + { 0x354F, CRL_REG_LEN_08BIT, 0x10 },/* MEC very short gain [7:0] */ + + /* Analog setting control */ + { 0x3600, CRL_REG_LEN_08BIT, 0x41 }, + { 0x3601, CRL_REG_LEN_08BIT, 0xd4 }, + { 0x3603, CRL_REG_LEN_08BIT, 0x97 }, + { 0x3604, CRL_REG_LEN_08BIT, 0x08 }, + { 0x360A, CRL_REG_LEN_08BIT, 0x35 }, + { 0x360C, CRL_REG_LEN_08BIT, 0xA0 }, + { 0x360D, CRL_REG_LEN_08BIT, 0x53 }, + { 0x3618, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3620, CRL_REG_LEN_08BIT, 0x55 }, + { 0x3622, CRL_REG_LEN_08BIT, 0x8C }, + { 0x3623, CRL_REG_LEN_08BIT, 0x30 }, + { 0x3628, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3660, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x3662, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3663, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3664, CRL_REG_LEN_08BIT, 0x04 }, + { 0x366B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3701, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3702, CRL_REG_LEN_08BIT, 0x30 }, + { 0x3703, CRL_REG_LEN_08BIT, 0x3B }, + { 0x3704, CRL_REG_LEN_08BIT, 0x26 }, + { 0x3705, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3706, CRL_REG_LEN_08BIT, 0x3F }, + { 0x3708, CRL_REG_LEN_08BIT, 0x3C }, + { 0x3709, CRL_REG_LEN_08BIT, 0x18 }, + { 0x370E, CRL_REG_LEN_08BIT, 0x32 }, + { 0x3710, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3712, CRL_REG_LEN_08BIT, 0x12 }, + { 0x3714, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3717, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3719, CRL_REG_LEN_08BIT, 0x03 }, + { 0x371E, CRL_REG_LEN_08BIT, 0x31 }, + { 0x371F, CRL_REG_LEN_08BIT, 0x7F }, + { 0x3720, CRL_REG_LEN_08BIT, 0x18 }, + { 0x3721, CRL_REG_LEN_08BIT, 0x0A }, + { 0x3726, CRL_REG_LEN_08BIT, 0x22 }, + { 0x3727, CRL_REG_LEN_08BIT, 0x44 }, + { 0x3728, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3729, CRL_REG_LEN_08BIT, 0x00 }, + { 0x372A, CRL_REG_LEN_08BIT, 0x20 }, + { 0x372B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x372E, CRL_REG_LEN_08BIT, 0x2B }, + { 0x3730, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3731, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3732, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3733, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3734, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3735, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3736, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3737, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3744, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3745, CRL_REG_LEN_08BIT, 0x5E
}, + { 0x3746, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3747, CRL_REG_LEN_08BIT, 0x1F }, + { 0x3748, CRL_REG_LEN_08BIT, 0x00 }, + { 0x374A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3760, CRL_REG_LEN_08BIT, 0xD1 }, + { 0x3761, CRL_REG_LEN_08BIT, 0x31 }, + { 0x3762, CRL_REG_LEN_08BIT, 0x53 }, + { 0x3763, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3767, CRL_REG_LEN_08BIT, 0x24 }, + { 0x3768, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3769, CRL_REG_LEN_08BIT, 0x24 }, + { 0x376C, CRL_REG_LEN_08BIT, 0x43 }, + { 0x376D, CRL_REG_LEN_08BIT, 0x01 }, + { 0x376E, CRL_REG_LEN_08BIT, 0x53 }, + { 0x378C, CRL_REG_LEN_08BIT, 0x1F }, + { 0x378D, CRL_REG_LEN_08BIT, 0x13 }, + { 0x378F, CRL_REG_LEN_08BIT, 0x88 }, + { 0x3790, CRL_REG_LEN_08BIT, 0x5A }, + { 0x3791, CRL_REG_LEN_08BIT, 0x5A }, + { 0x3792, CRL_REG_LEN_08BIT, 0x21 }, + { 0x3794, CRL_REG_LEN_08BIT, 0x71 }, + { 0x3796, CRL_REG_LEN_08BIT, 0x01 }, + { 0x379F, CRL_REG_LEN_08BIT, 0x3E }, + { 0x37A0, CRL_REG_LEN_08BIT, 0x44 }, + { 0x37A1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37A2, CRL_REG_LEN_08BIT, 0x44 }, + { 0x37A3, CRL_REG_LEN_08BIT, 0x41 }, + { 0x37A4, CRL_REG_LEN_08BIT, 0x88 }, + { 0x37A5, CRL_REG_LEN_08BIT, 0xA9 }, + { 0x37B3, CRL_REG_LEN_08BIT, 0xDC }, + { 0x37B4, CRL_REG_LEN_08BIT, 0x0E }, + { 0x37B7, CRL_REG_LEN_08BIT, 0x84 }, + { 0x37B9, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3821, CRL_REG_LEN_08BIT, 0x04 }, + { 0x382A, CRL_REG_LEN_08BIT, 0x04 }, + { 0x382F, CRL_REG_LEN_08BIT, 0x84 }, + { 0x3835, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3837, CRL_REG_LEN_08BIT, 0x02 }, + { 0x383C, CRL_REG_LEN_08BIT, 0x88 }, + { 0x383D, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3845, CRL_REG_LEN_08BIT, 0x10 }, + + { 0x3D85, CRL_REG_LEN_08BIT, 0x16 },/* OTP_REGS */ + { 0x3D8C, CRL_REG_LEN_08BIT, 0x79 },/* OTP_REGS */ + { 0x3D8D, CRL_REG_LEN_08BIT, 0x7F },/* OTP_REGS */ + + { 0x4000, CRL_REG_LEN_08BIT, 0x17 },/* BLC_00 */ + + /* + * Magic Registers + */ + { 0x400F, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4011, CRL_REG_LEN_08BIT, 0xFB }, + { 0x4017, CRL_REG_LEN_08BIT, 0x08 }, + { 0x401A, CRL_REG_LEN_08BIT, 0x0E }, + { 0x4020, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4022, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4024, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4026, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4028, CRL_REG_LEN_08BIT, 0x08 }, + { 0x402A, CRL_REG_LEN_08BIT, 0x08 }, + { 0x402C, CRL_REG_LEN_08BIT, 0x08 }, + { 0x402E, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4030, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4032, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4034, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4036, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4038, CRL_REG_LEN_08BIT, 0x08 }, + { 0x403A, CRL_REG_LEN_08BIT, 0x08 }, + { 0x403C, CRL_REG_LEN_08BIT, 0x08 }, + { 0x403E, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4052, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4053, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4054, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4055, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4056, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4057, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4058, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4059, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4202, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4203, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4d00, CRL_REG_LEN_08BIT, 0x05 }, + { 0x4d01, CRL_REG_LEN_08BIT, 0x05 }, + { 0x4d02, CRL_REG_LEN_08BIT, 0xCA }, + { 0x4d03, CRL_REG_LEN_08BIT, 0xD7 }, + { 0x4d04, CRL_REG_LEN_08BIT, 0xAE }, + { 0x4d05, CRL_REG_LEN_08BIT, 0x13 }, + { 0x4813, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4815, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4837, CRL_REG_LEN_08BIT, 0x0D }, + { 0x486E, CRL_REG_LEN_08BIT, 0x03 }, + { 0x4B01, CRL_REG_LEN_08BIT, 0x80 }, + { 0x4B06, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4C01, CRL_REG_LEN_08BIT, 0xDF }, + + /* + * DSP control 
related registers required for RAW + * Sensor path + */ + { 0x5001, CRL_REG_LEN_08BIT, 0x40 }, + { 0x5002, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5003, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5004, CRL_REG_LEN_08BIT, 0x80 }, + { 0x5005, CRL_REG_LEN_08BIT, 0x00 }, + { 0x501D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x501F, CRL_REG_LEN_08BIT, 0x06 }, + { 0x5021, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5022, CRL_REG_LEN_08BIT, 0x13 }, + { 0x5058, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5200, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5209, CRL_REG_LEN_08BIT, 0x00 }, + { 0x520A, CRL_REG_LEN_08BIT, 0x80 }, + { 0x520B, CRL_REG_LEN_08BIT, 0x04 }, + { 0x520C, CRL_REG_LEN_08BIT, 0x01 }, + { 0x520E, CRL_REG_LEN_08BIT, 0x34 }, + { 0x5210, CRL_REG_LEN_08BIT, 0x10 }, + { 0x5211, CRL_REG_LEN_08BIT, 0xA0 }, + { 0x5280, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5292, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5C80, CRL_REG_LEN_08BIT, 0x05 }, + { 0x5C81, CRL_REG_LEN_08BIT, 0x90 }, + { 0x5C82, CRL_REG_LEN_08BIT, 0x09 }, + { 0x5C83, CRL_REG_LEN_08BIT, 0x5F }, + { 0x5D00, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4001, CRL_REG_LEN_08BIT, 0x60 }, /* BLC control */ + + /* + * Magic Registers + */ + { 0x560F, CRL_REG_LEN_08BIT, 0xFC }, + { 0x5610, CRL_REG_LEN_08BIT, 0xF0 }, + { 0x5611, CRL_REG_LEN_08BIT, 0x10 }, + { 0x562F, CRL_REG_LEN_08BIT, 0xFC }, + { 0x5630, CRL_REG_LEN_08BIT, 0xF0 }, + { 0x5631, CRL_REG_LEN_08BIT, 0x10 }, + { 0x564F, CRL_REG_LEN_08BIT, 0xFC }, + { 0x5650, CRL_REG_LEN_08BIT, 0xF0 }, + { 0x5651, CRL_REG_LEN_08BIT, 0x10 }, + { 0x566F, CRL_REG_LEN_08BIT, 0xFC }, + { 0x5670, CRL_REG_LEN_08BIT, 0xF0 }, + { 0x5671, CRL_REG_LEN_08BIT, 0x10 }, +}; + +static struct crl_register_write_rep ov13860_mode_13m[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x0D },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0x88 },/* Long Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x23 }, + { 0x372F, CRL_REG_LEN_08BIT, 0xA0 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x10 },/* h_output_size high 4208 x 3120 */ + { 0x3809, CRL_REG_LEN_08BIT, 0x70 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x0C },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x30 },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x11 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0x99 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x88 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x13 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x18 }, /* Magic */ + { 
0x4051, CRL_REG_LEN_08BIT, 0x03 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x04 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_mode_8m[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x0D },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0x88 },/* Long Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x23 }, + { 0x372F, CRL_REG_LEN_08BIT, 0xA0 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x0C },/* h_output_size high 3264 x 2448 */ + { 0x3809, CRL_REG_LEN_08BIT, 0xC0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x09 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x90 },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x11 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0x99 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x88 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x13 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x18 }, /* Magic */ + { 0x4051, CRL_REG_LEN_08BIT, 0x03 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x04 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_mode_4k2k[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x0D },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0x88 },/* Long Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x23 }, + { 0x372F, CRL_REG_LEN_08BIT, 0xA0 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x10 },/* h_output_size high 4096 x 2160 */ + { 0x3809, CRL_REG_LEN_08BIT, 0x00 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x08 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x70 },/* v_output_size low */ + { 0x3810, 
CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x11 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0x99 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x88 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x13 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x18 }, /* Magic */ + { 0x4051, CRL_REG_LEN_08BIT, 0x03 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x04 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_mode_uhd[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x0D },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0x88 },/* Long Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x23 }, + { 0x372F, CRL_REG_LEN_08BIT, 0xA0 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x0F },/* h_output_size high 3840 x 2160 */ + { 0x3809, CRL_REG_LEN_08BIT, 0x00 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x08 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x70 },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x11 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0x99 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x88 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x13 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x18 }, /* Magic */ + { 0x4051, CRL_REG_LEN_08BIT, 0x03 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x04 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_mode_6m[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x0D },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0x88 },/* Long 
Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x23 }, + { 0x372F, CRL_REG_LEN_08BIT, 0xA0 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x0C },/* h_output_size high 3264 x 1836 */ + { 0x3809, CRL_REG_LEN_08BIT, 0xC0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x07 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x2C },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x11 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x00 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0x99 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x88 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x13 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x18 }, /* Magic */ + { 0x4051, CRL_REG_LEN_08BIT, 0x03 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x04 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_mode_3m[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x06 },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0xB8 },/* Long Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x63 }, + { 0x372F, CRL_REG_LEN_08BIT, 0x90 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x08 },/* h_output_size high 2048 x 1536 */ + { 0x3809, CRL_REG_LEN_08BIT, 0x00 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x06 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x00 },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x31 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, 
CRL_REG_LEN_08BIT, 0x02 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x40 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0xD9 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x48 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x02 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x09 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x0C }, /* Magic */ + { 0x4051, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x02 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x71 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_mode_1952_1088[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x06 },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0xB8 },/* Long Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x63 }, + { 0x372F, CRL_REG_LEN_08BIT, 0x90 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x07 },/* h_output_size high 1952 x 1088 */ + { 0x3809, CRL_REG_LEN_08BIT, 0xA0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x04 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x40 },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x31 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x02 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x40 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0xD9 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x48 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x02 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x09 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x0C }, /* Magic */ + { 0x4051, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x02 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x71 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_mode_720[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x03 },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0x44 },/* Long Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x63 }, + { 0x372F, CRL_REG_LEN_08BIT, 0x90 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, 
CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x05 },/* h_output_size high 1280 x 720 */ + { 0x3809, CRL_REG_LEN_08BIT, 0x00 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x02 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0xD0 },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x31 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x02 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x40 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0xD9 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x48 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x02 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x09 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x0C }, /* Magic */ + { 0x4051, CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x02 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x71 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_mode_480[] = { + + /* + * Exposure & Gain + */ + { 0x3501, CRL_REG_LEN_08BIT, 0x03 },/* Long Exposure */ + { 0x3502, CRL_REG_LEN_08BIT, 0x44 },/* Long Exposure */ + + { 0x370A, CRL_REG_LEN_08BIT, 0x63 }, + { 0x372F, CRL_REG_LEN_08BIT, 0x90 }, + + /* + * Windowing + */ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x14 },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x10 },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0x8B },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0x43 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x02 },/* h_output_size high 640 x 480 */ + { 0x3809, CRL_REG_LEN_08BIT, 0x80 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x01 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0xE0 },/* v_output_size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* Manual horizontal window offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* Manual horizontal window offset low */ + { 0x3813, CRL_REG_LEN_08BIT, 0x04 },/* Manual vertical window offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x11 },/* Horizontal sub-sample odd inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x31 },/* Vertical sub-sample odd inc */ + { 0x383D, CRL_REG_LEN_08BIT, 0xFF },/* Vertical sub-sample odd inc */ + + { 0x3820, CRL_REG_LEN_08BIT, 0x02 },/* Binning */ + { 0x3842, CRL_REG_LEN_08BIT, 0x40 },/* Binning */ + { 0x5000, CRL_REG_LEN_08BIT, 0xD9 },/* Binning */ + + { 0x3836, CRL_REG_LEN_08BIT, 0x0C }, /* ablc_use_num */ + { 0x383C, CRL_REG_LEN_08BIT, 0x48 }, /* Boundary Pix num */ + + { 0x4008, CRL_REG_LEN_08BIT, 0x02 }, /* Magic */ + { 0x4009, CRL_REG_LEN_08BIT, 0x09 }, /* Magic */ + { 0x4019, CRL_REG_LEN_08BIT, 0x0C }, /* Magic */ + { 0x4051, 
CRL_REG_LEN_08BIT, 0x01 }, /* Magic */ + { 0x4066, CRL_REG_LEN_08BIT, 0x02 }, /* Magic */ + + { 0x5201, CRL_REG_LEN_08BIT, 0x71 }, /* Magic */ + { 0x5204, CRL_REG_LEN_08BIT, 0x00 }, /* Magic */ + { 0x5205, CRL_REG_LEN_08BIT, 0x80 }, /* Magic */ +}; + +static struct crl_register_write_rep ov13860_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep ov13860_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_arithmetic_ops ov13860_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov13860_hflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_dynamic_register_access ov13860_v_flip_regs[] = { + { + .address = 0x3820, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov13860_vflip_ops), + .ops = ov13860_vflip_ops, + .mask = 0x2, + }, +}; + +static struct crl_dynamic_register_access ov13860_h_flip_regs[] = { + { + .address = 0x3821, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov13860_hflip_ops), + .ops = ov13860_hflip_ops, + .mask = 0x2, + }, +}; + +struct crl_register_write_rep ov13860_poweroff_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_dynamic_register_access ov13860_ana_gain_global_regs[] = { + { + .address = 0x350A, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0x7ff, + }, +}; + +static struct crl_dynamic_register_access ov13860_exposure_regs[] = { + { + .address = 0x3501, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + } +}; + +static struct crl_dynamic_register_access ov13860_vblank_regs[] = { + { + .address = 0x380E, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access ov13860_hblank_regs[] = { + { + .address = 0x380C, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_sensor_detect_config ov13860_sensor_detect_regset[] = { + { + .reg = { 0x300A, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x300C, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration ov13860_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 600000000, + .bitsperpixel = 10, + .pixel_rate_csi = 150000000, + .pixel_rate_pa = 240000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(ov13860_pll_1200mbps), + .pll_regs = ov13860_pll_1200mbps, + }, + { + .input_clk = 24000000, + .op_sys_clk = 300000000, + .bitsperpixel = 10, + .pixel_rate_csi = 75000000, + .pixel_rate_pa = 240000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(ov13860_pll_600mbps), + .pll_regs = ov13860_pll_600mbps, + } +}; + +static struct crl_subdev_rect_rep ov13860_13m_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4208, + .out_rect.height = 3120, + }, +}; + +static struct 
crl_subdev_rect_rep ov13860_8m_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3280, + .out_rect.height = 2448, + }, +}; + +static struct crl_subdev_rect_rep ov13860_4k2k_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4096, + .out_rect.height = 2160, + }, +}; + +static struct crl_subdev_rect_rep ov13860_uhd_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3840, + .out_rect.height = 2160, + }, +}; + +static struct crl_subdev_rect_rep ov13860_6m_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3280, + .out_rect.height = 1836, + }, +}; + +static struct crl_subdev_rect_rep ov13860_3m_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 2048, + .out_rect.height = 1536, + }, +}; + +static struct crl_subdev_rect_rep ov13860_1952_1088_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1952, + .out_rect.height = 1088, + }, +}; + +static struct crl_subdev_rect_rep ov13860_720_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height 
= 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 720, + }, +}; + +static struct crl_subdev_rect_rep ov13860_480_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 4224, + .out_rect.height = 3120, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 4224, + .in_rect.height = 3120, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 640, + .out_rect.height = 480, + }, +}; + +static struct crl_mode_rep ov13860_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov13860_13m_rects), + .sd_rects = ov13860_13m_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4208, + .height = 3120, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_13m), + .mode_regs = ov13860_mode_13m, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13860_8m_rects), + .sd_rects = ov13860_8m_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3280, + .height = 2448, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_8m), + .mode_regs = ov13860_mode_8m, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13860_4k2k_rects), + .sd_rects = ov13860_4k2k_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 4096, + .height = 2160, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_4k2k), + .mode_regs = ov13860_mode_4k2k, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13860_uhd_rects), + .sd_rects = ov13860_uhd_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3840, + .height = 2160, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_uhd), + .mode_regs = ov13860_mode_uhd, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13860_6m_rects), + .sd_rects = ov13860_6m_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3280, + .height = 1836, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_6m), + .mode_regs = ov13860_mode_6m, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13860_3m_rects), + .sd_rects = ov13860_3m_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 2048, + .height = 1536, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_3m), + .mode_regs = ov13860_mode_3m, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13860_1952_1088_rects), + .sd_rects = ov13860_1952_1088_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 1952, + .height = 1088, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_1952_1088), + .mode_regs = ov13860_mode_1952_1088, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13860_720_rects), + .sd_rects = ov13860_720_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 1280, + .height = 720, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_720), + .mode_regs = ov13860_mode_720, + }, + { + .sd_rects_items = ARRAY_SIZE(ov13860_480_rects), + .sd_rects = ov13860_480_rects, + .binn_hor = 2, + .binn_vert = 2, + .scale_m = 1, + .width = 640, + .height = 480, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov13860_mode_480), + .mode_regs = 
ov13860_mode_480, + }, +}; + +static struct crl_sensor_subdev_config ov13860_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov13860 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov13860 pixel array", + }, +}; + +static struct crl_sensor_limits ov13860_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 4224, + .y_addr_max = 3120, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 32752, + .scaler_m_min = 16, + .scaler_m_max = 16, + .scaler_n_min = 16, + .scaler_n_max = 16, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data ov13860_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, +}; + +static struct crl_csi_data_fmt ov13860_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, +}; + +static struct crl_v4l2_ctrl ov13860_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items =
ARRAY_SIZE(ov13860_ana_gain_global_regs), + .regs = ov13860_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13860_exposure_regs), + .regs = ov13860_exposure_regs, + .dep_items = 0, /* FLL is changed automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13860_h_flip_regs), + .regs = ov13860_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13860_v_flip_regs), + .regs = ov13860_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VBLANK, + .name = "V4L2_CID_VBLANK", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13860_vblank_regs), + .regs = ov13860_vblank_regs, + .dep_items = 0, /* FLL changed automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HBLANK, + .name = "V4L2_CID_HBLANK", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov13860_hblank_regs), + .regs = ov13860_hblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +/* Power items; they are enabled in the order they are listed here */ +static struct crl_power_seq_entity ov13860_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + }, +}; + +struct crl_sensor_configuration ov13860_crl_configuration = { + + .power_items = ARRAY_SIZE(ov13860_power_items), + .power_entities = ov13860_power_items, + + .powerup_regs_items = ARRAY_SIZE(ov13860_powerup_regset), + .powerup_regs = ov13860_powerup_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(ov13860_sensor_detect_regset), + .id_regs = ov13860_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov13860_sensor_subdevs), + .subdevs = ov13860_sensor_subdevs, + + .sensor_limits =
&ov13860_sensor_limits, + + .pll_config_items = ARRAY_SIZE(ov13860_pll_configurations), + .pll_configs = ov13860_pll_configurations, + + .modes_items = ARRAY_SIZE(ov13860_modes), + .modes = ov13860_modes, + + .streamon_regs_items = ARRAY_SIZE(ov13860_streamon_regs), + .streamon_regs = ov13860_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(ov13860_streamoff_regs), + .streamoff_regs = ov13860_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ov13860_v4l2_ctrls), + .v4l2_ctrl_bank = ov13860_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov13860_crl_csi_data_fmt), + .csi_fmts = ov13860_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov13860_flip_configurations), + .flip_data = ov13860_flip_configurations, +}; + +#endif /* __CRLMODULE_OV13860_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov2740_configuration.h b/drivers/media/i2c/crlmodule/crl_ov2740_configuration.h new file mode 100644 index 000000000000..f6c56f31c354 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov2740_configuration.h @@ -0,0 +1,804 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Roy Yang + * + */ + +#ifndef __CRLMODULE_OV2740_CONFIGURATION_H_ +#define __CRLMODULE_OV2740_CONFIGURATION_H_ + +#include "crlmodule-nvm.h" +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep ov2740_powerup_regset[] = { + /* Reset */ + {0x0103, CRL_REG_LEN_08BIT, 0x01}, + {0x0302, CRL_REG_LEN_08BIT, 0x4b},/* 26;1e */ + {0x030d, CRL_REG_LEN_08BIT, 0x4b},/* 26;1e */ + {0x030e, CRL_REG_LEN_08BIT, 0x02}, + {0x030a, CRL_REG_LEN_08BIT, 0x01}, + {0x0312, CRL_REG_LEN_08BIT, 0x11},/* 01 */ + {0x3000, CRL_REG_LEN_08BIT, 0x00}, + {0x3018, CRL_REG_LEN_08BIT, 0x32},/* 0x32 = 2-lane, 0x12 = 1-lane */ + {0x3031, CRL_REG_LEN_08BIT, 0x0a}, + {0x3080, CRL_REG_LEN_08BIT, 0x08}, + {0x3083, CRL_REG_LEN_08BIT, 0xB4}, + {0x3103, CRL_REG_LEN_08BIT, 0x00}, + {0x3104, CRL_REG_LEN_08BIT, 0x01}, + {0x3106, CRL_REG_LEN_08BIT, 0x01}, + {0x3500, CRL_REG_LEN_08BIT, 0x00}, + {0x3501, CRL_REG_LEN_08BIT, 0x44}, + {0x3502, CRL_REG_LEN_08BIT, 0x40}, + {0x3503, CRL_REG_LEN_08BIT, 0x88}, + {0x3507, CRL_REG_LEN_08BIT, 0x00}, + {0x3508, CRL_REG_LEN_08BIT, 0x00}, + {0x3509, CRL_REG_LEN_08BIT, 0x80}, + {0x350c, CRL_REG_LEN_08BIT, 0x00}, + {0x350d, CRL_REG_LEN_08BIT, 0x80}, + {0x3510, CRL_REG_LEN_08BIT, 0x00}, + {0x3511, CRL_REG_LEN_08BIT, 0x00}, + {0x3512, CRL_REG_LEN_08BIT, 0x20}, + {0x3632, CRL_REG_LEN_08BIT, 0x00}, + {0x3633, CRL_REG_LEN_08BIT, 0x10}, + {0x3634, CRL_REG_LEN_08BIT, 0x10}, + {0x3635, CRL_REG_LEN_08BIT, 0x10}, + {0x3645, CRL_REG_LEN_08BIT, 0x13}, + {0x3646, CRL_REG_LEN_08BIT, 0x81}, + {0x3636, CRL_REG_LEN_08BIT, 0x10}, + {0x3651, CRL_REG_LEN_08BIT, 0x0a}, + {0x3656, CRL_REG_LEN_08BIT, 0x02}, + {0x3659, CRL_REG_LEN_08BIT, 0x04}, + {0x365a, CRL_REG_LEN_08BIT, 0xda}, + {0x365b, CRL_REG_LEN_08BIT, 0xa2}, + {0x365c, CRL_REG_LEN_08BIT, 0x04}, + {0x365d, CRL_REG_LEN_08BIT, 0x1d}, + {0x365e, CRL_REG_LEN_08BIT, 0x1a}, + {0x3662, CRL_REG_LEN_08BIT, 0xd7}, + {0x3667, CRL_REG_LEN_08BIT, 0x78}, + {0x3669, CRL_REG_LEN_08BIT, 0x0a}, + {0x366a, CRL_REG_LEN_08BIT, 0x92}, + {0x3700, CRL_REG_LEN_08BIT, 0x54}, + {0x3702, CRL_REG_LEN_08BIT, 0x10}, + {0x3706, CRL_REG_LEN_08BIT, 0x42}, + {0x3709, CRL_REG_LEN_08BIT, 0x30}, + {0x370b, CRL_REG_LEN_08BIT, 0xc2}, + {0x3714, CRL_REG_LEN_08BIT, 0x63}, + {0x3715, CRL_REG_LEN_08BIT, 0x01}, + {0x3716, CRL_REG_LEN_08BIT, 0x00}, + {0x371a, CRL_REG_LEN_08BIT, 0x3e}, + {0x3732, CRL_REG_LEN_08BIT, 0x0e}, + {0x3733, CRL_REG_LEN_08BIT, 0x10}, +
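+	/*
+	 * Editor's note (assumed decoding, not stated in this patch):
+	 * registers 0x3500-0x3502 above form one 24-bit exposure value, here
+	 * 0x004440. With the 4-bit left shift applied by ov2740_exposure_regs
+	 * below, the exposure in lines is 0x004440 >> 4 = 0x444 = 1092, i.e.
+	 * exactly the full output height of the single 1932x1092 mode.
+	 */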
{0x375f, CRL_REG_LEN_08BIT, 0x0e}, + {0x3768, CRL_REG_LEN_08BIT, 0x30}, + {0x3769, CRL_REG_LEN_08BIT, 0x44}, + {0x376a, CRL_REG_LEN_08BIT, 0x22}, + {0x377b, CRL_REG_LEN_08BIT, 0x20}, + {0x377c, CRL_REG_LEN_08BIT, 0x00}, + {0x377d, CRL_REG_LEN_08BIT, 0x0c}, + {0x3798, CRL_REG_LEN_08BIT, 0x00}, + {0x37a1, CRL_REG_LEN_08BIT, 0x55}, + {0x37a8, CRL_REG_LEN_08BIT, 0x6d}, + {0x37c2, CRL_REG_LEN_08BIT, 0x04}, + {0x37c5, CRL_REG_LEN_08BIT, 0x00}, + {0x37c8, CRL_REG_LEN_08BIT, 0x00}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0x00}, + {0x3804, CRL_REG_LEN_08BIT, 0x07}, + {0x3805, CRL_REG_LEN_08BIT, 0x8f}, + {0x3806, CRL_REG_LEN_08BIT, 0x04}, + {0x3807, CRL_REG_LEN_08BIT, 0x47}, + {0x3808, CRL_REG_LEN_08BIT, 0x07}, + {0x3809, CRL_REG_LEN_08BIT, 0x88}, + {0x380a, CRL_REG_LEN_08BIT, 0x04}, + {0x380b, CRL_REG_LEN_08BIT, 0x40}, + {0x380c, CRL_REG_LEN_08BIT, 0x08}, + {0x380d, CRL_REG_LEN_08BIT, 0x70}, + {0x380e, CRL_REG_LEN_08BIT, 0x04}, + {0x380f, CRL_REG_LEN_08BIT, 0x60}, + {0x3810, CRL_REG_LEN_08BIT, 0x00}, + {0x3811, CRL_REG_LEN_08BIT, 0x04}, + {0x3812, CRL_REG_LEN_08BIT, 0x00}, + {0x3813, CRL_REG_LEN_08BIT, 0x05}, + {0x3814, CRL_REG_LEN_08BIT, 0x01}, + {0x3815, CRL_REG_LEN_08BIT, 0x01}, + {0x3820, CRL_REG_LEN_08BIT, 0x80}, + {0x3821, CRL_REG_LEN_08BIT, 0x46}, + {0x3822, CRL_REG_LEN_08BIT, 0x84}, + {0x3829, CRL_REG_LEN_08BIT, 0x00}, + {0x382a, CRL_REG_LEN_08BIT, 0x01}, + {0x382b, CRL_REG_LEN_08BIT, 0x01}, + {0x3830, CRL_REG_LEN_08BIT, 0x04}, + {0x3836, CRL_REG_LEN_08BIT, 0x01}, + {0x3837, CRL_REG_LEN_08BIT, 0x08}, + {0x3839, CRL_REG_LEN_08BIT, 0x01}, + {0x383a, CRL_REG_LEN_08BIT, 0x00}, + {0x383b, CRL_REG_LEN_08BIT, 0x08}, + {0x383c, CRL_REG_LEN_08BIT, 0x00}, + {0x3f0b, CRL_REG_LEN_08BIT, 0x00}, + {0x4001, CRL_REG_LEN_08BIT, 0x20}, + {0x4009, CRL_REG_LEN_08BIT, 0x07}, + {0x4003, CRL_REG_LEN_08BIT, 0x10}, + {0x4010, CRL_REG_LEN_08BIT, 0xe0}, + {0x4016, CRL_REG_LEN_08BIT, 0x00}, + {0x4017, CRL_REG_LEN_08BIT, 0x10}, + {0x4044, CRL_REG_LEN_08BIT, 0x02}, + {0x4304, CRL_REG_LEN_08BIT, 0x08}, + {0x4307, CRL_REG_LEN_08BIT, 0x30}, + {0x4320, CRL_REG_LEN_08BIT, 0x80}, + {0x4322, CRL_REG_LEN_08BIT, 0x00}, + {0x4323, CRL_REG_LEN_08BIT, 0x00}, + {0x4324, CRL_REG_LEN_08BIT, 0x00}, + {0x4325, CRL_REG_LEN_08BIT, 0x00}, + {0x4326, CRL_REG_LEN_08BIT, 0x00}, + {0x4327, CRL_REG_LEN_08BIT, 0x00}, + {0x4328, CRL_REG_LEN_08BIT, 0x00}, + {0x4329, CRL_REG_LEN_08BIT, 0x00}, + {0x432c, CRL_REG_LEN_08BIT, 0x03}, + {0x432d, CRL_REG_LEN_08BIT, 0x81}, + {0x4501, CRL_REG_LEN_08BIT, 0x84}, + {0x4502, CRL_REG_LEN_08BIT, 0x40}, + {0x4503, CRL_REG_LEN_08BIT, 0x18}, + {0x4504, CRL_REG_LEN_08BIT, 0x04}, + {0x4508, CRL_REG_LEN_08BIT, 0x02}, + {0x4601, CRL_REG_LEN_08BIT, 0x10}, + {0x4800, CRL_REG_LEN_08BIT, 0x00}, + {0x4816, CRL_REG_LEN_08BIT, 0x52}, + {0x4837, CRL_REG_LEN_08BIT, 0x16}, + {0x5000, CRL_REG_LEN_08BIT, 0x7f}, + {0x5001, CRL_REG_LEN_08BIT, 0x00}, + {0x5005, CRL_REG_LEN_08BIT, 0x38}, + {0x501e, CRL_REG_LEN_08BIT, 0x0d}, + {0x5040, CRL_REG_LEN_08BIT, 0x00}, + {0x5901, CRL_REG_LEN_08BIT, 0x00}, + {0x3800, CRL_REG_LEN_08BIT, 0x00}, + {0x3801, CRL_REG_LEN_08BIT, 0x00}, + {0x3802, CRL_REG_LEN_08BIT, 0x00}, + {0x3803, CRL_REG_LEN_08BIT, 0x00}, + {0x3804, CRL_REG_LEN_08BIT, 0x07}, + {0x3805, CRL_REG_LEN_08BIT, 0x8f}, + {0x3806, CRL_REG_LEN_08BIT, 0x04}, + {0x3807, CRL_REG_LEN_08BIT, 0x47}, + {0x3808, CRL_REG_LEN_08BIT, 0x07}, + {0x3809, CRL_REG_LEN_08BIT, 0x8c}, + {0x380a, CRL_REG_LEN_08BIT, 0x04}, + {0x380b, CRL_REG_LEN_08BIT, 0x44}, + 
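+	/*
+	 * Editor's note: the window registers are written as big-endian byte
+	 * pairs, e.g. h_output_size 0x3808/0x3809 = 0x07/0x8c -> 0x078c = 1932
+	 * and v_output_size 0x380a/0x380b = 0x04/0x44 -> 0x0444 = 1092, which
+	 * overrides the earlier 0x3808-0x380b values and matches the
+	 * 1932x1092 native mode declared below.
+	 */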
{0x3810, CRL_REG_LEN_08BIT, 0x00}, + {0x3811, CRL_REG_LEN_08BIT, 0x00},/* 00 */ + {0x3812, CRL_REG_LEN_08BIT, 0x00}, + {0x3813, CRL_REG_LEN_08BIT, 0x03},/* 00 */ + {0x4003, CRL_REG_LEN_08BIT, 0x40},/* set Black level to 0x40 */ +}; + +static struct crl_register_write_rep ov2740_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep ov2740_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_register_write_rep ov2740_data_fmt_width10[] = { + { 0x3031, CRL_REG_LEN_08BIT, 0x0a } +}; + +static struct crl_arithmetic_ops ov2740_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov2740_hflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov2740_exposure_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 4, + }, +}; + +static struct crl_dynamic_register_access ov2740_v_flip_regs[] = { + { + .address = 0x3820, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov2740_vflip_ops), + .ops = ov2740_vflip_ops, + .mask = 0x1, + }, +}; + +static struct crl_dynamic_register_access ov2740_h_flip_regs[] = { + { + .address = 0x3821, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov2740_hflip_ops), + .ops = ov2740_hflip_ops, + .mask = 0x1, + }, +}; + +static struct crl_dynamic_register_access ov2740_dig_gain_regs[] = { + { + .address = 0x500A, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, + { + .address = 0x500C, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, + { + .address = 0x500E, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +struct crl_register_write_rep ov2740_poweroff_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_dynamic_register_access ov2740_ana_gain_global_regs[] = { + { + .address = 0x3508, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0x7ff, + }, +}; + +static struct crl_dynamic_register_access ov2740_exposure_regs[] = { + { + .address = 0x3500, + .len = CRL_REG_LEN_24BIT, + .ops_items = ARRAY_SIZE(ov2740_exposure_ops), + .ops = ov2740_exposure_ops, + .mask = 0x0ffff0, + }, +}; + +static struct crl_dynamic_register_access ov2740_vblank_regs[] = { + { + .address = 0x380E, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0x7fff, + }, +}; + +static struct crl_dynamic_register_access ov2740_hblank_regs[] = { + { + .address = 0x380C, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_sensor_detect_config ov2740_sensor_detect_regset[] = { + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x300C, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration ov2740_pll_configurations[] = { + { + .input_clk = 19200000, + .op_sys_clk = 72000000, + .bitsperpixel = 10, + .pixel_rate_csi = 72000000, + .pixel_rate_pa = 72000000, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = 0, + }, + +}; + +static struct crl_subdev_rect_rep ov2740_1932x1092_rects_native[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1932, + .in_rect.height = 1092, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1932, + 
.out_rect.height = 1092, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1932, + .in_rect.height = 1092, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1932, + .out_rect.height = 1092, + }, +}; + +static struct crl_mode_rep ov2740_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov2740_1932x1092_rects_native), + .sd_rects = ov2740_1932x1092_rects_native, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1932, + .height = 1092, + .min_llp = 2160, + .min_fll = 1120, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = 0, + }, +}; + +static struct crl_sensor_subdev_config ov2740_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov2740 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov2740 pixel array", + }, +}; + +static struct crl_sensor_limits ov2740_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1932, + .y_addr_max = 1092, + .min_frame_length_lines = 1120, + .max_frame_length_lines = 32767, + .min_line_length_pixels = 2160, + .max_line_length_pixels = 65535, +}; + +static struct crl_flip_data ov2740_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, +}; + +static struct crl_csi_data_fmt ov2740_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 1, + .regs = ov2740_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov2740_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov2740_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov2740_data_fmt_width10, + }, +}; + +static struct crl_v4l2_ctrl ov2740_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = 
CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 128, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2740_ana_gain_global_regs), + .regs = ov2740_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2740_exposure_regs), + .regs = ov2740_exposure_regs, + .dep_items = 0, /* FLL changes automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2740_h_flip_regs), + .regs = ov2740_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2740_v_flip_regs), + .regs = ov2740_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame Length Lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1120, + .data.std_data.max = 32767, + .data.std_data.step = 1, + .data.std_data.def = 1120, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2740_vblank_regs), + .regs = ov2740_vblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 2160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 2160, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2740_hblank_regs), + .regs = ov2740_hblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_GAIN, + .name = "Digital Gain", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, +
.data.std_data.max = 4095, + .data.std_data.step = 1, + .data.std_data.def = 1024, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2740_dig_gain_regs), + .regs = ov2740_dig_gain_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_WDR_MODE, + .name = "V4L2_CID_WDR_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +#ifdef CONFIG_VIDEO_CRLMODULE_OTP_VALIDATE +static struct crl_register_write_rep ov2740_otp_preop_regset[] = { + /* Sensor OTP module check */ + { 0x5000, CRL_REG_LEN_08BIT, 0x5f }, + /* Manual mode, program disable */ + { 0x3D84, CRL_REG_LEN_08BIT, 0xC0 }, + /* Manual OTP start address for access */ + { 0x3D88, CRL_REG_LEN_08BIT, 0x70}, + { 0x3D89, CRL_REG_LEN_08BIT, 0x10}, + /* Manual OTP end address for access */ + { 0x3D8A, CRL_REG_LEN_08BIT, 0x70}, + { 0x3D8B, CRL_REG_LEN_08BIT, 0x11}, + /* Streaming on */ + { 0x0100, CRL_REG_LEN_08BIT, 0x01 }, + /* OTP load enable */ + { 0x3D81, CRL_REG_LEN_08BIT, 0x31 }, + /* Wait for the data to load into the buffer */ + { 0x0000, CRL_REG_LEN_DELAY, 0x14 }, +}; + +static struct crl_register_write_rep ov2740_otp_postop_regset[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, /* Stop streaming */ +}; + +static struct crl_register_write_rep ov2740_otp_mode_regset[] = { + { 0x5000, CRL_REG_LEN_08BIT, 0x7F }, /* ISP control */ + { 0x5040, CRL_REG_LEN_08BIT, 0xA8 }, /* Set test mode */ +}; + +struct crl_register_read_rep ov2740_sensor_otp_read_regset[] = { + { 0x7010, CRL_REG_LEN_16BIT, 0x0000ffff }, +}; +#endif + +static struct crl_arithmetic_ops ov2740_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops ov2740_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc ov2740_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(ov2740_frame_desc_width_ops), + .ops = ov2740_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(ov2740_frame_desc_height_ops), + .ops = ov2740_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, +}; + +/* Power items; they are enabled in the order listed here */ +static struct crl_power_seq_entity ov2740_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 19200000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .undo_val = 0, + .delay = 1000, + }, +}; + +static struct crl_sensor_configuration ov2740_crl_configuration = { + + .power_items = ARRAY_SIZE(ov2740_power_items), + .power_entities = ov2740_power_items, + + .powerup_regs_items = ARRAY_SIZE(ov2740_powerup_regset), + .powerup_regs = ov2740_powerup_regset, + +
.poweroff_regs_items = 0, + .poweroff_regs = 0, + + + .id_reg_items = ARRAY_SIZE(ov2740_sensor_detect_regset), + .id_regs = ov2740_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov2740_sensor_subdevs), + .subdevs = ov2740_sensor_subdevs, + + .sensor_limits = &ov2740_sensor_limits, + + .pll_config_items = ARRAY_SIZE(ov2740_pll_configurations), + .pll_configs = ov2740_pll_configurations, + + .modes_items = ARRAY_SIZE(ov2740_modes), + .modes = ov2740_modes, + + .streamon_regs_items = ARRAY_SIZE(ov2740_streamon_regs), + .streamon_regs = ov2740_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(ov2740_streamoff_regs), + .streamoff_regs = ov2740_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ov2740_v4l2_ctrls), + .v4l2_ctrl_bank = ov2740_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov2740_crl_csi_data_fmt), + .csi_fmts = ov2740_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov2740_flip_configurations), + .flip_data = ov2740_flip_configurations, + + .crl_nvm_info.nvm_flags = CRL_NVM_ADDR_MODE_16BIT, + .crl_nvm_info.nvm_preop_regs_items = 0, + .crl_nvm_info.nvm_postop_regs_items = 0, + .crl_nvm_info.nvm_blobs_items = 0, + +#ifdef CONFIG_VIDEO_CRLMODULE_OTP_VALIDATE + .crl_otp_info.otp_preop_regs_items = + ARRAY_SIZE(ov2740_otp_preop_regset), + .crl_otp_info.otp_preop_regs = ov2740_otp_preop_regset, + .crl_otp_info.otp_postop_regs_items = + ARRAY_SIZE(ov2740_otp_postop_regset), + .crl_otp_info.otp_postop_regs = ov2740_otp_postop_regset, + + .crl_otp_info.otp_mode_regs_items = + ARRAY_SIZE(ov2740_otp_mode_regset), + .crl_otp_info.otp_mode_regs = ov2740_otp_mode_regset, + + .crl_otp_info.otp_read_regs_items = + ARRAY_SIZE(ov2740_sensor_otp_read_regset), + .crl_otp_info.otp_read_regs = ov2740_sensor_otp_read_regset, + + .crl_otp_info.otp_id = 0x5168, /* Chicony module */ +#endif + .frame_desc_entries = ARRAY_SIZE(ov2740_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = ov2740_frame_desc, +}; + +#endif /* __CRLMODULE_OV2740_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov2775_configuration.h b/drivers/media/i2c/crlmodule/crl_ov2775_configuration.h new file mode 100644 index 000000000000..282eb7ed519b --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov2775_configuration.h @@ -0,0 +1,2807 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Intel Corporation + * + * Author: Leo Zhao + * + */ + +#ifndef __CRLMODULE_OV2775_CONFIGURATION_H_ +#define __CRLMODULE_OV2775_CONFIGURATION_H_ + +#include "crlmodule-nvm.h" +#include "crlmodule-sensor-ds.h" + +#define OV2775_CAPTURE_MODE_MAX 4 + +/* Default: linear HCG, 1928x1088, 30 fps, mipi960 */ +static struct crl_register_write_rep ov2775_onetime_init_regset[] = { + + /* The following part is still missing: + * 64 0100 43 + * 64 0101 01 + * 64 1000 0f + * 64 0102 81 + * 5c 0001 ad + * 5c 0002 ad + */ + {0x3013, CRL_REG_LEN_08BIT, 0x01}, + {0x0000, CRL_REG_LEN_DELAY, 0x0a}, + {0x3000, CRL_REG_LEN_08BIT, 0x02}, + {0x3001, CRL_REG_LEN_08BIT, 0x28}, + {0x3002, CRL_REG_LEN_08BIT, 0x03}, + {0x3003, CRL_REG_LEN_08BIT, 0x01}, + {0x3004, CRL_REG_LEN_08BIT, 0x02}, + {0x3005, CRL_REG_LEN_08BIT, 0x26}, + {0x3006, CRL_REG_LEN_08BIT, 0x00}, + {0x3007, CRL_REG_LEN_08BIT, 0x07}, + {0x3008, CRL_REG_LEN_08BIT, 0x01}, + {0x3009, CRL_REG_LEN_08BIT, 0x00}, + {0x300c, CRL_REG_LEN_08BIT, 0x6c}, + {0x300e, CRL_REG_LEN_08BIT, 0x80}, + {0x300f, CRL_REG_LEN_08BIT, 0x00}, + {0x3012, CRL_REG_LEN_08BIT, 0x00}, + {0x3013, CRL_REG_LEN_08BIT, 0x00}, + {0x3014, CRL_REG_LEN_08BIT, 0xc4}, + {0x3015, CRL_REG_LEN_08BIT, 0x00}, + {0x3017,
CRL_REG_LEN_08BIT, 0x00}, + {0x3018, CRL_REG_LEN_08BIT, 0x00}, + {0x3019, CRL_REG_LEN_08BIT, 0x00}, + {0x301a, CRL_REG_LEN_08BIT, 0x00}, + {0x301b, CRL_REG_LEN_08BIT, 0x0e}, + {0x301e, CRL_REG_LEN_08BIT, 0x17}, + {0x301f, CRL_REG_LEN_08BIT, 0xe1}, + {0x3030, CRL_REG_LEN_08BIT, 0x02}, + {0x3031, CRL_REG_LEN_08BIT, 0x62}, + {0x3032, CRL_REG_LEN_08BIT, 0xf0}, + {0x3033, CRL_REG_LEN_08BIT, 0x30}, + {0x3034, CRL_REG_LEN_08BIT, 0x3f}, + {0x3035, CRL_REG_LEN_08BIT, 0x5f}, + {0x3036, CRL_REG_LEN_08BIT, 0x02}, + {0x3037, CRL_REG_LEN_08BIT, 0x9f}, + {0x3038, CRL_REG_LEN_08BIT, 0x04}, + {0x3039, CRL_REG_LEN_08BIT, 0xb7}, + {0x303a, CRL_REG_LEN_08BIT, 0x04}, + {0x303b, CRL_REG_LEN_08BIT, 0x07}, + {0x303c, CRL_REG_LEN_08BIT, 0xf0}, + {0x303d, CRL_REG_LEN_08BIT, 0x00}, + {0x303e, CRL_REG_LEN_08BIT, 0x0b}, + {0x303f, CRL_REG_LEN_08BIT, 0xe3}, + {0x3040, CRL_REG_LEN_08BIT, 0xf3}, + {0x3041, CRL_REG_LEN_08BIT, 0x29}, + {0x3042, CRL_REG_LEN_08BIT, 0xf6}, + {0x3043, CRL_REG_LEN_08BIT, 0x65}, + {0x3044, CRL_REG_LEN_08BIT, 0x06}, + {0x3045, CRL_REG_LEN_08BIT, 0x0f}, + {0x3046, CRL_REG_LEN_08BIT, 0x59}, + {0x3047, CRL_REG_LEN_08BIT, 0x07}, + {0x3048, CRL_REG_LEN_08BIT, 0x82}, + {0x3049, CRL_REG_LEN_08BIT, 0xcf}, + {0x304a, CRL_REG_LEN_08BIT, 0x12}, + {0x304b, CRL_REG_LEN_08BIT, 0x40}, + {0x304c, CRL_REG_LEN_08BIT, 0x33}, + {0x304d, CRL_REG_LEN_08BIT, 0xa4}, + {0x304e, CRL_REG_LEN_08BIT, 0x0b}, + {0x304f, CRL_REG_LEN_08BIT, 0x3d}, + {0x3050, CRL_REG_LEN_08BIT, 0x10}, + {0x3060, CRL_REG_LEN_08BIT, 0x00}, + {0x3061, CRL_REG_LEN_08BIT, 0x64}, + {0x3062, CRL_REG_LEN_08BIT, 0x00}, + {0x3063, CRL_REG_LEN_08BIT, 0xe4}, + {0x3066, CRL_REG_LEN_08BIT, 0x80}, + {0x3080, CRL_REG_LEN_08BIT, 0x00}, + {0x3081, CRL_REG_LEN_08BIT, 0x00}, + {0x3082, CRL_REG_LEN_08BIT, 0x01}, + {0x3083, CRL_REG_LEN_08BIT, 0xe3}, + {0x3084, CRL_REG_LEN_08BIT, 0x06}, + {0x3085, CRL_REG_LEN_08BIT, 0x00}, + {0x3086, CRL_REG_LEN_08BIT, 0x10}, + {0x3087, CRL_REG_LEN_08BIT, 0x10}, + {0x3089, CRL_REG_LEN_08BIT, 0x00}, + {0x308a, CRL_REG_LEN_08BIT, 0x01}, + {0x3093, CRL_REG_LEN_08BIT, 0x00}, + {0x30a0, CRL_REG_LEN_08BIT, 0x00}, + {0x30a1, CRL_REG_LEN_08BIT, 0x00}, + {0x30a2, CRL_REG_LEN_08BIT, 0x00}, + {0x30a3, CRL_REG_LEN_08BIT, 0x00}, + {0x30a4, CRL_REG_LEN_08BIT, 0x07}, + {0x30a5, CRL_REG_LEN_08BIT, 0x8f}, + {0x30a6, CRL_REG_LEN_08BIT, 0x04}, + {0x30a7, CRL_REG_LEN_08BIT, 0x47}, + {0x30a8, CRL_REG_LEN_08BIT, 0x00}, + {0x30a9, CRL_REG_LEN_08BIT, 0x00}, + {0x30aa, CRL_REG_LEN_08BIT, 0x00}, + {0x30ab, CRL_REG_LEN_08BIT, 0x00}, + {0x30ac, CRL_REG_LEN_08BIT, 0x07}, + {0x30ad, CRL_REG_LEN_08BIT, 0x90}, + {0x30ae, CRL_REG_LEN_08BIT, 0x04}, + {0x30af, CRL_REG_LEN_08BIT, 0x48}, + {0x30b0, CRL_REG_LEN_08BIT, 0x04}, + {0x30b1, CRL_REG_LEN_08BIT, 0x7e}, + {0x30b2, CRL_REG_LEN_08BIT, 0x04}, + {0x30b3, CRL_REG_LEN_08BIT, 0x65}, + {0x30b4, CRL_REG_LEN_08BIT, 0x00}, + {0x30b5, CRL_REG_LEN_08BIT, 0x00}, + {0x30b6, CRL_REG_LEN_08BIT, 0x00}, + {0x30b7, CRL_REG_LEN_08BIT, 0x10}, + {0x30b8, CRL_REG_LEN_08BIT, 0x00}, + {0x30b9, CRL_REG_LEN_08BIT, 0x02}, + {0x30ba, CRL_REG_LEN_08BIT, 0x10}, + {0x30bb, CRL_REG_LEN_08BIT, 0x00}, + {0x30bc, CRL_REG_LEN_08BIT, 0x00}, + {0x30bd, CRL_REG_LEN_08BIT, 0x03}, + {0x30be, CRL_REG_LEN_08BIT, 0x5c}, + {0x30bf, CRL_REG_LEN_08BIT, 0x00}, + {0x30c0, CRL_REG_LEN_08BIT, 0x05}, + {0x30c1, CRL_REG_LEN_08BIT, 0x00}, + {0x30c2, CRL_REG_LEN_08BIT, 0x20}, + {0x30c3, CRL_REG_LEN_08BIT, 0x00}, + {0x30c4, CRL_REG_LEN_08BIT, 0x4a}, + {0x30c5, CRL_REG_LEN_08BIT, 0x00}, + {0x30c7, CRL_REG_LEN_08BIT, 0x00}, + {0x30c8, CRL_REG_LEN_08BIT, 0x00}, + {0x30d1, 
CRL_REG_LEN_08BIT, 0x00}, + {0x30d2, CRL_REG_LEN_08BIT, 0x00}, + {0x30d3, CRL_REG_LEN_08BIT, 0x80}, + {0x30d4, CRL_REG_LEN_08BIT, 0x00}, + {0x30d9, CRL_REG_LEN_08BIT, 0x09}, + {0x30da, CRL_REG_LEN_08BIT, 0x64}, + {0x30dd, CRL_REG_LEN_08BIT, 0x00}, + {0x30de, CRL_REG_LEN_08BIT, 0x16}, + {0x30df, CRL_REG_LEN_08BIT, 0x00}, + {0x30e0, CRL_REG_LEN_08BIT, 0x17}, + {0x30e1, CRL_REG_LEN_08BIT, 0x00}, + {0x30e2, CRL_REG_LEN_08BIT, 0x18}, + {0x30e3, CRL_REG_LEN_08BIT, 0x10}, + {0x30e4, CRL_REG_LEN_08BIT, 0x04}, + {0x30e5, CRL_REG_LEN_08BIT, 0x00}, + {0x30e6, CRL_REG_LEN_08BIT, 0x00}, + {0x30e7, CRL_REG_LEN_08BIT, 0x00}, + {0x30e8, CRL_REG_LEN_08BIT, 0x00}, + {0x30e9, CRL_REG_LEN_08BIT, 0x00}, + {0x30ea, CRL_REG_LEN_08BIT, 0x00}, + {0x30eb, CRL_REG_LEN_08BIT, 0x00}, + {0x30ec, CRL_REG_LEN_08BIT, 0x00}, + {0x30ed, CRL_REG_LEN_08BIT, 0x00}, + {0x3101, CRL_REG_LEN_08BIT, 0x00}, + {0x3102, CRL_REG_LEN_08BIT, 0x00}, + {0x3103, CRL_REG_LEN_08BIT, 0x00}, + {0x3104, CRL_REG_LEN_08BIT, 0x00}, + {0x3105, CRL_REG_LEN_08BIT, 0x8c}, + {0x3106, CRL_REG_LEN_08BIT, 0x87}, + {0x3107, CRL_REG_LEN_08BIT, 0xc0}, + {0x3108, CRL_REG_LEN_08BIT, 0x9d}, + {0x3109, CRL_REG_LEN_08BIT, 0x8d}, + {0x310a, CRL_REG_LEN_08BIT, 0x8d}, + {0x310b, CRL_REG_LEN_08BIT, 0x6a}, + {0x310c, CRL_REG_LEN_08BIT, 0x3a}, + {0x310d, CRL_REG_LEN_08BIT, 0x5a}, + {0x310e, CRL_REG_LEN_08BIT, 0x00}, + {0x3120, CRL_REG_LEN_08BIT, 0x00}, + {0x3121, CRL_REG_LEN_08BIT, 0x00}, + {0x3122, CRL_REG_LEN_08BIT, 0x00}, + {0x3123, CRL_REG_LEN_08BIT, 0xf0}, + {0x3124, CRL_REG_LEN_08BIT, 0x00}, + {0x3125, CRL_REG_LEN_08BIT, 0x70}, + {0x3126, CRL_REG_LEN_08BIT, 0x1f}, + {0x3127, CRL_REG_LEN_08BIT, 0x0f}, + {0x3128, CRL_REG_LEN_08BIT, 0x00}, + {0x3129, CRL_REG_LEN_08BIT, 0x3a}, + {0x312a, CRL_REG_LEN_08BIT, 0x02}, + {0x312b, CRL_REG_LEN_08BIT, 0x0f}, + {0x312c, CRL_REG_LEN_08BIT, 0x00}, + {0x312d, CRL_REG_LEN_08BIT, 0x0f}, + {0x312e, CRL_REG_LEN_08BIT, 0x1d}, + {0x312f, CRL_REG_LEN_08BIT, 0x00}, + {0x3130, CRL_REG_LEN_08BIT, 0x00}, + {0x3131, CRL_REG_LEN_08BIT, 0x00}, + {0x3132, CRL_REG_LEN_08BIT, 0x00}, + {0x3140, CRL_REG_LEN_08BIT, 0x0a}, + {0x3141, CRL_REG_LEN_08BIT, 0x03}, + {0x3142, CRL_REG_LEN_08BIT, 0x00}, + {0x3143, CRL_REG_LEN_08BIT, 0x00}, + {0x3144, CRL_REG_LEN_08BIT, 0x00}, + {0x3145, CRL_REG_LEN_08BIT, 0x00}, + {0x3146, CRL_REG_LEN_08BIT, 0x00}, + {0x3147, CRL_REG_LEN_08BIT, 0x00}, + {0x3148, CRL_REG_LEN_08BIT, 0x00}, + {0x3149, CRL_REG_LEN_08BIT, 0x00}, + {0x314a, CRL_REG_LEN_08BIT, 0x00}, + {0x314b, CRL_REG_LEN_08BIT, 0x00}, + {0x314c, CRL_REG_LEN_08BIT, 0x00}, + {0x314d, CRL_REG_LEN_08BIT, 0x00}, + {0x314e, CRL_REG_LEN_08BIT, 0x1c}, + {0x314f, CRL_REG_LEN_08BIT, 0xff}, + {0x3150, CRL_REG_LEN_08BIT, 0xff}, + {0x3151, CRL_REG_LEN_08BIT, 0xff}, + {0x3152, CRL_REG_LEN_08BIT, 0x10}, + {0x3153, CRL_REG_LEN_08BIT, 0x10}, + {0x3154, CRL_REG_LEN_08BIT, 0x10}, + {0x3155, CRL_REG_LEN_08BIT, 0x00}, + {0x3156, CRL_REG_LEN_08BIT, 0x03}, + {0x3157, CRL_REG_LEN_08BIT, 0x00}, + {0x3158, CRL_REG_LEN_08BIT, 0x0f}, + {0x3159, CRL_REG_LEN_08BIT, 0xff}, + {0x315a, CRL_REG_LEN_08BIT, 0x01}, + {0x315b, CRL_REG_LEN_08BIT, 0x00}, + {0x315c, CRL_REG_LEN_08BIT, 0x01}, + {0x315d, CRL_REG_LEN_08BIT, 0x00}, + {0x315e, CRL_REG_LEN_08BIT, 0x01}, + {0x315f, CRL_REG_LEN_08BIT, 0x00}, + {0x3160, CRL_REG_LEN_08BIT, 0x00}, + {0x3161, CRL_REG_LEN_08BIT, 0x40}, + {0x3162, CRL_REG_LEN_08BIT, 0x00}, + {0x3163, CRL_REG_LEN_08BIT, 0x40}, + {0x3164, CRL_REG_LEN_08BIT, 0x00}, + {0x3165, CRL_REG_LEN_08BIT, 0x40}, + {0x3190, CRL_REG_LEN_08BIT, 0x08}, + {0x3191, CRL_REG_LEN_08BIT, 0x99}, + {0x3193, 
CRL_REG_LEN_08BIT, 0x08}, + {0x3194, CRL_REG_LEN_08BIT, 0x13}, + {0x3195, CRL_REG_LEN_08BIT, 0x33}, + {0x3196, CRL_REG_LEN_08BIT, 0x00}, + {0x3197, CRL_REG_LEN_08BIT, 0x10}, + {0x3198, CRL_REG_LEN_08BIT, 0x00}, + {0x3199, CRL_REG_LEN_08BIT, 0x7f}, + {0x319a, CRL_REG_LEN_08BIT, 0x80}, + {0x319b, CRL_REG_LEN_08BIT, 0xff}, + {0x319c, CRL_REG_LEN_08BIT, 0x80}, + {0x319d, CRL_REG_LEN_08BIT, 0xbf}, + {0x319e, CRL_REG_LEN_08BIT, 0xc0}, + {0x319f, CRL_REG_LEN_08BIT, 0xff}, + {0x31a0, CRL_REG_LEN_08BIT, 0x24}, + {0x31a1, CRL_REG_LEN_08BIT, 0x55}, + {0x31a2, CRL_REG_LEN_08BIT, 0x00}, + {0x31a3, CRL_REG_LEN_08BIT, 0x00}, + {0x31a6, CRL_REG_LEN_08BIT, 0x00}, + {0x31a7, CRL_REG_LEN_08BIT, 0x00}, + {0x31b0, CRL_REG_LEN_08BIT, 0x00}, + {0x31b1, CRL_REG_LEN_08BIT, 0x00}, + {0x31b2, CRL_REG_LEN_08BIT, 0x02}, + {0x31b3, CRL_REG_LEN_08BIT, 0x00}, + {0x31b4, CRL_REG_LEN_08BIT, 0x00}, + {0x31b5, CRL_REG_LEN_08BIT, 0x01}, + {0x31b6, CRL_REG_LEN_08BIT, 0x00}, + {0x31b7, CRL_REG_LEN_08BIT, 0x00}, + {0x31b8, CRL_REG_LEN_08BIT, 0x00}, + {0x31b9, CRL_REG_LEN_08BIT, 0x00}, + {0x31ba, CRL_REG_LEN_08BIT, 0x00}, + {0x31d0, CRL_REG_LEN_08BIT, 0x3c}, + {0x31d1, CRL_REG_LEN_08BIT, 0x34}, + {0x31d2, CRL_REG_LEN_08BIT, 0x3c}, + {0x31d3, CRL_REG_LEN_08BIT, 0x00}, + {0x31d4, CRL_REG_LEN_08BIT, 0x2d}, + {0x31d5, CRL_REG_LEN_08BIT, 0x00}, + {0x31d6, CRL_REG_LEN_08BIT, 0x01}, + {0x31d7, CRL_REG_LEN_08BIT, 0x06}, + {0x31d8, CRL_REG_LEN_08BIT, 0x00}, + {0x31d9, CRL_REG_LEN_08BIT, 0x64}, + {0x31da, CRL_REG_LEN_08BIT, 0x00}, + {0x31db, CRL_REG_LEN_08BIT, 0x30}, + {0x31dc, CRL_REG_LEN_08BIT, 0x04}, + {0x31dd, CRL_REG_LEN_08BIT, 0x69}, + {0x31de, CRL_REG_LEN_08BIT, 0x0a}, + {0x31df, CRL_REG_LEN_08BIT, 0x3c}, + {0x31e0, CRL_REG_LEN_08BIT, 0x04}, + {0x31e1, CRL_REG_LEN_08BIT, 0x32}, + {0x31e2, CRL_REG_LEN_08BIT, 0x00}, + {0x31e3, CRL_REG_LEN_08BIT, 0x00}, + {0x31e4, CRL_REG_LEN_08BIT, 0x08}, + {0x31e5, CRL_REG_LEN_08BIT, 0x80}, + {0x31e6, CRL_REG_LEN_08BIT, 0x00}, + {0x31e7, CRL_REG_LEN_08BIT, 0x2c}, + {0x31e8, CRL_REG_LEN_08BIT, 0x6c}, + {0x31e9, CRL_REG_LEN_08BIT, 0xac}, + {0x31ea, CRL_REG_LEN_08BIT, 0xec}, + {0x31eb, CRL_REG_LEN_08BIT, 0x3f}, + {0x31ec, CRL_REG_LEN_08BIT, 0x0f}, + {0x31ed, CRL_REG_LEN_08BIT, 0x20}, + {0x31ee, CRL_REG_LEN_08BIT, 0x04}, + {0x31ef, CRL_REG_LEN_08BIT, 0x48}, + {0x31f0, CRL_REG_LEN_08BIT, 0x07}, + {0x31f1, CRL_REG_LEN_08BIT, 0x90}, + {0x31f2, CRL_REG_LEN_08BIT, 0x04}, + {0x31f3, CRL_REG_LEN_08BIT, 0x48}, + {0x31f4, CRL_REG_LEN_08BIT, 0x07}, + {0x31f5, CRL_REG_LEN_08BIT, 0x90}, + {0x31f6, CRL_REG_LEN_08BIT, 0x04}, + {0x31f7, CRL_REG_LEN_08BIT, 0x48}, + {0x31f8, CRL_REG_LEN_08BIT, 0x07}, + {0x31f9, CRL_REG_LEN_08BIT, 0x90}, + {0x31fa, CRL_REG_LEN_08BIT, 0x04}, + {0x31fb, CRL_REG_LEN_08BIT, 0x48}, + {0x31fd, CRL_REG_LEN_08BIT, 0xcb}, + {0x31fe, CRL_REG_LEN_08BIT, 0x0f}, + {0x31ff, CRL_REG_LEN_08BIT, 0x03}, + {0x3200, CRL_REG_LEN_08BIT, 0x00}, + {0x3201, CRL_REG_LEN_08BIT, 0xff}, + {0x3202, CRL_REG_LEN_08BIT, 0x00}, + {0x3203, CRL_REG_LEN_08BIT, 0xff}, + {0x3204, CRL_REG_LEN_08BIT, 0xff}, + {0x3205, CRL_REG_LEN_08BIT, 0xff}, + {0x3206, CRL_REG_LEN_08BIT, 0xff}, + {0x3207, CRL_REG_LEN_08BIT, 0xff}, + {0x3208, CRL_REG_LEN_08BIT, 0xff}, + {0x3209, CRL_REG_LEN_08BIT, 0xff}, + {0x320a, CRL_REG_LEN_08BIT, 0xff}, + {0x320b, CRL_REG_LEN_08BIT, 0x1b}, + {0x320c, CRL_REG_LEN_08BIT, 0x1f}, + {0x320d, CRL_REG_LEN_08BIT, 0x1e}, + {0x320e, CRL_REG_LEN_08BIT, 0x30}, + {0x320f, CRL_REG_LEN_08BIT, 0x2d}, + {0x3210, CRL_REG_LEN_08BIT, 0x2c}, + {0x3211, CRL_REG_LEN_08BIT, 0x2b}, + {0x3212, CRL_REG_LEN_08BIT, 0x2a}, + {0x3213, 
CRL_REG_LEN_08BIT, 0x24}, + {0x3214, CRL_REG_LEN_08BIT, 0x22}, + {0x3215, CRL_REG_LEN_08BIT, 0x00}, + {0x3216, CRL_REG_LEN_08BIT, 0x04}, + {0x3217, CRL_REG_LEN_08BIT, 0x2c}, + {0x3218, CRL_REG_LEN_08BIT, 0x6c}, + {0x3219, CRL_REG_LEN_08BIT, 0xac}, + {0x321a, CRL_REG_LEN_08BIT, 0xec}, + {0x321b, CRL_REG_LEN_08BIT, 0x00}, + {0x3230, CRL_REG_LEN_08BIT, 0x3a}, + {0x3231, CRL_REG_LEN_08BIT, 0x00}, + {0x3232, CRL_REG_LEN_08BIT, 0x80}, + {0x3233, CRL_REG_LEN_08BIT, 0x00}, + {0x3234, CRL_REG_LEN_08BIT, 0x10}, + {0x3235, CRL_REG_LEN_08BIT, 0xaa}, + {0x3236, CRL_REG_LEN_08BIT, 0x55}, + {0x3237, CRL_REG_LEN_08BIT, 0x99}, + {0x3238, CRL_REG_LEN_08BIT, 0x66}, + {0x3239, CRL_REG_LEN_08BIT, 0x08}, + {0x323a, CRL_REG_LEN_08BIT, 0x88}, + {0x323b, CRL_REG_LEN_08BIT, 0x00}, + {0x323c, CRL_REG_LEN_08BIT, 0x00}, + {0x323d, CRL_REG_LEN_08BIT, 0x03}, + {0x3250, CRL_REG_LEN_08BIT, 0x33}, + {0x3251, CRL_REG_LEN_08BIT, 0x00}, + {0x3252, CRL_REG_LEN_08BIT, 0x20}, + {0x3253, CRL_REG_LEN_08BIT, 0x00}, + {0x3254, CRL_REG_LEN_08BIT, 0x00}, + {0x3255, CRL_REG_LEN_08BIT, 0x01}, + {0x3256, CRL_REG_LEN_08BIT, 0x00}, + {0x3257, CRL_REG_LEN_08BIT, 0x00}, + {0x3258, CRL_REG_LEN_08BIT, 0x00}, + {0x3270, CRL_REG_LEN_08BIT, 0x01}, + {0x3271, CRL_REG_LEN_08BIT, 0x60}, + {0x3272, CRL_REG_LEN_08BIT, 0xc0}, + {0x3273, CRL_REG_LEN_08BIT, 0x00}, + {0x3274, CRL_REG_LEN_08BIT, 0x80}, + {0x3275, CRL_REG_LEN_08BIT, 0x40}, + {0x3276, CRL_REG_LEN_08BIT, 0x02}, + {0x3277, CRL_REG_LEN_08BIT, 0x08}, + {0x3278, CRL_REG_LEN_08BIT, 0x10}, + {0x3279, CRL_REG_LEN_08BIT, 0x04}, + {0x327a, CRL_REG_LEN_08BIT, 0x00}, + {0x327b, CRL_REG_LEN_08BIT, 0x03}, + {0x327c, CRL_REG_LEN_08BIT, 0x10}, + {0x327d, CRL_REG_LEN_08BIT, 0x60}, + {0x327e, CRL_REG_LEN_08BIT, 0xc0}, + {0x327f, CRL_REG_LEN_08BIT, 0x06}, + {0x3288, CRL_REG_LEN_08BIT, 0x10}, + {0x3289, CRL_REG_LEN_08BIT, 0x00}, + {0x328a, CRL_REG_LEN_08BIT, 0x08}, + {0x328b, CRL_REG_LEN_08BIT, 0x00}, + {0x328c, CRL_REG_LEN_08BIT, 0x04}, + {0x328d, CRL_REG_LEN_08BIT, 0x00}, + {0x328e, CRL_REG_LEN_08BIT, 0x02}, + {0x328f, CRL_REG_LEN_08BIT, 0x00}, + {0x3290, CRL_REG_LEN_08BIT, 0x20}, + {0x3291, CRL_REG_LEN_08BIT, 0x00}, + {0x3292, CRL_REG_LEN_08BIT, 0x10}, + {0x3293, CRL_REG_LEN_08BIT, 0x00}, + {0x3294, CRL_REG_LEN_08BIT, 0x08}, + {0x3295, CRL_REG_LEN_08BIT, 0x00}, + {0x3296, CRL_REG_LEN_08BIT, 0x04}, + {0x3297, CRL_REG_LEN_08BIT, 0x00}, + {0x3298, CRL_REG_LEN_08BIT, 0x40}, + {0x3299, CRL_REG_LEN_08BIT, 0x00}, + {0x329a, CRL_REG_LEN_08BIT, 0x20}, + {0x329b, CRL_REG_LEN_08BIT, 0x00}, + {0x329c, CRL_REG_LEN_08BIT, 0x10}, + {0x329d, CRL_REG_LEN_08BIT, 0x00}, + {0x329e, CRL_REG_LEN_08BIT, 0x08}, + {0x329f, CRL_REG_LEN_08BIT, 0x00}, + {0x32a0, CRL_REG_LEN_08BIT, 0x7f}, + {0x32a1, CRL_REG_LEN_08BIT, 0xff}, + {0x32a2, CRL_REG_LEN_08BIT, 0x40}, + {0x32a3, CRL_REG_LEN_08BIT, 0x00}, + {0x32a4, CRL_REG_LEN_08BIT, 0x20}, + {0x32a5, CRL_REG_LEN_08BIT, 0x00}, + {0x32a6, CRL_REG_LEN_08BIT, 0x10}, + {0x32a7, CRL_REG_LEN_08BIT, 0x00}, + {0x32a8, CRL_REG_LEN_08BIT, 0x00}, + {0x32a9, CRL_REG_LEN_08BIT, 0x00}, + {0x32aa, CRL_REG_LEN_08BIT, 0x00}, + {0x32ab, CRL_REG_LEN_08BIT, 0x00}, + {0x32ac, CRL_REG_LEN_08BIT, 0x00}, + {0x32ad, CRL_REG_LEN_08BIT, 0x00}, + {0x32ae, CRL_REG_LEN_08BIT, 0x00}, + {0x32af, CRL_REG_LEN_08BIT, 0x00}, + {0x32b0, CRL_REG_LEN_08BIT, 0x00}, + {0x32b1, CRL_REG_LEN_08BIT, 0x00}, + {0x32b2, CRL_REG_LEN_08BIT, 0x00}, + {0x32b3, CRL_REG_LEN_08BIT, 0x00}, + {0x32b4, CRL_REG_LEN_08BIT, 0x00}, + {0x32b5, CRL_REG_LEN_08BIT, 0x00}, + {0x32b6, CRL_REG_LEN_08BIT, 0x00}, + {0x32b7, CRL_REG_LEN_08BIT, 0x00}, + {0x32b8, 
CRL_REG_LEN_08BIT, 0x00}, + {0x32b9, CRL_REG_LEN_08BIT, 0x00}, + {0x32ba, CRL_REG_LEN_08BIT, 0x00}, + {0x32bb, CRL_REG_LEN_08BIT, 0x00}, + {0x32bc, CRL_REG_LEN_08BIT, 0x00}, + {0x32bd, CRL_REG_LEN_08BIT, 0x00}, + {0x32be, CRL_REG_LEN_08BIT, 0x00}, + {0x32bf, CRL_REG_LEN_08BIT, 0x00}, + {0x32c0, CRL_REG_LEN_08BIT, 0x00}, + {0x32c1, CRL_REG_LEN_08BIT, 0x00}, + {0x32c2, CRL_REG_LEN_08BIT, 0x00}, + {0x32c3, CRL_REG_LEN_08BIT, 0x00}, + {0x32c4, CRL_REG_LEN_08BIT, 0x00}, + {0x32c5, CRL_REG_LEN_08BIT, 0x00}, + {0x32c6, CRL_REG_LEN_08BIT, 0x00}, + {0x32c7, CRL_REG_LEN_08BIT, 0x00}, + {0x32c8, CRL_REG_LEN_08BIT, 0x87}, + {0x32c9, CRL_REG_LEN_08BIT, 0x00}, + {0x3330, CRL_REG_LEN_08BIT, 0x03}, + {0x3331, CRL_REG_LEN_08BIT, 0xc8}, + {0x3332, CRL_REG_LEN_08BIT, 0x02}, + {0x3333, CRL_REG_LEN_08BIT, 0x24}, + {0x3334, CRL_REG_LEN_08BIT, 0x00}, + {0x3335, CRL_REG_LEN_08BIT, 0x00}, + {0x3336, CRL_REG_LEN_08BIT, 0x00}, + {0x3337, CRL_REG_LEN_08BIT, 0x00}, + {0x3338, CRL_REG_LEN_08BIT, 0x03}, + {0x3339, CRL_REG_LEN_08BIT, 0xc8}, + {0x333a, CRL_REG_LEN_08BIT, 0x02}, + {0x333b, CRL_REG_LEN_08BIT, 0x24}, + {0x333c, CRL_REG_LEN_08BIT, 0x00}, + {0x333d, CRL_REG_LEN_08BIT, 0x00}, + {0x333e, CRL_REG_LEN_08BIT, 0x00}, + {0x333f, CRL_REG_LEN_08BIT, 0x00}, + {0x3340, CRL_REG_LEN_08BIT, 0x03}, + {0x3341, CRL_REG_LEN_08BIT, 0xc8}, + {0x3342, CRL_REG_LEN_08BIT, 0x02}, + {0x3343, CRL_REG_LEN_08BIT, 0x24}, + {0x3344, CRL_REG_LEN_08BIT, 0x00}, + {0x3345, CRL_REG_LEN_08BIT, 0x00}, + {0x3346, CRL_REG_LEN_08BIT, 0x00}, + {0x3347, CRL_REG_LEN_08BIT, 0x00}, + {0x3348, CRL_REG_LEN_08BIT, 0x40}, + {0x3349, CRL_REG_LEN_08BIT, 0x00}, + {0x334a, CRL_REG_LEN_08BIT, 0x00}, + {0x334b, CRL_REG_LEN_08BIT, 0x00}, + {0x334c, CRL_REG_LEN_08BIT, 0x00}, + {0x334d, CRL_REG_LEN_08BIT, 0x00}, + {0x334e, CRL_REG_LEN_08BIT, 0x80}, + {0x3360, CRL_REG_LEN_08BIT, 0x01}, + {0x3361, CRL_REG_LEN_08BIT, 0x00}, + {0x3362, CRL_REG_LEN_08BIT, 0x01}, + {0x3363, CRL_REG_LEN_08BIT, 0x00}, + {0x3364, CRL_REG_LEN_08BIT, 0x01}, + {0x3365, CRL_REG_LEN_08BIT, 0x00}, + {0x3366, CRL_REG_LEN_08BIT, 0x01}, + {0x3367, CRL_REG_LEN_08BIT, 0x00}, + {0x3368, CRL_REG_LEN_08BIT, 0x01}, + {0x3369, CRL_REG_LEN_08BIT, 0x00}, + {0x336a, CRL_REG_LEN_08BIT, 0x01}, + {0x336b, CRL_REG_LEN_08BIT, 0x00}, + {0x336c, CRL_REG_LEN_08BIT, 0x01}, + {0x336d, CRL_REG_LEN_08BIT, 0x00}, + {0x336e, CRL_REG_LEN_08BIT, 0x01}, + {0x336f, CRL_REG_LEN_08BIT, 0x00}, + {0x3370, CRL_REG_LEN_08BIT, 0x01}, + {0x3371, CRL_REG_LEN_08BIT, 0x00}, + {0x3372, CRL_REG_LEN_08BIT, 0x01}, + {0x3373, CRL_REG_LEN_08BIT, 0x00}, + {0x3374, CRL_REG_LEN_08BIT, 0x01}, + {0x3375, CRL_REG_LEN_08BIT, 0x00}, + {0x3376, CRL_REG_LEN_08BIT, 0x01}, + {0x3377, CRL_REG_LEN_08BIT, 0x00}, + {0x3378, CRL_REG_LEN_08BIT, 0x00}, + {0x3379, CRL_REG_LEN_08BIT, 0x00}, + {0x337a, CRL_REG_LEN_08BIT, 0x00}, + {0x337b, CRL_REG_LEN_08BIT, 0x00}, + {0x337c, CRL_REG_LEN_08BIT, 0x00}, + {0x337d, CRL_REG_LEN_08BIT, 0x00}, + {0x337e, CRL_REG_LEN_08BIT, 0x00}, + {0x337f, CRL_REG_LEN_08BIT, 0x00}, + {0x3380, CRL_REG_LEN_08BIT, 0x00}, + {0x3381, CRL_REG_LEN_08BIT, 0x00}, + {0x3382, CRL_REG_LEN_08BIT, 0x00}, + {0x3383, CRL_REG_LEN_08BIT, 0x00}, + {0x3384, CRL_REG_LEN_08BIT, 0x00}, + {0x3385, CRL_REG_LEN_08BIT, 0x00}, + {0x3386, CRL_REG_LEN_08BIT, 0x00}, + {0x3387, CRL_REG_LEN_08BIT, 0x00}, + {0x3388, CRL_REG_LEN_08BIT, 0x00}, + {0x3389, CRL_REG_LEN_08BIT, 0x00}, + {0x338a, CRL_REG_LEN_08BIT, 0x00}, + {0x338b, CRL_REG_LEN_08BIT, 0x00}, + {0x338c, CRL_REG_LEN_08BIT, 0x00}, + {0x338d, CRL_REG_LEN_08BIT, 0x00}, + {0x338e, CRL_REG_LEN_08BIT, 0x00}, + {0x338f, 
CRL_REG_LEN_08BIT, 0x00}, + {0x3390, CRL_REG_LEN_08BIT, 0x00}, + {0x3391, CRL_REG_LEN_08BIT, 0x00}, + {0x3392, CRL_REG_LEN_08BIT, 0x00}, + {0x3393, CRL_REG_LEN_08BIT, 0x00}, + {0x3394, CRL_REG_LEN_08BIT, 0x00}, + {0x3395, CRL_REG_LEN_08BIT, 0x00}, + {0x3396, CRL_REG_LEN_08BIT, 0x00}, + {0x3397, CRL_REG_LEN_08BIT, 0x00}, + {0x3398, CRL_REG_LEN_08BIT, 0x00}, + {0x3399, CRL_REG_LEN_08BIT, 0x00}, + {0x339a, CRL_REG_LEN_08BIT, 0x00}, + {0x339b, CRL_REG_LEN_08BIT, 0x00}, + {0x33b0, CRL_REG_LEN_08BIT, 0x00}, + {0x33b1, CRL_REG_LEN_08BIT, 0x50}, + {0x33b2, CRL_REG_LEN_08BIT, 0x01}, + {0x33b3, CRL_REG_LEN_08BIT, 0xff}, + {0x33b4, CRL_REG_LEN_08BIT, 0xe0}, + {0x33b5, CRL_REG_LEN_08BIT, 0x6b}, + {0x33b6, CRL_REG_LEN_08BIT, 0x00}, + {0x33b7, CRL_REG_LEN_08BIT, 0x00}, + {0x33b8, CRL_REG_LEN_08BIT, 0x00}, + {0x33b9, CRL_REG_LEN_08BIT, 0x00}, + {0x33ba, CRL_REG_LEN_08BIT, 0x00}, + {0x33bb, CRL_REG_LEN_08BIT, 0x1f}, + {0x33bc, CRL_REG_LEN_08BIT, 0x01}, + {0x33bd, CRL_REG_LEN_08BIT, 0x01}, + {0x33be, CRL_REG_LEN_08BIT, 0x01}, + {0x33bf, CRL_REG_LEN_08BIT, 0x01}, + {0x33c0, CRL_REG_LEN_08BIT, 0x00}, + {0x33c1, CRL_REG_LEN_08BIT, 0x00}, + {0x33c2, CRL_REG_LEN_08BIT, 0x00}, + {0x33c3, CRL_REG_LEN_08BIT, 0x00}, + {0x33e0, CRL_REG_LEN_08BIT, 0x14}, + {0x33e1, CRL_REG_LEN_08BIT, 0x0f}, + {0x33e2, CRL_REG_LEN_08BIT, 0x02}, + {0x33e3, CRL_REG_LEN_08BIT, 0x01}, + {0x33e4, CRL_REG_LEN_08BIT, 0x01}, + {0x33e5, CRL_REG_LEN_08BIT, 0x01}, + {0x33e6, CRL_REG_LEN_08BIT, 0x00}, + {0x33e7, CRL_REG_LEN_08BIT, 0x04}, + {0x33e8, CRL_REG_LEN_08BIT, 0x0c}, + {0x33e9, CRL_REG_LEN_08BIT, 0x02}, + {0x33ea, CRL_REG_LEN_08BIT, 0x02}, + {0x33eb, CRL_REG_LEN_08BIT, 0x02}, + {0x33ec, CRL_REG_LEN_08BIT, 0x03}, + {0x33ed, CRL_REG_LEN_08BIT, 0x01}, + {0x33ee, CRL_REG_LEN_08BIT, 0x02}, + {0x33ef, CRL_REG_LEN_08BIT, 0x08}, + {0x33f0, CRL_REG_LEN_08BIT, 0x08}, + {0x33f1, CRL_REG_LEN_08BIT, 0x04}, + {0x33f2, CRL_REG_LEN_08BIT, 0x04}, + {0x33f3, CRL_REG_LEN_08BIT, 0x00}, + {0x33f4, CRL_REG_LEN_08BIT, 0x03}, + {0x33f5, CRL_REG_LEN_08BIT, 0x14}, + {0x33f6, CRL_REG_LEN_08BIT, 0x0f}, + {0x33f7, CRL_REG_LEN_08BIT, 0x02}, + {0x33f8, CRL_REG_LEN_08BIT, 0x01}, + {0x33f9, CRL_REG_LEN_08BIT, 0x01}, + {0x33fa, CRL_REG_LEN_08BIT, 0x01}, + {0x33fb, CRL_REG_LEN_08BIT, 0x00}, + {0x33fc, CRL_REG_LEN_08BIT, 0x04}, + {0x33fd, CRL_REG_LEN_08BIT, 0x0c}, + {0x33fe, CRL_REG_LEN_08BIT, 0x02}, + {0x33ff, CRL_REG_LEN_08BIT, 0x02}, + {0x3400, CRL_REG_LEN_08BIT, 0x02}, + {0x3401, CRL_REG_LEN_08BIT, 0x03}, + {0x3402, CRL_REG_LEN_08BIT, 0x01}, + {0x3403, CRL_REG_LEN_08BIT, 0x02}, + {0x3404, CRL_REG_LEN_08BIT, 0x08}, + {0x3405, CRL_REG_LEN_08BIT, 0x08}, + {0x3406, CRL_REG_LEN_08BIT, 0x04}, + {0x3407, CRL_REG_LEN_08BIT, 0x04}, + {0x3408, CRL_REG_LEN_08BIT, 0x00}, + {0x3409, CRL_REG_LEN_08BIT, 0x03}, + {0x340a, CRL_REG_LEN_08BIT, 0x14}, + {0x340b, CRL_REG_LEN_08BIT, 0x0f}, + {0x340c, CRL_REG_LEN_08BIT, 0x04}, + {0x340d, CRL_REG_LEN_08BIT, 0x02}, + {0x340e, CRL_REG_LEN_08BIT, 0x01}, + {0x340f, CRL_REG_LEN_08BIT, 0x01}, + {0x3410, CRL_REG_LEN_08BIT, 0x00}, + {0x3411, CRL_REG_LEN_08BIT, 0x04}, + {0x3412, CRL_REG_LEN_08BIT, 0x0c}, + {0x3413, CRL_REG_LEN_08BIT, 0x02}, + {0x3414, CRL_REG_LEN_08BIT, 0x02}, + {0x3415, CRL_REG_LEN_08BIT, 0x02}, + {0x3416, CRL_REG_LEN_08BIT, 0x03}, + {0x3417, CRL_REG_LEN_08BIT, 0x02}, + {0x3418, CRL_REG_LEN_08BIT, 0x05}, + {0x3419, CRL_REG_LEN_08BIT, 0x0a}, + {0x341a, CRL_REG_LEN_08BIT, 0x08}, + {0x341b, CRL_REG_LEN_08BIT, 0x04}, + {0x341c, CRL_REG_LEN_08BIT, 0x04}, + {0x341d, CRL_REG_LEN_08BIT, 0x00}, + {0x341e, CRL_REG_LEN_08BIT, 0x03}, + {0x3440, 
CRL_REG_LEN_08BIT, 0x00}, + {0x3441, CRL_REG_LEN_08BIT, 0x00}, + {0x3442, CRL_REG_LEN_08BIT, 0x00}, + {0x3443, CRL_REG_LEN_08BIT, 0x00}, + {0x3444, CRL_REG_LEN_08BIT, 0x02}, + {0x3445, CRL_REG_LEN_08BIT, 0xf0}, + {0x3446, CRL_REG_LEN_08BIT, 0x02}, + {0x3447, CRL_REG_LEN_08BIT, 0x08}, + {0x3448, CRL_REG_LEN_08BIT, 0x00}, + {0x3460, CRL_REG_LEN_08BIT, 0x40}, + {0x3461, CRL_REG_LEN_08BIT, 0x40}, + {0x3462, CRL_REG_LEN_08BIT, 0x40}, + {0x3463, CRL_REG_LEN_08BIT, 0x40}, + {0x3464, CRL_REG_LEN_08BIT, 0x03}, + {0x3465, CRL_REG_LEN_08BIT, 0x01}, + {0x3466, CRL_REG_LEN_08BIT, 0x01}, + {0x3467, CRL_REG_LEN_08BIT, 0x02}, + {0x3468, CRL_REG_LEN_08BIT, 0x30}, + {0x3469, CRL_REG_LEN_08BIT, 0x00}, + {0x346a, CRL_REG_LEN_08BIT, 0x33}, + {0x346b, CRL_REG_LEN_08BIT, 0xbf}, + {0x3480, CRL_REG_LEN_08BIT, 0x40}, + {0x3481, CRL_REG_LEN_08BIT, 0x00}, + {0x3482, CRL_REG_LEN_08BIT, 0x00}, + {0x3483, CRL_REG_LEN_08BIT, 0x00}, + {0x3484, CRL_REG_LEN_08BIT, 0x0d}, + {0x3485, CRL_REG_LEN_08BIT, 0x00}, + {0x3486, CRL_REG_LEN_08BIT, 0x00}, + {0x3487, CRL_REG_LEN_08BIT, 0x00}, + {0x3488, CRL_REG_LEN_08BIT, 0x00}, + {0x3489, CRL_REG_LEN_08BIT, 0x00}, + {0x348a, CRL_REG_LEN_08BIT, 0x00}, + {0x348b, CRL_REG_LEN_08BIT, 0x04}, + {0x348c, CRL_REG_LEN_08BIT, 0x00}, + {0x348d, CRL_REG_LEN_08BIT, 0x01}, + {0x348f, CRL_REG_LEN_08BIT, 0x01}, + {0x3030, CRL_REG_LEN_08BIT, 0x0a}, + {0x3030, CRL_REG_LEN_08BIT, 0x02}, + {0x7000, CRL_REG_LEN_08BIT, 0x58}, + {0x7001, CRL_REG_LEN_08BIT, 0x7a}, + {0x7002, CRL_REG_LEN_08BIT, 0x1a}, + {0x7003, CRL_REG_LEN_08BIT, 0xc1}, + {0x7004, CRL_REG_LEN_08BIT, 0x03}, + {0x7005, CRL_REG_LEN_08BIT, 0xda}, + {0x7006, CRL_REG_LEN_08BIT, 0xbd}, + {0x7007, CRL_REG_LEN_08BIT, 0x03}, + {0x7008, CRL_REG_LEN_08BIT, 0xbd}, + {0x7009, CRL_REG_LEN_08BIT, 0x06}, + {0x700a, CRL_REG_LEN_08BIT, 0xe6}, + {0x700b, CRL_REG_LEN_08BIT, 0xec}, + {0x700c, CRL_REG_LEN_08BIT, 0xbc}, + {0x700d, CRL_REG_LEN_08BIT, 0xff}, + {0x700e, CRL_REG_LEN_08BIT, 0xbc}, + {0x700f, CRL_REG_LEN_08BIT, 0x73}, + {0x7010, CRL_REG_LEN_08BIT, 0xda}, + {0x7011, CRL_REG_LEN_08BIT, 0x72}, + {0x7012, CRL_REG_LEN_08BIT, 0x76}, + {0x7013, CRL_REG_LEN_08BIT, 0xb6}, + {0x7014, CRL_REG_LEN_08BIT, 0xee}, + {0x7015, CRL_REG_LEN_08BIT, 0xcf}, + {0x7016, CRL_REG_LEN_08BIT, 0xac}, + {0x7017, CRL_REG_LEN_08BIT, 0xd0}, + {0x7018, CRL_REG_LEN_08BIT, 0xac}, + {0x7019, CRL_REG_LEN_08BIT, 0xd1}, + {0x701a, CRL_REG_LEN_08BIT, 0x50}, + {0x701b, CRL_REG_LEN_08BIT, 0xac}, + {0x701c, CRL_REG_LEN_08BIT, 0xd2}, + {0x701d, CRL_REG_LEN_08BIT, 0xbc}, + {0x701e, CRL_REG_LEN_08BIT, 0x2e}, + {0x701f, CRL_REG_LEN_08BIT, 0xb4}, + {0x7020, CRL_REG_LEN_08BIT, 0x00}, + {0x7021, CRL_REG_LEN_08BIT, 0xdc}, + {0x7022, CRL_REG_LEN_08BIT, 0xdf}, + {0x7023, CRL_REG_LEN_08BIT, 0xb0}, + {0x7024, CRL_REG_LEN_08BIT, 0x6e}, + {0x7025, CRL_REG_LEN_08BIT, 0xbd}, + {0x7026, CRL_REG_LEN_08BIT, 0x01}, + {0x7027, CRL_REG_LEN_08BIT, 0xd7}, + {0x7028, CRL_REG_LEN_08BIT, 0xed}, + {0x7029, CRL_REG_LEN_08BIT, 0xe1}, + {0x702a, CRL_REG_LEN_08BIT, 0x36}, + {0x702b, CRL_REG_LEN_08BIT, 0x30}, + {0x702c, CRL_REG_LEN_08BIT, 0xd3}, + {0x702d, CRL_REG_LEN_08BIT, 0x2e}, + {0x702e, CRL_REG_LEN_08BIT, 0x54}, + {0x702f, CRL_REG_LEN_08BIT, 0x46}, + {0x7030, CRL_REG_LEN_08BIT, 0xbc}, + {0x7031, CRL_REG_LEN_08BIT, 0x22}, + {0x7032, CRL_REG_LEN_08BIT, 0x66}, + {0x7033, CRL_REG_LEN_08BIT, 0xbc}, + {0x7034, CRL_REG_LEN_08BIT, 0x24}, + {0x7035, CRL_REG_LEN_08BIT, 0x2c}, + {0x7036, CRL_REG_LEN_08BIT, 0x28}, + {0x7037, CRL_REG_LEN_08BIT, 0xbc}, + {0x7038, CRL_REG_LEN_08BIT, 0x3c}, + {0x7039, CRL_REG_LEN_08BIT, 0xa1}, + {0x703a, 
CRL_REG_LEN_08BIT, 0xac}, + {0x703b, CRL_REG_LEN_08BIT, 0xd8}, + {0x703c, CRL_REG_LEN_08BIT, 0xd6}, + {0x703d, CRL_REG_LEN_08BIT, 0xb4}, + {0x703e, CRL_REG_LEN_08BIT, 0x04}, + {0x703f, CRL_REG_LEN_08BIT, 0x46}, + {0x7040, CRL_REG_LEN_08BIT, 0xb7}, + {0x7041, CRL_REG_LEN_08BIT, 0x04}, + {0x7042, CRL_REG_LEN_08BIT, 0xbe}, + {0x7043, CRL_REG_LEN_08BIT, 0x08}, + {0x7044, CRL_REG_LEN_08BIT, 0xc3}, + {0x7045, CRL_REG_LEN_08BIT, 0xd9}, + {0x7046, CRL_REG_LEN_08BIT, 0xad}, + {0x7047, CRL_REG_LEN_08BIT, 0xc3}, + {0x7048, CRL_REG_LEN_08BIT, 0xbc}, + {0x7049, CRL_REG_LEN_08BIT, 0x19}, + {0x704a, CRL_REG_LEN_08BIT, 0xc1}, + {0x704b, CRL_REG_LEN_08BIT, 0x27}, + {0x704c, CRL_REG_LEN_08BIT, 0xe7}, + {0x704d, CRL_REG_LEN_08BIT, 0x00}, + {0x704e, CRL_REG_LEN_08BIT, 0x50}, + {0x704f, CRL_REG_LEN_08BIT, 0x20}, + {0x7050, CRL_REG_LEN_08BIT, 0xb8}, + {0x7051, CRL_REG_LEN_08BIT, 0x02}, + {0x7052, CRL_REG_LEN_08BIT, 0xbc}, + {0x7053, CRL_REG_LEN_08BIT, 0x17}, + {0x7054, CRL_REG_LEN_08BIT, 0xdb}, + {0x7055, CRL_REG_LEN_08BIT, 0xc7}, + {0x7056, CRL_REG_LEN_08BIT, 0xb8}, + {0x7057, CRL_REG_LEN_08BIT, 0x00}, + {0x7058, CRL_REG_LEN_08BIT, 0x28}, + {0x7059, CRL_REG_LEN_08BIT, 0x54}, + {0x705a, CRL_REG_LEN_08BIT, 0xb4}, + {0x705b, CRL_REG_LEN_08BIT, 0x14}, + {0x705c, CRL_REG_LEN_08BIT, 0xab}, + {0x705d, CRL_REG_LEN_08BIT, 0xbe}, + {0x705e, CRL_REG_LEN_08BIT, 0x06}, + {0x705f, CRL_REG_LEN_08BIT, 0xd8}, + {0x7060, CRL_REG_LEN_08BIT, 0xd6}, + {0x7061, CRL_REG_LEN_08BIT, 0x00}, + {0x7062, CRL_REG_LEN_08BIT, 0xb4}, + {0x7063, CRL_REG_LEN_08BIT, 0xc7}, + {0x7064, CRL_REG_LEN_08BIT, 0x07}, + {0x7065, CRL_REG_LEN_08BIT, 0xb9}, + {0x7066, CRL_REG_LEN_08BIT, 0x05}, + {0x7067, CRL_REG_LEN_08BIT, 0xee}, + {0x7068, CRL_REG_LEN_08BIT, 0xe6}, + {0x7069, CRL_REG_LEN_08BIT, 0xad}, + {0x706a, CRL_REG_LEN_08BIT, 0xb4}, + {0x706b, CRL_REG_LEN_08BIT, 0x26}, + {0x706c, CRL_REG_LEN_08BIT, 0x19}, + {0x706d, CRL_REG_LEN_08BIT, 0xc1}, + {0x706e, CRL_REG_LEN_08BIT, 0x3a}, + {0x706f, CRL_REG_LEN_08BIT, 0xc3}, + {0x7070, CRL_REG_LEN_08BIT, 0xaf}, + {0x7071, CRL_REG_LEN_08BIT, 0x00}, + {0x7072, CRL_REG_LEN_08BIT, 0xc0}, + {0x7073, CRL_REG_LEN_08BIT, 0x3c}, + {0x7074, CRL_REG_LEN_08BIT, 0xc3}, + {0x7075, CRL_REG_LEN_08BIT, 0xbe}, + {0x7076, CRL_REG_LEN_08BIT, 0xe7}, + {0x7077, CRL_REG_LEN_08BIT, 0x00}, + {0x7078, CRL_REG_LEN_08BIT, 0x15}, + {0x7079, CRL_REG_LEN_08BIT, 0xc2}, + {0x707a, CRL_REG_LEN_08BIT, 0x40}, + {0x707b, CRL_REG_LEN_08BIT, 0xc3}, + {0x707c, CRL_REG_LEN_08BIT, 0xa4}, + {0x707d, CRL_REG_LEN_08BIT, 0xc0}, + {0x707e, CRL_REG_LEN_08BIT, 0x3c}, + {0x707f, CRL_REG_LEN_08BIT, 0x00}, + {0x7080, CRL_REG_LEN_08BIT, 0xb9}, + {0x7081, CRL_REG_LEN_08BIT, 0x64}, + {0x7082, CRL_REG_LEN_08BIT, 0x29}, + {0x7083, CRL_REG_LEN_08BIT, 0x00}, + {0x7084, CRL_REG_LEN_08BIT, 0xb8}, + {0x7085, CRL_REG_LEN_08BIT, 0x12}, + {0x7086, CRL_REG_LEN_08BIT, 0xbe}, + {0x7087, CRL_REG_LEN_08BIT, 0x01}, + {0x7088, CRL_REG_LEN_08BIT, 0xd0}, + {0x7089, CRL_REG_LEN_08BIT, 0xbc}, + {0x708a, CRL_REG_LEN_08BIT, 0x01}, + {0x708b, CRL_REG_LEN_08BIT, 0xac}, + {0x708c, CRL_REG_LEN_08BIT, 0x37}, + {0x708d, CRL_REG_LEN_08BIT, 0xd2}, + {0x708e, CRL_REG_LEN_08BIT, 0xac}, + {0x708f, CRL_REG_LEN_08BIT, 0x45}, + {0x7090, CRL_REG_LEN_08BIT, 0xad}, + {0x7091, CRL_REG_LEN_08BIT, 0x28}, + {0x7092, CRL_REG_LEN_08BIT, 0x00}, + {0x7093, CRL_REG_LEN_08BIT, 0xb8}, + {0x7094, CRL_REG_LEN_08BIT, 0x00}, + {0x7095, CRL_REG_LEN_08BIT, 0xbc}, + {0x7096, CRL_REG_LEN_08BIT, 0x01}, + {0x7097, CRL_REG_LEN_08BIT, 0x36}, + {0x7098, CRL_REG_LEN_08BIT, 0xd3}, + {0x7099, CRL_REG_LEN_08BIT, 0x30}, + {0x709a, 
CRL_REG_LEN_08BIT, 0x04}, + {0x709b, CRL_REG_LEN_08BIT, 0xe0}, + {0x709c, CRL_REG_LEN_08BIT, 0xd8}, + {0x709d, CRL_REG_LEN_08BIT, 0xb4}, + {0x709e, CRL_REG_LEN_08BIT, 0xe9}, + {0x709f, CRL_REG_LEN_08BIT, 0x00}, + {0x70a0, CRL_REG_LEN_08BIT, 0xbe}, + {0x70a1, CRL_REG_LEN_08BIT, 0x05}, + {0x70a2, CRL_REG_LEN_08BIT, 0x62}, + {0x70a3, CRL_REG_LEN_08BIT, 0x07}, + {0x70a4, CRL_REG_LEN_08BIT, 0xb9}, + {0x70a5, CRL_REG_LEN_08BIT, 0x05}, + {0x70a6, CRL_REG_LEN_08BIT, 0xad}, + {0x70a7, CRL_REG_LEN_08BIT, 0xc3}, + {0x70a8, CRL_REG_LEN_08BIT, 0xcf}, + {0x70a9, CRL_REG_LEN_08BIT, 0x00}, + {0x70aa, CRL_REG_LEN_08BIT, 0x15}, + {0x70ab, CRL_REG_LEN_08BIT, 0xc2}, + {0x70ac, CRL_REG_LEN_08BIT, 0x59}, + {0x70ad, CRL_REG_LEN_08BIT, 0xc3}, + {0x70ae, CRL_REG_LEN_08BIT, 0xc9}, + {0x70af, CRL_REG_LEN_08BIT, 0xc0}, + {0x70b0, CRL_REG_LEN_08BIT, 0x55}, + {0x70b1, CRL_REG_LEN_08BIT, 0x00}, + {0x70b2, CRL_REG_LEN_08BIT, 0x46}, + {0x70b3, CRL_REG_LEN_08BIT, 0xa1}, + {0x70b4, CRL_REG_LEN_08BIT, 0xb9}, + {0x70b5, CRL_REG_LEN_08BIT, 0x64}, + {0x70b6, CRL_REG_LEN_08BIT, 0x29}, + {0x70b7, CRL_REG_LEN_08BIT, 0x00}, + {0x70b8, CRL_REG_LEN_08BIT, 0xb8}, + {0x70b9, CRL_REG_LEN_08BIT, 0x02}, + {0x70ba, CRL_REG_LEN_08BIT, 0xbe}, + {0x70bb, CRL_REG_LEN_08BIT, 0x02}, + {0x70bc, CRL_REG_LEN_08BIT, 0xd0}, + {0x70bd, CRL_REG_LEN_08BIT, 0xdc}, + {0x70be, CRL_REG_LEN_08BIT, 0xac}, + {0x70bf, CRL_REG_LEN_08BIT, 0xbc}, + {0x70c0, CRL_REG_LEN_08BIT, 0x01}, + {0x70c1, CRL_REG_LEN_08BIT, 0x37}, + {0x70c2, CRL_REG_LEN_08BIT, 0xac}, + {0x70c3, CRL_REG_LEN_08BIT, 0xd2}, + {0x70c4, CRL_REG_LEN_08BIT, 0x45}, + {0x70c5, CRL_REG_LEN_08BIT, 0xad}, + {0x70c6, CRL_REG_LEN_08BIT, 0x28}, + {0x70c7, CRL_REG_LEN_08BIT, 0x00}, + {0x70c8, CRL_REG_LEN_08BIT, 0xb8}, + {0x70c9, CRL_REG_LEN_08BIT, 0x00}, + {0x70ca, CRL_REG_LEN_08BIT, 0xbc}, + {0x70cb, CRL_REG_LEN_08BIT, 0x01}, + {0x70cc, CRL_REG_LEN_08BIT, 0x36}, + {0x70cd, CRL_REG_LEN_08BIT, 0x30}, + {0x70ce, CRL_REG_LEN_08BIT, 0xe0}, + {0x70cf, CRL_REG_LEN_08BIT, 0xd8}, + {0x70d0, CRL_REG_LEN_08BIT, 0xb5}, + {0x70d1, CRL_REG_LEN_08BIT, 0x0b}, + {0x70d2, CRL_REG_LEN_08BIT, 0xd6}, + {0x70d3, CRL_REG_LEN_08BIT, 0xbe}, + {0x70d4, CRL_REG_LEN_08BIT, 0x07}, + {0x70d5, CRL_REG_LEN_08BIT, 0x00}, + {0x70d6, CRL_REG_LEN_08BIT, 0x62}, + {0x70d7, CRL_REG_LEN_08BIT, 0x07}, + {0x70d8, CRL_REG_LEN_08BIT, 0xb9}, + {0x70d9, CRL_REG_LEN_08BIT, 0x05}, + {0x70da, CRL_REG_LEN_08BIT, 0xad}, + {0x70db, CRL_REG_LEN_08BIT, 0xc3}, + {0x70dc, CRL_REG_LEN_08BIT, 0xcf}, + {0x70dd, CRL_REG_LEN_08BIT, 0x46}, + {0x70de, CRL_REG_LEN_08BIT, 0xcd}, + {0x70df, CRL_REG_LEN_08BIT, 0x07}, + {0x70e0, CRL_REG_LEN_08BIT, 0xcd}, + {0x70e1, CRL_REG_LEN_08BIT, 0x00}, + {0x70e2, CRL_REG_LEN_08BIT, 0xe3}, + {0x70e3, CRL_REG_LEN_08BIT, 0x18}, + {0x70e4, CRL_REG_LEN_08BIT, 0xc2}, + {0x70e5, CRL_REG_LEN_08BIT, 0xa2}, + {0x70e6, CRL_REG_LEN_08BIT, 0xb9}, + {0x70e7, CRL_REG_LEN_08BIT, 0x64}, + {0x70e8, CRL_REG_LEN_08BIT, 0xd1}, + {0x70e9, CRL_REG_LEN_08BIT, 0xdd}, + {0x70ea, CRL_REG_LEN_08BIT, 0xac}, + {0x70eb, CRL_REG_LEN_08BIT, 0xcf}, + {0x70ec, CRL_REG_LEN_08BIT, 0xdf}, + {0x70ed, CRL_REG_LEN_08BIT, 0xb5}, + {0x70ee, CRL_REG_LEN_08BIT, 0x19}, + {0x70ef, CRL_REG_LEN_08BIT, 0x46}, + {0x70f0, CRL_REG_LEN_08BIT, 0x50}, + {0x70f1, CRL_REG_LEN_08BIT, 0xb6}, + {0x70f2, CRL_REG_LEN_08BIT, 0xee}, + {0x70f3, CRL_REG_LEN_08BIT, 0xe8}, + {0x70f4, CRL_REG_LEN_08BIT, 0xe6}, + {0x70f5, CRL_REG_LEN_08BIT, 0xbc}, + {0x70f6, CRL_REG_LEN_08BIT, 0x31}, + {0x70f7, CRL_REG_LEN_08BIT, 0xe1}, + {0x70f8, CRL_REG_LEN_08BIT, 0x36}, + {0x70f9, CRL_REG_LEN_08BIT, 0x30}, + {0x70fa, 
CRL_REG_LEN_08BIT, 0xd3}, + {0x70fb, CRL_REG_LEN_08BIT, 0x2e}, + {0x70fc, CRL_REG_LEN_08BIT, 0x54}, + {0x70fd, CRL_REG_LEN_08BIT, 0xbd}, + {0x70fe, CRL_REG_LEN_08BIT, 0x03}, + {0x70ff, CRL_REG_LEN_08BIT, 0xec}, + {0x7100, CRL_REG_LEN_08BIT, 0x2c}, + {0x7101, CRL_REG_LEN_08BIT, 0x50}, + {0x7102, CRL_REG_LEN_08BIT, 0x20}, + {0x7103, CRL_REG_LEN_08BIT, 0x04}, + {0x7104, CRL_REG_LEN_08BIT, 0xb8}, + {0x7105, CRL_REG_LEN_08BIT, 0x02}, + {0x7106, CRL_REG_LEN_08BIT, 0xbc}, + {0x7107, CRL_REG_LEN_08BIT, 0x18}, + {0x7108, CRL_REG_LEN_08BIT, 0xc7}, + {0x7109, CRL_REG_LEN_08BIT, 0xb8}, + {0x710a, CRL_REG_LEN_08BIT, 0x00}, + {0x710b, CRL_REG_LEN_08BIT, 0x28}, + {0x710c, CRL_REG_LEN_08BIT, 0x54}, + {0x710d, CRL_REG_LEN_08BIT, 0xbc}, + {0x710e, CRL_REG_LEN_08BIT, 0x02}, + {0x710f, CRL_REG_LEN_08BIT, 0xb4}, + {0x7110, CRL_REG_LEN_08BIT, 0xda}, + {0x7111, CRL_REG_LEN_08BIT, 0xbe}, + {0x7112, CRL_REG_LEN_08BIT, 0x04}, + {0x7113, CRL_REG_LEN_08BIT, 0xd6}, + {0x7114, CRL_REG_LEN_08BIT, 0xd8}, + {0x7115, CRL_REG_LEN_08BIT, 0xab}, + {0x7116, CRL_REG_LEN_08BIT, 0x00}, + {0x7117, CRL_REG_LEN_08BIT, 0x62}, + {0x7118, CRL_REG_LEN_08BIT, 0x07}, + {0x7119, CRL_REG_LEN_08BIT, 0xb9}, + {0x711a, CRL_REG_LEN_08BIT, 0x05}, + {0x711b, CRL_REG_LEN_08BIT, 0xad}, + {0x711c, CRL_REG_LEN_08BIT, 0xc3}, + {0x711d, CRL_REG_LEN_08BIT, 0xbc}, + {0x711e, CRL_REG_LEN_08BIT, 0xe7}, + {0x711f, CRL_REG_LEN_08BIT, 0xb9}, + {0x7120, CRL_REG_LEN_08BIT, 0x64}, + {0x7121, CRL_REG_LEN_08BIT, 0x29}, + {0x7122, CRL_REG_LEN_08BIT, 0x00}, + {0x7123, CRL_REG_LEN_08BIT, 0xb8}, + {0x7124, CRL_REG_LEN_08BIT, 0x02}, + {0x7125, CRL_REG_LEN_08BIT, 0xbe}, + {0x7126, CRL_REG_LEN_08BIT, 0x00}, + {0x7127, CRL_REG_LEN_08BIT, 0x45}, + {0x7128, CRL_REG_LEN_08BIT, 0xad}, + {0x7129, CRL_REG_LEN_08BIT, 0xe2}, + {0x712a, CRL_REG_LEN_08BIT, 0x28}, + {0x712b, CRL_REG_LEN_08BIT, 0x00}, + {0x712c, CRL_REG_LEN_08BIT, 0xb8}, + {0x712d, CRL_REG_LEN_08BIT, 0x00}, + {0x712e, CRL_REG_LEN_08BIT, 0xe0}, + {0x712f, CRL_REG_LEN_08BIT, 0xd8}, + {0x7130, CRL_REG_LEN_08BIT, 0xb4}, + {0x7131, CRL_REG_LEN_08BIT, 0xe9}, + {0x7132, CRL_REG_LEN_08BIT, 0xbe}, + {0x7133, CRL_REG_LEN_08BIT, 0x03}, + {0x7134, CRL_REG_LEN_08BIT, 0x00}, + {0x7135, CRL_REG_LEN_08BIT, 0x30}, + {0x7136, CRL_REG_LEN_08BIT, 0x62}, + {0x7137, CRL_REG_LEN_08BIT, 0x07}, + {0x7138, CRL_REG_LEN_08BIT, 0xb9}, + {0x7139, CRL_REG_LEN_08BIT, 0x05}, + {0x713a, CRL_REG_LEN_08BIT, 0xad}, + {0x713b, CRL_REG_LEN_08BIT, 0xc3}, + {0x713c, CRL_REG_LEN_08BIT, 0xcf}, + {0x713d, CRL_REG_LEN_08BIT, 0x42}, + {0x713e, CRL_REG_LEN_08BIT, 0xe4}, + {0x713f, CRL_REG_LEN_08BIT, 0xcd}, + {0x7140, CRL_REG_LEN_08BIT, 0x07}, + {0x7141, CRL_REG_LEN_08BIT, 0xcd}, + {0x7142, CRL_REG_LEN_08BIT, 0x00}, + {0x7143, CRL_REG_LEN_08BIT, 0x00}, + {0x7144, CRL_REG_LEN_08BIT, 0x17}, + {0x7145, CRL_REG_LEN_08BIT, 0xc2}, + {0x7146, CRL_REG_LEN_08BIT, 0xbb}, + {0x7147, CRL_REG_LEN_08BIT, 0xde}, + {0x7148, CRL_REG_LEN_08BIT, 0xcf}, + {0x7149, CRL_REG_LEN_08BIT, 0xdf}, + {0x714a, CRL_REG_LEN_08BIT, 0xac}, + {0x714b, CRL_REG_LEN_08BIT, 0xd1}, + {0x714c, CRL_REG_LEN_08BIT, 0x44}, + {0x714d, CRL_REG_LEN_08BIT, 0xac}, + {0x714e, CRL_REG_LEN_08BIT, 0xb9}, + {0x714f, CRL_REG_LEN_08BIT, 0x76}, + {0x7150, CRL_REG_LEN_08BIT, 0xb8}, + {0x7151, CRL_REG_LEN_08BIT, 0x08}, + {0x7152, CRL_REG_LEN_08BIT, 0xb6}, + {0x7153, CRL_REG_LEN_08BIT, 0xfe}, + {0x7154, CRL_REG_LEN_08BIT, 0xb4}, + {0x7155, CRL_REG_LEN_08BIT, 0xca}, + {0x7156, CRL_REG_LEN_08BIT, 0xd6}, + {0x7157, CRL_REG_LEN_08BIT, 0xd8}, + {0x7158, CRL_REG_LEN_08BIT, 0xab}, + {0x7159, CRL_REG_LEN_08BIT, 0x00}, + {0x715a, 
CRL_REG_LEN_08BIT, 0xe1}, + {0x715b, CRL_REG_LEN_08BIT, 0x36}, + {0x715c, CRL_REG_LEN_08BIT, 0x30}, + {0x715d, CRL_REG_LEN_08BIT, 0xd3}, + {0x715e, CRL_REG_LEN_08BIT, 0xbc}, + {0x715f, CRL_REG_LEN_08BIT, 0x29}, + {0x7160, CRL_REG_LEN_08BIT, 0xb4}, + {0x7161, CRL_REG_LEN_08BIT, 0x1f}, + {0x7162, CRL_REG_LEN_08BIT, 0xaa}, + {0x7163, CRL_REG_LEN_08BIT, 0xbd}, + {0x7164, CRL_REG_LEN_08BIT, 0x01}, + {0x7165, CRL_REG_LEN_08BIT, 0xb8}, + {0x7166, CRL_REG_LEN_08BIT, 0x0c}, + {0x7167, CRL_REG_LEN_08BIT, 0x45}, + {0x7168, CRL_REG_LEN_08BIT, 0xa4}, + {0x7169, CRL_REG_LEN_08BIT, 0xbd}, + {0x716a, CRL_REG_LEN_08BIT, 0x03}, + {0x716b, CRL_REG_LEN_08BIT, 0xec}, + {0x716c, CRL_REG_LEN_08BIT, 0xbc}, + {0x716d, CRL_REG_LEN_08BIT, 0x3d}, + {0x716e, CRL_REG_LEN_08BIT, 0xc3}, + {0x716f, CRL_REG_LEN_08BIT, 0xcf}, + {0x7170, CRL_REG_LEN_08BIT, 0x42}, + {0x7171, CRL_REG_LEN_08BIT, 0xb8}, + {0x7172, CRL_REG_LEN_08BIT, 0x00}, + {0x7173, CRL_REG_LEN_08BIT, 0xe4}, + {0x7174, CRL_REG_LEN_08BIT, 0xd5}, + {0x7175, CRL_REG_LEN_08BIT, 0x00}, + {0x7176, CRL_REG_LEN_08BIT, 0xb6}, + {0x7177, CRL_REG_LEN_08BIT, 0x00}, + {0x7178, CRL_REG_LEN_08BIT, 0x74}, + {0x7179, CRL_REG_LEN_08BIT, 0xbd}, + {0x717a, CRL_REG_LEN_08BIT, 0x03}, + {0x717b, CRL_REG_LEN_08BIT, 0xb5}, + {0x717c, CRL_REG_LEN_08BIT, 0x39}, + {0x717d, CRL_REG_LEN_08BIT, 0x40}, + {0x717e, CRL_REG_LEN_08BIT, 0x58}, + {0x717f, CRL_REG_LEN_08BIT, 0xdd}, + {0x7180, CRL_REG_LEN_08BIT, 0x19}, + {0x7181, CRL_REG_LEN_08BIT, 0xc1}, + {0x7182, CRL_REG_LEN_08BIT, 0xc8}, + {0x7183, CRL_REG_LEN_08BIT, 0xbd}, + {0x7184, CRL_REG_LEN_08BIT, 0x06}, + {0x7185, CRL_REG_LEN_08BIT, 0x17}, + {0x7186, CRL_REG_LEN_08BIT, 0xc1}, + {0x7187, CRL_REG_LEN_08BIT, 0xc6}, + {0x7188, CRL_REG_LEN_08BIT, 0xe8}, + {0x7189, CRL_REG_LEN_08BIT, 0x00}, + {0x718a, CRL_REG_LEN_08BIT, 0xc0}, + {0x718b, CRL_REG_LEN_08BIT, 0xc8}, + {0x718c, CRL_REG_LEN_08BIT, 0xe6}, + {0x718d, CRL_REG_LEN_08BIT, 0x95}, + {0x718e, CRL_REG_LEN_08BIT, 0x15}, + {0x718f, CRL_REG_LEN_08BIT, 0x00}, + {0x7190, CRL_REG_LEN_08BIT, 0xbc}, + {0x7191, CRL_REG_LEN_08BIT, 0x19}, + {0x7192, CRL_REG_LEN_08BIT, 0xb9}, + {0x7193, CRL_REG_LEN_08BIT, 0xf6}, + {0x7194, CRL_REG_LEN_08BIT, 0x14}, + {0x7195, CRL_REG_LEN_08BIT, 0xc1}, + {0x7196, CRL_REG_LEN_08BIT, 0xd0}, + {0x7197, CRL_REG_LEN_08BIT, 0xd1}, + {0x7198, CRL_REG_LEN_08BIT, 0xac}, + {0x7199, CRL_REG_LEN_08BIT, 0x37}, + {0x719a, CRL_REG_LEN_08BIT, 0xbc}, + {0x719b, CRL_REG_LEN_08BIT, 0x35}, + {0x719c, CRL_REG_LEN_08BIT, 0x36}, + {0x719d, CRL_REG_LEN_08BIT, 0x30}, + {0x719e, CRL_REG_LEN_08BIT, 0xe1}, + {0x719f, CRL_REG_LEN_08BIT, 0xd3}, + {0x71a0, CRL_REG_LEN_08BIT, 0x7a}, + {0x71a1, CRL_REG_LEN_08BIT, 0xb6}, + {0x71a2, CRL_REG_LEN_08BIT, 0x0c}, + {0x71a3, CRL_REG_LEN_08BIT, 0xff}, + {0x71a4, CRL_REG_LEN_08BIT, 0xb4}, + {0x71a5, CRL_REG_LEN_08BIT, 0xc7}, + {0x71a6, CRL_REG_LEN_08BIT, 0xd9}, + {0x71a7, CRL_REG_LEN_08BIT, 0x00}, + {0x71a8, CRL_REG_LEN_08BIT, 0xbd}, + {0x71a9, CRL_REG_LEN_08BIT, 0x01}, + {0x71aa, CRL_REG_LEN_08BIT, 0x56}, + {0x71ab, CRL_REG_LEN_08BIT, 0xc0}, + {0x71ac, CRL_REG_LEN_08BIT, 0xda}, + {0x71ad, CRL_REG_LEN_08BIT, 0xb4}, + {0x71ae, CRL_REG_LEN_08BIT, 0x1f}, + {0x71af, CRL_REG_LEN_08BIT, 0x56}, + {0x71b0, CRL_REG_LEN_08BIT, 0xaa}, + {0x71b1, CRL_REG_LEN_08BIT, 0xbc}, + {0x71b2, CRL_REG_LEN_08BIT, 0x08}, + {0x71b3, CRL_REG_LEN_08BIT, 0x00}, + {0x71b4, CRL_REG_LEN_08BIT, 0x57}, + {0x71b5, CRL_REG_LEN_08BIT, 0xe8}, + {0x71b6, CRL_REG_LEN_08BIT, 0xb5}, + {0x71b7, CRL_REG_LEN_08BIT, 0x36}, + {0x71b8, CRL_REG_LEN_08BIT, 0x00}, + {0x71b9, CRL_REG_LEN_08BIT, 0x54}, + {0x71ba, 
CRL_REG_LEN_08BIT, 0xe7}, + {0x71bb, CRL_REG_LEN_08BIT, 0xc8}, + {0x71bc, CRL_REG_LEN_08BIT, 0xb4}, + {0x71bd, CRL_REG_LEN_08BIT, 0x1f}, + {0x71be, CRL_REG_LEN_08BIT, 0x56}, + {0x71bf, CRL_REG_LEN_08BIT, 0xaa}, + {0x71c0, CRL_REG_LEN_08BIT, 0xbc}, + {0x71c1, CRL_REG_LEN_08BIT, 0x08}, + {0x71c2, CRL_REG_LEN_08BIT, 0x57}, + {0x71c3, CRL_REG_LEN_08BIT, 0x00}, + {0x71c4, CRL_REG_LEN_08BIT, 0xb5}, + {0x71c5, CRL_REG_LEN_08BIT, 0x36}, + {0x71c6, CRL_REG_LEN_08BIT, 0x00}, + {0x71c7, CRL_REG_LEN_08BIT, 0x54}, + {0x71c8, CRL_REG_LEN_08BIT, 0xc8}, + {0x71c9, CRL_REG_LEN_08BIT, 0xb5}, + {0x71ca, CRL_REG_LEN_08BIT, 0x18}, + {0x71cb, CRL_REG_LEN_08BIT, 0xd9}, + {0x71cc, CRL_REG_LEN_08BIT, 0x00}, + {0x71cd, CRL_REG_LEN_08BIT, 0xbd}, + {0x71ce, CRL_REG_LEN_08BIT, 0x01}, + {0x71cf, CRL_REG_LEN_08BIT, 0x56}, + {0x71d0, CRL_REG_LEN_08BIT, 0x08}, + {0x71d1, CRL_REG_LEN_08BIT, 0x57}, + {0x71d2, CRL_REG_LEN_08BIT, 0xe8}, + {0x71d3, CRL_REG_LEN_08BIT, 0xb4}, + {0x71d4, CRL_REG_LEN_08BIT, 0x42}, + {0x71d5, CRL_REG_LEN_08BIT, 0x00}, + {0x71d6, CRL_REG_LEN_08BIT, 0x54}, + {0x71d7, CRL_REG_LEN_08BIT, 0xe7}, + {0x71d8, CRL_REG_LEN_08BIT, 0xc8}, + {0x71d9, CRL_REG_LEN_08BIT, 0xab}, + {0x71da, CRL_REG_LEN_08BIT, 0x00}, + {0x71db, CRL_REG_LEN_08BIT, 0x66}, + {0x71dc, CRL_REG_LEN_08BIT, 0x62}, + {0x71dd, CRL_REG_LEN_08BIT, 0x06}, + {0x71de, CRL_REG_LEN_08BIT, 0x74}, + {0x71df, CRL_REG_LEN_08BIT, 0xb9}, + {0x71e0, CRL_REG_LEN_08BIT, 0x05}, + {0x71e1, CRL_REG_LEN_08BIT, 0xb7}, + {0x71e2, CRL_REG_LEN_08BIT, 0x14}, + {0x71e3, CRL_REG_LEN_08BIT, 0x0e}, + {0x71e4, CRL_REG_LEN_08BIT, 0xb7}, + {0x71e5, CRL_REG_LEN_08BIT, 0x04}, + {0x71e6, CRL_REG_LEN_08BIT, 0xc8}, + {0x7600, CRL_REG_LEN_08BIT, 0x04}, + {0x7601, CRL_REG_LEN_08BIT, 0x80}, + {0x7602, CRL_REG_LEN_08BIT, 0x07}, + {0x7603, CRL_REG_LEN_08BIT, 0x44}, + {0x7604, CRL_REG_LEN_08BIT, 0x05}, + {0x7605, CRL_REG_LEN_08BIT, 0x33}, + {0x7606, CRL_REG_LEN_08BIT, 0x0f}, + {0x7607, CRL_REG_LEN_08BIT, 0x00}, + {0x7608, CRL_REG_LEN_08BIT, 0x07}, + {0x7609, CRL_REG_LEN_08BIT, 0x40}, + {0x760a, CRL_REG_LEN_08BIT, 0x04}, + {0x760b, CRL_REG_LEN_08BIT, 0xe5}, + {0x760c, CRL_REG_LEN_08BIT, 0x06}, + {0x760d, CRL_REG_LEN_08BIT, 0x50}, + {0x760e, CRL_REG_LEN_08BIT, 0x04}, + {0x760f, CRL_REG_LEN_08BIT, 0xe4}, + {0x7610, CRL_REG_LEN_08BIT, 0x00}, + {0x7611, CRL_REG_LEN_08BIT, 0x00}, + {0x7612, CRL_REG_LEN_08BIT, 0x06}, + {0x7613, CRL_REG_LEN_08BIT, 0x5c}, + {0x7614, CRL_REG_LEN_08BIT, 0x00}, + {0x7615, CRL_REG_LEN_08BIT, 0x0f}, + {0x7616, CRL_REG_LEN_08BIT, 0x06}, + {0x7617, CRL_REG_LEN_08BIT, 0x1c}, + {0x7618, CRL_REG_LEN_08BIT, 0x00}, + {0x7619, CRL_REG_LEN_08BIT, 0x02}, + {0x761a, CRL_REG_LEN_08BIT, 0x06}, + {0x761b, CRL_REG_LEN_08BIT, 0xa2}, + {0x761c, CRL_REG_LEN_08BIT, 0x00}, + {0x761d, CRL_REG_LEN_08BIT, 0x01}, + {0x761e, CRL_REG_LEN_08BIT, 0x06}, + {0x761f, CRL_REG_LEN_08BIT, 0xae}, + {0x7620, CRL_REG_LEN_08BIT, 0x00}, + {0x7621, CRL_REG_LEN_08BIT, 0x0e}, + {0x7622, CRL_REG_LEN_08BIT, 0x05}, + {0x7623, CRL_REG_LEN_08BIT, 0x30}, + {0x7624, CRL_REG_LEN_08BIT, 0x07}, + {0x7625, CRL_REG_LEN_08BIT, 0x00}, + {0x7626, CRL_REG_LEN_08BIT, 0x0f}, + {0x7627, CRL_REG_LEN_08BIT, 0x00}, + {0x7628, CRL_REG_LEN_08BIT, 0x04}, + {0x7629, CRL_REG_LEN_08BIT, 0xe5}, + {0x762a, CRL_REG_LEN_08BIT, 0x05}, + {0x762b, CRL_REG_LEN_08BIT, 0x33}, + {0x762c, CRL_REG_LEN_08BIT, 0x06}, + {0x762d, CRL_REG_LEN_08BIT, 0x12}, + {0x762e, CRL_REG_LEN_08BIT, 0x00}, + {0x762f, CRL_REG_LEN_08BIT, 0x01}, + {0x7630, CRL_REG_LEN_08BIT, 0x06}, + {0x7631, CRL_REG_LEN_08BIT, 0x52}, + {0x7632, CRL_REG_LEN_08BIT, 0x00}, + {0x7633, 
CRL_REG_LEN_08BIT, 0x01}, + {0x7634, CRL_REG_LEN_08BIT, 0x06}, + {0x7635, CRL_REG_LEN_08BIT, 0x5e}, + {0x7636, CRL_REG_LEN_08BIT, 0x04}, + {0x7637, CRL_REG_LEN_08BIT, 0xe4}, + {0x7638, CRL_REG_LEN_08BIT, 0x00}, + {0x7639, CRL_REG_LEN_08BIT, 0x01}, + {0x763a, CRL_REG_LEN_08BIT, 0x05}, + {0x763b, CRL_REG_LEN_08BIT, 0x30}, + {0x763c, CRL_REG_LEN_08BIT, 0x0f}, + {0x763d, CRL_REG_LEN_08BIT, 0x00}, + {0x763e, CRL_REG_LEN_08BIT, 0x06}, + {0x763f, CRL_REG_LEN_08BIT, 0xa6}, + {0x7640, CRL_REG_LEN_08BIT, 0x00}, + {0x7641, CRL_REG_LEN_08BIT, 0x02}, + {0x7642, CRL_REG_LEN_08BIT, 0x06}, + {0x7643, CRL_REG_LEN_08BIT, 0x26}, + {0x7644, CRL_REG_LEN_08BIT, 0x00}, + {0x7645, CRL_REG_LEN_08BIT, 0x02}, + {0x7646, CRL_REG_LEN_08BIT, 0x05}, + {0x7647, CRL_REG_LEN_08BIT, 0x33}, + {0x7648, CRL_REG_LEN_08BIT, 0x06}, + {0x7649, CRL_REG_LEN_08BIT, 0x20}, + {0x764a, CRL_REG_LEN_08BIT, 0x0f}, + {0x764b, CRL_REG_LEN_08BIT, 0x00}, + {0x764c, CRL_REG_LEN_08BIT, 0x06}, + {0x764d, CRL_REG_LEN_08BIT, 0x56}, + {0x764e, CRL_REG_LEN_08BIT, 0x00}, + {0x764f, CRL_REG_LEN_08BIT, 0x02}, + {0x7650, CRL_REG_LEN_08BIT, 0x06}, + {0x7651, CRL_REG_LEN_08BIT, 0x16}, + {0x7652, CRL_REG_LEN_08BIT, 0x05}, + {0x7653, CRL_REG_LEN_08BIT, 0x33}, + {0x7654, CRL_REG_LEN_08BIT, 0x06}, + {0x7655, CRL_REG_LEN_08BIT, 0x10}, + {0x7656, CRL_REG_LEN_08BIT, 0x0f}, + {0x7657, CRL_REG_LEN_08BIT, 0x00}, + {0x7658, CRL_REG_LEN_08BIT, 0x06}, + {0x7659, CRL_REG_LEN_08BIT, 0x10}, + {0x765a, CRL_REG_LEN_08BIT, 0x0f}, + {0x765b, CRL_REG_LEN_08BIT, 0x00}, + {0x765c, CRL_REG_LEN_08BIT, 0x06}, + {0x765d, CRL_REG_LEN_08BIT, 0x20}, + {0x765e, CRL_REG_LEN_08BIT, 0x0f}, + {0x765f, CRL_REG_LEN_08BIT, 0x00}, + {0x7660, CRL_REG_LEN_08BIT, 0x00}, + {0x7661, CRL_REG_LEN_08BIT, 0x00}, + {0x7662, CRL_REG_LEN_08BIT, 0x00}, + {0x7663, CRL_REG_LEN_08BIT, 0x02}, + {0x7664, CRL_REG_LEN_08BIT, 0x04}, + {0x7665, CRL_REG_LEN_08BIT, 0xe5}, + {0x7666, CRL_REG_LEN_08BIT, 0x04}, + {0x7667, CRL_REG_LEN_08BIT, 0xe4}, + {0x7668, CRL_REG_LEN_08BIT, 0x0f}, + {0x7669, CRL_REG_LEN_08BIT, 0x00}, + {0x766a, CRL_REG_LEN_08BIT, 0x00}, + {0x766b, CRL_REG_LEN_08BIT, 0x00}, + {0x766c, CRL_REG_LEN_08BIT, 0x00}, + {0x766d, CRL_REG_LEN_08BIT, 0x01}, + {0x766e, CRL_REG_LEN_08BIT, 0x04}, + {0x766f, CRL_REG_LEN_08BIT, 0xe5}, + {0x7670, CRL_REG_LEN_08BIT, 0x04}, + {0x7671, CRL_REG_LEN_08BIT, 0xe4}, + {0x7672, CRL_REG_LEN_08BIT, 0x0f}, + {0x7673, CRL_REG_LEN_08BIT, 0x00}, + {0x7674, CRL_REG_LEN_08BIT, 0x00}, + {0x7675, CRL_REG_LEN_08BIT, 0x02}, + {0x7676, CRL_REG_LEN_08BIT, 0x04}, + {0x7677, CRL_REG_LEN_08BIT, 0xe4}, + {0x7678, CRL_REG_LEN_08BIT, 0x00}, + {0x7679, CRL_REG_LEN_08BIT, 0x02}, + {0x767a, CRL_REG_LEN_08BIT, 0x04}, + {0x767b, CRL_REG_LEN_08BIT, 0xc4}, + {0x767c, CRL_REG_LEN_08BIT, 0x00}, + {0x767d, CRL_REG_LEN_08BIT, 0x02}, + {0x767e, CRL_REG_LEN_08BIT, 0x04}, + {0x767f, CRL_REG_LEN_08BIT, 0xc4}, + {0x7680, CRL_REG_LEN_08BIT, 0x05}, + {0x7681, CRL_REG_LEN_08BIT, 0x83}, + {0x7682, CRL_REG_LEN_08BIT, 0x0f}, + {0x7683, CRL_REG_LEN_08BIT, 0x00}, + {0x7684, CRL_REG_LEN_08BIT, 0x00}, + {0x7685, CRL_REG_LEN_08BIT, 0x02}, + {0x7686, CRL_REG_LEN_08BIT, 0x04}, + {0x7687, CRL_REG_LEN_08BIT, 0xe4}, + {0x7688, CRL_REG_LEN_08BIT, 0x00}, + {0x7689, CRL_REG_LEN_08BIT, 0x02}, + {0x768a, CRL_REG_LEN_08BIT, 0x04}, + {0x768b, CRL_REG_LEN_08BIT, 0xc4}, + {0x768c, CRL_REG_LEN_08BIT, 0x00}, + {0x768d, CRL_REG_LEN_08BIT, 0x02}, + {0x768e, CRL_REG_LEN_08BIT, 0x04}, + {0x768f, CRL_REG_LEN_08BIT, 0xc4}, + {0x7690, CRL_REG_LEN_08BIT, 0x05}, + {0x7691, CRL_REG_LEN_08BIT, 0x83}, + {0x7692, CRL_REG_LEN_08BIT, 0x03}, + {0x7693, 
CRL_REG_LEN_08BIT, 0x0b}, + {0x7694, CRL_REG_LEN_08BIT, 0x05}, + {0x7695, CRL_REG_LEN_08BIT, 0x83}, + {0x7696, CRL_REG_LEN_08BIT, 0x00}, + {0x7697, CRL_REG_LEN_08BIT, 0x07}, + {0x7698, CRL_REG_LEN_08BIT, 0x05}, + {0x7699, CRL_REG_LEN_08BIT, 0x03}, + {0x769a, CRL_REG_LEN_08BIT, 0x00}, + {0x769b, CRL_REG_LEN_08BIT, 0x05}, + {0x769c, CRL_REG_LEN_08BIT, 0x05}, + {0x769d, CRL_REG_LEN_08BIT, 0x32}, + {0x769e, CRL_REG_LEN_08BIT, 0x05}, + {0x769f, CRL_REG_LEN_08BIT, 0x30}, + {0x76a0, CRL_REG_LEN_08BIT, 0x00}, + {0x76a1, CRL_REG_LEN_08BIT, 0x02}, + {0x76a2, CRL_REG_LEN_08BIT, 0x05}, + {0x76a3, CRL_REG_LEN_08BIT, 0x78}, + {0x76a4, CRL_REG_LEN_08BIT, 0x00}, + {0x76a5, CRL_REG_LEN_08BIT, 0x01}, + {0x76a6, CRL_REG_LEN_08BIT, 0x05}, + {0x76a7, CRL_REG_LEN_08BIT, 0x7c}, + {0x76a8, CRL_REG_LEN_08BIT, 0x03}, + {0x76a9, CRL_REG_LEN_08BIT, 0x9a}, + {0x76aa, CRL_REG_LEN_08BIT, 0x05}, + {0x76ab, CRL_REG_LEN_08BIT, 0x83}, + {0x76ac, CRL_REG_LEN_08BIT, 0x00}, + {0x76ad, CRL_REG_LEN_08BIT, 0x04}, + {0x76ae, CRL_REG_LEN_08BIT, 0x05}, + {0x76af, CRL_REG_LEN_08BIT, 0x03}, + {0x76b0, CRL_REG_LEN_08BIT, 0x00}, + {0x76b1, CRL_REG_LEN_08BIT, 0x03}, + {0x76b2, CRL_REG_LEN_08BIT, 0x05}, + {0x76b3, CRL_REG_LEN_08BIT, 0x32}, + {0x76b4, CRL_REG_LEN_08BIT, 0x05}, + {0x76b5, CRL_REG_LEN_08BIT, 0x30}, + {0x76b6, CRL_REG_LEN_08BIT, 0x00}, + {0x76b7, CRL_REG_LEN_08BIT, 0x02}, + {0x76b8, CRL_REG_LEN_08BIT, 0x05}, + {0x76b9, CRL_REG_LEN_08BIT, 0x78}, + {0x76ba, CRL_REG_LEN_08BIT, 0x00}, + {0x76bb, CRL_REG_LEN_08BIT, 0x01}, + {0x76bc, CRL_REG_LEN_08BIT, 0x05}, + {0x76bd, CRL_REG_LEN_08BIT, 0x7c}, + {0x76be, CRL_REG_LEN_08BIT, 0x03}, + {0x76bf, CRL_REG_LEN_08BIT, 0x99}, + {0x76c0, CRL_REG_LEN_08BIT, 0x05}, + {0x76c1, CRL_REG_LEN_08BIT, 0x83}, + {0x76c2, CRL_REG_LEN_08BIT, 0x00}, + {0x76c3, CRL_REG_LEN_08BIT, 0x03}, + {0x76c4, CRL_REG_LEN_08BIT, 0x05}, + {0x76c5, CRL_REG_LEN_08BIT, 0x03}, + {0x76c6, CRL_REG_LEN_08BIT, 0x00}, + {0x76c7, CRL_REG_LEN_08BIT, 0x01}, + {0x76c8, CRL_REG_LEN_08BIT, 0x05}, + {0x76c9, CRL_REG_LEN_08BIT, 0x32}, + {0x76ca, CRL_REG_LEN_08BIT, 0x05}, + {0x76cb, CRL_REG_LEN_08BIT, 0x30}, + {0x76cc, CRL_REG_LEN_08BIT, 0x00}, + {0x76cd, CRL_REG_LEN_08BIT, 0x02}, + {0x76ce, CRL_REG_LEN_08BIT, 0x05}, + {0x76cf, CRL_REG_LEN_08BIT, 0x78}, + {0x76d0, CRL_REG_LEN_08BIT, 0x00}, + {0x76d1, CRL_REG_LEN_08BIT, 0x01}, + {0x76d2, CRL_REG_LEN_08BIT, 0x05}, + {0x76d3, CRL_REG_LEN_08BIT, 0x7c}, + {0x76d4, CRL_REG_LEN_08BIT, 0x03}, + {0x76d5, CRL_REG_LEN_08BIT, 0x98}, + {0x76d6, CRL_REG_LEN_08BIT, 0x05}, + {0x76d7, CRL_REG_LEN_08BIT, 0x83}, + {0x76d8, CRL_REG_LEN_08BIT, 0x00}, + {0x76d9, CRL_REG_LEN_08BIT, 0x00}, + {0x76da, CRL_REG_LEN_08BIT, 0x05}, + {0x76db, CRL_REG_LEN_08BIT, 0x03}, + {0x76dc, CRL_REG_LEN_08BIT, 0x00}, + {0x76dd, CRL_REG_LEN_08BIT, 0x01}, + {0x76de, CRL_REG_LEN_08BIT, 0x05}, + {0x76df, CRL_REG_LEN_08BIT, 0x32}, + {0x76e0, CRL_REG_LEN_08BIT, 0x05}, + {0x76e1, CRL_REG_LEN_08BIT, 0x30}, + {0x76e2, CRL_REG_LEN_08BIT, 0x00}, + {0x76e3, CRL_REG_LEN_08BIT, 0x02}, + {0x76e4, CRL_REG_LEN_08BIT, 0x05}, + {0x76e5, CRL_REG_LEN_08BIT, 0x78}, + {0x76e6, CRL_REG_LEN_08BIT, 0x00}, + {0x76e7, CRL_REG_LEN_08BIT, 0x01}, + {0x76e8, CRL_REG_LEN_08BIT, 0x05}, + {0x76e9, CRL_REG_LEN_08BIT, 0x7c}, + {0x76ea, CRL_REG_LEN_08BIT, 0x03}, + {0x76eb, CRL_REG_LEN_08BIT, 0x97}, + {0x76ec, CRL_REG_LEN_08BIT, 0x05}, + {0x76ed, CRL_REG_LEN_08BIT, 0x83}, + {0x76ee, CRL_REG_LEN_08BIT, 0x00}, + {0x76ef, CRL_REG_LEN_08BIT, 0x00}, + {0x76f0, CRL_REG_LEN_08BIT, 0x05}, + {0x76f1, CRL_REG_LEN_08BIT, 0x03}, + {0x76f2, CRL_REG_LEN_08BIT, 0x05}, + {0x76f3, 
CRL_REG_LEN_08BIT, 0x32}, + {0x76f4, CRL_REG_LEN_08BIT, 0x05}, + {0x76f5, CRL_REG_LEN_08BIT, 0x30}, + {0x76f6, CRL_REG_LEN_08BIT, 0x00}, + {0x76f7, CRL_REG_LEN_08BIT, 0x02}, + {0x76f8, CRL_REG_LEN_08BIT, 0x05}, + {0x76f9, CRL_REG_LEN_08BIT, 0x78}, + {0x76fa, CRL_REG_LEN_08BIT, 0x00}, + {0x76fb, CRL_REG_LEN_08BIT, 0x01}, + {0x76fc, CRL_REG_LEN_08BIT, 0x05}, + {0x76fd, CRL_REG_LEN_08BIT, 0x7c}, + {0x76fe, CRL_REG_LEN_08BIT, 0x03}, + {0x76ff, CRL_REG_LEN_08BIT, 0x96}, + {0x7700, CRL_REG_LEN_08BIT, 0x05}, + {0x7701, CRL_REG_LEN_08BIT, 0x83}, + {0x7702, CRL_REG_LEN_08BIT, 0x05}, + {0x7703, CRL_REG_LEN_08BIT, 0x03}, + {0x7704, CRL_REG_LEN_08BIT, 0x05}, + {0x7705, CRL_REG_LEN_08BIT, 0x32}, + {0x7706, CRL_REG_LEN_08BIT, 0x05}, + {0x7707, CRL_REG_LEN_08BIT, 0x30}, + {0x7708, CRL_REG_LEN_08BIT, 0x00}, + {0x7709, CRL_REG_LEN_08BIT, 0x02}, + {0x770a, CRL_REG_LEN_08BIT, 0x05}, + {0x770b, CRL_REG_LEN_08BIT, 0x78}, + {0x770c, CRL_REG_LEN_08BIT, 0x00}, + {0x770d, CRL_REG_LEN_08BIT, 0x01}, + {0x770e, CRL_REG_LEN_08BIT, 0x05}, + {0x770f, CRL_REG_LEN_08BIT, 0x7c}, + {0x7710, CRL_REG_LEN_08BIT, 0x03}, + {0x7711, CRL_REG_LEN_08BIT, 0x95}, + {0x7712, CRL_REG_LEN_08BIT, 0x05}, + {0x7713, CRL_REG_LEN_08BIT, 0x83}, + {0x7714, CRL_REG_LEN_08BIT, 0x05}, + {0x7715, CRL_REG_LEN_08BIT, 0x03}, + {0x7716, CRL_REG_LEN_08BIT, 0x05}, + {0x7717, CRL_REG_LEN_08BIT, 0x32}, + {0x7718, CRL_REG_LEN_08BIT, 0x05}, + {0x7719, CRL_REG_LEN_08BIT, 0x30}, + {0x771a, CRL_REG_LEN_08BIT, 0x00}, + {0x771b, CRL_REG_LEN_08BIT, 0x02}, + {0x771c, CRL_REG_LEN_08BIT, 0x05}, + {0x771d, CRL_REG_LEN_08BIT, 0x78}, + {0x771e, CRL_REG_LEN_08BIT, 0x00}, + {0x771f, CRL_REG_LEN_08BIT, 0x01}, + {0x7720, CRL_REG_LEN_08BIT, 0x05}, + {0x7721, CRL_REG_LEN_08BIT, 0x7c}, + {0x7722, CRL_REG_LEN_08BIT, 0x03}, + {0x7723, CRL_REG_LEN_08BIT, 0x94}, + {0x7724, CRL_REG_LEN_08BIT, 0x05}, + {0x7725, CRL_REG_LEN_08BIT, 0x83}, + {0x7726, CRL_REG_LEN_08BIT, 0x00}, + {0x7727, CRL_REG_LEN_08BIT, 0x01}, + {0x7728, CRL_REG_LEN_08BIT, 0x05}, + {0x7729, CRL_REG_LEN_08BIT, 0x03}, + {0x772a, CRL_REG_LEN_08BIT, 0x00}, + {0x772b, CRL_REG_LEN_08BIT, 0x01}, + {0x772c, CRL_REG_LEN_08BIT, 0x05}, + {0x772d, CRL_REG_LEN_08BIT, 0x32}, + {0x772e, CRL_REG_LEN_08BIT, 0x05}, + {0x772f, CRL_REG_LEN_08BIT, 0x30}, + {0x7730, CRL_REG_LEN_08BIT, 0x00}, + {0x7731, CRL_REG_LEN_08BIT, 0x02}, + {0x7732, CRL_REG_LEN_08BIT, 0x05}, + {0x7733, CRL_REG_LEN_08BIT, 0x78}, + {0x7734, CRL_REG_LEN_08BIT, 0x00}, + {0x7735, CRL_REG_LEN_08BIT, 0x01}, + {0x7736, CRL_REG_LEN_08BIT, 0x05}, + {0x7737, CRL_REG_LEN_08BIT, 0x7c}, + {0x7738, CRL_REG_LEN_08BIT, 0x03}, + {0x7739, CRL_REG_LEN_08BIT, 0x93}, + {0x773a, CRL_REG_LEN_08BIT, 0x05}, + {0x773b, CRL_REG_LEN_08BIT, 0x83}, + {0x773c, CRL_REG_LEN_08BIT, 0x00}, + {0x773d, CRL_REG_LEN_08BIT, 0x00}, + {0x773e, CRL_REG_LEN_08BIT, 0x05}, + {0x773f, CRL_REG_LEN_08BIT, 0x03}, + {0x7740, CRL_REG_LEN_08BIT, 0x00}, + {0x7741, CRL_REG_LEN_08BIT, 0x00}, + {0x7742, CRL_REG_LEN_08BIT, 0x05}, + {0x7743, CRL_REG_LEN_08BIT, 0x32}, + {0x7744, CRL_REG_LEN_08BIT, 0x05}, + {0x7745, CRL_REG_LEN_08BIT, 0x30}, + {0x7746, CRL_REG_LEN_08BIT, 0x00}, + {0x7747, CRL_REG_LEN_08BIT, 0x02}, + {0x7748, CRL_REG_LEN_08BIT, 0x05}, + {0x7749, CRL_REG_LEN_08BIT, 0x78}, + {0x774a, CRL_REG_LEN_08BIT, 0x00}, + {0x774b, CRL_REG_LEN_08BIT, 0x01}, + {0x774c, CRL_REG_LEN_08BIT, 0x05}, + {0x774d, CRL_REG_LEN_08BIT, 0x7c}, + {0x774e, CRL_REG_LEN_08BIT, 0x03}, + {0x774f, CRL_REG_LEN_08BIT, 0x92}, + {0x7750, CRL_REG_LEN_08BIT, 0x05}, + {0x7751, CRL_REG_LEN_08BIT, 0x83}, + {0x7752, CRL_REG_LEN_08BIT, 0x05}, + {0x7753, 
CRL_REG_LEN_08BIT, 0x03}, + {0x7754, CRL_REG_LEN_08BIT, 0x00}, + {0x7755, CRL_REG_LEN_08BIT, 0x00}, + {0x7756, CRL_REG_LEN_08BIT, 0x05}, + {0x7757, CRL_REG_LEN_08BIT, 0x32}, + {0x7758, CRL_REG_LEN_08BIT, 0x05}, + {0x7759, CRL_REG_LEN_08BIT, 0x30}, + {0x775a, CRL_REG_LEN_08BIT, 0x00}, + {0x775b, CRL_REG_LEN_08BIT, 0x02}, + {0x775c, CRL_REG_LEN_08BIT, 0x05}, + {0x775d, CRL_REG_LEN_08BIT, 0x78}, + {0x775e, CRL_REG_LEN_08BIT, 0x00}, + {0x775f, CRL_REG_LEN_08BIT, 0x01}, + {0x7760, CRL_REG_LEN_08BIT, 0x05}, + {0x7761, CRL_REG_LEN_08BIT, 0x7c}, + {0x7762, CRL_REG_LEN_08BIT, 0x03}, + {0x7763, CRL_REG_LEN_08BIT, 0x91}, + {0x7764, CRL_REG_LEN_08BIT, 0x05}, + {0x7765, CRL_REG_LEN_08BIT, 0x83}, + {0x7766, CRL_REG_LEN_08BIT, 0x05}, + {0x7767, CRL_REG_LEN_08BIT, 0x03}, + {0x7768, CRL_REG_LEN_08BIT, 0x05}, + {0x7769, CRL_REG_LEN_08BIT, 0x32}, + {0x776a, CRL_REG_LEN_08BIT, 0x05}, + {0x776b, CRL_REG_LEN_08BIT, 0x30}, + {0x776c, CRL_REG_LEN_08BIT, 0x00}, + {0x776d, CRL_REG_LEN_08BIT, 0x02}, + {0x776e, CRL_REG_LEN_08BIT, 0x05}, + {0x776f, CRL_REG_LEN_08BIT, 0x78}, + {0x7770, CRL_REG_LEN_08BIT, 0x00}, + {0x7771, CRL_REG_LEN_08BIT, 0x01}, + {0x7772, CRL_REG_LEN_08BIT, 0x05}, + {0x7773, CRL_REG_LEN_08BIT, 0x7c}, + {0x7774, CRL_REG_LEN_08BIT, 0x03}, + {0x7775, CRL_REG_LEN_08BIT, 0x90}, + {0x7776, CRL_REG_LEN_08BIT, 0x05}, + {0x7777, CRL_REG_LEN_08BIT, 0x83}, + {0x7778, CRL_REG_LEN_08BIT, 0x05}, + {0x7779, CRL_REG_LEN_08BIT, 0x03}, + {0x777a, CRL_REG_LEN_08BIT, 0x05}, + {0x777b, CRL_REG_LEN_08BIT, 0x32}, + {0x777c, CRL_REG_LEN_08BIT, 0x05}, + {0x777d, CRL_REG_LEN_08BIT, 0x30}, + {0x777e, CRL_REG_LEN_08BIT, 0x00}, + {0x777f, CRL_REG_LEN_08BIT, 0x02}, + {0x7780, CRL_REG_LEN_08BIT, 0x05}, + {0x7781, CRL_REG_LEN_08BIT, 0x78}, + {0x7782, CRL_REG_LEN_08BIT, 0x00}, + {0x7783, CRL_REG_LEN_08BIT, 0x01}, + {0x7784, CRL_REG_LEN_08BIT, 0x05}, + {0x7785, CRL_REG_LEN_08BIT, 0x7c}, + {0x7786, CRL_REG_LEN_08BIT, 0x02}, + {0x7787, CRL_REG_LEN_08BIT, 0x90}, + {0x7788, CRL_REG_LEN_08BIT, 0x05}, + {0x7789, CRL_REG_LEN_08BIT, 0x03}, + {0x778a, CRL_REG_LEN_08BIT, 0x07}, + {0x778b, CRL_REG_LEN_08BIT, 0x00}, + {0x778c, CRL_REG_LEN_08BIT, 0x0f}, + {0x778d, CRL_REG_LEN_08BIT, 0x00}, + {0x778e, CRL_REG_LEN_08BIT, 0x08}, + {0x778f, CRL_REG_LEN_08BIT, 0x30}, + {0x7790, CRL_REG_LEN_08BIT, 0x08}, + {0x7791, CRL_REG_LEN_08BIT, 0xee}, + {0x7792, CRL_REG_LEN_08BIT, 0x0f}, + {0x7793, CRL_REG_LEN_08BIT, 0x00}, + {0x7794, CRL_REG_LEN_08BIT, 0x05}, + {0x7795, CRL_REG_LEN_08BIT, 0x33}, + {0x7796, CRL_REG_LEN_08BIT, 0x04}, + {0x7797, CRL_REG_LEN_08BIT, 0xe5}, + {0x7798, CRL_REG_LEN_08BIT, 0x06}, + {0x7799, CRL_REG_LEN_08BIT, 0x52}, + {0x779a, CRL_REG_LEN_08BIT, 0x04}, + {0x779b, CRL_REG_LEN_08BIT, 0xe4}, + {0x779c, CRL_REG_LEN_08BIT, 0x00}, + {0x779d, CRL_REG_LEN_08BIT, 0x00}, + {0x779e, CRL_REG_LEN_08BIT, 0x06}, + {0x779f, CRL_REG_LEN_08BIT, 0x5e}, + {0x77a0, CRL_REG_LEN_08BIT, 0x00}, + {0x77a1, CRL_REG_LEN_08BIT, 0x0f}, + {0x77a2, CRL_REG_LEN_08BIT, 0x06}, + {0x77a3, CRL_REG_LEN_08BIT, 0x1e}, + {0x77a4, CRL_REG_LEN_08BIT, 0x00}, + {0x77a5, CRL_REG_LEN_08BIT, 0x02}, + {0x77a6, CRL_REG_LEN_08BIT, 0x06}, + {0x77a7, CRL_REG_LEN_08BIT, 0xa2}, + {0x77a8, CRL_REG_LEN_08BIT, 0x00}, + {0x77a9, CRL_REG_LEN_08BIT, 0x01}, + {0x77aa, CRL_REG_LEN_08BIT, 0x06}, + {0x77ab, CRL_REG_LEN_08BIT, 0xae}, + {0x77ac, CRL_REG_LEN_08BIT, 0x00}, + {0x77ad, CRL_REG_LEN_08BIT, 0x03}, + {0x77ae, CRL_REG_LEN_08BIT, 0x05}, + {0x77af, CRL_REG_LEN_08BIT, 0x30}, + {0x77b0, CRL_REG_LEN_08BIT, 0x09}, + {0x77b1, CRL_REG_LEN_08BIT, 0x19}, + {0x77b2, CRL_REG_LEN_08BIT, 0x0f}, + {0x77b3, 
CRL_REG_LEN_08BIT, 0x00}, + {0x77b4, CRL_REG_LEN_08BIT, 0x05}, + {0x77b5, CRL_REG_LEN_08BIT, 0x33}, + {0x77b6, CRL_REG_LEN_08BIT, 0x04}, + {0x77b7, CRL_REG_LEN_08BIT, 0xe5}, + {0x77b8, CRL_REG_LEN_08BIT, 0x06}, + {0x77b9, CRL_REG_LEN_08BIT, 0x52}, + {0x77ba, CRL_REG_LEN_08BIT, 0x04}, + {0x77bb, CRL_REG_LEN_08BIT, 0xe4}, + {0x77bc, CRL_REG_LEN_08BIT, 0x00}, + {0x77bd, CRL_REG_LEN_08BIT, 0x00}, + {0x77be, CRL_REG_LEN_08BIT, 0x06}, + {0x77bf, CRL_REG_LEN_08BIT, 0x5e}, + {0x77c0, CRL_REG_LEN_08BIT, 0x00}, + {0x77c1, CRL_REG_LEN_08BIT, 0x0f}, + {0x77c2, CRL_REG_LEN_08BIT, 0x06}, + {0x77c3, CRL_REG_LEN_08BIT, 0x1e}, + {0x77c4, CRL_REG_LEN_08BIT, 0x00}, + {0x77c5, CRL_REG_LEN_08BIT, 0x02}, + {0x77c6, CRL_REG_LEN_08BIT, 0x06}, + {0x77c7, CRL_REG_LEN_08BIT, 0xa2}, + {0x77c8, CRL_REG_LEN_08BIT, 0x00}, + {0x77c9, CRL_REG_LEN_08BIT, 0x01}, + {0x77ca, CRL_REG_LEN_08BIT, 0x06}, + {0x77cb, CRL_REG_LEN_08BIT, 0xae}, + {0x77cc, CRL_REG_LEN_08BIT, 0x00}, + {0x77cd, CRL_REG_LEN_08BIT, 0x03}, + {0x77ce, CRL_REG_LEN_08BIT, 0x05}, + {0x77cf, CRL_REG_LEN_08BIT, 0x30}, + {0x77d0, CRL_REG_LEN_08BIT, 0x0f}, + {0x77d1, CRL_REG_LEN_08BIT, 0x00}, + {0x77d2, CRL_REG_LEN_08BIT, 0x00}, + {0x77d3, CRL_REG_LEN_08BIT, 0x00}, + {0x77d4, CRL_REG_LEN_08BIT, 0x00}, + {0x77d5, CRL_REG_LEN_08BIT, 0x02}, + {0x77d6, CRL_REG_LEN_08BIT, 0x04}, + {0x77d7, CRL_REG_LEN_08BIT, 0xe5}, + {0x77d8, CRL_REG_LEN_08BIT, 0x04}, + {0x77d9, CRL_REG_LEN_08BIT, 0xe4}, + {0x77da, CRL_REG_LEN_08BIT, 0x05}, + {0x77db, CRL_REG_LEN_08BIT, 0x33}, + {0x77dc, CRL_REG_LEN_08BIT, 0x07}, + {0x77dd, CRL_REG_LEN_08BIT, 0x10}, + {0x77de, CRL_REG_LEN_08BIT, 0x00}, + {0x77df, CRL_REG_LEN_08BIT, 0x00}, + {0x77e0, CRL_REG_LEN_08BIT, 0x01}, + {0x77e1, CRL_REG_LEN_08BIT, 0xbb}, + {0x77e2, CRL_REG_LEN_08BIT, 0x00}, + {0x77e3, CRL_REG_LEN_08BIT, 0x00}, + {0x77e4, CRL_REG_LEN_08BIT, 0x01}, + {0x77e5, CRL_REG_LEN_08BIT, 0xaa}, + {0x77e6, CRL_REG_LEN_08BIT, 0x00}, + {0x77e7, CRL_REG_LEN_08BIT, 0x00}, + {0x77e8, CRL_REG_LEN_08BIT, 0x01}, + {0x77e9, CRL_REG_LEN_08BIT, 0x99}, + {0x77ea, CRL_REG_LEN_08BIT, 0x00}, + {0x77eb, CRL_REG_LEN_08BIT, 0x00}, + {0x77ec, CRL_REG_LEN_08BIT, 0x01}, + {0x77ed, CRL_REG_LEN_08BIT, 0x88}, + {0x77ee, CRL_REG_LEN_08BIT, 0x00}, + {0x77ef, CRL_REG_LEN_08BIT, 0x00}, + {0x77f0, CRL_REG_LEN_08BIT, 0x01}, + {0x77f1, CRL_REG_LEN_08BIT, 0x77}, + {0x77f2, CRL_REG_LEN_08BIT, 0x00}, + {0x77f3, CRL_REG_LEN_08BIT, 0x00}, + {0x77f4, CRL_REG_LEN_08BIT, 0x01}, + {0x77f5, CRL_REG_LEN_08BIT, 0x66}, + {0x77f6, CRL_REG_LEN_08BIT, 0x00}, + {0x77f7, CRL_REG_LEN_08BIT, 0x00}, + {0x77f8, CRL_REG_LEN_08BIT, 0x01}, + {0x77f9, CRL_REG_LEN_08BIT, 0x55}, + {0x77fa, CRL_REG_LEN_08BIT, 0x00}, + {0x77fb, CRL_REG_LEN_08BIT, 0x00}, + {0x77fc, CRL_REG_LEN_08BIT, 0x01}, + {0x77fd, CRL_REG_LEN_08BIT, 0x44}, + {0x77fe, CRL_REG_LEN_08BIT, 0x00}, + {0x77ff, CRL_REG_LEN_08BIT, 0x00}, + {0x7800, CRL_REG_LEN_08BIT, 0x01}, + {0x7801, CRL_REG_LEN_08BIT, 0x33}, + {0x7802, CRL_REG_LEN_08BIT, 0x00}, + {0x7803, CRL_REG_LEN_08BIT, 0x00}, + {0x7804, CRL_REG_LEN_08BIT, 0x01}, + {0x7805, CRL_REG_LEN_08BIT, 0x22}, + {0x7806, CRL_REG_LEN_08BIT, 0x00}, + {0x7807, CRL_REG_LEN_08BIT, 0x00}, + {0x7808, CRL_REG_LEN_08BIT, 0x01}, + {0x7809, CRL_REG_LEN_08BIT, 0x11}, + {0x780a, CRL_REG_LEN_08BIT, 0x00}, + {0x780b, CRL_REG_LEN_08BIT, 0x00}, + {0x780c, CRL_REG_LEN_08BIT, 0x01}, + {0x780d, CRL_REG_LEN_08BIT, 0x00}, + {0x780e, CRL_REG_LEN_08BIT, 0x01}, + {0x780f, CRL_REG_LEN_08BIT, 0xff}, + {0x7810, CRL_REG_LEN_08BIT, 0x07}, + {0x7811, CRL_REG_LEN_08BIT, 0x00}, + {0x7812, CRL_REG_LEN_08BIT, 0x02}, + {0x7813, 
CRL_REG_LEN_08BIT, 0xa0}, + {0x7814, CRL_REG_LEN_08BIT, 0x0f}, + {0x7815, CRL_REG_LEN_08BIT, 0x00}, + {0x7816, CRL_REG_LEN_08BIT, 0x08}, + {0x7817, CRL_REG_LEN_08BIT, 0x35}, + {0x7818, CRL_REG_LEN_08BIT, 0x06}, + {0x7819, CRL_REG_LEN_08BIT, 0x52}, + {0x781a, CRL_REG_LEN_08BIT, 0x04}, + {0x781b, CRL_REG_LEN_08BIT, 0xe4}, + {0x781c, CRL_REG_LEN_08BIT, 0x00}, + {0x781d, CRL_REG_LEN_08BIT, 0x00}, + {0x781e, CRL_REG_LEN_08BIT, 0x06}, + {0x781f, CRL_REG_LEN_08BIT, 0x5e}, + {0x7820, CRL_REG_LEN_08BIT, 0x05}, + {0x7821, CRL_REG_LEN_08BIT, 0x33}, + {0x7822, CRL_REG_LEN_08BIT, 0x09}, + {0x7823, CRL_REG_LEN_08BIT, 0x19}, + {0x7824, CRL_REG_LEN_08BIT, 0x06}, + {0x7825, CRL_REG_LEN_08BIT, 0x1e}, + {0x7826, CRL_REG_LEN_08BIT, 0x05}, + {0x7827, CRL_REG_LEN_08BIT, 0x33}, + {0x7828, CRL_REG_LEN_08BIT, 0x00}, + {0x7829, CRL_REG_LEN_08BIT, 0x01}, + {0x782a, CRL_REG_LEN_08BIT, 0x06}, + {0x782b, CRL_REG_LEN_08BIT, 0x24}, + {0x782c, CRL_REG_LEN_08BIT, 0x06}, + {0x782d, CRL_REG_LEN_08BIT, 0x20}, + {0x782e, CRL_REG_LEN_08BIT, 0x0f}, + {0x782f, CRL_REG_LEN_08BIT, 0x00}, + {0x7830, CRL_REG_LEN_08BIT, 0x08}, + {0x7831, CRL_REG_LEN_08BIT, 0x35}, + {0x7832, CRL_REG_LEN_08BIT, 0x07}, + {0x7833, CRL_REG_LEN_08BIT, 0x10}, + {0x7834, CRL_REG_LEN_08BIT, 0x00}, + {0x7835, CRL_REG_LEN_08BIT, 0x00}, + {0x7836, CRL_REG_LEN_08BIT, 0x01}, + {0x7837, CRL_REG_LEN_08BIT, 0xbb}, + {0x7838, CRL_REG_LEN_08BIT, 0x00}, + {0x7839, CRL_REG_LEN_08BIT, 0x00}, + {0x783a, CRL_REG_LEN_08BIT, 0x01}, + {0x783b, CRL_REG_LEN_08BIT, 0xaa}, + {0x783c, CRL_REG_LEN_08BIT, 0x00}, + {0x783d, CRL_REG_LEN_08BIT, 0x00}, + {0x783e, CRL_REG_LEN_08BIT, 0x01}, + {0x783f, CRL_REG_LEN_08BIT, 0x99}, + {0x7840, CRL_REG_LEN_08BIT, 0x00}, + {0x7841, CRL_REG_LEN_08BIT, 0x00}, + {0x7842, CRL_REG_LEN_08BIT, 0x01}, + {0x7843, CRL_REG_LEN_08BIT, 0x88}, + {0x7844, CRL_REG_LEN_08BIT, 0x00}, + {0x7845, CRL_REG_LEN_08BIT, 0x00}, + {0x7846, CRL_REG_LEN_08BIT, 0x01}, + {0x7847, CRL_REG_LEN_08BIT, 0x77}, + {0x7848, CRL_REG_LEN_08BIT, 0x00}, + {0x7849, CRL_REG_LEN_08BIT, 0x00}, + {0x784a, CRL_REG_LEN_08BIT, 0x01}, + {0x784b, CRL_REG_LEN_08BIT, 0x66}, + {0x784c, CRL_REG_LEN_08BIT, 0x00}, + {0x784d, CRL_REG_LEN_08BIT, 0x00}, + {0x784e, CRL_REG_LEN_08BIT, 0x01}, + {0x784f, CRL_REG_LEN_08BIT, 0x55}, + {0x7850, CRL_REG_LEN_08BIT, 0x00}, + {0x7851, CRL_REG_LEN_08BIT, 0x00}, + {0x7852, CRL_REG_LEN_08BIT, 0x01}, + {0x7853, CRL_REG_LEN_08BIT, 0x44}, + {0x7854, CRL_REG_LEN_08BIT, 0x00}, + {0x7855, CRL_REG_LEN_08BIT, 0x00}, + {0x7856, CRL_REG_LEN_08BIT, 0x01}, + {0x7857, CRL_REG_LEN_08BIT, 0x33}, + {0x7858, CRL_REG_LEN_08BIT, 0x00}, + {0x7859, CRL_REG_LEN_08BIT, 0x00}, + {0x785a, CRL_REG_LEN_08BIT, 0x01}, + {0x785b, CRL_REG_LEN_08BIT, 0x22}, + {0x785c, CRL_REG_LEN_08BIT, 0x00}, + {0x785d, CRL_REG_LEN_08BIT, 0x00}, + {0x785e, CRL_REG_LEN_08BIT, 0x01}, + {0x785f, CRL_REG_LEN_08BIT, 0x11}, + {0x7860, CRL_REG_LEN_08BIT, 0x00}, + {0x7861, CRL_REG_LEN_08BIT, 0x00}, + {0x7862, CRL_REG_LEN_08BIT, 0x01}, + {0x7863, CRL_REG_LEN_08BIT, 0x00}, + {0x7864, CRL_REG_LEN_08BIT, 0x07}, + {0x7865, CRL_REG_LEN_08BIT, 0x00}, + {0x7866, CRL_REG_LEN_08BIT, 0x01}, + {0x7867, CRL_REG_LEN_08BIT, 0xff}, + {0x7868, CRL_REG_LEN_08BIT, 0x02}, + {0x7869, CRL_REG_LEN_08BIT, 0xa0}, + {0x786a, CRL_REG_LEN_08BIT, 0x0f}, + {0x786b, CRL_REG_LEN_08BIT, 0x00}, + {0x786c, CRL_REG_LEN_08BIT, 0x08}, + {0x786d, CRL_REG_LEN_08BIT, 0x3a}, + {0x786e, CRL_REG_LEN_08BIT, 0x08}, + {0x786f, CRL_REG_LEN_08BIT, 0x6a}, + {0x7870, CRL_REG_LEN_08BIT, 0x0f}, + {0x7871, CRL_REG_LEN_08BIT, 0x00}, + {0x7872, CRL_REG_LEN_08BIT, 0x04}, + {0x7873, 
CRL_REG_LEN_08BIT, 0xc0}, + {0x7874, CRL_REG_LEN_08BIT, 0x09}, + {0x7875, CRL_REG_LEN_08BIT, 0x19}, + {0x7876, CRL_REG_LEN_08BIT, 0x04}, + {0x7877, CRL_REG_LEN_08BIT, 0x99}, + {0x7878, CRL_REG_LEN_08BIT, 0x07}, + {0x7879, CRL_REG_LEN_08BIT, 0x14}, + {0x787a, CRL_REG_LEN_08BIT, 0x00}, + {0x787b, CRL_REG_LEN_08BIT, 0x01}, + {0x787c, CRL_REG_LEN_08BIT, 0x04}, + {0x787d, CRL_REG_LEN_08BIT, 0xa4}, + {0x787e, CRL_REG_LEN_08BIT, 0x00}, + {0x787f, CRL_REG_LEN_08BIT, 0x07}, + {0x7880, CRL_REG_LEN_08BIT, 0x04}, + {0x7881, CRL_REG_LEN_08BIT, 0xa6}, + {0x7882, CRL_REG_LEN_08BIT, 0x00}, + {0x7883, CRL_REG_LEN_08BIT, 0x00}, + {0x7884, CRL_REG_LEN_08BIT, 0x04}, + {0x7885, CRL_REG_LEN_08BIT, 0xa0}, + {0x7886, CRL_REG_LEN_08BIT, 0x04}, + {0x7887, CRL_REG_LEN_08BIT, 0x80}, + {0x7888, CRL_REG_LEN_08BIT, 0x04}, + {0x7889, CRL_REG_LEN_08BIT, 0x00}, + {0x788a, CRL_REG_LEN_08BIT, 0x05}, + {0x788b, CRL_REG_LEN_08BIT, 0x03}, + {0x788c, CRL_REG_LEN_08BIT, 0x06}, + {0x788d, CRL_REG_LEN_08BIT, 0x00}, + {0x788e, CRL_REG_LEN_08BIT, 0x0f}, + {0x788f, CRL_REG_LEN_08BIT, 0x00}, + {0x7890, CRL_REG_LEN_08BIT, 0x0f}, + {0x7891, CRL_REG_LEN_08BIT, 0x00}, + {0x7892, CRL_REG_LEN_08BIT, 0x0f}, + {0x7893, CRL_REG_LEN_08BIT, 0x00}, + {0x30a0, CRL_REG_LEN_08BIT, 0x00}, + {0x30a1, CRL_REG_LEN_08BIT, 0x00}, + {0x30a2, CRL_REG_LEN_08BIT, 0x00}, + {0x30a3, CRL_REG_LEN_08BIT, 0x00}, + {0x30a4, CRL_REG_LEN_08BIT, 0x07}, + {0x30a5, CRL_REG_LEN_08BIT, 0x8f}, + {0x30a6, CRL_REG_LEN_08BIT, 0x04}, + {0x30a7, CRL_REG_LEN_08BIT, 0x47}, + {0x30a8, CRL_REG_LEN_08BIT, 0x00}, + {0x30a9, CRL_REG_LEN_08BIT, 0x05}, + {0x30aa, CRL_REG_LEN_08BIT, 0x00}, + {0x30ab, CRL_REG_LEN_08BIT, 0x04}, + {0x30ac, CRL_REG_LEN_08BIT, 0x07}, + {0x30ad, CRL_REG_LEN_08BIT, 0x88}, + {0x30ae, CRL_REG_LEN_08BIT, 0x04}, + {0x30af, CRL_REG_LEN_08BIT, 0x40}, + {0x30b0, CRL_REG_LEN_08BIT, 0x0d}, + {0x30b1, CRL_REG_LEN_08BIT, 0xde}, + {0x30b2, CRL_REG_LEN_08BIT, 0x04}, + {0x30b3, CRL_REG_LEN_08BIT, 0x66}, + {0x30b6, CRL_REG_LEN_08BIT, 0x04}, + {0x30b7, CRL_REG_LEN_08BIT, 0x62}, + {0x3196, CRL_REG_LEN_08BIT, 0x00}, + {0x3197, CRL_REG_LEN_08BIT, 0x0a}, + {0x3195, CRL_REG_LEN_08BIT, 0x29}, + {0x315a, CRL_REG_LEN_08BIT, 0x02}, + {0x315b, CRL_REG_LEN_08BIT, 0x00}, + {0x30bb, CRL_REG_LEN_08BIT, 0x40}, + {0x3250, CRL_REG_LEN_08BIT, 0xf7}, +}; + +/* ov2775_1928x1088_linearHCG_30fps_mipi960_regset */ +static struct crl_register_write_rep ov2775_linearHCG_30fps_mipi960_regset[] = { + {0x3190, CRL_REG_LEN_08BIT, 0x08}, /* interface control, output format setting */ + {0x30bb, CRL_REG_LEN_08BIT, 0x40}, /* Conversion gain, analog gain control */ + + {0x315a, CRL_REG_LEN_08BIT, 0x02}, /* Digital gain H in linear mode */ + {0x315b, CRL_REG_LEN_08BIT, 0x00}, /* Digital gain L in linear mode */ + {0x315c, CRL_REG_LEN_08BIT, 0x01}, /* Digital gain for LCG MSB */ + {0x315d, CRL_REG_LEN_08BIT, 0x00}, /* Digital gain for LCG LSB */ + {0x315e, CRL_REG_LEN_08BIT, 0x01}, /* Digital gain for VS MSB */ + {0x315f, CRL_REG_LEN_08BIT, 0x00}, /* Digital gain for VS LSB */ + + {0x30b7, CRL_REG_LEN_08BIT, 0x62}, /* DCG exposure time */ + {0x30b8, CRL_REG_LEN_08BIT, 0x00}, /* integer part of VS exposure time */ + {0x30b9, CRL_REG_LEN_08BIT, 0x02}, /* integer part of VS exposure time */ + + {0x3123, CRL_REG_LEN_08BIT, 0xf0}, /* unknown */ + {0x3199, CRL_REG_LEN_08BIT, 0x7f}, /* unknown */ + {0x319a, CRL_REG_LEN_08BIT, 0x80}, /* unknown */ + {0x319b, CRL_REG_LEN_08BIT, 0xff}, /* unknown */ + {0x3254, CRL_REG_LEN_08BIT, 0x00}, /* pre control */ + {0x33e2, CRL_REG_LEN_08BIT, 0x02}, /* DPC white threshold list */ + 
{0x33e3, CRL_REG_LEN_08BIT, 0x01}, /* DPC white threshold list */ + {0x33ed, CRL_REG_LEN_08BIT, 0x01}, /* DPC gain list VS */ + {0x33ee, CRL_REG_LEN_08BIT, 0x02}, /* DPC gain list VS */ + {0x33ef, CRL_REG_LEN_08BIT, 0x08}, /* DPC gain list VS */ + {0x346a, CRL_REG_LEN_08BIT, 0x33}, /* last embedded data range H */ + {0x346b, CRL_REG_LEN_08BIT, 0xbf}, /* last embedded data range L */ + {0x3195, CRL_REG_LEN_08BIT, 0x29}, /* vfifo read level */ +}; + +/* ov2775_1928x1088_linearLCG_30fps_mipi960_regset */ +static struct crl_register_write_rep ov2775_linearLCG_30fps_mipi960_regset[] = { + {0x3190, CRL_REG_LEN_08BIT, 0x08}, /* interface control, output format setting */ + {0x30bb, CRL_REG_LEN_08BIT, 0x01}, /* Conversion gain, analog gain control */ + + {0x315a, CRL_REG_LEN_08BIT, 0x01}, /* Digital gain H in linear mode */ + {0x315b, CRL_REG_LEN_08BIT, 0x80}, /* Digital gain L in linear mode */ + {0x315c, CRL_REG_LEN_08BIT, 0x01}, /* Digital gain for LCG MSB */ + {0x315d, CRL_REG_LEN_08BIT, 0x00}, /* Digital gain for LCG LSB */ + {0x315e, CRL_REG_LEN_08BIT, 0x01}, /* Digital gain for VS MSB */ + {0x315f, CRL_REG_LEN_08BIT, 0x00}, /* Digital gain for VS LSB */ + + {0x30b7, CRL_REG_LEN_08BIT, 0x62}, /* DCG exposure time */ + {0x30b8, CRL_REG_LEN_08BIT, 0x00}, /* integer part of VS exposure time */ + {0x30b9, CRL_REG_LEN_08BIT, 0x02}, /* integer part of VS exposure time */ + + {0x3123, CRL_REG_LEN_08BIT, 0xf0}, /* unknown */ + {0x3199, CRL_REG_LEN_08BIT, 0x7f}, /* unknown */ + {0x319a, CRL_REG_LEN_08BIT, 0x80}, /* unknown */ + {0x319b, CRL_REG_LEN_08BIT, 0xff}, /* unknown */ + {0x3254, CRL_REG_LEN_08BIT, 0x00}, /* pre control */ + {0x33e2, CRL_REG_LEN_08BIT, 0x02}, /* DPC white threshold list */ + {0x33e3, CRL_REG_LEN_08BIT, 0x01}, /* DPC white threshold list */ + {0x33ed, CRL_REG_LEN_08BIT, 0x01}, /* DPC gain list VS */ + {0x33ee, CRL_REG_LEN_08BIT, 0x02}, /* DPC gain list VS */ + {0x33ef, CRL_REG_LEN_08BIT, 0x08}, /* DPC gain list VS */ + {0x346a, CRL_REG_LEN_08BIT, 0x33}, /* last embedded data range H */ + {0x346b, CRL_REG_LEN_08BIT, 0xbf}, /* last embedded data range L */ + {0x3195, CRL_REG_LEN_08BIT, 0x29}, /* vfifo read level */ +}; + +/* ov2775_1928x1088_2x12_30fps_mipi960_regset */ +static struct crl_register_write_rep ov2775_2x12_30fps_mipi960_regset[] = { + {0x3190, CRL_REG_LEN_08BIT, 0x01}, /* interface control, output format setting */ + {0x30bb, CRL_REG_LEN_08BIT, 0x14}, /* Conversion gain, analog gain control */ + + {0x315a, CRL_REG_LEN_08BIT, 0x02}, /* Digital gain H in linear mode */ + {0x315b, CRL_REG_LEN_08BIT, 0x00}, /* Digital gain L in linear mode */ + {0x315c, CRL_REG_LEN_08BIT, 0x01}, /* Digital gain for LCG MSB */ + {0x315d, CRL_REG_LEN_08BIT, 0x80}, /* Digital gain for LCG LSB */ + {0x315e, CRL_REG_LEN_08BIT, 0x01}, /* Digital gain for VS MSB */ + {0x315f, CRL_REG_LEN_08BIT, 0x80}, /* Digital gain for VS LSB */ + + {0x30b7, CRL_REG_LEN_08BIT, 0x62}, /* DCG exposure time */ + {0x30b8, CRL_REG_LEN_08BIT, 0x00}, /* integer part of VS exposure time */ + {0x30b9, CRL_REG_LEN_08BIT, 0x02}, /* integer part of VS exposure time */ + + {0x3123, CRL_REG_LEN_08BIT, 0x00}, /* unknown */ + {0x3199, CRL_REG_LEN_08BIT, 0x3f}, /* unknown */ + {0x319a, CRL_REG_LEN_08BIT, 0x40}, /* unknown */ + {0x319b, CRL_REG_LEN_08BIT, 0x7f}, /* unknown */ + {0x3254, CRL_REG_LEN_08BIT, 0x11}, /* pre control */ + {0x33e2, CRL_REG_LEN_08BIT, 0x04}, /* DPC white threshold list */ + {0x33e3, CRL_REG_LEN_08BIT, 0x02}, /* DPC white threshold list */ + {0x33ed, CRL_REG_LEN_08BIT, 0x02}, /* DPC gain list VS */ + 
{0x33ee, CRL_REG_LEN_08BIT, 0x05}, /* DPC gain list VS */ + {0x33ef, CRL_REG_LEN_08BIT, 0x0a}, /* DPC gain list VS */ + {0x346a, CRL_REG_LEN_08BIT, 0x35}, /* last embedded data range H */ + {0x346b, CRL_REG_LEN_08BIT, 0x00}, /* last embedded data range L */ + {0x3195, CRL_REG_LEN_08BIT, 0x2e}, /* vfifo read level */ +}; + +/* ov2775_1928x1088_3x12_30fps_mipi960_regset */ +static struct crl_register_write_rep ov2775_3x12_30fps_mipi960_regset[] = { + {0x3190, CRL_REG_LEN_08BIT, 0x05}, /* interface control, output format setting */ + {0x30bb, CRL_REG_LEN_08BIT, 0x14}, /* Conversion gain, analog gain control */ + + {0x315a, CRL_REG_LEN_08BIT, 0x02}, /* Digital gain H in linear mode */ + {0x315b, CRL_REG_LEN_08BIT, 0x00}, /* Digital gain L in linear mode */ + {0x315c, CRL_REG_LEN_08BIT, 0x01}, /* Digital gain for LCG MSB */ + {0x315d, CRL_REG_LEN_08BIT, 0x80}, /* Digital gain for LCG LSB */ + {0x315e, CRL_REG_LEN_08BIT, 0x01}, /* Digital gain for VS MSB */ + {0x315f, CRL_REG_LEN_08BIT, 0x80}, /* Digital gain for VS LSB */ + + {0x30b7, CRL_REG_LEN_08BIT, 0x22}, /* DCG exposure time */ + {0x30b8, CRL_REG_LEN_08BIT, 0x00}, /* integer part of VS exposure time */ + {0x30b9, CRL_REG_LEN_08BIT, 0x40}, /* integer part of VS exposure time */ + + {0x3123, CRL_REG_LEN_08BIT, 0x00}, /* unknown */ + {0x3199, CRL_REG_LEN_08BIT, 0x3f}, /* unknown */ + {0x319a, CRL_REG_LEN_08BIT, 0x40}, /* unknown */ + {0x319b, CRL_REG_LEN_08BIT, 0x7f}, /* unknown */ + {0x3254, CRL_REG_LEN_08BIT, 0x11}, /* pre control */ + {0x33e2, CRL_REG_LEN_08BIT, 0x04}, /* DPC white threshold list */ + {0x33e3, CRL_REG_LEN_08BIT, 0x02}, /* DPC white threshold list */ + {0x33ed, CRL_REG_LEN_08BIT, 0x02}, /* DPC gain list VS */ + {0x33ee, CRL_REG_LEN_08BIT, 0x05}, /* DPC gain list VS */ + {0x33ef, CRL_REG_LEN_08BIT, 0x0a}, /* DPC gain list VS */ + {0x346a, CRL_REG_LEN_08BIT, 0x35}, /* last embedded data range H */ + {0x346b, CRL_REG_LEN_08BIT, 0x00}, /* last embedded data range L */ + {0x3195, CRL_REG_LEN_08BIT, 0x2e}, /* vfifo read level */ +}; + +static struct crl_register_write_rep ov2775_powerup_standby_regset[] = { + { 0x3012, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_register_write_rep ov2775_streamon_regs[] = { + { 0x3012, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep ov2775_streamoff_regs[] = { + { 0x3012, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_arithmetic_ops ov2775_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 3, + }, +}; + +static struct crl_arithmetic_ops ov2775_vblank_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + }, +}; + +static struct crl_arithmetic_ops ov2775_hflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 2, + }, +}; + +static struct crl_arithmetic_ops ov2775_hblank_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + }, +}; + +static struct crl_arithmetic_ops ov2775_exposure_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + }, +}; + +static struct crl_arithmetic_ops ov2775_ana_gain_l_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 2, + }, +}; + +static struct crl_arithmetic_ops ov2775_ana_gain_vs_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 4, + }, +}; + +static struct crl_arithmetic_ops ov2775_digital_gain_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + }, +}; + +static struct crl_dynamic_register_access ov2775_v_flip_regs[] = { + { + .address = 0x30C0, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, 
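+	 /* Read-modify-write access: CRL_REG_READ_AND_UPDATE preserves the + * other bits of 0x30C0, and the 0/1 VFLIP control value is shifted + * left by 3 (ov2775_vflip_ops) so it lands in bit 3, the bit selected + * by .mask below. */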
+ .ops_items = ARRAY_SIZE(ov2775_vflip_ops), + .ops = ov2775_vflip_ops, + .mask = 0x08, + }, +}; + +static struct crl_dynamic_register_access ov2775_h_flip_regs[] = { + { + .address = 0x30C0, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov2775_hflip_ops), + .ops = ov2775_hflip_ops, + .mask = 0x04, + }, +}; + +static struct crl_register_write_rep ov2775_poweroff_regset[] = { + { 0x3012, CRL_REG_LEN_08BIT, 0x00 } +}; + +/* Gain codes: 0: 1x, 1: 2x, 2: 4x, 3: 8x. + * In linear mode, analog gain uses ana_gain_h. + */ +static struct crl_dynamic_register_access ov2775_ana_gain_h_regs[] = { + { + .address = 0x30BB, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = 0, + .ops = 0, + .mask = 0x03, + }, +}; + +static struct crl_dynamic_register_access ov2775_ana_gain_l_regs[] = { + { + .address = 0x30BB, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov2775_ana_gain_l_ops), + .ops = ov2775_ana_gain_l_ops, + .mask = 0x0c, + }, +}; + +static struct crl_dynamic_register_access ov2775_ana_gain_vs_regs[] = { + { + .address = 0x30BB, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov2775_ana_gain_vs_ops), + .ops = ov2775_ana_gain_vs_ops, + .mask = 0x30, + }, +}; + +static struct crl_dynamic_register_access ov2775_digital_gain_h_regs[] = { + { + .address = 0x305A, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov2775_digital_gain_ops), + .ops = ov2775_digital_gain_ops, + .mask = 0xff, + }, + { + .address = 0x305B, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access ov2775_digital_gain_l_regs[] = { + { + .address = 0x305C, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov2775_digital_gain_ops), + .ops = ov2775_digital_gain_ops, + .mask = 0xff, + }, + { + .address = 0x305D, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access ov2775_digital_gain_vs_regs[] = { + { + .address = 0x305E, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov2775_digital_gain_ops), + .ops = ov2775_digital_gain_ops, + .mask = 0xff, + }, + { + .address = 0x305F, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access ov2775_exposure_dcg_regs[] = { + { + .address = 0x30B6, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov2775_exposure_ops), + .ops = ov2775_exposure_ops, + .mask = 0xff, + }, + { + .address = 0x30B7, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +/* 0x30B8 and 0x30B9 hold the integer part, 0x30BA the fractional part (N/32); + * only the integer part is used here: e.g. a value of 0x1234 writes 0x12 to + * 0x30B8 and 0x34 to 0x30B9. + */ +static struct crl_dynamic_register_access ov2775_exposure_vs_regs[] = { + { + .address = 0x30B8, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov2775_exposure_ops), + .ops = ov2775_exposure_ops, + .mask = 0xff, + }, + { + .address = 0x30B9, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access ov2775_vblank_regs[] = { + { + .address = 0x30B2, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov2775_vblank_ops), + .ops = ov2775_vblank_ops, + .mask = 0xff, + }, + { + .address = 0x30B3, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_dynamic_register_access ov2775_hblank_regs[] = { + { + .address = 0x30B0, + .len = CRL_REG_LEN_08BIT, + .ops_items = 
ARRAY_SIZE(ov2775_hblank_ops), + .ops = ov2775_hblank_ops, + .mask = 0xff, + }, + { + .address = 0x30B1, + .len = CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_sensor_detect_config ov2775_sensor_detect_regset[] = { + { + .reg = { 0x300A, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +/* pixel_rate = op_sys_clk*2 * csi_lanes / bitsperpixel */ +static struct crl_pll_configuration ov2775_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 480000000, + .bitsperpixel = 12, + .pixel_rate_csi = 80000000, + .pixel_rate_pa = 80000000, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = 0, + }, +}; + +static struct crl_subdev_rect_rep ov2775_1928x1088_rects_native[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1928, + .in_rect.height = 1088, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1928, + .out_rect.height = 1088, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1928, + .in_rect.height = 1088, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1928, + .out_rect.height = 1088, + }, +}; + +static struct crl_mode_rep ov2775_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov2775_1928x1088_rects_native), + .sd_rects = ov2775_1928x1088_rects_native, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1928, + .height = 1088, + .min_llp = 3550, + .min_fll = 1126, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov2775_linearHCG_30fps_mipi960_regset), + .mode_regs = ov2775_linearHCG_30fps_mipi960_regset, + }, + { + .sd_rects_items = ARRAY_SIZE(ov2775_1928x1088_rects_native), + .sd_rects = ov2775_1928x1088_rects_native, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1928, + .height = 1088, + .min_llp = 3550, + .min_fll = 1126, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov2775_linearLCG_30fps_mipi960_regset), + .mode_regs = ov2775_linearLCG_30fps_mipi960_regset, + }, + { + .sd_rects_items = ARRAY_SIZE(ov2775_1928x1088_rects_native), + .sd_rects = ov2775_1928x1088_rects_native, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1928, + .height = 1088, + .min_llp = 3550, + .min_fll = 1126, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov2775_2x12_30fps_mipi960_regset), + .mode_regs = ov2775_2x12_30fps_mipi960_regset, + }, + { + .sd_rects_items = ARRAY_SIZE(ov2775_1928x1088_rects_native), + .sd_rects = ov2775_1928x1088_rects_native, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1928, + .height = 1088, + .min_llp = 3550, + .min_fll = 1126, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov2775_3x12_30fps_mipi960_regset), + .mode_regs = ov2775_3x12_30fps_mipi960_regset, + }, +}; + +static struct crl_sensor_subdev_config ov2775_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov2775 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov2775 pixel array", + }, +}; + +static struct crl_sensor_limits ov2775_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1936, + .y_addr_max = 1096, + .min_frame_length_lines = 1126, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 3550, + .max_line_length_pixels = 32752, +}; + +static struct crl_flip_data 
ov2775_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, +}; + +static struct crl_csi_data_fmt ov2775_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, + { + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 12, + .regs_items = 0, + .regs = 0, + }, +}; + +static struct crl_v4l2_ctrl ov2775_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_ANALOG_GAIN_S, + .name = "CRL_CID_ANALOG_GAIN_HCG", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 128, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_ana_gain_h_regs), + .regs = ov2775_ana_gain_h_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_ANALOG_GAIN_L, + .name = "CRL_CID_ANALOG_GAIN_LCG", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 128, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_ana_gain_l_regs), + .regs = ov2775_ana_gain_l_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = 
CRL_CID_ANALOG_GAIN_VS, + .name = "CRL_CID_ANALOG_GAIN_VS", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 128, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_ana_gain_vs_regs), + .regs = ov2775_ana_gain_vs_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_L, + .name = "CRL_CID_DIGITAL_GAIN_LCG", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 128, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_digital_gain_l_regs), + .regs = ov2775_digital_gain_l_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_S, + .name = "CRL_CID_DIGITAL_GAIN_HCG", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 128, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_digital_gain_h_regs), + .regs = ov2775_digital_gain_h_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_DIGITAL_GAIN_VS, + .name = "CRL_CID_DIGITAL_GAIN_VS", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 128, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_digital_gain_vs_regs), + .regs = ov2775_digital_gain_vs_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS1, + .name = "CRL_CID_EXPOSURE_DCG", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 1, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_exposure_dcg_regs), + .regs = ov2775_exposure_dcg_regs, + .dep_items = 0, /* FLL is changed automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_EXPOSURE_SHS2, + .name = "CRL_CID_EXPOSURE_VS", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 1, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_exposure_vs_regs), + .regs = ov2775_exposure_vs_regs, + .dep_items = 0, /* FLL is changed automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_h_flip_regs), + .regs = 
ov2775_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_v_flip_regs), + .regs = ov2775_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame Length Lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 1126, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_vblank_regs), + .regs = ov2775_vblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1024, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 3550, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov2775_hblank_regs), + .regs = ov2775_hblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = CRL_CID_SENSOR_MODE, + .name = "CRL_CID_SENSOR_MODE", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 0, + .data.std_data.max = OV2775_CAPTURE_MODE_MAX - 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_MODE_SELECTION, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +static struct crl_arithmetic_ops ov2775_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops ov2775_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc ov2775_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(ov2775_frame_desc_width_ops), + .ops = ov2775_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(ov2775_frame_desc_height_ops), + .ops = ov2775_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, +}; + +/* Power items; they are enabled in the order they are listed here */ +static struct crl_power_seq_entity ov2775_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA,
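+	 /* Assumed field semantics (not documented in this patch): .val is + * the GPIO level applied on power-up, .undo_val the level restored on + * power-down, and .delay the settle time after toggling, presumably + * in microseconds. */ +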
.val = 1, + .undo_val = 0, + .delay = 1000, + }, +}; + +static struct crl_sensor_configuration ov2775_crl_configuration = { + + .power_items = ARRAY_SIZE(ov2775_power_items), + .power_entities = ov2775_power_items, + + .onetime_init_regs_items = ARRAY_SIZE(ov2775_onetime_init_regset), + .onetime_init_regs = ov2775_onetime_init_regset, + + .powerup_regs_items = ARRAY_SIZE(ov2775_powerup_standby_regset), + .powerup_regs = ov2775_powerup_standby_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(ov2775_sensor_detect_regset), + .id_regs = ov2775_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov2775_sensor_subdevs), + .subdevs = ov2775_sensor_subdevs, + + .sensor_limits = &ov2775_sensor_limits, + + .pll_config_items = ARRAY_SIZE(ov2775_pll_configurations), + .pll_configs = ov2775_pll_configurations, + + .modes_items = ARRAY_SIZE(ov2775_modes), + .modes = ov2775_modes, + + .streamon_regs_items = ARRAY_SIZE(ov2775_streamon_regs), + .streamon_regs = ov2775_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(ov2775_streamoff_regs), + .streamoff_regs = ov2775_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ov2775_v4l2_ctrls), + .v4l2_ctrl_bank = ov2775_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov2775_crl_csi_data_fmt), + .csi_fmts = ov2775_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov2775_flip_configurations), + .flip_data = ov2775_flip_configurations, + + .crl_nvm_info.nvm_flags = CRL_NVM_ADDR_MODE_16BIT, + .crl_nvm_info.nvm_preop_regs_items = 0, + .crl_nvm_info.nvm_postop_regs_items = 0, + .crl_nvm_info.nvm_blobs_items = 0, + + .frame_desc_entries = ARRAY_SIZE(ov2775_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = ov2775_frame_desc, +}; + +#endif /* __CRLMODULE_OV2775_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov5670_configuration.h b/drivers/media/i2c/crlmodule/crl_ov5670_configuration.h new file mode 100644 index 000000000000..7badb609dd45 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov5670_configuration.h @@ -0,0 +1,1136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation + * + * Author: Tommi Franttila + * + */ + +#ifndef __CRLMODULE_ov5670_CONFIGURATION_H_ +#define __CRLMODULE_ov5670_CONFIGURATION_H_ + +#include "crlmodule-nvm.h" +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep ov5670_pll_840mbps[] = { + { 0x030a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0300, CRL_REG_LEN_08BIT, 0x04 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0302, CRL_REG_LEN_08BIT, 0x78 }, + { 0x0304, CRL_REG_LEN_08BIT, 0x03 }, + { 0x0303, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0305, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0312, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030b, CRL_REG_LEN_08BIT, 0x00 }, + { 0x030c, CRL_REG_LEN_08BIT, 0x00 }, + { 0x030d, CRL_REG_LEN_08BIT, 0x1e }, + { 0x030f, CRL_REG_LEN_08BIT, 0x06 }, + { 0x030e, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep ov5670_powerup_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3000, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3002, CRL_REG_LEN_08BIT, 0x21 }, + { 0x3005, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3007, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3015, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3018, CRL_REG_LEN_08BIT, 0x32 }, + { 0x301a, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x301b, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x301c, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x301d, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x301e, CRL_REG_LEN_08BIT, 0xf0 }, + 
{ 0x3021, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3030, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3031, CRL_REG_LEN_08BIT, 0x0a }, + { 0x303c, CRL_REG_LEN_08BIT, 0xff }, + { 0x303e, CRL_REG_LEN_08BIT, 0xff }, + { 0x3040, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3041, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3042, CRL_REG_LEN_08BIT, 0xf0 }, + { 0x3106, CRL_REG_LEN_08BIT, 0x11 }, + { 0x3500, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3502, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3503, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3504, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3505, CRL_REG_LEN_08BIT, 0x83 }, + { 0x3508, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3509, CRL_REG_LEN_08BIT, 0x00 }, + { 0x350e, CRL_REG_LEN_08BIT, 0x04 }, + { 0x350f, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3510, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3511, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3512, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3601, CRL_REG_LEN_08BIT, 0xc8 }, + { 0x3610, CRL_REG_LEN_08BIT, 0x88 }, + { 0x3612, CRL_REG_LEN_08BIT, 0x48 }, + { 0x3614, CRL_REG_LEN_08BIT, 0x5b }, + { 0x3615, CRL_REG_LEN_08BIT, 0x96 }, + { 0x3621, CRL_REG_LEN_08BIT, 0xd0 }, + { 0x3622, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3623, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3633, CRL_REG_LEN_08BIT, 0x13 }, + { 0x3634, CRL_REG_LEN_08BIT, 0x13 }, + { 0x3635, CRL_REG_LEN_08BIT, 0x13 }, + { 0x3636, CRL_REG_LEN_08BIT, 0x13 }, + { 0x3645, CRL_REG_LEN_08BIT, 0x13 }, + { 0x3646, CRL_REG_LEN_08BIT, 0x82 }, + { 0x3650, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3652, CRL_REG_LEN_08BIT, 0xff }, + { 0x3655, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3656, CRL_REG_LEN_08BIT, 0xff }, + { 0x365a, CRL_REG_LEN_08BIT, 0xff }, + { 0x365e, CRL_REG_LEN_08BIT, 0xff }, + { 0x3668, CRL_REG_LEN_08BIT, 0x00 }, + { 0x366a, CRL_REG_LEN_08BIT, 0x07 }, + { 0x366e, CRL_REG_LEN_08BIT, 0x10 }, + { 0x366d, CRL_REG_LEN_08BIT, 0x00 }, + { 0x366f, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3700, CRL_REG_LEN_08BIT, 0x28 }, + { 0x3701, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3702, CRL_REG_LEN_08BIT, 0x3a }, + { 0x3703, CRL_REG_LEN_08BIT, 0x19 }, + { 0x3704, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3705, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3706, CRL_REG_LEN_08BIT, 0x66 }, + { 0x3707, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3708, CRL_REG_LEN_08BIT, 0x34 }, + { 0x3709, CRL_REG_LEN_08BIT, 0x40 }, + { 0x370a, CRL_REG_LEN_08BIT, 0x01 }, + { 0x370b, CRL_REG_LEN_08BIT, 0x1b }, + { 0x3714, CRL_REG_LEN_08BIT, 0x24 }, + { 0x371a, CRL_REG_LEN_08BIT, 0x3e }, + { 0x3733, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3734, CRL_REG_LEN_08BIT, 0x00 }, + { 0x373a, CRL_REG_LEN_08BIT, 0x05 }, + { 0x373b, CRL_REG_LEN_08BIT, 0x06 }, + { 0x373c, CRL_REG_LEN_08BIT, 0x0a }, + { 0x373f, CRL_REG_LEN_08BIT, 0xa0 }, + { 0x3755, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3758, CRL_REG_LEN_08BIT, 0x00 }, + { 0x375b, CRL_REG_LEN_08BIT, 0x0e }, + { 0x3766, CRL_REG_LEN_08BIT, 0x5f }, + { 0x3768, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3769, CRL_REG_LEN_08BIT, 0x22 }, + { 0x3773, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3774, CRL_REG_LEN_08BIT, 0x1f }, + { 0x3776, CRL_REG_LEN_08BIT, 0x06 }, + { 0x37a0, CRL_REG_LEN_08BIT, 0x88 }, + { 0x37a1, CRL_REG_LEN_08BIT, 0x5c }, + { 0x37a7, CRL_REG_LEN_08BIT, 0x88 }, + { 0x37a8, CRL_REG_LEN_08BIT, 0x70 }, + { 0x37aa, CRL_REG_LEN_08BIT, 0x88 }, + { 0x37ab, CRL_REG_LEN_08BIT, 0x48 }, + { 0x37b3, CRL_REG_LEN_08BIT, 0x66 }, + { 0x37c2, CRL_REG_LEN_08BIT, 0x04 }, + { 0x37c5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37c8, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3800, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3801, CRL_REG_LEN_08BIT, 0x0c }, + { 0x3802, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3803, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3804, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3805, CRL_REG_LEN_08BIT, 0x33 }, + { 
0x3806, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3807, CRL_REG_LEN_08BIT, 0xa3 }, + { 0x3811, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3813, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3815, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3816, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3817, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3818, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3819, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3820, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3822, CRL_REG_LEN_08BIT, 0x48 }, + { 0x3826, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3827, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3830, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3836, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3837, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3838, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3841, CRL_REG_LEN_08BIT, 0xff }, /* Auto size function enabled */ + { 0x3846, CRL_REG_LEN_08BIT, 0x48 }, + { 0x3861, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3862, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3863, CRL_REG_LEN_08BIT, 0x06 }, + { 0x3a11, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3a12, CRL_REG_LEN_08BIT, 0x78 }, + { 0x3b00, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b02, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b03, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b04, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3b05, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c00, CRL_REG_LEN_08BIT, 0x89 }, + { 0x3c01, CRL_REG_LEN_08BIT, 0xab }, + { 0x3c02, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3c03, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c04, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c05, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3c06, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c07, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3c0c, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c0d, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c0e, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c0f, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c40, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3c41, CRL_REG_LEN_08BIT, 0xa3 }, + { 0x3c43, CRL_REG_LEN_08BIT, 0x7d }, + { 0x3c45, CRL_REG_LEN_08BIT, 0xd7 }, + { 0x3c47, CRL_REG_LEN_08BIT, 0xfc }, + { 0x3c50, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3c52, CRL_REG_LEN_08BIT, 0xaa }, + { 0x3c54, CRL_REG_LEN_08BIT, 0x71 }, + { 0x3c56, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3d85, CRL_REG_LEN_08BIT, 0x17 }, + { 0x3d8d, CRL_REG_LEN_08BIT, 0xea }, + { 0x3f03, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3f0a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3f0b, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4001, CRL_REG_LEN_08BIT, 0x60 }, + { 0x4009, CRL_REG_LEN_08BIT, 0x0d }, + { 0x4017, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4020, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4021, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4022, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4023, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4024, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4025, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4026, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4027, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4028, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4029, CRL_REG_LEN_08BIT, 0x00 }, + { 0x402a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x402b, CRL_REG_LEN_08BIT, 0x00 }, + { 0x402c, CRL_REG_LEN_08BIT, 0x00 }, + { 0x402d, CRL_REG_LEN_08BIT, 0x00 }, + { 0x402e, CRL_REG_LEN_08BIT, 0x00 }, + { 0x402f, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4040, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4041, CRL_REG_LEN_08BIT, 0x03 }, + { 0x4042, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4043, CRL_REG_LEN_08BIT, 0x7A }, + { 0x4044, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4045, CRL_REG_LEN_08BIT, 0x7A }, + { 0x4046, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4047, CRL_REG_LEN_08BIT, 0x7A }, + { 0x4048, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4049, CRL_REG_LEN_08BIT, 0x7A }, + { 0x4303, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4307, CRL_REG_LEN_08BIT, 0x30 }, + { 0x4500, CRL_REG_LEN_08BIT, 0x58 }, + { 0x4501, CRL_REG_LEN_08BIT, 0x04 }, + { 0x4502, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4503, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4508, 
CRL_REG_LEN_08BIT, 0xaa }, + { 0x4509, CRL_REG_LEN_08BIT, 0xaa }, + { 0x450a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x450b, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4700, CRL_REG_LEN_08BIT, 0xa4 }, + { 0x4800, CRL_REG_LEN_08BIT, 0x4c }, + { 0x4816, CRL_REG_LEN_08BIT, 0x53 }, + { 0x481f, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4837, CRL_REG_LEN_08BIT, 0x13 }, + { 0x5000, CRL_REG_LEN_08BIT, 0x56 }, + { 0x5001, CRL_REG_LEN_08BIT, 0x01 }, + { 0x5002, CRL_REG_LEN_08BIT, 0x28 }, + { 0x5004, CRL_REG_LEN_08BIT, 0x0c }, + { 0x5006, CRL_REG_LEN_08BIT, 0x0c }, + { 0x5007, CRL_REG_LEN_08BIT, 0xe0 }, + { 0x5008, CRL_REG_LEN_08BIT, 0x01 }, + { 0x5009, CRL_REG_LEN_08BIT, 0xb0 }, + { 0x5901, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5a01, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5a03, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5a04, CRL_REG_LEN_08BIT, 0x0c }, + { 0x5a05, CRL_REG_LEN_08BIT, 0xe0 }, + { 0x5a06, CRL_REG_LEN_08BIT, 0x09 }, + { 0x5a07, CRL_REG_LEN_08BIT, 0xb0 }, + { 0x5a08, CRL_REG_LEN_08BIT, 0x06 }, + { 0x5e00, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3734, CRL_REG_LEN_08BIT, 0x40 }, + { 0x5b00, CRL_REG_LEN_08BIT, 0x01 }, + { 0x5b01, CRL_REG_LEN_08BIT, 0x10 }, + { 0x5b02, CRL_REG_LEN_08BIT, 0x01 }, + { 0x5b03, CRL_REG_LEN_08BIT, 0xdb }, + { 0x3d8c, CRL_REG_LEN_08BIT, 0x71 }, + { 0x370b, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3618, CRL_REG_LEN_08BIT, 0x2a }, + { 0x5780, CRL_REG_LEN_08BIT, 0x3e }, + { 0x5781, CRL_REG_LEN_08BIT, 0x0f }, + { 0x5782, CRL_REG_LEN_08BIT, 0x44 }, + { 0x5783, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5784, CRL_REG_LEN_08BIT, 0x01 }, + { 0x5785, CRL_REG_LEN_08BIT, 0x01 }, + { 0x5786, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5787, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5788, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5789, CRL_REG_LEN_08BIT, 0x0f }, + { 0x578a, CRL_REG_LEN_08BIT, 0xfd }, + { 0x578b, CRL_REG_LEN_08BIT, 0xf5 }, + { 0x578c, CRL_REG_LEN_08BIT, 0xf5 }, + { 0x578d, CRL_REG_LEN_08BIT, 0x03 }, + { 0x578e, CRL_REG_LEN_08BIT, 0x08 }, + { 0x578f, CRL_REG_LEN_08BIT, 0x0c }, + { 0x5790, CRL_REG_LEN_08BIT, 0x08 }, + { 0x5791, CRL_REG_LEN_08BIT, 0x06 }, + { 0x5792, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5793, CRL_REG_LEN_08BIT, 0x52 }, + { 0x5794, CRL_REG_LEN_08BIT, 0xa3 }, + { 0x3503, CRL_REG_LEN_08BIT, 0x00 }, + { 0x380e, CRL_REG_LEN_08BIT, 0x04 }, + { 0x380f, CRL_REG_LEN_08BIT, 0x60 }, + { 0x3002, CRL_REG_LEN_08BIT, 0x61 }, + { 0x3010, CRL_REG_LEN_08BIT, 0x40 }, + { 0x300D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5045, CRL_REG_LEN_08BIT, 0x05 }, + { 0x5048, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3610, CRL_REG_LEN_08BIT, 0xa8 }, + { 0x3733, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3734, CRL_REG_LEN_08BIT, 0x40 }, +}; + +static struct crl_register_write_rep ov5670_mode_1944[] = { + /* Auto size function in use, but no cropping in this mode */ + { 0x3808, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3809, CRL_REG_LEN_08BIT, 0x20 }, + { 0x380a, CRL_REG_LEN_08BIT, 0x07 }, + { 0x380b, CRL_REG_LEN_08BIT, 0x98 }, + { 0x3821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x03 }, +}; + +static struct crl_register_write_rep ov5670_mode_1940[] = { + /* Auto size function in use, cropping from the centre of the image */ + { 0x3808, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3809, CRL_REG_LEN_08BIT, 0x00 }, + { 0x380a, CRL_REG_LEN_08BIT, 0x07 }, + { 0x380b, CRL_REG_LEN_08BIT, 0x94 }, + { 0x3821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep ov5670_mode_1458[] = { + /* Auto size function in use, cropping from the centre of the image */ + { 0x3808, CRL_REG_LEN_08BIT, 0x0a 
}, + { 0x3809, CRL_REG_LEN_08BIT, 0x20 }, + { 0x380a, CRL_REG_LEN_08BIT, 0x05 }, + { 0x380b, CRL_REG_LEN_08BIT, 0xB2 }, + { 0x3821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x03 }, +}; + +static struct crl_register_write_rep ov5670_mode_1456[] = { + /* Auto size function in use, cropping from the centre of the image */ + { 0x3808, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3809, CRL_REG_LEN_08BIT, 0x00 }, + { 0x380a, CRL_REG_LEN_08BIT, 0x05 }, + { 0x380b, CRL_REG_LEN_08BIT, 0xB0 }, + { 0x3821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep ov5670_mode_1152[] = { + /* Auto size function in use, cropping from the centre of the image */ + { 0x3808, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3809, CRL_REG_LEN_08BIT, 0xC0 }, + { 0x380a, CRL_REG_LEN_08BIT, 0x04 }, + { 0x380b, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4601, CRL_REG_LEN_08BIT, 0xc6 }, +}; + +static struct crl_register_write_rep ov5670_mode_1080[] = { + /* Auto size function in use, cropping from the centre of the image */ + { 0x3808, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3809, CRL_REG_LEN_08BIT, 0x80 }, + { 0x380a, CRL_REG_LEN_08BIT, 0x04 }, + { 0x380b, CRL_REG_LEN_08BIT, 0x38 }, + { 0x3821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4601, CRL_REG_LEN_08BIT, 0xc0 }, +}; + +static struct crl_register_write_rep ov5670_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep ov5670_streamoff_regs[] = { + /* MIPI stream off when the current frame finishes */ + { 0x4202, CRL_REG_LEN_08BIT, 0x0f }, + /* Wait to finish the current frame */ + { 0x0000, CRL_REG_LEN_DELAY, 0x40 }, + /* Sensor to standby */ + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep ov5670_data_fmt_width10[] = { + { 0x3031, CRL_REG_LEN_08BIT, 0x0a } +}; + +static struct crl_arithmetic_ops ov5670_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov5670_swap_flip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 5, + }, +}; + +static struct crl_arithmetic_ops ov5670_hflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov5670_hblank_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov5670_exposure_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 4, + }, +}; + +static struct crl_dynamic_register_access ov5670_v_flip_regs[] = { + { + .address = 0x3820, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov5670_vflip_ops), + .ops = ov5670_vflip_ops, + .mask = 0x2, + }, + { + .address = 0x450B, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov5670_swap_flip_ops), + .ops = ov5670_swap_flip_ops, + .mask = 0x20, + }, +}; + +static struct crl_dynamic_register_access ov5670_h_flip_regs[] = { + { + .address = 0x3821, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov5670_hflip_ops), + .ops = ov5670_hflip_ops, + .mask = 0x2, + }, +}; + +struct crl_register_write_rep ov5670_poweroff_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_dynamic_register_access ov5670_ana_gain_global_regs[] = { + { + .address = 0x3508, + .len =
CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0x7ff, + }, +}; + +static struct crl_dynamic_register_access ov5670_exposure_regs[] = { + { + .address = 0x3500, + .len = CRL_REG_LEN_24BIT, + .ops_items = ARRAY_SIZE(ov5670_exposure_ops), + .ops = ov5670_exposure_ops, + .mask = 0x0ffff0, + }, +}; + +static struct crl_dynamic_register_access ov5670_vblank_regs[] = { + { + .address = 0x380E, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access ov5670_hblank_regs[] = { + { + .address = 0x380C, + .len = CRL_REG_LEN_16BIT, + .ops_items = ARRAY_SIZE(ov5670_hblank_ops), + .ops = ov5670_hblank_ops, + .mask = 0xffff, + }, +}; + +static struct crl_sensor_detect_config ov5670_sensor_detect_regset[] = { + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x300C, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +static const s64 ov5670_op_sys_clock[] = { 420000000, }; + +static struct crl_pll_configuration ov5670_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 420000000, + .bitsperpixel = 10, + .pixel_rate_csi = 240000000, + .pixel_rate_pa = 199180800, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(ov5670_pll_840mbps), + .pll_regs = ov5670_pll_840mbps, + }, +}; + +static struct crl_subdev_rect_rep ov5670_1944_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 2592, 1944 }, + .out_rect = { 0, 0, 2592, 1944 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 2592, 1944 }, + .out_rect = { 0, 0, 2592, 1944 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 2592, 1944 }, + .out_rect = { 0, 0, 2592, 1944 }, + }, +}; + +static struct crl_subdev_rect_rep ov5670_1940_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 2592, 1944 }, + .out_rect = { 16, 2, 2560, 1940 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 2560, 1940 }, + .out_rect = { 0, 0, 2560, 1940 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 2560, 1940 }, + .out_rect = { 0, 0, 2560, 1940 }, + }, +}; + +static struct crl_subdev_rect_rep ov5670_1458_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 2592, 1944 }, + .out_rect = { 0, 244, 2592, 1458 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 2592, 1458 }, + .out_rect = { 0, 0, 2592, 1458 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 2592, 1458 }, + .out_rect = { 0, 0, 2592, 1458 }, + }, +}; + +static struct crl_subdev_rect_rep ov5670_1456_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 2592, 1944 }, + .out_rect = { 16, 244, 2560, 1456 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 2560, 1456 }, + .out_rect = { 0, 0, 2560, 1456 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 2560, 1456 }, + .out_rect = { 0, 0, 2560, 1456 }, + }, +}; + +static struct crl_subdev_rect_rep ov5670_1152_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 2592, 1944 }, + .out_rect = { 304, 396, 1984, 1152 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 1984, 1152 }, + .out_rect = { 0, 0, 1984, 1152 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 1984, 1152 }, + .out_rect = { 0, 0, 1984, 1152 }, + }, +}; + +static struct 
crl_subdev_rect_rep ov5670_1080_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 2592, 1944 }, + .out_rect = { 336, 432, 1920, 1080 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect = { 0, 0, 1920, 1080 }, + .out_rect = { 0, 0, 1920, 1080 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 1920, 1080 }, + .out_rect = { 0, 0, 1920, 1080 }, + }, +}; + +static struct crl_mode_rep ov5670_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov5670_1944_rects), + .sd_rects = ov5670_1944_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 2592, + .height = 1944, + .min_llp = 3360, + .min_fll = 1976, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov5670_mode_1944), + .mode_regs = ov5670_mode_1944, + }, + { + .sd_rects_items = ARRAY_SIZE(ov5670_1940_rects), + .sd_rects = ov5670_1940_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 2560, + .height = 1940, + .min_llp = 3366, + .min_fll = 1972, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov5670_mode_1940), + .mode_regs = ov5670_mode_1940, + }, + { + .sd_rects_items = ARRAY_SIZE(ov5670_1458_rects), + .sd_rects = ov5670_1458_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 2592, + .height = 1458, + .min_llp = 4455, + .min_fll = 1490, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov5670_mode_1458), + .mode_regs = ov5670_mode_1458, + }, + { + .sd_rects_items = ARRAY_SIZE(ov5670_1456_rects), + .sd_rects = ov5670_1456_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 2560, + .height = 1456, + .min_llp = 4461, + .min_fll = 1488, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov5670_mode_1456), + .mode_regs = ov5670_mode_1456, + }, + + { + .sd_rects_items = ARRAY_SIZE(ov5670_1152_rects), + .sd_rects = ov5670_1152_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1984, + .height = 1152, + .min_llp = 2803, + .min_fll = 1184, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov5670_mode_1152), + .mode_regs = ov5670_mode_1152, + }, + { + .sd_rects_items = ARRAY_SIZE(ov5670_1080_rects), + .sd_rects = ov5670_1080_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .min_llp = 2985, + .min_fll = 1112, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov5670_mode_1080), + .mode_regs = ov5670_mode_1080, + }, +}; + +static struct crl_sensor_subdev_config ov5670_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "ov5670 scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov5670 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov5670 pixel array", + }, +}; + +static struct crl_sensor_limits ov5670_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 2592, + .y_addr_max = 1944, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 2700, + .max_line_length_pixels = 32752, + .scaler_m_min = 16, + .scaler_m_max = 255, + .scaler_n_min = 16, + .scaler_n_max = 16, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data ov5670_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = 
CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, +}; + +static struct crl_csi_data_fmt ov5670_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 1, + .regs = ov5670_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov5670_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov5670_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov5670_data_fmt_width10, + }, +}; + +static struct crl_v4l2_ctrl ov5670_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = + ARRAY_SIZE(ov5670_pll_configurations) - 1, + .data.v4l2_int_menu.menu = ov5670_op_sys_clock, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 128, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov5670_ana_gain_global_regs), + .regs = ov5670_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov5670_exposure_regs), + .regs = ov5670_exposure_regs, + .dep_items = 0, /* FLL is changed automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = 
CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov5670_h_flip_regs), + .regs = ov5670_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov5670_v_flip_regs), + .regs = ov5670_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame Length Lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 2474, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov5670_vblank_regs), + .regs = ov5670_vblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1024, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 3880, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov5670_hblank_regs), + .regs = ov5670_hblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +#define ov5670_OTP_START_ADDR 0x7010 +#define ov5670_OTP_END_ADDR 0x7063 + +#define ov5670_OTP_LEN (ov5670_OTP_END_ADDR - ov5670_OTP_START_ADDR + 1) +#define ov5670_OTP_L_ADDR(x) (x & 0xff) +#define ov5670_OTP_H_ADDR(x) ((x >> 8) & 0xff) + +static struct crl_register_write_rep ov5670_nvm_preop_regset[] = { + /* Start streaming */ + { 0x0100, CRL_REG_LEN_08BIT, 0x01 }, + /* Manual mode, program disable */ + { 0x3D84, CRL_REG_LEN_08BIT, 0xC0 }, + /* Manual OTP start address for access */ + { 0x3D88, CRL_REG_LEN_08BIT, ov5670_OTP_H_ADDR(ov5670_OTP_START_ADDR)}, + { 0x3D89, CRL_REG_LEN_08BIT, ov5670_OTP_L_ADDR(ov5670_OTP_START_ADDR)}, + /* Manual OTP end address for access */ + { 0x3D8A, CRL_REG_LEN_08BIT, ov5670_OTP_H_ADDR(ov5670_OTP_END_ADDR)}, + { 0x3D8B, CRL_REG_LEN_08BIT, ov5670_OTP_L_ADDR(ov5670_OTP_END_ADDR)}, + /* OTP load enable */ + { 0x3D81, CRL_REG_LEN_08BIT, 0x01 }, + /* Wait for the data to load into the buffer */ + { 0x0000, CRL_REG_LEN_DELAY, 0x05 }, +}; + +static struct crl_register_write_rep ov5670_nvm_postop_regset[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, /* Stop streaming */ +}; + +static struct crl_nvm_blob ov5670_nvm_blobs[] = { + {CRL_I2C_ADDRESS_NO_OVERRIDE, ov5670_OTP_START_ADDR, ov5670_OTP_LEN }, +}; + +static struct crl_arithmetic_ops ov5670_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops ov5670_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + 
.operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc ov5670_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(ov5670_frame_desc_width_ops), + .ops = ov5670_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(ov5670_frame_desc_height_ops), + .ops = ov5670_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, +}; + +/* Power items; they are enabled in the order they are listed here */ +static const struct crl_power_seq_entity ov5670_power_items[] = { + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VANA", + .val = 2800000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VDIG", + .val = 1200000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VIO", + .val = 1800000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VAF", + .val = 3000000, + .delay = 2000, + }, + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .delay = 10700, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA_BY_NUMBER, + }, + +}; + +static struct crl_sensor_configuration ov5670_crl_configuration = { + + .power_items = ARRAY_SIZE(ov5670_power_items), + .power_entities = ov5670_power_items, + + .powerup_regs_items = ARRAY_SIZE(ov5670_powerup_regset), + .powerup_regs = ov5670_powerup_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(ov5670_sensor_detect_regset), + .id_regs = ov5670_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov5670_sensor_subdevs), + .subdevs = ov5670_sensor_subdevs, + + .sensor_limits = &ov5670_sensor_limits, + + .pll_config_items = ARRAY_SIZE(ov5670_pll_configurations), + .pll_configs = ov5670_pll_configurations, + + .modes_items = ARRAY_SIZE(ov5670_modes), + .modes = ov5670_modes, + + .streamon_regs_items = ARRAY_SIZE(ov5670_streamon_regs), + .streamon_regs = ov5670_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(ov5670_streamoff_regs), + .streamoff_regs = ov5670_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ov5670_v4l2_ctrls), + .v4l2_ctrl_bank = ov5670_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov5670_crl_csi_data_fmt), + .csi_fmts = ov5670_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov5670_flip_configurations), + .flip_data = ov5670_flip_configurations, + + .crl_nvm_info.nvm_flags = CRL_NVM_ADDR_MODE_16BIT, + .crl_nvm_info.nvm_preop_regs_items = + ARRAY_SIZE(ov5670_nvm_preop_regset), + .crl_nvm_info.nvm_preop_regs = ov5670_nvm_preop_regset, + .crl_nvm_info.nvm_postop_regs_items = + ARRAY_SIZE(ov5670_nvm_postop_regset), + .crl_nvm_info.nvm_postop_regs = ov5670_nvm_postop_regset, + .crl_nvm_info.nvm_blobs_items = ARRAY_SIZE(ov5670_nvm_blobs), + .crl_nvm_info.nvm_config = ov5670_nvm_blobs, + + .frame_desc_entries = ARRAY_SIZE(ov5670_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = ov5670_frame_desc, +}; + +#endif /* __CRLMODULE_ov5670_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov8858_configuration.h b/drivers/media/i2c/crlmodule/crl_ov8858_configuration.h new file mode 100644 index
000000000000..63faedcf85ce --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov8858_configuration.h @@ -0,0 +1,1429 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation + * + * Author: Vinod Govindapillai + * + */ + +#ifndef __CRLMODULE_OV8858_CONFIGURATION_H_ +#define __CRLMODULE_OV8858_CONFIGURATION_H_ + +#include "crlmodule-nvm.h" +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep ov8858_pll_360mbps[] = { + { 0x0300, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0301, CRL_REG_LEN_08BIT, 0x00 }, + { 0x0302, CRL_REG_LEN_08BIT, 0x1e },/* pll1_multiplier = 30 */ + { 0x0303, CRL_REG_LEN_08BIT, 0x00 },/* pll1_divm = /(1 + 0) */ + { 0x0304, CRL_REG_LEN_08BIT, 0x03 },/* pll1_div_mipi = /8 */ + { 0x0305, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0306, CRL_REG_LEN_08BIT, 0x01 }, + { 0x030A, CRL_REG_LEN_08BIT, 0x02 }, + { 0x030B, CRL_REG_LEN_08BIT, 0x01 },/* pll2_pre_div = /2 */ + { 0x030c, CRL_REG_LEN_08BIT, 0x00 }, + { 0x030D, CRL_REG_LEN_08BIT, 0x44 },/* pll2_r_divp = 0x44 */ + { 0x030E, CRL_REG_LEN_08BIT, 0x01 },/* pll2_r_divs = /2 */ + { 0x030F, CRL_REG_LEN_08BIT, 0x04 },/* pll2_r_divsp = /(1 + 4) */ + /* pll2_pre_div0 = /1, pll2_r_divdac = /(1 + 1) */ + { 0x0312, CRL_REG_LEN_08BIT, 0x02 }, + /* mipi_lane_mode = 1+3, mipi_lvds_sel = 1 = MIPI enable, + * r_phy_pd_mipi_man = 0, lane_dis_option = 0 + */ + { 0x3018, CRL_REG_LEN_08BIT, 0x72 }, +}; + + +static struct crl_register_write_rep ov8858_powerup_regset[] = { + /* Reset */ + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3007, CRL_REG_LEN_08BIT, 0x80 }, + /* Npump clock div = /2, Ppump clock div = /4 */ + { 0x3015, CRL_REG_LEN_08BIT, 0x01 }, + /* Clock switch output = normal, pclk_div = /1 */ + { 0x3020, CRL_REG_LEN_08BIT, 0x93 }, + { 0x3031, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3032, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3022, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3034, CRL_REG_LEN_08BIT, 0x00 }, + /* sclk_div = /1, sclk_pre_div = /1, chip debug = 1 */ + { 0x3106, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3305, CRL_REG_LEN_08BIT, 0xF1 }, + { 0x3307, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3308, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3309, CRL_REG_LEN_08BIT, 0x28 }, + { 0x330A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x330B, CRL_REG_LEN_08BIT, 0x20 }, + { 0x330C, CRL_REG_LEN_08BIT, 0x00 }, + { 0x330D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x330E, CRL_REG_LEN_08BIT, 0x00 }, + { 0x330F, CRL_REG_LEN_08BIT, 0x40 }, + /* + * Digital fraction gain delay option = Delay 1 frame, + * Gain change delay option = Delay 1 frame, + * Gain delay option = Delay 1 frame, + * Gain manual as sensor gain = Input gain as real gain format, + * Exposure delay option (must be 0) = Delay 1 frame, + * Exposure change delay option (must be 0) = Delay 1 frame + */ + { 0x3503, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3505, CRL_REG_LEN_08BIT, 0x80 },/* gain conversion option */ + /* + * [10:7] are integer gain, [6:0] are fraction gain.
For example: + * 0x80 is 1x gain, CRL_REG_LEN_08BIT, 0x100 is 2x gain, + * CRL_REG_LEN_08BIT, 0x1C0 is 3.5x gain + */ + { 0x3508, CRL_REG_LEN_08BIT, 0x02 },/* long gain = 0x0200 */ + { 0x3509, CRL_REG_LEN_08BIT, 0x00 },/* long gain = 0x0200 */ + { 0x350C, CRL_REG_LEN_08BIT, 0x00 },/* short gain = 0x0080 */ + { 0x350D, CRL_REG_LEN_08BIT, 0x80 },/* short gain = 0x0080 */ + { 0x3510, CRL_REG_LEN_08BIT, 0x00 },/* short exposure = 0x000200 */ + { 0x3511, CRL_REG_LEN_08BIT, 0x02 },/* short exposure = 0x000200 */ + { 0x3512, CRL_REG_LEN_08BIT, 0x00 },/* short exposure = 0x000200 */ + { 0x3600, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3601, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3602, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3603, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3604, CRL_REG_LEN_08BIT, 0x22 }, + { 0x3605, CRL_REG_LEN_08BIT, 0x30 }, + { 0x3606, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3607, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3608, CRL_REG_LEN_08BIT, 0x11 }, + { 0x3609, CRL_REG_LEN_08BIT, 0x28 }, + { 0x360A, CRL_REG_LEN_08BIT, 0x00 }, + { 0x360B, CRL_REG_LEN_08BIT, 0x06 }, + { 0x360C, CRL_REG_LEN_08BIT, 0xD4 }, + { 0x360D, CRL_REG_LEN_08BIT, 0x40 }, + { 0x360E, CRL_REG_LEN_08BIT, 0x0C }, + { 0x360F, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3610, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3611, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3612, CRL_REG_LEN_08BIT, 0x88 }, + { 0x3613, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3614, CRL_REG_LEN_08BIT, 0x58 }, + { 0x3615, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3616, CRL_REG_LEN_08BIT, 0x4A }, + { 0x3617, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3618, CRL_REG_LEN_08BIT, 0x5a }, + { 0x3619, CRL_REG_LEN_08BIT, 0x70 }, + { 0x361A, CRL_REG_LEN_08BIT, 0x99 }, + { 0x361B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x361C, CRL_REG_LEN_08BIT, 0x07 }, + { 0x361D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x361E, CRL_REG_LEN_08BIT, 0x00 }, + { 0x361F, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3638, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3633, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3634, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3635, CRL_REG_LEN_08BIT, 0x0f }, + { 0x3636, CRL_REG_LEN_08BIT, 0x12 }, + { 0x3645, CRL_REG_LEN_08BIT, 0x13 }, + { 0x3646, CRL_REG_LEN_08BIT, 0x83 }, + { 0x364A, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3700, CRL_REG_LEN_08BIT, 0x30 }, + { 0x3701, CRL_REG_LEN_08BIT, 0x18 }, + { 0x3702, CRL_REG_LEN_08BIT, 0x50 }, + { 0x3703, CRL_REG_LEN_08BIT, 0x32 }, + { 0x3704, CRL_REG_LEN_08BIT, 0x28 }, + { 0x3705, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3706, CRL_REG_LEN_08BIT, 0x82 }, + { 0x3707, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3708, CRL_REG_LEN_08BIT, 0x48 }, + { 0x3709, CRL_REG_LEN_08BIT, 0x66 }, + { 0x370A, CRL_REG_LEN_08BIT, 0x01 }, + { 0x370B, CRL_REG_LEN_08BIT, 0x82 }, + { 0x370C, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3712, CRL_REG_LEN_08BIT, 0x44 }, + { 0x3714, CRL_REG_LEN_08BIT, 0x24 }, + { 0x3718, CRL_REG_LEN_08BIT, 0x14 }, + { 0x3719, CRL_REG_LEN_08BIT, 0x31 }, + { 0x371E, CRL_REG_LEN_08BIT, 0x31 }, + { 0x371F, CRL_REG_LEN_08BIT, 0x7F }, + { 0x3720, CRL_REG_LEN_08BIT, 0x0A }, + { 0x3721, CRL_REG_LEN_08BIT, 0x0A }, + { 0x3724, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3725, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3726, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3728, CRL_REG_LEN_08BIT, 0x0A }, + { 0x3729, CRL_REG_LEN_08BIT, 0x03 }, + { 0x372A, CRL_REG_LEN_08BIT, 0x06 }, + { 0x372B, CRL_REG_LEN_08BIT, 0xA6 }, + { 0x372C, CRL_REG_LEN_08BIT, 0xA6 }, + { 0x372D, CRL_REG_LEN_08BIT, 0xA6 }, + { 0x372E, CRL_REG_LEN_08BIT, 0x0C }, + { 0x372F, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3730, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3731, CRL_REG_LEN_08BIT, 0x0C }, + { 0x3732, CRL_REG_LEN_08BIT, 0x28 }, + { 0x3733, CRL_REG_LEN_08BIT, 0x10 }, + { 
0x3734, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3736, CRL_REG_LEN_08BIT, 0x30 }, + { 0x373A, CRL_REG_LEN_08BIT, 0x0A }, + { 0x373B, CRL_REG_LEN_08BIT, 0x0B }, + { 0x373C, CRL_REG_LEN_08BIT, 0x14 }, + { 0x373E, CRL_REG_LEN_08BIT, 0x06 }, + { 0x3750, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3751, CRL_REG_LEN_08BIT, 0x0e }, + { 0x3755, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3758, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3759, CRL_REG_LEN_08BIT, 0x4C }, + { 0x375A, CRL_REG_LEN_08BIT, 0x0C }, + { 0x375B, CRL_REG_LEN_08BIT, 0x26 }, + { 0x375C, CRL_REG_LEN_08BIT, 0x20 }, + { 0x375D, CRL_REG_LEN_08BIT, 0x04 }, + { 0x375E, CRL_REG_LEN_08BIT, 0x00 }, + { 0x375F, CRL_REG_LEN_08BIT, 0x28 }, + { 0x3760, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3761, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3762, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3763, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3766, CRL_REG_LEN_08BIT, 0xFF }, + { 0x376B, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3772, CRL_REG_LEN_08BIT, 0x46 }, + { 0x3773, CRL_REG_LEN_08BIT, 0x04 }, + { 0x3774, CRL_REG_LEN_08BIT, 0x2C }, + { 0x3775, CRL_REG_LEN_08BIT, 0x13 }, + { 0x3776, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3777, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3778, CRL_REG_LEN_08BIT, 0x17 }, + { 0x37A0, CRL_REG_LEN_08BIT, 0x88 }, + { 0x37A1, CRL_REG_LEN_08BIT, 0x7A }, + { 0x37A2, CRL_REG_LEN_08BIT, 0x7A }, + { 0x37A3, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37A4, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37A5, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37A6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37A7, CRL_REG_LEN_08BIT, 0x88 }, + { 0x37A8, CRL_REG_LEN_08BIT, 0x98 }, + { 0x37A9, CRL_REG_LEN_08BIT, 0x98 }, + { 0x37AA, CRL_REG_LEN_08BIT, 0x88 }, + { 0x37AB, CRL_REG_LEN_08BIT, 0x5C }, + { 0x37AC, CRL_REG_LEN_08BIT, 0x5C }, + { 0x37AD, CRL_REG_LEN_08BIT, 0x55 }, + { 0x37AE, CRL_REG_LEN_08BIT, 0x19 }, + { 0x37AF, CRL_REG_LEN_08BIT, 0x19 }, + { 0x37B0, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37B1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37B2, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37B3, CRL_REG_LEN_08BIT, 0x84 }, + { 0x37B4, CRL_REG_LEN_08BIT, 0x84 }, + { 0x37B5, CRL_REG_LEN_08BIT, 0x60 }, + { 0x37B6, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37B7, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37B8, CRL_REG_LEN_08BIT, 0x00 }, + { 0x37B9, CRL_REG_LEN_08BIT, 0xFF }, + { 0x3800, CRL_REG_LEN_08BIT, 0x00 },/* h_crop_start high */ + { 0x3801, CRL_REG_LEN_08BIT, 0x0C },/* h_crop_start low */ + { 0x3802, CRL_REG_LEN_08BIT, 0x00 },/* v_crop_start high */ + { 0x3803, CRL_REG_LEN_08BIT, 0x0C },/* v_crop_start low */ + { 0x3804, CRL_REG_LEN_08BIT, 0x0C },/* h_crop_end high */ + { 0x3805, CRL_REG_LEN_08BIT, 0xD3 },/* h_crop_end low */ + { 0x3806, CRL_REG_LEN_08BIT, 0x09 },/* v_crop_end high */ + { 0x3807, CRL_REG_LEN_08BIT, 0xA3 },/* v_crop_end low */ + { 0x3808, CRL_REG_LEN_08BIT, 0x0C },/* h_output_size high */ + { 0x3809, CRL_REG_LEN_08BIT, 0xC0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x09 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x90 },/* v_output_size low */ + { 0x380C, CRL_REG_LEN_08BIT, 0x07 },/* horizontal timing size high */ + { 0x380D, CRL_REG_LEN_08BIT, 0x94 },/* horizontal timing size low */ + { 0x380E, CRL_REG_LEN_08BIT, 0x0A },/* vertical timing size high */ + { 0x380F, CRL_REG_LEN_08BIT, 0x0D },/* vertical timing size low */ + { 0x3810, CRL_REG_LEN_08BIT, 0x00 },/* h_win offset high */ + { 0x3811, CRL_REG_LEN_08BIT, 0x04 },/* h_win offset low */ + { 0x3812, CRL_REG_LEN_08BIT, 0x00 },/* v_win offset high */ + { 0x3813, CRL_REG_LEN_08BIT, 0x02 },/* v_win offset low */ + { 0x3814, CRL_REG_LEN_08BIT, 0x01 },/* h_odd_inc */ + { 0x3815, CRL_REG_LEN_08BIT, 0x01 },/* h_even_inc */ + { 
0x3820, CRL_REG_LEN_08BIT, 0x00 },/* format1 */ + { 0x3821, CRL_REG_LEN_08BIT, 0x40 },/* format2 */ + { 0x382A, CRL_REG_LEN_08BIT, 0x01 },/* v_odd_inc */ + { 0x382B, CRL_REG_LEN_08BIT, 0x01 },/* v_even_inc */ + { 0x3830, CRL_REG_LEN_08BIT, 0x06 }, + { 0x3836, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3837, CRL_REG_LEN_08BIT, 0x18 }, + { 0x3841, CRL_REG_LEN_08BIT, 0xFF },/* AUTO_SIZE_CTRL */ + { 0x3846, CRL_REG_LEN_08BIT, 0x48 }, + { 0x3D85, CRL_REG_LEN_08BIT, 0x16 },/* OTP_REG85 */ + { 0x3D8C, CRL_REG_LEN_08BIT, 0x73 }, + { 0x3D8D, CRL_REG_LEN_08BIT, 0xde }, + { 0x3F08, CRL_REG_LEN_08BIT, 0x10 },/* PSRAM control register */ + { 0x4000, CRL_REG_LEN_08BIT, 0xF1 },/* BLC CTRL00 = default */ + { 0x4001, CRL_REG_LEN_08BIT, 0x00 },/* BLC CTRL01 */ + { 0x4002, CRL_REG_LEN_08BIT, 0x27 },/* BLC offset = 0x27 */ + { 0x4005, CRL_REG_LEN_08BIT, 0x10 },/* BLC target = 0x0010 */ + { 0x4009, CRL_REG_LEN_08BIT, 0x81 },/* BLC CTRL09 */ + { 0x400B, CRL_REG_LEN_08BIT, 0x0C },/* BLC CTRL0B = default */ + { 0x4011, CRL_REG_LEN_08BIT, 0x20 }, + { 0x401B, CRL_REG_LEN_08BIT, 0x00 },/* Zero line R coeff. = 0x0000 */ + { 0x401D, CRL_REG_LEN_08BIT, 0x00 },/* Zero line T coeff. = 0x0000 */ + { 0x401F, CRL_REG_LEN_08BIT, 0x00 },/* BLC CTRL1F */ + { 0x4020, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4021, CRL_REG_LEN_08BIT, 0x04 }, + { 0x4022, CRL_REG_LEN_08BIT, 0x0C }, + { 0x4023, CRL_REG_LEN_08BIT, 0x60 }, + { 0x4024, CRL_REG_LEN_08BIT, 0x0f }, + { 0x4025, CRL_REG_LEN_08BIT, 0x36 }, + { 0x4026, CRL_REG_LEN_08BIT, 0x0f }, + { 0x4027, CRL_REG_LEN_08BIT, 0x37 }, + { 0x4028, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4029, CRL_REG_LEN_08BIT, 0x02 }, + { 0x402A, CRL_REG_LEN_08BIT, 0x04 }, + { 0x402B, CRL_REG_LEN_08BIT, 0x08 }, + { 0x402C, CRL_REG_LEN_08BIT, 0x00 }, + { 0x402D, CRL_REG_LEN_08BIT, 0x02 }, + { 0x402E, CRL_REG_LEN_08BIT, 0x04 }, + { 0x402F, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4034, CRL_REG_LEN_08BIT, 0x3F }, + { 0x403D, CRL_REG_LEN_08BIT, 0x04 },/* BLC CTRL3D */ + { 0x4300, CRL_REG_LEN_08BIT, 0xFF },/* clip_max[11:4] = 0xFFF */ + { 0x4301, CRL_REG_LEN_08BIT, 0x00 },/* clip_min[11:4] = 0 */ + { 0x4302, CRL_REG_LEN_08BIT, 0x0F },/* clip_min/max[3:0] */ + { 0x4316, CRL_REG_LEN_08BIT, 0x00 },/* CTRL16 = default */ + { 0x4503, CRL_REG_LEN_08BIT, 0x18 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x97 }, + /* wkup_dly = Mark1 wakeup delay/2^10 = 0x25 */ + { 0x4808, CRL_REG_LEN_08BIT, 0x25 }, + { 0x4816, CRL_REG_LEN_08BIT, 0x12 },/* Embedded data type */ + { 0x5A08, CRL_REG_LEN_08BIT, 0x02 },/* Data at the beginning of the frame */ + { 0x5041, CRL_REG_LEN_08BIT, 0x01 },/* ISP CTRL41 - embedded data=on */ + { 0x4307, CRL_REG_LEN_08BIT, 0x31 },/* Embedded_en */ + { 0x481F, CRL_REG_LEN_08BIT, 0x32 },/* clk_prepare_min = 0x32 */ + { 0x4837, CRL_REG_LEN_08BIT, 0x16 },/* pclk_period = 0x16 */ + { 0x4850, CRL_REG_LEN_08BIT, 0x10 },/* LANE SEL01 */ + { 0x4851, CRL_REG_LEN_08BIT, 0x32 },/* LANE SEL02 */ + { 0x4B00, CRL_REG_LEN_08BIT, 0x2A }, + { 0x4B0D, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4D00, CRL_REG_LEN_08BIT, 0x04 },/* TPM_CTRL_REG */ + { 0x4D01, CRL_REG_LEN_08BIT, 0x18 },/* TPM_CTRL_REG */ + { 0x4D02, CRL_REG_LEN_08BIT, 0xC3 },/* TPM_CTRL_REG */ + { 0x4D03, CRL_REG_LEN_08BIT, 0xFF },/* TPM_CTRL_REG */ + { 0x4D04, CRL_REG_LEN_08BIT, 0xFF },/* TPM_CTRL_REG */ + { 0x4D05, CRL_REG_LEN_08BIT, 0xFF },/* TPM_CTRL_REG */ + /* + * Lens correction (LENC) function enable = 0 + * Slave sensor AWB Gain function enable = 1 + * Slave sensor AWB Statistics function enable = 1 + * Master sensor AWB Gain function enable = 1 + * Master sensor AWB
Statistics function enable = 1 + * Black DPC function enable = 1 + * White DPC function enable = 1 + */ + { 0x5000, CRL_REG_LEN_08BIT, 0x7E }, + { 0x5001, CRL_REG_LEN_08BIT, 0x01 },/* BLC function enable = 1 */ + /* + * Horizontal scale function enable = 0 + * WBMATCH bypass mode = Select slave sensor's gain + * WBMATCH function enable = 0 + * Master MWB gain support RGBC = 0 + * OTP_DPC function enable = 1 + * Manual mode of VarioPixel function enable = 0 + * Manual enable of VarioPixel function enable = 0 + * Use VSYNC to latch ISP modules' function enable signals = 0 + */ + { 0x5002, CRL_REG_LEN_08BIT, 0x08 }, + /* + * Bypass all ISP modules after BLC module = 0 + * DPC_DBC buffer control enable = 1 + * WBMATCH VSYNC selection = Select master sensor's VSYNC fall + * Select master AWB gain to embed line = AWB gain before manual mode + * Enable BLC's input flip_i signal = 0 + */ + { 0x5003, CRL_REG_LEN_08BIT, 0x20 }, + { 0x5041, CRL_REG_LEN_08BIT, 0x1D },/* ISP CTRL41 - embedded data=on */ + { 0x5046, CRL_REG_LEN_08BIT, 0x12 },/* ISP CTRL46 = default */ + /* + * Tail enable = 1 + * Saturate cross cluster enable = 1 + * Remove cross cluster enable = 1 + * Enable to remove connected defect pixels in same channel = 1 + * Enable to remove connected defect pixels in different channel = 1 + * Smooth enable, use average G for recovery = 1 + * Black/white sensor mode enable = 0 + * Manual mode enable = 0 + */ + { 0x5780, CRL_REG_LEN_08BIT, 0x3e }, + { 0x5781, CRL_REG_LEN_08BIT, 0x0f }, + { 0x5782, CRL_REG_LEN_08BIT, 0x44 }, + { 0x5783, CRL_REG_LEN_08BIT, 0x02 }, + { 0x5784, CRL_REG_LEN_08BIT, 0x01 },/* DPC CTRL04 */ + { 0x5785, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5786, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5787, CRL_REG_LEN_08BIT, 0x04 },/* DPC CTRL07 */ + { 0x5788, CRL_REG_LEN_08BIT, 0x02 },/* DPC CTRL08 */ + { 0x5789, CRL_REG_LEN_08BIT, 0x0f }, + { 0x578A, CRL_REG_LEN_08BIT, 0xfd },/* DPC CTRL0A */ + { 0x578B, CRL_REG_LEN_08BIT, 0xf5 },/* DPC CTRL0B */ + { 0x578C, CRL_REG_LEN_08BIT, 0xf5 },/* DPC CTRL0C */ + { 0x578D, CRL_REG_LEN_08BIT, 0x03 },/* DPC CTRL0D */ + { 0x578E, CRL_REG_LEN_08BIT, 0x08 },/* DPC CTRL0E */ + { 0x578F, CRL_REG_LEN_08BIT, 0x0c },/* DPC CTRL0F */ + { 0x5790, CRL_REG_LEN_08BIT, 0x08 },/* DPC CTRL10 */ + { 0x5791, CRL_REG_LEN_08BIT, 0x04 }, + { 0x5792, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5793, CRL_REG_LEN_08BIT, 0x52 }, + { 0x5794, CRL_REG_LEN_08BIT, 0xa3 }, + { 0x586E, CRL_REG_LEN_08BIT, 0x10 }, + { 0x586F, CRL_REG_LEN_08BIT, 0x08 }, + { 0x58F8, CRL_REG_LEN_08BIT, 0x3d }, + { 0x5871, CRL_REG_LEN_08BIT, 0x0d }, + { 0x5870, CRL_REG_LEN_08BIT, 0x18 }, + { 0x5901, CRL_REG_LEN_08BIT, 0x00 },/* VAP CTRL01 = default */ + { 0x5B00, CRL_REG_LEN_08BIT, 0x02 },/* OTP CTRL00 */ + { 0x5B01, CRL_REG_LEN_08BIT, 0x10 },/* OTP CTRL01 */ + { 0x5B02, CRL_REG_LEN_08BIT, 0x03 },/* OTP CTRL02 */ + { 0x5B03, CRL_REG_LEN_08BIT, 0xCF },/* OTP CTRL03 */ + { 0x5B05, CRL_REG_LEN_08BIT, 0x6C },/* OTP CTRL05 = default */ + { 0x5E00, CRL_REG_LEN_08BIT, 0x00 },/* PRE CTRL00 = default */ + { 0x5E01, CRL_REG_LEN_08BIT, 0x41 },/* PRE_CTRL01 = default */ + { 0x4825, CRL_REG_LEN_08BIT, 0x3a }, + { 0x4826, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4808, CRL_REG_LEN_08BIT, 0x25 }, + { 0x3763, CRL_REG_LEN_08BIT, 0x18 }, + { 0x3768, CRL_REG_LEN_08BIT, 0xcc }, + { 0x470b, CRL_REG_LEN_08BIT, 0x28 }, + { 0x4202, CRL_REG_LEN_08BIT, 0x00 }, + { 0x400d, CRL_REG_LEN_08BIT, 0x10 }, + { 0x4040, CRL_REG_LEN_08BIT, 0x07 }, + { 0x403e, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4041, CRL_REG_LEN_08BIT, 0xc6 }, + { 0x400a, CRL_REG_LEN_08BIT, 0x01 }, +}; +
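/*
 * Illustrative sketch only, not part of this patch: how a
 * crl_register_write_rep table such as ov8858_powerup_regset above
 * might be applied. The field names address/len/val are assumed from
 * the positional initializers used throughout this file, and
 * i2c_write_reg() is a hypothetical helper; the real crlmodule write
 * path lives outside these configuration headers. An entry whose
 * length field is CRL_REG_LEN_DELAY is a wait (assumed here to be in
 * milliseconds) rather than a bus write, e.g.
 * { 0x0000, CRL_REG_LEN_DELAY, 0x40 } in ov5670_streamoff_regs.
 * Keeping the sequences as plain data tables keeps all per-sensor
 * tuning in headers like this one.
 */
static int apply_regset(const struct crl_register_write_rep *regs,
			unsigned int items)
{
	unsigned int i;
	int ret;

	for (i = 0; i < items; i++) {
		if (regs[i].len == CRL_REG_LEN_DELAY) {
			/* Delay entry: sleep, do not touch the bus */
			msleep(regs[i].val); /* <linux/delay.h> */
			continue;
		}
		/* Plain write; register width (8/16/24 bit) from .len */
		ret = i2c_write_reg(regs[i].address, regs[i].len,
				    regs[i].val);
		if (ret)
			return ret;
	}
	return 0;
}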
+static struct crl_register_write_rep ov8858_mode_8m_native[] = { + { 0x382d, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3808, CRL_REG_LEN_08BIT, 0x0C },/* h_output_size high 3264 x 2448 */ + { 0x3809, CRL_REG_LEN_08BIT, 0xc0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x09 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x90 },/* v_output_size low */ + { 0x4022, CRL_REG_LEN_08BIT, 0x0C }, + { 0x4023, CRL_REG_LEN_08BIT, 0x60 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x97 }, +}; + +static struct crl_register_write_rep ov8858_mode_6m_native[] = { + { 0x382d, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3808, CRL_REG_LEN_08BIT, 0x0C },/* h_output_size high 3264 x 1836 */ + { 0x3809, CRL_REG_LEN_08BIT, 0xc0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x07 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x2c },/* v_output_size low */ + { 0x4022, CRL_REG_LEN_08BIT, 0x0C }, + { 0x4023, CRL_REG_LEN_08BIT, 0x60 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x97 }, +}; + +static struct crl_register_write_rep ov8858_mode_8m_full[] = { + { 0x382d, CRL_REG_LEN_08BIT, 0x20 }, + { 0x3808, CRL_REG_LEN_08BIT, 0x0C },/* h_output_size high 3280 x 2464 */ + { 0x3809, CRL_REG_LEN_08BIT, 0xD0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x09 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0xA0 },/* v_output_size low */ + { 0x4022, CRL_REG_LEN_08BIT, 0x0C }, + { 0x4023, CRL_REG_LEN_08BIT, 0x60 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x97 }, +}; + +static struct crl_register_write_rep ov8858_mode_6m_full[] = { + { 0x382d, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3808, CRL_REG_LEN_08BIT, 0x0C },/* h_output_size high 3280 x 1852 */ + { 0x3809, CRL_REG_LEN_08BIT, 0xD0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x07 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x3c },/* v_output_size low */ + { 0x4022, CRL_REG_LEN_08BIT, 0x0C }, + { 0x4023, CRL_REG_LEN_08BIT, 0x60 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x01 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x97 }, +}; + +static struct crl_register_write_rep ov8858_mode_1080[] = { + { 0x382d, CRL_REG_LEN_08BIT, 0xa0 }, + { 0x3808, CRL_REG_LEN_08BIT, 0x07 },/* h_output_size high*/ + { 0x3809, CRL_REG_LEN_08BIT, 0x80 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x04 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x38 },/* v_output_size low */ + { 0x4022, CRL_REG_LEN_08BIT, 0x07 }, + { 0x4023, CRL_REG_LEN_08BIT, 0x20 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4601, CRL_REG_LEN_08BIT, 0xef }, +}; + +static struct crl_register_write_rep ov8858_mode_1920x1440_crop[] = { + { 0x382d, CRL_REG_LEN_08BIT, 0xa0 }, + { 0x3808, CRL_REG_LEN_08BIT, 0x07 },/* h_output_size high*/ + { 0x3809, CRL_REG_LEN_08BIT, 0x80 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x05 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0xA0 },/* v_output_size low */ + { 0x4022, CRL_REG_LEN_08BIT, 0x07 }, + { 0x4023, CRL_REG_LEN_08BIT, 0x20 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4601, CRL_REG_LEN_08BIT, 0xef }, +}; + +static struct crl_register_write_rep ov8858_mode_1984x1116_crop[] = { + { 0x382d, CRL_REG_LEN_08BIT, 0xa0 }, + { 0x3808, CRL_REG_LEN_08BIT, 0x07 },/* h_output_size high*/ + { 0x3809, CRL_REG_LEN_08BIT, 0xC0 },/* h_output_size low */ + { 0x380A, CRL_REG_LEN_08BIT, 0x04 },/* v_output_size high */ + { 0x380B, CRL_REG_LEN_08BIT, 0x5C },/* v_output_size low */ + { 0x4022, CRL_REG_LEN_08BIT, 0x07 }, + { 0x4023, 
CRL_REG_LEN_08BIT, 0x20 }, + { 0x4600, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4601, CRL_REG_LEN_08BIT, 0xef }, +}; + +static struct crl_register_write_rep ov8858_streamon_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x01 } +}; + +static struct crl_register_write_rep ov8858_streamoff_regs[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 } +}; + +static struct crl_register_write_rep ov8858_data_fmt_width10[] = { + { 0x3031, CRL_REG_LEN_08BIT, 0x0a } +}; + +static struct crl_arithmetic_ops ov8858_vflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov8858_hflip_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov8858_hblank_ops[] = { + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 1, + }, +}; + +static struct crl_arithmetic_ops ov8858_exposure_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 4, + }, +}; + +static struct crl_dynamic_register_access ov8858_v_flip_regs[] = { + { + .address = 0x3820, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov8858_vflip_ops), + .ops = ov8858_vflip_ops, + .mask = 0x2, + }, +}; + +static struct crl_dynamic_register_access ov8858_dig_gain_regs[] = { + { + .address = 0x5032, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, + { + .address = 0x5034, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, + { + .address = 0x5036, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access ov8858_h_flip_regs[] = { + { + .address = 0x3821, + .len = CRL_REG_LEN_08BIT | CRL_REG_READ_AND_UPDATE, + .ops_items = ARRAY_SIZE(ov8858_hflip_ops), + .ops = ov8858_hflip_ops, + .mask = 0x2, + }, +}; + +struct crl_register_write_rep ov8858_poweroff_regset[] = { + { 0x0103, CRL_REG_LEN_08BIT, 0x01 }, +}; + +static struct crl_dynamic_register_access ov8858_ana_gain_global_regs[] = { + { + .address = 0x3508, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0x7ff, + }, +}; + +static struct crl_dynamic_register_access ov8858_exposure_regs[] = { + { + .address = 0x3500, + .len = CRL_REG_LEN_24BIT, + .ops_items = ARRAY_SIZE(ov8858_exposure_ops), + .ops = ov8858_exposure_ops, + .mask = 0x0ffff0, + }, +}; + +static struct crl_dynamic_register_access ov8858_vblank_regs[] = { + { + .address = 0x380E, + .len = CRL_REG_LEN_16BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xffff, + }, +}; + +static struct crl_dynamic_register_access ov8858_hblank_regs[] = { + { + .address = 0x380C, + .len = CRL_REG_LEN_16BIT, + .ops_items = ARRAY_SIZE(ov8858_hblank_ops), + .ops = ov8858_hblank_ops, + .mask = 0xffff, + }, +}; + +static struct crl_sensor_detect_config ov8858_sensor_detect_regset[] = { + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x300C, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration ov8858_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 360000000, + .bitsperpixel = 10, + .pixel_rate_csi = 180000000, + .pixel_rate_pa = 290133334, + .csi_lanes = 4, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(ov8858_pll_360mbps), + .pll_regs = ov8858_pll_360mbps, + }, + +}; + +static struct crl_subdev_rect_rep ov8858_8m_rects_native[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + 
.in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, +}; + +static struct crl_subdev_rect_rep ov8858_6m_rects_native[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 1836, + }, +}; + + +static struct crl_subdev_rect_rep ov8858_8m_rects_full[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3280, + .in_rect.height = 2464, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3280, + .out_rect.height = 2464, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3280, + .in_rect.height = 2464, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3280, + .out_rect.height = 2464, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3280, + .in_rect.height = 2464, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3280, + .out_rect.height = 2464, + }, +}; + +static struct crl_subdev_rect_rep ov8858_6m_rects_full[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3280, + .in_rect.height = 2464, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3280, + .out_rect.height = 2464, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3280, + .in_rect.height = 2464, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3280, + .out_rect.height = 2464, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3280, + .in_rect.height = 2464, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3280, + .out_rect.height = 1852, + }, +}; + +static struct crl_subdev_rect_rep ov8858_1080_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = 
CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1080, + }, +}; + +static struct crl_subdev_rect_rep ov8858_1920x1440_rects_crop[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1920, + .out_rect.height = 1440, + }, +}; + +static struct crl_subdev_rect_rep ov8858_1984x1116_rects_crop[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 3264, + .out_rect.height = 2448, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 3264, + .in_rect.height = 2448, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1984, + .out_rect.height = 1116, + }, +}; + +static struct crl_mode_rep ov8858_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov8858_8m_rects_native), + .sd_rects = ov8858_8m_rects_native, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3264, + .height = 2448, + .min_llp = 3880, + .min_fll = 2474, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov8858_mode_8m_native), + .mode_regs = ov8858_mode_8m_native, + }, + { + .sd_rects_items = ARRAY_SIZE(ov8858_6m_rects_native), + .sd_rects = ov8858_6m_rects_native, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3264, + .height = 1836, + .min_llp = 5132, + .min_fll = 1872, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov8858_mode_6m_native), + .mode_regs = ov8858_mode_6m_native, + }, + { + .sd_rects_items = ARRAY_SIZE(ov8858_8m_rects_full), + .sd_rects = ov8858_8m_rects_full, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3280, + .height = 2464, + .min_llp = 3880, + .min_fll = 2474, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov8858_mode_8m_full), + .mode_regs = ov8858_mode_8m_full, + }, + { + .sd_rects_items = ARRAY_SIZE(ov8858_6m_rects_full), + .sd_rects = ov8858_6m_rects_full, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 3280, + .height = 1852, + .min_llp = 5132, + .min_fll = 1872, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov8858_mode_6m_full), + .mode_regs = ov8858_mode_6m_full, + }, + { + .sd_rects_items = ARRAY_SIZE(ov8858_1080_rects), + .sd_rects = ov8858_1080_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1080, + .min_llp = 4284, + .min_fll = 1120, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 
ARRAY_SIZE(ov8858_mode_1080), + .mode_regs = ov8858_mode_1080, + }, + { + .sd_rects_items = ARRAY_SIZE(ov8858_1920x1440_rects_crop), + .sd_rects = ov8858_1920x1440_rects_crop, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1920, + .height = 1440, + .min_llp = 3880, + .min_fll = 1480, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov8858_mode_1920x1440_crop), + .mode_regs = ov8858_mode_1920x1440_crop, + }, + { + .sd_rects_items = ARRAY_SIZE(ov8858_1984x1116_rects_crop), + .sd_rects = ov8858_1984x1116_rects_crop, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1984, + .height = 1116, + .min_llp = 3880, + .min_fll = 1120, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov8858_mode_1984x1116_crop), + .mode_regs = ov8858_mode_1984x1116_crop, + }, + +}; + +static struct crl_sensor_subdev_config ov8858_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "ov8858 scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov8858 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov8858 pixel array", + }, +}; + +static struct crl_sensor_limits ov8858_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 3280, + .y_addr_max = 2464, + .min_frame_length_lines = 160, + .max_frame_length_lines = 65535, + .min_line_length_pixels = 3880, + .max_line_length_pixels = 32752, + .scaler_m_min = 16, + .scaler_m_max = 255, + .scaler_n_min = 16, + .scaler_n_max = 16, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data ov8858_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, +}; + +static struct crl_csi_data_fmt ov8858_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 1, + .regs = ov8858_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov8858_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov8858_data_fmt_width10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .regs_items = 1, + .bits_per_pixel = 10, + .regs = ov8858_data_fmt_width10, + }, +}; + +static struct crl_v4l2_ctrl ov8858_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = 
CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 128, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov8858_ana_gain_global_regs), + .regs = ov8858_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov8858_exposure_regs), + .regs = ov8858_exposure_regs, + .dep_items = 0, /* FLL changes automatically */ + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov8858_h_flip_regs), + .regs = ov8858_h_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov8858_v_flip_regs), + .regs = ov8858_v_flip_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame Length Lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 2474, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov8858_vblank_regs), + .regs = ov8858_vblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 1024, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 3880, + .flags = 
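+ /* The custom FLL/LLP controls are clamped at runtime to the current mode's min_fll / min_llp by __crlmodule_update_framesize() before ov8858_vblank_regs / ov8858_hblank_regs are written. */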
V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov8858_hblank_regs), + .regs = ov8858_hblank_regs, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_GAIN, + .name = "Digital Gain", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4095, + .data.std_data.step = 1, + .data.std_data.def = 1024, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov8858_dig_gain_regs), + .regs = ov8858_dig_gain_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +#define OV8858_OTP_START_ADDR 0x7010 +#define OV8858_OTP_END_ADDR 0x7186 + +#define OV8858_OTP_LEN (OV8858_OTP_END_ADDR - OV8858_OTP_START_ADDR + 1) +#define OV8858_OTP_L_ADDR(x) ((x) & 0xff) +#define OV8858_OTP_H_ADDR(x) (((x) >> 8) & 0xff) + +static struct crl_register_write_rep ov8858_nvm_preop_regset[] = { + /* Start streaming */ + { 0x0100, CRL_REG_LEN_08BIT, 0x01 }, + /* Manual mode, program disable */ + { 0x3D84, CRL_REG_LEN_08BIT, 0xC0 }, + /* Manual OTP start address for access */ + { 0x3D88, CRL_REG_LEN_08BIT, OV8858_OTP_H_ADDR(OV8858_OTP_START_ADDR)}, + { 0x3D89, CRL_REG_LEN_08BIT, OV8858_OTP_L_ADDR(OV8858_OTP_START_ADDR)}, + /* Manual OTP end address for access */ + { 0x3D8A, CRL_REG_LEN_08BIT, OV8858_OTP_H_ADDR(OV8858_OTP_END_ADDR)}, + { 0x3D8B, CRL_REG_LEN_08BIT, OV8858_OTP_L_ADDR(OV8858_OTP_END_ADDR)}, + /* OTP load enable */ + { 0x3D81, CRL_REG_LEN_08BIT, 0x01 }, + /* Wait for the data to load into the buffer */ + { 0x0000, CRL_REG_LEN_DELAY, 0x05 }, +}; + +static struct crl_register_write_rep ov8858_nvm_postop_regset[] = { + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, /* Stop streaming */ +}; + +static struct crl_nvm_blob ov8858_nvm_blobs[] = { + {CRL_I2C_ADDRESS_NO_OVERRIDE, OV8858_OTP_START_ADDR, OV8858_OTP_LEN }, +}; + +static struct crl_arithmetic_ops ov8858_frame_desc_width_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .operand.entity_val = CRL_VAR_REF_OUTPUT_WIDTH, + }, +}; + +static struct crl_arithmetic_ops ov8858_frame_desc_height_ops[] = { + { + .op = CRL_ASSIGNMENT, + .operand.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, + .operand.entity_val = 1, + }, +}; + +static struct crl_frame_desc ov8858_frame_desc[] = { + { + .flags.entity_val = 0, + .bpp.entity_type = CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + .bpp.entity_val = CRL_VAR_REF_BITSPERPIXEL, + .pixelcode.entity_val = MEDIA_BUS_FMT_FIXED, + .length.entity_val = 0, + .start_line.entity_val = 0, + .start_pixel.entity_val = 0, + .width = { + .ops_items = ARRAY_SIZE(ov8858_frame_desc_width_ops), + .ops = ov8858_frame_desc_width_ops, + }, + .height = { + .ops_items = ARRAY_SIZE(ov8858_frame_desc_height_ops), + .ops = ov8858_frame_desc_height_ops, + }, + .csi2_channel.entity_val = 0, + .csi2_data_type.entity_val = 0x12, + }, +}; + +/* Power items; they are enabled in the order listed here */ +static struct crl_power_seq_entity ov8858_power_items[] = { + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VANA", + .val = 2800000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VDIG", + .val = 1200000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_REGULATOR_FRAMEWORK, + .ent_name = "VIO", + .val = 1800000, + .delay = 0, + }, + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + }, + 
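+ /* The three regulators above are enabled first, then the 24 MHz reference clock; the reset GPIO below comes last, with a 10000-unit delay (10 ms, assuming .delay is in microseconds) before the first I2C access. */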
{ + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .delay = 10000, + }, +}; + +static struct crl_sensor_configuration ov8858_crl_configuration = { + + .power_items = ARRAY_SIZE(ov8858_power_items), + .power_entities = ov8858_power_items, + + .powerup_regs_items = ARRAY_SIZE(ov8858_powerup_regset), + .powerup_regs = ov8858_powerup_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + + .id_reg_items = ARRAY_SIZE(ov8858_sensor_detect_regset), + .id_regs = ov8858_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov8858_sensor_subdevs), + .subdevs = ov8858_sensor_subdevs, + + .sensor_limits = &ov8858_sensor_limits, + + .pll_config_items = ARRAY_SIZE(ov8858_pll_configurations), + .pll_configs = ov8858_pll_configurations, + + .modes_items = ARRAY_SIZE(ov8858_modes), + .modes = ov8858_modes, + + .streamon_regs_items = ARRAY_SIZE(ov8858_streamon_regs), + .streamon_regs = ov8858_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(ov8858_streamoff_regs), + .streamoff_regs = ov8858_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ov8858_v4l2_ctrls), + .v4l2_ctrl_bank = ov8858_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov8858_crl_csi_data_fmt), + .csi_fmts = ov8858_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov8858_flip_configurations), + .flip_data = ov8858_flip_configurations, + + .crl_nvm_info.nvm_flags = CRL_NVM_ADDR_MODE_16BIT, + .crl_nvm_info.nvm_preop_regs_items = + ARRAY_SIZE(ov8858_nvm_preop_regset), + .crl_nvm_info.nvm_preop_regs = ov8858_nvm_preop_regset, + .crl_nvm_info.nvm_postop_regs_items = + ARRAY_SIZE(ov8858_nvm_postop_regset), + .crl_nvm_info.nvm_postop_regs = ov8858_nvm_postop_regset, + .crl_nvm_info.nvm_blobs_items = ARRAY_SIZE(ov8858_nvm_blobs), + .crl_nvm_info.nvm_config = ov8858_nvm_blobs, + + .frame_desc_entries = ARRAY_SIZE(ov8858_frame_desc), + .frame_desc_type = CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, + .frame_desc = ov8858_frame_desc, + + .msr_file_name = "00ov8858.bxt_rvp.drvb", +}; + +#endif /* __CRLMODULE_OV8858_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crl_ov9281_configuration.h b/drivers/media/i2c/crlmodule/crl_ov9281_configuration.h new file mode 100644 index 000000000000..b052d3058431 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_ov9281_configuration.h @@ -0,0 +1,522 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Intel Corporation + * + * Author: Wu Xia + * + */ + +#ifndef __CRLMODULE_OV9281_CONFIGURATION_H_ +#define __CRLMODULE_OV9281_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static struct crl_register_write_rep ov9281_pll_800mbps[] = { + { 0x0302, CRL_REG_LEN_08BIT, 0x32 }, + { 0x030d, CRL_REG_LEN_08BIT, 0x50 }, + { 0x030e, CRL_REG_LEN_08BIT, 0x02 }, + { 0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ +}; + +static struct crl_register_write_rep ov9281_powerup_regset[] = { + { 0x4f00, CRL_REG_LEN_08BIT, 0x00 }, + { 0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ +}; + +static struct crl_register_write_rep ov9281_mode_1m[] = { + { 0x3001, CRL_REG_LEN_08BIT, 0x60 }, + { 0x3004, CRL_REG_LEN_08BIT, 0x00 }, + + { 0x3005, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3006, CRL_REG_LEN_08BIT, 0x04 }, + + { 0x3011, CRL_REG_LEN_08BIT, 0x0a }, + { 0x3013, CRL_REG_LEN_08BIT, 0x18 }, + { 0x3022, CRL_REG_LEN_08BIT, 0x01 }, + { 0x3030, CRL_REG_LEN_08BIT, 0x10 }, + { 0x3039, CRL_REG_LEN_08BIT, 0x32 }, + { 0x303a, CRL_REG_LEN_08BIT, 0x00 }, + + { 0x3503, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3505, CRL_REG_LEN_08BIT, 0x8c }, + { 0x3507, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3508, 
CRL_REG_LEN_08BIT, 0x00 }, + { 0x3610, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3611, CRL_REG_LEN_08BIT, 0xa0 }, + + { 0x3620, CRL_REG_LEN_08BIT, 0x6f }, + { 0x3632, CRL_REG_LEN_08BIT, 0x56 }, + { 0x3633, CRL_REG_LEN_08BIT, 0x78 }, + { 0x3662, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3666, CRL_REG_LEN_08BIT, 0x00 }, + + { 0x366f, CRL_REG_LEN_08BIT, 0x5a }, + { 0x3680, CRL_REG_LEN_08BIT, 0x84 }, + + { 0x3712, CRL_REG_LEN_08BIT, 0x80 }, + { 0x372d, CRL_REG_LEN_08BIT, 0x22 }, + { 0x3731, CRL_REG_LEN_08BIT, 0x80 }, + { 0x3732, CRL_REG_LEN_08BIT, 0x30 }, + { 0x3778, CRL_REG_LEN_08BIT, 0x00 }, + + { 0x377d, CRL_REG_LEN_08BIT, 0x22 }, + { 0x3788, CRL_REG_LEN_08BIT, 0x02 }, + { 0x3789, CRL_REG_LEN_08BIT, 0xa4 }, + { 0x378a, CRL_REG_LEN_08BIT, 0x00 }, + { 0x378b, CRL_REG_LEN_08BIT, 0x4a }, + { 0x3799, CRL_REG_LEN_08BIT, 0x20 }, + + /* window setting*/ + { 0x3800, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3801, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3802, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3803, CRL_REG_LEN_08BIT, 0x08 }, + { 0x3804, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3805, CRL_REG_LEN_08BIT, 0x07 }, + { 0x3806, CRL_REG_LEN_08BIT, 0x03 }, + { 0x3807, CRL_REG_LEN_08BIT, 0x27 }, + + { 0x3808, CRL_REG_LEN_08BIT, 0x05 }, + { 0x3809, CRL_REG_LEN_08BIT, 0x00 }, + { 0x380a, CRL_REG_LEN_08BIT, 0x03 }, + { 0x380b, CRL_REG_LEN_08BIT, 0x20 }, + + { 0x380c, CRL_REG_LEN_08BIT, 0x02 }, + { 0x380d, CRL_REG_LEN_08BIT, 0xd8 }, + { 0x380e, CRL_REG_LEN_08BIT, 0x03 }, + { 0x380f, CRL_REG_LEN_08BIT, 0x8e }, + + { 0x3810, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3811, CRL_REG_LEN_08BIT, 0x00 }, + + { 0x3812, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3813, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3814, CRL_REG_LEN_08BIT, 0x11 }, + { 0x3815, CRL_REG_LEN_08BIT, 0x11 }, + { 0x3820, CRL_REG_LEN_08BIT, 0x40 }, + { 0x3821, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3881, CRL_REG_LEN_08BIT, 0x42 }, + { 0x38b1, CRL_REG_LEN_08BIT, 0x00 }, + { 0x3920, CRL_REG_LEN_08BIT, 0xff }, + { 0x4003, CRL_REG_LEN_08BIT, 0x40 }, + + { 0x4008, CRL_REG_LEN_08BIT, 0x04 }, + { 0x4009, CRL_REG_LEN_08BIT, 0x0b }, + { 0x400c, CRL_REG_LEN_08BIT, 0x00 }, + { 0x400d, CRL_REG_LEN_08BIT, 0x07 }, + { 0x4010, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4043, CRL_REG_LEN_08BIT, 0x40 }, + { 0x4307, CRL_REG_LEN_08BIT, 0x30 }, + + { 0x4317, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4501, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4507, CRL_REG_LEN_08BIT, 0x00 }, + + { 0x4509, CRL_REG_LEN_08BIT, 0x00 }, + { 0x450a, CRL_REG_LEN_08BIT, 0x08 }, + { 0x4601, CRL_REG_LEN_08BIT, 0x04 }, + { 0x470f, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4f07, CRL_REG_LEN_08BIT, 0x00 }, + { 0x4800, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5000, CRL_REG_LEN_08BIT, 0x9f }, + { 0x5001, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5e00, CRL_REG_LEN_08BIT, 0x00 }, + { 0x5d00, CRL_REG_LEN_08BIT, 0x07 }, + { 0x5d01, CRL_REG_LEN_08BIT, 0x00 }, +}; + +static struct crl_register_write_rep ov9281_streamon_regs[] = { + { 0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ + { 0x0100, CRL_REG_LEN_08BIT, 0x01 }, + { 0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ +}; + +static struct crl_register_write_rep ov9281_streamoff_regs[] = { + { 0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ + { 0x0100, CRL_REG_LEN_08BIT, 0x00 }, + { 0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ +}; + +struct crl_register_write_rep ov9281_poweroff_regset[] = { + { 0x4f00, CRL_REG_LEN_08BIT, 0x01 }, + { 0x00, CRL_REG_LEN_DELAY, 10, 0x00}, /* Add a pre 10ms delay */ +}; + +static struct crl_dynamic_register_access ov9281_ana_gain_global_regs[] = { + { + .address = 0x3509, + .len = 
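+ /* No arithmetic ops are attached here, so the analogue gain control value is written to register 0x3509 unmodified (one byte, mask 0xff); the exposure registers below instead shift the control value across 0x3500-0x3502. */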
CRL_REG_LEN_08BIT, + .ops_items = 0, + .ops = 0, + .mask = 0xff, + }, +}; + +static struct crl_arithmetic_ops ov9281_expol_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 4, + }, +}; + +static struct crl_arithmetic_ops ov9281_expom_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 4, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 8, + }, +}; + +static struct crl_arithmetic_ops ov9281_expoh_ops[] = { + { + .op = CRL_BITWISE_LSHIFT, + .operand.entity_val = 4, + }, + { + .op = CRL_BITWISE_RSHIFT, + .operand.entity_val = 16, + }, +}; + +static struct crl_dynamic_register_access ov9281_exposure_regs[] = { + { + .address = 0x3502, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov9281_expol_ops), + .ops = ov9281_expol_ops, + .mask = 0xff, + }, + { + .address = 0x3501, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov9281_expom_ops), + .ops = ov9281_expom_ops, + .mask = 0xff, + }, + { + .address = 0x3500, + .len = CRL_REG_LEN_08BIT, + .ops_items = ARRAY_SIZE(ov9281_expoh_ops), + .ops = ov9281_expoh_ops, + .mask = 0x0f, + }, +}; + +static struct crl_sensor_detect_config ov9281_sensor_detect_regset[] = { + { + .reg = { 0x300A, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, + { + .reg = { 0x300B, CRL_REG_LEN_08BIT, 0x000000ff }, + .width = 7, + }, +}; + +static struct crl_pll_configuration ov9281_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 10, + .pixel_rate_csi = 80000000, + .pixel_rate_pa = 80000000, + .csi_lanes = 2, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = ARRAY_SIZE(ov9281_pll_800mbps), + .pll_regs = ov9281_pll_800mbps, + }, +}; + +static struct crl_subdev_rect_rep ov9281_1m_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 800, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 800, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .in_rect.left = 0, + .in_rect.top = 0, + .in_rect.width = 1280, + .in_rect.height = 800, + .out_rect.left = 0, + .out_rect.top = 0, + .out_rect.width = 1280, + .out_rect.height = 800, + }, +}; + +static struct crl_mode_rep ov9281_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(ov9281_1m_rects), + .sd_rects = ov9281_1m_rects, + .binn_hor = 1, + .binn_vert = 1, + .scale_m = 1, + .width = 1280, + .height = 800, + .min_llp = 728, + .min_fll = 910, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = ARRAY_SIZE(ov9281_mode_1m), + .mode_regs = ov9281_mode_1m, + }, +}; + +static struct crl_sensor_subdev_config ov9281_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_BINNER, + .name = "ov9281 binner", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "ov9281 pixel array", + }, +}; + +static struct crl_sensor_limits ov9281_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 1280, + .y_addr_max = 800, + .min_frame_length_lines = 910, + .max_frame_length_lines = 910, + .min_line_length_pixels = 728, + .max_line_length_pixels = 728, + .scaler_m_min = 16, + .scaler_m_max = 16, + .scaler_n_min = 16, + .scaler_n_max = 16, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 3, +}; + +static struct crl_flip_data ov9281_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_VFLIP, + .pixel_order 
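+ /* All four flip combinations keep CRL_PIXEL_ORDER_GRBG, i.e. flipping never changes the CSI format for this sensor; accordingly only a single 10-bit format is listed in ov9281_crl_csi_data_fmt below. */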
= CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, +}; + +static struct crl_csi_data_fmt ov9281_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + .regs_items = 0, + .regs = 0, + }, +}; + +static struct crl_v4l2_ctrl ov9281_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_BINNER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = INT_MAX, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 0xFF, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov9281_ana_gain_global_regs), + .regs = ov9281_ana_gain_global_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 885, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = ARRAY_SIZE(ov9281_exposure_regs), + .regs = ov9281_exposure_regs, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 910, + .data.std_data.max = 910, + .data.std_data.step = 1, + .data.std_data.def = 910, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 728, + .data.std_data.max = 728, + .data.std_data.step = 1, + 
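+ /* llp and fll are pinned (min == max == def), so the frame timing is fixed: 80000000 / (728 * 910) is roughly 120 fps at the 80 MHz pixel rate declared in ov9281_pll_configurations. */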
.data.std_data.def = 728, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, +}; + +/* Power items, they are enabled in the order they are listed here */ +static struct crl_power_seq_entity ov9281_power_items[] = { + { + .type = CRL_POWER_ETY_CLK_FRAMEWORK, + .val = 24000000, + .delay = 500, + }, + { + .type = CRL_POWER_ETY_GPIO_FROM_PDATA, + .val = 1, + .delay = 5000, + }, +}; + +struct crl_sensor_configuration ov9281_crl_configuration = { + + .power_items = ARRAY_SIZE(ov9281_power_items), + .power_entities = ov9281_power_items, + + .powerup_regs_items = ARRAY_SIZE(ov9281_powerup_regset), + .powerup_regs = ov9281_powerup_regset, + + .poweroff_regs_items = 0, + .poweroff_regs = 0, + + .id_reg_items = ARRAY_SIZE(ov9281_sensor_detect_regset), + .id_regs = ov9281_sensor_detect_regset, + + .subdev_items = ARRAY_SIZE(ov9281_sensor_subdevs), + .subdevs = ov9281_sensor_subdevs, + + .sensor_limits = &ov9281_sensor_limits, + + .pll_config_items = ARRAY_SIZE(ov9281_pll_configurations), + .pll_configs = ov9281_pll_configurations, + + .modes_items = ARRAY_SIZE(ov9281_modes), + .modes = ov9281_modes, + + .streamon_regs_items = ARRAY_SIZE(ov9281_streamon_regs), + .streamon_regs = ov9281_streamon_regs, + + .streamoff_regs_items = ARRAY_SIZE(ov9281_streamoff_regs), + .streamoff_regs = ov9281_streamoff_regs, + + .v4l2_ctrls_items = ARRAY_SIZE(ov9281_v4l2_ctrls), + .v4l2_ctrl_bank = ov9281_v4l2_ctrls, + + .csi_fmts_items = ARRAY_SIZE(ov9281_crl_csi_data_fmt), + .csi_fmts = ov9281_crl_csi_data_fmt, + + .flip_items = ARRAY_SIZE(ov9281_flip_configurations), + .flip_data = ov9281_flip_configurations, +}; + +#endif /* __CRLMODULE_OV9281_CONFIGURATION_H_ */ + + diff --git a/drivers/media/i2c/crlmodule/crl_pixter_stub_configuration.h b/drivers/media/i2c/crlmodule/crl_pixter_stub_configuration.h new file mode 100644 index 000000000000..e6ffbb231d66 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crl_pixter_stub_configuration.h @@ -0,0 +1,710 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation + * + * Author: Wang, Zaikuo + * + */ + +#ifndef __CRLMODULE_PIXTER_STUB_CONFIGURATION_H_ +#define __CRLMODULE_PIXTER_STUB_CONFIGURATION_H_ + +#include "crlmodule-sensor-ds.h" + +static struct crl_pll_configuration pixter_stub_pll_configurations[] = { + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 8, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 10, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, + { + .input_clk = 24000000, + .op_sys_clk = 400000000, + .bitsperpixel = 12, + .pixel_rate_csi = 800000000, + .pixel_rate_pa = 800000000, + .comp_items = 0, + .ctrl_data = 0, + .pll_regs_items = 0, + .pll_regs = NULL, + }, +}; + +static struct crl_subdev_rect_rep pixter_stub_720p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 100000, 100000 }, + .out_rect = { 0, 0, 100000, 100000 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 100000, 100000 }, + .out_rect = { 0, 0, 1280, 720 }, + }, +}; + +static struct crl_subdev_rect_rep pixter_stub_1080p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 100000, 
100000 }, + .out_rect = { 0, 0, 100000, 100000 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 100000, 100000 }, + .out_rect = { 0, 0, 1920, 1080 }, + }, +}; + +static struct crl_subdev_rect_rep pixter_stub_1440p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 100000, 100000 }, + .out_rect = { 0, 0, 100000, 100000 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 100000, 100000 }, + .out_rect = { 0, 0, 2560, 1440 }, + }, +}; + +static struct crl_subdev_rect_rep pixter_stub_1836p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 100000, 100000 }, + .out_rect = { 0, 0, 100000, 100000 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 100000, 100000 }, + .out_rect = { 0, 0, 3264, 1836 }, + }, +}; + +static struct crl_subdev_rect_rep pixter_stub_1920p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 100000, 100000 }, + .out_rect = { 0, 0, 100000, 100000 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 100000, 100000 }, + .out_rect = { 0, 0, 2560, 1920 }, + }, +}; + +static struct crl_subdev_rect_rep pixter_stub_2304p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 100000, 100000 }, + .out_rect = { 0, 0, 100000, 100000 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 100000, 100000 }, + .out_rect = { 0, 0, 4096, 2304 }, + }, +}; + +static struct crl_subdev_rect_rep pixter_stub_2448p_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 100000, 100000 }, + .out_rect = { 0, 0, 100000, 100000 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 100000, 100000 }, + .out_rect = { 0, 0, 3264, 2448 }, + }, +}; + +static struct crl_subdev_rect_rep pixter_stub_13m_rects[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .in_rect = { 0, 0, 100000, 100000 }, + .out_rect = { 0, 0, 100000, 100000 }, + }, + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .in_rect = { 0, 0, 100000, 100000 }, + .out_rect = { 0, 0, 4096, 3072 }, + }, +}; + +static struct crl_mode_rep pixter_stub_modes[] = { + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_720p_rects), + .sd_rects = pixter_stub_720p_rects, + .scale_m = 78, + .width = 1280, + .height = 720, + .min_llp = 6024, + .min_fll = 1660, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_1080p_rects), + .sd_rects = pixter_stub_1080p_rects, + .scale_m = 52, + .width = 1920, + .height = 1080, + .min_llp = 6024, + .min_fll = 1660, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_1440p_rects), + .sd_rects = pixter_stub_1440p_rects, + .scale_m = 39, + .width = 2560, + .height = 1440, + .min_llp = 6024, + .min_fll = 1660, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_1836p_rects), + .sd_rects = pixter_stub_1836p_rects, + .scale_m = 30, + .width = 3264, + .height = 1836, + .min_llp = 6024, + .min_fll = 1900, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_1920p_rects), + .sd_rects = pixter_stub_1920p_rects, + .scale_m = 39, + .width = 2560, + .height = 1920, + .min_llp = 6024, + .min_fll = 2000, + .comp_items = 0, + .ctrl_data = 0, + 
.mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_2304p_rects), + .sd_rects = pixter_stub_2304p_rects, + .scale_m = 24, + .width = 4096, + .height = 2304, + .min_llp = 6024, + .min_fll = 2400, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_2448p_rects), + .sd_rects = pixter_stub_2448p_rects, + .scale_m = 30, + .width = 3264, + .height = 2448, + .min_llp = 6024, + .min_fll = 2600, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, + { + .sd_rects_items = ARRAY_SIZE(pixter_stub_13m_rects), + .sd_rects = pixter_stub_13m_rects, + .scale_m = 24, + .width = 4096, + .height = 3072, + .min_llp = 6024, + .min_fll = 3200, + .comp_items = 0, + .ctrl_data = 0, + .mode_regs_items = 0, + .mode_regs = NULL, + }, +}; + +static struct crl_sensor_subdev_config pixter_stub_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "pixter_stub scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "pixter_stub pixel array", + }, +}; + +static struct crl_sensor_subdev_config pixter_stub_b_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "pixter_stubB scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "pixter_stubB pixel array", + }, +}; + +static struct crl_sensor_subdev_config pixter_stub_c_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "pixter_stubC scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "pixter_stubC pixel array", + }, +}; + +static struct crl_sensor_subdev_config pixter_stub_d_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "pixter_stubD scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "pixter_stubD pixel array", + }, +}; + +static struct crl_sensor_subdev_config pixter_stub_e_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "pixter_stubE scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "pixter_stubE pixel array", + }, +}; + +static struct crl_sensor_subdev_config pixter_stub_f_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "pixter_stubF scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "pixter_stubF pixel array", + }, +}; + +static struct crl_sensor_subdev_config pixter_stub_g_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "pixter_stubG scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "pixter_stubG pixel array", + }, +}; + +static struct crl_sensor_subdev_config pixter_stub_h_sensor_subdevs[] = { + { + .subdev_type = CRL_SUBDEV_TYPE_SCALER, + .name = "pixter_stubH scaler", + }, + { + .subdev_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .name = "pixter_stubH pixel array", + }, +}; + +static struct crl_sensor_limits pixter_stub_sensor_limits = { + .x_addr_min = 0, + .y_addr_min = 0, + .x_addr_max = 100000, + .y_addr_max = 100000, + .min_frame_length_lines = 160, + .max_frame_length_lines = 100000, + .min_line_length_pixels = 6024, + .max_line_length_pixels = 100000, + .scaler_m_min = 1, + .scaler_m_max = 1, + .scaler_n_min = 1, + .scaler_n_max = 1, + .min_even_inc = 1, + .max_even_inc = 1, + .min_odd_inc = 1, + .max_odd_inc = 1, +}; + +static struct crl_flip_data pixter_stub_flip_configurations[] = { + { + .flip = CRL_FLIP_DEFAULT_NONE, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + }, + { + .flip = 
CRL_FLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + }, + { + .flip = CRL_FLIP_HFLIP, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + }, + { + .flip = CRL_FLIP_HFLIP_VFLIP, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + }, +}; + +static struct crl_csi_data_fmt pixter_stub_crl_csi_data_fmt[] = { + { + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 8, + }, + { + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 8, + }, + { + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 8, + }, + { + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 8, + }, + { + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + }, + { + .code = MEDIA_BUS_FMT_YUYV8_1X16, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + }, + { + .code = MEDIA_BUS_FMT_RGB565_1X16, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 16, + }, + { + .code = MEDIA_BUS_FMT_RGB888_1X24, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 24, + }, + { + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 10, + }, + { + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 10, + }, + { + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 10, + }, + { + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 10, + }, + { + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GRBG, + .bits_per_pixel = 12, + }, + { + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .pixel_order = CRL_PIXEL_ORDER_RGGB, + .bits_per_pixel = 12, + }, + { + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .pixel_order = CRL_PIXEL_ORDER_BGGR, + .bits_per_pixel = 12, + }, + { + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .pixel_order = CRL_PIXEL_ORDER_GBRG, + .bits_per_pixel = 12, + }, +}; + +static struct crl_v4l2_ctrl pixter_stub_v4l2_ctrls[] = { + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_IDLE, + .ctrl_id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = CRL_V4L2_CTRL_TYPE_MENU_INT, + .data.v4l2_int_menu.def = 0, + .data.v4l2_int_menu.max = 0, + .data.v4l2_int_menu.menu = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_PA", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 800000000, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_SCALER, + .op_type = CRL_V4L2_CTRL_GET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_PIXEL_RATE, + .name = "V4L2_CID_PIXEL_RATE_CSI", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 800000000, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context 
= SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_ANALOGUE_GAIN, + .name = "V4L2_CID_ANALOGUE_GAIN", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4096, + .data.std_data.step = 1, + .data.std_data.def = 128, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_EXPOSURE, + .name = "V4L2_CID_EXPOSURE", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 65500, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_HFLIP, + .name = "V4L2_CID_HFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_VFLIP, + .name = "V4L2_CID_VFLIP", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 1, + .data.std_data.step = 1, + .data.std_data.def = 0, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_FRAME_LENGTH_LINES, + .name = "Frame length lines", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 160, + .data.std_data.max = 65535, + .data.std_data.step = 1, + .data.std_data.def = 4130, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_LINE_LENGTH_PIXELS, + .name = "Line Length Pixels", + .type = CRL_V4L2_CTRL_TYPE_CUSTOM, + .data.std_data.min = 6024, + .data.std_data.max = 65520, + .data.std_data.step = 1, + .data.std_data.def = 6024, + .flags = V4L2_CTRL_FLAG_UPDATE, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + .v4l2_type = V4L2_CTRL_TYPE_INTEGER, + }, + { + .sd_type = CRL_SUBDEV_TYPE_PIXEL_ARRAY, + .op_type = CRL_V4L2_CTRL_SET_OP, + .context = SENSOR_POWERED_ON, + .ctrl_id = V4L2_CID_GAIN, + .name = "Digital Gain", + .type = CRL_V4L2_CTRL_TYPE_INTEGER, + .data.std_data.min = 0, + .data.std_data.max = 4095, + .data.std_data.step = 1, + .data.std_data.def = 1024, + .flags = 0, + .impact = CRL_IMPACTS_NO_IMPACT, + .ctrl = 0, + .regs_items = 0, + .regs = 0, + .dep_items = 0, + .dep_ctrls = 0, + }, +}; + +#define DEFINE_PIXTER_CRL_CONFIGURATION(port) \ +static struct crl_sensor_configuration pixter_##port##_crl_configuration = { \ + .powerup_regs_items = 0, \ + .powerup_regs = NULL, \ +\ + .poweroff_regs_items = 0, \ + .poweroff_regs = NULL, \ +\ + .id_reg_items = 0, \ + .id_regs = NULL, \ +\ + 
.subdev_items = ARRAY_SIZE(pixter_##port##_sensor_subdevs), \ + .subdevs = pixter_##port##_sensor_subdevs, \ +\ + .sensor_limits = &pixter_stub_sensor_limits, \ +\ + .pll_config_items = ARRAY_SIZE(pixter_stub_pll_configurations), \ + .pll_configs = pixter_stub_pll_configurations, \ +\ + .modes_items = ARRAY_SIZE(pixter_stub_modes), \ + .modes = pixter_stub_modes, \ +\ + .streamon_regs_items = 0, \ + .streamon_regs = NULL, \ +\ + .streamoff_regs_items = 0, \ + .streamoff_regs = NULL, \ +\ + .v4l2_ctrls_items = ARRAY_SIZE(pixter_stub_v4l2_ctrls), \ + .v4l2_ctrl_bank = pixter_stub_v4l2_ctrls, \ +\ + .flip_items = ARRAY_SIZE(pixter_stub_flip_configurations), \ + .flip_data = pixter_stub_flip_configurations, \ +\ + .csi_fmts_items = ARRAY_SIZE(pixter_stub_crl_csi_data_fmt), \ + .csi_fmts = pixter_stub_crl_csi_data_fmt, \ +} +DEFINE_PIXTER_CRL_CONFIGURATION(stub); +DEFINE_PIXTER_CRL_CONFIGURATION(stub_b); +DEFINE_PIXTER_CRL_CONFIGURATION(stub_c); +DEFINE_PIXTER_CRL_CONFIGURATION(stub_d); +DEFINE_PIXTER_CRL_CONFIGURATION(stub_e); +DEFINE_PIXTER_CRL_CONFIGURATION(stub_f); +DEFINE_PIXTER_CRL_CONFIGURATION(stub_g); +DEFINE_PIXTER_CRL_CONFIGURATION(stub_h); + + +#endif /* __CRLMODULE_PIXTER_STUB_CONFIGURATION_H_ */ diff --git a/drivers/media/i2c/crlmodule/crlmodule-core.c b/drivers/media/i2c/crlmodule/crlmodule-core.c new file mode 100755 index 000000000000..a4cd4bc4033f --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-core.c @@ -0,0 +1,3527 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2014 - 2018 Intel Corporation + * + * Author: Vinod Govindapillai + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include "crlmodule.h" +#include "crlmodule-nvm.h" +#include "crlmodule-regs.h" +#include "crlmodule-msrlist.h" + +#ifdef CONFIG_INTEL_IPU4_OV13858 +bool vcm_in_use; +EXPORT_SYMBOL(vcm_in_use); +void crlmodule_vcm_gpio_set_value(unsigned int gpio, int value) +{ + gpio_set_value(gpio, value); +} +EXPORT_SYMBOL(crlmodule_vcm_gpio_set_value); +#endif + +static void crlmodule_update_current_mode(struct crl_sensor *sensor); + +static int __crlmodule_get_variable_ref(struct crl_sensor *sensor, + enum crl_member_data_reference_ids ref, + u32 *val) +{ + switch (ref) { + case CRL_VAR_REF_OUTPUT_WIDTH: + *val = sensor->src->crop[CRL_PAD_SRC].width; + break; + case CRL_VAR_REF_OUTPUT_HEIGHT: + *val = sensor->src->crop[CRL_PAD_SRC].height; + break; + case CRL_VAR_REF_BITSPERPIXEL: + *val = sensor->sensor_ds->csi_fmts[ + sensor->fmt_index].bits_per_pixel; + break; + default: + return -EINVAL; + }; + + return 0; +} + +/* + * Get the data format index from the configuration definition data + */ +static int __crlmodule_get_data_fmt_index(struct crl_sensor *sensor, + u32 code) +{ + unsigned int i; + + for (i = 0; i < sensor->sensor_ds->csi_fmts_items; i++) { + if (sensor->sensor_ds->csi_fmts[i].code == code) + return i; + } + + return -EINVAL; +} + +/* + * Find the index of the v4l2 ctrl pointer from the array of v4l2 ctrls + * maintained by the CRL module based on the ctrl id. 
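+ * Returns 0 and stores the bank index in *index, or -EINVAL if the id is not present in the bank.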
+ */ +static int __crlmodule_get_crl_ctrl_index(struct crl_sensor *sensor, + u32 id, unsigned int *index) +{ + unsigned int i; + + for (i = 0; i < sensor->sensor_ds->v4l2_ctrls_items; i++) + if (sensor->v4l2_ctrl_bank[i].ctrl_id == id) + break; + + if (i >= sensor->sensor_ds->v4l2_ctrls_items) + return -EINVAL; + + *index = i; + return 0; +} + +/* + * Finds the value of a specific v4l2 ctrl based on the ctrl-id + */ +static int __crlmodule_get_ctrl_value(struct crl_sensor *sensor, + u32 id, u32 *val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct v4l2_ctrl *ctrl; + unsigned int i; + int ret; + + ret = __crlmodule_get_crl_ctrl_index(sensor, id, &i); + if (ret) + return ret; + + /* If no corresponding v4l2 ctrl has been created, return */ + if (!sensor->v4l2_ctrl_bank[i].ctrl) { + dev_dbg(&client->dev, + "%s ctrl_id: 0x%x desc: %s not ready\n", __func__, id, + sensor->v4l2_ctrl_bank[i].name); + return -ENODATA; + } + + ctrl = sensor->v4l2_ctrl_bank[i].ctrl; + switch (sensor->v4l2_ctrl_bank[i].type) { + case CRL_V4L2_CTRL_TYPE_MENU_INT: + *val = ctrl->qmenu_int[ctrl->val]; + break; + case CRL_V4L2_CTRL_TYPE_INTEGER: + default: + *val = ctrl->val; + } + + dev_dbg(&client->dev, "%s ctrl_id: 0x%x desc: %s val: %d\n", + __func__, id, + sensor->v4l2_ctrl_bank[i].name, *val); + return 0; +} + +/* + * Finds the v4l2 ctrl based on the control id + */ +static struct v4l2_ctrl *__crlmodule_get_v4l2_ctrl(struct crl_sensor *sensor, + u32 id) +{ + unsigned int i; + + if (__crlmodule_get_crl_ctrl_index(sensor, id, &i)) + return NULL; + + return sensor->v4l2_ctrl_bank[i].ctrl; +} + +/* + * Grab / Release controls based on the ctrl update context + */ +static void __crlmodule_grab_v4l2_ctrl(struct crl_sensor *sensor, + enum crl_v4l2ctrl_update_context ctxt, + bool action) +{ + struct crl_v4l2_ctrl *crl_ctrl; + unsigned int i; + + for (i = 0; i < sensor->sensor_ds->v4l2_ctrls_items; i++) { + crl_ctrl = &sensor->v4l2_ctrl_bank[i]; + + if (crl_ctrl->context == ctxt) + v4l2_ctrl_grab(crl_ctrl->ctrl, action); + } +} + +/* + * Checks if the v4l2 ctrl specific data is satisfied in the mode and PLL + * selection logic. 
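+ * For example, a PLL or mode entry may carry crl_ctrl_data_pair items (comp_items / ctrl_data); the entry is selected only if every listed control currently holds the paired value.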
+ */ +static bool __crlmodule_compare_ctrl_specific_data( + struct crl_sensor *sensor, + unsigned int items, + struct crl_ctrl_data_pair *ctrl_val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + unsigned int i; + u32 val; + int ret; + + /* Go through all the controls associated with this config */ + for (i = 0; i < items; i++) { + /* Get the value set for the control */ + ret = __crlmodule_get_ctrl_value(sensor, ctrl_val[i].ctrl_id, + &val); + if (ret) { + dev_err(&client->dev, "%s ctrl_id: 0x%x not found\n", + __func__, ctrl_val[i].ctrl_id); + return false; + } + + /* Compare the value from the sensor definition file config */ + if (val != ctrl_val[i].data) { + dev_dbg(&client->dev, + "%s ctrl_id: 0x%x value does not match %d != %d\n", + __func__, ctrl_val[i].ctrl_id, val, + ctrl_val[i].data); + return false; + } + } + + dev_dbg(&client->dev, "%s success\n", __func__); + return true; +} + +/* + * Finds the correct PLL settings index based on the parameters + */ +static int __crlmodule_update_pll_index(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + const struct crl_pll_configuration *pll_config; + const struct crl_csi_data_fmt *fmts = + &sensor->sensor_ds->csi_fmts[sensor->fmt_index]; + u32 link_freq; + unsigned int i; + + link_freq = sensor->link_freq->qmenu_int[sensor->link_freq->val]; + + dev_dbg(&client->dev, "%s PLL Items: %d link_freq: %d\n", __func__, + sensor->sensor_ds->pll_config_items, link_freq); + + for (i = 0; i < sensor->sensor_ds->pll_config_items; i++) { + pll_config = &sensor->sensor_ds->pll_configs[i]; + + if (pll_config->op_sys_clk != link_freq) + continue; + + if (pll_config->input_clk != sensor->platform_data->ext_clk) + continue; + + /* if pll_config->csi_lanes == 0, lanes do not matter */ + if (pll_config->csi_lanes) + if (sensor->platform_data->lanes != + pll_config->csi_lanes) + continue; + + /* PLL config must match the bpp */ + if (fmts->bits_per_pixel != pll_config->bitsperpixel) + continue; + + /* Check if there are any dynamic compare items */ + if (sensor->ext_ctrl_impacts_pll_selection && + !__crlmodule_compare_ctrl_specific_data(sensor, + pll_config->comp_items, + pll_config->ctrl_data)) + continue; + + /* Found PLL index */ + dev_dbg(&client->dev, "%s Found PLL index: %d for freq: %d\n", + __func__, i, link_freq); + + sensor->pll_index = i; + + /* Update the control values for pixelrate_pa and csi */ + __v4l2_ctrl_s_ctrl_int64(sensor->pixel_rate_pa, + pll_config->pixel_rate_pa); + __v4l2_ctrl_s_ctrl_int64(sensor->pixel_rate_csi, + pll_config->pixel_rate_csi); + return 0; + } + + dev_err(&client->dev, "%s no configuration found for freq: %d\n", + __func__, link_freq); + return -EINVAL; +} + +/* + * Perform the action for the dependency control + */ +static void __crlmodule_dep_ctrl_perform_action( + struct crl_sensor *sensor, + struct crl_dep_ctrl_provision *prov, + u32 *val, u32 *dep_val) +{ + enum crl_dep_ctrl_condition cond; + unsigned int i; + u32 temp; + + if (*val > *dep_val) + cond = CRL_DEP_CTRL_CONDITION_GREATER; + else if (*val < *dep_val) + cond = CRL_DEP_CTRL_CONDITION_LESSER; + else + cond = CRL_DEP_CTRL_CONDITION_EQUAL; + + for (i = 0; i < prov->action_items; i++) { + if (prov->action[i].cond == cond) + break; + } + + /* No matching action handler found. Nothing to do. */ + if (i >= prov->action_items) + return; + + /* if this is a dependency control, switch val and dep val */ + if (prov->action_type == CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL) { + temp = *val; + *val = *dep_val; + *dep_val = temp; + } + + switch (prov->action[i].action) { + case CRL_DEP_CTRL_CONDITION_ADD: + *val = *dep_val + prov->action[i].action_value; + break; + case CRL_DEP_CTRL_CONDITION_SUBTRACT: + *val = *dep_val - prov->action[i].action_value; + break; + case CRL_DEP_CTRL_CONDITION_MULTIPLY: + *val = *dep_val * prov->action[i].action_value; + break; + case CRL_DEP_CTRL_CONDITION_DIVIDE: + *val = *dep_val / prov->action[i].action_value; + break; + } + + /* if this is a dependency control, switch val and dep val back */ + if (prov->action_type == CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL) { + temp = *val; + *val = *dep_val; + *dep_val = temp; + } + + return; +} + +/* + * Parse the dynamic entity based on the Operand type + */ +static int __crlmodule_parse_dynamic_entity(struct crl_sensor *sensor, + struct crl_dynamic_entity entity, + u32 *val) +{ + switch (entity.entity_type) { + case CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST: + *val = entity.entity_val; + return 0; + case CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF: + return __crlmodule_get_variable_ref(sensor, + entity.entity_val, val); + case CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL: + return __crlmodule_get_ctrl_value(sensor, + entity.entity_val, val); + case CRL_DYNAMIC_VAL_OPERAND_TYPE_REG_VAL: { + struct crl_register_read_rep reg; + + /* Note: Only 8bit registers are supported. */ + reg.address = entity.entity_val; + reg.len = CRL_REG_LEN_08BIT; + reg.mask = 0xff; + reg.dev_i2c_addr = CRL_I2C_ADDRESS_NO_OVERRIDE; + return crlmodule_read_reg(sensor, reg, val); + } + default: + break; + }; + + return -EINVAL; +} + +static int __crlmodule_calc_dynamic_entity_values( + struct crl_sensor *sensor, + unsigned int ops_items, + struct crl_arithmetic_ops *ops_arr, + unsigned int *val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + unsigned int i; + + /* apply the operations to val one by one */ + for (i = 0; i < ops_items; i++) { + struct crl_arithmetic_ops *ops = &ops_arr[i]; + u32 operand; + int ret = __crlmodule_parse_dynamic_entity(sensor, ops->operand, + &operand); + if (ret) { + dev_dbg(&client->dev, + "%s failed to parse dynamic entity: %d %d\n", + __func__, ops->operand.entity_type, + ops->operand.entity_val); + return ret; + } + + switch (ops->op) { + case CRL_BITWISE_AND: + *val &= operand; + break; + case CRL_BITWISE_OR: + *val |= operand; + break; + case CRL_BITWISE_LSHIFT: + *val <<= operand; + break; + case CRL_BITWISE_RSHIFT: + *val >>= operand; + break; + case CRL_BITWISE_XOR: + *val ^= operand; + break; + case CRL_BITWISE_COMPLEMENT: + *val = ~(*val); + break; + case CRL_ADD: + *val += operand; + break; + case CRL_SUBTRACT: + *val = *val > operand ? *val - operand : operand - *val; + break; + case CRL_MULTIPLY: + *val *= operand; + break; + case CRL_DIV: + *val /= operand; + break; + case CRL_ASSIGNMENT: + *val = operand; + break; + default: + return -EINVAL; + } + } + + return 0; +} + +/* + * A dynamic register's value is not direct but depends on a reference value. + * Such registers are mainly used in crlmodule's v4l2 ctrl logic. 
+ * + * This is to handle cases like the below examples, where multiple registers + * need to be modified based on the input value "val" + * R3000 = val & 0xff and R3001 = val >> 8 & 0xff and R3002 = val >> 16 & 0xff + * R4001 = val and R4002 = val, or + * R2800 = FLL - val and R2802 = LLP - val + */ +static int __crlmodule_parse_and_write_dynamic_reg(struct crl_sensor *sensor, + struct crl_dynamic_register_access *reg, + unsigned int val) +{ + int ret; + + /* + * Get the value associated with the dynamic entity. "val" might + * change after this call based on the arithmetic operations added for + * this group + */ + ret = __crlmodule_calc_dynamic_entity_values(sensor, reg->ops_items, + reg->ops, &val); + if (ret) + return ret; + + /* Now ready to write the value */ + return crlmodule_write_reg(sensor, reg->dev_i2c_addr, reg->address, + reg->len, reg->mask, val); +} + +static int __crlmodule_update_dynamic_regs(struct crl_sensor *sensor, + struct crl_v4l2_ctrl *crl_ctrl, + unsigned int val) +{ + unsigned int i; + int ret; + + for (i = 0; i < crl_ctrl->regs_items; i++) { + /* + * Each register group must start from the initial value, not + * as a continuation of the previous calculations. The sensor + * configurations must take care of this restriction. + */ + ret = __crlmodule_parse_and_write_dynamic_reg(sensor, + &crl_ctrl->regs[i], val); + if (ret) + return ret; + } + + return 0; +} + +/* + * Perform the action for the dependent register lists + */ +static int __crlmodule_handle_dependency_regs( + struct crl_sensor *sensor, + struct crl_v4l2_ctrl *crl_ctrl, + unsigned int val) +{ + unsigned int i; + int ret; + + for (i = 0; i < crl_ctrl->crl_ctrl_dep_reg_list; i++) { + struct crl_dep_reg_list *list = &crl_ctrl->dep_regs[i]; + enum crl_dep_ctrl_condition condition; + unsigned int j; + u32 dep_val; + + /* Parse the condition value */ + ret = __crlmodule_parse_dynamic_entity(sensor, list->cond_value, + &dep_val); + if (ret) + return ret; + + /* Get the kind of condition for this value */ + if (val > dep_val) + condition = CRL_DEP_CTRL_CONDITION_GREATER; + else if (val < dep_val) + condition = CRL_DEP_CTRL_CONDITION_LESSER; + else + condition = CRL_DEP_CTRL_CONDITION_EQUAL; + + /* + * Compare the register list specific condition and, if it + * matches, write the corresponding register lists to the + * sensor. + */ + if (condition == list->reg_cond) { + /* Handle the direct registers if any */ + if (list->no_direct_regs && list->direct_regs) { + ret = crlmodule_write_regs(sensor, + list->direct_regs, list->no_direct_regs); + if (ret) + return ret; + } + + /* Handle the dynamic registers if any */ + for (j = 0; j < list->no_dyn_items; j++) { + ret = __crlmodule_parse_and_write_dynamic_reg( + sensor, &list->dyn_regs[j], val); + if (ret) + return ret; + } + break; + } + } + + return 0; +} + +/* + * Handles the dependency control actions. Dependency control is a control + * whose value depends on the current control. This information is encoded in + * the sensor configuration file. 
+ */ +static int __crlmodule_handle_dependency_ctrl( + struct crl_sensor *sensor, + struct crl_v4l2_ctrl *crl_ctrl, + unsigned int *val, + enum crl_dep_ctrl_action_type type) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct crl_v4l2_ctrl *dep_crl_ctrl; + struct crl_dep_ctrl_provision *dep_prov; + unsigned int i, idx; + u32 dep_val; + int ret; + + dev_dbg(&client->dev, "%s ctrl_id: 0x%x dependency controls: %d\n", + __func__, crl_ctrl->ctrl_id, + crl_ctrl->dep_items); + + for (i = 0; i < crl_ctrl->dep_items; i++) { + dep_prov = &crl_ctrl->dep_ctrls[i]; + + /* If not of the requested type, continue */ + if (dep_prov->action_type != type) + continue; + + /* Get the value from the dependency ctrl */ + ret = __crlmodule_get_ctrl_value(sensor, dep_prov->ctrl_id, + &dep_val); + if (ret) { + dev_err(&client->dev, "%s ctrl_id: 0x%x not found\n", + __func__, dep_prov->ctrl_id); + /* TODO! Should we continue here? */ + continue; + } + + /* Perform the action */ + __crlmodule_dep_ctrl_perform_action(sensor, dep_prov, val, + &dep_val); + + /* if this is a dependency control, update the register */ + if (dep_prov->action_type == + CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL) { + ret = __crlmodule_get_crl_ctrl_index(sensor, + dep_prov->ctrl_id, &idx); + if (ret) + continue; + + dep_crl_ctrl = &sensor->v4l2_ctrl_bank[idx]; + + /* Update the dynamic registers for the dep control */ + ret = __crlmodule_update_dynamic_regs(sensor, + dep_crl_ctrl, dep_val); + if (ret) + dev_info(&client->dev, + "%s dynamic reg update failed for %s\n", + __func__, dep_crl_ctrl->name); + + /* Handle dependent register lists for the dep control */ + ret = __crlmodule_handle_dependency_regs(sensor, + dep_crl_ctrl, dep_val); + if (ret) + dev_info(&client->dev, + "%s handle dep regs failed for %s\n", + __func__, dep_crl_ctrl->name); + } + } + + return 0; +} + +static int crlmodule_get_fmt_index(struct crl_sensor *sensor, + u8 pixel_order, u8 bpp) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + const struct crl_csi_data_fmt *f; + int i; + + /* + * Go through the fmt list and check if this format with matching bpp + * is supported by this module definition file + */ + for (i = 0; i < sensor->sensor_ds->csi_fmts_items; i++) { + f = &sensor->sensor_ds->csi_fmts[i]; + + if (f->pixel_order == pixel_order && f->bits_per_pixel == bpp) + return i; + } + + dev_err(&client->dev, "%s no supported format for order: %d bpp: %d\n", + __func__, pixel_order, bpp); + + return -EINVAL; +} + +static int __crlmodule_update_flip_info(struct crl_sensor *sensor, + struct crl_v4l2_ctrl *crl_ctrl, + struct v4l2_ctrl *ctrl) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + const struct crl_csi_data_fmt *fmt = + &sensor->sensor_ds->csi_fmts[sensor->fmt_index]; + u8 bpp = fmt->bits_per_pixel; + u8 flip_info = sensor->flip_info; + u8 new_order; + int i, ret; + + dev_dbg(&client->dev, "%s current flip_info: %d curr index: %d\n", + __func__, flip_info, sensor->fmt_index); + + switch (ctrl->id) { + case V4L2_CID_HFLIP: + flip_info &= CRL_FLIP_HFLIP_MASK; + flip_info |= ctrl->val > 0 ? CRL_FLIP_HFLIP : 0; + break; + case V4L2_CID_VFLIP: + flip_info &= CRL_FLIP_VFLIP_MASK; + flip_info |= ctrl->val > 0 ? 
CRL_FLIP_VFLIP : 0; + break; + } + + dev_dbg(&client->dev, "%s flip success new flip_info: %d\n", + __func__, flip_info); + + /* First check if the module actually supports any pixelorder changes */ + for (i = 0; i < sensor->sensor_ds->flip_items; i++) { + if (flip_info == sensor->sensor_ds->flip_data[i].flip) { + new_order = sensor->sensor_ds->flip_data[i].pixel_order; + break; + } + } + + if (i >= sensor->sensor_ds->flip_items) { + dev_err(&client->dev, "%s flip not supported %d\n", + __func__, flip_info); + return -EINVAL; + } + + /* Skip format re-selection if pixel order is unrelated to flipping. */ + if (new_order == CRL_PIXEL_ORDER_IGNORE) + return 0; + + /* + * Flip changes only pixel order. So check if the supported format list + * has any format with new pixel order and current bits per pixel + */ + i = crlmodule_get_fmt_index(sensor, new_order, bpp); + if (i < 0) { + dev_err(&client->dev, "%s no format found order: %d bpp: %d\n", + __func__, new_order, bpp); + return -EINVAL; + } + + ret = __crlmodule_update_dynamic_regs(sensor, crl_ctrl, ctrl->val); + if (ret) { + dev_err(&client->dev, "%s register access failed\n", __func__); + return ret; + } + + /* New format found. Update info */ + sensor->fmt_index = i; + sensor->flip_info = flip_info; + + dev_dbg(&client->dev, "%s flip success flip: %d new fmt index: %d\n", + __func__, flip_info, i); + + return 0; +} +static int __crlmodule_update_framesize(struct crl_sensor *sensor, + struct crl_v4l2_ctrl *crl_ctrl, + struct v4l2_ctrl *ctrl) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + const struct crl_mode_rep *mode = sensor->current_mode; + unsigned int val; + int ret; + + switch (ctrl->id) { + case V4L2_CID_FRAME_LENGTH_LINES: + val = max(ctrl->val, mode->min_fll); + break; + case V4L2_CID_LINE_LENGTH_PIXELS: + val = max(ctrl->val, mode->min_llp); + break; + default: + return -EINVAL; + } + + ret = __crlmodule_update_dynamic_regs(sensor, crl_ctrl, val); + if (ret) + return ret; + + ctrl->val = val; + ctrl->cur.val = val; + dev_dbg(&client->dev, "%s: set v4l2 id:0x%0x value %d\n", + __func__, ctrl->id, val); + + return 0; +} +static int __crlmodule_update_blanking(struct crl_sensor *sensor, + struct crl_v4l2_ctrl *crl_ctrl, + struct v4l2_ctrl *ctrl) +{ + unsigned int val; + + switch (ctrl->id) { + case V4L2_CID_HBLANK: + val = sensor->pixel_array->crop[CRL_PA_PAD_SRC].width + + ctrl->val; + break; + case V4L2_CID_VBLANK: + val = sensor->pixel_array->crop[CRL_PA_PAD_SRC].height + + ctrl->val; + break; + default: + return -EINVAL; + } + + return __crlmodule_update_dynamic_regs(sensor, crl_ctrl, val); +} + +static void __crlmodule_update_selection_impact_flags( + struct crl_sensor *sensor, + struct crl_v4l2_ctrl *crl_ctrl) +{ + if (crl_ctrl->impact & CRL_IMPACTS_PLL_SELECTION) + sensor->ext_ctrl_impacts_pll_selection = true; + + if (crl_ctrl->impact & CRL_IMPACTS_MODE_SELECTION) + sensor->ext_ctrl_impacts_mode_selection = true; +} + +static struct crl_v4l2_ctrl *__crlmodule_find_crlctrl( + struct crl_sensor *sensor, + struct v4l2_ctrl *ctrl) +{ + struct crl_v4l2_ctrl *crl_ctrl; + unsigned int i; + + for (i = 0; i < sensor->sensor_ds->v4l2_ctrls_items; i++) { + crl_ctrl = &sensor->v4l2_ctrl_bank[i]; + if (crl_ctrl->ctrl == ctrl) + return crl_ctrl; + } + + return NULL; +} + +static int crlmodule_reset_crlctrl_value(struct crl_sensor *sensor, + unsigned int new_mode) +{ + struct crl_v4l2_ctrl *crl_ctrl; + const struct crl_mode_rep *this; + unsigned int i; + + if (!sensor->v4l2_ctrl_bank) + return -EINVAL; + + 
this = &sensor->sensor_ds->modes[new_mode];
+
+	for (i = 0; i < sensor->sensor_ds->v4l2_ctrls_items; i++) {
+		crl_ctrl = &sensor->v4l2_ctrl_bank[i];
+
+		switch (crl_ctrl->ctrl_id) {
+		case V4L2_CID_FRAME_LENGTH_LINES:
+			if (crl_ctrl->ctrl) {
+				crl_ctrl->ctrl->val = this->min_fll;
+				crl_ctrl->ctrl->cur.val = this->min_fll;
+			}
+			break;
+		case V4L2_CID_LINE_LENGTH_PIXELS:
+			if (crl_ctrl->ctrl) {
+				crl_ctrl->ctrl->val = this->min_llp;
+				crl_ctrl->ctrl->cur.val = this->min_llp;
+			}
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int crlmodule_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct crl_sensor *sensor = container_of(ctrl->handler,
+			struct crl_subdev, ctrl_handler)->sensor;
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	struct crl_v4l2_ctrl *crl_ctrl = NULL;
+	int ret = 0;
+
+	dev_dbg(&client->dev, "%s id:0x%0x val:%d\n", __func__, ctrl->id,
+		ctrl->val);
+
+	/*
+	 * Need to find the corresponding crlmodule wrapper for this v4l2_ctrl.
+	 * This is needed because all the register information is associated
+	 * with the crlmodule's wrapper v4l2ctrl.
+	 */
+	crl_ctrl = __crlmodule_find_crlctrl(sensor, ctrl);
+	if (!crl_ctrl) {
+		dev_err(&client->dev, "%s ctrl :0x%x not supported\n",
+			__func__, ctrl->id);
+		return -EINVAL;
+	}
+
+	dev_dbg(&client->dev, "%s id:0x%x name:%s\n", __func__, ctrl->id,
+		crl_ctrl->name);
+
+	/* Then go through the mandatory controls */
+	switch (ctrl->id) {
+	case V4L2_CID_LINK_FREQ:
+		/* Go through the supported list and compare the values */
+		ret = __crlmodule_update_pll_index(sensor);
+		goto out;
+	}
+
+	/* update the selection impacts flags */
+	__crlmodule_update_selection_impact_flags(sensor, crl_ctrl);
+
+	/*
+	 * Dependency control is a control whose value is affected by the value
+	 * of the current control. For example, vblank can be a dependency
+	 * control for exposure. Whenever exposure changes, the sensor can
+	 * automatically adjust the vblank or rely on manual adjustment. In
+	 * case of manual adjustment the sensor configuration file needs to
+	 * specify the dependency control, the condition for an action and the
+	 * types of action.
+	 *
+	 * Now check if there are any dependency controls for this one. If
+	 * there are, the action needs to be split in two. First, if the
+	 * current control itself needs to be changed, do it before updating
+	 * the register.
+	 * If some other control is affected, then do it after writing the
+	 * current values.
+	 *
+	 * Now check in the dependency control list, if the action type is
+	 * "self" and update the value accordingly now
+	 */
+	__crlmodule_handle_dependency_ctrl(sensor, crl_ctrl, &ctrl->val,
+					   CRL_DEP_CTRL_ACTION_TYPE_SELF);
+
+	/* Handle specific controls */
+	switch (ctrl->id) {
+	case V4L2_CID_HFLIP:
+	case V4L2_CID_VFLIP:
+		ret = __crlmodule_update_flip_info(sensor, crl_ctrl, ctrl);
+		goto out;
+
+	case V4L2_CID_VBLANK:
+	case V4L2_CID_HBLANK:
+		if (sensor->blanking_ctrl_not_use) {
+			dev_info(&client->dev,
+				 "%s Blanking controls are not used in this configuration, setting them has no effect\n",
+				 __func__);
+			/* Disable control */
+			v4l2_ctrl_activate(ctrl, false);
+		} else {
+			ret = __crlmodule_update_blanking(sensor,
+							  crl_ctrl, ctrl);
+		}
+		goto out;
+
+	case V4L2_CID_FRAME_LENGTH_LINES:
+	case V4L2_CID_LINE_LENGTH_PIXELS:
+		ret = __crlmodule_update_framesize(sensor, crl_ctrl, ctrl);
+		goto out;
+
+	case CRL_CID_SENSOR_MODE:
+		/*
+		 * If the sensor mode is changed, some v4l2 ctrl values need
+		 * to be reset to their defaults, or else the values set in
+		 * the previous mode will influence the settings in the
+		 * current mode, especially llp and fll.
+		 */
+		if (sensor->sensor_mode != ctrl->val)
+			crlmodule_reset_crlctrl_value(sensor, ctrl->val);
+
+		sensor->sensor_mode = ctrl->val;
+		crlmodule_update_current_mode(sensor);
+		goto out;
+	}
+
+	ret = __crlmodule_update_dynamic_regs(sensor, crl_ctrl, ctrl->val);
+	if (ret)
+		goto out;
+
+	ret = __crlmodule_handle_dependency_regs(sensor, crl_ctrl,
+						 ctrl->val);
+
+out:
+	/*
+	 * Now check in the dependency control list, if the action type is
+	 * "dependency control" and update the value accordingly now
+	 */
+	if (!ret && crl_ctrl)
+		__crlmodule_handle_dependency_ctrl(sensor, crl_ctrl, &ctrl->val,
+					CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL);
+	return ret;
+}
+
+static int crlmodule_get_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct crl_sensor *sensor = container_of(ctrl->handler,
+			struct crl_subdev, ctrl_handler)->sensor;
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	struct crl_v4l2_ctrl *crl_ctrl;
+	struct crl_dynamic_register_access *reg;
+
+	/*
+	 * Need to find the corresponding crlmodule wrapper for this v4l2_ctrl.
+	 * This is needed because all the register information is associated
+	 * with the crlmodule's wrapper v4l2ctrl.
+	 */
+	crl_ctrl = __crlmodule_find_crlctrl(sensor, ctrl);
+	if (!crl_ctrl) {
+		dev_err(&client->dev, "%s ctrl :0x%x not supported\n",
+			__func__, ctrl->id);
+		return -EINVAL;
+	}
+
+	dev_dbg(&client->dev, "%s id:0x%x name:%s\n", __func__, ctrl->id,
+		crl_ctrl->name);
+
+	/* cannot handle if the V4L2_CTRL_FLAG_READ_ONLY flag is not set */
+	if (!(ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+		dev_err(&client->dev, "%s Control id:0x%x is not read only\n",
+			__func__, ctrl->id);
+		return -EINVAL;
+	}
+
+	/*
+	 * Found the crl control wrapper. Use the dynamic entity information
+	 * to calculate the value for this control. For a get control there
+	 * can be only one item in crl_dynamic_register_access, i.e.
+	 * crl_ctrl->regs_items must be 1. Also crl_dynamic_register_access
+	 * .address and .len are not used. Instead, the values to be found or
+	 * calculated need to be encoded into
+	 * crl_dynamic_register_access.crl_arithmetic_ops, which can read
+	 * registers and existing control values and combine them with simple
+	 * arithmetic operations etc.
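+	 *
+	 * Illustration only (the register addresses are made up): a read-only
+	 * coarse exposure control could be described with ops that read
+	 * registers 0x3500 and 0x3501 and combine them as
+	 * (reg(0x3500) << 8) | reg(0x3501).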
+	 */
+	if (crl_ctrl->regs_items > 1)
+		dev_warn(&client->dev,
+			 "%s multiple dynamic entities, will skip the rest\n",
+			 __func__);
+	reg = &crl_ctrl->regs[0];
+
+	/* Get the value associated with the dynamic entity */
+	return __crlmodule_calc_dynamic_entity_values(sensor, reg->ops_items,
+						      reg->ops, &ctrl->val);
+}
+
+static const struct v4l2_ctrl_ops crlmodule_ctrl_ops = {
+	.s_ctrl = crlmodule_set_ctrl,
+	.g_volatile_ctrl = crlmodule_get_ctrl,
+};
+
+static struct v4l2_ctrl_handler *__crlmodule_get_sd_ctrl_handler(
+					struct crl_sensor *sensor,
+					enum crl_subdev_type sd_type)
+{
+	switch (sd_type) {
+	case CRL_SUBDEV_TYPE_SCALER:
+	case CRL_SUBDEV_TYPE_BINNER:
+		return &sensor->src->ctrl_handler;
+
+	case CRL_SUBDEV_TYPE_PIXEL_ARRAY:
+		if (sensor->pixel_array)
+			return &sensor->pixel_array->ctrl_handler;
+		break;
+	}
+
+	return NULL;
+}
+
+static int __crlmodule_init_link_freq_ctrl_menu(
+					struct crl_sensor *sensor,
+					struct crl_v4l2_ctrl *crl_ctrl)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	unsigned int items = 0;
+	unsigned int i;
+
+	/* Cannot handle if the control type is not integer menu */
+	if (crl_ctrl->type != CRL_V4L2_CTRL_TYPE_MENU_INT)
+		return 0;
+
+	/* If the menu contents exist, skip filling it dynamically */
+	if (crl_ctrl->data.v4l2_int_menu.menu)
+		return 0;
+
+	sensor->link_freq_menu = devm_kzalloc(&client->dev, sizeof(s64) *
+				sensor->sensor_ds->pll_config_items,
+				GFP_KERNEL);
+	if (!sensor->link_freq_menu)
+		return -ENOMEM;
+
+	for (i = 0; i < sensor->sensor_ds->pll_config_items; i++) {
+		bool dup = false;
+		unsigned int j;
+
+		/*
+		 * Skip the duplicate entries. We are using the value to match,
+		 * not the index
+		 */
+		for (j = 0; j < items && !dup; j++)
+			dup = (sensor->link_freq_menu[j] ==
+			       sensor->sensor_ds->pll_configs[i].op_sys_clk);
+		if (dup)
+			continue;
+
+		sensor->link_freq_menu[items] =
+			sensor->sensor_ds->pll_configs[i].op_sys_clk;
+		items++;
+	}
+
+	crl_ctrl->data.v4l2_int_menu.menu = sensor->link_freq_menu;
+
+	/* items will not be 0 as there will be at least one pll_config_item */
+	crl_ctrl->data.v4l2_int_menu.max = items - 1;
+
+	return 0;
+}
+
+static int crlmodule_init_controls(struct crl_sensor *sensor)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	unsigned int pa_ctrls = 0;
+	unsigned int src_ctrls = 0;
+	struct crl_v4l2_ctrl *crl_ctrl;
+	struct v4l2_ctrl_handler *ctrl_handler;
+	struct v4l2_ctrl_config cfg = { 0 };
+	unsigned int i;
+	int rval;
+
+	sensor->v4l2_ctrl_bank = devm_kzalloc(&client->dev,
+				sizeof(struct crl_v4l2_ctrl) *
+				sensor->sensor_ds->v4l2_ctrls_items,
+				GFP_KERNEL);
+	if (!sensor->v4l2_ctrl_bank)
+		return -ENOMEM;
+
+	/* Prepare to initialise the v4l2_ctrls from the crl wrapper */
+	for (i = 0; i < sensor->sensor_ds->v4l2_ctrls_items; i++) {
+		/*
+		 * First copy the v4l2_ctrls to the sensor as there could be
+		 * more than one similar sensors in a product which could share
+		 * the same configuration files
+		 */
+		sensor->v4l2_ctrl_bank[i] =
+				sensor->sensor_ds->v4l2_ctrl_bank[i];
+
+		crl_ctrl = &sensor->v4l2_ctrl_bank[i];
+		if (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_PIXEL_ARRAY)
+			pa_ctrls++;
+
+		if (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_SCALER ||
+		    crl_ctrl->sd_type == CRL_SUBDEV_TYPE_BINNER)
+			src_ctrls++;
+
+		/* populate the v4l2_ctrl for the Link_freq dynamically */
+		if (crl_ctrl->ctrl_id == V4L2_CID_LINK_FREQ &&
+		    (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_SCALER ||
+		     crl_ctrl->sd_type == CRL_SUBDEV_TYPE_BINNER)) {
+			rval = __crlmodule_init_link_freq_ctrl_menu(sensor,
+							crl_ctrl);
+			if (rval)
+				return rval;
+		}
+	}
+	dev_dbg(&client->dev, "%s pa_ctrls: %d src_ctrls: %d\n", __func__,
+		pa_ctrls, src_ctrls);
+
+	if (pa_ctrls) {
+		rval = v4l2_ctrl_handler_init(
+				&sensor->pixel_array->ctrl_handler,
+				pa_ctrls);
+		if (rval)
+			return rval;
+		sensor->pixel_array->ctrl_handler.lock = &sensor->mutex;
+	}
+
+	if (src_ctrls) {
+		rval = v4l2_ctrl_handler_init(
+				&sensor->src->ctrl_handler,
+				src_ctrls);
+		if (rval)
+			return rval;
+		sensor->src->ctrl_handler.lock = &sensor->mutex;
+	}
+
+	for (i = 0; i < sensor->sensor_ds->v4l2_ctrls_items; i++) {
+		crl_ctrl = &sensor->v4l2_ctrl_bank[i];
+		ctrl_handler = __crlmodule_get_sd_ctrl_handler(sensor,
+							crl_ctrl->sd_type);
+
+		if (!ctrl_handler)
+			continue;
+
+		switch (crl_ctrl->type) {
+		case CRL_V4L2_CTRL_TYPE_MENU_ITEMS:
+			crl_ctrl->ctrl = v4l2_ctrl_new_std_menu_items(
+				ctrl_handler, &crlmodule_ctrl_ops,
+				crl_ctrl->ctrl_id,
+				crl_ctrl->data.v4l2_menu_items.size,
+				0, 0,
+				crl_ctrl->data.v4l2_menu_items.menu);
+			break;
+		case CRL_V4L2_CTRL_TYPE_MENU_INT:
+			crl_ctrl->ctrl = v4l2_ctrl_new_int_menu(ctrl_handler,
+				&crlmodule_ctrl_ops, crl_ctrl->ctrl_id,
+				crl_ctrl->data.v4l2_int_menu.max,
+				crl_ctrl->data.v4l2_int_menu.def,
+				crl_ctrl->data.v4l2_int_menu.menu);
+			break;
+		case CRL_V4L2_CTRL_TYPE_INTEGER64:
+		case CRL_V4L2_CTRL_TYPE_INTEGER:
+			crl_ctrl->ctrl = v4l2_ctrl_new_std(ctrl_handler,
+				&crlmodule_ctrl_ops, crl_ctrl->ctrl_id,
+				crl_ctrl->data.std_data.min,
+				crl_ctrl->data.std_data.max,
+				crl_ctrl->data.std_data.step,
+				crl_ctrl->data.std_data.def);
+			break;
+		case CRL_V4L2_CTRL_TYPE_CUSTOM:
+			cfg.ops = &crlmodule_ctrl_ops;
+			cfg.id = crl_ctrl->ctrl_id;
+			cfg.name = crl_ctrl->name;
+			cfg.type = crl_ctrl->v4l2_type;
+			if ((crl_ctrl->v4l2_type == V4L2_CTRL_TYPE_INTEGER) ||
+			    (crl_ctrl->v4l2_type ==
+			     V4L2_CTRL_TYPE_INTEGER64)) {
+				cfg.max = crl_ctrl->data.std_data.max;
+				cfg.min = crl_ctrl->data.std_data.min;
+				cfg.step = crl_ctrl->data.std_data.step;
+				cfg.def = crl_ctrl->data.std_data.def;
+				cfg.qmenu = 0;
+				cfg.elem_size = 0;
+			} else if (crl_ctrl->v4l2_type == V4L2_CTRL_TYPE_MENU) {
+				cfg.max = crl_ctrl->data.v4l2_menu_items.size
+					  - 1;
+				cfg.min = 0;
+				cfg.step = 0;
+				cfg.def = 0;
+				cfg.qmenu = crl_ctrl->data.v4l2_menu_items.menu;
+				cfg.elem_size = 0;
+			} else {
+				dev_dbg(&client->dev,
+					"%s Custom Control: type %d\n",
+					__func__, crl_ctrl->v4l2_type);
+				continue;
+			}
+			crl_ctrl->ctrl = v4l2_ctrl_new_custom(ctrl_handler,
+							      &cfg, NULL);
+			break;
+		case CRL_V4L2_CTRL_TYPE_BOOLEAN:
+		case CRL_V4L2_CTRL_TYPE_BUTTON:
+		case CRL_V4L2_CTRL_TYPE_CTRL_CLASS:
+		default:
+			dev_err(&client->dev,
+				"%s Invalid control type\n", __func__);
+			continue;
+		}
+
+		if (!crl_ctrl->ctrl)
+			continue;
+		/*
+		 * Blanking and framesize controls access the same registers;
+		 * blanking controls are disabled if framesize controls exist.
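+		 * (When the blanking controls are in use instead,
+		 * __crlmodule_update_blanking() above maps HBLANK/VBLANK to
+		 * the same line/frame length values by adding the current
+		 * pixel array crop width/height.)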
+		 */
+		if (crl_ctrl->ctrl_id == V4L2_CID_FRAME_LENGTH_LINES ||
+		    crl_ctrl->ctrl_id == V4L2_CID_LINE_LENGTH_PIXELS)
+			sensor->blanking_ctrl_not_use = 1;
+
+		if (crl_ctrl->ctrl_id == CRL_CID_SENSOR_MODE)
+			sensor->direct_mode_in_use = 1;
+
+		/* Save mandatory control references - link_freq in src sd */
+		if (crl_ctrl->ctrl_id == V4L2_CID_LINK_FREQ &&
+		    (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_SCALER ||
+		     crl_ctrl->sd_type == CRL_SUBDEV_TYPE_BINNER))
+			sensor->link_freq = crl_ctrl->ctrl;
+
+		/* Save mandatory control references - pixel_rate_pa PA sd */
+		if (crl_ctrl->ctrl_id == V4L2_CID_PIXEL_RATE &&
+		    crl_ctrl->sd_type == CRL_SUBDEV_TYPE_PIXEL_ARRAY)
+			sensor->pixel_rate_pa = crl_ctrl->ctrl;
+
+		/* Save mandatory control references - pixel_rate_csi src sd */
+		if (crl_ctrl->ctrl_id == V4L2_CID_PIXEL_RATE &&
+		    (crl_ctrl->sd_type == CRL_SUBDEV_TYPE_SCALER ||
+		     crl_ctrl->sd_type == CRL_SUBDEV_TYPE_BINNER))
+			sensor->pixel_rate_csi = crl_ctrl->ctrl;
+
+		crl_ctrl->ctrl->flags |= crl_ctrl->flags;
+
+		dev_dbg(&client->dev,
+			"%s idx: %d ctrl_id: 0x%x ctrl_name: %s ctrl: 0x%p\n",
+			__func__, i, crl_ctrl->ctrl_id, crl_ctrl->name,
+			crl_ctrl->ctrl);
+
+		if (ctrl_handler->error) {
+			dev_err(&client->dev,
+				"%s controls initialization failed (%d)\n",
+				__func__, ctrl_handler->error);
+			rval = ctrl_handler->error;
+			goto error;
+		}
+	}
+
+	sensor->pixel_array->sd.ctrl_handler =
+		&sensor->pixel_array->ctrl_handler;
+
+	sensor->src->sd.ctrl_handler = &sensor->src->ctrl_handler;
+
+	return 0;
+
+error:
+	v4l2_ctrl_handler_free(&sensor->pixel_array->ctrl_handler);
+	v4l2_ctrl_handler_free(&sensor->src->ctrl_handler);
+
+	return rval;
+}
+
+static bool __crlmodule_rect_matches(struct i2c_client *client,
+				     const struct v4l2_rect *const rect1,
+				     const struct v4l2_rect *const rect2)
+{
+	dev_dbg(&client->dev, "%s rect1 l:%d t:%d w:%d h:%d\n", __func__,
+		rect1->left, rect1->top, rect1->width, rect1->height);
+	dev_dbg(&client->dev, "%s rect2 l:%d t:%d w:%d h:%d\n", __func__,
+		rect2->left, rect2->top, rect2->width, rect2->height);
+
+	return (rect1->left == rect2->left &&
+		rect1->top == rect2->top &&
+		rect1->width == rect2->width &&
+		rect1->height == rect2->height);
+}
+
+static unsigned int __crlmodule_get_mode_min_llp(struct crl_sensor *sensor)
+{
+	const struct crl_mode_rep *mode = sensor->current_mode;
+	const struct crl_sensor_limits *limits =
+				sensor->sensor_ds->sensor_limits;
+	unsigned int width = sensor->pixel_array->crop[CRL_PA_PAD_SRC].width;
+	unsigned int min_llp;
+
+	if (mode->min_llp)
+		min_llp = mode->min_llp; /* mode specific limit */
+	else if (limits->min_line_length_pixels)
+		min_llp = limits->min_line_length_pixels; /* sensor limit */
+	else /* No restrictions */
+		min_llp = width;
+
+	return min_llp;
+}
+
+static unsigned int __crlmodule_get_mode_max_llp(struct crl_sensor *sensor)
+{
+	const struct crl_mode_rep *mode = sensor->current_mode;
+	const struct crl_sensor_limits *limits =
+				sensor->sensor_ds->sensor_limits;
+	unsigned int max_llp;
+
+	if (mode->max_llp)
+		max_llp = mode->max_llp; /* mode specific limit */
+	else if (limits->max_line_length_pixels)
+		max_llp = limits->max_line_length_pixels; /* sensor limit */
+	else /* No restrictions */
+		max_llp = USHRT_MAX;
+
+	return max_llp;
+}
+
+static unsigned int __crlmodule_get_mode_min_fll(struct crl_sensor *sensor)
+{
+	const struct crl_mode_rep *mode = sensor->current_mode;
+	const struct crl_sensor_limits *limits =
+				sensor->sensor_ds->sensor_limits;
+	unsigned int height = sensor->pixel_array->crop[CRL_PA_PAD_SRC].height;
+	unsigned int min_fll;
+
+	if (mode->min_fll)
+		min_fll = mode->min_fll; /* mode specific limit */
+	else if (limits->min_frame_length_lines)
+		min_fll = limits->min_frame_length_lines; /* sensor limit */
+	else /* No restrictions */
+		min_fll = height;
+
+	return min_fll;
+}
+
+static unsigned int __crlmodule_get_mode_max_fll(struct crl_sensor *sensor)
+{
+	const struct crl_mode_rep *mode = sensor->current_mode;
+	const struct crl_sensor_limits *limits =
+				sensor->sensor_ds->sensor_limits;
+	unsigned int max_fll;
+
+	if (mode->max_fll)
+		max_fll = mode->max_fll; /* mode specific limit */
+	else if (limits->max_frame_length_lines)
+		max_fll = limits->max_frame_length_lines; /* sensor limit */
+	else /* No restrictions */
+		max_fll = USHRT_MAX;
+
+	return max_fll;
+}
+
+static void crlmodule_update_framesize(struct crl_sensor *sensor)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	unsigned int min_llp, max_llp, min_fll, max_fll;
+	struct v4l2_ctrl *llength;
+	struct v4l2_ctrl *flength;
+
+	llength = __crlmodule_get_v4l2_ctrl(sensor,
+					    V4L2_CID_LINE_LENGTH_PIXELS);
+	flength = __crlmodule_get_v4l2_ctrl(sensor,
+					    V4L2_CID_FRAME_LENGTH_LINES);
+
+	if (llength) {
+		min_llp = __crlmodule_get_mode_min_llp(sensor);
+		max_llp = __crlmodule_get_mode_max_llp(sensor);
+
+		llength->minimum = min_llp;
+		llength->maximum = max_llp;
+		llength->default_value = llength->minimum;
+		dev_dbg(&client->dev, "%s llp:%d\n", __func__, llength->val);
+	}
+
+	if (flength) {
+		min_fll = __crlmodule_get_mode_min_fll(sensor);
+		max_fll = __crlmodule_get_mode_max_fll(sensor);
+		flength->minimum = min_fll;
+		flength->maximum = max_fll;
+		flength->default_value = flength->minimum;
+		dev_dbg(&client->dev, "%s fll:%d\n", __func__, flength->val);
+	}
+}
+
+static int crlmodule_update_frame_blanking(struct crl_sensor *sensor)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	unsigned int width = sensor->pixel_array->crop[CRL_PA_PAD_SRC].width;
+	unsigned int height = sensor->pixel_array->crop[CRL_PA_PAD_SRC].height;
+	unsigned int min_llp, max_llp, min_fll, max_fll;
+	struct v4l2_ctrl *vblank;
+	struct v4l2_ctrl *hblank;
+
+	vblank = __crlmodule_get_v4l2_ctrl(sensor, V4L2_CID_VBLANK);
+	hblank = __crlmodule_get_v4l2_ctrl(sensor, V4L2_CID_HBLANK);
+
+	if (hblank) {
+		min_llp = __crlmodule_get_mode_min_llp(sensor);
+		max_llp = __crlmodule_get_mode_max_llp(sensor);
+
+		hblank->minimum = min_llp - width;
+		hblank->maximum = max_llp - width;
+		hblank->default_value = hblank->minimum;
+		dev_dbg(&client->dev, "%s hblank:%d\n", __func__, hblank->val);
+	}
+
+	if (vblank) {
+		min_fll = __crlmodule_get_mode_min_fll(sensor);
+		max_fll = __crlmodule_get_mode_max_fll(sensor);
+
+		vblank->minimum = min_fll - height;
+		vblank->maximum = max_fll - height;
+		vblank->default_value = vblank->minimum;
+		dev_dbg(&client->dev, "%s vblank:%d\n", __func__, vblank->val);
+	}
+
+	return 0;
+}
+
+static int __crlmodule_rect_index(enum crl_subdev_type type,
+				  const struct crl_mode_rep *mode)
+{
+	int i;
+
+	for (i = 0; i < mode->sd_rects_items; i++) {
+		if (type == mode->sd_rects[i].subdev_type)
+			return i;
+	}
+
+	return -1;
+}
+
+static void crlmodule_update_mode_bysel(struct crl_sensor *sensor)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	const struct crl_mode_rep *this;
+	unsigned int i;
+	int rect_index;
+
+	dev_dbg(&client->dev, "%s look for w: %d, h: %d, in [%d] modes\n",
+		__func__, sensor->src->crop[CRL_PAD_SRC].width,
+		sensor->src->crop[CRL_PAD_SRC].height,
+
sensor->sensor_ds->modes_items); + + for (i = 0; i < sensor->sensor_ds->modes_items; i++) { + this = &sensor->sensor_ds->modes[i]; + + dev_dbg(&client->dev, "%s check mode list[%d] w: %d, h: %d\n", + __func__, i, this->width, this->height); + if (this->width != sensor->src->crop[CRL_PAD_SRC].width || + this->height != sensor->src->crop[CRL_PAD_SRC].height) + continue; + + if (sensor->pixel_array) { + dev_dbg(&client->dev, "%s Compare PA out rect\n", + __func__); + rect_index = + __crlmodule_rect_index(CRL_SUBDEV_TYPE_PIXEL_ARRAY, + this); + if (rect_index < 0) + continue; + if (!__crlmodule_rect_matches(client, + &sensor->pixel_array->crop[CRL_PA_PAD_SRC], + &this->sd_rects[rect_index].out_rect)) + continue; + } + if (sensor->binner) { + dev_dbg(&client->dev, "%s binning hor: %d vs. %d\n", + __func__, + sensor->binning_horizontal, + this->binn_hor); + if (sensor->binning_horizontal != this->binn_hor) + continue; + + dev_dbg(&client->dev, "%s binning vert: %d vs. %d\n", + __func__, + sensor->binning_vertical, + this->binn_vert); + if (sensor->binning_vertical != this->binn_vert) + continue; + + dev_dbg(&client->dev, "%s binner in rect\n", __func__); + rect_index = + __crlmodule_rect_index(CRL_SUBDEV_TYPE_BINNER, + this); + if (rect_index < 0) + continue; + if (!__crlmodule_rect_matches(client, + &sensor->binner->crop[CRL_PAD_SINK], + &this->sd_rects[rect_index].in_rect)) + continue; + + dev_dbg(&client->dev, "%s binner out rect\n", __func__); + if (!__crlmodule_rect_matches(client, + &sensor->binner->crop[CRL_PAD_SRC], + &this->sd_rects[rect_index].out_rect)) + continue; + } + + if (sensor->scaler) { + dev_dbg(&client->dev, "%s scaler scale_m %d vs. %d\n", + __func__, sensor->scale_m, + this->scale_m); + if (sensor->scale_m != this->scale_m) + continue; + + rect_index = + __crlmodule_rect_index(CRL_SUBDEV_TYPE_SCALER, + this); + if (rect_index < 0) + continue; + + dev_dbg(&client->dev, "%s scaler in rect\n", __func__); + if (!__crlmodule_rect_matches(client, + &sensor->scaler->crop[CRL_PAD_SINK], + &this->sd_rects[rect_index].in_rect)) + continue; + + dev_dbg(&client->dev, "%s scaler out rect\n", __func__); + if (!__crlmodule_rect_matches(client, + &sensor->scaler->crop[CRL_PAD_SRC], + &this->sd_rects[rect_index].out_rect)) + continue; + } + + /* Check if there are any dynamic compare items */ + if (sensor->ext_ctrl_impacts_mode_selection && + !__crlmodule_compare_ctrl_specific_data(sensor, + this->comp_items, + this->ctrl_data)) + continue; + + /* Found a perfect match! */ + dev_dbg(&client->dev, "%s found mode. 
idx: %d\n", __func__, i); + break; + } + + /* If no modes found, fall back to the fail safe mode index */ + if (i >= sensor->sensor_ds->modes_items) { + i = sensor->sensor_ds->fail_safe_mode_index; + this = &sensor->sensor_ds->modes[i]; + dev_dbg(&client->dev, + "%s no matching mode, set to default: %d\n", + __func__, i); + } + + sensor->current_mode = this; +} + +static void crlmodule_update_mode_v4l2ctrl(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + const struct crl_mode_rep *this; + int i; + + dev_dbg(&client->dev, "%s Sensor Mode :%d\n", + __func__, sensor->sensor_mode); + /* point to selected mode */ + this = &sensor->sensor_ds->modes[sensor->sensor_mode]; + sensor->current_mode = this; + + for (i = 0; i < this->sd_rects_items; i++) { + + if (CRL_SUBDEV_TYPE_PIXEL_ARRAY == + this->sd_rects[i].subdev_type) { + sensor->pixel_array->crop[CRL_PA_PAD_SRC] = + this->sd_rects[i].out_rect; + } + + if (CRL_SUBDEV_TYPE_BINNER == + this->sd_rects[i].subdev_type) { + sensor->binner->sink_fmt = + this->sd_rects[i].in_rect; + sensor->binner->crop[CRL_PAD_SINK] = + this->sd_rects[i].in_rect; + sensor->binner->crop[CRL_PAD_SRC] = + this->sd_rects[i].out_rect; + sensor->binning_vertical = this->binn_vert; + sensor->binning_horizontal = this->binn_hor; + if (this->binn_vert > 1) + sensor->binner->compose = + this->sd_rects[i].out_rect; + } + + if (CRL_SUBDEV_TYPE_SCALER == + this->sd_rects[i].subdev_type) { + sensor->scaler->crop[CRL_PAD_SINK] = + this->sd_rects[i].in_rect; + sensor->scaler->crop[CRL_PAD_SRC] = + this->sd_rects[i].out_rect; + sensor->scaler->sink_fmt = + this->sd_rects[i].in_rect; + sensor->scale_m = this->scale_m; + if (this->scale_m != 1) + sensor->scaler->compose = + this->sd_rects[i].out_rect; + } + } + + /* Set source */ + sensor->src->crop[CRL_PAD_SRC].width = this->width; + sensor->src->crop[CRL_PAD_SRC].height = this->height; +} + +static void crlmodule_update_current_mode(struct crl_sensor *sensor) +{ + const struct crl_mode_rep *this; + int i; + + if (sensor->direct_mode_in_use) + crlmodule_update_mode_v4l2ctrl(sensor); + else + crlmodule_update_mode_bysel(sensor); + + /* + * We have a valid mode now. If there are any mode specific "get" + * controls defined in the configuration it could be queried by the + * user space for any mode specific information. So go through the + * mode specific v4l2_ctrls and update its value from the selected mode. 
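+	 * (Illustration: a configuration could expose, say, the active
+	 * mode's binning factor through such a read-only control, with the
+	 * per-mode values coming from ctrl_data. Example only.)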
+ */ + + this = sensor->current_mode; + + for (i = 0; i < this->comp_items; i++) { + struct crl_ctrl_data_pair *ctrl_comp = &this->ctrl_data[i]; + unsigned int idx; + + /* Get the v4l2_ctrl pointer corresponding ctrl id */ + if (__crlmodule_get_crl_ctrl_index(sensor, ctrl_comp->ctrl_id, + &idx)) + /* If not found, move to the next ctrl */ + continue; + + /* No need to update this control, if this is a set op ctrl */ + if (sensor->v4l2_ctrl_bank[idx].op_type == CRL_V4L2_CTRL_SET_OP) + continue; + + /* Update the control value */ + __v4l2_ctrl_s_ctrl(sensor->v4l2_ctrl_bank[idx].ctrl, + ctrl_comp->data); + } + + if (sensor->blanking_ctrl_not_use) + crlmodule_update_framesize(sensor); + else + crlmodule_update_frame_blanking(sensor); +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int __crlmodule_get_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct v4l2_rect *r; + + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + fmt->format = *v4l2_subdev_get_try_format(subdev, cfg, + fmt->pad); + return 0; + } + + if (fmt->pad == ssd->source_pad) + r = &ssd->crop[ssd->source_pad]; + else + r = &ssd->sink_fmt; + + fmt->format.width = r->width; + fmt->format.height = r->height; + fmt->format.code = + sensor->sensor_ds->csi_fmts[sensor->fmt_index].code; + fmt->format.field = (ssd->field == V4L2_FIELD_ANY) ? + V4L2_FIELD_NONE : ssd->field; + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_enum_mbus_code(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + + if (code->index >= sensor->sensor_ds->csi_fmts_items) + return -EINVAL; + + code->code = sensor->sensor_ds->csi_fmts[code->index].code; + + return 0; +} + +static int crlmodule_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(sd); + + if (fse->index >= sensor->sensor_ds->modes_items) + return -EINVAL; + + fse->min_width = sensor->sensor_ds->modes[fse->index].width; + fse->max_width = fse->min_width; + fse->min_height = sensor->sensor_ds->modes[fse->index].height; + fse->max_height = fse->min_height; + + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_get_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + int rval; + + mutex_lock(&sensor->mutex); + rval = __crlmodule_get_format(subdev, cfg, fmt); + mutex_unlock(&sensor->mutex); + + return rval; +} + +static int __crlmodule_sel_supported(struct v4l2_subdev *subdev, + struct v4l2_subdev_selection *sel) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + + if (ssd == sensor->pixel_array + && sel->pad == CRL_PA_PAD_SRC) { + switch (sel->target) { + case V4L2_SEL_TGT_NATIVE_SIZE: + case V4L2_SEL_TGT_CROP: + case V4L2_SEL_TGT_CROP_BOUNDS: + return 0; 
+ } + } + if (ssd == sensor->binner) { + switch (sel->target) { + case V4L2_SEL_TGT_COMPOSE: + case V4L2_SEL_TGT_COMPOSE_BOUNDS: + if (sel->pad == CRL_PAD_SINK) + return 0; + } + } + if (ssd == sensor->scaler) { + switch (sel->target) { + case V4L2_SEL_TGT_CROP: + case V4L2_SEL_TGT_CROP_BOUNDS: + if (sel->pad == CRL_PAD_SRC) + return 0; + break; + case V4L2_SEL_TGT_COMPOSE: + case V4L2_SEL_TGT_COMPOSE_BOUNDS: + if (sel->pad == CRL_PAD_SINK) + return 0; + } + } + return -EINVAL; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static void crlmodule_get_crop_compose(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_rect **crops, + struct v4l2_rect **comps, int which) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + unsigned int i; + + /* Currently we support only 2 pads */ + BUG_ON(subdev->entity.num_pads > CRL_PADS); + + if (which == V4L2_SUBDEV_FORMAT_ACTIVE) { + if (crops) + for (i = 0; i < subdev->entity.num_pads; i++) + crops[i] = &ssd->crop[i]; + if (comps) + *comps = &ssd->compose; + } else { + if (crops) { + for (i = 0; i < subdev->entity.num_pads; i++) { + crops[i] = v4l2_subdev_get_try_crop(subdev, + cfg, i); + BUG_ON(!crops[i]); + } + } + if (comps) { + *comps = v4l2_subdev_get_try_compose(subdev, cfg, + CRL_PAD_SINK); + BUG_ON(!*comps); + } + } +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_get_selection(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct v4l2_rect *comp, *crops[CRL_PADS]; + struct v4l2_rect sink_fmt; + int ret; + + ret = __crlmodule_sel_supported(subdev, sel); + if (ret) + return ret; + + crlmodule_get_crop_compose(subdev, cfg, crops, &comp, sel->which); + + if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) { + sink_fmt = ssd->sink_fmt; + } else { + struct v4l2_mbus_framefmt *fmt = + v4l2_subdev_get_try_format(subdev, cfg, ssd->sink_pad); + sink_fmt.left = 0; + sink_fmt.top = 0; + sink_fmt.width = fmt->width; + sink_fmt.height = fmt->height; + } + + switch (sel->target) { + case V4L2_SEL_TGT_CROP_BOUNDS: + case V4L2_SEL_TGT_NATIVE_SIZE: + if (ssd == sensor->pixel_array) { + sel->r.left = sel->r.top = 0; + sel->r.width = + sensor->sensor_ds->sensor_limits->x_addr_max; + sel->r.height = + sensor->sensor_ds->sensor_limits->y_addr_max; + } else if (sel->pad == ssd->sink_pad) { + sel->r = sink_fmt; + } else { + sel->r = *comp; + } + break; + case V4L2_SEL_TGT_CROP: + case V4L2_SEL_TGT_COMPOSE_BOUNDS: + sel->r = *crops[sel->pad]; + break; + case V4L2_SEL_TGT_COMPOSE: + sel->r = *comp; + break; + } + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static void crlmodule_propagate(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, int which, + int target) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct v4l2_rect *comp, *crops[CRL_PADS]; + + crlmodule_get_crop_compose(subdev, cfg, crops, &comp, which); + + switch (target) { + case V4L2_SEL_TGT_CROP: + comp->width = crops[CRL_PAD_SINK]->width; + comp->height = crops[CRL_PAD_SINK]->height; + if 
(which == V4L2_SUBDEV_FORMAT_ACTIVE) { + if (ssd == sensor->scaler) { + sensor->scale_m = 1; + } else if (ssd == sensor->binner) { + sensor->binning_horizontal = 1; + sensor->binning_vertical = 1; + } + } + /* Fall through */ + case V4L2_SEL_TGT_COMPOSE: + *crops[CRL_PAD_SRC] = *comp; + break; + default: + BUG(); + } +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_set_compose(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct v4l2_rect *comp, *crops[CRL_PADS]; + + crlmodule_get_crop_compose(subdev, cfg, crops, &comp, sel->which); + + sel->r.top = 0; + sel->r.left = 0; + + if (ssd == sensor->binner) { + sensor->binning_horizontal = crops[CRL_PAD_SINK]->width / + sel->r.width; + sensor->binning_vertical = crops[CRL_PAD_SINK]->height / + sel->r.height; + } else { + sensor->scale_m = crops[CRL_PAD_SINK]->width * + sensor->sensor_ds->sensor_limits->scaler_m_min / + sel->r.width; + } + + *comp = sel->r; + + crlmodule_propagate(subdev, cfg, sel->which, + V4L2_SEL_TGT_COMPOSE); + + if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) + crlmodule_update_current_mode(sensor); + + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_set_crop(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct v4l2_rect *src_size, *crops[CRL_PADS]; + struct v4l2_rect _r; + + crlmodule_get_crop_compose(subdev, cfg, crops, NULL, sel->which); + + if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) { + if (sel->pad == ssd->sink_pad) + src_size = &ssd->sink_fmt; + else + src_size = &ssd->compose; + } else { + if (sel->pad == ssd->sink_pad) { + _r.left = 0; + _r.top = 0; + _r.width = v4l2_subdev_get_try_format(subdev, + cfg, sel->pad) + ->width; + _r.height = v4l2_subdev_get_try_format(subdev, + cfg, sel->pad) + ->height; + src_size = &_r; + } else { + src_size = + v4l2_subdev_get_try_compose(subdev, cfg, + ssd->sink_pad); + } + } + + if (ssd == sensor->src && sel->pad == CRL_PAD_SRC) { + sel->r.left = 0; + sel->r.top = 0; + } + + sel->r.width = min(sel->r.width, src_size->width); + sel->r.height = min(sel->r.height, src_size->height); + + sel->r.left = min_t(s32, sel->r.left, src_size->width - sel->r.width); + sel->r.top = min_t(s32, sel->r.top, src_size->height - sel->r.height); + + *crops[sel->pad] = sel->r; + + if (ssd != sensor->pixel_array && sel->pad == CRL_PAD_SINK) + crlmodule_propagate(subdev, cfg, sel->which, + V4L2_SEL_TGT_CROP); + + /* TODO! Should we short list supported mode? 
*/ + + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Modified based on the CRL Module changes + */ +static int crlmodule_set_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct v4l2_rect *crops[CRL_PADS]; + + dev_dbg(&client->dev, "%s sd_name: %s pad: %d w: %d, h: %d code: 0x%x", + __func__, ssd->sd.name, fmt->pad, + fmt->format.width, fmt->format.height, + fmt->format.code); + + mutex_lock(&sensor->mutex); + + /* Currently we only support ALTERNATE interlaced mode. */ + if (fmt->format.field != V4L2_FIELD_ALTERNATE) + fmt->format.field = V4L2_FIELD_NONE; + ssd->field = fmt->format.field; + + if (fmt->pad == ssd->source_pad) { + u32 code = fmt->format.code; + int rval = __crlmodule_get_format(subdev, cfg, fmt); + + if (!rval && subdev == &sensor->src->sd) { + /* Check if this code is supported, if yes get index */ + int idx = __crlmodule_get_data_fmt_index(sensor, code); + + if (idx < 0) { + dev_err(&client->dev, "%s invalid format\n", + __func__); + mutex_unlock(&sensor->mutex); + return -EINVAL; + } + + sensor->fmt_index = idx; + /* TODO! validate PLL? */ + } + mutex_unlock(&sensor->mutex); + return rval; + } + + fmt->format.width = + clamp_t(uint32_t, fmt->format.width, + sensor->sensor_ds->sensor_limits->x_addr_min, + sensor->sensor_ds->sensor_limits->x_addr_max); + fmt->format.height = + clamp_t(uint32_t, fmt->format.height, + sensor->sensor_ds->sensor_limits->y_addr_min, + sensor->sensor_ds->sensor_limits->y_addr_max); + + crlmodule_get_crop_compose(subdev, cfg, crops, NULL, fmt->which); + + crops[ssd->sink_pad]->left = 0; + crops[ssd->sink_pad]->top = 0; + crops[ssd->sink_pad]->width = fmt->format.width; + crops[ssd->sink_pad]->height = fmt->format.height; + if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) + ssd->sink_fmt = *crops[ssd->sink_pad]; + + crlmodule_propagate(subdev, cfg, fmt->which, + V4L2_SEL_TGT_CROP); + + crlmodule_update_current_mode(sensor); + + mutex_unlock(&sensor->mutex); + + return 0; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_set_selection(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + int ret; + + dev_dbg(&client->dev, "%s sd_name: %s sel w: %d, h: %d target: %d", + __func__, ssd->sd.name, sel->r.width, + sel->r.height, sel->target); + + ret = __crlmodule_sel_supported(subdev, sel); + if (ret) { + dev_dbg(&client->dev, + "%s sd_name: %s w: %d, h: %d target: %d not supported", + __func__, ssd->sd.name, sel->r.width, + sel->r.height, sel->target); + return ret; + } + + mutex_lock(&sensor->mutex); + + sel->r.width = max_t(unsigned int, + sensor->sensor_ds->sensor_limits->x_addr_min, + sel->r.width); + sel->r.height = max_t(unsigned int, + sensor->sensor_ds->sensor_limits->y_addr_min, + sel->r.height); + switch (sel->target) { + case V4L2_SEL_TGT_CROP: + ret = crlmodule_set_crop(subdev, cfg, sel); + break; + case V4L2_SEL_TGT_COMPOSE: + ret = crlmodule_set_compose(subdev, cfg, sel); + break; + default: 
+		ret = -EINVAL;
+	}
+
+	crlmodule_update_current_mode(sensor);
+
+	mutex_unlock(&sensor->mutex);
+	return ret;
+}
+
+static int crlmodule_get_skip_frames(struct v4l2_subdev *subdev, u32 *frames)
+{
+	/* TODO Handle this */
+	return 0;
+}
+
+static int crlmodule_start_streaming(struct crl_sensor *sensor)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	const struct crl_pll_configuration *pll;
+	const struct crl_csi_data_fmt *fmt;
+	int rval;
+
+	dev_dbg(&client->dev, "%s start streaming pll_idx: %d fmt_idx: %d\n",
+		__func__, sensor->pll_index,
+		sensor->fmt_index);
+
+	pll = &sensor->sensor_ds->pll_configs[sensor->pll_index];
+	fmt = &sensor->sensor_ds->csi_fmts[sensor->fmt_index];
+
+	crlmodule_update_current_mode(sensor);
+
+	rval = crlmodule_write_regs(sensor, fmt->regs, fmt->regs_items);
+	if (rval) {
+		dev_err(&client->dev, "%s failed to set format\n", __func__);
+		return rval;
+	}
+
+	rval = crlmodule_write_regs(sensor, pll->pll_regs, pll->pll_regs_items);
+	if (rval) {
+		dev_err(&client->dev, "%s failed to set plls\n", __func__);
+		return rval;
+	}
+
+	/* Write mode list */
+	rval = crlmodule_write_regs(sensor,
+				    sensor->current_mode->mode_regs,
+				    sensor->current_mode->mode_regs_items);
+	if (rval) {
+		dev_err(&client->dev, "%s failed to set mode\n", __func__);
+		return rval;
+	}
+
+	/* Write stream on list */
+	rval = crlmodule_write_regs(sensor,
+				    sensor->sensor_ds->streamon_regs,
+				    sensor->sensor_ds->streamon_regs_items);
+	if (rval) {
+		dev_err(&client->dev, "%s failed to set stream\n", __func__);
+		return rval;
+	}
+
+	return 0;
+}
+
+static int crlmodule_stop_streaming(struct crl_sensor *sensor)
+{
+	return crlmodule_write_regs(sensor,
+				    sensor->sensor_ds->streamoff_regs,
+				    sensor->sensor_ds->streamoff_regs_items);
+}
+
+static int crlmodule_set_stream(struct v4l2_subdev *subdev, int enable)
+{
+	struct crl_sensor *sensor = to_crlmodule_sensor(subdev);
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	int rval = 0;
+
+	mutex_lock(&sensor->mutex);
+
+	if (sensor->streaming == enable)
+		goto out;
+
+	if (enable) {
+		if (sensor->msr_list) {
+			rval = crlmodule_apply_msrlist(client,
+						       sensor->msr_list);
+			if (rval)
+				dev_warn(&client->dev, "msrlist write error %d\n",
+					 rval);
+		}
+		rval = crlmodule_start_streaming(sensor);
+		if (!rval)
+			sensor->streaming = 1;
+	} else {
+		rval = crlmodule_stop_streaming(sensor);
+		sensor->streaming = 0;
+	}
+
+out:
+	mutex_unlock(&sensor->mutex);
+
+	/* SENSOR_IDLE control cannot be set when streaming */
+	__crlmodule_grab_v4l2_ctrl(sensor, SENSOR_IDLE, enable);
+
+	/* SENSOR_STREAMING controls cannot be set when not streaming */
+	__crlmodule_grab_v4l2_ctrl(sensor, SENSOR_STREAMING, !enable);
+
+	/* SENSOR_POWERED_ON controls are not affected by the streaming state */
+	__crlmodule_grab_v4l2_ctrl(sensor, SENSOR_POWERED_ON, false);
+
+	return rval;
+}
+
+static int crlmodule_identify_module(struct v4l2_subdev *subdev)
+{
+	struct crl_sensor *sensor = to_crlmodule_sensor(subdev);
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	unsigned int size = 0, pos = 0;
+	char *id_string;
+	const char *expect_id;
+	int i, ret;
+	u32 val;
+
+	for (i = 0; i < sensor->sensor_ds->id_reg_items; i++)
+		size += sensor->sensor_ds->id_regs[i].width + 1;
+
+	/* TODO! If no ID, return success? */
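+	/*
+	 * (Illustration only: with two ID registers reading 0x55 and 0xaa,
+	 * the id_string built below becomes "0x55 0xaa", and the platform
+	 * data id_string must match that format exactly.)
+	 */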
+	if (!size)
+		return 0;
+
+	expect_id = sensor->platform_data->id_string;
+	/* Create a string variable to append the module ID */
+	id_string = kzalloc(size, GFP_KERNEL);
+	if (!id_string)
+		return -ENOMEM;
+	*id_string = '\0';
+
+	/* Go through each regs in the list and append to id_string */
+	for (i = 0; i < sensor->sensor_ds->id_reg_items; i++) {
+		ret = crlmodule_read_reg(sensor,
+					 sensor->sensor_ds->id_regs[i].reg,
+					 &val);
+		if (ret)
+			goto out;
+
+		if (i)
+			pos += snprintf(id_string + pos, size - pos, " 0x%x", val);
+		else
+			pos = snprintf(id_string, size, "0x%x", val);
+		if (pos >= size)
+			break;
+	}
+
+	/* Check here if this module is in the supported list.
+	 * Ideally the module manufacturer and id should be in platform
+	 * data or ACPI, and here the driver should read the value from the
+	 * register and check if this matches any in the supported
+	 * platform data
+	 */
+	if (expect_id &&
+	    (strnlen(id_string, size) != strnlen(expect_id, size + 1) ||
+	     strncmp(id_string, expect_id, size))) {
+		dev_err(&client->dev,
+			"Sensor detection failed: expect \"%s\" actual \"%s\"",
+			expect_id, id_string);
+		ret = -ENODEV;
+	}
+
+out:
+	dev_dbg(&client->dev, "%s module: %s expected id: %s\n",
+		__func__, id_string,
+		(expect_id) ? expect_id : "not specified");
+	kfree(id_string);
+	if (ret)
+		dev_err(&client->dev, "sensor detection failed\n");
+	return ret;
+}
+
+static int crlmodule_get_frame_desc(struct v4l2_subdev *subdev,
+				    unsigned int pad,
+				    struct v4l2_mbus_frame_desc *desc)
+{
+	struct crl_sensor *sensor = to_crlmodule_sensor(subdev);
+	struct crl_frame_desc *crl_desc = sensor->sensor_ds->frame_desc;
+	unsigned int i;
+
+	desc->num_entries = sensor->sensor_ds->frame_desc_entries;
+	if (desc->num_entries)
+		desc->type = sensor->sensor_ds->frame_desc_type;
+
+	/*
+	 * If the sensor configuration happens to have more entries than the
+	 * maximum supported, clip the number of entries to the max supported.
+ */ + if (desc->num_entries > V4L2_FRAME_DESC_ENTRY_MAX) + desc->num_entries = V4L2_FRAME_DESC_ENTRY_MAX; + + for (i = 0; i < desc->num_entries; i++) { + int ret; + u32 val; + + ret = __crlmodule_parse_dynamic_entity(sensor, + crl_desc[i].flags, &val); + if (ret) + return ret; + desc->entry[i].flags = (u16)val; + + ret = __crlmodule_parse_dynamic_entity(sensor, crl_desc[i].bpp, + &val); + if (ret) + return ret; + desc->entry[i].bpp = (u8)val; + + ret = __crlmodule_parse_dynamic_entity( + sensor, crl_desc[i].pixelcode, &val); + if (ret) + return ret; + desc->entry[i].pixelcode = val; + + if (desc->entry[i].flags & V4L2_MBUS_FRAME_DESC_FL_BLOB) { + ret = __crlmodule_parse_dynamic_entity( + sensor, crl_desc[i].length, &val); + if (ret) + return ret; + desc->entry[i].size.length = val; + } else { + ret = __crlmodule_parse_dynamic_entity( + sensor, crl_desc[i].start_line, &val); + if (ret) + return ret; + desc->entry[i].size.two_dim.start_line = + (u16)val; + + ret = __crlmodule_parse_dynamic_entity( + sensor, crl_desc[i].start_pixel, &val); + if (ret) + return ret; + desc->entry[i].size.two_dim.start_pixel = + (u16)val; + + ret = __crlmodule_calc_dynamic_entity_values( + sensor, crl_desc[i].height.ops_items, + crl_desc[i].height.ops, &val); + if (ret) + return ret; + desc->entry[i].size.two_dim.height = (u16)val; + + ret = __crlmodule_calc_dynamic_entity_values( + sensor, crl_desc[i].width.ops_items, + crl_desc[i].width.ops, &val); + if (ret) + return ret; + desc->entry[i].size.two_dim.width = (u16)val; + } + + if (desc->type == CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2) { + ret = __crlmodule_parse_dynamic_entity( + sensor, crl_desc[i].csi2_channel, &val); + if (ret) + return ret; + desc->entry[i].bus.csi2.channel = (u8)val; + + ret = __crlmodule_parse_dynamic_entity( + sensor, crl_desc[i].csi2_data_type, &val); + if (ret) + return ret; + desc->entry[i].bus.csi2.data_type = (u8)val; + } + } + + return 0; +} + + +static int crlmodule_get_routing(struct v4l2_subdev *subdev, + struct v4l2_subdev_routing *route) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + int i; + + if (!route) + return -EINVAL; + + if (ssd != sensor->src || + sensor->sensor_ds->frame_desc_entries <= 1) + return -ENOIOCTLCMD; + + for (i = 0; i < min(sensor->sensor_ds->frame_desc_entries, + route->num_routes); i++) { + route->routes[i].sink_pad = CRL_PAD_SINK; + route->routes[i].sink_stream = 0; + route->routes[i].source_pad = CRL_PAD_SRC; + route->routes[i].source_stream = i; + route->routes[i].flags = sensor->src->route_flags[i]; + } + + route->num_routes = i; + return 0; +} + +static int crlmodule_set_routing(struct v4l2_subdev *subdev, + struct v4l2_subdev_routing *route) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct crl_subdev *ssd = to_crlmodule_subdev(subdev); + const unsigned int stream_nr = sensor->sensor_ds->frame_desc_entries; + struct v4l2_subdev_route *t; + int i, ret = 0; + + if (!route) + return -EINVAL; + + if (ssd != sensor->src || + sensor->sensor_ds->frame_desc_entries <= 1) + return -ENOIOCTLCMD; + + for (i = 0; i < min(stream_nr, route->num_routes); ++i) { + t = &route->routes[i]; + + if (t->source_stream > stream_nr - 1) + continue; + + if (t->source_pad != CRL_PAD_SRC || + t->sink_pad != CRL_PAD_SINK) + continue; + + if (t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) + sensor->src->route_flags[t->source_stream] |= + V4L2_SUBDEV_ROUTE_FL_ACTIVE; + else if (!(t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + 
sensor->src->route_flags[t->source_stream] &=
+				(~V4L2_SUBDEV_ROUTE_FL_ACTIVE);
+	}
+
+	return ret;
+}
+
+/*
+ * This function executes the initialisation routines after the power on
+ * is successfully completed. The following operations are done:
+ *
+ * Initialises registers after sensor power up - if any such list is configured
+ * V4L2 ctrl handler framework initialisation
+ */
+static int crlmodule_run_poweron_init(struct crl_sensor *sensor)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	int rval;
+
+	dev_dbg(&client->dev, "%s set power up registers: %d\n",
+		__func__, sensor->sensor_ds->powerup_regs_items);
+
+	/* Write the power up registers */
+	rval = crlmodule_write_regs(sensor, sensor->sensor_ds->powerup_regs,
+				    sensor->sensor_ds->powerup_regs_items);
+	if (rval) {
+		dev_err(&client->dev, "%s failed to set powerup registers\n",
+			__func__);
+		return rval;
+	}
+
+	/* Are we still initialising...? If yes, return here. */
+	if (!sensor->pixel_array)
+		return 0;
+
+	dev_dbg(&client->dev, "%s init v4l2 controls", __func__);
+
+	rval = v4l2_ctrl_handler_setup(
+		&sensor->pixel_array->ctrl_handler);
+	if (rval) {
+		dev_err(&client->dev, "%s PA v4l2_ctrl_handler failed\n",
+			__func__);
+		return rval;
+	}
+
+	rval = v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
+	if (rval)
+		dev_err(&client->dev, "%s SRC v4l2_ctrl_handler failed\n",
+			__func__);
+
+	/* SENSOR_IDLE control can be set only when not streaming */
+	__crlmodule_grab_v4l2_ctrl(sensor, SENSOR_IDLE, false);
+
+	/* SENSOR_STREAMING controls can be set only when streaming */
+	__crlmodule_grab_v4l2_ctrl(sensor, SENSOR_STREAMING, true);
+
+	/* SENSOR_POWERED_ON controls can be set after power on */
+	__crlmodule_grab_v4l2_ctrl(sensor, SENSOR_POWERED_ON, false);
+
+	mutex_lock(&sensor->mutex);
+	crlmodule_update_current_mode(sensor);
+	mutex_unlock(&sensor->mutex);
+
+	return rval;
+}
+
+static int custom_gpio_request(struct crl_sensor *sensor)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	int i;
+
+	for (i = 0; i < CRL_MAX_CUSTOM_GPIO_AMOUNT; i++) {
+		if (sensor->platform_data->custom_gpio[i].name[0] == '\0')
+			break;
+		if (devm_gpio_request_one(
+			    &client->dev,
+			    sensor->platform_data->custom_gpio[i].number, 0,
+			    sensor->platform_data->custom_gpio[i].name) != 0) {
+			dev_err(&client->dev,
+				"unable to acquire %s %d\n",
+				sensor->platform_data->custom_gpio[i].name,
+				sensor->platform_data->custom_gpio[i].number);
+			return -ENODEV;
+		}
+	}
+	return 0;
+}
+
+static void custom_gpio_ctrl(struct crl_sensor *sensor, bool set)
+{
+	int i;
+	unsigned int val;
+
+	for (i = 0; i < CRL_MAX_CUSTOM_GPIO_AMOUNT; i++) {
+		if (sensor->platform_data->custom_gpio[i].name[0] == '\0')
+			break;
+		if (set)
+			val = sensor->platform_data->custom_gpio[i].val;
+		else
+			val = sensor->platform_data->custom_gpio[i].undo_val;
+
+		gpio_set_value(
+			sensor->platform_data->custom_gpio[i].number, val);
+	}
+}
+
+/*
+ * This function handles sensor power up routine failure because of any failed
+ * step in the routine. The index "rev_idx" is the index of the last
+ * successfully completed power sequence entity. This function executes the
+ * power sequence entities in reverse order with their undo values.
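+ *
+ * For example, if the entity at index 3 fails to power up,
+ * crlmodule_undo_poweron_entities() is called with rev_idx 2 and entities
+ * 2, 1 and 0 are undone in that (reverse) order.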
+ */
+static void crlmodule_undo_poweron_entities(
+					struct crl_sensor *sensor,
+					int rev_idx)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	struct crl_power_seq_entity *entity;
+	int idx;
+
+	for (idx = rev_idx; idx >= 0; idx--) {
+		entity = &sensor->pwr_entity[idx];
+		dev_dbg(&client->dev, "%s power type %d index %d\n",
+			__func__, entity->type, idx);
+
+		switch (entity->type) {
+		case CRL_POWER_ETY_GPIO_FROM_PDATA:
+#ifdef CONFIG_INTEL_IPU4_OV13858
+			if (!vcm_in_use) {
+#endif
+				gpio_set_value(sensor->platform_data->xshutdown,
+					       entity->undo_val);
+#ifdef CONFIG_INTEL_IPU4_OV13858
+			}
+#endif
+			break;
+		case CRL_POWER_ETY_GPIO_FROM_PDATA_BY_NUMBER:
+			custom_gpio_ctrl(sensor, false);
+			break;
+		case CRL_POWER_ETY_GPIO_CUSTOM:
+			if (entity->gpiod_priv) {
+				if (gpiod_cansleep(entity->gpiod_priv))
+					gpiod_set_raw_value_cansleep(
+						entity->gpiod_priv,
+						entity->undo_val);
+				else
+					gpiod_set_raw_value(entity->gpiod_priv,
+							    entity->undo_val);
+			} else {
+				gpio_set_value(entity->ent_number,
+					       entity->undo_val);
+			}
+			break;
+		case CRL_POWER_ETY_REGULATOR_FRAMEWORK:
+			regulator_disable(entity->regulator_priv);
+			break;
+		case CRL_POWER_ETY_CLK_FRAMEWORK:
+			clk_disable_unprepare(sensor->xclk);
+			break;
+		default:
+			dev_err(&client->dev, "%s Invalid power type\n",
+				__func__);
+			break;
+		}
+
+		if (entity->delay)
+			usleep_range(entity->delay, entity->delay + 10);
+	}
+}
+
+static int __crlmodule_powerup_sequence(struct crl_sensor *sensor)
+{
+	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	struct crl_power_seq_entity *entity;
+	unsigned int idx;
+	int rval;
+
+	for (idx = 0; idx < sensor->sensor_ds->power_items; idx++) {
+		entity = &sensor->pwr_entity[idx];
+		dev_dbg(&client->dev, "%s power type %d index %d\n",
+			__func__, entity->type, idx);
+
+		switch (entity->type) {
+		case CRL_POWER_ETY_GPIO_FROM_PDATA:
+			gpio_set_value(sensor->platform_data->xshutdown,
+				       entity->val);
+			break;
+		case CRL_POWER_ETY_GPIO_FROM_PDATA_BY_NUMBER:
+			custom_gpio_ctrl(sensor, true);
+			break;
+		case CRL_POWER_ETY_GPIO_CUSTOM:
+			if (entity->gpiod_priv) {
+				if (gpiod_cansleep(entity->gpiod_priv))
+					gpiod_set_raw_value_cansleep(
+						entity->gpiod_priv,
+						entity->val);
+				else
+					gpiod_set_raw_value(entity->gpiod_priv,
+							    entity->val);
+			} else {
+				gpio_set_value(entity->ent_number, entity->val);
+			}
+			break;
+		case CRL_POWER_ETY_REGULATOR_FRAMEWORK:
+			rval = regulator_enable(entity->regulator_priv);
+			if (rval) {
+				dev_err(&client->dev,
+					"Failed to enable regulator: %d\n",
+					rval);
+				devm_regulator_put(entity->regulator_priv);
+				entity->regulator_priv = NULL;
+				goto error;
+			}
+			break;
+		case CRL_POWER_ETY_CLK_FRAMEWORK:
+			rval = clk_set_rate(sensor->xclk,
+					    sensor->platform_data->ext_clk);
+			if (rval < 0) {
+				dev_err(&client->dev,
+					"unable to set clock freq to %u\n",
+					sensor->platform_data->ext_clk);
+				goto error;
+			}
+			if (clk_get_rate(sensor->xclk) !=
+			    sensor->platform_data->ext_clk)
+				dev_warn(&client->dev,
+					 "warning: unable to set accurate clock freq %u\n",
+					 sensor->platform_data->ext_clk);
+			rval = clk_prepare_enable(sensor->xclk);
+			if (rval) {
+				dev_err(&client->dev,
+					"Failed to enable clock: %d\n", rval);
+				goto error;
+			}
+			break;
+		default:
+			dev_err(&client->dev, "Invalid power type\n");
+			rval = -ENODEV;
+			goto error;
+		}
+
+		if (entity->delay)
+			usleep_range(entity->delay, entity->delay + 10);
+	}
+
+	return 0;
+error:
+	dev_err(&client->dev, "Error: Power sequence failed\n");
+	if (idx > 0)
+		crlmodule_undo_poweron_entities(sensor, idx - 1);
+	return rval;
+}
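+
+/*
+ * Illustrative sketch only (not part of the original driver): a sensor
+ * configuration file could describe the power sequence consumed by
+ * __crlmodule_powerup_sequence() above as a table like the one below.
+ * The GPIO number, delays and ordering are hypothetical example values;
+ * real tables come from the sensor definition files.
+ */
+#if 0	/* example only, never built */
+static struct crl_power_seq_entity example_power_items[] = {
+	{
+		.type = CRL_POWER_ETY_REGULATOR_FRAMEWORK,
+		.val = 1,
+		.undo_val = 0,
+		.delay = 1000,		/* us, applied after this step */
+	},
+	{
+		/* the rate comes from platform_data->ext_clk, not .val */
+		.type = CRL_POWER_ETY_CLK_FRAMEWORK,
+		.delay = 0,
+	},
+	{
+		.type = CRL_POWER_ETY_GPIO_CUSTOM,
+		.ent_number = 64,	/* hypothetical reset GPIO */
+		.val = 1,
+		.undo_val = 0,
+		.delay = 2000,
+	},
+};
+#endif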
+ +static int crlmodule_set_power(struct v4l2_subdev *subdev, int on) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + int ret = 0; + + if (on) { + ret = pm_runtime_get_sync(&client->dev); + if (ret < 0) { + pm_runtime_put(&client->dev); + return ret; + } + } + + mutex_lock(&sensor->power_mutex); + if (on && !sensor->power_count) { + usleep_range(2000, 3000); + ret = crlmodule_run_poweron_init(sensor); + if (ret < 0) { + pm_runtime_put(&client->dev); + goto out; + } + } + + /* Update the power count. */ + sensor->power_count += on ? 1 : -1; + WARN_ON(sensor->power_count < 0); + +out: + mutex_unlock(&sensor->power_mutex); + + if (!on) + pm_runtime_put(&client->dev); + + return ret; +} + +static const struct v4l2_subdev_ops crlmodule_ops; +static const struct v4l2_subdev_internal_ops crlmodule_internal_ops; +static const struct media_entity_operations crlmodule_entity_ops; + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Modified based on the CRL Module changes + */ +static int crlmodule_init_subdevs(struct v4l2_subdev *subdev) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct crl_subdev *prev_sd = NULL; + int i = 0, j; + struct crl_subdev *sd; + int rval = 0; + + dev_dbg(&client->dev, "%s\n", __func__); + + /* + * The scaler, binner and PA order matters. Sensor configuration file + * must maintain this order. PA sub dev is a must and binner and + * scaler can be omitted based on the sensor. But if scaler is present + * it must be the first sd. + */ + if (sensor->sensor_ds->subdevs[i].subdev_type + == CRL_SUBDEV_TYPE_SCALER) { + sensor->scaler = &sensor->ssds[sensor->ssds_used]; + sensor->ssds_used++; + i++; + } + + if (sensor->sensor_ds->subdevs[i].subdev_type + == CRL_SUBDEV_TYPE_BINNER) { + sensor->binner = &sensor->ssds[sensor->ssds_used]; + sensor->ssds_used++; + i++; + } + + if (sensor->sensor_ds->subdevs[i].subdev_type + == CRL_SUBDEV_TYPE_PIXEL_ARRAY) { + sensor->pixel_array = &sensor->ssds[sensor->ssds_used]; + sensor->ssds_used++; + i++; + } + + /* CRL MediaCTL IF driver can't handle if none of these sd's present! */ + if (!sensor->ssds_used) { + dev_err(&client->dev, "%s no subdevs present\n", __func__); + return -ENODEV; + } + + if (!sensor->sensor_ds->pll_config_items) { + dev_err(&client->dev, "%s no pll configurations\n", __func__); + return -ENODEV; + } + + /* TODO validate rest of the settings from the sensor definition file */ + + dev_dbg(&client->dev, "%s subdevs: %d\n", __func__, i); + + for (i = 0; i < sensor->ssds_used; i++) { + bool has_substreams = false; + + sd = &sensor->ssds[i]; + + if (sd != sensor->src) + v4l2_subdev_init(&sd->sd, &crlmodule_ops); + else if (sensor->sensor_ds->frame_desc_entries > 1) + has_substreams = true; + + sd->sensor = sensor; + + if (sd == sensor->pixel_array) { + sd->npads = 1; + } else { + sd->npads = 2; + sd->source_pad = 1; + } + + snprintf(sd->sd.name, + sizeof(sd->sd.name), "%s %d-%4.4x", + sensor->sensor_ds->subdevs[i].name, + i2c_adapter_id(client->adapter), client->addr); + + sd->sink_fmt.width = + sensor->sensor_ds->sensor_limits->x_addr_max; + sd->sink_fmt.height = + sensor->sensor_ds->sensor_limits->y_addr_max; + sd->compose.width = sd->sink_fmt.width; + sd->compose.height = sd->sink_fmt.height; + sd->crop[sd->source_pad] = sd->compose; + sd->pads[sd->source_pad].flags = MEDIA_PAD_FL_SOURCE | + (has_substreams ? 
MEDIA_PAD_FL_MULTIPLEX : 0); + if (sd != sensor->pixel_array) { + sd->crop[sd->sink_pad] = sd->compose; + sd->pads[sd->sink_pad].flags = MEDIA_PAD_FL_SINK; + } + + if (has_substreams) { + sd->route_flags = devm_kzalloc(&client->dev, + sizeof(unsigned int) * + sensor->sensor_ds->frame_desc_entries, + GFP_KERNEL); + if (!sd->route_flags) + return -ENOMEM; + for (j = 0; j < sensor->sensor_ds->frame_desc_entries; + j++) + sd->route_flags[j] = + V4L2_SUBDEV_ROUTE_FL_SOURCE; + sd->route_flags[0] |= + V4L2_SUBDEV_ROUTE_FL_ACTIVE; + } + + sd->sd.entity.ops = &crlmodule_entity_ops; + + if (prev_sd == NULL) { + prev_sd = sd; + continue; + } + + sd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + sd->sd.internal_ops = &crlmodule_internal_ops; + sd->sd.owner = THIS_MODULE; + v4l2_set_subdevdata(&sd->sd, client); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = media_entity_init(&sd->sd.entity, sd->npads, + sd->pads, 0); +#else + rval = media_entity_pads_init(&sd->sd.entity, sd->npads, + sd->pads); +#endif + if (rval) { + dev_err(&client->dev, + "media_entity_init failed\n"); + return rval; + } + + rval = v4l2_device_register_subdev(sensor->src->sd.v4l2_dev, + &sd->sd); + if (rval) { + dev_err(&client->dev, + "v4l2_device_register_subdev failed\n"); + return rval; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = media_entity_create_link(&sd->sd.entity, +#else + rval = media_create_pad_link(&sd->sd.entity, +#endif + sd->source_pad, + &prev_sd->sd.entity, + prev_sd->sink_pad, + MEDIA_LNK_FL_ENABLED | + MEDIA_LNK_FL_IMMUTABLE); + if (rval) { + dev_err(&client->dev, + "media_entity_create_link failed\n"); + return rval; + } + + prev_sd = sd; + } + + return rval; +} + +static int __init_power_resources(struct v4l2_subdev *subdev) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct crl_power_seq_entity *entity; + unsigned idx; + + sensor->pwr_entity = devm_kzalloc(&client->dev, + sizeof(struct crl_power_seq_entity) * + sensor->sensor_ds->power_items, GFP_KERNEL); + + if (!sensor->pwr_entity) + return -ENOMEM; + + for (idx = 0; idx < sensor->sensor_ds->power_items; idx++) + sensor->pwr_entity[idx] = + sensor->sensor_ds->power_entities[idx]; + + dev_dbg(&client->dev, "%s\n", __func__); + + for (idx = 0; idx < sensor->sensor_ds->power_items; idx++) { + int rval; + + entity = &sensor->pwr_entity[idx]; + + switch (entity->type) { + case CRL_POWER_ETY_GPIO_FROM_PDATA: + if (devm_gpio_request_one(&client->dev, + sensor->platform_data->xshutdown, 0, + "CRL xshutdown") != 0) { + dev_err(&client->dev, + "unable to acquire xshutdown %d\n", + sensor->platform_data->xshutdown); + return -ENODEV; + } + break; + case CRL_POWER_ETY_GPIO_FROM_PDATA_BY_NUMBER: + rval = custom_gpio_request(sensor); + if (rval < 0) + return rval; + break; + case CRL_POWER_ETY_GPIO_CUSTOM: + if (entity->ent_name[0]) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + entity->gpiod_priv = gpiod_get(NULL, + entity->ent_name); +#else + entity->gpiod_priv = gpiod_get(NULL, + entity->ent_name, GPIOD_OUT_LOW); +#endif + if (IS_ERR(entity->gpiod_priv)) { + dev_err(&client->dev, + "unable to acquire custom gpio %s\n", + entity->ent_name); + entity->gpiod_priv = NULL; + return -ENODEV; + } + } else { + if (devm_gpio_request_one(&client->dev, + entity->ent_number, 0, + "CRL Custom") != 0) { + dev_err(&client->dev, + "unable to acquire custom gpio %d\n", + entity->ent_number); + return -ENODEV; + } + } + break; + case 
CRL_POWER_ETY_REGULATOR_FRAMEWORK: + entity->regulator_priv = devm_regulator_get( + &client->dev, entity->ent_name); + if (IS_ERR(entity->regulator_priv)) { + dev_err(&client->dev, + "Failed to get regulator: %s\n", + entity->ent_name); + entity->regulator_priv = NULL; + return -ENODEV; + } + rval = regulator_set_voltage(entity->regulator_priv, + entity->val, + entity->val); + /* Not all regulator supports voltage change */ + if (rval < 0) + dev_info(&client->dev, + "Failed to set voltage %s %d\n", + entity->ent_name, entity->val); + break; + case CRL_POWER_ETY_CLK_FRAMEWORK: + sensor->xclk = devm_clk_get(&client->dev, + entity->ent_name[0] ? entity->ent_name : NULL); + if (IS_ERR(sensor->xclk)) { + dev_err(&client->dev, + "Cannot get sensor clk\n"); + return -ENODEV; + } + break; + default: + dev_err(&client->dev, "Invalid Power item\n"); + return -ENODEV; + } + } + + return 0; +} + +static int crl_request_gpio_irq(struct crl_sensor *sensor) +{ + int rval; + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + int irq_pin = sensor->platform_data->crl_irq_pin; + + if (!gpio_is_valid(irq_pin)) { + dev_err(&client->dev, "%s: GPIO pin %d is invalid!\n", + __func__, irq_pin); + return -ENODEV; + } + dev_dbg(&client->dev, + "%s: IRQ GPIO %d is valid.\n", __func__, irq_pin); + + rval = devm_gpio_request(&client->dev, irq_pin, + sensor->platform_data->irq_pin_name); + if (rval) { + dev_err(&client->dev, + "%s:IRQ GPIO pin request failed!\n", __func__); + return rval; + } + + gpio_direction_input(irq_pin); + sensor->irq = gpio_to_irq(irq_pin); + rval = devm_request_threaded_irq(&client->dev, sensor->irq, + sensor->sensor_ds->crl_irq_fn, + sensor->sensor_ds->crl_threaded_irq_fn, + sensor->platform_data->irq_pin_flags, + sensor->platform_data->irq_pin_name, + sensor); + + dev_dbg(&client->dev, "%s: GPIO register GPIO IRQ result: %d\n", + __func__, rval); + + return rval; +} + +static int crlmodule_registered(struct v4l2_subdev *subdev) +{ + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + + int rval; + + rval = __init_power_resources(subdev); + if (rval) + return -ENODEV; + + pm_runtime_enable(&client->dev); + + /* Power up the sensor */ + if (pm_runtime_get_sync(&client->dev) < 0) { + rval = -ENODEV; + goto out; + } + + /* init GPIO IRQ */ + if (sensor->sensor_ds->irq_in_use == true) { + rval = crl_request_gpio_irq(sensor); + if (rval) { + rval = -ENODEV; + goto out; + } + } + + /* one time init */ + rval = crlmodule_write_regs(sensor, + sensor->sensor_ds->onetime_init_regs, + sensor->sensor_ds->onetime_init_regs_items); + if (rval) { + dev_err(&client->dev, "%s failed to set powerup registers\n", + __func__); + rval = -ENODEV; + goto out; + } + + /* sensor specific init */ + if (sensor->sensor_ds->sensor_init) { + rval = sensor->sensor_ds->sensor_init(client); + + if (rval) { + dev_err(&client->dev, + "%s failed to run sensor specific init\n", + __func__); + rval = -ENODEV; + goto out; + } + } + /* Identify the module */ + rval = crlmodule_identify_module(subdev); + if (rval) { + rval = -ENODEV; + goto out; + } + + rval = crlmodule_init_subdevs(subdev); + if (rval) + goto out; + + sensor->binning_horizontal = 1; + sensor->binning_vertical = 1; + sensor->scale_m = 1; + sensor->flip_info = CRL_FLIP_DEFAULT_NONE; + sensor->ext_ctrl_impacts_pll_selection = false; + sensor->ext_ctrl_impacts_mode_selection = false; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + sensor->pixel_array->sd.entity.type = 
MEDIA_ENT_T_V4L2_SUBDEV_SENSOR; +#else + sensor->pixel_array->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; +#endif + + rval = crlmodule_init_controls(sensor); + if (rval) + goto out; + + mutex_lock(&sensor->mutex); + crlmodule_update_current_mode(sensor); + mutex_unlock(&sensor->mutex); + rval = crlmodule_nvm_init(sensor); + +out: + dev_dbg(&client->dev, "%s rval: %d\n", __func__, rval); + /* crlmodule_power_off(sensor); */ + pm_runtime_put(&client->dev); + + return rval; +} + +/* + * Function main code replicated from /drivers/media/i2c/smiapp/smiapp-core.c + * Slightly modified based on the CRL Module changes + */ +static int crlmodule_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + u32 mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10; + unsigned int i; + int rval; + + dev_dbg(&client->dev, "%s\n", __func__); + + mutex_lock(&sensor->mutex); + + for (i = 0; i < ssd->npads; i++) { + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format(sd, fh->pad, i); + struct v4l2_rect *try_crop = v4l2_subdev_get_try_crop(sd, + fh->pad, i); + struct v4l2_rect *try_comp; + + try_fmt->width = sensor->sensor_ds->sensor_limits->x_addr_max; + try_fmt->height = sensor->sensor_ds->sensor_limits->y_addr_max; + try_fmt->code = mbus_code; + + try_crop->top = 0; + try_crop->left = 0; + try_crop->width = try_fmt->width; + try_crop->height = try_fmt->height; + + if (ssd != sensor->pixel_array) + continue; + + try_comp = v4l2_subdev_get_try_compose(sd, fh->pad, i); + *try_comp = *try_crop; + } + + mutex_unlock(&sensor->mutex); + + + rval = pm_runtime_get_sync(&client->dev); + if (rval < 0) + pm_runtime_put(&client->dev); + return rval; +} + +static int crlmodule_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + + pm_runtime_put(&client->dev); + + return 0; +} + +static int crlmodule_get_registers(struct v4l2_subdev *sd, struct crl_registers_info *info) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct crl_register_read_rep reg; + int i; + int ret = 0; + + if (info->number > REGS_BUF_SIZE) { + dev_err(&client->dev, "error: max register's numbers than %d\n", REGS_BUF_SIZE); + return -1; + } + + for (i = 0; i < info->number; i++) { + reg.address = info->start_address + i; + reg.dev_i2c_addr = CRL_I2C_ADDRESS_NO_OVERRIDE; + reg.len = info->len; + reg.mask = 0xff; + ret = crlmodule_read_reg(sensor, reg, &info->regs[i]); + if (ret < 0) + return ret; + } + + return ret; +} + +static int crlmodule_set_registers(struct v4l2_subdev *sd, struct crl_registers_info *info) +{ + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + int i; + int ret = 0; + + if (info->number > REGS_BUF_SIZE) { + dev_err(&client->dev, "error: max register's numbers than %d\n", REGS_BUF_SIZE); + return -1; + } + + for (i = 0; i < info->number; i++) { + ret = crlmodule_write_reg(sensor, CRL_I2C_ADDRESS_NO_OVERRIDE, + info->start_address + i, info->len, 0xff, info->regs[i]); + if (ret < 0) + return ret; + } + + return ret; +} + +static long crlmodule_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) +{ + int ret; + + switch (cmd) { + case CRL_G_REGISTERS: + ret = 
crlmodule_get_registers(sd, arg); + break; + case CRL_S_REGISTERS: + ret = crlmodule_set_registers(sd, arg); + break; + default: + ret = -1; + break; + }; + + return ret; +} + +static const struct v4l2_subdev_video_ops crlmodule_video_ops = { + .s_stream = crlmodule_set_stream, +}; + +static const struct v4l2_subdev_core_ops crlmodule_core_ops = { + .s_power = crlmodule_set_power, + .ioctl = crlmodule_ioctl, +}; + +static const struct v4l2_subdev_pad_ops crlmodule_pad_ops = { + .enum_mbus_code = crlmodule_enum_mbus_code, + .get_fmt = crlmodule_get_format, + .set_fmt = crlmodule_set_format, + .get_selection = crlmodule_get_selection, + .set_selection = crlmodule_set_selection, + .enum_frame_size = crlmodule_enum_frame_size, + .get_frame_desc = crlmodule_get_frame_desc, + .get_routing = crlmodule_get_routing, + .set_routing = crlmodule_set_routing, +}; + +static const struct v4l2_subdev_sensor_ops crlmodule_sensor_ops = { + .g_skip_frames = crlmodule_get_skip_frames, +}; + +static const struct v4l2_subdev_ops crlmodule_ops = { + .core = &crlmodule_core_ops, + .video = &crlmodule_video_ops, + .pad = &crlmodule_pad_ops, + .sensor = &crlmodule_sensor_ops, +}; + +static const struct media_entity_operations crlmodule_entity_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +static const struct v4l2_subdev_internal_ops crlmodule_internal_src_ops = { + .registered = crlmodule_registered, + .open = crlmodule_open, + .close = crlmodule_close, +}; + +static const struct v4l2_subdev_internal_ops crlmodule_internal_ops = { + .open = crlmodule_open, + .close = crlmodule_close, +}; + +#ifdef CONFIG_PM + +static int crlmodule_runtime_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(sd); + + crlmodule_undo_poweron_entities(sensor, + sensor->sensor_ds->power_items - 1); + return 0; +} + +static int crlmodule_runtime_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct crl_sensor *sensor = to_crlmodule_sensor(sd); + + return __crlmodule_powerup_sequence(sensor); +} + +static int crlmodule_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + + if (sensor->streaming) + crlmodule_stop_streaming(sensor); + + if (sensor->power_count > 0) + crlmodule_undo_poweron_entities(sensor, + sensor->sensor_ds->power_items - 1); + return 0; +} + +static int crlmodule_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct crl_subdev *ssd = to_crlmodule_subdev(sd); + struct crl_sensor *sensor = ssd->sensor; + int rval = 0; + + if (sensor->power_count > 0) { + rval = __crlmodule_powerup_sequence(sensor); + if (!rval) + rval = crlmodule_run_poweron_init(sensor); + } + + if (!rval && sensor->streaming) + rval = crlmodule_start_streaming(sensor); + + return rval; +} +#else +#define crlmodule_runtime_suspend NULL +#define crlmodule_runtime_resume NULL +#define crlmodule_suspend NULL +#define crlmodule_resume NULL +#endif /* CONFIG_PM */ + +static int crlmodule_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct crl_sensor *sensor; + int ret; +#ifdef CONFIG_INTEL_IPU4_OV13858 + vcm_in_use = false; +#endif + 
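+	/*
+	 * Platform data is mandatory: module identification
+	 * (crlmodule_populate_ds), the power sequence and the optional
+	 * IRQ setup all read fields from it.
+	 */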
+	if (client->dev.platform_data == NULL)
+		return -ENODEV;
+
+	/* TODO! Create the sensor based on the interface */
+	sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL);
+	if (sensor == NULL)
+		return -ENOMEM;
+
+	sensor->platform_data = client->dev.platform_data;
+	mutex_init(&sensor->mutex);
+	mutex_init(&sensor->power_mutex);
+
+	ret = crlmodule_populate_ds(sensor, &client->dev);
+	if (ret)
+		return -ENODEV;
+
+	sensor->src = &sensor->ssds[sensor->ssds_used];
+
+	v4l2_i2c_subdev_init(&sensor->src->sd, client, &crlmodule_ops);
+	sensor->src->sd.internal_ops = &crlmodule_internal_src_ops;
+	sensor->src->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	if (sensor->sensor_ds->frame_desc_entries > 1)
+		sensor->src->sd.flags |= V4L2_SUBDEV_FL_HAS_SUBSTREAMS;
+
+	sensor->src->sensor = sensor;
+
+	sensor->src->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+	if (sensor->sensor_ds->frame_desc_entries > 1)
+		sensor->src->pads[0].flags |= MEDIA_PAD_FL_MULTIPLEX;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+	ret = media_entity_init(&sensor->src->sd.entity, 2,
+				sensor->src->pads, 0);
+#else
+	ret = media_entity_pads_init(&sensor->src->sd.entity, 2,
+				     sensor->src->pads);
+#endif
+	if (ret < 0)
+		goto cleanup;
+	ret = v4l2_async_register_subdev(&sensor->src->sd);
+	if (ret < 0)
+		goto cleanup;
+
+	/* Load IQ tuning registers from drvb file */
+	if (sensor->sensor_ds->msr_file_name) {
+		ret = crlmodule_load_msrlist(client,
+					     sensor->sensor_ds->msr_file_name,
+					     &sensor->msr_list);
+		if (ret)
+			dev_warn(&client->dev,
+				 "msrlist loading failed, ignoring and moving on\n");
+	} else {
+		/* sensor will still continue streaming */
+		dev_warn(&client->dev, "No msrlists associated with sensor\n");
+	}
+
+	return 0;
+
+cleanup:
+	media_entity_cleanup(&sensor->src->sd.entity);
+	crlmodule_release_ds(sensor);
+	return ret;
+}
+
+static void crlmodule_free_controls(struct crl_sensor *sensor)
+{
+	unsigned int i;
+
+	for (i = 0; i < sensor->ssds_used; i++)
+		v4l2_ctrl_handler_free(&sensor->ssds[i].ctrl_handler);
+}
+
+static int crlmodule_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+	struct crl_sensor *sensor = to_crlmodule_sensor(subdev);
+	unsigned int i;
+
+	if (sensor->sensor_ds->sensor_cleanup)
+		sensor->sensor_ds->sensor_cleanup(client);
+
+	v4l2_async_unregister_subdev(&sensor->src->sd);
+	for (i = 0; i < sensor->ssds_used; i++) {
+		v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
+		media_entity_cleanup(&sensor->ssds[i].sd.entity);
+	}
+
+	for (i = 0; i < sensor->sensor_ds->power_items; i++) {
+		struct crl_power_seq_entity *entity =
+			&sensor->pwr_entity[i];
+
+		if (entity->type == CRL_POWER_ETY_GPIO_CUSTOM &&
+		    entity->gpiod_priv)
+			gpiod_put(entity->gpiod_priv);
+	}
+
+	crlmodule_nvm_deinit(sensor);
+	crlmodule_release_ds(sensor);
+	crlmodule_free_controls(sensor);
+	crlmodule_release_msrlist(&sensor->msr_list);
+
+	pm_runtime_disable(&client->dev);
+
+	return 0;
+}
+
+static const struct i2c_device_id crlmodule_id_table[] = {
+	{ CRLMODULE_NAME, 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, crlmodule_id_table);
+
+static const struct dev_pm_ops crlmodule_pm_ops = {
+	.runtime_suspend = crlmodule_runtime_suspend,
+	.runtime_resume = crlmodule_runtime_resume,
+	.suspend = crlmodule_suspend,
+	.resume = crlmodule_resume,
+};
+
+static struct i2c_driver crlmodule_i2c_driver = {
+	.driver = {
+		.name = CRLMODULE_NAME,
+		.pm = &crlmodule_pm_ops,
+	},
+	.probe = crlmodule_probe,
+	.remove = crlmodule_remove,
+	.id_table = crlmodule_id_table,
+};
+
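+/*
+ * Example (illustrative only): how a board might bind a sensor to this
+ * driver.  The struct name crlmodule_platform_data is assumed from
+ * include/media/crlmodule.h (outside this file) and every value below
+ * is a placeholder; only the field names xshutdown, ext_clk and
+ * module_name are actually referenced by this driver.
+ *
+ *	static struct crlmodule_platform_data example_pdata = {
+ *		.xshutdown   = 67,		// shutdown GPIO number
+ *		.ext_clk     = 24000000,	// external clock rate in Hz
+ *		.module_name = "IMX185",	// key for supported_sensors[]
+ *	};
+ *
+ *	static struct i2c_board_info example_board __initdata = {
+ *		I2C_BOARD_INFO(CRLMODULE_NAME, 0x1a),
+ *		.platform_data = &example_pdata,
+ *	};
+ */
+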
+module_i2c_driver(crlmodule_i2c_driver);
+
+MODULE_AUTHOR("Vinod Govindapillai ");
+MODULE_AUTHOR("Jouni Ukkonen ");
+MODULE_AUTHOR("Tommi Franttila ");
+MODULE_DESCRIPTION("Generic driver for common register list based "
+		   "camera sensor modules");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/crlmodule/crlmodule-data.c b/drivers/media/i2c/crlmodule/crlmodule-data.c
new file mode 100755
index 000000000000..b1a2412519e3
--- /dev/null
+++ b/drivers/media/i2c/crlmodule/crlmodule-data.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2014 - 2018 Intel Corporation
+ *
+ * Author: Vinod Govindapillai
+ *
+ */
+
+#include "crlmodule.h"
+#include "crl_ov13858_configuration.h"
+#include "crl_imx132_configuration.h"
+#include "crl_imx214_configuration.h"
+#include "crl_imx135_configuration.h"
+#include "crl_imx230_configuration.h"
+#include "crl_imx318_configuration.h"
+#include "crl_ov8858_configuration.h"
+#include "crl_ov13860_configuration.h"
+#include "crl_adv7481_cvbs_configuration.h"
+#include "crl_adv7481_hdmi_configuration.h"
+#include "crl_adv7481_eval_configuration.h"
+#include "crl_imx185_configuration.h"
+#include "crl_ov10635_configuration.h"
+#include "crl_ar0231at_configuration.h"
+#include "crl_ov10640_configuration.h"
+#include "crl_imx477_master_configuration.h"
+#include "crl_imx477_slave_configuration.h"
+#include "crl_imx274_configuration.h"
+#include "crl_ov5670_configuration.h"
+#include "crl_imx290_configuration.h"
+#include "crl_pixter_stub_configuration.h"
+#include "crl_ov2740_configuration.h"
+#include "crl_ov9281_configuration.h"
+#include "crl_magna_configuration.h"
+#include "crl_ar023z_configuration.h"
+#include "crl_ov2775_configuration.h"
+
+static const struct crlmodule_sensors supported_sensors[] = {
+	{ "i2c-OVTIF858:00", "ov13858", &ov13858_crl_configuration },
+	{ "OV13858", "ov13858", &ov13858_crl_configuration },
+	{ "OV13858-2", "ov13858", &ov13858_crl_configuration },
+	{ "i2c-INT3474:00", "ov2740", &ov2740_crl_configuration },
+	{ "OV2740", "ov2740", &ov2740_crl_configuration },
+	{ "i2c-SONY214A:00", "imx214", &imx214_crl_configuration },
+	{ "IMX214", "imx214", &imx214_crl_configuration },
+	{ "i2c-SONY132A:00", "imx132", &imx132_crl_configuration },
+	{ "i2c-INT3471:00", "imx135", &imx135_crl_configuration },
+	{ "i2c-SONY230A:00", "imx230", &imx230_crl_configuration },
+	{ "i2c-INT3477:00", "ov8858", &ov8858_crl_configuration },
+	{ "i2c-OV5670AA:00", "ov5670", &ov5670_crl_configuration },
+	{ "IMX185", "imx185", &imx185_crl_configuration },
+	{ "IMX477-MASTER", "imx477", &imx477_master_crl_configuration },
+	{ "IMX477-SLAVE-1", "imx477", &imx477_slave_crl_configuration },
+	{ "OV13860", "ov13860", &ov13860_crl_configuration },
+	{ "OV9281", "ov9281", &ov9281_crl_configuration },
+	{ "ADV7481 CVBS", "adv7481_cvbs", &adv7481_cvbs_crl_configuration },
+	{ "ADV7481 HDMI", "adv7481_hdmi", &adv7481_hdmi_crl_configuration },
+	{ "ADV7481_EVAL", "adv7481_eval", &adv7481_eval_crl_configuration },
+	{ "ADV7481B_EVAL", "adv7481b_eval", &adv7481b_eval_crl_configuration },
+	{ "i2c-ADV7481A:00", "adv7481_hdmi", &adv7481_hdmi_crl_configuration },
+	{ "i2c-ADV7481B:00", "adv7481_cvbs", &adv7481_cvbs_crl_configuration },
+	{ "SONY318A", "imx318", &imx318_crl_configuration },
+	{ "OV10635", "ov10635", &ov10635_crl_configuration },
+	{ "AR0231AT", "ar0231at", &ar0231at_crl_configuration },
+	{ "OV10640", "ov10640", &ov10640_crl_configuration },
+	{ "IMX274", "imx274", &imx274_crl_configuration },
+	{ "OV5670", "ov5670",
&ov5670_crl_configuration }, + { "IMX290", "imx290", &imx290_crl_configuration}, + { "PIXTER_STUB", "pixter_stub", &pixter_stub_crl_configuration}, + { "PIXTER_STUB_B", "pixter_stub_b", &pixter_stub_b_crl_configuration}, + { "PIXTER_STUB_C", "pixter_stub_c", &pixter_stub_c_crl_configuration}, + { "PIXTER_STUB_D", "pixter_stub_d", &pixter_stub_d_crl_configuration}, + { "PIXTER_STUB_E", "pixter_stub_e", &pixter_stub_e_crl_configuration}, + { "PIXTER_STUB_F", "pixter_stub_f", &pixter_stub_f_crl_configuration}, + { "PIXTER_STUB_G", "pixter_stub_g", &pixter_stub_g_crl_configuration}, + { "PIXTER_STUB_H", "pixter_stub_h", &pixter_stub_h_crl_configuration}, + { "INT3474", "ov2740", &ov2740_crl_configuration }, + { "MAGNA", "magna", &magna_crl_configuration }, + { "AR023Z", "ar023z", &ar023z_crl_configuration }, + { "OV2775", "ov2775", &ov2775_crl_configuration }, +}; + +/* + * Function to populate the CRL data structure from the sensor configuration + * definition file + */ +int crlmodule_populate_ds(struct crl_sensor *sensor, struct device *dev) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(supported_sensors); i++) { + /* Check the ACPI supported modules */ + if (!strcmp(dev_name(dev), supported_sensors[i].pname)) { + sensor->sensor_ds = supported_sensors[i].ds; + dev_info(dev, "%s %s selected\n", + __func__, supported_sensors[i].name); + return 0; + }; + + /* Check the non ACPI modules */ + if (!strcmp(sensor->platform_data->module_name, + supported_sensors[i].pname)) { + sensor->sensor_ds = supported_sensors[i].ds; + dev_info(dev, "%s %s selected\n", + __func__, supported_sensors[i].name); + return 0; + }; + } + + dev_err(dev, "%s No suitable configuration found for %s\n", + __func__, dev_name(dev)); + return -EINVAL; +} + +/* + * Function validate the contents CRL data structure to check if all the + * required fields are filled and are according to the limits. + */ +int crlmodule_validate_ds(struct crl_sensor *sensor) +{ + /* TODO! Revisit this. */ + return 0; +} + +/* Function to free all resources allocated for the CRL data structure */ +void crlmodule_release_ds(struct crl_sensor *sensor) +{ + /* + * TODO! Revisit this. + * Place for cleaning all the resources used for the generation + * of CRL data structure. + */ +} + diff --git a/drivers/media/i2c/crlmodule/crlmodule-msrlist.c b/drivers/media/i2c/crlmodule/crlmodule-msrlist.c new file mode 100644 index 000000000000..a15b76b921d2 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-msrlist.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2016 - 2018 Intel Corporation + +#include +#include +#include "crlmodule-msrlist.h" +#include "crlmodule.h" + +/* + * + * DRVB file is part of the old structure of tagged + * binary container, which is used as such in crlmodule. + * Changes needs to be done in cameralibs to remove the + * tagged structure and convert to untagged drvb format. + * Below are the tagged binary data container structure + * definitions. Most of it is copied from libmsrlisthelper.c + * and some changes done for crlmodule. + * + */ + +static int crlmodule_write_msrlist(struct i2c_client *client, u8 *bufptr, + unsigned int size) +{ + /* + * + * The configuration data contains any number of sequences where + * the first byte (that is, u8) that marks the number of bytes + * in the sequence to follow, is indeed followed by the indicated + * number of bytes of actual data to be written to sensor. 
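+ *
+ * For example (hypothetical bytes), the sequence "03 30 0a 55" is a
+ * single 3-byte write in which, per the convention below, 0x30 0x0a
+ * form the register address 0x300a and 0x55 is the byte written there.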
+ * By convention, the first two bytes of actual data should be + * understood as an address in the sensor address space (hibyte + * followed by lobyte) where the remaining data in the sequence + * will be written. + * + */ + + u8 *ptr = bufptr; + int ret; + + while (ptr < bufptr + size) { + struct i2c_msg msg = { + .addr = client->addr, + .flags = 0, + }; + + msg.len = *ptr++; + msg.buf = ptr; + ptr += msg.len; + + if (ptr > bufptr + size) + return -EINVAL; + + ret = i2c_transfer(client->adapter, &msg, 1); + if (ret < 0) { + dev_err(&client->dev, "i2c write error: %d", ret); + return ret; + } + } + return 0; +} + +static int crlmodule_parse_msrlist(struct i2c_client *client, u8 *buffer, + unsigned int size) +{ + u8 *endptr8 = buffer + size; + int ret; + unsigned int dataset = 0; + struct tbd_data_record_header *header = + (struct tbd_data_record_header *)buffer; + + do { + + if ((u8 *)header + sizeof(*header) > endptr8) + return -EINVAL; + + if ((u8 *)header + header->data_offset + + header->data_size > endptr8) + return -EINVAL; + + dataset++; + + if (header->data_size && (header->flags & 1)) { + + ret = crlmodule_write_msrlist(client, + buffer + header->data_offset, + header->data_size); + if (ret) + return ret; + } + header = (struct tbd_data_record_header *)(buffer + + header->next_offset); + } while (header->next_offset); + + return 0; +} + + +int crlmodule_apply_msrlist(struct i2c_client *client, + const struct firmware *fw) +{ + struct tbd_header *header; + struct tbd_record_header *record; + + header = (struct tbd_header *)fw->data; + record = (struct tbd_record_header *)(header + 1); + + if (record->size && record->class_id != TBD_CLASS_DRV_ID) + return -EINVAL; + + return crlmodule_parse_msrlist(client, (u8 *)(record + 1), + record->size); +} + + +int crlmodule_load_msrlist(struct i2c_client *client, char *name, + const struct firmware **fw) +{ + + struct tbd_header *header; + struct tbd_record_header *record; + int ret = -ENOENT; + + ret = request_firmware(fw, name, &client->dev); + if (ret) { + dev_err(&client->dev, + "Error %d while requesting firmware %s\n", + ret, name); + return ret; + } + header = (struct tbd_header *)(*fw)->data; + + if (sizeof(*header) > (*fw)->size) + goto out; + + /* Check that we have drvb block. 
*/ + if (memcmp(&header->tag, "DRVB", 4)) + goto out; + + if (header->size != (*fw)->size) + goto out; + + if (sizeof(*header) + sizeof(*record) > (*fw)->size) + goto out; + + + return 0; + +out: + crlmodule_release_msrlist(fw); + return ret; +} + + +void crlmodule_release_msrlist(const struct firmware **fw) +{ + release_firmware(*fw); + *fw = NULL; +} diff --git a/drivers/media/i2c/crlmodule/crlmodule-msrlist.h b/drivers/media/i2c/crlmodule/crlmodule-msrlist.h new file mode 100644 index 000000000000..013469bfec1a --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-msrlist.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation */ + +#ifndef __CRLMODULE_MSRLIST_H__ +#define __CRLMODULE_MSRLIST_H__ + +#define TBD_CLASS_DRV_ID 2 + +struct i2c_client; +struct firmware; + +struct tbd_header { + /* Tag identifier, also checks endianness */ + u32 tag; + /* Container size including this header */ + u32 size; + /* Version, format 0xYYMMDDVV */ + u32 version; + /* Revision, format 0xYYMMDDVV */ + u32 revision; + /* Configuration flag bits set */ + u32 config_bits; + /* Global checksum, header included */ + u32 checksum; +} __packed; + +struct tbd_record_header { + /* Size of record including header */ + u32 size; + /* tbd_format_t enumeration values used */ + u8 format_id; + /* Packing method; 0 = no packing */ + u8 packing_key; + /* tbd_class_t enumeration values used */ + u16 class_id; +} __packed; + +struct tbd_data_record_header { + u16 next_offset; + u16 flags; + u16 data_offset; + u16 data_size; +} __packed; + +int crlmodule_load_msrlist(struct i2c_client *client, char *name, + const struct firmware **fw); +int crlmodule_apply_msrlist(struct i2c_client *client, + const struct firmware *fw); +void crlmodule_release_msrlist(const struct firmware **fw); + +#endif /* ifndef __CRLMODULE_MSRLIST_H__ */ diff --git a/drivers/media/i2c/crlmodule/crlmodule-nvm.c b/drivers/media/i2c/crlmodule/crlmodule-nvm.c new file mode 100755 index 000000000000..691027b66446 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-nvm.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2016 - 2018 Intel Corporation + * + * Author: Tommi Franttila + * + */ + +#include +#include "crlmodule.h" +#include "crlmodule-nvm.h" +#include "crlmodule-regs.h" + +#ifdef CONFIG_CRLMODULE_RD_NVM_TO_VCM + int pass_vcm_val; +#endif + +static ssize_t crlmodule_sysfs_nvm_read(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct v4l2_subdev *subdev = i2c_get_clientdata(to_i2c_client(dev)); + struct crl_sensor *sensor = to_crlmodule_sensor(subdev); + + memcpy(buf, sensor->nvm_data, min_t(unsigned long, PAGE_SIZE, + sensor->nvm_size)); + return sensor->nvm_size; +} + +DEVICE_ATTR(nvm, S_IRUGO, crlmodule_sysfs_nvm_read, NULL); + +static unsigned int crlmodule_get_nvm_size(struct crl_sensor *sensor) +{ + + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + unsigned int i, size = 0; + + for (i = 0; i < sensor->sensor_ds->crl_nvm_info.nvm_blobs_items; i++) + size += sensor->sensor_ds->crl_nvm_info.nvm_config[i].size; + + if (size > PAGE_SIZE) { + dev_err(&client->dev, "nvm size too big\n"); + size = 0; + } + return size; +} + +static int crlmodule_get_nvm_data(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + int i; + int rval = 0; + + u8 *nvm_data = sensor->nvm_data; + + if (sensor->sensor_ds->crl_nvm_info.nvm_preop_regs_items) { + dev_dbg(&client->dev, + "%s perform 
pre-operations\n", __func__); + + rval = crlmodule_write_regs( + sensor, + sensor->sensor_ds->crl_nvm_info.nvm_preop_regs, + sensor->sensor_ds->crl_nvm_info.nvm_preop_regs_items); + if (rval) { + dev_err(&client->dev, + "failed to perform nvm pre-operations\n"); + return rval; + } + } + + for (i = 0; i < sensor->sensor_ds->crl_nvm_info.nvm_blobs_items; i++) { + + dev_dbg(&client->dev, + "%s read blob %d dev_addr: 0x%x start_addr: 0x%x size: %d", + __func__, i, + sensor->sensor_ds->crl_nvm_info.nvm_config->dev_addr, + sensor->sensor_ds->crl_nvm_info.nvm_config->start_addr, + sensor->sensor_ds->crl_nvm_info.nvm_config->size); + + crlmodule_block_read(sensor, + sensor->sensor_ds->crl_nvm_info.nvm_config->dev_addr, + sensor->sensor_ds->crl_nvm_info.nvm_config->start_addr, + sensor->sensor_ds->crl_nvm_info.nvm_flags + & CRL_NVM_ADDR_MODE_MASK, + sensor->sensor_ds->crl_nvm_info.nvm_config->size, + nvm_data); +#ifdef CONFIG_CRLMODULE_RD_NVM_TO_VCM + pass_vcm_val = *(nvm_data+7) | (*(nvm_data+8)<<8); + dev_dbg(&client->dev,"%s af_far nvm_data: %d \n",__func__,pass_vcm_val); +#endif + nvm_data += sensor->sensor_ds->crl_nvm_info.nvm_config->size; + sensor->sensor_ds->crl_nvm_info.nvm_config++; + } + + if (sensor->sensor_ds->crl_nvm_info.nvm_postop_regs_items) { + dev_dbg(&client->dev, "%s perform post-operations\n", + __func__); + rval = crlmodule_write_regs( + sensor, + sensor->sensor_ds->crl_nvm_info.nvm_postop_regs, + sensor->sensor_ds->crl_nvm_info.nvm_postop_regs_items); + if (rval) { + dev_err(&client->dev, + "failed to perform nvm post-operations\n"); + return rval; + } + } + return rval; +} + +int crlmodule_nvm_init(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + unsigned int size = crlmodule_get_nvm_size(sensor); + int rval; + + if (size) { + sensor->nvm_data = devm_kzalloc(&client->dev, size, GFP_KERNEL); + if (sensor->nvm_data == NULL) { + dev_err(&client->dev, "nvm buf allocation failed\n"); + return -ENOMEM; + } + sensor->nvm_size = size; + + rval = crlmodule_get_nvm_data(sensor); + if (rval) + goto err; + if (device_create_file(&client->dev, &dev_attr_nvm) != 0) { + dev_err(&client->dev, "sysfs nvm entry failed\n"); + rval = -EBUSY; + goto err; + } + } + + return 0; +err: + sensor->nvm_size = 0; + return rval; +} + +void crlmodule_nvm_deinit(struct crl_sensor *sensor) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + + if (sensor->nvm_size) { + device_remove_file(&client->dev, &dev_attr_nvm); + sensor->nvm_size = 0; + } +} +#ifdef CONFIG_CRLMODULE_RD_NVM_TO_VCM +EXPORT_SYMBOL(pass_vcm_val); +#endif diff --git a/drivers/media/i2c/crlmodule/crlmodule-nvm.h b/drivers/media/i2c/crlmodule/crlmodule-nvm.h new file mode 100644 index 000000000000..42d462d321cc --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-nvm.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation + * + * Author: Tommi Franttila + * + */ + +#ifndef __CRLMODULE_NVM_H_ +#define __CRLMODULE_NVM_H_ + +#include "crlmodule.h" + +#define CRL_NVM_ADDR_MODE_8BIT 0x00000001 +#define CRL_NVM_ADDR_MODE_16BIT 0x00000002 + +#define CRL_NVM_ADDR_MODE_MASK (CRL_NVM_ADDR_MODE_8BIT | \ + CRL_NVM_ADDR_MODE_16BIT) + + +int crlmodule_nvm_init(struct crl_sensor *sensor); +void crlmodule_nvm_deinit(struct crl_sensor *sensor); + +#endif /* __CRLMODULE_NVM_H_ */ diff --git a/drivers/media/i2c/crlmodule/crlmodule-regs.c b/drivers/media/i2c/crlmodule/crlmodule-regs.c new file mode 100644 index 
000000000000..e4b8c8aa36e8 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-regs.c @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2017 - 2018 Intel Corporation + * + * Author: Vinod Govindapillai + * + */ + +#include +#include + +#include "crlmodule.h" +#include "crlmodule-nvm.h" +#include "crlmodule-regs.h" + +static DEFINE_MUTEX(crl_i2c_mutex); + +static bool reg_verify; +module_param(reg_verify, bool, 0444); +MODULE_PARM_DESC(reg_verify, "enable/disable registers write value and read value checking"); + +static int crlmodule_i2c_read(struct crl_sensor *sensor, u16 dev_i2c_addr, + u16 reg, u8 len, u32 *val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct i2c_msg msg[2]; + unsigned char data[4]; + int r; + + dev_dbg(&client->dev, "%s reg, len: [0x%04x, %d]", __func__, reg, len); + + if (len != CRL_REG_LEN_08BIT && len != CRL_REG_LEN_16BIT && + len != CRL_REG_LEN_24BIT && len != CRL_REG_LEN_32BIT) + return -EINVAL; + + if (dev_i2c_addr == CRL_I2C_ADDRESS_NO_OVERRIDE) + msg[0].addr = client->addr; + else + msg[0].addr = dev_i2c_addr; + + msg[1].addr = msg[0].addr; + + msg[0].flags = 0; + msg[0].buf = data; + + if (sensor->sensor_ds->addr_len == CRL_ADDR_7BIT) { + /* change address to 7bit format */ + msg[0].addr = msg[0].addr >> 1; + msg[1].addr = msg[1].addr >> 1; + } + if ((sensor->sensor_ds->addr_len == CRL_ADDR_7BIT) || + (sensor->sensor_ds->addr_len == CRL_ADDR_8BIT)) { + data[0] = (u8) (reg & 0xff); + msg[0].len = 1; + } else { + /* high byte goes out first */ + data[0] = (u8) (reg >> 8); + data[1] = (u8) (reg & 0xff); + msg[0].len = 2; + } + + msg[1].flags = I2C_M_RD; + msg[1].buf = data; + msg[1].len = len; + + r = i2c_transfer(client->adapter, msg, 2); + + if (r < 0) + goto err; + + *val = 0; + /* high byte comes first */ + switch (len) { + case CRL_REG_LEN_32BIT: + *val = (data[0] << 24) + (data[1] << 16) + (data[2] << 8) + + data[3]; + break; + case CRL_REG_LEN_24BIT: + *val = (data[0] << 16) + (data[1] << 8) + data[2]; + break; + case CRL_REG_LEN_16BIT: + *val = (data[0] << 8) + data[1]; + break; + case CRL_REG_LEN_08BIT: + *val = data[0]; + break; + } + + return 0; + +err: + dev_err(&client->dev, "read from offset 0x%x error %d\n", reg, r); + + return r; +} + +static int crlmodule_i2c_write(struct crl_sensor *sensor, u16 dev_i2c_addr, + u16 reg, u8 len, u32 val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct i2c_msg msg; + unsigned char data[6]; + unsigned int retries; + int r; + int ret; + u32 rval; + unsigned char *data_offset; + + if (len != CRL_REG_LEN_08BIT && len != CRL_REG_LEN_16BIT && + len != CRL_REG_LEN_24BIT && len != CRL_REG_LEN_32BIT) + return -EINVAL; + + if (dev_i2c_addr == CRL_I2C_ADDRESS_NO_OVERRIDE) + msg.addr = client->addr; + else + msg.addr = dev_i2c_addr; + + msg.flags = 0; /* Write */ + msg.buf = data; + + if (sensor->sensor_ds->addr_len == CRL_ADDR_7BIT) + msg.addr = msg.addr >> 1; + + if ((sensor->sensor_ds->addr_len == CRL_ADDR_7BIT) || + (sensor->sensor_ds->addr_len == CRL_ADDR_8BIT)) { + data[0] = (u8) (reg & 0xff); + msg.len = 1 + len; + data_offset = &data[1]; + } else { + /* high byte goes out first */ + data[0] = (u8) (reg >> 8); + data[1] = (u8) (reg & 0xff); + msg.len = 2 + len; + data_offset = &data[2]; + } + + dev_dbg(&client->dev, "%s len reg, val: [%d, 0x%04x, 0x%04x]", + __func__, len, reg, val); + + switch (len) { + case CRL_REG_LEN_08BIT: + val = val & 0xFF; + data_offset[0] = val; + break; + case CRL_REG_LEN_16BIT: + val = val & 0xFFFF; 
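+		/* payload bytes are packed high byte first, matching
+		 * the register address bytes above */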
+ data_offset[0] = val >> 8; + data_offset[1] = val; + break; + case CRL_REG_LEN_24BIT: + val = val & 0xFFFFFF; + data_offset[0] = val >> 16; + data_offset[1] = val >> 8; + data_offset[2] = val; + break; + case CRL_REG_LEN_32BIT: + data_offset[0] = val >> 24; + data_offset[1] = val >> 16; + data_offset[2] = val >> 8; + data_offset[3] = val; + break; + } + + for (retries = 0; retries < 5; retries++) { + /* + * Due to unknown reason sensor stops responding. This + * loop is a temporaty solution until the root cause + * is found. + */ + r = i2c_transfer(client->adapter, &msg, 1); + if (r == 1) { + if (retries) + dev_err(&client->dev, + "sensor i2c stall encountered. retries: %d\n", + retries); + + if (reg_verify) { + ret = crlmodule_i2c_read(sensor, dev_i2c_addr, reg, len, &rval); + if (ret < 0) + dev_err(&client->dev, "i2c read error\n"); + else if (rval != val) { + dev_warn(&client->dev, + "reg:0x%x write val(0x%x), read val(0x%x)", + reg, val, rval); + } + } + return 0; + } + + usleep_range(2000, 2000); + } + + dev_err(&client->dev, + "wrote 0x%x to offset 0x%x error %d\n", val, reg, r); + + return r; +} + +int crlmodule_read_reg(struct crl_sensor *sensor, + const struct crl_register_read_rep reg, u32 *val) +{ + return crlmodule_i2c_read(sensor, reg.dev_i2c_addr, reg.address, + reg.len, val); +} + +int crlmodule_write_reg(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 reg, + u8 len, u32 mask, u32 val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + int ret; + u32 val2; + + /* + * Sensor setting sequence may need some delay. + * delay value is specified by reg.val field + */ + if (len == CRL_REG_LEN_DELAY) { + msleep(val); + return 0; + } + + /* + * If the same register is being used for two settings, updating + * one value should not overwrite the other one. Such registers + * must be marked as CRL_REG_READ_AND_UPDATE. 
For such registers + * first read the register and update it + */ + + if (len & CRL_REG_READ_AND_UPDATE) { + u32 tmp; + /* Some rare cases 2 different devices can + * make i2c accesses to same physical i2c address, + * those read modify writes must be protected by static + * mutex + */ + if (sensor->sensor_ds->i2c_mutex_in_use) + mutex_lock(&crl_i2c_mutex); + + ret = crlmodule_i2c_read(sensor, dev_i2c_addr, reg, + len & CRL_REG_LEN_READ_MASK, &val2); + if (ret) { + if (sensor->sensor_ds->i2c_mutex_in_use) + mutex_unlock(&crl_i2c_mutex); + return ret; + } + + tmp = val2 & ~mask; + tmp |= val & mask; + val = tmp; + } + + ret = crlmodule_i2c_write(sensor, dev_i2c_addr, reg, + len & CRL_REG_LEN_READ_MASK, val); + + if ((sensor->sensor_ds->i2c_mutex_in_use) + && (len & CRL_REG_READ_AND_UPDATE)) + mutex_unlock(&crl_i2c_mutex); + + if (ret < 0) { + dev_err(&client->dev, + "error %d writing reg 0x%4.4x, val 0x%2.2x", + ret, reg, val); + return ret; + } + + return 0; +} + +int crlmodule_write_regs(struct crl_sensor *sensor, + const struct crl_register_write_rep *regs, int len) +{ + unsigned int i; + int ret; + + for (i = 0; i < len; i++) { + ret = crlmodule_write_reg(sensor, + regs[i].dev_i2c_addr, + regs[i].address, + regs[i].len, + regs[i].mask, + regs[i].val); + if (ret < 0) + return ret; + }; + + return 0; +} + +int crlmodule_block_read(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 addr, + u8 addr_mode, u16 len, u8 *buf) +{ + struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd); + struct i2c_msg msg[2]; + u8 data[2]; + u16 offset = 0; + int r; + + memset(msg, 0, sizeof(msg)); + + if (dev_i2c_addr == CRL_I2C_ADDRESS_NO_OVERRIDE) { + msg[0].addr = client->addr; + msg[1].addr = client->addr; + } else { + msg[0].addr = dev_i2c_addr; + msg[1].addr = dev_i2c_addr; + } + + if (addr_mode & CRL_NVM_ADDR_MODE_8BIT) + msg[0].len = 1; + else if (addr_mode & CRL_NVM_ADDR_MODE_16BIT) + msg[0].len = 2; + else + return -EINVAL; + + msg[0].flags = 0; + msg[1].flags = I2C_M_RD; + + while (offset < len) { + if (addr_mode & CRL_NVM_ADDR_MODE_8BIT) { + data[0] = addr & 0xff; + } else { + data[0] = (addr >> 8) & 0xff; + data[1] = addr & 0xff; + } + + msg[0].buf = data; + msg[1].len = min(CRLMODULE_I2C_BLOCK_SIZE, len - offset); + msg[1].buf = &buf[offset]; + r = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); + if (r != ARRAY_SIZE(msg)) { + if (r >= 0) + r = -EIO; + goto err; + } + addr += msg[1].len; + offset += msg[1].len; + } + return 0; +err: + dev_err(&client->dev, "read from offset 0x%x error %d\n", offset, r); + return r; +} diff --git a/drivers/media/i2c/crlmodule/crlmodule-regs.h b/drivers/media/i2c/crlmodule/crlmodule-regs.h new file mode 100644 index 000000000000..6d84486e1ae1 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-regs.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation + * + * Author: Vinod Govindapillai + * + */ + +#ifndef __CRLMODULE_REGS_H_ +#define __CRLMODULE_REGS_H_ + +struct crl_sensor; +struct crl_register_read_rep; +struct crl_register_write_rep; + +#define CRLMODULE_I2C_BLOCK_SIZE 0x20 + +int crlmodule_read_reg(struct crl_sensor *sensor, + const struct crl_register_read_rep reg, u32 *val); +int crlmodule_write_regs(struct crl_sensor *sensor, + const struct crl_register_write_rep *regs, int len); +int crlmodule_write_reg(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 reg, + u8 len, u32 mask, u32 val); +int crlmodule_block_read(struct crl_sensor *sensor, u16 dev_i2c_addr, u16 addr, + u8 addr_mode, 
u16 len, u8 *buf); + +#endif /* __CRLMODULE_REGS_H_ */ diff --git a/drivers/media/i2c/crlmodule/crlmodule-sensor-ds.h b/drivers/media/i2c/crlmodule/crlmodule-sensor-ds.h new file mode 100644 index 000000000000..ff03185b1025 --- /dev/null +++ b/drivers/media/i2c/crlmodule/crlmodule-sensor-ds.h @@ -0,0 +1,622 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation + * + * Author: Vinod Govindapillai + * + */ + +#ifndef __CRLMODULE_SENSOR_DS_H_ +#define __CRLMODULE_SENSOR_DS_H_ + +#include +#include "crlmodule.h" + +#define CRL_REG_LEN_08BIT 1 +#define CRL_REG_LEN_16BIT 2 +#define CRL_REG_LEN_24BIT 3 +#define CRL_REG_LEN_32BIT 4 + +#define CRL_REG_READ_AND_UPDATE (1 << 3) +#define CRL_REG_LEN_READ_MASK 0x07 +#define CRL_REG_LEN_DELAY 0x10 + +#define CRL_FLIP_DEFAULT_NONE 0 +#define CRL_FLIP_HFLIP 1 +#define CRL_FLIP_VFLIP 2 +#define CRL_FLIP_HFLIP_VFLIP 3 + +#define CRL_FLIP_HFLIP_MASK 0xfe +#define CRL_FLIP_VFLIP_MASK 0xfd + +#define CRL_PIXEL_ORDER_GRBG 0 +#define CRL_PIXEL_ORDER_RGGB 1 +#define CRL_PIXEL_ORDER_BGGR 2 +#define CRL_PIXEL_ORDER_GBRG 3 +#define CRL_PIXEL_ORDER_IGNORE 255 + +/* Flag to notify configuration selction imact from V4l2 Ctrls */ +#define CRL_IMPACTS_NO_IMPACT 0 +#define CRL_IMPACTS_PLL_SELECTION (1 << 1) +#define CRL_IMPACTS_MODE_SELECTION (1 << 2) + +/* + * In crl_dynamic_entity::entity_type is denoted by bits 6 and 7 + * 0 -> crl_dynamic_entity:entity_value is a constant + * 1 -> crl_dynamic_entity:entity_value is a referene to variable + * 2 -> crl_dynamic_entity:entity_value is a v4l2_ctrl value + * 3 -> crl_dynamic_entity:entity_value is a 8 bit register address + */ +enum crl_dynamic_entity_type { + CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST = 0, + CRL_DYNAMIC_VAL_OPERAND_TYPE_VAR_REF, + CRL_DYNAMIC_VAL_OPERAND_TYPE_CTRL_VAL, + CRL_DYNAMIC_VAL_OPERAND_TYPE_REG_VAL, /* Only 8bit registers */ +}; + +/* + * For some combo device which has some devices inside itself with different + * i2c address, adding flag to specify whether current device needs i2c + * address override. + * For back-compatibility, making flag equals 0. So existing sensor configure + * doesn't need to be modified. + */ +#define CRL_I2C_ADDRESS_NO_OVERRIDE 0 + +struct crl_sensor; +struct i2c_client; + +enum crl_subdev_type { + CRL_SUBDEV_TYPE_SCALER, + CRL_SUBDEV_TYPE_BINNER, + CRL_SUBDEV_TYPE_PIXEL_ARRAY, +}; + +enum crl_v4l2ctrl_op_type { + CRL_V4L2_CTRL_SET_OP, + CRL_V4L2_CTRL_GET_OP, +}; + +enum crl_v4l2ctrl_update_context { + SENSOR_IDLE, /* Powered on. 
But not streamind */ + SENSOR_STREAMING, /* Sensor streaming */ + SENSOR_POWERED_ON, /* streaming or idle */ +}; + +enum crl_operators { + CRL_BITWISE_AND = 0, + CRL_BITWISE_OR, + CRL_BITWISE_LSHIFT, + CRL_BITWISE_RSHIFT, + CRL_BITWISE_XOR, + CRL_BITWISE_COMPLEMENT, + CRL_ADD, + CRL_SUBTRACT, + CRL_MULTIPLY, + CRL_DIV, + CRL_ASSIGNMENT, +}; + +/* Replicated from videodev2.h */ +enum crl_v4l2_ctrl_type { + CRL_V4L2_CTRL_TYPE_INTEGER = 1, + CRL_V4L2_CTRL_TYPE_BOOLEAN, + CRL_V4L2_CTRL_TYPE_MENU_INT, + CRL_V4L2_CTRL_TYPE_MENU_ITEMS, + CRL_V4L2_CTRL_TYPE_BUTTON, + CRL_V4L2_CTRL_TYPE_INTEGER64, + CRL_V4L2_CTRL_TYPE_CTRL_CLASS, + CRL_V4L2_CTRL_TYPE_CUSTOM, +}; + +enum crl_addr_len { + CRL_ADDR_16BIT = 0, + CRL_ADDR_8BIT, + CRL_ADDR_7BIT, +}; + +enum crl_operands { + CRL_CONSTANT = 0, + CRL_VARIABLE, + CRL_CONTROL, +}; + +/* References to the CRL driver member variables */ +enum crl_member_data_reference_ids { + CRL_VAR_REF_OUTPUT_WIDTH = 1, + CRL_VAR_REF_OUTPUT_HEIGHT, + CRL_VAR_REF_PA_CROP_WIDTH, + CRL_VAR_REF_PA_CROP_HEIGHT, + CRL_VAR_REF_FRAME_TIMING_WIDTH, + CRL_VAR_REF_FRAME_TIMING_HEIGHT, + CRL_VAR_REF_BINNER_WIDTH, + CRL_VAR_REF_BINNER_HEIGHT, + CRL_VAR_REF_H_BINN_FACTOR, + CRL_VAR_REF_V_BINN_FACTOR, + CRL_VAR_REF_SCALE_FACTOR, + CRL_VAR_REF_BITSPERPIXEL, + CRL_VAR_REF_PIXELRATE_PA, + CRL_VAR_REF_PIXELRATE_CSI, + CRL_VAR_REF_PIXELRATE_LINK_FREQ, +}; + +enum crl_frame_desc_type { + CRL_V4L2_MBUS_FRAME_DESC_TYPE_PLATFORM, + CRL_V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL, + CRL_V4L2_MBUS_FRAME_DESC_TYPE_CCP2, + CRL_V4L2_MBUS_FRAME_DESC_TYPE_CSI2, +}; + +enum crl_pwr_ent_type { + CRL_POWER_ETY_GPIO_FROM_PDATA = 1, + CRL_POWER_ETY_GPIO_FROM_PDATA_BY_NUMBER, + CRL_POWER_ETY_GPIO_CUSTOM, + CRL_POWER_ETY_REGULATOR_FRAMEWORK, + CRL_POWER_ETY_CLK_FRAMEWORK, +}; + +struct crl_dynamic_entity { + enum crl_dynamic_entity_type entity_type; + u32 entity_val; +}; + +struct crl_arithmetic_ops { + enum crl_operators op; + struct crl_dynamic_entity operand; +}; + +struct crl_dynamic_calculated_entity { + u8 ops_items; + struct crl_arithmetic_ops *ops; +}; + +struct crl_register_write_rep { + u16 address; + u8 len; + u32 val; + u16 dev_i2c_addr; + u32 mask; +}; + +struct crl_register_read_rep { + u16 address; + u8 len; + u32 mask; + u16 dev_i2c_addr; +}; + +/* + * crl_dynamic_register_access is used mainly in the v4l2_ctrl context. + * This is intended to provide some generic arithmetic operations on the values + * to be written to a control's register or on the values read from a register. + * These arithmetic operations are controlled using struct crl_arithmetic_ops. + * + * One important information is that this structure behave differently for the + * set controls and volatile get controls. + * + * For the set control operation, the usage of the members are straight forward. + * The set control can result into multiple register write operations. Hence + * there can be more than one crl_dynamic_register_access entries associated + * with a control which results into separate register writes. + * + * But for the volatile get control operation, where a v4l2 control is used + * to query read only information from the sensor, there could be only one + * crl_dynamic_register_access entry. Because the result of a get control is + * a single value. crl_dynamic_register_access.address, len and mask values are + * not used in volatile get control context. 
Instead all the needed information + * must be encoded into member -> ops (struct crl_arithmetic_ops) + */ +struct crl_dynamic_register_access { + u16 address; + u8 len; + u32 mask; + u8 ops_items; + struct crl_arithmetic_ops *ops; + u16 dev_i2c_addr; +}; + +struct crl_sensor_detect_config { + struct crl_register_read_rep reg; /* Register to read */ + unsigned int width; /* width of the value in chars*/ +}; + +struct crl_sensor_subdev_config { + enum crl_subdev_type subdev_type; + char name[32]; +}; + +/* + * The ctrl id value pair which should be compared when selecting a + * configuration. This gives flexibility to provide any data through set ctrl + * and provide selection mechanism for a particular configuration + */ +struct crl_ctrl_data_pair { + u32 ctrl_id; + u32 data; +}; + +enum crl_dep_ctrl_action_type { + CRL_DEP_CTRL_ACTION_TYPE_SELF = 0, + CRL_DEP_CTRL_ACTION_TYPE_DEP_CTRL, +}; + +enum crl_dep_ctrl_condition { + CRL_DEP_CTRL_CONDITION_GREATER = 0, + CRL_DEP_CTRL_CONDITION_LESSER, + CRL_DEP_CTRL_CONDITION_EQUAL, +}; + +enum crl_dep_ctrl_action { + CRL_DEP_CTRL_CONDITION_ADD = 0, + CRL_DEP_CTRL_CONDITION_SUBTRACT, + CRL_DEP_CTRL_CONDITION_MULTIPLY, + CRL_DEP_CTRL_CONDITION_DIVIDE, +}; + +struct crl_dep_ctrl_cond_action { + enum crl_dep_ctrl_condition cond; + u32 cond_value; + enum crl_dep_ctrl_action action; + u32 action_value; +}; + +/* Dependency control provision */ +struct crl_dep_ctrl_provision { + u32 ctrl_id; + enum crl_dep_ctrl_action_type action_type; + unsigned int action_items; + struct crl_dep_ctrl_cond_action *action; +}; + +/* + * Multiple set of register lists can be written to + * the sensor configuration based on the control's value + * struct crl_dep_reg_list introduces a provision for this + * purpose. + * + * struct crl_dep_reg_list *dep_regs; + * + * In dep_regs, a "condition" and "value" is added which is + * compared with ctrl->val and the register list that is to + * be written to the sensor. + * + * Example: For a v4l2_ctrl, if we need to set + * reg_list A when ctrl->val > 60 + * reg_list B when ctrl->val < 60 + * and reg_list C when ctrl->val == 60 + * + * So dep_regs block should be like this in the sensor + * specific configuration file: + * + * dep_regs = { + * { + * reg_condition = CRL_DEP_CTRL_CONDITION_GREATER, + * cond_value = { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 60 }, + * no_direct_regs = sizeof(X) + * direct_regs = X + * no_dyn_items = sizeof(A) + * dyn_regs = A + * }, + * { + * reg_condition = CRL_DEP_CTRL_CONDITION_LESSER, + * cond_value = { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 60 }, + * no_direct_regs = 0 + * direct_regs = 0 + * no_dyn_items = sizeof(B) + * dyn_regs = B + * }, + * { + * reg_condition = CRL_DEP_CTRL_CONDITION_EQUAL, + * cond_value = { CRL_DYNAMIC_VAL_OPERAND_TYPE_CONST, 60 }, + * no_direct_regs = sizeof(Z) + * direct_regs = Z + * no_dyn_items = size(C) + * dyn_regs = C + * }, + * } + * cond_value is defined as dynamic entity, which can be a constant, + * another control value or a reference to the pre-defined set of variables + * or a register value. + * + * CRL driver will execute the above dep_regs in the same order + * as it is written. care must be taken for eample in the cases + * like, ctrl->val > 60, reg_list A. and if ctrl_val > 80, + * reg_list D etc. 
+ */ + +struct crl_dep_reg_list { + enum crl_dep_ctrl_condition reg_cond; + struct crl_dynamic_entity cond_value; + unsigned int no_direct_regs; + struct crl_register_write_rep *direct_regs; + unsigned int no_dyn_items; + struct crl_dynamic_register_access *dyn_regs; +}; + +struct crl_sensor_limits { + unsigned int x_addr_max; + unsigned int y_addr_max; + unsigned int x_addr_min; + unsigned int y_addr_min; + unsigned int min_frame_length_lines; + unsigned int max_frame_length_lines; + unsigned int min_line_length_pixels; + unsigned int max_line_length_pixels; + u8 scaler_m_min; + u8 scaler_m_max; + u8 scaler_n_min; + u8 scaler_n_max; + u8 min_even_inc; + u8 max_even_inc; + u8 min_odd_inc; + u8 max_odd_inc; +}; + +struct crl_v4l2_ctrl_data_std { + s64 min; + s64 max; + u64 step; + s64 def; +}; + +struct crl_v4l2_ctrl_data_menu_items { + const char *const *menu; + unsigned int size; +}; + +struct crl_v4l2_ctrl_data_std_menu { + const int64_t *std_menu; + unsigned int size; +}; + +struct crl_v4l2_ctrl_data_int_menu { + const s64 *menu; + s64 max; + s64 def; +}; + +union crl_v4l2_ctrl_data_types { + struct crl_v4l2_ctrl_data_std std_data; + struct crl_v4l2_ctrl_data_menu_items v4l2_menu_items; + struct crl_v4l2_ctrl_data_std_menu v4l2_std_menu; + struct crl_v4l2_ctrl_data_int_menu v4l2_int_menu; +}; + +/* + * Please note a difference in the usage of "regs" member in case of a + * volatile get control for read only purpose. Please check the + * "struct crl_dynamic_register_access" declaration comments for more details. + * + * Read only controls must have "flags" V4L2_CTRL_FLAG_READ_ONLY set. + */ +struct crl_v4l2_ctrl { + enum crl_subdev_type sd_type; + enum crl_v4l2ctrl_op_type op_type; + enum crl_v4l2ctrl_update_context context; + char name[32]; + u32 ctrl_id; + enum crl_v4l2_ctrl_type type; + union crl_v4l2_ctrl_data_types data; + unsigned long flags; + u32 impact; /* If this control impact any config selection */ + struct v4l2_ctrl *ctrl; + unsigned int regs_items; + struct crl_dynamic_register_access *regs; + unsigned int dep_items; + struct crl_dep_ctrl_provision *dep_ctrls; + enum v4l2_ctrl_type v4l2_type; + unsigned int crl_ctrl_dep_reg_list; /* contains no. of dep_regs */ + struct crl_dep_reg_list *dep_regs; +}; + +struct crl_pll_configuration { + s64 input_clk; + s64 op_sys_clk; + u8 bitsperpixel; + u32 pixel_rate_csi; + u32 pixel_rate_pa; + u8 csi_lanes; + unsigned int comp_items; + struct crl_ctrl_data_pair *ctrl_data; + unsigned int pll_regs_items; + const struct crl_register_write_rep *pll_regs; +}; + +struct crl_subdev_rect_rep { + enum crl_subdev_type subdev_type; + struct v4l2_rect in_rect; + struct v4l2_rect out_rect; +}; + +struct crl_mode_rep { + unsigned int sd_rects_items; + const struct crl_subdev_rect_rep *sd_rects; + u8 binn_hor; + u8 binn_vert; + u8 scale_m; + s32 width; + s32 height; + unsigned int comp_items; + struct crl_ctrl_data_pair *ctrl_data; + unsigned int mode_regs_items; + const struct crl_register_write_rep *mode_regs; + + /* + * Minimum and maximum value for line length pixels and frame length + * lines are added for modes. This facilitates easy handling of + * modes which binning skipping and affects the calculation of + * vblank and hblank values. 
+	 *
+	 * The blank values are limited based on the following logic.
+	 *
+	 * If mode specific limits are available:
+	 * hblank = clamp(min_llp - PA_width, max_llp - PA_width)
+	 * vblank = clamp(min_fll - PA_height, max_fll - PA_height)
+	 *
+	 * If mode specific blanking limits are not available, then the
+	 * sensor limits will be used in the same manner.
+	 *
+	 * If sensor mode limits are not available, then the values will be
+	 * written directly to the associated control registers.
+	 */
+	s32 min_llp; /* minimum/maximum value for line length pixels */
+	s32 max_llp;
+	s32 min_fll;
+	s32 max_fll; /* minimum/maximum value for frame length lines */
+};
+
+struct crl_csi_data_fmt {
+	u32 code;
+	u8 pixel_order;
+	u8 bits_per_pixel;
+	unsigned int regs_items;
+	const struct crl_register_write_rep *regs;
+};
+
+struct crl_flip_data {
+	u8 flip;
+	u8 pixel_order;
+};
+
+struct crl_power_seq_entity {
+	enum crl_pwr_ent_type type;
+	char ent_name[12];
+	int ent_number;
+	u16 address;
+	unsigned int val;
+	unsigned int undo_val; /* Undo value if any previous step failed */
+	unsigned int delay; /* delay in microseconds */
+	struct regulator *regulator_priv; /* R/W */
+	struct gpio_desc *gpiod_priv;
+};
+
+struct crl_nvm_blob {
+	u8 dev_addr;
+	u16 start_addr;
+	u16 size;
+};
+
+struct crl_nvm {
+	unsigned int nvm_preop_regs_items;
+	const struct crl_register_write_rep *nvm_preop_regs;
+
+	unsigned int nvm_postop_regs_items;
+	const struct crl_register_write_rep *nvm_postop_regs;
+
+	unsigned int nvm_blobs_items;
+	struct crl_nvm_blob *nvm_config;
+	u32 nvm_flags;
+};
+
+/* Representation for v4l2_mbus_frame_desc_entry */
+struct crl_frame_desc {
+	struct crl_dynamic_entity flags;
+	struct crl_dynamic_entity bpp;
+	struct crl_dynamic_entity pixelcode;
+	struct crl_dynamic_entity start_line;
+	struct crl_dynamic_entity start_pixel;
+	struct crl_dynamic_calculated_entity width;
+	struct crl_dynamic_calculated_entity height;
+	struct crl_dynamic_entity length;
+	struct crl_dynamic_entity csi2_channel;
+	struct crl_dynamic_entity csi2_data_type;
+};
+
+typedef int (*sensor_specific_init)(struct i2c_client *);
+typedef int (*sensor_specific_cleanup)(struct i2c_client *);
+
+struct crl_sensor_configuration {
+
+	const struct crl_clock_entity *clock_entity;
+
+	const unsigned int power_items;
+	const struct crl_power_seq_entity *power_entities;
+	const unsigned int power_delay; /* in microseconds */
+
+	const unsigned int onetime_init_regs_items;
+	const struct crl_register_write_rep *onetime_init_regs;
+
+	const unsigned int powerup_regs_items;
+	const struct crl_register_write_rep *powerup_regs;
+
+	const unsigned int poweroff_regs_items;
+	const struct crl_register_write_rep *poweroff_regs;
+
+	const unsigned int id_reg_items;
+	const struct crl_sensor_detect_config *id_regs;
+
+	const unsigned int subdev_items;
+	const struct crl_sensor_subdev_config *subdevs;
+
+	const struct crl_sensor_limits *sensor_limits;
+
+	const unsigned int pll_config_items;
+	const struct crl_pll_configuration *pll_configs;
+
+	const unsigned int modes_items;
+	const struct crl_mode_rep *modes;
+	/*
+	 * The fail safe mode should be the largest resolution available in
+	 * the mode list. If none of the mode parameters are matched, the
+	 * driver will select this mode for streaming.
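+	 *
+	 * (e.g. with a hypothetical mode list of 1280x720, 1920x1080 and
+	 * 3280x2464, fail_safe_mode_index should point at the 3280x2464
+	 * entry.)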
+	 */
+	const unsigned int fail_safe_mode_index;
+
+	const unsigned int streamon_regs_items;
+	const struct crl_register_write_rep *streamon_regs;
+
+	const unsigned int streamoff_regs_items;
+	const struct crl_register_write_rep *streamoff_regs;
+
+	const unsigned int v4l2_ctrls_items;
+	const struct crl_v4l2_ctrl *v4l2_ctrl_bank;
+
+	const unsigned int csi_fmts_items;
+	const struct crl_csi_data_fmt *csi_fmts;
+
+	const unsigned int flip_items;
+	const struct crl_flip_data *flip_data;
+
+	struct crl_nvm crl_nvm_info;
+
+	enum crl_addr_len addr_len;
+
+	unsigned int frame_desc_entries;
+	enum crl_frame_desc_type frame_desc_type;
+	struct crl_frame_desc *frame_desc;
+	char *msr_file_name;
+
+	sensor_specific_init sensor_init;
+	sensor_specific_cleanup sensor_cleanup;
+	/*
+	 * IRQ handlers for a threaded IRQ. These are needed if the driver
+	 * needs to handle a GPIO interrupt; crl_threaded_irq_fn is then
+	 * mandatory. The IRQ pin configuration is in the platform data.
+	 */
+	irqreturn_t (*crl_irq_fn)(int irq, void *sensor_struct);
+	irqreturn_t (*crl_threaded_irq_fn)(int irq, void *sensor_struct);
+	const bool irq_in_use;
+	const bool i2c_mutex_in_use;
+};
+
+struct crlmodule_sensors {
+	char *pname;
+	char *name;
+	struct crl_sensor_configuration *ds;
+};
+
+/*
+ * Function to populate the CRL data structure from the sensor
+ * configuration definition file
+ */
+int crlmodule_populate_ds(struct crl_sensor *sensor, struct device *dev);
+
+/*
+ * Function to validate the contents of the CRL data structure, checking
+ * that all the required fields are filled and are within the limits.
+ */
+int crlmodule_validate_ds(struct crl_sensor *sensor);
+
+/* Function to free all resources allocated for the CRL data structure */
+void crlmodule_release_ds(struct crl_sensor *sensor);
+
+#endif /* __CRLMODULE_SENSOR_DS_H_ */
diff --git a/drivers/media/i2c/crlmodule/crlmodule.h b/drivers/media/i2c/crlmodule/crlmodule.h
new file mode 100644
index 000000000000..e68e82fd0634
--- /dev/null
+++ b/drivers/media/i2c/crlmodule/crlmodule.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2014 - 2018 Intel Corporation
+ *
+ * Author: Vinod Govindapillai
+ *
+ */
+
+#ifndef __CRLMODULE_PRIV_H_
+#define __CRLMODULE_PRIV_H_
+
+#include
+#include
+#include
+#include
+#include "../../../../include/media/crlmodule.h"
+#include
+#include
+#include "../../../../include/uapi/linux/crlmodule.h"
+#include "crlmodule-sensor-ds.h"
+
+#define CRL_SUBDEVS 3
+
+#define CRL_PA_PAD_SRC 0
+#define CRL_PAD_SINK 0
+#define CRL_PAD_SRC 1
+#define CRL_PADS 2
+
+struct crl_subdev {
+	struct v4l2_subdev sd;
+	struct media_pad pads[2];
+	struct v4l2_rect sink_fmt;
+	struct v4l2_rect crop[2];
+	struct v4l2_rect compose; /* compose on sink */
+	unsigned short sink_pad;
+	unsigned short source_pad;
+	int npads;
+	struct crl_sensor *sensor;
+	struct v4l2_ctrl_handler ctrl_handler;
+	unsigned int field;
+	unsigned int *route_flags;
+};
+
+struct crl_sensor {
+	/*
+	 * "mutex" is used to serialise access to all fields here
+	 * except v4l2_ctrls at the end of the struct. "mutex" is also
+	 * used to serialise access to file handle specific
+	 * information. The exception to this rule is the power_mutex
+	 * below.
+	 */
+	struct mutex mutex;
+	/*
+	 * The power mutex became a necessity because v4l2_ctrl_handler_setup()
+	 * is called from the power-on function, which needs to be serialised;
+	 * but v4l2_ctrl_handler_setup() uses "mutex", so that lock cannot be
+	 * used here.
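+	 *
+	 * A hedged sketch of the power-on path that motivates this lock
+	 * (crl_run_power_sequence() is a hypothetical stand-in for the
+	 * driver's real power sequencing helper):
+	 *
+	 *	mutex_lock(&sensor->power_mutex);
+	 *	rval = crl_run_power_sequence(sensor);
+	 *	if (!rval)
+	 *		rval = v4l2_ctrl_handler_setup(
+	 *			&sensor->pixel_array->ctrl_handler);
+	 *	mutex_unlock(&sensor->power_mutex);
+	 *
+	 * v4l2_ctrl_handler_setup() ends up taking "mutex" via the control
+	 * handler, which is why power_mutex has to be a separate lock.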
+	 */
+	struct mutex power_mutex;
+
+	struct crl_subdev ssds[CRL_SUBDEVS];
+	u32 ssds_used;
+	struct crl_subdev *src;
+	struct crl_subdev *binner;
+	struct crl_subdev *scaler;
+	struct crl_subdev *pixel_array;
+
+	struct crlmodule_platform_data *platform_data;
+
+	u8 binning_horizontal;
+	u8 binning_vertical;
+
+	u8 sensor_mode;
+	u8 scale_m;
+	u8 fmt_index;
+	u8 flip_info;
+	u8 pll_index;
+
+	int power_count;
+
+	bool streaming;
+
+	struct crl_sensor_configuration *sensor_ds;
+	struct crl_v4l2_ctrl *v4l2_ctrl_bank;
+
+	/* These are mandatory controls, so it is good to keep references */
+	struct v4l2_ctrl *pixel_rate_pa;
+	struct v4l2_ctrl *link_freq;
+	struct v4l2_ctrl *pixel_rate_csi;
+
+	s64 *link_freq_menu;
+
+	/* If an extra v4l2 control has an impact on PLL selection */
+	bool ext_ctrl_impacts_pll_selection;
+	bool ext_ctrl_impacts_mode_selection;
+	bool blanking_ctrl_not_use;
+	bool direct_mode_in_use;
+	const struct crl_mode_rep *current_mode;
+
+	struct clk *xclk;
+	struct crl_power_seq_entity *pwr_entity;
+	unsigned int irq;
+
+	u8 *nvm_data;
+	u16 nvm_size;
+
+	/*
+	 * Pointer to a binary file which contains tunable IQ parameters
+	 * like NR, DPC and BLC. Not all MSRs are moved to the binary at
+	 * the moment.
+	 */
+	const struct firmware *msr_list;
+	/*
+	 * Pointer to a sensor specific data structure, which can be used
+	 * for example in interrupt specific code.
+	 */
+	void *sensor_specific_data;
+};
+
+#define to_crlmodule_subdev(_sd) \
+	container_of(_sd, struct crl_subdev, sd)
+
+#define to_crlmodule_sensor(_sd) \
+	(to_crlmodule_subdev(_sd)->sensor)
+
+#endif /* __CRLMODULE_PRIV_H_ */
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 39f51daa7558..c5642813eff1 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -463,8 +463,13 @@ static void cx23885_initialize(struct i2c_client *client)
 {
 	DEFINE_WAIT(wait);
 	struct cx25840_state *state = to_state(i2c_get_clientdata(client));
+	u32 clk_freq = 0;
 	struct workqueue_struct *q;
 
+	/* cx23885 sets hostdata to clk_freq pointer */
+	if (v4l2_get_subdev_hostdata(&state->sd))
+		clk_freq = *((u32 *)v4l2_get_subdev_hostdata(&state->sd));
+
 	/*
 	 * Come out of digital power down
 	 * The CX23888, at least, needs this, otherwise registers aside from
@@ -500,8 +505,13 @@ static void cx23885_initialize(struct i2c_client *client)
 	 * 50.0 MHz * (0xb + 0xe8ba26/0x2000000)/4 = 5 * 28.636363 MHz
 	 * 572.73 MHz before post divide
 	 */
-	/* HVR1850 or 50MHz xtal */
-	cx25840_write(client, 0x2, 0x71);
+	if (clk_freq == 25000000) {
+		/* 888/ImpactVCBe or 25MHz xtal */
+		; /* nothing to do */
+	} else {
+		/* HVR1850 or 50MHz xtal */
+		cx25840_write(client, 0x2, 0x71);
+	}
 	cx25840_write4(client, 0x11c, 0x01d1744c);
 	cx25840_write4(client, 0x118, 0x00000416);
 	cx25840_write4(client, 0x404, 0x0010253e);
@@ -544,9 +554,15 @@ static void cx23885_initialize(struct i2c_client *client)
 	/* HVR1850 */
 	switch (state->id) {
 	case CX23888_AV:
-		/* 888/HVR1250 specific */
-		cx25840_write4(client, 0x10c, 0x13333333);
-		cx25840_write4(client, 0x108, 0x00000515);
+		if (clk_freq == 25000000) {
+			/* 888/ImpactVCBe or 25MHz xtal */
+			cx25840_write4(client, 0x10c, 0x01b6db7b);
+			cx25840_write4(client, 0x108, 0x00000512);
+		} else {
+			/* 888/HVR1250 or 50MHz xtal */
+			cx25840_write4(client, 0x10c, 0x13333333);
+			cx25840_write4(client, 0x108, 0x00000515);
+		}
 		break;
 	default:
 		cx25840_write4(client, 0x10c, 0x002be2c9);
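For context, the bridge driver is expected to publish its crystal frequency through the sub-device host data before cx23885_initialize() runs. A minimal sketch of that hand-off (the variable and function names are hypothetical, not the actual cx23885 bridge code):

	/* Bridge side (sketch): advertise a 25 MHz xtal to the cx25840 core. */
	static u32 impactvcbe_clk_freq = 25000000;

	static void bridge_attach_cx25840(struct v4l2_subdev *sd)
	{
		v4l2_set_subdev_hostdata(sd, &impactvcbe_clk_freq);
	}

The pointed-to value must stay valid until initialization completes, since cx23885_initialize() dereferences the stored pointer.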
@@ -576,7 +592,7 @@ static void cx23885_initialize(struct i2c_client *client)
 	 * 368.64 MHz before post divide
 	 * 122.88 MHz / 0xa = 12.288 MHz
 	 */
-	/* HVR1850 or 50MHz xtal */
+	/* HVR1850 or 50MHz xtal or 25MHz xtal */
 	cx25840_write4(client, 0x114, 0x017dbf48);
 	cx25840_write4(client, 0x110, 0x000a030e);
 	break;
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index 95af4fc99cd0..da7b4f18ae33 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -1,78 +1,74 @@
-/*
- * Copyright (c) 2015--2017 Intel Corporation.
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2015 - 2018 Intel Corporation
 *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
+ * Based on ATOMISP dw9714 implementation by
+ * Huang Shenbo
+#include
 #include
 #include
 #include
+#include
+#include
 #include
 #include
-
-#define DW9714_NAME "dw9714"
-#define DW9714_MAX_FOCUS_POS 1023
-/*
- * This acts as the minimum granularity of lens movement.
- * Keep this value power of 2, so the control steps can be
- * uniformly adjusted for gradual lens movement, with desired
- * number of control steps.
- */
-#define DW9714_CTRL_STEPS 16
-#define DW9714_CTRL_DELAY_US 1000
-/*
- * S[3:2] = 0x00, codes per step for "Linear Slope Control"
- * S[1:0] = 0x00, step period
- */
-#define DW9714_DEFAULT_S 0x0
-#define DW9714_VAL(data, s) ((data) << 4 | (s))
+#include "../../../include/media/dw9714.h"
+#ifdef CONFIG_CRLMODULE_RD_NVM_TO_VCM
+extern int pass_vcm_val;
+#endif
 
 /* dw9714 device structure */
 struct dw9714_device {
 	struct i2c_client *client;
 	struct v4l2_ctrl_handler ctrls_vcm;
-	struct v4l2_subdev sd;
+	struct v4l2_subdev subdev_vcm;
+	struct dw9714_platform_data *pdata;
 	u16 current_val;
 };
 
-static inline struct dw9714_device *to_dw9714_vcm(struct v4l2_ctrl *ctrl)
-{
-	return container_of(ctrl->handler, struct dw9714_device, ctrls_vcm);
-}
-
-static inline struct dw9714_device *sd_to_dw9714_vcm(struct v4l2_subdev *subdev)
-{
-	return container_of(subdev, struct dw9714_device, sd);
-}
+#define to_dw9714_vcm(_ctrl) \
+	container_of(_ctrl->handler, struct dw9714_device, ctrls_vcm)
 
 static int dw9714_i2c_write(struct i2c_client *client, u16 data)
 {
+	const int num_msg = 1;
 	int ret;
 	u16 val = cpu_to_be16(data);
+	struct i2c_msg msg = {
+		.addr = client->addr,
+		.flags = 0,
+		.len = sizeof(val),
+		.buf = (u8 *)&val,
+	};
+
+	ret = i2c_transfer(client->adapter, &msg, num_msg);
+
+	/* One retry */
+	if (ret != num_msg)
+		ret = i2c_transfer(client->adapter, &msg, num_msg);
 
-	ret = i2c_master_send(client, (const char *)&val, sizeof(val));
-	if (ret != sizeof(val)) {
-		dev_err(&client->dev, "I2C write fail\n");
+	if (ret != num_msg) {
+		dev_err(&client->dev, "I2C write fail\n");
 		return -EIO;
+	} else {
+		return 0;
 	}
-	return 0;
 }
 
 static int dw9714_t_focus_vcm(struct dw9714_device *dw9714_dev, u16 val)
 {
 	struct i2c_client *client = dw9714_dev->client;
+	int ret = -EINVAL;
+
 	dev_dbg(&client->dev, "Setting new value VCM: %d\n", val);
 	dw9714_dev->current_val = val;
-	return dw9714_i2c_write(client, DW9714_VAL(val, DW9714_DEFAULT_S));
+	ret = dw9714_i2c_write(client,
+			       VCM_VAL(val, VCM_DEFAULT_S));
+	return ret;
 }
 
 static int dw9714_set_ctrl(struct v4l2_ctrl *ctrl)
@@ -81,34 +77,62 @@ static int dw9714_set_ctrl(struct v4l2_ctrl *ctrl)
 
 	if (ctrl->id == V4L2_CID_FOCUS_ABSOLUTE)
 		return dw9714_t_focus_vcm(dev_vcm, ctrl->val);
-
-	return -EINVAL;
+	else
+		return -EINVAL;
 }
 
 static const struct v4l2_ctrl_ops dw9714_vcm_ctrl_ops = {
	.s_ctrl =
dw9714_set_ctrl, }; +static int dw9714_init_controls(struct dw9714_device *dev_vcm) +{ + struct v4l2_ctrl_handler *hdl = &dev_vcm->ctrls_vcm; + const struct v4l2_ctrl_ops *ops = &dw9714_vcm_ctrl_ops; + struct i2c_client *client = dev_vcm->client; + + v4l2_ctrl_handler_init(hdl, 1); + + v4l2_ctrl_new_std(hdl, ops, + V4L2_CID_FOCUS_ABSOLUTE, + 0, + DW9714_MAX_FOCUS_POS, + 1, + 0); + + if (hdl->error) + dev_err(&client->dev, "dw9714_init_controls fail\n"); + dev_vcm->subdev_vcm.ctrl_handler = hdl; + return hdl->error; +} + +static void dw9714_subdev_cleanup(struct dw9714_device *dw9714_dev) +{ + v4l2_ctrl_handler_free(&dw9714_dev->ctrls_vcm); + v4l2_device_unregister_subdev(&dw9714_dev->subdev_vcm); + media_entity_cleanup(&dw9714_dev->subdev_vcm.entity); +} + static int dw9714_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { - struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd); + struct dw9714_device *dw9714_dev = container_of(sd, + struct dw9714_device, subdev_vcm); struct device *dev = &dw9714_dev->client->dev; int rval; rval = pm_runtime_get_sync(dev); - if (rval < 0) { - pm_runtime_put_noidle(dev); - return rval; - } + dev_dbg(dev, "%s rval = %d\n", __func__, rval); - return 0; + return rval; } static int dw9714_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { - struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd); + struct dw9714_device *dw9714_dev = container_of(sd, + struct dw9714_device, subdev_vcm); struct device *dev = &dw9714_dev->client->dev; + dev_dbg(dev, "%s\n", __func__); pm_runtime_put(dev); return 0; @@ -121,62 +145,64 @@ static const struct v4l2_subdev_internal_ops dw9714_int_ops = { static const struct v4l2_subdev_ops dw9714_ops = { }; -static void dw9714_subdev_cleanup(struct dw9714_device *dw9714_dev) -{ - v4l2_async_unregister_subdev(&dw9714_dev->sd); - v4l2_ctrl_handler_free(&dw9714_dev->ctrls_vcm); - media_entity_cleanup(&dw9714_dev->sd.entity); -} - -static int dw9714_init_controls(struct dw9714_device *dev_vcm) -{ - struct v4l2_ctrl_handler *hdl = &dev_vcm->ctrls_vcm; - const struct v4l2_ctrl_ops *ops = &dw9714_vcm_ctrl_ops; - struct i2c_client *client = dev_vcm->client; - - v4l2_ctrl_handler_init(hdl, 1); - - v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FOCUS_ABSOLUTE, - 0, DW9714_MAX_FOCUS_POS, DW9714_CTRL_STEPS, 0); - - if (hdl->error) - dev_err(&client->dev, "%s fail error: 0x%x\n", - __func__, hdl->error); - dev_vcm->sd.ctrl_handler = hdl; - return hdl->error; -} - -static int dw9714_probe(struct i2c_client *client) +static int dw9714_probe(struct i2c_client *client, + const struct i2c_device_id *devid) { struct dw9714_device *dw9714_dev; + struct dw9714_platform_data *pdata = dev_get_platdata(&client->dev); int rval; dw9714_dev = devm_kzalloc(&client->dev, sizeof(*dw9714_dev), - GFP_KERNEL); + GFP_KERNEL); + if (dw9714_dev == NULL) return -ENOMEM; + if (pdata) { + dw9714_dev->pdata = pdata; + +#ifndef CONFIG_INTEL_IPU4_OV13858 + if (pdata->gpio_xsd >= 0 && devm_gpio_request_one(&client->dev, + dw9714_dev->pdata->gpio_xsd, 0, + "dw9714 xsd") != 0) { + dev_err(&client->dev, + "unable to acquire xshutdown %d\n", + dw9714_dev->pdata->gpio_xsd); + return -ENODEV; + } +#else + dev_dbg(&client->dev, "Do not request GPIO as it's common pin design"); +#endif + } + dw9714_dev->client = client; - v4l2_i2c_subdev_init(&dw9714_dev->sd, client, &dw9714_ops); - dw9714_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; - dw9714_dev->sd.internal_ops = &dw9714_int_ops; + v4l2_i2c_subdev_init(&dw9714_dev->subdev_vcm, client, &dw9714_ops); + 
dw9714_dev->subdev_vcm.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + dw9714_dev->subdev_vcm.internal_ops = &dw9714_int_ops; + + snprintf(dw9714_dev->subdev_vcm.name, + sizeof(dw9714_dev->subdev_vcm.name), + DW9714_NAME " %d-%4.4x", i2c_adapter_id(client->adapter), + client->addr); rval = dw9714_init_controls(dw9714_dev); if (rval) goto err_cleanup; - - rval = media_entity_pads_init(&dw9714_dev->sd.entity, 0, NULL); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = media_entity_init(&dw9714_dev->subdev_vcm.entity, 0, NULL, 0); +#else + rval = media_entity_pads_init(&dw9714_dev->subdev_vcm.entity, 0, + NULL); +#endif if (rval < 0) goto err_cleanup; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + dw9714_dev->subdev_vcm.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_LENS; +#else + dw9714_dev->subdev_vcm.entity.function = MEDIA_ENT_F_LENS; +#endif - dw9714_dev->sd.entity.function = MEDIA_ENT_F_LENS; - - rval = v4l2_async_register_subdev(&dw9714_dev->sd); - if (rval < 0) - goto err_cleanup; - - pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); return 0; @@ -190,7 +216,8 @@ static int dw9714_probe(struct i2c_client *client) static int dw9714_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd); + struct dw9714_device *dw9714_dev = container_of(sd, + struct dw9714_device, subdev_vcm); pm_runtime_disable(&client->dev); dw9714_subdev_cleanup(dw9714_dev); @@ -198,90 +225,131 @@ static int dw9714_remove(struct i2c_client *client) return 0; } -/* - * This function sets the vcm position, so it consumes least current - * The lens position is gradually moved in units of DW9714_CTRL_STEPS, - * to make the movements smoothly. - */ -static int __maybe_unused dw9714_vcm_suspend(struct device *dev) +#ifdef CONFIG_PM + +static int dw9714_runtime_suspend(struct device *dev) { +#ifndef CONFIG_INTEL_IPU4_OV13858 struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); - struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd); + struct dw9714_device *dw9714_dev = container_of(sd, + struct dw9714_device, subdev_vcm); int ret, val; + dev_dbg(dev, "%s\n", __func__); + + for (val = dw9714_dev->current_val & ~(DW9714_CTRL_STEPS - 1); - val >= 0; val -= DW9714_CTRL_STEPS) { + val >= 0 ; val -= DW9714_CTRL_STEPS) { ret = dw9714_i2c_write(client, - DW9714_VAL(val, DW9714_DEFAULT_S)); + VCM_VAL((u16)val, VCM_DEFAULT_S)); if (ret) - dev_err_once(dev, "%s I2C failure: %d", __func__, ret); + dev_err(dev, "%s I2C failure: %d", __func__, ret); usleep_range(DW9714_CTRL_DELAY_US, DW9714_CTRL_DELAY_US + 10); } + + if (dw9714_dev->pdata) { + if (dw9714_dev->pdata->gpio_xsd >= 0) + gpio_set_value(dw9714_dev->pdata->gpio_xsd, 0); + if (dw9714_dev->pdata->sensor_dev) + pm_runtime_put(dw9714_dev->pdata->sensor_dev); + } +#else + vcm_in_use = false; + dev_dbg(dev, "%s: Skip xshutdown due to common pin\n", __func__); +#endif + return 0; } -/* - * This function sets the vcm position to the value set by the user - * through v4l2_ctrl_ops s_ctrl handler - * The lens position is gradually moved in units of DW9714_CTRL_STEPS, - * to make the movements smoothly. 
- */
-static int __maybe_unused dw9714_vcm_resume(struct device *dev)
+static int dw9714_runtime_resume(struct device *dev)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct v4l2_subdev *sd = i2c_get_clientdata(client);
-	struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd);
+	struct dw9714_device *dw9714_dev = container_of(sd,
+			struct dw9714_device, subdev_vcm);
 	int ret, val;
 
+#ifdef CONFIG_INTEL_IPU4_OV13858
+	vcm_in_use = true;
+#endif
+	if (dw9714_dev->pdata) {
+		if (dw9714_dev->pdata->sensor_dev) {
+			ret = pm_runtime_get_sync(
+				dw9714_dev->pdata->sensor_dev);
+			if (ret < 0)
+				goto out;
+		}
+		if (dw9714_dev->pdata->gpio_xsd >= 0)
+#ifndef CONFIG_INTEL_IPU4_OV13858
+			gpio_set_value(dw9714_dev->pdata->gpio_xsd, 1);
+#else
+			crlmodule_vcm_gpio_set_value(
+				dw9714_dev->pdata->gpio_xsd, 1);
+			dev_dbg(dev, "Control xshutdown via crlmodule");
+#endif
+	}
+#ifndef CONFIG_CRLMODULE_RD_NVM_TO_VCM
 	for (val = dw9714_dev->current_val % DW9714_CTRL_STEPS;
 	     val < dw9714_dev->current_val + DW9714_CTRL_STEPS - 1;
 	     val += DW9714_CTRL_STEPS) {
 		ret = dw9714_i2c_write(client,
-				       DW9714_VAL(val, DW9714_DEFAULT_S));
+				       VCM_VAL((u16)val, VCM_DEFAULT_S));
 		if (ret)
-			dev_err_ratelimited(dev, "%s I2C failure: %d",
-					    __func__, ret);
+			dev_err(dev, "%s I2C failure: %d", __func__, ret);
 		usleep_range(DW9714_CTRL_DELAY_US, DW9714_CTRL_DELAY_US + 10);
 	}
 
-	return 0;
+	/* restore v4l2 control values */
+	ret = v4l2_ctrl_handler_setup(&dw9714_dev->ctrls_vcm);
+
+#else
+	ret = 0;
+	val = 0;
+	if (pass_vcm_val != 0) {
+		dw9714_t_focus_vcm(dw9714_dev, pass_vcm_val);
+		dev_dbg(dev, "%s nvm pass %d to set vcm\n",
+			__func__, pass_vcm_val);
+	} else
+		dev_err(dev, "%s nvm pass fail\n", __func__);
+#endif
+out:
+	if (ret && dw9714_dev->pdata && dw9714_dev->pdata->sensor_dev)
+		pm_runtime_put_sync(dw9714_dev->pdata->sensor_dev);
+	dev_dbg(dev, "%s rval = %d\n", __func__, ret);
+	return ret;
 }
 
+#else
+
+#define dw9714_suspend NULL
+#define dw9714_resume NULL
+
+#endif /* CONFIG_PM */
+
 static const struct i2c_device_id dw9714_id_table[] = {
 	{ DW9714_NAME, 0 },
-	{ { 0 } }
+	{ }
 };
 MODULE_DEVICE_TABLE(i2c, dw9714_id_table);
 
-static const struct of_device_id dw9714_of_table[] = {
-	{ .compatible = "dongwoon,dw9714" },
-	{ { 0 } }
-};
-MODULE_DEVICE_TABLE(of, dw9714_of_table);
-
 static const struct dev_pm_ops dw9714_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(dw9714_vcm_suspend, dw9714_vcm_resume)
-	SET_RUNTIME_PM_OPS(dw9714_vcm_suspend, dw9714_vcm_resume, NULL)
+	.runtime_suspend = dw9714_runtime_suspend,
+	.runtime_resume = dw9714_runtime_resume,
 };
 
 static struct i2c_driver dw9714_i2c_driver = {
-	.driver = {
-		.name = DW9714_NAME,
+	.driver = {
+		.name = DW9714_NAME,
 		.pm = &dw9714_pm_ops,
-		.of_match_table = dw9714_of_table,
 	},
-	.probe_new = dw9714_probe,
-	.remove = dw9714_remove,
-	.id_table = dw9714_id_table,
+	.probe = dw9714_probe,
+	.remove = dw9714_remove,
+	.id_table = dw9714_id_table,
 };
 module_i2c_driver(dw9714_i2c_driver);
 
-MODULE_AUTHOR("Tianshu Qiu ");
-MODULE_AUTHOR("Jian Xu Zheng ");
-MODULE_AUTHOR("Yuning Pu ");
 MODULE_AUTHOR("Jouni Ukkonen ");
 MODULE_AUTHOR("Tommi Franttila ");
 MODULE_DESCRIPTION("DW9714 VCM driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
new file mode 100644
index 000000000000..9fee39888408
--- /dev/null
+++ b/drivers/media/i2c/imx319.c
@@ -0,0 +1,2452 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define
IMX319_REG_MODE_SELECT 0x0100 +#define IMX319_MODE_STANDBY 0x00 +#define IMX319_MODE_STREAMING 0x01 + +/* Chip ID */ +#define IMX319_REG_CHIP_ID 0x0016 +#define IMX319_CHIP_ID 0x0319 + +/* V_TIMING internal */ +#define IMX319_REG_FLL 0x0340 +#define IMX319_FLL_MAX 0xffff + +/* Exposure control */ +#define IMX319_REG_EXPOSURE 0x0202 +#define IMX319_EXPOSURE_MIN 1 +#define IMX319_EXPOSURE_STEP 1 +#define IMX319_EXPOSURE_DEFAULT 0x04ee + +/* Analog gain control */ +#define IMX319_REG_ANALOG_GAIN 0x0204 +#define IMX319_ANA_GAIN_MIN 0 +#define IMX319_ANA_GAIN_MAX 960 +#define IMX319_ANA_GAIN_STEP 1 +#define IMX319_ANA_GAIN_DEFAULT 0 + +/* Digital gain control */ +#define IMX319_REG_DPGA_USE_GLOBAL_GAIN 0x3ff9 +#define IMX319_REG_DIG_GAIN_GLOBAL 0x020e +#define IMX319_DGTL_GAIN_MIN 256 +#define IMX319_DGTL_GAIN_MAX 4095 +#define IMX319_DGTL_GAIN_STEP 1 +#define IMX319_DGTL_GAIN_DEFAULT 256 + +/* Test Pattern Control */ +#define IMX319_REG_TEST_PATTERN 0x0600 +#define IMX319_TEST_PATTERN_DISABLED 0 +#define IMX319_TEST_PATTERN_SOLID_COLOR 1 +#define IMX319_TEST_PATTERN_COLOR_BARS 2 +#define IMX319_TEST_PATTERN_GRAY_COLOR_BARS 3 +#define IMX319_TEST_PATTERN_PN9 4 + +/* Flip Control */ +#define IMX319_REG_ORIENTATION 0x0101 + +struct imx319_reg { + u16 address; + u8 val; +}; + +struct imx319_reg_list { + u32 num_of_regs; + const struct imx319_reg *regs; +}; + +/* Mode : resolution and related config&values */ +struct imx319_mode { + /* Frame width */ + u32 width; + /* Frame height */ + u32 height; + + /* V-timing */ + u32 fll_def; + u32 fll_min; + + /* H-timing */ + u32 llp; + + /* Default register values */ + struct imx319_reg_list reg_list; +}; + +struct imx319 { + struct v4l2_subdev sd; + struct media_pad pad; + + struct v4l2_ctrl_handler ctrl_handler; + /* V4L2 Controls */ + struct v4l2_ctrl *link_freq; + struct v4l2_ctrl *pixel_rate; + struct v4l2_ctrl *vblank; + struct v4l2_ctrl *hblank; + struct v4l2_ctrl *exposure; + struct v4l2_ctrl *vflip; + struct v4l2_ctrl *hflip; + + /* Current mode */ + const struct imx319_mode *cur_mode; + + /* + * Mutex for serialized access: + * Protect sensor set pad format and start/stop streaming safely. + * Protect access to sensor v4l2 controls. 
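+	 *
+	 * (In practice this means any entry point that reads or updates
+	 * cur_mode, the pad format or the control values should take this
+	 * mutex first.)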
+ */ + struct mutex mutex; + + /* Streaming on/off */ + bool streaming; +}; + +static const struct imx319_reg imx319_global_regs[] = { + { 0x0136, 0x13 }, + { 0x0137, 0x33 }, + { 0x3c7e, 0x03 }, + { 0x3c7f, 0x07 }, + { 0x4d39, 0x0b }, + { 0x4d41, 0x33 }, + { 0x4d43, 0x0c }, + { 0x4d49, 0x89 }, + { 0x4e05, 0x0b }, + { 0x4e0d, 0x33 }, + { 0x4e0f, 0x0c }, + { 0x4e15, 0x89 }, + { 0x4e49, 0x2a }, + { 0x4e51, 0x33 }, + { 0x4e53, 0x0c }, + { 0x4e59, 0x89 }, + { 0x5601, 0x4f }, + { 0x560b, 0x45 }, + { 0x562f, 0x0a }, + { 0x5643, 0x0a }, + { 0x5645, 0x0c }, + { 0x56ef, 0x51 }, + { 0x586f, 0x33 }, + { 0x5873, 0x89 }, + { 0x5905, 0x33 }, + { 0x5907, 0x89 }, + { 0x590d, 0x33 }, + { 0x590f, 0x89 }, + { 0x5915, 0x33 }, + { 0x5917, 0x89 }, + { 0x5969, 0x1c }, + { 0x596b, 0x72 }, + { 0x5971, 0x33 }, + { 0x5973, 0x89 }, + { 0x5975, 0x33 }, + { 0x5977, 0x89 }, + { 0x5979, 0x1c }, + { 0x597b, 0x72 }, + { 0x5985, 0x33 }, + { 0x5987, 0x89 }, + { 0x5999, 0x1c }, + { 0x599b, 0x72 }, + { 0x59a5, 0x33 }, + { 0x59a7, 0x89 }, + { 0x7485, 0x08 }, + { 0x7487, 0x0c }, + { 0x7489, 0xc7 }, + { 0x748b, 0x8b }, + { 0x9004, 0x09 }, + { 0x9200, 0x6a }, + { 0x9201, 0x22 }, + { 0x9202, 0x6a }, + { 0x9203, 0x23 }, + { 0x9204, 0x5f }, + { 0x9205, 0x23 }, + { 0x9206, 0x5f }, + { 0x9207, 0x24 }, + { 0x9208, 0x5f }, + { 0x9209, 0x26 }, + { 0x920a, 0x5f }, + { 0x920b, 0x27 }, + { 0x920c, 0x5f }, + { 0x920d, 0x29 }, + { 0x920e, 0x5f }, + { 0x920f, 0x2a }, + { 0x9210, 0x5f }, + { 0x9211, 0x2c }, + { 0xbc22, 0x1a }, + { 0xf01f, 0x04 }, + { 0xf021, 0x03 }, + { 0xf023, 0x02 }, + { 0xf03d, 0x05 }, + { 0xf03f, 0x03 }, + { 0xf041, 0x02 }, + { 0xf0af, 0x04 }, + { 0xf0b1, 0x03 }, + { 0xf0b3, 0x02 }, + { 0xf0cd, 0x05 }, + { 0xf0cf, 0x03 }, + { 0xf0d1, 0x02 }, + { 0xf13f, 0x04 }, + { 0xf141, 0x03 }, + { 0xf143, 0x02 }, + { 0xf15d, 0x05 }, + { 0xf15f, 0x03 }, + { 0xf161, 0x02 }, + { 0xf1cf, 0x04 }, + { 0xf1d1, 0x03 }, + { 0xf1d3, 0x02 }, + { 0xf1ed, 0x05 }, + { 0xf1ef, 0x03 }, + { 0xf1f1, 0x02 }, + { 0xf287, 0x04 }, + { 0xf289, 0x03 }, + { 0xf28b, 0x02 }, + { 0xf2a5, 0x05 }, + { 0xf2a7, 0x03 }, + { 0xf2a9, 0x02 }, + { 0xf2b7, 0x04 }, + { 0xf2b9, 0x03 }, + { 0xf2bb, 0x02 }, + { 0xf2d5, 0x05 }, + { 0xf2d7, 0x03 }, + { 0xf2d9, 0x02 }, +}; + +struct imx319_reg_list imx319_global_setting = { + .num_of_regs = ARRAY_SIZE(imx319_global_regs), + .regs = imx319_global_regs, +}; + +static const struct imx319_reg mode_3264x2448_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x0f }, + { 0x0343, 0x80 }, + { 0x0340, 0x0a }, + { 0x0341, 0x78 }, + { 0x0344, 0x00 }, + { 0x0345, 0x00 }, + { 0x0346, 0x00 }, + { 0x0347, 0x00 }, + { 0x0348, 0x0c }, + { 0x0349, 0xcf }, + { 0x034a, 0x09 }, + { 0x034b, 0x9f }, + { 0x0220, 0x00 }, + { 0x0221, 0x11 }, + { 0x0381, 0x01 }, + { 0x0383, 0x01 }, + { 0x0385, 0x01 }, + { 0x0387, 0x01 }, + { 0x0900, 0x00 }, + { 0x0901, 0x11 }, + { 0x0902, 0x0a }, + { 0x3140, 0x02 }, + { 0x3141, 0x00 }, + { 0x3f0d, 0x0a }, + { 0x3f14, 0x01 }, + { 0x3f3c, 0x01 }, + { 0x3f4d, 0x01 }, + { 0x3f4c, 0x01 }, + { 0x4254, 0x7f }, + { 0x0401, 0x00 }, + { 0x0404, 0x00 }, + { 0x0405, 0x10 }, + { 0x0408, 0x00 }, + { 0x0409, 0x08 }, + { 0x040a, 0x00 }, + { 0x040b, 0x08 }, + { 0x040c, 0x0c }, + { 0x040d, 0xc0 }, + { 0x040e, 0x09 }, + { 0x040f, 0x90 }, + { 0x034c, 0x0c }, + { 0x034d, 0xc0 }, + { 0x034e, 0x09 }, + { 0x034f, 0x90 }, + { 0x3261, 0x00 }, + { 0x3264, 0x00 }, + { 0x3265, 0x10 }, + { 0x0301, 0x06 }, + { 0x0303, 0x04 }, + { 0x0305, 0x03 }, + { 0x0306, 0x01 }, + { 0x0307, 0x2c }, + { 0x0309, 0x0a }, + { 0x030b, 0x02 }, 
+ { 0x030d, 0x04 }, + { 0x030e, 0x01 }, + { 0x030f, 0x2c }, + { 0x0310, 0x01 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x0822, 0x00 }, + { 0x0823, 0x00 }, + { 0x3e20, 0x01 }, + { 0x3e37, 0x00 }, + { 0x3e3b, 0x01 }, + { 0x38a3, 0x01 }, + { 0x38a8, 0x00 }, + { 0x38a9, 0x00 }, + { 0x38aa, 0x00 }, + { 0x38ab, 0x00 }, + { 0x3234, 0x00 }, + { 0x3fc1, 0x00 }, + { 0x3235, 0x00 }, + { 0x3802, 0x00 }, + { 0x3143, 0x04 }, + { 0x360a, 0x00 }, + { 0x0b00, 0x00 }, + { 0x0106, 0x00 }, + { 0x0b05, 0x01 }, + { 0x0b06, 0x01 }, + { 0x3230, 0x00 }, + { 0x3602, 0x01 }, + { 0x3607, 0x01 }, + { 0x3c00, 0x00 }, + { 0x3c01, 0x48 }, + { 0x3c02, 0xc8 }, + { 0x3c03, 0xaa }, + { 0x3c04, 0x91 }, + { 0x3c05, 0x54 }, + { 0x3c06, 0x26 }, + { 0x3c07, 0x20 }, + { 0x3c08, 0x51 }, + { 0x3d80, 0x00 }, + { 0x3f50, 0x00 }, + { 0x3f56, 0x00 }, + { 0x3f57, 0x30 }, + { 0x3f78, 0x01 }, + { 0x3f79, 0x18 }, + { 0x3f7c, 0x00 }, + { 0x3f7d, 0x00 }, + { 0x3fba, 0x00 }, + { 0x3fbb, 0x00 }, + { 0xa081, 0x00 }, + { 0xe014, 0x00 }, + { 0x0202, 0x0a }, + { 0x0203, 0x66 }, + { 0x0224, 0x01 }, + { 0x0225, 0xf4 }, + { 0x0204, 0x00 }, + { 0x0205, 0x00 }, + { 0x0216, 0x00 }, + { 0x0217, 0x00 }, + { 0x020e, 0x01 }, + { 0x020f, 0x00 }, + { 0x0210, 0x01 }, + { 0x0211, 0x00 }, + { 0x0212, 0x01 }, + { 0x0213, 0x00 }, + { 0x0214, 0x01 }, + { 0x0215, 0x00 }, + { 0x0218, 0x01 }, + { 0x0219, 0x00 }, + { 0x3614, 0x00 }, + { 0x3616, 0x0d }, + { 0x3617, 0x56 }, + { 0xb612, 0x20 }, + { 0xb613, 0x20 }, + { 0xb614, 0x20 }, + { 0xb615, 0x20 }, + { 0xb616, 0x0a }, + { 0xb617, 0x0a }, + { 0xb618, 0x20 }, + { 0xb619, 0x20 }, + { 0xb61a, 0x20 }, + { 0xb61b, 0x20 }, + { 0xb61c, 0x0a }, + { 0xb61d, 0x0a }, + { 0xb666, 0x30 }, + { 0xb667, 0x30 }, + { 0xb668, 0x30 }, + { 0xb669, 0x30 }, + { 0xb66a, 0x14 }, + { 0xb66b, 0x14 }, + { 0xb66c, 0x20 }, + { 0xb66d, 0x20 }, + { 0xb66e, 0x20 }, + { 0xb66f, 0x20 }, + { 0xb670, 0x10 }, + { 0xb671, 0x10 }, + { 0x3237, 0x00 }, + { 0x3900, 0x00 }, + { 0x3901, 0x00 }, + { 0x3902, 0x00 }, + { 0x3904, 0x00 }, + { 0x3905, 0x00 }, + { 0x3906, 0x00 }, + { 0x3907, 0x00 }, + { 0x3908, 0x00 }, + { 0x3909, 0x00 }, + { 0x3912, 0x00 }, + { 0x3930, 0x00 }, + { 0x3931, 0x00 }, + { 0x3933, 0x00 }, + { 0x3934, 0x00 }, + { 0x3935, 0x00 }, + { 0x3936, 0x00 }, + { 0x3937, 0x00 }, + { 0x30ac, 0x00 }, +}; + +static const struct imx319_reg mode_3280x2464_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x0f }, + { 0x0343, 0x80 }, + { 0x0340, 0x0a }, + { 0x0341, 0x78 }, + { 0x0344, 0x00 }, + { 0x0345, 0x00 }, + { 0x0346, 0x00 }, + { 0x0347, 0x00 }, + { 0x0348, 0x0c }, + { 0x0349, 0xcf }, + { 0x034a, 0x09 }, + { 0x034b, 0x9f }, + { 0x0220, 0x00 }, + { 0x0221, 0x11 }, + { 0x0381, 0x01 }, + { 0x0383, 0x01 }, + { 0x0385, 0x01 }, + { 0x0387, 0x01 }, + { 0x0900, 0x00 }, + { 0x0901, 0x11 }, + { 0x0902, 0x0a }, + { 0x3140, 0x02 }, + { 0x3141, 0x00 }, + { 0x3f0d, 0x0a }, + { 0x3f14, 0x01 }, + { 0x3f3c, 0x01 }, + { 0x3f4d, 0x01 }, + { 0x3f4c, 0x01 }, + { 0x4254, 0x7f }, + { 0x0401, 0x00 }, + { 0x0404, 0x00 }, + { 0x0405, 0x10 }, + { 0x0408, 0x00 }, + { 0x0409, 0x00 }, + { 0x040a, 0x00 }, + { 0x040b, 0x00 }, + { 0x040c, 0x0c }, + { 0x040d, 0xd0 }, + { 0x040e, 0x09 }, + { 0x040f, 0xa0 }, + { 0x034c, 0x0c }, + { 0x034d, 0xd0 }, + { 0x034e, 0x09 }, + { 0x034f, 0xa0 }, + { 0x3261, 0x00 }, + { 0x3264, 0x00 }, + { 0x3265, 0x10 }, + { 0x0301, 0x06 }, + { 0x0303, 0x04 }, + { 0x0305, 0x03 }, + { 0x0306, 0x01 }, + { 0x0307, 0x2c }, + { 0x0309, 0x0a }, + { 0x030b, 0x02 }, + { 0x030d, 0x04 }, + { 0x030e, 0x01 }, + { 0x030f, 0x2c }, + { 
0x0310, 0x01 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x0822, 0x00 }, + { 0x0823, 0x00 }, + { 0x3e20, 0x01 }, + { 0x3e37, 0x00 }, + { 0x3e3b, 0x01 }, + { 0x38a3, 0x01 }, + { 0x38a8, 0x00 }, + { 0x38a9, 0x00 }, + { 0x38aa, 0x00 }, + { 0x38ab, 0x00 }, + { 0x3234, 0x00 }, + { 0x3fc1, 0x00 }, + { 0x3235, 0x00 }, + { 0x3802, 0x00 }, + { 0x3143, 0x04 }, + { 0x360a, 0x00 }, + { 0x0b00, 0x00 }, + { 0x0106, 0x00 }, + { 0x0b05, 0x01 }, + { 0x0b06, 0x01 }, + { 0x3230, 0x00 }, + { 0x3602, 0x01 }, + { 0x3607, 0x01 }, + { 0x3c00, 0x00 }, + { 0x3c01, 0x48 }, + { 0x3c02, 0xc8 }, + { 0x3c03, 0xaa }, + { 0x3c04, 0x91 }, + { 0x3c05, 0x54 }, + { 0x3c06, 0x26 }, + { 0x3c07, 0x20 }, + { 0x3c08, 0x51 }, + { 0x3d80, 0x00 }, + { 0x3f50, 0x00 }, + { 0x3f56, 0x00 }, + { 0x3f57, 0x30 }, + { 0x3f78, 0x01 }, + { 0x3f79, 0x18 }, + { 0x3f7c, 0x00 }, + { 0x3f7d, 0x00 }, + { 0x3fba, 0x00 }, + { 0x3fbb, 0x00 }, + { 0xa081, 0x00 }, + { 0xe014, 0x00 }, + { 0x0202, 0x0a }, + { 0x0203, 0x66 }, + { 0x0224, 0x01 }, + { 0x0225, 0xf4 }, + { 0x0204, 0x00 }, + { 0x0205, 0x00 }, + { 0x0216, 0x00 }, + { 0x0217, 0x00 }, + { 0x020e, 0x01 }, + { 0x020f, 0x00 }, + { 0x0210, 0x01 }, + { 0x0211, 0x00 }, + { 0x0212, 0x01 }, + { 0x0213, 0x00 }, + { 0x0214, 0x01 }, + { 0x0215, 0x00 }, + { 0x0218, 0x01 }, + { 0x0219, 0x00 }, + { 0x3614, 0x00 }, + { 0x3616, 0x0d }, + { 0x3617, 0x56 }, + { 0xb612, 0x20 }, + { 0xb613, 0x20 }, + { 0xb614, 0x20 }, + { 0xb615, 0x20 }, + { 0xb616, 0x0a }, + { 0xb617, 0x0a }, + { 0xb618, 0x20 }, + { 0xb619, 0x20 }, + { 0xb61a, 0x20 }, + { 0xb61b, 0x20 }, + { 0xb61c, 0x0a }, + { 0xb61d, 0x0a }, + { 0xb666, 0x30 }, + { 0xb667, 0x30 }, + { 0xb668, 0x30 }, + { 0xb669, 0x30 }, + { 0xb66a, 0x14 }, + { 0xb66b, 0x14 }, + { 0xb66c, 0x20 }, + { 0xb66d, 0x20 }, + { 0xb66e, 0x20 }, + { 0xb66f, 0x20 }, + { 0xb670, 0x10 }, + { 0xb671, 0x10 }, + { 0x3237, 0x00 }, + { 0x3900, 0x00 }, + { 0x3901, 0x00 }, + { 0x3902, 0x00 }, + { 0x3904, 0x00 }, + { 0x3905, 0x00 }, + { 0x3906, 0x00 }, + { 0x3907, 0x00 }, + { 0x3908, 0x00 }, + { 0x3909, 0x00 }, + { 0x3912, 0x00 }, + { 0x3930, 0x00 }, + { 0x3931, 0x00 }, + { 0x3933, 0x00 }, + { 0x3934, 0x00 }, + { 0x3935, 0x00 }, + { 0x3936, 0x00 }, + { 0x3937, 0x00 }, + { 0x30ac, 0x00 }, +}; + +static const struct imx319_reg mode_1936x1096_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x0f }, + { 0x0343, 0x80 }, + { 0x0340, 0x07 }, + { 0x0341, 0x72 }, + { 0x0344, 0x00 }, + { 0x0345, 0x00 }, + { 0x0346, 0x01 }, + { 0x0347, 0x30 }, + { 0x0348, 0x0c }, + { 0x0349, 0xcf }, + { 0x034a, 0x08 }, + { 0x034b, 0x6f }, + { 0x0220, 0x00 }, + { 0x0221, 0x11 }, + { 0x0381, 0x01 }, + { 0x0383, 0x01 }, + { 0x0385, 0x01 }, + { 0x0387, 0x01 }, + { 0x0900, 0x00 }, + { 0x0901, 0x11 }, + { 0x0902, 0x0a }, + { 0x3140, 0x02 }, + { 0x3141, 0x00 }, + { 0x3f0d, 0x0a }, + { 0x3f14, 0x01 }, + { 0x3f3c, 0x01 }, + { 0x3f4d, 0x01 }, + { 0x3f4c, 0x01 }, + { 0x4254, 0x7f }, + { 0x0401, 0x00 }, + { 0x0404, 0x00 }, + { 0x0405, 0x10 }, + { 0x0408, 0x02 }, + { 0x0409, 0xa0 }, + { 0x040a, 0x01 }, + { 0x040b, 0x7c }, + { 0x040c, 0x07 }, + { 0x040d, 0x90 }, + { 0x040e, 0x04 }, + { 0x040f, 0x48 }, + { 0x034c, 0x07 }, + { 0x034d, 0x90 }, + { 0x034e, 0x04 }, + { 0x034f, 0x48 }, + { 0x3261, 0x00 }, + { 0x3264, 0x00 }, + { 0x3265, 0x10 }, + { 0x0301, 0x06 }, + { 0x0303, 0x02 }, + { 0x0305, 0x03 }, + { 0x0306, 0x00 }, + { 0x0307, 0xd6 }, + { 0x0309, 0x0a }, + { 0x030b, 0x02 }, + { 0x030d, 0x04 }, + { 0x030e, 0x01 }, + { 0x030f, 0x2c }, + { 0x0310, 0x01 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x0822, 
0x00 }, + { 0x0823, 0x00 }, + { 0x3e20, 0x01 }, + { 0x3e37, 0x00 }, + { 0x3e3b, 0x01 }, + { 0x38a3, 0x01 }, + { 0x38a8, 0x00 }, + { 0x38a9, 0x00 }, + { 0x38aa, 0x00 }, + { 0x38ab, 0x00 }, + { 0x3234, 0x00 }, + { 0x3fc1, 0x00 }, + { 0x3235, 0x00 }, + { 0x3802, 0x00 }, + { 0x3143, 0x04 }, + { 0x360a, 0x00 }, + { 0x0b00, 0x00 }, + { 0x0106, 0x00 }, + { 0x0b05, 0x01 }, + { 0x0b06, 0x01 }, + { 0x3230, 0x00 }, + { 0x3602, 0x01 }, + { 0x3607, 0x01 }, + { 0x3c00, 0x00 }, + { 0x3c01, 0x48 }, + { 0x3c02, 0xc8 }, + { 0x3c03, 0xaa }, + { 0x3c04, 0x91 }, + { 0x3c05, 0x54 }, + { 0x3c06, 0x26 }, + { 0x3c07, 0x20 }, + { 0x3c08, 0x51 }, + { 0x3d80, 0x00 }, + { 0x3f50, 0x00 }, + { 0x3f56, 0x00 }, + { 0x3f57, 0x30 }, + { 0x3f78, 0x01 }, + { 0x3f79, 0x18 }, + { 0x3f7c, 0x00 }, + { 0x3f7d, 0x00 }, + { 0x3fba, 0x00 }, + { 0x3fbb, 0x00 }, + { 0xa081, 0x00 }, + { 0xe014, 0x00 }, + { 0x0202, 0x07 }, + { 0x0203, 0x60 }, + { 0x0224, 0x01 }, + { 0x0225, 0xf4 }, + { 0x0204, 0x00 }, + { 0x0205, 0x00 }, + { 0x0216, 0x00 }, + { 0x0217, 0x00 }, + { 0x020e, 0x01 }, + { 0x020f, 0x00 }, + { 0x0210, 0x01 }, + { 0x0211, 0x00 }, + { 0x0212, 0x01 }, + { 0x0213, 0x00 }, + { 0x0214, 0x01 }, + { 0x0215, 0x00 }, + { 0x0218, 0x01 }, + { 0x0219, 0x00 }, + { 0x3614, 0x00 }, + { 0x3616, 0x0d }, + { 0x3617, 0x56 }, + { 0xb612, 0x20 }, + { 0xb613, 0x20 }, + { 0xb614, 0x20 }, + { 0xb615, 0x20 }, + { 0xb616, 0x0a }, + { 0xb617, 0x0a }, + { 0xb618, 0x20 }, + { 0xb619, 0x20 }, + { 0xb61a, 0x20 }, + { 0xb61b, 0x20 }, + { 0xb61c, 0x0a }, + { 0xb61d, 0x0a }, + { 0xb666, 0x30 }, + { 0xb667, 0x30 }, + { 0xb668, 0x30 }, + { 0xb669, 0x30 }, + { 0xb66a, 0x14 }, + { 0xb66b, 0x14 }, + { 0xb66c, 0x20 }, + { 0xb66d, 0x20 }, + { 0xb66e, 0x20 }, + { 0xb66f, 0x20 }, + { 0xb670, 0x10 }, + { 0xb671, 0x10 }, + { 0x3237, 0x00 }, + { 0x3900, 0x00 }, + { 0x3901, 0x00 }, + { 0x3902, 0x00 }, + { 0x3904, 0x00 }, + { 0x3905, 0x00 }, + { 0x3906, 0x00 }, + { 0x3907, 0x00 }, + { 0x3908, 0x00 }, + { 0x3909, 0x00 }, + { 0x3912, 0x00 }, + { 0x3930, 0x00 }, + { 0x3931, 0x00 }, + { 0x3933, 0x00 }, + { 0x3934, 0x00 }, + { 0x3935, 0x00 }, + { 0x3936, 0x00 }, + { 0x3937, 0x00 }, + { 0x30ac, 0x00 }, +}; + +static const struct imx319_reg mode_1920x1080_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x0f }, + { 0x0343, 0x80 }, + { 0x0340, 0x07 }, + { 0x0341, 0x72 }, + { 0x0344, 0x00 }, + { 0x0345, 0x00 }, + { 0x0346, 0x01 }, + { 0x0347, 0x30 }, + { 0x0348, 0x0c }, + { 0x0349, 0xcf }, + { 0x034a, 0x08 }, + { 0x034b, 0x6f }, + { 0x0220, 0x00 }, + { 0x0221, 0x11 }, + { 0x0381, 0x01 }, + { 0x0383, 0x01 }, + { 0x0385, 0x01 }, + { 0x0387, 0x01 }, + { 0x0900, 0x00 }, + { 0x0901, 0x11 }, + { 0x0902, 0x0a }, + { 0x3140, 0x02 }, + { 0x3141, 0x00 }, + { 0x3f0d, 0x0a }, + { 0x3f14, 0x01 }, + { 0x3f3c, 0x01 }, + { 0x3f4d, 0x01 }, + { 0x3f4c, 0x01 }, + { 0x4254, 0x7f }, + { 0x0401, 0x00 }, + { 0x0404, 0x00 }, + { 0x0405, 0x10 }, + { 0x0408, 0x02 }, + { 0x0409, 0xa8 }, + { 0x040a, 0x01 }, + { 0x040b, 0x84 }, + { 0x040c, 0x07 }, + { 0x040d, 0x80 }, + { 0x040e, 0x04 }, + { 0x040f, 0x38 }, + { 0x034c, 0x07 }, + { 0x034d, 0x80 }, + { 0x034e, 0x04 }, + { 0x034f, 0x38 }, + { 0x3261, 0x00 }, + { 0x3264, 0x00 }, + { 0x3265, 0x10 }, + { 0x0301, 0x06 }, + { 0x0303, 0x02 }, + { 0x0305, 0x03 }, + { 0x0306, 0x00 }, + { 0x0307, 0xd6 }, + { 0x0309, 0x0a }, + { 0x030b, 0x02 }, + { 0x030d, 0x04 }, + { 0x030e, 0x01 }, + { 0x030f, 0x2c }, + { 0x0310, 0x01 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x0822, 0x00 }, + { 0x0823, 0x00 }, + { 0x3e20, 0x01 }, + { 0x3e37, 0x00 }, + 
{ 0x3e3b, 0x01 }, + { 0x38a3, 0x01 }, + { 0x38a8, 0x00 }, + { 0x38a9, 0x00 }, + { 0x38aa, 0x00 }, + { 0x38ab, 0x00 }, + { 0x3234, 0x00 }, + { 0x3fc1, 0x00 }, + { 0x3235, 0x00 }, + { 0x3802, 0x00 }, + { 0x3143, 0x04 }, + { 0x360a, 0x00 }, + { 0x0b00, 0x00 }, + { 0x0106, 0x00 }, + { 0x0b05, 0x01 }, + { 0x0b06, 0x01 }, + { 0x3230, 0x00 }, + { 0x3602, 0x01 }, + { 0x3607, 0x01 }, + { 0x3c00, 0x00 }, + { 0x3c01, 0x48 }, + { 0x3c02, 0xc8 }, + { 0x3c03, 0xaa }, + { 0x3c04, 0x91 }, + { 0x3c05, 0x54 }, + { 0x3c06, 0x26 }, + { 0x3c07, 0x20 }, + { 0x3c08, 0x51 }, + { 0x3d80, 0x00 }, + { 0x3f50, 0x00 }, + { 0x3f56, 0x00 }, + { 0x3f57, 0x30 }, + { 0x3f78, 0x01 }, + { 0x3f79, 0x18 }, + { 0x3f7c, 0x00 }, + { 0x3f7d, 0x00 }, + { 0x3fba, 0x00 }, + { 0x3fbb, 0x00 }, + { 0xa081, 0x00 }, + { 0xe014, 0x00 }, + { 0x0202, 0x07 }, + { 0x0203, 0x60 }, + { 0x0224, 0x01 }, + { 0x0225, 0xf4 }, + { 0x0204, 0x00 }, + { 0x0205, 0x00 }, + { 0x0216, 0x00 }, + { 0x0217, 0x00 }, + { 0x020e, 0x01 }, + { 0x020f, 0x00 }, + { 0x0210, 0x01 }, + { 0x0211, 0x00 }, + { 0x0212, 0x01 }, + { 0x0213, 0x00 }, + { 0x0214, 0x01 }, + { 0x0215, 0x00 }, + { 0x0218, 0x01 }, + { 0x0219, 0x00 }, + { 0x3614, 0x00 }, + { 0x3616, 0x0d }, + { 0x3617, 0x56 }, + { 0xb612, 0x20 }, + { 0xb613, 0x20 }, + { 0xb614, 0x20 }, + { 0xb615, 0x20 }, + { 0xb616, 0x0a }, + { 0xb617, 0x0a }, + { 0xb618, 0x20 }, + { 0xb619, 0x20 }, + { 0xb61a, 0x20 }, + { 0xb61b, 0x20 }, + { 0xb61c, 0x0a }, + { 0xb61d, 0x0a }, + { 0xb666, 0x30 }, + { 0xb667, 0x30 }, + { 0xb668, 0x30 }, + { 0xb669, 0x30 }, + { 0xb66a, 0x14 }, + { 0xb66b, 0x14 }, + { 0xb66c, 0x20 }, + { 0xb66d, 0x20 }, + { 0xb66e, 0x20 }, + { 0xb66f, 0x20 }, + { 0xb670, 0x10 }, + { 0xb671, 0x10 }, + { 0x3237, 0x00 }, + { 0x3900, 0x00 }, + { 0x3901, 0x00 }, + { 0x3902, 0x00 }, + { 0x3904, 0x00 }, + { 0x3905, 0x00 }, + { 0x3906, 0x00 }, + { 0x3907, 0x00 }, + { 0x3908, 0x00 }, + { 0x3909, 0x00 }, + { 0x3912, 0x00 }, + { 0x3930, 0x00 }, + { 0x3931, 0x00 }, + { 0x3933, 0x00 }, + { 0x3934, 0x00 }, + { 0x3935, 0x00 }, + { 0x3936, 0x00 }, + { 0x3937, 0x00 }, + { 0x30ac, 0x00 }, +}; + +static const struct imx319_reg mode_1640x1232_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x08 }, + { 0x0343, 0x20 }, + { 0x0340, 0x05 }, + { 0x0341, 0x28 }, + { 0x0344, 0x00 }, + { 0x0345, 0x00 }, + { 0x0346, 0x00 }, + { 0x0347, 0x00 }, + { 0x0348, 0x0c }, + { 0x0349, 0xcf }, + { 0x034a, 0x09 }, + { 0x034b, 0x9f }, + { 0x0220, 0x00 }, + { 0x0221, 0x11 }, + { 0x0381, 0x01 }, + { 0x0383, 0x01 }, + { 0x0385, 0x01 }, + { 0x0387, 0x01 }, + { 0x0900, 0x01 }, + { 0x0901, 0x22 }, + { 0x0902, 0x0a }, + { 0x3140, 0x02 }, + { 0x3141, 0x00 }, + { 0x3f0d, 0x0a }, + { 0x3f14, 0x01 }, + { 0x3f3c, 0x02 }, + { 0x3f4d, 0x01 }, + { 0x3f4c, 0x01 }, + { 0x4254, 0x7f }, + { 0x0401, 0x00 }, + { 0x0404, 0x00 }, + { 0x0405, 0x10 }, + { 0x0408, 0x00 }, + { 0x0409, 0x00 }, + { 0x040a, 0x00 }, + { 0x040b, 0x00 }, + { 0x040c, 0x06 }, + { 0x040d, 0x68 }, + { 0x040e, 0x04 }, + { 0x040f, 0xd0 }, + { 0x034c, 0x06 }, + { 0x034d, 0x68 }, + { 0x034e, 0x04 }, + { 0x034f, 0xd0 }, + { 0x3261, 0x00 }, + { 0x3264, 0x00 }, + { 0x3265, 0x10 }, + { 0x0301, 0x06 }, + { 0x0303, 0x04 }, + { 0x0305, 0x03 }, + { 0x0306, 0x01 }, + { 0x0307, 0x36 }, + { 0x0309, 0x0a }, + { 0x030b, 0x02 }, + { 0x030d, 0x04 }, + { 0x030e, 0x01 }, + { 0x030f, 0x2c }, + { 0x0310, 0x01 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x0822, 0x00 }, + { 0x0823, 0x00 }, + { 0x3e20, 0x01 }, + { 0x3e37, 0x00 }, + { 0x3e3b, 0x01 }, + { 0x38a3, 0x01 }, + { 0x38a8, 0x00 }, + { 0x38a9, 
0x00 }, + { 0x38aa, 0x00 }, + { 0x38ab, 0x00 }, + { 0x3234, 0x00 }, + { 0x3fc1, 0x00 }, + { 0x3235, 0x00 }, + { 0x3802, 0x00 }, + { 0x3143, 0x04 }, + { 0x360a, 0x00 }, + { 0x0b00, 0x00 }, + { 0x0106, 0x00 }, + { 0x0b05, 0x01 }, + { 0x0b06, 0x01 }, + { 0x3230, 0x00 }, + { 0x3602, 0x01 }, + { 0x3607, 0x01 }, + { 0x3c00, 0x00 }, + { 0x3c01, 0xba }, + { 0x3c02, 0xc8 }, + { 0x3c03, 0xaa }, + { 0x3c04, 0x91 }, + { 0x3c05, 0x54 }, + { 0x3c06, 0x26 }, + { 0x3c07, 0x20 }, + { 0x3c08, 0x51 }, + { 0x3d80, 0x00 }, + { 0x3f50, 0x00 }, + { 0x3f56, 0x00 }, + { 0x3f57, 0x30 }, + { 0x3f78, 0x00 }, + { 0x3f79, 0x34 }, + { 0x3f7c, 0x00 }, + { 0x3f7d, 0x00 }, + { 0x3fba, 0x00 }, + { 0x3fbb, 0x00 }, + { 0xa081, 0x04 }, + { 0xe014, 0x00 }, + { 0x0202, 0x05 }, + { 0x0203, 0x16 }, + { 0x0224, 0x01 }, + { 0x0225, 0xf4 }, + { 0x0204, 0x00 }, + { 0x0205, 0x00 }, + { 0x0216, 0x00 }, + { 0x0217, 0x00 }, + { 0x020e, 0x01 }, + { 0x020f, 0x00 }, + { 0x0210, 0x01 }, + { 0x0211, 0x00 }, + { 0x0212, 0x01 }, + { 0x0213, 0x00 }, + { 0x0214, 0x01 }, + { 0x0215, 0x00 }, + { 0x0218, 0x01 }, + { 0x0219, 0x00 }, + { 0x3614, 0x00 }, + { 0x3616, 0x0d }, + { 0x3617, 0x56 }, + { 0xb612, 0x20 }, + { 0xb613, 0x20 }, + { 0xb614, 0x20 }, + { 0xb615, 0x20 }, + { 0xb616, 0x0a }, + { 0xb617, 0x0a }, + { 0xb618, 0x20 }, + { 0xb619, 0x20 }, + { 0xb61a, 0x20 }, + { 0xb61b, 0x20 }, + { 0xb61c, 0x0a }, + { 0xb61d, 0x0a }, + { 0xb666, 0x30 }, + { 0xb667, 0x30 }, + { 0xb668, 0x30 }, + { 0xb669, 0x30 }, + { 0xb66a, 0x14 }, + { 0xb66b, 0x14 }, + { 0xb66c, 0x20 }, + { 0xb66d, 0x20 }, + { 0xb66e, 0x20 }, + { 0xb66f, 0x20 }, + { 0xb670, 0x10 }, + { 0xb671, 0x10 }, + { 0x3237, 0x00 }, + { 0x3900, 0x00 }, + { 0x3901, 0x00 }, + { 0x3902, 0x00 }, + { 0x3904, 0x00 }, + { 0x3905, 0x00 }, + { 0x3906, 0x00 }, + { 0x3907, 0x00 }, + { 0x3908, 0x00 }, + { 0x3909, 0x00 }, + { 0x3912, 0x00 }, + { 0x3930, 0x00 }, + { 0x3931, 0x00 }, + { 0x3933, 0x00 }, + { 0x3934, 0x00 }, + { 0x3935, 0x00 }, + { 0x3936, 0x00 }, + { 0x3937, 0x00 }, + { 0x30ac, 0x00 }, +}; + +static const struct imx319_reg mode_1640x922_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x08 }, + { 0x0343, 0x20 }, + { 0x0340, 0x05 }, + { 0x0341, 0x00 }, + { 0x0344, 0x00 }, + { 0x0345, 0x00 }, + { 0x0346, 0x01 }, + { 0x0347, 0x30 }, + { 0x0348, 0x0c }, + { 0x0349, 0xcf }, + { 0x034a, 0x08 }, + { 0x034b, 0x6f }, + { 0x0220, 0x00 }, + { 0x0221, 0x11 }, + { 0x0381, 0x01 }, + { 0x0383, 0x01 }, + { 0x0385, 0x01 }, + { 0x0387, 0x01 }, + { 0x0900, 0x01 }, + { 0x0901, 0x22 }, + { 0x0902, 0x0a }, + { 0x3140, 0x02 }, + { 0x3141, 0x00 }, + { 0x3f0d, 0x0a }, + { 0x3f14, 0x01 }, + { 0x3f3c, 0x02 }, + { 0x3f4d, 0x01 }, + { 0x3f4c, 0x01 }, + { 0x4254, 0x7f }, + { 0x0401, 0x00 }, + { 0x0404, 0x00 }, + { 0x0405, 0x10 }, + { 0x0408, 0x00 }, + { 0x0409, 0x00 }, + { 0x040a, 0x00 }, + { 0x040b, 0x02 }, + { 0x040c, 0x06 }, + { 0x040d, 0x68 }, + { 0x040e, 0x03 }, + { 0x040f, 0x9a }, + { 0x034c, 0x06 }, + { 0x034d, 0x68 }, + { 0x034e, 0x03 }, + { 0x034f, 0x9a }, + { 0x3261, 0x00 }, + { 0x3264, 0x00 }, + { 0x3265, 0x10 }, + { 0x0301, 0x06 }, + { 0x0303, 0x04 }, + { 0x0305, 0x03 }, + { 0x0306, 0x01 }, + { 0x0307, 0x2c }, + { 0x0309, 0x0a }, + { 0x030b, 0x02 }, + { 0x030d, 0x04 }, + { 0x030e, 0x01 }, + { 0x030f, 0x2c }, + { 0x0310, 0x01 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x0822, 0x00 }, + { 0x0823, 0x00 }, + { 0x3e20, 0x01 }, + { 0x3e37, 0x00 }, + { 0x3e3b, 0x01 }, + { 0x38a3, 0x01 }, + { 0x38a8, 0x00 }, + { 0x38a9, 0x00 }, + { 0x38aa, 0x00 }, + { 0x38ab, 0x00 }, + { 0x3234, 0x00 }, + { 
0x3fc1, 0x00 }, + { 0x3235, 0x00 }, + { 0x3802, 0x00 }, + { 0x3143, 0x04 }, + { 0x360a, 0x00 }, + { 0x0b00, 0x00 }, + { 0x0106, 0x00 }, + { 0x0b05, 0x01 }, + { 0x0b06, 0x01 }, + { 0x3230, 0x00 }, + { 0x3602, 0x01 }, + { 0x3607, 0x01 }, + { 0x3c00, 0x00 }, + { 0x3c01, 0xba }, + { 0x3c02, 0xc8 }, + { 0x3c03, 0xaa }, + { 0x3c04, 0x91 }, + { 0x3c05, 0x54 }, + { 0x3c06, 0x26 }, + { 0x3c07, 0x20 }, + { 0x3c08, 0x51 }, + { 0x3d80, 0x00 }, + { 0x3f50, 0x00 }, + { 0x3f56, 0x00 }, + { 0x3f57, 0x30 }, + { 0x3f78, 0x00 }, + { 0x3f79, 0x34 }, + { 0x3f7c, 0x00 }, + { 0x3f7d, 0x00 }, + { 0x3fba, 0x00 }, + { 0x3fbb, 0x00 }, + { 0xa081, 0x04 }, + { 0xe014, 0x00 }, + { 0x0202, 0x04 }, + { 0x0203, 0xee }, + { 0x0224, 0x01 }, + { 0x0225, 0xf4 }, + { 0x0204, 0x00 }, + { 0x0205, 0x00 }, + { 0x0216, 0x00 }, + { 0x0217, 0x00 }, + { 0x020e, 0x01 }, + { 0x020f, 0x00 }, + { 0x0210, 0x01 }, + { 0x0211, 0x00 }, + { 0x0212, 0x01 }, + { 0x0213, 0x00 }, + { 0x0214, 0x01 }, + { 0x0215, 0x00 }, + { 0x0218, 0x01 }, + { 0x0219, 0x00 }, + { 0x3614, 0x00 }, + { 0x3616, 0x0d }, + { 0x3617, 0x56 }, + { 0xb612, 0x20 }, + { 0xb613, 0x20 }, + { 0xb614, 0x20 }, + { 0xb615, 0x20 }, + { 0xb616, 0x0a }, + { 0xb617, 0x0a }, + { 0xb618, 0x20 }, + { 0xb619, 0x20 }, + { 0xb61a, 0x20 }, + { 0xb61b, 0x20 }, + { 0xb61c, 0x0a }, + { 0xb61d, 0x0a }, + { 0xb666, 0x30 }, + { 0xb667, 0x30 }, + { 0xb668, 0x30 }, + { 0xb669, 0x30 }, + { 0xb66a, 0x14 }, + { 0xb66b, 0x14 }, + { 0xb66c, 0x20 }, + { 0xb66d, 0x20 }, + { 0xb66e, 0x20 }, + { 0xb66f, 0x20 }, + { 0xb670, 0x10 }, + { 0xb671, 0x10 }, + { 0x3237, 0x00 }, + { 0x3900, 0x00 }, + { 0x3901, 0x00 }, + { 0x3902, 0x00 }, + { 0x3904, 0x00 }, + { 0x3905, 0x00 }, + { 0x3906, 0x00 }, + { 0x3907, 0x00 }, + { 0x3908, 0x00 }, + { 0x3909, 0x00 }, + { 0x3912, 0x00 }, + { 0x3930, 0x00 }, + { 0x3931, 0x00 }, + { 0x3933, 0x00 }, + { 0x3934, 0x00 }, + { 0x3935, 0x00 }, + { 0x3936, 0x00 }, + { 0x3937, 0x00 }, + { 0x30ac, 0x00 }, +}; + +static const struct imx319_reg mode_1296x736_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x08 }, + { 0x0343, 0x20 }, + { 0x0340, 0x05 }, + { 0x0341, 0x00 }, + { 0x0344, 0x00 }, + { 0x0345, 0x00 }, + { 0x0346, 0x01 }, + { 0x0347, 0xf0 }, + { 0x0348, 0x0c }, + { 0x0349, 0xcf }, + { 0x034a, 0x07 }, + { 0x034b, 0xaf }, + { 0x0220, 0x00 }, + { 0x0221, 0x11 }, + { 0x0381, 0x01 }, + { 0x0383, 0x01 }, + { 0x0385, 0x01 }, + { 0x0387, 0x01 }, + { 0x0900, 0x01 }, + { 0x0901, 0x22 }, + { 0x0902, 0x0a }, + { 0x3140, 0x02 }, + { 0x3141, 0x00 }, + { 0x3f0d, 0x0a }, + { 0x3f14, 0x01 }, + { 0x3f3c, 0x02 }, + { 0x3f4d, 0x01 }, + { 0x3f4c, 0x01 }, + { 0x4254, 0x7f }, + { 0x0401, 0x00 }, + { 0x0404, 0x00 }, + { 0x0405, 0x10 }, + { 0x0408, 0x00 }, + { 0x0409, 0xac }, + { 0x040a, 0x00 }, + { 0x040b, 0x00 }, + { 0x040c, 0x05 }, + { 0x040d, 0x10 }, + { 0x040e, 0x02 }, + { 0x040f, 0xe0 }, + { 0x034c, 0x05 }, + { 0x034d, 0x10 }, + { 0x034e, 0x02 }, + { 0x034f, 0xe0 }, + { 0x3261, 0x00 }, + { 0x3264, 0x00 }, + { 0x3265, 0x10 }, + { 0x0301, 0x06 }, + { 0x0303, 0x04 }, + { 0x0305, 0x03 }, + { 0x0306, 0x01 }, + { 0x0307, 0x2c }, + { 0x0309, 0x0a }, + { 0x030b, 0x02 }, + { 0x030d, 0x04 }, + { 0x030e, 0x01 }, + { 0x030f, 0x2c }, + { 0x0310, 0x01 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x0822, 0x00 }, + { 0x0823, 0x00 }, + { 0x3e20, 0x01 }, + { 0x3e37, 0x00 }, + { 0x3e3b, 0x01 }, + { 0x38a3, 0x01 }, + { 0x38a8, 0x00 }, + { 0x38a9, 0x00 }, + { 0x38aa, 0x00 }, + { 0x38ab, 0x00 }, + { 0x3234, 0x00 }, + { 0x3fc1, 0x00 }, + { 0x3235, 0x00 }, + { 0x3802, 0x00 }, + { 0x3143, 
0x04 }, + { 0x360a, 0x00 }, + { 0x0b00, 0x00 }, + { 0x0106, 0x00 }, + { 0x0b05, 0x01 }, + { 0x0b06, 0x01 }, + { 0x3230, 0x00 }, + { 0x3602, 0x01 }, + { 0x3607, 0x01 }, + { 0x3c00, 0x00 }, + { 0x3c01, 0xba }, + { 0x3c02, 0xc8 }, + { 0x3c03, 0xaa }, + { 0x3c04, 0x91 }, + { 0x3c05, 0x54 }, + { 0x3c06, 0x26 }, + { 0x3c07, 0x20 }, + { 0x3c08, 0x51 }, + { 0x3d80, 0x00 }, + { 0x3f50, 0x00 }, + { 0x3f56, 0x00 }, + { 0x3f57, 0x30 }, + { 0x3f78, 0x00 }, + { 0x3f79, 0x34 }, + { 0x3f7c, 0x00 }, + { 0x3f7d, 0x00 }, + { 0x3fba, 0x00 }, + { 0x3fbb, 0x00 }, + { 0xa081, 0x04 }, + { 0xe014, 0x00 }, + { 0x0202, 0x04 }, + { 0x0203, 0xee }, + { 0x0224, 0x01 }, + { 0x0225, 0xf4 }, + { 0x0204, 0x00 }, + { 0x0205, 0x00 }, + { 0x0216, 0x00 }, + { 0x0217, 0x00 }, + { 0x020e, 0x01 }, + { 0x020f, 0x00 }, + { 0x0210, 0x01 }, + { 0x0211, 0x00 }, + { 0x0212, 0x01 }, + { 0x0213, 0x00 }, + { 0x0214, 0x01 }, + { 0x0215, 0x00 }, + { 0x0218, 0x01 }, + { 0x0219, 0x00 }, + { 0x3614, 0x00 }, + { 0x3616, 0x0d }, + { 0x3617, 0x56 }, + { 0xb612, 0x20 }, + { 0xb613, 0x20 }, + { 0xb614, 0x20 }, + { 0xb615, 0x20 }, + { 0xb616, 0x0a }, + { 0xb617, 0x0a }, + { 0xb618, 0x20 }, + { 0xb619, 0x20 }, + { 0xb61a, 0x20 }, + { 0xb61b, 0x20 }, + { 0xb61c, 0x0a }, + { 0xb61d, 0x0a }, + { 0xb666, 0x30 }, + { 0xb667, 0x30 }, + { 0xb668, 0x30 }, + { 0xb669, 0x30 }, + { 0xb66a, 0x14 }, + { 0xb66b, 0x14 }, + { 0xb66c, 0x20 }, + { 0xb66d, 0x20 }, + { 0xb66e, 0x20 }, + { 0xb66f, 0x20 }, + { 0xb670, 0x10 }, + { 0xb671, 0x10 }, + { 0x3237, 0x00 }, + { 0x3900, 0x00 }, + { 0x3901, 0x00 }, + { 0x3902, 0x00 }, + { 0x3904, 0x00 }, + { 0x3905, 0x00 }, + { 0x3906, 0x00 }, + { 0x3907, 0x00 }, + { 0x3908, 0x00 }, + { 0x3909, 0x00 }, + { 0x3912, 0x00 }, + { 0x3930, 0x00 }, + { 0x3931, 0x00 }, + { 0x3933, 0x00 }, + { 0x3934, 0x00 }, + { 0x3935, 0x00 }, + { 0x3936, 0x00 }, + { 0x3937, 0x00 }, + { 0x30ac, 0x00 }, +}; + +static const struct imx319_reg mode_1280x720_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x08 }, + { 0x0343, 0x20 }, + { 0x0340, 0x05 }, + { 0x0341, 0x00 }, + { 0x0344, 0x00 }, + { 0x0345, 0x00 }, + { 0x0346, 0x02 }, + { 0x0347, 0x00 }, + { 0x0348, 0x0c }, + { 0x0349, 0xcf }, + { 0x034a, 0x07 }, + { 0x034b, 0x9f }, + { 0x0220, 0x00 }, + { 0x0221, 0x11 }, + { 0x0381, 0x01 }, + { 0x0383, 0x01 }, + { 0x0385, 0x01 }, + { 0x0387, 0x01 }, + { 0x0900, 0x01 }, + { 0x0901, 0x22 }, + { 0x0902, 0x0a }, + { 0x3140, 0x02 }, + { 0x3141, 0x00 }, + { 0x3f0d, 0x0a }, + { 0x3f14, 0x01 }, + { 0x3f3c, 0x02 }, + { 0x3f4d, 0x01 }, + { 0x3f4c, 0x01 }, + { 0x4254, 0x7f }, + { 0x0401, 0x00 }, + { 0x0404, 0x00 }, + { 0x0405, 0x10 }, + { 0x0408, 0x00 }, + { 0x0409, 0xb4 }, + { 0x040a, 0x00 }, + { 0x040b, 0x00 }, + { 0x040c, 0x05 }, + { 0x040d, 0x00 }, + { 0x040e, 0x02 }, + { 0x040f, 0xd0 }, + { 0x034c, 0x05 }, + { 0x034d, 0x00 }, + { 0x034e, 0x02 }, + { 0x034f, 0xd0 }, + { 0x3261, 0x00 }, + { 0x3264, 0x00 }, + { 0x3265, 0x10 }, + { 0x0301, 0x06 }, + { 0x0303, 0x04 }, + { 0x0305, 0x03 }, + { 0x0306, 0x01 }, + { 0x0307, 0x2c }, + { 0x0309, 0x0a }, + { 0x030b, 0x02 }, + { 0x030d, 0x04 }, + { 0x030e, 0x01 }, + { 0x030f, 0x2c }, + { 0x0310, 0x01 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x0822, 0x00 }, + { 0x0823, 0x00 }, + { 0x3e20, 0x01 }, + { 0x3e37, 0x00 }, + { 0x3e3b, 0x01 }, + { 0x38a3, 0x01 }, + { 0x38a8, 0x00 }, + { 0x38a9, 0x00 }, + { 0x38aa, 0x00 }, + { 0x38ab, 0x00 }, + { 0x3234, 0x00 }, + { 0x3fc1, 0x00 }, + { 0x3235, 0x00 }, + { 0x3802, 0x00 }, + { 0x3143, 0x04 }, + { 0x360a, 0x00 }, + { 0x0b00, 0x00 }, + { 0x0106, 0x00 }, + { 
0x0b05, 0x01 }, + { 0x0b06, 0x01 }, + { 0x3230, 0x00 }, + { 0x3602, 0x01 }, + { 0x3607, 0x01 }, + { 0x3c00, 0x00 }, + { 0x3c01, 0xba }, + { 0x3c02, 0xc8 }, + { 0x3c03, 0xaa }, + { 0x3c04, 0x91 }, + { 0x3c05, 0x54 }, + { 0x3c06, 0x26 }, + { 0x3c07, 0x20 }, + { 0x3c08, 0x51 }, + { 0x3d80, 0x00 }, + { 0x3f50, 0x00 }, + { 0x3f56, 0x00 }, + { 0x3f57, 0x30 }, + { 0x3f78, 0x00 }, + { 0x3f79, 0x34 }, + { 0x3f7c, 0x00 }, + { 0x3f7d, 0x00 }, + { 0x3fba, 0x00 }, + { 0x3fbb, 0x00 }, + { 0xa081, 0x04 }, + { 0xe014, 0x00 }, + { 0x0202, 0x04 }, + { 0x0203, 0xee }, + { 0x0224, 0x01 }, + { 0x0225, 0xf4 }, + { 0x0204, 0x00 }, + { 0x0205, 0x00 }, + { 0x0216, 0x00 }, + { 0x0217, 0x00 }, + { 0x020e, 0x01 }, + { 0x020f, 0x00 }, + { 0x0210, 0x01 }, + { 0x0211, 0x00 }, + { 0x0212, 0x01 }, + { 0x0213, 0x00 }, + { 0x0214, 0x01 }, + { 0x0215, 0x00 }, + { 0x0218, 0x01 }, + { 0x0219, 0x00 }, + { 0x3614, 0x00 }, + { 0x3616, 0x0d }, + { 0x3617, 0x56 }, + { 0xb612, 0x20 }, + { 0xb613, 0x20 }, + { 0xb614, 0x20 }, + { 0xb615, 0x20 }, + { 0xb616, 0x0a }, + { 0xb617, 0x0a }, + { 0xb618, 0x20 }, + { 0xb619, 0x20 }, + { 0xb61a, 0x20 }, + { 0xb61b, 0x20 }, + { 0xb61c, 0x0a }, + { 0xb61d, 0x0a }, + { 0xb666, 0x30 }, + { 0xb667, 0x30 }, + { 0xb668, 0x30 }, + { 0xb669, 0x30 }, + { 0xb66a, 0x14 }, + { 0xb66b, 0x14 }, + { 0xb66c, 0x20 }, + { 0xb66d, 0x20 }, + { 0xb66e, 0x20 }, + { 0xb66f, 0x20 }, + { 0xb670, 0x10 }, + { 0xb671, 0x10 }, + { 0x3237, 0x00 }, + { 0x3900, 0x00 }, + { 0x3901, 0x00 }, + { 0x3902, 0x00 }, + { 0x3904, 0x00 }, + { 0x3905, 0x00 }, + { 0x3906, 0x00 }, + { 0x3907, 0x00 }, + { 0x3908, 0x00 }, + { 0x3909, 0x00 }, + { 0x3912, 0x00 }, + { 0x3930, 0x00 }, + { 0x3931, 0x00 }, + { 0x3933, 0x00 }, + { 0x3934, 0x00 }, + { 0x3935, 0x00 }, + { 0x3936, 0x00 }, + { 0x3937, 0x00 }, + { 0x30ac, 0x00 }, +}; + +static const char * const imx319_test_pattern_menu[] = { + "Disabled", + "100% color bars", + "Solid color", + "Fade to gray color bars", + "PN9" +}; + +static const int imx319_test_pattern_val[] = { + IMX319_TEST_PATTERN_DISABLED, + IMX319_TEST_PATTERN_COLOR_BARS, + IMX319_TEST_PATTERN_SOLID_COLOR, + IMX319_TEST_PATTERN_GRAY_COLOR_BARS, + IMX319_TEST_PATTERN_PN9, +}; + +/* Configurations for supported link frequencies */ +/* Menu items for LINK_FREQ V4L2 control */ +static const s64 link_freq_menu_items[] = { + 360000000, +}; + +/* Mode configs */ +static const struct imx319_mode supported_modes[] = { + { + .width = 3280, + .height = 2464, + .fll_def = 0xa78, + .fll_min = 0xa78, + .llp = 0xf80, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_3280x2464_regs), + .regs = mode_3280x2464_regs, + }, + }, + { + .width = 3264, + .height = 2448, + .fll_def = 0xa78, + .fll_min = 0xa78, + .llp = 0xf80, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_3264x2448_regs), + .regs = mode_3264x2448_regs, + }, + }, + { + .width = 1936, + .height = 1096, + .fll_def = 0x772, + .fll_min = 0x772, + .llp = 0xf80, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1936x1096_regs), + .regs = mode_1936x1096_regs, + }, + }, + { + .width = 1920, + .height = 1080, + .fll_def = 0x772, + .fll_min = 0x772, + .llp = 0xf80, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1920x1080_regs), + .regs = mode_1920x1080_regs, + }, + }, + { + .width = 1640, + .height = 1232, + .fll_def = 0x528, + .fll_min = 0x528, + .llp = 0x820, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1640x1232_regs), + .regs = mode_1640x1232_regs, + }, + }, + { + .width = 1640, + .height = 922, + .fll_def = 0x500, + .fll_min = 0x500, + .llp = 0x820, + .reg_list = { + .num_of_regs = 
ARRAY_SIZE(mode_1640x922_regs), + .regs = mode_1640x922_regs, + }, + }, + { + .width = 1296, + .height = 736, + .fll_def = 0x500, + .fll_min = 0x500, + .llp = 0x820, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1296x736_regs), + .regs = mode_1296x736_regs, + }, + }, + { + .width = 1280, + .height = 720, + .fll_def = 0x500, + .fll_min = 0x500, + .llp = 0x820, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1280x720_regs), + .regs = mode_1280x720_regs, + }, + }, +}; + +static inline struct imx319 *to_imx319(struct v4l2_subdev *_sd) +{ + return container_of(_sd, struct imx319, sd); +} + +/* Get bayer order based on flip setting. */ +static __u32 imx319_get_format_code(struct imx319 *imx319) +{ + /* + * Only one bayer order is supported. + * It depends on the flip settings. + */ + static const __u32 codes[2][2] = { + { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, }, + { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10, }, + }; + + return codes[imx319->vflip->val][imx319->hflip->val]; +} + +/* Read registers up to 4 at a time */ +static int imx319_read_reg(struct imx319 *imx319, u16 reg, u32 len, u32 *val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd); + struct i2c_msg msgs[2]; + u8 addr_buf[2]; + u8 data_buf[4] = { 0 }; + int ret; + + if (len > 4) + return -EINVAL; + + put_unaligned_be16(reg, addr_buf); + /* Write register address */ + msgs[0].addr = client->addr; + msgs[0].flags = 0; + msgs[0].len = ARRAY_SIZE(addr_buf); + msgs[0].buf = addr_buf; + + /* Read data from register */ + msgs[1].addr = client->addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = len; + msgs[1].buf = &data_buf[4 - len]; + + ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (ret != ARRAY_SIZE(msgs)) + return -EIO; + + *val = get_unaligned_be32(data_buf); + + return 0; +} + +/* Write registers up to 4 at a time */ +static int imx319_write_reg(struct imx319 *imx319, u16 reg, u32 len, u32 val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd); + u8 buf[6]; + + if (len > 4) + return -EINVAL; + + put_unaligned_be16(reg, buf); + put_unaligned_be32(val << (8 * (4 - len)), buf + 2); + if (i2c_master_send(client, buf, len + 2) != len + 2) + return -EIO; + + return 0; +} + +/* Write a list of registers */ +static int imx319_write_regs(struct imx319 *imx319, + const struct imx319_reg *regs, u32 len) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd); + int ret; + u32 i; + + for (i = 0; i < len; i++) { + ret = imx319_write_reg(imx319, regs[i].address, 1, + regs[i].val); + if (ret) { + dev_err_ratelimited( + &client->dev, + "Failed to write reg 0x%4.4x. 
error = %d\n", + regs[i].address, ret); + + return ret; + } + } + + return 0; +} + +/* Open sub-device */ +static int imx319_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct imx319 *imx319 = to_imx319(sd); + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format(sd, fh->pad, 0); + + mutex_lock(&imx319->mutex); + + /* Initialize try_fmt */ + try_fmt->width = imx319->cur_mode->width; + try_fmt->height = imx319->cur_mode->height; + try_fmt->code = imx319_get_format_code(imx319); + try_fmt->field = V4L2_FIELD_NONE; + + mutex_unlock(&imx319->mutex); + + return 0; +} + +static int imx319_update_digital_gain(struct imx319 *imx319, u32 d_gain) +{ + int ret; + + ret = imx319_write_reg(imx319, IMX319_REG_DPGA_USE_GLOBAL_GAIN, 1, 1); + if (ret) + return ret; + + /* Digital gain = (d_gain & 0xFF00) + (d_gain & 0xFF)/256 times */ + return imx319_write_reg(imx319, IMX319_REG_DIG_GAIN_GLOBAL, 2, d_gain); +} + +static int imx319_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct imx319 *imx319 = container_of(ctrl->handler, + struct imx319, ctrl_handler); + struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd); + s64 max; + int ret; + + /* Propagate change of current control to all related controls */ + switch (ctrl->id) { + case V4L2_CID_VBLANK: + /* Update max exposure while meeting expected vblanking */ + max = imx319->cur_mode->height + ctrl->val - 18; + __v4l2_ctrl_modify_range(imx319->exposure, + imx319->exposure->minimum, + max, imx319->exposure->step, max); + break; + } + + /* + * Applying V4L2 control value only happens + * when power is up for streaming + */ + if (pm_runtime_get_if_in_use(&client->dev) == 0) + return 0; + + switch (ctrl->id) { + case V4L2_CID_ANALOGUE_GAIN: + /* Analog gain = 1024/(1024 - ctrl->val) times */ + ret = imx319_write_reg(imx319, IMX319_REG_ANALOG_GAIN, + 2, ctrl->val); + break; + case V4L2_CID_DIGITAL_GAIN: + ret = imx319_update_digital_gain(imx319, ctrl->val); + break; + case V4L2_CID_EXPOSURE: + ret = imx319_write_reg(imx319, IMX319_REG_EXPOSURE, + 2, ctrl->val); + break; + case V4L2_CID_VBLANK: + /* Update FLL that meets expected vertical blanking */ + ret = imx319_write_reg(imx319, IMX319_REG_FLL, 2, + imx319->cur_mode->height + ctrl->val); + break; + case V4L2_CID_TEST_PATTERN: + ret = imx319_write_reg(imx319, IMX319_REG_TEST_PATTERN, + 2, imx319_test_pattern_val[ctrl->val]); + break; + case V4L2_CID_HFLIP: + case V4L2_CID_VFLIP: + ret = imx319_write_reg(imx319, IMX319_REG_ORIENTATION, 1, + imx319->hflip->val | + imx319->vflip->val << 1); + break; + default: + ret = -EINVAL; + dev_info(&client->dev, + "ctrl(id:0x%x,val:0x%x) is not handled\n", + ctrl->id, ctrl->val); + break; + } + + pm_runtime_put(&client->dev); + + return ret; +} + +static const struct v4l2_ctrl_ops imx319_ctrl_ops = { + .s_ctrl = imx319_set_ctrl, +}; + +static int imx319_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct imx319 *imx319 = to_imx319(sd); + + if (code->index > 0) + return -EINVAL; + + code->code = imx319_get_format_code(imx319); + + return 0; +} + +static int imx319_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct imx319 *imx319 = to_imx319(sd); + + if (fse->index >= ARRAY_SIZE(supported_modes)) + return -EINVAL; + + if (fse->code != imx319_get_format_code(imx319)) + return -EINVAL; + + fse->min_width = supported_modes[fse->index].width; + fse->max_width = fse->min_width; + 
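/* Modes are discrete frame sizes, so min and max are equal. */ + 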
fse->min_height = supported_modes[fse->index].height; + fse->max_height = fse->min_height; + + return 0; +} + +static void imx319_update_pad_format(struct imx319 *imx319, + const struct imx319_mode *mode, + struct v4l2_subdev_format *fmt) +{ + fmt->format.width = mode->width; + fmt->format.height = mode->height; + fmt->format.code = imx319_get_format_code(imx319); + fmt->format.field = V4L2_FIELD_NONE; + fmt->format.colorspace = V4L2_COLORSPACE_DEFAULT; + fmt->format.xfer_func = V4L2_XFER_FUNC_DEFAULT; + fmt->format.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + fmt->format.quantization = V4L2_QUANTIZATION_DEFAULT; +} + +static int imx319_do_get_pad_format(struct imx319 *imx319, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct v4l2_mbus_framefmt *framefmt; + struct v4l2_subdev *sd = &imx319->sd; + + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); + fmt->format = *framefmt; + } else { + imx319_update_pad_format(imx319, imx319->cur_mode, fmt); + } + + return 0; +} + +static int imx319_get_pad_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct imx319 *imx319 = to_imx319(sd); + int ret; + + mutex_lock(&imx319->mutex); + ret = imx319_do_get_pad_format(imx319, cfg, fmt); + mutex_unlock(&imx319->mutex); + + return ret; +} + +static int +imx319_set_pad_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct imx319 *imx319 = to_imx319(sd); + const struct imx319_mode *mode; + struct v4l2_mbus_framefmt *framefmt; + s32 vblank_def; + s32 vblank_min; + s64 h_blank; + s64 pixel_rate; + + mutex_lock(&imx319->mutex); + + /* + * Only one bayer order is supported. + * It depends on the flip settings. + */ + fmt->format.code = imx319_get_format_code(imx319); + + mode = v4l2_find_nearest_size(supported_modes, + ARRAY_SIZE(supported_modes), width, height, + fmt->format.width, fmt->format.height); + imx319_update_pad_format(imx319, mode, fmt); + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); + *framefmt = fmt->format; + } else { + imx319->cur_mode = mode; + pixel_rate = + (link_freq_menu_items[0] * 2 * 4) / 10; + __v4l2_ctrl_s_ctrl_int64(imx319->pixel_rate, pixel_rate); + /* Update limits and set FPS to default */ + vblank_def = imx319->cur_mode->fll_def - + imx319->cur_mode->height; + vblank_min = imx319->cur_mode->fll_min - + imx319->cur_mode->height; + __v4l2_ctrl_modify_range( + imx319->vblank, vblank_min, + IMX319_FLL_MAX - imx319->cur_mode->height, 1, + vblank_def); + __v4l2_ctrl_s_ctrl(imx319->vblank, vblank_def); + h_blank = mode->llp - imx319->cur_mode->width; + /* + * Currently hblank is not changeable. + * So FPS control is done only by vblank. 
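+ * e.g. at the default 3280x2464 mode (values from supported_modes): + * pixel_rate = 288000000, llp = 0xf80 (3968), fll_def = 0xa78 (2680), + * so the default frame rate is roughly 288000000 / (3968 * 2680), + * i.e. ~27 fps; raising vblank lowers the frame rate accordingly.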
+ */ + __v4l2_ctrl_modify_range(imx319->hblank, h_blank, + h_blank, 1, h_blank); + } + + mutex_unlock(&imx319->mutex); + + return 0; +} + +/* Start streaming */ +static int imx319_start_streaming(struct imx319 *imx319) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd); + const struct imx319_reg_list *reg_list; + int ret; + + /* Global Setting */ + reg_list = &imx319_global_setting; + ret = imx319_write_regs(imx319, reg_list->regs, reg_list->num_of_regs); + if (ret) { + dev_err(&client->dev, "%s failed to set global settings\n", + __func__); + return ret; + } + + /* Apply default values of current mode */ + reg_list = &imx319->cur_mode->reg_list; + ret = imx319_write_regs(imx319, reg_list->regs, reg_list->num_of_regs); + if (ret) { + dev_err(&client->dev, "%s failed to set mode\n", __func__); + return ret; + } + + /* Apply customized values from user */ + ret = __v4l2_ctrl_handler_setup(imx319->sd.ctrl_handler); + if (ret) + return ret; + + return imx319_write_reg(imx319, IMX319_REG_MODE_SELECT, + 1, IMX319_MODE_STREAMING); +} + +/* Stop streaming */ +static int imx319_stop_streaming(struct imx319 *imx319) +{ + return imx319_write_reg(imx319, IMX319_REG_MODE_SELECT, + 1, IMX319_MODE_STANDBY); +} + +static int imx319_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct imx319 *imx319 = to_imx319(sd); + struct i2c_client *client = v4l2_get_subdevdata(sd); + int ret = 0; + + mutex_lock(&imx319->mutex); + if (imx319->streaming == enable) { + mutex_unlock(&imx319->mutex); + return 0; + } + + if (enable) { + ret = pm_runtime_get_sync(&client->dev); + if (ret < 0) { + pm_runtime_put_noidle(&client->dev); + goto err_unlock; + } + + /* + * Apply default & customized values + * and then start streaming. + */ + ret = imx319_start_streaming(imx319); + if (ret) + goto err_rpm_put; + } else { + imx319_stop_streaming(imx319); + pm_runtime_put(&client->dev); + } + + imx319->streaming = enable; + mutex_unlock(&imx319->mutex); + + return ret; + +err_rpm_put: + pm_runtime_put(&client->dev); +err_unlock: + mutex_unlock(&imx319->mutex); + + return ret; +} + +static int __maybe_unused imx319_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct imx319 *imx319 = to_imx319(sd); + + if (imx319->streaming) + imx319_stop_streaming(imx319); + + return 0; +} + +static int __maybe_unused imx319_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct imx319 *imx319 = to_imx319(sd); + int ret; + + if (imx319->streaming) { + ret = imx319_start_streaming(imx319); + if (ret) + goto error; + } + + return 0; + +error: + imx319_stop_streaming(imx319); + imx319->streaming = 0; + return ret; +} + +/* Verify chip ID */ +static int imx319_identify_module(struct imx319 *imx319) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd); + int ret; + u32 val; + + ret = imx319_read_reg(imx319, IMX319_REG_CHIP_ID, 2, &val); + if (ret) + return ret; + + if (val != IMX319_CHIP_ID) { + dev_err(&client->dev, "chip id mismatch: %x!=%x\n", + IMX319_CHIP_ID, val); + return -EIO; + } + return 0; +} + +static const struct v4l2_subdev_core_ops imx319_subdev_core_ops = { + .subscribe_event = v4l2_ctrl_subdev_subscribe_event, + .unsubscribe_event = v4l2_event_subdev_unsubscribe, +}; + +static const struct v4l2_subdev_video_ops imx319_video_ops = { + .s_stream = imx319_set_stream, +}; + +static const struct v4l2_subdev_pad_ops imx319_pad_ops = 
{ + .enum_mbus_code = imx319_enum_mbus_code, + .get_fmt = imx319_get_pad_format, + .set_fmt = imx319_set_pad_format, + .enum_frame_size = imx319_enum_frame_size, +}; + +static const struct v4l2_subdev_ops imx319_subdev_ops = { + .core = &imx319_subdev_core_ops, + .video = &imx319_video_ops, + .pad = &imx319_pad_ops, +}; + +static const struct media_entity_operations imx319_subdev_entity_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +static const struct v4l2_subdev_internal_ops imx319_internal_ops = { + .open = imx319_open, +}; + +/* Initialize control handlers */ +static int imx319_init_controls(struct imx319 *imx319) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd); + struct v4l2_ctrl_handler *ctrl_hdlr; + s64 exposure_max; + s64 vblank_def; + s64 vblank_min; + s64 hblank; + s64 pixel_rate; + const struct imx319_mode *mode; + int ret; + + ctrl_hdlr = &imx319->ctrl_handler; + ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10); + if (ret) + return ret; + + ctrl_hdlr->lock = &imx319->mutex; + imx319->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, + &imx319_ctrl_ops, + V4L2_CID_LINK_FREQ, + 0, + 0, + link_freq_menu_items); + if (imx319->link_freq) + imx319->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */ + pixel_rate = (link_freq_menu_items[0] * 2 * 4) / 10; + /* By default, PIXEL_RATE is read only */ + imx319->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx319_ctrl_ops, + V4L2_CID_PIXEL_RATE, + pixel_rate, pixel_rate, + 1, pixel_rate); + + /* Initialize vblank/hblank/exposure parameters based on current mode */ + mode = imx319->cur_mode; + vblank_def = mode->fll_def - mode->height; + vblank_min = mode->fll_min - mode->height; + imx319->vblank = v4l2_ctrl_new_std( + ctrl_hdlr, &imx319_ctrl_ops, V4L2_CID_VBLANK, + vblank_min, IMX319_FLL_MAX - mode->height, 1, + vblank_def); + + hblank = mode->llp - mode->width; + imx319->hblank = v4l2_ctrl_new_std( + ctrl_hdlr, &imx319_ctrl_ops, V4L2_CID_HBLANK, + hblank, hblank, 1, hblank); + if (imx319->hblank) + imx319->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + exposure_max = mode->fll_def - 18; + imx319->exposure = v4l2_ctrl_new_std( + ctrl_hdlr, &imx319_ctrl_ops, + V4L2_CID_EXPOSURE, IMX319_EXPOSURE_MIN, + exposure_max, IMX319_EXPOSURE_STEP, + IMX319_EXPOSURE_DEFAULT); + + imx319->hflip = v4l2_ctrl_new_std( + ctrl_hdlr, &imx319_ctrl_ops, + V4L2_CID_HFLIP, 0, 1, 1, 0); + imx319->vflip = v4l2_ctrl_new_std( + ctrl_hdlr, &imx319_ctrl_ops, + V4L2_CID_VFLIP, 0, 1, 1, 0); + + v4l2_ctrl_new_std(ctrl_hdlr, &imx319_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, + IMX319_ANA_GAIN_MIN, IMX319_ANA_GAIN_MAX, + IMX319_ANA_GAIN_STEP, IMX319_ANA_GAIN_DEFAULT); + + /* Digital gain */ + v4l2_ctrl_new_std(ctrl_hdlr, &imx319_ctrl_ops, V4L2_CID_DIGITAL_GAIN, + IMX319_DGTL_GAIN_MIN, IMX319_DGTL_GAIN_MAX, + IMX319_DGTL_GAIN_STEP, IMX319_DGTL_GAIN_DEFAULT); + + v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx319_ctrl_ops, + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(imx319_test_pattern_menu) - 1, + 0, 0, imx319_test_pattern_menu); + if (ctrl_hdlr->error) { + ret = ctrl_hdlr->error; + dev_err(&client->dev, "%s control init failed (%d)\n", + __func__, ret); + goto error; + } + + imx319->sd.ctrl_handler = ctrl_hdlr; + + return 0; + +error: + v4l2_ctrl_handler_free(ctrl_hdlr); + + return ret; +} + +static void imx319_free_controls(struct imx319 *imx319) +{ + v4l2_ctrl_handler_free(imx319->sd.ctrl_handler); +} + +static int imx319_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct 
imx319 *imx319; + int ret; + + imx319 = devm_kzalloc(&client->dev, sizeof(*imx319), GFP_KERNEL); + if (!imx319) + return -ENOMEM; + + mutex_init(&imx319->mutex); + + /* Initialize subdev */ + v4l2_i2c_subdev_init(&imx319->sd, client, &imx319_subdev_ops); + + /* Check module identity */ + ret = imx319_identify_module(imx319); + if (ret) { + dev_err(&client->dev, "failed to find sensor: %d\n", ret); + goto error_probe; + } + + /* Set default mode to max resolution */ + imx319->cur_mode = &supported_modes[0]; + + ret = imx319_init_controls(imx319); + if (ret) { + dev_err(&client->dev, "failed to init controls: %d\n", ret); + goto error_probe; + } + + /* Initialize subdev */ + imx319->sd.internal_ops = &imx319_internal_ops; + imx319->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | + V4L2_SUBDEV_FL_HAS_EVENTS; + imx319->sd.entity.ops = &imx319_subdev_entity_ops; + imx319->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; + + /* Initialize source pad */ + imx319->pad.flags = MEDIA_PAD_FL_SOURCE; + ret = media_entity_pads_init(&imx319->sd.entity, 1, &imx319->pad); + if (ret) { + dev_err(&client->dev, "%s failed:%d\n", __func__, ret); + goto error_handler_free; + } + + ret = v4l2_async_register_subdev_sensor_common(&imx319->sd); + if (ret < 0) + goto error_media_entity; + + /* + * Device is already turned on by i2c-core with ACPI domain PM. + * Enable runtime PM and turn off the device. + */ + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + pm_runtime_idle(&client->dev); + + return 0; + +error_media_entity: + media_entity_cleanup(&imx319->sd.entity); + +error_handler_free: + imx319_free_controls(imx319); + +error_probe: + mutex_destroy(&imx319->mutex); + + return ret; +} + +static int imx319_remove(struct i2c_client *client) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct imx319 *imx319 = to_imx319(sd); + + v4l2_async_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + imx319_free_controls(imx319); + + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); + + mutex_destroy(&imx319->mutex); + + return 0; +} + +static const struct i2c_device_id imx319_id_table[] = { + { "imx319", 0 }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(i2c, imx319_id_table); + +static const struct dev_pm_ops imx319_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(imx319_suspend, imx319_resume) +}; + +static const struct acpi_device_id imx319_acpi_ids[] = { + { "SONY319A" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(acpi, imx319_acpi_ids); + +static struct i2c_driver imx319_i2c_driver = { + .driver = { + .name = "imx319", + .owner = THIS_MODULE, + .pm = &imx319_pm_ops, + .acpi_match_table = ACPI_PTR(imx319_acpi_ids), + }, + .probe = imx319_probe, + .remove = imx319_remove, + .id_table = imx319_id_table, +}; +module_i2c_driver(imx319_i2c_driver); + +MODULE_AUTHOR("Qiu, Tianshu "); +MODULE_DESCRIPTION("Sony imx319 sensor driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c new file mode 100644 index 000000000000..3c092d02b1f5 --- /dev/null +++ b/drivers/media/i2c/imx355.c @@ -0,0 +1,1760 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2017 - 2018 Intel Corporation + +#include <asm/unaligned.h> +#include <linux/acpi.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/pm_runtime.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-device.h> +#include <media/v4l2-event.h> + +#define IMX355_REG_MODE_SELECT 0x0100 +#define IMX355_MODE_STANDBY 0x00 +#define IMX355_MODE_STREAMING 0x01 + +/* Chip ID */ +#define IMX355_REG_CHIP_ID 0x0016 +#define IMX355_CHIP_ID 0x0355 + +/* V_TIMING internal */ +#define IMX355_REG_FLL 0x0340 +#define 
IMX355_FLL_MAX 0xffff + +/* Exposure control */ +#define IMX355_REG_EXPOSURE 0x0202 +#define IMX355_EXPOSURE_MIN 1 +#define IMX355_EXPOSURE_STEP 1 +#define IMX355_EXPOSURE_DEFAULT 0x0282 + +/* Analog gain control */ +#define IMX355_REG_ANALOG_GAIN 0x0204 +#define IMX355_ANA_GAIN_MIN 0 +#define IMX355_ANA_GAIN_MAX 960 +#define IMX355_ANA_GAIN_STEP 1 +#define IMX355_ANA_GAIN_DEFAULT 0 + +/* Digital gain control */ +#define IMX355_REG_DPGA_USE_GLOBAL_GAIN 0x3070 +#define IMX355_REG_DIG_GAIN_GLOBAL 0x020e +#define IMX355_DGTL_GAIN_MIN 256 +#define IMX355_DGTL_GAIN_MAX 4095 +#define IMX355_DGTL_GAIN_STEP 1 +#define IMX355_DGTL_GAIN_DEFAULT 256 + +/* Test Pattern Control */ +#define IMX355_REG_TEST_PATTERN 0x0600 +#define IMX355_TEST_PATTERN_DISABLED 0 +#define IMX355_TEST_PATTERN_SOLID_COLOR 1 +#define IMX355_TEST_PATTERN_COLOR_BARS 2 +#define IMX355_TEST_PATTERN_GRAY_COLOR_BARS 3 +#define IMX355_TEST_PATTERN_PN9 4 + +/* Flip Control */ +#define IMX355_REG_ORIENTATION 0x0101 + +struct imx355_reg { + u16 address; + u8 val; +}; + +struct imx355_reg_list { + u32 num_of_regs; + const struct imx355_reg *regs; +}; + +/* Mode : resolution and related config&values */ +struct imx355_mode { + /* Frame width */ + u32 width; + /* Frame height */ + u32 height; + + /* V-timing */ + u32 fll_def; + u32 fll_min; + + /* H-timing */ + u32 llp; + + /* Default register values */ + struct imx355_reg_list reg_list; +}; + +struct imx355 { + struct v4l2_subdev sd; + struct media_pad pad; + + struct v4l2_ctrl_handler ctrl_handler; + /* V4L2 Controls */ + struct v4l2_ctrl *link_freq; + struct v4l2_ctrl *pixel_rate; + struct v4l2_ctrl *vblank; + struct v4l2_ctrl *hblank; + struct v4l2_ctrl *exposure; + struct v4l2_ctrl *vflip; + struct v4l2_ctrl *hflip; + + /* Current mode */ + const struct imx355_mode *cur_mode; + + /* + * Mutex for serialized access: + * Protect sensor set pad format and start/stop streaming safely. + * Protect access to sensor v4l2 controls. 
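+ * (This same mutex is installed as ctrl_handler->lock in + * imx355_init_controls(), so v4l2 control operations are + * serialized by it as well.)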
+ */ + struct mutex mutex; + + /* Streaming on/off */ + bool streaming; +}; + +static const struct imx355_reg imx355_global_regs[] = { + { 0x0136, 0x13 }, + { 0x0137, 0x33 }, + { 0x304e, 0x03 }, + { 0x4348, 0x16 }, + { 0x4350, 0x19 }, + { 0x4408, 0x0a }, + { 0x440c, 0x0b }, + { 0x4411, 0x5f }, + { 0x4412, 0x2c }, + { 0x4623, 0x00 }, + { 0x462c, 0x0f }, + { 0x462d, 0x00 }, + { 0x462e, 0x00 }, + { 0x4684, 0x54 }, + { 0x480a, 0x07 }, + { 0x4908, 0x07 }, + { 0x4909, 0x07 }, + { 0x490d, 0x0a }, + { 0x491e, 0x0f }, + { 0x4921, 0x06 }, + { 0x4923, 0x28 }, + { 0x4924, 0x28 }, + { 0x4925, 0x29 }, + { 0x4926, 0x29 }, + { 0x4927, 0x1f }, + { 0x4928, 0x20 }, + { 0x4929, 0x20 }, + { 0x492a, 0x20 }, + { 0x492c, 0x05 }, + { 0x492d, 0x06 }, + { 0x492e, 0x06 }, + { 0x492f, 0x06 }, + { 0x4930, 0x03 }, + { 0x4931, 0x04 }, + { 0x4932, 0x04 }, + { 0x4933, 0x05 }, + { 0x595e, 0x01 }, + { 0x5963, 0x01 }, + { 0x3030, 0x01 }, + { 0x3031, 0x01 }, + { 0x3045, 0x01 }, + { 0x4010, 0x00 }, + { 0x4011, 0x00 }, + { 0x4012, 0x00 }, + { 0x4013, 0x01 }, + { 0x68a8, 0xfe }, + { 0x68a9, 0xff }, + { 0x6888, 0x00 }, + { 0x6889, 0x00 }, + { 0x68b0, 0x00 }, + { 0x3058, 0x00 }, + { 0x305a, 0x00 }, +}; + +static const struct imx355_reg_list imx355_global_setting = { + .num_of_regs = ARRAY_SIZE(imx355_global_regs), + .regs = imx355_global_regs, +}; + +static const struct imx355_reg mode_3268x2448_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x0e }, + { 0x0343, 0x58 }, + { 0x0340, 0x0a }, + { 0x0341, 0x37 }, + { 0x0344, 0x00 }, + { 0x0345, 0x08 }, + { 0x0346, 0x00 }, + { 0x0347, 0x08 }, + { 0x0348, 0x0c }, + { 0x0349, 0xcb }, + { 0x034a, 0x09 }, + { 0x034b, 0x97 }, + { 0x0220, 0x00 }, + { 0x0222, 0x01 }, + { 0x0900, 0x00 }, + { 0x0901, 0x11 }, + { 0x0902, 0x00 }, + { 0x034c, 0x0c }, + { 0x034d, 0xc4 }, + { 0x034e, 0x09 }, + { 0x034f, 0x90 }, + { 0x0301, 0x05 }, + { 0x0303, 0x01 }, + { 0x0305, 0x02 }, + { 0x0306, 0x00 }, + { 0x0307, 0x78 }, + { 0x030b, 0x01 }, + { 0x030d, 0x02 }, + { 0x030e, 0x00 }, + { 0x030f, 0x4b }, + { 0x0310, 0x00 }, + { 0x0700, 0x00 }, + { 0x0701, 0x10 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x3088, 0x04 }, + { 0x6813, 0x02 }, + { 0x6835, 0x07 }, + { 0x6836, 0x01 }, + { 0x6837, 0x04 }, + { 0x684d, 0x07 }, + { 0x684e, 0x01 }, + { 0x684f, 0x04 }, +}; + +static const struct imx355_reg mode_3264x2448_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x0e }, + { 0x0343, 0x58 }, + { 0x0340, 0x0a }, + { 0x0341, 0x37 }, + { 0x0344, 0x00 }, + { 0x0345, 0x08 }, + { 0x0346, 0x00 }, + { 0x0347, 0x08 }, + { 0x0348, 0x0c }, + { 0x0349, 0xc7 }, + { 0x034a, 0x09 }, + { 0x034b, 0x97 }, + { 0x0220, 0x00 }, + { 0x0222, 0x01 }, + { 0x0900, 0x00 }, + { 0x0901, 0x11 }, + { 0x0902, 0x00 }, + { 0x034c, 0x0c }, + { 0x034d, 0xc0 }, + { 0x034e, 0x09 }, + { 0x034f, 0x90 }, + { 0x0301, 0x05 }, + { 0x0303, 0x01 }, + { 0x0305, 0x02 }, + { 0x0306, 0x00 }, + { 0x0307, 0x78 }, + { 0x030b, 0x01 }, + { 0x030d, 0x02 }, + { 0x030e, 0x00 }, + { 0x030f, 0x4b }, + { 0x0310, 0x00 }, + { 0x0700, 0x00 }, + { 0x0701, 0x10 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x3088, 0x04 }, + { 0x6813, 0x02 }, + { 0x6835, 0x07 }, + { 0x6836, 0x01 }, + { 0x6837, 0x04 }, + { 0x684d, 0x07 }, + { 0x684e, 0x01 }, + { 0x684f, 0x04 }, +}; + +static const struct imx355_reg mode_3280x2464_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x0e }, + { 0x0343, 0x58 }, + { 0x0340, 0x0a }, + { 0x0341, 0x37 }, + { 0x0344, 0x00 }, + { 0x0345, 0x00 }, + { 0x0346, 0x00 }, 
+ { 0x0347, 0x00 }, + { 0x0348, 0x0c }, + { 0x0349, 0xcf }, + { 0x034a, 0x09 }, + { 0x034b, 0x9f }, + { 0x0220, 0x00 }, + { 0x0222, 0x01 }, + { 0x0900, 0x00 }, + { 0x0901, 0x11 }, + { 0x0902, 0x00 }, + { 0x034c, 0x0c }, + { 0x034d, 0xd0 }, + { 0x034e, 0x09 }, + { 0x034f, 0xa0 }, + { 0x0301, 0x05 }, + { 0x0303, 0x01 }, + { 0x0305, 0x02 }, + { 0x0306, 0x00 }, + { 0x0307, 0x78 }, + { 0x030b, 0x01 }, + { 0x030d, 0x02 }, + { 0x030e, 0x00 }, + { 0x030f, 0x4b }, + { 0x0310, 0x00 }, + { 0x0700, 0x00 }, + { 0x0701, 0x10 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x3088, 0x04 }, + { 0x6813, 0x02 }, + { 0x6835, 0x07 }, + { 0x6836, 0x01 }, + { 0x6837, 0x04 }, + { 0x684d, 0x07 }, + { 0x684e, 0x01 }, + { 0x684f, 0x04 }, +}; + +static const struct imx355_reg mode_1940x1096_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x0e }, + { 0x0343, 0x58 }, + { 0x0340, 0x05 }, + { 0x0341, 0x1a }, + { 0x0344, 0x02 }, + { 0x0345, 0xa0 }, + { 0x0346, 0x02 }, + { 0x0347, 0xac }, + { 0x0348, 0x0a }, + { 0x0349, 0x33 }, + { 0x034a, 0x06 }, + { 0x034b, 0xf3 }, + { 0x0220, 0x00 }, + { 0x0222, 0x01 }, + { 0x0900, 0x00 }, + { 0x0901, 0x11 }, + { 0x0902, 0x00 }, + { 0x034c, 0x07 }, + { 0x034d, 0x94 }, + { 0x034e, 0x04 }, + { 0x034f, 0x48 }, + { 0x0301, 0x05 }, + { 0x0303, 0x01 }, + { 0x0305, 0x02 }, + { 0x0306, 0x00 }, + { 0x0307, 0x78 }, + { 0x030b, 0x01 }, + { 0x030d, 0x02 }, + { 0x030e, 0x00 }, + { 0x030f, 0x4b }, + { 0x0310, 0x00 }, + { 0x0700, 0x00 }, + { 0x0701, 0x10 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x3088, 0x04 }, + { 0x6813, 0x02 }, + { 0x6835, 0x07 }, + { 0x6836, 0x01 }, + { 0x6837, 0x04 }, + { 0x684d, 0x07 }, + { 0x684e, 0x01 }, + { 0x684f, 0x04 }, +}; + +static const struct imx355_reg mode_1936x1096_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x0e }, + { 0x0343, 0x58 }, + { 0x0340, 0x05 }, + { 0x0341, 0x1a }, + { 0x0344, 0x02 }, + { 0x0345, 0xa0 }, + { 0x0346, 0x02 }, + { 0x0347, 0xac }, + { 0x0348, 0x0a }, + { 0x0349, 0x2f }, + { 0x034a, 0x06 }, + { 0x034b, 0xf3 }, + { 0x0220, 0x00 }, + { 0x0222, 0x01 }, + { 0x0900, 0x00 }, + { 0x0901, 0x11 }, + { 0x0902, 0x00 }, + { 0x034c, 0x07 }, + { 0x034d, 0x90 }, + { 0x034e, 0x04 }, + { 0x034f, 0x48 }, + { 0x0301, 0x05 }, + { 0x0303, 0x01 }, + { 0x0305, 0x02 }, + { 0x0306, 0x00 }, + { 0x0307, 0x78 }, + { 0x030b, 0x01 }, + { 0x030d, 0x02 }, + { 0x030e, 0x00 }, + { 0x030f, 0x4b }, + { 0x0310, 0x00 }, + { 0x0700, 0x00 }, + { 0x0701, 0x10 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x3088, 0x04 }, + { 0x6813, 0x02 }, + { 0x6835, 0x07 }, + { 0x6836, 0x01 }, + { 0x6837, 0x04 }, + { 0x684d, 0x07 }, + { 0x684e, 0x01 }, + { 0x684f, 0x04 }, +}; + +static const struct imx355_reg mode_1924x1080_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x0e }, + { 0x0343, 0x58 }, + { 0x0340, 0x05 }, + { 0x0341, 0x1a }, + { 0x0344, 0x02 }, + { 0x0345, 0xa8 }, + { 0x0346, 0x02 }, + { 0x0347, 0xb4 }, + { 0x0348, 0x0a }, + { 0x0349, 0x2b }, + { 0x034a, 0x06 }, + { 0x034b, 0xeb }, + { 0x0220, 0x00 }, + { 0x0222, 0x01 }, + { 0x0900, 0x00 }, + { 0x0901, 0x11 }, + { 0x0902, 0x00 }, + { 0x034c, 0x07 }, + { 0x034d, 0x84 }, + { 0x034e, 0x04 }, + { 0x034f, 0x38 }, + { 0x0301, 0x05 }, + { 0x0303, 0x01 }, + { 0x0305, 0x02 }, + { 0x0306, 0x00 }, + { 0x0307, 0x78 }, + { 0x030b, 0x01 }, + { 0x030d, 0x02 }, + { 0x030e, 0x00 }, + { 0x030f, 0x4b }, + { 0x0310, 0x00 }, + { 0x0700, 0x00 }, + { 0x0701, 0x10 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x3088, 0x04 }, + { 0x6813, 0x02 }, + 
{ 0x6835, 0x07 }, + { 0x6836, 0x01 }, + { 0x6837, 0x04 }, + { 0x684d, 0x07 }, + { 0x684e, 0x01 }, + { 0x684f, 0x04 }, +}; + +static const struct imx355_reg mode_1920x1080_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x0e }, + { 0x0343, 0x58 }, + { 0x0340, 0x05 }, + { 0x0341, 0x1a }, + { 0x0344, 0x02 }, + { 0x0345, 0xa8 }, + { 0x0346, 0x02 }, + { 0x0347, 0xb4 }, + { 0x0348, 0x0a }, + { 0x0349, 0x27 }, + { 0x034a, 0x06 }, + { 0x034b, 0xeb }, + { 0x0220, 0x00 }, + { 0x0222, 0x01 }, + { 0x0900, 0x00 }, + { 0x0901, 0x11 }, + { 0x0902, 0x00 }, + { 0x034c, 0x07 }, + { 0x034d, 0x80 }, + { 0x034e, 0x04 }, + { 0x034f, 0x38 }, + { 0x0301, 0x05 }, + { 0x0303, 0x01 }, + { 0x0305, 0x02 }, + { 0x0306, 0x00 }, + { 0x0307, 0x78 }, + { 0x030b, 0x01 }, + { 0x030d, 0x02 }, + { 0x030e, 0x00 }, + { 0x030f, 0x4b }, + { 0x0310, 0x00 }, + { 0x0700, 0x00 }, + { 0x0701, 0x10 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x3088, 0x04 }, + { 0x6813, 0x02 }, + { 0x6835, 0x07 }, + { 0x6836, 0x01 }, + { 0x6837, 0x04 }, + { 0x684d, 0x07 }, + { 0x684e, 0x01 }, + { 0x684f, 0x04 }, +}; + +static const struct imx355_reg mode_1640x1232_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x07 }, + { 0x0343, 0x2c }, + { 0x0340, 0x05 }, + { 0x0341, 0x1a }, + { 0x0344, 0x00 }, + { 0x0345, 0x00 }, + { 0x0346, 0x00 }, + { 0x0347, 0x00 }, + { 0x0348, 0x0c }, + { 0x0349, 0xcf }, + { 0x034a, 0x09 }, + { 0x034b, 0x9f }, + { 0x0220, 0x00 }, + { 0x0222, 0x01 }, + { 0x0900, 0x01 }, + { 0x0901, 0x22 }, + { 0x0902, 0x00 }, + { 0x034c, 0x06 }, + { 0x034d, 0x68 }, + { 0x034e, 0x04 }, + { 0x034f, 0xd0 }, + { 0x0301, 0x05 }, + { 0x0303, 0x01 }, + { 0x0305, 0x02 }, + { 0x0306, 0x00 }, + { 0x0307, 0x78 }, + { 0x030b, 0x01 }, + { 0x030d, 0x02 }, + { 0x030e, 0x00 }, + { 0x030f, 0x4b }, + { 0x0310, 0x00 }, + { 0x0700, 0x00 }, + { 0x0701, 0x10 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x3088, 0x04 }, + { 0x6813, 0x02 }, + { 0x6835, 0x07 }, + { 0x6836, 0x01 }, + { 0x6837, 0x04 }, + { 0x684d, 0x07 }, + { 0x684e, 0x01 }, + { 0x684f, 0x04 }, +}; + +static const struct imx355_reg mode_1640x922_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x07 }, + { 0x0343, 0x2c }, + { 0x0340, 0x05 }, + { 0x0341, 0x1a }, + { 0x0344, 0x00 }, + { 0x0345, 0x00 }, + { 0x0346, 0x01 }, + { 0x0347, 0x30 }, + { 0x0348, 0x0c }, + { 0x0349, 0xcf }, + { 0x034a, 0x08 }, + { 0x034b, 0x63 }, + { 0x0220, 0x00 }, + { 0x0222, 0x01 }, + { 0x0900, 0x01 }, + { 0x0901, 0x22 }, + { 0x0902, 0x00 }, + { 0x034c, 0x06 }, + { 0x034d, 0x68 }, + { 0x034e, 0x03 }, + { 0x034f, 0x9a }, + { 0x0301, 0x05 }, + { 0x0303, 0x01 }, + { 0x0305, 0x02 }, + { 0x0306, 0x00 }, + { 0x0307, 0x78 }, + { 0x030b, 0x01 }, + { 0x030d, 0x02 }, + { 0x030e, 0x00 }, + { 0x030f, 0x4b }, + { 0x0310, 0x00 }, + { 0x0700, 0x00 }, + { 0x0701, 0x10 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x3088, 0x04 }, + { 0x6813, 0x02 }, + { 0x6835, 0x07 }, + { 0x6836, 0x01 }, + { 0x6837, 0x04 }, + { 0x684d, 0x07 }, + { 0x684e, 0x01 }, + { 0x684f, 0x04 }, +}; + +static const struct imx355_reg mode_1300x736_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x07 }, + { 0x0343, 0x2c }, + { 0x0340, 0x05 }, + { 0x0341, 0x1a }, + { 0x0344, 0x01 }, + { 0x0345, 0x58 }, + { 0x0346, 0x01 }, + { 0x0347, 0xf0 }, + { 0x0348, 0x0b }, + { 0x0349, 0x7f }, + { 0x034a, 0x07 }, + { 0x034b, 0xaf }, + { 0x0220, 0x00 }, + { 0x0222, 0x01 }, + { 0x0900, 0x01 }, + { 0x0901, 0x22 }, + { 0x0902, 0x00 }, + { 0x034c, 0x05 }, + 
{ 0x034d, 0x14 }, + { 0x034e, 0x02 }, + { 0x034f, 0xe0 }, + { 0x0301, 0x05 }, + { 0x0303, 0x01 }, + { 0x0305, 0x02 }, + { 0x0306, 0x00 }, + { 0x0307, 0x78 }, + { 0x030b, 0x01 }, + { 0x030d, 0x02 }, + { 0x030e, 0x00 }, + { 0x030f, 0x4b }, + { 0x0310, 0x00 }, + { 0x0700, 0x00 }, + { 0x0701, 0x10 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x3088, 0x04 }, + { 0x6813, 0x02 }, + { 0x6835, 0x07 }, + { 0x6836, 0x01 }, + { 0x6837, 0x04 }, + { 0x684d, 0x07 }, + { 0x684e, 0x01 }, + { 0x684f, 0x04 }, +}; + +static const struct imx355_reg mode_1296x736_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x07 }, + { 0x0343, 0x2c }, + { 0x0340, 0x05 }, + { 0x0341, 0x1a }, + { 0x0344, 0x01 }, + { 0x0345, 0x58 }, + { 0x0346, 0x01 }, + { 0x0347, 0xf0 }, + { 0x0348, 0x0b }, + { 0x0349, 0x77 }, + { 0x034a, 0x07 }, + { 0x034b, 0xaf }, + { 0x0220, 0x00 }, + { 0x0222, 0x01 }, + { 0x0900, 0x01 }, + { 0x0901, 0x22 }, + { 0x0902, 0x00 }, + { 0x034c, 0x05 }, + { 0x034d, 0x10 }, + { 0x034e, 0x02 }, + { 0x034f, 0xe0 }, + { 0x0301, 0x05 }, + { 0x0303, 0x01 }, + { 0x0305, 0x02 }, + { 0x0306, 0x00 }, + { 0x0307, 0x78 }, + { 0x030b, 0x01 }, + { 0x030d, 0x02 }, + { 0x030e, 0x00 }, + { 0x030f, 0x4b }, + { 0x0310, 0x00 }, + { 0x0700, 0x00 }, + { 0x0701, 0x10 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x3088, 0x04 }, + { 0x6813, 0x02 }, + { 0x6835, 0x07 }, + { 0x6836, 0x01 }, + { 0x6837, 0x04 }, + { 0x684d, 0x07 }, + { 0x684e, 0x01 }, + { 0x684f, 0x04 }, +}; + +static const struct imx355_reg mode_1284x720_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x07 }, + { 0x0343, 0x2c }, + { 0x0340, 0x05 }, + { 0x0341, 0x1a }, + { 0x0344, 0x01 }, + { 0x0345, 0x68 }, + { 0x0346, 0x02 }, + { 0x0347, 0x00 }, + { 0x0348, 0x0b }, + { 0x0349, 0x6f }, + { 0x034a, 0x07 }, + { 0x034b, 0x9f }, + { 0x0220, 0x00 }, + { 0x0222, 0x01 }, + { 0x0900, 0x01 }, + { 0x0901, 0x22 }, + { 0x0902, 0x00 }, + { 0x034c, 0x05 }, + { 0x034d, 0x04 }, + { 0x034e, 0x02 }, + { 0x034f, 0xd0 }, + { 0x0301, 0x05 }, + { 0x0303, 0x01 }, + { 0x0305, 0x02 }, + { 0x0306, 0x00 }, + { 0x0307, 0x78 }, + { 0x030b, 0x01 }, + { 0x030d, 0x02 }, + { 0x030e, 0x00 }, + { 0x030f, 0x4b }, + { 0x0310, 0x00 }, + { 0x0700, 0x00 }, + { 0x0701, 0x10 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x3088, 0x04 }, + { 0x6813, 0x02 }, + { 0x6835, 0x07 }, + { 0x6836, 0x01 }, + { 0x6837, 0x04 }, + { 0x684d, 0x07 }, + { 0x684e, 0x01 }, + { 0x684f, 0x04 }, +}; + +static const struct imx355_reg mode_1280x720_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 0x0114, 0x03 }, + { 0x0342, 0x07 }, + { 0x0343, 0x2c }, + { 0x0340, 0x05 }, + { 0x0341, 0x1a }, + { 0x0344, 0x01 }, + { 0x0345, 0x68 }, + { 0x0346, 0x02 }, + { 0x0347, 0x00 }, + { 0x0348, 0x0b }, + { 0x0349, 0x67 }, + { 0x034a, 0x07 }, + { 0x034b, 0x9f }, + { 0x0220, 0x00 }, + { 0x0222, 0x01 }, + { 0x0900, 0x01 }, + { 0x0901, 0x22 }, + { 0x0902, 0x00 }, + { 0x034c, 0x05 }, + { 0x034d, 0x00 }, + { 0x034e, 0x02 }, + { 0x034f, 0xd0 }, + { 0x0301, 0x05 }, + { 0x0303, 0x01 }, + { 0x0305, 0x02 }, + { 0x0306, 0x00 }, + { 0x0307, 0x78 }, + { 0x030b, 0x01 }, + { 0x030d, 0x02 }, + { 0x030e, 0x00 }, + { 0x030f, 0x4b }, + { 0x0310, 0x00 }, + { 0x0700, 0x00 }, + { 0x0701, 0x10 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x3088, 0x04 }, + { 0x6813, 0x02 }, + { 0x6835, 0x07 }, + { 0x6836, 0x01 }, + { 0x6837, 0x04 }, + { 0x684d, 0x07 }, + { 0x684e, 0x01 }, + { 0x684f, 0x04 }, +}; + +static const struct imx355_reg mode_820x616_regs[] = { + { 0x0112, 0x0a }, + { 0x0113, 0x0a }, + { 
0x0114, 0x03 }, + { 0x0342, 0x0e }, + { 0x0343, 0x58 }, + { 0x0340, 0x02 }, + { 0x0341, 0x8c }, + { 0x0344, 0x00 }, + { 0x0345, 0x00 }, + { 0x0346, 0x00 }, + { 0x0347, 0x00 }, + { 0x0348, 0x0c }, + { 0x0349, 0xcf }, + { 0x034a, 0x09 }, + { 0x034b, 0x9f }, + { 0x0220, 0x00 }, + { 0x0222, 0x01 }, + { 0x0900, 0x01 }, + { 0x0901, 0x44 }, + { 0x0902, 0x00 }, + { 0x034c, 0x03 }, + { 0x034d, 0x34 }, + { 0x034e, 0x02 }, + { 0x034f, 0x68 }, + { 0x0301, 0x05 }, + { 0x0303, 0x01 }, + { 0x0305, 0x02 }, + { 0x0306, 0x00 }, + { 0x0307, 0x78 }, + { 0x030b, 0x01 }, + { 0x030d, 0x02 }, + { 0x030e, 0x00 }, + { 0x030f, 0x4b }, + { 0x0310, 0x00 }, + { 0x0700, 0x02 }, + { 0x0701, 0x78 }, + { 0x0820, 0x0b }, + { 0x0821, 0x40 }, + { 0x3088, 0x04 }, + { 0x6813, 0x02 }, + { 0x6835, 0x07 }, + { 0x6836, 0x01 }, + { 0x6837, 0x04 }, + { 0x684d, 0x07 }, + { 0x684e, 0x01 }, + { 0x684f, 0x04 }, +}; + +static const char * const imx355_test_pattern_menu[] = { + "Disabled", + "100% color bars", + "Solid color", + "Fade to gray color bars", + "PN9" +}; + +static const int imx355_test_pattern_val[] = { + IMX355_TEST_PATTERN_DISABLED, + IMX355_TEST_PATTERN_COLOR_BARS, + IMX355_TEST_PATTERN_SOLID_COLOR, + IMX355_TEST_PATTERN_GRAY_COLOR_BARS, + IMX355_TEST_PATTERN_PN9, +}; + +/* Configurations for supported link frequencies */ +/* Menu items for LINK_FREQ V4L2 control */ +static const s64 link_freq_menu_items[] = { + 360000000, +}; + +/* Mode configs */ +static const struct imx355_mode supported_modes[] = { + { + .width = 3280, + .height = 2464, + .fll_def = 0xa37, + .fll_min = 0xa37, + .llp = 0xe58, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_3280x2464_regs), + .regs = mode_3280x2464_regs, + }, + }, + { + .width = 3268, + .height = 2448, + .fll_def = 0xa37, + .fll_min = 0xa37, + .llp = 0xe58, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_3268x2448_regs), + .regs = mode_3268x2448_regs, + }, + }, + { + .width = 3264, + .height = 2448, + .fll_def = 0xa37, + .fll_min = 0xa37, + .llp = 0xe58, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_3264x2448_regs), + .regs = mode_3264x2448_regs, + }, + }, + { + .width = 1940, + .height = 1096, + .fll_def = 0x51a, + .fll_min = 0x51a, + .llp = 0xe58, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1940x1096_regs), + .regs = mode_1940x1096_regs, + }, + }, + { + .width = 1936, + .height = 1096, + .fll_def = 0x51a, + .fll_min = 0x51a, + .llp = 0xe58, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1936x1096_regs), + .regs = mode_1936x1096_regs, + }, + }, + { + .width = 1924, + .height = 1080, + .fll_def = 0x51a, + .fll_min = 0x51a, + .llp = 0xe58, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1924x1080_regs), + .regs = mode_1924x1080_regs, + }, + }, + { + .width = 1920, + .height = 1080, + .fll_def = 0x51a, + .fll_min = 0x51a, + .llp = 0xe58, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1920x1080_regs), + .regs = mode_1920x1080_regs, + }, + }, + { + .width = 1640, + .height = 1232, + .fll_def = 0x51a, + .fll_min = 0x51a, + .llp = 0x72c, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1640x1232_regs), + .regs = mode_1640x1232_regs, + }, + }, + { + .width = 1640, + .height = 922, + .fll_def = 0x51a, + .fll_min = 0x51a, + .llp = 0x72c, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1640x922_regs), + .regs = mode_1640x922_regs, + }, + }, + { + .width = 1300, + .height = 736, + .fll_def = 0x51a, + .fll_min = 0x51a, + .llp = 0x72c, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1300x736_regs), + .regs = mode_1300x736_regs, + }, + }, + { + .width = 1296, + .height = 736, + 
.fll_def = 0x51a, + .fll_min = 0x51a, + .llp = 0x72c, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1296x736_regs), + .regs = mode_1296x736_regs, + }, + }, + { + .width = 1284, + .height = 720, + .fll_def = 0x51a, + .fll_min = 0x51a, + .llp = 0x72c, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1284x720_regs), + .regs = mode_1284x720_regs, + }, + }, + { + .width = 1280, + .height = 720, + .fll_def = 0x51a, + .fll_min = 0x51a, + .llp = 0x72c, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1280x720_regs), + .regs = mode_1280x720_regs, + }, + }, + { + .width = 820, + .height = 616, + .fll_def = 0x28c, + .fll_min = 0x28c, + .llp = 0xe58, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_820x616_regs), + .regs = mode_820x616_regs, + }, + }, +}; + +static inline struct imx355 *to_imx355(struct v4l2_subdev *_sd) +{ + return container_of(_sd, struct imx355, sd); +} + +/* Get bayer order based on flip setting. */ +static __u32 imx355_get_format_code(struct imx355 *imx355) +{ + /* + * Only one bayer order is supported. + * It depends on the flip settings. + */ + static const __u32 codes[2][2] = { + { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, }, + { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10, }, + }; + + return codes[imx355->vflip->val][imx355->hflip->val]; +} + +/* Read registers up to 4 at a time */ +static int imx355_read_reg(struct imx355 *imx355, u16 reg, u32 len, u32 *val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd); + struct i2c_msg msgs[2]; + u8 addr_buf[2]; + u8 data_buf[4] = { 0 }; + int ret; + + if (len > 4) + return -EINVAL; + + put_unaligned_be16(reg, addr_buf); + /* Write register address */ + msgs[0].addr = client->addr; + msgs[0].flags = 0; + msgs[0].len = ARRAY_SIZE(addr_buf); + msgs[0].buf = addr_buf; + + /* Read data from register */ + msgs[1].addr = client->addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = len; + msgs[1].buf = &data_buf[4 - len]; + + ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (ret != ARRAY_SIZE(msgs)) + return -EIO; + + *val = get_unaligned_be32(data_buf); + + return 0; +} + +/* Write registers up to 4 at a time */ +static int imx355_write_reg(struct imx355 *imx355, u16 reg, u32 len, u32 val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd); + u8 buf[6]; + + if (len > 4) + return -EINVAL; + + put_unaligned_be16(reg, buf); + put_unaligned_be32(val << (8 * (4 - len)), buf + 2); + if (i2c_master_send(client, buf, len + 2) != len + 2) + return -EIO; + + return 0; +} + +/* Write a list of registers */ +static int imx355_write_regs(struct imx355 *imx355, + const struct imx355_reg *regs, u32 len) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd); + int ret; + u32 i; + + for (i = 0; i < len; i++) { + ret = imx355_write_reg(imx355, regs[i].address, 1, + regs[i].val); + if (ret) { + dev_err_ratelimited( + &client->dev, + "Failed to write reg 0x%4.4x. 
error = %d\n", + regs[i].address, ret); + + return ret; + } + } + + return 0; +} + +/* Open sub-device */ +static int imx355_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct imx355 *imx355 = to_imx355(sd); + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format(sd, fh->pad, 0); + + mutex_lock(&imx355->mutex); + + /* Initialize try_fmt */ + try_fmt->width = imx355->cur_mode->width; + try_fmt->height = imx355->cur_mode->height; + try_fmt->code = imx355_get_format_code(imx355); + try_fmt->field = V4L2_FIELD_NONE; + + mutex_unlock(&imx355->mutex); + + return 0; +} + +static int imx355_update_digital_gain(struct imx355 *imx355, u32 d_gain) +{ + int ret; + + ret = imx355_write_reg(imx355, IMX355_REG_DPGA_USE_GLOBAL_GAIN, 1, 1); + if (ret) + return ret; + + /* Digital gain = (d_gain & 0xFF00) + (d_gain & 0xFF)/256 times */ + return imx355_write_reg(imx355, IMX355_REG_DIG_GAIN_GLOBAL, 2, d_gain); +} + +static int imx355_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct imx355 *imx355 = container_of(ctrl->handler, + struct imx355, ctrl_handler); + struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd); + s64 max; + int ret; + + /* Propagate change of current control to all related controls */ + switch (ctrl->id) { + case V4L2_CID_VBLANK: + /* Update max exposure while meeting expected vblanking */ + max = imx355->cur_mode->height + ctrl->val - 10; + __v4l2_ctrl_modify_range(imx355->exposure, + imx355->exposure->minimum, + max, imx355->exposure->step, max); + break; + } + + /* + * Applying V4L2 control value only happens + * when power is up for streaming + */ + if (pm_runtime_get_if_in_use(&client->dev) == 0) + return 0; + + switch (ctrl->id) { + case V4L2_CID_ANALOGUE_GAIN: + /* Analog gain = 1024/(1024 - ctrl->val) times */ + ret = imx355_write_reg(imx355, IMX355_REG_ANALOG_GAIN, + 2, ctrl->val); + break; + case V4L2_CID_DIGITAL_GAIN: + ret = imx355_update_digital_gain(imx355, ctrl->val); + break; + case V4L2_CID_EXPOSURE: + ret = imx355_write_reg(imx355, IMX355_REG_EXPOSURE, + 2, ctrl->val); + break; + case V4L2_CID_VBLANK: + /* Update FLL that meets expected vertical blanking */ + ret = imx355_write_reg(imx355, IMX355_REG_FLL, 2, + imx355->cur_mode->height + ctrl->val); + break; + case V4L2_CID_TEST_PATTERN: + ret = imx355_write_reg(imx355, IMX355_REG_TEST_PATTERN, + 2, imx355_test_pattern_val[ctrl->val]); + break; + case V4L2_CID_HFLIP: + case V4L2_CID_VFLIP: + ret = imx355_write_reg(imx355, IMX355_REG_ORIENTATION, 1, + imx355->hflip->val | + imx355->vflip->val << 1); + break; + default: + ret = -EINVAL; + dev_info(&client->dev, + "ctrl(id:0x%x,val:0x%x) is not handled\n", + ctrl->id, ctrl->val); + break; + } + + pm_runtime_put(&client->dev); + + return ret; +} + +static const struct v4l2_ctrl_ops imx355_ctrl_ops = { + .s_ctrl = imx355_set_ctrl, +}; + +static int imx355_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct imx355 *imx355 = to_imx355(sd); + + if (code->index > 0) + return -EINVAL; + + code->code = imx355_get_format_code(imx355); + + return 0; +} + +static int imx355_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct imx355 *imx355 = to_imx355(sd); + + if (fse->index >= ARRAY_SIZE(supported_modes)) + return -EINVAL; + + if (fse->code != imx355_get_format_code(imx355)) + return -EINVAL; + + fse->min_width = supported_modes[fse->index].width; + fse->max_width = fse->min_width; + 
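/* Modes are discrete frame sizes, so min and max are equal. */ + 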
fse->min_height = supported_modes[fse->index].height; + fse->max_height = fse->min_height; + + return 0; +} + +static void imx355_update_pad_format(struct imx355 *imx355, + const struct imx355_mode *mode, + struct v4l2_subdev_format *fmt) +{ + fmt->format.width = mode->width; + fmt->format.height = mode->height; + fmt->format.code = imx355_get_format_code(imx355); + fmt->format.field = V4L2_FIELD_NONE; + fmt->format.colorspace = V4L2_COLORSPACE_DEFAULT; + fmt->format.xfer_func = V4L2_XFER_FUNC_DEFAULT; + fmt->format.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + fmt->format.quantization = V4L2_QUANTIZATION_DEFAULT; +} + +static int imx355_do_get_pad_format(struct imx355 *imx355, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct v4l2_mbus_framefmt *framefmt; + struct v4l2_subdev *sd = &imx355->sd; + + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); + fmt->format = *framefmt; + } else { + imx355_update_pad_format(imx355, imx355->cur_mode, fmt); + } + + return 0; +} + +static int imx355_get_pad_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct imx355 *imx355 = to_imx355(sd); + int ret; + + mutex_lock(&imx355->mutex); + ret = imx355_do_get_pad_format(imx355, cfg, fmt); + mutex_unlock(&imx355->mutex); + + return ret; +} + +static int +imx355_set_pad_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct imx355 *imx355 = to_imx355(sd); + const struct imx355_mode *mode; + struct v4l2_mbus_framefmt *framefmt; + s32 vblank_def; + s32 vblank_min; + s64 h_blank; + s64 pixel_rate; + + mutex_lock(&imx355->mutex); + + /* + * Only one bayer order is supported. + * It depends on the flip settings. + */ + fmt->format.code = imx355_get_format_code(imx355); + + mode = v4l2_find_nearest_size(supported_modes, + ARRAY_SIZE(supported_modes), width, height, + fmt->format.width, fmt->format.height); + imx355_update_pad_format(imx355, mode, fmt); + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); + *framefmt = fmt->format; + } else { + imx355->cur_mode = mode; + pixel_rate = + (link_freq_menu_items[0] * 2 * 4) / 10; + __v4l2_ctrl_s_ctrl_int64(imx355->pixel_rate, pixel_rate); + /* Update limits and set FPS to default */ + vblank_def = imx355->cur_mode->fll_def - + imx355->cur_mode->height; + vblank_min = imx355->cur_mode->fll_min - + imx355->cur_mode->height; + __v4l2_ctrl_modify_range( + imx355->vblank, vblank_min, + IMX355_FLL_MAX - imx355->cur_mode->height, 1, + vblank_def); + __v4l2_ctrl_s_ctrl(imx355->vblank, vblank_def); + h_blank = mode->llp - imx355->cur_mode->width; + /* + * Currently hblank is not changeable. + * So FPS control is done only by vblank. 
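+ * e.g. at the default 3280x2464 mode (values from supported_modes): + * pixel_rate = 288000000, llp = 0xe58 (3672), fll_def = 0xa37 (2615), + * so the default frame rate is roughly 288000000 / (3672 * 2615), + * i.e. ~30 fps; raising vblank lowers the frame rate accordingly.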
+ */ + __v4l2_ctrl_modify_range(imx355->hblank, h_blank, + h_blank, 1, h_blank); + } + + mutex_unlock(&imx355->mutex); + + return 0; +} + +/* Start streaming */ +static int imx355_start_streaming(struct imx355 *imx355) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd); + const struct imx355_reg_list *reg_list; + int ret; + + /* Global Setting */ + reg_list = &imx355_global_setting; + ret = imx355_write_regs(imx355, reg_list->regs, reg_list->num_of_regs); + if (ret) { + dev_err(&client->dev, "%s failed to set global settings\n", + __func__); + return ret; + } + + /* Apply default values of current mode */ + reg_list = &imx355->cur_mode->reg_list; + ret = imx355_write_regs(imx355, reg_list->regs, reg_list->num_of_regs); + if (ret) { + dev_err(&client->dev, "%s failed to set mode\n", __func__); + return ret; + } + + /* Apply customized values from user */ + ret = __v4l2_ctrl_handler_setup(imx355->sd.ctrl_handler); + if (ret) + return ret; + + return imx355_write_reg(imx355, IMX355_REG_MODE_SELECT, + 1, IMX355_MODE_STREAMING); +} + +/* Stop streaming */ +static int imx355_stop_streaming(struct imx355 *imx355) +{ + return imx355_write_reg(imx355, IMX355_REG_MODE_SELECT, + 1, IMX355_MODE_STANDBY); +} + +static int imx355_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct imx355 *imx355 = to_imx355(sd); + struct i2c_client *client = v4l2_get_subdevdata(sd); + int ret = 0; + + mutex_lock(&imx355->mutex); + if (imx355->streaming == enable) { + mutex_unlock(&imx355->mutex); + return 0; + } + + if (enable) { + ret = pm_runtime_get_sync(&client->dev); + if (ret < 0) { + pm_runtime_put_noidle(&client->dev); + goto err_unlock; + } + + /* + * Apply default & customized values + * and then start streaming. + */ + ret = imx355_start_streaming(imx355); + if (ret) + goto err_rpm_put; + } else { + imx355_stop_streaming(imx355); + pm_runtime_put(&client->dev); + } + + imx355->streaming = enable; + mutex_unlock(&imx355->mutex); + + return ret; + +err_rpm_put: + pm_runtime_put(&client->dev); +err_unlock: + mutex_unlock(&imx355->mutex); + + return ret; +} + +static int __maybe_unused imx355_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct imx355 *imx355 = to_imx355(sd); + + if (imx355->streaming) + imx355_stop_streaming(imx355); + + return 0; +} + +static int __maybe_unused imx355_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct imx355 *imx355 = to_imx355(sd); + int ret; + + if (imx355->streaming) { + ret = imx355_start_streaming(imx355); + if (ret) + goto error; + } + + return 0; + +error: + imx355_stop_streaming(imx355); + imx355->streaming = 0; + return ret; +} + +/* Verify chip ID */ +static int imx355_identify_module(struct imx355 *imx355) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd); + int ret; + u32 val; + + ret = imx355_read_reg(imx355, IMX355_REG_CHIP_ID, 2, &val); + if (ret) + return ret; + + if (val != IMX355_CHIP_ID) { + dev_err(&client->dev, "chip id mismatch: %x!=%x\n", + IMX355_CHIP_ID, val); + return -EIO; + } + return 0; +} + +static const struct v4l2_subdev_core_ops imx355_subdev_core_ops = { + .subscribe_event = v4l2_ctrl_subdev_subscribe_event, + .unsubscribe_event = v4l2_event_subdev_unsubscribe, +}; + +static const struct v4l2_subdev_video_ops imx355_video_ops = { + .s_stream = imx355_set_stream, +}; + +static const struct v4l2_subdev_pad_ops imx355_pad_ops = 
{ + .enum_mbus_code = imx355_enum_mbus_code, + .get_fmt = imx355_get_pad_format, + .set_fmt = imx355_set_pad_format, + .enum_frame_size = imx355_enum_frame_size, +}; + +static const struct v4l2_subdev_ops imx355_subdev_ops = { + .core = &imx355_subdev_core_ops, + .video = &imx355_video_ops, + .pad = &imx355_pad_ops, +}; + +static const struct media_entity_operations imx355_subdev_entity_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +static const struct v4l2_subdev_internal_ops imx355_internal_ops = { + .open = imx355_open, +}; + +/* Initialize control handlers */ +static int imx355_init_controls(struct imx355 *imx355) +{ + struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd); + struct v4l2_ctrl_handler *ctrl_hdlr; + s64 exposure_max; + s64 vblank_def; + s64 vblank_min; + s64 hblank; + s64 pixel_rate; + const struct imx355_mode *mode; + int ret; + + ctrl_hdlr = &imx355->ctrl_handler; + ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10); + if (ret) + return ret; + + ctrl_hdlr->lock = &imx355->mutex; + imx355->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, + &imx355_ctrl_ops, + V4L2_CID_LINK_FREQ, + 0, + 0, + link_freq_menu_items); + if (imx355->link_freq) + imx355->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */ + pixel_rate = (link_freq_menu_items[0] * 2 * 4) / 10; + /* By default, PIXEL_RATE is read only */ + imx355->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx355_ctrl_ops, + V4L2_CID_PIXEL_RATE, + pixel_rate, pixel_rate, + 1, pixel_rate); + + /* Initialize vblank/hblank/exposure parameters based on current mode */ + mode = imx355->cur_mode; + vblank_def = mode->fll_def - mode->height; + vblank_min = mode->fll_min - mode->height; + imx355->vblank = v4l2_ctrl_new_std( + ctrl_hdlr, &imx355_ctrl_ops, V4L2_CID_VBLANK, + vblank_min, IMX355_FLL_MAX - mode->height, 1, + vblank_def); + + hblank = mode->llp - mode->width; + imx355->hblank = v4l2_ctrl_new_std( + ctrl_hdlr, &imx355_ctrl_ops, V4L2_CID_HBLANK, + hblank, hblank, 1, hblank); + if (imx355->hblank) + imx355->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + exposure_max = mode->fll_def - 10; + imx355->exposure = v4l2_ctrl_new_std( + ctrl_hdlr, &imx355_ctrl_ops, + V4L2_CID_EXPOSURE, IMX355_EXPOSURE_MIN, + exposure_max, IMX355_EXPOSURE_STEP, + IMX355_EXPOSURE_DEFAULT); + + imx355->hflip = v4l2_ctrl_new_std( + ctrl_hdlr, &imx355_ctrl_ops, + V4L2_CID_HFLIP, 0, 1, 1, 0); + imx355->vflip = v4l2_ctrl_new_std( + ctrl_hdlr, &imx355_ctrl_ops, + V4L2_CID_VFLIP, 0, 1, 1, 0); + + v4l2_ctrl_new_std(ctrl_hdlr, &imx355_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, + IMX355_ANA_GAIN_MIN, IMX355_ANA_GAIN_MAX, + IMX355_ANA_GAIN_STEP, IMX355_ANA_GAIN_DEFAULT); + + /* Digital gain */ + v4l2_ctrl_new_std(ctrl_hdlr, &imx355_ctrl_ops, V4L2_CID_DIGITAL_GAIN, + IMX355_DGTL_GAIN_MIN, IMX355_DGTL_GAIN_MAX, + IMX355_DGTL_GAIN_STEP, IMX355_DGTL_GAIN_DEFAULT); + + v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx355_ctrl_ops, + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(imx355_test_pattern_menu) - 1, + 0, 0, imx355_test_pattern_menu); + if (ctrl_hdlr->error) { + ret = ctrl_hdlr->error; + dev_err(&client->dev, "%s control init failed (%d)\n", + __func__, ret); + goto error; + } + + imx355->sd.ctrl_handler = ctrl_hdlr; + + return 0; + +error: + v4l2_ctrl_handler_free(ctrl_hdlr); + + return ret; +} + +static void imx355_free_controls(struct imx355 *imx355) +{ + v4l2_ctrl_handler_free(imx355->sd.ctrl_handler); +} + +static int imx355_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct 
imx355 *imx355; + int ret; + + imx355 = devm_kzalloc(&client->dev, sizeof(*imx355), GFP_KERNEL); + if (!imx355) + return -ENOMEM; + + mutex_init(&imx355->mutex); + + /* Initialize subdev */ + v4l2_i2c_subdev_init(&imx355->sd, client, &imx355_subdev_ops); + + /* Check module identity */ + ret = imx355_identify_module(imx355); + if (ret) { + dev_err(&client->dev, "failed to find sensor: %d\n", ret); + goto error_probe; + } + + /* Set default mode to max resolution */ + imx355->cur_mode = &supported_modes[0]; + + ret = imx355_init_controls(imx355); + if (ret) { + dev_err(&client->dev, "failed to init controls: %d\n", ret); + goto error_probe; + } + + /* Initialize subdev */ + imx355->sd.internal_ops = &imx355_internal_ops; + imx355->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | + V4L2_SUBDEV_FL_HAS_EVENTS; + imx355->sd.entity.ops = &imx355_subdev_entity_ops; + imx355->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; + + /* Initialize source pad */ + imx355->pad.flags = MEDIA_PAD_FL_SOURCE; + ret = media_entity_pads_init(&imx355->sd.entity, 1, &imx355->pad); + if (ret) { + dev_err(&client->dev, "%s failed:%d\n", __func__, ret); + goto error_handler_free; + } + + ret = v4l2_async_register_subdev_sensor_common(&imx355->sd); + if (ret < 0) + goto error_media_entity; + + /* + * Device is already turned on by i2c-core with ACPI domain PM. + * Enable runtime PM and turn off the device. + */ + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + pm_runtime_idle(&client->dev); + + return 0; + +error_media_entity: + media_entity_cleanup(&imx355->sd.entity); + +error_handler_free: + imx355_free_controls(imx355); + +error_probe: + mutex_destroy(&imx355->mutex); + + return ret; +} + +static int imx355_remove(struct i2c_client *client) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct imx355 *imx355 = to_imx355(sd); + + v4l2_async_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + imx355_free_controls(imx355); + + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); + + mutex_destroy(&imx355->mutex); + + return 0; +} + +static const struct i2c_device_id imx355_id_table[] = { + { "imx355", 0 }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(i2c, imx355_id_table); + +static const struct dev_pm_ops imx355_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(imx355_suspend, imx355_resume) +}; + +static const struct acpi_device_id imx355_acpi_ids[] = { + {"SONY355A"}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(acpi, imx355_acpi_ids); + +static struct i2c_driver imx355_i2c_driver = { + .driver = { + .name = "imx355", + .owner = THIS_MODULE, + .pm = &imx355_pm_ops, + .acpi_match_table = ACPI_PTR(imx355_acpi_ids), + }, + .probe = imx355_probe, + .remove = imx355_remove, + .id_table = imx355_id_table, +}; +module_i2c_driver(imx355_i2c_driver); + +MODULE_AUTHOR("Qiu, Tianshu "); +MODULE_DESCRIPTION("Sony imx355 sensor driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/i2c/lc898122/Kconfig b/drivers/media/i2c/lc898122/Kconfig new file mode 100644 index 000000000000..7f11e6e52d33 --- /dev/null +++ b/drivers/media/i2c/lc898122/Kconfig @@ -0,0 +1,5 @@ +config VIDEO_LC898122 + tristate "lc898122 voice coil support" + depends on I2C && VIDEO_V4L2 + ---help--- + This is a driver for the LC898122 camera lens voice coil. 
diff --git a/drivers/media/i2c/lc898122/Makefile b/drivers/media/i2c/lc898122/Makefile new file mode 100644 index 000000000000..4ad76babc61d --- /dev/null +++ b/drivers/media/i2c/lc898122/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2010 - 2018, Intel Corporation. + +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +obj-$(CONFIG_VIDEO_LC898122) += lc898122x.o + +lc898122x-objs := lc898122.o lc898122-oiscmd.o lc898122-oisinit.o + + +ccflags-y += -Werror -I$(srcpath)/$(src)/../../../../include/ diff --git a/drivers/media/i2c/lc898122/lc898122-oiscmd.c b/drivers/media/i2c/lc898122/lc898122-oiscmd.c new file mode 100644 index 000000000000..4bb4b268e621 --- /dev/null +++ b/drivers/media/i2c/lc898122/lc898122-oiscmd.c @@ -0,0 +1,1442 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2015 - 2018 Intel Corporation + * Copyright (C) ON Semiconductor + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "lc898122.h" +#include "lc898122-oisinit.h" +#include "lc898122-regs.h" + +void lc898122_GyrCon(struct lc898122_device *lc898122_dev, u8); +void lc898122_SetSineWave(struct lc898122_device *lc898122_dev, u8, u8); +void lc898122_StartSineWave(struct lc898122_device *lc898122_dev); +void lc898122_StopSineWave(struct lc898122_device *lc898122_dev); + +void lc898122_SetMeasFil(struct lc898122_device *lc898122_dev, u8); +void lc898122_ClrMeasFil(struct lc898122_device *lc898122_dev); + +u8 lc898122_TstActMov(struct lc898122_device *lc898122_dev, u8); + +void lc898122_StbOnn(struct lc898122_device *lc898122_dev); +void lc898122_StbOnnN(struct lc898122_device *lc898122_dev, u8 UcStbY, + u8 UcStbX); +void lc898122_SetGcf(struct lc898122_device *lc898122_dev, u8 UcSetNum); + + +#define LC898122_MAXLMT_20M 0x40400000 +#define LC898122_MINLMT_20M 0x40000000 +#define LC898122_CHGCOEF_20M 0xB9400000 +#define LC898122_MINLMT_MOV_20M 0x00000000 +#define LC898122_CHGCOEF_MOV_20M 0xB8800000 + +#define LC898122_MAXLMT_13M 0x40333333 +#define LC898122_MINLMT_13M 0x3FC51EB9 +#define LC898122_CHGCOEF_13M 0xBA120820 +#define LC898122_MINLMT_MOV_13M 0x00000000 +#define LC898122_CHGCOEF_MOV_13M 0xB92B6DB7 + +#define LC898122_ZOOMTBL 16 +const u32 ClGyxZom[LC898122_ZOOMTBL] = { + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000 +}; + +const u32 ClGyyZom[LC898122_ZOOMTBL] = { + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000, + 0x3F800000 +}; + +#define LC898122_COEFTBL 7 +const u32 ClDiCof_20M[LC898122_COEFTBL] = { + LC898122_DIFIL_S2_20M, + LC898122_DIFIL_S2_20M, + LC898122_DIFIL_S2_20M, + LC898122_DIFIL_S2_20M, + LC898122_DIFIL_S2_20M, + LC898122_DIFIL_S2_20M, + LC898122_DIFIL_S2_20M +}; +const u32 ClDiCof_13M[LC898122_COEFTBL] = { + LC898122_DIFIL_S2_13M, + LC898122_DIFIL_S2_13M, + LC898122_DIFIL_S2_13M, + LC898122_DIFIL_S2_13M, + LC898122_DIFIL_S2_13M, + LC898122_DIFIL_S2_13M, + LC898122_DIFIL_S2_13M +}; + +void lc898122_MesFil(struct lc898122_device *lc898122_dev, u8 UcMesMod) +{ + struct i2c_client *client = lc898122_dev->client; + + if (lc898122_dev->state.flags & LC898122_EXTCLK_ALL) { + if (!UcMesMod) { + RamWrite32A(client, LC898122_mes1aa, 0x3D1E5A40); + RamWrite32A(client, 
LC898122_mes1ab, 0x3D1E5A40); + RamWrite32A(client, LC898122_mes1ac, 0x3F6C34C0); + RamWrite32A(client, LC898122_mes1ad, 0x00000000); + RamWrite32A(client, LC898122_mes1ae, 0x00000000); + RamWrite32A(client, LC898122_mes1ba, 0x3F800000); + RamWrite32A(client, LC898122_mes1bb, 0x00000000); + RamWrite32A(client, LC898122_mes1bc, 0x00000000); + RamWrite32A(client, LC898122_mes1bd, 0x00000000); + RamWrite32A(client, LC898122_mes1be, 0x00000000); + + RamWrite32A(client, LC898122_mes2aa, 0x3D1E5A40); + RamWrite32A(client, LC898122_mes2ab, 0x3D1E5A40); + RamWrite32A(client, LC898122_mes2ac, 0x3F6C34C0); + RamWrite32A(client, LC898122_mes2ad, 0x00000000); + RamWrite32A(client, LC898122_mes2ae, 0x00000000); + RamWrite32A(client, LC898122_mes2ba, 0x3F800000); + RamWrite32A(client, LC898122_mes2bb, 0x00000000); + RamWrite32A(client, LC898122_mes2bc, 0x00000000); + RamWrite32A(client, LC898122_mes2bd, 0x00000000); + RamWrite32A(client, LC898122_mes2be, 0x00000000); + + } else if (UcMesMod == LC898122_LOOPGAIN) { + RamWrite32A(client, LC898122_mes1aa, 0x3E587E00); + RamWrite32A(client, LC898122_mes1ab, 0x3E587E00); + RamWrite32A(client, LC898122_mes1ac, 0x3F13C100); + RamWrite32A(client, LC898122_mes1ad, 0x00000000); + RamWrite32A(client, LC898122_mes1ae, 0x00000000); + RamWrite32A(client, LC898122_mes1ba, 0x3F7FD400); + RamWrite32A(client, LC898122_mes1bb, 0xBF7FD400); + RamWrite32A(client, LC898122_mes1bc, 0x3F7FA840); + RamWrite32A(client, LC898122_mes1bd, 0x00000000); + RamWrite32A(client, LC898122_mes1be, 0x00000000); + + + RamWrite32A(client, LC898122_mes2aa, 0x3E587E00); + RamWrite32A(client, LC898122_mes2ab, 0x3E587E00); + RamWrite32A(client, LC898122_mes2ac, 0x3F13C100); + RamWrite32A(client, LC898122_mes2ad, 0x00000000); + RamWrite32A(client, LC898122_mes2ae, 0x00000000); + RamWrite32A(client, LC898122_mes2ba, 0x3F7FD400); + RamWrite32A(client, LC898122_mes2bb, 0xBF7FD400); + RamWrite32A(client, LC898122_mes2bc, 0x3F7FA840); + RamWrite32A(client, LC898122_mes2bd, 0x00000000); + RamWrite32A(client, LC898122_mes2be, 0x00000000); + + } else if (UcMesMod == LC898122_THROUGH) { + RamWrite32A(client, LC898122_mes1aa, 0x3F800000); + RamWrite32A(client, LC898122_mes1ab, 0x00000000); + RamWrite32A(client, LC898122_mes1ac, 0x00000000); + RamWrite32A(client, LC898122_mes1ad, 0x00000000); + RamWrite32A(client, LC898122_mes1ae, 0x00000000); + RamWrite32A(client, LC898122_mes1ba, 0x3F800000); + RamWrite32A(client, LC898122_mes1bb, 0x00000000); + RamWrite32A(client, LC898122_mes1bc, 0x00000000); + RamWrite32A(client, LC898122_mes1bd, 0x00000000); + RamWrite32A(client, LC898122_mes1be, 0x00000000); + + + RamWrite32A(client, LC898122_mes2aa, 0x3F800000); + RamWrite32A(client, LC898122_mes2ab, 0x00000000); + RamWrite32A(client, LC898122_mes2ac, 0x00000000); + RamWrite32A(client, LC898122_mes2ad, 0x00000000); + RamWrite32A(client, LC898122_mes2ae, 0x00000000); + RamWrite32A(client, LC898122_mes2ba, 0x3F800000); + RamWrite32A(client, LC898122_mes2bb, 0x00000000); + RamWrite32A(client, LC898122_mes2bc, 0x00000000); + RamWrite32A(client, LC898122_mes2bd, 0x00000000); + RamWrite32A(client, LC898122_mes2be, 0x00000000); + + } else if (UcMesMod == LC898122_NOISE) { + RamWrite32A(client, LC898122_mes1aa, 0x3D1E5A40); + RamWrite32A(client, LC898122_mes1ab, 0x3D1E5A40); + RamWrite32A(client, LC898122_mes1ac, 0x3F6C34C0); + RamWrite32A(client, LC898122_mes1ad, 0x00000000); + RamWrite32A(client, LC898122_mes1ae, 0x00000000); + RamWrite32A(client, LC898122_mes1ba, 0x3D1E5A40); + RamWrite32A(client, LC898122_mes1bb, 
0x3D1E5A40); + RamWrite32A(client, LC898122_mes1bc, 0x3F6C34C0); + RamWrite32A(client, LC898122_mes1bd, 0x00000000); + RamWrite32A(client, LC898122_mes1be, 0x00000000); + + RamWrite32A(client, LC898122_mes2aa, 0x3D1E5A40); + RamWrite32A(client, LC898122_mes2ab, 0x3D1E5A40); + RamWrite32A(client, LC898122_mes2ac, 0x3F6C34C0); + RamWrite32A(client, LC898122_mes2ad, 0x00000000); + RamWrite32A(client, LC898122_mes2ae, 0x00000000); + RamWrite32A(client, LC898122_mes2ba, 0x3D1E5A40); + RamWrite32A(client, LC898122_mes2bb, 0x3D1E5A40); + RamWrite32A(client, LC898122_mes2bc, 0x3F6C34C0); + RamWrite32A(client, LC898122_mes2bd, 0x00000000); + RamWrite32A(client, LC898122_mes2be, 0x00000000); + } + } else { + if (!UcMesMod) { + RamWrite32A(client, LC898122_mes1aa, 0x3CA175C0); + RamWrite32A(client, LC898122_mes1ab, 0x3CA175C0); + RamWrite32A(client, LC898122_mes1ac, 0x3F75E8C0); + RamWrite32A(client, LC898122_mes1ad, 0x00000000); + RamWrite32A(client, LC898122_mes1ae, 0x00000000); + RamWrite32A(client, LC898122_mes1ba, 0x3F800000); + RamWrite32A(client, LC898122_mes1bb, 0x00000000); + RamWrite32A(client, LC898122_mes1bc, 0x00000000); + RamWrite32A(client, LC898122_mes1bd, 0x00000000); + RamWrite32A(client, LC898122_mes1be, 0x00000000); + + RamWrite32A(client, LC898122_mes2aa, 0x3CA175C0); + RamWrite32A(client, LC898122_mes2ab, 0x3CA175C0); + RamWrite32A(client, LC898122_mes2ac, 0x3F75E8C0); + RamWrite32A(client, LC898122_mes2ad, 0x00000000); + RamWrite32A(client, LC898122_mes2ae, 0x00000000); + RamWrite32A(client, LC898122_mes2ba, 0x3F800000); + RamWrite32A(client, LC898122_mes2bb, 0x00000000); + RamWrite32A(client, LC898122_mes2bc, 0x00000000); + RamWrite32A(client, LC898122_mes2bd, 0x00000000); + RamWrite32A(client, LC898122_mes2be, 0x00000000); + + } else if (UcMesMod == LC898122_LOOPGAIN) { + RamWrite32A(client, LC898122_mes1aa, 0x3DF21080); + RamWrite32A(client, LC898122_mes1ab, 0x3DF21080); + RamWrite32A(client, LC898122_mes1ac, 0x3F437BC0); + RamWrite32A(client, LC898122_mes1ad, 0x00000000); + RamWrite32A(client, LC898122_mes1ae, 0x00000000); + RamWrite32A(client, LC898122_mes1ba, 0x3F7EF980); + RamWrite32A(client, LC898122_mes1bb, 0xBF7EF980); + RamWrite32A(client, LC898122_mes1bc, 0x3F7DF300); + RamWrite32A(client, LC898122_mes1bd, 0x00000000); + RamWrite32A(client, LC898122_mes1be, 0x00000000); + + RamWrite32A(client, LC898122_mes2aa, 0x3DF21080); + RamWrite32A(client, LC898122_mes2ab, 0x3DF21080); + RamWrite32A(client, LC898122_mes2ac, 0x3F437BC0); + RamWrite32A(client, LC898122_mes2ad, 0x00000000); + RamWrite32A(client, LC898122_mes2ae, 0x00000000); + RamWrite32A(client, LC898122_mes2ba, 0x3F7EF980); + RamWrite32A(client, LC898122_mes2bb, 0xBF7EF980); + RamWrite32A(client, LC898122_mes2bc, 0x3F7DF300); + RamWrite32A(client, LC898122_mes2bd, 0x00000000); + RamWrite32A(client, LC898122_mes2be, 0x00000000); + } else if (UcMesMod == LC898122_THROUGH) { + RamWrite32A(client, LC898122_mes1aa, 0x3F800000); + RamWrite32A(client, LC898122_mes1ab, 0x00000000); + RamWrite32A(client, LC898122_mes1ac, 0x00000000); + RamWrite32A(client, LC898122_mes1ad, 0x00000000); + RamWrite32A(client, LC898122_mes1ae, 0x00000000); + RamWrite32A(client, LC898122_mes1ba, 0x3F800000); + RamWrite32A(client, LC898122_mes1bb, 0x00000000); + RamWrite32A(client, LC898122_mes1bc, 0x00000000); + RamWrite32A(client, LC898122_mes1bd, 0x00000000); + RamWrite32A(client, LC898122_mes1be, 0x00000000); + + + RamWrite32A(client, LC898122_mes2aa, 0x3F800000); + RamWrite32A(client, LC898122_mes2ab, 0x00000000); + RamWrite32A(client, 
LC898122_mes2ac, 0x00000000); + RamWrite32A(client, LC898122_mes2ad, 0x00000000); + RamWrite32A(client, LC898122_mes2ae, 0x00000000); + RamWrite32A(client, LC898122_mes2ba, 0x3F800000); + RamWrite32A(client, LC898122_mes2bb, 0x00000000); + RamWrite32A(client, LC898122_mes2bc, 0x00000000); + RamWrite32A(client, LC898122_mes2bd, 0x00000000); + RamWrite32A(client, LC898122_mes2be, 0x00000000); + } else if (UcMesMod == LC898122_NOISE) { + RamWrite32A(client, LC898122_mes1aa, 0x3CA175C0); + RamWrite32A(client, LC898122_mes1ab, 0x3CA175C0); + RamWrite32A(client, LC898122_mes1ac, 0x3F75E8C0); + RamWrite32A(client, LC898122_mes1ad, 0x00000000); + RamWrite32A(client, LC898122_mes1ae, 0x00000000); + RamWrite32A(client, LC898122_mes1ba, 0x3CA175C0); + RamWrite32A(client, LC898122_mes1bb, 0x3CA175C0); + RamWrite32A(client, LC898122_mes1bc, 0x3F75E8C0); + RamWrite32A(client, LC898122_mes1bd, 0x00000000); + RamWrite32A(client, LC898122_mes1be, 0x00000000); + + RamWrite32A(client, LC898122_mes2aa, 0x3CA175C0); + RamWrite32A(client, LC898122_mes2ab, 0x3CA175C0); + RamWrite32A(client, LC898122_mes2ac, 0x3F75E8C0); + RamWrite32A(client, LC898122_mes2ad, 0x00000000); + RamWrite32A(client, LC898122_mes2ae, 0x00000000); + RamWrite32A(client, LC898122_mes2ba, 0x3CA175C0); + RamWrite32A(client, LC898122_mes2bb, 0x3CA175C0); + RamWrite32A(client, LC898122_mes2bc, 0x3F75E8C0); + RamWrite32A(client, LC898122_mes2bd, 0x00000000); + RamWrite32A(client, LC898122_mes2be, 0x00000000); + } + } +} + +void lc898122_SrvCon(struct lc898122_device *lc898122_dev, u8 UcDirSel, + u8 UcSwcCon) +{ + struct i2c_client *client = lc898122_dev->client; + + if (UcSwcCon) { + if (!UcDirSel) { + RegWriteA(client, LC898122_WH_EQSWX, 0x03); + RamWrite32A(client, LC898122_sxggf, 0x00000000); + } else { + RegWriteA(client, LC898122_WH_EQSWY, 0x03); + RamWrite32A(client, LC898122_syggf, 0x00000000); + } + } else { + if (!UcDirSel) { + RegWriteA(client, LC898122_WH_EQSWX, 0x02); + RamWrite32A(client, LC898122_SXLMT, 0x00000000); + } else { + RegWriteA(client, LC898122_WH_EQSWY, 0x02); + RamWrite32A(client, LC898122_SYLMT, 0x00000000); + } + } +} + +u8 lc898122_RtnCen(struct lc898122_device *lc898122_dev, + u8 UcCmdPar) +{ + u8 UcCmdSts = LC898122_EXE_END; + + lc898122_GyrCon(lc898122_dev, OFF); + + if (!UcCmdPar) { + lc898122_StbOnn(lc898122_dev); + } else if (UcCmdPar == 0x01) { + lc898122_SrvCon(lc898122_dev, LC898122_X_DIR, ON); + lc898122_SrvCon(lc898122_dev, LC898122_Y_DIR, OFF); + } else if (UcCmdPar == 0x02) { + lc898122_SrvCon(lc898122_dev, LC898122_X_DIR, OFF); + lc898122_SrvCon(lc898122_dev, LC898122_Y_DIR, ON); + } + + return UcCmdSts; +} + +void lc898122_GyrCon(struct lc898122_device *lc898122_dev, u8 UcGyrCon) +{ + struct i2c_client *client = lc898122_dev->client; + + RegWriteA(client, LC898122_WG_SHTON, 0x00); + + if (UcGyrCon == ON) { + + if (lc898122_dev->state.flags & LC898122_GAIN_CONT) + lc898122_autogaincontrol(lc898122_dev, ON); + + lc898122_cleargyro(lc898122_dev, 0x000E, LC898122_CLR_FRAM1); + + RamWrite32A(client, LC898122_sxggf, 0x3F800000); + RamWrite32A(client, LC898122_syggf, 0x3F800000); + + } else if (UcGyrCon == SPC) { + + if (lc898122_dev->state.flags & LC898122_GAIN_CONT) + lc898122_autogaincontrol(lc898122_dev, ON); + + RamWrite32A(client, LC898122_sxggf, 0x3F800000); + RamWrite32A(client, LC898122_syggf, 0x3F800000); + } else { + RamWrite32A(client, LC898122_sxggf, 0x00000000); + RamWrite32A(client, LC898122_syggf, 0x00000000); + + if (lc898122_dev->state.flags & LC898122_GAIN_CONT) + 
lc898122_autogaincontrol(lc898122_dev, OFF); + } +} + +void lc898122_OisEna(struct lc898122_device *lc898122_dev) +{ + lc898122_SrvCon(lc898122_dev, LC898122_X_DIR, ON); + lc898122_SrvCon(lc898122_dev, LC898122_Y_DIR, ON); + + lc898122_GyrCon(lc898122_dev, ON); +} + +void OisEnaLin(struct lc898122_device *lc898122_dev) +{ + lc898122_SrvCon(lc898122_dev, LC898122_X_DIR, ON); + lc898122_SrvCon(lc898122_dev, LC898122_Y_DIR, ON); + + lc898122_GyrCon(lc898122_dev, SPC); +} + +void lc898122_S2cPro(struct lc898122_device *lc898122_dev, u8 uc_mode) +{ + struct i2c_client *client = lc898122_dev->client; + u32 DIFIL_S2; + + if (lc898122_dev->state.UcModule == LC898122_MODULE_20M) + DIFIL_S2 = LC898122_DIFIL_S2_20M; + else + DIFIL_S2 = LC898122_DIFIL_S2_13M; + + if (uc_mode == 1) { + if (lc898122_dev->state.flags & LC898122_H1COEF_CHANGER) + lc898122_SetH1cMod(lc898122_dev, LC898122_S2MODE); + RegWriteA(client, LC898122_WG_SHTON, 0x11); + RamWrite32A(client, LC898122_gxh1c, DIFIL_S2); + RamWrite32A(client, LC898122_gyh1c, DIFIL_S2); + } else { + RamWrite32A(client, LC898122_gxh1c, + lc898122_dev->state.UlH1Coefval); + RamWrite32A(client, LC898122_gyh1c, + lc898122_dev->state.UlH1Coefval); + RegWriteA(client, LC898122_WG_SHTON, 0x00); + + if (lc898122_dev->state.flags & LC898122_H1COEF_CHANGER) + lc898122_SetH1cMod(lc898122_dev, + lc898122_dev->state.UcH1LvlMod); + } +} + +short lc898122_GenMes(struct lc898122_device *lc898122_dev, u16 UsRamAdd, + u8 UcMesMod) +{ + struct i2c_client *client = lc898122_dev->client; + + short SsMesRlt; + + RegWriteA(client, LC898122_WC_MES1ADD0, (u8)UsRamAdd); + RegWriteA(client, LC898122_WC_MES1ADD1, (u8)((UsRamAdd >> 8) & 0x0001)); + RamWrite32A(client, LC898122_MSABS1AV, 0x00000000); + + if (!UcMesMod) { + RegWriteA(client, LC898122_WC_MESLOOP1, 0x04); + RegWriteA(client, LC898122_WC_MESLOOP0, 0x00); + RamWrite32A(client, LC898122_msmean, 0x3A7FFFF7); + } else { + RegWriteA(client, LC898122_WC_MESLOOP1, 0x00); + RegWriteA(client, LC898122_WC_MESLOOP0, 0x01); + RamWrite32A(client, LC898122_msmean, 0x3F800000); + } + + RegWriteA(client, LC898122_WC_MESABS, 0x00); + lc898122_BsyWit(lc898122_dev, LC898122_WC_MESMODE, 0x01); + lc898122_RamAccFixMod(lc898122_dev, ON); + RamReadA(client, LC898122_MSABS1AV, (u16 *)&SsMesRlt); + lc898122_RamAccFixMod(lc898122_dev, OFF); + + return SsMesRlt; +} + +const u16 CucFreqVal_extclk[17] = { + 0xFFFF, /* 0: Stop */ + 0x0059, /* 1: 0.994653Hz */ + 0x00B2, /* 2: 1.989305Hz */ + 0x010C, /* 3: 2.995133Hz */ + 0x0165, /* 4: 3.989786Hz */ + 0x01BF, /* 5: 4.995614Hz */ + 0x0218, /* 6: 5.990267Hz */ + 0x0272, /* 7: 6.996095Hz */ + 0x02CB, /* 8: 7.990748Hz */ + 0x0325, /* 9: 8.996576Hz */ + 0x037E, /* A: 9.991229Hz */ + 0x03D8, /* B: 10.99706Hz */ + 0x0431, /* C: 11.99171Hz */ + 0x048B, /* D: 12.99754Hz */ + 0x04E4, /* E: 13.99219Hz */ + 0x053E, /* F: 14.99802Hz */ + 0x0597 /* 10: 15.99267Hz */ +}; + +const u16 CucFreqVal[17] = { + 0xFFFF, /* 0: Stop */ + 0x002C, /* 1: 0.983477Hz */ + 0x0059, /* 2: 1.989305Hz */ + 0x0086, /* 3: 2.995133Hz */ + 0x00B2, /* 4: 3.97861Hz */ + 0x00DF, /* 5: 4.984438Hz */ + 0x010C, /* 6: 5.990267Hz */ + 0x0139, /* 7: 6.996095Hz */ + 0x0165, /* 8: 7.979572Hz */ + 0x0192, /* 9: 8.9854Hz */ + 0x01BF, /* A: 9.991229Hz */ + 0x01EC, /* B: 10.99706Hz */ + 0x0218, /* C: 11.98053Hz */ + 0x0245, /* D: 12.98636Hz */ + 0x0272, /* E: 13.99219Hz */ + 0x029F, /* F: 14.99802Hz */ + 0x02CB /* 10: 15.9815Hz */ +}; + +void lc898122_SetSinWavePara(struct lc898122_device *lc898122_dev, + u8 UcTableVal, u8 UcMethodVal) +{ + struct 
i2c_client *client = lc898122_dev->client; + + u16 UsFreqDat; + u8 UcEqSwX, UcEqSwY; + + if (UcTableVal > 0x10) + UcTableVal = 0x10; /* Limit */ + + if (lc898122_dev->state.flags & LC898122_EXTCLK_ALL) + UsFreqDat = CucFreqVal_extclk[UcTableVal]; + else + UsFreqDat = CucFreqVal[UcTableVal]; + + if (UcMethodVal == LC898122_SINEWAVE) { + RegWriteA(client, LC898122_WC_SINPHSX, 0x00); + RegWriteA(client, LC898122_WC_SINPHSY, 0x00); + } else if (UcMethodVal == LC898122_CIRCWAVE) { + RegWriteA(client, LC898122_WC_SINPHSX, 0x00); + RegWriteA(client, LC898122_WC_SINPHSY, 0x20); + } else { + RegWriteA(client, LC898122_WC_SINPHSX, 0x00); + RegWriteA(client, LC898122_WC_SINPHSY, 0x00); + } + + + if (lc898122_dev->state.flags & LC898122_USE_SINLPF) { + if (((lc898122_dev->state.flags & LC898122_ACCEPTANCE) && + ((UcMethodVal == LC898122_CIRCWAVE) || + (UcMethodVal == LC898122_SINEWAVE))) || + (!(lc898122_dev->state.flags & LC898122_ACCEPTANCE) && + ((UcMethodVal != LC898122_XHALWAVE) && + (UcMethodVal != LC898122_YHALWAVE)))) { + lc898122_MesFil(lc898122_dev, LC898122_NOISE); + /* LPF */ + } + } + + + if (UsFreqDat == 0xFFFF) { + RegReadA(client, LC898122_WH_EQSWX, &UcEqSwX); + RegReadA(client, LC898122_WH_EQSWY, &UcEqSwY); + UcEqSwX &= ~LC898122_EQSINSW; + UcEqSwY &= ~LC898122_EQSINSW; + RegWriteA(client, LC898122_WH_EQSWX, UcEqSwX); + RegWriteA(client, LC898122_WH_EQSWY, UcEqSwY); + + + if (lc898122_dev->state.flags & LC898122_USE_SINLPF) { + if (((lc898122_dev->state.flags & LC898122_ACCEPTANCE) && + ((UcMethodVal == LC898122_CIRCWAVE) || + (UcMethodVal == LC898122_SINEWAVE) || + (UcMethodVal == LC898122_XACTTEST) || + (UcMethodVal == LC898122_YACTTEST))) || + !((lc898122_dev->state.flags & LC898122_ACCEPTANCE) && + (UcMethodVal != LC898122_XHALWAVE) && + (UcMethodVal != LC898122_YHALWAVE))) { + RegWriteA(client, LC898122_WC_DPON, 0x00); + RegWriteA(client, LC898122_WC_DPO1ADD0, 0x00); + RegWriteA(client, LC898122_WC_DPO1ADD1, 0x00); + RegWriteA(client, LC898122_WC_DPO2ADD0, 0x00); + RegWriteA(client, LC898122_WC_DPO2ADD1, 0x00); + RegWriteA(client, LC898122_WC_DPI1ADD0, 0x00); + RegWriteA(client, LC898122_WC_DPI1ADD1, 0x00); + RegWriteA(client, LC898122_WC_DPI2ADD0, 0x00); + RegWriteA(client, LC898122_WC_DPI2ADD1, 0x00); + + lc898122_RamAccFixMod(lc898122_dev, ON); + + RamWriteA(client, LC898122_SXOFFZ1, + lc898122_dev->state.UsCntXof); + RamWriteA(client, LC898122_SYOFFZ1, + lc898122_dev->state.UsCntYof); + + lc898122_RamAccFixMod(lc898122_dev, OFF); + + RegWriteA(client, LC898122_WC_MES1ADD0, 0x00); + RegWriteA(client, LC898122_WC_MES1ADD1, 0x00); + RegWriteA(client, LC898122_WC_MES2ADD0, 0x00); + RegWriteA(client, LC898122_WC_MES2ADD1, 0x00); + } + } + RegWriteA(client, LC898122_WC_SINON, 0x00); + + } else { + RegReadA(client, LC898122_WH_EQSWX, &UcEqSwX); + RegReadA(client, LC898122_WH_EQSWY, &UcEqSwY); + + if (((lc898122_dev->state.flags & LC898122_ACCEPTANCE) && + ((UcMethodVal == LC898122_CIRCWAVE) || + (UcMethodVal == LC898122_SINEWAVE))) || + (!(lc898122_dev->state.flags & LC898122_ACCEPTANCE) && + ((UcMethodVal != LC898122_XHALWAVE) && + (UcMethodVal != LC898122_YHALWAVE)))) { + + if (lc898122_dev->state.flags & LC898122_USE_SINLPF) { + RegWriteA(client, LC898122_WC_DPI1ADD0, + (u8)LC898122_MES1BZ2); + RegWriteA(client, LC898122_WC_DPI1ADD1, + (u8)((LC898122_MES1BZ2 >> 8) & 0x0001)); + RegWriteA(client, LC898122_WC_DPI2ADD0, + (u8)LC898122_MES2BZ2); + RegWriteA(client, LC898122_WC_DPI2ADD1, + (u8)((LC898122_MES2BZ2 >> 8) & 0x0001)); + RegWriteA(client, LC898122_WC_DPO1ADD0, + 
(u8)LC898122_SXOFFZ1); + RegWriteA(client, LC898122_WC_DPO1ADD1, + (u8)((LC898122_SXOFFZ1 >> 8) & 0x0001)); + RegWriteA(client, LC898122_WC_DPO2ADD0, + (u8)LC898122_SYOFFZ1); + RegWriteA(client, LC898122_WC_DPO2ADD1, + (u8)((LC898122_SYOFFZ1 >> 8) & 0x0001)); + + RegWriteA(client, LC898122_WC_MES1ADD0, + (u8)LC898122_SINXZ); + RegWriteA(client, LC898122_WC_MES1ADD1, + (u8)((LC898122_SINXZ >> 8) & 0x0001)); + RegWriteA(client, LC898122_WC_MES2ADD0, + (u8)LC898122_SINYZ); + RegWriteA(client, LC898122_WC_MES2ADD1, + (u8)((LC898122_SINYZ >> 8) & 0x0001)); + + RegWriteA(client, LC898122_WC_DPON, 0x03); + + UcEqSwX &= ~LC898122_EQSINSW; + UcEqSwY &= ~LC898122_EQSINSW; + } else { + UcEqSwX |= 0x08; + UcEqSwY |= 0x08; + } + } else if ((lc898122_dev->state.flags & LC898122_ACCEPTANCE) && + ((UcMethodVal == LC898122_XACTTEST) || + (UcMethodVal == LC898122_YACTTEST))) { + RegWriteA(client, LC898122_WC_DPI2ADD0, + (u8)LC898122_MES2BZ2); + RegWriteA(client, LC898122_WC_DPI2ADD1, + (u8)((LC898122_MES2BZ2 >> 8) & 0x0001)); + if (UcMethodVal == LC898122_XACTTEST) { + RegWriteA(client, LC898122_WC_DPO2ADD0, + (u8)LC898122_SXOFFZ1); + RegWriteA(client, LC898122_WC_DPO2ADD1, + (u8)((LC898122_SXOFFZ1 >> 8) & 0x0001)); + RegWriteA(client, LC898122_WC_MES2ADD0, + (u8)LC898122_SINXZ); + RegWriteA(client, LC898122_WC_MES2ADD1, + (u8)((LC898122_SINXZ >> 8) & 0x0001)); + } else { + RegWriteA(client, LC898122_WC_DPO2ADD0, + (u8)LC898122_SYOFFZ1); + RegWriteA(client, LC898122_WC_DPO2ADD1, + (u8)((LC898122_SYOFFZ1 >> 8) & 0x0001)); + RegWriteA(client, LC898122_WC_MES2ADD0, + (u8)LC898122_SINYZ); + RegWriteA(client, LC898122_WC_MES2ADD1, + (u8)((LC898122_SINYZ >> 8) & 0x0001)); + } + + RegWriteA(client, LC898122_WC_DPON, 0x02); + + UcEqSwX &= ~LC898122_EQSINSW; + UcEqSwY &= ~LC898122_EQSINSW; + } else { + if (UcMethodVal == LC898122_XHALWAVE) + UcEqSwX = 0x22; + else + UcEqSwY = 0x22; + } + + RegWriteA(client, LC898122_WC_SINFRQ0, (u8)UsFreqDat); + RegWriteA(client, LC898122_WC_SINFRQ1, (u8)(UsFreqDat >> 8)); + RegWriteA(client, LC898122_WC_MESSINMODE, 0x00); + + RegWriteA(client, LC898122_WH_EQSWX, UcEqSwX); + RegWriteA(client, LC898122_WH_EQSWY, UcEqSwY); + + RegWriteA(client, LC898122_WC_SINON, 0x01); + } +} + +void lc898122_SetStandby(struct lc898122_device *lc898122_dev, u8 UcContMode) +{ + struct i2c_client *client = lc898122_dev->client; + u8 UcStbb0, UcClkon; + + switch (UcContMode) { + case LC898122_STB1_ON: + if (!(lc898122_dev->state.flags & LC898122_AF_PWMMODE)) + RegWriteA(client, LC898122_DRVFCAF, 0x00); + RegWriteA(client, LC898122_STBB0, 0x00); + RegWriteA(client, LC898122_STBB1, 0x00); + RegWriteA(client, LC898122_PWMA, 0x00); + RegWriteA(client, LC898122_PWMAAF, 0x00); + RegWriteA(client, LC898122_CVA, 0x00); + lc898122_driversw(lc898122_dev, OFF); + lc898122_afdriversw(lc898122_dev, OFF); + if (!(lc898122_dev->state.flags & LC898122_MONITOR_OFF)) + RegWriteA(client, LC898122_PWMMONA, 0x00); + lc898122_selectgyrosleep(lc898122_dev, ON); + break; + case LC898122_STB1_OFF: + lc898122_selectgyrosleep(lc898122_dev, OFF); + RegWriteA(client, LC898122_PWMMONA, 0x80); + lc898122_driversw(lc898122_dev, ON); + lc898122_afdriversw(lc898122_dev, ON); + RegWriteA(client, LC898122_CVA, 0xC0); + if (lc898122_dev->state.flags & LC898122_AF_PWMMODE) + RegWriteA(client, LC898122_PWMAAF, 0x80); + else + RegWriteA(client, LC898122_PWMAAF, 0x00); + RegWriteA(client, LC898122_PWMA, 0xC0); + RegWriteA(client, LC898122_STBB1, 0x05); + RegWriteA(client, LC898122_STBB0, 0xDF); + + break; + case LC898122_STB2_ON: + if 
(!(lc898122_dev->state.flags & LC898122_AF_PWMMODE)) + RegWriteA(client, LC898122_DRVFCAF, 0x00); + RegWriteA(client, LC898122_STBB0, 0x00); + RegWriteA(client, LC898122_STBB1, 0x00); + RegWriteA(client, LC898122_PWMA, 0x00); + RegWriteA(client, LC898122_PWMAAF, 0x00); + RegWriteA(client, LC898122_CVA, 0x00); + lc898122_driversw(lc898122_dev, OFF); + lc898122_afdriversw(lc898122_dev, OFF); + if (!(lc898122_dev->state.flags & LC898122_MONITOR_OFF)) + RegWriteA(client, LC898122_PWMMONA, 0x00); + lc898122_selectgyrosleep(lc898122_dev, ON); + RegWriteA(client, LC898122_CLKON, 0x00); + break; + case LC898122_STB2_OFF: + RegWriteA(client, LC898122_CLKON, 0x1F); + lc898122_selectgyrosleep(lc898122_dev, OFF); + RegWriteA(client, LC898122_PWMMONA, 0x80); + lc898122_driversw(lc898122_dev, ON); + lc898122_afdriversw(lc898122_dev, ON); + RegWriteA(client, LC898122_CVA, 0xC0); + if (lc898122_dev->state.flags & LC898122_AF_PWMMODE) + RegWriteA(client, LC898122_PWMAAF, 0x80); + else + RegWriteA(client, LC898122_PWMAAF, 0x00); + RegWriteA(client, LC898122_PWMA, 0xC0); + RegWriteA(client, LC898122_STBB1, 0x05); + RegWriteA(client, LC898122_STBB0, 0xDF); + + break; + case LC898122_STB3_ON: + + if (!(lc898122_dev->state.flags & LC898122_AF_PWMMODE)) + RegWriteA(client, LC898122_DRVFCAF, 0x00); + RegWriteA(client, LC898122_STBB0, 0x00); + RegWriteA(client, LC898122_STBB1, 0x00); + RegWriteA(client, LC898122_PWMA, 0x00); + RegWriteA(client, LC898122_PWMAAF, 0x00); + RegWriteA(client, LC898122_CVA, 0x00); + lc898122_driversw(lc898122_dev, OFF); + lc898122_afdriversw(lc898122_dev, OFF); + if (!(lc898122_dev->state.flags & LC898122_MONITOR_OFF)) + RegWriteA(client, LC898122_PWMMONA, 0x00); + lc898122_selectgyrosleep(lc898122_dev, ON); + RegWriteA(client, LC898122_CLKON, 0x00); + RegWriteA(client, LC898122_I2CSEL, 0x01); + RegWriteA(client, LC898122_OSCSTOP, 0x02); + break; + case LC898122_STB3_OFF: + RegWriteA(client, LC898122_OSCSTOP, 0x00); + RegWriteA(client, LC898122_I2CSEL, 0x00); + RegWriteA(client, LC898122_CLKON, 0x1F); + lc898122_selectgyrosleep(lc898122_dev, OFF); + RegWriteA(client, LC898122_PWMMONA, 0x80); + lc898122_driversw(lc898122_dev, ON); + lc898122_afdriversw(lc898122_dev, ON); + RegWriteA(client, LC898122_CVA, 0xC0); + if (lc898122_dev->state.flags & LC898122_AF_PWMMODE) + RegWriteA(client, LC898122_PWMAAF, 0x80); + else + RegWriteA(client, LC898122_PWMAAF, 0x00); + RegWriteA(client, LC898122_PWMA, 0xC0); + RegWriteA(client, LC898122_STBB1, 0x05); + RegWriteA(client, LC898122_STBB0, 0xDF); + + break; + + case LC898122_STB4_ON: + + if (!(lc898122_dev->state.flags & LC898122_AF_PWMMODE)) + RegWriteA(client, LC898122_DRVFCAF, 0x00); + RegWriteA(client, LC898122_STBB0, 0x00); + RegWriteA(client, LC898122_STBB1, 0x00); + RegWriteA(client, LC898122_PWMA, 0x00); + RegWriteA(client, LC898122_PWMAAF, 0x00); + RegWriteA(client, LC898122_CVA, 0x00); + lc898122_driversw(lc898122_dev, OFF); + lc898122_afdriversw(lc898122_dev, OFF); + if (!(lc898122_dev->state.flags & LC898122_MONITOR_OFF)) + RegWriteA(client, LC898122_PWMMONA, 0x00); + lc898122_GyOutSignalCont(lc898122_dev); + RegWriteA(client, LC898122_CLKON, 0x04); + break; + case LC898122_STB4_OFF: + RegWriteA(client, LC898122_CLKON, 0x1F); + lc898122_selectgyrosleep(lc898122_dev, OFF); + RegWriteA(client, LC898122_PWMMONA, 0x80); + lc898122_driversw(lc898122_dev, ON); + lc898122_afdriversw(lc898122_dev, ON); + RegWriteA(client, LC898122_CVA, 0xC0); + if (lc898122_dev->state.flags & LC898122_AF_PWMMODE) + RegWriteA(client, LC898122_PWMAAF, 0x80); + else 
+ RegWriteA(client, LC898122_PWMAAF, 0x00); + RegWriteA(client, LC898122_PWMA, 0xC0); + RegWriteA(client, LC898122_STBB1, 0x05); + RegWriteA(client, LC898122_STBB0, 0xDF); + + break; + + /************** special mode ************/ + case LC898122_STB2_OISON: + + RegReadA(client, LC898122_STBB0, &UcStbb0); + UcStbb0 &= 0x80; + RegWriteA(client, LC898122_STBB0, UcStbb0); + RegWriteA(client, LC898122_PWMA, 0x00); + RegWriteA(client, LC898122_CVA, 0x00); + lc898122_driversw(lc898122_dev, OFF); + if (!(lc898122_dev->state.flags & LC898122_MONITOR_OFF)) + RegWriteA(client, LC898122_PWMMONA, 0x00); + lc898122_selectgyrosleep(lc898122_dev, ON); + RegReadA(client, LC898122_CLKON, &UcClkon); + UcClkon &= 0x1A; + RegWriteA(client, LC898122_CLKON, UcClkon); + break; + case LC898122_STB2_OISOFF: + RegReadA(client, LC898122_CLKON, &UcClkon); + UcClkon |= 0x05; + RegWriteA(client, LC898122_CLKON, UcClkon); + lc898122_selectgyrosleep(lc898122_dev, OFF); + + RegWriteA(client, LC898122_PWMMONA, 0x80); + lc898122_driversw(lc898122_dev, ON); + RegWriteA(client, LC898122_CVA, 0xC0); + RegWriteA(client, LC898122_PWMA, 0xC0); + RegReadA(client, LC898122_STBB0, &UcStbb0); + UcStbb0 |= 0x5F; + RegWriteA(client, LC898122_STBB0, UcStbb0); + + break; + + case LC898122_STB2_AFON: + if (!(lc898122_dev->state.flags & LC898122_AF_PWMMODE)) + RegWriteA(client, LC898122_DRVFCAF, 0x00); + + RegReadA(client, LC898122_STBB0, &UcStbb0); + UcStbb0 &= 0x7F; + RegWriteA(client, LC898122_STBB0, UcStbb0); + RegWriteA(client, LC898122_STBB1, 0x00); + if (lc898122_dev->state.flags & LC898122_AF_PWMMODE) + RegWriteA(client, LC898122_PWMAAF, 0x80); + else + RegWriteA(client, LC898122_PWMAAF, 0x00); + lc898122_afdriversw(lc898122_dev, OFF); + if (!(lc898122_dev->state.flags & LC898122_MONITOR_OFF)) + RegWriteA(client, LC898122_PWMMONA, 0x00); + RegReadA(client, LC898122_CLKON, &UcClkon); + UcClkon &= 0x07; + RegWriteA(client, LC898122_CLKON, UcClkon); + break; + case LC898122_STB2_AFOFF: + RegReadA(client, LC898122_CLKON, &UcClkon); + UcClkon |= 0x18; + RegWriteA(client, LC898122_CLKON, UcClkon); + lc898122_afdriversw(lc898122_dev, ON); + RegWriteA(client, LC898122_PWMAAF, 0x00); + RegWriteA(client, LC898122_STBB1, 0x05); + RegReadA(client, LC898122_STBB0, &UcStbb0); + UcStbb0 |= 0x80; + RegWriteA(client, LC898122_STBB0, UcStbb0); + + break; + } +} + +void lc898122_SetZsp(struct lc898122_device *lc898122_dev, + u8 UcZoomStepDat) +{ + struct i2c_client *client = lc898122_dev->client; + u32 UlGyrZmx, UlGyrZmy, UlGyrZrx, UlGyrZry; + + /* Zoom Step */ + if (UcZoomStepDat > (LC898122_ZOOMTBL - 1)) + UcZoomStepDat = (LC898122_ZOOMTBL - 1); + + if (UcZoomStepDat == 0) { + UlGyrZmx = ClGyxZom[0]; + UlGyrZmy = ClGyyZom[0]; + } else { + UlGyrZmx = ClGyxZom[UcZoomStepDat]; + UlGyrZmy = ClGyyZom[UcZoomStepDat]; + } + + RamWrite32A(client, LC898122_gxlens, UlGyrZmx); + RamWrite32A(client, LC898122_gylens, UlGyrZmy); + + RamRead32A(client, LC898122_gxlens, &UlGyrZrx); + RamRead32A(client, LC898122_gylens, &UlGyrZry); + + if (UlGyrZmx != UlGyrZrx) + RamWrite32A(client, LC898122_gxlens, UlGyrZmx); + + if (UlGyrZmy != UlGyrZry) + RamWrite32A(client, LC898122_gylens, UlGyrZmy); +} + +void lc898122_StbOnn(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + u8 UcRegValx, UcRegValy; + u8 UcRegIni; + u8 UcCnt; + + RegReadA(client, LC898122_WH_EQSWX, &UcRegValx); + RegReadA(client, LC898122_WH_EQSWY, &UcRegValy); + + if (((UcRegValx & 0x01) != 0x01) && ((UcRegValy & 0x01) != 0x01)) { + RegWriteA(client, 
LC898122_WH_SMTSRVON, 0x01); + + lc898122_SrvCon(lc898122_dev, LC898122_X_DIR, ON); + lc898122_SrvCon(lc898122_dev, LC898122_Y_DIR, ON); + + for (UcCnt = 0; UcCnt < 60; UcCnt++) { + RegReadA(client, LC898122_RH_SMTSRVSTT, &UcRegIni); + if ((UcRegIni & 0x77) == 0x66) + break; + msleep(10); + } + + RegWriteA(client, LC898122_WH_SMTSRVON, 0x00); + + if (UcCnt == 60) { + RamWriteA(client, LC898122_SXOFFZ2, 0); + RamWriteA(client, LC898122_SYOFFZ2, 0); + } + } else { + lc898122_SrvCon(lc898122_dev, LC898122_X_DIR, ON); + lc898122_SrvCon(lc898122_dev, LC898122_Y_DIR, ON); + } +} + +void lc898122_StbOnnN(struct lc898122_device *lc898122_dev, u8 UcStbY, + u8 UcStbX) +{ + struct i2c_client *client = lc898122_dev->client; + + u8 UcRegIni; + u8 UcSttMsk = 0; + u8 UcCnt; + + RegWriteA(client, LC898122_WH_SMTSRVON, 0x01); + if (UcStbX == ON) + UcSttMsk |= 0x07; + if (UcStbY == ON) + UcSttMsk |= 0x70; + + lc898122_SrvCon(lc898122_dev, LC898122_X_DIR, UcStbX); + lc898122_SrvCon(lc898122_dev, LC898122_Y_DIR, UcStbY); + + for (UcCnt = 0; UcCnt < 60; UcCnt++) { + RegReadA(client, LC898122_RH_SMTSRVSTT, &UcRegIni); + if ((UcRegIni & UcSttMsk) == (0x66 & UcSttMsk)) + break; + msleep(10); + } + + RegWriteA(client, LC898122_WH_SMTSRVON, 0x00); + + if (UcCnt == 60) { + if (UcStbX == ON) + RamWriteA(client, LC898122_SXOFFZ2, 0); + if (UcStbY == ON) + RamWriteA(client, LC898122_SYOFFZ2, 0); + } +} + +void lc898122_OptCen(struct lc898122_device *lc898122_dev, u8 UcOptmode, + u16 UsOptXval, u16 UsOptYval) +{ + struct i2c_client *client = lc898122_dev->client; + + lc898122_RamAccFixMod(lc898122_dev, ON); + + switch (UcOptmode) { + case LC898122_VAL_SET: + RamWriteA(client, LC898122_SXOFFZ1, UsOptXval); + RamWriteA(client, LC898122_SYOFFZ1, UsOptYval); + break; + case LC898122_VAL_FIX: + lc898122_dev->state.UsCntXof = UsOptXval; + lc898122_dev->state.UsCntYof = UsOptYval; + RamWriteA(client, LC898122_SXOFFZ1, + lc898122_dev->state.UsCntXof); + RamWriteA(client, LC898122_SYOFFZ1, + lc898122_dev->state.UsCntYof); + break; + case LC898122_VAL_SPC: + RamReadA(client, LC898122_SXOFFZ1, &UsOptXval); + RamReadA(client, LC898122_SYOFFZ1, &UsOptYval); + lc898122_dev->state.UsCntXof = UsOptXval; + lc898122_dev->state.UsCntYof = UsOptYval; + break; + } + + lc898122_RamAccFixMod(lc898122_dev, OFF); + +} + +void lc898122_SetSineWave(struct lc898122_device *lc898122_dev, u8 UcJikuSel, + u8 UcMeasMode) +{ + struct i2c_client *client = lc898122_dev->client; + + u16 UsFRQ_extclk[] = { 0x30EE/*139.9Hz*/, 0x037E/*10Hz*/ }; + u16 UsFRQ_osc[] = { 0x1877/*139.9Hz*/, 0x01BF/*10Hz*/ }; + u32 UlAMP[2][2] = {{ 0x3CA3D70A, 0x3CA3D70A }, + { 0x3F800000, 0x3F800000 } }; + u8 UcEqSwX, UcEqSwY; + u16 *UsFRQ; + + if (lc898122_dev->state.flags & LC898122_EXTCLK_ALL) + UsFRQ = UsFRQ_extclk; + else + UsFRQ = UsFRQ_osc; + + UcMeasMode &= 0x01; + UcJikuSel &= 0x01; + + RegWriteA(client, LC898122_WC_SINPHSX, 0x00); + RegWriteA(client, LC898122_WC_SINPHSY, 0x00); + + RegWriteA(client, LC898122_WC_MESSINMODE, 0x00); + RegWriteA(client, LC898122_WC_MESWAIT, 0x00); + + RamWrite32A(client, LC898122_sxsin, UlAMP[UcMeasMode][LC898122_X_DIR]); + RamWrite32A(client, LC898122_sysin, UlAMP[UcMeasMode][LC898122_Y_DIR]); + + RegWriteA(client, LC898122_WC_SINFRQ0, (u8)UsFRQ[UcMeasMode]); + RegWriteA(client, LC898122_WC_SINFRQ1, (u8)(UsFRQ[UcMeasMode] >> 8)); + + RegReadA(client, LC898122_WH_EQSWX, &UcEqSwX); + RegReadA(client, LC898122_WH_EQSWY, &UcEqSwY); + if (!UcMeasMode && !UcJikuSel) { + UcEqSwX |= 0x10; + UcEqSwY &= ~LC898122_EQSINSW; + } else if (!UcMeasMode && 
UcJikuSel) { + UcEqSwX &= ~LC898122_EQSINSW; + UcEqSwY |= 0x10; + } else if (UcMeasMode && !UcJikuSel) { + UcEqSwX = 0x22; + UcEqSwY = 0x03; + } else { + UcEqSwX = 0x03; + UcEqSwY = 0x22; + } + + RegWriteA(client, LC898122_WH_EQSWX, UcEqSwX); + RegWriteA(client, LC898122_WH_EQSWY, UcEqSwY); +} + +void lc898122_StartSineWave(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + + RegWriteA(client, LC898122_WC_SINON, 0x01); +} + +void lc898122_StopSineWave(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + + u8 UcEqSwX, UcEqSwY; + + RegWriteA(client, LC898122_WC_SINON, 0x00); + RegReadA(client, LC898122_WH_EQSWX, &UcEqSwX); + RegReadA(client, LC898122_WH_EQSWY, &UcEqSwY); + UcEqSwX &= ~LC898122_EQSINSW; + UcEqSwY &= ~LC898122_EQSINSW; + RegWriteA(client, LC898122_WH_EQSWX, UcEqSwX); + RegWriteA(client, LC898122_WH_EQSWY, UcEqSwY); + +} + +void lc898122_SetMeasFil(struct lc898122_device *lc898122_dev, u8 UcFilSel) +{ + lc898122_MesFil(lc898122_dev, UcFilSel); +} + +void lc898122_ClrMeasFil(struct lc898122_device *lc898122_dev) +{ + lc898122_cleargyro(lc898122_dev, 0x1000, LC898122_CLR_FRAM1); +} + +void lc898122_SetPanTiltMode(struct lc898122_device *lc898122_dev, u8 UcPnTmod) +{ + struct i2c_client *client = lc898122_dev->client; + + switch (UcPnTmod) { + case OFF: + RegWriteA(client, LC898122_WG_PANON, 0x00); + break; + case ON: +#ifdef NEW_PTST + RegWriteA(client, LC898122_WG_PANON, 0x10); +#else + RegWriteA(client, LC898122_WG_PANON, 0x01); +#endif + break; + } + +} + +u8 lc898122_TriSts(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + + u8 UcRsltSts = 0; + u8 UcVal; + + RegReadA(client, LC898122_WG_ADJGANGXATO, &UcVal); + if (UcVal & 0x03) { + RegReadA(client, LC898122_RG_LEVJUGE, &UcVal); + UcRsltSts = UcVal & 0x11; + UcRsltSts |= 0x80; + } + + return UcRsltSts; +} + +u8 lc898122_DrvPwmSw(struct lc898122_device *lc898122_dev, u8 UcSelPwmMod) +{ + struct i2c_client *client = lc898122_dev->client; + + switch (UcSelPwmMod) { + case LC898122_Mlnp: + RegWriteA(client, LC898122_DRVFC, 0xF0); + lc898122_dev->state.UcPwmMod = LC898122_PWMMOD_CVL; + break; + + case LC898122_Mpwm: + if (lc898122_dev->state.flags & LC898122_PWM_BREAK) + RegWriteA(client, LC898122_DRVFC, 0x00); + else + RegWriteA(client, LC898122_DRVFC, 0xC0); + lc898122_dev->state.UcPwmMod = LC898122_PWMMOD_PWM; + break; + } + + return UcSelPwmMod << 4; +} + +void lc898122_SetGcf(struct lc898122_device *lc898122_dev, u8 UcSetNum) +{ + struct i2c_client *client = lc898122_dev->client; + + if (UcSetNum > (LC898122_COEFTBL - 1)) + UcSetNum = (LC898122_COEFTBL - 1); + + if (lc898122_dev->state.UcModule == LC898122_MODULE_20M) + lc898122_dev->state.UlH1Coefval = ClDiCof_20M[UcSetNum]; + else + lc898122_dev->state.UlH1Coefval = ClDiCof_13M[UcSetNum]; + + RamWrite32A(client, LC898122_gxh1c, lc898122_dev->state.UlH1Coefval); + RamWrite32A(client, LC898122_gyh1c, lc898122_dev->state.UlH1Coefval); + + if (lc898122_dev->state.flags & LC898122_H1COEF_CHANGER) + lc898122_SetH1cMod(lc898122_dev, UcSetNum); +} + +void lc898122_SetH1cMod(struct lc898122_device *lc898122_dev, + u8 UcSetNum) +{ + struct i2c_client *client = lc898122_dev->client; + + u32 MAXLMT; + u32 MINLMT; + u32 CHGCOEF; + u32 MINLMT_MOV; + u32 CHGCOEF_MOV; + + if (lc898122_dev->state.UcModule == LC898122_MODULE_20M) { + MAXLMT = LC898122_MAXLMT_20M; + MINLMT = LC898122_MINLMT_20M; + CHGCOEF = LC898122_CHGCOEF_20M; + MINLMT_MOV = 
LC898122_MINLMT_MOV_20M; + CHGCOEF_MOV = LC898122_CHGCOEF_MOV_20M; + } else{ + MAXLMT = LC898122_MAXLMT_13M; + MINLMT = LC898122_MINLMT_13M; + CHGCOEF = LC898122_CHGCOEF_13M; + MINLMT_MOV = LC898122_MINLMT_MOV_13M; + CHGCOEF_MOV = LC898122_CHGCOEF_MOV_13M; + } + + switch (UcSetNum) { + case (LC898122_ACTMODE): + lc898122_IniPtMovMod(lc898122_dev, OFF); + + if (lc898122_dev->state.UcModule == LC898122_MODULE_20M) + lc898122_dev->state.UlH1Coefval = ClDiCof_20M[0]; + else + lc898122_dev->state.UlH1Coefval = ClDiCof_13M[0]; + + lc898122_dev->state.UcH1LvlMod = 0; + + RamWrite32A(client, LC898122_gxlmt6L, MINLMT); + RamWrite32A(client, LC898122_gxlmt6H, MAXLMT); + + RamWrite32A(client, LC898122_gylmt6L, MINLMT); + RamWrite32A(client, LC898122_gylmt6H, MAXLMT); + + RamWrite32A(client, LC898122_gxhc_tmp, + lc898122_dev->state.UlH1Coefval); + RamWrite32A(client, LC898122_gxmg, CHGCOEF); + + RamWrite32A(client, LC898122_gyhc_tmp, + lc898122_dev->state.UlH1Coefval); + RamWrite32A(client, LC898122_gymg, CHGCOEF); + + RegWriteA(client, LC898122_WG_HCHR, 0x12); + break; + + case(LC898122_S2MODE): + RegWriteA(client, LC898122_WG_HCHR, 0x10); + break; + + case(LC898122_MOVMODE): + lc898122_IniPtMovMod(lc898122_dev, ON); + + RamWrite32A(client, LC898122_gxlmt6L, MINLMT_MOV); + RamWrite32A(client, LC898122_gylmt6L, MINLMT_MOV); + + RamWrite32A(client, LC898122_gxmg, CHGCOEF_MOV); + RamWrite32A(client, LC898122_gymg, CHGCOEF_MOV); + + RamWrite32A(client, LC898122_gxhc_tmp, + lc898122_dev->state.UlH1Coefval); + RamWrite32A(client, LC898122_gyhc_tmp, + lc898122_dev->state.UlH1Coefval); + + RegWriteA(client, LC898122_WG_HCHR, 0x12); + break; + + default: + lc898122_IniPtMovMod(lc898122_dev, OFF); + + lc898122_dev->state.UcH1LvlMod = UcSetNum; + + RamWrite32A(client, LC898122_gxlmt6L, MINLMT); + RamWrite32A(client, LC898122_gylmt6L, MINLMT); + + RamWrite32A(client, LC898122_gxmg, CHGCOEF); + RamWrite32A(client, LC898122_gymg, CHGCOEF); + + RamWrite32A(client, LC898122_gxhc_tmp, + lc898122_dev->state.UlH1Coefval); + RamWrite32A(client, LC898122_gyhc_tmp, + lc898122_dev->state.UlH1Coefval); + + RegWriteA(client, LC898122_WG_HCHR, 0x12); + break; + } +} + +u16 lc898122_RdFwVr(void) +{ + return LC898122_FW_VER; +} + +u8 lc898122_GetDOFSTDAF(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + + u8 ucDRVFC3AF, ucDRVFC4AF, ucRegDat; + + if (lc898122_dev->state.UcCvrCod == LC898122_CVER122) { + RegReadA(client, LC898122_DRVFC3AF, &ucDRVFC3AF); + RegReadA(client, LC898122_DRVFC4AF, &ucDRVFC4AF); + ucRegDat = ((ucDRVFC4AF & 0xC0) >> 3) | (ucDRVFC3AF & 0x07); + } else { + RegReadA(client, LC898122_DRVFC4AF, &ucDRVFC4AF); + ucRegDat = ucDRVFC4AF >> 3; + } + + return ucRegDat; +} + +void lc898122_SetDOFSTDAF(struct lc898122_device *lc898122_dev, u8 ucSetDat) +{ + struct i2c_client *client = lc898122_dev->client; + + u8 ucRegDat; + + if (lc898122_dev->state.UcCvrCod == LC898122_CVER122) { + RegReadA(client, LC898122_DRVFC3AF, &ucRegDat); + RegWriteA(client, LC898122_DRVFC4AF, (ucSetDat & 0x18) << 3); + RegWriteA(client, LC898122_DRVFC3AF, + (ucRegDat & 0x70) | (ucSetDat & 0x07)); + } else { + RegWriteA(client, LC898122_DRVFC4AF, ucSetDat << 3); + } +} + +u8 lc898122_TstActMov(struct lc898122_device *lc898122_dev, u8 UcDirSel) +{ + struct i2c_client *client = lc898122_dev->client; + + u8 UcRsltSts; + u16 UsMsppVal; + + lc898122_MesFil(lc898122_dev, LC898122_NOISE); + + if (!UcDirSel) { + RamWrite32A(client, LC898122_sxsin, LC898122_ACT_CHK_LVL); + RamWrite32A(client, LC898122_sysin, 
0x000000); + lc898122_SetSinWavePara(lc898122_dev, 0x05, LC898122_XACTTEST); + } else { + RamWrite32A(client, LC898122_sxsin, 0x000000); + RamWrite32A(client, LC898122_sysin, LC898122_ACT_CHK_LVL); + lc898122_SetSinWavePara(lc898122_dev, 0x05, LC898122_YACTTEST); + } + + if (!UcDirSel) { + RegWriteA(client, LC898122_WC_MES1ADD0, (u8)LC898122_SXINZ1); + RegWriteA(client, LC898122_WC_MES1ADD1, + (u8)((LC898122_SXINZ1 >> 8) & 0x0001)); + } else { + RegWriteA(client, LC898122_WC_MES1ADD0, + (u8)LC898122_SYINZ1); + RegWriteA(client, LC898122_WC_MES1ADD1, + (u8)((LC898122_SYINZ1 >> 8) & 0x0001)); + } + + RegWriteA(client, LC898122_WC_MESSINMODE, 0x00); + RegWriteA(client, LC898122_WC_MESLOOP1, 0x00); + RegWriteA(client, LC898122_WC_MESLOOP0, 0x02); + RamWrite32A(client, LC898122_msmean, 0x3F000000); + RegWriteA(client, LC898122_WC_MESABS, 0x00); + lc898122_BsyWit(lc898122_dev, LC898122_WC_MESMODE, 0x02); + + lc898122_RamAccFixMod(lc898122_dev, ON); + RamReadA(client, LC898122_MSPP1AV, &UsMsppVal); + lc898122_RamAccFixMod(lc898122_dev, OFF); + + if (!UcDirSel) + lc898122_SetSinWavePara(lc898122_dev, 0x00, LC898122_XACTTEST); + else + lc898122_SetSinWavePara(lc898122_dev, 0x00, LC898122_YACTTEST); + + UcRsltSts = LC898122_EXE_END; + if (UsMsppVal > LC898122_ACT_THR) { + if (!UcDirSel) + UcRsltSts = LC898122_EXE_HXMVER; + else + UcRsltSts = LC898122_EXE_HYMVER; + } + + return UcRsltSts; +} + +u8 lc898122_RunHea(struct lc898122_device *lc898122_dev) +{ + u8 UcRst; + + UcRst = LC898122_EXE_END; + UcRst |= lc898122_TstActMov(lc898122_dev, LC898122_X_DIR); + UcRst |= lc898122_TstActMov(lc898122_dev, LC898122_Y_DIR); + + return UcRst; +} + +u8 lc898122_RunGea(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + + u8 UcRst, UcCnt, UcXLowCnt, UcYLowCnt, UcXHigCnt, UcYHigCnt; + u16 UsGxoVal[10], UsGyoVal[10], UsDif; + + UcRst = LC898122_EXE_END; + UcXLowCnt = UcYLowCnt = UcXHigCnt = UcYHigCnt = 0; + + lc898122_MesFil(lc898122_dev, LC898122_THROUGH); + + for (UcCnt = 0; UcCnt < 10; UcCnt++) { + RegWriteA(client, LC898122_WC_MES1ADD0, 0x00); + RegWriteA(client, LC898122_WC_MES1ADD1, 0x00); + lc898122_cleargyro(lc898122_dev, 0x1000, LC898122_CLR_FRAM1); + UsGxoVal[UcCnt] = + (u16)lc898122_GenMes(lc898122_dev, + LC898122_AD2Z, 0); + + RegWriteA(client, LC898122_WC_MES1ADD0, 0x00); + RegWriteA(client, LC898122_WC_MES1ADD1, 0x00); + lc898122_cleargyro(lc898122_dev, 0x1000, LC898122_CLR_FRAM1); + UsGyoVal[UcCnt] = + (u16)lc898122_GenMes(lc898122_dev, + LC898122_AD3Z, 0); + + if (UcCnt > 0) { + if ((short)UsGxoVal[0] > (short)UsGxoVal[UcCnt]) + UsDif = (u16)((short)UsGxoVal[0] - + (short)UsGxoVal[UcCnt]); + else + UsDif = (u16)((short)UsGxoVal[UcCnt] - + (short)UsGxoVal[0]); + + if (UsDif > LC898122_GEA_DIF_HIG) + UcXHigCnt++; + + if (UsDif < LC898122_GEA_DIF_LOW) + UcXLowCnt++; + + if ((short)UsGyoVal[0] > (short)UsGyoVal[UcCnt]) + UsDif = (u16)((short)UsGyoVal[0] - + (short)UsGyoVal[UcCnt]); + else + UsDif = (u16)((short)UsGyoVal[UcCnt] - + (short)UsGyoVal[0]); + + if (UsDif > LC898122_GEA_DIF_HIG) + UcYHigCnt++; + + if (UsDif < LC898122_GEA_DIF_LOW) + UcYLowCnt++; + } + } + + if (UcXHigCnt >= 1) + UcRst = UcRst | LC898122_EXE_GXABOVE; + + if (UcXLowCnt > 8) + UcRst = UcRst | LC898122_EXE_GXBELOW; + + if (UcYHigCnt >= 1) + UcRst = UcRst | LC898122_EXE_GYABOVE; + + if (UcYLowCnt > 8) + UcRst = UcRst | LC898122_EXE_GYBELOW; + + return UcRst; +} + diff --git a/drivers/media/i2c/lc898122/lc898122-oisfil.h b/drivers/media/i2c/lc898122/lc898122-oisfil.h new file mode 100644 
index 000000000000..5e73e5071735 --- /dev/null +++ b/drivers/media/i2c/lc898122/lc898122-oisfil.h @@ -0,0 +1,1186 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation + * Copyright (C) ON Semiconductor + * + */ + +/*Filter Calculator Version 4.02*/ +/*the time and date : 2015/3/25 11:58:50*/ +/*FC filename : LC898122_FIL_20M_V0016*/ +/*fs,23438Hz*/ +/*LSI No.,LC898122*/ +/*Comment,*/ + +/* 8bit */ +const struct STFILREG CsFilReg_20M[] = { + { 0x0111, 0x00}, /*00,0111*/ + { 0x0113, 0x00}, /*00,0113*/ + { 0x0114, 0x00}, /*00,0114*/ + { 0x0172, 0x00}, /*00,0172*/ + { 0x01E3, 0x00}, /*00,01E3*/ + { 0x01E4, 0x00}, /*00,01E4*/ + { 0xFFFF, 0xFF } + }; + +/* 32bit */ +const struct STFILRAM CsFilRam_20M[] = { + { 0x1000, 0x3F800000}, /*1000,0dB,invert=0*/ + { 0x1001, 0x3F800000}, /*1001,0dB,invert=0*/ + { 0x1002, 0x00000000}, /*1002,Cutoff,invert=0*/ + { 0x1003, 0x3F800000}, /*1003,0dB,invert=0*/ + { 0x1004, 0x38A8A540}, /*1004,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1005, 0x38A8A540}, /*1005,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1006, 0x3F7FF580}, /*1006,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1007, 0x3F800000}, /*1007,0dB,invert=0*/ + { 0x1008, 0xBF800000}, /*1008,0dB,invert=1*/ + { 0x1009, 0x00000000}, /*1009,Cutoff,invert=0*/ + { 0x100A, 0x3F800000}, /*100A,0dB,invert=0*/ + { 0x100B, 0x3F800000}, /*100B,0dB,invert=0*/ + { 0x100C, 0x3F800000}, /*100C,0dB,invert=0*/ + { 0x100E, 0x3F800000}, /*100E,0dB,invert=0*/ + { 0x1010, 0x3DA2AD80}, /*1010*/ + { 0x1011, 0x00000000}, /*1011,Free,fs/1,invert=0*/ + { 0x1012, 0x3F7FFD00}, /*1012,Free,fs/1,invert=0*/ + { 0x1013, 0x3FC83380}, /*1013,HBF,50Hz,150Hz,4dB,fs/1,invert=0*/ + { 0x1014, 0xBFC58900}, /*1014,HBF,50Hz,150Hz,4dB,fs/1,invert=0*/ + { 0x1015, 0x3F75E8C0}, /*1015,HBF,50Hz,150Hz,4dB,fs/1,invert=0*/ + { 0x1016, 0x3F06BD80}, /*1016,LBF,0.2Hz,0.38Hz,0dB,fs/1,invert=0*/ + { 0x1017, 0xBF06BA00}, /*1017,LBF,0.2Hz,0.38Hz,0dB,fs/1,invert=0*/ + { 0x1018, 0x3F7FFC80}, /*1018,LBF,0.2Hz,0.38Hz,0dB,fs/1,invert=0*/ + { 0x1019, 0x3F800000}, /*1019,Through,0dB,fs/1,invert=0*/ + { 0x101A, 0x00000000}, /*101A,Through,0dB,fs/1,invert=0*/ + { 0x101B, 0x00000000}, /*101B,Through,0dB,fs/1,invert=0*/ + { 0x101C, 0x3F800000}, /*101C,0dB,invert=0*/ + { 0x101D, 0x00000000}, /*101D,Cutoff,invert=0*/ + { 0x101E, 0x00000000}, /*101E,Cutoff,invert=0*/ + { 0x1020, 0x3F800000}, /*1020,0dB,invert=0*/ + { 0x1021, 0x3F800000}, /*1021,0dB,invert=0*/ + { 0x1022, 0x3F800000}, /*1022,0dB,invert=0*/ + { 0x1023, 0x3F800000}, /*1023,Through,0dB,fs/1,invert=0*/ + { 0x1024, 0x00000000}, /*1024,Through,0dB,fs/1,invert=0*/ + { 0x1025, 0x00000000}, /*1025,Through,0dB,fs/1,invert=0*/ + { 0x1026, 0x00000000}, /*1026,Through,0dB,fs/1,invert=0*/ + { 0x1027, 0x00000000}, /*1027,Through,0dB,fs/1,invert=0*/ + { 0x1030, 0x3F800000}, /*1030,Through,0dB,fs/1,invert=0*/ + { 0x1031, 0x00000000}, /*1031,Through,0dB,fs/1,invert=0*/ + { 0x1032, 0x00000000}, /*1032,Through,0dB,fs/1,invert=0*/ + { 0x1033, 0x3F800000}, /*1033,Through,0dB,fs/1,invert=0*/ + { 0x1034, 0x00000000}, /*1034,Through,0dB,fs/1,invert=0*/ + { 0x1035, 0x00000000}, /*1035,Through,0dB,fs/1,invert=0*/ + { 0x1036, 0x3F800000}, /*1036,Through,0dB,fs/1,invert=0*/ + { 0x1037, 0x00000000}, /*1037,Through,0dB,fs/1,invert=0*/ + { 0x1038, 0x00000000}, /*1038,Through,0dB,fs/1,invert=0*/ + { 0x1039, 0x3F800000}, /*1039,Through,0dB,fs/1,invert=0*/ + { 0x103A, 0x00000000}, /*103A,Through,0dB,fs/1,invert=0*/ + { 0x103B, 0x00000000}, /*103B,Through,0dB,fs/1,invert=0*/ + { 0x103C, 0x3F800000}, /*103C,Through,0dB,fs/1,invert=0*/ + 
{ 0x103D, 0x00000000}, /*103D,Through,0dB,fs/1,invert=0*/ + { 0x103E, 0x00000000}, /*103E,Through,0dB,fs/1,invert=0*/ + { 0x1043, 0x39D2BD40}, /*1043,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1044, 0x39D2BD40}, /*1044,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1045, 0x3F7FCB40}, /*1045,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1046, 0x38A8A540}, /*1046,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1047, 0x38A8A540}, /*1047,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1048, 0x3F7FF580}, /*1048,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1049, 0x390C87C0}, /*1049,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x104A, 0x390C87C0}, /*104A,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x104B, 0x3F7FEE80}, /*104B,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x104C, 0x398C8300}, /*104C,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x104D, 0x398C8300}, /*104D,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x104E, 0x3F7FDCC0}, /*104E,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x1053, 0x3F800000}, /*1053,Through,0dB,fs/1,invert=0*/ + { 0x1054, 0x00000000}, /*1054,Through,0dB,fs/1,invert=0*/ + { 0x1055, 0x00000000}, /*1055,Through,0dB,fs/1,invert=0*/ + { 0x1056, 0x3F800000}, /*1056,Through,0dB,fs/1,invert=0*/ + { 0x1057, 0x00000000}, /*1057,Through,0dB,fs/1,invert=0*/ + { 0x1058, 0x00000000}, /*1058,Through,0dB,fs/1,invert=0*/ + { 0x1059, 0x3F800000}, /*1059,Through,0dB,fs/1,invert=0*/ + { 0x105A, 0x00000000}, /*105A,Through,0dB,fs/1,invert=0*/ + { 0x105B, 0x00000000}, /*105B,Through,0dB,fs/1,invert=0*/ + { 0x105C, 0x3F800000}, /*105C,Through,0dB,fs/1,invert=0*/ + { 0x105D, 0x00000000}, /*105D,Through,0dB,fs/1,invert=0*/ + { 0x105E, 0x00000000}, /*105E,Through,0dB,fs/1,invert=0*/ + { 0x1063, 0x3F800000}, /*1063,0dB,invert=0*/ + { 0x1066, 0x3F800000}, /*1066,0dB,invert=0*/ + { 0x1069, 0x3F800000}, /*1069,0dB,invert=0*/ + { 0x106C, 0x3F800000}, /*106C,0dB,invert=0*/ + { 0x1073, 0x00000000}, /*1073,Cutoff,invert=0*/ + { 0x1076, 0x3F800000}, /*1076,0dB,invert=0*/ + { 0x1079, 0x3F800000}, /*1079,0dB,invert=0*/ + { 0x107C, 0x3F800000}, /*107C,0dB,invert=0*/ + { 0x1083, 0x38D1B700}, /*1083,-80dB,invert=0*/ + { 0x1086, 0x00000000}, /*1086,Cutoff,invert=0*/ + { 0x1089, 0x00000000}, /*1089,Cutoff,invert=0*/ + { 0x108C, 0x00000000}, /*108C,Cutoff,invert=0*/ + { 0x1093, 0x00000000}, /*1093,Cutoff,invert=0*/ + { 0x1098, 0x3F800000}, /*1098,0dB,invert=0*/ + { 0x1099, 0x3F800000}, /*1099,0dB,invert=0*/ + { 0x109A, 0x3F800000}, /*109A,0dB,invert=0*/ + { 0x10A1, 0x3C58B440}, /*10A1,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A2, 0x3C58B440}, /*10A2,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A3, 0x3F793A40}, /*10A3,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A4, 0x3C58B440}, /*10A4,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A5, 0x3C58B440}, /*10A5,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A6, 0x3F793A40}, /*10A6,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A7, 0x3F800000}, /*10A7,Through,0dB,fs/1,invert=0*/ + { 0x10A8, 0x00000000}, /*10A8,Through,0dB,fs/1,invert=0*/ + { 0x10A9, 0x00000000}, /*10A9,Through,0dB,fs/1,invert=0*/ + { 0x10AA, 0x00000000}, /*10AA,Cutoff,invert=0*/ + { 0x10AB, 0x3BDA2580}, /*10AB,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x10AC, 0x3BDA2580}, /*10AC,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x10AD, 0x3F7C9780}, /*10AD,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x10B0, 0x3E0DE280}, /*10B0,LPF,1200Hz,0dB,fs/1,invert=0*/ + { 0x10B1, 0x3E0DE280}, /*10B1,LPF,1200Hz,0dB,fs/1,invert=0*/ + { 0x10B2, 0x3F390EC0}, /*10B2,LPF,1200Hz,0dB,fs/1,invert=0*/ + { 0x10B3, 0x3F800000}, /*10B3,0dB,invert=0*/ + { 0x10B4, 0x00000000}, /*10B4,Cutoff,invert=0*/ + { 0x10B5, 0x00000000}, /*10B5,Cutoff,invert=0*/ + { 0x10B6, 0x3F353C00}, /*10B6,-3dB,invert=0*/ + { 0x10B8, 0x3F800000}, /*10B8,0dB,invert=0*/ + { 
0x10B9, 0x00000000}, /*10B9,Cutoff,invert=0*/ + { 0x10C0, 0x3FE304C0}, /*10C0,HBF,60Hz,1000Hz,6dB,fs/1,invert=0*/ + { 0x10C1, 0xBFDF6540}, /*10C1,HBF,60Hz,1000Hz,6dB,fs/1,invert=0*/ + { 0x10C2, 0x3F437BC0}, /*10C2,HBF,60Hz,1000Hz,6dB,fs/1,invert=0*/ + { 0x10C3, 0x3F7F7D40}, /*10C3,HBF,45Hz,60Hz,0dB,fs/1,invert=0*/ + { 0x10C4, 0xBF7C6D00}, /*10C4,HBF,45Hz,60Hz,0dB,fs/1,invert=0*/ + { 0x10C5, 0x3F7BEA40}, /*10C5,HBF,45Hz,60Hz,0dB,fs/1,invert=0*/ + { 0x10C6, 0x3D506F00}, /*10C6,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x10C7, 0x3D506F00}, /*10C7,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x10C8, 0x3F65F240}, /*10C8,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x10C9, 0x3BAED500}, /*10C9,LPF,1Hz,32dB,fs/1,invert=0*/ + { 0x10CA, 0x3BAED500}, /*10CA,LPF,1Hz,32dB,fs/1,invert=0*/ + { 0x10CB, 0x3F7FEE80}, /*10CB,LPF,1Hz,32dB,fs/1,invert=0*/ + { 0x10CC, 0x3E0FC5C0}, /*10CC,LBF,3.5Hz,25Hz,0dB,fs/1,invert=0*/ + { 0x10CD, 0xBE0ED000}, /*10CD,LBF,3.5Hz,25Hz,0dB,fs/1,invert=0*/ + { 0x10CE, 0x3F7FC280}, /*10CE,LBF,3.5Hz,25Hz,0dB,fs/1,invert=0*/ + { 0x10D0, 0x3FFF64C0}, /*10D0,6dB,invert=0*/ + { 0x10D1, 0x00000000}, /*10D1,Cutoff,invert=0*/ + { 0x10D2, 0x3F800000}, /*10D2,0dB,invert=0*/ + { 0x10D3, 0x3F800000}, /*10D3,0dB,invert=0*/ + { 0x10D4, 0x3F800000}, /*10D4,0dB,invert=0*/ + { 0x10D5, 0x3F800000}, /*10D5,0dB,invert=0*/ + { 0x10D7, 0x3F8EF7C0}, /*10D7,LPF,500Hz,25dB,fs/1,invert=0*/ + { 0x10D8, 0x3F8EF7C0}, /*10D8,LPF,500Hz,25dB,fs/1,invert=0*/ + { 0x10D9, 0x3F5FD780}, /*10D9,LPF,500Hz,25dB,fs/1,invert=0*/ + { 0x10DA, 0x3F76BA40}, /*10DA,PKF,1200Hz,-6dB,5,fs/1,invert=0*/ + { 0x10DB, 0xBFE16F80}, /*10DB,PKF,1200Hz,-6dB,5,fs/1,invert=0*/ + { 0x10DC, 0x3FE16F80}, /*10DC,PKF,1200Hz,-6dB,5,fs/1,invert=0*/ + { 0x10DD, 0x3F641800}, /*10DD,PKF,1200Hz,-6dB,5,fs/1,invert=0*/ + { 0x10DE, 0xBF5AD200}, /*10DE,PKF,1200Hz,-6dB,5,fs/1,invert=0*/ + { 0x10E0, 0x3F7C5880}, /*10E0,PKF,650Hz,-3dB,4,fs/1,invert=0*/ + { 0x10E1, 0xBFEFD200}, /*10E1,PKF,650Hz,-3dB,4,fs/1,invert=0*/ + { 0x10E2, 0x3FEFD200}, /*10E2,PKF,650Hz,-3dB,4,fs/1,invert=0*/ + { 0x10E3, 0x3F6AA180}, /*10E3,PKF,650Hz,-3dB,4,fs/1,invert=0*/ + { 0x10E4, 0xBF66FA40}, /*10E4,PKF,650Hz,-3dB,4,fs/1,invert=0*/ + { 0x10E5, 0x3F800000}, /*10E5,0dB,invert=0*/ + { 0x10E8, 0x3F800000}, /*10E8,0dB,invert=0*/ + { 0x10E9, 0x00000000}, /*10E9,Cutoff,invert=0*/ + { 0x10EA, 0x00000000}, /*10EA,Cutoff,invert=0*/ + { 0x10EB, 0x00000000}, /*10EB,Cutoff,invert=0*/ + { 0x10F0, 0x3F800000}, /*10F0,Through,0dB,fs/1,invert=0*/ + { 0x10F1, 0x00000000}, /*10F1,Through,0dB,fs/1,invert=0*/ + { 0x10F2, 0x00000000}, /*10F2,Through,0dB,fs/1,invert=0*/ + { 0x10F3, 0x00000000}, /*10F3,Through,0dB,fs/1,invert=0*/ + { 0x10F4, 0x00000000}, /*10F4,Through,0dB,fs/1,invert=0*/ + { 0x10F5, 0x3F800000}, /*10F5,Through,0dB,fs/1,invert=0*/ + { 0x10F6, 0x00000000}, /*10F6,Through,0dB,fs/1,invert=0*/ + { 0x10F7, 0x00000000}, /*10F7,Through,0dB,fs/1,invert=0*/ + { 0x10F8, 0x00000000}, /*10F8,Through,0dB,fs/1,invert=0*/ + { 0x10F9, 0x00000000}, /*10F9,Through,0dB,fs/1,invert=0*/ + { 0x1100, 0x3F800000}, /*1100,0dB,invert=0*/ + { 0x1101, 0x3F800000}, /*1101,0dB,invert=0*/ + { 0x1102, 0x00000000}, /*1102,Cutoff,invert=0*/ + { 0x1103, 0x3F800000}, /*1103,0dB,invert=0*/ + { 0x1104, 0x38A8A540}, /*1104,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1105, 0x38A8A540}, /*1105,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1106, 0x3F7FF580}, /*1106,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1107, 0x3F800000}, /*1107,0dB,invert=0*/ + { 0x1108, 0xBF800000}, /*1108,0dB,invert=1*/ + { 0x1109, 0x00000000}, /*1109,Cutoff,invert=0*/ + { 0x110A, 0x3F800000}, 
/*110A,0dB,invert=0*/ + { 0x110B, 0x3F800000}, /*110B,0dB,invert=0*/ + { 0x110C, 0x3F800000}, /*110C,0dB,invert=0*/ + { 0x110E, 0x3F800000}, /*110E,0dB,invert=0*/ + { 0x1110, 0x3DA2AD80}, /*1110*/ + { 0x1111, 0x00000000}, /*1111,Free,fs/1,invert=0*/ + { 0x1112, 0x3F7FFD00}, /*1112,Free,fs/1,invert=0*/ + { 0x1113, 0x3FC83380}, /*1113,HBF,50Hz,150Hz,4dB,fs/1,invert=0*/ + { 0x1114, 0xBFC58900}, /*1114,HBF,50Hz,150Hz,4dB,fs/1,invert=0*/ + { 0x1115, 0x3F75E8C0}, /*1115,HBF,50Hz,150Hz,4dB,fs/1,invert=0*/ + { 0x1116, 0x3F06BD80}, /*1116,LBF,0.2Hz,0.38Hz,0dB,fs/1,invert=0*/ + { 0x1117, 0xBF06BA00}, /*1117,LBF,0.2Hz,0.38Hz,0dB,fs/1,invert=0*/ + { 0x1118, 0x3F7FFC80}, /*1118,LBF,0.2Hz,0.38Hz,0dB,fs/1,invert=0*/ + { 0x1119, 0x3F800000}, /*1119,Through,0dB,fs/1,invert=0*/ + { 0x111A, 0x00000000}, /*111A,Through,0dB,fs/1,invert=0*/ + { 0x111B, 0x00000000}, /*111B,Through,0dB,fs/1,invert=0*/ + { 0x111C, 0x3F800000}, /*111C,0dB,invert=0*/ + { 0x111D, 0x00000000}, /*111D,Cutoff,invert=0*/ + { 0x111E, 0x00000000}, /*111E,Cutoff,invert=0*/ + { 0x1120, 0x3F800000}, /*1120,0dB,invert=0*/ + { 0x1121, 0x3F800000}, /*1121,0dB,invert=0*/ + { 0x1122, 0x3F800000}, /*1122,0dB,invert=0*/ + { 0x1123, 0x3F800000}, /*1123,Through,0dB,fs/1,invert=0*/ + { 0x1124, 0x00000000}, /*1124,Through,0dB,fs/1,invert=0*/ + { 0x1125, 0x00000000}, /*1125,Through,0dB,fs/1,invert=0*/ + { 0x1126, 0x00000000}, /*1126,Through,0dB,fs/1,invert=0*/ + { 0x1127, 0x00000000}, /*1127,Through,0dB,fs/1,invert=0*/ + { 0x1130, 0x3F800000}, /*1130,Through,0dB,fs/1,invert=0*/ + { 0x1131, 0x00000000}, /*1131,Through,0dB,fs/1,invert=0*/ + { 0x1132, 0x00000000}, /*1132,Through,0dB,fs/1,invert=0*/ + { 0x1133, 0x3F800000}, /*1133,Through,0dB,fs/1,invert=0*/ + { 0x1134, 0x00000000}, /*1134,Through,0dB,fs/1,invert=0*/ + { 0x1135, 0x00000000}, /*1135,Through,0dB,fs/1,invert=0*/ + { 0x1136, 0x3F800000}, /*1136,Through,0dB,fs/1,invert=0*/ + { 0x1137, 0x00000000}, /*1137,Through,0dB,fs/1,invert=0*/ + { 0x1138, 0x00000000}, /*1138,Through,0dB,fs/1,invert=0*/ + { 0x1139, 0x3F800000}, /*1139,Through,0dB,fs/1,invert=0*/ + { 0x113A, 0x00000000}, /*113A,Through,0dB,fs/1,invert=0*/ + { 0x113B, 0x00000000}, /*113B,Through,0dB,fs/1,invert=0*/ + { 0x113C, 0x3F800000}, /*113C,Through,0dB,fs/1,invert=0*/ + { 0x113D, 0x00000000}, /*113D,Through,0dB,fs/1,invert=0*/ + { 0x113E, 0x00000000}, /*113E,Through,0dB,fs/1,invert=0*/ + { 0x1143, 0x39D2BD40}, /*1143,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1144, 0x39D2BD40}, /*1144,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1145, 0x3F7FCB40}, /*1145,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1146, 0x38A8A540}, /*1146,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1147, 0x38A8A540}, /*1147,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1148, 0x3F7FF580}, /*1148,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1149, 0x390C87C0}, /*1149,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x114A, 0x390C87C0}, /*114A,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x114B, 0x3F7FEE80}, /*114B,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x114C, 0x398C8300}, /*114C,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x114D, 0x398C8300}, /*114D,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x114E, 0x3F7FDCC0}, /*114E,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x1153, 0x3F800000}, /*1153,Through,0dB,fs/1,invert=0*/ + { 0x1154, 0x00000000}, /*1154,Through,0dB,fs/1,invert=0*/ + { 0x1155, 0x00000000}, /*1155,Through,0dB,fs/1,invert=0*/ + { 0x1156, 0x3F800000}, /*1156,Through,0dB,fs/1,invert=0*/ + { 0x1157, 0x00000000}, /*1157,Through,0dB,fs/1,invert=0*/ + { 0x1158, 0x00000000}, /*1158,Through,0dB,fs/1,invert=0*/ + { 0x1159, 0x3F800000}, /*1159,Through,0dB,fs/1,invert=0*/ + { 0x115A, 0x00000000}, 
/*115A,Through,0dB,fs/1,invert=0*/ + { 0x115B, 0x00000000}, /*115B,Through,0dB,fs/1,invert=0*/ + { 0x115C, 0x3F800000}, /*115C,Through,0dB,fs/1,invert=0*/ + { 0x115D, 0x00000000}, /*115D,Through,0dB,fs/1,invert=0*/ + { 0x115E, 0x00000000}, /*115E,Through,0dB,fs/1,invert=0*/ + { 0x1163, 0x3F800000}, /*1163,0dB,invert=0*/ + { 0x1166, 0x3F800000}, /*1166,0dB,invert=0*/ + { 0x1169, 0x3F800000}, /*1169,0dB,invert=0*/ + { 0x116C, 0x3F800000}, /*116C,0dB,invert=0*/ + { 0x1173, 0x00000000}, /*1173,Cutoff,invert=0*/ + { 0x1176, 0x3F800000}, /*1176,0dB,invert=0*/ + { 0x1179, 0x3F800000}, /*1179,0dB,invert=0*/ + { 0x117C, 0x3F800000}, /*117C,0dB,invert=0*/ + { 0x1183, 0x38D1B700}, /*1183,-80dB,invert=0*/ + { 0x1186, 0x00000000}, /*1186,Cutoff,invert=0*/ + { 0x1189, 0x00000000}, /*1189,Cutoff,invert=0*/ + { 0x118C, 0x00000000}, /*118C,Cutoff,invert=0*/ + { 0x1193, 0x00000000}, /*1193,Cutoff,invert=0*/ + { 0x1198, 0x3F800000}, /*1198,0dB,invert=0*/ + { 0x1199, 0x3F800000}, /*1199,0dB,invert=0*/ + { 0x119A, 0x3F800000}, /*119A,0dB,invert=0*/ + { 0x11A1, 0x3C58B440}, /*11A1,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x11A2, 0x3C58B440}, /*11A2,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x11A3, 0x3F793A40}, /*11A3,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x11A4, 0x3C58B440}, /*11A4,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x11A5, 0x3C58B440}, /*11A5,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x11A6, 0x3F793A40}, /*11A6,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x11A7, 0x3F800000}, /*11A7,Through,0dB,fs/1,invert=0*/ + { 0x11A8, 0x00000000}, /*11A8,Through,0dB,fs/1,invert=0*/ + { 0x11A9, 0x00000000}, /*11A9,Through,0dB,fs/1,invert=0*/ + { 0x11AA, 0x00000000}, /*11AA,Cutoff,invert=0*/ + { 0x11AB, 0x3BDA2580}, /*11AB,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x11AC, 0x3BDA2580}, /*11AC,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x11AD, 0x3F7C9780}, /*11AD,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x11B0, 0x3E0DE280}, /*11B0,LPF,1200Hz,0dB,fs/1,invert=0*/ + { 0x11B1, 0x3E0DE280}, /*11B1,LPF,1200Hz,0dB,fs/1,invert=0*/ + { 0x11B2, 0x3F390EC0}, /*11B2,LPF,1200Hz,0dB,fs/1,invert=0*/ + { 0x11B3, 0x3F800000}, /*11B3,0dB,invert=0*/ + { 0x11B4, 0x00000000}, /*11B4,Cutoff,invert=0*/ + { 0x11B5, 0x00000000}, /*11B5,Cutoff,invert=0*/ + { 0x11B6, 0x3F353C00}, /*11B6,-3dB,invert=0*/ + { 0x11B8, 0x3F800000}, /*11B8,0dB,invert=0*/ + { 0x11B9, 0x00000000}, /*11B9,Cutoff,invert=0*/ + { 0x11C0, 0x3FE304C0}, /*11C0,HBF,60Hz,1000Hz,6dB,fs/1,invert=0*/ + { 0x11C1, 0xBFDF6540}, /*11C1,HBF,60Hz,1000Hz,6dB,fs/1,invert=0*/ + { 0x11C2, 0x3F437BC0}, /*11C2,HBF,60Hz,1000Hz,6dB,fs/1,invert=0*/ + { 0x11C3, 0x3F7F7D40}, /*11C3,HBF,45Hz,60Hz,0dB,fs/1,invert=0*/ + { 0x11C4, 0xBF7C6D00}, /*11C4,HBF,45Hz,60Hz,0dB,fs/1,invert=0*/ + { 0x11C5, 0x3F7BEA40}, /*11C5,HBF,45Hz,60Hz,0dB,fs/1,invert=0*/ + { 0x11C6, 0x3D506F00}, /*11C6,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x11C7, 0x3D506F00}, /*11C7,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x11C8, 0x3F65F240}, /*11C8,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x11C9, 0x3BAED500}, /*11C9,LPF,1Hz,32dB,fs/1,invert=0*/ + { 0x11CA, 0x3BAED500}, /*11CA,LPF,1Hz,32dB,fs/1,invert=0*/ + { 0x11CB, 0x3F7FEE80}, /*11CB,LPF,1Hz,32dB,fs/1,invert=0*/ + { 0x11CC, 0x3E0FC5C0}, /*11CC,LBF,3.5Hz,25Hz,0dB,fs/1,invert=0*/ + { 0x11CD, 0xBE0ED000}, /*11CD,LBF,3.5Hz,25Hz,0dB,fs/1,invert=0*/ + { 0x11CE, 0x3F7FC280}, /*11CE,LBF,3.5Hz,25Hz,0dB,fs/1,invert=0*/ + { 0x11D0, 0x3FFF64C0}, /*11D0,6dB,invert=0*/ + { 0x11D1, 0x00000000}, /*11D1,Cutoff,invert=0*/ + { 0x11D2, 0x3F800000}, /*11D2,0dB,invert=0*/ + { 0x11D3, 0x3F800000}, /*11D3,0dB,invert=0*/ + { 0x11D4, 0x3F800000}, /*11D4,0dB,invert=0*/ + { 0x11D5, 0x3F800000}, 
/*11D5,0dB,invert=0*/ + { 0x11D7, 0x3F8EF7C0}, /*11D7,LPF,500Hz,25dB,fs/1,invert=0*/ + { 0x11D8, 0x3F8EF7C0}, /*11D8,LPF,500Hz,25dB,fs/1,invert=0*/ + { 0x11D9, 0x3F5FD780}, /*11D9,LPF,500Hz,25dB,fs/1,invert=0*/ + { 0x11DA, 0x3F76BA40}, /*11DA,PKF,1200Hz,-6dB,5,fs/1,invert=0*/ + { 0x11DB, 0xBFE16F80}, /*11DB,PKF,1200Hz,-6dB,5,fs/1,invert=0*/ + { 0x11DC, 0x3FE16F80}, /*11DC,PKF,1200Hz,-6dB,5,fs/1,invert=0*/ + { 0x11DD, 0x3F641800}, /*11DD,PKF,1200Hz,-6dB,5,fs/1,invert=0*/ + { 0x11DE, 0xBF5AD200}, /*11DE,PKF,1200Hz,-6dB,5,fs/1,invert=0*/ + { 0x11E0, 0x3F7C5880}, /*11E0,PKF,650Hz,-3dB,4,fs/1,invert=0*/ + { 0x11E1, 0xBFEFD200}, /*11E1,PKF,650Hz,-3dB,4,fs/1,invert=0*/ + { 0x11E2, 0x3FEFD200}, /*11E2,PKF,650Hz,-3dB,4,fs/1,invert=0*/ + { 0x11E3, 0x3F6AA180}, /*11E3,PKF,650Hz,-3dB,4,fs/1,invert=0*/ + { 0x11E4, 0xBF66FA40}, /*11E4,PKF,650Hz,-3dB,4,fs/1,invert=0*/ + { 0x11E5, 0x3F800000}, /*11E5,0dB,invert=0*/ + { 0x11E8, 0x3F800000}, /*11E8,0dB,invert=0*/ + { 0x11E9, 0x00000000}, /*11E9,Cutoff,invert=0*/ + { 0x11EA, 0x00000000}, /*11EA,Cutoff,invert=0*/ + { 0x11EB, 0x00000000}, /*11EB,Cutoff,invert=0*/ + { 0x11F0, 0x3F800000}, /*11F0,Through,0dB,fs/1,invert=0*/ + { 0x11F1, 0x00000000}, /*11F1,Through,0dB,fs/1,invert=0*/ + { 0x11F2, 0x00000000}, /*11F2,Through,0dB,fs/1,invert=0*/ + { 0x11F3, 0x00000000}, /*11F3,Through,0dB,fs/1,invert=0*/ + { 0x11F4, 0x00000000}, /*11F4,Through,0dB,fs/1,invert=0*/ + { 0x11F5, 0x3F800000}, /*11F5,Through,0dB,fs/1,invert=0*/ + { 0x11F6, 0x00000000}, /*11F6,Through,0dB,fs/1,invert=0*/ + { 0x11F7, 0x00000000}, /*11F7,Through,0dB,fs/1,invert=0*/ + { 0x11F8, 0x00000000}, /*11F8,Through,0dB,fs/1,invert=0*/ + { 0x11F9, 0x00000000}, /*11F9,Through,0dB,fs/1,invert=0*/ + { 0x1200, 0x00000000}, /*1200,Cutoff,invert=0*/ + { 0x1201, 0x3F800000}, /*1201,0dB,invert=0*/ + { 0x1202, 0x3F800000}, /*1202,0dB,invert=0*/ + { 0x1203, 0x3F800000}, /*1203,0dB,invert=0*/ + { 0x1204, 0x3F800000}, /*1204,Through,0dB,fs/1,invert=0*/ + { 0x1205, 0x00000000}, /*1205,Through,0dB,fs/1,invert=0*/ + { 0x1206, 0x00000000}, /*1206,Through,0dB,fs/1,invert=0*/ + { 0x1207, 0x3F800000}, /*1207,Through,0dB,fs/1,invert=0*/ + { 0x1208, 0x00000000}, /*1208,Through,0dB,fs/1,invert=0*/ + { 0x1209, 0x00000000}, /*1209,Through,0dB,fs/1,invert=0*/ + { 0x120A, 0x3F800000}, /*120A,Through,0dB,fs/1,invert=0*/ + { 0x120B, 0x00000000}, /*120B,Through,0dB,fs/1,invert=0*/ + { 0x120C, 0x00000000}, /*120C,Through,0dB,fs/1,invert=0*/ + { 0x120D, 0x3F800000}, /*120D,Through,0dB,fs/1,invert=0*/ + { 0x120E, 0x00000000}, /*120E,Through,0dB,fs/1,invert=0*/ + { 0x120F, 0x00000000}, /*120F,Through,0dB,fs/1,invert=0*/ + { 0x1210, 0x3F800000}, /*1210,Through,0dB,fs/1,invert=0*/ + { 0x1211, 0x00000000}, /*1211,Through,0dB,fs/1,invert=0*/ + { 0x1212, 0x00000000}, /*1212,Through,0dB,fs/1,invert=0*/ + { 0x1213, 0x3F800000}, /*1213,0dB,invert=0*/ + { 0x1214, 0x3F800000}, /*1214,0dB,invert=0*/ + { 0x1215, 0x3F800000}, /*1215,0dB,invert=0*/ + { 0x1216, 0x3F800000}, /*1216,0dB,invert=0*/ + { 0x1217, 0x3F800000}, /*1217,0dB,invert=0*/ + { 0x1218, 0x00000000}, /*1218,Cutoff,fs/1,invert=0*/ + { 0x1219, 0x00000000}, /*1219,Cutoff,fs/1,invert=0*/ + { 0x121A, 0x00000000}, /*121A,Cutoff,fs/1,invert=0*/ + { 0x121B, 0x00000000}, /*121B,Cutoff,fs/1,invert=0*/ + { 0x121C, 0x00000000}, /*121C,Cutoff,fs/1,invert=0*/ + { 0x121D, 0x3F800000}, /*121D,0dB,invert=0*/ + { 0x121E, 0x3F800000}, /*121E,0dB,invert=0*/ + { 0x121F, 0x3F800000}, /*121F,0dB,invert=0*/ + { 0x1235, 0x3F800000}, /*1235,0dB,invert=0*/ + { 0x1236, 0x3F800000}, /*1236,0dB,invert=0*/ + { 
0x1237, 0x3F800000}, /*1237,0dB,invert=0*/ + { 0x1238, 0x3F800000}, /*1238,0dB,invert=0*/ + { 0xFFFF, 0xFFFFFFFF } + }; + +/* 32bit */ +const struct STFILRAM CsFilRam_20M_simul_set[] = { + { 0x1000, 0x3F800000}, /*1000,0dB,invert=0*/ + { 0x1001, 0x3F800000}, /*1001,0dB,invert=0*/ + { 0x1002, 0x00000000}, /*1002,Cutoff,invert=0*/ + { 0x1003, 0x3F800000}, /*1003,0dB,invert=0*/ + { 0x1004, 0x38A8A540}, /*1004,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1005, 0x38A8A540}, /*1005,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1006, 0x3F7FF580}, /*1006,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1007, 0x3F800000}, /*1007,0dB,invert=0*/ + { 0x1008, 0xBF800000}, /*1008,0dB,invert=1*/ + { 0x1009, 0x00000000}, /*1009,Cutoff,invert=0*/ + { 0x100A, 0x3F800000}, /*100A,0dB,invert=0*/ + { 0x100B, 0x3F800000}, /*100B,0dB,invert=0*/ + { 0x100C, 0x3F800000}, /*100C,0dB,invert=0*/ + { 0x100E, 0x3F800000}, /*100E,0dB,invert=0*/ + { 0x1010, 0x3DA2AD80}, /*1010*/ + { 0x1011, 0x00000000}, /*1011,Free,fs/1,invert=0*/ + { 0x1012, 0x3F7FFD00}, /*1012,Free,fs/1,invert=0*/ + { 0x1013, 0x3FC83380}, /*1013,HBF,50Hz,150Hz,4dB,fs/1,invert=0*/ + { 0x1014, 0xBFC58900}, /*1014,HBF,50Hz,150Hz,4dB,fs/1,invert=0*/ + { 0x1015, 0x3F75E8C0}, /*1015,HBF,50Hz,150Hz,4dB,fs/1,invert=0*/ + { 0x1016, 0x3F06BD80}, /*1016,LBF,0.2Hz,0.38Hz,0dB,fs/1,invert=0*/ + { 0x1017, 0xBF06BA00}, /*1017,LBF,0.2Hz,0.38Hz,0dB,fs/1,invert=0*/ + { 0x1018, 0x3F7FFC80}, /*1018,LBF,0.2Hz,0.38Hz,0dB,fs/1,invert=0*/ + { 0x1019, 0x3F800000}, /*1019,Through,0dB,fs/1,invert=0*/ + { 0x101A, 0x00000000}, /*101A,Through,0dB,fs/1,invert=0*/ + { 0x101B, 0x00000000}, /*101B,Through,0dB,fs/1,invert=0*/ + { 0x101C, 0x3F800000}, /*101C,0dB,invert=0*/ + { 0x101D, 0x00000000}, /*101D,Cutoff,invert=0*/ + { 0x101E, 0x00000000}, /*101E,Cutoff,invert=0*/ + { 0x1020, 0x3F800000}, /*1020,0dB,invert=0*/ + { 0x1021, 0x3F800000}, /*1021,0dB,invert=0*/ + { 0x1022, 0x3F800000}, /*1022,0dB,invert=0*/ + { 0x1023, 0x3F800000}, /*1023,Through,0dB,fs/1,invert=0*/ + { 0x1024, 0x00000000}, /*1024,Through,0dB,fs/1,invert=0*/ + { 0x1025, 0x00000000}, /*1025,Through,0dB,fs/1,invert=0*/ + { 0x1026, 0x00000000}, /*1026,Through,0dB,fs/1,invert=0*/ + { 0x1027, 0x00000000}, /*1027,Through,0dB,fs/1,invert=0*/ + { 0x1030, 0x3F800000}, /*1030,Through,0dB,fs/1,invert=0*/ + { 0x1031, 0x00000000}, /*1031,Through,0dB,fs/1,invert=0*/ + { 0x1032, 0x00000000}, /*1032,Through,0dB,fs/1,invert=0*/ + { 0x1033, 0x3F800000}, /*1033,Through,0dB,fs/1,invert=0*/ + { 0x1034, 0x00000000}, /*1034,Through,0dB,fs/1,invert=0*/ + { 0x1035, 0x00000000}, /*1035,Through,0dB,fs/1,invert=0*/ + { 0x1036, 0x3F800000}, /*1036,Through,0dB,fs/1,invert=0*/ + { 0x1037, 0x00000000}, /*1037,Through,0dB,fs/1,invert=0*/ + { 0x1038, 0x00000000}, /*1038,Through,0dB,fs/1,invert=0*/ + { 0x1039, 0x3F800000}, /*1039,Through,0dB,fs/1,invert=0*/ + { 0x103A, 0x00000000}, /*103A,Through,0dB,fs/1,invert=0*/ + { 0x103B, 0x00000000}, /*103B,Through,0dB,fs/1,invert=0*/ + { 0x103C, 0x3F800000}, /*103C,Through,0dB,fs/1,invert=0*/ + { 0x103D, 0x00000000}, /*103D,Through,0dB,fs/1,invert=0*/ + { 0x103E, 0x00000000}, /*103E,Through,0dB,fs/1,invert=0*/ + { 0x1043, 0x39D2BD40}, /*1043,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1044, 0x39D2BD40}, /*1044,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1045, 0x3F7FCB40}, /*1045,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1046, 0x38A8A540}, /*1046,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1047, 0x38A8A540}, /*1047,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1048, 0x3F7FF580}, /*1048,LPF,0.6Hz,0dB,fs/1,invert=0*/ + { 0x1049, 0x390C87C0}, /*1049,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x104A, 
0x390C87C0}, /*104A,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x104B, 0x3F7FEE80}, /*104B,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x104C, 0x398C8300}, /*104C,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x104D, 0x398C8300}, /*104D,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x104E, 0x3F7FDCC0}, /*104E,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x1053, 0x3F800000}, /*1053,Through,0dB,fs/1,invert=0*/ + { 0x1054, 0x00000000}, /*1054,Through,0dB,fs/1,invert=0*/ + { 0x1055, 0x00000000}, /*1055,Through,0dB,fs/1,invert=0*/ + { 0x1056, 0x3F800000}, /*1056,Through,0dB,fs/1,invert=0*/ + { 0x1057, 0x00000000}, /*1057,Through,0dB,fs/1,invert=0*/ + { 0x1058, 0x00000000}, /*1058,Through,0dB,fs/1,invert=0*/ + { 0x1059, 0x3F800000}, /*1059,Through,0dB,fs/1,invert=0*/ + { 0x105A, 0x00000000}, /*105A,Through,0dB,fs/1,invert=0*/ + { 0x105B, 0x00000000}, /*105B,Through,0dB,fs/1,invert=0*/ + { 0x105C, 0x3F800000}, /*105C,Through,0dB,fs/1,invert=0*/ + { 0x105D, 0x00000000}, /*105D,Through,0dB,fs/1,invert=0*/ + { 0x105E, 0x00000000}, /*105E,Through,0dB,fs/1,invert=0*/ + { 0x1063, 0x3F800000}, /*1063,0dB,invert=0*/ + { 0x1066, 0x3F800000}, /*1066,0dB,invert=0*/ + { 0x1069, 0x3F800000}, /*1069,0dB,invert=0*/ + { 0x106C, 0x3F800000}, /*106C,0dB,invert=0*/ + { 0x1073, 0x00000000}, /*1073,Cutoff,invert=0*/ + { 0x1076, 0x3F800000}, /*1076,0dB,invert=0*/ + { 0x1079, 0x3F800000}, /*1079,0dB,invert=0*/ + { 0x107C, 0x3F800000}, /*107C,0dB,invert=0*/ + { 0x1083, 0x38D1B700}, /*1083,-80dB,invert=0*/ + { 0x1086, 0x00000000}, /*1086,Cutoff,invert=0*/ + { 0x1089, 0x00000000}, /*1089,Cutoff,invert=0*/ + { 0x108C, 0x00000000}, /*108C,Cutoff,invert=0*/ + { 0x1093, 0x00000000}, /*1093,Cutoff,invert=0*/ + { 0x1098, 0x3F800000}, /*1098,0dB,invert=0*/ + { 0x1099, 0x3F800000}, /*1099,0dB,invert=0*/ + { 0x109A, 0x3F800000}, /*109A,0dB,invert=0*/ + { 0x10A1, 0x3C58B440}, /*10A1,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A2, 0x3C58B440}, /*10A2,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A3, 0x3F793A40}, /*10A3,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A4, 0x3C58B440}, /*10A4,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A5, 0x3C58B440}, /*10A5,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A6, 0x3F793A40}, /*10A6,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A7, 0x3F800000}, /*10A7,Through,0dB,fs/1,invert=0*/ + { 0x10A8, 0x00000000}, /*10A8,Through,0dB,fs/1,invert=0*/ + { 0x10A9, 0x00000000}, /*10A9,Through,0dB,fs/1,invert=0*/ + { 0x10AA, 0x00000000}, /*10AA,Cutoff,invert=0*/ + { 0x10AB, 0x3BDA2580}, /*10AB,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x10AC, 0x3BDA2580}, /*10AC,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x10AD, 0x3F7C9780}, /*10AD,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x10B0, 0x3E0DE280}, /*10B0,LPF,1200Hz,0dB,fs/1,invert=0*/ + { 0x10B1, 0x3E0DE280}, /*10B1,LPF,1200Hz,0dB,fs/1,invert=0*/ + { 0x10B2, 0x3F390EC0}, /*10B2,LPF,1200Hz,0dB,fs/1,invert=0*/ + { 0x10B3, 0x3F800000}, /*10B3,0dB,invert=0*/ + { 0x10B4, 0x00000000}, /*10B4,Cutoff,invert=0*/ + { 0x10B5, 0x00000000}, /*10B5,Cutoff,invert=0*/ + { 0x10B6, 0x3F353C00}, /*10B6,-3dB,invert=0*/ + { 0x10B8, 0x3F800000}, /*10B8,0dB,invert=0*/ + { 0x10B9, 0x00000000}, /*10B9,Cutoff,invert=0*/ + { 0x10C0, 0x3FE304C0}, /*10C0,HBF,60Hz,1000Hz,6dB,fs/1,invert=0*/ + { 0x10C1, 0xBFDF6540}, /*10C1,HBF,60Hz,1000Hz,6dB,fs/1,invert=0*/ + { 0x10C2, 0x3F437BC0}, /*10C2,HBF,60Hz,1000Hz,6dB,fs/1,invert=0*/ + { 0x10C3, 0x3F7F7D40}, /*10C3,HBF,45Hz,60Hz,0dB,fs/1,invert=0*/ + { 0x10C4, 0xBF7C6D00}, /*10C4,HBF,45Hz,60Hz,0dB,fs/1,invert=0*/ + { 0x10C5, 0x3F7BEA40}, /*10C5,HBF,45Hz,60Hz,0dB,fs/1,invert=0*/ + { 0x10C6, 0x3D506F00}, /*10C6,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x10C7, 0x3D506F00}, 
/*10C7,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x10C8, 0x3F65F240}, /*10C8,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x10C9, 0x3BAED500}, /*10C9,LPF,1Hz,32dB,fs/1,invert=0*/ + { 0x10CA, 0x3BAED500}, /*10CA,LPF,1Hz,32dB,fs/1,invert=0*/ + { 0x10CB, 0x3F7FEE80}, /*10CB,LPF,1Hz,32dB,fs/1,invert=0*/ + { 0x10CC, 0x3E0FC5C0}, /*10CC,LBF,3.5Hz,25Hz,0dB,fs/1,invert=0*/ + { 0x10CD, 0xBE0ED000}, /*10CD,LBF,3.5Hz,25Hz,0dB,fs/1,invert=0*/ + { 0x10CE, 0x3F7FC280}, /*10CE,LBF,3.5Hz,25Hz,0dB,fs/1,invert=0*/ + { 0x10D0, 0x3FFF64C0}, /*10D0,6dB,invert=0*/ + { 0x10D1, 0x00000000}, /*10D1,Cutoff,invert=0*/ + { 0x10D2, 0x3F800000}, /*10D2,0dB,invert=0*/ + { 0x10D3, 0x3F800000}, /*10D3,0dB,invert=0*/ + { 0x10D4, 0x3F800000}, /*10D4,0dB,invert=0*/ + { 0x10D5, 0x3F800000}, /*10D5,0dB,invert=0*/ + { 0x10D7, 0x3F8EF7C0}, /*10D7,LPF,500Hz,25dB,fs/1,invert=0*/ + { 0x10D8, 0x3F8EF7C0}, /*10D8,LPF,500Hz,25dB,fs/1,invert=0*/ + { 0x10D9, 0x3F5FD780}, /*10D9,LPF,500Hz,25dB,fs/1,invert=0*/ + { 0x10DA, 0x3F76BA40}, /*10DA,PKF,1200Hz,-6dB,5,fs/1,invert=0*/ + { 0x10DB, 0xBFE16F80}, /*10DB,PKF,1200Hz,-6dB,5,fs/1,invert=0*/ + { 0x10DC, 0x3FE16F80}, /*10DC,PKF,1200Hz,-6dB,5,fs/1,invert=0*/ + { 0x10DD, 0x3F641800}, /*10DD,PKF,1200Hz,-6dB,5,fs/1,invert=0*/ + { 0x10DE, 0xBF5AD200}, /*10DE,PKF,1200Hz,-6dB,5,fs/1,invert=0*/ + { 0x10E0, 0x3F7C5880}, /*10E0,PKF,650Hz,-3dB,4,fs/1,invert=0*/ + { 0x10E1, 0xBFEFD200}, /*10E1,PKF,650Hz,-3dB,4,fs/1,invert=0*/ + { 0x10E2, 0x3FEFD200}, /*10E2,PKF,650Hz,-3dB,4,fs/1,invert=0*/ + { 0x10E3, 0x3F6AA180}, /*10E3,PKF,650Hz,-3dB,4,fs/1,invert=0*/ + { 0x10E4, 0xBF66FA40}, /*10E4,PKF,650Hz,-3dB,4,fs/1,invert=0*/ + { 0x10E5, 0x3F800000}, /*10E5,0dB,invert=0*/ + { 0x10E8, 0x3F800000}, /*10E8,0dB,invert=0*/ + { 0x10E9, 0x00000000}, /*10E9,Cutoff,invert=0*/ + { 0x10EA, 0x00000000}, /*10EA,Cutoff,invert=0*/ + { 0x10EB, 0x00000000}, /*10EB,Cutoff,invert=0*/ + { 0x10F0, 0x3F800000}, /*10F0,Through,0dB,fs/1,invert=0*/ + { 0x10F1, 0x00000000}, /*10F1,Through,0dB,fs/1,invert=0*/ + { 0x10F2, 0x00000000}, /*10F2,Through,0dB,fs/1,invert=0*/ + { 0x10F3, 0x00000000}, /*10F3,Through,0dB,fs/1,invert=0*/ + { 0x10F4, 0x00000000}, /*10F4,Through,0dB,fs/1,invert=0*/ + { 0x10F5, 0x3F800000}, /*10F5,Through,0dB,fs/1,invert=0*/ + { 0x10F6, 0x00000000}, /*10F6,Through,0dB,fs/1,invert=0*/ + { 0x10F7, 0x00000000}, /*10F7,Through,0dB,fs/1,invert=0*/ + { 0x10F8, 0x00000000}, /*10F8,Through,0dB,fs/1,invert=0*/ + { 0x10F9, 0x00000000}, /*10F9,Through,0dB,fs/1,invert=0*/ + { 0x1200, 0x00000000}, /*1200,Cutoff,invert=0*/ + { 0x1201, 0x3F800000}, /*1201,0dB,invert=0*/ + { 0x1202, 0x3F800000}, /*1202,0dB,invert=0*/ + { 0x1203, 0x3F800000}, /*1203,0dB,invert=0*/ + { 0x1204, 0x3F800000}, /*1204,Through,0dB,fs/1,invert=0*/ + { 0x1205, 0x00000000}, /*1205,Through,0dB,fs/1,invert=0*/ + { 0x1206, 0x00000000}, /*1206,Through,0dB,fs/1,invert=0*/ + { 0x1207, 0x3F800000}, /*1207,Through,0dB,fs/1,invert=0*/ + { 0x1208, 0x00000000}, /*1208,Through,0dB,fs/1,invert=0*/ + { 0x1209, 0x00000000}, /*1209,Through,0dB,fs/1,invert=0*/ + { 0x120A, 0x3F800000}, /*120A,Through,0dB,fs/1,invert=0*/ + { 0x120B, 0x00000000}, /*120B,Through,0dB,fs/1,invert=0*/ + { 0x120C, 0x00000000}, /*120C,Through,0dB,fs/1,invert=0*/ + { 0x120D, 0x3F800000}, /*120D,Through,0dB,fs/1,invert=0*/ + { 0x120E, 0x00000000}, /*120E,Through,0dB,fs/1,invert=0*/ + { 0x120F, 0x00000000}, /*120F,Through,0dB,fs/1,invert=0*/ + { 0x1210, 0x3F800000}, /*1210,Through,0dB,fs/1,invert=0*/ + { 0x1211, 0x00000000}, /*1211,Through,0dB,fs/1,invert=0*/ + { 0x1212, 0x00000000}, /*1212,Through,0dB,fs/1,invert=0*/ + { 
0x1213, 0x3F800000}, /*1213,0dB,invert=0*/ + { 0x1214, 0x3F800000}, /*1214,0dB,invert=0*/ + { 0x1215, 0x3F800000}, /*1215,0dB,invert=0*/ + { 0x1216, 0x3F800000}, /*1216,0dB,invert=0*/ + { 0x1217, 0x3F800000}, /*1217,0dB,invert=0*/ + { 0x1218, 0x00000000}, /*1218,Cutoff,fs/1,invert=0*/ + { 0x1219, 0x00000000}, /*1219,Cutoff,fs/1,invert=0*/ + { 0x121A, 0x00000000}, /*121A,Cutoff,fs/1,invert=0*/ + { 0x121B, 0x00000000}, /*121B,Cutoff,fs/1,invert=0*/ + { 0x121C, 0x00000000}, /*121C,Cutoff,fs/1,invert=0*/ + { 0x121D, 0x3F800000}, /*121D,0dB,invert=0*/ + { 0x121E, 0x3F800000}, /*121E,0dB,invert=0*/ + { 0x121F, 0x3F800000}, /*121F,0dB,invert=0*/ + { 0x1235, 0x3F800000}, /*1235,0dB,invert=0*/ + { 0x1236, 0x3F800000}, /*1236,0dB,invert=0*/ + { 0x1237, 0x3F800000}, /*1237,0dB,invert=0*/ + { 0x1238, 0x3F800000}, /*1238,0dB,invert=0*/ + { 0xFFFF, 0xFFFFFFFF } + }; + + +/*Filter Calculator Version 4.02*/ +/*the time and date : 2015/1/17 17:58:38*/ +/*FC filename : LC898122_FIL_13M_V0009*/ +/*fs,23438Hz*/ +/*LSI No.,LC898122*/ +/*Comment,*/ + +/* 8bit */ +const struct STFILREG CsFilReg_13M[] = { + { 0x0111, 0x00}, /*00,0111*/ + { 0x0113, 0x00}, /*00,0113*/ + { 0x0114, 0x00}, /*00,0114*/ + { 0x0172, 0x00}, /*00,0172*/ + { 0x01E3, 0x00}, /*00,01E3*/ + { 0x01E4, 0x00}, /*00,01E4*/ + { 0xFFFF, 0xFF } + }; +/* 32bit */ +const struct STFILRAM CsFilRam_13M[] = { + { 0x1000, 0x3F800000}, /*1000,0dB,invert=0*/ + { 0x1001, 0x3F800000}, /*1001,0dB,invert=0*/ + { 0x1002, 0x00000000}, /*1002,Cutoff,invert=0*/ + { 0x1003, 0x3F800000}, /*1003,0dB,invert=0*/ + { 0x1004, 0x3828A700}, /*1004,LPF,0.3Hz,0dB,fs/1,invert=0*/ + { 0x1005, 0x3828A700}, /*1005,LPF,0.3Hz,0dB,fs/1,invert=0*/ + { 0x1006, 0x3F7FFAC0}, /*1006,LPF,0.3Hz,0dB,fs/1,invert=0*/ + { 0x1007, 0x3F800000}, /*1007,0dB,invert=0*/ + { 0x1008, 0xBF800000}, /*1008,0dB,invert=1*/ + { 0x1009, 0x00000000}, /*1009,Cutoff,invert=0*/ + { 0x100A, 0x3F800000}, /*100A,0dB,invert=0*/ + { 0x100B, 0x3F800000}, /*100B,0dB,invert=0*/ + { 0x100C, 0x3F800000}, /*100C,0dB,invert=0*/ + { 0x100E, 0x3F800000}, /*100E,0dB,invert=0*/ + { 0x1010, 0x3DA2AD80}, /*1010*/ + { 0x1011, 0x00000000}, /*1011,Free,fs/1,invert=0*/ + { 0x1012, 0x3F7FFE00}, /*1012,Free,fs/1,invert=0*/ + { 0x1013, 0x3FB26DC0}, /*1013,HBF,50Hz,150Hz,3dB,fs/1,invert=0*/ + { 0x1014, 0xBFB00DC0}, /*1014,HBF,50Hz,150Hz,3dB,fs/1,invert=0*/ + { 0x1015, 0x3F75E8C0}, /*1015,HBF,50Hz,150Hz,3dB,fs/1,invert=0*/ + { 0x1016, 0x3F1B2780}, /*1016,LBF,0.2Hz,0.33Hz,0dB,fs/1,invert=0*/ + { 0x1017, 0xBF1B2400}, /*1017,LBF,0.2Hz,0.33Hz,0dB,fs/1,invert=0*/ + { 0x1018, 0x3F7FFC80}, /*1018,LBF,0.2Hz,0.33Hz,0dB,fs/1,invert=0*/ + { 0x1019, 0x3F800000}, /*1019,Through,0dB,fs/1,invert=0*/ + { 0x101A, 0x00000000}, /*101A,Through,0dB,fs/1,invert=0*/ + { 0x101B, 0x00000000}, /*101B,Through,0dB,fs/1,invert=0*/ + { 0x101C, 0x3F800000}, /*101C,0dB,invert=0*/ + { 0x101D, 0x00000000}, /*101D,Cutoff,invert=0*/ + { 0x101E, 0x00000000}, /*101E,Cutoff,invert=0*/ + { 0x1020, 0x3F800000}, /*1020,0dB,invert=0*/ + { 0x1021, 0x3F800000}, /*1021,0dB,invert=0*/ + { 0x1022, 0x3F800000}, /*1022,0dB,invert=0*/ + { 0x1023, 0x3F800000}, /*1023,Through,0dB,fs/1,invert=0*/ + { 0x1024, 0x00000000}, /*1024,Through,0dB,fs/1,invert=0*/ + { 0x1025, 0x00000000}, /*1025,Through,0dB,fs/1,invert=0*/ + { 0x1026, 0x00000000}, /*1026,Through,0dB,fs/1,invert=0*/ + { 0x1027, 0x00000000}, /*1027,Through,0dB,fs/1,invert=0*/ + { 0x1030, 0x3F800000}, /*1030,Through,0dB,fs/1,invert=0*/ + { 0x1031, 0x00000000}, /*1031,Through,0dB,fs/1,invert=0*/ + { 0x1032, 0x00000000}, 
/*1032,Through,0dB,fs/1,invert=0*/ + { 0x1033, 0x3F800000}, /*1033,Through,0dB,fs/1,invert=0*/ + { 0x1034, 0x00000000}, /*1034,Through,0dB,fs/1,invert=0*/ + { 0x1035, 0x00000000}, /*1035,Through,0dB,fs/1,invert=0*/ + { 0x1036, 0x3F800000}, /*1036,Through,0dB,fs/1,invert=0*/ + { 0x1037, 0x00000000}, /*1037,Through,0dB,fs/1,invert=0*/ + { 0x1038, 0x00000000}, /*1038,Through,0dB,fs/1,invert=0*/ + { 0x1039, 0x3F800000}, /*1039,Through,0dB,fs/1,invert=0*/ + { 0x103A, 0x00000000}, /*103A,Through,0dB,fs/1,invert=0*/ + { 0x103B, 0x00000000}, /*103B,Through,0dB,fs/1,invert=0*/ + { 0x103C, 0x3F800000}, /*103C,Through,0dB,fs/1,invert=0*/ + { 0x103D, 0x00000000}, /*103D,Through,0dB,fs/1,invert=0*/ + { 0x103E, 0x00000000}, /*103E,Through,0dB,fs/1,invert=0*/ + { 0x1043, 0x39D2BD40}, /*1043,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1044, 0x39D2BD40}, /*1044,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1045, 0x3F7FCB40}, /*1045,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1046, 0x388C8A40}, /*1046,LPF,0.5Hz,0dB,fs/1,invert=0*/ + { 0x1047, 0x388C8A40}, /*1047,LPF,0.5Hz,0dB,fs/1,invert=0*/ + { 0x1048, 0x3F7FF740}, /*1048,LPF,0.5Hz,0dB,fs/1,invert=0*/ + { 0x1049, 0x390C87C0}, /*1049,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x104A, 0x390C87C0}, /*104A,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x104B, 0x3F7FEE80}, /*104B,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x104C, 0x398C8300}, /*104C,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x104D, 0x398C8300}, /*104D,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x104E, 0x3F7FDCC0}, /*104E,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x1053, 0x3F800000}, /*1053,Through,0dB,fs/1,invert=0*/ + { 0x1054, 0x00000000}, /*1054,Through,0dB,fs/1,invert=0*/ + { 0x1055, 0x00000000}, /*1055,Through,0dB,fs/1,invert=0*/ + { 0x1056, 0x3F800000}, /*1056,Through,0dB,fs/1,invert=0*/ + { 0x1057, 0x00000000}, /*1057,Through,0dB,fs/1,invert=0*/ + { 0x1058, 0x00000000}, /*1058,Through,0dB,fs/1,invert=0*/ + { 0x1059, 0x3F800000}, /*1059,Through,0dB,fs/1,invert=0*/ + { 0x105A, 0x00000000}, /*105A,Through,0dB,fs/1,invert=0*/ + { 0x105B, 0x00000000}, /*105B,Through,0dB,fs/1,invert=0*/ + { 0x105C, 0x3F800000}, /*105C,Through,0dB,fs/1,invert=0*/ + { 0x105D, 0x00000000}, /*105D,Through,0dB,fs/1,invert=0*/ + { 0x105E, 0x00000000}, /*105E,Through,0dB,fs/1,invert=0*/ + { 0x1063, 0x3F800000}, /*1063,0dB,invert=0*/ + { 0x1066, 0x3F800000}, /*1066,0dB,invert=0*/ + { 0x1069, 0x3F800000}, /*1069,0dB,invert=0*/ + { 0x106C, 0x3F800000}, /*106C,0dB,invert=0*/ + { 0x1073, 0x00000000}, /*1073,Cutoff,invert=0*/ + { 0x1076, 0x3F800000}, /*1076,0dB,invert=0*/ + { 0x1079, 0x3F800000}, /*1079,0dB,invert=0*/ + { 0x107C, 0x3F800000}, /*107C,0dB,invert=0*/ + { 0x1083, 0x38D1B700}, /*1083,-80dB,invert=0*/ + { 0x1086, 0x00000000}, /*1086,Cutoff,invert=0*/ + { 0x1089, 0x00000000}, /*1089,Cutoff,invert=0*/ + { 0x108C, 0x00000000}, /*108C,Cutoff,invert=0*/ + { 0x1093, 0x00000000}, /*1093,Cutoff,invert=0*/ + { 0x1098, 0x3F800000}, /*1098,0dB,invert=0*/ + { 0x1099, 0x3F800000}, /*1099,0dB,invert=0*/ + { 0x109A, 0x3F800000}, /*109A,0dB,invert=0*/ + { 0x10A1, 0x3C58B440}, /*10A1,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A2, 0x3C58B440}, /*10A2,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A3, 0x3F793A40}, /*10A3,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A4, 0x3C58B440}, /*10A4,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A5, 0x3C58B440}, /*10A5,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A6, 0x3F793A40}, /*10A6,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A7, 0x3F800000}, /*10A7,Through,0dB,fs/1,invert=0*/ + { 0x10A8, 0x00000000}, /*10A8,Through,0dB,fs/1,invert=0*/ + { 0x10A9, 0x00000000}, /*10A9,Through,0dB,fs/1,invert=0*/ + { 0x10AA, 0x00000000}, 
/*10AA,Cutoff,invert=0*/ + { 0x10AB, 0x3BDA2580}, /*10AB,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x10AC, 0x3BDA2580}, /*10AC,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x10AD, 0x3F7C9780}, /*10AD,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x10B0, 0x3F800000}, /*10B0,Through,0dB,fs/1,invert=0*/ + { 0x10B1, 0x00000000}, /*10B1,Through,0dB,fs/1,invert=0*/ + { 0x10B2, 0x00000000}, /*10B2,Through,0dB,fs/1,invert=0*/ + { 0x10B3, 0x3F800000}, /*10B3,0dB,invert=0*/ + { 0x10B4, 0x00000000}, /*10B4,Cutoff,invert=0*/ + { 0x10B5, 0x00000000}, /*10B5,Cutoff,invert=0*/ + { 0x10B6, 0x3F353C00}, /*10B6,-3dB,invert=0*/ + { 0x10B8, 0x3F800000}, /*10B8,0dB,invert=0*/ + { 0x10B9, 0x00000000}, /*10B9,Cutoff,invert=0*/ + { 0x10C0, 0x3F944EC0}, /*10C0,HBF,50Hz,700Hz,2dB,fs/1,invert=0*/ + { 0x10C1, 0xBF925540}, /*10C1,HBF,50Hz,700Hz,2dB,fs/1,invert=0*/ + { 0x10C2, 0x3F5414C0}, /*10C2,HBF,50Hz,700Hz,2dB,fs/1,invert=0*/ + { 0x10C3, 0x3F800000}, /*10C3,Through,0dB,fs/1,invert=0*/ + { 0x10C4, 0x00000000}, /*10C4,Through,0dB,fs/1,invert=0*/ + { 0x10C5, 0x00000000}, /*10C5,Through,0dB,fs/1,invert=0*/ + { 0x10C6, 0x3D506F00}, /*10C6,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x10C7, 0x3D506F00}, /*10C7,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x10C8, 0x3F65F240}, /*10C8,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x10C9, 0x3C208400}, /*10C9,LPF,1.3Hz,35dB,fs/1,invert=0*/ + { 0x10CA, 0x3C208400}, /*10CA,LPF,1.3Hz,35dB,fs/1,invert=0*/ + { 0x10CB, 0x3F7FE940}, /*10CB,LPF,1.3Hz,35dB,fs/1,invert=0*/ + { 0x10CC, 0x3E1D2100}, /*10CC,LBF,10Hz,26Hz,-8dB,fs/1,invert=0*/ + { 0x10CD, 0xBE1C0980}, /*10CD,LBF,10Hz,26Hz,-8dB,fs/1,invert=0*/ + { 0x10CE, 0x3F7F5080}, /*10CE,LBF,10Hz,26Hz,-8dB,fs/1,invert=0*/ + { 0x10D0, 0x3FFF64C0}, /*10D0,6dB,invert=0*/ + { 0x10D1, 0x00000000}, /*10D1,Cutoff,invert=0*/ + { 0x10D2, 0x3F800000}, /*10D2,0dB,invert=0*/ + { 0x10D3, 0x3F800000}, /*10D3,0dB,invert=0*/ + { 0x10D4, 0x3F800000}, /*10D4,0dB,invert=0*/ + { 0x10D5, 0x3F800000}, /*10D5,0dB,invert=0*/ + { 0x10D7, 0x41FCFB80}, /*10D7,Through,30dB,fs/1,invert=0*/ + { 0x10D8, 0x00000000}, /*10D8,Through,30dB,fs/1,invert=0*/ + { 0x10D9, 0x00000000}, /*10D9,Through,30dB,fs/1,invert=0*/ + { 0x10DA, 0x3F649140}, /*10DA,PKF,1000Hz,-11dB,3,fs/1,invert=0*/ + { 0x10DB, 0xBFD21D40}, /*10DB,PKF,1000Hz,-11dB,3,fs/1,invert=0*/ + { 0x10DC, 0x3FD21D40}, /*10DC,PKF,1000Hz,-11dB,3,fs/1,invert=0*/ + { 0x10DD, 0x3F4F0940}, /*10DD,PKF,1000Hz,-11dB,3,fs/1,invert=0*/ + { 0x10DE, 0xBF339A80}, /*10DE,PKF,1000Hz,-11dB,3,fs/1,invert=0*/ + { 0x10E0, 0x3DD17800}, /*10E0,LPF,850Hz,0dB,fs/1,invert=0*/ + { 0x10E1, 0x3DD17800}, /*10E1,LPF,850Hz,0dB,fs/1,invert=0*/ + { 0x10E2, 0x3F4BA200}, /*10E2,LPF,850Hz,0dB,fs/1,invert=0*/ + { 0x10E3, 0x00000000}, /*10E3,LPF,850Hz,0dB,fs/1,invert=0*/ + { 0x10E4, 0x00000000}, /*10E4,LPF,850Hz,0dB,fs/1,invert=0*/ + { 0x10E5, 0x3F800000}, /*10E5,0dB,invert=0*/ + { 0x10E8, 0x3F800000}, /*10E8,0dB,invert=0*/ + { 0x10E9, 0x00000000}, /*10E9,Cutoff,invert=0*/ + { 0x10EA, 0x00000000}, /*10EA,Cutoff,invert=0*/ + { 0x10EB, 0x00000000}, /*10EB,Cutoff,invert=0*/ + { 0x10F0, 0x3F800000}, /*10F0,Through,0dB,fs/1,invert=0*/ + { 0x10F1, 0x00000000}, /*10F1,Through,0dB,fs/1,invert=0*/ + { 0x10F2, 0x00000000}, /*10F2,Through,0dB,fs/1,invert=0*/ + { 0x10F3, 0x00000000}, /*10F3,Through,0dB,fs/1,invert=0*/ + { 0x10F4, 0x00000000}, /*10F4,Through,0dB,fs/1,invert=0*/ + { 0x10F5, 0x3F800000}, /*10F5,Through,0dB,fs/1,invert=0*/ + { 0x10F6, 0x00000000}, /*10F6,Through,0dB,fs/1,invert=0*/ + { 0x10F7, 0x00000000}, /*10F7,Through,0dB,fs/1,invert=0*/ + { 0x10F8, 0x00000000}, /*10F8,Through,0dB,fs/1,invert=0*/ + { 0x10F9, 
0x00000000}, /*10F9,Through,0dB,fs/1,invert=0*/ + { 0x1100, 0x3F800000}, /*1100,0dB,invert=0*/ + { 0x1101, 0x3F800000}, /*1101,0dB,invert=0*/ + { 0x1102, 0x00000000}, /*1102,Cutoff,invert=0*/ + { 0x1103, 0x3F800000}, /*1103,0dB,invert=0*/ + { 0x1104, 0x3828A700}, /*1104,LPF,0.3Hz,0dB,fs/1,invert=0*/ + { 0x1105, 0x3828A700}, /*1105,LPF,0.3Hz,0dB,fs/1,invert=0*/ + { 0x1106, 0x3F7FFAC0}, /*1106,LPF,0.3Hz,0dB,fs/1,invert=0*/ + { 0x1107, 0x3F800000}, /*1107,0dB,invert=0*/ + { 0x1108, 0xBF800000}, /*1108,0dB,invert=1*/ + { 0x1109, 0x00000000}, /*1109,Cutoff,invert=0*/ + { 0x110A, 0x3F800000}, /*110A,0dB,invert=0*/ + { 0x110B, 0x3F800000}, /*110B,0dB,invert=0*/ + { 0x110C, 0x3F800000}, /*110C,0dB,invert=0*/ + { 0x110E, 0x3F800000}, /*110E,0dB,invert=0*/ + { 0x1110, 0x3DA2AD80}, /*1110*/ + { 0x1111, 0x00000000}, /*1111,Free,fs/1,invert=0*/ + { 0x1112, 0x3F7FFE00}, /*1112,Free,fs/1,invert=0*/ + { 0x1113, 0x3FB26DC0}, /*1113,HBF,50Hz,150Hz,3dB,fs/1,invert=0*/ + { 0x1114, 0xBFB00DC0}, /*1114,HBF,50Hz,150Hz,3dB,fs/1,invert=0*/ + { 0x1115, 0x3F75E8C0}, /*1115,HBF,50Hz,150Hz,3dB,fs/1,invert=0*/ + { 0x1116, 0x3F1B2780}, /*1116,LBF,0.2Hz,0.33Hz,0dB,fs/1,invert=0*/ + { 0x1117, 0xBF1B2400}, /*1117,LBF,0.2Hz,0.33Hz,0dB,fs/1,invert=0*/ + { 0x1118, 0x3F7FFC80}, /*1118,LBF,0.2Hz,0.33Hz,0dB,fs/1,invert=0*/ + { 0x1119, 0x3F800000}, /*1119,Through,0dB,fs/1,invert=0*/ + { 0x111A, 0x00000000}, /*111A,Through,0dB,fs/1,invert=0*/ + { 0x111B, 0x00000000}, /*111B,Through,0dB,fs/1,invert=0*/ + { 0x111C, 0x3F800000}, /*111C,0dB,invert=0*/ + { 0x111D, 0x00000000}, /*111D,Cutoff,invert=0*/ + { 0x111E, 0x00000000}, /*111E,Cutoff,invert=0*/ + { 0x1120, 0x3F800000}, /*1120,0dB,invert=0*/ + { 0x1121, 0x3F800000}, /*1121,0dB,invert=0*/ + { 0x1122, 0x3F800000}, /*1122,0dB,invert=0*/ + { 0x1123, 0x3F800000}, /*1123,Through,0dB,fs/1,invert=0*/ + { 0x1124, 0x00000000}, /*1124,Through,0dB,fs/1,invert=0*/ + { 0x1125, 0x00000000}, /*1125,Through,0dB,fs/1,invert=0*/ + { 0x1126, 0x00000000}, /*1126,Through,0dB,fs/1,invert=0*/ + { 0x1127, 0x00000000}, /*1127,Through,0dB,fs/1,invert=0*/ + { 0x1130, 0x3F800000}, /*1130,Through,0dB,fs/1,invert=0*/ + { 0x1131, 0x00000000}, /*1131,Through,0dB,fs/1,invert=0*/ + { 0x1132, 0x00000000}, /*1132,Through,0dB,fs/1,invert=0*/ + { 0x1133, 0x3F800000}, /*1133,Through,0dB,fs/1,invert=0*/ + { 0x1134, 0x00000000}, /*1134,Through,0dB,fs/1,invert=0*/ + { 0x1135, 0x00000000}, /*1135,Through,0dB,fs/1,invert=0*/ + { 0x1136, 0x3F800000}, /*1136,Through,0dB,fs/1,invert=0*/ + { 0x1137, 0x00000000}, /*1137,Through,0dB,fs/1,invert=0*/ + { 0x1138, 0x00000000}, /*1138,Through,0dB,fs/1,invert=0*/ + { 0x1139, 0x3F800000}, /*1139,Through,0dB,fs/1,invert=0*/ + { 0x113A, 0x00000000}, /*113A,Through,0dB,fs/1,invert=0*/ + { 0x113B, 0x00000000}, /*113B,Through,0dB,fs/1,invert=0*/ + { 0x113C, 0x3F800000}, /*113C,Through,0dB,fs/1,invert=0*/ + { 0x113D, 0x00000000}, /*113D,Through,0dB,fs/1,invert=0*/ + { 0x113E, 0x00000000}, /*113E,Through,0dB,fs/1,invert=0*/ + { 0x1143, 0x39D2BD40}, /*1143,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1144, 0x39D2BD40}, /*1144,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1145, 0x3F7FCB40}, /*1145,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1146, 0x388C8A40}, /*1146,LPF,0.5Hz,0dB,fs/1,invert=0*/ + { 0x1147, 0x388C8A40}, /*1147,LPF,0.5Hz,0dB,fs/1,invert=0*/ + { 0x1148, 0x3F7FF740}, /*1148,LPF,0.5Hz,0dB,fs/1,invert=0*/ + { 0x1149, 0x390C87C0}, /*1149,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x114A, 0x390C87C0}, /*114A,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x114B, 0x3F7FEE80}, /*114B,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x114C, 0x398C8300}, 
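+ /* Coefficients appear to load as triplets sharing one generator tag (e.g. 0x114C-0x114E all carry "LPF,2Hz"), i.e. the feed-forward taps and the feedback tap of one first-order section. */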
/*114C,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x114D, 0x398C8300}, /*114D,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x114E, 0x3F7FDCC0}, /*114E,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x1153, 0x3F800000}, /*1153,Through,0dB,fs/1,invert=0*/ + { 0x1154, 0x00000000}, /*1154,Through,0dB,fs/1,invert=0*/ + { 0x1155, 0x00000000}, /*1155,Through,0dB,fs/1,invert=0*/ + { 0x1156, 0x3F800000}, /*1156,Through,0dB,fs/1,invert=0*/ + { 0x1157, 0x00000000}, /*1157,Through,0dB,fs/1,invert=0*/ + { 0x1158, 0x00000000}, /*1158,Through,0dB,fs/1,invert=0*/ + { 0x1159, 0x3F800000}, /*1159,Through,0dB,fs/1,invert=0*/ + { 0x115A, 0x00000000}, /*115A,Through,0dB,fs/1,invert=0*/ + { 0x115B, 0x00000000}, /*115B,Through,0dB,fs/1,invert=0*/ + { 0x115C, 0x3F800000}, /*115C,Through,0dB,fs/1,invert=0*/ + { 0x115D, 0x00000000}, /*115D,Through,0dB,fs/1,invert=0*/ + { 0x115E, 0x00000000}, /*115E,Through,0dB,fs/1,invert=0*/ + { 0x1163, 0x3F800000}, /*1163,0dB,invert=0*/ + { 0x1166, 0x3F800000}, /*1166,0dB,invert=0*/ + { 0x1169, 0x3F800000}, /*1169,0dB,invert=0*/ + { 0x116C, 0x3F800000}, /*116C,0dB,invert=0*/ + { 0x1173, 0x00000000}, /*1173,Cutoff,invert=0*/ + { 0x1176, 0x3F800000}, /*1176,0dB,invert=0*/ + { 0x1179, 0x3F800000}, /*1179,0dB,invert=0*/ + { 0x117C, 0x3F800000}, /*117C,0dB,invert=0*/ + { 0x1183, 0x38D1B700}, /*1183,-80dB,invert=0*/ + { 0x1186, 0x00000000}, /*1186,Cutoff,invert=0*/ + { 0x1189, 0x00000000}, /*1189,Cutoff,invert=0*/ + { 0x118C, 0x00000000}, /*118C,Cutoff,invert=0*/ + { 0x1193, 0x00000000}, /*1193,Cutoff,invert=0*/ + { 0x1198, 0x3F800000}, /*1198,0dB,invert=0*/ + { 0x1199, 0x3F800000}, /*1199,0dB,invert=0*/ + { 0x119A, 0x3F800000}, /*119A,0dB,invert=0*/ + { 0x11A1, 0x3C58B440}, /*11A1,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x11A2, 0x3C58B440}, /*11A2,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x11A3, 0x3F793A40}, /*11A3,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x11A4, 0x3C58B440}, /*11A4,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x11A5, 0x3C58B440}, /*11A5,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x11A6, 0x3F793A40}, /*11A6,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x11A7, 0x3F800000}, /*11A7,Through,0dB,fs/1,invert=0*/ + { 0x11A8, 0x00000000}, /*11A8,Through,0dB,fs/1,invert=0*/ + { 0x11A9, 0x00000000}, /*11A9,Through,0dB,fs/1,invert=0*/ + { 0x11AA, 0x00000000}, /*11AA,Cutoff,invert=0*/ + { 0x11AB, 0x3BDA2580}, /*11AB,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x11AC, 0x3BDA2580}, /*11AC,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x11AD, 0x3F7C9780}, /*11AD,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x11B0, 0x3F800000}, /*11B0,Through,0dB,fs/1,invert=0*/ + { 0x11B1, 0x00000000}, /*11B1,Through,0dB,fs/1,invert=0*/ + { 0x11B2, 0x00000000}, /*11B2,Through,0dB,fs/1,invert=0*/ + { 0x11B3, 0x3F800000}, /*11B3,0dB,invert=0*/ + { 0x11B4, 0x00000000}, /*11B4,Cutoff,invert=0*/ + { 0x11B5, 0x00000000}, /*11B5,Cutoff,invert=0*/ + { 0x11B6, 0x3F353C00}, /*11B6,-3dB,invert=0*/ + { 0x11B8, 0x3F800000}, /*11B8,0dB,invert=0*/ + { 0x11B9, 0x00000000}, /*11B9,Cutoff,invert=0*/ + { 0x11C0, 0x3F944EC0}, /*11C0,HBF,50Hz,700Hz,2dB,fs/1,invert=0*/ + { 0x11C1, 0xBF925540}, /*11C1,HBF,50Hz,700Hz,2dB,fs/1,invert=0*/ + { 0x11C2, 0x3F5414C0}, /*11C2,HBF,50Hz,700Hz,2dB,fs/1,invert=0*/ + { 0x11C3, 0x3F800000}, /*11C3,Through,0dB,fs/1,invert=0*/ + { 0x11C4, 0x00000000}, /*11C4,Through,0dB,fs/1,invert=0*/ + { 0x11C5, 0x00000000}, /*11C5,Through,0dB,fs/1,invert=0*/ + { 0x11C6, 0x3D506F00}, /*11C6,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x11C7, 0x3D506F00}, /*11C7,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x11C8, 0x3F65F240}, /*11C8,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x11C9, 0x3C208400}, /*11C9,LPF,1.3Hz,35dB,fs/1,invert=0*/ + { 0x11CA, 
0x3C208400}, /*11CA,LPF,1.3Hz,35dB,fs/1,invert=0*/ + { 0x11CB, 0x3F7FE940}, /*11CB,LPF,1.3Hz,35dB,fs/1,invert=0*/ + { 0x11CC, 0x3E1D2100}, /*11CC,LBF,10Hz,26Hz,-8dB,fs/1,invert=0*/ + { 0x11CD, 0xBE1C0980}, /*11CD,LBF,10Hz,26Hz,-8dB,fs/1,invert=0*/ + { 0x11CE, 0x3F7F5080}, /*11CE,LBF,10Hz,26Hz,-8dB,fs/1,invert=0*/ + { 0x11D0, 0x3FFF64C0}, /*11D0,6dB,invert=0*/ + { 0x11D1, 0x00000000}, /*11D1,Cutoff,invert=0*/ + { 0x11D2, 0x3F800000}, /*11D2,0dB,invert=0*/ + { 0x11D3, 0x3F800000}, /*11D3,0dB,invert=0*/ + { 0x11D4, 0x3F800000}, /*11D4,0dB,invert=0*/ + { 0x11D5, 0x3F800000}, /*11D5,0dB,invert=0*/ + { 0x11D7, 0x41FCFB80}, /*11D7,Through,30dB,fs/1,invert=0*/ + { 0x11D8, 0x00000000}, /*11D8,Through,30dB,fs/1,invert=0*/ + { 0x11D9, 0x00000000}, /*11D9,Through,30dB,fs/1,invert=0*/ + { 0x11DA, 0x3F649140}, /*11DA,PKF,1000Hz,-11dB,3,fs/1,invert=0*/ + { 0x11DB, 0xBFD21D40}, /*11DB,PKF,1000Hz,-11dB,3,fs/1,invert=0*/ + { 0x11DC, 0x3FD21D40}, /*11DC,PKF,1000Hz,-11dB,3,fs/1,invert=0*/ + { 0x11DD, 0x3F4F0940}, /*11DD,PKF,1000Hz,-11dB,3,fs/1,invert=0*/ + { 0x11DE, 0xBF339A80}, /*11DE,PKF,1000Hz,-11dB,3,fs/1,invert=0*/ + { 0x11E0, 0x3DD17800}, /*11E0,LPF,850Hz,0dB,fs/1,invert=0*/ + { 0x11E1, 0x3DD17800}, /*11E1,LPF,850Hz,0dB,fs/1,invert=0*/ + { 0x11E2, 0x3F4BA200}, /*11E2,LPF,850Hz,0dB,fs/1,invert=0*/ + { 0x11E3, 0x00000000}, /*11E3,LPF,850Hz,0dB,fs/1,invert=0*/ + { 0x11E4, 0x00000000}, /*11E4,LPF,850Hz,0dB,fs/1,invert=0*/ + { 0x11E5, 0x3F800000}, /*11E5,0dB,invert=0*/ + { 0x11E8, 0x3F800000}, /*11E8,0dB,invert=0*/ + { 0x11E9, 0x00000000}, /*11E9,Cutoff,invert=0*/ + { 0x11EA, 0x00000000}, /*11EA,Cutoff,invert=0*/ + { 0x11EB, 0x00000000}, /*11EB,Cutoff,invert=0*/ + { 0x11F0, 0x3F800000}, /*11F0,Through,0dB,fs/1,invert=0*/ + { 0x11F1, 0x00000000}, /*11F1,Through,0dB,fs/1,invert=0*/ + { 0x11F2, 0x00000000}, /*11F2,Through,0dB,fs/1,invert=0*/ + { 0x11F3, 0x00000000}, /*11F3,Through,0dB,fs/1,invert=0*/ + { 0x11F4, 0x00000000}, /*11F4,Through,0dB,fs/1,invert=0*/ + { 0x11F5, 0x3F800000}, /*11F5,Through,0dB,fs/1,invert=0*/ + { 0x11F6, 0x00000000}, /*11F6,Through,0dB,fs/1,invert=0*/ + { 0x11F7, 0x00000000}, /*11F7,Through,0dB,fs/1,invert=0*/ + { 0x11F8, 0x00000000}, /*11F8,Through,0dB,fs/1,invert=0*/ + { 0x11F9, 0x00000000}, /*11F9,Through,0dB,fs/1,invert=0*/ + { 0x1200, 0x00000000}, /*1200,Cutoff,invert=0*/ + { 0x1201, 0x3F800000}, /*1201,0dB,invert=0*/ + { 0x1202, 0x3F800000}, /*1202,0dB,invert=0*/ + { 0x1203, 0x3F800000}, /*1203,0dB,invert=0*/ + { 0x1204, 0x3F800000}, /*1204,Through,0dB,fs/1,invert=0*/ + { 0x1205, 0x00000000}, /*1205,Through,0dB,fs/1,invert=0*/ + { 0x1206, 0x00000000}, /*1206,Through,0dB,fs/1,invert=0*/ + { 0x1207, 0x3F800000}, /*1207,Through,0dB,fs/1,invert=0*/ + { 0x1208, 0x00000000}, /*1208,Through,0dB,fs/1,invert=0*/ + { 0x1209, 0x00000000}, /*1209,Through,0dB,fs/1,invert=0*/ + { 0x120A, 0x3F800000}, /*120A,Through,0dB,fs/1,invert=0*/ + { 0x120B, 0x00000000}, /*120B,Through,0dB,fs/1,invert=0*/ + { 0x120C, 0x00000000}, /*120C,Through,0dB,fs/1,invert=0*/ + { 0x120D, 0x3F800000}, /*120D,Through,0dB,fs/1,invert=0*/ + { 0x120E, 0x00000000}, /*120E,Through,0dB,fs/1,invert=0*/ + { 0x120F, 0x00000000}, /*120F,Through,0dB,fs/1,invert=0*/ + { 0x1210, 0x3F800000}, /*1210,Through,0dB,fs/1,invert=0*/ + { 0x1211, 0x00000000}, /*1211,Through,0dB,fs/1,invert=0*/ + { 0x1212, 0x00000000}, /*1212,Through,0dB,fs/1,invert=0*/ + { 0x1213, 0x3F800000}, /*1213,0dB,invert=0*/ + { 0x1214, 0x3F800000}, /*1214,0dB,invert=0*/ + { 0x1215, 0x3F800000}, /*1215,0dB,invert=0*/ + { 0x1216, 0x3F800000}, /*1216,0dB,invert=0*/ + { 
0x1217, 0x3F800000}, /*1217,0dB,invert=0*/ + { 0x1218, 0x00000000}, /*1218,Cutoff,fs/1,invert=0*/ + { 0x1219, 0x00000000}, /*1219,Cutoff,fs/1,invert=0*/ + { 0x121A, 0x00000000}, /*121A,Cutoff,fs/1,invert=0*/ + { 0x121B, 0x00000000}, /*121B,Cutoff,fs/1,invert=0*/ + { 0x121C, 0x00000000}, /*121C,Cutoff,fs/1,invert=0*/ + { 0x121D, 0x3F800000}, /*121D,0dB,invert=0*/ + { 0x121E, 0x3F800000}, /*121E,0dB,invert=0*/ + { 0x121F, 0x3F800000}, /*121F,0dB,invert=0*/ + { 0x1235, 0x3F800000}, /*1235,0dB,invert=0*/ + { 0x1236, 0x3F800000}, /*1236,0dB,invert=0*/ + { 0x1237, 0x3F800000}, /*1237,0dB,invert=0*/ + { 0x1238, 0x3F800000}, /*1238,0dB,invert=0*/ + { 0xFFFF, 0xFFFFFFFF } + }; + +/* 32bit */ +const struct STFILRAM CsFilRam_13M_simul_set[] = { + { 0x1000, 0x3F800000}, /*1000,0dB,invert=0*/ + { 0x1001, 0x3F800000}, /*1001,0dB,invert=0*/ + { 0x1002, 0x00000000}, /*1002,Cutoff,invert=0*/ + { 0x1003, 0x3F800000}, /*1003,0dB,invert=0*/ + { 0x1004, 0x3828A700}, /*1004,LPF,0.3Hz,0dB,fs/1,invert=0*/ + { 0x1005, 0x3828A700}, /*1005,LPF,0.3Hz,0dB,fs/1,invert=0*/ + { 0x1006, 0x3F7FFAC0}, /*1006,LPF,0.3Hz,0dB,fs/1,invert=0*/ + { 0x1007, 0x3F800000}, /*1007,0dB,invert=0*/ + { 0x1008, 0xBF800000}, /*1008,0dB,invert=1*/ + { 0x1009, 0x00000000}, /*1009,Cutoff,invert=0*/ + { 0x100A, 0x3F800000}, /*100A,0dB,invert=0*/ + { 0x100B, 0x3F800000}, /*100B,0dB,invert=0*/ + { 0x100C, 0x3F800000}, /*100C,0dB,invert=0*/ + { 0x100E, 0x3F800000}, /*100E,0dB,invert=0*/ + { 0x1010, 0x3DA2AD80}, /*1010*/ + { 0x1011, 0x00000000}, /*1011,Free,fs/1,invert=0*/ + { 0x1012, 0x3F7FFE00}, /*1012,Free,fs/1,invert=0*/ + { 0x1013, 0x3FB26DC0}, /*1013,HBF,50Hz,150Hz,3dB,fs/1,invert=0*/ + { 0x1014, 0xBFB00DC0}, /*1014,HBF,50Hz,150Hz,3dB,fs/1,invert=0*/ + { 0x1015, 0x3F75E8C0}, /*1015,HBF,50Hz,150Hz,3dB,fs/1,invert=0*/ + { 0x1016, 0x3F1B2780}, /*1016,LBF,0.2Hz,0.33Hz,0dB,fs/1,invert=0*/ + { 0x1017, 0xBF1B2400}, /*1017,LBF,0.2Hz,0.33Hz,0dB,fs/1,invert=0*/ + { 0x1018, 0x3F7FFC80}, /*1018,LBF,0.2Hz,0.33Hz,0dB,fs/1,invert=0*/ + { 0x1019, 0x3F800000}, /*1019,Through,0dB,fs/1,invert=0*/ + { 0x101A, 0x00000000}, /*101A,Through,0dB,fs/1,invert=0*/ + { 0x101B, 0x00000000}, /*101B,Through,0dB,fs/1,invert=0*/ + { 0x101C, 0x3F800000}, /*101C,0dB,invert=0*/ + { 0x101D, 0x00000000}, /*101D,Cutoff,invert=0*/ + { 0x101E, 0x00000000}, /*101E,Cutoff,invert=0*/ + { 0x1020, 0x3F800000}, /*1020,0dB,invert=0*/ + { 0x1021, 0x3F800000}, /*1021,0dB,invert=0*/ + { 0x1022, 0x3F800000}, /*1022,0dB,invert=0*/ + { 0x1023, 0x3F800000}, /*1023,Through,0dB,fs/1,invert=0*/ + { 0x1024, 0x00000000}, /*1024,Through,0dB,fs/1,invert=0*/ + { 0x1025, 0x00000000}, /*1025,Through,0dB,fs/1,invert=0*/ + { 0x1026, 0x00000000}, /*1026,Through,0dB,fs/1,invert=0*/ + { 0x1027, 0x00000000}, /*1027,Through,0dB,fs/1,invert=0*/ + { 0x1030, 0x3F800000}, /*1030,Through,0dB,fs/1,invert=0*/ + { 0x1031, 0x00000000}, /*1031,Through,0dB,fs/1,invert=0*/ + { 0x1032, 0x00000000}, /*1032,Through,0dB,fs/1,invert=0*/ + { 0x1033, 0x3F800000}, /*1033,Through,0dB,fs/1,invert=0*/ + { 0x1034, 0x00000000}, /*1034,Through,0dB,fs/1,invert=0*/ + { 0x1035, 0x00000000}, /*1035,Through,0dB,fs/1,invert=0*/ + { 0x1036, 0x3F800000}, /*1036,Through,0dB,fs/1,invert=0*/ + { 0x1037, 0x00000000}, /*1037,Through,0dB,fs/1,invert=0*/ + { 0x1038, 0x00000000}, /*1038,Through,0dB,fs/1,invert=0*/ + { 0x1039, 0x3F800000}, /*1039,Through,0dB,fs/1,invert=0*/ + { 0x103A, 0x00000000}, /*103A,Through,0dB,fs/1,invert=0*/ + { 0x103B, 0x00000000}, /*103B,Through,0dB,fs/1,invert=0*/ + { 0x103C, 0x3F800000}, /*103C,Through,0dB,fs/1,invert=0*/ + { 
0x103D, 0x00000000}, /*103D,Through,0dB,fs/1,invert=0*/ + { 0x103E, 0x00000000}, /*103E,Through,0dB,fs/1,invert=0*/ + { 0x1043, 0x39D2BD40}, /*1043,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1044, 0x39D2BD40}, /*1044,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1045, 0x3F7FCB40}, /*1045,LPF,3Hz,0dB,fs/1,invert=0*/ + { 0x1046, 0x388C8A40}, /*1046,LPF,0.5Hz,0dB,fs/1,invert=0*/ + { 0x1047, 0x388C8A40}, /*1047,LPF,0.5Hz,0dB,fs/1,invert=0*/ + { 0x1048, 0x3F7FF740}, /*1048,LPF,0.5Hz,0dB,fs/1,invert=0*/ + { 0x1049, 0x390C87C0}, /*1049,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x104A, 0x390C87C0}, /*104A,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x104B, 0x3F7FEE80}, /*104B,LPF,1Hz,0dB,fs/1,invert=0*/ + { 0x104C, 0x398C8300}, /*104C,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x104D, 0x398C8300}, /*104D,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x104E, 0x3F7FDCC0}, /*104E,LPF,2Hz,0dB,fs/1,invert=0*/ + { 0x1053, 0x3F800000}, /*1053,Through,0dB,fs/1,invert=0*/ + { 0x1054, 0x00000000}, /*1054,Through,0dB,fs/1,invert=0*/ + { 0x1055, 0x00000000}, /*1055,Through,0dB,fs/1,invert=0*/ + { 0x1056, 0x3F800000}, /*1056,Through,0dB,fs/1,invert=0*/ + { 0x1057, 0x00000000}, /*1057,Through,0dB,fs/1,invert=0*/ + { 0x1058, 0x00000000}, /*1058,Through,0dB,fs/1,invert=0*/ + { 0x1059, 0x3F800000}, /*1059,Through,0dB,fs/1,invert=0*/ + { 0x105A, 0x00000000}, /*105A,Through,0dB,fs/1,invert=0*/ + { 0x105B, 0x00000000}, /*105B,Through,0dB,fs/1,invert=0*/ + { 0x105C, 0x3F800000}, /*105C,Through,0dB,fs/1,invert=0*/ + { 0x105D, 0x00000000}, /*105D,Through,0dB,fs/1,invert=0*/ + { 0x105E, 0x00000000}, /*105E,Through,0dB,fs/1,invert=0*/ + { 0x1063, 0x3F800000}, /*1063,0dB,invert=0*/ + { 0x1066, 0x3F800000}, /*1066,0dB,invert=0*/ + { 0x1069, 0x3F800000}, /*1069,0dB,invert=0*/ + { 0x106C, 0x3F800000}, /*106C,0dB,invert=0*/ + { 0x1073, 0x00000000}, /*1073,Cutoff,invert=0*/ + { 0x1076, 0x3F800000}, /*1076,0dB,invert=0*/ + { 0x1079, 0x3F800000}, /*1079,0dB,invert=0*/ + { 0x107C, 0x3F800000}, /*107C,0dB,invert=0*/ + { 0x1083, 0x38D1B700}, /*1083,-80dB,invert=0*/ + { 0x1086, 0x00000000}, /*1086,Cutoff,invert=0*/ + { 0x1089, 0x00000000}, /*1089,Cutoff,invert=0*/ + { 0x108C, 0x00000000}, /*108C,Cutoff,invert=0*/ + { 0x1093, 0x00000000}, /*1093,Cutoff,invert=0*/ + { 0x1098, 0x3F800000}, /*1098,0dB,invert=0*/ + { 0x1099, 0x3F800000}, /*1099,0dB,invert=0*/ + { 0x109A, 0x3F800000}, /*109A,0dB,invert=0*/ + { 0x10A1, 0x3C58B440}, /*10A1,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A2, 0x3C58B440}, /*10A2,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A3, 0x3F793A40}, /*10A3,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A4, 0x3C58B440}, /*10A4,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A5, 0x3C58B440}, /*10A5,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A6, 0x3F793A40}, /*10A6,LPF,100Hz,0dB,fs/1,invert=0*/ + { 0x10A7, 0x3F800000}, /*10A7,Through,0dB,fs/1,invert=0*/ + { 0x10A8, 0x00000000}, /*10A8,Through,0dB,fs/1,invert=0*/ + { 0x10A9, 0x00000000}, /*10A9,Through,0dB,fs/1,invert=0*/ + { 0x10AA, 0x00000000}, /*10AA,Cutoff,invert=0*/ + { 0x10AB, 0x3BDA2580}, /*10AB,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x10AC, 0x3BDA2580}, /*10AC,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x10AD, 0x3F7C9780}, /*10AD,LPF,50Hz,0dB,fs/1,invert=0*/ + { 0x10B0, 0x3F800000}, /*10B0,Through,0dB,fs/1,invert=0*/ + { 0x10B1, 0x00000000}, /*10B1,Through,0dB,fs/1,invert=0*/ + { 0x10B2, 0x00000000}, /*10B2,Through,0dB,fs/1,invert=0*/ + { 0x10B3, 0x3F800000}, /*10B3,0dB,invert=0*/ + { 0x10B4, 0x00000000}, /*10B4,Cutoff,invert=0*/ + { 0x10B5, 0x00000000}, /*10B5,Cutoff,invert=0*/ + { 0x10B6, 0x3F353C00}, /*10B6,-3dB,invert=0*/ + { 0x10B8, 0x3F800000}, /*10B8,0dB,invert=0*/ + { 0x10B9, 
0x00000000}, /*10B9,Cutoff,invert=0*/ + { 0x10C0, 0x3F944EC0}, /*10C0,HBF,50Hz,700Hz,2dB,fs/1,invert=0*/ + { 0x10C1, 0xBF925540}, /*10C1,HBF,50Hz,700Hz,2dB,fs/1,invert=0*/ + { 0x10C2, 0x3F5414C0}, /*10C2,HBF,50Hz,700Hz,2dB,fs/1,invert=0*/ + { 0x10C3, 0x3F800000}, /*10C3,Through,0dB,fs/1,invert=0*/ + { 0x10C4, 0x00000000}, /*10C4,Through,0dB,fs/1,invert=0*/ + { 0x10C5, 0x00000000}, /*10C5,Through,0dB,fs/1,invert=0*/ + { 0x10C6, 0x3D506F00}, /*10C6,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x10C7, 0x3D506F00}, /*10C7,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x10C8, 0x3F65F240}, /*10C8,LPF,400Hz,0dB,fs/1,invert=0*/ + { 0x10C9, 0x3C208400}, /*10C9,LPF,1.3Hz,35dB,fs/1,invert=0*/ + { 0x10CA, 0x3C208400}, /*10CA,LPF,1.3Hz,35dB,fs/1,invert=0*/ + { 0x10CB, 0x3F7FE940}, /*10CB,LPF,1.3Hz,35dB,fs/1,invert=0*/ + { 0x10CC, 0x3E1D2100}, /*10CC,LBF,10Hz,26Hz,-8dB,fs/1,invert=0*/ + { 0x10CD, 0xBE1C0980}, /*10CD,LBF,10Hz,26Hz,-8dB,fs/1,invert=0*/ + { 0x10CE, 0x3F7F5080}, /*10CE,LBF,10Hz,26Hz,-8dB,fs/1,invert=0*/ + { 0x10D0, 0x3FFF64C0}, /*10D0,6dB,invert=0*/ + { 0x10D1, 0x00000000}, /*10D1,Cutoff,invert=0*/ + { 0x10D2, 0x3F800000}, /*10D2,0dB,invert=0*/ + { 0x10D3, 0x3F800000}, /*10D3,0dB,invert=0*/ + { 0x10D4, 0x3F800000}, /*10D4,0dB,invert=0*/ + { 0x10D5, 0x3F800000}, /*10D5,0dB,invert=0*/ + { 0x10D7, 0x41FCFB80}, /*10D7,Through,30dB,fs/1,invert=0*/ + { 0x10D8, 0x00000000}, /*10D8,Through,30dB,fs/1,invert=0*/ + { 0x10D9, 0x00000000}, /*10D9,Through,30dB,fs/1,invert=0*/ + { 0x10DA, 0x3F649140}, /*10DA,PKF,1000Hz,-11dB,3,fs/1,invert=0*/ + { 0x10DB, 0xBFD21D40}, /*10DB,PKF,1000Hz,-11dB,3,fs/1,invert=0*/ + { 0x10DC, 0x3FD21D40}, /*10DC,PKF,1000Hz,-11dB,3,fs/1,invert=0*/ + { 0x10DD, 0x3F4F0940}, /*10DD,PKF,1000Hz,-11dB,3,fs/1,invert=0*/ + { 0x10DE, 0xBF339A80}, /*10DE,PKF,1000Hz,-11dB,3,fs/1,invert=0*/ + { 0x10E0, 0x3DD17800}, /*10E0,LPF,850Hz,0dB,fs/1,invert=0*/ + { 0x10E1, 0x3DD17800}, /*10E1,LPF,850Hz,0dB,fs/1,invert=0*/ + { 0x10E2, 0x3F4BA200}, /*10E2,LPF,850Hz,0dB,fs/1,invert=0*/ + { 0x10E3, 0x00000000}, /*10E3,LPF,850Hz,0dB,fs/1,invert=0*/ + { 0x10E4, 0x00000000}, /*10E4,LPF,850Hz,0dB,fs/1,invert=0*/ + { 0x10E5, 0x3F800000}, /*10E5,0dB,invert=0*/ + { 0x10E8, 0x3F800000}, /*10E8,0dB,invert=0*/ + { 0x10E9, 0x00000000}, /*10E9,Cutoff,invert=0*/ + { 0x10EA, 0x00000000}, /*10EA,Cutoff,invert=0*/ + { 0x10EB, 0x00000000}, /*10EB,Cutoff,invert=0*/ + { 0x10F0, 0x3F800000}, /*10F0,Through,0dB,fs/1,invert=0*/ + { 0x10F1, 0x00000000}, /*10F1,Through,0dB,fs/1,invert=0*/ + { 0x10F2, 0x00000000}, /*10F2,Through,0dB,fs/1,invert=0*/ + { 0x10F3, 0x00000000}, /*10F3,Through,0dB,fs/1,invert=0*/ + { 0x10F4, 0x00000000}, /*10F4,Through,0dB,fs/1,invert=0*/ + { 0x10F5, 0x3F800000}, /*10F5,Through,0dB,fs/1,invert=0*/ + { 0x10F6, 0x00000000}, /*10F6,Through,0dB,fs/1,invert=0*/ + { 0x10F7, 0x00000000}, /*10F7,Through,0dB,fs/1,invert=0*/ + { 0x10F8, 0x00000000}, /*10F8,Through,0dB,fs/1,invert=0*/ + { 0x10F9, 0x00000000}, /*10F9,Through,0dB,fs/1,invert=0*/ + { 0x1200, 0x00000000}, /*1200,Cutoff,invert=0*/ + { 0x1201, 0x3F800000}, /*1201,0dB,invert=0*/ + { 0x1202, 0x3F800000}, /*1202,0dB,invert=0*/ + { 0x1203, 0x3F800000}, /*1203,0dB,invert=0*/ + { 0x1204, 0x3F800000}, /*1204,Through,0dB,fs/1,invert=0*/ + { 0x1205, 0x00000000}, /*1205,Through,0dB,fs/1,invert=0*/ + { 0x1206, 0x00000000}, /*1206,Through,0dB,fs/1,invert=0*/ + { 0x1207, 0x3F800000}, /*1207,Through,0dB,fs/1,invert=0*/ + { 0x1208, 0x00000000}, /*1208,Through,0dB,fs/1,invert=0*/ + { 0x1209, 0x00000000}, /*1209,Through,0dB,fs/1,invert=0*/ + { 0x120A, 0x3F800000}, 
/*120A,Through,0dB,fs/1,invert=0*/ + { 0x120B, 0x00000000}, /*120B,Through,0dB,fs/1,invert=0*/ + { 0x120C, 0x00000000}, /*120C,Through,0dB,fs/1,invert=0*/ + { 0x120D, 0x3F800000}, /*120D,Through,0dB,fs/1,invert=0*/ + { 0x120E, 0x00000000}, /*120E,Through,0dB,fs/1,invert=0*/ + { 0x120F, 0x00000000}, /*120F,Through,0dB,fs/1,invert=0*/ + { 0x1210, 0x3F800000}, /*1210,Through,0dB,fs/1,invert=0*/ + { 0x1211, 0x00000000}, /*1211,Through,0dB,fs/1,invert=0*/ + { 0x1212, 0x00000000}, /*1212,Through,0dB,fs/1,invert=0*/ + { 0x1213, 0x3F800000}, /*1213,0dB,invert=0*/ + { 0x1214, 0x3F800000}, /*1214,0dB,invert=0*/ + { 0x1215, 0x3F800000}, /*1215,0dB,invert=0*/ + { 0x1216, 0x3F800000}, /*1216,0dB,invert=0*/ + { 0x1217, 0x3F800000}, /*1217,0dB,invert=0*/ + { 0x1218, 0x00000000}, /*1218,Cutoff,fs/1,invert=0*/ + { 0x1219, 0x00000000}, /*1219,Cutoff,fs/1,invert=0*/ + { 0x121A, 0x00000000}, /*121A,Cutoff,fs/1,invert=0*/ + { 0x121B, 0x00000000}, /*121B,Cutoff,fs/1,invert=0*/ + { 0x121C, 0x00000000}, /*121C,Cutoff,fs/1,invert=0*/ + { 0x121D, 0x3F800000}, /*121D,0dB,invert=0*/ + { 0x121E, 0x3F800000}, /*121E,0dB,invert=0*/ + { 0x121F, 0x3F800000}, /*121F,0dB,invert=0*/ + { 0x1235, 0x3F800000}, /*1235,0dB,invert=0*/ + { 0x1236, 0x3F800000}, /*1236,0dB,invert=0*/ + { 0x1237, 0x3F800000}, /*1237,0dB,invert=0*/ + { 0x1238, 0x3F800000}, /*1238,0dB,invert=0*/ + { 0xFFFF, 0xFFFFFFFF } + }; + + + diff --git a/drivers/media/i2c/lc898122/lc898122-oisinit.c b/drivers/media/i2c/lc898122/lc898122-oisinit.c new file mode 100644 index 000000000000..429eb708cd54 --- /dev/null +++ b/drivers/media/i2c/lc898122/lc898122-oisinit.c @@ -0,0 +1,1269 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2015 - 2018 Intel Corporation + * Copyright (C) ON Semiconductor + * + */ +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/types.h> +#include "lc898122.h" +#include "lc898122-oisinit.h" +#include "lc898122-regs.h" +#include "lc898122-oisfil.h" + +#define LC898122_TRI_LEVEL 0x3A83126F +#define LC898122_TIMELOW 0x50 +#define LC898122_TIMEHGH 0x05 +#define LC898122_TIMEBSE_EXT 0x2F +#define LC898122_TIMEBSE 0x5D +#define LC898122_MONADR LC898122_GXXFZ +#define LC898122_GANADR LC898122_gxadj +#define LC898122_XMINGAIN 0x00000000 +#define LC898122_XMAXGAIN 0x3F800000 +#define LC898122_YMINGAIN 0x00000000 +#define LC898122_YMAXGAIN 0x3F800000 +#define LC898122_XSTEPUP 0x38D1B717 +#define LC898122_XSTEPDN 0xBD4CCCCD +#define LC898122_YSTEPUP 0x38D1B717 +#define LC898122_YSTEPDN 0xBD4CCCCD + +#define LC898122_LOOP_TIMEOUT 10 + +#define LC898122_AFTYPE_UNIDIR 0x1 +#define LC898122_AFTYPE_BI_DIR 0x2 + +#define LC898122_MODULE_13M 0x01 +#define LC898122_MODULE_20M 0x02 + +static void lc898122_gyoutsignal(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + + if (lc898122_dev->state.flags & LC898122_USE_INVENSENSE) { + RegWriteA(client, LC898122_GRADR0, + LC898122_INVENSENSE_GYROX_INI); + RegWriteA(client, LC898122_GRADR1, + LC898122_INVENSENSE_GYROY_INI); + } else if (lc898122_dev->state.flags & LC898122_USE_STMICRO_L3G4IS) { + RegWriteA(client, LC898122_GRADR0, + LC898122_STMICRO_GYROX_INI); + RegWriteA(client, LC898122_GRADR1, + LC898122_STMICRO_GYROY_INI); + } else if (lc898122_dev->state.flags & LC898122_USE_PANASONIC) { + RegWriteA(client, LC898122_GRADR0, + LC898122_PANASONIC_GYROX_INI); + RegWriteA(client, LC898122_GRADR1, + LC898122_PANASONIC_GYROY_INI); + } else { + /* ERROR */ + } + RegWriteA(client, LC898122_GRSEL, 0x02); +} + +void lc898122_accwait(struct 
lc898122_device *lc898122_dev, u8 UcTrgDat) +{ + struct i2c_client *client = lc898122_dev->client; + u8 UcFlgVal; + int UcCnt; + + for (UcCnt = 0; UcCnt < 60; UcCnt++) { + RegReadA(client, LC898122_GRACC, &UcFlgVal); + UcFlgVal &= UcTrgDat; + if (UcFlgVal == 0) + break; + msleep(LC898122_LOOP_TIMEOUT); + } +} + +void lc898122_selectgyrosleep(struct lc898122_device *lc898122_dev, + u8 UcSelMode) +{ + struct i2c_client *client = lc898122_dev->client; + u8 UcRamIni; + u8 UcGrini; + + if (lc898122_dev->state.flags & LC898122_USE_INVENSENSE) { + if (UcSelMode == ON) { + RegWriteA(client, LC898122_WC_EQON, 0x00); + RegWriteA(client, LC898122_GRSEL, 0x01); + + RegReadA(client, LC898122_GRINI, &UcGrini); + RegWriteA(client, LC898122_GRINI, (UcGrini | + LC898122_SLOWMODE)); + + RegWriteA(client, LC898122_GRADR0, 0x6B); + RegWriteA(client, LC898122_GRACC, 0x01); + + lc898122_accwait(lc898122_dev, 0x01); + + RegReadA(client, LC898122_GRDAT0H, &UcRamIni); + UcRamIni |= 0x40; + + if (lc898122_dev->state.flags & LC898122_GYROSTANDBY) + UcRamIni &= ~0x01; + + RegWriteA(client, LC898122_GRADR0, 0x6B); + RegWriteA(client, LC898122_GSETDT, UcRamIni); + RegWriteA(client, LC898122_GRACC, 0x10); + lc898122_accwait(lc898122_dev, 0x10); + + if (lc898122_dev->state.flags & LC898122_GYROSTANDBY) { + RegWriteA(client, LC898122_GRADR0, 0x6C); + RegWriteA(client, LC898122_GSETDT, 0x07); + RegWriteA(client, LC898122_GRACC, 0x10); + lc898122_accwait(lc898122_dev, 0x10); + } + } else { + if (lc898122_dev->state.flags & LC898122_GYROSTANDBY) { + RegWriteA(client, LC898122_GRADR0, 0x6C); + RegWriteA(client, LC898122_GSETDT, 0x00); + RegWriteA(client, LC898122_GRACC, 0x10); + lc898122_accwait(lc898122_dev, 0x10); + } + + RegWriteA(client, LC898122_GRADR0, 0x6B); + RegWriteA(client, LC898122_GRACC, 0x01); + lc898122_accwait(lc898122_dev, 0x01); + RegReadA(client, LC898122_GRDAT0H, &UcRamIni); + + UcRamIni &= ~0x40; + if (lc898122_dev->state.flags & LC898122_GYROSTANDBY) + UcRamIni |= 0x01; + + RegWriteA(client, LC898122_GSETDT, UcRamIni); + RegWriteA(client, LC898122_GRACC, 0x10); + lc898122_accwait(lc898122_dev, 0x10); + + RegReadA(client, LC898122_GRINI, &UcGrini); + RegWriteA(client, LC898122_GRINI, + (UcGrini & ~LC898122_SLOWMODE)); + + lc898122_gyoutsignal(lc898122_dev); + + msleep(50); + + RegWriteA(client, LC898122_WC_EQON, 0x01); + + lc898122_cleargyro(lc898122_dev, + 0x007F, + LC898122_CLR_FRAM1); + } + } else { + if (UcSelMode == ON) { + RegWriteA(client, LC898122_WC_EQON, 0x00); + RegWriteA(client, LC898122_GRSEL, 0x01); + RegWriteA(client, LC898122_GRADR0, 0x4C); + RegWriteA(client, LC898122_GSETDT, 0x02); + RegWriteA(client, LC898122_GRACC, 0x10); + lc898122_accwait(lc898122_dev, 0x10); + } else { + RegWriteA(client, LC898122_GRADR0, 0x4C); + RegWriteA(client, LC898122_GSETDT, 0x00); + RegWriteA(client, LC898122_GRACC, 0x10); + lc898122_accwait(lc898122_dev, 0x10); + lc898122_gyoutsignal(lc898122_dev); + + msleep(50); + + RegWriteA(client, LC898122_WC_EQON, 0x01); + lc898122_cleargyro(lc898122_dev, 0x007F, + LC898122_CLR_FRAM1); + } + } +} + +void lc898122_cleargyro(struct lc898122_device *lc898122_dev, + u16 UsClrFil, u8 UcClrMod) +{ + struct i2c_client *client = lc898122_dev->client; + u8 UcRamClr; + int UcCnt; + + /* Select Filter to clear */ + RegWriteA(client, LC898122_WC_RAMDLYMOD1, (u8)(UsClrFil >> 8)); + RegWriteA(client, LC898122_WC_RAMDLYMOD0, (u8)UsClrFil); + + /* Enable Clear */ + RegWriteA(client, LC898122_WC_RAMINITON, UcClrMod); + + /* Check RAM Clear complete */ + for (UcCnt = 0; UcCnt < 60; 
UcCnt++) { + RegReadA(client, LC898122_WC_RAMINITON, &UcRamClr); + UcRamClr &= UcClrMod; + if (UcRamClr == 0) + break; + msleep(LC898122_LOOP_TIMEOUT); + } +} + +void lc898122_driversw(struct lc898122_device *lc898122_dev, u8 UcDrvSw) +{ + struct i2c_client *client = lc898122_dev->client; + + if (UcDrvSw == ON) { + if (lc898122_dev->state.UcPwmMod == LC898122_PWMMOD_CVL) { + RegWriteA(client, LC898122_DRVFC, 0xF0); + } else { + if (lc898122_dev->state.flags & LC898122_PWM_BREAK) + RegWriteA(client, LC898122_DRVFC, 0x00); + else + RegWriteA(client, LC898122_DRVFC, 0xC0); + } + } else { + if (lc898122_dev->state.UcPwmMod == LC898122_PWMMOD_CVL) { + RegWriteA(client, LC898122_DRVFC, 0x30); + } else { + if (lc898122_dev->state.flags & LC898122_PWM_BREAK) + RegWriteA(client, LC898122_DRVFC, 0x00); + else + RegWriteA(client, LC898122_DRVFC, 0x00); + } + } +} + +void lc898122_GyOutSignalCont(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + + RegWriteA(client, LC898122_GRSEL, 0x04); +} + + +void lc898122_BsyWit(struct lc898122_device *lc898122_dev, + u16 UsTrgAdr, + u8 UcTrgDat) +{ + struct i2c_client *client = lc898122_dev->client; + u8 UcFlgVal; + u8 UcCnt; + + RegWriteA(client, UsTrgAdr, UcTrgDat); + + for (UcCnt = 0; UcCnt < 60; UcCnt++) { + RegReadA(client, UsTrgAdr, &UcFlgVal); + UcFlgVal &= (UcTrgDat & 0x0F); + if (UcFlgVal == 0) + break; + msleep(LC898122_LOOP_TIMEOUT); + } +} + +void lc898122_IniPtAve(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + + RegWriteA(client, LC898122_WG_PANSTT1DWNSMP0, 0x00); + RegWriteA(client, LC898122_WG_PANSTT1DWNSMP1, 0x00); + RegWriteA(client, LC898122_WG_PANSTT2DWNSMP0, 0x90); + RegWriteA(client, LC898122_WG_PANSTT2DWNSMP1, 0x01); + RegWriteA(client, LC898122_WG_PANSTT3DWNSMP0, 0x64); + RegWriteA(client, LC898122_WG_PANSTT3DWNSMP1, 0x00); + RegWriteA(client, LC898122_WG_PANSTT4DWNSMP0, 0x00); + RegWriteA(client, LC898122_WG_PANSTT4DWNSMP1, 0x00); + + RamWrite32A(client, LC898122_st1mean, 0x3f800000); + RamWrite32A(client, LC898122_st2mean, 0x3B23D700); + RamWrite32A(client, LC898122_st3mean, 0x3C23D700); + RamWrite32A(client, LC898122_st4mean, 0x3f800000); +} + +void lc898122_RamAccFixMod(struct lc898122_device *lc898122_dev, u8 UcAccMod) +{ + struct i2c_client *client = lc898122_dev->client; + + switch (UcAccMod) { + case OFF: + RegWriteA(client, LC898122_WC_RAMACCMOD, 0x00); + break; + case ON: + RegWriteA(client, LC898122_WC_RAMACCMOD, 0x31); + break; + } +} + +void lc898122_IniPtMovMod(struct lc898122_device *lc898122_dev, u8 UcPtMod) +{ + struct i2c_client *client = lc898122_dev->client; + + switch (UcPtMod) { + case OFF: + RegWriteA(client, LC898122_WG_PANSTTSETGYRO, 0x00); + RegWriteA(client, LC898122_WG_PANSTTSETGAIN, 0x54); + RegWriteA(client, LC898122_WG_PANSTTSETISTP, 0x14); + RegWriteA(client, LC898122_WG_PANSTTSETIFTR, 0x94); + RegWriteA(client, LC898122_WG_PANSTTSETLFTR, 0x00); + break; + case ON: + RegWriteA(client, LC898122_WG_PANSTTSETGYRO, 0x00); + RegWriteA(client, LC898122_WG_PANSTTSETGAIN, 0x00); + RegWriteA(client, LC898122_WG_PANSTTSETISTP, 0x14); + RegWriteA(client, LC898122_WG_PANSTTSETIFTR, 0x94); + RegWriteA(client, LC898122_WG_PANSTTSETLFTR, 0x00); + break; + } +} + +void lc898122_ChkCvr(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + + RegReadA(client, LC898122_CVER, &lc898122_dev->state.UcCvrCod); + RegWriteA(client, LC898122_VRREG, (u8)LC898122_FW_VER); +} + +void lc898122_RemOff(struct 
lc898122_device *lc898122_dev, u8 UcMod) +{ + struct i2c_client *client = lc898122_dev->client; + + switch (UcMod) { + case OFF: + RegWriteA(client, LC898122_WG_PANSTT6, 0x11); + RegWriteA(client, LC898122_WC_RAMACCXY, 0x01); + RamWrite32A(client, LC898122_gxia_1, 0x39D2BD40); + RamWrite32A(client, LC898122_gxib_1, 0x39D2BD40); + RamWrite32A(client, LC898122_gxic_1, 0x3F7FCB40); + RegWriteA(client, LC898122_WC_RAMACCXY, 0x00); + RegWriteA(client, LC898122_WG_PANSTT6, 0x00); + break; + + case ON: + lc898122_cleargyro(lc898122_dev, 0x007F, LC898122_CLR_FRAM1); + RegWriteA(client, LC898122_WC_RAMACCXY, 0x01); + RamWrite32A(client, LC898122_gxia_1, 0x3B038040); + RamWrite32A(client, LC898122_gxib_1, 0x3B038040); + RamWrite32A(client, LC898122_gxic_1, 0x3F7EF900); + RegWriteA(client, LC898122_WC_RAMACCXY, 0x00); + lc898122_SetPanTiltMode(lc898122_dev, ON); + RegWriteA(client, LC898122_WG_PANSTT6, 0x44); + break; + } +} + +void lc898122_afdriversw(struct lc898122_device *lc898122_dev, u8 UcDrvSw) +{ + struct i2c_client *client = lc898122_dev->client; + + if (UcDrvSw == ON) { + if (lc898122_dev->state.UcAfType == LC898122_BI_DIR) { + RegWriteA(client, LC898122_DRVFCAF, 0x10); + } else { + if (lc898122_dev->state.flags & LC898122_AF_PWMMODE) + RegWriteA(client, LC898122_DRVFCAF, 0x00); + else + RegWriteA(client, LC898122_DRVFCAF, 0x20); + } + RegWriteA(client, LC898122_CCAAF, 0x80); + } else { + RegWriteA(client, LC898122_CCAAF, 0x00); + } +} + +void lc898122_settregaf(struct lc898122_device *lc898122_dev, u16 UsTregAf) +{ + struct i2c_client *client = lc898122_dev->client; + + if (lc898122_dev->state.UcAfType == LC898122_BI_DIR) + RamWriteA(client, LC898122_TREG_H, (UsTregAf << 5)); + else + RamWriteA(client, LC898122_TREG_H, (UsTregAf << 6)); +} + +void lc898122_autogaincontrol(struct lc898122_device *lc898122_dev, + u8 UcModeSw) +{ + struct i2c_client *client = lc898122_dev->client; + + if (UcModeSw == OFF) { + RegWriteA(client, LC898122_WG_ADJGANGXATO, 0xA0); + RegWriteA(client, LC898122_WG_ADJGANGYATO, 0xA0); + RamWrite32A(client, LC898122_GANADR, LC898122_XMAXGAIN); + RamWrite32A(client, LC898122_GANADR | + 0x0100, LC898122_YMAXGAIN); + } else { + RegWriteA(client, LC898122_WG_ADJGANGXATO, 0xA3); + RegWriteA(client, LC898122_WG_ADJGANGYATO, 0xA3); + } +} + +static void lc898122_afinitialsetting(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + u8 UcStbb0; + + struct af_cfg_t { + u16 RWEXD1_L_AF; + u16 RWEXD2_L_AF; + u16 RWEXD3_L_AF; + u8 FSTCTIME_AF; + u8 FSTMODE_AF; + }; + + const struct af_cfg_t *af_cfg = NULL; + + const struct af_cfg_t af_cfg_20M = { + .RWEXD1_L_AF = LC898122_RWEXD1_L_AF_20M, + .RWEXD2_L_AF = LC898122_RWEXD2_L_AF_20M, + .RWEXD3_L_AF = LC898122_RWEXD3_L_AF_20M, + .FSTCTIME_AF = LC898122_FSTCTIME_AF_20M, + .FSTMODE_AF = LC898122_FSTMODE_AF_20M, + }; + + const struct af_cfg_t af_cfg_13M = { + .RWEXD1_L_AF = LC898122_RWEXD1_L_AF_13M, + .RWEXD2_L_AF = LC898122_RWEXD2_L_AF_13M, + .RWEXD3_L_AF = LC898122_RWEXD3_L_AF_13M, + .FSTCTIME_AF = LC898122_FSTCTIME_AF_13M, + .FSTMODE_AF = LC898122_FSTMODE_AF_13M, + }; + + + if (lc898122_dev->state.UcModule == LC898122_MODULE_20M) + af_cfg = &af_cfg_20M; + else + af_cfg = &af_cfg_13M; + + lc898122_afdriversw(lc898122_dev, OFF); + + if (lc898122_dev->state.UcAfType == LC898122_BI_DIR) { + lc898122_settregaf(lc898122_dev, 0x0400); + RegWriteA(client, LC898122_DRVFCAF, 0x10); + RegWriteA(client, LC898122_DRVFC3AF, 0x40); + RegWriteA(client, LC898122_DRVFC4AF, 0x80); + RegWriteA(client, 
LC898122_AFFC, 0x90); + } else { + if (lc898122_dev->state.flags & LC898122_AF_PWMMODE) + RegWriteA(client, LC898122_DRVFCAF, 0x00); + else + RegWriteA(client, LC898122_DRVFCAF, 0x20); + + RegWriteA(client, LC898122_DRVFC3AF, 0x00); + RegWriteA(client, LC898122_DRVFC4AF, 0x80); + RegWriteA(client, LC898122_PWMAAF, 0x00); + RegWriteA(client, LC898122_AFFC, 0x80); + } + + if (lc898122_dev->state.flags & LC898122_AF_PWMMODE) { + RegWriteA(client, LC898122_DRVFC2AF, 0x82); + RegWriteA(client, LC898122_DRVCH3SEL, 0x02); + RegWriteA(client, LC898122_PWMFCAF, 0x89); + RegWriteA(client, LC898122_PWMPERIODAF, 0xA0); + } else { + RegWriteA(client, LC898122_DRVFC2AF, 0x00); + RegWriteA(client, LC898122_DRVCH3SEL, 0x00); + RegWriteA(client, LC898122_PWMFCAF, 0x01); + RegWriteA(client, LC898122_PWMPERIODAF, 0x20); + } + + if (lc898122_dev->state.UcAfType == LC898122_BI_DIR) + RegWriteA(client, LC898122_CCFCAF, 0x08); + else + RegWriteA(client, LC898122_CCFCAF, 0x40); + + RegReadA(client, LC898122_STBB0, &UcStbb0); + UcStbb0 &= 0x7F; + RegWriteA(client, LC898122_STBB0, UcStbb0); + RegWriteA(client, LC898122_STBB1, 0x00); + + /* AF Initial setting */ + RegWriteA(client, LC898122_FSTMODE, af_cfg->FSTMODE_AF); + RamWriteA(client, LC898122_RWEXD1_L, af_cfg->RWEXD1_L_AF); + RamWriteA(client, LC898122_RWEXD2_L, af_cfg->RWEXD2_L_AF); + RamWriteA(client, LC898122_RWEXD3_L, af_cfg->RWEXD3_L_AF); + RegWriteA(client, LC898122_FSTCTIME, af_cfg->FSTCTIME_AF); + RegWriteA(client, LC898122_TCODEH, 0x04); + + if (lc898122_dev->state.flags & LC898122_AF_PWMMODE) + RegWriteA(client, LC898122_PWMAAF, 0x80); + + UcStbb0 |= 0x80; + RegWriteA(client, LC898122_STBB0, UcStbb0); + RegWriteA(client, LC898122_STBB1, 0x05); + + lc898122_afdriversw(lc898122_dev, ON); +} + +static void lc898122_init_clock(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + struct clkcfg { + u8 pwmdiv; + u8 srvdiv; + u8 gifdiv; + u8 afpwmdiv; + u8 opafdiv; + u8 clksel; + } clkregs = { + .pwmdiv = 0x00, + .srvdiv = 0x00, + .gifdiv = 0x02, + .afpwmdiv = 0x00, + .opafdiv = 0x02, + .clksel = 0x00, + }; + + lc898122_ChkCvr(lc898122_dev); /* Read Cver */ + + /*OSC Enables*/ + lc898122_dev->state.UcOscAdjFlg = 0; + + if (lc898122_dev->state.flags & LC898122_SET_DEFAULTS) { + /*OSC ENABLE*/ + RegWriteA(client, LC898122_OSCSTOP, 0x00); + RegWriteA(client, LC898122_OSCSET, 0x90); + RegWriteA(client, LC898122_OSCCNTEN, 0x00); + } + /*Clock Enables*/ + RegWriteA(client, LC898122_CLKON, 0x1F); + + if (lc898122_dev->state.flags & LC898122_EXTCLK_ALL) { + clkregs.clksel = 0x07; + clkregs.gifdiv = 0x02; + } else if (lc898122_dev->state.flags & LC898122_EXTCLK_PWM) { + clkregs.clksel = 0x01; + clkregs.gifdiv = 0x03; + } + + if (lc898122_dev->state.UcModule == LC898122_MODULE_20M) + clkregs.opafdiv = 0x06; + else + clkregs.opafdiv = 0x04; + + RegWriteA(client, LC898122_CLKSEL, clkregs.clksel); + RegWriteA(client, LC898122_PWMDIV, clkregs.pwmdiv); + RegWriteA(client, LC898122_SRVDIV, clkregs.srvdiv); + RegWriteA(client, LC898122_GIFDIV, clkregs.gifdiv); + RegWriteA(client, LC898122_AFPWMDIV, clkregs.afpwmdiv); + RegWriteA(client, LC898122_OPAFDIV, clkregs.opafdiv); + +} + +static void lc898122_init_iop(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + + if (lc898122_dev->state.flags & LC898122_SET_DEFAULTS) { + RegWriteA(client, LC898122_P0LEV, 0x00); + RegWriteA(client, LC898122_P0DIR, 0x00); + RegWriteA(client, LC898122_P0PON, 0x0F); + RegWriteA(client, LC898122_P0PUD, 0x0F); + } 
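+	/*
+	 * Pin function select for IOP1 follows the gyro wiring: when
+	 * LC898122_USE_3W_DGYRO is set, lc898122_init_dgyro() below also
+	 * programs SPIM for 3-wire mode (0x00, versus 0x01 for 4-wire),
+	 * so the two settings must be kept consistent.
+	 */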
+ + if (lc898122_dev->state.flags & LC898122_USE_3W_DGYRO) + RegWriteA(client, LC898122_IOP1SEL, 0x02); + else + RegWriteA(client, LC898122_IOP1SEL, 0x00); + + if (lc898122_dev->state.flags & LC898122_SET_DEFAULTS) { + RegWriteA(client, LC898122_IOP0SEL, 0x02); + RegWriteA(client, LC898122_IOP2SEL, 0x02); + RegWriteA(client, LC898122_IOP3SEL, 0x00); + RegWriteA(client, LC898122_IOP4SEL, 0x00); + RegWriteA(client, LC898122_IOP5SEL, 0x00); + RegWriteA(client, LC898122_DGINSEL, 0x00); + RegWriteA(client, LC898122_I2CSEL, 0x00); + RegWriteA(client, LC898122_DLMODE, 0x00); + } +} + +static void lc898122_init_dgyro(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + u8 UcGrini; + + if (lc898122_dev->state.flags & LC898122_USE_3W_DGYRO) + RegWriteA(client, LC898122_SPIM, 0x00); + else + RegWriteA(client, LC898122_SPIM, 0x01); + + RegWriteA(client, LC898122_GRSEL, 0x01); + RegWriteA(client, LC898122_GRINI, 0x80); + + if (lc898122_dev->state.flags & LC898122_USE_STMICRO_L3G4IS) { + + RegWriteA(client, LC898122_LSBF, 0x03); + RegWriteA(client, LC898122_GRADR0, 0x0B); + RegWriteA(client, LC898122_GSETDT, 0x0F); + RegWriteA(client, LC898122_GRACC, 0x10); + lc898122_accwait(lc898122_dev, 0x10); + RegWriteA(client, LC898122_GRADR0, 0x0D); + RegWriteA(client, LC898122_GSETDT, 0x01); + RegWriteA(client, LC898122_GRACC, 0x10); + lc898122_accwait(lc898122_dev, 0x10); + + } else if (lc898122_dev->state.flags & LC898122_USE_INVENSENSE) { + + RegReadA(client, LC898122_GRINI, &UcGrini); + RegWriteA(client, LC898122_GRINI, (UcGrini | + LC898122_SLOWMODE)); + RegWriteA(client, LC898122_GRADR0, 0x6A); + RegWriteA(client, LC898122_GSETDT, 0x10); + RegWriteA(client, LC898122_GRACC, 0x10); + lc898122_accwait(lc898122_dev, 0x10); + RegWriteA(client, LC898122_GRADR0, 0x1B); + RegWriteA(client, LC898122_GSETDT, + (LC898122_INVENSENSE_FS_SEL << 3)); + RegWriteA(client, LC898122_GRACC, 0x10); + lc898122_accwait(lc898122_dev, 0x10); + RegReadA(client, LC898122_GRINI, &UcGrini); + RegWriteA(client, LC898122_GRINI, + (UcGrini & ~LC898122_SLOWMODE)); + + } else if (lc898122_dev->state.flags & LC898122_USE_PANASONIC) { + + RegWriteA(client, LC898122_PANAM, 0x09); + RegWriteA(client, LC898122_REVB7, 0x03); + + } else { + dev_err(&client->dev, "No gyro configured\n"); + } + RegWriteA(client, LC898122_RDSEL, 0x7C); + lc898122_gyoutsignal(lc898122_dev); +} + +static void lc898122_init_monitor(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + + RegWriteA(client, LC898122_PWMMONA, 0x00); + RegWriteA(client, LC898122_MONSELA, 0x5C); + RegWriteA(client, LC898122_MONSELB, 0x5D); + RegWriteA(client, LC898122_MONSELC, 0x00); + RegWriteA(client, LC898122_MONSELD, 0x00); + + /* Monitor Circuit */ + RegWriteA(client, LC898122_WC_PINMON1, 0x00); + RegWriteA(client, LC898122_WC_PINMON2, 0x00); + RegWriteA(client, LC898122_WC_PINMON3, 0x00); + RegWriteA(client, LC898122_WC_PINMON4, 0x00); + + /* Delay Monitor */ + RegWriteA(client, LC898122_WC_DLYMON11, 0x04); + RegWriteA(client, LC898122_WC_DLYMON10, 0x40); + RegWriteA(client, LC898122_WC_DLYMON21, 0x04); + RegWriteA(client, LC898122_WC_DLYMON20, 0xC0); + RegWriteA(client, LC898122_WC_DLYMON31, 0x00); + RegWriteA(client, LC898122_WC_DLYMON30, 0x00); + RegWriteA(client, LC898122_WC_DLYMON41, 0x00); + RegWriteA(client, LC898122_WC_DLYMON40, 0x00); + + RegWriteA(client, LC898122_PWMMONA, 0x80); +} + +static void lc898122_init_servo(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = 
lc898122_dev->client;
+	u8 UcStbb0;
+
+	struct actuator {
+		u32 A3_IEXP3;
+		u32 A1_IEXP1;
+	};
+
+	const struct actuator *actuator_param = NULL;
+
+	const struct actuator actuator_6p5ohm = {
+		.A3_IEXP3 = ACTREG_6P5OHM_A3_IEXP3,
+		.A1_IEXP1 = ACTREG_6P5OHM_A1_IEXP1,
+	};
+
+	const struct actuator actuator_9p2ohm = {
+		.A3_IEXP3 = ACTREG_9P2OHM_A3_IEXP3,
+		.A1_IEXP1 = ACTREG_9P2OHM_A1_IEXP1,
+	};
+
+	const struct actuator actuator_15p0ohm = {
+		.A3_IEXP3 = ACTREG_15P0OHM_A3_IEXP3,
+		.A1_IEXP1 = ACTREG_15P0OHM_A1_IEXP1,
+	};
+
+	if (lc898122_dev->state.flags & LC898122_ACTREG_15P0OHM)
+		actuator_param = &actuator_15p0ohm;
+	else if (lc898122_dev->state.flags & LC898122_ACTREG_9P2OHM)
+		actuator_param = &actuator_9p2ohm;
+	else
+		/* 6.5 ohm, also the fallback when no ACTREG flag is set */
+		actuator_param = &actuator_6p5ohm;
+
+	lc898122_dev->state.UcPwmMod = LC898122_INIT_PWMMODE;
+
+	RegWriteA(client, LC898122_WC_EQON, 0x00);
+	RegWriteA(client, LC898122_WC_RAMINITON, 0x00);
+
+	lc898122_cleargyro(lc898122_dev, 0x0000, LC898122_CLR_ALL_RAM);
+
+	RegWriteA(client, LC898122_WH_EQSWX, 0x02);
+	RegWriteA(client, LC898122_WH_EQSWY, 0x02);
+
+	lc898122_RamAccFixMod(lc898122_dev, OFF);
+
+	/* Monitor Gain */
+	RamWrite32A(client, LC898122_dm1g, 0x3F800000);
+	RamWrite32A(client, LC898122_dm2g, 0x3F800000);
+	RamWrite32A(client, LC898122_dm3g, 0x3F800000);
+	RamWrite32A(client, LC898122_dm4g, 0x3F800000);
+
+	/* Hall output limiter */
+	RamWrite32A(client, LC898122_sxlmta1, 0x3F800000);
+	RamWrite32A(client, LC898122_sylmta1, 0x3F800000);
+
+	/* Emergency Stop */
+	RegWriteA(client, LC898122_WH_EMGSTPON, 0x00);
+	RegWriteA(client, LC898122_WH_EMGSTPTMR, 0xFF);
+	RamWrite32A(client, LC898122_sxemglev, 0x3F800000);
+	RamWrite32A(client, LC898122_syemglev, 0x3F800000);
+
+	/* Hall Servo smoothing */
+	RegWriteA(client, LC898122_WH_SMTSRVON, 0x00);
+
+	if (lc898122_dev->state.flags & LC898122_EXTCLK_ALL) {
+		RegWriteA(client, LC898122_WH_SMTSRVSMP, 0x03);
+		RegWriteA(client, LC898122_WH_SMTTMR, 0x07);
+	} else {
+		RegWriteA(client, LC898122_WH_SMTSRVSMP, 0x06);
+		RegWriteA(client, LC898122_WH_SMTTMR, 0x0f);
+	}
+
+	RamWrite32A(client, LC898122_sxsmtav, 0xBC800000);
+	RamWrite32A(client, LC898122_sysmtav, 0xBC800000);
+	RamWrite32A(client, LC898122_sxsmtstp, 0x3AE90466);
+	RamWrite32A(client, LC898122_sysmtstp, 0x3AE90466);
+	/* High-dimensional correction */
+	RegWriteA(client, LC898122_WH_HOFCON, 0x11);
+
+	/* Front */
+	RamWrite32A(client, LC898122_sxiexp3, actuator_param->A3_IEXP3);
+	RamWrite32A(client, LC898122_sxiexp2, 0x00000000);
+	RamWrite32A(client, LC898122_sxiexp1, actuator_param->A1_IEXP1);
+	RamWrite32A(client, LC898122_sxiexp0, 0x00000000);
+	RamWrite32A(client, LC898122_sxiexp, 0x3F800000);
+	RamWrite32A(client, LC898122_syiexp3, actuator_param->A3_IEXP3);
+	RamWrite32A(client, LC898122_syiexp2, 0x00000000);
+	RamWrite32A(client, LC898122_syiexp1, actuator_param->A1_IEXP1);
+	RamWrite32A(client, LC898122_syiexp0, 0x00000000);
+	RamWrite32A(client, LC898122_syiexp, 0x3F800000);
+
+	/* Back */
+	RamWrite32A(client, LC898122_sxoexp3, actuator_param->A3_IEXP3);
+	RamWrite32A(client, LC898122_sxoexp2, 0x00000000);
+	RamWrite32A(client, LC898122_sxoexp1, 
actuator_param->A1_IEXP1); + RamWrite32A(client, LC898122_syoexp0, 0x00000000); + RamWrite32A(client, LC898122_syoexp, 0x3F800000); + + /* Sine wave */ + if (lc898122_dev->state.flags & LC898122_SET_DEFAULTS) { + RegWriteA(client, LC898122_WC_SINON, 0x00); + RegWriteA(client, LC898122_WC_SINFRQ0, 0x00); + RegWriteA(client, LC898122_WC_SINFRQ1, 0x60); + RegWriteA(client, LC898122_WC_SINPHSX, 0x00); + RegWriteA(client, LC898122_WC_SINPHSY, 0x20); + + /* AD over sampling */ + RegWriteA(client, LC898122_WC_ADMODE, 0x06); + + /* Measure mode */ + RegWriteA(client, LC898122_WC_MESMODE, 0x00); + RegWriteA(client, LC898122_WC_MESSINMODE, 0x00); + RegWriteA(client, LC898122_WC_MESLOOP0, 0x08); + RegWriteA(client, LC898122_WC_MESLOOP1, 0x02); + RegWriteA(client, LC898122_WC_MES1ADD0, 0x00); + RegWriteA(client, LC898122_WC_MES1ADD1, 0x00); + RegWriteA(client, LC898122_WC_MES2ADD0, 0x00); + RegWriteA(client, LC898122_WC_MES2ADD1, 0x00); + RegWriteA(client, LC898122_WC_MESABS, 0x00); + RegWriteA(client, LC898122_WC_MESWAIT, 0x00); + + /* auto measure */ + RegWriteA(client, LC898122_WC_AMJMODE, 0x00); + RegWriteA(client, LC898122_WC_AMJLOOP0, 0x08); + RegWriteA(client, LC898122_WC_AMJLOOP1, 0x02); + RegWriteA(client, LC898122_WC_AMJIDL0, 0x02); + RegWriteA(client, LC898122_WC_AMJIDL1, 0x00); + RegWriteA(client, LC898122_WC_AMJ1ADD0, 0x00); + RegWriteA(client, LC898122_WC_AMJ1ADD1, 0x00); + RegWriteA(client, LC898122_WC_AMJ2ADD0, 0x00); + RegWriteA(client, LC898122_WC_AMJ2ADD1, 0x00); + + /* Data Pass */ + RegWriteA(client, LC898122_WC_DPI1ADD0, 0x00); + RegWriteA(client, LC898122_WC_DPI1ADD1, 0x00); + RegWriteA(client, LC898122_WC_DPI2ADD0, 0x00); + RegWriteA(client, LC898122_WC_DPI2ADD1, 0x00); + RegWriteA(client, LC898122_WC_DPI3ADD0, 0x00); + RegWriteA(client, LC898122_WC_DPI3ADD1, 0x00); + RegWriteA(client, LC898122_WC_DPI4ADD0, 0x00); + RegWriteA(client, LC898122_WC_DPI4ADD1, 0x00); + RegWriteA(client, LC898122_WC_DPO1ADD0, 0x00); + RegWriteA(client, LC898122_WC_DPO1ADD1, 0x00); + RegWriteA(client, LC898122_WC_DPO2ADD0, 0x00); + RegWriteA(client, LC898122_WC_DPO2ADD1, 0x00); + RegWriteA(client, LC898122_WC_DPO3ADD0, 0x00); + RegWriteA(client, LC898122_WC_DPO3ADD1, 0x00); + RegWriteA(client, LC898122_WC_DPO4ADD0, 0x00); + RegWriteA(client, LC898122_WC_DPO4ADD1, 0x00); + RegWriteA(client, LC898122_WC_DPON, 0x00); + + /* Interrupt Flag */ + RegWriteA(client, LC898122_WC_INTMSK, 0xFF); + } + + /* Ram Access */ + lc898122_RamAccFixMod(lc898122_dev, OFF); + + /* PWM Signal Generate */ + lc898122_driversw(lc898122_dev, OFF); + + RegWriteA(client, LC898122_DRVFC2, 0x90); + RegWriteA(client, LC898122_DRVSELX, 0xFF); + RegWriteA(client, LC898122_DRVSELY, 0xFF); + + + if (lc898122_dev->state.flags & LC898122_PWM_BREAK) { + if (lc898122_dev->state.UcCvrCod == LC898122_CVER122) + RegWriteA(client, LC898122_PWMFC, 0x2D); + else + RegWriteA(client, LC898122_PWMFC, 0x3D); + } else { + RegWriteA(client, LC898122_PWMFC, 0x21); + } + + if (lc898122_dev->state.flags & LC898122_USE_VH_SYNC) { + RegWriteA(client, LC898122_STROBEFC, 0x80); + RegWriteA(client, LC898122_STROBEDLYX, 0x00); + RegWriteA(client, LC898122_STROBEDLYY, 0x00); + } + + RegWriteA(client, LC898122_PWMA, 0x00); + RegWriteA(client, LC898122_PWMDLYX, 0x04); + RegWriteA(client, LC898122_PWMDLYY, 0x04); + + if (lc898122_dev->state.flags & LC898122_SET_DEFAULTS) { + RegWriteA(client, LC898122_DRVCH1SEL, 0x00); + RegWriteA(client, LC898122_DRVCH2SEL, 0x00); + + RegWriteA(client, LC898122_PWMDLYTIMX, 0x00); + RegWriteA(client, LC898122_PWMDLYTIMY, 0x00); + } 
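+	/*
+	 * PWM period selection below depends on the silicon revision read
+	 * back by lc898122_ChkCvr() (UcCvrCod) and on LC898122_EXTCLK_PWM;
+	 * note the CVER122 path programs only the Y-channel period
+	 * registers.
+	 */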
+ + if (lc898122_dev->state.UcCvrCod == LC898122_CVER122) { + RegWriteA(client, LC898122_PWMPERIODY, 0x00); + RegWriteA(client, LC898122_PWMPERIODY2, 0x00); + } else { + if (lc898122_dev->state.flags & LC898122_EXTCLK_PWM) { + RegWriteA(client, LC898122_PWMPERIODX, 0x84); + RegWriteA(client, LC898122_PWMPERIODX2, 0x00); + RegWriteA(client, LC898122_PWMPERIODY, 0x84); + RegWriteA(client, LC898122_PWMPERIODY2, 0x00); + } else { + RegWriteA(client, LC898122_PWMPERIODX, 0x00); + RegWriteA(client, LC898122_PWMPERIODX2, 0x00); + RegWriteA(client, LC898122_PWMPERIODY, 0x00); + RegWriteA(client, LC898122_PWMPERIODY2, 0x00); + } + } + + /* Linear PWM circuit setting */ + RegWriteA(client, LC898122_CVA, 0xC0); + + if (lc898122_dev->state.UcCvrCod == LC898122_CVER122) + RegWriteA(client, LC898122_CVFC, 0x22); + + RegWriteA(client, LC898122_CVFC2, 0x80); + if (lc898122_dev->state.UcCvrCod == LC898122_CVER122) { + RegWriteA(client, LC898122_CVSMTHX, 0x00); + RegWriteA(client, LC898122_CVSMTHY, 0x00); + } + + RegReadA(client, LC898122_STBB0, &UcStbb0); + UcStbb0 &= 0x80; + RegWriteA(client, LC898122_STBB0, UcStbb0); +} + +static void lc898122_init_gyro(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + + struct gyro_cfg { + u32 GYRLMT1H; + u32 GYRLMT3_S1; + u32 GYRLMT3_S2; + u32 GYRLMT4_S1; + u32 GYRLMT4_S2; + u32 GYRA12_HGH; + u32 GYRA12_MID; + u32 GYRA34_HGH; + u32 GYRA34_MID; + u32 GYRB12_HGH; + u32 GYRB12_MID; + u32 GYRB34_HGH; + u32 GYRB34_MID; + }; + + const struct gyro_cfg gyro_13M = { + .GYRLMT1H = LC898122_GYRLMT1H_13M, + .GYRLMT3_S1 = LC898122_GYRLMT3_S1_13M, + .GYRLMT3_S2 = LC898122_GYRLMT3_S2_13M, + .GYRLMT4_S1 = LC898122_GYRLMT4_S1_13M, + .GYRLMT4_S2 = LC898122_GYRLMT4_S2_13M, + .GYRA12_HGH = LC898122_GYRA12_HGH_13M, + .GYRA12_MID = LC898122_GYRA12_MID_13M, + .GYRA34_HGH = LC898122_GYRA34_HGH_13M, + .GYRA34_MID = LC898122_GYRA34_MID_13M, + .GYRB12_HGH = LC898122_GYRB12_HGH_13M, + .GYRB12_MID = LC898122_GYRB12_MID_13M, + .GYRB34_HGH = LC898122_GYRB34_HGH_13M, + .GYRB34_MID = LC898122_GYRB34_MID_13M, + }; + + const struct gyro_cfg gyro_20M = { + .GYRLMT1H = LC898122_GYRLMT1H_20M, + .GYRLMT3_S1 = LC898122_GYRLMT3_S1_20M, + .GYRLMT3_S2 = LC898122_GYRLMT3_S2_20M, + .GYRLMT4_S1 = LC898122_GYRLMT4_S1_20M, + .GYRLMT4_S2 = LC898122_GYRLMT4_S2_20M, + .GYRA12_HGH = LC898122_GYRA12_HGH_20M, + .GYRA12_MID = LC898122_GYRA12_MID_20M, + .GYRA34_HGH = LC898122_GYRA34_HGH_20M, + .GYRA34_MID = LC898122_GYRA34_MID_20M, + .GYRB12_HGH = LC898122_GYRB12_HGH_20M, + .GYRB12_MID = LC898122_GYRB12_MID_20M, + .GYRB34_HGH = LC898122_GYRB34_HGH_20M, + .GYRB34_MID = LC898122_GYRB34_MID_20M, + }; + + const struct gyro_cfg *gyro; + + if (lc898122_dev->state.UcModule == LC898122_MODULE_20M) + gyro = &gyro_20M; + else + gyro = &gyro_13M; + + /* Gyro Filter Setting */ + RegWriteA(client, LC898122_WG_EQSW, 0x03); + + /* Gyro Filter Down Sampling */ + RegWriteA(client, LC898122_WG_SHTON, 0x10); + + if (lc898122_dev->state.flags & LC898122_SET_DEFAULTS) { + RegWriteA(client, LC898122_WG_SHTDLYTMR, 0x00); + RegWriteA(client, LC898122_WG_GADSMP, 0x00); + RegWriteA(client, LC898122_WG_HCHR, 0x00); + RegWriteA(client, LC898122_WG_LMT3MOD, 0x00); + RegWriteA(client, LC898122_WG_VREFADD, 0x12); + } + + RegWriteA(client, LC898122_WG_SHTMOD, 0x06); + + /* Limiter */ + RamWrite32A(client, LC898122_gxlmt1H, gyro->GYRLMT1H); + RamWrite32A(client, LC898122_gylmt1H, gyro->GYRLMT1H); + + RamWrite32A(client, LC898122_gxlmt3HS0, gyro->GYRLMT3_S1); + RamWrite32A(client, LC898122_gylmt3HS0, 
gyro->GYRLMT3_S1);
+	RamWrite32A(client, LC898122_gxlmt3HS1, gyro->GYRLMT3_S2);
+	RamWrite32A(client, LC898122_gylmt3HS1, gyro->GYRLMT3_S2);
+	RamWrite32A(client, LC898122_gylmt4HS0, gyro->GYRLMT4_S1);
+	RamWrite32A(client, LC898122_gxlmt4HS0, gyro->GYRLMT4_S1);
+	RamWrite32A(client, LC898122_gxlmt4HS1, gyro->GYRLMT4_S2);
+	RamWrite32A(client, LC898122_gylmt4HS1, gyro->GYRLMT4_S2);
+
+	/* Pan/Tilt parameter */
+	RegWriteA(client, LC898122_WG_PANADDA, 0x12);
+	RegWriteA(client, LC898122_WG_PANADDB, 0x09);
+
+	/*
+	 * Thresholds; the GYRA/GYRB values are IEEE-754 single-precision
+	 * bit patterns (see their defines in lc898122-oisinit.h).
+	 */
+	RamWrite32A(client, LC898122_SttxHis, 0x00000000);
+	RamWrite32A(client, LC898122_SttxaL, 0x00000000);
+	RamWrite32A(client, LC898122_SttxbL, 0x00000000);
+	RamWrite32A(client, LC898122_Sttx12aM, gyro->GYRA12_MID);
+	RamWrite32A(client, LC898122_Sttx12aH, gyro->GYRA12_HGH);
+	RamWrite32A(client, LC898122_Sttx12bM, gyro->GYRB12_MID);
+	RamWrite32A(client, LC898122_Sttx12bH, gyro->GYRB12_HGH);
+	RamWrite32A(client, LC898122_Sttx34aM, gyro->GYRA34_MID);
+	RamWrite32A(client, LC898122_Sttx34aH, gyro->GYRA34_HGH);
+	RamWrite32A(client, LC898122_Sttx34bM, gyro->GYRB34_MID);
+	RamWrite32A(client, LC898122_Sttx34bH, gyro->GYRB34_HGH);
+	RamWrite32A(client, LC898122_SttyaL, 0x00000000);
+	RamWrite32A(client, LC898122_SttybL, 0x00000000);
+	RamWrite32A(client, LC898122_Stty12aM, gyro->GYRA12_MID);
+	RamWrite32A(client, LC898122_Stty12aH, gyro->GYRA12_HGH);
+	RamWrite32A(client, LC898122_Stty12bM, gyro->GYRB12_MID);
+	RamWrite32A(client, LC898122_Stty12bH, gyro->GYRB12_HGH);
+	RamWrite32A(client, LC898122_Stty34aM, gyro->GYRA34_MID);
+	RamWrite32A(client, LC898122_Stty34aH, gyro->GYRA34_HGH);
+	RamWrite32A(client, LC898122_Stty34bM, gyro->GYRB34_MID);
+	RamWrite32A(client, LC898122_Stty34bH, gyro->GYRB34_HGH);
+	/* Pan level */
+	RegWriteA(client, LC898122_WG_PANLEVABS, 0x00);
+	/* Averaging parameters are set later, in lc898122_IniPtAve() */
+
+	/* Phase Transition Settings */
+	RegWriteA(client, LC898122_WG_PANSTT21JUG0, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT21JUG1, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT31JUG0, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT31JUG1, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT41JUG0, 0x01);
+	RegWriteA(client, LC898122_WG_PANSTT41JUG1, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT12JUG0, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT12JUG1, 0x07);
+	RegWriteA(client, LC898122_WG_PANSTT13JUG0, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT13JUG1, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT23JUG0, 0x11);
+	RegWriteA(client, LC898122_WG_PANSTT23JUG1, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT43JUG0, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT43JUG1, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT34JUG0, 0x01);
+	RegWriteA(client, LC898122_WG_PANSTT34JUG1, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT24JUG0, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT24JUG1, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT42JUG0, 0x44);
+	RegWriteA(client, LC898122_WG_PANSTT42JUG1, 0x04);
+
+	/* State Timer */
+	RegWriteA(client, LC898122_WG_PANSTT1LEVTMR, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT2LEVTMR, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT3LEVTMR, 0x00);
+	RegWriteA(client, LC898122_WG_PANSTT4LEVTMR, 0x03);
+
+	/* Control filter */
+	RegWriteA(client, LC898122_WG_PANTRSON0, 0x11);
+	/* State Setting */
+	lc898122_IniPtMovMod(lc898122_dev, OFF);
+	/* Hold */
+	RegWriteA(client, LC898122_WG_PANSTTSETILHLD, 0x00);
+	/* State2,4 Step Time Setting */
+	RegWriteA(client, LC898122_WG_PANSTT2TMR0, 0x01);
+	RegWriteA(client, LC898122_WG_PANSTT2TMR1, 0x00);
+	
RegWriteA(client, LC898122_WG_PANSTT4TMR0, 0x02); + RegWriteA(client, LC898122_WG_PANSTT4TMR1, 0x07); + RegWriteA(client, LC898122_WG_PANSTTXXXTH, 0x00); + + /* NEW_PTST code not yet ported here */ + + if (lc898122_dev->state.flags & LC898122_GAIN_CONT) { + RamWrite32A(client, LC898122_gxlevlow, LC898122_TRI_LEVEL); + RamWrite32A(client, LC898122_gylevlow, LC898122_TRI_LEVEL); + RamWrite32A(client, LC898122_gxadjmin, LC898122_XMINGAIN); + RamWrite32A(client, LC898122_gxadjmax, LC898122_XMAXGAIN); + RamWrite32A(client, LC898122_gxadjdn, LC898122_XSTEPDN); + RamWrite32A(client, LC898122_gxadjup, LC898122_XSTEPUP); + RamWrite32A(client, LC898122_gyadjmin, LC898122_YMINGAIN); + RamWrite32A(client, LC898122_gyadjmax, LC898122_YMAXGAIN); + RamWrite32A(client, LC898122_gyadjdn, LC898122_YSTEPDN); + RamWrite32A(client, LC898122_gyadjup, LC898122_YSTEPUP); + + RegWriteA(client, LC898122_WG_LEVADD, (u8) LC898122_MONADR); + if (lc898122_dev->state.flags & LC898122_EXTCLK_ALL) + RegWriteA(client, LC898122_WG_LEVTMR, + LC898122_TIMEBSE_EXT); + else + RegWriteA(client, LC898122_WG_LEVTMR, + LC898122_TIMEBSE); + RegWriteA(client, LC898122_WG_LEVTMRLOW, LC898122_TIMELOW); + RegWriteA(client, LC898122_WG_LEVTMRHGH, LC898122_TIMEHGH); + RegWriteA(client, LC898122_WG_ADJGANADD, (u8) LC898122_GANADR); + RegWriteA(client, LC898122_WG_ADJGANGO, 0x00); + + /* exe function */ + lc898122_autogaincontrol(lc898122_dev, OFF); + } +} + +static void lc898122_init_filters(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + int index; + struct STFILREG *pFilReg; + struct STFILRAM *pFilRam; + + if (lc898122_dev->state.UcModule == LC898122_MODULE_20M) { + pFilReg = (struct STFILREG *)CsFilReg_20M; + if (lc898122_dev->state.flags & LC898122_XY_SIMU_SET) + pFilRam = (struct STFILRAM *)CsFilRam_20M_simul_set; + else + pFilRam = (struct STFILRAM *)CsFilRam_20M; + } else { + pFilReg = (struct STFILREG *)CsFilReg_13M; + if (lc898122_dev->state.flags & LC898122_XY_SIMU_SET) + pFilRam = (struct STFILRAM *)CsFilRam_13M_simul_set; + else + pFilRam = (struct STFILRAM *)CsFilRam_13M; + } + + index = 0; + while (pFilReg[index].UsRegAdd != 0xFFFF) { + RegWriteA(client, + pFilReg[index].UsRegAdd, + pFilReg[index].UcRegDat); + index++; + } + if (lc898122_dev->state.flags & LC898122_XY_SIMU_SET) + RegWriteA(client, LC898122_WC_RAMACCXY, 0x01); + + index = 0; + while (pFilRam[index].UsRamAdd != 0xFFFF) { + RamWrite32A(client, + pFilRam[index].UsRamAdd, + pFilRam[index].UlRamDat); + index++; + } + + if (lc898122_dev->state.flags & LC898122_XY_SIMU_SET) + RegWriteA(client, LC898122_WC_RAMACCXY, 0x00); +} + +static void lc898122_init_adjust(struct lc898122_device *lc898122_dev) +{ + struct i2c_client *client = lc898122_dev->client; + u8 BIAS_CUR_OIS; + u8 AMP_GAIN_X; + u8 AMP_GAIN_Y; + + if (lc898122_dev->state.UcModule == LC898122_MODULE_20M) { + BIAS_CUR_OIS = LC898122_BIAS_CUR_OIS_20M; + AMP_GAIN_X = LC898122_AMP_GAIN_X_20M; + AMP_GAIN_Y = LC898122_AMP_GAIN_Y_20M; + } else { + BIAS_CUR_OIS = LC898122_BIAS_CUR_OIS_13M; + AMP_GAIN_X = LC898122_AMP_GAIN_X_13M; + AMP_GAIN_Y = LC898122_AMP_GAIN_Y_13M; + } + + RegWriteA(client, LC898122_WC_RAMACCXY, 0x00); + + lc898122_IniPtAve(lc898122_dev); + + /* OIS */ + RegWriteA(client, LC898122_CMSDAC0, BIAS_CUR_OIS); + RegWriteA(client, LC898122_OPGSEL0, AMP_GAIN_X); + RegWriteA(client, LC898122_OPGSEL1, AMP_GAIN_Y); + /* AF */ + RegWriteA(client, LC898122_CMSDAC1, LC898122_BIAS_CUR_AF); + RegWriteA(client, LC898122_OPGSEL2, LC898122_AMP_GAIN_AF); + + 
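/*
+	 * Oscillator trim: LC898122_OSC_INI is the default trim code;
+	 * UcOscAdjFlg was cleared in lc898122_init_clock() and no
+	 * calibrated value is programmed in this port.
+	 */
+	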
RegWriteA(client, LC898122_OSCSET, LC898122_OSC_INI);
+
+	/* adjusted values */
+	RegWriteA(client, LC898122_IZAH, LC898122_DGYRO_OFST_XH);
+	RegWriteA(client, LC898122_IZAL, LC898122_DGYRO_OFST_XL);
+	RegWriteA(client, LC898122_IZBH, LC898122_DGYRO_OFST_YH);
+	RegWriteA(client, LC898122_IZBL, LC898122_DGYRO_OFST_YL);
+
+	/* Ram Access */
+	lc898122_RamAccFixMod(lc898122_dev, ON);
+
+	/* OIS adjusted parameter */
+	RamWriteA(client, LC898122_DAXHLO, LC898122_DAHLXO_INI);
+	RamWriteA(client, LC898122_DAXHLB, LC898122_DAHLXB_INI);
+	RamWriteA(client, LC898122_DAYHLO, LC898122_DAHLYO_INI);
+	RamWriteA(client, LC898122_DAYHLB, LC898122_DAHLYB_INI);
+	RamWriteA(client, LC898122_OFF0Z, LC898122_HXOFF0Z_INI);
+	RamWriteA(client, LC898122_OFF1Z, LC898122_HYOFF1Z_INI);
+	RamWriteA(client, LC898122_sxg, LC898122_SXGAIN_INI);
+	RamWriteA(client, LC898122_syg, LC898122_SYGAIN_INI);
+
+	/* AF adjusted parameter */
+	RamWriteA(client, LC898122_DAZHLO, LC898122_DAHLZO_INI);
+	RamWriteA(client, LC898122_DAZHLB, LC898122_DAHLZB_INI);
+
+	/* Ram Access */
+	lc898122_RamAccFixMod(lc898122_dev, OFF);
+
+	if (lc898122_dev->state.UcAfType == LC898122_BI_DIR)
+		lc898122_SetDOFSTDAF(lc898122_dev, LC898122_AFDROF_INI);
+
+	RamWrite32A(client, LC898122_gxzoom, LC898122_GXGAIN_INI);
+	RamWrite32A(client, LC898122_gyzoom, LC898122_GYGAIN_INI);
+
+	RamWrite32A(client, LC898122_sxq, LC898122_SXQ_INI);
+	RamWrite32A(client, LC898122_syq, LC898122_SYQ_INI);
+
+	if (LC898122_GXHY_GYHX) {
+		RamWrite32A(client, LC898122_sxgx, 0x00000000);
+		RamWrite32A(client, LC898122_sxgy, 0x3F800000);
+		RamWrite32A(client, LC898122_sygy, 0x00000000);
+		RamWrite32A(client, LC898122_sygx, 0x3F800000);
+	}
+
+	lc898122_SetZsp(lc898122_dev, 0);
+
+	RegWriteA(client, LC898122_PWMA, 0xC0);
+
+	RegWriteA(client, LC898122_STBB0, 0xDF);
+	RegWriteA(client, LC898122_WC_EQSW, 0x02);
+	RegWriteA(client, LC898122_WC_MESLOOP1, 0x02);
+	RegWriteA(client, LC898122_WC_MESLOOP0, 0x00);
+	RegWriteA(client, LC898122_WC_AMJLOOP1, 0x02);
+	RegWriteA(client, LC898122_WC_AMJLOOP0, 0x00);
+
+	lc898122_SetPanTiltMode(lc898122_dev, OFF);
+	lc898122_SetGcf(lc898122_dev, 0);
+
+	if (lc898122_dev->state.flags & LC898122_H1COEF_CHANGER)
+		lc898122_SetH1cMod(lc898122_dev, LC898122_ACTMODE);
+
+	lc898122_driversw(lc898122_dev, ON);
+	RegWriteA(client, LC898122_WC_EQON, 0x01);
+}
+
+void lc898122_selectmodule(struct lc898122_device *lc898122_dev,
+			   u8 uc_module)
+{
+	switch (uc_module) {
+	case LC898122_MODULE_13M:
+		lc898122_dev->state.UcAfType = LC898122_UNI_DIR;
+		lc898122_dev->state.UcModule = LC898122_MODULE_13M;
+		break;
+	case LC898122_MODULE_20M:
+		lc898122_dev->state.UcAfType = LC898122_BI_DIR;
+		lc898122_dev->state.UcModule = LC898122_MODULE_20M;
+		break;
+	default:
+		lc898122_dev->state.UcAfType = LC898122_UNI_DIR;
+		lc898122_dev->state.UcModule = LC898122_MODULE_13M;
+		break;
+	}
+}
+
+void lc898122_initsettings(struct lc898122_device *lc898122_dev)
+{
+	/* Clock Setting */
+	lc898122_init_clock(lc898122_dev);
+	/* I/O Port Initial Setting */
+	lc898122_init_iop(lc898122_dev);
+	/* DigitalGyro Initial Setting */
+	lc898122_init_dgyro(lc898122_dev);
+	/* Monitor & Other Initial Setting */
+	lc898122_init_monitor(lc898122_dev);
+	/* Servo Initial Setting */
+	lc898122_init_servo(lc898122_dev);
+	/* Gyro Filter Initial Setting */
+	lc898122_init_gyro(lc898122_dev);
+	/* Filter Coefficient Initial Setting */
+	lc898122_init_filters(lc898122_dev);
+	/* Adjust Fix Value Setting */
+	lc898122_init_adjust(lc898122_dev);
+}
+
+void lc898122_initsettingsaf(struct 
lc898122_device *lc898122_dev) +{ + /* Clock Setting */ + lc898122_init_clock(lc898122_dev); + /* AF Initial Setting */ + lc898122_afinitialsetting(lc898122_dev); + +} + diff --git a/drivers/media/i2c/lc898122/lc898122-oisinit.h b/drivers/media/i2c/lc898122/lc898122-oisinit.h new file mode 100644 index 000000000000..d619fe0a6002 --- /dev/null +++ b/drivers/media/i2c/lc898122/lc898122-oisinit.h @@ -0,0 +1,361 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation + * Copyright (C) ON Semiconductor + * + */ + +#ifndef __LC898122_OISINIT_H__ +#define __LC898122_OISINIT_H__ + +#define LC898122_FW_VER 0x0003 + +#define LC898122_SET_DEFAULTS 0x000001 +#define LC898122_EXTCLK_ALL 0x000002 +#define LC898122_EXTCLK_PWM 0x000004 +#define LC898122_USE_3W_DGYRO 0x000008 +#define LC898122_USE_STMICRO_L3G4IS 0x000010 +#define LC898122_USE_INVENSENSE 0x000020 +#define LC898122_USE_PANASONIC 0x000040 +#define LC898122_PWM_BREAK 0x000080 +#define LC898122_USE_VH_SYNC 0x000100 +#define LC898122_GAIN_CONT 0x000200 +#define LC898122_H1COEF_CHANGER 0x000400 +#define LC898122_XY_SIMU_SET 0x000800 +#define LC898122_AF_PWMMODE 0x001000 +#define LC898122_GYROSTANDBY 0x002000 +#define LC898122_MONITOR_OFF 0x004000 +#define LC898122_NEUTRAL_CENTER 0x008000 +#define LC898122_ACTREG_15P0OHM 0x010000 +#define LC898122_ACTREG_9P2OHM 0x020000 +#define LC898122_ACTREG_6P5OHM 0x040000 +#define LC898122_ACCEPTANCE 0x080000 +#define LC898122_USE_SINLPF 0x100000 + + +#define LC898122_DEFCONFIG (LC898122_XY_SIMU_SET | LC898122_USE_INVENSENSE | \ + LC898122_GAIN_CONT | LC898122_SET_DEFAULTS | \ + LC898122_ACTREG_6P5OHM | LC898122_PWM_BREAK | \ + LC898122_H1COEF_CHANGER | LC898122_USE_SINLPF) +struct lc898122_device; + +struct lc898122_ois { + u8 UcAfType; + u8 UcModule; + u8 UcOscAdjFlg; + u8 UcPwmMod; + u8 UcCvrCod; + u32 flags; + u32 UlH1Coefval; + u8 UcH1LvlMod; + u16 UsCntXof; + u16 UsCntYof; +}; + +int lc898122_read_long(struct i2c_client *client, u16 reg, u32 *val); +int lc898122_write_long(struct i2c_client *client, u16 reg, u32 val); +int lc898122_read_word(struct i2c_client *client, u16 reg, u16 *val); +int lc898122_write_word(struct i2c_client *client, u16 reg, u16 val); +int lc898122_read_byte(struct i2c_client *client, u16 reg, u8 *val); +int lc898122_write_byte(struct i2c_client *client, u16 reg, u8 val); + +static inline int RamRead32A(struct i2c_client *client, u16 addr, u32 *data) +{ + return lc898122_read_long(client, addr, data); +} + +static inline int RamWrite32A(struct i2c_client *client, u16 addr, u32 data) +{ + return lc898122_write_long(client, addr, data); +} + +static inline int RamReadA(struct i2c_client *client, u16 addr, u16 *data) +{ + return lc898122_read_word(client, addr, data); +} + +static inline int RamWriteA(struct i2c_client *client, u16 addr, u16 data) +{ + return lc898122_write_word(client, addr, data); +} + +static inline int RegReadA(struct i2c_client *client, u16 addr, u8 *data) +{ + return lc898122_read_byte(client, addr, data); +} + +static inline int RegWriteA(struct i2c_client *client, u16 addr, u8 data) +{ + return lc898122_write_byte(client, addr, data); +} + +#define LC898122_HALL_ADJ 0 +#define LC898122_LOOPGAIN 1 +#define LC898122_THROUGH 2 +#define LC898122_NOISE 3 + +#define LC898122_INVENSENSE_FS_SEL 3 +#define LC898122_INVENSENSE_GYROX_INI 0x45 +#define LC898122_INVENSENSE_GYROY_INI 0x43 + +#define LC898122_STMICRO_GYROX_INI 0x43 +#define LC898122_STMICRO_GYROY_INI 0x45 + +#define LC898122_PANASONIC_GYROX_INI 0x7C +#define 
LC898122_PANASONIC_GYROY_INI 0x78
+
+#define LC898122_ACT_CHK_LVL 0x3ecccccd
+#define LC898122_ACT_THR 0x0400
+#define LC898122_GEA_DIF_HIG 0x0010
+#define LC898122_GEA_DIF_LOW 0x0001
+
+/* OIS Calibration Parameter */
+#define LC898122_DAHLXO_INI 0x0000
+#define LC898122_DAHLXB_INI 0xC000
+#define LC898122_DAHLYO_INI 0x0000
+#define LC898122_DAHLYB_INI 0xC000
+#define LC898122_HXOFF0Z_INI 0x0000
+#define LC898122_HYOFF1Z_INI 0x0000
+#define LC898122_SXGAIN_INI 0x2000
+#define LC898122_SYGAIN_INI 0x2000
+#define LC898122_DGYRO_OFST_XH 0x00
+#define LC898122_DGYRO_OFST_XL 0x00
+#define LC898122_DGYRO_OFST_YH 0x00
+#define LC898122_DGYRO_OFST_YL 0x00
+#define LC898122_GXGAIN_INI 0x3F333333
+#define LC898122_GYGAIN_INI 0xBF333333
+#define LC898122_OSC_INI 0x2C
+
+/* Actuator Select */
+/* Hall parameter */
+#define LC898122_BIAS_CUR_OIS_20M 0x44
+#define LC898122_AMP_GAIN_X_20M 0x03
+#define LC898122_AMP_GAIN_Y_20M 0x03
+#define LC898122_BIAS_CUR_OIS_13M 0x33
+#define LC898122_AMP_GAIN_X_13M 0x05
+#define LC898122_AMP_GAIN_Y_13M 0x05
+
+/* AF Open parameter */
+#define LC898122_RWEXD1_L_AF_20M 0x7FFF
+#define LC898122_RWEXD2_L_AF_20M 0x113E
+#define LC898122_RWEXD3_L_AF_20M 0x7211
+#define LC898122_FSTCTIME_AF_20M 0xA9
+#define LC898122_FSTMODE_AF_20M 0x00
+#define LC898122_RWEXD1_L_AF_13M 0x7FFF
+#define LC898122_RWEXD2_L_AF_13M 0x4a02
+#define LC898122_RWEXD3_L_AF_13M 0x7d62
+#define LC898122_FSTCTIME_AF_13M 0xF9
+#define LC898122_FSTMODE_AF_13M 0x02
+
+/* (0.3750114X^3+0.5937681X)*(0.3750114X^3+0.5937681X) 6.5ohm */
+#define ACTREG_6P5OHM_A3_IEXP3 0x3EC0017F
+#define ACTREG_6P5OHM_A1_IEXP1 0x3F180130
+
+/* (0.3750114X^3+0.55X)*(0.3750114X^3+0.55X) 9.2ohm */
+#define ACTREG_9P2OHM_A3_IEXP3 0x3EC0017F
+#define ACTREG_9P2OHM_A1_IEXP1 0x3F0CCCCD
+
+/* (0.4531388X^3+0.4531388X)*(0.4531388X^3+0.4531388X) 15ohm */
+#define ACTREG_15P0OHM_A3_IEXP3 0x3EE801CF
+#define ACTREG_15P0OHM_A1_IEXP1 0x3EE801CF
+
+/* AF adjust parameter */
+#define LC898122_DAHLZB_INI 0x9000
+#define LC898122_DAHLZO_INI 0x0000
+#define LC898122_BIAS_CUR_AF 0x00
+#define LC898122_AMP_GAIN_AF 0x00
+
+#define LC898122_AFDROF_INI 0x10
+
+/*** Hall, Gyro Parameter Setting ***/
+/* Hall Parameter */
+#define LC898122_SXGAIN_LOP 0x3000
+#define LC898122_SYGAIN_LOP 0x3000
+#define LC898122_SXQ_INI 0x3F800000
+#define LC898122_SYQ_INI 0xBF800000
+
+/* Gyro Parameter */
+#define LC898122_GXHY_GYHX 0
+#define LC898122_TCODEH_ADJ 0x0000
+
+#define LC898122_GYRLMT1H_20M 0x3DCCCCC0 /* 0.1F */
+#define LC898122_GYRLMT3_S1_20M 0x3EE66666 /* 0.45F */
+#define LC898122_GYRLMT3_S2_20M 0x3EE66666 /* 0.45F */
+
+#define LC898122_GYRLMT4_S1_20M 0x40400000 /* 3.0F */
+#define LC898122_GYRLMT4_S2_20M 0x40400000 /* 3.0F */
+
+#define LC898122_GYRA12_HGH_20M 0x40000000 /* 2.00F */
+#define LC898122_GYRA12_MID_20M 0x3F800000 /* 1.0F */
+#define LC898122_GYRA34_HGH_20M 0x3F000000 /* 0.5F */
+#define LC898122_GYRA34_MID_20M 0x3DCCCCCD /* 0.1F */
+
+#define LC898122_GYRB12_HGH_20M 0x3E4CCCCD /* 0.20F */
+#define LC898122_GYRB12_MID_20M 0x3CA3D70A /* 0.02F */
+#define LC898122_GYRB34_HGH_20M 0x3CA3D70A /* 0.02F */
+#define LC898122_GYRB34_MID_20M 0x3C23D70A /* 0.01F */
+
+#define LC898122_GYRLMT1H_13M 0x3DCCCCC0 /* 0.1F */
+#define LC898122_GYRLMT3_S1_13M 0x3F0F5C29 /* 0.56F */
+#define LC898122_GYRLMT3_S2_13M 0x3F0F5C29 /* 0.56F */
+
+#define LC898122_GYRLMT4_S1_13M 0x40333333 /* 2.8F */
+#define LC898122_GYRLMT4_S2_13M 0x40333333 /* 2.8F */
+
+#define LC898122_GYRA12_HGH_13M 0x401CCCCD /* 2.45F */
+#define LC898122_GYRA12_MID_13M 0x3FB33333 /* 1.4F */ 
+#define LC898122_GYRA34_HGH_13M 0x3F000000 /* 0.5F */ +#define LC898122_GYRA34_MID_13M 0x3DCCCCCD /* 0.1F */ + +#define LC898122_GYRB12_HGH_13M 0x3E4CCCCD /* 0.20F */ +#define LC898122_GYRB12_MID_13M 0x3CA3D70A /* 0.02F */ +#define LC898122_GYRB34_HGH_13M 0x3CA3D70A /* 0.02F */ +#define LC898122_GYRB34_MID_13M 0x3C23D70A /* 0.01F */ + +/* Command Status */ +#define LC898122_EXE_END 0x02 /* Execute End (Adjust OK) */ +#define LC898122_EXE_HXADJ 0x06 /* Adjust NG : X Hall NG (Gain or Offset) */ +#define LC898122_EXE_HYADJ 0x0A /* Adjust NG : Y Hall NG (Gain or Offset) */ +#define LC898122_EXE_LXADJ 0x12 /* Adjust NG : X Loop NG (Gain) */ +#define LC898122_EXE_LYADJ 0x22 /* Adjust NG : Y Loop NG (Gain) */ +#define LC898122_EXE_GXADJ 0x42 /* Adjust NG : X Gyro NG (offset) */ +#define LC898122_EXE_GYADJ 0x82 /* Adjust NG : Y Gyro NG (offset) */ +#define LC898122_EXE_OCADJ 0x402 /* Adjust NG : OSC Clock NG */ +#define LC898122_EXE_AFOFF 0x802 /* Adjust NG : AF Offset */ +#define LC898122_EXE_ERR 0x99 /* Execute Error End */ + +#define LC898122_EXE_HXMVER 0x06 /* X Err */ +#define LC898122_EXE_HYMVER 0x0A /* Y Err */ + +/* Gyro Examination of Acceptance */ +#define LC898122_EXE_GXABOVE 0x06 /* X Above */ +#define LC898122_EXE_GXBELOW 0x0A /* X Below */ +#define LC898122_EXE_GYABOVE 0x12 /* Y Above */ +#define LC898122_EXE_GYBELOW 0x22 /* Y Below */ + + +#define LC898122_SUCCESS 0x00 +#define LC898122_FAILURE 0x01 + +#ifndef ON +#define ON 0x01 +#define OFF 0x00 +#endif +#define SPC 0x02 + +#define LC898122_X_DIR 0x00 +#define LC898122_Y_DIR 0x01 +#define LC898122_X2_DIR 0x10 +#define LC898122_Y2_DIR 0x11 + +/* Standby mode */ +#define LC898122_STB1_ON 0x00 +#define LC898122_STB1_OFF 0x01 +#define LC898122_STB2_ON 0x02 +#define LC898122_STB2_OFF 0x03 +#define LC898122_STB3_ON 0x04 +#define LC898122_STB3_OFF 0x05 +#define LC898122_STB4_ON 0x06 +#define LC898122_STB4_OFF 0x07 +#define LC898122_STB2_OISON 0x08 +#define LC898122_STB2_OISOFF 0x09 +#define LC898122_STB2_AFON 0x0A +#define LC898122_STB2_AFOFF 0x0B + +/* Optical Center & Gyro Gain for Mode */ +#define LC898122_VAL_SET 0x00 +#define LC898122_VAL_FIX 0x01 +#define LC898122_VAL_SPC 0x02 + +struct STFILREG { + u16 UsRegAdd; + u8 UcRegDat; +}; + +struct STFILRAM { + u16 UsRamAdd; + u32 UlRamDat; +}; + +#define LC898122_MEASSTR 0x01 +#define LC898122_MEASCNT 0x08 +#define LC898122_MEASFIX 0x80 + +#define LC898122_PWMMOD_CVL 0x00 +#define LC898122_PWMMOD_PWM 0x01 + +#define LC898122_INIT_PWMMODE LC898122_PWMMOD_CVL + +#define LC898122_CVER122 0x93 + +#define LC898122_MODULE_13M 0x01 +#define LC898122_MODULE_20M 0x02 + +#define LC898122_UNI_DIR 0x01 +#define LC898122_BI_DIR 0x02 + +#define LC898122_CLR_FRAM0 0x01 +#define LC898122_CLR_FRAM1 0x02 +#define LC898122_CLR_ALL_RAM 0x03 + +#define LC898122_DIFIL_S2_20M 0x3F7FFD00 +#define LC898122_DIFIL_S2_13M 0x3F7FFE00 + +#define LC898122_SINEWAVE 0 +#define LC898122_XHALWAVE 1 +#define LC898122_YHALWAVE 2 +#define LC898122_XACTTEST 10 +#define LC898122_YACTTEST 10 +#define LC898122_CIRCWAVE 255 + +#define LC898122_Mlnp 0 +#define LC898122_Mpwm 1 + +#define LC898122_S2MODE 0x40 +#define LC898122_ACTMODE 0x80 +#define LC898122_MOVMODE 0xFF + +#define LC898122_HALL_H_VAL 0x3F800000 +#define LC898122_PTP_BEFORE 0 +#define LC898122_PTP_AFTER 1 + +void lc898122_initsettingsaf(struct lc898122_device *lc898122_dev); +void lc898122_initsettings(struct lc898122_device *lc898122_dev); +void lc898122_selectmodule(struct lc898122_device *lc898122_dev, + u8 uc_module); +void lc898122_settregaf(struct 
lc898122_device *lc898122_dev, + u16 UsTregAf); +void lc898122_autogaincontrol(struct lc898122_device *lc898122_dev, + u8 UcModeSw); +void lc898122_RamAccFixMod(struct lc898122_device *lc898122_dev, + u8 UcAccMod); +void lc898122_cleargyro(struct lc898122_device *lc898122_dev, + u16 UsClrFil, u8 UcClrMod); +void lc898122_SetDOFSTDAF(struct lc898122_device *lc898122_dev, + u8 ucSetDat); + +u8 lc898122_RtnCen(struct lc898122_device *lc898122_dev, + u8 UcCmdPar); +void lc898122_RemOff(struct lc898122_device *lc898122_dev, + u8 UcMod); +void lc898122_BsyWit(struct lc898122_device *lc898122_dev, + u16 UsTrgAdr, u8 UcTrgDat); +void lc898122_OisEna(struct lc898122_device *lc898122_dev); + +void lc898122_SetGcf(struct lc898122_device *lc898122_dev, + u8 UcSetNum); +void lc898122_SetH1cMod(struct lc898122_device *lc898122_dev, + u8 UcSetNum); +void lc898122_SetZsp(struct lc898122_device *lc898122_dev, + u8 UcZoomStepDat); +void lc898122_SetPanTiltMode(struct lc898122_device *lc898122_dev, + u8 UcPnTmod); +void lc898122_IniPtMovMod(struct lc898122_device *lc898122_dev, u8 UcPtMod); +void lc898122_driversw(struct lc898122_device *lc898122_dev, u8 UcDrvSw); +void lc898122_afdriversw(struct lc898122_device *lc898122_dev, u8 UcDrvSw); +void lc898122_selectgyrosleep(struct lc898122_device *lc898122_dev, + u8 UcSelMode); +void lc898122_GyOutSignalCont(struct lc898122_device *lc898122_dev); +void lc898122_SrvCon(struct lc898122_device *lc898122_dev, u8 UcDirSel, + u8 UcSwcCon); + +#endif diff --git a/drivers/media/i2c/lc898122/lc898122-regs.h b/drivers/media/i2c/lc898122/lc898122-regs.h new file mode 100644 index 000000000000..74caa3845209 --- /dev/null +++ b/drivers/media/i2c/lc898122/lc898122-regs.h @@ -0,0 +1,1263 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation + * Copyright (C) ON Semiconductor + * + */ + +#define LC898122_DRVFC 0x0001 +#define LC898122_DRVFC2 0x0002 +#define LC898122_DRVSELX 0x0003 +#define LC898122_DRVSELY 0x0004 +#define LC898122_DRVCH1SEL 0x0005 +#define LC898122_DRVCH2SEL 0x0006 +#define LC898122_PWMA 0x0010 +#define LC898122_PWMFC 0x0011 +#define LC898122_PWMDLYX 0x0012 +#define LC898122_PWMDLYY 0x0013 +#define LC898122_PWMDLYTIMX 0x0014 +#define LC898122_PWMDLYTIMY 0x0015 +#define LC898122_PWMFC2 0x0016 +#define LC898122_PWMPERIODX 0x0018 +#define LC898122_PWMPERIODX2 0x0019 +#define LC898122_PWMPERIODY 0x001A +#define LC898122_PWMPERIODY2 0x001B +#define LC898122_STROBEFC 0x001C +#define LC898122_STROBEDLYX 0x001D +#define LC898122_STROBEDLYY 0x001E +#define LC898122_CVA 0x0020 +#define LC898122_CVFC 0x0021 +#define LC898122_CVFC2 0x0022 +#define LC898122_CVSMTHX 0x0023 +#define LC898122_CVSMTHY 0x0024 +#define LC898122_PWMMONA 0x0030 +#define LC898122_PWMMONFC 0x0031 +#define LC898122_DACMONFC 0x0032 +#define LC898122_DACSLVADD 0x0040 +#define LC898122_DACMSTCODE 0x0041 +#define LC898122_DACFSCKRATE 0x0042 +#define LC898122_DACHSCKRATE 0x0043 +#define LC898122_DACI2CFC 0x0044 +#define LC898122_DACI2CA 0x0045 +#define LC898122_DRVFCAF 0x0081 +#define LC898122_DRVFC2AF 0x0082 +#define LC898122_DRVFC3AF 0x0083 +#define LC898122_DRVFC4AF 0x0084 +#define LC898122_DRVCH3SEL 0x0085 +#define LC898122_AFFC 0x0088 +#define LC898122_PWMAAF 0x0090 +#define LC898122_PWMFCAF 0x0091 +#define LC898122_PWMDLYAF 0x0092 +#define LC898122_PWMDLYTIMAF 0x0093 +#define LC898122_PWMPERIODAF 0x0099 +#define LC898122_CCAAF 0x00A0 +#define LC898122_CCFCAF 0x00A1 + +/* Filter Register */ + +#define LC898122_WC_EQON 0x0101 +#define LC898122_WC_RAMINITON 0x0102 
+#define LC898122_WC_CPUOPEON 0x0103 +#define LC898122_WC_VMON 0x0104 +#define LC898122_WC_DPON 0x0105 +#define LC898122_WG_SHTON 0x0107 +#define LC898122_WG_ADJGANGO 0x0108 +#define LC898122_WG_PANON 0x0109 +#define LC898122_WG_PANSTT6 0x010A +#define LC898122_WG_NPANSTFRC 0x010B +#define LC898122_WG_CNTPICGO 0x010C +#define LC898122_WG_NPANINION 0x010D +#define LC898122_WG_NPANSTOFF 0x010E +#define LC898122_WG_EQSW 0x0110 +#define LC898122_WG_DWNSMP1 0x0111 +#define LC898122_WG_DWNSMP2 0x0112 +#define LC898122_WG_DWNSMP3 0x0113 +#define LC898122_WG_DWNSMP4 0x0114 +#define LC898122_WG_SHTMOD 0x0116 +#define LC898122_WG_SHTDLYTMR 0x0117 +#define LC898122_WG_LMT3MOD 0x0118 +#define LC898122_WG_VREFADD 0x0119 +#define LC898122_WG_HCHR 0x011B +#define LC898122_WG_GADSMP 0x011C +#define LC898122_WG_LEVADD 0x0120 +#define LC898122_WG_LEVTMRLOW 0x0121 +#define LC898122_WG_LEVTMRHGH 0x0122 +#define LC898122_WG_LEVTMR 0x0123 +#define LC898122_WG_ADJGANADD 0x0128 +#define LC898122_WG_ADJGANGXATO 0x0129 +#define LC898122_WG_ADJGANGYATO 0x012A +#define LC898122_WG_PANADDA 0x0130 +#define LC898122_WG_PANADDB 0x0131 +#define LC898122_WG_PANTRSON0 0x0132 +#define LC898122_WG_PANLEVABS 0x0133 +#define LC898122_WG_PANSTT1DWNSMP0 0x0134 +#define LC898122_WG_PANSTT1DWNSMP1 0x0135 +#define LC898122_WG_PANSTT2DWNSMP0 0x0136 +#define LC898122_WG_PANSTT2DWNSMP1 0x0137 +#define LC898122_WG_PANSTT3DWNSMP0 0x0138 +#define LC898122_WG_PANSTT3DWNSMP1 0x0139 +#define LC898122_WG_PANSTT4DWNSMP0 0x013A +#define LC898122_WG_PANSTT4DWNSMP1 0x013B +#define LC898122_WG_PANSTT2TMR0 0x013C +#define LC898122_WG_PANSTT2TMR1 0x013D +#define LC898122_WG_PANSTT4TMR0 0x013E +#define LC898122_WG_PANSTT4TMR1 0x013F +#define LC898122_WG_PANSTT21JUG0 0x0140 +#define LC898122_WG_PANSTT21JUG1 0x0141 +#define LC898122_WG_PANSTT31JUG0 0x0142 +#define LC898122_WG_PANSTT31JUG1 0x0143 +#define LC898122_WG_PANSTT41JUG0 0x0144 +#define LC898122_WG_PANSTT41JUG1 0x0145 +#define LC898122_WG_PANSTT12JUG0 0x0146 +#define LC898122_WG_PANSTT12JUG1 0x0147 +#define LC898122_WG_PANSTT13JUG0 0x0148 +#define LC898122_WG_PANSTT13JUG1 0x0149 +#define LC898122_WG_PANSTT23JUG0 0x014A +#define LC898122_WG_PANSTT23JUG1 0x014B +#define LC898122_WG_PANSTT43JUG0 0x014C +#define LC898122_WG_PANSTT43JUG1 0x014D +#define LC898122_WG_PANSTT34JUG0 0x014E +#define LC898122_WG_PANSTT34JUG1 0x014F +#define LC898122_WG_PANSTT24JUG0 0x0150 +#define LC898122_WG_PANSTT24JUG1 0x0151 +#define LC898122_WG_PANSTT42JUG0 0x0152 +#define LC898122_WG_PANSTT42JUG1 0x0153 +#define LC898122_WG_PANSTTSETGYRO 0x0154 +#define LC898122_WG_PANSTTSETGAIN 0x0155 +#define LC898122_WG_PANSTTSETISTP 0x0156 +#define LC898122_WG_PANSTTSETIFTR 0x0157 +#define LC898122_WG_PANSTTSETLFTR 0x0158 +#define LC898122_WG_PANSTTXXXTH 0x015A +#define LC898122_WG_PANSTT1LEVTMR 0x015B +#define LC898122_WG_PANSTT2LEVTMR 0x015C +#define LC898122_WG_PANSTT3LEVTMR 0x015D +#define LC898122_WG_PANSTT4LEVTMR 0x015E +#define LC898122_WG_PANSTTSETILHLD 0x015F +#define LC898122_WG_STT3MOD 0x0160 +#define LC898122_WG_STILMOD 0x0161 +#define LC898122_WG_PLAYON 0x0162 +#define LC898122_WG_NPANJ2DWNSMP 0x0163 +#define LC898122_WG_NPANTST0 0x0164 +#define LC898122_WG_NPANDWNSMP 0x0165 +#define LC898122_WG_NPANST3RTMR 0x0166 +#define LC898122_WG_NPANST12BTMR 0x0167 +#define LC898122_WG_NPANST12TMRX 0x0168 +#define LC898122_WG_NPANST12TMRY 0x0169 +#define LC898122_WG_NPANST3TMRX 0x016A +#define LC898122_WG_NPANST3TMRY 0x016B +#define LC898122_WG_NPANST4TMRX 0x016C +#define LC898122_WG_NPANST4TMRY 0x016D +#define 
LC898122_WG_NPANFUN 0x016E +#define LC898122_WG_NPANINITMR 0x016F +#define LC898122_WH_EQSWX 0x0170 +#define LC898122_WH_EQSWY 0x0171 +#define LC898122_EQSINSW 0x3C +#define LC898122_WH_DWNSMP1 0x0172 +#define LC898122_WH_G2SDLY 0x0173 +#define LC898122_WH_HOFCON 0x0174 +#define LC898122_WH_EMGSTPON 0x0178 +#define LC898122_WH_EMGSTPTMR 0x017A +#define LC898122_WH_SMTSRVON 0x017C +#define LC898122_WH_SMTSRVSMP 0x017D +#define LC898122_WH_SMTTMR 0x017E +#define LC898122_WC_SINON 0x0180 +#define LC898122_WC_SINFRQ0 0x0181 +#define LC898122_WC_SINFRQ1 0x0182 +#define LC898122_WC_SINPHSX 0x0183 +#define LC898122_WC_SINPHSY 0x0184 +#define LC898122_WC_ADMODE 0x0188 +#define LC898122_WC_CPUOPE1ADD 0x018A +#define LC898122_WC_CPUOPE2ADD 0x018B +#define LC898122_WC_RAMACCMOD 0x018C +#define LC898122_WC_RAMACCXY 0x018D +#define LC898122_WC_RAMDLYMOD0 0x018E +#define LC898122_WC_RAMDLYMOD1 0x018F +#define LC898122_WC_MESMODE 0x0190 +#define LC898122_WC_MESSINMODE 0x0191 +#define LC898122_WC_MESLOOP0 0x0192 +#define LC898122_WC_MESLOOP1 0x0193 +#define LC898122_WC_MES1ADD0 0x0194 +#define LC898122_WC_MES1ADD1 0x0195 +#define LC898122_WC_MES2ADD0 0x0196 +#define LC898122_WC_MES2ADD1 0x0197 +#define LC898122_WC_MESABS 0x0198 +#define LC898122_WC_MESWAIT 0x0199 +#define LC898122_RC_MESST 0x019D +#define LC898122_RC_MESLOOP0 0x019E +#define LC898122_RC_MESLOOP1 0x019F +#define LC898122_WC_AMJMODE 0x01A0 +#define LC898122_WC_AMJDF 0x01A1 +#define LC898122_WC_AMJLOOP0 0x01A2 +#define LC898122_WC_AMJLOOP1 0x01A3 +#define LC898122_WC_AMJIDL0 0x01A4 +#define LC898122_WC_AMJIDL1 0x01A5 +#define LC898122_WC_AMJ1ADD0 0x01A6 +#define LC898122_WC_AMJ1ADD1 0x01A7 +#define LC898122_WC_AMJ2ADD0 0x01A8 +#define LC898122_WC_AMJ2ADD1 0x01A9 +#define LC898122_RC_AMJST 0x01AC +#define LC898122_RC_AMJERROR 0x01AD +#define LC898122_RC_AMJLOOP0 0x01AE +#define LC898122_RC_AMJLOOP1 0x01AF +#define LC898122_WC_DPI1ADD0 0x01B0 +#define LC898122_WC_DPI1ADD1 0x01B1 +#define LC898122_WC_DPI2ADD0 0x01B2 +#define LC898122_WC_DPI2ADD1 0x01B3 +#define LC898122_WC_DPI3ADD0 0x01B4 +#define LC898122_WC_DPI3ADD1 0x01B5 +#define LC898122_WC_DPI4ADD0 0x01B6 +#define LC898122_WC_DPI4ADD1 0x01B7 +#define LC898122_WC_DPO1ADD0 0x01B8 +#define LC898122_WC_DPO1ADD1 0x01B9 +#define LC898122_WC_DPO2ADD0 0x01BA +#define LC898122_WC_DPO2ADD1 0x01BB +#define LC898122_WC_DPO3ADD0 0x01BC +#define LC898122_WC_DPO3ADD1 0x01BD +#define LC898122_WC_DPO4ADD0 0x01BE +#define LC898122_WC_DPO4ADD1 0x01BF +#define LC898122_WC_PINMON1 0x01C0 +#define LC898122_WC_PINMON2 0x01C1 +#define LC898122_WC_PINMON3 0x01C2 +#define LC898122_WC_PINMON4 0x01C3 +#define LC898122_WC_DLYMON10 0x01C4 +#define LC898122_WC_DLYMON11 0x01C5 +#define LC898122_WC_DLYMON20 0x01C6 +#define LC898122_WC_DLYMON21 0x01C7 +#define LC898122_WC_DLYMON30 0x01C8 +#define LC898122_WC_DLYMON31 0x01C9 +#define LC898122_WC_DLYMON40 0x01CA +#define LC898122_WC_DLYMON41 0x01CB +#define LC898122_WC_INTMSK 0x01CE +#define LC898122_WC_FRCAD 0x01D0 +#define LC898122_WC_FRCADEN 0x01D1 +#define LC898122_WC_ADRES 0x01D2 +#define LC898122_WC_TSTMON 0x01D3 +#define LC898122_WC_RAMACCTM0 0x01D4 +#define LC898122_WC_RAMACCTM1 0x01D5 +#define LC898122_WC_EQSW 0x01E0 +#define LC898122_WC_STPMV 0x01E1 +#define LC898122_WC_STPMVMOD 0x01E2 +#define LC898122_WC_DWNSMP1 0x01E3 +#define LC898122_WC_DWNSMP2 0x01E4 +#define LC898122_WC_DWNSMP3 0x01E5 +#define LC898122_WC_LEVTMP 0x01E6 +#define LC898122_WC_DIFTMP 0x01E7 +#define LC898122_WC_L10 0x01E8 +#define LC898122_WC_L11 0x01E9 +#define LC898122_RG_XPANFIL 0x01F0 
+#define LC898122_RG_YPANFIL 0x01F1 +#define LC898122_RG_XPANRAW 0x01F2 +#define LC898122_RG_YPANRAW 0x01F3 +#define LC898122_RG_LEVJUGE 0x01F4 +#define LC898122_RG_NXPANST 0x01F5 +#define LC898122_RC_RAMACC 0x01F6 +#define LC898122_RH_EMLEV 0x01F7 +#define LC898122_RH_SMTSRVSTT 0x01F8 +#define LC898122_RC_CNTPIC 0x01F9 +#define LC898122_RC_LEVDIF 0x01FA +#define LC898122_RC_FLG0 0x01FE +#define LC898122_RC_INT 0x01FF + + +/* System Register */ + +#define LC898122_CLKTST 0x020A +#define LC898122_CLKON 0x020B +#define LC898122_CLKSEL 0x020C +#define LC898122_PWMDIV 0x0210 +#define LC898122_SRVDIV 0x0211 +#define LC898122_GIFDIV 0x0212 +#define LC898122_AFPWMDIV 0x0213 +#define LC898122_OPAFDIV 0x0214 +#define LC898122_P0LEV 0x0220 +#define LC898122_P0DIR 0x0221 +#define LC898122_P0PON 0x0222 +#define LC898122_P0PUD 0x0223 +#define LC898122_IOP0SEL 0x0230 +#define LC898122_IOP1SEL 0x0231 +#define LC898122_IOP2SEL 0x0232 +#define LC898122_IOP3SEL 0x0233 +#define LC898122_IOP4SEL 0x0234 +#define LC898122_IOP5SEL 0x0235 +#define LC898122_DGINSEL 0x0236 +#define LC898122_IOP_CNT 0x0238 +#define LC898122_OUT56MON 0x0239 +#define LC898122_BSYSEL 0x0240 +#define LC898122_I2CSEL 0x0248 +#define LC898122_DLMODE 0x0249 +#define LC898122_TSTREG0 0x024E +#define LC898122_TSTREG1 0x024F +#define LC898122_STBB0 0x0250 +#define LC898122_CMSDAC0 0x0251 +#define LC898122_CMSDAC1 0x0252 +#define LC898122_OPGSEL0 0x0253 +#define LC898122_OPGSEL1 0x0254 +#define LC898122_OPGSEL2 0x0255 +#define LC898122_OSCSTOP 0x0256 +#define LC898122_OSCSET 0x0257 +#define LC898122_OSCCNTEN 0x0258 +#define LC898122_LDO_C_SET 0x0259 +#define LC898122_VGA_SW0 0x025A +#define LC898122_VGA_SW1 0x025B +#define LC898122_RSTRLSCNTL 0x025C +#define LC898122_RSTRLSCNTH 0x025D +#define LC898122_OSCCK_CNTR0 0x025E +#define LC898122_OSCCK_CNTR1 0x025F +#define LC898122_EXTCNTEN 0x0260 +#define LC898122_EXTCLKLOW 0x0261 +#define LC898122_ADCTEST 0x0262 +#define LC898122_LDSTB 0x0263 +#define LC898122_STBB1 0x0264 +#define LC898122_OSCMAMODE 0x0265 +#define LC898122_OSCTCNTL 0x0266 +#define LC898122_OSCTCNTH 0x0267 +#define LC898122_MONSELA 0x0270 +#define LC898122_MONSELB 0x0271 +#define LC898122_MONSELC 0x0272 +#define LC898122_MONSELD 0x0273 +#define LC898122_CmMonTst 0x0274 +#define LC898122_SOFTRES1 0x0278 +#define LC898122_SOFTRES2 0x0279 +#define LC898122_CVER 0x027E +#define LC898122_TESTRD 0x027F + + +/* Digital Gyro I/F Register */ +#define LC898122_GRSEL 0x0280 +#define LC898122_GRINI 0x0281 +#define LC898122_SLOWMODE 0x04 +#define LC898122_GRACC 0x0282 +#define LC898122_GRADR0 0x0283 +#define LC898122_GRADR1 0x0284 +#define LC898122_GRADR2 0x0285 +#define LC898122_GRADR3 0x0286 +#define LC898122_GRADR4 0x0287 +#define LC898122_GRADR5 0x0288 +#define LC898122_GRADR6 0x0289 +#define LC898122_GSETDT 0x028A +#define LC898122_RDSEL 0x028B +#define LC898122_REVB7 0x028C +#define LC898122_LSBF 0x028D +#define LC898122_PANAM 0x028E +#define LC898122_SPIM 0x028F +#define LC898122_GRDAT0H 0x0290 +#define LC898122_GRDAT0L 0x0291 +#define LC898122_GRDAT1H 0x0292 +#define LC898122_GRDAT1L 0x0293 +#define LC898122_GRDAT2H 0x0294 +#define LC898122_GRDAT2L 0x0295 +#define LC898122_GRDAT3H 0x0296 +#define LC898122_GRDAT3L 0x0297 +#define LC898122_GRDAT4H 0x0298 +#define LC898122_GRDAT4L 0x0299 +#define LC898122_GRDAT5H 0x029A +#define LC898122_GRDAT5L 0x029B +#define LC898122_GRDAT6H 0x029C +#define LC898122_GRDAT6L 0x029D +#define LC898122_IZAH 0x02A0 +#define LC898122_IZAL 0x02A1 +#define LC898122_IZBH 0x02A2 +#define LC898122_IZBL 0x02A3 
+#define LC898122_GRFLG0 0x02B8 +#define LC898122_GRFLG1 0x02B9 +#define LC898122_DGSTAT0 0x02C1 +#define LC898122_DGSTAT1 0x02C2 +/* USE TEST REG */ +#define LC898122_VRREG 0x02D0 + +/* Open AF Register */ + +#define LC898122_FSTMODE 0x0302 +#define LC898122_FSTCTIME 0x0303 +#define LC898122_TCODEH 0x0304 +#define LC898122_TCODEL 0x0305 +#define LC898122_LTHDH 0x0306 +#define LC898122_LTHDL 0x0307 +#define LC898122_FSTOPTION 0x0310 +#define LC898122_OPAFEN 0x0320 +#define LC898122_OPAFSW 0x0330 +#define LC898122_OPAFST 0x0335 + +#define LC898122_TREG_H 0x0380 +#define LC898122_TREG_L 0x0381 +/* 2Byte access */ +#define LC898122_RWEXD1_L 0x0396 +/* 2Byte access */ +#define LC898122_RWEXD2_L 0x0398 +/* 2Byte access */ +#define LC898122_RWEXD3_L 0x039A + +/* FILTER COEFFICIENT RAM */ +#define LC898122_gx45g 0x1000 +#define LC898122_gx45x 0x1001 +#define LC898122_gx45y 0x1002 +#define LC898122_gxgyro 0x1003 +#define LC898122_gxia 0x1004 +#define LC898122_gxib 0x1005 +#define LC898122_gxic 0x1006 +#define LC898122_gxggain 0x1007 +#define LC898122_gxigain 0x1008 +#define LC898122_gxggain2 0x1009 +#define LC898122_gx2x4xf 0x100A +#define LC898122_gxadj 0x100B +#define LC898122_gxgain 0x100C +#define LC898122_gxl3 0x100D +#define LC898122_gxhc_tmp 0x100E +#define LC898122_npxlev1 0x100F +#define LC898122_gxh1a 0x1010 +#define LC898122_gxh1b 0x1011 +#define LC898122_gxh1c 0x1012 +#define LC898122_gxh2a 0x1013 +#define LC898122_gxh2b 0x1014 +#define LC898122_gxh2c 0x1015 +#define LC898122_gxh3a 0x1016 +#define LC898122_gxh3b 0x1017 +#define LC898122_gxh3c 0x1018 +#define LC898122_gxla 0x1019 +#define LC898122_gxlb 0x101A +#define LC898122_gxlc 0x101B +#define LC898122_gxhgain 0x101C +#define LC898122_gxlgain 0x101D +#define LC898122_gxigainstp 0x101E +#define LC898122_npxlev2 0x101F +#define LC898122_gxzoom 0x1020 +#define LC898122_gx2x4xb 0x1021 +#define LC898122_gxlens 0x1022 +#define LC898122_gxta 0x1023 +#define LC898122_gxtb 0x1024 +#define LC898122_gxtc 0x1025 +#define LC898122_gxtd 0x1026 +#define LC898122_gxte 0x1027 +#define LC898122_gxlmt1H 0x1028 +#define LC898122_gxlmt3HS0 0x1029 +#define LC898122_gxlmt3HS1 0x102A +#define LC898122_gxlmt4HS0 0x102B +#define LC898122_gxlmt4HS1 0x102C +#define LC898122_gxlmt6L 0x102D +#define LC898122_gxlmt6H 0x102E +#define LC898122_npxlev3 0x102F +#define LC898122_gxj1a 0x1030 +#define LC898122_gxj1b 0x1031 +#define LC898122_gxj1c 0x1032 +#define LC898122_gxj2a 0x1033 +#define LC898122_gxj2b 0x1034 +#define LC898122_gxj2c 0x1035 +#define LC898122_gxk1a 0x1036 +#define LC898122_gxk1b 0x1037 +#define LC898122_gxk1c 0x1038 +#define LC898122_gxk2a 0x1039 +#define LC898122_gxk2b 0x103A +#define LC898122_gxk2c 0x103B +#define LC898122_gxoa 0x103C +#define LC898122_gxob 0x103D +#define LC898122_gxoc 0x103E +#define LC898122_npxlev4 0x103F +#define LC898122_MSABS1 0x1040 +#define LC898122_MSABS1AV 0x1041 +#define LC898122_MSPP1AV 0x1042 +#define LC898122_gxia_1 0x1043 +#define LC898122_gxib_1 0x1044 +#define LC898122_gxic_1 0x1045 +#define LC898122_gxia_a 0x1046 +#define LC898122_gxib_a 0x1047 +#define LC898122_gxic_a 0x1048 +#define LC898122_gxia_b 0x1049 +#define LC898122_gxib_b 0x104A +#define LC898122_gxic_b 0x104B +#define LC898122_gxia_c 0x104C +#define LC898122_gxib_c 0x104D +#define LC898122_gxic_c 0x104E +#define LC898122_Sttx12aM 0x104F +#define LC898122_MSMAX1 0x1050 +#define LC898122_MSMAX1AV 0x1051 +#define LC898122_MSCT1AV 0x1052 +#define LC898122_gxla_1 0x1053 +#define LC898122_gxlb_1 0x1054 +#define LC898122_gxlc_1 0x1055 +#define 
LC898122_gxla_a 0x1056 +#define LC898122_gxlb_a 0x1057 +#define LC898122_gxlc_a 0x1058 +#define LC898122_gxla_b 0x1059 +#define LC898122_gxlb_b 0x105A +#define LC898122_gxlc_b 0x105B +#define LC898122_gxla_c 0x105C +#define LC898122_gxlb_c 0x105D +#define LC898122_gxlc_c 0x105E +#define LC898122_Sttx12aH 0x105F +#define LC898122_MSMIN1 0x1060 +#define LC898122_MSMIN1AV 0x1061 +#define LC898122_MS1AV 0x1062 +#define LC898122_gxgyro_1 0x1063 +#define LC898122_gxgyro_1d 0x1064 +#define LC898122_gxgyro_1u 0x1065 +#define LC898122_gxgyro_a 0x1066 +#define LC898122_gxgyro_2d 0x1067 +#define LC898122_gxgyro_2u 0x1068 +#define LC898122_gxgyro_b 0x1069 +#define LC898122_gxgyro_3d 0x106A +#define LC898122_gxgyro_3u 0x106B +#define LC898122_gxgyro_c 0x106C +#define LC898122_gxgyro_4d 0x106D +#define LC898122_gxgyro_4u 0x106E +#define LC898122_Sttx12bM 0x106F +#define LC898122_HOStp 0x1070 +#define LC898122_HOMin 0x1071 +#define LC898122_HOMax 0x1072 +#define LC898122_gxgain_1 0x1073 +#define LC898122_gxgain_1d 0x1074 +#define LC898122_gxgain_1u 0x1075 +#define LC898122_gxgain_a 0x1076 +#define LC898122_gxgain_2d 0x1077 +#define LC898122_gxgain_2u 0x1078 +#define LC898122_gxgain_b 0x1079 +#define LC898122_gxgain_3d 0x107A +#define LC898122_gxgain_3u 0x107B +#define LC898122_gxgain_c 0x107C +#define LC898122_gxgain_4d 0x107D +#define LC898122_gxgain_4u 0x107E +#define LC898122_Sttx12bH 0x107F +#define LC898122_HBStp 0x1080 +#define LC898122_HBMin 0x1081 +#define LC898122_HBMax 0x1082 +#define LC898122_gxistp_1 0x1083 +#define LC898122_gxistp_1d 0x1084 +#define LC898122_gxistp_1u 0x1085 +#define LC898122_gxistp_a 0x1086 +#define LC898122_gxistp_2d 0x1087 +#define LC898122_gxistp_2u 0x1088 +#define LC898122_gxistp_b 0x1089 +#define LC898122_gxistp_3d 0x108A +#define LC898122_gxistp_3u 0x108B +#define LC898122_gxistp_c 0x108C +#define LC898122_gxistp_4d 0x108D +#define LC898122_gxistp_4u 0x108E +#define LC898122_Sttx34aM 0x108F +#define LC898122_LGStp 0x1090 +#define LC898122_LGMin 0x1091 +#define LC898122_LGMax 0x1092 +#define LC898122_gxistp 0x1093 +#define LC898122_gxadjmin 0x1094 +#define LC898122_gxadjmax 0x1095 +#define LC898122_gxadjdn 0x1096 +#define LC898122_gxadjup 0x1097 +#define LC898122_gxog3 0x1098 +#define LC898122_gxog5 0x1099 +#define LC898122_gxog7 0x109A +#define LC898122_npxlev8 0x109B +#define LC898122_sxlmtb1 0x109C +#define LC898122_SttxaL 0x109D +#define LC898122_SttxbL 0x109E +#define LC898122_Sttx34aH 0x109F +#define LC898122_sxlmtb2 0x10A0 +#define LC898122_pxmaa 0x10A1 +#define LC898122_pxmab 0x10A2 +#define LC898122_pxmac 0x10A3 +#define LC898122_pxmba 0x10A4 +#define LC898122_pxmbb 0x10A5 +#define LC898122_pxmbc 0x10A6 +#define LC898122_gxma 0x10A7 +#define LC898122_gxmb 0x10A8 +#define LC898122_gxmc 0x10A9 +#define LC898122_gxmg 0x10AA +#define LC898122_gxleva 0x10AB +#define LC898122_gxlevb 0x10AC +#define LC898122_gxlevc 0x10AD +#define LC898122_gxlevlow 0x10AE +#define LC898122_Sttx34bM 0x10AF +#define LC898122_sxria 0x10B0 +#define LC898122_sxrib 0x10B1 +#define LC898122_sxric 0x10B2 +#define LC898122_sxinx 0x10B3 +#define LC898122_sxiny 0x10B4 +#define LC898122_sxggf 0x10B5 +#define LC898122_sxag 0x10B6 +#define LC898122_sxpr 0x10B7 +#define LC898122_sxgx 0x10B8 +#define LC898122_sxgy 0x10B9 +#define LC898122_sxiexp3 0x10BA +#define LC898122_sxiexp2 0x10BB +#define LC898122_sxiexp1 0x10BC +#define LC898122_sxiexp0 0x10BD +#define LC898122_sxiexp 0x10BE +#define LC898122_Sttx34bH 0x10BF +#define LC898122_sxda 0x10C0 +#define LC898122_sxdb 0x10C1 +#define LC898122_sxdc 
0x10C2 +#define LC898122_sxea 0x10C3 +#define LC898122_sxeb 0x10C4 +#define LC898122_sxec 0x10C5 +#define LC898122_sxua 0x10C6 +#define LC898122_sxub 0x10C7 +#define LC898122_sxuc 0x10C8 +#define LC898122_sxia 0x10C9 +#define LC898122_sxib 0x10CA +#define LC898122_sxic 0x10CB +#define LC898122_sxja 0x10CC +#define LC898122_sxjb 0x10CD +#define LC898122_sxjc 0x10CE +#define LC898122_npxlev1_i 0x10CF +#define LC898122_sxfa 0x10D0 +#define LC898122_sxfb 0x10D1 +#define LC898122_sxfc 0x10D2 +#define LC898122_sxg 0x10D3 +#define LC898122_sxg2 0x10D4 +#define LC898122_sxsin 0x10D5 +#define LC898122_sxggf_tmp 0x10D6 +#define LC898122_sxsa 0x10D7 +#define LC898122_sxsb 0x10D8 +#define LC898122_sxsc 0x10D9 +#define LC898122_sxoa 0x10DA +#define LC898122_sxob 0x10DB +#define LC898122_sxoc 0x10DC +#define LC898122_sxod 0x10DD +#define LC898122_sxoe 0x10DE +#define LC898122_npxlev2_i 0x10DF +#define LC898122_sxpa 0x10E0 +#define LC898122_sxpb 0x10E1 +#define LC898122_sxpc 0x10E2 +#define LC898122_sxpd 0x10E3 +#define LC898122_sxpe 0x10E4 +#define LC898122_sxq 0x10E5 +#define LC898122_sxlmta1 0x10E6 +#define LC898122_sxlmta2 0x10E7 +#define LC898122_smxga 0x10E8 +#define LC898122_smxgb 0x10E9 +#define LC898122_smxa 0x10EA +#define LC898122_smxb 0x10EB +#define LC898122_sxemglev 0x10EC +#define LC898122_sxsmtav 0x10ED +#define LC898122_sxsmtstp 0x10EE +#define LC898122_npxlev3_i 0x10EF +#define LC898122_mes1aa 0x10F0 +#define LC898122_mes1ab 0x10F1 +#define LC898122_mes1ac 0x10F2 +#define LC898122_mes1ad 0x10F3 +#define LC898122_mes1ae 0x10F4 +#define LC898122_mes1ba 0x10F5 +#define LC898122_mes1bb 0x10F6 +#define LC898122_mes1bc 0x10F7 +#define LC898122_mes1bd 0x10F8 +#define LC898122_mes1be 0x10F9 +#define LC898122_sxoexp3 0x10FA +#define LC898122_sxoexp2 0x10FB +#define LC898122_sxoexp1 0x10FC +#define LC898122_sxoexp0 0x10FD +#define LC898122_sxoexp 0x10FE +#define LC898122_npxlev4_i 0x10FF +#define LC898122_gy45g 0x1100 +#define LC898122_gy45y 0x1101 +#define LC898122_gy45x 0x1102 +#define LC898122_gygyro 0x1103 +#define LC898122_gyia 0x1104 +#define LC898122_gyib 0x1105 +#define LC898122_gyic 0x1106 +#define LC898122_gyggain 0x1107 +#define LC898122_gyigain 0x1108 +#define LC898122_gyggain2 0x1109 +#define LC898122_gy2x4xf 0x110A +#define LC898122_gyadj 0x110B +#define LC898122_gygain 0x110C +#define LC898122_gyl3 0x110D +#define LC898122_gyhc_tmp 0x110E +#define LC898122_npylev1 0x110F +#define LC898122_gyh1a 0x1110 +#define LC898122_gyh1b 0x1111 +#define LC898122_gyh1c 0x1112 +#define LC898122_gyh2a 0x1113 +#define LC898122_gyh2b 0x1114 +#define LC898122_gyh2c 0x1115 +#define LC898122_gyh3a 0x1116 +#define LC898122_gyh3b 0x1117 +#define LC898122_gyh3c 0x1118 +#define LC898122_gyla 0x1119 +#define LC898122_gylb 0x111A +#define LC898122_gylc 0x111B +#define LC898122_gyhgain 0x111C +#define LC898122_gylgain 0x111D +#define LC898122_gyigainstp 0x111E +#define LC898122_npylev2 0x111F +#define LC898122_gyzoom 0x1120 +#define LC898122_gy2x4xb 0x1121 +#define LC898122_gylens 0x1122 +#define LC898122_gyta 0x1123 +#define LC898122_gytb 0x1124 +#define LC898122_gytc 0x1125 +#define LC898122_gytd 0x1126 +#define LC898122_gyte 0x1127 +#define LC898122_gylmt1H 0x1128 +#define LC898122_gylmt3HS0 0x1129 +#define LC898122_gylmt3HS1 0x112A +#define LC898122_gylmt4HS0 0x112B +#define LC898122_gylmt4HS1 0x112C +#define LC898122_gylmt6L 0x112D +#define LC898122_gylmt6H 0x112E +#define LC898122_npylev3 0x112F +#define LC898122_gyj1a 0x1130 +#define LC898122_gyj1b 0x1131 +#define LC898122_gyj1c 0x1132 +#define 
LC898122_gyj2a 0x1133 +#define LC898122_gyj2b 0x1134 +#define LC898122_gyj2c 0x1135 +#define LC898122_gyk1a 0x1136 +#define LC898122_gyk1b 0x1137 +#define LC898122_gyk1c 0x1138 +#define LC898122_gyk2a 0x1139 +#define LC898122_gyk2b 0x113A +#define LC898122_gyk2c 0x113B +#define LC898122_gyoa 0x113C +#define LC898122_gyob 0x113D +#define LC898122_gyoc 0x113E +#define LC898122_npylev4 0x113F +#define LC898122_MSABS2 0x1140 +#define LC898122_MSABS2AV 0x1141 +#define LC898122_MSPP2AV 0x1142 +#define LC898122_gyia_1 0x1143 +#define LC898122_gyib_1 0x1144 +#define LC898122_gyic_1 0x1145 +#define LC898122_gyia_a 0x1146 +#define LC898122_gyib_a 0x1147 +#define LC898122_gyic_a 0x1148 +#define LC898122_gyia_b 0x1149 +#define LC898122_gyib_b 0x114A +#define LC898122_gyic_b 0x114B +#define LC898122_gyia_c 0x114C +#define LC898122_gyib_c 0x114D +#define LC898122_gyic_c 0x114E +#define LC898122_Stty12aM 0x114F +#define LC898122_MSMAX2 0x1150 +#define LC898122_MSMAX2AV 0x1151 +#define LC898122_MSCT2AV 0x1152 +#define LC898122_gyla_1 0x1153 +#define LC898122_gylb_1 0x1154 +#define LC898122_gylc_1 0x1155 +#define LC898122_gyla_a 0x1156 +#define LC898122_gylb_a 0x1157 +#define LC898122_gylc_a 0x1158 +#define LC898122_gyla_b 0x1159 +#define LC898122_gylb_b 0x115A +#define LC898122_gylc_b 0x115B +#define LC898122_gyla_c 0x115C +#define LC898122_gylb_c 0x115D +#define LC898122_gylc_c 0x115E +#define LC898122_Stty12aH 0x115F +#define LC898122_MSMIN2 0x1160 +#define LC898122_MSMIN2AV 0x1161 +#define LC898122_MS2AV 0x1162 +#define LC898122_gygyro_1 0x1163 +#define LC898122_gygyro_1d 0x1164 +#define LC898122_gygyro_1u 0x1165 +#define LC898122_gygyro_a 0x1166 +#define LC898122_gygyro_2d 0x1167 +#define LC898122_gygyro_2u 0x1168 +#define LC898122_gygyro_b 0x1169 +#define LC898122_gygyro_3d 0x116A +#define LC898122_gygyro_3u 0x116B +#define LC898122_gygyro_c 0x116C +#define LC898122_gygyro_4d 0x116D +#define LC898122_gygyro_4u 0x116E +#define LC898122_Stty12bM 0x116F +#define LC898122_GGStp 0x1170 +#define LC898122_GGMin 0x1171 +#define LC898122_GGMax 0x1172 +#define LC898122_gygain_1 0x1173 +#define LC898122_gygain_1d 0x1174 +#define LC898122_gygain_1u 0x1175 +#define LC898122_gygain_a 0x1176 +#define LC898122_gygain_2d 0x1177 +#define LC898122_gygain_2u 0x1178 +#define LC898122_gygain_b 0x1179 +#define LC898122_gygain_3d 0x117A +#define LC898122_gygain_3u 0x117B +#define LC898122_gygain_c 0x117C +#define LC898122_gygain_4d 0x117D +#define LC898122_gygain_4u 0x117E +#define LC898122_Stty12bH 0x117F +#define LC898122_GGStp2 0x1180 +#define LC898122_GGMin2 0x1181 +#define LC898122_GGMax2 0x1182 +#define LC898122_gyistp_1 0x1183 +#define LC898122_gyistp_1d 0x1184 +#define LC898122_gyistp_1u 0x1185 +#define LC898122_gyistp_a 0x1186 +#define LC898122_gyistp_2d 0x1187 +#define LC898122_gyistp_2u 0x1188 +#define LC898122_gyistp_b 0x1189 +#define LC898122_gyistp_3d 0x118A +#define LC898122_gyistp_3u 0x118B +#define LC898122_gyistp_c 0x118C +#define LC898122_gyistp_4d 0x118D +#define LC898122_gyistp_4u 0x118E +#define LC898122_Stty34aM 0x118F +#define LC898122_vma 0x1190 +#define LC898122_vmb 0x1191 +#define LC898122_vmc 0x1192 +#define LC898122_gyistp 0x1193 +#define LC898122_gyadjmin 0x1194 +#define LC898122_gyadjmax 0x1195 +#define LC898122_gyadjdn 0x1196 +#define LC898122_gyadjup 0x1197 +#define LC898122_gyog3 0x1198 +#define LC898122_gyog5 0x1199 +#define LC898122_gyog7 0x119A +#define LC898122_npylev8 0x119B +#define LC898122_sylmtb1 0x119C +#define LC898122_SttyaL 0x119D +#define LC898122_SttybL 0x119E +#define 
LC898122_Stty34aH 0x119F +#define LC898122_sylmtb2 0x11A0 +#define LC898122_pymaa 0x11A1 +#define LC898122_pymab 0x11A2 +#define LC898122_pymac 0x11A3 +#define LC898122_pymba 0x11A4 +#define LC898122_pymbb 0x11A5 +#define LC898122_pymbc 0x11A6 +#define LC898122_gyma 0x11A7 +#define LC898122_gymb 0x11A8 +#define LC898122_gymc 0x11A9 +#define LC898122_gymg 0x11AA +#define LC898122_gyleva 0x11AB +#define LC898122_gylevb 0x11AC +#define LC898122_gylevc 0x11AD +#define LC898122_gylevlow 0x11AE +#define LC898122_Stty34bM 0x11AF +#define LC898122_syria 0x11B0 +#define LC898122_syrib 0x11B1 +#define LC898122_syric 0x11B2 +#define LC898122_syiny 0x11B3 +#define LC898122_syinx 0x11B4 +#define LC898122_syggf 0x11B5 +#define LC898122_syag 0x11B6 +#define LC898122_sypr 0x11B7 +#define LC898122_sygy 0x11B8 +#define LC898122_sygx 0x11B9 +#define LC898122_syiexp3 0x11BA +#define LC898122_syiexp2 0x11BB +#define LC898122_syiexp1 0x11BC +#define LC898122_syiexp0 0x11BD +#define LC898122_syiexp 0x11BE +#define LC898122_Stty34bH 0x11BF +#define LC898122_syda 0x11C0 +#define LC898122_sydb 0x11C1 +#define LC898122_sydc 0x11C2 +#define LC898122_syea 0x11C3 +#define LC898122_syeb 0x11C4 +#define LC898122_syec 0x11C5 +#define LC898122_syua 0x11C6 +#define LC898122_syub 0x11C7 +#define LC898122_syuc 0x11C8 +#define LC898122_syia 0x11C9 +#define LC898122_syib 0x11CA +#define LC898122_syic 0x11CB +#define LC898122_syja 0x11CC +#define LC898122_syjb 0x11CD +#define LC898122_syjc 0x11CE +#define LC898122_npylev1_i 0x11CF +#define LC898122_syfa 0x11D0 +#define LC898122_syfb 0x11D1 +#define LC898122_syfc 0x11D2 +#define LC898122_syg 0x11D3 +#define LC898122_syg2 0x11D4 +#define LC898122_sysin 0x11D5 +#define LC898122_syggf_tmp 0x11D6 +#define LC898122_sysa 0x11D7 +#define LC898122_sysb 0x11D8 +#define LC898122_sysc 0x11D9 +#define LC898122_syoa 0x11DA +#define LC898122_syob 0x11DB +#define LC898122_syoc 0x11DC +#define LC898122_syod 0x11DD +#define LC898122_syoe 0x11DE +#define LC898122_npylev2_i 0x11DF +#define LC898122_sypa 0x11E0 +#define LC898122_sypb 0x11E1 +#define LC898122_sypc 0x11E2 +#define LC898122_sypd 0x11E3 +#define LC898122_sype 0x11E4 +#define LC898122_syq 0x11E5 +#define LC898122_sylmta1 0x11E6 +#define LC898122_sylmta2 0x11E7 +#define LC898122_smyga 0x11E8 +#define LC898122_smygb 0x11E9 +#define LC898122_smya 0x11EA +#define LC898122_smyb 0x11EB +#define LC898122_syemglev 0x11EC +#define LC898122_sysmtav 0x11ED +#define LC898122_sysmtstp 0x11EE +#define LC898122_npylev3_i 0x11EF +#define LC898122_mes2aa 0x11F0 +#define LC898122_mes2ab 0x11F1 +#define LC898122_mes2ac 0x11F2 +#define LC898122_mes2ad 0x11F3 +#define LC898122_mes2ae 0x11F4 +#define LC898122_mes2ba 0x11F5 +#define LC898122_mes2bb 0x11F6 +#define LC898122_mes2bc 0x11F7 +#define LC898122_mes2bd 0x11F8 +#define LC898122_mes2be 0x11F9 +#define LC898122_syoexp3 0x11FA +#define LC898122_syoexp2 0x11FB +#define LC898122_syoexp1 0x11FC +#define LC898122_syoexp0 0x11FD +#define LC898122_syoexp 0x11FE +#define LC898122_npylev4_i 0x11FF +#define LC898122_afsin 0x1200 +#define LC898122_afing 0x1201 +#define LC898122_afstmg 0x1202 +#define LC898122_afag 0x1203 +#define LC898122_afda 0x1204 +#define LC898122_afdb 0x1205 +#define LC898122_afdc 0x1206 +#define LC898122_afea 0x1207 +#define LC898122_afeb 0x1208 +#define LC898122_afec 0x1209 +#define LC898122_afua 0x120A +#define LC898122_afub 0x120B +#define LC898122_afuc 0x120C +#define LC898122_afia 0x120D +#define LC898122_afib 0x120E +#define LC898122_afic 0x120F +#define LC898122_afja 0x1210 +#define 
LC898122_afjb 0x1211 +#define LC898122_afjc 0x1212 +#define LC898122_affa 0x1213 +#define LC898122_affb 0x1214 +#define LC898122_affc 0x1215 +#define LC898122_afg 0x1216 +#define LC898122_afg2 0x1217 +#define LC898122_afpa 0x1218 +#define LC898122_afpb 0x1219 +#define LC898122_afpc 0x121A +#define LC898122_afpd 0x121B +#define LC898122_afpe 0x121C +#define LC898122_afstma 0x121D +#define LC898122_afstmb 0x121E +#define LC898122_afstmc 0x121F +#define LC898122_aflmt 0x1220 +#define LC898122_aflmt2 0x1221 +#define LC898122_afssmv1 0x1222 +#define LC898122_afssmv2 0x1223 +#define LC898122_afsjlev 0x1224 +#define LC898122_afsjdif 0x1225 +#define LC898122_SttxHis 0x1226 +#define LC898122_tmpa 0x1227 +#define LC898122_af_cc 0x1228 +#define LC898122_a_df 0x1229 +#define LC898122_b_df 0x122A +#define LC898122_c_df 0x122B +#define LC898122_d_df 0x122C +#define LC898122_e_df 0x122D +#define LC898122_f_df 0x122E +#define LC898122_pi 0x122F +#define LC898122_msmean 0x1230 +#define LC898122_vmlevhis 0x1231 +#define LC898122_vmlev 0x1232 +#define LC898122_vmtl 0x1233 +#define LC898122_vmth 0x1234 +#define LC898122_st1mean 0x1235 +#define LC898122_st2mean 0x1236 +#define LC898122_st3mean 0x1237 +#define LC898122_st4mean 0x1238 +#define LC898122_dm1g 0x1239 +#define LC898122_dm2g 0x123A +#define LC898122_dm3g 0x123B +#define LC898122_dm4g 0x123C +#define LC898122_zero 0x123D +#define LC898122_com10 0x123E +#define LC898122_cop10 0x123F + +/* FILTER DELAY RAM */ +#define LC898122_SINXZ 0x1400 +#define LC898122_GX45Z 0x1401 +#define LC898122_GXINZ 0x1402 +#define LC898122_GXI1Z1 0x1403 +#define LC898122_GXI1Z2 0x1404 +#define LC898122_GXI2Z1 0x1405 +#define LC898122_GXI2Z2 0x1406 +#define LC898122_GXMZ1 0x1407 +#define LC898122_GXMZ2 0x1408 +#define LC898122_GXIZ 0x1409 +#define LC898122_GXXFZ 0x140A +#define LC898122_GXADJZ 0x140B +#define LC898122_GXGAINZ 0x140C +#define LC898122_GXLEV1Z1 0x140D +#define LC898122_GXLEV1Z2 0x140E +#define LC898122_TMPX 0x140F +#define LC898122_SXDOFFZ2 0x1410 +#define LC898122_GXH1Z1 0x1411 +#define LC898122_GXH1Z2 0x1412 +#define LC898122_GXH2Z1 0x1414 +#define LC898122_GXH2Z2 0x1415 +#define LC898122_GXLEV2Z1 0x1416 +#define LC898122_GXH3Z1 0x1417 +#define LC898122_GXH3Z2 0x1418 +#define LC898122_GXL1Z1 0x1419 +#define LC898122_GXL1Z2 0x141A +#define LC898122_GXL2Z1 0x141B +#define LC898122_GXL2Z2 0x141C +#define LC898122_GXL3Z 0x141D +#define LC898122_GXLZ 0x141E +#define LC898122_GXI3Z 0x141F +#define LC898122_GXZOOMZ 0x1420 +#define LC898122_GXXBZ 0x1421 +#define LC898122_GXLENSZ 0x1422 +#define LC898122_GXLMT3Z 0x1423 +#define LC898122_GXTZ1 0x1424 +#define LC898122_GXTZ2 0x1425 +#define LC898122_GXTZ3 0x1426 +#define LC898122_GXTZ4 0x1427 +#define LC898122_GX2SXZ 0x1428 +#define LC898122_SXOVRZ 0x1429 +#define LC898122_PXAMZ 0x142A +#define LC898122_PXMAZ1 0x142B +#define LC898122_PXMAZ2 0x142C +#define LC898122_PXBMZ 0x142D +#define LC898122_PXMBZ1 0x142E +#define LC898122_PXMBZ2 0x142F +#define LC898122_DAXHLOtmp 0x1430 +#define LC898122_GXJ1Z1 0x1431 +#define LC898122_GXJ1Z2 0x1432 +#define LC898122_SXINZ1 0x1433 +#define LC898122_GXJ2Z1 0x1434 +#define LC898122_GXJ2Z2 0x1435 +#define LC898122_SXINZ2 0x1436 +#define LC898122_GXK1Z1 0x1437 +#define LC898122_GXK1Z2 0x1438 +#define LC898122_SXTRZ 0x1439 +#define LC898122_GXK2Z1 0x143A +#define LC898122_GXK2Z2 0x143B +#define LC898122_SXIEXPZ 0x143C +#define LC898122_GXOZ1 0x143D +#define LC898122_GXOZ2 0x143E +#define LC898122_GXLEV2Z2 0x143F +#define LC898122_AD0Z 0x1440 +#define LC898122_SXRIZ1 0x1441 +#define 
LC898122_SXRIZ2 0x1442 +#define LC898122_SXAGZ 0x1443 +#define LC898122_SXSMTZ 0x1444 +#define LC898122_MES1AZ1 0x1445 +#define LC898122_MES1AZ2 0x1446 +#define LC898122_MES1AZ3 0x1447 +#define LC898122_MES1AZ4 0x1448 +#define LC898122_SXTRZ1 0x1449 +#define LC898122_AD2Z 0x144A +#define LC898122_MES1BZ1 0x144B +#define LC898122_MES1BZ2 0x144C +#define LC898122_MES1BZ3 0x144D +#define LC898122_MES1BZ4 0x144E +#define LC898122_AD4Z 0x144F +#define LC898122_OFF0Z 0x1450 +#define LC898122_SXDZ1 0x1451 +#define LC898122_SXDZ2 0x1452 +#define LC898122_NPXDIFZ 0x1453 +#define LC898122_SXEZ1 0x1454 +#define LC898122_SXEZ2 0x1455 +#define LC898122_SX2HXZ2 0x1456 +#define LC898122_SXUZ1 0x1457 +#define LC898122_SXUZ2 0x1458 +#define LC898122_SXTRZ2 0x1459 +#define LC898122_OFF2Z 0x145A +#define LC898122_SXIZ1 0x145B +#define LC898122_SXIZ2 0x145C +#define LC898122_SXJZ1 0x145D +#define LC898122_SXJZ2 0x145E +#define LC898122_OFF4Z 0x145F +#define LC898122_AD0OFFZ 0x1460 +#define LC898122_SXOFFZ1 0x1461 +#define LC898122_SXOFFZ2 0x1462 +#define LC898122_SXFZ 0x1463 +#define LC898122_SXGZ 0x1464 +#define LC898122_NPXTMPZ 0x1465 +#define LC898122_SXG3Z 0x1466 +#define LC898122_SXSZ1 0x1467 +#define LC898122_SXSZ2 0x1468 +#define LC898122_SXTRZ3 0x1469 +#define LC898122_AD2OFFZ 0x146A +#define LC898122_SXOZ1 0x146B +#define LC898122_SXOZ2 0x146C +#define LC898122_SXOZ3 0x146D +#define LC898122_SXOZ4 0x146E +#define LC898122_AD4OFFZ 0x146F +#define LC898122_SXDOFFZ 0x1470 +#define LC898122_SXPZ1 0x1471 +#define LC898122_SXPZ2 0x1472 +#define LC898122_SXPZ3 0x1473 +#define LC898122_SXPZ4 0x1474 +#define LC898122_SXQZ 0x1475 +#define LC898122_SXOEXPZ 0x1476 +#define LC898122_SXLMT 0x1477 +#define LC898122_SX2HXZ 0x1478 +#define LC898122_DAXHLO 0x1479 +#define LC898122_DAXHLB 0x147A +#define LC898122_TMPX2 0x147B +#define LC898122_TMPX3 0x147C +#define LC898122_SINYZ 0x1480 +#define LC898122_GY45Z 0x1481 +#define LC898122_GYINZ 0x1482 +#define LC898122_GYI1Z1 0x1483 +#define LC898122_GYI1Z2 0x1484 +#define LC898122_GYI2Z1 0x1485 +#define LC898122_GYI2Z2 0x1486 +#define LC898122_GYMZ1 0x1487 +#define LC898122_GYMZ2 0x1488 +#define LC898122_GYIZ 0x1489 +#define LC898122_GYXFZ 0x148A +#define LC898122_GYADJZ 0x148B +#define LC898122_GYGAINZ 0x148C +#define LC898122_GYLEV1Z1 0x148D +#define LC898122_GYLEV1Z2 0x148E +#define LC898122_TMPY 0x148F +#define LC898122_SYDOFFZ2 0x1490 +#define LC898122_GYH1Z1 0x1491 +#define LC898122_GYH1Z2 0x1492 +#define LC898122_GYH2Z1 0x1494 +#define LC898122_GYH2Z2 0x1495 +#define LC898122_GYLEV2Z1 0x1496 +#define LC898122_GYH3Z1 0x1497 +#define LC898122_GYH3Z2 0x1498 +#define LC898122_GYL1Z1 0x1499 +#define LC898122_GYL1Z2 0x149A +#define LC898122_GYL2Z1 0x149B +#define LC898122_GYL2Z2 0x149C +#define LC898122_GYL3Z 0x149D +#define LC898122_GYLZ 0x149E +#define LC898122_GYI3Z 0x149F +#define LC898122_GYZOOMZ 0x14A0 +#define LC898122_GYXBZ 0x14A1 +#define LC898122_GYLENSZ 0x14A2 +#define LC898122_GYLMT3Z 0x14A3 +#define LC898122_GYTZ1 0x14A4 +#define LC898122_GYTZ2 0x14A5 +#define LC898122_GYTZ3 0x14A6 +#define LC898122_GYTZ4 0x14A7 +#define LC898122_GY2SYZ 0x14A8 +#define LC898122_SYOVRZ 0x14A9 +#define LC898122_PYAMZ 0x14AA +#define LC898122_PYMAZ1 0x14AB +#define LC898122_PYMAZ2 0x14AC +#define LC898122_PYBMZ 0x14AD +#define LC898122_PYMBZ1 0x14AE +#define LC898122_PYMBZ2 0x14AF +#define LC898122_DAYHLOtmp 0x14B0 +#define LC898122_GYJ1Z1 0x14B1 +#define LC898122_GYJ1Z2 0x14B2 +#define LC898122_SYINZ1 0x14B3 +#define LC898122_GYJ2Z1 0x14B4 +#define LC898122_GYJ2Z2 0x14B5 
+#define LC898122_SYINZ2 0x14B6 +#define LC898122_GYK1Z1 0x14B7 +#define LC898122_GYK1Z2 0x14B8 +#define LC898122_SYTRZ 0x14B9 +#define LC898122_GYK2Z1 0x14BA +#define LC898122_GYK2Z2 0x14BB +#define LC898122_SYIEXPZ 0x14BC +#define LC898122_GYOZ1 0x14BD +#define LC898122_GYOZ2 0x14BE +#define LC898122_GYLEV2Z2 0x14BF +#define LC898122_AD1Z 0x14C0 +#define LC898122_SYRIZ1 0x14C1 +#define LC898122_SYRIZ2 0x14C2 +#define LC898122_SYAGZ 0x14C3 +#define LC898122_SYSMTZ 0x14C4 +#define LC898122_MES2AZ1 0x14C5 +#define LC898122_MES2AZ2 0x14C6 +#define LC898122_MES2AZ3 0x14C7 +#define LC898122_MES2AZ4 0x14C8 +#define LC898122_SYTRZ1 0x14C9 +#define LC898122_AD3Z 0x14CA +#define LC898122_MES2BZ1 0x14CB +#define LC898122_MES2BZ2 0x14CC +#define LC898122_MES2BZ3 0x14CD +#define LC898122_MES2BZ4 0x14CE +#define LC898122_AD5Z 0x14CF +#define LC898122_OFF1Z 0x14D0 +#define LC898122_SYDZ1 0x14D1 +#define LC898122_SYDZ2 0x14D2 +#define LC898122_NPYDIFZ 0x14D3 +#define LC898122_SYEZ1 0x14D4 +#define LC898122_SYEZ2 0x14D5 +#define LC898122_SY2HYZ2 0x14D6 +#define LC898122_SYUZ1 0x14D7 +#define LC898122_SYUZ2 0x14D8 +#define LC898122_SYTRZ2 0x14D9 +#define LC898122_OFF3Z 0x14DA +#define LC898122_SYIZ1 0x14DB +#define LC898122_SYIZ2 0x14DC +#define LC898122_SYJZ1 0x14DD +#define LC898122_SYJZ2 0x14DE +#define LC898122_OFF5Z 0x14DF +#define LC898122_AD1OFFZ 0x14E0 +#define LC898122_SYOFFZ1 0x14E1 +#define LC898122_SYOFFZ2 0x14E2 +#define LC898122_SYFZ 0x14E3 +#define LC898122_SYGZ 0x14E4 +#define LC898122_NPYTMPZ 0x14E5 +#define LC898122_SYG3Z 0x14E6 +#define LC898122_SYSZ1 0x14E7 +#define LC898122_SYSZ2 0x14E8 +#define LC898122_SYTRZ3 0x14E9 +#define LC898122_AD3OFFZ 0x14EA +#define LC898122_SYOZ1 0x14EB +#define LC898122_SYOZ2 0x14EC +#define LC898122_SYOZ3 0x14ED +#define LC898122_SYOZ4 0x14EE +#define LC898122_AD5OFFZ 0x14EF +#define LC898122_SYDOFFZ 0x14F0 +#define LC898122_SYPZ1 0x14F1 +#define LC898122_SYPZ2 0x14F2 +#define LC898122_SYPZ3 0x14F3 +#define LC898122_SYPZ4 0x14F4 +#define LC898122_SYQZ 0x14F5 +#define LC898122_SYOEXPZ 0x14F6 +#define LC898122_SYLMT 0x14F7 +#define LC898122_SY2HYZ 0x14F8 +#define LC898122_DAYHLO 0x14F9 +#define LC898122_DAYHLB 0x14FA +#define LC898122_TMPY2 0x14FB +#define LC898122_TMPY3 0x14FC +#define LC898122_AFSINZ 0x1500 +#define LC898122_AFDIFTMP 0x1501 +#define LC898122_AFINZ 0x1502 +#define LC898122_AFINZ2 0x1503 +#define LC898122_AFAGZ 0x1504 +#define LC898122_AFDZ1 0x1505 +#define LC898122_AFDZ2 0x1506 +#define LC898122_AFSTMGTSS 0x1507 +#define LC898122_AFEZ1 0x1508 +#define LC898122_AFEZ2 0x1509 +#define LC898122_OFSTAFZ 0x150A +#define LC898122_AFUZ1 0x150B +#define LC898122_AFUZ2 0x150C +#define LC898122_AD4OFFZ2 0x150D +#define LC898122_AFIZ1 0x150E +#define LC898122_AFIZ2 0x150F +#define LC898122_OFF6Z 0x1510 +#define LC898122_AFJZ1 0x1511 +#define LC898122_AFJZ2 0x1512 +#define LC898122_AFSTMTGT 0x1513 +#define LC898122_AFSTMSTP 0x1514 +#define LC898122_AFSTMTGTtmp 0x1515 +#define LC898122_AFFZ 0x1516 +#define LC898122_AFGZ 0x1517 +#define LC898122_AFG3Z 0x1518 +#define LC898122_AFPZ1 0x1519 +#define LC898122_AFPZ2 0x151A +#define LC898122_AFPZ3 0x151B +#define LC898122_AFPZ4 0x151C +#define LC898122_AFLMTZ 0x151D +#define LC898122_AF2PWM 0x151E +#define LC898122_AFSTMZ2 0x151F +#define LC898122_VMXYZ 0x1520 +#define LC898122_VMZ1 0x1521 +#define LC898122_VMZ2 0x1522 +#define LC898122_OAFTHL 0x1524 +#define LC898122_PR 0x1525 +#define LC898122_AFRATO1 0x1526 +#define LC898122_ADRATO2 0x1527 +#define LC898122_AFRATO3 0x1528 +#define LC898122_DAZHLO 0x1529 
+#define LC898122_DAZHLB 0x152A
+#define LC898122_AFL1Z 0x152B
+#define LC898122_AFL2Z 0x152C
+#define LC898122_AFDFZ 0x152D
+#define LC898122_pi_L1 0x152E
+#define LC898122_pi_L2 0x152F
+
diff --git a/drivers/media/i2c/lc898122/lc898122.c b/drivers/media/i2c/lc898122/lc898122.c
new file mode 100644
index 000000000000..41cff0027f87
--- /dev/null
+++ b/drivers/media/i2c/lc898122/lc898122.c
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2015 - 2018 Intel Corporation
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "lc898122.h"
+#include "lc898122-oisinit.h"
+#include "lc898122-regs.h"
+
+#define to_lc898122_vcm(_ctrl) \
+	container_of((_ctrl)->handler, struct lc898122_device, ctrls_vcm)
+
+#define to_lc898122_sensor(_subdev_vcm) \
+	container_of(_subdev_vcm, struct lc898122_device, subdev_vcm)
+
+int lc898122_write_byte(struct i2c_client *client, u16 reg, u8 val)
+{
+	struct i2c_msg msg;
+	u8 buf[3];
+
+	buf[0] = reg >> 8;
+	buf[1] = reg & 0xff;
+	buf[2] = val;
+
+	msg.addr = client->addr;
+	msg.flags = 0;
+	msg.len = 3;
+	msg.buf = buf;
+
+	if (i2c_transfer(client->adapter, &msg, 1) != 1)
+		return -EIO;
+	return 0;
+}
+
+int lc898122_read_byte(struct i2c_client *client, u16 reg, u8 *val)
+{
+	struct i2c_msg msg[2];
+	int ret;
+	unsigned char data[3];
+
+	memset(msg, 0, sizeof(msg));
+
+	msg[0].addr = client->addr;
+	msg[0].flags = 0;
+	msg[0].len = 2;
+	msg[0].buf = data;
+
+	data[0] = reg >> 8;
+	data[1] = reg & 0xff;
+
+	msg[1].addr = client->addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = 1;
+	msg[1].buf = data;
+
+	ret = i2c_transfer(client->adapter, msg, 2);
+	if (ret != 2)
+		return -EIO;
+
+	*val = ((u8)data[0]);
+
+	return 0;
+}
+
+int lc898122_write_word(struct i2c_client *client, u16 reg, u16 val)
+{
+	struct i2c_msg msg;
+	u8 buf[4];
+
+	buf[0] = reg >> 8;
+	buf[1] = reg & 0xff;
+	buf[2] = val >> 8;
+	buf[3] = val & 0xff;
+
+	msg.addr = client->addr;
+	msg.flags = 0;
+	msg.len = 4;
+	msg.buf = buf;
+
+	if (i2c_transfer(client->adapter, &msg, 1) != 1)
+		return -EIO;
+	return 0;
+}
+
+int lc898122_read_word(struct i2c_client *client, u16 reg, u16 *val)
+{
+	struct i2c_msg msg[2];
+	int ret;
+	unsigned char data[4];
+
+	memset(msg, 0, sizeof(msg));
+
+	msg[0].addr = client->addr;
+	msg[0].flags = 0;
+	msg[0].len = 2;
+	msg[0].buf = data;
+
+	data[0] = reg >> 8;
+	data[1] = reg & 0xff;
+
+	msg[1].addr = client->addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = 2;
+	msg[1].buf = data;
+
+	ret = i2c_transfer(client->adapter, msg, 2);
+	if (ret != 2)
+		return -EIO;
+
+	*val = (data[0] << 8) + data[1];
+
+	return 0;
+}
+
+int lc898122_write_long(struct i2c_client *client, u16 reg, u32 val)
+{
+	struct i2c_msg msg;
+	unsigned char buf[6];
+
+	buf[0] = reg >> 8;
+	buf[1] = reg & 0xff;
+	buf[2] = (val >> 24);
+	buf[3] = (val >> 16);
+	buf[4] = (val >> 8);
+	buf[5] = val;
+
+	msg.addr = client->addr;
+	msg.flags = 0;
+	msg.len = 6;
+	msg.buf = buf;
+
+	if (i2c_transfer(client->adapter, &msg, 1) != 1)
+		return -EIO;
+	return 0;
+}
+
+int lc898122_read_long(struct i2c_client *client, u16 reg, u32 *val)
+{
+	struct i2c_msg msg[2];
+	int ret;
+	unsigned char data[6] = {
+		reg >> 8, reg & 0xff
+	};
+
+	memset(msg, 0, sizeof(msg));
+
+	msg[0].addr = client->addr;
+	msg[0].flags = 0;
+	msg[0].len = 2;
+	msg[0].buf = data;
+
+	msg[1].addr = client->addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = 4;
+	msg[1].buf = data;
+
+	ret = i2c_transfer(client->adapter, msg, 2);
+	if (ret != 2)
+		return -EIO;
+
+	*val = (data[0] << 24) + (data[1] << 16) + (data[2] << 8) + data[3];
+
+	return 0;
+}
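All four accessors put the 16-bit register address on the wire MSB first, followed by the payload bytes in the same order. A minimal usage sketch (a hypothetical helper, not part of this patch; it only assumes the LC898122_CVER chip-version register defined above):

static int lc898122_check_version(struct i2c_client *client)
{
	u8 ver;
	int ret;

	/* CVER (0x027E) sits in the System Register block above */
	ret = lc898122_read_byte(client, LC898122_CVER, &ver);
	if (ret)
		return ret;

	dev_dbg(&client->dev, "LC898122 chip version 0x%02x\n", ver);
	return 0;
}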
+
+static int lc898122_adjust_af_parameters(struct lc898122_device *lc898122_dev)
+{
+	struct i2c_client *client = lc898122_dev->client;
+
+	lc898122_RamAccFixMod(lc898122_dev, ON);
+
+	RamWriteA(client, LC898122_DAXHLO,
+		  (lc898122_dev->buf[HALLOFFSET_X_HIGH] << 8) |
+		  (lc898122_dev->buf[HALLOFFSET_X_LOW]));
+
+	RamWriteA(client, LC898122_DAYHLO,
+		  (lc898122_dev->buf[HALLOFFSET_Y_HIGH] << 8) |
+		  (lc898122_dev->buf[HALLOFFSET_Y_LOW]));
+
+	RamWriteA(client, LC898122_DAXHLB,
+		  (lc898122_dev->buf[HALL_BIAS_X_HIGH] << 8) |
+		  (lc898122_dev->buf[HALL_BIAS_X_LOW]));
+
+	RamWriteA(client, LC898122_DAYHLB,
+		  (lc898122_dev->buf[HALL_BIAS_Y_HIGH] << 8) |
+		  (lc898122_dev->buf[HALL_BIAS_Y_LOW]));
+
+	RamWriteA(client, LC898122_OFF0Z,
+		  (lc898122_dev->buf[HALL_AD_OFFSET_X_HIGH] << 8) |
+		  (lc898122_dev->buf[HALL_AD_OFFSET_X_LOW]));
+
+	RamWriteA(client, LC898122_OFF1Z,
+		  (lc898122_dev->buf[HALL_AD_OFFSET_Y_HIGH] << 8) |
+		  (lc898122_dev->buf[HALL_AD_OFFSET_Y_LOW]));
+
+	RamWriteA(client, LC898122_sxg,
+		  (lc898122_dev->buf[LOOP_GAIN_X_HIGH] << 8) |
+		  (lc898122_dev->buf[LOOP_GAIN_X_LOW]));
+
+	RamWriteA(client, LC898122_syg,
+		  (lc898122_dev->buf[LOOP_GAIN_Y_HIGH] << 8) |
+		  (lc898122_dev->buf[LOOP_GAIN_Y_LOW]));
+
+	lc898122_RamAccFixMod(lc898122_dev, OFF);
+
+	/* adjusted value */
+	RegWriteA(client, LC898122_IZAH, lc898122_dev->buf[GYRO_OFFSET_X_HIGH]);
+	RegWriteA(client, LC898122_IZAL, lc898122_dev->buf[GYRO_OFFSET_X_LOW]);
+	RegWriteA(client, LC898122_IZBH, lc898122_dev->buf[GYRO_OFFSET_Y_HIGH]);
+	RegWriteA(client, LC898122_IZBL, lc898122_dev->buf[GYRO_OFFSET_Y_LOW]);
+
+	RegWriteA(client, LC898122_OSCSET, lc898122_dev->buf[OSC_VAL]);
+
+	RamWrite32A(client, LC898122_gxzoom,
+		    (lc898122_dev->buf[GYRO_GAIN_X3] << 24) |
+		    (lc898122_dev->buf[GYRO_GAIN_X2] << 16) |
+		    (lc898122_dev->buf[GYRO_GAIN_X1] << 8) |
+		    (lc898122_dev->buf[GYRO_GAIN_X0]));
+
+	RamWrite32A(client, LC898122_gyzoom,
+		    lc898122_dev->buf[GYRO_GAIN_Y3] << 24 |
+		    lc898122_dev->buf[GYRO_GAIN_Y2] << 16 |
+		    lc898122_dev->buf[GYRO_GAIN_Y1] << 8 |
+		    lc898122_dev->buf[GYRO_GAIN_Y0]);
+
+	/* set VCM DAC offset */
+	lc898122_SetDOFSTDAF(lc898122_dev, lc898122_dev->buf[VCM_DAC_OFFSET]);
+
+	/* remove gyro offset */
+	lc898122_RemOff(lc898122_dev, ON);
+
+	RegWriteA(client, LC898122_TCODEH, 0x04);
+
+	lc898122_settregaf(lc898122_dev, 0x0400);
+
+	/* clear removing gyro offset */
+	lc898122_RemOff(lc898122_dev, OFF);
+
+	return 0;
+}
+
+
+/*
+ * Read the EEPROM contents into a devm-allocated buffer and store the
+ * pointer in lc898122_dev->buf. The buffer is freed automatically when
+ * the device is unbound. Returns 0 on success or a negative error code.
+ */ +static int lc898122_eeprom_read(struct lc898122_device *lc898122_dev, u8 *buf) +{ + struct i2c_msg msg[2]; + unsigned int eeprom_i2c_addr = 0x54; + struct i2c_client *client = lc898122_dev->client; + unsigned int size = LC898122_EEPROM_SIZE; + static const unsigned int max_read_size = MAX_WRITE_BUF_SIZE; + int addr; + + buf = devm_kzalloc(&client->dev, size, GFP_KERNEL); + + if (!buf) + return -ENOMEM; + + for (addr = 0; addr < size; addr += max_read_size) { + unsigned char addr_buf; + int r; + + eeprom_i2c_addr |= (addr >> 8) & 1; + addr_buf = addr & 0xFF; + + msg[0].addr = eeprom_i2c_addr; + msg[0].flags = 0; + msg[0].len = 1; + msg[0].buf = &addr_buf; + + msg[1].addr = eeprom_i2c_addr; + msg[1].flags = I2C_M_RD; + msg[1].len = min(max_read_size, size - addr); + msg[1].buf = &buf[addr]; + + r = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); + if (r != ARRAY_SIZE(msg)) { + dev_err(&client->dev, "read failed at 0x%02x\n", addr); + return r; + } + } + + lc898122_dev->buf = buf; + + return 0; +} + +static int lc898122_open(struct v4l2_subdev *subdev_vcm, + struct v4l2_subdev_fh *fh) +{ + struct lc898122_device *lc898122_dev = to_lc898122_sensor(subdev_vcm); + struct i2c_client *client = lc898122_dev->client; + int rval; + + rval = pm_runtime_get_sync(&client->dev); + if (rval < 0) { + pm_runtime_put(&lc898122_dev->client->dev); + return rval; + } + + atomic_inc(&lc898122_dev->open); + + return 0; +} + +static int lc898122_close(struct v4l2_subdev *subdev_vcm, + struct v4l2_subdev_fh *fh) +{ + struct lc898122_device *lc898122_dev = to_lc898122_sensor(subdev_vcm); + + atomic_dec(&lc898122_dev->open); + pm_runtime_put(&lc898122_dev->client->dev); + + return 0; +} + +static int lc898122_t_focus_vcm(struct lc898122_device *lc898122_dev, u16 val) +{ + struct i2c_client *client = lc898122_dev->client; + + /* Move the lens to target position */ + lc898122_settregaf(lc898122_dev, val); + + dev_dbg(&client->dev, "Setting new value VCM: %d\n", val); + + return 0; +} + +static int lc898122_set_stabilization(struct lc898122_device *lc898122_dev, + int val) +{ + /* + * Val is coming from user space, which indicates + * on and off status of OIS control + */ + if (val) { + /* Turn On OIS */ + lc898122_OisEna(lc898122_dev); + } else { + /* Turn OIS OFF */ + lc898122_RtnCen(lc898122_dev, 0x00); + } + /* TODO: Check OIS for Video and still modes */ + + return 0; +} + +static int lc898122_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct lc898122_device *dev_vcm = to_lc898122_vcm(ctrl); + + if (ctrl->id == V4L2_CID_FOCUS_ABSOLUTE) + return lc898122_t_focus_vcm(dev_vcm, ctrl->val); + else if (ctrl->id == V4L2_CID_IMAGE_STABILIZATION) + return lc898122_set_stabilization(dev_vcm, ctrl->val); + else + return -EINVAL; +} + +static const struct v4l2_ctrl_ops lc898122_vcm_ctrl_ops = { + .s_ctrl = lc898122_set_ctrl, +}; + +static const struct v4l2_subdev_core_ops lc898122_vcm_core_ops = { +}; + +static const struct v4l2_subdev_internal_ops lc898122_internal_ops = { + .open = lc898122_open, + .close = lc898122_close, +}; + +static int lc898122_init_controls(struct lc898122_device *dev_vcm) +{ + struct v4l2_ctrl_handler *hdl = &dev_vcm->ctrls_vcm; + const struct v4l2_ctrl_ops *ops = &lc898122_vcm_ctrl_ops; + struct i2c_client *client = dev_vcm->client; + + v4l2_ctrl_handler_init(hdl, 2); + + v4l2_ctrl_new_std(hdl, ops, + V4L2_CID_FOCUS_ABSOLUTE, + 0, + LC898122_MAX_FOCUS_POS, + 1, + 0); + v4l2_ctrl_new_std(hdl, ops, + V4L2_CID_IMAGE_STABILIZATION, + 0, + 1, + 1, + 0); + + if (hdl->error) + dev_err(&client->dev, 
"lc898122_init_controls fail\n"); + dev_vcm->subdev_vcm.ctrl_handler = hdl; + return hdl->error; +} + +static void lc898122_subdev_cleanup(struct lc898122_device *lc898122_dev) +{ + v4l2_ctrl_handler_free(&lc898122_dev->ctrls_vcm); + v4l2_device_unregister_subdev(&lc898122_dev->subdev_vcm); + media_entity_cleanup(&lc898122_dev->subdev_vcm.entity); +} + +static const struct v4l2_subdev_ops lc898122_ops = { + .core = &lc898122_vcm_core_ops, +}; + +static int lc898122_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct lc898122_device *lc898122_dev; + struct lc898122_platform_data *pdata; + int rval; + + lc898122_dev = devm_kzalloc(&client->dev, sizeof(*lc898122_dev), + GFP_KERNEL); + + if (lc898122_dev == NULL) + return -ENOMEM; + + pdata = dev_get_platdata(&client->dev); + if (pdata) + lc898122_dev->sensor_dev = pdata->sensor_device; + + /* + * If we got sensor device pointer assume that sensor decive owns + * all the shared resources and control of them. + */ + if (lc898122_dev->sensor_dev) + if (!try_module_get(lc898122_dev->sensor_dev->driver->owner)) + return -ENODEV; + + lc898122_dev->state.flags = LC898122_DEFCONFIG; + lc898122_dev->client = client; + + v4l2_i2c_subdev_init(&lc898122_dev->subdev_vcm, client, &lc898122_ops); + + lc898122_dev->subdev_vcm.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + lc898122_dev->subdev_vcm.internal_ops = &lc898122_internal_ops; + snprintf(lc898122_dev->subdev_vcm.name, + sizeof(lc898122_dev->subdev_vcm.name), + LC898122_NAME " %d-%4.4x", i2c_adapter_id(client->adapter), + client->addr); + + rval = lc898122_init_controls(lc898122_dev); + if (rval) + goto err_cleanup; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = media_entity_init(&lc898122_dev->subdev_vcm.entity, 0, NULL, 0); +#else + rval = media_entity_pads_init(&lc898122_dev->subdev_vcm.entity, 0, + NULL); +#endif + if (rval < 0) + goto err_cleanup; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + lc898122_dev->subdev_vcm.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_LENS; +#else + lc898122_dev->subdev_vcm.entity.function = MEDIA_ENT_F_LENS; +#endif + + atomic_set(&lc898122_dev->open, 0); + pm_runtime_enable(&client->dev); + /* + * Read eeprom contents in driver probe and save them in buffer + * for later use + */ + + rval = pm_runtime_get_sync(&client->dev); + if (rval >= 0) + rval = lc898122_eeprom_read(lc898122_dev, lc898122_dev->buf); + pm_runtime_put_sync(&client->dev); + + if (rval) + goto err_cleanup; + + return 0; + +err_cleanup: + lc898122_subdev_cleanup(lc898122_dev); + dev_err(&client->dev, "lc898122 Probe failed: %d\n", rval); + if (lc898122_dev->sensor_dev) + module_put(lc898122_dev->sensor_dev->driver->owner); + return rval; +} + +static int lc898122_remove(struct i2c_client *client) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct lc898122_device *lc898122_dev = container_of(sd, + struct lc898122_device, subdev_vcm); + + lc898122_subdev_cleanup(lc898122_dev); + if (lc898122_dev->sensor_dev) + module_put(lc898122_dev->sensor_dev->driver->owner); + + return 0; +} + +static int lc898122_poweron_init(struct lc898122_device *lc898122_dev) +{ + int rval = 0; + + /* when this is called from probe buf is not yet allocated */ + if (!lc898122_dev->buf) + return 0; + + /* select module 20M*/ + lc898122_selectmodule(lc898122_dev, 0x02); + /* initialize AF */ + lc898122_initsettingsaf(lc898122_dev); + /* initialize OIS */ + lc898122_initsettings(lc898122_dev); + + /* AF calibration data adjustment needed before lens movement */ + rval = 
+
+#ifdef CONFIG_PM
+static void lc898122_complete(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct lc898122_device *lc898122_dev =
+		container_of(sd, struct lc898122_device, subdev_vcm);
+
+	if (!atomic_read(&lc898122_dev->open))
+		return;
+
+	/*
+	 * The lens motor is part of the sensor module, and the sensor PM
+	 * flows live in the sensor driver (with the correct ordering of
+	 * regulator controls etc.). As an I2C device this device is a child
+	 * of the I2C controller, not of the sensor module. At the resume
+	 * phase the sensor may still be suspended, but by the complete
+	 * phase the sensor is guaranteed to have been restored as well.
+	 * Re-init here if the device was enabled before the suspend.
+	 */
+	lc898122_poweron_init(lc898122_dev);
+}
+
+static int lc898122_runtime_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct lc898122_device *lc898122_dev =
+		container_of(sd, struct lc898122_device, subdev_vcm);
+
+	/* X/Y axis servo OFF */
+	lc898122_SrvCon(lc898122_dev, LC898122_X_DIR, OFF);
+	lc898122_SrvCon(lc898122_dev, LC898122_Y_DIR, OFF);
+
+	lc898122_settregaf(lc898122_dev, 0x400);
+
+	if (lc898122_dev->sensor_dev)
+		pm_runtime_put(lc898122_dev->sensor_dev);
+
+	return 0;
+}
+
+static int lc898122_runtime_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct lc898122_device *lc898122_dev =
+		container_of(sd, struct lc898122_device, subdev_vcm);
+	int rval = 0;
+
+	if (lc898122_dev->sensor_dev) {
+		rval = pm_runtime_get_sync(lc898122_dev->sensor_dev);
+		if (rval < 0)
+			return rval;
+	}
+
+	lc898122_poweron_init(lc898122_dev);
+
+	return 0;
+}
+
+#else
+#define lc898122_complete NULL
+#define lc898122_runtime_suspend NULL
+#define lc898122_runtime_resume NULL
+#endif
+
+static const struct dev_pm_ops lc898122_pm_ops = {
+	.complete = lc898122_complete,
+	.runtime_suspend = lc898122_runtime_suspend,
+	.runtime_resume = lc898122_runtime_resume,
+};
+
+static const struct i2c_device_id lc898122_id_table[] = {
+	{ LC898122_NAME, 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, lc898122_id_table);
+
+static struct i2c_driver lc898122_i2c_driver = {
+	.driver = {
+		.name = LC898122_NAME,
+		.pm = &lc898122_pm_ops,
+	},
+	.probe = lc898122_probe,
+	.remove = lc898122_remove,
+	.id_table = lc898122_id_table,
+};
+
+module_i2c_driver(lc898122_i2c_driver);
+
+MODULE_AUTHOR("Kriti Pachhandara ");
+MODULE_DESCRIPTION("lc898122 VCM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/lc898122/lc898122.h b/drivers/media/i2c/lc898122/lc898122.h
new file mode 100644
index 000000000000..5edf01769d0d
--- /dev/null
+++ b/drivers/media/i2c/lc898122/lc898122.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2015 - 2018 Intel Corporation */
+
+#ifndef __LC898122_H__
+#define __LC898122_H__
+
+#include
+#include "lc898122-oisinit.h"
+
+struct lc898122_device {
+	struct i2c_client *client;
+	struct v4l2_ctrl_handler ctrls_vcm;
+	struct v4l2_subdev subdev_vcm;
+	struct lc898122_ois state;
+	struct device *sensor_dev;
+	atomic_t open;
+	u8 *buf;
+};
+
+#define LC898122_EEPROM_SIZE 128
+#define MAX_WRITE_BUF_SIZE 32
+
+#define LC898122_INVALID_CONFIG 0xffffffff
+#define LC898122_MAX_FOCUS_POS 2047
+
+#define HALLOFFSET_X_LOW 0x11
+#define HALLOFFSET_X_HIGH 0x12
+#define HALLOFFSET_Y_LOW 0x13
+#define HALLOFFSET_Y_HIGH 0x14
+#define HALL_BIAS_X_LOW 0x15
+#define HALL_BIAS_X_HIGH 0x16
+#define HALL_BIAS_Y_LOW 0x17
+#define HALL_BIAS_Y_HIGH 0x18
+#define HALL_AD_OFFSET_X_LOW 0x19
+#define HALL_AD_OFFSET_X_HIGH 0x1A
+#define HALL_AD_OFFSET_Y_LOW 0x1B
+#define HALL_AD_OFFSET_Y_HIGH 0x1C
+#define LOOP_GAIN_X_LOW 0x1D
+#define LOOP_GAIN_X_HIGH 0x1E
+#define LOOP_GAIN_Y_LOW 0x1F
+#define LOOP_GAIN_Y_HIGH 0x20
+#define GYRO_OFFSET_X_LOW 0x25
+#define GYRO_OFFSET_X_HIGH 0x26
+#define GYRO_OFFSET_Y_LOW 0x27
+#define GYRO_OFFSET_Y_HIGH 0x28
+#define OSC_VAL 0x29
+#define GYRO_GAIN_X0 0x2A
+#define GYRO_GAIN_X1 0x2B
+#define GYRO_GAIN_X2 0x2C
+#define GYRO_GAIN_X3 0x2D
+#define GYRO_GAIN_Y0 0x2E
+#define GYRO_GAIN_Y1 0x2F
+#define GYRO_GAIN_Y2 0x30
+#define GYRO_GAIN_Y3 0x31
+#define VCM_DAC_OFFSET 0x38
+#define AF_5M_L 0x0C
+#define AF_5M_H 0x0D
+#define AF_10CM_L 0x0E
+#define AF_10CM_H 0x0F
+
+
+#endif
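The EEPROM offsets above address a calibration blob in which each 16-bit quantity is stored as a low/high byte pair; lc898122_adjust_af_parameters() recombines them exactly as in this sketch (the helper itself is illustrative, not part of the patch):

/* Illustrative helper: rebuild one 16-bit calibration word. */
static inline u16 lc898122_eeprom_word(const u8 *buf,
				       unsigned int lo, unsigned int hi)
{
	return (buf[hi] << 8) | buf[lo];
}

/* e.g. the X hall offset written to LC898122_DAXHLO: */
/*	lc898122_eeprom_word(dev->buf, HALLOFFSET_X_LOW, HALLOFFSET_X_HIGH) */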
diff --git a/drivers/media/i2c/lm3643.c b/drivers/media/i2c/lm3643.c
new file mode 100644
index 000000000000..695a23855a40
--- /dev/null
+++ b/drivers/media/i2c/lm3643.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2014 - 2018 Intel Corporation
+ *
+ * Based on the LM3560 and other TI flash drivers written by
+ * Daniel Jeong
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "../../../include/media/lm3643.h"
+#include
+#include
+
+/* register definitions */
+#define ENABLE_REG 0x01
+#define MODE_SHIFT 2
+#define TORCH_NTC_EN_SHIFT 4
+#define STROBE_EN_SHIFT 5
+#define STROBE_TYPE_SHIFT 6
+#define TX_PIN_MASK 0x80
+#define TX_EN_SHIFT 7
+#define MODE_MASK 0xc
+#define MASK_STROBE_SRC 1
+#define ENABLE_MASK 0x3
+
+#define IVFM_REG 0x02
+#define IVFM_SELECTION_SHIFT 0
+#define IVFM_DISABLE 0
+#define IVFM_STOP_AND_HOLD 1
+#define IVFM_DOWN 2
+#define IVFM_UP_AND_DOWN 3
+#define IVFM_HYSTERESIS_SHIFT 2
+#define IVFM_LEVEL_SHIFT 3
+#define IVFM_UVLO_SHIFT 6
+
+#define LED1_FLASH_BR_REG 0x03
+#define LED2_FLASH_BR_REG 0x04
+#define LED1_TORCH_BR_REG 0x05
+#define LED2_TORCH_BR_REG 0x06
+#define MASK_TORCH_BR 0x7f
+#define MASK_FLASH_BR 0x7f
+#define LED2_OVERDRIVE_SHIFT 7
+
+#define BOOST_REG 0x07
+#define BOOST_CURRENT_LIMIT_SHIFT 0
+#define BOOST_FREQ_SHIFT 1
+#define BOOST_MODE_SHIFT 2
+#define BOOST_FAULT_DETEC_SHIFT 3
+#define BOOST_SW_RESET_SHIFT 7
+
+#define TIMING_REG 0x08
+#define MASK_FLASH_TOUT 0xf
+
+#define FLASH_TIMEOUT_SHIFT 0
+#define TORCH_CURRENT_RAMP_SHIFT 4
+#define TEMP_REG 0x08
+#define FLAGS1_REG 0x0a
+#define FLAGS2_REG 0x0b
+#define FAULT_TIMEOUT (1 << 0)
+#define FAULT_UVLO (1 << 1)
+#define FAULT_THERMAL_SHUTDOWN (1 << 2)
+#define FAULT_SHORT_CIRCUIT_LED1 (1 << 5)
+#define FAULT_SHORT_CIRCUIT_LED2 (1 << 4)
+#define FAULT_SHORT_CIRCUIT_VOUT (1 << 6)
+#define FAULT_OVP (1 << 1)
+#define FAULT_OCP (1 << 3)
+#define FAULT_IVFM (1 << 2)
+#define FAULT_OVERTEMP (1 << 0)
+#define FAULT_NTC_TRIP (3 << 3)
+
+
+#define FAULT_SHORT (FAULT_SHORT_CIRCUIT_LED1 | FAULT_SHORT_CIRCUIT_LED2 \
+		| FAULT_SHORT_CIRCUIT_VOUT)
+
+enum led_mode {
+	MODE_SHDN = 0x0,
+	MODE_IR_DRIVE = 0x1,
+	MODE_TORCH = 0x2,
+	MODE_FLASH = 0x3,
+};
+
+#ifndef V4L2_FLASH_FAULT_UNDER_VOLTAGE
+	#define V4L2_FLASH_FAULT_UNDER_VOLTAGE (1 << 6)
+#endif
+#ifndef V4L2_FLASH_FAULT_INPUT_VOLTAGE
+	#define V4L2_FLASH_FAULT_INPUT_VOLTAGE (1 << 7)
+#endif
+#ifndef V4L2_FLASH_FAULT_LED_OVER_TEMPERATURE
+	#define V4L2_FLASH_FAULT_LED_OVER_TEMPERATURE (1 << 8)
+#endif
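ENABLE_REG packs the LED enable bits (bits 0-1) and the mode field (bits 2-3) into a single byte, so turning on torch mode for both LEDs amounts to the following write (an illustrative fragment; the driver reaches the same state via lm3643_init_device() plus lm3643_mode_ctrl()):

/* LED1 + LED2 enabled, mode = torch: 0x03 | (0x2 << 2) = 0x0b */
regmap_update_bits(flash->regmap, ENABLE_REG,
		   ENABLE_MASK | MODE_MASK,
		   ENABLE_MASK | (MODE_TORCH << MODE_SHIFT));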
+
+/**
+ * struct lm3643_gpio
+ *
+ * @list: list node
+ * @mutex: serialize access
+ * @gpio: gpio number
+ * @num_users: number of lm3643 devices which share this gpio
+ * @use_count: number of requests to turn the gpio on
+ */
+struct lm3643_gpio {
+	struct list_head list;
+	struct mutex mutex;
+	int gpio;
+	int num_users;
+	int use_count;
+};
+
+/**
+ * struct lm3643_flash
+ *
+ * @dev: device
+ * @pdata: platform data
+ * @regmap: reg. map for i2c
+ * @ctrls_led: V4L2 controls
+ * @subdev_led: V4L2 subdev
+ * @led_mode: V4L2 LED mode
+ * @lmgpio: gpio access struct
+ */
+struct lm3643_flash {
+	struct device *dev;
+	struct lm3643_platform_data *pdata;
+	struct regmap *regmap;
+	struct v4l2_ctrl_handler ctrls_led;
+	struct v4l2_subdev subdev_led;
+	enum v4l2_flash_led_mode led_mode;
+	struct lm3643_gpio *lmgpio;
+};
+
+#define to_lm3643_flash(_ctrl) \
+	container_of(_ctrl->handler, struct lm3643_flash, ctrls_led)
+
+static LIST_HEAD(lm3643_gpios);
+static DEFINE_MUTEX(lm3643_gpio_mutex);
+
+static int lm3643_suspend(struct device *dev);
+static int lm3643_resume(struct device *dev);
+static int lm3643_init_device(struct lm3643_flash *flash);
+
+/* Call this from probe */
+static struct lm3643_gpio *lm3643_get_gpio(int gpio_num, struct device *dev)
+{
+	struct lm3643_gpio *gpio;
+	struct lm3643_gpio *tmp;
+	int rval;
+
+	if (gpio_num < 0) {
+		dev_err(dev, "Invalid GPIO\n");
+		return NULL;
+	}
+
+	mutex_lock(&lm3643_gpio_mutex);
+
+	list_for_each_entry_safe(gpio, tmp, &lm3643_gpios, list) {
+		if (gpio_num == gpio->gpio)
+			goto out;
+	}
+	/* New */
+	gpio = kzalloc(sizeof(*gpio), GFP_KERNEL);
+	if (!gpio) {
+		dev_err(dev, "No memory\n");
+		goto error;
+	}
+
+	mutex_init(&gpio->mutex);
+
+	rval = gpio_request(gpio_num, "flash reset");
+	if (rval) {
+		dev_err(dev, "Can't get gpio %d\n", gpio_num);
+		goto freemem;
+	}
+
+	rval = gpio_direction_output(gpio_num, 0);
+	if (rval) {
+		dev_err(dev, "Can't set gpio to output\n");
+		goto freegpio;
+	}
+
+	gpio->gpio = gpio_num;
+	list_add(&gpio->list, &lm3643_gpios);
+
+out:
+	mutex_unlock(&lm3643_gpio_mutex);
+
+	if (gpio) {
+		mutex_lock(&gpio->mutex);
+		gpio->num_users++;
+		mutex_unlock(&gpio->mutex);
+	}
+
+	return gpio;
+freegpio:
+	gpio_free(gpio_num);
+freemem:
+	kfree(gpio);
+error:
+	mutex_unlock(&lm3643_gpio_mutex);
+	return NULL;
+}
+
+static void lm3643_gpio_ctrl(struct lm3643_gpio *gpio, bool state)
+{
+	mutex_lock(&gpio->mutex);
+	if (state) {
+		gpio_set_value(gpio->gpio, 1);
+		gpio->use_count++;
+	} else {
+		gpio->use_count--;
+		if (!gpio->use_count)
+			gpio_set_value(gpio->gpio, 0);
+	}
+	mutex_unlock(&gpio->mutex);
+}
+
+static void lm3643_gpio_remove(struct lm3643_gpio *gpio, int gpio_num)
+{
+	struct lm3643_gpio *tmp;
+
+	mutex_lock(&lm3643_gpio_mutex);
+
+	list_for_each_entry_safe(gpio, tmp, &lm3643_gpios, list) {
+		if (gpio_num != gpio->gpio)
+			continue;
+		mutex_lock(&gpio->mutex);
+		gpio->num_users--;
+		if (!gpio->num_users) {
+			gpio_free(gpio->gpio);
+			list_del(&gpio->list);
+			mutex_unlock(&gpio->mutex);
+			mutex_destroy(&gpio->mutex);
+			kfree(gpio);
+		} else {
+			mutex_unlock(&gpio->mutex);
+		}
+	}
+	mutex_unlock(&lm3643_gpio_mutex);
+}
+
+/* enable mode control */
+static int lm3643_mode_ctrl(struct lm3643_flash *flash)
+{
+	switch (flash->led_mode) {
+	case V4L2_FLASH_LED_MODE_NONE:
+		regmap_update_bits(flash->regmap,
+				   ENABLE_REG,
+				   MODE_MASK,
+				   MODE_SHDN << MODE_SHIFT);
+		break;
+	case V4L2_FLASH_LED_MODE_TORCH:
		regmap_update_bits(flash->regmap,
				   ENABLE_REG,
				   MODE_MASK,
				   MODE_TORCH << MODE_SHIFT);
+		break;
+	case V4L2_FLASH_LED_MODE_FLASH:
+		regmap_update_bits(flash->regmap,
+				   ENABLE_REG,
+				   MODE_MASK,
+				   MODE_FLASH << MODE_SHIFT);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* V4L2 controls */
+static int lm3643_get_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct lm3643_flash *flash = to_lm3643_flash(ctrl);
+	unsigned int reg_val1, reg_val2;
+	int rval;
+
+	if (ctrl->id != V4L2_CID_FLASH_FAULT)
+		return -EINVAL;
+
+	rval = regmap_read(flash->regmap, FLAGS1_REG, &reg_val1);
+	if (rval < 0)
+		return rval;
+
+	rval = regmap_read(flash->regmap, FLAGS2_REG, &reg_val2);
+	if (rval < 0)
+		return rval;
+
+	dev_dbg(flash->dev, "Flags1 = 0x%x, Flags2 = 0x%x\n", reg_val1,
+		reg_val2);
+	ctrl->val = 0;
+	if (reg_val1 & FAULT_TIMEOUT)
+		ctrl->val |= V4L2_FLASH_FAULT_TIMEOUT;
+	if (reg_val1 & FAULT_SHORT)
+		ctrl->val |= V4L2_FLASH_FAULT_SHORT_CIRCUIT;
+	if (reg_val1 & FAULT_UVLO)
+		ctrl->val |= V4L2_FLASH_FAULT_UNDER_VOLTAGE;
+	if (reg_val2 & FAULT_IVFM)
+		ctrl->val |= V4L2_FLASH_FAULT_INPUT_VOLTAGE;
+	if (reg_val1 & FAULT_OCP)
+		ctrl->val |= V4L2_FLASH_FAULT_OVER_CURRENT;
+	if (reg_val1 & FAULT_THERMAL_SHUTDOWN)
+		ctrl->val |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
+	if (reg_val2 & FAULT_OVERTEMP)
+		ctrl->val |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
+	if (reg_val2 & FAULT_NTC_TRIP)
+		ctrl->val |= V4L2_FLASH_FAULT_LED_OVER_TEMPERATURE;
+	if (reg_val2 & FAULT_OVP)
+		ctrl->val |= V4L2_FLASH_FAULT_OVER_VOLTAGE;
+	return 0;
+}
+
+static int lm3643_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct lm3643_flash *flash = to_lm3643_flash(ctrl);
+	int rval = 0;
+
+	dev_dbg(flash->dev, "lm3643 control 0x%x\n", ctrl->id);
+
+	switch (ctrl->id) {
+	case V4L2_CID_FLASH_LED_MODE:
+		if (flash->led_mode == ctrl->val)
+			break;
+		flash->led_mode = ctrl->val;
+		if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
+			rval = lm3643_mode_ctrl(flash);
+		break;
+	case V4L2_CID_FLASH_STROBE:
+		if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) {
+			rval = -EBUSY;
+			break;
+		}
+		flash->led_mode = V4L2_FLASH_LED_MODE_FLASH;
+		rval = lm3643_mode_ctrl(flash);
+		break;
+	case V4L2_CID_FLASH_STROBE_SOURCE:
+		if (ctrl->val == V4L2_FLASH_STROBE_SOURCE_EXTERNAL) {
+			flash->led_mode = V4L2_FLASH_LED_MODE_NONE;
+			lm3643_mode_ctrl(flash);
+		}
+		rval = regmap_update_bits(flash->regmap,
+					  ENABLE_REG,
+					  MASK_STROBE_SRC << STROBE_EN_SHIFT,
+					  ctrl->val << STROBE_EN_SHIFT);
+		break;
+	case V4L2_CID_FLASH_STROBE_STOP:
+		if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) {
+			rval = -EBUSY;
+			break;
+		}
+		flash->led_mode = V4L2_FLASH_LED_MODE_NONE;
+		rval = lm3643_mode_ctrl(flash);
+		break;
+	case V4L2_CID_FLASH_TIMEOUT:
+		rval = regmap_update_bits(flash->regmap,
+					  TIMING_REG, MASK_FLASH_TOUT,
+					  ctrl->val);
+		break;
+	/*
+	 * Intensity: flash intensity is given in mA
+	 */
+	case V4L2_CID_FLASH_INTENSITY:
+		rval = regmap_update_bits(flash->regmap,
+					  LED1_FLASH_BR_REG,
+					  MASK_FLASH_BR,
+					  LM3643_FLASH_BRT_mA_TO_REG(
+						ctrl->val));
+		break;
+	case V4L2_CID_FLASH_TORCH_INTENSITY:
+		rval = regmap_update_bits(flash->regmap,
+					  LED1_TORCH_BR_REG,
+					  MASK_TORCH_BR,
+					  LM3643_TORCH_BRT_mA_TO_REG(
+						ctrl->val));
+		break;
+	case V4L2_CID_FLASH_FAULT:
+		break;
+	default:
+		dev_err(flash->dev, "lm3643 invalid control 0x%x\n", ctrl->id);
+		rval = -EINVAL;
+		break;
+	}
+	return rval;
+}
+
+static const struct v4l2_ctrl_ops lm3643_led_ctrl_ops = {
+	.g_volatile_ctrl = lm3643_get_ctrl,
+	.s_ctrl = lm3643_set_ctrl,
+};
+
+static int lm3643_init_controls(struct lm3643_flash *flash)
+{
+	struct v4l2_ctrl
*fault; + + struct v4l2_ctrl_handler *hdl = &flash->ctrls_led; + const struct v4l2_ctrl_ops *ops = &lm3643_led_ctrl_ops; + + v4l2_ctrl_handler_init(hdl, 8); + + /* flash mode */ + v4l2_ctrl_new_std_menu(hdl, ops, + V4L2_CID_FLASH_LED_MODE, + V4L2_FLASH_LED_MODE_TORCH, + ~0x7, + V4L2_FLASH_LED_MODE_NONE); + + flash->led_mode = V4L2_FLASH_LED_MODE_NONE; + + /* flash source */ + v4l2_ctrl_new_std_menu(hdl, ops, + V4L2_CID_FLASH_STROBE_SOURCE, + 0x1, + ~0x1, + V4L2_FLASH_STROBE_SOURCE_SOFTWARE); + + /* flash strobe */ + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_STROBE, 0, 0, 0, 0); + + /* flash strobe stop */ + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0); + + /* flash strobe timeout */ + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_TIMEOUT, + LM3643_FLASH_TOUT_MIN, + LM3643_FLASH_TOUT_MAX, + LM3643_FLASH_TOUT_STEP, LM3643_FLASH_TOUT_DEF); + /*max flash current in uA*/ + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_INTENSITY, + 0, + flash->pdata->flash_max_brightness, + 1, + flash->pdata->flash_max_brightness); + + /* max torch current uA*/ + v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_TORCH_INTENSITY, + 0, + flash->pdata->torch_max_brightness, + 1, + flash->pdata->torch_max_brightness); + + /* fault */ + fault = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FLASH_FAULT, 0, + V4L2_FLASH_FAULT_OVER_VOLTAGE + | V4L2_FLASH_FAULT_OVER_TEMPERATURE + | V4L2_FLASH_FAULT_SHORT_CIRCUIT + | V4L2_FLASH_FAULT_TIMEOUT, 0, 0); + if (fault != NULL) + fault->flags |= V4L2_CTRL_FLAG_VOLATILE; + if (hdl->error) + dev_err(flash->dev, "lm3643_init_controls fail\n"); + flash->subdev_led.ctrl_handler = hdl; + return hdl->error; +} + +static int lm3643_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct lm3643_flash *flash = container_of(sd, struct lm3643_flash, + subdev_led); + int rval; + + rval = pm_runtime_get_sync(flash->dev); + dev_dbg(flash->dev, "%s rval = %d\n", __func__, rval); + return rval; +} + +static int lm3643_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct lm3643_flash *flash = container_of(sd, struct lm3643_flash, + subdev_led); + dev_dbg(flash->dev, "%s\n", __func__); + pm_runtime_put(flash->dev); + + return 0; +} +static int lm3643_s_power(struct v4l2_subdev *sd, int on) +{ + struct lm3643_flash *flash = container_of(sd, struct lm3643_flash, + subdev_led); + dev_dbg(flash->dev, "%s value = %d\n", __func__, on); + + if (!on) + pm_runtime_put(flash->dev); + else + pm_runtime_get_sync(flash->dev); + return 0; +} +static const struct v4l2_subdev_core_ops lm3643_core_ops = { + .s_power = lm3643_s_power, +}; + +static const struct v4l2_subdev_internal_ops lm3643_int_ops = { + .open = lm3643_open, + .close = lm3643_close, +}; +static const struct v4l2_subdev_ops lm3643_ops = { + .core = &lm3643_core_ops, +}; + +static const struct regmap_config lm3643_regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0xFF, +}; + +static int lm3643_subdev_init(struct lm3643_flash *flash) +{ + struct i2c_client *client = to_i2c_client(flash->dev); + int rval; + + dev_dbg(flash->dev, "lm3643 subdev init\n"); + v4l2_i2c_subdev_init(&flash->subdev_led, client, &lm3643_ops); + flash->subdev_led.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + snprintf(flash->subdev_led.name, sizeof(flash->subdev_led.name), + LM3643_NAME " %d-%4.4x", i2c_adapter_id(client->adapter), + client->addr); + flash->subdev_led.internal_ops = &lm3643_int_ops; + rval = lm3643_init_controls(flash); + if (rval) + goto err_out; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = 
media_entity_init(&flash->subdev_led.entity, 0, NULL, 0);
+#else
+	rval = media_entity_pads_init(&flash->subdev_led.entity, 0, NULL);
+#endif
+	if (rval < 0)
+		goto err_out;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+	flash->subdev_led.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_FLASH;
+#else
+	flash->subdev_led.entity.function = MEDIA_ENT_F_FLASH;
+#endif
+err_out:
+	return rval;
+}
+
+static void lm3643_subdev_cleanup(struct lm3643_flash *flash)
+{
+	v4l2_ctrl_handler_free(&flash->ctrls_led);
+	v4l2_device_unregister_subdev(&flash->subdev_led);
+	media_entity_cleanup(&flash->subdev_led.entity);
+	lm3643_gpio_ctrl(flash->lmgpio, 0);
+	lm3643_gpio_remove(flash->lmgpio, flash->lmgpio->gpio);
+}
+
+static int lm3643_init_device(struct lm3643_flash *flash)
+{
+	unsigned int reg_val;
+	int rval;
+
+	/* output disable */
+	flash->led_mode = V4L2_FLASH_LED_MODE_NONE;
+	lm3643_mode_ctrl(flash);
+
+	/* Enable both leds */
+	rval = regmap_update_bits(flash->regmap,
+				  ENABLE_REG,
+				  ENABLE_MASK,
+				  0x3);
+	if (rval < 0)
+		return rval;
+
+	rval = regmap_update_bits(flash->regmap,
+				  LED1_FLASH_BR_REG,
+				  MASK_FLASH_BR,
+				  LM3643_FLASH_BRT_mA_TO_REG(
+					flash->pdata->flash_max_brightness));
+	if (rval < 0)
+		return rval;
+
+	rval = regmap_update_bits(flash->regmap,
+				  LED1_TORCH_BR_REG,
+				  MASK_TORCH_BR,
+				  LM3643_TORCH_BRT_mA_TO_REG(
+					flash->pdata->torch_max_brightness));
+	if (rval < 0)
+		return rval;
+
+	/* Reset flag registers */
+	dev_dbg(flash->dev, "%s reset flag registers\n", __func__);
+	rval = regmap_read(flash->regmap, FLAGS1_REG, &reg_val);
+	if (rval < 0)
+		return rval;
+
+	return regmap_read(flash->regmap, FLAGS2_REG, &reg_val);
+}
+
+static int lm3643_probe(struct i2c_client *client,
+			const struct i2c_device_id *devid)
+{
+	struct lm3643_flash *flash;
+	struct lm3643_platform_data *pdata = dev_get_platdata(&client->dev);
+	int rval;
+
+	flash = devm_kzalloc(&client->dev, sizeof(*flash), GFP_KERNEL);
+	if (flash == NULL)
+		return -ENOMEM;
+
+	flash->regmap = devm_regmap_init_i2c(client, &lm3643_regmap);
+	if (IS_ERR(flash->regmap))
+		return PTR_ERR(flash->regmap);
+
+	/* fall back to built-in default limits if there is no platform data */
+	if (pdata == NULL) {
+		pdata = devm_kzalloc(&client->dev,
+				     sizeof(struct lm3643_platform_data),
+				     GFP_KERNEL);
+		if (pdata == NULL)
+			return -ENOMEM;
+
+		pdata->flash_max_brightness = 500;
+		pdata->torch_max_brightness = 89;
+	}
+
+	flash->pdata = pdata;
+	flash->dev = &client->dev;
+
+	flash->lmgpio = lm3643_get_gpio(flash->pdata->gpio_reset, flash->dev);
+	if (!flash->lmgpio)
+		return -ENODEV;
+	lm3643_gpio_ctrl(flash->lmgpio, 1);
+	rval = lm3643_init_device(flash);
+	if (rval < 0) {
+		dev_err(flash->dev, "%s initdevice fail\n", __func__);
+		lm3643_gpio_ctrl(flash->lmgpio, 0);
+		lm3643_gpio_remove(flash->lmgpio, flash->lmgpio->gpio);
+		return rval;
+	}
+	rval = lm3643_subdev_init(flash);
+	if (rval < 0) {
+		dev_err(flash->dev, "%s subdev init fail\n", __func__);
+		lm3643_subdev_cleanup(flash);
+		return rval;
+	}
+	dev_dbg(flash->dev, "%s Success\n", __func__);
+	lm3643_gpio_ctrl(flash->lmgpio, 0);
+	pm_runtime_enable(flash->dev);
+	return 0;
+}
+
+static int lm3643_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *sd = i2c_get_clientdata(client);
+	struct lm3643_flash *flash = container_of(sd, struct lm3643_flash,
+						  subdev_led);
+	lm3643_subdev_cleanup(flash);
+	pm_runtime_disable(flash->dev);
+	dev_info(flash->dev, "%s\n", __func__);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int lm3643_suspend(struct device *dev)
+{
+	struct i2c_client *client =
to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct lm3643_flash *flash = container_of(sd, struct lm3643_flash, + subdev_led); + dev_dbg(flash->dev, "%s\n", __func__); + + lm3643_gpio_ctrl(flash->lmgpio, 0); + + return 0; +} + +static int lm3643_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct lm3643_flash *flash = container_of(sd, struct lm3643_flash, + subdev_led); + int rval; + + lm3643_gpio_ctrl(flash->lmgpio, 1); + rval = lm3643_init_device(flash); + if (rval) + goto out; + + /* restore v4l2 control values */ + rval = v4l2_ctrl_handler_setup(&flash->ctrls_led); +out: + dev_dbg(flash->dev, "%s rval = %d\n", __func__, rval); + return rval; +} + +#else + +#define lm3643_suspend NULL +#define lm3643_resume NULL + +#endif /* CONFIG_PM */ + +static const struct i2c_device_id lm3643_id_table[] = { + {LM3643_NAME, 0}, + {} +}; + +static const struct dev_pm_ops lm3643_pm_ops = { + .suspend = lm3643_suspend, + .resume = lm3643_resume, + .runtime_suspend = lm3643_suspend, + .runtime_resume = lm3643_resume, +}; + +MODULE_DEVICE_TABLE(i2c, lm3643_id_table); + +static struct i2c_driver lm3643_i2c_driver = { + .driver = { + .name = LM3643_NAME, + .pm = &lm3643_pm_ops, + }, + .probe = lm3643_probe, + .remove = lm3643_remove, + .id_table = lm3643_id_table, +}; + +module_i2c_driver(lm3643_i2c_driver); + +MODULE_AUTHOR("Jouni Ukkonen "); +MODULE_DESCRIPTION("TI LM3643 Dual Flash LED driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c index 463534d44756..6e0093018121 100644 --- a/drivers/media/i2c/m5mols/m5mols_core.c +++ b/drivers/media/i2c/m5mols/m5mols_core.c @@ -608,7 +608,7 @@ static int m5mols_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad, * .get_frame_desc is only used for compressed formats, * thus we always return the capture frame parameters here. 
 */
-	fd->entry[0].length = info->cap.buf_size;
+	fd->entry[0].size.length = info->cap.buf_size;
 	fd->entry[0].pixelcode = info->ffmt[M5MOLS_RESTYPE_CAPTURE].code;
 	mutex_unlock(&info->lock);
@@ -629,11 +629,11 @@ static int m5mols_set_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
 	fd->entry[0].flags = V4L2_MBUS_FRAME_DESC_FL_LEN_MAX;
 	fd->num_entries = 1;
-	fd->entry[0].length = clamp_t(u32, fd->entry[0].length,
-				      mf->width * mf->height,
-				      M5MOLS_MAIN_JPEG_SIZE_MAX);
+	fd->entry[0].size.length = clamp_t(u32, fd->entry[0].size.length,
+					   mf->width * mf->height,
+					   M5MOLS_MAIN_JPEG_SIZE_MAX);
 	mutex_lock(&info->lock);
-	info->cap.buf_size = fd->entry[0].length;
+	info->cap.buf_size = fd->entry[0].size.length;
 	mutex_unlock(&info->lock);
 
 	return 0;
diff --git a/drivers/media/i2c/max9286-reg-settings.h b/drivers/media/i2c/max9286-reg-settings.h
new file mode 100644
index 000000000000..325e375c9924
--- /dev/null
+++ b/drivers/media/i2c/max9286-reg-settings.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#ifndef MAX9286_REG_H
+#define MAX9286_REG_H
+
+#include <linux/types.h>
+
+#define DS_ADDR_MAX9286		0x48
+#define S_ADDR_MAX96705		0x40
+#define S_ADDR_MAX96705_BROADCAST	(S_ADDR_MAX96705 + NR_OF_MAX_STREAMS + 1)
+
+#define ADDR_AR0231AT_SENSOR	0x10
+
+/* Deserializer: MAX9286 registers */
+#define DS_LINK_ENABLE		0x00
+#define DS_FSYNCMODE		0x01
+#define DS_FSYNC_PERIOD_LOW	0x06
+#define DS_FSYNC_PERIOD_MIDDLE	0x07
+#define DS_FSYNC_PERIOD_HIGH	0x08
+#define DS_FWDCCEN_REVCCEN	0x0A
+#define DS_LINK_OUTORD		0x0B
+#define DS_CSI_DBL_DT		0x12
+#define DS_CSI_VC_CTL		0x15
+#define DS_ENEQ			0x1B
+#define DS_MAX9286_DEVID	0x1E
+#define DS_FSYNC_LOCKED		0x31
+#define DS_I2CLOCACK		0x34
+#define DS_FPL_RT		0x3B
+#define DS_ENCRC_FPL		0x3F
+#define DS_CONFIGL_VIDEOL_DET	0x49
+#define DS_OVERLAP_WIN_LOW	0x63
+#define DS_OVERLAP_WIN_HIGH	0x64
+#define DS_AUTO_MASK_LINK	0x69
+
+/* Serializer: MAX96705 registers */
+#define S_SERADDR		0x00
+#define S_MAIN_CTL		0x04
+#define S_CMLLVL_PREEMP		0x06
+#define S_CONFIG		0x07
+#define S_RSVD_8		0x08
+#define S_I2C_SOURCE_IS		0x09
+#define S_I2C_DST_IS		0x0A
+#define S_I2C_SOURCE_SER	0x0B
+#define S_I2C_DST_SER		0x0C
+#define S_INPUT_STATUS		0x15
+#define S_SYNC_GEN_CONFIG	0x43
+#define S_VS_DLY_2		0x44
+#define S_VS_DLY_1		0x45
+#define S_VS_H_2		0x47
+#define S_VS_H_1		0x48
+#define S_VS_H_0		0x49
+#define S_RSVD_97		0x97
+
+struct max9286_register_write {
+	u8 reg;
+	u8 val;
+};
+
+static const struct max9286_register_write max9286_byte_order_settings[] = {
+	{0x20, 0x0B},
+	{0x21, 0x0A},
+	{0x22, 0x09},
+	{0x23, 0x08},
+	{0x24, 0x07},
+	{0x25, 0x06},
+	{0x26, 0x05},
+	{0x27, 0x04},
+	{0x28, 0x03},
+	{0x29, 0x02},
+	{0x2A, 0x01},
+	{0x2B, 0x00},
+	{0x30, 0x1B},
+	{0x31, 0x1A},
+	{0x32, 0x19},
+	{0x33, 0x18},
+	{0x34, 0x17},
+	{0x35, 0x16},
+	{0x36, 0x15},
+	{0x37, 0x14},
+	{0x38, 0x13},
+	{0x39, 0x12},
+	{0x3A, 0x11},
+	{0x3B, 0x10},
+};
+
+#endif
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
new file mode 100644
index 000000000000..06bbba379151
--- /dev/null
+++ b/drivers/media/i2c/max9286.c
@@ -0,0 +1,1194 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "max9286-reg-settings.h"
+
+struct max9286 {
+	struct v4l2_subdev v4l2_sd;
+	struct max9286_pdata *pdata;
+	struct media_pad pad[NR_OF_MAX_PADS];
+	unsigned char sensor_present;
+	unsigned int total_sensor_num;
+	unsigned int nsources;
+	unsigned int nsinks;
+	unsigned int npads;
+	unsigned int nstreams;
+	const char *name;
+	struct v4l2_ctrl_handler ctrl_handler;
+	struct v4l2_subdev *sub_devs[NR_OF_MAX_SINK_PADS];
+	struct v4l2_mbus_framefmt *ffmts[NR_OF_MAX_PADS];
+	struct rect *crop;
+	struct rect *compose;
+	struct {
+		unsigned int *stream_id;
+	} *stream; /* stream enable/disable status, indexed by pad */
+	struct {
+		unsigned int sink;
+		unsigned int source;
+		int flags;
+	} *route; /* pad level info, indexed by stream */
+
+	struct regmap *regmap8;
+	struct mutex max_mutex;
+	struct v4l2_ctrl *link_freq;
+	struct v4l2_ctrl *test_pattern;
+};
+
+#define to_max_9286(_sd) container_of(_sd, struct max9286, v4l2_sd)
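
For context, max9286_registered() further down creates one sensor sub-device per entry in pdata->subdev_info, so a board file is expected to hand the deserializer something along these lines. The layout is inferred from that usage; the "ar0231at" driver name, the adapter number, and the alias address offset are assumptions for illustration only:

	/* hypothetical board wiring; fields as consumed by max9286_registered() */
	static struct max9286_subdev_i2c_info gmsl_subdevs[] = {
		{
			/* sensor behind GMSL link 0, alias address 0x11 */
			.i2c_adapter_id = 2,
			.board_info = {
				I2C_BOARD_INFO("ar0231at", ADDR_AR0231AT_SENSOR + 1),
			},
		},
		/* one entry per populated GMSL link */
	};

	static struct max9286_pdata gmsl_pdata = {
		.subdev_num  = ARRAY_SIZE(gmsl_subdevs),
		.subdev_info = gmsl_subdevs,
	};

	static const struct i2c_board_info deser_board_info = {
		I2C_BOARD_INFO(MAX9286_NAME, DS_ADDR_MAX9286),
		.platform_data = &gmsl_pdata,
	};
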
+
+/*
+ * Order matters.
+ *
+ * 1. Bits-per-pixel, descending.
+ * 2. Bits-per-pixel compressed, descending.
+ * 3. Pixel order, same as in pixel_order_str. Formats for all four pixel
+ *    orders must be defined.
+ */
+static const struct max9286_csi_data_format max_csi_data_formats[] = {
+	{ MEDIA_BUS_FMT_YUYV8_1X16, 16, 16, PIXEL_ORDER_GBRG, 0x1e },
+	{ MEDIA_BUS_FMT_UYVY8_1X16, 16, 16, PIXEL_ORDER_GBRG, 0x1e },
+	{ MEDIA_BUS_FMT_SGRBG12_1X12, 12, 12, PIXEL_ORDER_GRBG, 0x2c },
+	{ MEDIA_BUS_FMT_SRGGB12_1X12, 12, 12, PIXEL_ORDER_RGGB, 0x2c },
+	{ MEDIA_BUS_FMT_SBGGR12_1X12, 12, 12, PIXEL_ORDER_BGGR, 0x2c },
+	{ MEDIA_BUS_FMT_SGBRG12_1X12, 12, 12, PIXEL_ORDER_GBRG, 0x2c },
+	{ MEDIA_BUS_FMT_SGRBG10_1X10, 10, 10, PIXEL_ORDER_GRBG, 0x2b },
+	{ MEDIA_BUS_FMT_SRGGB10_1X10, 10, 10, PIXEL_ORDER_RGGB, 0x2b },
+	{ MEDIA_BUS_FMT_SBGGR10_1X10, 10, 10, PIXEL_ORDER_BGGR, 0x2b },
+	{ MEDIA_BUS_FMT_SGBRG10_1X10, 10, 10, PIXEL_ORDER_GBRG, 0x2b },
+	{ MEDIA_BUS_FMT_SGRBG8_1X8, 8, 8, PIXEL_ORDER_GRBG, 0x2a },
+	{ MEDIA_BUS_FMT_SRGGB8_1X8, 8, 8, PIXEL_ORDER_RGGB, 0x2a },
+	{ MEDIA_BUS_FMT_SBGGR8_1X8, 8, 8, PIXEL_ORDER_BGGR, 0x2a },
+	{ MEDIA_BUS_FMT_SGBRG8_1X8, 8, 8, PIXEL_ORDER_GBRG, 0x2a },
+};
+
+static const uint32_t max9286_supported_codes_pad[] = {
+	MEDIA_BUS_FMT_YUYV8_1X16,
+	MEDIA_BUS_FMT_UYVY8_1X16,
+	MEDIA_BUS_FMT_SGRBG12_1X12,
+	MEDIA_BUS_FMT_SRGGB12_1X12,
+	MEDIA_BUS_FMT_SBGGR12_1X12,
+	MEDIA_BUS_FMT_SGBRG12_1X12,
+	MEDIA_BUS_FMT_SGRBG10_1X10,
+	MEDIA_BUS_FMT_SRGGB10_1X10,
+	MEDIA_BUS_FMT_SBGGR10_1X10,
+	MEDIA_BUS_FMT_SGBRG10_1X10,
+	MEDIA_BUS_FMT_SGRBG8_1X8,
+	MEDIA_BUS_FMT_SRGGB8_1X8,
+	MEDIA_BUS_FMT_SBGGR8_1X8,
+	MEDIA_BUS_FMT_SGBRG8_1X8,
+	0,
+};
+
+static const uint32_t *max9286_supported_codes[] = {
+	max9286_supported_codes_pad,
+};
+
+static struct regmap_config max9286_reg_config8 = {
+	.reg_bits = 8,
+	.val_bits = 8,
+};
+
+/* Serializer register write */
+static int max96705_write_register(struct max9286 *max,
+				   unsigned int offset, u8 reg, u8 val)
+{
+	int ret;
+	int retry, timeout = 10;
+	struct i2c_client *client = v4l2_get_subdevdata(&max->v4l2_sd);
+
+	client->addr = S_ADDR_MAX96705 + offset;
+	for (retry = 0; retry < timeout; retry++) {
+		ret = i2c_smbus_write_byte_data(client, reg, val);
+		if (ret < 0)
+			usleep_range(5000, 6000);
+		else
+			break;
+	}
+
+	client->addr = DS_ADDR_MAX9286;
+	if (retry >= timeout) {
+		dev_err(max->v4l2_sd.dev,
+			"%s:write reg failed: reg=%2x\n", __func__, reg);
+		return -EREMOTEIO;
+	}
+
+	return 0;
+}
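
Both serializer helpers temporarily retarget the deserializer's own i2c_client to a serializer alias address and restore it before returning, so they must never race with another user of that client. A sketch of an alternative that avoids mutating the shared client, using the v4.x-era i2c_new_dummy() helper (error handling trimmed; names are illustrative):

	static int max96705_write(struct max9286 *max, unsigned int offset,
				  u8 reg, u8 val)
	{
		struct i2c_client *client = v4l2_get_subdevdata(&max->v4l2_sd);
		struct i2c_client *ser;
		int ret;

		/* throwaway client bound to the serializer alias address */
		ser = i2c_new_dummy(client->adapter, S_ADDR_MAX96705 + offset);
		if (!ser)
			return -ENODEV;
		ret = i2c_smbus_write_byte_data(ser, reg, val);
		i2c_unregister_device(ser);
		return ret;
	}
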
+
+/* Serializer register read */
+static int
+max96705_read_register(struct max9286 *max, unsigned int i, u8 reg)
+{
+	int val;
+	int retry, timeout = 10;
+	struct i2c_client *client = v4l2_get_subdevdata(&max->v4l2_sd);
+
+	client->addr = S_ADDR_MAX96705 + i;
+	for (retry = 0; retry < timeout; retry++) {
+		val = i2c_smbus_read_byte_data(client, reg);
+		if (val >= 0)
+			break;
+		usleep_range(5000, 6000);
+	}
+
+	client->addr = DS_ADDR_MAX9286;
+	if (retry >= timeout) {
+		dev_err(max->v4l2_sd.dev,
+			"%s:read reg failed: reg=%2x\n", __func__, reg);
+		return -EREMOTEIO;
+	}
+
+	return val;
+}
+
+/* Initialize image sensors and set stream on registers */
+static int max9286_set_stream(struct v4l2_subdev *subdev, int enable)
+{
+	struct max9286 *max = to_max_9286(subdev);
+	struct media_pad *remote_pad;
+	struct v4l2_subdev *sd;
+	int i, rval;
+	unsigned int val;
+	u8 slval = 0xE0;
+
+	dev_dbg(max->v4l2_sd.dev, "MAX9286 set stream. enable = %d\n", enable);
+
+	/* Disable I2C ACK */
+	rval = regmap_write(max->regmap8, DS_I2CLOCACK, 0xB6);
+	if (rval) {
+		dev_err(max->v4l2_sd.dev, "Failed to disable I2C ACK!\n");
+		return rval;
+	}
+
+	for (i = 0; i < NR_OF_MAX_SINK_PADS; i++) {
+		if (((0x01 << (i)) & max->sensor_present) == 0)
+			continue;
+
+		/* Find the pad at the remote end of the link */
+		remote_pad = media_entity_remote_pad(&max->pad[i]);
+		if (!remote_pad)
+			continue;
+
+		/* Enable link */
+		slval |= (0x0F & (1 << i));
+		rval = regmap_write(max->regmap8, DS_LINK_ENABLE, slval);
+		if (rval) {
+			dev_err(max->v4l2_sd.dev,
+				"Failed to enable GMSL links!\n");
+			return rval;
+		}
+
+		rval = regmap_write(max->regmap8, DS_AUTO_MASK_LINK, 0x30);
+		if (rval) {
+			dev_err(max->v4l2_sd.dev,
+				"Failed to write auto mask link register!\n");
+			return rval;
+		}
+
+		/* Call the sensor's set stream */
+		sd = media_entity_to_v4l2_subdev(remote_pad->entity);
+		rval = v4l2_subdev_call(sd, video, s_stream, enable);
+		if (rval) {
+			dev_err(max->v4l2_sd.dev,
+				"Failed to set stream for %s. enable = %d\n",
+				sd->name, enable);
+			return rval;
+		}
+	}
+
+	/* Enable I2C ACK */
+	rval = regmap_write(max->regmap8, DS_I2CLOCACK, 0x36);
+	if (rval) {
+		dev_err(max->v4l2_sd.dev, "Failed to enable I2C ACK!\n");
+		return rval;
+	}
+
+	/* Check if valid PCLK is available for the links */
+	for (i = 1; i <= NR_OF_MAX_SINK_PADS; i++) {
+		if (((0x01 << (i - 1)) & max->sensor_present) == 0)
+			continue;
+
+		val = max96705_read_register(max, i, S_INPUT_STATUS);
+		if ((val != -EREMOTEIO) && (val & 0x01))
+			dev_info(max->v4l2_sd.dev,
+				 "Valid PCLK detected for link %d\n", i);
+		else if (val != -EREMOTEIO)
+			dev_info(max->v4l2_sd.dev,
+				 "No valid PCLK detected for link %d\n", i);
+	}
+
+	/* Set preemphasis settings for all serializers (set to 3.3dB) */
+	max96705_write_register(max, S_ADDR_MAX96705_BROADCAST -
+				S_ADDR_MAX96705, S_CMLLVL_PREEMP, 0xAA);
+	usleep_range(5000, 6000);
+
+	/* Set VSYNC delay */
+	max96705_write_register(max, S_ADDR_MAX96705_BROADCAST -
+				S_ADDR_MAX96705, S_SYNC_GEN_CONFIG, 0x21);
+	usleep_range(5000, 6000);
+
+	max96705_write_register(max, S_ADDR_MAX96705_BROADCAST -
+				S_ADDR_MAX96705, S_VS_DLY_2, 0x06);
+	usleep_range(5000, 6000);
+
+	max96705_write_register(max, S_ADDR_MAX96705_BROADCAST -
+				S_ADDR_MAX96705, S_VS_DLY_1, 0xD8);
+	usleep_range(5000, 6000);
+
+	max96705_write_register(max, S_ADDR_MAX96705_BROADCAST -
+				S_ADDR_MAX96705, S_VS_H_2, 0x26);
+	usleep_range(5000, 6000);
+
+	max96705_write_register(max, S_ADDR_MAX96705_BROADCAST -
+				S_ADDR_MAX96705, S_VS_H_1, 0x00);
+	usleep_range(5000, 6000);
+
+	max96705_write_register(max, S_ADDR_MAX96705_BROADCAST -
+				S_ADDR_MAX96705, S_VS_H_0, 0x00);
+	usleep_range(5000, 6000);
+
+	/* Enable link equalizers */
+	rval = regmap_write(max->regmap8, DS_ENEQ, 0x0F);
+	if (rval) {
+		dev_err(max->v4l2_sd.dev,
+			"Failed to enable the link equalizers!\n");
+		return rval;
+	}
+	usleep_range(5000, 6000);
+	rval = regmap_write(max->regmap8, 0x0C, 0x91);
+
+	/* Enable serial links and disable configuration */
+	max96705_write_register(max, S_ADDR_MAX96705_BROADCAST -
+				S_ADDR_MAX96705, S_MAIN_CTL, 0x83);
+	/* Wait for more than 2 frame times from each sensor */
+	usleep_range(100000, 101000);
+
+	/*
+	 * Poll frame synchronization bit of deserializer
+	 * All the cameras should work in SYNC mode
+	 * MAX9286 sends a pulse to each camera, then each camera sends out
+	 * one frame. The VSYNC for each camera should appear in almost same
+	 * time for the deserializer to lock FSYNC
+	 */
+	rval = regmap_read(max->regmap8, DS_FSYNC_LOCKED, &val);
+	if (rval) {
+		dev_err(max->v4l2_sd.dev,
+			"Failed to read frame sync status!\n");
+		return rval;
+	} else if (val & (0x01 << 6)) {
+		dev_info(max->v4l2_sd.dev, "Deserializer Frame SYNC locked\n");
+	}
+
+	/*
+	 * Enable/set bit[7] of DS_CSI_VC_CTL register for VC operation
+	 * Set VC according to the link number
+	 * Enable CSI-2 output
+	 */
+	if (!enable) {
+		rval = regmap_write(max->regmap8, DS_CSI_VC_CTL, 0x93);
+		if (rval) {
+			dev_err(max->v4l2_sd.dev,
+				"Failed to disable CSI output!\n");
+			return rval;
+		}
+	} else {
+		rval = regmap_write(max->regmap8, DS_CSI_VC_CTL, 0x9B);
+		if (rval) {
+			dev_err(max->v4l2_sd.dev,
+				"Failed to enable CSI output!\n");
+			return rval;
+		}
+	}
+
+	return 0;
+}
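
As a worked example of the DS_LINK_ENABLE value that the loop in max9286_set_stream() builds: assuming cameras were detected on GMSL links 0 and 2 only (sensor_present == 0x05), the arithmetic goes:

	u8 slval = 0xE0;		/* keep the register's upper control bits */
	slval |= 0x0F & (1 << 0);	/* link 0 */
	slval |= 0x0F & (1 << 2);	/* link 2 */
	/* slval == 0xE5 is what gets written to DS_LINK_ENABLE (reg 0x00) */
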
+
+/* Get the media bus format */
+static struct v4l2_mbus_framefmt *
+__max9286_get_ffmt(struct v4l2_subdev *subdev,
+		   struct v4l2_subdev_pad_config *cfg,
+		   unsigned int pad, unsigned int which,
+		   unsigned int stream)
+{
+	struct max9286 *max = to_max_9286(subdev);
+
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(subdev, cfg, pad);
+	else
+		return &max->ffmts[pad][stream];
+}
+
+/* callback for VIDIOC_SUBDEV_G_FMT ioctl handler code */
+static int max9286_get_format(struct v4l2_subdev *subdev,
+			      struct v4l2_subdev_pad_config *cfg,
+			      struct v4l2_subdev_format *fmt)
+{
+	struct max9286 *max = to_max_9286(subdev);
+
+	if (fmt->stream > max->nstreams)
+		return -EINVAL;
+
+	mutex_lock(&max->max_mutex);
+	fmt->format = *__max9286_get_ffmt(subdev, cfg, fmt->pad, fmt->which,
+					  fmt->stream);
+	mutex_unlock(&max->max_mutex);
+
+	dev_dbg(subdev->dev, "subdev_format: which: %s, pad: %d, stream: %d.\n",
+		fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE ?
+ "V4L2_SUBDEV_FORMAT_ACTIVE" : "V4L2_SUBDEV_FORMAT_TRY", + fmt->pad, fmt->stream); + + dev_dbg(subdev->dev, "framefmt: width: %d, height: %d, code: 0x%x.\n", + fmt->format.width, fmt->format.height, fmt->format.code); + + return 0; +} + +/* Validate csi_data_format */ +static const struct max9286_csi_data_format * +max9286_validate_csi_data_format(u32 code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(max_csi_data_formats); i++) { + if (max_csi_data_formats[i].code == code) + return &max_csi_data_formats[i]; + } + + return &max_csi_data_formats[0]; +} + +/* callback for VIDIOC_SUBDEV_S_FMT ioctl handler code */ +static int max9286_set_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct max9286 *max = to_max_9286(subdev); + const struct max9286_csi_data_format *csi_format; + struct v4l2_mbus_framefmt *ffmt; + + if (fmt->stream > max->nstreams) + return -EINVAL; + + csi_format = max9286_validate_csi_data_format(fmt->format.code); + + mutex_lock(&max->max_mutex); + ffmt = __max9286_get_ffmt(subdev, cfg, fmt->pad, fmt->which, + fmt->stream); + if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) { + ffmt->width = fmt->format.width; + ffmt->height = fmt->format.height; + ffmt->code = csi_format->code; + } + + fmt->format = *ffmt; + mutex_unlock(&max->max_mutex); + + dev_dbg(subdev->dev, "framefmt: width: %d, height: %d, code: 0x%x.\n", + ffmt->width, ffmt->height, ffmt->code); + + return 0; +} + +/* get the current low level media bus frame parameters */ +static int max9286_get_frame_desc(struct v4l2_subdev *sd, + unsigned int pad, struct v4l2_mbus_frame_desc *desc) +{ + struct max9286 *max = to_max_9286(sd); + struct v4l2_mbus_frame_desc_entry *entry = desc->entry; + u8 vc = 0; + int i; + + desc->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2; + + for (i = 0; i < min_t(int, max->nstreams, desc->num_entries); i++) { + struct v4l2_mbus_framefmt *ffmt = + &max->ffmts[i][MAX_PAD_SOURCE]; + const struct max9286_csi_data_format *csi_format = + max9286_validate_csi_data_format(ffmt->code); + + entry->size.two_dim.width = ffmt->width; + entry->size.two_dim.height = ffmt->height; + entry->pixelcode = ffmt->code; + entry->bus.csi2.channel = vc++; + entry->bpp = csi_format->compressed; + entry++; + } + + return 0; +} + +/* Enumerate media bus formats available at a given sub-device pad */ +static int max9286_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct max9286 *max = to_max_9286(sd); + const uint32_t *supported_code = max9286_supported_codes[code->pad]; + bool next_stream = false; + int i; + + if (code->stream & V4L2_SUBDEV_FLAG_NEXT_STREAM) { + next_stream = true; + code->stream &= ~V4L2_SUBDEV_FLAG_NEXT_STREAM; + } + + if (code->stream > max->nstreams) + return -EINVAL; + + if (next_stream) { + if (!(max->pad[code->pad].flags & MEDIA_PAD_FL_MULTIPLEX)) + return -EINVAL; + + if (code->stream < max->nstreams - 1) { + code->stream++; + return 0; + } else + return -EINVAL; + } + + for (i = 0; supported_code[i]; i++) { + if (i == code->index) { + code->code = supported_code[i]; + return 0; + } + } + + return -EINVAL; +} + +/* Configure Media Controller routing */ +static int max9286_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route) +{ + struct max9286 *max = to_max_9286(sd); + int i, j, ret = 0; + + for (i = 0; i < min(route->num_routes, max->nstreams); ++i) { + struct v4l2_subdev_route *t = &route->routes[i]; + unsigned int sink = 
t->sink_pad;
+		unsigned int source = t->source_pad;
+
+		if (t->sink_stream > max->nstreams - 1 ||
+		    t->source_stream > max->nstreams - 1)
+			continue;
+
+		if (t->source_pad != MAX_PAD_SOURCE)
+			continue;
+
+		for (j = 0; j < max->nstreams; j++) {
+			if (sink == max->route[j].sink &&
+			    source == max->route[j].source)
+				break;
+		}
+
+		if (j == max->nstreams)
+			continue;
+
+		max->stream[sink].stream_id[0] = t->sink_stream;
+		max->stream[source].stream_id[sink] = t->source_stream;
+
+		if (t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)
+			max->route[j].flags |= V4L2_SUBDEV_ROUTE_FL_ACTIVE;
+		else
+			max->route[j].flags &= ~V4L2_SUBDEV_ROUTE_FL_ACTIVE;
+	}
+
+	return ret;
+}
+
+/* Report the active Media Controller routing */
+static int max9286_get_routing(struct v4l2_subdev *sd,
+			       struct v4l2_subdev_routing *route)
+{
+	struct max9286 *max = to_max_9286(sd);
+	int i;
+
+	for (i = 0; i < min(max->nstreams, route->num_routes); ++i) {
+		unsigned int sink = max->route[i].sink;
+		unsigned int source = max->route[i].source;
+
+		route->routes[i].sink_pad = sink;
+		route->routes[i].sink_stream =
+			max->stream[sink].stream_id[0];
+		route->routes[i].source_pad = source;
+		route->routes[i].source_stream =
+			max->stream[source].stream_id[sink];
+		route->routes[i].flags = max->route[i].flags;
+	}
+
+	route->num_routes = i;
+
+	return 0;
+}
+
+/* called when the subdev device node is opened by an application */
+static int max9286_open(struct v4l2_subdev *subdev,
+			struct v4l2_subdev_fh *fh)
+{
+	struct v4l2_mbus_framefmt *try_fmt =
+		v4l2_subdev_get_try_format(subdev, fh->pad, 0);
+
+	struct v4l2_subdev_format fmt = {
+		.which = V4L2_SUBDEV_FORMAT_TRY,
+		.pad = MAX_PAD_SOURCE,
+		.format = {
+			.width = MAX9286_MAX_WIDTH,
+			.height = MAX9286_MAX_HEIGHT,
+			.code = MEDIA_BUS_FMT_SGRBG12_1X12,
+		},
+		.stream = 0,
+	};
+
+	*try_fmt = fmt.format;
+
+	return 0;
+}
+
+/*
+ * called when this subdev is registered. When called the v4l2_dev field is
+ * set to the correct v4l2_device.
+ */ +static int max9286_registered(struct v4l2_subdev *subdev) +{ + struct max9286 *max = to_max_9286(subdev); + int i, j, k, l, rval, num, nsinks; + + num = max->pdata->subdev_num; + nsinks = max->nsinks; + for (i = 0, k = 0; (i < num) && (k < nsinks); i++, k++) { + struct max9286_subdev_i2c_info *info = + &max->pdata->subdev_info[i]; + struct i2c_adapter *adapter; + + adapter = i2c_get_adapter(info->i2c_adapter_id); + max->sub_devs[k] = v4l2_i2c_new_subdev_board( + max->v4l2_sd.v4l2_dev, adapter, + &info->board_info, 0); + i2c_put_adapter(adapter); + if (!max->sub_devs[k]) { + dev_err(max->v4l2_sd.dev, + "can't create new i2c subdev %d-%04x\n", + info->i2c_adapter_id, + info->board_info.addr); + continue; + } + + for (j = 0; j < max->sub_devs[k]->entity.num_pads; j++) { + if (max->sub_devs[k]->entity.pads[j].flags & + MEDIA_PAD_FL_SOURCE) + break; + } + + if (j == max->sub_devs[k]->entity.num_pads) { + dev_warn(max->v4l2_sd.dev, + "no source pad in subdev %d-%04x\n", + info->i2c_adapter_id, + info->board_info.addr); + return -ENOENT; + } + + for (l = 0; l < max->nsinks; l++) { + rval = media_create_pad_link( + &max->sub_devs[k]->entity, j, + &max->v4l2_sd.entity, l, 0); + if (rval) { + dev_err(max->v4l2_sd.dev, + "can't create link to %d-%04x\n", + info->i2c_adapter_id, + info->board_info.addr); + return -EINVAL; + } + } + } + + return 0; +} + +static int max9286_set_power(struct v4l2_subdev *subdev, int on) +{ + return 0; +} + +static const struct v4l2_subdev_core_ops max9286_core_subdev_ops = { + .s_power = max9286_set_power, +}; + +static bool max9286_sd_has_route(struct media_entity *entity, + unsigned int pad0, unsigned int pad1, int *stream) +{ + struct max9286 *va = to_max_9286(media_entity_to_v4l2_subdev(entity)); + + if (stream == NULL || *stream >= va->nstreams) + return false; + + if ((va->route[*stream].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) && + ((va->route[*stream].source == pad0 && + va->route[*stream].sink == pad1) || + (va->route[*stream].source == pad1 && + va->route[*stream].sink == pad0))) + return true; + + return false; +} + +static const struct v4l2_subdev_video_ops max9286_sd_video_ops = { + .s_stream = max9286_set_stream, +}; + +static const struct media_entity_operations max9286_sd_entity_ops = { + .has_route = max9286_sd_has_route, +}; + +static const struct v4l2_subdev_pad_ops max9286_sd_pad_ops = { + .get_fmt = max9286_get_format, + .set_fmt = max9286_set_format, + .get_frame_desc = max9286_get_frame_desc, + .enum_mbus_code = max9286_enum_mbus_code, + .set_routing = max9286_set_routing, + .get_routing = max9286_get_routing, +}; + +static struct v4l2_subdev_ops max9286_sd_ops = { + .core = &max9286_core_subdev_ops, + .video = &max9286_sd_video_ops, + .pad = &max9286_sd_pad_ops, +}; + +static struct v4l2_subdev_internal_ops max9286_sd_internal_ops = { + .open = max9286_open, + .registered = max9286_registered, +}; + +static int max9286_s_ctrl(struct v4l2_ctrl *ctrl) +{ + return 0; +} + +static const struct v4l2_ctrl_ops max9286_ctrl_ops = { + .s_ctrl = max9286_s_ctrl, +}; + +static const s64 max9286_op_sys_clock[] = { 87750000, }; +static const struct v4l2_ctrl_config max9286_controls[] = { + { + .ops = &max9286_ctrl_ops, + .id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = V4L2_CTRL_TYPE_INTEGER_MENU, + .max = ARRAY_SIZE(max9286_op_sys_clock) - 1, + .min = 0, + .step = 0, + .def = 0, + .qmenu_int = max9286_op_sys_clock, + }, + { + .ops = &max9286_ctrl_ops, + .id = V4L2_CID_TEST_PATTERN, + .name = "V4L2_CID_TEST_PATTERN", + .type = 
V4L2_CTRL_TYPE_INTEGER, + .max = 1, + .min = 0, + .step = 1, + .def = 0, + }, +}; + +/* Registers MAX9286 sub-devices (Image sensors) */ +static int max9286_register_subdev(struct max9286 *max) +{ + int i, rval; + struct i2c_client *client = v4l2_get_subdevdata(&max->v4l2_sd); + + /* subdevice driver initializes v4l2 subdev */ + v4l2_subdev_init(&max->v4l2_sd, &max9286_sd_ops); + snprintf(max->v4l2_sd.name, sizeof(max->v4l2_sd.name), + "MAX9286 %d-%4.4x", i2c_adapter_id(client->adapter), + client->addr); + max->v4l2_sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | + V4L2_SUBDEV_FL_HAS_SUBSTREAMS; + + max->v4l2_sd.internal_ops = &max9286_sd_internal_ops; + max->v4l2_sd.entity.ops = &max9286_sd_entity_ops; + v4l2_set_subdevdata(&max->v4l2_sd, client); + + v4l2_ctrl_handler_init(&max->ctrl_handler, + ARRAY_SIZE(max9286_controls)); + + if (max->ctrl_handler.error) { + dev_err(max->v4l2_sd.dev, + "Failed to init max9286 controls. ERR: %d!\n", + max->ctrl_handler.error); + return max->ctrl_handler.error; + } + + max->v4l2_sd.ctrl_handler = &max->ctrl_handler; + + for (i = 0; i < ARRAY_SIZE(max9286_controls); i++) { + const struct v4l2_ctrl_config *cfg = + &max9286_controls[i]; + struct v4l2_ctrl *ctrl; + + ctrl = v4l2_ctrl_new_custom(&max->ctrl_handler, cfg, NULL); + if (!ctrl) { + dev_err(max->v4l2_sd.dev, + "Failed to create ctrl %s!\n", cfg->name); + rval = max->ctrl_handler.error; + goto failed_out; + } + } + + max->link_freq = v4l2_ctrl_find(&max->ctrl_handler, V4L2_CID_LINK_FREQ); + max->test_pattern = v4l2_ctrl_find(&max->ctrl_handler, + V4L2_CID_TEST_PATTERN); + + for (i = 0; i < max->nsinks; i++) + max->pad[i].flags = MEDIA_PAD_FL_SINK; + max->pad[MAX_PAD_SOURCE].flags = + MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MULTIPLEX; + rval = media_entity_pads_init(&max->v4l2_sd.entity, + NR_OF_MAX_PADS, max->pad); + if (rval) { + dev_err(max->v4l2_sd.dev, + "Failed to init media entity for max9286!\n"); + goto failed_out; + } + + return 0; +failed_out: + media_entity_cleanup(&max->v4l2_sd.entity); + v4l2_ctrl_handler_free(&max->ctrl_handler); + return rval; +} + +/* + * Get the output link order + * By default: + * bits[7:6] 11: Link 3 is 4th in the CSI-2 output order + * bits[5:4] 10: Link 2 is 3rd in the CSI-2 output order + * bits[3:2] 01: Link 1 is 2nd in the CSI-2 output order + * bits[1:0] 00: Link 0 is 1st in the CSI-2 output order + */ +u8 get_output_link_order(struct max9286 *max) +{ + u8 val = 0xE4, i; + u8 order_config[14][3] = { + {1, 8, 0x27}, + {1, 4, 0xC6}, + {1, 2, 0xE1}, + {1, 1, 0xE4}, + {2, 0xC, 0x4E}, + {2, 0xA, 0x72}, + {2, 0x9, 0x78}, + {2, 0x6, 0xD2}, + {2, 0x5, 0xD8}, + {2, 0x3, 0xE4}, + {3, 0xE, 0x93}, + {3, 0xD, 0x9C}, + {3, 0xB, 0xB4}, + {3, 0x7, 0xE4}, + }; + + if (max->total_sensor_num < 4) { + for (i = 0; i < 14; i++) { + if ((max->total_sensor_num == order_config[i][0]) + && (max->sensor_present == order_config[i][1])) + return order_config[i][2]; + } + } + + /* sensor_num = 4 will return 0xE4 */ + return val; +} + +/* MAX9286 initial setup and Reverse channel setup */ +static int max9286_init(struct max9286 *max, struct i2c_client *client) +{ + int i, rval; + unsigned int val, lval; + u8 mval, slval, tmval; + + usleep_range(10000, 11000); + + rval = regmap_read(max->regmap8, DS_MAX9286_DEVID, &val); + if (rval) { + dev_err(max->v4l2_sd.dev, + "Failed to read device ID of MAX9286!\n"); + return rval; + } + dev_info(max->v4l2_sd.dev, "MAX9286 device ID: 0x%X\n", val); + + rval = regmap_write(max->regmap8, DS_CSI_VC_CTL, 0x93); + if (rval) { + dev_err(max->v4l2_sd.dev, "Failed to 
disable CSI output!\n");
+		return rval;
+	}
+
+	/* All the links are working in Legacy reverse control-channel mode */
+	/* Enable Custom Reverse Channel and First Pulse Length */
+	rval = regmap_write(max->regmap8, DS_ENCRC_FPL, 0x4F);
+	if (rval) {
+		dev_err(max->v4l2_sd.dev,
+			"Failed to enable the custom reverse channel!\n");
+		return rval;
+	}
+	/*
+	 * 2ms of delay is required after any analog change to reverse control
+	 * channel for bus timeout and I2C state machine to settle from any
+	 * glitches
+	 */
+	usleep_range(2000, 3000);
+	/* First pulse length rise time changed from 300ns to 200ns */
+	rval = regmap_write(max->regmap8, DS_FPL_RT, 0x1E);
+	if (rval) {
+		dev_err(max->v4l2_sd.dev,
+			"Failed to set the first pulse length rise time!\n");
+		return rval;
+	}
+	usleep_range(2000, 3000);
+
+	/* Enable configuration links */
+	max96705_write_register(max, 0, S_MAIN_CTL, 0x43);
+	usleep_range(5000, 6000);
+
+	/*
+	 * Enable high threshold for reverse channel input buffer
+	 * This increases immunity to power supply noise when the
+	 * coaxial link is used for power as well as signal
+	 */
+	max96705_write_register(max, 0, S_RSVD_8, 0x01);
+
+	/* Enable change of reverse control parameters */
+	max96705_write_register(max, 0, S_RSVD_97, 0x5F);
+
+	/* Wait 2ms after any change to reverse control channel */
+	usleep_range(2000, 3000);
+
+	/*
+	 * Increase reverse amplitude from 100mV to 170mV to compensate for
+	 * the higher threshold
+	 */
+	rval = regmap_write(max->regmap8, DS_FPL_RT, 0x19);
+	if (rval) {
+		dev_err(max->v4l2_sd.dev,
+			"Failed to increase the reverse amplitude!\n");
+		return rval;
+	}
+	usleep_range(2000, 3000);
+
+	/*
+	 * Enable CSI-2 lanes D0, D1, D2, D3
+	 * Enable CSI-2 DBL (Double Input Mode)
+	 * Enable GMSL DBL for RAWx2
+	 * Enable YUV422 8-bit data type
+	 */
+	rval = regmap_write(max->regmap8, DS_CSI_DBL_DT, 0xF7);
+	if (rval) {
+		dev_err(max->v4l2_sd.dev,
+			"Failed to configure the CSI-2 lanes and data type!\n");
+		return rval;
+	}
+	usleep_range(2000, 3000);
+
+	/*
+	 * Enable Frame sync Auto-mode for row/column reset on frame sync
+	 * sensors
+	 */
+	rval = regmap_write(max->regmap8, DS_FSYNCMODE, 0x00);
+	if (rval) {
+		dev_err(max->v4l2_sd.dev,
+			"Failed to set the frame sync mode!\n");
+		return rval;
+	}
+	usleep_range(2000, 3000);
+
+	rval = regmap_write(max->regmap8, DS_OVERLAP_WIN_LOW, 0x00);
+	rval = regmap_write(max->regmap8, DS_OVERLAP_WIN_HIGH, 0x00);
+
+	rval = regmap_write(max->regmap8, DS_FSYNC_PERIOD_LOW, 0x55);
+	rval = regmap_write(max->regmap8, DS_FSYNC_PERIOD_MIDDLE, 0xc2);
+	rval = regmap_write(max->regmap8, DS_FSYNC_PERIOD_HIGH, 0x2C);
+
+	for (i = 0; i < ARRAY_SIZE(max9286_byte_order_settings); i++) {
+		rval = max96705_write_register(max, 0,
+				max9286_byte_order_settings[i].reg,
+				max9286_byte_order_settings[i].val);
+		if (rval) {
+			dev_err(max->v4l2_sd.dev,
+				"Failed to set max9286 byte order\n");
+			return rval;
+		}
+	}
+
+	/* Detect video links */
+	rval = regmap_read(max->regmap8, DS_CONFIGL_VIDEOL_DET, &lval);
+	if (rval) {
+		dev_err(max->v4l2_sd.dev, "Failed to read register 0x49!\n");
+		return rval;
+	}
+
+	/*
+	 * Check on which links the sensors are connected
+	 * And also check total number of sensors connected to the deserializer
+	 */
+	max->sensor_present = ((lval >> 4) & 0xF) | (lval & 0xF);
+
+	for (i = 0; i < NR_OF_MAX_STREAMS; i++) {
+		if (max->sensor_present & (0x1 << i)) {
+			dev_info(max->v4l2_sd.dev,
+				 "Sensor present on deserializer link %d\n", i);
+			max->total_sensor_num += 1;
+		}
+	}
+
+	dev_info(max->v4l2_sd.dev,
+		 "total sensor present = %d\n", max->total_sensor_num);
+	dev_info(max->v4l2_sd.dev,
+		 "sensor present on links = %d\n", max->sensor_present);
+
+	if (!max->total_sensor_num) {
+		dev_err(max->v4l2_sd.dev, "No sensors connected!\n");
+	} else {
+		dev_info(max->v4l2_sd.dev,
+			 "Total number of sensors connected = %d\n",
+			 max->total_sensor_num);
+	}
+
+	slval = get_output_link_order(max);
+
+	/* Set link output order */
+	rval = regmap_write(max->regmap8, DS_LINK_OUTORD, slval);
+	if (rval) {
+		dev_err(max->v4l2_sd.dev,
+			"Failed to set Link output order!\n");
+		return rval;
+	}
+
+	slval = 0xE0 | max->sensor_present;
+
+	/*
+	 * Enable DBL
+	 * Edge select: Rising Edge
+	 * Enable HS/VS encoding
+	 */
+	max96705_write_register(max, 0, S_CONFIG, 0x94);
+	usleep_range(2000, 3000);
+
+	mval = 0;
+	tmval = 0;
+	/*
+	 * Set up each serializer individually; its I2C slave address is
+	 * changed to a unique value by enabling one reverse channel at a
+	 * time via the deserializer's DS_FWDCCEN_REVCCEN control register.
+	 * Also create a broadcast slave address for the MAX96705
+	 * serializers. After this stage, i2cdetect on the adapter should
+	 * display the devices below
+	 * 10: Sensor address
+	 * 11, 12, 13, 14: Sensors alias addresses
+	 * 41, 42, 43, 44: Serializers alias addresses
+	 * 45: Serializer's broadcast address
+	 * 48: Deserializer's address
+	 */
+	for (i = 1; i <= NR_OF_MAX_SINK_PADS; i++) {
+		/* Set up the link only when a sensor is connected to it */
+		if (((0x1 << (i - 1)) & max->sensor_present) == 0)
+			continue;
+
+		/* Enable only one reverse channel at a time */
+		mval = (0x11 << (i - 1));
+		tmval |= (0x11 << (i - 1));
+		rval = regmap_write(max->regmap8, DS_FWDCCEN_REVCCEN, mval);
+		if (rval) {
+			dev_err(max->v4l2_sd.dev,
+				"Failed to enable the reverse channel for link %d!\n",
+				i);
+			return rval;
+		}
+		/* Wait 2ms after enabling reverse channel */
+		usleep_range(2000, 3000);
+
+		/* Change Serializer slave address */
+		max96705_write_register(max, 0, S_SERADDR,
+					(S_ADDR_MAX96705 + i) << 1);
+		/* Unique link 'i' image sensor slave address */
+		max96705_write_register(max, i, S_I2C_SOURCE_IS,
+					(ADDR_AR0231AT_SENSOR + i) << 1);
+		/* Link 'i' image sensor slave address */
+		max96705_write_register(max, i, S_I2C_DST_IS,
+					ADDR_AR0231AT_SENSOR << 1);
+		/* Serializer broadcast address */
+		max96705_write_register(max, i, S_I2C_SOURCE_SER,
+					S_ADDR_MAX96705_BROADCAST << 1);
+		/* Link 'i' serializer address */
+		max96705_write_register(max, i, S_I2C_DST_SER,
+					(S_ADDR_MAX96705 + i) << 1);
+	}
+
+	/* Enable I2C reverse channels */
+	rval = regmap_write(max->regmap8, DS_FWDCCEN_REVCCEN, tmval);
+	if (rval) {
+		dev_err(max->v4l2_sd.dev,
+			"Failed to enable the reverse channels!\n");
+		return rval;
+	}
+	usleep_range(2000, 3000);
+
+	return 0;
+}
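
The three DS_FSYNC_PERIOD_* writes in max9286_init() program one 24-bit internal frame-sync period, low byte in register 0x06. Assuming the counter ticks at the 87.75 MHz rate advertised in max9286_op_sys_clock[] below (an assumption; the MAX9286 datasheet is authoritative), the programmed values work out to roughly a 30 fps sync pulse:

	u32 period = (0x2C << 16) | (0xC2 << 8) | 0x55; /* 2933333 ticks */

	/* 2933333 ticks / 87750000 Hz ~= 33.4 ms per pulse ~= 29.9 fps */
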
+
+/* Unbind the MAX9286 device driver from the I2C client */
+static int max9286_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+	struct max9286 *max = to_max_9286(subdev);
+	int i;
+
+	mutex_destroy(&max->max_mutex);
+	v4l2_ctrl_handler_free(&max->ctrl_handler);
+	v4l2_device_unregister_subdev(&max->v4l2_sd);
+	media_entity_cleanup(&max->v4l2_sd.entity);
+
+	for (i = 0; i < NR_OF_MAX_SINK_PADS; i++) {
+		if (max->sub_devs[i]) {
+			struct i2c_client *sub_client =
+				v4l2_get_subdevdata(max->sub_devs[i]);
+
+			i2c_unregister_device(sub_client);
+		}
+		max->sub_devs[i] = NULL;
+	}
+
+	return 0;
+}
+
+/* Called by I2C probe */
+static int max9286_probe(struct i2c_client *client,
+			 const struct i2c_device_id *devid)
+{
+	struct max9286 *max;
+	int i = 0;
+	int rval = 0;
+
+	if (client->dev.platform_data == NULL)
+		return -ENODEV;
+
+	max =
devm_kzalloc(&client->dev, sizeof(*max), GFP_KERNEL); + if (!max) + return -ENOMEM; + + max->pdata = client->dev.platform_data; + + max->nsources = NR_OF_MAX_SOURCE_PADS; + max->nsinks = NR_OF_MAX_SINK_PADS; + max->npads = NR_OF_MAX_PADS; + max->nstreams = NR_OF_MAX_STREAMS; + + max->crop = devm_kcalloc(&client->dev, max->npads, + sizeof(struct v4l2_rect), GFP_KERNEL); + max->compose = devm_kcalloc(&client->dev, max->npads, + sizeof(struct v4l2_rect), GFP_KERNEL); + max->route = devm_kcalloc(&client->dev, max->nstreams, + sizeof(*max->route), GFP_KERNEL); + max->stream = devm_kcalloc(&client->dev, max->npads, + sizeof(*max->stream), GFP_KERNEL); + + if (!max->crop || !max->compose || !max->route || !max->stream) + return -ENOMEM; + + for (i = 0; i < max->npads; i++) { + max->ffmts[i] = + devm_kcalloc(&client->dev, max->nstreams, + sizeof(struct v4l2_mbus_framefmt), GFP_KERNEL); + if (!max->ffmts[i]) + return -ENOMEM; + + max->stream[i].stream_id = + devm_kcalloc(&client->dev, max->nsinks, + sizeof(int), GFP_KERNEL); + if (!max->stream[i].stream_id) + return -ENOMEM; + } + + for (i = 0; i < max->nstreams; i++) { + max->route[i].sink = i; + max->route[i].source = MAX_PAD_SOURCE; + max->route[i].flags = 0; + } + + for (i = 0; i < max->nsinks; i++) { + max->stream[i].stream_id[0] = i; + max->stream[MAX_PAD_SOURCE].stream_id[i] = i; + } + + max->regmap8 = devm_regmap_init_i2c(client, &max9286_reg_config8); + if (IS_ERR(max->regmap8)) { + dev_err(&client->dev, "Failed to init regmap8!\n"); + return -EIO; + } + + mutex_init(&max->max_mutex); + + v4l2_i2c_subdev_init(&max->v4l2_sd, client, &max9286_sd_ops); + + rval = max9286_register_subdev(max); + if (rval) { + dev_err(&client->dev, + "Failed to register MAX9286 subdevice!\n"); + goto error_mutex_destroy; + } + + rval = max9286_init(max, client); + if (rval) { + dev_err(&client->dev, "Failed to initialise MAX9286!\n"); + goto error_media_entity; + } + + return 0; + +error_media_entity: + media_entity_cleanup(&max->v4l2_sd.entity); + v4l2_ctrl_handler_free(&max->ctrl_handler); +error_mutex_destroy: + mutex_destroy(&max->max_mutex); + + return rval; +} + +#ifdef CONFIG_PM +static int max9286_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct max9286 *max = to_max_9286(subdev); + + return max9286_init(max, client); +} +#else +#define max9286_resume NULL +#endif /* CONFIG_PM */ + +static const struct i2c_device_id max9286_id_table[] = { + { MAX9286_NAME, 0 }, + {}, +}; + +static const struct dev_pm_ops max9286_pm_ops = { + .resume = max9286_resume, +}; + +static struct i2c_driver max9286_i2c_driver = { + .driver = { + .name = MAX9286_NAME, + .pm = &max9286_pm_ops, + }, + .probe = max9286_probe, + .remove = max9286_remove, + .id_table = max9286_id_table, +}; + +module_i2c_driver(max9286_i2c_driver); + +MODULE_AUTHOR("Kiran Kumar "); +MODULE_AUTHOR("Kun Jiang "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Maxim96705 serializer and Maxim9286 deserializer driver"); diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c index d28845f7356f..a31fe18c71d6 100644 --- a/drivers/media/i2c/ov5645.c +++ b/drivers/media/i2c/ov5645.c @@ -1131,13 +1131,14 @@ static int ov5645_probe(struct i2c_client *client, ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint), &ov5645->ep); + + of_node_put(endpoint); + if (ret < 0) { dev_err(dev, "parsing endpoint node failed\n"); return ret; } - of_node_put(endpoint); - if (ov5645->ep.bus_type != V4L2_MBUS_CSI2) 
{ dev_err(dev, "invalid bus type, must be CSI2\n"); return -EINVAL; diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c new file mode 100644 index 000000000000..9c8fabd03fda --- /dev/null +++ b/drivers/media/i2c/ov8856.c @@ -0,0 +1,1476 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2017 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include + +#ifndef V4L2_CID_DIGITAL_GAIN +#define V4L2_CID_DIGITAL_GAIN V4L2_CID_GAIN +#endif + +#define OV8856_REG_VALUE_08BIT 1 +#define OV8856_REG_VALUE_16BIT 2 +#define OV8856_REG_VALUE_24BIT 3 + +#define OV8856_REG_MODE_SELECT 0x0100 +#define OV8856_MODE_STANDBY 0x00 +#define OV8856_MODE_STREAMING 0x01 + +#define OV8856_REG_SOFTWARE_RST 0x0103 +#define OV8856_SOFTWARE_RST 0x01 + +/* Chip ID */ +#define OV8856_REG_CHIP_ID 0x300a +#define OV8856_CHIP_ID 0x00885a + +/* V_TIMING internal */ +#define OV8856_REG_VTS 0x380e +#define OV8856_VTS_30FPS 0x09b2 /* 30 fps */ +#define OV8856_VTS_60FPS 0x04da /* 60 fps */ +#define OV8856_VTS_MAX 0x7fff + +/* HBLANK control - read only */ +#define OV8856_PPL_270MHZ 1648 +#define OV8856_PPL_540MHZ 3296 + +/* Exposure control */ +#define OV8856_REG_EXPOSURE 0x3500 +#define OV8856_EXPOSURE_MIN 4 +#define OV8856_EXPOSURE_STEP 1 +#define OV8856_EXPOSURE_DEFAULT 0x640 + +/* Analog gain control */ +#define OV8856_REG_ANALOG_GAIN 0x3508 +#define OV8856_ANA_GAIN_MIN 0 +#define OV8856_ANA_GAIN_MAX 0x1fff +#define OV8856_ANA_GAIN_STEP 1 +#define OV8856_ANA_GAIN_DEFAULT 0x80 + +/* Digital gain control */ +#define OV8856_REG_B_MWB_GAIN 0x5019 +#define OV8856_REG_G_MWB_GAIN 0x501b +#define OV8856_REG_R_MWB_GAIN 0x501d +#define OV8856_DGTL_GAIN_MIN 0 +#define OV8856_DGTL_GAIN_MAX 16384 /* Max = 16 X */ +#define OV8856_DGTL_GAIN_DEFAULT 1024 /* Default gain = 1 X */ +#define OV8856_DGTL_GAIN_STEP 1 /* Each step = 1/1024 */ + +/* Test Pattern Control */ +#define OV8856_REG_TEST_PATTERN 0x4503 +#define OV8856_TEST_PATTERN_ENABLE BIT(7) +#define OV8856_TEST_PATTERN_MASK 0xfc + +/* Number of frames to skip */ +#define OV8856_NUM_OF_SKIP_FRAMES 2 + +struct ov8856_reg { + u16 address; + u8 val; +}; + +struct ov8856_reg_list { + u32 num_of_regs; + const struct ov8856_reg *regs; +}; + +/* Link frequency config */ +struct ov8856_link_freq_config { + u32 pixels_per_line; + + /* PLL registers for this link frequency */ + struct ov8856_reg_list reg_list; +}; + +/* Mode : resolution and related config&values */ +struct ov8856_mode { + /* Frame width */ + u32 width; + /* Frame height */ + u32 height; + + /* V-timing */ + u32 vts_def; + u32 vts_min; + + /* Index of Link frequency config to be used */ + u32 link_freq_index; + /* Default register values */ + struct ov8856_reg_list reg_list; +}; + +/* 4224x3136 needs 1080Mbps/lane, 4 lanes */ +static const struct ov8856_reg mipi_data_rate_1080mbps[] = { + /* PLL1 registers */ + {0x0302, 0x26 }, /* 3c for 19.2Mhz input clk */ + {0x0303, 0x00 }, /* 01 for 19.2Mhz input clk */ + {0x030d, 0x26 }, /* 1e for 19.2Mhz input clk */ + {0x031e, 0x0c }, + {0x3000, 0x00 }, + {0x300e, 0x00 }, + {0x3010, 0x00 }, + {0x3015, 0x84 }, + {0x3018, 0x72 }, + {0x3033, 0x24 }, + {0x3500, 0x00 }, + {0x3501, 0x9a }, + {0x3502, 0x20 }, + {0x3503, 0x00 }, + {0x3505, 0x83 }, + {0x3508, 0x00 }, + {0x3509, 0x80 }, + {0x350c, 0x00 }, + {0x350d, 0x80 }, + {0x350e, 0x04 }, + {0x350f, 0x00 }, + {0x3510, 0x00 }, + {0x3511, 0x02 }, + {0x3512, 0x00 }, + {0x3600, 0x72 }, + {0x3601, 0x40 }, + {0x3602, 0x30 }, + {0x3610, 0xc5 }, + {0x3611, 0x58 }, + {0x3612, 0x5c }, + {0x3613, 0x5a }, 
+ {0x3614, 0x60 }, + {0x3628, 0xff }, + {0x3629, 0xff }, + {0x362a, 0xff }, + {0x3633, 0x10 }, + {0x3634, 0x10 }, + {0x3635, 0x10 }, + {0x3636, 0x10 }, + {0x3663, 0x08 }, + {0x3669, 0x34 }, + {0x366e, 0x10 }, + {0x3706, 0x86 }, + {0x370b, 0x7e }, + {0x3714, 0x23 }, + {0x3730, 0x12 }, + {0x3733, 0x10 }, + {0x3764, 0x00 }, + {0x3765, 0x00 }, + {0x3769, 0x62 }, + {0x376a, 0x2a }, + {0x376b, 0x30 }, + {0x3780, 0x00 }, + {0x3781, 0x24 }, + {0x3782, 0x00 }, + {0x3783, 0x23 }, + {0x3798, 0x2f }, + {0x37a1, 0x60 }, + {0x37a8, 0x6a }, + {0x37ab, 0x3f }, + {0x37c2, 0x04 }, + {0x37c3, 0xf1 }, + {0x37c9, 0x80 }, + {0x37cb, 0x03 }, + {0x37cc, 0x0a }, + {0x37cd, 0x16 }, + {0x37ce, 0x1f }, + {0x3814, 0x01 }, + {0x3815, 0x01 }, + {0x3816, 0x00 }, + {0x3817, 0x00 }, + {0x3818, 0x00 }, + {0x3819, 0x00 }, + {0x3820, 0x80 }, + {0x3821, 0x46 }, + {0x382a, 0x01 }, + {0x382b, 0x01 }, + {0x3830, 0x06 }, + {0x3836, 0x02 }, + {0x3862, 0x04 }, + {0x3863, 0x08 }, + {0x3cc0, 0x33 }, + {0x3d85, 0x17 }, + {0x3d8c, 0x73 }, + {0x3d8d, 0xde }, + {0x4001, 0xe0 }, + {0x4003, 0x40 }, + {0x4008, 0x00 }, + {0x4009, 0x0b }, + {0x400a, 0x00 }, + {0x400b, 0x84 }, + {0x400f, 0x80 }, + {0x4010, 0xf0 }, + {0x4011, 0xff }, + {0x4012, 0x02 }, + {0x4013, 0x01 }, + {0x4014, 0x01 }, + {0x4015, 0x01 }, + {0x4042, 0x00 }, + {0x4043, 0x80 }, + {0x4044, 0x00 }, + {0x4045, 0x80 }, + {0x4046, 0x00 }, + {0x4047, 0x80 }, + {0x4048, 0x00 }, + {0x4049, 0x80 }, + {0x4041, 0x03 }, + {0x404c, 0x20 }, + {0x404d, 0x00 }, + {0x404e, 0x20 }, + {0x4203, 0x80 }, + {0x4307, 0x30 }, + {0x4317, 0x00 }, + {0x4503, 0x08 }, + {0x4601, 0x80 }, + {0x4816, 0x53 }, + {0x481b, 0x58 }, + {0x481f, 0x27 }, + {0x482a, 0x3C }, /* set sensor TX to 60*UI + 145ns */ + {0x4818, 0x00 }, /* set sensor TX to 60*UI + 145ns */ + {0x4819, 0x91 }, /* set sensor TX to 60*UI + 145ns */ + {0x4837, 0x15 }, /* 16 for 19.2Mhz input clk */ + {0x5000, 0x77 }, + {0x5001, 0x0a }, + {0x5004, 0x06 }, + {0x502e, 0x03 }, + {0x5030, 0x41 }, + {0x5795, 0x02 }, + {0x5796, 0x20 }, + {0x5797, 0x20 }, + {0x5798, 0xd5 }, + {0x5799, 0xd5 }, + {0x579a, 0x00 }, + {0x579b, 0x50 }, + {0x579c, 0x00 }, + {0x579d, 0x2c }, + {0x579e, 0x0c }, + {0x579f, 0x40 }, + {0x57a0, 0x09 }, + {0x57a1, 0x40 }, + {0x5780, 0x14 }, + {0x5781, 0x0f }, + {0x5782, 0x44 }, + {0x5783, 0x02 }, + {0x5784, 0x01 }, + {0x5785, 0x01 }, + {0x5786, 0x00 }, + {0x5787, 0x04 }, + {0x5788, 0x02 }, + {0x5789, 0x0f }, + {0x578a, 0xfd }, + {0x578b, 0xf5 }, + {0x578c, 0xf5 }, + {0x578d, 0x03 }, + {0x578e, 0x08 }, + {0x578f, 0x0c }, + {0x5790, 0x08 }, + {0x5791, 0x04 }, + {0x5792, 0x00 }, + {0x5793, 0x52 }, + {0x5794, 0xa3 }, + {0x59f8, 0x3d }, + {0x5a08, 0x02 }, + {0x5b00, 0x02 }, + {0x5b01, 0x10 }, + {0x5b02, 0x03 }, + {0x5b03, 0xcf }, + {0x5b05, 0x6c }, + {0x5e00, 0x00 }, +}; + +/* + * 2112x1568, 2112x1188, 1056x784 need 540Mbps/lane, + * 4 lanes + */ +static const struct ov8856_reg mipi_data_rate_540mbps[] = { + {0x0302, 0x43 }, /* 3c for 19.2Mhz input clk */ + {0x0303, 0x00 }, /* 01 for 19.2Mhz input clk */ + {0x030d, 0x26 }, /* 1e for 19.2Mhz input clk */ + {0x031e, 0x0c }, + {0x3000, 0x00 }, + {0x300e, 0x00 }, + {0x3010, 0x00 }, + {0x3015, 0x84 }, + {0x3018, 0x32 }, + {0x3033, 0x24 }, + {0x3500, 0x00 }, + {0x3501, 0x9a }, + {0x3502, 0x20 }, + {0x3503, 0x00 }, + {0x3505, 0x83 }, + {0x3508, 0x00 }, + {0x3509, 0x80 }, + {0x350c, 0x00 }, + {0x350d, 0x80 }, + {0x350e, 0x04 }, + {0x350f, 0x00 }, + {0x3510, 0x00 }, + {0x3511, 0x02 }, + {0x3512, 0x00 }, + {0x3600, 0x72 }, + {0x3601, 0x40 }, + {0x3602, 0x30 }, + {0x3610, 0xc5 }, + {0x3611, 0x58 }, + 
{0x3612, 0x5c }, + {0x3613, 0x5a }, + {0x3614, 0x60 }, + {0x3628, 0xff }, + {0x3629, 0xff }, + {0x362a, 0xff }, + {0x3633, 0x10 }, + {0x3634, 0x10 }, + {0x3635, 0x10 }, + {0x3636, 0x10 }, + {0x3663, 0x08 }, + {0x3669, 0x34 }, + {0x366e, 0x10 }, + {0x3706, 0x86 }, + {0x370b, 0x7e }, + {0x3714, 0x23 }, + {0x3730, 0x12 }, + {0x3733, 0x10 }, + {0x3764, 0x00 }, + {0x3765, 0x00 }, + {0x3769, 0x62 }, + {0x376a, 0x2a }, + {0x376b, 0x30 }, + {0x3780, 0x00 }, + {0x3781, 0x24 }, + {0x3782, 0x00 }, + {0x3783, 0x23 }, + {0x3798, 0x2f }, + {0x37a1, 0x60 }, + {0x37a8, 0x6a }, + {0x37ab, 0x3f }, + {0x37c2, 0x04 }, + {0x37c3, 0xf1 }, + {0x37c9, 0x80 }, + {0x37cb, 0x03 }, + {0x37cc, 0x0a }, + {0x37cd, 0x16 }, + {0x37ce, 0x1f }, + {0x3814, 0x01 }, + {0x3815, 0x01 }, + {0x3816, 0x00 }, + {0x3817, 0x00 }, + {0x3818, 0x00 }, + {0x3819, 0x00 }, + {0x3820, 0x80 }, + {0x3821, 0x46 }, + {0x382a, 0x01 }, + {0x382b, 0x01 }, + {0x3830, 0x06 }, + {0x3836, 0x02 }, + {0x3862, 0x04 }, + {0x3863, 0x08 }, + {0x3cc0, 0x33 }, + {0x3d85, 0x17 }, + {0x3d8c, 0x73 }, + {0x3d8d, 0xde }, + {0x4001, 0xe0 }, + {0x4003, 0x40 }, + {0x4008, 0x00 }, + {0x4009, 0x0b }, + {0x400a, 0x00 }, + {0x400b, 0x84 }, + {0x400f, 0x80 }, + {0x4010, 0xf0 }, + {0x4011, 0xff }, + {0x4012, 0x02 }, + {0x4013, 0x01 }, + {0x4014, 0x01 }, + {0x4015, 0x01 }, + {0x4042, 0x00 }, + {0x4043, 0x80 }, + {0x4044, 0x00 }, + {0x4045, 0x80 }, + {0x4046, 0x00 }, + {0x4047, 0x80 }, + {0x4048, 0x00 }, + {0x4049, 0x80 }, + {0x4041, 0x03 }, + {0x404c, 0x20 }, + {0x404d, 0x00 }, + {0x404e, 0x20 }, + {0x4203, 0x80 }, + {0x4307, 0x30 }, + {0x4317, 0x00 }, + {0x4503, 0x08 }, + {0x4601, 0x80 }, + {0x4816, 0x53 }, + {0x481b, 0x58 }, + {0x481f, 0x27 }, + {0x482a, 0x06 }, /* set sensor TX to 60*UI + 145ns */ + {0x4818, 0x00 }, /* set sensor TX to 60*UI + 145ns */ + {0x4819, 0x70 }, /* set sensor TX to 60*UI + 145ns */ + {0x4837, 0x0C }, + {0x5000, 0x77 }, + {0x5001, 0x0a }, + {0x5004, 0x06 }, + {0x502e, 0x03 }, + {0x5030, 0x41 }, + {0x5795, 0x02 }, + {0x5796, 0x20 }, + {0x5797, 0x20 }, + {0x5798, 0xd5 }, + {0x5799, 0xd5 }, + {0x579a, 0x00 }, + {0x579b, 0x50 }, + {0x579c, 0x00 }, + {0x579d, 0x2c }, + {0x579e, 0x0c }, + {0x579f, 0x40 }, + {0x57a0, 0x09 }, + {0x57a1, 0x40 }, + {0x5780, 0x14 }, + {0x5781, 0x0f }, + {0x5782, 0x44 }, + {0x5783, 0x02 }, + {0x5784, 0x01 }, + {0x5785, 0x01 }, + {0x5786, 0x00 }, + {0x5787, 0x04 }, + {0x5788, 0x02 }, + {0x5789, 0x0f }, + {0x578a, 0xfd }, + {0x578b, 0xf5 }, + {0x578c, 0xf5 }, + {0x578d, 0x03 }, + {0x578e, 0x08 }, + {0x578f, 0x0c }, + {0x5790, 0x08 }, + {0x5791, 0x04 }, + {0x5792, 0x00 }, + {0x5793, 0x52 }, + {0x5794, 0xa3 }, + {0x59f8, 0x3d }, + {0x5a08, 0x02 }, + {0x5b00, 0x02 }, + {0x5b01, 0x10 }, + {0x5b02, 0x03 }, + {0x5b03, 0xcf }, + {0x5b05, 0x6c }, + {0x5e00, 0x00 }, +}; + +static const struct ov8856_reg mode_3264x2448_regs[] = { + {0x3800, 0x00 }, + {0x3801, 0x00 }, + {0x3802, 0x00 }, + {0x3803, 0x0c }, + {0x3804, 0x0c }, + {0x3805, 0xdf }, + {0x3806, 0x09 }, + {0x3807, 0xa3 }, + {0x3808, 0x0c }, + {0x3809, 0xc0 }, + {0x380a, 0x09 }, + {0x380b, 0x90 }, + {0x380c, 0x07 }, + {0x380d, 0x8c }, + {0x380e, 0x09 }, + {0x380f, 0xb2 }, + {0x3810, 0x00 }, + {0x3811, 0x10 }, + {0x3812, 0x00 }, + {0x3813, 0x03 }, +}; + +static const struct ov8856_reg mode_3280x2464_regs[] = { + {0x3800, 0x00 }, + {0x3801, 0x00 }, + {0x3802, 0x00 }, + {0x3803, 0x04 }, + {0x3804, 0x0c }, + {0x3805, 0xdf }, + {0x3806, 0x09 }, + {0x3807, 0xa7 }, + {0x3808, 0x0c }, + {0x3809, 0xd0 }, + {0x380a, 0x09 }, + {0x380b, 0xa0 }, + {0x380c, 0x07 }, + {0x380d, 0x8c }, + {0x380e, 
0x09 }, + {0x380f, 0xc2 }, + {0x3810, 0x00 }, + {0x3811, 0x08 }, + {0x3812, 0x00 }, + {0x3813, 0x03 }, +}; + +static const struct ov8856_reg mode_3280x1848_regs[] = { + {0x3800, 0x00 }, + {0x3801, 0x00 }, + {0x3802, 0x01 }, + {0x3803, 0x38 }, + {0x3804, 0x0c }, + {0x3805, 0xdf }, + {0x3806, 0x09 }, + {0x3807, 0x77 }, + {0x3808, 0x0c }, + {0x3809, 0xd0 }, + {0x380a, 0x07 }, + {0x380b, 0x38 }, + {0x380c, 0x07 }, + {0x380d, 0x8c }, + {0x380e, 0x09 }, + {0x380f, 0xc2 }, + {0x3810, 0x00 }, + {0x3811, 0x08 }, + {0x3812, 0x00 }, + {0x3813, 0x03 }, +}; + +static const struct ov8856_reg mode_1940x1096_regs[] = { + /* 60 FPS */ + {0x3800, 0x02 }, + {0x3801, 0x8e }, + {0x3802, 0x03 }, + {0x3803, 0xc6 }, + {0x3804, 0x0a }, + {0x3805, 0x49 }, + {0x3806, 0x08 }, + {0x3807, 0x92 }, + {0x3808, 0x07 }, + {0x3809, 0x94 }, + {0x380a, 0x04 }, + {0x380b, 0x48 }, + {0x380c, 0x07 }, + {0x380d, 0x8c }, + {0x380e, 0x04 }, + {0x380f, 0xda }, + {0x3810, 0x00 }, + {0x3811, 0x10 }, + {0x3812, 0x00 }, + {0x3813, 0x03 }, +}; + +static const struct ov8856_reg mode_2112x1186_regs[] = { + /* 60 FPS */ + {0x3800, 0x02 }, + {0x3801, 0x40 }, + {0x3802, 0x02 }, + {0x3803, 0x82 }, + {0x3804, 0x0a }, + {0x3805, 0x9f }, + {0x3806, 0x07 }, + {0x3807, 0x2d }, + {0x3808, 0x08 }, + {0x3809, 0x40 }, + {0x380a, 0x04 }, + {0x380b, 0xa4 }, + {0x380c, 0x07 }, + {0x380d, 0x8c }, + {0x380e, 0x04 }, + {0x380f, 0xda }, + {0x3810, 0x00 }, + {0x3811, 0x10 }, + {0x3812, 0x00 }, + {0x3813, 0x03 }, +}; + +static const char * const ov8856_test_pattern_menu[] = { + "Disabled", + "Vertical Color Bar Type 1", + "Vertical Color Bar Type 2", + "Vertical Color Bar Type 3", + "Vertical Color Bar Type 4" +}; + +/* Configurations for supported link frequencies */ +#define OV8856_NUM_OF_LINK_FREQS 2 +#define OV8856_LINK_FREQ_540MHZ 540000000ULL +#define OV8856_LINK_FREQ_270MHZ 270000000ULL +#define OV8856_LINK_FREQ_INDEX_0 0 +#define OV8856_LINK_FREQ_INDEX_1 1 + +/* Menu items for LINK_FREQ V4L2 control */ +static const s64 link_freq_menu_items[OV8856_NUM_OF_LINK_FREQS] = { + OV8856_LINK_FREQ_540MHZ, + OV8856_LINK_FREQ_270MHZ +}; + +/* Link frequency configs */ +static const struct ov8856_link_freq_config +link_freq_configs[OV8856_NUM_OF_LINK_FREQS] = { + { + .pixels_per_line = OV8856_PPL_540MHZ, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mipi_data_rate_1080mbps), + .regs = mipi_data_rate_1080mbps, + } + }, + { + .pixels_per_line = OV8856_PPL_270MHZ, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mipi_data_rate_540mbps), + .regs = mipi_data_rate_540mbps, + } + } +}; + +/* Mode configs */ +static const struct ov8856_mode supported_modes[] = { + { + .width = 3264, + .height = 2448, + .vts_def = OV8856_VTS_30FPS, + .vts_min = OV8856_VTS_30FPS, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_3264x2448_regs), + .regs = mode_3264x2448_regs, + }, + .link_freq_index = OV8856_LINK_FREQ_INDEX_0, + }, + { + .width = 3280, + .height = 2464, + .vts_def = 0x9c2, + .vts_min = 0x9c2, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_3280x2464_regs), + .regs = mode_3280x2464_regs, + }, + .link_freq_index = OV8856_LINK_FREQ_INDEX_0, + }, + { + .width = 3280, + .height = 1848, + .vts_def = OV8856_VTS_30FPS, + .vts_min = OV8856_VTS_30FPS, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_3280x1848_regs), + .regs = mode_3280x1848_regs, + }, + .link_freq_index = OV8856_LINK_FREQ_INDEX_0, + }, + { + .width = 1940, + .height = 1096, + .vts_def = OV8856_VTS_60FPS, + .vts_min = OV8856_VTS_60FPS, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1940x1096_regs), + .regs = 
mode_1940x1096_regs, + }, + .link_freq_index = OV8856_LINK_FREQ_INDEX_1, + }, + { + .width = 2112, + .height = 1186, + .vts_def = OV8856_VTS_60FPS, + .vts_min = OV8856_VTS_60FPS, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_2112x1186_regs), + .regs = mode_2112x1186_regs, + }, + .link_freq_index = OV8856_LINK_FREQ_INDEX_1, + }, +}; + +struct ov8856 { + struct v4l2_subdev sd; + struct media_pad pad; + + struct v4l2_ctrl_handler ctrl_handler; + /* V4L2 Controls */ + struct v4l2_ctrl *link_freq; + struct v4l2_ctrl *pixel_rate; + struct v4l2_ctrl *vblank; + struct v4l2_ctrl *hblank; + struct v4l2_ctrl *exposure; + + /* Current mode */ + const struct ov8856_mode *cur_mode; + + /* Mutex for serialized access */ + struct mutex mutex; + + /* Streaming on/off */ + bool streaming; +}; + +#define to_ov8856(_sd) container_of(_sd, struct ov8856, sd) + +/* Read registers up to 4 at a time */ +static int ov8856_read_reg(struct ov8856 *ov8856, u16 reg, u32 len, u32 *val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); + struct i2c_msg msgs[2]; + u8 *data_be_p; + int ret; + u32 data_be = 0; + u16 reg_addr_be = cpu_to_be16(reg); + + if (len > 4) + return -EINVAL; + + data_be_p = (u8 *)&data_be; + /* Write register address */ + msgs[0].addr = client->addr; + msgs[0].flags = 0; + msgs[0].len = 2; + msgs[0].buf = (u8 *)®_addr_be; + + /* Read data from register */ + msgs[1].addr = client->addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = len; + msgs[1].buf = &data_be_p[4 - len]; + + ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (ret != ARRAY_SIZE(msgs)) + return -EIO; + + *val = be32_to_cpu(data_be); + + return 0; +} + +/* Write registers up to 4 at a time */ +static int ov8856_write_reg(struct ov8856 *ov8856, u16 reg, u32 len, u32 val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); + int buf_i, val_i; + u8 buf[6], *val_p; + + if (len > 4) + return -EINVAL; + + buf[0] = reg >> 8; + buf[1] = reg & 0xff; + + val = cpu_to_be32(val); + val_p = (u8 *)&val; + buf_i = 2; + val_i = 4 - len; + + while (val_i < 4) + buf[buf_i++] = val_p[val_i++]; + + if (i2c_master_send(client, buf, len + 2) != len + 2) + return -EIO; + + return 0; +} + +/* Write a list of registers */ +static int ov8856_write_regs(struct ov8856 *ov8856, + const struct ov8856_reg *regs, u32 len) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); + int ret; + u32 i; + + for (i = 0; i < len; i++) { + ret = ov8856_write_reg(ov8856, regs[i].address, 1, regs[i].val); + if (ret) { + dev_err_ratelimited( + &client->dev, + "Failed to write reg 0x%4.4x. 
error = %d\n", + regs[i].address, ret); + + return ret; + } + } + + return 0; +} + +static int ov8856_write_reg_list(struct ov8856 *ov8856, + const struct ov8856_reg_list *r_list) +{ + return ov8856_write_regs(ov8856, r_list->regs, r_list->num_of_regs); +} + +/* Open sub-device */ +static int ov8856_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct ov8856 *ov8856 = to_ov8856(sd); + struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_get_try_format(sd, + fh->pad, + 0); + + mutex_lock(&ov8856->mutex); + + /* Initialize try_fmt */ + try_fmt->width = ov8856->cur_mode->width; + try_fmt->height = ov8856->cur_mode->height; + try_fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; + try_fmt->field = V4L2_FIELD_NONE; + + /* No crop or compose */ + mutex_unlock(&ov8856->mutex); + + return 0; +} + +static int ov8856_update_digital_gain(struct ov8856 *ov8856, u32 d_gain) +{ + int ret; + + ret = ov8856_write_reg(ov8856, OV8856_REG_B_MWB_GAIN, + OV8856_REG_VALUE_16BIT, d_gain); + if (ret) + return ret; + + ret = ov8856_write_reg(ov8856, OV8856_REG_G_MWB_GAIN, + OV8856_REG_VALUE_16BIT, d_gain); + if (ret) + return ret; + + ret = ov8856_write_reg(ov8856, OV8856_REG_R_MWB_GAIN, + OV8856_REG_VALUE_16BIT, d_gain); + + return ret; +} + +static int ov8856_enable_test_pattern(struct ov8856 *ov8856, u32 pattern) +{ + int ret; + u32 val; + + ret = ov8856_read_reg(ov8856, OV8856_REG_TEST_PATTERN, + OV8856_REG_VALUE_08BIT, &val); + if (ret) + return ret; + + if (pattern) { + val &= OV8856_TEST_PATTERN_MASK; + val |= (pattern - 1) | OV8856_TEST_PATTERN_ENABLE; + } else { + val &= ~OV8856_TEST_PATTERN_ENABLE; + } + + return ov8856_write_reg(ov8856, OV8856_REG_TEST_PATTERN, + OV8856_REG_VALUE_08BIT, val); +} + +static int ov8856_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct ov8856 *ov8856 = container_of(ctrl->handler, + struct ov8856, ctrl_handler); + struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); + s64 max; + int ret; + + /* Propagate change of current control to all related controls */ + switch (ctrl->id) { + case V4L2_CID_VBLANK: + /* Update max exposure while meeting expected vblanking */ + max = ov8856->cur_mode->height + ctrl->val - 8; + __v4l2_ctrl_modify_range(ov8856->exposure, + ov8856->exposure->minimum, + max, ov8856->exposure->step, max); + break; + }; + + /* + * Applying V4L2 control value only happens + * when power is up for streaming + */ + if (pm_runtime_get_if_in_use(&client->dev) <= 0) + return 0; + + ret = 0; + switch (ctrl->id) { + case V4L2_CID_ANALOGUE_GAIN: + ret = ov8856_write_reg(ov8856, OV8856_REG_ANALOG_GAIN, + OV8856_REG_VALUE_16BIT, ctrl->val); + break; + case V4L2_CID_DIGITAL_GAIN: + ret = ov8856_update_digital_gain(ov8856, ctrl->val); + break; + case V4L2_CID_EXPOSURE: + ret = ov8856_write_reg(ov8856, OV8856_REG_EXPOSURE, + OV8856_REG_VALUE_24BIT, + ctrl->val << 4); + break; + case V4L2_CID_VBLANK: + /* Update VTS that meets expected vertical blanking */ + ret = ov8856_write_reg(ov8856, OV8856_REG_VTS, + OV8856_REG_VALUE_16BIT, + ov8856->cur_mode->height + ctrl->val); + break; + case V4L2_CID_TEST_PATTERN: + ret = ov8856_enable_test_pattern(ov8856, ctrl->val); + break; + default: + dev_info(&client->dev, + "ctrl(id:0x%x,val:0x%x) is not handled\n", + ctrl->id, ctrl->val); + break; + }; + + pm_runtime_put(&client->dev); + + return ret; +} + +static const struct v4l2_ctrl_ops ov8856_ctrl_ops = { + .s_ctrl = ov8856_set_ctrl, +}; + +static int ov8856_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + 
/* Only one bayer order(GRBG) is supported */ + if (code->index > 0) + return -EINVAL; + + code->code = MEDIA_BUS_FMT_SGRBG10_1X10; + + return 0; +} + +static int ov8856_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + if (fse->index >= ARRAY_SIZE(supported_modes)) + return -EINVAL; + + if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) + return -EINVAL; + + fse->min_width = supported_modes[fse->index].width; + fse->max_width = fse->min_width; + fse->min_height = supported_modes[fse->index].height; + fse->max_height = fse->min_height; + + return 0; +} + +static void ov8856_update_pad_format(const struct ov8856_mode *mode, + struct v4l2_subdev_format *fmt) +{ + fmt->format.width = mode->width; + fmt->format.height = mode->height; + fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10; + fmt->format.field = V4L2_FIELD_NONE; +} + +static int ov8856_do_get_pad_format(struct ov8856 *ov8856, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct v4l2_mbus_framefmt *framefmt; + struct v4l2_subdev *sd = &ov8856->sd; + + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); + fmt->format = *framefmt; + } else { + ov8856_update_pad_format(ov8856->cur_mode, fmt); + } + + return 0; +} + +static int ov8856_get_pad_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct ov8856 *ov8856 = to_ov8856(sd); + int ret; + + mutex_lock(&ov8856->mutex); + ret = ov8856_do_get_pad_format(ov8856, cfg, fmt); + mutex_unlock(&ov8856->mutex); + + return ret; +} + +/* + * Calculate resolution distance + */ +static int +ov8856_get_resolution_dist(const struct ov8856_mode *mode, + struct v4l2_mbus_framefmt *framefmt) +{ + return abs(mode->width - framefmt->width) + + abs(mode->height - framefmt->height); +} + +/* + * Find the closest supported resolution to the requested resolution + */ +static const struct ov8856_mode * +ov8856_find_best_fit(struct ov8856 *ov8856, + struct v4l2_subdev_format *fmt) +{ + int i, dist, cur_best_fit = 0, cur_best_fit_dist = -1; + struct v4l2_mbus_framefmt *framefmt = &fmt->format; + + for (i = 0; i < ARRAY_SIZE(supported_modes); i++) { + dist = ov8856_get_resolution_dist(&supported_modes[i], + framefmt); + if (cur_best_fit_dist == -1 || dist < cur_best_fit_dist) { + cur_best_fit_dist = dist; + cur_best_fit = i; + } + } + + return &supported_modes[cur_best_fit]; +} + +static int +ov8856_set_pad_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct ov8856 *ov8856 = to_ov8856(sd); + const struct ov8856_mode *mode; + struct v4l2_mbus_framefmt *framefmt; + s32 vblank_def; + s32 vblank_min; + s64 h_blank; + s64 pixel_rate; + + mutex_lock(&ov8856->mutex); + + /* Only one raw bayer(GRBG) order is supported */ + if (fmt->format.code != MEDIA_BUS_FMT_SGRBG10_1X10) + fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10; + + mode = ov8856_find_best_fit(ov8856, fmt); + ov8856_update_pad_format(mode, fmt); + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); + *framefmt = fmt->format; + } else { + ov8856->cur_mode = mode; + __v4l2_ctrl_s_ctrl(ov8856->link_freq, mode->link_freq_index); + pixel_rate = + (link_freq_menu_items[mode->link_freq_index] * 2 * 4) / + 10; + __v4l2_ctrl_s_ctrl_int64(ov8856->pixel_rate, pixel_rate); + /* Update limits and set FPS to default */ + vblank_def = 
ov8856->cur_mode->vts_def - + ov8856->cur_mode->height; + vblank_min = ov8856->cur_mode->vts_min - + ov8856->cur_mode->height; + __v4l2_ctrl_modify_range( + ov8856->vblank, vblank_min, + OV8856_VTS_MAX - ov8856->cur_mode->height, 1, + vblank_def); + __v4l2_ctrl_s_ctrl(ov8856->vblank, vblank_def); + h_blank = + link_freq_configs[mode->link_freq_index].pixels_per_line + - ov8856->cur_mode->width; + __v4l2_ctrl_modify_range(ov8856->hblank, h_blank, + h_blank, 1, h_blank); + } + + mutex_unlock(&ov8856->mutex); + + return 0; +} + +static int ov8856_get_skip_frames(struct v4l2_subdev *sd, u32 *frames) +{ + *frames = OV8856_NUM_OF_SKIP_FRAMES; + + return 0; +} + +/* Start streaming */ +static int ov8856_start_streaming(struct ov8856 *ov8856) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); + const struct ov8856_reg_list *reg_list; + int ret, link_freq_index; + + /* Get out of software reset */ + ret = ov8856_write_reg(ov8856, OV8856_REG_SOFTWARE_RST, + OV8856_REG_VALUE_08BIT, OV8856_SOFTWARE_RST); + if (ret) { + dev_err(&client->dev, "%s failed to set powerup registers\n", + __func__); + return ret; + } + + /* Setup PLL */ + link_freq_index = ov8856->cur_mode->link_freq_index; + reg_list = &link_freq_configs[link_freq_index].reg_list; + ret = ov8856_write_reg_list(ov8856, reg_list); + if (ret) { + dev_err(&client->dev, "%s failed to set plls\n", __func__); + return ret; + } + + /* Apply default values of current mode */ + reg_list = &ov8856->cur_mode->reg_list; + ret = ov8856_write_reg_list(ov8856, reg_list); + if (ret) { + dev_err(&client->dev, "%s failed to set mode\n", __func__); + return ret; + } + + /* Apply customized values from user */ + ret = __v4l2_ctrl_handler_setup(ov8856->sd.ctrl_handler); + if (ret) + return ret; + + return ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT, + OV8856_REG_VALUE_08BIT, + OV8856_MODE_STREAMING); +} + +/* Stop streaming */ +static int ov8856_stop_streaming(struct ov8856 *ov8856) +{ + return ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT, + OV8856_REG_VALUE_08BIT, OV8856_MODE_STANDBY); +} + +static int ov8856_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct ov8856 *ov8856 = to_ov8856(sd); + struct i2c_client *client = v4l2_get_subdevdata(sd); + int ret = 0; + + mutex_lock(&ov8856->mutex); + if (ov8856->streaming == enable) { + mutex_unlock(&ov8856->mutex); + return 0; + } + + if (enable) { + ret = pm_runtime_get_sync(&client->dev); + if (ret < 0) { + pm_runtime_put_noidle(&client->dev); + goto err_unlock; + } + + /* + * Apply default & customized values + * and then start streaming.
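+ * The pm_runtime_get_sync() call above holds a PM reference for the whole streaming window; the stop path below drops it again, so sensor power follows the stream state.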
+ */ + ret = ov8856_start_streaming(ov8856); + if (ret) + goto err_rpm_put; + } else { + ov8856_stop_streaming(ov8856); + pm_runtime_put(&client->dev); + } + + ov8856->streaming = enable; + mutex_unlock(&ov8856->mutex); + + return ret; + +err_rpm_put: + pm_runtime_put(&client->dev); +err_unlock: + mutex_unlock(&ov8856->mutex); + + return ret; +} + +static int __maybe_unused ov8856_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct ov8856 *ov8856 = to_ov8856(sd); + + if (ov8856->streaming) + ov8856_stop_streaming(ov8856); + + return 0; +} + +static int __maybe_unused ov8856_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct ov8856 *ov8856 = to_ov8856(sd); + int ret; + + if (ov8856->streaming) { + ret = ov8856_start_streaming(ov8856); + if (ret) + goto error; + } + + return 0; + +error: + ov8856_stop_streaming(ov8856); + ov8856->streaming = 0; + return ret; +} + +/* Verify chip ID */ +static int ov8856_identify_module(struct ov8856 *ov8856) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); + int ret; + u32 val; + + ret = ov8856_read_reg(ov8856, OV8856_REG_CHIP_ID, + OV8856_REG_VALUE_24BIT, &val); + if (ret) + return ret; + + if (val != OV8856_CHIP_ID) { + dev_err(&client->dev, "chip id mismatch: %x!=%x\n", + OV8856_CHIP_ID, val); + return -EIO; + } + return 0; +} + +static const struct v4l2_subdev_video_ops ov8856_video_ops = { + .s_stream = ov8856_set_stream, +}; + +static const struct v4l2_subdev_pad_ops ov8856_pad_ops = { + .enum_mbus_code = ov8856_enum_mbus_code, + .get_fmt = ov8856_get_pad_format, + .set_fmt = ov8856_set_pad_format, + .enum_frame_size = ov8856_enum_frame_size, +}; + +static const struct v4l2_subdev_sensor_ops ov8856_sensor_ops = { + .g_skip_frames = ov8856_get_skip_frames, +}; + +static const struct v4l2_subdev_ops ov8856_subdev_ops = { + .video = &ov8856_video_ops, + .pad = &ov8856_pad_ops, + .sensor = &ov8856_sensor_ops, +}; + +static const struct media_entity_operations ov8856_subdev_entity_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +static const struct v4l2_subdev_internal_ops ov8856_internal_ops = { + .open = ov8856_open, +}; + +/* Initialize control handlers */ +static int ov8856_init_controls(struct ov8856 *ov8856) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); + struct v4l2_ctrl_handler *ctrl_hdlr; + s64 exposure_max; + s64 vblank_def; + s64 vblank_min; + s64 hblank; + s64 pixel_rate_min; + s64 pixel_rate_max; + const struct ov8856_mode *mode; + int ret; + + ctrl_hdlr = &ov8856->ctrl_handler; + ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); + if (ret) + return ret; + + mutex_init(&ov8856->mutex); + ctrl_hdlr->lock = &ov8856->mutex; + ov8856->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, + &ov8856_ctrl_ops, + V4L2_CID_LINK_FREQ, + OV8856_NUM_OF_LINK_FREQS - 1, + 0, link_freq_menu_items); + ov8856->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */ + pixel_rate_max = (link_freq_menu_items[0] * 2 * 4) / 10; + pixel_rate_min = (link_freq_menu_items[1] * 2 * 4) / 10; + /* By default, PIXEL_RATE is read only */ + ov8856->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, + V4L2_CID_PIXEL_RATE, + pixel_rate_min, pixel_rate_max, + 1, pixel_rate_max); + + mode = ov8856->cur_mode; + vblank_def = mode->vts_def - mode->height; + vblank_min = mode->vts_min - 
mode->height; + ov8856->vblank = v4l2_ctrl_new_std( + ctrl_hdlr, &ov8856_ctrl_ops, V4L2_CID_VBLANK, + vblank_min, OV8856_VTS_MAX - mode->height, 1, + vblank_def); + + hblank = link_freq_configs[mode->link_freq_index].pixels_per_line - + mode->width; + ov8856->hblank = v4l2_ctrl_new_std( + ctrl_hdlr, &ov8856_ctrl_ops, V4L2_CID_HBLANK, + hblank, hblank, 1, hblank); + ov8856->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + exposure_max = mode->vts_def - 8; + ov8856->exposure = v4l2_ctrl_new_std( + ctrl_hdlr, &ov8856_ctrl_ops, + V4L2_CID_EXPOSURE, OV8856_EXPOSURE_MIN, + exposure_max, OV8856_EXPOSURE_STEP, + OV8856_EXPOSURE_DEFAULT); + + v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, + OV8856_ANA_GAIN_MIN, OV8856_ANA_GAIN_MAX, + OV8856_ANA_GAIN_STEP, OV8856_ANA_GAIN_DEFAULT); + + /* Digital gain */ + v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, V4L2_CID_DIGITAL_GAIN, + OV8856_DGTL_GAIN_MIN, OV8856_DGTL_GAIN_MAX, + OV8856_DGTL_GAIN_STEP, OV8856_DGTL_GAIN_DEFAULT); + + v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov8856_ctrl_ops, + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(ov8856_test_pattern_menu) - 1, + 0, 0, ov8856_test_pattern_menu); + if (ctrl_hdlr->error) { + ret = ctrl_hdlr->error; + dev_err(&client->dev, "%s control init failed (%d)\n", + __func__, ret); + goto error; + } + + ov8856->sd.ctrl_handler = ctrl_hdlr; + + return 0; + +error: + v4l2_ctrl_handler_free(ctrl_hdlr); + mutex_destroy(&ov8856->mutex); + + return ret; +} + +static void ov8856_free_controls(struct ov8856 *ov8856) +{ + v4l2_ctrl_handler_free(ov8856->sd.ctrl_handler); + mutex_destroy(&ov8856->mutex); +} + +static int ov8856_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct ov8856 *ov8856; + int ret; + + dev_info(&client->dev, "ov8856_probe. %d-%04x\n", + client->adapter->nr, client->addr); + ov8856 = devm_kzalloc(&client->dev, sizeof(*ov8856), GFP_KERNEL); + if (!ov8856) + return -ENOMEM; + + /* Initialize subdev */ + v4l2_i2c_subdev_init(&ov8856->sd, client, &ov8856_subdev_ops); + + /* Check module identity */ + ret = ov8856_identify_module(ov8856); + if (ret) { + dev_err(&client->dev, "failed to find sensor: %d\n", ret); + return ret; + } + + /* Set default mode to max resolution */ + ov8856->cur_mode = &supported_modes[0]; + + ret = ov8856_init_controls(ov8856); + if (ret) + return ret; + + /* Initialize subdev */ + ov8856->sd.internal_ops = &ov8856_internal_ops; + ov8856->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + ov8856->sd.entity.ops = &ov8856_subdev_entity_ops; + ov8856->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; + + /* Initialize source pad */ + ov8856->pad.flags = MEDIA_PAD_FL_SOURCE; + ret = media_entity_pads_init(&ov8856->sd.entity, 1, &ov8856->pad); + if (ret) { + dev_err(&client->dev, "%s failed:%d\n", __func__, ret); + goto error_handler_free; + } + + ret = v4l2_async_register_subdev(&ov8856->sd); + if (ret < 0) + goto error_media_entity; + + /* + * Device is already turned on by i2c-core with ACPI domain PM. + * Enable runtime PM and turn off the device. 
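+ * pm_runtime_get_noresume() plus pm_runtime_set_active() below tell the PM core the device is already powered, so the final pm_runtime_put() simply lets it idle into runtime suspend.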
+ */ + pm_runtime_get_noresume(&client->dev); + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + pm_runtime_put(&client->dev); + + return 0; + +error_media_entity: + media_entity_cleanup(&ov8856->sd.entity); + +error_handler_free: + ov8856_free_controls(ov8856); + dev_err(&client->dev, "%s failed:%d\n", __func__, ret); + + return ret; +} + +static int ov8856_remove(struct i2c_client *client) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct ov8856 *ov8856 = to_ov8856(sd); + + v4l2_async_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + ov8856_free_controls(ov8856); + + /* + * Disable runtime PM but keep the device turned on. + * i2c-core with ACPI domain PM will turn off the device. + */ + pm_runtime_get_sync(&client->dev); + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); + pm_runtime_put_noidle(&client->dev); + + return 0; +} + +static const struct i2c_device_id ov8856_id_table[] = { + {"ov8856", 0}, + {}, +}; + +MODULE_DEVICE_TABLE(i2c, ov8856_id_table); + +static const struct dev_pm_ops ov8856_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ov8856_suspend, ov8856_resume) +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id ov8856_acpi_ids[] = { + {"OVTID858"}, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(acpi, ov8856_acpi_ids); +#endif + +static struct i2c_driver ov8856_i2c_driver = { + .driver = { + .name = "ov8856", + .owner = THIS_MODULE, + .pm = &ov8856_pm_ops, + /*.acpi_match_table = ACPI_PTR(ov8856_acpi_ids),*/ + }, + .probe = ov8856_probe, + .remove = ov8856_remove, + .id_table = ov8856_id_table, +}; + +module_i2c_driver(ov8856_i2c_driver); + +MODULE_AUTHOR("Pu, Yuning "); +MODULE_AUTHOR("Qiu, Tianshu "); +MODULE_DESCRIPTION("Omnivision ov8856 sensor driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index cdc4f2392ef9..65bbb20ae8e0 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c @@ -1177,9 +1177,9 @@ static int s5c73m3_oif_set_frame_desc(struct v4l2_subdev *sd, unsigned int pad, if (pad != OIF_SOURCE_PAD || fd == NULL) return -EINVAL; - fd->entry[0].length = 10 * SZ_1M; - fd->entry[1].length = max_t(u32, fd->entry[1].length, - S5C73M3_EMBEDDED_DATA_MAXLEN); + fd->entry[0].size.length = 10 * SZ_1M; + fd->entry[1].size.length = max_t(u32, fd->entry[1].size.length, + S5C73M3_EMBEDDED_DATA_MAXLEN); fd->num_entries = 2; mutex_lock(&state->lock); diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c index 9fd254a8e20d..13c10b5e2b45 100644 --- a/drivers/media/i2c/s5k6aa.c +++ b/drivers/media/i2c/s5k6aa.c @@ -421,6 +421,7 @@ static int s5k6aa_set_ahb_address(struct i2c_client *client) /** * s5k6aa_configure_pixel_clock - apply ISP main clock/PLL configuration + * @s5k6aa: pointer to &struct s5k6aa describing the device * * Configure the internal ISP PLL for the required output frequency. * Locking: called with s5k6aa.lock mutex held. 
@@ -669,6 +670,7 @@ static int s5k6aa_set_input_params(struct s5k6aa *s5k6aa) /** * s5k6aa_configure_video_bus - configure the video output interface + * @s5k6aa: pointer to &struct s5k6aa describing the device * @bus_type: video bus type: parallel or MIPI-CSI * @nlanes: number of MIPI lanes to be used (MIPI-CSI only) * @@ -724,6 +726,8 @@ static int s5k6aa_new_config_sync(struct i2c_client *client, int timeout, /** * s5k6aa_set_prev_config - write user preview register set + * @s5k6aa: pointer to &struct s5k6aa describing the device + * @preset: s5k6aa preset to be applied * * Configure output resolution and color fromat, pixel clock * frequency range, device frame rate type and frame period range. @@ -777,6 +781,7 @@ static int s5k6aa_set_prev_config(struct s5k6aa *s5k6aa, /** * s5k6aa_initialize_isp - basic ISP MCU initialization + * @sd: pointer to V4L2 sub-device descriptor * * Configure AHB addresses for registers read/write; configure PLLs for * required output pixel clock. The ISP power supply needs to be already diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c index 700f433261d0..e4d7f2febf00 100644 --- a/drivers/media/i2c/smiapp/smiapp-core.c +++ b/drivers/media/i2c/smiapp/smiapp-core.c @@ -1001,7 +1001,7 @@ static int smiapp_read_nvm(struct smiapp_sensor *sensor, if (rval) goto out; - for (i = 0; i < 1000; i++) { + for (i = 1000; i > 0; i--) { rval = smiapp_read( sensor, SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS, &s); @@ -1012,11 +1012,10 @@ static int smiapp_read_nvm(struct smiapp_sensor *sensor, if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY) break; - if (--i == 0) { - rval = -ETIMEDOUT; - goto out; - } - + } + if (!i) { + rval = -ETIMEDOUT; + goto out; } for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) { diff --git a/drivers/media/i2c/ti964-reg.h b/drivers/media/i2c/ti964-reg.h new file mode 100644 index 000000000000..e916c41b74a1 --- /dev/null +++ b/drivers/media/i2c/ti964-reg.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation */ + +#ifndef TI964_REG_H +#define TI964_REG_H + +struct ti964_register_write { + u8 reg; + u8 val; +}; + +static const struct ti964_register_write ti964_frame_sync_settings[2][5] = { + { + {0x18, 0x00}, /* Disable frame sync. */ + {0x19, 0x00}, + {0x1a, 0x02}, + {0x1b, 0x0a}, + {0x1c, 0xd3}, + }, + { + {0x19, 0x01}, /* Frame sync high time. */ + {0x1a, 0x15}, + {0x1b, 0x09}, /* Frame sync low time. */ + {0x1c, 0xC3}, + {0x18, 0x01}, /* Enable frame sync and use high/low mode. */ + } +}; + +static const struct ti964_register_write ti964_init_settings[] = { + {0x8, 0x1c}, + {0xa, 0x79}, + {0xb, 0x79}, + {0xd, 0xb9}, + {0x10, 0x91}, + {0x11, 0x85}, + {0x12, 0x89}, + {0x13, 0xc1}, + {0x17, 0xe1}, + {0x18, 0x0}, /* Disable frame sync. */ + {0x19, 0x0}, /* Frame sync high time. */ + {0x1a, 0x2}, + {0x1b, 0xa}, /* Frame sync low time. */ + {0x1c, 0xd3}, + {0x21, 0x43}, /* Enable best effort mode.
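+ * (0x21 is presumably the companion forwarding-control register to TI964_FWD_CTL1 at 0x20; best effort mode forwards frames from each enabled RX port as they arrive rather than synchronizing the ports.)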
*/ + {0xb0, 0x10}, + {0xb1, 0x14}, + {0xb2, 0x1f}, + {0xb3, 0x8}, + {0x32, 0x1}, /* Select CSI port 0 */ + {0x4c, 0x1}, /* Select RX port 0 */ + {0x58, 0x58}, + {0x5c, 0x18}, /* TI913 alias addr 0xc */ + {0x6d, 0x7f}, + {0x70, 0x1e}, /* YUV422_8 */ + {0x7c, 0x81}, /* Use RAW10 8bit mode */ + {0xd2, 0x84}, + {0x4c, 0x12}, /* Select RX port 1 */ + {0x58, 0x58}, + {0x5c, 0x1a}, /* TI913 alias addr 0xd */ + {0x6d, 0x7f}, + {0x70, 0x5e}, /* YUV422_8 */ + {0x7c, 0x81}, /* Use RAW10 8bit mode */ + {0xd2, 0x84}, + {0x4c, 0x24}, /* Select RX port 2 */ + {0x58, 0x58}, + {0x5c, 0x1c}, /* TI913 alias addr 0xe */ + {0x6d, 0x7f}, + {0x70, 0x9e}, /* YUV422_8 */ + {0x7c, 0x81}, /* Use RAW10 8bit mode */ + {0xd2, 0x84}, + {0x4c, 0x38}, /* Select RX port 3 */ + {0x58, 0x58}, + {0x5c, 0x1e}, /* TI913 alias addr 0xf */ + {0x6d, 0x7f}, + {0x70, 0xde}, /* YUV422_8 */ + {0x7c, 0x81}, /* Use RAW10 8bit mode */ + {0xd2, 0x84}, + {0xbc, 0x00}, +}; + +static const struct ti964_register_write ti964_tp_settings[] = { + {0xb0, 0x0}, + {0xb1, 0x02}, + {0xb2, 0xb3}, + {0xb1, 0x01}, +}; +/* Register definitions */ +#define TI964_DEVID 0x0 +#define TI964_RESET 0x1 +#define TI964_CSI_PLL_CTL 0x1f +#define TI964_FS_CTL 0x18 +#define TI964_FWD_CTL1 0x20 +#define TI964_RX_PORT_SEL 0x4c +#define TI964_SLAVE_ID0 0x5d +#define TI964_SLAVE_ALIAS_ID0 0x65 +#define TI964_PORT_CONFIG 0x6d +#define TI964_BC_GPIO_CTL0 0x6e +#define TI964_RAW10_ID 0x70 +#define TI964_RAW12_ID 0x71 +#define TI964_PORT_CONFIG2 0x7c + +#define TI964_IND_ACC_DATA 0xb2 +#define TI964_CSI_CTL 0x33 + +/* Register value definitions */ +#define TI964_POWER_ON 0x1 +#define TI964_POWER_OFF 0x20 +#define TI964_FPD3_RAW10_100MHz 0x7f +#define TI964_FPD3_RAW12_50MHz 0x7d +#define TI964_FPD3_RAW12_75MHz 0x7e +#define TI964_RAW12 0x41 +#define TI964_RAW10_NORMAL 0x1 +#define TI964_RAW10_8BIT 0x81 +#define TI964_GPIO0_HIGH 0x09 +#define TI964_GPIO0_LOW 0x08 +#define TI964_GPIO1_HIGH 0x90 +#define TI964_GPIO1_LOW 0x80 +#define TI964_GPIO0_FSIN 0x0a +#define TI964_GPIO1_FSIN 0xa0 +#define TI964_GPIO0_MASK 0x0f +#define TI964_GPIO1_MASK 0xf0 +#define TI964_MIPI_800MBPS 0x2 +#define TI964_MIPI_1600MBPS 0x0 +#define TI964_CSI_ENABLE 0x1 +#define TI964_CSI_CONTS_CLOCK 0x2 +#define TI964_CSI_SKEWCAL 0x40 +#define TI964_FSIN_ENABLE 0x1 +#endif diff --git a/drivers/media/i2c/ti964.c b/drivers/media/i2c/ti964.c new file mode 100644 index 000000000000..7f97cd4b1c1c --- /dev/null +++ b/drivers/media/i2c/ti964.c @@ -0,0 +1,1364 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2016 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "ti964-reg.h" + +struct ti964_subdev { + struct v4l2_subdev *sd; + unsigned short rx_port; + unsigned short fsin_gpio; + unsigned short phy_i2c_addr; + unsigned short alias_i2c_addr; + char sd_name[16]; +}; + +struct ti964 { + struct v4l2_subdev sd; + struct media_pad pad[NR_OF_TI964_PADS]; + struct v4l2_ctrl_handler ctrl_handler; + struct ti964_pdata *pdata; + struct ti964_subdev sub_devs[NR_OF_TI964_SINK_PADS]; + struct crlmodule_platform_data subdev_pdata[NR_OF_TI964_SINK_PADS]; + const char *name; + + struct mutex mutex; + + struct regmap *regmap8; + struct regmap *regmap16; + + struct v4l2_mbus_framefmt *ffmts[NR_OF_TI964_PADS]; + struct v4l2_rect *crop; + struct v4l2_rect *compose; + + struct { + unsigned int *stream_id; + } *stream; /* stream enable/disable status, indexed by pad */ + struct { + unsigned int sink; + unsigned int source;
+ int flags; + } *route; /* pad level info, indexed by stream */ + + unsigned int nsinks; + unsigned int nsources; + unsigned int nstreams; + unsigned int npads; + + struct gpio_chip gc; + + struct v4l2_ctrl *link_freq; + struct v4l2_ctrl *test_pattern; +}; + +#define to_ti964(_sd) container_of(_sd, struct ti964, sd) + +static const s64 ti964_op_sys_clock[] = {400000000, 800000000}; +static const u8 ti964_op_sys_clock_reg_val[] = { + TI964_MIPI_800MBPS, + TI964_MIPI_1600MBPS +}; + +/* + * Order matters. + * + * 1. Bits-per-pixel, descending. + * 2. Bits-per-pixel compressed, descending. + * 3. Pixel order, same as in pixel_order_str. Formats for all four pixel + * orders must be defined. + */ +static const struct ti964_csi_data_format va_csi_data_formats[] = { + { MEDIA_BUS_FMT_YUYV8_1X16, 16, 16, PIXEL_ORDER_GBRG, 0x1e }, + { MEDIA_BUS_FMT_UYVY8_1X16, 16, 16, PIXEL_ORDER_GBRG, 0x1e }, + { MEDIA_BUS_FMT_SGRBG12_1X12, 12, 12, PIXEL_ORDER_GRBG, 0x2c }, + { MEDIA_BUS_FMT_SRGGB12_1X12, 12, 12, PIXEL_ORDER_RGGB, 0x2c }, + { MEDIA_BUS_FMT_SBGGR12_1X12, 12, 12, PIXEL_ORDER_BGGR, 0x2c }, + { MEDIA_BUS_FMT_SGBRG12_1X12, 12, 12, PIXEL_ORDER_GBRG, 0x2c }, + { MEDIA_BUS_FMT_SGRBG10_1X10, 10, 10, PIXEL_ORDER_GRBG, 0x2b }, + { MEDIA_BUS_FMT_SRGGB10_1X10, 10, 10, PIXEL_ORDER_RGGB, 0x2b }, + { MEDIA_BUS_FMT_SBGGR10_1X10, 10, 10, PIXEL_ORDER_BGGR, 0x2b }, + { MEDIA_BUS_FMT_SGBRG10_1X10, 10, 10, PIXEL_ORDER_GBRG, 0x2b }, + { MEDIA_BUS_FMT_SGRBG8_1X8, 8, 8, PIXEL_ORDER_GRBG, 0x2a }, + { MEDIA_BUS_FMT_SRGGB8_1X8, 8, 8, PIXEL_ORDER_RGGB, 0x2a }, + { MEDIA_BUS_FMT_SBGGR8_1X8, 8, 8, PIXEL_ORDER_BGGR, 0x2a }, + { MEDIA_BUS_FMT_SGBRG8_1X8, 8, 8, PIXEL_ORDER_GBRG, 0x2a }, +}; + +static const uint32_t ti964_supported_codes_pad[] = { + MEDIA_BUS_FMT_YUYV8_1X16, + MEDIA_BUS_FMT_UYVY8_1X16, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + 0, +}; + +static const uint32_t *ti964_supported_codes[] = { + ti964_supported_codes_pad, +}; + +static struct regmap_config ti964_reg_config8 = { + .reg_bits = 8, + .val_bits = 8, +}; + +static struct regmap_config ti964_reg_config16 = { + .reg_bits = 16, + .val_bits = 8, + .reg_format_endian = REGMAP_ENDIAN_BIG, +}; + +static int ti964_reg_set_bit(struct ti964 *va, unsigned char reg, + unsigned char bit, unsigned char val) +{ + int ret; + unsigned int reg_val; + + ret = regmap_read(va->regmap8, reg, &reg_val); + if (ret) + return ret; + if (val) + reg_val |= 1 << bit; + else + reg_val &= ~(1 << bit); + + return regmap_write(va->regmap8, reg, reg_val); +} + +static int ti964_map_phy_i2c_addr(struct ti964 *va, unsigned short rx_port, + unsigned short addr) +{ + int rval; + + rval = regmap_write(va->regmap8, TI964_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (rval) + return rval; + + return regmap_write(va->regmap8, TI964_SLAVE_ID0, addr); +} + +static int ti964_map_alias_i2c_addr(struct ti964 *va, unsigned short rx_port, + unsigned short addr) +{ + int rval; + + rval = regmap_write(va->regmap8, TI964_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (rval) + return rval; + + return regmap_write(va->regmap8, TI964_SLAVE_ALIAS_ID0, addr); +} + +static int ti964_fsin_gpio_init(struct ti964 *va, unsigned short rx_port, + unsigned short fsin_gpio) +{ + int rval; + unsigned int reg_val; + +
rval = regmap_read(va->regmap8, TI964_FS_CTL, &reg_val); + if (rval) { + dev_dbg(va->sd.dev, "Failed to read frame sync control.\n"); + return rval; + } + + if (!(reg_val & TI964_FSIN_ENABLE)) { + dev_dbg(va->sd.dev, "FSIN not enabled, skip config FSIN GPIO.\n"); + return 0; + } + + rval = regmap_write(va->regmap8, TI964_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (rval) + return rval; + + rval = regmap_read(va->regmap8, TI964_BC_GPIO_CTL0, &reg_val); + if (rval) { + dev_dbg(va->sd.dev, "Failed to read gpio status.\n"); + return rval; + } + + if (fsin_gpio == 0) { + reg_val &= ~TI964_GPIO0_MASK; + reg_val |= TI964_GPIO0_FSIN; + } else { + reg_val &= ~TI964_GPIO1_MASK; + reg_val |= TI964_GPIO1_FSIN; + } + + rval = regmap_write(va->regmap8, TI964_BC_GPIO_CTL0, reg_val); + if (rval) + dev_dbg(va->sd.dev, "Failed to set gpio.\n"); + + return rval; +} + +static int ti964_get_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route) +{ + struct ti964 *va = to_ti964(sd); + int i; + + for (i = 0; i < min(va->nstreams, route->num_routes); ++i) { + unsigned int sink = va->route[i].sink; + unsigned int source = va->route[i].source; + + route->routes[i].sink_pad = sink; + route->routes[i].sink_stream = + va->stream[sink].stream_id[0]; + route->routes[i].source_pad = source; + route->routes[i].source_stream = + va->stream[source].stream_id[sink]; + route->routes[i].flags = va->route[i].flags; + } + + route->num_routes = i; + + return 0; +} + +static int ti964_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route) +{ + struct ti964 *va = to_ti964(sd); + int i, j, ret = 0; + + for (i = 0; i < min(route->num_routes, va->nstreams); ++i) { + struct v4l2_subdev_route *t = &route->routes[i]; + unsigned int sink = t->sink_pad; + unsigned int source = t->source_pad; + + if (t->sink_stream > va->nstreams - 1 || + t->source_stream > va->nstreams - 1) + continue; + + if (t->source_pad != TI964_PAD_SOURCE) + continue; + + for (j = 0; j < va->nstreams; j++) { + if (sink == va->route[j].sink && + source == va->route[j].source) + break; + } + + if (j == va->nstreams) + continue; + + va->stream[sink].stream_id[0] = t->sink_stream; + va->stream[source].stream_id[sink] = t->source_stream; + + if (t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) + va->route[j].flags |= + V4L2_SUBDEV_ROUTE_FL_ACTIVE; + else if (!(t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + va->route[j].flags &= + (~V4L2_SUBDEV_ROUTE_FL_ACTIVE); + } + + return ret; +} + +static int ti964_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct ti964 *va = to_ti964(sd); + const uint32_t *supported_code = + ti964_supported_codes[code->pad]; + bool next_stream = false; + int i; + + if (code->stream & V4L2_SUBDEV_FLAG_NEXT_STREAM) { + next_stream = true; + code->stream &= ~V4L2_SUBDEV_FLAG_NEXT_STREAM; + } + + if (code->stream > va->nstreams) + return -EINVAL; + + if (next_stream) { + if (!(va->pad[code->pad].flags & MEDIA_PAD_FL_MULTIPLEX)) + return -EINVAL; + if (code->stream < va->nstreams - 1) { + code->stream++; + return 0; + } else { + return -EINVAL; + } + } + + for (i = 0; supported_code[i]; i++) { + if (i == code->index) { + code->code = supported_code[i]; + return 0; + } + } + + return -EINVAL; +} + +static const struct ti964_csi_data_format + *ti964_validate_csi_data_format(u32 code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(va_csi_data_formats); i++) { + if (va_csi_data_formats[i].code == code) + return &va_csi_data_formats[i]; + } + + return
&va_csi_data_formats[0]; +} + +static int ti964_get_frame_desc(struct v4l2_subdev *sd, + unsigned int pad, struct v4l2_mbus_frame_desc *desc) +{ + struct ti964 *va = to_ti964(sd); + struct v4l2_mbus_frame_desc_entry *entry = desc->entry; + u8 vc = 0; + int i; + + desc->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2; + + for (i = 0; i < min_t(int, va->nstreams, desc->num_entries); i++) { + struct v4l2_mbus_framefmt *ffmt = + &va->ffmts[i][TI964_PAD_SOURCE]; + const struct ti964_csi_data_format *csi_format = + ti964_validate_csi_data_format(ffmt->code); + + entry->size.two_dim.width = ffmt->width; + entry->size.two_dim.height = ffmt->height; + entry->pixelcode = ffmt->code; + entry->bus.csi2.channel = vc++; + entry->bpp = csi_format->compressed; + entry++; + } + + return 0; +} + +static struct v4l2_mbus_framefmt * +__ti964_get_ffmt(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, unsigned int which, + unsigned int stream) +{ + struct ti964 *va = to_ti964(subdev); + + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(subdev, cfg, pad); + else + return &va->ffmts[pad][stream]; +} + +static int ti964_get_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct ti964 *va = to_ti964(subdev); + + if (fmt->stream > va->nstreams) + return -EINVAL; + + mutex_lock(&va->mutex); + fmt->format = *__ti964_get_ffmt(subdev, cfg, fmt->pad, + fmt->which, fmt->stream); + mutex_unlock(&va->mutex); + + dev_dbg(subdev->dev, "subdev_format: which: %s, pad: %d, stream: %d.\n", + fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE ? + "V4L2_SUBDEV_FORMAT_ACTIVE" : "V4L2_SUBDEV_FORMAT_TRY", + fmt->pad, fmt->stream); + + dev_dbg(subdev->dev, "framefmt: width: %d, height: %d, code: 0x%x.\n", + fmt->format.width, fmt->format.height, fmt->format.code); + + return 0; +} + +static int ti964_set_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct ti964 *va = to_ti964(subdev); + const struct ti964_csi_data_format *csi_format; + struct v4l2_mbus_framefmt *ffmt; + + if (fmt->stream > va->nstreams) + return -EINVAL; + + csi_format = ti964_validate_csi_data_format( + fmt->format.code); + + mutex_lock(&va->mutex); + ffmt = __ti964_get_ffmt(subdev, cfg, fmt->pad, fmt->which, + fmt->stream); + + if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) { + ffmt->width = fmt->format.width; + ffmt->height = fmt->format.height; + ffmt->code = csi_format->code; + } + fmt->format = *ffmt; + mutex_unlock(&va->mutex); + + dev_dbg(subdev->dev, "framefmt: width: %d, height: %d, code: 0x%x.\n", + ffmt->width, ffmt->height, ffmt->code); + + return 0; +} + +static int ti964_open(struct v4l2_subdev *subdev, + struct v4l2_subdev_fh *fh) +{ + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format(subdev, fh->pad, 0); + + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_TRY, + .pad = TI964_PAD_SOURCE, + .format = { + .width = TI964_MAX_WIDTH, + .height = TI964_MAX_HEIGHT, + .code = MEDIA_BUS_FMT_YUYV8_1X16, + }, + .stream = 0, + }; + + *try_fmt = fmt.format; + + return 0; +} + +static int ti964_registered(struct v4l2_subdev *subdev) +{ + struct ti964 *va = to_ti964(subdev); + int i, j, k, l, rval; + + for (i = 0, k = 0; i < va->pdata->subdev_num; i++) { + struct ti964_subdev_info *info = + &va->pdata->subdev_info[i]; + struct crlmodule_platform_data *pdata = + (struct crlmodule_platform_data *) + info->board_info.platform_data; + struct i2c_adapter *adapter; + + if (k 
>= va->nsinks) + break; + + /* + * The sensors should not share the same pdata structure. + * Clone the pdata for each sensor. + */ + memcpy(&va->subdev_pdata[k], pdata, sizeof(*pdata)); + if (va->subdev_pdata[k].xshutdown != 0 && + va->subdev_pdata[k].xshutdown != 1) { + dev_err(va->sd.dev, "xshutdown(%d) must be 0 or 1 to connect.\n", + va->subdev_pdata[k].xshutdown); + return -EINVAL; + } + + /* If 0 is xshutdown, then 1 would be FSIN, vice versa. */ + va->sub_devs[k].fsin_gpio = 1 - va->subdev_pdata[k].xshutdown; + + /* + * Change the gpio value to have xshutdown + * and rx port included, so in gpio_set those + * can be calculated from it. + */ + va->subdev_pdata[k].xshutdown += va->gc.base + + info->rx_port * NR_OF_GPIOS_PER_PORT; + info->board_info.platform_data = &va->subdev_pdata[k]; + + if (!info->phy_i2c_addr || !info->board_info.addr) { + dev_err(va->sd.dev, "can't find the physical and alias addr.\n"); + return -EINVAL; + } + + /* Map PHY I2C address. */ + rval = ti964_map_phy_i2c_addr(va, info->rx_port, + info->phy_i2c_addr); + if (rval) + return rval; + + /* Map 7bit ALIAS I2C address. */ + rval = ti964_map_alias_i2c_addr(va, info->rx_port, + info->board_info.addr << 1); + if (rval) + return rval; + + adapter = i2c_get_adapter(info->i2c_adapter_id); + va->sub_devs[k].sd = v4l2_i2c_new_subdev_board( + va->sd.v4l2_dev, adapter, + &info->board_info, 0); + i2c_put_adapter(adapter); + if (!va->sub_devs[k].sd) { + dev_err(va->sd.dev, + "can't create new i2c subdev %d-%04x\n", + info->i2c_adapter_id, + info->board_info.addr); + continue; + } + va->sub_devs[k].rx_port = info->rx_port; + va->sub_devs[k].phy_i2c_addr = info->phy_i2c_addr; + va->sub_devs[k].alias_i2c_addr = info->board_info.addr; + memcpy(va->sub_devs[k].sd_name, + va->subdev_pdata[k].module_name, + min(sizeof(va->sub_devs[k].sd_name) - 1, + sizeof(va->subdev_pdata[k].module_name) - 1)); + + for (j = 0; j < va->sub_devs[k].sd->entity.num_pads; j++) { + if (va->sub_devs[k].sd->entity.pads[j].flags & + MEDIA_PAD_FL_SOURCE) + break; + } + + if (j == va->sub_devs[k].sd->entity.num_pads) { + dev_warn(va->sd.dev, + "no source pad in subdev %d-%04x\n", + info->i2c_adapter_id, + info->board_info.addr); + return -ENOENT; + } + + for (l = 0; l < va->nsinks; l++) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = media_entity_create_link( +#else + rval = media_create_pad_link( +#endif + &va->sub_devs[k].sd->entity, j, + &va->sd.entity, l, 0); + if (rval) { + dev_err(va->sd.dev, + "can't create link to %d-%04x\n", + info->i2c_adapter_id, + info->board_info.addr); + return -EINVAL; + } + } + k++; + } + + return 0; +} + +static int ti964_set_power(struct v4l2_subdev *subdev, int on) +{ + struct ti964 *va = to_ti964(subdev); + int ret; + u8 val; + + ret = regmap_write(va->regmap8, TI964_RESET, + (on) ? TI964_POWER_ON : TI964_POWER_OFF); + if (ret || !on) + return ret; + + /* Configure MIPI clock based on control value. */ + ret = regmap_write(va->regmap8, TI964_CSI_PLL_CTL, + ti964_op_sys_clock_reg_val[ + v4l2_ctrl_g_ctrl(va->link_freq)]); + if (ret) + return ret; + val = TI964_CSI_ENABLE; + val |= TI964_CSI_CONTS_CLOCK; + /* Enable skew calculation when 1.6Gbps output is enabled.
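+ * (The MIPI D-PHY specification requires deskew calibration at data rates above 1.5 Gbps, which the 1.6 Gbps setting exceeds.)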
*/ + if (v4l2_ctrl_g_ctrl(va->link_freq)) + val |= TI964_CSI_SKEWCAL; + return regmap_write(va->regmap8, TI964_CSI_CTL, val); +} + +static bool ti964_broadcast_mode(struct v4l2_subdev *subdev) +{ + struct ti964 *va = to_ti964(subdev); + struct v4l2_subdev_format fmt = { 0 }; + struct v4l2_subdev *sd; + char *sd_name = NULL; + bool first = true; + unsigned int h = 0, w = 0, code = 0; + bool single_stream = true; + int i, rval; + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + struct media_pad *remote_pad = + media_entity_remote_pad(&va->pad[i]); + + if (!remote_pad) + continue; + + sd = media_entity_to_v4l2_subdev(remote_pad->entity); + fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; + fmt.pad = remote_pad->index; + fmt.stream = 0; + + rval = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt); + if (rval) + return false; + + if (first) { + sd_name = va->sub_devs[i].sd_name; + h = fmt.format.height; + w = fmt.format.width; + code = fmt.format.code; + first = false; + } else { + if (strncmp(sd_name, va->sub_devs[i].sd_name, 16)) + return false; + + if (h != fmt.format.height || w != fmt.format.width + || code != fmt.format.code) + return false; + + single_stream = false; + } + } + + if (single_stream) + return false; + + return true; +} + +static int ti964_tp_set_stream(struct v4l2_subdev *subdev, int enable) +{ + struct ti964 *va = to_ti964(subdev); + int i, rval; + + dev_dbg(va->sd.dev, "TI964 starts to stream test pattern.\n"); + for (i = 0; i < ARRAY_SIZE(ti964_tp_settings); i++) { + rval = regmap_write(va->regmap8, + ti964_tp_settings[i].reg, + ti964_tp_settings[i].val); + if (rval) { + dev_err(va->sd.dev, "Register write error.\n"); + return rval; + } + } + + rval = regmap_write(va->regmap8, TI964_IND_ACC_DATA, enable); + if (rval) { + dev_err(va->sd.dev, "Register write error.\n"); + return rval; + } + + return 0; +} + +static int ti964_rx_port_config(struct ti964 *va, int sink, int rx_port) +{ + int rval; + u8 bpp; + int port_cfg2_val; + int vc_mode_reg_index; + int vc_mode_reg_val; + int mipi_dt_type; + int high_fv_flags = va->subdev_pdata[sink].high_framevalid_flags; + + /* Select RX port. */ + rval = regmap_write(va->regmap8, TI964_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (rval) { + dev_err(va->sd.dev, "Failed to select RX port.\n"); + return rval; + } + + /* Set RX port mode. */ + bpp = ti964_validate_csi_data_format( + va->ffmts[sink][0].code)->width; + rval = regmap_write(va->regmap8, TI964_PORT_CONFIG, + (bpp == 12) ? + TI964_FPD3_RAW12_75MHz : TI964_FPD3_RAW10_100MHz); + if (rval) { + dev_err(va->sd.dev, "Failed to set port config.\n"); + return rval; + } + + mipi_dt_type = ti964_validate_csi_data_format( + va->ffmts[sink][0].code)->mipi_dt_code; + /* + * RAW8 and YUV422 need to enable RAW10 bit mode. + * RAW12 need to set the RAW10_8bit to reserved. 
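+ * The switch below therefore keys off bits-per-pixel: 8 and 16 bpp (RAW8/YUV422) take the RAW10 8-bit path, 12 bpp takes the RAW12 path, and anything else falls back to normal RAW10 mode.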
+ */ + switch (bpp) { + case 8: + case 16: + port_cfg2_val = TI964_RAW10_8BIT & (~high_fv_flags); + vc_mode_reg_index = TI964_RAW10_ID; + break; + case 12: + port_cfg2_val = TI964_RAW12; + vc_mode_reg_index = TI964_RAW12_ID; + break; + default: + port_cfg2_val = TI964_RAW10_NORMAL & (~high_fv_flags); + vc_mode_reg_index = TI964_RAW10_ID; + break; + } + + vc_mode_reg_val = mipi_dt_type | sink << 6; + rval = regmap_write(va->regmap8, vc_mode_reg_index, vc_mode_reg_val); + if (rval) { + dev_err(va->sd.dev, "Failed to set virtual channel & data type.\n"); + return rval; + } + + rval = regmap_write(va->regmap8, TI964_PORT_CONFIG2, port_cfg2_val); + if (rval) { + dev_err(va->sd.dev, "Failed to set port config2.\n"); + return rval; + } + + return 0; +} + +static int ti964_map_subdevs_addr(struct ti964 *va) +{ + unsigned short rx_port, phy_i2c_addr, alias_i2c_addr; + int i, rval; + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + rx_port = va->sub_devs[i].rx_port; + phy_i2c_addr = va->sub_devs[i].phy_i2c_addr; + alias_i2c_addr = va->sub_devs[i].alias_i2c_addr; + + if (!phy_i2c_addr || !alias_i2c_addr) + continue; + + rval = ti964_map_phy_i2c_addr(va, rx_port, phy_i2c_addr); + if (rval) + return rval; + + /* set 7bit alias i2c addr */ + rval = ti964_map_alias_i2c_addr(va, rx_port, + alias_i2c_addr << 1); + if (rval) + return rval; + } + + return 0; +} + +static int ti964_find_subdev_index(struct ti964 *va, struct v4l2_subdev *sd) +{ + int i; + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + if (va->sub_devs[i].sd == sd) + return i; + } + + WARN_ON(1); + + return -EINVAL; +} + +static int ti964_set_frame_sync(struct ti964 *va, int enable) +{ + int i, rval; + int index = !!enable; + + for (i = 0; i < ARRAY_SIZE(ti964_frame_sync_settings[index]); i++) { + rval = regmap_write(va->regmap8, + ti964_frame_sync_settings[index][i].reg, + ti964_frame_sync_settings[index][i].val); + if (rval) { + dev_err(va->sd.dev, "Failed to %s frame sync\n", + enable ? "enable" : "disable"); + return rval; + } + } + + return 0; +} + +static int ti964_set_stream(struct v4l2_subdev *subdev, int enable) +{ + struct ti964 *va = to_ti964(subdev); + struct v4l2_subdev *sd; + int i, j, rval; + bool broadcast; + unsigned int rx_port; + int sd_idx = -1; + DECLARE_BITMAP(rx_port_enabled, 32); + + dev_dbg(va->sd.dev, "TI964 set stream, enable %d\n", enable); + + if (v4l2_ctrl_g_ctrl(va->test_pattern)) + return ti964_tp_set_stream(subdev, enable); + + broadcast = ti964_broadcast_mode(subdev); + if (enable) + dev_info(va->sd.dev, "TI964 in %s mode", + broadcast ? 
"broadcast" : "non broadcast"); + + bitmap_zero(rx_port_enabled, 32); + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + struct media_pad *remote_pad = + media_entity_remote_pad(&va->pad[i]); + + if (!remote_pad) + continue; + + /* Find ti964 subdev */ + sd = media_entity_to_v4l2_subdev(remote_pad->entity); + j = ti964_find_subdev_index(va, sd); + if (j < 0) + return -EINVAL; + rx_port = va->sub_devs[j].rx_port; + rval = ti964_rx_port_config(va, i, rx_port); + if (rval < 0) + return rval; + + bitmap_set(rx_port_enabled, rx_port, 1); + + if (broadcast && sd_idx == -1) { + sd_idx = j; + } else if (broadcast) { + rval = ti964_map_alias_i2c_addr(va, rx_port, + va->sub_devs[sd_idx].alias_i2c_addr << 1); + if (rval < 0) + return rval; + } else { + /* Stream on/off sensor */ + rval = v4l2_subdev_call(sd, video, s_stream, enable); + if (rval) { + dev_err(va->sd.dev, + "Failed to set stream for %s, enable %d\n", + sd->name, enable); + return rval; + } + + /* RX port fordward */ + rval = ti964_reg_set_bit(va, TI964_FWD_CTL1, + rx_port + 4, !enable); + if (rval) { + dev_err(va->sd.dev, + "Failed to forward RX port%d. enable %d\n", + i, enable); + return rval; + } + + } + } + + if (broadcast) { + if (sd_idx < 0) { + dev_err(va->sd.dev, "No sensor connected!\n"); + return -ENODEV; + } + sd = va->sub_devs[sd_idx].sd; + rval = v4l2_subdev_call(sd, video, s_stream, enable); + if (rval) { + dev_err(va->sd.dev, + "Failed to set stream for %s. enable %d\n", + sd->name, enable); + return rval; + } + + rval = ti964_set_frame_sync(va, enable); + if (rval) { + dev_err(va->sd.dev, + "Failed to set frame sync.\n"); + return rval; + } + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + if (enable && test_bit(i, rx_port_enabled)) { + rval = ti964_fsin_gpio_init(va, + va->sub_devs[i].rx_port, + va->sub_devs[i].fsin_gpio); + if (rval) { + dev_err(va->sd.dev, + "Failed to enable frame sync gpio init.\n"); + return rval; + } + } + } + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + if (!test_bit(i, rx_port_enabled)) + continue; + + /* RX port fordward */ + rval = ti964_reg_set_bit(va, TI964_FWD_CTL1, + i + 4, !enable); + if (rval) { + dev_err(va->sd.dev, + "Failed to forward RX port%d. enable %d\n", + i, enable); + return rval; + } + } + + /* + * Restore each subdev i2c address as we may + * touch it later. 
+ */ + rval = ti964_map_subdevs_addr(va); + if (rval) + return rval; + } + + return 0; +} + +static struct v4l2_subdev_internal_ops ti964_sd_internal_ops = { + .open = ti964_open, + .registered = ti964_registered, +}; + +static bool ti964_sd_has_route(struct media_entity *entity, + unsigned int pad0, unsigned int pad1, int *stream) +{ + struct ti964 *va = to_ti964(media_entity_to_v4l2_subdev(entity)); + + if (va == NULL || stream == NULL || + *stream >= va->nstreams || *stream < 0) + return false; + + if ((va->route[*stream].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) && + ((va->route[*stream].source == pad0 && + va->route[*stream].sink == pad1) || + (va->route[*stream].source == pad1 && + va->route[*stream].sink == pad0))) + return true; + + return false; +} + +static const struct media_entity_operations ti964_sd_entity_ops = { + .has_route = ti964_sd_has_route, +}; + +static const struct v4l2_subdev_video_ops ti964_sd_video_ops = { + .s_stream = ti964_set_stream, +}; + +static const struct v4l2_subdev_core_ops ti964_core_subdev_ops = { + .s_power = ti964_set_power, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + .g_ctrl = v4l2_subdev_g_ctrl, + .s_ctrl = v4l2_subdev_s_ctrl, + .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, + .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, + .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, + .queryctrl = v4l2_subdev_queryctrl, +#endif +}; + +static int ti964_s_ctrl(struct v4l2_ctrl *ctrl) +{ + return 0; +} + +static const struct v4l2_ctrl_ops ti964_ctrl_ops = { + .s_ctrl = ti964_s_ctrl, +}; + +static const struct v4l2_ctrl_config ti964_controls[] = { + { + .ops = &ti964_ctrl_ops, + .id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = V4L2_CTRL_TYPE_INTEGER_MENU, + .max = ARRAY_SIZE(ti964_op_sys_clock) - 1, + .min = 0, + .step = 0, + .def = 0, + .qmenu_int = ti964_op_sys_clock, + }, + { + .ops = &ti964_ctrl_ops, + .id = V4L2_CID_TEST_PATTERN, + .name = "V4L2_CID_TEST_PATTERN", + .type = V4L2_CTRL_TYPE_INTEGER, + .max = 1, + .min = 0, + .step = 1, + .def = 0, + }, +}; + +static const struct v4l2_subdev_pad_ops ti964_sd_pad_ops = { + .get_fmt = ti964_get_format, + .set_fmt = ti964_set_format, + .get_frame_desc = ti964_get_frame_desc, + .enum_mbus_code = ti964_enum_mbus_code, + .set_routing = ti964_set_routing, + .get_routing = ti964_get_routing, +}; + +static struct v4l2_subdev_ops ti964_sd_ops = { + .core = &ti964_core_subdev_ops, + .video = &ti964_sd_video_ops, + .pad = &ti964_sd_pad_ops, +}; + +static int ti964_register_subdev(struct ti964 *va) +{ + int i, rval; + struct i2c_client *client = v4l2_get_subdevdata(&va->sd); + + v4l2_subdev_init(&va->sd, &ti964_sd_ops); + snprintf(va->sd.name, sizeof(va->sd.name), "TI964 %d-%4.4x", + i2c_adapter_id(client->adapter), client->addr); + + va->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | + V4L2_SUBDEV_FL_HAS_SUBSTREAMS; + + va->sd.internal_ops = &ti964_sd_internal_ops; + va->sd.entity.ops = &ti964_sd_entity_ops; + + v4l2_set_subdevdata(&va->sd, client); + + v4l2_ctrl_handler_init(&va->ctrl_handler, + ARRAY_SIZE(ti964_controls)); + + if (va->ctrl_handler.error) { + dev_err(va->sd.dev, + "Failed to init ti964 controls. 
ERR: %d!\n", + va->ctrl_handler.error); + return va->ctrl_handler.error; + } + + va->sd.ctrl_handler = &va->ctrl_handler; + + for (i = 0; i < ARRAY_SIZE(ti964_controls); i++) { + const struct v4l2_ctrl_config *cfg = + &ti964_controls[i]; + struct v4l2_ctrl *ctrl; + + ctrl = v4l2_ctrl_new_custom(&va->ctrl_handler, cfg, NULL); + if (!ctrl) { + dev_err(va->sd.dev, + "Failed to create ctrl %s!\n", cfg->name); + rval = va->ctrl_handler.error; + goto failed_out; + } + } + + va->link_freq = v4l2_ctrl_find(&va->ctrl_handler, V4L2_CID_LINK_FREQ); + va->test_pattern = v4l2_ctrl_find(&va->ctrl_handler, + V4L2_CID_TEST_PATTERN); + + for (i = 0; i < va->nsinks; i++) + va->pad[i].flags = MEDIA_PAD_FL_SINK; + va->pad[TI964_PAD_SOURCE].flags = + MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MULTIPLEX; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = media_entity_init(&va->sd.entity, NR_OF_TI964_PADS, va->pad, 0); +#else + rval = media_entity_pads_init(&va->sd.entity, + NR_OF_TI964_PADS, va->pad); +#endif + if (rval) { + dev_err(va->sd.dev, + "Failed to init media entity for ti964!\n"); + goto failed_out; + } + + return 0; + +failed_out: + v4l2_ctrl_handler_free(&va->ctrl_handler); + return rval; +} + +static int ti964_init(struct ti964 *va) +{ + unsigned int reset_gpio = va->pdata->reset_gpio; + int i, rval; + unsigned int val; + + gpio_set_value(reset_gpio, 1); + usleep_range(2000, 3000); + dev_dbg(va->sd.dev, "Setting reset gpio %d to 1.\n", reset_gpio); + + rval = regmap_read(va->regmap8, TI964_DEVID, &val); + if (rval) { + dev_err(va->sd.dev, "Failed to read device ID of TI964!\n"); + return rval; + } + dev_info(va->sd.dev, "TI964 device ID: 0x%X\n", val); + + for (i = 0; i < ARRAY_SIZE(ti964_init_settings); i++) { + rval = regmap_write(va->regmap8, + ti964_init_settings[i].reg, + ti964_init_settings[i].val); + if (rval) + return rval; + } + + rval = ti964_map_subdevs_addr(va); + if (rval) + return rval; + + return 0; +} + +static void ti964_gpio_set(struct gpio_chip *chip, unsigned gpio, int value) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + struct i2c_client *client = to_i2c_client(chip->dev); +#else + struct i2c_client *client = to_i2c_client(chip->parent); +#endif + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct ti964 *va = to_ti964(subdev); + unsigned int reg_val; + int rx_port, gpio_port; + int ret; + + if (gpio >= NR_OF_TI964_GPIOS) + return; + + rx_port = gpio / NR_OF_GPIOS_PER_PORT; + gpio_port = gpio % NR_OF_GPIOS_PER_PORT; + + ret = regmap_write(va->regmap8, TI964_RX_PORT_SEL, + (rx_port << 4) + (1 << rx_port)); + if (ret) { + dev_dbg(&client->dev, "Failed to select RX port.\n"); + return; + } + ret = regmap_read(va->regmap8, TI964_BC_GPIO_CTL0, ®_val); + if (ret) { + dev_dbg(&client->dev, "Failed to read gpio status.\n"); + return; + } + + if (gpio_port == 0) { + reg_val &= ~TI964_GPIO0_MASK; + reg_val |= value ? TI964_GPIO0_HIGH : TI964_GPIO0_LOW; + } else { + reg_val &= ~TI964_GPIO1_MASK; + reg_val |= value ? 
TI964_GPIO1_HIGH : TI964_GPIO1_LOW; + } + + ret = regmap_write(va->regmap8, TI964_BC_GPIO_CTL0, reg_val); + if (ret) + dev_dbg(&client->dev, "Failed to set gpio.\n"); +} + +static int ti964_gpio_direction_output(struct gpio_chip *chip, + unsigned gpio, int level) +{ + return 0; +} + +static int ti964_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct ti964 *va; + int i, rval = 0; + + if (client->dev.platform_data == NULL) + return -ENODEV; + + va = devm_kzalloc(&client->dev, sizeof(*va), GFP_KERNEL); + if (!va) + return -ENOMEM; + + va->pdata = client->dev.platform_data; + + va->nsources = NR_OF_TI964_SOURCE_PADS; + va->nsinks = NR_OF_TI964_SINK_PADS; + va->npads = NR_OF_TI964_PADS; + va->nstreams = NR_OF_TI964_STREAMS; + + va->crop = devm_kcalloc(&client->dev, va->npads, + sizeof(struct v4l2_rect), GFP_KERNEL); + + va->compose = devm_kcalloc(&client->dev, va->npads, + sizeof(struct v4l2_rect), GFP_KERNEL); + + va->route = devm_kcalloc(&client->dev, va->nstreams, + sizeof(*va->route), GFP_KERNEL); + + va->stream = devm_kcalloc(&client->dev, va->npads, + sizeof(*va->stream), GFP_KERNEL); + + if (!va->crop || !va->compose || !va->route || !va->stream) + return -ENOMEM; + + for (i = 0; i < va->npads; i++) { + va->ffmts[i] = devm_kcalloc(&client->dev, va->nstreams, + sizeof(struct v4l2_mbus_framefmt), + GFP_KERNEL); + if (!va->ffmts[i]) + return -ENOMEM; + + va->stream[i].stream_id = + devm_kcalloc(&client->dev, va->nsinks, + sizeof(*va->stream[i].stream_id), GFP_KERNEL); + if (!va->stream[i].stream_id) + return -ENOMEM; + } + + for (i = 0; i < va->nstreams; i++) { + va->route[i].sink = i; + va->route[i].source = TI964_PAD_SOURCE; + va->route[i].flags = 0; + } + + for (i = 0; i < va->nsinks; i++) { + va->stream[i].stream_id[0] = i; + va->stream[TI964_PAD_SOURCE].stream_id[i] = i; + } + + va->regmap8 = devm_regmap_init_i2c(client, + &ti964_reg_config8); + if (IS_ERR(va->regmap8)) { + dev_err(&client->dev, "Failed to init regmap8!\n"); + return -EIO; + } + + va->regmap16 = devm_regmap_init_i2c(client, + &ti964_reg_config16); + if (IS_ERR(va->regmap16)) { + dev_err(&client->dev, "Failed to init regmap16!\n"); + return -EIO; + } + + mutex_init(&va->mutex); + v4l2_i2c_subdev_init(&va->sd, client, &ti964_sd_ops); + rval = ti964_register_subdev(va); + if (rval) { + dev_err(&client->dev, "Failed to register va subdevice!\n"); + return rval; + } + + if (devm_gpio_request_one(va->sd.dev, va->pdata->reset_gpio, 0, + "ti964 reset") != 0) { + dev_err(va->sd.dev, "Unable to acquire gpio %d\n", + va->pdata->reset_gpio); + return -ENODEV; + } + + rval = ti964_init(va); + if (rval) { + dev_err(&client->dev, "Failed to init TI964!\n"); + return rval; + } + + /* + * TI964 has several back channel GPIOs. + * We export GPIO0 and GPIO1 to control reset or fsin. 
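+ * Each RX port contributes NR_OF_GPIOS_PER_PORT pins, which is why ti964_gpio_set() decodes rx_port = gpio / NR_OF_GPIOS_PER_PORT and gpio_port = gpio % NR_OF_GPIOS_PER_PORT.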
+ */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + va->gc.dev = &client->dev; +#else + va->gc.parent = &client->dev; +#endif + va->gc.owner = THIS_MODULE; + va->gc.label = "TI964 GPIO"; + va->gc.ngpio = NR_OF_TI964_GPIOS; + va->gc.base = -1; + va->gc.set = ti964_gpio_set; + va->gc.direction_output = ti964_gpio_direction_output; + rval = gpiochip_add(&va->gc); + if (rval) { + dev_err(&client->dev, "Failed to add gpio chip!\n"); + return -EIO; + } + + return 0; +} + +static int ti964_remove(struct i2c_client *client) +{ + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct ti964 *va = to_ti964(subdev); + int i; + + if (!va) + return 0; + + mutex_destroy(&va->mutex); + v4l2_ctrl_handler_free(&va->ctrl_handler); + v4l2_device_unregister_subdev(&va->sd); + media_entity_cleanup(&va->sd.entity); + + for (i = 0; i < NR_OF_TI964_SINK_PADS; i++) { + if (va->sub_devs[i].sd) { + struct i2c_client *sub_client = + v4l2_get_subdevdata(va->sub_devs[i].sd); + + i2c_unregister_device(sub_client); + } + va->sub_devs[i].sd = NULL; + } + + gpiochip_remove(&va->gc); + + return 0; +} + +#ifdef CONFIG_PM +static int ti964_suspend(struct device *dev) +{ + return 0; +} + +static int ti964_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct ti964 *va = to_ti964(subdev); + + return ti964_init(va); +} +#else +#define ti964_suspend NULL +#define ti964_resume NULL +#endif /* CONFIG_PM */ + +static const struct i2c_device_id ti964_id_table[] = { + { TI964_NAME, 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, ti964_id_table); + +static const struct dev_pm_ops ti964_pm_ops = { + .suspend = ti964_suspend, + .resume = ti964_resume, +}; + +static struct i2c_driver ti964_i2c_driver = { + .driver = { + .name = TI964_NAME, + .pm = &ti964_pm_ops, + }, + .probe = ti964_probe, + .remove = ti964_remove, + .id_table = ti964_id_table, +}; +module_i2c_driver(ti964_i2c_driver); + +MODULE_AUTHOR("Tianshu Qiu "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("TI964 CSI2-Aggregator driver"); diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 7b79a7498751..698fa764999c 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c @@ -506,80 +506,77 @@ static struct i2c_vbi_ram_value vbi_ram_default[] = /* FIXME: Current api doesn't handle all VBI types, those not yet supported are placed under #if 0 */ #if 0 - {0x010, /* Teletext, SECAM, WST System A */ + [0] = {0x010, /* Teletext, SECAM, WST System A */ {V4L2_SLICED_TELETEXT_SECAM,6,23,1}, { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x26, 0xe6, 0xb4, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00 } }, #endif - {0x030, /* Teletext, PAL, WST System B */ + [1] = {0x030, /* Teletext, PAL, WST System B */ {V4L2_SLICED_TELETEXT_B,6,22,1}, { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x2b, 0xa6, 0x72, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00 } }, #if 0 - {0x050, /* Teletext, PAL, WST System C */ + [2] = {0x050, /* Teletext, PAL, WST System C */ {V4L2_SLICED_TELETEXT_PAL_C,6,22,1}, { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22, 0xa6, 0x98, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } }, - {0x070, /* Teletext, NTSC, WST System B */ + [3] = {0x070, /* Teletext, NTSC, WST System B */ {V4L2_SLICED_TELETEXT_NTSC_B,10,21,1}, { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x23, 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } }, - {0x090, /* Tetetext, NTSC NABTS System C */ + [4] = {0x090, /* Tetetext, NTSC NABTS System C */ {V4L2_SLICED_TELETEXT_NTSC_C,10,21,1}, { 0xaa, 0xaa, 0xff, 
0xff, 0xe7, 0x2e, 0x20, 0x22, 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x15, 0x00 } }, - {0x0b0, /* Teletext, NTSC-J, NABTS System D */ + [5] = {0x0b0, /* Teletext, NTSC-J, NABTS System D */ {V4L2_SLICED_TELETEXT_NTSC_D,10,21,1}, { 0xaa, 0xaa, 0xff, 0xff, 0xa7, 0x2e, 0x20, 0x23, 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } }, - {0x0d0, /* Closed Caption, PAL/SECAM */ + [6] = {0x0d0, /* Closed Caption, PAL/SECAM */ {V4L2_SLICED_CAPTION_625,22,22,1}, { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02, 0xa6, 0x7b, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 } }, #endif - {0x0f0, /* Closed Caption, NTSC */ + [7] = {0x0f0, /* Closed Caption, NTSC */ {V4L2_SLICED_CAPTION_525,21,21,1}, { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02, 0x69, 0x8c, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 } }, - {0x110, /* Wide Screen Signal, PAL/SECAM */ + [8] = {0x110, /* Wide Screen Signal, PAL/SECAM */ {V4L2_SLICED_WSS_625,23,23,1}, { 0x5b, 0x55, 0xc5, 0xff, 0x00, 0x71, 0x6e, 0x42, 0xa6, 0xcd, 0x0f, 0x00, 0x00, 0x00, 0x3a, 0x00 } }, #if 0 - {0x130, /* Wide Screen Signal, NTSC C */ + [9] = {0x130, /* Wide Screen Signal, NTSC C */ {V4L2_SLICED_WSS_525,20,20,1}, { 0x38, 0x00, 0x3f, 0x00, 0x00, 0x71, 0x6e, 0x43, 0x69, 0x7c, 0x08, 0x00, 0x00, 0x00, 0x39, 0x00 } }, - {0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */ + [10] = {0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */ {V4l2_SLICED_VITC_625,6,22,0}, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49, 0xa6, 0x85, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 } }, - {0x170, /* Vertical Interval Timecode (VITC), NTSC */ + [11] = {0x170, /* Vertical Interval Timecode (VITC), NTSC */ {V4l2_SLICED_VITC_525,10,20,0}, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49, 0x69, 0x94, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 } }, #endif - {0x190, /* Video Program System (VPS), PAL */ + [12] = {0x190, /* Video Program System (VPS), PAL */ {V4L2_SLICED_VPS,16,16,0}, { 0xaa, 0xaa, 0xff, 0xff, 0xba, 0xce, 0x2b, 0x0d, 0xa6, 0xda, 0x0b, 0x00, 0x00, 0x00, 0x60, 0x00 } }, /* 0x1d0 User programmable */ - - /* End of struct */ - { (u16)-1 } }; static int tvp5150_write_inittab(struct v4l2_subdev *sd, @@ -592,10 +589,10 @@ static int tvp5150_write_inittab(struct v4l2_subdev *sd, return 0; } -static int tvp5150_vdp_init(struct v4l2_subdev *sd, - const struct i2c_vbi_ram_value *regs) +static int tvp5150_vdp_init(struct v4l2_subdev *sd) { unsigned int i; + int j; /* Disable Full Field */ tvp5150_write(sd, TVP5150_FULL_FIELD_ENA, 0); @@ -605,14 +602,17 @@ static int tvp5150_vdp_init(struct v4l2_subdev *sd, tvp5150_write(sd, i, 0xff); /* Load Ram Table */ - while (regs->reg != (u16)-1) { + for (j = 0; j < ARRAY_SIZE(vbi_ram_default); j++) { + const struct i2c_vbi_ram_value *regs = &vbi_ram_default[j]; + + if (!regs->type.vbi_type) + continue; + tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_HIGH, regs->reg >> 8); tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_LOW, regs->reg); for (i = 0; i < 16; i++) tvp5150_write(sd, TVP5150_VDP_CONF_RAM_DATA, regs->values[i]); - - regs++; } return 0; } @@ -621,19 +621,23 @@ static int tvp5150_vdp_init(struct v4l2_subdev *sd, static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_cap *cap) { - const struct i2c_vbi_ram_value *regs = vbi_ram_default; - int line; + int line, i; dev_dbg_lvl(sd->dev, 1, debug, "g_sliced_vbi_cap\n"); memset(cap, 0, sizeof *cap); - while (regs->reg != (u16)-1 ) { - for (line=regs->type.ini_line;line<=regs->type.end_line;line++) { + for (i = 0; i < ARRAY_SIZE(vbi_ram_default); i++) { + const struct i2c_vbi_ram_value *regs = 
&vbi_ram_default[i]; + + if (!regs->type.vbi_type) + continue; + + for (line = regs->type.ini_line; + line <= regs->type.end_line; + line++) { cap->service_lines[0][line] |= regs->type.vbi_type; } cap->service_set |= regs->type.vbi_type; - - regs++; } return 0; } @@ -652,14 +656,13 @@ static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd, * MSB = field2 */ static int tvp5150_set_vbi(struct v4l2_subdev *sd, - const struct i2c_vbi_ram_value *regs, unsigned int type,u8 flags, int line, const int fields) { struct tvp5150 *decoder = to_tvp5150(sd); v4l2_std_id std = decoder->norm; u8 reg; - int pos = 0; + int i, pos = 0; if (std == V4L2_STD_ALL) { dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n"); @@ -672,19 +675,19 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd, if (line < 6 || line > 27) return 0; - while (regs->reg != (u16)-1) { + for (i = 0; i < ARRAY_SIZE(vbi_ram_default); i++) { + const struct i2c_vbi_ram_value *regs = &vbi_ram_default[i]; + + if (!regs->type.vbi_type) + continue; + if ((type & regs->type.vbi_type) && (line >= regs->type.ini_line) && (line <= regs->type.end_line)) break; - - regs++; pos++; } - if (regs->reg == (u16)-1) - return 0; - type = pos | (flags & 0xf0); reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI; @@ -697,8 +700,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd, return type; } -static int tvp5150_get_vbi(struct v4l2_subdev *sd, - const struct i2c_vbi_ram_value *regs, int line) +static int tvp5150_get_vbi(struct v4l2_subdev *sd, int line) { struct tvp5150 *decoder = to_tvp5150(sd); v4l2_std_id std = decoder->norm; @@ -727,8 +729,8 @@ static int tvp5150_get_vbi(struct v4l2_subdev *sd, return 0; } pos = ret & 0x0f; - if (pos < 0x0f) - type |= regs[pos].type.vbi_type; + if (pos < ARRAY_SIZE(vbi_ram_default)) + type |= vbi_ram_default[pos].type.vbi_type; } return type; @@ -789,7 +791,7 @@ static int tvp5150_reset(struct v4l2_subdev *sd, u32 val) tvp5150_write_inittab(sd, tvp5150_init_default); /* Initializes VDP registers */ - tvp5150_vdp_init(sd, vbi_ram_default); + tvp5150_vdp_init(sd); /* Selects decoder input */ tvp5150_selmux(sd); @@ -1122,8 +1124,8 @@ static int tvp5150_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f for (i = 0; i <= 23; i++) { svbi->service_lines[1][i] = 0; svbi->service_lines[0][i] = - tvp5150_set_vbi(sd, vbi_ram_default, - svbi->service_lines[0][i], 0xf0, i, 3); + tvp5150_set_vbi(sd, svbi->service_lines[0][i], + 0xf0, i, 3); } /* Enables FIFO */ tvp5150_write(sd, TVP5150_FIFO_OUT_CTRL, 1); @@ -1149,7 +1151,7 @@ static int tvp5150_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f for (i = 0; i <= 23; i++) { svbi->service_lines[0][i] = - tvp5150_get_vbi(sd, vbi_ram_default, i); + tvp5150_get_vbi(sd, i); mask |= svbi->service_lines[0][i]; } svbi->service_set = mask; diff --git a/drivers/media/i2c/vcm_stub.c b/drivers/media/i2c/vcm_stub.c new file mode 100644 index 000000000000..773c2a379b92 --- /dev/null +++ b/drivers/media/i2c/vcm_stub.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2017 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include "vcm_stub.h" + +#define ctrl_to_vcm_stub(_ctrl) \ + container_of(_ctrl->handler, struct vcm_stub_device, ctrls_vcm) + +static int vcm_stub_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct vcm_stub_device *vcm_stub_dev = ctrl_to_vcm_stub(ctrl); + + if (ctrl->id != V4L2_CID_FOCUS_ABSOLUTE) { + dev_err(&vcm_stub_dev->client->dev, + "Unsupported VCM set control\n"); + return 
-EINVAL; + } + + vcm_stub_dev->current_val = ctrl->val; + dev_dbg(&vcm_stub_dev->client->dev, + "Setting new value to VCM: %d\n", + ctrl->val); + + return 0; +} + +static int vcm_stub_get_ctrl(struct v4l2_ctrl *ctrl) +{ + struct vcm_stub_device *vcm_stub_dev = ctrl_to_vcm_stub(ctrl); + + if (ctrl->id != V4L2_CID_FOCUS_ABSOLUTE) { + dev_err(&vcm_stub_dev->client->dev, + "Unsupported VCM get control\n"); + return -EINVAL; + } + + ctrl->val = vcm_stub_dev->current_val; + dev_dbg(&vcm_stub_dev->client->dev, + "Get setting from VCM: %d\n", + vcm_stub_dev->current_val); + + return 0; +} + +static const struct v4l2_ctrl_ops vcm_stub_ctrl_ops = { + .s_ctrl = vcm_stub_set_ctrl, + .g_volatile_ctrl = vcm_stub_get_ctrl, +}; + +static int vcm_stub_init_controls(struct vcm_stub_device *dev_vcm) +{ + struct v4l2_ctrl_handler *hdl = &dev_vcm->ctrls_vcm; + const struct v4l2_ctrl_ops *ops = &vcm_stub_ctrl_ops; + struct i2c_client *client = dev_vcm->client; + + v4l2_ctrl_handler_init(hdl, 1); + + v4l2_ctrl_new_std(hdl, ops, + V4L2_CID_FOCUS_ABSOLUTE, + 0, + VCM_MAX_FOCUS_POS, + 1, + 0); + + if (hdl->error) + dev_err(&client->dev, "vcm_stub_init_controls fail\n"); + dev_vcm->subdev_vcm.ctrl_handler = hdl; + return hdl->error; +} + +static void vcm_stub_subdev_cleanup(struct vcm_stub_device *vcm_stub_dev) +{ + v4l2_ctrl_handler_free(&vcm_stub_dev->ctrls_vcm); + v4l2_device_unregister_subdev(&vcm_stub_dev->subdev_vcm); + media_entity_cleanup(&vcm_stub_dev->subdev_vcm.entity); +} + +static int vcm_stub_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct vcm_stub_device *vcm_stub_dev = + container_of(sd, struct vcm_stub_device, subdev_vcm); + struct device *dev = &vcm_stub_dev->client->dev; + + dev_dbg(dev, "vcm stub opening successfully\n"); + + return 0; +} + +static int vcm_stub_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct vcm_stub_device *vcm_stub_dev = + container_of(sd, struct vcm_stub_device, subdev_vcm); + struct device *dev = &vcm_stub_dev->client->dev; + + dev_dbg(dev, "vcm stub closed\n"); + + return 0; +} + +static const struct v4l2_subdev_internal_ops vcm_stub_int_ops = { + .open = vcm_stub_open, + .close = vcm_stub_close, +}; + +static const struct v4l2_subdev_ops vcm_stub_ops = { }; + +static int vcm_stub_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct vcm_stub_device *vcm_stub_dev; + int val; + + vcm_stub_dev = devm_kzalloc(&client->dev, sizeof(*vcm_stub_dev), + GFP_KERNEL); + + if (vcm_stub_dev == NULL) { + dev_err(&client->dev, "Probe: Failed to allocate memory!\n"); + return -ENOMEM; + } + + vcm_stub_dev->client = client; + + v4l2_i2c_subdev_init(&vcm_stub_dev->subdev_vcm, client, &vcm_stub_ops); + vcm_stub_dev->subdev_vcm.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + vcm_stub_dev->subdev_vcm.internal_ops = &vcm_stub_int_ops; + + snprintf(vcm_stub_dev->subdev_vcm.name, + sizeof(vcm_stub_dev->subdev_vcm.name), + VCM_STUB_NAME); + + val = vcm_stub_init_controls(vcm_stub_dev); + if (val) + goto err_cleanup; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + val = media_entity_init(&vcm_stub_dev->subdev_vcm.entity, 0, NULL, 0); +#else + val = media_entity_pads_init(&vcm_stub_dev->subdev_vcm.entity, 0, NULL); +#endif + if (val < 0) + goto err_cleanup; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + vcm_stub_dev->subdev_vcm.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_LENS; +#else + vcm_stub_dev->subdev_vcm.entity.function = MEDIA_ENT_F_LENS; +#endif + + return 0; + +err_cleanup: + vcm_stub_subdev_cleanup(vcm_stub_dev); + 
dev_err(&client->dev, "Probe failed: %d\n", val); + return val; +} + +static int vcm_stub_remove(struct i2c_client *client) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct vcm_stub_device *vcm_stub_dev = + container_of(sd, struct vcm_stub_device, subdev_vcm); + + vcm_stub_subdev_cleanup(vcm_stub_dev); + + return 0; +} + +static const struct i2c_device_id vcm_stub_id_table[] = { + { VCM_STUB_NAME, 0 }, + { } +}; + +MODULE_DEVICE_TABLE(i2c, vcm_stub_id_table); + +static struct i2c_driver vcm_stub_i2c_driver = { + .driver = { + .name = VCM_STUB_NAME, + }, + .probe = vcm_stub_probe, + .remove = vcm_stub_remove, + .id_table = vcm_stub_id_table, +}; + +module_i2c_driver(vcm_stub_i2c_driver); + +MODULE_AUTHOR("Mingda Xu "); +MODULE_DESCRIPTION("VCM stub driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/i2c/vcm_stub.h b/drivers/media/i2c/vcm_stub.h new file mode 100644 index 000000000000..f609ed223640 --- /dev/null +++ b/drivers/media/i2c/vcm_stub.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 - 2018 Intel Corporation */ + +#ifndef _VCM_STUB_H_ +#define _VCM_STUB_H_ + +#define VCM_STUB_NAME "vcm_stub" +#define VCM_MAX_FOCUS_POS 0xFFFF + +/* VCM stub device structure */ +struct vcm_stub_device { + struct i2c_client *client; + struct v4l2_ctrl_handler ctrls_vcm; + struct v4l2_subdev subdev_vcm; + unsigned int current_val; +}; + +#endif diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index e79f72b8b858..362d1aa55570 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -19,6 +19,7 @@ /* We need to access legacy defines from linux/media.h */ #define __NEED_MEDIA_LEGACY_API +#include #include #include #include @@ -28,6 +29,7 @@ #include #include #include +#include #include #include @@ -35,6 +37,356 @@ #ifdef CONFIG_MEDIA_CONTROLLER +static char *__request_state[] = { + "IDLE", + "QUEUED", + "DELETED", + "COMPLETED", +}; + +#define request_state(i) \ + ((i) < ARRAY_SIZE(__request_state) ? __request_state[i] : "UNKNOWN") + + +struct media_device_fh { + struct media_devnode_fh fh; + struct list_head requests; + struct { + struct list_head head; + wait_queue_head_t wait; + atomic_t sequence; + } kevents; +}; + +static inline struct media_device_fh *media_device_fh(struct file *filp) +{ + return container_of(filp->private_data, struct media_device_fh, fh); +} + +/* ----------------------------------------------------------------------------- + * Requests + */ + +/** + * media_device_request_find - Find a request based from its ID + * @mdev: The media device + * @reqid: The request ID + * + * Find and return the request associated with the given ID, or NULL if no such + * request exists. + * + * When the function returns a non-NULL request it increases its reference + * count. The caller is responsible for releasing the reference by calling + * media_device_request_put() on the request. 
+ */ +struct media_device_request * +media_device_request_find(struct media_device *mdev, u16 reqid) +{ + struct media_device_request *req; + unsigned long flags; + bool found = false; + + spin_lock_irqsave(&mdev->req_lock, flags); + list_for_each_entry(req, &mdev->requests, list) { + if (req->id == reqid) { + kref_get(&req->kref); + found = true; + break; + } + } + spin_unlock_irqrestore(&mdev->req_lock, flags); + + if (!found) { + dev_dbg(mdev->dev, + "request: can't find %u\n", reqid); + return NULL; + } + + return req; +} +EXPORT_SYMBOL_GPL(media_device_request_find); + +void media_device_request_get(struct media_device_request *req) +{ + kref_get(&req->kref); +} +EXPORT_SYMBOL_GPL(media_device_request_get); + +static void media_device_request_queue_event(struct media_device *mdev, + struct media_device_request *req, + struct media_device_fh *fh) +{ + struct media_kevent *kev = req->kev; + struct media_event *ev = &kev->ev; + + lockdep_assert_held(&mdev->req_lock); + + ev->sequence = atomic_inc_return(&fh->kevents.sequence); + ev->type = MEDIA_EVENT_TYPE_REQUEST_COMPLETE; + ev->req_complete.id = req->id; + + list_add(&kev->list, &fh->kevents.head); + req->kev = NULL; + req->state = MEDIA_DEVICE_REQUEST_STATE_COMPLETE; + wake_up(&fh->kevents.wait); +} + +static void media_device_request_release(struct kref *kref) +{ + struct media_device_request *req = + container_of(kref, struct media_device_request, kref); + struct media_device *mdev = req->mdev; + + dev_dbg(mdev->dev, "release request %u\n", req->id); + + ida_simple_remove(&mdev->req_ids, req->id); + + kfree(req->kev); + req->kev = NULL; + + mdev->ops->req_free(mdev, req); +} + +void media_device_request_put(struct media_device_request *req) +{ + kref_put(&req->kref, media_device_request_release); +} +EXPORT_SYMBOL_GPL(media_device_request_put); + +static int media_device_request_alloc(struct media_device *mdev, + struct file *filp, + struct media_request_cmd *cmd) +{ + struct media_device_fh *fh = media_device_fh(filp); + struct media_device_request *req; + struct media_kevent *kev; + unsigned long flags; + int id = ida_simple_get(&mdev->req_ids, 1, 0, GFP_KERNEL); + int ret; + + if (id < 0) { + dev_dbg(mdev->dev, "request: unable to obtain new id\n"); + return id; + } + + kev = kzalloc(sizeof(*kev), GFP_KERNEL); + if (!kev) { + ret = -ENOMEM; + goto out_ida_simple_remove; + } + + req = mdev->ops->req_alloc(mdev); + if (!req) { + ret = -ENOMEM; + goto out_kev_free; + } + + req->mdev = mdev; + req->id = id; + req->filp = filp; + req->state = MEDIA_DEVICE_REQUEST_STATE_IDLE; + req->kev = kev; + kref_init(&req->kref); + + spin_lock_irqsave(&mdev->req_lock, flags); + list_add_tail(&req->list, &mdev->requests); + list_add_tail(&req->fh_list, &fh->requests); + spin_unlock_irqrestore(&mdev->req_lock, flags); + + cmd->request = req->id; + + dev_dbg(mdev->dev, "request: allocated id %u\n", req->id); + + return 0; + +out_kev_free: + kfree(kev); + +out_ida_simple_remove: + ida_simple_remove(&mdev->req_ids, id); + + return ret; +} + +static int media_device_request_delete(struct media_device *mdev, + struct media_device_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&mdev->req_lock, flags); + + if (req->state != MEDIA_DEVICE_REQUEST_STATE_IDLE) { + spin_unlock_irqrestore(&mdev->req_lock, flags); + dev_dbg(mdev->dev, "request: can't delete %u, state %s\n", + req->id, request_state(req->state)); + return -EINVAL; + } + + req->state = MEDIA_DEVICE_REQUEST_STATE_DELETED; + + if (req->filp) { + /* + * If the file handle is gone by now 
the
+		 * request has already been deleted from the
+		 * two lists.
+		 */
+		list_del(&req->list);
+		list_del(&req->fh_list);
+		req->filp = NULL;
+	}
+
+	spin_unlock_irqrestore(&mdev->req_lock, flags);
+
+	media_device_request_put(req);
+
+	return 0;
+}
+
+void media_device_request_complete(struct media_device *mdev,
+				   struct media_device_request *req)
+{
+	struct file *filp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mdev->req_lock, flags);
+
+	if (req->state == MEDIA_DEVICE_REQUEST_STATE_IDLE) {
+		dev_dbg(mdev->dev,
+			"request: not completing an idle request %u\n",
+			req->id);
+		spin_unlock_irqrestore(&mdev->req_lock, flags);
+		return;
+	}
+
+	if (WARN_ON(req->state != MEDIA_DEVICE_REQUEST_STATE_QUEUED)) {
+		dev_dbg(mdev->dev, "request: can't complete %u, state %s\n",
+			req->id, request_state(req->state));
+		spin_unlock_irqrestore(&mdev->req_lock, flags);
+		return;
+	}
+
+	req->state = MEDIA_DEVICE_REQUEST_STATE_COMPLETE;
+	filp = req->filp;
+	if (filp) {
+		/*
+		 * If the file handle is still around we remove it
+		 * from the lists here. Otherwise it has been removed
+		 * when the file handle closed.
+		 */
+		list_del(&req->list);
+		list_del(&req->fh_list);
+		/* If the user asked for an event, let's queue one. */
+		if (req->flags & MEDIA_REQ_FL_COMPLETE_EVENT)
+			media_device_request_queue_event(
+				mdev, req, media_device_fh(filp));
+		req->filp = NULL;
+	}
+
+	spin_unlock_irqrestore(&mdev->req_lock, flags);
+
+	/*
+	 * The file handle's reference is dropped here only if the file
+	 * handle was still around (filp is non-NULL); otherwise it was
+	 * already dropped when the file handle was closed.
+	 */
+	if (filp)
+		media_device_request_put(req);
+}
+EXPORT_SYMBOL_GPL(media_device_request_complete);
+
+static int media_device_request_queue_apply(
+	struct media_device *mdev, struct media_device_request *req,
+	u32 req_flags, int (*fn)(struct media_device *mdev,
+				 struct media_device_request *req), bool queue)
+{
+	char *str = queue ?
"queue" : "apply"; + unsigned long flags; + int rval = 0; + + if (!fn) + return -ENOSYS; + + spin_lock_irqsave(&mdev->req_lock, flags); + if (req->state != MEDIA_DEVICE_REQUEST_STATE_IDLE) { + rval = -EINVAL; + dev_dbg(mdev->dev, + "request: unable to %s %u, request in state %s\n", + str, req->id, request_state(req->state)); + } else { + req->state = MEDIA_DEVICE_REQUEST_STATE_QUEUED; + req->flags = req_flags; + } + spin_unlock_irqrestore(&mdev->req_lock, flags); + + if (rval) + return rval; + + rval = fn(mdev, req); + if (rval) { + spin_lock_irqsave(&mdev->req_lock, flags); + req->state = MEDIA_DEVICE_REQUEST_STATE_IDLE; + spin_unlock_irqrestore(&mdev->req_lock, flags); + dev_dbg(mdev->dev, + "request: can't %s %u\n", str, req->id); + } else { + dev_dbg(mdev->dev, + "request: %s %u\n", str, req->id); + } + + return rval; +} + +static long media_device_request_cmd(struct media_device *mdev, + struct file *filp, + struct media_request_cmd *cmd) +{ + struct media_device_request *req = NULL; + int ret; + + if (!mdev->ops || !mdev->ops->req_alloc || !mdev->ops->req_free) + return -ENOTTY; + + if (cmd->cmd != MEDIA_REQ_CMD_ALLOC) { + req = media_device_request_find(mdev, cmd->request); + if (!req) + return -EINVAL; + } + + switch (cmd->cmd) { + case MEDIA_REQ_CMD_ALLOC: + ret = media_device_request_alloc(mdev, filp, cmd); + break; + + case MEDIA_REQ_CMD_DELETE: + ret = media_device_request_delete(mdev, req); + break; + + case MEDIA_REQ_CMD_APPLY: + ret = media_device_request_queue_apply(mdev, req, cmd->flags, + mdev->ops->req_apply, + false); + break; + + case MEDIA_REQ_CMD_QUEUE: + ret = media_device_request_queue_apply(mdev, req, cmd->flags, + mdev->ops->req_queue, + true); + break; + + default: + ret = -EINVAL; + break; + } + + if (req) + media_device_request_put(req); + + if (ret < 0) + return ret; + + return 0; +} + /* ----------------------------------------------------------------------------- * Userspace API */ @@ -46,17 +398,61 @@ static inline void __user *media_get_uptr(__u64 arg) static int media_device_open(struct file *filp) { + struct media_device_fh *fh; + + fh = kzalloc(sizeof(*fh), GFP_KERNEL); + if (!fh) + return -ENOMEM; + + INIT_LIST_HEAD(&fh->requests); + INIT_LIST_HEAD(&fh->kevents.head); + init_waitqueue_head(&fh->kevents.wait); + atomic_set(&fh->kevents.sequence, -1); + filp->private_data = &fh->fh; + return 0; } static int media_device_close(struct file *filp) { + struct media_device_fh *fh = media_device_fh(filp); + struct media_device *mdev = fh->fh.devnode->media_dev; + + spin_lock_irq(&mdev->req_lock); + while (!list_empty(&fh->requests)) { + struct media_device_request *req = + list_first_entry(&fh->requests, typeof(*req), fh_list); + + list_del(&req->list); + list_del(&req->fh_list); + req->filp = NULL; + spin_unlock_irq(&mdev->req_lock); + media_device_request_put(req); + spin_lock_irq(&mdev->req_lock); + } + + while (!list_empty(&fh->kevents.head)) { + struct media_kevent *kev = + list_first_entry(&fh->kevents.head, typeof(*kev), list); + + list_del(&kev->list); + spin_unlock_irq(&mdev->req_lock); + kfree(kev); + spin_lock_irq(&mdev->req_lock); + } + spin_unlock_irq(&mdev->req_lock); + + kfree(fh); + return 0; } -static int media_device_get_info(struct media_device *dev, - struct media_device_info *info) +static long media_device_get_info(struct media_device *dev, + struct file *filp, + void *arg) { + struct media_device_info *info = (struct media_device_info *)arg; + memset(info, 0, sizeof(*info)); if (dev->driver_name[0]) @@ -94,8 +490,10 @@ static struct 
media_entity *find_entity(struct media_device *mdev, u32 id) } static long media_device_enum_entities(struct media_device *mdev, - struct media_entity_desc *entd) + struct file *filp, + void *arg) { + struct media_entity_desc *entd = (struct media_entity_desc *)arg; struct media_entity *ent; ent = find_entity(mdev, entd->id); @@ -147,8 +545,10 @@ static void media_device_kpad_to_upad(const struct media_pad *kpad, } static long media_device_enum_links(struct media_device *mdev, - struct media_links_enum *links) + struct file *filp, + void *arg) { + struct media_links_enum *links = (struct media_links_enum *)arg; struct media_entity *entity; entity = find_entity(mdev, links->entity); @@ -195,8 +595,10 @@ static long media_device_enum_links(struct media_device *mdev, } static long media_device_setup_link(struct media_device *mdev, - struct media_link_desc *linkd) + struct file *filp, + void *arg) { + struct media_link_desc *linkd = (struct media_link_desc *)arg; struct media_link *link = NULL; struct media_entity *source; struct media_entity *sink; @@ -222,9 +624,9 @@ static long media_device_setup_link(struct media_device *mdev, return __media_entity_setup_link(link, linkd->flags); } -static long media_device_get_topology(struct media_device *mdev, - struct media_v2_topology *topo) +static long media_device_get_topology(struct media_device *mdev, void *arg) { + struct media_v2_topology *topo = (struct media_v2_topology *)arg; struct media_entity *entity; struct media_interface *intf; struct media_pad *pad; @@ -358,6 +760,48 @@ static long media_device_get_topology(struct media_device *mdev, return ret; } +static struct media_kevent *opportunistic_dqevent(struct media_device *mdev, + struct file *filp) +{ + struct media_device_fh *fh = media_device_fh(filp); + struct media_kevent *kev = NULL; + unsigned long flags; + + spin_lock_irqsave(&mdev->req_lock, flags); + if (!list_empty(&fh->kevents.head)) { + kev = list_last_entry(&fh->kevents.head, + struct media_kevent, list); + list_del(&kev->list); + } + spin_unlock_irqrestore(&mdev->req_lock, flags); + + return kev; +} + +static int media_device_dqevent(struct media_device *mdev, + struct file *filp, + struct media_event *ev) +{ + struct media_device_fh *fh = media_device_fh(filp); + struct media_kevent *kev; + + if (filp->f_flags & O_NONBLOCK) { + kev = opportunistic_dqevent(mdev, filp); + if (!kev) + return -ENODATA; + } else { + int ret = wait_event_interruptible( + fh->kevents.wait, + (kev = opportunistic_dqevent(mdev, filp))); + if (ret == -ERESTARTSYS) + return ret; + } + + *ev = kev->ev; + kfree(kev); + + return 0; +} static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd) { @@ -383,7 +827,8 @@ static long copy_arg_to_user(void __user *uarg, void *karg, unsigned int cmd) #define MEDIA_IOC_ARG(__cmd, func, fl, from_user, to_user) \ [_IOC_NR(MEDIA_IOC_##__cmd)] = { \ .cmd = MEDIA_IOC_##__cmd, \ - .fn = (long (*)(struct media_device *, void *))func, \ + .fn = (long (*)(struct media_device *, \ + struct file *, void *))func, \ .flags = fl, \ .arg_from_user = from_user, \ .arg_to_user = to_user, \ @@ -395,8 +840,8 @@ static long copy_arg_to_user(void __user *uarg, void *karg, unsigned int cmd) /* the table is indexed by _IOC_NR(cmd) */ struct media_ioctl_info { unsigned int cmd; + long (*fn)(struct media_device *dev, struct file *file, void *arg); unsigned short flags; - long (*fn)(struct media_device *dev, void *arg); long (*arg_from_user)(void *karg, void __user *uarg, unsigned int cmd); long (*arg_to_user)(void 
__user *uarg, void *karg, unsigned int cmd); }; @@ -407,6 +852,8 @@ static const struct media_ioctl_info ioctl_info[] = { MEDIA_IOC(ENUM_LINKS, media_device_enum_links, MEDIA_IOC_FL_GRAPH_MUTEX), MEDIA_IOC(SETUP_LINK, media_device_setup_link, MEDIA_IOC_FL_GRAPH_MUTEX), MEDIA_IOC(G_TOPOLOGY, media_device_get_topology, MEDIA_IOC_FL_GRAPH_MUTEX), + MEDIA_IOC(REQUEST_CMD, media_device_request_cmd, 0), + MEDIA_IOC(DQEVENT, media_device_dqevent, 0), }; static long media_device_ioctl(struct file *filp, unsigned int cmd, @@ -440,7 +887,7 @@ static long media_device_ioctl(struct file *filp, unsigned int cmd, if (info->flags & MEDIA_IOC_FL_GRAPH_MUTEX) mutex_lock(&dev->graph_mutex); - ret = info->fn(dev, karg); + ret = info->fn(dev, filp, karg); if (info->flags & MEDIA_IOC_FL_GRAPH_MUTEX) mutex_unlock(&dev->graph_mutex); @@ -455,6 +902,34 @@ static long media_device_ioctl(struct file *filp, unsigned int cmd, return ret; } +static unsigned int media_device_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct media_device_fh *fh = media_device_fh(filp); + struct media_device *mdev = fh->fh.devnode->media_dev; + unsigned int poll_events = poll_requested_events(wait); + int ret = 0; + + if (poll_events & (POLLIN | POLLOUT)) + return POLLERR; + + if (poll_events & POLLPRI) { + unsigned long flags; + bool empty; + + spin_lock_irqsave(&mdev->req_lock, flags); + empty = list_empty(&fh->kevents.head); + spin_unlock_irqrestore(&mdev->req_lock, flags); + + if (empty) + poll_wait(filp, &fh->kevents.wait, wait); + else + ret |= POLLPRI; + } + + return ret; +} + #ifdef CONFIG_COMPAT struct media_links_enum32 { @@ -463,9 +938,9 @@ struct media_links_enum32 { compat_uptr_t links; /* struct media_link_desc * */ __u32 reserved[4]; }; - static long media_device_enum_links32(struct media_device *mdev, - struct media_links_enum32 __user *ulinks) + struct file *filp, + struct media_links_enum32 __user *ulinks) { struct media_links_enum links; compat_uptr_t pads_ptr, links_ptr; @@ -480,11 +955,10 @@ static long media_device_enum_links32(struct media_device *mdev, links.pads = compat_ptr(pads_ptr); links.links = compat_ptr(links_ptr); - return media_device_enum_links(mdev, &links); + return media_device_enum_links(mdev, filp, &links); } #define MEDIA_IOC_ENUM_LINKS32 _IOWR('|', 0x02, struct media_links_enum32) - static long media_device_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { @@ -496,6 +970,7 @@ static long media_device_compat_ioctl(struct file *filp, unsigned int cmd, case MEDIA_IOC_ENUM_LINKS32: mutex_lock(&dev->graph_mutex); ret = media_device_enum_links32(dev, + filp, (struct media_links_enum32 __user *)arg); mutex_unlock(&dev->graph_mutex); break; @@ -512,6 +987,7 @@ static const struct media_file_operations media_device_fops = { .owner = THIS_MODULE, .open = media_device_open, .ioctl = media_device_ioctl, + .poll = media_device_poll, #ifdef CONFIG_COMPAT .compat_ioctl = media_device_compat_ioctl, #endif /* CONFIG_COMPAT */ @@ -705,6 +1181,10 @@ int __must_check __media_device_register(struct media_device *mdev, if (!devnode) return -ENOMEM; + ida_init(&mdev->req_ids); + spin_lock_init(&mdev->req_lock); + INIT_LIST_HEAD(&mdev->requests); + /* Register the device node. 
*/ mdev->devnode = devnode; devnode->fops = &media_device_fops; @@ -727,6 +1207,7 @@ int __must_check __media_device_register(struct media_device *mdev, mdev->devnode = NULL; media_devnode_unregister_prepare(devnode); media_devnode_unregister(devnode); + ida_destroy(&mdev->req_ids); return ret; } @@ -811,6 +1292,7 @@ void media_device_unregister(struct media_device *mdev) device_remove_file(&mdev->devnode->dev, &dev_attr_model); media_devnode_unregister(mdev->devnode); + ida_destroy(&mdev->req_ids); /* devnode free is handled in media_devnode_*() */ mdev->devnode = NULL; } diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c index 423248f577b6..33e59530965d 100644 --- a/drivers/media/media-devnode.c +++ b/drivers/media/media-devnode.c @@ -149,6 +149,7 @@ static long media_compat_ioctl(struct file *filp, unsigned int cmd, /* Override for the open function */ static int media_open(struct inode *inode, struct file *filp) { + struct media_devnode_fh *fh; struct media_devnode *devnode; int ret; @@ -170,17 +171,16 @@ static int media_open(struct inode *inode, struct file *filp) get_device(&devnode->dev); mutex_unlock(&media_devnode_lock); - filp->private_data = devnode; - - if (devnode->fops->open) { - ret = devnode->fops->open(filp); - if (ret) { - put_device(&devnode->dev); - filp->private_data = NULL; - return ret; - } + ret = devnode->fops->open(filp); + if (ret) { + put_device(&devnode->dev); + filp->private_data = NULL; + return ret; } + fh = filp->private_data; + fh->devnode = devnode; + return 0; } diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c index 2ace0410d277..5877b1b4bed4 100644 --- a/drivers/media/media-entity.c +++ b/drivers/media/media-entity.c @@ -245,18 +245,38 @@ EXPORT_SYMBOL_GPL(media_entity_pads_init); * Graph traversal */ -static struct media_entity * -media_entity_other(struct media_entity *entity, struct media_link *link) +/** + * media_entity_has_route - Check if two entity pads are connected internally + * @entity: The entity + * @pad0: The first pad index + * @pad1: The second pad index + * + * This function can be used to check whether two pads of an entity are + * connected internally in the entity. + * + * The caller must hold entity->source->parent->mutex. + * + * Return: true if the pads are connected internally and false otherwise. 
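+ *
+ * A minimal usage sketch, e.g. to skip pad pairs with no internal
+ * route during a graph walk (hypothetical caller; sink and source
+ * are two pads of this entity):
+ *
+ *	if (!media_entity_has_route(entity, sink->index, source->index))
+ *		continue;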
+ */ +bool media_entity_has_route(struct media_entity *entity, unsigned int pad0, + unsigned int pad1) { - if (link->source->entity == entity) - return link->sink->entity; - else - return link->source->entity; + if (pad0 >= entity->num_pads || pad1 >= entity->num_pads) + return false; + + if (pad0 == pad1) + return true; + + if (!entity->ops || !entity->ops->has_route) + return true; + + return entity->ops->has_route(entity, pad0, pad1, NULL); } +EXPORT_SYMBOL_GPL(media_entity_has_route); /* push an entity to traversal stack */ static void stack_push(struct media_graph *graph, - struct media_entity *entity) + struct media_entity *entity, int pad, int stream) { if (graph->top == MEDIA_ENTITY_ENUM_MAX_DEPTH - 1) { WARN_ON(1); @@ -264,7 +284,9 @@ static void stack_push(struct media_graph *graph, } graph->top++; graph->stack[graph->top].link = entity->links.next; + graph->stack[graph->top].pad = pad; graph->stack[graph->top].entity = entity; + graph->stack[graph->top].stream = stream; } static struct media_entity *stack_pop(struct media_graph *graph) @@ -278,6 +300,8 @@ static struct media_entity *stack_pop(struct media_graph *graph) } #define link_top(en) ((en)->stack[(en)->top].link) +#define pad_top(en) ((en)->stack[(en)->top].pad) +#define stream_top(en) ((en)->stack[(en)->top].stream) #define stack_top(en) ((en)->stack[(en)->top].entity) /* @@ -314,16 +338,16 @@ void media_graph_walk_cleanup(struct media_graph *graph) EXPORT_SYMBOL_GPL(media_graph_walk_cleanup); void media_graph_walk_start(struct media_graph *graph, - struct media_entity *entity) + struct media_pad *pad) { media_entity_enum_zero(&graph->ent_enum); - media_entity_enum_set(&graph->ent_enum, entity); + media_entity_enum_set(&graph->ent_enum, pad->entity); graph->top = 0; graph->stack[graph->top].entity = NULL; - stack_push(graph, entity); - dev_dbg(entity->graph_obj.mdev->dev, - "begin graph walk at '%s'\n", entity->name); + stack_push(graph, pad->entity, pad->index, -1); + dev_dbg(pad->entity->graph_obj.mdev->dev, + "begin graph walk at '%s'\n", pad->entity->name); } EXPORT_SYMBOL_GPL(media_graph_walk_start); @@ -331,7 +355,11 @@ static void media_graph_walk_iter(struct media_graph *graph) { struct media_entity *entity = stack_top(graph); struct media_link *link; + unsigned int from_pad = pad_top(graph); struct media_entity *next; + struct media_pad *remote; + struct media_pad *local; + int stream = stream_top(graph); link = list_entry(link_top(graph), typeof(*link), list); @@ -345,8 +373,31 @@ static void media_graph_walk_iter(struct media_graph *graph) return; } - /* Get the entity in the other end of the link . */ - next = media_entity_other(entity, link); + /* + * Get the local pad, the remote pad and the entity at the other + * end of the link. + */ + if (link->source->entity == entity) { + remote = link->sink; + local = link->source; + } else { + remote = link->source; + local = link->sink; + } + + next = remote->entity; + + /* + * Are the local pad and the pad we came from connected + * internally in the entity ? + */ + if (entity->ops && entity->ops->has_route) { + if (!entity->ops->has_route(entity, from_pad, + local->index, &stream)) { + link_top(graph) = link_top(graph)->next; + return; + } + } /* Has the entity already been visited? */ if (media_entity_enum_test_and_set(&graph->ent_enum, next)) { @@ -359,7 +410,7 @@ static void media_graph_walk_iter(struct media_graph *graph) /* Push the new entity to stack and start over. 
*/ link_top(graph) = link_top(graph)->next; - stack_push(graph, next); + stack_push(graph, next, remote->index, stream); dev_dbg(entity->graph_obj.mdev->dev, "walk: pushing '%s' on stack\n", next->name); } @@ -441,7 +492,7 @@ __must_check int __media_pipeline_start(struct media_entity *entity, goto error_graph_walk_start; } - media_graph_walk_start(&pipe->graph, entity); + media_graph_walk_start(graph, &entity->pads[0]); while ((entity = media_graph_walk_next(graph))) { DECLARE_BITMAP(active, MEDIA_ENTITY_MAX_PADS); @@ -449,17 +500,8 @@ __must_check int __media_pipeline_start(struct media_entity *entity, entity->stream_count++; - if (WARN_ON(entity->pipe && entity->pipe != pipe)) { - ret = -EBUSY; - goto error; - } - entity->pipe = pipe; - /* Already streaming --- no need to check. */ - if (entity->stream_count > 1) - continue; - if (!entity->ops || !entity->ops->link_validate) continue; @@ -522,7 +564,7 @@ __must_check int __media_pipeline_start(struct media_entity *entity, * Link validation on graph failed. We revert what we did and * return the error. */ - media_graph_walk_start(graph, entity_err); + media_graph_walk_start(graph, &entity_err->pads[0]); while ((entity_err = media_graph_walk_next(graph))) { /* Sanity check for negative stream_count */ @@ -573,7 +615,7 @@ void __media_pipeline_stop(struct media_entity *entity) if (WARN_ON(!pipe)) return; - media_graph_walk_start(graph, entity); + media_graph_walk_start(graph, &entity->pads[0]); while ((entity = media_graph_walk_next(graph))) { /* Sanity check for negative stream_count */ diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig index da28e68c87d8..b92e2bffdf79 100644 --- a/drivers/media/pci/Kconfig +++ b/drivers/media/pci/Kconfig @@ -17,6 +17,7 @@ source "drivers/media/pci/tw5864/Kconfig" source "drivers/media/pci/tw68/Kconfig" source "drivers/media/pci/tw686x/Kconfig" source "drivers/media/pci/zoran/Kconfig" +source "drivers/media/pci/intel/Kconfig" endif if MEDIA_ANALOG_TV_SUPPORT diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile index 1ab759e9b1bb..71fa8edfab92 100644 --- a/drivers/media/pci/Makefile +++ b/drivers/media/pci/Makefile @@ -3,7 +3,8 @@ # Makefile for the kernel multimedia device drivers. 
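+# Each vendor directory below is entered unconditionally via obj-y;
+# the Kconfig symbols inside each directory decide what is actually
+# built, so the intel/ entry added here enables nothing by itself.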
# -obj-y += ttpci/ \ +obj-y += intel/ \ + ttpci/ \ b2c2/ \ pluto2/ \ dm1105/ \ diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c index a5f52137d306..d4bc78b4fcb5 100644 --- a/drivers/media/pci/bt8xx/bt878.c +++ b/drivers/media/pci/bt8xx/bt878.c @@ -422,8 +422,7 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) bt878_num); if (bt878_num >= BT878_MAX) { printk(KERN_ERR "bt878: Too many devices inserted\n"); - result = -ENOMEM; - goto fail0; + return -ENOMEM; } if (pci_enable_device(dev)) return -EIO; diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c index 78a8836d03e4..6c0fd9438dd9 100644 --- a/drivers/media/pci/cx23885/cx23885-cards.c +++ b/drivers/media/pci/cx23885/cx23885-cards.c @@ -2286,6 +2286,10 @@ void cx23885_card_setup(struct cx23885_dev *dev) &dev->i2c_bus[2].i2c_adap, "cx25840", 0x88 >> 1, NULL); if (dev->sd_cx25840) { + /* set host data for clk_freq configuration */ + v4l2_set_subdev_hostdata(dev->sd_cx25840, + &dev->clk_freq); + dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE; v4l2_subdev_call(dev->sd_cx25840, core, load_fw); } diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index 8f63df1cb418..4612f26fcd6d 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c @@ -873,6 +873,16 @@ static int cx23885_dev_setup(struct cx23885_dev *dev) if (cx23885_boards[dev->board].clk_freq > 0) dev->clk_freq = cx23885_boards[dev->board].clk_freq; + if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE && + dev->pci->subsystem_device == 0x7137) { + /* Hauppauge ImpactVCBe device ID 0x7137 is populated + * with an 888, and a 25Mhz crystal, instead of the + * usual third overtone 50Mhz. The default clock rate must + * be overridden so the cx25840 is properly configured + */ + dev->clk_freq = 25000000; + } + dev->pci_bus = dev->pci->bus->number; dev->pci_slot = PCI_SLOT(dev->pci->devfn); cx23885_irq_add(dev, 0x001f00); diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c index 04aa4a68a0ae..040c6c251d3a 100644 --- a/drivers/media/pci/cx25821/cx25821-core.c +++ b/drivers/media/pci/cx25821/cx25821-core.c @@ -867,6 +867,10 @@ static int cx25821_dev_setup(struct cx25821_dev *dev) dev->nr = ++cx25821_devcount; sprintf(dev->name, "cx25821[%d]", dev->nr); + if (dev->nr >= ARRAY_SIZE(card)) { + CX25821_INFO("dev->nr >= %zd", ARRAY_SIZE(card)); + return -ENODEV; + } if (dev->pci->device != 0x8210) { pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n", __func__, dev->pci->device); @@ -882,9 +886,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev) dev->channels[i].sram_channels = &cx25821_sram_channels[i]; } - if (dev->nr > 1) - CX25821_INFO("dev->nr > 1!"); - /* board config */ dev->board = 1; /* card[dev->nr]; */ dev->_max_num_decoders = MAX_DECODERS; diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig new file mode 100644 index 000000000000..e32812129c4c --- /dev/null +++ b/drivers/media/pci/intel/Kconfig @@ -0,0 +1,90 @@ +config VIDEO_INTEL_IPU + tristate "Intel IPU driver" + depends on ACPI && X86_64 + select IOMMU_API + select IOMMU_IOVA + select X86_DEV_DMA_OPS if X86 + select VIDEOBUF2_DMA_CONTIG + select PHYS_ADDR_T_64BIT + select COMMON_CLK + ---help--- + Say Y here! 
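+
+	  This builds the Intel IPU (Image Processing Unit) PCI driver
+	  stack used for MIPI CSI-2 camera capture and processing on
+	  Intel platforms.
+
+	  To compile the drivers as modules, choose M here: going by the
+	  Makefiles added in this series, the modules are then called
+	  intel-ipu4, intel-ipu4-mmu and intel-ipu4-psys.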
+
+choice
+	prompt "intel ipu generation type"
+	depends on VIDEO_INTEL_IPU
+	default VIDEO_INTEL_IPU4
+
+config VIDEO_INTEL_IPU4
+	bool "Compile for IPU4 driver"
+	---help---
+	Say Y here to build the driver for IPU4 hardware.
+
+config VIDEO_INTEL_IPU4P
+	bool "Compile for IPU4P driver"
+	---help---
+	Say Y here to build the driver for IPU4P hardware.
+
+endchoice
+
+choice
+	prompt "intel ipu hardware platform type"
+	depends on VIDEO_INTEL_IPU
+	default VIDEO_INTEL_IPU_SOC
+
+config VIDEO_INTEL_IPU_SOC
+	bool "Compile for SOC"
+	---help---
+	Select this option for SoC platforms.
+
+endchoice
+
+config VIDEO_INTEL_IPU_FW_LIB
+	bool "Compile firmware library"
+	---help---
+	If selected, the CSS firmware host library will be compiled.
+
+config VIDEO_INTEL_IPU_WERROR
+	bool "Force GCC to throw an error instead of a warning when compiling"
+	depends on VIDEO_INTEL_IPU
+	depends on EXPERT
+	depends on !COMPILE_TEST
+	default n
+	help
+	  Add -Werror to the build flags for (and only for) the intel ipu
+	  modules. Do not enable this unless you are writing code for the
+	  ipu modules.
+
+	  Recommended for driver developers only.
+
+	  If in doubt, say "N".
+
+config VIDEO_INTEL_ICI
+	depends on VIDEO_INTEL_IPU
+	bool "Compile for ICI driver"
+	---help---
+	If selected, the ICI driver will be compiled.
+
+config VIDEO_INTEL_UOS
+	bool "Compile driver per UOS"
+	---help---
+	If selected, the UOS (guest OS) driver components will be compiled.
+
+config VIDEO_INTEL_IPU_ACRN
+	bool "Compile for virtio mediation"
+
+choice
+	prompt "Virtio driver type"
+	depends on VIDEO_INTEL_IPU_ACRN
+	default VIDEO_INTEL_IPU_VIRTIO_BE
+
+config VIDEO_INTEL_IPU_VIRTIO_BE
+	bool "Configure IPU4 as virtio backend"
+	depends on VBS
+	---help---
+	Configures the IPU4 driver as the virtio back end.
+
+config VIDEO_INTEL_IPU_VIRTIO_FE
+	bool "Configure IPU4 as virtio frontend"
+	---help---
+	Configures the IPU4 driver as the virtio front end.
+
+endchoice
diff --git a/drivers/media/pci/intel/Makefile b/drivers/media/pci/intel/Makefile
new file mode 100644
index 000000000000..0aeba667f11e
--- /dev/null
+++ b/drivers/media/pci/intel/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2010 - 2018, Intel Corporation.
+
+# Check compile warnings strictly so the build stays warning-free.
+# Note: a gcc upgrade may introduce new warnings and break the build.
+subdir-ccflags-y := -Wall -Wextra
+subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
+subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
+subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
+subdir-ccflags-$(CONFIG_VIDEO_INTEL_IPU_WERROR) += -Werror
+
+obj-$(CONFIG_VIDEO_INTEL_ICI) += ici/
+obj-$(CONFIG_VIDEO_INTEL_IPU_ACRN) += virtio/
+ifndef CONFIG_VIDEO_INTEL_ICI
+obj-$(CONFIG_VIDEO_INTEL_IPU4) += ipu4/
+endif
+obj-$(CONFIG_VIDEO_INTEL_IPU4P) += ipu4/
diff --git a/drivers/media/pci/intel/ici/Makefile b/drivers/media/pci/intel/ici/Makefile
new file mode 100644
index 000000000000..b39fbaabb3fc
--- /dev/null
+++ b/drivers/media/pci/intel/ici/Makefile
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2010 - 2018, Intel Corporation.
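+
+# By default srcpath is the kernel source tree ($(srctree)); an
+# out-of-tree build is expected to set EXTERNAL_BUILD=1 and provide
+# its own srcpath.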
+ +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +ifdef CONFIG_VIDEO_INTEL_ICI +ccflags-y += -DHAS_DUAL_CMD_CTX_SUPPORT=0 -DIPU_VC_SUPPORT -DIPU_HAS_ISA -DIPU_PSYS_LEGACY -Wframe-larger-than=4096 + + +# work-around to re-use ipu4-css and libintel-ipu4_ici.c together when +# compiling ICI-ISYS +$(shell cp -r $(srcpath)/$(src)/../ipu4/ipu4-css/ $(srcpath)/$(src)/) +$(shell cp -f $(srcpath)/$(src)/libintel-ipu4_ici.c $(srcpath)/$(src)/ipu4-css/libintel-ipu4.c) +$(shell cp -f $(srcpath)/$(src)/../ipu4/ipu-platform-resources.h $(srcpath)/$(src)/) + +intel-ipu4-objs +=../ipu.o \ + ../ipu-bus.o \ + ici-dma.o \ + ../ipu-buttress.o \ + ../ipu-trace.o \ + ../ipu-cpd.o \ + ../ipu-fw-com.o \ + ../ipu4/ipu4.o + +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4.o + +intel-ipu4-mmu-objs += ../ipu-mmu.o +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4-mmu.o + +ici-isys-mod-objs += \ + ici-isys.o \ + ici-isys-csi2.o \ + ici-isys-tpg.o \ + ici-isys-csi2-be.o \ + ici-isys-stream.o \ + ici-isys-frame-buf.o \ + ici-isys-subdev.o \ + ici-isys-pipeline.o \ + ici-isys-pipeline-device.o \ + ici-isys-stream-device.o +obj-$(CONFIG_VIDEO_INTEL_IPU) += ici-isys-mod.o + +intel-ipu4-psys-objs += ../ipu-psys.o \ + ../ipu4/ipu4-resources.o \ + ../ipu4/ipu4-psys.o \ + +ifndef CONFIG_VIDEO_INTEL_IPU_FW_LIB +intel-ipu4-psys-objs += ipu4-fw-resources.o \ + ../ipu-fw-psys.o +endif + +ifeq ($(CONFIG_COMPAT),y) +intel-ipu4-psys-objs += ../ipu-psys-compat32.o +endif + +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4-psys.o + +ifdef CONFIG_VIDEO_INTEL_IPU_FW_LIB +include $(srcpath)/$(src)/ipu4-css/Makefile.isyslib +include $(srcpath)/$(src)/ipu4-css/Makefile.psyslib +endif + +ccflags-y += -I$(srcpath)/$(src)/../../../../../include/ +ccflags-y += -I$(srcpath)/$(src)/../ +ccflags-y += -I$(srcpath)/$(src)/../ipu4/ +ccflags-y += -I$(srcpath)/$(src)/ +ccflags-y += -I$(srcpath)/$(src)/ipu4-css + +ccflags-y += -DPARAMETER_INTERFACE_V2 + +endif diff --git a/drivers/media/pci/intel/ici/ici-dma.c b/drivers/media/pci/intel/ici/ici-dma.c new file mode 100644 index 000000000000..da501a2e8744 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-dma.c @@ -0,0 +1,417 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu-dma.h" +#include "ipu-mmu.h" + +static struct page **__intel_ipu4_dma_alloc(struct device *dev, + size_t buf_size, + gfp_t gfp, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + int num_pages = buf_size >> PAGE_SHIFT; + int array_size = num_pages * sizeof(struct page *); + struct page **page_list; + int i = 0; + + if (array_size <= PAGE_SIZE) + page_list = kzalloc(array_size, GFP_KERNEL); + else + page_list = vzalloc(array_size); + if (!page_list) + return NULL; + + gfp |= __GFP_NOWARN; + + while (num_pages) { + int j, order = __fls(num_pages); + + page_list[i] = alloc_pages(gfp, order); + while (!page_list[i] && order) + page_list[i] = alloc_pages(gfp, --order); + if (!page_list[i]) + goto error; + + if (order) { + split_page(page_list[i], order); + j = 1 << order; + while (--j) + page_list[i + j] = page_list[i] + j; + } + + i += 1 << order; + num_pages -= 1 << order; + } + + return page_list; +error: + while (i--) + if (page_list[i]) + __free_pages(page_list[i], 0); + if (array_size <= PAGE_SIZE) + kfree(page_list); + else + vfree(page_list); + return NULL; +} + +static int 
__intel_ipu4_dma_free(struct device *dev, struct page **page_list, + size_t buf_size, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + int num_pages = buf_size >> PAGE_SHIFT; + int array_size = num_pages * sizeof(struct page *); + int i; + + for (i = 0; i < num_pages; i++) { + if (page_list[i]) { + __free_pages(page_list[i], 0); + } + } + + if (array_size <= PAGE_SIZE) + kfree(page_list); + else + vfree(page_list); + return 0; +} + +static void intel_ipu4_dma_sync_single_for_cpu( + struct device *dev, dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + + clflush_cache_range( + phys_to_virt(iommu_iova_to_phys( + mmu->dmap->domain, dma_handle)), size); +} + +static void intel_ipu4_dma_sync_sg_for_cpu( + struct device *dev, struct scatterlist *sglist, int nents, + enum dma_data_direction dir) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct scatterlist *sg; + int i; + + for_each_sg(sglist, sg, nents, i) { + clflush_cache_range( + phys_to_virt(iommu_iova_to_phys( + mmu->dmap->domain, + sg_dma_address(sg))), + sg->length); + } +} + +static void *intel_ipu4_dma_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct page **pages; + struct iova *iova; + struct vm_struct *area; + int i; + int rval; + + size = PAGE_ALIGN(size); + + iova = alloc_iova(&mmu->dmap->iovad, size >> PAGE_SHIFT, + dma_get_mask(dev) >> PAGE_SHIFT, 0); + if (!iova) + return NULL; + + pages = __intel_ipu4_dma_alloc(dev, size, gfp, attrs); + if (!pages) + goto out_free_iova; + + for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) { + rval = iommu_map(mmu->dmap->domain, + (iova->pfn_lo + i) << PAGE_SHIFT, + page_to_phys(pages[i]), PAGE_SIZE, 0); + if (rval) + goto out_unmap; + } + + area = __get_vm_area(size, 0, VMALLOC_START, VMALLOC_END); + if (!area) + goto out_unmap; + + area->pages = pages; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + if (map_vm_area(area, PAGE_KERNEL, &pages)) +#else + if (map_vm_area(area, PAGE_KERNEL, pages)) +#endif + goto out_vunmap; + + *dma_handle = iova->pfn_lo << PAGE_SHIFT; + + mmu->tlb_invalidate(mmu); + + return area->addr; + +out_vunmap: + vunmap(area->addr); + +out_unmap: + __intel_ipu4_dma_free(dev, pages, size, attrs); + for (i--; i >= 0; i--) { + iommu_unmap(mmu->dmap->domain, (iova->pfn_lo + i) << PAGE_SHIFT, + PAGE_SIZE); + } +out_free_iova: + __free_iova(&mmu->dmap->iovad, iova); + + return NULL; +} + +static void intel_ipu4_dma_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct vm_struct *area = find_vm_area(vaddr); + struct iova *iova = find_iova(&mmu->dmap->iovad, + dma_handle >> PAGE_SHIFT); + + if (WARN_ON(!area)) + return; + + if (WARN_ON(!area->pages)) + return; + + BUG_ON(!iova); + + size = PAGE_ALIGN(size); + + vunmap(vaddr); + + iommu_unmap(mmu->dmap->domain, iova->pfn_lo << PAGE_SHIFT, + 
(iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT); + + __intel_ipu4_dma_free(dev, area->pages, size, attrs); + + __free_iova(&mmu->dmap->iovad, iova); + + mmu->tlb_invalidate(mmu); +} + +static int intel_ipu4_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *addr, dma_addr_t iova, size_t size, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct vm_struct *area = find_vm_area(addr); + size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT; + size_t i; + + if (!area) + return -EFAULT; + + if (vma->vm_start & ~PAGE_MASK) + return -EINVAL; + + if (size > area->size) + return -EFAULT; + + for (i = 0; i < count; i++) + vm_insert_page(vma, vma->vm_start + (i << PAGE_SHIFT), + area->pages[i]); + + return 0; +} + +static void intel_ipu4_dma_unmap_sg(struct device *dev, + struct scatterlist *sglist, + int nents, enum dma_data_direction dir, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct iova *iova = find_iova(&mmu->dmap->iovad, + sg_dma_address(sglist) >> PAGE_SHIFT); + + if (!nents) + return; + + BUG_ON(!iova); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) +#else + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) +#endif + intel_ipu4_dma_sync_sg_for_cpu(dev, sglist, nents, + DMA_BIDIRECTIONAL); + + iommu_unmap(mmu->dmap->domain, iova->pfn_lo << PAGE_SHIFT, + (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT); + + mmu->tlb_invalidate(mmu); + + __free_iova(&mmu->dmap->iovad, iova); +} + +static int intel_ipu4_dma_map_sg(struct device *dev, struct scatterlist *sglist, + int nents, enum dma_data_direction dir, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct scatterlist *sg; + struct iova *iova; + size_t size = 0; + uint32_t iova_addr; + int i; + + for_each_sg(sglist, sg, nents, i) + size += PAGE_ALIGN(sg->length) >> PAGE_SHIFT; + + dev_dbg(dev, "dmamap: mapping sg %d entries, %zu pages\n", nents, size); + + iova = alloc_iova(&mmu->dmap->iovad, size, + dma_get_mask(dev) >> PAGE_SHIFT, 0); + if (!iova) + return 0; + + dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo, + iova->pfn_hi); + + iova_addr = iova->pfn_lo; + + for_each_sg(sglist, sg, nents, i) { + int rval; + + dev_dbg(dev, + "dmamap details: mapping entry %d: iova 0x%8.8x, \ + physical 0x%16.16llx\n", + i, iova_addr << PAGE_SHIFT, page_to_phys(sg_page(sg))); + rval = iommu_map(mmu->dmap->domain, iova_addr << PAGE_SHIFT, + page_to_phys(sg_page(sg)), + PAGE_ALIGN(sg->length), 0); + if (rval) + goto out_fail; + sg_dma_address(sg) = iova_addr << PAGE_SHIFT; +#ifdef CONFIG_NEED_SG_DMA_LENGTH + sg_dma_len(sg) = sg->length; +#endif /* CONFIG_NEED_SG_DMA_LENGTH */ + + iova_addr += PAGE_ALIGN(sg->length) >> PAGE_SHIFT; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) +#else + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) +#endif + intel_ipu4_dma_sync_sg_for_cpu(dev, sglist, nents, + DMA_BIDIRECTIONAL); + + mmu->tlb_invalidate(mmu); + + return nents; + +out_fail: + intel_ipu4_dma_unmap_sg(dev, sglist, i, dir, attrs); + + return 0; +} + +/* +* Create scatter-list for the already 
allocated DMA buffer +*/ +static int intel_ipu4_dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t handle, size_t size, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct vm_struct *area = find_vm_area(cpu_addr); + int n_pages; + int ret = 0; + + if (WARN_ON(!area->pages)) + return -ENOMEM; + + n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; + + ret = sg_alloc_table_from_pages(sgt, area->pages, n_pages, 0, size, + GFP_KERNEL); + if (ret) + dev_dbg(dev, "IPU get sgt table fail\n"); + + return ret; +} + +const struct dma_map_ops ipu_dma_ops = { + .alloc = intel_ipu4_dma_alloc, + .free = intel_ipu4_dma_free, + .mmap = intel_ipu4_dma_mmap, + .map_sg = intel_ipu4_dma_map_sg, + .unmap_sg = intel_ipu4_dma_unmap_sg, + .sync_single_for_cpu = intel_ipu4_dma_sync_single_for_cpu, + .sync_single_for_device = intel_ipu4_dma_sync_single_for_cpu, + .sync_sg_for_cpu = intel_ipu4_dma_sync_sg_for_cpu, + .sync_sg_for_device = intel_ipu4_dma_sync_sg_for_cpu, + .get_sgtable = intel_ipu4_dma_get_sgtable, +}; +EXPORT_SYMBOL_GPL(ipu_dma_ops); + diff --git a/drivers/media/pci/intel/ici/ici-isys-csi2-be.c b/drivers/media/pci/intel/ici/ici-isys-csi2-be.c new file mode 100644 index 000000000000..6841263a8d16 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-csi2-be.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include "./ici/ici-isys.h" +#ifdef ICI_ENABLED + +#ifndef IPU4_DEBUG +#define IPU4_DEBUG 1 +#endif + +#include "./ici/ici-isys-csi2-be.h" +#include "isysapi/interface/ia_css_isysapi_fw_types.h" + +#define ici_asd_to_csi2_be(__asd) \ + container_of(__asd, struct ici_isys_csi2_be, asd) + +static const uint32_t ici_csi2_be_supported_codes_pad[] = { + ICI_FORMAT_SBGGR12, + ICI_FORMAT_SGBRG12, + ICI_FORMAT_SGRBG12, + ICI_FORMAT_SRGGB12, + ICI_FORMAT_SBGGR10, + ICI_FORMAT_SGBRG10, + ICI_FORMAT_SGRBG10, + ICI_FORMAT_SRGGB10, + ICI_FORMAT_SBGGR8, + ICI_FORMAT_SGBRG8, + ICI_FORMAT_SGRBG8, + ICI_FORMAT_SRGGB8, + 0, +}; + +static const uint32_t ici_csi2_be_soc_supported_codes_pad[] = { + ICI_FORMAT_RGB888, + ICI_FORMAT_RGB565, + ICI_FORMAT_UYVY, + ICI_FORMAT_YUYV, + ICI_FORMAT_SBGGR12, + ICI_FORMAT_SGBRG12, + ICI_FORMAT_SGRBG12, + ICI_FORMAT_SRGGB12, + ICI_FORMAT_SBGGR10, + ICI_FORMAT_SGBRG10, + ICI_FORMAT_SGRBG10, + ICI_FORMAT_SRGGB10, + ICI_FORMAT_SBGGR8, + ICI_FORMAT_SGBRG8, + ICI_FORMAT_SGRBG8, + ICI_FORMAT_SRGGB8, + 0, +}; + +static const uint32_t *ici_csi2_be_supported_codes[] = { + ici_csi2_be_supported_codes_pad, + ici_csi2_be_supported_codes_pad, +}; + +static const uint32_t *ici_csi2_be_soc_supported_codes[] = { + ici_csi2_be_soc_supported_codes_pad, + ici_csi2_be_soc_supported_codes_pad, +}; + +static int get_supported_code_index(uint32_t code) +{ + int i; + + for (i = 0; ici_csi2_be_supported_codes_pad[i]; i++) { + if (ici_csi2_be_supported_codes_pad[i] == code) + return i; + } + return -EINVAL; +} + +void ici_csi2_be_set_ffmt(struct ici_isys_subdev *asd, + unsigned pad, + struct ici_framefmt *ffmt) +{ + struct ici_framefmt *cur_ffmt = + __ici_isys_subdev_get_ffmt(asd, pad); + int idx=0; + if (!cur_ffmt) + return; + + ffmt->colorspace = 0; + memset(ffmt->reserved, 0, sizeof(ffmt->reserved)); + switch (pad) { + case CSI2_BE_ICI_PAD_SINK: + DEBUGK("%s: sink pad %u\n", __func__, pad); + if (ffmt->field != ICI_FIELD_ALTERNATE) + ffmt->field = ICI_FIELD_NONE; + *cur_ffmt = *ffmt; + + ici_isys_subdev_fmt_propagate(asd, 
pad, NULL, + ICI_ISYS_SUBDEV_PROP_TGT_SINK_FMT, + ffmt); + break; + case CSI2_BE_ICI_PAD_SOURCE: { + struct ici_framefmt *sink_ffmt = + __ici_isys_subdev_get_ffmt(asd, + CSI2_BE_ICI_PAD_SINK); + + struct ici_rect *r = + &asd->crop[CSI2_BE_ICI_PAD_SOURCE]; + + u32 code = 0; + if (sink_ffmt) + code = sink_ffmt->pixelformat; + + idx = get_supported_code_index(code); + + DEBUGK("%s: source pad %u\n", __func__, pad); + + if (asd->valid_tgts[CSI2_BE_ICI_PAD_SOURCE].crop + && idx >= 0) { + int crop_info = 0; + + DEBUGK("%s: setting CROP, pad %u\n", __func__, + pad); + + if (r->top & 1) + crop_info |= CSI2_BE_ICI_CROP_VER; + if (r->left & 1) + crop_info |= CSI2_BE_ICI_CROP_HOR; + code = ici_csi2_be_supported_codes_pad[(( + idx & + CSI2_BE_ICI_CROP_MASK) + ^ + crop_info) + + + (idx & + ~CSI2_BE_ICI_CROP_MASK)]; + } + + DEBUGK("%s: setting to w:%u,h:%u,pf:%u,field:%u\n", + __func__, r->width, + r->height, code, sink_ffmt->field); + cur_ffmt->width = r->width; + cur_ffmt->height = r->height; + cur_ffmt->pixelformat = code; + cur_ffmt->field = sink_ffmt->field; + *ffmt = *cur_ffmt; + break; + } + default: + BUG_ON(1); + } +} + +static int ici_csi2_be_set_stream( + struct ici_isys_node *node, + void* ip, + int state) +{ + return 0; +} + +static int ici_csi2_be_pipeline_validate( + struct node_pipeline *inp, + struct ici_isys_node *node) +{ + struct ici_isys_subdev* asd = node->sd; + struct ici_isys_csi2_be *csi2_be = + ici_asd_to_csi2_be(asd); + struct ici_isys_pipeline *ip = + ici_nodepipe_to_pipeline(inp); + + ip->csi2_be = csi2_be; + return 0; +} + +int ici_isys_csi2_be_init(struct ici_isys_csi2_be + *csi2_be, + struct ici_isys *isys, + unsigned int type) +{ + struct ici_pad_framefmt pff = { + .pad.pad_idx = CSI2_BE_ICI_PAD_SINK, + .ffmt = { + .width = 4096, + .height = 3072, + }, + }; + int rval; + char name[ICI_MAX_NODE_NAME]; + + dev_info(&isys->adev->dev, "ici_isys_csi2_be_init\n"); + + csi2_be->asd.isys = isys; + if (type == ICI_BE_RAW) { + csi2_be->as.buf_list.css_pin_type = + IA_CSS_ISYS_PIN_TYPE_RAW_NS; + snprintf(name, sizeof(name), + IPU_ISYS_ENTITY_PREFIX " CSI2 BE"); + } else if (type >= ICI_BE_SOC) { + csi2_be->as.buf_list.css_pin_type = + IA_CSS_ISYS_PIN_TYPE_RAW_SOC; + snprintf(name, sizeof(name), + IPU_ISYS_ENTITY_PREFIX " CSI2 BE SOC %u", type-1); + } else { + return -EINVAL; + } + + rval = ici_isys_subdev_init(&csi2_be->asd, + name, + NR_OF_CSI2_BE_ICI_PADS, + 0); + if (rval) { + dev_err(&isys->adev->dev, "can't init subdevice\n"); + goto fail_subdev; + } + + csi2_be->asd.pads[CSI2_BE_ICI_PAD_SINK].flags = ICI_PAD_FLAGS_SINK + | ICI_PAD_FLAGS_MUST_CONNECT; + csi2_be->asd.pads[CSI2_BE_ICI_PAD_SOURCE].flags = + ICI_PAD_FLAGS_SOURCE; + + if (type == ICI_BE_RAW) + csi2_be->asd.valid_tgts[CSI2_BE_ICI_PAD_SOURCE].crop = true; + else + csi2_be->asd.valid_tgts[CSI2_BE_ICI_PAD_SOURCE].crop = false; + + csi2_be->asd.set_ffmt_internal = ici_csi2_be_set_ffmt; + + if (type == ICI_BE_RAW) { + csi2_be->asd.supported_codes = ici_csi2_be_supported_codes; + csi2_be->asd.be_mode = ICI_BE_RAW; + csi2_be->asd.isl_mode = ICI_ISL_CSI2_BE; + } else { + csi2_be->asd.supported_codes = ici_csi2_be_soc_supported_codes; + csi2_be->asd.be_mode = ICI_BE_SOC; + csi2_be->asd.isl_mode = ICI_ISL_OFF; + } + + csi2_be->asd.node.node_set_pad_ffmt(&csi2_be->asd.node, &pff); + /* ipu4_isys_csi2_be2_set_sel(&csi2_be->asd.sd, NULL, &sel); */ + /* csi2_be->asd.sd.internal_ops = &csi2_be_sd_internal_ops; */ + csi2_be->asd.node.node_set_streaming = + ici_csi2_be_set_stream; + csi2_be->asd.node.node_pipeline_validate = + 
ici_csi2_be_pipeline_validate; + + + csi2_be->as.isys = isys; + if (type == ICI_BE_RAW) + csi2_be->as.pfmts = ici_isys_pfmts; + else + csi2_be->as.pfmts = ici_isys_pfmts_be_soc; + + csi2_be->as.try_fmt_vid_mplane = + ici_isys_video_try_fmt_vid_mplane_default; + csi2_be->as.prepare_firmware_stream_cfg = + ici_isys_prepare_firmware_stream_cfg_default; + + rval = ici_isys_stream_init(&csi2_be->as, &csi2_be->asd, + &csi2_be->asd.node, CSI2_BE_ICI_PAD_SOURCE, + ICI_PAD_FLAGS_SINK); + if (rval) { + dev_err(&isys->adev->dev, "can't init stream node\n"); + goto fail_stream; + } + return 0; + +fail_stream: + ici_isys_subdev_cleanup(&csi2_be->asd); +fail_subdev: + return rval; +} +EXPORT_SYMBOL(ici_isys_csi2_be_init); + +void ici_isys_csi2_be_cleanup(struct ici_isys_csi2_be + *csi2_be) +{ + ici_isys_subdev_cleanup(&csi2_be->asd); + ici_isys_stream_cleanup(&csi2_be->as); +} +EXPORT_SYMBOL(ici_isys_csi2_be_cleanup); + +#endif /* ICI_ENABLED */ diff --git a/drivers/media/pci/intel/ici/ici-isys-csi2-be.h b/drivers/media/pci/intel/ici/ici-isys-csi2-be.h new file mode 100644 index 000000000000..428619d24520 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-csi2-be.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_CSI2_BE_H +#define ICI_ISYS_CSI2_BE_H + +#include "ici-isys-subdev.h" +#include "ici-isys-stream.h" + +#define CSI2_BE_ICI_PAD_SINK 0 +#define CSI2_BE_ICI_PAD_SOURCE 1 +#define NR_OF_CSI2_BE_ICI_PADS 2 + +#define CSI2_BE_ICI_CROP_HOR (1 << 0) +#define CSI2_BE_ICI_CROP_VER (1 << 1) +#define CSI2_BE_ICI_CROP_MASK (CSI2_BE_ICI_CROP_VER | CSI2_BE_ICI_CROP_HOR) + +struct ici_isys_csi2_be_pdata; +/* + * struct ici_isys_csi2_be + */ +struct ici_isys_csi2_be { + struct ici_isys_csi2_be_pdata *pdata; + struct ici_isys_subdev asd; + struct ici_isys_stream as; +}; + +int ici_isys_csi2_be_init(struct ici_isys_csi2_be + *csi2_be, + struct ici_isys *isys, unsigned int type); +void ici_isys_csi2_be_cleanup(struct ici_isys_csi2_be + *csi2_be); + +#endif /* ICI_ISYS_CSI2_BE_H */ diff --git a/drivers/media/pci/intel/ici/ici-isys-csi2.c b/drivers/media/pci/intel/ici/ici-isys-csi2.c new file mode 100644 index 000000000000..65416dd6d83b --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-csi2.c @@ -0,0 +1,532 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include "./ici/ici-isys.h" +#ifdef ICI_ENABLED +#include +#include "./ici/ici-isys-subdev.h" +#include "./ici/ici-isys-stream.h" +#include "./ici/ici-isys-csi2.h" +#include "isysapi/interface/ia_css_isysapi_fw_types.h" +#include "ipu-platform-isys-csi2-reg.h" +//#include "intel-ipu-isys-csi2-common.h" + +#define CSI2_ACCINV 8 + +#define ici_asd_to_csi2(__asd, index) \ + container_of(__asd, struct ici_isys_csi2, asd[index]) + +static const uint32_t ici_csi2_supported_codes_pad_sink[] = { + ICI_FORMAT_RGB888, + ICI_FORMAT_RGB565, + ICI_FORMAT_UYVY, + ICI_FORMAT_YUYV, + ICI_FORMAT_SBGGR12, + ICI_FORMAT_SGBRG12, + ICI_FORMAT_SGRBG12, + ICI_FORMAT_SRGGB12, + ICI_FORMAT_SBGGR10, + ICI_FORMAT_SGBRG10, + ICI_FORMAT_SGRBG10, + ICI_FORMAT_SRGGB10, + ICI_FORMAT_SBGGR8, + ICI_FORMAT_SGBRG8, + ICI_FORMAT_SGRBG8, + ICI_FORMAT_SRGGB8, + ICI_FORMAT_SBGGR10_DPCM8, + ICI_FORMAT_SGBRG10_DPCM8, + ICI_FORMAT_SGRBG10_DPCM8, + ICI_FORMAT_SRGGB10_DPCM8, + 0, +}; + +static const uint32_t ici_csi2_supported_codes_pad_source[] = { + ICI_FORMAT_RGB888, + ICI_FORMAT_RGB565, + ICI_FORMAT_UYVY, + ICI_FORMAT_YUYV, + 
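/* Note: unlike the sink pad list above, the source pad list carries
+	 * no DPCM-compressed codes; the sink's DPCM8 codes are mapped to
+	 * their uncompressed equivalents on propagation (see
+	 * ici_isys_subdev_code_to_uncompressed() in ici_csi2_set_ffmt()). */
+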
ICI_FORMAT_SBGGR12, + ICI_FORMAT_SGBRG12, + ICI_FORMAT_SGRBG12, + ICI_FORMAT_SRGGB12, + ICI_FORMAT_SBGGR10, + ICI_FORMAT_SGBRG10, + ICI_FORMAT_SGRBG10, + ICI_FORMAT_SRGGB10, + ICI_FORMAT_SBGGR8, + ICI_FORMAT_SGBRG8, + ICI_FORMAT_SGRBG8, + ICI_FORMAT_SRGGB8, + 0, +}; + +static const uint32_t *ici_csi2_supported_codes[] = { + ici_csi2_supported_codes_pad_sink, + ici_csi2_supported_codes_pad_source, +}; + +void ici_csi2_set_ffmt(struct ici_isys_subdev *asd, + unsigned pad, + struct ici_framefmt *ffmt) +{ + struct ici_framefmt *cur_ffmt = + __ici_isys_subdev_get_ffmt(asd, pad); + + if (ffmt->field != ICI_FIELD_ALTERNATE) + ffmt->field = ICI_FIELD_NONE; + ffmt->colorspace = 0; + memset(ffmt->reserved, 0, sizeof(ffmt->reserved)); + + switch (pad) { + case CSI2_ICI_PAD_SINK: + if (cur_ffmt) + *cur_ffmt = *ffmt; + ici_isys_subdev_fmt_propagate(asd, pad, NULL, + ICI_ISYS_SUBDEV_PROP_TGT_SINK_FMT, + ffmt); + break; + case CSI2_ICI_PAD_SOURCE:{ + struct ici_framefmt *sink_ffmt = + __ici_isys_subdev_get_ffmt(asd, + CSI2_ICI_PAD_SINK); + if (sink_ffmt) { + *cur_ffmt = *sink_ffmt; + cur_ffmt->pixelformat = + ici_isys_subdev_code_to_uncompressed + (sink_ffmt->pixelformat); + *ffmt = *cur_ffmt; + } + break; + } + default: + BUG_ON(1); + } +} + +static void ici_isys_csi2_error(struct ici_isys_csi2 + *csi2) +{ + /* + * Strings corresponding to CSI-2 receiver errors are here. + * Corresponding macros are defined in the header file. + */ + static const struct ici_isys_csi2_error { + const char *error_string; + bool is_info_only; + } errors[] = { + { + "Single packet header error corrected", true}, { + "Multiple packet header errors detected", true}, { + "Payload checksum (CRC) error", true}, { + "FIFO overflow", false}, { + "Reserved short packet data type detected", true}, { + "Reserved long packet data type detected", true}, { + "Incomplete long packet detected", false}, { + "Frame sync error", false}, { + "Line sync error", false}, { + "DPHY recoverable synchronization error", true}, { + "DPHY non-recoverable synchronization error", false}, { + "Escape mode error", true}, { + "Escape mode trigger event", true}, { + "Escape mode ultra-low power state for data lane(s)", true}, + { + "Escape mode ultra-low power state exit for clock lane", + true}, { + "Inter-frame short packet discarded", true}, { + "Inter-frame long packet discarded", true},}; + u32 status = csi2->receiver_errors; + unsigned int i; + + csi2->receiver_errors = 0; + + for (i = 0; i < ARRAY_SIZE(errors); i++) { + if (status & BIT(i)) { + if (errors[i].is_info_only) + dev_dbg(&csi2->isys->adev->dev, + "csi2-%i info: %s\n", + csi2->index, errors[i].error_string); + else + dev_err_ratelimited(&csi2->isys->adev->dev, + "csi2-%i error: %s\n", + csi2->index, + errors[i].error_string); + } + } +} + +#define DIV_SHIFT 8 + +static uint32_t calc_timing(int32_t a, int32_t b, int64_t link_freq, + int32_t accinv) +{ + return accinv * a + (accinv * b * (500000000 >> DIV_SHIFT) + / (int32_t) (link_freq >> DIV_SHIFT)); +} + +int ici_isys_csi2_calc_timing(struct ici_isys_csi2 + *csi2, struct + ici_isys_csi2_timing + *timing, uint32_t accinv) +{ + int64_t link_frequency = 0; + + int idx, rval; + + struct ici_ext_subdev *sd = + (struct ici_ext_subdev*)csi2->ext_sd; + + struct ici_ext_sd_param param = { + .sd = sd, + .id = ICI_EXT_SD_PARAM_ID_LINK_FREQ, + .type = ICI_EXT_SD_PARAM_TYPE_INT32, + }; + + if (!sd || !sd->get_param) { + dev_err(&csi2->isys->adev->dev, + "External device not available\n"); + return -ENODEV; + } + rval = sd->get_param(¶m); + if (rval) { 
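+		/* LINK_FREQ is a menu-style parameter: get_param() returns
+		 * an index which get_menu_item() below resolves to a
+		 * frequency in Hz; without it the D-PHY termination and
+		 * settle counts cannot be derived.
+		 */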
+ dev_info(&csi2->isys->adev->dev, "can't get link frequency\n"); + return rval; + } + + idx = param.val; + param.type = ICI_EXT_SD_PARAM_TYPE_INT64; + + rval = sd->get_menu_item(¶m, idx); + if (rval) { + dev_info(&csi2->isys->adev->dev, "can't get menu item\n"); + return rval; + } + + link_frequency = param.val; + dev_dbg(&csi2->isys->adev->dev, "%s: link frequency %lld\n", __func__, + link_frequency); + + if (!link_frequency) + return -EINVAL; + + timing->ctermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A, + CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B, + link_frequency, accinv); + timing->csettle = + calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A, + CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B, link_frequency, + accinv); + dev_dbg(&csi2->isys->adev->dev, "ctermen %u\n", timing->ctermen); + dev_dbg(&csi2->isys->adev->dev, "csettle %u\n", timing->csettle); + + timing->dtermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A, + CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B, + link_frequency, accinv); + timing->dsettle = + calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A, + CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B, link_frequency, + accinv); + dev_dbg(&csi2->isys->adev->dev, "dtermen %u\n", timing->dtermen); + dev_dbg(&csi2->isys->adev->dev, "dsettle %u\n", timing->dsettle); + + return 0; +} + +static void ici_isys_register_errors(struct + ici_isys_csi2 + *csi2) +{ + u32 status = readl(csi2->base + CSI2_REG_CSIRX_IRQ_STATUS); + + dev_dbg(&csi2->isys->adev->dev, + "ici_isys_register_errors\n"); + writel(status, csi2->base + CSI2_REG_CSIRX_IRQ_CLEAR); + csi2->receiver_errors |= status; +} + +static void ici_isys_csi2_sof_event(struct ici_isys_csi2 + *csi2, unsigned int vc) +{ + unsigned long flags; + + spin_lock_irqsave(&csi2->isys->lock, flags); + csi2->in_frame = true; + spin_unlock_irqrestore(&csi2->isys->lock, flags); +} + +static void ici_isys_csi2_eof_event(struct ici_isys_csi2 + *csi2, unsigned int vc) +{ + unsigned long flags; + + spin_lock_irqsave(&csi2->isys->lock, flags); + csi2->in_frame = false; + if (csi2->wait_for_sync) + complete(&csi2->eof_completion); + spin_unlock_irqrestore(&csi2->isys->lock, flags); +} + +void ici_isys_csi2_isr(struct ici_isys_csi2 *csi2) +{ + u32 status = readl(csi2->base + CSI2_REG_CSI2PART_IRQ_STATUS); + unsigned int i; + + writel(status, csi2->base + CSI2_REG_CSI2PART_IRQ_CLEAR); + + if (status & CSI2_CSI2PART_IRQ_CSIRX) + ici_isys_register_errors(csi2); + + for (i = 0; i < NR_OF_CSI2_ICI_VC; i++) { + if ((status & CSI2_IRQ_FS_VC(i))) + ici_isys_csi2_sof_event(csi2, i); + + if ((status & CSI2_IRQ_FE_VC(i))) + ici_isys_csi2_eof_event(csi2, i); + } + +} +EXPORT_SYMBOL(ici_isys_csi2_isr); + +void ici_isys_csi2_wait_last_eof(struct ici_isys_csi2 + *csi2) +{ + unsigned long flags; + int tout; + + spin_lock_irqsave(&csi2->isys->lock, flags); + if (!csi2->in_frame) { + spin_unlock_irqrestore(&csi2->isys->lock, flags); + return; + } + + reinit_completion(&csi2->eof_completion); + csi2->wait_for_sync = true; + spin_unlock_irqrestore(&csi2->isys->lock, flags); + tout = wait_for_completion_timeout(&csi2->eof_completion, + ICI_EOF_TIMEOUT_JIFFIES); + if (!tout) { + dev_err(&csi2->isys->adev->dev, + "csi2-%d: timeout at sync to eof\n", csi2->index); + } + csi2->wait_for_sync = false; +} + +static void csi2_capture_done(struct ici_isys_pipeline *ip, + struct ia_css_isys_resp_info *info) +{ + ici_isys_frame_buf_capture_done(ip, info); + if (ip->csi2) + ici_isys_csi2_error(ip->csi2); +} + +int ici_csi2_set_stream( + struct ici_isys_node *node, + void* ip, + int state) +{ + struct ici_isys_subdev* 
asd = node->sd; + struct ici_isys_csi2 *csi2 = + ici_asd_to_csi2(asd, asd->index); + struct ici_isys_csi2_timing timing = { 0 }; + unsigned int i, nlanes; + int rval; + u32 csi2csirx = 0, csi2part = 0; + + dev_dbg(&csi2->isys->adev->dev, "csi2 s_stream %d\n", state); + + if (!state) { + ici_isys_csi2_error(csi2); + writel(0, csi2->base + CSI2_REG_CSI_RX_CONFIG); + writel(0, csi2->base + CSI2_REG_CSI_RX_ENABLE); + + /* Disable interrupts */ + writel(0, csi2->base + CSI2_REG_CSI2S2M_IRQ_MASK); + writel(0, csi2->base + CSI2_REG_CSI2S2M_IRQ_ENABLE); + writel(0, csi2->base + CSI2_REG_CSI2PART_IRQ_MASK); + writel(0, csi2->base + CSI2_REG_CSI2PART_IRQ_ENABLE); + return 0; + } + + ici_isys_stream_add_capture_done(ip, csi2_capture_done); + + nlanes = csi2->nlanes; + + rval = ici_isys_csi2_calc_timing(csi2, + &timing, + CSI2_ACCINV); + if (rval) + return rval; + + writel(timing.ctermen, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_TERMEN_CLANE); + writel(timing.csettle, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_SETTLE_CLANE); + + for (i = 0; i < nlanes; i++) { + writel(timing.dtermen, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_TERMEN_DLANE(i)); + writel(timing.dsettle, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_SETTLE_DLANE(i)); + } + writel(CSI2_CSI_RX_CONFIG_DISABLE_BYTE_CLK_GATING | + CSI2_CSI_RX_CONFIG_RELEASE_LP11, + csi2->base + CSI2_REG_CSI_RX_CONFIG); + + writel(nlanes, csi2->base + CSI2_REG_CSI_RX_NOF_ENABLED_LANES); + + writel(CSI2_CSI_RX_ENABLE_ENABLE, csi2->base + CSI2_REG_CSI_RX_ENABLE); + + /* SOF enabled from CSI2PART register in B0 */ + for (i = 0; i < NR_OF_CSI2_ICI_VC; i++) + csi2part |= CSI2_IRQ_FS_VC(i) | CSI2_IRQ_FE_VC(i); + + /* Enable csi2 receiver error interrupts */ + csi2csirx = BIT(CSI2_CSIRX_NUM_ERRORS) - 1; + writel(csi2csirx, csi2->base + CSI2_REG_CSIRX_IRQ_EDGE); + writel(0, csi2->base + CSI2_REG_CSIRX_IRQ_LEVEL_NOT_PULSE); + writel(csi2csirx, csi2->base + CSI2_REG_CSIRX_IRQ_CLEAR); + writel(csi2csirx, csi2->base + CSI2_REG_CSIRX_IRQ_MASK); + writel(csi2csirx, csi2->base + CSI2_REG_CSIRX_IRQ_ENABLE); + + /* Enable csi2 error and SOF-related irqs */ + writel(csi2part, csi2->base + CSI2_REG_CSI2PART_IRQ_EDGE); + writel(0, csi2->base + CSI2_REG_CSI2PART_IRQ_LEVEL_NOT_PULSE); + writel(csi2part, csi2->base + CSI2_REG_CSI2PART_IRQ_CLEAR); + writel(csi2part, csi2->base + CSI2_REG_CSI2PART_IRQ_MASK); + writel(csi2part, csi2->base + CSI2_REG_CSI2PART_IRQ_ENABLE); + + return 0; +} + +unsigned int ici_isys_csi2_get_current_field( + struct device* dev, + struct ici_isys_mipi_packet_header *ph) +{ + unsigned int field; + + /* Check if the first SOF packet is received. */ + if ((ph->dtype & ICI_ISYS_SHORT_PACKET_DTYPE_MASK) != 0) + dev_warn(dev, + "First short packet is not SOF.\n"); + field = (ph->word_count % 2) ? ICI_FIELD_TOP : + ICI_FIELD_BOTTOM; + dev_dbg(dev, + "Interlaced field ready. 
frame_num = %d field = %d\n",
+		ph->word_count, field);
+
+	return field;
+}
+
+static int ici_csi2_pipeline_validate(
+	struct node_pipeline *inp,
+	struct ici_isys_node *node)
+{
+	struct ici_isys_subdev *asd = node->sd;
+	struct ici_isys_csi2 *csi2 =
+		ici_asd_to_csi2(asd, asd->index);
+	struct ici_isys_pipeline *ip =
+		ici_nodepipe_to_pipeline(inp);
+
+	if (ip->csi2) {
+		dev_err(&csi2->isys->adev->dev,
+			"Pipeline does not support > 1 CSI2 node\n");
+		return -EINVAL;
+	}
+	node->pipe = inp;
+	ip->csi2 = csi2;
+	ip->asd_source = asd;
+	ip->vc = asd - csi2->asd; /* index of asd element in csi2->asd array */
+	ip->asd_source_pad_id = CSI2_ICI_PAD_SINK;
+	return 0;
+}
+
+int ici_isys_csi2_init(struct ici_isys_csi2 *csi2,
+	struct ici_isys *isys,
+	void __iomem *base, unsigned int index)
+{
+	struct ici_pad_framefmt fmt = {
+		.pad.pad_idx = CSI2_ICI_PAD_SINK,
+		.ffmt = {
+			.width = 4096,
+			.height = 3072,
+		},
+	};
+
+	int rval;
+	char name[ICI_MAX_NODE_NAME];
+	unsigned int i;
+
+	csi2->isys = isys;
+	csi2->base = base;
+	csi2->index = index;
+
+	/* One subdev/stream pair per virtual channel. */
+	for (i = 0; i < NR_OF_CSI2_ICI_VC; i++) {
+		snprintf(name, sizeof(name),
+			IPU_ISYS_ENTITY_PREFIX " CSI-2 %u VC %u", index, i);
+		csi2->asd[i].isys = isys;
+		rval = ici_isys_subdev_init(&csi2->asd[i],
+			name,
+			NR_OF_CSI2_ICI_PADS,
+			i);
+		if (rval)
+			goto fail;
+
+		csi2->asd[i].pads[CSI2_ICI_PAD_SINK].flags = ICI_PAD_FLAGS_SINK;
+		csi2->asd[i].pads[CSI2_ICI_PAD_SOURCE].flags = ICI_PAD_FLAGS_SOURCE;
+
+		csi2->asd[i].source = IA_CSS_ISYS_STREAM_SRC_CSI2_PORT0 + index;
+		csi2->asd[i].supported_codes = ici_csi2_supported_codes;
+		csi2->asd[i].set_ffmt_internal = ici_csi2_set_ffmt;
+		csi2->asd[i].node.node_set_streaming =
+			ici_csi2_set_stream;
+		csi2->asd[i].node.node_pipeline_validate =
+			ici_csi2_pipeline_validate;
+
+		csi2->asd[i].node.node_set_pad_ffmt(&csi2->asd[i].node, &fmt);
+
+		snprintf(csi2->as[i].node.name, sizeof(csi2->as[i].node.name),
+			IPU_ISYS_ENTITY_PREFIX " CSI-2 %u VC %u capture", index, i);
+		csi2->as[i].isys = isys;
+		csi2->as[i].try_fmt_vid_mplane =
+			ici_isys_video_try_fmt_vid_mplane_default;
+		csi2->as[i].prepare_firmware_stream_cfg =
+			ici_isys_prepare_firmware_stream_cfg_default;
+		csi2->as[i].packed = true;
+		csi2->as[i].buf_list.css_pin_type = IA_CSS_ISYS_PIN_TYPE_MIPI;
+		csi2->as[i].pfmts = ici_isys_pfmts_packed;
+		csi2->as[i].line_header_length =
+			INTEL_IPU4_ISYS_CSI2_LONG_PACKET_HEADER_SIZE;
+		csi2->as[i].line_footer_length =
+			INTEL_IPU4_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE;
+
+		rval = ici_isys_stream_init(&csi2->as[i], &csi2->asd[i],
+			&csi2->asd[i].node, CSI2_ICI_PAD_SOURCE,
+			ICI_PAD_FLAGS_SINK);
+		if (rval) {
+			dev_err(&isys->adev->dev, "can't init stream node\n");
+			goto fail;
+		}
+	}
+	/* The EOF completion is per receiver, so initialise it once. */
+	init_completion(&csi2->eof_completion);
+
+	return 0;
+
+fail:
+	ici_isys_csi2_cleanup(csi2);
+
+	return rval;
+}
+EXPORT_SYMBOL(ici_isys_csi2_init);
+
+void ici_isys_csi2_cleanup(struct ici_isys_csi2 *csi2)
+{
+	ici_isys_subdev_cleanup(&csi2->asd[0]);
+	ici_isys_stream_cleanup(&csi2->as[0]);
+}
+EXPORT_SYMBOL(ici_isys_csi2_cleanup);
+
+#endif /* ICI_ENABLED */
diff --git a/drivers/media/pci/intel/ici/ici-isys-csi2.h b/drivers/media/pci/intel/ici/ici-isys-csi2.h
new file mode 100644
index 000000000000..504e413af8ea
--- /dev/null
+++ b/drivers/media/pci/intel/ici/ici-isys-csi2.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef ICI_ISYS_CSI2_H
+#define ICI_ISYS_CSI2_H
+
+#include "ici-isys-frame-buf.h"
+#include "ici-isys-subdev.h"
+#include "ici-isys-stream.h"
+
+struct ici_isys_csi2_pdata;
+
+#define CSI2_ICI_PAD_SINK 0
+#define 
CSI2_ICI_PAD_SOURCE 1 +#define NR_OF_CSI2_ICI_PADS 2 +#define NR_OF_CSI2_ICI_VC 4 + +#define ICI_ISYS_SHORT_PACKET_BUFFER_NUM 32 +#define ICI_ISYS_SHORT_PACKET_WIDTH 32 +#define ICI_ISYS_SHORT_PACKET_FRAME_PACKETS 2 +#define ICI_ISYS_SHORT_PACKET_EXTRA_PACKETS 64 +#define ICI_ISYS_SHORT_PACKET_UNITSIZE 8 +#define ICI_ISYS_SHORT_PACKET_GENERAL_DT 0 +#define ICI_ISYS_SHORT_PACKET_PT 0 +#define ICI_ISYS_SHORT_PACKET_FT 0 +#define ICI_ISYS_SHORT_PACKET_DTYPE_MASK 0x3f +#define ICI_ISYS_SHORT_PACKET_STRIDE \ + (ICI_ISYS_SHORT_PACKET_WIDTH * \ + ICI_ISYS_SHORT_PACKET_UNITSIZE) +#define ICI_ISYS_SHORT_PACKET_NUM(num_lines) \ + ((num_lines) * 2 + ICI_ISYS_SHORT_PACKET_FRAME_PACKETS + \ + ICI_ISYS_SHORT_PACKET_EXTRA_PACKETS) +#define ICI_ISYS_SHORT_PACKET_PKT_LINES(num_lines) \ + DIV_ROUND_UP(ICI_ISYS_SHORT_PACKET_NUM(num_lines) * \ + ICI_ISYS_SHORT_PACKET_UNITSIZE, \ + ICI_ISYS_SHORT_PACKET_STRIDE) +#define ICI_ISYS_SHORT_PACKET_BUF_SIZE(num_lines) \ + (ICI_ISYS_SHORT_PACKET_WIDTH * \ + ICI_ISYS_SHORT_PACKET_PKT_LINES(num_lines) * \ + ICI_ISYS_SHORT_PACKET_UNITSIZE) +#define IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER 256 +#define IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE 16 +#define IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE \ + (IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER * \ + IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE) +#define IPU_ISYS_SHORT_PACKET_TRACE_MAX_TIMESHIFT 100 +#define IPU_ISYS_SHORT_PACKET_FROM_RECEIVER 0 +#define IPU_ISYS_SHORT_PACKET_FROM_TUNIT 1 + +#define ICI_EOF_TIMEOUT 1000 +#define ICI_EOF_TIMEOUT_JIFFIES msecs_to_jiffies(ICI_EOF_TIMEOUT) + +#define IPU_ISYS_SHORT_PACKET_TRACE_MAX_TIMESHIFT 100 +#define IPU_ISYS_SHORT_PACKET_TRACE_EVENT_MASK 0x2082 +#define IPU_SKEW_CAL_LIMIT_HZ (1500000000ul / 2) + +#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A 0 +#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B 0 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A 95 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B -8 + +#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A 0 +#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B 0 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A 85 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B -2 + +/* + * struct ici_isys_csi2 + * + */ +struct ici_isys_csi2 { + struct ici_isys_csi2_pdata *pdata; + struct ici_isys *isys; + struct ici_isys_subdev asd[NR_OF_CSI2_ICI_VC]; + struct ici_isys_stream as[NR_OF_CSI2_ICI_VC]; + void *ext_sd; + + void __iomem *base; + u32 receiver_errors; + unsigned int nlanes; + unsigned int index; + atomic_t sof_sequence; + + bool wait_for_sync; + bool in_frame; + struct completion eof_completion; +}; + +struct ici_isys_csi2_timing { + uint32_t ctermen; + uint32_t csettle; + uint32_t dtermen; + uint32_t dsettle; +}; + +/* + * This structure defines the MIPI packet header output + * from IPU4 MIPI receiver. Due to hardware conversion, + * this structure is not the same as defined in CSI-2 spec. + */ +__packed struct ici_isys_mipi_packet_header { + uint32_t word_count : 16, + dtype : 13, + sync : 2, + stype : 1; + uint32_t sid : 4, + port_id : 4, + reserved : 23, + odd_even : 1; +}; + +/* + * This structure defines the trace message content + * for CSI2 receiver monitor messages. 
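+ * The hardware timestamp is split across the two 64-bit words
+ * (timestamp_l:16 in the first, timestamp_h:50 in the second);
+ * a consumer would presumably reassemble it as
+ *   ts = ((u64)timestamp_h << 16) | timestamp_l;
+ * (an assumption based on the field widths below, not on
+ * firmware documentation).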
+ */ +__packed struct ici_isys_csi2_monitor_message { + uint64_t fe : 1, + fs : 1, + pe : 1, + ps : 1, + le : 1, + ls : 1, + reserved1 : 2, + sequence : 2, + reserved2 : 2, + flash_shutter : 4, + error_cause : 12, + fifo_overrun : 1, + crc_error : 2, + reserved3 : 1, + timestamp_l : 16, + port : 4, + vc : 2, + reserved4 : 2, + frame_sync : 4, + reserved5 : 4; + uint64_t reserved6 : 3, + cmd : 2, + reserved7 : 1, + monitor_id : 7, + reserved8 : 1, + timestamp_h : 50; +}; + +int ici_isys_csi2_init(struct ici_isys_csi2 *csi2, + struct ici_isys *isys, + void __iomem *base, unsigned int index); +void ici_isys_csi2_cleanup(struct ici_isys_csi2 *csi2); +void ici_isys_csi2_wait_last_eof(struct ici_isys_csi2 *csi2); +void ici_isys_csi2_isr(struct ici_isys_csi2 *csi2); +unsigned int ici_isys_csi2_get_current_field( + struct device* dev, struct ici_isys_mipi_packet_header *ph); + +#endif /* ICI_ISYS_CSI2_H */ diff --git a/drivers/media/pci/intel/ici/ici-isys-frame-buf.c b/drivers/media/pci/intel/ici/ici-isys-frame-buf.c new file mode 100644 index 000000000000..8e62c273b35a --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-frame-buf.c @@ -0,0 +1,950 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include + +#include "./ici/ici-isys.h" +#ifdef ICI_ENABLED + +#include "isysapi/interface/ia_css_isysapi_types.h" +#include "isysapi/interface/ia_css_isysapi.h" +#include "./ici/ici-isys-frame-buf.h" + +#define get_frame_entry_to_buf_wrap(get_entry) \ + container_of(get_entry, struct ici_frame_buf_wrapper,\ + get_frame_entry) + +#define put_frame_entry_to_buf_wrap(put_entry) \ + container_of(put_entry, struct ici_frame_buf_wrapper,\ + put_frame_entry) + +static struct ici_frame_buf_wrapper +*ici_frame_buf_lookup(struct ici_isys_frame_buf_list + *buf_list, + struct ici_frame_info + *user_frame_info) +{ + struct ici_frame_buf_wrapper *buf; + int i; + int mem_type = user_frame_info->mem_type; + + list_for_each_entry(buf, &buf_list->getbuf_list, node) { + for (i = 0; i < user_frame_info->num_planes; i++) { + struct ici_frame_plane *new_plane = + &user_frame_info->frame_planes[i]; + struct ici_frame_plane *cur_plane = + &buf->frame_info.frame_planes[i]; + if (buf->state != ICI_BUF_PREPARED && + buf->state != ICI_BUF_DONE){ + continue; + } + + switch (mem_type) { + case ICI_MEM_USERPTR: + if (new_plane->mem.userptr == + cur_plane->mem.userptr) + return buf; + break; + case ICI_MEM_DMABUF: + if (new_plane->mem.dmafd == + cur_plane->mem.dmafd) + return buf; + break; + } + //TODO: add multiplaner checks + } + + } + return NULL; +} + +static void ici_put_userpages(struct device *dev, + struct ici_kframe_plane + *kframe_plane) +{ + struct sg_table *sgt = kframe_plane->sgt; + struct scatterlist *sgl; + unsigned int i; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + DEFINE_DMA_ATTRS(attrs); +#else + unsigned long attrs; +#endif + + struct mm_struct* mm = current->active_mm; + if (!mm){ + dev_err(dev, "Failed to get active mm_struct ptr from current process.\n"); + return; + } + + down_read(&mm->mmap_sem); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_unmap_sg_attrs(kframe_plane->dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, &attrs); +#else + attrs = DMA_ATTR_SKIP_CPU_SYNC; + dma_unmap_sg_attrs(kframe_plane->dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, attrs); +#endif + + for_each_sg(sgt->sgl, sgl, sgt->orig_nents, i) { + struct page *page = sg_page(sgl); + + 
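/* A single sg entry may cover several contiguous pages;
+		 * dirty and release each one below. */
+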
unsigned int npages = PAGE_ALIGN(sgl->offset + sgl->length) + >> PAGE_SHIFT; + unsigned int page_no; + + for (page_no = 0; page_no < npages; ++page_no, ++page) { + set_page_dirty_lock(page); + put_page(page); + } + } + + kfree(sgt); + kframe_plane->sgt = NULL; + + up_read(&mm->mmap_sem); +} + +static void ici_put_dma(struct device *dev, + struct ici_kframe_plane + *kframe_plane) +{ + struct sg_table *sgt = kframe_plane->sgt; + + if (WARN_ON(!kframe_plane->db_attach)) { + pr_err("trying to unpin a not attached buffer\n"); + return; + } + + if (WARN_ON(!sgt)) { + pr_err("dmabuf buffer is already unpinned\n"); + return; + } + + if (kframe_plane->kaddr) { + dma_buf_vunmap(kframe_plane->db_attach->dmabuf, + kframe_plane->kaddr); + kframe_plane->kaddr = NULL; + } + dma_buf_unmap_attachment(kframe_plane->db_attach, sgt, + DMA_BIDIRECTIONAL); + + kframe_plane->dma_addr = 0; + kframe_plane->sgt = NULL; + +} + +static int ici_map_dma(struct device *dev, + struct ici_frame_plane + *frame_plane, + struct ici_kframe_plane + *kframe_plane) +{ + + int ret = 0; + int fd = frame_plane->mem.dmafd; + + kframe_plane->dbdbuf = dma_buf_get(fd); + if (!kframe_plane->dbdbuf) { + ret = -EINVAL; + goto error; + } + + if (frame_plane->length == 0) + kframe_plane->length = kframe_plane->dbdbuf->size; + else + kframe_plane->length = frame_plane->length; + + kframe_plane->fd = fd; + kframe_plane->db_attach = dma_buf_attach(kframe_plane->dbdbuf, dev); + + if (IS_ERR(kframe_plane->db_attach)) { + ret = PTR_ERR(kframe_plane->db_attach); + goto error_put; + } + + kframe_plane->sgt = dma_buf_map_attachment(kframe_plane->db_attach, + DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(kframe_plane->sgt)) { + ret = -EINVAL; + kframe_plane->sgt = NULL; + dev_dbg(dev, "map attachment failed\n"); + goto error_detach; + } + + kframe_plane->dma_addr = sg_dma_address(kframe_plane->sgt->sgl); + kframe_plane->kaddr = dma_buf_vmap(kframe_plane->dbdbuf); + + if (!kframe_plane->kaddr) { + ret = -EINVAL; + goto error_detach; + } + + dev_dbg(dev, "MAPBUF: mapped fd %d\n", fd); + + return 0; + +error_detach: + dma_buf_detach(kframe_plane->dbdbuf, kframe_plane->db_attach); +error_put: + dma_buf_put(kframe_plane->dbdbuf); +error: + return ret; +} + +static int ici_get_userpages(struct device *dev, + struct ici_frame_plane + *frame_plane, + struct ici_kframe_plane + *kframe_plane) +{ + unsigned long start, end, addr; + int npages, array_size; + struct page **pages; + int nr = 0; + int ret = 0; + struct sg_table *sgt; + unsigned int i; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + DEFINE_DMA_ATTRS(attrs); +#else + unsigned long attrs; +#endif + + addr = (unsigned long)frame_plane->mem.userptr; + start = addr & PAGE_MASK; + end = PAGE_ALIGN(addr + frame_plane->length); + npages = (end - start) >> PAGE_SHIFT; + array_size = npages * sizeof(struct page *); + + if (!npages) + return -EINVAL; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return -ENOMEM; + + if (array_size <= PAGE_SIZE) + pages = kzalloc(array_size, GFP_KERNEL); + else + pages = vzalloc(array_size); + + if (!pages) { + kfree(sgt); + return -ENOMEM; + } + + down_read(¤t->mm->mmap_sem); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + nr = get_user_pages( + current, current->mm, + start, npages, 1, 0, pages, NULL); +#else + nr = get_user_pages(start, npages, FOLL_WRITE, pages, NULL); +#endif + if (nr < npages) + goto error_free_pages; + + ret = sg_alloc_table_from_pages(sgt, pages, npages, + addr & ~PAGE_MASK, frame_plane->length, + GFP_KERNEL); + if (ret) { + dev_err(dev, 
"Failed to init sgt\n"); + goto error_free_pages; + } + + + kframe_plane->dev = dev; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, &attrs); +#else + attrs = DMA_ATTR_SKIP_CPU_SYNC; + sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, attrs); +#endif + + if (sgt->nents <= 0) { + dev_err(dev, "Failed to init dma_map\n"); + ret = -EIO; + goto error_dma_map; + } + kframe_plane->dma_addr = sg_dma_address(sgt->sgl); + kframe_plane->sgt = sgt; + +error_free_page_list: + if (pages) { + if (array_size <= PAGE_SIZE) + kfree(pages); + else + vfree(pages); + } + up_read(¤t->mm->mmap_sem); + return ret; + +error_dma_map: +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, &attrs); +#else + dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, attrs); +#endif + +error_free_pages: + if (pages) { + for (i = 0; i < nr; i++) + put_page(pages[i]); + } + kfree(sgt); + goto error_free_page_list; +} + +static int ici_get_userpages_virt(struct device *dev, + struct ici_frame_plane + *frame_plane, + struct ici_kframe_plane + *kframe_plane, + struct page **pages) +{ + unsigned long addr; + int npages; + int ret = 0; + struct sg_table *sgt; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + DEFINE_DMA_ATTRS(attrs); +#else + unsigned long attrs; +#endif + + addr = (unsigned long)frame_plane->mem.userptr; + npages = kframe_plane->npages; + + if (!npages) + return -EINVAL; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return -ENOMEM; + + ret = sg_alloc_table_from_pages(sgt, pages, npages, + addr & ~PAGE_MASK, frame_plane->length, + GFP_KERNEL); + if (ret) { + dev_err(dev, "Failed to init sgt\n"); + goto error_free_pages; + } + + + kframe_plane->dev = dev; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, &attrs); +#else + attrs = DMA_ATTR_SKIP_CPU_SYNC; + sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, attrs); +#endif + + if (sgt->nents <= 0) { + dev_err(dev, "Failed to init dma_map\n"); + ret = -EIO; + goto error_dma_map; + } + kframe_plane->dma_addr = sg_dma_address(sgt->sgl); + kframe_plane->sgt = sgt; + +error_free_page_list: + return ret; + +error_dma_map: +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, &attrs); +#else + dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, + DMA_FROM_DEVICE, attrs); +#endif + +error_free_pages: + kfree(sgt); + goto error_free_page_list; +} + +int ici_isys_get_buf(struct ici_isys_stream *as, + struct ici_frame_info *frame_info) +{ + int res; + unsigned i; + struct ici_frame_buf_wrapper *buf; + + struct ici_kframe_plane *kframe_plane; + struct ici_isys_frame_buf_list *buf_list = &as->buf_list; + int mem_type = frame_info->mem_type; + + if (mem_type != ICI_MEM_USERPTR && + mem_type != ICI_MEM_DMABUF) { + dev_err(&as->isys->adev->dev, "Memory type not supproted\n"); + return -EINVAL; + } + + if (!frame_info->frame_planes[0].length) { + dev_err(&as->isys->adev->dev, "User length not set\n"); + return -EINVAL; + } + buf = ici_frame_buf_lookup(buf_list, frame_info); + + if (buf) { + buf->state = ICI_BUF_PREPARED; + return 0; + } + + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return 
-ENOMEM; + + buf->buf_id = frame_info->frame_buf_id; + buf->buf_list = buf_list; + memcpy(&buf->frame_info, frame_info, sizeof(buf->frame_info)); + + switch (mem_type) { + case ICI_MEM_USERPTR: + if (!frame_info->frame_planes[0].mem.userptr) { + dev_err(&as->isys->adev->dev, + "User pointer not define\n"); + res = -EINVAL; + goto err_exit; + } + for (i = 0; i < frame_info->num_planes; i++) { + kframe_plane = &buf->kframe_info.planes[i]; + kframe_plane->mem_type = + ICI_MEM_USERPTR; + res = + ici_get_userpages( + &as->isys->adev->dev, + &frame_info-> + frame_planes[i], + kframe_plane); + if (res) + goto err_exit; + } + break; + case ICI_MEM_DMABUF: + for (i = 0; i < frame_info->num_planes; i++) { + kframe_plane = &buf->kframe_info.planes[i]; + kframe_plane->mem_type = + ICI_MEM_DMABUF; + res = ici_map_dma( + &as->isys->adev->dev, + &frame_info-> + frame_planes[i], + kframe_plane); + if (res) + goto err_exit; + } + break; + } + + mutex_lock(&buf_list->mutex); + buf->state = ICI_BUF_PREPARED; + list_add_tail(&buf->node, &buf_list->getbuf_list); + mutex_unlock(&buf_list->mutex); + return 0; + +err_exit: + kfree(buf); + return res; +} + +int ici_isys_get_buf_virt(struct ici_isys_stream *as, + struct ici_frame_buf_wrapper *frame_buf, + struct page **pages) +{ + int res; + unsigned i; + struct ici_frame_buf_wrapper *buf; + + struct ici_kframe_plane *kframe_plane; + struct ici_isys_frame_buf_list *buf_list = &as->buf_list; + int mem_type = frame_buf->frame_info.mem_type; + + if (mem_type != ICI_MEM_USERPTR && + mem_type != ICI_MEM_DMABUF) { + dev_err(&as->isys->adev->dev, "Memory type not supproted\n"); + return -EINVAL; + } + + if (!frame_buf->frame_info.frame_planes[0].length) { + dev_err(&as->isys->adev->dev, "User length not set\n"); + return -EINVAL; + } + buf = ici_frame_buf_lookup(buf_list, &frame_buf->frame_info); + + if (buf) { + buf->state = ICI_BUF_PREPARED; + return 0; + } + + + buf = frame_buf; + + buf->buf_list = buf_list; + + switch (mem_type) { + case ICI_MEM_USERPTR: + if (!frame_buf->frame_info.frame_planes[0].mem.userptr) { + dev_err(&as->isys->adev->dev, + "User pointer not define\n"); + return -EINVAL; + } + for (i = 0; i < frame_buf->frame_info.num_planes; i++) { + kframe_plane = &buf->kframe_info.planes[i]; + kframe_plane->mem_type = + ICI_MEM_USERPTR; + res = + ici_get_userpages_virt( + &as->isys->adev->dev, + &frame_buf->frame_info.frame_planes[i], + kframe_plane, + pages); + if (res) + return res; + } + break; + case ICI_MEM_DMABUF: + break; + } + + mutex_lock(&buf_list->mutex); + buf->state = ICI_BUF_PREPARED; + list_add_tail(&buf->node, &buf_list->getbuf_list); + mutex_unlock(&buf_list->mutex); + return 0; +} + +int ici_isys_put_buf(struct ici_isys_stream *as, + struct ici_frame_info *frame_info, + unsigned int f_flags) +{ + struct ici_frame_buf_wrapper *buf; + struct ici_isys_frame_buf_list *buf_list = &as->buf_list; + unsigned long flags = 0; + int rval; + + spin_lock_irqsave(&buf_list->lock, flags); + if (list_empty(&buf_list->putbuf_list)) { + /* Wait */ + if (!(f_flags & O_NONBLOCK)) { + spin_unlock_irqrestore(&buf_list->lock, flags); + rval = wait_event_interruptible(buf_list->wait, + !list_empty(&buf_list-> + putbuf_list)); + spin_lock_irqsave(&buf_list->lock, flags); + if (rval == -ERESTARTSYS) + return rval; + } + } + + if (list_empty(&buf_list->putbuf_list)) { + spin_unlock_irqrestore(&buf_list->lock, flags); + return -ENODATA; + } + + buf = list_entry(buf_list->putbuf_list.next, + struct ici_frame_buf_wrapper, node); + list_del(&buf->node); + 
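+	/* Recycle below: the wrapper is marked DONE and parked back on
+	 * getbuf_list so ici_frame_buf_lookup() can reuse its mapping on
+	 * a later get_buf call.
+	 */
+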
spin_unlock_irqrestore(&buf_list->lock, flags); + + mutex_lock(&buf_list->mutex); + buf->state = ICI_BUF_DONE; + list_add_tail(&buf->node, + &buf_list->getbuf_list); + mutex_unlock(&buf_list->mutex); + + memcpy(frame_info, &buf->frame_info, sizeof(buf->frame_info)); + return 0; +} + +static void frame_buf_done( + struct ici_isys_frame_buf_list *buf_list, + struct ici_frame_buf_wrapper *buf) +{ + unsigned long flags = 0; + spin_lock_irqsave(&buf_list->lock, flags); + buf->state = ICI_BUF_READY; + list_add_tail(&buf->node, &buf_list->putbuf_list); + spin_unlock_irqrestore(&buf_list->lock, flags); + wake_up_interruptible(&buf_list->wait); +} + +void ici_isys_frame_buf_ready(struct ici_isys_pipeline + *ip, + struct ia_css_isys_resp_info *info) +{ + struct ici_frame_buf_wrapper *buf; + struct ici_isys_stream *as = + ici_pipeline_to_stream(ip); + struct ici_isys_frame_buf_list *buf_list = &as->buf_list; + struct ici_isys *isys = as->isys; + unsigned long flags = 0; + bool found = false; + + dev_dbg(&isys->adev->dev, "buffer: received buffer %8.8x\n", + info->pin.addr); + + spin_lock_irqsave(&buf_list->lock, flags); + + list_for_each_entry_reverse(buf, &buf_list->getbuf_list, node) { + struct ici_kframe_plane* plane; + + if (buf->state != ICI_BUF_ACTIVE) + continue; + plane = &buf->kframe_info.planes[0]; + if (plane->dma_addr == info->pin.addr) { + found = true; + break; + } + } + + if (!found) { + spin_unlock_irqrestore(&buf_list->lock, flags); + dev_err(&isys->adev->dev, + "WARNING: cannot find a matching video buffer!\n"); + return; + } + + list_del(&buf->node); + spin_unlock_irqrestore(&buf_list->lock, flags); + + /* + * For interlaced buffers, the notification to user space + * is postponed to capture_done event since the field + * information is available only at that time. 
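+	 * The field itself is recovered from the frame's short packet in
+	 * ici_isys_frame_short_packet_ready() and stamped onto the buffer
+	 * in ici_isys_frame_buf_capture_done().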
+ */ + if (ip->interlaced) { + spin_lock_irqsave(&buf_list->short_packet_queue_lock, flags); + list_add(&buf->node, &buf_list->interlacebuf_list); + spin_unlock_irqrestore(&buf_list->short_packet_queue_lock, + flags); + } else { + buf->frame_info.field = ICI_FIELD_NONE; + frame_buf_done(buf_list, buf); + if (as->frame_done_notify_queue) + as->frame_done_notify_queue(); + } + + dev_dbg(&isys->adev->dev, "buffer: found buffer %p\n", buf); +} + +static void unmap_buf(struct ici_frame_buf_wrapper *buf) +{ + int i; + + for (i = 0; i < buf->frame_info.num_planes; i++) { + struct ici_kframe_plane *kframe_plane = + &buf->kframe_info.planes[i]; + switch (kframe_plane->mem_type) { + case ICI_MEM_USERPTR: + ici_put_userpages(kframe_plane->dev, + kframe_plane); + break; + case ICI_MEM_DMABUF: + ici_put_dma(kframe_plane->dev, + kframe_plane); + break; + default: + dev_err(&buf->buf_list->strm_dev->dev, "not supported memory type: %d\n", + kframe_plane->mem_type); + break; + } + } +} + +void ici_isys_frame_buf_stream_cancel(struct + ici_isys_stream + *as) +{ + struct ici_isys_frame_buf_list *buf_list = &as->buf_list; + struct ici_frame_buf_wrapper *buf; + struct ici_frame_buf_wrapper *next_buf; + + list_for_each_entry_safe(buf, next_buf, &buf_list->getbuf_list, node) { + list_del(&buf->node); + unmap_buf(buf); + } + list_for_each_entry_safe(buf, next_buf, &buf_list->putbuf_list, node) { + list_del(&buf->node); + unmap_buf(buf); + } + list_for_each_entry_safe(buf, next_buf, &buf_list->interlacebuf_list, + node) { + list_del(&buf->node); + unmap_buf(buf); + } +} + +int ici_isys_frame_buf_add_next( + struct ici_isys_stream *as, + struct ia_css_isys_frame_buff_set *css_buf) +{ + struct ici_frame_buf_wrapper *buf = NULL; + struct ici_isys_frame_buf_list *buf_list = &as->buf_list; + unsigned long flags = 0; + bool found = false; + + mutex_lock(&buf_list->mutex); + + list_for_each_entry(buf, &buf_list->getbuf_list, node) { + if (buf->state == ICI_BUF_PREPARED){ + found = true; + break; + } + } + + if (!found) { + /* No more buffers available */ + goto cleanup_mutex; + } + + + buf->state = ICI_BUF_ACTIVE; + mutex_unlock(&buf_list->mutex); + + css_buf->send_irq_sof = 1; + css_buf->output_pins[buf_list->fw_output].addr = + (uint32_t)buf->kframe_info.planes[0].dma_addr; + css_buf->output_pins[buf_list->fw_output].out_buf_id = + buf->buf_id + 1; + + if (buf_list->short_packet_bufs) { + struct ici_frame_short_buf* sb; + struct ici_isys_mipi_packet_header* ph; + struct ia_css_isys_output_pin_payload *output_pin; + spin_lock_irqsave(&buf_list->short_packet_queue_lock, flags); + if (!list_empty(&buf_list->short_packet_incoming)) { + sb = list_entry(buf_list->short_packet_incoming.next, + struct ici_frame_short_buf, node); + list_del(&sb->node); + list_add_tail(&sb->node, &buf_list->short_packet_active); + spin_unlock_irqrestore(&buf_list->short_packet_queue_lock, + flags); + + ph = (struct ici_isys_mipi_packet_header*) + sb->buffer; + ph->word_count = 0xffff; + ph->dtype = 0xff; + dma_sync_single_for_cpu(sb->dev, sb->dma_addr, sizeof(*ph), + DMA_BIDIRECTIONAL); + output_pin = &css_buf->output_pins[ + buf_list->short_packet_output_pin]; + output_pin->addr = sb->dma_addr; + output_pin->out_buf_id = sb->buf_id + 1; + } else { + spin_unlock_irqrestore(&buf_list->short_packet_queue_lock, + flags); + dev_err(&as->isys->adev->dev, + "No more short packet buffers. 
Driver bug?"); + WARN_ON(1); + } + } + return 0; + +cleanup_mutex: + mutex_unlock(&buf_list->mutex); + return -ENODATA; +} + +void ici_isys_frame_buf_capture_done( + struct ici_isys_pipeline *ip, + struct ia_css_isys_resp_info *info) +{ + if (ip->interlaced) { + struct ici_isys_stream *as = + ici_pipeline_to_stream(ip); + struct ici_isys_frame_buf_list *buf_list = + &as->buf_list; + unsigned long flags = 0; + struct ici_frame_short_buf* sb; + struct ici_frame_buf_wrapper* buf; + struct ici_frame_buf_wrapper* buf_safe; + struct list_head list; + + spin_lock_irqsave(&buf_list->short_packet_queue_lock, flags); + if(ip->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) + if (!list_empty(&buf_list->short_packet_active)) { + sb = list_last_entry(&buf_list->short_packet_active, + struct ici_frame_short_buf, node); + list_move(&sb->node, &buf_list->short_packet_incoming); + } + + list_cut_position(&list, + &buf_list->interlacebuf_list, + buf_list->interlacebuf_list.prev); + spin_unlock_irqrestore(&buf_list->short_packet_queue_lock, + flags); + + list_for_each_entry_safe(buf, buf_safe, &list, node) { + buf->frame_info.field = ip->cur_field; + list_del(&buf->node); + frame_buf_done(buf_list, buf); + if (as->frame_done_notify_queue) + as->frame_done_notify_queue(); + } + } +} + +void ici_isys_frame_short_packet_ready( + struct ici_isys_pipeline *ip, + struct ia_css_isys_resp_info *info) +{ + struct ici_isys_stream *as = + ici_pipeline_to_stream(ip); + struct ici_isys_frame_buf_list *buf_list = + &as->buf_list; + unsigned long flags = 0; + struct ici_frame_short_buf* sb; + + spin_lock_irqsave(&buf_list->short_packet_queue_lock, flags); + if (list_empty(&buf_list->short_packet_active)) { + spin_unlock_irqrestore(&buf_list->short_packet_queue_lock, + flags); + dev_err(&as->isys->adev->dev, + "active short buffer queue empty\n"); + return; + } + list_for_each_entry_reverse(sb, &buf_list->short_packet_active, + node) { + if (sb->dma_addr == info->pin.addr) { + ip->cur_field = + ici_isys_csi2_get_current_field( + &as->isys->adev->dev, + (struct ici_isys_mipi_packet_header*) + sb->buffer); + break; + } + } + spin_unlock_irqrestore(&buf_list->short_packet_queue_lock, flags); +} + +void ici_isys_frame_buf_short_packet_destroy( + struct ici_isys_stream* as) +{ + struct ici_isys_frame_buf_list *buf_list = + &as->buf_list; + unsigned int i; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); +#else + unsigned long attrs; + attrs = DMA_ATTR_NON_CONSISTENT; +#endif + if (!buf_list->short_packet_bufs) + return; + + for (i = 0 ; i < ICI_ISYS_SHORT_PACKET_BUFFER_NUM ; + i++) { + if (buf_list->short_packet_bufs[i].buffer) +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_free_attrs(&as->isys->adev->dev, + buf_list->short_packet_bufs[i].length, + buf_list->short_packet_bufs[i].buffer, + buf_list->short_packet_bufs[i].dma_addr, &attrs); +#else + dma_free_attrs(&as->isys->adev->dev, + buf_list->short_packet_bufs[i].length, + buf_list->short_packet_bufs[i].buffer, + buf_list->short_packet_bufs[i].dma_addr, attrs); +#endif + } + kfree(buf_list->short_packet_bufs); + buf_list->short_packet_bufs = NULL; +} + +int ici_isys_frame_buf_short_packet_setup( + struct ici_isys_stream* as, + struct ici_stream_format* source_fmt) +{ + struct ici_isys_frame_buf_list *buf_list = + &as->buf_list; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; +#else + unsigned long attrs; +#endif + unsigned int 
i; + size_t buf_size; + + buf_size = + ICI_ISYS_SHORT_PACKET_BUF_SIZE(source_fmt->ffmt.height); + buf_list->num_short_packet_lines = + ICI_ISYS_SHORT_PACKET_PKT_LINES(source_fmt->ffmt.height); + + INIT_LIST_HEAD(&buf_list->short_packet_incoming); + INIT_LIST_HEAD(&buf_list->short_packet_active); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); +#else + attrs = DMA_ATTR_NON_CONSISTENT; +#endif + + as->ip.cur_field = ICI_FIELD_TOP; + + buf_list->short_packet_bufs = kzalloc( + sizeof(struct ici_frame_short_buf) * + ICI_ISYS_SHORT_PACKET_BUFFER_NUM, GFP_KERNEL); + if (!buf_list->short_packet_bufs) + return -ENOMEM; + + for (i = 0 ; i < ICI_ISYS_SHORT_PACKET_BUFFER_NUM ; + i++) { + struct ici_frame_short_buf* sb = + &buf_list->short_packet_bufs[i]; + sb->buf_id = i; + sb->buf_list = buf_list; + sb->length = buf_size; + sb->dev = &as->isys->adev->dev; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + sb->buffer = dma_alloc_attrs( + sb->dev, buf_size, &sb->dma_addr, GFP_KERNEL, &attrs); +#else + sb->buffer = dma_alloc_attrs( + sb->dev, buf_size, &sb->dma_addr, GFP_KERNEL, attrs); +#endif + if (!sb->buffer) { + ici_isys_frame_buf_short_packet_destroy(as); + return -ENOMEM; + } + list_add(&sb->node, &buf_list->short_packet_incoming); + } + return 0; +} + +int ici_isys_frame_buf_init( + struct ici_isys_frame_buf_list* buf_list) +{ + buf_list->drv_priv = NULL; + mutex_init(&buf_list->mutex); + spin_lock_init(&buf_list->lock); + spin_lock_init(&buf_list->short_packet_queue_lock); + INIT_LIST_HEAD(&buf_list->getbuf_list); + INIT_LIST_HEAD(&buf_list->putbuf_list); + INIT_LIST_HEAD(&buf_list->interlacebuf_list); + init_waitqueue_head(&buf_list->wait); + return 0; +} + +#endif /* #ICI_ENABLED */ diff --git a/drivers/media/pci/intel/ici/ici-isys-frame-buf.h b/drivers/media/pci/intel/ici/ici-isys-frame-buf.h new file mode 100644 index 000000000000..771967ce5360 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-frame-buf.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_FRAME_BUF_H +#define ICI_ISYS_FRAME_BUF_H + +#include +#include +#include + +struct ici_isys_pipeline; +struct ia_css_isys_frame_buff_set; +struct ici_stream_device; +struct ici_isys_stream; +struct ia_css_isys_resp_info; + +struct ici_kframe_plane { + struct device *dev; + unsigned int mem_type; + unsigned long length; + + /* For user_ptr */ + unsigned long page_offset; + + /* Common */ + dma_addr_t dma_addr; + struct sg_table *sgt; + + /* For DMA operation */ + int fd; + struct dma_buf_attachment *db_attach; + struct dma_buf *dbdbuf; + void *kaddr; + + /* For mediator */ + int npages; + u64 page_table_ref; +}; + +struct ici_kframe_info { + struct ici_kframe_plane planes[ICI_MAX_PLANES]; + int num_planes; +}; + +typedef enum frame_buf_state_ { + ICI_BUF_NOT_SET, + ICI_BUF_PREPARED, + ICI_BUF_ACTIVE, + ICI_BUF_READY, + ICI_BUF_DONE, +} frame_buf_state; + +struct ici_frame_buf_wrapper { + struct ici_kframe_info kframe_info; + struct ici_frame_info frame_info; + struct list_head node; + struct ici_isys_frame_buf_list *buf_list; + struct list_head uos_node; + struct ici_isys_frame_buf_list *uos_buf_list; + uint32_t buf_id; + frame_buf_state state; +}; + +struct ici_frame_short_buf { + void* buffer; + dma_addr_t dma_addr; + struct device* dev; + size_t length; + struct list_head node; + struct ici_isys_frame_buf_list *buf_list; + uint32_t buf_id; +}; + +struct 
ici_isys_frame_buf_list { + void *drv_priv; + struct mutex mutex; + struct list_head getbuf_list; + struct list_head putbuf_list; + + struct list_head interlacebuf_list; + + uint32_t css_pin_type; + unsigned int fw_output; + spinlock_t lock; + wait_queue_head_t wait; + struct ici_stream_device *strm_dev; + spinlock_t short_packet_queue_lock; + struct list_head short_packet_incoming; + struct list_head short_packet_active; + struct ici_frame_short_buf* short_packet_bufs; + uint32_t num_short_packet_lines; + uint32_t short_packet_output_pin; +}; + +int ici_isys_get_buf(struct ici_isys_stream *as, + struct ici_frame_info + *user_frame_info); + +int ici_isys_get_buf_virt(struct ici_isys_stream *as, + struct ici_frame_buf_wrapper *frame_buf, + struct page **pages); + +int ici_isys_put_buf(struct ici_isys_stream *as, + struct ici_frame_info + *user_frame_info, unsigned int f_flags); + +int ici_isys_frame_buf_init(struct + ici_isys_frame_buf_list + *buf_list); + +void ici_isys_frame_buf_ready( + struct ici_isys_pipeline *ip, + struct ia_css_isys_resp_info *info); + +int ici_isys_frame_buf_add_next( + struct ici_isys_stream *as, + struct ia_css_isys_frame_buff_set *css_buf); + +void ici_isys_frame_buf_stream_cancel( + struct ici_isys_stream *as); + +int ici_isys_frame_buf_short_packet_setup( + struct ici_isys_stream* as, + struct ici_stream_format* source_fmt); + +void ici_isys_frame_buf_short_packet_destroy( + struct ici_isys_stream* as); + +void ici_isys_frame_short_packet_ready( + struct ici_isys_pipeline *ip, + struct ia_css_isys_resp_info *info); + +void ici_isys_frame_buf_capture_done( + struct ici_isys_pipeline *ip, + struct ia_css_isys_resp_info *info); + +#endif /* ICI_ISYS_FRAME_BUF_H */ diff --git a/drivers/media/pci/intel/ici/ici-isys-pipeline-device.c b/drivers/media/pci/intel/ici/ici-isys-pipeline-device.c new file mode 100644 index 000000000000..a4e4d77b2a64 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-pipeline-device.c @@ -0,0 +1,493 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include "./ici/ici-isys.h" + +#ifdef ICI_ENABLED + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "./ici/ici-isys-pipeline-device.h" +#include "./ici/ici-isys-pipeline.h" + +static struct class *pipeline_class; + +static struct ici_isys_node* find_node( + struct ici_isys_pipeline_device *pipe_dev, + unsigned id); + +static int pipeline_device_open(struct inode *inode, struct file *file) +{ + struct ici_isys_pipeline_device *pipe_dev = + inode_to_ici_isys_pipeline_device(inode); + int rval = 0; + + file->private_data = pipe_dev; + + get_device(&pipe_dev->dev); + + DEBUGK("pipeline_device_open\n"); + + return rval; +} + +static int pipeline_device_release(struct inode *inode, + struct file *file) +{ + struct ici_isys_pipeline_device *pipe_dev = + inode_to_ici_isys_pipeline_device(inode); + + put_device(&pipe_dev->dev); + + DEBUGK("pipeline_device_release\n"); + + return 0; +} + +static int pipeline_enum_links(struct file *file, void *fh, + struct ici_links_query *links_query) +{ + struct ici_isys_node *node; + struct node_pipe* pipe; + struct node_pad* pad; + struct ici_isys_pipeline_device *pipe_dev = + file->private_data; + struct ici_link_desc* link; + + node = find_node(pipe_dev, links_query->pad.node_id); + if (!node) + return -ENODEV; + if (links_query->pad.pad_idx >= node->nr_pads) + return -EINVAL; + + pad = &node->node_pad[links_query->pad.pad_idx]; + if (pad->pad_id != 
links_query->pad.pad_idx) + return -EINVAL; + + links_query->links_cnt = 0; + + list_for_each_entry(pipe, &node->node_pipes, list_entry) { + if (pipe->src_pad != pad && pipe->sink_pad != pad) + continue; + link = &links_query->links[links_query->links_cnt]; + link->source.node_id = pipe->src_pad->node->node_id; + link->source.pad_idx = pipe->src_pad->pad_id; + link->source.flags = pipe->src_pad->flags; + link->sink.node_id = pipe->sink_pad->node->node_id; + link->sink.pad_idx = pipe->sink_pad->pad_id; + link->sink.flags = pipe->sink_pad->flags; + link->flags = pipe->flags; + ++links_query->links_cnt; + if (WARN_ON(links_query->links_cnt >= + ICI_MAX_LINKS)) { + dev_warn(&pipe_dev->dev, + "Too many links defined. %d\n", + links_query->links_cnt); + break; + } + } + return 0; +} + +static int pipeline_enum_nodes(struct file *file, void *fh, + struct ici_node_desc *node_desc) +{ + struct ici_isys_pipeline_device* pipeline_dev = + file->private_data; + struct ici_isys_node *node; + struct ici_pad_desc* pad_desc; + int pad; + bool found = false; + + node_desc->node_count = 0; + list_for_each_entry(node, &pipeline_dev->nodes, node_entry) { + node_desc->node_count++; + if (node_desc->node_id != node->node_id) + continue; + + /* fill out the node data */ + found = true; + memcpy(node_desc->name, node->name, + sizeof(node_desc->name)); + node_desc->nr_pads = node->nr_pads; + for (pad=0; pad < node->nr_pads; pad++) { + pad_desc = &node_desc->node_pad[pad]; + pad_desc->pad_idx = node->node_pad[pad].pad_id; + pad_desc->node_id = node->node_id; + pad_desc->flags = node->node_pad[pad].flags; + } + } + if (node_desc->node_id == -1) + return 0; + if (!found) + return -ENODEV; + return 0; +} + +static struct ici_isys_node* find_node( + struct ici_isys_pipeline_device *pipe_dev, + unsigned id) +{ + struct ici_isys_node *ici_node; + + list_for_each_entry(ici_node, &pipe_dev->nodes, node_entry) { + if (ici_node->node_id == id) + return ici_node; + } + return NULL; +} + +static int ici_pipeline_get_supported_format(struct file *file, + void *fh, + struct ici_pad_supported_format_desc *format_desc) +{ + struct ici_isys_node *node; + struct ici_isys_pipeline_device *pipe_dev = + file->private_data; + + node = find_node(pipe_dev, format_desc->pad.node_id); + if (!node) + return -ENODEV; + + if (node->node_get_pad_supported_format) + return node->node_get_pad_supported_format(node, + format_desc); + return -ENODEV; +} + +static struct node_pipe* find_pipe( + struct ici_isys_node* src_node, + struct ici_link_desc *link) +{ + struct node_pipe *np; + + list_for_each_entry(np, &src_node->node_pipes, list_entry) { + if (np->src_pad->node->node_id == link->source.node_id + && np->src_pad->pad_id == link->source.pad_idx + && np->sink_pad->node->node_id == + link->sink.node_id + && np->sink_pad->pad_id == link->sink.pad_idx) + + return np; + } + + return NULL; +} + +static int ici_setup_link(struct file *file, void *fh, + struct ici_link_desc *link) +{ + int rval = 0; + struct ici_isys_pipeline_device *pipe_dev = + file->private_data; + struct ici_isys_node *src_node, *sink_node; + struct node_pipe *np; + + src_node = find_node(pipe_dev, link->source.node_id); + if (!src_node) + return -ENODEV; + + sink_node = find_node(pipe_dev, link->sink.node_id); + if (!sink_node) + return -ENODEV; + + np = find_pipe(src_node, link); + + if (np) { + np->flags = link->flags; + } else { + dev_warn(&pipe_dev->dev, "Link not found\n"); + return -ENODEV; + } + + np = find_pipe(sink_node, link); + if (np) + np->flags = link->flags | 
ICI_LINK_FLAG_BACKLINK; + else + dev_warn(&pipe_dev->dev, "Backlink not found\n"); + + return rval; +} + +int ici_pipeline_set_ffmt(struct file *file, void *fh, + struct ici_pad_framefmt *ffmt) +{ + struct ici_isys_pipeline_device *pipe_dev = + file->private_data; + struct ici_isys_node *node; + int rval = -ENODEV; + + node = find_node(pipe_dev, ffmt->pad.node_id); + if (!node) + return -ENODEV; + + if (node->node_set_pad_ffmt) + rval = node->node_set_pad_ffmt(node, ffmt); + + return rval; +} + +int ici_pipeline_get_ffmt(struct file *file, void *fh, + struct ici_pad_framefmt *ffmt) +{ + struct ici_isys_pipeline_device *pipe_dev = + file->private_data; + struct ici_isys_node *node; + int rval = -ENODEV; + + node = find_node(pipe_dev, ffmt->pad.node_id); + if (!node) + return -ENODEV; + + if (node->node_get_pad_ffmt) + rval = node->node_get_pad_ffmt(node, ffmt); + + return rval; +} + +static int ici_pipeline_set_sel(struct file *file, void *fh, + struct ici_pad_selection *pad_sel) +{ + struct ici_isys_node *node; + struct ici_isys_pipeline_device *pipe_dev = + file->private_data; + + node = find_node(pipe_dev, pad_sel->pad.node_id); + if (!node) + return -ENODEV; + + if (node->node_set_pad_sel) + return node->node_set_pad_sel(node, pad_sel); + return -ENODEV; +} + +static int ici_pipeline_get_sel(struct file *file, void *fh, + struct ici_pad_selection *pad_sel) +{ + struct ici_isys_node *node; + struct ici_isys_pipeline_device *pipe_dev = + file->private_data; + + node = find_node(pipe_dev, pad_sel->pad.node_id); + if (!node) + return -ENODEV; + + if (node->node_get_pad_sel) + return node->node_get_pad_sel(node, pad_sel); + return -ENODEV; +} + +static long ici_pipeline_ioctl_common(void __user *up, + struct file *file, unsigned int ioctl_cmd, + unsigned long ioctl_arg) +{ + union { + struct ici_node_desc node_desc; + struct ici_link_desc link; + struct ici_pad_framefmt pad_prop; + struct ici_pad_supported_format_desc + format_desc; + struct ici_links_query links_query; + struct ici_pad_selection pad_sel; + } isys_ioctl_cmd_args; + int err = 0; + struct ici_isys_pipeline_device *pipe_dev = + file->private_data; + const struct ici_pipeline_ioctl_ops *ops; + + if (_IOC_SIZE(ioctl_cmd) > sizeof(isys_ioctl_cmd_args)) + return -ENOTTY; + + if (_IOC_DIR(ioctl_cmd) & _IOC_WRITE) { + err = copy_from_user(&isys_ioctl_cmd_args, up, + _IOC_SIZE(ioctl_cmd)); + if (err) + return -EFAULT; + } + + mutex_lock(&pipe_dev->mutex); + ops = pipe_dev->pipeline_ioctl_ops; + switch(ioctl_cmd) { + case ICI_IOC_ENUM_NODES: + err = ops->pipeline_enum_nodes(file, pipe_dev, + &isys_ioctl_cmd_args.node_desc); + break; + case ICI_IOC_ENUM_LINKS: + err = ops->pipeline_enum_links(file, pipe_dev, + &isys_ioctl_cmd_args.links_query); + break; + case ICI_IOC_SETUP_PIPE: + err = ops->pipeline_setup_pipe(file, pipe_dev, + &isys_ioctl_cmd_args.link); + break; + case ICI_IOC_SET_FRAMEFMT: + err = ops->pad_set_ffmt(file, pipe_dev, + &isys_ioctl_cmd_args.pad_prop); + break; + case ICI_IOC_GET_FRAMEFMT: + err = ops->pad_get_ffmt(file, pipe_dev, + &isys_ioctl_cmd_args.pad_prop); + break; + case ICI_IOC_GET_SUPPORTED_FRAMEFMT: + err = ops->pad_get_supported_format(file, pipe_dev, + &isys_ioctl_cmd_args.format_desc); + break; + case ICI_IOC_SET_SELECTION: + err = ops->pad_set_sel(file, pipe_dev, + &isys_ioctl_cmd_args.pad_sel); + break; + case ICI_IOC_GET_SELECTION: + err = ops->pad_get_sel(file, pipe_dev, + &isys_ioctl_cmd_args.pad_sel); + break; + default: + err = -ENOTTY; + break; + } + + mutex_unlock(&pipe_dev->mutex); + if (err < 
0) + return err; + + if (_IOC_DIR(ioctl_cmd) & _IOC_READ) { + err = copy_to_user(up, &isys_ioctl_cmd_args, + _IOC_SIZE(ioctl_cmd)); + if (err) + return -EFAULT; + } + + return 0; +} + +static long ici_pipeline_ioctl(struct file *file, + unsigned int ioctl_cmd, unsigned long ioctl_arg) +{ + long status = 0; + void __user *up = (void __user *)ioctl_arg; + status = ici_pipeline_ioctl_common(up, file, ioctl_cmd, + ioctl_arg); + + return status; +} + +static long ici_pipeline_ioctl32(struct file *file, + unsigned int ioctl_cmd, unsigned long ioctl_arg) +{ + long status = 0; + void __user *up = compat_ptr(ioctl_arg); + status = ici_pipeline_ioctl_common(up, file, ioctl_cmd, + ioctl_arg); + + return status; +} + +static const struct ici_pipeline_ioctl_ops pipeline_ioctls = +{ + .pipeline_setup_pipe = ici_setup_link, + .pipeline_enum_nodes = pipeline_enum_nodes, + .pipeline_enum_links = pipeline_enum_links, + .pad_set_ffmt = ici_pipeline_set_ffmt, + .pad_get_ffmt = ici_pipeline_get_ffmt, + .pad_get_supported_format = + ici_pipeline_get_supported_format, + .pad_set_sel = ici_pipeline_set_sel, + .pad_get_sel = ici_pipeline_get_sel, + +}; + +static const struct file_operations ici_isys_pipeline_fops = +{ + .owner = THIS_MODULE, + .open = pipeline_device_open, + .unlocked_ioctl = ici_pipeline_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ici_pipeline_ioctl32, +#endif + .release = pipeline_device_release, +}; + +static void pipeline_device_main_release(struct device *sd) +{ +} + +int pipeline_device_register( + struct ici_isys_pipeline_device *pipe_dev, + struct ici_isys *isys) +{ + int rval = 0; + + pipeline_class = + class_create(THIS_MODULE, + ICI_PIPELINE_DEVICE_NAME); + if (IS_ERR(pipeline_class)) { + printk(KERN_WARNING "Failed to register device class %s\n", + ICI_PIPELINE_DEVICE_NAME); + return PTR_ERR(pipeline_class); + } + + pipe_dev->parent = &isys->adev->dev; + pipe_dev->minor = -1; + + cdev_init(&pipe_dev->cdev, &ici_isys_pipeline_fops); + pipe_dev->cdev.owner = ici_isys_pipeline_fops.owner; + + rval = cdev_add(&pipe_dev->cdev, + MKDEV(MAJOR_PIPELINE, MINOR_PIPELINE), 1); + if (rval) { + printk(KERN_ERR "%s: failed to add cdevice\n", __func__); + goto fail; + } + + pipe_dev->dev.class = pipeline_class; + pipe_dev->dev.devt = MKDEV(MAJOR_PIPELINE, MINOR_PIPELINE); + pipe_dev->dev.parent = pipe_dev->parent; + pipe_dev->dev.release = pipeline_device_main_release; + dev_set_name(&pipe_dev->dev, "%s", + ICI_PIPELINE_DEVICE_NAME); + rval = device_register(&pipe_dev->dev); + if (rval < 0) { + printk(KERN_ERR "%s: device_register failed\n", __func__); + goto out_cdev_del; + } + + strlcpy(pipe_dev->name, pipe_dev->dev.kobj.name, + sizeof(pipe_dev->name)); + pipe_dev->minor = MINOR_PIPELINE; + + DEBUGK("Device registered: %s\n", pipe_dev->name); + pipe_dev->pipeline_ioctl_ops = &pipeline_ioctls; + mutex_init(&pipe_dev->mutex); + INIT_LIST_HEAD(&pipe_dev->nodes); + + return 0; + +out_cdev_del: + cdev_del(&pipe_dev->cdev); + +fail: + return rval; +} +EXPORT_SYMBOL(pipeline_device_register); + +void pipeline_device_unregister( + struct ici_isys_pipeline_device* pipe_dev) +{ + DEBUGK("Pipeline device unregistering..."); + device_unregister(&pipe_dev->dev); + cdev_del(&pipe_dev->cdev); + class_destroy(pipeline_class); + mutex_destroy(&pipe_dev->mutex); +} +EXPORT_SYMBOL(pipeline_device_unregister); + + +#endif /*ICI_ENABLED*/ diff --git a/drivers/media/pci/intel/ici/ici-isys-pipeline-device.h b/drivers/media/pci/intel/ici/ici-isys-pipeline-device.h new file mode 100644 index 000000000000..b218b4d1f10a 
--- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-pipeline-device.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_PIPELINE_DEVICE_H +#define ICI_ISYS_PIPELINE_DEVICE_H + +#include +#include +#include +#include +#include + +struct ici_pipeline_ioctl_ops; +struct ici_link_desc; +struct ici_pad_supported_format_desc; + +struct ici_isys_pipeline_device { + struct cdev cdev; + struct device dev; + struct device *parent; + int minor; + char name[32]; + struct mutex mutex; + const struct file_operations *fops; + struct list_head nodes; + const struct ici_pipeline_ioctl_ops *pipeline_ioctl_ops; + unsigned next_node_id; +}; + +/* Pipeline IOCTLs */ +struct ici_pipeline_ioctl_ops { + int (*pipeline_enum_nodes)(struct file *file, void *fh, + struct ici_node_desc *node_desc); + int (*pipeline_enum_links)(struct file *file, void *fh, + struct ici_links_query *links_query); + int (*pipeline_setup_pipe)(struct file *file, void *fh, + struct ici_link_desc *link); + int (*pad_set_ffmt)(struct file *file, void *fh, + struct ici_pad_framefmt* pad_ffmt); + int (*pad_get_ffmt)(struct file *file, void *fh, + struct ici_pad_framefmt* pad_ffmt); + int (*pad_get_supported_format)(struct file *file, void *fh, + struct ici_pad_supported_format_desc *format_desc); + int (*pad_set_sel)(struct file *file, void *fh, + struct ici_pad_selection* pad_sel); + int (*pad_get_sel)(struct file *file, void *fh, + struct ici_pad_selection* pad_sel); +}; + +int pipeline_device_register( + struct ici_isys_pipeline_device *pipe_dev, + struct ici_isys *isys); +void pipeline_device_unregister(struct ici_isys_pipeline_device + *pipe_dev); + +#define inode_to_ici_isys_pipeline_device(inode) \ + container_of((inode)->i_cdev,\ + struct ici_isys_pipeline_device, cdev) + +#endif /*ICI_ISYS_PIPELINE_DEVICE_H */ diff --git a/drivers/media/pci/intel/ici/ici-isys-pipeline.c b/drivers/media/pci/intel/ici/ici-isys-pipeline.c new file mode 100644 index 000000000000..f60cda378c18 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-pipeline.c @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include "./ici/ici-isys.h" + +#ifdef ICI_ENABLED + +#include "./ici/ici-isys-pipeline.h" + +int ici_isys_pipeline_init(struct ici_isys_pipeline *ip) +{ + return 0; +} + +int ici_isys_pipeline_node_init( + struct ici_isys *isys, + struct ici_isys_node *node, + const char* name, + unsigned num_pads, + struct node_pad *node_pads) +{ + unsigned int pad_id; + + mutex_lock(&isys->pipeline_dev.mutex); + node->parent = &isys->pipeline_dev; + snprintf(node->name, sizeof(node->name), "%s", name); + if (num_pads > ICI_MAX_PADS) { + dev_warn(&isys->adev->dev, + "Too many external pads %d\n", num_pads); + num_pads = ICI_MAX_PADS; + } + node->nr_pads = num_pads; + node->node_pad = node_pads; + node->nr_pipes = 0; + node->node_id = isys->pipeline_dev.next_node_id++; + + INIT_LIST_HEAD(&node->node_entry); + INIT_LIST_HEAD(&node->iterate_node); + INIT_LIST_HEAD(&node->node_pipes); + + for (pad_id = 0; pad_id < num_pads; pad_id++) { + node->node_pad[pad_id].node = node; + node->node_pad[pad_id].pad_id = pad_id; + } + + list_add_tail(&node->node_entry, + &node->parent->nodes); + dev_info(&isys->adev->dev, + "Setup node \"%s\" with %d pads\n", + node->name, + node->nr_pads); + mutex_unlock(&isys->pipeline_dev.mutex); + return 0; +} + +void node_pads_cleanup(struct ici_isys_node *node) +{ + struct 
node_pipe *tmp, *q, *np; + list_for_each_entry_safe(np, q, &node->node_pipes, list_entry) { + tmp = np; + list_del(&np->list_entry); + kfree(tmp); + } +} + +static struct node_pipe* node_pad_add_link(struct ici_isys_node *node) +{ + struct node_pipe *np; + np = kzalloc(sizeof(*np), GFP_KERNEL); + if (!np) + return NULL; + + list_add_tail(&np->list_entry, &node->node_pipes); + node->nr_pipes++; + return np; +} + +int node_pad_create_link(struct ici_isys_node *src, + u16 src_pad, struct ici_isys_node *sink, + u16 sink_pad, u32 flags ) +{ + int rval = 0; + struct node_pipe *np; + struct node_pipe *rnp; + if(!src || !sink || !src->parent) + return -EINVAL; + + mutex_lock(&src->parent->mutex); + np = node_pad_add_link(src); + if(!np) { + rval = -ENOMEM; + goto cleanup_mutex; + } + + np->src_pad = &src->node_pad[src_pad]; + np->sink_pad = &sink->node_pad[sink_pad]; + np->flags = flags; + np->rev_pipe = NULL; + + rnp = node_pad_add_link(sink); + if(!rnp) { + rval = -ENOMEM; + goto cleanup_mutex; + } + + rnp->src_pad = &src->node_pad[src_pad]; + rnp->sink_pad = &sink->node_pad[sink_pad]; + rnp->flags = flags | ICI_LINK_FLAG_BACKLINK; + rnp->rev_pipe = np; + np->rev_pipe = rnp; + +cleanup_mutex: + mutex_unlock(&src->parent->mutex); + return rval; +} + +static int __ici_isys_pipeline_for_each_node( + ici_isys_pipeline_node_cb_fn cb_fn, + void* cb_data, + struct ici_isys_node* start_node, + struct ici_isys_pipeline *ip_active, + bool backwards) +{ + struct node_pipe *pipe; + struct ici_isys_node* node; + struct ici_isys_node* next_node = NULL; + int rval; + LIST_HEAD(node_list); + + if (!cb_fn || !start_node || !start_node->parent) + return -EINVAL; + + rval = cb_fn(cb_data, start_node, NULL); + if (rval) + return rval; + list_add_tail(&start_node->iterate_node, &node_list); + while (!list_empty(&node_list)) { + node = list_entry(node_list.next, + struct ici_isys_node, + iterate_node); + list_del(&node->iterate_node); + list_for_each_entry(pipe, &node->node_pipes, + list_entry) { + if (backwards && !(pipe->flags & ICI_LINK_FLAG_BACKLINK)) + continue; + else if (!backwards && (pipe->flags & ICI_LINK_FLAG_BACKLINK)) + continue; + if (ip_active && !(pipe->flags & ICI_LINK_FLAG_ENABLED)) + continue; + next_node = (backwards ? 
pipe->src_pad->node : + pipe->sink_pad->node); + rval = cb_fn(cb_data, next_node, pipe); + if (rval) + return rval; + list_add_tail(&next_node->iterate_node, + &node_list); + } + } + return 0; +} + +int ici_isys_pipeline_for_each_node( + ici_isys_pipeline_node_cb_fn cb_fn, + void* cb_data, + struct ici_isys_node* start_node, + struct ici_isys_pipeline *ip_active, + bool backwards) +{ + int rval = 0; + mutex_lock(&start_node->parent->mutex); + rval = __ici_isys_pipeline_for_each_node(cb_fn, + cb_data, start_node, ip_active, backwards); + mutex_unlock(&start_node->parent->mutex); + return rval; +} + +#endif /* ICI_ENABLED */ diff --git a/drivers/media/pci/intel/ici/ici-isys-pipeline.h b/drivers/media/pci/intel/ici/ici-isys-pipeline.h new file mode 100644 index 000000000000..8004d8df0c06 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-pipeline.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_PIPELINE_H +#define ICI_ISYS_PIPELINE_H + +#include +#include +#include + +#include + +#define ICI_ISYS_OUTPUT_PINS 11 +#define ICI_NUM_CAPTURE_DONE 2 +#define ICI_ISYS_MAX_PARALLEL_SOF 2 + +struct ici_isys_node; +struct ici_isys_subdev; +struct ici_isys_csi2_be; +struct ici_isys_csi2; +struct ici_isys_tpg; +struct ia_css_isys_resp_info; +struct ici_isys_pipeline; +struct ici_isys_stream; +struct node_pad; + +struct ici_sequence_info { + unsigned int sequence; + u64 timestamp; +}; + +struct ici_output_pin_data { + void (*pin_ready)(struct ici_isys_pipeline *ip, + struct ia_css_isys_resp_info *info); + struct ici_isys_frame_buf_list *buf_list; +}; + +struct ici_isys_pipeline { + struct node_pipeline pipe; + struct ici_isys_pipeline_device *pipeline_dev; + int source; /* SSI stream source */ + int stream_handle; /* stream handle for CSS API */ + unsigned int nr_output_pins; /* How many firmware pins? 
*/ + struct ici_isys_csi2_be *csi2_be; + struct ici_isys_csi2 *csi2; + struct ici_isys_subdev *asd_source; + int asd_source_pad_id; + unsigned int streaming; + struct completion stream_open_completion; + struct completion stream_close_completion; + struct completion stream_start_completion; + struct completion stream_stop_completion; + struct completion capture_ack_completion; + struct ici_isys *isys; + + void (*capture_done[ICI_NUM_CAPTURE_DONE]) + (struct ici_isys_pipeline *ip, + struct ia_css_isys_resp_info *resp); + struct ici_output_pin_data + output_pins[ICI_ISYS_OUTPUT_PINS]; + bool interlaced; + int error; + int cur_field; + unsigned int short_packet_source; + unsigned int short_packet_trace_index; + unsigned int vc; +}; + +int ici_isys_pipeline_node_init( + struct ici_isys *isys, + struct ici_isys_node *node, + const char* name, + unsigned num_pads, + struct node_pad *node_pads); + +int node_pad_create_link(struct ici_isys_node *src, + u16 src_pad, struct ici_isys_node *sink, + u16 sink_pad, u32 flags ); + +void node_pads_cleanup(struct ici_isys_node *node); + +typedef int (*ici_isys_pipeline_node_cb_fn)(void* cb_data, + struct ici_isys_node* node, + struct node_pipe* pipe); + +int ici_isys_pipeline_for_each_node( + ici_isys_pipeline_node_cb_fn cb_fn, + void* cb_data, + struct ici_isys_node* start_node, + struct ici_isys_pipeline* ip_active, + bool backwards); + +#define ici_nodepipe_to_pipeline(__np) \ + container_of(__np, struct ici_isys_pipeline, pipe) + +#endif /* ICI_ISYS_PIPELINE_H */ diff --git a/drivers/media/pci/intel/ici/ici-isys-stream-device.c b/drivers/media/pci/intel/ici/ici-isys-stream-device.c new file mode 100644 index 000000000000..96336980db7d --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-stream-device.c @@ -0,0 +1,397 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "./ici/ici-isys-stream-device.h" +#include "./ici/ici-isys-pipeline-device.h" + +#define MAX_STREAM_DEVICES 64 + +static dev_t ici_stream_dev_t; +static struct class* stream_class; +static int stream_devices_registered = 0; +static int stream_device_init = 0; + +static int ici_stream_init(void); +static void ici_stream_exit(void); + +static int stream_device_open(struct inode *inode, struct file *file) +{ + struct ici_stream_device *strm_dev = inode_to_intel_ipu_stream_device(inode); + int rval = 0; + + get_device(&strm_dev->dev); + + file->private_data = strm_dev; + if (strm_dev->fops->open) + rval = strm_dev->fops->open(inode, file); + + if (rval) + put_device(&strm_dev->dev); + + return rval; +} + +static int stream_device_release(struct inode *inode, struct file *file) +{ + struct ici_stream_device *strm_dev = inode_to_intel_ipu_stream_device(inode); + int rval = 0; + + if (strm_dev->fops->release) + rval = strm_dev->fops->release(inode, file); + + put_device(&strm_dev->dev); + return rval; +} + +static unsigned int ici_fop_poll(struct file *file, struct poll_table_struct *poll) +{ + struct ici_stream_device *strm_dev = file->private_data; + unsigned int rval = POLLERR | POLLHUP; + + if (strm_dev->fops->poll) + rval = strm_dev->fops->poll(file, poll); + else + return DEFAULT_POLLMASK; + + return rval; +} + +#ifdef CONFIG_COMPAT +struct timeval32 { + __u32 tv_sec; + __u32 tv_usec; +} __attribute__((__packed__)); + +struct ici_frame_plane32 { + __u32 bytes_used; + __u32 length; + union { + compat_uptr_t userptr; + __s32 dmafd; 
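+	/* compat view of ici_frame_plane.mem: 32-bit user pointer or dma-buf fd */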
+	} mem;
+	__u32 data_offset;
+	__u32 reserved[2];
+} __attribute__((__packed__));
+
+struct ici_frame_info32 {
+	__u32 frame_type;
+	__u32 field;
+	__u32 flag;
+	__u32 frame_buf_id;
+	struct timeval32 frame_timestamp;
+	__u32 frame_sequence_id;
+	__u32 mem_type; /* _DMA or _USER_PTR */
+	struct ici_frame_plane32 frame_planes[ICI_MAX_PLANES]; /* multi-planar */
+	__u32 num_planes; /* =1 single-planar, >1 multi-planar array size */
+	__u32 reserved[2];
+} __attribute__((__packed__));
+
+#define ICI_IOC_GET_BUF32 _IOWR(MAJOR_STREAM, 3, struct ici_frame_info32)
+#define ICI_IOC_PUT_BUF32 _IOWR(MAJOR_STREAM, 4, struct ici_frame_info32)
+
+static void copy_from_user_frame_info32(struct ici_frame_info *kp,
+	struct ici_frame_info32 __user *up)
+{
+	int i;
+	compat_uptr_t userptr;
+
+	get_user(kp->frame_type, &up->frame_type);
+	get_user(kp->field, &up->field);
+	get_user(kp->flag, &up->flag);
+	get_user(kp->frame_buf_id, &up->frame_buf_id);
+	get_user(kp->frame_timestamp.tv_sec, &up->frame_timestamp.tv_sec);
+	get_user(kp->frame_timestamp.tv_usec, &up->frame_timestamp.tv_usec);
+	get_user(kp->frame_sequence_id, &up->frame_sequence_id);
+	get_user(kp->mem_type, &up->mem_type);
+	get_user(kp->num_planes, &up->num_planes);
+	for (i = 0; i < kp->num_planes; i++) {
+		get_user(kp->frame_planes[i].bytes_used, &up->frame_planes[i].bytes_used);
+		get_user(kp->frame_planes[i].length, &up->frame_planes[i].length);
+		if (kp->mem_type == ICI_MEM_USERPTR) {
+			get_user(userptr, &up->frame_planes[i].mem.userptr);
+			kp->frame_planes[i].mem.userptr = (unsigned long)compat_ptr(userptr);
+		} else if (kp->mem_type == ICI_MEM_DMABUF) {
+			get_user(kp->frame_planes[i].mem.dmafd, &up->frame_planes[i].mem.dmafd);
+		}
+		get_user(kp->frame_planes[i].data_offset, &up->frame_planes[i].data_offset);
+	}
+}
+
+static void copy_to_user_frame_info32(struct ici_frame_info *kp,
+	struct ici_frame_info32 __user *up)
+{
+	int i;
+	compat_uptr_t userptr;
+
+	put_user(kp->frame_type, &up->frame_type);
+	put_user(kp->field, &up->field);
+	put_user(kp->flag, &up->flag);
+	put_user(kp->frame_buf_id, &up->frame_buf_id);
+	put_user(kp->frame_timestamp.tv_sec, &up->frame_timestamp.tv_sec);
+	put_user(kp->frame_timestamp.tv_usec, &up->frame_timestamp.tv_usec);
+	put_user(kp->frame_sequence_id, &up->frame_sequence_id);
+	put_user(kp->mem_type, &up->mem_type);
+	put_user(kp->num_planes, &up->num_planes);
+	for (i = 0; i < kp->num_planes; i++) {
+		put_user(kp->frame_planes[i].bytes_used, &up->frame_planes[i].bytes_used);
+		put_user(kp->frame_planes[i].length, &up->frame_planes[i].length);
+		if (kp->mem_type == ICI_MEM_USERPTR) {
+			userptr = ptr_to_compat((void __user *)
+				kp->frame_planes[i].mem.userptr);
+			put_user(userptr, &up->frame_planes[i].mem.userptr);
+		} else if (kp->mem_type == ICI_MEM_DMABUF) {
+			/* this copies towards user space, so put_user, not get_user */
+			put_user(kp->frame_planes[i].mem.dmafd, &up->frame_planes[i].mem.dmafd);
+		}
+		put_user(kp->frame_planes[i].data_offset, &up->frame_planes[i].data_offset);
+	}
+}
+
+static long ici_stream_ioctl32(struct file *file, __u32 ioctl_cmd,
+	unsigned long ioctl_arg)
+{
+	union {
+		struct ici_frame_info frame_info;
+		struct ici_stream_format sf;
+	} isys_ioctl_cmd_args;
+	int err = 0;
+	struct ici_stream_device *strm_dev = file->private_data;
+	void __user *up = compat_ptr(ioctl_arg);
+
+	mutex_lock(strm_dev->mutex);
+
+	switch (ioctl_cmd) {
+	case ICI_IOC_STREAM_ON:
+		err = strm_dev->ipu_ioctl_ops->ici_stream_on(file, strm_dev);
+		break;
+	case ICI_IOC_STREAM_OFF:
+		err = strm_dev->ipu_ioctl_ops->ici_stream_off(file, strm_dev);
+		break;
+	case ICI_IOC_GET_BUF32:
+		copy_from_user_frame_info32(&isys_ioctl_cmd_args.frame_info, up);
+		err = strm_dev->ipu_ioctl_ops->ici_get_buf(file, strm_dev, &isys_ioctl_cmd_args.frame_info);
+		if (err)
+			break;
+		copy_to_user_frame_info32(&isys_ioctl_cmd_args.frame_info, up);
+		break;
+	case ICI_IOC_PUT_BUF32:
+		copy_from_user_frame_info32(&isys_ioctl_cmd_args.frame_info, up);
+		err = strm_dev->ipu_ioctl_ops->ici_put_buf(file, strm_dev, &isys_ioctl_cmd_args.frame_info);
+		if (err)
+			break;
+		copy_to_user_frame_info32(&isys_ioctl_cmd_args.frame_info, up);
+		break;
+	case ICI_IOC_SET_FORMAT:
+		/* never return with strm_dev->mutex held; fall out via break */
+		if (_IOC_SIZE(ioctl_cmd) > sizeof(isys_ioctl_cmd_args)) {
+			err = -ENOTTY;
+			break;
+		}
+
+		if (copy_from_user(&isys_ioctl_cmd_args, up,
+				_IOC_SIZE(ioctl_cmd))) {
+			err = -EFAULT;
+			break;
+		}
+
+		err = strm_dev->ipu_ioctl_ops->ici_set_format(file, strm_dev, &isys_ioctl_cmd_args.sf);
+		if (err)
+			break;
+
+		if (copy_to_user(up, &isys_ioctl_cmd_args, _IOC_SIZE(ioctl_cmd)))
+			err = -EFAULT;
+		break;
+	default:
+		err = -ENOTTY;
+		break;
+	}
+
+	mutex_unlock(strm_dev->mutex);
+	return err;
+}
+#endif
+
+static long ici_stream_ioctl(struct file *file, unsigned int ioctl_cmd,
+	unsigned long ioctl_arg)
+{
+	union {
+		struct ici_frame_info frame_info;
+		struct ici_stream_format sf;
+	} isys_ioctl_cmd_args;
+	int err = 0;
+	struct ici_stream_device *strm_dev = file->private_data;
+	void __user *up = (void __user *)ioctl_arg;
+	bool copy = (ioctl_cmd != ICI_IOC_STREAM_ON &&
+		ioctl_cmd != ICI_IOC_STREAM_OFF);
+
+	if (copy) {
+		if (_IOC_SIZE(ioctl_cmd) > sizeof(isys_ioctl_cmd_args))
+			return -ENOTTY;
+
+		if (_IOC_DIR(ioctl_cmd) & _IOC_WRITE) {
+			err = copy_from_user(&isys_ioctl_cmd_args, up,
+				_IOC_SIZE(ioctl_cmd));
+			if (err)
+				return -EFAULT;
+		}
+	}
+
+	mutex_lock(strm_dev->mutex);
+
+	switch (ioctl_cmd) {
+	case ICI_IOC_STREAM_ON:
+		err = strm_dev->ipu_ioctl_ops->ici_stream_on(file, strm_dev);
+		break;
+	case ICI_IOC_STREAM_OFF:
+		err = strm_dev->ipu_ioctl_ops->ici_stream_off(file, strm_dev);
+		break;
+	case ICI_IOC_GET_BUF:
+		err = strm_dev->ipu_ioctl_ops->ici_get_buf(file, strm_dev, &isys_ioctl_cmd_args.frame_info);
+		break;
+	case ICI_IOC_PUT_BUF:
+		err = strm_dev->ipu_ioctl_ops->ici_put_buf(file, strm_dev, &isys_ioctl_cmd_args.frame_info);
+		break;
+	case ICI_IOC_SET_FORMAT:
+		err = strm_dev->ipu_ioctl_ops->ici_set_format(file, strm_dev, &isys_ioctl_cmd_args.sf);
+		break;
+	default:
+		err = -ENOTTY;
+		break;
+	}
+
+	mutex_unlock(strm_dev->mutex);
+	if (err)
+		return err;
+
+	if (copy && _IOC_DIR(ioctl_cmd) & _IOC_READ) {
+		err = copy_to_user(up, &isys_ioctl_cmd_args, _IOC_SIZE(ioctl_cmd));
+		if (err)
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static const struct file_operations ici_stream_fops = {
+	.owner = THIS_MODULE,
+	.open = stream_device_open, /* calls strm_dev->fops->open() */
+	.unlocked_ioctl = ici_stream_ioctl, /* calls strm_dev->ipu_ioctl_ops->() */
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = ici_stream_ioctl32,
+#endif
+	.release = stream_device_release, /* calls strm_dev->fops->release() */
+	.poll = ici_fop_poll, /* calls strm_dev->fops->poll() */
+};
+
+/* Called on device_unregister */
+static void base_device_release(struct device *sd)
+{
+}
+
+int stream_device_register(struct ici_stream_device *strm_dev)
+{
+	int rval = 0;
+	int num;
+
+	if (!stream_device_init) {
+		rval = ici_stream_init();
+		if (rval) {
+			printk(KERN_ERR "%s: failed to init stream device\n", __func__);
+			return rval;
+		}
+		stream_device_init++;
+	}
+	num = stream_devices_registered;
+
+	if (!(num <
MAX_STREAM_DEVICES)) { + printk(KERN_WARNING "%s: wrong minor of stream device: %d\n", + __func__, num); + return -EINVAL; + } + strm_dev->minor = -1; + + cdev_init(&strm_dev->cdev, &ici_stream_fops); + strm_dev->cdev.owner = ici_stream_fops.owner; + + rval = cdev_add(&strm_dev->cdev, MKDEV(MAJOR(ici_stream_dev_t), num), 1); + if (rval) { + printk(KERN_WARNING "%s: failed to add cdevice\n", __func__); + return rval; + } + + strm_dev->dev.class = stream_class; + strm_dev->dev.devt = MKDEV(MAJOR(ici_stream_dev_t), num); + strm_dev->dev.parent = strm_dev->dev_parent; + dev_set_name(&strm_dev->dev, "%s%d", ICI_STREAM_DEVICE_NAME, num); + rval = device_register(&strm_dev->dev); + if (rval < 0) { + printk(KERN_WARNING "%s: device_register failed\n", __func__); + cdev_del(&strm_dev->cdev); + return rval; + } + + /* Release function will be called on device unregister, + it is needed to avoid errors */ + strm_dev->dev.release = base_device_release; + strlcpy(strm_dev->name, strm_dev->dev.kobj.name, sizeof(strm_dev->name)); + strm_dev->minor = num; + + printk(KERN_INFO "Device registered: %s\n", strm_dev->name); + stream_devices_registered++; + + return 0; +} + +void stream_device_unregister(struct ici_stream_device *strm_dev) +{ + device_unregister(&strm_dev->dev); + cdev_del(&strm_dev->cdev); + + stream_devices_registered--; + if (!stream_devices_registered) { + ici_stream_exit(); + stream_device_init--; + } +} + +static int ici_stream_init(void) +{ + int rval; + ici_stream_dev_t = MKDEV(MAJOR_STREAM, 0); + + rval = register_chrdev_region(ici_stream_dev_t, + MAX_STREAM_DEVICES, ICI_STREAM_DEVICE_NAME); + if (rval) { + printk(KERN_WARNING "can't register intel_ipu_ici stream chrdev region (%d)\n", rval); + return rval; + } + + stream_class = class_create(THIS_MODULE, ICI_STREAM_DEVICE_NAME); + if (IS_ERR(stream_class)) { + unregister_chrdev_region(ici_stream_dev_t, MAX_STREAM_DEVICES); + printk(KERN_WARNING "Failed to register device class %s\n", ICI_STREAM_DEVICE_NAME); + return PTR_ERR(stream_class); + } + + return 0; +} + +static void ici_stream_exit(void) +{ + class_unregister(stream_class); + //class_destroy(stream_class); + unregister_chrdev_region(ici_stream_dev_t, MAX_STREAM_DEVICES); + + printk(KERN_INFO "intel_ipu_ici stream device unregistered\n"); +} + diff --git a/drivers/media/pci/intel/ici/ici-isys-stream-device.h b/drivers/media/pci/intel/ici/ici-isys-stream-device.h new file mode 100644 index 000000000000..5aec89450920 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-stream-device.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_STREAM_DEVICE_H +#define ICI_ISYS_STREAM_DEVICE_H + +#include +#include +#include +#include +#include + +#include "ici-isys-frame-buf.h" +#include "ici-isys-pipeline.h" +#include "virtio/intel-ipu4-virtio-common.h" + +struct ici_ioctl_ops; +struct ici_frame_plane; + +struct ici_stream_device { + struct device dev; /* intel stream base dev */ + struct cdev cdev; /* character device */ + struct device *dev_parent; /* parent device ipu_bus */ + struct mutex *mutex; + const struct file_operations *fops; /* standard Linux fops */ + struct ici_isys_frame_buf_list *frame_buf_list; /* frame buffer wrapper pointer */ + char name[32]; /* device name */ + int minor; /* driver minor */ + unsigned long flags; /* stream device state machine */ + const struct ici_ioctl_ops *ipu_ioctl_ops; + //Mediator param + int virt_dev_id; + struct ipu4_virtio_priv *virt_priv; +}; 
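As a usage sketch (editorial, not part of the patch): a capture driver embeds struct ici_stream_device, points ->mutex at its own lock (ici_stream_ioctl() takes it around every ioctl), supplies an inner file_operations table that the stream layer forwards open/release/poll to, and fills the ici_ioctl_ops table declared just below. All my_* names here are hypothetical; only the ici_* types and stream_device_register()/stream_device_unregister() come from this patch.

    #include <linux/module.h>
    #include <linux/mutex.h>
    #include <linux/fs.h>
    #include "ici-isys-stream-device.h"

    static DEFINE_MUTEX(my_lock);
    static struct ici_stream_device my_strm;

    static int my_set_format(struct file *file, void *fh,
                             struct ici_stream_format *psf)
    {
            return 0;       /* validate and program the hardware here */
    }

    static int my_stream_on(struct file *file, void *fh)
    {
            return 0;       /* kick the pipeline */
    }

    static int my_stream_off(struct file *file, void *fh)
    {
            return 0;
    }

    static int my_get_buf(struct file *file, void *fh,
                          struct ici_frame_info *frame)
    {
            return 0;       /* queue a userptr/dmabuf frame */
    }

    static int my_put_buf(struct file *file, void *fh,
                          struct ici_frame_info *frame)
    {
            return 0;       /* hand a filled frame back */
    }

    static const struct ici_ioctl_ops my_ioctl_ops = {
            .ici_set_format = my_set_format,
            .ici_stream_on  = my_stream_on,
            .ici_stream_off = my_stream_off,
            .ici_get_buf    = my_get_buf,
            .ici_put_buf    = my_put_buf,
    };

    /* Inner fops: ici_stream_fops forwards open/release/poll here after
     * taking a reference on the embedded struct device; NULL entries are
     * simply skipped by the stream layer. */
    static const struct file_operations my_inner_fops = {
            .owner = THIS_MODULE,
    };

    static int __init my_init(void)
    {
            my_strm.mutex = &my_lock;       /* ici_stream_ioctl() locks this */
            my_strm.fops = &my_inner_fops;
            my_strm.ipu_ioctl_ops = &my_ioctl_ops;
            return stream_device_register(&my_strm);
    }

    static void __exit my_exit(void)
    {
            stream_device_unregister(&my_strm);
    }
    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("Dual BSD/GPL");

stream_device_register() then allocates the next minor, adds the cdev, and creates the class device; the driver never touches the cdev or the ioctl copy-in/copy-out logic itself.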
+ +struct ici_ioctl_ops { + int (*ici_set_format) (struct file *file, void *fh, + struct ici_stream_format *psf); + int (*ici_stream_on) (struct file *file, void *fh); + int (*ici_stream_off) (struct file *file, void *fh); + int (*ici_get_buf) (struct file *file, void *fh, + struct ici_frame_info *fram); + int (*ici_get_buf_virt) (struct file *file, void *fh, + struct ici_frame_buf_wrapper *fram, struct page **pages); + int (*ici_put_buf) (struct file *file, void *fh, + struct ici_frame_info *fram); +}; + +#define inode_to_intel_ipu_stream_device(inode) \ + container_of((inode)->i_cdev, struct ici_stream_device, cdev) + +int stream_device_register(struct ici_stream_device *strm_dev); + +void stream_device_unregister(struct ici_stream_device *strm_dev); + +#endif /* ICI_ISYS_STREAM_DEVICE_H */ diff --git a/drivers/media/pci/intel/ici/ici-isys-stream.c b/drivers/media/pci/intel/ici/ici-isys-stream.c new file mode 100644 index 000000000000..1d45a76d56ea --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-stream.c @@ -0,0 +1,1506 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include "./ici/ici-isys.h" +#ifdef ICI_ENABLED + +#include +#include +#include +#include +#include +#include "isysapi/interface/ia_css_isysapi_fw_types.h" +#include "isysapi/interface/ia_css_isysapi.h" +#include +#include +#include +#include "ipu-trace.h" +#include "ipu-fw-isys.h" +#include "ipu-wrapper.h" +#include "./ici/ici-isys-stream.h" +#include "./ici/ici-isys-csi2.h" +#include "./ici/ici-isys-csi2-be.h" +#include + +#ifndef IPU4_DEBUG +#define IPU4_DEBUG 1 +#endif + +#define dev_to_stream(dev) \ + container_of(dev, struct ici_isys_stream, strm_dev) + +const struct ici_isys_pixelformat ici_isys_pfmts[] = { + /* YUV vector format */ + { ICI_FORMAT_YUYV, 24, 24, ICI_FORMAT_YUYV, IA_CSS_ISYS_FRAME_FORMAT_YUV420_16 }, + /* Raw bayer vector formats. */ + { ICI_FORMAT_SBGGR12, 16, 12, ICI_FORMAT_SBGGR12, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SGBRG12, 16, 12, ICI_FORMAT_SGBRG12, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SGRBG12, 16, 12, ICI_FORMAT_SGRBG12, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SRGGB12, 16, 12, ICI_FORMAT_SRGGB12, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SBGGR10, 16, 10, ICI_FORMAT_SBGGR10, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SGBRG10, 16, 10, ICI_FORMAT_SGBRG10, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SGRBG10, 16, 10, ICI_FORMAT_SGRBG10, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SRGGB10, 16, 10, ICI_FORMAT_SRGGB10, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SBGGR8, 16, 8, ICI_FORMAT_SBGGR8, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SGBRG8, 16, 8, ICI_FORMAT_SGBRG8, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SGRBG8, 16, 8, ICI_FORMAT_SGRBG8, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SRGGB8, 16, 8, ICI_FORMAT_SRGGB8, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + /*{ V4L2_FMT_INTEL_IPU4_ISYS_META, 8, 8, MEDIA_BUS_FMT_FIXED, IA_CSS_ISYS_MIPI_DATA_TYPE_EMBEDDED },*/ + { } +}; + +const struct ici_isys_pixelformat ici_isys_pfmts_be_soc[] = { + { ICI_FORMAT_UYVY, 16, 16, ICI_FORMAT_UYVY, IA_CSS_ISYS_FRAME_FORMAT_UYVY }, + { ICI_FORMAT_YUYV, 16, 16, ICI_FORMAT_YUYV, IA_CSS_ISYS_FRAME_FORMAT_YUYV }, + { ICI_FORMAT_RGB565, 32, 32, ICI_FORMAT_RGB565, IA_CSS_ISYS_FRAME_FORMAT_RGBA888 }, + { ICI_FORMAT_RGB888, 32, 32, ICI_FORMAT_RGB888, IA_CSS_ISYS_FRAME_FORMAT_RGBA888 }, + /* Raw bayer formats. 
*/ + { ICI_FORMAT_SBGGR12, 16, 12, ICI_FORMAT_SBGGR12, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SGBRG12, 16, 12, ICI_FORMAT_SGBRG12, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SGRBG12, 16, 12, ICI_FORMAT_SGRBG12, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SRGGB12, 16, 12, ICI_FORMAT_SRGGB12, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SBGGR10, 16, 10, ICI_FORMAT_SBGGR10, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SGBRG10, 16, 10, ICI_FORMAT_SGBRG10, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SGRBG10, 16, 10, ICI_FORMAT_SGRBG10, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SRGGB10, 16, 10, ICI_FORMAT_SRGGB10, IA_CSS_ISYS_FRAME_FORMAT_RAW16 }, + { ICI_FORMAT_SBGGR8, 8, 8, ICI_FORMAT_SBGGR8, IA_CSS_ISYS_FRAME_FORMAT_RAW8 }, + { ICI_FORMAT_SGBRG8, 8, 8, ICI_FORMAT_SGBRG8, IA_CSS_ISYS_FRAME_FORMAT_RAW8 }, + { ICI_FORMAT_SGRBG8, 8, 8, ICI_FORMAT_SGRBG8, IA_CSS_ISYS_FRAME_FORMAT_RAW8 }, + { ICI_FORMAT_SRGGB8, 8, 8, ICI_FORMAT_SRGGB8, IA_CSS_ISYS_FRAME_FORMAT_RAW8 }, + { } +}; + +const struct ici_isys_pixelformat ici_isys_pfmts_packed[] = { + { ICI_FORMAT_UYVY, 16, 16, ICI_FORMAT_UYVY, IA_CSS_ISYS_FRAME_FORMAT_UYVY }, + { ICI_FORMAT_YUYV, 16, 16, ICI_FORMAT_YUYV, IA_CSS_ISYS_FRAME_FORMAT_YUYV }, + { ICI_FORMAT_RGB565, 16, 16, ICI_FORMAT_RGB565, IA_CSS_ISYS_FRAME_FORMAT_RGB565 }, + { ICI_FORMAT_RGB888, 24, 24, ICI_FORMAT_RGB888, IA_CSS_ISYS_FRAME_FORMAT_RGBA888 }, + { ICI_FORMAT_SBGGR12, 12, 12, ICI_FORMAT_SBGGR12, IA_CSS_ISYS_FRAME_FORMAT_RAW12 }, + { ICI_FORMAT_SGBRG12, 12, 12, ICI_FORMAT_SGBRG12, IA_CSS_ISYS_FRAME_FORMAT_RAW12 }, + { ICI_FORMAT_SGRBG12, 12, 12, ICI_FORMAT_SGRBG12, IA_CSS_ISYS_FRAME_FORMAT_RAW12 }, + { ICI_FORMAT_SRGGB12, 12, 12, ICI_FORMAT_SRGGB12, IA_CSS_ISYS_FRAME_FORMAT_RAW12 }, + { ICI_FORMAT_SBGGR10, 10, 10, ICI_FORMAT_SBGGR10, IA_CSS_ISYS_FRAME_FORMAT_RAW10 }, + { ICI_FORMAT_SGBRG10, 10, 10, ICI_FORMAT_SGBRG10, IA_CSS_ISYS_FRAME_FORMAT_RAW10 }, + { ICI_FORMAT_SGRBG10, 10, 10, ICI_FORMAT_SGRBG10, IA_CSS_ISYS_FRAME_FORMAT_RAW10 }, + { ICI_FORMAT_SRGGB10, 10, 10, ICI_FORMAT_SRGGB10, IA_CSS_ISYS_FRAME_FORMAT_RAW10 }, + { ICI_FORMAT_SBGGR8, 8, 8, ICI_FORMAT_SBGGR8, IA_CSS_ISYS_FRAME_FORMAT_RAW8 }, + { ICI_FORMAT_SGBRG8, 8, 8, ICI_FORMAT_SGBRG8, IA_CSS_ISYS_FRAME_FORMAT_RAW8 }, + { ICI_FORMAT_SGRBG8, 8, 8, ICI_FORMAT_SGRBG8, IA_CSS_ISYS_FRAME_FORMAT_RAW8 }, + { ICI_FORMAT_SRGGB8, 8, 8, ICI_FORMAT_SRGGB8, IA_CSS_ISYS_FRAME_FORMAT_RAW8 }, + { } +}; + +static int node_set_format(struct ici_isys_node *node, + struct ici_pad_framefmt* pff) +{ + if (node->node_set_pad_ffmt) + return node->node_set_pad_ffmt(node, pff); + return 0; +} + +struct pipeline_format_data { + struct ici_isys_stream *as; + struct ici_pad_framefmt pff; +}; + +static int set_pipeline_node_format(void* cb_data, + struct ici_isys_node* node, + struct node_pipe* pipe) +{ + int ret; + struct pipeline_format_data* fmt_data = cb_data; + struct ici_isys_stream *as = fmt_data->as; + dev_info(&as->isys->adev->dev, + "Set ext sd \"%s\" format %d, width %d, height %d\n", + node->name, + fmt_data->pff.ffmt.pixelformat, + fmt_data->pff.ffmt.width, + fmt_data->pff.ffmt.height); + fmt_data->pff.pad.pad_idx = 0; + ret = node_set_format(node, &fmt_data->pff); + if (ret < 0) + return ret; + if (node->nr_pads > 1) { + fmt_data->pff.pad.pad_idx = 1; + ret = node_set_format(node, &fmt_data->pff); + if (ret < 0) + return ret; + } + return 0; +} + +static int set_pipeline_format(struct ici_isys_stream *as, + struct ici_framefmt* ff) +{ + struct pipeline_format_data fmt_data = { + .as = as, + 
.pff.pad.pad_idx = 0, + .pff.ffmt = *ff + }; + + return ici_isys_pipeline_for_each_node( + set_pipeline_node_format, + &fmt_data, + &as->node, + &as->ip, + true); +} + +struct pipeline_power_data { + struct ici_isys_stream *as; + int power; +}; + +static int pipeline_set_node_power(void* cb_data, + struct ici_isys_node* node, + struct node_pipe* pipe) +{ + struct pipeline_power_data* pwr_data = cb_data; + struct ici_isys_stream *as = pwr_data->as; + dev_info(&as->isys->adev->dev, + "Set ext sd \"%s\" power to %d\n", + node->name, pwr_data->power); + if (node->node_set_power) { + int ret = node->node_set_power(node, pwr_data->power); + if (ret < 0) + return ret; + } + return 0; +} + +static int pipeline_set_power(struct ici_isys_stream *as, + int state) +{ + struct pipeline_power_data pwr_data = { + .as = as, + .power = state, + }; + + return ici_isys_pipeline_for_each_node( + pipeline_set_node_power, + &pwr_data, + &as->node, + &as->ip, + true); +} + +static int intel_ipu4_isys_library_close(struct ici_isys *isys) +{ + struct device *dev = &isys->adev->dev; + int timeout = IPU_ISYS_TURNOFF_TIMEOUT; + int rval; + + /* + * Ask library to stop the isys fw. Actual close takes + * some time as the FW must stop its actions including code fetch + * to SP icache. + */ + rval = ipu_lib_call(device_close, isys); + if (rval) + dev_err(dev, "Device close failure: %d\n", rval); + + /* release probably fails if the close failed. Let's try still */ + do { + usleep_range(IPU_ISYS_TURNOFF_DELAY_US, + 2 * IPU_ISYS_TURNOFF_DELAY_US); + rval = ipu_lib_call_notrace(device_release, isys, 0); + timeout--; + } while (rval != 0 && timeout); + + if (!rval) + isys->fwcom = NULL; /* No further actions needed */ + else + dev_err(dev, "Device release time out %d\n", rval); + return rval; +} + +static unsigned int get_comp_format(u32 code) +{ + unsigned int predictor = 0; /* currently hard coded */ + unsigned int udt = ici_isys_format_code_to_mipi(code); + unsigned int scheme = ici_isys_get_compression_scheme(code); + + /* if data type is not user defined return here */ + if ((udt < ICI_ISYS_MIPI_CSI2_TYPE_USER_DEF(1)) + || (udt > ICI_ISYS_MIPI_CSI2_TYPE_USER_DEF(8))) + return 0; + + /* + * For each user defined type (1..8) there is configuration bitfield for + * decompression. + * + * | bit 3 | bits 2:0 | + * | predictor | scheme | + * compression schemes: + * 000 = no compression + * 001 = 10 - 6 - 10 + * 010 = 10 - 7 - 10 + * 011 = 10 - 8 - 10 + * 100 = 12 - 6 - 12 + * 101 = 12 - 7 - 12 + * 110 = 12 - 8 - 12 + */ + + return ((predictor << 3) | scheme) << + ((udt - ICI_ISYS_MIPI_CSI2_TYPE_USER_DEF(1)) * 4); +} + +static void csi_short_packet_prepare_firmware_stream_cfg_ici( + struct ici_isys_pipeline *ip, + struct ia_css_isys_stream_cfg_data *cfg) +{ + struct ici_isys_stream *as = + ici_pipeline_to_stream(ip); + struct ici_isys_frame_buf_list *buf_list = + &as->buf_list; + int input_pin = cfg->nof_input_pins++; + int output_pin = cfg->nof_output_pins++; + struct ia_css_isys_input_pin_info *input_info = + &cfg->input_pins[input_pin]; + struct ia_css_isys_output_pin_info *output_info = + &cfg->output_pins[output_pin]; + + /* + * Setting dt as ICI_ISYS_SHORT_PACKET_GENERAL_DT will cause + * MIPI receiver to receive all MIPI short packets. 
+ */ + input_info->dt = ICI_ISYS_SHORT_PACKET_GENERAL_DT; + input_info->input_res.width = ICI_ISYS_SHORT_PACKET_WIDTH; + input_info->input_res.height = buf_list->num_short_packet_lines; + + ip->output_pins[output_pin].pin_ready = + ici_isys_frame_short_packet_ready; + ip->output_pins[output_pin].buf_list = buf_list; + buf_list->short_packet_output_pin = output_pin; + + output_info->input_pin_id = input_pin; + output_info->output_res.width = ICI_ISYS_SHORT_PACKET_WIDTH; + output_info->output_res.height = buf_list->num_short_packet_lines; + output_info->stride = ICI_ISYS_SHORT_PACKET_WIDTH * + ICI_ISYS_SHORT_PACKET_UNITSIZE; + output_info->pt = ICI_ISYS_SHORT_PACKET_PT; + output_info->ft = ICI_ISYS_SHORT_PACKET_FT; + output_info->send_irq = 1; +} + +static int csi_short_packet_configure_tunit( + struct ici_isys_pipeline *ip, + bool enable) +{ + struct ici_isys *isys = ip->isys; + void __iomem *isys_base = isys->pdata->base; + void __iomem *tunit_base = isys_base + TRACE_REG_IS_TRACE_UNIT_BASE; + void __iomem *csi2_tm_base; + void __iomem *event_mask_reg; + unsigned int trace_addr; + int rval; + int i; + + if (ip->csi2->index >= IPU_ISYS_MAX_CSI2_LEGACY_PORTS) { + csi2_tm_base = isys->pdata->base + TRACE_REG_CSI2_3PH_TM_BASE; + trace_addr = TRACE_REG_CSI2_3PH_TM_TRACE_ADDRESS_VAL; + event_mask_reg = csi2_tm_base + + TRACE_REG_CSI2_3PH_TM_TRACE_DDR_EN_REG_IDX_P( + ip->csi2->index); + } else { + csi2_tm_base = isys->pdata->base + TRACE_REG_CSI2_TM_BASE; + trace_addr = TRACE_REG_CSI2_TM_TRACE_ADDRESS_VAL; + event_mask_reg = csi2_tm_base + + TRACE_REG_CSI2_TM_TRACE_DDR_EN_REG_IDX_P( + ip->csi2->index); + } + + if (!enable) { + writel(0, event_mask_reg); + writel(0, csi2_tm_base + + TRACE_REG_CSI2_TM_OVERALL_ENABLE_REG_IDX); + writel(0, tunit_base + TRACE_REG_TUN_DDR_ENABLE); + return 0; + } + + /* ring buffer base */ + writel(isys->short_packet_trace_buffer_dma_addr, + tunit_base + TRACE_REG_TUN_DRAM_BASE_ADDR); + + /* ring buffer end */ + writel(isys->short_packet_trace_buffer_dma_addr + + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE - + IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE, + tunit_base + TRACE_REG_TUN_DRAM_END_ADDR); + + /* Infobits for ddr trace */ + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + tunit_base + TRACE_REG_TUN_DDR_INFO_VAL); + + /* Remove reset from trace timers */ + writel(TRACE_REG_GPREG_TRACE_TIMER_RST_OFF, + isys_base + TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N); + + /* Reset CSI2 monitor */ + writel(1, csi2_tm_base + TRACE_REG_CSI2_TM_RESET_REG_IDX); + + /* Set trace address register. */ + writel(trace_addr, csi2_tm_base + + TRACE_REG_CSI2_TM_TRACE_ADDRESS_REG_IDX); + writel(TRACE_REG_CSI2_TM_TRACE_HEADER_VAL, csi2_tm_base + + TRACE_REG_CSI2_TM_TRACE_HEADER_REG_IDX); + + /* Enable DDR trace. */ + writel(1, tunit_base + TRACE_REG_TUN_DDR_ENABLE); + + /* Enable trace for CSI2 port. */ +#if 0 + reg_val = readl(event_mask_reg); + reg_val |= TRACE_REG_CSI2_TM_EVENT_FS(ip->vc); + writel(reg_val, event_mask_reg); +#else + for (i = 0; i < IPU_ISYS_MAX_CSI2_LEGACY_PORTS + + IPU_ISYS_MAX_CSI2_COMBO_PORTS; i++) { + void __iomem *event_mask_reg = + (i < IPU_ISYS_MAX_CSI2_LEGACY_PORTS) ? 
+ isys->pdata->base + TRACE_REG_CSI2_TM_BASE + + TRACE_REG_CSI2_TM_TRACE_DDR_EN_REG_IDX_P(i) : + isys->pdata->base + TRACE_REG_CSI2_3PH_TM_BASE + + TRACE_REG_CSI2_3PH_TM_TRACE_DDR_EN_REG_IDX_P(i); + + writel(IPU_ISYS_SHORT_PACKET_TRACE_EVENT_MASK, + event_mask_reg); + } +#endif + /* Enable CSI2 receiver monitor */ + writel(1, csi2_tm_base + TRACE_REG_CSI2_TM_OVERALL_ENABLE_REG_IDX); + + rval = ipu_buttress_tsc_read(isys->adev->isp, + &isys->tsc_timer_base); + if (rval) { + dev_err(&isys->adev->dev, + "Failed to read TSC timer.\n"); + return rval; + } + rval = ipu_trace_get_timer(&isys->adev->dev, + &isys->tunit_timer_base); + if (rval) { + dev_err(&isys->adev->dev, + "Failed to read Tunit timer.\n"); + return rval; + } + + return 0; +} + +static void get_stream_opened(struct ici_isys_stream *as) +{ + unsigned long flags; + + spin_lock_irqsave(&as->isys->lock, flags); + as->isys->stream_opened++; + spin_unlock_irqrestore(&as->isys->lock, flags); +} + +static void put_stream_opened(struct ici_isys_stream *as) +{ + unsigned long flags; + + spin_lock_irqsave(&as->isys->lock, flags); + as->isys->stream_opened--; + spin_unlock_irqrestore(&as->isys->lock, flags); +} + +static int get_stream_handle(struct ici_isys_stream *as) +{ + struct ici_isys_pipeline *ip = &as->ip; + unsigned int stream_handle; + unsigned long flags; + + spin_lock_irqsave(&as->isys->lock, flags); + for (stream_handle = 0; + stream_handle < INTEL_IPU4_ISYS_MAX_STREAMS; stream_handle++) + if (as->isys->ici_pipes[stream_handle] == NULL) + break; + if (stream_handle == INTEL_IPU4_ISYS_MAX_STREAMS) { + spin_unlock_irqrestore(&as->isys->lock, flags); + return -EBUSY; + } + as->isys->ici_pipes[stream_handle] = ip; + ip->stream_handle = stream_handle; + spin_unlock_irqrestore(&as->isys->lock, flags); + return 0; +} + +static void put_stream_handle(struct ici_isys_stream *as) +{ + struct ici_isys_pipeline *ip = &as->ip; + unsigned long flags; + + spin_lock_irqsave(&as->isys->lock, flags); + as->isys->ici_pipes[ip->stream_handle] = NULL; + ip->stream_handle = -1; + spin_unlock_irqrestore(&as->isys->lock, flags); +} + +/* Create stream and start it using the CSS library API. */ +static int start_stream_firmware(struct ici_isys_stream *as) +{ + struct ici_isys_pipeline *ip = &as->ip; + struct device *dev = &as->isys->adev->dev; + struct ia_css_isys_stream_cfg_data stream_cfg = { + .src = ip->source, + .vc = 0, + .isl_use = ICI_ISL_OFF, + .nof_input_pins = 1, + }; + struct ia_css_isys_frame_buff_set css_buf; + struct ici_pad_framefmt source_fmt = { + .pad.pad_idx = ip->asd_source_pad_id, + .ffmt = {0}, + + }; + struct ici_isys_node* be_csi2_node = NULL; + + int rval, rvalout, tout, i; + + rval = ip->asd_source->node.node_get_pad_ffmt( + &ip->asd_source->node, &source_fmt); + if (rval) + return rval; + stream_cfg.compfmt = get_comp_format(source_fmt.ffmt.pixelformat); + stream_cfg.input_pins[0].input_res.width = source_fmt.ffmt.width; + stream_cfg.input_pins[0].input_res.height = source_fmt.ffmt.height; + stream_cfg.input_pins[0].dt = + ici_isys_format_code_to_mipi(source_fmt.ffmt.pixelformat); + + /* + * Only CSI2-BE has the capability to do crop, + * so get the crop info from csi2-be. 
+ */ + stream_cfg.crop[0].right_offset = source_fmt.ffmt.width; + stream_cfg.crop[0].bottom_offset = source_fmt.ffmt.height; + if (ip->csi2_be) { + struct ici_pad_selection ps = { + .pad.pad_idx = CSI2_BE_ICI_PAD_SOURCE, + .rect = {0}, + }; + be_csi2_node = &ip->csi2_be->asd.node; + if (be_csi2_node->node_get_pad_sel) + rval = be_csi2_node->node_get_pad_sel( + be_csi2_node, &ps); + else + rval = -ENODEV; + if (!rval) { + stream_cfg.crop[0].left_offset = ps.rect.left; + stream_cfg.crop[0].top_offset = ps.rect.top; + stream_cfg.crop[0].right_offset = ps.rect.left + + ps.rect.width; + stream_cfg.crop[0].bottom_offset = ps.rect.top + + ps.rect.height; + } + } + + as->prepare_firmware_stream_cfg(as, &stream_cfg); + + if (ip->interlaced) { + if (ip->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) + csi_short_packet_prepare_firmware_stream_cfg_ici(ip, + &stream_cfg); + else + csi_short_packet_configure_tunit(ip, 1); + } + +// csslib_dump_isys_stream_cfg(dev, &stream_cfg); //TODO implement corresponding function to dump command input to FW + + ip->nr_output_pins = stream_cfg.nof_output_pins; + + rval = get_stream_handle(as); + if (rval) { + dev_dbg(dev, "Can't get stream_handle\n"); + return rval; + } + + reinit_completion(&ip->stream_open_completion); +/* SKTODO: Debug start */ + printk("SKTODO: My stream open\n"); + printk("ia_css_isys_stream_source src = %d\n", stream_cfg.src); + printk("ia_css_isys_mipi_vc vc = %d\n", stream_cfg.vc); + printk("ia_css_isys_isl_use isl_use = %d\n", stream_cfg.isl_use); + printk("compfmt = %u\n", stream_cfg.compfmt); + printk("struct ia_css_isys_isa_cfg isa_cfg"); + for ( i = 0 ; i < N_IA_CSS_ISYS_CROPPING_LOCATION ; i++ ) { + printk("crop[%d].top_offset = %d\n", i, stream_cfg.crop[i].top_offset); + printk("crop[%d].left_offset = %d\n", i, stream_cfg.crop[i].left_offset); + printk("crop[%d].bottom_offset = %d\n", i, stream_cfg.crop[i].bottom_offset); + printk("crop[%d].right_offset = %d\n", i, stream_cfg.crop[i].right_offset); + } + printk("send_irq_sof_discarded = %u\n", stream_cfg.send_irq_sof_discarded); + printk("send_irq_eof_discarded = %u\n", stream_cfg.send_irq_eof_discarded); + printk("send_resp_sof_discarded = %u\n", stream_cfg.send_resp_sof_discarded); + printk("send_resp_eof_discarded = %u\n", stream_cfg.send_resp_eof_discarded); + printk("nof_input_pins = %u\n", stream_cfg.nof_input_pins); + printk("nof_output_pins = %u\n", stream_cfg.nof_output_pins); + for (i = 0 ; i < stream_cfg.nof_input_pins ; i++) { + printk("input_pins[%d].input_res.width = %u\n", i, stream_cfg.input_pins[i].input_res.width); + printk("input_pins[%d].input_res.height = %u\n", i, stream_cfg.input_pins[i].input_res.height); + printk("input_pins[%d].dt = %d\n", i, stream_cfg.input_pins[i].dt); + printk("input_pins[%d].mipi_store_mode = %d\n", i, stream_cfg.input_pins[i].mipi_store_mode); + } + for (i = 0 ; i < stream_cfg.nof_output_pins ; i++) { + printk("output_pins[%d].input_pin_id = %u\n", i, stream_cfg.output_pins[i].input_pin_id); + printk("output_pins[%d].output_res.width = %u\n", i, stream_cfg.output_pins[i].output_res.width); + printk("output_pins[%d].output_res.height = %u\n", i, stream_cfg.output_pins[i].output_res.height); + printk("output_pins[%d].stride = %u\n", i, stream_cfg.output_pins[i].stride); + printk("output_pins[%d].pt = %d\n", i, stream_cfg.output_pins[i].pt); + printk("output_pins[%d].ft = %d\n", i, stream_cfg.output_pins[i].ft); + printk("output_pins[%d].watermark_in_lines = %u\n", i, stream_cfg.output_pins[i].watermark_in_lines); + 
printk("output_pins[%d].send_irq = %u\n", i, stream_cfg.output_pins[i].send_irq); + } +/* SKTODO: Debug end */ + rval = ipu_lib_call(stream_open, as->isys, ip->stream_handle, &stream_cfg); + if (rval < 0) { + dev_err(dev, "can't open stream (%d)\n", rval); + goto out_put_stream_handle; + } + get_stream_opened(as); + + tout = wait_for_completion_timeout(&ip->stream_open_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) { + dev_err(dev, "stream open time out\n"); + rval = -ETIMEDOUT; + goto out_put_stream_opened; + } + if (ip->error) { + dev_err(dev, "stream open error: %d\n", ip->error); + rval = -EIO; + goto out_put_stream_opened; + } + dev_dbg(dev, "start stream: open complete\n"); + + rval = ici_isys_frame_buf_add_next(as, &css_buf); + if (rval) { + dev_err(dev, "no buffers for streaming (%d)\n", rval); + goto out_stream_close; + } +//TODO implement corresponding function to dump command input to FW +// csslib_dump_isys_frame_buff_set(dev, &css_buf, +// stream_cfg.nof_output_pins); + + reinit_completion(&ip->stream_start_completion); + rval = ipu_lib_call(stream_start, as->isys, ip->stream_handle, + &css_buf); + if (rval < 0) { + dev_err(dev, "can't start streaming (%d)\n", rval); + goto out_stream_close; + } + + tout = wait_for_completion_timeout(&ip->stream_start_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) { + dev_err(dev, "stream start time out\n"); + rval = -ETIMEDOUT; + goto out_stream_close; + } + if (ip->error) { + dev_err(dev, "stream start error: %d\n", ip->error); + rval = -EIO; + goto out_stream_close; + } + dev_dbg(dev, "start stream: complete\n"); + + return 0; + +out_stream_close: + reinit_completion(&ip->stream_close_completion); + + rvalout = ipu_lib_call(stream_close, as->isys, ip->stream_handle); + if (rvalout < 0) { + dev_dbg(dev, "can't close stream (%d)\n", rvalout); + } else { + tout = wait_for_completion_timeout(&ip->stream_close_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_err(dev, "stream close time out\n"); + else if (ip->error) + dev_err(dev, "stream close error: %d\n", ip->error); + else + dev_dbg(dev, "stream close complete\n"); + } + +out_put_stream_opened: + put_stream_opened(as); + +out_put_stream_handle: + put_stream_handle(as); + return rval; +} + +static void stop_streaming_firmware(struct ici_isys_stream *as) +{ + struct ici_isys_pipeline *ip = &as->ip; + struct device *dev = &as->isys->adev->dev; + int rval, tout; + + reinit_completion(&ip->stream_stop_completion); + rval = ipu_lib_call(stream_flush, as->isys, ip->stream_handle); + if (rval < 0) { + dev_err(dev, "can't stop stream (%d)\n", rval); + } else { + tout = wait_for_completion_timeout(&ip->stream_stop_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_err(dev, "stream stop time out\n"); + else if (ip->error) + dev_err(dev, "stream stop error: %d\n", ip->error); + else + dev_dbg(dev, "stop stream: complete\n"); + } + if (ip->interlaced && ip->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_TUNIT) + csi_short_packet_configure_tunit(ip, 0); +} + +static void close_streaming_firmware(struct ici_isys_stream *as) +{ + struct ici_isys_pipeline *ip = &as->ip; + struct device *dev = &as->isys->adev->dev; + int rval, tout; + + reinit_completion(&ip->stream_close_completion); + rval = ipu_lib_call(stream_close, as->isys, ip->stream_handle); + if (rval < 0) { + dev_err(dev, "can't close stream (%d)\n", rval); + } else { + tout = wait_for_completion_timeout(&ip->stream_close_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_err(dev, 
"stream close time out\n"); + else if (ip->error) + dev_err(dev, "stream close error: %d\n", ip->error); + else + dev_dbg(dev, "close stream: complete\n"); + } + put_stream_opened(as); + put_stream_handle(as); +} + +void ici_isys_stream_add_capture_done( + struct ici_isys_pipeline* ip, + void (*capture_done)(struct ici_isys_pipeline* ip, + struct ia_css_isys_resp_info* resp)) +{ + unsigned int i; + + /* Different instances may register same function. Add only once */ + for (i = 0; i < ICI_NUM_CAPTURE_DONE ; i++) + if (ip->capture_done[i] == capture_done) + return; + + for (i = 0; i < ICI_NUM_CAPTURE_DONE ; i++) { + if (ip->capture_done[i] == NULL) { + ip->capture_done[i] = capture_done; + return; + } + } + /* + * Too many call backs registered. Change to INTEL_IPU4_NUM_CAPTURE_DONE + * constant probably required. + */ + BUG(); +} + +void ici_isys_prepare_firmware_stream_cfg_default( + struct ici_isys_stream *as, + struct ia_css_isys_stream_cfg_data *cfg) +{ + struct ici_isys_pipeline *ip = &as->ip; + + struct ici_isys_frame_buf_list *bl = &as->buf_list; + + struct ia_css_isys_output_pin_info *pin_info; + int pin = cfg->nof_output_pins++; + + bl->fw_output = pin; + ip->output_pins[pin].pin_ready = ici_isys_frame_buf_ready; + ip->output_pins[pin].buf_list = bl; + + pin_info = &cfg->output_pins[pin]; + pin_info->input_pin_id = 0; + pin_info->output_res.width = as->strm_format.ffmt.width; + pin_info->output_res.height = as->strm_format.ffmt.height; + pin_info->stride = as->strm_format.pfmt.plane_fmt[0].bytesperline; + pin_info->pt = bl->css_pin_type; + pin_info->ft = as->pfmt->css_pixelformat; + pin_info->send_irq = 1; + cfg->vc = ip->vc; +} + +static int pipeline_validate_node(void* cb_data, + struct ici_isys_node* src_node, + struct node_pipe* pipe) +{ + int rval; + struct ici_isys_pipeline *ip = cb_data; + + dev_err(&ip->pipeline_dev->dev, "Validating node %s\n", + src_node->name); + if (src_node->node_pipeline_validate) { + rval = src_node->node_pipeline_validate(&ip->pipe, + src_node); + if (rval) + return rval; + } + if (pipe) { + struct ici_isys_node* sink_node = + pipe->sink_pad->node; + struct ici_pad_framefmt src_format = { + .pad.pad_idx = pipe->src_pad->pad_id, + }; + struct ici_pad_framefmt sink_format = { + .pad.pad_idx = pipe->sink_pad->pad_id, + }; + if (src_node->node_get_pad_ffmt) { + rval = src_node->node_get_pad_ffmt(src_node, + &src_format); + if (rval) + return rval; + } + if (sink_node->node_get_pad_ffmt) { + rval = sink_node->node_get_pad_ffmt(sink_node, + &sink_format); + if (rval) + return rval; + } + if (src_format.ffmt.width != sink_format.ffmt.width || + src_format.ffmt.height != sink_format.ffmt.height || + src_format.ffmt.pixelformat != sink_format.ffmt.pixelformat || + src_format.ffmt.field != sink_format.ffmt.field || + src_format.ffmt.colorspace != sink_format.ffmt.colorspace) { + dev_err(&ip->pipeline_dev->dev, "Formats don't match node (%d:%d) -> node (%d:%d)\n", + src_node->node_id, src_format.pad.pad_idx, + sink_node->node_id, sink_format.pad.pad_idx); + return -EINVAL; + } + } + return 0; +} + +static int pipeline_validate( + struct ici_isys_node *node, + struct ici_isys_pipeline *ip) +{ + return ici_isys_pipeline_for_each_node( + pipeline_validate_node, + ip, + node, + ip, + true); +} + +struct set_streaming_data { + struct ici_isys_pipeline *ip; + bool external; + int state; +}; + +static int set_streaming_node(void* cb_data, + struct ici_isys_node* node, + struct node_pipe* pipe) +{ + struct set_streaming_data* data = cb_data; + if (data->external != 
node->external) + return 0; + + if (node->node_set_streaming) + return node->node_set_streaming(node, data->ip, + data->state); + return 0; +} + + +static int set_streaming(struct ici_isys_node *node, + struct ici_isys_pipeline *ip, + bool external, + int state) +{ + struct set_streaming_data data = { + .ip = ip, + .external = external, + .state = state + }; + return ici_isys_pipeline_for_each_node( + set_streaming_node, + &data, + node, + ip, + true); +} + +static int ici_isys_set_streaming( + struct ici_isys_stream *as, + unsigned int state) +{ + struct ici_isys_pipeline *ip = &as->ip; + int rval = 0; + + dev_dbg(&as->isys->adev->dev, "set stream (intel_stream%d): %d\n", state, + as->strm_dev.minor); + + if (!state) { + stop_streaming_firmware(as); + + /* stop external sub-device now. */ + if (ip->csi2) { + ici_isys_csi2_wait_last_eof(ip->csi2); + } + + set_streaming(&as->node, ip, true, 0); + } + + rval = set_streaming(&as->node, ip, false, state); + if (rval) + goto out_stop_streaming; + + if (state) { + rval = start_stream_firmware(as); + if (rval) { + goto out_stop_streaming; + } + dev_dbg(&ip->isys->adev->dev, "set stream: source %d, stream_handle %d\n", + ip->source, ip->stream_handle); + + /* Start external sub-device now. */ + rval = set_streaming(&as->node, ip, true, state); + if (rval) + goto out_stop_streaming_firmware; + + } else { + close_streaming_firmware(as); + } + + ip->streaming = state; + return 0; + +out_stop_streaming_firmware: + stop_streaming_firmware(as); + +out_stop_streaming: + set_streaming(&as->node, ip, false, 0); + return rval; +} + +static void stream_buffers(struct ici_isys_stream *as) +{ + struct ici_isys_pipeline *ip = &as->ip; + struct ia_css_isys_frame_buff_set set = {}; + int rval; + + for (;;) { + rval = ici_isys_frame_buf_add_next(as, &set); + if (rval) { + break; + } +//TODO implement corresponding function to dump command input to FW +// csslib_dump_isys_frame_buff_set(&as->isys->adev->dev, &set, +// ip->nr_output_pins); + WARN_ON(ipu_lib_call( + stream_capture_indication, as->isys, + ip->stream_handle, &set) < 0); + } +} + +static int ici_isys_stream_on(struct file *file, void *fh) +{ + struct ici_isys_stream *as = + dev_to_stream(file->private_data); + struct ici_isys_pipeline *ip = &as->ip; + int rval, i; + + dev_dbg(&as->isys->adev->dev, + "stream_on: %u\n", as->strm_dev.minor); + + if (ip->streaming) { + dev_dbg(&as->isys->adev->dev, + "Already streaming\n"); + return 0; + } + + ip->csi2 = NULL; + ip->csi2_be = NULL; + ip->asd_source = NULL; + ip->asd_source_pad_id = 0; + rval = pipeline_validate(&as->node, ip); + if (rval) + return rval; + + if (!ip->asd_source) { + dev_err(&ip->isys->adev->dev, "set stream: Pipeline does not have a source\n"); + return -ENODEV; + } + + pipeline_set_power(as, 1); + + mutex_lock(&as->isys->stream_mutex); + ip->source = ip->asd_source->source; + + for (i = 0; i < ICI_NUM_CAPTURE_DONE; i++) + ip->capture_done[i] = NULL; + + if (ip->interlaced) { + pr_err("** SKTODO: INTERLACE ENABLED **\n"); + if (ip->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) { + rval = ici_isys_frame_buf_short_packet_setup( + as, &as->strm_format); + if (rval) + goto out_requeue; + } else { + memset(ip->isys->short_packet_trace_buffer, 0, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE); + dma_sync_single_for_device(&as->isys->adev->dev, + as->isys->short_packet_trace_buffer_dma_addr, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + DMA_BIDIRECTIONAL); + ip->short_packet_trace_index = 0; + } + } + + rval = ici_isys_set_streaming(as, 
1); + if (rval) + goto out_cleanup_short_packet; + + ip->streaming = 1; + + dev_dbg(&as->isys->adev->dev, "dispatching queued requests\n"); + stream_buffers(as); + dev_dbg(&as->isys->adev->dev, + "done dispatching queued requests\n"); + + mutex_unlock(&as->isys->stream_mutex); + + return 0; + +out_cleanup_short_packet: + ici_isys_frame_buf_short_packet_destroy(as); + +out_requeue: + ici_isys_frame_buf_stream_cancel(as); + mutex_unlock(&as->isys->stream_mutex); + pipeline_set_power(as, 0); + return rval; +} + +static int ici_isys_stream_off(struct file *file, void *fh) +{ + struct ici_isys_stream *as = + dev_to_stream(file->private_data); + struct ici_isys_pipeline *ip = &as->ip; + + mutex_lock(&as->isys->stream_mutex); + if (ip->streaming) + ici_isys_set_streaming(as, 0); + + ip->streaming = 0; + ici_isys_frame_buf_short_packet_destroy(as); + mutex_unlock(&as->isys->stream_mutex); + + ici_isys_frame_buf_stream_cancel(as); + pipeline_set_power(as, 0); + return 0; +} + +const struct ici_isys_pixelformat +*ici_isys_get_pixelformat( + struct ici_isys_stream *as, unsigned int pixelformat) +{ + const struct ici_isys_pixelformat *pfmt; + unsigned pad; + const unsigned *supported_codes; + + pad = as->pad.pad_id; + supported_codes = as->asd->supported_codes[pad]; + + for (pfmt = as->pfmts; pfmt->bpp; pfmt++) { + unsigned int i; + + if (pfmt->code != pixelformat) + continue; + + for (i = 0; supported_codes[i]; i++) { + if (pfmt->code == supported_codes[i]) + return pfmt; + } + } + + /* Not found. Get the default, i.e. the first defined one. */ + for (pfmt = as->pfmts; pfmt->bpp; pfmt++) { + if (pfmt->code == *supported_codes) + return pfmt; + } + + BUG(); +} + +const struct ici_isys_pixelformat +*ici_isys_video_try_fmt_vid_mplane_default( + struct ici_isys_stream *as, + struct ici_stream_format *mpix) +{ + const struct ici_isys_pixelformat *pfmt = + ici_isys_get_pixelformat(as, mpix->ffmt.pixelformat); + + mpix->ffmt.pixelformat = pfmt->pixelformat; + mpix->pfmt.num_planes = 1; + + if (!as->packed) + mpix->pfmt.plane_fmt[0].bytesperline = + mpix->ffmt.width * DIV_ROUND_UP(pfmt->bpp, + BITS_PER_BYTE); + else + mpix->pfmt.plane_fmt[0].bytesperline = DIV_ROUND_UP( + as->line_header_length + as->line_footer_length + + (unsigned int)mpix->ffmt.width * pfmt->bpp, + BITS_PER_BYTE); + + mpix->pfmt.plane_fmt[0].bytesperline = + ALIGN(mpix->pfmt.plane_fmt[0].bytesperline, + as->isys->line_align); + mpix->pfmt.plane_fmt[0].bpp = pfmt->bpp; + + /* + * (height + 1) * bytesperline due to a hardware issue: the DMA unit + * is a power of two, and a line should be transferred as few units + * as possible. The result is that up to line length more data than + * the image size may be transferred to memory after the image. + * Another limition is the GDA allocation unit size. For low + * resolution it gives a bigger number. Use larger one to avoid + * memory corruption. 
+ */ + mpix->pfmt.plane_fmt[0].sizeimage = + max(max(mpix->pfmt.plane_fmt[0].sizeimage, + mpix->pfmt.plane_fmt[0].bytesperline * + mpix->ffmt.height + + max(mpix->pfmt.plane_fmt[0].bytesperline, + as->isys->pdata->ipdata->isys_dma_overshoot)), + 1U); + + if (mpix->ffmt.field == ICI_FIELD_ANY) + mpix->ffmt.field = ICI_FIELD_NONE; + + return pfmt; +} + +static int ici_s_fmt_vid_cap_mplane( + struct ici_isys_stream *as, + struct ici_stream_format *f) +{ + if (as->ip.streaming) + return -EBUSY; + + as->pfmt = as->try_fmt_vid_mplane(as, f); + as->strm_format = *f; + + return 0; +} + +/** + * Returns true if device does not support real interrupts and + * polling must be used. + */ +static int ici_poll_for_events( + struct ici_isys_stream *as) +{ +// return is_intel_ipu_hw_fpga(); + return 0; +} + +static void ipu_cleanup_fw_msg_bufs(struct ici_isys *isys) +{ + struct isys_fw_msgs *fwmsg, *fwmsg0; + unsigned long flags; + + spin_lock_irqsave(&isys->listlock, flags); + list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head) + list_move(&fwmsg->head, &isys->framebuflist); + spin_unlock_irqrestore(&isys->listlock, flags); +} + +static int stream_fop_open(struct inode *inode, struct file *file) +{ + struct ici_stream_device *strm_dev = + inode_to_intel_ipu_stream_device(inode); + struct ici_isys_stream* as = dev_to_stream(strm_dev); + struct ici_isys *isys = as->isys; + struct ipu_bus_device *adev = + to_ipu_bus_device(&isys->adev->dev); + struct ipu_device *isp = adev->isp; + int rval; + DEBUGK("%s: stream open (%p)\n", __func__, as); + + mutex_lock(&isys->mutex); + if (isys->reset_needed) { + mutex_unlock(&isys->mutex); + dev_warn(&isys->adev->dev, "isys power cycle required\n"); + return -EIO; + } + mutex_unlock(&isys->mutex); + + rval = ipu_buttress_authenticate(isp); + if (rval) { + dev_err(&isys->adev->dev, "FW authentication failed\n"); + return rval; + } + + rval = pm_runtime_get_sync(&isys->adev->dev); + if (rval < 0) { + pm_runtime_put_noidle(&isys->adev->dev); + return rval; + } + + mutex_lock(&isys->mutex); + + ipu_configure_spc(adev->isp, + &isys->pdata->ipdata->hw_variant, + IA_CSS_PKG_DIR_ISYS_INDEX, + isys->pdata->base, isys->pkg_dir, + isys->pkg_dir_dma_addr); + + + if (isys->ici_stream_opened++) { + /* Already open */ + mutex_unlock(&isys->mutex); + return 0; + } + + ipu_cleanup_fw_msg_bufs(isys); + + if (isys->fwcom) { + /* + * Something went wrong in previous shutdown. As we are now + * restarting isys we can safely delete old context. 
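+		 * The stale handle is cleared right below; a fresh context
+		 * is then created by ipu_fw_isys_init().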
+		 */
+		ipu_fw_isys_cleanup(isys);
+		isys->fwcom = NULL;
+	}
+
+	if (ici_poll_for_events(as)) {
+		static const struct sched_param param = {
+			.sched_priority = MAX_USER_RT_PRIO / 2,
+		};
+
+		isys->isr_thread = kthread_run(
+			intel_ipu4_isys_isr_run_ici, as->isys,
+			IPU_ISYS_ENTITY_PREFIX);
+
+		if (IS_ERR(isys->isr_thread)) {
+			rval = PTR_ERR(isys->isr_thread);
+			goto out_intel_ipu4_pipeline_pm_use;
+		}
+
+		sched_setscheduler(isys->isr_thread, SCHED_FIFO, &param);
+	}
+
+	rval = ipu_fw_isys_init(as->isys, IPU_ISYS_NUM_STREAMS);
+	if (rval < 0)
+		goto out_lib_init;
+
+	mutex_unlock(&isys->mutex);
+
+	return 0;
+
+out_lib_init:
+	if (ici_poll_for_events(as))
+		kthread_stop(isys->isr_thread);
+
+out_intel_ipu4_pipeline_pm_use:
+	isys->ici_stream_opened--;
+	mutex_unlock(&isys->mutex);
+	pm_runtime_put(&isys->adev->dev);
+
+	return rval;
+}
+
+static int stream_fop_release(struct inode *inode, struct file *file)
+{
+	struct ici_stream_device *strm_dev =
+		inode_to_intel_ipu_stream_device(inode);
+	struct ici_isys_stream *as = dev_to_stream(strm_dev);
+	int ret = 0;
+
+	DEBUGK("%s: stream release (%p)\n", __func__, as);
+
+	if (as->ip.streaming) {
+		ici_isys_stream_off(file, NULL);
+	}
+
+	mutex_lock(&as->isys->mutex);
+
+	if (!--as->isys->ici_stream_opened) {
+		if (ici_poll_for_events(as))
+			kthread_stop(as->isys->isr_thread);
+
+		intel_ipu4_isys_library_close(as->isys);
+		if (as->isys->fwcom) {
+			as->isys->reset_needed = true;
+			ret = -EIO;
+		}
+	}
+
+	mutex_unlock(&as->isys->mutex);
+
+	pm_runtime_put(&as->isys->adev->dev);
+	return ret;
+}
+
+static unsigned int stream_fop_poll(struct file *file,
+				    struct poll_table_struct *poll)
+{
+	struct ici_isys_stream *as =
+		dev_to_stream(file->private_data);
+	struct ici_isys *isys = as->isys;
+	unsigned int res = 0;
+
+	dev_dbg(&isys->adev->dev, "stream_fop_poll\n");
+
+	poll_wait(file, &as->buf_list.wait, poll);
+
+	if (!list_empty(&as->buf_list.putbuf_list))
+		res = POLLIN;
+
+	dev_dbg(&isys->adev->dev, "stream_fop_poll res %u\n", res);
+
+	return res;
+}
+
+static int ici_isys_set_format(struct file *file, void *fh,
+			       struct ici_stream_format *sf)
+{
+	int rval;
+	struct ici_isys_stream *as =
+		dev_to_stream(file->private_data);
+	struct ici_isys *isys = as->isys;
+
+	DEBUGK("%s: ici stream set format (%p)\n"
+	       "width: %u, height: %u, pixelformat: %u, field: %u, colorspace: %u\n",
+	       __func__, as,
+	       sf->ffmt.width,
+	       sf->ffmt.height,
+	       sf->ffmt.pixelformat,
+	       sf->ffmt.field,
+	       sf->ffmt.colorspace);
+
+	if (sf->ffmt.field == ICI_FIELD_ALTERNATE) {
+		DEBUGK("Interlaced enabled\n");
+		as->ip.interlaced = true;
+		as->ip.short_packet_source = 1;
+	} else {
+		as->ip.interlaced = false;
+	}
+
+	rval = ici_s_fmt_vid_cap_mplane(as, sf);
+	if (rval) {
+		dev_err(&isys->adev->dev,
+			"failed to set format (vid_cap) %d\n", rval);
+		return rval;
+	}
+	if (sf->pfmt.num_planes != 1) {
+		dev_err(&isys->adev->dev, "Invalid num of planes %d\n",
+			sf->pfmt.num_planes);
+		return -EINVAL;
+	}
+	if (!sf->pfmt.plane_fmt[0].sizeimage) {
+		dev_err(&isys->adev->dev, "Zero image size for plane 0\n");
+		return -EINVAL;
+	}
+
+	rval = set_pipeline_format(as, &sf->ffmt);
+	if (rval) {
+		dev_err(&isys->adev->dev,
+			"failed to set format on pipeline %d\n", rval);
+		return rval;
+	}
+	return 0;
+}
+
+static int ici_isys_getbuf(struct file *file, void *fh,
+			   struct ici_frame_info *user_frame_info)
+{
+	int rval = 0;
+	struct ici_isys_stream *as = dev_to_stream(
+		file->private_data);
+	struct ici_isys *isys = as->isys;
+
+	//DEBUGK("%s: ici stream getbuf (%p)\n", __func__, as);
+	rval =
ici_isys_get_buf(as, user_frame_info); + if(rval) { + dev_err(&isys->adev->dev, "failed to get buffer %d\n", rval); + return rval; + } + + mutex_lock(&as->isys->stream_mutex); + if (as->ip.streaming) { + stream_buffers(as); + } + mutex_unlock(&as->isys->stream_mutex); + return 0; +} + +static int ici_isys_getbuf_virt(struct file *file, void *fh, + struct ici_frame_buf_wrapper *user_frame_buf, struct page **pages) +{ + int rval = 0; + struct ici_isys_stream *as = dev_to_stream( + file->private_data); + struct ici_isys *isys = as->isys; + + rval = ici_isys_get_buf_virt(as, user_frame_buf, pages); + if (rval) { + dev_err(&isys->adev->dev, "failed to get buffer %d\n", rval); + return rval; + } + + mutex_lock(&as->isys->stream_mutex); + if (as->ip.streaming) { + stream_buffers(as); + } + mutex_unlock(&as->isys->stream_mutex); + return 0; +} + +static int ici_isys_putbuf(struct file *file, void *fh, + struct ici_frame_info *user_frame_info) +{ + int rval = 0; + struct ici_isys_stream *as = dev_to_stream(file->private_data); + struct ici_isys *isys = as->isys; + //DEBUGK("%s: ici stream putbuf (%p)\n", __func__, as); + rval = ici_isys_put_buf(as, user_frame_info, + file->f_flags); + if(rval) { + dev_err(&isys->adev->dev, "failed to put buffer %d\n", rval); + return rval; + } + return 0; +} + +static int ici_isys_stream_get_ffmt( + struct ici_isys_node* node, + struct ici_pad_framefmt* pff) +{ + struct ici_isys_stream *as = node->sd; + if (pff->pad.pad_idx != 0) { + dev_err(&as->isys->adev->dev, + "Stream only has pad 0\n"); + return -EINVAL; + } + pff->ffmt = as->strm_format.ffmt; + return 0; +} + +static const struct ici_ioctl_ops ioctl_ops_mplane_ici = { + .ici_set_format = ici_isys_set_format, + .ici_stream_on = ici_isys_stream_on, + .ici_stream_off = ici_isys_stream_off, + .ici_get_buf = ici_isys_getbuf, + .ici_get_buf_virt = ici_isys_getbuf_virt, + .ici_put_buf = ici_isys_putbuf, +}; + +static const struct file_operations ipu4_isys_ici_stream_fops = { + .owner = THIS_MODULE, + .poll = stream_fop_poll, + .open = stream_fop_open, + .release = stream_fop_release, +}; + +int ici_isys_stream_init( + struct ici_isys_stream *as, + struct ici_isys_subdev *asd, + struct ici_isys_node *node, + unsigned int pad, + unsigned long pad_flags) +{ + int rval; + char name[ICI_MAX_NODE_NAME]; + + mutex_init(&as->mutex); + init_completion(&as->ip.stream_open_completion); + init_completion(&as->ip.stream_close_completion); + init_completion(&as->ip.stream_start_completion); + init_completion(&as->ip.stream_stop_completion); + init_completion(&as->ip.capture_ack_completion); + as->ip.isys = as->isys; + as->ip.pipeline_dev = &as->isys->pipeline_dev; + as->asd = asd; + + as->strm_dev.ipu_ioctl_ops = &ioctl_ops_mplane_ici; + + ici_isys_frame_buf_init(&as->buf_list); + + as->pad.flags = pad_flags | ICI_PAD_FLAGS_MUST_CONNECT; + snprintf(name, sizeof(name), + "%s Stream", asd->node.name); + rval = ici_isys_pipeline_node_init(as->isys, + &as->node, name, 1, &as->pad); + if (rval) + goto out_init_fail; + + if (__ici_isys_subdev_get_ffmt(asd, pad)) + as->strm_format.ffmt = + *__ici_isys_subdev_get_ffmt(asd, pad); + + as->node.sd = as; + as->node.pipe = &as->ip.pipe; + as->node.node_get_pad_ffmt = + ici_isys_stream_get_ffmt; + + asd->node.pipe = &as->ip.pipe; + /*asd->node.ops = &entity_ops;*/ + as->strm_dev.fops = &ipu4_isys_ici_stream_fops; + + as->strm_dev.frame_buf_list = &as->buf_list; + as->strm_dev.mutex = &as->mutex; + as->strm_dev.dev_parent = &as->isys->adev->dev; + dev_set_drvdata(&as->strm_dev.dev, as); + + 
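+	/* The device registration and link creation below are serialised
+	 * by the per-stream mutex. */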
mutex_lock(&as->mutex); + + rval = stream_device_register(&as->strm_dev); + if (rval) + goto out_mutex_unlock; + + if (pad_flags & ICI_PAD_FLAGS_SINK) + rval = node_pad_create_link( + node, pad, &as->node, 0, 0); + else if (pad_flags & ICI_PAD_FLAGS_SOURCE) + rval = node_pad_create_link( + &as->node, 0, node, pad, 0); + + if (rval) { + printk(KERN_WARNING "can't create link\n"); + goto out_mutex_unlock; + } + + mutex_unlock(&as->mutex); + + return rval; + +out_mutex_unlock: + mutex_unlock(&as->mutex); + node_pads_cleanup(&as->asd->node); + //intel_ipu4_isys_framebuf_cleanup(&as->buf_list); +out_init_fail: + mutex_destroy(&as->mutex); + + return rval; +} + +void ici_isys_stream_cleanup(struct ici_isys_stream *as) +{ + list_del(&as->node.node_entry); + stream_device_unregister(&as->strm_dev); + node_pads_cleanup(&as->asd->node); + mutex_destroy(&as->mutex); + //intel_ipu4_isys_framebuf_cleanup(&as->buf_list); +} + +#endif //ICI_ENABLED + diff --git a/drivers/media/pci/intel/ici/ici-isys-stream.h b/drivers/media/pci/intel/ici/ici-isys-stream.h new file mode 100644 index 000000000000..77d89ed2ea79 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-stream.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_STREAM_H +#define ICI_ISYS_STREAM_H + +#include +#include +#include + +#include "ici-isys-stream-device.h" +#include "ici-isys-frame-buf.h" +#include "ici-isys-pipeline.h" + + +struct ici_isys; +struct ia_css_isys_stream_cfg_data; +struct ici_isys_subdev; + +struct ici_isys_pixelformat { + uint32_t pixelformat; + uint32_t bpp; + uint32_t bpp_packed; + uint32_t code; + uint32_t css_pixelformat; +}; + +struct ici_isys_stream { + /* Serialise access to other fields in the struct. 
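+	 * It is also exposed to the stream device core via strm_dev.mutex.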
*/ + struct mutex mutex; + struct node_pad pad; + struct ici_isys_node node; + struct ici_stream_device strm_dev; + struct ici_stream_format strm_format; + const struct ici_isys_pixelformat *pfmts; + const struct ici_isys_pixelformat *pfmt; + struct ici_isys_frame_buf_list buf_list; + struct ici_isys_subdev* asd; + struct ici_isys *isys; /* its parent device */ + struct ici_isys_pipeline ip; + unsigned int streaming; + bool packed; + unsigned int line_header_length; /* bits */ + unsigned int line_footer_length; /* bits */ + const struct ici_isys_pixelformat *(*try_fmt_vid_mplane)( + struct ici_isys_stream *as, + struct ici_stream_format *mpix); + void (*prepare_firmware_stream_cfg)( + struct ici_isys_stream *as, + struct ia_css_isys_stream_cfg_data *cfg); + int (*frame_done_notify_queue)(void); +}; + +#define to_intel_ipu4_isys_ici_stream(__buf_list) \ + container_of(__buf_list, struct ici_isys_stream, buf_list) +#define ici_pipeline_to_stream(__ip) \ + container_of(__ip, struct ici_isys_stream, ip) + +extern const struct ici_isys_pixelformat ici_isys_pfmts[]; +extern const struct ici_isys_pixelformat ici_isys_pfmts_be_soc[]; +extern const struct ici_isys_pixelformat ici_isys_pfmts_packed[]; + +const struct ici_isys_pixelformat +*ici_isys_video_try_fmt_vid_mplane_default( + struct ici_isys_stream *as, + struct ici_stream_format *mpix); +void ici_isys_prepare_firmware_stream_cfg_default( + struct ici_isys_stream *as, + struct ia_css_isys_stream_cfg_data *cfg); + +int ici_isys_stream_init(struct ici_isys_stream *as, + struct ici_isys_subdev *asd, + struct ici_isys_node *node, + unsigned int pad, + unsigned long pad_flags); +void ici_isys_stream_cleanup(struct ici_isys_stream *as); + +void ici_isys_stream_add_capture_done( + struct ici_isys_pipeline* ip, + void (*capture_done)(struct ici_isys_pipeline* ip, + struct ia_css_isys_resp_info* resp)); + +#endif /* ICI_ISYS_STREAM_H */ diff --git a/drivers/media/pci/intel/ici/ici-isys-subdev.c b/drivers/media/pci/intel/ici/ici-isys-subdev.c new file mode 100644 index 000000000000..4d12a700d015 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-subdev.c @@ -0,0 +1,548 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include "./ici/ici-isys.h" +#ifdef ICI_ENABLED + +#include "./ici/ici-isys-subdev.h" +#include "./ici/ici-isys-pipeline.h" + +unsigned int ici_isys_format_code_to_bpp(u32 code) +{ + switch (code) { + case ICI_FORMAT_RGB888: + return 24; + case ICI_FORMAT_RGB565: + case ICI_FORMAT_UYVY: + case ICI_FORMAT_YUYV: + return 16; + case ICI_FORMAT_SBGGR12: + case ICI_FORMAT_SGBRG12: + case ICI_FORMAT_SGRBG12: + case ICI_FORMAT_SRGGB12: + return 12; + case ICI_FORMAT_SBGGR10: + case ICI_FORMAT_SGBRG10: + case ICI_FORMAT_SGRBG10: + case ICI_FORMAT_SRGGB10: + return 10; + case ICI_FORMAT_SBGGR8: + case ICI_FORMAT_SGBRG8: + case ICI_FORMAT_SGRBG8: + case ICI_FORMAT_SRGGB8: + case ICI_FORMAT_SBGGR10_DPCM8: + case ICI_FORMAT_SGBRG10_DPCM8: + case ICI_FORMAT_SGRBG10_DPCM8: + case ICI_FORMAT_SRGGB10_DPCM8: + return 8; + default: + BUG_ON(1); + } +} + +unsigned int ici_isys_format_code_to_mipi(u32 code) +{ + switch (code) { + case ICI_FORMAT_RGB565: + return ICI_ISYS_MIPI_CSI2_TYPE_RGB565; + case ICI_FORMAT_RGB888: + return ICI_ISYS_MIPI_CSI2_TYPE_RGB888; + case ICI_FORMAT_UYVY: + case ICI_FORMAT_YUYV: + return ICI_ISYS_MIPI_CSI2_TYPE_YUV422_8; + case ICI_FORMAT_SBGGR12: + case ICI_FORMAT_SGBRG12: + case ICI_FORMAT_SGRBG12: + case ICI_FORMAT_SRGGB12: + return 
ICI_ISYS_MIPI_CSI2_TYPE_RAW12; + case ICI_FORMAT_SBGGR10: + case ICI_FORMAT_SGBRG10: + case ICI_FORMAT_SGRBG10: + case ICI_FORMAT_SRGGB10: + return ICI_ISYS_MIPI_CSI2_TYPE_RAW10; + case ICI_FORMAT_SBGGR8: + case ICI_FORMAT_SGBRG8: + case ICI_FORMAT_SGRBG8: + case ICI_FORMAT_SRGGB8: + return ICI_ISYS_MIPI_CSI2_TYPE_RAW8; + case ICI_FORMAT_SBGGR10_DPCM8: + case ICI_FORMAT_SGBRG10_DPCM8: + case ICI_FORMAT_SGRBG10_DPCM8: + case ICI_FORMAT_SRGGB10_DPCM8: + return ICI_ISYS_MIPI_CSI2_TYPE_USER_DEF(1); + default: + BUG_ON(1); + } +} + +enum ici_isys_subdev_pixelorder +ici_isys_subdev_get_pixelorder(u32 code) +{ + switch (code) { + case ICI_FORMAT_SBGGR12: + case ICI_FORMAT_SBGGR10: + case ICI_FORMAT_SBGGR8: + case ICI_FORMAT_SBGGR10_DPCM8: + return ICI_ISYS_SUBDEV_PIXELORDER_BGGR; + case ICI_FORMAT_SGBRG12: + case ICI_FORMAT_SGBRG10: + case ICI_FORMAT_SGBRG8: + case ICI_FORMAT_SGBRG10_DPCM8: + return ICI_ISYS_SUBDEV_PIXELORDER_GBRG; + case ICI_FORMAT_SGRBG12: + case ICI_FORMAT_SGRBG10: + case ICI_FORMAT_SGRBG8: + case ICI_FORMAT_SGRBG10_DPCM8: + return ICI_ISYS_SUBDEV_PIXELORDER_GRBG; + case ICI_FORMAT_SRGGB12: + case ICI_FORMAT_SRGGB10: + case ICI_FORMAT_SRGGB8: + case ICI_FORMAT_SRGGB10_DPCM8: + return ICI_ISYS_SUBDEV_PIXELORDER_RGGB; + default: + BUG_ON(1); + } +} + +unsigned int ici_isys_get_compression_scheme(u32 code) +{ + switch (code) { + case ICI_FORMAT_SBGGR10_DPCM8: + case ICI_FORMAT_SGBRG10_DPCM8: + case ICI_FORMAT_SGRBG10_DPCM8: + case ICI_FORMAT_SRGGB10_DPCM8: + return 3; + default: + return 0; + } +} + +u32 ici_isys_subdev_code_to_uncompressed(u32 sink_code) +{ + switch (sink_code) { + case ICI_FORMAT_SBGGR10_DPCM8: + return ICI_FORMAT_SBGGR10; + case ICI_FORMAT_SGBRG10_DPCM8: + return ICI_FORMAT_SGBRG10; + case ICI_FORMAT_SGRBG10_DPCM8: + return ICI_FORMAT_SGRBG10; + case ICI_FORMAT_SRGGB10_DPCM8: + return ICI_FORMAT_SRGGB10; + default: + return sink_code; + } +} + +struct ici_framefmt *__ici_isys_subdev_get_ffmt( + struct ici_isys_subdev *asd, + unsigned pad) +{ + if (pad >= asd->num_pads) + return NULL; + + return &asd->ffmt[pad]; +} + +int ici_isys_subdev_get_ffmt( + struct ici_isys_node* node, + struct ici_pad_framefmt* pff) +{ + int ret = 0; + struct ici_framefmt *format_out; + struct ici_isys_subdev *asd = node->sd; + + mutex_lock(&asd->mutex); + format_out = __ici_isys_subdev_get_ffmt(asd, + pff->pad.pad_idx); + if (format_out) + pff->ffmt = *format_out; + else + ret = -EINVAL; + mutex_unlock(&asd->mutex); + return ret; +} + +static int __subdev_set_ffmt(struct ici_isys_subdev *asd, + struct ici_pad_framefmt *pff) +{ + unsigned int i; + unsigned pad = pff->pad.pad_idx; + unsigned pixelformat; + BUG_ON(!mutex_is_locked(&asd->mutex)); + + if (pad >= asd->num_pads) + return -EINVAL; + + pff->ffmt.width = clamp(pff->ffmt.width, + IPU_ISYS_MIN_WIDTH, + IPU_ISYS_MAX_WIDTH); + pff->ffmt.height = clamp(pff->ffmt.height, + IPU_ISYS_MIN_HEIGHT, + IPU_ISYS_MAX_HEIGHT); + + pixelformat = asd->supported_codes[pad][0]; + for (i = 0; asd->supported_codes[pad][i]; i++) { + if (asd->supported_codes[pad][i] == + pff->ffmt.pixelformat) { + + pixelformat = asd->supported_codes[pad][i]; + break; + } + } + pff->ffmt.pixelformat = pixelformat; + asd->set_ffmt_internal(asd, pad, &pff->ffmt); + asd->ffmt[pad] = pff->ffmt; + return 0; +} + +int ici_isys_subdev_set_ffmt( + struct ici_isys_node* node, + struct ici_pad_framefmt* pff) +{ + int res; + struct ici_isys_subdev *asd = node->sd; + + mutex_lock(&asd->mutex); + res = __subdev_set_ffmt(asd, pff); + mutex_unlock(&asd->mutex); + return res; +} + 
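+/* Enumerate the formats supported on a pad. The per-pad number of codes is
+ * counted lazily on the first query and cached in supported_code_counts. */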
+int ici_isys_subdev_get_supported_format( + struct ici_isys_node* node, + struct ici_pad_supported_format_desc* psfd) +{ + struct ici_isys_subdev *asd = node->sd; + int pad = psfd->pad.pad_idx; + int idx = psfd->idx; + int i; + int rval = 0; + + mutex_lock(&asd->mutex); + if (!asd->supported_code_counts[pad]) { + for (i = 0; asd->supported_codes[pad][i]; i++) {} + asd->supported_code_counts[pad] = i; + } + + if (idx < asd->supported_code_counts[pad]) { + psfd->color_format = asd->supported_codes[pad][idx]; + psfd->min_width = IPU_ISYS_MIN_WIDTH; + psfd->max_width = IPU_ISYS_MAX_WIDTH; + psfd->min_height = IPU_ISYS_MIN_HEIGHT; + psfd->max_height = IPU_ISYS_MAX_HEIGHT; + } else { + rval = -EINVAL; + } + + mutex_unlock(&asd->mutex); + return rval; +} + + +int intel_ipu4_isys_subdev_set_crop_rect(struct ici_isys_subdev + *asd, unsigned pad, + struct ici_rect *r) +{ + struct node_pad *np; + struct ici_rect rmax = { 0 }; + struct ici_rect *rcrop; + unsigned int tgt; + struct ici_framefmt *ffmt = + __ici_isys_subdev_get_ffmt(asd, pad); + + if (!ffmt) + return -EINVAL; + if (!asd->valid_tgts[pad].crop) + return -EINVAL; + np = &asd->pads[pad]; + rcrop = &asd->crop[pad]; + + if (np->flags & ICI_PAD_FLAGS_SINK) { + rmax.width = ffmt->width; + rmax.height = ffmt->height; + tgt = ICI_ISYS_SUBDEV_PROP_TGT_SINK_CROP; + } else { + /* 0 is the sink pad. */ + rmax = asd->crop[0]; + tgt = ICI_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP; + } + rcrop->width = clamp(r->width, IPU_ISYS_MIN_WIDTH, rmax.width); + rcrop->height = clamp(r->height, IPU_ISYS_MIN_HEIGHT, + rmax.height); + ici_isys_subdev_fmt_propagate(asd, pad, rcrop, tgt, NULL); + return 0; +} + +int intel_ipu4_isys_subdev_set_compose_rect(struct ici_isys_subdev + *asd, unsigned pad, + struct ici_rect *r) +{ + struct node_pad *np; + struct ici_rect rmax = { 0 }; + struct ici_rect *rcompose; + unsigned int tgt; + struct ici_framefmt *ffmt = + __ici_isys_subdev_get_ffmt(asd, pad); + + if (!ffmt) + return -EINVAL; + if (!asd->valid_tgts[pad].compose) + return -EINVAL; + np = &asd->pads[pad]; + rcompose = &asd->compose[pad]; + + if (np->flags & ICI_PAD_FLAGS_SINK) { + rmax = asd->crop[pad]; + tgt = ICI_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE; + } else { + /* 0 is the sink pad. 
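+		 * Its compose rectangle bounds the source compose rectangle.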
+		 */
+		rmax = asd->compose[0];
+		tgt = ICI_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE;
+	}
+	rcompose->width = clamp(r->width, IPU_ISYS_MIN_WIDTH,
+				rmax.width);
+	rcompose->height = clamp(r->height, IPU_ISYS_MIN_HEIGHT,
+				 rmax.height);
+	ici_isys_subdev_fmt_propagate(asd, pad, rcompose, tgt,
+				      NULL);
+	return 0;
+}
+
+int ici_isys_subdev_set_sel(
+	struct ici_isys_node *node,
+	struct ici_pad_selection *ps)
+{
+	struct ici_isys_subdev *asd = node->sd;
+	int rval = 0;
+
+	if (WARN_ON(ps->pad.pad_idx >= asd->num_pads))
+		return -EINVAL;
+
+	mutex_lock(&asd->mutex);
+	switch (ps->sel_type) {
+	case ICI_EXT_SEL_TYPE_COMPOSE:
+		rval = intel_ipu4_isys_subdev_set_compose_rect(
+			asd, ps->pad.pad_idx, &ps->rect);
+		break;
+	case ICI_EXT_SEL_TYPE_CROP:
+		rval = intel_ipu4_isys_subdev_set_crop_rect(
+			asd, ps->pad.pad_idx, &ps->rect);
+		break;
+	default:
+		rval = -EINVAL;
+	}
+	mutex_unlock(&asd->mutex);
+	return rval;
+}
+
+int ici_isys_subdev_get_sel(
+	struct ici_isys_node *node,
+	struct ici_pad_selection *ps)
+{
+	struct ici_isys_subdev *asd = node->sd;
+	int rval = 0;
+
+	if (WARN_ON(ps->pad.pad_idx >= asd->num_pads))
+		return -EINVAL;
+
+	mutex_lock(&asd->mutex);
+	switch (ps->sel_type) {
+	case ICI_EXT_SEL_TYPE_COMPOSE:
+		ps->rect = asd->compose[ps->pad.pad_idx];
+		break;
+	case ICI_EXT_SEL_TYPE_CROP:
+		ps->rect = asd->crop[ps->pad.pad_idx];
+		break;
+	default:
+		rval = -EINVAL;
+	}
+	mutex_unlock(&asd->mutex);
+	return rval;
+}
+
+void ici_isys_subdev_fmt_propagate(
+	struct ici_isys_subdev *asd,
+	unsigned pad,
+	struct ici_rect *r,
+	enum ici_isys_subdev_prop_tgt tgt,
+	struct ici_framefmt *ffmt)
+{
+	unsigned i;
+	struct ici_framefmt *ffmts[asd->num_pads];
+	struct ici_rect *crops[asd->num_pads];
+	struct ici_rect *compose[asd->num_pads];
+
+	if (WARN_ON(pad >= asd->num_pads))
+		return;
+
+	for (i = 0; i < asd->num_pads; i++) {
+		ffmts[i] = __ici_isys_subdev_get_ffmt(asd, i);
+		crops[i] = &asd->crop[i];
+		compose[i] = &asd->compose[i];
+	}
+
+	switch (tgt) {
+	case ICI_ISYS_SUBDEV_PROP_TGT_SINK_FMT:
+		crops[pad]->left = crops[pad]->top = 0;
+		crops[pad]->width = ffmt->width;
+		crops[pad]->height = ffmt->height;
+		ici_isys_subdev_fmt_propagate(asd, pad,
+					      crops[pad], tgt + 1,
+					      ffmt);
+		return;
+	case ICI_ISYS_SUBDEV_PROP_TGT_SINK_CROP:
+		if (WARN_ON(asd->pads[pad].flags & ICI_PAD_FLAGS_SOURCE))
+			return;
+		compose[pad]->left = compose[pad]->top = 0;
+		compose[pad]->width = r->width;
+		compose[pad]->height = r->height;
+		ici_isys_subdev_fmt_propagate(asd, pad,
+					      compose[pad], tgt + 1,
+					      ffmt);
+		return;
+	case ICI_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE:
+		if (!(asd->pads[pad].flags & ICI_PAD_FLAGS_SINK))
+			return;
+
+		for (i = 1; i < asd->num_pads; i++) {
+			if (!(asd->pads[i].flags & ICI_PAD_FLAGS_SOURCE))
+				continue;
+
+			compose[i]->left = compose[i]->top = 0;
+			compose[i]->width = r->width;
+			compose[i]->height = r->height;
+			ici_isys_subdev_fmt_propagate(asd, i,
+						      compose[i],
+						      tgt + 1,
+						      ffmt);
+		}
+		return;
+	case ICI_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE:
+		if (WARN_ON(asd->pads[pad].flags & ICI_PAD_FLAGS_SINK))
+			return;
+
+		crops[pad]->left = crops[pad]->top = 0;
+		crops[pad]->width = r->width;
+		crops[pad]->height = r->height;
+		ici_isys_subdev_fmt_propagate(asd, pad,
+					      crops[pad], tgt + 1,
+					      ffmt);
+		return;
+	case ICI_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP: {
+		struct ici_framefmt fmt = {
+			.width = r->width,
+			.height = r->height,
+			/*
+			 * Either use the code from the sink pad
+			 * or the current one.
+			 */
+			.pixelformat = (ffmt ?
ffmt->pixelformat : + ffmts[pad]->pixelformat), + }; + + asd->set_ffmt_internal(asd, pad, &fmt); + return; + } + } +} + +int ici_isys_subdev_init(struct ici_isys_subdev *asd, + const char* name, + unsigned int num_pads, + unsigned int index) +{ + int res = 0; + + mutex_init(&asd->mutex); + asd->num_pads = num_pads; + asd->pads = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->pads), GFP_KERNEL); + + asd->ffmt = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->ffmt), GFP_KERNEL); + + asd->crop = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->crop), GFP_KERNEL); + + asd->compose = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->compose), GFP_KERNEL); + + asd->valid_tgts = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->valid_tgts), + GFP_KERNEL); + + asd->supported_code_counts = devm_kcalloc(&asd->isys->adev->dev, + num_pads, sizeof(*asd->supported_code_counts), + GFP_KERNEL); + + if (!asd->pads || !asd->ffmt || !asd->crop || !asd->compose || + !asd->valid_tgts || !asd->supported_code_counts) { + res = -ENOMEM; + goto cleanup_allocs; + } + + asd->isl_mode = ICI_ISL_OFF; + asd->be_mode = ICI_BE_RAW; + asd->source = -1; + asd->index = index; + + asd->node.parent = &asd->isys->pipeline_dev; + asd->node.sd = asd; + asd->node.external = false; + + res = ici_isys_pipeline_node_init(asd->isys, + &asd->node, name, asd->num_pads, asd->pads); + if (res) + goto cleanup_allocs; + + asd->node.node_set_pad_ffmt = + ici_isys_subdev_set_ffmt; + asd->node.node_get_pad_ffmt = + ici_isys_subdev_get_ffmt; + asd->node.node_get_pad_supported_format = + ici_isys_subdev_get_supported_format; + asd->node.node_set_pad_sel = + ici_isys_subdev_set_sel; + asd->node.node_get_pad_sel = + ici_isys_subdev_get_sel; + + return 0; + +cleanup_allocs: + if (asd->valid_tgts) + devm_kfree(&asd->isys->adev->dev, asd->valid_tgts); + if (asd->compose) + devm_kfree(&asd->isys->adev->dev, asd->compose); + if (asd->crop) + devm_kfree(&asd->isys->adev->dev, asd->crop); + if (asd->ffmt) + devm_kfree(&asd->isys->adev->dev, asd->ffmt); + if (asd->pads) + devm_kfree(&asd->isys->adev->dev, asd->pads); + mutex_destroy(&asd->mutex); + return res; +} + +void ici_isys_subdev_cleanup( + struct ici_isys_subdev *asd) +{ + list_del(&asd->node.node_entry); + + if (asd->valid_tgts) + devm_kfree(&asd->isys->adev->dev, asd->valid_tgts); + if (asd->compose) + devm_kfree(&asd->isys->adev->dev, asd->compose); + if (asd->crop) + devm_kfree(&asd->isys->adev->dev, asd->crop); + if (asd->ffmt) + devm_kfree(&asd->isys->adev->dev, asd->ffmt); + if (asd->pads) + devm_kfree(&asd->isys->adev->dev, asd->pads); + mutex_destroy(&asd->mutex); +} + +#endif /*ICI_ENABLED*/ diff --git a/drivers/media/pci/intel/ici/ici-isys-subdev.h b/drivers/media/pci/intel/ici/ici-isys-subdev.h new file mode 100644 index 000000000000..e783fa104131 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-subdev.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_SUBDEV_H +#define ICI_ISYS_SUBDEV_H + +#include +#include "ici-isys-pipeline.h" + +struct node_subdev_format; + +#define ICI_ISYS_MIPI_CSI2_TYPE_NULL 0x10 +#define ICI_ISYS_MIPI_CSI2_TYPE_BLANKING 0x11 +#define ICI_ISYS_MIPI_CSI2_TYPE_EMBEDDED8 0x12 +#define ICI_ISYS_MIPI_CSI2_TYPE_YUV422_8 0x1e +#define ICI_ISYS_MIPI_CSI2_TYPE_RGB565 0x22 +#define ICI_ISYS_MIPI_CSI2_TYPE_RGB888 0x24 +#define ICI_ISYS_MIPI_CSI2_TYPE_RAW6 0x28 +#define ICI_ISYS_MIPI_CSI2_TYPE_RAW7 0x29 
+#define ICI_ISYS_MIPI_CSI2_TYPE_RAW8 0x2a
+#define ICI_ISYS_MIPI_CSI2_TYPE_RAW10 0x2b
+#define ICI_ISYS_MIPI_CSI2_TYPE_RAW12 0x2c
+#define ICI_ISYS_MIPI_CSI2_TYPE_RAW14 0x2d
+#define ICI_ISYS_MIPI_CSI2_TYPE_USER_DEF(i) (0x30 + (i) - 1) /* 1-8 */
+
+enum ici_be_mode {
+	ICI_BE_RAW = 0,
+	ICI_BE_SOC
+};
+
+enum ici_isl_mode {
+	ICI_ISL_OFF = 0,	/* IA_CSS_ISYS_USE_NO_ISL_NO_ISA */
+	ICI_ISL_CSI2_BE,	/* IA_CSS_ISYS_USE_SINGLE_DUAL_ISL */
+	ICI_ISL_ISA		/* IA_CSS_ISYS_USE_SINGLE_ISA */
+};
+
+enum ici_isys_subdev_pixelorder {
+	ICI_ISYS_SUBDEV_PIXELORDER_BGGR = 0,
+	ICI_ISYS_SUBDEV_PIXELORDER_GBRG,
+	ICI_ISYS_SUBDEV_PIXELORDER_GRBG,
+	ICI_ISYS_SUBDEV_PIXELORDER_RGGB,
+};
+
+enum ici_isys_subdev_prop_tgt {
+	ICI_ISYS_SUBDEV_PROP_TGT_SINK_FMT,
+	ICI_ISYS_SUBDEV_PROP_TGT_SINK_CROP,
+	ICI_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE,
+	ICI_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE,
+	ICI_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP,
+};
+
+#define ICI_ISYS_SUBDEV_PROP_TGT_NR_OF \
+	(ICI_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP + 1)
+
+struct ici_isys_subdev {
+	struct ici_isys_node node;
+	/* Serialise access to any other field in the struct */
+	struct mutex mutex;
+	struct ici_isys *isys;
+	unsigned const *const *supported_codes;
+	unsigned *supported_code_counts;
+	unsigned int num_pads;
+	struct node_pad *pads;
+	struct ici_framefmt *ffmt;
+	struct ici_rect *crop;
+	struct ici_rect *compose;
+	struct {
+		bool crop;
+		bool compose;
+	} *valid_tgts;
+	enum ici_isl_mode isl_mode;
+	enum ici_be_mode be_mode;
+	int source;		/* SSI stream source; -1 if unset */
+	unsigned int index;	/* index for sd array in csi2 */
+	void (*set_ffmt_internal)(
+		struct ici_isys_subdev *asd,
+		unsigned pad,
+		struct ici_framefmt *format);
+};
+
+unsigned int ici_isys_format_code_to_bpp(u32 code);
+unsigned int ici_isys_format_code_to_mipi(u32 code);
+enum ici_isys_subdev_pixelorder
+ici_isys_subdev_get_pixelorder(u32 code);
+unsigned int ici_isys_get_compression_scheme(u32 code);
+u32 ici_isys_subdev_code_to_uncompressed(u32 sink_code);
+
+struct ici_framefmt *__ici_isys_subdev_get_ffmt(
+	struct ici_isys_subdev *asd,
+	unsigned pad);
+void ici_isys_subdev_fmt_propagate(
+	struct ici_isys_subdev *asd,
+	unsigned pad,
+	struct ici_rect *r,
+	enum ici_isys_subdev_prop_tgt tgt,
+	struct ici_framefmt *ffmt);
+
+int ici_isys_subdev_init(struct ici_isys_subdev *asd,
+	const char *name,
+	unsigned int num_pads,
+	unsigned int index);
+void ici_isys_subdev_cleanup(
+	struct ici_isys_subdev *asd);
+
+#define ici_node_to_subdev(__node) \
+	container_of(__node, struct ici_isys_subdev, node)
+#endif /* ICI_ISYS_SUBDEV_H */
diff --git a/drivers/media/pci/intel/ici/ici-isys-tpg.c b/drivers/media/pci/intel/ici/ici-isys-tpg.c
new file mode 100644
index 000000000000..250ed01a0cd6
--- /dev/null
+++ b/drivers/media/pci/intel/ici/ici-isys-tpg.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#include "./ici/ici-isys.h"
+#ifdef ICI_ENABLED
+#include
+#include "./ici/ici-isys-subdev.h"
+#include "./ici/ici-isys-stream.h"
+#include "./ici/ici-isys-tpg.h"
+#include "ipu-isys-tpg.h"
+#include "isysapi/interface/ia_css_isysapi_fw_types.h"
+
+#define MIPI_GEN_PPC 4
+
+#define ici_asd_to_tpg(__asd) \
+	container_of(__asd, struct ici_isys_tpg, asd)
+
+static const uint32_t ici_tpg_supported_codes_pad[] = {
+	ICI_FORMAT_SBGGR8,
+	ICI_FORMAT_SGBRG8,
+	ICI_FORMAT_SGRBG8,
+	ICI_FORMAT_SRGGB8,
+	ICI_FORMAT_SBGGR10,
+	ICI_FORMAT_SGBRG10,
+	ICI_FORMAT_SGRBG10,
+	ICI_FORMAT_SRGGB10,
+	0,
+};
+
+static const
uint32_t *ici_tpg_supported_codes[] = { + ici_tpg_supported_codes_pad, +}; + +static int set_stream( + struct ici_isys_node *node, + void* ip, + int enable) +{ + struct ici_isys_subdev *asd = node->sd; + struct ici_isys_tpg *tpg = + to_ici_isys_tpg(asd); + unsigned int bpp = + ici_isys_format_code_to_bpp(tpg->asd. + ffmt[TPG_PAD_SOURCE]. + pixelformat); + /* + * In B0 MIPI_GEN block is CSI2 FB. Need to enable/disable TPG selection + * register to control the TPG streaming. + */ + writel(enable ? 1 : 0, tpg->sel); + + if (!enable) { + writel(0, tpg->base + MIPI_GEN_REG_COM_ENABLE); + return 0; + } + + writel(MIPI_GEN_COM_DTYPE_RAW(bpp), + tpg->base + MIPI_GEN_REG_COM_DTYPE); + writel(ici_isys_format_code_to_mipi + (tpg->asd.ffmt[TPG_PAD_SOURCE].pixelformat), + tpg->base + MIPI_GEN_REG_COM_VTYPE); + + writel(0, tpg->base + MIPI_GEN_REG_COM_VCHAN); + writel(DIV_ROUND_UP + (tpg->asd.ffmt[TPG_PAD_SOURCE].width * bpp, BITS_PER_BYTE), + tpg->base + MIPI_GEN_REG_COM_WCOUNT); + + writel(0, tpg->base + MIPI_GEN_REG_SYNG_NOF_FRAMES); + + writel(DIV_ROUND_UP(tpg->asd.ffmt[TPG_PAD_SOURCE].width, MIPI_GEN_PPC), + tpg->base + MIPI_GEN_REG_SYNG_NOF_PIXELS); + writel(tpg->asd.ffmt[TPG_PAD_SOURCE].height, + tpg->base + MIPI_GEN_REG_SYNG_NOF_LINES); + + writel(0, tpg->base + MIPI_GEN_REG_TPG_MODE); + + writel(-1, tpg->base + MIPI_GEN_REG_TPG_HCNT_MASK); + writel(-1, tpg->base + MIPI_GEN_REG_TPG_VCNT_MASK); + writel(-1, tpg->base + MIPI_GEN_REG_TPG_XYCNT_MASK); + writel(0, tpg->base + MIPI_GEN_REG_TPG_HCNT_DELTA); + writel(0, tpg->base + MIPI_GEN_REG_TPG_VCNT_DELTA); + + writel(2, tpg->base + MIPI_GEN_REG_COM_ENABLE); + return 0; +} + +static const char *const tpg_mode_items[] = { + "Ramp", + "Checkerboard", /* Does not work, disabled. */ + "Frame Based Colour", + NULL, +}; + +void ici_tpg_set_ffmt(struct ici_isys_subdev *asd, + unsigned pad, + struct ici_framefmt *ffmt) +{ + struct ici_framefmt *cur_ffmt = + __ici_isys_subdev_get_ffmt(asd, pad); + + ffmt->field = ICI_FIELD_NONE; + ffmt->colorspace = 0; + memset(ffmt->reserved, 0, sizeof(ffmt->reserved)); + if (cur_ffmt) { + *cur_ffmt = *ffmt; + dev_dbg(&asd->isys->adev->dev, "%s: TPG ici stream set format\n" + "width: %u, height: %u, pixelformat: %u, colorspace: %u field: %u\n", + __func__, + cur_ffmt->width, + cur_ffmt->height, + cur_ffmt->pixelformat, cur_ffmt->colorspace, cur_ffmt->field); + } +} + +static int ici_tpg_pipeline_validate( + struct node_pipeline *inp, + struct ici_isys_node *node) +{ + struct ici_isys_subdev* asd = node->sd; + struct ici_isys_tpg *tpg = + ici_asd_to_tpg(asd); + struct ici_isys_pipeline *ip = + ici_nodepipe_to_pipeline(inp); + + ip->asd_source = &tpg->asd; + ip->asd_source_pad_id = TPG_PAD_SOURCE; + return 0; +} + +int ici_isys_tpg_init(struct ici_isys_tpg *tpg, + struct ici_isys *isys, + void __iomem *base, void __iomem *sel, + unsigned int index) +{ + struct ici_pad_framefmt fmt = { + .pad.pad_idx = TPG_PAD_SOURCE, + .ffmt = { + .width = 4096, + .height = 3072, + }, + }; + + int rval; + char name[ICI_MAX_NODE_NAME]; + + dev_dbg(&isys->adev->dev, "ici_isys_tpg_init\n"); + + tpg->isys = isys; + tpg->base = base; + tpg->index = index; + tpg->sel = sel; + tpg->asd.isys = isys; + + snprintf(name, sizeof(name), + IPU_ISYS_ENTITY_PREFIX " TPG %u", index); + rval = ici_isys_subdev_init(&tpg->asd, + name, NR_OF_TPG_PADS, 0); + if (rval) + goto fail; + + tpg->asd.pads[TPG_PAD_SOURCE].flags = ICI_PAD_FLAGS_SOURCE; + + tpg->asd.source = IA_CSS_ISYS_STREAM_SRC_MIPIGEN_PORT0 + index; + tpg->asd.supported_codes = ici_tpg_supported_codes; 
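+	/* set_ffmt_internal must be hooked up before node_set_pad_ffmt()
+	 * is invoked below; it is what actually applies the default
+	 * 4096x3072 format. */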
+ tpg->asd.set_ffmt_internal = ici_tpg_set_ffmt; + tpg->asd.node.node_set_streaming = set_stream; + tpg->asd.node.node_pipeline_validate = + ici_tpg_pipeline_validate; + tpg->asd.node.node_set_pad_ffmt(&tpg->asd.node, &fmt); + tpg->as.isys = isys; + tpg->as.try_fmt_vid_mplane = + ici_isys_video_try_fmt_vid_mplane_default; + tpg->as.prepare_firmware_stream_cfg = + ici_isys_prepare_firmware_stream_cfg_default; + tpg->as.pfmts = ici_isys_pfmts_packed; + tpg->as.packed = true; + tpg->as.buf_list.css_pin_type = IA_CSS_ISYS_PIN_TYPE_MIPI; + tpg->as.line_header_length = + INTEL_IPU4_ISYS_CSI2_LONG_PACKET_HEADER_SIZE; + tpg->as.line_footer_length = + INTEL_IPU4_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE; + + /*TODO:*/ + /* + * Buffer queue management call backs to be added. + */ + + rval = ici_isys_stream_init(&tpg->as, &tpg->asd, + &tpg->asd.node, TPG_PAD_SOURCE, + ICI_PAD_FLAGS_SINK); + if (rval) { + dev_err(&isys->adev->dev, "can't init stream node\n"); + goto fail; + } + + return 0; + +fail: + ici_isys_tpg_cleanup(tpg); + + return 1; +} +EXPORT_SYMBOL(ici_isys_tpg_init); + +void ici_isys_tpg_cleanup(struct ici_isys_tpg *tpg) +{ + ici_isys_subdev_cleanup(&tpg->asd); + ici_isys_stream_cleanup(&tpg->as); +} +EXPORT_SYMBOL(ici_isys_tpg_cleanup); + +#endif /*ICI_ENABLED*/ diff --git a/drivers/media/pci/intel/ici/ici-isys-tpg.h b/drivers/media/pci/intel/ici/ici-isys-tpg.h new file mode 100644 index 000000000000..5e6eeefc9e83 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys-tpg.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_TPG_H +#define ICI_ISYS_TPG_H + +#include "ici-isys-frame-buf.h" +#include "ici-isys-subdev.h" +#include "ici-isys-stream.h" + +struct intel_ipu4_isys_tpg_pdata; + +#define TPG_PAD_SOURCE 0 +#define NR_OF_TPG_PADS 1 + +/* + * struct ici_isys_tpg + * + * +*/ +struct ici_isys_tpg { + struct intel_ipu4_isys_tpg_pdata *pdata; + struct ici_isys *isys; + struct ici_isys_subdev asd; + struct ici_isys_stream as; + + void __iomem *base; + void __iomem *sel; + int streaming; + u32 receiver_errors; + unsigned int nlanes; + unsigned int index; + atomic_t sof_sequence; +}; + +#define to_ici_isys_tpg(sd) \ + container_of(sd, struct ici_isys_tpg, asd) + +int ici_isys_tpg_init(struct ici_isys_tpg *tpg, + struct ici_isys *isys, + void __iomem *base, void __iomem *sel, + unsigned int index); +void ici_isys_tpg_cleanup(struct ici_isys_tpg *tpg); + +#endif /* ICI_ISYS_TPG_H */ diff --git a/drivers/media/pci/intel/ici/ici-isys.c b/drivers/media/pci/intel/ici/ici-isys.c new file mode 100644 index 000000000000..466659baec24 --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys.c @@ -0,0 +1,1367 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-cpd.h" +#include "ipu-mmu.h" +#include "ipu-dma.h" +#include "ipu-platform-isys-csi2-reg.h" +#include "ipu-trace.h" +#include "ipu-buttress.h" +#include "isysapi/interface/ia_css_isysapi.h" +#include "./ici/ici-isys.h" +#include "./ici/ici-isys-csi2.h" +#include "./ici/ici-isys-pipeline-device.h" + +#ifdef ICI_ENABLED + +#define ISYS_PM_QOS_VALUE 300 + +#define INTEL_IPU4_ISYS_OUTPUT_PINS 11 +#define INTEL_IPU4_NUM_CAPTURE_DONE 2 + +/* Trace block definitions for isys */ +struct ipu_trace_block isys_trace_blocks[] = { + { + 
.offset = TRACE_REG_IS_TRACE_UNIT_BASE,
+		.type = IPU_TRACE_BLOCK_TUN,
+	},
+	{
+		.offset = TRACE_REG_IS_SP_EVQ_BASE,
+		.type = IPU_TRACE_BLOCK_TM,
+	},
+	{
+		.offset = TRACE_REG_IS_SP_GPC_BASE,
+		.type = IPU_TRACE_BLOCK_GPC,
+	},
+	{
+		.offset = TRACE_REG_IS_ISL_GPC_BASE,
+		.type = IPU_TRACE_BLOCK_GPC,
+	},
+	{
+		.offset = TRACE_REG_IS_MMU_GPC_BASE,
+		.type = IPU_TRACE_BLOCK_GPC,
+	},
+	{
+		.offset = TRACE_REG_CSI2_TM_BASE,
+		.type = IPU_TRACE_CSI2,
+	},
+	{
+		.offset = TRACE_REG_CSI2_3PH_TM_BASE,
+		.type = IPU_TRACE_CSI2_3PH,
+	},
+	{
+		/* Note! this covers all 9 blocks */
+		.offset = TRACE_REG_CSI2_SIG2SIO_GR_BASE(0),
+		.type = IPU_TRACE_SIG2CIOS,
+	},
+	{
+		/* Note! this covers all 9 blocks */
+		.offset = TRACE_REG_CSI2_PH3_SIG2SIO_GR_BASE(0),
+		.type = IPU_TRACE_SIG2CIOS,
+	},
+	{
+		.offset = TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N,
+		.type = IPU_TRACE_TIMER_RST,
+	},
+	{
+		.type = IPU_TRACE_BLOCK_END,
+	}
+};
+
+// Latest code structure doesn't do these functions;
+// let it remain to gauge the impact and then remove.
+#if 0
+static int isys_determine_legacy_csi_lane_configuration(struct ici_isys *isys)
+{
+	const struct csi_lane_cfg {
+		u32 reg_value;
+		int port_lanes[IPU_ISYS_MAX_CSI2_LEGACY_PORTS];
+	} csi_lanes_to_cfg[] = {
+		{ 0x0, { 4, 2, 0, 0 } },	/* no sensor defaults here */
+		{ 0x1, { 3, 2, 0, 0 } },
+		{ 0x2, { 2, 2, 0, 0 } },
+		{ 0x3, { 1, 2, 0, 0 } },
+		{ 0x4, { 4, 1, 0, 0 } },
+		{ 0x5, { 3, 1, 0, 0 } },
+		{ 0x6, { 2, 1, 0, 0 } },
+		{ 0x7, { 1, 1, 0, 0 } },
+		{ 0x8, { 4, 1, 0, 1 } },
+		{ 0x9, { 3, 1, 0, 1 } },
+		{ 0xa, { 2, 1, 0, 1 } },
+		{ 0xb, { 1, 1, 0, 1 } },
+		{ 0x10, { 2, 2, 2, 0 } },
+		{ 0x11, { 2, 2, 1, 0 } },
+		{ 0x18, { 2, 1, 2, 1 } },
+		{ 0x19, { 1, 1, 1, 1 } },
+	};
+	int i, j;
+
+	for (i = 0; i < ARRAY_SIZE(csi_lanes_to_cfg); i++) {
+		for (j = 0; j < IPU_ISYS_MAX_CSI2_LEGACY_PORTS; j++) {
+			/* Port with no sensor can be handled as don't care */
+			if (!isys->ici_csi2[j].nlanes)
+				continue;
+			if (csi_lanes_to_cfg[i].port_lanes[j] !=
+			    isys->ici_csi2[j].nlanes)
+				break;
+		}
+
+		if (j < IPU_ISYS_MAX_CSI2_LEGACY_PORTS)
+			continue;
+
+		isys->legacy_port_cfg = csi_lanes_to_cfg[i].reg_value;
+		dev_dbg(&isys->adev->dev, "Lane configuration value 0x%x\n",
+			isys->legacy_port_cfg);
+		return 0;
+	}
+	dev_err(&isys->adev->dev, "Unsupported CSI lane configuration\n");
+	return -EINVAL;
+}
+
+static int isys_determine_csi_combo_lane_configuration(struct ici_isys *isys)
+{
+	const struct csi_lane_cfg {
+		u32 reg_value;
+		int port_lanes[IPU_ISYS_MAX_CSI2_COMBO_PORTS];
+	} csi_lanes_to_cfg[] = {
+		{ 0x1f, { 0, 0 } },	/* no sensor defaults here - disable all */
+		{ 0x10, { 4, 0 } },
+		{ 0x11, { 3, 0 } },
+		{ 0x12, { 2, 0 } },
+		{ 0x13, { 1, 0 } },
+		{ 0x14, { 3, 1 } },
+		{ 0x15, { 2, 1 } },
+		{ 0x16, { 1, 1 } },
+		{ 0x18, { 2, 2 } },
+		{ 0x19, { 1, 2 } },
+	};
+	int i, j;
+
+	for (i = 0; i < ARRAY_SIZE(csi_lanes_to_cfg); i++) {
+		for (j = 0; j < IPU_ISYS_MAX_CSI2_COMBO_PORTS; j++) {
+			/* Port with no sensor can be handled as don't care */
+			if (!isys->ici_csi2[j + IPU_ISYS_MAX_CSI2_LEGACY_PORTS].nlanes)
+				continue;
+			if (csi_lanes_to_cfg[i].port_lanes[j] !=
+			    isys->ici_csi2[j + IPU_ISYS_MAX_CSI2_LEGACY_PORTS].nlanes)
+				break;
+		}
+
+		if (j < IPU_ISYS_MAX_CSI2_COMBO_PORTS)
+			continue;
+
+		isys->combo_port_cfg = csi_lanes_to_cfg[i].reg_value;
+		dev_dbg(&isys->adev->dev,
+			"Combo port lane configuration value 0x%x\n",
+			isys->combo_port_cfg);
+
+		return 0;
+	}
+	dev_err(&isys->adev->dev,
+		"Unsupported CSI2-combo lane configuration\n");
+	return 0;
+}
+
+#endif
+struct isys_i2c_test {
+	u8 bus_nr;
+	u16 addr;
+	struct i2c_client *client;
+};
+
+static int isys_i2c_test(struct device *dev, void *priv)
+{
+	struct i2c_client *client = i2c_verify_client(dev);
+	struct isys_i2c_test *test = priv;
+
+	if (!client)
+		return 0;
+
+	if (i2c_adapter_id(client->adapter) != test->bus_nr
+	    || client->addr != test->addr)
+		return 0;
+
+	test->client = client;
+
+	return 0;
+}
+
+static struct i2c_client *isys_find_i2c_subdev(struct i2c_adapter *adapter,
+					       struct ipu_isys_subdev_info *sd_info)
+{
+	struct i2c_board_info *info = &sd_info->i2c.board_info;
+	struct isys_i2c_test test = {
+		.bus_nr = i2c_adapter_id(adapter),
+		.addr = info->addr,
+	};
+	int rval;
+
+	rval = i2c_for_each_dev(&test, isys_i2c_test);
+	if (rval || !test.client)
+		return NULL;
+	return test.client;
+}
+
+static struct ici_ext_subdev *register_acpi_i2c_subdev(
+	struct ipu_isys_subdev_info *sd_info, struct i2c_client *client)
+{
+	struct i2c_board_info *info = &sd_info->i2c.board_info;
+	struct ici_ext_subdev *sd;
+
+	request_module(I2C_MODULE_PREFIX "%s", info->type);
+
+	/* ACPI overwrite with platform data */
+	client->dev.platform_data = info->platform_data;
+	/* Change I2C client name to one in temporary platform data */
+	strlcpy(client->name, info->type, sizeof(client->name));
+
+	if (device_reprobe(&client->dev))
+		return NULL;
+
+	if (!client->dev.driver)
+		return NULL;
+
+	if (!try_module_get(client->dev.driver->owner))
+		return NULL;
+
+	sd = i2c_get_clientdata(client);
+
+	module_put(client->dev.driver->owner);
+
+	return sd;
+}
+
+static int ext_device_setup_node(void *ipu_data,
+				 struct ici_ext_subdev *sd,
+				 const char *name)
+{
+	int rval;
+	struct ici_isys *isys = ipu_data;
+
+	sd->node.sd = sd;
+	sd->node.external = true;
+	rval = ici_isys_pipeline_node_init(
+		isys, &sd->node, name, sd->num_pads, sd->pads);
+	if (rval)
+		return rval;
+	sd->num_pads = sd->node.nr_pads;
+	return 0;
+}
+
+static int isys_complete_ext_device_registration(
+	struct ici_isys *isys,
+	struct ici_ext_subdev *sd,
+	struct ipu_isys_csi2_config *csi2)
+{
+	int rval;
+	struct ici_ext_subdev_register sd_register = {0};
+	unsigned int i;
+
+	sd_register.ipu_data = isys;
+	sd_register.sd = sd;
+	sd_register.setup_node = ext_device_setup_node;
+	sd_register.create_link = node_pad_create_link;
+	rval = sd->do_register(&sd_register);
+	if (rval) {
+		dev_err(&isys->adev->dev,
+			"Failed to register external subdev\n");
+		return rval;
+	}
+	if (csi2) {
+		for (i = 0; i < NR_OF_CSI2_VC; i++) {
+			rval = node_pad_create_link(&sd->node, sd->src_pad,
+				&isys->ici_csi2[csi2->port].asd[i].node,
+				CSI2_ICI_PAD_SINK, 0);
+			if (rval) {
+				dev_warn(&isys->adev->dev,
+					 "can't create link from external node\n");
+				return rval;
+			}
+
+			isys->ici_csi2[csi2->port].nlanes = csi2->nlanes;
+			isys->ici_csi2[csi2->port].ext_sd = sd;
+		}
+	}
+	return 0;
+}
+
+static int isys_register_ext_subdev(struct ici_isys *isys,
+				    struct ipu_isys_subdev_info *sd_info,
+				    bool acpi_only)
+{
+	struct i2c_adapter *adapter =
+		i2c_get_adapter(sd_info->i2c.i2c_adapter_id);
+	struct ici_ext_subdev *sd;
+	struct i2c_client *client;
+	int rval;
+
+	dev_info(&isys->adev->dev,
+		 "creating new i2c subdev for %s (address %2.2x, bus %d)",
+		 sd_info->i2c.board_info.type, sd_info->i2c.board_info.addr,
+		 sd_info->i2c.i2c_adapter_id);
+
+	if (!adapter) {
+		dev_warn(&isys->adev->dev, "can't find adapter\n");
+		return -ENOENT;
+	}
+	if (sd_info->csi2) {
+		dev_info(&isys->adev->dev, "sensor device on CSI port: %d\n",
+			 sd_info->csi2->port);
+		if (sd_info->csi2->port >= IPU_ISYS_MAX_CSI2_PORTS ||
+		    !isys->ici_csi2[sd_info->csi2->port].isys) {
+			dev_warn(&isys->adev->dev, "invalid csi2 port %u\n",
+				 sd_info->csi2->port);
+			rval = -EINVAL;
+			goto skip_put_adapter;
+		}
+	} else {
+		dev_info(&isys->adev->dev, "non camera subdevice\n");
+	}
+
+	client = isys_find_i2c_subdev(adapter, sd_info);
+
+	if (acpi_only) {
+		if (!client) {
+			dev_dbg(&isys->adev->dev,
+				"Matching ACPI device not found - postpone\n");
+			rval = 0;
+			goto skip_put_adapter;
+		}
+		rval = 0;
+		goto skip_put_adapter;
+		if (!sd_info->acpiname) {
+			dev_dbg(&isys->adev->dev,
+				"No name in platform data\n");
+			rval = 0;
+			goto skip_put_adapter;
+		}
+		if (strcmp(dev_name(&client->dev), sd_info->acpiname)) {
+			dev_dbg(&isys->adev->dev, "Names don't match: %s != %s",
+				dev_name(&client->dev), sd_info->acpiname);
+			rval = 0;
+			goto skip_put_adapter;
+		}
+		/* Acpi match found. Continue to reprobe */
+	} else if (client) {
+		dev_dbg(&isys->adev->dev, "Device exists\n");
+		rval = 0;
+		goto skip_put_adapter;
+	} else if (sd_info->acpiname) {
+		dev_dbg(&isys->adev->dev, "ACPI name doesn't match: %s\n",
+			sd_info->acpiname);
+		rval = 0;
+		goto skip_put_adapter;
+	}
+	if (!client) {
+		dev_info(&isys->adev->dev,
+			 "i2c device not found in ACPI table\n");
+		client = i2c_new_device(adapter,
+					&sd_info->i2c.board_info);
+		/* Guard against a failed device creation (i2c_new_device()
+		 * may return NULL) before reading the client data. */
+		sd = client ? i2c_get_clientdata(client) : NULL;
+	} else {
+		dev_info(&isys->adev->dev, "i2c device found in ACPI table\n");
+		sd = register_acpi_i2c_subdev(sd_info, client);
+	}
+
+	if (!sd) {
+		dev_warn(&isys->adev->dev, "can't create new i2c subdev\n");
+		rval = -EINVAL;
+		goto skip_put_adapter;
+	}
+
+	return isys_complete_ext_device_registration(isys, sd, sd_info->csi2);
+
+skip_put_adapter:
+	i2c_put_adapter(adapter);
+
+	return rval;
+}
+
+static int isys_acpi_add_device(struct device *dev, void *priv,
+				struct ipu_isys_csi2_config *csi2,
+				bool reprobe)
+{
+	struct ici_isys *isys = priv;
+	struct i2c_client *client = i2c_verify_client(dev);
+	struct ici_ext_subdev *sd;
+
+	if (!client)
+		return -ENODEV;
+
+	if (reprobe)
+		if (device_reprobe(&client->dev))
+			return -ENODEV;
+
+	if (!client->dev.driver)
+		return -ENODEV;
+
+	/* Lock the module so we can safely get the v4l2_subdev pointer */
+	if (!try_module_get(client->dev.driver->owner))
+		return -ENODEV;
+
+	sd = i2c_get_clientdata(client);
+	module_put(client->dev.driver->owner);
+	if (!sd) {
+		dev_warn(&isys->adev->dev, "can't create new i2c subdev\n");
+		return -ENODEV;
+	}
+
+	return isys_complete_ext_device_registration(isys, sd, csi2);
+}
+
+static void isys_register_ext_subdevs(struct ici_isys *isys)
+{
+	struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata;
+	struct ipu_isys_subdev_info **sd_info;
+
+	if (spdata) {
+		/* Scan spdata first to possibly override ACPI data */
+		/* ACPI created devices */
+		for (sd_info = spdata->subdevs; *sd_info; sd_info++)
+			isys_register_ext_subdev(isys, *sd_info, true);
+
+		/* Scan non-acpi devices */
+		for (sd_info = spdata->subdevs; *sd_info; sd_info++)
+			isys_register_ext_subdev(isys, *sd_info, false);
+	} else {
+		dev_info(&isys->adev->dev, "no subdevice info provided\n");
+	}
+
+	/* Handle real ACPI stuff */
+	request_module("ipu4-acpi");
+	ipu_get_acpi_devices(isys, &isys->adev->dev,
+			     isys_acpi_add_device);
+}
+
+static void isys_unregister_subdevices(struct ici_isys *isys)
+{
+	const struct ipu_isys_internal_tpg_pdata *tpg =
+		&isys->pdata->ipdata->tpg;
+	const struct ipu_isys_internal_csi2_pdata *csi2 =
+		&isys->pdata->ipdata->csi2;
+	unsigned int i;
+
+	for (i = 0; i < NR_OF_CSI2_BE_SOC_STREAMS; i++) {
+		ici_isys_csi2_be_cleanup(&isys->ici_csi2_be[i]);
+	}
+
+	for (i = 0; i < tpg->ntpgs; i++) {
+		ici_isys_tpg_cleanup(&isys->ici_tpg[i]);
+	}
+
+	for (i = 0; i < csi2->nports; i++) {
+		ici_isys_csi2_cleanup(&isys->ici_csi2[i]);
+	}
+}
+
+static int isys_register_subdevices(struct ici_isys *isys)
+{
+	const struct ipu_isys_internal_tpg_pdata *tpg =
+		&isys->pdata->ipdata->tpg;
+	const struct ipu_isys_internal_csi2_pdata *csi2 =
+		&isys->pdata->ipdata->csi2;
+	unsigned int i, j, k;
+	int rval;
+
+	BUG_ON(csi2->nports > IPU_ISYS_MAX_CSI2_PORTS);
+	BUG_ON(tpg->ntpgs > 2);
+
+	for (i = 0; i < csi2->nports; i++) {
+		rval = ici_isys_csi2_init(
+			&isys->ici_csi2[i], isys,
+			isys->pdata->base + csi2->offsets[i], i);
+		if (rval)
+			goto fail;
+
+		isys->isr_csi2_bits |=
+			IPU_ISYS_UNISPART_IRQ_CSI2(i);
+	}
+
+	for (i = 0; i < tpg->ntpgs; i++) {
+		rval = ici_isys_tpg_init(&isys->ici_tpg[i], isys,
+					 isys->pdata->base + tpg->offsets[i],
+					 isys->pdata->base + tpg->sels[i], i);
+		if (rval)
+			goto fail;
+	}
+
+	for (i = 0; i < NR_OF_CSI2_BE_SOC_STREAMS; i++) {
+		rval = ici_isys_csi2_be_init(&isys->ici_csi2_be[i],
+					     isys, i);
+		if (rval) {
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < csi2->nports; i++) {
+		for (j = 0; j < NR_OF_CSI2_VC; j++) {
+			rval = node_pad_create_link(
+				&isys->ici_csi2[i].asd[j].node,
+				CSI2_ICI_PAD_SOURCE,
+				&isys->ici_csi2_be[ICI_BE_RAW].asd.node,
+				CSI2_BE_ICI_PAD_SINK, 0);
+			if (rval) {
+				dev_info(&isys->adev->dev,
+					 "can't create link between csi2 and csi2_be\n");
+				goto fail;
+			}
+
+			for (k = 1; k < NR_OF_CSI2_BE_SOC_STREAMS; k++) {
+				rval = node_pad_create_link(
+					&isys->ici_csi2[i].asd[j].node,
+					CSI2_ICI_PAD_SOURCE,
+					&isys->ici_csi2_be[k].asd.node,
+					CSI2_BE_ICI_PAD_SINK, 0);
+				if (rval) {
+					dev_info(&isys->adev->dev,
+						 "can't create link between csi2 and csi2_be soc\n");
+					goto fail;
+				}
+			}
+		}
+	}
+
+	for (i = 0; i < tpg->ntpgs; i++) {
+		rval = node_pad_create_link(
+			&isys->ici_tpg[i].asd.node, TPG_PAD_SOURCE,
+			&isys->ici_csi2_be[ICI_BE_RAW].asd.node,
+			CSI2_BE_ICI_PAD_SINK, 0);
+		if (rval) {
+			dev_info(&isys->adev->dev,
+				 "can't create link between tpg and csi2_be\n");
+			goto fail;
+		}
+
+		for (j = 1; j < NR_OF_CSI2_BE_SOC_STREAMS; j++) {
+			rval = node_pad_create_link(
+				&isys->ici_tpg[i].asd.node, TPG_PAD_SOURCE,
+				&isys->ici_csi2_be[j].asd.node,
+				CSI2_BE_ICI_PAD_SINK, 0);
+			if (rval) {
+				dev_info(&isys->adev->dev,
+					 "can't create link between tpg and csi2_be soc\n");
+				goto fail;
+			}
+		}
+	}
+
+	return 0;
+
+fail:
+	isys_unregister_subdevices(isys);
+	return rval;
+}
+
+static int isys_register_devices(struct ici_isys *isys)
+{
+	int rval;
+
+	/* Pipeline device registration */
+	DEBUGK("Pipeline device registering...\n");
+	rval = pipeline_device_register(&isys->pipeline_dev, isys);
+	if (rval < 0) {
+		dev_info(&isys->pipeline_dev.dev,
+			 "can't register pipeline device\n");
+		return rval;
+	}
+
+	rval = isys_register_subdevices(isys);
+	if (rval)
+		goto out_pipeline_device_unregister;
+
+	isys_register_ext_subdevs(isys);
+
+// Latest code structure doesn't do these functions;
+// let it remain to gauge the impact and then remove.
+#if 0 + rval = isys_determine_legacy_csi_lane_configuration(isys); + if (rval) + goto out_isys_unregister_subdevices; + + rval = isys_determine_csi_combo_lane_configuration(isys); + if (rval) + goto out_isys_unregister_subdevices; + +#ifndef CONFIG_PM + ipu_buttress_csi_port_config(isys->adev->isp, + isys->legacy_port_cfg, + isys->combo_port_cfg); +#endif +#endif + return 0; + +#if 0 +out_isys_unregister_subdevices: + isys_unregister_subdevices(isys); +#endif +out_pipeline_device_unregister: + pipeline_device_unregister(&isys->pipeline_dev); + + return rval; +} + +static void isys_unregister_devices(struct ici_isys *isys) +{ + pipeline_device_unregister(&isys->pipeline_dev); + DEBUGK("Pipeline device unregistered\n"); + isys_unregister_subdevices(isys); +} + +static void isys_setup_hw(struct ici_isys *isys) +{ + void __iomem *base = isys->pdata->base; + u32 irqs; + unsigned int i; + + /* Enable irqs for all MIPI busses */ + irqs = IPU_ISYS_UNISPART_IRQ_CSI2(0) | + IPU_ISYS_UNISPART_IRQ_CSI2(1) | + IPU_ISYS_UNISPART_IRQ_CSI2(2) | + IPU_ISYS_UNISPART_IRQ_CSI2(3) | + IPU_ISYS_UNISPART_IRQ_CSI2(4) | + IPU_ISYS_UNISPART_IRQ_CSI2(5); + + irqs |= IPU_ISYS_UNISPART_IRQ_SW; + + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_EDGE); + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE); + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_CLEAR); + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_MASK); + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_ENABLE); + + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG); + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_MUX_REG); + + /* Write CDC FIFO threshold values for isys */ + for (i = 0; i < isys->pdata->ipdata->hw_variant.cdc_fifos; i++) + writel(isys->pdata->ipdata->hw_variant.cdc_fifo_threshold[i], + base + IPU_REG_ISYS_CDC_THRESHOLD(i)); +} + +#ifdef CONFIG_PM +static int isys_runtime_pm_resume(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_device *isp = adev->isp; + struct ici_isys *isys = ipu_bus_get_drvdata(adev); + unsigned long flags; + int ret; + + if (!isys) { + WARN(1, "%s called before probing. skipping.\n", __func__); + return 0; + } + + ipu_trace_restore(dev); + + pm_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE); +#if 0 + ipu_buttress_csi_port_config(isp, + isys->legacy_port_cfg, + isys->combo_port_cfg); +#endif + ret = ipu_buttress_start_tsc_sync(isp); + if (ret) + return ret; + + spin_lock_irqsave(&isys->power_lock, flags); + isys->power = 1; + spin_unlock_irqrestore(&isys->power_lock, flags); + + isys_setup_hw(isys); + + return 0; +} + +static int isys_runtime_pm_suspend(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ici_isys *isys = ipu_bus_get_drvdata(adev); + unsigned long flags; + + if (!isys) { + WARN(1, "%s called before probing. 
skipping.\n", __func__); + return 0; + } + + spin_lock_irqsave(&isys->power_lock, flags); + isys->power = 0; + spin_unlock_irqrestore(&isys->power_lock, flags); + + ipu_trace_stop(dev); + mutex_lock(&isys->mutex); + isys->reset_needed = false; + mutex_unlock(&isys->mutex); + + pm_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); + + return 0; +} + +static int isys_suspend(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ici_isys *isys = ipu_bus_get_drvdata(adev); + + /* If stream is open, refuse to suspend */ + if (isys->stream_opened) + return -EBUSY; + + return 0; +} + +static int isys_resume(struct device *dev) +{ + return 0; +} + +static const struct dev_pm_ops isys_pm_ops = { + .runtime_suspend = isys_runtime_pm_suspend, + .runtime_resume = isys_runtime_pm_resume, + .suspend = isys_suspend, + .resume = isys_resume, +}; +#define ISYS_PM_OPS (&isys_pm_ops) +#else +#define ISYS_PM_OPS NULL +#endif + +static void isys_remove(struct ipu_bus_device *adev) +{ + struct ici_isys *isys = ipu_bus_get_drvdata(adev); + struct ipu_device *isp = adev->isp; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; +#else + unsigned long attrs; +#endif + + dev_info(&adev->dev, "removed\n"); + debugfs_remove_recursive(isys->debugfsdir); + + ipu_trace_uninit(&adev->dev); + isys_unregister_devices(isys); + pm_qos_remove_request(&isys->pm_qos); + + if (!isp->secure_mode) { + ipu_cpd_free_pkg_dir(adev, isys->pkg_dir, + isys->pkg_dir_dma_addr, + isys->pkg_dir_size); + ipu_buttress_unmap_fw_image(adev, &isys->fw_sgt); + release_firmware(isys->fw); + } + + mutex_destroy(&isys->stream_mutex); + mutex_destroy(&isys->mutex); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + dma_free_attrs(&adev->dev, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + isys->short_packet_trace_buffer, + isys->short_packet_trace_buffer_dma_addr, &attrs); +#else + attrs = DMA_ATTR_NON_CONSISTENT; + dma_free_attrs(&adev->dev, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + isys->short_packet_trace_buffer, + isys->short_packet_trace_buffer_dma_addr, attrs); +#endif +} + +static int intel_ipu4_isys_icache_prefetch_get(void *data, u64 *val) +{ + struct ici_isys *isys = data; + + *val = isys->icache_prefetch; + return 0; +} + +static int intel_ipu4_isys_icache_prefetch_set(void *data, u64 val) +{ + struct ici_isys *isys = data; + + if (val != !!val) + return -EINVAL; + + isys->icache_prefetch = val; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(isys_icache_prefetch_fops, + intel_ipu4_isys_icache_prefetch_get, + intel_ipu4_isys_icache_prefetch_set, + "%llu\n"); + +static int intel_ipu4_isys_init_debugfs(struct ici_isys *isys) +{ + struct dentry *file; + struct dentry *dir; + + dir = debugfs_create_dir("isys", isys->adev->isp->ipu_dir); + if (IS_ERR(dir)) + return -ENOMEM; + + file = debugfs_create_file("icache_prefetch", S_IRUSR | S_IWUSR, + dir, isys, + &isys_icache_prefetch_fops); + if (IS_ERR(file)) + goto err; + + isys->debugfsdir = dir; + + return 0; +err: + debugfs_remove_recursive(dir); + return -ENOMEM; +} + +static int alloc_fw_msg_buffers(struct ici_isys *isys, int amount) +{ + dma_addr_t dma_addr; + struct isys_fw_msgs *addr; + unsigned int i; + unsigned long flags; + + for (i = 0; i < amount; i++) { + addr = dma_alloc_attrs(&isys->adev->dev, + sizeof(struct isys_fw_msgs), + &dma_addr, GFP_KERNEL, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + if (!addr) + break; + 
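+		/* Keep the DMA handle inside the message itself so it can
+		 * be handed back to dma_free_attrs() on teardown. */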
addr->dma_addr = dma_addr; + + spin_lock_irqsave(&isys->listlock, flags); + list_add(&addr->head, &isys->framebuflist); + spin_unlock_irqrestore(&isys->listlock, flags); + } + if (i == amount) + return 0; + spin_lock_irqsave(&isys->listlock, flags); + while (!list_empty(&isys->framebuflist)) { + addr = list_first_entry(&isys->framebuflist, + struct isys_fw_msgs, head); + list_del(&addr->head); + spin_unlock_irqrestore(&isys->listlock, flags); + dma_free_attrs(&isys->adev->dev, + sizeof(struct isys_fw_msgs), + addr, addr->dma_addr, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + spin_lock_irqsave(&isys->listlock, flags); + } + spin_unlock_irqrestore(&isys->listlock, flags); + return -ENOMEM; +} + +static int isys_probe(struct ipu_bus_device *adev) +{ + struct ipu_mmu *mmu = dev_get_drvdata(adev->iommu); + struct ici_isys *isys; + struct ipu_device *isp = adev->isp; + const struct firmware *uninitialized_var(fw); + int rval = 0; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; +#else + unsigned long attrs; +#endif + + trace_printk("B|%d|TMWK\n", current->pid); + + /* Has the domain been attached? */ + if (!mmu || !isp->pkg_dir_dma_addr) { + trace_printk("E|TMWK\n"); + return -EPROBE_DEFER; + } + + isys = devm_kzalloc(&adev->dev, sizeof(*isys), GFP_KERNEL); + if (!isys) { + trace_printk("E|TMWK\n"); + return -ENOMEM; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + isys->short_packet_trace_buffer = dma_alloc_attrs(&adev->dev, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + &isys->short_packet_trace_buffer_dma_addr, GFP_KERNEL, &attrs); +#else + attrs = DMA_ATTR_NON_CONSISTENT; + isys->short_packet_trace_buffer = dma_alloc_attrs(&adev->dev, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + &isys->short_packet_trace_buffer_dma_addr, GFP_KERNEL, attrs); +#endif + if (!isys->short_packet_trace_buffer) + return -ENOMEM; + + isys->adev = adev; + isys->pdata = adev->pdata; + + INIT_LIST_HEAD(&isys->requests); + + spin_lock_init(&isys->lock); + spin_lock_init(&isys->power_lock); + isys->power = 0; + + mutex_init(&isys->mutex); + mutex_init(&isys->stream_mutex); + mutex_init(&isys->lib_mutex); + + spin_lock_init(&isys->listlock); + INIT_LIST_HEAD(&isys->framebuflist); + INIT_LIST_HEAD(&isys->framebuflist_fw); + + dev_info(&adev->dev, "isys probe %p %p\n", adev, &adev->dev); + ipu_bus_set_drvdata(adev, isys); + + isys->line_align = IPU_ISYS_2600_MEM_LINE_ALIGN; + isys->icache_prefetch = is_ipu_hw_bxtp_e0(isp); + +#ifndef CONFIG_PM + isys_setup_hw(isys); +#endif + + if (!isp->secure_mode) { + fw = isp->cpd_fw; + + rval = ipu_buttress_map_fw_image( + adev, fw, &isys->fw_sgt); + if (rval) + goto release_firmware; + + isys->pkg_dir = ipu_cpd_create_pkg_dir( + adev, isp->cpd_fw->data, + sg_dma_address(isys->fw_sgt.sgl), + &isys->pkg_dir_dma_addr, + &isys->pkg_dir_size); + if (isys->pkg_dir == NULL) { + rval = -ENOMEM; + goto remove_shared_buffer; + } + } + + /* Debug fs failure is not fatal. 
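+	 * All that is lost is the icache_prefetch knob created by
+	 * intel_ipu4_isys_init_debugfs() below; probing continues.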
*/ + intel_ipu4_isys_init_debugfs(isys); + + ipu_trace_init(adev->isp, isys->pdata->base, &adev->dev, + isys_trace_blocks); + + pm_qos_add_request(&isys->pm_qos, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + + alloc_fw_msg_buffers(isys, 20); + + pm_runtime_allow(&adev->dev); + pm_runtime_enable(&adev->dev); + + rval = isys_register_devices(isys); + if (rval) + goto out_remove_pkg_dir_shared_buffer; + + trace_printk("E|TMWK\n"); + return 0; + +out_remove_pkg_dir_shared_buffer: + if (!isp->secure_mode) + ipu_cpd_free_pkg_dir(adev, isys->pkg_dir, + isys->pkg_dir_dma_addr, + isys->pkg_dir_size); +remove_shared_buffer: + if (!isp->secure_mode) + ipu_buttress_unmap_fw_image( + adev, &isys->fw_sgt); +release_firmware: + if (!isp->secure_mode) + release_firmware(isys->fw); + ipu_trace_uninit(&adev->dev); + + trace_printk("E|TMWK\n"); + + mutex_destroy(&isys->mutex); + mutex_destroy(&isys->stream_mutex); + mutex_destroy(&isys->lib_mutex); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_free_attrs(&adev->dev, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + isys->short_packet_trace_buffer, + isys->short_packet_trace_buffer_dma_addr, &attrs); +#else + dma_free_attrs(&adev->dev, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + isys->short_packet_trace_buffer, + isys->short_packet_trace_buffer_dma_addr, attrs); +#endif + return rval; +} + +struct fwmsg { + int type; + char *msg; + bool valid_ts; +}; + +static const struct fwmsg fw_msg[] = { + { IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE, "STREAM_OPEN_DONE", 0 }, + { IA_CSS_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, "STREAM_CLOSE_ACK", 0 }, + { IA_CSS_ISYS_RESP_TYPE_STREAM_START_ACK, "STREAM_START_ACK", 0 }, + { IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, + "STREAM_START_AND_CAPTURE_ACK", 0 }, + { IA_CSS_ISYS_RESP_TYPE_STREAM_STOP_ACK, "STREAM_STOP_ACK", 0 }, + { IA_CSS_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, "STREAM_FLUSH_ACK", 0 }, + { IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY, "PIN_DATA_READY", 1 }, + { IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, "STREAM_CAPTURE_ACK", 0 }, + { IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, + "STREAM_START_AND_CAPTURE_DONE", 1 }, + { IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, "STREAM_CAPTURE_DONE", 1 }, + { IA_CSS_ISYS_RESP_TYPE_FRAME_SOF, "FRAME_SOF", 1 }, + { IA_CSS_ISYS_RESP_TYPE_FRAME_EOF, "FRAME_EOF", 1 }, + { -1, "UNKNOWN MESSAGE", 0 }, +}; + +static int resp_type_to_index(int type) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(fw_msg); i++) + if (fw_msg[i].type == type) + return i; + + return i - 1; +} + + +static u64 extract_time_from_short_packet_msg( + struct ici_isys_csi2_monitor_message *msg) + +{ + u64 time_h = msg->timestamp_h << 14; + u64 time_l = msg->timestamp_l; + u64 time_h_ovl = time_h & 0xc000; + u64 time_h_h = time_h & (~0xffff); + + /* Fix possible roll overs. 
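+	 * Editor's reading of the arithmetic below: timestamp_h, once
+	 * shifted left by 14, overlaps timestamp_l in bits [15:14]. When
+	 * the high word's copy of those bits lags the low word's, the low
+	 * counter wrapped between the two latches, so one step (0x10000)
+	 * is subtracted from the high part before merging.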
*/ + if (time_h_ovl >= (time_l & 0xc000)) + return time_h_h | time_l; + else + return (time_h_h - 0x10000) | time_l; +} +static u64 tunit_time_to_us(struct ici_isys *isys, u64 time) +{ + struct ipu_bus_device *adev = + to_ipu_bus_device(isys->adev->iommu); + u64 isys_clk = IS_FREQ_SOURCE / adev->ctrl->divisor / 1000000; + return time / isys_clk; +} + +static u64 tsc_time_to_tunit_time(struct ici_isys *isys, + u64 tsc_base, u64 tunit_base, u64 tsc_time) +{ + struct ipu_bus_device *adev = + to_ipu_bus_device(isys->adev->iommu); + u64 isys_clk = IS_FREQ_SOURCE / adev->ctrl->divisor / 100000; + u64 tsc_clk = IPU_BUTTRESS_TSC_CLK / 100000; + + return (tsc_time - tsc_base) * isys_clk / tsc_clk + tunit_base; +} + +static int isys_isr_one_ici(struct ipu_bus_device *adev) +{ + struct ici_isys *isys = ipu_bus_get_drvdata(adev); + struct ia_css_isys_resp_info resp; + struct ici_isys_pipeline *pipe; + u64 ts; + int rval; + unsigned int i; + + if (!isys->fwcom) + return 0; + + rval = ipu_lib_call_notrace_unlocked(stream_handle_response, + isys, &resp); + if (rval < 0) + return rval; + + ts = (u64)resp.timestamp[1] << 32 | resp.timestamp[0]; + + + if (resp.error == IA_CSS_ISYS_ERROR_STREAM_IN_SUSPENSION) + /* Suspension is kind of special case: not enough buffers */ + dev_dbg(&adev->dev, + "hostlib: error resp %02d %s, stream %u, error SUSPENSION, details %d, timestamp 0x%16.16llx, pin %d\n", + resp.type, + fw_msg[resp_type_to_index(resp.type)].msg, + resp.stream_handle, + resp.error_details, + fw_msg[resp_type_to_index(resp.type)].valid_ts ? + ts : 0, resp.pin_id); + else if (resp.error) + dev_dbg(&adev->dev, + "hostlib: error resp %02d %s, stream %u, error %d, details %d, timestamp 0x%16.16llx, pin %d\n", + resp.type, + fw_msg[resp_type_to_index(resp.type)].msg, + resp.stream_handle, + resp.error, resp.error_details, + fw_msg[resp_type_to_index(resp.type)].valid_ts ? + ts : 0, resp.pin_id); + else + dev_dbg(&adev->dev, + "hostlib: resp %02d %s, stream %u, timestamp 0x%16.16llx, pin %d\n", + resp.type, + fw_msg[resp_type_to_index(resp.type)].msg, + resp.stream_handle, + fw_msg[resp_type_to_index(resp.type)].valid_ts ? 
+			ts : 0, resp.pin_id);
+
+	if (resp.stream_handle >= INTEL_IPU4_ISYS_MAX_STREAMS) {
+		dev_err(&adev->dev, "bad stream handle %u\n",
+			resp.stream_handle);
+		return 0;
+	}
+
+	pipe = isys->ici_pipes[resp.stream_handle];
+	if (!pipe) {
+		dev_err(&adev->dev, "no pipeline for stream %u\n",
+			resp.stream_handle);
+		return 0;
+	}
+	pipe->error = resp.error;
+
+	switch (resp.type) {
+	case IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE:
+		complete(&pipe->stream_open_completion);
+		break;
+	case IA_CSS_ISYS_RESP_TYPE_STREAM_CLOSE_ACK:
+		complete(&pipe->stream_close_completion);
+		break;
+	case IA_CSS_ISYS_RESP_TYPE_STREAM_START_ACK:
+		complete(&pipe->stream_start_completion);
+		break;
+	case IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK:
+		complete(&pipe->stream_start_completion);
+		break;
+	case IA_CSS_ISYS_RESP_TYPE_STREAM_STOP_ACK:
+		complete(&pipe->stream_stop_completion);
+		break;
+	case IA_CSS_ISYS_RESP_TYPE_STREAM_FLUSH_ACK:
+		complete(&pipe->stream_stop_completion);
+		break;
+	case IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY:
+		if (resp.pin_id < IPU_ISYS_OUTPUT_PINS &&
+		    pipe->output_pins[resp.pin_id].pin_ready)
+			pipe->output_pins[resp.pin_id].pin_ready(pipe, &resp);
+		else
+			dev_err(&adev->dev,
+				"%d:No data pin ready handler for pin id %d\n",
+				resp.stream_handle, resp.pin_id);
+		break;
+	case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK:
+		complete(&pipe->capture_ack_completion);
+		break;
+	case IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE:
+	case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE:
+		if (pipe->interlaced && pipe->short_packet_source ==
+		    IPU_ISYS_SHORT_PACKET_FROM_TUNIT) {
+			unsigned int i = pipe->short_packet_trace_index;
+			bool msg_matched = false;
+			unsigned int monitor_id;
+
+			if (pipe->csi2->index >= IPU_ISYS_MAX_CSI2_LEGACY_PORTS)
+				monitor_id = TRACE_REG_CSI2_3PH_TM_MONITOR_ID;
+			else
+				monitor_id = TRACE_REG_CSI2_TM_MONITOR_ID;
+
+			dma_sync_single_for_cpu(&isys->adev->dev,
+				isys->short_packet_trace_buffer_dma_addr,
+				IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE,
+				DMA_BIDIRECTIONAL);
+
+			do {
+				struct ici_isys_csi2_monitor_message msg =
+					isys->short_packet_trace_buffer[i];
+				u64 sof_time = tsc_time_to_tunit_time(isys,
+					isys->tsc_timer_base,
+					isys->tunit_timer_base,
+					(u64)resp.timestamp[1] << 32 |
+					resp.timestamp[0]);
+				u64 trace_time =
+					extract_time_from_short_packet_msg(&msg);
+				u64 delta_time_us = tunit_time_to_us(isys,
+					(sof_time > trace_time) ?
+					sof_time - trace_time :
+					trace_time - sof_time);
+
+				i = (i + 1) %
+					IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER;
+				if (msg.cmd == TRACE_REG_CMD_TYPE_D64MTS &&
+				    msg.monitor_id == monitor_id &&
+				    msg.fs == 1 &&
+				    msg.port == pipe->csi2->index &&
+				    msg.vc == pipe->vc &&
+				    delta_time_us <
+				    IPU_ISYS_SHORT_PACKET_TRACE_MAX_TIMESHIFT) {
+					pipe->cur_field = (msg.sequence % 2) ?
+						ICI_FIELD_TOP :
+						ICI_FIELD_BOTTOM;
+					pipe->short_packet_trace_index = i;
+					msg_matched = true;
+					dev_dbg(&isys->adev->dev,
+						"Interlaced field ready. field = %d\n",
+						pipe->cur_field);
+					break;
+				}
+			} while (i != pipe->short_packet_trace_index);
+
+			if (!msg_matched)
+				/* We have walked through the whole buffer.
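+				 * A miss only costs the top/bottom field
+				 * tag for this frame; the capture_done
+				 * handlers below still run.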
*/ + dev_dbg(&isys->adev->dev,"No matched trace message found.\n"); + } + + for (i = 0; i < INTEL_IPU4_NUM_CAPTURE_DONE; i++) + if (pipe->capture_done[i]) + pipe->capture_done[i](pipe, &resp); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_SOF: + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_EOF: + break; + default: + dev_err(&adev->dev, "%d:unknown response type %u\n", + resp.stream_handle, resp.type); + break; + } + + return 0; +} + +static irqreturn_t isys_isr(struct ipu_bus_device *adev) +{ + struct ici_isys *isys = ipu_bus_get_drvdata(adev); + void __iomem *base = isys->pdata->base; + u32 status; + + spin_lock(&isys->power_lock); + if (!isys->power) { + spin_unlock(&isys->power_lock); + return IRQ_NONE; + } + + status = readl(isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_STATUS); + do { + writel(status, isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_CLEAR); + + if (isys->isr_csi2_bits & status) { + unsigned int i; + + for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) { + if (status & + IPU_ISYS_UNISPART_IRQ_CSI2(i)){ + + ici_isys_csi2_isr( + &isys->ici_csi2[i]); + } + } + } + + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG); + + /* + * Handle a single FW event per checking the CSI-2 + * receiver SOF status. This is done in order to avoid + * the case where events arrive to the event queue and + * one of them is a SOF event which then could be + * handled before the SOF interrupt. This would pose + * issues in sequence numbering which is based on SOF + * interrupts, always assumed to arrive before FW SOF + * events. + */ + if (status & IPU_ISYS_UNISPART_IRQ_SW && + !isys_isr_one_ici(adev)) + status = IPU_ISYS_UNISPART_IRQ_SW; + else + status = 0; + + status |= readl(isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_STATUS); + } while (status & (isys->isr_csi2_bits + | IPU_ISYS_UNISPART_IRQ_SW)); + + spin_unlock(&isys->power_lock); + return IRQ_HANDLED; +} + +static void isys_isr_poll_ici(struct ipu_bus_device *adev) +{ + struct ici_isys *isys = ipu_bus_get_drvdata(adev); + + if (!isys->fwcom) { + dev_dbg(&isys->adev->dev, + "got interrupt but device not configured yet\n"); + return; + } + + while (!isys_isr_one_ici(adev)); +} + +int intel_ipu4_isys_isr_run_ici(void *ptr) +{ + struct ici_isys *isys = ptr; + + while (!kthread_should_stop()) { + usleep_range(500, 1000); + if (isys->ici_stream_opened) + isys_isr_poll_ici(isys->adev); + } + + return 0; +} + +static struct ipu_bus_driver isys_driver = { + .probe = isys_probe, + .remove = isys_remove, + .isr = isys_isr, + .wanted = IPU_ISYS_NAME, + .drv = { + .name = IPU_ISYS_NAME, + .owner = THIS_MODULE, + .pm = ISYS_PM_OPS, + }, +}; + +module_ipu_bus_driver(isys_driver); + +MODULE_AUTHOR("Scott Kennedy "); +MODULE_AUTHOR("Marcin Mozejko "); +MODULE_AUTHOR("Sakari Ailus "); +MODULE_AUTHOR("Samu Onkalo "); +MODULE_AUTHOR("Jouni Högander "); +MODULE_AUTHOR("Jouni Ukkonen "); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Intel intel_ipu4 ici input system driver"); + +#endif /* ICI_ENABLED */ + diff --git a/drivers/media/pci/intel/ici/ici-isys.h b/drivers/media/pci/intel/ici/ici-isys.h new file mode 100644 index 000000000000..aec81a10936b --- /dev/null +++ b/drivers/media/pci/intel/ici/ici-isys.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef ICI_ISYS_H +#define ICI_ISYS_H + +#define ICI_ENABLED + +#ifdef ICI_ENABLED +#define IPU4_DEBUG + +#include +#include + +#include "ipu.h" +#include "ipu-pdata.h" +#include "ipu-fw-isys.h" +#include 
"ici-isys-stream.h" +#include "ici-isys-csi2.h" +#include "ici-isys-csi2-be.h" +#include "ici-isys-pipeline-device.h" +#include "ici-isys-tpg.h" +#include "ipu-platform.h" +#include "ipu4/ipu-platform-isys.h" +#include "ipu4/ipu-platform-regs.h" + +#define IPU_ISYS_ENTITY_PREFIX "Intel IPU4" + +#define IPU_ISYS_2600_MEM_LINE_ALIGN 64 + +#define IPU_ISYS_MAX_CSI2_PORTS IPU_ISYS_MAX_CSI2_LEGACY_PORTS+IPU_ISYS_MAX_CSI2_COMBO_PORTS +/* for TPG */ +#define INTEL_IPU4_ISYS_FREQ_BXT_FPGA 25000000UL +#define INTEL_IPU4_ISYS_FREQ_BXT 533000000UL + +#define IPU_ISYS_SIZE_RECV_QUEUE 40 +#define IPU_ISYS_SIZE_SEND_QUEUE 40 +#define IPU_ISYS_NUM_RECV_QUEUE 1 + +/* + * Device close takes some time from last ack message to actual stopping + * of the SP processor. As long as the SP processor runs we can't proceed with + * clean up of resources. + */ +#define IPU_ISYS_OPEN_TIMEOUT_US 1000 +#define IPU_ISYS_OPEN_RETRY 1000 +#define IPU_ISYS_TURNOFF_DELAY_US 1000 +#define IPU_ISYS_TURNOFF_TIMEOUT 1000 +#define IPU_LIB_CALL_TIMEOUT_MS 2000 +#define IPU_LIB_CALL_TIMEOUT_JIFFIES \ + msecs_to_jiffies(IPU_LIB_CALL_TIMEOUT_MS) + +#define INTEL_IPU4_ISYS_CSI2_LONG_PACKET_HEADER_SIZE 32 +#define INTEL_IPU4_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE 32 + +/* + * For B0/1: FW support max 6 streams + */ +#define INTEL_IPU4_ISYS_MAX_STREAMS 6 + + +#define IPU_ISYS_MIN_WIDTH 1U +#define IPU_ISYS_MIN_HEIGHT 1U +#define IPU_ISYS_MAX_WIDTH 16384U +#define IPU_ISYS_MAX_HEIGHT 16384U + +struct task_struct; + +/* + * struct ici_isys + * + * @media_dev: Media device + * @v4l2_dev: V4L2 device + * @adev: ISYS ipu4 bus device + * @power: Is ISYS powered on or not? + * @isr_bits: Which bits does the ISR handle? + * @power_lock: Serialise access to power (power state in general) + * @lock: serialise access to pipes + * @pipes: pipelines per stream ID + * @fwcom: fwcom library private pointer + * @line_align: line alignment in memory + * @legacy_port_cfg: lane mappings for legacy CSI-2 ports + * @combo_port_cfg: lane mappings for D/C-PHY ports + * @isr_thread: for polling for events if interrupt delivery isn't available + * @reset_needed: Isys requires d0i0->i3 transition + * @video_opened: total number of opened file handles on video nodes + * @mutex: serialise access isys video open/release related operations + * @stream_mutex: serialise stream start and stop, queueing requests + * @pdata: platform data pointer + * @csi2: CSI-2 receivers + * @tpg: test pattern generators + * @csi2_be: CSI-2 back-ends + * @isa: Input system accelerator + * @fw: ISYS firmware binary (unsecure firmware) + * @fw_sgt: fw scatterlist + * @pkg_dir: host pointer to pkg_dir + * @pkg_dir_dma_addr: I/O virtual address for pkg_dir + * @pkg_dir_size: size of pkg_dir in bytes + */ +struct ici_isys { + struct ipu_bus_device *adev; + + int power; + spinlock_t power_lock; + u32 isr_csi2_bits; + spinlock_t lock; + struct ipu_isys_pipeline *pipes[IPU_ISYS_MAX_STREAMS]; + void *fwcom; + unsigned int line_align; + u32 legacy_port_cfg; + u32 combo_port_cfg; + struct task_struct *isr_thread; + bool reset_needed; + bool icache_prefetch; + unsigned int video_opened; + unsigned int stream_opened; + struct dentry *debugfsdir; + struct mutex mutex; + struct mutex stream_mutex; + struct mutex lib_mutex; + + struct ipu_isys_pdata *pdata; + + struct ici_isys_pipeline_device pipeline_dev; + + struct ici_isys_pipeline *ici_pipes[IPU_ISYS_MAX_STREAMS]; + struct ici_isys_csi2 ici_csi2[IPU_ISYS_MAX_CSI2_PORTS]; + struct ici_isys_tpg ici_tpg[2]; // TODO map to a macro + struct 
ici_isys_csi2_be ici_csi2_be[NR_OF_CSI2_BE_SOC_STREAMS];
+	unsigned int ici_stream_opened;
+
+	const struct firmware *fw;
+	struct sg_table fw_sgt;
+
+	u64 *pkg_dir;
+	dma_addr_t pkg_dir_dma_addr;
+	unsigned int pkg_dir_size;
+
+	struct list_head requests;
+	struct pm_qos_request pm_qos;
+	struct ici_isys_csi2_monitor_message *short_packet_trace_buffer;
+	dma_addr_t short_packet_trace_buffer_dma_addr;
+	u64 tsc_timer_base;
+	u64 tunit_timer_base;
+	spinlock_t listlock;	/* Protect framebuflist */
+	struct list_head framebuflist;
+	struct list_head framebuflist_fw;
+};
+
+int intel_ipu4_isys_isr_run_ici(void *ptr);
+
+struct isys_fw_msgs {
+	union {
+		u64 dummy;
+		struct ipu_fw_isys_frame_buff_set_abi frame;
+		struct ipu_fw_isys_stream_cfg_data_abi stream;
+	} fw_msg;
+	struct list_head head;
+	dma_addr_t dma_addr;
+};
+
+#define ipu_lib_call_notrace_unlocked(func, isys, ...)		\
+	({							\
+		int rval;					\
+								\
+		rval = -ia_css_isys_##func((isys)->fwcom, ##__VA_ARGS__);\
+								\
+		rval;						\
+	})
+
+#define ipu_lib_call_notrace(func, isys, ...)			\
+	({							\
+		int rval;					\
+								\
+		mutex_lock(&(isys)->lib_mutex);			\
+								\
+		rval = ipu_lib_call_notrace_unlocked(		\
+			func, isys, ##__VA_ARGS__);		\
+								\
+		mutex_unlock(&(isys)->lib_mutex);		\
+								\
+		rval;						\
+	})
+
+#define ipu_lib_call(func, isys, ...)				\
+	({							\
+		int rval;					\
+		dev_dbg(&(isys)->adev->dev, "hostlib: libcall %s\n", #func);\
+		rval = ipu_lib_call_notrace(func, isys, ##__VA_ARGS__);\
+								\
+		rval;						\
+	})
+
+#undef DEBUGK
+#ifdef IPU4_DEBUG /* Macro for printing debug infos */
+# ifdef __KERNEL__ /* for kernel space */
+# define DEBUGK(fmt, args...) printk(KERN_DEBUG "IPU4: " fmt, ## args)
+# else /* for user space */
+# define DEBUGK(fmt, args...) fprintf(stderr, fmt, ## args)
+# endif
+#else /* no debug prints */
+# define DEBUGK(fmt, args...)
+#endif
+
+#else /* ICI_ENABLED */
+#pragma message "IPU ICI version is DISABLED."
+#endif /* ICI_ENABLED */
+
+#endif /* ICI_ISYS_H */
diff --git a/drivers/media/pci/intel/ici/libintel-ipu4_ici.c b/drivers/media/pci/intel/ici/libintel-ipu4_ici.c
new file mode 100644
index 000000000000..b4fc334ed191
--- /dev/null
+++ b/drivers/media/pci/intel/ici/libintel-ipu4_ici.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2014 - 2018 Intel Corporation
+
+#include
+#include
+#include
+#include "ici/ici-isys.h"
+#include "ipu-wrapper.h"
+#include
+
+#include "ipu-platform.h"
+
+#define ipu_lib_call_notrace_unlocked(func, isys, ...)		\
+	({							\
+		int rval;					\
+								\
+		rval = -ia_css_isys_##func((isys)->fwcom, ##__VA_ARGS__); \
+								\
+		rval;						\
+	})
+
+#define ipu_lib_call_notrace(func, isys, ...)			\
+	({							\
+		int rval;					\
+								\
+		mutex_lock(&(isys)->lib_mutex);			\
+								\
+		rval = ipu_lib_call_notrace_unlocked(		\
+			func, isys, ##__VA_ARGS__);		\
+								\
+		mutex_unlock(&(isys)->lib_mutex);		\
+								\
+		rval;						\
+	})
+
+#define ipu_lib_call(func, isys, ...)				\
+	({							\
+		int rval;					\
+		dev_dbg(&(isys)->adev->dev, "hostlib: libcall %s\n", #func); \
+		rval = ipu_lib_call_notrace(func, isys, ##__VA_ARGS__); \
+								\
+		rval;						\
+	})
+
+static int wrapper_init_done;
+
+int ipu_fw_isys_close(struct ici_isys *isys)
+{
+	struct device *dev = &isys->adev->dev;
+	int timeout = IPU_ISYS_TURNOFF_TIMEOUT;
+	int rval;
+	unsigned long flags;
+
+	/*
+	 * Ask library to stop the isys fw. Actual close takes
+	 * some time as the FW must stop its actions including code fetch
+	 * to SP icache.
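+	 * device_close only queues the stop request; device_release is
+	 * then polled below (up to IPU_ISYS_TURNOFF_TIMEOUT attempts)
+	 * until the SP has really stopped.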
+	 */
+	spin_lock_irqsave(&isys->power_lock, flags);
+	rval = ipu_lib_call(device_close, isys);
+	spin_unlock_irqrestore(&isys->power_lock, flags);
+	if (rval)
+		dev_err(dev, "Device close failure: %d\n", rval);
+
+	/* Release probably fails if the close failed; try anyway. */
+	do {
+		usleep_range(IPU_ISYS_TURNOFF_DELAY_US,
+			     2 * IPU_ISYS_TURNOFF_DELAY_US);
+		rval = ipu_lib_call_notrace(device_release, isys, 0);
+		timeout--;
+	} while (rval != 0 && timeout);
+
+	/* Take power_lock to wait for the interrupt handler to finish */
+	spin_lock_irqsave(&isys->power_lock, flags);
+	if (!rval)
+		isys->fwcom = NULL;	/* No further actions needed */
+	else
+		dev_err(dev, "Device release time out %d\n", rval);
+	spin_unlock_irqrestore(&isys->power_lock, flags);
+	return rval;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_isys_close);
+
+int ipu_fw_isys_init(struct ici_isys *isys,
+		     unsigned int num_streams)
+{
+	int retry = IPU_ISYS_OPEN_RETRY;
+	unsigned int i;
+
+	struct ia_css_isys_device_cfg_data isys_cfg = {
+		.driver_sys = {
+			.ssid = ISYS_SSID,
+			.mmid = ISYS_MMID,
+			.num_send_queues = clamp_t(
+				unsigned int, num_streams, 1,
+				IPU_ISYS_NUM_STREAMS),
+			.num_recv_queues = IPU_ISYS_NUM_RECV_QUEUE,
+			.send_queue_size = IPU_ISYS_SIZE_SEND_QUEUE,
+			.recv_queue_size = IPU_ISYS_SIZE_RECV_QUEUE,
+			.icache_prefetch = isys->icache_prefetch,
+		},
+	};
+	struct device *dev = &isys->adev->dev;
+	int rval;
+
+	if (!wrapper_init_done) {
+		wrapper_init_done = true;
+		ipu_wrapper_init(ISYS_MMID, &isys->adev->dev,
+				 isys->pdata->base);
+	}
+
+	/*
+	 * SRAM partitioning. Initially equal partitioning is set.
+	 * TODO: Fine tune the partitioning based on the stream pixel load
+	 */
+	for (i = 0; i < min(IPU_NOF_SRAM_BLOCKS_MAX, NOF_SRAM_BLOCKS_MAX); i++) {
+		if (i < isys_cfg.driver_sys.num_send_queues)
+			isys_cfg.buffer_partition.num_gda_pages[i] =
+				(IPU_DEVICE_GDA_NR_PAGES *
+				 IPU_DEVICE_GDA_VIRT_FACTOR) /
+				isys_cfg.driver_sys.num_send_queues;
+		else
+			isys_cfg.buffer_partition.num_gda_pages[i] = 0;
+	}
+
+	rval = -ia_css_isys_device_open(&isys->fwcom, &isys_cfg);
+	if (rval < 0) {
+		dev_err(dev, "isys device open failed %d\n", rval);
+		return rval;
+	}
+
+	do {
+		usleep_range(IPU_ISYS_OPEN_TIMEOUT_US,
+			     IPU_ISYS_OPEN_TIMEOUT_US + 10);
+		rval = ipu_lib_call(device_open_ready, isys);
+		if (!rval)
+			break;
+		retry--;
+	} while (retry > 0);
+
+	if (!retry && rval) {
+		dev_err(dev, "isys device open ready failed %d\n", rval);
+		ipu_fw_isys_close(isys);
+	}
+
+	return rval;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_isys_init);
+
+void ipu_fw_isys_cleanup(struct ici_isys *isys)
+{
+	ipu_lib_call(device_release, isys, 1);
+	isys->fwcom = NULL;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_isys_cleanup);
+
+struct ipu_fw_isys_resp_info_abi *ipu_fw_isys_get_resp(
+	void *context, unsigned int queue,
+	struct ipu_fw_isys_resp_info_abi *response)
+{
+	struct ia_css_isys_resp_info apiresp;
+	int rval;
+
+	rval = -ia_css_isys_stream_handle_response(context, &apiresp);
+	if (rval < 0)
+		return NULL;
+
+	response->buf_id = 0;
+	response->type = apiresp.type;
+	response->timestamp[0] = apiresp.timestamp[0];
+	response->timestamp[1] = apiresp.timestamp[1];
+	response->stream_handle = apiresp.stream_handle;
+	response->error_info.error = apiresp.error;
+	response->error_info.error_details = apiresp.error_details;
+	response->pin.out_buf_id = apiresp.pin.out_buf_id;
+	response->pin.addr = apiresp.pin.addr;
+	response->pin_id = apiresp.pin_id;
+	response->process_group_light.param_buf_id =
+		apiresp.process_group_light.param_buf_id;
+	response->process_group_light.addr =
apiresp.process_group_light.addr; + response->acc_id = apiresp.acc_id; +#ifdef IPU_OTF_SUPPORT + response->frame_counter = apiresp.frame_counter; + response->written_direct = apiresp.written_direct; +#endif + + return response; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_get_resp); + +void ipu_fw_isys_put_resp(void *context, unsigned int queue) +{ + /* Nothing to do here really */ +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_put_resp); + +int ipu_fw_isys_simple_cmd(struct ici_isys *isys, + const unsigned int stream_handle, + enum ipu_fw_isys_send_type send_type) +{ + int rval = -1; + + switch (send_type) { + case IPU_FW_ISYS_SEND_TYPE_STREAM_START: + rval = ipu_lib_call(stream_start, isys, stream_handle, + NULL); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_FLUSH: + rval = ipu_lib_call(stream_flush, isys, stream_handle); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_STOP: + rval = ipu_lib_call(stream_stop, isys, stream_handle); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE: + rval = ipu_lib_call(stream_close, isys, stream_handle); + break; + default: + WARN_ON(1); + } + + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_simple_cmd); + +static void resolution_abi_to_api(const struct ipu_fw_isys_resolution_abi *abi, + struct ia_css_isys_resolution *api) +{ + api->width = abi->width; + api->height = abi->height; +} + +static void output_pin_payload_abi_to_api( + struct ipu_fw_isys_output_pin_payload_abi *abi, + struct ia_css_isys_output_pin_payload *api) +{ + api->out_buf_id = abi->out_buf_id; + api->addr = abi->addr; +} + +static void output_pin_info_abi_to_api( + struct ipu_fw_isys_output_pin_info_abi *abi, + struct ia_css_isys_output_pin_info *api) +{ + api->input_pin_id = abi->input_pin_id; + resolution_abi_to_api(&abi->output_res, &api->output_res); + api->stride = abi->stride; + api->pt = abi->pt; + api->watermark_in_lines = abi->watermark_in_lines; + api->payload_buf_size = abi->payload_buf_size; + api->send_irq = abi->send_irq; + api->ft = abi->ft; +#ifdef IPU_OTF_SUPPORT + api->link_id = abi->link_id; +#endif + api->reserve_compression = abi->reserve_compression; +} + +static void param_pin_abi_to_api(struct ipu_fw_isys_param_pin_abi *abi, + struct ia_css_isys_param_pin *api) +{ + api->param_buf_id = abi->param_buf_id; + api->addr = abi->addr; +} + +static void input_pin_info_abi_to_api( + struct ipu_fw_isys_input_pin_info_abi *abi, + struct ia_css_isys_input_pin_info *api) +{ + resolution_abi_to_api(&abi->input_res, &api->input_res); + api->dt = abi->dt; + api->mipi_store_mode = abi->mipi_store_mode; + api->mapped_dt = abi->mapped_dt; +} + +static void isa_cfg_abi_to_api(const struct ipu_fw_isys_isa_cfg_abi *abi, + struct ia_css_isys_isa_cfg *api) +{ + unsigned int i; + + for (i = 0; i < min(N_IPU_FW_ISYS_RESOLUTION_INFO, + N_IA_CSS_ISYS_RESOLUTION_INFO); i++) + resolution_abi_to_api(&abi->isa_res[i], &api->isa_res[i]); + + api->blc_enabled = abi->cfg.blc; + api->lsc_enabled = abi->cfg.lsc; + api->dpc_enabled = abi->cfg.dpc; + api->downscaler_enabled = abi->cfg.downscaler; + api->awb_enabled = abi->cfg.awb; + api->af_enabled = abi->cfg.af; + api->ae_enabled = abi->cfg.ae; + api->paf_type = abi->cfg.paf; + api->send_irq_stats_ready = abi->cfg.send_irq_stats_ready; + api->send_resp_stats_ready = abi->cfg.send_irq_stats_ready; +} + +static void cropping_abi_to_api(struct ipu_fw_isys_cropping_abi *abi, + struct ia_css_isys_cropping *api) +{ + api->top_offset = abi->top_offset; + api->left_offset = abi->left_offset; + api->bottom_offset = abi->bottom_offset; + api->right_offset = abi->right_offset; +} + 
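+/*
+ * Editor's sketch (not part of the original patch): each *_abi_to_api
+ * helper here is a plain field-by-field copy from the firmware wire
+ * layout (ABI) to the host library layout (API). For example, given
+ * the struct definitions above:
+ *
+ *	struct ipu_fw_isys_cropping_abi abi = { .top_offset = 2 };
+ *	struct ia_css_isys_cropping api;
+ *
+ *	cropping_abi_to_api(&abi, &api);  => api.top_offset == 2
+ *
+ * Keeping the two layouts distinct lets the FW ABI evolve without
+ * touching callers of the library API.
+ */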
+static void stream_cfg_abi_to_api(struct ipu_fw_isys_stream_cfg_data_abi *abi, + struct ia_css_isys_stream_cfg_data *api) +{ + unsigned int i; + + api->src = abi->src; + api->vc = abi->vc; + api->isl_use = abi->isl_use; + api->compfmt = abi->compfmt; + isa_cfg_abi_to_api(&abi->isa_cfg, &api->isa_cfg); + for (i = 0; i < min(N_IPU_FW_ISYS_CROPPING_LOCATION, + N_IA_CSS_ISYS_CROPPING_LOCATION); i++) + cropping_abi_to_api(&abi->crop[i], &api->crop[i]); + + api->send_irq_sof_discarded = abi->send_irq_sof_discarded; + api->send_irq_eof_discarded = abi->send_irq_eof_discarded; + api->send_resp_sof_discarded = abi->send_irq_sof_discarded; + api->send_resp_eof_discarded = abi->send_irq_eof_discarded; + api->nof_input_pins = abi->nof_input_pins; + api->nof_output_pins = abi->nof_output_pins; + for (i = 0; i < abi->nof_input_pins; i++) + input_pin_info_abi_to_api(&abi->input_pins[i], + &api->input_pins[i]); + + for (i = 0; i < abi->nof_output_pins; i++) + output_pin_info_abi_to_api(&abi->output_pins[i], + &api->output_pins[i]); +} + +static void frame_buff_set_abi_to_api( + struct ipu_fw_isys_frame_buff_set_abi *abi, + struct ia_css_isys_frame_buff_set *api) +{ + int i; + + for (i = 0; i < min(IPU_MAX_OPINS, MAX_OPINS); i++) + output_pin_payload_abi_to_api(&abi->output_pins[i], + &api->output_pins[i]); + + param_pin_abi_to_api(&abi->process_group_light, + &api->process_group_light); + + api->send_irq_sof = abi->send_irq_sof; + api->send_irq_eof = abi->send_irq_eof; +} + +int ipu_fw_isys_complex_cmd(struct ici_isys *isys, + const unsigned int stream_handle, + void *cpu_mapped_buf, + dma_addr_t dma_mapped_buf, + size_t size, + enum ipu_fw_isys_send_type send_type) +{ + union { + struct ia_css_isys_stream_cfg_data stream_cfg; + struct ia_css_isys_frame_buff_set buf; + } param; + int rval = -1; + + memset(¶m, 0, sizeof(param)); + + switch (send_type) { + case IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE: + frame_buff_set_abi_to_api(cpu_mapped_buf, ¶m.buf); + rval = ipu_lib_call(stream_capture_indication, + isys, stream_handle, ¶m.buf); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_OPEN: + stream_cfg_abi_to_api(cpu_mapped_buf, ¶m.stream_cfg); + rval = ipu_lib_call(stream_open, isys, stream_handle, + ¶m.stream_cfg); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE: + frame_buff_set_abi_to_api(cpu_mapped_buf, ¶m.buf); + rval = ipu_lib_call(stream_start, isys, stream_handle, + ¶m.buf); + break; + default: + WARN_ON(1); + } + + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_complex_cmd); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu library"); +EXPORT_SYMBOL_GPL(ia_css_isys_device_open); +EXPORT_SYMBOL_GPL(ia_css_isys_device_open_ready); +EXPORT_SYMBOL_GPL(ia_css_isys_device_close); +EXPORT_SYMBOL_GPL(ia_css_isys_device_release); +EXPORT_SYMBOL_GPL(ia_css_isys_stream_open); +EXPORT_SYMBOL_GPL(ia_css_isys_stream_close); +EXPORT_SYMBOL_GPL(ia_css_isys_stream_start); +EXPORT_SYMBOL_GPL(ia_css_isys_stream_stop); +EXPORT_SYMBOL_GPL(ia_css_isys_stream_flush); +EXPORT_SYMBOL_GPL(ia_css_isys_stream_capture_indication); +EXPORT_SYMBOL_GPL(ia_css_isys_stream_handle_response); + diff --git a/drivers/media/pci/intel/ipu-bus.c b/drivers/media/pci/intel/ipu-bus.c new file mode 100644 index 000000000000..a50b6ab5a4b0 --- /dev/null +++ b/drivers/media/pci/intel/ipu-bus.c @@ -0,0 +1,470 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu.h" +#include 
"ipu-platform.h" +#include "ipu-dma.h" +#include "ipu-mmu.h" + +#ifdef CONFIG_PM +static struct bus_type ipu_bus; + +static int bus_pm_suspend_child_dev(struct device *dev, void *p) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct device *parent = (struct device *)p; + + if (!ipu_bus_get_drvdata(adev)) + return 0; /* Device not attached to any driver yet */ + + if (dev->parent != parent || adev->ctrl) + return 0; + + return pm_generic_runtime_suspend(dev); +} + +static int bus_pm_runtime_suspend(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + int rval; + + if (!adev->ctrl) { + dev_dbg(dev, "has no buttress control info, bailing out\n"); + return 0; + } + + rval = bus_for_each_dev(&ipu_bus, NULL, dev, bus_pm_suspend_child_dev); + if (rval) { + dev_err(dev, "failed to suspend child device\n"); + return rval; + } + + rval = pm_generic_runtime_suspend(dev); + if (rval) + return rval; + + rval = ipu_buttress_power(dev, adev->ctrl, false); + dev_dbg(dev, "%s: buttress power down %d\n", __func__, rval); + if (!rval) + return 0; + + dev_err(dev, "power down failed!\n"); + + /* Powering down failed, attempt to resume device now */ + rval = pm_generic_runtime_resume(dev); + if (!rval) + return -EBUSY; + + return -EIO; +} + +static int bus_pm_resume_child_dev(struct device *dev, void *p) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct device *parent = (struct device *)p; + int r; + + if (!ipu_bus_get_drvdata(adev)) + return 0; /* Device not attached to any driver yet */ + + if (dev->parent != parent || adev->ctrl) + return 0; + + mutex_lock(&adev->resume_lock); + r = pm_generic_runtime_resume(dev); + mutex_unlock(&adev->resume_lock); + return r; +} + +static int bus_pm_runtime_resume(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + int rval; + + if (!adev->ctrl) { + dev_dbg(dev, "has no buttress control info, bailing out\n"); + return 0; + } + + rval = ipu_buttress_power(dev, adev->ctrl, true); + dev_dbg(dev, "%s: buttress power up %d\n", __func__, rval); + if (rval) + return rval; + + rval = pm_generic_runtime_resume(dev); + dev_dbg(dev, "%s: resume %d\n", __func__, rval); + if (rval) + goto out_err; + + /* + * It needs to be ensured that IPU child devices' resume/suspend are + * called only when the child devices' power is turned on/off by the + * parent device here. Therefore, children's suspend/resume are called + * from here, because that is the only way to guarantee it. 
+	 */
+	rval = bus_for_each_dev(&ipu_bus, NULL, dev, bus_pm_resume_child_dev);
+	if (rval) {
+		dev_err(dev, "failed to resume child device - reset it\n");
+
+		rval = pm_generic_runtime_suspend(dev);
+		dev_dbg(dev, "%s: suspend %d\n", __func__, rval);
+
+		rval = ipu_buttress_power(dev, adev->ctrl, false);
+		dev_dbg(dev, "%s: buttress power down %d\n", __func__, rval);
+		if (rval)
+			return rval;
+
+		usleep_range(1000, 1100);
+
+		rval = ipu_buttress_power(dev, adev->ctrl, true);
+		dev_dbg(dev, "%s: buttress power up %d\n", __func__, rval);
+		if (rval)
+			return rval;
+
+		rval = pm_generic_runtime_resume(dev);
+		dev_dbg(dev, "%s: re-resume %d\n", __func__, rval);
+		if (rval)
+			goto out_err;
+
+		rval = bus_for_each_dev(&ipu_bus, NULL, dev,
+					bus_pm_resume_child_dev);
+
+		if (rval) {
+			dev_err(dev, "resume retry failed\n");
+			goto out_err;
+		}
+	}
+
+	return 0;
+
+out_err:
+	if (adev->ctrl)
+		ipu_buttress_power(dev, adev->ctrl, false);
+
+	return -EBUSY;
+}
+
+static const struct dev_pm_ops ipu_bus_pm_ops = {
+	.runtime_suspend = bus_pm_runtime_suspend,
+	.runtime_resume = bus_pm_runtime_resume,
+};
+
+#define IPU_BUS_PM_OPS (&ipu_bus_pm_ops)
+#else
+#define IPU_BUS_PM_OPS NULL
+#endif
+
+static int ipu_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct ipu_bus_driver *adrv = to_ipu_bus_driver(drv);
+
+	dev_dbg(dev, "bus match: \"%s\" --- \"%s\"\n", dev_name(dev),
+		adrv->wanted);
+
+	return !strncmp(dev_name(dev), adrv->wanted, strlen(adrv->wanted));
+}
+
+static struct ipu_dma_mapping *alloc_dma_mapping(struct device *dev)
+{
+	struct ipu_dma_mapping *dmap;
+
+	dmap = kzalloc(sizeof(*dmap), GFP_KERNEL);
+	if (!dmap)
+		return NULL;
+
+	dmap->domain = iommu_domain_alloc(dev->bus);
+	if (!dmap->domain) {
+		kfree(dmap);
+		return NULL;
+	}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
+	init_iova_domain(&dmap->iovad, dma_get_mask(dev) >> PAGE_SHIFT);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
+	init_iova_domain(&dmap->iovad, SZ_4K, 1,
+			 dma_get_mask(dev) >> PAGE_SHIFT);
+#else
+	init_iova_domain(&dmap->iovad, SZ_4K, 1);
+#endif
+
+	kref_init(&dmap->ref);
+
+	pr_debug("alloc mapping\n");
+
+	iova_cache_get();
+
+	return dmap;
+}
+
+static void free_dma_mapping(void *ptr)
+{
+	struct ipu_mmu *mmu = ptr;
+	struct ipu_dma_mapping *dmap = mmu->dmap;
+
+	iommu_domain_free(dmap->domain);
+	mmu->set_mapping(mmu, NULL);
+	iova_cache_put();
+	put_iova_domain(&dmap->iovad);
+	kfree(dmap);
+}
+
+static struct iommu_group *ipu_bus_get_group(struct device *dev)
+{
+	struct device *aiommu = to_ipu_bus_device(dev)->iommu;
+	struct ipu_mmu *mmu = dev_get_drvdata(aiommu);
+	struct iommu_group *group;
+	struct ipu_dma_mapping *dmap;
+
+	if (!mmu) {
+		dev_err(dev, "%s: no iommu available\n", __func__);
+		return NULL;
+	}
+
+	group = iommu_group_get(dev);
+	if (group)
+		return group;
+
+	group = iommu_group_alloc();
+	if (IS_ERR(group)) {
+		/* iommu_group_alloc() returns an ERR_PTR, never NULL */
+		dev_err(dev, "%s: can't alloc iommu group\n", __func__);
+		return NULL;
+	}
+
+	dmap = alloc_dma_mapping(dev);
+	if (!dmap) {
+		dev_err(dev, "%s: can't alloc dma mapping\n", __func__);
+		iommu_group_put(group);
+		return NULL;
+	}
+
+	iommu_group_set_iommudata(group, mmu, free_dma_mapping);
+
+	/*
+	 * Turn mmu on and off synchronously. Otherwise it may still be on
+	 * at psys / isys probing phase and that may cause problems on
+	 * development environments.
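+	 * The pm_runtime_get_sync()/put_sync() pair below makes sure
+	 * set_mapping() runs with the MMU powered.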
+ */ + pm_runtime_get_sync(aiommu); + mmu->set_mapping(mmu, dmap); + pm_runtime_put_sync(aiommu); + + return group; +} + +static int ipu_bus_probe(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_bus_driver *adrv = to_ipu_bus_driver(dev->driver); + struct iommu_group *group = NULL; + int rval; + + dev_dbg(dev, "bus probe dev %s\n", dev_name(dev)); + + if (adev->iommu) { + dev_dbg(dev, "iommu %s\n", dev_name(adev->iommu)); + + group = ipu_bus_get_group(dev); + if (!group) + return -EPROBE_DEFER; + + rval = iommu_group_add_device(group, dev); + if (rval) + goto out_err; + } + + adev->adrv = adrv; + if (adrv->probe) { + rval = adrv->probe(adev); + if (!rval) { + /* + * If the device power, after probe, is enabled + * (from the parent device), its resume needs to + * be called to initialize the device properly. + */ + if (!adev->ctrl && + !pm_runtime_status_suspended(dev->parent)) { + mutex_lock(&adev->resume_lock); + pm_generic_runtime_resume(dev); + mutex_unlock(&adev->resume_lock); + } + } + } else { + rval = -ENODEV; + } + + if (rval) + goto out_err; + + return 0; + +out_err: + ipu_bus_set_drvdata(adev, NULL); + adev->adrv = NULL; + iommu_group_remove_device(dev); + iommu_group_put(group); + return rval; +} + +static int ipu_bus_remove(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_bus_driver *adrv = to_ipu_bus_driver(dev->driver); + + if (adrv->remove) + adrv->remove(adev); + + if (adev->iommu) + iommu_group_remove_device(dev); + + return 0; +} + +static struct bus_type ipu_bus = { + .name = IPU_BUS_NAME, + .match = ipu_bus_match, + .probe = ipu_bus_probe, + .remove = ipu_bus_remove, + .pm = IPU_BUS_PM_OPS, +}; + +static struct mutex ipu_bus_mutex; + +static void ipu_bus_release(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + + kfree(adev); +} + +struct ipu_bus_device *ipu_bus_add_device(struct pci_dev *pdev, + struct device *parent, void *pdata, + struct device *iommu, + struct ipu_buttress_ctrl *ctrl, + char *name, unsigned int nr) +{ + struct ipu_bus_device *adev; + struct ipu_device *isp = pci_get_drvdata(pdev); + int rval; + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + return ERR_PTR(-ENOMEM); + + adev->dev.parent = parent; + adev->dev.bus = &ipu_bus; + adev->dev.release = ipu_bus_release; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 16) + adev->dev.dma_ops = &ipu_dma_ops; +#else + adev->dev.archdata.dma_ops = &ipu_dma_ops; +#endif + adev->dma_mask = DMA_BIT_MASK(isp->secure_mode ? 
+ IPU_MMU_ADDRESS_BITS : + IPU_MMU_ADDRESS_BITS_NON_SECURE); + adev->dev.dma_mask = &adev->dma_mask; + adev->iommu = iommu; + adev->ctrl = ctrl; + adev->pdata = pdata; + adev->isp = isp; + mutex_init(&adev->resume_lock); + dev_set_name(&adev->dev, "%s%d", name, nr); + + rval = device_register(&adev->dev); + if (rval) { + put_device(&adev->dev); + return ERR_PTR(rval); + } + + mutex_lock(&ipu_bus_mutex); + list_add(&adev->list, &isp->devices); + mutex_unlock(&ipu_bus_mutex); + + return adev; +} + +void ipu_bus_del_devices(struct pci_dev *pdev) +{ + struct ipu_device *isp = pci_get_drvdata(pdev); + struct ipu_bus_device *adev, *save; + + mutex_lock(&ipu_bus_mutex); + + list_for_each_entry_safe(adev, save, &isp->devices, list) { + list_del(&adev->list); + device_unregister(&adev->dev); + } + + mutex_unlock(&ipu_bus_mutex); +} + +int ipu_bus_register_driver(struct ipu_bus_driver *adrv) +{ + adrv->drv.bus = &ipu_bus; + return driver_register(&adrv->drv); +} +EXPORT_SYMBOL(ipu_bus_register_driver); + +int ipu_bus_unregister_driver(struct ipu_bus_driver *adrv) +{ + driver_unregister(&adrv->drv); + return 0; +} +EXPORT_SYMBOL(ipu_bus_unregister_driver); + +int ipu_bus_register(void) +{ + mutex_init(&ipu_bus_mutex); + return bus_register(&ipu_bus); +} +EXPORT_SYMBOL(ipu_bus_register); + +void ipu_bus_unregister(void) +{ + mutex_destroy(&ipu_bus_mutex); + return bus_unregister(&ipu_bus); +} +EXPORT_SYMBOL(ipu_bus_unregister); + +int ipu_bus_set_iommu(struct iommu_ops *ops) +{ + if (iommu_present(&ipu_bus)) + return 0; + + return bus_set_iommu(&ipu_bus, ops); +} +EXPORT_SYMBOL(ipu_bus_set_iommu); + +static int flr_rpm_recovery(struct device *dev, void *p) +{ + dev_dbg(dev, "FLR recovery call\n"); + /* + * We are not necessarily going through device from child to + * parent. runtime PM refuses to change state for parent if the child + * is still active. At FLR (full reset for whole IPU) that doesn't + * matter. Everything has been power gated by HW during the FLR cycle + * and we are just cleaning up SW state. Thus, ignore child during + * set_suspended. 
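+	 * pm_suspend_ignore_children() is switched back right after the
+	 * state change, so normal parent/child ordering applies again.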
+ */ + pm_suspend_ignore_children(dev, true); + pm_runtime_set_suspended(dev); + pm_suspend_ignore_children(dev, false); + + return 0; +} + +int ipu_bus_flr_recovery(void) +{ + bus_for_each_dev(&ipu_bus, NULL, NULL, flr_rpm_recovery); + return 0; +} +EXPORT_SYMBOL(ipu_bus_flr_recovery); diff --git a/drivers/media/pci/intel/ipu-bus.h b/drivers/media/pci/intel/ipu-bus.h new file mode 100644 index 000000000000..4226b865fe99 --- /dev/null +++ b/drivers/media/pci/intel/ipu-bus.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_BUS_H +#define IPU_BUS_H + +#include +#include +#include +#include +#include + +#define IPU_BUS_NAME IPU_NAME "-bus" + +struct ipu_buttress_ctrl; +struct ipu_subsystem_trace_config; + +struct ipu_bus_device { + struct device dev; + struct list_head list; + void *pdata; + struct ipu_bus_driver *adrv; + struct device *iommu; + struct ipu_device *isp; + struct ipu_subsystem_trace_config *trace_cfg; + struct ipu_buttress_ctrl *ctrl; + u64 dma_mask; + /* Protect runtime_resume calls on the dev */ + struct mutex resume_lock; +}; + +#define to_ipu_bus_device(_dev) container_of(_dev, struct ipu_bus_device, dev) + +struct ipu_bus_driver { + struct device_driver drv; + char wanted[20]; + int (*probe)(struct ipu_bus_device *adev); + void (*remove)(struct ipu_bus_device *adev); + irqreturn_t (*isr)(struct ipu_bus_device *adev); + irqreturn_t (*isr_threaded)(struct ipu_bus_device *adev); + bool wake_isr_thread; +}; + +#define to_ipu_bus_driver(_drv) container_of(_drv, struct ipu_bus_driver, drv) + +struct ipu_bus_device *ipu_bus_add_device(struct pci_dev *pdev, + struct device *parent, void *pdata, + struct device *iommu, + struct ipu_buttress_ctrl *ctrl, + char *name, unsigned int nr); +void ipu_bus_del_devices(struct pci_dev *pdev); + +int ipu_bus_register_driver(struct ipu_bus_driver *adrv); +int ipu_bus_unregister_driver(struct ipu_bus_driver *adrv); + +int ipu_bus_register(void); +void ipu_bus_unregister(void); + +int ipu_bus_set_iommu(struct iommu_ops *ops); + +#define module_ipu_bus_driver(drv) \ + module_driver(drv, ipu_bus_register_driver, \ + ipu_bus_unregister_driver) + +#define ipu_bus_set_drvdata(adev, data) dev_set_drvdata(&(adev)->dev, data) +#define ipu_bus_get_drvdata(adev) dev_get_drvdata(&(adev)->dev) + +int ipu_bus_flr_recovery(void); + +#endif diff --git a/drivers/media/pci/intel/ipu-buttress.c b/drivers/media/pci/intel/ipu-buttress.c new file mode 100644 index 000000000000..fc813387b2aa --- /dev/null +++ b/drivers/media/pci/intel/ipu-buttress.c @@ -0,0 +1,1791 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-buttress.h" +#include "ipu-platform-buttress-regs.h" +#include "ipu-cpd.h" +#define CREATE_TRACE_POINTS +#define IPU_PERF_REG_TRACE +#include "ipu-trace-event.h" + +#define BOOTLOADER_STATUS_OFFSET 0x8000 +#define BOOTLOADER_MAGIC_KEY 0xb00710ad + +#define ENTRY BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 +#define EXIT BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 +#define QUERY BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE + +#define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX 10 + +#define BUTTRESS_CSE_BOOTLOAD_TIMEOUT 5000 +#define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT 10000 +#define BUTTRESS_CSE_FWRESET_TIMEOUT 100 + +#define 
BUTTRESS_IPC_TX_TIMEOUT 1000 +#define BUTTRESS_IPC_RX_TIMEOUT 1000 +#define BUTTRESS_IPC_VALIDITY_TIMEOUT 1000 + +#define IPU_BUTTRESS_TSC_LIMIT 500 /* 26 us @ 19.2 MHz */ +#define IPU_BUTTRESS_TSC_RETRY 10 + +#define BUTTRESS_CSE_IPC_RESET_RETRY 4 + +#define BUTTRESS_IPC_CMD_SEND_RETRY 1 + +static const struct ipu_buttress_sensor_clk_freq sensor_clk_freqs[] = { + {6750000, BUTTRESS_SENSOR_CLK_FREQ_6P75MHZ}, + {8000000, BUTTRESS_SENSOR_CLK_FREQ_8MHZ}, + {9600000, BUTTRESS_SENSOR_CLK_FREQ_9P6MHZ}, + {12000000, BUTTRESS_SENSOR_CLK_FREQ_12MHZ}, + {13600000, BUTTRESS_SENSOR_CLK_FREQ_13P6MHZ}, + {14400000, BUTTRESS_SENSOR_CLK_FREQ_14P4MHZ}, + {15800000, BUTTRESS_SENSOR_CLK_FREQ_15P8MHZ}, + {16200000, BUTTRESS_SENSOR_CLK_FREQ_16P2MHZ}, + {17300000, BUTTRESS_SENSOR_CLK_FREQ_17P3MHZ}, + {18600000, BUTTRESS_SENSOR_CLK_FREQ_18P6MHZ}, + {19200000, BUTTRESS_SENSOR_CLK_FREQ_19P2MHZ}, + {24000000, BUTTRESS_SENSOR_CLK_FREQ_24MHZ}, + {26000000, BUTTRESS_SENSOR_CLK_FREQ_26MHZ}, + {27000000, BUTTRESS_SENSOR_CLK_FREQ_27MHZ} +}; + +static const u32 ipu_adev_irq_mask[] = { + BUTTRESS_ISR_IS_IRQ, BUTTRESS_ISR_PS_IRQ +}; + +int ipu_buttress_ipc_reset(struct ipu_device *isp, struct ipu_buttress_ipc *ipc) +{ + struct ipu_buttress *b = &isp->buttress; + unsigned long tout_jfs; + unsigned int tout = 500; + u32 val = 0, csr_in_clr; + + mutex_lock(&b->ipc_mutex); + + /* Clear-by-1 CSR (all bits), corresponding internal states. */ + val = readl(isp->base + ipc->csr_in); + writel(val, isp->base + ipc->csr_in); + + /* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */ + writel(ENTRY, isp->base + ipc->csr_out); + + /* + * Clear-by-1 all CSR bits EXCEPT following + * bits: + * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. + * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. + * C. Possibly custom bits, depending on + * their role. + */ + csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ | + BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID | + BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY; + + /* + * How long we should wait here? + */ + tout_jfs = jiffies + msecs_to_jiffies(tout); + do { + val = readl(isp->base + ipc->csr_in); + dev_dbg(&isp->pdev->dev, "%s: csr_in = %x\n", __func__, val); + if (val & ENTRY) { + if (val & EXIT) { + dev_dbg(&isp->pdev->dev, + "%s:%s & %s\n", + __func__, + "IPC_PEER_COMP_ACTIONS_RST_PHASE1", + "IPC_PEER_COMP_ACTIONS_RST_PHASE2"); + /* + * 1) Clear-by-1 CSR bits + * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, + * IPC_PEER_COMP_ACTIONS_RST_PHASE2). + * 2) Set peer CSR bit + * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. + */ + writel(ENTRY | EXIT, + isp->base + ipc->csr_in); + + writel(QUERY, isp->base + ipc->csr_out); + + tout_jfs = jiffies + msecs_to_jiffies(tout); + continue; + } else { + dev_dbg(&isp->pdev->dev, + "%s:IPC_PEER_COMP_ACTIONS_RST_PHASE1\n", + __func__); + /* + * 1) Clear-by-1 CSR bits + * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, + * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE). + * 2) Set peer CSR bit + * IPC_PEER_COMP_ACTIONS_RST_PHASE1. + */ + writel(ENTRY | QUERY, + isp->base + ipc->csr_in); + + writel(ENTRY, isp->base + ipc->csr_out); + + tout_jfs = jiffies + msecs_to_jiffies(tout); + continue; + } + } else if (val & EXIT) { + dev_dbg(&isp->pdev->dev, + "%s: IPC_PEER_COMP_ACTIONS_RST_PHASE2\n", + __func__); + /* + * Clear-by-1 CSR bit + * IPC_PEER_COMP_ACTIONS_RST_PHASE2. + * 1) Clear incoming doorbell. + * 2) Clear-by-1 all CSR bits EXCEPT following + * bits: + * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. + * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. + * C. 
Possibly custom bits, depending on + * their role. + * 3) Set peer CSR bit + * IPC_PEER_COMP_ACTIONS_RST_PHASE2. + */ + writel(EXIT, isp->base + ipc->csr_in); + + writel(0 << BUTTRESS_IU2CSEDB0_BUSY_SHIFT, + isp->base + ipc->db0_in); + + writel(csr_in_clr, isp->base + ipc->csr_in); + + writel(EXIT, isp->base + ipc->csr_out); + + /* + * Read csr_in again to make sure if RST_PHASE2 is done. + * If csr_in is QUERY, it should be handled again. + */ + usleep_range(100, 500); + val = readl(isp->base + ipc->csr_in); + if (val & QUERY) { + dev_dbg(&isp->pdev->dev, + "%s: RST_PHASE2 retry csr_in = %x\n", + __func__, val); + continue; + } + + mutex_unlock(&b->ipc_mutex); + + return 0; + } else if (val & QUERY) { + dev_dbg(&isp->pdev->dev, + "%s: %s\n", + __func__, + "IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE"); + /* + * 1) Clear-by-1 CSR bit + * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. + * 2) Set peer CSR bit + * IPC_PEER_COMP_ACTIONS_RST_PHASE1 + */ + writel(QUERY, isp->base + ipc->csr_in); + + writel(ENTRY, isp->base + ipc->csr_out); + + tout_jfs = jiffies + msecs_to_jiffies(tout); + } + usleep_range(100, 500); + } while (!time_after(jiffies, tout_jfs)); + + mutex_unlock(&b->ipc_mutex); + + dev_err(&isp->pdev->dev, "Timed out while waiting for CSE!\n"); + + return -ETIMEDOUT; +} + +static void +ipu_buttress_ipc_validity_close(struct ipu_device *isp, + struct ipu_buttress_ipc *ipc) +{ + /* Set bit 5 in CSE CSR */ + writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ, + isp->base + ipc->csr_out); +} + +static int +ipu_buttress_ipc_validity_open(struct ipu_device *isp, + struct ipu_buttress_ipc *ipc) +{ + unsigned long tout_jfs; + unsigned int tout = BUTTRESS_IPC_VALIDITY_TIMEOUT; + u32 val; + + /* Set bit 3 in CSE CSR */ + writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ, + isp->base + ipc->csr_out); + + /* + * How long we should wait here? + */ + tout_jfs = jiffies + msecs_to_jiffies(tout); + do { + val = readl(isp->base + ipc->csr_in); + dev_dbg(&isp->pdev->dev, "%s: CSE/ISH2IUCSR = %x\n", + __func__, val); + + if (val & BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID) { + dev_dbg(&isp->pdev->dev, + "%s: Validity ack received from peer\n", + __func__); + return 0; + } + usleep_range(100, 1000); + } while (!time_after(jiffies, tout_jfs)); + + dev_err(&isp->pdev->dev, "Timed out while waiting for CSE!\n"); + + ipu_buttress_ipc_validity_close(isp, ipc); + + return -ETIMEDOUT; +} + +static void ipu_buttress_ipc_recv(struct ipu_device *isp, + struct ipu_buttress_ipc *ipc, u32 *ipc_msg) +{ + if (ipc_msg) + *ipc_msg = readl(isp->base + ipc->data0_in); + writel(0, isp->base + ipc->db0_in); +} + +int +ipu_buttress_ipc_send_bulk(struct ipu_device *isp, + enum ipu_buttress_ipc_domain ipc_domain, + struct ipu_ipc_buttress_bulk_msg *msgs, u32 size) +{ + struct ipu_buttress *b = &isp->buttress; + struct ipu_buttress_ipc *ipc; + unsigned long tx_timeout_jiffies, rx_timeout_jiffies; + u32 val; + int ret; + int tout; + unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY; + + ipc = ipc_domain == IPU_BUTTRESS_IPC_CSE ? 
&b->cse : &b->ish; + + mutex_lock(&b->ipc_mutex); + + ret = ipu_buttress_ipc_validity_open(isp, ipc); + if (ret) { + dev_err(&isp->pdev->dev, "IPC validity open failed\n"); + goto out; + } + + tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT); + rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT); + + for (i = 0; i < size; i++) { + reinit_completion(&ipc->send_complete); + if (msgs[i].require_resp) + reinit_completion(&ipc->recv_complete); + + dev_dbg(&isp->pdev->dev, "bulk IPC command: 0x%x\n", + msgs[i].cmd); + writel(msgs[i].cmd, isp->base + ipc->data0_out); + + val = 1 << BUTTRESS_IU2CSEDB0_BUSY_SHIFT | msgs[i].cmd_size; + + writel(val, isp->base + ipc->db0_out); + + tout = wait_for_completion_timeout(&ipc->send_complete, + tx_timeout_jiffies); + if (!tout) { + dev_err(&isp->pdev->dev, "send IPC response timeout\n"); + if (!retry--) { + ret = -ETIMEDOUT; + goto out; + } + + /* + * WORKAROUND: Sometimes CSE is not + * responding on first try, try again. + */ + writel(0, isp->base + ipc->db0_out); + i--; + continue; + } + + retry = BUTTRESS_IPC_CMD_SEND_RETRY; + + if (!msgs[i].require_resp) + continue; + + tout = wait_for_completion_timeout(&ipc->recv_complete, + rx_timeout_jiffies); + if (!tout) { + dev_err(&isp->pdev->dev, "recv IPC response timeout\n"); + ret = -ETIMEDOUT; + goto out; + } + + if (ipc->nack_mask && + (ipc->recv_data & ipc->nack_mask) == ipc->nack) { + dev_err(&isp->pdev->dev, + "IPC NACK for cmd 0x%x\n", msgs[i].cmd); + ret = -ENODEV; + goto out; + } + + if (ipc->recv_data != msgs[i].expected_resp) { + dev_err(&isp->pdev->dev, + "expected resp: 0x%x, IPC response: 0x%x ", + msgs[i].expected_resp, ipc->recv_data); + ret = -EIO; + goto out; + } + } + + dev_dbg(&isp->pdev->dev, "bulk IPC commands completed\n"); + +out: + ipu_buttress_ipc_validity_close(isp, ipc); + mutex_unlock(&b->ipc_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(ipu_buttress_ipc_send_bulk); + +static int +ipu_buttress_ipc_send(struct ipu_device *isp, + enum ipu_buttress_ipc_domain ipc_domain, + u32 ipc_msg, u32 size) +{ + struct ipu_ipc_buttress_bulk_msg msg = { + .cmd = ipc_msg, + .cmd_size = size, + }; + + return ipu_buttress_ipc_send_bulk(isp, ipc_domain, &msg, 1); +} + +static irqreturn_t ipu_buttress_call_isr(struct ipu_bus_device *adev) +{ + irqreturn_t ret = IRQ_WAKE_THREAD; + + if (!adev || !adev->adrv) + return IRQ_NONE; + + if (adev->adrv->isr) + ret = adev->adrv->isr(adev); + + if (ret == IRQ_WAKE_THREAD && !adev->adrv->isr_threaded) + ret = IRQ_NONE; + + adev->adrv->wake_isr_thread = (ret == IRQ_WAKE_THREAD); + + return ret; +} + +irqreturn_t ipu_buttress_isr(int irq, void *isp_ptr) +{ + struct ipu_device *isp = isp_ptr; + struct ipu_bus_device *adev[] = { isp->isys, isp->psys }; + struct ipu_buttress *b = &isp->buttress; + irqreturn_t ret = IRQ_NONE; + u32 disable_irqs = 0; + u32 irq_status; +#ifdef CONFIG_VIDEO_INTEL_IPU4P + u32 reg_irq_sts = BUTTRESS_REG_ISR_STATUS; +#else + u32 reg_irq_sts = BUTTRESS_REG_ISR_ENABLED_STATUS; +#endif + unsigned int i; + + dev_dbg(&isp->pdev->dev, "isr: Buttress interrupt handler\n"); + + pm_runtime_get(&isp->pdev->dev); + + if (!pm_runtime_active(&isp->pdev->dev)) { + irq_status = readl(isp->base + reg_irq_sts); + writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR); + pm_runtime_put(&isp->pdev->dev); + return IRQ_HANDLED; + } + + trace_ipu_perf_reg(BUTTRESS_REG_IS_FREQ_CTL, + readl(isp->base + BUTTRESS_REG_IS_FREQ_CTL)); + trace_ipu_perf_reg(BUTTRESS_REG_PS_FREQ_CTL, + readl(isp->base + BUTTRESS_REG_PS_FREQ_CTL)); + + irq_status = 
readl(isp->base + reg_irq_sts);
+	if (!irq_status) {
+		pm_runtime_put(&isp->pdev->dev);
+		return IRQ_NONE;
+	}
+
+	do {
+		writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR);
+
+		for (i = 0; i < ARRAY_SIZE(ipu_adev_irq_mask); i++) {
+			if (irq_status & ipu_adev_irq_mask[i]) {
+				irqreturn_t r = ipu_buttress_call_isr(adev[i]);
+
+				if (r == IRQ_WAKE_THREAD) {
+					ret = IRQ_WAKE_THREAD;
+					disable_irqs |= ipu_adev_irq_mask[i];
+				} else if (ret == IRQ_NONE &&
+					   r == IRQ_HANDLED) {
+					ret = IRQ_HANDLED;
+				}
+			}
+		}
+
+		if (irq_status & (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING |
+				  BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING |
+				  BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE |
+				  BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH |
+				  BUTTRESS_ISR_SAI_VIOLATION) &&
+		    ret == IRQ_NONE)
+			ret = IRQ_HANDLED;
+
+		if (irq_status & BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING) {
+			dev_dbg(&isp->pdev->dev,
+				"BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING\n");
+			ipu_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data);
+			complete(&b->cse.recv_complete);
+		}
+
+		if (irq_status & BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING) {
+			dev_dbg(&isp->pdev->dev,
+				"BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING\n");
+			ipu_buttress_ipc_recv(isp, &b->ish, &b->ish.recv_data);
+			complete(&b->ish.recv_complete);
+		}
+
+		if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) {
+			dev_dbg(&isp->pdev->dev,
+				"BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n");
+			complete(&b->cse.send_complete);
+		}
+
+		if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH) {
+			dev_dbg(&isp->pdev->dev,
+				"BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH\n");
+			complete(&b->ish.send_complete);
+		}
+
+		if (irq_status & BUTTRESS_ISR_SAI_VIOLATION) {
+			dev_err(&isp->pdev->dev,
+				"BUTTRESS_ISR_SAI_VIOLATION\n");
+			WARN_ON(1);
+		}
+
+		irq_status = readl(isp->base + reg_irq_sts);
+	} while (irq_status && !isp->flr_done);
+
+	if (disable_irqs)
+		writel(BUTTRESS_IRQS & ~disable_irqs,
+		       isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+	pm_runtime_put(&isp->pdev->dev);
+
+	return ret;
+}
+
+irqreturn_t ipu_buttress_isr_threaded(int irq, void *isp_ptr)
+{
+	struct ipu_device *isp = isp_ptr;
+	struct ipu_bus_device *adev[] = { isp->isys, isp->psys };
+	irqreturn_t ret = IRQ_NONE;
+	unsigned int i;
+
+	dev_dbg(&isp->pdev->dev, "isr: Buttress threaded interrupt handler\n");
+
+	for (i = 0; i < ARRAY_SIZE(ipu_adev_irq_mask); i++) {
+		if (adev[i] && adev[i]->adrv &&
+		    adev[i]->adrv->wake_isr_thread &&
+		    adev[i]->adrv->isr_threaded(adev[i]) == IRQ_HANDLED)
+			ret = IRQ_HANDLED;
+	}
+
+	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+	return ret;
+}
+
+int ipu_buttress_power(struct device *dev,
+		       struct ipu_buttress_ctrl *ctrl, bool on)
+{
+	struct ipu_device *isp = to_ipu_bus_device(dev)->isp;
+	unsigned long tout_jfs;
+	u32 pwr_sts, val;
+	int ret = 0;
+
+	if (!ctrl)
+		return 0;
+
+	/* Until FLR completion nothing is expected to work */
+	if (isp->flr_done)
+		return 0;
+
+	mutex_lock(&isp->buttress.power_mutex);
+
+	if (!on) {
+		val = 0;
+		pwr_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift;
+	} else {
+		val = 1 << BUTTRESS_FREQ_CTL_START_SHIFT
+			| ctrl->divisor << ctrl->divisor_shift
+			| ctrl->qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT;
+
+		pwr_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift;
+	}
+
+	val |= ctrl->ovrd << ctrl->ovrd_shift;
+	writel(val, isp->base + ctrl->freq_ctl);
+
+	tout_jfs = jiffies + msecs_to_jiffies(BUTTRESS_POWER_TIMEOUT);
+	do {
+		usleep_range(10, 40);
+		val = readl(isp->base + BUTTRESS_REG_PWR_STATE);
+		if ((val & ctrl->pwr_sts_mask) == pwr_sts) {
+			dev_dbg(&isp->pdev->dev,
+				"Rail state successfully changed\n");
+			goto out;
+		}
+	} while
+int ipu_buttress_power(struct device *dev,
+		       struct ipu_buttress_ctrl *ctrl, bool on)
+{
+	struct ipu_device *isp = to_ipu_bus_device(dev)->isp;
+	unsigned long tout_jfs;
+	u32 pwr_sts, val;
+	int ret = 0;
+
+	if (!ctrl)
+		return 0;
+
+	/* Until FLR completion nothing is expected to work */
+	if (isp->flr_done)
+		return 0;
+
+	mutex_lock(&isp->buttress.power_mutex);
+
+	if (!on) {
+		val = 0;
+		pwr_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift;
+	} else {
+		val = 1 << BUTTRESS_FREQ_CTL_START_SHIFT
+		    | ctrl->divisor << ctrl->divisor_shift
+		    | ctrl->qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT;
+
+		pwr_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift;
+	}
+
+	val |= ctrl->ovrd << ctrl->ovrd_shift;
+	writel(val, isp->base + ctrl->freq_ctl);
+
+	tout_jfs = jiffies + msecs_to_jiffies(BUTTRESS_POWER_TIMEOUT);
+	do {
+		usleep_range(10, 40);
+		val = readl(isp->base + BUTTRESS_REG_PWR_STATE);
+		if ((val & ctrl->pwr_sts_mask) == pwr_sts) {
+			dev_dbg(&isp->pdev->dev,
+				"Rail state successfully changed\n");
+			goto out;
+		}
+	} while (!time_after(jiffies, tout_jfs));
+
+	dev_err(&isp->pdev->dev,
+		"Timeout when trying to change state of the rail 0x%x\n", val);
+
+	ret = -ETIMEDOUT;
+
+out:
+	ctrl->started = !ret && on;
+
+	trace_ipu_perf_reg(BUTTRESS_REG_IS_FREQ_CTL,
+			   readl(isp->base + BUTTRESS_REG_IS_FREQ_CTL));
+	trace_ipu_perf_reg(BUTTRESS_REG_PS_FREQ_CTL,
+			   readl(isp->base + BUTTRESS_REG_PS_FREQ_CTL));
+
+	mutex_unlock(&isp->buttress.power_mutex);
+
+	return ret;
+}
+
+static bool secure_mode_enable = true;
+module_param(secure_mode_enable, bool, 0660);
+MODULE_PARM_DESC(secure_mode_enable, "IPU secure mode enable");
+
+void ipu_buttress_set_secure_mode(struct ipu_device *isp)
+{
+	u8 retry = 100;
+	u32 val, read;
+
+	/*
+	 * HACK to disable possible secure mode. This can be
+	 * reverted once CSE itself disables the secure mode.
+	 */
+	read = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+	if (secure_mode_enable)
+		val = read |= 1 << BUTTRESS_SECURITY_CTL_FW_SECURE_MODE_SHIFT;
+	else
+		val = read & ~(1 << BUTTRESS_SECURITY_CTL_FW_SECURE_MODE_SHIFT);
+
+	if (val == read)
+		return;
+
+	writel(val, isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+	/*
+	 * In B0, because of a HW bug, a write to some buttress registers
+	 * might not succeed on the first attempt. Write twice, then keep
+	 * re-writing until the read-back matches the value written.
+	 */
+	writel(val, isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+	while (retry--) {
+		read = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+		if (read == val)
+			break;
+
+		writel(val, isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+		if (retry == 0)
+			dev_err(&isp->pdev->dev,
+				"update security control register failed\n");
+	}
+}
+EXPORT_SYMBOL_GPL(ipu_buttress_set_secure_mode);
+
+bool ipu_buttress_get_secure_mode(struct ipu_device *isp)
+{
+	u32 val;
+
+	val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+	return val & (1 << BUTTRESS_SECURITY_CTL_FW_SECURE_MODE_SHIFT);
+}
+
+bool ipu_buttress_auth_done(struct ipu_device *isp)
+{
+	u32 val;
+
+	if (!isp->secure_mode)
+		return true;
+
+	val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+	return (val & BUTTRESS_SECURITY_CTL_FW_SETUP_MASK) ==
+	    BUTTRESS_SECURITY_CTL_AUTH_DONE;
+}
+EXPORT_SYMBOL(ipu_buttress_auth_done);
+
+static void ipu_buttress_set_psys_ratio(struct ipu_device *isp,
+					unsigned int psys_divisor,
+					unsigned int psys_qos_floor)
+{
+	struct ipu_buttress_ctrl *ctrl = isp->psys_iommu->ctrl;
+
+	mutex_lock(&isp->buttress.power_mutex);
+
+	if (ctrl->divisor == psys_divisor && ctrl->qos_floor == psys_qos_floor)
+		goto out_mutex_unlock;
+
+	ctrl->divisor = psys_divisor;
+	ctrl->qos_floor = psys_qos_floor;
+
+	if (ctrl->started) {
+		/*
+		 * According to documentation driver initiates DVFS
+		 * transition by writing wanted ratio, floor ratio and start
+		 * bit. 
No need to stop PS first + */ + writel(1 << BUTTRESS_FREQ_CTL_START_SHIFT | + ctrl-> + qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT | + psys_divisor, isp->base + BUTTRESS_REG_PS_FREQ_CTL); + } + +out_mutex_unlock: + mutex_unlock(&isp->buttress.power_mutex); +} + +static void ipu_buttress_set_psys_freq(struct ipu_device *isp, + unsigned int freq) +{ + unsigned int psys_ratio = freq / BUTTRESS_PS_FREQ_STEP; + + if (isp->buttress.psys_force_ratio) + return; + + ipu_buttress_set_psys_ratio(isp, psys_ratio, psys_ratio); +} + +void +ipu_buttress_add_psys_constraint(struct ipu_device *isp, + struct ipu_buttress_constraint *constraint) +{ + struct ipu_buttress *b = &isp->buttress; + + mutex_lock(&b->cons_mutex); + list_add(&constraint->list, &b->constraints); + + if (constraint->min_freq > b->psys_min_freq) { + isp->buttress.psys_min_freq = min(constraint->min_freq, + b->psys_fused_freqs.max_freq); + ipu_buttress_set_psys_freq(isp, b->psys_min_freq); + } + mutex_unlock(&isp->buttress.cons_mutex); +} +EXPORT_SYMBOL_GPL(ipu_buttress_add_psys_constraint); + +void +ipu_buttress_remove_psys_constraint(struct ipu_device *isp, + struct ipu_buttress_constraint *constraint) +{ + struct ipu_buttress *b = &isp->buttress; + struct ipu_buttress_constraint *c; + unsigned int min_freq = 0; + + mutex_lock(&b->cons_mutex); + list_del(&constraint->list); + + if (constraint->min_freq >= b->psys_min_freq) { + list_for_each_entry(c, &b->constraints, list) + if (c->min_freq > min_freq) + min_freq = c->min_freq; + + b->psys_min_freq = clamp(min_freq, + b->psys_fused_freqs.efficient_freq, + b->psys_fused_freqs.max_freq); + ipu_buttress_set_psys_freq(isp, b->psys_min_freq); + } + mutex_unlock(&b->cons_mutex); +} +EXPORT_SYMBOL_GPL(ipu_buttress_remove_psys_constraint); + +int ipu_buttress_reset_authentication(struct ipu_device *isp) +{ + unsigned long tout_jfs; + u32 val; + + if (!isp->secure_mode) { + dev_dbg(&isp->pdev->dev, + "Non-secure mode -> skip authentication\n"); + return 0; + } + + writel(1 << BUTTRESS_FW_RESET_CTL_START_SHIFT, isp->base + + BUTTRESS_REG_FW_RESET_CTL); + + tout_jfs = jiffies + msecs_to_jiffies(BUTTRESS_CSE_FWRESET_TIMEOUT); + do { + val = readl(isp->base + BUTTRESS_REG_FW_RESET_CTL); + if (val & 1 << BUTTRESS_FW_RESET_CTL_DONE_SHIFT) { + dev_info(&isp->pdev->dev, + "FW reset for authentication done!\n"); + writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL); + /* + * Leave some time for HW restore. 
+ */ + usleep_range(100, 1000); + return 0; + } + usleep_range(100, 1000); + } while (!time_after(jiffies, tout_jfs)); + + dev_err(&isp->pdev->dev, + "Timed out while resetting authentication state!\n"); + + return -ETIMEDOUT; +} + +int ipu_buttress_map_fw_image(struct ipu_bus_device *sys, + const struct firmware *fw, struct sg_table *sgt) +{ + struct page **pages; + const void *addr; + unsigned long n_pages, i; + int rval; + + n_pages = PAGE_ALIGN(fw->size) >> PAGE_SHIFT; + + pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); + if (!pages) + return -ENOMEM; + + addr = fw->data; + for (i = 0; i < n_pages; i++) { + struct page *p = vmalloc_to_page(addr); + + if (!p) { + rval = -ENODEV; + goto out; + } + pages[i] = p; + addr += PAGE_SIZE; + } + + rval = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, fw->size, + GFP_KERNEL); + if (rval) { + rval = -ENOMEM; + goto out; + } + + n_pages = dma_map_sg(&sys->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); + if (n_pages != sgt->nents) { + rval = -ENOMEM; + sg_free_table(sgt); + goto out; + } + + dma_sync_sg_for_device(&sys->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); + +out: + kfree(pages); + + return rval; +} +EXPORT_SYMBOL_GPL(ipu_buttress_map_fw_image); + +int ipu_buttress_unmap_fw_image(struct ipu_bus_device *sys, + struct sg_table *sgt) +{ + dma_unmap_sg(&sys->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); + sg_free_table(sgt); + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_buttress_unmap_fw_image); + +int ipu_buttress_authenticate(struct ipu_device *isp) +{ + struct ipu_psys_pdata *psys_pdata; + struct ipu_buttress *b = &isp->buttress; + u32 data; + int rval; + unsigned long tout_jfs; + + if (!isp->secure_mode) { + dev_dbg(&isp->pdev->dev, + "Non-secure mode -> skip authentication\n"); + return 0; + } + + psys_pdata = isp->psys->pdata; + + mutex_lock(&b->auth_mutex); + + rval = pm_runtime_get_sync(&isp->psys_iommu->dev); + if (rval < 0) { + dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", rval); + goto iunit_power_off; + } + + if (ipu_buttress_auth_done(isp)) { + rval = 0; + goto iunit_power_off; + } + + /* + * Write address of FIT table to FW_SOURCE register + * Let's use fw address. I.e. 
not using FIT table yet.
+	 */
+	data = lower_32_bits(isp->pkg_dir_dma_addr);
+	writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_LO);
+
+	data = upper_32_bits(isp->pkg_dir_dma_addr);
+	writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_HI);
+
+	/*
+	 * Write boot_load into IU2CSEDATA0.
+	 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to
+	 * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY to '1'.
+	 */
+	dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n");
+	rval = ipu_buttress_ipc_send(isp, IPU_BUTTRESS_IPC_CSE,
+				     BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD, 1);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "CSE boot_load failed\n");
+		goto iunit_power_off;
+	}
+
+	tout_jfs = jiffies + msecs_to_jiffies(BUTTRESS_CSE_BOOTLOAD_TIMEOUT);
+	do {
+		data = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+		data &= BUTTRESS_SECURITY_CTL_FW_SETUP_MASK;
+		if (data == BUTTRESS_SECURITY_CTL_FW_SETUP_DONE) {
+			dev_dbg(&isp->pdev->dev, "CSE boot_load done\n");
+			break;
+		} else if (data == BUTTRESS_SECURITY_CTL_AUTH_FAILED) {
+			dev_err(&isp->pdev->dev, "CSE boot_load failed\n");
+			rval = -EINVAL;
+			goto iunit_power_off;
+		}
+		usleep_range(500, 1000);
+	} while (!time_after(jiffies, tout_jfs));
+
+	if (data != BUTTRESS_SECURITY_CTL_FW_SETUP_DONE) {
+		dev_err(&isp->pdev->dev, "CSE boot_load timed out\n");
+		rval = -ETIMEDOUT;
+		goto iunit_power_off;
+	}
+
+	tout_jfs = jiffies + msecs_to_jiffies(BUTTRESS_CSE_BOOTLOAD_TIMEOUT);
+	do {
+		data = readl(psys_pdata->base + BOOTLOADER_STATUS_OFFSET);
+		dev_dbg(&isp->pdev->dev, "%s: BOOTLOADER_STATUS 0x%x",
+			__func__, data);
+		if (data == BOOTLOADER_MAGIC_KEY) {
+			dev_dbg(&isp->pdev->dev,
+				"%s: Expected magic number found, breaking...",
+				__func__);
+			break;
+		}
+		usleep_range(500, 1000);
+	} while (!time_after(jiffies, tout_jfs));
+
+	if (data != BOOTLOADER_MAGIC_KEY) {
+		dev_err(&isp->pdev->dev,
+			"%s: CSE boot_load timed out...\n", __func__);
+		rval = -ETIMEDOUT;
+		goto iunit_power_off;
+	}
+
+	/*
+	 * Write authenticate_run into IU2CSEDATA0.
+	 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to
+	 * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY to '1'.
+	 */
+	dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n");
+	rval = ipu_buttress_ipc_send(isp, IPU_BUTTRESS_IPC_CSE,
+				     BUTTRESS_IU2CSEDATA0_IPC_AUTHENTICATE_RUN,
+				     1);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "CSE authenticate_run failed\n");
+		goto iunit_power_off;
+	}
+
+	tout_jfs = jiffies;
+	tout_jfs += msecs_to_jiffies(BUTTRESS_CSE_AUTHENTICATE_TIMEOUT);
+	do {
+		data = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+		data &= BUTTRESS_SECURITY_CTL_FW_SETUP_MASK;
+		if (data == BUTTRESS_SECURITY_CTL_AUTH_DONE) {
+			dev_dbg(&isp->pdev->dev, "CSE authenticate_run done\n");
+			break;
+		} else if (data == BUTTRESS_SECURITY_CTL_AUTH_FAILED) {
+			dev_err(&isp->pdev->dev,
+				"CSE authenticate_run failed\n");
+			rval = -EINVAL;
+			goto iunit_power_off;
+		}
+		usleep_range(500, 1000);
+	} while (!time_after(jiffies, tout_jfs));
+
+	if (data != BUTTRESS_SECURITY_CTL_AUTH_DONE) {
+		dev_err(&isp->pdev->dev, "CSE authenticate_run timed out\n");
+		rval = -ETIMEDOUT;
+		goto iunit_power_off;
+	}
+
+iunit_power_off:
+	pm_runtime_put(&isp->psys_iommu->dev);
+
+	mutex_unlock(&b->auth_mutex);
+
+	return rval;
+}
+EXPORT_SYMBOL(ipu_buttress_authenticate);
+
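ipu_buttress_authenticate() repeats the same open-coded wait-for-status idiom three times (FW_SETUP_DONE, BOOTLOADER_MAGIC_KEY, AUTH_DONE). For reference, the first wait could also be written with the readl_poll_timeout() helper from <linux/iopoll.h>; a sketch only, not part of this patch:

	#include <linux/iopoll.h>

	/* Hypothetical equivalent of the boot_load wait loop above. */
	static int example_wait_fw_setup_done(struct ipu_device *isp)
	{
		u32 data;

		/* Poll SECURITY_CTL every 500 us until DONE or timeout. */
		return readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL,
					  data,
					  (data & BUTTRESS_SECURITY_CTL_FW_SETUP_MASK) ==
					  BUTTRESS_SECURITY_CTL_FW_SETUP_DONE,
					  500,
					  BUTTRESS_CSE_BOOTLOAD_TIMEOUT * USEC_PER_MSEC);
	}

One reason to keep the open-coded loops is that they also recognize BUTTRESS_SECURITY_CTL_AUTH_FAILED mid-wait and fail fast with -EINVAL, which a single-condition poll helper cannot express.
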
+static int ipu_buttress_send_tsc_request(struct ipu_device *isp)
+{
+	unsigned long tout_jfs = msecs_to_jiffies(5);
+
+	writel(BUTTRESS_FABRIC_CMD_START_TSC_SYNC,
+	       isp->base + BUTTRESS_REG_FABRIC_CMD);
+
+	tout_jfs += jiffies;
+	do {
+		u32 val;
+
+		val = readl(isp->base + BUTTRESS_REG_PWR_STATE);
+		val = (val & BUTTRESS_PWR_STATE_HH_STATUS_MASK) >>
+		    BUTTRESS_PWR_STATE_HH_STATUS_SHIFT;
+
+		switch (val) {
+		case BUTTRESS_PWR_STATE_HH_STATE_DONE:
+			dev_dbg(&isp->pdev->dev, "Start tsc sync completed!\n");
+			return 0;
+		case BUTTRESS_PWR_STATE_HH_STATE_ERR:
+			dev_err(&isp->pdev->dev, "Start tsc sync failed!\n");
+			return -EINVAL;
+		default:
+			usleep_range(500, 1000);
+			break;
+		}
+	} while (!time_after(jiffies, tout_jfs));
+
+	return -ETIMEDOUT;
+}
+
+int ipu_buttress_start_tsc_sync(struct ipu_device *isp)
+{
+	unsigned int i;
+
+	for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) {
+		int ret;
+
+		ret = ipu_buttress_send_tsc_request(isp);
+		if (ret == -ETIMEDOUT) {
+			u32 val;
+
+			/* set tsw soft reset */
+			val = readl(isp->base + BUTTRESS_REG_TSW_CTL);
+			val |= BUTTRESS_TSW_CTL_SOFT_RESET;
+			writel(val, isp->base + BUTTRESS_REG_TSW_CTL);
+			/* clear tsw soft reset */
+			val &= ~BUTTRESS_TSW_CTL_SOFT_RESET;
+			writel(val, isp->base + BUTTRESS_REG_TSW_CTL);
+
+			continue;
+		}
+		return ret;
+	}
+
+	dev_err(&isp->pdev->dev, "TSC sync failed (timeout).\n");
+
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(ipu_buttress_start_tsc_sync);
+
+struct clk_ipu_sensor {
+	struct ipu_device *isp;
+	struct clk_hw hw;
+	unsigned int id;
+	unsigned long rate;
+};
+
+#define to_clk_ipu_sensor(_hw) container_of(_hw, struct clk_ipu_sensor, hw)
+
+static int ipu_buttress_clk_pll_prepare(struct clk_hw *hw)
+{
+	struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw);
+	int ret;
+
+	/* Workaround needed to get sensor clock running in some cases */
+	ret = pm_runtime_get_sync(&ck->isp->isys->dev);
+	return ret >= 0 ? 0 : ret;
+}
+
+static void ipu_buttress_clk_pll_unprepare(struct clk_hw *hw)
+{
+	struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw);
+
+	/* Workaround needed to get sensor clock stopped in some cases */
+	pm_runtime_put(&ck->isp->isys->dev);
+}
+
+static int ipu_buttress_clk_pll_enable(struct clk_hw *hw)
+{
+	struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw);
+	u32 val;
+	unsigned int i;
+
+	/*
+	 * Start bit behaves like master clock request towards ICLK.
+	 * It is needed regardless of the 24 MHz or per clock out pll
+	 * setting. 
+ */ + val = readl(ck->isp->base + BUTTRESS_REG_SENSOR_FREQ_CTL); + val |= 1 << BUTTRESS_FREQ_CTL_START_SHIFT; + val &= ~BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_MASK(ck->id); + for (i = 0; i < ARRAY_SIZE(sensor_clk_freqs); i++) + if (sensor_clk_freqs[i].rate == ck->rate) + break; + + if (i < ARRAY_SIZE(sensor_clk_freqs)) + val |= sensor_clk_freqs[i].val << + BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_SHIFT(ck->id); + else + val |= BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_DEFAULT(ck->id); + + writel(val, ck->isp->base + BUTTRESS_REG_SENSOR_FREQ_CTL); + + return 0; +} + +static void ipu_buttress_clk_pll_disable(struct clk_hw *hw) +{ + struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw); + u32 val; + int i; + + val = readl(ck->isp->base + BUTTRESS_REG_SENSOR_CLK_CTL); + for (i = 0; i < IPU_BUTTRESS_NUM_OF_SENS_CKS; i++) { + if (val & + (1 << BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_EN_SHIFT(i))) + return; + } + + /* See enable control above */ + val = readl(ck->isp->base + BUTTRESS_REG_SENSOR_FREQ_CTL); + val &= ~(1 << BUTTRESS_FREQ_CTL_START_SHIFT); + writel(val, ck->isp->base + BUTTRESS_REG_SENSOR_FREQ_CTL); +} + +static int ipu_buttress_clk_enable(struct clk_hw *hw) +{ + struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw); + u32 val; + + val = readl(ck->isp->base + BUTTRESS_REG_SENSOR_CLK_CTL); + val |= 1 << BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_EN_SHIFT(ck->id); + + /* Enable dynamic sensor clock */ + val |= 1 << BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_SEL_SHIFT(ck->id); + writel(val, ck->isp->base + BUTTRESS_REG_SENSOR_CLK_CTL); + + return 0; +} + +static void ipu_buttress_clk_disable(struct clk_hw *hw) +{ + struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw); + u32 val; + + val = readl(ck->isp->base + BUTTRESS_REG_SENSOR_CLK_CTL); + val &= ~(1 << BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_EN_SHIFT(ck->id)); + writel(val, ck->isp->base + BUTTRESS_REG_SENSOR_CLK_CTL); +} + +static long ipu_buttress_clk_round_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *parent_rate) +{ + unsigned long best = ULONG_MAX; + unsigned long round_rate = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(sensor_clk_freqs); i++) { + long diff = sensor_clk_freqs[i].rate - rate; + + if (diff == 0) + return rate; + + diff = abs(diff); + if (diff < best) { + best = diff; + round_rate = sensor_clk_freqs[i].rate; + } + } + + return round_rate; +} + +static unsigned long +ipu_buttress_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw); + + return ck->rate; +} + +static int ipu_buttress_clk_set_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long parent_rate) +{ + struct clk_ipu_sensor *ck = to_clk_ipu_sensor(hw); + + /* + * R N P PVD PLLout + * 1 45 128 2 6.75 + * 1 40 96 2 8 + * 1 40 80 2 9.6 + * 1 15 20 4 14.4 + * 1 40 32 2 24 + * 1 65 48 1 26 + * + */ + ck->rate = rate; + + return 0; +} + +static const struct clk_ops ipu_buttress_clk_sensor_ops = { + .enable = ipu_buttress_clk_enable, + .disable = ipu_buttress_clk_disable, +}; + +static const struct clk_ops ipu_buttress_clk_sensor_ops_parent = { + .enable = ipu_buttress_clk_pll_enable, + .disable = ipu_buttress_clk_pll_disable, + .prepare = ipu_buttress_clk_pll_prepare, + .unprepare = ipu_buttress_clk_pll_unprepare, + .round_rate = ipu_buttress_clk_round_rate, + .recalc_rate = ipu_buttress_clk_recalc_rate, + .set_rate = ipu_buttress_clk_set_rate, +}; + +static struct clk_init_data ipu_buttress_sensor_clk_data[] = { + { + .name = "OSC_CLK_OUT0", + .ops = &ipu_buttress_clk_sensor_ops, + .parent_names = (const char 
*[]){"ipu_sensor_pll0"}, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, + { + .name = "OSC_CLK_OUT1", + .ops = &ipu_buttress_clk_sensor_ops, + .parent_names = (const char *[]){"ipu_sensor_pll1"}, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, + { + .name = "OSC_CLK_OUT2", + .ops = &ipu_buttress_clk_sensor_ops, + .parent_names = (const char *[]){"ipu_sensor_pll2"}, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_init_data ipu_buttress_sensor_pll_data[] = { + { + .name = "ipu_sensor_pll0", + .ops = &ipu_buttress_clk_sensor_ops_parent, + }, + { + .name = "ipu_sensor_pll1", + .ops = &ipu_buttress_clk_sensor_ops_parent, + }, + { + .name = "ipu_sensor_pll2", + .ops = &ipu_buttress_clk_sensor_ops_parent, + }, +}; + +static void ipu_buttress_read_psys_fused_freqs(struct ipu_device *isp) +{ + struct ipu_buttress_fused_freqs *fused_freq = + &isp->buttress.psys_fused_freqs; + u32 reg_val, max_ratio, min_ratio, efficient_ratio; + + reg_val = readl(isp->base + BUTTRESS_REG_PS_FREQ_CAPABILITIES); + + min_ratio = (reg_val & + BUTTRESS_PS_FREQ_CAPABILITIES_MIN_RATIO_MASK) >> + BUTTRESS_PS_FREQ_CAPABILITIES_MIN_RATIO_SHIFT; + max_ratio = (reg_val & + BUTTRESS_PS_FREQ_CAPABILITIES_MAX_RATIO_MASK) >> + BUTTRESS_PS_FREQ_CAPABILITIES_MAX_RATIO_SHIFT; + efficient_ratio = + (reg_val & + BUTTRESS_PS_FREQ_CAPABILITIES_EFFICIENT_RATIO_MASK) >> + BUTTRESS_PS_FREQ_CAPABILITIES_EFFICIENT_RATIO_SHIFT; + + fused_freq->min_freq = min_ratio * BUTTRESS_PS_FREQ_STEP; + fused_freq->max_freq = max_ratio * BUTTRESS_PS_FREQ_STEP; + fused_freq->efficient_freq = efficient_ratio * BUTTRESS_PS_FREQ_STEP; +} + +static int ipu_buttress_clk_init(struct ipu_device *isp) +{ + struct ipu_buttress *b = &isp->buttress; + struct ipu_isys_subdev_pdata *pdata = isp->pdev->dev.platform_data; + struct ipu_isys_clk_mapping *clkmap = pdata ? 
pdata->clk_map : NULL; + struct clk_init_data *clk_data_parent; + struct clk_init_data *clk_data; + int i, rval; + unsigned int num_plls; + + ipu_buttress_read_psys_fused_freqs(isp); + isp->buttress.psys_min_freq = b->psys_fused_freqs.efficient_freq; + + clk_data_parent = ipu_buttress_sensor_pll_data; + + num_plls = ARRAY_SIZE(ipu_buttress_sensor_pll_data); + + for (i = 0; i < num_plls; i++) { + struct clk_ipu_sensor *parent_clk = + devm_kzalloc(&isp->pdev->dev, + sizeof(*parent_clk), GFP_KERNEL); + + if (!parent_clk) { + rval = -ENOMEM; + goto err; + } + + parent_clk->hw.init = &clk_data_parent[i]; + parent_clk->isp = isp; + parent_clk->id = i; + + b->pll_sensor[i] = clk_register(NULL, &parent_clk->hw); + if (IS_ERR(b->pll_sensor[i])) { + rval = PTR_ERR(b->pll_sensor[i]); + goto err; + } + } + + clk_data = ipu_buttress_sensor_clk_data; + + for (i = 0; i < IPU_BUTTRESS_NUM_OF_SENS_CKS; i++) { + char buffer[16]; /* max for clk_register_clkdev */ + unsigned int parent_index = 0; + struct clk_ipu_sensor *my_clk = + devm_kzalloc(&isp->pdev->dev, sizeof(*my_clk), + GFP_KERNEL); + + if (!my_clk) { + rval = -ENOMEM; + goto err; + } + + if (i < num_plls) + parent_index = i; + + my_clk->hw.init = &clk_data[i]; + + my_clk->id = i; + my_clk->isp = isp; + + b->clk_sensor[i] = clk_register(NULL, &my_clk->hw); + if (IS_ERR(b->clk_sensor[i])) { + rval = PTR_ERR(b->clk_sensor[i]); + goto err; + } + rval = clk_set_parent(b->clk_sensor[i], + b->pll_sensor[parent_index]); + if (rval) + goto err; + + /* Register generic clocks for sensor driver */ + snprintf(buffer, sizeof(buffer), "ipu_cam_clk%d", i); + rval = clk_register_clkdev(b->clk_sensor[i], buffer, NULL); + if (rval) + goto err; + } + + /* Now map sensor clocks */ + if (!clkmap) + return 0; + + while (clkmap->clkdev_data.dev_id) { + /* + * Lookup table must be NULL terminated + * CLKDEV_INIT(NULL, NULL, NULL) + */ + for (i = 0; i < IPU_BUTTRESS_NUM_OF_SENS_CKS; i++) { + if (!strcmp(clkmap->platform_clock_name, + clk_data[i].name)) { + clkmap->clkdev_data.clk = b->clk_sensor[i]; + clkdev_add(&clkmap->clkdev_data); + break; + } + } + clkmap++; + } + + return 0; + +err: + /* It is safe to call clk_unregister with null pointer */ + for (i = IPU_BUTTRESS_NUM_OF_SENS_CKS - 1; i >= 0; i--) + clk_unregister(b->clk_sensor[i]); + + for (i = num_plls - 1; i >= 0; i--) + clk_unregister(b->pll_sensor[i]); + + return rval; +} + +static void ipu_buttress_clk_exit(struct ipu_device *isp) +{ + struct ipu_buttress *b = &isp->buttress; + int i; + + /* It is safe to call clk_unregister with null pointer */ + for (i = 0; i < IPU_BUTTRESS_NUM_OF_SENS_CKS; i++) + clk_unregister(b->clk_sensor[i]); + + for (i = 0; i < ARRAY_SIZE(ipu_buttress_sensor_pll_data); i++) + clk_unregister(b->pll_sensor[i]); +} + +int ipu_buttress_tsc_read(struct ipu_device *isp, u64 *val) +{ + struct ipu_buttress *b = &isp->buttress; + u32 tsc_hi, tsc_lo_1, tsc_lo_2, tsc_lo_3, tsc_chk = 0; + unsigned long flags; + short retry = IPU_BUTTRESS_TSC_RETRY; + + do { + spin_lock_irqsave(&b->tsc_lock, flags); + tsc_hi = readl(isp->base + BUTTRESS_REG_TSC_HI); + + /* + * We are occasionally getting broken values from + * HH. 
Reading 3 times and doing sanity check as a WA + */ + tsc_lo_1 = readl(isp->base + BUTTRESS_REG_TSC_LO); + tsc_lo_2 = readl(isp->base + BUTTRESS_REG_TSC_LO); + tsc_lo_3 = readl(isp->base + BUTTRESS_REG_TSC_LO); + tsc_chk = readl(isp->base + BUTTRESS_REG_TSC_HI); + spin_unlock_irqrestore(&b->tsc_lock, flags); + if (tsc_chk == tsc_hi && tsc_lo_2 && + tsc_lo_2 - tsc_lo_1 <= IPU_BUTTRESS_TSC_LIMIT && + tsc_lo_3 - tsc_lo_2 <= IPU_BUTTRESS_TSC_LIMIT) { + *val = (u64)tsc_hi << 32 | tsc_lo_2; + return 0; + } + + /* + * Trace error only if limit checkings fails at least + * by two consecutive readings. + */ + if (retry < IPU_BUTTRESS_TSC_RETRY - 1 && tsc_lo_2) + dev_err(&isp->pdev->dev, + "%s = %u, %s = %u, %s = %u, %s = %u, %s = %u", + "failure: tsc_hi", tsc_hi, + "tsc_chk", tsc_chk, + "tsc_lo_1", tsc_lo_1, + "tsc_lo_2", tsc_lo_2, "tsc_lo_3", tsc_lo_3); + } while (retry--); + + if (!tsc_chk && !tsc_lo_2) + return -EIO; + + WARN_ON_ONCE(1); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(ipu_buttress_tsc_read); + +#ifdef CONFIG_DEBUG_FS + +static int ipu_buttress_reg_open(struct inode *inode, struct file *file) +{ + if (!inode->i_private) + return -EACCES; + + file->private_data = inode->i_private; + return 0; +} + +static ssize_t ipu_buttress_reg_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct debugfs_reg32 *reg = file->private_data; + u8 tmp[11]; + u32 val = readl((void __iomem *)reg->offset); + int len = scnprintf(tmp, sizeof(tmp), "0x%08x", val); + + return simple_read_from_buffer(buf, len, ppos, &tmp, len); +} + +static ssize_t ipu_buttress_reg_write(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct debugfs_reg32 *reg = file->private_data; + u32 val; + int rval; + + rval = kstrtou32_from_user(buf, count, 0, &val); + if (rval) + return rval; + + writel(val, (void __iomem *)reg->offset); + + return count; +} + +static struct debugfs_reg32 buttress_regs[] = { + {"IU2CSEDB0", BUTTRESS_REG_IU2CSEDB0}, + {"IU2CSEDATA0", BUTTRESS_REG_IU2CSEDATA0}, + {"CSE2IUDB0", BUTTRESS_REG_CSE2IUDB0}, + {"CSE2IUDATA0", BUTTRESS_REG_CSE2IUDATA0}, + {"CSE2IUCSR", BUTTRESS_REG_CSE2IUCSR}, + {"IU2CSECSR", BUTTRESS_REG_IU2CSECSR}, +}; + +static const struct file_operations ipu_buttress_reg_fops = { + .owner = THIS_MODULE, + .open = ipu_buttress_reg_open, + .read = ipu_buttress_reg_read, + .write = ipu_buttress_reg_write, +}; + +static int ipu_buttress_start_tsc_sync_set(void *data, u64 val) +{ + struct ipu_device *isp = data; + + return ipu_buttress_start_tsc_sync(isp); +} + +DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_start_tsc_sync_fops, NULL, + ipu_buttress_start_tsc_sync_set, "%llu\n"); + +u64 ipu_buttress_tsc_ticks_to_ns(u64 ticks) +{ + u64 ns = ticks * 10000; + /* + * TSC clock frequency is 19.2MHz, + * converting TSC tick count to ns is calculated by: + * ns = ticks * 1000 000 000 / 19.2Mhz + * = ticks * 1000 000 000 / 19200000Hz + * = ticks * 10000 / 192 ns + */ + do_div(ns, 192); + + return ns; +} +EXPORT_SYMBOL_GPL(ipu_buttress_tsc_ticks_to_ns); + +static int ipu_buttress_tsc_get(void *data, u64 *val) +{ + return ipu_buttress_tsc_read(data, val); +} +DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_tsc_fops, ipu_buttress_tsc_get, + NULL, "%llu\n"); + +static int ipu_buttress_psys_force_freq_get(void *data, u64 *val) +{ + struct ipu_device *isp = data; + + *val = isp->buttress.psys_force_ratio * BUTTRESS_PS_FREQ_STEP; + + return 0; +} + +static int ipu_buttress_psys_force_freq_set(void *data, u64 val) +{ + struct ipu_device *isp = data; + + if (val && (val < 
BUTTRESS_MIN_FORCE_PS_FREQ || + val > BUTTRESS_MAX_FORCE_PS_FREQ)) + return -EINVAL; + + do_div(val, BUTTRESS_PS_FREQ_STEP); + isp->buttress.psys_force_ratio = val; + + if (isp->buttress.psys_force_ratio) + ipu_buttress_set_psys_ratio(isp, + isp->buttress.psys_force_ratio, + isp->buttress.psys_force_ratio); + else + ipu_buttress_set_psys_freq(isp, isp->buttress.psys_min_freq); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_psys_force_freq_fops, + ipu_buttress_psys_force_freq_get, + ipu_buttress_psys_force_freq_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_psys_freq_fops, + ipu_buttress_psys_freq_get, NULL, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_isys_freq_fops, + ipu_buttress_isys_freq_get, NULL, "%llu\n"); + +int ipu_buttress_debugfs_init(struct ipu_device *isp) +{ + struct debugfs_reg32 *reg = + devm_kcalloc(&isp->pdev->dev, ARRAY_SIZE(buttress_regs), + sizeof(*reg), GFP_KERNEL); + struct dentry *dir, *file; + int i; + + if (!reg) + return -ENOMEM; + + dir = debugfs_create_dir("buttress", isp->ipu_dir); + if (!dir) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(buttress_regs); i++, reg++) { + reg->offset = (unsigned long)isp->base + + buttress_regs[i].offset; + reg->name = buttress_regs[i].name; + file = debugfs_create_file(reg->name, 0700, + dir, reg, &ipu_buttress_reg_fops); + if (!file) + goto err; + } + + file = debugfs_create_file("start_tsc_sync", 0200, dir, isp, + &ipu_buttress_start_tsc_sync_fops); + if (!file) + goto err; + file = debugfs_create_file("tsc", 0400, dir, isp, + &ipu_buttress_tsc_fops); + if (!file) + goto err; + file = debugfs_create_file("psys_force_freq", 0700, dir, isp, + &ipu_buttress_psys_force_freq_fops); + if (!file) + goto err; + + file = debugfs_create_file("psys_freq", 0400, dir, isp, + &ipu_buttress_psys_freq_fops); + if (!file) + goto err; + + file = debugfs_create_file("isys_freq", 0400, dir, isp, + &ipu_buttress_isys_freq_fops); + if (!file) + goto err; + + return 0; +err: + debugfs_remove_recursive(dir); + return -ENOMEM; +} + +#endif /* CONFIG_DEBUG_FS */ + +static ssize_t +ipu_buttress_psys_fused_min_freq_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev)); + + return snprintf(buf, PAGE_SIZE, "%u\n", + isp->buttress.psys_fused_freqs.min_freq); +} + +static DEVICE_ATTR(psys_fused_min_freq, 0444, + ipu_buttress_psys_fused_min_freq_get, NULL); + +static ssize_t +ipu_buttress_psys_fused_max_freq_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev)); + + return snprintf(buf, PAGE_SIZE, "%u\n", + isp->buttress.psys_fused_freqs.max_freq); +} + +static DEVICE_ATTR(psys_fused_max_freq, 0444, + ipu_buttress_psys_fused_max_freq_get, NULL); + +static ssize_t +ipu_buttress_psys_fused_efficient_freq_get(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev)); + + return snprintf(buf, PAGE_SIZE, "%u\n", + isp->buttress.psys_fused_freqs.efficient_freq); +} + +static DEVICE_ATTR(psys_fused_efficient_freq, 0444, + ipu_buttress_psys_fused_efficient_freq_get, NULL); + +int ipu_buttress_restore(struct ipu_device *isp) +{ + struct ipu_buttress *b = &isp->buttress; + + writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR); + writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE); + writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_WDT); + + return 0; +} +EXPORT_SYMBOL(ipu_buttress_restore); + +int 
ipu_buttress_init(struct ipu_device *isp)
+{
+	struct ipu_buttress *b = &isp->buttress;
+	int rval, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY;
+
+	mutex_init(&b->power_mutex);
+	mutex_init(&b->auth_mutex);
+	mutex_init(&b->cons_mutex);
+	mutex_init(&b->ipc_mutex);
+	spin_lock_init(&b->tsc_lock);
+	init_completion(&b->ish.send_complete);
+	init_completion(&b->cse.send_complete);
+	init_completion(&b->ish.recv_complete);
+	init_completion(&b->cse.recv_complete);
+
+	b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK;
+	b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK;
+	b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR;
+	b->cse.csr_out = BUTTRESS_REG_IU2CSECSR;
+	b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0;
+	b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0;
+	b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0;
+	b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0;
+
+	b->ish.csr_in = BUTTRESS_REG_ISH2IUCSR;
+	b->ish.csr_out = BUTTRESS_REG_IU2ISHCSR;
+	b->ish.db0_in = BUTTRESS_REG_ISH2IUDB0;
+	b->ish.db0_out = BUTTRESS_REG_IU2ISHDB0;
+	b->ish.data0_in = BUTTRESS_REG_ISH2IUDATA0;
+	b->ish.data0_out = BUTTRESS_REG_IU2ISHDATA0;
+	INIT_LIST_HEAD(&b->constraints);
+
+	rval = ipu_buttress_clk_init(isp);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Clock init failed\n");
+		goto err_mutex_destroy;
+	}
+
+	ipu_buttress_set_secure_mode(isp);
+	isp->secure_mode = ipu_buttress_get_secure_mode(isp);
+	if (isp->secure_mode != secure_mode_enable)
+		dev_warn(&isp->pdev->dev, "Unable to set secure mode!\n");
+
+	dev_info(&isp->pdev->dev, "IPU in %s mode\n",
+		 isp->secure_mode ? "secure" : "non-secure");
+
+	b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT);
+	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
+	writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+	rval = device_create_file(&isp->pdev->dev,
+				  &dev_attr_psys_fused_min_freq);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Create min freq file failed\n");
+		goto err_clk_unregister;
+	}
+
+	rval = device_create_file(&isp->pdev->dev,
+				  &dev_attr_psys_fused_max_freq);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Create max freq file failed\n");
+		goto err_remove_min_freq_file;
+	}
+
+	rval = device_create_file(&isp->pdev->dev,
+				  &dev_attr_psys_fused_efficient_freq);
+	if (rval) {
+		dev_err(&isp->pdev->dev, "Create efficient freq file failed\n");
+		goto err_remove_max_freq_file;
+	}
+
+	/*
+	 * Retry a couple of times in case CSE initialization
+	 * is delayed for one reason or another.
+	 */
+	do {
+		rval = ipu_buttress_ipc_reset(isp, &b->cse);
+		if (rval) {
+			dev_err(&isp->pdev->dev,
+				"IPC reset protocol failed, retry!\n");
+		} else {
+			dev_dbg(&isp->pdev->dev, "IPC reset completed!\n");
+			return 0;
+		}
+	} while (ipc_reset_retry--);
+
+	dev_err(&isp->pdev->dev, "IPC reset protocol failed\n");
+
+	device_remove_file(&isp->pdev->dev,
+			   &dev_attr_psys_fused_efficient_freq);
+err_remove_max_freq_file:
+	device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_max_freq);
+err_remove_min_freq_file:
+	device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_min_freq);
+err_clk_unregister:
+	ipu_buttress_clk_exit(isp);
+err_mutex_destroy:
+	mutex_destroy(&b->power_mutex);
+	mutex_destroy(&b->auth_mutex);
+	mutex_destroy(&b->cons_mutex);
+	mutex_destroy(&b->ipc_mutex);
+
+	return rval;
+}
+
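The CSE and ISH channel bookkeeping set up here is what the exported bulk-send API operates on. A hypothetical caller, using placeholder opcode and response values (real CSE opcodes such as BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD are defined elsewhere in this series):

	/* Hypothetical consumer of the bulk IPC API; values are placeholders. */
	static int example_send_cse_cmd(struct ipu_device *isp)
	{
		struct ipu_ipc_buttress_bulk_msg msg = {
			.cmd = 0x100,		/* placeholder command */
			.cmd_size = 1,
			.expected_resp = 0x101,	/* placeholder response */
			.require_resp = true,
		};

		return ipu_buttress_ipc_send_bulk(isp, IPU_BUTTRESS_IPC_CSE,
						  &msg, 1);
	}
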
+void ipu_buttress_exit(struct ipu_device *isp)
+{
+	struct ipu_buttress *b = &isp->buttress;
+
+	writel(0, isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+	device_remove_file(&isp->pdev->dev,
+			   &dev_attr_psys_fused_efficient_freq);
+	device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_max_freq);
+	device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_min_freq);
+
+	ipu_buttress_clk_exit(isp);
+
+	mutex_destroy(&b->power_mutex);
+	mutex_destroy(&b->auth_mutex);
+	mutex_destroy(&b->cons_mutex);
+	mutex_destroy(&b->ipc_mutex);
+}
diff --git a/drivers/media/pci/intel/ipu-buttress.h b/drivers/media/pci/intel/ipu-buttress.h
new file mode 100644
index 000000000000..7c29db5e7b55
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-buttress.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2018 Intel Corporation */
+
+#ifndef IPU_BUTTRESS_H
+#define IPU_BUTTRESS_H
+
+#include <linux/completion.h>
+#include <linux/irqreturn.h>
+#include "ipu.h"
+
+#define IPU_BUTTRESS_NUM_OF_SENS_CKS	3
+#define IPU_BUTTRESS_NUM_OF_PLL_CKS	3
+#define IPU_BUTTRESS_TSC_CLK	19200000
+
+#define BUTTRESS_POWER_TIMEOUT	200
+
+#define BUTTRESS_PS_FREQ_STEP	25U
+#define BUTTRESS_MIN_FORCE_PS_FREQ	(BUTTRESS_PS_FREQ_STEP * 8)
+#define BUTTRESS_MAX_FORCE_PS_FREQ	(BUTTRESS_PS_FREQ_STEP * 32)
+
+struct ipu_buttress_ctrl {
+	u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off;
+	union {
+		unsigned int divisor;
+		unsigned int ratio;
+	};
+	union {
+		unsigned int divisor_shift;
+		unsigned int ratio_shift;
+	};
+	unsigned int ovrd;
+	u32 ovrd_shift;
+	unsigned int qos_floor;
+	bool started;
+};
+
+struct ipu_buttress_fused_freqs {
+	unsigned int min_freq;
+	unsigned int max_freq;
+	unsigned int efficient_freq;
+};
+
+struct ipu_buttress_ipc {
+	struct completion send_complete;
+	struct completion recv_complete;
+	u32 nack;
+	u32 nack_mask;
+	u32 recv_data;
+	u32 csr_out;
+	u32 csr_in;
+	u32 db0_in;
+	u32 db0_out;
+	u32 data0_out;
+	u32 data0_in;
+};
+
+struct ipu_buttress {
+	struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex;
+	spinlock_t tsc_lock;	/* tsc lock */
+	struct clk *clk_sensor[IPU_BUTTRESS_NUM_OF_SENS_CKS];
+	struct clk *pll_sensor[IPU_BUTTRESS_NUM_OF_PLL_CKS];
+	struct ipu_buttress_ipc cse;
+	struct ipu_buttress_ipc ish;
+	struct list_head constraints;
+	struct ipu_buttress_fused_freqs psys_fused_freqs;
+	unsigned int psys_min_freq;
+	u32 wdt_cached_value;
+	u8 psys_force_ratio;
+	bool force_suspend;
+	bool ps_started;
+};
+
+struct ipu_buttress_sensor_clk_freq {
+	unsigned int rate;
+	unsigned int val;
+};
+
+struct firmware;
+
+enum ipu_buttress_ipc_domain {
+	IPU_BUTTRESS_IPC_CSE,
+	IPU_BUTTRESS_IPC_ISH,
+};
+
+struct ipu_buttress_constraint {
+	struct list_head list;
+	unsigned int min_freq;
+};
+
+struct ipu_ipc_buttress_bulk_msg {
+	u32 cmd;
+	u32 expected_resp;
+	bool 
require_resp; + u8 cmd_size; +}; + +int ipu_buttress_ipc_reset(struct ipu_device *isp, + struct ipu_buttress_ipc *ipc); +int ipu_buttress_map_fw_image(struct ipu_bus_device *sys, + const struct firmware *fw, struct sg_table *sgt); +int ipu_buttress_unmap_fw_image(struct ipu_bus_device *sys, + struct sg_table *sgt); +int ipu_buttress_power(struct device *dev, + struct ipu_buttress_ctrl *ctrl, bool on); +void +ipu_buttress_add_psys_constraint(struct ipu_device *isp, + struct ipu_buttress_constraint *constraint); +void +ipu_buttress_remove_psys_constraint(struct ipu_device *isp, + struct ipu_buttress_constraint *constraint); +void ipu_buttress_set_secure_mode(struct ipu_device *isp); +bool ipu_buttress_get_secure_mode(struct ipu_device *isp); +int ipu_buttress_authenticate(struct ipu_device *isp); +int ipu_buttress_reset_authentication(struct ipu_device *isp); +bool ipu_buttress_auth_done(struct ipu_device *isp); +int ipu_buttress_start_tsc_sync(struct ipu_device *isp); +int ipu_buttress_tsc_read(struct ipu_device *isp, u64 *val); +u64 ipu_buttress_tsc_ticks_to_ns(u64 ticks); + +irqreturn_t ipu_buttress_isr(int irq, void *isp_ptr); +irqreturn_t ipu_buttress_isr_threaded(int irq, void *isp_ptr); +int ipu_buttress_debugfs_init(struct ipu_device *isp); +int ipu_buttress_init(struct ipu_device *isp); +void ipu_buttress_exit(struct ipu_device *isp); +void ipu_buttress_csi_port_config(struct ipu_device *isp, + u32 legacy, u32 combo); +int ipu_buttress_restore(struct ipu_device *isp); + +int +ipu_buttress_ipc_send_bulk(struct ipu_device *isp, + enum ipu_buttress_ipc_domain ipc_domain, + struct ipu_ipc_buttress_bulk_msg *msgs, u32 size); +int ipu_buttress_psys_freq_get(void *data, u64 *val); +int ipu_buttress_isys_freq_get(void *data, u64 *val); + +#endif /* IPU_BUTTRESS_H */ diff --git a/drivers/media/pci/intel/ipu-cpd.c b/drivers/media/pci/intel/ipu-cpd.c new file mode 100644 index 000000000000..dca03232aa4f --- /dev/null +++ b/drivers/media/pci/intel/ipu-cpd.c @@ -0,0 +1,478 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2015 - 2018 Intel Corporation + +#include +#include + +#include "ipu.h" +#include "ipu-cpd.h" + +#include + +/* 15 entries + header*/ +#define MAX_PKG_DIR_ENT_CNT 16 +/* 2 qword per entry/header */ +#define PKG_DIR_ENT_LEN 2 +/* PKG_DIR size in bytes */ +#define PKG_DIR_SIZE ((MAX_PKG_DIR_ENT_CNT) * \ + (PKG_DIR_ENT_LEN) * sizeof(u64)) +#define PKG_DIR_ID_SHIFT 48 +#define PKG_DIR_ID_MASK 0x7f +#define PKG_DIR_VERSION_SHIFT 32 +#define PKG_DIR_SIZE_MASK 0xfffff +/* _IUPKDR_ */ +#define PKG_DIR_HDR_MARK 0x5f4955504b44525f + +/* $CPD */ +#define CPD_HDR_MARK 0x44504324 + +/* Maximum size is 2K DWORDs */ +#define MAX_MANIFEST_SIZE (2 * 1024 * sizeof(u32)) + +/* Maximum size is 64k */ +#define MAX_METADATA_SIZE (64 * 1024) + +#define MAX_COMPONENT_ID 127 +#define MAX_COMPONENT_VERSION 0xffff + +#define CPD_MANIFEST_IDX 0 +#define CPD_METADATA_IDX 1 +#define CPD_MODULEDATA_IDX 2 + +#define ipu_cpd_get_entries(cpd) ((struct ipu_cpd_ent *) \ + ((struct ipu_cpd_hdr *)cpd + 1)) +#define ipu_cpd_get_entry(cpd, idx) (&ipu_cpd_get_entries(cpd)[idx]) +#define ipu_cpd_get_manifest(cpd) ipu_cpd_get_entry(cpd, CPD_MANIFEST_IDX) +#define ipu_cpd_get_metadata(cpd) ipu_cpd_get_entry(cpd, CPD_METADATA_IDX) +#define ipu_cpd_get_moduledata(cpd) ipu_cpd_get_entry(cpd, CPD_MODULEDATA_IDX) + +static bool fw_version_check = true; +module_param(fw_version_check, bool, 0444); +MODULE_PARM_DESC(fw_version_check, "enable/disable checking firmware version"); + +static const struct 
ipu_cpd_metadata_cmpnt * +ipu_cpd_metadata_get_cmpnt(struct ipu_device *isp, + const void *metadata, + unsigned int metadata_size, + u8 idx) +{ + const struct ipu_cpd_metadata_extn *extn; + const struct ipu_cpd_metadata_cmpnt *cmpnts; + int cmpnt_count; + + extn = metadata; + cmpnts = metadata + sizeof(*extn); + cmpnt_count = (metadata_size - sizeof(*extn)) / sizeof(*cmpnts); + + if (idx > MAX_COMPONENT_ID || idx >= cmpnt_count) { + dev_err(&isp->pdev->dev, "Component index out of range (%d)\n", + idx); + return ERR_PTR(-EINVAL); + } + + return &cmpnts[idx]; +} + +static u32 ipu_cpd_metadata_cmpnt_version(struct ipu_device *isp, + const void *metadata, + unsigned int metadata_size, u8 idx) +{ + const struct ipu_cpd_metadata_cmpnt *cmpnt = + ipu_cpd_metadata_get_cmpnt(isp, metadata, + metadata_size, idx); + + if (IS_ERR(cmpnt)) + return PTR_ERR(cmpnt); + + return cmpnt->ver; +} + +static int ipu_cpd_metadata_get_cmpnt_id(struct ipu_device *isp, + const void *metadata, + unsigned int metadata_size, u8 idx) +{ + const struct ipu_cpd_metadata_cmpnt *cmpnt = + ipu_cpd_metadata_get_cmpnt(isp, metadata, + metadata_size, idx); + + if (IS_ERR(cmpnt)) + return PTR_ERR(cmpnt); + + return cmpnt->id; +} + +static u32 +ipu_cpd_metadata_get_cmpnt_icache_base_offs(struct ipu_device *isp, + const void *metadata, + unsigned int metadata_size, u8 idx) +{ + const struct ipu_cpd_metadata_cmpnt *cmpnt = + ipu_cpd_metadata_get_cmpnt(isp, metadata, + metadata_size, idx); + + if (IS_ERR(cmpnt)) + return PTR_ERR(cmpnt); + + return cmpnt->icache_base_offs; +} + +static u32 +ipu_cpd_metadata_get_cmpnt_entry_point(struct ipu_device *isp, + const void *metadata, + unsigned int metadata_size, u8 idx) +{ + const struct ipu_cpd_metadata_cmpnt *cmpnt = + ipu_cpd_metadata_get_cmpnt(isp, metadata, + metadata_size, idx); + + if (IS_ERR(cmpnt)) + return PTR_ERR(cmpnt); + + return cmpnt->entry_point; +} + +static int ipu_cpd_parse_module_data(struct ipu_device *isp, + const void *module_data, + unsigned int module_data_size, + dma_addr_t dma_addr_module_data, + u64 *pkg_dir, + const void *metadata, + unsigned int metadata_size) +{ + const struct ipu_cpd_module_data_hdr *module_data_hdr; + const struct ipu_cpd_hdr *dir_hdr; + const struct ipu_cpd_ent *dir_ent; + int i; + + if (!module_data) + return -EINVAL; + + module_data_hdr = module_data; + dir_hdr = module_data + module_data_hdr->hdr_len; + dir_ent = (struct ipu_cpd_ent *)(dir_hdr + 1); + + pkg_dir[0] = PKG_DIR_HDR_MARK; + /* pkg_dir entry count = component count + pkg_dir header */ + pkg_dir[1] = dir_hdr->ent_cnt + 1; + + for (i = 0; i < dir_hdr->ent_cnt; i++, dir_ent++) { + u64 *p = &pkg_dir[PKG_DIR_ENT_LEN + i * PKG_DIR_ENT_LEN]; + int ver, id; + + *p++ = dma_addr_module_data + dir_ent->offset; + + id = ipu_cpd_metadata_get_cmpnt_id(isp, metadata, + metadata_size, i); + if (id < 0 || id > MAX_COMPONENT_ID) { + dev_err(&isp->pdev->dev, + "Failed to parse component id\n"); + return -EINVAL; + } + ver = ipu_cpd_metadata_cmpnt_version(isp, metadata, + metadata_size, i); + if (ver < 0 || ver > MAX_COMPONENT_VERSION) { + dev_err(&isp->pdev->dev, + "Failed to parse component version\n"); + return -EINVAL; + } + + /* + * PKG_DIR Entry (type == id) + * 63:56 55 54:48 47:32 31:24 23:0 + * Rsvd Rsvd Type Version Rsvd Size + */ + *p = dir_ent->len | (u64) id << PKG_DIR_ID_SHIFT | + (u64)ver << PKG_DIR_VERSION_SHIFT; + } + + return 0; +} + +void *ipu_cpd_create_pkg_dir(struct ipu_bus_device *adev, + const void *src, + dma_addr_t dma_addr_src, + dma_addr_t *dma_addr, unsigned int 
*pkg_dir_size) +{ + struct ipu_device *isp = adev->isp; + const struct ipu_cpd_ent *ent, *man_ent, *met_ent; + u64 *pkg_dir; + unsigned int man_sz, met_sz; + void *pkg_dir_pos; + int ret; + + man_ent = ipu_cpd_get_manifest(src); + man_sz = man_ent->len; + + met_ent = ipu_cpd_get_metadata(src); + met_sz = met_ent->len; + + *pkg_dir_size = PKG_DIR_SIZE + man_sz + met_sz; + pkg_dir = dma_alloc_attrs(&adev->dev, *pkg_dir_size, dma_addr, + GFP_KERNEL, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + if (!pkg_dir) + return pkg_dir; + + /* + * pkg_dir entry/header: + * qword | 63:56 | 55 | 54:48 | 47:32 | 31:24 | 23:0 + * N Address/Offset/"_IUPKDR_" + * N + 1 | rsvd | rsvd | type | ver | rsvd | size + * + * We can ignore other fields that size in N + 1 qword as they + * are 0 anyway. Just setting size for now. + */ + + ent = ipu_cpd_get_moduledata(src); + + ret = ipu_cpd_parse_module_data(isp, src + ent->offset, + ent->len, + dma_addr_src + ent->offset, + pkg_dir, + src + met_ent->offset, met_ent->len); + if (ret) { + dev_err(&isp->pdev->dev, + "Unable to parse module data section!\n"); + dma_free_attrs(&isp->psys->dev, *pkg_dir_size, pkg_dir, + *dma_addr, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + return NULL; + } + + /* Copy manifest after pkg_dir */ + pkg_dir_pos = pkg_dir + PKG_DIR_ENT_LEN * MAX_PKG_DIR_ENT_CNT; + memcpy(pkg_dir_pos, src + man_ent->offset, man_sz); + + /* Copy metadata after manifest */ + pkg_dir_pos += man_sz; + memcpy(pkg_dir_pos, src + met_ent->offset, met_sz); + + dma_sync_single_range_for_device(&adev->dev, *dma_addr, + 0, *pkg_dir_size, DMA_TO_DEVICE); + + return pkg_dir; +} +EXPORT_SYMBOL_GPL(ipu_cpd_create_pkg_dir); + +void ipu_cpd_free_pkg_dir(struct ipu_bus_device *adev, + u64 *pkg_dir, + dma_addr_t dma_addr, unsigned int pkg_dir_size) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_free_attrs(&adev->dev, pkg_dir_size, pkg_dir, dma_addr, NULL); +#else + dma_free_attrs(&adev->dev, pkg_dir_size, pkg_dir, dma_addr, 0); +#endif +} +EXPORT_SYMBOL_GPL(ipu_cpd_free_pkg_dir); + +u32 ipu_cpd_get_pg_icache_base(struct ipu_device *isp, + u8 idx, + const void *cpd_file, unsigned int cpd_file_size) +{ + const struct ipu_cpd_ent *metadata = ipu_cpd_get_metadata(cpd_file); + const void *metadata_addr = cpd_file + metadata->offset; + + return ipu_cpd_metadata_get_cmpnt_icache_base_offs(isp, + metadata_addr, + metadata->len, idx); +} +EXPORT_SYMBOL_GPL(ipu_cpd_get_pg_icache_base); + +u32 ipu_cpd_get_pg_entry_point(struct ipu_device *isp, + u8 idx, + const void *cpd_file, unsigned int cpd_file_size) +{ + const struct ipu_cpd_ent *metadata = ipu_cpd_get_metadata(cpd_file); + const void *metadata_addr = cpd_file + metadata->offset; + + return ipu_cpd_metadata_get_cmpnt_entry_point(isp, + metadata_addr, + metadata->len, idx); +} +EXPORT_SYMBOL_GPL(ipu_cpd_get_pg_entry_point); + +static int ipu_cpd_validate_cpd(struct ipu_device *isp, + const void *cpd, + unsigned long cpd_size, unsigned long data_size) +{ + const struct ipu_cpd_hdr *cpd_hdr = cpd; + struct ipu_cpd_ent *ent; + unsigned int i; + + /* Ensure cpd hdr is within moduledata */ + if (cpd_size < sizeof(*cpd_hdr)) { + dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n"); + return -EINVAL; + } + + /* Sanity check for CPD header */ + if ((cpd_size - sizeof(*cpd_hdr)) / sizeof(*ent) < cpd_hdr->ent_cnt) { + dev_err(&isp->pdev->dev, "Invalid CPD header\n"); + return -EINVAL; + } + + /* Ensure that all entries are within moduledata */ + ent = 
(struct ipu_cpd_ent *)(cpd_hdr + 1); + for (i = 0; i < cpd_hdr->ent_cnt; i++, ent++) { + if (data_size < ent->offset || + data_size - ent->offset < ent->len) { + dev_err(&isp->pdev->dev, "Invalid CPD entry (%d)\n", i); + return -EINVAL; + } + } + + return 0; +} + +static int ipu_cpd_validate_moduledata(struct ipu_device *isp, + const void *moduledata, + u32 moduledata_size) +{ + const struct ipu_cpd_module_data_hdr *mod_hdr = moduledata; + int rval; + + /* Ensure moduledata hdr is within moduledata */ + if (moduledata_size < sizeof(*mod_hdr) || + moduledata_size < mod_hdr->hdr_len) { + dev_err(&isp->pdev->dev, "Invalid moduledata size\n"); + return -EINVAL; + } + + if (fw_version_check && mod_hdr->fw_pkg_date != IA_CSS_FW_PKG_RELEASE) { + dev_err(&isp->pdev->dev, + "Moduledata and library version mismatch (%x != %x)\n", + mod_hdr->fw_pkg_date, IA_CSS_FW_PKG_RELEASE); + return -EINVAL; + } + + dev_warn(&isp->pdev->dev, + "Moduledata version: %x, library version: %x\n", + mod_hdr->fw_pkg_date, IA_CSS_FW_PKG_RELEASE); + + dev_info(&isp->pdev->dev, "CSS release: %x\n", IA_CSS_FW_PKG_RELEASE); + rval = ipu_cpd_validate_cpd(isp, moduledata + + mod_hdr->hdr_len, + moduledata_size - + mod_hdr->hdr_len, moduledata_size); + if (rval) { + dev_err(&isp->pdev->dev, "Invalid CPD in moduledata\n"); + return -EINVAL; + } + + return 0; +} + +static int ipu_cpd_validate_metadata(struct ipu_device *isp, + const void *metadata, u32 meta_size) +{ + const struct ipu_cpd_metadata_extn *extn = metadata; + + /* Sanity check for metadata size */ + if (meta_size < sizeof(*extn) || meta_size > MAX_METADATA_SIZE) { + dev_err(&isp->pdev->dev, "%s: Invalid metadata\n", __func__); + return -EINVAL; + } + + /* Validate extension and image types */ + if (extn->extn_type != IPU_CPD_METADATA_EXTN_TYPE_IUNIT || + extn->img_type != IPU_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE) { + dev_err(&isp->pdev->dev, + "Invalid metadata descriptor img_type (%d)\n", + extn->img_type); + return -EINVAL; + } + + /* Validate metadata size multiple of metadata components */ + if ((meta_size - sizeof(*extn)) % + sizeof(struct ipu_cpd_metadata_cmpnt)) { + dev_err(&isp->pdev->dev, "%s: Invalid metadata size\n", + __func__); + return -EINVAL; + } + + return 0; +} + +int ipu_cpd_validate_cpd_file(struct ipu_device *isp, + const void *cpd_file, unsigned long cpd_file_size) +{ + const struct ipu_cpd_hdr *hdr = cpd_file; + struct ipu_cpd_ent *ent; + int rval; + + rval = ipu_cpd_validate_cpd(isp, cpd_file, + cpd_file_size, cpd_file_size); + if (rval) { + dev_err(&isp->pdev->dev, "Invalid CPD in file\n"); + return -EINVAL; + } + + /* Check for CPD file marker */ + if (hdr->hdr_mark != CPD_HDR_MARK) { + dev_err(&isp->pdev->dev, "Invalid CPD header\n"); + return -EINVAL; + } + + /* Sanity check for manifest size */ + ent = ipu_cpd_get_manifest(cpd_file); + if (ent->len > MAX_MANIFEST_SIZE) { + dev_err(&isp->pdev->dev, "Invalid manifest size\n"); + return -EINVAL; + } + + /* Validate metadata */ + ent = ipu_cpd_get_metadata(cpd_file); + rval = ipu_cpd_validate_metadata(isp, cpd_file + ent->offset, ent->len); + if (rval) { + dev_err(&isp->pdev->dev, "Invalid metadata\n"); + return rval; + } + + /* Validate moduledata */ + ent = ipu_cpd_get_moduledata(cpd_file); + rval = ipu_cpd_validate_moduledata(isp, cpd_file + ent->offset, + ent->len); + if (rval) { + dev_err(&isp->pdev->dev, "Invalid moduledata\n"); + return rval; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_cpd_validate_cpd_file); + +unsigned int ipu_cpd_pkg_dir_get_address(const u64 *pkg_dir, int 
pkg_dir_idx) +{ + return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN]; +} +EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_address); + +unsigned int ipu_cpd_pkg_dir_get_num_entries(const u64 *pkg_dir) +{ + return pkg_dir[1]; +} +EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_num_entries); + +unsigned int ipu_cpd_pkg_dir_get_size(const u64 *pkg_dir, int pkg_dir_idx) +{ + return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN + 1] & PKG_DIR_SIZE_MASK; +} +EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_size); + +unsigned int ipu_cpd_pkg_dir_get_type(const u64 *pkg_dir, int pkg_dir_idx) +{ + return pkg_dir[++pkg_dir_idx * PKG_DIR_ENT_LEN + 1] >> + PKG_DIR_ID_SHIFT & PKG_DIR_ID_MASK; +} +EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_type); diff --git a/drivers/media/pci/intel/ipu-cpd.h b/drivers/media/pci/intel/ipu-cpd.h new file mode 100644 index 000000000000..c91ae33c1b85 --- /dev/null +++ b/drivers/media/pci/intel/ipu-cpd.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation */ + +#ifndef IPU_CPD_H +#define IPU_CPD_H + +#define IPU_CPD_SIZE_OF_FW_ARCH_VERSION 7 +#define IPU_CPD_SIZE_OF_SYSTEM_VERSION 11 +#define IPU_CPD_SIZE_OF_COMPONENT_NAME 12 + +#define IPU_CPD_METADATA_EXTN_TYPE_IUNIT 0x10 + +#define IPU_CPD_METADATA_IMAGE_TYPE_RESERVED 0 +#define IPU_CPD_METADATA_IMAGE_TYPE_BOOTLOADER 1 +#define IPU_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE 2 + +#define IPU_CPD_PKG_DIR_PSYS_SERVER_IDX 0 +#define IPU_CPD_PKG_DIR_ISYS_SERVER_IDX 1 + +#define IPU_CPD_PKG_DIR_CLIENT_PG_TYPE 3 + +struct __packed ipu_cpd_module_data_hdr { + u32 hdr_len; + u32 endian; + u32 fw_pkg_date; + u32 hive_sdk_date; + u32 compiler_date; + u32 target_platform_type; + u8 sys_ver[IPU_CPD_SIZE_OF_SYSTEM_VERSION]; + u8 fw_arch_ver[IPU_CPD_SIZE_OF_FW_ARCH_VERSION]; + u8 rsvd[2]; +}; + +struct __packed ipu_cpd_hdr { + u32 hdr_mark; + u32 ent_cnt; + u8 hdr_ver; + u8 ent_ver; + u8 hdr_len; + u8 chksm; + u32 name; +}; + +struct __packed ipu_cpd_ent { + u8 name[IPU_CPD_SIZE_OF_COMPONENT_NAME]; + u32 offset; + u32 len; + u8 rsvd[4]; +}; + +struct __packed ipu_cpd_metadata_cmpnt { + u32 id; + u32 size; + u32 ver; + u8 sha2_hash[32]; + u32 entry_point; + u32 icache_base_offs; + u8 attrs[16]; +}; + +struct __packed ipu_cpd_metadata_extn { + u32 extn_type; + u32 len; + u32 img_type; + u8 rsvd[16]; +}; + +struct __packed ipu_cpd_client_pkg_hdr { + u32 prog_list_offs; + u32 prog_list_size; + u32 prog_desc_offs; + u32 prog_desc_size; + u32 pg_manifest_offs; + u32 pg_manifest_size; + u32 prog_bin_offs; + u32 prog_bin_size; +}; + +void *ipu_cpd_create_pkg_dir(struct ipu_bus_device *adev, + const void *src, + dma_addr_t dma_addr_src, + dma_addr_t *dma_addr, unsigned int *pkg_dir_size); +void ipu_cpd_free_pkg_dir(struct ipu_bus_device *adev, + u64 *pkg_dir, + dma_addr_t dma_addr, unsigned int pkg_dir_size); +u32 ipu_cpd_get_pg_icache_base(struct ipu_device *isp, + u8 idx, + const void *cpd_file, + unsigned int cpd_file_size); +u32 ipu_cpd_get_pg_entry_point(struct ipu_device *isp, + u8 idx, + const void *cpd_file, + unsigned int cpd_file_size); +int ipu_cpd_validate_cpd_file(struct ipu_device *isp, + const void *cpd_file, + unsigned long cpd_file_size); +unsigned int ipu_cpd_pkg_dir_get_address(const u64 *pkg_dir, int pkg_dir_idx); +unsigned int ipu_cpd_pkg_dir_get_num_entries(const u64 *pkg_dir); +unsigned int ipu_cpd_pkg_dir_get_size(const u64 *pkg_dir, int pkg_dir_idx); +unsigned int ipu_cpd_pkg_dir_get_type(const u64 *pkg_dir, int pkg_dir_idx); + +#endif /* IPU_CPD_H */ diff --git a/drivers/media/pci/intel/ipu-dma.c 
b/drivers/media/pci/intel/ipu-dma.c new file mode 100644 index 000000000000..0770d8dbbadf --- /dev/null +++ b/drivers/media/pci/intel/ipu-dma.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu-dma.h" +#include "ipu-mmu.h" + +/* Begin of things adapted from arch/arm/mm/dma-mapping.c */ +static void __dma_clear_buffer(struct page *page, size_t size, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + /* + * Ensure that the allocated pages are zeroed, and that any data + * lurking in the kernel direct-mapped region is invalidated. + */ + if (PageHighMem(page)) { + while (size > 0) { + void *ptr = kmap_atomic(page); + + memset(ptr, 0, PAGE_SIZE); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) +#else + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) +#endif + clflush_cache_range(ptr, PAGE_SIZE); + kunmap_atomic(ptr); + page++; + size -= PAGE_SIZE; + } + } else { + void *ptr = page_address(page); + + memset(ptr, 0, size); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) +#else + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) +#endif + clflush_cache_range(ptr, size); + } +} + +static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, + gfp_t gfp, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct page **pages; + int count = size >> PAGE_SHIFT; + int array_size = count * sizeof(struct page *); + int i = 0; + + if (array_size <= PAGE_SIZE) + pages = kzalloc(array_size, GFP_KERNEL); + else + pages = vzalloc(array_size); + if (!pages) + return NULL; + + gfp |= __GFP_NOWARN; + + while (count) { + int j, order = __fls(count); + + pages[i] = alloc_pages(gfp, order); + while (!pages[i] && order) + pages[i] = alloc_pages(gfp, --order); + if (!pages[i]) + goto error; + + if (order) { + split_page(pages[i], order); + j = 1 << order; + while (--j) + pages[i + j] = pages[i] + j; + } + + __dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs); + i += 1 << order; + count -= 1 << order; + } + + return pages; +error: + while (i--) + if (pages[i]) + __free_pages(pages[i], 0); + if (array_size <= PAGE_SIZE) + kfree(pages); + else + vfree(pages); + return NULL; +} + +static int __iommu_free_buffer(struct device *dev, struct page **pages, + size_t size, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + int count = size >> PAGE_SHIFT; + int array_size = count * sizeof(struct page *); + int i; + + for (i = 0; i < count; i++) { + if (pages[i]) { + __dma_clear_buffer(pages[i], PAGE_SIZE, attrs); + __free_pages(pages[i], 0); + } + } + + if (array_size <= PAGE_SIZE) + kfree(pages); + else + vfree(pages); + return 0; +} + +/* End of things adapted from arch/arm/mm/dma-mapping.c */ + +static void ipu_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, + size_t size, + enum dma_data_direction dir) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + unsigned long pa = iommu_iova_to_phys(mmu->dmap->domain, dma_handle); + + clflush_cache_range(phys_to_virt(pa), size); +} + +static void ipu_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist 
*sglist, + int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sglist, sg, nents, i) + clflush_cache_range(page_to_virt(sg_page(sg)), sg->length); +} + +static void *ipu_dma_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct page **pages; + struct iova *iova; + struct vm_struct *area; + int i; + int rval; + + size = PAGE_ALIGN(size); + + iova = alloc_iova(&mmu->dmap->iovad, size >> PAGE_SHIFT, + dma_get_mask(dev) >> PAGE_SHIFT, 0); + if (!iova) + return NULL; + + pages = __iommu_alloc_buffer(dev, size, gfp, attrs); + if (!pages) + goto out_free_iova; + + for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) { + rval = iommu_map(mmu->dmap->domain, + (iova->pfn_lo + i) << PAGE_SHIFT, + page_to_phys(pages[i]), PAGE_SIZE, 0); + if (rval) + goto out_unmap; + } + + area = __get_vm_area(size, 0, VMALLOC_START, VMALLOC_END); + if (!area) + goto out_unmap; + + area->pages = pages; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + if (map_vm_area(area, PAGE_KERNEL, &pages)) +#else + if (map_vm_area(area, PAGE_KERNEL, pages)) +#endif + goto out_vunmap; + + *dma_handle = iova->pfn_lo << PAGE_SHIFT; + + mmu->tlb_invalidate(mmu); + + return area->addr; + +out_vunmap: + vunmap(area->addr); + +out_unmap: + for (i--; i >= 0; i--) { + iommu_unmap(mmu->dmap->domain, (iova->pfn_lo + i) << PAGE_SHIFT, + PAGE_SIZE); + } + __iommu_free_buffer(dev, pages, size, attrs); + +out_free_iova: + __free_iova(&mmu->dmap->iovad, iova); + + return NULL; +} + +static void ipu_dma_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct vm_struct *area = find_vm_area(vaddr); + struct page **pages; + struct iova *iova = find_iova(&mmu->dmap->iovad, + dma_handle >> PAGE_SHIFT); + + if (WARN_ON(!area)) + return; + + if (WARN_ON(!area->pages)) + return; + + WARN_ON(!iova); + + size = PAGE_ALIGN(size); + + pages = area->pages; + + vunmap(vaddr); + + iommu_unmap(mmu->dmap->domain, iova->pfn_lo << PAGE_SHIFT, + (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT); + + __iommu_free_buffer(dev, pages, size, attrs); + + __free_iova(&mmu->dmap->iovad, iova); + + mmu->tlb_invalidate(mmu); +} + +static int ipu_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *addr, dma_addr_t iova, size_t size, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct vm_struct *area = find_vm_area(addr); + size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT; + size_t i; + + if (!area) + return -EFAULT; + + if (vma->vm_start & ~PAGE_MASK) + return -EINVAL; + + if (size > area->size) + return -EFAULT; + + for (i = 0; i < count; i++) + vm_insert_page(vma, vma->vm_start + (i << PAGE_SHIFT), + area->pages[i]); + + return 0; +} + +static void ipu_dma_unmap_sg(struct device *dev, + struct scatterlist *sglist, + int nents, enum dma_data_direction dir, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct 
ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct iova *iova = find_iova(&mmu->dmap->iovad, + sg_dma_address(sglist) >> PAGE_SHIFT); + + if (!nents) + return; + + WARN_ON(!iova); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) +#else + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) +#endif + ipu_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL); + + iommu_unmap(mmu->dmap->domain, iova->pfn_lo << PAGE_SHIFT, + (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT); + + mmu->tlb_invalidate(mmu); + + __free_iova(&mmu->dmap->iovad, iova); +} + +static int ipu_dma_map_sg(struct device *dev, struct scatterlist *sglist, + int nents, enum dma_data_direction dir, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_mmu *mmu = dev_get_drvdata(aiommu); + struct scatterlist *sg; + struct iova *iova; + size_t size = 0; + u32 iova_addr; + int i; + + for_each_sg(sglist, sg, nents, i) + size += PAGE_ALIGN(sg->length) >> PAGE_SHIFT; + + dev_dbg(dev, "dmamap: mapping sg %d entries, %zu pages\n", nents, size); + + iova = alloc_iova(&mmu->dmap->iovad, size, + dma_get_mask(dev) >> PAGE_SHIFT, 0); + if (!iova) + return 0; + + dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo, + iova->pfn_hi); + + iova_addr = iova->pfn_lo; + + for_each_sg(sglist, sg, nents, i) { + int rval; + + dev_dbg(dev, "mapping entry %d: iova 0x%8.8x,phy 0x%16.16llx\n", + i, iova_addr << PAGE_SHIFT, + (unsigned long long)page_to_phys(sg_page(sg))); + rval = iommu_map(mmu->dmap->domain, iova_addr << PAGE_SHIFT, + page_to_phys(sg_page(sg)), + PAGE_ALIGN(sg->length), 0); + if (rval) + goto out_fail; + sg_dma_address(sg) = iova_addr << PAGE_SHIFT; +#ifdef CONFIG_NEED_SG_DMA_LENGTH + sg_dma_len(sg) = sg->length; +#endif /* CONFIG_NEED_SG_DMA_LENGTH */ + + iova_addr += PAGE_ALIGN(sg->length) >> PAGE_SHIFT; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) +#else + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) +#endif + ipu_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL); + + mmu->tlb_invalidate(mmu); + + return nents; + +out_fail: + ipu_dma_unmap_sg(dev, sglist, i, dir, attrs); + + return 0; +} + +/* + * Create scatter-list for the already allocated DMA buffer + */ +static int ipu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t handle, size_t size, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs *attrs +#else + unsigned long attrs +#endif + ) +{ + struct vm_struct *area = find_vm_area(cpu_addr); + int n_pages; + int ret = 0; + + if (WARN_ON(!area->pages)) + return -ENOMEM; + + n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; + + ret = sg_alloc_table_from_pages(sgt, area->pages, n_pages, 0, size, + GFP_KERNEL); + if (ret) + dev_dbg(dev, "IPU get sgt table fail\n"); + + return ret; +} + +const struct dma_map_ops ipu_dma_ops = { + .alloc = ipu_dma_alloc, + .free = ipu_dma_free, + .mmap = ipu_dma_mmap, + .map_sg = ipu_dma_map_sg, + .unmap_sg = ipu_dma_unmap_sg, + .sync_single_for_cpu = ipu_dma_sync_single_for_cpu, + .sync_single_for_device = ipu_dma_sync_single_for_cpu, + .sync_sg_for_cpu = ipu_dma_sync_sg_for_cpu, + .sync_sg_for_device = ipu_dma_sync_sg_for_cpu, + .get_sgtable = ipu_dma_get_sgtable, +}; +EXPORT_SYMBOL_GPL(ipu_dma_ops); diff --git a/drivers/media/pci/intel/ipu-dma.h b/drivers/media/pci/intel/ipu-dma.h new file 
mode 100644 index 000000000000..9974b69fd6fd --- /dev/null +++ b/drivers/media/pci/intel/ipu-dma.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_DMA_H +#define IPU_DMA_H + +#include + +struct iommu_domain; + +struct ipu_dma_mapping { + struct iommu_domain *domain; + struct iova_domain iovad; + struct kref ref; +}; + +extern const struct dma_map_ops ipu_dma_ops; + +#endif /* IPU_DMA_H */ diff --git a/drivers/media/pci/intel/ipu-fw-com.c b/drivers/media/pci/intel/ipu-fw-com.c new file mode 100644 index 000000000000..78c5c9291d70 --- /dev/null +++ b/drivers/media/pci/intel/ipu-fw-com.c @@ -0,0 +1,478 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include + +#include +#include +#include +#include +#include + +#include "ipu.h" +#include "ipu-fw-com.h" +#include "ipu-bus.h" + +/* + * The FWCOM layer is a resource shared between the FW and the driver. It + * consists of token queues in both the send and receive directions. A queue + * is simply an array of token structures plus read and write indexes into it. + * There are 1...n queues in each direction. The queues are located in + * system RAM and are mapped through the ISP MMU so that both the CPU and the + * ISP see the same buffer. The indexes live in ISP DMEM so that the FW code + * can poll them at very low latency and cost. CPU access to the indexes is + * more costly, but that happens only at message sending time and during + * interrupt-triggered message handling; the CPU never needs to poll them. + * wr_reg / rd_reg are offsets to those DMEM locations; they are not + * the indexes themselves. + */ + +/* Shared structure between driver and FW - do not modify */ +struct ipu_fw_sys_queue { + u64 host_address; + u32 vied_address; + u32 size; + u32 token_size; + u32 wr_reg; /* reg no in subsystem's regmem */ + u32 rd_reg; + u32 _align; +}; + +struct ipu_fw_sys_queue_res { + u64 host_address; + u32 vied_address; + u32 reg; +}; + +enum syscom_state { + /* Program load or explicit host setting should init to this */ + SYSCOM_STATE_UNINIT = 0x57A7E000, + /* SP Syscom sets this when it is ready for use */ + SYSCOM_STATE_READY = 0x57A7E001, + /* SP Syscom sets this when no more syscom accesses will happen */ + SYSCOM_STATE_INACTIVE = 0x57A7E002 +}; + +enum syscom_cmd { + /* Program load or explicit host setting should init to this */ + SYSCOM_COMMAND_UNINIT = 0x57A7F000, + /* Host Syscom requests syscom to become inactive */ + SYSCOM_COMMAND_INACTIVE = 0x57A7F001 +}; + +/* firmware config: data that is sent from the host to the SP via DDR */ +/* Cell copies data into a context */ + +struct ipu_fw_syscom_config { + u32 firmware_address; + + u32 num_input_queues; + u32 num_output_queues; + + /* ISP pointers to an array of ipu_fw_sys_queue structures */ + u32 input_queue; + u32 output_queue; + + /* ISYS / PSYS private data */ + u32 specific_addr; + u32 specific_size; +}; + +/* End of shared structures / data */ + +struct ipu_fw_com_context { + struct ipu_bus_device *adev; + void __iomem *dmem_addr; + int (*cell_ready)(struct ipu_bus_device *adev); + void (*cell_start)(struct ipu_bus_device *adev); + + void *dma_buffer; + dma_addr_t dma_addr; + unsigned int dma_size; + unsigned long attrs; + + unsigned int num_input_queues; + unsigned int num_output_queues; + + struct ipu_fw_sys_queue *input_queue; /* array of host to SP queues */ + struct ipu_fw_sys_queue *output_queue; /* array of SP to host */ + + void *config_host_addr; + void *specific_host_addr; + u64 ibuf_host_addr; + u64 obuf_host_addr;
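+ /* ISP-visible (VIED) addresses of the same regions as the host pointers above */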
+ + u32 config_vied_addr; + u32 input_queue_vied_addr; + u32 output_queue_vied_addr; + u32 specific_vied_addr; + u32 ibuf_vied_addr; + u32 obuf_vied_addr; +}; + +#define FW_COM_WR_REG 0 +#define FW_COM_RD_REG 4 + +#define REGMEM_OFFSET 0 + +enum regmem_id { + /* pass pkg_dir address to SPC in non-secure mode */ + PKG_DIR_ADDR_REG = 0, + /* pass syscom configuration to SPC */ + SYSCOM_CONFIG_REG = 1, + /* syscom state - modified by SP */ + SYSCOM_STATE_REG = 2, + /* syscom commands - modified by the host */ + SYSCOM_COMMAND_REG = 3, + /* Store interrupt status - updated by SP */ + SYSCOM_IRQ_REG = 4, + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 5 +}; + +enum message_direction { + DIR_RECV = 0, + DIR_SEND +}; + +static unsigned int num_messages(unsigned int wr, unsigned int rd, + unsigned int size) +{ + if (wr < rd) + wr += size; + return wr - rd; +} + +static unsigned int num_free(unsigned int wr, unsigned int rd, + unsigned int size) +{ + return size - num_messages(wr, rd, size); +} + +static unsigned int curr_index(void __iomem *q_dmem, + enum message_direction dir) +{ + return readl(q_dmem + + (dir == DIR_RECV ? FW_COM_RD_REG : FW_COM_WR_REG)); +} + +static unsigned int inc_index(void __iomem *q_dmem, struct ipu_fw_sys_queue *q, + enum message_direction dir) +{ + unsigned int index; + + index = curr_index(q_dmem, dir) + 1; + return index >= q->size ? 0 : index; +} + +static unsigned int ipu_sys_queue_buf_size(unsigned int size, + unsigned int token_size) +{ + return (size + 1) * token_size; +} + +static void ipu_sys_queue_init(struct ipu_fw_sys_queue *q, unsigned int size, + unsigned int token_size, struct ipu_fw_sys_queue_res *res) +{ + unsigned int buf_size; + + q->size = size + 1; + q->token_size = token_size; + buf_size = ipu_sys_queue_buf_size(size, token_size); + + /* acquire the shared buffer space */ + q->host_address = res->host_address; + res->host_address += buf_size; + q->vied_address = res->vied_address; + res->vied_address += buf_size; + + /* acquire the shared read and writer pointers */ + q->wr_reg = res->reg; + res->reg++; + q->rd_reg = res->reg; + res->reg++; +} + +void *ipu_fw_com_prepare(struct ipu_fw_com_cfg *cfg, + struct ipu_bus_device *adev, void __iomem *base) +{ + struct ipu_fw_com_context *ctx; + struct ipu_fw_syscom_config *fw_cfg; + unsigned int i; + unsigned int sizeall, offset; + unsigned int sizeinput = 0, sizeoutput = 0; + unsigned long attrs = 0; + struct ipu_fw_sys_queue_res res; + + /* error handling */ + if (!cfg || !cfg->cell_start || !cfg->cell_ready) + return NULL; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; + ctx->dmem_addr = base + cfg->dmem_addr + REGMEM_OFFSET; + ctx->adev = adev; + ctx->cell_start = cfg->cell_start; + ctx->cell_ready = cfg->cell_ready; + + ctx->num_input_queues = cfg->num_input_queues; + ctx->num_output_queues = cfg->num_output_queues; + + /* + * Allocate DMA mapped memory. Allocate one big chunk. 
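+ * The chunk is carved up, in allocation order, into the syscom config + * header, the input and output queue descriptor arrays, the FW-specific + * blob, and finally the input and output token rings. Each ring holds + * size + 1 slots, so a full ring (one slot always left unused) can be + * told apart from an empty one (wr == rd).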
+ */ + sizeall = + /* Base cfg for FW */ + roundup(sizeof(struct ipu_fw_syscom_config), 8) + + /* Descriptions of the queues */ + cfg->num_input_queues * sizeof(struct ipu_fw_sys_queue) + + cfg->num_output_queues * sizeof(struct ipu_fw_sys_queue) + + /* FW specific information structure */ + roundup(cfg->specific_size, 8); + + for (i = 0; i < cfg->num_input_queues; i++) + sizeinput += ipu_sys_queue_buf_size(cfg->input[i].queue_size, + cfg->input[i].token_size); + + for (i = 0; i < cfg->num_output_queues; i++) + sizeoutput += ipu_sys_queue_buf_size(cfg->output[i].queue_size, + cfg->output[i].token_size); + + sizeall += sizeinput + sizeoutput; + + ctx->dma_buffer = dma_alloc_attrs(&ctx->adev->dev, sizeall, + &ctx->dma_addr, GFP_KERNEL, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + attrs); + ctx->attrs = attrs; +#else + NULL); +#endif + if (!ctx->dma_buffer) { + dev_err(&ctx->adev->dev, "failed to allocate dma memory\n"); + return NULL; + } + + ctx->dma_size = sizeall; + + /* This is the address where FW starts to parse allocations */ + ctx->config_host_addr = ctx->dma_buffer; + ctx->config_vied_addr = ctx->dma_addr; + fw_cfg = (struct ipu_fw_syscom_config *)ctx->config_host_addr; + offset = roundup(sizeof(struct ipu_fw_syscom_config), 8); + + ctx->input_queue = ctx->dma_buffer + offset; + ctx->input_queue_vied_addr = ctx->dma_addr + offset; + offset += cfg->num_input_queues * sizeof(struct ipu_fw_sys_queue); + + ctx->output_queue = ctx->dma_buffer + offset; + ctx->output_queue_vied_addr = ctx->dma_addr + offset; + offset += cfg->num_output_queues * sizeof(struct ipu_fw_sys_queue); + + ctx->specific_host_addr = ctx->dma_buffer + offset; + ctx->specific_vied_addr = ctx->dma_addr + offset; + offset += roundup(cfg->specific_size, 8); + + ctx->ibuf_host_addr = (uintptr_t)(ctx->dma_buffer + offset); + ctx->ibuf_vied_addr = ctx->dma_addr + offset; + offset += sizeinput; + + ctx->obuf_host_addr = (uintptr_t)(ctx->dma_buffer + offset); + ctx->obuf_vied_addr = ctx->dma_addr + offset; + offset += sizeoutput; + + /* initialize input queues */ + res.reg = SYSCOM_QPR_BASE_REG; + res.host_address = ctx->ibuf_host_addr; + res.vied_address = ctx->ibuf_vied_addr; + for (i = 0; i < cfg->num_input_queues; i++) { + ipu_sys_queue_init(ctx->input_queue + i, + cfg->input[i].queue_size, + cfg->input[i].token_size, &res); + } + + /* initialize output queues */ + res.host_address = ctx->obuf_host_addr; + res.vied_address = ctx->obuf_vied_addr; + for (i = 0; i < cfg->num_output_queues; i++) { + ipu_sys_queue_init(ctx->output_queue + i, + cfg->output[i].queue_size, + cfg->output[i].token_size, &res); + } + + /* copy firmware specific data */ + if (cfg->specific_addr && cfg->specific_size) { + memcpy((void *)ctx->specific_host_addr, + cfg->specific_addr, cfg->specific_size); + } + + fw_cfg->num_input_queues = cfg->num_input_queues; + fw_cfg->num_output_queues = cfg->num_output_queues; + fw_cfg->input_queue = ctx->input_queue_vied_addr; + fw_cfg->output_queue = ctx->output_queue_vied_addr; + fw_cfg->specific_addr = ctx->specific_vied_addr; + fw_cfg->specific_size = cfg->specific_size; + + clflush_cache_range(ctx->dma_buffer, sizeall); + + return ctx; +} +EXPORT_SYMBOL_GPL(ipu_fw_com_prepare); + +int ipu_fw_com_open(struct ipu_fw_com_context *ctx) +{ + /* Check if SP is in valid state */ + if (!ctx->cell_ready(ctx->adev)) + return -EIO; + + /* store syscom uninitialized state */ + writel(SYSCOM_STATE_UNINIT, ctx->dmem_addr + SYSCOM_STATE_REG * 4); + /* store syscom uninitialized command */ + 
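/* once started, the SP parses the config and flips SYSCOM_STATE_REG to READY; ipu_fw_com_ready() polls for exactly that transition */ +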
writel(SYSCOM_COMMAND_UNINIT, + ctx->dmem_addr + SYSCOM_COMMAND_REG * 4); + /* store firmware configuration address */ + writel(ctx->config_vied_addr, + ctx->dmem_addr + SYSCOM_CONFIG_REG * 4); + + ctx->cell_start(ctx->adev); + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_com_open); + +int ipu_fw_com_close(struct ipu_fw_com_context *ctx) +{ + int state; + + state = readl(ctx->dmem_addr + 4 * SYSCOM_STATE_REG); + if (state != SYSCOM_STATE_READY) + return -EBUSY; + + /* set close request flag */ + writel(SYSCOM_COMMAND_INACTIVE, ctx->dmem_addr + + SYSCOM_COMMAND_REG * 4); + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_com_close); + +int ipu_fw_com_release(struct ipu_fw_com_context *ctx, unsigned int force) +{ + /* check if release is forced, and verify the cell state if it is not */ + if (!force && !ctx->cell_ready(ctx->adev)) + return -EBUSY; + + dma_free_attrs(&ctx->adev->dev, ctx->dma_size, + ctx->dma_buffer, ctx->dma_addr, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + ctx->attrs); +#else + NULL); +#endif + kfree(ctx); + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_com_release); + +int ipu_fw_com_ready(struct ipu_fw_com_context *ctx) +{ + int state; + + /* check if SP syscom is ready to open the queue */ + state = readl(ctx->dmem_addr + SYSCOM_STATE_REG * 4); + if (state != SYSCOM_STATE_READY) + return -EBUSY; /* SPC is not ready to handle messages yet */ + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_com_ready); + +static bool is_index_valid(struct ipu_fw_sys_queue *q, unsigned int index) +{ + if (index >= q->size) + return false; + return true; +} + +void *ipu_send_get_token(struct ipu_fw_com_context *ctx, int q_nbr) +{ + struct ipu_fw_sys_queue *q = &ctx->input_queue[q_nbr]; + void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; + unsigned int wr, rd; + unsigned int packets; + unsigned int index; + + wr = readl(q_dmem + FW_COM_WR_REG); + rd = readl(q_dmem + FW_COM_RD_REG); + + /* Catch corrupt indexes in dmem */ + if (!is_index_valid(q, wr) || !is_index_valid(q, rd)) + return NULL; + + packets = num_free(wr + 1, rd, q->size); + if (packets <= 0) + return NULL; + + index = curr_index(q_dmem, DIR_SEND); + + return (void *)(unsigned long)q->host_address + (index * q->token_size); +} +EXPORT_SYMBOL_GPL(ipu_send_get_token); + +void ipu_send_put_token(struct ipu_fw_com_context *ctx, int q_nbr) +{ + struct ipu_fw_sys_queue *q = &ctx->input_queue[q_nbr]; + void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; + int index = curr_index(q_dmem, DIR_SEND); + void *addr = (void *)(unsigned long)q->host_address + + (index * q->token_size); + + clflush_cache_range(addr, q->token_size); + + /* Increment index */ + index = inc_index(q_dmem, q, DIR_SEND); + + writel(index, q_dmem + FW_COM_WR_REG); +} +EXPORT_SYMBOL_GPL(ipu_send_put_token); + +void *ipu_recv_get_token(struct ipu_fw_com_context *ctx, int q_nbr) +{ + struct ipu_fw_sys_queue *q = &ctx->output_queue[q_nbr]; + void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; + unsigned int wr, rd; + unsigned int packets; + void *addr; + + wr = readl(q_dmem + FW_COM_WR_REG); + rd = readl(q_dmem + FW_COM_RD_REG); + + /* Catch corrupt indexes in dmem
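(as a worked sanity check: with size 8, wr 1 and rd 6, num_messages() below yields (1 + 8) - 6 = 3 pending tokens; an index >= q->size means the DMEM words are garbage)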
*/ + if (!is_index_valid(q, wr) || !is_index_valid(q, rd)) + return NULL; + + packets = num_messages(wr, rd, q->size); + if (packets <= 0) + return NULL; + + addr = (void *)(unsigned long)q->host_address + (rd * q->token_size); + clflush_cache_range(addr, q->token_size); + + return addr; +} +EXPORT_SYMBOL_GPL(ipu_recv_get_token); + +void ipu_recv_put_token(struct ipu_fw_com_context *ctx, int q_nbr) +{ + struct ipu_fw_sys_queue *q = &ctx->output_queue[q_nbr]; + void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4; + unsigned int rd = inc_index(q_dmem, q, DIR_RECV); + + /* Release index */ + writel(rd, q_dmem + FW_COM_RD_REG); +} +EXPORT_SYMBOL_GPL(ipu_recv_put_token); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu fw comm library"); diff --git a/drivers/media/pci/intel/ipu-fw-com.h b/drivers/media/pci/intel/ipu-fw-com.h new file mode 100644 index 000000000000..de47455ea9a4 --- /dev/null +++ b/drivers/media/pci/intel/ipu-fw-com.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_FW_COM_H +#define IPU_FW_COM_H + +struct ipu_fw_com_context; +struct ipu_bus_device; + +struct ipu_fw_syscom_queue_config { + unsigned int queue_size; /* tokens per queue */ + unsigned int token_size; /* bytes per token */ +}; + +struct ipu_fw_com_cfg { + unsigned int num_input_queues; + unsigned int num_output_queues; + struct ipu_fw_syscom_queue_config *input; + struct ipu_fw_syscom_queue_config *output; + + unsigned int dmem_addr; + + /* firmware-specific configuration data */ + void *specific_addr; + unsigned int specific_size; + int (*cell_ready)(struct ipu_bus_device *adev); + void (*cell_start)(struct ipu_bus_device *adev); +}; + +void *ipu_fw_com_prepare(struct ipu_fw_com_cfg *cfg, + struct ipu_bus_device *adev, void __iomem *base); + +int ipu_fw_com_open(struct ipu_fw_com_context *ctx); +int ipu_fw_com_ready(struct ipu_fw_com_context *ctx); +int ipu_fw_com_close(struct ipu_fw_com_context *ctx); +int ipu_fw_com_release(struct ipu_fw_com_context *ctx, unsigned int force); + +void *ipu_recv_get_token(struct ipu_fw_com_context *ctx, int q_nbr); +void ipu_recv_put_token(struct ipu_fw_com_context *ctx, int q_nbr); +void *ipu_send_get_token(struct ipu_fw_com_context *ctx, int q_nbr); +void ipu_send_put_token(struct ipu_fw_com_context *ctx, int q_nbr); + +#endif diff --git a/drivers/media/pci/intel/ipu-fw-isys.c b/drivers/media/pci/intel/ipu-fw-isys.c new file mode 100644 index 000000000000..ca0bdba60404 --- /dev/null +++ b/drivers/media/pci/intel/ipu-fw-isys.c @@ -0,0 +1,560 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include + +#include +#include +#include "ipu-platform-regs.h" +#include "ipu-fw-isys.h" +#include "ipu-fw-com.h" +#include "ipu-isys.h" + +#define IPU_FW_UNSUPPORTED_DATA_TYPE 0 +static const uint32_t +extracted_bits_per_pixel_per_mipi_data_type[N_IPU_FW_ISYS_MIPI_DATA_TYPE] = { + + 64, /* [0x00] IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_START_CODE */ + 64, /* [0x01] IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_END_CODE */ + 64, /* [0x02] IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_START_CODE */ + 64, /* [0x03] IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_END_CODE */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x04] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x05] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x06] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x07] */ + 64, /* [0x08] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT1 */ + 64, /* [0x09] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT2 */ + 64, /* [0x0A] 
IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT3 */ + 64, /* [0x0B] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT4 */ + 64, /* [0x0C] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT5 */ + 64, /* [0x0D] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT6 */ + 64, /* [0x0E] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT7 */ + 64, /* [0x0F] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT8 */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x10] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x11] */ + 8, /* [0x12] IPU_FW_ISYS_MIPI_DATA_TYPE_EMBEDDED */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x13] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x14] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x15] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x16] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x17] */ + 12, /* [0x18] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8 */ + 15, /* [0x19] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10 */ + 12, /* [0x1A] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_LEGACY */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x1B] */ + 12, /* [0x1C] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_SHIFT */ + 15, /* [0x1D] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10_SHIFT */ + 16, /* [0x1E] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_8 */ + 20, /* [0x1F] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_10 */ + 16, /* [0x20] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_444 */ + 16, /* [0x21] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_555 */ + 16, /* [0x22] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_565 */ + 18, /* [0x23] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_666 */ + 24, /* [0x24] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_888 */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x25] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x26] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x27] */ + 6, /* [0x28] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_6 */ + 7, /* [0x29] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_7 */ + 8, /* [0x2A] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_8 */ + 10, /* [0x2B] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_10 */ + 12, /* [0x2C] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_12 */ + 14, /* [0x2D] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_14 */ + 16, /* [0x2E] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_16 */ + 8, /* [0x2F] IPU_FW_ISYS_MIPI_DATA_TYPE_BINARY_8 */ + 8, /* [0x30] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF1 */ + 8, /* [0x31] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF2 */ + 8, /* [0x32] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF3 */ + 8, /* [0x33] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF4 */ + 8, /* [0x34] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF5 */ + 8, /* [0x35] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF6 */ + 8, /* [0x36] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF7 */ + 8, /* [0x37] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF8 */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x38] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x39] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3A] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3B] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3C] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3D] */ + IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3E] */ + IPU_FW_UNSUPPORTED_DATA_TYPE /* [0x3F] */ +}; + +#ifndef CONFIG_VIDEO_INTEL_IPU_FW_LIB +static const char send_msg_types[N_IPU_FW_ISYS_SEND_TYPE][32] = { + "STREAM_OPEN", + "STREAM_START", + "STREAM_START_AND_CAPTURE", + "STREAM_CAPTURE", + "STREAM_STOP", + "STREAM_FLUSH", + "STREAM_CLOSE" +}; + +static int handle_proxy_response(struct ipu_isys *isys, unsigned int req_id) +{ + struct ipu_fw_isys_proxy_resp_info_abi *resp; + int rval = -EIO; + + resp = (struct ipu_fw_isys_proxy_resp_info_abi *) + ipu_recv_get_token(isys->fwcom, IPU_BASE_PROXY_RECV_QUEUES); + if (!resp) + return 1; + + dev_dbg(&isys->adev->dev, + "Proxy response: id 0x%x, error %d, details %d\n", + resp->request_id, resp->error_info.error, + resp->error_info.error_details); + + if (req_id == resp->request_id) + rval = 0; + + 
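/* hand the token back even on a mismatch so the queue slot is recycled */ +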
ipu_recv_put_token(isys->fwcom, IPU_BASE_PROXY_RECV_QUEUES); + return rval; +} + +/* Simple blocking proxy send function */ +int ipu_fw_isys_send_proxy_token(struct ipu_isys *isys, + unsigned int req_id, + unsigned int index, + unsigned int offset, u32 value) +{ + struct ipu_fw_com_context *ctx = isys->fwcom; + struct ipu_fw_proxy_send_queue_token *token; + unsigned int timeout = 1000; + int rval = -EBUSY; + + dev_dbg(&isys->adev->dev, + "proxy send token: req_id 0x%x, index %d, offset 0x%x, value 0x%x\n", + req_id, index, offset, value); + + mutex_lock(&isys->mutex); + token = ipu_send_get_token(ctx, IPU_BASE_PROXY_SEND_QUEUES); + if (!token) + goto leave; + + token->request_id = req_id; + token->region_index = index; + token->offset = offset; + token->value = value; + ipu_send_put_token(ctx, IPU_BASE_PROXY_SEND_QUEUES); + + /* Currently proxy doesn't support irq based service. Poll */ + do { + usleep_range(100, 110); + rval = handle_proxy_response(isys, req_id); + if (!rval) + break; + if (rval == -EIO) { + dev_err(&isys->adev->dev, + "Proxy response received with unexpected id\n"); + break; + } + timeout--; + } while (rval && timeout); + + if (!timeout) + dev_err(&isys->adev->dev, "Proxy response timed out\n"); +leave: + mutex_unlock(&isys->mutex); + return rval; +} + +int +ipu_fw_isys_complex_cmd(struct ipu_isys *isys, + const unsigned int stream_handle, + void *cpu_mapped_buf, + dma_addr_t dma_mapped_buf, + size_t size, enum ipu_fw_isys_send_type send_type) +{ + struct ipu_fw_com_context *ctx = isys->fwcom; + struct ipu_fw_send_queue_token *token; + + if (send_type >= N_IPU_FW_ISYS_SEND_TYPE) + return -EINVAL; + + dev_dbg(&isys->adev->dev, "send_token: %s\n", + send_msg_types[send_type]); + + /* + * Time to flush cache in case we have some payload. Not all messages + * have that + */ + if (cpu_mapped_buf) + clflush_cache_range(cpu_mapped_buf, size); + + token = ipu_send_get_token(ctx, + stream_handle + IPU_BASE_MSG_SEND_QUEUES); + if (!token) + return -EBUSY; + + token->payload = dma_mapped_buf; + token->buf_handle = (unsigned long)cpu_mapped_buf; + token->send_type = send_type; + + ipu_send_put_token(ctx, stream_handle + IPU_BASE_MSG_SEND_QUEUES); + + return 0; +} + +int ipu_fw_isys_simple_cmd(struct ipu_isys *isys, + const unsigned int stream_handle, + enum ipu_fw_isys_send_type send_type) +{ + return ipu_fw_isys_complex_cmd(isys, stream_handle, NULL, 0, 0, + send_type); +} + +int ipu_fw_isys_close(struct ipu_isys *isys) +{ + struct device *dev = &isys->adev->dev; + int timeout = IPU_ISYS_TURNOFF_TIMEOUT; + int rval; + unsigned long flags; + + /* + * Stop the isys fw. Actual close takes + * some time as the FW must stop its actions including code fetch + * to SP icache. + */ + spin_lock_irqsave(&isys->power_lock, flags); + rval = ipu_fw_com_close(isys->fwcom); + spin_unlock_irqrestore(&isys->power_lock, flags); + if (rval) + dev_err(dev, "Device close failure: %d\n", rval); + + /* release probably fails if the close failed. 
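A non-forced ipu_fw_com_release() keeps returning -EBUSY until cell_ready() reports the SP idle, hence the retry loop.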
Let's try anyway. */ + do { + usleep_range(IPU_ISYS_TURNOFF_DELAY_US, + 2 * IPU_ISYS_TURNOFF_DELAY_US); + rval = ipu_fw_com_release(isys->fwcom, 0); + timeout--; + } while (rval != 0 && timeout); + + /* Take the spin lock to wait for the interrupt handler to finish */ + spin_lock_irqsave(&isys->power_lock, flags); + if (!rval) + isys->fwcom = NULL; /* No further actions needed */ + else + dev_err(dev, "Device release timed out: %d\n", rval); + spin_unlock_irqrestore(&isys->power_lock, flags); + return rval; +} + +void ipu_fw_isys_cleanup(struct ipu_isys *isys) +{ + ipu_fw_com_release(isys->fwcom, 1); + isys->fwcom = NULL; +} + +static void start_sp(struct ipu_bus_device *adev) +{ + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + void __iomem *spc_regs_base = isys->pdata->base + + isys->pdata->ipdata->hw_variant.spc_offset; + u32 val = 0; + + val |= IPU_ISYS_SPC_STATUS_START | + IPU_ISYS_SPC_STATUS_RUN | + IPU_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; + val |= isys->icache_prefetch ? IPU_ISYS_SPC_STATUS_ICACHE_PREFETCH : 0; + + writel(val, spc_regs_base + IPU_ISYS_REG_SPC_STATUS_CTRL); +} + +static int query_sp(struct ipu_bus_device *adev) +{ + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + void __iomem *spc_regs_base = isys->pdata->base + + isys->pdata->ipdata->hw_variant.spc_offset; + u32 val = readl(spc_regs_base + IPU_ISYS_REG_SPC_STATUS_CTRL); + + /* return true when READY == 1, START == 0 */ + val &= IPU_ISYS_SPC_STATUS_READY | IPU_ISYS_SPC_STATUS_START; + + return val == IPU_ISYS_SPC_STATUS_READY; +} + +int ipu_fw_isys_init(struct ipu_isys *isys, unsigned int num_streams) +{ + int retry = IPU_ISYS_OPEN_RETRY; + int num_in_message_queues = clamp_t(unsigned int, num_streams, 1, + IPU_ISYS_NUM_STREAMS); + int num_out_message_queues = 1; + int type_proxy = IPU_FW_ISYS_QUEUE_TYPE_PROXY; + int type_dev = IPU_FW_ISYS_QUEUE_TYPE_DEV; + int type_msg = IPU_FW_ISYS_QUEUE_TYPE_MSG; + int base_dev_send = IPU_BASE_DEV_SEND_QUEUES; + int base_msg_send = IPU_BASE_MSG_SEND_QUEUES; + int base_msg_recv = IPU_BASE_MSG_RECV_QUEUES; + + struct ipu_fw_syscom_queue_config + input_queue_cfg[IPU_N_MAX_SEND_QUEUES]; + struct ipu_fw_syscom_queue_config + output_queue_cfg[IPU_N_MAX_RECV_QUEUES]; + + struct ipu_fw_com_cfg fwcom = { + .input = input_queue_cfg, + .output = output_queue_cfg, + .cell_start = start_sp, + .cell_ready = query_sp, + }; + + struct ipu_fw_isys_fw_config isys_fw_cfg = { + .num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_PROXY] = + IPU_N_MAX_PROXY_SEND_QUEUES, + .num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_DEV] = + IPU_N_MAX_DEV_SEND_QUEUES, + .num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_MSG] = + num_in_message_queues, + .num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_PROXY] = + IPU_N_MAX_PROXY_RECV_QUEUES, + /* Common msg/dev return queue */ + .num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_DEV] = 0, + .num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_MSG] = + num_out_message_queues, + }; + struct device *dev = &isys->adev->dev; + int rval, i; + + fwcom.num_input_queues = + isys_fw_cfg.num_send_queues[type_proxy] + + isys_fw_cfg.num_send_queues[type_dev] + + isys_fw_cfg.num_send_queues[type_msg]; + + fwcom.num_output_queues = + isys_fw_cfg.num_recv_queues[type_proxy] + + isys_fw_cfg.num_recv_queues[type_dev] + + isys_fw_cfg.num_recv_queues[type_msg]; + + /* + * SRAM partitioning.
Initially, equal partitioning is set. + * TODO: fine-tune the partitioning based on the stream pixel load + */ + for (i = 0; i < IPU_NOF_SRAM_BLOCKS_MAX; i++) { + if (i < num_in_message_queues) + isys_fw_cfg.buffer_partition.num_gda_pages[i] = + (IPU_DEVICE_GDA_NR_PAGES * + IPU_DEVICE_GDA_VIRT_FACTOR) / + num_in_message_queues; + else + isys_fw_cfg.buffer_partition.num_gda_pages[i] = 0; + } + + /* FW assumes proxy interface at fwcom queue 0 */ + for (i = 0; i < isys_fw_cfg.num_send_queues[type_proxy]; i++) { + input_queue_cfg[i].token_size = + sizeof(struct ipu_fw_proxy_send_queue_token); + input_queue_cfg[i].queue_size = IPU_ISYS_SIZE_PROXY_SEND_QUEUE; + } + + for (i = 0; i < isys_fw_cfg.num_send_queues[type_dev]; i++) { + input_queue_cfg[base_dev_send + i].token_size = + sizeof(struct ipu_fw_send_queue_token); + input_queue_cfg[base_dev_send + i].queue_size = + IPU_DEV_SEND_QUEUE_SIZE; + } + + for (i = 0; i < isys_fw_cfg.num_send_queues[type_msg]; i++) { + input_queue_cfg[base_msg_send + i].token_size = + sizeof(struct ipu_fw_send_queue_token); + input_queue_cfg[base_msg_send + i].queue_size = + IPU_ISYS_SIZE_SEND_QUEUE; + } + + for (i = 0; i < isys_fw_cfg.num_recv_queues[type_proxy]; i++) { + output_queue_cfg[i].token_size = + sizeof(struct ipu_fw_proxy_resp_queue_token); + output_queue_cfg[i].queue_size = IPU_ISYS_SIZE_PROXY_RECV_QUEUE; + } + /* There is no recv DEV queue */ + for (i = 0; i < isys_fw_cfg.num_recv_queues[type_msg]; i++) { + output_queue_cfg[base_msg_recv + i].token_size = + sizeof(struct ipu_fw_resp_queue_token); + output_queue_cfg[base_msg_recv + i].queue_size = + IPU_ISYS_SIZE_RECV_QUEUE; + } + + fwcom.dmem_addr = isys->pdata->ipdata->hw_variant.dmem_offset; + fwcom.specific_addr = &isys_fw_cfg; + fwcom.specific_size = sizeof(isys_fw_cfg); + + isys->fwcom = ipu_fw_com_prepare(&fwcom, isys->adev, isys->pdata->base); + if (!isys->fwcom) { + dev_err(dev, "isys fw com prepare failed\n"); + return -EIO; + } + + rval = ipu_fw_com_open(isys->fwcom); + if (rval) { + dev_err(dev, "isys fw com open failed %d\n", rval); + return rval; + } + + do { + usleep_range(IPU_ISYS_OPEN_TIMEOUT_US, + IPU_ISYS_OPEN_TIMEOUT_US + 10); + rval = ipu_fw_com_ready(isys->fwcom); + if (!rval) + break; + retry--; + } while (retry > 0); + + if (!retry && rval) { + dev_err(dev, "isys port open ready failed %d\n", rval); + ipu_fw_isys_close(isys); + } + + return rval; +} + +struct ipu_fw_isys_resp_info_abi *ipu_fw_isys_get_resp(void *context, + unsigned int queue, + struct + ipu_fw_isys_resp_info_abi + *response) +{ + return (struct ipu_fw_isys_resp_info_abi *) + ipu_recv_get_token(context, queue); +} + +void ipu_fw_isys_put_resp(void *context, unsigned int queue) +{ + ipu_recv_put_token(context, queue); +} +#endif + +void ipu_fw_isys_set_params(struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg) +{ + unsigned int i; + unsigned int idx; + + for (i = 0; i < stream_cfg->nof_input_pins; i++) { + idx = stream_cfg->input_pins[i].dt; + stream_cfg->input_pins[i].bits_per_pix = + extracted_bits_per_pixel_per_mipi_data_type[idx]; + stream_cfg->input_pins[i].mapped_dt = + N_IPU_FW_ISYS_MIPI_DATA_TYPE; + } +} + +void +ipu_fw_isys_dump_stream_cfg(struct device *dev, + struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg) +{ + unsigned int i; + + dev_dbg(dev, "---------------------------\n"); + dev_dbg(dev, "IPU_FW_ISYS_STREAM_CFG_DATA\n"); + dev_dbg(dev, "---------------------------\n"); + + dev_dbg(dev, "Source %d\n", stream_cfg->src); + dev_dbg(dev, "VC %d\n", stream_cfg->vc); + dev_dbg(dev, "Nof input pins %d\n",
stream_cfg->nof_input_pins); + dev_dbg(dev, "Nof output pins %d\n", stream_cfg->nof_output_pins); + + for (i = 0; i < stream_cfg->nof_input_pins; i++) { + dev_dbg(dev, "Input pin %d\n", i); + dev_dbg(dev, "Mipi data type 0x%0x\n", + stream_cfg->input_pins[i].dt); + dev_dbg(dev, "Mipi store mode %d\n", + stream_cfg->input_pins[i].mipi_store_mode); + dev_dbg(dev, "Bits per pixel %d\n", + stream_cfg->input_pins[i].bits_per_pix); + dev_dbg(dev, "Mapped data type 0x%0x\n", + stream_cfg->input_pins[i].mapped_dt); + dev_dbg(dev, "Input res width %d\n", + stream_cfg->input_pins[i].input_res.width); + dev_dbg(dev, "Input res height %d\n", + stream_cfg->input_pins[i].input_res.height); + } + + for (i = 0; i < N_IPU_FW_ISYS_CROPPING_LOCATION; i++) { + dev_dbg(dev, "Crop info %d\n", i); + dev_dbg(dev, "Crop.top_offset %d\n", + stream_cfg->crop[i].top_offset); + dev_dbg(dev, "Crop.left_offset %d\n", + stream_cfg->crop[i].left_offset); + dev_dbg(dev, "Crop.bottom_offset %d\n", + stream_cfg->crop[i].bottom_offset); + dev_dbg(dev, "Crop.right_offset %d\n", + stream_cfg->crop[i].right_offset); + dev_dbg(dev, "----------------\n"); + } + + for (i = 0; i < stream_cfg->nof_output_pins; i++) { + dev_dbg(dev, "Output pin %d\n", i); + dev_dbg(dev, "Output input pin id %d\n", + stream_cfg->output_pins[i].input_pin_id); + dev_dbg(dev, "Output res width %d\n", + stream_cfg->output_pins[i].output_res.width); + dev_dbg(dev, "Output res height %d\n", + stream_cfg->output_pins[i].output_res.height); + dev_dbg(dev, "Stride %d\n", stream_cfg->output_pins[i].stride); + dev_dbg(dev, "Pin type %d\n", stream_cfg->output_pins[i].pt); + dev_dbg(dev, "Ft %d\n", stream_cfg->output_pins[i].ft); + dev_dbg(dev, "Watermark in lines %d\n", + stream_cfg->output_pins[i].watermark_in_lines); + dev_dbg(dev, "Send irq %d\n", + stream_cfg->output_pins[i].send_irq); + dev_dbg(dev, "Reserve compression %d\n", + stream_cfg->output_pins[i].reserve_compression); + dev_dbg(dev, "----------------\n"); + } + + dev_dbg(dev, "Isl_use %d\n", stream_cfg->isl_use); + switch (stream_cfg->isl_use) { + case IPU_FW_ISYS_USE_SINGLE_ISA: + dev_dbg(dev, "ISA cfg:\n"); + dev_dbg(dev, "blc_enabled %d\n", stream_cfg->isa_cfg.cfg.blc); + dev_dbg(dev, "lsc_enabled %d\n", stream_cfg->isa_cfg.cfg.lsc); + dev_dbg(dev, "dpc_enabled %d\n", stream_cfg->isa_cfg.cfg.dpc); + dev_dbg(dev, "downscaler_enabled %d\n", + stream_cfg->isa_cfg.cfg.downscaler); + dev_dbg(dev, "awb_enabled %d\n", stream_cfg->isa_cfg.cfg.awb); + dev_dbg(dev, "af_enabled %d\n", stream_cfg->isa_cfg.cfg.af); + dev_dbg(dev, "ae_enabled %d\n", stream_cfg->isa_cfg.cfg.ae); + break; + case IPU_FW_ISYS_USE_SINGLE_DUAL_ISL: + case IPU_FW_ISYS_USE_NO_ISL_NO_ISA: + default: + break; + } +} + +void ipu_fw_isys_dump_frame_buff_set(struct device *dev, + struct ipu_fw_isys_frame_buff_set_abi *buf, + unsigned int outputs) +{ + unsigned int i; + + dev_dbg(dev, "--------------------------\n"); + dev_dbg(dev, "IPU_FW_ISYS_FRAME_BUFF_SET\n"); + dev_dbg(dev, "--------------------------\n"); + + for (i = 0; i < outputs; i++) { + dev_dbg(dev, "Output pin %d\n", i); + dev_dbg(dev, "out_buf_id %llu\n", + buf->output_pins[i].out_buf_id); + dev_dbg(dev, "addr 0x%x\n", buf->output_pins[i].addr); + dev_dbg(dev, "compress %u\n", buf->output_pins[i].compress); + + dev_dbg(dev, "----------------\n"); + } + + dev_dbg(dev, "process_group_light.addr 0x%x\n", + buf->process_group_light.addr); + dev_dbg(dev, "process_group_light.param_buf_id %llu\n", + buf->process_group_light.param_buf_id); + dev_dbg(dev, "send_irq_sof 0x%x\n",
buf->send_irq_sof); + dev_dbg(dev, "send_irq_eof 0x%x\n", buf->send_irq_eof); + dev_dbg(dev, "send_resp_sof 0x%x\n", buf->send_resp_sof); + dev_dbg(dev, "send_resp_eof 0x%x\n", buf->send_resp_eof); +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + dev_dbg(dev, "send_irq_capture_ack 0x%x\n", buf->send_irq_capture_ack); + dev_dbg(dev, "send_irq_capture_done 0x%x\n", buf->send_irq_capture_done); +#endif +#ifdef IPU_OTF_SUPPORT + dev_dbg(dev, "frame_counter 0x%x\n", buf->frame_counter); +#endif +} diff --git a/drivers/media/pci/intel/ipu-fw-isys.h b/drivers/media/pci/intel/ipu-fw-isys.h new file mode 100644 index 000000000000..110620257278 --- /dev/null +++ b/drivers/media/pci/intel/ipu-fw-isys.h @@ -0,0 +1,847 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_FW_ISYS_H +#define IPU_FW_ISYS_H + +#include "ipu-fw-com.h" + +/* Max number of Input/Output Pins */ +#define IPU_MAX_IPINS 4 + +/* The worst case is ISA use, where a single input pin produces + * MIPI output, NS pixel output and scaled pixel output; + * this is where the + 2 comes from + */ +#define IPU_MAX_OPINS ((IPU_MAX_IPINS) + 2) + +/* Max number of supported virtual streams */ +#define IPU_STREAM_ID_MAX 8 + +/* Aligned with the approach of having one dedicated queue per stream */ +#define IPU_N_MAX_MSG_SEND_QUEUES (IPU_STREAM_ID_MAX) +/* Single return queue for all stream/command types */ +#define IPU_N_MAX_MSG_RECV_QUEUES 1 +/* Single device queue for high priority commands (bypass in-order queue) */ +#define IPU_N_MAX_DEV_SEND_QUEUES 1 +/* Single dedicated send queue for proxy interface */ +#define IPU_N_MAX_PROXY_SEND_QUEUES 1 +/* Single dedicated recv queue for proxy interface */ +#define IPU_N_MAX_PROXY_RECV_QUEUES 1 +/* Send queues layout */ +#define IPU_BASE_PROXY_SEND_QUEUES 0 +#define IPU_BASE_DEV_SEND_QUEUES \ + (IPU_BASE_PROXY_SEND_QUEUES + IPU_N_MAX_PROXY_SEND_QUEUES) +#define IPU_BASE_MSG_SEND_QUEUES \ + (IPU_BASE_DEV_SEND_QUEUES + IPU_N_MAX_DEV_SEND_QUEUES) +#define IPU_N_MAX_SEND_QUEUES \ + (IPU_BASE_MSG_SEND_QUEUES + IPU_N_MAX_MSG_SEND_QUEUES) +/* Recv queues layout */ +#define IPU_BASE_PROXY_RECV_QUEUES 0 +#define IPU_BASE_MSG_RECV_QUEUES \ + (IPU_BASE_PROXY_RECV_QUEUES + IPU_N_MAX_PROXY_RECV_QUEUES) +#define IPU_N_MAX_RECV_QUEUES \ + (IPU_BASE_MSG_RECV_QUEUES + IPU_N_MAX_MSG_RECV_QUEUES) + +/* Consider 1 slot per stream since the driver is not expected to pipeline + * device commands for the same stream + */ +#define IPU_DEV_SEND_QUEUE_SIZE (IPU_STREAM_ID_MAX) + +/* Max number of supported SRAM buffer partitions. + * It refers to the size of stream partitions. 
+ * These partitions are further subpartitioned internally + * by the FW, but by declaring statically the stream + * partitions we solve the buffer fragmentation issue + */ +#define IPU_NOF_SRAM_BLOCKS_MAX (IPU_STREAM_ID_MAX) + +/* Max number of supported input pins routed in ISL */ +#define IPU_MAX_IPINS_IN_ISL 2 + +/* Max number of planes for frame formats supported by the FW */ +#define IPU_PIN_PLANES_MAX 4 + +/** + * enum ipu_fw_isys_resp_type + */ +enum ipu_fw_isys_resp_type { + IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0, + IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK, + IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, + IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, + IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, + IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, + IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, + IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY, + IPU_FW_ISYS_RESP_TYPE_PIN_DATA_WATERMARK, + IPU_FW_ISYS_RESP_TYPE_FRAME_SOF, + IPU_FW_ISYS_RESP_TYPE_FRAME_EOF, + IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, + IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, + IPU_FW_ISYS_RESP_TYPE_PIN_DATA_SKIPPED, + IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED, + IPU_FW_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED, + IPU_FW_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED, + IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY, + N_IPU_FW_ISYS_RESP_TYPE +}; + +/** + * enum ipu_fw_isys_send_type + */ +enum ipu_fw_isys_send_type { + IPU_FW_ISYS_SEND_TYPE_STREAM_OPEN = 0, + IPU_FW_ISYS_SEND_TYPE_STREAM_START, + IPU_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE, + IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE, + IPU_FW_ISYS_SEND_TYPE_STREAM_STOP, + IPU_FW_ISYS_SEND_TYPE_STREAM_FLUSH, + IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE, + N_IPU_FW_ISYS_SEND_TYPE +}; + +/** + * enum ipu_fw_isys_queue_type + */ +enum ipu_fw_isys_queue_type { + IPU_FW_ISYS_QUEUE_TYPE_PROXY = 0, + IPU_FW_ISYS_QUEUE_TYPE_DEV, + IPU_FW_ISYS_QUEUE_TYPE_MSG, + N_IPU_FW_ISYS_QUEUE_TYPE +}; + +/** + * enum ipu_fw_isys_stream_source: Specifies a source for a stream + */ +enum ipu_fw_isys_stream_source { + IPU_FW_ISYS_STREAM_SRC_PORT_0 = 0, + IPU_FW_ISYS_STREAM_SRC_PORT_1, + IPU_FW_ISYS_STREAM_SRC_PORT_2, + IPU_FW_ISYS_STREAM_SRC_PORT_3, + IPU_FW_ISYS_STREAM_SRC_PORT_4, + IPU_FW_ISYS_STREAM_SRC_PORT_5, + IPU_FW_ISYS_STREAM_SRC_PORT_6, + IPU_FW_ISYS_STREAM_SRC_PORT_7, + IPU_FW_ISYS_STREAM_SRC_PORT_8, + IPU_FW_ISYS_STREAM_SRC_PORT_9, + IPU_FW_ISYS_STREAM_SRC_PORT_10, + IPU_FW_ISYS_STREAM_SRC_PORT_11, + IPU_FW_ISYS_STREAM_SRC_PORT_12, + IPU_FW_ISYS_STREAM_SRC_PORT_13, + IPU_FW_ISYS_STREAM_SRC_PORT_14, + IPU_FW_ISYS_STREAM_SRC_PORT_15, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_0, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_1, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_2, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_3, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_4, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_5, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_6, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_7, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_8, + IPU_FW_ISYS_STREAM_SRC_MIPIGEN_9, + N_IPU_FW_ISYS_STREAM_SRC +}; + +#define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT0 IPU_FW_ISYS_STREAM_SRC_PORT_0 +#define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT1 IPU_FW_ISYS_STREAM_SRC_PORT_1 +#define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT2 IPU_FW_ISYS_STREAM_SRC_PORT_2 +#define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT3 IPU_FW_ISYS_STREAM_SRC_PORT_3 + +#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTA IPU_FW_ISYS_STREAM_SRC_PORT_4 +#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTB IPU_FW_ISYS_STREAM_SRC_PORT_5 +#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT0 IPU_FW_ISYS_STREAM_SRC_PORT_6 +#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT1 IPU_FW_ISYS_STREAM_SRC_PORT_7 
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT2 IPU_FW_ISYS_STREAM_SRC_PORT_8 +#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT3 IPU_FW_ISYS_STREAM_SRC_PORT_9 + +#define IPU_FW_ISYS_STREAM_SRC_MIPIGEN_PORT0 IPU_FW_ISYS_STREAM_SRC_MIPIGEN_0 +#define IPU_FW_ISYS_STREAM_SRC_MIPIGEN_PORT1 IPU_FW_ISYS_STREAM_SRC_MIPIGEN_1 + +/** + * enum ipu_fw_isys_mipi_vc: MIPI csi2 spec + * supports up to 4 virtual per physical channel + */ +enum ipu_fw_isys_mipi_vc { + IPU_FW_ISYS_MIPI_VC_0 = 0, + IPU_FW_ISYS_MIPI_VC_1, + IPU_FW_ISYS_MIPI_VC_2, + IPU_FW_ISYS_MIPI_VC_3, + N_IPU_FW_ISYS_MIPI_VC +}; + +/** + * Supported Pixel Frame formats. Expandable if needed + */ +enum ipu_fw_isys_frame_format_type { + IPU_FW_ISYS_FRAME_FORMAT_NV11 = 0, /* 12 bit YUV 411, Y, UV plane */ + IPU_FW_ISYS_FRAME_FORMAT_NV12, /* 12 bit YUV 420, Y, UV plane */ + IPU_FW_ISYS_FRAME_FORMAT_NV12_16, /* 16 bit YUV 420, Y, UV plane */ + IPU_FW_ISYS_FRAME_FORMAT_NV12_TILEY, /* 12 bit YUV 420, + * Intel proprietary tiled format, + * TileY + */ + IPU_FW_ISYS_FRAME_FORMAT_NV16, /* 16 bit YUV 422, Y, UV plane */ + IPU_FW_ISYS_FRAME_FORMAT_NV21, /* 12 bit YUV 420, Y, VU plane */ + IPU_FW_ISYS_FRAME_FORMAT_NV61, /* 16 bit YUV 422, Y, VU plane */ + IPU_FW_ISYS_FRAME_FORMAT_YV12, /* 12 bit YUV 420, Y, V, U plane */ + IPU_FW_ISYS_FRAME_FORMAT_YV16, /* 16 bit YUV 422, Y, V, U plane */ + IPU_FW_ISYS_FRAME_FORMAT_YUV420, /* 12 bit YUV 420, Y, U, V plane */ + IPU_FW_ISYS_FRAME_FORMAT_YUV420_10, /* yuv420, 10 bits per subpixel */ + IPU_FW_ISYS_FRAME_FORMAT_YUV420_12, /* yuv420, 12 bits per subpixel */ + IPU_FW_ISYS_FRAME_FORMAT_YUV420_14, /* yuv420, 14 bits per subpixel */ + IPU_FW_ISYS_FRAME_FORMAT_YUV420_16, /* yuv420, 16 bits per subpixel */ + IPU_FW_ISYS_FRAME_FORMAT_YUV422, /* 16 bit YUV 422, Y, U, V plane */ + IPU_FW_ISYS_FRAME_FORMAT_YUV422_16, /* yuv422, 16 bits per subpixel */ + IPU_FW_ISYS_FRAME_FORMAT_UYVY, /* 16 bit YUV 422, UYVY interleaved */ + IPU_FW_ISYS_FRAME_FORMAT_YUYV, /* 16 bit YUV 422, YUYV interleaved */ + IPU_FW_ISYS_FRAME_FORMAT_YUV444, /* 24 bit YUV 444, Y, U, V plane */ + IPU_FW_ISYS_FRAME_FORMAT_YUV_LINE, /* Internal format, 2 y lines + * followed by a uvinterleaved line + */ + IPU_FW_ISYS_FRAME_FORMAT_RAW8, /* RAW8, 1 plane */ + IPU_FW_ISYS_FRAME_FORMAT_RAW10, /* RAW10, 1 plane */ + IPU_FW_ISYS_FRAME_FORMAT_RAW12, /* RAW12, 1 plane */ + IPU_FW_ISYS_FRAME_FORMAT_RAW14, /* RAW14, 1 plane */ + IPU_FW_ISYS_FRAME_FORMAT_RAW16, /* RAW16, 1 plane */ + IPU_FW_ISYS_FRAME_FORMAT_RGB565, /* 16 bit RGB, 1 plane. Each 3 sub + * pixels are packed into one 16 bit + * value, 5 bits for R, 6 bits + * for G and 5 bits for B. + */ + + IPU_FW_ISYS_FRAME_FORMAT_PLANAR_RGB888, /* 24 bit RGB, 3 planes */ + IPU_FW_ISYS_FRAME_FORMAT_RGBA888, /* 32 bit RGBA, 1 plane, + * A=Alpha (alpha is unused) + */ + IPU_FW_ISYS_FRAME_FORMAT_QPLANE6, /* Internal, for advanced ISP */ + IPU_FW_ISYS_FRAME_FORMAT_BINARY_8, /* byte stream, used for jpeg. */ + N_IPU_FW_ISYS_FRAME_FORMAT +}; + +/* Temporary for driver compatibility */ +#define IPU_FW_ISYS_FRAME_FORMAT_RAW (IPU_FW_ISYS_FRAME_FORMAT_RAW16) + +/** + * Supported MIPI data type. 
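The values are the data type codes from the MIPI CSI-2 specification, as they appear on the wire.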
Keep in sync with the array in ipu_fw_isys_private.c */ +enum ipu_fw_isys_mipi_data_type { + /** SYNCHRONIZATION SHORT PACKET DATA TYPES */ + IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_START_CODE = 0x00, + IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_END_CODE = 0x01, + IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_START_CODE = 0x02, /* Optional */ + IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_END_CODE = 0x03, /* Optional */ + /** Reserved 0x04-0x07 */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x04 = 0x04, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x05 = 0x05, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x06 = 0x06, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x07 = 0x07, + /** GENERIC SHORT PACKET DATA TYPES */ + /** They are used to keep the timing information for + * the opening/closing of shutters, + * the triggering of flashes, etc. + */ + /* Generic Short Packet Codes 1 - 8 */ + IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT1 = 0x08, + IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT2 = 0x09, + IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT3 = 0x0A, + IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT4 = 0x0B, + IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT5 = 0x0C, + IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT6 = 0x0D, + IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT7 = 0x0E, + IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT8 = 0x0F, + /** GENERIC LONG PACKET DATA TYPES */ + IPU_FW_ISYS_MIPI_DATA_TYPE_NULL = 0x10, + IPU_FW_ISYS_MIPI_DATA_TYPE_BLANKING_DATA = 0x11, + /* Embedded 8-bit non-image data */ + IPU_FW_ISYS_MIPI_DATA_TYPE_EMBEDDED = 0x12, + /** Reserved 0x13-0x17 */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x13 = 0x13, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x14 = 0x14, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x15 = 0x15, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x16 = 0x16, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x17 = 0x17, + /** YUV DATA TYPES */ + /* 8 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8 = 0x18, + /* 10 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10 = 0x19, + /* 8 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_LEGACY = 0x1A, + /** Reserved 0x1B */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x1B = 0x1B, + /* YUV420 8-bit (Chroma Shifted Pixel Sampling) */ + IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_SHIFT = 0x1C, + /* YUV420 10-bit (Chroma Shifted Pixel Sampling) */ + IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10_SHIFT = 0x1D, + /* UYVY..UVYV, 8 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_8 = 0x1E, + /* UYVY..UVYV, 10 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_10 = 0x1F, + /** RGB DATA TYPES */ + /* BGR..BGR, 4 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_444 = 0x20, + /* BGR..BGR, 5 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_555 = 0x21, + /* BGR..BGR, 5 bits B and R, 6 bits G */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_565 = 0x22, + /* BGR..BGR, 6 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_666 = 0x23, + /* BGR..BGR, 8 bits per subpixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_888 = 0x24, + /** Reserved 0x25-0x27 */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x25 = 0x25, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x26 = 0x26, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x27 = 0x27, + /** RAW DATA TYPES */ + /* RAW data, 6 - 14 bits per pixel */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_6 = 0x28, + IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_7 = 0x29, + IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_8 = 0x2A, + IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_10 = 0x2B, + IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_12 = 0x2C, + IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_14 = 0x2D, + /** Reserved 0x2E-0x2F are used with assigned meanings */ + /* RAW data, 16 bits per pixel, not specified in CSI-MIPI standard */ + 
IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_16 = 0x2E, + /* Binary byte stream, which is target at JPEG, + * not specified in CSI-MIPI standard + */ + IPU_FW_ISYS_MIPI_DATA_TYPE_BINARY_8 = 0x2F, + + /** USER DEFINED 8-BIT DATA TYPES */ + /** For example, the data transmitter (e.g. the SoC sensor) + * can keep the JPEG data as + * the User Defined Data Type 4 and the MPEG data as the + * User Defined Data Type 7. + */ + IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF1 = 0x30, + IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF2 = 0x31, + IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF3 = 0x32, + IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF4 = 0x33, + IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF5 = 0x34, + IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF6 = 0x35, + IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF7 = 0x36, + IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF8 = 0x37, + /** Reserved 0x38-0x3F */ + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x38 = 0x38, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x39 = 0x39, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3A = 0x3A, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3B = 0x3B, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3C = 0x3C, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3D = 0x3D, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3E = 0x3E, + IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3F = 0x3F, + + /* Keep always last and max value */ + N_IPU_FW_ISYS_MIPI_DATA_TYPE = 0x40 +}; + +/** enum ipu_fw_isys_pin_type: output pin buffer types. + * Buffers can be queued and de-queued to hand them over between IA and ISYS + */ +enum ipu_fw_isys_pin_type { + /* Captured as MIPI packets */ + IPU_FW_ISYS_PIN_TYPE_MIPI = 0, + /* Captured through the ISApf (with/without ISA) + * and the non-scaled output path + */ + IPU_FW_ISYS_PIN_TYPE_RAW_NS, + /* Captured through the ISApf + ISA and the scaled output path */ + IPU_FW_ISYS_PIN_TYPE_RAW_S, + /* Captured through the SoC path */ + IPU_FW_ISYS_PIN_TYPE_RAW_SOC, + /* Reserved for future use, maybe short packets */ + IPU_FW_ISYS_PIN_TYPE_METADATA_0, + /* Reserved for future use */ + IPU_FW_ISYS_PIN_TYPE_METADATA_1, + /* Legacy (non-PIV2), used for the AWB stats */ + IPU_FW_ISYS_PIN_TYPE_AWB_STATS, + /* Legacy (non-PIV2), used for the AF stats */ + IPU_FW_ISYS_PIN_TYPE_AF_STATS, + /* Legacy (non-PIV2), used for the AE stats */ + IPU_FW_ISYS_PIN_TYPE_HIST_STATS, + /* Used for the PAF FF */ + IPU_FW_ISYS_PIN_TYPE_PAF_FF, + /* Keep always last and max value */ + N_IPU_FW_ISYS_PIN_TYPE +}; + +/** + * enum ipu_fw_isys_isl_use + * Describes the ISL/ISA use + */ +enum ipu_fw_isys_isl_use { + IPU_FW_ISYS_USE_NO_ISL_NO_ISA = 0, + IPU_FW_ISYS_USE_SINGLE_DUAL_ISL, + IPU_FW_ISYS_USE_SINGLE_ISA, + N_IPU_FW_ISYS_USE +}; + +/** + * enum ipu_fw_isys_mipi_store_mode. Describes if long MIPI packets reach + * MIPI SRAM with the long packet header or + * if not, then only option is to capture it with pin type MIPI. + */ +enum ipu_fw_isys_mipi_store_mode { + IPU_FW_ISYS_MIPI_STORE_MODE_NORMAL = 0, + IPU_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER, + N_IPU_FW_ISYS_MIPI_STORE_MODE +}; + +/** + * enum ipu_fw_isys_type_paf. 
Describes the type of PAF enabled + */ +enum ipu_fw_isys_type_paf { + /* PAF data not present */ + IPU_FW_ISYS_TYPE_NO_PAF = 0, + /* Type 2 sensor types, PAF coming separately from the Image Frame */ + /* PAF data in interleaved format (RLRL or LRLR) */ + IPU_FW_ISYS_TYPE_INTERLEAVED_PAF, + /* PAF data in non-interleaved format (LL/RR or RR/LL) */ + IPU_FW_ISYS_TYPE_NON_INTERLEAVED_PAF, + /* Type 3 sensor types, PAF data embedded in the Image Frame */ + /* Frame Embedded PAF in interleaved format (RLRL or LRLR) */ + IPU_FW_ISYS_TYPE_FRAME_EMB_INTERLEAVED_PAF, + /* Frame Embedded PAF in non-interleaved format (LL/RR or RR/LL) */ + IPU_FW_ISYS_TYPE_FRAME_EMB_NON_INTERLEAVED_PAF, + N_IPU_FW_ISYS_TYPE_PAF +}; + +/** + * enum ipu_fw_isys_cropping_location. Enumerates the cropping locations in ISYS + */ +enum ipu_fw_isys_cropping_location { + /* Cropping executed in ISAPF (mainly), + * ISAPF preproc (odd column) and MIPI STR2MMIO (odd row) + */ + IPU_FW_ISYS_CROPPING_LOCATION_PRE_ISA = 0, + /* Reserved for legacy mode which will never be implemented */ + IPU_FW_ISYS_CROPPING_LOCATION_RESERVED_1, + /* Cropping executed in StreamPifConv in the ISA output for + * the RAW_NS pin + */ + IPU_FW_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED, + /* Cropping executed in StreamScaledPifConv + * in the ISA output for the RAW_S pin + */ + IPU_FW_ISYS_CROPPING_LOCATION_POST_ISA_SCALED, + N_IPU_FW_ISYS_CROPPING_LOCATION +}; + +/** + * enum ipu_fw_isys_resolution_info. Describes the resolutions + * required to set up the various ISA GP registers. + */ +enum ipu_fw_isys_resolution_info { + /* Non-scaled ISA output resolution before + * the StreamPifConv cropping + */ + IPU_FW_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED = 0, + /* Scaled ISA output resolution before the StreamScaledPifConv cropping */ + IPU_FW_ISYS_RESOLUTION_INFO_POST_ISA_SCALED, + N_IPU_FW_ISYS_RESOLUTION_INFO +}; + +/** + * enum ipu_fw_isys_error. Describes the error type detected by the FW + */ +enum ipu_fw_isys_error { + IPU_FW_ISYS_ERROR_NONE = 0, /* No details */ + IPU_FW_ISYS_ERROR_FW_INTERNAL_CONSISTENCY, /* enum */ + IPU_FW_ISYS_ERROR_HW_CONSISTENCY, /* enum */ + IPU_FW_ISYS_ERROR_DRIVER_INVALID_COMMAND_SEQUENCE, /* enum */ + IPU_FW_ISYS_ERROR_DRIVER_INVALID_DEVICE_CONFIGURATION, /* enum */ + IPU_FW_ISYS_ERROR_DRIVER_INVALID_STREAM_CONFIGURATION, /* enum */ + IPU_FW_ISYS_ERROR_DRIVER_INVALID_FRAME_CONFIGURATION, /* enum */ + IPU_FW_ISYS_ERROR_INSUFFICIENT_RESOURCES, /* enum */ + IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO, /* HW code */ + IPU_FW_ISYS_ERROR_HW_REPORTED_SIG2CIO, /* HW code */ + IPU_FW_ISYS_ERROR_SENSOR_FW_SYNC, /* enum */ + IPU_FW_ISYS_ERROR_STREAM_IN_SUSPENSION, /* FW code */ + IPU_FW_ISYS_ERROR_RESPONSE_QUEUE_FULL, /* FW code */ + N_IPU_FW_ISYS_ERROR +}; + +/** + * enum ipu_fw_proxy_error. 
Describes the error type for + * the proxy detected by the FW + */ +enum ipu_fw_proxy_error { + IPU_FW_PROXY_ERROR_NONE = 0, + IPU_FW_PROXY_ERROR_INVALID_WRITE_REGION, + IPU_FW_PROXY_ERROR_INVALID_WRITE_OFFSET, + N_IPU_FW_PROXY_ERROR +}; + +struct ipu_isys; + +/** + * struct ipu_fw_isys_buffer_partition_abi - buffer partition information + * @num_gda_pages: Number of virtual gda pages available for each virtual stream + */ +struct ipu_fw_isys_buffer_partition_abi { + u32 num_gda_pages[IPU_STREAM_ID_MAX]; +}; + +/** + * struct ipu_fw_isys_fw_config - contains the parts from + * ia_css_isys_device_cfg_data we need to transfer to the cell + */ +struct ipu_fw_isys_fw_config { + struct ipu_fw_isys_buffer_partition_abi buffer_partition; + u32 num_send_queues[N_IPU_FW_ISYS_QUEUE_TYPE]; + u32 num_recv_queues[N_IPU_FW_ISYS_QUEUE_TYPE]; +}; + +/** + * struct ipu_fw_isys_resolution_abi: Generic resolution structure. + * @Width + * @Height + */ +struct ipu_fw_isys_resolution_abi { + u32 width; + u32 height; +}; + +/** + * struct ipu_fw_isys_output_pin_payload_abi + * @out_buf_id: Points to output pin buffer - buffer identifier + * @addr: Points to output pin buffer - CSS Virtual Address + * @compress: Request frame compression (1), or not (0) + */ +struct ipu_fw_isys_output_pin_payload_abi { + u64 out_buf_id; + u32 addr; + u32 compress; +}; + +/** + * struct ipu_fw_isys_output_pin_info_abi + * @output_res: output pin resolution + * @stride: output stride in Bytes (not valid for statistics) + * @watermark_in_lines: pin watermark level in lines + * @payload_buf_size: minimum size in Bytes of all buffers that will be + * supplied for capture on this pin + * @send_irq: assert if pin event should trigger irq + * @pt: pin type -real format "enum ipu_fw_isys_pin_type" + * @ft: frame format type -real format "enum ipu_fw_isys_frame_format_type" + * @input_pin_id: related input pin id + * @reserve_compression: reserve compression resources for pin + */ +struct ipu_fw_isys_output_pin_info_abi { + struct ipu_fw_isys_resolution_abi output_res; + u32 stride; + u32 watermark_in_lines; + u32 payload_buf_size; + u8 send_irq; + u8 input_pin_id; + u8 pt; + u8 ft; + u8 reserved; + u8 reserve_compression; +}; + +/** + * struct ipu_fw_isys_param_pin_abi + * @param_buf_id: Points to param port buffer - buffer identifier + * @addr: Points to param pin buffer - CSS Virtual Address + */ +struct ipu_fw_isys_param_pin_abi { + u64 param_buf_id; + u32 addr; +}; + +/** + * struct ipu_fw_isys_input_pin_info_abi + * @input_res: input resolution + * @dt: mipi data type ((enum ipu_fw_isys_mipi_data_type) + * @mipi_store_mode: defines if legacy long packet header will be stored or + * discarded if discarded, output pin pin type for this + * input pin can only be MIPI + * (enum ipu_fw_isys_mipi_store_mode) + * @bits_per_pix: native bits per pixel + * @mapped_dt: actual data type from sensor +#if !defined(CONFIG_VIDEO_INTEL_IPU4) && !defined(CONFIG_VIDEO_INTEL_IPU4P) + * @crop_first_and_last_lines Control whether to crop the + * first and last line of the + * input image. Crop done by HW + * device. +#endif + */ +struct ipu_fw_isys_input_pin_info_abi { + struct ipu_fw_isys_resolution_abi input_res; + u8 dt; + u8 mipi_store_mode; + u8 bits_per_pix; + u8 mapped_dt; +#if !defined(CONFIG_VIDEO_INTEL_IPU4) && !defined(CONFIG_VIDEO_INTEL_IPU4P) + u8 crop_first_and_last_lines; +#endif +}; + +/** + * struct ipu_fw_isys_isa_cfg_abi. 
+ */
+struct ipu_fw_isys_isa_cfg_abi {
+	struct ipu_fw_isys_resolution_abi
+		isa_res[N_IPU_FW_ISYS_RESOLUTION_INFO];
+	struct {
+		unsigned int blc:1;
+		unsigned int lsc:1;
+		unsigned int dpc:1;
+		unsigned int downscaler:1;
+		unsigned int awb:1;
+		unsigned int af:1;
+		unsigned int ae:1;
+		unsigned int paf:8;
+		unsigned int send_irq_stats_ready:1;
+		unsigned int send_resp_stats_ready:1;
+	} cfg;
+};
+
+/**
+ * struct ipu_fw_isys_cropping_abi - cropping coordinates
+ */
+struct ipu_fw_isys_cropping_abi {
+	s32 top_offset;
+	s32 left_offset;
+	s32 bottom_offset;
+	s32 right_offset;
+};
+
+/**
+ * struct ipu_fw_isys_stream_cfg_data_abi
+ * ISYS stream configuration data structure
+ * @isa_cfg: details about what ACCs are active if ISA is used
+ * @crop: defines cropping resolution for the
+ *	  maximum number of input pins which can be cropped;
+ *	  it is directly mapped to the HW devices
+ * @input_pins: input pin descriptors
+ * @output_pins: output pin descriptors
+ * @compfmt: de-compression setting for User Defined Data
+ * @nof_input_pins: number of input pins
+ * @nof_output_pins: number of output pins
+ * @send_irq_sof_discarded: send irq on discarded frame sof response
+ *			    - if '1' it will override the send_resp_sof_discarded
+ *			      and send the response
+ *			    - if '0' the send_resp_sof_discarded will determine
+ *			      whether to send the response
+ * @send_irq_eof_discarded: send irq on discarded frame eof response
+ *			    - if '1' it will override the send_resp_eof_discarded
+ *			      and send the response
+ *			    - if '0' the send_resp_eof_discarded will determine
+ *			      whether to send the response
+ * @send_resp_sof_discarded: send response for discarded frame sof detected,
+ *			     used only when send_irq_sof_discarded is '0'
+ * @send_resp_eof_discarded: send response for discarded frame eof detected,
+ *			     used only when send_irq_eof_discarded is '0'
+ * @src: Stream source index, e.g. MIPI_generator_0, CSI2-rx_1
+ * @vc: MIPI Virtual Channel (up to 4 virtual per physical channel)
+ * @isl_use: indicates whether stream requires ISL and how
+ */
+struct ipu_fw_isys_stream_cfg_data_abi {
+	struct ipu_fw_isys_isa_cfg_abi isa_cfg;
+	struct ipu_fw_isys_cropping_abi crop[N_IPU_FW_ISYS_CROPPING_LOCATION];
+	struct ipu_fw_isys_input_pin_info_abi input_pins[IPU_MAX_IPINS];
+	struct ipu_fw_isys_output_pin_info_abi output_pins[IPU_MAX_OPINS];
+	u32 compfmt;
+	u8 nof_input_pins;
+	u8 nof_output_pins;
+	u8 send_irq_sof_discarded;
+	u8 send_irq_eof_discarded;
+	u8 send_resp_sof_discarded;
+	u8 send_resp_eof_discarded;
+	u8 src;
+	u8 vc;
+	u8 isl_use;
+};
+
+/**
+ * struct ipu_fw_isys_frame_buff_set_abi - frame buffer set
+ * @output_pins: output pin addresses
+ * @process_group_light: process_group_light buffer address
+ * @send_irq_sof: send irq on frame sof response
+ *		  - if '1' it will override the send_resp_sof and
+ *		    send the response
+ *		  - if '0' the send_resp_sof will determine whether to
+ *		    send the response
+ * @send_irq_eof: send irq on frame eof response
+ *		  - if '1' it will override the send_resp_eof and
+ *		    send the response
+ *		  - if '0' the send_resp_eof will determine whether to
+ *		    send the response
+#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P)
+ * @send_irq_capture_ack: send irq on capture ack response
+ * @send_irq_capture_done: send irq on capture done response
+#endif
+ * @send_resp_sof: send response for frame sof detected,
+ *		   used only when send_irq_sof is '0'
+ * @send_resp_eof: send response for frame eof detected,
+ *		   used only when send_irq_eof is '0'
+ */
+struct ipu_fw_isys_frame_buff_set_abi {
+	struct ipu_fw_isys_output_pin_payload_abi output_pins[IPU_MAX_OPINS];
+	struct ipu_fw_isys_param_pin_abi process_group_light;
+	u8 send_irq_sof;
+	u8 send_irq_eof;
+#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P)
+	u8 send_irq_capture_ack;
+	u8 send_irq_capture_done;
+#endif
+	u8 send_resp_sof;
+	u8 send_resp_eof;
+	u8 reserved;
+};
+
+/**
+ * struct ipu_fw_isys_error_info_abi
+ * @error: error code if something went wrong
+ * @error_details: depending on error code, it may contain additional error info
+ */
+struct ipu_fw_isys_error_info_abi {
+	enum ipu_fw_isys_error error;
+	u32 error_details;
+};
+
+/**
+ * struct ipu_fw_isys_resp_info_abi
+ * @buf_id: buffer identifier the response refers to
+ * @pin: this var is only valid for pin event related responses,
+ *	 contains pin addresses
+ * @process_group_light: this var is valid for stats ready related responses,
+ *			 contains process group addresses
+ * @error_info: error information from the FW
+ * @timestamp: Time information for event if available
+ * @stream_handle: stream id the response corresponds to
+ * @type: response type (enum ipu_fw_isys_resp_type)
+ * @pin_id: pin id that the pin payload corresponds to
+ * @acc_id: this var is valid for stats ready related responses,
+ *	    contains accelerator id that finished producing
+ *	    all related statistics
+ */
+struct ipu_fw_isys_resp_info_abi {
+	u64 buf_id;
+	struct ipu_fw_isys_output_pin_payload_abi pin;
+	struct ipu_fw_isys_param_pin_abi process_group_light;
+	struct ipu_fw_isys_error_info_abi error_info;
+	u32 timestamp[2];
+	u8 stream_handle;
+	u8 type;
+	u8 pin_id;
+	u8 acc_id;
+	u16 reserved;
+};
+
+/**
+ * struct ipu_fw_isys_proxy_error_info_abi
+ * @error: error code if something went wrong
+ * @error_details: depending on error code, it may contain additional
+ *		   error info
+ */
+struct ipu_fw_isys_proxy_error_info_abi {
+	enum ipu_fw_proxy_error error;
+	u32 error_details;
+};
+
+struct ipu_fw_isys_proxy_resp_info_abi {
+	u32 request_id;
+	struct ipu_fw_isys_proxy_error_info_abi error_info;
+};
+
+/**
+ * struct ipu_fw_proxy_write_queue_token - proxy register write request
+ * @request_id: update id for the specific proxy write request
+ * @region_index: Region id for the proxy write request
+ * @offset: Offset of the write request according to the base address
+ *	    of the region
+ * @value: Value that is requested to be written with the proxy write request
+ */
+struct ipu_fw_proxy_write_queue_token {
+	u32 request_id;
+	u32 region_index;
+	u32 offset;
+	u32 value;
+};
+
+/* From here on, type definitions that do not come from the ISYSAPI interface */
+
+/**
+ * struct ipu_fw_resp_queue_token - response queue element
+ */
+struct ipu_fw_resp_queue_token {
+	struct ipu_fw_isys_resp_info_abi resp_info;
+};
+
+/**
+ * struct ipu_fw_send_queue_token - send queue element
+ */
+struct ipu_fw_send_queue_token {
+	u64 buf_handle;
+	u32 payload;
+	u16 send_type;
+	u16 stream_id;
+};
+
+/**
+ * struct ipu_fw_proxy_resp_queue_token - proxy response queue element
+ */
+struct ipu_fw_proxy_resp_queue_token {
+	struct ipu_fw_isys_proxy_resp_info_abi proxy_resp_info;
+};
+
+/**
+ * struct ipu_fw_proxy_send_queue_token - proxy send queue element
+ */
+struct ipu_fw_proxy_send_queue_token {
+	u32 request_id;
+	u32 region_index;
+	u32 offset;
+	u32 value;
+};
+
+void ipu_fw_isys_set_params(struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg);
+
+void ipu_fw_isys_dump_stream_cfg(struct device *dev,
+				 struct ipu_fw_isys_stream_cfg_data_abi
+				 *stream_cfg);
+void ipu_fw_isys_dump_frame_buff_set(struct device *dev,
+				     struct ipu_fw_isys_frame_buff_set_abi *buf,
+				     unsigned int outputs);
+#ifndef CONFIG_VIDEO_INTEL_ICI
+int ipu_fw_isys_init(struct ipu_isys *isys, unsigned int num_streams);
+int ipu_fw_isys_close(struct ipu_isys *isys);
+int ipu_fw_isys_simple_cmd(struct ipu_isys *isys,
+			   const unsigned int stream_handle,
+			   enum ipu_fw_isys_send_type send_type);
+int ipu_fw_isys_complex_cmd(struct ipu_isys *isys,
+			    const unsigned int stream_handle,
+			    void *cpu_mapped_buf,
+			    dma_addr_t dma_mapped_buf,
+			    size_t size, enum ipu_fw_isys_send_type send_type);
+int ipu_fw_isys_send_proxy_token(struct ipu_isys *isys,
+				 unsigned int req_id,
+				 unsigned int index,
+				 unsigned int offset, u32 value);
+void ipu_fw_isys_cleanup(struct ipu_isys *isys);
+#else
+struct ici_isys;
+int ipu_fw_isys_init(struct ici_isys *isys, unsigned int num_streams);
+int ipu_fw_isys_close(struct ici_isys *isys);
+int ipu_fw_isys_simple_cmd(struct ici_isys *isys,
+			   const unsigned int stream_handle,
+			   enum ipu_fw_isys_send_type send_type);
+int ipu_fw_isys_complex_cmd(struct ici_isys *isys,
+			    const unsigned int stream_handle,
+			    void *cpu_mapped_buf,
+			    dma_addr_t dma_mapped_buf,
+			    size_t size, enum ipu_fw_isys_send_type send_type);
+int ipu_fw_isys_send_proxy_token(struct ici_isys *isys,
+				 unsigned int req_id,
+				 unsigned int index,
+				 unsigned int offset, u32 value);
+void ipu_fw_isys_cleanup(struct ici_isys *isys);
+#endif
+struct ipu_fw_isys_resp_info_abi *ipu_fw_isys_get_resp(void *context,
+						       unsigned int queue,
+						       struct
+						       ipu_fw_isys_resp_info_abi
+						       *response);
+void ipu_fw_isys_put_resp(void *context, unsigned int queue);
+#endif
diff --git a/drivers/media/pci/intel/ipu-fw-psys.c b/drivers/media/pci/intel/ipu-fw-psys.c
new file mode 100644
index 000000000000..5f0f601170ac
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-fw-psys.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2016 - 2018 Intel Corporation
+
+#include 
+
+#include 
+
+#include "ipu-fw-com.h"
+#include "ipu-fw-psys.h"
+#include "ipu-psys.h"
+
+int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd)
+{
+	kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_STARTED;
+	return 0;
+}
+
+int ipu_fw_psys_pg_load_cycles(struct 
ipu_psys_kcmd *kcmd) +{ + return 0; +} + +int ipu_fw_psys_pg_init_cycles(struct ipu_psys_kcmd *kcmd) +{ + return 0; +} + +int ipu_fw_psys_pg_processing_cycles(struct ipu_psys_kcmd *kcmd) +{ + return 0; +} + +int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd) +{ + struct ipu_fw_psys_cmd *psys_cmd; + int ret = 0; + + psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, 0); + if (!psys_cmd) { + dev_err(&kcmd->fh->psys->adev->dev, "failed to get token!\n"); + kcmd->pg_user = NULL; + ret = -ENODATA; + goto out; + } + psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_START; + psys_cmd->msg = 0; + psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; + ipu_send_put_token(kcmd->fh->psys->fwcom, 0); + +out: + return ret; +} + + +int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd) +{ + struct ipu_fw_psys_cmd *psys_cmd; + int ret = 0; + + psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, 0); + if (!psys_cmd) { + dev_err(&kcmd->fh->psys->adev->dev, "failed to get token!\n"); + kcmd->pg_user = NULL; + ret = -ENODATA; + goto out; + } + psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP; + psys_cmd->msg = 0; + psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address; + ipu_send_put_token(kcmd->fh->psys->fwcom, 0); + +out: + return ret; +} + +int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd) +{ + kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_BLOCKED; + return 0; +} + +int ipu_fw_psys_rcv_event(struct ipu_psys *psys, + struct ipu_fw_psys_event *event) +{ + void *rcv; + + rcv = ipu_recv_get_token(psys->fwcom, 0); + if (!rcv) + return 0; + + memcpy(event, rcv, sizeof(*event)); + ipu_recv_put_token(psys->fwcom, 0); + return 1; +} + +int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, + int terminal_idx, + struct ipu_psys_kcmd *kcmd, + u32 buffer, unsigned int size) +{ + u32 type; + u32 buffer_state; + + type = terminal->terminal_type; + + switch (type) { + case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN: + case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT: + case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN: + case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT: + case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM: + buffer_state = IPU_FW_PSYS_BUFFER_UNDEFINED; + break; + case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM: + case IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN: + case IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN: + buffer_state = IPU_FW_PSYS_BUFFER_FULL; + break; + case IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT: + case IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT: + buffer_state = IPU_FW_PSYS_BUFFER_EMPTY; + break; + default: + dev_err(&kcmd->fh->psys->adev->dev, + "unknown terminal type: 0x%x\n", type); + return -EAGAIN; + } + + if (type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN || + type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT) { + struct ipu_fw_psys_data_terminal *dterminal = + (struct ipu_fw_psys_data_terminal *)terminal; + dterminal->connection_type = IPU_FW_PSYS_CONNECTION_MEMORY; + dterminal->frame.data_bytes = size; + if (!ipu_fw_psys_pg_get_protocol(kcmd)) + dterminal->frame.data = buffer; + else + dterminal->frame.data_index = terminal_idx; + dterminal->frame.buffer_state = buffer_state; + } else { + struct ipu_fw_psys_param_terminal *pterminal = + (struct ipu_fw_psys_param_terminal *)terminal; + if (!ipu_fw_psys_pg_get_protocol(kcmd)) + pterminal->param_payload.buffer = buffer; + else + pterminal->param_payload.terminal_index = terminal_idx; + } + return 0; +} + +static int process_get_cell(struct 
ipu_fw_psys_process *process, int index) +{ + int cell; + +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + cell = process->cell_id; +#else + cell = process->cells[index]; +#endif + return cell; +} + +static u32 process_get_cells_bitmap(struct ipu_fw_psys_process *process) +{ + unsigned int i; + int cell_id; + u32 bitmap = 0; + + for (i = 0; i < IPU_FW_PSYS_PROCESS_MAX_CELLS; i++) { + cell_id = process_get_cell(process, i); + if (cell_id != IPU_FW_PSYS_N_CELL_ID) + bitmap |= (1 << cell_id); + } + return bitmap; +} + +void ipu_fw_psys_pg_dump(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd, const char *note) +{ + struct ipu_fw_psys_process_group *pg = kcmd->kpg->pg; + u32 pgid = pg->ID; + u8 processes = pg->process_count; + u16 *process_offset_table = (u16 *)((char *)pg + pg->processes_offset); + unsigned int p, chn, mem, mem_id; + int cell; + + dev_dbg(&psys->adev->dev, "%s %s pgid %i has %i processes:\n", + __func__, note, pgid, processes); + + for (p = 0; p < processes; p++) { + struct ipu_fw_psys_process *process = + (struct ipu_fw_psys_process *) + ((char *)pg + process_offset_table[p]); + cell = process_get_cell(process, 0); + dev_dbg(&psys->adev->dev, "\t process %i size=%u", + p, process->size); + dev_dbg(&psys->adev->dev, + "\t cell %i cell_bitmap=0x%x kernel_bitmap 0x%llx", + cell, process_get_cells_bitmap(process), + (u64) process->kernel_bitmap[1] << 32 | + (u64) process->kernel_bitmap[0]); + for (mem = 0; mem < IPU_FW_PSYS_N_DATA_MEM_TYPE_ID; mem++) { + mem_id = process->ext_mem_id[mem]; + if (mem_id != IPU_FW_PSYS_N_MEM_ID) + dev_dbg(&psys->adev->dev, + "\t mem type %u id %d offset=0x%x", + mem, mem_id, + process->ext_mem_offset[mem]); + } + for (chn = 0; chn < IPU_FW_PSYS_N_DEV_CHN_ID; chn++) { + if (process->dev_chn_offset[chn] != (u16)(-1)) + dev_dbg(&psys->adev->dev, + "\t dev_chn[%u]=0x%x\n", + chn, process->dev_chn_offset[chn]); + } + } +} + +int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd) +{ + return kcmd->kpg->pg->ID; +} + +int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd) +{ + return kcmd->kpg->pg->terminal_count; +} + +int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd) +{ + return kcmd->kpg->pg->size; +} + +int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, + dma_addr_t vaddress) +{ + kcmd->kpg->pg->ipu_virtual_address = vaddress; + return 0; +} + +struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd + *kcmd, int index) +{ + struct ipu_fw_psys_terminal *terminal; + u16 *terminal_offset_table; + + terminal_offset_table = + (uint16_t *)((char *)kcmd->kpg->pg + + kcmd->kpg->pg->terminals_offset); + terminal = (struct ipu_fw_psys_terminal *) + ((char *)kcmd->kpg->pg + terminal_offset_table[index]); + return terminal; +} + +void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token) +{ + kcmd->kpg->pg->token = (u64)token; +} + +u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd) +{ + return kcmd->kpg->pg->token; +} + +int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd) +{ + return kcmd->kpg->pg->protocol_version; +} + + +int ipu_fw_psys_open(struct ipu_psys *psys) +{ + int retry = IPU_PSYS_OPEN_RETRY, retval; + + retval = ipu_fw_com_open(psys->fwcom); + if (retval) { + dev_err(&psys->adev->dev, "fw com open failed.\n"); + return retval; + } + + do { + usleep_range(IPU_PSYS_OPEN_TIMEOUT_US, + IPU_PSYS_OPEN_TIMEOUT_US + 10); + retval = ipu_fw_com_ready(psys->fwcom); + if (!retval) { + dev_dbg(&psys->adev->dev, "psys port open ready!\n"); + break; + } + } 
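+	/* poll up to IPU_PSYS_OPEN_RETRY times; retry is -1 if exhausted */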
while (retry-- > 0);
+
+	if (retry < 0 && retval) {
+		dev_err(&psys->adev->dev, "psys port open ready failed %d\n",
+			retval);
+		ipu_fw_com_close(psys->fwcom);
+		return retval;
+	}
+	return 0;
+}
+
+int ipu_fw_psys_close(struct ipu_psys *psys)
+{
+	int retval;
+
+	retval = ipu_fw_com_close(psys->fwcom);
+	if (retval) {
+		dev_err(&psys->adev->dev, "fw com close failed.\n");
+		return retval;
+	}
+	return 0;
+}
diff --git a/drivers/media/pci/intel/ipu-fw-psys.h b/drivers/media/pci/intel/ipu-fw-psys.h
new file mode 100644
index 000000000000..187a20a962ea
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-fw-psys.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2016 - 2018 Intel Corporation */
+
+#ifndef IPU_FW_PSYS_H
+#define IPU_FW_PSYS_H
+
+#include "ipu-platform-resources.h"
+
+/* ia_css_psys_device.c */
+#define IPU_FW_PSYS_CMD_QUEUE_SIZE	0x20
+#define IPU_FW_PSYS_EVENT_QUEUE_SIZE	0x40
+
+/* ia_css_psys_transport.h */
+#define IPU_FW_PSYS_CMD_BITS	64
+#define IPU_FW_PSYS_EVENT_BITS	128
+
+/* ia_css_psys_transport.h */
+enum {
+	IPU_FW_PSYS_EVENT_TYPE_SUCCESS = 0,
+	IPU_FW_PSYS_EVENT_TYPE_UNKNOWN_ERROR = 1,
+	IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NOT_FOUND = 2,
+	IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_TOO_BIG = 3,
+	IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_DDR_TRANS_ERR = 4,
+	IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NULL_PKG_DIR_ADDR = 5,
+	IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAME_ERR = 6,
+	IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAGMENT_ERR = 7,
+	IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_COUNT_ZERO = 8,
+	IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_INIT_ERR = 9,
+	IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_ABORT = 10,
+	IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_NULL = 11,
+	IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_VALIDATION_ERR = 12
+};
+
+enum {
+	IPU_FW_PSYS_EVENT_QUEUE_MAIN_ID,
+	IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID
+};
+
+enum {
+	IPU_FW_PSYS_PROCESS_GROUP_ERROR = 0,
+	IPU_FW_PSYS_PROCESS_GROUP_CREATED,
+	IPU_FW_PSYS_PROCESS_GROUP_READY,
+	IPU_FW_PSYS_PROCESS_GROUP_BLOCKED,
+	IPU_FW_PSYS_PROCESS_GROUP_STARTED,
+	IPU_FW_PSYS_PROCESS_GROUP_RUNNING,
+	IPU_FW_PSYS_PROCESS_GROUP_STALLED,
+	IPU_FW_PSYS_PROCESS_GROUP_STOPPED,
+	IPU_FW_PSYS_N_PROCESS_GROUP_STATES
+};
+
+enum {
+	IPU_FW_PSYS_CONNECTION_MEMORY = 0,
+	IPU_FW_PSYS_CONNECTION_MEMORY_STREAM,
+	IPU_FW_PSYS_CONNECTION_STREAM,
+	IPU_FW_PSYS_N_CONNECTION_TYPES
+};
+
+enum {
+	IPU_FW_PSYS_BUFFER_NULL = 0,
+	IPU_FW_PSYS_BUFFER_UNDEFINED,
+	IPU_FW_PSYS_BUFFER_EMPTY,
+	IPU_FW_PSYS_BUFFER_NONEMPTY,
+	IPU_FW_PSYS_BUFFER_FULL,
+	IPU_FW_PSYS_N_BUFFER_STATES
+};
+
+enum {
+	IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN = 0,
+	IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT,
+	IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM,
+	IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN,
+	IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT,
+	IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN,
+	IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT,
+	IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN,
+	IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT,
+	IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN,
+	IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT,
+	IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM,
+	IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT,
+	IPU_FW_PSYS_N_TERMINAL_TYPES
+};
+
+enum {
+	IPU_FW_PSYS_COL_DIMENSION = 0,
+	IPU_FW_PSYS_ROW_DIMENSION = 1,
+	IPU_FW_PSYS_N_DATA_DIMENSION = 2
+};
+
+enum {
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_NOP = 0,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_SUBMIT,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_ATTACH,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_DETACH,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_START,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_DISOWN,
+	IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN,
+	
IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP, + IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND, + IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME, + IPU_FW_PSYS_PROCESS_GROUP_CMD_ABORT, + IPU_FW_PSYS_PROCESS_GROUP_CMD_RESET, + IPU_FW_PSYS_N_PROCESS_GROUP_CMDS +}; + +enum { + IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_LEGACY = 0, + IPU_FW_PSYS_PROCESS_GROUP_N_PROTOCOLS +}; + +/* ia_css_psys_process_group_cmd_impl.h */ +struct __packed ipu_fw_psys_process_group { + u64 token; + u64 private_token; + u32 routing_bitmap[IPU_FW_PSYS_RBM_NOF_ELEMS]; + u32 size; + u32 pg_load_start_ts; + u32 pg_load_cycles; + u32 pg_init_cycles; + u32 pg_processing_cycles; + u32 ID; + u32 state; + u32 ipu_virtual_address; + u32 resource_bitmap; + u16 fragment_count; + u16 fragment_state; + u16 fragment_limit; + u16 processes_offset; + u16 terminals_offset; + u8 process_count; + u8 terminal_count; + u8 subgraph_count; + u8 protocol_version; + u8 base_queue_id; + u8 num_queues; + u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT]; +}; + +/* ia_css_psys_init.h */ +struct ipu_fw_psys_srv_init { + void *host_ddr_pkg_dir; + u32 ddr_pkg_dir_address; + u32 pkg_dir_size; + + u32 icache_prefetch_sp; + u32 icache_prefetch_isp; +}; + +/* ia_css_psys_transport.h */ +struct __packed ipu_fw_psys_cmd { + u16 command; + u16 msg; + u32 context_handle; +}; + +struct __packed ipu_fw_psys_event { + u16 status; + u16 command; + u32 context_handle; + u64 token; +}; + +/* ia_css_terminal_base_types.h */ +struct ipu_fw_psys_terminal { + u32 terminal_type; + s16 parent_offset; + u16 size; + u16 tm_index; + u8 ID; + u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT]; +}; + +/* ia_css_terminal_types.h */ +struct ipu_fw_psys_param_payload { + u64 host_buffer; + u32 buffer; + u32 terminal_index; +}; + +/* ia_css_program_group_param_types.h */ +struct ipu_fw_psys_param_terminal { + struct ipu_fw_psys_terminal base; + struct ipu_fw_psys_param_payload param_payload; + u16 param_section_desc_offset; + u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT]; +}; + +struct ipu_fw_psys_frame { + u32 buffer_state; + u32 access_type; + u32 pointer_state; + u32 access_scope; + u32 data; + u32 data_index; + u32 data_bytes; + u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT]; +}; + +/* ia_css_program_group_data.h */ +struct ipu_fw_psys_frame_descriptor { + u32 frame_format_type; + u32 plane_count; + u32 plane_offsets[IPU_FW_PSYS_N_FRAME_PLANES]; + u32 stride[1]; + u16 dimension[2]; + u16 size; + u8 bpp; + u8 bpe; + u8 is_compressed; + u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT]; +}; + +struct ipu_fw_psys_stream { + u64 dummy; +}; + +/* ia_css_psys_terminal_private_types.h */ +struct ipu_fw_psys_data_terminal { + struct ipu_fw_psys_terminal base; + struct ipu_fw_psys_frame_descriptor frame_descriptor; + struct ipu_fw_psys_frame frame; + struct ipu_fw_psys_stream stream; + u32 reserved; + u32 connection_type; + u16 fragment_descriptor_offset; + u8 kernel_id; + u8 subgraph_id; + u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT]; +}; + +/* ia_css_psys_buffer_set.h */ +struct ipu_fw_psys_buffer_set { + u64 token; + u32 kernel_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; + u32 ipu_virtual_address; + u32 process_group_handle; + u16 terminal_count; + u8 frame_counter; + u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT]; +}; + +struct ipu_fw_psys_program_group_manifest { + u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; + u32 ID; + u16 program_manifest_offset; + u16 terminal_manifest_offset; + u16 
private_data_offset; + u16 rbm_manifest_offset; + u16 size; + u8 alignment; + u8 kernel_count; + u8 program_count; + u8 terminal_count; + u8 subgraph_count; + u8 reserved[5]; +}; + +struct ipu_fw_generic_program_manifest { + u16 *dev_chn_size; + u16 *dev_chn_offset; + u16 *ext_mem_size; + u16 *ext_mem_offset; + u8 cell_id; + u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS]; + u8 cell_type_id; + u8 *is_dfm_relocatable; + u32 *dfm_port_bitmap; + u32 *dfm_active_port_bitmap; +}; + +struct ipu_fw_generic_process { + u16 ext_mem_id; + u16 ext_mem_offset; + u16 dev_chn_offset; + u16 cell_id; + u16 dfm_port_bitmap; + u16 dfm_active_port_bitmap; +}; + +struct ipu_fw_resource_definitions { + u32 num_cells; + u32 num_cells_type; + const u32 *cells; + u32 num_dev_channels; + const u16 *dev_channels; + + u32 num_ext_mem_types; + u32 num_ext_mem_ids; + const u16 *ext_mem_ids; + + u32 num_dfm_ids; + const u16 *dfms; + + u32 cell_mem_row; + const enum ipu_mem_id *cell_mem; + struct ipu_fw_generic_process process; +}; + +struct ipu_psys_kcmd; +struct ipu_psys; +int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_load_cycles(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_init_cycles(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_processing_cycles(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_rcv_event(struct ipu_psys *psys, + struct ipu_fw_psys_event *event); +int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, + int terminal_idx, + struct ipu_psys_kcmd *kcmd, + u32 buffer, unsigned int size); +void ipu_fw_psys_pg_dump(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd, const char *note); +int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, + dma_addr_t vaddress); +struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd + *kcmd, int index); +void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token); +u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd); +int ipu_fw_psys_open(struct ipu_psys *psys); +int ipu_fw_psys_close(struct ipu_psys *psys); + +/* common resource interface for both abi and api mode */ +int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, + u8 value); +u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index); +int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr); +int ipu_fw_psys_set_process_dev_chn_offset(struct ipu_fw_psys_process *ptr, + u16 offset, u16 value); +int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, + u16 type_id, u16 mem_id, u16 offset); +int ipu_fw_psys_get_program_manifest_by_process( + struct ipu_fw_generic_program_manifest *gen_pm, + const struct ipu_fw_psys_program_group_manifest *pg_manifest, + struct ipu_fw_psys_process *process); +#endif /* IPU_FW_PSYS_H */ diff --git a/drivers/media/pci/intel/ipu-isys-csi2-be-soc.c b/drivers/media/pci/intel/ipu-isys-csi2-be-soc.c new file mode 100644 index 000000000000..8648b5f755ca --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-csi2-be-soc.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2014 - 2018 Intel Corporation + +#include 
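+/*
+ * A minimal usage sketch of the ipu-fw-psys process-group helpers,
+ * assuming a populated struct ipu_psys_kcmd *kcmd; the exact ordering
+ * is an assumption based on the state names, not mandated by the API:
+ *
+ *	ipu_fw_psys_pg_set_ipu_vaddress(kcmd, dma_addr);
+ *	ipu_fw_psys_pg_set_token(kcmd, token);
+ *	ipu_fw_psys_pg_submit(kcmd);	- PG state becomes BLOCKED
+ *	ipu_fw_psys_pg_start(kcmd);	- PG state becomes STARTED
+ *	ipu_fw_psys_pg_disown(kcmd);	- queues CMD_START to the FW
+ */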
+#include + +#include +#include +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-isys.h" +#include "ipu-isys-csi2-be.h" +#include "ipu-isys-subdev.h" +#include "ipu-isys-video.h" + +/* + * Raw bayer format pixel order MUST BE MAINTAINED in groups of four codes. + * Otherwise pixel order calculation below WILL BREAK! + */ +static const u32 csi2_be_soc_supported_codes_pad[] = { + MEDIA_BUS_FMT_Y10_1X10, + MEDIA_BUS_FMT_RGB565_1X16, + MEDIA_BUS_FMT_RGB888_1X24, + MEDIA_BUS_FMT_UYVY8_1X16, + MEDIA_BUS_FMT_YUYV8_1X16, + MEDIA_BUS_FMT_SBGGR14_1X14, + MEDIA_BUS_FMT_SGBRG14_1X14, + MEDIA_BUS_FMT_SGRBG14_1X14, + MEDIA_BUS_FMT_SRGGB14_1X14, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + 0, +}; + +static const u32 *csi2_be_soc_supported_codes[NR_OF_CSI2_BE_SOC_PADS]; + +static struct v4l2_subdev_internal_ops csi2_be_soc_sd_internal_ops = { + .open = ipu_isys_subdev_open, + .close = ipu_isys_subdev_close, +}; + +static const struct v4l2_subdev_core_ops csi2_be_soc_sd_core_ops = { +}; + +static int set_stream(struct v4l2_subdev *sd, int enable) +{ + return 0; +} + +static const struct v4l2_subdev_video_ops csi2_be_soc_sd_video_ops = { + .s_stream = set_stream, +}; + +static int +__subdev_link_validate(struct v4l2_subdev *sd, struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt) +{ + struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe, + struct ipu_isys_pipeline, + pipe); + + ip->csi2_be_soc = to_ipu_isys_csi2_be_soc(sd); + return ipu_isys_subdev_link_validate(sd, link, source_fmt, sink_fmt); +} + +static int +ipu_isys_csi2_be_soc_set_sel(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + struct media_pad *pad = &asd->sd.entity.pads[sel->pad]; + + if (sel->target == V4L2_SEL_TGT_CROP && + pad->flags & MEDIA_PAD_FL_SOURCE && + asd->valid_tgts[sel->pad].crop) { + struct v4l2_rect *r; + unsigned int sink_pad = 0; + int i; + + for (i = 0; i < asd->nstreams; i++) { + if (!(asd->route[i].flags & + V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + continue; + if (asd->route[i].source == sel->pad) { + sink_pad = asd->route[i].sink; + break; + } + } + + if (i == asd->nstreams) { + dev_dbg(&asd->isys->adev->dev, "No sink pad routed.\n"); + return -EINVAL; + } + r = __ipu_isys_get_selection(sd, cfg, sel->target, + sink_pad, sel->which); + + /* Cropping is not supported by SoC BE. + * Only horizontal padding is allowed. 
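+		 * The sink rectangle is therefore passed through unchanged
+		 * apart from the width, which may only grow (up to
+		 * IPU_ISYS_MAX_WIDTH).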
+ */ + sel->r.top = r->top; + sel->r.left = r->left; + sel->r.width = clamp(sel->r.width, r->width, + IPU_ISYS_MAX_WIDTH); + sel->r.height = r->height; + + *__ipu_isys_get_selection(sd, cfg, sel->target, sel->pad, + sel->which) = sel->r; + ipu_isys_subdev_fmt_propagate(sd, cfg, NULL, &sel->r, + IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP, + sel->pad, sel->which); + return 0; + } + return -EINVAL; +} + +static const struct v4l2_subdev_pad_ops csi2_be_soc_sd_pad_ops = { + .link_validate = __subdev_link_validate, + .get_fmt = ipu_isys_subdev_get_ffmt, + .set_fmt = ipu_isys_subdev_set_ffmt, + .get_selection = ipu_isys_subdev_get_sel, + .set_selection = ipu_isys_csi2_be_soc_set_sel, + .enum_mbus_code = ipu_isys_subdev_enum_mbus_code, + .set_routing = ipu_isys_subdev_set_routing, + .get_routing = ipu_isys_subdev_get_routing, +}; + +static struct v4l2_subdev_ops csi2_be_soc_sd_ops = { + .core = &csi2_be_soc_sd_core_ops, + .video = &csi2_be_soc_sd_video_ops, + .pad = &csi2_be_soc_sd_pad_ops, +}; + +static struct media_entity_operations csi2_be_soc_entity_ops = { + .link_validate = v4l2_subdev_link_validate, + .has_route = ipu_isys_subdev_has_route, +}; + +static void csi2_be_soc_set_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt) +{ + struct v4l2_mbus_framefmt *ffmt = + __ipu_isys_get_ffmt(sd, cfg, fmt->pad, + fmt->stream, + fmt->which); + if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SINK) { + if (fmt->format.field != V4L2_FIELD_ALTERNATE) + fmt->format.field = V4L2_FIELD_NONE; + *ffmt = fmt->format; + + ipu_isys_subdev_fmt_propagate(sd, cfg, &fmt->format, + NULL, + IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, + fmt->pad, fmt->which); + } else if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SOURCE) { + struct v4l2_mbus_framefmt *sink_ffmt; + struct v4l2_rect *r; + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + unsigned int sink_pad = 0; + int i; + + for (i = 0; i < asd->nsinks; i++) + if (media_entity_has_route(&sd->entity, fmt->pad, i)) + break; + if (i != asd->nsinks) + sink_pad = i; + sink_ffmt = __ipu_isys_get_ffmt(sd, cfg, sink_pad, + fmt->stream, + fmt->which); + r = __ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_CROP, + fmt->pad, fmt->which); + + ffmt->width = r->width; + ffmt->height = r->height; + ffmt->code = sink_ffmt->code; + ffmt->field = sink_ffmt->field; + } +} + +void ipu_isys_csi2_be_soc_cleanup(struct ipu_isys_csi2_be_soc *csi2_be_soc) +{ + int i; + + v4l2_device_unregister_subdev(&csi2_be_soc->asd.sd); + ipu_isys_subdev_cleanup(&csi2_be_soc->asd); + for (i = 0; i < NR_OF_CSI2_BE_SOC_STREAMS; i++) + ipu_isys_video_cleanup(&csi2_be_soc->av[i]); +} + +int ipu_isys_csi2_be_soc_init(struct ipu_isys_csi2_be_soc *csi2_be_soc, + struct ipu_isys *isys) +{ + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = CSI2_BE_SOC_PAD_SINK(0), + .format = { + .width = 4096, + .height = 3072, + }, + }; + int rval, i; + + csi2_be_soc->asd.sd.entity.ops = &csi2_be_soc_entity_ops; + csi2_be_soc->asd.isys = isys; + + rval = ipu_isys_subdev_init(&csi2_be_soc->asd, + &csi2_be_soc_sd_ops, 0, + NR_OF_CSI2_BE_SOC_PADS, + NR_OF_CSI2_BE_SOC_STREAMS, + NR_OF_CSI2_BE_SOC_SOURCE_PADS, + NR_OF_CSI2_BE_SOC_SINK_PADS, 0); + if (rval) + goto fail; + + for (i = CSI2_BE_SOC_PAD_SINK(0); i < NR_OF_CSI2_BE_SOC_SINK_PADS; i++) + csi2_be_soc->asd.pad[i].flags = MEDIA_PAD_FL_SINK; + + for (i = CSI2_BE_SOC_PAD_SOURCE(0); + i < 
NR_OF_CSI2_BE_SOC_SOURCE_PADS + CSI2_BE_SOC_PAD_SOURCE(0); + i++) { + csi2_be_soc->asd.pad[i].flags = MEDIA_PAD_FL_SOURCE; + csi2_be_soc->asd.valid_tgts[i].crop = true; + } + + for (i = 0; i < NR_OF_CSI2_BE_SOC_PADS; i++) + csi2_be_soc_supported_codes[i] = + csi2_be_soc_supported_codes_pad; + csi2_be_soc->asd.supported_codes = csi2_be_soc_supported_codes; + csi2_be_soc->asd.be_mode = IPU_BE_SOC; + csi2_be_soc->asd.isl_mode = IPU_ISL_OFF; + csi2_be_soc->asd.set_ffmt = csi2_be_soc_set_ffmt; + + for (i = CSI2_BE_SOC_PAD_SINK(0); i < NR_OF_CSI2_BE_SOC_SINK_PADS; + i++) { + fmt.pad = CSI2_BE_SOC_PAD_SINK(i); + ipu_isys_subdev_set_ffmt(&csi2_be_soc->asd.sd, NULL, &fmt); + } + + ipu_isys_subdev_set_ffmt(&csi2_be_soc->asd.sd, NULL, &fmt); + csi2_be_soc->asd.sd.internal_ops = &csi2_be_soc_sd_internal_ops; + + snprintf(csi2_be_soc->asd.sd.name, sizeof(csi2_be_soc->asd.sd.name), + IPU_ISYS_ENTITY_PREFIX " CSI2 BE SOC"); + + v4l2_set_subdevdata(&csi2_be_soc->asd.sd, &csi2_be_soc->asd); + + mutex_lock(&csi2_be_soc->asd.mutex); + rval = v4l2_device_register_subdev(&isys->v4l2_dev, + &csi2_be_soc->asd.sd); + if (rval) { + dev_info(&isys->adev->dev, "can't register v4l2 subdev\n"); + goto fail; + } + + /* create default route information */ + for (i = 0; i < NR_OF_CSI2_BE_SOC_STREAMS; i++) { + csi2_be_soc->asd.route[i].sink = CSI2_BE_SOC_PAD_SINK(i); + csi2_be_soc->asd.route[i].source = CSI2_BE_SOC_PAD_SOURCE(i); + csi2_be_soc->asd.route[i].flags = 0; + } + + for (i = 0; i < NR_OF_CSI2_BE_SOC_SOURCE_PADS; i++) { + csi2_be_soc->asd.stream[CSI2_BE_SOC_PAD_SINK(i)].stream_id[0] + = 0; + csi2_be_soc->asd.stream[CSI2_BE_SOC_PAD_SOURCE(i)].stream_id[0] + = 0; + } + for (i = 0; i < NR_OF_CSI2_BE_SOC_STREAMS; i++) { + csi2_be_soc->asd.route[i].flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE | + V4L2_SUBDEV_ROUTE_FL_IMMUTABLE; + bitmap_set(csi2_be_soc->asd.stream[CSI2_BE_SOC_PAD_SINK(i)]. + streams_stat, 0, 1); + bitmap_set(csi2_be_soc->asd.stream[CSI2_BE_SOC_PAD_SOURCE(i)]. 
+ streams_stat, 0, 1); + } + mutex_unlock(&csi2_be_soc->asd.mutex); + for (i = 0; i < NR_OF_CSI2_BE_SOC_SOURCE_PADS; i++) { + snprintf(csi2_be_soc->av[i].vdev.name, + sizeof(csi2_be_soc->av[i].vdev.name), + IPU_ISYS_ENTITY_PREFIX " BE SOC capture %d", i); + csi2_be_soc->av[i].aq.css_pin_type = + IPU_FW_ISYS_PIN_TYPE_RAW_SOC; + csi2_be_soc->av[i].isys = isys; + csi2_be_soc->av[i].pfmts = ipu_isys_pfmts_be_soc; + + csi2_be_soc->av[i].try_fmt_vid_mplane = + ipu_isys_video_try_fmt_vid_mplane_default; + csi2_be_soc->av[i].prepare_firmware_stream_cfg = + ipu_isys_prepare_firmware_stream_cfg_default; + csi2_be_soc->av[i].aq.buf_prepare = ipu_isys_buf_prepare; + csi2_be_soc->av[i].aq.fill_frame_buff_set_pin = + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin; + csi2_be_soc->av[i].aq.link_fmt_validate = + ipu_isys_link_fmt_validate; + csi2_be_soc->av[i].aq.vbq.buf_struct_size = + sizeof(struct ipu_isys_video_buffer); + + rval = ipu_isys_video_init(&csi2_be_soc->av[i], + &csi2_be_soc->asd.sd.entity, + CSI2_BE_SOC_PAD_SOURCE(i), + MEDIA_PAD_FL_SINK, + MEDIA_LNK_FL_DYNAMIC); + if (rval) { + dev_info(&isys->adev->dev, "can't init video node\n"); + goto fail; + } + } + + return 0; + +fail: + ipu_isys_csi2_be_soc_cleanup(csi2_be_soc); + + return rval; +} diff --git a/drivers/media/pci/intel/ipu-isys-csi2-be.c b/drivers/media/pci/intel/ipu-isys-csi2-be.c new file mode 100644 index 000000000000..deaf2a55362a --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-csi2-be.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2014 - 2018 Intel Corporation + +#include +#include + +#include +#include +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-isys.h" +#include "ipu-isys-csi2-be.h" +#include "ipu-isys-subdev.h" +#include "ipu-isys-video.h" + +/* + * Raw bayer format pixel order MUST BE MAINTAINED in groups of four codes. + * Otherwise pixel order calculation below WILL BREAK! 
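+ * (csi2_be_set_ffmt() below exploits the grouping: it XORs the code
+ * index within a group of four to pick the pixel order matching an odd
+ * crop offset.)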
+ */
+static const u32 csi2_be_supported_codes_pad[] = {
+	MEDIA_BUS_FMT_SBGGR14_1X14,
+	MEDIA_BUS_FMT_SGBRG14_1X14,
+	MEDIA_BUS_FMT_SGRBG14_1X14,
+	MEDIA_BUS_FMT_SRGGB14_1X14,
+	MEDIA_BUS_FMT_SBGGR12_1X12,
+	MEDIA_BUS_FMT_SGBRG12_1X12,
+	MEDIA_BUS_FMT_SGRBG12_1X12,
+	MEDIA_BUS_FMT_SRGGB12_1X12,
+	MEDIA_BUS_FMT_SBGGR10_1X10,
+	MEDIA_BUS_FMT_SGBRG10_1X10,
+	MEDIA_BUS_FMT_SGRBG10_1X10,
+	MEDIA_BUS_FMT_SRGGB10_1X10,
+	MEDIA_BUS_FMT_SBGGR8_1X8,
+	MEDIA_BUS_FMT_SGBRG8_1X8,
+	MEDIA_BUS_FMT_SGRBG8_1X8,
+	MEDIA_BUS_FMT_SRGGB8_1X8,
+	0,
+};
+
+static const u32 *csi2_be_supported_codes[] = {
+	csi2_be_supported_codes_pad,
+	csi2_be_supported_codes_pad,
+};
+
+static struct v4l2_subdev_internal_ops csi2_be_sd_internal_ops = {
+	.open = ipu_isys_subdev_open,
+	.close = ipu_isys_subdev_close,
+};
+
+static const struct v4l2_subdev_core_ops csi2_be_sd_core_ops = {
+};
+
+static int set_stream(struct v4l2_subdev *sd, int enable)
+{
+	return 0;
+}
+
+static const struct v4l2_subdev_video_ops csi2_be_sd_video_ops = {
+	.s_stream = set_stream,
+};
+
+static int __subdev_link_validate(struct v4l2_subdev *sd,
+				  struct media_link *link,
+				  struct v4l2_subdev_format *source_fmt,
+				  struct v4l2_subdev_format *sink_fmt)
+{
+	struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe,
+						    struct ipu_isys_pipeline,
+						    pipe);
+
+	ip->csi2_be = to_ipu_isys_csi2_be(sd);
+	return ipu_isys_subdev_link_validate(sd, link, source_fmt, sink_fmt);
+}
+
+static int get_supported_code_index(u32 code)
+{
+	int i;
+
+	for (i = 0; csi2_be_supported_codes_pad[i]; i++) {
+		if (csi2_be_supported_codes_pad[i] == code)
+			return i;
+	}
+	return -EINVAL;
+}
+
+static int ipu_isys_csi2_be_set_sel(struct v4l2_subdev *sd,
+				    struct v4l2_subdev_pad_config *cfg,
+				    struct v4l2_subdev_selection *sel)
+{
+	struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+	struct media_pad *pad = &asd->sd.entity.pads[sel->pad];
+
+	if (sel->target == V4L2_SEL_TGT_CROP &&
+	    pad->flags & MEDIA_PAD_FL_SOURCE &&
+	    asd->valid_tgts[CSI2_BE_PAD_SOURCE].crop) {
+		struct v4l2_mbus_framefmt *ffmt =
+			__ipu_isys_get_ffmt(sd, cfg, sel->pad, 0, sel->which);
+		struct v4l2_rect *r = __ipu_isys_get_selection
+			(sd, cfg, sel->target, CSI2_BE_PAD_SINK, sel->which);
+
+		if (get_supported_code_index(ffmt->code) < 0) {
+			/* Non-bayer formats can't be single line cropped */
+			sel->r.left &= ~1;
+			sel->r.top &= ~1;
+
+			/* Non-bayer formats can't be padded at all */
+			sel->r.width = clamp(sel->r.width,
+					     IPU_ISYS_MIN_WIDTH, r->width);
+		} else {
+			sel->r.width = clamp(sel->r.width,
+					     IPU_ISYS_MIN_WIDTH,
+					     IPU_ISYS_MAX_WIDTH);
+		}
+
+		/*
+		 * ISAPF can pad only horizontally, height is
+		 * restricted by sink pad resolution.
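+		 * Height can thus never exceed the sink height, while
+		 * bayer widths may be padded up to IPU_ISYS_MAX_WIDTH.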
+ */ + sel->r.height = clamp(sel->r.height, IPU_ISYS_MIN_HEIGHT, + r->height); + *__ipu_isys_get_selection(sd, cfg, sel->target, sel->pad, + sel->which) = sel->r; + ipu_isys_subdev_fmt_propagate + (sd, cfg, NULL, &sel->r, + IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP, + sel->pad, sel->which); + return 0; + } + return ipu_isys_subdev_set_sel(sd, cfg, sel); +} + +static const struct v4l2_subdev_pad_ops csi2_be_sd_pad_ops = { + .link_validate = __subdev_link_validate, + .get_fmt = ipu_isys_subdev_get_ffmt, + .set_fmt = ipu_isys_subdev_set_ffmt, + .get_selection = ipu_isys_subdev_get_sel, + .set_selection = ipu_isys_csi2_be_set_sel, + .enum_mbus_code = ipu_isys_subdev_enum_mbus_code, +}; + +static struct v4l2_subdev_ops csi2_be_sd_ops = { + .core = &csi2_be_sd_core_ops, + .video = &csi2_be_sd_video_ops, + .pad = &csi2_be_sd_pad_ops, +}; + +static struct media_entity_operations csi2_be_entity_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) +static void csi2_be_set_ffmt(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *cfg, + struct v4l2_subdev_format *fmt) +#else +static void csi2_be_set_ffmt(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +#endif +{ + struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); + struct v4l2_mbus_framefmt *ffmt = + __ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->stream, + fmt->which); + + switch (fmt->pad) { + case CSI2_BE_PAD_SINK: + if (fmt->format.field != V4L2_FIELD_ALTERNATE) + fmt->format.field = V4L2_FIELD_NONE; + *ffmt = fmt->format; + + ipu_isys_subdev_fmt_propagate + (sd, cfg, &fmt->format, NULL, + IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, fmt->pad, fmt->which); + return; + case CSI2_BE_PAD_SOURCE: { + struct v4l2_mbus_framefmt *sink_ffmt = + __ipu_isys_get_ffmt(sd, cfg, CSI2_BE_PAD_SINK, + fmt->stream, fmt->which); + struct v4l2_rect *r = + __ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_CROP, + CSI2_BE_PAD_SOURCE, + fmt->which); + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + u32 code = sink_ffmt->code; + int idx = get_supported_code_index(code); + + if (asd->valid_tgts[CSI2_BE_PAD_SOURCE].crop && idx >= 0) { + int crop_info = 0; + + if (r->top & 1) + crop_info |= CSI2_BE_CROP_VER; + if (r->left & 1) + crop_info |= CSI2_BE_CROP_HOR; + code = csi2_be_supported_codes_pad + [((idx & CSI2_BE_CROP_MASK) ^ crop_info) + + (idx & ~CSI2_BE_CROP_MASK)]; + } + ffmt->width = r->width; + ffmt->height = r->height; + ffmt->code = code; + ffmt->field = sink_ffmt->field; + return; + } + default: + dev_err(&csi2->isys->adev->dev, "Unknown pad type\n"); + WARN_ON(1); + } +} + +void ipu_isys_csi2_be_cleanup(struct ipu_isys_csi2_be *csi2_be) +{ + v4l2_device_unregister_subdev(&csi2_be->asd.sd); + ipu_isys_subdev_cleanup(&csi2_be->asd); + ipu_isys_video_cleanup(&csi2_be->av); +} + +int ipu_isys_csi2_be_init(struct ipu_isys_csi2_be *csi2_be, + struct ipu_isys *isys) +{ + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = CSI2_BE_PAD_SINK, + .format = { + .width = 4096, + .height = 3072, + }, + }; + struct v4l2_subdev_selection sel = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = CSI2_BE_PAD_SOURCE, + .target = V4L2_SEL_TGT_CROP, + .r = { + .width = fmt.format.width, + .height = fmt.format.height, + }, + }; + int rval; + + csi2_be->asd.sd.entity.ops = &csi2_be_entity_ops; + csi2_be->asd.isys = isys; + + rval = ipu_isys_subdev_init(&csi2_be->asd, &csi2_be_sd_ops, 0, + NR_OF_CSI2_BE_PADS, + NR_OF_CSI2_BE_STREAMS, + NR_OF_CSI2_BE_SOURCE_PADS, + 
NR_OF_CSI2_BE_SINK_PADS, 0); + if (rval) + goto fail; + + csi2_be->asd.pad[CSI2_BE_PAD_SINK].flags = MEDIA_PAD_FL_SINK + | MEDIA_PAD_FL_MUST_CONNECT; + csi2_be->asd.pad[CSI2_BE_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; + csi2_be->asd.valid_tgts[CSI2_BE_PAD_SOURCE].crop = true; + csi2_be->asd.set_ffmt = csi2_be_set_ffmt; + csi2_be->asd.isys = isys; + + BUILD_BUG_ON(ARRAY_SIZE(csi2_be_supported_codes) != NR_OF_CSI2_BE_PADS); + csi2_be->asd.supported_codes = csi2_be_supported_codes; + csi2_be->asd.be_mode = IPU_BE_RAW; + csi2_be->asd.isl_mode = IPU_ISL_CSI2_BE; + + ipu_isys_subdev_set_ffmt(&csi2_be->asd.sd, NULL, &fmt); + ipu_isys_csi2_be_set_sel(&csi2_be->asd.sd, NULL, &sel); + + csi2_be->asd.sd.internal_ops = &csi2_be_sd_internal_ops; + snprintf(csi2_be->asd.sd.name, sizeof(csi2_be->asd.sd.name), + IPU_ISYS_ENTITY_PREFIX " CSI2 BE"); + snprintf(csi2_be->av.vdev.name, sizeof(csi2_be->av.vdev.name), + IPU_ISYS_ENTITY_PREFIX " CSI2 BE capture"); + csi2_be->av.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_RAW_NS; + v4l2_set_subdevdata(&csi2_be->asd.sd, &csi2_be->asd); + rval = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2_be->asd.sd); + if (rval) { + dev_info(&isys->adev->dev, "can't register v4l2 subdev\n"); + goto fail; + } + + csi2_be->av.isys = isys; + csi2_be->av.pfmts = ipu_isys_pfmts; + csi2_be->av.try_fmt_vid_mplane = + ipu_isys_video_try_fmt_vid_mplane_default; + csi2_be->av.prepare_firmware_stream_cfg = + ipu_isys_prepare_firmware_stream_cfg_default; + csi2_be->av.aq.buf_prepare = ipu_isys_buf_prepare; + csi2_be->av.aq.fill_frame_buff_set_pin = + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin; + csi2_be->av.aq.link_fmt_validate = ipu_isys_link_fmt_validate; + csi2_be->av.aq.vbq.buf_struct_size = + sizeof(struct ipu_isys_video_buffer); + + rval = ipu_isys_video_init(&csi2_be->av, &csi2_be->asd.sd.entity, + CSI2_BE_PAD_SOURCE, MEDIA_PAD_FL_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, "can't init video node\n"); + goto fail; + } + + return 0; + +fail: + ipu_isys_csi2_be_cleanup(csi2_be); + + return rval; +} diff --git a/drivers/media/pci/intel/ipu-isys-csi2-be.h b/drivers/media/pci/intel/ipu-isys-csi2-be.h new file mode 100644 index 000000000000..70a17833a9c4 --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-csi2-be.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_CSI2_BE_H +#define IPU_ISYS_CSI2_BE_H + +#include +#include + +#include "ipu-isys-queue.h" +#include "ipu-isys-subdev.h" +#include "ipu-isys-video.h" +#include "ipu-platform-isys.h" + +struct ipu_isys_csi2_be_pdata; +struct ipu_isys; + +#define CSI2_BE_PAD_SINK 0 +#define CSI2_BE_PAD_SOURCE 1 + +#define NR_OF_CSI2_BE_PADS 2 +#define NR_OF_CSI2_BE_SOURCE_PADS 1 +#define NR_OF_CSI2_BE_SINK_PADS 1 + +#define NR_OF_CSI2_BE_STREAMS 1 +#define NR_OF_CSI2_BE_SOC_SOURCE_PADS NR_OF_CSI2_BE_SOC_STREAMS +#define NR_OF_CSI2_BE_SOC_SINK_PADS NR_OF_CSI2_BE_SOC_STREAMS +#define CSI2_BE_SOC_PAD_SINK(n) \ + ({ typeof(n) __n = (n); \ + (__n) >= NR_OF_CSI2_BE_SOC_SINK_PADS ? \ + (NR_OF_CSI2_BE_SOC_SINK_PADS) : (__n); }) +#define CSI2_BE_SOC_PAD_SOURCE(n) \ + ({ typeof(n) __n = (n); \ + (__n) >= NR_OF_CSI2_BE_SOC_SOURCE_PADS ? 
\ + (NR_OF_CSI2_BE_SOC_PADS - 1) : \ + ((__n) + NR_OF_CSI2_BE_SOC_SINK_PADS); }) +#define NR_OF_CSI2_BE_SOC_PADS \ + (NR_OF_CSI2_BE_SOC_SOURCE_PADS + NR_OF_CSI2_BE_SOC_SINK_PADS) + +#define CSI2_BE_CROP_HOR BIT(0) +#define CSI2_BE_CROP_VER BIT(1) +#define CSI2_BE_CROP_MASK (CSI2_BE_CROP_VER | CSI2_BE_CROP_HOR) + +/* + * struct ipu_isys_csi2_be + */ +struct ipu_isys_csi2_be { + struct ipu_isys_csi2_be_pdata *pdata; + struct ipu_isys_subdev asd; + struct ipu_isys_video av; +}; + +struct ipu_isys_csi2_be_soc { + struct ipu_isys_csi2_be_pdata *pdata; + struct ipu_isys_subdev asd; + struct ipu_isys_video av[NR_OF_CSI2_BE_SOC_SOURCE_PADS]; +}; + +#define to_ipu_isys_csi2_be(sd) \ + container_of(to_ipu_isys_subdev(sd), \ + struct ipu_isys_csi2_be, asd) + +#define to_ipu_isys_csi2_be_soc(sd) \ + container_of(to_ipu_isys_subdev(sd), \ + struct ipu_isys_csi2_be_soc, asd) + +int ipu_isys_csi2_be_init(struct ipu_isys_csi2_be *csi2_be, + struct ipu_isys *isys); +int ipu_isys_csi2_be_soc_init( + struct ipu_isys_csi2_be_soc *csi2_be_soc, struct ipu_isys *isys); +void ipu_isys_csi2_be_cleanup(struct ipu_isys_csi2_be *csi2_be); +void ipu_isys_csi2_be_soc_cleanup(struct ipu_isys_csi2_be_soc *csi2_be); + +#endif /* IPU_ISYS_CSI2_BE_H */ diff --git a/drivers/media/pci/intel/ipu-isys-csi2.c b/drivers/media/pci/intel/ipu-isys-csi2.c new file mode 100644 index 000000000000..4667df082344 --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-csi2.c @@ -0,0 +1,963 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include + +#include +#include +#include +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-buttress.h" +#include "ipu-isys.h" +#include "ipu-isys-subdev.h" +#include "ipu-isys-video.h" +#include "ipu-platform-regs.h" + +#define CREATE_TRACE_POINTS +#define IPU_SOF_SEQID_TRACE +#define IPU_EOF_SEQID_TRACE +#include "ipu-trace-event.h" + +static const u32 csi2_supported_codes_pad_sink[] = { + MEDIA_BUS_FMT_Y10_1X10, + MEDIA_BUS_FMT_RGB565_1X16, + MEDIA_BUS_FMT_RGB888_1X24, + MEDIA_BUS_FMT_UYVY8_1X16, + MEDIA_BUS_FMT_YUYV8_1X16, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, + MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, + MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, + MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR14_1X14, + MEDIA_BUS_FMT_SGBRG14_1X14, + MEDIA_BUS_FMT_SGRBG14_1X14, + MEDIA_BUS_FMT_SRGGB14_1X14, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + 0, +}; + +static const u32 csi2_supported_codes_pad_source[] = { + MEDIA_BUS_FMT_Y10_1X10, + MEDIA_BUS_FMT_RGB565_1X16, + MEDIA_BUS_FMT_RGB888_1X24, + MEDIA_BUS_FMT_UYVY8_1X16, + MEDIA_BUS_FMT_YUYV8_1X16, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR14_1X14, + MEDIA_BUS_FMT_SGBRG14_1X14, + MEDIA_BUS_FMT_SGRBG14_1X14, + MEDIA_BUS_FMT_SRGGB14_1X14, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + 0, +}; + +#ifdef IPU_META_DATA_SUPPORT +static const u32 csi2_supported_codes_pad_meta[] = { + MEDIA_BUS_FMT_FIXED, + 0, +}; +#endif + +static const 
u32 *csi2_supported_codes[NR_OF_CSI2_PADS]; + +static struct v4l2_subdev_internal_ops csi2_sd_internal_ops = { + .open = ipu_isys_subdev_open, + .close = ipu_isys_subdev_close, +}; + +int ipu_isys_csi2_get_link_freq(struct ipu_isys_csi2 *csi2, __s64 *link_freq) +{ + struct ipu_isys_pipeline *pipe = container_of(csi2->asd.sd.entity.pipe, + struct ipu_isys_pipeline, + pipe); + struct v4l2_subdev *ext_sd = + media_entity_to_v4l2_subdev(pipe->external->entity); + struct v4l2_ext_control c = {.id = V4L2_CID_LINK_FREQ, }; + struct v4l2_ext_controls cs = {.count = 1, + .controls = &c, + }; + struct v4l2_querymenu qm = {.id = c.id, }; + int rval; + + if (!ext_sd) { + WARN_ON(1); + return -ENODEV; + } + rval = v4l2_g_ext_ctrls(ext_sd->ctrl_handler, &cs); + if (rval) { + dev_info(&csi2->isys->adev->dev, "can't get link frequency\n"); + return rval; + } + + qm.index = c.value; + + rval = v4l2_querymenu(ext_sd->ctrl_handler, &qm); + if (rval) { + dev_info(&csi2->isys->adev->dev, "can't get menu item\n"); + return rval; + } + + dev_dbg(&csi2->isys->adev->dev, "%s: link frequency %lld\n", __func__, + qm.value); + + if (!qm.value) + return -EINVAL; + *link_freq = qm.value; + return 0; +} + +#ifdef IPU_META_DATA_SUPPORT +static int ipu_get_frame_desc_entry_by_dt(struct v4l2_subdev *sd, + struct v4l2_mbus_frame_desc_entry + *entry, u8 data_type) +{ + struct v4l2_mbus_frame_desc desc; + int rval, i; + + rval = v4l2_subdev_call(sd, pad, get_frame_desc, 0, &desc); + if (rval) + return rval; + + for (i = 0; i < desc.num_entries; i++) { + if (desc.entry[i].bus.csi2.data_type != data_type) + continue; + *entry = desc.entry[i]; + return 0; + } + + return -EINVAL; +} + +static void csi2_meta_prepare_firmware_stream_cfg_default( + struct ipu_isys_video *av, + struct ipu_fw_isys_stream_cfg_data_abi *cfg) +{ + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct ipu_isys_queue *aq = &av->aq; + struct ipu_fw_isys_output_pin_info_abi *pin_info; + struct v4l2_mbus_frame_desc_entry entry; + int pin = cfg->nof_output_pins++; + int inpin = cfg->nof_input_pins++; + int rval; + + aq->fw_output = pin; + ip->output_pins[pin].pin_ready = ipu_isys_queue_buf_ready; + ip->output_pins[pin].aq = aq; + + pin_info = &cfg->output_pins[pin]; + pin_info->input_pin_id = inpin; + pin_info->output_res.width = av->mpix.width; + pin_info->output_res.height = av->mpix.height; + pin_info->stride = av->mpix.plane_fmt[0].bytesperline; + pin_info->pt = aq->css_pin_type; + pin_info->ft = av->pfmt->css_pixelformat; + pin_info->send_irq = 1; + + rval = + ipu_get_frame_desc_entry_by_dt(media_entity_to_v4l2_subdev + (ip->external->entity), &entry, + IPU_ISYS_MIPI_CSI2_TYPE_EMBEDDED8); + if (!rval) { + cfg->input_pins[inpin].dt = IPU_ISYS_MIPI_CSI2_TYPE_EMBEDDED8; + cfg->input_pins[inpin].input_res.width = + entry.size.two_dim.width * entry.bpp / BITS_PER_BYTE; + cfg->input_pins[inpin].input_res.height = + entry.size.two_dim.height; + } +} +#endif + +static int subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); + + dev_dbg(&csi2->isys->adev->dev, "subscribe event(type %u id %u)\n", + sub->type, sub->id); + + switch (sub->type) { + case V4L2_EVENT_FRAME_SYNC: + return v4l2_event_subscribe(fh, sub, 10, NULL); + case V4L2_EVENT_CTRL: + return v4l2_ctrl_subscribe_event(fh, sub); + default: + return -EINVAL; + } +} + +static const struct v4l2_subdev_core_ops csi2_sd_core_ops = { + .subscribe_event = subscribe_event, + 
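+	/* only V4L2_EVENT_FRAME_SYNC and V4L2_EVENT_CTRL are supported */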
.unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+#ifdef IPU_META_DATA_SUPPORT
+static struct ipu_isys_pixelformat csi2_meta_pfmts[] = {
+	{V4L2_FMT_IPU_ISYS_META, 8, 8, 0, MEDIA_BUS_FMT_FIXED, 0},
+	{},
+};
+#endif
+/*
+ * The input system CSI2+ receiver has several
+ * parameters affecting the receiver timings. These depend
+ * on the MIPI bus frequency F in Hz (sensor transmitter rate)
+ * as follows:
+ *	register value = (A/1e9 + B * UI) / COUNT_ACC
+ * where
+ *	UI = 1 / (2 * F) in seconds
+ *	COUNT_ACC = counter accuracy in seconds
+ *	For IPU4, COUNT_ACC = 0.125 ns
+ *
+ * A and B are coefficients from the table below,
+ * depending on whether the register minimum or maximum value is
+ * calculated.
+ *					Minimum		Maximum
+ * Clock lane				A	B	A	B
+ * reg_rx_csi_dly_cnt_termen_clane	0	0	38	0
+ * reg_rx_csi_dly_cnt_settle_clane	95	-8	300	-16
+ * Data lanes
+ * reg_rx_csi_dly_cnt_termen_dlane0	0	0	35	4
+ * reg_rx_csi_dly_cnt_settle_dlane0	85	-2	145	-6
+ * reg_rx_csi_dly_cnt_termen_dlane1	0	0	35	4
+ * reg_rx_csi_dly_cnt_settle_dlane1	85	-2	145	-6
+ * reg_rx_csi_dly_cnt_termen_dlane2	0	0	35	4
+ * reg_rx_csi_dly_cnt_settle_dlane2	85	-2	145	-6
+ * reg_rx_csi_dly_cnt_termen_dlane3	0	0	35	4
+ * reg_rx_csi_dly_cnt_settle_dlane3	85	-2	145	-6
+ *
+ * We use the minimum values of both A and B.
+ */
+
+#define DIV_SHIFT	8
+
+static u32 calc_timing(s32 a, s32 b, s64 link_freq, s32 accinv)
+{
+	return accinv * a + (accinv * b * (500000000 >> DIV_SHIFT)
+			     / (s32)(link_freq >> DIV_SHIFT));
+}
+
+static int
+ipu_isys_csi2_calc_timing(struct ipu_isys_csi2 *csi2,
+			  struct ipu_isys_csi2_timing *timing, u32 accinv)
+{
+	__s64 link_freq;
+	int rval;
+
+	rval = ipu_isys_csi2_get_link_freq(csi2, &link_freq);
+	if (rval)
+		return rval;
+
+	timing->ctermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A,
+				      CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B,
+				      link_freq, accinv);
+	timing->csettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A,
+				      CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B,
+				      link_freq, accinv);
+	dev_dbg(&csi2->isys->adev->dev, "ctermen %u\n", timing->ctermen);
+	dev_dbg(&csi2->isys->adev->dev, "csettle %u\n", timing->csettle);
+
+	timing->dtermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A,
+				      CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B,
+				      link_freq, accinv);
+	timing->dsettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A,
+				      CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B,
+				      link_freq, accinv);
+	dev_dbg(&csi2->isys->adev->dev, "dtermen %u\n", timing->dtermen);
+	dev_dbg(&csi2->isys->adev->dev, "dsettle %u\n", timing->dsettle);
+
+	return 0;
+}
+
+#define CSI2_ACCINV	8
+
+static int set_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);
+	struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe,
+						    struct ipu_isys_pipeline,
+						    pipe);
+	struct ipu_isys_csi2_config *cfg;
+	struct v4l2_subdev *ext_sd;
+	struct v4l2_control c = {.id = V4L2_CID_MIPI_LANES, };
+	/* zero-initialized: the contents are unused when disabling */
+	struct ipu_isys_csi2_timing timing = {};
+	unsigned int nlanes;
+	int rval;
+
+	dev_dbg(&csi2->isys->adev->dev, "csi2 s_stream %d\n", enable);
+
+	if (!ip->external->entity) {
+		WARN_ON(1);
+		return -ENODEV;
+	}
+	ext_sd = media_entity_to_v4l2_subdev(ip->external->entity);
+	cfg = v4l2_get_subdev_hostdata(ext_sd);
+
+	if (!enable) {
+		csi2->stream_count--;
+		if (csi2->stream_count)
+			return 0;
+
+		ipu_isys_csi2_set_stream(sd, timing, 0, enable);
+		return 0;
+	}
+
+	ip->has_sof = true;
+
+	if (csi2->stream_count) {
+		csi2->stream_count++;
+		return 0;
+	}
+
+	rval = v4l2_g_ctrl(ext_sd->ctrl_handler, &c);
+	if 
+ +#define DIV_SHIFT 8 + +static u32 calc_timing(s32 a, s32 b, s64 link_freq, s32 accinv) +{ + return accinv * a + (accinv * b * (500000000 >> DIV_SHIFT) + / (s32)(link_freq >> DIV_SHIFT)); +} + +static int +ipu_isys_csi2_calc_timing(struct ipu_isys_csi2 *csi2, + struct ipu_isys_csi2_timing *timing, u32 accinv) +{ + __s64 link_freq; + int rval; + + rval = ipu_isys_csi2_get_link_freq(csi2, &link_freq); + if (rval) + return rval; + + timing->ctermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A, + CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B, + link_freq, accinv); + timing->csettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A, + CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B, + link_freq, accinv); + dev_dbg(&csi2->isys->adev->dev, "ctermen %u\n", timing->ctermen); + dev_dbg(&csi2->isys->adev->dev, "csettle %u\n", timing->csettle); + + timing->dtermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A, + CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B, + link_freq, accinv); + timing->dsettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A, + CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B, + link_freq, accinv); + dev_dbg(&csi2->isys->adev->dev, "dtermen %u\n", timing->dtermen); + dev_dbg(&csi2->isys->adev->dev, "dsettle %u\n", timing->dsettle); + + return 0; +} + +#define CSI2_ACCINV 8 + +static int set_stream(struct v4l2_subdev *sd, int enable) +{ + struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); + struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe, + struct ipu_isys_pipeline, + pipe); + struct ipu_isys_csi2_config *cfg; + struct v4l2_subdev *ext_sd; + struct v4l2_control c = {.id = V4L2_CID_MIPI_LANES, }; + struct ipu_isys_csi2_timing timing = {}; + unsigned int nlanes; + int rval; + + dev_dbg(&csi2->isys->adev->dev, "csi2 s_stream %d\n", enable); + + if (!ip->external->entity) { + WARN_ON(1); + return -ENODEV; + } + ext_sd = media_entity_to_v4l2_subdev(ip->external->entity); + cfg = v4l2_get_subdev_hostdata(ext_sd); + + if (!enable) { + csi2->stream_count--; + if (csi2->stream_count) + return 0; + + ipu_isys_csi2_set_stream(sd, timing, 0, enable); + return 0; + } + + ip->has_sof = true; + + if (csi2->stream_count) { + csi2->stream_count++; + return 0; + } + + rval = v4l2_g_ctrl(ext_sd->ctrl_handler, &c); + if (!rval && c.value > 0 && cfg->nlanes > c.value) { + nlanes = c.value; + dev_dbg(&csi2->isys->adev->dev, "lane nr %u\n", nlanes); + } else { + nlanes = cfg->nlanes; + } + + rval = ipu_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV); + if (rval) + return rval; + + ipu_isys_csi2_set_stream(sd, timing, nlanes, enable); + csi2->stream_count++; + + return 0; +} + +static void csi2_capture_done(struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *info) +{ + if (ip->interlaced && ip->isys->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) { + struct ipu_isys_buffer *ib; + unsigned long flags; + + spin_lock_irqsave(&ip->short_packet_queue_lock, flags); + if (!list_empty(&ip->short_packet_active)) { + ib = list_last_entry(&ip->short_packet_active, + struct ipu_isys_buffer, head); + list_move(&ib->head, &ip->short_packet_incoming); + } + spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags); + } + if (ip->csi2) + ipu_isys_csi2_error(ip->csi2); +} + +static int csi2_link_validate(struct media_link *link) +{ + struct ipu_isys_csi2 *csi2; + struct ipu_isys_pipeline *ip; + struct v4l2_subdev_route r[IPU_ISYS_MAX_STREAMS]; + struct v4l2_subdev_routing routing = { + .routes = r, + .num_routes = IPU_ISYS_MAX_STREAMS, + }; + unsigned int active = 0; + int i; + int rval; + + if (!link->sink->entity || + !link->sink->entity->pipe || !link->source->entity) + return -EINVAL; + csi2 = + to_ipu_isys_csi2(media_entity_to_v4l2_subdev(link->sink->entity)); + ip = to_ipu_isys_pipeline(link->sink->entity->pipe); + csi2->receiver_errors = 0; + ip->csi2 = csi2; + ipu_isys_video_add_capture_done(to_ipu_isys_pipeline + (link->sink->entity->pipe), + csi2_capture_done); + + rval = v4l2_subdev_link_validate(link); + if (rval) + return rval; + + if (!v4l2_ctrl_g_ctrl(csi2->store_csi2_header)) { + for (i = 0; i < NR_OF_CSI2_SOURCE_PADS; i++) { + struct media_pad *remote_pad = + media_entity_remote_pad(&csi2->asd.
+ pad[CSI2_PAD_SOURCE(i)]); + + if (remote_pad && + is_media_entity_v4l2_subdev(remote_pad->entity)) { + dev_err(&csi2->isys->adev->dev, + "CSI2 BE requires CSI2 headers.\n"); + return -EINVAL; + } + } + } + + rval = + v4l2_subdev_call(media_entity_to_v4l2_subdev(link->source->entity), + pad, get_routing, &routing); + + if (rval) { + csi2->remote_streams = 1; + return 0; + } + + for (i = 0; i < routing.num_routes; i++) { + if (routing.routes[i].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) + active++; + } + + if (active != + bitmap_weight(csi2->asd.stream[link->sink->index].streams_stat, 32)) + return -EINVAL; + + csi2->remote_streams = active; + + return 0; +} + +static bool csi2_has_route(struct media_entity *entity, unsigned int pad0, + unsigned int pad1, int *stream) +{ +#ifdef IPU_META_DATA_SUPPORT + if (pad0 == CSI2_PAD_META || pad1 == CSI2_PAD_META) + return true; +#endif + return ipu_isys_subdev_has_route(entity, pad0, pad1, stream); +} + +static const struct v4l2_subdev_video_ops csi2_sd_video_ops = { + .s_stream = set_stream, +}; + +#ifdef IPU_META_DATA_SUPPORT +static int get_metadata_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct media_pad *pad = + media_entity_remote_pad(&sd->entity.pads[CSI2_PAD_SINK]); + struct v4l2_mbus_frame_desc_entry entry; + int rval; + + if (!pad) + return -EINVAL; + + rval = + ipu_get_frame_desc_entry_by_dt(media_entity_to_v4l2_subdev + (pad->entity), &entry, + IPU_ISYS_MIPI_CSI2_TYPE_EMBEDDED8); + + if (!rval) { + fmt->format.width = + entry.size.two_dim.width * entry.bpp / BITS_PER_BYTE; + fmt->format.height = entry.size.two_dim.height; + fmt->format.code = entry.pixelcode; + fmt->format.field = V4L2_FIELD_NONE; + } + return rval; +} +#endif + +static int ipu_isys_csi2_get_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ +#ifdef IPU_META_DATA_SUPPORT + if (fmt->pad == CSI2_PAD_META) + return get_metadata_fmt(sd, cfg, fmt); +#endif + return ipu_isys_subdev_get_ffmt(sd, cfg, fmt); +} + +static int ipu_isys_csi2_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ +#ifdef IPU_META_DATA_SUPPORT + if (fmt->pad == CSI2_PAD_META) + return get_metadata_fmt(sd, cfg, fmt); +#endif + return ipu_isys_subdev_set_ffmt(sd, cfg, fmt); +} + +static int __subdev_link_validate(struct v4l2_subdev *sd, + struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt) +{ + struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe, + struct ipu_isys_pipeline, + pipe); + + if (source_fmt->format.field == V4L2_FIELD_ALTERNATE) + ip->interlaced = true; + + return ipu_isys_subdev_link_validate(sd, link, source_fmt, sink_fmt); +} + +static const struct v4l2_subdev_pad_ops csi2_sd_pad_ops = { + .link_validate = __subdev_link_validate, + .get_fmt = ipu_isys_csi2_get_fmt, + .set_fmt = ipu_isys_csi2_set_fmt, + .enum_mbus_code = ipu_isys_subdev_enum_mbus_code, + .set_routing = ipu_isys_subdev_set_routing, + .get_routing = ipu_isys_subdev_get_routing, +}; + +static struct v4l2_subdev_ops csi2_sd_ops = { + .core = &csi2_sd_core_ops, + .video = &csi2_sd_video_ops, + .pad = &csi2_sd_pad_ops, +}; + +static struct media_entity_operations csi2_entity_ops = { + .link_validate = csi2_link_validate, + .has_route = csi2_has_route, +}; + +static void csi2_set_ffmt(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct v4l2_mbus_framefmt 
*ffmt = + __ipu_isys_get_ffmt(sd, cfg, fmt->pad, + fmt->stream, + fmt->which); + + if (fmt->format.field != V4L2_FIELD_ALTERNATE) + fmt->format.field = V4L2_FIELD_NONE; + + if (fmt->pad == CSI2_PAD_SINK) { + *ffmt = fmt->format; + if (fmt->stream) + return; + ipu_isys_subdev_fmt_propagate( + sd, cfg, &fmt->format, NULL, + IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, + fmt->pad, fmt->which); + return; + } + +#ifdef IPU_META_DATA_SUPPORT + if (fmt->pad == CSI2_PAD_META) { + struct v4l2_mbus_framefmt *ffmt = + __ipu_isys_get_ffmt(sd, cfg, fmt->pad, + fmt->stream, + fmt->which); + struct media_pad *pad = media_entity_remote_pad( + &sd->entity.pads[CSI2_PAD_SINK]); + struct v4l2_mbus_frame_desc_entry entry; + int rval; + + if (!pad) { + ffmt->width = 0; + ffmt->height = 0; + ffmt->code = 0; + return; + } + + rval = ipu_get_frame_desc_entry_by_dt( + media_entity_to_v4l2_subdev(pad->entity), + &entry, + IPU_ISYS_MIPI_CSI2_TYPE_EMBEDDED8); + + if (!rval) { + ffmt->width = entry.size.two_dim.width * entry.bpp + / BITS_PER_BYTE; + ffmt->height = entry.size.two_dim.height; + ffmt->code = entry.pixelcode; + ffmt->field = V4L2_FIELD_NONE; + } + + return; + } +#endif + if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SOURCE) { + ffmt->width = fmt->format.width; + ffmt->height = fmt->format.height; + ffmt->field = fmt->format.field; + ffmt->code = + ipu_isys_subdev_code_to_uncompressed(fmt->format.code); + return; + } + + WARN_ON(1); +} + +static const struct ipu_isys_pixelformat * +csi2_try_fmt(struct ipu_isys_video *av, + struct v4l2_pix_format_mplane *mpix) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + struct v4l2_subdev *sd = + media_entity_to_v4l2_subdev(av->vdev.entity.links[0].source-> + entity); +#else + struct media_link *link = list_first_entry(&av->vdev.entity.links, + struct media_link, list); + struct v4l2_subdev *sd = + media_entity_to_v4l2_subdev(link->source->entity); +#endif + struct ipu_isys_csi2 *csi2; + + if (!sd) + return NULL; + + csi2 = to_ipu_isys_csi2(sd); + + return ipu_isys_video_try_fmt_vid_mplane(av, mpix, + v4l2_ctrl_g_ctrl(csi2->store_csi2_header)); +} + +void ipu_isys_csi2_cleanup(struct ipu_isys_csi2 *csi2) +{ + int i; + + if (!csi2->isys) + return; + + v4l2_device_unregister_subdev(&csi2->asd.sd); + ipu_isys_subdev_cleanup(&csi2->asd); + for (i = 0; i < NR_OF_CSI2_SOURCE_PADS; i++) + ipu_isys_video_cleanup(&csi2->av[i]); + +#ifdef IPU_META_DATA_SUPPORT + ipu_isys_video_cleanup(&csi2->av_meta); +#endif + csi2->isys = NULL; +} + +static void csi_ctrl_init(struct v4l2_subdev *sd) +{ + struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); + + static const struct v4l2_ctrl_config cfg = { + .id = V4L2_CID_IPU_STORE_CSI2_HEADER, + .name = "Store CSI-2 Headers", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .min = 0, + .max = 1, + .step = 1, + .def = 1, + }; + + csi2->store_csi2_header = v4l2_ctrl_new_custom(&csi2->asd.ctrl_handler, + &cfg, NULL); +} + +int ipu_isys_csi2_init(struct ipu_isys_csi2 *csi2, + struct ipu_isys *isys, + void __iomem *base, unsigned int index) +{ + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = CSI2_PAD_SINK, + .format = { + .width = 4096, + .height = 3072, + }, + }; +#ifdef IPU_META_DATA_SUPPORT + struct v4l2_subdev_format fmt_meta = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = CSI2_PAD_META, + }; +#endif + int i, rval, src; + + csi2->isys = isys; + csi2->base = base; + csi2->index = index; + + csi2->asd.sd.entity.ops = &csi2_entity_ops; + csi2->asd.ctrl_init = csi_ctrl_init; + csi2->asd.isys = isys; + 
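/* + * The eof_completion is completed from ipu_isys_csi2_eof_event() and + * waited on in ipu_isys_csi2_wait_last_eof() when a stream is being + * stopped. + */ +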
init_completion(&csi2->eof_completion); + csi2->remote_streams = 1; + csi2->stream_count = 0; + + rval = ipu_isys_subdev_init(&csi2->asd, &csi2_sd_ops, 0, + NR_OF_CSI2_PADS, + NR_OF_CSI2_STREAMS, + NR_OF_CSI2_SOURCE_PADS, + NR_OF_CSI2_SINK_PADS, + V4L2_SUBDEV_FL_HAS_SUBSTREAMS); + if (rval) + goto fail; + + csi2->asd.pad[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK + | MEDIA_PAD_FL_MUST_CONNECT | MEDIA_PAD_FL_MULTIPLEX; + for (i = CSI2_PAD_SOURCE(0); + i < (NR_OF_CSI2_SOURCE_PADS + CSI2_PAD_SOURCE(0)); i++) + csi2->asd.pad[i].flags = MEDIA_PAD_FL_SOURCE; + +#ifdef IPU_META_DATA_SUPPORT + csi2->asd.pad[CSI2_PAD_META].flags = MEDIA_PAD_FL_SOURCE; +#endif + src = index; +#ifdef CONFIG_VIDEO_INTEL_IPU4P + src = index ? (index + 5) : (index + 3); +#endif + csi2->asd.source = IPU_FW_ISYS_STREAM_SRC_CSI2_PORT0 + src; + csi2_supported_codes[CSI2_PAD_SINK] = csi2_supported_codes_pad_sink; + + for (i = 0; i < NR_OF_CSI2_SOURCE_PADS; i++) + csi2_supported_codes[i + 1] = csi2_supported_codes_pad_source; +#ifdef IPU_META_DATA_SUPPORT + csi2_supported_codes[CSI2_PAD_META] = csi2_supported_codes_pad_meta; +#endif + csi2->asd.supported_codes = csi2_supported_codes; + csi2->asd.set_ffmt = csi2_set_ffmt; + + csi2->asd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS; + csi2->asd.sd.internal_ops = &csi2_sd_internal_ops; + snprintf(csi2->asd.sd.name, sizeof(csi2->asd.sd.name), + IPU_ISYS_ENTITY_PREFIX " CSI-2 %u", index); + v4l2_set_subdevdata(&csi2->asd.sd, &csi2->asd); + + mutex_lock(&csi2->asd.mutex); + rval = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2->asd.sd); + if (rval) { + mutex_unlock(&csi2->asd.mutex); + dev_info(&isys->adev->dev, "can't register v4l2 subdev\n"); + goto fail; + } + + __ipu_isys_subdev_set_ffmt(&csi2->asd.sd, NULL, &fmt); +#ifdef IPU_META_DATA_SUPPORT + __ipu_isys_subdev_set_ffmt(&csi2->asd.sd, NULL, &fmt_meta); +#endif + + /* create default route information */ + for (i = 0; i < NR_OF_CSI2_STREAMS; i++) { + csi2->asd.route[i].sink = CSI2_PAD_SINK; + csi2->asd.route[i].source = CSI2_PAD_SOURCE(i); + csi2->asd.route[i].flags = 0; + } + + for (i = 0; i < NR_OF_CSI2_SOURCE_PADS; i++) { + csi2->asd.stream[CSI2_PAD_SINK].stream_id[i] = i; + csi2->asd.stream[CSI2_PAD_SOURCE(i)].stream_id[CSI2_PAD_SINK] + = i; + } + csi2->asd.route[0].flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE | + V4L2_SUBDEV_ROUTE_FL_IMMUTABLE; + bitmap_set(csi2->asd.stream[CSI2_PAD_SINK].streams_stat, 0, 1); + bitmap_set(csi2->asd.stream[CSI2_PAD_SOURCE(0)].streams_stat, 0, 1); + + mutex_unlock(&csi2->asd.mutex); + + for (i = 0; i < NR_OF_CSI2_SOURCE_PADS; i++) { + snprintf(csi2->av[i].vdev.name, sizeof(csi2->av[i].vdev.name), + IPU_ISYS_ENTITY_PREFIX " CSI-2 %u capture %d", + index, i); + csi2->av[i].isys = isys; + csi2->av[i].aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_MIPI; + csi2->av[i].pfmts = ipu_isys_pfmts_packed; + csi2->av[i].try_fmt_vid_mplane = csi2_try_fmt; + csi2->av[i].prepare_firmware_stream_cfg = + ipu_isys_prepare_firmware_stream_cfg_default; + csi2->av[i].packed = true; + csi2->av[i].line_header_length = + IPU_ISYS_CSI2_LONG_PACKET_HEADER_SIZE; + csi2->av[i].line_footer_length = + IPU_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE; + csi2->av[i].aq.buf_prepare = ipu_isys_buf_prepare; + csi2->av[i].aq.fill_frame_buff_set_pin = + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin; + csi2->av[i].aq.link_fmt_validate = ipu_isys_link_fmt_validate; + csi2->av[i].aq.vbq.buf_struct_size = + sizeof(struct ipu_isys_video_buffer); + + rval = ipu_isys_video_init(&csi2->av[i], + &csi2->asd.sd.entity, + CSI2_PAD_SOURCE(i), + MEDIA_PAD_FL_SINK, 
0); + if (rval) { + dev_info(&isys->adev->dev, "can't init video node\n"); + goto fail; + } + } + +#ifdef IPU_META_DATA_SUPPORT + snprintf(csi2->av_meta.vdev.name, sizeof(csi2->av_meta.vdev.name), + IPU_ISYS_ENTITY_PREFIX " CSI-2 %u meta", index); + csi2->av_meta.isys = isys; + csi2->av_meta.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_MIPI; + csi2->av_meta.pfmts = csi2_meta_pfmts; + csi2->av_meta.try_fmt_vid_mplane = csi2_try_fmt; + csi2->av_meta.prepare_firmware_stream_cfg = + csi2_meta_prepare_firmware_stream_cfg_default; + csi2->av_meta.packed = true; + csi2->av_meta.line_header_length = + IPU_ISYS_CSI2_LONG_PACKET_HEADER_SIZE; + csi2->av_meta.line_footer_length = + IPU_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE; + csi2->av_meta.aq.buf_prepare = ipu_isys_buf_prepare; + csi2->av_meta.aq.fill_frame_buff_set_pin = + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin; + csi2->av_meta.aq.link_fmt_validate = ipu_isys_link_fmt_validate; + csi2->av_meta.aq.vbq.buf_struct_size = + sizeof(struct ipu_isys_video_buffer); + + rval = ipu_isys_video_init(&csi2->av_meta, &csi2->asd.sd.entity, + CSI2_PAD_META, MEDIA_PAD_FL_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, "can't init metadata node\n"); + goto fail; + } +#endif + return 0; + +fail: + ipu_isys_csi2_cleanup(csi2); + + return rval; +} + +void ipu_isys_csi2_sof_event(struct ipu_isys_csi2 *csi2, unsigned int vc) +{ + struct ipu_isys_pipeline *ip = NULL; + struct v4l2_event ev = { + .type = V4L2_EVENT_FRAME_SYNC, + }; + struct video_device *vdev = csi2->asd.sd.devnode; + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&csi2->isys->lock, flags); + csi2->in_frame[vc] = true; + + for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) { + if (csi2->isys->pipes[i] && + csi2->isys->pipes[i]->vc == vc && + csi2->isys->pipes[i]->csi2 == csi2) { + ip = csi2->isys->pipes[i]; + break; + } + } + + /* Pipe already vanished */ + if (!ip) { + spin_unlock_irqrestore(&csi2->isys->lock, flags); + return; + } + + ev.u.frame_sync.frame_sequence = atomic_inc_return(&ip->sequence) - 1; + ev.id = ip->stream_id; + spin_unlock_irqrestore(&csi2->isys->lock, flags); + + trace_ipu_sof_seqid(ev.u.frame_sync.frame_sequence, csi2->index, vc); + v4l2_event_queue(vdev, &ev); + dev_dbg(&csi2->isys->adev->dev, + "sof_event::csi2-%i sequence: %i, vc: %d, stream_id: %d\n", + csi2->index, ev.u.frame_sync.frame_sequence, vc, ip->stream_id); +} + +void ipu_isys_csi2_eof_event(struct ipu_isys_csi2 *csi2, unsigned int vc) +{ + struct ipu_isys_pipeline *ip = NULL; + unsigned long flags; + unsigned int i; + u32 frame_sequence; + + spin_lock_irqsave(&csi2->isys->lock, flags); + csi2->in_frame[vc] = false; + if (csi2->wait_for_sync[vc]) + complete(&csi2->eof_completion); + spin_unlock_irqrestore(&csi2->isys->lock, flags); + + for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) { + if (csi2->isys->pipes[i] && + csi2->isys->pipes[i]->vc == vc && + csi2->isys->pipes[i]->csi2 == csi2) { + ip = csi2->isys->pipes[i]; + break; + } + } + + if (ip) { + frame_sequence = atomic_read(&ip->sequence); + + trace_ipu_eof_seqid(frame_sequence, csi2->index, vc); + + dev_dbg(&csi2->isys->adev->dev, + "eof_event::csi2-%i sequence: %i, vc: %d, stream_id: %d\n", + csi2->index, frame_sequence, vc, ip->stream_id); + } +} + +/* Call this function only _after_ the sensor has been stopped */ +void ipu_isys_csi2_wait_last_eof(struct ipu_isys_csi2 *csi2) +{ + unsigned long flags, tout; + unsigned int i; + + for (i = 0; i < NR_OF_CSI2_VC; i++) { + spin_lock_irqsave(&csi2->isys->lock, flags); + + if (!csi2->in_frame[i]) { + 
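/* No frame in flight on this VC, so there is no EOF to wait for. */ +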
spin_unlock_irqrestore(&csi2->isys->lock, flags); + continue; + } + + reinit_completion(&csi2->eof_completion); + csi2->wait_for_sync[i] = true; + spin_unlock_irqrestore(&csi2->isys->lock, flags); + tout = wait_for_completion_timeout(&csi2->eof_completion, + IPU_EOF_TIMEOUT_JIFFIES); + if (!tout) + dev_err(&csi2->isys->adev->dev, + "csi2-%d: timeout waiting for EOF on vc %d\n", + csi2->index, i); + csi2->wait_for_sync[i] = false; + } +} + +struct ipu_isys_buffer *ipu_isys_csi2_get_short_packet_buffer(struct + ipu_isys_pipeline + *ip) +{ + struct ipu_isys_buffer *ib; + struct ipu_isys_private_buffer *pb; + struct ipu_isys_mipi_packet_header *ph; + + if (list_empty(&ip->short_packet_incoming)) + return NULL; + ib = list_last_entry(&ip->short_packet_incoming, + struct ipu_isys_buffer, head); + pb = ipu_isys_buffer_to_private_buffer(ib); + ph = (struct ipu_isys_mipi_packet_header *)pb->buffer; + + /* Fill the packet header with a magic number. */ + ph->word_count = 0xffff; + ph->dtype = 0xff; + + dma_sync_single_for_cpu(&ip->isys->adev->dev, pb->dma_addr, + sizeof(*ph), DMA_BIDIRECTIONAL); + return ib; +} diff --git a/drivers/media/pci/intel/ipu-isys-csi2.h b/drivers/media/pci/intel/ipu-isys-csi2.h new file mode 100644 index 000000000000..a464df92dcf8 --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-csi2.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_CSI2_H +#define IPU_ISYS_CSI2_H + +#include <linux/atomic.h> +#include <linux/completion.h> + +#include "ipu-isys-queue.h" +#include "ipu-isys-subdev.h" +#include "ipu-isys-video.h" +#include "ipu-platform-isys.h" + +struct ipu_isys_csi2_timing; +struct ipu_isys_csi2_pdata; +struct ipu_isys; + +#define NR_OF_CSI2_SINK_PADS 1 +#define CSI2_PAD_SINK 0 +#define NR_OF_CSI2_STREAMS NR_OF_CSI2_VC +#define NR_OF_CSI2_SOURCE_PADS NR_OF_CSI2_STREAMS +#define CSI2_PAD_SOURCE(n) \ + ({ typeof(n) __n = (n); \ + (__n >= NR_OF_CSI2_SOURCE_PADS ?
\ + (NR_OF_CSI2_PADS - 2) : \ + (__n + NR_OF_CSI2_SINK_PADS)); }) +#ifdef IPU_META_DATA_SUPPORT +#define NR_OF_CSI2_META_PADS 1 +#define NR_OF_CSI2_PADS \ + (NR_OF_CSI2_SINK_PADS + NR_OF_CSI2_SOURCE_PADS + NR_OF_CSI2_META_PADS) +#define CSI2_PAD_META (NR_OF_CSI2_PADS - 1) +#else +#define NR_OF_CSI2_PADS (NR_OF_CSI2_SINK_PADS + NR_OF_CSI2_SOURCE_PADS) +#endif + +#define IPU_ISYS_SHORT_PACKET_BUFFER_NUM VIDEO_MAX_FRAME +#define IPU_ISYS_SHORT_PACKET_WIDTH 32 +#define IPU_ISYS_SHORT_PACKET_FRAME_PACKETS 2 +#define IPU_ISYS_SHORT_PACKET_EXTRA_PACKETS 64 +#define IPU_ISYS_SHORT_PACKET_UNITSIZE 8 +#define IPU_ISYS_SHORT_PACKET_GENERAL_DT 0 +#define IPU_ISYS_SHORT_PACKET_PT 0 +#define IPU_ISYS_SHORT_PACKET_FT 0 + +#define IPU_ISYS_SHORT_PACKET_STRIDE \ + (IPU_ISYS_SHORT_PACKET_WIDTH * \ + IPU_ISYS_SHORT_PACKET_UNITSIZE) +#define IPU_ISYS_SHORT_PACKET_NUM(num_lines) \ + ((num_lines) * 2 + IPU_ISYS_SHORT_PACKET_FRAME_PACKETS + \ + IPU_ISYS_SHORT_PACKET_EXTRA_PACKETS) +#define IPU_ISYS_SHORT_PACKET_PKT_LINES(num_lines) \ + DIV_ROUND_UP(IPU_ISYS_SHORT_PACKET_NUM(num_lines) * \ + IPU_ISYS_SHORT_PACKET_UNITSIZE, \ + IPU_ISYS_SHORT_PACKET_STRIDE) +#define IPU_ISYS_SHORT_PACKET_BUF_SIZE(num_lines) \ + (IPU_ISYS_SHORT_PACKET_WIDTH * \ + IPU_ISYS_SHORT_PACKET_PKT_LINES(num_lines) * \ + IPU_ISYS_SHORT_PACKET_UNITSIZE) + +#define IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER 256 +#define IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE 16 +#define IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE \ + (IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER * \ + IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE) + +#define IPU_ISYS_SHORT_PACKET_FROM_RECEIVER 0 +#define IPU_ISYS_SHORT_PACKET_FROM_TUNIT 1 + +#define IPU_ISYS_SHORT_PACKET_TRACE_MAX_TIMESHIFT 100 +#define IPU_ISYS_SHORT_PACKET_TRACE_EVENT_MASK 0x2082 +#define IPU_SKEW_CAL_LIMIT_HZ (1500000000ul / 2) + +#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A 0 +#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B 0 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A 95 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B -8 + +#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A 0 +#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B 0 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A 85 +#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B -2 + +#define IPU_EOF_TIMEOUT 300 +#define IPU_EOF_TIMEOUT_JIFFIES msecs_to_jiffies(IPU_EOF_TIMEOUT) + +/* + * struct ipu_isys_csi2 + * + * @nlanes: number of lanes in the receiver + */ +struct ipu_isys_csi2 { + struct ipu_isys_csi2_pdata *pdata; + struct ipu_isys *isys; + struct ipu_isys_subdev asd; + struct ipu_isys_video av[NR_OF_CSI2_SOURCE_PADS]; +#ifdef IPU_META_DATA_SUPPORT + struct ipu_isys_video av_meta; +#endif + struct completion eof_completion; + + void __iomem *base; + u32 receiver_errors; + unsigned int nlanes; + unsigned int index; + atomic_t sof_sequence; + bool in_frame[NR_OF_CSI2_VC]; + bool wait_for_sync[NR_OF_CSI2_VC]; + + unsigned int remote_streams; + unsigned int stream_count; + + struct v4l2_ctrl *store_csi2_header; +}; + +struct ipu_isys_csi2_timing { + u32 ctermen; + u32 csettle; + u32 dtermen; + u32 dsettle; +}; + +/* + * This structure defines the MIPI packet header output + * from IPU MIPI receiver. Due to hardware conversion, + * this structure is not the same as defined in CSI-2 spec. + */ +struct ipu_isys_mipi_packet_header { + u32 word_count:16, dtype:13, sync:2, stype:1; + u32 sid:4, port_id:4, reserved:23, odd_even:1; +} __packed; + +/* + * This structure defines the trace message content + * for CSI2 receiver monitor messages. 
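+ * Each message is two 64-bit words; the 66-bit timestamp is split + * across timestamp_l (16 bits) and timestamp_h (50 bits).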
+ */ +struct ipu_isys_csi2_monitor_message { + u64 fe:1, + fs:1, + pe:1, + ps:1, + le:1, + ls:1, + reserved1:2, + sequence:2, + reserved2:2, + flash_shutter:4, + error_cause:12, + fifo_overrun:1, + crc_error:2, + reserved3:1, + timestamp_l:16, + port:4, vc:2, reserved4:2, frame_sync:4, reserved5:4; + u64 reserved6:3, + cmd:2, reserved7:1, monitor_id:7, reserved8:1, timestamp_h:50; +} __packed; + +#define to_ipu_isys_csi2(sd) container_of(to_ipu_isys_subdev(sd), \ + struct ipu_isys_csi2, asd) + +int ipu_isys_csi2_get_link_freq(struct ipu_isys_csi2 *csi2, __s64 *link_freq); +int ipu_isys_csi2_init(struct ipu_isys_csi2 *csi2, + struct ipu_isys *isys, + void __iomem *base, unsigned int index); +void ipu_isys_csi2_cleanup(struct ipu_isys_csi2 *csi2); +struct ipu_isys_buffer * +ipu_isys_csi2_get_short_packet_buffer(struct ipu_isys_pipeline *ip); +void ipu_isys_csi2_sof_event(struct ipu_isys_csi2 *csi2, unsigned int vc); +void ipu_isys_csi2_eof_event(struct ipu_isys_csi2 *csi2, unsigned int vc); +void ipu_isys_csi2_wait_last_eof(struct ipu_isys_csi2 *csi2); + +/* interface for platform specific code */ +int ipu_isys_csi2_set_stream(struct v4l2_subdev *sd, + struct ipu_isys_csi2_timing timing, + unsigned int nlanes, int enable); +unsigned int ipu_isys_csi2_get_current_field(struct ipu_isys_pipeline *ip, + unsigned int *timestamp); +void ipu_isys_csi2_isr(struct ipu_isys_csi2 *csi2); +void ipu_isys_csi2_error(struct ipu_isys_csi2 *csi2); +bool ipu_isys_csi2_skew_cal_required(struct ipu_isys_csi2 *csi2); +int ipu_isys_csi2_set_skew_cal(struct ipu_isys_csi2 *csi2, int enable); + +#endif /* IPU_ISYS_CSI2_H */ diff --git a/drivers/media/pci/intel/ipu-isys-media.h b/drivers/media/pci/intel/ipu-isys-media.h new file mode 100644 index 000000000000..823324ef4a16 --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-media.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_MEDIA_H +#define IPU_ISYS_MEDIA_H + +#include <linux/version.h> +#include <media/media-device.h> + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) +#define is_media_entity_v4l2_subdev(e) \ + (media_entity_type(e) == MEDIA_ENT_T_V4L2_SUBDEV) +#define is_media_entity_v4l2_io(e) \ + (media_entity_type(e) == MEDIA_ENT_T_DEVNODE) +#define media_create_pad_link(a, b, c, d, e) \ + media_entity_create_link(a, b, c, d, e) +#define media_entity_pads_init(a, b, c) \ + media_entity_init(a, b, c, 0) +#define media_entity_id(ent) ((ent)->id) +#define media_entity_graph_walk_init(a, b) 0 +#define media_entity_graph_walk_cleanup(a) do { } while (0) + +#define IPU_COMPAT_MAX_ENTITIES MEDIA_ENTITY_ENUM_MAX_ID + +struct media_entity_enum { + unsigned long *bmap; + int idx_max; +}; + +static inline int media_entity_enum_init(struct media_entity_enum *ent_enum, + struct media_device *mdev) +{ + int idx_max = IPU_COMPAT_MAX_ENTITIES; + + ent_enum->bmap = kcalloc(DIV_ROUND_UP(idx_max, BITS_PER_LONG), + sizeof(long), GFP_KERNEL); + if (!ent_enum->bmap) + return -ENOMEM; + + bitmap_zero(ent_enum->bmap, idx_max); + + ent_enum->idx_max = idx_max; + return 0; +} + +static inline void media_entity_enum_cleanup(struct media_entity_enum *ent_enum) +{ + kfree(ent_enum->bmap); +} + +static inline void media_entity_enum_set(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (media_entity_id(entity) >= ent_enum->idx_max) { + WARN_ON(1); + return; + } + __set_bit(media_entity_id(entity), ent_enum->bmap); +}
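+ +/* + * Illustrative usage of these compat helpers (example only, not part + * of this patch; mdev and sd are placeholders for any media device + * and subdevice): + * + * struct media_entity_enum ent_enum; + * + * if (media_entity_enum_init(&ent_enum, mdev)) + * return -ENOMEM; + * media_entity_enum_set(&ent_enum, &sd->entity); + * if (media_entity_enum_test(&ent_enum, &sd->entity)) + * ...; + * media_entity_enum_cleanup(&ent_enum); + */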
+ +static inline void media_entity_enum_zero(struct media_entity_enum *ent_enum) +{ + bitmap_zero(ent_enum->bmap, ent_enum->idx_max); +} + +static inline bool media_entity_enum_test(struct media_entity_enum *ent_enum, + struct media_entity *entity) +{ + if (media_entity_id(entity) >= ent_enum->idx_max) { + WARN_ON(1); + return false; + } + + return test_bit(media_entity_id(entity), ent_enum->bmap); +} +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +#define media_pipeline_start(e, p) media_entity_pipeline_start(e, p) + +#define media_pipeline_stop(e) media_entity_pipeline_stop(e) + +#define media_graph_walk_init(g, d) media_entity_graph_walk_init(g, d) + +#define media_graph_walk_start(g, p) media_entity_graph_walk_start(g, p) + +#define media_graph_walk_next(g) media_entity_graph_walk_next(g) + +#define media_graph_walk_cleanup(g) media_entity_graph_walk_cleanup(g) +#endif + +#endif /* IPU_ISYS_MEDIA_H */ diff --git a/drivers/media/pci/intel/ipu-isys-queue.c b/drivers/media/pci/intel/ipu-isys-queue.c new file mode 100644 index 000000000000..1f6f76945953 --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-queue.c @@ -0,0 +1,1546 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include <media/media-entity.h> +#include <media/v4l2-subdev.h> +#include <media/videobuf2-dma-contig.h> + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-buttress.h" +#include "ipu-isys.h" +#include "ipu-isys-csi2.h" +#include "ipu-isys-video.h" + +static bool wall_clock_ts_on; +module_param(wall_clock_ts_on, bool, 0660); +MODULE_PARM_DESC(wall_clock_ts_on, "Timestamp based on REALTIME clock"); + +static int queue_setup(struct vb2_queue *q, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + const struct v4l2_format *__fmt, +#endif + unsigned int *num_buffers, unsigned int *num_planes, + unsigned int sizes[], +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + void *alloc_ctxs[] +#else + struct device *alloc_devs[] +#endif + ) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + const struct v4l2_format *fmt = __fmt; + const struct ipu_isys_pixelformat *pfmt; + struct v4l2_pix_format_mplane mpix; +#else + bool use_fmt = false; +#endif + unsigned int i; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + if (fmt) + mpix = fmt->fmt.pix_mp; + else + mpix = av->mpix; + + pfmt = av->try_fmt_vid_mplane(av, &mpix); + + *num_planes = mpix.num_planes; +#else + /* num_planes == 0: we're being called through VIDIOC_REQBUFS */ + if (!*num_planes) { + use_fmt = true; + *num_planes = av->mpix.num_planes; + } +#endif + + for (i = 0; i < *num_planes; i++) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + sizes[i] = mpix.plane_fmt[i].sizeimage; +#else + if (use_fmt) + sizes[i] = av->mpix.plane_fmt[i].sizeimage; +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + alloc_ctxs[i] = aq->ctx; +#else + alloc_devs[i] = aq->dev; +#endif + dev_dbg(&av->isys->adev->dev, + "%s: queue setup: plane %u size %u\n", + av->vdev.name, i, sizes[i]); + } + + return 0; +} + +void ipu_isys_queue_lock(struct vb2_queue *q) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + + dev_dbg(&av->isys->adev->dev, "%s: queue lock\n", av->vdev.name); + mutex_lock(&av->mutex); +} + +void ipu_isys_queue_unlock(struct vb2_queue *q) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + + dev_dbg(&av->isys->adev->dev, "%s: queue unlock\n", av->vdev.name);
+ mutex_unlock(&av->mutex); +} + +static int buf_init(struct vb2_buffer *vb) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + + dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name, + __func__); + + if (aq->buf_init) + return aq->buf_init(vb); + + return 0; +} + +int ipu_isys_buf_prepare(struct vb2_buffer *vb) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + + dev_dbg(&av->isys->adev->dev, + "buffer: %s: configured size %u, buffer size %lu\n", + av->vdev.name, + av->mpix.plane_fmt[0].sizeimage, vb2_plane_size(vb, 0)); + + if (av->mpix.plane_fmt[0].sizeimage > vb2_plane_size(vb, 0)) + return -EINVAL; + + vb2_set_plane_payload(vb, 0, av->mpix.plane_fmt[0].bytesperline * + av->mpix.height); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_planes[0].data_offset = av->line_header_length / BITS_PER_BYTE; +#else + vb->planes[0].data_offset = av->line_header_length / BITS_PER_BYTE; +#endif + + return 0; +} + +static int buf_prepare(struct vb2_buffer *vb) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_buffer *ib = vb2_buffer_to_ipu_isys_buffer(vb); + u32 request = +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + to_vb2_v4l2_buffer(vb)->request; +#else + vb->v4l2_buf.request; +#endif + struct media_device *mdev = &av->isys->media_dev; + struct ipu_isys_request *ireq; + u32 request_state; + unsigned long flags; + int rval; + + if (av->isys->adev->isp->flr_done) + return -EIO; + + if (request) { + ib->req = media_device_request_find(&av->isys->media_dev, + request); + if (!ib->req) { + dev_dbg(&av->isys->adev->dev, + "can't find request %u\n", request); + return -ENOENT; + } + } + + rval = aq->buf_prepare(vb); + if (!request) + return rval; + if (rval) + goto out_put_request; + + ireq = to_ipu_isys_request(ib->req); + + spin_lock_irqsave(&ireq->lock, flags); + spin_lock(&mdev->req_lock); + request_state = ib->req->state; + if (request_state == MEDIA_DEVICE_REQUEST_STATE_IDLE) + list_add(&ib->req_head, &ireq->buffers); + spin_unlock(&mdev->req_lock); + spin_unlock_irqrestore(&ireq->lock, flags); + if (request_state != MEDIA_DEVICE_REQUEST_STATE_IDLE) { + dev_dbg(&av->isys->adev->dev, + "%s: request %u state %u\n", __func__, ib->req->id, + request_state); + rval = -EINVAL; + } else { + dev_dbg(&av->isys->adev->dev, + "%s: request %u\n", __func__, ib->req->id); + } + + if (!rval) + return 0; + +out_put_request: + media_device_request_put(ib->req); + ib->req = NULL; + + return rval; +} + +static void buf_finish(struct vb2_buffer *vb) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_buffer *ib = vb2_buffer_to_ipu_isys_buffer(vb); + + dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name, + __func__); + + if (ib->req) { + struct ipu_isys_request *ireq = to_ipu_isys_request(ib->req); + unsigned long flags; + bool done; + + spin_lock_irqsave(&ireq->lock, flags); + list_del(&ib->req_head); + done = list_empty(&ireq->buffers); + spin_unlock_irqrestore(&ireq->lock, flags); + dev_dbg(&av->isys->adev->dev, "request %u complete %s\n", + ib->req->id, done ? 
"true" : "false"); + if (done) { + media_device_request_complete(&av->isys->media_dev, + ib->req); + mutex_lock(&av->isys->stream_mutex); + list_del(&ireq->head); + mutex_unlock(&av->isys->stream_mutex); + } + media_device_request_put(ib->req); + ib->req = NULL; + } +} + +static void buf_cleanup(struct vb2_buffer *vb) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + + dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name, + __func__); + + if (aq->buf_cleanup) + return aq->buf_cleanup(vb); +} + +/* + * Queue a buffer list back to incoming or active queues. The buffers + * are removed from the buffer list. + */ +void ipu_isys_buffer_list_queue(struct ipu_isys_buffer_list *bl, + unsigned long op_flags, + enum vb2_buffer_state state) +{ + struct ipu_isys_buffer *ib, *ib_safe; + unsigned long flags; + bool first = true; + + if (!bl) + return; + + WARN_ON(!bl->nbufs); + WARN_ON(op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE && + op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING); + + list_for_each_entry_safe(ib, ib_safe, &bl->head, head) { + struct ipu_isys_video *av; + + if (ib->type == IPU_ISYS_VIDEO_BUFFER) { + struct vb2_buffer *vb = + ipu_isys_buffer_to_vb2_buffer(ib); + struct ipu_isys_queue *aq = + vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + + av = ipu_isys_queue_to_video(aq); + spin_lock_irqsave(&aq->lock, flags); + list_del(&ib->head); + if (op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE) + list_add(&ib->head, &aq->active); + else if (op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING) + list_add_tail(&ib->head, &aq->incoming); + spin_unlock_irqrestore(&aq->lock, flags); + + if (op_flags & IPU_ISYS_BUFFER_LIST_FL_SET_STATE) + vb2_buffer_done(vb, state); + } else if (ib->type == IPU_ISYS_SHORT_PACKET_BUFFER) { + struct ipu_isys_private_buffer *pb = + ipu_isys_buffer_to_private_buffer(ib); + struct ipu_isys_pipeline *ip = pb->ip; + + av = container_of(ip, struct ipu_isys_video, ip); + spin_lock_irqsave(&ip->short_packet_queue_lock, flags); + list_del(&ib->head); + if (op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE) + list_add(&ib->head, &ip->short_packet_active); + else if (op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING) + list_add(&ib->head, &ip->short_packet_incoming); + spin_unlock_irqrestore(&ip->short_packet_queue_lock, + flags); + } else { + WARN_ON(1); + return; + } + + if (first) { + dev_dbg(&av->isys->adev->dev, + "queue buffer list %p op_flags %lx, state %d, %d buffers\n", + bl, op_flags, state, bl->nbufs); + first = false; + } + + bl->nbufs--; + } + + WARN_ON(bl->nbufs); +} + +/* + * flush_firmware_streamon_fail() - Flush in cases where requests may + * have been queued to firmware and the *firmware streamon fails for a + * reason or another. 
+ */ +static void flush_firmware_streamon_fail(struct ipu_isys_pipeline *ip) +{ + struct ipu_isys_video *pipe_av = + container_of(ip, struct ipu_isys_video, ip); + struct ipu_isys_queue *aq; + unsigned long flags; + + lockdep_assert_held(&pipe_av->mutex); + + list_for_each_entry(aq, &ip->queues, node) { + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_buffer *ib, *ib_safe; + + spin_lock_irqsave(&aq->lock, flags); + list_for_each_entry_safe(ib, ib_safe, &aq->active, head) { + struct vb2_buffer *vb = + ipu_isys_buffer_to_vb2_buffer(ib); + + list_del(&ib->head); + if (av->streaming) { + dev_dbg(&av->isys->adev->dev, + "%s: queue buffer %u back to incoming\n", + av->vdev.name, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.index); +#else + vb->index); +#endif + /* Queue already streaming, return to driver. */ + list_add(&ib->head, &aq->incoming); + continue; + } + /* Queue not yet streaming, return to user. */ + dev_dbg(&av->isys->adev->dev, + "%s: return %u back to videobuf2\n", + av->vdev.name, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.index); +#else + vb->index); +#endif + vb2_buffer_done(ipu_isys_buffer_to_vb2_buffer(ib), + VB2_BUF_STATE_QUEUED); + } + spin_unlock_irqrestore(&aq->lock, flags); + } +} + +/* + * Attempt obtaining a buffer list from the incoming queues, a list of + * buffers that contains one entry from each video buffer queue. If + * all queues have no buffers, the buffers that were already dequeued + * are returned to their queues. + */ +static int buffer_list_get(struct ipu_isys_pipeline *ip, + struct ipu_isys_buffer_list *bl) +{ + struct ipu_isys_queue *aq; + struct ipu_isys_buffer *ib; + unsigned long flags; + int ret = 0; + + bl->nbufs = 0; + INIT_LIST_HEAD(&bl->head); + + list_for_each_entry(aq, &ip->queues, node) { + struct ipu_isys_buffer *ib; + + spin_lock_irqsave(&aq->lock, flags); + if (list_empty(&aq->incoming)) { + spin_unlock_irqrestore(&aq->lock, flags); + ret = -ENODATA; + goto error; + } + + ib = list_last_entry(&aq->incoming, + struct ipu_isys_buffer, head); + if (ib->req) { + spin_unlock_irqrestore(&aq->lock, flags); + ret = -ENODATA; + goto error; + } + + dev_dbg(&ip->isys->adev->dev, "buffer: %s: buffer %u\n", + ipu_isys_queue_to_video(aq)->vdev.name, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + ipu_isys_buffer_to_vb2_buffer(ib)->v4l2_buf.index +#else + ipu_isys_buffer_to_vb2_buffer(ib)->index +#endif + ); + list_del(&ib->head); + list_add(&ib->head, &bl->head); + spin_unlock_irqrestore(&aq->lock, flags); + + bl->nbufs++; + } + + list_for_each_entry(ib, &bl->head, head) { + struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); + struct ipu_isys_video *av; + + aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + av = ipu_isys_queue_to_video(aq); + + if (aq->prepare_frame_buff_set) + aq->prepare_frame_buff_set(vb); + } + + /* Get short packet buffer. */ + if (ip->interlaced && ip->isys->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) { + spin_lock_irqsave(&ip->short_packet_queue_lock, flags); + ib = ipu_isys_csi2_get_short_packet_buffer(ip); + if (!ib) { + spin_unlock_irqrestore(&ip->short_packet_queue_lock, + flags); + ret = -ENODATA; + dev_err(&ip->isys->adev->dev, + "No more short packet buffers. 
Driver bug?"); + WARN_ON(1); + goto error; + } + list_move(&ib->head, &bl->head); + spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags); + bl->nbufs++; + } + + dev_dbg(&ip->isys->adev->dev, "get buffer list %p, %u buffers\n", bl, + bl->nbufs); + return ret; + +error: + if (!list_empty(&bl->head)) + ipu_isys_buffer_list_queue(bl, + IPU_ISYS_BUFFER_LIST_FL_INCOMING, 0); + return ret; +} + +void ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin( + struct vb2_buffer *vb, + struct ipu_fw_isys_frame_buff_set_abi *set) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + + set->output_pins[aq->fw_output].addr = + vb2_dma_contig_plane_dma_addr(vb, 0); + set->output_pins[aq->fw_output].out_buf_id = +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.index + 1; +#else + vb->index + 1; +#endif +} + +/* + * Convert a buffer list to a isys fw ABI framebuffer set. The + * buffer list is not modified. + */ +void ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set( + struct ipu_fw_isys_frame_buff_set_abi *set, + struct ipu_isys_pipeline *ip, + struct ipu_isys_buffer_list *bl) +{ + struct ipu_isys_buffer *ib; + + WARN_ON(!bl->nbufs); + + set->send_irq_sof = 1; + set->send_resp_sof = 1; + set->send_irq_eof = 1; + set->send_resp_eof = 1; +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + set->send_irq_capture_ack = 1; + set->send_irq_capture_done = 1; +#endif + + list_for_each_entry(ib, &bl->head, head) { + if (ib->type == IPU_ISYS_VIDEO_BUFFER) { + struct vb2_buffer *vb = + ipu_isys_buffer_to_vb2_buffer(ib); + struct ipu_isys_queue *aq = + vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + + if (aq->fill_frame_buff_set_pin) + aq->fill_frame_buff_set_pin(vb, set); + } else if (ib->type == IPU_ISYS_SHORT_PACKET_BUFFER) { + struct ipu_isys_private_buffer *pb = + ipu_isys_buffer_to_private_buffer(ib); + struct ipu_fw_isys_output_pin_payload_abi *output_pin = + &set->output_pins[ip->short_packet_output_pin]; + + output_pin->addr = pb->dma_addr; + output_pin->out_buf_id = pb->index + 1; + } else { + WARN_ON(1); + } + } +} + +static void +ipu_isys_req_dispatch(struct media_device *mdev, + struct ipu_isys_request *ireq, + struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_frame_buff_set_abi *set, + dma_addr_t dma_addr); + +struct ipu_isys_request *ipu_isys_next_queued_request(struct ipu_isys_pipeline + *ip) +{ + struct ipu_isys *isys = + container_of(ip, struct ipu_isys_video, ip)->isys; + struct ipu_isys_request *ireq; + struct ipu_isys_buffer *ib; + unsigned long flags; + + lockdep_assert_held(&isys->stream_mutex); + + if (list_empty(&isys->requests)) { + dev_dbg(&isys->adev->dev, "%s: no requests found\n", __func__); + return NULL; + } + + list_for_each_entry_reverse(ireq, &isys->requests, head) { + /* Does the request belong to this pipeline? 
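+ * A request may carry buffers for several video nodes; it is + * ours only if every buffer maps to an entity in this + * pipeline's entity_enum.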
*/ + bool is_ours = false; + bool is_others = false; + + dev_dbg(&isys->adev->dev, "%s: checking request %u\n", + __func__, ireq->req.id); + + spin_lock_irqsave(&ireq->lock, flags); + list_for_each_entry(ib, &ireq->buffers, req_head) { + struct vb2_buffer *vb = + ipu_isys_buffer_to_vb2_buffer(ib); + struct ipu_isys_queue *aq = + vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + + dev_dbg(&isys->adev->dev, "%s: buffer in vdev %s\n", + __func__, av->vdev.name); + + if (media_entity_enum_test(&ip->entity_enum, + &av->vdev.entity)) + is_ours = true; + else + is_others = true; + } + spin_unlock_irqrestore(&ireq->lock, flags); + + dev_dbg(&isys->adev->dev, "%s: is%s ours, is%s others'\n", + __func__, is_ours ? "" : "n't", is_others ? "" : "n't"); + + if (!is_ours || WARN_ON(is_others)) + continue; + + list_del_init(&ireq->head); + + return ireq; + } + + return NULL; +} + +/* Start streaming for real. The buffer list must be available. */ +static int ipu_isys_stream_start(struct ipu_isys_pipeline *ip, + struct ipu_isys_buffer_list *bl, bool error) +{ + struct ipu_isys_video *pipe_av = + container_of(ip, struct ipu_isys_video, ip); + struct media_device *mdev = &pipe_av->isys->media_dev; + struct ipu_isys_buffer_list __bl; + struct ipu_isys_request *ireq; + int rval; + + mutex_lock(&pipe_av->isys->stream_mutex); + + rval = ipu_isys_video_set_streaming(pipe_av, 1, bl); + if (rval) { + mutex_unlock(&pipe_av->isys->stream_mutex); + goto out_requeue; + } + + ip->streaming = 1; + + dev_dbg(&pipe_av->isys->adev->dev, "dispatching queued requests\n"); + + while ((ireq = ipu_isys_next_queued_request(ip))) { + struct ipu_fw_isys_frame_buff_set_abi *set; + struct isys_fw_msgs *msg; + + msg = ipu_get_fw_msg_buf(ip); + if (!msg) { + /* TODO: A PROPER CLEAN UP */ + mutex_unlock(&pipe_av->isys->stream_mutex); + return -ENOMEM; + } + + set = to_frame_msg_buf(msg); + + rval = ipu_isys_req_prepare(mdev, ireq, ip, set); + if (rval) { + mutex_unlock(&pipe_av->isys->stream_mutex); + goto out_requeue; + } + + ipu_fw_isys_dump_frame_buff_set(&pipe_av->isys->adev->dev, set, + ip->nr_output_pins); + ipu_isys_req_dispatch(mdev, ireq, ip, set, to_dma_addr(msg)); + } + + dev_dbg(&pipe_av->isys->adev->dev, + "done dispatching queued requests\n"); + + mutex_unlock(&pipe_av->isys->stream_mutex); + + bl = &__bl; + + do { + struct ipu_fw_isys_frame_buff_set_abi *buf = NULL; + struct isys_fw_msgs *msg; + + rval = buffer_list_get(ip, bl); + if (rval == -EINVAL) + goto out_requeue; + else if (rval < 0) + break; + + msg = ipu_get_fw_msg_buf(ip); + if (!msg) + /* TODO: PROPER CLEANUP */ + return -ENOMEM; + + buf = to_frame_msg_buf(msg); + + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set(buf, ip, bl); + + ipu_fw_isys_dump_frame_buff_set(&pipe_av->isys->adev->dev, buf, + ip->nr_output_pins); + + ipu_isys_buffer_list_queue(bl, + IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0); + + rval = ipu_fw_isys_complex_cmd(pipe_av->isys, + ip->stream_handle, + buf, to_dma_addr(msg), + sizeof(*buf), + IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE); + ipu_put_fw_mgs_buffer(pipe_av->isys, (uintptr_t) buf); + } while (!WARN_ON(rval)); + + return 0; + +out_requeue: + if (bl && bl->nbufs) + ipu_isys_buffer_list_queue(bl, + IPU_ISYS_BUFFER_LIST_FL_INCOMING | + (error ? + IPU_ISYS_BUFFER_LIST_FL_SET_STATE : + 0), + error ? 
VB2_BUF_STATE_ERROR : + VB2_BUF_STATE_QUEUED); + flush_firmware_streamon_fail(ip); + + return rval; +} + +static void __buf_queue(struct vb2_buffer *vb, bool force) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_buffer *ib = vb2_buffer_to_ipu_isys_buffer(vb); + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct ipu_isys_buffer_list bl; + + struct ipu_fw_isys_frame_buff_set_abi *buf = NULL; + struct isys_fw_msgs *msg; + + struct ipu_isys_video *pipe_av = + container_of(ip, struct ipu_isys_video, ip); + unsigned long flags; + unsigned int i; + int rval; + + dev_dbg(&av->isys->adev->dev, "buffer: %s: buf_queue %u\n", + av->vdev.name, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.index +#else + vb->index +#endif + ); + + for (i = 0; i < vb->num_planes; i++) + dev_dbg(&av->isys->adev->dev, "iova: plane %u iova 0x%x\n", i, + (u32) vb2_dma_contig_plane_dma_addr(vb, i)); + + spin_lock_irqsave(&aq->lock, flags); + list_add(&ib->head, &aq->incoming); + spin_unlock_irqrestore(&aq->lock, flags); + + if (ib->req) + return; + + if (!pipe_av || !vb->vb2_queue->streaming) { + dev_dbg(&av->isys->adev->dev, + "not pipe_av set, adding to incoming\n"); + return; + } + + mutex_unlock(&av->mutex); + mutex_lock(&pipe_av->mutex); + + if (!force && ip->nr_streaming != ip->nr_queues) { + dev_dbg(&av->isys->adev->dev, + "not streaming yet, adding to incoming\n"); + goto out; + } + + /* + * We just put one buffer to the incoming list of this queue + * (above). Let's see whether all queues in the pipeline would + * have a buffer. + */ + rval = buffer_list_get(ip, &bl); + if (rval < 0) { + if (rval == -EINVAL) { + dev_err(&av->isys->adev->dev, + "error: should not happen\n"); + WARN_ON(1); + } else { + dev_dbg(&av->isys->adev->dev, + "not enough buffers available\n"); + } + goto out; + } + + msg = ipu_get_fw_msg_buf(ip); + if (!msg) { + rval = -ENOMEM; + goto out; + } + buf = to_frame_msg_buf(msg); + + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set(buf, ip, &bl); + + ipu_fw_isys_dump_frame_buff_set(&pipe_av->isys->adev->dev, buf, + ip->nr_output_pins); + + if (!ip->streaming) { + dev_dbg(&av->isys->adev->dev, + "Wow! Got a buffer to start streaming!\n"); + rval = ipu_isys_stream_start(ip, &bl, true); + if (rval) + dev_err(&av->isys->adev->dev, + "Ouch. Stream start failed.\n"); + goto out; + } + + /* + * We must queue the buffers in the buffer list to the + * appropriate video buffer queues BEFORE passing them to the + * firmware since we could get a buffer event back before we + * have queued them ourselves to the active queue. + */ + ipu_isys_buffer_list_queue(&bl, IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0); + + rval = ipu_fw_isys_complex_cmd(pipe_av->isys, + ip->stream_handle, + buf, to_dma_addr(msg), + sizeof(*buf), + IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE); + ipu_put_fw_mgs_buffer(pipe_av->isys, (uintptr_t) buf); + /* + * FIXME: mark the buffers in the buffer list if the queue + * operation fails. 
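+ * Until then a failure here only triggers the WARN_ON() below + * and the buffers remain on the active queue.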
+ */ + if (!WARN_ON(rval < 0)) + dev_dbg(&av->isys->adev->dev, "queued buffer\n"); + +out: + mutex_unlock(&pipe_av->mutex); + mutex_lock(&av->mutex); +} + +static void buf_queue(struct vb2_buffer *vb) +{ + __buf_queue(vb, false); +} + +int ipu_isys_link_fmt_validate(struct ipu_isys_queue *aq) +{ + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct v4l2_subdev_format fmt = { 0 }; + struct media_pad *pad = media_entity_remote_pad(av->vdev.entity.pads); + struct v4l2_subdev *sd; + int rval; + + if (!pad) { + dev_dbg(&av->isys->adev->dev, + "video node %s pad not connected\n", av->vdev.name); + return -ENOTCONN; + } + + sd = media_entity_to_v4l2_subdev(pad->entity); + + fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; + fmt.pad = pad->index; + fmt.stream = 0; + rval = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt); + if (rval) + return rval; + + if (fmt.format.width != av->mpix.width || + fmt.format.height != av->mpix.height) { + dev_dbg(&av->isys->adev->dev, + "wrong width or height %ux%u (%ux%u expected)\n", + av->mpix.width, av->mpix.height, + fmt.format.width, fmt.format.height); + return -EINVAL; + } + + if (fmt.format.field != av->mpix.field) { + dev_dbg(&av->isys->adev->dev, + "wrong field value 0x%8.8x (0x%8.8x expected)\n", + av->mpix.field, fmt.format.field); + return -EINVAL; + } + + if (fmt.format.code != av->pfmt->code) { + dev_dbg(&av->isys->adev->dev, + "wrong media bus code 0x%8.8x (0x%8.8x expected)\n", + av->pfmt->code, fmt.format.code); + return -EINVAL; + } + + return 0; +} + +/* Return buffers back to videobuf2. */ +static void return_buffers(struct ipu_isys_queue *aq, + enum vb2_buffer_state state) +{ + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + int reset_needed = 0; + unsigned long flags; + + spin_lock_irqsave(&aq->lock, flags); + while (!list_empty(&aq->incoming)) { + struct ipu_isys_buffer *ib = list_first_entry(&aq->incoming, + struct + ipu_isys_buffer, + head); + struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); + + list_del(&ib->head); + spin_unlock_irqrestore(&aq->lock, flags); + + vb2_buffer_done(vb, state); + + dev_dbg(&av->isys->adev->dev, + "%s: stop_streaming incoming %u\n", + ipu_isys_queue_to_video(vb2_queue_to_ipu_isys_queue + (vb->vb2_queue))->vdev.name, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.index); +#else + vb->index); +#endif + + spin_lock_irqsave(&aq->lock, flags); + } + + /* + * Something went wrong (FW crash / HW hang / not all buffers + * returned from isys) if there are still buffers queued in active + * queue. We have to clean up places a bit. 
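+ * Setting reset_needed below asks the core to reset the ISYS + * before it is used again.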
+ */ + while (!list_empty(&aq->active)) { + struct ipu_isys_buffer *ib = list_first_entry(&aq->active, + struct + ipu_isys_buffer, + head); + struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); + + list_del(&ib->head); + spin_unlock_irqrestore(&aq->lock, flags); + + vb2_buffer_done(vb, state); + + dev_warn(&av->isys->adev->dev, "%s: cleaning active queue %u\n", + ipu_isys_queue_to_video(vb2_queue_to_ipu_isys_queue + (vb->vb2_queue))->vdev.name, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.index); +#else + vb->index); +#endif + + spin_lock_irqsave(&aq->lock, flags); + reset_needed = 1; + } + + spin_unlock_irqrestore(&aq->lock, flags); + + if (reset_needed) { + mutex_lock(&av->isys->mutex); + av->isys->reset_needed = true; + mutex_unlock(&av->isys->mutex); + } +} + +static int start_streaming(struct vb2_queue *q, unsigned int count) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_video *pipe_av; + struct ipu_isys_pipeline *ip; + struct ipu_isys_buffer_list __bl, *bl = NULL; + bool first; + int rval; + + dev_dbg(&av->isys->adev->dev, + "stream: %s: width %u, height %u, css pixelformat %u\n", + av->vdev.name, av->mpix.width, av->mpix.height, + av->pfmt->css_pixelformat); + + mutex_lock(&av->isys->stream_mutex); + + first = !av->vdev.entity.pipe; + + if (first) { + rval = ipu_isys_video_prepare_streaming(av, 1); + if (rval) + goto out_return_buffers; + } + + mutex_unlock(&av->isys->stream_mutex); + + rval = aq->link_fmt_validate(aq); + if (rval) { + dev_dbg(&av->isys->adev->dev, + "%s: link format validation failed (%d)\n", + av->vdev.name, rval); + goto out_unprepare_streaming; + } + + ip = to_ipu_isys_pipeline(av->vdev.entity.pipe); + pipe_av = container_of(ip, struct ipu_isys_video, ip); + mutex_unlock(&av->mutex); + + mutex_lock(&pipe_av->mutex); + ip->nr_streaming++; + dev_dbg(&av->isys->adev->dev, "queue %u of %u\n", ip->nr_streaming, + ip->nr_queues); + list_add(&aq->node, &ip->queues); + if (ip->nr_streaming != ip->nr_queues) + goto out; + + if (list_empty(&av->isys->requests)) { + bl = &__bl; + rval = buffer_list_get(ip, bl); + if (rval == -EINVAL) { + goto out_stream_start; + } else if (rval < 0) { + dev_dbg(&av->isys->adev->dev, + "no request available --- postponing streamon\n"); + goto out; + } + } + + rval = ipu_isys_stream_start(ip, bl, false); + if (rval) + goto out_stream_start; + +out: + mutex_unlock(&pipe_av->mutex); + mutex_lock(&av->mutex); + + return 0; + +out_stream_start: + list_del(&aq->node); + ip->nr_streaming--; + mutex_unlock(&pipe_av->mutex); + mutex_lock(&av->mutex); + +out_unprepare_streaming: + mutex_lock(&av->isys->stream_mutex); + if (first) + ipu_isys_video_prepare_streaming(av, 0); + +out_return_buffers: + mutex_unlock(&av->isys->stream_mutex); + return_buffers(aq, VB2_BUF_STATE_QUEUED); + + return rval; +} + +static void stop_streaming(struct vb2_queue *q) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct ipu_isys_video *pipe_av = + container_of(ip, struct ipu_isys_video, ip); + + if (pipe_av != av) { + mutex_unlock(&av->mutex); + mutex_lock(&pipe_av->mutex); + } + + mutex_lock(&av->isys->stream_mutex); + if (ip->nr_streaming == ip->nr_queues && ip->streaming) + ipu_isys_video_set_streaming(av, 0, NULL); + if (ip->nr_streaming == 1) + ipu_isys_video_prepare_streaming(av, 0); + 
mutex_unlock(&av->isys->stream_mutex); + + ip->nr_streaming--; + list_del(&aq->node); + ip->streaming = 0; + + if (pipe_av != av) { + mutex_unlock(&pipe_av->mutex); + mutex_lock(&av->mutex); + } + + return_buffers(aq, VB2_BUF_STATE_ERROR); +} + +static unsigned int +get_sof_sequence_by_timestamp(struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *info) +{ + struct ipu_isys *isys = + container_of(ip, struct ipu_isys_video, ip)->isys; + u64 time = (u64) info->timestamp[1] << 32 | info->timestamp[0]; + unsigned int i; + + for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++) + if (time == ip->seq[i].timestamp) { + dev_dbg(&isys->adev->dev, + "sof: using sequence number %u for timestamp 0x%16.16llx\n", + ip->seq[i].sequence, time); + return ip->seq[i].sequence; + } + + dev_dbg(&isys->adev->dev, "SOF: looking for 0x%16.16llx\n", time); + for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++) + dev_dbg(&isys->adev->dev, + "SOF: sequence %u, timestamp value 0x%16.16llx\n", + ip->seq[i].sequence, ip->seq[i].timestamp); + dev_dbg(&isys->adev->dev, "SOF sequence number not found\n"); + + return 0; +} + +static u64 get_sof_ns_delta(struct ipu_isys_video *av, + struct ipu_fw_isys_resp_info_abi *info) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(&av->isys->adev->dev); + struct ipu_device *isp = adev->isp; + u64 delta, tsc_now; + + if (!ipu_buttress_tsc_read(isp, &tsc_now)) + delta = tsc_now - + ((u64) info->timestamp[1] << 32 | info->timestamp[0]); + else + delta = 0; + + return ipu_buttress_tsc_ticks_to_ns(delta); +} + +void +ipu_isys_buf_calc_sequence_time(struct ipu_isys_buffer *ib, + struct ipu_fw_isys_resp_info_abi *info) +{ + struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + struct timespec ts_now; +#endif + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + u64 ns; + u32 sequence; + + if (ip->has_sof) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + ns = ktime_to_ns((wall_clock_ts_on) ? ktime_get_real() : + ktime_get()); + ns -= get_sof_ns_delta(av, info); +#else + ns = (wall_clock_ts_on) ? ktime_get_real_ns() : ktime_get_ns(); + ns -= get_sof_ns_delta(av, info); +#endif + sequence = get_sof_sequence_by_timestamp(ip, info); + } else { +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + ns = ktime_to_ns((wall_clock_ts_on) ? ktime_get_real() : + ktime_get()); +#else + ns = ((wall_clock_ts_on) ? 
ktime_get_real_ns() : + ktime_get_ns()); +#endif + sequence = (atomic_inc_return(&ip->sequence) - 1) + / ip->nr_queues; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.sequence = sequence; + ts_now = ns_to_timespec(ns); + vb->v4l2_buf.timestamp.tv_sec = ts_now.tv_sec; + vb->v4l2_buf.timestamp.tv_usec = ts_now.tv_nsec / NSEC_PER_USEC; + + dev_dbg(&av->isys->adev->dev, "buffer: %s: buffer done %u\n", + av->vdev.name, vb->v4l2_buf.index); +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + vbuf->sequence = sequence; + ts_now = ns_to_timespec(ns); + vbuf->timestamp.tv_sec = ts_now.tv_sec; + vbuf->timestamp.tv_usec = ts_now.tv_nsec / NSEC_PER_USEC; + + dev_dbg(&av->isys->adev->dev, "%s: buffer done %u\n", av->vdev.name, + vb->index); +#else + vbuf->vb2_buf.timestamp = ns; + vbuf->sequence = sequence; + + dev_dbg(&av->isys->adev->dev, "buffer: %s: buffer done %u\n", + av->vdev.name, vb->index); +#endif +} + +void ipu_isys_queue_buf_done(struct ipu_isys_buffer *ib) +{ + struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); + + if (atomic_read(&ib->str2mmio_flag)) { + vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); + /* + * Operation on buffer is ended with error and will be reported + * to the userspace when it is de-queued + */ + atomic_set(&ib->str2mmio_flag, 0); + } else { + vb2_buffer_done(vb, VB2_BUF_STATE_DONE); + } +} + +void ipu_isys_queue_buf_ready(struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *info) +{ + struct ipu_isys *isys = + container_of(ip, struct ipu_isys_video, ip)->isys; + struct ipu_isys_queue *aq = ip->output_pins[info->pin_id].aq; + struct ipu_isys_buffer *ib; + struct vb2_buffer *vb; + unsigned long flags; + bool first = true; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + struct v4l2_buffer *buf; +#else + struct vb2_v4l2_buffer *buf; +#endif + + dev_dbg(&isys->adev->dev, "buffer: %s: received buffer %8.8x\n", + ipu_isys_queue_to_video(aq)->vdev.name, info->pin.addr); + + spin_lock_irqsave(&aq->lock, flags); + if (list_empty(&aq->active)) { + spin_unlock_irqrestore(&aq->lock, flags); + dev_err(&isys->adev->dev, "active queue empty\n"); + return; + } + + list_for_each_entry_reverse(ib, &aq->active, head) { + dma_addr_t addr; + + vb = ipu_isys_buffer_to_vb2_buffer(ib); + addr = vb2_dma_contig_plane_dma_addr(vb, 0); + + if (info->pin.addr != addr) { + if (first) + dev_err(&isys->adev->dev, + "WARNING: buffer address %pad expected!\n", + &addr); + first = false; + continue; + } + + if (info->error_info.error == + IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO) { + /* + * Check for error message: + * 'IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO' + */ + atomic_set(&ib->str2mmio_flag, 1); + } + dev_dbg(&isys->adev->dev, "buffer: found buffer %pad\n", &addr); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + buf = &vb->v4l2_buf; +#else + buf = to_vb2_v4l2_buffer(vb); +#endif + buf->field = V4L2_FIELD_NONE; + + /* + * Use "reserved" field to pass csi2 index and vc. + * May need to change to other approach. + */ + buf->reserved &= 0xFFFFFF00; + if (ip->csi2) + buf->reserved |= ip->csi2->index << 4; + buf->reserved |= ip->vc; + + list_del(&ib->head); + spin_unlock_irqrestore(&aq->lock, flags); + + ipu_isys_buf_calc_sequence_time(ib, info); + + /* + * For interlaced buffers, the notification to user space + * is postponed to capture_done event since the field + * information is available only at that time. 
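+ * The field is taken from ip->cur_field, which + * ipu_isys_queue_short_packet_ready() updates from the short + * packet stream.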
+ */ + if (ip->interlaced) { + spin_lock_irqsave(&ip->short_packet_queue_lock, flags); + list_add(&ib->head, &ip->pending_interlaced_bufs); + spin_unlock_irqrestore(&ip->short_packet_queue_lock, + flags); + } else { + ipu_isys_queue_buf_done(ib); + } + + return; + } + + dev_err(&isys->adev->dev, + "WARNING: cannot find a matching video buffer!\n"); + + spin_unlock_irqrestore(&aq->lock, flags); +} + +void +ipu_isys_queue_short_packet_ready(struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *info) +{ + struct ipu_isys *isys = + container_of(ip, struct ipu_isys_video, ip)->isys; + unsigned long flags; + + dev_dbg(&isys->adev->dev, "receive short packet buffer %8.8x\n", + info->pin.addr); + spin_lock_irqsave(&ip->short_packet_queue_lock, flags); + ip->cur_field = ipu_isys_csi2_get_current_field(ip, info->timestamp); + spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags); +} + +void ipu_isys_req_free(struct media_device *mdev, + struct media_device_request *req) +{ + struct ipu_isys_request *ireq = to_ipu_isys_request(req); + + kfree(ireq); +} + +struct +media_device_request *ipu_isys_req_alloc(struct media_device *mdev) +{ + struct ipu_isys_request *ireq; + + ireq = kzalloc(sizeof(*ireq), GFP_KERNEL); + if (!ireq) + return NULL; + + INIT_LIST_HEAD(&ireq->buffers); + spin_lock_init(&ireq->lock); + INIT_LIST_HEAD(&ireq->head); + + return &ireq->req; +} + +int ipu_isys_req_prepare(struct media_device *mdev, + struct ipu_isys_request *ireq, + struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_frame_buff_set_abi *set) +{ + struct ipu_isys *isys = + container_of(ip, struct ipu_isys_video, ip)->isys; + struct media_device_request *req = &ireq->req; + struct ipu_isys_buffer *ib; + unsigned long flags; + + dev_dbg(&isys->adev->dev, "preparing request %u\n", req->id); + + set->send_irq_sof = 1; + set->send_resp_sof = 1; + set->send_irq_eof = 1; + set->send_resp_eof = 1; +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + set->send_irq_capture_ack = 1; + set->send_irq_capture_done = 1; +#endif + + spin_lock_irqsave(&ireq->lock, flags); + + list_for_each_entry(ib, &ireq->buffers, req_head) { + struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); + struct ipu_isys_queue *aq = + vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + + if (aq->prepare_frame_buff_set) + aq->prepare_frame_buff_set(vb); + + if (aq->fill_frame_buff_set_pin) + aq->fill_frame_buff_set_pin(vb, set); + + spin_lock(&aq->lock); + list_move(&ib->head, &aq->active); + spin_unlock(&aq->lock); + } + + spin_unlock_irqrestore(&ireq->lock, flags); + + return 0; +} + +static void +ipu_isys_req_dispatch(struct media_device *mdev, + struct ipu_isys_request *ireq, + struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_frame_buff_set_abi *set, + dma_addr_t dma_addr) +{ + struct ipu_isys_video *pipe_av = + container_of(ip, struct ipu_isys_video, ip); + int rval; + + rval = ipu_fw_isys_complex_cmd(pipe_av->isys, + ip->stream_handle, + set, dma_addr, sizeof(*set), + IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE); + ipu_put_fw_mgs_buffer(pipe_av->isys, (uintptr_t) set); + + WARN_ON(rval); +} + +int ipu_isys_req_queue(struct media_device *mdev, + struct media_device_request *req) +{ + struct ipu_isys *isys = container_of(mdev, struct ipu_isys, media_dev); + struct ipu_isys_request *ireq = to_ipu_isys_request(req); + struct ipu_isys_pipeline *ip; + struct ipu_isys_buffer *ib; + struct media_pipeline *pipe = NULL; + unsigned long flags; + bool no_pipe = false; + int rval = 0; + + spin_lock_irqsave(&ireq->lock, flags); + 
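/* An empty request cannot be queued. */ +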
if (list_empty(&ireq->buffers)) { + rval = -ENODATA; + goto out_list_empty; + } + + /* Verify that all buffers are related to a single pipeline. */ + list_for_each_entry(ib, &ireq->buffers, req_head) { + struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib); + struct ipu_isys_queue *aq = + vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + + dev_dbg(&isys->adev->dev, "%s: device %s, id %u\n", __func__, + av->vdev.name, vb-> +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + v4l2_buf. +#endif + index); + if (!pipe) { + if (!av->vdev.entity.pipe) { + no_pipe = true; + continue; + } + + pipe = av->vdev.entity.pipe; + dev_dbg(&isys->adev->dev, "%s: pipe %p\n", + av->vdev.name, pipe); + continue; + } + + if (av->vdev.entity.pipe != pipe) { + dev_dbg(&isys->adev->dev, + "request %u includes buffers in multiple pipelines\n", + req->id); + rval = -EINVAL; + goto out_list_empty; + } + } + + spin_unlock_irqrestore(&ireq->lock, flags); + + mutex_lock(&isys->stream_mutex); + + ip = to_ipu_isys_pipeline(pipe); + + if (pipe && ip->streaming) { + struct isys_fw_msgs *msg; + struct ipu_fw_isys_frame_buff_set_abi *set; + + msg = ipu_get_fw_msg_buf(ip); + if (!msg) { + rval = -ENOMEM; + goto out_mutex_unlock; + } + + set = to_frame_msg_buf(msg); + + if (no_pipe) { + dev_dbg(&isys->adev->dev, + "request %u includes buffers in and outside pipelines\n", + req->id); + rval = -EINVAL; + goto out_mutex_unlock; + } + + dev_dbg(&isys->adev->dev, + "request has a pipeline, dispatching\n"); + rval = ipu_isys_req_prepare(mdev, ireq, ip, set); + if (rval) + goto out_mutex_unlock; + + ipu_fw_isys_dump_frame_buff_set(&isys->adev->dev, set, + ip->nr_output_pins); + ipu_isys_req_dispatch(mdev, ireq, ip, set, to_dma_addr(msg)); + } else { + dev_dbg(&isys->adev->dev, + "%s: adding request %u to the mdev queue\n", __func__, + req->id); + + list_add(&ireq->head, &isys->requests); + } + +out_mutex_unlock: + mutex_unlock(&isys->stream_mutex); + + return rval; + +out_list_empty: + spin_unlock_irqrestore(&ireq->lock, flags); + + return rval; +} + +struct vb2_ops ipu_isys_queue_ops = { + .queue_setup = queue_setup, + .wait_prepare = ipu_isys_queue_unlock, + .wait_finish = ipu_isys_queue_lock, + .buf_init = buf_init, + .buf_prepare = buf_prepare, + .buf_finish = buf_finish, + .buf_cleanup = buf_cleanup, + .start_streaming = start_streaming, + .stop_streaming = stop_streaming, + .buf_queue = buf_queue, +}; + +int ipu_isys_queue_init(struct ipu_isys_queue *aq) +{ + struct ipu_isys *isys = ipu_isys_queue_to_video(aq)->isys; + int rval; + + if (!aq->vbq.io_modes) + aq->vbq.io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF; + aq->vbq.drv_priv = aq; + aq->vbq.allow_requests = true; + aq->vbq.ops = &ipu_isys_queue_ops; + aq->vbq.mem_ops = &vb2_dma_contig_memops; + aq->vbq.timestamp_flags = (wall_clock_ts_on) ? 
+ V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN : V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + + rval = vb2_queue_init(&aq->vbq); + if (rval) + return rval; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + aq->ctx = vb2_dma_contig_init_ctx(&isys->adev->dev); + if (IS_ERR(aq->ctx)) { + vb2_queue_release(&aq->vbq); + return PTR_ERR(aq->ctx); + } +#else + aq->dev = &isys->adev->dev; + aq->vbq.dev = &isys->adev->dev; +#endif + spin_lock_init(&aq->lock); + INIT_LIST_HEAD(&aq->active); + INIT_LIST_HEAD(&aq->incoming); + + return 0; +} + +void ipu_isys_queue_cleanup(struct ipu_isys_queue *aq) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + if (IS_ERR_OR_NULL(aq->ctx)) + return; + + vb2_dma_contig_cleanup_ctx(aq->ctx); + aq->ctx = NULL; +#endif + vb2_queue_release(&aq->vbq); +} diff --git a/drivers/media/pci/intel/ipu-isys-queue.h b/drivers/media/pci/intel/ipu-isys-queue.h new file mode 100644 index 000000000000..4162b0547479 --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-queue.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_QUEUE_H +#define IPU_ISYS_QUEUE_H + +#include +#include + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +#include +#else +#include +#endif + + +struct ipu_isys_video; +struct ipu_isys_pipeline; +struct ipu_fw_isys_resp_info_abi; +struct ipu_fw_isys_frame_buff_set_abi; + +enum ipu_isys_buffer_type { + IPU_ISYS_VIDEO_BUFFER, + IPU_ISYS_SHORT_PACKET_BUFFER, +}; + +struct ipu_isys_queue { + struct list_head node; /* struct ipu_isys_pipeline.queues */ + struct vb2_queue vbq; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct vb2_alloc_ctx *ctx; +#else + struct device *dev; +#endif + /* + * @lock: serialise access to the active and incoming lists + */ + spinlock_t lock; + struct list_head active; + struct list_head incoming; + u32 css_pin_type; + unsigned int fw_output; + int (*buf_init)(struct vb2_buffer *vb); + void (*buf_cleanup)(struct vb2_buffer *vb); + int (*buf_prepare)(struct vb2_buffer *vb); + void (*prepare_frame_buff_set)(struct vb2_buffer *vb); + void (*fill_frame_buff_set_pin)(struct vb2_buffer *vb, + struct ipu_fw_isys_frame_buff_set_abi * + set); + int (*link_fmt_validate)(struct ipu_isys_queue *aq); +}; + +struct ipu_isys_buffer { + struct list_head head; + enum ipu_isys_buffer_type type; + struct list_head req_head; + struct media_device_request *req; + atomic_t str2mmio_flag; +}; + +struct ipu_isys_video_buffer { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + struct vb2_buffer vb; +#else + struct vb2_v4l2_buffer vb_v4l2; +#endif + struct ipu_isys_buffer ib; +}; + +struct ipu_isys_private_buffer { + struct ipu_isys_buffer ib; + struct ipu_isys_pipeline *ip; + unsigned int index; + unsigned int bytesused; + dma_addr_t dma_addr; + void *buffer; +}; + +#define IPU_ISYS_BUFFER_LIST_FL_INCOMING BIT(0) +#define IPU_ISYS_BUFFER_LIST_FL_ACTIVE BIT(1) +#define IPU_ISYS_BUFFER_LIST_FL_SET_STATE BIT(2) + +struct ipu_isys_buffer_list { + struct list_head head; + unsigned int nbufs; +}; + +#define vb2_queue_to_ipu_isys_queue(__vb2) \ + container_of(__vb2, struct ipu_isys_queue, vbq) + +#define ipu_isys_to_isys_video_buffer(__ib) \ + container_of(__ib, struct ipu_isys_video_buffer, ib) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +#define vb2_buffer_to_ipu_isys_video_buffer(__vb) \ + container_of(__vb, struct ipu_isys_video_buffer, vb) + +#define ipu_isys_buffer_to_vb2_buffer(__ib) \ + (&ipu_isys_to_isys_video_buffer(__ib)->vb) +#else +#define 
vb2_buffer_to_ipu_isys_video_buffer(__vb) \ + container_of(to_vb2_v4l2_buffer(__vb), \ + struct ipu_isys_video_buffer, vb_v4l2) + +#define ipu_isys_buffer_to_vb2_buffer(__ib) \ + (&ipu_isys_to_isys_video_buffer(__ib)->vb_v4l2.vb2_buf) +#endif + +#define vb2_buffer_to_ipu_isys_buffer(__vb) \ + (&vb2_buffer_to_ipu_isys_video_buffer(__vb)->ib) + +#define ipu_isys_buffer_to_private_buffer(__ib) \ + container_of(__ib, struct ipu_isys_private_buffer, ib) + +struct ipu_isys_request { + struct media_device_request req; + /* serialise access to buffers */ + spinlock_t lock; + struct list_head buffers; /* struct ipu_isys_buffer.head */ + bool dispatched; + /* + * struct ipu_isys.requests; + * struct ipu_isys_pipeline.struct.* + */ + struct list_head head; +}; + +#define to_ipu_isys_request(__req) \ + container_of(__req, struct ipu_isys_request, req) + +void ipu_isys_queue_lock(struct vb2_queue *q); +void ipu_isys_queue_unlock(struct vb2_queue *q); + +int ipu_isys_buf_prepare(struct vb2_buffer *vb); + +void ipu_isys_buffer_list_queue(struct ipu_isys_buffer_list *bl, + unsigned long op_flags, + enum vb2_buffer_state state); +struct ipu_isys_request *ipu_isys_next_queued_request( + struct ipu_isys_pipeline *ip); +void ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin( + struct vb2_buffer *vb, + struct ipu_fw_isys_frame_buff_set_abi *set); +void ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set( + struct ipu_fw_isys_frame_buff_set_abi *set, + struct ipu_isys_pipeline *ip, + struct ipu_isys_buffer_list *bl); +int ipu_isys_link_fmt_validate(struct ipu_isys_queue *aq); + +void +ipu_isys_buf_calc_sequence_time(struct ipu_isys_buffer *ib, + struct ipu_fw_isys_resp_info_abi *info); +void ipu_isys_queue_buf_done(struct ipu_isys_buffer *ib); +void ipu_isys_queue_buf_ready(struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *info); +void +ipu_isys_queue_short_packet_ready(struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *inf); + +void ipu_isys_req_free(struct media_device *mdev, + struct media_device_request *req); +struct media_device_request *ipu_isys_req_alloc(struct media_device *mdev); +int ipu_isys_req_prepare(struct media_device *mdev, + struct ipu_isys_request *ireq, + struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_frame_buff_set_abi *set); +int ipu_isys_req_queue(struct media_device *mdev, + struct media_device_request *req); + +int ipu_isys_queue_init(struct ipu_isys_queue *aq); +void ipu_isys_queue_cleanup(struct ipu_isys_queue *aq); + +#endif /* IPU_ISYS_QUEUE_H */ diff --git a/drivers/media/pci/intel/ipu-isys-subdev.c b/drivers/media/pci/intel/ipu-isys-subdev.c new file mode 100644 index 000000000000..7a72df4d3115 --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-subdev.c @@ -0,0 +1,957 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include + +#include + +#include + +#include "ipu-isys.h" +#include "ipu-isys-video.h" +#include "ipu-isys-subdev.h" + +unsigned int ipu_isys_mbus_code_to_bpp(u32 code) +{ + switch (code) { + case MEDIA_BUS_FMT_RGB888_1X24: + return 24; + case MEDIA_BUS_FMT_Y10_1X10: + case MEDIA_BUS_FMT_RGB565_1X16: + case MEDIA_BUS_FMT_UYVY8_1X16: + case MEDIA_BUS_FMT_YUYV8_1X16: + return 16; + case MEDIA_BUS_FMT_SBGGR14_1X14: + case MEDIA_BUS_FMT_SGBRG14_1X14: + case MEDIA_BUS_FMT_SGRBG14_1X14: + case MEDIA_BUS_FMT_SRGGB14_1X14: + return 14; + case MEDIA_BUS_FMT_SBGGR12_1X12: + case MEDIA_BUS_FMT_SGBRG12_1X12: + case MEDIA_BUS_FMT_SGRBG12_1X12: + case MEDIA_BUS_FMT_SRGGB12_1X12: + return 
12; + case MEDIA_BUS_FMT_SBGGR10_1X10: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SRGGB10_1X10: + return 10; + case MEDIA_BUS_FMT_SBGGR8_1X8: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SRGGB8_1X8: + case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8: + case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8: + case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8: + case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8: + return 8; + default: + WARN_ON(1); + return -EINVAL; + } +} + +unsigned int ipu_isys_mbus_code_to_mipi(u32 code) +{ + switch (code) { + case MEDIA_BUS_FMT_RGB565_1X16: + return IPU_ISYS_MIPI_CSI2_TYPE_RGB565; + case MEDIA_BUS_FMT_RGB888_1X24: + return IPU_ISYS_MIPI_CSI2_TYPE_RGB888; + case MEDIA_BUS_FMT_UYVY8_1X16: + case MEDIA_BUS_FMT_YUYV8_1X16: + return IPU_ISYS_MIPI_CSI2_TYPE_YUV422_8; + case MEDIA_BUS_FMT_SBGGR14_1X14: + case MEDIA_BUS_FMT_SGBRG14_1X14: + case MEDIA_BUS_FMT_SGRBG14_1X14: + case MEDIA_BUS_FMT_SRGGB14_1X14: + return IPU_ISYS_MIPI_CSI2_TYPE_RAW14; + case MEDIA_BUS_FMT_SBGGR12_1X12: + case MEDIA_BUS_FMT_SGBRG12_1X12: + case MEDIA_BUS_FMT_SGRBG12_1X12: + case MEDIA_BUS_FMT_SRGGB12_1X12: + return IPU_ISYS_MIPI_CSI2_TYPE_RAW12; + case MEDIA_BUS_FMT_Y10_1X10: + case MEDIA_BUS_FMT_SBGGR10_1X10: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SRGGB10_1X10: + return IPU_ISYS_MIPI_CSI2_TYPE_RAW10; + case MEDIA_BUS_FMT_SBGGR8_1X8: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SRGGB8_1X8: + return IPU_ISYS_MIPI_CSI2_TYPE_RAW8; + case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8: + case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8: + case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8: + case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8: + return IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(1); + default: + WARN_ON(1); + return -EINVAL; + } +} + +enum ipu_isys_subdev_pixelorder ipu_isys_subdev_get_pixelorder(u32 code) +{ + switch (code) { + case MEDIA_BUS_FMT_SBGGR14_1X14: + case MEDIA_BUS_FMT_SBGGR12_1X12: + case MEDIA_BUS_FMT_SBGGR10_1X10: + case MEDIA_BUS_FMT_SBGGR8_1X8: + case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8: + return IPU_ISYS_SUBDEV_PIXELORDER_BGGR; + case MEDIA_BUS_FMT_SGBRG14_1X14: + case MEDIA_BUS_FMT_SGBRG12_1X12: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8: + return IPU_ISYS_SUBDEV_PIXELORDER_GBRG; + case MEDIA_BUS_FMT_SGRBG14_1X14: + case MEDIA_BUS_FMT_SGRBG12_1X12: + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8: + return IPU_ISYS_SUBDEV_PIXELORDER_GRBG; + case MEDIA_BUS_FMT_SRGGB14_1X14: + case MEDIA_BUS_FMT_SRGGB12_1X12: + case MEDIA_BUS_FMT_SRGGB10_1X10: + case MEDIA_BUS_FMT_SRGGB8_1X8: + case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8: + return IPU_ISYS_SUBDEV_PIXELORDER_RGGB; + default: + WARN_ON(1); + return -EINVAL; + } +} + +u32 ipu_isys_subdev_code_to_uncompressed(u32 sink_code) +{ + switch (sink_code) { + case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8: + return MEDIA_BUS_FMT_SBGGR10_1X10; + case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8: + return MEDIA_BUS_FMT_SGBRG10_1X10; + case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8: + return MEDIA_BUS_FMT_SGRBG10_1X10; + case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8: + return MEDIA_BUS_FMT_SRGGB10_1X10; + default: + return sink_code; + } +} + +struct v4l2_mbus_framefmt *__ipu_isys_get_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config + *cfg, +#endif + unsigned int pad, + unsigned int stream, + 
unsigned int which) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + + if (which == V4L2_SUBDEV_FORMAT_ACTIVE) + return &asd->ffmt[pad][stream]; + else + return v4l2_subdev_get_try_format( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + sd, +#endif + cfg, pad); +} + +struct v4l2_rect *__ipu_isys_get_selection(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + unsigned int target, + unsigned int pad, unsigned int which) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + + if (which == V4L2_SUBDEV_FORMAT_ACTIVE) { + switch (target) { + case V4L2_SEL_TGT_CROP: + return &asd->crop[pad]; + case V4L2_SEL_TGT_COMPOSE: + return &asd->compose[pad]; + } + } else { + switch (target) { + case V4L2_SEL_TGT_CROP: + return v4l2_subdev_get_try_crop( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + sd, +#endif + cfg, pad); + case V4L2_SEL_TGT_COMPOSE: + return v4l2_subdev_get_try_compose( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + sd, +#endif + cfg, pad); + } + } + WARN_ON(1); + return NULL; +} + +static int target_valid(struct v4l2_subdev *sd, unsigned int target, + unsigned int pad) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + + switch (target) { + case V4L2_SEL_TGT_CROP: + return asd->valid_tgts[pad].crop; + case V4L2_SEL_TGT_COMPOSE: + return asd->valid_tgts[pad].compose; + default: + return 0; + } +} + +void ipu_isys_subdev_fmt_propagate(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_mbus_framefmt *ffmt, + struct v4l2_rect *r, + enum isys_subdev_prop_tgt tgt, + unsigned int pad, unsigned int which) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + struct v4l2_mbus_framefmt *ffmts[sd->entity.num_pads]; + struct v4l2_rect *crops[sd->entity.num_pads]; + struct v4l2_rect *compose[sd->entity.num_pads]; + unsigned int i; + + if (tgt == IPU_ISYS_SUBDEV_PROP_TGT_NR_OF) + return; + + if (WARN_ON(pad >= sd->entity.num_pads)) + return; + + for (i = 0; i < sd->entity.num_pads; i++) { + ffmts[i] = __ipu_isys_get_ffmt(sd, cfg, i, 0, which); + crops[i] = __ipu_isys_get_selection( + sd, cfg, V4L2_SEL_TGT_CROP, i, which); + compose[i] = __ipu_isys_get_selection( + sd, cfg, V4L2_SEL_TGT_COMPOSE, i, which); + } + + switch (tgt) { + case IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT: + crops[pad]->left = 0; + crops[pad]->top = 0; + crops[pad]->width = ffmt->width; + crops[pad]->height = ffmt->height; + ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt, crops[pad], + tgt + 1, pad, which); + return; + case IPU_ISYS_SUBDEV_PROP_TGT_SINK_CROP: + if (WARN_ON(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) + return; + + compose[pad]->left = 0; + compose[pad]->top = 0; + compose[pad]->width = r->width; + compose[pad]->height = r->height; + ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt, + compose[pad], tgt + 1, + pad, which); + return; + case IPU_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE: + if (WARN_ON(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) + return; + + /* 1:n and 1:1 case: only propagate to the first source pad */ + if (asd->nsinks == 1 && asd->nsources >= 1) { + compose[asd->nsinks]->left = + compose[asd->nsinks]->top = 0; + compose[asd->nsinks]->width = r->width; + compose[asd->nsinks]->height = r->height; + ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt, + compose[asd->nsinks], + tgt + 1, asd->nsinks, + which); + /* n:n case: propagate according 
to route info */ + } else if (asd->nsinks == asd->nsources && asd->nsources > 1) { + for (i = asd->nsinks; i < sd->entity.num_pads; i++) + if (media_entity_has_route(&sd->entity, pad, i)) + break; + + if (i != sd->entity.num_pads) { + compose[i]->left = 0; + compose[i]->top = 0; + compose[i]->width = r->width; + compose[i]->height = r->height; + ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt, + compose[i], + tgt + 1, i, + which); + } + /* n:m case: propagate to all source pads */ + } else if (asd->nsinks != asd->nsources && asd->nsources > 1) { + for (i = 1; i < sd->entity.num_pads; i++) { + if (!(sd->entity.pads[i].flags & + MEDIA_PAD_FL_SOURCE)) + continue; + + compose[i]->left = 0; + compose[i]->top = 0; + compose[i]->width = r->width; + compose[i]->height = r->height; + ipu_isys_subdev_fmt_propagate(sd, cfg, + ffmt, + compose[i], + tgt + 1, i, + which); + } + } + return; + case IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE: + if (WARN_ON(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SINK)) + return; + + crops[pad]->left = 0; + crops[pad]->top = 0; + crops[pad]->width = r->width; + crops[pad]->height = r->height; + ipu_isys_subdev_fmt_propagate(sd, cfg, ffmt, + crops[pad], tgt + 1, pad, which); + return; + case IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP:{ + struct v4l2_subdev_format fmt = { + .which = which, + .pad = pad, + .format = { + .width = r->width, + .height = r->height, + /* + * Either use the code from the sink pad + * or the current one. + */ + .code = + ffmt ? ffmt->code : ffmts[pad]->code, + .field = + ffmt ? ffmt->field : ffmts[pad]-> + field, + }, + }; + + asd->set_ffmt(sd, cfg, &fmt); + return; + } + } +} + +void ipu_isys_subdev_set_ffmt_default(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt) +{ + struct v4l2_mbus_framefmt *ffmt = + __ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->stream, + fmt->which); + + /* No propagation for non-zero pads. 
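Source pads simply mirror the current sink (pad 0) format. 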
*/ + if (fmt->pad) { + struct v4l2_mbus_framefmt *sink_ffmt = + __ipu_isys_get_ffmt(sd, cfg, 0, fmt->stream, + fmt->which); + + ffmt->width = sink_ffmt->width; + ffmt->height = sink_ffmt->height; + ffmt->code = sink_ffmt->code; + ffmt->field = sink_ffmt->field; + return; + } + + ffmt->width = fmt->format.width; + ffmt->height = fmt->format.height; + ffmt->code = fmt->format.code; + ffmt->field = fmt->format.field; + + ipu_isys_subdev_fmt_propagate(sd, cfg, &fmt->format, NULL, + IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, + fmt->pad, fmt->which); +} + +int __ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + struct v4l2_mbus_framefmt *ffmt = + __ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->stream, + fmt->which); + u32 code = asd->supported_codes[fmt->pad][0]; + unsigned int i; + + WARN_ON(!mutex_is_locked(&asd->mutex)); + + fmt->format.width = clamp(fmt->format.width, IPU_ISYS_MIN_WIDTH, + IPU_ISYS_MAX_WIDTH); + fmt->format.height = clamp(fmt->format.height, + IPU_ISYS_MIN_HEIGHT, IPU_ISYS_MAX_HEIGHT); + + for (i = 0; asd->supported_codes[fmt->pad][i]; i++) { + if (asd->supported_codes[fmt->pad][i] == fmt->format.code) { + code = asd->supported_codes[fmt->pad][i]; + break; + } + } + + fmt->format.code = code; + + asd->set_ffmt(sd, cfg, fmt); + + fmt->format = *ffmt; + + return 0; +} + +int ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + int rval; + + if (fmt->stream >= asd->nstreams) + return -EINVAL; + + mutex_lock(&asd->mutex); + rval = __ipu_isys_subdev_set_ffmt(sd, cfg, fmt); + mutex_unlock(&asd->mutex); + + return rval; +} + +int ipu_isys_subdev_get_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + + if (fmt->stream >= asd->nstreams) + return -EINVAL; + + mutex_lock(&asd->mutex); + fmt->format = *__ipu_isys_get_ffmt(sd, cfg, fmt->pad, + fmt->stream, + fmt->which); + mutex_unlock(&asd->mutex); + + return 0; +} + +int ipu_isys_subdev_get_frame_desc(struct v4l2_subdev *sd, + struct v4l2_mbus_frame_desc *desc) +{ + int i, rval = 0; + + for (i = 0; i < sd->entity.num_pads; i++) { + if (!(sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)) + continue; + + rval = v4l2_subdev_call(sd, pad, get_frame_desc, i, desc); + if (!rval) + return rval; + } + + if (i == sd->entity.num_pads) + rval = -EINVAL; + + return rval; +} + +bool ipu_isys_subdev_has_route(struct media_entity *entity, + unsigned int pad0, unsigned int pad1, int *stream) +{ + struct ipu_isys_subdev *asd; + int i; + + if (!entity) { + WARN_ON(1); + return false; + } + asd = to_ipu_isys_subdev(media_entity_to_v4l2_subdev(entity)); + + /* Two sinks are never connected together. 
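A valid route always pairs a sink pad with a source pad. 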
*/ + if (pad0 < asd->nsinks && pad1 < asd->nsinks) + return false; + + for (i = 0; i < asd->nstreams; i++) { + if ((asd->route[i].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) && + ((asd->route[i].sink == pad0 && + asd->route[i].source == pad1) || + (asd->route[i].sink == pad1 && + asd->route[i].source == pad0))) { + if (stream) + *stream = i; + return true; + } + } + + return false; +} + +int ipu_isys_subdev_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + int i, j, ret = 0; + + WARN_ON(!mutex_is_locked(&sd->entity. +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + parent +#else + graph_obj.mdev +#endif + ->graph_mutex)); + + for (i = 0; i < min(route->num_routes, asd->nstreams); ++i) { + struct v4l2_subdev_route *t = &route->routes[i]; + + if (t->sink_stream > asd->nstreams - 1 || + t->source_stream > asd->nstreams - 1) + continue; + + for (j = 0; j < asd->nstreams; j++) { + if (t->sink_pad == asd->route[j].sink && + t->source_pad == asd->route[j].source) + break; + } + + if (j == asd->nstreams) + continue; + + if (t->flags & V4L2_SUBDEV_ROUTE_FL_IMMUTABLE && + t->flags != asd->route[j].flags) + continue; + + if ((t->flags & V4L2_SUBDEV_ROUTE_FL_SOURCE) && asd->nsinks) + continue; + + if (!(t->flags & V4L2_SUBDEV_ROUTE_FL_SOURCE)) { + int source_pad = 0; + + if (sd->entity.pads[t->sink_pad].flags & + MEDIA_PAD_FL_MULTIPLEX) + source_pad = t->source_pad - asd->nsinks; + + asd->stream[t->sink_pad].stream_id[source_pad] = + t->sink_stream; + } + + if (sd->entity.pads[t->source_pad].flags & + MEDIA_PAD_FL_MULTIPLEX) + asd->stream[t->source_pad].stream_id[t->sink_pad] = + t->source_stream; + else + asd->stream[t->source_pad].stream_id[0] = + t->source_stream; + + if (t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) { + bitmap_set(asd->stream[t->source_pad].streams_stat, + t->source_stream, 1); + if (!(t->flags & V4L2_SUBDEV_ROUTE_FL_SOURCE)) + bitmap_set(asd->stream[t->sink_pad] + .streams_stat, t->sink_stream, 1); + asd->route[j].flags |= V4L2_SUBDEV_ROUTE_FL_ACTIVE; + } else if (!(t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) { + bitmap_clear(asd->stream[t->source_pad].streams_stat, + t->source_stream, 1); + if (!(t->flags & V4L2_SUBDEV_ROUTE_FL_SOURCE)) + bitmap_clear(asd->stream[t->sink_pad] + .streams_stat, t->sink_stream, 1); + asd->route[j].flags &= (~V4L2_SUBDEV_ROUTE_FL_ACTIVE); + } + } + + return ret; +} + +int ipu_isys_subdev_get_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + int i, j; + + for (i = 0, j = 0; i < min(asd->nstreams, route->num_routes); ++i) { + route->routes[j].sink_pad = asd->route[i].sink; + if (sd->entity.pads[asd->route[i].sink].flags & + MEDIA_PAD_FL_MULTIPLEX) { + int source_pad = asd->route[i].source - asd->nsinks; + + route->routes[j].sink_stream = + asd->stream[asd->route[i].sink]. + stream_id[source_pad]; + } else { + route->routes[j].sink_stream = + asd->stream[asd->route[i].sink].stream_id[0]; + } + + route->routes[j].source_pad = asd->route[i].source; + if (sd->entity.pads[asd->route[i].source].flags & + MEDIA_PAD_FL_MULTIPLEX) { + route->routes[j].source_stream = + asd->stream[asd->route[i].source].stream_id[asd-> + route + [i]. 
+ sink]; + } else { + route->routes[j].source_stream = + asd->stream[asd->route[i].source].stream_id[0]; + } + route->routes[j++].flags = asd->route[i].flags; + } + + route->num_routes = j; + + return 0; +} + +int ipu_isys_subdev_set_sel(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_selection *sel) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + struct media_pad *pad = &asd->sd.entity.pads[sel->pad]; + struct v4l2_rect *r, __r = { 0 }; + unsigned int tgt; + + if (!target_valid(sd, sel->target, sel->pad)) + return -EINVAL; + + switch (sel->target) { + case V4L2_SEL_TGT_CROP: + if (pad->flags & MEDIA_PAD_FL_SINK) { + struct v4l2_mbus_framefmt *ffmt = + __ipu_isys_get_ffmt(sd, cfg, sel->pad, 0, + sel->which); + + __r.width = ffmt->width; + __r.height = ffmt->height; + r = &__r; + tgt = IPU_ISYS_SUBDEV_PROP_TGT_SINK_CROP; + } else { + /* 0 is the sink pad. */ + r = __ipu_isys_get_selection(sd, cfg, sel->target, 0, + sel->which); + tgt = IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP; + } + + break; + case V4L2_SEL_TGT_COMPOSE: + if (pad->flags & MEDIA_PAD_FL_SINK) { + r = __ipu_isys_get_selection(sd, cfg, V4L2_SEL_TGT_CROP, + sel->pad, sel->which); + tgt = IPU_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE; + } else { + r = __ipu_isys_get_selection(sd, cfg, + V4L2_SEL_TGT_COMPOSE, 0, + sel->which); + tgt = IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE; + } + break; + default: + return -EINVAL; + } + + sel->r.width = clamp(sel->r.width, IPU_ISYS_MIN_WIDTH, r->width); + sel->r.height = clamp(sel->r.height, IPU_ISYS_MIN_HEIGHT, r->height); + *__ipu_isys_get_selection(sd, cfg, sel->target, sel->pad, + sel->which) = sel->r; + ipu_isys_subdev_fmt_propagate(sd, cfg, NULL, &sel->r, tgt, + sel->pad, sel->which); + + return 0; +} + +int ipu_isys_subdev_get_sel(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_selection *sel) +{ + if (!target_valid(sd, sel->target, sel->pad)) + return -EINVAL; + + sel->r = *__ipu_isys_get_selection(sd, cfg, sel->target, + sel->pad, sel->which); + + return 0; +} + +int ipu_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_mbus_code_enum *code) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + const u32 *supported_codes = asd->supported_codes[code->pad]; + u32 index; + bool next_stream = false; + + if (sd->entity.pads[code->pad].flags & MEDIA_PAD_FL_MULTIPLEX) { + if (code->stream & V4L2_SUBDEV_FLAG_NEXT_STREAM) { + next_stream = true; + code->stream &= ~V4L2_SUBDEV_FLAG_NEXT_STREAM; + } + + if (code->stream > asd->nstreams - 1) + return -EINVAL; + + if (next_stream && code->stream < asd->nstreams) { + code->stream++; + return 0; + } + + return -EINVAL; + } + + for (index = 0; supported_codes[index]; index++) { + if (index == code->index) { + code->code = supported_codes[index]; + return 0; + } + } + + return -EINVAL; +} + +/* + * Besides validating the link, figure out the external pad and the + * ISYS FW ABI source. 
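+ * ip->external records the pad feeding this pipeline (an external + * sensor or an internal single-pad source such as the TPG) and + * ip->source selects the matching FW stream source.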
+ */ +int ipu_isys_subdev_link_validate(struct v4l2_subdev *sd, + struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt) +{ + struct v4l2_subdev *source_sd = + media_entity_to_v4l2_subdev(link->source->entity); + struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe, + struct ipu_isys_pipeline, + pipe); + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + + if (!source_sd) + return -ENODEV; + if (strncmp(source_sd->name, IPU_ISYS_ENTITY_PREFIX, + strlen(IPU_ISYS_ENTITY_PREFIX)) != 0) { + /* + * source_sd isn't ours --- sd must be the external + * sub-device. + */ + ip->external = link->source; + ip->source = to_ipu_isys_subdev(sd)->source; + dev_dbg(&asd->isys->adev->dev, "%s: using source %d\n", + sd->entity.name, ip->source); + } else if (source_sd->entity.num_pads == 1) { + /* All internal sources have a single pad. */ + ip->external = link->source; + ip->source = to_ipu_isys_subdev(source_sd)->source; + + dev_dbg(&asd->isys->adev->dev, "%s: using source %d\n", + sd->entity.name, ip->source); + } + + if (asd->isl_mode != IPU_ISL_OFF) + ip->isl_mode = asd->isl_mode; + + return v4l2_subdev_link_validate_default(sd, link, source_fmt, + sink_fmt); +} + +int ipu_isys_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd); + unsigned int i; + + mutex_lock(&asd->mutex); + + for (i = 0; i < asd->sd.entity.num_pads; i++) { + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + sd, fh->pad, +#else + fh, +#endif + i); + struct v4l2_rect *try_crop = + v4l2_subdev_get_try_crop( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + sd, + fh->pad, +#else + fh, +#endif + i); + struct v4l2_rect *try_compose = + v4l2_subdev_get_try_compose( +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + sd, + fh->pad, +#else + fh, +#endif + i); + + *try_fmt = asd->ffmt[i][0]; + *try_crop = asd->crop[i]; + *try_compose = asd->compose[i]; + } + + mutex_unlock(&asd->mutex); + + return 0; +} + +int ipu_isys_subdev_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + return 0; +} + +int ipu_isys_subdev_init(struct ipu_isys_subdev *asd, + struct v4l2_subdev_ops *ops, + unsigned int nr_ctrls, + unsigned int num_pads, + unsigned int num_streams, + unsigned int num_source, + unsigned int num_sink, + unsigned int sd_flags) +{ + int i; + int rval = -EINVAL; + + mutex_init(&asd->mutex); + + v4l2_subdev_init(&asd->sd, ops); + + asd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | sd_flags; + asd->sd.owner = THIS_MODULE; + + asd->nstreams = num_streams; + asd->nsources = num_source; + asd->nsinks = num_sink; + + asd->pad = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->pad), GFP_KERNEL); + + asd->ffmt = (struct v4l2_mbus_framefmt **) + devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(struct v4l2_mbus_framefmt *), + GFP_KERNEL); + + asd->crop = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->crop), GFP_KERNEL); + + asd->compose = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->compose), GFP_KERNEL); + + asd->valid_tgts = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->valid_tgts), GFP_KERNEL); + asd->route = devm_kcalloc(&asd->isys->adev->dev, num_streams, + sizeof(*asd->route), GFP_KERNEL); + + asd->stream = devm_kcalloc(&asd->isys->adev->dev, num_pads, + sizeof(*asd->stream), GFP_KERNEL); + + if (!asd->pad || !asd->ffmt || !asd->crop || !asd->compose || + !asd->valid_tgts || 
!asd->route || !asd->stream) + return -ENOMEM; + + for (i = 0; i < num_pads; i++) { + asd->ffmt[i] = (struct v4l2_mbus_framefmt *) + devm_kcalloc(&asd->isys->adev->dev, num_streams, + sizeof(struct v4l2_mbus_framefmt), GFP_KERNEL); + if (!asd->ffmt[i]) + return -ENOMEM; + + asd->stream[i].stream_id = + devm_kcalloc(&asd->isys->adev->dev, num_source, + sizeof(*asd->stream[i].stream_id), GFP_KERNEL); + if (!asd->stream[i].stream_id) + return -ENOMEM; + } + + rval = media_entity_pads_init(&asd->sd.entity, num_pads, asd->pad); + if (rval) + goto out_mutex_destroy; + + if (asd->ctrl_init) { + rval = v4l2_ctrl_handler_init(&asd->ctrl_handler, nr_ctrls); + if (rval) + goto out_media_entity_cleanup; + + asd->ctrl_init(&asd->sd); + if (asd->ctrl_handler.error) { + rval = asd->ctrl_handler.error; + goto out_v4l2_ctrl_handler_free; + } + + asd->sd.ctrl_handler = &asd->ctrl_handler; + } + + asd->source = -1; + + return 0; + +out_v4l2_ctrl_handler_free: + v4l2_ctrl_handler_free(&asd->ctrl_handler); + +out_media_entity_cleanup: + media_entity_cleanup(&asd->sd.entity); + +out_mutex_destroy: + mutex_destroy(&asd->mutex); + + return rval; +} + +void ipu_isys_subdev_cleanup(struct ipu_isys_subdev *asd) +{ + media_entity_cleanup(&asd->sd.entity); + v4l2_ctrl_handler_free(&asd->ctrl_handler); + mutex_destroy(&asd->mutex); +} diff --git a/drivers/media/pci/intel/ipu-isys-subdev.h b/drivers/media/pci/intel/ipu-isys-subdev.h new file mode 100644 index 000000000000..6b08cf57e8ca --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-subdev.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_SUBDEV_H +#define IPU_ISYS_SUBDEV_H + +#include + +#include +#include +#include + +#include "ipu-isys-queue.h" + +#define IPU_ISYS_MIPI_CSI2_TYPE_NULL 0x10 +#define IPU_ISYS_MIPI_CSI2_TYPE_BLANKING 0x11 +#define IPU_ISYS_MIPI_CSI2_TYPE_EMBEDDED8 0x12 +#define IPU_ISYS_MIPI_CSI2_TYPE_YUV422_8 0x1e +#define IPU_ISYS_MIPI_CSI2_TYPE_RGB565 0x22 +#define IPU_ISYS_MIPI_CSI2_TYPE_RGB888 0x24 +#define IPU_ISYS_MIPI_CSI2_TYPE_RAW6 0x28 +#define IPU_ISYS_MIPI_CSI2_TYPE_RAW7 0x29 +#define IPU_ISYS_MIPI_CSI2_TYPE_RAW8 0x2a +#define IPU_ISYS_MIPI_CSI2_TYPE_RAW10 0x2b +#define IPU_ISYS_MIPI_CSI2_TYPE_RAW12 0x2c +#define IPU_ISYS_MIPI_CSI2_TYPE_RAW14 0x2d +/* 1-8 */ +#define IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(i) (0x30 + (i) - 1) + +#define FMT_ENTRY (struct ipu_isys_fmt_entry []) + +enum isys_subdev_prop_tgt { + IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, + IPU_ISYS_SUBDEV_PROP_TGT_SINK_CROP, + IPU_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE, + IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE, + IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP, +}; + +#define IPU_ISYS_SUBDEV_PROP_TGT_NR_OF \ + (IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP + 1) + +enum ipu_isl_mode { + IPU_ISL_OFF = 0, /* IPU_FW_ISYS_USE_NO_ISL_NO_ISA */ + IPU_ISL_CSI2_BE, /* IPU_FW_ISYS_USE_SINGLE_DUAL_ISL */ + IPU_ISL_ISA /* IPU_FW_ISYS_USE_SINGLE_ISA */ +}; + +enum ipu_be_mode { + IPU_BE_RAW = 0, + IPU_BE_SOC +}; + +enum ipu_isys_subdev_pixelorder { + IPU_ISYS_SUBDEV_PIXELORDER_BGGR = 0, + IPU_ISYS_SUBDEV_PIXELORDER_GBRG, + IPU_ISYS_SUBDEV_PIXELORDER_GRBG, + IPU_ISYS_SUBDEV_PIXELORDER_RGGB, +}; + +struct ipu_isys; + +struct ipu_isys_subdev { + /* Serialise access to any other field in the struct */ + struct mutex mutex; + struct v4l2_subdev sd; + struct ipu_isys *isys; + u32 const *const *supported_codes; + struct media_pad *pad; + struct v4l2_mbus_framefmt **ffmt; + struct v4l2_rect *crop; + struct v4l2_rect *compose; + struct { + unsigned int 
*stream_id; + DECLARE_BITMAP(streams_stat, 32); + } *stream; /* stream enable/disable status, indexed by pad */ + struct { + unsigned int sink; + unsigned int source; + int flags; + } *route; /* pad level info, indexed by stream */ + unsigned int nstreams; + unsigned int nsinks; + unsigned int nsources; + struct v4l2_ctrl_handler ctrl_handler; + void (*ctrl_init)(struct v4l2_subdev *sd); + void (*set_ffmt)(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt); + struct { + bool crop; + bool compose; + } *valid_tgts; + enum ipu_isl_mode isl_mode; + enum ipu_be_mode be_mode; + int source; /* SSI stream source; -1 if unset */ +}; + +#define to_ipu_isys_subdev(__sd) \ + container_of(__sd, struct ipu_isys_subdev, sd) + +struct v4l2_mbus_framefmt *__ipu_isys_get_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config + *cfg, +#endif + unsigned int pad, + unsigned int stream, + unsigned int which); + +unsigned int ipu_isys_mbus_code_to_bpp(u32 code); +unsigned int ipu_isys_mbus_code_to_mipi(u32 code); +u32 ipu_isys_subdev_code_to_uncompressed(u32 sink_code); + +enum ipu_isys_subdev_pixelorder ipu_isys_subdev_get_pixelorder(u32 code); + +void ipu_isys_subdev_fmt_propagate(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_mbus_framefmt *ffmt, + struct v4l2_rect *r, + enum isys_subdev_prop_tgt tgt, + unsigned int pad, unsigned int which); + +void ipu_isys_subdev_set_ffmt_default(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt); +int __ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt); +struct v4l2_rect *__ipu_isys_get_selection(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + unsigned int target, + unsigned int pad, + unsigned int which); +int ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt); +int ipu_isys_subdev_get_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt); +int ipu_isys_subdev_get_sel(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel); +int ipu_isys_subdev_set_sel(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel); +int ipu_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_mbus_code_enum + *code); +int ipu_isys_subdev_link_validate(struct v4l2_subdev *sd, + struct media_link *link, + struct v4l2_subdev_format *source_fmt, + struct v4l2_subdev_format *sink_fmt); + +int 
ipu_isys_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh); +int ipu_isys_subdev_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh); +int ipu_isys_subdev_init(struct ipu_isys_subdev *asd, + struct v4l2_subdev_ops *ops, + unsigned int nr_ctrls, + unsigned int num_pads, + unsigned int num_streams, + unsigned int num_source, + unsigned int num_sink, + unsigned int sd_flags); +void ipu_isys_subdev_cleanup(struct ipu_isys_subdev *asd); +int ipu_isys_subdev_get_frame_desc(struct v4l2_subdev *sd, + struct v4l2_mbus_frame_desc *desc); +int ipu_isys_subdev_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route); +int ipu_isys_subdev_get_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route); +bool ipu_isys_subdev_has_route(struct media_entity *entity, + unsigned int pad0, unsigned int pad1, int *stream); +#endif /* IPU_ISYS_SUBDEV_H */ diff --git a/drivers/media/pci/intel/ipu-isys-tpg.c b/drivers/media/pci/intel/ipu-isys-tpg.c new file mode 100644 index 000000000000..5fe004801751 --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-tpg.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include + +#include +#include +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-isys.h" +#include "ipu-isys-subdev.h" +#include "ipu-isys-tpg.h" +#include "ipu-isys-video.h" +#include "ipu-platform-isys-csi2-reg.h" + +static const u32 tpg_supported_codes_pad[] = { + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + 0, +}; + +static const u32 *tpg_supported_codes[] = { + tpg_supported_codes_pad, +}; + +static struct v4l2_subdev_internal_ops tpg_sd_internal_ops = { + .open = ipu_isys_subdev_open, + .close = ipu_isys_subdev_close, +}; + +static const struct v4l2_subdev_video_ops tpg_sd_video_ops = { + .s_stream = tpg_set_stream, +}; + +static int ipu_isys_tpg_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct ipu_isys_tpg *tpg = container_of(container_of(ctrl->handler, + struct + ipu_isys_subdev, + ctrl_handler), + struct ipu_isys_tpg, asd); + + switch (ctrl->id) { + case V4L2_CID_HBLANK: + writel(ctrl->val, tpg->base + MIPI_GEN_REG_SYNG_HBLANK_CYC); + break; + case V4L2_CID_VBLANK: + writel(ctrl->val, tpg->base + MIPI_GEN_REG_SYNG_VBLANK_CYC); + break; + case V4L2_CID_LINE_LENGTH_PIXELS: + if (ctrl->val > tpg->asd.ffmt[TPG_PAD_SOURCE][0].width) + writel(ctrl->val - + tpg->asd.ffmt[TPG_PAD_SOURCE][0].width, + tpg->base + MIPI_GEN_REG_SYNG_HBLANK_CYC); + break; + case V4L2_CID_FRAME_LENGTH_LINES: + if (ctrl->val > tpg->asd.ffmt[TPG_PAD_SOURCE][0].height) + writel(ctrl->val - + tpg->asd.ffmt[TPG_PAD_SOURCE][0].height, + tpg->base + MIPI_GEN_REG_SYNG_VBLANK_CYC); + break; + case V4L2_CID_TEST_PATTERN: + writel(ctrl->val, tpg->base + MIPI_GEN_REG_TPG_MODE); + break; + } + + return 0; +} + +static const struct v4l2_ctrl_ops ipu_isys_tpg_ctrl_ops = { + .s_ctrl = ipu_isys_tpg_s_ctrl, +}; + +static s64 ipu_isys_tpg_rate(struct ipu_isys_tpg *tpg, unsigned int bpp) +{ + return MIPI_GEN_PPC * IPU_ISYS_FREQ; +} + +static const char *const tpg_mode_items[] = { + "Ramp", + "Checkerboard", /* Does not work, disabled. 
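Masked out via tpg_mode.menu_skip_mask below. 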
*/ + "Frame Based Colour", +}; + +static struct v4l2_ctrl_config tpg_mode = { + .ops = &ipu_isys_tpg_ctrl_ops, + .id = V4L2_CID_TEST_PATTERN, + .name = "Test Pattern", + .type = V4L2_CTRL_TYPE_MENU, + .min = 0, + .max = ARRAY_SIZE(tpg_mode_items) - 1, + .def = 0, + .menu_skip_mask = 0x2, + .qmenu = tpg_mode_items, +}; + +static const struct v4l2_ctrl_config csi2_header_cfg = { + .id = V4L2_CID_IPU_STORE_CSI2_HEADER, + .name = "Store CSI-2 Headers", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .min = 0, + .max = 1, + .step = 1, + .def = 1, +}; + +static void ipu_isys_tpg_init_controls(struct v4l2_subdev *sd) +{ + struct ipu_isys_tpg *tpg = to_ipu_isys_tpg(sd); + int hblank; + struct v4l2_ctrl_config cfg = { + .ops = &ipu_isys_tpg_ctrl_ops, + .type = V4L2_CTRL_TYPE_INTEGER, + .max = 65535, + .min = 8, + .step = 1, + .qmenu = NULL, + .elem_size = 0, + }; + + hblank = 1024; + + tpg->hblank = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler, + &ipu_isys_tpg_ctrl_ops, + V4L2_CID_HBLANK, 8, 65535, 1, hblank); + + tpg->vblank = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler, + &ipu_isys_tpg_ctrl_ops, + V4L2_CID_VBLANK, 8, 65535, 1, 1024); + + cfg.id = V4L2_CID_LINE_LENGTH_PIXELS; + cfg.name = "Line Length Pixels"; + cfg.def = 1024 + 4096; + + tpg->llp = v4l2_ctrl_new_custom(&tpg->asd.ctrl_handler, &cfg, NULL); + + cfg.id = V4L2_CID_FRAME_LENGTH_LINES; + cfg.name = "Frame Length Lines"; + cfg.def = 1024 + 3072; + tpg->fll = v4l2_ctrl_new_custom(&tpg->asd.ctrl_handler, &cfg, NULL); + + tpg->pixel_rate = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler, + &ipu_isys_tpg_ctrl_ops, + V4L2_CID_PIXEL_RATE, 0, 0, 1, 0); + + if (tpg->pixel_rate) { + tpg->pixel_rate->cur.val = ipu_isys_tpg_rate(tpg, 8); + tpg->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY; + } + + v4l2_ctrl_new_custom(&tpg->asd.ctrl_handler, &tpg_mode, NULL); + tpg->store_csi2_header = + v4l2_ctrl_new_custom(&tpg->asd.ctrl_handler, &csi2_header_cfg, NULL); +} + +static void tpg_set_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt) +{ + fmt->format.field = V4L2_FIELD_NONE; + *__ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->stream, + fmt->which) = fmt->format; +} + +static int ipu_isys_tpg_set_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt) +{ + struct ipu_isys_tpg *tpg = to_ipu_isys_tpg(sd); + __u32 code = tpg->asd.ffmt[TPG_PAD_SOURCE][0].code; + unsigned int bpp = ipu_isys_mbus_code_to_bpp(code); + s64 tpg_rate = ipu_isys_tpg_rate(tpg, bpp); + int rval; + + mutex_lock(&tpg->asd.mutex); + rval = __ipu_isys_subdev_set_ffmt(sd, cfg, fmt); + mutex_unlock(&tpg->asd.mutex); + + if (rval || fmt->which != V4L2_SUBDEV_FORMAT_ACTIVE) + return rval; + + v4l2_ctrl_s_ctrl_int64(tpg->pixel_rate, tpg_rate); + + return 0; +} + +static const struct ipu_isys_pixelformat *ipu_isys_tpg_try_fmt( + struct ipu_isys_video *av, + struct v4l2_pix_format_mplane *mpix) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + struct media_entity entity = av->vdev.entity; + struct v4l2_subdev *sd = + media_entity_to_v4l2_subdev(entity.links[0].source->entity); +#else + struct media_link *link = list_first_entry(&av->vdev.entity.links, + struct media_link, list); + struct v4l2_subdev *sd = + media_entity_to_v4l2_subdev(link->source->entity); +#endif + struct ipu_isys_tpg *tpg; + + if (!sd) + return NULL; + + tpg = 
to_ipu_isys_tpg(sd); + + return ipu_isys_video_try_fmt_vid_mplane(av, mpix, + v4l2_ctrl_g_ctrl(tpg->store_csi2_header)); +} + +static const struct v4l2_subdev_pad_ops tpg_sd_pad_ops = { + .get_fmt = ipu_isys_subdev_get_ffmt, + .set_fmt = ipu_isys_tpg_set_ffmt, + .enum_mbus_code = ipu_isys_subdev_enum_mbus_code, +}; + +static int subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + switch (sub->type) { +#ifdef IPU_TPG_SOF + case V4L2_EVENT_FRAME_SYNC: + return v4l2_event_subscribe(fh, sub, 10, NULL); +#endif + case V4L2_EVENT_CTRL: + return v4l2_ctrl_subscribe_event(fh, sub); + default: + return -EINVAL; + } +}; + +/* V4L2 subdev core operations */ +static const struct v4l2_subdev_core_ops tpg_sd_core_ops = { + .subscribe_event = subscribe_event, + .unsubscribe_event = v4l2_event_subdev_unsubscribe, +}; + +static struct v4l2_subdev_ops tpg_sd_ops = { + .core = &tpg_sd_core_ops, + .video = &tpg_sd_video_ops, + .pad = &tpg_sd_pad_ops, +}; + +static struct media_entity_operations tpg_entity_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +void ipu_isys_tpg_cleanup(struct ipu_isys_tpg *tpg) +{ + v4l2_device_unregister_subdev(&tpg->asd.sd); + ipu_isys_subdev_cleanup(&tpg->asd); + ipu_isys_video_cleanup(&tpg->av); +} + +int ipu_isys_tpg_init(struct ipu_isys_tpg *tpg, + struct ipu_isys *isys, + void __iomem *base, void __iomem *sel, + unsigned int index) +{ + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = TPG_PAD_SOURCE, + .format = { + .width = 4096, + .height = 3072, + }, + }; + int rval; + + tpg->isys = isys; + tpg->base = base; + tpg->sel = sel; + tpg->index = index; + + tpg->asd.sd.entity.ops = &tpg_entity_ops; + tpg->asd.ctrl_init = ipu_isys_tpg_init_controls; + tpg->asd.isys = isys; + + rval = ipu_isys_subdev_init(&tpg->asd, &tpg_sd_ops, 5, + NR_OF_TPG_PADS, + NR_OF_TPG_STREAMS, + NR_OF_TPG_SOURCE_PADS, + NR_OF_TPG_SINK_PADS, + V4L2_SUBDEV_FL_HAS_EVENTS); + if (rval) + return rval; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + tpg->asd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR; +#else + tpg->asd.sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; +#endif + tpg->asd.pad[TPG_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; + + tpg->asd.source = IPU_FW_ISYS_STREAM_SRC_MIPIGEN_PORT0 + index; + tpg->asd.supported_codes = tpg_supported_codes; + tpg->asd.set_ffmt = tpg_set_ffmt; + ipu_isys_subdev_set_ffmt(&tpg->asd.sd, NULL, &fmt); + + tpg->asd.sd.internal_ops = &tpg_sd_internal_ops; + snprintf(tpg->asd.sd.name, sizeof(tpg->asd.sd.name), + IPU_ISYS_ENTITY_PREFIX " TPG %u", index); + v4l2_set_subdevdata(&tpg->asd.sd, &tpg->asd); + rval = v4l2_device_register_subdev(&isys->v4l2_dev, &tpg->asd.sd); + if (rval) { + dev_info(&isys->adev->dev, "can't register v4l2 subdev\n"); + goto fail; + } + + snprintf(tpg->av.vdev.name, sizeof(tpg->av.vdev.name), + IPU_ISYS_ENTITY_PREFIX " TPG %u capture", index); + tpg->av.isys = isys; + tpg->av.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_MIPI; + tpg->av.pfmts = ipu_isys_pfmts_packed; + tpg->av.try_fmt_vid_mplane = ipu_isys_tpg_try_fmt; + tpg->av.prepare_firmware_stream_cfg = + ipu_isys_prepare_firmware_stream_cfg_default; + tpg->av.packed = true; + tpg->av.line_header_length = IPU_ISYS_CSI2_LONG_PACKET_HEADER_SIZE; + tpg->av.line_footer_length = IPU_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE; + tpg->av.aq.buf_prepare = ipu_isys_buf_prepare; + tpg->av.aq.fill_frame_buff_set_pin = + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin; + tpg->av.aq.link_fmt_validate = 
ipu_isys_link_fmt_validate; + tpg->av.aq.vbq.buf_struct_size = sizeof(struct ipu_isys_video_buffer); + + rval = ipu_isys_video_init(&tpg->av, &tpg->asd.sd.entity, + TPG_PAD_SOURCE, MEDIA_PAD_FL_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, "can't init video node\n"); + goto fail; + } + + return 0; + +fail: + ipu_isys_tpg_cleanup(tpg); + + return rval; +} diff --git a/drivers/media/pci/intel/ipu-isys-tpg.h b/drivers/media/pci/intel/ipu-isys-tpg.h new file mode 100644 index 000000000000..4a2a23d3d75a --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-tpg.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_TPG_H +#define IPU_ISYS_TPG_H + +#include +#include +#include + +#include "ipu-isys-subdev.h" +#include "ipu-isys-video.h" +#include "ipu-isys-queue.h" + +struct ipu_isys_tpg_pdata; +struct ipu_isys; + +#define TPG_PAD_SOURCE 0 +#define NR_OF_TPG_PADS 1 +#define NR_OF_TPG_SOURCE_PADS 1 +#define NR_OF_TPG_SINK_PADS 0 +#define NR_OF_TPG_STREAMS 1 + +/* + * PPC is 4 pixels for clock for RAW8, RAW10 and RAW12. + * Source: FW validation test code. + */ +#define MIPI_GEN_PPC 4 + +#define MIPI_GEN_REG_COM_ENABLE 0x0 +#define MIPI_GEN_REG_COM_DTYPE 0x4 +/* RAW8, RAW10 or RAW12 */ +#define MIPI_GEN_COM_DTYPE_RAW(n) (((n) - 8) / 2) +#define MIPI_GEN_REG_COM_VTYPE 0x8 +#define MIPI_GEN_REG_COM_VCHAN 0xc +#define MIPI_GEN_REG_COM_WCOUNT 0x10 +#define MIPI_GEN_REG_PRBS_RSTVAL0 0x14 +#define MIPI_GEN_REG_PRBS_RSTVAL1 0x18 +#define MIPI_GEN_REG_SYNG_FREE_RUN 0x1c +#define MIPI_GEN_REG_SYNG_PAUSE 0x20 +#define MIPI_GEN_REG_SYNG_NOF_FRAMES 0x24 +#define MIPI_GEN_REG_SYNG_NOF_PIXELS 0x28 +#define MIPI_GEN_REG_SYNG_NOF_LINES 0x2c +#define MIPI_GEN_REG_SYNG_HBLANK_CYC 0x30 +#define MIPI_GEN_REG_SYNG_VBLANK_CYC 0x34 +#define MIPI_GEN_REG_SYNG_STAT_HCNT 0x38 +#define MIPI_GEN_REG_SYNG_STAT_VCNT 0x3c +#define MIPI_GEN_REG_SYNG_STAT_FCNT 0x40 +#define MIPI_GEN_REG_SYNG_STAT_DONE 0x44 +#define MIPI_GEN_REG_TPG_MODE 0x48 +#define MIPI_GEN_REG_TPG_HCNT_MASK 0x4c +#define MIPI_GEN_REG_TPG_VCNT_MASK 0x50 +#define MIPI_GEN_REG_TPG_XYCNT_MASK 0x54 +#define MIPI_GEN_REG_TPG_HCNT_DELTA 0x58 +#define MIPI_GEN_REG_TPG_VCNT_DELTA 0x5c +#define MIPI_GEN_REG_TPG_R1 0x60 +#define MIPI_GEN_REG_TPG_G1 0x64 +#define MIPI_GEN_REG_TPG_B1 0x68 +#define MIPI_GEN_REG_TPG_R2 0x6c +#define MIPI_GEN_REG_TPG_G2 0x70 +#define MIPI_GEN_REG_TPG_B2 0x74 + +/* + * struct ipu_isys_tpg + * + * @nlanes: number of lanes in the receiver + */ +struct ipu_isys_tpg { + struct ipu_isys_tpg_pdata *pdata; + struct ipu_isys *isys; + struct ipu_isys_subdev asd; + struct ipu_isys_video av; + + void __iomem *base; + void __iomem *sel; + unsigned int index; + int streaming; + + struct v4l2_ctrl *hblank; + struct v4l2_ctrl *vblank; + struct v4l2_ctrl *llp; + struct v4l2_ctrl *fll; + struct v4l2_ctrl *pixel_rate; + struct v4l2_ctrl *store_csi2_header; +}; + +#define to_ipu_isys_tpg(sd) \ + container_of(to_ipu_isys_subdev(sd), \ + struct ipu_isys_tpg, asd) +#ifdef IPU_TPG_SOF +void ipu_isys_tpg_sof_event(struct ipu_isys_tpg *tpg); +extern const struct v4l2_subdev_core_ops tpg_sd_core_ops; +#endif +int ipu_isys_tpg_init(struct ipu_isys_tpg *tpg, + struct ipu_isys *isys, + void __iomem *base, void __iomem *sel, + unsigned int index); +void ipu_isys_tpg_cleanup(struct ipu_isys_tpg *tpg); +int tpg_set_stream(struct v4l2_subdev *sd, int enable); + +#endif /* IPU_ISYS_TPG_H */ diff --git a/drivers/media/pci/intel/ipu-isys-video.c b/drivers/media/pci/intel/ipu-isys-video.c new file 
mode 100644 index 000000000000..0c6e59bae841 --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-video.c @@ -0,0 +1,1828 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +#include +#else +#include +#endif + +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0) +#include +#endif + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-cpd.h" +#include "ipu-isys.h" +#include "ipu-isys-video.h" +#include "ipu-platform.h" +#include "ipu-platform-regs.h" +#include "ipu-platform-buttress-regs.h" +#include "ipu-trace.h" +#include "ipu-fw-isys.h" +#include "ipu-fw-com.h" + +static unsigned int num_stream_support = IPU_ISYS_NUM_STREAMS; +module_param(num_stream_support, uint, 0660); +MODULE_PARM_DESC(num_stream_support, "IPU project support number of stream"); + +static bool use_stream_stop; +module_param(use_stream_stop, bool, 0660); +MODULE_PARM_DESC(use_stream_stop, "Use STOP command if running in CSI capture mode"); + +const struct ipu_isys_pixelformat ipu_isys_pfmts_be_soc[] = { + {V4L2_PIX_FMT_Y10, 16, 10, 0, MEDIA_BUS_FMT_Y10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_UYVY, 16, 16, 0, MEDIA_BUS_FMT_UYVY8_1X16, + IPU_FW_ISYS_FRAME_FORMAT_UYVY}, + {V4L2_PIX_FMT_YUYV, 16, 16, 0, MEDIA_BUS_FMT_YUYV8_1X16, + IPU_FW_ISYS_FRAME_FORMAT_YUYV}, + {V4L2_PIX_FMT_NV16, 16, 16, 8, MEDIA_BUS_FMT_YUYV8_1X16, + IPU_FW_ISYS_FRAME_FORMAT_NV16}, + {V4L2_PIX_FMT_XRGB32, 32, 32, 0, MEDIA_BUS_FMT_RGB565_1X16, + IPU_FW_ISYS_FRAME_FORMAT_RGBA888}, + {V4L2_PIX_FMT_XBGR32, 32, 32, 0, MEDIA_BUS_FMT_RGB888_1X24, + IPU_FW_ISYS_FRAME_FORMAT_RGBA888}, + /* Raw bayer formats. 
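+ *
+ * As a reading aid (an editor's illustration, not part of the original
+ * patch): each entry below is a struct ipu_isys_pixelformat, so
+ *
+ *	{V4L2_PIX_FMT_SBGGR10, 16, 10, 0, MEDIA_BUS_FMT_SBGGR10_1X10,
+ *	 IPU_FW_ISYS_FRAME_FORMAT_RAW16}
+ *
+ * means 10 significant bits per sample (bpp_packed), stored unpacked in
+ * 16 bits of memory (bpp), non-planar (bpp_planar == 0), produced from
+ * the SBGGR10 media bus code and written out by firmware as RAW16.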
*/ + {V4L2_PIX_FMT_SBGGR14, 16, 14, 0, MEDIA_BUS_FMT_SBGGR14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGBRG14, 16, 14, 0, MEDIA_BUS_FMT_SGBRG14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGRBG14, 16, 14, 0, MEDIA_BUS_FMT_SGRBG14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SRGGB14, 16, 14, 0, MEDIA_BUS_FMT_SRGGB14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SBGGR12, 16, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGBRG12, 16, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGRBG12, 16, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SRGGB12, 16, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SBGGR10, 16, 10, 0, MEDIA_BUS_FMT_SBGGR10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGBRG10, 16, 10, 0, MEDIA_BUS_FMT_SGBRG10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGRBG10, 16, 10, 0, MEDIA_BUS_FMT_SGRBG10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SRGGB10, 16, 10, 0, MEDIA_BUS_FMT_SRGGB10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SBGGR8, 8, 8, 0, MEDIA_BUS_FMT_SBGGR8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW8}, + {V4L2_PIX_FMT_SGBRG8, 8, 8, 0, MEDIA_BUS_FMT_SGBRG8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW8}, + {V4L2_PIX_FMT_SGRBG8, 8, 8, 0, MEDIA_BUS_FMT_SGRBG8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW8}, + {V4L2_PIX_FMT_SRGGB8, 8, 8, 0, MEDIA_BUS_FMT_SRGGB8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW8}, + {} +}; + +const struct ipu_isys_pixelformat ipu_isys_pfmts_packed[] = { + {V4L2_PIX_FMT_Y10, 10, 10, 0, MEDIA_BUS_FMT_Y10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW10}, + {V4L2_PIX_FMT_UYVY, 16, 16, 0, MEDIA_BUS_FMT_UYVY8_1X16, + IPU_FW_ISYS_FRAME_FORMAT_UYVY}, + {V4L2_PIX_FMT_YUYV, 16, 16, 0, MEDIA_BUS_FMT_YUYV8_1X16, + IPU_FW_ISYS_FRAME_FORMAT_YUYV}, + {V4L2_PIX_FMT_RGB565, 16, 16, 0, MEDIA_BUS_FMT_RGB565_1X16, + IPU_FW_ISYS_FRAME_FORMAT_RGB565}, + {V4L2_PIX_FMT_BGR24, 24, 24, 0, MEDIA_BUS_FMT_RGB888_1X24, + IPU_FW_ISYS_FRAME_FORMAT_RGBA888}, +#ifndef V4L2_PIX_FMT_SBGGR12P + {V4L2_PIX_FMT_SBGGR12, 12, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW12}, + {V4L2_PIX_FMT_SGBRG12, 12, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW12}, + {V4L2_PIX_FMT_SGRBG12, 12, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW12}, + {V4L2_PIX_FMT_SRGGB12, 12, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW12}, + {V4L2_PIX_FMT_SBGGR14, 14, 14, 0, MEDIA_BUS_FMT_SBGGR14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW14}, + {V4L2_PIX_FMT_SGBRG14, 14, 14, 0, MEDIA_BUS_FMT_SGBRG14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW14}, + {V4L2_PIX_FMT_SGRBG14, 14, 14, 0, MEDIA_BUS_FMT_SGRBG14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW14}, + {V4L2_PIX_FMT_SRGGB14, 14, 14, 0, MEDIA_BUS_FMT_SRGGB14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW14}, +#else /* V4L2_PIX_FMT_SBGGR12P */ + {V4L2_PIX_FMT_SBGGR12P, 12, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW12}, + {V4L2_PIX_FMT_SGBRG12P, 12, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW12}, + {V4L2_PIX_FMT_SGRBG12P, 12, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW12}, + {V4L2_PIX_FMT_SRGGB12P, 12, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW12}, + {V4L2_PIX_FMT_SBGGR14P, 14, 14, 0, MEDIA_BUS_FMT_SBGGR14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW14}, + {V4L2_PIX_FMT_SGBRG14P, 14, 14, 0, MEDIA_BUS_FMT_SGBRG14_1X14, + 
IPU_FW_ISYS_FRAME_FORMAT_RAW14}, + {V4L2_PIX_FMT_SGRBG14P, 14, 14, 0, MEDIA_BUS_FMT_SGRBG14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW14}, + {V4L2_PIX_FMT_SRGGB14P, 14, 14, 0, MEDIA_BUS_FMT_SRGGB14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW14}, +#endif /* V4L2_PIX_FMT_SBGGR12P */ + {V4L2_PIX_FMT_SBGGR10P, 10, 10, 0, MEDIA_BUS_FMT_SBGGR10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW10}, + {V4L2_PIX_FMT_SGBRG10P, 10, 10, 0, MEDIA_BUS_FMT_SGBRG10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW10}, + {V4L2_PIX_FMT_SGRBG10P, 10, 10, 0, MEDIA_BUS_FMT_SGRBG10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW10}, + {V4L2_PIX_FMT_SRGGB10P, 10, 10, 0, MEDIA_BUS_FMT_SRGGB10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW10}, + {V4L2_PIX_FMT_SBGGR8, 8, 8, 0, MEDIA_BUS_FMT_SBGGR8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW8}, + {V4L2_PIX_FMT_SGBRG8, 8, 8, 0, MEDIA_BUS_FMT_SGBRG8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW8}, + {V4L2_PIX_FMT_SGRBG8, 8, 8, 0, MEDIA_BUS_FMT_SGRBG8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW8}, + {V4L2_PIX_FMT_SRGGB8, 8, 8, 0, MEDIA_BUS_FMT_SRGGB8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW8}, + {} +}; + +static int video_open(struct file *file) +{ + struct ipu_isys_video *av = video_drvdata(file); + struct ipu_isys *isys = av->isys; + struct ipu_bus_device *adev = to_ipu_bus_device(&isys->adev->dev); + struct ipu_device *isp = adev->isp; + int rval; + + mutex_lock(&isys->mutex); + + if (isys->reset_needed || isp->flr_done) { + mutex_unlock(&isys->mutex); + dev_warn(&isys->adev->dev, "isys power cycle required\n"); + return -EIO; + } + mutex_unlock(&isys->mutex); + + rval = ipu_buttress_authenticate(isp); + if (rval) { + dev_err(&isys->adev->dev, "FW authentication failed\n"); + return rval; + } + + rval = pm_runtime_get_sync(&isys->adev->dev); + if (rval < 0) { + pm_runtime_put_noidle(&isys->adev->dev); + return rval; + } + + rval = v4l2_fh_open(file); + if (rval) + goto out_power_down; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + rval = ipu_pipeline_pm_use(&av->vdev.entity, 1); +#else + rval = v4l2_pipeline_pm_use(&av->vdev.entity, 1); +#endif + if (rval) + goto out_v4l2_fh_release; + + mutex_lock(&isys->mutex); + + if (isys->video_opened++) { + /* Already open */ + mutex_unlock(&isys->mutex); + return 0; + } + + ipu_configure_spc(adev->isp, + &isys->pdata->ipdata->hw_variant, + IPU_CPD_PKG_DIR_ISYS_SERVER_IDX, + isys->pdata->base, isys->pkg_dir, + isys->pkg_dir_dma_addr); + + /* + * Buffers could have been left to wrong queue at last closure. + * Move them now back to empty buffer queue. + */ + ipu_cleanup_fw_msg_bufs(isys); + + if (isys->fwcom) { + /* + * Something went wrong in previous shutdown. As we are now + * restarting isys we can safely delete old context. 
+ */ + dev_err(&isys->adev->dev, "Clearing old context\n"); + ipu_fw_isys_cleanup(isys); + } + + + rval = ipu_fw_isys_init(av->isys, num_stream_support); + if (rval < 0) + goto out_lib_init; + + mutex_unlock(&isys->mutex); + + return 0; + +out_lib_init: + isys->video_opened--; + mutex_unlock(&isys->mutex); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + ipu_pipeline_pm_use(&av->vdev.entity, 0); +#else + v4l2_pipeline_pm_use(&av->vdev.entity, 0); +#endif + +out_v4l2_fh_release: + v4l2_fh_release(file); +out_power_down: + pm_runtime_put(&isys->adev->dev); + + return rval; +} + +static int video_release(struct file *file) +{ + struct ipu_isys_video *av = video_drvdata(file); + int ret = 0; + + vb2_fop_release(file); + + mutex_lock(&av->isys->mutex); + + if (!--av->isys->video_opened) { + ipu_fw_isys_close(av->isys); + if (av->isys->fwcom) { + av->isys->reset_needed = true; + ret = -EIO; + } + } + + mutex_unlock(&av->isys->mutex); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + ipu_pipeline_pm_use(&av->vdev.entity, 0); +#else + v4l2_pipeline_pm_use(&av->vdev.entity, 0); +#endif + + if (av->isys->reset_needed) + pm_runtime_put_sync(&av->isys->adev->dev); + else + pm_runtime_put(&av->isys->adev->dev); + + return ret; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) +static struct media_pad *other_pad(struct media_pad *pad) +{ + struct media_link *link; + + list_for_each_entry(link, &pad->entity->links, list) { + if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) + != MEDIA_LNK_FL_DATA_LINK) + continue; + + return link->source == pad ? link->sink : link->source; + } + + WARN_ON(1); + return NULL; +} +#endif + +const struct ipu_isys_pixelformat *ipu_isys_get_pixelformat( + struct ipu_isys_video *av, + u32 pixelformat) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + struct media_pad *pad = + av->vdev.entity.pads[0].flags & MEDIA_PAD_FL_SOURCE ? + av->vdev.entity.links[0].sink : av->vdev.entity.links[0].source; +#else + struct media_pad *pad = other_pad(&av->vdev.entity.pads[0]); +#endif + struct v4l2_subdev *sd; + const u32 *supported_codes; + const struct ipu_isys_pixelformat *pfmt; + + if (!pad || !pad->entity) { + WARN_ON(1); + return NULL; + } + + sd = media_entity_to_v4l2_subdev(pad->entity); + supported_codes = to_ipu_isys_subdev(sd)->supported_codes[pad->index]; + + for (pfmt = av->pfmts; pfmt->bpp; pfmt++) { + unsigned int i; + + if (pfmt->pixelformat != pixelformat) + continue; + + for (i = 0; supported_codes[i]; i++) { + if (pfmt->code == supported_codes[i]) + return pfmt; + } + } + + /* Not found. Get the default, i.e. the first defined one. 
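+ * (Editor's illustration: if, say, userspace asks for V4L2_PIX_FMT_NV16
+ * on a pad whose supported_codes[0] is MEDIA_BUS_FMT_SBGGR10_1X10, the
+ * loop below returns the first table entry matching that first
+ * supported code instead of failing the format negotiation.)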
*/ + for (pfmt = av->pfmts; pfmt->bpp; pfmt++) { + if (pfmt->code == *supported_codes) + return pfmt; + } + + WARN_ON(1); + return NULL; +} + +int ipu_isys_vidioc_querycap(struct file *file, void *fh, + struct v4l2_capability *cap) +{ + struct ipu_isys_video *av = video_drvdata(file); + + strlcpy(cap->driver, IPU_ISYS_NAME, sizeof(cap->driver)); + strlcpy(cap->card, av->isys->media_dev.model, sizeof(cap->card)); + snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", + av->isys->media_dev.bus_info); + + cap->capabilities = V4L2_CAP_VIDEO_CAPTURE + | V4L2_CAP_VIDEO_CAPTURE_MPLANE + | V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_STREAMING + | V4L2_CAP_DEVICE_CAPS; + + cap->device_caps = V4L2_CAP_STREAMING; + + switch (av->aq.vbq.type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE; + break; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE; + break; + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE; + break; + default: + WARN_ON(1); + } + + return 0; +} + +int ipu_isys_vidioc_enum_fmt(struct file *file, void *fh, + struct v4l2_fmtdesc *f) +{ + struct ipu_isys_video *av = video_drvdata(file); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + struct media_pad *pad = + av->vdev.entity.pads[0].flags & MEDIA_PAD_FL_SOURCE ? + av->vdev.entity.links[0].sink : av->vdev.entity.links[0].source; +#else + struct media_pad *pad = other_pad(&av->vdev.entity.pads[0]); +#endif + struct v4l2_subdev *sd; + const u32 *supported_codes; + const struct ipu_isys_pixelformat *pfmt; + u32 index; + + if (!pad || !pad->entity) + return -EINVAL; + sd = media_entity_to_v4l2_subdev(pad->entity); + supported_codes = to_ipu_isys_subdev(sd)->supported_codes[pad->index]; + + /* Walk the 0-terminated array for the f->index-th code. */ + for (index = f->index; *supported_codes && index; + index--, supported_codes++) { + }; + + if (!*supported_codes) + return -EINVAL; + + f->flags = 0; + + /* Code found */ + for (pfmt = av->pfmts; pfmt->bpp; pfmt++) + if (pfmt->code == *supported_codes) + break; + + if (!pfmt->bpp) { + dev_warn(&av->isys->adev->dev, + "Format not found in mapping table."); + return -EINVAL; + } + + f->pixelformat = pfmt->pixelformat; + + return 0; +} + +static int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *fh, + struct v4l2_format *fmt) +{ + struct ipu_isys_video *av = video_drvdata(file); + + fmt->fmt.pix_mp = av->mpix; + + return 0; +} + +const struct ipu_isys_pixelformat * +ipu_isys_video_try_fmt_vid_mplane_default(struct ipu_isys_video *av, + struct v4l2_pix_format_mplane *mpix) +{ + return ipu_isys_video_try_fmt_vid_mplane(av, mpix, 0); +} + +const struct ipu_isys_pixelformat *ipu_isys_video_try_fmt_vid_mplane( + struct ipu_isys_video *av, + struct v4l2_pix_format_mplane *mpix, + int store_csi2_header) +{ + const struct ipu_isys_pixelformat *pfmt = + ipu_isys_get_pixelformat(av, mpix->pixelformat); + + if (!pfmt) + return NULL; + mpix->pixelformat = pfmt->pixelformat; + mpix->num_planes = 1; + + mpix->width = clamp(mpix->width, IPU_ISYS_MIN_WIDTH, + IPU_ISYS_MAX_WIDTH); + mpix->height = clamp(mpix->height, IPU_ISYS_MIN_HEIGHT, + IPU_ISYS_MAX_HEIGHT); + + if (!av->packed) + mpix->plane_fmt[0].bytesperline = + mpix->width * DIV_ROUND_UP(pfmt->bpp_planar ? 
+ pfmt->bpp_planar : pfmt->bpp, + BITS_PER_BYTE); + else if (store_csi2_header) + mpix->plane_fmt[0].bytesperline = + DIV_ROUND_UP(av->line_header_length + + av->line_footer_length + + (unsigned int)mpix->width * pfmt->bpp, + BITS_PER_BYTE); + else + mpix->plane_fmt[0].bytesperline = + DIV_ROUND_UP((unsigned int)mpix->width * pfmt->bpp, + BITS_PER_BYTE); + + mpix->plane_fmt[0].bytesperline = ALIGN(mpix->plane_fmt[0].bytesperline, + av->isys->line_align); + if (pfmt->bpp_planar) + mpix->plane_fmt[0].bytesperline = + mpix->plane_fmt[0].bytesperline * + pfmt->bpp / pfmt->bpp_planar; + /* + * (height + 1) * bytesperline due to a hardware issue: the DMA unit + * is a power of two, and a line should be transferred as few units + * as possible. The result is that up to line length more data than + * the image size may be transferred to memory after the image. For + * example, with a 1920-pixel-wide packed RAW10 line (2400 bytes + * before alignment), up to one extra line may follow the frame. + * Another limitation is the GDA allocation unit size. For low + * resolution it gives a bigger number. Use the larger one to avoid + * memory corruption. + */ + mpix->plane_fmt[0].sizeimage = + max(max(mpix->plane_fmt[0].sizeimage, + mpix->plane_fmt[0].bytesperline * mpix->height + + max(mpix->plane_fmt[0].bytesperline, + av->isys->pdata->ipdata->isys_dma_overshoot)), 1U); + + memset(mpix->plane_fmt[0].reserved, 0, + sizeof(mpix->plane_fmt[0].reserved)); + + if (mpix->field == V4L2_FIELD_ANY) + mpix->field = V4L2_FIELD_NONE; + /* Use defaults */ + mpix->colorspace = V4L2_COLORSPACE_RAW; + mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + mpix->quantization = V4L2_QUANTIZATION_DEFAULT; + mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT; + + return pfmt; +} + +static int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu_isys_video *av = video_drvdata(file); + + if (av->aq.vbq.streaming) + return -EBUSY; + + av->pfmt = av->try_fmt_vid_mplane(av, &f->fmt.pix_mp); + av->mpix = f->fmt.pix_mp; + + return 0; +} + +static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu_isys_video *av = video_drvdata(file); + + av->try_fmt_vid_mplane(av, &f->fmt.pix_mp); + + return 0; +} + +static void fmt_sp_to_mp(struct v4l2_pix_format_mplane *mpix, + struct v4l2_pix_format *pix) +{ + mpix->width = pix->width; + mpix->height = pix->height; + mpix->pixelformat = pix->pixelformat; + mpix->field = pix->field; + mpix->num_planes = 1; + mpix->plane_fmt[0].bytesperline = pix->bytesperline; + mpix->plane_fmt[0].sizeimage = pix->sizeimage; + mpix->flags = pix->flags; +} + +static void fmt_mp_to_sp(struct v4l2_pix_format *pix, + struct v4l2_pix_format_mplane *mpix) +{ + pix->width = mpix->width; + pix->height = mpix->height; + pix->pixelformat = mpix->pixelformat; + pix->field = mpix->field; + WARN_ON(mpix->num_planes != 1); + pix->bytesperline = mpix->plane_fmt[0].bytesperline; + pix->sizeimage = mpix->plane_fmt[0].sizeimage; + pix->flags = mpix->flags; + pix->colorspace = mpix->colorspace; + pix->ycbcr_enc = mpix->ycbcr_enc; + pix->quantization = mpix->quantization; + pix->xfer_func = mpix->xfer_func; +} + +static int vidioc_g_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu_isys_video *av = video_drvdata(file); + + fmt_mp_to_sp(&f->fmt.pix, &av->mpix); + + return 0; +} + +static int vidioc_s_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu_isys_video *av = video_drvdata(file); + struct v4l2_pix_format_mplane mpix = { 0 }; + + if (av->aq.vbq.streaming) + return -EBUSY; + + fmt_sp_to_mp(&mpix, &f->fmt.pix); + + av->pfmt = 
av->try_fmt_vid_mplane(av, &mpix); + av->mpix = mpix; + + fmt_mp_to_sp(&f->fmt.pix, &mpix); + + return 0; +} + +static int vidioc_try_fmt_vid_cap(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu_isys_video *av = video_drvdata(file); + struct v4l2_pix_format_mplane mpix = { 0 }; + + fmt_sp_to_mp(&mpix, &f->fmt.pix); + + av->try_fmt_vid_mplane(av, &mpix); + + fmt_mp_to_sp(&f->fmt.pix, &mpix); + + return 0; +} + +static int vidioc_enum_input(struct file *file, void *fh, + struct v4l2_input *input) +{ + if (input->index > 0) + return -EINVAL; + strlcpy(input->name, "camera", sizeof(input->name)); + input->type = V4L2_INPUT_TYPE_CAMERA; + + return 0; +} + +static int vidioc_g_input(struct file *file, void *fh, unsigned int *input) +{ + *input = 0; + + return 0; +} + +static int vidioc_s_input(struct file *file, void *fh, unsigned int input) +{ + return input == 0 ? 0 : -EINVAL; +} + +/* + * Return true if an entity directly connected to an Iunit entity is + * an image source for the ISP. This can be any external directly + * connected entity or any of the test pattern generators in the + * Iunit. + */ +static bool is_external(struct ipu_isys_video *av, struct media_entity *entity) +{ + struct v4l2_subdev *sd; + unsigned int i; + + /* All video nodes are ours. */ + if (!is_media_entity_v4l2_subdev(entity)) + return false; + + sd = media_entity_to_v4l2_subdev(entity); + if (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX, + strlen(IPU_ISYS_ENTITY_PREFIX)) != 0) + return true; + + for (i = 0; i < av->isys->pdata->ipdata->tpg.ntpgs && + av->isys->tpg[i].isys; i++) + if (entity == &av->isys->tpg[i].asd.sd.entity) + return true; + + return false; +} + +static int link_validate(struct media_link *link) +{ + struct ipu_isys_video *av = + container_of(link->sink, struct ipu_isys_video, pad); + /* All sub-devices connected to a video node are ours. 
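+ * (Editor's note, illustrating the routing lookup done below: a route
+ * entry on the connected sub-device such as
+ *
+ *	{ .source_pad = 1, .sink_stream = 2,
+ *	  .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE }
+ *
+ * makes this pipeline use stream ID 2 when the validated link starts
+ * at source pad 1.)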
*/ + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct v4l2_subdev_route r[IPU_ISYS_MAX_STREAMS]; + struct v4l2_subdev_routing routing = { + .routes = r, + .num_routes = IPU_ISYS_MAX_STREAMS, + }; + int i, rval, active = 0; + struct v4l2_subdev *sd; + + if (!link->source->entity) + return -EINVAL; + sd = media_entity_to_v4l2_subdev(link->source->entity); + if (is_external(av, link->source->entity)) { + ip->external = media_entity_remote_pad(av->vdev.entity.pads); + ip->source = to_ipu_isys_subdev(sd)->source; + } + + rval = v4l2_subdev_call(sd, pad, get_routing, &routing); + if (rval) + goto err_subdev; + + for (i = 0; i < routing.num_routes; i++) { + if (!(routing.routes[i].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + continue; + + if (routing.routes[i].source_pad == link->source->index) + ip->stream_id = routing.routes[i].sink_stream; + + active++; + } + + if (ip->external) { + struct v4l2_mbus_frame_desc desc = { + .num_entries = IPU_ISYS_MAX_STREAMS, + }; + + sd = media_entity_to_v4l2_subdev(ip->external->entity); + rval = ipu_isys_subdev_get_frame_desc(sd, &desc); + if (!rval && ip->stream_id < desc.num_entries) + ip->vc = desc.entry[ip->stream_id].bus.csi2.channel; + } + +err_subdev: + ip->nr_queues++; + + return 0; +} + +static void get_stream_opened(struct ipu_isys_video *av) +{ + unsigned long flags; + + spin_lock_irqsave(&av->isys->lock, flags); + av->isys->stream_opened++; + spin_unlock_irqrestore(&av->isys->lock, flags); +} + +static void put_stream_opened(struct ipu_isys_video *av) +{ + unsigned long flags; + + spin_lock_irqsave(&av->isys->lock, flags); + av->isys->stream_opened--; + spin_unlock_irqrestore(&av->isys->lock, flags); +} + +static int get_stream_handle(struct ipu_isys_video *av) +{ + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + unsigned int stream_handle; + unsigned long flags; + + spin_lock_irqsave(&av->isys->lock, flags); + for (stream_handle = 0; + stream_handle < IPU_ISYS_MAX_STREAMS; stream_handle++) + if (!av->isys->pipes[stream_handle]) + break; + if (stream_handle == IPU_ISYS_MAX_STREAMS) { + spin_unlock_irqrestore(&av->isys->lock, flags); + return -EBUSY; + } + av->isys->pipes[stream_handle] = ip; + ip->stream_handle = stream_handle; + spin_unlock_irqrestore(&av->isys->lock, flags); + return 0; +} + +static void put_stream_handle(struct ipu_isys_video *av) +{ + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + unsigned long flags; + + spin_lock_irqsave(&av->isys->lock, flags); + av->isys->pipes[ip->stream_handle] = NULL; + ip->stream_handle = -1; + spin_unlock_irqrestore(&av->isys->lock, flags); +} + +static int get_external_facing_format(struct ipu_isys_pipeline *ip, + struct v4l2_subdev_format *format) +{ + struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip); + struct v4l2_subdev *sd; + struct media_pad *external_facing; + + if (!ip->external->entity) { + WARN_ON(1); + return -ENODEV; + } + sd = media_entity_to_v4l2_subdev(ip->external->entity); + external_facing = (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX, + strlen(IPU_ISYS_ENTITY_PREFIX)) == 0) ? 
+ ip->external : media_entity_remote_pad(ip->external); + if (WARN_ON(!external_facing)) { + dev_warn(&av->isys->adev->dev, + "no external facing pad --- driver bug?\n"); + return -EINVAL; + } + + format->which = V4L2_SUBDEV_FORMAT_ACTIVE; + format->pad = 0; + format->stream = ip->stream_id; + sd = media_entity_to_v4l2_subdev(external_facing->entity); + + return v4l2_subdev_call(sd, pad, get_fmt, NULL, format); +} + +static void short_packet_queue_destroy(struct ipu_isys_pipeline *ip) +{ + struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; +#else + unsigned long attrs; +#endif + unsigned int i; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); +#else + attrs = DMA_ATTR_NON_CONSISTENT; +#endif + if (!ip->short_packet_bufs) + return; + for (i = 0; i < IPU_ISYS_SHORT_PACKET_BUFFER_NUM; i++) { + if (ip->short_packet_bufs[i].buffer) + dma_free_attrs(&av->isys->adev->dev, + ip->short_packet_buffer_size, + ip->short_packet_bufs[i].buffer, + ip->short_packet_bufs[i].dma_addr, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + &attrs +#else + attrs +#endif + ); + } + kfree(ip->short_packet_bufs); + ip->short_packet_bufs = NULL; +} + +static int short_packet_queue_setup(struct ipu_isys_pipeline *ip) +{ + struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip); + struct v4l2_subdev_format source_fmt = { 0 }; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; +#else + unsigned long attrs; +#endif + unsigned int i; + int rval; + size_t buf_size; + + INIT_LIST_HEAD(&ip->pending_interlaced_bufs); + ip->cur_field = V4L2_FIELD_TOP; + + if (ip->isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) { + ip->short_packet_trace_index = 0; + return 0; + } + + rval = get_external_facing_format(ip, &source_fmt); + if (rval) + return rval; + buf_size = IPU_ISYS_SHORT_PACKET_BUF_SIZE(source_fmt.format.height); + ip->short_packet_buffer_size = buf_size; + ip->num_short_packet_lines = + IPU_ISYS_SHORT_PACKET_PKT_LINES(source_fmt.format.height); + + /* Initialize short packet queue. 
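+ * (Editor's sketch of the intended buffer flow, inferred from the list
+ * names used below: buffers are allocated onto short_packet_incoming,
+ * move to short_packet_active while the firmware owns them, and return
+ * to short_packet_incoming once their frame sync data is consumed.)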
*/ + INIT_LIST_HEAD(&ip->short_packet_incoming); + INIT_LIST_HEAD(&ip->short_packet_active); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); +#else + attrs = DMA_ATTR_NON_CONSISTENT; +#endif + + ip->short_packet_bufs = + kzalloc(sizeof(struct ipu_isys_private_buffer) * + IPU_ISYS_SHORT_PACKET_BUFFER_NUM, GFP_KERNEL); + if (!ip->short_packet_bufs) + return -ENOMEM; + + for (i = 0; i < IPU_ISYS_SHORT_PACKET_BUFFER_NUM; i++) { + struct ipu_isys_private_buffer *buf = &ip->short_packet_bufs[i]; + + buf->index = (unsigned int)i; + buf->ip = ip; + buf->ib.type = IPU_ISYS_SHORT_PACKET_BUFFER; + buf->bytesused = buf_size; + buf->buffer = dma_alloc_attrs(&av->isys->adev->dev, buf_size, + &buf->dma_addr, GFP_KERNEL, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + &attrs +#else + attrs +#endif + ); + if (!buf->buffer) { + short_packet_queue_destroy(ip); + return -ENOMEM; + } + list_add(&buf->ib.head, &ip->short_packet_incoming); + } + + return 0; +} + +static void csi_short_packet_prepare_firmware_stream_cfg( + struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_stream_cfg_data_abi *cfg) +{ + int input_pin = cfg->nof_input_pins++; + int output_pin = cfg->nof_output_pins++; + struct ipu_fw_isys_input_pin_info_abi *input_info = + &cfg->input_pins[input_pin]; + struct ipu_fw_isys_output_pin_info_abi *output_info = + &cfg->output_pins[output_pin]; + + /* + * Setting dt as IPU_ISYS_SHORT_PACKET_GENERAL_DT will cause + * MIPI receiver to receive all MIPI short packets. + */ + input_info->dt = IPU_ISYS_SHORT_PACKET_GENERAL_DT; + input_info->input_res.width = IPU_ISYS_SHORT_PACKET_WIDTH; + input_info->input_res.height = ip->num_short_packet_lines; + + ip->output_pins[output_pin].pin_ready = + ipu_isys_queue_short_packet_ready; + ip->output_pins[output_pin].aq = NULL; + ip->short_packet_output_pin = output_pin; + + output_info->input_pin_id = input_pin; + output_info->output_res.width = IPU_ISYS_SHORT_PACKET_WIDTH; + output_info->output_res.height = ip->num_short_packet_lines; + output_info->stride = IPU_ISYS_SHORT_PACKET_WIDTH * + IPU_ISYS_SHORT_PACKET_UNITSIZE; + output_info->pt = IPU_ISYS_SHORT_PACKET_PT; + output_info->ft = IPU_ISYS_SHORT_PACKET_FT; + output_info->send_irq = 1; +} + +void ipu_isys_prepare_firmware_stream_cfg_default( + struct ipu_isys_video *av, + struct ipu_fw_isys_stream_cfg_data_abi *cfg) +{ + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + + struct ipu_isys_queue *aq = &av->aq; + struct ipu_fw_isys_output_pin_info_abi *pin_info; + int pin = cfg->nof_output_pins++; + + aq->fw_output = pin; + ip->output_pins[pin].pin_ready = ipu_isys_queue_buf_ready; + ip->output_pins[pin].aq = aq; + + pin_info = &cfg->output_pins[pin]; + pin_info->input_pin_id = 0; + pin_info->output_res.width = av->mpix.width; + pin_info->output_res.height = av->mpix.height; + + if (!av->pfmt->bpp_planar) + pin_info->stride = av->mpix.plane_fmt[0].bytesperline; + else + pin_info->stride = ALIGN(DIV_ROUND_UP(av->mpix.width * + av->pfmt->bpp_planar, + BITS_PER_BYTE), + av->isys->line_align); + + pin_info->pt = aq->css_pin_type; + pin_info->ft = av->pfmt->css_pixelformat; + pin_info->send_irq = 1; + cfg->vc = ip->vc; +} + +static unsigned int ipu_isys_get_compression_scheme(u32 code) +{ + switch (code) { + case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8: + case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8: + case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8: + case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8: + return 3; + default: + return 0; + } +} + +static 
unsigned int get_comp_format(u32 code) +{ + unsigned int predictor = 0; /* currently hard coded */ + unsigned int udt = ipu_isys_mbus_code_to_mipi(code); + unsigned int scheme = ipu_isys_get_compression_scheme(code); + + /* if data type is not user defined return here */ + if (udt < IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(1) || + udt > IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(8)) + return 0; + + /* + * For each user defined type (1..8) there is a configuration bitfield + * for decompression. + * + * | bit 3 | bits 2:0 | + * | predictor | scheme | + * compression schemes: + * 000 = no compression + * 001 = 10 - 6 - 10 + * 010 = 10 - 7 - 10 + * 011 = 10 - 8 - 10 + * 100 = 12 - 6 - 12 + * 101 = 12 - 7 - 12 + * 110 = 12 - 8 - 12 + * + * e.g. user defined type 3 with 10-8-10 compression (scheme 011) and + * predictor 0 yields ((0 << 3) | 3) << ((3 - 1) * 4) = 0x300. + */ + + return ((predictor << 3) | scheme) << + ((udt - IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(1)) * 4); +} + +/* Create stream and start it using the CSS FW ABI. */ +static int start_stream_firmware(struct ipu_isys_video *av, + struct ipu_isys_buffer_list *bl) +{ + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct device *dev = &av->isys->adev->dev; + struct v4l2_subdev_selection sel_fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .target = V4L2_SEL_TGT_CROP, + .pad = CSI2_BE_PAD_SOURCE, + }; + struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg; + struct isys_fw_msgs *msg = NULL; + struct ipu_fw_isys_frame_buff_set_abi *buf = NULL; + struct ipu_isys_queue *aq; + struct ipu_isys_video *isl_av = NULL; + struct ipu_isys_request *ireq = NULL; + struct v4l2_subdev_format source_fmt = { 0 }; + struct v4l2_subdev *be_sd = NULL; + struct media_pad *source_pad = media_entity_remote_pad(&av->pad); + int rval, rvalout, tout; + + rval = get_external_facing_format(ip, &source_fmt); + if (rval) + return rval; + + msg = ipu_get_fw_msg_buf(ip); + if (!msg) + return -ENOMEM; + + stream_cfg = to_stream_cfg_msg_buf(msg); + stream_cfg->compfmt = get_comp_format(source_fmt.format.code); + stream_cfg->input_pins[0].input_res.width = source_fmt.format.width; + stream_cfg->input_pins[0].input_res.height = source_fmt.format.height; + stream_cfg->input_pins[0].dt = + ipu_isys_mbus_code_to_mipi(source_fmt.format.code); + stream_cfg->input_pins[0].mapped_dt = N_IPU_FW_ISYS_MIPI_DATA_TYPE; + + if (ip->csi2 && !v4l2_ctrl_g_ctrl(ip->csi2->store_csi2_header)) + stream_cfg->input_pins[0].mipi_store_mode = + IPU_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER; + else if (ip->tpg && !v4l2_ctrl_g_ctrl(ip->tpg->store_csi2_header)) + stream_cfg->input_pins[0].mipi_store_mode = + IPU_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER; + + stream_cfg->src = ip->source; + stream_cfg->vc = 0; + stream_cfg->isl_use = ip->isl_mode; + stream_cfg->nof_input_pins = 1; + + /* + * Only CSI2-BE and SOC BE have the capability to do crop, + * so get the crop info from csi2-be or csi2-be-soc. 
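+ *
+ * As a worked example (editor's illustration): a selection rectangle of
+ * { .left = 8, .top = 8, .width = 1920, .height = 1080 } from the BE
+ * sub-device yields
+ *
+ *	crop[0].left_offset = 8;	crop[0].right_offset = 1928;
+ *	crop[0].top_offset = 8;		crop[0].bottom_offset = 1088;
+ *
+ * i.e. the firmware expects exclusive right/bottom edges, not a size.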
+ */ + if (ip->csi2_be) { + be_sd = &ip->csi2_be->asd.sd; + } else if (ip->csi2_be_soc) { + be_sd = &ip->csi2_be_soc->asd.sd; + if (source_pad) + sel_fmt.pad = source_pad->index; + } + if (be_sd && + !v4l2_subdev_call(be_sd, pad, get_selection, NULL, &sel_fmt)) { + stream_cfg->crop[0].left_offset = sel_fmt.r.left; + stream_cfg->crop[0].top_offset = sel_fmt.r.top; + stream_cfg->crop[0].right_offset = sel_fmt.r.left + + sel_fmt.r.width; + stream_cfg->crop[0].bottom_offset = sel_fmt.r.top + + sel_fmt.r.height; + + } else { + stream_cfg->crop[0].right_offset = source_fmt.format.width; + stream_cfg->crop[0].bottom_offset = source_fmt.format.height; + } + + /* + * If the CSI-2 backend's video node is part of the pipeline + * it must be arranged first in the output pin list. This is + * most probably a firmware requirement. + */ + if (ip->isl_mode == IPU_ISL_CSI2_BE) + isl_av = &ip->csi2_be->av; + else if (ip->isl_mode == IPU_ISL_ISA) + isl_av = &av->isys->isa.av; + + if (isl_av) { + struct ipu_isys_queue *safe; + + list_for_each_entry_safe(aq, safe, &ip->queues, node) { + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + + if (av != isl_av) + continue; + + list_del(&aq->node); + list_add(&aq->node, &ip->queues); + break; + } + } + + list_for_each_entry(aq, &ip->queues, node) { + struct ipu_isys_video *__av = ipu_isys_queue_to_video(aq); + + __av->prepare_firmware_stream_cfg(__av, stream_cfg); + } + + if (ip->interlaced && ip->isys->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) + csi_short_packet_prepare_firmware_stream_cfg(ip, stream_cfg); + + ipu_fw_isys_dump_stream_cfg(dev, stream_cfg); + + ip->nr_output_pins = stream_cfg->nof_output_pins; + + rval = get_stream_handle(av); + if (rval) { + dev_dbg(dev, "Can't get stream_handle\n"); + return rval; + } + + reinit_completion(&ip->stream_open_completion); + + ipu_fw_isys_set_params(stream_cfg); + + rval = ipu_fw_isys_complex_cmd(av->isys, + ip->stream_handle, + stream_cfg, + to_dma_addr(msg), + sizeof(*stream_cfg), + IPU_FW_ISYS_SEND_TYPE_STREAM_OPEN); + ipu_put_fw_mgs_buffer(av->isys, (uintptr_t) stream_cfg); + + if (rval < 0) { + dev_err(dev, "can't open stream (%d)\n", rval); + goto out_put_stream_handle; + } + + get_stream_opened(av); + + tout = wait_for_completion_timeout(&ip->stream_open_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) { + dev_err(dev, "stream open timeout\n"); + rval = -ETIMEDOUT; + goto out_put_stream_opened; + } + if (ip->error) { + dev_err(dev, "stream open error: %d\n", ip->error); + rval = -EIO; + goto out_put_stream_opened; + } + dev_dbg(dev, "start stream: open complete\n"); + + ireq = ipu_isys_next_queued_request(ip); + + if (bl || ireq) { + msg = ipu_get_fw_msg_buf(ip); + if (!msg) { + rval = -ENOMEM; + goto out_put_stream_opened; + } + buf = to_frame_msg_buf(msg); + } + + if (bl) { + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set(buf, ip, bl); + ipu_isys_buffer_list_queue(bl, + IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0); + } else if (ireq) { + rval = ipu_isys_req_prepare(&av->isys->media_dev, + ireq, ip, buf); + if (rval) + goto out_put_stream_opened; + } + + reinit_completion(&ip->stream_start_completion); + + if (bl || ireq) { + ipu_fw_isys_dump_frame_buff_set(dev, buf, + stream_cfg->nof_output_pins); + rval = ipu_fw_isys_complex_cmd(av->isys, + ip->stream_handle, + buf, to_dma_addr(msg), + sizeof(*buf), + IPU_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE); + ipu_put_fw_mgs_buffer(av->isys, (uintptr_t) buf); + } else { + rval = ipu_fw_isys_simple_cmd(av->isys, + ip->stream_handle, 
+ IPU_FW_ISYS_SEND_TYPE_STREAM_START); + } + + if (rval < 0) { + dev_err(dev, "can't start streaming (%d)\n", rval); + goto out_stream_close; + } + + tout = wait_for_completion_timeout(&ip->stream_start_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) { + dev_err(dev, "stream start timeout\n"); + rval = -ETIMEDOUT; + goto out_stream_close; + } + if (ip->error) { + dev_err(dev, "stream start error: %d\n", ip->error); + rval = -EIO; + goto out_stream_close; + } + dev_dbg(dev, "start stream: complete\n"); + + return 0; + +out_stream_close: + reinit_completion(&ip->stream_close_completion); + + rvalout = ipu_fw_isys_simple_cmd(av->isys, + ip->stream_handle, + IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE); + if (rvalout < 0) { + dev_dbg(dev, "can't close stream (%d)\n", rvalout); + goto out_put_stream_opened; + } + + tout = wait_for_completion_timeout(&ip->stream_close_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_err(dev, "stream close timeout\n"); + else if (ip->error) + dev_err(dev, "stream close error: %d\n", ip->error); + else + dev_dbg(dev, "stream close complete\n"); + +out_put_stream_opened: + put_stream_opened(av); + +out_put_stream_handle: + put_stream_handle(av); + return rval; +} + +static void stop_streaming_firmware(struct ipu_isys_video *av) +{ + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct device *dev = &av->isys->adev->dev; + int rval, tout; + enum ipu_fw_isys_send_type send_type = + IPU_FW_ISYS_SEND_TYPE_STREAM_FLUSH; + + reinit_completion(&ip->stream_stop_completion); + + /* Use STOP command if running in CSI capture mode */ + if (use_stream_stop) + send_type = IPU_FW_ISYS_SEND_TYPE_STREAM_STOP; + + rval = ipu_fw_isys_simple_cmd(av->isys, ip->stream_handle, + send_type); + + if (rval < 0) { + dev_err(dev, "can't stop stream (%d)\n", rval); + return; + } + + tout = wait_for_completion_timeout(&ip->stream_stop_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_err(dev, "stream stop timeout\n"); + else if (ip->error) + dev_err(dev, "stream stop error: %d\n", ip->error); + else + dev_dbg(dev, "stop stream: complete\n"); +} + +static void close_streaming_firmware(struct ipu_isys_video *av) +{ + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct device *dev = &av->isys->adev->dev; + int rval, tout; + + reinit_completion(&ip->stream_close_completion); + + rval = ipu_fw_isys_simple_cmd(av->isys, ip->stream_handle, + IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE); + if (rval < 0) { + dev_err(dev, "can't close stream (%d)\n", rval); + return; + } + + tout = wait_for_completion_timeout(&ip->stream_close_completion, + IPU_LIB_CALL_TIMEOUT_JIFFIES); + if (!tout) + dev_err(dev, "stream close timeout\n"); + else if (ip->error) + dev_err(dev, "stream close error: %d\n", ip->error); + else + dev_dbg(dev, "close stream: complete\n"); + + put_stream_opened(av); + put_stream_handle(av); +} + +void +ipu_isys_video_add_capture_done(struct ipu_isys_pipeline *ip, + void (*capture_done) + (struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *resp)) +{ + unsigned int i; + + /* Different instances may register the same function. Add it only once */ + for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++) + if (ip->capture_done[i] == capture_done) + return; + + for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++) { + if (!ip->capture_done[i]) { + ip->capture_done[i] = capture_done; + return; + } + } + /* + * Too many callbacks registered. The IPU_NUM_CAPTURE_DONE + * constant probably needs to be increased. 
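+ * (Editor's note: with the current IPU_NUM_CAPTURE_DONE of 2,
+ * registering e.g. a receiver handler and a TPG handler fills the
+ * table, and a third distinct callback lands here. Re-registering an
+ * already present callback is a harmless no-op, as checked above.)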
+ */ + WARN_ON(1); +} + +int ipu_isys_video_prepare_streaming(struct ipu_isys_video *av, + unsigned int state) +{ + struct ipu_isys *isys = av->isys; + struct device *dev = &isys->adev->dev; + struct ipu_isys_pipeline *ip; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + struct media_graph graph; +#else + struct media_entity_graph graph; +#endif + struct media_entity *entity; + struct media_device *mdev = &av->isys->media_dev; + int rval; + unsigned int i; + + dev_dbg(dev, "prepare stream: %d\n", state); + + if (!state) { + ip = to_ipu_isys_pipeline(av->vdev.entity.pipe); + + if (ip->interlaced && isys->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) + short_packet_queue_destroy(ip); + media_pipeline_stop(&av->vdev.entity); + media_entity_enum_cleanup(&ip->entity_enum); + return 0; + } + + ip = &av->ip; + + WARN_ON(ip->nr_streaming); + ip->has_sof = false; + ip->nr_queues = 0; + ip->external = NULL; + atomic_set(&ip->sequence, 0); + ip->isl_mode = IPU_ISL_OFF; + + for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++) + ip->capture_done[i] = NULL; + ip->csi2_be = NULL; + ip->csi2_be_soc = NULL; + ip->csi2 = NULL; + ip->tpg = NULL; + ip->seq_index = 0; + memset(ip->seq, 0, sizeof(ip->seq)); + + WARN_ON(!list_empty(&ip->queues)); + ip->interlaced = false; + + rval = media_entity_enum_init(&ip->entity_enum, mdev); + if (rval) + return rval; + + rval = media_pipeline_start(&av->vdev.entity, &ip->pipe); + if (rval < 0) { + dev_dbg(dev, "pipeline start failed\n"); + goto out_enum_cleanup; + } + + if (!ip->external) { + dev_err(dev, "no external entity set! Driver bug?\n"); + rval = -EINVAL; + goto out_pipeline_stop; + } + + rval = media_graph_walk_init(&graph, mdev); + if (rval) + goto out_pipeline_stop; + + /* Gather all entities in the graph. */ + mutex_lock(&mdev->graph_mutex); + media_graph_walk_start(&graph, &av->vdev.entity.pads[0]); + while ((entity = media_graph_walk_next(&graph))) + media_entity_enum_set(&ip->entity_enum, entity); + + mutex_unlock(&mdev->graph_mutex); + + media_graph_walk_cleanup(&graph); + + if (ip->interlaced) { + rval = short_packet_queue_setup(ip); + if (rval) { + dev_err(&isys->adev->dev, + "Failed to setup short packet buffer.\n"); + goto out_pipeline_stop; + } + } + + dev_dbg(dev, "prepare stream: external entity %s\n", + ip->external->entity->name); + + return 0; + +out_pipeline_stop: + media_pipeline_stop(&av->vdev.entity); + +out_enum_cleanup: + media_entity_enum_cleanup(&ip->entity_enum); + + return rval; +} + +static int perform_skew_cal(struct ipu_isys_pipeline *ip) +{ + struct v4l2_subdev *ext_sd = + media_entity_to_v4l2_subdev(ip->external->entity); + int rval; + + if (!ext_sd) { + WARN_ON(1); + return -ENODEV; + } + ipu_isys_csi2_set_skew_cal(ip->csi2, true); + + rval = v4l2_subdev_call(ext_sd, video, s_stream, true); + if (rval) + goto turn_off_skew_cal; + + /* TODO: do we have a better way available than waiting for a while ? */ + msleep(50); + + rval = v4l2_subdev_call(ext_sd, video, s_stream, false); + +turn_off_skew_cal: + ipu_isys_csi2_set_skew_cal(ip->csi2, false); + + /* TODO: do we have a better way available than waiting for a while ? 
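+ * (Editor's sketch of one possible answer, using a hypothetical
+ * completion that the receiver driver would have to signal once the
+ * calibration burst ends:
+ *
+ *	if (!wait_for_completion_timeout(&ip->csi2->skew_cal_done,
+ *					 msecs_to_jiffies(50)))
+ *		; // fall back to the fixed delay below
+ *
+ * skew_cal_done does not exist in this patch; it is an illustration
+ * only.)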
*/ + msleep(50); + + return rval; +} + +int ipu_isys_video_set_streaming(struct ipu_isys_video *av, + unsigned int state, + struct ipu_isys_buffer_list *bl) +{ + struct device *dev = &av->isys->adev->dev; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + struct media_device *mdev = av->vdev.entity.parent; + struct media_entity_graph graph; +#else + struct media_device *mdev = av->vdev.entity.graph_obj.mdev; +#endif + struct media_entity_enum entities; + + struct media_entity *entity, *entity2; + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + struct v4l2_subdev *sd, *esd; + int rval = 0; + + dev_dbg(dev, "set stream: %d\n", state); + + if (!ip->external->entity) { + WARN_ON(1); + return -ENODEV; + } + esd = media_entity_to_v4l2_subdev(ip->external->entity); + + if (state) { + rval = media_graph_walk_init(&ip->graph, mdev); + if (rval) + return rval; + rval = media_entity_enum_init(&entities, mdev); + if (rval) + goto out_media_entity_graph_init; + } + + if (!state) { + stop_streaming_firmware(av); + + /* stop external sub-device now. */ + dev_err(dev, "s_stream %s (ext)\n", ip->external->entity->name); + + if (ip->csi2) { + if (ip->csi2->stream_count == 1) { + v4l2_subdev_call(esd, video, s_stream, state); + ipu_isys_csi2_wait_last_eof(ip->csi2); + } + } else { + v4l2_subdev_call(esd, video, s_stream, state); + } + } + + mutex_lock(&mdev->graph_mutex); + + media_graph_walk_start(& +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + ip-> +#endif + graph, + &av->vdev.entity.pads[0]); + + while ((entity = media_graph_walk_next(& +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + ip-> +#endif + graph))) { + sd = media_entity_to_v4l2_subdev(entity); + + dev_dbg(dev, "set stream: entity %s\n", entity->name); + + /* Non-subdev nodes can be safely ignored here. */ + if (!is_media_entity_v4l2_subdev(entity)) + continue; + + /* Don't start truly external devices quite yet. */ + if (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX, + strlen(IPU_ISYS_ENTITY_PREFIX)) != 0 || + ip->external->entity == entity) + continue; + + dev_dbg(dev, "s_stream %s\n", entity->name); + rval = v4l2_subdev_call(sd, video, s_stream, state); + if (!state) + continue; + if (rval && rval != -ENOIOCTLCMD) { + mutex_unlock(&mdev->graph_mutex); + goto out_media_entity_stop_streaming; + } + + media_entity_enum_set(&entities, entity); + } + + mutex_unlock(&mdev->graph_mutex); + + /* Oh crap */ + if (state) { + if (ipu_isys_csi2_skew_cal_required(ip->csi2) && + ip->csi2->remote_streams == ip->csi2->stream_count) + perform_skew_cal(ip); + + rval = start_stream_firmware(av, bl); + if (rval) + goto out_media_entity_stop_streaming; + + dev_dbg(dev, "set stream: source %d, stream_handle %d\n", + ip->source, ip->stream_handle); + + /* Start external sub-device now. 
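+ * The ordering above is deliberate: the internal sub-devices and the
+ * firmware stream are already running before the sensor emits its
+ * first frame. (Editor's note: the checks below start the sensor only
+ * from the last streaming video node, i.e. once remote_streams equals
+ * stream_count on a multi-stream CSI-2 receiver.)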
*/ + dev_dbg(dev, "set stream: s_stream %s (ext)\n", + ip->external->entity->name); + + if (ip->csi2 && + ip->csi2->remote_streams == ip->csi2->stream_count) + rval = v4l2_subdev_call(esd, video, s_stream, state); + else if (!ip->csi2) + rval = v4l2_subdev_call(esd, video, s_stream, state); + if (rval) + goto out_media_entity_stop_streaming_firmware; + } else { + close_streaming_firmware(av); + av->ip.stream_id = 0; + av->ip.vc = 0; + } + + if (state) + media_entity_enum_cleanup(&entities); + else + media_graph_walk_cleanup(&ip->graph); + av->streaming = state; + + return 0; + +out_media_entity_stop_streaming_firmware: + stop_streaming_firmware(av); + +out_media_entity_stop_streaming: + mutex_lock(&mdev->graph_mutex); + + media_graph_walk_start(& +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + ip-> +#endif + graph, + &av->vdev.entity.pads[0]); + + while (state && (entity2 = media_graph_walk_next(& +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + ip-> +#endif + graph)) && + entity2 != entity) { + sd = media_entity_to_v4l2_subdev(entity2); + + if (!media_entity_enum_test(&entities, entity2)) + continue; + + v4l2_subdev_call(sd, video, s_stream, 0); + } + + mutex_unlock(&mdev->graph_mutex); + + media_entity_enum_cleanup(&entities); + +out_media_entity_graph_init: + media_graph_walk_cleanup(&ip->graph); + + return rval; +} + +#ifdef CONFIG_COMPAT +static long ipu_isys_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + long ret = -ENOIOCTLCMD; + void __user *up = compat_ptr(arg); + + /* + * at present, there is not any private IOCTL need to compat handle + */ + if (file->f_op->unlocked_ioctl) + ret = file->f_op->unlocked_ioctl(file, cmd, (unsigned long)up); + + return ret; +} +#endif + +static const struct v4l2_ioctl_ops ioctl_ops_splane = { + .vidioc_querycap = ipu_isys_vidioc_querycap, + .vidioc_enum_fmt_vid_cap = ipu_isys_vidioc_enum_fmt, + .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, + .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_enum_input = vidioc_enum_input, + .vidioc_g_input = vidioc_g_input, + .vidioc_s_input = vidioc_s_input, +}; + +static const struct v4l2_ioctl_ops ioctl_ops_mplane = { + .vidioc_querycap = ipu_isys_vidioc_querycap, + .vidioc_enum_fmt_vid_cap_mplane = ipu_isys_vidioc_enum_fmt, + .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap_mplane, + .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap_mplane, + .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_enum_input = vidioc_enum_input, + .vidioc_g_input = vidioc_g_input, + .vidioc_s_input = vidioc_s_input, +}; + +static const struct media_entity_operations entity_ops = { + .link_validate = link_validate, +}; + +static const struct v4l2_file_operations isys_fops = { + .owner = THIS_MODULE, + 
.poll = vb2_fop_poll, + .unlocked_ioctl = video_ioctl2, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = ipu_isys_compat_ioctl, +#endif + .mmap = vb2_fop_mmap, + .open = video_open, + .release = video_release, +}; + +/* + * Do everything that's needed to initialise things related to video + * buffer queue, video node, and the related media entity. The caller + * is expected to assign isys field and set the name of the video + * device. + */ +int ipu_isys_video_init(struct ipu_isys_video *av, + struct media_entity *entity, + unsigned int pad, unsigned long pad_flags, + unsigned int flags) +{ + const struct v4l2_ioctl_ops *ioctl_ops = NULL; + int rval; + + mutex_init(&av->mutex); + init_completion(&av->ip.stream_open_completion); + init_completion(&av->ip.stream_close_completion); + init_completion(&av->ip.stream_start_completion); + init_completion(&av->ip.stream_stop_completion); + init_completion(&av->ip.capture_ack_completion); + INIT_LIST_HEAD(&av->ip.queues); + spin_lock_init(&av->ip.short_packet_queue_lock); + av->ip.isys = av->isys; + av->ip.stream_id = 0; + av->ip.vc = 0; + + if (pad_flags & MEDIA_PAD_FL_SINK) { + /* data_offset is available only for multi-plane buffers */ + if (av->line_header_length) { + av->aq.vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + ioctl_ops = &ioctl_ops_mplane; + } else { + av->aq.vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + ioctl_ops = &ioctl_ops_splane; + } + av->vdev.vfl_dir = VFL_DIR_RX; + } else { + av->aq.vbq.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + av->vdev.vfl_dir = VFL_DIR_TX; + } + rval = ipu_isys_queue_init(&av->aq); + if (rval) + goto out_mutex_destroy; + + av->pad.flags = pad_flags | MEDIA_PAD_FL_MUST_CONNECT; + rval = media_entity_pads_init(&av->vdev.entity, 1, &av->pad); + if (rval) + goto out_ipu_isys_queue_cleanup; + + av->vdev.entity.ops = &entity_ops; + av->vdev.release = video_device_release_empty; + av->vdev.fops = &isys_fops; + av->vdev.v4l2_dev = &av->isys->v4l2_dev; + if (!av->vdev.ioctl_ops) + av->vdev.ioctl_ops = ioctl_ops; + av->vdev.queue = &av->aq.vbq; + av->vdev.lock = &av->mutex; + set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags); + video_set_drvdata(&av->vdev, av); + + mutex_lock(&av->mutex); + + rval = video_register_device(&av->vdev, VFL_TYPE_GRABBER, -1); + if (rval) + goto out_media_entity_cleanup; + + if (pad_flags & MEDIA_PAD_FL_SINK) + rval = media_create_pad_link(entity, pad, + &av->vdev.entity, 0, flags); + else + rval = media_create_pad_link(&av->vdev.entity, 0, entity, + pad, flags); + if (rval) { + dev_info(&av->isys->adev->dev, "can't create link\n"); + goto out_media_entity_cleanup; + } + + av->pfmt = av->try_fmt_vid_mplane(av, &av->mpix); + + mutex_unlock(&av->mutex); + + return rval; + +out_media_entity_cleanup: + video_unregister_device(&av->vdev); + mutex_unlock(&av->mutex); + media_entity_cleanup(&av->vdev.entity); + +out_ipu_isys_queue_cleanup: + ipu_isys_queue_cleanup(&av->aq); + +out_mutex_destroy: + mutex_destroy(&av->mutex); + + return rval; +} + +void ipu_isys_video_cleanup(struct ipu_isys_video *av) +{ + video_unregister_device(&av->vdev); + media_entity_cleanup(&av->vdev.entity); + mutex_destroy(&av->mutex); + ipu_isys_queue_cleanup(&av->aq); +} diff --git a/drivers/media/pci/intel/ipu-isys-video.h b/drivers/media/pci/intel/ipu-isys-video.h new file mode 100644 index 000000000000..c1375f70a897 --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys-video.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_VIDEO_H +#define 
IPU_ISYS_VIDEO_H + +#include +#include +#include +#include +#include +#include + +#include "ipu-isys-queue.h" + +#define IPU_ISYS_OUTPUT_PINS 11 +#define IPU_NUM_CAPTURE_DONE 2 +#define IPU_ISYS_MAX_PARALLEL_SOF 2 + +struct ipu_isys; +struct ipu_isys_csi2_be_soc; +struct ipu_fw_isys_stream_cfg_data_abi; + +struct ipu_isys_pixelformat { + u32 pixelformat; + u32 bpp; + u32 bpp_packed; + u32 bpp_planar; + u32 code; + u32 css_pixelformat; +}; + +struct sequence_info { + unsigned int sequence; + u64 timestamp; +}; + +struct output_pin_data { + void (*pin_ready)(struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *info); + struct ipu_isys_queue *aq; +}; + +struct ipu_isys_pipeline { + struct media_pipeline pipe; + struct media_pad *external; + atomic_t sequence; + unsigned int seq_index; + struct sequence_info seq[IPU_ISYS_MAX_PARALLEL_SOF]; + int source; /* SSI stream source */ + int stream_handle; /* stream handle for CSS API */ + unsigned int nr_output_pins; /* How many firmware pins? */ + enum ipu_isl_mode isl_mode; + struct ipu_isys_csi2_be *csi2_be; + struct ipu_isys_csi2_be_soc *csi2_be_soc; + struct ipu_isys_csi2 *csi2; + struct ipu_isys_tpg *tpg; + /* + * Number of capture queues, write access serialised using struct + * ipu_isys.stream_mutex + */ + int nr_queues; + int nr_streaming; /* Number of capture queues streaming */ + int streaming; /* Has streaming been really started? */ + struct list_head queues; + struct completion stream_open_completion; + struct completion stream_close_completion; + struct completion stream_start_completion; + struct completion stream_stop_completion; + struct completion capture_ack_completion; + struct ipu_isys *isys; + + void (*capture_done[IPU_NUM_CAPTURE_DONE]) + (struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *resp); + struct output_pin_data output_pins[IPU_ISYS_OUTPUT_PINS]; + bool has_sof; + bool interlaced; + int error; + struct ipu_isys_private_buffer *short_packet_bufs; + size_t short_packet_buffer_size; + unsigned int num_short_packet_lines; + unsigned int short_packet_output_pin; + unsigned int cur_field; + struct list_head short_packet_incoming; + struct list_head short_packet_active; + /* Serialize access to short packet active and incoming lists */ + spinlock_t short_packet_queue_lock; + struct list_head pending_interlaced_bufs; + unsigned int short_packet_trace_index; + unsigned int vc; + unsigned int stream_id; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + struct media_graph graph; +#else + struct media_entity_graph graph; +#endif +#endif + struct media_entity_enum entity_enum; +}; + +#define to_ipu_isys_pipeline(__pipe) \ + container_of((__pipe), struct ipu_isys_pipeline, pipe) + +struct ipu_isys_video { + /* Serialise access to other fields in the struct. 
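+ * (Editor's note: this is the lock installed as vdev.lock in
+ * ipu_isys_video_init(), so the v4l2 core takes it around ioctls;
+ * driver code touching mpix/pfmt directly should do the same, e.g.
+ *
+ *	mutex_lock(&av->mutex);
+ *	av->pfmt = av->try_fmt_vid_mplane(av, &av->mpix);
+ *	mutex_unlock(&av->mutex);
+ * )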
*/ + struct mutex mutex; + struct media_pad pad; + struct video_device vdev; + struct v4l2_pix_format_mplane mpix; + const struct ipu_isys_pixelformat *pfmts; + const struct ipu_isys_pixelformat *pfmt; + struct ipu_isys_queue aq; + struct ipu_isys *isys; + struct ipu_isys_pipeline ip; + unsigned int streaming; + bool packed; + unsigned int line_header_length; /* bits */ + unsigned int line_footer_length; /* bits */ + const struct ipu_isys_pixelformat *(*try_fmt_vid_mplane)( + struct ipu_isys_video *av, + struct v4l2_pix_format_mplane *mpix); + void (*prepare_firmware_stream_cfg)(struct ipu_isys_video *av, + struct ipu_fw_isys_stream_cfg_data_abi *cfg); +}; + +#define ipu_isys_queue_to_video(__aq) \ + container_of(__aq, struct ipu_isys_video, aq) + +extern const struct ipu_isys_pixelformat ipu_isys_pfmts[]; +extern const struct ipu_isys_pixelformat ipu_isys_pfmts_be_soc[]; +extern const struct ipu_isys_pixelformat ipu_isys_pfmts_packed[]; + +const struct ipu_isys_pixelformat * +ipu_isys_get_pixelformat(struct ipu_isys_video *av, u32 pixelformat); + +int ipu_isys_vidioc_querycap(struct file *file, void *fh, + struct v4l2_capability *cap); + +int ipu_isys_vidioc_enum_fmt(struct file *file, void *fh, + struct v4l2_fmtdesc *f); + +const struct ipu_isys_pixelformat * +ipu_isys_video_try_fmt_vid_mplane_default(struct ipu_isys_video *av, + struct v4l2_pix_format_mplane *mpix); + +const struct ipu_isys_pixelformat * +ipu_isys_video_try_fmt_vid_mplane(struct ipu_isys_video *av, + struct v4l2_pix_format_mplane *mpix, + int store_csi2_header); + +void ipu_isys_prepare_firmware_stream_cfg_default( + struct ipu_isys_video *av, + struct ipu_fw_isys_stream_cfg_data_abi *cfg); +int ipu_isys_video_prepare_streaming(struct ipu_isys_video *av, + unsigned int state); +int ipu_isys_video_set_streaming(struct ipu_isys_video *av, unsigned int state, + struct ipu_isys_buffer_list *bl); +int ipu_isys_video_init(struct ipu_isys_video *av, struct media_entity *source, + unsigned int source_pad, unsigned long pad_flags, + unsigned int flags); +void ipu_isys_video_cleanup(struct ipu_isys_video *av); +void ipu_isys_video_add_capture_done(struct ipu_isys_pipeline *ip, + void (*capture_done) + (struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *resp)); + +#endif /* IPU_ISYS_VIDEO_H */ diff --git a/drivers/media/pci/intel/ipu-isys.c b/drivers/media/pci/intel/ipu-isys.c new file mode 100644 index 000000000000..6ce6bab5c771 --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys.c @@ -0,0 +1,1566 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0) +#include +#endif +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-cpd.h" +#include "ipu-mmu.h" +#include "ipu-dma.h" +#include "ipu-isys.h" +#include "ipu-isys-csi2.h" +#include "ipu-isys-tpg.h" +#include "ipu-isys-video.h" +#include "ipu-platform-regs.h" +#include "ipu-buttress.h" +#include "ipu-platform.h" +#include "ipu-platform-buttress-regs.h" + +#define ISYS_PM_QOS_VALUE 300 + +/* + * This module parameter indicates whether the CSI-2 port + * configuration can be optimized. 
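+ *
+ * Usage example (module name assumed for illustration only):
+ *
+ *	modprobe intel-ipu4-isys csi2_port_optimized=1
+ *
+ * With the 0660 permissions below the value is also visible under
+ * /sys/module/<module>/parameters/csi2_port_optimized.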
+ */ +static bool csi2_port_optimized; +module_param(csi2_port_optimized, bool, 0660); +MODULE_PARM_DESC(csi2_port_optimized, "IPU CSI2 port optimization"); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) +/* + * BEGIN adapted code from drivers/media/platform/omap3isp/isp.c. + * FIXME: This (in terms of functionality if not code) should be most + * likely generalised in the framework, and use made optional for + * drivers. + */ +/* + * ipu_pipeline_pm_use_count - Count the number of users of a pipeline + * @entity: The entity + * + * Return the total number of users of all video device nodes in the pipeline. + */ +static int ipu_pipeline_pm_use_count(struct media_pad *pad) +{ + struct media_entity_graph graph; + struct media_entity *entity = pad->entity; + int use = 0; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_graph_walk_init(&graph, entity->graph_obj.mdev); +#endif + media_graph_walk_start(&graph, pad); + + while ((entity = media_graph_walk_next(&graph))) { + if (is_media_entity_v4l2_io(entity)) + use += entity->use_count; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_graph_walk_cleanup(&graph); +#endif + return use; +} + +/* + * ipu_pipeline_pm_power_one - Apply power change to an entity + * @entity: The entity + * @change: Use count change + * + * Change the entity use count by @change. If the entity is a subdev update its + * power state by calling the core::s_power operation when the use count goes + * from 0 to != 0 or from != 0 to 0. + * + * Return 0 on success or a negative error code on failure. + */ +static int ipu_pipeline_pm_power_one(struct media_entity *entity, int change) +{ + struct v4l2_subdev *subdev; + int ret; + + subdev = is_media_entity_v4l2_subdev(entity) + ? media_entity_to_v4l2_subdev(entity) : NULL; + + if (entity->use_count == 0 && change > 0 && subdev) { + ret = v4l2_subdev_call(subdev, core, s_power, 1); + if (ret < 0 && ret != -ENOIOCTLCMD) + return ret; + } + + entity->use_count += change; + WARN_ON(entity->use_count < 0); + + if (entity->use_count == 0 && change < 0 && subdev) + v4l2_subdev_call(subdev, core, s_power, 0); + + return 0; +} + +/* + * ipu_get_linked_pad - Find internally connected pad for a given pad + * @entity: The entity + * @pad: Initial pad + * + * Return index of the linked pad. + */ +static int ipu_get_linked_pad(struct media_entity *entity, + struct media_pad *pad) +{ + int i; + + for (i = 0; i < entity->num_pads; i++) { + struct media_pad *opposite_pad = &entity->pads[i]; + + if (opposite_pad == pad) + continue; + + if (media_entity_has_route(entity, pad->index, + opposite_pad->index)) + return opposite_pad->index; + } + + return 0; +} + +/* + * ipu_pipeline_pm_power - Apply power change to all entities + * in a pipeline + * @entity: The entity + * @change: Use count change + * @from_pad: Starting pad + * + * Walk the pipeline to update the use count and the power state of + * all non-node + * entities. + * + * Return 0 on success or a negative error code on failure. 
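+ *
+ * For example, ipu_pipeline_pm_power(&sd->entity, 1, 0) powers up every
+ * non-node entity reachable from pad 0 of the (illustrative) subdev @sd;
+ * if one of them fails, the graph is walked again and the change is
+ * reverted on the entities that had already been powered.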
+ */ +static int ipu_pipeline_pm_power(struct media_entity *entity, + int change, int from_pad) +{ + struct media_entity_graph graph; + struct media_entity *first = entity; + int ret = 0; + + if (!change) + return 0; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_graph_walk_init(&graph, entity->graph_obj.mdev); +#endif + media_graph_walk_start(&graph, &entity->pads[from_pad]); + + while (!ret && (entity = media_graph_walk_next(&graph))) + if (!is_media_entity_v4l2_io(entity)) + ret = ipu_pipeline_pm_power_one(entity, change); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_graph_walk_cleanup(&graph); +#endif + if (!ret) + return 0; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_graph_walk_init(&graph, entity->graph_obj.mdev); +#endif + media_graph_walk_start(&graph, &first->pads[from_pad]); + + while ((first = media_graph_walk_next(&graph)) && + first != entity) + if (!is_media_entity_v4l2_io(first)) + ipu_pipeline_pm_power_one(first, -change); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_graph_walk_cleanup(&graph); +#endif + return ret; +} + +/* + * ipu_pipeline_pm_use - Update the use count of an entity + * @entity: The entity + * @use: Use (1) or stop using (0) the entity + * + * Update the use count of all entities in the pipeline and power entities + * on or off accordingly. + * + * Return 0 on success or a negative error code on failure. Powering entities + * off is assumed to never fail. No failure can occur when the use parameter is + * set to 0. + */ +int ipu_pipeline_pm_use(struct media_entity *entity, int use) +{ + int change = use ? 1 : -1; + int ret; + + mutex_lock(&entity-> +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + parent +#else + graph_obj.mdev +#endif + ->graph_mutex); + + /* Apply use count to node. */ + entity->use_count += change; + WARN_ON(entity->use_count < 0); + + /* Apply power change to connected non-nodes. */ + ret = ipu_pipeline_pm_power(entity, change, 0); + if (ret < 0) + entity->use_count -= change; + + mutex_unlock(&entity-> +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + parent +#else + graph_obj.mdev +#endif + ->graph_mutex); + + return ret; +} + +/* + * ipu_pipeline_link_notify - Link management notification callback + * @link: The link + * @flags: New link flags that will be applied + * @notification: The link's state change notification type + * (MEDIA_DEV_NOTIFY_*) + * + * React to link management on powered pipelines by updating the use count of + * all entities in the source and sink sides of the link. Entities are powered + * on or off accordingly. + * + * Return 0 on success or a negative error code on failure. Powering entities + * off is assumed to never fail. This function will not fail for disconnection + * events. + */ +static int ipu_pipeline_link_notify(struct media_link *link, u32 flags, + unsigned int notification) +{ + struct media_entity *source = link->source->entity; + struct media_entity *sink = link->sink->entity; + int source_use = ipu_pipeline_pm_use_count(link->source); + int sink_use = ipu_pipeline_pm_use_count(link->sink); + int ret; + + if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH && + !(flags & MEDIA_LNK_FL_ENABLED)) { + /* Powering off entities is assumed to never fail. 
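+		 * A link going down therefore simply drops the use counts
+		 * that were taken on both sides when it was enabled.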
*/ + ipu_pipeline_pm_power(source, -sink_use, 0); + ipu_pipeline_pm_power(sink, -source_use, 0); + return 0; + } + + if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH && + (flags & MEDIA_LNK_FL_ENABLED)) { + int from_pad = ipu_get_linked_pad(source, link->source); + + ret = ipu_pipeline_pm_power(source, sink_use, from_pad); + if (ret < 0) + return ret; + + ret = ipu_pipeline_pm_power(sink, source_use, 0); + if (ret < 0) + ipu_pipeline_pm_power(source, -sink_use, 0); + + return ret; + } + + return 0; +} + +/* END adapted code from drivers/media/platform/omap3isp/isp.c */ +#endif /* < v4.6 */ + +struct isys_i2c_test { + u8 bus_nr; + u16 addr; + struct i2c_client *client; +}; + +static int isys_i2c_test(struct device *dev, void *priv) +{ + struct i2c_client *client = i2c_verify_client(dev); + struct isys_i2c_test *test = priv; + + if (!client) + return 0; + + if (i2c_adapter_id(client->adapter) != test->bus_nr || + client->addr != test->addr) + return 0; + + test->client = client; + + return 0; +} + +static struct +i2c_client *isys_find_i2c_subdev(struct i2c_adapter *adapter, + struct ipu_isys_subdev_info *sd_info) +{ + struct i2c_board_info *info = &sd_info->i2c.board_info; + struct isys_i2c_test test = { + .bus_nr = i2c_adapter_id(adapter), + .addr = info->addr, + }; + int rval; + + rval = i2c_for_each_dev(&test, isys_i2c_test); + if (rval || !test.client) + return NULL; + return test.client; +} + +static struct v4l2_subdev * +register_acpi_i2c_subdev(struct v4l2_device *v4l2_dev, + struct ipu_isys_subdev_info *sd_info, + struct i2c_client *client) +{ + struct i2c_board_info *info = &sd_info->i2c.board_info; + struct v4l2_subdev *sd; + + request_module(I2C_MODULE_PREFIX "%s", info->type); + + /* ACPI overwrite with platform data */ + client->dev.platform_data = info->platform_data; + /* Change I2C client name to one in temporary platform data */ + strlcpy(client->name, info->type, sizeof(client->name)); + + if (device_reprobe(&client->dev)) + return NULL; + + if (!client->dev.driver) + return NULL; + + if (!try_module_get(client->dev.driver->owner)) + return NULL; + + sd = i2c_get_clientdata(client); + + if (v4l2_device_register_subdev(v4l2_dev, sd)) + sd = NULL; + + module_put(client->dev.driver->owner); + + return sd; +} + +static int +isys_complete_ext_device_registration(struct ipu_isys *isys, + struct v4l2_subdev *sd, + struct ipu_isys_csi2_config *csi2) +{ + unsigned int i; + int rval; + + v4l2_set_subdev_hostdata(sd, csi2); + + for (i = 0; i < sd->entity.num_pads; i++) { + if (sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE) + break; + } + + if (i == sd->entity.num_pads) { + dev_warn(&isys->adev->dev, + "no source pad in external entity\n"); + rval = -ENOENT; + goto skip_unregister_subdev; + } + + rval = media_create_pad_link(&sd->entity, i, + &isys->csi2[csi2->port].asd.sd.entity, + 0, 0); + if (rval) { + dev_warn(&isys->adev->dev, "can't create link\n"); + goto skip_unregister_subdev; + } + + isys->csi2[csi2->port].nlanes = csi2->nlanes; + return 0; + +skip_unregister_subdev: + v4l2_device_unregister_subdev(sd); + return rval; +} + +static int isys_register_ext_subdev(struct ipu_isys *isys, + struct ipu_isys_subdev_info *sd_info, + bool acpi_only) +{ + struct i2c_adapter *adapter = + i2c_get_adapter(sd_info->i2c.i2c_adapter_id); + struct v4l2_subdev *sd; + struct i2c_client *client; + int rval; + + dev_info(&isys->adev->dev, + "creating new i2c subdev for %s (address %2.2x, bus %d)", + sd_info->i2c.board_info.type, sd_info->i2c.board_info.addr, + sd_info->i2c.i2c_adapter_id); + + if 
(!adapter) {
+		dev_warn(&isys->adev->dev, "can't find adapter\n");
+		return -ENOENT;
+	}
+	if (sd_info->csi2) {
+		dev_info(&isys->adev->dev, "sensor device on CSI port: %d\n",
+			 sd_info->csi2->port);
+		if (sd_info->csi2->port >= isys->pdata->ipdata->csi2.nports ||
+		    !isys->csi2[sd_info->csi2->port].isys) {
+			dev_warn(&isys->adev->dev, "invalid csi2 port %u\n",
+				 sd_info->csi2->port);
+			rval = -EINVAL;
+			goto skip_put_adapter;
+		}
+	} else {
+		dev_info(&isys->adev->dev, "non-camera subdevice\n");
+	}
+
+	client = isys_find_i2c_subdev(adapter, sd_info);
+
+	if (acpi_only) {
+		if (!client) {
+			dev_dbg(&isys->adev->dev, "Matching ACPI device not found - postpone\n");
+			rval = 0;
+			goto skip_put_adapter;
+		}
+		if (!sd_info->acpiname) {
+			dev_dbg(&isys->adev->dev, "No name in platform data\n");
+			rval = 0;
+			goto skip_put_adapter;
+		}
+		if (strcmp(dev_name(&client->dev), sd_info->acpiname)) {
+			dev_dbg(&isys->adev->dev, "Names don't match: %s != %s\n",
+				dev_name(&client->dev), sd_info->acpiname);
+			rval = 0;
+			goto skip_put_adapter;
+		}
+		/* ACPI match found, continue to reprobe */
+	} else if (client) {
+		dev_dbg(&isys->adev->dev, "Device exists\n");
+		rval = 0;
+		goto skip_put_adapter;
+	} else if (sd_info->acpiname) {
+		dev_dbg(&isys->adev->dev, "ACPI name doesn't match: %s\n",
+			sd_info->acpiname);
+		rval = 0;
+		goto skip_put_adapter;
+	}
+
+	if (!client) {
+		dev_info(&isys->adev->dev,
+			 "i2c device not found in ACPI table\n");
+		sd = v4l2_i2c_new_subdev_board(&isys->v4l2_dev, adapter,
+					       &sd_info->i2c.board_info, 0);
+	} else {
+		dev_info(&isys->adev->dev, "i2c device found in ACPI table\n");
+		sd = register_acpi_i2c_subdev(&isys->v4l2_dev,
+					      sd_info, client);
+	}
+
+	if (!sd) {
+		dev_warn(&isys->adev->dev, "can't create new i2c subdev\n");
+		rval = -EINVAL;
+		goto skip_put_adapter;
+	}
+	if (!sd_info->csi2)
+		return 0;
+
+	return isys_complete_ext_device_registration(isys, sd, sd_info->csi2);
+
+skip_put_adapter:
+	i2c_put_adapter(adapter);
+
+	return rval;
+}
+
+static int isys_acpi_add_device(struct device *dev, void *priv,
+				struct ipu_isys_csi2_config *csi2,
+				bool reprobe)
+{
+	struct ipu_isys *isys = priv;
+	struct i2c_client *client = i2c_verify_client(dev);
+	struct v4l2_subdev *sd;
+
+	if (!client)
+		return -ENODEV;
+
+	if (reprobe)
+		if (device_reprobe(&client->dev))
+			return -ENODEV;
+
+	if (!client->dev.driver)
+		return -ENODEV;
+
+	/* Lock the module so we can safely get the v4l2_subdev pointer */
+	if (!try_module_get(client->dev.driver->owner))
+		return -ENODEV;
+
+	sd = i2c_get_clientdata(client);
+
+	if (v4l2_device_register_subdev(&isys->v4l2_dev, sd)) {
+		dev_warn(&isys->adev->dev, "can't create new i2c subdev\n");
+		goto leave_module_put;
+	}
+	module_put(client->dev.driver->owner);
+
+	if (!csi2)
+		return 0;
+
+	return isys_complete_ext_device_registration(isys, sd, csi2);
+
+leave_module_put:
+	module_put(client->dev.driver->owner);
+	return -ENODEV;
+}
+
+static void isys_register_ext_subdevs(struct ipu_isys *isys)
+{
+	struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata;
+	struct ipu_isys_subdev_info **sd_info;
+
+	if (spdata) {
+		/* Scan spdata first to possibly override ACPI data */
+		/* ACPI created devices */
+		for (sd_info = spdata->subdevs; *sd_info; sd_info++)
+			isys_register_ext_subdev(isys, *sd_info, true);
+
+		/* Scan non-ACPI devices */
+		for (sd_info = spdata->subdevs; *sd_info; sd_info++)
+			isys_register_ext_subdev(isys, *sd_info, false);
+	} else {
+		dev_info(&isys->adev->dev, "no subdevice info provided\n");
+	}
+
+	/* Handle real ACPI stuff */
+	
request_module("ipu4-acpi"); + ipu_get_acpi_devices(isys, &isys->adev->dev, + isys_acpi_add_device); + +} + +static void isys_unregister_subdevices(struct ipu_isys *isys) +{ + const struct ipu_isys_internal_tpg_pdata *tpg = + &isys->pdata->ipdata->tpg; + const struct ipu_isys_internal_csi2_pdata *csi2 = + &isys->pdata->ipdata->csi2; + unsigned int i; + + ipu_isys_csi2_be_cleanup(&isys->csi2_be); + ipu_isys_csi2_be_soc_cleanup(&isys->csi2_be_soc); + + ipu_isys_isa_cleanup(&isys->isa); + + for (i = 0; i < tpg->ntpgs; i++) + ipu_isys_tpg_cleanup(&isys->tpg[i]); + + for (i = 0; i < csi2->nports; i++) + ipu_isys_csi2_cleanup(&isys->csi2[i]); +} + +static int isys_register_subdevices(struct ipu_isys *isys) +{ + const struct ipu_isys_internal_tpg_pdata *tpg = + &isys->pdata->ipdata->tpg; + const struct ipu_isys_internal_csi2_pdata *csi2 = + &isys->pdata->ipdata->csi2; + struct ipu_isys_subdev_pdata *spdata = isys->pdata->spdata; + struct ipu_isys_subdev_info **sd_info; + DECLARE_BITMAP(csi2_enable, 32); + unsigned int i, j, k; + int rval; + + /* + * Here is somewhat a workaround, let each platform decide + * if csi2 port can be optimized, which means only registered + * port from pdata would be enabled. + */ + if (csi2_port_optimized && spdata) { + bitmap_zero(csi2_enable, 32); + for (sd_info = spdata->subdevs; *sd_info; sd_info++) { + if ((*sd_info)->csi2) { + i = (*sd_info)->csi2->port; + if (i >= csi2->nports) { + dev_warn(&isys->adev->dev, + "invalid csi2 port %u\n", i); + continue; + } + bitmap_set(csi2_enable, i, 1); + } + } + } else { + bitmap_fill(csi2_enable, 32); + } + + isys->csi2 = devm_kcalloc(&isys->adev->dev, csi2->nports, + sizeof(*isys->csi2), GFP_KERNEL); + if (!isys->csi2) { + rval = -ENOMEM; + goto fail; + } + + for (i = 0; i < csi2->nports; i++) { + if (!test_bit(i, csi2_enable)) + continue; + + rval = ipu_isys_csi2_init(&isys->csi2[i], isys, + isys->pdata->base + + csi2->offsets[i], i); + if (rval) + goto fail; + + isys->isr_csi2_bits |= IPU_ISYS_UNISPART_IRQ_CSI2(i); + } + + isys->tpg = devm_kcalloc(&isys->adev->dev, tpg->ntpgs, + sizeof(*isys->tpg), GFP_KERNEL); + if (!isys->tpg) { + rval = -ENOMEM; + goto fail; + } + + for (i = 0; i < tpg->ntpgs; i++) { + rval = ipu_isys_tpg_init(&isys->tpg[i], isys, + isys->pdata->base + + tpg->offsets[i], + tpg->sels ? (isys->pdata->base + + tpg->sels[i]) : NULL, i); + if (rval) + goto fail; + } + + rval = ipu_isys_csi2_be_soc_init(&isys->csi2_be_soc, isys); + if (rval) { + dev_info(&isys->adev->dev, + "can't register soc csi2 be device\n"); + goto fail; + } + + rval = ipu_isys_csi2_be_init(&isys->csi2_be, isys); + if (rval) { + dev_info(&isys->adev->dev, + "can't register raw csi2 be device\n"); + goto fail; + } + rval = ipu_isys_isa_init(&isys->isa, isys, NULL); + if (rval) { + dev_info(&isys->adev->dev, "can't register isa device\n"); + goto fail; + } + + for (i = 0; i < csi2->nports; i++) { + if (!test_bit(i, csi2_enable)) + continue; + + for (j = CSI2_PAD_SOURCE(0); + j < (NR_OF_CSI2_SOURCE_PADS + CSI2_PAD_SOURCE(0)); j++) { + rval = + media_create_pad_link(&isys->csi2[i].asd.sd.entity, + j, + &isys->csi2_be.asd.sd.entity, + CSI2_BE_PAD_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, + "can't create link csi2 <=> csi2_be\n"); + goto fail; + } + + for (k = CSI2_BE_SOC_PAD_SINK(0); + k < NR_OF_CSI2_BE_SOC_SINK_PADS; k++) { + rval = + media_create_pad_link(&isys->csi2[i].asd.sd. + entity, j, + &isys->csi2_be_soc. 
+ asd.sd.entity, k, + MEDIA_LNK_FL_DYNAMIC); + if (rval) { + dev_info(&isys->adev->dev, + "can't create link csi2->be_soc\n"); + goto fail; + } + } + } + } + + for (i = 0; i < tpg->ntpgs; i++) { + rval = media_create_pad_link(&isys->tpg[i].asd.sd.entity, + TPG_PAD_SOURCE, + &isys->csi2_be.asd.sd.entity, + CSI2_BE_PAD_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, + "can't create link between tpg and csi2_be\n"); + goto fail; + } + + for (k = CSI2_BE_SOC_PAD_SINK(0); + k < NR_OF_CSI2_BE_SOC_SINK_PADS; k++) { + rval = + media_create_pad_link(&isys->tpg[i].asd.sd.entity, + TPG_PAD_SOURCE, + &isys->csi2_be_soc.asd.sd. + entity, k, 0); + if (rval) { + dev_info(&isys->adev->dev, + "can't create link tpg->be_soc\n"); + goto fail; + } + } + } + + rval = media_create_pad_link(&isys->csi2_be.asd.sd.entity, + CSI2_BE_PAD_SOURCE, + &isys->isa.asd.sd.entity, ISA_PAD_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, + "can't create link between CSI2 raw be and ISA\n"); + goto fail; + } + return 0; + +fail: + isys_unregister_subdevices(isys); + return rval; +} + +static struct media_device_ops isys_mdev_ops = { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + .link_notify = ipu_pipeline_link_notify, +#else + .link_notify = v4l2_pipeline_link_notify, +#endif + .req_alloc = ipu_isys_req_alloc, + .req_free = ipu_isys_req_free, + .req_queue = ipu_isys_req_queue, +}; + +static int isys_register_devices(struct ipu_isys *isys) +{ + int rval; + + isys->media_dev.dev = &isys->adev->dev; + isys->media_dev.ops = &isys_mdev_ops; + strlcpy(isys->media_dev.model, + IPU_MEDIA_DEV_MODEL_NAME, sizeof(isys->media_dev.model)); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) + isys->media_dev.driver_version = LINUX_VERSION_CODE; +#endif + snprintf(isys->media_dev.bus_info, sizeof(isys->media_dev.bus_info), + "pci:%s", dev_name(isys->adev->dev.parent->parent)); + strlcpy(isys->v4l2_dev.name, isys->media_dev.model, + sizeof(isys->v4l2_dev.name)); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_device_init(&isys->media_dev); +#endif + + rval = media_device_register(&isys->media_dev); + if (rval < 0) { + dev_info(&isys->adev->dev, "can't register media device\n"); + goto out_media_device_unregister; + } + + isys->v4l2_dev.mdev = &isys->media_dev; + + rval = v4l2_device_register(&isys->adev->dev, &isys->v4l2_dev); + if (rval < 0) { + dev_info(&isys->adev->dev, "can't register v4l2 device\n"); + goto out_media_device_unregister; + } + + rval = isys_register_subdevices(isys); + if (rval) + goto out_v4l2_device_unregister; + + isys_register_ext_subdevs(isys); + + rval = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); + if (rval) + goto out_isys_unregister_subdevices; + + return 0; + +out_isys_unregister_subdevices: + isys_unregister_subdevices(isys); + +out_v4l2_device_unregister: + v4l2_device_unregister(&isys->v4l2_dev); + +out_media_device_unregister: + media_device_unregister(&isys->media_dev); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_device_cleanup(&isys->media_dev); +#endif + + return rval; +} + +static void isys_unregister_devices(struct ipu_isys *isys) +{ + isys_unregister_subdevices(isys); + v4l2_device_unregister(&isys->v4l2_dev); + media_device_unregister(&isys->media_dev); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + media_device_cleanup(&isys->media_dev); +#endif +} + +#ifdef CONFIG_PM +static int isys_runtime_pm_resume(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_device *isp = adev->isp; + struct ipu_isys *isys = 
ipu_bus_get_drvdata(adev); + unsigned long flags; + int ret; + + if (!isys) { + WARN(1, "%s called before probing. skipping.\n", __func__); + return 0; + } + + ipu_trace_restore(dev); + + pm_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE); + + ret = ipu_buttress_start_tsc_sync(isp); + if (ret) + return ret; + + spin_lock_irqsave(&isys->power_lock, flags); + isys->power = 1; + spin_unlock_irqrestore(&isys->power_lock, flags); + + if (isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) { + mutex_lock(&isys->short_packet_tracing_mutex); + isys->short_packet_tracing_count = 0; + mutex_unlock(&isys->short_packet_tracing_mutex); + } + isys_setup_hw(isys); + + return 0; +} + +static int isys_runtime_pm_suspend(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + unsigned long flags; + + if (!isys) { + WARN(1, "%s called before probing. skipping.\n", __func__); + return 0; + } + + spin_lock_irqsave(&isys->power_lock, flags); + isys->power = 0; + spin_unlock_irqrestore(&isys->power_lock, flags); + + ipu_trace_stop(dev); + mutex_lock(&isys->mutex); + isys->reset_needed = false; + mutex_unlock(&isys->mutex); + + pm_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); + + return 0; +} + +static int isys_suspend(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + + /* If stream is open, refuse to suspend */ + if (isys->stream_opened) + return -EBUSY; + + return 0; +} + +static int isys_resume(struct device *dev) +{ + return 0; +} + +static const struct dev_pm_ops isys_pm_ops = { + .runtime_suspend = isys_runtime_pm_suspend, + .runtime_resume = isys_runtime_pm_resume, + .suspend = isys_suspend, + .resume = isys_resume, +}; + +#define ISYS_PM_OPS (&isys_pm_ops) +#else +#define ISYS_PM_OPS NULL +#endif + +static void isys_remove(struct ipu_bus_device *adev) +{ + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + struct ipu_device *isp = adev->isp; + struct isys_fw_msgs *fwmsg, *safe; + + dev_info(&adev->dev, "removed\n"); + if (isp->ipu_dir) + debugfs_remove_recursive(isys->debugfsdir); + + list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head) { + dma_free_attrs(&adev->dev, sizeof(struct isys_fw_msgs), + fwmsg, fwmsg->dma_addr, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + } + + list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head) { + dma_free_attrs(&adev->dev, sizeof(struct isys_fw_msgs), + fwmsg, fwmsg->dma_addr, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + } + + ipu_trace_uninit(&adev->dev); + isys_unregister_devices(isys); + pm_qos_remove_request(&isys->pm_qos); + + if (!isp->secure_mode) { + ipu_cpd_free_pkg_dir(adev, isys->pkg_dir, + isys->pkg_dir_dma_addr, + isys->pkg_dir_size); + ipu_buttress_unmap_fw_image(adev, &isys->fw_sgt); + release_firmware(isys->fw); + } + + mutex_destroy(&isys->stream_mutex); + mutex_destroy(&isys->mutex); + + if (isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) { + u32 trace_size = IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; + + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + dma_free_attrs(&adev->dev, trace_size, + isys->short_packet_trace_buffer, + isys->short_packet_trace_buffer_dma_addr, + &attrs); +#else + unsigned long attrs; + + attrs = DMA_ATTR_NON_CONSISTENT; + 
dma_free_attrs(&adev->dev, trace_size, + isys->short_packet_trace_buffer, + isys->short_packet_trace_buffer_dma_addr, attrs); +#endif + } +} + +static int ipu_isys_icache_prefetch_get(void *data, u64 *val) +{ + struct ipu_isys *isys = data; + + *val = isys->icache_prefetch; + return 0; +} + +static int ipu_isys_icache_prefetch_set(void *data, u64 val) +{ + struct ipu_isys *isys = data; + + if (val != !!val) + return -EINVAL; + + isys->icache_prefetch = val; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(isys_icache_prefetch_fops, + ipu_isys_icache_prefetch_get, + ipu_isys_icache_prefetch_set, "%llu\n"); + +static int ipu_isys_init_debugfs(struct ipu_isys *isys) +{ + struct dentry *file; + struct dentry *dir; + + dir = debugfs_create_dir("isys", isys->adev->isp->ipu_dir); + if (IS_ERR(dir)) + return -ENOMEM; + + file = debugfs_create_file("icache_prefetch", 0600, + dir, isys, &isys_icache_prefetch_fops); + if (IS_ERR(file)) + goto err; + + isys->debugfsdir = dir; + + return 0; +err: + debugfs_remove_recursive(dir); + return -ENOMEM; +} + +static int alloc_fw_msg_buffers(struct ipu_isys *isys, int amount) +{ + dma_addr_t dma_addr; + struct isys_fw_msgs *addr; + unsigned int i; + unsigned long flags; + + for (i = 0; i < amount; i++) { + addr = dma_alloc_attrs(&isys->adev->dev, + sizeof(struct isys_fw_msgs), + &dma_addr, GFP_KERNEL, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + if (!addr) + break; + addr->dma_addr = dma_addr; + + spin_lock_irqsave(&isys->listlock, flags); + list_add(&addr->head, &isys->framebuflist); + spin_unlock_irqrestore(&isys->listlock, flags); + } + if (i == amount) + return 0; + spin_lock_irqsave(&isys->listlock, flags); + while (!list_empty(&isys->framebuflist)) { + addr = list_first_entry(&isys->framebuflist, + struct isys_fw_msgs, head); + list_del(&addr->head); + spin_unlock_irqrestore(&isys->listlock, flags); + dma_free_attrs(&isys->adev->dev, + sizeof(struct isys_fw_msgs), + addr, addr->dma_addr, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + NULL +#else + 0 +#endif + ); + spin_lock_irqsave(&isys->listlock, flags); + } + spin_unlock_irqrestore(&isys->listlock, flags); + return -ENOMEM; +} + +struct isys_fw_msgs *ipu_get_fw_msg_buf(struct ipu_isys_pipeline *ip) +{ + struct ipu_isys_video *pipe_av = + container_of(ip, struct ipu_isys_video, ip); + struct ipu_isys *isys; + struct isys_fw_msgs *msg; + unsigned long flags; + + isys = pipe_av->isys; + + spin_lock_irqsave(&isys->listlock, flags); + if (list_empty(&isys->framebuflist)) { + spin_unlock_irqrestore(&isys->listlock, flags); + dev_dbg(&isys->adev->dev, "Frame list empty - Allocate more"); + + alloc_fw_msg_buffers(isys, 5); + + spin_lock_irqsave(&isys->listlock, flags); + if (list_empty(&isys->framebuflist)) { + dev_err(&isys->adev->dev, "Frame list empty"); + spin_unlock_irqrestore(&isys->listlock, flags); + return NULL; + } + } + msg = list_last_entry(&isys->framebuflist, struct isys_fw_msgs, head); + list_move(&msg->head, &isys->framebuflist_fw); + spin_unlock_irqrestore(&isys->listlock, flags); + memset(&msg->fw_msg, 0, sizeof(msg->fw_msg)); + + return msg; +} + +void ipu_cleanup_fw_msg_bufs(struct ipu_isys *isys) +{ + struct isys_fw_msgs *fwmsg, *fwmsg0; + unsigned long flags; + + spin_lock_irqsave(&isys->listlock, flags); + list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head) + list_move(&fwmsg->head, &isys->framebuflist); + spin_unlock_irqrestore(&isys->listlock, flags); +} + +void ipu_put_fw_mgs_buffer(struct ipu_isys *isys, u64 data) +{ + struct 
isys_fw_msgs *msg; + u64 *ptr = (u64 *)(unsigned long)data; + + if (!ptr) + return; + + spin_lock(&isys->listlock); + msg = container_of(ptr, struct isys_fw_msgs, fw_msg.dummy); + list_move(&msg->head, &isys->framebuflist); + spin_unlock(&isys->listlock); +} +EXPORT_SYMBOL_GPL(ipu_put_fw_mgs_buffer); + +static int isys_probe(struct ipu_bus_device *adev) +{ + struct ipu_mmu *mmu = dev_get_drvdata(adev->iommu); + struct ipu_isys *isys; + struct ipu_device *isp = adev->isp; + const u32 trace_size = IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE; + dma_addr_t *trace_dma_addr; + + const struct firmware *uninitialized_var(fw); + int rval = 0; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; +#else + unsigned long attrs; +#endif + + trace_printk("B|%d|TMWK\n", current->pid); + + /* Has the domain been attached? */ + if (!mmu || !isp->pkg_dir_dma_addr) { + trace_printk("E|TMWK\n"); + return -EPROBE_DEFER; + } + + isys = devm_kzalloc(&adev->dev, sizeof(*isys), GFP_KERNEL); + if (!isys) + return -ENOMEM; + + /* By default, short packet is captured from T-Unit. */ +#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P) + isys->short_packet_source = IPU_ISYS_SHORT_PACKET_FROM_TUNIT; + trace_dma_addr = &isys->short_packet_trace_buffer_dma_addr; + mutex_init(&isys->short_packet_tracing_mutex); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + isys->short_packet_trace_buffer = + dma_alloc_attrs(&adev->dev, trace_size, trace_dma_addr, + GFP_KERNEL, &attrs); +#else + attrs = DMA_ATTR_NON_CONSISTENT; + isys->short_packet_trace_buffer = + dma_alloc_attrs(&adev->dev, trace_size, trace_dma_addr, + GFP_KERNEL, attrs); +#endif + if (!isys->short_packet_trace_buffer) + return -ENOMEM; +#else + isys->short_packet_source = IPU_ISYS_SHORT_PACKET_FROM_RECEIVER; +#endif + isys->adev = adev; + isys->pdata = adev->pdata; + + INIT_LIST_HEAD(&isys->requests); + + spin_lock_init(&isys->lock); + spin_lock_init(&isys->power_lock); + isys->power = 0; + + mutex_init(&isys->mutex); + mutex_init(&isys->stream_mutex); + mutex_init(&isys->lib_mutex); + + spin_lock_init(&isys->listlock); + INIT_LIST_HEAD(&isys->framebuflist); + INIT_LIST_HEAD(&isys->framebuflist_fw); + + dev_info(&adev->dev, "isys probe %p %p\n", adev, &adev->dev); + ipu_bus_set_drvdata(adev, isys); + + isys->line_align = IPU_ISYS_2600_MEM_LINE_ALIGN; +#ifdef CONFIG_VIDEO_INTEL_IPU4 + isys->icache_prefetch = is_ipu_hw_bxtp_e0(isp); +#else + isys->icache_prefetch = 0; +#endif + +#ifndef CONFIG_PM + isys_setup_hw(isys); +#endif + + if (!isp->secure_mode) { + fw = isp->cpd_fw; + rval = ipu_buttress_map_fw_image(adev, fw, &isys->fw_sgt); + if (rval) + goto release_firmware; + + isys->pkg_dir = ipu_cpd_create_pkg_dir(adev, isp->cpd_fw->data, + sg_dma_address(isys-> + fw_sgt. + sgl), + &isys->pkg_dir_dma_addr, + &isys->pkg_dir_size); + if (!isys->pkg_dir) { + rval = -ENOMEM; + goto remove_shared_buffer; + } + } + + /* Debug fs failure is not fatal. 
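+	 * When it succeeds, the icache_prefetch knob appears under the IPU
+	 * debugfs directory; illustrative usage, assuming the core driver
+	 * created its directory as /sys/kernel/debug/<ipu>:
+	 *
+	 *   echo 1 > /sys/kernel/debug/<ipu>/isys/icache_prefetch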
*/ + ipu_isys_init_debugfs(isys); + + ipu_trace_init(adev->isp, isys->pdata->base, &adev->dev, + isys_trace_blocks); + + pm_qos_add_request(&isys->pm_qos, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + alloc_fw_msg_buffers(isys, 20); + + pm_runtime_allow(&adev->dev); + pm_runtime_enable(&adev->dev); + + rval = isys_register_devices(isys); + if (rval) + goto out_remove_pkg_dir_shared_buffer; + + trace_printk("E|TMWK\n"); + return 0; + +out_remove_pkg_dir_shared_buffer: + if (!isp->secure_mode) + ipu_cpd_free_pkg_dir(adev, isys->pkg_dir, + isys->pkg_dir_dma_addr, + isys->pkg_dir_size); +remove_shared_buffer: + if (!isp->secure_mode) + ipu_buttress_unmap_fw_image(adev, &isys->fw_sgt); +release_firmware: + if (!isp->secure_mode) + release_firmware(isys->fw); + ipu_trace_uninit(&adev->dev); + + trace_printk("E|TMWK\n"); + + mutex_destroy(&isys->mutex); + mutex_destroy(&isys->stream_mutex); + + if (isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) { + mutex_destroy(&isys->short_packet_tracing_mutex); + dma_free_attrs(&adev->dev, trace_size, + isys->short_packet_trace_buffer, + isys->short_packet_trace_buffer_dma_addr, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + &attrs); +#else + attrs); +#endif + } + + return rval; +} + +struct fwmsg { + int type; + char *msg; + bool valid_ts; +}; + +static const struct fwmsg fw_msg[] = { + {IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE, "STREAM_OPEN_DONE", 0}, + {IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, "STREAM_CLOSE_ACK", 0}, + {IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK, "STREAM_START_ACK", 0}, + {IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, + "STREAM_START_AND_CAPTURE_ACK", 0}, + {IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, "STREAM_STOP_ACK", 0}, + {IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, "STREAM_FLUSH_ACK", 0}, + {IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY, "PIN_DATA_READY", 1}, + {IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, "STREAM_CAPTURE_ACK", 0}, + {IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, + "STREAM_START_AND_CAPTURE_DONE", 1}, + {IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, "STREAM_CAPTURE_DONE", 1}, + {IPU_FW_ISYS_RESP_TYPE_FRAME_SOF, "FRAME_SOF", 1}, + {IPU_FW_ISYS_RESP_TYPE_FRAME_EOF, "FRAME_EOF", 1}, + {IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY, "STATS_READY", 1}, + {-1, "UNKNOWN MESSAGE", 0}, +}; + +static int resp_type_to_index(int type) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(fw_msg); i++) + if (fw_msg[i].type == type) + return i; + + return i - 1; +} + +int isys_isr_one(struct ipu_bus_device *adev) +{ + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + struct ipu_fw_isys_resp_info_abi resp_data; + struct ipu_fw_isys_resp_info_abi *resp; + struct ipu_isys_pipeline *pipe; + u64 ts; + unsigned int i; + + if (!isys->fwcom) + return 0; + + resp = ipu_fw_isys_get_resp(isys->fwcom, IPU_BASE_MSG_RECV_QUEUES, + &resp_data); + if (!resp) + return 1; + + ts = (u64) resp->timestamp[1] << 32 | resp->timestamp[0]; + + if (resp->error_info.error == IPU_FW_ISYS_ERROR_STREAM_IN_SUSPENSION) + /* Suspension is kind of special case: not enough buffers */ + dev_dbg(&adev->dev, + "hostlib: error resp %02d %s, stream %u, error SUSPENSION, details %d, timestamp 0x%16.16llx, pin %d\n", + resp->type, + fw_msg[resp_type_to_index(resp->type)].msg, + resp->stream_handle, + resp->error_info.error_details, + fw_msg[resp_type_to_index(resp->type)].valid_ts ? 
+ ts : 0, resp->pin_id); + else if (resp->error_info.error) + dev_dbg(&adev->dev, + "hostlib: error resp %02d %s, stream %u, error %d, details %d, timestamp 0x%16.16llx, pin %d\n", + resp->type, + fw_msg[resp_type_to_index(resp->type)].msg, + resp->stream_handle, + resp->error_info.error, resp->error_info.error_details, + fw_msg[resp_type_to_index(resp->type)].valid_ts ? + ts : 0, resp->pin_id); + else + dev_dbg(&adev->dev, + "hostlib: resp %02d %s, stream %u, timestamp 0x%16.16llx, pin %d\n", + resp->type, + fw_msg[resp_type_to_index(resp->type)].msg, + resp->stream_handle, + fw_msg[resp_type_to_index(resp->type)].valid_ts ? + ts : 0, resp->pin_id); + + if (resp->stream_handle >= IPU_ISYS_MAX_STREAMS) { + dev_err(&adev->dev, "bad stream handle %u\n", + resp->stream_handle); + goto leave; + } + + pipe = isys->pipes[resp->stream_handle]; + if (!pipe) { + dev_err(&adev->dev, "no pipeline for stream %u\n", + resp->stream_handle); + goto leave; + } + pipe->error = resp->error_info.error; + + switch (resp->type) { + case IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE: + ipu_put_fw_mgs_buffer(ipu_bus_get_drvdata(adev), resp->buf_id); + complete(&pipe->stream_open_completion); + break; + case IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK: + complete(&pipe->stream_close_completion); + break; + case IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK: + complete(&pipe->stream_start_completion); + break; + case IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: + ipu_put_fw_mgs_buffer(ipu_bus_get_drvdata(adev), resp->buf_id); + complete(&pipe->stream_start_completion); + break; + case IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK: + complete(&pipe->stream_stop_completion); + break; + case IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK: + complete(&pipe->stream_stop_completion); + break; + case IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY: + if (resp->pin_id < IPU_ISYS_OUTPUT_PINS && + pipe->output_pins[resp->pin_id].pin_ready) + pipe->output_pins[resp->pin_id].pin_ready(pipe, resp); + else + dev_err(&adev->dev, + "%d:No data pin ready handler for pin id %d\n", + resp->stream_handle, resp->pin_id); + break; + case IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK: + ipu_put_fw_mgs_buffer(ipu_bus_get_drvdata(adev), resp->buf_id); + complete(&pipe->capture_ack_completion); + break; + case IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE: + case IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE: + if (pipe->interlaced) { + struct ipu_isys_buffer *ib, *ib_safe; + struct list_head list; + unsigned long flags; + + if (pipe->isys->short_packet_source == + IPU_ISYS_SHORT_PACKET_FROM_TUNIT) + pipe->cur_field = + ipu_isys_csi2_get_current_field(pipe, + resp-> + timestamp); + /* + * Move the pending buffers to a local temp list. + * Then we do not need to handle the lock during + * the loop. 
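+			 * list_cut_position() detaches the whole
+			 * pending_interlaced_bufs list in O(1), so the
+			 * ipu_isys_queue_buf_done() calls below run without
+			 * short_packet_queue_lock held.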
+ */ + spin_lock_irqsave(&pipe->short_packet_queue_lock, + flags); + list_cut_position(&list, + &pipe->pending_interlaced_bufs, + pipe->pending_interlaced_bufs.prev); + spin_unlock_irqrestore(&pipe->short_packet_queue_lock, + flags); + + list_for_each_entry_safe(ib, ib_safe, &list, head) { + struct vb2_buffer *vb; + + vb = ipu_isys_buffer_to_vb2_buffer(ib); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.field = pipe->cur_field; +#else + to_vb2_v4l2_buffer(vb)->field = pipe->cur_field; +#endif + list_del(&ib->head); + + ipu_isys_queue_buf_done(ib); + } + } + for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++) + if (pipe->capture_done[i]) + pipe->capture_done[i] (pipe, resp); + + break; + case IPU_FW_ISYS_RESP_TYPE_FRAME_SOF: +#ifdef IPU_TPG_SOF + if (pipe->tpg) + ipu_isys_tpg_sof_event(pipe->tpg); +#endif + pipe->seq[pipe->seq_index].sequence = + atomic_read(&pipe->sequence) - 1; + pipe->seq[pipe->seq_index].timestamp = ts; + dev_dbg(&adev->dev, + "sof: handle %d: (index %u), timestamp 0x%16.16llx\n", + resp->stream_handle, + pipe->seq[pipe->seq_index].sequence, ts); + pipe->seq_index = (pipe->seq_index + 1) + % IPU_ISYS_MAX_PARALLEL_SOF; + break; + case IPU_FW_ISYS_RESP_TYPE_FRAME_EOF: + dev_dbg(&adev->dev, + "eof: handle %d: (index %u), timestamp 0x%16.16llx\n", + resp->stream_handle, + pipe->seq[pipe->seq_index].sequence, ts); + break; + case IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY: + break; + default: + dev_err(&adev->dev, "%d:unknown response type %u\n", + resp->stream_handle, resp->type); + break; + } + +leave: + ipu_fw_isys_put_resp(isys->fwcom, IPU_BASE_MSG_RECV_QUEUES); + return 0; +} + +static void isys_isr_poll(struct ipu_bus_device *adev) +{ + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + + if (!isys->fwcom) { + dev_dbg(&isys->adev->dev, + "got interrupt but device not configured yet\n"); + return; + } + + mutex_lock(&isys->mutex); + isys_isr(adev); + mutex_unlock(&isys->mutex); +} + +int ipu_isys_isr_run(void *ptr) +{ + struct ipu_isys *isys = ptr; + + while (!kthread_should_stop()) { + usleep_range(500, 1000); + if (isys->stream_opened) + isys_isr_poll(isys->adev); + } + + return 0; +} + +static struct ipu_bus_driver isys_driver = { + .probe = isys_probe, + .remove = isys_remove, + .isr = isys_isr, + .wanted = IPU_ISYS_NAME, + .drv = { + .name = IPU_ISYS_NAME, + .owner = THIS_MODULE, + .pm = ISYS_PM_OPS, + }, +}; + +module_ipu_bus_driver(isys_driver); + +static const struct pci_device_id ipu_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU_PCI_ID)}, + {0,} +}; +MODULE_DEVICE_TABLE(pci, ipu_pci_tbl); + +MODULE_AUTHOR("Sakari Ailus "); +MODULE_AUTHOR("Samu Onkalo "); +MODULE_AUTHOR("Jouni Högander "); +MODULE_AUTHOR("Jouni Ukkonen "); +MODULE_AUTHOR("Jianxu Zheng "); +MODULE_AUTHOR("Tianshu Qiu "); +MODULE_AUTHOR("Renwei Wu "); +MODULE_AUTHOR("Bingbu Cao "); +MODULE_AUTHOR("Yunliang Ding "); +MODULE_AUTHOR("Zaikuo Wang "); +MODULE_AUTHOR("Leifu Zhao "); +MODULE_AUTHOR("Xia Wu "); +MODULE_AUTHOR("Kun Jiang "); +MODULE_AUTHOR("Yu Xia "); +MODULE_AUTHOR("Jerry Hu "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu input system driver"); diff --git a/drivers/media/pci/intel/ipu-isys.h b/drivers/media/pci/intel/ipu-isys.h new file mode 100644 index 000000000000..b362f8613585 --- /dev/null +++ b/drivers/media/pci/intel/ipu-isys.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_H +#define IPU_ISYS_H + +#include +#include + +#include +#include + +#include + +#include "ipu.h" 
+#include "ipu-isys-csi2.h" +#include "ipu-isys-csi2-be.h" +#include "ipu-isys-tpg.h" +#include "ipu-isys-video.h" +#include "ipu-pdata.h" +#include "ipu-fw-isys.h" +#include "ipu-platform-isys.h" + +#define IPU_ISYS_2600_MEM_LINE_ALIGN 64 + +/* for TPG */ +#define IPU_ISYS_FREQ 533000000UL + +/* + * Current message queue configuration. These must be big enough + * so that they never gets full. Queues are located in system memory + */ +#define IPU_ISYS_SIZE_RECV_QUEUE 40 +#define IPU_ISYS_SIZE_SEND_QUEUE 40 +#define IPU_ISYS_SIZE_PROXY_RECV_QUEUE 5 +#define IPU_ISYS_SIZE_PROXY_SEND_QUEUE 5 +#define IPU_ISYS_NUM_RECV_QUEUE 1 + +/* + * Device close takes some time from last ack message to actual stopping + * of the SP processor. As long as the SP processor runs we can't proceed with + * clean up of resources. + */ +#define IPU_ISYS_OPEN_TIMEOUT_US 1000 +#define IPU_ISYS_OPEN_RETRY 1000 +#define IPU_ISYS_TURNOFF_DELAY_US 1000 +#define IPU_ISYS_TURNOFF_TIMEOUT 1000 +#define IPU_LIB_CALL_TIMEOUT_JIFFIES \ + msecs_to_jiffies(IPU_LIB_CALL_TIMEOUT_MS) + +#define IPU_ISYS_CSI2_LONG_PACKET_HEADER_SIZE 32 +#define IPU_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE 32 + +#define IPU_ISYS_MIN_WIDTH 1U +#define IPU_ISYS_MIN_HEIGHT 1U +#define IPU_ISYS_MAX_WIDTH 16384U +#define IPU_ISYS_MAX_HEIGHT 16384U + +struct task_struct; + +/* + * struct ipu_isys + * + * @media_dev: Media device + * @v4l2_dev: V4L2 device + * @adev: ISYS bus device + * @power: Is ISYS powered on or not? + * @isr_bits: Which bits does the ISR handle? + * @power_lock: Serialise access to power (power state in general) + * @csi2_rx_ctrl_cached: cached shared value between all CSI2 receivers + * @lock: serialise access to pipes + * @pipes: pipelines per stream ID + * @fwcom: fw communication layer private pointer + * or optional external library private pointer + * @line_align: line alignment in memory + * @reset_needed: Isys requires d0i0->i3 transition + * @video_opened: total number of opened file handles on video nodes + * @mutex: serialise access isys video open/release related operations + * @stream_mutex: serialise stream start and stop, queueing requests + * @lib_mutex: optional external library mutex + * @pdata: platform data pointer + * @csi2: CSI-2 receivers + * @tpg: test pattern generators + * @csi2_be: CSI-2 back-ends + * @isa: Input system accelerator + * @fw: ISYS firmware binary (unsecure firmware) + * @fw_sgt: fw scatterlist + * @pkg_dir: host pointer to pkg_dir + * @pkg_dir_dma_addr: I/O virtual address for pkg_dir + * @pkg_dir_size: size of pkg_dir in bytes + * @short_packet_source: select short packet capture mode + */ +struct ipu_isys { + struct media_device media_dev; + struct v4l2_device v4l2_dev; + struct ipu_bus_device *adev; + + int power; + spinlock_t power_lock; /* Serialise access to power */ + u32 isr_csi2_bits; + u32 csi2_rx_ctrl_cached; + spinlock_t lock; /* Serialise access to pipes */ + struct ipu_isys_pipeline *pipes[IPU_ISYS_MAX_STREAMS]; + void *fwcom; + unsigned int line_align; + bool reset_needed; + bool icache_prefetch; + bool csi2_cse_ipc_not_supported; + unsigned int video_opened; + unsigned int stream_opened; + struct dentry *debugfsdir; + struct mutex mutex; /* Serialise isys video open/release related */ + struct mutex stream_mutex; /* Stream start, stop, queueing reqs */ + struct mutex lib_mutex; /* Serialise optional external library mutex */ + + struct ipu_isys_pdata *pdata; + + struct ipu_isys_csi2 *csi2; + struct ipu_isys_tpg *tpg; + struct ipu_isys_isa isa; + struct ipu_isys_csi2_be csi2_be; + 
struct ipu_isys_csi2_be_soc csi2_be_soc; + + const struct firmware *fw; + struct sg_table fw_sgt; + + u64 *pkg_dir; + dma_addr_t pkg_dir_dma_addr; + unsigned int pkg_dir_size; + + struct list_head requests; + struct pm_qos_request pm_qos; + unsigned int short_packet_source; + struct ipu_isys_csi2_monitor_message *short_packet_trace_buffer; + dma_addr_t short_packet_trace_buffer_dma_addr; + unsigned int short_packet_tracing_count; + struct mutex short_packet_tracing_mutex; /* For tracing count */ + u64 tsc_timer_base; + u64 tunit_timer_base; + spinlock_t listlock; /* Protect framebuflist */ + struct list_head framebuflist; + struct list_head framebuflist_fw; +}; + +struct isys_fw_msgs { + union { + u64 dummy; + struct ipu_fw_isys_frame_buff_set_abi frame; + struct ipu_fw_isys_stream_cfg_data_abi stream; + } fw_msg; + struct list_head head; + dma_addr_t dma_addr; +}; + +#define to_frame_msg_buf(a) (&(a)->fw_msg.frame) +#define to_stream_cfg_msg_buf(a) (&(a)->fw_msg.stream) +#define to_dma_addr(a) ((a)->dma_addr) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) +int ipu_pipeline_pm_use(struct media_entity *entity, int use); +#endif +struct isys_fw_msgs *ipu_get_fw_msg_buf(struct ipu_isys_pipeline *ip); +void ipu_put_fw_mgs_buffer(struct ipu_isys *isys, u64 data); +void ipu_cleanup_fw_msg_bufs(struct ipu_isys *isys); + +extern const struct v4l2_ioctl_ops ipu_isys_ioctl_ops; + +void isys_setup_hw(struct ipu_isys *isys); +int isys_isr_one(struct ipu_bus_device *adev); +int ipu_isys_isr_run(void *ptr); +irqreturn_t isys_isr(struct ipu_bus_device *adev); + +#endif /* IPU_ISYS_H */ diff --git a/drivers/media/pci/intel/ipu-mmu.c b/drivers/media/pci/intel/ipu-mmu.c new file mode 100644 index 000000000000..f46f479afc98 --- /dev/null +++ b/drivers/media/pci/intel/ipu-mmu.c @@ -0,0 +1,874 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include + +#include +#include +#include +#include +#include +#include + +#include "ipu.h" +#include "ipu-platform.h" +#include "ipu-bus.h" +#include "ipu-dma.h" +#include "ipu-mmu.h" +#include "ipu-platform-regs.h" + +#define ISP_PAGE_SHIFT 12 +#define ISP_PAGE_SIZE BIT(ISP_PAGE_SHIFT) +#define ISP_PAGE_MASK (~(ISP_PAGE_SIZE - 1)) + +#define ISP_L1PT_SHIFT 22 +#define ISP_L1PT_MASK (~((1U << ISP_L1PT_SHIFT) - 1)) + +#define ISP_L2PT_SHIFT 12 +#define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK)))) + +#define ISP_L1PT_PTES 1024 +#define ISP_L2PT_PTES 1024 + +#define ISP_PADDR_SHIFT 12 + +#define REG_TLB_INVALIDATE 0x0000 + +#define MMU0_TLB_INVALIDATE 1 + +#define MMU1_TLB_INVALIDATE 0xffff + +#define REG_L1_PHYS 0x0004 /* 27-bit pfn */ +#define REG_INFO 0x0008 + +/* The range of stream ID i in L1 cache is from 0 to 15 */ +#define MMUV2_REG_L1_STREAMID(i) (0x0c + ((i) * 4)) + +/* The range of stream ID i in L2 cache is from 0 to 15 */ +#define MMUV2_REG_L2_STREAMID(i) (0x4c + ((i) * 4)) + +/* ZLW Enable for each stream in L1 MMU AT where i : 0..15 */ +#define MMUV2_AT_REG_L1_ZLW_EN_SID(i) (0x100 + ((i) * 0x20)) + +/* ZLW 1D mode Enable for each stream in L1 MMU AT where i : 0..15 */ +#define MMUV2_AT_REG_L1_ZLW_1DMODE_SID(i) (0x100 + ((i) * 0x20) + 0x0004) + +/* Set ZLW insertion N pages ahead per stream 1D where i : 0..15 */ +#define MMUV2_AT_REG_L1_ZLW_INS_N_AHEAD_SID(i) (0x100 + ((i) * 0x20) + 0x0008) + +/* ZLW 2D mode Enable for each stream in L1 MMU AT where i : 0..15 */ +#define MMUV2_AT_REG_L1_ZLW_2DMODE_SID(i) (0x100 + ((i) * 0x20) + 0x0010) + +/* ZLW Insertion for each stream in L1 MMU AT where i : 0..15 */ 
+#define MMUV2_AT_REG_L1_ZLW_INSERTION(i)	(0x100 + ((i) * 0x20) + 0x000c)
+
+#define MMUV2_AT_REG_L1_FW_ZLW_FIFO	(0x100 + \
+	(IPU_MMU_MAX_TLB_L1_STREAMS * 0x20) + 0x003c)
+
+/* FW ZLW has priority - needed for ZLW invalidations */
+#define MMUV2_AT_REG_L1_FW_ZLW_PRIO	(0x100 + \
+	(IPU_MMU_MAX_TLB_L1_STREAMS * 0x20))
+
+#define TBL_PHYS_ADDR(a)	((phys_addr_t)(a) << ISP_PADDR_SHIFT)
+#define TBL_VIRT_ADDR(a)	phys_to_virt(TBL_PHYS_ADDR(a))
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)
+#define to_ipu_mmu_domain(dom) ((dom)->priv)
+#else
+#define to_ipu_mmu_domain(dom) \
+	container_of(dom, struct ipu_mmu_domain, domain)
+#endif
+
+static void zlw_invalidate(struct ipu_mmu *mmu, struct ipu_mmu_hw *mmu_hw)
+{
+	unsigned int retry = 0;
+	unsigned int i, j;
+	int ret;
+
+	for (i = 0; i < mmu_hw->nr_l1streams; i++) {
+		/* We need to invalidate only the zlw enabled stream IDs */
+		if (mmu_hw->l1_zlw_en[i]) {
+			/*
+			 * Maximum 16 blocks per L1 stream.
+			 * Write the trash buffer iova offset to the FW_ZLW
+			 * register. This triggers pre-fetching of the next 16
+			 * pages from the page table, so we need to increment
+			 * the iova address by 16 * 4K to trigger the next 16
+			 * pages. Once this loop is completed, the L1 cache
+			 * will be filled with the trash buffer translation.
+			 *
+			 * TODO: Instead of maximum 16 blocks, use the allocated
+			 * block size
+			 */
+			for (j = 0; j < mmu_hw->l1_block_sz[i]; j++)
+				writel(mmu->iova_addr_trash +
+				       j * MMUV2_TRASH_L1_BLOCK_OFFSET,
+				       mmu_hw->base +
+				       MMUV2_AT_REG_L1_ZLW_INSERTION(i));
+
+			/*
+			 * Now we need to fill the L2 cache entry. L2 cache
+			 * entries will be automatically updated, based on the
+			 * L1 entry. The above loop for L1 will update only one
+			 * of the two entries in L2 as the L1 is under 4MB
+			 * range. To force the other entry in L2 to update, we
+			 * just need to trigger another pre-fetch which is
+			 * outside the above 4MB range.
+			 */
+			writel(mmu->iova_addr_trash +
+			       MMUV2_TRASH_L2_BLOCK_OFFSET,
+			       mmu_hw->base +
+			       MMUV2_AT_REG_L1_ZLW_INSERTION(0));
+		}
+	}
+
+	/*
+	 * Wait until AT is ready. FIFO read should return 2 when AT is ready.
+	 * The retry limit of 1000 is guesswork to avoid an infinite loop.
+	 */
+	do {
+		if (retry > 1000) {
+			dev_err(mmu->dev, "zlw invalidation failed\n");
+			return;
+		}
+		ret = readl(mmu_hw->base + MMUV2_AT_REG_L1_FW_ZLW_FIFO);
+		retry++;
+	} while (ret != 2);
+}
+
+static void tlb_invalidate(struct ipu_mmu *mmu)
+{
+	unsigned int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mmu->ready_lock, flags);
+	if (!mmu->ready) {
+		spin_unlock_irqrestore(&mmu->ready_lock, flags);
+		return;
+	}
+
+	for (i = 0; i < mmu->nr_mmus; i++) {
+		u32 inv;
+
+		/*
+		 * To avoid the HW-bug-induced deadlock in some of the IPU4
+		 * MMUs on successive invalidate calls, we need to first do a
+		 * read to the page table base before writing the invalidate
+		 * register. MMUs which need to implement this WA will have
+		 * the insert_read_before_invalidate flag set to true.
+		 * Disregard the return value of the read.
+		 */
+		if (mmu->mmu_hw[i].insert_read_before_invalidate)
+			readl(mmu->mmu_hw[i].base + REG_L1_PHYS);
+
+		/* Normal invalidate or zlw invalidate */
+		if (mmu->mmu_hw[i].zlw_invalidate) {
+			/* trash buffer must be mapped by now, just in case!
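+			 * zlw_invalidate() refills the TLB by prefetching
+			 * translations from mmu->iova_addr_trash, so running
+			 * it before the trash range is mapped would prefetch
+			 * through unmapped IOVA.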
*/ + WARN_ON(!mmu->iova_addr_trash); + + zlw_invalidate(mmu, &mmu->mmu_hw[i]); + } else { + if (mmu->mmu_hw[i].nr_l1streams == 32) + inv = 0xffffffff; + else if (mmu->mmu_hw[i].nr_l1streams == 0) + inv = MMU0_TLB_INVALIDATE; + else + inv = MMU1_TLB_INVALIDATE; + writel(inv, mmu->mmu_hw[i].base + + REG_TLB_INVALIDATE); + } + } + spin_unlock_irqrestore(&mmu->ready_lock, flags); +} + +#ifdef DEBUG +static void page_table_dump(struct ipu_mmu_domain *adom) +{ + u32 l1_idx; + + pr_debug("begin IOMMU page table dump\n"); + + for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) { + u32 l2_idx; + u32 iova = (phys_addr_t) l1_idx << ISP_L1PT_SHIFT; + + if (adom->pgtbl[l1_idx] == adom->dummy_l2_tbl) + continue; + pr_debug("l1 entry %u; iovas 0x%8.8x--0x%8.8x, at %p\n", + l1_idx, iova, iova + ISP_PAGE_SIZE, + (void *)TBL_PHYS_ADDR(adom->pgtbl[l1_idx])); + + for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) { + u32 *l2_pt = TBL_VIRT_ADDR(adom->pgtbl[l1_idx]); + u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT); + + if (l2_pt[l2_idx] == adom->dummy_page) + continue; + + pr_debug("\tl2 entry %u; iova 0x%8.8x, phys %p\n", + l2_idx, iova2, + (void *)TBL_PHYS_ADDR(l2_pt[l2_idx])); + } + } + + pr_debug("end IOMMU page table dump\n"); +} +#endif /* DEBUG */ + +static u32 *alloc_page_table(struct ipu_mmu_domain *adom, bool l1) +{ + u32 *pt = (u32 *) __get_free_page(GFP_KERNEL | GFP_DMA32); + int i; + + if (!pt) + return NULL; + + pr_debug("__get_free_page() == %p\n", pt); + + for (i = 0; i < ISP_L1PT_PTES; i++) + pt[i] = l1 ? adom->dummy_l2_tbl : adom->dummy_page; + + return pt; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) +static int ipu_mmu_domain_init(struct iommu_domain *domain) +{ + struct ipu_mmu_domain *adom; + void *ptr; + + adom = kzalloc(sizeof(*adom), GFP_KERNEL); + if (!adom) + return -ENOMEM; + + domain->priv = adom; + adom->domain = domain; + + ptr = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32); + if (!ptr) + goto err; + + adom->dummy_page = virt_to_phys(ptr) >> ISP_PAGE_SHIFT; + + ptr = alloc_page_table(adom, false); + if (!ptr) + goto err; + + adom->dummy_l2_tbl = virt_to_phys(ptr) >> ISP_PAGE_SHIFT; + + /* + * We always map the L1 page table (a single page as well as + * the L2 page tables). + */ + adom->pgtbl = alloc_page_table(adom, true); + if (!adom->pgtbl) + goto err; + + spin_lock_init(&adom->lock); + + pr_debug("domain initialised\n"); + pr_debug("ops %p\n", domain->ops); + + return 0; + +err: + free_page((unsigned long)TBL_VIRT_ADDR(adom->dummy_page)); + free_page((unsigned long)TBL_VIRT_ADDR(adom->dummy_l2_tbl)); + kfree(adom); + + return -ENOMEM; +} +#else +static struct iommu_domain *ipu_mmu_domain_alloc(unsigned int type) +{ + struct ipu_mmu_domain *adom; + void *ptr; + + if (type != IOMMU_DOMAIN_UNMANAGED) + return NULL; + + adom = kzalloc(sizeof(*adom), GFP_KERNEL); + if (!adom) + return NULL; + + adom->domain.geometry.aperture_start = 0; + adom->domain.geometry.aperture_end = DMA_BIT_MASK(IPU_MMU_ADDRESS_BITS); + adom->domain.geometry.force_aperture = true; + + ptr = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32); + if (!ptr) + goto err_mem; + + adom->dummy_page = virt_to_phys(ptr) >> ISP_PAGE_SHIFT; + + ptr = alloc_page_table(adom, false); + if (!ptr) + goto err; + + adom->dummy_l2_tbl = virt_to_phys(ptr) >> ISP_PAGE_SHIFT; + + /* + * We always map the L1 page table (a single page as well as + * the L2 page tables). 
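+	 * Size sketch: ISP_L1PT_SHIFT is 22, so one L1 entry spans 4 MiB of
+	 * IOVA; the single 4 KiB L1 page holds 1024 such entries, each
+	 * pointing at a 4 KiB L2 page whose 1024 PTEs map one 4 KiB page
+	 * each (1024 * 1024 * 4 KiB = 4 GiB of addressable IOVA).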
+ */ + adom->pgtbl = alloc_page_table(adom, true); + if (!adom->pgtbl) + goto err; + + spin_lock_init(&adom->lock); + + pr_debug("domain initialised\n"); + pr_debug("ops %p\n", adom->domain.ops); + + return &adom->domain; + +err: + free_page((unsigned long)TBL_VIRT_ADDR(adom->dummy_page)); + free_page((unsigned long)TBL_VIRT_ADDR(adom->dummy_l2_tbl)); +err_mem: + kfree(adom); + + return NULL; +} +#endif + +static void ipu_mmu_domain_destroy(struct iommu_domain *domain) +{ + struct ipu_mmu_domain *adom = to_ipu_mmu_domain(domain); + struct iova *iova; + u32 l1_idx; + + if (adom->iova_addr_trash) { + iova = find_iova(&adom->dmap->iovad, adom->iova_addr_trash >> + PAGE_SHIFT); + /* unmap and free the corresponding trash buffer iova */ + iommu_unmap(domain, iova->pfn_lo << PAGE_SHIFT, + (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT); + __free_iova(&adom->dmap->iovad, iova); + + /* + * Set iova_addr_trash in mmu to 0, so that on next HW init + * this will be mapped again. + */ + adom->iova_addr_trash = 0; + } + + for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) + if (adom->pgtbl[l1_idx] != adom->dummy_l2_tbl) + free_page((unsigned long) + TBL_VIRT_ADDR(adom->pgtbl[l1_idx])); + + free_page((unsigned long)TBL_VIRT_ADDR(adom->dummy_page)); + free_page((unsigned long)TBL_VIRT_ADDR(adom->dummy_l2_tbl)); + free_page((unsigned long)adom->pgtbl); + kfree(adom); +} + +static int ipu_mmu_attach_dev(struct iommu_domain *domain, struct device *dev) +{ + struct ipu_mmu_domain *adom = to_ipu_mmu_domain(domain); + + spin_lock(&adom->lock); + + adom->users++; + + dev_dbg(dev, "domain attached\n"); + + spin_unlock(&adom->lock); + + return 0; +} + +static void ipu_mmu_detach_dev(struct iommu_domain *domain, struct device *dev) +{ + struct ipu_mmu_domain *adom = to_ipu_mmu_domain(domain); + + spin_lock(&adom->lock); + + adom->users--; + dev_dbg(dev, "domain detached\n"); + + spin_unlock(&adom->lock); +} + +static int l2_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size) +{ + struct ipu_mmu_domain *adom = to_ipu_mmu_domain(domain); + u32 l1_idx = iova >> ISP_L1PT_SHIFT; + u32 l1_entry = adom->pgtbl[l1_idx]; + u32 *l2_pt; + u32 iova_start = iova; + unsigned int l2_idx; + unsigned long flags; + + pr_debug("mapping l2 page table for l1 index %u (iova %8.8x)\n", + l1_idx, (u32) iova); + + if (l1_entry == adom->dummy_l2_tbl) { + u32 *l2_virt = alloc_page_table(adom, false); + + if (!l2_virt) + return -ENOMEM; + + l1_entry = virt_to_phys(l2_virt) >> ISP_PADDR_SHIFT; + pr_debug("allocated page for l1_idx %u\n", l1_idx); + + spin_lock_irqsave(&adom->lock, flags); + if (adom->pgtbl[l1_idx] == adom->dummy_l2_tbl) { + adom->pgtbl[l1_idx] = l1_entry; +#ifdef CONFIG_X86 + clflush_cache_range(&adom->pgtbl[l1_idx], + sizeof(adom->pgtbl[l1_idx])); +#endif /* CONFIG_X86 */ + } else { + spin_unlock_irqrestore(&adom->lock, flags); + free_page((unsigned long)TBL_VIRT_ADDR(l1_entry)); + spin_lock_irqsave(&adom->lock, flags); + } + } else { + spin_lock_irqsave(&adom->lock, flags); + } + + l2_pt = TBL_VIRT_ADDR(adom->pgtbl[l1_idx]); + + pr_debug("l2_pt at %p\n", l2_pt); + + paddr = ALIGN(paddr, ISP_PAGE_SIZE); + + l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; + + pr_debug("l2_idx %u, phys 0x%8.8x\n", l2_idx, l2_pt[l2_idx]); + if (l2_pt[l2_idx] != adom->dummy_page) { + spin_unlock_irqrestore(&adom->lock, flags); + return -EBUSY; + } + + l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT; + + spin_unlock_irqrestore(&adom->lock, flags); + +#ifdef CONFIG_X86 + clflush_cache_range(&l2_pt[l2_idx], 
sizeof(l2_pt[l2_idx])); +#endif /* CONFIG_X86 */ + + pr_debug("l2 index %u mapped as 0x%8.8x\n", l2_idx, l2_pt[l2_idx]); + + return 0; +} + +static int ipu_mmu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + u32 iova_start = round_down(iova, ISP_PAGE_SIZE); + u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE); + + pr_debug + ("mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr 0x%10.10llx\n", + iova_start, iova_end, size, paddr); + + return l2_map(domain, iova_start, paddr, size); +} + +static size_t l2_unmap(struct iommu_domain *domain, unsigned long iova, + phys_addr_t dummy, size_t size) +{ + struct ipu_mmu_domain *adom = to_ipu_mmu_domain(domain); + u32 l1_idx = iova >> ISP_L1PT_SHIFT; + u32 *l2_pt = TBL_VIRT_ADDR(adom->pgtbl[l1_idx]); + u32 iova_start = iova; + unsigned int l2_idx; + size_t unmapped = 0; + + pr_debug("unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n", + l1_idx, iova); + + if (adom->pgtbl[l1_idx] == adom->dummy_l2_tbl) + return -EINVAL; + + pr_debug("l2_pt at %p\n", l2_pt); + + for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; + (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT) + < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) { + unsigned long flags; + + pr_debug("l2 index %u unmapped, was 0x%10.10llx\n", + l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx])); + spin_lock_irqsave(&adom->lock, flags); + l2_pt[l2_idx] = adom->dummy_page; + spin_unlock_irqrestore(&adom->lock, flags); +#ifdef CONFIG_X86 + clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx])); +#endif /* CONFIG_X86 */ + unmapped++; + } + + return unmapped << ISP_PAGE_SHIFT; +} + +static size_t ipu_mmu_unmap(struct iommu_domain *domain, + unsigned long iova, size_t size) +{ + return l2_unmap(domain, iova, 0, size); +} + +static phys_addr_t ipu_mmu_iova_to_phys(struct iommu_domain *domain, + dma_addr_t iova) +{ + struct ipu_mmu_domain *adom = to_ipu_mmu_domain(domain); + u32 *l2_pt = TBL_VIRT_ADDR(adom->pgtbl[iova >> ISP_L1PT_SHIFT]); + + return (phys_addr_t) l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT] + << ISP_PAGE_SHIFT; +} + +static int allocate_trash_buffer(struct ipu_bus_device *adev) +{ + struct ipu_mmu *mmu = ipu_bus_get_drvdata(adev); + unsigned int n_pages = PAGE_ALIGN(IPU_MMUV2_TRASH_RANGE) >> PAGE_SHIFT; + struct iova *iova; + u32 iova_addr; + unsigned int i; + int ret; + + /* Allocate 8MB in iova range */ + iova = alloc_iova(&mmu->dmap->iovad, n_pages, + dma_get_mask(mmu->dev) >> PAGE_SHIFT, 0); + if (!iova) { + dev_err(&adev->dev, "cannot allocate iova range for trash\n"); + return -ENOMEM; + } + + /* + * Map the 8MB iova address range to the same physical trash page + * mmu->trash_page which is already reserved at the probe + */ + iova_addr = iova->pfn_lo; + for (i = 0; i < n_pages; i++) { + ret = iommu_map(mmu->dmap->domain, iova_addr << PAGE_SHIFT, + page_to_phys(mmu->trash_page), PAGE_SIZE, 0); + if (ret) { + dev_err(&adev->dev, + "mapping trash buffer range failed\n"); + goto out_unmap; + } + + iova_addr++; + } + + /* save the address for the ZLW invalidation */ + mmu->iova_addr_trash = iova->pfn_lo << PAGE_SHIFT; + dev_info(&adev->dev, "iova trash buffer for MMUID: %d is %u\n", + mmu->mmid, (unsigned int)mmu->iova_addr_trash); + return 0; + +out_unmap: + iommu_unmap(mmu->dmap->domain, iova->pfn_lo << PAGE_SHIFT, + (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT); + __free_iova(&mmu->dmap->iovad, iova); + return ret; +} + +static int ipu_mmu_hw_init(struct device *dev) +{ + struct ipu_bus_device *adev = 
to_ipu_bus_device(dev);
+	struct ipu_mmu *mmu = ipu_bus_get_drvdata(adev);
+	struct ipu_mmu_domain *adom;
+	unsigned int i;
+	unsigned long flags;
+
+	dev_dbg(dev, "mmu hw init\n");
+	/*
+	 * FIXME: the following NULL pointer check is not a complete fix:
+	 * if the mmu is not power cycled before being used, the page table
+	 * address will still not be set into HW.
+	 */
+	if (!mmu->dmap) {
+		dev_warn(dev, "mmu is not ready yet. skipping.\n");
+		return 0;
+	}
+	adom = to_ipu_mmu_domain(mmu->dmap->domain);
+
+	adom->dmap = mmu->dmap;
+
+	/* Initialise each MMU HW block */
+	for (i = 0; i < mmu->nr_mmus; i++) {
+		struct ipu_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
+		bool zlw_invalidate = false;
+		unsigned int j;
+		u16 block_addr;
+
+		/* Write page table address per MMU */
+		writel((phys_addr_t) virt_to_phys(adom->pgtbl)
+		       >> ISP_PADDR_SHIFT,
+		       mmu->mmu_hw[i].base + REG_L1_PHYS);
+
+		/* Set info bits per MMU */
+		writel(mmu->mmu_hw[i].info_bits,
+		       mmu->mmu_hw[i].base + REG_INFO);
+
+		/* Configure MMU TLB stream configuration for L1 */
+		for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams;
+		     block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) {
+			if (block_addr > IPU_MAX_LI_BLOCK_ADDR) {
+				dev_err(dev, "invalid L1 configuration\n");
+				return -EINVAL;
+			}
+
+			/* Write block start address for each stream */
+			writel(block_addr, mmu_hw->base +
+			       mmu_hw->l1_stream_id_reg_offset + 4 * j);
+
+#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P)
+			/* Enable ZLW for streams based on the init table */
+			writel(mmu->mmu_hw[i].l1_zlw_en[j],
+			       mmu_hw->base +
+			       MMUV2_AT_REG_L1_ZLW_EN_SID(j));
+
+			/* Track whether ZLW is enabled in any stream */
+			zlw_invalidate |= mmu->mmu_hw[i].l1_zlw_en[j];
+
+			/* Enable ZLW 1D mode for streams from the init table */
+			writel(mmu->mmu_hw[i].l1_zlw_1d_mode[j],
+			       mmu_hw->base +
+			       MMUV2_AT_REG_L1_ZLW_1DMODE_SID(j));
+
+			/* Set when the ZLW insertion will happen */
+			writel(mmu->mmu_hw[i].l1_ins_zlw_ahead_pages[j],
+			       mmu_hw->base +
+			       MMUV2_AT_REG_L1_ZLW_INS_N_AHEAD_SID(j));
+
+			/* Set whether ZLW 2D mode is active for each stream */
+			writel(mmu->mmu_hw[i].l1_zlw_2d_mode[j],
+			       mmu_hw->base +
+			       MMUV2_AT_REG_L1_ZLW_2DMODE_SID(j));
+#endif
+		}
+
+#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P)
+		/*
+		 * If ZLW invalidate is enabled even for one stream in an
+		 * MMU1, FW ZLW operations need to be given higher priority
+		 * on that MMU1.
+		 */
+		if (zlw_invalidate)
+			writel(1, mmu_hw->base +
+			       MMUV2_AT_REG_L1_FW_ZLW_PRIO);
+#endif
+		/* Configure MMU TLB stream configuration for L2 */
+		for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams;
+		     block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) {
+			if (block_addr > IPU_MAX_L2_BLOCK_ADDR) {
+				dev_err(dev, "invalid L2 configuration\n");
+				return -EINVAL;
+			}
+
+			writel(block_addr, mmu_hw->base +
+			       mmu_hw->l2_stream_id_reg_offset + 4 * j);
+		}
+	}
+
+	/* Allocate trash buffer, if not allocated.
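The 8 MiB trash IOVA range is backed by a single physical page.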
Only once per MMU */ + if (!mmu->iova_addr_trash) { + int ret; + + ret = allocate_trash_buffer(adev); + if (ret) { + dev_err(dev, "trash buffer allocation failed\n"); + return ret; + } + + /* + * Update the domain pointer to trash buffer to release it on + * domain destroy + */ + adom->iova_addr_trash = mmu->iova_addr_trash; + } + + spin_lock_irqsave(&mmu->ready_lock, flags); + mmu->ready = true; + spin_unlock_irqrestore(&mmu->ready_lock, flags); + + return 0; +} + +static void set_mapping(struct ipu_mmu *mmu, struct ipu_dma_mapping *dmap) +{ + mmu->dmap = dmap; + + if (!dmap) + return; + + pm_runtime_get_sync(mmu->dev); + ipu_mmu_hw_init(mmu->dev); + pm_runtime_put(mmu->dev); +} + +static int ipu_mmu_add_device(struct device *dev) +{ + struct device *aiommu = to_ipu_bus_device(dev)->iommu; + struct ipu_dma_mapping *dmap; + int rval; + + if (!aiommu || !dev->iommu_group) + return 0; + + dmap = iommu_group_get_iommudata(dev->iommu_group); + if (!dmap) + return 0; + + pr_debug("attach dev %s\n", dev_name(dev)); + + rval = iommu_attach_device(dmap->domain, dev); + if (rval) + return rval; + + kref_get(&dmap->ref); + + return 0; +} + +static struct iommu_ops ipu_iommu_ops = { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + .domain_init = ipu_mmu_domain_init, + .domain_destroy = ipu_mmu_domain_destroy, +#else + .domain_alloc = ipu_mmu_domain_alloc, + .domain_free = ipu_mmu_domain_destroy, +#endif + .attach_dev = ipu_mmu_attach_dev, + .detach_dev = ipu_mmu_detach_dev, + .map = ipu_mmu_map, + .unmap = ipu_mmu_unmap, + .iova_to_phys = ipu_mmu_iova_to_phys, + .add_device = ipu_mmu_add_device, + .pgsize_bitmap = SZ_4K, +}; + +static int ipu_mmu_probe(struct ipu_bus_device *adev) +{ + struct ipu_mmu_pdata *pdata; + struct ipu_mmu *mmu; + int rval; + + mmu = devm_kzalloc(&adev->dev, sizeof(*mmu), GFP_KERNEL); + if (!mmu) + return -ENOMEM; + + dev_dbg(&adev->dev, "mmu probe %p %p\n", adev, &adev->dev); + ipu_bus_set_drvdata(adev, mmu); + + rval = ipu_bus_set_iommu(&ipu_iommu_ops); + if (rval) + return rval; + + pdata = adev->pdata; + + mmu->mmid = pdata->mmid; + + mmu->mmu_hw = pdata->mmu_hw; + mmu->nr_mmus = pdata->nr_mmus; + mmu->tlb_invalidate = tlb_invalidate; + mmu->set_mapping = set_mapping; + mmu->dev = &adev->dev; + mmu->ready = false; + spin_lock_init(&mmu->ready_lock); + + /* + * Allocate 1 page of physical memory for the trash buffer + * + * TODO! Could be further optimized by allocating only one page per ipu + * instance instead of per mmu + */ + mmu->trash_page = alloc_page(GFP_KERNEL); + if (!mmu->trash_page) { + dev_err(&adev->dev, "insufficient memory for trash buffer\n"); + return -ENOMEM; + } + dev_info(&adev->dev, "MMU: %d, allocated page for trash: 0x%p\n", + mmu->mmid, mmu->trash_page); + + pm_runtime_allow(&adev->dev); + pm_runtime_enable(&adev->dev); + + /* + * FIXME: We can't unload this --- bus_set_iommu() will + * register a notifier which must stay until the devices are + * gone. + */ + __module_get(THIS_MODULE); + + return 0; +} + +/* + * Leave iommu ops as they were --- this means we must be called as + * the very last. 
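+ * ipu_bus_set_iommu() registered a notifier at probe time which must
+ * stay until the devices are gone (see the FIXME in ipu_mmu_probe()),
+ * so only the trash page is released here.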
+ */ +static void ipu_mmu_remove(struct ipu_bus_device *adev) +{ + struct ipu_mmu *mmu = ipu_bus_get_drvdata(adev); + + __free_page(mmu->trash_page); + dev_dbg(&adev->dev, "removed\n"); +} + +static irqreturn_t ipu_mmu_isr(struct ipu_bus_device *adev) +{ + dev_info(&adev->dev, "Yeah!\n"); + return IRQ_NONE; +} + +#ifdef CONFIG_PM +static int ipu_mmu_suspend(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_mmu *mmu = ipu_bus_get_drvdata(adev); + unsigned long flags; + + spin_lock_irqsave(&mmu->ready_lock, flags); + mmu->ready = false; + spin_unlock_irqrestore(&mmu->ready_lock, flags); + + return 0; +} + +static const struct dev_pm_ops ipu_mmu_pm_ops = { + .resume = ipu_mmu_hw_init, + .suspend = ipu_mmu_suspend, + .runtime_resume = ipu_mmu_hw_init, + .runtime_suspend = ipu_mmu_suspend, +}; + +#define IPU_MMU_PM_OPS (&ipu_mmu_pm_ops) + +#else /* !CONFIG_PM */ + +#define IPU_MMU_PM_OPS NULL + +#endif /* !CONFIG_PM */ + +static struct ipu_bus_driver ipu_mmu_driver = { + .probe = ipu_mmu_probe, + .remove = ipu_mmu_remove, + .isr = ipu_mmu_isr, + .wanted = IPU_MMU_NAME, + .drv = { + .name = IPU_MMU_NAME, + .owner = THIS_MODULE, + .pm = IPU_MMU_PM_OPS, + }, +}; +module_ipu_bus_driver(ipu_mmu_driver); + +static const struct pci_device_id ipu_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU_PCI_ID)}, + {0,} +}; +MODULE_DEVICE_TABLE(pci, ipu_pci_tbl); + +MODULE_AUTHOR("Sakari Ailus "); +MODULE_AUTHOR("Samu Onkalo "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu mmu driver"); diff --git a/drivers/media/pci/intel/ipu-mmu.h b/drivers/media/pci/intel/ipu-mmu.h new file mode 100644 index 000000000000..0e8863a2f024 --- /dev/null +++ b/drivers/media/pci/intel/ipu-mmu.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_MMU_H +#define IPU_MMU_H + +#include + +#include "ipu.h" +#include "ipu-pdata.h" + +#define ISYS_MMID 1 +#define PSYS_MMID 0 + +/* + * @pgtbl: virtual address of the l1 page table (one page) + */ +struct ipu_mmu_domain { + u32 __iomem *pgtbl; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct iommu_domain *domain; +#else + struct iommu_domain domain; +#endif + spinlock_t lock; /* Serialize access to users */ + unsigned int users; + struct ipu_dma_mapping *dmap; + u32 dummy_l2_tbl; + u32 dummy_page; + + /* Reference to the trash address to unmap on domain destroy */ + dma_addr_t iova_addr_trash; +}; + +/* + * @pgtbl: physical address of the l1 page table + */ +struct ipu_mmu { + struct list_head node; + unsigned int users; + + struct ipu_mmu_hw *mmu_hw; + unsigned int nr_mmus; + int mmid; + + phys_addr_t pgtbl; + struct device *dev; + + struct ipu_dma_mapping *dmap; + + struct page *trash_page; + dma_addr_t iova_addr_trash; + + bool ready; + spinlock_t ready_lock; /* Serialize access to bool ready */ + + void (*tlb_invalidate)(struct ipu_mmu *mmu); + void (*set_mapping)(struct ipu_mmu *mmu, + struct ipu_dma_mapping *dmap); +}; + +#endif diff --git a/drivers/media/pci/intel/ipu-pdata.h b/drivers/media/pci/intel/ipu-pdata.h new file mode 100644 index 000000000000..66f111266f05 --- /dev/null +++ b/drivers/media/pci/intel/ipu-pdata.h @@ -0,0 +1,283 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_PDATA_H +#define IPU_PDATA_H + +#define IPU_MMU_NAME IPU_NAME "-mmu" +#define IPU_ISYS_CSI2_NAME IPU_NAME "-csi2" +#define IPU_ISYS_NAME IPU_NAME "-isys" +#define IPU_PSYS_NAME IPU_NAME "-psys" +#define 
IPU_BUTTRESS_NAME IPU_NAME "-buttress"
+
+#define IPU_MMU_MAX_DEVICES		4
+#define IPU_MMU_ADDRESS_BITS		32
+/* The firmware is accessible within the first 2 GiB only in non-secure mode. */
+#define IPU_MMU_ADDRESS_BITS_NON_SECURE	31
+
+#if defined(CONFIG_VIDEO_INTEL_IPU4) || defined(CONFIG_VIDEO_INTEL_IPU4P)
+#define IPU_MMU_MAX_TLB_L1_STREAMS	16
+#define IPU_MMU_MAX_TLB_L2_STREAMS	16
+#define IPU_MAX_LI_BLOCK_ADDR		64
+#define IPU_MAX_L2_BLOCK_ADDR		32
+#else
+#define IPU_MMU_MAX_TLB_L1_STREAMS	32
+#define IPU_MMU_MAX_TLB_L2_STREAMS	32
+#define IPU_MAX_LI_BLOCK_ADDR		128
+#define IPU_MAX_L2_BLOCK_ADDR		64
+#endif
+
+#define IPU_ISYS_MAX_CSI2_LEGACY_PORTS	4
+#define IPU_ISYS_MAX_CSI2_COMBO_PORTS	2
+
+#define IPU_MAX_FRAME_COUNTER	0xff
+
+/*
+ * To maximize IOSF utilization, the IPU needs to send requests in bursts.
+ * At the DMA interface with the buttress, there are CDC FIFOs with burst
+ * collection capability. CDC FIFO burst collectors have a configurable
+ * threshold which is set based on the outcome of performance measurements.
+ *
+ * isys has 3 ports with an IOSF interface for VC0, VC1 and VC2
+ * psys has 4 ports with an IOSF interface for VC0, VC1w, VC1r and VC2
+ *
+ * Threshold values are pre-defined and were arrived at after performance
+ * evaluations on the IPU4 variant in question.
+ */
+#define IPU_MAX_VC_IOSF_PORTS	4
+
+/*
+ * The IPU must configure the correct arbitration mechanism for IOSF VC
+ * requests. There are two options for VC0 and VC1: 0 means rearbitrate on
+ * stall and 1 means stall until the request is completed.
+ */
+#define IPU_BTRS_ARB_MODE_TYPE_REARB	0
+#define IPU_BTRS_ARB_MODE_TYPE_STALL	1
+
+/* Currently chosen arbitration mechanism for VC0 */
+#define IPU_BTRS_ARB_STALL_MODE_VC0	\
+			IPU_BTRS_ARB_MODE_TYPE_REARB
+
+/* Currently chosen arbitration mechanism for VC1 */
+#define IPU_BTRS_ARB_STALL_MODE_VC1	\
+			IPU_BTRS_ARB_MODE_TYPE_REARB
+
+struct ipu_isys_subdev_pdata;
+
+/*
+ * MMU invalidation HW bug workaround using the ZLW mechanism
+ *
+ * The IPU4 MMUV2 has a bug in the invalidation mechanism which might result
+ * in a wrong translation or a replication of the translation. This will
+ * cause data corruption, so we cannot directly use the MMUV2 invalidation
+ * registers to invalidate the MMU. Instead, whenever an invalidate is
+ * called, we need to clear the TLB by evicting all valid translations:
+ * we fill it with translations of a trash buffer which is guaranteed not
+ * to be used by any other process. ZLW is used to fill the L1 and L2
+ * caches with the trash buffer translations. ZLW, or zero-length write,
+ * is a pre-fetch mechanism which pre-fetches pages into the L1 and L2
+ * caches without triggering any memory operations.
+ *
+ * In MMU V2, L1 -> 16 streams and 64 blocks, maximum 16 blocks per stream.
+ * One L1 block has 16 entries, hence points to 16 * 4K pages.
+ * L2 -> 16 streams and 32 blocks, 2 blocks per stream.
+ * One L2 block maps 1024 L1 entries, hence points to a 4MB address range.
+ * 2 blocks per L2 stream means one stream points to an 8MB range.
+ *
+ * As we need to clear the caches, and 8MB is the biggest range a stream
+ * covers, we need a trash buffer which spans an 8MB address range. As the
+ * trash buffer is never used for any actual memory transaction, only the
+ * least amount of physical memory is needed: we reserve an 8MB IOVA
+ * address range but only a single page of physical memory. Every page of
+ * the 8MB IOVA range is then mapped to that same physical memory page.
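+ *
+ * With 4 KiB pages this works out as IPU_MMUV2_L2_RANGE = 1024 * 4 KiB
+ * = 4 MiB per L2 block, so IPU_MMUV2_TRASH_RANGE = 2 * 4 MiB = 8 MiB,
+ * i.e. 2048 IOVA pages which all translate to the single trash page.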
+ */
+/* One L2 entry maps 1024 L1 entries and one L1 entry per page */
+#define IPU_MMUV2_L2_RANGE	(1024 * PAGE_SIZE)
+/* Max L2 blocks per stream */
+#define IPU_MMUV2_MAX_L2_BLOCKS	2
+/* Max L1 blocks per stream */
+#define IPU_MMUV2_MAX_L1_BLOCKS	16
+#define IPU_MMUV2_TRASH_RANGE	(IPU_MMUV2_L2_RANGE * \
+				 IPU_MMUV2_MAX_L2_BLOCKS)
+/* Entries per L1 block */
+#define MMUV2_ENTRIES_PER_L1_BLOCK	16
+#define MMUV2_TRASH_L1_BLOCK_OFFSET	(MMUV2_ENTRIES_PER_L1_BLOCK * \
+					 PAGE_SIZE)
+#define MMUV2_TRASH_L2_BLOCK_OFFSET	IPU_MMUV2_L2_RANGE
+
+/*
+ * Some of the IPU4 MMUs provide for configuring the L1 and L2 page table
+ * caches. Both the L1 and L2 caches are divided into multiple sections
+ * called streams. There are a maximum of 16 streams for both caches. Each
+ * of these sections is subdivided into multiple blocks. nr_l1streams = 0
+ * and nr_l2streams = 0 means the MMU is of type MMU_V1 and does not
+ * support L1/L2 page table caches.
+ *
+ * L1 per-stream block sizes are configurable and vary per use case.
+ * L2 has constant block sizes - 2 blocks per stream.
+ *
+ * MMU1 supports pre-fetching of pages to reduce cache lookup misses. To
+ * enable the pre-fetching, the MMU1 AT (Address Translator) device
+ * registers need to be configured.
+ *
+ * There are four types of memory accesses which require ZLW configuration.
+ * ZLW (Zero Length Write) is a mechanism to enable VT-d pre-fetching on
+ * the IOMMU.
+ *
+ * 1. Sequential Access or 1D mode
+ *	Set ZLW_EN -> 1
+ *	Set ZLW_PAGE_CROSS_1D -> 1
+ *	Set ZLW_N to "N" pages so that a ZLW will be inserted N pages
+ *	ahead, where N is pre-defined and hardcoded in the platform data
+ *	Set ZLW_2D -> 0
+ *
+ * 2. ZLW 2D mode
+ *	Set ZLW_EN -> 1
+ *	Set ZLW_PAGE_CROSS_1D -> 1
+ *	Set ZLW_N -> 0
+ *	Set ZLW_2D -> 1
+ *
+ * 3. ZLW Enable (no 1D or 2D mode)
+ *	Set ZLW_EN -> 1
+ *	Set ZLW_PAGE_CROSS_1D -> 0
+ *	Set ZLW_N -> 0
+ *	Set ZLW_2D -> 0
+ *
+ * 4. ZLW disable
+ *	Set ZLW_EN -> 0
+ *	Set ZLW_PAGE_CROSS_1D -> 0
+ *	Set ZLW_N -> 0
+ *	Set ZLW_2D -> 0
+ *
+ * To configure the ZLW for the above memory accesses, four registers are
+ * available. Hence, to track these four settings, we have the following
+ * entries in struct ipu_mmu_hw. Each of these entries is per stream and
+ * available only for the L1 streams.
+ *
+ * a. l1_zlw_en -> Tracks whether ZLW is enabled per stream (ZLW_EN)
+ * b. l1_zlw_1d_mode -> Tracks 1D mode per stream; a ZLW is inserted at
+ *    each page boundary
+ * c. l1_ins_zlw_ahead_pages -> Tracks how far in advance the ZLW needs to
+ *    be inserted: insert the ZLW request N pages ahead of the current
+ *    address
+ * d. l1_zlw_2d_mode -> Tracks 2D mode per stream (ZLW_2D)
+ *
+ * Currently the L1/L2 streams, blocks, AT ZLW configurations etc. are
+ * pre-defined as per use case specific calculations. Any change to this
+ * pre-defined table has to happen in sync with the IPU4 FW.
+ */
+struct ipu_mmu_hw {
+	union {
+		unsigned long offset;
+		void __iomem *base;
+	};
+	unsigned int info_bits;
+	u8 nr_l1streams;
+	/*
+	 * L1 has a variable number of blocks per stream - a total of 64
+	 * blocks and a maximum of 16 blocks per stream. Configurable by
+	 * using the block start address per stream.
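+	 * (E.g. l1_block_sz = {2, 4, 2, ...} places streams 0, 1 and 2 at
+	 * block start addresses 0, 2 and 6 respectively.)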
Block start address is calculated from the block size + */ + u8 l1_block_sz[IPU_MMU_MAX_TLB_L1_STREAMS]; + /* Is ZLW is enabled in each stream */ + bool l1_zlw_en[IPU_MMU_MAX_TLB_L1_STREAMS]; + bool l1_zlw_1d_mode[IPU_MMU_MAX_TLB_L1_STREAMS]; + u8 l1_ins_zlw_ahead_pages[IPU_MMU_MAX_TLB_L1_STREAMS]; + bool l1_zlw_2d_mode[IPU_MMU_MAX_TLB_L1_STREAMS]; + + u32 l1_stream_id_reg_offset; + u32 l2_stream_id_reg_offset; + + u8 nr_l2streams; + /* + * L2 has fixed 2 blocks per stream. Block address is calculated + * from the block size + */ + u8 l2_block_sz[IPU_MMU_MAX_TLB_L2_STREAMS]; + /* flag to track if WA is needed for successive invalidate HW bug */ + bool insert_read_before_invalidate; + /* flag to track if zlw based mmu invalidation is needed */ + bool zlw_invalidate; +}; + +struct ipu_mmu_pdata { + unsigned int nr_mmus; + struct ipu_mmu_hw mmu_hw[IPU_MMU_MAX_DEVICES]; + int mmid; +}; + +struct ipu_isys_csi2_pdata { + void __iomem *base; +}; + +#define IPU_EV_AUTO 0xff + +struct ipu_combo_receiver_params { + u8 crc_val; + u8 drc_val; + u8 drc_val_combined; + u8 ctle_val; +}; + +struct ipu_receiver_electrical_params { + u64 min_freq; + u64 max_freq; + unsigned short device; /* PCI DEVICE ID */ + u8 revision; /* PCI REVISION */ + /* base settings at first receiver power on */ + u8 rcomp_val_combo; + u8 rcomp_val_legacy; + + /* Combo per receiver settings */ + struct ipu_combo_receiver_params ports[2]; +}; + +struct ipu_isys_internal_csi2_pdata { + unsigned int nports; + unsigned int *offsets; + struct ipu_receiver_electrical_params *evparams; + u32 evsetmask0; + u32 evsetmask1; + unsigned char *evlanecombine; +}; + +struct ipu_isys_internal_tpg_pdata { + unsigned int ntpgs; + unsigned int *offsets; + unsigned int *sels; +}; + +/* + * One place to handle all the IPU HW variations + */ +struct ipu_hw_variants { + unsigned long offset; + unsigned int nr_mmus; + struct ipu_mmu_hw mmu_hw[IPU_MMU_MAX_DEVICES]; + u8 cdc_fifos; + u8 cdc_fifo_threshold[IPU_MAX_VC_IOSF_PORTS]; + u32 dmem_offset; + u32 spc_offset; /* SPC offset from psys base */ +}; + +struct ipu_isys_internal_pdata { + struct ipu_isys_internal_csi2_pdata csi2; + struct ipu_isys_internal_tpg_pdata tpg; + struct ipu_hw_variants hw_variant; + u32 num_parallel_streams; + u32 isys_dma_overshoot; +}; + +struct ipu_isys_pdata { + void __iomem *base; + const struct ipu_isys_internal_pdata *ipdata; + struct ipu_isys_subdev_pdata *spdata; +}; + +struct ipu_psys_internal_pdata { + struct ipu_hw_variants hw_variant; +}; + +struct ipu_psys_pdata { + void __iomem *base; + const struct ipu_psys_internal_pdata *ipdata; +}; + +#endif diff --git a/drivers/media/pci/intel/ipu-psys-compat32.c b/drivers/media/pci/intel/ipu-psys-compat32.c new file mode 100644 index 000000000000..452fd3860456 --- /dev/null +++ b/drivers/media/pci/intel/ipu-psys-compat32.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include + +#include + +#include "ipu-psys.h" + +static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret = -ENOTTY; + + if (file->f_op->unlocked_ioctl) + ret = file->f_op->unlocked_ioctl(file, cmd, arg); + + return ret; +} + +struct ipu_psys_buffer32 { + u64 len; + union { + int fd; + compat_uptr_t userptr; + u64 reserved; + } base; + u32 data_offset; + u32 bytes_used; + u32 flags; + u32 reserved[2]; +} __packed; + +struct ipu_psys_command32 { + u64 issue_id; + u64 user_token; + u32 priority; + compat_uptr_t pg_manifest; + compat_uptr_t buffers; 
+ int pg; + u32 pg_manifest_size; + u32 bufcount; + u32 min_psys_freq; + u32 reserved[2]; +} __packed; + +struct ipu_psys_manifest32 { + u32 index; + u32 size; + compat_uptr_t manifest; + u32 reserved[5]; +} __packed; + +static int +get_ipu_psys_command32(struct ipu_psys_command *kp, + struct ipu_psys_command32 __user *up) +{ + compat_uptr_t pgm, bufs; + + if (!access_ok(VERIFY_READ, up, + sizeof(struct ipu_psys_command32)) || + get_user(kp->issue_id, &up->issue_id) || + get_user(kp->user_token, &up->user_token) || + get_user(kp->priority, &up->priority) || + get_user(pgm, &up->pg_manifest) || + get_user(bufs, &up->buffers) || + get_user(kp->pg, &up->pg) || + get_user(kp->pg_manifest_size, &up->pg_manifest_size) || + get_user(kp->bufcount, &up->bufcount) || + get_user(kp->min_psys_freq, &up->min_psys_freq) + ) + return -EFAULT; + + kp->pg_manifest = compat_ptr(pgm); + kp->buffers = compat_ptr(bufs); + + return 0; +} + +static int +get_ipu_psys_buffer32(struct ipu_psys_buffer *kp, + struct ipu_psys_buffer32 __user *up) +{ + compat_uptr_t ptr; + + if (!access_ok(VERIFY_READ, up, + sizeof(struct ipu_psys_buffer32)) || + get_user(kp->len, &up->len) || + get_user(ptr, &up->base.userptr) || + get_user(kp->data_offset, &up->data_offset) || + get_user(kp->bytes_used, &up->bytes_used) || + get_user(kp->flags, &up->flags)) + return -EFAULT; + + kp->base.userptr = compat_ptr(ptr); + + return 0; +} + +static int +put_ipu_psys_buffer32(struct ipu_psys_buffer *kp, + struct ipu_psys_buffer32 __user *up) +{ + if (!access_ok(VERIFY_WRITE, up, + sizeof(struct ipu_psys_buffer32)) || + put_user(kp->len, &up->len) || + put_user(kp->base.fd, &up->base.fd) || + put_user(kp->data_offset, &up->data_offset) || + put_user(kp->bytes_used, &up->bytes_used) || + put_user(kp->flags, &up->flags)) + return -EFAULT; + + return 0; +} + +static int +get_ipu_psys_manifest32(struct ipu_psys_manifest *kp, + struct ipu_psys_manifest32 __user *up) +{ + compat_uptr_t ptr; + + if (!access_ok(VERIFY_READ, up, + sizeof(struct ipu_psys_manifest32)) || + get_user(kp->index, &up->index) || + get_user(kp->size, &up->size) || get_user(ptr, &up->manifest)) + return -EFAULT; + + kp->manifest = compat_ptr(ptr); + + return 0; +} + +static int +put_ipu_psys_manifest32(struct ipu_psys_manifest *kp, + struct ipu_psys_manifest32 __user *up) +{ + compat_uptr_t ptr = (u32)((unsigned long)kp->manifest); + + if (!access_ok(VERIFY_WRITE, up, + sizeof(struct ipu_psys_manifest32)) || + put_user(kp->index, &up->index) || + put_user(kp->size, &up->size) || put_user(ptr, &up->manifest)) + return -EFAULT; + + return 0; +} + +#define IPU_IOC_GETBUF32 _IOWR('A', 4, struct ipu_psys_buffer32) +#define IPU_IOC_PUTBUF32 _IOWR('A', 5, struct ipu_psys_buffer32) +#define IPU_IOC_QCMD32 _IOWR('A', 6, struct ipu_psys_command32) +#define IPU_IOC_CMD_CANCEL32 _IOWR('A', 8, struct ipu_psys_command32) +#define IPU_IOC_GET_MANIFEST32 _IOWR('A', 9, struct ipu_psys_manifest32) + +long ipu_psys_compat_ioctl32(struct file *file, unsigned int cmd, + unsigned long arg) +{ + union { + struct ipu_psys_buffer buf; + struct ipu_psys_command cmd; + struct ipu_psys_event ev; + struct ipu_psys_manifest m; + } karg; + int compatible_arg = 1; + int err = 0; + void __user *up = compat_ptr(arg); + + switch (cmd) { + case IPU_IOC_GETBUF32: + cmd = IPU_IOC_GETBUF; + break; + case IPU_IOC_PUTBUF32: + cmd = IPU_IOC_PUTBUF; + break; + case IPU_IOC_QCMD32: + cmd = IPU_IOC_QCMD; + break; + case IPU_IOC_GET_MANIFEST32: + cmd = IPU_IOC_GET_MANIFEST; + break; + } + + switch (cmd) { + case 
IPU_IOC_GETBUF: + case IPU_IOC_PUTBUF: + err = get_ipu_psys_buffer32(&karg.buf, up); + compatible_arg = 0; + break; + case IPU_IOC_QCMD: + err = get_ipu_psys_command32(&karg.cmd, up); + compatible_arg = 0; + break; + case IPU_IOC_GET_MANIFEST: + err = get_ipu_psys_manifest32(&karg.m, up); + compatible_arg = 0; + break; + } + if (err) + return err; + + if (compatible_arg) { + err = native_ioctl(file, cmd, (unsigned long)up); + } else { + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); + err = native_ioctl(file, cmd, (unsigned long)&karg); + set_fs(old_fs); + } + + if (err) + return err; + + switch (cmd) { + case IPU_IOC_GETBUF: + err = put_ipu_psys_buffer32(&karg.buf, up); + break; + case IPU_IOC_GET_MANIFEST: + err = put_ipu_psys_manifest32(&karg.m, up); + break; + } + return err; +} +EXPORT_SYMBOL_GPL(ipu_psys_compat_ioctl32); diff --git a/drivers/media/pci/intel/ipu-psys.c b/drivers/media/pci/intel/ipu-psys.c new file mode 100644 index 000000000000..714a79090bf0 --- /dev/null +++ b/drivers/media/pci/intel/ipu-psys.c @@ -0,0 +1,1777 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +#include +#else +#include +#endif +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) +#include +#else +#include +#endif + +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-platform.h" +#include "ipu-buttress.h" +#include "ipu-cpd.h" +#include "ipu-fw-psys.h" +#include "ipu-psys.h" +#include "ipu-platform-regs.h" +#include "ipu-fw-isys.h" +#include "ipu-fw-com.h" + +static bool async_fw_init; +module_param(async_fw_init, bool, 0664); +MODULE_PARM_DESC(async_fw_init, "Enable asynchronous firmware initialization"); + +#define IPU_PSYS_NUM_DEVICES 4 +#define IPU_PSYS_AUTOSUSPEND_DELAY 2000 + +#ifdef CONFIG_PM +static int psys_runtime_pm_resume(struct device *dev); +static int psys_runtime_pm_suspend(struct device *dev); +#else +#define pm_runtime_dont_use_autosuspend(d) +#define pm_runtime_use_autosuspend(d) +#define pm_runtime_set_autosuspend_delay(d, f) 0 +#define pm_runtime_get_sync(d) 0 +#define pm_runtime_put(d) 0 +#define pm_runtime_put_sync(d) 0 +#define pm_runtime_put_noidle(d) 0 +#define pm_runtime_put_autosuspend(d) 0 +#endif + +static dev_t ipu_psys_dev_t; +static DECLARE_BITMAP(ipu_psys_devices, IPU_PSYS_NUM_DEVICES); +static DEFINE_MUTEX(ipu_psys_mutex); + +static struct fw_init_task { + struct delayed_work work; + struct ipu_psys *psys; +} fw_init_task; + +static void ipu_psys_remove(struct ipu_bus_device *adev); + +static struct bus_type ipu_psys_bus = { + .name = IPU_PSYS_NAME, +}; + +static struct ipu_psys_capability caps = { + .version = 1, + .driver = "ipu-psys", +}; + +struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size) +{ + struct ipu_psys_pg *kpg; + unsigned long flags; + + spin_lock_irqsave(&psys->pgs_lock, flags); + list_for_each_entry(kpg, &psys->pgs, list) { + if (!kpg->pg_size && kpg->size >= pg_size) { + kpg->pg_size = pg_size; + spin_unlock_irqrestore(&psys->pgs_lock, flags); + return kpg; + } + } + spin_unlock_irqrestore(&psys->pgs_lock, flags); + /* no big enough buffer available, allocate new one */ + kpg = kzalloc(sizeof(*kpg), GFP_KERNEL); + if (!kpg) + return NULL; + + kpg->pg = dma_alloc_attrs(&psys->adev->dev, pg_size, + &kpg->pg_dma_addr, GFP_KERNEL, + DMA_ATTR_NON_CONSISTENT); + if 
(!kpg->pg) { + kfree(kpg); + return NULL; + } + + kpg->pg_size = pg_size; + kpg->size = pg_size; + spin_lock_irqsave(&psys->pgs_lock, flags); + list_add(&kpg->list, &psys->pgs); + spin_unlock_irqrestore(&psys->pgs_lock, flags); + + return kpg; +} + +struct ipu_psys_kbuffer *ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd) +{ + struct ipu_psys_kbuffer *kbuffer; + + list_for_each_entry(kbuffer, &fh->bufmap, list) { + if (kbuffer->fd == fd) + return kbuffer; + } + + return NULL; +} + +struct ipu_psys_kbuffer * +ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr) +{ + struct ipu_psys_kbuffer *kbuffer; + + list_for_each_entry(kbuffer, &fh->bufmap, list) { + if (kbuffer->kaddr == kaddr) + return kbuffer; + } + + return NULL; +} + +static int ipu_psys_get_userpages(struct ipu_dma_buf_attach *attach) +{ + struct vm_area_struct *vma; + unsigned long start, end; + int npages, array_size; + struct page **pages; + struct sg_table *sgt; + int nr = 0; + int ret = -ENOMEM; + + start = (unsigned long)attach->userptr; + end = PAGE_ALIGN(start + attach->len); + npages = (end - (start & PAGE_MASK)) >> PAGE_SHIFT; + array_size = npages * sizeof(struct page *); + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return -ENOMEM; + + if (array_size <= PAGE_SIZE) + pages = kzalloc(array_size, GFP_KERNEL); + else + pages = vzalloc(array_size); + if (!pages) + goto free_sgt; + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, start); + if (!vma) { + ret = -EFAULT; + goto error_up_read; + } + + if (vma->vm_end < start + attach->len) { + dev_err(attach->dev, + "vma at %lu is too small for %llu bytes\n", + start, attach->len); + ret = -EFAULT; + goto error_up_read; + } + + /* + * For buffers from Gralloc, VM_PFNMAP is expected, + * but VM_IO is set. Possibly bug in Gralloc. 
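+	 * Either flag is taken to mean pfn-mapped memory here, so the
+	 * pages are resolved with follow_pfn() below instead of
+	 * get_user_pages().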
+ */ + attach->vma_is_io = vma->vm_flags & (VM_IO | VM_PFNMAP); + + if (attach->vma_is_io) { + unsigned long io_start = start; + + for (nr = 0; nr < npages; nr++, io_start += PAGE_SIZE) { + unsigned long pfn; + + ret = follow_pfn(vma, io_start, &pfn); + if (ret) + goto error_up_read; + pages[nr] = pfn_to_page(pfn); + } + } else { + nr = get_user_pages( +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + current, current->mm, +#endif + start & PAGE_MASK, npages, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) + 1, 0, +#else + FOLL_WRITE, +#endif + pages, NULL); + if (nr < npages) + goto error_up_read; + } + up_read(¤t->mm->mmap_sem); + + ret = sg_alloc_table_from_pages(sgt, pages, npages, + start & ~PAGE_MASK, attach->len, + GFP_KERNEL); + if (ret < 0) + goto error; + + attach->sgt = sgt; + attach->pages = pages; + attach->npages = npages; + + return 0; + +error_up_read: + up_read(¤t->mm->mmap_sem); +error: + if (!attach->vma_is_io) + while (nr > 0) + put_page(pages[--nr]); + + if (array_size <= PAGE_SIZE) + kfree(pages); + else + vfree(pages); +free_sgt: + kfree(sgt); + + dev_err(attach->dev, "failed to get userpages:%d\n", ret); + + return ret; +} + +static void ipu_psys_put_userpages(struct ipu_dma_buf_attach *attach) +{ + if (!attach || !attach->userptr || !attach->sgt) + return; + + if (!attach->vma_is_io) { + int i = attach->npages; + + while (--i >= 0) { + set_page_dirty_lock(attach->pages[i]); + put_page(attach->pages[i]); + } + } + + if (is_vmalloc_addr(attach->pages)) + vfree(attach->pages); + else + kfree(attach->pages); + + sg_free_table(attach->sgt); + kfree(attach->sgt); + attach->sgt = NULL; +} + +static int ipu_dma_buf_attach(struct dma_buf *dbuf, struct device *dev, + struct dma_buf_attachment *attach) +{ + struct ipu_psys_kbuffer *kbuf = dbuf->priv; + struct ipu_dma_buf_attach *ipu_attach; + + ipu_attach = kzalloc(sizeof(*ipu_attach), GFP_KERNEL); + if (!ipu_attach) + return -ENOMEM; + + ipu_attach->dev = dev; + ipu_attach->len = kbuf->len; + ipu_attach->userptr = kbuf->userptr; + + attach->priv = ipu_attach; + return 0; +} + +static void ipu_dma_buf_detach(struct dma_buf *dbuf, + struct dma_buf_attachment *attach) +{ + struct ipu_dma_buf_attach *ipu_attach = attach->priv; + + kfree(ipu_attach); + attach->priv = NULL; +} + +static struct sg_table *ipu_dma_buf_map(struct dma_buf_attachment *attach, + enum dma_data_direction dir) +{ + struct ipu_dma_buf_attach *ipu_attach = attach->priv; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + DEFINE_DMA_ATTRS(attrs); +#else + unsigned long attrs; +#endif + int ret; + + ret = ipu_psys_get_userpages(ipu_attach); + if (ret) + return NULL; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + ret = dma_map_sg_attrs(attach->dev, ipu_attach->sgt->sgl, + ipu_attach->sgt->orig_nents, dir, &attrs); +#else + attrs = DMA_ATTR_SKIP_CPU_SYNC; + ret = dma_map_sg_attrs(attach->dev, ipu_attach->sgt->sgl, + ipu_attach->sgt->orig_nents, dir, attrs); +#endif + if (ret < ipu_attach->sgt->orig_nents) { + ipu_psys_put_userpages(ipu_attach); + dev_dbg(attach->dev, "buf map failed\n"); + + return ERR_PTR(-EIO); + } + + /* + * Initial cache flush to avoid writing dirty pages for buffers which + * are later marked as IPU_BUFFER_FLAG_NO_FLUSH. 
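+	 * Syncing the whole buffer once at map time means such buffers
+	 * do not need another CPU cache flush later on.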
+ */ + dma_sync_sg_for_device(attach->dev, ipu_attach->sgt->sgl, + ipu_attach->sgt->orig_nents, DMA_BIDIRECTIONAL); + + return ipu_attach->sgt; +} + +static void ipu_dma_buf_unmap(struct dma_buf_attachment *attach, + struct sg_table *sg, enum dma_data_direction dir) +{ + struct ipu_dma_buf_attach *ipu_attach = attach->priv; + + dma_unmap_sg(attach->dev, sg->sgl, sg->orig_nents, dir); + ipu_psys_put_userpages(ipu_attach); +} + +static int ipu_dma_buf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma) +{ + return -ENOTTY; +} + +static void *ipu_dma_buf_kmap(struct dma_buf *dbuf, unsigned long pgnum) +{ + return NULL; +} + +static void *ipu_dma_buf_kmap_atomic(struct dma_buf *dbuf, unsigned long pgnum) +{ + return NULL; +} + +static void ipu_dma_buf_release(struct dma_buf *buf) +{ + struct ipu_psys_kbuffer *kbuf = buf->priv; + + if (!kbuf) + return; + + dev_dbg(&kbuf->psys->adev->dev, "releasing buffer %d\n", kbuf->fd); + + if (kbuf->db_attach) + ipu_psys_put_userpages(kbuf->db_attach->priv); + kfree(kbuf); +} + +static int ipu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) + size_t start, size_t len, +#endif + enum dma_data_direction dir) +{ + return -ENOTTY; +} + +static void *ipu_dma_buf_vmap(struct dma_buf *dmabuf) +{ + struct dma_buf_attachment *attach; + struct ipu_dma_buf_attach *ipu_attach; + + if (list_empty(&dmabuf->attachments)) + return NULL; + + attach = list_last_entry(&dmabuf->attachments, + struct dma_buf_attachment, node); + ipu_attach = attach->priv; + + if (!ipu_attach || !ipu_attach->pages || !ipu_attach->npages) + return NULL; + + return vm_map_ram(ipu_attach->pages, + ipu_attach->npages, 0, PAGE_KERNEL); +} + +static void ipu_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) +{ + struct dma_buf_attachment *attach; + struct ipu_dma_buf_attach *ipu_attach; + + if (WARN_ON(list_empty(&dmabuf->attachments))) + return; + + attach = list_last_entry(&dmabuf->attachments, + struct dma_buf_attachment, node); + ipu_attach = attach->priv; + + if (WARN_ON(!ipu_attach || !ipu_attach->pages || !ipu_attach->npages)) + return; + + vm_unmap_ram(vaddr, ipu_attach->npages); +} + +static struct dma_buf_ops ipu_dma_buf_ops = { + .attach = ipu_dma_buf_attach, + .detach = ipu_dma_buf_detach, + .map_dma_buf = ipu_dma_buf_map, + .unmap_dma_buf = ipu_dma_buf_unmap, + .release = ipu_dma_buf_release, + .begin_cpu_access = ipu_dma_buf_begin_cpu_access, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) + .kmap = ipu_dma_buf_kmap, + .kmap_atomic = ipu_dma_buf_kmap_atomic, +#else + .map = ipu_dma_buf_kmap, + .map_atomic = ipu_dma_buf_kmap_atomic, +#endif + .mmap = ipu_dma_buf_mmap, + .vmap = ipu_dma_buf_vmap, + .vunmap = ipu_dma_buf_vunmap, +}; + +static int ipu_psys_open(struct inode *inode, struct file *file) +{ + struct ipu_psys *psys = inode_to_ipu_psys(inode); + struct ipu_device *isp = psys->adev->isp; + struct ipu_psys_fh *fh; + struct ipu_psys_buffer_set *kbuf_set, *kbuf_set_tmp; + int i, rval; + + if (isp->flr_done) + return -EIO; + + rval = ipu_buttress_authenticate(isp); + if (rval) { + dev_err(&psys->adev->dev, "FW authentication failed\n"); + return rval; + } + + pm_runtime_use_autosuspend(&psys->adev->dev); + + fh = kzalloc(sizeof(*fh), GFP_KERNEL); + if (!fh) + return -ENOMEM; + + mutex_init(&fh->mutex); + INIT_LIST_HEAD(&fh->bufmap); + for (i = 0; i < IPU_PSYS_CMD_PRIORITY_NUM; i++) + INIT_LIST_HEAD(&fh->kcmds[i]); + + init_waitqueue_head(&fh->wait); + + mutex_init(&fh->bs_mutex); + INIT_LIST_HEAD(&fh->buf_sets); + + /* 
allocate and map memory for buf_sets */ + for (i = 0; i < IPU_PSYS_BUF_SET_POOL_SIZE; i++) { + kbuf_set = kzalloc(sizeof(*kbuf_set), GFP_KERNEL); + if (!kbuf_set) + goto out_free_buf_sets; + kbuf_set->kaddr = dma_alloc_attrs(&psys->adev->dev, + IPU_PSYS_BUF_SET_MAX_SIZE, + &kbuf_set->dma_addr, + GFP_KERNEL, + DMA_ATTR_NON_CONSISTENT); + if (!kbuf_set->kaddr) { + kfree(kbuf_set); + goto out_free_buf_sets; + } + kbuf_set->size = IPU_PSYS_BUF_SET_MAX_SIZE; + list_add(&kbuf_set->list, &fh->buf_sets); + } + + fh->psys = psys; + file->private_data = fh; + + mutex_lock(&psys->mutex); + list_add_tail(&fh->list, &psys->fhs); + mutex_unlock(&psys->mutex); + + return 0; + +out_free_buf_sets: + list_for_each_entry_safe(kbuf_set, kbuf_set_tmp, &fh->buf_sets, list) { + dma_free_attrs(&psys->adev->dev, + kbuf_set->size, kbuf_set->kaddr, + kbuf_set->dma_addr, DMA_ATTR_NON_CONSISTENT); + list_del(&kbuf_set->list); + kfree(kbuf_set); + } + mutex_destroy(&fh->bs_mutex); + mutex_destroy(&fh->mutex); + kfree(fh); + return -ENOMEM; +} + +static int ipu_psys_release(struct inode *inode, struct file *file) +{ + struct ipu_psys *psys = inode_to_ipu_psys(inode); + struct ipu_psys_fh *fh = file->private_data; + struct ipu_psys_kbuffer *kbuf, *kbuf0; + struct ipu_psys_kcmd *kcmd, *kcmd0; + struct ipu_psys_buffer_set *kbuf_set, *kbuf_set0; + int p; + + mutex_lock(&psys->mutex); + mutex_lock(&fh->mutex); + + /* + * Set pg_user to NULL so that completed kcmds don't write + * their result to user space anymore. + */ + for (p = 0; p < IPU_PSYS_CMD_PRIORITY_NUM; p++) + list_for_each_entry(kcmd, &fh->kcmds[p], list) + kcmd->pg_user = NULL; + + /* Prevent scheduler from running more kcmds */ + memset(fh->new_kcmd_tail, 0, sizeof(fh->new_kcmd_tail)); + + /* Wait until kcmds are completed in this queue and free them */ + for (p = 0; p < IPU_PSYS_CMD_PRIORITY_NUM; p++) { + fh->new_kcmd_tail[p] = NULL; + list_for_each_entry_safe(kcmd, kcmd0, &fh->kcmds[p], list) { + ipu_psys_kcmd_abort(psys, kcmd, -EIO); + ipu_psys_kcmd_free(kcmd); + } + } + + mutex_lock(&fh->bs_mutex); + list_for_each_entry_safe(kbuf_set, kbuf_set0, &fh->buf_sets, list) { + dma_free_attrs(&psys->adev->dev, + kbuf_set->size, kbuf_set->kaddr, + kbuf_set->dma_addr, DMA_ATTR_NON_CONSISTENT); + list_del(&kbuf_set->list); + kfree(kbuf_set); + } + mutex_unlock(&fh->bs_mutex); + + /* clean up buffers */ + list_for_each_entry_safe(kbuf, kbuf0, &fh->bufmap, list) { + list_del(&kbuf->list); + /* Unmap and release buffers */ + if (kbuf->dbuf && kbuf->db_attach) { + struct dma_buf *dbuf; + + dma_buf_vunmap(kbuf->dbuf, kbuf->kaddr); + dma_buf_unmap_attachment(kbuf->db_attach, kbuf->sgt, + DMA_BIDIRECTIONAL); + dma_buf_detach(kbuf->dbuf, kbuf->db_attach); + dbuf = kbuf->dbuf; + kbuf->dbuf = NULL; + kbuf->db_attach = NULL; + dma_buf_put(dbuf); + } else { + if (kbuf->db_attach) + ipu_psys_put_userpages(kbuf->db_attach->priv); + kfree(kbuf); + } + } + + list_del(&fh->list); + + /* disable runtime autosuspend for the last fh */ + if (list_empty(&psys->fhs)) + pm_runtime_dont_use_autosuspend(&psys->adev->dev); + + mutex_unlock(&fh->mutex); + mutex_unlock(&psys->mutex); + + mutex_destroy(&fh->bs_mutex); + mutex_destroy(&fh->mutex); + kfree(fh); + + return 0; +} + +static int ipu_psys_getbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh) +{ + struct ipu_psys_kbuffer *kbuf; + struct ipu_psys *psys = fh->psys; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); +#endif + struct dma_buf *dbuf; + int ret; + + if (!buf->base.userptr) { + 
dev_err(&psys->adev->dev, "Buffer allocation not supported\n"); + return -EINVAL; + } + + kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL); + if (!kbuf) + return -ENOMEM; + + kbuf->len = buf->len; + kbuf->userptr = buf->base.userptr; + kbuf->flags = buf->flags; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) + exp_info.ops = &ipu_dma_buf_ops; + exp_info.size = kbuf->len; + exp_info.flags = O_RDWR; + exp_info.priv = kbuf; + + dbuf = dma_buf_export(&exp_info); +#else + dbuf = dma_buf_export(kbuf, &ipu_dma_buf_ops, kbuf->len, 0); +#endif + if (IS_ERR(dbuf)) { + kfree(kbuf); + return PTR_ERR(dbuf); + } + + ret = dma_buf_fd(dbuf, 0); + if (ret < 0) { + kfree(kbuf); + return ret; + } + + dev_dbg(&psys->adev->dev, "IOC_GETBUF: userptr %p", buf->base.userptr); + + kbuf->fd = ret; + buf->base.fd = ret; + kbuf->psys = psys; + kbuf->fh = fh; + kbuf->flags = buf->flags &= ~IPU_BUFFER_FLAG_USERPTR; + kbuf->flags = buf->flags |= IPU_BUFFER_FLAG_DMA_HANDLE; + + mutex_lock(&fh->mutex); + list_add_tail(&kbuf->list, &fh->bufmap); + mutex_unlock(&fh->mutex); + + dev_dbg(&psys->adev->dev, "to %d\n", buf->base.fd); + + return 0; +} + +static int ipu_psys_putbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh) +{ + return 0; +} + +static struct ipu_psys_kcmd *__ipu_get_completed_kcmd(struct ipu_psys *psys, + struct ipu_psys_fh *fh) +{ + int p; + + for (p = 0; p < IPU_PSYS_CMD_PRIORITY_NUM; p++) { + struct ipu_psys_kcmd *kcmd; + + if (list_empty(&fh->kcmds[p])) + continue; + kcmd = list_first_entry(&fh->kcmds[p], + struct ipu_psys_kcmd, list); + if (kcmd->state != KCMD_STATE_COMPLETE) + continue; + /* Found a kcmd in completed state */ + return kcmd; + } + + return NULL; +} + +static struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys *psys, + struct ipu_psys_fh *fh) +{ + struct ipu_psys_kcmd *kcmd; + + mutex_lock(&fh->mutex); + kcmd = __ipu_get_completed_kcmd(psys, fh); + mutex_unlock(&fh->mutex); + + return kcmd; +} + +static long ipu_ioctl_dqevent(struct ipu_psys_event *event, + struct ipu_psys_fh *fh, unsigned int f_flags) +{ + struct ipu_psys *psys = fh->psys; + struct ipu_psys_kcmd *kcmd = NULL; + int rval; + + dev_dbg(&psys->adev->dev, "IOC_DQEVENT\n"); + + if (!(f_flags & O_NONBLOCK)) { + rval = wait_event_interruptible(fh->wait, + (kcmd = + ipu_get_completed_kcmd(psys, + fh))); + if (rval == -ERESTARTSYS) + return rval; + } + + mutex_lock(&fh->mutex); + if (!kcmd) { + kcmd = __ipu_get_completed_kcmd(psys, fh); + if (!kcmd) { + mutex_unlock(&fh->mutex); + return -ENODATA; + } + } + + *event = kcmd->ev; + ipu_psys_kcmd_free(kcmd); + mutex_unlock(&fh->mutex); + + return 0; +} + +static long ipu_psys_mapbuf(int fd, struct ipu_psys_fh *fh) +{ + struct ipu_psys *psys = fh->psys; + struct ipu_psys_kbuffer *kbuf; + struct dma_buf *dbuf; + int ret; + + mutex_lock(&fh->mutex); + kbuf = ipu_psys_lookup_kbuffer(fh, fd); + + if (!kbuf) { + /* This fd isn't generated by ipu_psys_getbuf, it + * is a new fd. Create a new kbuf item for this fd, and + * add this kbuf to bufmap list. 
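+	 * The dma-buf behind the fd is attached, mapped and vmapped below
+	 * before the buffer is marked valid.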
+ */ + kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL); + if (!kbuf) { + mutex_unlock(&fh->mutex); + return -ENOMEM; + } + + kbuf->psys = psys; + kbuf->fh = fh; + list_add_tail(&kbuf->list, &fh->bufmap); + } + + if (kbuf->sgt) { + dev_dbg(&psys->adev->dev, "has been mapped!\n"); + goto mapbuf_end; + } + + kbuf->dbuf = dma_buf_get(fd); + if (IS_ERR(kbuf->dbuf)) { + if (!kbuf->userptr) { + list_del(&kbuf->list); + kfree(kbuf); + } + mutex_unlock(&fh->mutex); + return -EINVAL; + } + + if (kbuf->len == 0) + kbuf->len = kbuf->dbuf->size; + + kbuf->fd = fd; + + kbuf->db_attach = dma_buf_attach(kbuf->dbuf, &psys->adev->dev); + if (IS_ERR(kbuf->db_attach)) { + ret = PTR_ERR(kbuf->db_attach); + goto error_put; + } + + kbuf->sgt = dma_buf_map_attachment(kbuf->db_attach, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(kbuf->sgt)) { + ret = -EINVAL; + kbuf->sgt = NULL; + dev_dbg(&psys->adev->dev, "map attachment failed\n"); + goto error_detach; + } + + kbuf->dma_addr = sg_dma_address(kbuf->sgt->sgl); + + kbuf->kaddr = dma_buf_vmap(kbuf->dbuf); + if (!kbuf->kaddr) { + ret = -EINVAL; + goto error_unmap; + } + +mapbuf_end: + + kbuf->valid = true; + + mutex_unlock(&fh->mutex); + + dev_dbg(&psys->adev->dev, "IOC_MAPBUF: mapped fd %d\n", fd); + + return 0; + +error_unmap: + dma_buf_unmap_attachment(kbuf->db_attach, kbuf->sgt, DMA_BIDIRECTIONAL); +error_detach: + dma_buf_detach(kbuf->dbuf, kbuf->db_attach); + kbuf->db_attach = NULL; +error_put: + list_del(&kbuf->list); + dbuf = kbuf->dbuf; + + if (!kbuf->userptr) + kfree(kbuf); + + mutex_unlock(&fh->mutex); + dma_buf_put(dbuf); + + return ret; +} + +static long ipu_psys_unmapbuf(int fd, struct ipu_psys_fh *fh) +{ + struct ipu_psys_kbuffer *kbuf; + struct ipu_psys *psys = fh->psys; + struct dma_buf *dmabuf; + + mutex_lock(&fh->mutex); + kbuf = ipu_psys_lookup_kbuffer(fh, fd); + if (!kbuf) { + dev_dbg(&psys->adev->dev, "buffer %d not found\n", fd); + mutex_unlock(&fh->mutex); + return -EINVAL; + } + + /* From now on it is not safe to use this kbuffer */ + kbuf->valid = false; + + dma_buf_vunmap(kbuf->dbuf, kbuf->kaddr); + dma_buf_unmap_attachment(kbuf->db_attach, kbuf->sgt, DMA_BIDIRECTIONAL); + + dma_buf_detach(kbuf->dbuf, kbuf->db_attach); + + dmabuf = kbuf->dbuf; + + kbuf->db_attach = NULL; + kbuf->dbuf = NULL; + + list_del(&kbuf->list); + + if (!kbuf->userptr) + kfree(kbuf); + + mutex_unlock(&fh->mutex); + dma_buf_put(dmabuf); + + dev_dbg(&psys->adev->dev, "IOC_UNMAPBUF: fd %d\n", fd); + + return 0; +} + +static unsigned int ipu_psys_poll(struct file *file, + struct poll_table_struct *wait) +{ + struct ipu_psys_fh *fh = file->private_data; + struct ipu_psys *psys = fh->psys; + unsigned int res = 0; + + dev_dbg(&psys->adev->dev, "ipu psys poll\n"); + + poll_wait(file, &fh->wait, wait); + + if (ipu_get_completed_kcmd(psys, fh)) + res = POLLIN; + + dev_dbg(&psys->adev->dev, "ipu psys poll res %u\n", res); + + return res; +} + +static long ipu_get_manifest(struct ipu_psys_manifest *manifest, + struct ipu_psys_fh *fh) +{ + struct ipu_psys *psys = fh->psys; + struct ipu_device *isp = psys->adev->isp; + struct ipu_cpd_client_pkg_hdr *client_pkg; + u32 entries; + void *host_fw_data; + dma_addr_t dma_fw_data; + u32 client_pkg_offset; + + host_fw_data = (void *)isp->cpd_fw->data; + dma_fw_data = sg_dma_address(psys->fw_sgt.sgl); + + entries = ipu_cpd_pkg_dir_get_num_entries(psys->pkg_dir); + if (!manifest || manifest->index > entries - 1) { + dev_err(&psys->adev->dev, "invalid argument\n"); + return -EINVAL; + } + + if (!ipu_cpd_pkg_dir_get_size(psys->pkg_dir, manifest->index) 
|| + ipu_cpd_pkg_dir_get_type(psys->pkg_dir, manifest->index) < + IPU_CPD_PKG_DIR_CLIENT_PG_TYPE) { + dev_dbg(&psys->adev->dev, "invalid pkg dir entry\n"); + return -ENOENT; + } + + client_pkg_offset = ipu_cpd_pkg_dir_get_address(psys->pkg_dir, + manifest->index); + client_pkg_offset -= dma_fw_data; + + client_pkg = host_fw_data + client_pkg_offset; + manifest->size = client_pkg->pg_manifest_size; + + if (!manifest->manifest) + return 0; + + if (copy_to_user(manifest->manifest, + (uint8_t *) client_pkg + client_pkg->pg_manifest_offs, + manifest->size)) { + return -EFAULT; + } + + return 0; +} + +static long ipu_psys_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + union { + struct ipu_psys_buffer buf; + struct ipu_psys_command cmd; + struct ipu_psys_event ev; + struct ipu_psys_capability caps; + struct ipu_psys_manifest m; + } karg; + struct ipu_psys_fh *fh = file->private_data; + int err = 0; + void __user *up = (void __user *)arg; + bool copy = (cmd != IPU_IOC_MAPBUF && cmd != IPU_IOC_UNMAPBUF); + + if (copy) { + if (_IOC_SIZE(cmd) > sizeof(karg)) + return -ENOTTY; + + if (_IOC_DIR(cmd) & _IOC_WRITE) { + err = copy_from_user(&karg, up, _IOC_SIZE(cmd)); + if (err) + return -EFAULT; + } + } + + switch (cmd) { + case IPU_IOC_MAPBUF: + err = ipu_psys_mapbuf(arg, fh); + break; + case IPU_IOC_UNMAPBUF: + err = ipu_psys_unmapbuf(arg, fh); + break; + case IPU_IOC_QUERYCAP: + karg.caps = caps; + break; + case IPU_IOC_GETBUF: + err = ipu_psys_getbuf(&karg.buf, fh); + break; + case IPU_IOC_PUTBUF: + err = ipu_psys_putbuf(&karg.buf, fh); + break; + case IPU_IOC_QCMD: + err = ipu_psys_kcmd_new(&karg.cmd, fh); + break; + case IPU_IOC_DQEVENT: + err = ipu_ioctl_dqevent(&karg.ev, fh, file->f_flags); + break; + case IPU_IOC_GET_MANIFEST: + err = ipu_get_manifest(&karg.m, fh); + break; + default: + err = -ENOTTY; + break; + } + + if (err) + return err; + + if (copy && _IOC_DIR(cmd) & _IOC_READ) + if (copy_to_user(up, &karg, _IOC_SIZE(cmd))) + return -EFAULT; + + return 0; +} + +static const struct file_operations ipu_psys_fops = { + .open = ipu_psys_open, + .release = ipu_psys_release, + .unlocked_ioctl = ipu_psys_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ipu_psys_compat_ioctl32, +#endif + .poll = ipu_psys_poll, + .owner = THIS_MODULE, +}; + +static void ipu_psys_dev_release(struct device *dev) +{ +} + +#ifdef CONFIG_PM +static int psys_runtime_pm_resume(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_psys *psys = ipu_bus_get_drvdata(adev); + unsigned long flags; + int retval; + + if (!psys) { + WARN(1, "%s called before probing. skipping.\n", __func__); + return 0; + } + /* + * In runtime autosuspend mode, if the psys is in power on state, no + * need to resume again. 
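+	 * psys->power is checked under power_lock so that the check stays
+	 * coherent with the update in psys_runtime_pm_suspend().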
+ */ + spin_lock_irqsave(&psys->power_lock, flags); + if (psys->power) { + spin_unlock_irqrestore(&psys->power_lock, flags); + return 0; + } + spin_unlock_irqrestore(&psys->power_lock, flags); + + if (async_fw_init && !psys->fwcom) { + dev_err(dev, + "%s: asynchronous firmware init not finished, skipping\n", + __func__); + return 0; + } + + if (!ipu_buttress_auth_done(adev->isp)) { + dev_err(dev, "%s: not yet authenticated, skipping\n", __func__); + return 0; + } + + ipu_psys_setup_hw(psys); + + ipu_trace_restore(&psys->adev->dev); + + ipu_configure_spc(adev->isp, + &psys->pdata->ipdata->hw_variant, + IPU_CPD_PKG_DIR_PSYS_SERVER_IDX, + psys->pdata->base, psys->pkg_dir, + psys->pkg_dir_dma_addr); + + retval = ipu_fw_psys_open(psys); + if (retval) { + dev_err(&psys->adev->dev, "Failed to open abi.\n"); + return retval; + } + + spin_lock_irqsave(&psys->power_lock, flags); + psys->power = 1; + spin_unlock_irqrestore(&psys->power_lock, flags); + + return 0; +} + +static int psys_runtime_pm_suspend(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_psys *psys = ipu_bus_get_drvdata(adev); + unsigned long flags; + int rval; + + if (!psys) { + WARN(1, "%s called before probing. skipping.\n", __func__); + return 0; + } + + if (!psys->power) + return 0; + + spin_lock_irqsave(&psys->power_lock, flags); + psys->power = 0; + spin_unlock_irqrestore(&psys->power_lock, flags); + + /* + * We can trace failure but better to not return an error. + * At suspend we are progressing towards psys power gated state. + * Any hang / failure inside psys will be forgotten soon. + */ + rval = ipu_fw_psys_close(psys); + if (rval) + dev_err(dev, "Device close failure: %d\n", rval); + + return 0; +} + +static const struct dev_pm_ops psys_pm_ops = { + .runtime_suspend = psys_runtime_pm_suspend, + .runtime_resume = psys_runtime_pm_resume, +}; + +#define PSYS_PM_OPS (&psys_pm_ops) +#else +#define PSYS_PM_OPS NULL +#endif + +static int cpd_fw_reload(struct ipu_device *isp) +{ + struct ipu_psys *psys = ipu_bus_get_drvdata(isp->psys); + int rval; + + if (!isp->secure_mode) { + dev_warn(&isp->pdev->dev, + "CPD firmware reload was only supported for secure mode.\n"); + return -EINVAL; + } + + if (isp->cpd_fw) { + ipu_cpd_free_pkg_dir(isp->psys, psys->pkg_dir, + psys->pkg_dir_dma_addr, + psys->pkg_dir_size); + + ipu_buttress_unmap_fw_image(isp->psys, &psys->fw_sgt); + release_firmware(isp->cpd_fw); + isp->cpd_fw = NULL; + dev_info(&isp->pdev->dev, "Old FW removed\n"); + } + + rval = request_firmware(&isp->cpd_fw, isp->cpd_fw_name, + &isp->pdev->dev); + if (rval) { + dev_err(&isp->pdev->dev, "Requesting firmware(%s) failed\n", + IPU_CPD_FIRMWARE_NAME); + return rval; + } + + rval = ipu_cpd_validate_cpd_file(isp, isp->cpd_fw->data, + isp->cpd_fw->size); + if (rval) { + dev_err(&isp->pdev->dev, "Failed to validate cpd file\n"); + goto out_release_firmware; + } + + rval = ipu_buttress_map_fw_image(isp->psys, isp->cpd_fw, &psys->fw_sgt); + if (rval) + goto out_release_firmware; + + psys->pkg_dir = ipu_cpd_create_pkg_dir(isp->psys, + isp->cpd_fw->data, + sg_dma_address(psys->fw_sgt.sgl), + &psys->pkg_dir_dma_addr, + &psys->pkg_dir_size); + + if (!psys->pkg_dir) { + rval = -EINVAL; + goto out_unmap_fw_image; + } + + isp->pkg_dir = psys->pkg_dir; + isp->pkg_dir_dma_addr = psys->pkg_dir_dma_addr; + isp->pkg_dir_size = psys->pkg_dir_size; + + if (!isp->secure_mode) + return 0; + + rval = ipu_fw_authenticate(isp, 1); + if (rval) + goto out_free_pkg_dir; + + return 0; + +out_free_pkg_dir: + 
ipu_cpd_free_pkg_dir(isp->psys, psys->pkg_dir, + psys->pkg_dir_dma_addr, psys->pkg_dir_size); +out_unmap_fw_image: + ipu_buttress_unmap_fw_image(isp->psys, &psys->fw_sgt); +out_release_firmware: + release_firmware(isp->cpd_fw); + isp->cpd_fw = NULL; + + return rval; +} + +static int ipu_psys_icache_prefetch_sp_get(void *data, u64 *val) +{ + struct ipu_psys *psys = data; + + *val = psys->icache_prefetch_sp; + return 0; +} + +static int ipu_psys_icache_prefetch_sp_set(void *data, u64 val) +{ + struct ipu_psys *psys = data; + + if (val != !!val) + return -EINVAL; + + psys->icache_prefetch_sp = val; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_sp_fops, + ipu_psys_icache_prefetch_sp_get, + ipu_psys_icache_prefetch_sp_set, "%llu\n"); + +static int ipu_psys_icache_prefetch_isp_get(void *data, u64 *val) +{ + struct ipu_psys *psys = data; + + *val = psys->icache_prefetch_isp; + return 0; +} + +static int ipu_psys_icache_prefetch_isp_set(void *data, u64 val) +{ + struct ipu_psys *psys = data; + + if (val != !!val) + return -EINVAL; + + psys->icache_prefetch_isp = val; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_isp_fops, + ipu_psys_icache_prefetch_isp_get, + ipu_psys_icache_prefetch_isp_set, "%llu\n"); + +static int ipu_psys_init_debugfs(struct ipu_psys *psys) +{ + struct dentry *file; + struct dentry *dir; + + dir = debugfs_create_dir("psys", psys->adev->isp->ipu_dir); + if (IS_ERR(dir)) + return -ENOMEM; + + file = debugfs_create_file("icache_prefetch_sp", 0600, + dir, psys, &psys_icache_prefetch_sp_fops); + if (IS_ERR(file)) + goto err; + + file = debugfs_create_file("icache_prefetch_isp", 0600, + dir, psys, &psys_icache_prefetch_isp_fops); + if (IS_ERR(file)) + goto err; + + psys->debugfsdir = dir; + +#ifdef IPU_PSYS_GPC + if (ipu_psys_gpc_init_debugfs(psys)) + return -ENOMEM; +#endif + + return 0; +err: + debugfs_remove_recursive(dir); + return -ENOMEM; +} + +static int ipu_psys_sched_cmd(void *ptr) +{ + struct ipu_psys *psys = ptr; + size_t pending = 0; + + while (1) { + wait_event_interruptible(psys->sched_cmd_wq, + (kthread_should_stop() || (pending = + atomic_read(&psys->wakeup_sched_thread_count)))); + + if (kthread_should_stop()) + break; + + if (pending == 0) + continue; + + mutex_lock(&psys->mutex); + atomic_set(&psys->wakeup_sched_thread_count, 0); + ipu_psys_run_next(psys); + mutex_unlock(&psys->mutex); + } + + return 0; +} + +static void start_sp(struct ipu_bus_device *adev) +{ + struct ipu_psys *psys = ipu_bus_get_drvdata(adev); + void __iomem *spc_regs_base = psys->pdata->base + + psys->pdata->ipdata->hw_variant.spc_offset; + u32 val = 0; + + val |= IPU_ISYS_SPC_STATUS_START | + IPU_ISYS_SPC_STATUS_RUN | + IPU_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; + val |= psys->icache_prefetch_sp ? 
+ IPU_ISYS_SPC_STATUS_ICACHE_PREFETCH : 0; + writel(val, spc_regs_base + IPU_ISYS_REG_SPC_STATUS_CTRL); +} + +static int query_sp(struct ipu_bus_device *adev) +{ + struct ipu_psys *psys = ipu_bus_get_drvdata(adev); + void __iomem *spc_regs_base = psys->pdata->base + + psys->pdata->ipdata->hw_variant.spc_offset; + u32 val = readl(spc_regs_base + IPU_ISYS_REG_SPC_STATUS_CTRL); + + /* return true when READY == 1, START == 0 */ + val &= IPU_ISYS_SPC_STATUS_READY | IPU_ISYS_SPC_STATUS_START; + + return val == IPU_ISYS_SPC_STATUS_READY; +} + +static int ipu_psys_fw_init(struct ipu_psys *psys) +{ + struct ipu_fw_syscom_queue_config + fw_psys_cmd_queue_cfg[IPU_FW_PSYS_N_PSYS_CMD_QUEUE_ID]; + struct ipu_fw_syscom_queue_config fw_psys_event_queue_cfg[] = { + { + IPU_FW_PSYS_EVENT_QUEUE_SIZE, + sizeof(struct ipu_fw_psys_event) + } + }; + struct ipu_fw_psys_srv_init server_init = { + .ddr_pkg_dir_address = 0, + .host_ddr_pkg_dir = NULL, + .pkg_dir_size = 0, + .icache_prefetch_sp = psys->icache_prefetch_sp, + .icache_prefetch_isp = psys->icache_prefetch_isp, + }; + struct ipu_fw_com_cfg fwcom = { + .num_input_queues = IPU_FW_PSYS_N_PSYS_CMD_QUEUE_ID, + .num_output_queues = IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID, + .output = fw_psys_event_queue_cfg, + .specific_addr = &server_init, + .specific_size = sizeof(server_init), + .cell_start = start_sp, + .cell_ready = query_sp, + }; + int rval, i; + + for (i = 0; i < IPU_FW_PSYS_N_PSYS_CMD_QUEUE_ID; i++) { + fw_psys_cmd_queue_cfg[i].queue_size = + IPU_FW_PSYS_CMD_QUEUE_SIZE; + fw_psys_cmd_queue_cfg[i].token_size = + sizeof(struct ipu_fw_psys_cmd); + } + + fwcom.input = fw_psys_cmd_queue_cfg; + + fwcom.dmem_addr = psys->pdata->ipdata->hw_variant.dmem_offset; + + rval = ipu_buttress_authenticate(psys->adev->isp); + if (rval) { + dev_err(&psys->adev->dev, "FW authentication failed(%d)\n", + rval); + return rval; + } + + psys->fwcom = ipu_fw_com_prepare(&fwcom, psys->adev, psys->pdata->base); + if (!psys->fwcom) { + dev_err(&psys->adev->dev, "psys fw com prepare failed\n"); + return -EIO; + } + + return 0; +} + +static void run_fw_init_work(struct work_struct *work) +{ + struct fw_init_task *task = (struct fw_init_task *)work; + struct ipu_psys *psys = task->psys; + int rval; + + rval = ipu_psys_fw_init(psys); + + if (rval) { + dev_err(&psys->adev->dev, "FW init failed(%d)\n", rval); + ipu_psys_remove(psys->adev); + } else { + dev_info(&psys->adev->dev, "FW init done\n"); + } +} + +static int ipu_psys_probe(struct ipu_bus_device *adev) +{ + struct ipu_mmu *mmu = dev_get_drvdata(adev->iommu); + struct ipu_device *isp = adev->isp; + struct ipu_psys_pg *kpg, *kpg0; + struct ipu_psys *psys; + const struct firmware *fw; + unsigned int minor; + int i, rval = -E2BIG; + + trace_printk("B|%d|TMWK\n", current->pid); + + /* Has the domain been attached? 
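+	 * If the IOMMU driver has not bound to this device yet, the
+	 * drvdata lookup above yields NULL; returning -EPROBE_DEFER asks
+	 * the driver core to retry this probe later instead of failing
+	 * outright.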
*/ + if (!mmu) { + trace_printk("E|TMWK\n"); + return -EPROBE_DEFER; + } + + mutex_lock(&ipu_psys_mutex); + + minor = find_next_zero_bit(ipu_psys_devices, IPU_PSYS_NUM_DEVICES, 0); + if (minor == IPU_PSYS_NUM_DEVICES) { + dev_err(&adev->dev, "too many devices\n"); + goto out_unlock; + } + + psys = devm_kzalloc(&adev->dev, sizeof(*psys), GFP_KERNEL); + if (!psys) { + rval = -ENOMEM; + goto out_unlock; + } + + psys->adev = adev; + psys->pdata = adev->pdata; +#ifdef CONFIG_VIDEO_INTEL_IPU4 + psys->icache_prefetch_sp = is_ipu_hw_bxtp_e0(isp); +#else + psys->icache_prefetch_sp = 0; +#endif + + ipu_trace_init(adev->isp, psys->pdata->base, &adev->dev, + psys_trace_blocks); + + cdev_init(&psys->cdev, &ipu_psys_fops); + psys->cdev.owner = ipu_psys_fops.owner; + + rval = cdev_add(&psys->cdev, MKDEV(MAJOR(ipu_psys_dev_t), minor), 1); + if (rval) { + dev_err(&adev->dev, "cdev_add failed (%d)\n", rval); + goto out_unlock; + } + + set_bit(minor, ipu_psys_devices); + + spin_lock_init(&psys->power_lock); + spin_lock_init(&psys->pgs_lock); + psys->power = 0; + psys->timeout = IPU_PSYS_CMD_TIMEOUT_MS; + + mutex_init(&psys->mutex); + INIT_LIST_HEAD(&psys->fhs); + INIT_LIST_HEAD(&psys->pgs); + INIT_LIST_HEAD(&psys->started_kcmds_list); + INIT_WORK(&psys->watchdog_work, ipu_psys_watchdog_work); + + init_waitqueue_head(&psys->sched_cmd_wq); + atomic_set(&psys->wakeup_sched_thread_count, 0); + /* + * Create a thread to schedule commands sent to IPU firmware. + * The thread reduces the coupling between the command scheduler + * and queueing commands from the user to driver. + */ + psys->sched_cmd_thread = kthread_run(ipu_psys_sched_cmd, psys, + "psys_sched_cmd"); + + if (IS_ERR(psys->sched_cmd_thread)) { + psys->sched_cmd_thread = NULL; + mutex_destroy(&psys->mutex); + goto out_unlock; + } + + ipu_bus_set_drvdata(adev, psys); + + rval = ipu_psys_resource_pool_init(&psys->resource_pool_started); + if (rval < 0) { + dev_err(&psys->dev, + "unable to alloc process group resources\n"); + goto out_mutex_destroy; + } + + rval = ipu_psys_resource_pool_init(&psys->resource_pool_running); + if (rval < 0) { + dev_err(&psys->dev, + "unable to alloc process group resources\n"); + goto out_resources_started_free; + } + + fw = adev->isp->cpd_fw; + + rval = ipu_buttress_map_fw_image(adev, fw, &psys->fw_sgt); + if (rval) + goto out_resources_running_free; + + psys->pkg_dir = ipu_cpd_create_pkg_dir(adev, fw->data, + sg_dma_address(psys->fw_sgt.sgl), + &psys->pkg_dir_dma_addr, + &psys->pkg_dir_size); + if (!psys->pkg_dir) { + rval = -ENOMEM; + goto out_unmap_fw_image; + } + + /* allocate and map memory for process groups */ + for (i = 0; i < IPU_PSYS_PG_POOL_SIZE; i++) { + kpg = kzalloc(sizeof(*kpg), GFP_KERNEL); + if (!kpg) + goto out_free_pgs; + kpg->pg = dma_alloc_attrs(&adev->dev, + IPU_PSYS_PG_MAX_SIZE, + &kpg->pg_dma_addr, + GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); + if (!kpg->pg) { + kfree(kpg); + goto out_free_pgs; + } + kpg->size = IPU_PSYS_PG_MAX_SIZE; + list_add(&kpg->list, &psys->pgs); + } + + isp->pkg_dir = psys->pkg_dir; + isp->pkg_dir_dma_addr = psys->pkg_dir_dma_addr; + isp->pkg_dir_size = psys->pkg_dir_size; + + caps.pg_count = ipu_cpd_pkg_dir_get_num_entries(psys->pkg_dir); + + dev_info(&adev->dev, "pkg_dir entry count:%d\n", caps.pg_count); + if (async_fw_init) { + INIT_DELAYED_WORK((struct delayed_work *)&fw_init_task, + run_fw_init_work); + fw_init_task.psys = psys; + schedule_delayed_work((struct delayed_work *)&fw_init_task, 0); + } else { + rval = ipu_psys_fw_init(psys); + } + + if (rval) { + 
dev_err(&adev->dev, "FW init failed(%d)\n", rval); + goto out_free_pgs; + } + + psys->dev.parent = &adev->dev; + psys->dev.bus = &ipu_psys_bus; + psys->dev.devt = MKDEV(MAJOR(ipu_psys_dev_t), minor); + psys->dev.release = ipu_psys_dev_release; + dev_set_name(&psys->dev, "ipu-psys%d", minor); + rval = device_register(&psys->dev); + if (rval < 0) { + dev_err(&psys->dev, "psys device_register failed\n"); + goto out_release_fw_com; + } + + /* Add the hw stepping information to caps */ + strlcpy(caps.dev_model, IPU_MEDIA_DEV_MODEL_NAME, + sizeof(caps.dev_model)); + + pm_runtime_allow(&adev->dev); + pm_runtime_enable(&adev->dev); + + pm_runtime_set_autosuspend_delay(&psys->adev->dev, + IPU_PSYS_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&psys->adev->dev); + pm_runtime_mark_last_busy(&psys->adev->dev); + + mutex_unlock(&ipu_psys_mutex); + + /* Debug fs failure is not fatal. */ + ipu_psys_init_debugfs(psys); + + adev->isp->cpd_fw_reload = &cpd_fw_reload; + + dev_info(&adev->dev, "psys probe minor: %d\n", minor); + + trace_printk("E|TMWK\n"); + return 0; + +out_release_fw_com: + ipu_fw_com_release(psys->fwcom, 1); +out_free_pgs: + list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) { + dma_free_attrs(&adev->dev, kpg->size, kpg->pg, + kpg->pg_dma_addr, DMA_ATTR_NON_CONSISTENT); + kfree(kpg); + } + + if (!isp->secure_mode) + ipu_cpd_free_pkg_dir(adev, psys->pkg_dir, + psys->pkg_dir_dma_addr, + psys->pkg_dir_size); +out_unmap_fw_image: + ipu_buttress_unmap_fw_image(adev, &psys->fw_sgt); +out_resources_running_free: + ipu_psys_resource_pool_cleanup(&psys->resource_pool_running); +out_resources_started_free: + ipu_psys_resource_pool_cleanup(&psys->resource_pool_started); +out_mutex_destroy: + mutex_destroy(&psys->mutex); + cdev_del(&psys->cdev); + if (psys->sched_cmd_thread) { + kthread_stop(psys->sched_cmd_thread); + psys->sched_cmd_thread = NULL; + } +out_unlock: + /* Safe to call even if the init is not called */ + ipu_trace_uninit(&adev->dev); + mutex_unlock(&ipu_psys_mutex); + + trace_printk("E|TMWK\n"); + return rval; +} + +static void ipu_psys_remove(struct ipu_bus_device *adev) +{ + struct ipu_device *isp = adev->isp; + struct ipu_psys *psys = ipu_bus_get_drvdata(adev); + struct ipu_psys_pg *kpg, *kpg0; + + if (isp->ipu_dir) + debugfs_remove_recursive(psys->debugfsdir); + + flush_workqueue(IPU_PSYS_WORK_QUEUE); + + if (psys->sched_cmd_thread) { + kthread_stop(psys->sched_cmd_thread); + psys->sched_cmd_thread = NULL; + } + + pm_runtime_dont_use_autosuspend(&psys->adev->dev); + + mutex_lock(&ipu_psys_mutex); + + list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) { + dma_free_attrs(&adev->dev, kpg->size, kpg->pg, + kpg->pg_dma_addr, DMA_ATTR_NON_CONSISTENT); + kfree(kpg); + } + + if (psys->fwcom && ipu_fw_com_release(psys->fwcom, 1)) + dev_err(&adev->dev, "fw com release failed.\n"); + + isp->pkg_dir = NULL; + isp->pkg_dir_dma_addr = 0; + isp->pkg_dir_size = 0; + + ipu_cpd_free_pkg_dir(adev, psys->pkg_dir, + psys->pkg_dir_dma_addr, psys->pkg_dir_size); + + ipu_buttress_unmap_fw_image(adev, &psys->fw_sgt); + + kfree(psys->server_init); + kfree(psys->syscom_config); + + ipu_trace_uninit(&adev->dev); + + ipu_psys_resource_pool_cleanup(&psys->resource_pool_started); + ipu_psys_resource_pool_cleanup(&psys->resource_pool_running); + + device_unregister(&psys->dev); + + clear_bit(MINOR(psys->cdev.dev), ipu_psys_devices); + cdev_del(&psys->cdev); + + mutex_unlock(&ipu_psys_mutex); + + mutex_destroy(&psys->mutex); + + dev_info(&adev->dev, "removed\n"); +} + +static irqreturn_t 
psys_isr_threaded(struct ipu_bus_device *adev) +{ + struct ipu_psys *psys = ipu_bus_get_drvdata(adev); + void __iomem *base = psys->pdata->base; + u32 status; + int r; + + mutex_lock(&psys->mutex); +#ifdef CONFIG_PM + if (!READ_ONCE(psys->power)) { + mutex_unlock(&psys->mutex); + return IRQ_NONE; + } + + r = pm_runtime_get_sync(&psys->adev->dev); + if (r < 0) { + pm_runtime_put(&psys->adev->dev); + mutex_unlock(&psys->mutex); + return IRQ_NONE; + } +#endif + + status = readl(base + IPU_REG_PSYS_GPDEV_IRQ_STATUS); + writel(status, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR); + + if (status & IPU_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0)) { + writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0)); + ipu_psys_handle_events(psys); + } + + pm_runtime_mark_last_busy(&psys->adev->dev); + pm_runtime_put_autosuspend(&psys->adev->dev); + mutex_unlock(&psys->mutex); + + return status ? IRQ_HANDLED : IRQ_NONE; +} + + +static struct ipu_bus_driver ipu_psys_driver = { + .probe = ipu_psys_probe, + .remove = ipu_psys_remove, + .isr_threaded = psys_isr_threaded, + .wanted = IPU_PSYS_NAME, + .drv = { + .name = IPU_PSYS_NAME, + .owner = THIS_MODULE, + .pm = PSYS_PM_OPS, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; + +static int __init ipu_psys_init(void) +{ + int rval = alloc_chrdev_region(&ipu_psys_dev_t, 0, + IPU_PSYS_NUM_DEVICES, IPU_PSYS_NAME); + if (rval) { + pr_err("can't alloc psys chrdev region (%d)\n", rval); + return rval; + } + + rval = bus_register(&ipu_psys_bus); + if (rval) { + pr_warn("can't register psys bus (%d)\n", rval); + goto out_bus_register; + } + + ipu_bus_register_driver(&ipu_psys_driver); + + return rval; + +out_bus_register: + unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES); + + return rval; +} + +static void __exit ipu_psys_exit(void) +{ + ipu_bus_unregister_driver(&ipu_psys_driver); + bus_unregister(&ipu_psys_bus); + unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES); +} + +static const struct pci_device_id ipu_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU_PCI_ID)}, + {0,} +}; +MODULE_DEVICE_TABLE(pci, ipu_pci_tbl); + +module_init(ipu_psys_init); +module_exit(ipu_psys_exit); + +MODULE_AUTHOR("Antti Laakso "); +MODULE_AUTHOR("Bin Han "); +MODULE_AUTHOR("Renwei Wu "); +MODULE_AUTHOR("Jianxu Zheng "); +MODULE_AUTHOR("Xia Wu "); +MODULE_AUTHOR("Bingbu Cao "); +MODULE_AUTHOR("Zaikuo Wang "); +MODULE_AUTHOR("Yunliang Ding "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu processing system driver"); diff --git a/drivers/media/pci/intel/ipu-psys.h b/drivers/media/pci/intel/ipu-psys.h new file mode 100644 index 000000000000..762950f4261f --- /dev/null +++ b/drivers/media/pci/intel/ipu-psys.h @@ -0,0 +1,241 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_PSYS_H +#define IPU_PSYS_H + +#include +#include + +#include "ipu.h" +#include "ipu-pdata.h" +#include "ipu-fw-psys.h" + +#include + +#define IPU_PSYS_PG_POOL_SIZE 16 +#define IPU_PSYS_PG_MAX_SIZE 2048 +#define IPU_MAX_PSYS_CMD_BUFFERS 32 +#define IPU_PSYS_EVENT_CMD_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS +#define IPU_PSYS_EVENT_FRAGMENT_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS +#define IPU_PSYS_CLOSE_TIMEOUT_US 50 +#define IPU_PSYS_CLOSE_TIMEOUT (100000 / IPU_PSYS_CLOSE_TIMEOUT_US) +#define IPU_PSYS_BUF_SET_POOL_SIZE 16 +#define IPU_PSYS_BUF_SET_MAX_SIZE 1024 +#define IPU_PSYS_WORK_QUEUE system_power_efficient_wq +#define IPU_MAX_RESOURCES 32 + +/* Opaque structure. Do not access fields. 
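+ * Each ipu_resource tracks one firmware resource bank as a bitmap of
+ * `elements` slots; an allocation marks a run of bits and is recorded
+ * in struct ipu_resource_alloc (below) so the same slots can be
+ * returned when the process group is released.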
*/ +struct ipu_resource { + u32 id; + int elements; /* Number of elements available to allocation */ + unsigned long *bitmap; /* Allocation bitmap, a bit for each element */ +}; + +enum ipu_resource_type { + IPU_RESOURCE_DEV_CHN = 0, + IPU_RESOURCE_EXT_MEM, + IPU_RESOURCE_DFM +}; + +/* Allocation of resource(s) */ +/* Opaque structure. Do not access fields. */ +struct ipu_resource_alloc { + enum ipu_resource_type type; + struct ipu_resource *resource; + int elements; + int pos; +}; + +/* + * This struct represents all of the currently allocated + * resources from IPU model. It is used also for allocating + * resources for the next set of PGs to be run on IPU + * (ie. those PGs which are not yet being run and which don't + * yet reserve real IPU resources). + */ +#define IPU_PSYS_RESOURCE_OVERALLOC 2 /* Some room for ABI / ext lib delta */ +struct ipu_psys_resource_pool { + u32 cells; /* Bitmask of cells allocated */ + struct ipu_resource dev_channels[IPU_FW_PSYS_N_DEV_CHN_ID + + IPU_PSYS_RESOURCE_OVERALLOC]; + struct ipu_resource ext_memory[IPU_FW_PSYS_N_MEM_ID + + IPU_PSYS_RESOURCE_OVERALLOC]; + struct ipu_resource dfms[IPU_FW_PSYS_N_DEV_DFM_ID + + IPU_PSYS_RESOURCE_OVERALLOC]; +}; + +/* + * This struct keeps book of the resources allocated for a specific PG. + * It is used for freeing up resources from struct ipu_psys_resources + * when the PG is released from IPU4 (or model of IPU4). + */ +struct ipu_psys_resource_alloc { + u32 cells; /* Bitmask of cells needed */ + struct ipu_resource_alloc + resource_alloc[IPU_MAX_RESOURCES]; + int resources; +}; + +struct task_struct; +struct ipu_psys { + struct cdev cdev; + struct device dev; + + struct mutex mutex; /* Psys various */ + int power; + bool icache_prefetch_sp; + bool icache_prefetch_isp; + spinlock_t power_lock; /* Serialize access to power */ + spinlock_t pgs_lock; /* Protect pgs list access */ + struct list_head fhs; + struct list_head pgs; + struct list_head started_kcmds_list; + struct ipu_psys_pdata *pdata; + struct ipu_bus_device *adev; + struct ia_css_syscom_context *dev_ctx; + struct ia_css_syscom_config *syscom_config; + struct ia_css_psys_server_init *server_init; + struct task_struct *sched_cmd_thread; + struct work_struct watchdog_work; + wait_queue_head_t sched_cmd_wq; + atomic_t wakeup_sched_thread_count; + struct dentry *debugfsdir; + + /* Resources needed to be managed for process groups */ + struct ipu_psys_resource_pool resource_pool_running; + struct ipu_psys_resource_pool resource_pool_started; + + const struct firmware *fw; + struct sg_table fw_sgt; + u64 *pkg_dir; + dma_addr_t pkg_dir_dma_addr; + unsigned int pkg_dir_size; + unsigned long timeout; + + int active_kcmds, started_kcmds; + void *fwcom; +}; + +struct ipu_psys_fh { + struct ipu_psys *psys; + struct mutex mutex; /* Protects bufmap & kcmds fields */ + struct list_head list; + struct list_head bufmap; + struct list_head kcmds[IPU_PSYS_CMD_PRIORITY_NUM]; + struct ipu_psys_kcmd + *new_kcmd_tail[IPU_PSYS_CMD_PRIORITY_NUM]; + wait_queue_head_t wait; + struct mutex bs_mutex; /* Protects buf_set field */ + struct list_head buf_sets; +}; + + +struct ipu_psys_pg { + struct ipu_fw_psys_process_group *pg; + size_t size; + size_t pg_size; + dma_addr_t pg_dma_addr; + struct list_head list; + struct ipu_psys_resource_alloc resource_alloc; +}; + +enum ipu_psys_cmd_state { + KCMD_STATE_NEW, + KCMD_STATE_START_PREPARED, + KCMD_STATE_STARTED, + KCMD_STATE_RUN_PREPARED, + KCMD_STATE_RUNNING, + KCMD_STATE_COMPLETE +}; + +struct ipu_psys_buffer_set { + struct list_head list; 
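+	/* links this set into ipu_psys_fh.buf_sets, under fh->bs_mutex */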
+ struct ipu_fw_psys_buffer_set *buf_set; + size_t size; + size_t buf_set_size; + dma_addr_t dma_addr; + void *kaddr; + struct ipu_psys_kcmd *kcmd; +}; + +struct ipu_psys_kcmd { + struct ipu_psys_fh *fh; + struct list_head list; + struct list_head started_list; + enum ipu_psys_cmd_state state; + void *pg_manifest; + size_t pg_manifest_size; + struct ipu_psys_kbuffer **kbufs; + struct ipu_psys_buffer *buffers; + size_t nbuffers; + struct ipu_fw_psys_process_group *pg_user; + struct ipu_psys_pg *kpg; + u64 user_token; + u64 issue_id; + u32 priority; + struct ipu_buttress_constraint constraint; + struct ipu_psys_buffer_set *kbuf_set; + + struct ipu_psys_event ev; + struct timer_list watchdog; +}; + +struct ipu_dma_buf_attach { + struct device *dev; + u64 len; + void *userptr; + struct sg_table *sgt; + bool vma_is_io; + struct page **pages; + size_t npages; +}; + +struct ipu_psys_kbuffer { + u64 len; + void *userptr; + u32 flags; + int fd; + void *kaddr; + struct list_head list; + dma_addr_t dma_addr; + struct sg_table *sgt; + struct dma_buf_attachment *db_attach; + struct dma_buf *dbuf; + struct ipu_psys *psys; + struct ipu_psys_fh *fh; + bool valid; /* True when buffer is usable */ +}; + +#define inode_to_ipu_psys(inode) \ + container_of((inode)->i_cdev, struct ipu_psys, cdev) + + +#ifdef CONFIG_COMPAT +long ipu_psys_compat_ioctl32(struct file *file, unsigned int cmd, + unsigned long arg); +#endif + +void ipu_psys_setup_hw(struct ipu_psys *psys); +void ipu_psys_handle_events(struct ipu_psys *psys); +int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh); +int ipu_psys_kcmd_queue(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd); +void ipu_psys_kcmd_complete(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd, int error); +void ipu_psys_run_next(struct ipu_psys *psys); +void ipu_psys_watchdog_work(struct work_struct *work); +int ipu_psys_kcmd_abort(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd, int error); +void ipu_psys_kcmd_free(struct ipu_psys_kcmd *kcmd); +struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size); +struct ipu_psys_kbuffer * +ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd); +struct ipu_psys_kbuffer * +ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr); +#ifdef IPU_PSYS_GPC +int ipu_psys_gpc_init_debugfs(struct ipu_psys *psys); +#endif +int ipu_psys_resource_pool_init(struct ipu_psys_resource_pool *pool); +void ipu_psys_resource_pool_cleanup(struct ipu_psys_resource_pool *pool); + +#endif /* IPU_PSYS_H */ diff --git a/drivers/media/pci/intel/ipu-trace-event.h b/drivers/media/pci/intel/ipu-trace-event.h new file mode 100644 index 000000000000..b5e8d4be42ed --- /dev/null +++ b/drivers/media/pci/intel/ipu-trace-event.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2015 - 2018 Intel Corporation */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ipu + +#if !defined(IPU_TRACE_EVENT_H) || defined(TRACE_HEADER_MULTI_READ) +#define IPU_EVENT_H + +#include + +#ifdef IPU_SOF_SEQID_TRACE +TRACE_EVENT(ipu_sof_seqid, + TP_PROTO(unsigned int seqid, unsigned int csiport, + unsigned int csivc), + TP_ARGS(seqid, csiport, csivc), + TP_STRUCT__entry(__field(unsigned int, seqid) + __field(unsigned int, csiport) + __field(unsigned int, csivc) + ), + TP_fast_assign(__entry->seqid = seqid; + __entry->csiport = csiport; + __entry->csivc = csivc;), + TP_printk("seqid<%u>,csiport<%u>,csivc<%u>", __entry->seqid, + __entry->csiport, __entry->csivc) + ); +#endif + +#ifdef IPU_EOF_SEQID_TRACE 
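+/*
+ * End-of-frame counterpart of ipu_sof_seqid above. TRACE_EVENT()
+ * generates a trace_ipu_eof_seqid() helper; a (hypothetical) call site
+ * in the CSI-2 interrupt path would look like:
+ *
+ *	trace_ipu_eof_seqid(seqid, csiport, csivc);
+ *
+ * and the records appear under the "ipu" event system in tracefs.
+ */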
+TRACE_EVENT(ipu_eof_seqid, + TP_PROTO(unsigned int seqid, unsigned int csiport, + unsigned int csivc), + TP_ARGS(seqid, csiport, csivc), + TP_STRUCT__entry(__field(unsigned int, seqid) + __field(unsigned int, csiport) + __field(unsigned int, csivc) + ), + TP_fast_assign(__entry->seqid = seqid; + __entry->csiport = csiport; + __entry->csivc = csivc;), + TP_printk("seqid<%u>,csiport<%u>,csivc<%u>", __entry->seqid, + __entry->csiport, __entry->csivc) + ); +#endif + +#ifdef IPU_PERF_REG_TRACE +TRACE_EVENT(ipu_perf_reg, + TP_PROTO(unsigned int addr, unsigned int val), + TP_ARGS(addr, val), TP_STRUCT__entry(__field(unsigned int, addr) + __field(unsigned int, val) + ), + TP_fast_assign(__entry->addr = addr; + __entry->val = val;), + TP_printk("addr=%u,val=%u", __entry->addr, __entry->val) + ); +#endif + +#ifdef IPU_PG_KCMD_TRACE +TRACE_EVENT(ipu_pg_kcmd, + TP_PROTO(const char *func, unsigned int id, + unsigned long long issue_id, unsigned int pri, + unsigned int pg_id, unsigned int load_cycles, + unsigned int init_cycles, + unsigned int processing_cycles), + TP_ARGS(func, id, issue_id, pri, pg_id, load_cycles, + init_cycles, processing_cycles), + TP_STRUCT__entry(__field(const char *, func) + __field(unsigned int, id) + __field(unsigned long long, issue_id) + __field(unsigned int, pri) + __field(unsigned int, pg_id) + __field(unsigned int, load_cycles) + __field(unsigned int, init_cycles) + __field(unsigned int, processing_cycles) + ), + TP_fast_assign(__entry->func = func; + __entry->id = id; + __entry->issue_id = issue_id; + __entry->pri = pri; + __entry->pg_id = pg_id; + __entry->load_cycles = load_cycles; + __entry->init_cycles = init_cycles; + __entry->processing_cycles = processing_cycles;), + TP_printk + ("pg-kcmd: func=%s,id=%u,issue_id=0x%llx,pri=%u,pg_id=%d," + "load_cycles=%u,init_cycles=%u,processing_cycles=%u", + __entry->func, __entry->id, __entry->issue_id, __entry->pri, + __entry->pg_id, __entry->load_cycles, __entry->init_cycles, + __entry->processing_cycles) + ); + +#endif +#endif + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . 
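+/*
+ * TRACE_INCLUDE_PATH and TRACE_INCLUDE_FILE point <trace/define_trace.h>
+ * (included below) back at this header, which it re-includes with
+ * TRACE_HEADER_MULTI_READ set in order to expand the TRACE_EVENT()
+ * definitions above into the actual tracepoint code.
+ */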
+#define TRACE_INCLUDE_FILE ipu-trace-event +/* This part must be outside protection */ +#include diff --git a/drivers/media/pci/intel/ipu-trace.c b/drivers/media/pci/intel/ipu-trace.c new file mode 100644 index 000000000000..5e0795d78649 --- /dev/null +++ b/drivers/media/pci/intel/ipu-trace.c @@ -0,0 +1,915 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2014 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu.h" +#include "ipu-platform-regs.h" +#include "ipu-trace.h" + +/* Input data processing states */ +enum config_file_parse_states { + STATE_FILL = 0, + STATE_COMMENT, + STATE_COMPLETE, +}; + +struct trace_register_range { + u32 start; + u32 end; +}; + +static u16 trace_unit_template[] = TRACE_REG_CREATE_TUN_REGISTER_LIST; +static u16 trace_monitor_template[] = TRACE_REG_CREATE_TM_REGISTER_LIST; +static u16 trace_gpc_template[] = TRACE_REG_CREATE_GPC_REGISTER_LIST; + +static struct trace_register_range trace_csi2_range_template[] = { + { + .start = TRACE_REG_CSI2_TM_RESET_REG_IDX, + .end = TRACE_REG_CSI2_TM_IRQ_ENABLE_REG_IDn(7) + }, + { + .start = TRACE_REG_END_MARK, + .end = TRACE_REG_END_MARK + } +}; + +static struct trace_register_range trace_csi2_3ph_range_template[] = { + { + .start = TRACE_REG_CSI2_3PH_TM_RESET_REG_IDX, + .end = TRACE_REG_CSI2_3PH_TM_IRQ_ENABLE_REG_IDn(7) + }, + { + .start = TRACE_REG_END_MARK, + .end = TRACE_REG_END_MARK + } +}; + +static struct trace_register_range trace_sig2cio_range_template[] = { + { + .start = TRACE_REG_SIG2CIO_ADDRESS, + .end = (TRACE_REG_SIG2CIO_STATUS + 8 * TRACE_REG_SIG2CIO_SIZE_OF) + }, + { + .start = TRACE_REG_END_MARK, + .end = TRACE_REG_END_MARK + } +}; + +#define LINE_MAX_LEN 128 +#define MEMORY_RING_BUFFER_SIZE (SZ_1M * 10) +#define TRACE_MESSAGE_SIZE 16 +/* + * It looks that the trace unit sometimes writes outside the given buffer. + * To avoid memory corruption one extra page is reserved at the end + * of the buffer. Read also the extra area since it may contain valid data. + */ +#define MEMORY_RING_BUFFER_GUARD PAGE_SIZE +#define MEMORY_RING_BUFFER_OVERREAD MEMORY_RING_BUFFER_GUARD +#define MAX_TRACE_REGISTERS 200 +#define TRACE_CONF_DUMP_BUFFER_SIZE (MAX_TRACE_REGISTERS * 2 * 32) + +#define IPU_TRACE_TIME_RETRY 5 + +struct config_value { + u32 reg; + u32 value; +}; + +struct ipu_trace_buffer { + dma_addr_t dma_handle; + void *memory_buffer; +}; + +struct ipu_subsystem_trace_config { + u32 offset; + void __iomem *base; + struct ipu_trace_buffer memory; /* ring buffer */ + struct device *dev; + struct ipu_trace_block *blocks; + unsigned int fill_level; /* Nbr of regs in config table below */ + bool running; + /* Cached register values */ + struct config_value config[MAX_TRACE_REGISTERS]; +}; + +/* + * State of the input data processing is kept in this structure. + * Only one user is supported at time. 
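+ *
+ * Example of accepted input, as parsed by process_buffer() below
+ * (the register address and value here are purely illustrative):
+ *
+ *	# comment, ignored to end of line
+ *	0x00012345 = 0x00000001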
+ */ +struct buf_state { + char line_buffer[LINE_MAX_LEN]; + enum config_file_parse_states state; + int offset; /* Offset to line_buffer */ +}; + +struct ipu_trace { + struct mutex lock; + bool open; + char *conf_dump_buffer; + int size_conf_dump; + struct buf_state buffer_state; + + struct ipu_subsystem_trace_config isys; + struct ipu_subsystem_trace_config psys; +}; + +int ipu_trace_get_timer(struct device *dev, u64 *timer) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_subsystem_trace_config *sys = adev->trace_cfg; + struct ipu_trace_block *blocks; + void __iomem *addr = NULL; + uint32_t time_hi1, time_hi2, time_lo, retry; + + if (!sys) + return -ENODEV; + /* Find trace unit base address */ + blocks = sys->blocks; + while (blocks->type != IPU_TRACE_BLOCK_END) { + if (blocks->type == IPU_TRACE_BLOCK_TUN) { + addr = sys->base + blocks->offset; + break; + } + blocks++; + } + if (!addr) + return -ENODEV; + + for (retry = 0; retry < IPU_TRACE_TIME_RETRY; retry++) { + time_hi1 = readl(addr + TRACE_REG_TUN_LOCAL_TIMER1); + time_lo = readl(addr + TRACE_REG_TUN_LOCAL_TIMER0); + time_hi2 = readl(addr + TRACE_REG_TUN_LOCAL_TIMER1); + *timer = (((u64) time_hi1) << 32) | time_lo; + if (time_hi1 == time_hi2) + return 0; + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(ipu_trace_get_timer); + +static void __ipu_trace_restore(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_device *isp = adev->isp; + struct ipu_trace *trace = isp->trace; + struct config_value *config; + struct ipu_subsystem_trace_config *sys = adev->trace_cfg; + struct ipu_trace_block *blocks; + uint32_t mapped_trace_buffer; + void __iomem *addr = NULL; + int i; + + if (trace->open) { + dev_info(dev, "Trace control file open. Skipping update\n"); + return; + } + + if (!sys) + return; + + /* leave if no trace configuration for this subsystem */ + if (sys->fill_level == 0) + return; + + /* Find trace unit base address */ + blocks = sys->blocks; + while (blocks->type != IPU_TRACE_BLOCK_END) { + if (blocks->type == IPU_TRACE_BLOCK_TUN) { + addr = sys->base + blocks->offset; + break; + } + blocks++; + } + if (!addr) + return; + + if (!sys->memory.memory_buffer) { + sys->memory.memory_buffer = + dma_alloc_attrs(dev, MEMORY_RING_BUFFER_SIZE + + MEMORY_RING_BUFFER_GUARD, + &sys->memory.dma_handle, + GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); + } + + if (!sys->memory.memory_buffer) { + dev_err(dev, "No memory for tracing. 
Trace unit disabled\n"); + return; + } + + config = sys->config; + mapped_trace_buffer = sys->memory.dma_handle; + + /* ring buffer base */ + writel(mapped_trace_buffer, addr + TRACE_REG_TUN_DRAM_BASE_ADDR); + + /* ring buffer end */ + writel(mapped_trace_buffer + MEMORY_RING_BUFFER_SIZE - + TRACE_MESSAGE_SIZE, addr + TRACE_REG_TUN_DRAM_END_ADDR); + + /* Infobits for ddr trace */ + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + addr + TRACE_REG_TUN_DDR_INFO_VAL); + + /* Find trace timer reset address */ + addr = NULL; + blocks = sys->blocks; + while (blocks->type != IPU_TRACE_BLOCK_END) { + if (blocks->type == IPU_TRACE_TIMER_RST) { + addr = sys->base + blocks->offset; + break; + } + blocks++; + } + if (!addr) { + dev_err(dev, "No trace reset addr\n"); + return; + } + + /* Remove reset from trace timers */ + writel(TRACE_REG_GPREG_TRACE_TIMER_RST_OFF, addr); + + /* Register config received from userspace */ + for (i = 0; i < sys->fill_level; i++) { + dev_dbg(dev, + "Trace restore: reg 0x%08x, value 0x%08x\n", + config[i].reg, config[i].value); + writel(config[i].value, isp->base + config[i].reg); + } + + sys->running = true; +} + +void ipu_trace_restore(struct device *dev) +{ + struct ipu_trace *trace = to_ipu_bus_device(dev)->isp->trace; + + if (!trace) + return; + + mutex_lock(&trace->lock); + __ipu_trace_restore(dev); + mutex_unlock(&trace->lock); +} +EXPORT_SYMBOL_GPL(ipu_trace_restore); + +static void __ipu_trace_stop(struct device *dev) +{ + struct ipu_subsystem_trace_config *sys = + to_ipu_bus_device(dev)->trace_cfg; + struct ipu_trace_block *blocks; + + if (!sys) + return; + + if (!sys->running) + return; + sys->running = false; + + /* Turn off all the gpc blocks */ + blocks = sys->blocks; + while (blocks->type != IPU_TRACE_BLOCK_END) { + if (blocks->type == IPU_TRACE_BLOCK_GPC) { + writel(0, sys->base + blocks->offset + + TRACE_REG_GPC_OVERALL_ENABLE); + } + blocks++; + } + + /* Turn off all the trace monitors */ + blocks = sys->blocks; + while (blocks->type != IPU_TRACE_BLOCK_END) { + if (blocks->type == IPU_TRACE_BLOCK_TM) { + writel(0, sys->base + blocks->offset + + TRACE_REG_TM_TRACE_ENABLE_NPK); + + writel(0, sys->base + blocks->offset + + TRACE_REG_TM_TRACE_ENABLE_DDR); + } + blocks++; + } + + /* Turn off trace units */ + blocks = sys->blocks; + while (blocks->type != IPU_TRACE_BLOCK_END) { + if (blocks->type == IPU_TRACE_BLOCK_TUN) { + writel(0, sys->base + blocks->offset + + TRACE_REG_TUN_DDR_ENABLE); + writel(0, sys->base + blocks->offset + + TRACE_REG_TUN_NPK_ENABLE); + } + blocks++; + } +} + +void ipu_trace_stop(struct device *dev) +{ + struct ipu_trace *trace = to_ipu_bus_device(dev)->isp->trace; + + if (!trace) + return; + + mutex_lock(&trace->lock); + __ipu_trace_stop(dev); + mutex_unlock(&trace->lock); +} +EXPORT_SYMBOL_GPL(ipu_trace_stop); + +static int validate_register(u32 base, u32 reg, u16 *template) +{ + int i = 0; + + while (template[i] != TRACE_REG_END_MARK) { + if (template[i] + base != reg) { + i++; + continue; + } + /* This is a valid register */ + return 0; + } + return -EINVAL; +} + +static int validate_register_range(u32 base, u32 reg, + struct trace_register_range *template) +{ + unsigned int i = 0; + + if (!IS_ALIGNED(reg, sizeof(u32))) + return -EINVAL; + + while (template[i].start != TRACE_REG_END_MARK) { + if ((reg < template[i].start + base) || + (reg > template[i].end + base)) { + i++; + continue; + } + /* This is a valid register */ + return 0; + } + return -EINVAL; +} + +static int update_register_cache(struct ipu_device *isp, u32 reg, u32 
value) +{ + struct ipu_trace *dctrl = isp->trace; + const struct ipu_trace_block *blocks; + struct ipu_subsystem_trace_config *sys; + struct device *dev; + u32 base = 0; + u16 *template = NULL; + struct trace_register_range *template_range = NULL; + int i, range; + int rval = -EINVAL; + + if (dctrl->isys.offset == dctrl->psys.offset) { + /* For the IPU with uniform address space */ + if (reg >= IPU_ISYS_OFFSET && + reg < IPU_ISYS_OFFSET + TRACE_REG_MAX_ISYS_OFFSET) + sys = &dctrl->isys; + else if (reg >= IPU_PSYS_OFFSET && + reg < IPU_PSYS_OFFSET + TRACE_REG_MAX_PSYS_OFFSET) + sys = &dctrl->psys; + else + goto error; + } else { + if (dctrl->isys.offset && + reg >= dctrl->isys.offset && + reg < dctrl->isys.offset + TRACE_REG_MAX_ISYS_OFFSET) + sys = &dctrl->isys; + else if (dctrl->psys.offset && + reg >= dctrl->psys.offset && + reg < dctrl->psys.offset + TRACE_REG_MAX_PSYS_OFFSET) + sys = &dctrl->psys; + else + goto error; + } + + blocks = sys->blocks; + dev = sys->dev; + + /* Check registers block by block */ + i = 0; + while (blocks[i].type != IPU_TRACE_BLOCK_END) { + base = blocks[i].offset + sys->offset; + if ((reg >= base && reg < base + TRACE_REG_MAX_BLOCK_SIZE)) + break; + i++; + } + + range = 0; + switch (blocks[i].type) { + case IPU_TRACE_BLOCK_TUN: + template = trace_unit_template; + break; + case IPU_TRACE_BLOCK_TM: + template = trace_monitor_template; + break; + case IPU_TRACE_BLOCK_GPC: + template = trace_gpc_template; + break; + case IPU_TRACE_CSI2: + range = 1; + template_range = trace_csi2_range_template; + break; + case IPU_TRACE_CSI2_3PH: + range = 1; + template_range = trace_csi2_3ph_range_template; + break; + case IPU_TRACE_SIG2CIOS: + range = 1; + template_range = trace_sig2cio_range_template; + break; + default: + goto error; + } + + if (range) + rval = validate_register_range(base, reg, template_range); + else + rval = validate_register(base, reg, template); + + if (rval) + goto error; + + if (sys->fill_level < MAX_TRACE_REGISTERS) { + dev_dbg(dev, + "Trace reg addr 0x%08x value 0x%08x\n", reg, value); + sys->config[sys->fill_level].reg = reg; + sys->config[sys->fill_level].value = value; + sys->fill_level++; + } else { + rval = -ENOMEM; + goto error; + } + return 0; +error: + dev_info(&isp->pdev->dev, + "Trace register address 0x%08x ignored as invalid register\n", + reg); + return rval; +} + +/* + * We don't know how much data is received this time. Process given data + * character by character. 
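+ * Parser state persists in struct buf_state across write() calls, so a
+ * "reg = value" pair split over two writes is still decoded correctly.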
+ * Fill the line buffer until either + * 1) new line is got -> go to decode + * or + * 2) line_buffer is full -> ignore rest of line and then try to decode + * or + * 3) Comment mark is found -> ignore rest of the line and then try to decode + * the data which was received before the comment mark + * + * Decode phase tries to find "reg = value" pairs and validates those + */ +static int process_buffer(struct ipu_device *isp, + char *buffer, int size, struct buf_state *state) +{ + int i, ret; + int curr_state = state->state; + u32 reg, value; + + for (i = 0; i < size; i++) { + /* + * Comment mark in any position turns on comment mode + * until end of line + */ + if (curr_state != STATE_COMMENT && buffer[i] == '#') { + state->line_buffer[state->offset] = '\0'; + curr_state = STATE_COMMENT; + continue; + } + + switch (curr_state) { + case STATE_COMMENT: + /* Only new line can break this mode */ + if (buffer[i] == '\n') + curr_state = STATE_COMPLETE; + break; + case STATE_FILL: + state->line_buffer[state->offset] = buffer[i]; + state->offset++; + + if (state->offset >= sizeof(state->line_buffer) - 1) { + /* Line buffer full - ignore rest */ + state->line_buffer[state->offset] = '\0'; + curr_state = STATE_COMMENT; + break; + } + + if (buffer[i] == '\n') { + state->line_buffer[state->offset] = '\0'; + curr_state = STATE_COMPLETE; + } + break; + default: + state->offset = 0; + state->line_buffer[state->offset] = '\0'; + curr_state = STATE_COMMENT; + } + + if (curr_state == STATE_COMPLETE) { + ret = sscanf(state->line_buffer, "%x = %x", + ®, &value); + if (ret == 2) + update_register_cache(isp, reg, value); + + state->offset = 0; + curr_state = STATE_FILL; + } + } + state->state = curr_state; + return 0; +} + +static void traceconf_dump(struct ipu_device *isp) +{ + struct ipu_subsystem_trace_config *sys[2] = { + &isp->trace->isys, + &isp->trace->psys + }; + int i, j, rem_size; + char *out; + + isp->trace->size_conf_dump = 0; + out = isp->trace->conf_dump_buffer; + rem_size = TRACE_CONF_DUMP_BUFFER_SIZE; + + for (j = 0; j < ARRAY_SIZE(sys); j++) { + for (i = 0; i < sys[j]->fill_level && rem_size > 0; i++) { + int bytes_print; + int n = snprintf(out, rem_size, "0x%08x = 0x%08x\n", + sys[j]->config[i].reg, + sys[j]->config[i].value); + + bytes_print = min(n, rem_size - 1); + rem_size -= bytes_print; + out += bytes_print; + } + } + isp->trace->size_conf_dump = out - isp->trace->conf_dump_buffer; +} + +static void clear_trace_buffer(struct ipu_subsystem_trace_config *sys) +{ + if (!sys->memory.memory_buffer) + return; + + memset(sys->memory.memory_buffer, 0, MEMORY_RING_BUFFER_SIZE + + MEMORY_RING_BUFFER_OVERREAD); + + dma_sync_single_for_device(sys->dev, + sys->memory.dma_handle, + MEMORY_RING_BUFFER_SIZE + + MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE); +} + +static int traceconf_open(struct inode *inode, struct file *file) +{ + int ret; + struct ipu_device *isp; + + if (!inode->i_private) + return -EACCES; + + isp = inode->i_private; + + ret = mutex_trylock(&isp->trace->lock); + if (!ret) + return -EBUSY; + + if (isp->trace->open) { + mutex_unlock(&isp->trace->lock); + return -EBUSY; + } + + file->private_data = isp; + isp->trace->open = 1; + if (file->f_mode & FMODE_WRITE) { + /* TBD: Allocate temp buffer for processing. 
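+	 * (writes currently land directly in the live config tables via
+	 * process_buffer()).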
+ * Push validated buffer to active config + */ + + /* Forget old config if opened for write */ + isp->trace->isys.fill_level = 0; + isp->trace->psys.fill_level = 0; + } + + if (file->f_mode & FMODE_READ) { + isp->trace->conf_dump_buffer = + vzalloc(TRACE_CONF_DUMP_BUFFER_SIZE); + if (!isp->trace->conf_dump_buffer) { + isp->trace->open = 0; + mutex_unlock(&isp->trace->lock); + return -ENOMEM; + } + traceconf_dump(isp); + } + mutex_unlock(&isp->trace->lock); + return 0; +} + +static ssize_t traceconf_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + struct ipu_device *isp = file->private_data; + + return simple_read_from_buffer(buf, len, ppos, + isp->trace->conf_dump_buffer, + isp->trace->size_conf_dump); +} + +static ssize_t traceconf_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + struct ipu_device *isp = file->private_data; + char buffer[64]; + ssize_t bytes, count; + loff_t pos = *ppos; + + if (*ppos < 0) + return -EINVAL; + + count = min(len, sizeof(buffer)); + bytes = copy_from_user(buffer, buf, count); + if (bytes == count) + return -EFAULT; + + count -= bytes; + mutex_lock(&isp->trace->lock); + process_buffer(isp, buffer, count, &isp->trace->buffer_state); + mutex_unlock(&isp->trace->lock); + *ppos = pos + count; + + return count; +} + +static int traceconf_release(struct inode *inode, struct file *file) +{ + struct ipu_device *isp = file->private_data; + struct device *psys_dev = isp->psys ? &isp->psys->dev : NULL; + struct device *isys_dev = isp->isys ? &isp->isys->dev : NULL; + int pm_rval = -EINVAL; + + /* + * Turn devices on outside trace->lock mutex. PM transition may + * cause call to function which tries to take the same lock. + * Also do this before trace->open is set back to 0 to avoid + * double restore (one here and one in pm transition). We can't + * rely purely on the restore done by pm call backs since trace + * configuration can occur in any phase compared to other activity. 
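+ * Also note that pm_runtime_get_sync() increments the usage count even
+ * on failure, which is why the error paths below rebalance it with
+ * pm_runtime_put_noidle().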
+ */ + + if (file->f_mode & FMODE_WRITE) { + if (isys_dev) + pm_rval = pm_runtime_get_sync(isys_dev); + + if (pm_rval >= 0) { + /* ISYS ok or missing */ + if (psys_dev) + pm_rval = pm_runtime_get_sync(psys_dev); + + if (pm_rval < 0) { + pm_runtime_put_noidle(psys_dev); + if (isys_dev) + pm_runtime_put(isys_dev); + } + } else { + pm_runtime_put_noidle(&isp->isys->dev); + } + } + + mutex_lock(&isp->trace->lock); + isp->trace->open = 0; + vfree(isp->trace->conf_dump_buffer); + isp->trace->conf_dump_buffer = NULL; + + if (pm_rval >= 0) { + /* Update new cfg to HW */ + if (isys_dev) { + __ipu_trace_stop(isys_dev); + clear_trace_buffer(isp->isys->trace_cfg); + __ipu_trace_restore(isys_dev); + } + + if (psys_dev) { + __ipu_trace_stop(psys_dev); + clear_trace_buffer(isp->psys->trace_cfg); + __ipu_trace_restore(psys_dev); + } + } + + mutex_unlock(&isp->trace->lock); + + if (pm_rval >= 0) { + /* Again - this must be done with trace->lock not taken */ + if (psys_dev) + pm_runtime_put(psys_dev); + if (isys_dev) + pm_runtime_put(isys_dev); + } + return 0; +} + +static const struct file_operations ipu_traceconf_fops = { + .owner = THIS_MODULE, + .open = traceconf_open, + .release = traceconf_release, + .read = traceconf_read, + .write = traceconf_write, + .llseek = no_llseek, +}; + +static int gettrace_open(struct inode *inode, struct file *file) +{ + struct ipu_subsystem_trace_config *sys = inode->i_private; + + if (!sys) + return -EACCES; + + if (!sys->memory.memory_buffer) + return -EACCES; + + dma_sync_single_for_cpu(sys->dev, + sys->memory.dma_handle, + MEMORY_RING_BUFFER_SIZE + + MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE); + + file->private_data = sys; + return 0; +}; + +static ssize_t gettrace_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + struct ipu_subsystem_trace_config *sys = file->private_data; + + return simple_read_from_buffer(buf, len, ppos, + sys->memory.memory_buffer, + MEMORY_RING_BUFFER_SIZE + + MEMORY_RING_BUFFER_OVERREAD); +} + +static ssize_t gettrace_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + struct ipu_subsystem_trace_config *sys = file->private_data; + const char str[] = "clear"; + char buffer[sizeof(str)] = { 0 }; + ssize_t ret; + + ret = simple_write_to_buffer(buffer, sizeof(buffer), ppos, buf, len); + if (ret < 0) + return ret; + + if (ret < sizeof(str) - 1) + return -EINVAL; + + if (!strncmp(str, buffer, sizeof(str) - 1)) { + clear_trace_buffer(sys); + return len; + } + + return -EINVAL; +} + +static int gettrace_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static const struct file_operations ipu_gettrace_fops = { + .owner = THIS_MODULE, + .open = gettrace_open, + .release = gettrace_release, + .read = gettrace_read, + .write = gettrace_write, + .llseek = no_llseek, +}; + +int ipu_trace_init(struct ipu_device *isp, void __iomem *base, + struct device *dev, struct ipu_trace_block *blocks) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_trace *trace = isp->trace; + struct ipu_subsystem_trace_config *sys; + int ret = 0; + + if (!isp->trace) + return 0; + + mutex_lock(&isp->trace->lock); + + if (dev == &isp->isys->dev) { + sys = &trace->isys; + } else if (dev == &isp->psys->dev) { + sys = &trace->psys; + } else { + ret = -EINVAL; + goto leave; + } + + adev->trace_cfg = sys; + sys->dev = dev; + sys->offset = base - isp->base; /* sub system offset */ + sys->base = base; + sys->blocks = blocks; + +leave: + mutex_unlock(&isp->trace->lock); + + return ret; +} 
+EXPORT_SYMBOL_GPL(ipu_trace_init); + +void ipu_trace_uninit(struct device *dev) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(dev); + struct ipu_device *isp = adev->isp; + struct ipu_trace *trace = isp->trace; + struct ipu_subsystem_trace_config *sys = adev->trace_cfg; + + if (!trace || !sys) + return; + + mutex_lock(&trace->lock); + + if (sys->memory.memory_buffer) + dma_free_attrs(sys->dev, + MEMORY_RING_BUFFER_SIZE + + MEMORY_RING_BUFFER_GUARD, + sys->memory.memory_buffer, + sys->memory.dma_handle, DMA_ATTR_NON_CONSISTENT); + + sys->dev = NULL; + sys->memory.memory_buffer = NULL; + + mutex_unlock(&trace->lock); +} +EXPORT_SYMBOL_GPL(ipu_trace_uninit); + +int ipu_trace_debugfs_add(struct ipu_device *isp, struct dentry *dir) +{ + struct dentry *files[3]; + int i = 0; + + files[i] = debugfs_create_file("traceconf", 0644, + dir, isp, &ipu_traceconf_fops); + if (!files[i]) + return -ENOMEM; + i++; + + files[i] = debugfs_create_file("getisystrace", 0444, + dir, + &isp->trace->isys, &ipu_gettrace_fops); + + if (!files[i]) + goto error; + i++; + + files[i] = debugfs_create_file("getpsystrace", 0444, + dir, + &isp->trace->psys, &ipu_gettrace_fops); + if (!files[i]) + goto error; + + return 0; + +error: + for (; i > 0; i--) + debugfs_remove(files[i - 1]); + return -ENOMEM; +} + +int ipu_trace_add(struct ipu_device *isp) +{ + isp->trace = devm_kzalloc(&isp->pdev->dev, + sizeof(struct ipu_trace), GFP_KERNEL); + if (!isp->trace) + return -ENOMEM; + + mutex_init(&isp->trace->lock); + + return 0; +} + +void ipu_trace_release(struct ipu_device *isp) +{ + if (!isp->trace) + return; + mutex_destroy(&isp->trace->lock); +} + +MODULE_AUTHOR("Samu Onkalo "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu trace support"); diff --git a/drivers/media/pci/intel/ipu-trace.h b/drivers/media/pci/intel/ipu-trace.h new file mode 100644 index 000000000000..9167c0400273 --- /dev/null +++ b/drivers/media/pci/intel/ipu-trace.h @@ -0,0 +1,312 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation */ + +#ifndef IPU_TRACE_H +#define IPU_TRACE_H +#include + +#define TRACE_REG_MAX_BLOCK_SIZE 0x0fff + +#define TRACE_REG_END_MARK 0xffff + +#define TRACE_REG_CMD_TYPE_D64 0x0 +#define TRACE_REG_CMD_TYPE_D64M 0x1 +#define TRACE_REG_CMD_TYPE_D64TS 0x2 +#define TRACE_REG_CMD_TYPE_D64MTS 0x3 + +/* Trace unit register offsets */ +#define TRACE_REG_TUN_DDR_ENABLE 0x000 +#define TRACE_REG_TUN_NPK_ENABLE 0x004 +#define TRACE_REG_TUN_DDR_INFO_VAL 0x008 +#define TRACE_REG_TUN_NPK_ADDR 0x00C +#define TRACE_REG_TUN_DRAM_BASE_ADDR 0x010 +#define TRACE_REG_TUN_DRAM_END_ADDR 0x014 +#define TRACE_REG_TUN_LOCAL_TIMER0 0x018 +#define TRACE_REG_TUN_LOCAL_TIMER1 0x01C +#define TRACE_REG_TUN_WR_PTR 0x020 +#define TRACE_REG_TUN_RD_PTR 0x024 + +#define TRACE_REG_CREATE_TUN_REGISTER_LIST { \ + TRACE_REG_TUN_DDR_ENABLE, \ + TRACE_REG_TUN_NPK_ENABLE, \ + TRACE_REG_TUN_DDR_INFO_VAL, \ + TRACE_REG_TUN_NPK_ADDR, \ + TRACE_REG_END_MARK \ +} +/* + * Following registers are left out on purpose: + * TUN_LOCAL_TIMER0, TUN_LOCAL_TIMER1, TUN_DRAM_BASE_ADDR + * TUN_DRAM_END_ADDR, TUN_WR_PTR, TUN_RD_PTR + */ + +/* Trace monitor register offsets */ +#define TRACE_REG_TM_TRACE_ADDR_A 0x0900 +#define TRACE_REG_TM_TRACE_ADDR_B 0x0904 +#define TRACE_REG_TM_TRACE_ADDR_C 0x0908 +#define TRACE_REG_TM_TRACE_ADDR_D 0x090c +#define TRACE_REG_TM_TRACE_ENABLE_NPK 0x0910 +#define TRACE_REG_TM_TRACE_ENABLE_DDR 0x0914 +#define TRACE_REG_TM_TRACE_PER_PC 0x0918 +#define TRACE_REG_TM_TRACE_PER_BRANCH 0x091c +#define 
TRACE_REG_TM_TRACE_HEADER 0x0920 +#define TRACE_REG_TM_TRACE_CFG 0x0924 +#define TRACE_REG_TM_TRACE_LOST_PACKETS 0x0928 +#define TRACE_REG_TM_TRACE_LP_CLEAR 0x092c +#define TRACE_REG_TM_TRACE_LMRUN_MASK 0x0930 +#define TRACE_REG_TM_TRACE_LMRUN_PC_LOW 0x0934 +#define TRACE_REG_TM_TRACE_LMRUN_PC_HIGH 0x0938 +#define TRACE_REG_TM_TRACE_MMIO_SEL 0x093c +#define TRACE_REG_TM_TRACE_MMIO_WP0_LOW 0x0940 +#define TRACE_REG_TM_TRACE_MMIO_WP1_LOW 0x0944 +#define TRACE_REG_TM_TRACE_MMIO_WP2_LOW 0x0948 +#define TRACE_REG_TM_TRACE_MMIO_WP3_LOW 0x094c +#define TRACE_REG_TM_TRACE_MMIO_WP0_HIGH 0x0950 +#define TRACE_REG_TM_TRACE_MMIO_WP1_HIGH 0x0954 +#define TRACE_REG_TM_TRACE_MMIO_WP2_HIGH 0x0958 +#define TRACE_REG_TM_TRACE_MMIO_WP3_HIGH 0x095c +#define TRACE_REG_TM_FWTRACE_FIRST 0x0A00 +#define TRACE_REG_TM_FWTRACE_MIDDLE 0x0A04 +#define TRACE_REG_TM_FWTRACE_LAST 0x0A08 + +#define TRACE_REG_CREATE_TM_REGISTER_LIST { \ + TRACE_REG_TM_TRACE_ADDR_A, \ + TRACE_REG_TM_TRACE_ADDR_B, \ + TRACE_REG_TM_TRACE_ADDR_C, \ + TRACE_REG_TM_TRACE_ADDR_D, \ + TRACE_REG_TM_TRACE_ENABLE_NPK, \ + TRACE_REG_TM_TRACE_ENABLE_DDR, \ + TRACE_REG_TM_TRACE_PER_PC, \ + TRACE_REG_TM_TRACE_PER_BRANCH, \ + TRACE_REG_TM_TRACE_HEADER, \ + TRACE_REG_TM_TRACE_CFG, \ + TRACE_REG_TM_TRACE_LOST_PACKETS, \ + TRACE_REG_TM_TRACE_LP_CLEAR, \ + TRACE_REG_TM_TRACE_LMRUN_MASK, \ + TRACE_REG_TM_TRACE_LMRUN_PC_LOW, \ + TRACE_REG_TM_TRACE_LMRUN_PC_HIGH, \ + TRACE_REG_TM_TRACE_MMIO_SEL, \ + TRACE_REG_TM_TRACE_MMIO_WP0_LOW, \ + TRACE_REG_TM_TRACE_MMIO_WP1_LOW, \ + TRACE_REG_TM_TRACE_MMIO_WP2_LOW, \ + TRACE_REG_TM_TRACE_MMIO_WP3_LOW, \ + TRACE_REG_TM_TRACE_MMIO_WP0_HIGH, \ + TRACE_REG_TM_TRACE_MMIO_WP1_HIGH, \ + TRACE_REG_TM_TRACE_MMIO_WP2_HIGH, \ + TRACE_REG_TM_TRACE_MMIO_WP3_HIGH, \ + TRACE_REG_END_MARK \ +} + +/* + * Following exists only in (I)SP address space: + * TM_FWTRACE_FIRST, TM_FWTRACE_MIDDLE, TM_FWTRACE_LAST + */ + +#define TRACE_REG_GPC_RESET 0x000 +#define TRACE_REG_GPC_OVERALL_ENABLE 0x004 +#define TRACE_REG_GPC_TRACE_HEADER 0x008 +#define TRACE_REG_GPC_TRACE_ADDRESS 0x00C +#define TRACE_REG_GPC_TRACE_NPK_EN 0x010 +#define TRACE_REG_GPC_TRACE_DDR_EN 0x014 +#define TRACE_REG_GPC_TRACE_LPKT_CLEAR 0x018 +#define TRACE_REG_GPC_TRACE_LPKT 0x01C + +#define TRACE_REG_GPC_ENABLE_ID0 0x020 +#define TRACE_REG_GPC_ENABLE_ID1 0x024 +#define TRACE_REG_GPC_ENABLE_ID2 0x028 +#define TRACE_REG_GPC_ENABLE_ID3 0x02c + +#define TRACE_REG_GPC_VALUE_ID0 0x030 +#define TRACE_REG_GPC_VALUE_ID1 0x034 +#define TRACE_REG_GPC_VALUE_ID2 0x038 +#define TRACE_REG_GPC_VALUE_ID3 0x03c + +#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID0 0x040 +#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID1 0x044 +#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID2 0x048 +#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID3 0x04c + +#define TRACE_REG_GPC_CNT_START_SELECT_ID0 0x050 +#define TRACE_REG_GPC_CNT_START_SELECT_ID1 0x054 +#define TRACE_REG_GPC_CNT_START_SELECT_ID2 0x058 +#define TRACE_REG_GPC_CNT_START_SELECT_ID3 0x05c + +#define TRACE_REG_GPC_CNT_STOP_SELECT_ID0 0x060 +#define TRACE_REG_GPC_CNT_STOP_SELECT_ID1 0x064 +#define TRACE_REG_GPC_CNT_STOP_SELECT_ID2 0x068 +#define TRACE_REG_GPC_CNT_STOP_SELECT_ID3 0x06c + +#define TRACE_REG_GPC_CNT_MSG_SELECT_ID0 0x070 +#define TRACE_REG_GPC_CNT_MSG_SELECT_ID1 0x074 +#define TRACE_REG_GPC_CNT_MSG_SELECT_ID2 0x078 +#define TRACE_REG_GPC_CNT_MSG_SELECT_ID3 0x07c + +#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID0 0x080 +#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID1 0x084 +#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID2 0x088 +#define 
TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID3 0x08c + +#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID0 0x090 +#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID1 0x094 +#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID2 0x098 +#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID3 0x09c + +#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID0 0x0a0 +#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID1 0x0a4 +#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID2 0x0a8 +#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID3 0x0ac + +#define TRACE_REG_GPC_IRQ_ENABLE_ID0 0x0b0 +#define TRACE_REG_GPC_IRQ_ENABLE_ID1 0x0b4 +#define TRACE_REG_GPC_IRQ_ENABLE_ID2 0x0b8 +#define TRACE_REG_GPC_IRQ_ENABLE_ID3 0x0bc + +#define TRACE_REG_CREATE_GPC_REGISTER_LIST { \ + TRACE_REG_GPC_RESET, \ + TRACE_REG_GPC_OVERALL_ENABLE, \ + TRACE_REG_GPC_TRACE_HEADER, \ + TRACE_REG_GPC_TRACE_ADDRESS, \ + TRACE_REG_GPC_TRACE_NPK_EN, \ + TRACE_REG_GPC_TRACE_DDR_EN, \ + TRACE_REG_GPC_TRACE_LPKT_CLEAR, \ + TRACE_REG_GPC_TRACE_LPKT, \ + TRACE_REG_GPC_ENABLE_ID0, \ + TRACE_REG_GPC_ENABLE_ID1, \ + TRACE_REG_GPC_ENABLE_ID2, \ + TRACE_REG_GPC_ENABLE_ID3, \ + TRACE_REG_GPC_VALUE_ID0, \ + TRACE_REG_GPC_VALUE_ID1, \ + TRACE_REG_GPC_VALUE_ID2, \ + TRACE_REG_GPC_VALUE_ID3, \ + TRACE_REG_GPC_CNT_INPUT_SELECT_ID0, \ + TRACE_REG_GPC_CNT_INPUT_SELECT_ID1, \ + TRACE_REG_GPC_CNT_INPUT_SELECT_ID2, \ + TRACE_REG_GPC_CNT_INPUT_SELECT_ID3, \ + TRACE_REG_GPC_CNT_START_SELECT_ID0, \ + TRACE_REG_GPC_CNT_START_SELECT_ID1, \ + TRACE_REG_GPC_CNT_START_SELECT_ID2, \ + TRACE_REG_GPC_CNT_START_SELECT_ID3, \ + TRACE_REG_GPC_CNT_STOP_SELECT_ID0, \ + TRACE_REG_GPC_CNT_STOP_SELECT_ID1, \ + TRACE_REG_GPC_CNT_STOP_SELECT_ID2, \ + TRACE_REG_GPC_CNT_STOP_SELECT_ID3, \ + TRACE_REG_GPC_CNT_MSG_SELECT_ID0, \ + TRACE_REG_GPC_CNT_MSG_SELECT_ID1, \ + TRACE_REG_GPC_CNT_MSG_SELECT_ID2, \ + TRACE_REG_GPC_CNT_MSG_SELECT_ID3, \ + TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID0, \ + TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID1, \ + TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID2, \ + TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID3, \ + TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID0, \ + TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID1, \ + TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID2, \ + TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID3, \ + TRACE_REG_GPC_IRQ_TIMER_SELECT_ID0, \ + TRACE_REG_GPC_IRQ_TIMER_SELECT_ID1, \ + TRACE_REG_GPC_IRQ_TIMER_SELECT_ID2, \ + TRACE_REG_GPC_IRQ_TIMER_SELECT_ID3, \ + TRACE_REG_GPC_IRQ_ENABLE_ID0, \ + TRACE_REG_GPC_IRQ_ENABLE_ID1, \ + TRACE_REG_GPC_IRQ_ENABLE_ID2, \ + TRACE_REG_GPC_IRQ_ENABLE_ID3, \ + TRACE_REG_END_MARK \ +} + +/* CSI2 legacy receiver trace registers */ +#define TRACE_REG_CSI2_TM_RESET_REG_IDX 0x0000 +#define TRACE_REG_CSI2_TM_OVERALL_ENABLE_REG_IDX 0x0004 +#define TRACE_REG_CSI2_TM_TRACE_HEADER_REG_IDX 0x0008 +#define TRACE_REG_CSI2_TM_TRACE_ADDRESS_REG_IDX 0x000c +#define TRACE_REG_CSI2_TM_TRACE_HEADER_VAL 0xf +#define TRACE_REG_CSI2_TM_TRACE_ADDRESS_VAL 0x100218 +#define TRACE_REG_CSI2_TM_MONITOR_ID 0x8 + +/* 0 <= n <= 3 */ +#define TRACE_REG_CSI2_TM_TRACE_NPK_EN_REG_IDX_P(n) (0x0010 + (n) * 4) +#define TRACE_REG_CSI2_TM_TRACE_DDR_EN_REG_IDX_P(n) (0x0020 + (n) * 4) +#define TRACE_CSI2_TM_EVENT_FE(vc) (BIT(0) << (vc * 6)) +#define TRACE_CSI2_TM_EVENT_FS(vc) (BIT(1) << (vc * 6)) +#define TRACE_CSI2_TM_EVENT_PE(vc) (BIT(2) << (vc * 6)) +#define TRACE_CSI2_TM_EVENT_PS(vc) (BIT(3) << (vc * 6)) +#define TRACE_CSI2_TM_EVENT_LE(vc) (BIT(4) << (vc * 6)) +#define TRACE_CSI2_TM_EVENT_LS(vc) (BIT(5) << (vc * 6)) + +#define TRACE_REG_CSI2_TM_TRACE_LPKT_CLEAR_REG_IDX 0x0030 +#define TRACE_REG_CSI2_TM_TRACE_LPKT_REG_IDX 0x0034 + +/* 0 <= n <= 7 */ +#define TRACE_REG_CSI2_TM_ENABLE_REG_IDn(n) 
(0x0038 + (n) * 4) +#define TRACE_REG_CSI2_TM_VALUE_REG_IDn(n) (0x0058 + (n) * 4) +#define TRACE_REG_CSI2_TM_CNT_INPUT_SELECT_REG_IDn(n) (0x0078 + (n) * 4) +#define TRACE_REG_CSI2_TM_CNT_START_SELECT_REG_IDn(n) (0x0098 + (n) * 4) +#define TRACE_REG_CSI2_TM_CNT_STOP_SELECT_REG_IDn(n) (0x00b8 + (n) * 4) +#define TRACE_REG_CSI2_TM_IRQ_TRIGGER_VALUE_REG_IDn(n) (0x00d8 + (n) * 4) +#define TRACE_REG_CSI2_TM_IRQ_TIMER_SELECT_REG_IDn(n) (0x00f8 + (n) * 4) +#define TRACE_REG_CSI2_TM_IRQ_ENABLE_REG_IDn(n) (0x0118 + (n) * 4) + +/* CSI2_3PH combo receiver trace registers */ +#define TRACE_REG_CSI2_3PH_TM_RESET_REG_IDX 0x0000 +#define TRACE_REG_CSI2_3PH_TM_OVERALL_ENABLE_REG_IDX 0x0004 +#define TRACE_REG_CSI2_3PH_TM_TRACE_HEADER_REG_IDX 0x0008 +#define TRACE_REG_CSI2_3PH_TM_TRACE_ADDRESS_REG_IDX 0x000c +#define TRACE_REG_CSI2_3PH_TM_TRACE_ADDRESS_VAL 0x100258 +#define TRACE_REG_CSI2_3PH_TM_MONITOR_ID 0x9 + +/* 0 <= n <= 5 */ +#define TRACE_REG_CSI2_3PH_TM_TRACE_NPK_EN_REG_IDX_P(n) (0x0010 + (n) * 4) +#define TRACE_REG_CSI2_3PH_TM_TRACE_DDR_EN_REG_IDX_P(n) (0x0028 + (n) * 4) + +#define TRACE_REG_CSI2_3PH_TM_TRACE_LPKT_CLEAR_REG_IDX 0x0040 +#define TRACE_REG_CSI2_3PH_TM_TRACE_LPKT_REG_IDX 0x0044 + +/* 0 <= n <= 7 */ +#define TRACE_REG_CSI2_3PH_TM_ENABLE_REG_IDn(n) (0x0048 + (n) * 4) +#define TRACE_REG_CSI2_3PH_TM_VALUE_REG_IDn(n) (0x0068 + (n) * 4) +#define TRACE_REG_CSI2_3PH_TM_CNT_INPUT_SELECT_REG_IDn(n) (0x0088 + (n) * 4) +#define TRACE_REG_CSI2_3PH_TM_CNT_START_SELECT_REG_IDn(n) (0x00a8 + (n) * 4) +#define TRACE_REG_CSI2_3PH_TM_CNT_STOP_SELECT_REG_IDn(n) (0x00c8 + (n) * 4) +#define TRACE_REG_CSI2_3PH_TM_IRQ_TRIGGER_VALUE_REG_IDn(n) (0x00e8 + (n) * 4) +#define TRACE_REG_CSI2_3PH_TM_IRQ_TIMER_SELECT_REG_IDn(n) (0x0108 + (n) * 4) +#define TRACE_REG_CSI2_3PH_TM_IRQ_ENABLE_REG_IDn(n) (0x0128 + (n) * 4) + +/* SIG2CIO trace monitors */ +#define TRACE_REG_SIG2CIO_ADDRESS 0x0000 +#define TRACE_REG_SIG2CIO_WDATA 0x0004 +#define TRACE_REG_SIG2CIO_MASK 0x0008 +#define TRACE_REG_SIG2CIO_GROUP_CFG 0x000c +#define TRACE_REG_SIG2CIO_STICKY 0x0010 +#define TRACE_REG_SIG2CIO_RST_STICKY 0x0014 +#define TRACE_REG_SIG2CIO_MANUAL_RST_STICKY 0x0018 +#define TRACE_REG_SIG2CIO_STATUS 0x001c +/* Size of on SIG2CIO block */ +#define TRACE_REG_SIG2CIO_SIZE_OF 0x0020 + +struct ipu_trace; +struct ipu_subsystem_trace_config; + +enum ipu_trace_block_type { + IPU_TRACE_BLOCK_TUN = 0, /* Trace unit */ + IPU_TRACE_BLOCK_TM, /* Trace monitor */ + IPU_TRACE_BLOCK_GPC, /* General purpose control */ + IPU_TRACE_CSI2, /* CSI2 legacy receiver */ + IPU_TRACE_CSI2_3PH, /* CSI2 combo receiver */ + IPU_TRACE_SIG2CIOS, + IPU_TRACE_TIMER_RST, /* Trace reset control timer */ + IPU_TRACE_BLOCK_END /* End of list */ +}; + +struct ipu_trace_block { + u32 offset; /* Offset to block inside subsystem */ + enum ipu_trace_block_type type; +}; + +int ipu_trace_add(struct ipu_device *isp); +int ipu_trace_debugfs_add(struct ipu_device *isp, struct dentry *dir); +void ipu_trace_release(struct ipu_device *isp); +int ipu_trace_init(struct ipu_device *isp, void __iomem *base, + struct device *dev, struct ipu_trace_block *blocks); +void ipu_trace_restore(struct device *dev); +void ipu_trace_uninit(struct device *dev); +void ipu_trace_stop(struct device *dev); +int ipu_trace_get_timer(struct device *dev, u64 *timer); +#endif diff --git a/drivers/media/pci/intel/ipu-wrapper.c b/drivers/media/pci/intel/ipu-wrapper.c new file mode 100644 index 000000000000..9e06887dd857 --- /dev/null +++ b/drivers/media/pci/intel/ipu-wrapper.c @@ -0,0 +1,514 @@ +// 
SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include + +#include +#include +#include +#include +#include + +#include "ipu-bus.h" +#include "ipu-dma.h" +#include "ipu-mmu.h" +#include "ipu-wrapper.h" +#include "vied_subsystem_access.h" +#include "vied_subsystem_access_initialization.h" +#include "shared_memory_map.h" +#include "shared_memory_access.h" + +struct wrapper_base { + void __iomem *sys_base; + const struct dma_map_ops *ops; + /* Protect shared memory buffers */ + spinlock_t lock; + struct list_head buffers; + u32 css_map_done; + struct device *dev; +}; + +static struct wrapper_base isys; +static struct wrapper_base psys; + +struct my_css_memory_buffer_item { + struct list_head list; + dma_addr_t iova; + unsigned long *addr; + size_t bytes; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; +#else + unsigned long attrs; +#endif +}; + +static struct wrapper_base *get_mem_sub_system(int mmid) +{ + if (mmid == ISYS_MMID) + return &isys; + + if (mmid == PSYS_MMID) + return &psys; + WARN(1, "Invalid mem subsystem"); + return NULL; +} + +static struct wrapper_base *get_sub_system(int ssid) +{ + if (ssid == ISYS_SSID) + return &isys; + + if (ssid == PSYS_SSID) + return &psys; + WARN(1, "Invalid subsystem"); + return NULL; +} + +/* + * Subsystem access functions to access IUNIT MMIO space + */ +static void *host_addr(int ssid, u32 addr) +{ + if (ssid == ISYS_SSID) + return isys.sys_base + addr; + else if (ssid == PSYS_SSID) + return psys.sys_base + addr; + /* + * Calling WARN_ON is a bit brutal but better to capture wrong register + * accesses immediately. We have no way to return an error here. + */ + WARN_ON(1); + + return NULL; +} + +void vied_subsystem_store_32(unsigned int ssid, u32 addr, u32 data) +{ + writel(data, host_addr(ssid, addr)); +} + +void vied_subsystem_store_16(unsigned int ssid, u32 addr, u16 data) +{ + writew(data, host_addr(ssid, addr)); +} + +void vied_subsystem_store_8(unsigned int ssid, u32 addr, u8 data) +{ + writeb(data, host_addr(ssid, addr)); +} + +void vied_subsystem_store(unsigned int ssid, + u32 addr, const void *data, unsigned int size) +{ + void *dst = host_addr(ssid, addr); + + dev_dbg(get_sub_system(ssid)->dev, "access: %s 0x%x size: %d\n", + __func__, addr, size); + + for (; size >= sizeof(u32); size -= sizeof(u32), + dst += sizeof(u32), data += sizeof(u32)) { + writel(*(u32 *) data, dst); + } + if (size >= sizeof(u16)) { + writew(*(u16 *) data, dst); + size -= sizeof(u16), dst += sizeof(u16), data += sizeof(u16); + } + if (size) + writeb(*(u8 *) data, dst); +} + +u32 vied_subsystem_load_32(unsigned int ssid, u32 addr) +{ + return readl(host_addr(ssid, addr)); +} + +u16 vied_subsystem_load_16(unsigned int ssid, u32 addr) +{ + return readw(host_addr(ssid, addr)); +} + +u8 vied_subsystem_load_8(unsigned int ssid, u32 addr) +{ + return readb(host_addr(ssid, addr)); +} + +void vied_subsystem_load(unsigned int ssid, u32 addr, + void *data, unsigned int size) +{ + void *src = host_addr(ssid, addr); + + dev_dbg(get_sub_system(ssid)->dev, "access: %s 0x%x size: %d\n", + __func__, addr, size); + + for (; size >= sizeof(u32); size -= sizeof(u32), + src += sizeof(u32), data += sizeof(u32)) + *(u32 *) data = readl(src); + if (size >= sizeof(u16)) { + *(u16 *) data = readw(src); + size -= sizeof(u16), src += sizeof(u16), data += sizeof(u16); + } + if (size) + *(u8 *) data = readb(src); +} + +/* + * Initialize base address for subsystem + */ +void vied_subsystem_access_initialize(unsigned int 
system)
+{
+}
+
+/*
+ * Shared memory access code written by Dash Biswait,
+ * copied from the FPGA environment
+ */
+
+/**
+ * \brief Initialize the shared memory interface administration on the host.
+ * \param mmid: id of ddr memory
+ * \param host_ddr_addr: physical address of memory as seen from host
+ * \param memory_size: size of ddr memory in bytes
+ * \param ps: size of page in bytes (for instance 4096)
+ */
+int shared_memory_allocation_initialize(unsigned int mmid, u64 host_ddr_addr,
+ size_t memory_size, size_t ps)
+{
+ return 0;
+}
+
+/**
+ * \brief De-initialize the shared memory allocation administration on the host.
+ */
+void shared_memory_allocation_uninitialize(unsigned int mmid)
+{
+}
+
+/**
+ * \brief Initialize the shared memory mapping administration on the host.
+ * \param ssid: id of subsystem
+ * \param mmid: id of ddr memory
+ * \param mmu_ps: size of page in bits
+ * \param mmu_pnrs: page numbers
+ * \param ddr_addr: base address
+ * \param inv_tlb: callback to invalidate the MMU TLB
+ * \param sbt: callback to set the L1 page table base address
+ */
+int shared_memory_map_initialize(unsigned int ssid, unsigned int mmid,
+ size_t mmu_ps, size_t mmu_pnrs, u64 ddr_addr,
+ shared_memory_invalidate_mmu_tlb inv_tlb,
+ shared_memory_set_page_table_base_address sbt)
+{
+ return 0;
+}
+
+/**
+ * \brief De-initialize the shared memory mapping administration on the host.
+ */
+void shared_memory_map_uninitialize(unsigned int ssid, unsigned int mmid)
+{
+}
+
+static u8 alloc_cookie;
+
+/**
+ * \brief Allocate (DDR) shared memory space and return a host virtual address.
+ * \return 0 when insufficient memory is available
+ */
+u64 shared_memory_alloc(unsigned int mmid, size_t bytes)
+{
+ struct wrapper_base *mine = get_mem_sub_system(mmid);
+ struct my_css_memory_buffer_item *buf;
+ unsigned long flags;
+
+ dev_dbg(mine->dev, "%s: in, size: %zu\n", __func__, bytes);
+
+ if (!bytes)
+ return (unsigned long)&alloc_cookie;
+
+ might_sleep();
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ /* alloc using ipu dma driver */
+ buf->bytes = PAGE_ALIGN(bytes);
+
+ buf->addr = dma_alloc_attrs(mine->dev, buf->bytes, &buf->iova,
+ GFP_KERNEL,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
+ NULL
+#else
+ 0
+#endif
+ );
+ if (!buf->addr) {
+ kfree(buf);
+ return 0;
+ }
+
+ spin_lock_irqsave(&mine->lock, flags);
+ list_add(&buf->list, &mine->buffers);
+ spin_unlock_irqrestore(&mine->lock, flags);
+
+ return (unsigned long)buf->addr;
+}
+
+/**
+ * \brief Free (DDR) shared memory space.
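 *
 * Editor's illustrative sketch, not part of the original patch: a typical
 * allocate/map/use/free round trip through this wrapper (the mapped iova
 * would be handed to the CSS firmware between map and unmap), using the
 * ISYS_SSID/ISYS_MMID constants from ipu-wrapper.h:
 *
 *	u64 host = shared_memory_alloc(ISYS_MMID, 4096);
 *
 *	if (host) {
 *		u32 iova = shared_memory_map(ISYS_SSID, ISYS_MMID, host);
 *
 *		shared_memory_store_32(ISYS_MMID, host, 0x12345678);
 *		shared_memory_unmap(ISYS_SSID, ISYS_MMID, iova);
 *		shared_memory_free(ISYS_MMID, host);
 *	}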
+ */ +void shared_memory_free(unsigned int mmid, u64 addr) +{ + struct wrapper_base *mine = get_mem_sub_system(mmid); + struct my_css_memory_buffer_item *buf = NULL; + unsigned long flags; + + if ((void *)(unsigned long)addr == &alloc_cookie) + return; + + might_sleep(); + + dev_dbg(mine->dev, "looking for iova %8.8llx\n", addr); + + spin_lock_irqsave(&mine->lock, flags); + list_for_each_entry(buf, &mine->buffers, list) { + dev_dbg(mine->dev, "buffer addr %8.8lx\n", (long)buf->addr); + if ((long)buf->addr != addr) + continue; + + dev_dbg(mine->dev, "found it!\n"); + list_del(&buf->list); + spin_unlock_irqrestore(&mine->lock, flags); + dma_free_attrs(mine->dev, buf->bytes, buf->addr, buf->iova, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + &buf->attrs +#else + buf->attrs +#endif + ); + kfree(buf); + return; + } + dev_warn(mine->dev, "Can't find mem object %8.8llx\n", addr); + spin_unlock_irqrestore(&mine->lock, flags); +} + +/** + * \brief Convert a host virtual address to a CSS virtual address and + * \update the MMU. + */ +u32 shared_memory_map(unsigned int ssid, unsigned int mmid, u64 addr) +{ + struct wrapper_base *mine = get_mem_sub_system(mmid); + struct my_css_memory_buffer_item *buf = NULL; + unsigned long flags; + + if ((void *)(unsigned long)addr == &alloc_cookie) + return 0; + + spin_lock_irqsave(&mine->lock, flags); + list_for_each_entry(buf, &mine->buffers, list) { + dev_dbg(mine->dev, "%s %8.8lx\n", __func__, (long)buf->addr); + if ((long)buf->addr != addr) + continue; + + dev_dbg(mine->dev, "mapped!!\n"); + spin_unlock_irqrestore(&mine->lock, flags); + return buf->iova; + } + dev_err(mine->dev, "Can't find mapped object %8.8llx\n", addr); + spin_unlock_irqrestore(&mine->lock, flags); + return 0; +} + +/** + * \brief Free a CSS virtual address and update the MMU. + */ +void shared_memory_unmap(unsigned int ssid, unsigned int mmid, u32 addr) +{ +} + +/** + * \brief Store a byte into (DDR) shared memory space using a host + * \virtual address + */ +void shared_memory_store_8(unsigned int mmid, u64 addr, u8 data) +{ + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%llx data = 0x%x\n", + __func__, addr, data); + + *((u8 *)(unsigned long) addr) = data; + /*Invalidate the cache lines to flush the content to ddr. */ + clflush_cache_range((void *)(unsigned long)addr, sizeof(u8)); +} + +/** + * \brief Store a 16-bit word into (DDR) shared memory space using a host + * \virtual address + */ +void shared_memory_store_16(unsigned int mmid, u64 addr, u16 data) +{ + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%llx data = 0x%x\n", + __func__, addr, data); + + *((u16 *)(unsigned long) addr) = data; + /*Invalidate the cache lines to flush the content to ddr. */ + clflush_cache_range((void *)(unsigned long) addr, sizeof(u16)); +} + +/** + * \brief Store a 32-bit word into (DDR) shared memory space using a host + * \virtual address + */ +void shared_memory_store_32(unsigned int mmid, u64 addr, u32 data) +{ + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%llx data = 0x%x\n", + __func__, addr, data); + + *((u32 *)(unsigned long) addr) = data; + /* Invalidate the cache lines to flush the content to ddr. 
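 * (Editor's note, added for clarity: clflush_cache_range() writes the
 * dirty lines back and invalidates them; the writeback is what makes the
 * CPU-side store visible to a device that reads the data from DDR.)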
*/ + clflush_cache_range((void *)(unsigned long) addr, sizeof(u32)); +} + +/** + * \brief Store a number of bytes into (DDR) shared memory space using a host + * \virtual address + */ +void shared_memory_store(unsigned int mmid, u64 addr, const void *data, + size_t bytes) +{ + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%lx bytes = 0x%zx\n", __func__, + (unsigned long)addr, bytes); + + if (!data) { + dev_err(get_mem_sub_system(mmid)->dev, + "%s: data ptr is null\n", __func__); + } else { + const u8 *pdata = data; + u8 *paddr = (u8 *)(unsigned long)addr; + size_t i = 0; + + for (; i < bytes; ++i) + *paddr++ = *pdata++; + + /* Invalidate the cache lines to flush the content to ddr. */ + clflush_cache_range((void *)(unsigned long) addr, bytes); + } +} + +/** + * \brief Set a number of bytes of (DDR) shared memory space to 0 using a host + * \virtual address + */ +void shared_memory_zero(unsigned int mmid, u64 addr, size_t bytes) +{ + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%llx data = 0x%zx\n", + __func__, (unsigned long long)addr, bytes); + + memset((void *)(unsigned long)addr, 0, bytes); + clflush_cache_range((void *)(unsigned long)addr, bytes); +} + +/** + * \brief Load a byte from (DDR) shared memory space using a host + * \virtual address + */ +u8 shared_memory_load_8(unsigned int mmid, u64 addr) +{ + u8 data = 0; + + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%llx\n", __func__, addr); + + /* Invalidate the cache lines to flush the content to ddr. */ + clflush_cache_range((void *)(unsigned long)addr, sizeof(u8)); + data = *(u8 *)(unsigned long) addr; + return data; +} + +/** + * \brief Load a 16-bit word from (DDR) shared memory space using a host + * \virtual address + */ +u16 shared_memory_load_16(unsigned int mmid, u64 addr) +{ + u16 data = 0; + + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%llx\n", __func__, addr); + + /* Invalidate the cache lines to flush the content to ddr. */ + clflush_cache_range((void *)(unsigned long)addr, sizeof(u16)); + data = *(u16 *)(unsigned long)addr; + return data; +} + +/** + * \brief Load a 32-bit word from (DDR) shared memory space using a host + * \virtual address + */ +u32 shared_memory_load_32(unsigned int mmid, u64 addr) +{ + u32 data = 0; + + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%llx\n", __func__, addr); + + /* Invalidate the cache lines to flush the content to ddr. */ + clflush_cache_range((void *)(unsigned long)addr, sizeof(u32)); + data = *(u32 *)(unsigned long)addr; + return data; +} + +/** + * \brief Load a number of bytes from (DDR) shared memory space using a host + * \virtual address + */ +void shared_memory_load(unsigned int mmid, u64 addr, void *data, size_t bytes) +{ + dev_dbg(get_mem_sub_system(mmid)->dev, + "access: %s: Enter addr = 0x%lx bytes = 0x%zx\n", __func__, + (unsigned long)addr, bytes); + + if (!data) { + dev_err(get_mem_sub_system(mmid)->dev, + "%s: data ptr is null\n", __func__); + + } else { + u8 *pdata = data; + u8 *paddr = (u8 *)(unsigned long)addr; + size_t i = 0; + + /* Invalidate the cache lines to flush the content to ddr. 
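 * (Editor's note, added for clarity: on the load side, the flush drops any
 * cached copies first, so the byte loop below reads fresh data from DDR
 * rather than stale cache lines.)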
*/ + clflush_cache_range((void *)(unsigned long)addr, bytes); + for (; i < bytes; ++i) + *pdata++ = *paddr++; + } +} + +static int init_wrapper(struct wrapper_base *sys) +{ + INIT_LIST_HEAD(&sys->buffers); + spin_lock_init(&sys->lock); + return 0; +} + +/* + * Wrapper driver set base address for library use + */ +void ipu_wrapper_init(int mmid, struct device *dev, void __iomem *base) +{ + struct wrapper_base *sys = get_mem_sub_system(mmid); + + init_wrapper(sys); + sys->dev = dev; + sys->sys_base = base; +} diff --git a/drivers/media/pci/intel/ipu-wrapper.h b/drivers/media/pci/intel/ipu-wrapper.h new file mode 100644 index 000000000000..b7df285e1142 --- /dev/null +++ b/drivers/media/pci/intel/ipu-wrapper.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_WRAPPER_H +#define IPU_WRAPPER_H + +#define ISYS_SSID 1 +#define PSYS_SSID 0 + +#define ISYS_MMID 1 +#define PSYS_MMID 0 + +struct device; + +void ipu_wrapper_init(int mmid, struct device *dev, void __iomem *base); + +#endif /* IPU_WRAPPER_H */ diff --git a/drivers/media/pci/intel/ipu.c b/drivers/media/pci/intel/ipu.c new file mode 100644 index 000000000000..cccd6d2b7ba2 --- /dev/null +++ b/drivers/media/pci/intel/ipu.c @@ -0,0 +1,732 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipu.h" +#include "ipu-buttress.h" +#include "ipu-platform.h" +#include "ipu-platform-buttress-regs.h" +#include "ipu-cpd.h" +#include "ipu-pdata.h" +#include "ipu-bus.h" +#include "ipu-mmu.h" +#include "ipu-platform-regs.h" +#include "ipu-platform-isys-csi2-reg.h" +#include "ipu-trace.h" + +#define IPU_PCI_BAR 0 + +static struct ipu_bus_device *ipu_mmu_init(struct pci_dev *pdev, + struct device *parent, + struct ipu_buttress_ctrl *ctrl, + void __iomem *base, + const struct ipu_hw_variants *hw, + unsigned int nr, int mmid) +{ + struct ipu_mmu_pdata *pdata = + devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + unsigned int i; + + if (!pdata) + return ERR_PTR(-ENOMEM); + + if (hw->nr_mmus > IPU_MMU_MAX_DEVICES) + return ERR_PTR(-EINVAL); + + for (i = 0; i < hw->nr_mmus; i++) { + struct ipu_mmu_hw *pdata_mmu = &pdata->mmu_hw[i]; + const struct ipu_mmu_hw *src_mmu = &hw->mmu_hw[i]; + + if (src_mmu->nr_l1streams > IPU_MMU_MAX_TLB_L1_STREAMS || + src_mmu->nr_l2streams > IPU_MMU_MAX_TLB_L2_STREAMS) + return ERR_PTR(-EINVAL); + + *pdata_mmu = *src_mmu; + pdata_mmu->base = base + src_mmu->offset; + } + + pdata->nr_mmus = hw->nr_mmus; + pdata->mmid = mmid; + + return ipu_bus_add_device(pdev, parent, pdata, NULL, ctrl, + IPU_MMU_NAME, nr); +} + +static struct ipu_bus_device *ipu_isys_init(struct pci_dev *pdev, + struct device *parent, + struct device *iommu, + void __iomem *base, + const struct ipu_isys_internal_pdata + *ipdata, + struct ipu_isys_subdev_pdata + *spdata, unsigned int nr) +{ + struct ipu_isys_pdata *pdata = + devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->base = base; + pdata->ipdata = ipdata; + pdata->spdata = spdata; + + return ipu_bus_add_device(pdev, parent, pdata, iommu, NULL, + IPU_ISYS_NAME, nr); +} + +static struct ipu_bus_device *ipu_psys_init(struct pci_dev *pdev, + struct device *parent, + struct device *iommu, + void __iomem *base, + const struct ipu_psys_internal_pdata + *ipdata, unsigned int nr) +{ + struct ipu_psys_pdata *pdata = + 
devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->base = base; + pdata->ipdata = ipdata; + return ipu_bus_add_device(pdev, parent, pdata, iommu, NULL, + IPU_PSYS_NAME, nr); +} + +int ipu_fw_authenticate(void *data, u64 val) +{ + struct ipu_device *isp = data; + int ret; + + if (!isp->secure_mode) + return -EINVAL; + + ret = ipu_buttress_reset_authentication(isp); + if (ret) { + dev_err(&isp->pdev->dev, "Failed to reset authentication!\n"); + return ret; + } + + return ipu_buttress_authenticate(isp); +} +EXPORT_SYMBOL(ipu_fw_authenticate); +DEFINE_SIMPLE_ATTRIBUTE(authenticate_fops, NULL, ipu_fw_authenticate, "%llu\n"); + +#ifdef CONFIG_DEBUG_FS +static int resume_ipu_bus_device(struct ipu_bus_device *adev) +{ + struct device *dev = &adev->dev; + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + if (!pm || !pm->resume) + return -EIO; + + return pm->resume(dev); +} + +static int suspend_ipu_bus_device(struct ipu_bus_device *adev) +{ + struct device *dev = &adev->dev; + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + if (!pm || !pm->suspend) + return -EIO; + + return pm->suspend(dev); +} + +static int force_suspend_get(void *data, u64 *val) +{ + struct ipu_device *isp = data; + struct ipu_buttress *b = &isp->buttress; + + *val = b->force_suspend; + return 0; +} + +static int force_suspend_set(void *data, u64 val) +{ + struct ipu_device *isp = data; + struct ipu_buttress *b = &isp->buttress; + int ret = 0; + + if (val == b->force_suspend) + return 0; + + if (val) { + b->force_suspend = 1; + ret = suspend_ipu_bus_device(isp->psys_iommu); + if (ret) { + dev_err(&isp->pdev->dev, "Failed to suspend psys\n"); + return ret; + } + ret = suspend_ipu_bus_device(isp->isys_iommu); + if (ret) { + dev_err(&isp->pdev->dev, "Failed to suspend isys\n"); + return ret; + } + ret = pci_set_power_state(isp->pdev, PCI_D3hot); + if (ret) { + dev_err(&isp->pdev->dev, + "Failed to suspend IUnit PCI device\n"); + return ret; + } + } else { + ret = pci_set_power_state(isp->pdev, PCI_D0); + if (ret) { + dev_err(&isp->pdev->dev, + "Failed to suspend IUnit PCI device\n"); + return ret; + } + ret = resume_ipu_bus_device(isp->isys_iommu); + if (ret) { + dev_err(&isp->pdev->dev, "Failed to resume isys\n"); + return ret; + } + ret = resume_ipu_bus_device(isp->psys_iommu); + if (ret) { + dev_err(&isp->pdev->dev, "Failed to resume psys\n"); + return ret; + } + b->force_suspend = 0; + } + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(force_suspend_fops, force_suspend_get, + force_suspend_set, "%llu\n"); +/* + * The sysfs interface for reloading cpd fw is there only for debug purpose, + * and it must not be used when either isys or psys is in use. 
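 *
 * Editor's note, illustrative and not from the patch: despite the wording
 * above, this file lives in debugfs; it is created in ipu_init_debugfs()
 * below as "cpd_fw_reload" under a directory named after the PCI device,
 * so a reload would be triggered with something like:
 *
 *	echo 1 > /sys/kernel/debug/<pci device name>/cpd_fw_reload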
+ */ +static int cpd_fw_reload(void *data, u64 val) +{ + struct ipu_device *isp = data; + int rval = -EINVAL; + + if (isp->cpd_fw_reload) + rval = isp->cpd_fw_reload(isp); + if (!rval && isp->isys_fw_reload) + rval = isp->isys_fw_reload(isp); + + return rval; +} + +DEFINE_SIMPLE_ATTRIBUTE(cpd_fw_fops, NULL, cpd_fw_reload, "%llu\n"); + +#endif /* CONFIG_DEBUG_FS */ + +static int ipu_init_debugfs(struct ipu_device *isp) +{ +#ifdef CONFIG_DEBUG_FS + struct dentry *file; + struct dentry *dir; + + dir = debugfs_create_dir(pci_name(isp->pdev), NULL); + if (!dir) + return -ENOMEM; + + file = debugfs_create_file("force_suspend", 0700, dir, isp, + &force_suspend_fops); + if (!file) + goto err; + file = debugfs_create_file("authenticate", 0700, dir, isp, + &authenticate_fops); + if (!file) + goto err; + + file = debugfs_create_file("cpd_fw_reload", 0700, dir, isp, + &cpd_fw_fops); + if (!file) + goto err; + + if (ipu_trace_debugfs_add(isp, dir)) + goto err; + + isp->ipu_dir = dir; + + if (ipu_buttress_debugfs_init(isp)) + goto err; + + return 0; +err: + debugfs_remove_recursive(dir); + return -ENOMEM; +#else + return 0; +#endif /* CONFIG_DEBUG_FS */ +} + +static void ipu_remove_debugfs(struct ipu_device *isp) +{ + /* + * Since isys and psys debugfs dir will be created under ipu root dir, + * mark its dentry to NULL to avoid duplicate removal. + */ + debugfs_remove_recursive(isp->ipu_dir); + isp->ipu_dir = NULL; +} + +static int ipu_pci_config_setup(struct pci_dev *dev) +{ + u16 pci_command; + int rval = pci_enable_msi(dev); + + if (rval) { + dev_err(&dev->dev, "Failed to enable msi (%d)\n", rval); + return rval; + } + + pci_read_config_word(dev, PCI_COMMAND, &pci_command); + pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | + PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(dev, PCI_COMMAND, pci_command); + + return 0; +} + +static void ipu_configure_vc_mechanism(struct ipu_device *isp) +{ + u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL); + + if (IPU_BTRS_ARB_STALL_MODE_VC0 == IPU_BTRS_ARB_MODE_TYPE_STALL) + val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0; + else + val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0; + + if (IPU_BTRS_ARB_STALL_MODE_VC1 == IPU_BTRS_ARB_MODE_TYPE_STALL) + val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1; + else + val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1; + + writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL); +} + +static int ipu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct ipu_device *isp; + phys_addr_t phys; + void __iomem *const *iomap; + void __iomem *isys_base = NULL; + void __iomem *psys_base = NULL; + struct ipu_buttress_ctrl *isys_ctrl, *psys_ctrl; + unsigned int dma_mask = IPU_DMA_MASK; + int rval; + + trace_printk("B|%d|TMWK\n", current->pid); + + isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL); + if (!isp) + return -ENOMEM; + + isp->pdev = pdev; + INIT_LIST_HEAD(&isp->devices); + + rval = pcim_enable_device(pdev); + if (rval) { + dev_err(&pdev->dev, "Failed to enable CI ISP device (%d)\n", + rval); + trace_printk("E|TMWK\n"); + return rval; + } + + dev_info(&pdev->dev, "Device 0x%x (rev: 0x%x)\n", + pdev->device, pdev->revision); + + phys = pci_resource_start(pdev, IPU_PCI_BAR); + + rval = pcim_iomap_regions(pdev, + 1 << IPU_PCI_BAR, + pci_name(pdev)); + if (rval) { + dev_err(&pdev->dev, "Failed to I/O memory remapping (%d)\n", + rval); + trace_printk("E|TMWK\n"); + return rval; + } + dev_info(&pdev->dev, "physical base address 0x%llx\n", phys); + + iomap = pcim_iomap_table(pdev); + if (!iomap) { + 
dev_err(&pdev->dev, "Failed to iomap table (%d)\n", rval); + trace_printk("E|TMWK\n"); + return -ENODEV; + } + + isp->base = iomap[IPU_PCI_BAR]; + dev_info(&pdev->dev, "mapped as: 0x%p\n", isp->base); + + pci_set_drvdata(pdev, isp); + pci_set_master(pdev); + + isp->cpd_fw_name = IPU_CPD_FIRMWARE_NAME; + + isys_base = isp->base + isys_ipdata.hw_variant.offset; + psys_base = isp->base + psys_ipdata.hw_variant.offset; + + rval = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask)); + if (!rval) + rval = pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(dma_mask)); + if (rval) { + dev_err(&pdev->dev, "Failed to set DMA mask (%d)\n", rval); + trace_printk("E|TMWK\n"); + return rval; + } + + rval = ipu_pci_config_setup(pdev); + if (rval) { + trace_printk("E|TMWK\n"); + return rval; + } + + rval = devm_request_threaded_irq(&pdev->dev, pdev->irq, + ipu_buttress_isr, + ipu_buttress_isr_threaded, + IRQF_SHARED, IPU_NAME, isp); + if (rval) { + dev_err(&pdev->dev, "Requesting irq failed(%d)\n", rval); + trace_printk("E|TMWK\n"); + return rval; + } + + rval = ipu_buttress_init(isp); + if (rval) { + trace_printk("E|TMWK\n"); + return rval; + } + + dev_info(&pdev->dev, "cpd file name: %s\n", isp->cpd_fw_name); + + rval = request_firmware(&isp->cpd_fw, isp->cpd_fw_name, &pdev->dev); + if (rval) { + dev_err(&isp->pdev->dev, "Requesting signed firmware failed\n"); + trace_printk("E|TMWK\n"); + return rval; + } + + rval = ipu_cpd_validate_cpd_file(isp, isp->cpd_fw->data, + isp->cpd_fw->size); + if (rval) { + dev_err(&isp->pdev->dev, "Failed to validate cpd\n"); + goto out_ipu_bus_del_devices; + } + + rval = ipu_trace_add(isp); + if (rval) + dev_err(&pdev->dev, "Trace support not available\n"); + + /* + * NOTE Device hierarchy below is important to ensure proper + * runtime suspend and resume order. + * Also registration order is important to ensure proper + * suspend and resume order during system + * suspend. Registration order is as follows: + * isys_iommu->isys->psys_iommu->psys + */ + isys_ctrl = devm_kzalloc(&pdev->dev, sizeof(*isys_ctrl), GFP_KERNEL); + if (!isys_ctrl) { + rval = -ENOMEM; + goto out_ipu_bus_del_devices; + } + + /* Init butress control with default values based on the HW */ + memcpy(isys_ctrl, &isys_buttress_ctrl, sizeof(*isys_ctrl)); + + isp->isys_iommu = ipu_mmu_init(pdev, &pdev->dev, isys_ctrl, + isys_base, + &isys_ipdata.hw_variant, 0, ISYS_MMID); + rval = PTR_ERR(isp->isys_iommu); + if (IS_ERR(isp->isys_iommu)) { + dev_err(&pdev->dev, "can't create isys iommu device\n"); + rval = -ENOMEM; + goto out_ipu_bus_del_devices; + } + + isp->isys = ipu_isys_init(pdev, &isp->isys_iommu->dev, + &isp->isys_iommu->dev, isys_base, + &isys_ipdata, pdev->dev.platform_data, 0); + rval = PTR_ERR(isp->isys); + if (IS_ERR(isp->isys)) + goto out_ipu_bus_del_devices; + + psys_ctrl = devm_kzalloc(&pdev->dev, sizeof(*psys_ctrl), GFP_KERNEL); + if (!psys_ctrl) { + rval = -ENOMEM; + goto out_ipu_bus_del_devices; + } + + /* Init butress control with default values based on the HW */ + memcpy(psys_ctrl, &psys_buttress_ctrl, sizeof(*psys_ctrl)); + + isp->psys_iommu = ipu_mmu_init(pdev, + isp->isys_iommu ? 
+ &isp->isys_iommu->dev : + &pdev->dev, psys_ctrl, psys_base, + &psys_ipdata.hw_variant, 1, PSYS_MMID); + rval = PTR_ERR(isp->psys_iommu); + if (IS_ERR(isp->psys_iommu)) { + dev_err(&pdev->dev, "can't create psys iommu device\n"); + goto out_ipu_bus_del_devices; + } + + isp->psys = ipu_psys_init(pdev, &isp->psys_iommu->dev, + &isp->psys_iommu->dev, psys_base, + &psys_ipdata, 0); + rval = PTR_ERR(isp->psys); + if (IS_ERR(isp->psys)) + goto out_ipu_bus_del_devices; + + rval = ipu_init_debugfs(isp); + if (rval) { + dev_err(&pdev->dev, "Failed to initialize debugfs"); + goto out_ipu_bus_del_devices; + } + + /* Configure the arbitration mechanisms for VC requests */ + ipu_configure_vc_mechanism(isp); + + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_allow(&pdev->dev); + + trace_printk("E|TMWK\n"); + return 0; + +out_ipu_bus_del_devices: + ipu_bus_del_devices(pdev); + ipu_buttress_exit(isp); + release_firmware(isp->cpd_fw); + + trace_printk("E|TMWK\n"); + return rval; +} + +static void ipu_pci_remove(struct pci_dev *pdev) +{ + struct ipu_device *isp = pci_get_drvdata(pdev); + + ipu_remove_debugfs(isp); + ipu_trace_release(isp); + + ipu_bus_del_devices(pdev); + + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + + pci_release_regions(pdev); + pci_disable_device(pdev); + + ipu_buttress_exit(isp); + + release_firmware(isp->cpd_fw); +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) +static void ipu_pci_reset_notify(struct pci_dev *pdev, bool prepare) +{ + struct ipu_device *isp = pci_get_drvdata(pdev); + + if (prepare) { + dev_err(&pdev->dev, "FLR prepare\n"); + pm_runtime_forbid(&isp->pdev->dev); + isp->flr_done = true; + return; + } + + ipu_buttress_restore(isp); + if (isp->secure_mode) + ipu_buttress_reset_authentication(isp); + + ipu_bus_flr_recovery(); + isp->ipc_reinit = true; + pm_runtime_allow(&isp->pdev->dev); + + dev_err(&pdev->dev, "FLR completed\n"); +} +#else +static void ipu_pci_reset_prepare(struct pci_dev *pdev) +{ + struct ipu_device *isp = pci_get_drvdata(pdev); + + dev_warn(&pdev->dev, "FLR prepare\n"); + pm_runtime_forbid(&isp->pdev->dev); + isp->flr_done = true; +} + +static void ipu_pci_reset_done(struct pci_dev *pdev) +{ + struct ipu_device *isp = pci_get_drvdata(pdev); + + ipu_buttress_restore(isp); + if (isp->secure_mode) + ipu_buttress_reset_authentication(isp); + + ipu_bus_flr_recovery(); + isp->ipc_reinit = true; + pm_runtime_allow(&isp->pdev->dev); + + dev_warn(&pdev->dev, "FLR completed\n"); +} +#endif + +#ifdef CONFIG_PM + +/* + * PCI base driver code requires driver to provide these to enable + * PCI device level PM state transitions (D0<->D3) + */ +static int ipu_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ipu_device *isp = pci_get_drvdata(pdev); + + isp->flr_done = false; + + return 0; +} + +static int ipu_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ipu_device *isp = pci_get_drvdata(pdev); + struct ipu_buttress *b = &isp->buttress; + int rval; + + /* Configure the arbitration mechanisms for VC requests */ + ipu_configure_vc_mechanism(isp); + + ipu_buttress_set_secure_mode(isp); + isp->secure_mode = ipu_buttress_get_secure_mode(isp); + dev_info(dev, "IPU in %s mode\n", + isp->secure_mode ? 
"secure" : "non-secure"); + + ipu_buttress_restore(isp); + + rval = ipu_buttress_ipc_reset(isp, &b->cse); + if (rval) + dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n"); + + return 0; +} + +static int ipu_runtime_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ipu_device *isp = pci_get_drvdata(pdev); + int rval; + + ipu_configure_vc_mechanism(isp); + ipu_buttress_restore(isp); + + if (isp->ipc_reinit) { + struct ipu_buttress *b = &isp->buttress; + + isp->ipc_reinit = false; + rval = ipu_buttress_ipc_reset(isp, &b->cse); + if (rval) + dev_err(&isp->pdev->dev, + "IPC reset protocol failed!\n"); + } + + return 0; +} + +static const struct dev_pm_ops ipu_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(&ipu_suspend, &ipu_resume) + SET_RUNTIME_PM_OPS(&ipu_suspend, /* Same as in suspend flow */ + &ipu_runtime_resume, + NULL) +}; + +#define IPU_PM (&ipu_pm_ops) +#else +#define IPU_PM NULL +#endif + +static const struct pci_device_id ipu_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU_PCI_ID)}, + {0,} +}; +MODULE_DEVICE_TABLE(pci, ipu_pci_tbl); + +static const struct pci_error_handlers pci_err_handlers = { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) + .reset_notify = ipu_pci_reset_notify, +#else + .reset_prepare = ipu_pci_reset_prepare, + .reset_done = ipu_pci_reset_done, +#endif +}; + +static struct pci_driver ipu_pci_driver = { + .name = IPU_NAME, + .id_table = ipu_pci_tbl, + .probe = ipu_pci_probe, + .remove = ipu_pci_remove, + .driver = { + .pm = IPU_PM, + }, + .err_handler = &pci_err_handlers, +}; + +static int __init ipu_init(void) +{ + int rval = ipu_bus_register(); + + if (rval) { + pr_warn("can't register ipu bus (%d)\n", rval); + return rval; + } + + rval = pci_register_driver(&ipu_pci_driver); + if (rval) { + pr_warn("can't register pci driver (%d)\n", rval); + goto out_pci_register_driver; + } + + return 0; + +out_pci_register_driver: + ipu_bus_unregister(); + + return rval; +} + +static void __exit ipu_exit(void) +{ + pci_unregister_driver(&ipu_pci_driver); + ipu_bus_unregister(); +} + +module_init(ipu_init); +module_exit(ipu_exit); + +MODULE_AUTHOR("Sakari Ailus "); +MODULE_AUTHOR("Jouni Högander "); +MODULE_AUTHOR("Antti Laakso "); +MODULE_AUTHOR("Samu Onkalo "); +MODULE_AUTHOR("Jianxu Zheng "); +MODULE_AUTHOR("Tianshu Qiu "); +MODULE_AUTHOR("Renwei Wu "); +MODULE_AUTHOR("Bingbu Cao "); +MODULE_AUTHOR("Yunliang Ding "); +MODULE_AUTHOR("Zaikuo Wang "); +MODULE_AUTHOR("Leifu Zhao "); +MODULE_AUTHOR("Xia Wu "); +MODULE_AUTHOR("Kun Jiang "); +MODULE_AUTHOR("Intel"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu pci driver"); diff --git a/drivers/media/pci/intel/ipu.h b/drivers/media/pci/intel/ipu.h new file mode 100644 index 000000000000..1916018858a7 --- /dev/null +++ b/drivers/media/pci/intel/ipu.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_H +#define IPU_H + +#include +#include +#include + +#include "ipu-pdata.h" +#include "ipu-bus.h" +#include "ipu-buttress.h" +#include "ipu-trace.h" + +#if defined(CONFIG_VIDEO_INTEL_IPU4) +#define IPU_PCI_ID 0x5a88 +#elif defined(CONFIG_VIDEO_INTEL_IPU4P) +#define IPU_PCI_ID 0x5a19 +#endif + +/* processing system frequency: 25Mhz x ratio, Legal values [8,32] */ +#define PS_FREQ_CTL_DEFAULT_RATIO 0x12 + +/* input system frequency: 1600Mhz / divisor. Legal values [2,8] */ +#define IS_FREQ_SOURCE 1600000000 +#define IS_FREQ_CTL_DIVISOR 0x4 + +/* + * ISYS DMA can overshoot. 
For higher resolutions the overallocation is one line,
+ * but it must be at least 1024 bytes. The value could be different in
+ * different versions / generations, thus provide it via platform data.
+ */
+#define IPU_ISYS_OVERALLOC_MIN 1024
+
+/*
+ * Physical pages in GDA: 128 * 1K pages.
+ */
+#define IPU_DEVICE_GDA_NR_PAGES 128
+
+/*
+ * Virtualization factor for Broxton to calculate the available virtual pages.
+ * In IPU4, there is a limitation of only 1024 virtual pages. Hence the
+ * virtualization factor is 8 (128 * 8 = 1024).
+ */
+#define IPU_DEVICE_GDA_VIRT_FACTOR 8
+
+struct pci_dev;
+struct list_head;
+struct firmware;
+
+#define NR_OF_MMU_RESOURCES 2
+
+struct ipu_device {
+ struct pci_dev *pdev;
+ struct list_head devices;
+ struct ipu_bus_device *isys_iommu, *isys;
+ struct ipu_bus_device *psys_iommu, *psys;
+ struct ipu_buttress buttress;
+
+ const struct firmware *cpd_fw;
+ const char *cpd_fw_name;
+ u64 *pkg_dir;
+ dma_addr_t pkg_dir_dma_addr;
+ unsigned int pkg_dir_size;
+
+ void __iomem *base;
+ void __iomem *base2;
+ struct dentry *ipu_dir;
+ struct ipu_trace *trace;
+ bool flr_done;
+ bool ipc_reinit;
+ bool secure_mode;
+
+ int (*isys_fw_reload)(struct ipu_device *isp);
+ int (*cpd_fw_reload)(struct ipu_device *isp);
+};
+
+#define IPU_DMA_MASK 39
+#define IPU_LIB_CALL_TIMEOUT_MS 2000
+#define IPU_PSYS_CMD_TIMEOUT_MS 2000
+#define IPU_PSYS_OPEN_TIMEOUT_US 50
+#define IPU_PSYS_OPEN_RETRY (10000 / IPU_PSYS_OPEN_TIMEOUT_US)
+
+int ipu_fw_authenticate(void *data, u64 val);
+void ipu_configure_spc(struct ipu_device *isp,
+ const struct ipu_hw_variants *hw_variant,
+ int pkg_dir_idx, void __iomem *base, u64 *pkg_dir,
+ dma_addr_t pkg_dir_dma_addr);
+#endif /* IPU_H */
diff --git a/drivers/media/pci/intel/ipu4/Makefile b/drivers/media/pci/intel/ipu4/Makefile
new file mode 100644
index 000000000000..127814633fc7
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/Makefile
@@ -0,0 +1,131 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2010 - 2018, Intel Corporation.
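#
# Editor's illustrative sketch, not part of the original Makefile: the
# EXTERNAL_BUILD / srcpath handling below suggests an out-of-tree build
# would be invoked roughly as follows (assumed, unverified):
#
#   make -C /lib/modules/$(uname -r)/build M=$PWD EXTERNAL_BUILD=1 \
#        srcpath=$PWD modules
#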
+ +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +ifdef CONFIG_VIDEO_INTEL_IPU4 +ccflags-y += -DHAS_DUAL_CMD_CTX_SUPPORT=0 +ccflags-y += -DIPU_META_DATA_SUPPORT + +intel-ipu4-objs += ../ipu.o \ + ../ipu-bus.o \ + ../ipu-dma.o \ + ../ipu-buttress.o \ + ../ipu-trace.o \ + ../ipu-cpd.o \ + ../ipu-fw-com.o \ + ipu4.o + +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4.o + +intel-ipu4-mmu-objs += ../ipu-mmu.o +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4-mmu.o + +intel-ipu4-isys-objs += ../ipu-isys.o \ + ../ipu-isys-csi2.o \ + ipu4-isys.o \ + ipu4-isys-csi2.o \ + ../ipu-isys-csi2-be-soc.o \ + ../ipu-isys-csi2-be.o \ + ../ipu-fw-isys.o \ + ipu4-isys-isa.o \ + ../ipu-isys-video.o \ + ../ipu-isys-queue.o \ + ../ipu-isys-subdev.o \ + ../ipu-isys-tpg.o + +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4-isys.o + +intel-ipu4-psys-objs += ../ipu-psys.o \ + ipu4-psys.o \ + ipu4-resources.o \ + +ifndef CONFIG_VIDEO_INTEL_IPU_FW_LIB +intel-ipu4-psys-objs += ipu4-fw-resources.o \ + ../ipu-fw-psys.o +endif + +ifeq ($(CONFIG_COMPAT),y) +intel-ipu4-psys-objs += ../ipu-psys-compat32.o +endif + +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4-psys.o + +ifdef CONFIG_VIDEO_INTEL_IPU_FW_LIB +include $(srcpath)/$(src)/ipu4-css/Makefile.isyslib +include $(srcpath)/$(src)/ipu4-css/Makefile.psyslib +endif + +ccflags-y += -I$(srcpath)/$(src)/../../../../../include/ +ccflags-y += -I$(srcpath)/$(src)/../ +ccflags-y += -I$(srcpath)/$(src)/ +ifdef CONFIG_VIDEO_INTEL_IPU_FW_LIB +ccflags-y += -I$(srcpath)/$(src)/ipu4-css +endif + +ccflags-y += -DPARAMETER_INTERFACE_V2 +endif + +ifdef CONFIG_VIDEO_INTEL_IPU4P +ccflags-y += -DHAS_DUAL_CMD_CTX_SUPPORT=0 +ccflags-y += -DIPU_META_DATA_SUPPORT + +intel-ipu4p-objs += ../ipu.o \ + ../ipu-bus.o \ + ../ipu-dma.o \ + ../ipu-buttress.o \ + ../ipu-trace.o \ + ../ipu-cpd.o \ + ../ipu-fw-com.o \ + ipu4.o + +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4p.o + +intel-ipu4p-mmu-objs += ../ipu-mmu.o +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4p-mmu.o + +intel-ipu4p-isys-objs += ../ipu-isys.o \ + ../ipu-isys-csi2.o \ + ipu4-isys.o \ + ipu4p-isys-csi2.o \ + ../ipu-isys-csi2-be-soc.o \ + ../ipu-isys-csi2-be.o \ + ../ipu-fw-isys.o \ + ipu4-isys-isa.o \ + ../ipu-isys-video.o \ + ../ipu-isys-queue.o \ + ../ipu-isys-subdev.o \ + ../ipu-isys-tpg.o +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4p-isys.o + +intel-ipu4p-psys-objs += ../ipu-psys.o \ + ipu4-psys.o \ + ipu4-resources.o \ + +ifndef CONFIG_VIDEO_INTEL_IPU_FW_LIB +intel-ipu4p-psys-objs += ipu4-fw-resources.o \ + ../ipu-fw-psys.o +endif + +ifeq ($(CONFIG_COMPAT),y) +intel-ipu4p-psys-objs += ../ipu-psys-compat32.o +endif + +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4p-psys.o + +ifdef CONFIG_VIDEO_INTEL_IPU_FW_LIB +include $(srcpath)/$(src)/ipu4p-css/Makefile.isyslib +include $(srcpath)/$(src)/ipu4p-css/Makefile.psyslib +endif + +ccflags-y += -I$(srcpath)/$(src)/../../../../../include/ +ccflags-y += -I$(srcpath)/$(src)/../ +ccflags-y += -I$(srcpath)/$(src)/ +ifdef CONFIG_VIDEO_INTEL_IPU_FW_LIB +ccflags-y += -I$(srcpath)/$(src)/ipu4p-css +endif + +ccflags-y += -DPARAMETER_INTERFACE_V2 +endif diff --git a/drivers/media/pci/intel/ipu4/ipu-platform-buttress-regs.h b/drivers/media/pci/intel/ipu4/ipu-platform-buttress-regs.h new file mode 100644 index 000000000000..ffd770c88198 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu-platform-buttress-regs.h @@ -0,0 +1,282 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation */ + +#ifndef IPU_PLATFORM_BUTTRESS_REGS_H +#define IPU_PLATFORM_BUTTRESS_REGS_H + +#ifdef 
CONFIG_VIDEO_INTEL_IPU4P +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_SHIFT 20 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK (0x1f << 20) +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE 0x0 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY 0xc + +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_SHIFT 25 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK (0x1f << 25) +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE 0x0 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP 0x10 + +#define BUTTRESS_REG_CSI_BSCAN_EXCLUDE 0x100d8 +#define CPHY0_DLL_OVRD_OFFSET 0x10100 +#define CPHY0_RX_CONTROL1_OFFSET 0x10110 +#define DPHY0_DLL_OVRD_OFFSET 0x1014c +#define DPHY0_RX_CNTRL_OFFSET 0x10158 +#define BB0_AFE_CONFIG_OFFSET 0x10174 + +#define BUTTRESS_REG_IS_FREQ_CTL_RATIO_SHIFT 1 +#define BUTTRESS_REG_PS_FREQ_CTL_OVRD_SHIFT 7 +#define BUTTRESS_REG_PS_FREQ_CTL_RATIO_SHIFT 8 + +#define BUTTRESS_REG_CPHYX_DLL_OVRD(x) \ + (CPHY0_DLL_OVRD_OFFSET + (x >> 1) * 0x100) +#define BUTTRESS_REG_CPHYX_RX_CONTROL1(x) \ + (CPHY0_RX_CONTROL1_OFFSET + (x >> 1) * 0x100) +#define BUTTRESS_REG_DPHYX_DLL_OVRD(x) \ + (DPHY0_DLL_OVRD_OFFSET + (x >> 1) * 0x100) +#define BUTTRESS_REG_DPHYX_RX_CNTRL(x) \ + (DPHY0_RX_CNTRL_OFFSET + (x >> 1) * 0x100) +#define BUTTRESS_REG_BBX_AFE_CONFIG(x) \ + (BB0_AFE_CONFIG_OFFSET + (x >> 1) * 0x100) +#endif /* CONFIG_VIDEO_INTEL_IPU4P */ + +#ifdef CONFIG_VIDEO_INTEL_IPU4 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_SHIFT 20 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK (0xf << 20) +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE 0x0 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY 0xa + +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_SHIFT 24 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK (0x1f << 24) +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE 0x0 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP 0xf +#endif /* CONFIG_VIDEO_INTEL_IPU4 */ + +#define BUTTRESS_REG_WDT 0x8 +#define BUTTRESS_REG_BTRS_CTRL 0xc +#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0 BIT(0) +#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1 BIT(1) + +#define BUTTRESS_REG_FW_RESET_CTL 0x30 +#define BUTTRESS_FW_RESET_CTL_START_SHIFT 0 +#define BUTTRESS_FW_RESET_CTL_DONE_SHIFT 1 + +#define BUTTRESS_REG_IS_FREQ_CTL 0x34 +#define BUTTRESS_IS_FREQ_CTL_DIVISOR_MASK 0xf + +#define BUTTRESS_REG_PS_FREQ_CTL 0x38 +#define BUTTRESS_PS_FREQ_CTL_RATIO_MASK 0xff + +#define BUTTRESS_FREQ_CTL_START_SHIFT 31 +#define BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT 8 +#define BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK (0xff << 8) + +#define BUTTRESS_REG_PWR_STATE 0x5c +#define BUTTRESS_PWR_STATE_IS_PWR_SHIFT 4 +#define BUTTRESS_PWR_STATE_IS_PWR_MASK (0x7 << 4) + +#define BUTTRESS_PWR_STATE_PS_PWR_SHIFT 8 +#define BUTTRESS_PWR_STATE_PS_PWR_MASK (0x7 << 8) + +#define BUTTRESS_PWR_STATE_RESET 0x0 +#define BUTTRESS_PWR_STATE_PWR_ON_DONE 0x1 +#define BUTTRESS_PWR_STATE_PWR_RDY 0x3 +#define BUTTRESS_PWR_STATE_PWR_IDLE 0x4 + +#define BUTTRESS_PWR_STATE_HH_STATUS_SHIFT 12 +#define BUTTRESS_PWR_STATE_HH_STATUS_MASK (0x3 << 12) + +enum { + BUTTRESS_PWR_STATE_HH_STATE_IDLE, + BUTTRESS_PWR_STATE_HH_STATE_IN_PRGS, + BUTTRESS_PWR_STATE_HH_STATE_DONE, + BUTTRESS_PWR_STATE_HH_STATE_ERR, +}; + +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PLL_CMP 0x1 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK 0x2 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PG_ACK 0x3 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_ASSRT_CYCLES 0x4 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES1 0x5 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES2 0x6 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DEASSRT_CYCLES 0x7 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_FUSE_WR_CMP 0x8 
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_BRK_POINT 0x9 +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_HALT_HALTED 0xb +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DURATION_CNT3 0xc +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK_PD 0xd +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_PD_BRK_POINT 0xe +#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PD_PG_ACK0 0xf +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_IP_RDY 0x1 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_PRE_CNT_EXH 0x2 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_VGI_PWRGOOD 0x3 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_POST_CNT_EXH 0x4 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WR_PLL_RATIO 0x5 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_CMP 0x6 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_CLKACK 0x7 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_ASSRT_CYCLES 0x8 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES1 0x9 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES2 0xa +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_DEASSRT_CYCLES 0xb +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PU_BRK_PNT 0xc +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_FUSE_ACCPT 0xd +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_4_HALTED 0x10 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RESET_CNT3 0x11 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_CLKACK 0x12 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_OFF_IND 0x13 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PH4 0x14 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PLL_CMP 0x15 +#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_CLKACK 0x16 + +#define BUTTRESS_REG_SECURITY_CTL 0x300 + +#define BUTTRESS_SECURITY_CTL_FW_SECURE_MODE_SHIFT 16 +#define BUTTRESS_SECURITY_CTL_FW_SETUP_SHIFT 0 +#define BUTTRESS_SECURITY_CTL_FW_SETUP_MASK 0x1f + +#define BUTTRESS_SECURITY_CTL_FW_SETUP_DONE 0x1 +#define BUTTRESS_SECURITY_CTL_AUTH_DONE 0x2 +#define BUTTRESS_SECURITY_CTL_AUTH_FAILED 0x8 + +#define BUTTRESS_REG_SENSOR_FREQ_CTL 0x16c + +#define BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_DEFAULT(i) \ + (0x1b << ((i) * 10)) +#define BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_SHIFT(i) ((i) * 10) +#define BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_MASK(i) \ + (0x1ff << ((i) * 10)) + +#define BUTTRESS_SENSOR_CLK_FREQ_6P75MHZ 0x176 +#define BUTTRESS_SENSOR_CLK_FREQ_8MHZ 0x164 +#define BUTTRESS_SENSOR_CLK_FREQ_9P6MHZ 0x2 +#define BUTTRESS_SENSOR_CLK_FREQ_12MHZ 0x1b2 +#define BUTTRESS_SENSOR_CLK_FREQ_13P6MHZ 0x1ac +#define BUTTRESS_SENSOR_CLK_FREQ_14P4MHZ 0x1cc +#define BUTTRESS_SENSOR_CLK_FREQ_15P8MHZ 0x1a6 +#define BUTTRESS_SENSOR_CLK_FREQ_16P2MHZ 0xca +#define BUTTRESS_SENSOR_CLK_FREQ_17P3MHZ 0x12e +#define BUTTRESS_SENSOR_CLK_FREQ_18P6MHZ 0x1c0 +#define BUTTRESS_SENSOR_CLK_FREQ_19P2MHZ 0x0 +#define BUTTRESS_SENSOR_CLK_FREQ_24MHZ 0xb2 +#define BUTTRESS_SENSOR_CLK_FREQ_26MHZ 0xae +#define BUTTRESS_SENSOR_CLK_FREQ_27MHZ 0x196 + +#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_FB_RATIO_MASK 0xff +#define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_A_SHIFT 8 +#define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_A_MASK (0x2 << 8) +#define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_C_SHIFT 10 +#define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_C_MASK (0x2 << 10) +#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_FORCE_OFF_SHIFT 12 +#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_REF_RATIO_SHIFT 14 +#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_REF_RATIO_MASK (0x2 << 14) +#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_PVD_RATIO_SHIFT 16 +#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_PVD_RATIO_MASK (0x2 << 16) +#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_OUTPUT_RATIO_SHIFT 18 +#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_OUTPUT_RATIO_MASK (0x2 
<< 18) +#define BUTTRESS_SENSOR_FREQ_CTL_START_SHIFT 31 + +#define BUTTRESS_REG_SENSOR_CLK_CTL 0x170 + +/* 0 <= i <= 2 */ +#define BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_EN_SHIFT(i) ((i) * 2) +#define BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_SEL_SHIFT(i) ((i) * 2 + 1) + +#define BUTTRESS_REG_FW_SOURCE_BASE_LO 0x78 +#define BUTTRESS_REG_FW_SOURCE_BASE_HI 0x7C +#define BUTTRESS_REG_FW_SOURCE_SIZE 0x80 + +#define BUTTRESS_REG_ISR_STATUS 0x90 +#define BUTTRESS_REG_ISR_ENABLED_STATUS 0x94 +#define BUTTRESS_REG_ISR_ENABLE 0x98 +#define BUTTRESS_REG_ISR_CLEAR 0x9C + +#define BUTTRESS_ISR_IS_IRQ BIT(0) +#define BUTTRESS_ISR_PS_IRQ BIT(1) +#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE BIT(2) +#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH BIT(3) +#define BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING BIT(4) +#define BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING BIT(5) +#define BUTTRESS_ISR_CSE_CSR_SET BIT(6) +#define BUTTRESS_ISR_ISH_CSR_SET BIT(7) +#define BUTTRESS_ISR_SPURIOUS_CMP BIT(8) +#define BUTTRESS_ISR_WATCHDOG_EXPIRED BIT(9) +#define BUTTRESS_ISR_PUNIT_2_IUNIT_IRQ BIT(10) +#define BUTTRESS_ISR_SAI_VIOLATION BIT(11) +#define BUTTRESS_ISR_HW_ASSERTION BIT(12) + +#define BUTTRESS_REG_IU2CSEDB0 0x100 + +#define BUTTRESS_IU2CSEDB0_BUSY_SHIFT 31 +#define BUTTRESS_IU2CSEDB0_SHORT_FORMAT_SHIFT 27 +#define BUTTRESS_IU2CSEDB0_CLIENT_ID_SHIFT 10 +#define BUTTRESS_IU2CSEDB0_IPC_CLIENT_ID_VAL 2 + +#define BUTTRESS_REG_IU2CSEDATA0 0x104 + +#define BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD 1 +#define BUTTRESS_IU2CSEDATA0_IPC_AUTHENTICATE_RUN 2 +#define BUTTRESS_IU2CSEDATA0_IPC_AUTHENTICATE_REPLACE 3 +#define BUTTRESS_IU2CSEDATA0_IPC_UPDATE_SECURE_TOUCH 16 + +#define BUTTRESS_REG_IU2CSECSR 0x108 + +#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 BIT(0) +#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 BIT(1) +#define BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE BIT(2) +#define BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ BIT(3) +#define BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID BIT(4) +#define BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ BIT(5) + +#define BUTTRESS_REG_CSE2IUDB0 0x304 +#define BUTTRESS_REG_CSE2IUCSR 0x30C +#define BUTTRESS_REG_CSE2IUDATA0 0x308 + +/* 0x20 == NACK, 0xf == unknown command */ +#define BUTTRESS_CSE2IUDATA0_IPC_NACK 0xf20 +#define BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK 0xffff + +#define BUTTRESS_REG_ISH2IUCSR 0x50 +#define BUTTRESS_REG_ISH2IUDB0 0x54 +#define BUTTRESS_REG_ISH2IUDATA0 0x58 + +#define BUTTRESS_REG_IU2ISHDB0 0x10C +#define BUTTRESS_REG_IU2ISHDATA0 0x110 +#define BUTTRESS_REG_IU2ISHDATA1 0x114 +#define BUTTRESS_REG_IU2ISHCSR 0x118 + +#define BUTTRESS_REG_ISH_START_DETECT 0x198 +#define BUTTRESS_REG_ISH_START_DETECT_MASK 0x19C + +#define BUTTRESS_REG_FABRIC_CMD 0x88 + +#define BUTTRESS_FABRIC_CMD_START_TSC_SYNC BIT(0) +#define BUTTRESS_FABRIC_CMD_IS_DRAIN BIT(4) + +#define BUTTRESS_REG_TSW_CTL 0x120 +#define BUTTRESS_TSW_CTL_SOFT_RESET BIT(8) + +#define BUTTRESS_REG_TSC_LO 0x164 +#define BUTTRESS_REG_TSC_HI 0x168 + +#define BUTTRESS_REG_CSI2_PORT_CONFIG_AB 0x200 +#define BUTTRESS_CSI2_PORT_CONFIG_AB_MUX_MASK 0x1f +#define BUTTRESS_CSI2_PORT_CONFIG_AB_COMBO_SHIFT_B0 16 + +#define BUTTRESS_REG_PS_FREQ_CAPABILITIES 0xf7498 + +#define BUTTRESS_PS_FREQ_CAPABILITIES_LAST_RESOLVED_RATIO_SHIFT 24 +#define BUTTRESS_PS_FREQ_CAPABILITIES_LAST_RESOLVED_RATIO_MASK (0xff << 24) +#define BUTTRESS_PS_FREQ_CAPABILITIES_MAX_RATIO_SHIFT 16 +#define BUTTRESS_PS_FREQ_CAPABILITIES_MAX_RATIO_MASK (0xff << 16) +#define BUTTRESS_PS_FREQ_CAPABILITIES_EFFICIENT_RATIO_SHIFT 8 
+#define BUTTRESS_PS_FREQ_CAPABILITIES_EFFICIENT_RATIO_MASK (0xff << 8) +#define BUTTRESS_PS_FREQ_CAPABILITIES_MIN_RATIO_SHIFT 0 +#define BUTTRESS_PS_FREQ_CAPABILITIES_MIN_RATIO_MASK (0xff) + +#define BUTTRESS_IRQS (BUTTRESS_ISR_SAI_VIOLATION | \ + BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \ + BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING | \ + BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | \ + BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH | \ + BUTTRESS_ISR_IS_IRQ | \ + BUTTRESS_ISR_PS_IRQ) + +#endif /* IPU_BUTTRESS_REGS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu-platform-isys-csi2-reg.h b/drivers/media/pci/intel/ipu4/ipu-platform-isys-csi2-reg.h new file mode 100644 index 000000000000..f19372ba3e57 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu-platform-isys-csi2-reg.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_PLATFORM_ISYS_CSI2_REG_H +#define IPU_PLATFORM_ISYS_CSI2_REG_H + +#ifdef CONFIG_VIDEO_INTEL_IPU4P +/* CSI RX CPHY regs */ +#define CSI2_REG_CSI_RX_CPHY_NOF_ENABLED_LANES 0x04 +#define CSI2_REG_CSI_RX_CPHY_HBP_TESTMODE 0x08 +#define CSI2_REG_CSI_RX_CPHY_PH_CRC_CFG 0x0C +#define CSI2_REG_CSI_RX_CPHY_ERR_HANDLING 0x10 +#define CSI2_REG_CSI_RX_CPHY_PORTCFG_CTL 0x14 +#define CSI2_REG_CSI_RX_CPHY_PORTCFG_TIMEOUT_CNTR 0x18 +#define CSI2_REG_CSI_RX_CPHY_SYNC_CNTR_SEL 0x1C +#define CSI2_REG_CSI_RX_CPHY_STATS 0x20 + +#define CSI2_REG_CSI2PART_IRQ_EDGE 0xB00 +#define CSI2_REG_CSI2PART_IRQ_MASK 0xB04 +#define CSI2_REG_CSI2PART_IRQ_STATUS 0xB08 +#define CSI2_REG_CSI2PART_IRQ_CLEAR 0xB0c +#define CSI2_REG_CSI2PART_IRQ_ENABLE 0xB10 +#define CSI2_REG_CSI2PART_IRQ_LEVEL_NOT_PULSE 0xB14 +#define CSI2_CSI2PART_IRQ_CSIRX 0x10000 +#define CSI2_CSI2PART_IRQ_CSI2S2M 0x20000 + +#define CSI2_REG_CSIRX_IRQ_EDGE 0xC00 +#define CSI2_REG_CSIRX_IRQ_MASK 0xC04 +#define CSI2_REG_CSIRX_IRQ_STATUS 0xC08 +#define CSI2_REG_CSIRX_IRQ_CLEAR 0xC0c +#define CSI2_REG_CSIRX_IRQ_ENABLE 0xC10 +#define CSI2_REG_CSIRX_IRQ_LEVEL_NOT_PULSE 0xC14 +#define CSI2_CSIRX_HEADER_SINGLE_ERROR_CORRECTED BIT(0) +#define CSI2_CSIRX_HEADER_MULTIPLE_ERRORS_CORRECTED BIT(1) +#define CSI2_CSIRX_PAYLOAD_CRC_ERROR BIT(2) +#define CSI2_CSIRX_FIFO_OVERFLOW BIT(3) +#define CSI2_CSIRX_RESERVED_SHORT_PACKET_DATA_TYPE BIT(4) +#define CSI2_CSIRX_RESERVED_LONG_PACKET_DATA_TYPE BIT(5) +#define CSI2_CSIRX_INCOMPLETE_LONG_PACKET BIT(6) +#define CSI2_CSIRX_FRAME_SYNC_ERROR BIT(7) +#define CSI2_CSIRX_LINE_SYNC_ERROR BIT(8) +#define CSI2_CSIRX_DPHY_RECOVERABLE_SYNC_ERROR BIT(9) +#define CSI2_CSIRX_DPHY_NONRECOVERABLE_SYNC_ERROR BIT(10) +#define CSI2_CSIRX_ESCAPE_MODE_ERROR BIT(11) +#define CSI2_CSIRX_ESCAPE_MODE_TRIGGER_EVENT BIT(12) +#define CSI2_CSIRX_ESCAPE_MODE_ULTRALOW_POWER_DATA BIT(13) +#define CSI2_CSIRX_ESCAPE_MODE_ULTRALOW_POWER_EXIT_CLK BIT(14) +#define CSI2_CSIRX_INTER_FRAME_SHORT_PACKET_DISCARDED BIT(15) +#define CSI2_CSIRX_INTER_FRAME_LONG_PACKET_DISCARDED BIT(16) +#define CSI2_CSIRX_NUM_ERRORS 17 + +#define CSI2_REG_CSI2S2M_IRQ_EDGE 0xD00 +#define CSI2_REG_CSI2S2M_IRQ_MASK 0xD04 +#define CSI2_REG_CSI2S2M_IRQ_STATUS 0xD08 +#define CSI2_REG_CSI2S2M_IRQ_CLEAR 0xD0c +#define CSI2_REG_CSI2S2M_IRQ_ENABLE 0xD10 +#define CSI2_REG_CSI2S2M_IRQ_LEVEL_NOT_PULSE 0xD14 + +#define CSI2_IRQ_FS_VC(chn) (0x10000 << ((chn) * 4)) +#define CSI2_IRQ_FE_VC(chn) (0x20000 << ((chn) * 4)) +#define CSI2_IRQ_LS_VC(chn) (0x40000 << ((chn) * 4)) +#define CSI2_IRQ_LE_VC(chn) (0x80000 << ((chn) * 4)) +#endif /* CONFIG_VIDEO_INTEL_IPU4P */ + +#ifdef CONFIG_VIDEO_INTEL_IPU4 +/* IRQ-related registers specific to each 
of the four CSI receivers */
+#define CSI2_REG_CSI2PART_IRQ_EDGE 0x400
+#define CSI2_REG_CSI2PART_IRQ_MASK 0x404
+#define CSI2_REG_CSI2PART_IRQ_STATUS 0x408
+#define CSI2_REG_CSI2PART_IRQ_CLEAR 0x40c
+#define CSI2_REG_CSI2PART_IRQ_ENABLE 0x410
+#define CSI2_REG_CSI2PART_IRQ_LEVEL_NOT_PULSE 0x414
+#define CSI2_CSI2PART_IRQ_CSIRX 0x10000
+#define CSI2_CSI2PART_IRQ_CSI2S2M 0x20000
+
+#define CSI2_REG_CSIRX_IRQ_EDGE 0x500
+#define CSI2_REG_CSIRX_IRQ_MASK 0x504
+#define CSI2_REG_CSIRX_IRQ_STATUS 0x508
+#define CSI2_REG_CSIRX_IRQ_CLEAR 0x50c
+#define CSI2_REG_CSIRX_IRQ_ENABLE 0x510
+#define CSI2_REG_CSIRX_IRQ_LEVEL_NOT_PULSE 0x514
+#define CSI2_CSIRX_HEADER_SINGLE_ERROR_CORRECTED BIT(0)
+#define CSI2_CSIRX_HEADER_MULTIPLE_ERRORS_CORRECTED BIT(1)
+#define CSI2_CSIRX_PAYLOAD_CRC_ERROR BIT(2)
+#define CSI2_CSIRX_FIFO_OVERFLOW BIT(3)
+#define CSI2_CSIRX_RESERVED_SHORT_PACKET_DATA_TYPE BIT(4)
+#define CSI2_CSIRX_RESERVED_LONG_PACKET_DATA_TYPE BIT(5)
+#define CSI2_CSIRX_INCOMPLETE_LONG_PACKET BIT(6)
+#define CSI2_CSIRX_FRAME_SYNC_ERROR BIT(7)
+#define CSI2_CSIRX_LINE_SYNC_ERROR BIT(8)
+#define CSI2_CSIRX_DPHY_RECOVERABLE_SYNC_ERROR BIT(9)
+#define CSI2_CSIRX_DPHY_NONRECOVERABLE_SYNC_ERROR BIT(10)
+#define CSI2_CSIRX_ESCAPE_MODE_ERROR BIT(11)
+#define CSI2_CSIRX_ESCAPE_MODE_TRIGGER_EVENT BIT(12)
+#define CSI2_CSIRX_ESCAPE_MODE_ULTRALOW_POWER_DATA BIT(13)
+#define CSI2_CSIRX_ESCAPE_MODE_ULTRALOW_POWER_EXIT_CLK BIT(14)
+#define CSI2_CSIRX_INTER_FRAME_SHORT_PACKET_DISCARDED BIT(15)
+#define CSI2_CSIRX_INTER_FRAME_LONG_PACKET_DISCARDED BIT(16)
+#define CSI2_CSIRX_NUM_ERRORS 17
+
+#define CSI2_REG_CSI2S2M_IRQ_EDGE 0x600
+#define CSI2_REG_CSI2S2M_IRQ_MASK 0x604
+#define CSI2_REG_CSI2S2M_IRQ_STATUS 0x608
+#define CSI2_REG_CSI2S2M_IRQ_CLEAR 0x60c
+#define CSI2_REG_CSI2S2M_IRQ_ENABLE 0x610
+#define CSI2_REG_CSI2S2M_IRQ_LEVEL_NOT_PULSE 0x614
+
+#define CSI2_IRQ_FS_VC(chn) (1 << ((chn) * 4))
+#define CSI2_IRQ_FE_VC(chn) (2 << ((chn) * 4))
+#define CSI2_IRQ_LS_VC(chn) (4 << ((chn) * 4))
+#define CSI2_IRQ_LE_VC(chn) (8 << ((chn) * 4))
+#endif /* CONFIG_VIDEO_INTEL_IPU4 */
+
+#define CSI2_REG_CSI_RX_ENABLE 0x00
+#define CSI2_CSI_RX_ENABLE_ENABLE 0x01
+/* Enabled lanes - 1 */
+#define CSI2_REG_CSI_RX_NOF_ENABLED_LANES 0x04
+#define CSI2_REG_CSI_RX_CONFIG 0x08
+#define CSI2_CSI_RX_CONFIG_RELEASE_LP11 0x1
+#define CSI2_CSI_RX_CONFIG_DISABLE_BYTE_CLK_GATING 0x2
+#define CSI2_CSI_RX_CONFIG_SKEWCAL_ENABLE 0x4
+#define CSI2_REG_CSI_RX_HBP_TESTMODE_ENABLE 0x0c
+#define CSI2_REG_CSI_RX_ERROR_HANDLING 0x10
+#define CSI2_REG_CSI_RX_SYNC_COUNTER_SEL 0x14
+#define CSI2_RX_SYNC_COUNTER_INTERNAL 0
+#define CSI2_RX_SYNC_COUNTER_EXTERNAL 3
+#define CSI2_REG_CSI_RX_SP_IF_CONFIG 0x18
+#define CSI2_REG_CSI_RX_LP_IF_CONFIG 0x1C
+#define CSI2_REG_CSI_RX_STATUS 0x20
+#define CSI2_CSI_RX_STATUS_BUSY 0x01
+#define CSI2_REG_CSI_RX_STATUS_DLANE_HS 0x24
+#define CSI2_REG_CSI_RX_STATUS_DLANE_LP 0x28
+#define CSI2_REG_CSI_RX_DLY_CNT_TERMEN_CLANE 0x2c
+#define CSI2_REG_CSI_RX_DLY_CNT_SETTLE_CLANE 0x30
+/* 0..3 */
+#define CSI2_REG_CSI_RX_DLY_CNT_TERMEN_DLANE(n) (0x34 + (n) * 8)
+#define CSI2_REG_CSI_RX_DLY_CNT_SETTLE_DLANE(n) (0x38 + (n) * 8)
+
+/* General purpose registers, offset to gpreg base */
+#define CSI2_REG_CSI_GPREG_SOFT_RESET 0
+#define CSI2_REG_CSI_GPREG_SOFT_RESET_SLV 0x4
+#define CSI2_REG_CSI_GPREG_HPLL_FREQ 0x8
+#define CSI2_REG_CSI_GPREG_ISCLK_RATIO 0xc
+#define CSI2_REG_CSI_GPREG_HPLL_FREQ_ISCLK_RATIO_OVERRIDE 0x10
+#define CSI2_REG_CSI_GPREG_CR_PORT_CONFIG 0x14
+#define CSI2_REG_CSI_GPREG_RCOMP_TIMER_DISABLE 0x18
+#define CSI2_REG_CSI_GPREG_RCOMP_TIMER_VALUE 0x1c
+
+/*
+ * The following is the list of relevant registers and their offsets
+ * within the legacy PHY endpoint, accessible only via the sideband bus.
+ * The register naming is a bit misleading: the DPHY / CPHY / LANE0 /
+ * LANE1 registers are all required for DPHY configurations.
+ */
+
+/* Legacy receiver block */
+#define CSI2_SB_CSI_RCOMP_CONTROL_LEGACY 0xb8
+#define CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_PORT4_SHIFT 9
+#define CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_PORT3_SHIFT 8
+#define CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_PORT2_SHIFT 7
+#define CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_PORT1_SHIFT 6
+#define CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_CODE_SHIFT 1
+#define CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_SHIFT 0
+
+/* Combo receiver block */
+#define CSI2_SB_CSI_RCOMP_CONTROL_COMBO 0x08
+#define CSI2_SB_CSI_RCOMP_UPDATE_MODE_SHIFT 15
+#define CSI2_SB_CSI_RCOMP_OVR_ENABLE_SHIFT 6
+#define CSI2_SB_CSI_RCOMP_OVR_CODE_SHIFT 1
+
+#define CSI2_SB_CPHY0_DLL_OVRD 0x18
+#define CSI2_SB_CPHY0_DLL_OVRD_CRCDC_FSM_DLANE0_SHIFT 1
+#define CSI2_SB_CPHY0_DLL_OVRD_LDEN_CRCDC_FSM_DLANE0 BIT(0)
+#define CSI2_SB_CPHY2_DLL_OVRD 0x60
+#define CSI2_SB_CPHY2_DLL_OVRD_CRCDC_FSM_DLANE1_SHIFT 1
+#define CSI2_SB_CPHY2_DLL_OVRD_LDEN_CRCDC_FSM_DLANE1 BIT(0)
+
+#define CSI2_SB_CPHY0_RX_CONTROL1 0x28
+#define CSI2_SB_CPHY0_RX_CONTROL1_EQ_LANE0_SHIFT 27
+#define CSI2_SB_CPHY2_RX_CONTROL1 0x68
+#define CSI2_SB_CPHY2_RX_CONTROL1_EQ_LANE1_SHIFT 27
+
+#define CSI2_SB_DPHY0_DLL_OVRD 0xA4
+#define CSI2_SB_DPHY0_DLL_OVRD_LDEN_DRC_FSM_SHIFT 0
+#define CSI2_SB_DPHY0_DLL_OVRD_DRC_FSM_OVRD_SHIFT 1
+#define CSI2_SB_DPHY1_DLL_OVRD 0xD0
+#define CSI2_SB_DPHY1_DLL_OVRD_LDEN_DRC_FSM_SHIFT 0
+#define CSI2_SB_DPHY1_DLL_OVRD_DRC_FSM_OVRD_SHIFT 1
+
+#define CSI2_SB_DPHY0_RX_CNTRL 0xB0
+#define CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE3_SHIFT 28
+#define CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE2_SHIFT 26
+#define CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE1_SHIFT 24
+#define CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE0_SHIFT 22
+#define CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE23_MASK \
+ ((1 << CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE3_SHIFT) | \
+ (1 << CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE2_SHIFT))
+
+#define CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE01_MASK \
+ ((1 << CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE1_SHIFT) | \
+ (1 << CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE0_SHIFT))
+
+#endif /* IPU_PLATFORM_ISYS_CSI2_REG_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu-platform-isys.h b/drivers/media/pci/intel/ipu4/ipu-platform-isys.h
new file mode 100644
index 000000000000..51f26f2198e2
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu-platform-isys.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#ifndef IPU_PLATFORM_ISYS_H
+#define IPU_PLATFORM_ISYS_H
+
+#include "ipu4-isys-isa.h"
+
+#define IPU_ISYS_ENTITY_PREFIX "Intel IPU4"
+
+/*
+ * FW supports max 8 streams
+ */
+#define IPU_ISYS_MAX_STREAMS 8
+
+#define NR_OF_CSI2_BE_SOC_STREAMS 8
+#define NR_OF_CSI2_VC 4
+
+#endif
diff --git a/drivers/media/pci/intel/ipu4/ipu-platform-regs.h b/drivers/media/pci/intel/ipu4/ipu-platform-regs.h
new file mode 100644
index 000000000000..e54b2b55afbf
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu-platform-regs.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2014 - 2018 Intel Corporation */
+
+#ifndef IPU_PLATFORM_REGS_H
+#define
diff --git a/drivers/media/pci/intel/ipu4/ipu-platform-isys.h b/drivers/media/pci/intel/ipu4/ipu-platform-isys.h new file mode 100644 index 000000000000..51f26f2198e2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu-platform-isys.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Intel Corporation */ + +#ifndef IPU_PLATFORM_ISYS_H +#define IPU_PLATFORM_ISYS_H + +#include "ipu4-isys-isa.h" + +#define IPU_ISYS_ENTITY_PREFIX "Intel IPU4" + +/* + * FW supports max 8 streams + */ +#define IPU_ISYS_MAX_STREAMS 8 + +#define NR_OF_CSI2_BE_SOC_STREAMS 8 +#define NR_OF_CSI2_VC 4 + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu-platform-regs.h b/drivers/media/pci/intel/ipu4/ipu-platform-regs.h new file mode 100644 index 000000000000..e54b2b55afbf --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu-platform-regs.h @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation */ + +#ifndef IPU_PLATFORM_REGS_H +#define IPU_PLATFORM_REGS_H + +#ifdef CONFIG_VIDEO_INTEL_IPU4P +#define IPU_ISYS_IOMMU0_OFFSET 0x000e0000 +#define IPU_ISYS_IOMMU1_OFFSET 0x000e0100 + +#define IPU_ISYS_OFFSET 0x00100000 +#define IPU_PSYS_OFFSET 0x00400000 + +#define IPU_PSYS_IOMMU0_OFFSET 0x000b0000 +#define IPU_PSYS_IOMMU1_OFFSET 0x000b0100 +#define IPU_PSYS_IOMMU1R_OFFSET 0x000b0600 + +/* offsets from the IOMMU base register */ +#define IPU_MMU_L1_STREAM_ID_REG_OFFSET 0x0c +#define IPU_MMU_L2_STREAM_ID_REG_OFFSET 0x4c + +#define IPU_TPG0_ADDR_OFFSET 0x66c00 +#define IPU_TPG1_ADDR_OFFSET 0x6ec00 +#define IPU_CSI2BE_ADDR_OFFSET 0xba000 + +#define IPU_PSYS_MMU0_CTRL_OFFSET 0x08 + +#define IPU_GPOFFSET 0x66800 +#define IPU_COMBO_GPOFFSET 0x6e800 + +#define IPU_GPREG_MIPI_PKT_GEN0_SEL 0x1c +#define IPU_GPREG_MIPI_PKT_GEN1_SEL 0x1c + +#define IPU_REG_ISYS_ISA_ACC_IRQ_CTRL_BASE 0xb0c00 +#define IPU_REG_ISYS_A_IRQ_CTRL_BASE 0xbe200 +#define IPU_REG_ISYS_SIP0_IRQ_CTRL_BASE 0x66d00 +#define IPU_REG_ISYS_SIP1_IRQ_CTRL_BASE 0x6ed00 +#define IPU_REG_ISYS_SIP0_IRQ_CTRL_STATUS 0x66d08 +#define IPU_REG_ISYS_SIP1_IRQ_CTRL_STATUS 0x6ed08 +#define IPU_REG_ISYS_SIP0_IRQ_CTRL_CLEAR 0x66d0c +#define IPU_REG_ISYS_SIP1_IRQ_CTRL_CLEAR 0x6ed0c +#define IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(p) \ + ({ typeof(p) __p = (p); \ + __p > 0 ? (0x6cb00 + 0x800 * (__p - 1)) : (0x66300); }) +#define IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(p) \ + ({ typeof(p) __p = (p); \ + __p > 0 ? (0x6cc00 + 0x800 * (__p - 1)) : (0x66400); }) +#define IPU_ISYS_CSI2_A_IRQ_MASK GENMASK(0, 0) +#define IPU_ISYS_CSI2_B_IRQ_MASK GENMASK(1, 1) +#define IPU_ISYS_CSI2_C_IRQ_MASK GENMASK(2, 2) +#define IPU_ISYS_CSI2_D_IRQ_MASK GENMASK(3, 3) + +/* IRQ-related registers relative to ISYS_OFFSET */ +#define IPU_REG_ISYS_UNISPART_IRQ_EDGE 0x7c000 +#define IPU_REG_ISYS_UNISPART_IRQ_MASK 0x7c004 +#define IPU_REG_ISYS_UNISPART_IRQ_STATUS 0x7c008 +#define IPU_REG_ISYS_UNISPART_IRQ_CLEAR 0x7c00c +#define IPU_REG_ISYS_UNISPART_IRQ_ENABLE 0x7c010 +#define IPU_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE 0x7c014 +#define IPU_REG_ISYS_UNISPART_SW_IRQ_REG 0x7c414 +#define IPU_REG_ISYS_UNISPART_SW_IRQ_MUX_REG 0x7c418 +#define IPU_ISYS_UNISPART_IRQ_SW BIT(22) +#endif + +#ifdef CONFIG_VIDEO_INTEL_IPU4 +#define IPU_ISYS_IOMMU0_OFFSET 0x000e0000 +#define IPU_ISYS_IOMMU1_OFFSET 0x000e0100 + +#define IPU_ISYS_OFFSET 0x00100000 +#define IPU_PSYS_OFFSET 0x00400000 + +#define IPU_PSYS_IOMMU0_OFFSET 0x000b0000 +#define IPU_PSYS_IOMMU1_OFFSET 0x000b0100 +#define IPU_PSYS_IOMMU1R_OFFSET 0x000b0600 + +/* offsets from the IOMMU base register */ +#define IPU_MMU_L1_STREAM_ID_REG_OFFSET 0x0c +#define IPU_MMU_L2_STREAM_ID_REG_OFFSET 0x4c + +#define IPU_TPG0_ADDR_OFFSET 0x64800 +#define IPU_TPG1_ADDR_OFFSET 0x6f400 +#define IPU_CSI2BE_ADDR_OFFSET 0xba000 + +#define IPU_PSYS_MMU0_CTRL_OFFSET 0x08 + +#define IPU_GPOFFSET 0x67800 +#define IPU_COMBO_GPOFFSET 0x6f000 + +#define IPU_GPREG_MIPI_PKT_GEN0_SEL 0x24 +#define IPU_GPREG_MIPI_PKT_GEN1_SEL 0x1c + +/* IRQ-related registers relative to ISYS_OFFSET */ +#define IPU_REG_ISYS_UNISPART_IRQ_EDGE 0x7c000 +#define IPU_REG_ISYS_UNISPART_IRQ_MASK 0x7c004 +#define IPU_REG_ISYS_UNISPART_IRQ_STATUS 0x7c008 +#define IPU_REG_ISYS_UNISPART_IRQ_CLEAR 0x7c00c +#define IPU_REG_ISYS_UNISPART_IRQ_ENABLE 0x7c010 +#define IPU_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE 0x7c014 +#define IPU_REG_ISYS_UNISPART_SW_IRQ_REG 0x7c414 +#define IPU_REG_ISYS_UNISPART_SW_IRQ_MUX_REG 0x7c418 +#define IPU_ISYS_UNISPART_IRQ_SW BIT(30) +#endif /* CONFIG_VIDEO_INTEL_IPU4 */
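Both platform variants lay out the unispart IRQ bank in the same six-register pattern (EDGE, MASK, STATUS, CLEAR, ENABLE, LEVEL_NOT_PULSE). A minimal ack sketch, assuming an ioremapped isys register block; the helper name is illustrative and the dispatch policy is left out:

#include <linux/io.h>

/* Hedged sketch: latch and acknowledge pending unispart IRQ bits. */
static u32 isys_unispart_irq_ack_sketch(void __iomem *isys_base)
{
	u32 status = readl(isys_base + IPU_REG_ISYS_UNISPART_IRQ_STATUS);

	/* write the same bits back to CLEAR before dispatching them */
	writel(status, isys_base + IPU_REG_ISYS_UNISPART_IRQ_CLEAR);
	return status;
}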
+#define IPU_ISYS_SPC_OFFSET 0x000000 +#define IPU_PSYS_SPC_OFFSET 0x000000 +#define IPU_ISYS_DMEM_OFFSET 0x008000 +#define IPU_PSYS_DMEM_OFFSET 0x008000 + +/* PKG DIR OFFSET in IMR in secure mode */ +#define IPU_PKG_DIR_IMR_OFFSET 0x40 + +/* PCI config registers */ +#define IPU_REG_PCI_PCIECAPHDR_PCIECAP 0x70 +#define IPU_REG_PCI_DEVICECAP 0x74 +#define IPU_REG_PCI_DEVICECTL_DEVICESTS 0x78 +#define IPU_REG_PCI_MSI_CAPID 0xac +#define IPU_REG_PCI_MSI_ADDRESS_LO 0xb0 +#define IPU_REG_PCI_MSI_ADDRESS_HI 0xb4 +#define IPU_REG_PCI_MSI_DATA 0xb8 +#define IPU_REG_PCI_PMCAP 0xd0 +#define IPU_REG_PCI_PMCS 0xd4 +#define IPU_REG_PCI_MANUFACTURING_ID 0xf8 +#define IPU_REG_PCI_IUNIT_ACCESS_CTRL_VIOL 0xfc + +/* ISYS registers */ +/* Isys DMA CIO info register */ +#define IPU_REG_ISYS_INFO_CIO_DMA0(a) (0x81810 + (a) * 0x40) +#define IPU_REG_ISYS_INFO_CIO_DMA1(a) (0x93010 + (a) * 0x40) +#define IPU_REG_ISYS_INFO_CIO_DMA_IS(a) (0xb0610 + (a) * 0x40) +#define IPU_ISYS_NUM_OF_DMA0_CHANNELS 16 +#define IPU_ISYS_NUM_OF_DMA1_CHANNELS 32 +#define IPU_ISYS_NUM_OF_IS_CHANNELS 4 +/* Isys Info register offsets */ +#define IPU_REG_ISYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14 +#define IPU_REG_ISYS_INFO_SEG_CMEM_MASTER(a) (0x2C + ((a) * 12)) +#define IPU_REG_ISYS_INFO_SEG_XMEM_MASTER(a) (0x5C + ((a) * 12)) + +/* CDC Burst collector thresholds for isys - 3 FIFOs i = 0..2 */ +#define IPU_REG_ISYS_CDC_THRESHOLD(i) (0x7c400 + ((i) * 4)) + +/* Iunit Info bits */ +#define IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(a) (0x2C + ((a) * 12)) +#define IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(a) (0x5C + ((a) * 12)) +#define IPU_REG_PSYS_INFO_SEG_DATA_MASTER(a) (0x8C + ((a) * 12)) + +#define IPU_ISYS_REG_SPC_STATUS_CTRL 0x0 + +#define IPU_ISYS_SPC_STATUS_START BIT(1) +#define IPU_ISYS_SPC_STATUS_RUN BIT(3) +#define IPU_ISYS_SPC_STATUS_READY BIT(5) +#define IPU_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) +#define IPU_ISYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) + +#define IPU_PSYS_REG_SPC_STATUS_CTRL 0x0 + +#define IPU_PSYS_SPC_STATUS_START BIT(1) +#define IPU_PSYS_SPC_STATUS_RUN BIT(3) +#define IPU_PSYS_SPC_STATUS_READY BIT(5) +#define IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12) +#define IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH BIT(13) + +#define IPU_PSYS_REG_SPC_START_PC 0x4 +#define IPU_PSYS_REG_SPC_ICACHE_BASE 0x10 +#define IPU_PSYS_REG_SPP0_STATUS_CTRL 0x20000 +#define IPU_PSYS_REG_SPP1_STATUS_CTRL 0x30000 +#define IPU_PSYS_REG_SPF_STATUS_CTRL 0x40000 +#define IPU_PSYS_REG_ISP0_STATUS_CTRL 0x1C0000 +#define IPU_PSYS_REG_ISP1_STATUS_CTRL 0x240000 +#define IPU_PSYS_REG_ISP2_STATUS_CTRL 0x2C0000 +#define IPU_PSYS_REG_ISP3_STATUS_CTRL 0x340000 +#define IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14 + +/* VC0 */ +#define IPU_INFO_ENABLE_SNOOP BIT(0) +#define IPU_INFO_IMR_DESTINED BIT(1) +#define IPU_INFO_REQUEST_DESTINATION_BUT_REGS 0 +#define IPU_INFO_REQUEST_DESTINATION_PRIMARY BIT(4) +#define IPU_INFO_REQUEST_DESTINATION_P2P (BIT(4) | BIT(5)) +/* VC1 */ +#define IPU_INFO_DEADLINE_PTR BIT(1) +#define IPU_INFO_ZLW BIT(2) +#define IPU_INFO_STREAM_ID_SET(a) (((a) & 0xF) << 4) +#define IPU_INFO_ADDRESS_SWIZZ BIT(8) + +/* Trace unit related register definitions */ +#define TRACE_REG_MAX_ISYS_OFFSET 0x0fffff +#define TRACE_REG_MAX_PSYS_OFFSET 0xffffff +/* ISYS trace registers - offsets to isys base address */ +/* Trace unit base offset */ +#define TRACE_REG_IS_TRACE_UNIT_BASE 0x07d000 +/* Trace monitors */ +#define TRACE_REG_IS_SP_EVQ_BASE 0x001000 +/* GPC blocks */ +#define TRACE_REG_IS_SP_GPC_BASE 0x000800 +#define TRACE_REG_IS_ISL_GPC_BASE 0x0bd400 +#define TRACE_REG_IS_MMU_GPC_BASE 0x0e0B00 +/* CSI2 receivers */
+#define TRACE_REG_CSI2_TM_BASE 0x067a00 +#define TRACE_REG_CSI2_3PH_TM_BASE 0x06f200 +/* Trace timers */ +#define TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N 0x060614 +#define TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N 0x07c410 +#define TRACE_REG_GPREG_TRACE_TIMER_RST_OFF BIT(0) +/* SIG2CIO */ +/* 0 < n <= 8 */ +#define TRACE_REG_CSI2_SIG2SIO_GR_BASE(n) (0x067c00 + (n) * 0x20) +#define TRACE_REG_CSI2_SIG2SIO_GR_NUM 9 +/* 0 < n <= 8 */ +#define TRACE_REG_CSI2_PH3_SIG2SIO_GR_BASE(n) (0x06f600 + (n) * 0x20) +#define TRACE_REG_CSI2_PH3_SIG2SIO_GR_NUM 9 +/* PSYS trace registers - offsets to psys base address */ +/* Trace unit base offset */ +#define TRACE_REG_PS_TRACE_UNIT_BASE 0x3e0000 +/* Trace monitors */ +#define TRACE_REG_PS_SPC_EVQ_BASE 0x001000 +#define TRACE_REG_PS_SPP0_EVQ_BASE 0x021000 +#define TRACE_REG_PS_SPP1_EVQ_BASE 0x031000 +#define TRACE_REG_PS_SPF_EVQ_BASE 0x041000 +#define TRACE_REG_PS_ISP0_EVQ_BASE 0x1c1000 +#define TRACE_REG_PS_ISP1_EVQ_BASE 0x241000 +#define TRACE_REG_PS_ISP2_EVQ_BASE 0x2c1000 +#define TRACE_REG_PS_ISP3_EVQ_BASE 0x341000 +/* GPC blocks */ +#define TRACE_REG_PS_SPC_GPC_BASE 0x000800 +#define TRACE_REG_PS_SPP0_GPC_BASE 0x020800 +#define TRACE_REG_PS_SPP1_GPC_BASE 0x030800 +#define TRACE_REG_PS_SPF_GPC_BASE 0x040800 +#define TRACE_REG_PS_MMU_GPC_BASE 0x0b0b00 +#define TRACE_REG_PS_ISL_GPC_BASE 0x0fe800 +#define TRACE_REG_PS_ISP0_GPC_BASE 0x1c0800 +#define TRACE_REG_PS_ISP1_GPC_BASE 0x240800 +#define TRACE_REG_PS_ISP2_GPC_BASE 0x2c0800 +#define TRACE_REG_PS_ISP3_GPC_BASE 0x340800 + +/* common macros on each platform */ +#ifdef CONFIG_VIDEO_INTEL_IPU4 +#define IPU_ISYS_UNISPART_IRQ_CSI2(port) \ + ({ typeof(port) __port = (port); \ + __port < IPU_ISYS_MAX_CSI2_LEGACY_PORTS ? \ + ((0x8) << __port) : \ + (0x800 << (__port - IPU_ISYS_MAX_CSI2_LEGACY_PORTS)); }) +#define IPU_PSYS_GPDEV_IRQ_FWIRQ(n) (BIT(17) << (n)) +#endif
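The IPU_ISYS_UNISPART_IRQ_CSI2() macro hides the split between legacy and combo port bit positions, so a top half can fan out the latched status word uniformly. A sketch of that fan-out; nr_ports and the per-port handler are assumptions for illustration only:

/* Hedged sketch: dispatch unispart status bits to CSI-2 port handlers.
 * csi2_isr_one_port() is hypothetical, not part of this patch. */
static void isys_csi2_irq_fanout_sketch(u32 status, unsigned int nr_ports)
{
	unsigned int port;

	for (port = 0; port < nr_ports; port++)
		if (status & IPU_ISYS_UNISPART_IRQ_CSI2(port))
			csi2_isr_one_port(port);
}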
+#ifdef CONFIG_VIDEO_INTEL_IPU4P +#define IPU_ISYS_UNISPART_IRQ_CSI2(port) \ + ((port) > 0 ? 0x10 : 0x8) +/* bit 20 for fw irqreg0 */ +#define IPU_PSYS_GPDEV_IRQ_FWIRQ(n) (BIT(20) << (n)) +#endif +/* IRQ-related registers in PSYS, relative to IPU_xx_PSYS_OFFSET */ +#define IPU_REG_PSYS_GPDEV_IRQ_EDGE 0x60200 +#define IPU_REG_PSYS_GPDEV_IRQ_MASK 0x60204 +#define IPU_REG_PSYS_GPDEV_IRQ_STATUS 0x60208 +#define IPU_REG_PSYS_GPDEV_IRQ_CLEAR 0x6020c +#define IPU_REG_PSYS_GPDEV_IRQ_ENABLE 0x60210 +#define IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE 0x60214 +/* There are 8 FW interrupts, n = 0..7 */ +#define IPU_PSYS_GPDEV_FWIRQ0 0 +#define IPU_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x60100) +/* CDC Burst collector thresholds for psys - 4 FIFOs i = 0..3 */ +#define IPU_REG_PSYS_CDC_THRESHOLD(i) (0x60600 + ((i) * 4)) + +#endif /* IPU_PLATFORM_REGS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu-platform-resources.h b/drivers/media/pci/intel/ipu4/ipu-platform-resources.h new file mode 100644 index 000000000000..59b2cd46c9f1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu-platform-resources.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2016 - 2018 Intel Corporation */ + +#ifndef IPU_PLATFORM_RESOURCES_H +#define IPU_PLATFORM_RESOURCES_H + +#include <linux/types.h> + +/* ia_css_psys_program_group_private.h */ +/* ia_css_psys_process_group_cmd_impl.h */ +#ifdef CONFIG_VIDEO_INTEL_IPU4P +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_STRUCT 2 +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST 0 +#else +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_STRUCT 4 +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST 4 +#endif +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT 4 + +/* ia_css_terminal_base_types.h */ +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT 5 + +/* ia_css_terminal_types.h */ +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT 6 + +/* ia_css_psys_terminal.c */ +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT 4 + +/* ia_css_program_group_data.h */ +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT 3 +#define IPU_FW_PSYS_N_FRAME_PLANES 6 +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT 4 + +/* ia_css_psys_buffer_set.h */ +#define IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT 5 + +enum { + IPU_FW_PSYS_CMD_QUEUE_COMMAND_ID, + IPU_FW_PSYS_CMD_QUEUE_DEVICE_ID, + IPU_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID, + IPU_FW_PSYS_CMD_QUEUE_PPG1_COMMAND_ID, + IPU_FW_PSYS_N_PSYS_CMD_QUEUE_ID +}; + +enum { + IPU_FW_PSYS_GMEM_TYPE_ID = 0, + IPU_FW_PSYS_DMEM_TYPE_ID, + IPU_FW_PSYS_VMEM_TYPE_ID, + IPU_FW_PSYS_BAMEM_TYPE_ID, + IPU_FW_PSYS_PMEM_TYPE_ID, + IPU_FW_PSYS_N_MEM_TYPE_ID +}; + +enum ipu_mem_id { + IPU_FW_PSYS_VMEM0_ID = 0, + IPU_FW_PSYS_VMEM1_ID, + IPU_FW_PSYS_VMEM2_ID, + IPU_FW_PSYS_VMEM3_ID, + IPU_FW_PSYS_VMEM4_ID, + IPU_FW_PSYS_BAMEM0_ID, + IPU_FW_PSYS_BAMEM1_ID, + IPU_FW_PSYS_BAMEM2_ID, + IPU_FW_PSYS_BAMEM3_ID, + IPU_FW_PSYS_DMEM0_ID, + IPU_FW_PSYS_DMEM1_ID, + IPU_FW_PSYS_DMEM2_ID, + IPU_FW_PSYS_DMEM3_ID, + IPU_FW_PSYS_DMEM4_ID, + IPU_FW_PSYS_DMEM5_ID, + IPU_FW_PSYS_DMEM6_ID, + IPU_FW_PSYS_DMEM7_ID, + IPU_FW_PSYS_PMEM0_ID, + IPU_FW_PSYS_PMEM1_ID, + IPU_FW_PSYS_PMEM2_ID, + IPU_FW_PSYS_PMEM3_ID, + IPU_FW_PSYS_N_MEM_ID +}; + +enum { + IPU_FW_PSYS_DEV_CHN_DMA_EXT0_ID = 0, + IPU_FW_PSYS_DEV_CHN_GDC_ID, + IPU_FW_PSYS_DEV_CHN_DMA_EXT1_READ_ID, + IPU_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_ID, + IPU_FW_PSYS_DEV_CHN_DMA_INTERNAL_ID, + IPU_FW_PSYS_DEV_CHN_DMA_IPFD_ID, + IPU_FW_PSYS_DEV_CHN_DMA_ISA_ID, + IPU_FW_PSYS_DEV_CHN_DMA_FW_ID, +#ifdef CONFIG_VIDEO_INTEL_IPU4P + IPU_FW_PSYS_DEV_CHN_DMA_CMPRS_ID, +#endif + IPU_FW_PSYS_N_DEV_CHN_ID +}; + +enum
{ + IPU_FW_PSYS_SP_CTRL_TYPE_ID = 0, + IPU_FW_PSYS_SP_SERVER_TYPE_ID, + IPU_FW_PSYS_VP_TYPE_ID, + IPU_FW_PSYS_ACC_PSA_TYPE_ID, + IPU_FW_PSYS_ACC_ISA_TYPE_ID, + IPU_FW_PSYS_ACC_OSA_TYPE_ID, + IPU_FW_PSYS_GDC_TYPE_ID, + IPU_FW_PSYS_N_CELL_TYPE_ID +}; + +enum { + IPU_FW_PSYS_SP0_ID = 0, + IPU_FW_PSYS_SP1_ID, + IPU_FW_PSYS_SP2_ID, + IPU_FW_PSYS_VP0_ID, + IPU_FW_PSYS_VP1_ID, + IPU_FW_PSYS_VP2_ID, + IPU_FW_PSYS_VP3_ID, + IPU_FW_PSYS_ACC0_ID, + IPU_FW_PSYS_ACC1_ID, + IPU_FW_PSYS_ACC2_ID, + IPU_FW_PSYS_ACC3_ID, + IPU_FW_PSYS_ACC4_ID, + IPU_FW_PSYS_ACC5_ID, + IPU_FW_PSYS_ACC6_ID, + IPU_FW_PSYS_ACC7_ID, + IPU_FW_PSYS_GDC0_ID, + IPU_FW_PSYS_GDC1_ID, + IPU_FW_PSYS_N_CELL_ID +}; + +#define IPU_FW_PSYS_N_DEV_DFM_ID 0 +#define IPU_FW_PSYS_N_DATA_MEM_TYPE_ID (IPU_FW_PSYS_N_MEM_TYPE_ID - 1) +#define IPU_FW_PSYS_PROCESS_MAX_CELLS 1 +#define IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS 2 +#define IPU_FW_PSYS_RBM_NOF_ELEMS 2 + +#define IPU_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE 30 +#define IPU_FW_PSYS_DEV_CHN_GDC_MAX_SIZE 4 +#define IPU_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE 30 +#define IPU_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE 20 +#define IPU_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE 2 +#define IPU_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE 5 +#define IPU_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE 2 +#define IPU_FW_PSYS_DEV_CHN_DMA_FW_MAX_SIZE 1 +#define IPU_FW_PSYS_DEV_CHN_DMA_CMPRS_MAX_SIZE 6 + +#define IPU_FW_PSYS_VMEM0_MAX_SIZE 0x0800 +#define IPU_FW_PSYS_VMEM1_MAX_SIZE 0x0800 +#define IPU_FW_PSYS_VMEM2_MAX_SIZE 0x0800 +#define IPU_FW_PSYS_VMEM3_MAX_SIZE 0x0800 +#define IPU_FW_PSYS_VMEM4_MAX_SIZE 0x0800 +#define IPU_FW_PSYS_BAMEM0_MAX_SIZE 0x0400 +#define IPU_FW_PSYS_BAMEM1_MAX_SIZE 0x0400 +#define IPU_FW_PSYS_BAMEM2_MAX_SIZE 0x0400 +#define IPU_FW_PSYS_BAMEM3_MAX_SIZE 0x0400 +#define IPU_FW_PSYS_DMEM0_MAX_SIZE 0x4000 +#define IPU_FW_PSYS_DMEM1_MAX_SIZE 0x1000 +#define IPU_FW_PSYS_DMEM2_MAX_SIZE 0x1000 +#define IPU_FW_PSYS_DMEM3_MAX_SIZE 0x1000 +#define IPU_FW_PSYS_DMEM4_MAX_SIZE 0x1000 +#define IPU_FW_PSYS_DMEM5_MAX_SIZE 0x1000 +#define IPU_FW_PSYS_DMEM6_MAX_SIZE 0x1000 +#define IPU_FW_PSYS_DMEM7_MAX_SIZE 0x1000 +#define IPU_FW_PSYS_PMEM0_MAX_SIZE 0x0500 +#define IPU_FW_PSYS_PMEM1_MAX_SIZE 0x0500 +#define IPU_FW_PSYS_PMEM2_MAX_SIZE 0x0500 +#define IPU_FW_PSYS_PMEM3_MAX_SIZE 0x0500 + +struct ipu_fw_psys_program_manifest { + u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; + u32 ID; + u32 program_type; + s32 parent_offset; + u32 program_dependency_offset; + u32 terminal_dependency_offset; + u16 size; + u16 int_mem_size[IPU_FW_PSYS_N_MEM_TYPE_ID]; + u16 ext_mem_size[IPU_FW_PSYS_N_DATA_MEM_TYPE_ID]; + u16 ext_mem_offset[IPU_FW_PSYS_N_DATA_MEM_TYPE_ID]; + u16 dev_chn_size[IPU_FW_PSYS_N_DEV_CHN_ID]; + u16 dev_chn_offset[IPU_FW_PSYS_N_DEV_CHN_ID]; + u8 cell_id; + u8 cell_type_id; + u8 program_dependency_count; + u8 terminal_dependency_count; +#ifndef CONFIG_VIDEO_INTEL_IPU4P + u8 reserved[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST]; +#endif +}; + +struct ipu_fw_psys_process { + u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS]; + u32 size; + u32 ID; + u32 program_idx; + u32 state; + s16 parent_offset; + u16 cell_dependencies_offset; + u16 terminal_dependencies_offset; + u16 int_mem_offset[IPU_FW_PSYS_N_MEM_TYPE_ID]; + u16 ext_mem_offset[IPU_FW_PSYS_N_DATA_MEM_TYPE_ID]; + u16 dev_chn_offset[IPU_FW_PSYS_N_DEV_CHN_ID]; + u8 cell_id; + u8 int_mem_id[IPU_FW_PSYS_N_MEM_TYPE_ID]; + u8 ext_mem_id[IPU_FW_PSYS_N_DATA_MEM_TYPE_ID]; + u8 cell_dependency_count; + u8 terminal_dependency_count; + u8 
padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_STRUCT]; +}; + +struct ipu_psys_resource_alloc; +struct ipu_fw_psys_process_group; +struct ipu_psys_resource_pool; +int ipu_psys_allocate_resources(const struct device *dev, + struct ipu_fw_psys_process_group *pg, + void *pg_manifest, + struct ipu_psys_resource_alloc *alloc, + struct ipu_psys_resource_pool *pool); +int ipu_psys_move_resources(const struct device *dev, + struct ipu_psys_resource_alloc *alloc, + struct ipu_psys_resource_pool *source_pool, + struct ipu_psys_resource_pool *target_pool); + +void ipu_psys_free_resources(struct ipu_psys_resource_alloc *alloc, + struct ipu_psys_resource_pool *pool); + +extern const struct ipu_fw_resource_definitions *res_defs; + +#endif /* IPU_PLATFORM_RESOURCES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu-platform.h b/drivers/media/pci/intel/ipu4/ipu-platform.h new file mode 100644 index 000000000000..924cb2ef7e34 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu-platform.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2013 - 2018 Intel Corporation */ + +#ifndef IPU_PLATFORM_H +#define IPU_PLATFORM_H + +#define IPU_NAME "intel-ipu4" + +#ifdef CONFIG_VIDEO_INTEL_IPU4 +#define IPU_CPD_FIRMWARE_NAME "ipu4_cpd_b0.bin" +#else +#define IPU_CPD_FIRMWARE_NAME "ipu4p_cpd.bin" +#endif + +/* + * The following definitions are encoded to the media_device's model field so + * that the software components which use the IPU driver can get the hw + * stepping information. + */ +#ifdef CONFIG_VIDEO_INTEL_IPU4 +#define IPU_MEDIA_DEV_MODEL_NAME "ipu4/Broxton B" +#else +#define IPU_MEDIA_DEV_MODEL_NAME "ipu4p" +#endif + +#ifdef CONFIG_VIDEO_INTEL_IPU4 + +#define IPU_HW_BXT_P_B1_REV 0xa +#define IPU_HW_BXT_P_D0_REV 0xb +#define IPU_HW_BXT_P_E0_REV 0xc + +#define IPU_ISYS_NUM_STREAMS 8 /* Max 8 */ + +/* BXTP E0 has the icache bug fixed */ +#define is_ipu_hw_bxtp_e0(isp) \ + ({ typeof(isp) __isp = (isp); \ + (__isp->pdev->device == IPU_PCI_ID && \ + __isp->pdev->revision == IPU_HW_BXT_P_E0_REV); }) +#endif + +/* declarations, definitions in ipu4.c */ +extern const struct ipu_isys_internal_pdata isys_ipdata; +extern const struct ipu_psys_internal_pdata psys_ipdata; +extern const struct ipu_buttress_ctrl isys_buttress_ctrl; +extern const struct ipu_buttress_ctrl psys_buttress_ctrl; + +/* definitions in ipu4-isys.c */ +extern struct ipu_trace_block isys_trace_blocks[]; +/* definitions in ipu4-psys.c */ +extern struct ipu_trace_block psys_trace_blocks[]; + +#endif
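The resource-pool entry points declared in ipu-platform-resources.h suggest an allocate / move / free life cycle. A sketch of the assumed call order when a process group is queued and then started; this flow is inferred from the declarations alone, and the two-pool split is an assumption for illustration:

/* Hedged sketch: reserve resources in a candidate pool, then commit
 * them to the running pool; roll back the reservation on failure. */
static int psys_queue_pg_sketch(const struct device *dev,
				struct ipu_fw_psys_process_group *pg,
				void *pg_manifest,
				struct ipu_psys_resource_alloc *alloc,
				struct ipu_psys_resource_pool *try_pool,
				struct ipu_psys_resource_pool *run_pool)
{
	int ret;

	ret = ipu_psys_allocate_resources(dev, pg, pg_manifest, alloc, try_pool);
	if (ret)
		return ret;	/* not enough cells, channels or memory */

	ret = ipu_psys_move_resources(dev, alloc, try_pool, run_pool);
	if (ret)
		ipu_psys_free_resources(alloc, try_pool);
	return ret;
}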
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4isys_inc b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4isys_inc new file mode 100644 index 000000000000..48a4edea420f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4isys_inc @@ -0,0 +1,26 @@ +IPU_ISYSLIB_INC = \ + -I$(IPU_ISYSLIB_ROOT)/buffer/interface \ + -I$(IPU_ISYSLIB_ROOT)/cell/interface \ + -I$(IPU_ISYSLIB_ROOT)/cell/src \ + -I$(IPU_ISYSLIB_ROOT)/device_access/interface \ + -I$(IPU_ISYSLIB_ROOT)/device_access/src \ + -I$(IPU_ISYSLIB_ROOT)/devices \ + -I$(IPU_ISYSLIB_ROOT)/devices/interface \ + -I$(IPU_ISYSLIB_ROOT)/devices/isys/bxtB0 \ + -I$(IPU_ISYSLIB_ROOT)/devices/src \ + -I$(IPU_ISYSLIB_ROOT)/fw_abi_common_types \ + -I$(IPU_ISYSLIB_ROOT)/fw_abi_common_types/cpu \ + -I$(IPU_ISYSLIB_ROOT)/isysapi/interface \ + -I$(IPU_ISYSLIB_ROOT)/pkg_dir/interface \ + -I$(IPU_ISYSLIB_ROOT)/pkg_dir/src \ + -I$(IPU_ISYSLIB_ROOT)/port/interface \ + -I$(IPU_ISYSLIB_ROOT)/reg_dump/src/isys/bxtB0_gen_reg_dump \ + -I$(IPU_ISYSLIB_ROOT)/regmem/interface \ + -I$(IPU_ISYSLIB_ROOT)/regmem/src \ + -I$(IPU_ISYSLIB_ROOT)/support \ + -I$(IPU_ISYSLIB_ROOT)/syscom/interface \ + -I$(IPU_ISYSLIB_ROOT)/syscom/src \ + -I$(IPU_ISYSLIB_ROOT)/trace/interface \ + -I$(IPU_ISYSLIB_ROOT)/utils/system_defs/ \ + -I$(IPU_ISYSLIB_ROOT)/vied \ + -I$(IPU_ISYSLIB_ROOT)/vied/vied/ \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4isys_src b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4isys_src new file mode 100644 index 000000000000..c20760bdb5f1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4isys_src @@ -0,0 +1,19 @@ +IPU_ISYSLIB_SRC = \ + $(IPU_ISYSLIB_ROOT_REL)/isysapi/src/ia_css_isys_private.o \ + $(IPU_ISYSLIB_ROOT_REL)/isysapi/src/ia_css_isys_public.o \ + $(IPU_ISYSLIB_ROOT_REL)/isysapi/src/ia_css_isys_public_trace.o + +ifeq ($(CONFIG_VIDEO_INTEL_IPU), m) +IPU_ISYSLIB_SRC += \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/buffer_access.o \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_buffer.o \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_input_buffer.o \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_output_buffer.o \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_shared_buffer.o \ + $(IPU_ISYSLIB_ROOT_REL)/pkg_dir/src/ia_css_pkg_dir.o \ + $(IPU_ISYSLIB_ROOT_REL)/port/src/queue.o \ + $(IPU_ISYSLIB_ROOT_REL)/port/src/recv_port.o \ + $(IPU_ISYSLIB_ROOT_REL)/port/src/send_port.o \ + $(IPU_ISYSLIB_ROOT_REL)/reg_dump/src/reg_dump_generic_bridge.o \ + $(IPU_ISYSLIB_ROOT_REL)/syscom/src/ia_css_syscom.o +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4psys_inc b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4psys_inc new file mode 100644 index 000000000000..abc61475e988 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4psys_inc @@ -0,0 +1,52 @@ +IPU_PSYSLIB_INC = \ + -I$(IPU_PSYSLIB_ROOT)/buffer/interface \ + -I$(IPU_PSYSLIB_ROOT)/cell/interface \ + -I$(IPU_PSYSLIB_ROOT)/cell/src \ + -I$(IPU_PSYSLIB_ROOT)/client_pkg/interface \ + -I$(IPU_PSYSLIB_ROOT)/client_pkg/src \ + -I$(IPU_PSYSLIB_ROOT)/cpd/ \ + -I$(IPU_PSYSLIB_ROOT)/cpd/cpd_component/interface \ + -I$(IPU_PSYSLIB_ROOT)/cpd/cpd_metadata/interface \ + -I$(IPU_PSYSLIB_ROOT)/device_access/interface \ + -I$(IPU_PSYSLIB_ROOT)/device_access/src \ + -I$(IPU_PSYSLIB_ROOT)/devices \ + -I$(IPU_PSYSLIB_ROOT)/devices/interface \ + -I$(IPU_PSYSLIB_ROOT)/devices/psys/bxtB0 \ + -I$(IPU_PSYSLIB_ROOT)/devices/src \ + -I$(IPU_PSYSLIB_ROOT)/fw_abi_common_types \ + -I$(IPU_PSYSLIB_ROOT)/fw_abi_common_types/cpu \ + -I$(IPU_PSYSLIB_ROOT)/pkg_dir/interface \ + -I$(IPU_PSYSLIB_ROOT)/pkg_dir/src \ + -I$(IPU_PSYSLIB_ROOT)/port/interface \ + -I$(IPU_PSYSLIB_ROOT)/psys_private_pg/interface \ + -I$(IPU_PSYSLIB_ROOT)/psys_server/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/data/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/data/src \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/device/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/device/interface/bxtB0 \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/dynamic/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/dynamic/src \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/kernel/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/param/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/param/src \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/psys_server_manifest/bxtB0 \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/resource_model/bxtB0 \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/sim/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/sim/src \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/static/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/static/src \
-I$(IPU_PSYSLIB_ROOT)/reg_dump/src/psys/bxtB0_gen_reg_dump \ + -I$(IPU_PSYSLIB_ROOT)/regmem/interface \ + -I$(IPU_PSYSLIB_ROOT)/regmem/src \ + -I$(IPU_PSYSLIB_ROOT)/routing_bitmap/interface \ + -I$(IPU_PSYSLIB_ROOT)/routing_bitmap/src \ + -I$(IPU_PSYSLIB_ROOT)/support \ + -I$(IPU_PSYSLIB_ROOT)/syscom/interface \ + -I$(IPU_PSYSLIB_ROOT)/syscom/src \ + -I$(IPU_PSYSLIB_ROOT)/trace/interface \ + -I$(IPU_PSYSLIB_ROOT)/vied \ + -I$(IPU_PSYSLIB_ROOT)/vied/vied/ \ + -I$(IPU_PSYSLIB_ROOT)/vied_nci_acb/interface \ + -I$(IPU_PSYSLIB_ROOT)/vied_parameters/interface \ + -I$(IPU_PSYSLIB_ROOT)/vied_parameters/src \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4psys_src b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4psys_src new file mode 100644 index 000000000000..8344bf569e13 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.ipu4psys_src @@ -0,0 +1,32 @@ +IPU_PSYSLIB_SRC = \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/buffer_access.o \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_buffer.o \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_input_buffer.o \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_output_buffer.o \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_shared_buffer.o \ + $(IPU_PSYSLIB_ROOT_REL)/client_pkg/src/ia_css_client_pkg.o \ + $(IPU_PSYSLIB_ROOT_REL)/pkg_dir/src/ia_css_pkg_dir.o \ + $(IPU_PSYSLIB_ROOT_REL)/port/src/queue.o \ + $(IPU_PSYSLIB_ROOT_REL)/port/src/recv_port.o \ + $(IPU_PSYSLIB_ROOT_REL)/port/src/send_port.o \ + $(IPU_PSYSLIB_ROOT_REL)/psys_server/src/bxt_spctrl_process_group_cmd_impl.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/data/src/ia_css_program_group_data.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/device/src/ia_css_psys_device.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/dynamic/src/ia_css_psys_buffer_set.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/dynamic/src/ia_css_psys_process.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/dynamic/src/ia_css_psys_process_group.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/dynamic/src/ia_css_psys_terminal.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/kernel/src/ia_css_kernel_bitmap.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/param/src/ia_css_program_group_param.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/psys_server_manifest/bxtB0/ia_css_psys_server_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/resource_model/bxtB0/vied_nci_psys_resource_model.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/sim/src/vied_nci_psys_system.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/static/src/ia_css_psys_program_group_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/static/src/ia_css_psys_program_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/static/src/ia_css_psys_terminal_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/reg_dump/src/reg_dump_generic_bridge.o \ + $(IPU_PSYSLIB_ROOT_REL)/routing_bitmap/src/ia_css_rbm.o \ + $(IPU_PSYSLIB_ROOT_REL)/routing_bitmap/src/ia_css_rbm_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/syscom/src/ia_css_syscom.o \ + $(IPU_PSYSLIB_ROOT_REL)/vied_parameters/src/ia_css_terminal.o \ + $(IPU_PSYSLIB_ROOT_REL)/vied_parameters/src/ia_css_terminal_manifest.o \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.isyslib b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.isyslib new file mode 100644 index 000000000000..c99c9a898c69 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.isyslib @@ -0,0 +1,47 @@ +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +PROGRAMS = isys_fw +SYSTEM = input_system_system +IPU_ISYSLIB_ROOT_REL = ipu4-css/lib2600 +IPU_ISYSLIB_ROOT = 
$(srcpath)/$(src)/$(IPU_ISYSLIB_ROOT_REL) + +include $(srcpath)/$(src)/ipu4-css/Makefile.ipu4isys_inc +include $(srcpath)/$(src)/ipu4-css/Makefile.ipu4isys_src + +# +# copy wrapper here only for isys usage, psys would use the original one +# +$(shell cp -f $(srcpath)/$(src)/../ipu-wrapper.c $(srcpath)/$(src)/ipu4-css/ipu-wrapper.c) + +intel-ipu4-isys-csslib-objs := \ + ipu4-css/libintel-ipu4.o \ + $(IPU_ISYSLIB_SRC) + +ifeq ($(CONFIG_VIDEO_INTEL_IPU), m) +intel-ipu4-isys-csslib-objs += ipu4-css/ipu-wrapper.o +endif +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4-isys-csslib.o + +INCLUDES := -I$(srcpath)/$(src)/$(IPU_ISYSLIB_ROOT_REL) \ + -I$(srcpath)/$(src) \ + $(IPU_ISYSLIB_INC) + +DEFINES:= -D__HOST__ -D__KERNEL__ -DISYS_FPGA -DPSYS_FPGA + +DEFINES += -DSSID=1 +DEFINES += -DMMID=1 +DEFINES += -DPROGNAME=isys_fw +DEFINES += -DPROGMAP=\"isys_fw.map.h\" +DEFINES += -DSUBSYSTEM_INCLUDE=\ +DEFINES += -DCELL=input_system_unis_logic_sp_control_tile_sp +DEFINES += -DSPMAIN=isys_fw +DEFINES += -DRUN_INTEGRATION +DEFINES += -DDEBUG_SP_NCI +DEFINES += -DCFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL=1 +DEFINES += -DHRT_ON_VIED_SUBSYSTEM_ACCESS=0 +DEFINES += -DHRT_USE_VIR_ADDRS +DEFINES += -DHRT_HW + +ccflags-y += $(INCLUDES) $(DEFINES) -fno-common diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.psyslib b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.psyslib new file mode 100644 index 000000000000..52a8ee385219 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/Makefile.psyslib @@ -0,0 +1,15 @@ +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +# note: this file only defines INCLUDES paths for psyslib +include $(srcpath)/$(src)/ipu4-css/Makefile.ipu4psys_inc + +IPU_PSYSLIB_ROOT = $(srcpath)/$(src)/ipu4-css/lib2600psys/lib +HOST_DEFINES += -DPSYS_SERVER_ON_SPC +HOST_DEFINES += -DCFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL=1 + +ccflags-y += $(IPU_PSYSLIB_INC) $(HOST_DEFINES) + +obj-$(CONFIG_VIDEO_INTEL_IPU) += ipu4-css/lib2600psys/ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/ia_css_fw_pkg_release.h b/drivers/media/pci/intel/ipu4/ipu4-css/ia_css_fw_pkg_release.h new file mode 100644 index 000000000000..cb20a688b7c3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/ia_css_fw_pkg_release.h @@ -0,0 +1,14 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#define IA_CSS_FW_PKG_RELEASE 0x20180615 diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/buffer.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/buffer.mk new file mode 100644 index 000000000000..c00a1133b440 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/buffer.mk @@ -0,0 +1,43 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. 
+# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is BUFFER + +ifdef _H_BUFFER_MK +$(error ERROR: buffer.mk included multiple times, please check makefile) +else +_H_BUFFER_MK=1 +endif + +BUFFER_DIR=$${MODULES_DIR}/buffer + +BUFFER_INTERFACE=$(BUFFER_DIR)/interface +BUFFER_SOURCES_CPU=$(BUFFER_DIR)/src/cpu +BUFFER_SOURCES_CSS=$(BUFFER_DIR)/src/css + +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_output_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_input_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_shared_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/buffer_access.c +BUFFER_HOST_CPPFLAGS += -I$(BUFFER_INTERFACE) +BUFFER_HOST_CPPFLAGS += -I$${MODULES_DIR}/support + +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_input_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_output_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_shared_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/buffer_access.c + +BUFFER_FW_CPPFLAGS += -I$(BUFFER_INTERFACE) +BUFFER_FW_CPPFLAGS += -I$${MODULES_DIR}/support diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/buffer_access.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/buffer_access.h new file mode 100644 index 000000000000..e5fe647742c9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/buffer_access.h @@ -0,0 +1,36 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __BUFFER_ACCESS_H +#define __BUFFER_ACCESS_H + +#include "buffer_type.h" +/* #def to keep consistent the buffer load interfaces for host and css */ +#define IDM 0 + +void +buffer_load( + buffer_address address, + void *data, + unsigned int size, + unsigned int mm_id); + +void +buffer_store( + buffer_address address, + const void *data, + unsigned int size, + unsigned int mm_id); + +#endif /* __BUFFER_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/buffer_type.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/buffer_type.h new file mode 100644 index 000000000000..de51f2394158 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/buffer_type.h @@ -0,0 +1,29 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +#ifndef __BUFFER_TYPE_H +#define __BUFFER_TYPE_H + +/* portable access to buffers in DDR */ + +#ifdef __VIED_CELL +typedef unsigned int buffer_address; +#else +/* workaround needed because shared_memory_access.h uses size_t */ +#include "type_support.h" +#include "vied/shared_memory_access.h" +typedef host_virtual_address_t buffer_address; +#endif + +#endif /* __BUFFER_TYPE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_buffer_address.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_buffer_address.h new file mode 100644 index 000000000000..2530297e8e36 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_buffer_address.h @@ -0,0 +1,25 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_BUFFER_ADDRESS_H +#define __IA_CSS_BUFFER_ADDRESS_H + +#include "type_support.h" + +typedef uint32_t ia_css_buffer_address; /* CSS virtual address */ + +#define ia_css_buffer_address_null ((ia_css_buffer_address)0) + +#endif /* __IA_CSS_BUFFER_ADDRESS_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_input_buffer.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_input_buffer.h new file mode 100644 index 000000000000..b8e7a6ac4648 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_input_buffer.h @@ -0,0 +1,52 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_INPUT_BUFFER_H +#define __IA_CSS_INPUT_BUFFER_H + + +/* Input Buffers */ + +/* A CSS input buffer is a buffer in DDR that can be written by the CPU, + * and that can be read by CSS hardware, after the buffer has been handed over. + * Examples: command buffer, input frame buffer, parameter buffer + * An input buffer must be mapped into the CPU address space before it can be + * written by the CPU. + * After mapping, writing, and unmapping, the buffer can be handed over to the + * firmware. An input buffer is handed over to the CSS by mapping it to the + * CSS address space (by the CPU), and by passing the resulting CSS (virtual) + * address of the input buffer to the CSS hardware. + * The firmware can read from an input buffer as soon as it has received the + * CSS virtual address. + * The firmware should not write into an input buffer.
+ * The firmware hands over the input buffer (back to the CPU) by sending the + * buffer handle via a response. The host should unmap the buffer + * before reusing it. + * The firmware should not read from the input buffer after returning the + * buffer handle to the CPU. + * + * A buffer may be pre-mapped to the CPU and/or to the CSS upon allocation, + * depending on the allocator's preference. In case of pre-mapped buffers, + * the map and unmap functions will only manage read and write access. + */ + +#include "ia_css_buffer_address.h" + +typedef struct ia_css_buffer_s *ia_css_input_buffer; /* input buffer handle */ +typedef void *ia_css_input_buffer_cpu_address; /* CPU virtual address */ +/* CSS virtual address */ +typedef ia_css_buffer_address ia_css_input_buffer_css_address; + +#endif /* __IA_CSS_INPUT_BUFFER_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_input_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_input_buffer_cpu.h new file mode 100644 index 000000000000..d3d01353ce43 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_input_buffer_cpu.h @@ -0,0 +1,49 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_INPUT_BUFFER_CPU_H +#define __IA_CSS_INPUT_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_input_buffer.h" + +ia_css_input_buffer +ia_css_input_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_input_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_input_buffer b); + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_map(ia_css_input_buffer b); + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_unmap(ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map(vied_memory_t mid, ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map_no_invalidate(vied_memory_t mid, ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_unmap(ia_css_input_buffer b); + + +#endif /* __IA_CSS_INPUT_BUFFER_CPU_H */
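Taken together with the life-cycle comment in ia_css_input_buffer.h, the intended CPU-side round trip looks roughly as follows. A sketch only: send_to_css() stands in for whatever mechanism actually hands the CSS address to firmware, and error paths are trimmed:

#include <string.h>

/* Hedged sketch of the input-buffer hand-over described above. */
static int input_buffer_roundtrip_sketch(vied_subsystem_t sid,
					 vied_memory_t mid,
					 const void *data, unsigned int size)
{
	ia_css_input_buffer b = ia_css_input_buffer_alloc(sid, mid, size);
	ia_css_input_buffer_cpu_address cpu;
	ia_css_input_buffer_css_address css;

	if (!b)
		return -1;

	cpu = ia_css_input_buffer_cpu_map(b);	/* CPU gains write access */
	memcpy(cpu, data, size);
	ia_css_input_buffer_cpu_unmap(b);	/* back to unmapped */

	css = ia_css_input_buffer_css_map(mid, b); /* CSS gains read access */
	send_to_css(css);			/* hypothetical hand-over */
	/* ... wait for the response that returns the buffer handle ... */
	ia_css_input_buffer_css_unmap(b);

	ia_css_input_buffer_free(sid, mid, b);
	return 0;
}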
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_output_buffer.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_output_buffer.h new file mode 100644 index 000000000000..a8c0f9e8554e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_output_buffer.h @@ -0,0 +1,31 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_OUTPUT_BUFFER_H +#define __IA_CSS_OUTPUT_BUFFER_H + +/* Output Buffers */ +/* A CSS output buffer is a buffer in DDR that can be written by CSS hardware + * and that can be read by the host, after the buffer has been handed over. + * Examples: output frame buffer + */ + +#include "ia_css_buffer_address.h" + +typedef struct ia_css_buffer_s *ia_css_output_buffer; +typedef void *ia_css_output_buffer_cpu_address; +typedef ia_css_buffer_address ia_css_output_buffer_css_address; + +#endif /* __IA_CSS_OUTPUT_BUFFER_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_output_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_output_buffer_cpu.h new file mode 100644 index 000000000000..0299fc3b7eb6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_output_buffer_cpu.h @@ -0,0 +1,48 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_OUTPUT_BUFFER_CPU_H +#define __IA_CSS_OUTPUT_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_output_buffer.h" + +ia_css_output_buffer +ia_css_output_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_output_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_output_buffer b); + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_map(ia_css_output_buffer b); + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_unmap(ia_css_output_buffer b); + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map(vied_memory_t mid, ia_css_output_buffer b); +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map_no_invalidate(vied_memory_t mid, ia_css_output_buffer b); + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_unmap(ia_css_output_buffer b); + + +#endif /* __IA_CSS_OUTPUT_BUFFER_CPU_H */
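Output buffers run the same state machine in the opposite direction: the CSS side maps for write, and the CPU side maps for read once the firmware is done. A read-back sketch; wait_for_css_response() is a stand-in for the real completion mechanism and is not part of this patch:

#include <string.h>

/* Hedged sketch of the CPU-side read-back of a firmware-filled buffer. */
static void output_buffer_readback_sketch(vied_memory_t mid,
					  ia_css_output_buffer b,
					  void *dst, unsigned int size)
{
	ia_css_output_buffer_cpu_address cpu;

	ia_css_output_buffer_css_map(b);	/* CSS gains write access */
	wait_for_css_response();		/* hypothetical completion */
	ia_css_output_buffer_css_unmap(b);

	cpu = ia_css_output_buffer_cpu_map(mid, b); /* CPU gains read access */
	memcpy(dst, cpu, size);
	ia_css_output_buffer_cpu_unmap(b);
}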
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_return_token.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_return_token.h new file mode 100644 index 000000000000..440161d2f32b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_return_token.h @@ -0,0 +1,54 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RETURN_TOKEN_H +#define __IA_CSS_RETURN_TOKEN_H + +#include "storage_class.h" +#include "assert_support.h" /* For CT_ASSERT */ + +/* ia_css_return_token: data item of exactly 8 bytes (64 bits) + * which can be used to pass a return token back to the host +*/ +typedef unsigned long long ia_css_return_token; + +STORAGE_CLASS_INLINE void +ia_css_return_token_copy(ia_css_return_token *to, + const ia_css_return_token *from) +{ + /* copy a return token on VIED processor */ + int *dst = (int *)to; + int *src = (int *)from; + + dst[0] = src[0]; + dst[1] = src[1]; +} + +STORAGE_CLASS_INLINE void +ia_css_return_token_zero(ia_css_return_token *to) +{ + /* zero return token on VIED processor */ + int *dst = (int *)to; + + dst[0] = 0; + dst[1] = 0; +} + +STORAGE_CLASS_INLINE void _check_return_token_size(void) +{ + CT_ASSERT(sizeof(int) == 4); + CT_ASSERT(sizeof(ia_css_return_token) == 8); +} + +#endif /* __IA_CSS_RETURN_TOKEN_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_shared_buffer.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_shared_buffer.h new file mode 100644 index 000000000000..558ec679f98a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_shared_buffer.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_SHARED_BUFFER_H +#define __IA_CSS_SHARED_BUFFER_H + +/* Shared Buffers */ +/* A CSS shared buffer is a buffer in DDR that can be read and written by the + * CPU and CSS. + * Both the CPU and CSS can have the buffer mapped simultaneously. + * Access rights are not managed by this interface; this could be done by + * means of the read and write pointers of a queue, for example. + */ + +#include "ia_css_buffer_address.h" + +typedef struct ia_css_buffer_s *ia_css_shared_buffer; +typedef void *ia_css_shared_buffer_cpu_address; +typedef ia_css_buffer_address ia_css_shared_buffer_css_address; + +#endif /* __IA_CSS_SHARED_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_shared_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_shared_buffer_cpu.h new file mode 100644 index 000000000000..ff62914f99dc --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/interface/ia_css_shared_buffer_cpu.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+*/ + +#ifndef __IA_CSS_SHARED_BUFFER_CPU_H +#define __IA_CSS_SHARED_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_shared_buffer.h" + +ia_css_shared_buffer +ia_css_shared_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_shared_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_shared_buffer b); + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_map(ia_css_shared_buffer b); + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_unmap(ia_css_shared_buffer b); + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_map(ia_css_shared_buffer b); + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_unmap(ia_css_shared_buffer b); + +ia_css_shared_buffer +ia_css_shared_buffer_css_update(vied_memory_t mid, ia_css_shared_buffer b); + +ia_css_shared_buffer +ia_css_shared_buffer_cpu_update(vied_memory_t mid, ia_css_shared_buffer b); + +#endif /* __IA_CSS_SHARED_BUFFER_CPU_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/buffer_access.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/buffer_access.c new file mode 100644 index 000000000000..83cbda5a9ff5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/buffer_access.c @@ -0,0 +1,40 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +/* implementation of buffer access from the CPU */ +/* using shared_memory interface */ + +#include "buffer_access.h" +#include "vied/shared_memory_access.h" + +void +buffer_load( + buffer_address address, + void *data, + unsigned int bytes, + unsigned int mm_id) +{ + shared_memory_load(mm_id, address, data, bytes); +} + +void +buffer_store( + buffer_address address, + const void *data, + unsigned int bytes, + unsigned int mm_id) +{ + shared_memory_store(mm_id, address, data, bytes); +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_buffer.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_buffer.c new file mode 100644 index 000000000000..3828b186ddac --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_buffer.c @@ -0,0 +1,52 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +/* provided interface */ +#include "ia_css_buffer.h" + +/* used interfaces */ +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + +ia_css_buffer_t +ia_css_buffer_alloc(vied_subsystem_t sid, vied_memory_t mid, unsigned int size) +{ + ia_css_buffer_t b; + + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + + b->css_address = shared_memory_map(sid, mid, b->mem); + b->size = size; + return b; +} + + +void +ia_css_buffer_free(vied_subsystem_t sid, vied_memory_t mid, ia_css_buffer_t b) +{ + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_buffer.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_buffer.h new file mode 100644 index 000000000000..0f99a06e9a89 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_buffer.h @@ -0,0 +1,58 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_BUFFER_H +#define __IA_CSS_BUFFER_H + +/* workaround: needed because shared_memory_map.h uses size_t */ +#include "type_support.h" +#include "vied/shared_memory_map.h" + +typedef enum { + buffer_unmapped, /* buffer is not accessible by cpu, nor css */ + buffer_write, /* output buffer: css has write access */ + /* input buffer: cpu has write access */ + buffer_read, /* input buffer: css has read access */ + /* output buffer: cpu has read access */ + buffer_cpu, /* shared buffer: cpu has read/write access */ + buffer_css /* shared buffer: css has read/write access */ +} buffer_state; + +struct ia_css_buffer_s { + /* number of bytes allocated */ + unsigned int size; + /* allocated virtual memory object */ + host_virtual_address_t mem; + /* virtual address to be used on css/firmware */ + vied_virtual_address_t css_address; + /* virtual address to be used on cpu/host */ + void *cpu_address; + buffer_state state; +}; + +typedef struct ia_css_buffer_s *ia_css_buffer_t; + +ia_css_buffer_t +ia_css_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_buffer_t b); + +#endif /* __IA_CSS_BUFFER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_input_buffer.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_input_buffer.c new file mode 100644 index 000000000000..2bd754062a0e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_input_buffer.c @@ -0,0 +1,185 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include "ia_css_input_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_input_buffer +ia_css_input_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_input_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_input_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_input_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_map(ia_css_input_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map input buffer to CPU address space, acquire write access */ + b->state = buffer_write; + + /* return pre-mapped buffer */ + return b->cpu_address; +} + + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_unmap(ia_css_input_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_write) + return NULL; + + /* unmap input buffer from CPU address space, release write access */ + b->state = buffer_unmapped; + + /* return pre-mapped buffer */ + return b->cpu_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map(vied_memory_t mid, ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map input buffer to CSS address space, acquire read access */ + b->state = buffer_read; + + /* now flush the cache */ + ia_css_cpu_mem_cache_flush(b->cpu_address, b->size); +#ifndef HRT_HW + /* only copy in case of simulation, otherwise it should just work */ + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return (ia_css_input_buffer_css_address)b->css_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map_no_invalidate(vied_memory_t mid, ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if 
(b->state != buffer_unmapped) + return 0; + + /* map input buffer to CSS address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only copy in case of simulation, otherwise it should just work */ + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return (ia_css_input_buffer_css_address)b->css_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_unmap(ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_read) + return 0; + + /* unmap input buffer from CSS address space, release read access */ + b->state = buffer_unmapped; + + /* input buffer only, no need to invalidate cache */ + + return (ia_css_input_buffer_css_address)b->css_address; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_output_buffer.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_output_buffer.c new file mode 100644 index 000000000000..892dcbd49825 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_output_buffer.c @@ -0,0 +1,182 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include "ia_css_output_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_output_buffer +ia_css_output_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_output_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_output_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_output_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_map(ia_css_output_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map output buffer to 
CSS address space, acquire write access */ + b->state = buffer_write; + + return (ia_css_output_buffer_css_address)b->css_address; +} + + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_unmap(ia_css_output_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_write) + return 0; + + /* unmap output buffer from CSS address space, release write access */ + b->state = buffer_unmapped; + + return (ia_css_output_buffer_css_address)b->css_address; +} + + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map(vied_memory_t mid, ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map output buffer to CPU address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only in simulation */ + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + /* now invalidate the cache */ + ia_css_cpu_mem_cache_invalidate(b->cpu_address, b->size); + + return b->cpu_address; +} + + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map_no_invalidate(vied_memory_t mid, ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map output buffer to CPU address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only in simulation */ + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return b->cpu_address; +} + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_unmap(ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_read) + return NULL; + + /* unmap output buffer from CPU address space, release read access */ + b->state = buffer_unmapped; + + /* output only, no need to flush cache */ + + return b->cpu_address; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_shared_buffer.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_shared_buffer.c new file mode 100644 index 000000000000..1041bd07721b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/buffer/src/cpu/ia_css_shared_buffer.c @@ -0,0 +1,188 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
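The input- and output-buffer files above implement a small ownership protocol: a buffer must be unmapped before either side may map it, the CPU gets write access to input buffers and read access to output buffers, and the map/unmap calls perform the cache flush, cache invalidate, or simulation copy that the transfer direction requires. The following host-side sketch shows one legal round trip; the process_frame() helper, the plain memcpy(), and the assumption that the cpu_address typedefs are plain pointers are illustrative and not part of this patch.

#include <string.h>
#include "ia_css_input_buffer_cpu.h"
#include "ia_css_output_buffer_cpu.h"

static int process_frame(vied_subsystem_t sid, vied_memory_t mid,
	const void *src, void *dst, unsigned int size)
{
	ia_css_input_buffer in;
	ia_css_output_buffer out;
	ia_css_input_buffer_cpu_address icpu;
	ia_css_output_buffer_cpu_address ocpu;
	ia_css_input_buffer_css_address in_css;
	ia_css_output_buffer_css_address out_css;

	in = ia_css_input_buffer_alloc(sid, mid, size);
	out = ia_css_output_buffer_alloc(sid, mid, size);
	if (in == NULL || out == NULL) {
		ia_css_output_buffer_free(sid, mid, out);
		ia_css_input_buffer_free(sid, mid, in);
		return -1;
	}

	/* CPU fills the input buffer: unmapped -> write -> unmapped */
	icpu = ia_css_input_buffer_cpu_map(in);
	memcpy(icpu, src, size);
	ia_css_input_buffer_cpu_unmap(in);

	/* hand both buffers to CSS; css_map flushes/copies as needed */
	in_css = ia_css_input_buffer_css_map(mid, in);
	out_css = ia_css_output_buffer_css_map(out);

	/* ... submit in_css/out_css to the firmware and wait here ... */
	(void)in_css;
	(void)out_css;

	ia_css_input_buffer_css_unmap(in);
	ia_css_output_buffer_css_unmap(out);

	/* CPU reads the result; cpu_map invalidates/copies as needed */
	ocpu = ia_css_output_buffer_cpu_map(mid, out);
	memcpy(dst, ocpu, size);
	ia_css_output_buffer_cpu_unmap(out);

	ia_css_output_buffer_free(sid, mid, out);
	ia_css_input_buffer_free(sid, mid, in);
	return 0;
}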
+*/ + + +#include "ia_css_shared_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_shared_buffer +ia_css_shared_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_shared_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_shared_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_shared_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_map(ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map shared buffer to CPU address space */ + b->state = buffer_cpu; + + return b->cpu_address; +} + + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_unmap(ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_cpu) + return NULL; + + /* unmap shared buffer from CPU address space */ + b->state = buffer_unmapped; + + return b->cpu_address; +} + + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_map(ia_css_shared_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map shared buffer to CSS address space */ + b->state = buffer_css; + + return (ia_css_shared_buffer_css_address)b->css_address; +} + + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_unmap(ia_css_shared_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_css) + return 0; + + /* unmap shared buffer from CSS address space */ + b->state = buffer_unmapped; + + return (ia_css_shared_buffer_css_address)b->css_address; +} + + +ia_css_shared_buffer +ia_css_shared_buffer_css_update(vied_memory_t mid, ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + + /* flush the buffer to CSS after it was modified by the CPU */ + /* flush cache to ddr */ + ia_css_cpu_mem_cache_flush(b->cpu_address, b->size); +#ifndef HRT_HW + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return b; +} + + +ia_css_shared_buffer +ia_css_shared_buffer_cpu_update(vied_memory_t mid, ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + + /* flush the buffer to the CPU after it has been modified by CSS */ +#ifndef HRT_HW + /* copy data from CSS address space to CPU address space */ + 
shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + /* flush cache to ddr */ + ia_css_cpu_mem_cache_invalidate(b->cpu_address, b->size); + + return b; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/cell.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/cell.mk new file mode 100644 index 000000000000..fa5e65022601 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/cell.mk @@ -0,0 +1,43 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +ifndef _CELL_MK_ +_CELL_MK_ = 1 + + +CELL_DIR=$${MODULES_DIR}/cell +CELL_INTERFACE=$(CELL_DIR)/interface +CELL_SOURCES=$(CELL_DIR)/src + +CELL_HOST_FILES = +CELL_FW_FILES = + +CELL_HOST_CPPFLAGS = \ + -I$(CELL_INTERFACE) \ + -I$(CELL_SOURCES) + +CELL_FW_CPPFLAGS = \ + -I$(CELL_INTERFACE) \ + -I$(CELL_SOURCES) + +ifdef 0 +# Disabled until it is decided to go this way or not +include $(MODULES_DIR)/device_access/device_access.mk +CELL_HOST_FILES += $(DEVICE_ACCESS_HOST_FILES) +CELL_FW_FILES += $(DEVICE_ACCESS_FW_FILES) +CELL_HOST_CPPFLAGS += $(DEVICE_ACCESS_HOST_CPPFLAGS) +CELL_FW_CPPFLAGS += $(DEVICE_ACCESS_FW_CPPFLAGS) +endif + +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/interface/ia_css_cell.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/interface/ia_css_cell.h new file mode 100644 index 000000000000..3fac3c791b6e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/interface/ia_css_cell.h @@ -0,0 +1,112 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
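Unlike the one-directional input/output buffers, a shared buffer stays mapped on one side and is synchronized explicitly: ia_css_shared_buffer_css_update() flushes (and in simulation copies) CPU-side writes toward CSS, and ia_css_shared_buffer_cpu_update() pulls CSS-side writes back and invalidates the CPU cache. A hedged sketch of a command/status exchange; the token layout and the surrounding handshake are assumptions, not part of this patch.

#include "ia_css_shared_buffer_cpu.h"

/* Hypothetical token layout; the real layout is fixed by the firmware ABI */
struct token {
	unsigned int command;
	unsigned int status;
};

static void exchange_token(vied_memory_t mid, ia_css_shared_buffer b)
{
	struct token *t;

	t = (struct token *)ia_css_shared_buffer_cpu_map(b);
	if (t == NULL)
		return;

	t->command = 1;
	/* make the CPU-side write visible to CSS */
	ia_css_shared_buffer_css_update(mid, b);

	/* ... firmware consumes command and writes status ... */

	/* pull the CSS-side write back into the CPU view */
	ia_css_shared_buffer_cpu_update(mid, b);
	/* t->status is now valid */
	ia_css_shared_buffer_cpu_unmap(b);
}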
+*/ + +#ifndef __IA_CSS_CELL_H +#define __IA_CSS_CELL_H + +#include "storage_class.h" +#include "type_support.h" + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stat_ctrl(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_stat_ctrl(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_pc(unsigned int ssid, unsigned int cell_id, + unsigned int pc); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +#if 0 /* To be implemented after completing cell device properties */ +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_info_bits(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_debug_pc(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stall_bits(unsigned int ssid, unsigned int cell_id); +#endif + +/* configure master ports */ + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_base_address(unsigned int ssid, + unsigned int cell_id, + unsigned int master, unsigned int segment, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_bits(unsigned int ssid, unsigned int cell_id, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_bits(unsigned int ssid, + unsigned int cell_id, + unsigned int master, unsigned int segment, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_override_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_override_bits(unsigned int ssid, + unsigned int cell, + unsigned int master, unsigned int segment, unsigned int value); + +/* Access memories */ + +STORAGE_CLASS_INLINE void +ia_css_cell_mem_store_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr, unsigned int value); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_mem_load_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr); + +/***********************************************************************/ + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_is_ready(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_bit(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_run_bit(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_start(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_start_prefetch(unsigned int ssid, unsigned int cell_id, + bool prefetch); + +STORAGE_CLASS_INLINE void +ia_css_cell_wait(unsigned int ssid, unsigned int cell_id); + +/* include inline implementation */ +#include "ia_css_cell_impl.h" + +#endif /* __IA_CSS_CELL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/src/ia_css_cell_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/src/ia_css_cell_impl.h new file mode 100644 index 000000000000..60b2e234da1a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/cell/src/ia_css_cell_impl.h @@ -0,0 +1,272 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CELL_IMPL_H +#define __IA_CSS_CELL_IMPL_H + +#include "ia_css_cell.h" + +#include "ia_css_cmem.h" +#include "ipu_device_cell_properties.h" +#include "storage_class.h" +#include "assert_support.h" +#include "platform_support.h" +#include "misc_support.h" + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_regs_addr(unsigned int cell_id) +{ + /* mem_id 0 is for registers */ + return ipu_device_cell_memory_address(cell_id, 0); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_dmem_addr(unsigned int cell_id) +{ + /* mem_id 1 is for DMEM */ + return ipu_device_cell_memory_address(cell_id, 1); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_mem_store_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr, unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ipu_device_cell_memory_address( + cell_id, mem_id) + addr, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_mem_load_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr) +{ + return ia_css_cmem_load_32( + ssid, ipu_device_cell_memory_address(cell_id, mem_id) + addr); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stat_ctrl(unsigned int ssid, unsigned int cell_id) +{ + return ia_css_cmem_load_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_stat_ctrl(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_is_ready(unsigned int ssid, unsigned int cell_id) +{ + unsigned int reg; + + reg = ia_css_cell_get_stat_ctrl(ssid, cell_id); + /* READY must be 1, START must be 0 */ + return (reg & (1 << IPU_DEVICE_CELL_STAT_CTRL_READY_BIT)) && + ((~reg) & (1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT)); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_pc(unsigned int ssid, unsigned int cell_id, + unsigned int pc) +{ + /* set start PC */ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_START_PC_REG_ADDRESS, pc); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_bit(unsigned int ssid, unsigned int cell_id) +{ + unsigned int reg; + + reg = 1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT; + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_run_bit(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + unsigned int reg; + + reg = value << IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT; + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_start(unsigned int ssid, unsigned int cell_id) +{ + ia_css_cell_start_prefetch(ssid, cell_id, 0); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_start_prefetch(unsigned int ssid, unsigned int cell_id, + bool prefetch) +{ + unsigned int reg = 0; + + /* Set run bit and start bit */ + reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT); + reg |= (1 << 
IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT);
+	/* Invalidate the icache */
+	reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT);
+	/* Optionally enable prefetching */
+	reg |= (prefetch ?
+		(1 << IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT) :
+		0);
+
+	/* store into register */
+	ia_css_cell_set_stat_ctrl(ssid, cell_id, reg);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_wait(unsigned int ssid, unsigned int cell_id)
+{
+	do {
+		ia_css_sleep();
+	} while (!ia_css_cell_is_ready(ssid, cell_id));
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_icache_base_address(unsigned int ssid, unsigned int cell_id,
+	unsigned int value)
+{
+	ia_css_cmem_store_32(
+		ssid, ia_css_cell_regs_addr(cell_id) +
+		IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS, value);
+}
+
+/* master port configuration */
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_segment_info_bits(unsigned int ssid, unsigned int cell,
+	unsigned int master, unsigned int segment, unsigned int value)
+{
+	unsigned int addr;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < ipu_device_cell_num_masters(cell));
+	assert(segment < ipu_device_cell_master_num_segments(cell, master));
+
+	addr = ipu_device_cell_memory_address(cell, 0);
+	addr += ipu_device_cell_master_info_reg(cell, master);
+	addr += segment * ipu_device_cell_master_stride(cell, master);
+	ia_css_cmem_store_32(ssid, addr, value);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_segment_info_override_bits(unsigned int ssid,
+	unsigned int cell,
+	unsigned int master, unsigned int segment, unsigned int value)
+{
+	unsigned int addr;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < ipu_device_cell_num_masters(cell));
+	assert(segment < ipu_device_cell_master_num_segments(cell, master));
+
+	addr = ipu_device_cell_memory_address(cell, 0);
+	addr += ipu_device_cell_master_info_override_reg(cell, master);
+	addr += segment * ipu_device_cell_master_stride(cell, master);
+	ia_css_cmem_store_32(ssid, addr, value);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_segment_base_address(unsigned int ssid,
+	unsigned int cell,
+	unsigned int master, unsigned int segment, unsigned int value)
+{
+	unsigned int addr;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < ipu_device_cell_num_masters(cell));
+	assert(segment < ipu_device_cell_master_num_segments(cell, master));
+
+	addr = ipu_device_cell_memory_address(cell, 0);
+	addr += ipu_device_cell_master_base_reg(cell, master);
+	addr += segment * ipu_device_cell_master_stride(cell, master);
+	ia_css_cmem_store_32(ssid, addr, value);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_info_bits(unsigned int ssid, unsigned int cell,
+	unsigned int master, unsigned int value)
+{
+	unsigned int addr, s, stride, num_segments;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < ipu_device_cell_num_masters(cell));
+
+	addr = ipu_device_cell_memory_address(cell, 0);
+	addr += ipu_device_cell_master_info_reg(cell, master);
+	stride = ipu_device_cell_master_stride(cell, master);
+	num_segments = ipu_device_cell_master_num_segments(cell, master);
+	for (s = 0; s < num_segments; s++) {
+		ia_css_cmem_store_32(ssid, addr, value);
+		addr += stride;
+	}
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_info_override_bits(unsigned int ssid, unsigned int cell,
+	unsigned int master, unsigned int value)
+{
+	unsigned int addr, s, stride, num_segments;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < 
ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_override_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_base_address(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments, segment_size; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_base_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + segment_size = ipu_device_cell_master_segment_size(cell, master); + + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + value += segment_size; + } +} + +#endif /* __IA_CSS_CELL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/config/isys/subsystem_bxtB0.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/config/isys/subsystem_bxtB0.mk new file mode 100644 index 000000000000..da142032349f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/config/isys/subsystem_bxtB0.mk @@ -0,0 +1,60 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +############################################################################ +# This file is used to specify versions and properties of ISYS firmware +# components. Please note that these are subsystem specific. System specific +# properties should go to system_$IPU_SYSVER.mk. Also the device versions +# should be defined under "devices" or should be taken from the SDK. 
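The ia_css_cell_set_master_* loop variants above program every segment of a master port in one call; for base addresses the programmed value advances by the segment size, so consecutive segments map consecutive system addresses. A worked example under assumed properties: with IA_CSS_CELL_MASTER_ADDRESS_WIDTH = 32 and segment_bits = 2, a master has 1 << 2 = 4 segments of 1 << 30 bytes each, so the call below writes base registers 0x00000000, 0x40000000, 0x80000000 and 0xC0000000, i.e. an identity mapping of the cell's 32-bit master address space.

#include "ia_css_cell.h"

/* Sketch: identity-map one master port; the cell/master ids and the
 * segment_bits value discussed above are assumptions */
static void cell_master_identity_map(unsigned int ssid, unsigned int cell,
	unsigned int master)
{
	ia_css_cell_set_master_base_address(ssid, cell, master, 0);
}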
+############################################################################
+
+############################################################################
+# FIRMWARE RELATED VARIABLES
+############################################################################
+
+# Activate loading params and storing stats DDR<->REGs with DMA
+ISYS_USE_ISA_DMA = 1
+# Used in ISA module
+ISYS_ISL_DPC_DPC_V2 = 0
+
+# Specification for the Isys server's fixed globals' locations
+REGMEM_OFFSET = 0 # Starting from 0
+REGMEM_SIZE = 34
+REGMEM_WORD_BYTES = 4
+# REGMEM_OFFSET + REGMEM_SIZE * REGMEM_WORD_BYTES = 0 + 34 * 4 = 136
+FW_LOAD_NO_OF_REQUEST_OFFSET = 136
+FW_LOAD_NO_OF_REQUEST_SIZE_BYTES = 4
+
+# Workarounds:
+
+# WA: do not pipeline store-frame commands for SID processors that
+# control a Str2Vec (ISA output)
+WA_HSD1304553438 = 1
+
+# WA for larger-than-specified frames that complete mid-line
+WA_HSD1209062354 = 1
+
+# WA: disable clock gating for the devices in the CSI receivers; needed
+# when using the mipi_pkt_gen device
+WA_HSD1805168877 = 0
+
+# Support IBUF soft-reset at stream start
+SOFT_RESET_IBUF_STREAM_START_SUPPORT = 1
+
+############################################################################
+# TESTING RELATED VARIABLES
+############################################################################
+
+# TODO: This define should be entirely removed.
+# Used in mipi_capture
+ISYS_DISABLE_VERIFY_RECEIVED_SOF_EOF = 0
+
+ISYS_ACCESS_BLOCKER_VERSION = v1
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/config/system_bxtB0.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/config/system_bxtB0.mk
new file mode 100644
index 000000000000..24d079b40516
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/config/system_bxtB0.mk
@@ -0,0 +1,88 @@
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details
+#
+
+LOGICAL_FW_INPUT_SYSTEM = input_system_system
+LOGICAL_FW_PROCESSING_SYSTEM = processing_system_system
+LOGICAL_FW_IPU_SYSTEM = css_broxton_system
+LOGICAL_FW_ISP_SYSTEM = isp2601_default_system
+SP_CONTROL_CELL = sp2601_control
+SP_PROXY_CELL = sp2601_proxy
+SP_FP_CELL = sp2601_fp
+ISP_CELL = isp2601
+# The non-capital define isp2601 is used in the SDK; to distinguish between
+# different ISP versions, the ISP_CELL_IDENTIFIER define is added.
+ISP_CELL_IDENTIFIER = ISP2601 +HAS_IPFD = 1 +HAS_S2M_IN_ISYS_ISL_NONSOC_PATH = 0 +HAS_S2V_IN_ISYS_ISL_NONSOC_PATH = 1 +# ISL-IS non-SoC path has ISA without PAF and DPC-Pext support for IPU4-B0 +HAS_ISA_IN_ISYS_ISL = 1 +HAS_PAF_IN_ISYS_ISL = 0 +HAS_DPC_PEXT_IN_ISYS_ISL = 0 +HAS_PMA_IF = 0 + +HAS_MIPIBE_IN_PSYS_ISL = 1 + +HAS_VPLESS_SUPPORT = 0 + +DLI_SYSTEM = hive_isp_css_2600_system +RESOURCE_MANAGER_VERSION = v1 +MEM_RESOURCE_VALIDATION_ERROR = 0 +OFS_SCALER_1_4K_TILEY_422_SUPPORT= 1 +PROGDESC_ACC_SYMBOLS_VERSION = v1 +DEVPROXY_INTERFACE_VERSION = v1 +FW_ABI_IPU_TYPES_VERSION = v1 + +HAS_ONLINE_MODE_SUPPORT_IN_ISYS_PSYS = 0 + +MMU_INTERFACE_VERSION = v1 +DEVICE_ACCESS_VERSION = v2 +PSYS_SERVER_VERSION = v2 +PSYS_SERVER_LOADER_VERSION = v1 +PSYS_HW_VERSION = BXT_B0_HW + +# Enable FW_DMA for loading firmware +PSYS_SERVER_ENABLE_FW_LOAD_DMA = 1 + +NCI_SPA_VERSION = v1 +MANIFEST_TOOL_VERSION = v2 +PSYS_CON_MGR_TOOL_VERSION = v1 +# TODO: Should be removed after performance issues OTF are solved +PSYS_PROC_MGR_VERSION = v1 +IPU_RESOURCES_VERSION = v1 + +HAS_ACC_CLUSTER_PAF_PAL = 0 +HAS_ACC_CLUSTER_PEXT_PAL = 0 +HAS_ACC_CLUSTER_GBL_PAL = 1 + +# TODO use version naming scheme "v#" to decouple +# IPU_SYSVER from version. +PARAMBINTOOL_ISA_INIT_VERSION = bxtB0 + +# Select EQC2EQ version +# Version 1: uniform address space, equal EQ addresses regardless of EQC device +# Version 2: multiple addresses per EQ, depending on location of EQC device +EQC2EQ_VERSION = v1 + +# Select DMA instance for fw_load +FW_LOAD_DMA_INSTANCE = NCI_DMA_FW + +HAS_DMA_FW = 1 + +HAS_SIS = 0 +HAS_IDS = 1 + +PSYS_SERVER_ENABLE_TPROXY = 1 +PSYS_SERVER_ENABLE_DEVPROXY = 1 +NCI_OFS_VERSION = v1 diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/device_access.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/device_access.mk new file mode 100644 index 000000000000..1629d9af803b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/device_access.mk @@ -0,0 +1,40 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# + +ifndef _DEVICE_ACCESS_MK_ +_DEVICE_ACCESS_MK_ = 1 + +# DEVICE_ACCESS_VERSION= +include $(MODULES_DIR)/config/system_$(IPU_SYSVER).mk + +DEVICE_ACCESS_DIR=$${MODULES_DIR}/device_access +DEVICE_ACCESS_INTERFACE=$(DEVICE_ACCESS_DIR)/interface +DEVICE_ACCESS_SOURCES=$(DEVICE_ACCESS_DIR)/src + +DEVICE_ACCESS_HOST_FILES = + +DEVICE_ACCESS_FW_FILES = + +DEVICE_ACCESS_HOST_CPPFLAGS = \ + -I$(DEVICE_ACCESS_INTERFACE) \ + -I$(DEVICE_ACCESS_SOURCES) + +DEVICE_ACCESS_FW_CPPFLAGS = \ + -I$(DEVICE_ACCESS_INTERFACE) \ + -I$(DEVICE_ACCESS_SOURCES) + +DEVICE_ACCESS_FW_CPPFLAGS += \ + -I$(DEVICE_ACCESS_SOURCES)/$(DEVICE_ACCESS_VERSION) +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_cmem.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_cmem.h new file mode 100644 index 000000000000..3dc47c29fcab --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_cmem.h @@ -0,0 +1,58 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CMEM_H +#define __IA_CSS_CMEM_H + +#include "type_support.h" +#include "storage_class.h" + +#ifdef __VIED_CELL +typedef unsigned int ia_css_cmem_address_t; +#else +#include +typedef vied_subsystem_address_t ia_css_cmem_address_t; +#endif + +STORAGE_CLASS_INLINE uint32_t +ia_css_cmem_load_32(unsigned int ssid, ia_css_cmem_address_t address); + +STORAGE_CLASS_INLINE void +ia_css_cmem_store_32(unsigned int ssid, ia_css_cmem_address_t address, + uint32_t value); + +STORAGE_CLASS_INLINE void +ia_css_cmem_load(unsigned int ssid, ia_css_cmem_address_t address, void *data, + unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_cmem_store(unsigned int ssid, ia_css_cmem_address_t address, + const void *data, unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_cmem_zero(unsigned int ssid, ia_css_cmem_address_t address, + unsigned int size); + +STORAGE_CLASS_INLINE ia_css_cmem_address_t +ia_css_cmem_get_cmem_addr_from_dmem(unsigned int base_addr, void *p); + +/* Include inline implementation */ + +#ifdef __VIED_CELL +#include "ia_css_cmem_cell.h" +#else +#include "ia_css_cmem_host.h" +#endif + +#endif /* __IA_CSS_CMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_xmem.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_xmem.h new file mode 100644 index 000000000000..de2b94d8af54 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_xmem.h @@ -0,0 +1,65 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
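The ia_css_cmem_load_32()/ia_css_cmem_store_32() pair declared above is the host's word-granular window into subsystem address space; the cell helpers in ia_css_cell_impl.h are built entirely on it. A small sketch of the other common use, polling a firmware-owned status word; the address and the bare spin loop are illustrative only.

#include "ia_css_cmem.h"

/* Sketch: poll a word-aligned status location until firmware sets it */
static void wait_for_status(unsigned int ssid, ia_css_cmem_address_t status)
{
	while (ia_css_cmem_load_32(ssid, status) == 0)
		; /* a real caller would sleep and apply a timeout */
}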
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_XMEM_H +#define __IA_CSS_XMEM_H + +#include "type_support.h" +#include "storage_class.h" + +#ifdef __VIED_CELL +typedef unsigned int ia_css_xmem_address_t; +#else +#include +typedef host_virtual_address_t ia_css_xmem_address_t; +#endif + +STORAGE_CLASS_INLINE uint8_t +ia_css_xmem_load_8(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE uint16_t +ia_css_xmem_load_16(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE uint32_t +ia_css_xmem_load_32(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE void +ia_css_xmem_load(unsigned int mmid, ia_css_xmem_address_t address, void *data, + unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_8(unsigned int mmid, ia_css_xmem_address_t address, + uint8_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_16(unsigned int mmid, ia_css_xmem_address_t address, + uint16_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_32(unsigned int mmid, ia_css_xmem_address_t address, + uint32_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store(unsigned int mmid, ia_css_xmem_address_t address, + const void *data, unsigned int bytes); + +/* Include inline implementation */ + +#ifdef __VIED_CELL +#include "ia_css_xmem_cell.h" +#else +#include "ia_css_xmem_host.h" +#endif + +#endif /* __IA_CSS_XMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_xmem_cmem.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_xmem_cmem.h new file mode 100644 index 000000000000..57aab3323c73 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/interface/ia_css_xmem_cmem.h @@ -0,0 +1,35 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_XMEM_CMEM_H +#define __IA_CSS_XMEM_CMEM_H + +#include "ia_css_cmem.h" +#include "ia_css_xmem.h" + +/* Copy data from xmem to cmem, e.g., from a program in DDR to a cell's DMEM */ +/* This may also be implemented using DMA */ + +STORAGE_CLASS_INLINE void +ia_css_xmem_to_cmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size); + +/* include inline implementation */ +#include "ia_css_xmem_cmem_impl.h" + +#endif /* __IA_CSS_XMEM_CMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_cmem_host.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_cmem_host.h new file mode 100644 index 000000000000..22799e67214c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_cmem_host.h @@ -0,0 +1,121 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CMEM_HOST_H +#define __IA_CSS_CMEM_HOST_H + +/* This file is an inline implementation for the interface ia_css_cmem.h + * and should only be included there. */ + +#include "assert_support.h" +#include "misc_support.h" + +STORAGE_CLASS_INLINE uint32_t +ia_css_cmem_load_32(unsigned int ssid, ia_css_cmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + return vied_subsystem_load_32(ssid, address); +} + +STORAGE_CLASS_INLINE uint32_t +ia_css_cond_cmem_load_32(bool cond, unsigned int ssid, + ia_css_cmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + if (cond) + return vied_subsystem_load_32(ssid, address); + else + return 0; +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_store_32(unsigned int ssid, ia_css_cmem_address_t address, + uint32_t data) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + vied_subsystem_store_32(ssid, address, data); +} + +STORAGE_CLASS_INLINE void +ia_css_cond_cmem_store_32(bool cond, unsigned int ssid, + ia_css_cmem_address_t address, uint32_t data) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + if (cond) + vied_subsystem_store_32(ssid, address, data); +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_load(unsigned int ssid, ia_css_cmem_address_t address, void *data, + unsigned int size) +{ + uint32_t *data32 = (uint32_t *)data; + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + assert((long)data % 4 == 0); + + while (address != end) { + *data32 = ia_css_cmem_load_32(ssid, address); + address += 4; + data32 += 1; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_store(unsigned int ssid, ia_css_cmem_address_t address, + const void *data, unsigned int size) +{ + uint32_t *data32 = (uint32_t *)data; + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + assert((long)data % 4 == 0); + + while (address != end) { + ia_css_cmem_store_32(ssid, address, *data32); + address += 4; + data32 += 1; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_zero(unsigned int ssid, ia_css_cmem_address_t address, + unsigned int size) +{ + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + + while (address != end) { + ia_css_cmem_store_32(ssid, address, 0); + address += 4; + } +} + +STORAGE_CLASS_INLINE ia_css_cmem_address_t +ia_css_cmem_get_cmem_addr_from_dmem(unsigned int base_addr, void *p) +{ + NOT_USED(base_addr); + return (ia_css_cmem_address_t)(uintptr_t)p; +} + +#endif /* __IA_CSS_CMEM_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_xmem_cmem_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_xmem_cmem_impl.h new file mode 100644 index 000000000000..adc178b75059 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_xmem_cmem_impl.h @@ -0,0 +1,79 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_XMEM_CMEM_IMPL_H +#define __IA_CSS_XMEM_CMEM_IMPL_H + +#include "ia_css_xmem_cmem.h" + +#include "ia_css_cmem.h" +#include "ia_css_xmem.h" + +/* Copy data from xmem to cmem, e.g., from a program in DDR to a cell's DMEM */ +/* This may also be implemented using DMA */ + +STORAGE_CLASS_INLINE void +ia_css_xmem_to_cmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size) +{ + /* copy from ddr to subsystem, e.g., cell dmem */ + ia_css_cmem_address_t end = dst + size; + + assert(size % 4 == 0); + assert((uintptr_t) dst % 4 == 0); + assert((uintptr_t) src % 4 == 0); + + while (dst != end) { + uint32_t data; + + data = ia_css_xmem_load_32(mmid, src); + ia_css_cmem_store_32(ssid, dst, data); + dst += 4; + src += 4; + } +} + +/* Copy data from cmem to xmem */ + +STORAGE_CLASS_INLINE void +ia_css_cmem_to_xmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_cmem_address_t src, + ia_css_xmem_address_t dst, + unsigned int size) +{ + /* copy from ddr to subsystem, e.g., cell dmem */ + ia_css_xmem_address_t end = dst + size; + + assert(size % 4 == 0); + assert((uintptr_t) dst % 4 == 0); + assert((uintptr_t) src % 4 == 0); + + while (dst != end) { + uint32_t data; + + data = ia_css_cmem_load_32(mmid, src); + ia_css_xmem_store_32(ssid, dst, data); + dst += 4; + src += 4; + } +} + + +#endif /* __IA_CSS_XMEM_CMEM_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_xmem_host.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_xmem_host.h new file mode 100644 index 000000000000..d94991fc1114 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/device_access/src/ia_css_xmem_host.h @@ -0,0 +1,84 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
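ia_css_xmem_to_cmem_copy() above is the word-by-word path for staging data from DDR into a cell-local memory (its comment notes DMA as the alternative). Combined with the cell API, this yields the canonical boot sequence. The sketch below is an assumption-laden illustration: it presumes a DMEM-resident image and uses mem_id 1 for DMEM, as documented at ia_css_cell_dmem_addr().

#include "ia_css_xmem_cmem.h"
#include "ia_css_cell.h"
#include "ipu_device_cell_properties.h"

/* Hedged sketch: stage a program image into DMEM and run the cell */
static void cell_load_and_start(unsigned int mmid, unsigned int ssid,
	unsigned int cell_id, ia_css_xmem_address_t image,
	unsigned int image_size, unsigned int start_pc)
{
	/* mem_id 1 is DMEM, per the cell properties convention */
	ia_css_cmem_address_t dmem =
		ipu_device_cell_memory_address(cell_id, 1);

	ia_css_xmem_to_cmem_copy(mmid, ssid, image, dmem, image_size);
	ia_css_cell_set_start_pc(ssid, cell_id, start_pc);
	ia_css_cell_start(ssid, cell_id);
	ia_css_cell_wait(ssid, cell_id);
}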
+*/ + +#ifndef __IA_CSS_XMEM_HOST_H +#define __IA_CSS_XMEM_HOST_H + +#include "ia_css_xmem.h" +#include +#include "assert_support.h" +#include + +STORAGE_CLASS_INLINE uint8_t +ia_css_xmem_load_8(unsigned int mmid, ia_css_xmem_address_t address) +{ + return shared_memory_load_8(mmid, address); +} + +STORAGE_CLASS_INLINE uint16_t +ia_css_xmem_load_16(unsigned int mmid, ia_css_xmem_address_t address) +{ + /* Address has to be half-word aligned */ + assert(0 == (uintptr_t) address % 2); + return shared_memory_load_16(mmid, address); +} + +STORAGE_CLASS_INLINE uint32_t +ia_css_xmem_load_32(unsigned int mmid, ia_css_xmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == (uintptr_t) address % 4); + return shared_memory_load_32(mmid, address); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_load(unsigned int mmid, ia_css_xmem_address_t address, void *data, + unsigned int size) +{ + shared_memory_load(mmid, address, data, size); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_8(unsigned int mmid, ia_css_xmem_address_t address, + uint8_t value) +{ + shared_memory_store_8(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_16(unsigned int mmid, ia_css_xmem_address_t address, + uint16_t value) +{ + /* Address has to be half-word aligned */ + assert(0 == (uintptr_t) address % 2); + shared_memory_store_16(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_32(unsigned int mmid, ia_css_xmem_address_t address, + uint32_t value) +{ + /* Address has to be word aligned */ + assert(0 == (uintptr_t) address % 4); + shared_memory_store_32(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store(unsigned int mmid, ia_css_xmem_address_t address, + const void *data, unsigned int bytes) +{ + shared_memory_store(mmid, address, data, bytes); +} + +#endif /* __IA_CSS_XMEM_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/bxtB0/ipu_device_buttress_properties_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/bxtB0/ipu_device_buttress_properties_struct.h new file mode 100644 index 000000000000..5102f6e44d2f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/bxtB0/ipu_device_buttress_properties_struct.h @@ -0,0 +1,68 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
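The host-side xmem accessors above simply forward to the shared_memory_* layer, adding alignment asserts on the 16- and 32-bit variants. A short usage sketch; the blob header layout is hypothetical.

#include "ia_css_xmem.h"

/* Hypothetical blob header: a magic word followed by a size word */
static unsigned int blob_payload_size(unsigned int mmid,
	ia_css_xmem_address_t blob)
{
	/* offset 4 keeps the required 32-bit alignment */
	return ia_css_xmem_load_32(mmid, blob + 4);
}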
+*/ + +#ifndef __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H +#define __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H + +/* Destination values for master port 0 and bitfield "request_dest" */ +enum cio_M0_btrs_dest { + DEST_IS_BUT_REGS = 0, + DEST_IS_DDR, + RESERVED, + DEST_IS_SUBSYSTEM, + N_BTRS_DEST +}; + +/* Bit-field positions for M0 info bits */ +enum ia_css_info_bits_m0_pos { + IA_CSS_INFO_BITS_M0_SNOOPABLE_POS = 0, + IA_CSS_INFO_BITS_M0_IMR_DESTINED_POS = 1, + IA_CSS_INFO_BITS_M0_REQUEST_DEST_POS = 4 +}; + +#define IA_CSS_INFO_BITS_M0_DDR \ + (DEST_IS_DDR << IA_CSS_INFO_BITS_M0_REQUEST_DEST_POS) +#define IA_CSS_INFO_BITS_M0_SNOOPABLE (1 << IA_CSS_INFO_BITS_M0_SNOOPABLE_POS) + +/* Info bits as expected by the buttress */ +/* Deprecated because bit fields are not portable */ + +/* For master port 0*/ +union cio_M0_t { + struct { + unsigned int snoopable : 1; + unsigned int imr_destined : 1; + unsigned int spare0 : 2; + unsigned int request_dest : 2; + unsigned int spare1 : 26; + } as_bitfield; + unsigned int as_word; +}; + +/* For master port 1*/ +union cio_M1_t { + struct { + unsigned int spare0 : 1; + unsigned int deadline_pointer : 1; + unsigned int reserved : 1; + unsigned int zlw : 1; + unsigned int stream_id : 4; + unsigned int address_swizzling : 1; + unsigned int spare1 : 23; + } as_bitfield; + unsigned int as_word; +}; + + +#endif /* __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties.h new file mode 100644 index 000000000000..e6e1e9dcbe80 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
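Since the bitfield unions above are explicitly deprecated as non-portable, new code would compose master port 0 info bits from the macros and enum positions instead. For example, a snoopable request destined for DDR:

#include "ipu_device_buttress_properties_struct.h"

/* (DEST_IS_DDR << 4) | (1 << 0) == 0x11 */
static unsigned int m0_snoopable_ddr_info_bits(void)
{
	return IA_CSS_INFO_BITS_M0_DDR | IA_CSS_INFO_BITS_M0_SNOOPABLE;
}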
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_H +#define __IPU_DEVICE_CELL_PROPERTIES_H + +#include "storage_class.h" +#include "ipu_device_cell_type_properties.h" + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_devices(void); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_memories(const unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_size(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_address(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_databus_memory_address(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_masters(const unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_bits(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_num_segments(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_size(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_stride(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_base_reg(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_reg(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_icache_align(unsigned int cell_id); + +#ifdef C_RUN +STORAGE_CLASS_INLINE int +ipu_device_cell_id_crun(int cell_id); +#endif + +#include "ipu_device_cell_properties_func.h" + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties_func.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties_func.h new file mode 100644 index 000000000000..481b0504a237 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties_func.h @@ -0,0 +1,164 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
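The property getters declared above are the only sanctioned way to learn a cell's layout; the implementation header that follows resolves them from per-cell tables. A sketch that walks every memory of every cell, e.g. to build an address map while debugging; the recording step is left as a comment.

#include "ipu_device_cell_properties.h"

static void walk_cell_memories(void)
{
	unsigned int c, m;

	for (c = 0; c < ipu_device_cell_num_devices(); c++) {
		for (m = 0; m < ipu_device_cell_num_memories(c); m++) {
			unsigned int addr =
				ipu_device_cell_memory_address(c, m);
			unsigned int size =
				ipu_device_cell_memory_size(c, m);

			/* record [addr, addr + size) for cell c, memory m */
			(void)addr;
			(void)size;
		}
	}
}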
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_FUNC_H +#define __IPU_DEVICE_CELL_PROPERTIES_FUNC_H + +/* define properties for all cells uses in ISYS */ + +#include "ipu_device_cell_properties_impl.h" +#include "ipu_device_cell_devices.h" +#include "assert_support.h" +#include "storage_class.h" + +enum {IA_CSS_CELL_MASTER_ADDRESS_WIDTH = 32}; + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_devices(void) +{ + return NUM_CELLS; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_memories(const unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + num_memories; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_size(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + return ipu_device_cell_properties[cell_id].type_properties-> + mem_size[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_address(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + return ipu_device_cell_properties[cell_id].mem_address[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_databus_memory_address(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + assert(mem_id != 0); + return ipu_device_cell_properties[cell_id].mem_databus_address[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_masters(const unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + num_master_ports; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_bits(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].segment_bits; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_num_segments(const unsigned int cell_id, + const unsigned int master_id) +{ + return 1u << ipu_device_cell_master_segment_bits(cell_id, master_id); +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_size(const unsigned int cell_id, + const unsigned int master_id) +{ + return 1u << (IA_CSS_CELL_MASTER_ADDRESS_WIDTH - + ipu_device_cell_master_segment_bits(cell_id, master_id)); +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_stride(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].stride; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_base_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].base_address_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + 
master[master_id].info_bits_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_override_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].info_override_bits_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_icache_align(unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + icache_align; +} + +#ifdef C_RUN +STORAGE_CLASS_INLINE int +ipu_device_cell_id_crun(int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_map_cell_id_to_crun_proc_id[cell_id]; +} +#endif + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_FUNC_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties_struct.h new file mode 100644 index 000000000000..63397dc0b7fe --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_properties_struct.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H +#define __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H + +/* definitions for all cell types */ + +struct ipu_device_cell_count_s { + unsigned int num_memories; + unsigned int num_master_ports; + unsigned int num_stall_bits; + unsigned int icache_align; +}; + +struct ipu_device_cell_master_properties_s { + unsigned int segment_bits; + unsigned int stride; /* offset to register of next segment */ + unsigned int base_address_register; /* address of first base address + register */ + unsigned int info_bits_register; + unsigned int info_override_bits_register; +}; + +struct ipu_device_cell_type_properties_s { + const struct ipu_device_cell_count_s *count; + const struct ipu_device_cell_master_properties_s *master; + const unsigned int *reg_offset; /* offsets of registers, some depend + on cell type */ + const unsigned int *mem_size; +}; + +struct ipu_device_cell_properties_s { + const struct ipu_device_cell_type_properties_s *type_properties; + const unsigned int *mem_address; + const unsigned int *mem_databus_address; + /* const cell_master_port_properties_s* master_port_properties; */ +}; + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_type_properties.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_type_properties.h new file mode 100644 index 000000000000..72caed3eef0c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/interface/ipu_device_cell_type_properties.h @@ -0,0 +1,69 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_TYPE_PROPERTIES_H +#define __IPU_DEVICE_CELL_TYPE_PROPERTIES_H + +#define IPU_DEVICE_INVALID_MEM_ADDRESS 0xFFFFFFFF + +enum ipu_device_cell_stat_ctrl_bit { + IPU_DEVICE_CELL_STAT_CTRL_RESET_BIT = 0, + IPU_DEVICE_CELL_STAT_CTRL_START_BIT = 1, + IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT = 3, + IPU_DEVICE_CELL_STAT_CTRL_READY_BIT = 5, + IPU_DEVICE_CELL_STAT_CTRL_SLEEP_BIT = 6, + IPU_DEVICE_CELL_STAT_CTRL_STALL_BIT = 7, + IPU_DEVICE_CELL_STAT_CTRL_CLEAR_IRQ_MASK_FLAG_BIT = 8, + IPU_DEVICE_CELL_STAT_CTRL_BROKEN_IRQ_MASK_FLAG_BIT = 9, + IPU_DEVICE_CELL_STAT_CTRL_READY_IRQ_MASK_FLAG_BIT = 10, + IPU_DEVICE_CELL_STAT_CTRL_SLEEP_IRQ_MASK_FLAG_BIT = 11, + IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT = 12, + IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT = 13 +}; + +enum ipu_device_cell_reg_addr { + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS = 0x0, + IPU_DEVICE_CELL_START_PC_REG_ADDRESS = 0x4, + IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS = 0x10, + IPU_DEVICE_CELL_ICACHE_INFO_BITS_REG_ADDRESS = 0x14 +}; + +enum ipu_device_cell_reg { + IPU_DEVICE_CELL_STAT_CTRL_REG, + IPU_DEVICE_CELL_START_PC_REG, + IPU_DEVICE_CELL_ICACHE_BASE_REG, + IPU_DEVICE_CELL_DEBUG_PC_REG, + IPU_DEVICE_CELL_STALL_REG, + IPU_DEVICE_CELL_NUM_REGS +}; + +enum ipu_device_cell_mem { + IPU_DEVICE_CELL_REGS, /* memory id of registers */ + IPU_DEVICE_CELL_PMEM, /* memory id of pmem */ + IPU_DEVICE_CELL_DMEM, /* memory id of dmem */ + IPU_DEVICE_CELL_BAMEM, /* memory id of bamem */ + IPU_DEVICE_CELL_VMEM /* memory id of vmem */ +}; +#define IPU_DEVICE_CELL_NUM_MEMORIES (IPU_DEVICE_CELL_VMEM + 1) + +enum ipu_device_cell_master { + IPU_DEVICE_CELL_MASTER_ICACHE, /* master port id of icache */ + IPU_DEVICE_CELL_MASTER_QMEM, + IPU_DEVICE_CELL_MASTER_CMEM, + IPU_DEVICE_CELL_MASTER_XMEM, + IPU_DEVICE_CELL_MASTER_XVMEM +}; +#define IPU_DEVICE_CELL_MASTER_NUM_MASTERS (IPU_DEVICE_CELL_MASTER_XVMEM + 1) + +#endif /* __IPU_DEVICE_CELL_TYPE_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_devices.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_devices.h new file mode 100644 index 000000000000..bd672104db3b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_devices.h @@ -0,0 +1,27 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_CELL_DEVICES_H +#define __IPU_DEVICE_CELL_DEVICES_H + +/* define cell instances in ISYS */ + +#define SPC0_CELL input_system_unis_logic_sp_control_tile_sp + +enum ipu_device_isys_cell_id { + SPC0, + NUM_CELLS +}; + +#endif /* __IPU_DEVICE_CELL_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_properties_defs.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_properties_defs.h new file mode 100644 index 000000000000..093e3fc2e581 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_properties_defs.h @@ -0,0 +1,23 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +/* Generated file - please do not edit. */ + +#ifndef _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ +#define _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ +#define SPC0_REGS_CBUS_ADDRESS 0x0 +#define SPC0_DMEM_CBUS_ADDRESS 0x8000 +#define SPC0_DMEM_DBUS_ADDRESS 0x8000 +#define SPC0_DMEM_DMA_M0_ADDRESS 0x210000 +#endif /* _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_properties_impl.h new file mode 100644 index 000000000000..5f8ab1ac928f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/isys/bxtB0/ipu_device_cell_properties_impl.h @@ -0,0 +1,57 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IPU_DEVICE_CELL_PROPERTIES_IMPL_H
+#define __IPU_DEVICE_CELL_PROPERTIES_IMPL_H
+
+/* define properties for all cells used in ISYS */
+
+#include "ipu_device_sp2600_control_properties_impl.h"
+#include "ipu_device_cell_properties_defs.h"
+#include "ipu_device_cell_devices.h"
+#include "ipu_device_cell_type_properties.h" /* IPU_DEVICE_INVALID_MEM_ADDRESS */
+
+static const unsigned int
+ipu_device_spc0_mem_address[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = {
+	SPC0_REGS_CBUS_ADDRESS,
+	IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */
+	SPC0_DMEM_CBUS_ADDRESS
+};
+
+static const unsigned int
+ipu_device_spc0_databus_mem_address[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = {
+	IPU_DEVICE_INVALID_MEM_ADDRESS, /* regs not accessible from DBUS */
+	IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */
+	SPC0_DMEM_DBUS_ADDRESS
+};
+
+static const struct
+ipu_device_cell_properties_s ipu_device_cell_properties[NUM_CELLS] = {
+	{
+		&ipu_device_sp2600_control_properties,
+		ipu_device_spc0_mem_address,
+		ipu_device_spc0_databus_mem_address
+	}
+};
+
+#ifdef C_RUN
+
+/* Mapping between hrt_hive_processors enum and cell_id's used in FW */
+static const int ipu_device_map_cell_id_to_crun_proc_id[NUM_CELLS] = {
+	0 /* SPC0 */
+};
+
+#endif
+
+#endif /* __IPU_DEVICE_CELL_PROPERTIES_IMPL_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/src/ipu_device_sp2600_control_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/src/ipu_device_sp2600_control_properties_impl.h
new file mode 100644
index 000000000000..430295cd9d94
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/devices/src/ipu_device_sp2600_control_properties_impl.h
@@ -0,0 +1,136 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H +#define __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H + +/* sp2600_control definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_sp2600_control_registers { + /* control registers */ + IPU_DEVICE_SP2600_CONTROL_STAT_CTRL = 0x0, + IPU_DEVICE_SP2600_CONTROL_START_PC = 0x4, + + /* master port registers */ + IPU_DEVICE_SP2600_CONTROL_ICACHE_BASE = 0x10, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO = 0x14, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_SP2600_CONTROL_QMEM_BASE = 0x1C, + + IPU_DEVICE_SP2600_CONTROL_CMEM_BASE = 0x28, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO = 0x2C, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO_OVERRIDE = 0x30, + + IPU_DEVICE_SP2600_CONTROL_XMEM_BASE = 0x58, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO = 0x5C, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO_OVERRIDE = 0x60, + + /* debug registers */ + IPU_DEVICE_SP2600_CONTROL_DEBUG_PC = 0x9C, + IPU_DEVICE_SP2600_CONTROL_STALL = 0xA0 +}; + +enum ipu_device_sp2600_control_mems { + IPU_DEVICE_SP2600_CONTROL_REGS, + IPU_DEVICE_SP2600_CONTROL_PMEM, + IPU_DEVICE_SP2600_CONTROL_DMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_sp2600_control_mem_size[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + 0x000AC, + 0x00000, + 0x10000 +}; + +enum ipu_device_sp2600_control_masters { + IPU_DEVICE_SP2600_CONTROL_ICACHE, + IPU_DEVICE_SP2600_CONTROL_QMEM, + IPU_DEVICE_SP2600_CONTROL_CMEM, + IPU_DEVICE_SP2600_CONTROL_XMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_sp2600_control_masters[IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_SP2600_CONTROL_ICACHE_BASE, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_SP2600_CONTROL_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_CONTROL_CMEM_BASE, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO_OVERRIDE + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_CONTROL_XMEM_BASE, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO_OVERRIDE + } +}; + +enum ipu_device_sp2600_control_stall_bits { + IPU_DEVICE_SP2600_CONTROL_STALL_ICACHE, + IPU_DEVICE_SP2600_CONTROL_STALL_DMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_QMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_CMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_XMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_STALL_BITS +}; + +/* 32 bits per instruction */ +#define IPU_DEVICE_SP2600_CONTROL_ICACHE_WORD_SIZE 4 +/* 32 instructions per burst */ +#define IPU_DEVICE_SP2600_CONTROL_ICACHE_BURST_SIZE 32 + +static const struct ipu_device_cell_count_s ipu_device_sp2600_control_count = { + IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES, + IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS, + IPU_DEVICE_SP2600_CONTROL_NUM_STALL_BITS, + IPU_DEVICE_SP2600_CONTROL_ICACHE_WORD_SIZE * + IPU_DEVICE_SP2600_CONTROL_ICACHE_BURST_SIZE +}; + +static const unsigned int +ipu_device_sp2600_control_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0x9C, 0xA0 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_sp2600_control_properties = { + &ipu_device_sp2600_control_count, + ipu_device_sp2600_control_masters, + ipu_device_sp2600_control_reg_offset, + ipu_device_sp2600_control_mem_size +}; + +#endif /* __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/fw_abi_cpu_types.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/fw_abi_cpu_types.mk new file mode 100644 index 000000000000..b1ffbf7ea21f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/fw_abi_cpu_types.mk @@ -0,0 +1,24 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +# MODULE is FW ABI COMMON TYPES + +FW_ABI_COMMON_TYPES_DIRS = -I$${MODULES_DIR}/fw_abi_common_types +FW_ABI_COMMON_TYPES_DIRS += -I$${MODULES_DIR}/fw_abi_common_types/cpu + +FW_ABI_COMMON_TYPES_HOST_FILES = +FW_ABI_COMMON_TYPES_HOST_CPPFLAGS = $(FW_ABI_COMMON_TYPES_DIRS) + +FW_ABI_COMMON_TYPES_FW_FILES = +FW_ABI_COMMON_TYPES_FW_CPPFLAGS = $(FW_ABI_COMMON_TYPES_DIRS) diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_base_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_base_types.h new file mode 100644 index 000000000000..73062e9db87b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_base_types.h @@ -0,0 +1,43 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_TERMINAL_BASE_TYPES_H
+#define __IA_CSS_TERMINAL_BASE_TYPES_H
+
+
+#include "type_support.h"
+#include "ia_css_terminal_defs.h"
+
+#define N_UINT16_IN_TERMINAL_STRUCT 3
+#define N_PADDING_UINT8_IN_TERMINAL_STRUCT 5
+
+#define SIZE_OF_TERMINAL_STRUCT_BITS \
+	(IA_CSS_TERMINAL_TYPE_BITS \
+	+ IA_CSS_TERMINAL_ID_BITS \
+	+ N_UINT16_IN_TERMINAL_STRUCT * IA_CSS_UINT16_T_BITS \
+	+ N_PADDING_UINT8_IN_TERMINAL_STRUCT * IA_CSS_UINT8_T_BITS)
+
+/* ==================== Base Terminal - START ==================== */
+struct ia_css_terminal_s { /**< Base terminal */
+	ia_css_terminal_type_t terminal_type;	/**< Type ia_css_terminal_type_t */
+	int16_t parent_offset;	/**< Offset to the process group */
+	uint16_t size;	/**< Size of this whole terminal layout-structure */
+	uint16_t tm_index;	/**< Index of the terminal manifest object */
+	ia_css_terminal_ID_t ID;	/**< Absolute referral ID for this terminal, valid IDs != 0 */
+	uint8_t padding[N_PADDING_UINT8_IN_TERMINAL_STRUCT];
+};
+/* ==================== Base Terminal - END ==================== */
+
+#endif /* __IA_CSS_TERMINAL_BASE_TYPES_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h
new file mode 100644
index 000000000000..24ad04fe8720
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h
@@ -0,0 +1,43 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H +#define __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H + +#include "ia_css_terminal_defs.h" + +#define N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT 5 +#define SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + (IA_CSS_UINT16_T_BITS \ + + IA_CSS_TERMINAL_ID_BITS \ + + IA_CSS_TERMINAL_TYPE_BITS \ + + IA_CSS_UINT32_T_BITS \ + + (N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT*IA_CSS_UINT8_T_BITS)) + +/* ==================== Base Terminal Manifest - START ==================== */ +struct ia_css_terminal_manifest_s { + ia_css_terminal_type_t terminal_type; /**< Type ia_css_terminal_type_t */ + int16_t parent_offset; /**< Offset to the program group manifest */ + uint16_t size; /**< Size of this whole terminal-manifest layout-structure */ + ia_css_terminal_ID_t ID; + uint8_t padding[N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT]; +}; + +typedef struct ia_css_terminal_manifest_s + ia_css_terminal_manifest_t; + +/* ==================== Base Terminal Manifest - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/ia_css_base_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/ia_css_base_types.h new file mode 100644 index 000000000000..cd508f05ed40 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/ia_css_base_types.h @@ -0,0 +1,39 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_BASE_TYPES_H +#define __IA_CSS_BASE_TYPES_H + +#include "type_support.h" + +#define VIED_VADDRESS_BITS 32 +typedef uint32_t vied_vaddress_t; + +#define DEVICE_DESCRIPTOR_ID_BITS 32 +typedef struct { + uint8_t device_id; + uint8_t instance_id; + uint8_t channel_id; + uint8_t section_id; +} device_descriptor_fields_t; + +typedef union { + device_descriptor_fields_t fields; + uint32_t data; +} device_descriptor_id_t; + +typedef uint16_t ia_css_process_id_t; + +#endif /* __IA_CSS_BASE_TYPES_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/ia_css_terminal_defs.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/ia_css_terminal_defs.h new file mode 100644 index 000000000000..3a7b333d3bf5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/fw_abi_common_types/ia_css_terminal_defs.h @@ -0,0 +1,109 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_TERMINAL_DEFS_H
+#define __IA_CSS_TERMINAL_DEFS_H
+
+
+#include "type_support.h"
+
+#define IA_CSS_TERMINAL_ID_BITS 8
+typedef uint8_t ia_css_terminal_ID_t;
+#define IA_CSS_TERMINAL_INVALID_ID ((ia_css_terminal_ID_t)(-1))
+
+/*
+ * Terminal Base Type
+ */
+typedef enum ia_css_terminal_type {
+	/**< Data input */
+	IA_CSS_TERMINAL_TYPE_DATA_IN = 0,
+	/**< Data output */
+	IA_CSS_TERMINAL_TYPE_DATA_OUT,
+	/**< Type 6 parameter input */
+	IA_CSS_TERMINAL_TYPE_PARAM_STREAM,
+	/**< Type 1-5 parameter input */
+	IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN,
+	/**< Type 1-5 parameter output */
+	IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT,
+	/**< Represent the new type of terminal for the
+	 * "spatial dependent parameters", when params go in
+	 */
+	IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN,
+	/**< Represent the new type of terminal for the
+	 * "spatial dependent parameters", when params go out
+	 */
+	IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT,
+	/**< Represent the new type of terminal for the
+	 * explicit slicing, when params go in
+	 */
+	IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN,
+	/**< Represent the new type of terminal for the
+	 * explicit slicing, when params go out
+	 */
+	IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT,
+	/**< State (private data) input */
+	IA_CSS_TERMINAL_TYPE_STATE_IN,
+	/**< State (private data) output */
+	IA_CSS_TERMINAL_TYPE_STATE_OUT,
+	IA_CSS_TERMINAL_TYPE_PROGRAM,
+	IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT,
+	IA_CSS_N_TERMINAL_TYPES
+} ia_css_terminal_type_t;
+
+#define IA_CSS_TERMINAL_TYPE_BITS 32
+
+/* Temporary redirection needed to facilitate merging with the drivers
+   in a backwards compatible manner */
+#define IA_CSS_TERMINAL_TYPE_PARAM_CACHED IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN
+
+/*
+ * Dimensions of the data objects. Note that a C-style
+ * data order is assumed. Data stored by row.
+ */
+/* A strange problem with the hivecc compiler, described at
+ * https://icggerrit.ir.intel.com/#/c/51630/1, forces this
+ * enum to be explicitly initialized for the moment
+ */
+typedef enum ia_css_dimension {
+	/**< The number of columns, i.e. the size of the row */
+	IA_CSS_COL_DIMENSION = 0,
+	/**< The number of rows, i.e. the size of the column */
+	IA_CSS_ROW_DIMENSION = 1,
+	IA_CSS_N_DATA_DIMENSION = 2
+} ia_css_dimension_t;
+
+#define IA_CSS_N_COMMAND_COUNT (4)
+
+#ifndef PIPE_GENERATION
+/* Don't include these complex enum structures in Genpipe; it can't handle
+ * them and it does not need them
+ */
+/*
+ * enum ia_css_isys_link_id. Lists the link IDs used by the FW for the
+ * On The Fly feature
+ */
+typedef enum ia_css_isys_link_id {
+	IA_CSS_ISYS_LINK_OFFLINE = 0,
+	IA_CSS_ISYS_LINK_MAIN_OUTPUT = 1,
+	IA_CSS_ISYS_LINK_PDAF_OUTPUT = 2
+} ia_css_isys_link_id_t;
+#define N_IA_CSS_ISYS_LINK_ID (IA_CSS_ISYS_LINK_PDAF_OUTPUT + 1)
+
+/*
+ * enum ia_css_data_barrier_link_id. Lists the link IDs used by the FW for
+ * the data barrier feature
+ */
+typedef enum ia_css_data_barrier_link_id {
+	IA_CSS_DATA_BARRIER_LINK_MEMORY = N_IA_CSS_ISYS_LINK_ID,
+	N_IA_CSS_DATA_BARRIER_LINK_ID
+} ia_css_data_barrier_link_id_t;
+
+#endif /* #ifndef PIPE_GENERATION */
+#endif /* __IA_CSS_TERMINAL_DEFS_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isys_fw_bridged_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isys_fw_bridged_types.h
new file mode 100644
index 000000000000..dc42fe566d9f
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isys_fw_bridged_types.h
@@ -0,0 +1,401 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_ISYS_FW_BRIDGED_TYPES_H
+#define __IA_CSS_ISYS_FW_BRIDGED_TYPES_H
+
+#include "platform_support.h"
+
+#include "ia_css_isysapi_fw_types.h"
+
+/**
+ * struct ia_css_isys_buffer_partition_comm - buffer partition information
+ * @num_gda_pages: Number of virtual gda pages available for each
+ *		   virtual stream
+ */
+struct ia_css_isys_buffer_partition_comm {
+	aligned_uint32(unsigned int, num_gda_pages[STREAM_ID_MAX]);
+};
+
+/**
+ * struct ia_css_isys_fw_config - contains the parts from
+ *				  ia_css_isys_device_cfg_data
+ *				  we need to transfer to the cell
+ * @num_send_queues: Number of send queues per queue
+ *		     type(N_IA_CSS_ISYS_QUEUE_TYPE)
+ * @num_recv_queues: Number of receive queues per queue
+ *		     type(N_IA_CSS_ISYS_QUEUE_TYPE)
+ */
+struct ia_css_isys_fw_config {
+	aligned_struct(struct ia_css_isys_buffer_partition_comm,
+			buffer_partition);
+	aligned_uint32(unsigned int,
+			num_send_queues[N_IA_CSS_ISYS_QUEUE_TYPE]);
+	aligned_uint32(unsigned int,
+			num_recv_queues[N_IA_CSS_ISYS_QUEUE_TYPE]);
+};
+
+/**
+ * struct ia_css_isys_resolution_comm: Generic resolution structure.
+ * @width
+ * @height
+ */
+struct ia_css_isys_resolution_comm {
+	aligned_uint32(unsigned int, width);
+	aligned_uint32(unsigned int, height);
+};
+
+/**
+ * struct ia_css_isys_output_pin_payload_comm
+ * @out_buf_id: Points to output pin buffer - buffer identifier
+ * @addr: Points to output pin buffer - CSS Virtual Address
+ * @compress: Request frame compression (1), or not (0)
+ */
+struct ia_css_isys_output_pin_payload_comm {
+	aligned_uint64(ia_css_return_token, out_buf_id);
+	aligned_uint32(ia_css_output_buffer_css_address, addr);
+	aligned_uint32(unsigned int, compress);
+};
+
+/**
+ * struct ia_css_isys_output_pin_info_comm
+ * @input_pin_id: input pin id/index which is source of
+ *		  the data for this output pin
+ * @output_res: output pin resolution
+ * @stride: output stride in Bytes (not valid for statistics)
+ * @watermark_in_lines: pin watermark level in lines
+ * @payload_buf_size: Size in Bytes of all buffers that will be supplied for
+ *		      capture on this pin (i.e.
+ *		      addressed by ia_css_isys_output_pin_payload::addr)
+ * @send_irq: assert if pin event should trigger irq
+ * @pt: pin type
+ * @ft: frame format type
+ * @link_id: identifies PPG to connect to, link_id = 0 implies offline
+ *	     while link_id > 0 implies buffer_chasing or online mode
+ *	     can be entered.
+ * @reserve_compression: Reserve compression resources for pin.
+ */
+struct ia_css_isys_output_pin_info_comm {
+	aligned_struct(struct ia_css_isys_resolution_comm, output_res);
+	aligned_uint32(unsigned int, stride);
+	aligned_uint32(unsigned int, watermark_in_lines);
+	aligned_uint32(unsigned int, payload_buf_size);
+	aligned_uint8(unsigned int, send_irq);
+	aligned_uint8(unsigned int, input_pin_id);
+	aligned_uint8(enum ia_css_isys_pin_type, pt);
+	aligned_uint8(enum ia_css_isys_frame_format_type, ft);
+	aligned_uint8(enum ia_css_isys_link_id, link_id);
+	aligned_uint8(unsigned int, reserve_compression);
+};
+
+/**
+ * struct ia_css_isys_param_pin_comm
+ * @param_buf_id: Points to param port buffer - buffer identifier
+ * @addr: Points to param pin buffer - CSS Virtual Address
+ */
+struct ia_css_isys_param_pin_comm {
+	aligned_uint64(ia_css_return_token, param_buf_id);
+	aligned_uint32(ia_css_input_buffer_css_address, addr);
+};
+
+/**
+ * struct ia_css_isys_input_pin_info_comm
+ * @input_res: input resolution
+ * @dt: mipi data type
+ * @mipi_store_mode: defines if the legacy long packet header will be stored
+ *		     or discarded; if discarded, the output pin type for this
+ *		     input pin can only be MIPI
+ * @bits_per_pix: native bits per pixel
+ * @mapped_dt: mapped (renamed) mipi data type
+ */
+struct ia_css_isys_input_pin_info_comm {
+	aligned_struct(struct ia_css_isys_resolution_comm, input_res);
+	aligned_uint8(enum ia_css_isys_mipi_data_type, dt);
+	aligned_uint8(enum ia_css_isys_mipi_store_mode, mipi_store_mode);
+	aligned_uint8(unsigned int, bits_per_pix);
+	aligned_uint8(unsigned int, mapped_dt);
+};
+
+/**
+ * ISA configuration fields, definition and macros
+ */
+#define ISA_CFG_FIELD_BLC_EN_LEN 1
+#define ISA_CFG_FIELD_BLC_EN_SHIFT 0
+
+#define ISA_CFG_FIELD_LSC_EN_LEN 1
+#define ISA_CFG_FIELD_LSC_EN_SHIFT 1
+
+#define ISA_CFG_FIELD_DPC_EN_LEN 1
+#define ISA_CFG_FIELD_DPC_EN_SHIFT 2
+
+#define ISA_CFG_FIELD_DOWNSCALER_EN_LEN 1
+#define ISA_CFG_FIELD_DOWNSCALER_EN_SHIFT 3
+
+#define ISA_CFG_FIELD_AWB_EN_LEN 1
+#define ISA_CFG_FIELD_AWB_EN_SHIFT 4
+
+#define ISA_CFG_FIELD_AF_EN_LEN 1
+#define ISA_CFG_FIELD_AF_EN_SHIFT 5
+
+#define ISA_CFG_FIELD_AE_EN_LEN 1
+#define ISA_CFG_FIELD_AE_EN_SHIFT 6
+
+#define ISA_CFG_FIELD_PAF_TYPE_LEN 8
+#define ISA_CFG_FIELD_PAF_TYPE_SHIFT 7
+
+#define ISA_CFG_FIELD_SEND_IRQ_STATS_READY_LEN 1
+#define ISA_CFG_FIELD_SEND_IRQ_STATS_READY_SHIFT 15
+
+#define ISA_CFG_FIELD_SEND_RESP_STATS_READY_LEN 1
+#define ISA_CFG_FIELD_SEND_RESP_STATS_READY_SHIFT 16
+
+/* Helper macros */
+#define ISA_CFG_GET_MASK_FROM_LEN(len) ((1 << (len)) - 1)
+#define ISA_CFG_GET_MASK_FROM_TAG(tag) \
+	(ISA_CFG_GET_MASK_FROM_LEN(ISA_CFG_FIELD_##tag##_LEN))
+#define ISA_CFG_GET_SHIFT_FROM_TAG(tag) \
+	(ISA_CFG_FIELD_##tag##_SHIFT)
+/* Get/Set macros */
+#define ISA_CFG_FIELD_GET(tag, word) \
+	( \
+	((word) >> (ISA_CFG_GET_SHIFT_FROM_TAG(tag))) & \
+	ISA_CFG_GET_MASK_FROM_TAG(tag) \
+	)
+#define ISA_CFG_FIELD_SET(tag, word, value) \
+	word |= ( \
+	((value) & ISA_CFG_GET_MASK_FROM_TAG(tag)) << \
+	ISA_CFG_GET_SHIFT_FROM_TAG(tag) \
+	)
+
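+/*
+ * Illustrative use of the Get/Set macros above (a sketch only; the local
+ * variable and the helper call are placeholders, not part of this
+ * interface):
+ *
+ *	uint32_t cfg_fields = 0;
+ *
+ *	ISA_CFG_FIELD_SET(BLC_EN, cfg_fields, 1);
+ *	ISA_CFG_FIELD_SET(PAF_TYPE, cfg_fields, 3);
+ *	if (ISA_CFG_FIELD_GET(AWB_EN, cfg_fields))
+ *		handle_awb_stats();
+ */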
+/**
+ * struct ia_css_isys_isa_cfg_comm. Describes the ISA cfg
+ */
+struct ia_css_isys_isa_cfg_comm {
+	aligned_struct(struct ia_css_isys_resolution_comm,
+			isa_res[N_IA_CSS_ISYS_RESOLUTION_INFO]);
+	aligned_uint32(/* multi-field packing */, cfg_fields);
+};
+
+/**
+ * struct ia_css_isys_cropping_comm - cropping coordinates
+ */
+struct ia_css_isys_cropping_comm {
+	aligned_int32(int, top_offset);
+	aligned_int32(int, left_offset);
+	aligned_int32(int, bottom_offset);
+	aligned_int32(int, right_offset);
+};
+
+/**
+ * struct ia_css_isys_stream_cfg_data_comm
+ * ISYS stream configuration data structure
+ * @isa_cfg: details about what ACCs are active if ISA is used
+ * @crop: defines cropping resolution for the
+ *	  maximum number of input pins which can be cropped,
+ *	  it is directly mapped to the HW devices
+ * @input_pins: input pin descriptors
+ * @output_pins: output pin descriptors
+ * @compfmt: de-compression setting for User Defined Data
+ * @nof_input_pins: number of input pins
+ * @nof_output_pins: number of output pins
+ * @send_irq_sof_discarded: send irq on discarded frame sof response
+ *		- if '1' it will override the send_resp_sof_discarded and send
+ *		  the response
+ *		- if '0' the send_resp_sof_discarded will determine whether to
+ *		  send the response
+ * @send_irq_eof_discarded: send irq on discarded frame eof response
+ *		- if '1' it will override the send_resp_eof_discarded and send
+ *		  the response
+ *		- if '0' the send_resp_eof_discarded will determine whether to
+ *		  send the response
+ * @send_resp_sof_discarded: send response for discarded frame sof detected,
+ *		used only when send_irq_sof_discarded is '0'
+ * @send_resp_eof_discarded: send response for discarded frame eof detected,
+ *		used only when send_irq_eof_discarded is '0'
+ * @src: Stream source index e.g.
+ *	 MIPI_generator_0, CSI2-rx_1
+ * @vc: MIPI Virtual Channel (up to 4 virtual channels per physical channel)
+ * @isl_use: indicates whether stream requires ISL and how
+ */
+struct ia_css_isys_stream_cfg_data_comm {
+	aligned_struct(struct ia_css_isys_isa_cfg_comm, isa_cfg);
+	aligned_struct(struct ia_css_isys_cropping_comm,
+			crop[N_IA_CSS_ISYS_CROPPING_LOCATION]);
+	aligned_struct(struct ia_css_isys_input_pin_info_comm,
+			input_pins[MAX_IPINS]);
+	aligned_struct(struct ia_css_isys_output_pin_info_comm,
+			output_pins[MAX_OPINS]);
+	aligned_uint32(unsigned int, compfmt);
+	aligned_uint8(unsigned int, nof_input_pins);
+	aligned_uint8(unsigned int, nof_output_pins);
+	aligned_uint8(unsigned int, send_irq_sof_discarded);
+	aligned_uint8(unsigned int, send_irq_eof_discarded);
+	aligned_uint8(unsigned int, send_resp_sof_discarded);
+	aligned_uint8(unsigned int, send_resp_eof_discarded);
+	aligned_uint8(enum ia_css_isys_stream_source, src);
+	aligned_uint8(enum ia_css_isys_mipi_vc, vc);
+	aligned_uint8(enum ia_css_isys_isl_use, isl_use);
+};
+
+/**
+ * struct ia_css_isys_frame_buff_set_comm - frame buffer set
+ * @output_pins: output pin addresses
+ * @process_group_light: process_group_light buffer address
+ * @send_irq_sof: send irq on frame sof response
+ *		- if '1' it will override the send_resp_sof and send the
+ *		  response
+ *		- if '0' the send_resp_sof will determine whether to send the
+ *		  response
+ * @send_irq_eof: send irq on frame eof response
+ *		- if '1' it will override the send_resp_eof and send the
+ *		  response
+ *		- if '0' the send_resp_eof will determine whether to send the
+ *		  response
+ * @send_resp_sof: send response for frame sof detected, used only when
+ *		   send_irq_sof is '0'
+ * @send_resp_eof: send response for frame eof detected, used only when
+ *		   send_irq_eof is '0'
+ * @frame_counter: frame number associated with this buffer set.
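+ * @send_irq_capture_ack: send irq on capture ack response
+ * @send_irq_capture_done: send irq on capture done response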
+ */
+struct ia_css_isys_frame_buff_set_comm {
+	aligned_struct(struct ia_css_isys_output_pin_payload_comm,
+			output_pins[MAX_OPINS]);
+	aligned_struct(struct ia_css_isys_param_pin_comm, process_group_light);
+	aligned_uint8(unsigned int, send_irq_sof);
+	aligned_uint8(unsigned int, send_irq_eof);
+	aligned_uint8(unsigned int, send_irq_capture_ack);
+	aligned_uint8(unsigned int, send_irq_capture_done);
+	aligned_uint8(unsigned int, send_resp_sof);
+	aligned_uint8(unsigned int, send_resp_eof);
+	aligned_uint8(unsigned int, frame_counter);
+};
+
+/**
+ * struct ia_css_isys_error_info_comm
+ * @error: error code if something went wrong
+ * @error_details: depending on error code, it may contain additional
+ *		   error info
+ */
+struct ia_css_isys_error_info_comm {
+	aligned_enum(enum ia_css_isys_error, error);
+	aligned_uint32(unsigned int, error_details);
+};
+
+/**
+ * struct ia_css_isys_resp_info_comm
+ * @pin: this var is only valid for pin event related responses,
+ *	 contains pin addresses
+ * @process_group_light: this var is valid for stats ready related responses,
+ *			 contains process group addresses
+ * @error_info: error information from the FW
+ * @timestamp: Time information for event if available
+ * @stream_handle: stream id the response corresponds to
+ * @type: response type
+ * @pin_id: pin id that the pin payload corresponds to
+ * @acc_id: this var is valid for stats ready related responses,
+ *	    contains accelerator id that finished producing
+ *	    all related statistics
+ * @frame_counter: valid for STREAM_START_AND_CAPTURE_DONE,
+ *		   STREAM_CAPTURE_DONE and STREAM_CAPTURE_DISCARDED.
+ * @written_direct: indicates if frame was written direct (online mode) or not.
+ *
+ */
+
+struct ia_css_isys_resp_info_comm {
+	aligned_uint64(ia_css_return_token, buf_id); /* Used internally only */
+	aligned_struct(struct ia_css_isys_output_pin_payload_comm, pin);
+	aligned_struct(struct ia_css_isys_param_pin_comm, process_group_light);
+	aligned_struct(struct ia_css_isys_error_info_comm, error_info);
+	aligned_uint32(unsigned int, timestamp[2]);
+	aligned_uint8(unsigned int, stream_handle);
+	aligned_uint8(enum ia_css_isys_resp_type, type);
+	aligned_uint8(unsigned int, pin_id);
+	aligned_uint8(unsigned int, acc_id);
+	aligned_uint8(unsigned int, frame_counter);
+	aligned_uint8(unsigned int, written_direct);
+};
+
+/**
+ * struct ia_css_isys_proxy_error_info_comm
+ * @proxy_error: error code if something went wrong
+ * @proxy_error_details: depending on error code, it may contain additional
+ *			 error info
+ */
+struct ia_css_isys_proxy_error_info_comm {
+	aligned_enum(enum ia_css_proxy_error, error);
+	aligned_uint32(unsigned int, error_details);
+};
+
+/**
+ * struct ia_css_isys_proxy_resp_info_comm
+ * @request_id: Unique identifier for the write request
+ * (in case multiple write requests are issued for same register)
+ * @error_info: details in struct definition
+ */
+struct ia_css_isys_proxy_resp_info_comm {
+	aligned_uint32(uint32_t, request_id);
+	aligned_struct(struct ia_css_isys_proxy_error_info_comm, error_info);
+};
+
+/**
+ * struct ia_css_proxy_write_queue_token
+ * @request_id: update id for the specific proxy write request
+ * @region_index: Region id for the proxy write request
+ * @offset: Offset of the write request according to the base address of the
+ *	    region
+ * @value: Value that is requested to be written with the proxy write request
+ */
+struct ia_css_proxy_write_queue_token {
+	aligned_uint32(uint32_t, request_id);
+	aligned_uint32(uint32_t, region_index);
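+	/* byte offset relative to the base address of the selected region */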
+	aligned_uint32(uint32_t, offset);
+	aligned_uint32(uint32_t, value);
+};
+
+/* From here on, type defines not coming from the ISYSAPI interface */
+
+/**
+ * struct resp_queue_token
+ */
+struct resp_queue_token {
+	aligned_struct(struct ia_css_isys_resp_info_comm, resp_info);
+};
+
+/**
+ * struct send_queue_token
+ */
+struct send_queue_token {
+	aligned_uint64(ia_css_return_token, buf_handle);
+	aligned_uint32(ia_css_input_buffer_css_address, payload);
+	aligned_uint16(enum ia_css_isys_send_type, send_type);
+	aligned_uint16(unsigned int, stream_id);
+};
+
+/**
+ * struct proxy_resp_queue_token
+ */
+struct proxy_resp_queue_token {
+	aligned_struct(struct ia_css_isys_proxy_resp_info_comm,
+			proxy_resp_info);
+};
+
+/**
+ * struct proxy_send_queue_token
+ */
+struct proxy_send_queue_token {
+	aligned_uint32(uint32_t, request_id);
+	aligned_uint32(uint32_t, region_index);
+	aligned_uint32(uint32_t, offset);
+	aligned_uint32(uint32_t, value);
+};
+
+#endif /* __IA_CSS_ISYS_FW_BRIDGED_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi.h
new file mode 100644
index 000000000000..5f10f72c0974
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi.h
@@ -0,0 +1,326 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_ISYSAPI_H
+#define __IA_CSS_ISYSAPI_H
+
+/**
+ * errno.h specified error codes to be used
+ * URL: http://man7.org/linux/man-pages/man3/errno.3.html
+ */
+
+
+/* The following is needed for the function arguments */
+#include "ia_css_isysapi_types.h"
+
+/* To define the HANDLE */
+#include "type_support.h"
+
+
+/**
+ * ia_css_isys_device_open() - configure ISYS device
+ * @ context : device handle output parameter
+ * @config: device configuration data struct ptr as input parameter,
+ * read only by css fw until function return
+ * Ownership: ISYS will only read-access config during the fct call
+ * Prepares and Sends to PG server (SP) the syscom and isys context
+ * Executes the host level 0 and 1 boot sequence and starts the PG server (SP)
+ * All streams must be stopped when calling ia_css_isys_device_open()
+ *
+ * Return: int type error code (errno.h)
+ */
+#if HAS_DUAL_CMD_CTX_SUPPORT
+extern int ia_css_isys_context_create(
+	HANDLE * context,
+	const struct ia_css_isys_device_cfg_data *config
+);
+extern int ia_css_isys_context_store_dmem(
+	const HANDLE *context,
+	const struct ia_css_isys_device_cfg_data *config
+);
+extern bool ia_css_isys_ab_spc_ready(
+	HANDLE *context
+);
+extern int ia_css_isys_device_open(
+	const struct ia_css_isys_device_cfg_data *config
+);
+#else
+extern int ia_css_isys_device_open(
+	HANDLE * context,
+	const struct ia_css_isys_device_cfg_data *config
+);
+#endif
+
+/**
+ * ia_css_isys_device_open_ready() - Complete ISYS device configuration
+ * @ context : device handle output parameter
+ * read only by css fw until function return
+ * Requires the boot sequence to be completed before it can return
+ * successfully (includes syscom and isys context)
+ * Initialise Host/ISYS messaging queues
+ * Must be called multiple times until it succeeds or it is determined by
+ * the driver that the boot sequence has failed.
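+ *
+ * Illustrative polling loop (a sketch only; the retry bound and the
+ * -EBUSY convention are assumptions, not part of this interface):
+ *
+ *	int ret, retries = 100;
+ *
+ *	do {
+ *		ret = ia_css_isys_device_open_ready(ctx);
+ *	} while (ret == -EBUSY && --retries > 0);
+ *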
+ * All streams must be stopped when calling ia_css_isys_device_open()
+ *
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_device_open_ready(
+	HANDLE context
+);
+
+/**
+ * ia_css_isys_stream_open() - open and configure a virtual stream
+ * @ stream_handle: stream handle
+ * @ stream_cfg: stream configuration data struct pointer, which is
+ * "read only" by ISYS until function return
+ * ownership: ISYS will only read-access stream_cfg during the fct call
+ * Pre-conditions:
+ * Any Isys/Ssys interface changes must call ia_css_isys_stream_open()
+ * Post-condition:
+ * On successful call, ISYS hardware resources (IBFctrl, ISL, DMAs)
+ * are acquired and the ISYS server is able to handle stream specific commands
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_stream_open(
+	HANDLE context,
+	const unsigned int stream_handle,
+	const struct ia_css_isys_stream_cfg_data *stream_cfg
+);
+
+/**
+ * ia_css_isys_stream_close() - close virtual stream
+ * @ stream_handle: stream identifier
+ * release ISYS resources by freeing up stream HW resources
+ * output pin buffers ownership is returned to the driver
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_stream_close(
+	HANDLE context,
+	const unsigned int stream_handle
+);
+
+/**
+ * ia_css_isys_stream_start() - starts handling a mipi virtual stream
+ * @ stream_handle: stream identifier
+ * @next_frame:
+ *	if next_frame != NULL: apply next_frame
+ *	settings asynchronously and start stream
+ *	This mode ensures that the first frame is captured
+ *	and thus a minimal start up latency
+ *	(preconditions: sensor streaming must be switched off)
+ *
+ *	if next_frame == NULL: sensor can be in a streaming state,
+ *	all capture indication commands will be
+ *	processed synchronously (e.g. on mipi SOF events)
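+ *
+ * Illustrative calls (a sketch; the variable names are placeholders, not
+ * part of this interface):
+ *
+ *	ret = ia_css_isys_stream_start(ctx, handle, &first_frame);
+ *
+ * or, with the sensor already streaming:
+ *
+ *	ret = ia_css_isys_stream_start(ctx, handle, NULL);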
+ *
+ * To be called once ia_css_isys_stream_open() is successfully called
+ * On success, the stream's HW resources are in active state
+ *
+ * Object ownership: During this function call,
+ * next_frame struct must be read but not modified by the ISYS,
+ * and in addition the driver is not allowed to modify it
+ * on function exit next_frame ownership is returned to
+ * the driver and is no longer accessed by ISYS
+ * next_frame contains a collection of
+ * ia_css_isys_output_pin * and ia_css_isys_input_pin *
+ * which point to the frame's "output/input pin info & data buffers",
+ *
+ * Upon the ia_css_isys_stream_start() call,
+ * ia_css_isys_output_pin* or ia_css_isys_input_pin*
+ * will now be owned by the ISYS
+ * these ptrs will enable runtime/dynamic ISYS configuration and also
+ * to store and write captured payload data
+ * at the address specified in ia_css_isys_output_pin_payload
+ * These ptrs should no longer be accessed by any other
+ * code until (ia_css_isys_output_pin) gets handed
+ * back to the driver via the response mechanism
+ * ia_css_isys_stream_handle_response()
+ * the driver is responsible for providing valid
+ * ia_css_isys_output_pin* or ia_css_isys_input_pin*
+ * Pointers set to NULL will simply not be used by the ISYS
+ *
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_stream_start(
+	HANDLE context,
+	const unsigned int stream_handle,
+	const struct ia_css_isys_frame_buff_set *next_frame
+);
+
+/**
+ * ia_css_isys_stream_stop() - Stops a mipi virtual stream
+ * @ stream_handle: stream identifier
+ * stop both accepting new commands and processing
+ * submitted capture indication commands
+ * Support for Secure Touch
+ * Precondition: stream must be started
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_stream_stop(
+	HANDLE context,
+	const unsigned int stream_handle
+);
+
+/**
+ * ia_css_isys_stream_flush() - stops a mipi virtual stream but
+ * completes processing cmd backlog
+ * @ stream_handle: stream identifier
+ * stop accepting commands, but process
+ * the already submitted capture indications
+ * Precondition: stream must be started
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_stream_flush(
+	HANDLE context,
+	const unsigned int stream_handle
+);
+
+/**
+ * ia_css_isys_stream_capture_indication()
+ * captures "next frame" on stream_handle
+ * @ stream_handle: stream identifier
+ * @ next_frame: frame pin payloads are provided atomically
+ * purpose: stream capture new frame command, Successful calls will
+ * result in frame output pins being captured
+ *
+ * To be called once ia_css_isys_stream_start() is successfully called
+ * On success, the stream's HW resources are in active state
+ *
+ * Object ownership: During this function call,
+ * next_frame struct must be read but not modified by the ISYS,
+ * and in addition the driver is not allowed to modify it
+ * on function exit next_frame ownership is returned to
+ * the driver and is no longer accessed by ISYS
+ * next_frame contains a collection of
+ * ia_css_isys_output_pin * and ia_css_isys_input_pin *
+ * which point to the frame's "output/input pin info & data buffers",
+ *
+ * Upon the ia_css_isys_stream_capture_indication() call,
+ * ia_css_isys_output_pin* or ia_css_isys_input_pin*
+ * will now be owned by the ISYS
+ * these ptrs will enable runtime/dynamic ISYS configuration and also
+ * to store and write captured payload data
+ * at the address specified in ia_css_isys_output_pin_payload
+ * These ptrs should no longer be accessed by any other
+ * code until (ia_css_isys_output_pin) gets handed
+ * back to the driver via the response mechanism
+ * ia_css_isys_stream_handle_response()
+ * the driver is responsible for providing valid
+ * ia_css_isys_output_pin* or ia_css_isys_input_pin*
+ * Pointers set to NULL will simply not be used by the ISYS, and this
+ * refers specifically to the following cases:
+ * - output pins from SOC path if the same datatype is also passed into ISAPF
+ *   path or it has active MIPI output (not NULL)
+ * - full resolution pin from ISA (but not when bypassing ISA)
+ * - scaled pin from ISA (bypassing ISA for scaled pin is impossible)
+ * - output pins from MIPI path but only when the same datatype is also
+ *   either forwarded to the ISAPF path based on the stream configuration
+ *   (it is ok if the second output pin of this datatype is also skipped)
+ *   or it has an active SOC output (not NULL)
+ *
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_stream_capture_indication(
+	HANDLE context,
+	const unsigned int stream_handle,
+	const struct ia_css_isys_frame_buff_set *next_frame
+);
+
+/**
+ * ia_css_isys_stream_handle_response() - handle ISYS responses
+ * @received_response: provides response info from the
+ * "next response element" from ISYS server
+ * received_response will be written to during the fct call and
+ * can be read by the drv once fct is returned
+ *
+ * purpose: Allows the client to handle received ISYS responses
+ * Upon an IRQ event, the driver will call ia_css_isys_stream_handle_response()
+ * until the queue is emptied
+ * Responses returning IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY to the driver will
+ * hand back ia_css_isys_output_pin ownership to the drv
+ * ISYS FW will not write/read access ia_css_isys_output_pin
+ * once it belongs to the driver
+ * Pre-conditions: ISYS client must have sent CMDs to the ISYS server
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_stream_handle_response(
+	HANDLE context,
+	struct ia_css_isys_resp_info *received_response
+);
+
+/**
+ * ia_css_isys_device_close() - close ISYS device
+ * @context : device handle output parameter
+ * Purpose: Request for the cell to close
+ * All streams must be stopped when calling ia_css_isys_device_close()
+ *
+ * Return: int type error code (errno.h)
+ */
+#if HAS_DUAL_CMD_CTX_SUPPORT
+extern int ia_css_isys_context_destroy(
+	HANDLE context
+);
+extern void ia_css_isys_device_close(
+	void
+);
+#else
+extern int ia_css_isys_device_close(
+	HANDLE context
+);
+#endif
+
+/**
+ * ia_css_isys_device_release() - release ISYS device
+ * @context : device handle output parameter
+ * @force: forces release or verifies the state before releasing
+ * Purpose: Free context forcibly or not
+ * Must be called after ia_css_isys_device_close()
+ *
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_device_release(
+	HANDLE context,
+	unsigned int force
+);
+
+/**
+ * ia_css_isys_proxy_write_req() - issue an isys proxy write request
+ * @context : device handle output parameter
+ * Purpose: Issues a write request for the regions that are exposed
+ * by the proxy interface
+ * Can be called any time between ia_css_isys_device_open() and
+ * ia_css_isys_device_close()
+ *
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_proxy_write_req(
+	HANDLE context,
+	const struct ia_css_proxy_write_req_val *write_req_val
+);
+
+/**
+ * ia_css_isys_proxy_handle_write_response()
+ * - Handles isys proxy write request responses
+ * @context : device handle output parameter
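+ * @received_response: provides the write request response; written by the
+ *                     fw during the fct call, readable by the driver after
+ *                     return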
+ * Purpose: Handling the responses that are created by the FW upon the
+ * completion of a proxy interface write request
+ *
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_proxy_handle_write_response(
+	HANDLE context,
+	struct ia_css_proxy_write_req_resp *received_response
+);
+
+#endif /* __IA_CSS_ISYSAPI_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_fw_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_fw_types.h
new file mode 100644
index 000000000000..938f726d1cfb
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_fw_types.h
@@ -0,0 +1,512 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_ISYSAPI_FW_TYPES_H
+#define __IA_CSS_ISYSAPI_FW_TYPES_H
+
+
+/* Max number of Input/Output Pins */
+#define MAX_IPINS (4)
+/* worst case is ISA use where a single input pin produces:
+* Mipi output, NS Pixel Output, and Scaled Pixel Output.
+* This is how the 2 is calculated
+*/
+#define MAX_OPINS ((MAX_IPINS) + 2)
+
+/* Max number of supported virtual streams */
+#define STREAM_ID_MAX (8)
+
+/* Aligned with the approach of having one dedicated per stream */
+#define N_MAX_MSG_SEND_QUEUES (STREAM_ID_MAX)
+/* Single return queue for all streams/commands type */
+#define N_MAX_MSG_RECV_QUEUES (1)
+/* Single device queue for high priority commands (bypass in-order queue) */
+#define N_MAX_DEV_SEND_QUEUES (1)
+/* Single dedicated send queue for proxy interface */
+#define N_MAX_PROXY_SEND_QUEUES (1)
+/* Single dedicated recv queue for proxy interface */
+#define N_MAX_PROXY_RECV_QUEUES (1)
+/* Send queues layout */
+#define BASE_PROXY_SEND_QUEUES (0)
+#define BASE_DEV_SEND_QUEUES (BASE_PROXY_SEND_QUEUES + N_MAX_PROXY_SEND_QUEUES)
+#define BASE_MSG_SEND_QUEUES (BASE_DEV_SEND_QUEUES + N_MAX_DEV_SEND_QUEUES)
+#define N_MAX_SEND_QUEUES (BASE_MSG_SEND_QUEUES + N_MAX_MSG_SEND_QUEUES)
+/* Recv queues layout */
+#define BASE_PROXY_RECV_QUEUES (0)
+#define BASE_MSG_RECV_QUEUES (BASE_PROXY_RECV_QUEUES + N_MAX_PROXY_RECV_QUEUES)
+#define N_MAX_RECV_QUEUES (BASE_MSG_RECV_QUEUES + N_MAX_MSG_RECV_QUEUES)
+
+#define MAX_QUEUE_SIZE (256)
+#define MIN_QUEUE_SIZE (1)
+
+/* Consider 1 slot per stream since driver is not expected to pipeline
+ * device commands for the same stream */
+#define DEV_SEND_QUEUE_SIZE (STREAM_ID_MAX)
+
+/* Max number of supported SRAM buffer partitions */
+/* It refers to the size of stream partitions */
+/* These partitions are further subpartitioned internally */
+/* by the FW, but by declaring statically the stream */
+/* partitions we solve the buffer fragmentation issue */
+#define NOF_SRAM_BLOCKS_MAX (STREAM_ID_MAX)
+
+/* Max number of supported input pins routed in ISL */
+#define MAX_IPINS_IN_ISL (2)
+
+/* Max number of planes for frame formats supported by the FW */
+#define PIN_PLANES_MAX (4)
+
+/**
+ * enum ia_css_isys_resp_type
+ */
+enum ia_css_isys_resp_type {
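+	/* delivered to the host via ia_css_isys_stream_handle_response() */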
+	IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_START_ACK,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_STOP_ACK,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_FLUSH_ACK,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_CLOSE_ACK,
+	IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY,
+	IA_CSS_ISYS_RESP_TYPE_PIN_DATA_WATERMARK,
+	IA_CSS_ISYS_RESP_TYPE_FRAME_SOF,
+	IA_CSS_ISYS_RESP_TYPE_FRAME_EOF,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE,
+	IA_CSS_ISYS_RESP_TYPE_PIN_DATA_SKIPPED,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED,
+	IA_CSS_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED,
+	IA_CSS_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED,
+	IA_CSS_ISYS_RESP_TYPE_STATS_DATA_READY,
+	N_IA_CSS_ISYS_RESP_TYPE
+};
+
+/**
+ * enum ia_css_isys_send_type
+ */
+enum ia_css_isys_send_type {
+	IA_CSS_ISYS_SEND_TYPE_STREAM_OPEN = 0,
+	IA_CSS_ISYS_SEND_TYPE_STREAM_START,
+	IA_CSS_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE,
+	IA_CSS_ISYS_SEND_TYPE_STREAM_CAPTURE,
+	IA_CSS_ISYS_SEND_TYPE_STREAM_STOP,
+	IA_CSS_ISYS_SEND_TYPE_STREAM_FLUSH,
+	IA_CSS_ISYS_SEND_TYPE_STREAM_CLOSE,
+	N_IA_CSS_ISYS_SEND_TYPE
+};
+
+/**
+ * enum ia_css_isys_queue_type
+ */
+enum ia_css_isys_queue_type {
+	IA_CSS_ISYS_QUEUE_TYPE_PROXY = 0,
+	IA_CSS_ISYS_QUEUE_TYPE_DEV,
+	IA_CSS_ISYS_QUEUE_TYPE_MSG,
+	N_IA_CSS_ISYS_QUEUE_TYPE
+};
+
+/**
+ * enum ia_css_isys_stream_source: Specifies a source for a stream
+ */
+enum ia_css_isys_stream_source {
+	IA_CSS_ISYS_STREAM_SRC_PORT_0 = 0,
+	IA_CSS_ISYS_STREAM_SRC_PORT_1,
+	IA_CSS_ISYS_STREAM_SRC_PORT_2,
+	IA_CSS_ISYS_STREAM_SRC_PORT_3,
+	IA_CSS_ISYS_STREAM_SRC_PORT_4,
+	IA_CSS_ISYS_STREAM_SRC_PORT_5,
+	IA_CSS_ISYS_STREAM_SRC_PORT_6,
+	IA_CSS_ISYS_STREAM_SRC_PORT_7,
+	IA_CSS_ISYS_STREAM_SRC_PORT_8,
+	IA_CSS_ISYS_STREAM_SRC_PORT_9,
+	IA_CSS_ISYS_STREAM_SRC_PORT_10,
+	IA_CSS_ISYS_STREAM_SRC_PORT_11,
+	IA_CSS_ISYS_STREAM_SRC_PORT_12,
+	IA_CSS_ISYS_STREAM_SRC_PORT_13,
+	IA_CSS_ISYS_STREAM_SRC_PORT_14,
+	IA_CSS_ISYS_STREAM_SRC_PORT_15,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_0,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_1,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_2,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_3,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_4,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_5,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_6,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_7,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_8,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_9,
+	N_IA_CSS_ISYS_STREAM_SRC
+};
+
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_PORT0 IA_CSS_ISYS_STREAM_SRC_PORT_0
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_PORT1 IA_CSS_ISYS_STREAM_SRC_PORT_1
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_PORT2 IA_CSS_ISYS_STREAM_SRC_PORT_2
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_PORT3 IA_CSS_ISYS_STREAM_SRC_PORT_3
+
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_PORTA IA_CSS_ISYS_STREAM_SRC_PORT_4
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_PORTB IA_CSS_ISYS_STREAM_SRC_PORT_5
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT0 IA_CSS_ISYS_STREAM_SRC_PORT_6
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT1 IA_CSS_ISYS_STREAM_SRC_PORT_7
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT2 IA_CSS_ISYS_STREAM_SRC_PORT_8
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT3 IA_CSS_ISYS_STREAM_SRC_PORT_9
+
+#define IA_CSS_ISYS_STREAM_SRC_MIPIGEN_PORT0 IA_CSS_ISYS_STREAM_SRC_MIPIGEN_0
+#define IA_CSS_ISYS_STREAM_SRC_MIPIGEN_PORT1 IA_CSS_ISYS_STREAM_SRC_MIPIGEN_1
+
+/**
+ * enum ia_css_isys_mipi_vc: MIPI csi2 spec
+ * supports up to 4 virtual channels per physical channel
+ */
+enum ia_css_isys_mipi_vc {
+	IA_CSS_ISYS_MIPI_VC_0 = 0,
+	IA_CSS_ISYS_MIPI_VC_1,
+	IA_CSS_ISYS_MIPI_VC_2,
+	IA_CSS_ISYS_MIPI_VC_3,
+	N_IA_CSS_ISYS_MIPI_VC
+};
+
+/**
+ * Supported Pixel Frame formats. Expandable if needed
+ */
+enum ia_css_isys_frame_format_type {
+	IA_CSS_ISYS_FRAME_FORMAT_NV11 = 0,/* 12 bit YUV 411, Y, UV plane */
+	IA_CSS_ISYS_FRAME_FORMAT_NV12,/* 12 bit YUV 420, Y, UV plane */
+	IA_CSS_ISYS_FRAME_FORMAT_NV12_16,/* 16 bit YUV 420, Y, UV plane */
+	IA_CSS_ISYS_FRAME_FORMAT_NV12_TILEY,/* 12 bit YUV 420, Intel
+					       proprietary tiled format,
+					       TileY
+					     */
+	IA_CSS_ISYS_FRAME_FORMAT_NV16,/* 16 bit YUV 422, Y, UV plane */
+	IA_CSS_ISYS_FRAME_FORMAT_NV21,/* 12 bit YUV 420, Y, VU plane */
+	IA_CSS_ISYS_FRAME_FORMAT_NV61,/* 16 bit YUV 422, Y, VU plane */
+	IA_CSS_ISYS_FRAME_FORMAT_YV12,/* 12 bit YUV 420, Y, V, U plane */
+	IA_CSS_ISYS_FRAME_FORMAT_YV16,/* 16 bit YUV 422, Y, V, U plane */
+	IA_CSS_ISYS_FRAME_FORMAT_YUV420,/* 12 bit YUV 420, Y, U, V plane */
+	IA_CSS_ISYS_FRAME_FORMAT_YUV420_10,/* yuv420, 10 bits per subpixel */
+	IA_CSS_ISYS_FRAME_FORMAT_YUV420_12,/* yuv420, 12 bits per subpixel */
+	IA_CSS_ISYS_FRAME_FORMAT_YUV420_14,/* yuv420, 14 bits per subpixel */
+	IA_CSS_ISYS_FRAME_FORMAT_YUV420_16,/* yuv420, 16 bits per subpixel */
+	IA_CSS_ISYS_FRAME_FORMAT_YUV422,/* 16 bit YUV 422, Y, U, V plane */
+	IA_CSS_ISYS_FRAME_FORMAT_YUV422_16,/* yuv422, 16 bits per subpixel */
+	IA_CSS_ISYS_FRAME_FORMAT_UYVY,/* 16 bit YUV 422, UYVY interleaved */
+	IA_CSS_ISYS_FRAME_FORMAT_YUYV,/* 16 bit YUV 422, YUYV interleaved */
+	IA_CSS_ISYS_FRAME_FORMAT_YUV444,/* 24 bit YUV 444, Y, U, V plane */
+	IA_CSS_ISYS_FRAME_FORMAT_YUV_LINE,/* Internal format, 2 y lines
+					     followed by a uvinterleaved line
+					   */
+	IA_CSS_ISYS_FRAME_FORMAT_RAW8,	/* RAW8, 1 plane */
+	IA_CSS_ISYS_FRAME_FORMAT_RAW10,	/* RAW10, 1 plane */
+	IA_CSS_ISYS_FRAME_FORMAT_RAW12,	/* RAW12, 1 plane */
+	IA_CSS_ISYS_FRAME_FORMAT_RAW14,	/* RAW14, 1 plane */
+	IA_CSS_ISYS_FRAME_FORMAT_RAW16,	/* RAW16, 1 plane */
+	IA_CSS_ISYS_FRAME_FORMAT_RGB565,/* 16 bit RGB, 1 plane. Each 3 sub
+					   pixels are packed into one 16 bit
+					   value, 5 bits for R, 6 bits for G
+					   and 5 bits for B.
+					 */
+	IA_CSS_ISYS_FRAME_FORMAT_PLANAR_RGB888,	/* 24 bit RGB, 3 planes */
+	IA_CSS_ISYS_FRAME_FORMAT_RGBA888,/* 32 bit RGBA, 1 plane,
+					    A=Alpha (alpha is unused)
+					  */
+	IA_CSS_ISYS_FRAME_FORMAT_QPLANE6,/* Internal, for advanced ISP */
+	IA_CSS_ISYS_FRAME_FORMAT_BINARY_8,/* byte stream, used for jpeg. */
+	N_IA_CSS_ISYS_FRAME_FORMAT
+};
+/* Temporary for driver compatibility */
+#define IA_CSS_ISYS_FRAME_FORMAT_RAW (IA_CSS_ISYS_FRAME_FORMAT_RAW16)
+
+
+/**
+ * Supported MIPI data type. Keep in sync with the array in
+ * ia_css_isys_private.c
+ */
+enum ia_css_isys_mipi_data_type {
+	/** SYNCHRONIZATION SHORT PACKET DATA TYPES */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_FRAME_START_CODE = 0x00,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_FRAME_END_CODE = 0x01,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_LINE_START_CODE = 0x02,	/* Optional */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_LINE_END_CODE = 0x03,	/* Optional */
+	/** Reserved 0x04-0x07 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x04 = 0x04,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x05 = 0x05,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x06 = 0x06,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x07 = 0x07,
+	/** GENERIC SHORT PACKET DATA TYPES */
+	/** They are used to keep the timing information for the
+	 * opening/closing of shutters, triggering of flashes, etc.
+	 */
+	/* Generic Short Packet Code 1 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT1 = 0x08,
+	/* Generic Short Packet Code 2 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT2 = 0x09,
+	/* Generic Short Packet Code 3 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT3 = 0x0A,
+	/* Generic Short Packet Code 4 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT4 = 0x0B,
+	/* Generic Short Packet Code 5 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT5 = 0x0C,
+	/* Generic Short Packet Code 6 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT6 = 0x0D,
+	/* Generic Short Packet Code 7 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT7 = 0x0E,
+	/* Generic Short Packet Code 8 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT8 = 0x0F,
+	/** GENERIC LONG PACKET DATA TYPES */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_NULL = 0x10,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_BLANKING_DATA = 0x11,
+	/* Embedded 8-bit non Image Data */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_EMBEDDED = 0x12,
+	/** Reserved 0x13-0x17 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x13 = 0x13,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x14 = 0x14,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x15 = 0x15,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x16 = 0x16,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x17 = 0x17,
+	/** YUV DATA TYPES */
+	/* 8 bits per subpixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_8 = 0x18,
+	/* 10 bits per subpixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_10 = 0x19,
+	/* 8 bits per subpixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_8_LEGACY = 0x1A,
+	/** Reserved 0x1B */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x1B = 0x1B,
+	/* YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_8_SHIFT = 0x1C,
+	/* YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_10_SHIFT = 0x1D,
+	/* UYVY..UVYV, 8 bits per subpixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_YUV422_8 = 0x1E,
+	/* UYVY..UVYV, 10 bits per subpixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_YUV422_10 = 0x1F,
+	/** RGB DATA TYPES */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_444 = 0x20,
+	/* BGR..BGR, 5 bits per subpixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_555 = 0x21,
+	/* BGR..BGR, 5 bits B and R, 6 bits G */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_565 = 0x22,
+	/* BGR..BGR, 6 bits per subpixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_666 = 0x23,
+	/* BGR..BGR, 8 bits per subpixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_888 = 0x24,
+	/** Reserved 0x25-0x27 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x25 = 0x25,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x26 = 0x26,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x27 = 0x27,
+	/** RAW DATA TYPES */
+	/* RAW data, 6 bits per pixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_6 = 0x28,
+	/* RAW data, 7 bits per pixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_7 = 0x29,
+	/* RAW data, 8 bits per pixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_8 = 0x2A,
+	/* RAW data, 10 bits per pixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_10 = 0x2B,
+	/* RAW data, 12 bits per pixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_12 = 0x2C,
+	/* RAW data, 14 bits per pixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_14 = 0x2D,
+	/** Reserved 0x2E-0x2F are used with assigned meaning */
+	/* RAW data, 16 bits per pixel, not specified in CSI-MIPI standard */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_16 = 0x2E,
+	/* Binary byte stream, which is targeted at JPEG, not specified in
+	 * CSI-MIPI standard
+	 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_BINARY_8 = 0x2F,
+	/** USER DEFINED 8-BIT DATA TYPES */
+	/** For example, the data transmitter (e.g. the SoC sensor) can keep
+	 * the JPEG data as the User Defined Data Type 4 and the MPEG data as
+	 * the User Defined Data Type 7.
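+	 * Note that the bits-per-pixel table in ia_css_isys_private.c
+	 * extracts all eight user defined types as 8 bits per pixel.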
+	 */
+	/* User defined 8-bit data type 1 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF1 = 0x30,
+	/* User defined 8-bit data type 2 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF2 = 0x31,
+	/* User defined 8-bit data type 3 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF3 = 0x32,
+	/* User defined 8-bit data type 4 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF4 = 0x33,
+	/* User defined 8-bit data type 5 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF5 = 0x34,
+	/* User defined 8-bit data type 6 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF6 = 0x35,
+	/* User defined 8-bit data type 7 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF7 = 0x36,
+	/* User defined 8-bit data type 8 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF8 = 0x37,
+	/** Reserved 0x38-0x3F */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x38 = 0x38,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x39 = 0x39,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3A = 0x3A,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3B = 0x3B,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3C = 0x3C,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3D = 0x3D,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3E = 0x3E,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3F = 0x3F,
+
+	/* Keep always last and max value */
+	N_IA_CSS_ISYS_MIPI_DATA_TYPE = 0x40
+};
+
+/** enum ia_css_isys_pin_type: output pin buffer types.
+ * Buffers can be queued and de-queued to hand them over between IA and ISYS
+ */
+enum ia_css_isys_pin_type {
+	/* Captured as MIPI packets */
+	IA_CSS_ISYS_PIN_TYPE_MIPI = 0,
+	/* Captured through the ISApf (with/without ISA)
+	 * and the non-scaled output path
+	 */
+	IA_CSS_ISYS_PIN_TYPE_RAW_NS,
+	/* Captured through the ISApf + ISA and the scaled output path */
+	IA_CSS_ISYS_PIN_TYPE_RAW_S,
+	/* Captured through the SoC path */
+	IA_CSS_ISYS_PIN_TYPE_RAW_SOC,
+	/* Reserved for future use, maybe short packets */
+	IA_CSS_ISYS_PIN_TYPE_METADATA_0,
+	/* Reserved for future use */
+	IA_CSS_ISYS_PIN_TYPE_METADATA_1,
+	/* Legacy (non-PIV2), used for the AWB stats */
+	IA_CSS_ISYS_PIN_TYPE_AWB_STATS,
+	/* Legacy (non-PIV2), used for the AF stats */
+	IA_CSS_ISYS_PIN_TYPE_AF_STATS,
+	/* Legacy (non-PIV2), used for the AE stats */
+	IA_CSS_ISYS_PIN_TYPE_HIST_STATS,
+	/* Used for the PAF FF */
+	IA_CSS_ISYS_PIN_TYPE_PAF_FF,
+	/* Keep always last and max value */
+	N_IA_CSS_ISYS_PIN_TYPE
+};
+
+/**
+ * enum ia_css_isys_isl_use. Describes the ISL/ISA use
+ * (ISAPF path in chips after BXT A0)
+ */
+enum ia_css_isys_isl_use {
+	IA_CSS_ISYS_USE_NO_ISL_NO_ISA = 0,
+	IA_CSS_ISYS_USE_SINGLE_DUAL_ISL,
+	IA_CSS_ISYS_USE_SINGLE_ISA,
+	N_IA_CSS_ISYS_USE
+};
+
+/**
+ * enum ia_css_isys_mipi_store_mode. Describes whether long MIPI packets
+ * reach the MIPI SRAM with the long packet header or not.
+ * If not, the only option is to capture them with pin type MIPI.
+ */
+enum ia_css_isys_mipi_store_mode {
+	IA_CSS_ISYS_MIPI_STORE_MODE_NORMAL = 0,
+	IA_CSS_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER,
+	N_IA_CSS_ISYS_MIPI_STORE_MODE
+};
+
+/**
+ * enum ia_css_isys_mipi_dt_rename_mode. Describes whether long MIPI packets
+ * are received with their DT renamed to some other DT format.
+ */
+enum ia_css_isys_mipi_dt_rename_mode {
+	IA_CSS_ISYS_MIPI_DT_NO_RENAME = 0,
+	IA_CSS_ISYS_MIPI_DT_RENAMED_MODE,
+	N_IA_CSS_ISYS_MIPI_DT_MODE
+};
+
+/**
+ * enum ia_css_isys_type_paf. Describes the type of PAF enabled
+ * (PAF path in chips after cnlB0)
+ */
+enum ia_css_isys_type_paf {
+	/* PAF data not present */
+	IA_CSS_ISYS_TYPE_NO_PAF = 0,
+	/* Type 2 sensor types, PAF coming separately from Image Frame */
+	/* PAF data in interleaved format (RLRL or LRLR) */
+	IA_CSS_ISYS_TYPE_INTERLEAVED_PAF,
+	/* PAF data in non-interleaved format (LL/RR or RR/LL) */
+	IA_CSS_ISYS_TYPE_NON_INTERLEAVED_PAF,
+	/* Type 3 sensor types, PAF data embedded in Image Frame */
+	/* Frame Embedded PAF in interleaved format (RLRL or LRLR) */
+	IA_CSS_ISYS_TYPE_FRAME_EMB_INTERLEAVED_PAF,
+	/* Frame Embedded PAF in non-interleaved format (LL/RR or RR/LL) */
+	IA_CSS_ISYS_TYPE_FRAME_EMB_NON_INTERLEAVED_PAF,
+	N_IA_CSS_ISYS_TYPE_PAF
+};
+
+/**
+ * enum ia_css_isys_cropping_location. Enumerates the cropping locations
+ * in ISYS
+ */
+enum ia_css_isys_cropping_location {
+	/* Cropping executed in ISAPF (mainly), ISAPF preproc (odd column) and
+	 * MIPI STR2MMIO (odd row)
+	 */
+	IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA = 0,
+	/* BXT A0 legacy mode which will never be implemented */
+	IA_CSS_ISYS_CROPPING_LOCATION_RESERVED_1,
+	/* Cropping executed in StreamPifConv in the ISA output for
+	 * RAW_NS pin
+	 */
+	IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED,
+	/* Cropping executed in StreamScaledPifConv in the ISA output for
+	 * RAW_S pin
+	 */
+	IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED,
+	N_IA_CSS_ISYS_CROPPING_LOCATION
+};
+
+/**
+ * enum ia_css_isys_resolution_info. Describes the resolution, required to
+ * setup the various ISA GP registers.
+ */
+enum ia_css_isys_resolution_info {
+	/* Non-scaled ISA output resolution before the
+	 * StreamPifConv cropping
+	 */
+	IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED = 0,
+	/* Scaled ISA output resolution before the
+	 * StreamScaledPifConv cropping
+	 */
+	IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED,
+	N_IA_CSS_ISYS_RESOLUTION_INFO
+};
+
+/**
+ * enum ia_css_isys_error. Describes the error type detected by the FW
+ */
+enum ia_css_isys_error {
+	IA_CSS_ISYS_ERROR_NONE = 0, /* No details */
+	IA_CSS_ISYS_ERROR_FW_INTERNAL_CONSISTENCY, /* enum */
+	IA_CSS_ISYS_ERROR_HW_CONSISTENCY, /* enum */
+	IA_CSS_ISYS_ERROR_DRIVER_INVALID_COMMAND_SEQUENCE, /* enum */
+	IA_CSS_ISYS_ERROR_DRIVER_INVALID_DEVICE_CONFIGURATION, /* enum */
+	IA_CSS_ISYS_ERROR_DRIVER_INVALID_STREAM_CONFIGURATION, /* enum */
+	IA_CSS_ISYS_ERROR_DRIVER_INVALID_FRAME_CONFIGURATION, /* enum */
+	IA_CSS_ISYS_ERROR_INSUFFICIENT_RESOURCES, /* enum */
+	IA_CSS_ISYS_ERROR_HW_REPORTED_STR2MMIO, /* HW code */
+	IA_CSS_ISYS_ERROR_HW_REPORTED_SIG2CIO, /* HW code */
+	IA_CSS_ISYS_ERROR_SENSOR_FW_SYNC, /* enum */
+	IA_CSS_ISYS_ERROR_STREAM_IN_SUSPENSION, /* FW code */
+	IA_CSS_ISYS_ERROR_RESPONSE_QUEUE_FULL, /* FW code */
+	N_IA_CSS_ISYS_ERROR
+};
+
+/**
+ * enum ia_css_proxy_error. Describes the error type for the proxy detected by
+ * the FW
+ */
+enum ia_css_proxy_error {
+	IA_CSS_PROXY_ERROR_NONE = 0,
+	IA_CSS_PROXY_ERROR_INVALID_WRITE_REGION,
+	IA_CSS_PROXY_ERROR_INVALID_WRITE_OFFSET,
+	N_IA_CSS_PROXY_ERROR
+};
+
+#endif /* __IA_CSS_ISYSAPI_FW_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_fw_version.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_fw_version.h
new file mode 100644
index 000000000000..bc056157cedb
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_fw_version.h
@@ -0,0 +1,21 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYSAPI_FW_VERSION_H +#define __IA_CSS_ISYSAPI_FW_VERSION_H + +/* ISYSAPI FW VERSION is taken from Makefile for FW tests */ +#define BXT_FW_RELEASE_VERSION ISYS_FIRMWARE_VERSION + +#endif /* __IA_CSS_ISYSAPI_FW_VERSION_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_defs.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_defs.h new file mode 100644 index 000000000000..1d1dbf370299 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_defs.h @@ -0,0 +1,122 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYSAPI_PROXY_REGION_DEFS_H +#define __IA_CSS_ISYSAPI_PROXY_REGION_DEFS_H + +#include "ia_css_isysapi_proxy_region_types.h" + +/* + * Definitions for IPU4_B0_PROXY_INT + */ + +#if defined(IPU4_B0_PROXY_INT) + +/** + * enum ipu4_b0_ia_css_proxy_write_region. 
Provides the list of regions for ipu4B0 that + * can be accessed (for writing purpose) through the proxy interface + */ +enum ipu4_b0_ia_css_proxy_write_region { + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_0_ERROR_FILL_RATE = 0, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_1_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_2_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_3_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_4_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_5_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_6_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_7_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_8_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_9_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IRQ_URGENT_THRESHOLD, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IRQ_CRITICAL_THRESHOLD, + N_IPU4_B0_IA_CSS_PROXY_WRITE_REGION +}; + +struct ia_css_proxy_write_region_description ipu4_b0_reg_write_desc[N_IPU4_B0_IA_CSS_PROXY_WRITE_REGION] = { + /* base_addr, offset */ + {0x64128, /*input_system_csi2_logic_s2m_a_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_0_ERROR_FILL_RATE*/ + {0x65128, /*input_system_csi2_logic_s2m_b_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_1_ERROR_FILL_RATE*/ + {0x66128, /*input_system_csi2_logic_s2m_c_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_2_ERROR_FILL_RATE*/ + {0x67128, /*input_system_csi2_logic_s2m_d_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_3_ERROR_FILL_RATE*/ + {0x6C128, /*input_system_csi2_3ph_logic_s2m_a_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_4_ERROR_FILL_RATE*/ + {0x6C928, /*input_system_csi2_3ph_logic_s2m_b_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_5_ERROR_FILL_RATE*/ + {0x6D128, /*input_system_csi2_3ph_logic_s2m_0_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_6_ERROR_FILL_RATE*/ + {0x6D928, /*input_system_csi2_3ph_logic_s2m_1_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_7_ERROR_FILL_RATE*/ + {0x6E128, /*input_system_csi2_3ph_logic_s2m_2_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_8_ERROR_FILL_RATE*/ + {0x6E928, /*input_system_csi2_3ph_logic_s2m_3_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_9_ERROR_FILL_RATE*/ + {0x7800C, /*input_system_unis_logic_gda_irq_urgent_threshold*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IRQ_URGENT_THRESHOLD*/ + {0x78010, /*input_system_unis_logic_gda_irq_critical_threshold*/ 4} /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IRQ_CRITICAL_THRESHOLD*/ +}; + +#endif /*defined(IPU4_B0_PROXY_INT)*/ + +/* + * Definitions for IPU4P_A0_PROXY_INT + */ + +#if defined(IPU4P_A0_PROXY_INT) + +/** + * enum ipu4p_a0_ia_css_proxy_write_region. 
Provides the list of regions for ipu4pA0 that
+ * can be accessed (for writing purpose) through the proxy interface
+ */
+enum ipu4p_a0_ia_css_proxy_write_region {
+	N_IPU4P_A0_IA_CSS_PROXY_WRITE_REGION
+};
+
+#define IPU4P_A0_NO_PROXY_WRITE_REGION_AVAILABLE
+
+#ifndef IPU4P_A0_NO_PROXY_WRITE_REGION_AVAILABLE
+struct ia_css_proxy_write_region_description ipu4p_a0_reg_write_desc[N_IPU4P_A0_IA_CSS_PROXY_WRITE_REGION] = {
+};
+#endif /*IPU4P_A0_NO_PROXY_WRITE_REGION_AVAILABLE*/
+
+#endif /*defined(IPU4P_A0_PROXY_INT)*/
+
+/*
+ * Definitions for IPU4P_B0_PROXY_INT
+ */
+
+#if defined(IPU4P_B0_PROXY_INT)
+
+/**
+ * enum ipu4p_b0_ia_css_proxy_write_region. Provides the list of regions for ipu4pB0 that
+ * can be accessed (for writing purpose) through the proxy interface
+ */
+enum ipu4p_b0_ia_css_proxy_write_region {
+	IPU4P_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IWAKE_THRESHOLD = 0,
+	IPU4P_B0_IA_CSS_PROXY_WRITE_REGION_GDA_ENABLE_IWAKE,
+	N_IPU4P_B0_IA_CSS_PROXY_WRITE_REGION
+};
+
+struct ia_css_proxy_write_region_description ipu4p_b0_reg_write_desc[N_IPU4P_B0_IA_CSS_PROXY_WRITE_REGION] = {
+	/* base_addr, offset */
+	/*input_system_unis_logic_gda_iwake_threshold*/
+	{0x78014, 4}, /*IPU4P_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IWAKE_THRESHOLD*/
+	/*input_system_unis_logic_gda_enable_iwake*/
+	{0x7801C, 4} /*IPU4P_B0_IA_CSS_PROXY_WRITE_REGION_GDA_ENABLE_IWAKE*/
+};
+
+#endif /*defined(IPU4P_B0_PROXY_INT)*/
+
+#endif /* __IA_CSS_ISYSAPI_PROXY_REGION_DEFS_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_types.h
new file mode 100644
index 000000000000..045f089e5a4c
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_types.h
@@ -0,0 +1,24 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_ISYSAPI_PROXY_REGION_TYPES_H
+#define __IA_CSS_ISYSAPI_PROXY_REGION_TYPES_H
+
+
+struct ia_css_proxy_write_region_description {
+	uint32_t base_addr;
+	uint32_t offset;
+};
+
+#endif /* __IA_CSS_ISYSAPI_PROXY_REGION_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_types.h
new file mode 100644
index 000000000000..dafc34506c19
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/interface/ia_css_isysapi_types.h
@@ -0,0 +1,348 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_ISYSAPI_TYPES_H
+#define __IA_CSS_ISYSAPI_TYPES_H
+
+#include "ia_css_isysapi_fw_types.h"
+#include "type_support.h"
+
+#include "ia_css_return_token.h"
+#include "ia_css_output_buffer.h"
+#include "ia_css_input_buffer.h"
+#include "ia_css_terminal_defs.h"
+
+/**
+ * struct ia_css_isys_buffer_partition - buffer partition information
+ * @num_gda_pages: Number of virtual gda pages available for each virtual stream
+ */
+struct ia_css_isys_buffer_partition {
+	unsigned int num_gda_pages[STREAM_ID_MAX];
+};
+
+/**
+ * This should contain the driver specified info for sys
+ */
+struct ia_css_driver_sys_config {
+	unsigned int ssid;
+	unsigned int mmid;
+	unsigned int num_send_queues; /* # of MSG send queues */
+	unsigned int num_recv_queues; /* # of MSG recv queues */
+	unsigned int send_queue_size; /* max # tokens per queue */
+	unsigned int recv_queue_size; /* max # tokens per queue */
+
+	unsigned int icache_prefetch; /* enable prefetching for SPC */
+};
+
+/**
+ * This should contain the driver specified info for proxy write queues
+ */
+struct ia_css_driver_proxy_config {
+	/* max # tokens per PROXY send/recv queue.
+	 * Proxy queues are used for write access purpose
+	 */
+	unsigned int proxy_write_queue_size;
+};
+
+ /**
+ * struct ia_css_isys_device_cfg_data - ISYS device configuration data
+ * @driver_sys
+ * @buffer_partition: Information required for the virtual SRAM
+ *		space partition of the streams.
+ * @driver_proxy
+ * @secure: Driver needs to set 'secure' to indicate the intention
+ *	    when invoking ia_css_isys_context_create() in
+ *	    HAS_DUAL_CMD_CTX_SUPPORT case. If 'true', it's for
+ *	    secure case.
+ */
+struct ia_css_isys_device_cfg_data {
+	struct ia_css_driver_sys_config driver_sys;
+	struct ia_css_isys_buffer_partition buffer_partition;
+	struct ia_css_driver_proxy_config driver_proxy;
+	bool secure;
+	unsigned int vtl0_addr_mask; /* only applicable in 'secure' case */
+};
+
+/**
+ * struct ia_css_isys_resolution: Generic resolution structure.
+ * @width
+ * @height
+ */
+struct ia_css_isys_resolution {
+	unsigned int width;
+	unsigned int height;
+};
+
+/**
+ * struct ia_css_isys_output_pin_payload
+ * @out_buf_id: Points to output pin buffer - buffer identifier
+ * @addr: Points to output pin buffer - CSS Virtual Address
+ * @compress: Request frame compression (1), or not (0)
+ */
+struct ia_css_isys_output_pin_payload {
+	ia_css_return_token out_buf_id;
+	ia_css_output_buffer_css_address addr;
+	unsigned int compress;
+};
+
+/**
+ * struct ia_css_isys_output_pin_info
+ * @input_pin_id: input pin id/index which is source of
+ *		the data for this output pin
+ * @output_res: output pin resolution
+ * @stride: output stride in Bytes (not valid for statistics)
+ * @pt: pin type
+ * @ft: frame format type
+ * @watermark_in_lines: pin watermark level in lines
+ * @send_irq: assert if pin event should trigger irq
+ * @link_id: identifies PPG to connect to, link_id = 0 implies offline
+ *		while link_id > 0 implies buffer_chasing or online mode
+ *		can be entered.
+ * @reserve_compression: Reserve compression resources for pin.
+ * @payload_buf_size: Minimum size in Bytes of all buffers that will be
+ * supplied for capture on this pin (i.e.
addressed by ia_css_isys_output_pin_payload::addr)
+ */
+struct ia_css_isys_output_pin_info {
+	unsigned int input_pin_id;
+	struct ia_css_isys_resolution output_res;
+	unsigned int stride;
+	enum ia_css_isys_pin_type pt;
+	enum ia_css_isys_frame_format_type ft;
+	unsigned int watermark_in_lines;
+	unsigned int send_irq;
+	enum ia_css_isys_link_id link_id;
+	unsigned int reserve_compression;
+	unsigned int payload_buf_size;
+};
+
+/**
+ * struct ia_css_isys_param_pin
+ * @param_buf_id: Points to param buffer - buffer identifier
+ * @addr: Points to param buffer - CSS Virtual Address
+ */
+struct ia_css_isys_param_pin {
+	ia_css_return_token param_buf_id;
+	ia_css_input_buffer_css_address addr;
+};
+
+/**
+ * struct ia_css_isys_input_pin_info
+ * @input_res: input resolution
+ * @dt: mipi data type
+ * @mipi_store_mode: defines if the legacy long packet header will be stored
+ *		or discarded; if discarded, the output pin type for this
+ *		input pin can only be MIPI
+ * @dt_rename_mode: defines if MIPI data is encapsulated in some other
+ *		data type
+ * @mapped_dt: Encapsulating MIPI data type (what the sensor sends)
+ */
+struct ia_css_isys_input_pin_info {
+	struct ia_css_isys_resolution input_res;
+	enum ia_css_isys_mipi_data_type dt;
+	enum ia_css_isys_mipi_store_mode mipi_store_mode;
+	enum ia_css_isys_mipi_dt_rename_mode dt_rename_mode;
+	enum ia_css_isys_mipi_data_type mapped_dt;
+};
+
+/**
+ * struct ia_css_isys_isa_cfg. Describes the ISA cfg
+ */
+struct ia_css_isys_isa_cfg {
+	/* The following sets the resolution information needed by the
+	 * IS GP registers.
+	 * For index IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED,
+	 * it is needed when there is a RAW_NS pin
+	 * For index IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED,
+	 * it is needed when there is a RAW_S pin
+	 */
+	struct ia_css_isys_resolution isa_res[N_IA_CSS_ISYS_RESOLUTION_INFO];
+	/* acc id 0, set if process required */
+	unsigned int blc_enabled;
+	/* acc id 1, set if process required */
+	unsigned int lsc_enabled;
+	/* acc id 2, set if process required */
+	unsigned int dpc_enabled;
+	/* acc id 3, set if process required */
+	unsigned int downscaler_enabled;
+	/* acc id 4, set if process required */
+	unsigned int awb_enabled;
+	/* acc id 5, set if process required */
+	unsigned int af_enabled;
+	/* acc id 6, set if process required */
+	unsigned int ae_enabled;
+	/* acc id 7, disabled, or type of paf enabled */
+	enum ia_css_isys_type_paf paf_type;
+	/* Send irq for any statistics buffers which got completed */
+	unsigned int send_irq_stats_ready;
+	/* Send response for any statistics buffers which got completed */
+	unsigned int send_resp_stats_ready;
+};
+
+/**
+ * struct ia_css_isys_cropping - cropping coordinates
+ * Left/Top offsets are INCLUDED
+ * Right/Bottom offsets are EXCLUDED
+ * Horizontal: [left_offset,right_offset)
+ * Vertical: [top_offset,bottom_offset)
+ * Padding is supported
+ */
+struct ia_css_isys_cropping {
+	int top_offset;
+	int left_offset;
+	int bottom_offset;
+	int right_offset;
+};
+
+ /**
+ * struct ia_css_isys_stream_cfg_data
+ * ISYS stream configuration data structure
+ * @src: Stream source index e.g.
MIPI_generator_0, CSI2-rx_1 + * @vc: MIPI Virtual Channel (up to 4 virtual per physical channel) + * @isl_use: indicates whether stream requires ISL and how + * @compfmt: de-compression setting for User Defined Data + * @isa_cfg: details about what ACCs are active if ISA is used + * @crop: defines cropping resolution for the + * maximum number of input pins which can be cropped, + * it is directly mapped to the HW devices + * @send_irq_sof_discarded: send irq on discarded frame sof response + * - if '1' it will override the send_resp_sof_discarded and send + * the response + * - if '0' the send_resp_sof_discarded will determine whether to + * send the response + * @send_irq_eof_discarded: send irq on discarded frame eof response + * - if '1' it will override the send_resp_eof_discarded and send + * the response + * - if '0' the send_resp_eof_discarded will determine whether to + * send the response + * @send_resp_sof_discarded: send response for discarded frame sof detected, + * used only when send_irq_sof_discarded is '0' + * @send_resp_eof_discarded: send response for discarded frame eof detected, + * used only when send_irq_eof_discarded is '0' + * @the rest: input/output pin descriptors + */ +struct ia_css_isys_stream_cfg_data { + enum ia_css_isys_stream_source src; + enum ia_css_isys_mipi_vc vc; + enum ia_css_isys_isl_use isl_use; + unsigned int compfmt; + struct ia_css_isys_isa_cfg isa_cfg; + struct ia_css_isys_cropping crop[N_IA_CSS_ISYS_CROPPING_LOCATION]; + unsigned int send_irq_sof_discarded; + unsigned int send_irq_eof_discarded; + unsigned int send_resp_sof_discarded; + unsigned int send_resp_eof_discarded; + unsigned int nof_input_pins; + unsigned int nof_output_pins; + struct ia_css_isys_input_pin_info input_pins[MAX_IPINS]; + struct ia_css_isys_output_pin_info output_pins[MAX_OPINS]; +}; + +/** + * struct ia_css_isys_frame_buff_set - frame buffer set + * @output_pins: output pin addresses + * @process_group_light: process_group_light buffer address + * @send_irq_sof: send irq on frame sof response + * - if '1' it will override the send_resp_sof and send + * the response + * - if '0' the send_resp_sof will determine whether to send + * the response + * @send_irq_eof: send irq on frame eof response + * - if '1' it will override the send_resp_eof and send + * the response + * - if '0' the send_resp_eof will determine whether to send + * the response + * @send_resp_sof: send response for frame sof detected, + * used only when send_irq_sof is '0' + * @send_resp_eof: send response for frame eof detected, + * used only when send_irq_eof is '0' + * @frame_counter: frame number associated with this buffer set. 
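+ * @send_irq_capture_ack: send irq on capture ack response
+ * @send_irq_capture_done: send irq on capture done response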
+ */ +struct ia_css_isys_frame_buff_set { + struct ia_css_isys_output_pin_payload output_pins[MAX_OPINS]; + struct ia_css_isys_param_pin process_group_light; + unsigned int send_irq_sof; + unsigned int send_irq_eof; + unsigned int send_irq_capture_ack; + unsigned int send_irq_capture_done; + unsigned int send_resp_sof; + unsigned int send_resp_eof; + uint8_t frame_counter; +}; + +/** + * struct ia_css_isys_resp_info + * @type: response type + * @stream_handle: stream id the response corresponds to + * @timestamp: Time information for event if available + * @error: error code if something went wrong + * @error_details: depending on error code, it may contain additional + * error info + * @pin: this var is valid for pin event related responses, + * contains pin addresses + * @pin_id: this var is valid for pin event related responses, + * contains pin id that the pin payload corresponds to + * @process_group_light: this var is valid for stats ready related responses, + * contains process group addresses + * @acc_id: this var is valid for stats ready related responses, + * contains accelerator id that finished producing + * all related statistics + * @frame_counter: valid for STREAM_START_AND_CAPTURE_DONE, + * STREAM_CAPTURE_DONE and STREAM_CAPTURE_DISCARDED + * @written_direct: indicates if frame was written direct (online mode) or to DDR. + */ +struct ia_css_isys_resp_info { + enum ia_css_isys_resp_type type; + unsigned int stream_handle; + unsigned int timestamp[2]; + enum ia_css_isys_error error; + unsigned int error_details; + struct ia_css_isys_output_pin_payload pin; + unsigned int pin_id; + struct ia_css_isys_param_pin process_group_light; + unsigned int acc_id; + uint8_t frame_counter; + uint8_t written_direct; +}; + +/** + * struct ia_css_proxy_write_req_val + * @request_id: Unique identifier for the write request + * (in case multiple write requests are issued for same register) + * @region_index: region id for the write request + * @offset: Offset to the specific register within the region + * @value: Value to be written to register + */ +struct ia_css_proxy_write_req_val { + uint32_t request_id; + uint32_t region_index; + uint32_t offset; + uint32_t value; +}; + +/** + * struct ia_css_proxy_write_req_resp + * @request_id: Unique identifier for the write request + * (in case multiple write requests are issued for same register) + * @error: error code if something went wrong + * @error_details: error detail includes either offset or region index + * information which caused proxy request to be rejected + * (invalid access request) + */ +struct ia_css_proxy_write_req_resp { + uint32_t request_id; + enum ia_css_proxy_error error; + uint32_t error_details; +}; + + +#endif /* __IA_CSS_ISYSAPI_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/isysapi.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/isysapi.mk new file mode 100644 index 000000000000..0d06298f9acb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/isysapi.mk @@ -0,0 +1,77 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. 
+# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is ISYSAPI + +include $(MODULES_DIR)/config/isys/subsystem_$(IPU_SYSVER).mk + +ISYSAPI_DIR=$${MODULES_DIR}/isysapi + +ISYSAPI_INTERFACE=$(ISYSAPI_DIR)/interface +ISYSAPI_SOURCES=$(ISYSAPI_DIR)/src +ISYSAPI_EXTINCLUDE=$${MODULES_DIR}/support +ISYSAPI_EXTINTERFACE=$${MODULES_DIR}/syscom/interface + +ISYSAPI_HOST_FILES += $(ISYSAPI_SOURCES)/ia_css_isys_public.c + +ISYSAPI_HOST_FILES += $(ISYSAPI_SOURCES)/ia_css_isys_private.c + +# ISYSAPI Trace Log Level = ISYSAPI_TRACE_LOG_LEVEL_NORMAL +# Other options are [ISYSAPI_TRACE_LOG_LEVEL_OFF, ISYSAPI_TRACE_LOG_LEVEL_DEBUG] +ifndef ISYSAPI_TRACE_CONFIG_HOST + ISYSAPI_TRACE_CONFIG_HOST=ISYSAPI_TRACE_LOG_LEVEL_NORMAL +endif +ifndef ISYSAPI_TRACE_CONFIG_FW + ISYSAPI_TRACE_CONFIG_FW=ISYSAPI_TRACE_LOG_LEVEL_NORMAL +endif + +ISYSAPI_HOST_CPPFLAGS += -DISYSAPI_TRACE_CONFIG=$(ISYSAPI_TRACE_CONFIG_HOST) +ISYSAPI_FW_CPPFLAGS += -DISYSAPI_TRACE_CONFIG=$(ISYSAPI_TRACE_CONFIG_FW) + +ISYSAPI_HOST_FILES += $(ISYSAPI_SOURCES)/ia_css_isys_public_trace.c + +ISYSAPI_HOST_CPPFLAGS += -I$(ISYSAPI_INTERFACE) +ISYSAPI_HOST_CPPFLAGS += -I$(ISYSAPI_EXTINCLUDE) +ISYSAPI_HOST_CPPFLAGS += -I$(ISYSAPI_EXTINTERFACE) +ISYSAPI_HOST_CPPFLAGS += -I$(HIVESDK)/systems/ipu_system/dai/include +ISYSAPI_HOST_CPPFLAGS += -I$(HIVESDK)/systems/ipu_system/dai/include/default_system +ISYSAPI_HOST_CPPFLAGS += -I$(HIVESDK)/include/ipu/dai +ISYSAPI_HOST_CPPFLAGS += -I$(HIVESDK)/include/ipu + +ISYSAPI_FW_FILES += $(ISYSAPI_SOURCES)/isys_fw.c +ISYSAPI_FW_FILES += $(ISYSAPI_SOURCES)/isys_fw_utils.c + +ISYSAPI_FW_CPPFLAGS += -I$(ISYSAPI_INTERFACE) +ISYSAPI_FW_CPPFLAGS += -I$(ISYSAPI_SOURCES)/$(IPU_SYSVER) +ISYSAPI_FW_CPPFLAGS += -I$(ISYSAPI_EXTINCLUDE) +ISYSAPI_FW_CPPFLAGS += -I$(ISYSAPI_EXTINTERFACE) +ISYSAPI_FW_CPPFLAGS += -I$(HIVESDK)/systems/ipu_system/dai/include +ISYSAPI_FW_CPPFLAGS += -I$(HIVESDK)/systems/ipu_system/dai/include/default_system +ISYSAPI_FW_CPPFLAGS += -I$(HIVESDK)/include/ipu/dai +ISYSAPI_FW_CPPFLAGS += -I$(HIVESDK)/include/ipu + +ISYSAPI_FW_CPPFLAGS += -DWA_HSD1805168877=$(WA_HSD1805168877) + +ISYSAPI_HOST_CPPFLAGS += -DREGMEM_OFFSET=$(REGMEM_OFFSET) + +ifeq ($(ISYS_HAS_DUAL_CMD_CTX_SUPPORT), 1) +ISYSAPI_HOST_CPPFLAGS += -DHAS_DUAL_CMD_CTX_SUPPORT=$(ISYS_HAS_DUAL_CMD_CTX_SUPPORT) +ISYSAPI_FW_CPPFLAGS += -DHAS_DUAL_CMD_CTX_SUPPORT=$(ISYS_HAS_DUAL_CMD_CTX_SUPPORT) +endif + +ifdef AB_CONFIG_ARRAY_SIZE +ISYSAPI_FW_CPPFLAGS += -DAB_CONFIG_ARRAY_SIZE=$(AB_CONFIG_ARRAY_SIZE) +else +ISYSAPI_FW_CPPFLAGS += -DAB_CONFIG_ARRAY_SIZE=1 +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_private.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_private.c new file mode 100644 index 000000000000..ec92f14ee238 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_private.c @@ -0,0 +1,981 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_isys_private.h" +/* The following is needed for the contained data types */ +#include "ia_css_isys_fw_bridged_types.h" +#include "ia_css_isysapi_types.h" +#include "ia_css_syscom_config.h" +/* + * The following header file is needed for the + * stddef.h (NULL), + * limits.h (CHAR_BIT definition). + */ +#include "type_support.h" +#include "error_support.h" +#include "ia_css_isysapi_trace.h" +#include "misc_support.h" +#include "cpu_mem_support.h" +#include "storage_class.h" + +#include "ia_css_shared_buffer_cpu.h" + +/* + * defines how many stream cfg host may sent concurrently + * before receiving the stream ack + */ +#define STREAM_CFG_BUFS_PER_MSG_QUEUE (1) +#define NEXT_FRAME_BUFS_PER_MSG_QUEUE \ + (ctx->send_queue_size[IA_CSS_ISYS_QUEUE_TYPE_MSG] + 4 + 1) +/* + * There is an edge case that host has filled the full queue + * with capture requests (ctx->send_queue_size), + * SP reads and HW-queues all of them (4), + * while in the meantime host continues queueing capture requests + * without checking for responses which SP will have sent with each HW-queue + * capture request (if it does then the 4 is much more improbable to appear, + * but still not impossible). + * After this, host tries to queue an extra capture request + * even though there is no space in the msg queue because msg queue + * is checked at a later point, so +1 is needed + */ + +/* + * A DT is supported assuming when the MIPI packets + * have the same size even when even/odd lines are different, + * and the size is the average per line + */ +#define IA_CSS_UNSUPPORTED_DATA_TYPE (0) +static const uint32_t +ia_css_isys_extracted_bits_per_pixel_per_mipi_data_type[ + N_IA_CSS_ISYS_MIPI_DATA_TYPE] = { + /* + * Remove Prefix "IA_CSS_ISYS_MIPI_DATA_TYPE_" in comments + * to align with Checkpatch 80 characters requirements + * For detailed comments of each field, please refer to + * definition of enum ia_css_isys_mipi_data_type{} in + * isysapi/interface/ia_css_isysapi_fw_types.h + */ + 64, /* [0x00] FRAME_START_CODE */ + 64, /* [0x01] FRAME_END_CODE */ + 64, /* [0x02] LINE_START_CODE Optional */ + 64, /* [0x03] LINE_END_CODE Optional */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x04] RESERVED_0x04 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x05] RESERVED_0x05 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x06] RESERVED_0x06 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x07] RESERVED_0x07 */ + 64, /* [0x08] GENERIC_SHORT1 */ + 64, /* [0x09] GENERIC_SHORT2 */ + 64, /* [0x0A] GENERIC_SHORT3 */ + 64, /* [0x0B] GENERIC_SHORT4 */ + 64, /* [0x0C] GENERIC_SHORT5 */ + 64, /* [0x0D] GENERIC_SHORT6 */ + 64, /* [0x0E] GENERIC_SHORT7 */ + 64, /* [0x0F] GENERIC_SHORT8 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x10] NULL To be ignored */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x11] BLANKING_DATA To be ignored */ + 8, /* [0x12] EMBEDDED non Image Data */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x13] RESERVED_0x13 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x14] RESERVED_0x14 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x15] RESERVED_0x15 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x16] RESERVED_0x16 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x17] RESERVED_0x17 */ + 12, /* [0x18] YUV420_8 */ + 15, /* [0x19] YUV420_10 */ + 12, /* [0x1A] YUV420_8_LEGACY */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x1B] RESERVED_0x1B */ + 12, /* 
[0x1C] YUV420_8_SHIFT */ + 15, /* [0x1D] YUV420_10_SHIFT */ + 16, /* [0x1E] YUV422_8 */ + 20, /* [0x1F] YUV422_10 */ + 16, /* [0x20] RGB_444 */ + 16, /* [0x21] RGB_555 */ + 16, /* [0x22] RGB_565 */ + 18, /* [0x23] RGB_666 */ + 24, /* [0x24] RGB_888 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x25] RESERVED_0x25 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x26] RESERVED_0x26 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x27] RESERVED_0x27 */ + 6, /* [0x28] RAW_6 */ + 7, /* [0x29] RAW_7 */ + 8, /* [0x2A] RAW_8 */ + 10, /* [0x2B] RAW_10 */ + 12, /* [0x2C] RAW_12 */ + 14, /* [0x2D] RAW_14 */ + 16, /* [0x2E] RAW_16 */ + 8, /* [0x2F] BINARY_8 */ + 8, /* [0x30] USER_DEF1 */ + 8, /* [0x31] USER_DEF2 */ + 8, /* [0x32] USER_DEF3 */ + 8, /* [0x33] USER_DEF4 */ + 8, /* [0x34] USER_DEF5 */ + 8, /* [0x35] USER_DEF6 */ + 8, /* [0x36] USER_DEF7 */ + 8, /* [0x37] USER_DEF8 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x38] RESERVED_0x38 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x39] RESERVED_0x39 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3A] RESERVED_0x3A */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3B] RESERVED_0x3B */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3C] RESERVED_0x3C */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3D] RESERVED_0x3D */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3E] RESERVED_0x3E */ + IA_CSS_UNSUPPORTED_DATA_TYPE /* [0x3F] RESERVED_0x3F */ +}; + +STORAGE_CLASS_INLINE int get_stream_cfg_buff_slot( + struct ia_css_isys_context *ctx, + int stream_handle, + int stream_cfg_buff_counter) +{ + NOT_USED(ctx); + return (stream_handle * STREAM_CFG_BUFS_PER_MSG_QUEUE) + + stream_cfg_buff_counter; +} + +STORAGE_CLASS_INLINE int get_next_frame_buff_slot( + struct ia_css_isys_context *ctx, + int stream_handle, + int next_frame_buff_counter) +{ + NOT_USED(ctx); + return (stream_handle * NEXT_FRAME_BUFS_PER_MSG_QUEUE) + + next_frame_buff_counter; +} + +STORAGE_CLASS_INLINE void free_comm_buff_shared_mem( + struct ia_css_isys_context *ctx, + int stream_handle, + int stream_cfg_buff_counter, + int next_frame_buff_counter) +{ + int buff_slot; + + /* Initialiser is the current value of stream_handle */ + for (; stream_handle >= 0; stream_handle--) { + /* + * Initialiser is the current value of stream_cfg_buff_counter + */ + for (; stream_cfg_buff_counter >= 0; + stream_cfg_buff_counter--) { + buff_slot = get_stream_cfg_buff_slot( + ctx, stream_handle, stream_cfg_buff_counter); + ia_css_shared_buffer_free( + ctx->ssid, ctx->mmid, + ctx->isys_comm_buffer_queue. + pstream_cfg_buff_id[buff_slot]); + } + /* Set for the next iteration */ + stream_cfg_buff_counter = STREAM_CFG_BUFS_PER_MSG_QUEUE - 1; + /* + * Initialiser is the current value of next_frame_buff_counter + */ + for (; next_frame_buff_counter >= 0; + next_frame_buff_counter--) { + buff_slot = get_next_frame_buff_slot( + ctx, stream_handle, next_frame_buff_counter); + ia_css_shared_buffer_free( + ctx->ssid, ctx->mmid, + ctx->isys_comm_buffer_queue. 
+ pnext_frame_buff_id[buff_slot]); + } + next_frame_buff_counter = NEXT_FRAME_BUFS_PER_MSG_QUEUE - 1; + } +} + +/* + * ia_css_isys_constr_comm_buff_queue() + */ +int ia_css_isys_constr_comm_buff_queue( + struct ia_css_isys_context *ctx) +{ + int stream_handle; + int stream_cfg_buff_counter; + int next_frame_buff_counter; + int buff_slot; + + verifret(ctx, EFAULT); /* Host Consistency */ + + ctx->isys_comm_buffer_queue.pstream_cfg_buff_id = + (ia_css_shared_buffer *) + ia_css_cpu_mem_alloc(ctx-> + num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] * + STREAM_CFG_BUFS_PER_MSG_QUEUE * + sizeof(ia_css_shared_buffer)); + verifret(ctx->isys_comm_buffer_queue.pstream_cfg_buff_id != NULL, + EFAULT); + + ctx->isys_comm_buffer_queue.pnext_frame_buff_id = + (ia_css_shared_buffer *) + ia_css_cpu_mem_alloc(ctx-> + num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] * + NEXT_FRAME_BUFS_PER_MSG_QUEUE * + sizeof(ia_css_shared_buffer)); + if (ctx->isys_comm_buffer_queue.pnext_frame_buff_id == NULL) { + ia_css_cpu_mem_free( + ctx->isys_comm_buffer_queue.pstream_cfg_buff_id); + verifret(0, EFAULT); /* return EFAULT; equivalent */ + } + + for (stream_handle = 0; stream_handle < + (int)ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + stream_handle++) { + /* Initialisation needs to happen here for both loops */ + stream_cfg_buff_counter = 0; + next_frame_buff_counter = 0; + + for (; stream_cfg_buff_counter < STREAM_CFG_BUFS_PER_MSG_QUEUE; + stream_cfg_buff_counter++) { + buff_slot = get_stream_cfg_buff_slot( + ctx, stream_handle, stream_cfg_buff_counter); + ctx->isys_comm_buffer_queue. + pstream_cfg_buff_id[buff_slot] = + ia_css_shared_buffer_alloc( + ctx->ssid, ctx->mmid, + sizeof(struct + ia_css_isys_stream_cfg_data_comm)); + if (ctx->isys_comm_buffer_queue.pstream_cfg_buff_id[ + buff_slot] == 0) { + goto SHARED_BUFF_ALLOC_FAILURE; + } + } + ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle] = 0; + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle] = 0; + for (; next_frame_buff_counter < + (int)NEXT_FRAME_BUFS_PER_MSG_QUEUE; + next_frame_buff_counter++) { + buff_slot = get_next_frame_buff_slot( + ctx, stream_handle, + next_frame_buff_counter); + ctx->isys_comm_buffer_queue. + pnext_frame_buff_id[buff_slot] = + ia_css_shared_buffer_alloc( + ctx->ssid, ctx->mmid, + sizeof(struct + ia_css_isys_frame_buff_set_comm)); + if (ctx->isys_comm_buffer_queue. + pnext_frame_buff_id[buff_slot] == 0) { + goto SHARED_BUFF_ALLOC_FAILURE; + } + } + ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle] = 0; + ctx->isys_comm_buffer_queue. 
+ next_frame_queue_tail[stream_handle] = 0; + } + + return 0; + +SHARED_BUFF_ALLOC_FAILURE: + /* stream_handle has correct value for calling the free function */ + /* prepare stream_cfg_buff_counter for calling the free function */ + stream_cfg_buff_counter--; + /* prepare next_frame_buff_counter for calling the free function */ + next_frame_buff_counter--; + free_comm_buff_shared_mem( + ctx, + stream_handle, + stream_cfg_buff_counter, + next_frame_buff_counter); + + verifret(0, EFAULT); /* return EFAULT; equivalent */ +} + +/* + * ia_css_isys_force_unmap_comm_buff_queue() + */ +int ia_css_isys_force_unmap_comm_buff_queue( + struct ia_css_isys_context *ctx) +{ + int stream_handle; + int buff_slot; + + verifret(ctx, EFAULT); /* Host Consistency */ + + IA_CSS_TRACE_0(ISYSAPI, WARNING, + "ia_css_isys_force_unmap_comm_buff_queue() called\n"); + for (stream_handle = 0; stream_handle < + (int)ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + stream_handle++) { + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle] - + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle]) <= + STREAM_CFG_BUFS_PER_MSG_QUEUE, EPROTO); + for (; ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle] < + ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle]; + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle]++) { + IA_CSS_TRACE_1(ISYSAPI, WARNING, + "CSS forced unmapping stream_cfg %d\n", + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle]); + buff_slot = get_stream_cfg_buff_slot( + ctx, stream_handle, + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle] % + STREAM_CFG_BUFS_PER_MSG_QUEUE); + ia_css_shared_buffer_css_unmap( + ctx->isys_comm_buffer_queue. + pstream_cfg_buff_id[buff_slot]); + } + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle] - + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle]) <= + NEXT_FRAME_BUFS_PER_MSG_QUEUE, EPROTO); + for (; ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle] < + ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle]; + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle]++) { + IA_CSS_TRACE_1(ISYSAPI, WARNING, + "CSS forced unmapping next_frame %d\n", + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle]); + buff_slot = get_next_frame_buff_slot( + ctx, stream_handle, + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle] % + NEXT_FRAME_BUFS_PER_MSG_QUEUE); + ia_css_shared_buffer_css_unmap( + ctx->isys_comm_buffer_queue. 
+ pnext_frame_buff_id[buff_slot]); + } + } + + return 0; +} + +/* + * ia_css_isys_destr_comm_buff_queue() + */ +int ia_css_isys_destr_comm_buff_queue( + struct ia_css_isys_context *ctx) +{ + verifret(ctx, EFAULT); /* Host Consistency */ + + free_comm_buff_shared_mem( + ctx, + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] - 1, + STREAM_CFG_BUFS_PER_MSG_QUEUE - 1, + NEXT_FRAME_BUFS_PER_MSG_QUEUE - 1); + + ia_css_cpu_mem_free(ctx->isys_comm_buffer_queue.pnext_frame_buff_id); + ia_css_cpu_mem_free(ctx->isys_comm_buffer_queue.pstream_cfg_buff_id); + + return 0; +} + +STORAGE_CLASS_INLINE void resolution_host_to_css( + const struct ia_css_isys_resolution *resolution_host, + struct ia_css_isys_resolution_comm *resolution_css) +{ + resolution_css->width = resolution_host->width; + resolution_css->height = resolution_host->height; +} + +STORAGE_CLASS_INLINE void output_pin_payload_host_to_css( + const struct ia_css_isys_output_pin_payload *output_pin_payload_host, + struct ia_css_isys_output_pin_payload_comm *output_pin_payload_css) +{ + output_pin_payload_css->out_buf_id = + output_pin_payload_host->out_buf_id; + output_pin_payload_css->addr = output_pin_payload_host->addr; +#ifdef ENABLE_DEC400 + output_pin_payload_css->compress = output_pin_payload_host->compress; +#else + output_pin_payload_css->compress = 0; +#endif /* ENABLE_DEC400 */ +} + +STORAGE_CLASS_INLINE void output_pin_info_host_to_css( + const struct ia_css_isys_output_pin_info *output_pin_info_host, + struct ia_css_isys_output_pin_info_comm *output_pin_info_css) +{ + output_pin_info_css->input_pin_id = output_pin_info_host->input_pin_id; + resolution_host_to_css( + &output_pin_info_host->output_res, + &output_pin_info_css->output_res); + output_pin_info_css->stride = output_pin_info_host->stride; + output_pin_info_css->pt = output_pin_info_host->pt; + output_pin_info_css->watermark_in_lines = + output_pin_info_host->watermark_in_lines; + output_pin_info_css->send_irq = output_pin_info_host->send_irq; + output_pin_info_css->ft = output_pin_info_host->ft; + output_pin_info_css->link_id = output_pin_info_host->link_id; +#ifdef ENABLE_DEC400 + output_pin_info_css->reserve_compression = output_pin_info_host->reserve_compression; + output_pin_info_css->payload_buf_size = output_pin_info_host->payload_buf_size; +#else + output_pin_info_css->reserve_compression = 0; + /* Though payload_buf_size was added for compression, set sane value for + * payload_buf_size, just in case... 
+ */ + output_pin_info_css->payload_buf_size = + output_pin_info_host->stride * output_pin_info_host->output_res.height; +#endif /* ENABLE_DEC400 */ +} + +STORAGE_CLASS_INLINE void param_pin_host_to_css( + const struct ia_css_isys_param_pin *param_pin_host, + struct ia_css_isys_param_pin_comm *param_pin_css) +{ + param_pin_css->param_buf_id = param_pin_host->param_buf_id; + param_pin_css->addr = param_pin_host->addr; +} + +STORAGE_CLASS_INLINE void input_pin_info_host_to_css( + const struct ia_css_isys_input_pin_info *input_pin_info_host, + struct ia_css_isys_input_pin_info_comm *input_pin_info_css) +{ + resolution_host_to_css( + &input_pin_info_host->input_res, + &input_pin_info_css->input_res); + if (input_pin_info_host->dt >= N_IA_CSS_ISYS_MIPI_DATA_TYPE) { + IA_CSS_TRACE_0(ISYSAPI, ERROR, + "input_pin_info_host->dt out of range\n"); + return; + } + if (input_pin_info_host->dt_rename_mode >= N_IA_CSS_ISYS_MIPI_DT_MODE) { + IA_CSS_TRACE_0(ISYSAPI, ERROR, + "input_pin_info_host->dt_rename_mode out of range\n"); + return; + } + /* Mapped DT check if data type renaming is being used*/ + if (input_pin_info_host->dt_rename_mode == IA_CSS_ISYS_MIPI_DT_RENAMED_MODE && + input_pin_info_host->mapped_dt >= N_IA_CSS_ISYS_MIPI_DATA_TYPE) { + IA_CSS_TRACE_0(ISYSAPI, ERROR, + "input_pin_info_host->mapped_dt out of range\n"); + return; + } + input_pin_info_css->dt = input_pin_info_host->dt; + input_pin_info_css->mipi_store_mode = + input_pin_info_host->mipi_store_mode; + input_pin_info_css->bits_per_pix = + ia_css_isys_extracted_bits_per_pixel_per_mipi_data_type[ + input_pin_info_host->dt]; + if (input_pin_info_host->dt_rename_mode == IA_CSS_ISYS_MIPI_DT_RENAMED_MODE) { + input_pin_info_css->mapped_dt = input_pin_info_host->mapped_dt; + } + else { + input_pin_info_css->mapped_dt = N_IA_CSS_ISYS_MIPI_DATA_TYPE; + } +} + +STORAGE_CLASS_INLINE void isa_cfg_host_to_css( + const struct ia_css_isys_isa_cfg *isa_cfg_host, + struct ia_css_isys_isa_cfg_comm *isa_cfg_css) +{ + unsigned int i; + + for (i = 0; i < N_IA_CSS_ISYS_RESOLUTION_INFO; i++) { + resolution_host_to_css(&isa_cfg_host->isa_res[i], + &isa_cfg_css->isa_res[i]); + } + isa_cfg_css->cfg_fields = 0; + ISA_CFG_FIELD_SET(BLC_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->blc_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(LSC_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->lsc_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(DPC_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->dpc_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(DOWNSCALER_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->downscaler_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(AWB_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->awb_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(AF_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->af_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(AE_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->ae_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(PAF_TYPE, isa_cfg_css->cfg_fields, + isa_cfg_host->paf_type); + ISA_CFG_FIELD_SET(SEND_IRQ_STATS_READY, isa_cfg_css->cfg_fields, + isa_cfg_host->send_irq_stats_ready ? 1 : 0); + ISA_CFG_FIELD_SET(SEND_RESP_STATS_READY, isa_cfg_css->cfg_fields, + (isa_cfg_host->send_irq_stats_ready || + isa_cfg_host->send_resp_stats_ready) ? 
1 : 0); +} + +STORAGE_CLASS_INLINE void cropping_host_to_css( + const struct ia_css_isys_cropping *cropping_host, + struct ia_css_isys_cropping_comm *cropping_css) +{ + cropping_css->top_offset = cropping_host->top_offset; + cropping_css->left_offset = cropping_host->left_offset; + cropping_css->bottom_offset = cropping_host->bottom_offset; + cropping_css->right_offset = cropping_host->right_offset; + +} + +STORAGE_CLASS_INLINE int stream_cfg_data_host_to_css( + const struct ia_css_isys_stream_cfg_data *stream_cfg_data_host, + struct ia_css_isys_stream_cfg_data_comm *stream_cfg_data_css) +{ + unsigned int i; + + stream_cfg_data_css->src = stream_cfg_data_host->src; + stream_cfg_data_css->vc = stream_cfg_data_host->vc; + stream_cfg_data_css->isl_use = stream_cfg_data_host->isl_use; + stream_cfg_data_css->compfmt = stream_cfg_data_host->compfmt; + stream_cfg_data_css->isa_cfg.cfg_fields = 0; + + switch (stream_cfg_data_host->isl_use) { + case IA_CSS_ISYS_USE_SINGLE_ISA: + isa_cfg_host_to_css(&stream_cfg_data_host->isa_cfg, + &stream_cfg_data_css->isa_cfg); + /* deliberate fall-through */ + case IA_CSS_ISYS_USE_SINGLE_DUAL_ISL: + for (i = 0; i < N_IA_CSS_ISYS_CROPPING_LOCATION; i++) { + cropping_host_to_css(&stream_cfg_data_host->crop[i], + &stream_cfg_data_css->crop[i]); + } + break; + case IA_CSS_ISYS_USE_NO_ISL_NO_ISA: + break; + default: + break; + } + + stream_cfg_data_css->send_irq_sof_discarded = + stream_cfg_data_host->send_irq_sof_discarded ? 1 : 0; + stream_cfg_data_css->send_irq_eof_discarded = + stream_cfg_data_host->send_irq_eof_discarded ? 1 : 0; + stream_cfg_data_css->send_resp_sof_discarded = + stream_cfg_data_host->send_irq_sof_discarded ? + 1 : stream_cfg_data_host->send_resp_sof_discarded; + stream_cfg_data_css->send_resp_eof_discarded = + stream_cfg_data_host->send_irq_eof_discarded ? + 1 : stream_cfg_data_host->send_resp_eof_discarded; + stream_cfg_data_css->nof_input_pins = + stream_cfg_data_host->nof_input_pins; + stream_cfg_data_css->nof_output_pins = + stream_cfg_data_host->nof_output_pins; + for (i = 0; i < stream_cfg_data_host->nof_input_pins; i++) { + input_pin_info_host_to_css( + &stream_cfg_data_host->input_pins[i], + &stream_cfg_data_css->input_pins[i]); + verifret(stream_cfg_data_css->input_pins[i].bits_per_pix, + EINVAL); + } + for (i = 0; i < stream_cfg_data_host->nof_output_pins; i++) { + output_pin_info_host_to_css( + &stream_cfg_data_host->output_pins[i], + &stream_cfg_data_css->output_pins[i]); + } + return 0; +} + +STORAGE_CLASS_INLINE void frame_buff_set_host_to_css( + const struct ia_css_isys_frame_buff_set *frame_buff_set_host, + struct ia_css_isys_frame_buff_set_comm *frame_buff_set_css) +{ + int i; + + for (i = 0; i < MAX_OPINS; i++) { + output_pin_payload_host_to_css( + &frame_buff_set_host->output_pins[i], + &frame_buff_set_css->output_pins[i]); + } + + param_pin_host_to_css(&frame_buff_set_host->process_group_light, + &frame_buff_set_css->process_group_light); + frame_buff_set_css->send_irq_sof = + frame_buff_set_host->send_irq_sof ? 1 : 0; + frame_buff_set_css->send_irq_eof = + frame_buff_set_host->send_irq_eof ? 1 : 0; + frame_buff_set_css->send_irq_capture_done = + (uint8_t)frame_buff_set_host->send_irq_capture_done; + frame_buff_set_css->send_irq_capture_ack = + frame_buff_set_host->send_irq_capture_ack ? 1 : 0; + frame_buff_set_css->send_resp_sof = + frame_buff_set_host->send_irq_sof ? + 1 : frame_buff_set_host->send_resp_sof; + frame_buff_set_css->send_resp_eof = + frame_buff_set_host->send_irq_eof ? 
+ 1 : frame_buff_set_host->send_resp_eof; + frame_buff_set_css->frame_counter = + frame_buff_set_host->frame_counter; +} + +STORAGE_CLASS_INLINE void buffer_partition_host_to_css( + const struct ia_css_isys_buffer_partition *buffer_partition_host, + struct ia_css_isys_buffer_partition_comm *buffer_partition_css) +{ + int i; + + for (i = 0; i < STREAM_ID_MAX; i++) { + buffer_partition_css->num_gda_pages[i] = + buffer_partition_host->num_gda_pages[i]; + } +} + +STORAGE_CLASS_INLINE void output_pin_payload_css_to_host( + const struct ia_css_isys_output_pin_payload_comm * + output_pin_payload_css, + struct ia_css_isys_output_pin_payload *output_pin_payload_host) +{ + output_pin_payload_host->out_buf_id = + output_pin_payload_css->out_buf_id; + output_pin_payload_host->addr = output_pin_payload_css->addr; +#ifdef ENABLE_DEC400 + output_pin_payload_host->compress = output_pin_payload_css->compress; +#else + output_pin_payload_host->compress = 0; +#endif /* ENABLE_DEC400 */ +} + +STORAGE_CLASS_INLINE void param_pin_css_to_host( + const struct ia_css_isys_param_pin_comm *param_pin_css, + struct ia_css_isys_param_pin *param_pin_host) +{ + param_pin_host->param_buf_id = param_pin_css->param_buf_id; + param_pin_host->addr = param_pin_css->addr; + +} + +STORAGE_CLASS_INLINE void resp_info_css_to_host( + const struct ia_css_isys_resp_info_comm *resp_info_css, + struct ia_css_isys_resp_info *resp_info_host) +{ + resp_info_host->type = resp_info_css->type; + resp_info_host->timestamp[0] = resp_info_css->timestamp[0]; + resp_info_host->timestamp[1] = resp_info_css->timestamp[1]; + resp_info_host->stream_handle = resp_info_css->stream_handle; + resp_info_host->error = resp_info_css->error_info.error; + resp_info_host->error_details = + resp_info_css->error_info.error_details; + output_pin_payload_css_to_host( + &resp_info_css->pin, &resp_info_host->pin); + resp_info_host->pin_id = resp_info_css->pin_id; + param_pin_css_to_host(&resp_info_css->process_group_light, + &resp_info_host->process_group_light); + resp_info_host->acc_id = resp_info_css->acc_id; + resp_info_host->frame_counter = resp_info_css->frame_counter; + resp_info_host->written_direct = resp_info_css->written_direct; +} + +/* + * ia_css_isys_constr_fw_stream_cfg() + */ +int ia_css_isys_constr_fw_stream_cfg( + struct ia_css_isys_context *ctx, + const unsigned int stream_handle, + ia_css_shared_buffer_css_address *pstream_cfg_fw, + ia_css_shared_buffer *pbuf_stream_cfg_id, + const struct ia_css_isys_stream_cfg_data *stream_cfg) +{ + ia_css_shared_buffer_cpu_address stream_cfg_cpu_addr; + ia_css_shared_buffer_css_address stream_cfg_css_addr; + int buff_slot; + int retval = 0; + unsigned int wrap_compensation; + const unsigned int wrap_condition = 0xFFFFFFFF; + + verifret(ctx, EFAULT); /* Host Consistency */ + verifret(pstream_cfg_fw, EFAULT); /* Host Consistency */ + verifret(pbuf_stream_cfg_id, EFAULT); /* Host Consistency */ + verifret(stream_cfg, EFAULT); /* Host Consistency */ + + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle] - + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle]) < + STREAM_CFG_BUFS_PER_MSG_QUEUE, EPROTO); + buff_slot = get_stream_cfg_buff_slot(ctx, stream_handle, + ctx->isys_comm_buffer_queue. 
+ stream_cfg_queue_head[stream_handle] % + STREAM_CFG_BUFS_PER_MSG_QUEUE); + *pbuf_stream_cfg_id = + ctx->isys_comm_buffer_queue.pstream_cfg_buff_id[buff_slot]; + /* Host-FW Consistency */ + verifret(*pbuf_stream_cfg_id, EADDRNOTAVAIL); + + stream_cfg_cpu_addr = + ia_css_shared_buffer_cpu_map(*pbuf_stream_cfg_id); + /* Host-FW Consistency */ + verifret(stream_cfg_cpu_addr, EADDRINUSE); + + retval = stream_cfg_data_host_to_css(stream_cfg, stream_cfg_cpu_addr); + if (retval) + return retval; + + stream_cfg_cpu_addr = + ia_css_shared_buffer_cpu_unmap(*pbuf_stream_cfg_id); + /* Host Consistency */ + verifret(stream_cfg_cpu_addr, EADDRINUSE); + + stream_cfg_css_addr = + ia_css_shared_buffer_css_map(*pbuf_stream_cfg_id); + /* Host Consistency */ + verifret(stream_cfg_css_addr, EADDRINUSE); + + ia_css_shared_buffer_css_update(ctx->mmid, *pbuf_stream_cfg_id); + + *pstream_cfg_fw = stream_cfg_css_addr; + + /* + * cover head wrap around extreme case, + * in which case force tail to wrap around too + * while maintaining diff and modulo + */ + if (ctx->isys_comm_buffer_queue.stream_cfg_queue_head[stream_handle] == + wrap_condition) { + /* Value to be added to both head and tail */ + wrap_compensation = + /* + * Distance of wrap_condition to 0, + * will need to be added for wrapping around head to 0 + */ + (0 - wrap_condition) + + /* + * To force tail to also wrap around, + * since it has to happen concurrently + */ + STREAM_CFG_BUFS_PER_MSG_QUEUE + + /* To preserve the same modulo, + * since the previous will result in head modulo 0 + */ + (wrap_condition % STREAM_CFG_BUFS_PER_MSG_QUEUE); + ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle] += + wrap_compensation; + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle] += + wrap_compensation; + } + ctx->isys_comm_buffer_queue.stream_cfg_queue_head[stream_handle]++; + + return 0; +} + +/* + * ia_css_isys_constr_fw_next_frame() + */ +int ia_css_isys_constr_fw_next_frame( + struct ia_css_isys_context *ctx, + const unsigned int stream_handle, + ia_css_shared_buffer_css_address *pnext_frame_fw, + ia_css_shared_buffer *pbuf_next_frame_id, + const struct ia_css_isys_frame_buff_set *next_frame) +{ + ia_css_shared_buffer_cpu_address next_frame_cpu_addr; + ia_css_shared_buffer_css_address next_frame_css_addr; + int buff_slot; + unsigned int wrap_compensation; + const unsigned int wrap_condition = 0xFFFFFFFF; + + verifret(ctx, EFAULT); /* Host Consistency */ + verifret(pnext_frame_fw, EFAULT); /* Host Consistency */ + verifret(next_frame, EFAULT); /* Host Consistency */ + verifret(pbuf_next_frame_id, EFAULT); /* Host Consistency */ + + /* For some reason responses are not dequeued in time */ + verifret((ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle] - + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle]) < + NEXT_FRAME_BUFS_PER_MSG_QUEUE, EPERM); + buff_slot = get_next_frame_buff_slot(ctx, stream_handle, + ctx->isys_comm_buffer_queue. 
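
ia_css_isys_constr_fw_stream_cfg() above walks each config buffer through a fixed ownership hand-off before queuing it to firmware. Condensed to its five calls (all present above, error handling stripped; buf stands for *pbuf_stream_cfg_id):

	cpu = ia_css_shared_buffer_cpu_map(buf);	/* host gains write access */
	stream_cfg_data_host_to_css(stream_cfg, cpu);	/* marshal into FW layout  */
	ia_css_shared_buffer_cpu_unmap(buf);		/* host hands the buffer off */
	css = ia_css_shared_buffer_css_map(buf);	/* CSS-visible address     */
	ia_css_shared_buffer_css_update(ctx->mmid, buf); /* flush so FW sees data  */
	/* css is returned via *pstream_cfg_fw and becomes the token payload */
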
+ next_frame_queue_head[stream_handle] % + NEXT_FRAME_BUFS_PER_MSG_QUEUE); + *pbuf_next_frame_id = + ctx->isys_comm_buffer_queue.pnext_frame_buff_id[buff_slot]; + /* Host-FW Consistency */ + verifret(*pbuf_next_frame_id, EADDRNOTAVAIL); + + /* map it in cpu */ + next_frame_cpu_addr = + ia_css_shared_buffer_cpu_map(*pbuf_next_frame_id); + /* Host-FW Consistency */ + verifret(next_frame_cpu_addr, EADDRINUSE); + + frame_buff_set_host_to_css(next_frame, next_frame_cpu_addr); + + /* unmap the buffer from cpu */ + next_frame_cpu_addr = + ia_css_shared_buffer_cpu_unmap(*pbuf_next_frame_id); + /* Host Consistency */ + verifret(next_frame_cpu_addr, EADDRINUSE); + + /* map it to css */ + next_frame_css_addr = + ia_css_shared_buffer_css_map(*pbuf_next_frame_id); + /* Host Consistency */ + verifret(next_frame_css_addr, EADDRINUSE); + + ia_css_shared_buffer_css_update(ctx->mmid, *pbuf_next_frame_id); + + *pnext_frame_fw = next_frame_css_addr; + + /* + * cover head wrap around extreme case, + * in which case force tail to wrap around too + * while maintaining diff and modulo + */ + if (ctx->isys_comm_buffer_queue.next_frame_queue_head[stream_handle] == + wrap_condition) { + /* Value to be added to both head and tail */ + wrap_compensation = + /* + * Distance of wrap_condition to 0, + * will need to be added for wrapping around head to 0 + */ + (0 - wrap_condition) + + /* + * To force tail to also wrap around, + * since it has to happen concurrently + */ + NEXT_FRAME_BUFS_PER_MSG_QUEUE + + /* + * To preserve the same modulo, + * since the previous will result in head modulo 0 + */ + (wrap_condition % NEXT_FRAME_BUFS_PER_MSG_QUEUE); + ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle] += + wrap_compensation; + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle] += + wrap_compensation; + } + ctx->isys_comm_buffer_queue.next_frame_queue_head[stream_handle]++; + + return 0; +} + +/* + * ia_css_isys_extract_fw_response() + */ +int ia_css_isys_extract_fw_response( + struct ia_css_isys_context *ctx, + const struct resp_queue_token *token, + struct ia_css_isys_resp_info *received_response) +{ + int buff_slot; + unsigned int css_address; + + verifret(ctx, EFAULT); /* Host Consistency */ + verifret(token, EFAULT); /* Host Consistency */ + verifret(received_response, EFAULT); /* Host Consistency */ + + resp_info_css_to_host(&(token->resp_info), received_response); + + switch (token->resp_info.type) { + case IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE: + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[token->resp_info.stream_handle] - + ctx->isys_comm_buffer_queue.stream_cfg_queue_tail[ + token->resp_info.stream_handle]) > 0, EPROTO); + buff_slot = get_stream_cfg_buff_slot(ctx, + token->resp_info.stream_handle, + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[ + token->resp_info.stream_handle] % + STREAM_CFG_BUFS_PER_MSG_QUEUE); + verifret((ia_css_shared_buffer)HOST_ADDRESS( + token->resp_info.buf_id) == + ctx->isys_comm_buffer_queue. + pstream_cfg_buff_id[buff_slot], EIO); + ctx->isys_comm_buffer_queue.stream_cfg_queue_tail[ + token->resp_info.stream_handle]++; + css_address = ia_css_shared_buffer_css_unmap( + (ia_css_shared_buffer) + HOST_ADDRESS(token->resp_info.buf_id)); + verifret(css_address, EADDRINUSE); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK: + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. 
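
The wrap-around compensation above (the same logic appears in both constructor functions) is easy to misread, so here is a worked check. Head and tail are free-running 32-bit counters; when head reaches 0xFFFFFFFF both are shifted by one constant, chosen so that the occupancy (head - tail) and the slot index (head % depth) both survive the jump. A standalone check, assuming a hypothetical depth of 8:

#include <assert.h>

int main(void)
{
	const unsigned int depth = 8;		/* hypothetical queue depth  */
	const unsigned int wrap = 0xFFFFFFFFu;	/* wrap_condition above      */
	unsigned int head = wrap;
	unsigned int tail = wrap - 3;		/* three entries outstanding */
	unsigned int comp = (0u - wrap) + depth + (wrap % depth);

	head += comp;				/* 0xFFFFFFFF wraps to 15    */
	tail += comp;
	assert(head - tail == 3);		/* occupancy preserved       */
	assert(head % depth == wrap % depth);	/* slot index preserved      */
	return 0;
}
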
+ next_frame_queue_head[token->resp_info.stream_handle] - + ctx->isys_comm_buffer_queue.next_frame_queue_tail[ + token->resp_info.stream_handle]) > 0, EPROTO); + buff_slot = get_next_frame_buff_slot(ctx, + token->resp_info.stream_handle, + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[ + token->resp_info.stream_handle] % + NEXT_FRAME_BUFS_PER_MSG_QUEUE); + verifret((ia_css_shared_buffer)HOST_ADDRESS( + token->resp_info.buf_id) == + ctx->isys_comm_buffer_queue. + pnext_frame_buff_id[buff_slot], EIO); + ctx->isys_comm_buffer_queue.next_frame_queue_tail[ + token->resp_info.stream_handle]++; + css_address = ia_css_shared_buffer_css_unmap( + (ia_css_shared_buffer) + HOST_ADDRESS(token->resp_info.buf_id)); + verifret(css_address, EADDRINUSE); + break; + default: + break; + } + + return 0; +} + +/* + * ia_css_isys_extract_proxy_response() + */ +int ia_css_isys_extract_proxy_response( + const struct proxy_resp_queue_token *token, + struct ia_css_proxy_write_req_resp *preceived_response) +{ + verifret(token, EFAULT); /* Host Consistency */ + verifret(preceived_response, EFAULT); /* Host Consistency */ + + preceived_response->request_id = token->proxy_resp_info.request_id; + preceived_response->error = token->proxy_resp_info.error_info.error; + preceived_response->error_details = + token->proxy_resp_info.error_info.error_details; + + return 0; +} + +/* + * ia_css_isys_prepare_param() + */ +int ia_css_isys_prepare_param( + struct ia_css_isys_fw_config *isys_fw_cfg, + const struct ia_css_isys_buffer_partition *buf_partition, + const unsigned int num_send_queues[], + const unsigned int num_recv_queues[]) +{ + unsigned int i; + + verifret(isys_fw_cfg, EFAULT); /* Host Consistency */ + verifret(buf_partition, EFAULT); /* Host Consistency */ + verifret(num_send_queues, EFAULT); /* Host Consistency */ + verifret(num_recv_queues, EFAULT); /* Host Consistency */ + + buffer_partition_host_to_css(buf_partition, + &isys_fw_cfg->buffer_partition); + for (i = 0; i < N_IA_CSS_ISYS_QUEUE_TYPE; i++) { + isys_fw_cfg->num_send_queues[i] = num_send_queues[i]; + isys_fw_cfg->num_recv_queues[i] = num_recv_queues[i]; + } + + return 0; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_private.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_private.h new file mode 100644 index 000000000000..d53fa53c9a81 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_private.h @@ -0,0 +1,156 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
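
ia_css_isys_extract_fw_response() above enforces strict in-order completion: the buffer id echoed back by firmware must be exactly the buffer sitting at the ring tail, otherwise EIO. A self-contained model of that invariant (depth and ids hypothetical):

#include <assert.h>
#include <stdint.h>

#define DEPTH 4				/* hypothetical ring depth */

int main(void)
{
	uintptr_t ring[DEPTH] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	unsigned int head = 2, tail = 0;	/* two commands in flight      */
	uintptr_t echoed = 0x1000;		/* buf_id in the response      */

	assert(head - tail > 0);		/* else EPROTO: nothing pending */
	assert(echoed == ring[tail % DEPTH]);	/* else EIO: out-of-order ack  */
	tail++;					/* retire the oldest entry     */
	return 0;
}
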
+*/ + +#ifndef __IA_CSS_ISYS_PRIVATE_H +#define __IA_CSS_ISYS_PRIVATE_H + + +#include "type_support.h" +/* Needed for the structure member ia_css_sys_context * sys */ +#include "ia_css_syscom.h" +/* Needed for the definitions of STREAM_ID_MAX */ +#include "ia_css_isysapi.h" +/* The following is needed for the function arguments */ +#include "ia_css_isys_fw_bridged_types.h" + +#include "ia_css_shared_buffer.h" + + +/* Set for the respective error handling */ +#define VERIFY_DEVSTATE 1 + +#if (VERIFY_DEVSTATE != 0) +/** + * enum device_state + */ +enum device_state { + IA_CSS_ISYS_DEVICE_STATE_IDLE = 0, + IA_CSS_ISYS_DEVICE_STATE_CONFIGURED = 1, + IA_CSS_ISYS_DEVICE_STATE_READY = 2 +}; +#endif /* VERIFY_DEVSTATE */ + +/** + * enum stream_state + */ +enum stream_state { + IA_CSS_ISYS_STREAM_STATE_IDLE = 0, + IA_CSS_ISYS_STREAM_STATE_OPENED = 1, + IA_CSS_ISYS_STREAM_STATE_STARTED = 2 +}; + + +/** + * struct ia_css_isys_comm_buffer_queue + */ +struct ia_css_isys_comm_buffer_queue { + ia_css_shared_buffer *pstream_cfg_buff_id; + unsigned int stream_cfg_queue_head[STREAM_ID_MAX]; + unsigned int stream_cfg_queue_tail[STREAM_ID_MAX]; + ia_css_shared_buffer *pnext_frame_buff_id; + unsigned int next_frame_queue_head[STREAM_ID_MAX]; + unsigned int next_frame_queue_tail[STREAM_ID_MAX]; +}; + + +/** + * struct ia_css_isys_context + */ +struct ia_css_isys_context { + struct ia_css_syscom_context *sys; + /* add here any isys specific members that need + to be passed into the isys api functions as input */ + unsigned int ssid; + unsigned int mmid; + unsigned int num_send_queues[N_IA_CSS_ISYS_QUEUE_TYPE]; + unsigned int num_recv_queues[N_IA_CSS_ISYS_QUEUE_TYPE]; + unsigned int send_queue_size[N_IA_CSS_ISYS_QUEUE_TYPE]; + struct ia_css_isys_comm_buffer_queue isys_comm_buffer_queue; + unsigned int stream_nof_output_pins[STREAM_ID_MAX]; +#if (VERIFY_DEVSTATE != 0) + enum device_state dev_state; +#endif /* VERIFY_DEVSTATE */ + enum stream_state stream_state_array[STREAM_ID_MAX]; + /* If true, this context is created based on secure config */ + bool secure; +}; + + +/** + * ia_css_isys_constr_comm_buff_queue() + */ +extern int ia_css_isys_constr_comm_buff_queue( + struct ia_css_isys_context *ctx +); + +/** + * ia_css_isys_force_unmap_comm_buff_queue() + */ +extern int ia_css_isys_force_unmap_comm_buff_queue( + struct ia_css_isys_context *ctx +); + +/** + * ia_css_isys_destr_comm_buff_queue() + */ +extern int ia_css_isys_destr_comm_buff_queue( + struct ia_css_isys_context *ctx +); + +/** + * ia_css_isys_constr_fw_stream_cfg() + */ +extern int ia_css_isys_constr_fw_stream_cfg( + struct ia_css_isys_context *ctx, + const unsigned int stream_handle, + ia_css_shared_buffer_css_address *pstream_cfg_fw, + ia_css_shared_buffer *pbuf_stream_cfg_id, + const struct ia_css_isys_stream_cfg_data *stream_cfg +); + +/** + * ia_css_isys_constr_fw_next_frame() + */ +extern int ia_css_isys_constr_fw_next_frame( + struct ia_css_isys_context *ctx, + const unsigned int stream_handle, + ia_css_shared_buffer_css_address *pnext_frame_fw, + ia_css_shared_buffer *pbuf_next_frame_id, + const struct ia_css_isys_frame_buff_set *next_frame +); + +/** + * ia_css_isys_extract_fw_response() + */ +extern int ia_css_isys_extract_fw_response( + struct ia_css_isys_context *ctx, + const struct resp_queue_token *token, + struct ia_css_isys_resp_info *received_response +); +extern int ia_css_isys_extract_proxy_response( + const struct proxy_resp_queue_token *token, + struct ia_css_proxy_write_req_resp *received_response +); + +/** + * 
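
The two state fields declared in this header drive every verifret() precondition in the public API. Read off this patch (the transitions are not spelled out in one place), they amount to:

/*
 * Device state:  IDLE -> CONFIGURED  (context create)
 *                CONFIGURED -> READY (device_open_ready)
 *                READY -> CONFIGURED (device close)
 *                CONFIGURED -> IDLE  (device release)
 * Stream state:  IDLE -> OPENED    (stream_open)
 *                OPENED -> STARTED (stream_start)
 *                STARTED -> OPENED (stream_stop, stream_flush)
 *                OPENED -> IDLE    (stream_close)
 * capture_indication() requires STARTED and leaves the state unchanged.
 */
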
ia_css_isys_prepare_param() + */ +extern int ia_css_isys_prepare_param( + struct ia_css_isys_fw_config *isys_fw_cfg, + const struct ia_css_isys_buffer_partition *buf_partition, + const unsigned int num_send_queues[], + const unsigned int num_recv_queues[] +); + +#endif /* __IA_CSS_ISYS_PRIVATE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public.c new file mode 100644 index 000000000000..f7b132527249 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public.c @@ -0,0 +1,1284 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +/* TODO: REMOVE --> START IF EXTERNALLY INCLUDED/DEFINED */ +/* These are temporary, the correct numbers need to be inserted/linked */ +/* Until this happens, the following definitions stay here */ +#define INPUT_MIN_WIDTH 1 +#define INPUT_MAX_WIDTH 16384 +#define INPUT_MIN_HEIGHT 1 +#define INPUT_MAX_HEIGHT 16384 +#define OUTPUT_MIN_WIDTH 1 +#define OUTPUT_MAX_WIDTH 16384 +#define OUTPUT_MIN_HEIGHT 1 +#define OUTPUT_MAX_HEIGHT 16384 +/* REMOVE --> END IF EXTERNALLY INCLUDED/DEFINED */ + + +/* The FW bridged types are included through the following */ +#include "ia_css_isysapi.h" +/* The following provides the isys-sys context */ +#include "ia_css_isys_private.h" +/* The following provides the sys layer functions */ +#include "ia_css_syscom.h" + +#include "ia_css_cell.h" +#include "ipu_device_cell_properties.h" + +/* The following provides the tracing functions */ +#include "ia_css_isysapi_trace.h" +#include "ia_css_isys_public_trace.h" + +#include "ia_css_shared_buffer_cpu.h" +/* The following is needed for the + * stddef.h (NULL), + * limits.h (CHAR_BIT definition). + */ +#include "type_support.h" +#include "error_support.h" +#include "cpu_mem_support.h" +#include "math_support.h" +#include "misc_support.h" +#include "system_const.h" + +static int isys_context_create( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config); +static int isys_start_server( + const struct ia_css_isys_device_cfg_data *config); + +static int isys_context_create( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config) +{ + int retval; + unsigned int stream_handle; + struct ia_css_isys_context *ctx; + struct ia_css_syscom_config sys; + /* Needs to be updated in case new type of queues are introduced */ + struct ia_css_syscom_queue_config input_queue_cfg[N_MAX_SEND_QUEUES]; + /* Needs to be updated in case new type of queues are introduced */ + struct ia_css_syscom_queue_config output_queue_cfg[N_MAX_RECV_QUEUES]; + struct ia_css_isys_fw_config isys_fw_cfg; + unsigned int proxy_write_queue_size; + unsigned int ssid; + unsigned int mmid; + unsigned int i; + + /* Printing "ENTRY isys_context_create" + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY isys_context_create\n"); + + verifret(config != NULL, EFAULT); + + /* Printing configuration information if tracing level = VERBOSE. */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_device_config_data(config); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + + /* Runtime check for # of send and recv MSG queues */ + verifret(config->driver_sys.num_send_queues <= + N_MAX_MSG_SEND_QUEUES/*=STREAM_ID_MAX*/, EINVAL); + verifret(config->driver_sys.num_recv_queues <= + N_MAX_MSG_RECV_QUEUES, EINVAL); + + /* Runtime check for send and recv MSG queue sizes */ + verifret(config->driver_sys.send_queue_size <= MAX_QUEUE_SIZE, EINVAL); + verifret(config->driver_sys.recv_queue_size <= MAX_QUEUE_SIZE, EINVAL); + + /* TODO: return an error in case MAX_QUEUE_SIZE is exceeded + * (Similar to runtime check on MSG queue sizes) + */ + proxy_write_queue_size = uclip( + config->driver_proxy.proxy_write_queue_size, + MIN_QUEUE_SIZE, + MAX_QUEUE_SIZE); + + ctx = (struct ia_css_isys_context *) + ia_css_cpu_mem_alloc(sizeof(struct ia_css_isys_context)); + verifret(ctx != NULL, EFAULT); + *context = (HANDLE)ctx; + + /* Copy to the sys config the driver_sys config, + * and add the internal info (token sizes) + */ + ssid = config->driver_sys.ssid; + mmid = config->driver_sys.mmid; + sys.ssid = ssid; + sys.mmid = mmid; + + ctx->secure = config->secure; + /* Following operations need to be aligned with + * "enum ia_css_isys_queue_type" list (list of queue types) + */ + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] = + N_MAX_PROXY_SEND_QUEUES; + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] = + N_MAX_DEV_SEND_QUEUES; + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] = + config->driver_sys.num_send_queues; + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] = + N_MAX_PROXY_RECV_QUEUES; + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] = + 0; /* Common msg/dev return queue */ + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] = + config->driver_sys.num_recv_queues; + + sys.num_input_queues = + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] + + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + sys.num_output_queues = + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] + + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + + sys.input = input_queue_cfg; + for (i = 0; + i < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY]; + i++) { + input_queue_cfg[BASE_PROXY_SEND_QUEUES + i].queue_size = + proxy_write_queue_size; + input_queue_cfg[BASE_PROXY_SEND_QUEUES + i].token_size = + sizeof(struct proxy_send_queue_token); + } + for (i = 0; + i < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV]; + i++) { + input_queue_cfg[BASE_DEV_SEND_QUEUES + i].queue_size = + DEV_SEND_QUEUE_SIZE; + input_queue_cfg[BASE_DEV_SEND_QUEUES + i].token_size = + sizeof(struct send_queue_token); + } + for (i = 0; + i < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + i++) { + input_queue_cfg[BASE_MSG_SEND_QUEUES + i].queue_size = + config->driver_sys.send_queue_size; + input_queue_cfg[BASE_MSG_SEND_QUEUES + i].token_size = + sizeof(struct send_queue_token); + } + + ctx->send_queue_size[IA_CSS_ISYS_QUEUE_TYPE_PROXY] = + proxy_write_queue_size; + ctx->send_queue_size[IA_CSS_ISYS_QUEUE_TYPE_DEV] = + DEV_SEND_QUEUE_SIZE; + ctx->send_queue_size[IA_CSS_ISYS_QUEUE_TYPE_MSG] = + config->driver_sys.send_queue_size; + + sys.output = output_queue_cfg; + 
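
The queue index arithmetic above relies on the BASE_* macros, whose definitions live outside this hunk. Assuming they are the usual cumulative offsets, the send/recv index spaces look like this (an assumption, not confirmed by this patch):

/*
 * Assumed layout, with P proxy, D dev and M msg send queues:
 *   send: [PROXY: 0..P-1][DEV: P..P+D-1][MSG: P+D..P+D+M-1]
 *   recv: [PROXY: 0..P-1][MSG: P..P+R-1]   (no DEV recv queue)
 * i.e. BASE_MSG_SEND_QUEUES == N_MAX_PROXY_SEND_QUEUES
 *                            + N_MAX_DEV_SEND_QUEUES
 */
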
for (i = 0; + i < ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY]; + i++) { + output_queue_cfg[BASE_PROXY_RECV_QUEUES + i].queue_size = + proxy_write_queue_size; + output_queue_cfg[BASE_PROXY_RECV_QUEUES + i].token_size = + sizeof(struct proxy_resp_queue_token); + } + /* There is no recv DEV queue */ + for (i = 0; + i < ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + i++) { + output_queue_cfg[BASE_MSG_RECV_QUEUES + i].queue_size = + config->driver_sys.recv_queue_size; + output_queue_cfg[BASE_MSG_RECV_QUEUES + i].token_size = + sizeof(struct resp_queue_token); + } + + sys.regs_addr = ipu_device_cell_memory_address(SPC0, + IPU_DEVICE_SP2600_CONTROL_REGS); + sys.dmem_addr = ipu_device_cell_memory_address(SPC0, + IPU_DEVICE_SP2600_CONTROL_DMEM); + +#if HAS_DUAL_CMD_CTX_SUPPORT + sys.dmem_addr += config->secure ? REGMEM_SECURE_OFFSET : REGMEM_OFFSET; +#endif + + /* Prepare the param */ + ia_css_isys_prepare_param( + &isys_fw_cfg, + &config->buffer_partition, + ctx->num_send_queues, + ctx->num_recv_queues); + + /* parameter struct to be passed to fw */ + sys.specific_addr = &isys_fw_cfg; + /* parameters size */ + sys.specific_size = sizeof(isys_fw_cfg); + sys.secure = config->secure; + if (config->secure) { + sys.vtl0_addr_mask = config->vtl0_addr_mask; + } + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "isys_context_create || call ia_css_syscom_open()\n"); + /* The allocation of the queues will take place within this call and + * info will be stored in sys_context output + */ + ctx->sys = ia_css_syscom_open(&sys, NULL); + if (!ctx->sys) { + ia_css_cpu_mem_free(ctx); + return -EFAULT; + } + + /* Update the context with the id's */ + ctx->ssid = ssid; + ctx->mmid = mmid; + + for (stream_handle = 0; stream_handle < STREAM_ID_MAX; + stream_handle++) { + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_IDLE; + } + + retval = ia_css_isys_constr_comm_buff_queue(ctx); + if (retval) { + ia_css_syscom_close(ctx->sys); + ia_css_syscom_release(ctx->sys, 1); + ia_css_cpu_mem_free(ctx); + return retval; + } + +#if (VERIFY_DEVSTATE != 0) + ctx->dev_state = IA_CSS_ISYS_DEVICE_STATE_CONFIGURED; +#endif /* VERIFY_DEVSTATE */ + + /* Printing device configuration and device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + + /* Printing "LEAVE isys_context_create" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE isys_context_create\n"); + return 0; +} + +static int isys_start_server( + const struct ia_css_isys_device_cfg_data *config) +{ + verifret(config != NULL, EFAULT); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "isys_start_server || start SPC\n"); + /* The firmware is loaded and syscom is ready, start the SPC */ + ia_css_cell_start_prefetch(config->driver_sys.ssid, SPC0, + config->driver_sys.icache_prefetch); + IA_CSS_TRACE_1(ISYSAPI, VERBOSE, "SPC prefetch: %d\n", + config->driver_sys.icache_prefetch); + return 0; +} + +/** + * ia_css_isys_device_open() - open and configure ISYS device + */ +#if HAS_DUAL_CMD_CTX_SUPPORT +int ia_css_isys_context_create( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config) +{ + return isys_context_create(context, config); +} + +/* push context information to DMEM for FW to access */ +int ia_css_isys_context_store_dmem( + const HANDLE *context, + const struct ia_css_isys_device_cfg_data *config) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *) *context; + + return ia_css_syscom_store_dmem(ctx->sys, config->driver_sys.ssid, config->vtl0_addr_mask); +} + +bool ia_css_isys_ab_spc_ready( + HANDLE *context) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *) *context; + + return ia_css_syscom_is_ab_spc_ready(ctx->sys); +} + +int ia_css_isys_device_open( + const struct ia_css_isys_device_cfg_data *config) +{ + return isys_start_server(config); +} +#else +int ia_css_isys_device_open( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config) +{ + int retval; + + retval = isys_context_create(context, config); + if (retval) { + IA_CSS_TRACE_1(ISYSAPI, ERROR, "ia_css_isys_device_open() failed (retval %d)\n", retval); + return retval; + } + + isys_start_server(config); + return 0; +} +#endif + +/** + * ia_css_isys_device_open_ready() - open and configure ISYS device + */ +int ia_css_isys_device_open_ready(HANDLE context) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + unsigned int i; + int retval; + + /* Printing "ENTRY IA_CSS_ISYS_DEVICE_OPEN" + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_DEVICE_OPEN\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. 
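
With HAS_DUAL_CMD_CTX_SUPPORT the legacy single-call open is split so a caller can stage context creation, DMEM publication and server start separately (e.g. for a secure and a non-secure context). A hedged usage sketch of the implied bring-up order; the busy-wait is only illustrative:

	HANDLE h;
	/* cfg is a populated struct ia_css_isys_device_cfg_data */
	ia_css_isys_context_create(&h, &cfg);	/* syscom + comm buffers    */
	ia_css_isys_context_store_dmem(&h, &cfg); /* publish ctx to SPC DMEM */
	ia_css_isys_device_open(&cfg);		/* start the SPC server     */
	while (!ia_css_isys_ab_spc_ready(&h))
		;				/* wait for FW to come up   */
	ia_css_isys_device_open_ready(h);	/* open the queue ports     */
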
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_CONFIGURED, EPERM); +#endif /* VERIFY_DEVSTATE */ + + /* Open the ports for all the non-MSG send queues (PROXY + DEV) */ + for (i = 0; + i < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV]; + i++) { + retval = ia_css_syscom_send_port_open(ctx->sys, i); + verifret(retval != FW_ERROR_BUSY, EBUSY); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + } + + /* Open the ports for all the recv queues (PROXY + MSG) */ + for (i = 0; + i < (ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]); + i++) { + retval = ia_css_syscom_recv_port_open(ctx->sys, i); + verifret(retval != FW_ERROR_BUSY, EBUSY); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + } + +#if (VERIFY_DEVSTATE != 0) + ctx->dev_state = IA_CSS_ISYS_DEVICE_STATE_READY; +#endif /* VERIFY_DEVSTATE */ + + /* Printing "LEAVE IA_CSS_ISYS_DEVICE_OPEN_READY" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "LEAVE IA_CSS_ISYS_DEVICE_OPEN_READY\n"); + return 0; +} + + + /** + * ia_css_isys_stream_open() - open and configure a virtual stream + */ +int ia_css_isys_stream_open( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_stream_cfg_data *stream_cfg) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + unsigned int i; + int retval = 0; + int packets; + struct send_queue_token token; + ia_css_shared_buffer_css_address stream_cfg_fw = 0; + ia_css_shared_buffer buf_stream_cfg_id = (ia_css_shared_buffer)NULL; + /* Printing "ENTRY IA_CSS_ISYS_STREAM_OPEN" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_OPEN\n"); + + verifret(ctx, EFAULT); + + /* Printing stream configuration and device handle context information + * if tracing level = VERBOSE. 
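
Every port-open call in this file funnels its status through the same three verifret() lines. Collected into one hypothetical helper for readability (the patch keeps the checks inline; the errno sign convention follows verifret() and is assumed positive here):

static int fw_status_to_errno(int fw_ret)
{
	if (fw_ret == FW_ERROR_BUSY)
		return EBUSY;	/* port already open / FW busy   */
	if (fw_ret == FW_ERROR_BAD_ADDRESS)
		return EFAULT;	/* bad queue or context address  */
	return fw_ret == 0 ? 0 : EINVAL;
}
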
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); + print_stream_config_data(stream_cfg); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_IDLE, EPERM); + + verifret(stream_cfg != NULL, EFAULT); + verifret(stream_cfg->src < N_IA_CSS_ISYS_STREAM_SRC, EINVAL); + verifret(stream_cfg->vc < N_IA_CSS_ISYS_MIPI_VC, EINVAL); + verifret(stream_cfg->isl_use < N_IA_CSS_ISYS_USE, EINVAL); + if (stream_cfg->isl_use != IA_CSS_ISYS_USE_NO_ISL_NO_ISA) { + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].bottom_offset >= + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].top_offset + + OUTPUT_MIN_HEIGHT, EINVAL); + + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].bottom_offset <= + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].top_offset + + OUTPUT_MAX_HEIGHT, EINVAL); + + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].right_offset >= + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].left_offset + + OUTPUT_MIN_WIDTH, EINVAL); + + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].right_offset <= + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].left_offset + + OUTPUT_MAX_WIDTH, EINVAL); + } + verifret(stream_cfg->nof_input_pins <= MAX_IPINS, EINVAL); + verifret(stream_cfg->nof_output_pins <= MAX_OPINS, EINVAL); + for (i = 0; i < stream_cfg->nof_input_pins; i++) { + /* Verify input pin */ + verifret( + stream_cfg->input_pins[i].input_res.width >= + INPUT_MIN_WIDTH && + stream_cfg->input_pins[i].input_res.width <= + INPUT_MAX_WIDTH && + stream_cfg->input_pins[i].input_res.height >= + INPUT_MIN_HEIGHT && + stream_cfg->input_pins[i].input_res.height <= + INPUT_MAX_HEIGHT, EINVAL); + verifret(stream_cfg->input_pins[i].dt < + N_IA_CSS_ISYS_MIPI_DATA_TYPE, EINVAL); +/* #ifdef To be removed when driver inits the value */ +#ifdef DRIVER_INIT_MIPI_STORE_MODE + verifret(stream_cfg->input_pins[i].mipi_store_mode < + N_IA_CSS_ISYS_MIPI_STORE_MODE, EINVAL); +#endif /* DRIVER_INIT_MIPI_STORE_MODE */ + } + for (i = 0; i < stream_cfg->nof_output_pins; i++) { + /* Verify output pin */ + verifret(stream_cfg->output_pins[i].input_pin_id < + stream_cfg->nof_input_pins, EINVAL); + verifret(stream_cfg->output_pins[i].pt < + N_IA_CSS_ISYS_PIN_TYPE, EINVAL); + verifret(stream_cfg->output_pins[i].ft < + N_IA_CSS_ISYS_FRAME_FORMAT, EINVAL); + /* Verify that the stride is aligned to 64 bytes: HW spec */ + verifret(stream_cfg->output_pins[i].stride%(XMEM_WIDTH/8) == + 0, EINVAL); + verifret((stream_cfg->output_pins[i].output_res.width >= + OUTPUT_MIN_WIDTH) && + (stream_cfg->output_pins[i].output_res.width <= + OUTPUT_MAX_WIDTH) && + (stream_cfg->output_pins[i].output_res.height >= + OUTPUT_MIN_HEIGHT) && + (stream_cfg->output_pins[i].output_res.height <= + OUTPUT_MAX_HEIGHT), EINVAL); + verifret((stream_cfg->output_pins[i].pt == + IA_CSS_ISYS_PIN_TYPE_MIPI) || + (stream_cfg-> + input_pins[stream_cfg->output_pins[i].input_pin_id].mipi_store_mode != + IA_CSS_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER), EINVAL); + if (stream_cfg->isl_use == IA_CSS_ISYS_USE_SINGLE_ISA) { + switch (stream_cfg->output_pins[i].pt) { 
+ case IA_CSS_ISYS_PIN_TYPE_RAW_NS: + /* Ensure the PIFCONV cropped resolution + * matches the RAW_NS output pin resolution + */ + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED].bottom_offset == + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED].top_offset + + (int)stream_cfg->output_pins[i].output_res.height, EINVAL); + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED].right_offset == + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED].left_offset + + (int)stream_cfg->output_pins[i].output_res.width, EINVAL); + /* Ensure the ISAPF cropped resolution matches + * the Non-scaled ISA output resolution before + * the PIFCONV cropping, since nothing can + * modify the resolution in that part of + * the pipe + */ + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].bottom_offset == + stream_cfg->crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].top_offset + + (int)stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED].height, + EINVAL); + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].right_offset == + stream_cfg->crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].left_offset + + (int)stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED].width, + EINVAL); + /* Ensure the Non-scaled ISA output resolution + * before the PIFCONV cropping bounds the + * RAW_NS pin output resolution since padding + * is not supported + */ + verifret(stream_cfg-> +isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED].height >= +stream_cfg->output_pins[i].output_res.height, EINVAL); + verifret(stream_cfg-> +isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED].width >= +stream_cfg->output_pins[i].output_res.width, EINVAL); + break; + case IA_CSS_ISYS_PIN_TYPE_RAW_S: + /* Ensure the ScaledPIFCONV cropped resolution + * matches the RAW_S output pin resolution + */ + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED].bottom_offset == + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED].top_offset + + (int)stream_cfg->output_pins[i].output_res.height, EINVAL); + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED].right_offset == + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED].left_offset + + (int)stream_cfg->output_pins[i].output_res.width, EINVAL); + /* Ensure the ISAPF cropped resolution bounds + * the Scaled ISA output resolution before the + * ScaledPIFCONV cropping, since only IDS can + * modify the resolution, and this only to + * make it smaller + */ + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].bottom_offset >= + stream_cfg->crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].top_offset + + (int)stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED].height, + EINVAL); + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].right_offset >= + stream_cfg->crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].left_offset + + (int)stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED].width, + EINVAL); + /* Ensure the Scaled ISA output resolution + * before the ScaledPIFCONV cropping bounds + * the RAW_S pin output resolution since + * padding is not supported + */ + verifret(stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED].height >= + stream_cfg->output_pins[i].output_res.height, EINVAL); + verifret(stream_cfg-> + 
isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED].width >=
+ stream_cfg->output_pins[i].output_res.width, EINVAL);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ /* open 1 send queue/stream and a single receive queue
+ * if not existing
+ */
+ retval = ia_css_syscom_send_port_open(ctx->sys,
+ (BASE_MSG_SEND_QUEUES + stream_handle));
+ verifret(retval != FW_ERROR_BUSY, EBUSY);
+ verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT);
+ verifret(retval == 0, EINVAL);
+
+ packets = ia_css_syscom_send_port_available(ctx->sys,
+ (BASE_MSG_SEND_QUEUES + stream_handle));
+ verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT);
+ verifret(packets >= 0, EINVAL);
+ verifret(packets > 0, EPERM);
+ token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_OPEN;
+ retval = ia_css_isys_constr_fw_stream_cfg(ctx, stream_handle,
+ &stream_cfg_fw, &buf_stream_cfg_id, stream_cfg);
+ verifret(retval == 0, retval);
+ token.payload = stream_cfg_fw;
+ token.buf_handle = HOST_ADDRESS(buf_stream_cfg_id);
+ retval = ia_css_syscom_send_port_transfer(ctx->sys,
+ (BASE_MSG_SEND_QUEUES + stream_handle), &token);
+ verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT);
+ verifret(retval >= 0, EINVAL);
+
+ ctx->stream_nof_output_pins[stream_handle] =
+ stream_cfg->nof_output_pins;
+ ctx->stream_state_array[stream_handle] =
+ IA_CSS_ISYS_STREAM_STATE_OPENED;
+
+ /* Printing "LEAVE IA_CSS_ISYS_STREAM_OPEN" message
+ * if tracing level = VERBOSE.
+ */
+ IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_OPEN\n");
+
+ return 0;
+}
+
+
+/**
+ * ia_css_isys_stream_close() - close virtual stream
+ */
+int ia_css_isys_stream_close(
+ HANDLE context,
+ const unsigned int stream_handle)
+{
+ struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context;
+ int retval = 0;
+ int packets;
+ struct send_queue_token token;
+
+ /* Printing "ENTRY IA_CSS_ISYS_STREAM_CLOSE" message
+ * if tracing level = VERBOSE.
+ */
+ IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_CLOSE\n");
+
+ verifret(ctx, EFAULT);
+
+ /* Printing device handle context information
+ * if tracing level = VERBOSE.
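
Two token shapes cover all commands on the send queues: payload-less control (close, stop, flush) and buffer-carrying commands (open, start-and-capture, capture). Sketched with designated initializers (assuming the token struct permits them):

	struct send_queue_token close_tok = {
		.send_type  = IA_CSS_ISYS_SEND_TYPE_STREAM_CLOSE,
		.stream_id  = stream_handle,
		.payload    = 0,		/* nothing marshalled        */
		.buf_handle = 0,
	};
	struct send_queue_token open_tok = {
		.send_type  = IA_CSS_ISYS_SEND_TYPE_STREAM_OPEN,
		.payload    = stream_cfg_fw,	/* CSS address of cfg buffer */
		.buf_handle = HOST_ADDRESS(buf_stream_cfg_id), /* echoed back
						   in the FW response      */
	};
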
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_OPENED, EPERM); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_CLOSE; + token.stream_id = stream_handle; + token.payload = 0; + token.buf_handle = 0; + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + /* close 1 send queue/stream and the single receive queue + * if none is using it + */ + retval = ia_css_syscom_send_port_close(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + + ctx->stream_state_array[stream_handle] = IA_CSS_ISYS_STREAM_STATE_IDLE; + /* Printing "LEAVE IA_CSS_ISYS_STREAM_CLOSE" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_CLOSE\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_start() - starts handling a mipi virtual stream + */ +int ia_css_isys_stream_start( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_frame_buff_set *next_frame) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + ia_css_shared_buffer_css_address next_frame_fw = 0; + ia_css_shared_buffer buf_next_frame_id = (ia_css_shared_buffer)NULL; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_START" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_START\n"); + + verifret(ctx, EFAULT); + + /* Printing frame configuration and device handle context information + * if tracing level = VERBOSE. 
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); + print_isys_frame_buff_set(next_frame, + ctx->stream_nof_output_pins[stream_handle]); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_OPENED, EPERM); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + if (next_frame != NULL) { + token.send_type = + IA_CSS_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE; + retval = ia_css_isys_constr_fw_next_frame(ctx, stream_handle, + &next_frame_fw, &buf_next_frame_id, next_frame); + verifret(retval == 0, retval); + token.payload = next_frame_fw; + token.buf_handle = HOST_ADDRESS(buf_next_frame_id); + } else { + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_START; + token.payload = 0; + token.buf_handle = 0; + } + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_STARTED; + /* Printing "LEAVE IA_CSS_ISYS_STREAM_START" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_START\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_stop() - Stops a mipi virtual stream + */ +int ia_css_isys_stream_stop( + HANDLE context, + const unsigned int stream_handle) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_STOP" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_STOP\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_STARTED, EPERM); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_DEV_SEND_QUEUES)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_STOP; + token.stream_id = stream_handle; + token.payload = 0; + token.buf_handle = 0; + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_DEV_SEND_QUEUES), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_OPENED; + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_STOP" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_STOP\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_flush() - stops a mipi virtual stream but + * completes processing cmd backlog + */ +int ia_css_isys_stream_flush( + HANDLE context, + const unsigned int stream_handle) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_FLUSH" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_FLUSH\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_STARTED, EPERM); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_FLUSH; + token.payload = 0; + token.buf_handle = 0; + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_OPENED; + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_FLUSH" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_FLUSH\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_capture_indication() + * - captures "next frame" on stream_handle + */ +int ia_css_isys_stream_capture_indication( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_frame_buff_set *next_frame) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + ia_css_shared_buffer_css_address next_frame_fw = 0; + ia_css_shared_buffer buf_next_frame_id = (ia_css_shared_buffer)NULL; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_CAPTURE_INDICATION" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "ENTRY IA_CSS_ISYS_STREAM_CAPTURE_INDICATION\n"); + + verifret(ctx, EFAULT); + + /* Printing frame configuration and device handle context information + *if tracing level = VERBOSE. 
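
Note the routing difference between the two teardown paths above: stream_stop() is sent on the dedicated device queue (BASE_DEV_SEND_QUEUES), apparently so it can bypass capture commands still queued for the stream, while stream_flush() travels the stream's own MSG queue and is therefore handled only after the backlog drains. In sketch form:

	/* stop: bypasses the per-stream backlog via the device queue */
	ia_css_syscom_send_port_transfer(ctx->sys, BASE_DEV_SEND_QUEUES, &token);
	/* flush: queued behind any pending captures for this stream  */
	ia_css_syscom_send_port_transfer(ctx->sys,
		BASE_MSG_SEND_QUEUES + stream_handle, &token);
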
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); + print_isys_frame_buff_set(next_frame, + ctx->stream_nof_output_pins[stream_handle]); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_STARTED, EPERM); + verifret(next_frame != NULL, EFAULT); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + { + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_CAPTURE; + retval = ia_css_isys_constr_fw_next_frame(ctx, stream_handle, + &next_frame_fw, &buf_next_frame_id, next_frame); + verifret(retval == 0, retval); + token.payload = next_frame_fw; + token.buf_handle = HOST_ADDRESS(buf_next_frame_id); + } + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_CAPTURE_INDICATION" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "LEAVE IA_CSS_ISYS_STREAM_CAPTURE_INDICATION\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_handle_response() - handle ISYS responses + */ +int ia_css_isys_stream_handle_response( + HANDLE context, + struct ia_css_isys_resp_info *received_response) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct resp_queue_token token; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_HANDLE_RESPONSE" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "ENTRY IA_CSS_ISYS_STREAM_HANDLE_RESPONSE\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(received_response != NULL, EFAULT); + + packets = ia_css_syscom_recv_port_available( + ctx->sys, BASE_MSG_RECV_QUEUES); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + + retval = ia_css_syscom_recv_port_transfer( + ctx->sys, BASE_MSG_RECV_QUEUES, &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + retval = ia_css_isys_extract_fw_response( + ctx, &token, received_response); + verifret(retval == 0, retval); + + /* Printing received response information + * if tracing level = VERBOSE. 
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_isys_resp_info(received_response); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + + verifret(received_response->type < N_IA_CSS_ISYS_RESP_TYPE, EINVAL); + verifret(received_response->stream_handle < STREAM_ID_MAX, EINVAL); + + if (received_response->type == IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY || + received_response->type == IA_CSS_ISYS_RESP_TYPE_PIN_DATA_WATERMARK || + received_response->type == IA_CSS_ISYS_RESP_TYPE_PIN_DATA_SKIPPED) { + verifret(received_response->pin.addr != 0, EFAULT); + verifret(received_response->pin.out_buf_id != 0, EFAULT); + verifret(received_response->pin_id < + ctx->stream_nof_output_pins[received_response->stream_handle], + EINVAL); + } + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_HANDLE_RESPONSE" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "LEAVE IA_CSS_ISYS_STREAM_HANDLE_RESPONSE\n"); + + return 0; +} + + +/** + * ia_css_isys_device_close() - close ISYS device + */ +static int isys_context_destroy(HANDLE context) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + unsigned int stream_handle; + unsigned int queue_id; + unsigned int nof_recv_queues; + int retval = 0; + + /* Printing "ENTRY IA_CSS_ISYS_DEVICE_CLOSE" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY isys_context_destroy\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + nof_recv_queues = ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] + + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY]; + /* Close the ports for all the recv queues (MSG and PROXY) */ + for (queue_id = 0; queue_id < nof_recv_queues; queue_id++) { + retval = ia_css_syscom_recv_port_close( + ctx->sys, queue_id); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + } + + /* Close the ports for PROXY send queue(s) */ + for (queue_id = 0; + queue_id < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV]; + queue_id++) { + retval = ia_css_syscom_send_port_close( + ctx->sys, queue_id); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + } + + for (stream_handle = 0; stream_handle < STREAM_ID_MAX; + stream_handle++) { + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_IDLE, EPERM); + } + + retval = ia_css_syscom_close(ctx->sys); + verifret(retval == 0, EBUSY); + +#if (VERIFY_DEVSTATE != 0) + ctx->dev_state = IA_CSS_ISYS_DEVICE_STATE_CONFIGURED; +#endif /* VERIFY_DEVSTATE */ + + /* Printing "LEAVE IA_CSS_ISYS_DEVICE_CLOSE" message + * if tracing level = VERBOSE. 
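
ia_css_isys_stream_handle_response() above pops exactly one token per call and fails with EPERM when the receive queue is empty, so a typical consumer drains it in a loop. A hedged sketch (consume_frame() is hypothetical):

	struct ia_css_isys_resp_info resp;

	while (ia_css_isys_stream_handle_response(h, &resp) == 0) {
		if (resp.type == IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY)
			consume_frame(resp.stream_handle, resp.pin_id,
				      resp.pin.addr, resp.pin.out_buf_id);
	}
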
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE isys_context_destroy\n"); + + return 0; +} +/** + * ia_css_isys_device_close() - close ISYS device + */ +#if HAS_DUAL_CMD_CTX_SUPPORT +int ia_css_isys_context_destroy(HANDLE context) +{ + return isys_context_destroy(context); +} + +void ia_css_isys_device_close(void) +{ + /* Created for legacy, nothing to perform here */ +} + +#else +int ia_css_isys_device_close(HANDLE context) +{ + return isys_context_destroy(context); +} +#endif + +/** + * ia_css_isys_device_release() - release ISYS device + */ +int ia_css_isys_device_release(HANDLE context, unsigned int force) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + + /* Printing "ENTRY IA_CSS_ISYS_DEVICE_RELEASE" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_DEVICE_RELEASE\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_CONFIGURED, EPERM); +#endif /* VERIFY_DEVSTATE */ + + retval = ia_css_syscom_release(ctx->sys, force); + verifret(retval == 0, EBUSY); + + /* If ia_css_isys_device_release called with force==1, this should + * happen after timeout, so no active transfers + * If ia_css_isys_device_release called with force==0, this should + * happen after SP has gone idle, so no active transfers + */ + ia_css_isys_force_unmap_comm_buff_queue(ctx); + ia_css_isys_destr_comm_buff_queue(ctx); + +#if (VERIFY_DEVSTATE != 0) + ctx->dev_state = IA_CSS_ISYS_DEVICE_STATE_IDLE; +#endif /* VERIFY_DEVSTATE */ + + ia_css_cpu_mem_free(ctx); + + /* Printing "LEAVE IA_CSS_ISYS_DEVICE_RELEASE" message + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_DEVICE_RELEASE\n"); + + return 0; +} + +/** + * ia_css_isys_proxy_write_req() - send ISYS proxy write requests + */ +int ia_css_isys_proxy_write_req( + HANDLE context, + const struct ia_css_proxy_write_req_val *write_req_val) +{ + + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + struct proxy_send_queue_token token; + int packets; + int retval = 0; + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_PROXY_WRITE_REQ\n"); + verifret(ctx, EFAULT); + verifret(write_req_val != NULL, EFAULT); + + packets = ia_css_syscom_send_port_available(ctx->sys, 0); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + + token.request_id = write_req_val->request_id; + token.region_index = write_req_val->region_index; + token.offset = write_req_val->offset; + token.value = write_req_val->value; + + retval = ia_css_syscom_send_port_transfer(ctx->sys, 0, &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_PROXY_WRITE_REQ\n"); + + return 0; +} + +/** + * ia_css_isys_proxy_handle_write_response() - handle ISYS proxy responses + */ +int ia_css_isys_proxy_handle_write_response( + HANDLE context, + struct ia_css_proxy_write_req_resp *received_response) +{ + + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + struct proxy_resp_queue_token token; + int retval = 0; + int packets; + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "ENTRY IA_CSS_ISYS_PROXY_HANDLE_WRITE_RESPONSE\n"); + verifret(ctx, EFAULT); + verifret(received_response != NULL, EFAULT); + + packets = ia_css_syscom_recv_port_available(ctx->sys, 0); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + + retval = ia_css_syscom_recv_port_transfer(ctx->sys, 0, &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + + retval = ia_css_isys_extract_proxy_response(&token, received_response); + verifret(retval == 0, retval); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "LEAVE IA_CSS_ISYS_PROXY_HANDLE_WRITE_RESPONSE\n"); + + return 0; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public_trace.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public_trace.c new file mode 100644 index 000000000000..d6500a0cb605 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public_trace.c @@ -0,0 +1,379 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
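
The proxy path above is a small register-write RPC: the caller picks a request_id and the firmware echoes it back with an error status. A hedged usage sketch (ids and register values hypothetical; the polling loop is illustrative only):

	struct ia_css_proxy_write_req_val req = {
		.request_id   = 42,	/* caller-chosen correlation id  */
		.region_index = 0,
		.offset       = 0x10,
		.value        = 0x1,
	};
	struct ia_css_proxy_write_req_resp resp;

	ia_css_isys_proxy_write_req(h, &req);
	while (ia_css_isys_proxy_handle_write_response(h, &resp) != 0)
		;			/* poll until a response arrives */
	if (resp.request_id == req.request_id && resp.error == 0)
		/* the register write was acknowledged */;
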
+*/ + +#include "ia_css_isysapi_trace.h" +#include "ia_css_isys_public_trace.h" +#include "ia_css_isysapi_types.h" +#include "ia_css_isysapi.h" +#include "ia_css_isys_private.h" +#include "error_support.h" +#include "ia_css_syscom.h" + +/** + * print_handle_context - formatted print function for + * struct ia_css_isys_context *ctx variable + */ +int print_handle_context(struct ia_css_isys_context *ctx) +{ + unsigned int i; + + verifret(ctx != NULL, EFAULT); + /* Print ctx->(ssid, mmid, dev_state) */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "Print ia_css_isys_context *ctx\n" + "-------------------------------------------------------\n"); + IA_CSS_TRACE_3(ISYSAPI, VERBOSE, + "\tia_css_isys_context->ssid = %d\n" + "\t\t\tia_css_isys_context->mmid = %d\n" + "\t\t\tia_css_isys_context->device_state = %d\n" + , ctx->ssid + , ctx->mmid + , ctx->dev_state); + /* Print ctx->(stream_state_array, stream_nof_output_pins) */ + for (i = 0; i < STREAM_ID_MAX; i++) { + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_context->stream_state[i = %d] = %d\n" + "\t\t\tia_css_isys_context->stream_nof_output_pins[i = %d] = %d\n" + , i + , ctx->stream_state_array[i] + , i + , ctx->stream_nof_output_pins[i]); + } + /* Print ctx->ia_css_syscom_context */ + IA_CSS_TRACE_1(ISYSAPI, VERBOSE, + "\tia_css_isys_context->ia_css_syscom_context = %p\n" + , (struct ia_css_syscom_context *)(ctx->sys)); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "-------------------------------------------------------\n"); + return 0; +} + +/** + * print_device_config_data - formatted print function for + * struct ia_css_isys_device_cfg_data *config variable + */ +int print_device_config_data(const struct ia_css_isys_device_cfg_data *config) +{ + verifret(config != NULL, EFAULT); + IA_CSS_TRACE_0(ISYSAPI, + VERBOSE, + "Print ia_css_isys_device_cfg_data *config\n" + "-------------------------------------------------------\n"); + IA_CSS_TRACE_7(ISYSAPI, + VERBOSE, + "\tia_css_isys_device_cfg_data->driver_sys.ssid = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.mmid = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.num_send_queues = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.num_recv_queues = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.send_queue_size = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.recv_queue_size = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_proxy.proxy_write_queue_size = %d\n", + config->driver_sys.ssid, + config->driver_sys.mmid, + config->driver_sys.num_send_queues, + config->driver_sys.num_recv_queues, + config->driver_sys.send_queue_size, + config->driver_sys.recv_queue_size, + config->driver_proxy.proxy_write_queue_size); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "-------------------------------------------------------\n"); + return 0; +} + +/** + * print_stream_config_data - formatted print function for + * ia_css_isys_stream_cfg_data stream_cfg variable + */ +int print_stream_config_data( + const struct ia_css_isys_stream_cfg_data *stream_cfg) +{ + unsigned int i; + + verifret(stream_cfg != NULL, EFAULT); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "Print ia_css_isys_stream_cfg_data stream_cfg\n" + "-------------------------------------------------------\n"); + IA_CSS_TRACE_5(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_isl_use = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_stream_source = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_mipi_vc = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->nof_input_pins = %d\n" + 
"\t\t\tia_css_isys_stream_cfg_data->nof_output_pins = %d\n" + , stream_cfg->isl_use + , stream_cfg->src + , stream_cfg->vc + , stream_cfg->nof_input_pins + , stream_cfg->nof_output_pins); + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->send_irq_sof_discarded = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->send_irq_eof_discarded = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->send_resp_sof_discarded = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->send_resp_eof_discarded = %d\n" + , stream_cfg->send_irq_sof_discarded + , stream_cfg->send_irq_eof_discarded + , stream_cfg->send_resp_sof_discarded + , stream_cfg->send_resp_eof_discarded); + for (i = 0; i < stream_cfg->nof_input_pins; i++) { + IA_CSS_TRACE_6(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_input_pin_info[i = %d].ia_css_isys_mipi_data_type = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_input_pin_info[i = %d].ia_css_isys_resolution.width = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_input_pin_info[i = %d].ia_css_isys_resolution.height = %d\n" + , i + , stream_cfg->input_pins[i].dt + , i + , stream_cfg->input_pins[i].input_res.width + , i + , stream_cfg->input_pins[i].input_res.height); + IA_CSS_TRACE_2(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_input_pin_info[i = %d].ia_css_isys_mipi_store_mode = %d\n" + , i + , stream_cfg->input_pins[i].mipi_store_mode); + } + for (i = 0; i < N_IA_CSS_ISYS_CROPPING_LOCATION; i++) { + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_cropping[i = %d].top_offset = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_cropping[i = %d].left_offset = %d\n" + , i + , stream_cfg->crop[i].top_offset + , i + , stream_cfg->crop[i].left_offset); + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_cropping[i = %d].bottom_offset = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_cropping[i = %d].right_offset = %d\n" + , i + , stream_cfg->crop[i].bottom_offset + , i + , stream_cfg->crop[i].right_offset); + } + for (i = 0; i < stream_cfg->nof_output_pins; i++) { + IA_CSS_TRACE_6(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].ia_css_isys_pin_type = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].ia_css_isys_frame_format_type = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].input_pin_id = %d\n" + , i + , stream_cfg->output_pins[i].pt + , i + , stream_cfg->output_pins[i].ft + , i + , stream_cfg->output_pins[i].input_pin_id); + IA_CSS_TRACE_6(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].watermark_in_lines = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].send_irq = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].stride = %d\n" + , i + , stream_cfg->output_pins[i].watermark_in_lines + , i + , stream_cfg->output_pins[i].send_irq + , i + , stream_cfg->output_pins[i].stride); + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].ia_css_isys_resolution.width = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].ia_css_isys_resolution.height = %d\n" + , i + , stream_cfg->output_pins[i].output_res.width + , i + , stream_cfg->output_pins[i].output_res.height); + } + for (i = 0; i < N_IA_CSS_ISYS_RESOLUTION_INFO; i++) { + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + 
"\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.ia_css_isys_resolution[i = %d].width = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.ia_css_isys_resolution[i = %d].height = %d\n" + , i + , stream_cfg->isa_cfg.isa_res[i].width + , i + , stream_cfg->isa_cfg.isa_res[i].height); + } + IA_CSS_TRACE_7(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.blc_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.lsc_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.dpc_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.downscaler_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.awb_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.af_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.ae_enabled = %d\n" + , stream_cfg->isa_cfg.blc_enabled + , stream_cfg->isa_cfg.lsc_enabled + , stream_cfg->isa_cfg.dpc_enabled + , stream_cfg->isa_cfg.downscaler_enabled + , stream_cfg->isa_cfg.awb_enabled + , stream_cfg->isa_cfg.af_enabled + , stream_cfg->isa_cfg.ae_enabled); + + IA_CSS_TRACE_1(ISYSAPI, VERBOSE, + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.paf_type = %d\n" + , stream_cfg->isa_cfg.paf_type); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "-------------------------------------------------------\n"); + return 0; +} + +/** + * print_isys_frame_buff_set - formatted print function for + * struct ia_css_isys_frame_buff_set *next_frame variable + */ +int print_isys_frame_buff_set( + const struct ia_css_isys_frame_buff_set *next_frame, + const unsigned int nof_output_pins) +{ + unsigned int i; + + verifret(next_frame != NULL, EFAULT); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "Print ia_css_isys_frame_buff_set *next_frame\n" + "-------------------------------------------------------\n"); + for (i = 0; i < nof_output_pins; i++) { + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_frame_buff_set->ia_css_isys_output_pin_payload[i = %d].ia_css_return_token = %016lxu\n" + "\t\t\tia_css_isys_frame_buff_set->ia_css_isys_output_pin_payload[i = %d].ia_css_input_buffer_css_address = %08xu\n" + , i + , (unsigned long int) + next_frame->output_pins[i].out_buf_id + , i + , next_frame->output_pins[i].addr); + } + IA_CSS_TRACE_2(ISYSAPI, VERBOSE, + "\tia_css_isys_frame_buff_set->process_group_light.ia_css_return_token = %016lxu\n" + "\t\t\tia_css_isys_frame_buff_set->process_group_light.ia_css_input_buffer_css_address = %08xu\n" + , (unsigned long int) + next_frame->process_group_light.param_buf_id + , next_frame->process_group_light.addr); + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_frame_buff_set->send_irq_sof = %d\n" + "\t\t\tia_css_isys_frame_buff_set->send_irq_eof = %d\n" + "\t\t\tia_css_isys_frame_buff_set->send_resp_sof = %d\n" + "\t\t\tia_css_isys_frame_buff_set->send_resp_eof = %d\n" + , (int) next_frame->send_irq_sof + , (int) next_frame->send_irq_eof + , (int) next_frame->send_resp_sof + , (int) next_frame->send_resp_eof); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "-------------------------------------------------------\n"); + return 0; +} + +/** + * print_isys_resp_info - formatted print function for + * struct ia_css_isys_frame_buff_set *next_frame variable + */ +int print_isys_resp_info(struct ia_css_isys_resp_info *received_response) +{ + verifret(received_response != NULL, EFAULT); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ISYS_RESPONSE_INFO\n" + "-------------------------------------------------------\n"); + switch 
(received_response->type) { + case IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_START_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_STOP_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_STOP_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_FLUSH_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_FLUSH_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CLOSE_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_CLOSE_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_PIN_DATA_WATERMARK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_PIN_DATA_WATERMARK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_SOF: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_FRAME_SOF\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_EOF: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_FRAME_EOF\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_PIN_DATA_SKIPPED: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_PIN_DATA_SKIPPED\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED\n"); + break; + default: + IA_CSS_TRACE_0(ISYSAPI, ERROR, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = INVALID\n"); + break; + } + + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.type = %d\n" + "\t\t\tia_css_isys_resp_info.stream_handle = %d\n" + "\t\t\tia_css_isys_resp_info.time_stamp[0] = %d\n" + 
"\t\t\tia_css_isys_resp_info.time_stamp[1] = %d\n", + received_response->type, + received_response->stream_handle, + received_response->timestamp[0], + received_response->timestamp[1]); + IA_CSS_TRACE_7(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.error = %d\n" + "\t\t\tia_css_isys_resp_info.error_details = %d\n" + "\t\t\tia_css_isys_resp_info.pin.out_buf_id = %016llxu\n" + "\t\t\tia_css_isys_resp_info.pin.addr = %016llxu\n" + "\t\t\tia_css_isys_resp_info.pin_id = %d\n" + "\t\t\tia_css_isys_resp_info.frame_counter = %d\n," + "\t\t\tia_css_isys_resp_info.written_direct = %d\n", + received_response->error, + received_response->error_details, + (unsigned long long)received_response->pin.out_buf_id, + (unsigned long long)received_response->pin.addr, + received_response->pin_id, + received_response->frame_counter, + received_response->written_direct); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "------------------------------------------------------\n"); + + return 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public_trace.h new file mode 100644 index 000000000000..5b6508058fd6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isys_public_trace.h @@ -0,0 +1,55 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_ISYS_PUBLIC_TRACE_H
+#define __IA_CSS_ISYS_PUBLIC_TRACE_H
+
+#include "ia_css_isysapi_trace.h"
+
+#include "ia_css_isysapi_types.h"
+
+#include "ia_css_isysapi.h"
+
+#include "ia_css_isys_private.h"
+/**
+ * print_handle_context - formatted print function for
+ * struct ia_css_isys_context *ctx variable
+ */
+int print_handle_context(struct ia_css_isys_context *ctx);
+
+/**
+ * print_device_config_data - formatted print function for
+ * struct ia_css_isys_device_cfg_data *config variable
+ */
+int print_device_config_data(const struct ia_css_isys_device_cfg_data *config);
+/**
+ * print_stream_config_data - formatted print function for
+ * ia_css_isys_stream_cfg_data stream_cfg variable
+ */
+int print_stream_config_data(
+ const struct ia_css_isys_stream_cfg_data *stream_cfg);
+/**
+ * print_isys_frame_buff_set - formatted print function for
+ * struct ia_css_isys_frame_buff_set *next_frame variable
+ */
+int print_isys_frame_buff_set(
+ const struct ia_css_isys_frame_buff_set *next_frame,
+ const unsigned int nof_output_pins);
+/**
+ * print_isys_resp_info - formatted print function for
+ * struct ia_css_isys_resp_info *received_response variable
+ */
+int print_isys_resp_info(struct ia_css_isys_resp_info *received_response);
+
+#endif /* __IA_CSS_ISYS_PUBLIC_TRACE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isysapi_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isysapi_trace.h
new file mode 100644
index 000000000000..c6b944f245b1
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/isysapi/src/ia_css_isysapi_trace.h
@@ -0,0 +1,79 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IA_CSS_ISYSAPI_TRACE_H +#define __IA_CSS_ISYSAPI_TRACE_H + +#include "ia_css_trace.h" + +#define ISYSAPI_TRACE_LOG_LEVEL_OFF 0 +#define ISYSAPI_TRACE_LOG_LEVEL_NORMAL 1 +#define ISYSAPI_TRACE_LOG_LEVEL_DEBUG 2 + +/* ISYSAPI and all the submodules in ISYSAPI will have + * the default tracing level set to this level + */ +#define ISYSAPI_TRACE_CONFIG_DEFAULT ISYSAPI_TRACE_LOG_LEVEL_NORMAL + +/* In case ISYSAPI_TRACE_CONFIG is not defined, set it to default level */ +#if !defined(ISYSAPI_TRACE_CONFIG) + #define ISYSAPI_TRACE_CONFIG ISYSAPI_TRACE_CONFIG_DEFAULT +#endif + +/* ISYSAPI Module tracing backend is mapped to + * TUNIT tracing for target platforms + */ +#ifdef IA_CSS_TRACE_PLATFORM_CELL + #ifndef HRT_CSIM + #define ISYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE + #else + #define ISYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + #endif +#else + #define ISYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#endif + +#if (defined(ISYSAPI_TRACE_CONFIG)) + /* TRACE_OFF */ + #if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_OFF + #define ISYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED + /* TRACE_NORMAL */ + #elif ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_NORMAL + #define ISYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED + /* TRACE_DEBUG */ + #elif ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + #define ISYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No ISYSAPI_TRACE_CONFIG Tracing level defined" + #endif +#else + #error "ISYSAPI_TRACE_CONFIG not defined" +#endif + +#endif /* __IA_CSS_ISYSAPI_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir.h new file mode 100644 index 000000000000..6bc2fa708d43 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir.h @@ -0,0 +1,100 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_PKG_DIR_H
+#define __IA_CSS_PKG_DIR_H
+
+#include "ia_css_pkg_dir_storage_class.h"
+#include "ia_css_pkg_dir_types.h"
+#include "type_support.h"
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+const ia_css_pkg_dir_entry_t *ia_css_pkg_dir_get_entry(
+ const ia_css_pkg_dir_t *pkg_dir,
+ uint32_t index
+);
+
+/* The user is expected to call the verify function manually;
+ * the other functions do not call it internally.
+ */
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+int ia_css_pkg_dir_verify_header(
+ const ia_css_pkg_dir_entry_t *pkg_dir_header
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_get_num_entries(
+ const ia_css_pkg_dir_entry_t *pkg_dir_header
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_get_size_in_bytes(
+ const ia_css_pkg_dir_entry_t *pkg_dir_header
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+enum ia_css_pkg_dir_version ia_css_pkg_dir_get_version(
+ const ia_css_pkg_dir_entry_t *pkg_dir_header
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint16_t ia_css_pkg_dir_set_version(
+ ia_css_pkg_dir_entry_t *pkg_dir_header,
+ enum ia_css_pkg_dir_version version
+);
+
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_entry_get_address_lo(
+ const ia_css_pkg_dir_entry_t *entry
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_entry_get_address_hi(
+ const ia_css_pkg_dir_entry_t *entry
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_entry_get_size(
+ const ia_css_pkg_dir_entry_t *entry
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint16_t ia_css_pkg_dir_entry_get_version(
+ const ia_css_pkg_dir_entry_t *entry
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint8_t ia_css_pkg_dir_entry_get_type(
+ const ia_css_pkg_dir_entry_t *entry
+);
+
+/* Get the address of the specified entry in the PKG_DIR
+ * Note: This function expects the complete PKG_DIR in the same memory space
+ * and the entries contain offsets, not addresses.
+ */
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+void *ia_css_pkg_dir_get_entry_address(
+ const ia_css_pkg_dir_t *pkg_dir,
+ uint32_t index
+);
+
+#ifdef __IA_CSS_PKG_DIR_INLINE__
+
+#include "ia_css_pkg_dir_impl.h"
+
+#endif
+
+#endif /* __IA_CSS_PKG_DIR_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_iunit.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_iunit.h
new file mode 100644
index 000000000000..2e45eaa52727
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_iunit.h
@@ -0,0 +1,47 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PKG_DIR_IUNIT_H
+#define __IA_CSS_PKG_DIR_IUNIT_H
+
+/* In bootflow, pkg_dir only supports up to 16 entries:
+ * pkg_dir_header + Psys_server pg + Isys_server pg + 13 Client pg
+ */
+
+enum {
+ IA_CSS_PKG_DIR_SIZE = 16,
+ IA_CSS_PKG_DIR_ENTRIES = IA_CSS_PKG_DIR_SIZE - 1
+};
+
+#define IUNIT_MAX_CLIENT_PKG_ENTRIES 13
+
+/* Example assignment of unique identifiers for the FW components
+ * This should match the identifiers in the manifest
+ */
+enum ia_css_pkg_dir_entry_type {
+ IA_CSS_PKG_DIR_HEADER = 0,
+ IA_CSS_PKG_DIR_PSYS_SERVER_PG,
+ IA_CSS_PKG_DIR_ISYS_SERVER_PG,
+ IA_CSS_PKG_DIR_CLIENT_PG
+};
+
+/* Fixed entries in the package directory */
+enum ia_css_pkg_dir_index {
+ IA_CSS_PKG_DIR_PSYS_INDEX = 0,
+ IA_CSS_PKG_DIR_ISYS_INDEX = 1,
+ IA_CSS_PKG_DIR_CLIENT_0 = 2
+};
+
+#endif /* __IA_CSS_PKG_DIR_IUNIT_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_storage_class.h
new file mode 100644
index 000000000000..27e87d1e6774
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_storage_class.h
@@ -0,0 +1,30 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PKG_DIR_STORAGE_CLASS_H
+#define __IA_CSS_PKG_DIR_STORAGE_CLASS_H
+
+
+#include "storage_class.h"
+
+#ifndef __IA_CSS_PKG_DIR_INLINE__
+#define IA_CSS_PKG_DIR_STORAGE_CLASS_H STORAGE_CLASS_EXTERN
+#define IA_CSS_PKG_DIR_STORAGE_CLASS_C
+#else
+#define IA_CSS_PKG_DIR_STORAGE_CLASS_H STORAGE_CLASS_INLINE
+#define IA_CSS_PKG_DIR_STORAGE_CLASS_C STORAGE_CLASS_INLINE
+#endif
+
+#endif /* __IA_CSS_PKG_DIR_STORAGE_CLASS_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_types.h
new file mode 100644
index 000000000000..ec0ee18b41e1
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_types.h
@@ -0,0 +1,42 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IA_CSS_PKG_DIR_TYPES_H +#define __IA_CSS_PKG_DIR_TYPES_H + +#include "type_support.h" + +struct ia_css_pkg_dir_entry { + uint32_t address[2]; + uint32_t size; + uint16_t version; + uint8_t type; + uint8_t unused; +}; + +typedef void ia_css_pkg_dir_t; +typedef struct ia_css_pkg_dir_entry ia_css_pkg_dir_entry_t; + +/* The version field of the pkg_dir header defines + * if entries contain offsets or pointers + */ +/* This is temporary, until all pkg_dirs use pointers */ +enum ia_css_pkg_dir_version { + IA_CSS_PKG_DIR_POINTER, + IA_CSS_PKG_DIR_OFFSET +}; + + +#endif /* __IA_CSS_PKG_DIR_TYPES_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/pkg_dir.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/pkg_dir.mk new file mode 100644 index 000000000000..a4b4aaa4995e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/pkg_dir.mk @@ -0,0 +1,30 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is PKG DIR + +PKG_DIR_DIR = $${MODULES_DIR}/pkg_dir +PKG_DIR_INTERFACE = $(PKG_DIR_DIR)/interface +PKG_DIR_SOURCES = $(PKG_DIR_DIR)/src + +PKG_DIR_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir.c +PKG_DIR_CPPFLAGS = -I$(PKG_DIR_INTERFACE) +PKG_DIR_CPPFLAGS += -I$(PKG_DIR_SOURCES) +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/../isp/kernels/io_ls/common +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/fw_abi_common_types/ipu +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/fw_abi_common_types/ipu/$(FW_ABI_IPU_TYPES_VERSION) + +PKG_DIR_CREATE_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir_create.c +PKG_DIR_UPDATE_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir_update.c + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir.c new file mode 100644 index 000000000000..348b56833e06 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir.c @@ -0,0 +1,27 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifdef __IA_CSS_PKG_DIR_INLINE__ + +#include "storage_class.h" + +STORAGE_CLASS_INLINE int __ia_css_pkg_dir_avoid_warning_on_empty_file(void) +{ + return 0; +} + +#else +#include "ia_css_pkg_dir_impl.h" + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir_impl.h new file mode 100644 index 000000000000..ca5564c7d990 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir_impl.h @@ -0,0 +1,202 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PKG_DIR_IMPL_H +#define __IA_CSS_PKG_DIR_IMPL_H + +#include "ia_css_pkg_dir.h" +#include "ia_css_pkg_dir_int.h" +#include "error_support.h" +#include "type_support.h" +#include "assert_support.h" + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +const ia_css_pkg_dir_entry_t *ia_css_pkg_dir_get_entry( + const ia_css_pkg_dir_t *pkg_dir, + uint32_t index) +{ + DECLARE_ERRVAL + struct ia_css_pkg_dir_entry *pkg_dir_header = NULL; + + verifexitval(pkg_dir != NULL, EFAULT); + + pkg_dir_header = (struct ia_css_pkg_dir_entry *)pkg_dir; + + /* First entry of the structure is the header, skip that */ + index++; + verifexitval(index < pkg_dir_header->size, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + return NULL; + } + return &(pkg_dir_header[index]); +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +int ia_css_pkg_dir_verify_header(const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + verifexitval(pkg_dir_header != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + return -1; + } + return ((pkg_dir_header->address[0] == PKG_DIR_MAGIC_VAL_0) + && (pkg_dir_header->address[1] == PKG_DIR_MAGIC_VAL_1)) ? 
+ 0 : -1; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_get_num_entries( + const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + uint32_t size = 0; + + verifexitval(pkg_dir_header != NULL, EFAULT); + size = pkg_dir_header->size; + verifexitval(size > 0, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return size - 1; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +enum ia_css_pkg_dir_version +ia_css_pkg_dir_get_version(const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + assert(pkg_dir_header != NULL); + return pkg_dir_header->version; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint16_t ia_css_pkg_dir_set_version(ia_css_pkg_dir_entry_t *pkg_dir_header, + enum ia_css_pkg_dir_version version) +{ + DECLARE_ERRVAL + + verifexitval(pkg_dir_header != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 1; + } + pkg_dir_header->version = version; + return 0; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_get_size_in_bytes( + const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + + verifexitval(pkg_dir_header != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return sizeof(struct ia_css_pkg_dir_entry) * pkg_dir_header->size; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_address_lo( + const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->address[0]; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_address_hi( + const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->address[1]; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_size(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->size; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint16_t ia_css_pkg_dir_entry_get_version(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->version; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint8_t ia_css_pkg_dir_entry_get_type(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->type; +} + + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +void *ia_css_pkg_dir_get_entry_address(const ia_css_pkg_dir_t *pkg_dir, + uint32_t index) +{ + void *entry_blob = NULL; + const ia_css_pkg_dir_entry_t *pkg_dir_entry = + ia_css_pkg_dir_get_entry(pkg_dir, index-1); + + if ((pkg_dir_entry != NULL) && + (ia_css_pkg_dir_entry_get_size(pkg_dir_entry) > 0)) { + assert(ia_css_pkg_dir_entry_get_address_hi(pkg_dir_entry) == 0); + entry_blob = (void *)((char *)pkg_dir + + ia_css_pkg_dir_entry_get_address_lo(pkg_dir_entry)); + } + return entry_blob; +} + +#endif /* __IA_CSS_PKG_DIR_IMPL_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir_int.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir_int.h new file mode 100644 index 000000000000..3a50245261e5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/pkg_dir/src/ia_css_pkg_dir_int.h @@ -0,0 +1,50 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PKG_DIR_INT_H +#define __IA_CSS_PKG_DIR_INT_H + +/* + * Package Dir structure as specified in CSE FAS + * + * PKG DIR Header + * Qword 63:56 55 54:48 47:32 31:24 23:0 + * 0 "_IUPKDR_" + * 1 Rsvd Rsvd Type Version Rsvd Size + * + * Version: Version of the Structure + * Size: Size of the entire table (including header) in 16 byte chunks + * Type: Must be 0 for header + * + * Figure 13: PKG DIR Header + * + * + * PKG DIR Entry + * Qword 63:56 55 54:48 47:32 31:24 23:0 + * N Address/Offset + * N+1 Rsvd Rsvd Type Version Rsvd Size + * + * Version: Version # of the Component + * Size: Size of the component in bytes + * Type: Component Identifier + */ + +#define PKG_DIR_SIZE_BITS 24 +#define PKG_DIR_TYPE_BITS 7 + +#define PKG_DIR_MAGIC_VAL_1 (('_' << 24) | ('I' << 16) | ('U' << 8) | 'P') +#define PKG_DIR_MAGIC_VAL_0 (('K' << 24) | ('D' << 16) | ('R' << 8) | '_') + +#endif /* __IA_CSS_PKG_DIR_INT_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/port_env_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/port_env_struct.h new file mode 100644 index 000000000000..4d39a4739a8b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/port_env_struct.h @@ -0,0 +1,24 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PORT_ENV_STRUCT_H +#define __PORT_ENV_STRUCT_H + +struct port_env { + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; +}; + +#endif /* __PORT_ENV_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/queue.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/queue.h new file mode 100644 index 000000000000..b233ab3baf01 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/queue.h @@ -0,0 +1,40 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __QUEUE_H +#define __QUEUE_H + +#include "queue_struct.h" +#include "port_env_struct.h" + +/* + * SYS queues are created by the host + * SYS queues cannot be accessed through the queue interface + * To send data into a queue a send_port must be opened. + * To receive data from a queue, a recv_port must be opened. + */ + +/* return required buffer size for queue */ +unsigned int +sys_queue_buf_size(unsigned int size, unsigned int token_size); + +/* + * initialize a queue that can hold at least 'size' tokens of + * 'token_size' bytes. + */ +void +sys_queue_init(struct sys_queue *q, unsigned int size, + unsigned int token_size, struct sys_queue_res *res); + +#endif /* __QUEUE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/queue_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/queue_struct.h new file mode 100644 index 000000000000..ef48fcfded2b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/queue_struct.h @@ -0,0 +1,47 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __QUEUE_STRUCT_H +#define __QUEUE_STRUCT_H + +/* queue description, shared between sender and receiver */ + +#include "type_support.h" + +#ifdef __VIED_CELL +typedef struct {uint32_t v[2]; } host_buffer_address_t; +#else +typedef uint64_t host_buffer_address_t; +#endif + +typedef uint32_t vied_buffer_address_t; + + +struct sys_queue { + host_buffer_address_t host_address; + vied_buffer_address_t vied_address; + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* reg no in subsystem's regmem */ + unsigned int rd_reg; + unsigned int _align; +}; + +struct sys_queue_res { + host_buffer_address_t host_address; + vied_buffer_address_t vied_address; + unsigned int reg; +}; + +#endif /* __QUEUE_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/recv_port.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/recv_port.h new file mode 100644 index 000000000000..cce253b26668 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/recv_port.h @@ -0,0 +1,34 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __RECV_PORT_H
+#define __RECV_PORT_H
+
+
+struct recv_port;
+struct sys_queue;
+struct port_env;
+
+void
+recv_port_open(struct recv_port *p, const struct sys_queue *q,
+ const struct port_env *env);
+
+unsigned int
+recv_port_available(const struct recv_port *p);
+
+unsigned int
+recv_port_transfer(const struct recv_port *p, void *data);
+
+
+#endif /* __RECV_PORT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/recv_port_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/recv_port_struct.h
new file mode 100644
index 000000000000..52ec563b13cf
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/recv_port_struct.h
@@ -0,0 +1,32 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __RECV_PORT_STRUCT_H
+#define __RECV_PORT_STRUCT_H
+
+#include "buffer_type.h"
+
+struct recv_port {
+ buffer_address buffer; /* address of buffer in DDR */
+ unsigned int size;
+ unsigned int token_size;
+ unsigned int wr_reg; /* index of write pointer located in regmem */
+ unsigned int rd_reg; /* index of read pointer located in regmem */
+
+ unsigned int mmid;
+ unsigned int ssid;
+ unsigned int mem_addr; /* address of memory containing regmem */
+};
+
+#endif /* __RECV_PORT_STRUCT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/send_port.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/send_port.h
new file mode 100644
index 000000000000..04a160f3f019
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/send_port.h
@@ -0,0 +1,52 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __SEND_PORT_H
+#define __SEND_PORT_H
+
+
+/*
+ * A send port can be used to send tokens into a queue.
+ * The interface can be used on any type of processor (host, SP, ...)
+ */
+
+struct send_port;
+struct sys_queue;
+struct port_env;
+
+/*
+ * Open a send port on a queue. After the port is opened, tokens can be sent
+ */
+void
+send_port_open(struct send_port *p, const struct sys_queue *q,
+ const struct port_env *env);
+
+/*
+ * Determine how many tokens can be sent
+ */
+unsigned int
+send_port_available(const struct send_port *p);
+
+/*
+ * Send a token via a send port. The function returns the number of
+ * tokens that have been sent:
+ * 1: the token was accepted
+ * 0: the token was not accepted (full queue)
+ * The size of a token is determined at initialization.
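+ *
+ * Illustrative usage sketch (an assumption for documentation, not part
+ * of the API contract): with 'p' opened via send_port_open() and
+ * 'token' pointing to exactly token_size bytes,
+ *
+ *	while (send_port_transfer(p, token) == 0)
+ *		;	// queue full: wait until the receiver frees a slot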
+ */ +unsigned int +send_port_transfer(const struct send_port *p, const void *data); + + +#endif /* __SEND_PORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/send_port_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/send_port_struct.h new file mode 100644 index 000000000000..f834c62bc3db --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/interface/send_port_struct.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __SEND_PORT_STRUCT_H +#define __SEND_PORT_STRUCT_H + +#include "buffer_type.h" + +struct send_port { + buffer_address buffer; + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* index of write pointer in regmem */ + unsigned int rd_reg; /* index of read pointer in regmem */ + + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; +}; + +#endif /* __SEND_PORT_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/port.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/port.mk new file mode 100644 index 000000000000..b3801247802e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/port.mk @@ -0,0 +1,31 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is PORT + +PORT_DIR=$${MODULES_DIR}/port + +PORT_INTERFACE=$(PORT_DIR)/interface +PORT_SOURCES1=$(PORT_DIR)/src + +PORT_HOST_FILES += $(PORT_SOURCES1)/send_port.c +PORT_HOST_FILES += $(PORT_SOURCES1)/recv_port.c +PORT_HOST_FILES += $(PORT_SOURCES1)/queue.c + +PORT_HOST_CPPFLAGS += -I$(PORT_INTERFACE) + +PORT_FW_FILES += $(PORT_SOURCES1)/send_port.c +PORT_FW_FILES += $(PORT_SOURCES1)/recv_port.c + +PORT_FW_CPPFLAGS += -I$(PORT_INTERFACE) diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/queue.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/queue.c new file mode 100644 index 000000000000..eeec99dfe2d0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/queue.c @@ -0,0 +1,47 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for
+ * more details.
+*/
+
+#include "queue.h"
+
+#include "regmem_access.h"
+#include "port_env_struct.h"
+
+unsigned int sys_queue_buf_size(unsigned int size, unsigned int token_size)
+{
+ return (size + 1) * token_size;
+}
+
+void
+sys_queue_init(struct sys_queue *q, unsigned int size, unsigned int token_size,
+ struct sys_queue_res *res)
+{
+ unsigned int buf_size;
+
+ q->size = size + 1;
+ q->token_size = token_size;
+ buf_size = sys_queue_buf_size(size, token_size);
+
+ /* acquire the shared buffer space */
+ q->host_address = res->host_address;
+ res->host_address += buf_size;
+ q->vied_address = res->vied_address;
+ res->vied_address += buf_size;
+
+ /* acquire the shared read and write pointers */
+ q->wr_reg = res->reg;
+ res->reg++;
+ q->rd_reg = res->reg;
+ res->reg++;
+
+}
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/recv_port.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/recv_port.c
new file mode 100644
index 000000000000..31b36e9ceafb
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/recv_port.c
@@ -0,0 +1,95 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#include "recv_port.h"
+#include "port_env_struct.h" /* for port_env */
+#include "queue_struct.h" /* for sys_queue */
+#include "recv_port_struct.h" /* for recv_port */
+#include "buffer_access.h" /* for buffer_load, buffer_address */
+#include "regmem_access.h" /* for regmem_load_32, regmem_store_32 */
+#include "storage_class.h" /* for STORAGE_CLASS_INLINE */
+#include "math_support.h" /* for OP_std_modadd */
+#include "type_support.h" /* for HOST_ADDRESS */
+
+#ifndef __VIED_CELL
+#include "cpu_mem_support.h" /* for ia_css_cpu_mem_cache_invalidate */
+#endif
+
+void
+recv_port_open(struct recv_port *p, const struct sys_queue *q,
+ const struct port_env *env)
+{
+ p->mmid = env->mmid;
+ p->ssid = env->ssid;
+ p->mem_addr = env->mem_addr;
+
+ p->size = q->size;
+ p->token_size = q->token_size;
+ p->wr_reg = q->wr_reg;
+ p->rd_reg = q->rd_reg;
+
+#ifdef __VIED_CELL
+ p->buffer = q->vied_address;
+#else
+ p->buffer = q->host_address;
+#endif
+}
+
+STORAGE_CLASS_INLINE unsigned int
+recv_port_index(const struct recv_port *p, unsigned int i)
+{
+ unsigned int rd = regmem_load_32(p->mem_addr, p->rd_reg, p->ssid);
+
+ return OP_std_modadd(rd, i, p->size);
+}
+
+unsigned int
+recv_port_available(const struct recv_port *p)
+{
+ int wr = (int)regmem_load_32(p->mem_addr, p->wr_reg, p->ssid);
+ int rd = (int)regmem_load_32(p->mem_addr, p->rd_reg, p->ssid);
+
+ return OP_std_modadd(wr, -rd, p->size);
+}
+
+STORAGE_CLASS_INLINE void
+recv_port_copy(const struct recv_port *p, unsigned int i, void *data)
+{
+ unsigned int rd = recv_port_index(p, i);
+ unsigned int token_size = p->token_size;
+ buffer_address addr = p->buffer + (rd * token_size);
+#ifndef __VIED_CELL
+ ia_css_cpu_mem_cache_invalidate((void *)HOST_ADDRESS(p->buffer),
+ token_size*p->size);
+#endif
+ buffer_load(addr, data, token_size, p->mmid);
+}
+
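+/*
+ * Illustrative host-side usage sketch (not part of this file's API):
+ * the queue is a single-producer/single-consumer ring with size + 1
+ * slots (see sys_queue_init()), so recv_port_available() == 0 means
+ * empty while one slot always stays unused to mark a full queue.
+ * Draining all pending tokens could look like:
+ *
+ *	struct recv_port port;
+ *	char token[MY_TOKEN_SIZE];	// MY_TOKEN_SIZE is hypothetical;
+ *					// it must equal q->token_size
+ *
+ *	recv_port_open(&port, q, env);
+ *	while (recv_port_transfer(&port, token))
+ *		;	// consume one token per iteration
+ */
+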
+STORAGE_CLASS_INLINE void +recv_port_release(const struct recv_port *p, unsigned int i) +{ + unsigned int rd = recv_port_index(p, i); + + regmem_store_32(p->mem_addr, p->rd_reg, rd, p->ssid); +} + +unsigned int +recv_port_transfer(const struct recv_port *p, void *data) +{ + if (!recv_port_available(p)) + return 0; + recv_port_copy(p, 0, data); + recv_port_release(p, 1); + return 1; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/send_port.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/send_port.c new file mode 100644 index 000000000000..8d1fba08c5d5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/port/src/send_port.c @@ -0,0 +1,94 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "send_port.h" +#include "queue_struct.h" /* for sys_queue */ +#include "send_port_struct.h" /* for send_port */ +#include "port_env_struct.h" /* for port_env */ +#include "regmem_access.h" /* for regmem_load_32, regmem_store_32 */ +#include "buffer_access.h" /* for buffer_store, buffer_address */ +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "math_support.h" /* for OP_std_modadd */ +#include "type_support.h" /* for HOST_ADDRESS */ + +#ifndef __VIED_CELL +#include "cpu_mem_support.h" /* for ia_css_cpu_mem_cache_flush */ +#endif + +void +send_port_open(struct send_port *p, const struct sys_queue *q, + const struct port_env *env) +{ + p->mmid = env->mmid; + p->ssid = env->ssid; + p->mem_addr = env->mem_addr; + + p->size = q->size; + p->token_size = q->token_size; + p->wr_reg = q->wr_reg; + p->rd_reg = q->rd_reg; +#ifdef __VIED_CELL + p->buffer = q->vied_address; +#else + p->buffer = q->host_address; +#endif +} + +STORAGE_CLASS_INLINE unsigned int +send_port_index(const struct send_port *p, unsigned int i) +{ + unsigned int wr = regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + + return OP_std_modadd(wr, i, p->size); +} + +unsigned int +send_port_available(const struct send_port *p) +{ + int rd = (int)regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + int wr = (int)regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + + return OP_std_modadd(rd, -(wr+1), p->size); +} + +STORAGE_CLASS_INLINE void +send_port_copy(const struct send_port *p, unsigned int i, const void *data) +{ + unsigned int wr = send_port_index(p, i); + unsigned int token_size = p->token_size; + buffer_address addr = p->buffer + (wr * token_size); + + buffer_store(addr, data, token_size, p->mmid); +#ifndef __VIED_CELL + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(addr), token_size); +#endif +} + +STORAGE_CLASS_INLINE void +send_port_release(const struct send_port *p, unsigned int i) +{ + unsigned int wr = send_port_index(p, i); + + regmem_store_32(p->mem_addr, p->wr_reg, wr, p->ssid); +} + +unsigned int +send_port_transfer(const struct send_port *p, const void *data) +{ + if (!send_port_available(p)) + return 0; + send_port_copy(p, 0, data); + send_port_release(p, 1); + return 1; +} diff --git 
a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/isys/bxtB0_gen_reg_dump/ia_css_debug_dump.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/isys/bxtB0_gen_reg_dump/ia_css_debug_dump.c new file mode 100644 index 000000000000..c51d65c8cb64 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/isys/bxtB0_gen_reg_dump/ia_css_debug_dump.c @@ -0,0 +1,15 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#include "ia_css_debug_dump.h" + void ia_css_debug_dump(void) {} \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/isys/bxtB0_gen_reg_dump/ia_css_debug_dump.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/isys/bxtB0_gen_reg_dump/ia_css_debug_dump.h new file mode 100644 index 000000000000..5dd23ddbd180 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/isys/bxtB0_gen_reg_dump/ia_css_debug_dump.h @@ -0,0 +1,17 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#ifndef __IA_CSS_DEBUG_DUMP_H_ + #define __IA_CSS_DEBUG_DUMP_H_ + void ia_css_debug_dump(void); + #endif /* __IA_CSS_DEBUG_DUMP_H_ */ \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/reg_dump_generic_bridge.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/reg_dump_generic_bridge.c new file mode 100644 index 000000000000..dab9c669c182 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/reg_dump/src/reg_dump_generic_bridge.c @@ -0,0 +1,47 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include +#include "ia_css_trace.h" +#ifdef USE_LOGICAL_SSIDS +/* + Logical names can be used to define the SSID. + In order to resolve these names, the following include file should be provided + and the define above should be enabled. +*/ +#include +#endif + +#define REG_DUMP_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#define REG_DUMP_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED + +#ifdef USE_SSID_BUTTRESS +/* Register addresses are as + * seen from the host; these addresses already contain the ISYS or PSYS offset. + */ +#define REG_DUMP_READ_REGISTER(addr)\ + vied_subsystem_load_32(IPU_DEVICE_BUTTRESS, addr) +#else +/* SSID value is defined in test makefiles as either isys0 or psys0 */ +#define REG_DUMP_READ_REGISTER(addr) vied_subsystem_load_32(SSID, addr) +#endif + +#define REG_DUMP_PRINT_0(...) \ +EXPAND_VA_ARGS(IA_CSS_TRACE_0(REG_DUMP, VERBOSE, __VA_ARGS__)) +#define REG_DUMP_PRINT_1(...) \ +EXPAND_VA_ARGS(IA_CSS_TRACE_1(REG_DUMP, VERBOSE, __VA_ARGS__)) +#define EXPAND_VA_ARGS(x) x + +/* Including generated source code for reg_dump */ +#include "ia_css_debug_dump.c" + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/interface/regmem_access.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/interface/regmem_access.h new file mode 100644 index 000000000000..d4576af936f6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/interface/regmem_access.h @@ -0,0 +1,67 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __REGMEM_ACCESS_H +#define __REGMEM_ACCESS_H + +#include "storage_class.h" + +enum regmem_id { + /* pass pkg_dir address to SPC in non-secure mode */ + PKG_DIR_ADDR_REG = 0, + /* pass syscom configuration to SPC */ + SYSCOM_CONFIG_REG = 1, + /* syscom state - modified by SP */ + SYSCOM_STATE_REG = 2, + /* syscom commands - modified by the host */ + SYSCOM_COMMAND_REG = 3, + /* Store interrupt status - updated by SP */ + SYSCOM_IRQ_REG = 4, + /* Store VTL0_ADDR_MASK in trusted secure region - provided by host. */ + SYSCOM_VTL0_ADDR_MASK = 5, +#if HAS_DUAL_CMD_CTX_SUPPORT + /* Initialized if trustlet exists - updated by host */ + TRUSTLET_STATUS = 6, + /* identify if SPC access blocker programming is completed - updated by SP */ + AB_SPC_STATUS = 7, + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 8 +#else + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 6 +#endif +}; + +#if HAS_DUAL_CMD_CTX_SUPPORT +/* Bit 0: for untrusted non-secure DRV driver on VTL0 + * Bit 1: for trusted secure TEE driver on VTL1 + */ +#define SYSCOM_IRQ_VTL0_MASK 0x1 +#define SYSCOM_IRQ_VTL1_MASK 0x2 +#endif + +STORAGE_CLASS_INLINE unsigned int +regmem_load_32(unsigned int mem_address, unsigned int reg, unsigned int ssid); + +STORAGE_CLASS_INLINE void +regmem_store_32(unsigned int mem_address, unsigned int reg, unsigned int value, + unsigned int ssid); + +#ifdef __VIED_CELL +#include "regmem_access_cell.h" +#else +#include "regmem_access_host.h" +#endif + +#endif /* __REGMEM_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/regmem.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/regmem.mk new file mode 100644 index 000000000000..24ebc1c325d8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/regmem.mk @@ -0,0 +1,32 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# +ifndef REGMEM_MK +REGMEM_MK=1 + +# MODULE is REGMEM + +REGMEM_DIR=$${MODULES_DIR}/regmem + +REGMEM_INTERFACE=$(REGMEM_DIR)/interface +REGMEM_SOURCES=$(REGMEM_DIR)/src + +REGMEM_HOST_FILES = +REGMEM_FW_FILES = $(REGMEM_SOURCES)/regmem.c + +REGMEM_CPPFLAGS = -I$(REGMEM_INTERFACE) -I$(REGMEM_SOURCES) +REGMEM_HOST_CPPFLAGS = $(REGMEM_CPPFLAGS) +REGMEM_FW_CPPFLAGS = $(REGMEM_CPPFLAGS) + +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/src/regmem_access_host.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/src/regmem_access_host.h new file mode 100644 index 000000000000..8878d7074fab --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/src/regmem_access_host.h @@ -0,0 +1,41 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __REGMEM_ACCESS_HOST_H +#define __REGMEM_ACCESS_HOST_H + +#include "regmem_access.h" /* implemented interface */ + +#include "storage_class.h" +#include "regmem_const.h" +#include +#include "ia_css_cmem.h" + +STORAGE_CLASS_INLINE unsigned int +regmem_load_32(unsigned int mem_addr, unsigned int reg, unsigned int ssid) +{ + /* No need to add REGMEM_OFFSET, it is already included in mem_addr. */ + return ia_css_cmem_load_32(ssid, mem_addr + (REGMEM_WORD_BYTES*reg)); +} + +STORAGE_CLASS_INLINE void +regmem_store_32(unsigned int mem_addr, unsigned int reg, + unsigned int value, unsigned int ssid) +{ + /* No need to add REGMEM_OFFSET, it is already included in mem_addr. */ + ia_css_cmem_store_32(ssid, mem_addr + (REGMEM_WORD_BYTES*reg), + value); +} + +#endif /* __REGMEM_ACCESS_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/src/regmem_const.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/src/regmem_const.h new file mode 100644 index 000000000000..ac7e3a98a434 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/regmem/src/regmem_const.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __REGMEM_CONST_H +#define __REGMEM_CONST_H + +#ifndef REGMEM_SIZE +#define REGMEM_SIZE (16) +#endif /* REGMEM_SIZE */ +#ifndef REGMEM_OFFSET +#define REGMEM_OFFSET (0) +#endif /* REGMEM_OFFSET */ +#ifndef REGMEM_WORD_BYTES +#define REGMEM_WORD_BYTES (4) +#endif + +#endif /* __REGMEM_CONST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/assert_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/assert_support.h new file mode 100644 index 000000000000..28aed19409b9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/assert_support.h @@ -0,0 +1,200 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __ASSERT_SUPPORT_H +#define __ASSERT_SUPPORT_H + +/* This file provides support for run-time assertions + * and compile-time assertions. + * + * Run-time assertions are provided via the following syntax: + * assert(condition) + * Run-time assertions are disabled using the NDEBUG flag. 
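+ * + * For illustration only (hypothetical call sites, not part of this interface): + * assert(ctx != NULL); + * assert(token_size > 0); + * Both statements compile to ((void)0) when NDEBUG is defined, so they + * add no code to release builds.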
+ * + * Compile time assertions are provided via the following syntax: + * COMPILATION_ERROR_IF(condition); + * A compile-time assertion will fail to compile if the condition is true. + * The condition must be constant, such that it can be evaluated + * at compile time. + * + * OP___assert is deprecated. + */ + +#define IA_CSS_ASSERT(expr) assert(expr) + +#ifdef __KLOCWORK__ +/* Klocwork does not see that assert will lead to an abort; + * as there is no good way to tell this to KW and the code + * should not depend on assert to function (actually the assert + * could be disabled in a release build) it was decided to + * disable the assert for KW scans (by defining NDEBUG) + * see also: + * http://www.klocwork.com/products/documentation/current/ + * Tuning_C/C%2B%2B_analysis#Assertions + */ +#define NDEBUG +#endif /* __KLOCWORK__ */ + +/** + * The following macro can help to test the size of a struct at compile + * time rather than at run-time. It does not work for all compilers; see + * below. + * + * Depending on the value of 'condition', the following macro is expanded to: + * - condition==true: + * an expression containing an array declaration with negative size, + * usually resulting in a compilation error + * - condition==false: + * (void) 1; // C statement with no effect + * + * example: + * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != + * SIZE_OF_HOST_SP_QUEUES_STRUCT); + * + * verify that the macro indeed triggers a compilation error with your compiler: + * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != + * (sizeof(struct host_sp_queues)+1) ); + * + * Not all compilers will trigger an error with this macro; + * use a search engine to search for BUILD_BUG_ON to find other methods. + */ +#define COMPILATION_ERROR_IF(condition) \ +((void)sizeof(char[1 - 2*!!(condition)])) + +/* Compile time assertion */ +#ifndef CT_ASSERT +#define CT_ASSERT(cnd) ((void)sizeof(char[(cnd)?1 : -1])) +#endif /* CT_ASSERT */ + +#ifdef NDEBUG + +#define assert(cnd) ((void)0) + +#else + +#include "storage_class.h" + +#if defined(_MSC_VER) +#ifdef _KERNEL_MODE +/* Windows kernel mode compilation */ +#include +#define assert(cnd) ASSERT(cnd) +#else +/* Windows usermode compilation */ +#include +#endif + +#elif defined(__HIVECC) + +/* + * target: assert disabled + * sched: assert enabled only when DEBUG_SCHED is defined + * unsched: assert enabled + */ +#if defined(HRT_HW) +#define assert(cnd) ((void)0) +#elif defined(HRT_SCHED) && !defined(DEBUG_SCHED) +#define assert(cnd) ((void)0) +#elif defined(PIPE_GENERATION) +#define assert(cnd) ((void)0) +#else +#include +#define assert(cnd) OP___csim_assert(cnd) +#endif + +#elif defined(__KERNEL__) +#include + +#ifndef KERNEL_ASSERT_TO_BUG +#ifndef KERNEL_ASSERT_TO_BUG_ON +#ifndef KERNEL_ASSERT_TO_WARN_ON +#ifndef KERNEL_ASSERT_TO_WARN_ON_INF_LOOP +#ifndef KERNEL_ASSERT_UNDEFINED +/* Default */ +#define KERNEL_ASSERT_TO_BUG +#endif /*KERNEL_ASSERT_UNDEFINED*/ +#endif /*KERNEL_ASSERT_TO_WARN_ON_INF_LOOP*/ +#endif /*KERNEL_ASSERT_TO_WARN_ON*/ +#endif /*KERNEL_ASSERT_TO_BUG_ON*/ +#endif /*KERNEL_ASSERT_TO_BUG*/ + +#ifdef KERNEL_ASSERT_TO_BUG +/* TODO: it would be cleaner to use this: + * #define assert(cnd) BUG_ON(!(cnd)) + * but that causes many compiler warnings (==errors) under Android + * because it seems that the BUG_ON() macro is not seen as a check by + * gcc like the BUG() macro is. 
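+ * As a sketch of what the default mapping below does: with + * KERNEL_ASSERT_TO_BUG, assert(p != NULL) expands to + * do { if (!(p != NULL)) BUG(); } while (0) + * so a failed assertion halts the kernel, while the WARN_ON variants + * only log a backtrace and continue (or spin, for the INF_LOOP variant).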
*/ +#define assert(cnd) \ + do { \ + if (!(cnd)) { \ + BUG(); \ + } \ + } while (0) +#endif /*KERNEL_ASSERT_TO_BUG*/ + +#ifdef KERNEL_ASSERT_TO_BUG_ON +#define assert(cnd) BUG_ON(!(cnd)) +#endif /*KERNEL_ASSERT_TO_BUG_ON*/ + +#ifdef KERNEL_ASSERT_TO_WARN_ON +#define assert(cnd) WARN_ON(!(cnd)) +#endif /*KERNEL_ASSERT_TO_WARN_ON*/ + +#ifdef KERNEL_ASSERT_TO_WARN_ON_INF_LOOP +#define assert(cnd) \ + do { \ + int not_cnd = !(cnd); \ + WARN_ON(not_cnd); \ + if (not_cnd) { \ + for (;;) { \ + } \ + } \ + } while (0) +#endif /*KERNEL_ASSERT_TO_WARN_ON_INF_LOOP*/ + +#ifdef KERNEL_ASSERT_UNDEFINED +#include KERNEL_ASSERT_DEFINITION_FILESTRING +#endif /*KERNEL_ASSERT_UNDEFINED*/ + +#elif defined(__FIST__) || defined(__GNUC__) + +#include "assert.h" + +#else /* default is for unknown environments */ +#define assert(cnd) ((void)0) +#endif + +#endif /* NDEBUG */ + +#ifndef PIPE_GENERATION +/* Deprecated OP___assert, this is still used in ~1000 places + * in the code. This will be removed over time. + * The implementation for the pipe generation tool is in support.isp.h */ +#define OP___assert(cnd) assert(cnd) + +#ifdef C_RUN +#define compile_time_assert(cond) OP___assert(cond) +#else +#include "storage_class.h" +extern void _compile_time_assert(void); +STORAGE_CLASS_INLINE void compile_time_assert(unsigned cond) +{ + /* Call undefined function if cond is false */ + if (!cond) + _compile_time_assert(); +} +#endif +#endif /* PIPE_GENERATION */ + +#endif /* __ASSERT_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/cpu_mem_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/cpu_mem_support.h new file mode 100644 index 000000000000..fa349cac4b24 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/cpu_mem_support.h @@ -0,0 +1,233 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __CPU_MEM_SUPPORT_H +#define __CPU_MEM_SUPPORT_H + +#include "storage_class.h" +#include "assert_support.h" +#include "type_support.h" + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_copy(void *dst, const void *src, unsigned int size) +{ + /* memcpy cannot be used in Windows (function is not allowed), + * and the safer function memcpy_s is not available on other platforms. + * Because usage of ia_css_cpu_mem_copy is minimal, we implement it here in an easy, + * but sub-optimal way. 
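+ * Illustrative use (hypothetical destination and source): + * struct sys_queue q; + * ia_css_cpu_mem_copy(&q, src, sizeof(q)); + * behaves like memcpy for non-overlapping buffers, returning dst on + * success and NULL when either pointer is NULL.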
+ */ + unsigned int i; + + assert(dst != NULL && src != NULL); + + if (!(dst != NULL && src != NULL)) { + return NULL; + } + for (i = 0; i < size; i++) { + ((char *)dst)[i] = ((char *)src)[i]; + } + return dst; +} + +#if defined(__KERNEL__) + +#include +#include +#include +#include + +/* TODO: remove, workaround for issue in hrt file ibuf_ctrl_2600_config.c + * error checking code added to SDK that uses calls to exit function + */ +#define exit(a) return + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return kmalloc(size, GFP_KERNEL); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + return ia_css_cpu_mem_alloc(size); /* todo: align to page size */ +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_protect(void *ptr, unsigned int size, int prot) +{ + /* nothing here yet */ +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); /* available in kernel in linux/string.h */ +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + kfree(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ + /* parameter check here */ + if (ptr == NULL) + return; + + clflush_cache_range(ptr, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ + /* for now same as flush */ + ia_css_cpu_mem_cache_flush(ptr, size); +} + +#elif defined(_MSC_VER) + +#include +#include +#include + +extern void *hrt_malloc(size_t bytes, int zero_mem); +extern void *hrt_free(void *ptr); +extern void hrt_mem_cache_flush(void *ptr, unsigned int size); +extern void hrt_mem_cache_invalidate(void *ptr, unsigned int size); + +#define malloc(a) hrt_malloc(a, 1) +#define free(a) hrt_free(a) + +#define CSS_PAGE_SIZE (1<<12) + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return malloc(size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + unsigned int buffer_size = size; + + /* Currently hrt_malloc calls Windows ExAllocatePoolWithTag() routine + * to request system memory. 
If the number of bytes is equal to or bigger + * than the page size, then the returned address is page aligned, + * but if it's smaller it's not necessarily page-aligned. We agreed + * with the Windows team that we allocate a full page + * if it's less than the page size. + */ + if (buffer_size < CSS_PAGE_SIZE) + buffer_size = CSS_PAGE_SIZE; + + return ia_css_cpu_mem_alloc(buffer_size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + free(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ +#ifdef _KERNEL_MODE + hrt_mem_cache_flush(ptr, size); +#else + (void)ptr; + (void)size; +#endif +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ +#ifdef _KERNEL_MODE + hrt_mem_cache_invalidate(ptr, size); +#else + (void)ptr; + (void)size; +#endif +} + +#else + +#include +#include +#include +/* Needed for the MPROTECT */ +#include +#include +#include +#include + + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return malloc(size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + int pagesize; + + pagesize = sysconf(_SC_PAGE_SIZE); + return memalign(pagesize, size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + free(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ + /* not needed in simulation */ + (void)ptr; + (void)size; +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ + /* not needed in simulation */ + (void)ptr; + (void)size; +} + +#endif + +#endif /* __CPU_MEM_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/error_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/error_support.h new file mode 100644 index 000000000000..9fe1f65125e6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/error_support.h @@ -0,0 +1,110 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __ERROR_SUPPORT_H +#define __ERROR_SUPPORT_H + +#if defined(__KERNEL__) +#include +#else +#include +#endif +#include + +/* OS-independent definition of IA_CSS errno values */ +/* #define IA_CSS_EINVAL 1 */ +/* #define IA_CSS_EFAULT 2 */ + +#ifdef __HIVECC +#define ERR_EMBEDDED 1 +#else +#define ERR_EMBEDDED 0 +#endif + +#if ERR_EMBEDDED +#define DECLARE_ERRVAL +#else +#define DECLARE_ERRVAL \ + int _errval = 0; +#endif + +/* Use ALWAYS_FALSE in while to prevent compiler warnings in Windows */ +#define ALWAYS_FALSE ((void)0, 0) + +#define verifret(cond, error_type) \ +do { \ + if (!(cond)) { \ + return error_type; \ + } \ +} while (ALWAYS_FALSE) + +#define verifjmp(cond, error_tag) \ +do { \ + if (!(cond)) { \ + goto error_tag; \ + } \ +} while (ALWAYS_FALSE) + +#define verifexit(cond) \ +do { \ + if (!(cond)) { \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#if ERR_EMBEDDED +#define verifexitval(cond, error_tag) \ +do { \ + assert(cond); \ +} while (ALWAYS_FALSE) +#else +#define verifexitval(cond, error_tag) \ +do { \ + if (!(cond)) { \ + _errval = (error_tag); \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) +#endif + +#if ERR_EMBEDDED +#define haserror(error_tag) (0) +#else +#define haserror(error_tag) \ + (_errval == (error_tag)) +#endif + +#if ERR_EMBEDDED +#define noerror() (1) +#else +#define noerror() \ + (_errval == 0) +#endif + +#define verifjmpexit(cond) \ +do { \ + if (!(cond)) { \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#define verifjmpexitsetretval(cond, retval) \ +do { \ + if (!(cond)) { \ + retval = -1; \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#endif /* __ERROR_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/math_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/math_support.h new file mode 100644 index 000000000000..633f86f1a1b0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/math_support.h @@ -0,0 +1,314 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __MATH_SUPPORT_H +#define __MATH_SUPPORT_H + +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "type_support.h" +#include "assert_support.h" + +/* in case we have min/max/MIN/MAX macros, undefine them */ +#ifdef min +#undef min +#endif +#ifdef max +#undef max +#endif +#ifdef MIN /* also defined in include/hrt/numeric.h from SDK */ +#undef MIN +#endif +#ifdef MAX +#undef MAX +#endif + +#ifndef UINT16_MAX +#define UINT16_MAX (0xffffUL) +#endif + +#ifndef UINT32_MAX +#define UINT32_MAX (0xffffffffUL) +#endif + +#define IS_ODD(a) ((a) & 0x1) +#define IS_EVEN(a) (!IS_ODD(a)) +#define IS_POWER2(a) (!((a)&((a)-1))) +#define IS_MASK_BITS_SET(a, b) ((a & b) != 0) + +/* To find the next power of 2 of x */ +#define bit2(x) ((x) | ((x) >> 1)) +#define bit4(x) (bit2(x) | (bit2(x) >> 2)) +#define bit8(x) (bit4(x) | (bit4(x) >> 4)) +#define bit16(x) (bit8(x) | (bit8(x) >> 8)) +#define bit32(x) (bit16(x) | (bit16(x) >> 16)) +#define NEXT_POWER_OF_2(x) (bit32(x-1) + 1) + +/* force a value to a lower even value */ +#define EVEN_FLOOR(x) ((x) & ~1UL) + +/* A => B */ +#define IMPLIES(a, b) (!(a) || (b)) + +/* The ORIG_BITS-th bit is the sign bit */ +/* Sign extends an ORIG_BITS-bit signed number to a 64-bit signed number */ +/* By type casting it can be limited to any valid type size + * (32-bit signed or 16-bit or 8-bit) + */ +/* By masking it can be transformed to any arbitrary bit size */ +#define SIGN_EXTEND(VAL, ORIG_BITS) \ +((~(((VAL)&(1ULL<<((ORIG_BITS)-1)))-1))|(VAL)) + +#define EXTRACT_BIT(a, b) ((a >> b) & 1) + +/* for preprocessor and array sizing use MIN and MAX + otherwise use min and max */ +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#define CLIP(a, b, c) MIN((MAX((a), (b))), (c)) +/* Integer round-down division of a with b */ +#define FLOOR_DIV(a, b) ((b) ? ((a) / (b)) : 0) +/* Align a to the lower multiple of b */ +#define FLOOR_MUL(a, b) (FLOOR_DIV(a, b) * (b)) +/* Integer round-up division of a with b */ +#define CEIL_DIV(a, b) ((b) ? (((a) + (b) - 1) / (b)) : 0) +/* Align a to the upper multiple of b */ +#define CEIL_MUL(a, b) (CEIL_DIV(a, b) * (b)) +/* Align a to the upper multiple of b - fast implementation + * for cases when b=pow(2,n) + */ +#define CEIL_MUL2(a, b) (((a) + (b) - 1) & ~((b) - 1)) +/* integer round-up division of a with pow(2,b) */ +#define CEIL_SHIFT(a, b) (((a) + (1UL << (b)) - 1) >> (b)) +/* Align a to the upper multiple of pow(2,b) */ +#define CEIL_SHIFT_MUL(a, b) (CEIL_SHIFT(a, b) << (b)) +/* Absolute difference of a and b */ +#define ABS_DIF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a))) +#define ABS(a) ABS_DIF(a, 0) +/* Square of x */ +#define SQR(x) ((x)*(x)) +/* Integer round-half-down division of a and b */ +#define ROUND_HALF_DOWN_DIV(a, b) ((b) ? ((a) + ((b) / 2) - 1) / (b) : 0) +/* Align a to the round-half-down multiple of b */ +#define ROUND_HALF_DOWN_MUL(a, b) (ROUND_HALF_DOWN_DIV(a, b) * (b)) + +#define MAX3(a, b, c) MAX((a), MAX((b), (c))) +#define MIN3(a, b, c) MIN((a), MIN((b), (c))) +#define MAX4(a, b, c, d) MAX((MAX((a), (b))), (MAX((c), (d)))) +#define MIN4(a, b, c, d) MIN((MIN((a), (b))), (MIN((c), (d)))) + +/* min and max should not be macros as they will evaluate their arguments twice. + If you really need a macro (e.g. 
for CPP or for initializing an array) + use MIN() and MAX(), otherwise use min() and max() */ + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(a) ((sizeof(a) / sizeof(*(a)))) +#endif + +#ifndef BYTES +#define BYTES(bit) (((bit)+7)/8) +#endif + +#if !defined(PIPE_GENERATION) +STORAGE_CLASS_INLINE unsigned int max_value_bits(unsigned int bits) +{ + return (bits == 0) ? 0 : ((2 * ((1 << ((bits) - 1)) - 1)) + 1); +} +STORAGE_CLASS_INLINE unsigned int max_value_bytes(unsigned int bytes) +{ + return max_value_bits(IA_CSS_UINT8_T_BITS * bytes); +} +STORAGE_CLASS_INLINE int max(int a, int b) +{ + return MAX(a, b); +} + +STORAGE_CLASS_INLINE int min(int a, int b) +{ + return MIN(a, b); +} + +STORAGE_CLASS_INLINE int clip(int a, int b, int c) +{ + return min(max(a, b), c); +} + +STORAGE_CLASS_INLINE unsigned int umax(unsigned int a, unsigned int b) +{ + return MAX(a, b); +} + +STORAGE_CLASS_INLINE unsigned int umin(unsigned int a, unsigned int b) +{ + return MIN(a, b); +} + +STORAGE_CLASS_INLINE unsigned int uclip(unsigned int a, unsigned int b, + unsigned int c) +{ + return umin(umax(a, b), c); +} + +STORAGE_CLASS_INLINE unsigned int ceil_div(unsigned int a, unsigned int b) +{ + return CEIL_DIV(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_mul(unsigned int a, unsigned int b) +{ + return CEIL_MUL(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_mul2(unsigned int a, unsigned int b) +{ + return CEIL_MUL2(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_shift(unsigned int a, unsigned int b) +{ + return CEIL_SHIFT(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_shift_mul(unsigned int a, unsigned int b) +{ + return CEIL_SHIFT_MUL(a, b); +} + +STORAGE_CLASS_INLINE int abs_dif(int a, int b) +{ + return ABS_DIF(a, b); +} + +STORAGE_CLASS_INLINE unsigned int uabs_dif(unsigned int a, unsigned int b) +{ + return ABS_DIF(a, b); +} + +STORAGE_CLASS_INLINE unsigned int round_half_down_div(unsigned int a, + unsigned int b) +{ + return ROUND_HALF_DOWN_DIV(a, b); +} + +STORAGE_CLASS_INLINE unsigned int round_half_down_mul(unsigned int a, + unsigned int b) +{ + return ROUND_HALF_DOWN_MUL(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_pow2(uint32_t a) +{ + unsigned int retval = 0; + + if (IS_POWER2(a)) { + retval = (unsigned int)a; + } else { + unsigned int v = a; + + v |= v>>1; + v |= v>>2; + v |= v>>4; + v |= v>>8; + v |= v>>16; + retval = (unsigned int)(v+1); + } + return retval; +} + +STORAGE_CLASS_INLINE unsigned int floor_log2(uint32_t a) +{ + static const uint8_t de_bruijn[] = { + 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 + }; + uint32_t v = a; + + v |= v>>1; + v |= v>>2; + v |= v>>4; + v |= v>>8; + v |= v>>16; + return (unsigned int)de_bruijn[(v*0x07C4ACDDU)>>27]; +} + +/* Divide by small power of two */ +STORAGE_CLASS_INLINE unsigned int +udiv2_small_i(uint32_t a, uint32_t b) +{ + assert(b <= 2); + return a >> (b-1); +} + +/* optimized divide for small results + * a will be divided by b + * outbits is the number of bits needed for the result + * the smaller the cheaper the function will be. + * if the result doesn't fit in the number of output bits + * the result is incorrect and the function will assert + */ +STORAGE_CLASS_INLINE unsigned int +udiv_medium(uint32_t a, uint32_t b, unsigned outbits) +{ + int bit; + unsigned res = 0; + unsigned mask; + +#ifdef VOLCANO +#pragma ipu unroll +#endif + for (bit = outbits-1 ; bit >= 0; bit--) { + mask = 1<<bit; + if (a >= (b<<bit)) { + a -= (b<<bit); + res |= mask; + } + } + assert(a < b); + return res; +} + +#if !defined(__VIED_CELL) +/* + * For SP and ISP, SDK provides the definition of OP_std_modadd. + * We need it only for host: modular addition with the result in [0, c), + * assuming -c <= a+b < 2*c as in the queue-port index arithmetic. + */ +STORAGE_CLASS_INLINE unsigned int OP_std_modadd(int a, int b, int c) +{ + return a+b < 0 ? a+b+c : (a+b >= c ? 
a+b-c : a+b); +} + +/* + * For SP and ISP, SDK provides the definition of OP_asp_slor. + * We need it only for host + */ +STORAGE_CLASS_INLINE unsigned int OP_asp_slor(int a, int b, int c) +{ + return ((a << c) | b); +} +#else +#include "hive/customops.h" +#endif /* !defined(__VIED_CELL) */ + +#endif /* !defined(PIPE_GENERATION) */ + +#if !defined(__KERNEL__) +#define clamp(a, min_val, max_val) MIN(MAX((a), (min_val)), (max_val)) +#endif /* !defined(__KERNEL__) */ + +#endif /* __MATH_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/misc_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/misc_support.h new file mode 100644 index 000000000000..a2c2729e946d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/misc_support.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __MISC_SUPPORT_H +#define __MISC_SUPPORT_H + +/* suppress compiler warnings on unused variables */ +#ifndef NOT_USED +#define NOT_USED(a) ((void)(a)) +#endif + +/* Calculate the total bytes for pow(2) byte alignment */ +#define tot_bytes_for_pow2_align(pow2, cur_bytes) \ + ((cur_bytes + (pow2 - 1)) & ~(pow2 - 1)) + +/* Display the macro value given a string */ +#define _STR(x) #x +#define STR(x) _STR(x) + +/* Concatenate */ +#ifndef CAT /* also defined in */ +#define _CAT(a, b) a ## b +#define CAT(a, b) _CAT(a, b) +#endif + +#define _CAT3(a, b, c) a ## b ## c +#define CAT3(a, b, c) _CAT3(a, b, c) + +/* NO_HOIST, NO_CSE, NO_ALIAS attributes must be ignored for host code */ +#ifndef __HIVECC +#ifndef NO_HOIST +#define NO_HOIST +#endif +#ifndef NO_CSE +#define NO_CSE +#endif +#ifndef NO_ALIAS +#define NO_ALIAS +#endif +#endif + +enum hive_method_id { + HIVE_METHOD_ID_CRUN, + HIVE_METHOD_ID_UNSCHED, + HIVE_METHOD_ID_SCHED, + HIVE_METHOD_ID_TARGET +}; + +/* Derive METHOD */ +#if defined(C_RUN) + #define HIVE_METHOD "crun" + #define HIVE_METHOD_ID HIVE_METHOD_ID_CRUN +#elif defined(HRT_UNSCHED) + #define HIVE_METHOD "unsched" + #define HIVE_METHOD_ID HIVE_METHOD_ID_UNSCHED +#elif defined(HRT_SCHED) + #define HIVE_METHOD "sched" + #define HIVE_METHOD_ID HIVE_METHOD_ID_SCHED +#else + #define HIVE_METHOD "target" + #define HIVE_METHOD_ID HIVE_METHOD_ID_TARGET + #define HRT_TARGET 1 +#endif + +#endif /* __MISC_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/platform_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/platform_support.h new file mode 100644 index 000000000000..1752efc7b4df --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/platform_support.h @@ -0,0 +1,146 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PLATFORM_SUPPORT_H +#define __PLATFORM_SUPPORT_H + +#include "storage_class.h" + +#define MSEC_IN_SEC 1000 +#define NSEC_IN_MSEC 1000000 + +#if defined(_MSC_VER) +#include + +#define IA_CSS_EXTERN +#define SYNC_WITH(x) +#define CSS_ALIGN(d, a) __declspec(align(a)) d + +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + /* Placeholder for driver team */ +} + +STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) +{ + /* Placeholder for driver team */ + (void)delay_time_ms; +} + +#elif defined(__HIVECC) +#include +#include + +#define IA_CSS_EXTERN extern +#define CSS_ALIGN(d, a) d __attribute__((aligned(a))) +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + OP___schedule(); +} + +#elif defined(__KERNEL__) +#include +#include + +#define IA_CSS_EXTERN +#define CSS_ALIGN(d, a) d __aligned(a) + +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + usleep_range(1, 50); +} + +#elif defined(__GNUC__) +#include + +#define IA_CSS_EXTERN +#define CSS_ALIGN(d, a) d __attribute__((aligned(a))) + +/* Define some __HIVECC specific macros to nothing to allow host code compilation */ +#ifndef NO_ALIAS +#define NO_ALIAS +#endif + +#ifndef SYNC_WITH +#define SYNC_WITH(x) +#endif + +#if defined(HRT_CSIM) + #include "hrt/host.h" /* Using hrt_sleep from hrt/host.h */ + STORAGE_CLASS_INLINE void ia_css_sleep(void) + { + /* For the SDK still using hrt_sleep */ + hrt_sleep(); + } + STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) + { + /* For the SDK still using hrt_sleep */ + long unsigned int i = 0; + for (i = 0; i < delay_time_ms; i++) { + hrt_sleep(); + } + } +#else + #include + STORAGE_CLASS_INLINE void ia_css_sleep(void) + { + struct timespec delay_time; + + delay_time.tv_sec = 0; + delay_time.tv_nsec = 10; + nanosleep(&delay_time, NULL); + } + STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) + { + struct timespec delay_time; + + if (delay_time_ms >= MSEC_IN_SEC) { + delay_time.tv_sec = delay_time_ms / MSEC_IN_SEC; + delay_time.tv_nsec = (delay_time_ms % MSEC_IN_SEC) * NSEC_IN_MSEC; + } else { + delay_time.tv_sec = 0; + delay_time.tv_nsec = delay_time_ms * NSEC_IN_MSEC; + } + nanosleep(&delay_time, NULL); + } +#endif + +#else +#include +#endif + +/* Needed for the stdint.h include in various environments */ +#include "type_support.h" +#include "storage_class.h" + +#define MAX_ALIGNMENT 8 +#define aligned_uint8(type, obj) CSS_ALIGN(uint8_t obj, 1) +#define aligned_int8(type, obj) CSS_ALIGN(int8_t obj, 1) +#define aligned_uint16(type, obj) CSS_ALIGN(uint16_t obj, 2) +#define aligned_int16(type, obj) CSS_ALIGN(int16_t obj, 2) +#define aligned_uint32(type, obj) CSS_ALIGN(uint32_t obj, 4) +#define aligned_int32(type, obj) CSS_ALIGN(int32_t obj, 4) + +/* needed as long as hivecc does not define the type (u)int64_t */ +#if defined(__HIVECC) +#define aligned_uint64(type, obj) CSS_ALIGN(unsigned long long obj, 8) +#define aligned_int64(type, obj) CSS_ALIGN(signed long long obj, 8) +#else +#define aligned_uint64(type, obj) CSS_ALIGN(uint64_t obj, 8) +#define aligned_int64(type, obj) CSS_ALIGN(int64_t obj, 8) +#endif +#define aligned_enum(enum_type, obj) CSS_ALIGN(uint32_t obj, 4) +#define aligned_struct(struct_type, obj) struct_type obj + +#endif /* __PLATFORM_SUPPORT_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/print_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/print_support.h new file mode 100644 index 000000000000..0b614f7ef12d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/print_support.h @@ -0,0 +1,90 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PRINT_SUPPORT_H +#define __PRINT_SUPPORT_H + +#if defined(_MSC_VER) +#ifdef _KERNEL_MODE + +/* TODO: Windows driver team to provide tracing mechanism for kernel mode + * e.g. DbgPrint and DbgPrintEx + */ +extern void FwTracePrintPWARN(const char *fmt, ...); +extern void FwTracePrintPRINT(const char *fmt, ...); +extern void FwTracePrintPERROR(const char *fmt, ...); +extern void FwTracePrintPDEBUG(const char *fmt, ...); + +#define PWARN(format, ...) FwTracePrintPWARN(format, __VA_ARGS__) +#define PRINT(format, ...) FwTracePrintPRINT(format, __VA_ARGS__) +#define PERROR(format, ...) FwTracePrintPERROR(format, __VA_ARGS__) +#define PDEBUG(format, ...) FwTracePrintPDEBUG(format, __VA_ARGS__) + +#else +/* Windows usermode compilation */ +#include + +/* To change the defines below, communicate with the Windows team first + * to ensure they will not get flooded with prints + */ +/* This is a temporary workaround to avoid flooding the userspace + * Windows driver with prints + */ + +#define PWARN(format, ...) +#define PRINT(format, ...) +#define PERROR(format, ...) printf("error: " format, __VA_ARGS__) +#define PDEBUG(format, ...) + +#endif /* _KERNEL_MODE */ + +#elif defined(__HIVECC) +#include +/* To be revised + +#define PWARN(format) +#define PRINT(format) OP___printstring(format) +#define PERROR(variable) OP___dump(9999, arguments) +#define PDEBUG(variable) OP___dump(__LINE__, arguments) + +*/ + +#define PRINTSTRING(str) OP___printstring(str) + +#elif defined(__KERNEL__) +#include +#include + + +#define PWARN(format, arguments...) pr_debug(format, ##arguments) +#define PRINT(format, arguments...) pr_debug(format, ##arguments) +#define PERROR(format, arguments...) pr_debug(format, ##arguments) +#define PDEBUG(format, arguments...) pr_debug(format, ##arguments) + +#else +#include + +#define PRINT_HELPER(prefix, format, ...) printf(prefix format "%s", __VA_ARGS__) + +/* The trailing "" allows the edge case of printing a single string */ +#define PWARN(...) PRINT_HELPER("warning: ", __VA_ARGS__, "") +#define PRINT(...) PRINT_HELPER("", __VA_ARGS__, "") +#define PERROR(...) PRINT_HELPER("error: ", __VA_ARGS__, "") +#define PDEBUG(...) PRINT_HELPER("debug: ", __VA_ARGS__, "") + +#define PRINTSTRING(str) PRINT(str) + +#endif + +#endif /* __PRINT_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/storage_class.h new file mode 100644 index 000000000000..af19b4026220 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/storage_class.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __STORAGE_CLASS_H +#define __STORAGE_CLASS_H + +#define STORAGE_CLASS_EXTERN \ +extern + +#if defined(_MSC_VER) +#define STORAGE_CLASS_INLINE \ +static __inline +#elif defined(__HIVECC) +#define STORAGE_CLASS_INLINE \ +static inline +#else +#define STORAGE_CLASS_INLINE \ +static inline +#endif + +/* Register struct */ +#ifndef __register +#if defined(__HIVECC) && !defined(PIPE_GENERATION) +#define __register register +#else +#define __register +#endif +#endif + +/* Memory attribute */ +#ifndef MEM +#ifdef PIPE_GENERATION +#elif defined(__HIVECC) +#include +#else +#define MEM(any_mem) +#endif +#endif + +#endif /* __STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/type_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/type_support.h new file mode 100644 index 000000000000..a86da0e78941 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/support/type_support.h @@ -0,0 +1,80 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __TYPE_SUPPORT_H +#define __TYPE_SUPPORT_H + +/* Per the DLI spec, types are in "type_support.h" and + * "platform_support.h" is for unclassified/to be refactored + * platform specific definitions. 
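+ * + * As an example of what this header provides, HOST_ADDRESS(x) casts a + * pointer to an integer wide enough for the platform; send_port.c uses + * it as ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(addr), token_size) + * to carry a buffer address in integer form. It expands to + * unsigned long long on 64-bit MSVC builds and to unsigned long elsewhere.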
+ */ +#define IA_CSS_UINT8_T_BITS 8 +#define IA_CSS_UINT16_T_BITS 16 +#define IA_CSS_UINT32_T_BITS 32 +#define IA_CSS_INT32_T_BITS 32 +#define IA_CSS_UINT64_T_BITS 64 + + +#if defined(_MSC_VER) +#include +#include +#include +#include +#if defined(_M_X64) +#define HOST_ADDRESS(x) (unsigned long long)(x) +#else +#define HOST_ADDRESS(x) (unsigned long)(x) +#endif + +#elif defined(PARAM_GENERATION) +/* Nothing */ +#elif defined(__HIVECC) +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +typedef long long int64_t; +typedef unsigned long long uint64_t; + +#elif defined(__KERNEL__) +#include +#include + +#define CHAR_BIT (8) +#define HOST_ADDRESS(x) (unsigned long)(x) + +#elif defined(__GNUC__) +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +#else /* default is for the FIST environment */ +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +#endif + +#if !defined(PIPE_GENERATION) && !defined(IO_GENERATION) +/* genpipe cannot handle the void* syntax */ +typedef void *HANDLE; +#endif + +#endif /* __TYPE_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom.h new file mode 100644 index 000000000000..5426d6d18e0b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom.h @@ -0,0 +1,247 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_SYSCOM_H +#define __IA_CSS_SYSCOM_H + + +/* + * The CSS Subsystem Communication Interface - Host side + * + * It provides subsystem initialization, send ports, and receive ports. + * The PSYS and ISYS interfaces are implemented on top of this interface. + */ + +#include "ia_css_syscom_config.h" + +#define FW_ERROR_INVALID_PARAMETER (-1) +#define FW_ERROR_BAD_ADDRESS (-2) +#define FW_ERROR_BUSY (-3) +#define FW_ERROR_NO_MEMORY (-4) + +struct ia_css_syscom_context; + +/** + * ia_css_syscom_size() - provide syscom external buffer requirements + * @config: pointer to the configuration data (read) + * @size: pointer to the buffer size (write) + * + * Purpose: + * - Provide external buffer requirements + * - To be used for external buffer allocation + * + */ +extern void +ia_css_syscom_size( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size *size +); + +/** + * ia_css_syscom_open() - initialize a subsystem context + * @config: pointer to the configuration data (read) + * @buf: pointer to externally allocated buffers (read) + * @returns: struct ia_css_syscom_context* on success, 0 otherwise. + * + * Purpose: + * - initialize host side data structures + * - boot the subsystem? 
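+ * + * A minimal open sequence (sketch only; buffer allocation and error + * handling elided): + * struct ia_css_syscom_size size; + * struct ia_css_syscom_context *ctx; + * ia_css_syscom_size(cfg, &size); + * // allocate buf members with at least size.cpu/shm/ibuf/obuf bytes + * ctx = ia_css_syscom_open(cfg, buf);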
+ * + */ +extern struct ia_css_syscom_context* +ia_css_syscom_open( + struct ia_css_syscom_config *config, + struct ia_css_syscom_buf *buf +); + +/** + * ia_css_syscom_close() - signal close to cell + * @context: pointer to the subsystem context + * @returns: 0 on success, -2 (FW_ERROR_BUSY) if SPC is not ready yet. + * + * Purpose: + * Request the cell to terminate + */ +extern int +ia_css_syscom_close( + struct ia_css_syscom_context *context +); + +/** + * ia_css_syscom_release() - free context + * @context: pointer to the subsystem context + * @force: flag which specifies whether cell + * state will be checked before freeing the + * context. + * @returns: 0 on success, -2 (FW_ERROR_BUSY) if cell + * is busy and call was not forced. + * + * Purpose: + * 2 modes: the first (force==true) immediately + * frees the context; the second (force==false) verifies + * that the cell state is ok and frees the context if so, + * returning an error otherwise. + */ +extern int +ia_css_syscom_release( + struct ia_css_syscom_context *context, + unsigned int force +); + +/** + * Open a port for sending tokens to the subsystem + * @context: pointer to the subsystem context + * @port: send port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_send_port_open( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Closes a port for sending tokens to the subsystem + * @context: pointer to the subsystem context + * @port: send port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_send_port_close( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Get the number of tokens that can be sent to a port without error. + * @context: pointer to the subsystem context + * @port: send port index + * @returns: number of available tokens on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_send_port_available( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Send a token to the subsystem port + * The token size is determined during initialization + * @context: pointer to the subsystem context + * @port: send port index + * @token: pointer to the token value that is transferred to the subsystem + * @returns: number of tokens sent on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_send_port_transfer( + struct ia_css_syscom_context *context, + unsigned int port, + const void *token +); + +/** + * Open a port for receiving tokens from the subsystem + * @context: pointer to the subsystem context + * @port: receive port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_recv_port_open( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Closes a port for receiving tokens from the subsystem + * Returns 0 on success, otherwise negative value of error code + * @context: pointer to the subsystem context + * @port: receive port index + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_recv_port_close( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Get the number of tokens that can be received from a port without errors. + * @context: pointer to the subsystem context + * @port: receive port index + * @returns: number of available tokens on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. 
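+ * + * Typical polling sketch (illustrative): + * while (ia_css_syscom_recv_port_available(ctx, port) > 0) + * ia_css_syscom_recv_port_transfer(ctx, port, &token); + * drains every token currently queued by the subsystem.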
+ */ +extern int +ia_css_syscom_recv_port_available( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Receive a token from the subsystem port + * The token size is determined during initialization + * @context: pointer to the subsystem context + * @port: receive port index + * @token (output): pointer to (space for) the token to be received + * @returns: number of tokens received on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_recv_port_transfer( + struct ia_css_syscom_context *context, + unsigned int port, + void *token +); + +#if HAS_DUAL_CMD_CTX_SUPPORT +/** + * ia_css_syscom_store_dmem() - store subsystem context information in DMEM + * @context: pointer to the subsystem context + * @ssid: subsystem id + * @vtl0_addr_mask: VTL0 address mask; only applicable when the passed in context is secure + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_store_dmem( + struct ia_css_syscom_context *context, + unsigned int ssid, + unsigned int vtl0_addr_mask +); + +/** + * ia_css_syscom_set_trustlet_status() - store trustlet configuration setting + * @dmem_addr: DMEM address where the status is stored + * @ssid: subsystem id + * @trustlet_exist: 1 if trustlet exists + */ +extern void +ia_css_syscom_set_trustlet_status( + unsigned int dmem_addr, + unsigned int ssid, + bool trustlet_exist +); + +/** + * ia_css_syscom_is_ab_spc_ready() - check if SPC access blocker programming is completed + * @ctx: pointer to the subsystem context + * @returns: 1 when status is ready. 0 otherwise + */ +bool +ia_css_syscom_is_ab_spc_ready( + struct ia_css_syscom_context *ctx +); +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +#endif /* __IA_CSS_SYSCOM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom_config.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom_config.h new file mode 100644 index 000000000000..8c827c2ba395 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom_config.h @@ -0,0 +1,98 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_SYSCOM_CONFIG_H +#define __IA_CSS_SYSCOM_CONFIG_H + +#include +#include + +/* syscom size struct, output of ia_css_syscom_size, + * input for (external) allocation + */ +struct ia_css_syscom_size { + /* Size of host buffer */ + unsigned int cpu; + /* Size of shared config buffer (host to cell) */ + unsigned int shm; + /* Size of shared input queue buffers (host to cell) */ + unsigned int ibuf; + /* Size of shared output queue buffers (cell to host) */ + unsigned int obuf; +}; + +/* syscom buffer struct, output of (external) allocation, + * input for ia_css_syscom_open + */ +struct ia_css_syscom_buf { + char *cpu; /* host buffer */ + + /* shared memory buffer host address */ + host_virtual_address_t shm_host; + /* shared memory buffer cell address */ + vied_virtual_address_t shm_cell; + + /* input queue shared buffer host address */ + host_virtual_address_t ibuf_host; + /* input queue shared buffer cell address */ + vied_virtual_address_t ibuf_cell; + + /* output queue shared buffer host address */ + host_virtual_address_t obuf_host; + /* output queue shared buffer cell address */ + vied_virtual_address_t obuf_cell; +}; + +struct ia_css_syscom_queue_config { + unsigned int queue_size; /* tokens per queue */ + unsigned int token_size; /* bytes per token */ +}; + +/** + * Parameter struct for ia_css_syscom_open + */ +struct ia_css_syscom_config { + /* This member is no longer used in syscom. + It is kept to not break any driver builds, and will be removed when + all assignments have been removed from driver code */ + /* address of firmware in DDR/IMR */ + unsigned long long host_firmware_address; + + /* address of firmware in DDR, seen from SPC */ + unsigned int vied_firmware_address; + + unsigned int ssid; + unsigned int mmid; + + unsigned int num_input_queues; + unsigned int num_output_queues; + struct ia_css_syscom_queue_config *input; + struct ia_css_syscom_queue_config *output; + + unsigned int regs_addr; + unsigned int dmem_addr; + + /* firmware-specific configuration data */ + void *specific_addr; + unsigned int specific_size; + + /* if true, secure syscom in VTIO case; + * if false, non-secure syscom + */ + bool secure; + unsigned int vtl0_addr_mask; /* only applicable in 'secure' case */ +}; + +#endif /* __IA_CSS_SYSCOM_CONFIG_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom_trace.h new file mode 100644 index 000000000000..1a0191d37102 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/interface/ia_css_syscom_trace.h @@ -0,0 +1,52 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IA_CSS_SYSCOM_TRACE_H +#define __IA_CSS_SYSCOM_TRACE_H + +#include "ia_css_trace.h" + +#define SYSCOM_TRACE_LEVEL_DEFAULT 1 +#define SYSCOM_TRACE_LEVEL_DEBUG 2 + +/* Set to default level if no level is defined */ +#ifndef SYSCOM_TRACE_LEVEL +#define SYSCOM_TRACE_LEVEL SYSCOM_TRACE_LEVEL_DEFAULT +#endif /* SYSCOM_TRACE_LEVEL */ + +/* SYSCOM Module tracing backend is mapped to TUNIT tracing for target platforms */ +#ifdef __HIVECC +# ifndef HRT_CSIM +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE +# else +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +# endif +#else +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#endif + +#define SYSCOM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED +#define SYSCOM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED +#define SYSCOM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + +#if (SYSCOM_TRACE_LEVEL == SYSCOM_TRACE_LEVEL_DEFAULT) +# define SYSCOM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED +#elif (SYSCOM_TRACE_LEVEL == SYSCOM_TRACE_LEVEL_DEBUG) +# define SYSCOM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED +#else +# error "Syscom trace level not defined!" +#endif /* SYSCOM_TRACE_LEVEL */ + +#endif /* __IA_CSS_SYSCOM_TRACE_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom.c new file mode 100644 index 000000000000..cdf9df0531ff --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom.c @@ -0,0 +1,650 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_syscom.h" + +#include "ia_css_syscom_context.h" +#include "ia_css_syscom_config_fw.h" +#include "ia_css_syscom_trace.h" + +#include "queue.h" +#include "send_port.h" +#include "recv_port.h" +#include "regmem_access.h" + +#include "error_support.h" +#include "cpu_mem_support.h" + +#include "queue_struct.h" +#include "send_port_struct.h" +#include "recv_port_struct.h" + +#include "type_support.h" +#include +#include +#include "platform_support.h" + +#include "ia_css_cell.h" + +/* struct of internal buffer sizes */ +struct ia_css_syscom_size_intern { + unsigned int context; + unsigned int input_queue; + unsigned int output_queue; + unsigned int input_port; + unsigned int output_port; + + unsigned int fw_config; + unsigned int specific; + + unsigned int input_buffer; + unsigned int output_buffer; +}; + +/* Allocate buffers internally, when no buffers are provided */ +static int +ia_css_syscom_alloc( + unsigned int ssid, + unsigned int mmid, + const struct ia_css_syscom_size *size, + struct ia_css_syscom_buf *buf) +{ + /* zero the buffer to set all pointers to zero */ + memset(buf, 0, sizeof(*buf)); + + /* allocate cpu_mem */ + buf->cpu = (char *)ia_css_cpu_mem_alloc(size->cpu); + if (!buf->cpu) + goto EXIT7; + + /* allocate and map shared config buffer */ + buf->shm_host = shared_memory_alloc(mmid, size->shm); + if (!buf->shm_host) + goto EXIT6; + buf->shm_cell = shared_memory_map(ssid, mmid, buf->shm_host); + if (!buf->shm_cell) + goto EXIT5; + + /* allocate and map input queue buffer */ + buf->ibuf_host = shared_memory_alloc(mmid, size->ibuf); + if (!buf->ibuf_host) + goto EXIT4; + buf->ibuf_cell = shared_memory_map(ssid, mmid, buf->ibuf_host); + if (!buf->ibuf_cell) + goto EXIT3; + + /* allocate and map output queue buffer */ + buf->obuf_host = shared_memory_alloc(mmid, size->obuf); + if (!buf->obuf_host) + goto EXIT2; + buf->obuf_cell = shared_memory_map(ssid, mmid, buf->obuf_host); + if (!buf->obuf_cell) + goto EXIT1; + + return 0; + +EXIT1: shared_memory_free(mmid, buf->obuf_host); +EXIT2: shared_memory_unmap(ssid, mmid, buf->ibuf_cell); +EXIT3: shared_memory_free(mmid, buf->ibuf_host); +EXIT4: shared_memory_unmap(ssid, mmid, buf->shm_cell); +EXIT5: shared_memory_free(mmid, buf->shm_host); +EXIT6: ia_css_cpu_mem_free(buf->cpu); +EXIT7: return FW_ERROR_NO_MEMORY; +} + +static void +ia_css_syscom_size_intern( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size_intern *size) +{ + /* convert syscom config into syscom internal size struct */ + + unsigned int i; + + size->context = sizeof(struct ia_css_syscom_context); + size->input_queue = cfg->num_input_queues * sizeof(struct sys_queue); + size->output_queue = cfg->num_output_queues * sizeof(struct sys_queue); + size->input_port = cfg->num_input_queues * sizeof(struct send_port); + size->output_port = cfg->num_output_queues * sizeof(struct recv_port); + + size->fw_config = sizeof(struct ia_css_syscom_config_fw); + size->specific = cfg->specific_size; + + /* accumulate input queue buffer sizes */ + size->input_buffer = 0; + for (i = 0; i < cfg->num_input_queues; i++) { + size->input_buffer += + sys_queue_buf_size(cfg->input[i].queue_size, + cfg->input[i].token_size); + } + + /* accumulate outut queue buffer sizes */ + size->output_buffer = 0; + for (i = 0; i < cfg->num_output_queues; i++) { + size->output_buffer += + sys_queue_buf_size(cfg->output[i].queue_size, + cfg->output[i].token_size); + } +} + +static void +ia_css_syscom_size_extern( + const struct ia_css_syscom_size_intern *i, + struct 
ia_css_syscom_size *e) +{ + /* convert syscom internal size struct into external size struct */ + + e->cpu = i->context + i->input_queue + i->output_queue + + i->input_port + i->output_port; + e->shm = i->fw_config + i->input_queue + i->output_queue + i->specific; + e->ibuf = i->input_buffer; + e->obuf = i->output_buffer; +} + +/* Function that provides buffer sizes to be allocated */ +void +ia_css_syscom_size( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size *size) +{ + struct ia_css_syscom_size_intern i; + + ia_css_syscom_size_intern(cfg, &i); + ia_css_syscom_size_extern(&i, size); +} + +static struct ia_css_syscom_context* +ia_css_syscom_assign_buf( + const struct ia_css_syscom_size_intern *i, + const struct ia_css_syscom_buf *buf) +{ + struct ia_css_syscom_context *ctx; + char *cpu_mem_buf; + host_virtual_address_t shm_buf_host; + vied_virtual_address_t shm_buf_cell; + + /* host context */ + cpu_mem_buf = buf->cpu; + + ctx = (struct ia_css_syscom_context *)cpu_mem_buf; + ia_css_cpu_mem_set_zero(ctx, i->context); + cpu_mem_buf += i->context; + + ctx->input_queue = (struct sys_queue *) cpu_mem_buf; + cpu_mem_buf += i->input_queue; + + ctx->output_queue = (struct sys_queue *) cpu_mem_buf; + cpu_mem_buf += i->output_queue; + + ctx->send_port = (struct send_port *) cpu_mem_buf; + cpu_mem_buf += i->input_port; + + ctx->recv_port = (struct recv_port *) cpu_mem_buf; + + + /* cell config */ + shm_buf_host = buf->shm_host; + shm_buf_cell = buf->shm_cell; + + ctx->config_host_addr = shm_buf_host; + shm_buf_host += i->fw_config; + ctx->config_vied_addr = shm_buf_cell; + shm_buf_cell += i->fw_config; + + ctx->input_queue_host_addr = shm_buf_host; + shm_buf_host += i->input_queue; + ctx->input_queue_vied_addr = shm_buf_cell; + shm_buf_cell += i->input_queue; + + ctx->output_queue_host_addr = shm_buf_host; + shm_buf_host += i->output_queue; + ctx->output_queue_vied_addr = shm_buf_cell; + shm_buf_cell += i->output_queue; + + ctx->specific_host_addr = shm_buf_host; + ctx->specific_vied_addr = shm_buf_cell; + + ctx->ibuf_host_addr = buf->ibuf_host; + ctx->ibuf_vied_addr = buf->ibuf_cell; + + ctx->obuf_host_addr = buf->obuf_host; + ctx->obuf_vied_addr = buf->obuf_cell; + + return ctx; +} + +struct ia_css_syscom_context* +ia_css_syscom_open( + struct ia_css_syscom_config *cfg, + struct ia_css_syscom_buf *buf_extern +) +{ + struct ia_css_syscom_size_intern size_intern; + struct ia_css_syscom_size size; + struct ia_css_syscom_buf buf_intern; + struct ia_css_syscom_buf *buf; + struct ia_css_syscom_context *ctx; + struct ia_css_syscom_config_fw fw_cfg; + unsigned int i; + struct sys_queue_res res; + + IA_CSS_TRACE_0(SYSCOM, INFO, "Entered: ia_css_syscom_open\n"); + + /* error handling */ + if (cfg == NULL) + return NULL; + + IA_CSS_TRACE_1(SYSCOM, INFO, "ia_css_syscom_open (secure %d) start\n", cfg->secure); + + /* check members of cfg: TBD */ + + /* + * Check if SP is in valid state, have to wait if not ready. 
+ * On some platforms (such as VP), this can take longer because of system
+ * performance; if we returned NULL here without waiting for SPC0 to become
+ * ready, the driver's firmware load would fail.
+ */
+ ia_css_cell_wait(cfg->ssid, SPC0);
+
+ ia_css_syscom_size_intern(cfg, &size_intern);
+ ia_css_syscom_size_extern(&size_intern, &size);
+
+ if (buf_extern) {
+ /* use externally allocated buffers */
+ buf = buf_extern;
+ } else {
+ /* use internally allocated buffers */
+ buf = &buf_intern;
+ if (ia_css_syscom_alloc(cfg->ssid, cfg->mmid, &size, buf) != 0)
+ return NULL;
+ }
+
+ /* assign buffer pointers */
+ ctx = ia_css_syscom_assign_buf(&size_intern, buf);
+ /* only need to free internally allocated buffers */
+ ctx->free_buf = !buf_extern;
+
+ ctx->cell_regs_addr = cfg->regs_addr;
+ /* regmem is at cell_dmem_addr + REGMEM_OFFSET */
+ ctx->cell_dmem_addr = cfg->dmem_addr;
+
+ ctx->num_input_queues = cfg->num_input_queues;
+ ctx->num_output_queues = cfg->num_output_queues;
+
+ ctx->env.mmid = cfg->mmid;
+ ctx->env.ssid = cfg->ssid;
+ ctx->env.mem_addr = cfg->dmem_addr;
+
+ ctx->regmem_idx = SYSCOM_QPR_BASE_REG;
+
+ /* initialize input queues */
+ res.reg = SYSCOM_QPR_BASE_REG;
+ res.host_address = ctx->ibuf_host_addr;
+ res.vied_address = ctx->ibuf_vied_addr;
+ for (i = 0; i < cfg->num_input_queues; i++) {
+ sys_queue_init(ctx->input_queue + i,
+ cfg->input[i].queue_size,
+ cfg->input[i].token_size, &res);
+ }
+
+ /* initialize output queues */
+ res.host_address = ctx->obuf_host_addr;
+ res.vied_address = ctx->obuf_vied_addr;
+ for (i = 0; i < cfg->num_output_queues; i++) {
+ sys_queue_init(ctx->output_queue + i,
+ cfg->output[i].queue_size,
+ cfg->output[i].token_size, &res);
+ }
+
+ /* fill shared queue structs */
+ shared_memory_store(cfg->mmid, ctx->input_queue_host_addr,
+ ctx->input_queue,
+ cfg->num_input_queues * sizeof(struct sys_queue));
+ ia_css_cpu_mem_cache_flush(
+ (void *)HOST_ADDRESS(ctx->input_queue_host_addr),
+ cfg->num_input_queues * sizeof(struct sys_queue));
+ shared_memory_store(cfg->mmid, ctx->output_queue_host_addr,
+ ctx->output_queue,
+ cfg->num_output_queues * sizeof(struct sys_queue));
+ ia_css_cpu_mem_cache_flush(
+ (void *)HOST_ADDRESS(ctx->output_queue_host_addr),
+ cfg->num_output_queues * sizeof(struct sys_queue));
+
+ /* Zero the queue buffers. Is this really needed? */
+ shared_memory_zero(cfg->mmid, buf->ibuf_host, size.ibuf);
+ ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(buf->ibuf_host),
+ size.ibuf);
+ shared_memory_zero(cfg->mmid, buf->obuf_host, size.obuf);
+ ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(buf->obuf_host),
+ size.obuf);
+
+ /* copy firmware specific data */
+ if (cfg->specific_addr && cfg->specific_size) {
+ shared_memory_store(cfg->mmid, ctx->specific_host_addr,
+ cfg->specific_addr, cfg->specific_size);
+ ia_css_cpu_mem_cache_flush(
+ (void *)HOST_ADDRESS(ctx->specific_host_addr),
+ cfg->specific_size);
+ }
+
+ fw_cfg.num_input_queues = cfg->num_input_queues;
+ fw_cfg.num_output_queues = cfg->num_output_queues;
+ fw_cfg.input_queue = ctx->input_queue_vied_addr;
+ fw_cfg.output_queue = ctx->output_queue_vied_addr;
+ fw_cfg.specific_addr = ctx->specific_vied_addr;
+ fw_cfg.specific_size = cfg->specific_size;
+
+ shared_memory_store(cfg->mmid, ctx->config_host_addr,
+ &fw_cfg, sizeof(struct ia_css_syscom_config_fw));
+ ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(ctx->config_host_addr),
+ sizeof(struct ia_css_syscom_config_fw));
+
+#if !HAS_DUAL_CMD_CTX_SUPPORT
+ /* store syscom uninitialized state */
+ IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store STATE_REG (%#x) @ dmem_addr %#x ssid %d\n",
+ SYSCOM_STATE_UNINIT, ctx->cell_dmem_addr, cfg->ssid);
+ regmem_store_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG,
+ SYSCOM_STATE_UNINIT, cfg->ssid);
+ /* store syscom uninitialized command */
+ IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store COMMAND_REG (%#x) @ dmem_addr %#x ssid %d\n",
+ SYSCOM_COMMAND_UNINIT, ctx->cell_dmem_addr, cfg->ssid);
+ regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG,
+ SYSCOM_COMMAND_UNINIT, cfg->ssid);
+ /* store firmware configuration address */
+ IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store CONFIG_REG (%#x) @ dmem_addr %#x ssid %d\n",
+ ctx->config_vied_addr, ctx->cell_dmem_addr, cfg->ssid);
+ regmem_store_32(ctx->cell_dmem_addr, SYSCOM_CONFIG_REG,
+ ctx->config_vied_addr, cfg->ssid);
+#endif
+
+ /* Indicate if ctx is created for secure stream purpose */
+ ctx->secure = cfg->secure;
+
+ IA_CSS_TRACE_1(SYSCOM, INFO, "ia_css_syscom_open (secure %d) completed\n", cfg->secure);
+ return ctx;
+}
+
+
+int
+ia_css_syscom_close(
+ struct ia_css_syscom_context *ctx
+) {
+ int state;
+
+ state = regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG,
+ ctx->env.ssid);
+ if (state != SYSCOM_STATE_READY) {
+ /* SPC is not ready to handle close request yet */
+ return FW_ERROR_BUSY;
+ }
+
+ /* set close request flag */
+ regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG,
+ SYSCOM_COMMAND_INACTIVE, ctx->env.ssid);
+
+ return 0;
+}
+
+static void
+ia_css_syscom_free(struct ia_css_syscom_context *ctx)
+{
+ shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, ctx->ibuf_vied_addr);
+ shared_memory_free(ctx->env.mmid, ctx->ibuf_host_addr);
+ shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, ctx->obuf_vied_addr);
+ shared_memory_free(ctx->env.mmid, ctx->obuf_host_addr);
+ shared_memory_unmap(ctx->env.ssid, ctx->env.mmid,
+ ctx->config_vied_addr);
+ shared_memory_free(ctx->env.mmid, ctx->config_host_addr);
+ ia_css_cpu_mem_free(ctx);
+}
+
+int
+ia_css_syscom_release(
+ struct ia_css_syscom_context *ctx,
+ unsigned int force
+) {
+ /* check if release is forced, and verify the cell state if it is not */
+ if (!force) {
+ if (!ia_css_cell_is_ready(ctx->env.ssid, SPC0))
+ return FW_ERROR_BUSY;
+ }
+
+ /* Reset the regmem idx */
+ ctx->regmem_idx = 0;
+
+ if (ctx->free_buf)
+ ia_css_syscom_free(ctx);
+
+ return 0; +} + +int ia_css_syscom_send_port_open( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + int state; + + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + /* check if SP syscom is ready to open the queue */ + state = regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle messages yet */ + return FW_ERROR_BUSY; + } + + /* initialize the port */ + send_port_open(ctx->send_port + port, + ctx->input_queue + port, &(ctx->env)); + + return 0; +} + +int ia_css_syscom_send_port_close( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return 0; +} + +int ia_css_syscom_send_port_available( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return send_port_available(ctx->send_port + port); +} + +int ia_css_syscom_send_port_transfer( + struct ia_css_syscom_context *ctx, + unsigned int port, + const void *token +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return send_port_transfer(ctx->send_port + port, token); +} + +int ia_css_syscom_recv_port_open( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + int state; + + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + /* check if SP syscom is ready to open the queue */ + state = regmem_load_32(ctx->cell_dmem_addr, + SYSCOM_STATE_REG, ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle messages yet */ + return FW_ERROR_BUSY; + } + + /* initialize the port */ + recv_port_open(ctx->recv_port + port, + ctx->output_queue + port, &(ctx->env)); + + return 0; +} + +int ia_css_syscom_recv_port_close( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return 0; +} + +/* + * Get the number of responses in the response queue + */ +int +ia_css_syscom_recv_port_available( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return recv_port_available(ctx->recv_port + port); +} + + +/* + * Dequeue the head of the response queue + * returns an error when the response queue is empty + */ +int +ia_css_syscom_recv_port_transfer( + struct ia_css_syscom_context *ctx, + unsigned int port, + void *token +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return recv_port_transfer(ctx->recv_port + port, token); +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +/* + * store subsystem context information in DMEM + */ +int +ia_css_syscom_store_dmem( + struct ia_css_syscom_context *ctx, + unsigned int ssid, + unsigned int vtl0_addr_mask +) +{ + unsigned int read_back; + + NOT_USED(vtl0_addr_mask); + NOT_USED(read_back); + + if (ctx->secure) { + 
/* store VTL0 address mask in 'secure' context */
+ IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem VTL0_ADDR_MASK (%#x) @ dmem_addr %#x ssid %d\n",
+ vtl0_addr_mask, ctx->cell_dmem_addr, ssid);
+ regmem_store_32(ctx->cell_dmem_addr, SYSCOM_VTL0_ADDR_MASK, vtl0_addr_mask, ssid);
+ }
+ /* store firmware configuration address */
+ IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem CONFIG_REG (%#x) @ dmem_addr %#x ssid %d\n",
+ ctx->config_vied_addr, ctx->cell_dmem_addr, ssid);
+ regmem_store_32(ctx->cell_dmem_addr, SYSCOM_CONFIG_REG,
+ ctx->config_vied_addr, ssid);
+ /* store syscom uninitialized state */
+ IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem STATE_REG (%#x) @ dmem_addr %#x ssid %d\n",
+ SYSCOM_STATE_UNINIT, ctx->cell_dmem_addr, ssid);
+ regmem_store_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG,
+ SYSCOM_STATE_UNINIT, ssid);
+ /* store syscom uninitialized command */
+ IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem COMMAND_REG (%#x) @ dmem_addr %#x ssid %d\n",
+ SYSCOM_COMMAND_UNINIT, ctx->cell_dmem_addr, ssid);
+ regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG,
+ SYSCOM_COMMAND_UNINIT, ssid);
+
+ return 0;
+}
+
+/*
+ * store trustlet configuration status setting
+ */
+void
+ia_css_syscom_set_trustlet_status(
+ unsigned int dmem_addr,
+ unsigned int ssid,
+ bool trustlet_exist
+)
+{
+ unsigned int value;
+
+ value = trustlet_exist ? TRUSTLET_EXIST : TRUSTLET_NOT_EXIST;
+ IA_CSS_TRACE_3(SYSCOM, INFO,
+ "ia_css_syscom_set_trustlet_status TRUSTLET_STATUS (%#x) @ dmem_addr %#x ssid %d\n",
+ value, dmem_addr, ssid);
+ regmem_store_32(dmem_addr, TRUSTLET_STATUS, value, ssid);
+}
+
+/*
+ * check if SPC access blocker programming is completed
+ */
+bool
+ia_css_syscom_is_ab_spc_ready(
+ struct ia_css_syscom_context *ctx
+)
+{
+ unsigned int value;
+
+ /* We expect this call from the non-secure context only */
+ if (ctx->secure) {
+ IA_CSS_TRACE_0(SYSCOM, ERROR, "ia_css_syscom_is_ab_spc_ready - Please call from non-secure context\n");
+ return false;
+ }
+
+ value = regmem_load_32(ctx->cell_dmem_addr, AB_SPC_STATUS, ctx->env.ssid);
+ IA_CSS_TRACE_3(SYSCOM, INFO,
+ "ia_css_syscom_is_ab_spc_ready AB_SPC_STATUS @ dmem_addr %#x ssid %d - value %#x\n",
+ ctx->cell_dmem_addr, ctx->env.ssid, value);
+
+ return (value == AB_SPC_READY);
+}
+#endif /* HAS_DUAL_CMD_CTX_SUPPORT */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom_config_fw.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom_config_fw.h
new file mode 100644
index 000000000000..0cacd5a34934
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom_config_fw.h
@@ -0,0 +1,69 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
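Taken together, the port functions of ia_css_syscom.c imply a simple host-side message loop: open a send and a receive port once the SP reports READY, exchange fixed-size tokens, then close. A hedged sketch; the single-queue roundtrip and the busy-wait policy are illustrative, real code would sleep and time out:

/* Hypothetical host loop sending one command token and waiting for the
 * response; FW_ERROR_BUSY from the open calls means the SP has not yet
 * reached SYSCOM_STATE_READY.
 */
static int example_roundtrip(struct ia_css_syscom_context *ctx,
                             const void *cmd, void *resp)
{
    int err;

    while (ia_css_syscom_send_port_open(ctx, 0) == FW_ERROR_BUSY)
        ; /* illustrative busy-wait */
    while (ia_css_syscom_recv_port_open(ctx, 0) == FW_ERROR_BUSY)
        ;

    /* enqueue the command when input queue 0 has room */
    while (ia_css_syscom_send_port_available(ctx, 0) == 0)
        ;
    err = ia_css_syscom_send_port_transfer(ctx, 0, cmd);
    if (err)
        return err;

    /* wait for, then dequeue, the response on output queue 0 */
    while (ia_css_syscom_recv_port_available(ctx, 0) == 0)
        ;
    err = ia_css_syscom_recv_port_transfer(ctx, 0, resp);

    ia_css_syscom_send_port_close(ctx, 0);
    ia_css_syscom_recv_port_close(ctx, 0);
    return err;
}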
+*/ + +#ifndef __IA_CSS_SYSCOM_CONFIG_FW_H +#define __IA_CSS_SYSCOM_CONFIG_FW_H + +#include "type_support.h" + +enum { + /* Program load or explicit host setting should init to this */ + SYSCOM_STATE_UNINIT = 0x57A7E000, + /* SP Syscom sets this when it is ready for use */ + SYSCOM_STATE_READY = 0x57A7E001, + /* SP Syscom sets this when no more syscom accesses will happen */ + SYSCOM_STATE_INACTIVE = 0x57A7E002 +}; + +enum { + /* Program load or explicit host setting should init to this */ + SYSCOM_COMMAND_UNINIT = 0x57A7F000, + /* Host Syscom requests syscom to become inactive */ + SYSCOM_COMMAND_INACTIVE = 0x57A7F001 +}; + +#if HAS_DUAL_CMD_CTX_SUPPORT +enum { + /* Program load or explicit host setting should init to this */ + TRUSTLET_UNINIT = 0x57A8E000, + /* Host Syscom informs SP that Trustlet exists */ + TRUSTLET_EXIST = 0x57A8E001, + /* Host Syscom informs SP that Trustlet does not exist */ + TRUSTLET_NOT_EXIST = 0x57A8E002 +}; + +enum { + /* Program load or explicit setting initialized by SP */ + AB_SPC_NOT_READY = 0x57A8F000, + /* SP informs host that SPC access programming is completed */ + AB_SPC_READY = 0x57A8F001 +}; +#endif + +/* firmware config: data that sent from the host to SP via DDR */ +/* Cell copies data into a context */ + +struct ia_css_syscom_config_fw { + unsigned int firmware_address; + + unsigned int num_input_queues; + unsigned int num_output_queues; + unsigned int input_queue; /* hmm_ptr / struct queue* */ + unsigned int output_queue; /* hmm_ptr / struct queue* */ + + unsigned int specific_addr; /* vied virtual address */ + unsigned int specific_size; +}; + +#endif /* __IA_CSS_SYSCOM_CONFIG_FW_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom_context.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom_context.h new file mode 100644 index 000000000000..ecf22f6b7ac5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/src/ia_css_syscom_context.h @@ -0,0 +1,65 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
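The state and command words above form a small handshake: the host writes SYSCOM_COMMAND_* into regmem and the SP answers through SYSCOM_STATE_*. A sketch of the host's shutdown side of that handshake, mirroring ia_css_syscom_close(); note that only code inside the syscom library can actually see the ctx fields, they are spelled out here just to make the register traffic concrete:

static void example_shutdown(struct ia_css_syscom_context *ctx)
{
    /* wait until the SP has reached READY, then request INACTIVE */
    while (ia_css_syscom_close(ctx) == FW_ERROR_BUSY)
        ; /* illustrative busy-wait */

    /* the SP flips SYSCOM_STATE_REG to SYSCOM_STATE_INACTIVE once no
     * more syscom accesses will happen
     */
    while (regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG,
                          ctx->env.ssid) != SYSCOM_STATE_INACTIVE)
        ;

    ia_css_syscom_release(ctx, 0 /* not forced */);
}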
+*/ + +#ifndef __IA_CSS_SYSCOM_CONTEXT_H +#define __IA_CSS_SYSCOM_CONTEXT_H + +#include + +#include "port_env_struct.h" +#include + +/* host context */ +struct ia_css_syscom_context { + vied_virtual_address_t cell_firmware_addr; + unsigned int cell_regs_addr; + unsigned int cell_dmem_addr; + + struct port_env env; + + unsigned int num_input_queues; + unsigned int num_output_queues; + + /* array of input queues (from host to SP) */ + struct sys_queue *input_queue; + /* array of output queues (from SP to host) */ + struct sys_queue *output_queue; + + struct send_port *send_port; + struct recv_port *recv_port; + + unsigned int regmem_idx; + unsigned int free_buf; + + host_virtual_address_t config_host_addr; + host_virtual_address_t input_queue_host_addr; + host_virtual_address_t output_queue_host_addr; + host_virtual_address_t specific_host_addr; + host_virtual_address_t ibuf_host_addr; + host_virtual_address_t obuf_host_addr; + + vied_virtual_address_t config_vied_addr; + vied_virtual_address_t input_queue_vied_addr; + vied_virtual_address_t output_queue_vied_addr; + vied_virtual_address_t specific_vied_addr; + vied_virtual_address_t ibuf_vied_addr; + vied_virtual_address_t obuf_vied_addr; + + /* if true; secure syscom object as in VTIO Case + * if false, non-secure syscom + */ + bool secure; +}; + +#endif /* __IA_CSS_SYSCOM_CONTEXT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/syscom.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/syscom.mk new file mode 100644 index 000000000000..8d36b8928af5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/syscom/syscom.mk @@ -0,0 +1,42 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is SYSCOM + +SYSCOM_DIR=$${MODULES_DIR}/syscom + +SYSCOM_INTERFACE=$(SYSCOM_DIR)/interface +SYSCOM_SOURCES1=$(SYSCOM_DIR)/src + +SYSCOM_HOST_FILES += $(SYSCOM_SOURCES1)/ia_css_syscom.c + +SYSCOM_HOST_CPPFLAGS += -I$(SYSCOM_INTERFACE) +SYSCOM_HOST_CPPFLAGS += -I$(SYSCOM_SOURCES1) +SYSCOM_HOST_CPPFLAGS += -I$${MODULES_DIR}/devices +ifdef REGMEM_SECURE_OFFSET +SYSCOM_HOST_CPPFLAGS += -DREGMEM_SECURE_OFFSET=$(REGMEM_SECURE_OFFSET) +else +SYSCOM_HOST_CPPFLAGS += -DREGMEM_SECURE_OFFSET=0 +endif + +SYSCOM_FW_FILES += $(SYSCOM_SOURCES1)/ia_css_syscom_fw.c + +SYSCOM_FW_CPPFLAGS += -I$(SYSCOM_INTERFACE) +SYSCOM_FW_CPPFLAGS += -I$(SYSCOM_SOURCES1) +SYSCOM_FW_CPPFLAGS += -DREGMEM_OFFSET=$(REGMEM_OFFSET) +ifdef REGMEM_SECURE_OFFSET +SYSCOM_FW_CPPFLAGS += -DREGMEM_SECURE_OFFSET=$(REGMEM_SECURE_OFFSET) +else +SYSCOM_FW_CPPFLAGS += -DREGMEM_SECURE_OFFSET=0 +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/trace/interface/ia_css_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/trace/interface/ia_css_trace.h new file mode 100644 index 000000000000..b85b1810f107 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/trace/interface/ia_css_trace.h @@ -0,0 +1,883 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +/*! \file */ + +#ifndef __IA_CSS_TRACE_H +#define __IA_CSS_TRACE_H + +/* +** Configurations +*/ + +/** + * STEP 1: Define {Module Name}_TRACE_METHOD to one of the following. + * Where: + * {Module Name} is the name of the targeted module. + * + * Example: + * #define NCI_DMA_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + */ + +/**< Use whatever method of tracing that best suits the platform + * this code is compiled for. + */ +#define IA_CSS_TRACE_METHOD_NATIVE 1 +/**< Use the Tracing NCI. */ +#define IA_CSS_TRACE_METHOD_TRACE 2 + +/** + * STEP 2: Define {Module Name}_TRACE_LEVEL_{Level} to one of the following. + * Where: + * {Module Name} is the name of the targeted module. + * {Level}, in decreasing order of severity, is one of the + * following values: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * + * Example: + * #define NCI_DMA_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + * #define NCI_DMA_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + */ +/**< Disables the corresponding trace level. */ +#define IA_CSS_TRACE_LEVEL_DISABLED 0 +/**< Enables the corresponding trace level. */ +#define IA_CSS_TRACE_LEVEL_ENABLED 1 + +/* + * Used in macro definition with do-while loop + * for removing checkpatch warnings + */ +#define IA_CSS_TRACE_FILE_DUMMY_DEFINE + +/** + * STEP 3: Define IA_CSS_TRACE_PRINT_FILE_LINE to have file name and + * line printed with every log message. + * + * Example: + * #define IA_CSS_TRACE_PRINT_FILE_LINE + */ + +/* +** Interface +*/ + +/* +** Static +*/ + +/** + * Logs a message with zero arguments if the targeted severity level is enabled + * at compile-time. + * @param module The targeted module. + * @param severity The severity level of the trace message. In decreasing order: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * @param format The message to be traced. + */ +#define IA_CSS_TRACE_0(module, severity, format) \ + IA_CSS_TRACE_IMPL(module, 0, severity, format) + +/** + * Logs a message with one argument if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_1(module, severity, format, a1) \ + IA_CSS_TRACE_IMPL(module, 1, severity, format, a1) + +/** + * Logs a message with two arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_2(module, severity, format, a1, a2) \ + IA_CSS_TRACE_IMPL(module, 2, severity, format, a1, a2) + +/** + * Logs a message with three arguments if the targeted severity level + * is enabled at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_3(module, severity, format, a1, a2, a3) \ + IA_CSS_TRACE_IMPL(module, 3, severity, format, a1, a2, a3) + +/** + * Logs a message with four arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_4(module, severity, format, a1, a2, a3, a4) \ + IA_CSS_TRACE_IMPL(module, 4, severity, format, a1, a2, a3, a4) + +/** + * Logs a message with five arguments if the targeted severity level is enabled + * at compile-time. 
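Putting steps 1 and 2 to work for a hypothetical module FOO (all FOO_* names are placeholders), and tracing one call through the token-pasting bridge defined further below:

/* foo_trace.h - module configuration, steps 1 and 2 */
#define FOO_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE
#define FOO_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED
#define FOO_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED
#define FOO_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED
#define FOO_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED
#define FOO_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED
#define FOO_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED
#include "ia_css_trace.h"

static void foo_open_done(int n)
{
    /* resolves to IA_CSS_TRACE_1_1_1 (one argument, native backend,
     * enabled) and prints "[FOO]:[Info]: opened <n> queues"
     */
    IA_CSS_TRACE_1(FOO, INFO, "opened %d queues\n", n);

    /* FOO_TRACE_LEVEL_DEBUG is DISABLED, so this resolves to the empty
     * IA_CSS_TRACE_1_1_0 and compiles to nothing
     */
    IA_CSS_TRACE_1(FOO, DEBUG, "queue count %d\n", n);
}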
+ * @see IA_CSS_TRACE_0
+ */
+#define IA_CSS_TRACE_5(module, severity, format, a1, a2, a3, a4, a5) \
+ IA_CSS_TRACE_IMPL(module, 5, severity, format, a1, a2, a3, a4, a5)
+
+/**
+ * Logs a message with six arguments if the targeted severity level is enabled
+ * at compile-time.
+ * @see IA_CSS_TRACE_0
+ */
+#define IA_CSS_TRACE_6(module, severity, format, a1, a2, a3, a4, a5, a6) \
+ IA_CSS_TRACE_IMPL(module, 6, severity, format, a1, a2, a3, a4, a5, a6)
+
+/**
+ * Logs a message with seven arguments if the targeted severity level
+ * is enabled at compile-time.
+ * @see IA_CSS_TRACE_0
+ */
+#define IA_CSS_TRACE_7(module, severity, format, a1, a2, a3, a4, a5, a6, a7) \
+ IA_CSS_TRACE_IMPL(module, 7, severity, format, \
+ a1, a2, a3, a4, a5, a6, a7)
+
+/*
+** Dynamic
+*/
+
+/**
+* Declares, but does not define, dynamic tracing functions and variables
+* for module \p module. For each module, place an instance of this macro
+* in the compilation unit in which you want to use the dynamic tracing
+* facility, so as to inform the compiler of the declaration of the available
+* functions. An invocation of this macro does not enable any of the available
+* tracing levels. Do not place a semicolon after a call to this macro.
+* @see IA_CSS_TRACE_DYNAMIC_DEFINE
+*/
+#define IA_CSS_TRACE_DYNAMIC_DECLARE(module) \
+ IA_CSS_TRACE_DYNAMIC_DECLARE_IMPL(module)
+/**
+* Declares the configuration function for the dynamic API separately, if one
+* wants to use it.
+*/
+#define IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC(module) \
+ IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC_IMPL(module)
+
+/**
+* Defines dynamic tracing functions and variables for module \p module.
+* For each module, place an instance of this macro in one, and only one,
+* of your SOURCE files so as to allow the linker to resolve the related
+* symbols. An invocation of this macro does not enable any of the available
+* tracing levels. Do not place a semicolon after a call to this macro.
+* @see IA_CSS_TRACE_DYNAMIC_DECLARE
+*/
+#define IA_CSS_TRACE_DYNAMIC_DEFINE(module) \
+ IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(module)
+/**
+* Defines the configuration function for the dynamic API separately, if one
+* wants to use it.
+*/
+#define IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC(module) \
+ IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(module)
+
+/**
+ * Logs a message with zero arguments if the targeted severity level is enabled
+ * both at compile-time, and run-time.
+ * @param module The targeted module.
+ * @param severity The severity level of the trace message. In decreasing order:
+ * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}.
+ * @param format The message to be traced.
+ */
+#define IA_CSS_TRACE_DYNAMIC_0(module, severity, format) \
+ IA_CSS_TRACE_DYNAMIC_IMPL(module, 0, severity, format)
+
+/**
+ * Logs a message with one argument if the targeted severity level is enabled
+ * both at compile-time, and run-time.
+ * @see IA_CSS_TRACE_DYNAMIC_0
+ */
+#define IA_CSS_TRACE_DYNAMIC_1(module, severity, format, a1) \
+ IA_CSS_TRACE_DYNAMIC_IMPL(module, 1, severity, format, a1)
+
+/**
+ * Logs a message with two arguments if the targeted severity level is enabled
+ * both at compile-time, and run-time.
+ * @see IA_CSS_TRACE_DYNAMIC_0
+ */
+#define IA_CSS_TRACE_DYNAMIC_2(module, severity, format, a1, a2) \
+ IA_CSS_TRACE_DYNAMIC_IMPL(module, 2, severity, format, a1, a2)
+
+/**
+ * Logs a message with three arguments if the targeted severity level
+ * is enabled both at compile-time, and run-time.
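A sketch of the dynamic variant for the same hypothetical FOO module. One caveat: the run-time guard in IA_CSS_TRACE_DYNAMIC_IMPL pastes the severity argument onto _trace_level_, while IA_CSS_TRACE_DYNAMIC_DEFINE generates lower-case flag names, so the module has to reconcile the two spellings; the alias below is an assumption made for illustration, not something this header provides.

#define FOO_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE
#define FOO_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_ENABLED
#include "ia_css_trace.h"

/* in exactly one source file of the module */
IA_CSS_TRACE_DYNAMIC_DEFINE(FOO)

/* assumed alias so the run-time guard finds the generated flag */
#define FOO_trace_level_DEBUG FOO_trace_level_debug

static void foo_debug_session(void)
{
    FOO_trace_debug_enable();

    /* compiled in (compile-time level ENABLED) and, with the flag set,
     * now also emitted at run time
     */
    IA_CSS_TRACE_DYNAMIC_1(FOO, DEBUG, "state = %d\n", 42);

    FOO_trace_debug_disable();
}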
+ * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_3(module, severity, format, a1, a2, a3) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 3, severity, format, a1, a2, a3) + +/** + * Logs a message with four arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_4(module, severity, format, a1, a2, a3, a4) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 4, severity, format, a1, a2, a3, a4) + +/** + * Logs a message with five arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_5(module, severity, format, a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 5, severity, format, \ + a1, a2, a3, a4, a5) + +/** + * Logs a message with six arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_6(module, severity, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 6, severity, format, \ + a1, a2, a3, a4, a5, a6) + +/** + * Logs a message with seven arguments if the targeted severity level + * is enabled both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_7(module, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 7, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) + +/* +** Implementation +*/ + +/* CAT */ +#define IA_CSS_TRACE_CAT_IMPL(a, b) a ## b +#define IA_CSS_TRACE_CAT(a, b) IA_CSS_TRACE_CAT_IMPL(a, b) + +/* Bridge */ +#if defined(__HIVECC) || defined(__GNUC__) +#define IA_CSS_TRACE_IMPL(module, argument_count, severity, arguments ...) \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_, \ + argument_count \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_LEVEL_ \ + ), \ + severity \ + ) \ + ( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_SEVERITY_, \ + severity \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + #module, \ + ## arguments \ + ) \ + ) + +/* Bridge */ +#define IA_CSS_TRACE_DYNAMIC_IMPL(module, argument_count, severity, \ + arguments ...) \ + do { \ + if (IA_CSS_TRACE_CAT(IA_CSS_TRACE_CAT(module, _trace_level_), \ + severity)) { \ + IA_CSS_TRACE_IMPL(module, argument_count, severity, \ + ## arguments); \ + } \ + } while (0) +#elif defined(_MSC_VER) +#define IA_CSS_TRACE_IMPL(module, argument_count, severity, ...) \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_, \ + argument_count \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_LEVEL_ \ + ), \ + severity \ + ) \ + ( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_SEVERITY_, \ + severity \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + #module, \ + __VA_ARGS__ \ + ) \ + ) + +/* Bridge */ +#define IA_CSS_TRACE_DYNAMIC_IMPL(module, argument_count, severity, ...) 
\ + do { \ + if (IA_CSS_TRACE_CAT(IA_CSS_TRACE_CAT(module, _trace_level_), \ + severity)) { \ + IA_CSS_TRACE_IMPL(module, argument_count, severity, \ + __VA_ARGS__); \ + } \ + } while (0) +#endif + +/* +** Native Backend +*/ + +#if defined(__HIVECC) + #define IA_CSS_TRACE_PLATFORM_CELL +#elif defined(__GNUC__) + #define IA_CSS_TRACE_PLATFORM_HOST + + #define IA_CSS_TRACE_NATIVE(severity, module, format, arguments ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, module, \ + format), ## arguments); \ + } while (0) + /* TODO: In case Host Side tracing is needed to be mapped to the + * Tunit, the following "IA_CSS_TRACE_TRACE" needs to be modified from + * PRINT to vied_nci_tunit_print function calls + */ + #define IA_CSS_TRACE_TRACE(severity, module, format, arguments ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, module, \ + format), ## arguments); \ + } while (0) + +#elif defined(_MSC_VER) + #define IA_CSS_TRACE_PLATFORM_HOST + + #define IA_CSS_TRACE_NATIVE(severity, module, format, ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, \ + module, format), __VA_ARGS__); \ + } while (0) + /* TODO: In case Host Side tracing is needed to be mapped to the + * Tunit, the following "IA_CSS_TRACE_TRACE" needs to be modified from + * PRINT to vied_nci_tunit_print function calls + */ + #define IA_CSS_TRACE_TRACE(severity, module, format, ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, \ + module, format), __VA_ARGS__); \ + } while (0) +#else + #error Unsupported platform! +#endif /* Platform */ + +#if defined(IA_CSS_TRACE_PLATFORM_CELL) + #include /* VOLATILE */ + + #ifdef IA_CSS_TRACE_PRINT_FILE_LINE + #define IA_CSS_TRACE_FILE_PRINT_COMMAND \ + do { \ + OP___printstring(__FILE__":") VOLATILE; \ + OP___printdec(__LINE__) VOLATILE; \ + OP___printstring("\n") VOLATILE; \ + } while (0) + #else + #define IA_CSS_TRACE_FILE_PRINT_COMMAND + #endif + + #define IA_CSS_TRACE_MODULE_SEVERITY_PRINT(module, severity) \ + do { \ + IA_CSS_TRACE_FILE_DUMMY_DEFINE; \ + OP___printstring("["module"]:["severity"]:") \ + VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_MSG_NATIVE(severity, module, format) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + OP___printstring("["module"]:["severity"]: "format) \ + VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_ARG_NATIVE(module, severity, i, value) \ + do { \ + IA_CSS_TRACE_MODULE_SEVERITY_PRINT(module, severity); \ + OP___dump(i, value) VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_0(severity, module, format) \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format) + + #define IA_CSS_TRACE_NATIVE_1(severity, module, format, a1) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_2(severity, module, format, a1, a2) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_3(severity, module, format, a1, a2, a3) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + } while (0) + + #define 
IA_CSS_TRACE_NATIVE_4(severity, module, format, \ + a1, a2, a3, a4) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 6, a6); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 6, a6); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 7, a7); \ + } while (0) + /* + ** Tracing Backend + */ +#if !defined(HRT_CSIM) && !defined(NO_TUNIT) + #include "vied_nci_tunit.h" +#endif + #define IA_CSS_TRACE_AUG_FORMAT_TRACE(format, module) \ + "[" module "]" format " : PID = %x : Timestamp = %d : PC = %x" + + #define IA_CSS_TRACE_TRACE_0(severity, module, format) \ + vied_nci_tunit_print(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity) + + #define IA_CSS_TRACE_TRACE_1(severity, module, format, a1) \ + vied_nci_tunit_print1i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1) + + #define IA_CSS_TRACE_TRACE_2(severity, module, format, a1, a2) \ + vied_nci_tunit_print2i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2) + + #define IA_CSS_TRACE_TRACE_3(severity, module, format, a1, a2, a3) \ + vied_nci_tunit_print3i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3) + + #define IA_CSS_TRACE_TRACE_4(severity, module, format, a1, a2, a3, a4) \ + vied_nci_tunit_print4i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4) + + #define IA_CSS_TRACE_TRACE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + vied_nci_tunit_print5i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_TRACE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + vied_nci_tunit_print6i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_TRACE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + vied_nci_tunit_print7i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5, a6, a7) + +#elif 
defined(IA_CSS_TRACE_PLATFORM_HOST) + #include "print_support.h" + + #ifdef IA_CSS_TRACE_PRINT_FILE_LINE + #define IA_CSS_TRACE_FILE_PRINT_COMMAND \ + PRINT("%s:%d:\n", __FILE__, __LINE__) + #else + #define IA_CSS_TRACE_FILE_PRINT_COMMAND + #endif + + #define IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, module, format) \ + "[" module "]:[" severity "]: " format + + #define IA_CSS_TRACE_NATIVE_0(severity, module, format) \ + IA_CSS_TRACE_NATIVE(severity, module, format) + + #define IA_CSS_TRACE_NATIVE_1(severity, module, format, a1) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1) + + #define IA_CSS_TRACE_NATIVE_2(severity, module, format, a1, a2) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2) + + #define IA_CSS_TRACE_NATIVE_3(severity, module, format, a1, a2, a3) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2, a3) + + #define IA_CSS_TRACE_NATIVE_4(severity, module, format, \ + a1, a2, a3, a4) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2, a3, a4) + + #define IA_CSS_TRACE_NATIVE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_NATIVE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_NATIVE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) + + #define IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, module, format) \ + "["module"]:["severity"]: "format + + #define IA_CSS_TRACE_TRACE_0(severity, module, format) \ + IA_CSS_TRACE_TRACE(severity, module, format) + + #define IA_CSS_TRACE_TRACE_1(severity, module, format, a1) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1) + + #define IA_CSS_TRACE_TRACE_2(severity, module, format, a1, a2) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2) + + #define IA_CSS_TRACE_TRACE_3(severity, module, format, a1, a2, a3) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2, a3) + + #define IA_CSS_TRACE_TRACE_4(severity, module, format, \ + a1, a2, a3, a4) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2, a3, a4) + + #define IA_CSS_TRACE_TRACE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_TRACE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_TRACE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) +#endif + +/* Disabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_1_0(severity, module, format) +#define IA_CSS_TRACE_1_1_0(severity, module, format, arg1) +#define IA_CSS_TRACE_2_1_0(severity, module, format, arg1, arg2) +#define IA_CSS_TRACE_3_1_0(severity, module, format, arg1, arg2, arg3) +#define IA_CSS_TRACE_4_1_0(severity, module, format, arg1, arg2, arg3, arg4) +#define IA_CSS_TRACE_5_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5) +#define IA_CSS_TRACE_6_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6) +#define IA_CSS_TRACE_7_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7) + +/* Enabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_1_1 IA_CSS_TRACE_NATIVE_0 
+#define IA_CSS_TRACE_1_1_1 IA_CSS_TRACE_NATIVE_1 +#define IA_CSS_TRACE_2_1_1 IA_CSS_TRACE_NATIVE_2 +#define IA_CSS_TRACE_3_1_1 IA_CSS_TRACE_NATIVE_3 +#define IA_CSS_TRACE_4_1_1 IA_CSS_TRACE_NATIVE_4 +#define IA_CSS_TRACE_5_1_1 IA_CSS_TRACE_NATIVE_5 +#define IA_CSS_TRACE_6_1_1 IA_CSS_TRACE_NATIVE_6 +#define IA_CSS_TRACE_7_1_1 IA_CSS_TRACE_NATIVE_7 + +/* Enabled */ +/* Legend: IA_CSS_TRACE_SEVERITY_{Severity Level}_{Backend ID} */ +#define IA_CSS_TRACE_SEVERITY_ASSERT_1 "Assert" +#define IA_CSS_TRACE_SEVERITY_ERROR_1 "Error" +#define IA_CSS_TRACE_SEVERITY_WARNING_1 "Warning" +#define IA_CSS_TRACE_SEVERITY_INFO_1 "Info" +#define IA_CSS_TRACE_SEVERITY_DEBUG_1 "Debug" +#define IA_CSS_TRACE_SEVERITY_VERBOSE_1 "Verbose" + +/* Disabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_2_0(severity, module, format) +#define IA_CSS_TRACE_1_2_0(severity, module, format, arg1) +#define IA_CSS_TRACE_2_2_0(severity, module, format, arg1, arg2) +#define IA_CSS_TRACE_3_2_0(severity, module, format, arg1, arg2, arg3) +#define IA_CSS_TRACE_4_2_0(severity, module, format, arg1, arg2, arg3, arg4) +#define IA_CSS_TRACE_5_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5) +#define IA_CSS_TRACE_6_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6) +#define IA_CSS_TRACE_7_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7) + +/* Enabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_2_1 IA_CSS_TRACE_TRACE_0 +#define IA_CSS_TRACE_1_2_1 IA_CSS_TRACE_TRACE_1 +#define IA_CSS_TRACE_2_2_1 IA_CSS_TRACE_TRACE_2 +#define IA_CSS_TRACE_3_2_1 IA_CSS_TRACE_TRACE_3 +#define IA_CSS_TRACE_4_2_1 IA_CSS_TRACE_TRACE_4 +#define IA_CSS_TRACE_5_2_1 IA_CSS_TRACE_TRACE_5 +#define IA_CSS_TRACE_6_2_1 IA_CSS_TRACE_TRACE_6 +#define IA_CSS_TRACE_7_2_1 IA_CSS_TRACE_TRACE_7 + +/* Enabled */ +/* Legend: IA_CSS_TRACE_SEVERITY_{Severity Level}_{Backend ID} */ +#define IA_CSS_TRACE_SEVERITY_ASSERT_2 VIED_NCI_TUNIT_MSG_SEVERITY_FATAL +#define IA_CSS_TRACE_SEVERITY_ERROR_2 VIED_NCI_TUNIT_MSG_SEVERITY_ERROR +#define IA_CSS_TRACE_SEVERITY_WARNING_2 VIED_NCI_TUNIT_MSG_SEVERITY_WARNING +#define IA_CSS_TRACE_SEVERITY_INFO_2 VIED_NCI_TUNIT_MSG_SEVERITY_NORMAL +#define IA_CSS_TRACE_SEVERITY_DEBUG_2 VIED_NCI_TUNIT_MSG_SEVERITY_USER1 +#define IA_CSS_TRACE_SEVERITY_VERBOSE_2 VIED_NCI_TUNIT_MSG_SEVERITY_USER2 + +/* +** Dynamicism +*/ + +#define IA_CSS_TRACE_DYNAMIC_DECLARE_IMPL(module) \ + do { \ + void IA_CSS_TRACE_CAT(module, _trace_assert_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_assert_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_error_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_error_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_warning_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_warning_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_info_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_info_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_debug_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_debug_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_disable)(void); \ + } while (0) + +#define IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC_IMPL(module) \ + do { \ + IA_CSS_TRACE_FILE_DUMMY_DEFINE; \ + void IA_CSS_TRACE_CAT(module, _trace_configure)\ + (int argc, const char *const *argv); \ + } while (0) + +#include "platform_support.h" +#include 
"type_support.h" + +#define IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(module) \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_assert); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_error); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_warning); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_info); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_debug); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_verbose); \ + \ + void IA_CSS_TRACE_CAT(module, _trace_assert_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_assert) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_assert_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_assert) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_error_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_error) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_error_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_error) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_warning_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_warning) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_warning_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_warning) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_info_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_info) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_info_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_info) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_debug_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_debug) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_debug_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_debug) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_verbose) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_verbose) = 0; \ + } + +#define IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(module) \ +void IA_CSS_TRACE_CAT(module, _trace_configure)(const int argc, \ + const char *const *const argv) \ +{ \ + int i = 1; \ + const char *levels = 0; \ + \ + while (i < argc) { \ + if (!strcmp(argv[i], "-" #module "_trace")) { \ + ++i; \ + \ + if (i < argc) { \ + levels = argv[i]; \ + \ + while (*levels) { \ + switch (*levels++) { \ + case 'a': \ + IA_CSS_TRACE_CAT \ + (module, _trace_assert_enable)(); \ + break; \ + \ + case 'e': \ + IA_CSS_TRACE_CAT \ + (module, _trace_error_enable)(); \ + break; \ + \ + case 'w': \ + IA_CSS_TRACE_CAT \ + (module, _trace_warning_enable)(); \ + break; \ + \ + case 'i': \ + IA_CSS_TRACE_CAT \ + (module, _trace_info_enable)(); \ + break; \ + \ + case 'd': \ + IA_CSS_TRACE_CAT \ + (module, _trace_debug_enable)(); \ + break; \ + \ + case 'v': \ + IA_CSS_TRACE_CAT \ + (module, _trace_verbose_enable)(); \ + break; \ + \ + default: \ + } \ + } \ + } \ + } \ + \ + ++i; \ + } \ +} + +#endif /* __IA_CSS_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/trace/trace.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/trace/trace.mk new file mode 100644 index 000000000000..b232880b882b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/trace/trace.mk @@ -0,0 +1,40 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. 
+# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE Trace + +# Dependencies +IA_CSS_TRACE_SUPPORT = $${MODULES_DIR}/support + +# API +IA_CSS_TRACE = $${MODULES_DIR}/trace +IA_CSS_TRACE_INTERFACE = $(IA_CSS_TRACE)/interface + +# +# Host +# + +# Host CPP Flags +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE_SUPPORT) +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE_INTERFACE) +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE)/trace_modules + +# +# Firmware +# + +# Firmware CPP Flags +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE_SUPPORT) +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE_INTERFACE) +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE)/trace_modules diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/utils/system_defs/system_const.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/utils/system_defs/system_const.h new file mode 100644 index 000000000000..161f28fced97 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/utils/system_defs/system_const.h @@ -0,0 +1,26 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __SYSTEM_CONST_H +#define __SYSTEM_CONST_H + +/* The values included in this file should have been + * taken from system/device properties which + * are not currently available in SDK + */ + +#define XMEM_WIDTH (512) +#define MG_PPC (4) + +#endif /* __SYSTEM_CONST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/shared_memory_access.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/shared_memory_access.h new file mode 100755 index 000000000000..1e81bad9f4ee --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/shared_memory_access.h @@ -0,0 +1,139 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ +#ifndef _SHARED_MEMORY_ACCESS_H +#define _SHARED_MEMORY_ACCESS_H + +#include +#include +#include + +typedef enum { + sm_esuccess, + sm_enomem, + sm_ezeroalloc, + sm_ebadvaddr, + sm_einternalerror, + sm_ecorruption, + sm_enocontiguousmem, + sm_enolocmem, + sm_emultiplefree, +} shared_memory_error; + +/** + * \brief Virtual address of (DDR) shared memory space as seen from the VIED subsystem + */ +typedef uint32_t vied_virtual_address_t; + +/** + * \brief Virtual address of (DDR) shared memory space as seen from the host + */ +typedef unsigned long long host_virtual_address_t; + +/** + * \brief List of physical addresses of (DDR) shared memory space. This is used to represent a list of physical pages. + */ +typedef struct shared_memory_physical_page_list_s *shared_memory_physical_page_list; +typedef struct shared_memory_physical_page_list_s +{ + shared_memory_physical_page_list next; + vied_physical_address_t address; +}shared_memory_physical_page_list_s; + + +/** + * \brief Initialize the shared memory interface administration on the host. + * \param idm: id of ddr memory + * \param host_ddr_addr: physical address of memory as seen from host + * \param memory_size: size of ddr memory in bytes + * \param ps: size of page in bytes (for instance 4096) + */ +int shared_memory_allocation_initialize(vied_memory_t idm, vied_physical_address_t host_ddr_addr, size_t memory_size, size_t ps); + +/** + * \brief De-initialize the shared memory interface administration on the host. + * + */ +void shared_memory_allocation_uninitialize(vied_memory_t idm); + +/** + * \brief Allocate (DDR) shared memory space and return a host virtual address. Returns NULL when insufficient memory available + */ +host_virtual_address_t shared_memory_alloc(vied_memory_t idm, size_t bytes); + +/** + * \brief Free (DDR) shared memory space. +*/ +void shared_memory_free(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Translate a virtual host.address to a physical address. +*/ +vied_physical_address_t shared_memory_virtual_host_to_physical_address (vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Return the allocated physical pages for a virtual host.address. +*/ +shared_memory_physical_page_list shared_memory_virtual_host_to_physical_pages (vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Destroy a shared_memory_physical_page_list. 
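The store/load half of this interface (below) always goes through host virtual addresses. A short sketch of the typical pattern, assuming shared_memory_allocation_initialize() has already been called for the memory id in question:

static void example_shared_rw(vied_memory_t mmid)
{
    host_virtual_address_t a = shared_memory_alloc(mmid, 64);

    if (!a)
        return; /* allocation failed */

    shared_memory_zero(mmid, a, 64);
    shared_memory_store_32(mmid, a, 0xcafef00d);

    if (shared_memory_load_32(mmid, a) != 0xcafef00d)
        ; /* would indicate a corrupt mapping */

    shared_memory_free(mmid, a);
}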
+*/ +void shared_memory_physical_pages_list_destroy (shared_memory_physical_page_list ppl); + +/** + * \brief Store a byte into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_8 (vied_memory_t idm, host_virtual_address_t addr, uint8_t data); + +/** + * \brief Store a 16-bit word into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_16(vied_memory_t idm, host_virtual_address_t addr, uint16_t data); + +/** + * \brief Store a 32-bit word into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_32(vied_memory_t idm, host_virtual_address_t addr, uint32_t data); + +/** + * \brief Store a number of bytes into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store(vied_memory_t idm, host_virtual_address_t addr, const void *data, size_t bytes); + +/** + * \brief Set a number of bytes of (DDR) shared memory space to 0 using a host virtual address + */ +void shared_memory_zero(vied_memory_t idm, host_virtual_address_t addr, size_t bytes); + +/** + * \brief Load a byte from (DDR) shared memory space using a host virtual address + */ +uint8_t shared_memory_load_8 (vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a 16-bit word from (DDR) shared memory space using a host virtual address + */ +uint16_t shared_memory_load_16(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a 32-bit word from (DDR) shared memory space using a host virtual address + */ +uint32_t shared_memory_load_32(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a number of bytes from (DDR) shared memory space using a host virtual address + */ +void shared_memory_load(vied_memory_t idm, host_virtual_address_t addr, void *data, size_t bytes); + +#endif /* _SHARED_MEMORY_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/shared_memory_map.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/shared_memory_map.h new file mode 100755 index 000000000000..1bbedcf9e7fd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/shared_memory_map.h @@ -0,0 +1,53 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _SHARED_MEMORY_MAP_H +#define _SHARED_MEMORY_MAP_H + +#include +#include +#include + +typedef void (*shared_memory_invalidate_mmu_tlb)(void); +typedef void (*shared_memory_set_page_table_base_address)(vied_physical_address_t); + +typedef void (*shared_memory_invalidate_mmu_tlb_ssid)(vied_subsystem_t id); +typedef void (*shared_memory_set_page_table_base_address_ssid)(vied_subsystem_t id, vied_physical_address_t); + +/** + * \brief Initialize the CSS virtual address system and MMU. The subsystem id will NOT be taken into account. 
+*/ +int shared_memory_map_initialize(vied_subsystem_t id, vied_memory_t idm, size_t mmu_ps, size_t mmu_pnrs, vied_physical_address_t ddr_addr, shared_memory_invalidate_mmu_tlb inv_tlb, shared_memory_set_page_table_base_address sbt); + +/** + * \brief Initialize the CSS virtual address system and MMU. The subsystem id will be taken into account. +*/ +int shared_memory_map_initialize_ssid(vied_subsystem_t id, vied_memory_t idm, size_t mmu_ps, size_t mmu_pnrs, vied_physical_address_t ddr_addr, shared_memory_invalidate_mmu_tlb_ssid inv_tlb, shared_memory_set_page_table_base_address_ssid sbt); + +/** + * \brief De-initialize the CSS virtual address system and MMU. +*/ +void shared_memory_map_uninitialize(vied_subsystem_t id, vied_memory_t idm); + +/** + * \brief Convert a host virtual address to a CSS virtual address and update the MMU. +*/ +vied_virtual_address_t shared_memory_map(vied_subsystem_t id, vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Free a CSS virtual address and update the MMU. +*/ +void shared_memory_unmap(vied_subsystem_t id, vied_memory_t idm, vied_virtual_address_t addr); + + +#endif /* _SHARED_MEMORY_MAP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_config.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_config.h new file mode 100755 index 000000000000..912f016ead24 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_config.h @@ -0,0 +1,33 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_CONFIG_H +#define _HRT_VIED_CONFIG_H + +/* Defines from the compiler: + * HRT_HOST - this is code running on the host + * HRT_CELL - this is code running on a cell + */ +#ifdef HRT_HOST +# define CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL 1 +# undef CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL + +#elif defined (HRT_CELL) +# undef CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL +# define CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL 1 + +#else /* !HRT_CELL */ +/* Allow neither HRT_HOST nor HRT_CELL for testing purposes */ +#endif /* !HRT_CELL */ + +#endif /* _HRT_VIED_CONFIG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_memory_access_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_memory_access_types.h new file mode 100755 index 000000000000..0b44492789e3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_memory_access_types.h @@ -0,0 +1,36 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ +#ifndef _HRT_VIED_MEMORY_ACCESS_TYPES_H +#define _HRT_VIED_MEMORY_ACCESS_TYPES_H + +/** Types for the VIED memory access interface */ + +#include "vied_types.h" + +/** + * \brief An identifier for a system memory. + * + * This identifier must be a compile-time constant. It is used in + * access to system memory. + */ +typedef unsigned int vied_memory_t; + +#ifndef __HIVECC +/** + * \brief The type for a physical address + */ +typedef unsigned long long vied_physical_address_t; +#endif + +#endif /* _HRT_VIED_MEMORY_ACCESS_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access.h new file mode 100755 index 000000000000..674f5fb5b0f9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access.h @@ -0,0 +1,70 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_H + +#include +#include "vied_config.h" +#include "vied_subsystem_access_types.h" + +#if !defined(CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL) && \ + !defined(CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL) +#error Implementation selection macro for vied subsystem access not defined +#endif + +#if defined(CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL) +#ifndef __HIVECC +#error "Inline implementation of subsystem access not supported for host" +#endif +#define _VIED_SUBSYSTEM_ACCESS_INLINE static __inline +#include "vied_subsystem_access_impl.h" +#else +#define _VIED_SUBSYSTEM_ACCESS_INLINE +#endif + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_8 (vied_subsystem_t dev, + vied_subsystem_address_t addr, uint8_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_16(vied_subsystem_t dev, + vied_subsystem_address_t addr, uint16_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_32(vied_subsystem_t dev, + vied_subsystem_address_t addr, uint32_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store(vied_subsystem_t dev, + vied_subsystem_address_t addr, + const void *data, unsigned int size); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint8_t vied_subsystem_load_8 (vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint16_t vied_subsystem_load_16(vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint32_t vied_subsystem_load_32(vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_load(vied_subsystem_t dev, + vied_subsystem_address_t addr, + void *data, unsigned int size); + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access_initialization.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access_initialization.h new file mode 100755 index 000000000000..81f4d08d5ae0 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access_initialization.h
@@ -0,0 +1,44 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H
+#define _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H
+
+#include "vied_subsystem_access_types.h"
+
+/** @brief Initialises the access of a subsystem.
+ * @param[in] system The subsystem for which the access has to be initialised.
+ *
+ * vied_subsystem_access_initialize initialises the access to a subsystem.
+ * It sets the base address of the subsystem. This base address is extracted from the hsd file.
+ *
+ */
+void
+vied_subsystem_access_initialize(vied_subsystem_t system);
+
+
+/** @brief Initialises the access of multiple subsystems.
+ * @param[in] nr_subsystems The number of subsystems for which the access has to be initialised.
+ * @param[in] base_addresses A pointer to an array of base addresses of the subsystems.
+ *            The size of this array must be "nr_subsystems".
+ *            This array must remain available while the subsystems are being accessed.
+ *
+ * vied_subsystems_access_initialize initialises the access to multiple subsystems.
+ * It sets the base addresses of the subsystems as provided by the array base_addresses.
+ *
+ */
+void
+vied_subsystems_access_initialize(unsigned int nr_subsystems,
+	const vied_subsystem_base_address_t *base_addresses);
+
+#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access_types.h
new file mode 100755
index 000000000000..75fef6c4ddba
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_subsystem_access_types.h
@@ -0,0 +1,34 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H
+#define _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H
+
+/** Types for the VIED subsystem access interface */
+#include
+
+/** \brief An identifier for a VIED subsystem.
+ *
+ * This identifier must be a compile-time constant. It is used in
+ * access to a VIED subsystem.
+ */
+typedef unsigned int vied_subsystem_t;
+
+
+/** \brief An address within a VIED subsystem */
+typedef uint32_t vied_subsystem_address_t;
+
+/** \brief A base address of a VIED subsystem seen from the host */
+typedef unsigned long long vied_subsystem_base_address_t;
+
+#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_types.h
new file mode 100755
index 000000000000..0acfdbb00cfa
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600/vied/vied/vied_types.h
@@ -0,0 +1,45 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+#ifndef _HRT_VIED_TYPES_H
+#define _HRT_VIED_TYPES_H
+
+/** Types shared by VIED interfaces */
+
+#include
+
+/** \brief An address within a VIED subsystem
+ *
+ * This will eventually replace the vied_memory_address_t and vied_subsystem_address_t
+ */
+typedef uint32_t vied_address_t;
+
+/** \brief Memory address type
+ *
+ * A memory address is an offset within a memory.
+ */
+typedef uint32_t vied_memory_address_t;
+
+/** \brief Master port id */
+typedef int vied_master_port_id_t;
+
+/**
+ * \brief Require the existence of a certain type
+ *
+ * This macro can be used in interface header files to ensure that
+ * an implementation-defined type with a specified name exists.
+ */
+#define _VIED_REQUIRE_TYPE(T) enum { _VIED_SIZEOF_##T = sizeof(T) }
+
+
+#endif /* _HRT_VIED_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/Makefile b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/Makefile
new file mode 100644
index 000000000000..2321e29ccb14
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/Makefile
@@ -0,0 +1,50 @@
+#
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+# + +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +include $(srcpath)/$(src)/../Makefile.ipu4psys_src +include $(srcpath)/$(src)/../Makefile.ipu4psys_inc + +SSID = 0 +MMID = 0 + +IPU_PSYSLIB_ROOT_REL = lib +IPU_PSYSLIB_ROOT = $(srcpath)/$(src)/$(IPU_PSYSLIB_ROOT_REL) + +ccflags-y += -I$(srcpath)/$(src)/../../../ +ccflags-y += -I$(srcpath)/$(src)/../../ +ccflags-y += -DHAS_DUAL_CMD_CTX_SUPPORT=0 -DHAS_LATE_BINDING_SUPPORT=0 -DIPU_PSYS_LEGACY + +IPU_PSYSLIB_SRC += libcsspsys2600.o + +#CFLAGS = -W -Wall -Wstrict-prototypes -Wmissing-prototypes -O2 -fomit-frame-pointer -Wno-unused-variable +HOST_DEFINES += -DSSID=$(SSID) +HOST_DEFINES += -DMMID=$(MMID) +HOST_DEFINES += -DHRT_ON_VIED_SUBSYSTEM_ACCESS=$(SSID) +HOST_DEFINES += -DCFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL +HOST_DEFINES += -DHRT_USE_VIR_ADDRS +HOST_DEFINES += -DHRT_HW +HOST_DEFINES += -DVIED_NCI_TUNIT_PSYS +HOST_DEFINES += -DFIRMWARE_RELEASE_VERSION +HOST_DEFINES += -DPSYS_SERVER_ON_SPC +HOST_DEFINES += -DAPI_SPLIT_START_STATE_UPDATE + +intel-ipu4-psys-csslib-objs := ../../../ipu-wrapper.o \ + $(IPU_PSYSLIB_SRC) + +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4-psys-csslib.o +ccflags-y += $(IPU_PSYSLIB_INC) $(HOST_DEFINES) -fno-common -v + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/DSS_V2_program_group/ia_css_fw_pkg_release.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/DSS_V2_program_group/ia_css_fw_pkg_release.h new file mode 100644 index 000000000000..cb20a688b7c3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/DSS_V2_program_group/ia_css_fw_pkg_release.h @@ -0,0 +1,14 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#define IA_CSS_FW_PKG_RELEASE 0x20180615 diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/buffer.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/buffer.mk new file mode 100644 index 000000000000..c00a1133b440 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/buffer.mk @@ -0,0 +1,43 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for
+# more details.
+#
+#
+# MODULE is BUFFER
+
+ifdef _H_BUFFER_MK
+$(error ERROR: buffer.mk included multiple times, please check makefile)
+else
+_H_BUFFER_MK=1
+endif
+
+BUFFER_DIR=$${MODULES_DIR}/buffer
+
+BUFFER_INTERFACE=$(BUFFER_DIR)/interface
+BUFFER_SOURCES_CPU=$(BUFFER_DIR)/src/cpu
+BUFFER_SOURCES_CSS=$(BUFFER_DIR)/src/css
+
+BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_buffer.c
+BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_output_buffer.c
+BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_input_buffer.c
+BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_shared_buffer.c
+BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/buffer_access.c
+BUFFER_HOST_CPPFLAGS += -I$(BUFFER_INTERFACE)
+BUFFER_HOST_CPPFLAGS += -I$${MODULES_DIR}/support
+
+BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_input_buffer.c
+BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_output_buffer.c
+BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_shared_buffer.c
+BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/buffer_access.c
+
+BUFFER_FW_CPPFLAGS += -I$(BUFFER_INTERFACE)
+BUFFER_FW_CPPFLAGS += -I$${MODULES_DIR}/support
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/buffer_access.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/buffer_access.h
new file mode 100644
index 000000000000..e5fe647742c9
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/buffer_access.h
@@ -0,0 +1,36 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __BUFFER_ACCESS_H
+#define __BUFFER_ACCESS_H
+
+#include "buffer_type.h"
+/* define to keep the buffer load/store interfaces consistent between host and css */
+#define IDM 0
+
+void
+buffer_load(
+	buffer_address address,
+	void *data,
+	unsigned int size,
+	unsigned int mm_id);
+
+void
+buffer_store(
+	buffer_address address,
+	const void *data,
+	unsigned int size,
+	unsigned int mm_id);
+
+#endif /* __BUFFER_ACCESS_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/buffer_type.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/buffer_type.h
new file mode 100644
index 000000000000..de51f2394158
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/buffer_type.h
@@ -0,0 +1,29 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __BUFFER_TYPE_H
+#define __BUFFER_TYPE_H
+
+/* portable access to buffers in DDR */
+
+#ifdef __VIED_CELL
+typedef unsigned int buffer_address;
+#else
+/* workaround needed because shared_memory_access.h uses size_t */
+#include "type_support.h"
+#include "vied/shared_memory_access.h"
+typedef host_virtual_address_t buffer_address;
+#endif
+
+#endif /* __BUFFER_TYPE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_buffer_address.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_buffer_address.h
new file mode 100644
index 000000000000..2530297e8e36
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_buffer_address.h
@@ -0,0 +1,25 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_BUFFER_ADDRESS_H
+#define __IA_CSS_BUFFER_ADDRESS_H
+
+#include "type_support.h"
+
+typedef uint32_t ia_css_buffer_address; /* CSS virtual address */
+
+#define ia_css_buffer_address_null ((ia_css_buffer_address)0)
+
+#endif /* __IA_CSS_BUFFER_ADDRESS_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer.h
new file mode 100644
index 000000000000..b8e7a6ac4648
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer.h
@@ -0,0 +1,52 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_INPUT_BUFFER_H
+#define __IA_CSS_INPUT_BUFFER_H
+
+
+/* Input Buffers */
+
+/* A CSS input buffer is a buffer in DDR that can be written by the CPU,
+ * and that can be read by CSS hardware, after the buffer has been handed over.
+ * Examples: command buffer, input frame buffer, parameter buffer
+ * An input buffer must be mapped into the CPU address space before it can be
+ * written by the CPU.
+ * After mapping, writing, and unmapping, the buffer can be handed over to the
+ * firmware. An input buffer is handed over to the CSS by mapping it to the
+ * CSS address space (by the CPU), and by passing the resulting CSS (virtual)
+ * address of the input buffer to the CSS hardware.
+ * The firmware can read from an input buffer as soon as it has received the
+ * CSS virtual address.
+ * The firmware should not write into an input buffer.
+ * The firmware hands over the input buffer (back to the CPU) by sending the
+ * buffer handle via a response. The host should unmap the buffer
+ * before reusing it.
+ * The firmware should not read from the input buffer after returning the
+ * buffer handle to the CPU.
+ *
+ * A buffer may be pre-mapped to the CPU and/or to the CSS upon allocation,
+ * depending on the allocator's preference. In case of pre-mapped buffers,
+ * the map and unmap functions will only manage read and write access.
+ */
+
+#include "ia_css_buffer_address.h"
+
+typedef struct ia_css_buffer_s *ia_css_input_buffer; /* input buffer handle */
+typedef void *ia_css_input_buffer_cpu_address; /* CPU virtual address */
+/* CSS virtual address */
+typedef ia_css_buffer_address ia_css_input_buffer_css_address;
+
+#endif /* __IA_CSS_INPUT_BUFFER_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer_cpu.h
new file mode 100644
index 000000000000..d3d01353ce43
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer_cpu.h
@@ -0,0 +1,49 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_INPUT_BUFFER_CPU_H
+#define __IA_CSS_INPUT_BUFFER_CPU_H
+
+#include "vied/shared_memory_map.h"
+#include "ia_css_input_buffer.h"
+
+ia_css_input_buffer
+ia_css_input_buffer_alloc(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	unsigned int size);
+
+void
+ia_css_input_buffer_free(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	ia_css_input_buffer b);
+
+ia_css_input_buffer_cpu_address
+ia_css_input_buffer_cpu_map(ia_css_input_buffer b);
+
+ia_css_input_buffer_cpu_address
+ia_css_input_buffer_cpu_unmap(ia_css_input_buffer b);
+
+ia_css_input_buffer_css_address
+ia_css_input_buffer_css_map(vied_memory_t mid, ia_css_input_buffer b);
+
+ia_css_input_buffer_css_address
+ia_css_input_buffer_css_map_no_invalidate(vied_memory_t mid, ia_css_input_buffer b);
+
+ia_css_input_buffer_css_address
+ia_css_input_buffer_css_unmap(ia_css_input_buffer b);
+
+
+#endif /* __IA_CSS_INPUT_BUFFER_CPU_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer.h
new file mode 100644
index 000000000000..a8c0f9e8554e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer.h
@@ -0,0 +1,31 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_OUTPUT_BUFFER_H
+#define __IA_CSS_OUTPUT_BUFFER_H
+
+/* Output Buffers */
+/* A CSS output buffer is a buffer in DDR that can be written by CSS hardware
+ * and that can be read by the host, after the buffer has been handed over.
+ * Examples: output frame buffer
+ */
+
+#include "ia_css_buffer_address.h"
+
+typedef struct ia_css_buffer_s *ia_css_output_buffer;
+typedef void *ia_css_output_buffer_cpu_address;
+typedef ia_css_buffer_address ia_css_output_buffer_css_address;
+
+#endif /* __IA_CSS_OUTPUT_BUFFER_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer_cpu.h
new file mode 100644
index 000000000000..0299fc3b7eb6
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer_cpu.h
@@ -0,0 +1,48 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_OUTPUT_BUFFER_CPU_H
+#define __IA_CSS_OUTPUT_BUFFER_CPU_H
+
+#include "vied/shared_memory_map.h"
+#include "ia_css_output_buffer.h"
+
+ia_css_output_buffer
+ia_css_output_buffer_alloc(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	unsigned int size);
+
+void
+ia_css_output_buffer_free(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	ia_css_output_buffer b);
+
+ia_css_output_buffer_css_address
+ia_css_output_buffer_css_map(ia_css_output_buffer b);
+
+ia_css_output_buffer_css_address
+ia_css_output_buffer_css_unmap(ia_css_output_buffer b);
+
+ia_css_output_buffer_cpu_address
+ia_css_output_buffer_cpu_map(vied_memory_t mid, ia_css_output_buffer b);
+
+ia_css_output_buffer_cpu_address
+ia_css_output_buffer_cpu_map_no_invalidate(vied_memory_t mid, ia_css_output_buffer b);
+
+ia_css_output_buffer_cpu_address
+ia_css_output_buffer_cpu_unmap(ia_css_output_buffer b);
+
+
+#endif /* __IA_CSS_OUTPUT_BUFFER_CPU_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer.h
new file mode 100644
index 000000000000..558ec679f98a
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer.h
@@ -0,0 +1,32 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_SHARED_BUFFER_H
+#define __IA_CSS_SHARED_BUFFER_H
+
+/* Shared Buffers */
+/* A CSS shared buffer is a buffer in DDR that can be read and written by the
+ * CPU and CSS.
+ * Both the CPU and CSS can have the buffer mapped simultaneously.
+ * Access rights are not managed by this interface; this could be done by
+ * means of the read and write pointers of a queue, for example.
+ */
+
+#include "ia_css_buffer_address.h"
+
+typedef struct ia_css_buffer_s *ia_css_shared_buffer;
+typedef void *ia_css_shared_buffer_cpu_address;
+typedef ia_css_buffer_address ia_css_shared_buffer_css_address;
+
+#endif /* __IA_CSS_SHARED_BUFFER_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer_cpu.h
new file mode 100644
index 000000000000..ff62914f99dc
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer_cpu.h
@@ -0,0 +1,51 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_SHARED_BUFFER_CPU_H
+#define __IA_CSS_SHARED_BUFFER_CPU_H
+
+#include "vied/shared_memory_map.h"
+#include "ia_css_shared_buffer.h"
+
+ia_css_shared_buffer
+ia_css_shared_buffer_alloc(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	unsigned int size);
+
+void
+ia_css_shared_buffer_free(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	ia_css_shared_buffer b);
+
+ia_css_shared_buffer_cpu_address
+ia_css_shared_buffer_cpu_map(ia_css_shared_buffer b);
+
+ia_css_shared_buffer_cpu_address
+ia_css_shared_buffer_cpu_unmap(ia_css_shared_buffer b);
+
+ia_css_shared_buffer_css_address
+ia_css_shared_buffer_css_map(ia_css_shared_buffer b);
+
+ia_css_shared_buffer_css_address
+ia_css_shared_buffer_css_unmap(ia_css_shared_buffer b);
+
+ia_css_shared_buffer
+ia_css_shared_buffer_css_update(vied_memory_t mid, ia_css_shared_buffer b);
+
+ia_css_shared_buffer
+ia_css_shared_buffer_cpu_update(vied_memory_t mid, ia_css_shared_buffer b);
+
+#endif /* __IA_CSS_SHARED_BUFFER_CPU_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/buffer_access.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/buffer_access.c
new file mode 100644
index 000000000000..83cbda5a9ff5
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/buffer_access.c
@@ -0,0 +1,40 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +/* implementation of buffer access from the CPU */ +/* using shared_memory interface */ + +#include "buffer_access.h" +#include "vied/shared_memory_access.h" + +void +buffer_load( + buffer_address address, + void *data, + unsigned int bytes, + unsigned int mm_id) +{ + shared_memory_load(mm_id, address, data, bytes); +} + +void +buffer_store( + buffer_address address, + const void *data, + unsigned int bytes, + unsigned int mm_id) +{ + shared_memory_store(mm_id, address, data, bytes); +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.c new file mode 100644 index 000000000000..3828b186ddac --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.c @@ -0,0 +1,52 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +/* provided interface */ +#include "ia_css_buffer.h" + +/* used interfaces */ +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + +ia_css_buffer_t +ia_css_buffer_alloc(vied_subsystem_t sid, vied_memory_t mid, unsigned int size) +{ + ia_css_buffer_t b; + + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + + b->css_address = shared_memory_map(sid, mid, b->mem); + b->size = size; + return b; +} + + +void +ia_css_buffer_free(vied_subsystem_t sid, vied_memory_t mid, ia_css_buffer_t b) +{ + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.h new file mode 100644 index 000000000000..0f99a06e9a89 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.h @@ -0,0 +1,58 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_BUFFER_H
+#define __IA_CSS_BUFFER_H
+
+/* workaround: needed because shared_memory_map.h uses size_t */
+#include "type_support.h"
+#include "vied/shared_memory_map.h"
+
+typedef enum {
+	buffer_unmapped, /* buffer is not accessible by cpu, nor css */
+	buffer_write, /* output buffer: css has write access */
+	/* input buffer: cpu has write access */
+	buffer_read, /* input buffer: css has read access */
+	/* output buffer: cpu has read access */
+	buffer_cpu, /* shared buffer: cpu has read/write access */
+	buffer_css /* shared buffer: css has read/write access */
+} buffer_state;
+
+struct ia_css_buffer_s {
+	/* number of bytes allocated */
+	unsigned int size;
+	/* allocated virtual memory object */
+	host_virtual_address_t mem;
+	/* virtual address to be used on css/firmware */
+	vied_virtual_address_t css_address;
+	/* virtual address to be used on cpu/host */
+	void *cpu_address;
+	buffer_state state;
+};
+
+typedef struct ia_css_buffer_s *ia_css_buffer_t;
+
+ia_css_buffer_t
+ia_css_buffer_alloc(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	unsigned int size);
+
+void
+ia_css_buffer_free(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	ia_css_buffer_t b);
+
+#endif /* __IA_CSS_BUFFER_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_input_buffer.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_input_buffer.c
new file mode 100644
index 000000000000..2bd754062a0e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_input_buffer.c
@@ -0,0 +1,185 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + + +#include "ia_css_input_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_input_buffer +ia_css_input_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_input_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_input_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_input_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_map(ia_css_input_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map input buffer to CPU address space, acquire write access */ + b->state = buffer_write; + + /* return pre-mapped buffer */ + return b->cpu_address; +} + + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_unmap(ia_css_input_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_write) + return NULL; + + /* unmap input buffer from CPU address space, release write access */ + b->state = buffer_unmapped; + + /* return pre-mapped buffer */ + return b->cpu_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map(vied_memory_t mid, ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map input buffer to CSS address space, acquire read access */ + b->state = buffer_read; + + /* now flush the cache */ + ia_css_cpu_mem_cache_flush(b->cpu_address, b->size); +#ifndef HRT_HW + /* only copy in case of simulation, otherwise it should just work */ + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return (ia_css_input_buffer_css_address)b->css_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map_no_invalidate(vied_memory_t mid, ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map input buffer to CSS address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only copy in case of simulation, otherwise it should just work */ + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return (ia_css_input_buffer_css_address)b->css_address; +} + + 
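The map/unmap handshake implemented above is easiest to follow end to end. A minimal usage sketch of the input-buffer API (illustrative only, not part of this patch; send_command is a hypothetical caller, and the subsystem/memory ids of 0 follow the SSID/MMID defaults in the lib2600psys Makefile):

#include <string.h>
#include "ia_css_input_buffer_cpu.h"

static int send_command(const void *cmd, unsigned int size)
{
	const vied_subsystem_t sid = 0;	/* assumed: SSID=0, as in the Makefile */
	const vied_memory_t mid = 0;	/* assumed: MMID=0, as in the Makefile */
	ia_css_input_buffer b;
	ia_css_input_buffer_cpu_address cpu;
	ia_css_input_buffer_css_address css;

	b = ia_css_input_buffer_alloc(sid, mid, size);
	if (b == NULL)
		return -1;

	/* CPU side: map for write access, fill the buffer, unmap */
	cpu = ia_css_input_buffer_cpu_map(b);
	memcpy(cpu, cmd, size);
	ia_css_input_buffer_cpu_unmap(b);

	/* hand over to CSS: flushes the cache and yields the CSS address */
	css = ia_css_input_buffer_css_map(mid, b);
	/* ... pass 'css' to the firmware and wait for its response ... */
	ia_css_input_buffer_css_unmap(b);

	ia_css_input_buffer_free(sid, mid, b);
	return 0;
}

Note that every map call checks the buffer state, so a call made out of order (for example css_map while the CPU mapping is still held) fails by returning 0 or NULL rather than corrupting the handshake.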
+ia_css_input_buffer_css_address +ia_css_input_buffer_css_unmap(ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_read) + return 0; + + /* unmap input buffer from CSS address space, release read access */ + b->state = buffer_unmapped; + + /* input buffer only, no need to invalidate cache */ + + return (ia_css_input_buffer_css_address)b->css_address; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_output_buffer.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_output_buffer.c new file mode 100644 index 000000000000..892dcbd49825 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_output_buffer.c @@ -0,0 +1,182 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include "ia_css_output_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_output_buffer +ia_css_output_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_output_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_output_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_output_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_map(ia_css_output_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map output buffer to CSS address space, acquire write access */ + b->state = buffer_write; + + return (ia_css_output_buffer_css_address)b->css_address; +} + + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_unmap(ia_css_output_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_write) + return 0; + + /* unmap output buffer from CSS address space, release write access */ + b->state = buffer_unmapped; + + return 
(ia_css_output_buffer_css_address)b->css_address; +} + + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map(vied_memory_t mid, ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map output buffer to CPU address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only in simulation */ + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + /* now invalidate the cache */ + ia_css_cpu_mem_cache_invalidate(b->cpu_address, b->size); + + return b->cpu_address; +} + + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map_no_invalidate(vied_memory_t mid, ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map output buffer to CPU address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only in simulation */ + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return b->cpu_address; +} + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_unmap(ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_read) + return NULL; + + /* unmap output buffer from CPU address space, release read access */ + b->state = buffer_unmapped; + + /* output only, no need to flush cache */ + + return b->cpu_address; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_shared_buffer.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_shared_buffer.c new file mode 100644 index 000000000000..1041bd07721b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/buffer/src/cpu/ia_css_shared_buffer.c @@ -0,0 +1,188 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + + +#include "ia_css_shared_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_shared_buffer +ia_css_shared_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_shared_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_shared_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_shared_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_map(ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map shared buffer to CPU address space */ + b->state = buffer_cpu; + + return b->cpu_address; +} + + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_unmap(ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_cpu) + return NULL; + + /* unmap shared buffer from CPU address space */ + b->state = buffer_unmapped; + + return b->cpu_address; +} + + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_map(ia_css_shared_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map shared buffer to CSS address space */ + b->state = buffer_css; + + return (ia_css_shared_buffer_css_address)b->css_address; +} + + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_unmap(ia_css_shared_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_css) + return 0; + + /* unmap shared buffer from CSS address space */ + b->state = buffer_unmapped; + + return (ia_css_shared_buffer_css_address)b->css_address; +} + + +ia_css_shared_buffer +ia_css_shared_buffer_css_update(vied_memory_t mid, ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + + /* flush the buffer to CSS after it was modified by the CPU */ + /* flush cache to ddr */ + ia_css_cpu_mem_cache_flush(b->cpu_address, b->size); +#ifndef HRT_HW + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return b; +} + + +ia_css_shared_buffer +ia_css_shared_buffer_cpu_update(vied_memory_t mid, ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + + /* flush the buffer to the CPU after it has been modified by CSS */ +#ifndef HRT_HW + /* copy data from CSS address space to CPU address space */ + 
shared_memory_load(mid, b->mem, b->cpu_address, b->size);
+#else
+	(void)mid;
+#endif
+	/* invalidate the cache so the CPU reads the updated data from ddr */
+	ia_css_cpu_mem_cache_invalidate(b->cpu_address, b->size);
+
+	return b;
+}
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/cell.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/cell.mk
new file mode 100644
index 000000000000..fa5e65022601
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/cell.mk
@@ -0,0 +1,43 @@
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+#
+ifndef _CELL_MK_
+_CELL_MK_ = 1
+
+
+CELL_DIR=$${MODULES_DIR}/cell
+CELL_INTERFACE=$(CELL_DIR)/interface
+CELL_SOURCES=$(CELL_DIR)/src
+
+CELL_HOST_FILES =
+CELL_FW_FILES =
+
+CELL_HOST_CPPFLAGS = \
+	-I$(CELL_INTERFACE) \
+	-I$(CELL_SOURCES)
+
+CELL_FW_CPPFLAGS = \
+	-I$(CELL_INTERFACE) \
+	-I$(CELL_SOURCES)
+
+ifdef 0
+# Disabled until it is decided to go this way or not
+include $(MODULES_DIR)/device_access/device_access.mk
+CELL_HOST_FILES += $(DEVICE_ACCESS_HOST_FILES)
+CELL_FW_FILES += $(DEVICE_ACCESS_FW_FILES)
+CELL_HOST_CPPFLAGS += $(DEVICE_ACCESS_HOST_CPPFLAGS)
+CELL_FW_CPPFLAGS += $(DEVICE_ACCESS_FW_CPPFLAGS)
+endif
+
+endif
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/interface/ia_css_cell.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/interface/ia_css_cell.h
new file mode 100644
index 000000000000..3fac3c791b6e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/interface/ia_css_cell.h
@@ -0,0 +1,112 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IA_CSS_CELL_H +#define __IA_CSS_CELL_H + +#include "storage_class.h" +#include "type_support.h" + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stat_ctrl(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_stat_ctrl(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_pc(unsigned int ssid, unsigned int cell_id, + unsigned int pc); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +#if 0 /* To be implemented after completing cell device properties */ +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_info_bits(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_debug_pc(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stall_bits(unsigned int ssid, unsigned int cell_id); +#endif + +/* configure master ports */ + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_base_address(unsigned int ssid, + unsigned int cell_id, + unsigned int master, unsigned int segment, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_bits(unsigned int ssid, unsigned int cell_id, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_bits(unsigned int ssid, + unsigned int cell_id, + unsigned int master, unsigned int segment, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_override_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_override_bits(unsigned int ssid, + unsigned int cell, + unsigned int master, unsigned int segment, unsigned int value); + +/* Access memories */ + +STORAGE_CLASS_INLINE void +ia_css_cell_mem_store_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr, unsigned int value); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_mem_load_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr); + +/***********************************************************************/ + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_is_ready(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_bit(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_run_bit(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_start(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_start_prefetch(unsigned int ssid, unsigned int cell_id, + bool prefetch); + +STORAGE_CLASS_INLINE void +ia_css_cell_wait(unsigned int ssid, unsigned int cell_id); + +/* include inline implementation */ +#include "ia_css_cell_impl.h" + +#endif /* __IA_CSS_CELL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/src/ia_css_cell_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/src/ia_css_cell_impl.h new file mode 100644 index 000000000000..60b2e234da1a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cell/src/ia_css_cell_impl.h @@ -0,0 +1,272 @@ +/** +* Support for Intel Camera Imaging 
ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CELL_IMPL_H +#define __IA_CSS_CELL_IMPL_H + +#include "ia_css_cell.h" + +#include "ia_css_cmem.h" +#include "ipu_device_cell_properties.h" +#include "storage_class.h" +#include "assert_support.h" +#include "platform_support.h" +#include "misc_support.h" + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_regs_addr(unsigned int cell_id) +{ + /* mem_id 0 is for registers */ + return ipu_device_cell_memory_address(cell_id, 0); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_dmem_addr(unsigned int cell_id) +{ + /* mem_id 1 is for DMEM */ + return ipu_device_cell_memory_address(cell_id, 1); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_mem_store_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr, unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ipu_device_cell_memory_address( + cell_id, mem_id) + addr, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_mem_load_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr) +{ + return ia_css_cmem_load_32( + ssid, ipu_device_cell_memory_address(cell_id, mem_id) + addr); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stat_ctrl(unsigned int ssid, unsigned int cell_id) +{ + return ia_css_cmem_load_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_stat_ctrl(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_is_ready(unsigned int ssid, unsigned int cell_id) +{ + unsigned int reg; + + reg = ia_css_cell_get_stat_ctrl(ssid, cell_id); + /* READY must be 1, START must be 0 */ + return (reg & (1 << IPU_DEVICE_CELL_STAT_CTRL_READY_BIT)) && + ((~reg) & (1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT)); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_pc(unsigned int ssid, unsigned int cell_id, + unsigned int pc) +{ + /* set start PC */ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_START_PC_REG_ADDRESS, pc); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_bit(unsigned int ssid, unsigned int cell_id) +{ + unsigned int reg; + + reg = 1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT; + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_run_bit(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + unsigned int reg; + + reg = value << IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT; + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_start(unsigned int ssid, unsigned int cell_id) +{ + ia_css_cell_start_prefetch(ssid, cell_id, 0); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_start_prefetch(unsigned int ssid, unsigned int cell_id, + bool prefetch) +{ + unsigned int reg = 0; + + /* Set run bit and start bit */ + reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT); 
+	reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT);
+	/* Invalidate the icache */
+	reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT);
+	/* Optionally enable prefetching */
+	reg |= (prefetch ?
+		(1 << IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT) :
+		0);
+
+	/* store into register */
+	ia_css_cell_set_stat_ctrl(ssid, cell_id, reg);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_wait(unsigned int ssid, unsigned int cell_id)
+{
+	do {
+		ia_css_sleep();
+	} while (!ia_css_cell_is_ready(ssid, cell_id));
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_icache_base_address(unsigned int ssid, unsigned int cell_id,
+	unsigned int value)
+{
+	ia_css_cmem_store_32(
+		ssid, ia_css_cell_regs_addr(cell_id) +
+		IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS, value);
+}
+
+/* master port configuration */
+
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_segment_info_bits(unsigned int ssid, unsigned int cell,
+	unsigned int master, unsigned int segment, unsigned int value)
+{
+	unsigned int addr;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < ipu_device_cell_num_masters(cell));
+	assert(segment < ipu_device_cell_master_num_segments(cell, master));
+
+	addr = ipu_device_cell_memory_address(cell, 0);
+	addr += ipu_device_cell_master_info_reg(cell, master);
+	addr += segment * ipu_device_cell_master_stride(cell, master);
+	ia_css_cmem_store_32(ssid, addr, value);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_segment_info_override_bits(unsigned int ssid,
+	unsigned int cell,
+	unsigned int master, unsigned int segment, unsigned int value)
+{
+	unsigned int addr;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < ipu_device_cell_num_masters(cell));
+	assert(segment < ipu_device_cell_master_num_segments(cell, master));
+
+	addr = ipu_device_cell_memory_address(cell, 0);
+	addr += ipu_device_cell_master_info_override_reg(cell, master);
+	addr += segment * ipu_device_cell_master_stride(cell, master);
+	ia_css_cmem_store_32(ssid, addr, value);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_segment_base_address(unsigned int ssid,
+	unsigned int cell,
+	unsigned int master, unsigned int segment, unsigned int value)
+{
+	unsigned int addr;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < ipu_device_cell_num_masters(cell));
+	assert(segment < ipu_device_cell_master_num_segments(cell, master));
+
+	addr = ipu_device_cell_memory_address(cell, 0);
+	addr += ipu_device_cell_master_base_reg(cell, master);
+	addr += segment * ipu_device_cell_master_stride(cell, master);
+	ia_css_cmem_store_32(ssid, addr, value);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_info_bits(unsigned int ssid, unsigned int cell,
+	unsigned int master, unsigned int value)
+{
+	unsigned int addr, s, stride, num_segments;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < ipu_device_cell_num_masters(cell));
+
+	addr = ipu_device_cell_memory_address(cell, 0);
+	addr += ipu_device_cell_master_info_reg(cell, master);
+	stride = ipu_device_cell_master_stride(cell, master);
+	num_segments = ipu_device_cell_master_num_segments(cell, master);
+	for (s = 0; s < num_segments; s++) {
+		ia_css_cmem_store_32(ssid, addr, value);
+		addr += stride;
+	}
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_info_override_bits(unsigned int ssid, unsigned int cell,
+	unsigned int master, unsigned int value)
+{
+	unsigned int addr, s, stride, num_segments;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master <
ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_override_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_base_address(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments, segment_size; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_base_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + segment_size = ipu_device_cell_master_segment_size(cell, master); + + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + value += segment_size; + } +} + +#endif /* __IA_CSS_CELL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg.h new file mode 100644 index 000000000000..e8b0a48b27e3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg.h @@ -0,0 +1,60 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CLIENT_PKG_H +#define __IA_CSS_CLIENT_PKG_H + +#include "type_support.h" +#include "ia_css_client_pkg_storage_class.h" +/* for ia_css_client_pkg_header_s (ptr only), ia_css_client_pkg_t */ +#include "ia_css_client_pkg_types.h" + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_pg_manifest_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size); + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_prog_list_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size); + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_prog_desc_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size); + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_prog_bin_entry_offset_size( + const ia_css_client_pkg_t *client_pkg, + uint32_t program_id, + uint32_t *offset, + uint32_t *size); + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_indexed_prog_desc_entry_offset_size( + const ia_css_client_pkg_t *client_pkg, + uint32_t program_id, + uint32_t program_index, + uint32_t *offset, + uint32_t *size); + +#ifdef __INLINE_CLIENT_PKG__ +#include "ia_css_client_pkg_impl.h" +#endif + +#endif /* __IA_CSS_CLIENT_PKG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_storage_class.h new file mode 100644 index 000000000000..98af98d5d824 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_storage_class.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +#define __IA_CSS_CLIENT_PKG_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __INLINE_CLIENT_PKG__ +#define IA_CSS_CLIENT_PKG_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +#else +#define IA_CSS_CLIENT_PKG_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_CLIENT_PKG_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_CLIENT_PKG_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_types.h new file mode 100644 index 000000000000..ff5bf01358f1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_types.h @@ -0,0 +1,44 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CLIENT_PKG_TYPES_H +#define __IA_CSS_CLIENT_PKG_TYPES_H + +#include "type_support.h" + +typedef void ia_css_client_pkg_t; + +struct ia_css_client_pkg_header_s { + uint32_t prog_list_offset; + uint32_t prog_list_size; + uint32_t prog_desc_offset; + uint32_t prog_desc_size; + uint32_t pg_manifest_offset; + uint32_t pg_manifest_size; + uint32_t prog_bin_offset; + uint32_t prog_bin_size; +}; + +struct ia_css_client_pkg_prog_s { + uint32_t prog_id; + uint32_t prog_offset; + uint32_t prog_size; +}; + +struct ia_css_client_pkg_prog_list_s { + uint32_t prog_desc_count; + uint32_t prog_bin_count; +}; + +#endif /* __IA_CSS_CLIENT_PKG_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg.c new file mode 100644 index 000000000000..0b2fd86d09f3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg.c @@ -0,0 +1,20 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifdef __INLINE_CLIENT_PKG__ +#include "storage_class.h" +STORAGE_CLASS_INLINE int __ia_css_client_pkg_avoid_warning_on_empty_file(void) { return 0; } +#else /* __INLINE_CLIENT_PKG__ */ +#include "ia_css_client_pkg_impl.h" +#endif /* __INLINE_CLIENT_PKG__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg_impl.h new file mode 100644 index 000000000000..b79e5de02b89 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg_impl.h @@ -0,0 +1,161 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_CLIENT_PKG_IMPL_H
+#define __IA_CSS_CLIENT_PKG_IMPL_H
+
+#include "ia_css_client_pkg.h"
+#include "ia_css_client_pkg_types.h"
+#include "error_support.h"
+
+IA_CSS_CLIENT_PKG_STORAGE_CLASS_C
+int ia_css_client_pkg_get_pg_manifest_offset_size(
+	const struct ia_css_client_pkg_header_s *client_pkg_header,
+	uint32_t *offset,
+	uint32_t *size)
+{
+	int ret_val = -1;
+
+	verifjmpexit(NULL != client_pkg_header);
+	verifjmpexit(NULL != offset);
+	verifjmpexit(NULL != size);
+
+	*(offset) = client_pkg_header->pg_manifest_offset;
+	*(size) = client_pkg_header->pg_manifest_size;
+	ret_val = 0;
+EXIT:
+	return ret_val;
+}
+
+IA_CSS_CLIENT_PKG_STORAGE_CLASS_C
+int ia_css_client_pkg_get_prog_list_offset_size(
+	const struct ia_css_client_pkg_header_s *client_pkg_header,
+	uint32_t *offset,
+	uint32_t *size)
+{
+	int ret_val = -1;
+
+	verifjmpexit(NULL != client_pkg_header);
+	verifjmpexit(NULL != offset);
+	verifjmpexit(NULL != size);
+
+	*(offset) = client_pkg_header->prog_list_offset;
+	*(size) = client_pkg_header->prog_list_size;
+	ret_val = 0;
+EXIT:
+	return ret_val;
+}
+
+IA_CSS_CLIENT_PKG_STORAGE_CLASS_C
+int ia_css_client_pkg_get_prog_desc_offset_size(
+	const struct ia_css_client_pkg_header_s *client_pkg_header,
+	uint32_t *offset,
+	uint32_t *size)
+{
+	int ret_val = -1;
+
+	verifjmpexit(NULL != client_pkg_header);
+	verifjmpexit(NULL != offset);
+	verifjmpexit(NULL != size);
+
+	*(offset) = client_pkg_header->prog_desc_offset;
+	*(size) = client_pkg_header->prog_desc_size;
+	ret_val = 0;
+EXIT:
+	return ret_val;
+}
+
+IA_CSS_CLIENT_PKG_STORAGE_CLASS_C
+int ia_css_client_pkg_get_prog_bin_entry_offset_size(
+	const ia_css_client_pkg_t *client_pkg,
+	uint32_t program_id,
+	uint32_t *offset,
+	uint32_t *size)
+{
+	uint32_t i;
+	int ret_val = -1;
+	struct ia_css_client_pkg_header_s *client_pkg_header = NULL;
+	const struct ia_css_client_pkg_prog_list_s *pkg_prog_list = NULL;
+	const struct ia_css_client_pkg_prog_s *pkg_prog_bin_entry = NULL;
+
+	verifjmpexit(NULL != client_pkg);
+	verifjmpexit(NULL != offset);
+	verifjmpexit(NULL != size);
+
+	client_pkg_header =
+		(struct ia_css_client_pkg_header_s *)((uint8_t *)client_pkg);
+	pkg_prog_list =
+		(struct ia_css_client_pkg_prog_list_s *)((uint8_t *)client_pkg
+		+ client_pkg_header->prog_list_offset);
+	pkg_prog_bin_entry =
+		(struct ia_css_client_pkg_prog_s *)((uint8_t *)pkg_prog_list +
+		sizeof(struct ia_css_client_pkg_prog_list_s));
+	pkg_prog_bin_entry += pkg_prog_list->prog_desc_count;
+
+	for (i = 0; i < pkg_prog_list->prog_bin_count; i++) {
+		if (program_id == pkg_prog_bin_entry->prog_id) {
+			*(offset) = pkg_prog_bin_entry->prog_offset;
+			*(size) = pkg_prog_bin_entry->prog_size;
+			ret_val = 0;
+			break;
+		} else if (0 == pkg_prog_bin_entry->prog_size) {
+			/* We can have a variable number of program descriptors.
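+			 * Binary entries follow the prog_desc_count
+			 * descriptor entries skipped above.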
+ * The first non-valid one will have size set to 0 + */ + break; + } + pkg_prog_bin_entry++; + } +EXIT: + return ret_val; +} + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +int ia_css_client_pkg_get_indexed_prog_desc_entry_offset_size( + const ia_css_client_pkg_t *client_pkg, + uint32_t program_id, + uint32_t program_index, + uint32_t *offset, + uint32_t *size) +{ + int ret_val = -1; + struct ia_css_client_pkg_header_s *client_pkg_header = NULL; + const struct ia_css_client_pkg_prog_list_s *pkg_prog_list = NULL; + const struct ia_css_client_pkg_prog_s *pkg_prog_desc_entry = NULL; + + verifjmpexit(NULL != client_pkg); + verifjmpexit(NULL != offset); + verifjmpexit(NULL != size); + + client_pkg_header = + (struct ia_css_client_pkg_header_s *)((uint8_t *)client_pkg); + pkg_prog_list = + (struct ia_css_client_pkg_prog_list_s *)((uint8_t *)client_pkg + + client_pkg_header->prog_list_offset); + pkg_prog_desc_entry = + (struct ia_css_client_pkg_prog_s *)((uint8_t *)pkg_prog_list + + sizeof(struct ia_css_client_pkg_prog_list_s)); + + verifjmpexit(program_index < pkg_prog_list->prog_desc_count); + verifjmpexit(program_id == pkg_prog_desc_entry[program_index].prog_id); + verifjmpexit(pkg_prog_desc_entry[program_index].prog_size > 0); + *(offset) = pkg_prog_desc_entry[program_index].prog_offset; + *(size) = pkg_prog_desc_entry[program_index].prog_size; + ret_val = 0; + +EXIT: + return ret_val; +} + +#endif /* __IA_CSS_CLIENT_PKG_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/config/psys/subsystem_bxtB0.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/config/psys/subsystem_bxtB0.mk new file mode 100644 index 000000000000..2f60853f0089 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/config/psys/subsystem_bxtB0.mk @@ -0,0 +1,109 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +############################################################################ +# This file is used to specify versions and properties of PSYS firmware +# components. Please note that these are subsystem specific. System specific +# properties should go to system_$IPU_SYSVER.mk. Also the device versions +# should be defined under "devices" or should be taken from the SDK. 
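+# For example, the REGMEM_* layout and the PSYS_SERVER_CACHE_* sizes defined
+# below are such subsystem-specific properties.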
+############################################################################
+
+# Activate loading params and storing stats DDR<->REGs with DMA
+PSYS_USE_ISA_DMA = 1
+
+# Used in ISA module
+PSYS_ISL_DPC_DPC_V2 = 0
+
+# Assume OFS will be running concurrently with IPF, and prioritize according to rates of services on devproxy
+CONCURRENT_OFS_IPF_PRIORITY_OPTIMIZATION_ENABLED = 1
+
+# Use the DMA for terminal loading in Psys server
+PSYS_SERVER_ENABLE_TERMINAL_LOAD_DMA = 1
+
+HAS_GMEM = 1
+# use DMA NCI for OFS Service to reduce load in tproxy
+DMA_NCI_IN_OFS_SERVICE = 1
+
+# See HSD 1805169230
+HAS_FWDMA_ALIGNMENT_ISSUE_SIGHTING = 1
+
+HAS_SPC = 1
+HAS_SPP0 = 1
+HAS_SPP1 = 1
+HAS_ISP0 = 1
+HAS_ISP1 = 1
+HAS_ISP2 = 1
+HAS_ISP3 = 1
+
+# Specification for Psys server's fixed globals' locations
+REGMEM_OFFSET = 0 # Starting from 0
+REGMEM_SIZE = 18
+REGMEM_WORD_BYTES = 4
+REGMEM_SIZE_BYTES = 72
+GPC_ISP_PERF_DATA_OFFSET = 72 # Taken from REGMEM_OFFSET + REGMEM_SIZE_BYTES
+GPC_ISP_PERF_DATA_SIZE_BYTES = 80
+FW_LOAD_NO_OF_REQUEST_OFFSET = 152 # Taken from GPC_ISP_PERF_DATA_OFFSET + GPC_ISP_PERF_DATA_SIZE_BYTES
+FW_LOAD_NO_OF_REQUEST_SIZE_BYTES = 4
+DISPATCHER_SCRATCH_SPACE_OFFSET = 156 # Taken from FW_LOAD_NO_OF_REQUEST_OFFSET + FW_LOAD_NO_OF_REQUEST_SIZE_BYTES
+
+# TODO use version naming scheme "v#" to decouple
+# IPU_SYSVER from version.
+PSYS_SERVER_MANIFEST_VERSION = bxtB0
+PSYS_RESOURCE_MODEL_VERSION = bxtB0
+PSYS_ACCESS_BLOCKER_VERSION = v1
+
+# Disable support for PPG protocol to save codesize
+PSYS_HAS_PPG_SUPPORT = 0
+# Disable support for late binding
+PSYS_HAS_LATE_BINDING_SUPPORT = 0
+
+# Specify PSYS server context spaces for caching context from DDR
+PSYS_SERVER_NOF_CACHES = 4
+PSYS_SERVER_MAX_NUM_PROC_GRP = $(PSYS_SERVER_NOF_CACHES)
+PSYS_SERVER_MAX_NUM_EXEC_PROC_GRP = 8 # Max PGs running: 4 running on cores, 4 being updated on the host upon executing.
+PSYS_SERVER_MAX_PROC_GRP_SIZE = 4052
+PSYS_SERVER_MAX_MANIFEST_SIZE = 3732
+PSYS_SERVER_MAX_CLIENT_PKG_SIZE = 2420
+PSYS_SERVER_MAX_BUFFER_SET_SIZE = 0
+PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_SECTIONS = 88
+PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS = 1
+# The caching scheme for this subsystem suits the method of queueing ahead separate PGs for frames in an interleaved
+# fashion. As such, there should be enough caches to support the heaviest two concurrent PGs, times two. This results
+# in the following distribution of caches: two large ones for the maximum sized PG, two smaller ones for the
+# second-largest sized PG.
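+# In the definitions below, caches 0 and 1 therefore take the maximum sizes,
+# while caches 2 and 3 take the smaller second-largest profile.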
+PSYS_SERVER_CACHE_0_PROC_GRP_SIZE = $(PSYS_SERVER_MAX_PROC_GRP_SIZE)
+PSYS_SERVER_CACHE_0_MANIFEST_SIZE = $(PSYS_SERVER_MAX_MANIFEST_SIZE)
+PSYS_SERVER_CACHE_0_CLIENT_PKG_SIZE = $(PSYS_SERVER_MAX_CLIENT_PKG_SIZE)
+PSYS_SERVER_CACHE_0_BUFFER_SET_SIZE = $(PSYS_SERVER_MAX_BUFFER_SET_SIZE)
+PSYS_SERVER_CACHE_0_NUMBER_OF_TERMINAL_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_SECTIONS)
+PSYS_SERVER_CACHE_0_NUMBER_OF_TERMINAL_STORE_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS)
+PSYS_SERVER_CACHE_1_PROC_GRP_SIZE = $(PSYS_SERVER_CACHE_0_PROC_GRP_SIZE)
+PSYS_SERVER_CACHE_1_MANIFEST_SIZE = $(PSYS_SERVER_CACHE_0_MANIFEST_SIZE)
+PSYS_SERVER_CACHE_1_CLIENT_PKG_SIZE = $(PSYS_SERVER_CACHE_0_CLIENT_PKG_SIZE)
+PSYS_SERVER_CACHE_1_BUFFER_SET_SIZE = $(PSYS_SERVER_CACHE_0_BUFFER_SET_SIZE)
+PSYS_SERVER_CACHE_1_NUMBER_OF_TERMINAL_SECTIONS = $(PSYS_SERVER_CACHE_0_NUMBER_OF_TERMINAL_SECTIONS)
+PSYS_SERVER_CACHE_1_NUMBER_OF_TERMINAL_STORE_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS)
+PSYS_SERVER_CACHE_2_PROC_GRP_SIZE = 1800
+PSYS_SERVER_CACHE_2_MANIFEST_SIZE = 2344
+PSYS_SERVER_CACHE_2_CLIENT_PKG_SIZE = 1240
+PSYS_SERVER_CACHE_2_BUFFER_SET_SIZE = 0
+PSYS_SERVER_CACHE_2_NUMBER_OF_TERMINAL_SECTIONS = 45
+PSYS_SERVER_CACHE_2_NUMBER_OF_TERMINAL_STORE_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS)
+
+PSYS_SERVER_CACHE_3_PROC_GRP_SIZE = $(PSYS_SERVER_CACHE_2_PROC_GRP_SIZE)
+PSYS_SERVER_CACHE_3_MANIFEST_SIZE = $(PSYS_SERVER_CACHE_2_MANIFEST_SIZE)
+PSYS_SERVER_CACHE_3_CLIENT_PKG_SIZE = $(PSYS_SERVER_CACHE_2_CLIENT_PKG_SIZE)
+PSYS_SERVER_CACHE_3_BUFFER_SET_SIZE = $(PSYS_SERVER_CACHE_2_BUFFER_SET_SIZE)
+PSYS_SERVER_CACHE_3_NUMBER_OF_TERMINAL_SECTIONS = $(PSYS_SERVER_CACHE_2_NUMBER_OF_TERMINAL_SECTIONS)
+PSYS_SERVER_CACHE_3_NUMBER_OF_TERMINAL_STORE_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS)
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/config/system_bxtB0.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/config/system_bxtB0.mk
new file mode 100644
index 000000000000..24d079b40516
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/config/system_bxtB0.mk
@@ -0,0 +1,88 @@
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details
+#
+
+LOGICAL_FW_INPUT_SYSTEM = input_system_system
+LOGICAL_FW_PROCESSING_SYSTEM = processing_system_system
+LOGICAL_FW_IPU_SYSTEM = css_broxton_system
+LOGICAL_FW_ISP_SYSTEM = isp2601_default_system
+SP_CONTROL_CELL = sp2601_control
+SP_PROXY_CELL = sp2601_proxy
+SP_FP_CELL = sp2601_fp
+ISP_CELL = isp2601
+# The lower-case define isp2601 is used in the SDK; in order to distinguish
+# between different ISP versions, the ISP_CELL_IDENTIFIER define is added.
+ISP_CELL_IDENTIFIER = ISP2601
+HAS_IPFD = 1
+HAS_S2M_IN_ISYS_ISL_NONSOC_PATH = 0
+HAS_S2V_IN_ISYS_ISL_NONSOC_PATH = 1
+# ISL-IS non-SoC path has ISA without PAF and DPC-Pext support for IPU4-B0
+HAS_ISA_IN_ISYS_ISL = 1
+HAS_PAF_IN_ISYS_ISL = 0
+HAS_DPC_PEXT_IN_ISYS_ISL = 0
+HAS_PMA_IF = 0
+
+HAS_MIPIBE_IN_PSYS_ISL = 1
+
+HAS_VPLESS_SUPPORT = 0
+
+DLI_SYSTEM = hive_isp_css_2600_system
+RESOURCE_MANAGER_VERSION = v1
+MEM_RESOURCE_VALIDATION_ERROR = 0
+OFS_SCALER_1_4K_TILEY_422_SUPPORT = 1
+PROGDESC_ACC_SYMBOLS_VERSION = v1
+DEVPROXY_INTERFACE_VERSION = v1
+FW_ABI_IPU_TYPES_VERSION = v1
+
+HAS_ONLINE_MODE_SUPPORT_IN_ISYS_PSYS = 0
+
+MMU_INTERFACE_VERSION = v1
+DEVICE_ACCESS_VERSION = v2
+PSYS_SERVER_VERSION = v2
+PSYS_SERVER_LOADER_VERSION = v1
+PSYS_HW_VERSION = BXT_B0_HW
+
+# Enable FW_DMA for loading firmware
+PSYS_SERVER_ENABLE_FW_LOAD_DMA = 1
+
+NCI_SPA_VERSION = v1
+MANIFEST_TOOL_VERSION = v2
+PSYS_CON_MGR_TOOL_VERSION = v1
+# TODO: Should be removed after OTF performance issues are solved
+PSYS_PROC_MGR_VERSION = v1
+IPU_RESOURCES_VERSION = v1
+
+HAS_ACC_CLUSTER_PAF_PAL = 0
+HAS_ACC_CLUSTER_PEXT_PAL = 0
+HAS_ACC_CLUSTER_GBL_PAL = 1
+
+# TODO use version naming scheme "v#" to decouple
+# IPU_SYSVER from version.
+PARAMBINTOOL_ISA_INIT_VERSION = bxtB0
+
+# Select EQC2EQ version
+# Version 1: uniform address space, equal EQ addresses regardless of EQC device
+# Version 2: multiple addresses per EQ, depending on location of EQC device
+EQC2EQ_VERSION = v1
+
+# Select DMA instance for fw_load
+FW_LOAD_DMA_INSTANCE = NCI_DMA_FW
+
+HAS_DMA_FW = 1
+
+HAS_SIS = 0
+HAS_IDS = 1
+
+PSYS_SERVER_ENABLE_TPROXY = 1
+PSYS_SERVER_ENABLE_DEVPROXY = 1
+NCI_OFS_VERSION = v1
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_component/cpd_component.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_component/cpd_component.mk
new file mode 100644
index 000000000000..8ecc3e42e55d
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_component/cpd_component.mk
@@ -0,0 +1,28 @@
+##
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details
+#
+##
+
+# MODULE is cpd/cpd_component
+
+CPD_DIR = $${MODULES_DIR}/cpd
+CPD_COMPONENT_DIR = $${MODULES_DIR}/cpd/cpd_component
+CPD_COMPONENT_INTERFACE = $(CPD_COMPONENT_DIR)/interface
+CPD_COMPONENT_SOURCES = $(CPD_COMPONENT_DIR)/src
+
+CPD_COMPONENT_FILES = $(CPD_COMPONENT_SOURCES)/ia_css_cpd_component_create.c
+CPD_COMPONENT_FILES += $(CPD_COMPONENT_SOURCES)/ia_css_cpd_component.c
+CPD_COMPONENT_CPPFLAGS = -I$(CPD_COMPONENT_INTERFACE)
+CPD_COMPONENT_CPPFLAGS += -I$(CPD_COMPONENT_SOURCES)
+CPD_COMPONENT_CPPFLAGS += -I$(CPD_DIR)
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_component/interface/ia_css_cpd_component_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_component/interface/ia_css_cpd_component_types.h
new file mode 100644
index 000000000000..7ad3070b2fd7
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_component/interface/ia_css_cpd_component_types.h
@@ -0,0 +1,90 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CPD_COMPONENT_TYPES_H
+#define __IA_CSS_CPD_COMPONENT_TYPES_H
+
+/** @file
+ * This file contains data structures related to the generation of the
+ * CPD file
+ */
+
+#include "type_support.h"
+
+#define SIZE_OF_FW_ARCH_VERSION 7
+#define SIZE_OF_SYSTEM_VERSION 11
+#define SIZE_OF_COMPONENT_NAME 12
+
+enum ia_css_cpd_component_endianness {
+	IA_CSS_CPD_COMP_ENDIAN_RSVD,
+	IA_CSS_CPD_COMP_LITTLE_ENDIAN,
+	IA_CSS_CPD_COMP_BIG_ENDIAN
+};
+
+/** Module Data (components) Header
+ * The following data structure has been created using FAS section 5.25
+ * Open: should we add padding at the end of the module directory
+ * (the component must be 512-byte aligned)?
+ */
+typedef struct {
+	uint32_t header_size;
+	/**< Specifies endianness of the binary data */
+	unsigned int endianness;
+	/**< fw_pkg_date is current date stored in 'binary decimal'
+	 * representation e.g. 538248729 (0x20150619)
+	 */
+	uint32_t fw_pkg_date;
+	/**< hive_sdk_date is date of HIVE_SDK stored in
+	 * 'binary decimal' representation
+	 */
+	uint32_t hive_sdk_date;
+	/**< compiler_date is date of ptools stored in
+	 * 'binary decimal' representation
+	 */
+	uint32_t compiler_date;
+	/**< UNSCHED / SCHED / TARGET / CRUN */
+	unsigned int target_platform_type;
+	/**< specifies the system version stored as string
+	 * e.g. BXTB0_IPU4'\0'
+	 */
+	uint8_t system_version[SIZE_OF_SYSTEM_VERSION];
+	/**< specifies fw architecture version e.g. for BXT CSS3.0'\0' */
+	uint8_t fw_arch_version[SIZE_OF_FW_ARCH_VERSION];
+	uint8_t rsvd[2];
+} ia_css_header_component_t;
+
+/** Module Data Directory = Directory Header + Directory Entry (0..n)
+ * The following two data structures have been taken from the CSE Storage
+ * FAS (CPD design)
+ * Module Data Directory Header
+ */
+typedef struct {
+	uint32_t header_marker;
+	uint32_t number_of_entries;
+	uint8_t header_version;
+	uint8_t entry_version;
+	uint8_t header_length; /**< 0x10 (16) Fixed for this version*/
+	uint8_t checksum;
+	uint32_t partition_name;
+} ia_css_directory_header_component_t;
+
+/** Module Data Directory Entry
+ */
+typedef struct {
+	/**< character string describing the component name */
+	uint8_t entry_name[SIZE_OF_COMPONENT_NAME];
+	uint32_t offset;
+	uint32_t length;
+	uint32_t rsvd; /**< Must be 0 */
+} ia_css_directory_entry_component_t;
+
+#endif /* __IA_CSS_CPD_COMPONENT_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_metadata/cpd_metadata.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_metadata/cpd_metadata.mk
new file mode 100644
index 000000000000..ac78815dfbd8
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_metadata/cpd_metadata.mk
@@ -0,0 +1,29 @@
+##
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details
+#
+##
+
+
+# MODULE is CPD UTL (Metadata File Extension)
+
+CPD_DIR = $${MODULES_DIR}/cpd/
+CPD_METADATA_DIR = $${MODULES_DIR}/cpd/cpd_metadata
+CPD_METADATA_INTERFACE = $(CPD_METADATA_DIR)/interface
+CPD_METADATA_SOURCES = $(CPD_METADATA_DIR)/src
+
+CPD_METADATA_FILES = $(CPD_METADATA_SOURCES)/ia_css_cpd_metadata_create.c
+CPD_METADATA_FILES += $(CPD_METADATA_SOURCES)/ia_css_cpd_metadata.c
+CPD_METADATA_CPPFLAGS = -I$(CPD_METADATA_INTERFACE) \
+	-I$(CPD_METADATA_SOURCES) \
+	-I$(CPD_DIR)
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_metadata/interface/ia_css_cpd_metadata_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_metadata/interface/ia_css_cpd_metadata_types.h
new file mode 100644
index 000000000000..a88c6aede08c
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/cpd/cpd_metadata/interface/ia_css_cpd_metadata_types.h
@@ -0,0 +1,111 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CPD_METADATA_TYPES_H
+#define __IA_CSS_CPD_METADATA_TYPES_H
+
+/** @file
+ * This file contains data structures related to the generation of the
+ * metadata file extension
+ */
+#include
+
+/* As per v0.2 manifest document
+ * Header = Extension Type (4) + Extension Length (4) +
+ * iUnit Image Type (4) + Reserved (16)
+ */
+#define IPU_METADATA_HEADER_RSVD_SIZE 16
+#define IPU_METADATA_HEADER_FIELDS_SIZE 12
+#define IPU_METADATA_HEADER_SIZE \
+	(IPU_METADATA_HEADER_FIELDS_SIZE + IPU_METADATA_HEADER_RSVD_SIZE)
+
+/* iUnit metadata extension type value */
+#define IPU_METADATA_EXTENSION_TYPE 16
+
+/* Unique id for level 0 bootloader component */
+#define IA_CSS_IUNIT_BTLDR_ID 0
+/* Unique id for psys server program group component */
+#define IA_CSS_IUNIT_PSYS_SERVER_ID 1
+/* Unique id for isys server program group component */
+#define IA_CSS_IUNIT_ISYS_SERVER_ID 2
+/* Initial Identifier for client program group component */
+#define IA_CSS_IUNIT_CLIENT_ID 3
+
+/* Use this to parse the date from the release version of the iUnit component
+ * e.g. 20150701
+ */
+#define IA_CSS_IUNIT_COMP_DATE_SIZE 8
+/* offset of release version in program group binary
+ * e.g. release_version = "scci_gerrit_20150716_2117"
+ * In the cpd file we only use the date/version for the component
+ */
+#define IA_CSS_IUNIT_DATE_OFFSET 12
+
+#define IPU_METADATA_HASH_KEY_SIZE 32
+#define IPU_METADATA_ATTRIBUTE_SIZE 16
+#define IA_CSE_METADATA_COMPONENT_ID_MAX 127
+
+typedef enum {
+	IA_CSS_CPD_METADATA_IMAGE_TYPE_RESERVED,
+	IA_CSS_CPD_METADATA_IMAGE_TYPE_BOOTLOADER,
+	IA_CSS_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE
+} ia_css_cpd_metadata_image_type_t;
+
+typedef enum {
+	IA_CSS_CPD_MAIN_FW_TYPE_RESERVED,
+	IA_CSS_CPD_MAIN_FW_TYPE_PSYS_SERVER,
+	IA_CSS_CPD_MAIN_FW_TYPE_ISYS_SERVER,
+	IA_CSS_CPD_MAIN_FW_TYPE_CLIENT
+} ia_css_cpd_iunit_main_fw_type_t;
+
+/** Data structure for component specific information
+ * The following data structure has been taken from CSE Manifest v0.2
+ */
+typedef struct {
+	/**< Component ID - unique for each component */
+	uint32_t id;
+	/**< Size of the component */
+	uint32_t size;
+	/**< Version/date of when the component is generated/created */
+	uint32_t version;
+	/**< SHA 256 Hash Key for component */
+	uint8_t sha2_hash[IPU_METADATA_HASH_KEY_SIZE];
+	/**< component sp entry point
+	 * - Only valid for btldr/psys/isys server component
+	 */
+	uint32_t entry_point;
+	/**< component icache base address
+	 * - Only valid for btldr/psys/isys server component
+	 */
+	uint32_t icache_base_offset;
+	/**< Reserved - must be 0 */
+	uint8_t attributes[IPU_METADATA_ATTRIBUTE_SIZE];
+} ia_css_cpd_metadata_component_t;
+
+/** Data structure for Metadata File Extension Header
+ */
+typedef struct {
+	/**< Specifies the binary image type
+	 * - could be bootloader or main firmware
	 */
+	ia_css_cpd_metadata_image_type_t image_type;
+	/**< Number of components available in metadata file extension
+	 * (For btldr always 1)
+	 */
+	uint32_t component_count;
+	/**< Component specific information */
+	ia_css_cpd_metadata_component_t *components;
+} ia_css_cpd_metadata_desc_t;
+
+#endif /* __IA_CSS_CPD_METADATA_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/device_access.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/device_access.mk
new file mode 100644
index 000000000000..1629d9af803b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/device_access.mk
@@ -0,0 +1,40 @@
+# # #
+# Support for Intel
Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# + +ifndef _DEVICE_ACCESS_MK_ +_DEVICE_ACCESS_MK_ = 1 + +# DEVICE_ACCESS_VERSION= +include $(MODULES_DIR)/config/system_$(IPU_SYSVER).mk + +DEVICE_ACCESS_DIR=$${MODULES_DIR}/device_access +DEVICE_ACCESS_INTERFACE=$(DEVICE_ACCESS_DIR)/interface +DEVICE_ACCESS_SOURCES=$(DEVICE_ACCESS_DIR)/src + +DEVICE_ACCESS_HOST_FILES = + +DEVICE_ACCESS_FW_FILES = + +DEVICE_ACCESS_HOST_CPPFLAGS = \ + -I$(DEVICE_ACCESS_INTERFACE) \ + -I$(DEVICE_ACCESS_SOURCES) + +DEVICE_ACCESS_FW_CPPFLAGS = \ + -I$(DEVICE_ACCESS_INTERFACE) \ + -I$(DEVICE_ACCESS_SOURCES) + +DEVICE_ACCESS_FW_CPPFLAGS += \ + -I$(DEVICE_ACCESS_SOURCES)/$(DEVICE_ACCESS_VERSION) +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_cmem.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_cmem.h new file mode 100644 index 000000000000..3dc47c29fcab --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_cmem.h @@ -0,0 +1,58 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CMEM_H +#define __IA_CSS_CMEM_H + +#include "type_support.h" +#include "storage_class.h" + +#ifdef __VIED_CELL +typedef unsigned int ia_css_cmem_address_t; +#else +#include +typedef vied_subsystem_address_t ia_css_cmem_address_t; +#endif + +STORAGE_CLASS_INLINE uint32_t +ia_css_cmem_load_32(unsigned int ssid, ia_css_cmem_address_t address); + +STORAGE_CLASS_INLINE void +ia_css_cmem_store_32(unsigned int ssid, ia_css_cmem_address_t address, + uint32_t value); + +STORAGE_CLASS_INLINE void +ia_css_cmem_load(unsigned int ssid, ia_css_cmem_address_t address, void *data, + unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_cmem_store(unsigned int ssid, ia_css_cmem_address_t address, + const void *data, unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_cmem_zero(unsigned int ssid, ia_css_cmem_address_t address, + unsigned int size); + +STORAGE_CLASS_INLINE ia_css_cmem_address_t +ia_css_cmem_get_cmem_addr_from_dmem(unsigned int base_addr, void *p); + +/* Include inline implementation */ + +#ifdef __VIED_CELL +#include "ia_css_cmem_cell.h" +#else +#include "ia_css_cmem_host.h" +#endif + +#endif /* __IA_CSS_CMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_xmem.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_xmem.h new file mode 100644 index 000000000000..de2b94d8af54 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_xmem.h @@ -0,0 +1,65 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_XMEM_H +#define __IA_CSS_XMEM_H + +#include "type_support.h" +#include "storage_class.h" + +#ifdef __VIED_CELL +typedef unsigned int ia_css_xmem_address_t; +#else +#include +typedef host_virtual_address_t ia_css_xmem_address_t; +#endif + +STORAGE_CLASS_INLINE uint8_t +ia_css_xmem_load_8(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE uint16_t +ia_css_xmem_load_16(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE uint32_t +ia_css_xmem_load_32(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE void +ia_css_xmem_load(unsigned int mmid, ia_css_xmem_address_t address, void *data, + unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_8(unsigned int mmid, ia_css_xmem_address_t address, + uint8_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_16(unsigned int mmid, ia_css_xmem_address_t address, + uint16_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_32(unsigned int mmid, ia_css_xmem_address_t address, + uint32_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store(unsigned int mmid, ia_css_xmem_address_t address, + const void *data, unsigned int bytes); + +/* Include inline implementation */ + +#ifdef __VIED_CELL +#include "ia_css_xmem_cell.h" +#else +#include "ia_css_xmem_host.h" +#endif + +#endif /* __IA_CSS_XMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_xmem_cmem.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_xmem_cmem.h new file mode 100644 index 000000000000..57aab3323c73 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/interface/ia_css_xmem_cmem.h @@ -0,0 +1,35 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_XMEM_CMEM_H +#define __IA_CSS_XMEM_CMEM_H + +#include "ia_css_cmem.h" +#include "ia_css_xmem.h" + +/* Copy data from xmem to cmem, e.g., from a program in DDR to a cell's DMEM */ +/* This may also be implemented using DMA */ + +STORAGE_CLASS_INLINE void +ia_css_xmem_to_cmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size); + +/* include inline implementation */ +#include "ia_css_xmem_cmem_impl.h" + +#endif /* __IA_CSS_XMEM_CMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_cmem_host.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_cmem_host.h new file mode 100644 index 000000000000..22799e67214c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_cmem_host.h @@ -0,0 +1,121 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_CMEM_HOST_H
+#define __IA_CSS_CMEM_HOST_H
+
+/* This file is an inline implementation for the interface ia_css_cmem.h
+ * and should only be included there. */
+
+#include "assert_support.h"
+#include "misc_support.h"
+
+STORAGE_CLASS_INLINE uint32_t
+ia_css_cmem_load_32(unsigned int ssid, ia_css_cmem_address_t address)
+{
+	/* Address has to be word aligned */
+	assert(0 == address % 4);
+	return vied_subsystem_load_32(ssid, address);
+}
+
+STORAGE_CLASS_INLINE uint32_t
+ia_css_cond_cmem_load_32(bool cond, unsigned int ssid,
+	ia_css_cmem_address_t address)
+{
+	/* Address has to be word aligned */
+	assert(0 == address % 4);
+	if (cond)
+		return vied_subsystem_load_32(ssid, address);
+	else
+		return 0;
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cmem_store_32(unsigned int ssid, ia_css_cmem_address_t address,
+	uint32_t data)
+{
+	/* Address has to be word aligned */
+	assert(0 == address % 4);
+	vied_subsystem_store_32(ssid, address, data);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cond_cmem_store_32(bool cond, unsigned int ssid,
+	ia_css_cmem_address_t address, uint32_t data)
+{
+	/* Address has to be word aligned */
+	assert(0 == address % 4);
+	if (cond)
+		vied_subsystem_store_32(ssid, address, data);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cmem_load(unsigned int ssid, ia_css_cmem_address_t address, void *data,
+	unsigned int size)
+{
+	uint32_t *data32 = (uint32_t *)data;
+	uint32_t end = address + size;
+
+	assert(size % 4 == 0);
+	assert(address % 4 == 0);
+	assert((uintptr_t)data % 4 == 0);
+
+	while (address != end) {
+		*data32 = ia_css_cmem_load_32(ssid, address);
+		address += 4;
+		data32 += 1;
+	}
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cmem_store(unsigned int ssid, ia_css_cmem_address_t address,
+	const void *data, unsigned int size)
+{
+	const uint32_t *data32 = (const uint32_t *)data;
+	uint32_t end = address + size;
+
+	assert(size % 4 == 0);
+	assert(address % 4 == 0);
+	assert((uintptr_t)data % 4 == 0);
+
+	while (address != end) {
+		ia_css_cmem_store_32(ssid, address, *data32);
+		address += 4;
+		data32 += 1;
+	}
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cmem_zero(unsigned int ssid, ia_css_cmem_address_t address,
+	unsigned int size)
+{
+	uint32_t end = address + size;
+
+	assert(size % 4 == 0);
+	assert(address % 4 == 0);
+
+	while (address != end) {
+		ia_css_cmem_store_32(ssid, address, 0);
+		address += 4;
+	}
+}
+
+STORAGE_CLASS_INLINE ia_css_cmem_address_t
+ia_css_cmem_get_cmem_addr_from_dmem(unsigned int base_addr, void *p)
+{
+	NOT_USED(base_addr);
+	return (ia_css_cmem_address_t)(uintptr_t)p;
+}
+
+#endif /* __IA_CSS_CMEM_HOST_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_xmem_cmem_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_xmem_cmem_impl.h
new file mode 100644
index 000000000000..adc178b75059
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_xmem_cmem_impl.h
@@ -0,0 +1,79 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_XMEM_CMEM_IMPL_H
+#define __IA_CSS_XMEM_CMEM_IMPL_H
+
+#include "ia_css_xmem_cmem.h"
+
+#include "ia_css_cmem.h"
+#include "ia_css_xmem.h"
+
+/* Copy data from xmem to cmem, e.g., from a program in DDR to a cell's DMEM */
+/* This may also be implemented using DMA */
+
+STORAGE_CLASS_INLINE void
+ia_css_xmem_to_cmem_copy(
+	unsigned int mmid,
+	unsigned int ssid,
+	ia_css_xmem_address_t src,
+	ia_css_cmem_address_t dst,
+	unsigned int size)
+{
+	/* copy from ddr to subsystem, e.g., cell dmem */
+	ia_css_cmem_address_t end = dst + size;
+
+	assert(size % 4 == 0);
+	assert((uintptr_t) dst % 4 == 0);
+	assert((uintptr_t) src % 4 == 0);
+
+	while (dst != end) {
+		uint32_t data;
+
+		data = ia_css_xmem_load_32(mmid, src);
+		ia_css_cmem_store_32(ssid, dst, data);
+		dst += 4;
+		src += 4;
+	}
+}
+
+/* Copy data from cmem to xmem */
+
+STORAGE_CLASS_INLINE void
+ia_css_cmem_to_xmem_copy(
+	unsigned int mmid,
+	unsigned int ssid,
+	ia_css_cmem_address_t src,
+	ia_css_xmem_address_t dst,
+	unsigned int size)
+{
+	/* copy from subsystem, e.g., cell dmem, to ddr */
+	ia_css_xmem_address_t end = dst + size;
+
+	assert(size % 4 == 0);
+	assert((uintptr_t) dst % 4 == 0);
+	assert((uintptr_t) src % 4 == 0);
+
+	while (dst != end) {
+		uint32_t data;
+
+		data = ia_css_cmem_load_32(ssid, src);
+		ia_css_xmem_store_32(mmid, dst, data);
+		dst += 4;
+		src += 4;
+	}
+}
+
+
+#endif /* __IA_CSS_XMEM_CMEM_IMPL_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_xmem_host.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_xmem_host.h
new file mode 100644
index 000000000000..d94991fc1114
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/device_access/src/ia_css_xmem_host.h
@@ -0,0 +1,84 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_XMEM_HOST_H
+#define __IA_CSS_XMEM_HOST_H
+
+#include "ia_css_xmem.h"
+#include <vied/shared_memory_access.h>
+#include "assert_support.h"
+#include <type_support.h>
+
+STORAGE_CLASS_INLINE uint8_t
+ia_css_xmem_load_8(unsigned int mmid, ia_css_xmem_address_t address)
+{
+ return shared_memory_load_8(mmid, address);
+}
+
+STORAGE_CLASS_INLINE uint16_t
+ia_css_xmem_load_16(unsigned int mmid, ia_css_xmem_address_t address)
+{
+ /* Address has to be half-word aligned */
+ assert(0 == (uintptr_t) address % 2);
+ return shared_memory_load_16(mmid, address);
+}
+
+STORAGE_CLASS_INLINE uint32_t
+ia_css_xmem_load_32(unsigned int mmid, ia_css_xmem_address_t address)
+{
+ /* Address has to be word aligned */
+ assert(0 == (uintptr_t) address % 4);
+ return shared_memory_load_32(mmid, address);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_xmem_load(unsigned int mmid, ia_css_xmem_address_t address, void *data,
+ unsigned int size)
+{
+ shared_memory_load(mmid, address, data, size);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_xmem_store_8(unsigned int mmid, ia_css_xmem_address_t address,
+ uint8_t value)
+{
+ shared_memory_store_8(mmid, address, value);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_xmem_store_16(unsigned int mmid, ia_css_xmem_address_t address,
+ uint16_t value)
+{
+ /* Address has to be half-word aligned */
+ assert(0 == (uintptr_t) address % 2);
+ shared_memory_store_16(mmid, address, value);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_xmem_store_32(unsigned int mmid, ia_css_xmem_address_t address,
+ uint32_t value)
+{
+ /* Address has to be word aligned */
+ assert(0 == (uintptr_t) address % 4);
+ shared_memory_store_32(mmid, address, value);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_xmem_store(unsigned int mmid, ia_css_xmem_address_t address,
+ const void *data, unsigned int bytes)
+{
+ shared_memory_store(mmid, address, data, bytes);
+}
+
+#endif /* __IA_CSS_XMEM_HOST_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/bxtB0/ipu_device_buttress_properties_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/bxtB0/ipu_device_buttress_properties_struct.h
new file mode 100644
index 000000000000..5102f6e44d2f
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/bxtB0/ipu_device_buttress_properties_struct.h
@@ -0,0 +1,68 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H +#define __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H + +/* Destination values for master port 0 and bitfield "request_dest" */ +enum cio_M0_btrs_dest { + DEST_IS_BUT_REGS = 0, + DEST_IS_DDR, + RESERVED, + DEST_IS_SUBSYSTEM, + N_BTRS_DEST +}; + +/* Bit-field positions for M0 info bits */ +enum ia_css_info_bits_m0_pos { + IA_CSS_INFO_BITS_M0_SNOOPABLE_POS = 0, + IA_CSS_INFO_BITS_M0_IMR_DESTINED_POS = 1, + IA_CSS_INFO_BITS_M0_REQUEST_DEST_POS = 4 +}; + +#define IA_CSS_INFO_BITS_M0_DDR \ + (DEST_IS_DDR << IA_CSS_INFO_BITS_M0_REQUEST_DEST_POS) +#define IA_CSS_INFO_BITS_M0_SNOOPABLE (1 << IA_CSS_INFO_BITS_M0_SNOOPABLE_POS) + +/* Info bits as expected by the buttress */ +/* Deprecated because bit fields are not portable */ + +/* For master port 0*/ +union cio_M0_t { + struct { + unsigned int snoopable : 1; + unsigned int imr_destined : 1; + unsigned int spare0 : 2; + unsigned int request_dest : 2; + unsigned int spare1 : 26; + } as_bitfield; + unsigned int as_word; +}; + +/* For master port 1*/ +union cio_M1_t { + struct { + unsigned int spare0 : 1; + unsigned int deadline_pointer : 1; + unsigned int reserved : 1; + unsigned int zlw : 1; + unsigned int stream_id : 4; + unsigned int address_swizzling : 1; + unsigned int spare1 : 23; + } as_bitfield; + unsigned int as_word; +}; + + +#endif /* __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties.h new file mode 100644 index 000000000000..e6e1e9dcbe80 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
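[Illustrative sketch, not part of the patch] A sanity check on the M0 layout above: composing the info word through the deprecated bitfield union yields the same value as the portable IA_CSS_INFO_BITS_M0_* macros, assuming the compiler allocates bitfields from bit 0 upward, which is exactly why the macros are preferred:

    static unsigned int example_m0_info_word(void)
    {
            union cio_M0_t m0 = { .as_bitfield = {
                    .snoopable = 1,             /* bit 0 */
                    .request_dest = DEST_IS_DDR /* bits 5:4 */
            } };

            /* equals IA_CSS_INFO_BITS_M0_SNOOPABLE | IA_CSS_INFO_BITS_M0_DDR
             * (0x11) without relying on the compiler's bitfield layout
             */
            return m0.as_word;
    }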
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_H +#define __IPU_DEVICE_CELL_PROPERTIES_H + +#include "storage_class.h" +#include "ipu_device_cell_type_properties.h" + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_devices(void); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_memories(const unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_size(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_address(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_databus_memory_address(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_masters(const unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_bits(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_num_segments(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_size(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_stride(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_base_reg(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_reg(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_icache_align(unsigned int cell_id); + +#ifdef C_RUN +STORAGE_CLASS_INLINE int +ipu_device_cell_id_crun(int cell_id); +#endif + +#include "ipu_device_cell_properties_func.h" + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_func.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_func.h new file mode 100644 index 000000000000..481b0504a237 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_func.h @@ -0,0 +1,164 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IPU_DEVICE_CELL_PROPERTIES_FUNC_H
+#define __IPU_DEVICE_CELL_PROPERTIES_FUNC_H
+
+/* define properties for all cells used in ISYS */
+
+#include "ipu_device_cell_properties_impl.h"
+#include "ipu_device_cell_devices.h"
+#include "assert_support.h"
+#include "storage_class.h"
+
+enum {IA_CSS_CELL_MASTER_ADDRESS_WIDTH = 32};
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_num_devices(void)
+{
+ return NUM_CELLS;
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_num_memories(const unsigned int cell_id)
+{
+ assert(cell_id < NUM_CELLS);
+ return ipu_device_cell_properties[cell_id].type_properties->count->
+ num_memories;
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_memory_size(const unsigned int cell_id,
+ const unsigned int mem_id)
+{
+ assert(cell_id < NUM_CELLS);
+ assert(mem_id < ipu_device_cell_num_memories(cell_id));
+ return ipu_device_cell_properties[cell_id].type_properties->
+ mem_size[mem_id];
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_memory_address(const unsigned int cell_id,
+ const unsigned int mem_id)
+{
+ assert(cell_id < NUM_CELLS);
+ assert(mem_id < ipu_device_cell_num_memories(cell_id));
+ return ipu_device_cell_properties[cell_id].mem_address[mem_id];
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_databus_memory_address(const unsigned int cell_id,
+ const unsigned int mem_id)
+{
+ assert(cell_id < NUM_CELLS);
+ assert(mem_id < ipu_device_cell_num_memories(cell_id));
+ assert(mem_id != 0);
+ return ipu_device_cell_properties[cell_id].mem_databus_address[mem_id];
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_num_masters(const unsigned int cell_id)
+{
+ assert(cell_id < NUM_CELLS);
+ return ipu_device_cell_properties[cell_id].type_properties->count->
+ num_master_ports;
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_master_segment_bits(const unsigned int cell_id,
+ const unsigned int master_id)
+{
+ assert(cell_id < NUM_CELLS);
+ assert(master_id < ipu_device_cell_num_masters(cell_id));
+ return ipu_device_cell_properties[cell_id].type_properties->
+ master[master_id].segment_bits;
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_master_num_segments(const unsigned int cell_id,
+ const unsigned int master_id)
+{
+ return 1u << ipu_device_cell_master_segment_bits(cell_id, master_id);
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_master_segment_size(const unsigned int cell_id,
+ const unsigned int master_id)
+{
+ return 1u << (IA_CSS_CELL_MASTER_ADDRESS_WIDTH -
+ ipu_device_cell_master_segment_bits(cell_id, master_id));
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_master_stride(const unsigned int cell_id,
+ const unsigned int master_id)
+{
+ assert(cell_id < NUM_CELLS);
+ assert(master_id < ipu_device_cell_num_masters(cell_id));
+ return
+ ipu_device_cell_properties[cell_id].type_properties->
+ master[master_id].stride;
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_master_base_reg(const unsigned int cell_id,
+ const unsigned int master_id)
+{
+ assert(cell_id < NUM_CELLS);
+ assert(master_id < ipu_device_cell_num_masters(cell_id));
+ return
+ ipu_device_cell_properties[cell_id].type_properties->
+ master[master_id].base_address_register;
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_master_info_reg(const unsigned int cell_id,
+ const unsigned int master_id)
+{
+ assert(cell_id < NUM_CELLS);
+ assert(master_id < ipu_device_cell_num_masters(cell_id));
+ return
+ ipu_device_cell_properties[cell_id].type_properties->
master[master_id].info_bits_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_override_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].info_override_bits_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_icache_align(unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + icache_align; +} + +#ifdef C_RUN +STORAGE_CLASS_INLINE int +ipu_device_cell_id_crun(int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_map_cell_id_to_crun_proc_id[cell_id]; +} +#endif + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_FUNC_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_struct.h new file mode 100644 index 000000000000..63397dc0b7fe --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_struct.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H +#define __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H + +/* definitions for all cell types */ + +struct ipu_device_cell_count_s { + unsigned int num_memories; + unsigned int num_master_ports; + unsigned int num_stall_bits; + unsigned int icache_align; +}; + +struct ipu_device_cell_master_properties_s { + unsigned int segment_bits; + unsigned int stride; /* offset to register of next segment */ + unsigned int base_address_register; /* address of first base address + register */ + unsigned int info_bits_register; + unsigned int info_override_bits_register; +}; + +struct ipu_device_cell_type_properties_s { + const struct ipu_device_cell_count_s *count; + const struct ipu_device_cell_master_properties_s *master; + const unsigned int *reg_offset; /* offsets of registers, some depend + on cell type */ + const unsigned int *mem_size; +}; + +struct ipu_device_cell_properties_s { + const struct ipu_device_cell_type_properties_s *type_properties; + const unsigned int *mem_address; + const unsigned int *mem_databus_address; + /* const cell_master_port_properties_s* master_port_properties; */ +}; + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_type_properties.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_type_properties.h new file mode 100644 index 000000000000..72caed3eef0c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_cell_type_properties.h @@ -0,0 +1,69 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. 
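[Illustrative sketch, not part of the patch] For intuition on the segment properties above: each master port splits its 32-bit address space into 1 << segment_bits segments, and one base-address register per segment, spaced by the stride, tells the fabric where each segment points. The helper below is hypothetical, and mapping all segments contiguously is an assumption of the example, not a documented policy:

    static void example_map_master_contiguous(unsigned int ssid,
                                              unsigned int cell,
                                              unsigned int master,
                                              unsigned int sys_base)
    {
            /* memory id 0 is the cell's register space in these tables */
            unsigned int regs = ipu_device_cell_memory_address(cell, 0);
            unsigned int nseg = ipu_device_cell_master_num_segments(cell, master);
            unsigned int size = ipu_device_cell_master_segment_size(cell, master);
            unsigned int base = ipu_device_cell_master_base_reg(cell, master);
            unsigned int strd = ipu_device_cell_master_stride(cell, master);
            unsigned int i;

            /* one base-address register per segment, spaced by the stride */
            for (i = 0; i < nseg; i++)
                    ia_css_cmem_store_32(ssid, regs + base + i * strd,
                                         sys_base + i * size);
    }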
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_TYPE_PROPERTIES_H +#define __IPU_DEVICE_CELL_TYPE_PROPERTIES_H + +#define IPU_DEVICE_INVALID_MEM_ADDRESS 0xFFFFFFFF + +enum ipu_device_cell_stat_ctrl_bit { + IPU_DEVICE_CELL_STAT_CTRL_RESET_BIT = 0, + IPU_DEVICE_CELL_STAT_CTRL_START_BIT = 1, + IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT = 3, + IPU_DEVICE_CELL_STAT_CTRL_READY_BIT = 5, + IPU_DEVICE_CELL_STAT_CTRL_SLEEP_BIT = 6, + IPU_DEVICE_CELL_STAT_CTRL_STALL_BIT = 7, + IPU_DEVICE_CELL_STAT_CTRL_CLEAR_IRQ_MASK_FLAG_BIT = 8, + IPU_DEVICE_CELL_STAT_CTRL_BROKEN_IRQ_MASK_FLAG_BIT = 9, + IPU_DEVICE_CELL_STAT_CTRL_READY_IRQ_MASK_FLAG_BIT = 10, + IPU_DEVICE_CELL_STAT_CTRL_SLEEP_IRQ_MASK_FLAG_BIT = 11, + IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT = 12, + IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT = 13 +}; + +enum ipu_device_cell_reg_addr { + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS = 0x0, + IPU_DEVICE_CELL_START_PC_REG_ADDRESS = 0x4, + IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS = 0x10, + IPU_DEVICE_CELL_ICACHE_INFO_BITS_REG_ADDRESS = 0x14 +}; + +enum ipu_device_cell_reg { + IPU_DEVICE_CELL_STAT_CTRL_REG, + IPU_DEVICE_CELL_START_PC_REG, + IPU_DEVICE_CELL_ICACHE_BASE_REG, + IPU_DEVICE_CELL_DEBUG_PC_REG, + IPU_DEVICE_CELL_STALL_REG, + IPU_DEVICE_CELL_NUM_REGS +}; + +enum ipu_device_cell_mem { + IPU_DEVICE_CELL_REGS, /* memory id of registers */ + IPU_DEVICE_CELL_PMEM, /* memory id of pmem */ + IPU_DEVICE_CELL_DMEM, /* memory id of dmem */ + IPU_DEVICE_CELL_BAMEM, /* memory id of bamem */ + IPU_DEVICE_CELL_VMEM /* memory id of vmem */ +}; +#define IPU_DEVICE_CELL_NUM_MEMORIES (IPU_DEVICE_CELL_VMEM + 1) + +enum ipu_device_cell_master { + IPU_DEVICE_CELL_MASTER_ICACHE, /* master port id of icache */ + IPU_DEVICE_CELL_MASTER_QMEM, + IPU_DEVICE_CELL_MASTER_CMEM, + IPU_DEVICE_CELL_MASTER_XMEM, + IPU_DEVICE_CELL_MASTER_XVMEM +}; +#define IPU_DEVICE_CELL_MASTER_NUM_MASTERS (IPU_DEVICE_CELL_MASTER_XVMEM + 1) + +#endif /* __IPU_DEVICE_CELL_TYPE_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties.h new file mode 100644 index 000000000000..fd0c5a586c94 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties.h @@ -0,0 +1,26 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
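[Illustrative sketch, not part of the patch] Putting the register map and control bits above together, a hypothetical start sequence for a cell; whether the icache invalidate and start bits may be set in a single write is an assumption here:

    static void example_start_cell(unsigned int ssid,
                                   ia_css_cmem_address_t regs,
                                   uint32_t start_pc)
    {
            /* program the entry point, then kick the cell */
            ia_css_cmem_store_32(ssid,
                    regs + IPU_DEVICE_CELL_START_PC_REG_ADDRESS, start_pc);
            ia_css_cmem_store_32(ssid,
                    regs + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS,
                    (1u << IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT) |
                    (1u << IPU_DEVICE_CELL_STAT_CTRL_START_BIT));
    }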
+*/ + +#ifndef __IPU_DEVICE_GP_PROPERTIES_H +#define __IPU_DEVICE_GP_PROPERTIES_H + +#include "storage_class.h" +#include "ipu_device_gp_properties_types.h" + +STORAGE_CLASS_INLINE unsigned int +ipu_device_gp_mux_addr(const unsigned int device_id, const unsigned int mux_id); + +#include "ipu_device_gp_properties_func.h" + +#endif /* __IPU_DEVICE_GP_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties_types.h new file mode 100644 index 000000000000..b57a2fc3f428 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties_types.h @@ -0,0 +1,207 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_GP_PROPERTIES_TYPES_H +#define __IPU_DEVICE_GP_PROPERTIES_TYPES_H + +enum ipu_device_gp_isa_value { + /* ISA_MUX_SEL options */ + IPU_DEVICE_GP_ISA_MUX_SEL_ICA = 0, /* Enable output after FF ICA */ + IPU_DEVICE_GP_ISA_MUX_SEL_LSC = 1, /* Enable output after FF LSC */ + IPU_DEVICE_GP_ISA_MUX_SEL_DPC = 2, /* Enable output after FF DPC */ + /* ICA stream block options */ + /* UNBLOCK signal received from ICA */ + IPU_DEVICE_GP_ISA_ICA_UNBLOCK = 0, + /* BLOCK signal received from ICA */ + IPU_DEVICE_GP_ISA_ICA_BLOCK = 1, + /* LSC stream block options */ + /* UNBLOCK signal received from LSC */ + IPU_DEVICE_GP_ISA_LSC_UNBLOCK = 0, + /* BLOCK signal received from LSC */ + IPU_DEVICE_GP_ISA_LSC_BLOCK = 1, + /* DPC stream block options */ + /* UNBLOCK signal received from DPC */ + IPU_DEVICE_GP_ISA_DPC_UNBLOCK = 0, + /* BLOCK signal received from DPC */ + IPU_DEVICE_GP_ISA_DPC_BLOCK = 1, + /* Defines needed only for bxtB0 */ + /* ISA_AWB_MUX_SEL options */ + /* Input Correction input */ + IPU_DEVICE_GP_ISA_AWB_MUX_SEL_ICA = 0, + /* DPC input */ + IPU_DEVICE_GP_ISA_AWB_MUX_SEL_DPC = 1, + /* ISA_AWB_MUX_SEL options */ + /* UNBLOCK DPC input */ + IPU_DEVICE_GP_ISA_AWB_MUX_ICA_UNBLOCK = 0, + /* BLOCK DPC input */ + IPU_DEVICE_GP_ISA_AWB_MUX_ICA_BLOCK = 1, + /* ISA_AWB_MUX_SEL options */ + /* UNBLOCK Input Correction input */ + IPU_DEVICE_GP_ISA_AWB_MUX_DPC_UNBLOCK = 0, + /* BLOCK Input Correction input */ + IPU_DEVICE_GP_ISA_AWB_MUX_DPC_BLOCK = 1, + + /* PAF STRM options */ + /* Disable streaming to PAF FF*/ + IPU_DEVICE_GP_ISA_PAF_DISABLE_STREAM = 0, + /* Enable stream0 to PAF FF*/ + IPU_DEVICE_GP_ISA_PAF_ENABLE_STREAM0 = 1, + /* Enable stream1 to PAF FF*/ + IPU_DEVICE_GP_ISA_PAF_ENABLE_STREAM1 = 2, + /* PAF SRC SEL options */ + /* External channel input */ + IPU_DEVICE_GP_ISA_PAF_SRC_SEL0 = 0, + /* DPC extracted input */ + IPU_DEVICE_GP_ISA_PAF_SRC_SEL1 = 1, + /* PAF_GDDPC_BLK options */ + IPU_DEVICE_GP_ISA_PAF_GDDPC_PORT_BLK0 = 0, + IPU_DEVICE_GP_ISA_PAF_GDDPC_PORT_BLK1 = 1, + /* PAF ISA STR_PORT options */ + IPU_DEVICE_GP_ISA_PAF_STR_PORT0 = 0, + IPU_DEVICE_GP_ISA_PAF_STR_PORT1 = 1, + + /* scaler port block options */ + 
IPU_DEVICE_GP_ISA_SCALER_PORT_UNBLOCK = 0, + IPU_DEVICE_GP_ISA_SCALER_PORT_BLOCK = 1, + + IPU_DEVICE_GP_ISA_STATIC_SCALED_OUT_DEMUX_SEL_S2V = 0, + IPU_DEVICE_GP_ISA_STATIC_SCALED_OUT_DEMUX_SEL_PSA = 1, + + /* Muxes/demuxes */ + /* 0 - to ISL.S2V; 1 - to PSA */ + IPU_DEVICE_GP_ISA_STATIC_ISA_ORIG_OUT_DEMUX_SEL_S2V = 0, + IPU_DEVICE_GP_ISA_STATIC_ISA_ORIG_OUT_DEMUX_SEL_PSA = 1, + /* 0 - to ISL.S2V; 1 - to PSA */ + IPU_DEVICE_GP_ISA_STATIC_ISA_SCALED_A_OUT_DEMUX_SEL_S2V = 0, + IPU_DEVICE_GP_ISA_STATIC_ISA_SCALED_A_OUT_DEMUX_SEL_PSA = 1, + /* 0 - Input Correction input. 1 - B2B mux input */ + IPU_DEVICE_GP_ISA_STATIC_AWB_MUX_SEL_INPUT_CORR = 0, + IPU_DEVICE_GP_ISA_STATIC_AWB_MUX_SEL_B2B = 1, + /* 0 - to Lsc , 1 - to Dpc , 2 - to X2b */ + IPU_DEVICE_GP_ISA_STATIC_ISA_INPUT_CORR_DEMUX_SEL_LSC = 0, + IPU_DEVICE_GP_ISA_STATIC_ISA_INPUT_CORR_DEMUX_SEL_DPC = 1, + IPU_DEVICE_GP_ISA_STATIC_ISA_INPUT_CORR_DEMUX_SEL_X2B = 2, + /* 0 - Input correction, 1 - Dpc , 2 - X2b */ + IPU_DEVICE_GP_ISA_STATIC_LSC_MUX_SEL_INPUT_CORR = 0, + IPU_DEVICE_GP_ISA_STATIC_LSC_MUX_SEL_DPC = 1, + IPU_DEVICE_GP_ISA_STATIC_LSC_MUX_SEL_X2B = 2, + /* 0 - to B2b, 1 - to Dpc , 2 - X2b*/ + IPU_DEVICE_GP_ISA_STATIC_LSC_DEMUX_SEL_B2B = 0, + IPU_DEVICE_GP_ISA_STATIC_LSC_DEMUX_SEL_DPC = 1, + IPU_DEVICE_GP_ISA_STATIC_LSC_DEMUX_SEL_X2B = 2, + /* 0 - Lsc, 1 - Input correction , 2 - X2b */ + IPU_DEVICE_GP_ISA_STATIC_DPC_MUX_SEL_LSC = 0, + IPU_DEVICE_GP_ISA_STATIC_DPC_MUX_SEL_INPUT_CORR = 1, + IPU_DEVICE_GP_ISA_STATIC_DPC_MUX_SEL_X2B = 2, + /* 0 - to Lsc, 1 - to B2b , 2 - to X2b */ + IPU_DEVICE_GP_ISA_STATIC_DPC_DEMUX_SEL_LSC = 0, + IPU_DEVICE_GP_ISA_STATIC_DPC_DEMUX_SEL_B2B = 1, + IPU_DEVICE_GP_ISA_STATIC_DPC_DEMUX_SEL_X2B = 2, + /* 0 - Lsc, 1 - X2b, 2 - Input correction */ + IPU_DEVICE_GP_ISA_STATIC_X2B_MUX_SEL_LSC = 0, + IPU_DEVICE_GP_ISA_STATIC_X2B_MUX_SEL_X2B = 1, + IPU_DEVICE_GP_ISA_STATIC_X2B_MUX_SEL_INPUT_CORR = 2, + /* 0 - Through X2B S2V RGBIR, 1 - Bypass */ + IPU_DEVICE_GP_ISA_STATIC_SVE_RGBIR_BP_MUX_DEMUX_SEL_SVE_RGBIR = 0, + IPU_DEVICE_GP_ISA_STATIC_SVE_RGBIR_BP_MUX_DEMUX_SEL_BYPASS = 1, + /* 0 - X2B SVE RGBIR, 1- X2B MD */ + IPU_DEVICE_GP_ISA_STATIC_IR_DEPTH_MUX_SEL_SVE_RGBIR = 0, + IPU_DEVICE_GP_ISA_STATIC_IR_DEPTH_MUX_SEL_MD = 1, + /* 0 - to Lsc, 1 - to Dpc, 2 - to B2b */ + IPU_DEVICE_GP_ISA_STATIC_X2B_DEMUX_SEL_LSC = 0, + IPU_DEVICE_GP_ISA_STATIC_X2B_DEMUX_SEL_DPC = 1, + IPU_DEVICE_GP_ISA_STATIC_X2B_DEMUX_SEL_B2B = 2, + /* 0 - Lsc, 1 - Dpc , 2 - X2b */ + IPU_DEVICE_GP_ISA_STATIC_B2B_MUX_SEL_LSC = 0, + IPU_DEVICE_GP_ISA_STATIC_B2B_MUX_SEL_DPC = 1, + IPU_DEVICE_GP_ISA_STATIC_B2B_MUX_SEL_X2B = 2, + /* 0 - External PAF CH0/1; 1 - DPC extracted PAF CH0/1; 2 - X2B extracted PAF CH0/ Black Box CH1 */ + IPU_DEVICE_GP_ISA_STATIC_PAF_SRC_SEL_EXT_PAF = 0, + IPU_DEVICE_GP_ISA_STATIC_PAF_SRC_SEL_DPC = 1, + IPU_DEVICE_GP_ISA_STATIC_PAF_SRC_SEL_X2B = 2, + /* 0 - from R2I; 1 - from B2B - TODO remove after SDK 0.5 as it was removed from design */ + IPU_DEVICE_GP_ISA_STATIC_ISA_ORIG_OUT_MUX_SEL_R2I = 0, + IPU_DEVICE_GP_ISA_STATIC_ISA_ORIG_OUT_MUX_SEL_B2B = 1, + /* Blockers */ + IPU_DEVICE_GP_ISA_STATIC_PORT_BLK_UNBLOCK = 0, + IPU_DEVICE_GP_ISA_STATIC_PORT_BLK_BLOCK = 1, + + + /* sis port block options */ + IPU_DEVICE_GP_ISA_SIS_PORT_UNBLOCK = 0, + IPU_DEVICE_GP_ISA_SIS_PORT_BLOCK = 1, + IPU_DEVICE_GP_ISA_CONF_INVALID = 0xFF +}; + +enum ipu_device_gp_psa_value { + /* Defines needed for bxtB0 */ + /* PSA_STILLS_MODE_MUX */ + IPU_DEVICE_GP_PSA_MUX_POST_RYNR_ROUTE_WO_DM = 0, + IPU_DEVICE_GP_PSA_MUX_POST_RYNR_ROUTE_W_DM = 1, + /* PSA_ACM_DEMUX */ 
+ IPU_DEVICE_GP_PSA_DEMUX_PRE_ACM_ROUTE_TO_ACM = 0,
+ IPU_DEVICE_GP_PSA_DEMUX_PRE_ACM_ROUTE_TO_S2V = 1,
+ /* PSA_S2V_RGB_F_MUX */
+ IPU_DEVICE_GP_PSA_MUX_PRE_S2V_RGB_F_FROM_ACM = 0,
+ IPU_DEVICE_GP_PSA_MUX_PRE_S2V_RGB_F_FROM_DM_OR_SPLITTER = 1,
+ /* PSA_V2S_RGB_4_DEMUX */
+ IPU_DEVICE_GP_PSA_DEMUX_POST_V2S_RGB_4_TO_GTM = 0,
+ IPU_DEVICE_GP_PSA_DEMUX_POST_V2S_RGB_4_TO_ACM = 1,
+ /* Muxes at the input and output of the BNLM
+ * fixed function MAS. Choose between pixel stream and
+ * delta stream as BNLM output (gpreg 1)
+ */
+ IPU_DEVICE_GP_PSA_1_NOISE_MUX_BNLM_PIXELS = 0,
+ IPU_DEVICE_GP_PSA_1_NOISE_MUX_DELTA_STREAM = 1,
+ /* enable/disable BNLM Pixel Block (gpreg 2) */
+ IPU_DEVICE_GP_PSA_1_BNLM_PIXEL_STREAM_BLOCK_DISABLE = 0,
+ IPU_DEVICE_GP_PSA_1_BNLM_PIXEL_STREAM_BLOCK_ENABLE = 1,
+ /* enable/disable BNLM delta stream (gpreg 3) */
+ IPU_DEVICE_GP_PSA_1_BNLM_DELTA_STREAM_BLOCK_DISABLE = 0,
+ IPU_DEVICE_GP_PSA_1_BNLM_DELTA_STREAM_BLOCK_ENABLE = 1,
+ /* choose BNLM output to XNR or to WB/DM (gpreg 0) */
+ IPU_DEVICE_GP_PSA_2_BNLM_TO_XNR = 0,
+ IPU_DEVICE_GP_PSA_2_BNLM_TO_WB_DM = 1,
+ /* choose direction of output from vec2str 4 (gpreg 4) */
+ IPU_DEVICE_GP_PSA_2_V2S_RGB_4_TO_GTC = 0,
+ IPU_DEVICE_GP_PSA_2_V2S_RGB_4_TO_ACM = 1,
+ IPU_DEVICE_GP_PSA_2_V2S_RGB_4_TO_VCSC = 2,
+ IPU_DEVICE_GP_PSA_2_V2S_RGB_4_TO_GSTAR = 3,
+ /* enable/disable VCSC input block (gpreg 7) */
+ IPU_DEVICE_GP_PSA_2_VCSC_INPUT_BLOCK_DISABLE = 0,
+ IPU_DEVICE_GP_PSA_2_VCSC_INPUT_BLOCK_ENABLE = 1,
+ /* enable/disable XNR5 bypass block (gpreg 8) */
+ IPU_DEVICE_GP_PSA_2_XNR5_BP_BLOCK_DISABLE = 0,
+ IPU_DEVICE_GP_PSA_2_XNR5_BP_BLOCK_ENABLE = 1,
+ /* choose to use VCSC or bypass it (gpreg 5) */
+ IPU_DEVICE_GP_PSA_3_MUX_USE_VCSC = 0,
+ IPU_DEVICE_GP_PSA_3_MUX_BP_VCSC = 1,
+ /* choose to use XNR5 or bypass it (gpreg 6) */
+ IPU_DEVICE_GP_PSA_3_MUX_USE_XNR5 = 0,
+ IPU_DEVICE_GP_PSA_3_MUX_BP_XNR5 = 1,
+ /* choose which input to use for the BNLM acc */
+ IPU_DEVICE_GP_PSA_1_BNLM_IN_MUX_V2S = 0,
+ IPU_DEVICE_GP_PSA_1_BNLM_IN_MUX_ISA_DOWNSCALED = 1,
+ IPU_DEVICE_GP_PSA_1_BNLM_IN_MUX_ISA_UNSCALED = 2,
+ IPU_DEVICE_GP_PSA_CONF_INVALID = 0xFF
+};
+
+enum ipu_device_gp_isl_value {
+ /* choose and route pixel stream to CSI BE */
+ IPU_DEVICE_GP_ISL_CSI_BE_IN_USE = 0,
+ /* choose and route pixel stream bypass CSI BE */
+ IPU_DEVICE_GP_ISL_CSI_BE_BYPASS
+};
+
+#endif /* __IPU_DEVICE_GP_PROPERTIES_TYPES_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_acb_devices.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_acb_devices.h
new file mode 100644
index 000000000000..d9472a5d33ca
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_acb_devices.h
@@ -0,0 +1,43 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */ + +#ifndef __IPU_DEVICE_ACB_DEVICES_H +#define __IPU_DEVICE_ACB_DEVICES_H + +enum ipu_device_acb_id { + /* PSA accelerators */ + IPU_DEVICE_ACB_WBA_ID = 0, + IPU_DEVICE_ACB_RYNR_ID, + IPU_DEVICE_ACB_DEMOSAIC_ID, + IPU_DEVICE_ACB_ACM_ID, + IPU_DEVICE_ACB_GTC_ID, + IPU_DEVICE_ACB_YUV1_ID, + IPU_DEVICE_ACB_DVS_ID, + IPU_DEVICE_ACB_LACE_ID, + /* ISA accelerators */ + IPU_DEVICE_ACB_ICA_ID, + IPU_DEVICE_ACB_LSC_ID, + IPU_DEVICE_ACB_DPC_ID, + IPU_DEVICE_ACB_IDS_ID, + IPU_DEVICE_ACB_AWB_ID, + IPU_DEVICE_ACB_AF_ID, + IPU_DEVICE_ACB_AE_ID, + IPU_DEVICE_ACB_NUM_ACB +}; + +#define IPU_DEVICE_ACB_NUM_PSA_ACB (IPU_DEVICE_ACB_LACE_ID + 1) +#define IPU_DEVICE_ACB_NUM_ISA_ACB \ + (IPU_DEVICE_ACB_NUM_ACB - IPU_DEVICE_ACB_NUM_PSA_ACB) + +#endif /* __IPU_DEVICE_ACB_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_devices.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_devices.h new file mode 100644 index 000000000000..7a57967cb6eb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_devices.h @@ -0,0 +1,38 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __IPU_DEVICE_CELL_DEVICES_H +#define __IPU_DEVICE_CELL_DEVICES_H + +#define SPC0_CELL processing_system_sp_cluster_sp_cluster_logic_spc_tile_sp +#define SPP0_CELL processing_system_sp_cluster_sp_cluster_logic_spp_tile0_sp +#define SPP1_CELL processing_system_sp_cluster_sp_cluster_logic_spp_tile1_sp +#define ISP0_CELL processing_system_isp_tile0_logic_isp +#define ISP1_CELL processing_system_isp_tile1_logic_isp +#define ISP2_CELL processing_system_isp_tile2_logic_isp +#define ISP3_CELL processing_system_isp_tile3_logic_isp + +enum ipu_device_psys_cell_id { + SPC0, + SPP0, + SPP1, + ISP0, + ISP1, + ISP2, + ISP3, + NUM_CELLS +}; +#define NUM_ISP_CELLS 4 + +#endif /* __IPU_DEVICE_CELL_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_properties_defs.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_properties_defs.h new file mode 100644 index 000000000000..09241bea7250 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_properties_defs.h @@ -0,0 +1,66 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +/* Generated file - please do not edit. 
*/ + +#ifndef _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ +#define _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ +#define SPC0_REGS_CBUS_ADDRESS 0x00000000 +#define SPC0_DMEM_CBUS_ADDRESS 0x00008000 +#define SPC0_DMEM_DBUS_ADDRESS 0x02000000 +#define SPC0_DMEM_DMA_M0_ADDRESS SPC0_DMEM_DBUS_ADDRESS +#define SPC0_DMEM_INT_DMA_M0_ADDRESS SPC0_DMEM_DBUS_ADDRESS +#define SPP0_REGS_CBUS_ADDRESS 0x00020000 +#define SPP0_DMEM_CBUS_ADDRESS 0x00028000 +#define SPP0_DMEM_DBUS_ADDRESS 0x02020000 +#define SPP1_REGS_CBUS_ADDRESS 0x00030000 +#define SPP1_DMEM_CBUS_ADDRESS 0x00038000 +#define SPP1_DMEM_DBUS_ADDRESS 0x02030000 +#define ISP0_REGS_CBUS_ADDRESS 0x001C0000 +#define ISP0_PMEM_CBUS_ADDRESS 0x001D0000 +#define ISP0_DMEM_CBUS_ADDRESS 0x001F0000 +#define ISP0_BAMEM_CBUS_ADDRESS 0x00200000 +#define ISP0_VMEM_CBUS_ADDRESS 0x00220000 +#define ISP1_REGS_CBUS_ADDRESS 0x00240000 +#define ISP1_PMEM_CBUS_ADDRESS 0x00250000 +#define ISP1_DMEM_CBUS_ADDRESS 0x00270000 +#define ISP1_BAMEM_CBUS_ADDRESS 0x00280000 +#define ISP1_VMEM_CBUS_ADDRESS 0x002A0000 +#define ISP2_REGS_CBUS_ADDRESS 0x002C0000 +#define ISP2_PMEM_CBUS_ADDRESS 0x002D0000 +#define ISP2_DMEM_CBUS_ADDRESS 0x002F0000 +#define ISP2_BAMEM_CBUS_ADDRESS 0x00300000 +#define ISP2_VMEM_CBUS_ADDRESS 0x00320000 +#define ISP3_REGS_CBUS_ADDRESS 0x00340000 +#define ISP3_PMEM_CBUS_ADDRESS 0x00350000 +#define ISP3_DMEM_CBUS_ADDRESS 0x00370000 +#define ISP3_BAMEM_CBUS_ADDRESS 0x00380000 +#define ISP3_VMEM_CBUS_ADDRESS 0x003A0000 +#define ISP0_PMEM_DBUS_ADDRESS 0x08000000 +#define ISP0_DMEM_DBUS_ADDRESS 0x08400000 +#define ISP0_BAMEM_DBUS_ADDRESS 0x09000000 +#define ISP0_VMEM_DBUS_ADDRESS 0x08800000 +#define ISP1_PMEM_DBUS_ADDRESS 0x0A000000 +#define ISP1_DMEM_DBUS_ADDRESS 0x0A400000 +#define ISP1_BAMEM_DBUS_ADDRESS 0x0B000000 +#define ISP1_VMEM_DBUS_ADDRESS 0x0A800000 +#define ISP2_PMEM_DBUS_ADDRESS 0x0C000000 +#define ISP2_DMEM_DBUS_ADDRESS 0x0C400000 +#define ISP2_BAMEM_DBUS_ADDRESS 0x0D000000 +#define ISP2_VMEM_DBUS_ADDRESS 0x0C800000 +#define ISP3_PMEM_DBUS_ADDRESS 0x0E000000 +#define ISP3_DMEM_DBUS_ADDRESS 0x0E400000 +#define ISP3_BAMEM_DBUS_ADDRESS 0x0F000000 +#define ISP3_VMEM_DBUS_ADDRESS 0x0E800000 +#endif /* _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_properties_impl.h new file mode 100644 index 000000000000..10c28983eeb6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_cell_properties_impl.h @@ -0,0 +1,193 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_IMPL_H +#define __IPU_DEVICE_CELL_PROPERTIES_IMPL_H + +#include "ipu_device_sp2600_control_properties_impl.h" +#include "ipu_device_sp2600_proxy_properties_impl.h" +#include "ipu_device_isp2600_properties_impl.h" +#include "ipu_device_cell_properties_defs.h" +#include "ipu_device_cell_devices.h" +#include "ipu_device_cell_type_properties.h"/* IPU_DEVICE_INVALID_MEM_ADDRESS */ + +static const unsigned int +ipu_device_spc0_mem_address[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + SPC0_REGS_CBUS_ADDRESS, + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPC0_DMEM_CBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spp0_mem_address[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + SPP0_REGS_CBUS_ADDRESS, + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPP0_DMEM_CBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spp1_mem_address[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + SPP1_REGS_CBUS_ADDRESS, + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPP1_DMEM_CBUS_ADDRESS +}; + +static const unsigned int +ipu_device_isp0_mem_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + ISP0_REGS_CBUS_ADDRESS, /* reg addr */ + ISP0_PMEM_CBUS_ADDRESS, /* pmem addr */ + ISP0_DMEM_CBUS_ADDRESS, /* dmem addr */ + ISP0_BAMEM_CBUS_ADDRESS,/* bamem addr */ + ISP0_VMEM_CBUS_ADDRESS /* vmem addr */ +}; + +static const unsigned int +ipu_device_isp1_mem_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + ISP1_REGS_CBUS_ADDRESS, /* reg addr */ + ISP1_PMEM_CBUS_ADDRESS, /* pmem addr */ + ISP1_DMEM_CBUS_ADDRESS, /* dmem addr */ + ISP1_BAMEM_CBUS_ADDRESS,/* bamem addr */ + ISP1_VMEM_CBUS_ADDRESS /* vmem addr */ +}; + +static const unsigned int +ipu_device_isp2_mem_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + ISP2_REGS_CBUS_ADDRESS, /* reg addr */ + ISP2_PMEM_CBUS_ADDRESS, /* pmem addr */ + ISP2_DMEM_CBUS_ADDRESS, /* dmem addr */ + ISP2_BAMEM_CBUS_ADDRESS,/* bamem addr */ + ISP2_VMEM_CBUS_ADDRESS /* vmem addr */ +}; + +static const unsigned int +ipu_device_isp3_mem_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + ISP3_REGS_CBUS_ADDRESS, /* reg addr */ + ISP3_PMEM_CBUS_ADDRESS, /* pmem addr */ + ISP3_DMEM_CBUS_ADDRESS, /* dmem addr */ + ISP3_BAMEM_CBUS_ADDRESS,/* bamem addr */ + ISP3_VMEM_CBUS_ADDRESS /* vmem addr */ +}; + +static const unsigned int +ipu_device_spc0_mem_databus_address[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPC0_DMEM_DBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spp0_mem_databus_address[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPP0_DMEM_DBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spp1_mem_databus_address[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPP1_DMEM_DBUS_ADDRESS +}; + +static const unsigned int +ipu_device_isp0_mem_databus_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + ISP0_PMEM_DBUS_ADDRESS, /* pmem databus addr */ + ISP0_DMEM_DBUS_ADDRESS, /* dmem databus addr */ + ISP0_BAMEM_DBUS_ADDRESS, /* bamem databus addr */ + ISP0_VMEM_DBUS_ADDRESS /* vmem databus addr */ +}; + +static const unsigned int +ipu_device_isp1_mem_databus_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + ISP1_PMEM_DBUS_ADDRESS, /* pmem databus addr */ + 
ISP1_DMEM_DBUS_ADDRESS, /* dmem databus addr */ + ISP1_BAMEM_DBUS_ADDRESS, /* bamem databus addr */ + ISP1_VMEM_DBUS_ADDRESS /* vmem databus addr */ +}; + +static const unsigned int +ipu_device_isp2_mem_databus_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + ISP2_PMEM_DBUS_ADDRESS, /* pmem databus addr */ + ISP2_DMEM_DBUS_ADDRESS, /* dmem databus addr */ + ISP2_BAMEM_DBUS_ADDRESS, /* bamem databus addr */ + ISP2_VMEM_DBUS_ADDRESS /* vmem databus addr */ +}; + +static const unsigned int +ipu_device_isp3_mem_databus_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + ISP3_PMEM_DBUS_ADDRESS, /* pmem databus addr */ + ISP3_DMEM_DBUS_ADDRESS, /* dmem databus addr */ + ISP3_BAMEM_DBUS_ADDRESS, /* bamem databus addr */ + ISP3_VMEM_DBUS_ADDRESS /* vmem databus addr */ +}; + +static const struct ipu_device_cell_properties_s +ipu_device_cell_properties[NUM_CELLS] = { + { + &ipu_device_sp2600_control_properties, + ipu_device_spc0_mem_address, + ipu_device_spc0_mem_databus_address + }, + { + &ipu_device_sp2600_proxy_properties, + ipu_device_spp0_mem_address, + ipu_device_spp0_mem_databus_address + }, + { + &ipu_device_sp2600_proxy_properties, + ipu_device_spp1_mem_address, + ipu_device_spp1_mem_databus_address + }, + { + &ipu_device_isp2600_properties, + ipu_device_isp0_mem_address, + ipu_device_isp0_mem_databus_address + }, + { + &ipu_device_isp2600_properties, + ipu_device_isp1_mem_address, + ipu_device_isp1_mem_databus_address + }, + { + &ipu_device_isp2600_properties, + ipu_device_isp2_mem_address, + ipu_device_isp2_mem_databus_address + }, + { + &ipu_device_isp2600_properties, + ipu_device_isp3_mem_address, + ipu_device_isp3_mem_databus_address + } +}; + +#ifdef C_RUN + +/* Mapping between hrt_hive_processors enum and cell_id's used in FW */ +static const int ipu_device_map_cell_id_to_crun_proc_id[NUM_CELLS] = { + 4, /* SPC0 */ + 5, /* SPP0 */ + 6, /* SPP1 */ + 0, /* ISP0 */ + 1, /* ISP1 */ + 2, /* ISP2 */ + 3 /* ISP3 */ +}; + +#endif + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_ff_devices.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_ff_devices.h new file mode 100644 index 000000000000..3af7ba63a364 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_ff_devices.h @@ -0,0 +1,55 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
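[Illustrative note, not part of the patch] To show how the table above is consumed: the generic accessors from ipu_device_cell_properties_func.h simply index it by cell id, so the lookups below resolve to the aliases defined in ipu_device_cell_properties_defs.h:

    /* SPC0 row -> sp2600_control properties -> DMEM entry:
     * ipu_device_cell_memory_address(SPC0, IPU_DEVICE_CELL_DMEM)
     *     == SPC0_DMEM_CBUS_ADDRESS == 0x00008000
     * whereas the DMA-visible alias comes from the databus table:
     * ipu_device_cell_databus_memory_address(SPC0, IPU_DEVICE_CELL_DMEM)
     *     == SPC0_DMEM_DBUS_ADDRESS == 0x02000000
     */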
+ */ + +#ifndef __IPU_DEVICE_FF_DEVICES_H +#define __IPU_DEVICE_FF_DEVICES_H + +enum ipu_device_ff_id { + /* PSA fixed functions */ + IPU_DEVICE_FF_WBA_WBA = 0, + IPU_DEVICE_FF_RYNR_SPLITTER, + IPU_DEVICE_FF_RYNR_COLLECTOR, + IPU_DEVICE_FF_RYNR_BNLM, + IPU_DEVICE_FF_RYNR_VCUD, + IPU_DEVICE_FF_DEMOSAIC_DEMOSAIC, + IPU_DEVICE_FF_ACM_CCM, + IPU_DEVICE_FF_ACM_ACM, + IPU_DEVICE_FF_GTC_CSC_CDS, + IPU_DEVICE_FF_GTC_GTM, + IPU_DEVICE_FF_YUV1_SPLITTER, + IPU_DEVICE_FF_YUV1_IEFD, + IPU_DEVICE_FF_YUV1_YDS, + IPU_DEVICE_FF_YUV1_TCC, + IPU_DEVICE_FF_DVS_YBIN, + IPU_DEVICE_FF_DVS_DVS, + IPU_DEVICE_FF_LACE_LACE, + /* ISA fixed functions */ + IPU_DEVICE_FF_ICA_INL, + IPU_DEVICE_FF_ICA_GBL, + IPU_DEVICE_FF_ICA_PCLN, + IPU_DEVICE_FF_LSC_LSC, + IPU_DEVICE_FF_DPC_DPC, + IPU_DEVICE_FF_IDS_SCALER, + IPU_DEVICE_FF_AWB_AWRG, + IPU_DEVICE_FF_AF_AF, + IPU_DEVICE_FF_AE_WGHT_HIST, + IPU_DEVICE_FF_AE_CCM, + IPU_DEVICE_FF_NUM_FF +}; + +#define IPU_DEVICE_FF_NUM_PSA_FF (IPU_DEVICE_FF_LACE_LACE + 1) +#define IPU_DEVICE_FF_NUM_ISA_FF \ + (IPU_DEVICE_FF_NUM_FF - IPU_DEVICE_FF_NUM_PSA_FF) + +#endif /* __IPU_DEVICE_FF_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_gp_devices.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_gp_devices.h new file mode 100644 index 000000000000..f6afd6003324 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/psys/bxtB0/ipu_device_gp_devices.h @@ -0,0 +1,67 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
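[Illustrative sketch, not part of the patch] A worked check of the split above: IPU_DEVICE_FF_LACE_LACE has index 16, so the PSA group counts 17 fixed functions and the ISA group the remaining 10 of 27. A compile-time guard could encode this:

    /* breaks the build if the enum ordering above changes unexpectedly */
    typedef char example_ff_split_check[
            (IPU_DEVICE_FF_NUM_PSA_FF == 17 &&
             IPU_DEVICE_FF_NUM_ISA_FF == 10 &&
             IPU_DEVICE_FF_NUM_FF == 27) ? 1 : -1];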
+*/ + +#ifndef __IPU_DEVICE_GP_DEVICES_H +#define __IPU_DEVICE_GP_DEVICES_H +#include "math_support.h" +#include "type_support.h" + +enum ipu_device_gp_id { + IPU_DEVICE_GP_PSA = 0, /* PSA */ + IPU_DEVICE_GP_ISA_STATIC, /* ISA Static */ + IPU_DEVICE_GP_ISA_RUNTIME, /* ISA Runtime */ + IPU_DEVICE_GP_ISL, /* ISL */ + IPU_DEVICE_GP_NUM_GP +}; + +enum ipu_device_gp_psa_mux_id { + /* Post RYNR/CCN: 0-To ACM (Video), 1-To Demosaic (Stills)*/ + IPU_DEVICE_GP_PSA_STILLS_MODE_MUX = 0, + /* Post Vec2Str 4: 0-To GTC, 1-To ACM */ + IPU_DEVICE_GP_PSA_V2S_RGB_4_DEMUX, + /* Post DM and pre ACM 0-CCM/ACM: 1-DM Component Splitter */ + IPU_DEVICE_GP_PSA_S2V_RGB_F_MUX, + /* Pre ACM/CCM: 0-To CCM/ACM, 1-To str2vec id_f */ + IPU_DEVICE_GP_PSA_ACM_DEMUX, + IPU_DEVICE_GP_PSA_MUX_NUM_MUX +}; + +enum ipu_device_gp_isa_static_mux_id { + IPU_DEVICE_GP_ISA_STATIC_MUX_SEL = 0, + IPU_DEVICE_GP_ISA_STATIC_PORTA_BLK, + IPU_DEVICE_GP_ISA_STATIC_PORTB_BLK, + IPU_DEVICE_GP_ISA_STATIC_PORTC_BLK, + IPU_DEVICE_GP_ISA_STATIC_AWB_MUX_SEL, + IPU_DEVICE_GP_ISA_STATIC_AWB_MUX_INPUT_CORR_PORT_BLK, + IPU_DEVICE_GP_ISA_STATIC_AWB_MUX_DPC_PORT_BLK, + IPU_DEVICE_GP_ISA_STATIC_MUX_NUM_MUX +}; + +enum ipu_device_gp_isa_runtime_mux_id { + IPU_DEVICE_GP_ISA_RUNTIME_FRAME_SIZE = 0, + IPU_DEVICE_GP_ISA_RUNTIME_SCALED_FRAME_SIZE, + IPU_DEVICE_GP_ISA_RUNTIME_MUX_NUM_MUX +}; + +enum ipu_device_gp_isl_mux_id { + IPU_DEVICE_GP_ISL_MIPI_BE_MUX = 0, + IPU_DEVICE_GP_ISL_MUX_NUM_MUX +}; + +#define IPU_DEVICE_GP_MAX_NUM MAX4((uint32_t)IPU_DEVICE_GP_PSA_MUX_NUM_MUX, \ + (uint32_t)IPU_DEVICE_GP_ISA_STATIC_MUX_NUM_MUX, \ + (uint32_t)IPU_DEVICE_GP_ISA_RUNTIME_MUX_NUM_MUX, \ + (uint32_t)IPU_DEVICE_GP_ISL_MUX_NUM_MUX) + +#endif /* __IPU_DEVICE_GP_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_isp2600_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_isp2600_properties_impl.h new file mode 100644 index 000000000000..de733be67998 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_isp2600_properties_impl.h @@ -0,0 +1,151 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
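[Illustrative sketch, not part of the patch] Tying this header to the value enums in ipu_device_gp_properties_types.h; routing gp registers through the cmem store helper is an assumption of this example:

    static void example_select_stills_path(unsigned int ssid)
    {
            unsigned int addr = ipu_device_gp_mux_addr(IPU_DEVICE_GP_PSA,
                            IPU_DEVICE_GP_PSA_STILLS_MODE_MUX);

            /* 1 = route through Demosaic (stills), per the PSA value enum */
            ia_css_cmem_store_32(ssid, addr,
                            IPU_DEVICE_GP_PSA_MUX_POST_RYNR_ROUTE_W_DM);
    }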
+*/ + +#ifndef __IPU_DEVICE_ISP2600_PROPERTIES_IMPL_H +#define __IPU_DEVICE_ISP2600_PROPERTIES_IMPL_H + +/* isp2600 definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_isp2600_registers { + /* control registers */ + IPU_DEVICE_ISP2600_STAT_CTRL = 0x0, + IPU_DEVICE_ISP2600_START_PC = 0x4, + + /* master port registers */ + IPU_DEVICE_ISP2600_ICACHE_BASE = 0x10, + IPU_DEVICE_ISP2600_ICACHE_INFO = 0x14, + IPU_DEVICE_ISP2600_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_ISP2600_QMEM_BASE = 0x1C, + + IPU_DEVICE_ISP2600_CMEM_BASE = 0x28, + + IPU_DEVICE_ISP2600_XMEM_BASE = 0x88, + IPU_DEVICE_ISP2600_XMEM_INFO = 0x8C, + IPU_DEVICE_ISP2600_XMEM_INFO_OVERRIDE = 0x90, + + IPU_DEVICE_ISP2600_XVMEM_BASE = 0xB8, + + /* debug registers */ + IPU_DEVICE_ISP2600_DEBUG_PC = 0x130, + IPU_DEVICE_ISP2600_STALL = 0x134 +}; + + +enum ipu_device_isp2600_memories { + IPU_DEVICE_ISP2600_REGS, + IPU_DEVICE_ISP2600_PMEM, + IPU_DEVICE_ISP2600_DMEM, + IPU_DEVICE_ISP2600_BAMEM, + IPU_DEVICE_ISP2600_VMEM, + IPU_DEVICE_ISP2600_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_isp2600_mem_size[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + 0x00140, + 0x14000, + 0x04000, + 0x20000, + 0x20000 +}; + + +enum ipu_device_isp2600_masters { + IPU_DEVICE_ISP2600_ICACHE, + IPU_DEVICE_ISP2600_QMEM, + IPU_DEVICE_ISP2600_CMEM, + IPU_DEVICE_ISP2600_XMEM, + IPU_DEVICE_ISP2600_XVMEM, + IPU_DEVICE_ISP2600_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_isp2600_masters[IPU_DEVICE_ISP2600_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_ISP2600_ICACHE_BASE, + IPU_DEVICE_ISP2600_ICACHE_INFO, + IPU_DEVICE_ISP2600_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_ISP2600_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 3, + 0xC, + IPU_DEVICE_ISP2600_CMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 2, + 0xC, + IPU_DEVICE_ISP2600_XMEM_BASE, + IPU_DEVICE_ISP2600_XMEM_INFO, + IPU_DEVICE_ISP2600_XMEM_INFO_OVERRIDE + }, + { + 3, + 0xC, + IPU_DEVICE_ISP2600_XVMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + } +}; + +enum ipu_device_isp2600_stall_bits { + IPU_DEVICE_ISP2600_STALL_ICACHE0, + IPU_DEVICE_ISP2600_STALL_ICACHE1, + IPU_DEVICE_ISP2600_STALL_DMEM, + IPU_DEVICE_ISP2600_STALL_QMEM, + IPU_DEVICE_ISP2600_STALL_CMEM, + IPU_DEVICE_ISP2600_STALL_XMEM, + IPU_DEVICE_ISP2600_STALL_BAMEM, + IPU_DEVICE_ISP2600_STALL_VMEM, + IPU_DEVICE_ISP2600_STALL_XVMEM, + IPU_DEVICE_ISP2600_NUM_STALL_BITS +}; + +#define IPU_DEVICE_ISP2600_ICACHE_WORD_SIZE 64 /* 512 bits per instruction */ +#define IPU_DEVICE_ISP2600_ICACHE_BURST_SIZE 8 /* 8 instructions per burst */ + +static const struct ipu_device_cell_count_s ipu_device_isp2600_count = { + IPU_DEVICE_ISP2600_NUM_MEMORIES, + IPU_DEVICE_ISP2600_NUM_MASTERS, + IPU_DEVICE_ISP2600_NUM_STALL_BITS, + IPU_DEVICE_ISP2600_ICACHE_WORD_SIZE * + IPU_DEVICE_ISP2600_ICACHE_BURST_SIZE +}; + +static const unsigned int ipu_device_isp2600_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0x130, 0x134 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_isp2600_properties = { + &ipu_device_isp2600_count, + ipu_device_isp2600_masters, + ipu_device_isp2600_reg_offset, + ipu_device_isp2600_mem_size +}; + +#endif /* __IPU_DEVICE_ISP2600_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_control_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_control_properties_impl.h new file mode 100644 index 000000000000..430295cd9d94 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_control_properties_impl.h @@ -0,0 +1,136 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H +#define __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H + +/* sp2600_control definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_sp2600_control_registers { + /* control registers */ + IPU_DEVICE_SP2600_CONTROL_STAT_CTRL = 0x0, + IPU_DEVICE_SP2600_CONTROL_START_PC = 0x4, + + /* master port registers */ + IPU_DEVICE_SP2600_CONTROL_ICACHE_BASE = 0x10, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO = 0x14, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_SP2600_CONTROL_QMEM_BASE = 0x1C, + + IPU_DEVICE_SP2600_CONTROL_CMEM_BASE = 0x28, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO = 0x2C, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO_OVERRIDE = 0x30, + + IPU_DEVICE_SP2600_CONTROL_XMEM_BASE = 0x58, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO = 0x5C, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO_OVERRIDE = 0x60, + + /* debug registers */ + IPU_DEVICE_SP2600_CONTROL_DEBUG_PC = 0x9C, + IPU_DEVICE_SP2600_CONTROL_STALL = 0xA0 +}; + +enum ipu_device_sp2600_control_mems { + IPU_DEVICE_SP2600_CONTROL_REGS, + IPU_DEVICE_SP2600_CONTROL_PMEM, + IPU_DEVICE_SP2600_CONTROL_DMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_sp2600_control_mem_size[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + 0x000AC, + 0x00000, + 0x10000 +}; + +enum ipu_device_sp2600_control_masters { + IPU_DEVICE_SP2600_CONTROL_ICACHE, + IPU_DEVICE_SP2600_CONTROL_QMEM, + IPU_DEVICE_SP2600_CONTROL_CMEM, + IPU_DEVICE_SP2600_CONTROL_XMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_sp2600_control_masters[IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_SP2600_CONTROL_ICACHE_BASE, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_SP2600_CONTROL_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_CONTROL_CMEM_BASE, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO_OVERRIDE + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_CONTROL_XMEM_BASE, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO_OVERRIDE + } +}; + +enum ipu_device_sp2600_control_stall_bits { + IPU_DEVICE_SP2600_CONTROL_STALL_ICACHE, + IPU_DEVICE_SP2600_CONTROL_STALL_DMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_QMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_CMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_XMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_STALL_BITS +}; + +/* 32 bits per instruction */ +#define IPU_DEVICE_SP2600_CONTROL_ICACHE_WORD_SIZE 4 +/* 32 instructions per burst */ +#define IPU_DEVICE_SP2600_CONTROL_ICACHE_BURST_SIZE 32 + +static const struct ipu_device_cell_count_s ipu_device_sp2600_control_count = { + IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES, + 
IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS, + IPU_DEVICE_SP2600_CONTROL_NUM_STALL_BITS, + IPU_DEVICE_SP2600_CONTROL_ICACHE_WORD_SIZE * + IPU_DEVICE_SP2600_CONTROL_ICACHE_BURST_SIZE +}; + +static const unsigned int +ipu_device_sp2600_control_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0x9C, 0xA0 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_sp2600_control_properties = { + &ipu_device_sp2600_control_count, + ipu_device_sp2600_control_masters, + ipu_device_sp2600_control_reg_offset, + ipu_device_sp2600_control_mem_size +}; + +#endif /* __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_fp_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_fp_properties_impl.h new file mode 100644 index 000000000000..b3f120f9fea8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_fp_properties_impl.h @@ -0,0 +1,140 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_SP2600_FP_PROPERTIES_IMPL_H +#define __IPU_DEVICE_SP2600_FP_PROPERTIES_IMPL_H + +/* sp2600_fp definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_sp2600_fp_registers { + /* control registers */ + IPU_DEVICE_SP2600_FP_STAT_CTRL = 0x0, + IPU_DEVICE_SP2600_FP_START_PC = 0x4, + + /* master port registers */ + IPU_DEVICE_SP2600_FP_ICACHE_BASE = 0x10, + IPU_DEVICE_SP2600_FP_ICACHE_INFO = 0x14, + IPU_DEVICE_SP2600_FP_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_SP2600_FP_QMEM_BASE = 0x1C, + + IPU_DEVICE_SP2600_FP_CMEM_BASE = 0x28, + IPU_DEVICE_SP2600_FP_CMEM_INFO = 0x2C, + IPU_DEVICE_SP2600_FP_CMEM_INFO_OVERRIDE = 0x30, + + IPU_DEVICE_SP2600_FP_XMEM_BASE = 0x88, + IPU_DEVICE_SP2600_FP_XMEM_INFO = 0x8C, + IPU_DEVICE_SP2600_FP_XMEM_INFO_OVERRIDE = 0x90, + + /* debug registers */ + IPU_DEVICE_SP2600_FP_DEBUG_PC = 0xCC, + IPU_DEVICE_SP2600_FP_STALL = 0xD0 +}; + + +enum ipu_device_sp2600_fp_memories { + IPU_DEVICE_SP2600_FP_REGS, + IPU_DEVICE_SP2600_FP_PMEM, + IPU_DEVICE_SP2600_FP_DMEM, + IPU_DEVICE_SP2600_FP_DMEM1, + IPU_DEVICE_SP2600_FP_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_sp2600_fp_mem_size[IPU_DEVICE_SP2600_FP_NUM_MEMORIES] = { + 0x000DC, + 0x00000, + 0x10000, + 0x08000 +}; + +enum ipu_device_sp2600_fp_masters { + IPU_DEVICE_SP2600_FP_ICACHE, + IPU_DEVICE_SP2600_FP_QMEM, + IPU_DEVICE_SP2600_FP_CMEM, + IPU_DEVICE_SP2600_FP_XMEM, + IPU_DEVICE_SP2600_FP_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_sp2600_fp_masters[IPU_DEVICE_SP2600_FP_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_SP2600_FP_ICACHE_BASE, + IPU_DEVICE_SP2600_FP_ICACHE_INFO, + IPU_DEVICE_SP2600_FP_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_SP2600_FP_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 3, + 0xC, + IPU_DEVICE_SP2600_FP_CMEM_BASE, + IPU_DEVICE_SP2600_FP_CMEM_INFO, + IPU_DEVICE_SP2600_FP_CMEM_INFO_OVERRIDE + }, + { + 2, + 
0xC, + IPU_DEVICE_SP2600_FP_XMEM_BASE, + IPU_DEVICE_SP2600_FP_XMEM_INFO, + IPU_DEVICE_SP2600_FP_XMEM_INFO_OVERRIDE + } +}; + +enum ipu_device_sp2600_fp_stall_bits { + IPU_DEVICE_SP2600_FP_STALL_ICACHE, + IPU_DEVICE_SP2600_FP_STALL_DMEM, + IPU_DEVICE_SP2600_FP_STALL_QMEM, + IPU_DEVICE_SP2600_FP_STALL_CMEM, + IPU_DEVICE_SP2600_FP_STALL_XMEM, + IPU_DEVICE_SP2600_FP_STALL_DMEM1, + IPU_DEVICE_SP2600_FP_NUM_STALL_BITS +}; + +/* 32 bits per instruction */ +#define IPU_DEVICE_SP2600_FP_ICACHE_WORD_SIZE 4 +/* 32 instructions per burst */ +#define IPU_DEVICE_SP2600_FP_ICACHE_BURST_SIZE 32 + +static const struct ipu_device_cell_count_s ipu_device_sp2600_fp_count = { + IPU_DEVICE_SP2600_FP_NUM_MEMORIES, + IPU_DEVICE_SP2600_FP_NUM_MASTERS, + IPU_DEVICE_SP2600_FP_NUM_STALL_BITS, + IPU_DEVICE_SP2600_FP_ICACHE_WORD_SIZE * + IPU_DEVICE_SP2600_FP_ICACHE_BURST_SIZE +}; + +static const unsigned int +ipu_device_sp2600_fp_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0x9C, 0xA0 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_sp2600_fp_properties = { + &ipu_device_sp2600_fp_count, + ipu_device_sp2600_fp_masters, + ipu_device_sp2600_fp_reg_offset, + ipu_device_sp2600_fp_mem_size +}; + +#endif /* __IPU_DEVICE_SP2600_FP_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_proxy_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_proxy_properties_impl.h new file mode 100644 index 000000000000..6fdcd7faea9b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/devices/src/ipu_device_sp2600_proxy_properties_impl.h @@ -0,0 +1,138 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_SP2600_PROXY_PROPERTIES_IMPL_H +#define __IPU_DEVICE_SP2600_PROXY_PROPERTIES_IMPL_H + +/* sp2600_proxy definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_sp2600_proxy_registers { + /* control registers */ + IPU_DEVICE_SP2600_PROXY_STAT_CTRL = 0x0, + IPU_DEVICE_SP2600_PROXY_START_PC = 0x4, + + /* THESE ADDRESSES NEED TO BE CHECKED !!!! 
*/ + /* master port registers */ + IPU_DEVICE_SP2600_PROXY_ICACHE_BASE = 0x10, + IPU_DEVICE_SP2600_PROXY_ICACHE_INFO = 0x14, + IPU_DEVICE_SP2600_PROXY_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_SP2600_PROXY_QMEM_BASE = 0x1C, + + IPU_DEVICE_SP2600_PROXY_CMEM_BASE = 0x28, + IPU_DEVICE_SP2600_PROXY_CMEM_INFO = 0x2C, + IPU_DEVICE_SP2600_PROXY_CMEM_INFO_OVERRIDE = 0x30, + + IPU_DEVICE_SP2600_PROXY_XMEM_BASE = 0x58, + IPU_DEVICE_SP2600_PROXY_XMEM_INFO = 0x5C, + IPU_DEVICE_SP2600_PROXY_XMEM_INFO_OVERRIDE = 0x60, + + /* debug registers */ + IPU_DEVICE_SP2600_PROXY_DEBUG_PC = 0x9C, + IPU_DEVICE_SP2600_PROXY_STALL = 0xA0 +}; + + +enum ipu_device_sp2600_proxy_memories { + IPU_DEVICE_SP2600_PROXY_REGS, + IPU_DEVICE_SP2600_PROXY_PMEM, + IPU_DEVICE_SP2600_PROXY_DMEM, + IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_sp2600_proxy_mem_size[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + 0x00AC, + 0x0000, + 0x4000 +}; + +enum ipu_device_sp2600_proxy_masters { + IPU_DEVICE_SP2600_PROXY_ICACHE, + IPU_DEVICE_SP2600_PROXY_QMEM, + IPU_DEVICE_SP2600_PROXY_CMEM, + IPU_DEVICE_SP2600_PROXY_XMEM, + IPU_DEVICE_SP2600_PROXY_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_sp2600_proxy_masters[IPU_DEVICE_SP2600_PROXY_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_SP2600_PROXY_ICACHE_BASE, + IPU_DEVICE_SP2600_PROXY_ICACHE_INFO, + IPU_DEVICE_SP2600_PROXY_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_SP2600_PROXY_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_PROXY_CMEM_BASE, + IPU_DEVICE_SP2600_PROXY_CMEM_INFO, + IPU_DEVICE_SP2600_PROXY_CMEM_INFO_OVERRIDE + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_PROXY_XMEM_BASE, + IPU_DEVICE_SP2600_PROXY_XMEM_INFO, + IPU_DEVICE_SP2600_PROXY_XMEM_INFO_OVERRIDE + } +}; + +enum ipu_device_sp2600_proxy_stall_bits { + IPU_DEVICE_SP2600_PROXY_STALL_ICACHE, + IPU_DEVICE_SP2600_PROXY_STALL_DMEM, + IPU_DEVICE_SP2600_PROXY_STALL_QMEM, + IPU_DEVICE_SP2600_PROXY_STALL_CMEM, + IPU_DEVICE_SP2600_PROXY_STALL_XMEM, + IPU_DEVICE_SP2600_PROXY_NUM_STALL_BITS +}; + +/* 32 bits per instruction */ +#define IPU_DEVICE_SP2600_PROXY_ICACHE_WORD_SIZE 4 +/* 32 instructions per burst */ +#define IPU_DEVICE_SP2600_PROXY_ICACHE_BURST_SIZE 32 + +static const struct ipu_device_cell_count_s ipu_device_sp2600_proxy_count = { + IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES, + IPU_DEVICE_SP2600_PROXY_NUM_MASTERS, + IPU_DEVICE_SP2600_PROXY_NUM_STALL_BITS, + IPU_DEVICE_SP2600_PROXY_ICACHE_WORD_SIZE * + IPU_DEVICE_SP2600_PROXY_ICACHE_BURST_SIZE +}; + +static const unsigned int +ipu_device_sp2600_proxy_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0xCC, 0xD0 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_sp2600_proxy_properties = { + &ipu_device_sp2600_proxy_count, + ipu_device_sp2600_proxy_masters, + ipu_device_sp2600_proxy_reg_offset, + ipu_device_sp2600_proxy_mem_size +}; + +#endif /* __IPU_DEVICE_SP2600_PROXY_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/fw_abi_cpu_types.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/fw_abi_cpu_types.mk new file mode 100644 index 000000000000..b1ffbf7ea21f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/fw_abi_cpu_types.mk @@ -0,0 +1,24 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. 
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details
+#
+
+# MODULE is FW ABI COMMON TYPES
+
+FW_ABI_COMMON_TYPES_DIRS = -I$${MODULES_DIR}/fw_abi_common_types
+FW_ABI_COMMON_TYPES_DIRS += -I$${MODULES_DIR}/fw_abi_common_types/cpu
+
+FW_ABI_COMMON_TYPES_HOST_FILES =
+FW_ABI_COMMON_TYPES_HOST_CPPFLAGS = $(FW_ABI_COMMON_TYPES_DIRS)
+
+FW_ABI_COMMON_TYPES_FW_FILES =
+FW_ABI_COMMON_TYPES_FW_CPPFLAGS = $(FW_ABI_COMMON_TYPES_DIRS)
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_base_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_base_types.h
new file mode 100644
index 000000000000..73062e9db87b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_base_types.h
@@ -0,0 +1,43 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_TERMINAL_BASE_TYPES_H
+#define __IA_CSS_TERMINAL_BASE_TYPES_H
+
+
+#include "type_support.h"
+#include "ia_css_terminal_defs.h"
+
+#define N_UINT16_IN_TERMINAL_STRUCT 3
+#define N_PADDING_UINT8_IN_TERMINAL_STRUCT 5
+
+#define SIZE_OF_TERMINAL_STRUCT_BITS \
+ (IA_CSS_TERMINAL_TYPE_BITS \
+ + IA_CSS_TERMINAL_ID_BITS \
+ + N_UINT16_IN_TERMINAL_STRUCT * IA_CSS_UINT16_T_BITS \
+ + N_PADDING_UINT8_IN_TERMINAL_STRUCT * IA_CSS_UINT8_T_BITS)
+
+/* ==================== Base Terminal - START ==================== */
+struct ia_css_terminal_s { /**< Base terminal */
+ ia_css_terminal_type_t terminal_type; /**< Type ia_css_terminal_type_t */
+ int16_t parent_offset; /**< Offset to the process group */
+ uint16_t size; /**< Size of this whole terminal layout-structure */
+ uint16_t tm_index; /**< Index of the terminal manifest object */
+ ia_css_terminal_ID_t ID; /**< Absolute referral ID for this terminal, valid IDs != 0 */
+ uint8_t padding[N_PADDING_UINT8_IN_TERMINAL_STRUCT];
+};
+/* ==================== Base Terminal - END ==================== */
+
+#endif /* __IA_CSS_TERMINAL_BASE_TYPES_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h
new file mode 100644
index 000000000000..24ad04fe8720
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h
@@ -0,0 +1,43 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H +#define __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H + +#include "ia_css_terminal_defs.h" + +#define N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT 5 +#define SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + (IA_CSS_UINT16_T_BITS \ + + IA_CSS_TERMINAL_ID_BITS \ + + IA_CSS_TERMINAL_TYPE_BITS \ + + IA_CSS_UINT32_T_BITS \ + + (N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT*IA_CSS_UINT8_T_BITS)) + +/* ==================== Base Terminal Manifest - START ==================== */ +struct ia_css_terminal_manifest_s { + ia_css_terminal_type_t terminal_type; /**< Type ia_css_terminal_type_t */ + int16_t parent_offset; /**< Offset to the program group manifest */ + uint16_t size; /**< Size of this whole terminal-manifest layout-structure */ + ia_css_terminal_ID_t ID; + uint8_t padding[N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT]; +}; + +typedef struct ia_css_terminal_manifest_s + ia_css_terminal_manifest_t; + +/* ==================== Base Terminal Manifest - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/ia_css_base_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/ia_css_base_types.h new file mode 100644 index 000000000000..cd508f05ed40 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/ia_css_base_types.h @@ -0,0 +1,39 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_BASE_TYPES_H +#define __IA_CSS_BASE_TYPES_H + +#include "type_support.h" + +#define VIED_VADDRESS_BITS 32 +typedef uint32_t vied_vaddress_t; + +#define DEVICE_DESCRIPTOR_ID_BITS 32 +typedef struct { + uint8_t device_id; + uint8_t instance_id; + uint8_t channel_id; + uint8_t section_id; +} device_descriptor_fields_t; + +typedef union { + device_descriptor_fields_t fields; + uint32_t data; +} device_descriptor_id_t; + +typedef uint16_t ia_css_process_id_t; + +#endif /* __IA_CSS_BASE_TYPES_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/ia_css_terminal_defs.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/ia_css_terminal_defs.h new file mode 100644 index 000000000000..3a7b333d3bf5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/fw_abi_common_types/ia_css_terminal_defs.h @@ -0,0 +1,109 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_TERMINAL_DEFS_H
+#define __IA_CSS_TERMINAL_DEFS_H
+
+
+#include "type_support.h"
+
+#define IA_CSS_TERMINAL_ID_BITS 8
+typedef uint8_t ia_css_terminal_ID_t;
+#define IA_CSS_TERMINAL_INVALID_ID ((ia_css_terminal_ID_t)(-1))
+
+/*
+ * Terminal Base Type
+ */
+typedef enum ia_css_terminal_type {
+ /**< Data input */
+ IA_CSS_TERMINAL_TYPE_DATA_IN = 0,
+ /**< Data output */
+ IA_CSS_TERMINAL_TYPE_DATA_OUT,
+ /**< Type 6 parameter input */
+ IA_CSS_TERMINAL_TYPE_PARAM_STREAM,
+ /**< Type 1-5 parameter input */
+ IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN,
+ /**< Type 1-5 parameter output */
+ IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT,
+ /**< Represents the new type of terminal for the
+ * "spatial dependent parameters", when params go in
+ */
+ IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN,
+ /**< Represents the new type of terminal for the
+ * "spatial dependent parameters", when params go out
+ */
+ IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT,
+ /**< Represents the new type of terminal for
+ * explicit slicing, when params go in
+ */
+ IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN,
+ /**< Represents the new type of terminal for
+ * explicit slicing, when params go out
+ */
+ IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT,
+ /**< State (private data) input */
+ IA_CSS_TERMINAL_TYPE_STATE_IN,
+ /**< State (private data) output */
+ IA_CSS_TERMINAL_TYPE_STATE_OUT,
+ IA_CSS_TERMINAL_TYPE_PROGRAM,
+ IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT,
+ IA_CSS_N_TERMINAL_TYPES
+} ia_css_terminal_type_t;
+
+#define IA_CSS_TERMINAL_TYPE_BITS 32
+
+/* Temporary redirection needed to facilitate merging with the drivers
+ in a backwards compatible manner */
+#define IA_CSS_TERMINAL_TYPE_PARAM_CACHED IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN
+
+/*
+ * Dimensions of the data objects. Note that a C-style
+ * data order is assumed: data is stored by row.
+ */
+/* A strange problem with the hivecc compiler, which is described
+ * at https://icggerrit.ir.intel.com/#/c/51630/1, forces this
+ * enum to be explicitly initialized for the moment
+ */
+typedef enum ia_css_dimension {
+ /**< The number of columns, i.e. the size of the row */
+ IA_CSS_COL_DIMENSION = 0,
+ /**< The number of rows, i.e. the size of the column */
+ IA_CSS_ROW_DIMENSION = 1,
+ IA_CSS_N_DATA_DIMENSION = 2
+} ia_css_dimension_t;
+
+#define IA_CSS_N_COMMAND_COUNT (4)
+
+#ifndef PIPE_GENERATION
+/* Don't include these complex enum structures in Genpipe; it can't
+ * handle them and does not need them
+ */
+/*
+ * enum ia_css_isys_link_id. Lists the link IDs used by the FW for the On The Fly feature
+ */
+typedef enum ia_css_isys_link_id {
+ IA_CSS_ISYS_LINK_OFFLINE = 0,
+ IA_CSS_ISYS_LINK_MAIN_OUTPUT = 1,
+ IA_CSS_ISYS_LINK_PDAF_OUTPUT = 2
+} ia_css_isys_link_id_t;
+#define N_IA_CSS_ISYS_LINK_ID (IA_CSS_ISYS_LINK_PDAF_OUTPUT + 1)
+
+/*
+ * enum ia_css_data_barrier_link_id. Lists the link IDs used by the FW for the data barrier feature
+ */
+typedef enum ia_css_data_barrier_link_id {
+ IA_CSS_DATA_BARRIER_LINK_MEMORY = N_IA_CSS_ISYS_LINK_ID,
+ N_IA_CSS_DATA_BARRIER_LINK_ID
+} ia_css_data_barrier_link_id_t;
+
+#endif /* #ifndef PIPE_GENERATION */
+#endif /* __IA_CSS_TERMINAL_DEFS_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir.h
new file mode 100644
index 000000000000..6bc2fa708d43
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir.h
@@ -0,0 +1,100 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PKG_DIR_H
+#define __IA_CSS_PKG_DIR_H
+
+#include "ia_css_pkg_dir_storage_class.h"
+#include "ia_css_pkg_dir_types.h"
+#include "type_support.h"
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+const ia_css_pkg_dir_entry_t *ia_css_pkg_dir_get_entry(
+ const ia_css_pkg_dir_t *pkg_dir,
+ uint32_t index
+);
+
+/* The user is expected to call the verify function manually;
+ * the other functions do not call it internally
+ */
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+int ia_css_pkg_dir_verify_header(
+ const ia_css_pkg_dir_entry_t *pkg_dir_header
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_get_num_entries(
+ const ia_css_pkg_dir_entry_t *pkg_dir_header
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_get_size_in_bytes(
+ const ia_css_pkg_dir_entry_t *pkg_dir_header
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+enum ia_css_pkg_dir_version ia_css_pkg_dir_get_version(
+ const ia_css_pkg_dir_entry_t *pkg_dir_header
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint16_t ia_css_pkg_dir_set_version(
+ ia_css_pkg_dir_entry_t *pkg_dir_header,
+ enum ia_css_pkg_dir_version version
+);
+
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_entry_get_address_lo(
+ const ia_css_pkg_dir_entry_t *entry
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_entry_get_address_hi(
+ const ia_css_pkg_dir_entry_t *entry
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_entry_get_size(
+ const ia_css_pkg_dir_entry_t *entry
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint16_t ia_css_pkg_dir_entry_get_version(
+ const ia_css_pkg_dir_entry_t *entry
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint8_t ia_css_pkg_dir_entry_get_type(
+ const ia_css_pkg_dir_entry_t *entry
+);
+
+/* Get the address of the specified entry in the PKG_DIR
+ * Note: This function expects the complete PKG_DIR in the same memory space,
+ * and the entries contain offsets, not addresses.
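+ *
+ * A minimal usage sketch (illustrative only; 'fw_blob' and 'i' are
+ * hypothetical names, and the pkg_dir is assumed to be complete and
+ * offset-based, as described above):
+ *
+ *   const ia_css_pkg_dir_t *fw_blob;   (package directory in local memory)
+ *   void *section = ia_css_pkg_dir_get_entry_address(fw_blob, i);
+ *   (section is NULL when entry 'i' is out of range or has size 0)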
+ */
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+void *ia_css_pkg_dir_get_entry_address(
+ const ia_css_pkg_dir_t *pkg_dir,
+ uint32_t index
+);
+
+#ifdef __IA_CSS_PKG_DIR_INLINE__
+
+#include "ia_css_pkg_dir_impl.h"
+
+#endif
+
+#endif /* __IA_CSS_PKG_DIR_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_iunit.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_iunit.h
new file mode 100644
index 000000000000..2e45eaa52727
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_iunit.h
@@ -0,0 +1,47 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PKG_DIR_IUNIT_H
+#define __IA_CSS_PKG_DIR_IUNIT_H
+
+/* In bootflow, the pkg_dir supports up to 16 entries:
+ * pkg_dir_header + Psys_server pg + Isys_server pg + 13 Client pg
+ */
+
+enum {
+ IA_CSS_PKG_DIR_SIZE = 16,
+ IA_CSS_PKG_DIR_ENTRIES = IA_CSS_PKG_DIR_SIZE - 1
+};
+
+#define IUNIT_MAX_CLIENT_PKG_ENTRIES 13
+
+/* Example assignment of unique identifiers for the FW components.
+ * This should match the identifiers in the manifest
+ */
+enum ia_css_pkg_dir_entry_type {
+ IA_CSS_PKG_DIR_HEADER = 0,
+ IA_CSS_PKG_DIR_PSYS_SERVER_PG,
+ IA_CSS_PKG_DIR_ISYS_SERVER_PG,
+ IA_CSS_PKG_DIR_CLIENT_PG
+};
+
+/* Fixed entries in the package directory */
+enum ia_css_pkg_dir_index {
+ IA_CSS_PKG_DIR_PSYS_INDEX = 0,
+ IA_CSS_PKG_DIR_ISYS_INDEX = 1,
+ IA_CSS_PKG_DIR_CLIENT_0 = 2
+};
+
+#endif /* __IA_CSS_PKG_DIR_IUNIT_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_storage_class.h
new file mode 100644
index 000000000000..27e87d1e6774
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_storage_class.h
@@ -0,0 +1,30 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IA_CSS_PKG_DIR_STORAGE_CLASS_H +#define __IA_CSS_PKG_DIR_STORAGE_CLASS_H + + +#include "storage_class.h" + +#ifndef __IA_CSS_PKG_DIR_INLINE__ +#define IA_CSS_PKG_DIR_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PKG_DIR_STORAGE_CLASS_C +#else +#define IA_CSS_PKG_DIR_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PKG_DIR_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PKG_DIR_STORAGE_CLASS_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_types.h new file mode 100644 index 000000000000..ec0ee18b41e1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_types.h @@ -0,0 +1,42 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PKG_DIR_TYPES_H +#define __IA_CSS_PKG_DIR_TYPES_H + +#include "type_support.h" + +struct ia_css_pkg_dir_entry { + uint32_t address[2]; + uint32_t size; + uint16_t version; + uint8_t type; + uint8_t unused; +}; + +typedef void ia_css_pkg_dir_t; +typedef struct ia_css_pkg_dir_entry ia_css_pkg_dir_entry_t; + +/* The version field of the pkg_dir header defines + * if entries contain offsets or pointers + */ +/* This is temporary, until all pkg_dirs use pointers */ +enum ia_css_pkg_dir_version { + IA_CSS_PKG_DIR_POINTER, + IA_CSS_PKG_DIR_OFFSET +}; + + +#endif /* __IA_CSS_PKG_DIR_TYPES_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/pkg_dir.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/pkg_dir.mk new file mode 100644 index 000000000000..a4b4aaa4995e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/pkg_dir.mk @@ -0,0 +1,30 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is PKG DIR + +PKG_DIR_DIR = $${MODULES_DIR}/pkg_dir +PKG_DIR_INTERFACE = $(PKG_DIR_DIR)/interface +PKG_DIR_SOURCES = $(PKG_DIR_DIR)/src + +PKG_DIR_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir.c +PKG_DIR_CPPFLAGS = -I$(PKG_DIR_INTERFACE) +PKG_DIR_CPPFLAGS += -I$(PKG_DIR_SOURCES) +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/../isp/kernels/io_ls/common +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/fw_abi_common_types/ipu +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/fw_abi_common_types/ipu/$(FW_ABI_IPU_TYPES_VERSION) + +PKG_DIR_CREATE_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir_create.c +PKG_DIR_UPDATE_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir_update.c + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir.c new file mode 100644 index 000000000000..348b56833e06 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir.c @@ -0,0 +1,27 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifdef __IA_CSS_PKG_DIR_INLINE__ + +#include "storage_class.h" + +STORAGE_CLASS_INLINE int __ia_css_pkg_dir_avoid_warning_on_empty_file(void) +{ + return 0; +} + +#else +#include "ia_css_pkg_dir_impl.h" + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_impl.h new file mode 100644 index 000000000000..ca5564c7d990 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_impl.h @@ -0,0 +1,202 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_IMPL_H +#define __IA_CSS_PKG_DIR_IMPL_H + +#include "ia_css_pkg_dir.h" +#include "ia_css_pkg_dir_int.h" +#include "error_support.h" +#include "type_support.h" +#include "assert_support.h" + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +const ia_css_pkg_dir_entry_t *ia_css_pkg_dir_get_entry( + const ia_css_pkg_dir_t *pkg_dir, + uint32_t index) +{ + DECLARE_ERRVAL + struct ia_css_pkg_dir_entry *pkg_dir_header = NULL; + + verifexitval(pkg_dir != NULL, EFAULT); + + pkg_dir_header = (struct ia_css_pkg_dir_entry *)pkg_dir; + + /* First entry of the structure is the header, skip that */ + index++; + verifexitval(index < pkg_dir_header->size, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + return NULL; + } + return &(pkg_dir_header[index]); +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +int ia_css_pkg_dir_verify_header(const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + verifexitval(pkg_dir_header != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + return -1; + } + return ((pkg_dir_header->address[0] == PKG_DIR_MAGIC_VAL_0) + && (pkg_dir_header->address[1] == PKG_DIR_MAGIC_VAL_1)) ? + 0 : -1; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_get_num_entries( + const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + uint32_t size = 0; + + verifexitval(pkg_dir_header != NULL, EFAULT); + size = pkg_dir_header->size; + verifexitval(size > 0, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return size - 1; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +enum ia_css_pkg_dir_version +ia_css_pkg_dir_get_version(const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + assert(pkg_dir_header != NULL); + return pkg_dir_header->version; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint16_t ia_css_pkg_dir_set_version(ia_css_pkg_dir_entry_t *pkg_dir_header, + enum ia_css_pkg_dir_version version) +{ + DECLARE_ERRVAL + + verifexitval(pkg_dir_header != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 1; + } + pkg_dir_header->version = version; + return 0; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_get_size_in_bytes( + const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + + verifexitval(pkg_dir_header != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return sizeof(struct ia_css_pkg_dir_entry) * pkg_dir_header->size; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_address_lo( + const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->address[0]; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_address_hi( + const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->address[1]; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_size(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->size; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint16_t ia_css_pkg_dir_entry_get_version(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->version; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint8_t ia_css_pkg_dir_entry_get_type(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) 
{ + return 0; + } + return entry->type; +} + + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +void *ia_css_pkg_dir_get_entry_address(const ia_css_pkg_dir_t *pkg_dir, + uint32_t index) +{ + void *entry_blob = NULL; + const ia_css_pkg_dir_entry_t *pkg_dir_entry = + ia_css_pkg_dir_get_entry(pkg_dir, index-1); + + if ((pkg_dir_entry != NULL) && + (ia_css_pkg_dir_entry_get_size(pkg_dir_entry) > 0)) { + assert(ia_css_pkg_dir_entry_get_address_hi(pkg_dir_entry) == 0); + entry_blob = (void *)((char *)pkg_dir + + ia_css_pkg_dir_entry_get_address_lo(pkg_dir_entry)); + } + return entry_blob; +} + +#endif /* __IA_CSS_PKG_DIR_IMPL_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_int.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_int.h new file mode 100644 index 000000000000..3a50245261e5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_int.h @@ -0,0 +1,50 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PKG_DIR_INT_H +#define __IA_CSS_PKG_DIR_INT_H + +/* + * Package Dir structure as specified in CSE FAS + * + * PKG DIR Header + * Qword 63:56 55 54:48 47:32 31:24 23:0 + * 0 "_IUPKDR_" + * 1 Rsvd Rsvd Type Version Rsvd Size + * + * Version: Version of the Structure + * Size: Size of the entire table (including header) in 16 byte chunks + * Type: Must be 0 for header + * + * Figure 13: PKG DIR Header + * + * + * PKG DIR Entry + * Qword 63:56 55 54:48 47:32 31:24 23:0 + * N Address/Offset + * N+1 Rsvd Rsvd Type Version Rsvd Size + * + * Version: Version # of the Component + * Size: Size of the component in bytes + * Type: Component Identifier + */ + +#define PKG_DIR_SIZE_BITS 24 +#define PKG_DIR_TYPE_BITS 7 + +#define PKG_DIR_MAGIC_VAL_1 (('_' << 24) | ('I' << 16) | ('U' << 8) | 'P') +#define PKG_DIR_MAGIC_VAL_0 (('K' << 24) | ('D' << 16) | ('R' << 8) | '_') + +#endif /* __IA_CSS_PKG_DIR_INT_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/port_env_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/port_env_struct.h new file mode 100644 index 000000000000..4d39a4739a8b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/port_env_struct.h @@ -0,0 +1,24 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __PORT_ENV_STRUCT_H +#define __PORT_ENV_STRUCT_H + +struct port_env { + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; +}; + +#endif /* __PORT_ENV_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/queue.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/queue.h new file mode 100644 index 000000000000..b233ab3baf01 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/queue.h @@ -0,0 +1,40 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __QUEUE_H +#define __QUEUE_H + +#include "queue_struct.h" +#include "port_env_struct.h" + +/* + * SYS queues are created by the host + * SYS queues cannot be accessed through the queue interface + * To send data into a queue a send_port must be opened. + * To receive data from a queue, a recv_port must be opened. + */ + +/* return required buffer size for queue */ +unsigned int +sys_queue_buf_size(unsigned int size, unsigned int token_size); + +/* + * initialize a queue that can hold at least 'size' tokens of + * 'token_size' bytes. + */ +void +sys_queue_init(struct sys_queue *q, unsigned int size, + unsigned int token_size, struct sys_queue_res *res); + +#endif /* __QUEUE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/queue_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/queue_struct.h new file mode 100644 index 000000000000..ef48fcfded2b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/queue_struct.h @@ -0,0 +1,47 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __QUEUE_STRUCT_H +#define __QUEUE_STRUCT_H + +/* queue description, shared between sender and receiver */ + +#include "type_support.h" + +#ifdef __VIED_CELL +typedef struct {uint32_t v[2]; } host_buffer_address_t; +#else +typedef uint64_t host_buffer_address_t; +#endif + +typedef uint32_t vied_buffer_address_t; + + +struct sys_queue { + host_buffer_address_t host_address; + vied_buffer_address_t vied_address; + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* reg no in subsystem's regmem */ + unsigned int rd_reg; + unsigned int _align; +}; + +struct sys_queue_res { + host_buffer_address_t host_address; + vied_buffer_address_t vied_address; + unsigned int reg; +}; + +#endif /* __QUEUE_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/recv_port.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/recv_port.h new file mode 100644 index 000000000000..cce253b26668 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/recv_port.h @@ -0,0 +1,34 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __RECV_PORT_H +#define __RECV_PORT_H + + +struct recv_port; +struct sys_queue; +struct port_env; + +void +recv_port_open(struct recv_port *p, const struct sys_queue *q, + const struct port_env *env); + +unsigned int +recv_port_available(const struct recv_port *p); + +unsigned int +recv_port_transfer(const struct recv_port *p, void *data); + + +#endif /* __RECV_PORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/recv_port_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/recv_port_struct.h new file mode 100644 index 000000000000..52ec563b13cf --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/recv_port_struct.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __RECV_PORT_STRUCT_H
+#define __RECV_PORT_STRUCT_H
+
+#include "buffer_type.h"
+
+struct recv_port {
+ buffer_address buffer; /* address of buffer in DDR */
+ unsigned int size;
+ unsigned int token_size;
+ unsigned int wr_reg; /* index of write pointer located in regmem */
+ unsigned int rd_reg; /* index of read pointer located in regmem */
+
+ unsigned int mmid;
+ unsigned int ssid;
+ unsigned int mem_addr; /* address of memory containing regmem */
+};
+
+#endif /* __RECV_PORT_STRUCT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/send_port.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/send_port.h
new file mode 100644
index 000000000000..04a160f3f019
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/send_port.h
@@ -0,0 +1,52 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __SEND_PORT_H
+#define __SEND_PORT_H
+
+
+/*
+ * A send port can be used to send tokens into a queue.
+ * The interface can be used on any type of processor (host, SP, ...)
+ */
+
+struct send_port;
+struct sys_queue;
+struct port_env;
+
+/*
+ * Open a send port on a queue. After the port is opened, tokens can be sent
+ */
+void
+send_port_open(struct send_port *p, const struct sys_queue *q,
+ const struct port_env *env);
+
+/*
+ * Determine how many tokens can be sent
+ */
+unsigned int
+send_port_available(const struct send_port *p);
+
+/*
+ * Send a token via a send port. The function returns the number of
+ * tokens that have been sent:
+ * 1: the token was accepted
+ * 0: the token was not accepted (full queue)
+ * The size of a token is determined at initialization.
+ */
+unsigned int
+send_port_transfer(const struct send_port *p, const void *data);
+
+
+#endif /* __SEND_PORT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/send_port_struct.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/send_port_struct.h
new file mode 100644
index 000000000000..f834c62bc3db
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/interface/send_port_struct.h
@@ -0,0 +1,32 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __SEND_PORT_STRUCT_H +#define __SEND_PORT_STRUCT_H + +#include "buffer_type.h" + +struct send_port { + buffer_address buffer; + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* index of write pointer in regmem */ + unsigned int rd_reg; /* index of read pointer in regmem */ + + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; +}; + +#endif /* __SEND_PORT_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/port.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/port.mk new file mode 100644 index 000000000000..b3801247802e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/port.mk @@ -0,0 +1,31 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is PORT + +PORT_DIR=$${MODULES_DIR}/port + +PORT_INTERFACE=$(PORT_DIR)/interface +PORT_SOURCES1=$(PORT_DIR)/src + +PORT_HOST_FILES += $(PORT_SOURCES1)/send_port.c +PORT_HOST_FILES += $(PORT_SOURCES1)/recv_port.c +PORT_HOST_FILES += $(PORT_SOURCES1)/queue.c + +PORT_HOST_CPPFLAGS += -I$(PORT_INTERFACE) + +PORT_FW_FILES += $(PORT_SOURCES1)/send_port.c +PORT_FW_FILES += $(PORT_SOURCES1)/recv_port.c + +PORT_FW_CPPFLAGS += -I$(PORT_INTERFACE) diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/queue.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/queue.c new file mode 100644 index 000000000000..eeec99dfe2d0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/queue.c @@ -0,0 +1,47 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#include "queue.h"
+
+#include "regmem_access.h"
+#include "port_env_struct.h"
+
+unsigned int sys_queue_buf_size(unsigned int size, unsigned int token_size)
+{
+ return (size + 1) * token_size;
+}
+
+void
+sys_queue_init(struct sys_queue *q, unsigned int size, unsigned int token_size,
+ struct sys_queue_res *res)
+{
+ unsigned int buf_size;
+
+ /* one extra slot so that a full queue can be distinguished
+ * from an empty one
+ */
+ q->size = size + 1;
+ q->token_size = token_size;
+ buf_size = sys_queue_buf_size(size, token_size);
+
+ /* acquire the shared buffer space */
+ q->host_address = res->host_address;
+ res->host_address += buf_size;
+ q->vied_address = res->vied_address;
+ res->vied_address += buf_size;
+
+ /* acquire the shared read and write pointers */
+ q->wr_reg = res->reg;
+ res->reg++;
+ q->rd_reg = res->reg;
+ res->reg++;
+
+}
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/recv_port.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/recv_port.c
new file mode 100644
index 000000000000..31b36e9ceafb
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/recv_port.c
@@ -0,0 +1,95 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#include "recv_port.h"
+#include "port_env_struct.h" /* for port_env */
+#include "queue_struct.h" /* for sys_queue */
+#include "recv_port_struct.h" /* for recv_port */
+#include "buffer_access.h" /* for buffer_load, buffer_address */
+#include "regmem_access.h" /* for regmem_load_32, regmem_store_32 */
+#include "storage_class.h" /* for STORAGE_CLASS_INLINE */
+#include "math_support.h" /* for OP_std_modadd */
+#include "type_support.h" /* for HOST_ADDRESS */
+
+#ifndef __VIED_CELL
+#include "cpu_mem_support.h" /* for ia_css_cpu_mem_cache_invalidate */
+#endif
+
+void
+recv_port_open(struct recv_port *p, const struct sys_queue *q,
+ const struct port_env *env)
+{
+ p->mmid = env->mmid;
+ p->ssid = env->ssid;
+ p->mem_addr = env->mem_addr;
+
+ p->size = q->size;
+ p->token_size = q->token_size;
+ p->wr_reg = q->wr_reg;
+ p->rd_reg = q->rd_reg;
+
+#ifdef __VIED_CELL
+ p->buffer = q->vied_address;
+#else
+ p->buffer = q->host_address;
+#endif
+}
+
+STORAGE_CLASS_INLINE unsigned int
+recv_port_index(const struct recv_port *p, unsigned int i)
+{
+ unsigned int rd = regmem_load_32(p->mem_addr, p->rd_reg, p->ssid);
+
+ return OP_std_modadd(rd, i, p->size);
+}
+
+unsigned int
+recv_port_available(const struct recv_port *p)
+{
+ int wr = (int)regmem_load_32(p->mem_addr, p->wr_reg, p->ssid);
+ int rd = (int)regmem_load_32(p->mem_addr, p->rd_reg, p->ssid);
+
+ return OP_std_modadd(wr, -rd, p->size);
+}
+
+STORAGE_CLASS_INLINE void
+recv_port_copy(const struct recv_port *p, unsigned int i, void *data)
+{
+ unsigned int rd = recv_port_index(p, i);
+ unsigned int token_size = p->token_size;
+ buffer_address addr = p->buffer + (rd * token_size);
+#ifndef __VIED_CELL
+ ia_css_cpu_mem_cache_invalidate((void *)HOST_ADDRESS(p->buffer),
+ token_size*p->size);
+#endif
+ buffer_load(addr, data, token_size, p->mmid);
+}
+
+STORAGE_CLASS_INLINE void
+recv_port_release(const struct recv_port *p, unsigned int i) +{ + unsigned int rd = recv_port_index(p, i); + + regmem_store_32(p->mem_addr, p->rd_reg, rd, p->ssid); +} + +unsigned int +recv_port_transfer(const struct recv_port *p, void *data) +{ + if (!recv_port_available(p)) + return 0; + recv_port_copy(p, 0, data); + recv_port_release(p, 1); + return 1; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/send_port.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/send_port.c new file mode 100644 index 000000000000..8d1fba08c5d5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/port/src/send_port.c @@ -0,0 +1,94 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "send_port.h" +#include "queue_struct.h" /* for sys_queue */ +#include "send_port_struct.h" /* for send_port */ +#include "port_env_struct.h" /* for port_env */ +#include "regmem_access.h" /* for regmem_load_32, regmem_store_32 */ +#include "buffer_access.h" /* for buffer_store, buffer_address */ +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "math_support.h" /* for OP_std_modadd */ +#include "type_support.h" /* for HOST_ADDRESS */ + +#ifndef __VIED_CELL +#include "cpu_mem_support.h" /* for ia_css_cpu_mem_cache_flush */ +#endif + +void +send_port_open(struct send_port *p, const struct sys_queue *q, + const struct port_env *env) +{ + p->mmid = env->mmid; + p->ssid = env->ssid; + p->mem_addr = env->mem_addr; + + p->size = q->size; + p->token_size = q->token_size; + p->wr_reg = q->wr_reg; + p->rd_reg = q->rd_reg; +#ifdef __VIED_CELL + p->buffer = q->vied_address; +#else + p->buffer = q->host_address; +#endif +} + +STORAGE_CLASS_INLINE unsigned int +send_port_index(const struct send_port *p, unsigned int i) +{ + unsigned int wr = regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + + return OP_std_modadd(wr, i, p->size); +} + +unsigned int +send_port_available(const struct send_port *p) +{ + int rd = (int)regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + int wr = (int)regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + + return OP_std_modadd(rd, -(wr+1), p->size); +} + +STORAGE_CLASS_INLINE void +send_port_copy(const struct send_port *p, unsigned int i, const void *data) +{ + unsigned int wr = send_port_index(p, i); + unsigned int token_size = p->token_size; + buffer_address addr = p->buffer + (wr * token_size); + + buffer_store(addr, data, token_size, p->mmid); +#ifndef __VIED_CELL + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(addr), token_size); +#endif +} + +STORAGE_CLASS_INLINE void +send_port_release(const struct send_port *p, unsigned int i) +{ + unsigned int wr = send_port_index(p, i); + + regmem_store_32(p->mem_addr, p->wr_reg, wr, p->ssid); +} + +unsigned int +send_port_transfer(const struct send_port *p, const void *data) +{ + if (!send_port_available(p)) + return 0; + send_port_copy(p, 0, data); + send_port_release(p, 1); + return 1; +} diff --git 
a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_private_pg/interface/ia_css_psys_private_pg_data.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_private_pg/interface/ia_css_psys_private_pg_data.h
new file mode 100644
index 000000000000..b36dbbca96ca
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_private_pg/interface/ia_css_psys_private_pg_data.h
@@ -0,0 +1,44 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_PRIVATE_PG_DATA_H
+#define __IA_CSS_PSYS_PRIVATE_PG_DATA_H
+
+#include "ipu_device_acb_devices.h"
+#include "ipu_device_gp_devices.h"
+#include "type_support.h"
+#include "vied_nci_acb_route_type.h"
+
+#define PRIV_CONF_INVALID 0xFF
+
+struct ia_css_psys_pg_buffer_information_s {
+ unsigned int buffer_base_addr;
+ unsigned int bpe;
+ unsigned int buffer_width;
+ unsigned int buffer_height;
+ unsigned int num_of_buffers;
+ unsigned int dfm_port_addr;
+};
+
+typedef struct ia_css_psys_pg_buffer_information_s ia_css_psys_pg_buffer_information_t;
+
+struct ia_css_psys_private_pg_data {
+ nci_acb_route_t acb_route[IPU_DEVICE_ACB_NUM_ACB];
+ uint8_t psa_mux_conf[IPU_DEVICE_GP_PSA_MUX_NUM_MUX];
+ uint8_t isa_mux_conf[IPU_DEVICE_GP_ISA_STATIC_MUX_NUM_MUX];
+ ia_css_psys_pg_buffer_information_t input_buffer_info;
+};
+
+#endif /* __IA_CSS_PSYS_PRIVATE_PG_DATA_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/interface/ia_css_bxt_spctrl_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/interface/ia_css_bxt_spctrl_trace.h
new file mode 100644
index 000000000000..eee1d6ab0a49
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/interface/ia_css_bxt_spctrl_trace.h
@@ -0,0 +1,107 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_BXT_SPCTRL_TRACE_H
+#define __IA_CSS_BXT_SPCTRL_TRACE_H
+
+#include "ia_css_trace.h"
+
+/* 0 is not used, so that a wrong or missing configuration passed in
+ * from the .mk file can be detected.
+ * Log levels outside the range below will trigger the
+ * "No BXT_SPCTRL_TRACE_CONFIG Tracing level defined" error
+ */
+#define BXT_SPCTRL_TRACE_LOG_LEVEL_OFF 1
+#define BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL 2
+#define BXT_SPCTRL_TRACE_LOG_LEVEL_DEBUG 3
+
+/* BXT_SPCTRL and all the submodules in BXT_SPCTRL will have the
+ * default tracing level set to the BXT_SPCTRL_TRACE_CONFIG level.
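+ * For example (illustrative only), a debug build could pass
+ * BXT_SPCTRL_TRACE_CONFIG = BXT_SPCTRL_TRACE_LOG_LEVEL_DEBUG
+ * from the .mk file to enable every trace level, including VERBOSE.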
+ * If not defined in the psysapi.mk file it will be set by
+ * default to normal tracing (BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL)
+ */
+#define BXT_SPCTRL_TRACE_CONFIG_DEFAULT BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL
+
+#if !defined(BXT_SPCTRL_TRACE_CONFIG)
+# define BXT_SPCTRL_TRACE_CONFIG BXT_SPCTRL_TRACE_CONFIG_DEFAULT
+#endif
+
+/* BXT_SPCTRL Module tracing backend is mapped to TUNIT tracing for
+ * target platforms
+ */
+#ifdef __HIVECC
+# ifndef HRT_CSIM
+# define BXT_SPCTRL_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE
+# else
+# define BXT_SPCTRL_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE
+# endif
+#else
+# define BXT_SPCTRL_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE
+#endif
+
+#if (defined(BXT_SPCTRL_TRACE_CONFIG))
+ /* Module specific trace setting */
+# if BXT_SPCTRL_TRACE_CONFIG == BXT_SPCTRL_TRACE_LOG_LEVEL_OFF
+ /* BXT_SPCTRL_TRACE_LOG_LEVEL_OFF */
+# define BXT_SPCTRL_TRACE_LEVEL_ASSERT \
+ IA_CSS_TRACE_LEVEL_DISABLED
+# define BXT_SPCTRL_TRACE_LEVEL_ERROR \
+ IA_CSS_TRACE_LEVEL_DISABLED
+# define BXT_SPCTRL_TRACE_LEVEL_WARNING \
+ IA_CSS_TRACE_LEVEL_DISABLED
+# define BXT_SPCTRL_TRACE_LEVEL_INFO \
+ IA_CSS_TRACE_LEVEL_DISABLED
+# define BXT_SPCTRL_TRACE_LEVEL_DEBUG \
+ IA_CSS_TRACE_LEVEL_DISABLED
+# define BXT_SPCTRL_TRACE_LEVEL_VERBOSE \
+ IA_CSS_TRACE_LEVEL_DISABLED
+# elif BXT_SPCTRL_TRACE_CONFIG == BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL
+ /* BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL */
+# define BXT_SPCTRL_TRACE_LEVEL_ASSERT \
+ IA_CSS_TRACE_LEVEL_DISABLED
+# define BXT_SPCTRL_TRACE_LEVEL_ERROR \
+ IA_CSS_TRACE_LEVEL_ENABLED
+# define BXT_SPCTRL_TRACE_LEVEL_WARNING \
+ IA_CSS_TRACE_LEVEL_DISABLED
+# define BXT_SPCTRL_TRACE_LEVEL_INFO \
+ IA_CSS_TRACE_LEVEL_ENABLED
+# define BXT_SPCTRL_TRACE_LEVEL_DEBUG \
+ IA_CSS_TRACE_LEVEL_DISABLED
+# define BXT_SPCTRL_TRACE_LEVEL_VERBOSE \
+ IA_CSS_TRACE_LEVEL_DISABLED
+# elif BXT_SPCTRL_TRACE_CONFIG == BXT_SPCTRL_TRACE_LOG_LEVEL_DEBUG
+ /* BXT_SPCTRL_TRACE_LOG_LEVEL_DEBUG */
+# define BXT_SPCTRL_TRACE_LEVEL_ASSERT \
+ IA_CSS_TRACE_LEVEL_ENABLED
+# define BXT_SPCTRL_TRACE_LEVEL_ERROR \
+ IA_CSS_TRACE_LEVEL_ENABLED
+# define BXT_SPCTRL_TRACE_LEVEL_WARNING \
+ IA_CSS_TRACE_LEVEL_ENABLED
+# define BXT_SPCTRL_TRACE_LEVEL_INFO \
+ IA_CSS_TRACE_LEVEL_ENABLED
+# define BXT_SPCTRL_TRACE_LEVEL_DEBUG \
+ IA_CSS_TRACE_LEVEL_ENABLED
+# define BXT_SPCTRL_TRACE_LEVEL_VERBOSE \
+ IA_CSS_TRACE_LEVEL_ENABLED
+# else
+# error "No BXT_SPCTRL_TRACE_CONFIG Tracing level defined"
+# endif
+#else
+# error "BXT_SPCTRL_TRACE_CONFIG not defined"
+#endif
+
+/* Overriding submodules in BXT_SPCTRL with a specific tracing level */
+/* #define BXT_SPCTRL_DYNAMIC_TRACING_OVERRIDE TRACE_LOG_LEVEL_VERBOSE */
+
+#endif /* __IA_CSS_BXT_SPCTRL_TRACE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/psys_server.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/psys_server.mk
new file mode 100644
index 000000000000..c4462c984793
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/psys_server.mk
@@ -0,0 +1,81 @@
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for
+# more details.
+#
+#
+# MODULE is PSYS_SERVER
+
+include $(MODULES_DIR)/config/system_$(IPU_SYSVER).mk
+include $(MODULES_DIR)/config/$(SUBSYSTEM)/subsystem_$(IPU_SYSVER).mk
+
+PSYS_SERVER_DIR=${MODULES_DIR}/psys_server
+
+# The watchdog is a debug facility and must never be merged enabled
+PSYS_SERVER_WATCHDOG_ENABLE ?= 0
+
+PSYS_SERVER_INTERFACE=$(PSYS_SERVER_DIR)/interface
+PSYS_SERVER_SOURCES=$(PSYS_SERVER_DIR)/src
+
+# PSYS API implementation files. Consider a new module for those to avoid
+# having them together with firmware.
+PSYS_SERVER_HOST_FILES += ${MODULES_DIR}/psysapi/device/src/ia_css_psys_device.c
+PSYS_SERVER_HOST_FILES += $(PSYS_SERVER_SOURCES)/bxt_spctrl_process_group_cmd_impl.c
+
+PSYS_SERVER_HOST_CPPFLAGS += -I$(PSYS_SERVER_INTERFACE)
+
+PSYS_SERVER_HOST_CPPFLAGS += -DSSID=$(SSID)
+PSYS_SERVER_HOST_CPPFLAGS += -DMMID=$(MMID)
+
+
+PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/psys_cmd_queue_fw.c
+PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/psys_event_queue_fw.c
+PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/psys_init_fw.c
+PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/psys_process_group_fw.c
+
+# Files that server modules need to use
+PSYS_SERVER_SUPPORT_FILES = $(PSYS_SERVER_SOURCES)/dev_access_conv/$(IPU_SYSVER)/ia_css_psys_server_dev_access_type_conv.c
+PSYS_SERVER_SUPPORT_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_config.c
+
+# Include those to build the release firmware. Otherwise replace by test code.
+PSYS_SERVER_RELEASE_FW_FILES = $(PSYS_SERVER_SOURCES)/psys_server.c
+PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_proxy.c
+PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_dev_access.c
+PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_terminal_load.c
+PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_remote_obj_access.c
+PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_dma_access.c
+ifeq ($(HAS_DEC400), 1)
+PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_dec400_access.c
+endif
+PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SUPPORT_FILES)
+
+PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_INTERFACE)
+PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)
+PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/$(IPU_SYSVER)
+PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/$(PSYS_SERVER_VERSION)
+PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/loader/$(PSYS_SERVER_LOADER_VERSION)
+PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/access_blocker/$(PSYS_ACCESS_BLOCKER_VERSION)
+PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/access_blocker/src
+
+PSYS_SERVER_FW_CPPFLAGS += -DSSID=$(SSID)
+PSYS_SERVER_FW_CPPFLAGS += -DMMID=$(MMID)
+PSYS_SERVER_FW_CPPFLAGS += -DHAS_DPCM=$(if $(HAS_DPCM),1,0)
+
+# PSYS server watchdog for debugging
+ifeq ($(PSYS_SERVER_WATCHDOG_ENABLE), 1)
+ PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_watchdog.c
+ PSYS_SERVER_FW_CPPFLAGS += -DPSYS_SERVER_WATCHDOG_DEBUG
+endif
+
+PSYS_SERVER_FW_CPPFLAGS += -D$(PSYS_HW_VERSION)
+
+PSYS_SERVER_FW_CPPFLAGS += -DENABLE_TPROXY=$(PSYS_SERVER_ENABLE_TPROXY)
+PSYS_SERVER_FW_CPPFLAGS += -DENABLE_DEVPROXY=$(PSYS_SERVER_ENABLE_DEVPROXY)
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/src/bxt_spctrl_process_group_cmd_impl.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/src/bxt_spctrl_process_group_cmd_impl.c
new file mode 100644
index 000000000000..6f8aea782464
---
/dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psys_server/src/bxt_spctrl_process_group_cmd_impl.c @@ -0,0 +1,332 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_device.h" +#include "ia_css_psys_process_group_cmd_impl.h" +#include "ia_css_psysapi.h" +#include "ia_css_psys_terminal.h" +#include "ia_css_psys_process.h" +#include "ia_css_psys_process.psys.h" +#include "ia_css_psys_process_group.h" +#include "ia_css_psys_process_group.psys.h" +#include "ia_css_psys_program_group_manifest.h" +#include "type_support.h" +#include "error_support.h" +#include "misc_support.h" +#include "cpu_mem_support.h" +#include "ia_css_bxt_spctrl_trace.h" + +#if HAS_DUAL_CMD_CTX_SUPPORT +#define MAX_CLIENT_PGS 8 /* same as test_params.h */ +struct ia_css_process_group_context { + ia_css_process_group_t *pg; + bool secure; +}; +struct ia_css_process_group_context pg_contexts[MAX_CLIENT_PGS]; +static unsigned int num_of_pgs; + +STORAGE_CLASS_INLINE +struct ia_css_syscom_context *ia_css_process_group_get_context(ia_css_process_group_t *process_group) +{ + unsigned int i; + bool secure = false; + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_get_context(): enter:\n"); + + for (i = 0; i < num_of_pgs; i++) { + if (pg_contexts[i].pg == process_group) { + secure = pg_contexts[i].secure; + break; + } + } + + IA_CSS_TRACE_1(BXT_SPCTRL, INFO, + "ia_css_process_group_get_context(): secure %d\n", secure); + return secure ? 
psys_syscom_secure : psys_syscom;
+}
+
+int ia_css_process_group_store(ia_css_process_group_t *process_group, bool secure)
+{
+ IA_CSS_TRACE_2(BXT_SPCTRL, INFO,
+ "ia_css_process_group_store(): pg instance %d secure %d\n", num_of_pgs, secure);
+
+ /* Guard the fixed-size context table before writing into it */
+ if (num_of_pgs >= MAX_CLIENT_PGS) {
+ IA_CSS_TRACE_0(BXT_SPCTRL, ERROR,
+ "ia_css_process_group_store(): no free pg context slot\n");
+ return -1;
+ }
+
+ pg_contexts[num_of_pgs].pg = process_group;
+ pg_contexts[num_of_pgs].secure = secure;
+ num_of_pgs++;
+ return 0;
+}
+#else /* HAS_DUAL_CMD_CTX_SUPPORT */
+STORAGE_CLASS_INLINE
+struct ia_css_syscom_context *ia_css_process_group_get_context(ia_css_process_group_t *process_group)
+{
+ NOT_USED(process_group);
+
+ return psys_syscom;
+}
+
+int ia_css_process_group_store(ia_css_process_group_t *process_group, bool secure)
+{
+ NOT_USED(process_group);
+ NOT_USED(secure);
+
+ return 0;
+}
+#endif /* HAS_DUAL_CMD_CTX_SUPPORT */
+
+int ia_css_process_group_on_create(
+ ia_css_process_group_t *process_group,
+ const ia_css_program_group_manifest_t *program_group_manifest,
+ const ia_css_program_group_param_t *program_group_param)
+{
+ NOT_USED(process_group);
+ NOT_USED(program_group_manifest);
+ NOT_USED(program_group_param);
+
+ IA_CSS_TRACE_0(BXT_SPCTRL, INFO,
+ "ia_css_process_group_on_create(): enter:\n");
+
+ return 0;
+}
+
+int ia_css_process_group_on_destroy(
+ ia_css_process_group_t *process_group)
+{
+ NOT_USED(process_group);
+
+ IA_CSS_TRACE_0(BXT_SPCTRL, INFO,
+ "ia_css_process_group_on_destroy(): enter:\n");
+
+ return 0;
+}
+
+int ia_css_process_group_exec_cmd(
+ ia_css_process_group_t *process_group,
+ const ia_css_process_group_cmd_t cmd)
+{
+ int retval = -1;
+ ia_css_process_group_state_t state;
+ struct ia_css_psys_cmd_s psys_cmd;
+ bool cmd_queue_full;
+ unsigned int queue_id;
+
+ IA_CSS_TRACE_0(BXT_SPCTRL, INFO,
+ "ia_css_process_group_exec_cmd(): enter:\n");
+
+ verifexit(process_group != NULL);
+
+ state = ia_css_process_group_get_state(process_group);
+
+ verifexit(state != IA_CSS_PROCESS_GROUP_ERROR);
+ verifexit(state < IA_CSS_N_PROCESS_GROUP_STATES);
+
+ switch (cmd) {
+ case IA_CSS_PROCESS_GROUP_CMD_SUBMIT:
+
+ IA_CSS_TRACE_0(BXT_SPCTRL, INFO,
+ "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_SUBMIT:\n");
+ verifexit(state == IA_CSS_PROCESS_GROUP_READY);
+
+ /* External resource availability checks */
+ verifexit(ia_css_can_process_group_submit(process_group));
+
+ process_group->state = IA_CSS_PROCESS_GROUP_BLOCKED;
+ break;
+ case IA_CSS_PROCESS_GROUP_CMD_START:
+
+ IA_CSS_TRACE_0(BXT_SPCTRL, INFO,
+ "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_START:\n");
+ verifexit(state == IA_CSS_PROCESS_GROUP_BLOCKED);
+
+ /* External resource state checks */
+ verifexit(ia_css_can_process_group_start(process_group));
+
+ process_group->state = IA_CSS_PROCESS_GROUP_STARTED;
+ break;
+ case IA_CSS_PROCESS_GROUP_CMD_DISOWN:
+
+ IA_CSS_TRACE_0(BXT_SPCTRL, INFO,
+ "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_DISOWN:\n");
+ verifexit(state == IA_CSS_PROCESS_GROUP_STARTED);
+
+ cmd_queue_full = ia_css_is_psys_cmd_queue_full(ia_css_process_group_get_context(process_group),
+ IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID);
+ retval = EBUSY;
+ verifexit(cmd_queue_full == false);
+
+ psys_cmd.command = IA_CSS_PROCESS_GROUP_CMD_START;
+ psys_cmd.msg = 0;
+ psys_cmd.context_handle = process_group->ipu_virtual_address;
+
+ verifexit(ia_css_process_group_print(process_group, NULL) == 0);
+
+ retval = ia_css_psys_cmd_queue_send(ia_css_process_group_get_context(process_group),
+ IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID, &psys_cmd);
+ verifexit(retval > 0);
+ break;
+ case IA_CSS_PROCESS_GROUP_CMD_STOP:
+
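+ /* STOP is routed to the process group's own base queue (looked up
+ * below via ia_css_process_group_get_base_queue_id()) rather than
+ * the shared IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID used for DISOWN;
+ * the per-PPG queues are in-order, so the stop stays ordered with
+ * the buffer set commands already queued for this group.
+ */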
IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_STOP:\n"); + + cmd_queue_full = ia_css_is_psys_cmd_queue_full(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID); + retval = EBUSY; + verifexit(cmd_queue_full == false); + + psys_cmd.command = IA_CSS_PROCESS_GROUP_CMD_STOP; + psys_cmd.msg = 0; + psys_cmd.context_handle = process_group->ipu_virtual_address; + + queue_id = ia_css_process_group_get_base_queue_id(process_group); + verifexit(queue_id < IA_CSS_N_PSYS_CMD_QUEUE_ID); + + retval = ia_css_psys_cmd_queue_send(ia_css_process_group_get_context(process_group), + queue_id, &psys_cmd); + verifexit(retval > 0); + break; + case IA_CSS_PROCESS_GROUP_CMD_ABORT: + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_ABORT:\n"); + + /* Once the flushing of shared buffers is fixed this verifexit + * should be changed to be state = IA_CSS_PROCESS_GROUP_STARTED + */ + verifexit(state == IA_CSS_PROCESS_GROUP_BLOCKED); + + cmd_queue_full = ia_css_is_psys_cmd_queue_full(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID); + retval = EBUSY; + verifexit(cmd_queue_full == false); + + psys_cmd.command = IA_CSS_PROCESS_GROUP_CMD_ABORT; + psys_cmd.msg = 0; + psys_cmd.context_handle = process_group->ipu_virtual_address; + + retval = ia_css_psys_cmd_queue_send(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_DEVICE_ID, &psys_cmd); + verifexit(retval > 0); + break; + default: + verifexit(false); + break; + } + + retval = 0; +EXIT: + if (0 != retval) { + IA_CSS_TRACE_1(BXT_SPCTRL, ERROR, + "ia_css_process_group_exec_cmd failed (%i)\n", retval); + } + return retval; +} + +STORAGE_CLASS_INLINE int enqueue_buffer_set_cmd( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *buffer_set, + unsigned int queue_offset, + uint16_t command + ) +{ + int retval = -1; + struct ia_css_psys_cmd_s psys_cmd; + bool cmd_queue_full; + unsigned int queue_id; + + verifexit(ia_css_process_group_get_state(process_group) + == IA_CSS_PROCESS_GROUP_STARTED); + + verifexit(queue_offset < + ia_css_process_group_get_num_queues(process_group)); + + queue_id = + ia_css_process_group_get_base_queue_id(process_group) + + queue_offset; + verifexit(queue_id < IA_CSS_N_PSYS_CMD_QUEUE_ID); + + cmd_queue_full = ia_css_is_psys_cmd_queue_full(ia_css_process_group_get_context(process_group), queue_id); + retval = EBUSY; + verifexit(cmd_queue_full == false); + + psys_cmd.command = command; + psys_cmd.msg = 0; + psys_cmd.context_handle = + ia_css_buffer_set_get_ipu_address(buffer_set); + + retval = ia_css_psys_cmd_queue_send(ia_css_process_group_get_context(process_group), queue_id, &psys_cmd); + verifexit(retval > 0); + + retval = 0; + +EXIT: + if (0 != retval) { + IA_CSS_TRACE_1(BXT_SPCTRL, ERROR, + "enqueue_buffer_set failed (%i)\n", retval); + } + return retval; +} + +int ia_css_enqueue_buffer_set( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *buffer_set, + unsigned int queue_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_enqueue_buffer_set():\n"); + retval = enqueue_buffer_set_cmd( + process_group, + buffer_set, + queue_offset, + IA_CSS_PROCESS_GROUP_CMD_RUN); + + if (0 != retval) { + IA_CSS_TRACE_1(BXT_SPCTRL, ERROR, + "ia_css_enqueue_buffer_set failed (%i)\n", retval); + } + return retval; +} + +int ia_css_enqueue_param_buffer_set( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *param_buffer_set) 
+{ +#if (HAS_LATE_BINDING_SUPPORT == 1) + int retval = -1; + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_enqueue_param_buffer_set():\n"); + + retval = enqueue_buffer_set_cmd( + process_group, + param_buffer_set, + IA_CSS_PSYS_LATE_BINDING_QUEUE_OFFSET, + IA_CSS_PROCESS_GROUP_CMD_SUBMIT); + + if (0 != retval) { + IA_CSS_TRACE_1(BXT_SPCTRL, ERROR, + "ia_css_enqueue_param_buffer_set failed (%i)\n", retval); + } +#else + int retval = -1; + + NOT_USED(process_group); + NOT_USED(param_buffer_set); + IA_CSS_TRACE_0(BXT_SPCTRL, ERROR, + "ia_css_enqueue_param_buffer_set failed, no late binding supported\n"); +#endif /* (HAS_LATE_BINDING_SUPPORT == 1) */ + return retval; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data.h new file mode 100644 index 000000000000..6ccca1d9b69e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data.h @@ -0,0 +1,418 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_DATA_H +#define __IA_CSS_PROGRAM_GROUP_DATA_H + +#include "ia_css_psys_data_storage_class.h" + +/*! \file */ + +/** @file ia_css_program_group_data.h + * + * Define the data objects that are passed to the process groups + * i.e. 
frames and matrices with their sub-structures
+ *
+ * The data objects are separate from the process group terminal,
+ * although they are stored by value rather than by reference and
+ * make the process group terminal dependent on its definition
+ *
+ * This frame definition overloads the current CSS frame definition;
+ * they are the same object, just a slightly different implementation
+ */
+
+#include /* vied_vaddress_t */
+
+#include
+#include "ia_css_program_group_data_defs.h" /* ia_css_frame_format_type */
+
+#include "ia_css_terminal_defs.h"
+
+/*
+ * Frame buffer state used for sequencing
+ * (see FAS 5.5.3)
+ *
+ * The buffer can be in DDR or a handle to a stream
+ */
+typedef enum ia_css_buffer_state {
+ IA_CSS_BUFFER_NULL = 0,
+ IA_CSS_BUFFER_UNDEFINED,
+ IA_CSS_BUFFER_EMPTY,
+ IA_CSS_BUFFER_NONEMPTY,
+ IA_CSS_BUFFER_FULL,
+ IA_CSS_N_BUFFER_STATES
+} ia_css_buffer_state_t;
+
+#define IA_CSS_BUFFER_STATE_IN_BITS 32
+
+/*
+ * Pointer state used to signal MMU invalidation
+ */
+typedef enum ia_css_pointer_state {
+ IA_CSS_POINTER_INVALID = 0,
+ IA_CSS_POINTER_VALID,
+ IA_CSS_N_POINTER_STATES
+} ia_css_pointer_state_t;
+
+#define IA_CSS_POINTER_STATE_IN_BITS 32
+
+/*
+ * Access direction needed to select the access port
+ */
+typedef enum ia_css_access_type {
+ IA_CSS_ACCESS_LOCKED = 0,
+ IA_CSS_ACCESS_READ,
+ IA_CSS_ACCESS_WRITE,
+ IA_CSS_ACCESS_MODIFY,
+ IA_CSS_N_ACCESS_TYPES
+} ia_css_access_type_t;
+
+#define IA_CSS_ACCESS_TYPE_IN_BITS 32
+
+/*
+ * Access attribute needed to select the access port
+ * - public : snooped
+ * - private: non-snooped
+ * Naming is a bit awkward, lack of inspiration
+ */
+typedef enum ia_css_access_scope {
+ IA_CSS_ACCESS_PRIVATE = 0,
+ IA_CSS_ACCESS_PUBLIC,
+ IA_CSS_N_ACCESS_SCOPES
+} ia_css_access_scopes_t;
+
+#define IA_CSS_ACCESS_SCOPES_IN_BITS 32
+
+#define IA_CSS_N_FRAME_PLANES 6
+
+#define IA_CSS_FRAME_FORMAT_BITMAP_BITS 64
+typedef uint64_t ia_css_frame_format_bitmap_t;
+
+typedef struct ia_css_param_frame_descriptor_s ia_css_param_frame_descriptor_t;
+typedef struct ia_css_param_frame_s ia_css_param_frame_t;
+
+typedef struct ia_css_frame_descriptor_s ia_css_frame_descriptor_t;
+typedef struct ia_css_frame_s ia_css_frame_t;
+typedef struct ia_css_fragment_descriptor_s ia_css_fragment_descriptor_t;
+
+typedef struct ia_css_stream_s ia_css_stream_t;
+
+
+#define N_UINT64_IN_STREAM_STRUCT 1
+
+#define IA_CSS_STREAM_STRUCT_BITS \
+ (N_UINT64_IN_STREAM_STRUCT * 64)
+
+struct ia_css_stream_s {
+ uint64_t dummy;
+};
+
+struct ia_css_param_frame_descriptor_s {
+ uint16_t size; /**< Size of the descriptor */
+ uint32_t buffer_count; /**< Number of parameter buffers */
+};
+
+struct ia_css_param_frame_s {
+ /*< Base virtual addresses to parameters in subsystem virtual
+ * memory space
+ */
+ vied_vaddress_t *data;
+};
+
+#define N_UINT32_IN_FRAME_DESC_STRUCT \
+ (1 + IA_CSS_N_FRAME_PLANES + (IA_CSS_N_DATA_DIMENSION - 1))
+#define N_UINT16_IN_FRAME_DESC_STRUCT (1 + IA_CSS_N_DATA_DIMENSION)
+#define N_UINT8_IN_FRAME_DESC_STRUCT 3
+#define N_PADDING_UINT8_IN_FRAME_DESC_STRUCT 3
+
+#define IA_CSS_FRAME_DESCRIPTOR_STRUCT_BITS \
+ (IA_CSS_FRAME_FORMAT_TYPE_BITS \
+ + (N_UINT32_IN_FRAME_DESC_STRUCT * 32) \
+ + (N_UINT16_IN_FRAME_DESC_STRUCT * 16) \
+ + (N_UINT8_IN_FRAME_DESC_STRUCT * 8) \
+ + (N_PADDING_UINT8_IN_FRAME_DESC_STRUCT * 8))
+
+/*
+ * Structure defining the frame (size and access) properties for
+ * inbuild types only.
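+ *
+ * A descriptor is sized for its plane count; as a sketch of the layout
+ * rule (see ia_css_sizeof_frame_descriptor() declared below):
+ *   size = sizeof(ia_css_frame_descriptor_t)
+ *        + plane_count * sizeof(uint32_t)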
+ *
+ * The inbuild types like FourCC, MIPI and CSS private types are supported
+ * by FW; all other types are custom types whose interpretation must be encoded
+ * on the buffer itself or known by the source and sink
+ */
+struct ia_css_frame_descriptor_s {
+ /**< Indicates if this is a generic type or inbuild with
+ * variable size descriptor
+ */
+ ia_css_frame_format_type_t frame_format_type;
+ /**< Number of data planes (pointers) */
+ uint32_t plane_count;
+ /**< Plane offsets accounting for fragments */
+ uint32_t plane_offsets[IA_CSS_N_FRAME_PLANES];
+ /**< Physical size aspects */
+ uint32_t stride[IA_CSS_N_DATA_DIMENSION - 1];
+ /**< Logical dimensions */
+ uint16_t dimension[IA_CSS_N_DATA_DIMENSION];
+ /**< Size of this descriptor */
+ uint16_t size;
+ /**< Bits per pixel */
+ uint8_t bpp;
+ /**< Bits per element */
+ uint8_t bpe;
+ /**< 1 if terminal uses compressed datatype, 0 otherwise */
+ uint8_t is_compressed;
+ /**< Padding for 64bit alignment */
+ uint8_t padding[N_PADDING_UINT8_IN_FRAME_DESC_STRUCT];
+};
+
+#define N_UINT32_IN_FRAME_STRUCT 2
+#define N_PADDING_UINT8_IN_FRAME_STRUCT 4
+
+#define IA_CSS_FRAME_STRUCT_BITS \
+ (IA_CSS_BUFFER_STATE_IN_BITS \
+ + IA_CSS_ACCESS_TYPE_IN_BITS \
+ + IA_CSS_POINTER_STATE_IN_BITS \
+ + IA_CSS_ACCESS_SCOPES_IN_BITS \
+ + VIED_VADDRESS_BITS \
+ + (N_UINT32_IN_FRAME_STRUCT * 32) \
+ + (N_PADDING_UINT8_IN_FRAME_STRUCT * 8))
+
+
+/*
+ * Main frame structure holding the main store and auxiliary access properties
+ * the "pointer_state" and "access_scope" should be encoded on the
+ * "vied_vaddress_t" type
+ */
+struct ia_css_frame_s {
+ /**< State of the frame for purpose of sequencing */
+ ia_css_buffer_state_t buffer_state;
+ /**< Access direction, may change when buffer state changes */
+ ia_css_access_type_t access_type;
+ /**< State of the pointer for purpose of embedded MMU coherency */
+ ia_css_pointer_state_t pointer_state;
+ /**< Access to the pointer for purpose of host cache coherency */
+ ia_css_access_scopes_t access_scope;
+ /**< Base virtual address to data in subsystem virtual memory space */
+ vied_vaddress_t data;
+ /**< Offset to buffer address within external buffer set structure */
+ uint32_t data_index;
+ /**< Total allocation size in bytes */
+ uint32_t data_bytes;
+ /**< Padding for 64bit alignment */
+ uint8_t padding[N_PADDING_UINT8_IN_FRAME_STRUCT];
+};
+
+#define N_UINT16_IN_FRAGMENT_DESC_STRUCT (3 * IA_CSS_N_DATA_DIMENSION)
+#define N_PADDING_UINT8_IN_FRAGMENT_DESC_STRUCT 4
+
+#define IA_CSS_FRAGMENT_DESCRIPTOR_STRUCT_BITS \
+ ((N_UINT16_IN_FRAGMENT_DESC_STRUCT * 16) \
+ + (N_PADDING_UINT8_IN_FRAGMENT_DESC_STRUCT * 8))
+
+/*
+ * Structure defining the fragment (size and access) properties.
+ *
+ * All cropping and padding effects are described by the difference between
+ * the frame size and its location and the fragment size(s) and location(s)
+ */
+struct ia_css_fragment_descriptor_s {
+ /**< Logical dimensions of the fragment */
+ uint16_t dimension[IA_CSS_N_DATA_DIMENSION];
+ /**< Logical location of the fragment in the frame */
+ uint16_t index[IA_CSS_N_DATA_DIMENSION];
+ /**< Fractional start (phase) of the fragment in the access unit */
+ uint16_t offset[IA_CSS_N_DATA_DIMENSION];
+ /**< Padding for 64bit alignment */
+ uint8_t padding[N_PADDING_UINT8_IN_FRAGMENT_DESC_STRUCT];
+};
+
+
+/*! 
Print the frame object to file/stream
+
+ @param frame[in] frame object
+ @param fid[out] file/stream handle
+
+ @return < 0 on error
+ */
+IA_CSS_PSYS_DATA_STORAGE_CLASS_H
+int ia_css_frame_print(
+ const ia_css_frame_t *frame, void *fid);
+
+/*! Get the host virtual address of the data buffer handle in the frame object
+
+ @param frame[in] frame object
+
+ @return pointer to the buffer handle, NULL on error
+*/
+IA_CSS_PSYS_DATA_STORAGE_CLASS_H
+const vied_vaddress_t *ia_css_frame_get_buffer_host_virtual_address(
+ const ia_css_frame_t *frame);
+
+/*! Get the data buffer handle from the frame object
+
+ @param frame[in] frame object
+
+ @return buffer pointer, VIED_NULL on error
+ */
+IA_CSS_PSYS_DATA_STORAGE_CLASS_H
+vied_vaddress_t ia_css_frame_get_buffer(const ia_css_frame_t *frame);
+
+/*! Set the data buffer handle on the frame object
+
+ @param frame[in] frame object
+ @param buffer[in] buffer pointer
+
+ @return < 0 on error
+ */
+IA_CSS_PSYS_DATA_STORAGE_CLASS_H
+int ia_css_frame_set_buffer(
+ ia_css_frame_t *frame, vied_vaddress_t buffer);
+
+/*! Get the data buffer index in the frame object
+
+ @param frame[in] frame object
+
+ @return data buffer index on success, -1 on error
+ */
+IA_CSS_PSYS_DATA_STORAGE_CLASS_H
+int ia_css_frame_get_data_index(
+ const ia_css_frame_t *frame);
+
+/*! Set the data buffer index in the frame object
+
+ @param frame[in] frame object
+ @param data_index[in] data buffer index
+
+ @return < 0 on error
+ */
+IA_CSS_PSYS_DATA_STORAGE_CLASS_H
+int ia_css_frame_set_data_index(
+ ia_css_frame_t *frame,
+ unsigned int data_index);
+
+/*! Set the data buffer size on the frame object
+
+ @param frame[in] frame object
+ @param size[in] number of data bytes
+
+ @return < 0 on error
+ */
+IA_CSS_PSYS_DATA_STORAGE_CLASS_H
+int ia_css_frame_set_data_bytes(
+ ia_css_frame_t *frame, unsigned int size);
+
+/*! Get the data buffer state from the frame object
+
+ @param frame[in] frame object
+
+ @return buffer state, limit value on error
+ */
+IA_CSS_PSYS_DATA_STORAGE_CLASS_H
+ia_css_buffer_state_t ia_css_frame_get_buffer_state(
+ const ia_css_frame_t *frame);
+
+/*! Set the data buffer state of the frame object
+
+ @param frame[in] frame object
+ @param buffer_state[in] buffer state
+
+ @return < 0 on error
+ */
+IA_CSS_PSYS_DATA_STORAGE_CLASS_H
+int ia_css_frame_set_buffer_state(ia_css_frame_t *frame,
+ const ia_css_buffer_state_t buffer_state);
+
+/*! Get the data pointer state from the frame object
+
+ @param frame[in] frame object
+
+ @return pointer state, limit value on error
+ */
+IA_CSS_PSYS_DATA_STORAGE_CLASS_H
+ia_css_pointer_state_t ia_css_frame_get_pointer_state(
+ const ia_css_frame_t *frame);
+
+/*! Set the data pointer state of the frame object
+
+ @param frame[in] frame object
+ @param pointer_state[in] pointer state
+
+ @return < 0 on error
+ */
+IA_CSS_PSYS_DATA_STORAGE_CLASS_H
+int ia_css_frame_set_pointer_state(ia_css_frame_t *frame,
+ const ia_css_pointer_state_t pointer_state);
+
+/*! Print the frame descriptor object to file/stream
+
+ @param frame_descriptor[in] frame descriptor object
+ @param fid[out] file/stream handle
+
+ @return < 0 on error
+ */
+IA_CSS_PSYS_DATA_STORAGE_CLASS_H
+int ia_css_frame_descriptor_print(
+ const ia_css_frame_descriptor_t *frame_descriptor, void *fid);
+
+/*! 
Print the fragment descriptor object to file/stream + + @param fragment_descriptor[in] fragment descriptor object + @param fid[out] file/stream handle + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_fragment_descriptor_print( + const ia_css_fragment_descriptor_t *fragment_descriptor, void *fid); + +/*! Compute the bitmap for the frame format type + + @param frame_format_type[in] frame format type + + @return 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +ia_css_frame_format_bitmap_t ia_css_frame_format_bit_mask( + const ia_css_frame_format_type_t frame_format_type); + +/*! clear frame format bitmap + + @return cleared bitmap + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +ia_css_frame_format_bitmap_t ia_css_frame_format_bitmap_clear(void); + + +/*! Compute the size of storage required for the data descriptor object + * on a terminal + *@param plane_count[in] The number of data planes in the buffer + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +size_t ia_css_sizeof_frame_descriptor( + const uint8_t plane_count); +/*! Compute the size of storage required for the kernel parameter descriptor + * object on a terminal + + @param section_count[in] The number of parameter sections in the buffer + + @return 0 on error + */ +extern size_t ia_css_sizeof_kernel_param_descriptor( + const uint16_t section_count); + +#ifdef __IA_CSS_PSYS_DATA_INLINE__ +#include "ia_css_program_group_data_impl.h" +#endif /* __IA_CSS_PSYS_DATA_INLINE__ */ + +#endif /* __IA_CSS_PROGRAM_GROUP_DATA_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data_defs.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data_defs.h new file mode 100644 index 000000000000..3f177a19b98b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data_defs.h @@ -0,0 +1,196 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_PROGRAM_GROUP_DATA_DEFS_H
+#define __IA_CSS_PROGRAM_GROUP_DATA_DEFS_H
+
+
+/*
+ * Pre-defined frame format
+ *
+ * Those formats have inbuild support of traffic
+ * and access functions
+ *
+ * Note that the formats are for terminals, so there
+ * is no distinction between input and output formats
+ * - Custom formats with or without descriptor
+ * - 4CC formats such as YUV variants
+ * - MIPI (line) formats as produced by CSI receivers
+ * - MIPI (sensor) formats such as Bayer or RGBC
+ * - CSS internal formats (private types)
+ * - CSS parameters (type 1 - 6)
+ */
+#define IA_CSS_FRAME_FORMAT_TYPE_BITS 32
+typedef enum ia_css_frame_format_type {
+ IA_CSS_DATA_CUSTOM_NO_DESCRIPTOR = 0,
+ IA_CSS_DATA_CUSTOM,
+
+ /* 12 bit YUV 411, Y, UV 2-plane (8 bit per element) */
+ IA_CSS_DATA_FORMAT_NV11,
+ /* bpp bit YUV 420, Y, U, V 3-plane (bpp/1.5 bpe) */
+ IA_CSS_DATA_FORMAT_YUV420,
+ /* 12 bit YUV 420, Y, V, U 3-plane (8 bit per element) */
+ IA_CSS_DATA_FORMAT_YV12,
+ /* 12 bit YUV 420, Y, UV 2-plane (8 bit per element) */
+ IA_CSS_DATA_FORMAT_NV12,
+ /* 16 bit YUV 420, Y, UV 2-plane (8 bit per element) */
+ IA_CSS_DATA_FORMAT_NV12_16,
+ /* 12 bit YUV 420, Intel proprietary tiled format, TileY */
+ IA_CSS_DATA_FORMAT_NV12_TILEY,
+ /* 12 bit YUV 420, Y, VU 2-plane (8 bit per element) */
+ IA_CSS_DATA_FORMAT_NV21,
+ /* bpp bit YUV 422, Y, U, V 3-plane (bpp/2 bpe) */
+ IA_CSS_DATA_FORMAT_YUV422,
+ /* 16 bit YUV 422, Y, V, U 3-plane (8 bit per element) */
+ IA_CSS_DATA_FORMAT_YV16,
+ /* 16 bit YUV 422, Y, UV 2-plane (8 bit per element) */
+ IA_CSS_DATA_FORMAT_NV16,
+ /* 16 bit YUV 422, Y, VU 2-plane (8 bit per element) */
+ IA_CSS_DATA_FORMAT_NV61,
+ /* 16 bit YUV 422, UYVY 1-plane interleaved (8 bit per element) */
+ IA_CSS_DATA_FORMAT_UYVY,
+ /* 16 bit YUV 422, YUYV 1-plane interleaved (8 bit per element) */
+ IA_CSS_DATA_FORMAT_YUYV,
+ /* bpp bit YUV 444, Y, U, V 3-plane (bpp/3 bpe) */
+ IA_CSS_DATA_FORMAT_YUV444,
+ /* 8 bit monochrome plane */
+ IA_CSS_DATA_FORMAT_Y800,
+
+ /* 5-6-5 bit packed (1-plane) RGB (16bpp, ~5 bpe) */
+ IA_CSS_DATA_FORMAT_RGB565,
+ /* 24 bit RGB, 3 planes (8 bit per element) */
+ IA_CSS_DATA_FORMAT_RGB888,
+ /* 32 bit RGB-Alpha, 1 plane (8 bit per element) */
+ IA_CSS_DATA_FORMAT_RGBA888,
+
+ /* bpp bit raw, [[Gr, R];[B, Gb]] 1-plane (bpp == bpe) */
+ IA_CSS_DATA_FORMAT_BAYER_GRBG,
+ /* bpp bit raw, [[R, Gr];[Gb, B]] 1-plane (bpp == bpe) */
+ IA_CSS_DATA_FORMAT_BAYER_RGGB,
+ /* bpp bit raw, [[B, Gb];[Gr, R]] 1-plane (bpp == bpe) */
+ IA_CSS_DATA_FORMAT_BAYER_BGGR,
+ /* bpp bit raw, [[Gb, B];[R, Gr]] 1-plane (bpp == bpe) */
+ IA_CSS_DATA_FORMAT_BAYER_GBRG,
+
+ /* bpp bit (NV12) YUV 420, Y, UV 2-plane derived 3-line,
+ * 2-Y, 1-UV (bpp/1.5 bpe): M420 format
+ */
+ IA_CSS_DATA_FORMAT_YUV420_LINE,
+ /* Deprecated RAW, 1 plane */
+ IA_CSS_DATA_FORMAT_RAW,
+ /* Deprecated RAW, 1 plane, packed */
+ IA_CSS_DATA_FORMAT_RAW_PACKED,
+ /* Internal, for advanced ISP */
+ IA_CSS_DATA_FORMAT_QPLANE6,
+ /* 1D byte stream, used for jpeg 1-plane */
+ IA_CSS_DATA_FORMAT_BINARY_8,
+ /* Deprecated MIPI frame, 1D byte stream 1 plane */
+ IA_CSS_DATA_FORMAT_MIPI,
+ /* 12 bit [[YY];[UYVY]] 1-plane interleaved 2-line
+ * (8 bit per element)
+ */
+ IA_CSS_DATA_FORMAT_MIPI_YUV420_8,
+ /* 15 bit [[YY];[UYVY]] 1-plane interleaved 2-line
+ * (10 bit per element)
+ */
+ IA_CSS_DATA_FORMAT_MIPI_YUV420_10,
+ /* 12 bit [[UY];[VY]] 1-plane interleaved 2-line (8 bit per element) */
+ IA_CSS_DATA_FORMAT_MIPI_LEGACY_YUV420_8,
+
+ /* Type 1-5 parameter, not fragmentable */
+ IA_CSS_DATA_GENERIC_PARAMETER,
+ /* Video stabilisation Type 6 parameter, fragmentable */
+ IA_CSS_DATA_DVS_PARAMETER,
+ /* Video stabilisation Type 6 parameter, coordinates */
+ IA_CSS_DATA_DVS_COORDINATES,
+ /* Dead Pixel correction Type 6 parameter, fragmentable */
+ IA_CSS_DATA_DPC_PARAMETER,
+ /* Lens Shading Correction Type 6 parameter, fragmentable */
+ IA_CSS_DATA_LSC_PARAMETER,
+ /* 3A statistics output HI. */
+ IA_CSS_DATA_S3A_STATISTICS_HI,
+ /* 3A statistics output LO. */
+ IA_CSS_DATA_S3A_STATISTICS_LO,
+ /* histogram output */
+ IA_CSS_DATA_S3A_HISTOGRAM,
+ /* GammaStar grid */
+ IA_CSS_DATA_GAMMASTAR_GRID,
+
+ /* Gr R B Gb Gr R B Gb in PIXELS (also called isys interleaved) */
+ IA_CSS_DATA_FORMAT_BAYER_LINE_INTERLEAVED,
+ /* Gr R B Gb Gr R B Gb in VECTORS (VCC IMAGE, ISP NWAY dependent) */
+ IA_CSS_DATA_FORMAT_BAYER_VECTORIZED,
+ /* Gr R Gr R ... | B Gb B Gb .. in VECTORS (ISP NWAY dependent) */
+ IA_CSS_DATA_FORMAT_BAYER_GRBG_VECTORIZED,
+
+ /* 16 bit YUV 420, Y even plane, Y uneven plane,
+ * UV plane vector interleaved
+ */
+ IA_CSS_DATA_FORMAT_YUV420_VECTORIZED,
+ /* 16 bit YUV 420, YYUVYY vector interleaved */
+ IA_CSS_DATA_FORMAT_YYUVYY_VECTORIZED,
+
+ /* 12 bit YUV 420, Intel proprietary tiled format, TileYf */
+ IA_CSS_DATA_FORMAT_NV12_TILEYF,
+
+ /* Y samples appear first in the memory. All Y samples are an array
+ * of WORDs; even number of lines;
+ * Surface stride can be larger than the width of Y plane.
+ * This array is followed immediately by chroma array.
+ * Chroma array is an array of WORDs, with interleaved U/V samples.
+ * If the interleaved U/V plane is addressed as an array of DWORDs,
+ * the least significant word contains U sample. The stride of the
+ * interleaved U/V plane is equal to Y plane. 10 bit data.
+ */
+ IA_CSS_DATA_FORMAT_P010,
+
+ /* MSB aligned version of P010*/
+ IA_CSS_DATA_FORMAT_P010_MSB,
+
+ /* P016/P012 Y samples appear first in the memory.
+ * All Y samples are an array of WORDs;
+ * even number of lines;
+ * Surface stride can be larger than the width of Y plane.
+ * This array is followed immediately by chroma array.
+ * Chroma array is an array of WORDs, with interleaved U/V samples.
+ * If the interleaved U/V plane is addressed as an array of DWORDs,
+ * the least significant word contains U sample. The stride of the
+ * interleaved U/V plane is equal to Y plane. 12 bit data.
+ */
+ IA_CSS_DATA_FORMAT_P016,
+
+ /* MSB aligned version of P016*/
+ IA_CSS_DATA_FORMAT_P016_MSB,
+
+ /* TILEYYf representation of P010*/
+ IA_CSS_DATA_FORMAT_P010_TILEYF,
+
+ /* TILEYYf representation of P010 MSB aligned*/
+ IA_CSS_DATA_FORMAT_P010_MSB_TILEYF,
+
+ /* TILEYYf representation of P016*/
+ IA_CSS_DATA_FORMAT_P016_TILEYF,
+
+ /* TILEYYf representation of P016 MSB aligned*/
+ IA_CSS_DATA_FORMAT_P016_MSB_TILEYF,
+
+ /* consists of L and R PDAF pixel pairs.
+ * L and R can be interleaved or not. 1-plane (bpp == bpe) */
+ IA_CSS_DATA_FORMAT_PAF,
+
+ IA_CSS_N_FRAME_FORMAT_TYPES
+} ia_css_frame_format_type_t;
+
+
+#endif /* __IA_CSS_PROGRAM_GROUP_DATA_DEFS_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_storage_class.h
new file mode 100644
index 000000000000..6a4e3a28e533
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_storage_class.h
@@ -0,0 +1,28 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_DATA_STORAGE_CLASS_H +#define __IA_CSS_PSYS_DATA_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_PSYS_DATA_INLINE__ +#define IA_CSS_PSYS_DATA_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PSYS_DATA_STORAGE_CLASS_C +#else +#define IA_CSS_PSYS_DATA_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PSYS_DATA_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PSYS_DATA_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_trace.h new file mode 100644 index 000000000000..49afed9ce9df --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_trace.h @@ -0,0 +1,102 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_PSYS_DATA_TRACE_H
+#define __IA_CSS_PSYS_DATA_TRACE_H
+
+#include "ia_css_psysapi_trace.h"
+
+#define PSYS_DATA_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF
+
+/* Default sub-module tracing config */
+#if (!defined(PSYSAPI_DATA_TRACING_OVERRIDE))
+ #define PSYS_DATA_TRACE_LEVEL_CONFIG PSYS_DATA_TRACE_LEVEL_CONFIG_DEFAULT
+#endif
+
+/* The sub-module specific trace settings below are used when
+ * PSYSAPI_DATA_TRACING_OVERRIDE is defined; otherwise the module
+ * (PSYSAPI) trace settings are inherited.
+ */
+#if (defined(PSYSAPI_DATA_TRACING_OVERRIDE))
+ /* Module/sub-module specific trace setting */
+ #if PSYSAPI_DATA_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF
+ /* PSYSAPI_TRACE_LOG_LEVEL_OFF */
+ #define PSYSAPI_DATA_TRACE_METHOD \
+ IA_CSS_TRACE_METHOD_NATIVE
+ #define PSYSAPI_DATA_TRACE_LEVEL_ASSERT \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_DATA_TRACE_LEVEL_ERROR \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_DATA_TRACE_LEVEL_WARNING \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_DATA_TRACE_LEVEL_INFO \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_DATA_TRACE_LEVEL_DEBUG \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_DATA_TRACE_LEVEL_VERBOSE \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #elif PSYSAPI_DATA_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL
+ /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */
+ #define PSYSAPI_DATA_TRACE_METHOD \
+ IA_CSS_TRACE_METHOD_NATIVE
+ #define PSYSAPI_DATA_TRACE_LEVEL_ASSERT \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_DATA_TRACE_LEVEL_ERROR \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_DATA_TRACE_LEVEL_WARNING \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_DATA_TRACE_LEVEL_INFO \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_DATA_TRACE_LEVEL_DEBUG \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_DATA_TRACE_LEVEL_VERBOSE \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #elif PSYSAPI_DATA_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG
+ /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */
+ #define PSYSAPI_DATA_TRACE_METHOD \
+ IA_CSS_TRACE_METHOD_NATIVE
+ #define PSYSAPI_DATA_TRACE_LEVEL_ASSERT \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_DATA_TRACE_LEVEL_ERROR \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_DATA_TRACE_LEVEL_WARNING \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_DATA_TRACE_LEVEL_INFO \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_DATA_TRACE_LEVEL_DEBUG \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_DATA_TRACE_LEVEL_VERBOSE \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #else
+ #error "No PSYSAPI_DATA Tracing level defined"
+ #endif
+#else
+ /* Inherit Module trace setting */
+ #define PSYSAPI_DATA_TRACE_METHOD \
+ PSYSAPI_TRACE_METHOD
+ #define PSYSAPI_DATA_TRACE_LEVEL_ASSERT \
+ PSYSAPI_TRACE_LEVEL_ASSERT
+ #define PSYSAPI_DATA_TRACE_LEVEL_ERROR \
+ PSYSAPI_TRACE_LEVEL_ERROR
+ #define PSYSAPI_DATA_TRACE_LEVEL_WARNING \
+ PSYSAPI_TRACE_LEVEL_WARNING
+ #define PSYSAPI_DATA_TRACE_LEVEL_INFO \
+ PSYSAPI_TRACE_LEVEL_INFO
+ #define PSYSAPI_DATA_TRACE_LEVEL_DEBUG \
+ PSYSAPI_TRACE_LEVEL_DEBUG
+ #define PSYSAPI_DATA_TRACE_LEVEL_VERBOSE \
+ PSYSAPI_TRACE_LEVEL_VERBOSE
+#endif
+
+#endif /* __IA_CSS_PSYS_DATA_TRACE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data.c
new file mode 100644
index 000000000000..edf3e55e6c39
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data.c
@@ -0,0 +1,26 @@
+/*
+* Support for 
Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_data_storage_class.h" + +/* + * Functions to possibly inline + */ + +#ifdef __IA_CSS_PSYS_DATA_INLINE__ +STORAGE_CLASS_INLINE int +__ia_css_program_group_data_avoid_warning_on_empty_file(void) { return 0; } +#else /* __IA_CSS_PSYS_DATA_INLINE__ */ +#include "ia_css_program_group_data_impl.h" +#endif /* __IA_CSS_PSYS_DATA_INLINE__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data_impl.h new file mode 100644 index 000000000000..f08a057e4480 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data_impl.h @@ -0,0 +1,455 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_DATA_IMPL_H +#define __IA_CSS_PROGRAM_GROUP_DATA_IMPL_H + +#include "ia_css_program_group_data.h" +#include "ia_css_psys_data_trace.h" +#include "ia_css_terminal_defs.h" +#include /* for verifexit */ +#include /* for COMPILATION_ERROR_IF */ +#include /* for NOT_USED */ + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_print( + const ia_css_frame_t *frame, void *fid) +{ + int retval = -1; + + NOT_USED(fid); + + IA_CSS_TRACE_0(PSYSAPI_DATA, INFO, "ia_css_frame_print(): enter:\n"); + + verifexit(frame != NULL); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tbuffer = %d\n", ia_css_frame_get_buffer(frame)); + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tbuffer_state = %d\n", ia_css_frame_get_buffer_state(frame)); + /* IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, "\tbuffer_state = %s\n", + * ia_css_buffer_state_string(ia_css_frame_get_buffer_state(frame))); + */ + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tpointer_state = %d\n", ia_css_frame_get_pointer_state(frame)); + /* IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, "\tpointer_state = %s\n", + * ia_css_pointer_state_string(ia_css_frame_get_pointer_state(frame))); + */ + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tdata_bytes = %d\n", frame->data_bytes); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_print failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +const vied_vaddress_t *ia_css_frame_get_buffer_host_virtual_address( + const ia_css_frame_t *frame) { + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_buffer_host_virtual_address(): enter:\n"); + + verifexit(frame != NULL); + return &(frame->data); + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_buffer_host_virtual_address invalid argument\n"); + } + return NULL; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +vied_vaddress_t ia_css_frame_get_buffer( + const ia_css_frame_t *frame) +{ + vied_vaddress_t buffer = VIED_NULL; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_buffer(): enter:\n"); + + verifexit(frame != NULL); + buffer = frame->data; + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_buffer invalid argument\n"); + } + return buffer; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_buffer( + ia_css_frame_t *frame, + vied_vaddress_t buffer) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_buffer(): enter:\n"); + + verifexit(frame != NULL); + frame->data = buffer; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_set_buffer failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_get_data_index( + const ia_css_frame_t *frame) +{ + int data_index = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_data_index(): enter:\n"); + + verifexit(frame != NULL); + + data_index = frame->data_index; + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_data_index invalid argument\n"); + } + return data_index; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_data_index( + ia_css_frame_t *frame, + unsigned int data_index) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_data_index(): enter:\n"); + + verifexit(frame != NULL); + + frame->data_index = data_index; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_set_data_index 
failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_data_bytes( + ia_css_frame_t *frame, + unsigned int size) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_data_bytes(): enter:\n"); + + verifexit(frame != NULL); + frame->data_bytes = size; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_set_data_bytes failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +ia_css_buffer_state_t ia_css_frame_get_buffer_state( + const ia_css_frame_t *frame) +{ + ia_css_buffer_state_t buffer_state = IA_CSS_N_BUFFER_STATES; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_buffer_state(): enter:\n"); + + verifexit(frame != NULL); + buffer_state = frame->buffer_state; + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_buffer_state invalid argument\n"); + } + return buffer_state; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_buffer_state( + ia_css_frame_t *frame, + const ia_css_buffer_state_t buffer_state) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_buffer_state(): enter:\n"); + + verifexit(frame != NULL); + frame->buffer_state = buffer_state; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_set_buffer_state failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +ia_css_pointer_state_t ia_css_frame_get_pointer_state( + const ia_css_frame_t *frame) +{ + ia_css_pointer_state_t pointer_state = IA_CSS_N_POINTER_STATES; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_pointer_state(): enter:\n"); + + verifexit(frame != NULL); + pointer_state = frame->pointer_state; + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_pointer_state invalid argument\n"); + } + return pointer_state; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_pointer_state( + ia_css_frame_t *frame, + const ia_css_pointer_state_t pointer_state) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_pointer_state(): enter:\n"); + + verifexit(frame != NULL); + frame->pointer_state = pointer_state; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_set_pointer_state failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_descriptor_print( + const ia_css_frame_descriptor_t *frame_descriptor, + void *fid) +{ + int retval = -1; + int i; + uint8_t frame_plane_count; + + NOT_USED(fid); + + IA_CSS_TRACE_0(PSYSAPI_DATA, INFO, + "ia_css_frame_descriptor_print(): enter:\n"); + + COMPILATION_ERROR_IF(IA_CSS_N_DATA_DIMENSION <= 0); + + verifexit(frame_descriptor != NULL); + + IA_CSS_TRACE_0(PSYSAPI_DATA, INFO, + "ia_css_frame_descriptor_print(): enter:\n"); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tframe_format_type = %d\n", + frame_descriptor->frame_format_type); + /* IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, "\tframe_format_type = %s\n", + * ia_css_frame_format_string(frame_descriptor->frame_format_type)); + */ + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tbpp = %d\n", frame_descriptor->bpp); + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tbpe = %d\n", frame_descriptor->bpe); + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tis_compressed = %d\n", frame_descriptor->is_compressed); + + frame_plane_count = IA_CSS_N_FRAME_PLANES; + /* 
frame_plane_count = + * ia_css_frame_plane_count(frame_descriptor->frame_format_type); + */ + + verifexit(frame_plane_count > 0); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tplane_offsets[%d]: [\n", frame_plane_count); + for (i = 0; i < (int)frame_plane_count - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", frame_descriptor->plane_offsets[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d ]\n", frame_descriptor->plane_offsets[i]); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tdimension[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", frame_descriptor->dimension[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d }\n", frame_descriptor->dimension[i]); + + COMPILATION_ERROR_IF(0 > (IA_CSS_N_DATA_DIMENSION - 2)); + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tstride[%d] = {\n", IA_CSS_N_DATA_DIMENSION - 1); + i = 0; + if (IA_CSS_N_DATA_DIMENSION > 2) { + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 2; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", frame_descriptor->stride[i]); + } + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d }\n", frame_descriptor->stride[i]); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_descriptor_print failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_fragment_descriptor_print( + const ia_css_fragment_descriptor_t *fragment_descriptor, + void *fid) +{ + int retval = -1; + int i; + + NOT_USED(fid); + + IA_CSS_TRACE_0(PSYSAPI_DATA, INFO, + "ia_css_fragment_descriptor_print(): enter:\n"); + + verifexit(fragment_descriptor != NULL); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "dimension[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", fragment_descriptor->dimension[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d }\n", fragment_descriptor->dimension[i]); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "index[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", fragment_descriptor->index[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d }\n", fragment_descriptor->index[i]); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "offset[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", fragment_descriptor->offset[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, "\t%4d }\n", + fragment_descriptor->offset[i]); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_fragment_descriptor_print failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +ia_css_frame_format_bitmap_t ia_css_frame_format_bit_mask( + const ia_css_frame_format_type_t frame_format_type) +{ + ia_css_frame_format_bitmap_t bit_mask = 0; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_format_bit_mask(): enter:\n"); + + if ((frame_format_type < IA_CSS_N_FRAME_FORMAT_TYPES) && + (frame_format_type < IA_CSS_FRAME_FORMAT_BITMAP_BITS)) { + bit_mask = (ia_css_frame_format_bitmap_t)1 << frame_format_type; + } else { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_format_bit_mask invalid argument\n"); + } + + return bit_mask; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +ia_css_frame_format_bitmap_t ia_css_frame_format_bitmap_clear(void) +{ + 
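+ /* A cleared bitmap is the usual starting point; an illustrative
+ * (hypothetical) caller would build a set of supported formats as:
+ *
+ * ia_css_frame_format_bitmap_t bm = ia_css_frame_format_bitmap_clear();
+ *
+ * bm |= ia_css_frame_format_bit_mask(IA_CSS_DATA_FORMAT_NV12);
+ * bm |= ia_css_frame_format_bit_mask(IA_CSS_DATA_FORMAT_YUV420);
+ */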
IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_format_bitmap_clear(): enter:\n"); + + return 0; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +size_t ia_css_sizeof_frame_descriptor( + const uint8_t plane_count) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_sizeof_frame_descriptor(): enter:\n"); + + verifexit(plane_count > 0); + size += sizeof(ia_css_frame_descriptor_t); + size += plane_count * sizeof(uint32_t); + +EXIT: + if (0 == plane_count) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_sizeof_frame_descriptor invalid argument\n"); + } + return size; +} + +#endif /* __IA_CSS_PROGRAM_GROUP_DATA_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/bxtB0/ia_css_psys_transport_dep.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/bxtB0/ia_css_psys_transport_dep.h new file mode 100644 index 000000000000..7bb145c1b183 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/bxtB0/ia_css_psys_transport_dep.h @@ -0,0 +1,35 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TRANSPORT_DEP_H +#define __IA_CSS_PSYS_TRANSPORT_DEP_H + +/* + * The ID's of the Psys specific queues. + */ +typedef enum ia_css_psys_cmd_queues { + /**< The in-order queue for scheduled process groups */ + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID = 0, + /**< The in-order queue for commands changing psys or + * process group state + */ + IA_CSS_PSYS_CMD_QUEUE_DEVICE_ID, + /**< An in-order queue for dedicated PPG commands */ + IA_CSS_PSYS_CMD_QUEUE_PPG0_COMMAND_ID, + /**< An in-order queue for dedicated PPG commands */ + IA_CSS_PSYS_CMD_QUEUE_PPG1_COMMAND_ID, + IA_CSS_N_PSYS_CMD_QUEUE_ID +} ia_css_psys_cmd_queue_ID_t; + +#endif /* __IA_CSS_PSYS_TRANSPORT_DEP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device.h new file mode 100644 index 000000000000..dc8fa531b11e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device.h @@ -0,0 +1,516 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_DEVICE_H +#define __IA_CSS_PSYS_DEVICE_H + +#include "ia_css_psys_init.h" +#include "ia_css_psys_transport.h" + +/*! 
\file */ + +/** @file ia_css_psys_device.h + * + * Define the interface to open the psys specific communication layer + * instance + */ + +#include /* vied_vaddress_t */ + +#include +#include + +#include +#include + +#define IA_CSS_PSYS_STATE_READY_PATTERN (0xF7F7F7F7) +#define IA_CSS_PSYS_STATE_RUNNING_PATTERN (0xE6E6E6E6) +#define IA_CSS_PSYS_STATE_STARTING_PATTERN (0xD5D5D5D5) +#define IA_CSS_PSYS_STATE_STARTED_PATTERN (0xC4C4C4C4) +#define IA_CSS_PSYS_STATE_INITIALIZING_PATTERN (0xB3B3B3B3) +#define IA_CSS_PSYS_STATE_INITIALIZED_PATTERN (0xA0A0A0A0) + +/* + * Defines the state of psys: + * - IA_CSS_PSYS_STATE_UNKNOWN = psys status is unknown (or not recognized) + * - IA_CSS_PSYS_STATE_INITIALIZING = some of the psys components are + * not initialized yet + * - IA_CSS_PSYS_STATE_INITIALIZED = psys components are initialized + * - IA_CSS_PSYS_STATE_STARTING = some of the psys components are initialized + * but not started yet + * - IA_CSS_PSYS_STATE_STARTED = psys components are started + * - IA_CSS_PSYS_STATE_RUNNING = some of the psys components are started + * but not ready yet + * - IA_CSS_PSYS_STATE_READY = psys is ready + * The state of psys can be obtained by calling ia_css_psys_check_state() +*/ +typedef enum ia_css_psys_state { + IA_CSS_PSYS_STATE_UNKNOWN = 0, /**< psys state is unknown */ + /**< some of the psys components are not initialized yet */ + IA_CSS_PSYS_STATE_INITIALIZING = IA_CSS_PSYS_STATE_INITIALIZING_PATTERN, + /**< psys components are initialized */ + IA_CSS_PSYS_STATE_INITIALIZED = IA_CSS_PSYS_STATE_INITIALIZED_PATTERN, + /**< some of the psys components are not started yet */ + IA_CSS_PSYS_STATE_STARTING = IA_CSS_PSYS_STATE_STARTING_PATTERN, + /**< psys components are started */ + IA_CSS_PSYS_STATE_STARTED = IA_CSS_PSYS_STATE_STARTED_PATTERN, + /**< some of the psys components are not ready yet */ + IA_CSS_PSYS_STATE_RUNNING = IA_CSS_PSYS_STATE_RUNNING_PATTERN, + /**< psys is ready */ + IA_CSS_PSYS_STATE_READY = IA_CSS_PSYS_STATE_READY_PATTERN, +} ia_css_psys_state_t; + +extern struct ia_css_syscom_context *psys_syscom; +#if HAS_DUAL_CMD_CTX_SUPPORT +extern struct ia_css_syscom_context *psys_syscom_secure; +#endif + +/*! Print the syscom creation descriptor to file/stream + + @param config[in] Psys syscom descriptor + @param fid[out] file/stream handle + + @return < 0 on error +*/ +extern int ia_css_psys_config_print( + const struct ia_css_syscom_config *config, void *fid); + +/*! Print the Psys syscom object to file/stream + + @param context[in] Psys syscom object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_psys_print( + const struct ia_css_syscom_context *context, void *fid); + +/*! Create the syscom creation descriptor + + @return NULL on error + */ +extern struct ia_css_syscom_config *ia_css_psys_specify(void); + +#if HAS_DUAL_CMD_CTX_SUPPORT +/*! Create the syscom creation descriptor for secure stream + + @param vtl0_addr_mask[in] VTL0 address mask that will be stored in 'secure' ctx + @return NULL on error + */ +extern struct ia_css_syscom_config *ia_css_psys_specify_secure(unsigned int vtl0_addr_mask); +#endif + +/*! Compute the size of storage required for allocating the Psys syscom object + + @param config[in] Psys syscom descriptor + + @return 0 on error + */ +extern size_t ia_css_sizeof_psys( + struct ia_css_syscom_config *config); + +#if HAS_DUAL_CMD_CTX_SUPPORT +/*! Open (and map the storage for) the Psys syscom object + This is the same as ia_css_psys_open() excluding server start. + Target for VTIO usage where multiple syscom objects need to be + created first before this API is invoked. + + @param buffer[in] storage buffers for the syscom object + in the kernel virtual memory space and + its Psys mapped version + @param config[in] Psys syscom descriptor + @return NULL on error + */ + +extern struct ia_css_syscom_context *ia_css_psys_context_create( + const struct ia_css_psys_buffer_s *buffer, + struct ia_css_syscom_config *config); + +/*! Store the parameters of the Psys syscom object in DMEM, so + they can be communicated with FW. This step needs to be invoked + after SPC starts in ia_css_psys_open(), so SPC DMEM access blocker + programming already takes effect. + + @param context[in] Psys syscom object + @param config[in] Psys syscom descriptor + @return 0 if successful + */ +extern int ia_css_psys_context_store_dmem( + struct ia_css_syscom_context *context, + struct ia_css_syscom_config *config); + +/*! Start PSYS Server. Psys syscom object must have been created already. + Target for VTIO usage where multiple syscom objects need to be + created first before this API is invoked. + @param config[in] Psys syscom descriptor + + @return 0 if psys open started successfully + */ +extern int ia_css_psys_open( + struct ia_css_syscom_config *config); +#else +/*! Open (and map the storage for) the Psys syscom object + + @param buffer[in] storage buffers for the syscom object + in the kernel virtual memory space and + its Psys mapped version + @param config[in] Psys syscom descriptor + + Precondition(1): The buffer must be large enough to hold the syscom object. + Its size must be computed with the function "ia_css_sizeof_psys()". + The buffer must be created in the kernel memory space. + + Precondition(2): If buffer == NULL, the storage allocations and mapping + are performed in this function. Config must hold the handle to the Psys + virtual memory space + + Postcondition: The context is initialised in the provided/created buffer. + The syscom context pointer is the kernel space handle to the syscom object + + @return NULL on error + */ +extern struct ia_css_syscom_context *ia_css_psys_open( + const struct ia_css_psys_buffer_s *buffer, + struct ia_css_syscom_config *config); +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +/*! Completes the psys open procedure. Must be called repeatedly + until it succeeds or the driver determines the boot sequence has failed. + + @param context[in] Psys syscom object + + @return false if psys open has not completed successfully + */ +extern bool ia_css_psys_open_is_ready( + struct ia_css_syscom_context *context); + +#if HAS_DUAL_CMD_CTX_SUPPORT +/*! Request close of a PSYS context + * The functionality is the same as ia_css_psys_close() which closes the PSYS syscom object. + * Counterpart of ia_css_psys_context_create() + * @param context[in]: Psys context + * @return NULL if close is successful, the context pointer otherwise + */ +extern struct ia_css_syscom_context *ia_css_psys_context_destroy( + struct ia_css_syscom_context *context); + +/*! Request close of a PSYS device for VTIO case + * @param None + * @return 0 if successful + */ +extern int ia_css_psys_close(void); +#else +/*! Request close of a PSYS context + * @param context[in]: Psys context + * @return NULL if close is successful, the context pointer otherwise + */ +extern struct ia_css_syscom_context *ia_css_psys_close( + struct ia_css_syscom_context *context); +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +/*! Unmap and free the storage of the PSYS context + * @param context[in] Psys context + * @param force[in] Force release even if device is busy + * @return 0 if release is successful + * EFAULT if context is invalid + * EBUSY if device is not yet idle, and force==0 + */ +extern int ia_css_psys_release( + struct ia_css_syscom_context *context, + bool force); + +/*! Checks the state of the Psys syscom object + + @param context[in] Psys syscom object + + @return State of the syscom object + */ +extern ia_css_psys_state_t ia_css_psys_check_state( + struct ia_css_syscom_context *context); + +/*!Indicate if the designated cmd queue in the Psys syscom object is full + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + @return true if the cmd queue is full or on error + */ + +extern bool ia_css_is_psys_cmd_queue_full( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Indicate if the designated cmd queue in the Psys syscom object is not full + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + @return false if the cmd queue is full or on error + */ +extern bool ia_css_is_psys_cmd_queue_not_full( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Indicate if the designated cmd queue in the Psys syscom object holds N space + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + @param N[in] Number of messages + + @return false if the cmd queue space is unavailable or on error + */ +extern bool ia_css_has_psys_cmd_queue_N_space( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const unsigned int N); + +/*!Return the free space count in the designated cmd queue in the + * Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + @return the space, < 0 on error + */ +extern int ia_css_psys_cmd_queue_get_available_space( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Indicate if there are any messages pending in the Psys syscom + * object event queues + + @param context[in] Psys syscom object + + @return false if there are no messages or on error + */ +extern bool ia_css_any_psys_event_queue_not_empty( + struct ia_css_syscom_context *context); + +/*!Indicate if the designated event queue in the Psys syscom object is empty + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + + @return true if the event queue is empty or on error + */ +extern bool ia_css_is_psys_event_queue_empty( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +/*!Indicate if the designated event queue in the Psys syscom object is not empty + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + + @return false if the event queue is empty or on error + */ +extern bool ia_css_is_psys_event_queue_not_empty( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +/*!Indicate if the designated event queue + * in the Psys syscom object holds N items + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + @param N[in] Number of messages + + @return false if the event queue has insufficient messages + available or on error +*/ +extern bool ia_css_has_psys_event_queue_N_msgs( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + const unsigned int N); + +/*!Return the message count in the designated event
queue in the + * Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + + @return the messages, < 0 on error + */ +extern int ia_css_psys_event_queue_get_available_msgs( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +/*! Send (pass by value) a command on a queue in the Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID +@param cmd_msg_buffer[in] pointer to the command message buffer + +Precondition: The command message buffer must be large enough + to hold the command + +Postcondition: Either 0 or 1 commands have been sent + +Note: The message size is fixed and determined on creation + + @return the number of sent commands (1), <= 0 on error + */ +extern int ia_css_psys_cmd_queue_send( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const void *cmd_msg_buffer); + +/*! Send (pass by value) N commands on a queue in the Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + @param cmd_msg_buffer[in] Pointer to the command message buffer +@param N[in] Number of commands + +Precondition: The command message buffer must be large enough + to hold the commands + +Postcondition: Either 0 or up to and including N commands have been sent + + Note: The message size is fixed and determined on creation + + @return the number of sent commands, <= 0 on error + */ +extern int ia_css_psys_cmd_queue_send_N( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const void *cmd_msg_buffer, + const unsigned int N); + +/*! Receive (pass by value) an event from an event queue in the + * Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + @param event_msg_buffer[out] pointer to the event message buffer + + Precondition: The event message buffer must be large enough to hold the event + + Postcondition: Either 0 or 1 events have been received + + Note: The event size is fixed and determined on creation + + @return the number of received events (1), <= 0 on error + */ +extern int ia_css_psys_event_queue_receive( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + void *event_msg_buffer); + +/*! 
Receive (pass by value) N events from an event queue in the + * Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + @param event_msg_buffer[out] pointer to the event message buffer + @param N[in] Number of events + + Precondition: The event buffer must be large enough to hold the events + + Postcondition: Either 0 or up to and including N events have been received + + Note: The message size is fixed and determined on creation + + @return the number of received event messages, <= 0 on error + */ +extern int ia_css_psys_event_queue_receive_N( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + void *event_msg_buffer, + const unsigned int N); + + +/* + * Access functions to query the object stats + */ + + +/*!Return the size of the Psys syscom object + + @param context[in] Psys syscom object + + @return 0 on error + */ +extern size_t ia_css_psys_get_size( + const struct ia_css_syscom_context *context); + +/*!Return the number of cmd queues in the Psys syscom object + + @param context[in] Psys syscom object + + @return 0 on error + */ +extern unsigned int ia_css_psys_get_cmd_queue_count( + const struct ia_css_syscom_context *context); + +/*!Return the number of event queues in the Psys syscom object + + @param context[in] Psys syscom object + + @return 0 on error + */ +extern unsigned int ia_css_psys_get_event_queue_count( + const struct ia_css_syscom_context *context); + +/*!Return the size of the indicated Psys command queue + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + Note: The queue size is expressed in the number of fields + + @return 0 on error + */ +extern size_t ia_css_psys_get_cmd_queue_size( + const struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Return the size of the indicated Psys event queue + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + + Note: The queue size is expressed in the number of fields + + @return 0 on error + */ +extern size_t ia_css_psys_get_event_queue_size( + const struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +/*!Return the command message size of the indicated Psys command queue + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + Note: The message size is expressed in uint8_t + + @return 0 on error + */ +extern size_t ia_css_psys_get_cmd_msg_size( + const struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Return the event message size of the indicated Psys event queue + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + + Note: The message size is expressed in uint8_t + + @return 0 on error + */ +extern size_t ia_css_psys_get_event_msg_size( + const struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +#endif /* __IA_CSS_PSYS_DEVICE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device_trace.h new file mode 100644 index 000000000000..8e5899bc66db --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device_trace.h @@ -0,0 +1,103 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_DEVICE_TRACE_H +#define __IA_CSS_PSYS_DEVICE_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_DEVICE_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_DEVICE_TRACING_OVERRIDE)) + #define PSYS_DEVICE_TRACE_LEVEL_CONFIG \ + PSYS_DEVICE_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_DEVICE_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_DEVICE_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_DEVICE_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_DEVICE_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DEVICE_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DEVICE_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_DEVICE_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DEVICE_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DEVICE_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_DEVICE_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DEVICE_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_DEVICE Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_DEVICE_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_DEVICE_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_DEVICE_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_DEVICE_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_DEVICE_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_DEVICE_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_DEVICE_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYS_DEVICE_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_init.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_init.h new file mode 100644 index 000000000000..1120b357632c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_init.h @@ -0,0 +1,37 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_INIT_H +#define __IA_CSS_PSYS_INIT_H + +#include /* vied_vaddress_t */ + +/* Init parameters passed to the fw on device open (non secure mode) */ +typedef struct ia_css_psys_server_init { + /* These members are used in PSS only and will be removed */ + /* Shared memory host address of pkg dir */ + unsigned long long host_ddr_pkg_dir; + /* Address of pkg_dir structure in DDR */ + vied_vaddress_t ddr_pkg_dir_address; + /* Size of Package dir in DDR */ + uint32_t pkg_dir_size; + + /* Prefetch configuration */ + /* enable prefetching on SPC, SPP0 and SPP1 */ + uint32_t icache_prefetch_sp; + /* enable prefetching on ISP0..N */ + uint32_t icache_prefetch_isp; +} ia_css_psys_server_init_t; + +#endif /* __IA_CSS_PSYS_INIT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_transport.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_transport.h new file mode 100644 index 000000000000..e0d1e935c221 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_transport.h @@ -0,0 +1,92 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TRANSPORT_H +#define __IA_CSS_PSYS_TRANSPORT_H + +#include /* ia_css_psys_cmd_queues */ +#include /* vied_vaddress_t */ + +#include + +typedef enum ia_css_psys_event_queues { + /**< The in-order queue for event returns */ + IA_CSS_PSYS_EVENT_QUEUE_MAIN_ID, + IA_CSS_N_PSYS_EVENT_QUEUE_ID +} ia_css_psys_event_queue_ID_t; + +typedef enum ia_css_psys_event_types { + /**< No error to report. */ + IA_CSS_PSYS_EVENT_TYPE_SUCCESS = 0, + /**< Unknown unhandled error */ + IA_CSS_PSYS_EVENT_TYPE_UNKNOWN_ERROR = 1, + /* Retrieving remote object: */ + /**< Object ID not found */ + IA_CSS_PSYS_EVENT_TYPE_RET_REM_OBJ_NOT_FOUND = 2, + /**< Objects too big, or size is zero.
*/ + IA_CSS_PSYS_EVENT_TYPE_RET_REM_OBJ_TOO_BIG = 3, + /**< Failed to load whole process group from tproxy/dma */ + IA_CSS_PSYS_EVENT_TYPE_RET_REM_OBJ_DDR_TRANS_ERR = 4, + /**< The proper package could not be found */ + IA_CSS_PSYS_EVENT_TYPE_RET_REM_OBJ_NULL_PKG_DIR_ADDR = 5, + /* Process group: */ + /**< Failed to run, error while loading frame */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAME_ERR = 6, + /**< Failed to run, error while loading fragment */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAGMENT_ERR = 7, + /**< The process count of the process group is zero */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_COUNT_ZERO = 8, + /**< Process(es) initialization */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_INIT_ERR = 9, + /**< Aborted (after host request) */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_ABORT = 10, + /**< NULL pointer in the process group */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_NULL = 11, + /**< Process group validation failed */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_VALIDATION_ERR = 12 +} ia_css_psys_event_type_t; + +#define IA_CSS_PSYS_CMD_BITS 64 +struct ia_css_psys_cmd_s { + /**< The command issued to the process group */ + uint16_t command; + /**< Message field of the command */ + uint16_t msg; + /**< The context reference (process group/buffer set/...) */ + uint32_t context_handle; +}; + +#define IA_CSS_PSYS_EVENT_BITS 128 +struct ia_css_psys_event_s { + /**< The (return) status of the command issued to + * the process group this event refers to + */ + uint16_t status; + /**< The command issued to the process group this event refers to */ + uint16_t command; + /**< The context reference (process group/buffer set/...) */ + uint32_t context_handle; + /**< This token (size) must match the token registered + * in a process group + */ + uint64_t token; +}; + +struct ia_css_psys_buffer_s { + /**< Host (kernel) virtual address of the buffer */ + void *host_buffer; + vied_vaddress_t *isp_buffer; +}; + +#endif /* __IA_CSS_PSYS_TRANSPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/src/ia_css_psys_device.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/src/ia_css_psys_device.c new file mode 100644 index 000000000000..c3ed98add7d8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/device/src/ia_css_psys_device.c @@ -0,0 +1,854 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+*/ + + +#include "ia_css_psys_device.h" +#include "ia_css_psys_device_trace.h" +#include "ia_css_psys_init.h" +#include "regmem_access.h" + +#include +#include +#include + +#include "ia_css_cell.h" + +#define IA_CSS_PSYS_CMD_QUEUE_SIZE 0x20 +#define IA_CSS_PSYS_EVENT_QUEUE_SIZE 0x40 + +static struct ia_css_syscom_queue_config ia_css_psys_cmd_queue_cfg[IA_CSS_N_PSYS_CMD_QUEUE_ID]; + +static struct ia_css_syscom_queue_config + ia_css_psys_event_queue_cfg[IA_CSS_N_PSYS_EVENT_QUEUE_ID] = { + {IA_CSS_PSYS_EVENT_QUEUE_SIZE, IA_CSS_PSYS_EVENT_BITS/8}, +}; + +static struct ia_css_syscom_config psys_syscom_config; +struct ia_css_syscom_context *psys_syscom; +#if HAS_DUAL_CMD_CTX_SUPPORT +static struct ia_css_syscom_config psys_syscom_config_secure; +struct ia_css_syscom_context *psys_syscom_secure; +#endif +static bool external_alloc = true; + +int ia_css_psys_config_print( + const struct ia_css_syscom_config *config, + void *fh) +{ + int retval = -1; + + NOT_USED(fh); + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_config_print(): enter:\n"); + + verifexit(config != NULL); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_config_print failed (%i)\n", retval); + } + return retval; +} + +int ia_css_psys_print( + const struct ia_css_syscom_context *context, + void *fh) +{ + int retval = -1; + + NOT_USED(fh); + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_print(): enter:\n"); + + verifexit(context != NULL); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_print failed (%i)\n", retval); + } + return retval; +} + +static void set_syscom_config(struct ia_css_syscom_config *config) +{ + int i; + + config->num_input_queues = IA_CSS_N_PSYS_CMD_QUEUE_ID; + config->num_output_queues = IA_CSS_N_PSYS_EVENT_QUEUE_ID; + /* The number of queues are different for different platforms + * so the array is initialized here + */ + for (i = 0; i < IA_CSS_N_PSYS_CMD_QUEUE_ID; i++) { + ia_css_psys_cmd_queue_cfg[i].queue_size = IA_CSS_PSYS_CMD_QUEUE_SIZE; + ia_css_psys_cmd_queue_cfg[i].token_size = IA_CSS_PSYS_CMD_BITS/8; + } + config->input = ia_css_psys_cmd_queue_cfg; + config->output = ia_css_psys_event_queue_cfg; + config->vtl0_addr_mask = 0; +} + +struct ia_css_syscom_config *ia_css_psys_specify(void) +{ + struct ia_css_syscom_config *config = &psys_syscom_config; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_specify(): enter:\n"); + set_syscom_config(config); + config->secure = false; + + return config; +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +struct ia_css_syscom_config *ia_css_psys_specify_secure(unsigned int vtl0_addr_mask) +{ + struct ia_css_syscom_config *config = &psys_syscom_config_secure; + + IA_CSS_TRACE_1(PSYSAPI_DEVICE, INFO, "ia_css_psys_specify_secure(mask %#x): enter:\n", vtl0_addr_mask); + set_syscom_config(config); + config->secure = true; + config->vtl0_addr_mask = vtl0_addr_mask; + return config; +} +#endif + +size_t ia_css_sizeof_psys( + struct ia_css_syscom_config *config) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_sizeof_psys(): enter:\n"); + + NOT_USED(config); + + return size; +} + +/* Internal function to create syscom_context */ +static struct ia_css_syscom_context *psys_context_create( + const struct ia_css_psys_buffer_s *buffer, + struct ia_css_syscom_config *config) +{ + struct ia_css_syscom_context *context; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "psys_context_create(): enter:\n"); + + if (config == NULL) + goto EXIT; + + if (buffer == NULL) { + /* Allocate locally */ + external_alloc = false; + } + + /* + * Here we would like to pass separately the sub-system ID + * and optionally the user pointer to be mapped, depending on + * where this open is called, and which virtual memory handles + * we see here. + */ + /* context = ia_css_syscom_open(get_virtual_memory_handle(vied_psys_ID), + * buffer, config); + */ + context = ia_css_syscom_open(config, NULL); + if (context == NULL) + goto EXIT; + + return context; + +EXIT: + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, "psys_context_create failed\n"); + return NULL; +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +struct ia_css_syscom_context *ia_css_psys_context_create( + const struct ia_css_psys_buffer_s *buffer, + struct ia_css_syscom_config *config) +{ + return psys_context_create(buffer, config); +} + +/* push context information to DMEM for FW to access */ +int ia_css_psys_context_store_dmem( + struct ia_css_syscom_context *context, + struct ia_css_syscom_config *config) +{ + return ia_css_syscom_store_dmem(context, config->ssid, config->vtl0_addr_mask); +} +#endif + +/* Internal function to start psys server */ +static int psys_start_server( + struct ia_css_syscom_config *config) +{ + ia_css_psys_server_init_t *server_config; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "psys_start_server(): enter:\n"); + + /* Configure SPC icache prefetching and start SPC */ + server_config = (ia_css_psys_server_init_t *)config->specific_addr; + IA_CSS_TRACE_1(PSYSAPI_DEVICE, INFO, "SPC prefetch: %d\n", + server_config->icache_prefetch_sp); + ia_css_cell_start_prefetch(config->ssid, SPC0, + server_config->icache_prefetch_sp); + return 0; +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +int ia_css_psys_open( + struct ia_css_syscom_config *config) +{ + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_open(): enter:\n"); + return psys_start_server(config); +} +#else +struct ia_css_syscom_context *ia_css_psys_open( + const struct ia_css_psys_buffer_s *buffer, + struct ia_css_syscom_config *config) +{ + struct ia_css_syscom_context *context; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_open(): enter:\n"); + + context = psys_context_create(buffer, config); + + /* Configure SPC icache prefetching and start SPC */ + psys_start_server(config); + + return context; +} +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +bool ia_css_psys_open_is_ready( + struct ia_css_syscom_context *context) +{ + int retval = -1; + bool ready = false; + unsigned int i; + int syscom_retval; + + verifexit(context != NULL); + + for (i = 0; i < IA_CSS_N_PSYS_CMD_QUEUE_ID; i++) { + syscom_retval = ia_css_syscom_send_port_open(context, i); + if (syscom_retval != 0) { + if (syscom_retval == FW_ERROR_BUSY) { + /* Do not print error */ + retval = 0; + } + /* Not ready yet */ + goto EXIT; + } + } + + for (i = 0; i < IA_CSS_N_PSYS_EVENT_QUEUE_ID; i++) { + syscom_retval = ia_css_syscom_recv_port_open(context, i); + if (syscom_retval != 0) { + if (syscom_retval == FW_ERROR_BUSY) { + /* Do not print error */ + retval = 0; + } + /* Not ready yet */ + goto EXIT; + } + } + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, + "ia_css_psys_open_is_ready(): complete:\n"); + + /* If this point is reached, do not print error */ + retval = 0; + /* If this point is reached, ready */ + ready = true; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_open_is_ready failed\n"); + } + return ready; +} + +/* Internal function to close syscom_context */ +static struct ia_css_syscom_context *psys_context_destroy( + struct ia_css_syscom_context *context) +{ + /* Success: return NULL, Error: return context pointer value + * Intention is to change return type to int (errno), + * see commented values. + */ + + unsigned int i; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "psys_context_destroy(): enter:\n"); + + /* NULL pointer check disabled, since there is no proper return value */ + + for (i = 0; i < IA_CSS_N_PSYS_CMD_QUEUE_ID; i++) { + if (ia_css_syscom_send_port_close(context, i) != 0) + return context; /* EINVAL */ + } + + for (i = 0; i < IA_CSS_N_PSYS_EVENT_QUEUE_ID; i++) { + if (ia_css_syscom_recv_port_close(context, i) != 0) + return context; /* EINVAL */ + } + + /* request device close */ + if (ia_css_syscom_close(context) != 0) + return context; /* EBUSY */ + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, + "psys_context_destroy(): leave: OK\n"); + return NULL; +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +struct ia_css_syscom_context *ia_css_psys_context_destroy( + struct ia_css_syscom_context *context) +{ + return psys_context_destroy(context); +} + +int ia_css_psys_close(void) +{ + /* Intentionally left blank for now since syscom objects should have + * been destroyed already by prior ia_css_psys_context_destroy() calls. + */ + return 0; +} +#else +struct ia_css_syscom_context *ia_css_psys_close( + struct ia_css_syscom_context *context) +{ + return psys_context_destroy(context); +} +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +int ia_css_psys_release( + struct ia_css_syscom_context *context, + bool force) +{ + if (context == NULL) + return -EFAULT; + + /* try to free resources */ + if (ia_css_syscom_release(context, force) != 0) + return -EBUSY; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, + "ia_css_psys_release(): leave: OK\n"); + return 0; +} + +ia_css_psys_state_t ia_css_psys_check_state( + struct ia_css_syscom_context *context) +{ + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_check_state(): enter:\n"); + + NOT_USED(context); + + /* For the time being, return the READY state to be used by SPC test */ + return IA_CSS_PSYS_STATE_READY; +} + +bool ia_css_is_psys_cmd_queue_full( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + bool is_full = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_is_psys_cmd_queue_full(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_send_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + is_full = (num_tokens == 0); + retval = 0; +EXIT: + if (retval != 0) { + is_full = true; + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_is_psys_cmd_queue_full failed\n"); + } + return is_full; +} + +bool ia_css_is_psys_cmd_queue_not_full( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + bool is_not_full = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_is_psys_cmd_queue_not_full(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_send_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + is_not_full = (num_tokens != 0); + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_is_psys_cmd_queue_not_full failed\n"); + } + return is_not_full; +} + +bool ia_css_has_psys_cmd_queue_N_space( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const unsigned int N) +{ + bool has_N_space = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_has_psys_cmd_queue_N_space(): enter:\n"); + verifexit(context != NULL); + +
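+ /* Query the free token slots on the send port; a negative return + * value from ia_css_syscom_send_port_available() indicates an + * error and takes the verifexit() error path. + */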
num_tokens = ia_css_syscom_send_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + has_N_space = ((unsigned int)num_tokens >= N); + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_has_psys_cmd_queue_N_space failed\n"); + } + return has_N_space; +} + +int ia_css_psys_cmd_queue_get_available_space( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + int N_space = -1; + int num_tokens; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_cmd_queue_get_available_space(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_send_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + N_space = (int)(num_tokens); +EXIT: + if (N_space < 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_cmd_queue_get_available_space failed\n"); + } + return N_space; +} + +bool ia_css_any_psys_event_queue_not_empty( + struct ia_css_syscom_context *context) +{ + ia_css_psys_event_queue_ID_t i; + bool any_msg = false; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_any_psys_event_queue_not_empty(): enter:\n"); + verifexit(context != NULL); + + for (i = (ia_css_psys_event_queue_ID_t)0; + i < IA_CSS_N_PSYS_EVENT_QUEUE_ID; i++) { + any_msg = + any_msg || ia_css_is_psys_event_queue_not_empty(context, i); + } + +EXIT: + return any_msg; +} + +bool ia_css_is_psys_event_queue_empty( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + bool is_empty = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_is_psys_event_queue_empty(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_recv_port_available(context, (unsigned int)id); + verifexit(num_tokens >= 0); + + is_empty = (num_tokens == 0); + retval = 0; +EXIT: + if (retval != 0) { + is_empty = true; + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_is_psys_event_queue_empty failed\n"); + } + return is_empty; +} + +bool ia_css_is_psys_event_queue_not_empty( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + bool is_not_empty = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_is_psys_event_queue_not_empty(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_recv_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + is_not_empty = (num_tokens != 0); + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_is_psys_event_queue_not_empty failed\n"); + } + return is_not_empty; +} + +bool ia_css_has_psys_event_queue_N_msgs( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + const unsigned int N) +{ + bool has_N_msgs = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_has_psys_event_queue_N_msgs(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_recv_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + has_N_msgs = ((unsigned int)num_tokens >= N); + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_has_psys_event_queue_N_msgs failed\n"); + } + return has_N_msgs; +} + +int ia_css_psys_event_queue_get_available_msgs( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + int N_msgs = -1; + int num_tokens; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_event_queue_get_available_msgs(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_recv_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + N_msgs = (int)(num_tokens); +EXIT: + if (N_msgs < 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_event_queue_get_available_msgs failed\n"); + } + return N_msgs; +} + +int ia_css_psys_cmd_queue_send( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const void *cmd_msg_buffer) +{ + int count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_cmd_queue_send(): enter:\n"); + verifexit(context != NULL); + + /* The ~full check fails on receive queues */ + verifexit(ia_css_is_psys_cmd_queue_not_full(context, id)); + verifexit(cmd_msg_buffer != NULL); + + verifexit(ia_css_syscom_send_port_transfer(context, (unsigned int)id, + cmd_msg_buffer) >= 0); + + count = 1; +EXIT: + if (count == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_cmd_queue_send failed\n"); + } + return count; +} + +int ia_css_psys_cmd_queue_send_N( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const void *cmd_msg_buffer, + const unsigned int N) +{ + struct ia_css_psys_cmd_s *cmd_msg_buffer_loc = + (struct ia_css_psys_cmd_s *)cmd_msg_buffer; + int count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_cmd_queue_send_N(): enter:\n"); + verifexit(context != NULL); + + for (count = 0; count < (int)N; count++) { + int count_loc = ia_css_psys_cmd_queue_send(context, id, + (void *)(&cmd_msg_buffer_loc[count])); + + verifexit(count_loc == 1); + } + +EXIT: + if ((unsigned int) count < N) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_cmd_queue_send_N failed\n"); + } + return count; +} + +int ia_css_psys_event_queue_receive( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + void *event_msg_buffer) +{ + int count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_event_queue_receive(): enter:\n"); + + verifexit(context != NULL); + /* The ~empty check fails on send queues */ + verifexit(ia_css_is_psys_event_queue_not_empty(context, id)); + verifexit(event_msg_buffer != NULL); + + verifexit(ia_css_syscom_recv_port_transfer(context, (unsigned int)id, + event_msg_buffer) >= 0); + + count = 1; +EXIT: + if (count == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_event_queue_receive failed\n"); + } + return count; +} + +int ia_css_psys_event_queue_receive_N( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + void *event_msg_buffer, + const unsigned int N) +{ + struct ia_css_psys_event_s *event_msg_buffer_loc; + int count; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_event_queue_receive_N(): enter:\n"); + + event_msg_buffer_loc = (struct ia_css_psys_event_s *)event_msg_buffer; + + for (count = 0; count < (int)N; count++) { + int count_loc = ia_css_psys_event_queue_receive(context, id, + (void *)(&event_msg_buffer_loc[count])); + + verifexit(count_loc == 1); + } + +EXIT: + if ((unsigned int) count < N) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_event_queue_receive_N failed\n"); + } + return count; +} + +size_t ia_css_psys_get_size( + const struct ia_css_syscom_context *context) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ?
*/ +EXIT: + if (size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_size failed\n"); + } + return size; +} + +unsigned int ia_css_psys_get_cmd_queue_count( + const struct ia_css_syscom_context *context) +{ + unsigned int count = 0; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_cmd_queue_count(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + count = (unsigned int)IA_CSS_N_PSYS_CMD_QUEUE_ID; + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_cmd_queue_count failed\n"); + } + return count; +} + +unsigned int ia_css_psys_get_event_queue_count( + const struct ia_css_syscom_context *context) +{ + unsigned int count = 0; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_event_queue_count(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + count = (unsigned int)IA_CSS_N_PSYS_EVENT_QUEUE_ID; + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_event_queue_count failed\n"); + } + return count; +} + +size_t ia_css_psys_get_cmd_queue_size( + const struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + size_t queue_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_cmd_queue_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + queue_size = ia_css_psys_cmd_queue_cfg[id].queue_size; +EXIT: + if (queue_size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_cmd_queue_size failed\n"); + } + return queue_size; +} + +size_t ia_css_psys_get_event_queue_size( + const struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + size_t queue_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_event_queue_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + queue_size = ia_css_psys_event_queue_cfg[id].queue_size; +EXIT: + if (queue_size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_event_queue_size failed\n"); + } + return queue_size; +} + +size_t ia_css_psys_get_cmd_msg_size( + const struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + size_t msg_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_cmd_msg_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + msg_size = ia_css_psys_cmd_queue_cfg[id].token_size; +EXIT: + if (msg_size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_cmd_msg_size failed\n"); + } + return msg_size; +} + +size_t ia_css_psys_get_event_msg_size( + const struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + size_t msg_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_event_msg_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? 
*/ + NOT_USED(context); + msg_size = ia_css_psys_event_queue_cfg[id].token_size; +EXIT: + if (msg_size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_event_msg_size failed\n"); + } + return msg_size; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_buffer_set.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_buffer_set.h new file mode 100644 index 000000000000..392b4359353f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_buffer_set.h @@ -0,0 +1,174 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __IA_CSS_PSYS_BUFFER_SET_H +#define __IA_CSS_PSYS_BUFFER_SET_H + +#include "ia_css_base_types.h" +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_psys_process_types.h" +#include "ia_css_terminal_types.h" + +#define N_UINT64_IN_BUFFER_SET_STRUCT 1 +#define N_UINT16_IN_BUFFER_SET_STRUCT 1 +#define N_UINT8_IN_BUFFER_SET_STRUCT 1 +#define N_PADDING_UINT8_IN_BUFFER_SET_STRUCT 5 +#define SIZE_OF_BUFFER_SET \ + (N_UINT64_IN_BUFFER_SET_STRUCT * IA_CSS_UINT64_T_BITS \ + + VIED_VADDRESS_BITS \ + + VIED_VADDRESS_BITS \ + + N_UINT16_IN_BUFFER_SET_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_UINT8_IN_BUFFER_SET_STRUCT * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_BUFFER_SET_STRUCT * IA_CSS_UINT8_T_BITS) + +typedef struct ia_css_buffer_set_s ia_css_buffer_set_t; + +struct ia_css_buffer_set_s { + /* Token for user context reference */ + uint64_t token; + /* IPU virtual address of this buffer set */ + vied_vaddress_t ipu_virtual_address; + /* IPU virtual address of the process group corresponding to this buffer set */ + vied_vaddress_t process_group_handle; + /* Number of terminal buffer addresses in this structure */ + uint16_t terminal_count; + /* Frame id to associate with this buffer set */ + uint8_t frame_counter; + /* Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_BUFFER_SET_STRUCT]; +}; + + +/*! Construct a buffer set object at specified location + + @param buffer_set_mem[in] memory location to create buffer set object + @param process_group[in] process group corresponding to this buffer set + @param frame_counter[in] frame number for this buffer set object + + @return pointer to buffer set object on success, NULL on error + */ +ia_css_buffer_set_t *ia_css_buffer_set_create( + void *buffer_set_mem, + const ia_css_process_group_t *process_group, + const unsigned int frame_counter); + +/*! Compute size (in bytes) required for full buffer set object + + @param process_group[in] process group corresponding to this buffer set + + @return size in bytes of buffer set object on success, 0 on error + */ +size_t ia_css_sizeof_buffer_set( + const ia_css_process_group_t *process_group); + +/*! Set a buffer address in a buffer set object + + @param buffer_set[in] buffer set object to set buffer in + @param terminal_index[in] terminal index to use as a reference between + buffer and terminal + @param buffer[in] buffer address to store + + @return 0 on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_buffer_set_set_buffer( + ia_css_buffer_set_t *buffer_set, + const unsigned int terminal_index, + const vied_vaddress_t buffer); + +/*! Get virtual buffer address from a buffer set object and terminal object by + resolving the index used + + @param buffer_set[in] buffer set object to get buffer from + @param terminal[in] terminal object to get buffer of + + @return virtual buffer address on success, VIED_NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_buffer_set_get_buffer( + const ia_css_buffer_set_t *buffer_set, + const ia_css_terminal_t *terminal); + +/*! Set ipu virtual address of a buffer set object within the buffer set object + + @param buffer_set[in] buffer set object to set ipu address in + @param ipu_vaddress[in] ipu virtual address of the buffer set object + + @return 0 on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_buffer_set_set_ipu_address( + ia_css_buffer_set_t *buffer_set, + const vied_vaddress_t ipu_vaddress); + +/*! Get ipu virtual address from a buffer set object + + @param buffer_set[in] buffer set object to get ipu address from + + @return virtual buffer set address on success, VIED_NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_buffer_set_get_ipu_address( + const ia_css_buffer_set_t *buffer_set); + +/*! Set process group handle in a buffer set object + + @param buffer_set[in] buffer set object to set handle in + @param process_group_handle[in] process group handle of the buffer set + object + + @return 0 on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_buffer_set_set_process_group_handle( + ia_css_buffer_set_t *buffer_set, + const vied_vaddress_t process_group_handle); + +/*! Get process group handle from a buffer set object + + @param buffer_set[in] buffer set object to get handle from + + @return virtual process group address on success, VIED_NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_buffer_set_get_process_group_handle( + const ia_css_buffer_set_t *buffer_set); + +/*! Set token of a buffer set object within the buffer set object + + @param buffer_set[in] buffer set object to set token in + @param token[in] token of the buffer set object + + @return 0 on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_buffer_set_set_token( + ia_css_buffer_set_t *buffer_set, + const uint64_t token); + +/*!
Get token from a buffer set object + + @param buffer_set[in] buffer set object to get token from + + @return token on success, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint64_t ia_css_buffer_set_get_token( + const ia_css_buffer_set_t *buffer_set); + +#ifdef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_buffer_set_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_BUFFER_SET_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_storage_class.h new file mode 100644 index 000000000000..9a1e3a7a1294 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_storage_class.h @@ -0,0 +1,28 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +#define __IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#define IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +#else +#define IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_trace.h new file mode 100644 index 000000000000..e8a979dfce0b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_trace.h @@ -0,0 +1,103 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+*/ + +#ifndef __IA_CSS_PSYS_DYNAMIC_TRACE_H +#define __IA_CSS_PSYS_DYNAMIC_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_DYNAMIC_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_DYNAMIC_TRACING_OVERRIDE)) + #define PSYS_DYNAMIC_TRACE_LEVEL_CONFIG \ + PSYS_DYNAMIC_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_DYNAMIC_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_DYNAMIC_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_DYNAMIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_DYNAMIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DYNAMIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_DYNAMIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DYNAMIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_DYNAMIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_DYNAMIC Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_DYNAMIC_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYS_DYNAMIC_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.h new file mode 100644 index 000000000000..f4ef80f74213 --- /dev/null +++
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.h @@ -0,0 +1,396 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_H +#define __IA_CSS_PSYS_PROCESS_H + +/*! \file */ + +/** @file ia_css_psys_process.h + * + * Define the methods on the process object that are not part of + * a single interface + */ + +#include +#include + +#include + +#include /* uint8_t */ + +/* + * Creation + */ +#include + +/* + * Internal resources + */ +#include + +/* + * Process manager + */ +#include + +/* + * Command processor + */ + +/*! Execute a command locally or send it to be processed remotely + + @param process[in] process object + @param cmd[in] command + + @return < 0 on invalid argument(s) or process state + */ +extern int ia_css_process_cmd( + ia_css_process_t *process, + const ia_css_process_cmd_t cmd); + +/*! Get the internal memory offset of the process object + + @param process[in] process object + @param mem_id[in] memory id + + @return internal memory offset, + IA_CSS_PROCESS_INVALID_OFFSET on invalid argument(s) +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_size_t ia_css_process_get_int_mem_offset( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_id); + + +/*! Get the external memory offset of the process object + + @param process[in] process object + @param mem_id[in] memory id + + @return external memory offset, + IA_CSS_PROCESS_INVALID_OFFSET on invalid argument(s) +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_size_t ia_css_process_get_ext_mem_offset( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id); + + +/*! Get the stored size of the process object + + @param process[in] process object + + @return size, 0 on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +size_t ia_css_process_get_size(const ia_css_process_t *process); + +/*! Get the (pointer to) the process group parent of the process object + + @param process[in] process object + + @return the pointer to the parent, NULL on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_group_t *ia_css_process_get_parent( + const ia_css_process_t *process); + +/*! Set the (pointer to) the process group parent of the process object + + @param process[in] process object + @param parent[in] (pointer to the) process group parent object + + @return < 0 on invalid argument(s) + */ +extern int ia_css_process_set_parent( + ia_css_process_t *process, + ia_css_process_group_t *parent); + +/*! Get the unique ID of program used by the process object + + @param process[in] process object + + @return ID, 0 on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_ID_t ia_css_process_get_program_ID( + const ia_css_process_t *process); + +/*! 
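 */

/*
 * A minimal usage sketch (illustrative only, not part of the interface):
 * querying a process object defensively. Per the contracts above, the
 * getters return NULL or 0 on invalid arguments, so a caller can validate
 * a handle without extra state. The helper name is hypothetical.
 */
static int example_validate_process(const ia_css_process_t *process)
{
	if (process == NULL)
		return -1;
	if (ia_css_process_get_parent(process) == NULL)
		return -1;	/* not yet attached to a process group */
	if (ia_css_process_get_program_ID(process) == 0)
		return -1;	/* 0 is the documented error value */
	return 0;
}

/*!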
Get the state of the process object + + @param process[in] process object + + @return state, limit value (IA_CSS_N_PROCESS_STATES) on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_state_t ia_css_process_get_state( + const ia_css_process_t *process); + +/*! Set the state of the process object + + @param process[in] process object + @param state[in] state of the process + + @return < 0 on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_set_state( + ia_css_process_t *process, + ia_css_process_state_t state); + +/*! Get the assigned cell of the process object + + @param process[in] process object + + @return cell ID, limit value (VIED_NCI_N_CELL_ID) on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_cell_ID_t ia_css_process_get_cell( + const ia_css_process_t *process); + +/*! Get the number of cells the process object depends on + + @param process[in] process object + + @return number of cells + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_get_cell_dependency_count( + const ia_css_process_t *process); + +/*! Get the number of terminals the process object depends on + + @param process[in] process object + + @return number of terminals + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_get_terminal_dependency_count( + const ia_css_process_t *process); + +/*! Set n-th cell dependency of a process object + + @param process[in] Process object + @param dep_index[in] dep index + @param id[in] dep id + + @return < 0 on invalid process argument + */ +extern int ia_css_process_set_cell_dependency( + const ia_css_process_t *process, + const unsigned int dep_index, + const vied_nci_resource_id_t id); + +/*! Get n-th cell dependency of a process object + + @param process[in] Process object + @param cell_num[in] n-th cell + + @return n-th cell dependency, + IA_CSS_PROCESS_INVALID_DEPENDENCY on invalid argument(s) +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_id_t ia_css_process_get_cell_dependency( + const ia_css_process_t *process, + const unsigned int cell_num); + +/*! Set n-th terminal dependency of a process object + + @param process[in] Process object + @param dep_index[in] dep index + @param id[in] dep id + + @return < 0 on invalid argument(s) + */ +extern int ia_css_process_set_terminal_dependency( + const ia_css_process_t *process, + const unsigned int dep_index, + const vied_nci_resource_id_t id); + +/*! Get n-th terminal dependency of a process object + + @param process[in] Process object + @param terminal_num[in] n-th terminal + + @return n-th terminal dependency, + IA_CSS_PROCESS_INVALID_DEPENDENCY on invalid argument(s) +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_get_terminal_dependency( + const ia_css_process_t *process, + const unsigned int terminal_num); + +/*! Get the kernel bitmap of the process object + + @param process[in] process object + + @return process kernel bitmap + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_kernel_bitmap_t ia_css_process_get_kernel_bitmap( + const ia_css_process_t *process); + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t* ia_css_process_get_dfm_port_bitmap_ptr( + ia_css_process_t *process); + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t* ia_css_process_get_dfm_active_port_bitmap_ptr( + ia_css_process_t *process); + + +/*!
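 */

/*
 * Sketch (illustrative): walking the dependency arrays with the count and
 * getter pairs declared above. IA_CSS_PROCESS_INVALID_DEPENDENCY is the
 * documented error value of the cell dependency getter; the helper name
 * is hypothetical.
 */
static int example_check_cell_dependencies(const ia_css_process_t *process)
{
	uint8_t i;
	uint8_t count = ia_css_process_get_cell_dependency_count(process);

	for (i = 0; i < count; i++) {
		if (ia_css_process_get_cell_dependency(process, i) ==
		    IA_CSS_PROCESS_INVALID_DEPENDENCY)
			return -1;
	}
	return 0;
}

/*!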
Get the cells bitmap of the process object + + @param process[in] process object + + @return process cells bitmap + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t ia_css_process_get_cells_bitmap( + const ia_css_process_t *process); + +/*! Sets the dfm device resource allocation bitmap of + * the process object + + @param process[in] process object + @param dfm_dev_id[in] dfm device id + @param bitmap[in] resource bitmap + + @return < 0 on invalid argument(s) or process state + */ +int ia_css_process_set_dfm_port_bitmap( + ia_css_process_t *process, + const vied_nci_dev_dfm_id_t dfm_dev_id, + const vied_nci_resource_bitmap_t bitmap); + + +/*! Sets the active dfm ports bitmap of + * the process object + + @param process[in] process object + @param dfm_dev_id[in] dfm device id + @param bitmap[in] active ports bitmap + + @return < 0 on invalid argument(s) or process state + */ +int ia_css_process_set_dfm_active_port_bitmap( + ia_css_process_t *process, + const vied_nci_dev_dfm_id_t dfm_dev_id, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get the dfm port bitmap of the process object + + @param process[in] process object + @param dfm_res_id[in] dfm resource id + + @return bitmap of all DFM ports used by process, corresponding to the input dfm resource id + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t ia_css_process_get_dfm_port_bitmap( + const ia_css_process_t *process, + vied_nci_dev_dfm_id_t dfm_res_id); + +/*! Get the dfm active port bitmap of the process object + + @param process[in] process object + @param dfm_res_id[in] dfm resource id + + @return bitmap of all active DFM ports used by the process, corresponding to the input + dfm resource id + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t ia_css_process_get_dfm_active_port_bitmap( + const ia_css_process_t *process, + vied_nci_dev_dfm_id_t dfm_res_id); + + +/*! Sets the cells bitmap of + * the process object + + @param process[in] process object + @param bitmap[in] bitmap + + @return < 0 on invalid argument(s) or process state + */ +int ia_css_process_set_cells_bitmap( + ia_css_process_t *process, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get the device channel id-n resource allocation offset of the process object + + @param process[in] process object + @param dev_chn_id[in] channel id + + @return resource offset, IA_CSS_PROCESS_INVALID_OFFSET on invalid argument(s) + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_size_t ia_css_process_get_dev_chn( + const ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id); + +/*! Get the ext mem type-n resource id of the process object + + @param process[in] process object + @param mem_type[in] mem type + + @return resource offset, IA_CSS_PROCESS_INVALID_OFFSET on invalid argument(s) + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_mem_ID_t ia_css_process_get_ext_mem_id( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type); + + +/*! Sets the device channel id-n resource allocation offset of + * the process object + + @param process[in] process object + @param dev_chn_id[in] channel id + @param offset[in] resource offset + + @return < 0 on invalid argument(s) or process state + */ +int ia_css_process_set_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t offset); + +/*!
Boolean test if the process object type is valid + + @param process[in] process object + @param p_manifest[in] program manifest + + @return true if the process object is correct, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_process_valid( + const ia_css_process_t *process, + const ia_css_program_manifest_t *p_manifest); + +/*! Gets the program_idx from the process object + + @param process[in] process object + + @return program index + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_get_program_idx( + const ia_css_process_t *process); + +#ifdef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_process_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_PROCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.kernel.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.kernel.h new file mode 100644 index 000000000000..cab796560414 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.kernel.h @@ -0,0 +1,144 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_HSYS_KERNEL_H +#define __IA_CSS_PSYS_PROCESS_HSYS_KERNEL_H + +/*! \file */ + +/** @file ia_css_psys_process.hsys.kernel.h + * + * Define the methods on the process object: Hsys kernel interface + */ + +#include + +#include + +/* + * Internal resources + */ + +/*! Clear all resource (offset) specifications + + @param process[in] process object + + @return < 0 on error + */ +extern int ia_css_process_clear_all(ia_css_process_t *process); + +/*! Set the cell ID resource specification + + @param process[in] process object + @param cell_id[in] cell ID + + @return < 0 on error + */ +extern int ia_css_process_set_cell( + ia_css_process_t *process, + const vied_nci_cell_ID_t cell_id); + +/*! Clear cell ID resource specification + + @param process[in] process object + + @return < 0 on error + */ +extern int ia_css_process_clear_cell(ia_css_process_t *process); + +/*! Set the memory resource (offset) specification for a memory + that belongs to the cell that is assigned to the process + + @param process[in] process object + @param mem_type_id[in] mem type ID + @param offset[in] offset + + Precondition: The cell ID must be set + + @return < 0 on error + */ +extern int ia_css_process_set_int_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t offset); + +/*! Clear the memory resource (offset) specification for a memory + type that belongs to the cell that is assigned to the process + + @param process[in] process object + @param mem_type_id[in] mem type ID + + Precondition: The cell ID must be set + + @return < 0 on error + */ +extern int ia_css_process_clear_int_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id); + +/*!
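 */

/*
 * Sketch of the ordering constraint documented above: the cell must be
 * assigned before an internal memory offset can be set, and a failure can
 * be rolled back with ia_css_process_clear_all(). Names and values are
 * illustrative.
 */
static int example_assign_cell_and_mem(ia_css_process_t *process,
	vied_nci_cell_ID_t cell_id,
	vied_nci_mem_type_ID_t mem_type_id,
	vied_nci_resource_size_t offset)
{
	if (ia_css_process_set_cell(process, cell_id) < 0)
		return -1;
	/* precondition satisfied: the cell ID is now set */
	if (ia_css_process_set_int_mem(process, mem_type_id, offset) < 0) {
		(void)ia_css_process_clear_all(process);
		return -1;
	}
	return 0;
}

/*!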
Set the memory resource (offset) specification for a memory + that does not belong to the cell that is assigned to the process + + @param process[in] process object + @param mem_id[in] mem ID + @param offset[in] offset + + Precondition: The cell ID must be set + + @return < 0 on error + */ +extern int ia_css_process_set_ext_mem( + ia_css_process_t *process, + const vied_nci_mem_ID_t mem_id, + const vied_nci_resource_size_t offset); + +/*! Clear the memory resource (offset) specification for a memory + type that does not belong to the cell that is assigned to the process + + @param process[in] process object + @param mem_type_id[in] mem type ID + + Precondition: The cell ID must be set + + @return < 0 on error + */ +extern int ia_css_process_clear_ext_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Set a device channel resource (offset) specification + + @param process[in] process object + @param dev_chn_id[in] device channel ID + @param offset[in] offset + + @return < 0 on error + */ +extern int ia_css_process_set_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t offset); + +/*! Clear a device channel resource (offset) specification + + @param process[in] process object + @param dev_chn_id[in] device channel ID + + @return < 0 on error + */ +extern int ia_css_process_clear_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id); + +#endif /* __IA_CSS_PSYS_PROCESS_HSYS_KERNEL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.user.h new file mode 100644 index 000000000000..015a60b0e1af --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.user.h @@ -0,0 +1,85 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_HSYS_USER_H +#define __IA_CSS_PSYS_PROCESS_HSYS_USER_H + +/*! \file */ + +/** @file ia_css_psys_process.hsys.user.h + * + * Define the methods on the process object: Hsys user interface + */ + +#include /* ia_css_program_param_t */ + +#include +#include + +#include /* uint8_t */ + +/* + * Creation + */ + +/*! Compute the size of storage required for allocating the process object + + @param manifest[in] program manifest + @param param[in] program parameters + + @return 0 on error + */ +extern size_t ia_css_sizeof_process( + const ia_css_program_manifest_t *manifest, + const ia_css_program_param_t *param); + +/*!
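 */

/*
 * Allocation pattern implied by this interface (a sketch): query the
 * required size, hand raw storage to ia_css_process_create() (declared
 * below), and bail out on the documented error values. my_alloc() is a
 * hypothetical host-side allocator, not part of this API.
 */
static ia_css_process_t *example_alloc_and_create_process(
	const ia_css_program_manifest_t *manifest,
	const ia_css_program_param_t *param,
	uint32_t program_idx)
{
	size_t size = ia_css_sizeof_process(manifest, param);
	void *raw_mem;

	if (size == 0)
		return NULL;	/* 0 is the documented error value */
	raw_mem = my_alloc(size);	/* hypothetical allocator */
	if (raw_mem == NULL)
		return NULL;
	return ia_css_process_create(raw_mem, manifest, param, program_idx);
}

/*!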
Create the process object + + @param raw_mem[in] pre-allocated memory + @param manifest[in] program manifest + @param param[in] program parameters + + @return NULL on error + */ +extern ia_css_process_t *ia_css_process_create( + void *raw_mem, + const ia_css_program_manifest_t *manifest, + const ia_css_program_param_t *param, + const uint32_t program_idx); + +/*! Destroy (the storage of) the process object + + @param process[in] process object + + @return NULL + */ +extern ia_css_process_t *ia_css_process_destroy( + ia_css_process_t *process); + +/* + * Access functions + */ + +/*! Print the process object to file/stream + + @param process[in] process object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_process_print( + const ia_css_process_t *process, + void *fid); + +#endif /* __IA_CSS_PSYS_PROCESS_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.psys.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.psys.h new file mode 100644 index 000000000000..ba1db574a438 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.psys.h @@ -0,0 +1,53 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_PSYS_H +#define __IA_CSS_PSYS_PROCESS_PSYS_H + +/*! \file */ + +/** @file ia_css_psys_process.psys.h + * + * Define the methods on the process object: Psys embedded interface + */ + +#include + +/* + * Process manager + */ + +/*! Acquire the resources specified in the process object + + @param process[in] process object + + Postcondition: This is a try operation; if any of the + resources is not available, all successfully acquired + ones will be released and the function will return an + error + + @return < 0 on error + */ +extern int ia_css_process_acquire(ia_css_process_t *process); + +/*! Release the resources specified in the process object + + @param process[in] process object + + @return < 0 on error + */ +extern int ia_css_process_release(ia_css_process_t *process); + + +#endif /* __IA_CSS_PSYS_PROCESS_PSYS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.h new file mode 100644 index 000000000000..c0f6901adeb0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.h @@ -0,0 +1,366 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation.
+ + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_H +#define __IA_CSS_PSYS_PROCESS_GROUP_H + +/*! \file */ + +/** @file ia_css_psys_process_group.h + * + * Define the methods on the process object that are not part of + * a single interface + */ +#include "ia_css_rbm.h" + +#include +#include + +#include /* uint8_t */ + +/* + * Creation + */ +#include + +/* + * Registration of user contexts / callback info + * External resources + * Sequencing resources + */ +#include + +/* + * Dispatcher + */ +#include + +/* + * Access to sub-structure handles / fields + */ + +#include "ia_css_terminal.h" + +/*! Get the number of fragments on the process group + + @param process_group[in] process group object + + Note: Future change is to have a fragment count per + independent subgraph + + @return the fragment count, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint16_t ia_css_process_group_get_fragment_count( + const ia_css_process_group_t *process_group); + + +/*! Get the fragment state on the process group + + @param process_group[in] process group object + @param fragment_state[out] current fragment of processing + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_get_fragment_state( + const ia_css_process_group_t *process_group, + uint16_t *fragment_state); + +/*! Set the fragment state on the process group + + @param process_group[in] process group object + @param fragment_state[in] current fragment of processing + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_fragment_state( + ia_css_process_group_t *process_group, + uint16_t fragment_state); + +/*! Get the number of processes on the process group + + @param process_group[in] process group object + + @return the process count, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_process_count( + const ia_css_process_group_t *process_group); + +/*! Get the number of terminals on the process group + + @param process_group[in] process group object + + Note: Future change is to have a terminal count per + independent subgraph + + @return the terminal count, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_terminal_count( + const ia_css_process_group_t *process_group); + +/*! Get the PG load start timestamp + + @param process_group[in] process group object + + @return PG load start timestamp, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_get_pg_load_start_ts( + const ia_css_process_group_t *process_group); + +/*! Get the PG load time in cycles + + @param process_group[in] process group object + + @return PG load time in cycles, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_get_pg_load_cycles( + const ia_css_process_group_t *process_group); + +/*! Get the PG init time in cycles + + @param process_group[in] process group object + + @return PG init time in cycles, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_get_pg_init_cycles( + const ia_css_process_group_t *process_group); + +/*!
Get the PG processing time in cycles + + @param process_group[in] process group object + + @return PG processing time in cycles, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_get_pg_processing_cycles( + const ia_css_process_group_t *process_group); + +/*! Get the (pointer to) the terminal of the process group object + + @param process_group[in] process group object + @param terminal_type[in] terminal type of terminal + + @return the pointer to the terminal, NULL on error + */ + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_terminal_t *ia_css_process_group_get_terminal_from_type( + const ia_css_process_group_t *process_group, + const ia_css_terminal_type_t terminal_type); + +/*! Get the (pointer to) the terminal of the process group object + * for terminals which have only a single instance + * (cached in, cached out, program, program_ctrl_init) + + @param process_group[in] process group object + @param terminal_type[in] terminal type of terminal + + @return the pointer to the terminal, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +const ia_css_terminal_t *ia_css_process_group_get_single_instance_terminal( + const ia_css_process_group_t *process_group, + ia_css_terminal_type_t term_type); + +/*! Get the (pointer to) the indexed terminal of the process group object + + @param process_group[in] process group object + @param terminal_index[in] index of the terminal + + @return the pointer to the terminal, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_terminal_t *ia_css_process_group_get_terminal( + const ia_css_process_group_t *process_group, + const unsigned int terminal_index); + +/*! Get the (pointer to) the indexed process of the process group object + + @param process_group[in] process group object + @param process_index[in] index of the process + + @return the pointer to the process, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_t *ia_css_process_group_get_process( + const ia_css_process_group_t *process_group, + const unsigned int process_index); + +/*! Get the stored size of the process group object + + @param process_group[in] process group object + + @return size, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +size_t ia_css_process_group_get_size( + const ia_css_process_group_t *process_group); + +/*! Get the state of the process group object + + @param process_group[in] process group object + + @return state, limit value on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_group_state_t ia_css_process_group_get_state( + const ia_css_process_group_t *process_group); + +/*! Get the unique ID of program group used by the process group object + + @param process_group[in] process group object + + @return ID, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_group_ID_t ia_css_process_group_get_program_group_ID( + const ia_css_process_group_t *process_group); + +/*! Get the resource bitmap of the process group + + @param process_group[in] process group object + + @return the resource bitmap + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t ia_css_process_group_get_resource_bitmap( + const ia_css_process_group_t *process_group); + +/*!
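 */

/*
 * Sketch (illustrative): the getters above follow a "limit value on error"
 * convention, so a caller can gate on the state before trusting the other
 * fields. IA_CSS_N_PROCESS_GROUP_STATES is the enum limit defined in the
 * process types header; the helper name is hypothetical.
 */
static bool example_process_group_is_usable(
	const ia_css_process_group_t *process_group)
{
	ia_css_process_group_state_t state =
		ia_css_process_group_get_state(process_group);

	if (state == IA_CSS_PROCESS_GROUP_ERROR ||
	    state >= IA_CSS_N_PROCESS_GROUP_STATES)
		return false;
	return ia_css_process_group_get_size(process_group) != 0;
}

/*!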
Set the resource bitmap of the process group + + @param process_group[in] process group object + @param resource_bitmap[in] the resource bitmap + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_resource_bitmap( + ia_css_process_group_t *process_group, + const vied_nci_resource_bitmap_t resource_bitmap); + +/*! Get the routing bitmap of the process group + + @param process_group[in] process group object + + @return routing bitmap (pointer) + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +const ia_css_rbm_t *ia_css_process_group_get_routing_bitmap( + const ia_css_process_group_t *process_group); + +/*! Set the routing bitmap of the process group + + @param process_group[in] process group object + @param rbm[in] routing bitmap + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_routing_bitmap( + ia_css_process_group_t *process_group, + const ia_css_rbm_t rbm); + +/*! Get IPU virtual address of process group + + @param process_group[in] process group object + @param ipu_vaddress[in/out] process group ipu virtual address + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_get_ipu_vaddress( + const ia_css_process_group_t *process_group, + vied_vaddress_t *ipu_vaddress); + +/*! Set IPU virtual address of process group + + @param process_group[in] process group object + @param ipu_vaddress[in] process group ipu address + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_ipu_vaddress( + ia_css_process_group_t *process_group, + vied_vaddress_t ipu_vaddress); + +/*! Get protocol version used by a process group + + @param process_group[in] process group object + + @return invalid protocol version on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_protocol_version( + const ia_css_process_group_t *process_group); + +/*! Get base queue id used by a process group + + @param process_group[in] process group object + + @return invalid queue id on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_base_queue_id( + ia_css_process_group_t *process_group); + +/*! Set base queue id used by a process group + + @param process_group[in] process group object + @param queue_id[in] process group queue id + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_base_queue_id( + ia_css_process_group_t *process_group, + uint8_t queue_id); + +/*! Get number of queues used by a process group + + @param process_group[in] process group object + + @return invalid number of queues (0) on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_num_queues( + ia_css_process_group_t *process_group); + +/*!
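 */

/*
 * Sketch, assuming (as the base/num pair above suggests) that a group's
 * buffer-set queues form the contiguous range
 * [base_queue_id, base_queue_id + num_queues). Helper name is hypothetical.
 */
static bool example_pg_owns_queue(ia_css_process_group_t *process_group,
	uint8_t queue_id)
{
	uint8_t base = ia_css_process_group_get_base_queue_id(process_group);
	uint8_t num = ia_css_process_group_get_num_queues(process_group);

	return (num != 0) && (queue_id >= base) &&
	       (queue_id < (uint8_t)(base + num));
}

/*!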
Set number of queues used by a process group + + @param process_group[in] process group object + @param num_queues[in] process group number of queues + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_num_queues( + ia_css_process_group_t *process_group, + uint8_t num_queues); + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_process_group_has_vp(const ia_css_process_group_t *process_group); + +#ifdef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_process_group_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.kernel.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.kernel.h new file mode 100644 index 000000000000..93cce2555de9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.kernel.h @@ -0,0 +1,324 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_HSYS_KERNEL_H +#define __IA_CSS_PSYS_PROCESS_GROUP_HSYS_KERNEL_H + +/*! \file */ + +/** @file ia_css_psys_process_group.hsys.kernel.h + * + * Define the methods on the process group object: Hsys kernel interface + */ + +#include + +#include +#include + +#include /* uint8_t */ + +/* + * Registration of user contexts / callback info + */ + +/*! Get the user (callback) token as registered in the process group + + @param process_group[in] process group object + + @return 0 on error + */ +extern uint64_t ia_css_process_group_get_token( + ia_css_process_group_t *process_group); + +/*! Set (register) a user (callback) token in the process group + + @param process_group[in] process group object + @param token[in] user token + + Note: The token value shall be non-zero. This token is + returned in each return message related to the process + group the token is registered with. + + @return < 0 on error + */ +extern int ia_css_process_group_set_token( + ia_css_process_group_t *process_group, + const uint64_t token); + +/* + * Passing of a (fragment) watermark + */ + +/*! Get the fragment progress limit of the process group + + @param process_group[in] process group object + + @return 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint16_t ia_css_process_group_get_fragment_limit( + const ia_css_process_group_t *process_group); + +/*! Set the new fragment progress limit of the process group + + @param process_group[in] process group object + @param fragment_limit[in] New limit value + + Note: The limit value must be less than or equal to the fragment + count value. The process group will not make progress beyond + the limit value. The limit value can be modified asynchronously. + If the limit value is reached before an update happens, the + process group will suspend and will not automatically resume. + + The limit is monotonically increasing.
The default value is + equal to the fragment count + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_fragment_limit( + ia_css_process_group_t *process_group, + const uint16_t fragment_limit); + +/*! Clear the fragment progress limit of the process group + + @param process_group[in] process group object + + Note: This function sets the fragment limit to zero. + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_clear_fragment_limit( + ia_css_process_group_t *process_group); + +/* + * Commands + */ + +/*! Perform the start command on the process group + + @param process_group[in] process group object + + Note: Start is an action of the l-Scheduler; it makes the + process group eligible for execution + + Precondition: The external resources that are attached to + the process group must be in the correct state, i.e. input + buffers are not-empty and output buffers not-full + + @return < 0 on error + */ +extern int ia_css_process_group_start( + ia_css_process_group_t *process_group); + +/*! Perform the suspend command on the process group + + @param process_group[in] process group object + + Note: Suspend indicates that the process group execution + is halted at the next fragment boundary. The process group + will not automatically resume + + Precondition: The process group must be running + + @return < 0 on error + */ +extern int ia_css_process_group_suspend( + ia_css_process_group_t *process_group); + +/*! Perform the resume command on the process group + + @param process_group[in] process group object + + Note: Resume indicates that the process group is again + eligible for execution + + Precondition: The process group must be started + + @return < 0 on error + */ +extern int ia_css_process_group_resume( + ia_css_process_group_t *process_group); + +/*! Perform the reset command on the process group + + @param process_group[in] process group object + + Note: Return the process group to the started state + + Precondition: The process group must be running or stopped + + @return < 0 on error + */ +extern int ia_css_process_group_reset( + ia_css_process_group_t *process_group); + +/*! Perform the abort command on the process group + + @param process_group[in] process group object + + Note: Force the process group to the stopped state + + Precondition: The process group must be running or started + + @return < 0 on error + */ +extern int ia_css_process_group_abort( + ia_css_process_group_t *process_group); + +/*! Release ownership of the process group + + @param process_group[in] process group object + + Note: Release notifies PSYS and hands over ownership of the + process group from SW to FW + + Precondition: The process group must be in the started state + + @return < 0 on error + */ +extern int ia_css_process_group_disown( + ia_css_process_group_t *process_group); + +/* + * External resources + */ + +/*!
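 */

/*
 * Command-sequence sketch (illustrative): a host-side halt path that
 * suspends a running group and falls back to abort if resume fails.
 * Preconditions follow the notes above; error codes are propagated as-is
 * and the helper name is hypothetical.
 */
static int example_halt_or_abort(ia_css_process_group_t *process_group)
{
	if (ia_css_process_group_suspend(process_group) < 0)
		return -1;
	/* ... inspect progress here, then either resume or abort ... */
	if (ia_css_process_group_resume(process_group) < 0)
		return ia_css_process_group_abort(process_group);
	return 0;
}

/*!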
Set (register) a data buffer to the indexed terminal in the process group + + @param process_group[in] process group object + @param buffer[in] buffer handle + @param buffer_state[in] state of the buffer + @param terminal_index[in] index of the terminal + + Note: The buffer handle shall not be VIED_NULL; the buffer + state can be undefined (BUFFER_UNDEFINED) + + Note: The buffer can be in memory or streaming over memory + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_attach_buffer( + ia_css_process_group_t *process_group, + vied_vaddress_t buffer, + const ia_css_buffer_state_t buffer_state, + const unsigned int terminal_index); + +/*! Get (unregister) the data buffer on the indexed terminal of + * the process group + + @param process_group[in] process group object + @param terminal_index[in] index of the terminal + + Precondition: The process group must be stopped + + Postcondition: The buffer handle shall be reset to VIED_NULL, the buffer + state to BUFFER_NULL + + @return VIED_NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_process_group_detach_buffer( + ia_css_process_group_t *process_group, + const unsigned int terminal_index); + +/*! Set (register) a data stream to the indexed terminal in the process group + + @param process_group[in] process group object + @param stream[in] stream handle + @param buffer_state[in] state of the buffer + @param terminal_index[in] index of the terminal + + Note: The stream handle shall not be zero; the buffer + state can be undefined (BUFFER_UNDEFINED) + + Note: A stream is used in place of a buffer (the two are mutually + exclusive); the buffer can be in memory or streaming over memory + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_attach_stream( + ia_css_process_group_t *process_group, + uint32_t stream, + const ia_css_buffer_state_t buffer_state, + const unsigned int terminal_index); + +/*! Get (unregister) the stream handle on the indexed terminal of + * the process group + + @param process_group[in] process group object + @param terminal_index[in] index of the terminal + + Precondition: The process group must be stopped + + Postcondition: The stream handle shall be reset to zero, the buffer + state to BUFFER_NULL + + @return 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_detach_stream( + ia_css_process_group_t *process_group, + const unsigned int terminal_index); + +/* + * Sequencing resources + */ + +/*! Set a(n artificial) blocking resource (barrier) in + * the process group resource map + + @param process_group[in] process group object + @param barrier_index[in] index of the barrier + + Note: The barriers have to be set to force sequencing between started + process groups + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_barrier( + ia_css_process_group_t *process_group, + const vied_nci_barrier_ID_t barrier_index); + +/*! Clear a previously set blocking resource (barrier) in + * the process group resource map + + @param process_group[in] process group object + @param barrier_index[in] index of the barrier + + Precondition: The barriers must have been set + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_clear_barrier( + ia_css_process_group_t *process_group, + const vied_nci_barrier_ID_t barrier_index); + +/*!
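 */

/*
 * Sketch (illustrative): attach a buffer to terminal 0 and start the group
 * only when the documented precondition holds;
 * ia_css_can_process_group_start() is declared just below. The buffer state
 * is left to the caller because the enum literals are defined elsewhere;
 * the helper name is hypothetical.
 */
static int example_attach_and_start(ia_css_process_group_t *process_group,
	vied_vaddress_t buffer,
	ia_css_buffer_state_t buffer_state)
{
	int ret = ia_css_process_group_attach_buffer(process_group, buffer,
		buffer_state, 0);

	if (ret < 0)
		return ret;
	if (!ia_css_can_process_group_start(process_group))
		return -1;
	return ia_css_process_group_start(process_group);
}

/*!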
Boolean test if the process group preconditions for start are satisfied + + @param process_group[in] process group object + + @return true if the process group can be started + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_can_process_group_start( + const ia_css_process_group_t *process_group); + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_HSYS_KERNEL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.user.h new file mode 100644 index 000000000000..dfbcc8815c1e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.user.h @@ -0,0 +1,199 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_HSYS_USER_H +#define __IA_CSS_PSYS_PROCESS_GROUP_HSYS_USER_H + +/*! \file */ + +/** @file ia_css_psys_process_group.hsys.user.h + * + * Define the methods on the process group object: Hsys user interface + */ + +#include /* ia_css_program_group_param_t */ + +#include +#include +#include + +#include "ia_css_psys_dynamic_storage_class.h" + +#include /* uint8_t */ + +/* + * Creation + */ + +/*! Compute the size of storage required for allocating the process group object + + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return 0 on error + */ +extern size_t ia_css_sizeof_process_group( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Create (the storage for) the process group object + + @param process_grp_mem[in/out] raw memory for process group + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return NULL on error + */ +extern ia_css_process_group_t *ia_css_process_group_create( + void *process_grp_mem, + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Destroy (the storage of) the process group object + + @param process_group[in] process group object + + @return NULL + */ +extern ia_css_process_group_t *ia_css_process_group_destroy( + ia_css_process_group_t *process_group); + +/*! Print the process group object to file/stream + + @param process_group[in] process group object + @param fid[out] file/stream handle + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_print( + const ia_css_process_group_t *process_group, + void *fid); + +/* + * Commands + */ + +/*! 
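 */

/*
 * Creation sketch (illustrative): the caller sizes the storage with
 * ia_css_sizeof_process_group() and passes raw memory in; destroy returns
 * NULL so the handle can be reset in one step. fid is whatever stream
 * handle the print hook expects; the helper name is hypothetical.
 */
static ia_css_process_group_t *example_create_and_dump(void *process_grp_mem,
	const ia_css_program_group_manifest_t *manifest,
	const ia_css_program_group_param_t *param,
	void *fid)
{
	ia_css_process_group_t *pg;

	if (process_grp_mem == NULL ||
	    ia_css_sizeof_process_group(manifest, param) == 0)
		return NULL;
	pg = ia_css_process_group_create(process_grp_mem, manifest, param);
	if (pg != NULL && ia_css_process_group_print(pg, fid) < 0)
		pg = ia_css_process_group_destroy(pg);	/* returns NULL */
	return pg;
}

/*!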
Perform the submit command on the process group + + @param process_group[in] process group object + + Note: Submit is an action of the h-Scheduler; it makes the + process group eligible for the l-Scheduler + + Precondition: The external resources must be attached to + the process group + + @return < 0 on error + */ +extern int ia_css_process_group_submit( + ia_css_process_group_t *process_group); + +/*! Boolean test if the process group object type is valid + + @param process_group[in] process group object + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return true if the process group is correct, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_process_group_valid( + const ia_css_process_group_t *process_group, + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Boolean test if the process group preconditions for submit are satisfied + + @param process_group[in] process group object + + @return true if the process group can be submitted + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_can_process_group_submit( + const ia_css_process_group_t *process_group); + +/*! Boolean test if the preconditions on process group and buffer set are + satisfied for enqueuing buffer set + + @param process_group[in] process group object + @param buffer_set[in] buffer set object + + @return true if the buffer set can be enqueued + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_can_enqueue_buffer_set( + const ia_css_process_group_t *process_group, + const ia_css_buffer_set_t *buffer_set); + +/*! Compute the cycle count required for executing the process group object + + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_compute_cycle_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Compute the number of processes required for + * executing the process group object + + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return 0 on error + */ +extern uint8_t ia_css_process_group_compute_process_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Compute the number of terminals required for + * executing the process group object + + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return 0 on error + */ +extern uint8_t ia_css_process_group_compute_terminal_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Get private token as registered in the process group by the implementation + + @param process_group[in] process group object + + @return 0 on error + */ +extern uint64_t ia_css_process_group_get_private_token( + ia_css_process_group_t *process_group); + +/*! Set private token in the process group as needed by the implementation + + @param process_group[in] process group object + @param token[in] user token + + Note: The token value shall be non-zero. This token is private + to the implementation.
This is in addition to the user token + + @return < 0 on error, 0 on success + */ +extern int ia_css_process_group_set_private_token( + ia_css_process_group_t *process_group, + const uint64_t token); + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.psys.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.psys.h new file mode 100644 index 000000000000..6ceccfc2f9bc --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.psys.h @@ -0,0 +1,60 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_PSYS_H +#define __IA_CSS_PSYS_PROCESS_GROUP_PSYS_H + +/*! \file */ + +/** @file ia_css_psys_process_group.psys.h + * + * Define the methods on the process group object: Psys embedded interface + */ + +#include + +/* + * Dispatcher + */ + +/*! Perform the run command on the process group + + @param process_group[in] process group object + + Note: Run indicates that the process group will execute + + Precondition: The process group must be started or + suspended and the processes have acquired the necessary + internal resources + + @return < 0 on error + */ +extern int ia_css_process_group_run( + ia_css_process_group_t *process_group); + +/*! Perform the stop command on the process group + + @param process_group[in] process group object + + Note: Stop indicates that the process group has completed execution + + Postcondition: The external resources can now be detached + + @return < 0 on error + */ +extern int ia_css_process_group_stop( + ia_css_process_group_t *process_group); + + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_PSYS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group_cmd_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group_cmd_impl.h new file mode 100644 index 000000000000..530f93ef6ce0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group_cmd_impl.h @@ -0,0 +1,178 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_CMD_IMPL_H +#define __IA_CSS_PSYS_PROCESS_GROUP_CMD_IMPL_H + +#include "type_support.h" +#include "ia_css_psys_process_group.h" +#include "ia_css_rbm_manifest_types.h" + +#define N_UINT64_IN_PROCESS_GROUP_STRUCT 2 +#define N_UINT32_IN_PROCESS_GROUP_STRUCT 5 +#define N_UINT16_IN_PROCESS_GROUP_STRUCT 5 +#define N_UINT8_IN_PROCESS_GROUP_STRUCT 7 +#define N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT 3 + +#define SIZE_OF_PROCESS_GROUP_STRUCT_BITS \ + (IA_CSS_RBM_BITS \ + + N_UINT64_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT64_T_BITS \ + + N_UINT32_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT32_T_BITS \ + + IA_CSS_PROGRAM_GROUP_ID_BITS \ + + IA_CSS_PROCESS_GROUP_STATE_BITS \ + + VIED_VADDRESS_BITS \ + + VIED_NCI_RESOURCE_BITMAP_BITS \ + + N_UINT16_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_UINT8_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT8_T_BITS) + +struct ia_css_process_group_s { + /**< User (callback) token / user context reference, + * zero is an error value + */ + uint64_t token; + /**< private token / context reference, zero is an error value */ + uint64_t private_token; + /**< PG routing bitmap used to set connection between programs >*/ + ia_css_rbm_t routing_bitmap; + /**< Size of this structure */ + uint32_t size; + /**< The timestamp when PG load starts */ + uint32_t pg_load_start_ts; + /**< PG load time in cycles */ + uint32_t pg_load_cycles; + /**< PG init time in cycles */ + uint32_t pg_init_cycles; + /**< PG processing time in cycles */ + uint32_t pg_processing_cycles; + /**< Referral ID to program group FW */ + ia_css_program_group_ID_t ID; + /**< State of the process group FSM */ + ia_css_process_group_state_t state; + /**< Virtual address of process group in IPU */ + vied_vaddress_t ipu_virtual_address; + /**< Bitmap of the compute resources used by the process group */ + vied_nci_resource_bitmap_t resource_bitmap; + /**< Number of fragments offered on each terminal */ + uint16_t fragment_count; + /**< Current fragment of processing */ + uint16_t fragment_state; + /**< Watermark to control fragment processing */ + uint16_t fragment_limit; + /**< Array[process_count] of process addresses in this process group */ + uint16_t processes_offset; + /**< Array[terminal_count] of terminal addresses on this process group */ + uint16_t terminals_offset; + /**< Parameter dependent number of processes in this process group */ + uint8_t process_count; + /**< Parameter dependent number of terminals on this process group */ + uint8_t terminal_count; + /**< Parameter dependent number of independent subgraphs in + * this process group + */ + uint8_t subgraph_count; + /**< Process group protocol version */ + uint8_t protocol_version; + /**< Dedicated base queue id used for enqueueing payload buffer sets */ + uint8_t base_queue_id; + /**< Number of dedicated queues used */ + uint8_t num_queues; + /**< Mask the send_pg_done IRQ */ + uint8_t mask_irq; + /**< Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT]; +}; + +/*! Callback after process group is created. Implementations can provide + * suitable actions needed when process group is created. 
+ + @param process_group[in] process group object + @param program_group_manifest[in] program group manifest + @param program_group_param[in] program group parameters + + @return 0 on success and non-zero on failure + */ +extern int ia_css_process_group_on_create( + ia_css_process_group_t *process_group, + const ia_css_program_group_manifest_t *program_group_manifest, + const ia_css_program_group_param_t *program_group_param); + +/*! Callback before process group is about to be destroyed. Any + * implementation-specific cleanups can be done here. + + @param process_group[in] process group object + + @return 0 on success and non-zero on failure + */ +extern int ia_css_process_group_on_destroy( + ia_css_process_group_t *process_group); + +/* + * Command processor + */ + +/*! Execute a command locally or send it to be processed remotely + + @param process_group[in] process group object + @param cmd[in] command + + @return < 0 on error + */ +extern int ia_css_process_group_exec_cmd( + ia_css_process_group_t *process_group, + const ia_css_process_group_cmd_t cmd); + + +/*! Enqueue a buffer set corresponding to a persistent program group by + * sending a command to subsystem. + + @param process_group[in] process group object + @param buffer_set[in] buffer set + @param queue_offset[in] offset to be used from the queue id + specified in the process group object + (0 for first buffer set for frame, 1 + for late binding) + + @return < 0 on error + */ +extern int ia_css_enqueue_buffer_set( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *buffer_set, + unsigned int queue_offset); + +/*! Enqueue a parameter buffer set corresponding to a persistent program + * group by sending a command to subsystem. + + @param process_group[in] process group object + @param buffer_set[in] parameter buffer set + + @return < 0 on error + */ +extern int ia_css_enqueue_param_buffer_set( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *buffer_set); + +/*! Need to store the 'secure' mode for each PG for FW test app only + * + * @param process_group[in] process group object + * @param secure[in] secure mode flag + * + * @return < 0 on error + */ +extern int ia_css_process_group_store( + ia_css_process_group_t *process_group, + bool secure); + + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_CMD_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_types.h new file mode 100644 index 000000000000..b424fb9631fd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_types.h @@ -0,0 +1,98 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_TYPES_H +#define __IA_CSS_PSYS_PROCESS_TYPES_H + +/*!
\file */ + +/** @file ia_css_psys_process_types.h + * + * The types belonging to the terminal/process/process group dynamic module + */ + +#include +#include + +#include + +#define IA_CSS_PROCESS_INVALID_PROGRAM_IDX ((uint32_t)-1) + +/* private */ +typedef enum ia_css_process_group_cmd { + IA_CSS_PROCESS_GROUP_CMD_NOP = 0, + IA_CSS_PROCESS_GROUP_CMD_SUBMIT, + IA_CSS_PROCESS_GROUP_CMD_ATTACH, + IA_CSS_PROCESS_GROUP_CMD_DETACH, + IA_CSS_PROCESS_GROUP_CMD_START, + IA_CSS_PROCESS_GROUP_CMD_DISOWN, + IA_CSS_PROCESS_GROUP_CMD_RUN, + IA_CSS_PROCESS_GROUP_CMD_STOP, + IA_CSS_PROCESS_GROUP_CMD_SUSPEND, + IA_CSS_PROCESS_GROUP_CMD_RESUME, + IA_CSS_PROCESS_GROUP_CMD_ABORT, + IA_CSS_PROCESS_GROUP_CMD_RESET, + IA_CSS_N_PROCESS_GROUP_CMDS +} ia_css_process_group_cmd_t; + +/* private */ +#define IA_CSS_PROCESS_GROUP_STATE_BITS 32 +typedef enum ia_css_process_group_state { + IA_CSS_PROCESS_GROUP_ERROR = 0, + IA_CSS_PROCESS_GROUP_CREATED, + IA_CSS_PROCESS_GROUP_READY, + IA_CSS_PROCESS_GROUP_BLOCKED, + IA_CSS_PROCESS_GROUP_STARTED, + IA_CSS_PROCESS_GROUP_RUNNING, + IA_CSS_PROCESS_GROUP_STALLED, + IA_CSS_PROCESS_GROUP_STOPPED, + IA_CSS_N_PROCESS_GROUP_STATES +} ia_css_process_group_state_t; + +/* private */ +typedef enum ia_css_process_cmd { + IA_CSS_PROCESS_CMD_NOP = 0, + IA_CSS_PROCESS_CMD_ACQUIRE, + IA_CSS_PROCESS_CMD_RELEASE, + IA_CSS_PROCESS_CMD_START, + IA_CSS_PROCESS_CMD_LOAD, + IA_CSS_PROCESS_CMD_STOP, + IA_CSS_PROCESS_CMD_SUSPEND, + IA_CSS_PROCESS_CMD_RESUME, + IA_CSS_N_PROCESS_CMDS +} ia_css_process_cmd_t; + +/* private */ +#define IA_CSS_PROCESS_STATE_BITS 32 +typedef enum ia_css_process_state { + IA_CSS_PROCESS_ERROR = 0, + IA_CSS_PROCESS_CREATED, + IA_CSS_PROCESS_READY, + IA_CSS_PROCESS_STARTED, + IA_CSS_PROCESS_RUNNING, + IA_CSS_PROCESS_STOPPED, + IA_CSS_PROCESS_SUSPENDED, + IA_CSS_N_PROCESS_STATES +} ia_css_process_state_t; + +/* public */ +typedef struct ia_css_process_group_s ia_css_process_group_t; +typedef struct ia_css_process_s ia_css_process_t; + +typedef struct ia_css_data_terminal_s ia_css_data_terminal_t; + +#endif /* __IA_CSS_PSYS_PROCESS_TYPES_H */ + + + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.h new file mode 100644 index 000000000000..abf398299d16 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.h @@ -0,0 +1,316 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_H +#define __IA_CSS_PSYS_TERMINAL_H + +/*! 
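 */

/*
 * Debug-aid sketch for the process group state machine defined in the types
 * header above: a name table indexed by ia_css_process_group_state_t. The
 * table is illustrative and must be kept in sync with the enum order
 * (ERROR is 0 and the values are consecutive).
 */
static const char *example_pg_state_name(ia_css_process_group_state_t state)
{
	static const char *const names[IA_CSS_N_PROCESS_GROUP_STATES] = {
		"ERROR", "CREATED", "READY", "BLOCKED",
		"STARTED", "RUNNING", "STALLED", "STOPPED",
	};

	return (state < IA_CSS_N_PROCESS_GROUP_STATES) ?
	       names[state] : "INVALID";
}

/*!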
\file */ + +/** @file ia_css_psys_terminal.h + * + * Define the methods on the terminal object that are not part of + * a single interface + */ + +#include /* ia_css_frame_t */ +#include /* ia_css_program_group_param_t */ + +#include +#include + +#include /* bool */ +#include /* FILE */ +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_terminal.h" +#include "ia_css_terminal_manifest_base_types.h" + +/* + * Creation + */ +#include + +/*! Boolean test if the terminal object type is input + + @param terminal[in] terminal object + + @return true if the terminal is input, false otherwise or on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_input( + const ia_css_terminal_t *terminal); + +/*! Get the stored size of the terminal object + + @param terminal[in] terminal object + + @return size, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +size_t ia_css_terminal_get_size( + const ia_css_terminal_t *terminal); + +/*! Get the type of the terminal object + + @param terminal[in] terminal object + + @return the type of the terminal, limit value on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_terminal_type_t ia_css_terminal_get_type( + const ia_css_terminal_t *terminal); + +/*! Set the type of the terminal object + + @param terminal[in] terminal object + @param terminal_type[in] type of the terminal + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_terminal_set_type( + ia_css_terminal_t *terminal, + const ia_css_terminal_type_t terminal_type); + +/*! Get the index of the terminal manifest object + + @param terminal[in] terminal object + + @return the index of the terminal manifest object, limit value on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint16_t ia_css_terminal_get_terminal_manifest_index( + const ia_css_terminal_t *terminal); + +/*! Set the index of the terminal manifest object + + @param terminal[in] terminal object + @param tm_index[in] terminal manifest index + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_terminal_set_terminal_manifest_index( + ia_css_terminal_t *terminal, + const uint16_t tm_index); + +/*! Get id of the terminal object + + @param terminal[in] terminal object + + @return id of terminal + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_terminal_ID_t ia_css_terminal_get_ID( + const ia_css_terminal_t *terminal); + +/*! Get kernel id of the data terminal object + + @param dterminal[in] data terminal object + + @return kernel id of terminal + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_data_terminal_get_kernel_id( + const ia_css_data_terminal_t *dterminal); + +/*! Get the connection type from the terminal object + + @param dterminal[in] data terminal object + + @return connection type, limit value on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_connection_type_t ia_css_data_terminal_get_connection_type( + const ia_css_data_terminal_t *dterminal); + +/*! Set the connection type of the terminal object + + @param dterminal[in] data terminal object + @param connection_type[in] connection type + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_data_terminal_set_connection_type( + ia_css_data_terminal_t *dterminal, + const ia_css_connection_type_t connection_type); + +/*! Get link id of the data terminal object + + @param dterminal[in] data terminal object + + @return link id of terminal + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_data_terminal_get_link_id( + const ia_css_data_terminal_t *dterminal); + + +/*!
Set link id of the terminal object + + @param terminal[in] data terminal object + @param link_id[in] synchronization link id + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_data_terminal_set_link_id( + ia_css_data_terminal_t *dterminal, + const uint8_t link_id); + +/*! Get the (pointer to) the process group parent of the terminal object + + @param terminal[in] terminal object + + @return the pointer to the parent, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_group_t *ia_css_terminal_get_parent( + const ia_css_terminal_t *terminal); + +/*! Set the (pointer to) the process group parent of the terminal object + + @param terminal[in] terminal object + @param parent[in] (pointer to the) process group parent object + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_terminal_set_parent( + ia_css_terminal_t *terminal, + ia_css_process_group_t *parent); + +/*! Boolean test if the terminal object type is valid + + @param terminal[in] process terminal object + @param terminal_manifest[in] program terminal manifest + + @return true if the process terminal object is correct, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest); + +/* ================= Program Control Init Terminal - START ================= */ + +/*! + * Gets the program init terminal descriptor size + * @param manifest[in] program control init terminal manifest + * @return size, error if < 0. + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +unsigned int +ia_css_program_control_init_terminal_get_descriptor_size( + const ia_css_program_control_init_terminal_manifest_t *manifest); + +/*! + * Initialize program control init terminal + * @param terminal[in] program control init terminal + * @param manifest[in] program control init terminal manifest + * @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int +ia_css_program_control_init_terminal_init( + ia_css_program_control_init_terminal_t *terminal, + const ia_css_program_control_init_terminal_manifest_t *manifest); + +/*! + * Get a program desc for a program control init terminal + * @param prog_ctrl_init_terminal[in] program control init terminal + * @param program_index[in] program index + * @return program desc, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_control_init_program_desc_t * +ia_css_program_control_init_terminal_get_program_desc( + const ia_css_program_control_init_terminal_t *prog_ctrl_init_terminal, + const unsigned int program_index +); + +/*! + * Pretty prints the program control init terminal + * @param terminal[in] program control init terminal + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +void ia_css_program_control_init_terminal_print( + const ia_css_program_control_init_terminal_t *terminal); + +/*! + * Gets a load section desc for a program desc + * of a program control init terminal + * @param program_desc[in] program control init terminal program desc + * @param load_section_index[in] section index + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_control_init_load_section_desc_t * +ia_css_program_control_init_terminal_get_load_section_desc( + const ia_css_program_control_init_program_desc_t *program_desc, + const unsigned int load_section_index +); + +/*!
+ * Gets process_id from program desc + * of a program control init terminal + * @param program_desc[in] program control init terminal program desc + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_id_t ia_css_program_control_init_terminal_get_process_id( + const ia_css_program_control_init_program_desc_t *program_desc); + +/*! + * Set control info of program desc + * of a program control init terminal + * @param program_desc[in] program control init terminal program desc + * @param process_id unique process id used to identify the process + * among all active processes + * @param num_done_events number of events required to close the process + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +void ia_css_program_control_init_terminal_set_control_info( + ia_css_program_control_init_program_desc_t *program_desc, + ia_css_process_id_t process_id, + uint8_t num_done_events); + +/*! + * Gets num_done_events value from program desc + * of a program control init terminal + * @param program_desc[in] program control init terminal program desc + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_program_control_init_terminal_get_num_done_events( + const ia_css_program_control_init_program_desc_t *program_desc); + +/*! + * Gets a connect section desc for a program desc + * of a program control init terminal + * @param program_desc[in] program control init terminal program desc + * @param connect_section_index[in] section index + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_control_init_connect_section_desc_t * +ia_css_program_control_init_terminal_get_connect_section_desc( + const ia_css_program_control_init_program_desc_t *program_desc, + const unsigned int connect_section_index +); + +/* ================= Program Control Init Terminal - END ================= */ + +#ifdef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_terminal_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_TERMINAL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.hsys.user.h new file mode 100644 index 000000000000..b8aa08c19754 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.hsys.user.h @@ -0,0 +1,255 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_HSYS_USER_H +#define __IA_CSS_PSYS_TERMINAL_HSYS_USER_H + +/*!
\file */ + +/** @file ia_css_psys_terminal.hsys.user.h + * + * Define the methods on the terminal object: Hsys user interface + */ + +#include /* ia_css_frame_t */ +#include /* ia_css_program_group_param_t */ + +#include +#include + +#include /* bool */ +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_terminal.h" +#include "ia_css_terminal_manifest.h" +#include "ia_css_kernel_bitmap.h" + +/* + * Creation + */ + +/* + * This source file is created with the intention of being shared and + * compiled for both host and firmware. Since there is no native 64bit + * data type support for firmware this wouldn't compile for SP + * tile. The parts of the file that are not compilable are marked + * with the following __VIED_CELL marker and this comment. Once we + * come up with a solution to address this issue, this will be + * removed. + */ +#if !defined(__VIED_CELL) +/*! Compute the size of storage required for allocating the terminal object + + @param manifest[in] terminal manifest + @param param[in] program group parameters + + @return 0 on error + */ +extern size_t ia_css_sizeof_terminal( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Create the terminal object + + @param raw_mem[in] pre-allocated memory + @param manifest[in] terminal manifest + @param terminal_param[in] terminal parameter + @param enable_bitmap program group enable bitmap + + @return NULL on error + */ +extern ia_css_terminal_t *ia_css_terminal_create( + void *raw_mem, + const ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_param_t *terminal_param, + ia_css_kernel_bitmap_t enable_bitmap); + +/*! Destroy (the storage of) the terminal object + + @param terminal[in] terminal object + + @return NULL + */ +extern ia_css_terminal_t *ia_css_terminal_destroy( + ia_css_terminal_t *terminal); +#endif /* !defined(__VIED_CELL) */ + +/*! Print the terminal object to file/stream + + @param terminal[in] terminal object + @param fid[out] file/stream handle + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_terminal_print( + const ia_css_terminal_t *terminal, + void *fid); + +/*! Get the (pointer to) the frame object in the terminal object + + @param terminal[in] terminal object + + @return the pointer to the frame, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_frame_t *ia_css_data_terminal_get_frame( + const ia_css_data_terminal_t *terminal); + +/*! Get the (pointer to) the frame descriptor object in the terminal object + + @param terminal[in] terminal object + + @return the pointer to the frame descriptor, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_frame_descriptor_t *ia_css_data_terminal_get_frame_descriptor( + const ia_css_data_terminal_t *dterminal); + +/*! Get the (pointer to) the fragment descriptor object in the terminal object + + @param terminal[in] terminal object + +@return the pointer to the fragment descriptor, NULL on error +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_fragment_descriptor_t + *ia_css_data_terminal_get_fragment_descriptor( + const ia_css_data_terminal_t *dterminal, + const unsigned int fragment_index); + +/*! Get the number of fragments on the terminal + + @param terminal[in] terminal object + + @return the fragment count, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint16_t ia_css_data_terminal_get_fragment_count( + const ia_css_data_terminal_t *dterminal); + +/*!
Get the number of sections on the (param)terminal + @param manifest[in] terminal manifest + @param param[in] program group parameters + + @return the section count, 0 on error + */ +extern uint16_t ia_css_param_terminal_compute_section_count( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Get the number of planes on the (data)terminal + @param manifest[in] terminal manifest + @param param[in] program group parameters + + @return the plane count, 1 (default) on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_data_terminal_compute_plane_count( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Check if the given terminal is a parameter terminal. + + @param terminal[in] (base)terminal object + + @return true on success, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_parameter_terminal( + const ia_css_terminal_t *terminal); + +/*! Check if the given terminal is a program terminal. + + @param terminal[in] (base)terminal object + + @return true on success, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_program_terminal( + const ia_css_terminal_t *terminal); + +/*! Check if the given terminal is a program control init terminal. + + @param terminal[in] (base)terminal object + + @return true on success, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_program_control_init_terminal( + const ia_css_terminal_t *terminal); + +/*! Check if the given terminal is a spatial parameter terminal. + + @param terminal[in] (base)terminal object + + @return true on success, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_spatial_parameter_terminal( + const ia_css_terminal_t *terminal); + +/*! Check if the given terminal is a data terminal. + + @param terminal[in] (base)terminal object + + @return true on success, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_terminal_data_terminal( + const ia_css_terminal_t *terminal); + +/*! Obtain the buffer out of the terminal (both data & param terminals can call this) + + @param terminal[in] (base)terminal object of either data or param terminal. + + @return vied address of buffer stored in terminal + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_terminal_get_buffer( + const ia_css_terminal_t *terminal); + +/*! Store a buffer in the terminal. + + @param terminal[in] (base)terminal object of either data or param terminal. + @param buffer[in] buffer in vied (hrt address) space. + + @return 0 on success + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_terminal_set_buffer(ia_css_terminal_t *terminal, + vied_vaddress_t buffer); + +/*! Obtain terminal buffer index out of terminal object + + @param terminal[in] (base)terminal object of either data or param terminal. + + @return terminal buffer index stored in terminal object on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_get_terminal_index( + const ia_css_terminal_t *terminal); + +/*! Store a terminal buffer index in the terminal object + + @param terminal[in] (base)terminal object of either data or param terminal.
+ @param terminal_index[in] terminal buffer index + + @return 0 on success + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_terminal_index( + ia_css_terminal_t *terminal, + unsigned int terminal_index); + +#endif /* __IA_CSS_PSYS_TERMINAL_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set.c new file mode 100644 index 000000000000..82d53831f9a9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set.c @@ -0,0 +1,111 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "assert_support.h" +#include "ia_css_psys_dynamic_trace.h" +#include "ia_css_psys_buffer_set.h" +#include "ia_css_psys_process_group.h" + +/* + * Functions to possibly inline + */ +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_buffer_set_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +STORAGE_CLASS_INLINE void __buffer_set_dummy_check_alignment(void) +{ + COMPILATION_ERROR_IF(SIZE_OF_BUFFER_SET != + CHAR_BIT * sizeof(ia_css_buffer_set_t)); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_buffer_set_t) % sizeof(uint64_t)); +} + +/* + * Functions not to inline + */ + +/* The below functions are not to be compiled for firmware */ +#if !defined(__HIVECC) + +ia_css_buffer_set_t *ia_css_buffer_set_create( + void *buffer_set_mem, + const ia_css_process_group_t *process_group, + const unsigned int frame_counter) +{ + ia_css_buffer_set_t *buffer_set = NULL; + unsigned int i; + int ret = -1; + + verifexit(buffer_set_mem != NULL); + verifexit(process_group != NULL); + + buffer_set = (ia_css_buffer_set_t *)buffer_set_mem; + + /* + * Set base struct members + */ + buffer_set->ipu_virtual_address = VIED_NULL; + ia_css_process_group_get_ipu_vaddress(process_group, + &buffer_set->process_group_handle); + buffer_set->frame_counter = frame_counter; + buffer_set->terminal_count = + ia_css_process_group_get_terminal_count(process_group); + + /* + * Initialize adjacent buffer addresses + */ + for (i = 0; i < buffer_set->terminal_count; i++) { + vied_vaddress_t *buffer = + (vied_vaddress_t *)( + (char *)buffer_set + + sizeof(ia_css_buffer_set_t) + + sizeof(vied_vaddress_t) * i); + + *buffer = VIED_NULL; + } + ret = 0; + +EXIT: + if (ret != 0) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_create failed\n"); + } + return buffer_set; +} + +size_t ia_css_sizeof_buffer_set( + const ia_css_process_group_t *process_group) +{ + size_t size = 0; + + verifexit(process_group != NULL); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_sizeof_buffer_set(): enter:\n"); + + size = sizeof(ia_css_buffer_set_t) + + ia_css_process_group_get_terminal_count(process_group) * + sizeof(vied_vaddress_t); + +EXIT: + if (size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_sizeof_buffer_set failed\n"); + } + return size; +} + +#endif diff --git 
a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set_impl.h new file mode 100644 index 000000000000..0399d76f3331 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set_impl.h @@ -0,0 +1,241 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __IA_CSS_PSYS_BUFFER_SET_IMPL_H +#define __IA_CSS_PSYS_BUFFER_SET_IMPL_H + +#include "error_support.h" +#include "ia_css_psys_dynamic_trace.h" +#include "vied_nci_psys_system_global.h" +#include "ia_css_psys_terminal.hsys.user.h" + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_buffer_set_set_buffer( + ia_css_buffer_set_t *buffer_set, + const unsigned int terminal_index, + const vied_vaddress_t buffer) +{ + DECLARE_ERRVAL + vied_vaddress_t *buffer_ptr; + int ret = -1; + + verifexitval(buffer_set != NULL, EFAULT); + verifexitval(terminal_index < buffer_set->terminal_count, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_set_buffer(): enter:\n"); + + /* + * Set address in buffer set object + */ + buffer_ptr = + (vied_vaddress_t *)( + (char *)buffer_set + + sizeof(ia_css_buffer_set_t) + + terminal_index * sizeof(vied_vaddress_t)); + *buffer_ptr = buffer; + + ret = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_set_buffer: invalid argument\n"); + } + return ret; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_buffer_set_get_buffer( + const ia_css_buffer_set_t *buffer_set, + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + vied_vaddress_t buffer = VIED_NULL; + vied_vaddress_t *buffer_ptr; + int terminal_index; + + verifexitval(buffer_set != NULL, EFAULT); + verifexitval(terminal != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_get_buffer(): enter:\n"); + + /* + * Retrieve terminal index from terminal object + */ + terminal_index = ia_css_terminal_get_terminal_index(terminal); + verifexitval(terminal_index >= 0, EFAULT); + verifexitval(terminal_index < buffer_set->terminal_count, EFAULT); + + /* + * Retrieve address from buffer set object + */ + buffer_ptr = + (vied_vaddress_t *)( + (char *)buffer_set + + sizeof(ia_css_buffer_set_t) + + terminal_index * sizeof(vied_vaddress_t)); + buffer = *buffer_ptr; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_get_buffer: invalid argument\n"); + } + return buffer; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_buffer_set_set_ipu_address( + ia_css_buffer_set_t *buffer_set, + const vied_vaddress_t ipu_vaddress) +{ + DECLARE_ERRVAL + int ret = -1; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_set_ipu_address(): enter:\n"); + + buffer_set->ipu_virtual_address = ipu_vaddress; + + ret = 0; + +EXIT: + if (haserror(EFAULT)) { + 
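/* haserror(EFAULT) is raised by the verifexitval() checks above, + * so reaching this branch means an argument check failed. + */ +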
IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_set_ipu_address invalid argument\n"); + } + return ret; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_buffer_set_get_ipu_address( + const ia_css_buffer_set_t *buffer_set) +{ + DECLARE_ERRVAL + vied_vaddress_t ipu_virtual_address = VIED_NULL; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_get_ipu_address(): enter:\n"); + + ipu_virtual_address = buffer_set->ipu_virtual_address; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_get_ipu_address: invalid argument\n"); + } + return ipu_virtual_address; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_buffer_set_set_process_group_handle( + ia_css_buffer_set_t *buffer_set, + const vied_vaddress_t process_group_handle) +{ + DECLARE_ERRVAL + int ret = -1; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_set_process_group_handle(): enter:\n"); + + buffer_set->process_group_handle = process_group_handle; + + ret = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_set_process_group_handle invalid argument\n"); + } + return ret; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_buffer_set_get_process_group_handle( + const ia_css_buffer_set_t *buffer_set) +{ + DECLARE_ERRVAL + vied_vaddress_t process_group_handle = VIED_NULL; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_get_process_group_handle(): enter:\n"); + + process_group_handle = buffer_set->process_group_handle; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_get_process_group_handle: invalid argument\n"); + } + return process_group_handle; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_buffer_set_set_token( + ia_css_buffer_set_t *buffer_set, + const uint64_t token) +{ + DECLARE_ERRVAL + int ret = -1; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_set_token(): enter:\n"); + + buffer_set->token = token; + + ret = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_set_token invalid argument\n"); + } + return ret; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint64_t ia_css_buffer_set_get_token( + const ia_css_buffer_set_t *buffer_set) +{ + DECLARE_ERRVAL + uint64_t token = 0; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_get_token(): enter:\n"); + + token = buffer_set->token; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_get_token: invalid argument\n"); + } + return token; +} + +#endif /* __IA_CSS_PSYS_BUFFER_SET_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process.c new file mode 100644 index 000000000000..f9e060f62ead --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process.c @@ -0,0 +1,1148 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_process.h" +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_psys_process_private_types.h" +#include /* for NOT_USED */ + +/* + * Functions to possibly inline + */ + +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_process_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +/* + * Functions not to inline + */ + +/* This source file is created with the intention of being shared and + * compiled for both host and firmware. Since there is no native 64bit + * data type support for firmware this wouldn't compile for SP + * tile. The parts of the file that are not compilable are marked + * with the following __HIVECC marker and this comment. Once we + * come up with a solution to address this issue, this will be + * removed. + */ +#if !defined(__HIVECC) +size_t ia_css_sizeof_process( + const ia_css_program_manifest_t *manifest, + const ia_css_program_param_t *param) +{ + size_t size = 0, tmp_size; + + uint8_t program_dependency_count; + uint8_t terminal_dependency_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_sizeof_process(): enter:\n"); + + COMPILATION_ERROR_IF( + SIZE_OF_PROCESS_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_process_t))); + + COMPILATION_ERROR_IF(0 != sizeof(ia_css_process_t)%sizeof(uint64_t)); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + size += sizeof(ia_css_process_t); + + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + + tmp_size = program_dependency_count*sizeof(vied_nci_resource_id_t); + size += tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + tmp_size = terminal_dependency_count*sizeof(uint8_t); + size += tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_sizeof_process invalid argument\n"); + } + return size; +} + +ia_css_process_t *ia_css_process_create( + void *raw_mem, + const ia_css_program_manifest_t *manifest, + const ia_css_program_param_t *param, + const uint32_t program_idx) +{ + size_t tmp_size; + int retval = -1; + ia_css_process_t *process = NULL; + char *process_raw_ptr = (char *) raw_mem; + + /* size_t size = ia_css_sizeof_process(manifest, param); */ + uint8_t program_dependency_count; + uint8_t terminal_dependency_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_create(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + verifexit(process_raw_ptr != NULL); + + process = (ia_css_process_t *) process_raw_ptr; + verifexit(process != NULL); + + process->kernel_bitmap = + ia_css_program_manifest_get_kernel_bitmap(manifest); + process->state = IA_CSS_PROCESS_CREATED; + + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + + /* A process requires at least one input or output */ +
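/* Note (derived from the offset bookkeeping below): the dependency + * arrays are not allocated separately; they are laid out directly + * behind the ia_css_process_t struct, each padded to a uint64_t + * boundary, and addressed through the byte offsets stored in + * cell_dependencies_offset / terminal_dependencies_offset. + */ +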
verifexit((program_dependency_count + + terminal_dependency_count) != 0); + + process_raw_ptr += sizeof(ia_css_process_t); + if (program_dependency_count != 0) { + process->cell_dependencies_offset = + (uint16_t) (process_raw_ptr - (char *)process); + tmp_size = + program_dependency_count * sizeof(vied_nci_resource_id_t); + process_raw_ptr += + tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + } else { + process->cell_dependencies_offset = 0; + } + + if (terminal_dependency_count != 0) { + process->terminal_dependencies_offset = + (uint16_t) (process_raw_ptr - (char *)process); + } + + process->size = (uint32_t)ia_css_sizeof_process(manifest, param); + + process->ID = ia_css_program_manifest_get_program_ID(manifest); + verifexit(process->ID != 0); + process->program_idx = program_idx; + + process->cell_dependency_count = program_dependency_count; + process->terminal_dependency_count = terminal_dependency_count; + + process->parent_offset = 0; + + verifexit(ia_css_process_clear_all(process) == 0); + + process->state = IA_CSS_PROCESS_READY; + retval = 0; + + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_create(): Created successfully process %p ID 0x%x\n", + process, process->ID); + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_create invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_create failed (%i)\n", retval); + process = ia_css_process_destroy(process); + } + return process; +} + +ia_css_process_t *ia_css_process_destroy( + ia_css_process_t *process) +{ + + return process; +} +#endif + +int ia_css_process_set_cell( + ia_css_process_t *process, + const vied_nci_cell_ID_t cell_id) +{ + int retval = -1; + vied_nci_resource_bitmap_t bit_mask; + vied_nci_resource_bitmap_t resource_bitmap; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_cell(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + + verifexit(parent != NULL); + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + +/* Some programs are mapped on a fixed cell, + * when the process group is created + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_CREATED) || + /* If the process group has already been created, but no VP cell + * has been assigned to this process (i.e. 
not fixed in + * manifest), then we need to set the cell of this process + * while its parent state is READY (the ready state is set at + * the end of ia_css_process_group_create) + */ + (parent_state == IA_CSS_PROCESS_GROUP_READY))); + verifexit(state == IA_CSS_PROCESS_READY); + +/* Some programs are mapped on a fixed cell; this check is therefore not + * conclusive, but it will detect a preset cell. The process manager will + * do the conclusive check. + */ + verifexit(ia_css_process_get_cell(process) == + VIED_NCI_N_CELL_ID); + + bit_mask = vied_nci_cell_bit_mask(cell_id); + resource_bitmap = ia_css_process_group_get_resource_bitmap(parent); + + verifexit(bit_mask != 0); + verifexit(vied_nci_is_bitmap_clear(bit_mask, resource_bitmap)); + + ia_css_process_cells_clear(process); + ia_css_process_cells_set_cell(process, 0, cell_id); + + resource_bitmap = vied_nci_bitmap_set(resource_bitmap, bit_mask); + + retval = ia_css_process_group_set_resource_bitmap( + parent, resource_bitmap); +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_cell invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_cell failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_clear_cell( + ia_css_process_t *process) +{ + int retval = -1; + vied_nci_cell_ID_t cell_id; + ia_css_process_group_t *parent; + vied_nci_resource_bitmap_t resource_bitmap; + vied_nci_resource_bitmap_t bit_mask; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_clear_cell(): enter:\n"); + verifexit(process != NULL); + + cell_id = ia_css_process_get_cell(process); + parent = ia_css_process_get_parent(process); + + verifexit(parent != NULL); + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) + || (parent_state == IA_CSS_PROCESS_GROUP_STARTED))); + verifexit(state == IA_CSS_PROCESS_READY); + + bit_mask = vied_nci_cell_bit_mask(cell_id); + resource_bitmap = ia_css_process_group_get_resource_bitmap(parent); + + verifexit(bit_mask != 0); + verifexit(vied_nci_is_bitmap_set(bit_mask, resource_bitmap)); + + ia_css_process_cells_clear(process); + + resource_bitmap = vied_nci_bitmap_clear(resource_bitmap, bit_mask); + + retval = ia_css_process_group_set_resource_bitmap( + parent, resource_bitmap); +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_clear_cell invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_clear_cell failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_int_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t offset) +{ + int retval = -1; + ia_css_process_group_t *parent; + vied_nci_cell_ID_t cell_id; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_int_mem(): enter:\n"); + + verifexit(process != NULL); + verifexit(mem_type_id < VIED_NCI_N_MEM_TYPE_ID); + + parent = ia_css_process_get_parent(process); + cell_id = ia_css_process_get_cell(process); + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + + /* TODO : separate process group start and run from + * process_group_exec_cmd() + */ + verifexit(((parent_state ==
IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_RUNNING))); + verifexit(state == IA_CSS_PROCESS_READY); + + if (vied_nci_is_cell_mem_of_type(cell_id, mem_type_id, mem_type_id)) { + vied_nci_mem_ID_t mem_id = + vied_nci_cell_get_mem(cell_id, mem_type_id); + + process->int_mem_id[mem_type_id] = mem_id; + process->int_mem_offset[mem_type_id] = offset; + retval = 0; + } +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_int_mem failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_clear_int_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id) +{ + int retval = -1; + uint16_t mem_index; + ia_css_process_group_t *parent; + vied_nci_cell_ID_t cell_id; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_clear_int_mem(): enter:\n"); + + verifexit(process != NULL); + verifexit(mem_type_id < VIED_NCI_N_MEM_TYPE_ID); + + parent = ia_css_process_get_parent(process); + cell_id = ia_css_process_get_cell(process); + + /* We should have a check on NULL != parent but if parent is NULL + * ia_css_process_group_get_state will return + * IA_CSS_N_PROCESS_GROUP_STATES so it will be filtered anyway later. + */ + + /* verifexit(parent != NULL); */ + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) + || (parent_state == IA_CSS_PROCESS_GROUP_STARTED))); + verifexit(state == IA_CSS_PROCESS_READY); + +/* We could just clear the field, but let's check the state for + * consistency first + */ + for (mem_index = 0; mem_index < (int)VIED_NCI_N_MEM_TYPE_ID; + mem_index++) { + if (vied_nci_is_cell_mem_of_type( + cell_id, mem_index, mem_type_id)) { + vied_nci_mem_ID_t mem_id = + vied_nci_cell_get_mem(cell_id, mem_index); + int mem_of_type; + + mem_of_type = + vied_nci_is_mem_of_type(mem_id, mem_type_id); + + assert(mem_of_type); + assert((process->int_mem_id[mem_type_id] == mem_id) || + (process->int_mem_id[mem_type_id] == + VIED_NCI_N_MEM_ID)); + process->int_mem_id[mem_type_id] = VIED_NCI_N_MEM_ID; + process->int_mem_offset[mem_type_id] = + IA_CSS_PROCESS_INVALID_OFFSET; + retval = 0; + } + } + +EXIT: + if (NULL == process || mem_type_id >= VIED_NCI_N_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_clear_int_mem invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_clear_int_mem failed (%i)\n", retval); + } +return retval; +} + +int ia_css_process_set_ext_mem( + ia_css_process_t *process, + const vied_nci_mem_ID_t mem_id, + const vied_nci_resource_size_t offset) +{ + int retval = -1; + ia_css_process_group_t *parent; + vied_nci_cell_ID_t cell_id; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + vied_nci_mem_type_ID_t mem_type_id; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_ext_mem(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + cell_id = ia_css_process_get_cell(process); + + /* We should have a check on NULL != parent but if parent is NULL + * ia_css_process_group_get_state will return + * IA_CSS_N_PROCESS_GROUP_STATES so it will be filtered anyway later.
+ */ + + /* verifexit(parent != NULL); */ + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + + /* TODO : separate process group start and run from + * process_group_exec_cmd() + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_RUNNING))); + verifexit(state == IA_CSS_PROCESS_READY); + + /* Check that the memory actually exists, "vied_nci_has_cell_mem_of_id()" + * will return false on error + */ + + mem_type_id = vied_nci_mem_get_type(mem_id); + if (((!vied_nci_has_cell_mem_of_id(cell_id, mem_id) && + (mem_type_id != VIED_NCI_PMEM_TYPE_ID)) + || vied_nci_mem_is_ext_type(mem_type_id)) && + (mem_id < VIED_NCI_N_MEM_ID)) { + + verifexit(mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID); + process->ext_mem_id[mem_type_id] = mem_id; + process->ext_mem_offset[mem_type_id] = offset; + retval = 0; + } + +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_ext_mem invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_ext_mem failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_clear_ext_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_clear_ext_mem(): enter:\n"); + + verifexit(process != NULL); + verifexit(mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID); + + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + verifexit(parent != NULL); + verifexit(state == IA_CSS_PROCESS_READY); + + parent_state = ia_css_process_group_get_state(parent); + + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED))); + + process->ext_mem_id[mem_type_id] = VIED_NCI_N_MEM_ID; + process->ext_mem_offset[mem_type_id] = IA_CSS_PROCESS_INVALID_OFFSET; + + retval = 0; +EXIT: + if (NULL == process || mem_type_id >= VIED_NCI_N_DATA_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_clear_ext_mem invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_clear_ext_mem failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_cells_bitmap( + ia_css_process_t *process, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + int array_index = 0; + int bit_index; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_cells_bitmap(): enter:\n"); + + verifexit(process != NULL); + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + parent_state = ia_css_process_group_get_state(parent); + + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_CREATED) || + (parent_state == IA_CSS_PROCESS_GROUP_READY))); + verifexit(state == IA_CSS_PROCESS_READY); + + for (bit_index = 0; bit_index < VIED_NCI_N_CELL_ID; bit_index++) { + if (vied_nci_is_bit_set_in_bitmap(bitmap, bit_index)) { + verifexit(array_index < IA_CSS_PROCESS_MAX_CELLS); + ia_css_process_cells_set_cell(process, + array_index, 
(vied_nci_cell_ID_t)bit_index); + array_index++; + } + } + for (; array_index < IA_CSS_PROCESS_MAX_CELLS; array_index++) { + ia_css_process_cells_set_cell(process, + array_index, VIED_NCI_N_CELL_ID); + } + + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_cells_bitmap invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_cells_bitmap failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t offset) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_dev_chn(): enter:\n"); + + verifexit(process != NULL); + verifexit(dev_chn_id < VIED_NCI_N_DEV_CHN_ID); + + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + parent_state = ia_css_process_group_get_state(parent); + + /* TODO : separate process group start and run from + * process_group_exec_cmd() + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_RUNNING))); + verifexit(state == IA_CSS_PROCESS_READY); + + process->dev_chn_offset[dev_chn_id] = offset; + + retval = 0; +EXIT: + if (NULL == process || dev_chn_id >= VIED_NCI_N_DEV_CHN_ID) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_dev_chn invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_dev_chn failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_dfm_port_bitmap( + ia_css_process_t *process, + const vied_nci_dev_dfm_id_t dfm_dev_id, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_dfm_port_bitmap(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + parent_state = ia_css_process_group_get_state(parent); + + /* TODO : separate process group start and run from + * process_group_exec_cmd() + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_RUNNING))); + verifexit(state == IA_CSS_PROCESS_READY); + +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_dev_id <= VIED_NCI_N_DEV_DFM_ID); + process->dfm_port_bitmap[dfm_dev_id] = bitmap; +#else + (void)bitmap; + (void)dfm_dev_id; +#endif + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_dfm_port_bitmap invalid argument\n"); + } + return retval; +} + +int ia_css_process_set_dfm_active_port_bitmap( + ia_css_process_t *process, + const vied_nci_dev_dfm_id_t dfm_dev_id, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_dfm_active_port_bitmap(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + parent_state =
ia_css_process_group_get_state(parent); + + /* TODO : separate process group start and run from + * process_group_exec_cmd() + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_RUNNING))); + verifexit(state == IA_CSS_PROCESS_READY); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_dev_id <= VIED_NCI_N_DEV_DFM_ID); + process->dfm_active_port_bitmap[dfm_dev_id] = bitmap; +#else + (void)bitmap; + (void)dfm_dev_id; +#endif + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_dfm_active_port_bitmap invalid argument\n"); + } + return retval; +} + +int ia_css_process_clear_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_clear_dev_chn(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + + /* We should have a check on NULL != parent but if parent is NULL + * ia_css_process_group_get_state will return + * IA_CSS_N_PROCESS_GROUP_STATES so it will be filtered anyway later. + */ + + /* verifexit(parent != NULL); */ + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) + || (parent_state == IA_CSS_PROCESS_GROUP_STARTED))); + verifexit(state == IA_CSS_PROCESS_READY); + + verifexit(dev_chn_id < VIED_NCI_N_DEV_CHN_ID); + + process->dev_chn_offset[dev_chn_id] = IA_CSS_PROCESS_INVALID_OFFSET; + + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_clear_dev_chn invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_clear_dev_chn failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_clear_all( + ia_css_process_t *process) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + int mem_index; + int dev_chn_index; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_clear_all(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + /* We should have a check on NULL != parent but if parent is NULL + * ia_css_process_group_get_state will return + * IA_CSS_N_PROCESS_GROUP_STATES so it will be filtered anyway later.
+ */ + + /* verifexit(parent != NULL); */ + + parent_state = ia_css_process_group_get_state(parent); + +/* Contrary to the set functions, resource clear can be called in any state + * except running + */ + verifexit((parent_state != IA_CSS_PROCESS_GROUP_RUNNING) || + (parent_state == IA_CSS_N_PROCESS_GROUP_STATES)); + verifexit((state == IA_CSS_PROCESS_CREATED) || + (state == IA_CSS_PROCESS_READY)); + + for (dev_chn_index = 0; dev_chn_index < VIED_NCI_N_DEV_CHN_ID; + dev_chn_index++) { + process->dev_chn_offset[dev_chn_index] = + IA_CSS_PROCESS_INVALID_OFFSET; + } +/* No difference whether a cell_id has been set or not, clear all */ + for (mem_index = 0; mem_index < VIED_NCI_N_DATA_MEM_TYPE_ID; + mem_index++) { + process->ext_mem_id[mem_index] = VIED_NCI_N_MEM_ID; + process->ext_mem_offset[mem_index] = + IA_CSS_PROCESS_INVALID_OFFSET; + } + for (mem_index = 0; mem_index < VIED_NCI_N_MEM_TYPE_ID; mem_index++) { + process->int_mem_id[mem_index] = VIED_NCI_N_MEM_ID; + process->int_mem_offset[mem_index] = + IA_CSS_PROCESS_INVALID_OFFSET; + } + + ia_css_process_cells_clear(process); + + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_clear_all invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_clear_all failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_acquire( + ia_css_process_t *process) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_acquire(): enter:\n"); + + verifexit(process != NULL); + + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_acquire invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_acquire failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_release( + ia_css_process_t *process) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_release(): enter:\n"); + + verifexit(process != NULL); + + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_release invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_release failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_print(const ia_css_process_t *process, void *fid) +{ + int retval = -1; + int i, dev_chn_index; + uint16_t mem_index; + uint8_t cell_dependency_count, terminal_dependency_count; + vied_nci_cell_ID_t cell_id = ia_css_process_get_cell(process); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_print(process %p): enter:\n", process); + + verifexit(process != NULL); + + IA_CSS_TRACE_6(PSYSAPI_DYNAMIC, INFO, + "\tprocess %p, sizeof %d, programID %d, state %d, parent %p, cell %d\n", + process, + (int)ia_css_process_get_size(process), + (int)ia_css_process_get_program_ID(process), + (int)ia_css_process_get_state(process), + (void *)ia_css_process_get_parent(process), + (int)ia_css_process_get_cell(process)); + + for (mem_index = 0; mem_index < (int)VIED_NCI_N_MEM_TYPE_ID; + mem_index++) { + vied_nci_mem_ID_t mem_id = + (vied_nci_mem_ID_t)(process->int_mem_id[mem_index]); + if (cell_id == VIED_NCI_N_CELL_ID) { + verifexit(mem_id == VIED_NCI_N_MEM_ID); + continue; + } + verifexit(((mem_id == vied_nci_cell_get_mem(cell_id, mem_index)) + || (mem_id == VIED_NCI_N_MEM_ID))); + + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "\tinternal index %d, type %d, id %d offset 0x%x\n",
+ mem_index, + (int)vied_nci_cell_get_mem_type(cell_id, mem_index), + (int)mem_id, + process->int_mem_offset[mem_index]); + } + + for (mem_index = 0; mem_index < (int)VIED_NCI_N_DATA_MEM_TYPE_ID; + mem_index++) { + vied_nci_mem_ID_t mem_id = + (vied_nci_mem_ID_t)(process->ext_mem_id[mem_index]); + /* TODO: in case of a cells_bitmap = [], + * vied_nci_cell_get_mem_type will return a wrong result. + */ + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "\texternal index %d, type %d, id %d offset 0x%x\n", + mem_index, + (int)vied_nci_cell_get_mem_type(cell_id, mem_index), + (int)mem_id, + process->ext_mem_offset[mem_index]); + NOT_USED(mem_id); + } + for (dev_chn_index = 0; dev_chn_index < (int)VIED_NCI_N_DEV_CHN_ID; + dev_chn_index++) { + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "\tdevice channel index %d, type %d, offset 0x%x\n", + dev_chn_index, + (int)dev_chn_index, + process->dev_chn_offset[dev_chn_index]); + } +#if HAS_DFM + for (dev_chn_index = 0; dev_chn_index < (int)VIED_NCI_N_DEV_DFM_ID; + dev_chn_index++) { + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "\tdfm device index %d, type %d, bitmap 0x%x active_ports_bitmap 0x%x\n", + dev_chn_index, dev_chn_index, + process->dfm_port_bitmap[dev_chn_index], + process->dfm_active_port_bitmap[dev_chn_index]); + } +#endif + + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "\tcells[%d] = 0x%x\n", + i, ia_css_process_cells_get_cell(process, i)); + } + + cell_dependency_count = + ia_css_process_get_cell_dependency_count(process); + if (cell_dependency_count == 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tcell_dependencies[%d] {};\n", cell_dependency_count); + } else { + vied_nci_resource_id_t cell_dependency; + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tcell_dependencies[%d] {", cell_dependency_count); + for (i = 0; i < (int)cell_dependency_count - 1; i++) { + cell_dependency = + ia_css_process_get_cell_dependency(process, i); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "%4d, ", cell_dependency); + } + cell_dependency = + ia_css_process_get_cell_dependency(process, i); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "%4d}\n", cell_dependency); + (void)cell_dependency; + } + + terminal_dependency_count = + ia_css_process_get_terminal_dependency_count(process); + if (terminal_dependency_count == 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tterminal_dependencies[%d] {};\n", + terminal_dependency_count); + } else { + uint8_t terminal_dependency; + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tterminal_dependencies[%d] {", + terminal_dependency_count); + for (i = 0; i < (int)terminal_dependency_count - 1; i++) { + terminal_dependency = + ia_css_process_get_terminal_dependency(process, i); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "%4d, ", terminal_dependency); + } + terminal_dependency = + ia_css_process_get_terminal_dependency(process, i); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "%4d}\n", terminal_dependency); + (void)terminal_dependency; + } + + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_print invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_print failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_parent( + ia_css_process_t *process, + ia_css_process_group_t *parent) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_parent(): enter:\n"); + +
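/* The parent link is stored below as a self-relative byte offset + * rather than an absolute pointer, which keeps the link valid when + * the enclosing process group object is moved or mapped as a whole. + */ +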
verifexit(process != NULL); + verifexit(parent != NULL); + + process->parent_offset = (uint16_t) ((char *)parent - (char *)process); + retval = 0; +EXIT: + if (NULL == process || NULL == parent) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_parent invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_parent failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_cell_dependency( + const ia_css_process_t *process, + const unsigned int dep_index, + const vied_nci_resource_id_t id) +{ + int retval = -1; + uint8_t *process_dep_ptr; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_cell_dependency(): enter:\n"); + verifexit(process != NULL); + + process_dep_ptr = + (uint8_t *)process + process->cell_dependencies_offset + + dep_index*sizeof(vied_nci_resource_id_t); + + + *process_dep_ptr = id; + retval = 0; +EXIT: + return retval; +} + +int ia_css_process_set_terminal_dependency( + const ia_css_process_t *process, + const unsigned int dep_index, + const vied_nci_resource_id_t id) +{ + int retval = -1; + uint8_t *terminal_dep_ptr; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_terminal_dependency(): enter:\n"); + verifexit(process != NULL); + verifexit(ia_css_process_get_terminal_dependency_count(process) > dep_index); + + terminal_dep_ptr = + (uint8_t *)process + process->terminal_dependencies_offset + + dep_index*sizeof(uint8_t); + + *terminal_dep_ptr = id; + retval = 0; +EXIT: + return retval; +} + +int ia_css_process_cmd( + ia_css_process_t *process, + const ia_css_process_cmd_t cmd) +{ + int retval = -1; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, "ia_css_process_cmd(): enter:\n"); + + verifexit(process != NULL); + + state = ia_css_process_get_state(process); + + verifexit(state != IA_CSS_PROCESS_ERROR); + verifexit(state < IA_CSS_N_PROCESS_STATES); + + switch (cmd) { + case IA_CSS_PROCESS_CMD_NOP: + break; + case IA_CSS_PROCESS_CMD_ACQUIRE: + verifexit(state == IA_CSS_PROCESS_READY); + break; + case IA_CSS_PROCESS_CMD_RELEASE: + verifexit(state == IA_CSS_PROCESS_READY); + break; + case IA_CSS_PROCESS_CMD_START: + verifexit((state == IA_CSS_PROCESS_READY) + || (state == IA_CSS_PROCESS_STOPPED)); + process->state = IA_CSS_PROCESS_STARTED; + break; + case IA_CSS_PROCESS_CMD_LOAD: + verifexit(state == IA_CSS_PROCESS_STARTED); + process->state = IA_CSS_PROCESS_RUNNING; + break; + case IA_CSS_PROCESS_CMD_STOP: + verifexit((state == IA_CSS_PROCESS_RUNNING) + || (state == IA_CSS_PROCESS_SUSPENDED)); + process->state = IA_CSS_PROCESS_STOPPED; + break; + case IA_CSS_PROCESS_CMD_SUSPEND: + verifexit(state == IA_CSS_PROCESS_RUNNING); + process->state = IA_CSS_PROCESS_SUSPENDED; + break; + case IA_CSS_PROCESS_CMD_RESUME: + verifexit(state == IA_CSS_PROCESS_SUSPENDED); + process->state = IA_CSS_PROCESS_RUNNING; + break; + case IA_CSS_N_PROCESS_CMDS: /* Fall through */ + default: + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_cmd invalid cmd (0x%x)\n", cmd); + goto EXIT; + } + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_cmd invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_cmd failed (%i)\n", retval); + } + return retval; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group.c 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group.c new file mode 100644 index 000000000000..46bb82804153 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group.c @@ -0,0 +1,886 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_process_group.h" +#include "ia_css_psys_dynamic_storage_class.h" + +/* + * Functions to possibly inline + */ + +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_process_group_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +/* + * Functions not to inline + */ + +/* This header is needed for cpu memset to 0, +* and process groups are not created in SP +*/ +#if !defined(__VIED_CELL) +#include "cpu_mem_support.h" +#endif + +/* This source file is intended to be shared and +* compiled for both host and firmware. Since there is no native 64bit +* data type support for firmware, this wouldn't compile for the SP +* tile. The parts of the file that are not compilable are marked +* with the following __VIED_CELL marker and this comment. Once we +* come up with a solution to address this issue, this will be +* removed. +*/ +#if !defined(__VIED_CELL) +static bool ia_css_process_group_is_program_enabled( + const ia_css_program_manifest_t *program_manifest, + ia_css_kernel_bitmap_t enable_bitmap) +{ + ia_css_kernel_bitmap_t program_bitmap = + ia_css_program_manifest_get_kernel_bitmap(program_manifest); + ia_css_program_type_t program_type = + ia_css_program_manifest_get_type(program_manifest); + ia_css_kernel_bitmap_t program_enable_bitmap; + + if (!ia_css_is_kernel_bitmap_intersection_empty(enable_bitmap, + program_bitmap)) { + + if (program_type == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB || + program_type == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER || + program_type == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) { + /* + * EXCLUSIVE_SUB programs are subsets of + * EXCLUSIVE_SUPER so the bits of the enable_bitmap + * that refer to those are those of their + * EXCLUSIVE_SUPER program (on which they depend) and + * not the subset that their own program_bitmap has + */ + if (program_type == + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB || + program_type == + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) { + ia_css_kernel_bitmap_t super_program_bitmap; + + const ia_css_program_group_manifest_t * + prog_group_manifest = + ia_css_program_manifest_get_parent(program_manifest); + uint8_t super_prog_idx = + ia_css_program_manifest_get_program_dependency( + program_manifest, 0); + const ia_css_program_manifest_t * + super_program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + prog_group_manifest, super_prog_idx); + + verifexit(super_program_manifest != NULL); + if (((program_type == + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (ia_css_program_manifest_get_type( + super_program_manifest) != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER)) + || ((program_type == + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) && + (ia_css_program_manifest_get_type( + super_program_manifest)
!= + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER))) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_is_program_enabled(): Error\n"); + verifexit(0); + } + + super_program_bitmap = + ia_css_program_manifest_get_kernel_bitmap( + super_program_manifest); + program_enable_bitmap = + ia_css_kernel_bitmap_intersection( + enable_bitmap, + super_program_bitmap); + } else { + program_enable_bitmap = + ia_css_kernel_bitmap_intersection( + enable_bitmap, program_bitmap); + } + + if (ia_css_is_kernel_bitmap_equal( + program_enable_bitmap, program_bitmap)) { + return true; + } + } else if (program_type == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER) { + /* + * Virtual super programs are not selectable, + * only the virtual sub programs are + */ + return false; + } else { + return true; + } + } + +EXIT: + return false; +} + +static bool ia_css_process_group_is_terminal_enabled( + const ia_css_terminal_manifest_t *terminal_manifest, + ia_css_kernel_bitmap_t enable_bitmap) +{ + ia_css_terminal_type_t terminal_type; + + verifjmpexit(NULL != terminal_manifest); + terminal_type = ia_css_terminal_manifest_get_type(terminal_manifest); + + if (ia_css_is_terminal_manifest_data_terminal(terminal_manifest)) { + ia_css_data_terminal_manifest_t *data_term_manifest = + (ia_css_data_terminal_manifest_t *)terminal_manifest; + ia_css_kernel_bitmap_t term_bitmap = + ia_css_data_terminal_manifest_get_kernel_bitmap( + data_term_manifest); + /* + * Terminals depend on a kernel; + * if the kernel is present, the program that contains it and + * the terminals the program depends on are active + */ + if (!ia_css_is_kernel_bitmap_intersection_empty( + enable_bitmap, term_bitmap)) { + return true; + } + } else if (ia_css_is_terminal_manifest_spatial_parameter_terminal( + terminal_manifest)) { + ia_css_kernel_bitmap_t term_kernel_bitmap = ia_css_kernel_bitmap_clear(); + ia_css_spatial_param_terminal_manifest_t *spatial_term_man = + (ia_css_spatial_param_terminal_manifest_t *) + terminal_manifest; + + term_kernel_bitmap = + ia_css_kernel_bitmap_set( + term_kernel_bitmap, + spatial_term_man->kernel_id); + if (!ia_css_is_kernel_bitmap_intersection_empty( + enable_bitmap, term_kernel_bitmap)) { + return true; + } + + } else if (ia_css_is_terminal_manifest_parameter_terminal( + terminal_manifest) && terminal_type == + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN) { + return true; + + } else if (ia_css_is_terminal_manifest_parameter_terminal( + terminal_manifest) && terminal_type == + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT) { + /* + * For parameter out terminals, we disable the terminals + * if ALL the corresponding kernels are disabled, + * for parameter in terminals we cannot do this; + * even if kernels are disabled, it may be required that + * (HW) parameters must be supplied via the parameter + * in terminal (e.g. bypass bits).
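+	 * For example: if kernels A (bit 0) and B (bit 1) are the only
+	 * kernels contributing sections to a parameter-out terminal, the
+	 * terminal stays enabled while enable_bitmap has bit 0 or bit 1
+	 * set, and is only dropped once both bits are cleared.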
+ */ + ia_css_kernel_bitmap_t term_kernel_bitmap = ia_css_kernel_bitmap_clear(); + ia_css_param_terminal_manifest_t *param_term_man = + (ia_css_param_terminal_manifest_t *)terminal_manifest; + ia_css_param_manifest_section_desc_t *section_desc; + unsigned int section = 0; + + for (section = 0; section < param_term_man-> + param_manifest_section_desc_count; section++) { + section_desc = + ia_css_param_terminal_manifest_get_prm_sct_desc( + param_term_man, section); + verifjmpexit(section_desc != NULL); + term_kernel_bitmap = ia_css_kernel_bitmap_set( + term_kernel_bitmap, + section_desc->kernel_id); + } + + if (!ia_css_is_kernel_bitmap_intersection_empty( + enable_bitmap, term_kernel_bitmap)) { + return true; + } + } else if (ia_css_is_terminal_manifest_program_terminal( + terminal_manifest)) { + return true; + } else if (ia_css_is_terminal_manifest_program_control_init_terminal( + terminal_manifest)) { + return true; + } +EXIT: + return false; +} + +size_t ia_css_sizeof_process_group( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + size_t size = 0, tmp_size; + int i, error_val = -1; + uint8_t process_count, process_num; + uint8_t terminal_count; + ia_css_kernel_bitmap_t enable_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_sizeof_process_group(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + COMPILATION_ERROR_IF( + SIZE_OF_PROCESS_GROUP_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_process_group_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_process_group_t) % sizeof(uint64_t)); + + process_count = + ia_css_process_group_compute_process_count(manifest, param); + terminal_count = + ia_css_process_group_compute_terminal_count(manifest, param); + + verifexit(process_count != 0); + verifexit(terminal_count != 0); + + size += sizeof(ia_css_process_group_t); + + tmp_size = process_count * sizeof(uint16_t); + size += tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + + tmp_size = terminal_count * sizeof(uint16_t); + size += tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + + enable_bitmap = + ia_css_program_group_param_get_kernel_enable_bitmap(param); + process_num = 0; + for (i = 0; i < (int)ia_css_program_group_manifest_get_program_count( + manifest); i++) { + ia_css_program_manifest_t *program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst(manifest, i); + ia_css_program_param_t *program_param = + ia_css_program_group_param_get_program_param(param, i); + + if (ia_css_process_group_is_program_enabled( + program_manifest, enable_bitmap)) { + verifexit(process_num < process_count); + size += ia_css_sizeof_process( + program_manifest, program_param); + process_num++; + } + } + + verifexit(process_num == process_count); + + for (i = 0; i < (int)ia_css_program_group_manifest_get_terminal_count( + manifest); i++) { + ia_css_terminal_manifest_t *terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + + if (ia_css_process_group_is_terminal_enabled( + terminal_manifest, enable_bitmap)) { + size += ia_css_sizeof_terminal( + terminal_manifest, param); + } + } + + error_val = 0; + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_sizeof_process_group invalid argument\n"); + } + if (error_val != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_sizeof_process_group ERROR(%d)\n", error_val); + } + return size; +} + +ia_css_process_group_t *ia_css_process_group_create( + void *process_grp_mem, + const 
ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + size_t size = ia_css_sizeof_process_group(manifest, param); + int retval = -1; + int ret; + int i; + ia_css_process_group_t *process_group = NULL; + uint8_t process_count, process_num; + uint8_t terminal_count, terminal_num; + uint16_t fragment_count; + char *process_grp_raw_ptr; + uint16_t *process_tab_ptr, *terminal_tab_ptr; + ia_css_kernel_bitmap_t enable_bitmap; + uint8_t manifest_terminal_count; + + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create(process_grp_mem %p, manifest %p, group_param %p): enter:\n", + process_grp_mem, manifest, param); + + verifexit(process_grp_mem != NULL); + verifexit(manifest != NULL); + verifexit(param != NULL); + verifexit(ia_css_is_program_group_manifest_valid(manifest)); + + process_group = (ia_css_process_group_t *)process_grp_mem; + ia_css_cpu_mem_set_zero(process_group, size); + process_grp_raw_ptr = (char *) process_group; + + process_group->state = IA_CSS_PROCESS_GROUP_CREATED; + + process_group->protocol_version = + ia_css_program_group_param_get_protocol_version(param); + + fragment_count = ia_css_program_group_param_get_fragment_count(param); + process_count = + ia_css_process_group_compute_process_count(manifest, param); + terminal_count = + ia_css_process_group_compute_terminal_count(manifest, param); + enable_bitmap = + ia_css_program_group_param_get_kernel_enable_bitmap(param); + + process_group->fragment_count = fragment_count; + process_group->process_count = process_count; + process_group->terminal_count = terminal_count; + + process_grp_raw_ptr += sizeof(ia_css_process_group_t); + process_tab_ptr = (uint16_t *) process_grp_raw_ptr; + process_group->processes_offset = + (uint16_t)(process_grp_raw_ptr - (char *)process_group); + + process_grp_raw_ptr += tot_bytes_for_pow2_align( + sizeof(uint64_t), process_count * sizeof(uint16_t)); + terminal_tab_ptr = (uint16_t *) process_grp_raw_ptr; + process_group->terminals_offset = + (uint16_t)(process_grp_raw_ptr - (char *)process_group); + + /* Move raw pointer to the first process */ + process_grp_raw_ptr += tot_bytes_for_pow2_align( + sizeof(uint64_t), terminal_count * sizeof(uint16_t)); + + /* Set default */ + verifexit(ia_css_process_group_set_fragment_limit( + process_group, fragment_count) == 0); + + /* Set process group terminal dependency list */ + /* This list is used during creating the process dependency list */ + manifest_terminal_count = + ia_css_program_group_manifest_get_terminal_count(manifest); + + terminal_num = 0; + for (i = 0; i < (int)manifest_terminal_count; i++) { + ia_css_terminal_manifest_t *t_manifest = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + + verifexit(NULL != t_manifest); + if (ia_css_process_group_is_terminal_enabled( + t_manifest, enable_bitmap)) { + ia_css_terminal_t *terminal = NULL; + ia_css_terminal_param_t *terminal_param = + ia_css_program_group_param_get_terminal_param( + param, i); + + verifexit(NULL != terminal_param); + terminal_tab_ptr[terminal_num] = + (uint16_t)(process_grp_raw_ptr - + (char *)process_group); + terminal = ia_css_terminal_create( + process_grp_raw_ptr, t_manifest, + terminal_param, enable_bitmap); + verifexit(terminal != NULL); + verifexit((ia_css_terminal_set_parent( + terminal, process_group) == 0)); + verifexit((ia_css_terminal_set_terminal_manifest_index( + terminal, i) == 0)); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create: terminal_manifest_index %d\n", + i); + + 
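+			/*
+			 * Terminals are packed back to back in the raw
+			 * buffer: terminal_tab_ptr[n] records terminal n's
+			 * byte offset from the process group base, and the
+			 * raw pointer then advances by the terminal's
+			 * actual size.
+			 */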
process_grp_raw_ptr += ia_css_terminal_get_size( + terminal); + terminal_num++; + } + } + verifexit(terminal_num == terminal_count); + + process_num = 0; + for (i = 0; i < (int)ia_css_program_group_manifest_get_program_count( + manifest); i++) { + ia_css_process_t *process = NULL; + ia_css_program_manifest_t *program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, i); + ia_css_program_param_t *program_param = + ia_css_program_group_param_get_program_param(param, i); + unsigned int prog_dep_index, proc_dep_index; + unsigned int term_dep_index, term_index; + + if (ia_css_process_group_is_program_enabled( + program_manifest, enable_bitmap)) { + + verifexit(process_num < process_count); + + process_tab_ptr[process_num] = + (uint16_t)(process_grp_raw_ptr - + (char *)process_group); + process = ia_css_process_create( + process_grp_raw_ptr, + program_manifest, + program_param, + i); + verifexit(process != NULL); + + ia_css_process_set_parent(process, process_group); + if (ia_css_has_program_manifest_fixed_cell( + program_manifest)) { + vied_nci_cell_ID_t cell_id = + ia_css_program_manifest_get_cell_ID( + program_manifest); + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create: cell_id %d\n", + cell_id); + ia_css_process_set_cell(process, cell_id); + } + + process_grp_raw_ptr += ia_css_process_get_size( + process); + /* + * Set process dependencies of process derived + * from program manifest + */ + for (prog_dep_index = 0; prog_dep_index < + ia_css_program_manifest_get_program_dependency_count( + program_manifest); prog_dep_index++) { + uint8_t dep_prog_idx = + ia_css_program_manifest_get_program_dependency( + program_manifest, prog_dep_index); + const ia_css_program_manifest_t * + dep_prg_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, dep_prog_idx); + ia_css_program_ID_t id = + ia_css_program_manifest_get_program_ID( + dep_prg_manifest); + + verifexit(id != 0); + for (proc_dep_index = 0; + proc_dep_index < process_num; + proc_dep_index++) { + ia_css_process_t *dep_process = + ia_css_process_group_get_process( + process_group, + proc_dep_index); + + ia_css_process_set_cell_dependency( + process, + prog_dep_index, 0); + + if (ia_css_process_get_program_ID( + dep_process) == id) { + ia_css_process_set_cell_dependency( + process, + prog_dep_index, + proc_dep_index); + break; + } + } + } + process_num++; + + /* + * Set terminal dependencies of process derived + * from program manifest + */ + for (term_dep_index = 0; term_dep_index < + ia_css_program_manifest_get_terminal_dependency_count( + program_manifest); term_dep_index++) { + uint8_t pm_term_index = + ia_css_program_manifest_get_terminal_dependency + (program_manifest, term_dep_index); + + verifexit(pm_term_index < manifest_terminal_count); + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create(): term_dep_index: %d, pm_term_index: %d\n", + term_dep_index, pm_term_index); + for (term_index = 0; + term_index < terminal_count; + term_index++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal( + process_group, + term_index); + + if (ia_css_terminal_get_terminal_manifest_index + (terminal) == pm_term_index) { + ia_css_process_set_terminal_dependency( + process, + term_dep_index, + term_index); + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create() set_terminal_dependency(process: %d, dep_idx: %d, term_idx: %d)\n", + i, term_dep_index, term_index); + + break; + } + } + } + } + } + verifexit(process_num == process_count); + + 
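+	/*
+	 * At this point the layout of the blob is complete
+	 * (all offsets are in bytes from the process group base):
+	 *   [ia_css_process_group_t header]
+	 *   [process offset table, uint16_t entries, 64-bit aligned]
+	 *   [terminal offset table, uint16_t entries, 64-bit aligned]
+	 *   [terminal 0 .. terminal_count - 1]
+	 *   [process 0 .. process_count - 1]
+	 */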
process_group->size = + (uint32_t)ia_css_sizeof_process_group(manifest, param); + process_group->ID = + ia_css_program_group_manifest_get_program_group_ID(manifest); + + /* Initialize performance measurement fields to zero */ + process_group->pg_load_start_ts = 0; + process_group->pg_load_cycles = 0; + process_group->pg_init_cycles = 0; + process_group->pg_processing_cycles = 0; + + verifexit(process_group->ID != 0); + + ret = ia_css_process_group_on_create(process_group, manifest, param); + verifexit(ret == 0); + + process_group->state = IA_CSS_PROCESS_GROUP_READY; + retval = 0; + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create(): Created successfully process group ID 0x%x\n", + process_group->ID); + +EXIT: + if (NULL == process_grp_mem || NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_create invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_create failed (%i)\n", retval); + process_group = ia_css_process_group_destroy(process_group); + } + return process_group; +} + +ia_css_process_group_t *ia_css_process_group_destroy( + ia_css_process_group_t *process_group) +{ + if (process_group != NULL) { + ia_css_process_group_on_destroy(process_group); + process_group = NULL; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_destroy invalid argument\n"); + } + return process_group; +} + +int ia_css_process_group_submit( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_submit(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_SUBMIT); +} + +int ia_css_process_group_start( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_start(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_START); +} + +int ia_css_process_group_stop( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_stop(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_STOP); +} + +int ia_css_process_group_run( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_run(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_RUN); +} + +int ia_css_process_group_suspend( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_suspend(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_SUSPEND); +} + +int ia_css_process_group_resume( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_resume(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_RESUME); +} + +int ia_css_process_group_reset( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_reset(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_RESET); +} + +int ia_css_process_group_abort( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_abort(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_ABORT); +} + +int 
ia_css_process_group_disown( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_disown(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_DISOWN); +} + +extern uint64_t ia_css_process_group_get_token( + ia_css_process_group_t *process_group) +{ + uint64_t token = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_token(): enter:\n"); + + verifexit(process_group != NULL); + + token = process_group->token; + +EXIT: + if (NULL == process_group) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_get_token invalid argument\n"); + } + return token; +} + +int ia_css_process_group_set_token( + ia_css_process_group_t *process_group, + const uint64_t token) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_token(): enter:\n"); + + verifexit(process_group != NULL); + verifexit(token != 0); + + process_group->token = token; + + retval = 0; +EXIT: + if (NULL == process_group || 0 == token) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_set_token invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_token failed (%i)\n", + retval); + } + return retval; +} + +extern uint64_t ia_css_process_group_get_private_token( + ia_css_process_group_t *process_group) +{ + uint64_t token = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_private_token(): enter:\n"); + + verifexit(process_group != NULL); + + token = process_group->private_token; + +EXIT: + if (NULL == process_group) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_get_private_token invalid argument\n"); + } + return token; +} + +int ia_css_process_group_set_private_token( + ia_css_process_group_t *process_group, + const uint64_t token) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_private_token(): enter:\n"); + + verifexit(process_group != NULL); + verifexit(token != 0); + + process_group->private_token = token; + + retval = 0; +EXIT: + if (NULL == process_group || 0 == token) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_set_private_token invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_private_token failed (%i)\n", + retval); + } + return retval; +} + +uint8_t ia_css_process_group_compute_process_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + uint8_t process_count = 0; + ia_css_kernel_bitmap_t total_bitmap; + ia_css_kernel_bitmap_t enable_bitmap; + int i; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_compute_process_count(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + total_bitmap = + ia_css_program_group_manifest_get_kernel_bitmap(manifest); + enable_bitmap = + ia_css_program_group_param_get_kernel_enable_bitmap(param); + + verifexit(ia_css_is_program_group_manifest_valid(manifest)); + verifexit(ia_css_is_kernel_bitmap_subset(total_bitmap, enable_bitmap)); + verifexit(!ia_css_is_kernel_bitmap_empty(enable_bitmap)); + + for (i = 0; i < + (int)ia_css_program_group_manifest_get_program_count(manifest); + i++) { + ia_css_program_manifest_t *program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, i); + ia_css_kernel_bitmap_t program_bitmap = + 
ia_css_program_manifest_get_kernel_bitmap( + program_manifest); + /* + * Programs can be orthogonal, + * a mutually exclusive subset, + * or a concurrent subset + */ + if (!ia_css_is_kernel_bitmap_intersection_empty(enable_bitmap, + program_bitmap)) { + ia_css_program_type_t program_type = + ia_css_program_manifest_get_type( + program_manifest); + /* + * An exclusive subnode < exclusive supernode, + * so simply don't count it + */ + if (program_type != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB && + program_type != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) { + process_count++; + } + } + } + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_compute_process_count invalid argument\n"); + } + return process_count; +} + +uint8_t ia_css_process_group_compute_terminal_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + uint8_t terminal_count = 0; + ia_css_kernel_bitmap_t total_bitmap, enable_bitmap; + int i; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_compute_terminal_count(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + total_bitmap = + ia_css_program_group_manifest_get_kernel_bitmap(manifest); + enable_bitmap = + ia_css_program_group_param_get_kernel_enable_bitmap(param); + + verifexit(ia_css_is_program_group_manifest_valid(manifest)); + verifexit(ia_css_is_kernel_bitmap_subset(total_bitmap, enable_bitmap)); + verifexit(!ia_css_is_kernel_bitmap_empty(enable_bitmap)); + + for (i = 0; i < + (int)ia_css_program_group_manifest_get_terminal_count( + manifest); i++) { + ia_css_terminal_manifest_t *tmanifest = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + + if (ia_css_process_group_is_terminal_enabled( + tmanifest, enable_bitmap)) { + terminal_count++; + } + } + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_compute_terminal_count invalid argument\n"); + } + return terminal_count; +} +#endif /* !defined(__VIED_CELL) */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group_impl.h new file mode 100644 index 000000000000..f99602dc3c9e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group_impl.h @@ -0,0 +1,1538 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_IMPL_H +#define __IA_CSS_PSYS_PROCESS_GROUP_IMPL_H + +#include +#include +#include "ia_css_psys_process_group_cmd_impl.h" +#include +#include +#include +#include +#include +#include +#include "ia_css_terminal_manifest_types.h" + +#include "ia_css_rbm.h" + +#include /* ia_css_kernel_bitmap_t */ + +#include +#include +#include "ia_css_rbm_manifest_types.h" +#include +#include +#include + +#include "ia_css_psys_dynamic_trace.h" + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint16_t ia_css_process_group_get_fragment_limit( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint16_t fragment_limit = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_fragment_limit(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + fragment_limit = process_group->fragment_limit; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_fragment_limit invalid argument\n"); + } + return fragment_limit; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_fragment_limit( + ia_css_process_group_t *process_group, + const uint16_t fragment_limit) +{ + DECLARE_ERRVAL + int retval = -1; + uint16_t fragment_state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_fragment_limit(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + retval = ia_css_process_group_get_fragment_state(process_group, + &fragment_state); + + verifexitval(retval == 0, EINVAL); + verifexitval(fragment_limit > fragment_state, EINVAL); + verifexitval(fragment_limit <= ia_css_process_group_get_fragment_count( + process_group), EINVAL); + + process_group->fragment_limit = fragment_limit; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_fragment_limit invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_fragment_limit failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_clear_fragment_limit( + ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_clear_fragment_limit(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + process_group->fragment_limit = 0; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_clear_fragment_limit invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_clear_fragment_limit failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_attach_buffer( + ia_css_process_group_t *process_group, + vied_vaddress_t buffer, + const ia_css_buffer_state_t buffer_state, + const unsigned int terminal_index) +{ + DECLARE_ERRVAL + int retval = -1; + ia_css_terminal_t *terminal = NULL; + + NOT_USED(buffer_state); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_attach_buffer(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + terminal = ia_css_process_group_get_terminal( + process_group, terminal_index); + + verifexitval(terminal != NULL, EINVAL); + verifexitval(ia_css_process_group_get_state(process_group) == + IA_CSS_PROCESS_GROUP_READY, EINVAL); + verifexitval(process_group->protocol_version ==
IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY || + process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_PPG, EINVAL); + + if (process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY) { + /* + * Legacy flow: + * Terminal address is part of the process group structure + */ + retval = ia_css_terminal_set_buffer( + terminal, buffer); + } else if (process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_PPG) { + /* + * PPG flow: + * Terminal address is part of external buffer set structure + */ + retval = ia_css_terminal_set_terminal_index( + terminal, terminal_index); + } + verifexitval(retval == 0, EFAULT); + + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "\tTerminal %p has buffer 0x%x\n", terminal, buffer); + + if (ia_css_is_terminal_data_terminal(terminal) == true) { + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + verifexitval(frame != NULL, EINVAL); + + retval = ia_css_frame_set_buffer_state(frame, buffer_state); + verifexitval(retval == 0, EINVAL); + } + + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_attach_buffer invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_attach_buffer failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_process_group_detach_buffer( + ia_css_process_group_t *process_group, + const unsigned int terminal_index) +{ + DECLARE_ERRVAL + int retval = -1; + vied_vaddress_t buffer = VIED_NULL; + + ia_css_terminal_t *terminal = NULL; + ia_css_process_group_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_detach_buffer(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + terminal = + ia_css_process_group_get_terminal( + process_group, terminal_index); + state = ia_css_process_group_get_state(process_group); + + verifexitval(terminal != NULL, EINVAL); + verifexitval(state == IA_CSS_PROCESS_GROUP_READY, EINVAL); + + buffer = ia_css_terminal_get_buffer(terminal); + + if (ia_css_is_terminal_data_terminal(terminal) == true) { + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + verifexitval(frame != NULL, EINVAL); + + retval = ia_css_frame_set_buffer_state(frame, IA_CSS_BUFFER_NULL); + verifexitval(retval == 0, EINVAL); + } + ia_css_terminal_set_buffer(terminal, VIED_NULL); + + retval = 0; +EXIT: + /* + * buffer pointer will appear on output, + * regardless of subsequent fails to avoid memory leaks + */ + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_detach_buffer invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_detach_buffer failed (%i)\n", + retval); + } + return buffer; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_attach_stream( + ia_css_process_group_t *process_group, + uint32_t stream, + const ia_css_buffer_state_t buffer_state, + const unsigned int terminal_index) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_attach_stream(): enter:\n"); + + NOT_USED(process_group); + NOT_USED(stream); + NOT_USED(buffer_state); + NOT_USED(terminal_index); + + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_attach_stream failed (%i)\n", + retval); + } + return retval; +} + 
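+/*
+ * Illustration only (not part of this API): the typical host-side call
+ * order for the legacy protocol, where buffer addresses live in the
+ * terminals themselves. my_ipu_alloc() and buffer_size[] are
+ * hypothetical; IA_CSS_BUFFER_FULL assumes an input terminal, an output
+ * terminal would be attached as IA_CSS_BUFFER_EMPTY (see
+ * ia_css_can_process_group_start()).
+ *
+ *	uint8_t i, n = ia_css_process_group_get_terminal_count(pg);
+ *
+ *	for (i = 0; i < n; i++) {
+ *		vied_vaddress_t buf = my_ipu_alloc(buffer_size[i]);
+ *
+ *		ia_css_process_group_attach_buffer(pg, buf,
+ *			IA_CSS_BUFFER_FULL, i);
+ *	}
+ *	ia_css_process_group_submit(pg);
+ *	ia_css_process_group_start(pg);
+ */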
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_detach_stream( + ia_css_process_group_t *process_group, + const unsigned int terminal_index) +{ + int retval = -1; + uint32_t stream = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_detach_stream(): enter:\n"); + + NOT_USED(process_group); + NOT_USED(terminal_index); + + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_detach_stream failed (%i)\n", + retval); + } + return stream; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_barrier( + ia_css_process_group_t *process_group, + const vied_nci_barrier_ID_t barrier_index) +{ + DECLARE_ERRVAL + int retval = -1; + vied_nci_resource_bitmap_t bit_mask; + vied_nci_resource_bitmap_t resource_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_barrier(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + resource_bitmap = + ia_css_process_group_get_resource_bitmap(process_group); + + bit_mask = vied_nci_barrier_bit_mask(barrier_index); + + verifexitval(bit_mask != 0, EINVAL); + verifexitval(vied_nci_is_bitmap_clear(bit_mask, resource_bitmap), EINVAL); + + resource_bitmap = vied_nci_bitmap_set(resource_bitmap, bit_mask); + + retval = + ia_css_process_group_set_resource_bitmap( + process_group, resource_bitmap); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_barrier invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_barrier failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_clear_barrier( + ia_css_process_group_t *process_group, + const vied_nci_barrier_ID_t barrier_index) +{ + DECLARE_ERRVAL + int retval = -1; + vied_nci_resource_bitmap_t bit_mask, resource_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_clear_barrier(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + resource_bitmap = + ia_css_process_group_get_resource_bitmap(process_group); + + bit_mask = vied_nci_barrier_bit_mask(barrier_index); + + verifexitval(bit_mask != 0, EINVAL); + verifexitval(vied_nci_is_bitmap_set(bit_mask, resource_bitmap), EINVAL); + + resource_bitmap = vied_nci_bitmap_clear(resource_bitmap, bit_mask); + + retval = + ia_css_process_group_set_resource_bitmap( + process_group, resource_bitmap); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_clear_barrier invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_clear_barrier failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_print( + const ia_css_process_group_t *process_group, + void *fid) +{ + DECLARE_ERRVAL + int retval = -1; + int i; + + uint8_t process_count; + uint8_t terminal_count; + vied_vaddress_t ipu_vaddress = VIED_NULL; + ia_css_rbm_t routing_bitmap; + + NOT_USED(fid); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_print(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + retval = ia_css_process_group_get_ipu_vaddress(process_group, &ipu_vaddress); + verifexitval(retval == 0, EINVAL); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "=============== Process group print start ===============\n"); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tprocess_group cpu address = 
%p\n", process_group); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tipu_virtual_address = %#x\n", ipu_vaddress); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tsizeof(process_group) = %d\n", + (int)ia_css_process_group_get_size(process_group)); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tfragment_count = %d\n", + (int)ia_css_process_group_get_fragment_count(process_group)); + + routing_bitmap = *ia_css_process_group_get_routing_bitmap(process_group); + for (i = 0; i < (int)IA_CSS_RBM_NOF_ELEMS; i++) { + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "\trouting_bitmap[index = %d] = 0x%X\n", + i, (int)routing_bitmap.data[i]); + } + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tprogram_group(process_group) = %d\n", + (int)ia_css_process_group_get_program_group_ID(process_group)); + process_count = ia_css_process_group_get_process_count(process_group); + terminal_count = + ia_css_process_group_get_terminal_count(process_group); + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\t%d processes\n", (int)process_count); + for (i = 0; i < (int)process_count; i++) { + ia_css_process_t *process = + ia_css_process_group_get_process(process_group, i); + + retval = ia_css_process_print(process, fid); + verifjmpexit(retval == 0); + } + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\t%d terminals\n", (int)terminal_count); + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal(process_group, i); + + retval = ia_css_terminal_print(terminal, fid); + verifjmpexit(retval == 0); + } + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "=============== Process group print end ===============\n"); + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_print invalid argument\n"); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_process_group_valid( + const ia_css_process_group_t *process_group, + const ia_css_program_group_manifest_t *pg_manifest, + const ia_css_program_group_param_t *param) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + uint8_t proc_idx; + uint8_t prog_idx; + uint8_t proc_term_idx; + uint8_t process_count; + uint8_t program_count; + uint8_t terminal_count; + uint8_t man_terminal_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_process_group_valid(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(pg_manifest != NULL, EFAULT); + NOT_USED(param); + + process_count = process_group->process_count; + terminal_count = process_group->terminal_count; + program_count = + ia_css_program_group_manifest_get_program_count(pg_manifest); + man_terminal_count = + ia_css_program_group_manifest_get_terminal_count(pg_manifest); + + /* Validate process group */ + invalid_flag = invalid_flag || + !(program_count >= process_count) || + !(man_terminal_count >= terminal_count) || + !(process_group->size > process_group->processes_offset) || + !(process_group->size > process_group->terminals_offset); + + /* Validate processes */ + for (proc_idx = 0; proc_idx < process_count; proc_idx++) { + const ia_css_process_t *process; + ia_css_program_ID_t prog_id; + bool no_match_found = true; + + process = ia_css_process_group_get_process( + process_group, proc_idx); + verifexitval(NULL != process, EFAULT); + prog_id = ia_css_process_get_program_ID(process); + for (prog_idx = 0; prog_idx < program_count; prog_idx++) { + ia_css_program_manifest_t *p_manifest = NULL; + + p_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + pg_manifest, prog_idx); + if (prog_id 
== + ia_css_program_manifest_get_program_ID( + p_manifest)) { + invalid_flag = invalid_flag || + !ia_css_is_process_valid( + process, p_manifest); + no_match_found = false; + break; + } + } + invalid_flag = invalid_flag || no_match_found; + } + + /* Validate terminals */ + for (proc_term_idx = 0; proc_term_idx < terminal_count; + proc_term_idx++) { + int man_term_idx; + const ia_css_terminal_t *terminal; + const ia_css_terminal_manifest_t *terminal_manifest; + + terminal = + ia_css_process_group_get_terminal( + process_group, proc_term_idx); + verifexitval(NULL != terminal, EFAULT); + man_term_idx = + ia_css_terminal_get_terminal_manifest_index(terminal); + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst( + pg_manifest, man_term_idx); + invalid_flag = invalid_flag || + !ia_css_is_terminal_valid(terminal, terminal_manifest); + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_process_group_valid() invalid argument\n"); + return false; + } else { + return (!invalid_flag); + } +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_can_process_group_submit( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + int i; + bool can_submit = false; + int retval = -1; + uint8_t terminal_count = + ia_css_process_group_get_terminal_count(process_group); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_can_process_group_submit(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal(process_group, i); + vied_vaddress_t buffer; + ia_css_buffer_state_t buffer_state; + + verifexitval(terminal != NULL, EINVAL); + + if (process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY) { + /* + * For legacy pg flow, buffer addresses are contained inside + * the process group structure, so these need to be validated + * on process group submission. 
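+			 * (Contrast with the PPG flow checked in
+			 * ia_css_can_enqueue_buffer_set(), where the
+			 * addresses arrive per frame in an
+			 * ia_css_buffer_set_t instead.)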
+ */ + buffer = ia_css_terminal_get_buffer(terminal); + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "\tH: Terminal number(%d) is %p having buffer 0x%x\n", + i, terminal, buffer); + } + + /* buffer_state is applicable only for data terminals*/ + if (ia_css_is_terminal_data_terminal(terminal) == true) { + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + + verifexitval(frame != NULL, EINVAL); + buffer_state = ia_css_frame_get_buffer_state(frame); + if ((buffer_state == IA_CSS_BUFFER_NULL) || + (buffer_state == IA_CSS_N_BUFFER_STATES)) { + break; + } + } else if ( + (ia_css_is_terminal_parameter_terminal(terminal) + != true) && + (ia_css_is_terminal_program_terminal(terminal) + != true) && + (ia_css_is_terminal_program_control_init_terminal(terminal) + != true) && + (ia_css_is_terminal_spatial_parameter_terminal( + terminal) != true)) { + /* neither data nor parameter terminal, so error.*/ + break; + } + + } + /* Only true if no check failed */ + can_submit = (i == terminal_count); + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_process_group_submit invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_process_group_submit failed (%i)\n", + retval); + } + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_can_process_group_submit(): leave:\n"); + return can_submit; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_can_enqueue_buffer_set( + const ia_css_process_group_t *process_group, + const ia_css_buffer_set_t *buffer_set) +{ + DECLARE_ERRVAL + int i; + bool can_enqueue = false; + int retval = -1; + uint8_t terminal_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_can_enqueue_buffer_set(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(buffer_set != NULL, EFAULT); + + terminal_count = + ia_css_process_group_get_terminal_count(process_group); + + /* + * For ppg flow, buffer addresses are contained in the + * external buffer set structure, so these need to be + * validated before enqueueing. 
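+	 * (Under this protocol the terminal itself only records its index,
+	 * see the PPG branch of ia_css_process_group_attach_buffer(); the
+	 * addresses are resolved through ia_css_buffer_set_get_buffer()
+	 * below.)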
+ */ + verifexitval(process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_PPG, EFAULT); + + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal(process_group, i); + vied_vaddress_t buffer; + ia_css_buffer_state_t buffer_state; + + verifexitval(terminal != NULL, EINVAL); + + buffer = ia_css_buffer_set_get_buffer(buffer_set, terminal); + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "\tH: Terminal number(%d) is %p having buffer 0x%x\n", + i, terminal, buffer); + + /* buffer_state is applicable only for data terminals*/ + if (ia_css_is_terminal_data_terminal(terminal) == true) { + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + + verifexitval(frame != NULL, EINVAL); + buffer_state = ia_css_frame_get_buffer_state(frame); + if ((buffer_state == IA_CSS_BUFFER_NULL) || + (buffer_state == IA_CSS_N_BUFFER_STATES)) { + break; + } + } else if ( + (ia_css_is_terminal_parameter_terminal(terminal) + != true) && + (ia_css_is_terminal_program_terminal(terminal) + != true) && + (ia_css_is_terminal_program_control_init_terminal(terminal) + != true) && + (ia_css_is_terminal_spatial_parameter_terminal( + terminal) != true)) { + /* neither data nor parameter terminal, so error.*/ + break; + } + } + /* Only true if no check failed */ + can_enqueue = (i == terminal_count); + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_enqueue_buffer_set invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_enqueue_buffer_set failed (%i)\n", + retval); + } + return can_enqueue; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_can_process_group_start( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + int i; + bool can_start = false; + int retval = -1; + uint8_t terminal_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_can_process_group_start(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + terminal_count = + ia_css_process_group_get_terminal_count(process_group); + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal(process_group, i); + ia_css_buffer_state_t buffer_state; + bool ok = false; + + verifexitval(terminal != NULL, EINVAL); + if (ia_css_is_terminal_data_terminal(terminal) == true) { + /* + * buffer_state is applicable only for data terminals + */ + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + bool is_input = ia_css_is_terminal_input(terminal); + /* + * check for NULL here. + * then invoke next 2 statements + */ + verifexitval(frame != NULL, EINVAL); + IA_CSS_TRACE_5(PSYSAPI_DYNAMIC, VERBOSE, + "\tTerminal %d: buffer_state %u, access_type %u, data_bytes %u, data %u\n", + i, frame->buffer_state, frame->access_type, + frame->data_bytes, frame->data); + buffer_state = ia_css_frame_get_buffer_state(frame); + + ok = ((is_input && + (buffer_state == IA_CSS_BUFFER_FULL)) || + (!is_input && (buffer_state == + IA_CSS_BUFFER_EMPTY))); + + } else if (ia_css_is_terminal_parameter_terminal(terminal) == + true) { + /* + * FIXME: + * is there any pre-requisite for param_terminal? 
+ */ + ok = true; + } else if (ia_css_is_terminal_program_terminal(terminal) == + true) { + ok = true; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal) == + true) { + ok = true; + } else if (ia_css_is_terminal_spatial_parameter_terminal( + terminal) == true) { + ok = true; + } else { + /* neither data nor parameter terminal, so error.*/ + break; + } + + if (!ok) + break; + } + /* Only true if no check failed */ + can_start = (i == terminal_count); + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_process_group_start invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_process_group_start failed (%i)\n", + retval); + } + return can_start; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +size_t ia_css_process_group_get_size( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_size(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + size = process_group->size; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_size invalid argument\n"); + } + return size; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_group_state_t ia_css_process_group_get_state( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + ia_css_process_group_state_t state = IA_CSS_N_PROCESS_GROUP_STATES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_state(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + state = process_group->state; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_state invalid argument\n"); + } + return state; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +const ia_css_rbm_t *ia_css_process_group_get_routing_bitmap( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + const ia_css_rbm_t *rbm = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_routing_bitmap(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + rbm = &(process_group->routing_bitmap); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_routing_bitmap invalid argument\n"); + } + return rbm; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint16_t ia_css_process_group_get_fragment_count( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint16_t fragment_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_fragment_count(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + fragment_count = process_group->fragment_count; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_fragment_count invalid argument\n"); + } + return fragment_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_process_count( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t process_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_process_count(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + process_count = process_group->process_count; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_process_count invalid argument\n"); + } + return process_count; +} +
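+/*
+ * All getters in this header follow the same DECLARE_ERRVAL /
+ * verifexitval / haserror / noerror pattern. The real macro definitions
+ * live elsewhere in lib2600psys; a minimal model of the intended
+ * semantics (an assumption, shown for illustration only) is:
+ *
+ *	#define DECLARE_ERRVAL		int errval_ = 0;
+ *	#define verifexitval(expr, err) \
+ *		do { if (!(expr)) { errval_ = (err); goto EXIT; } } while (0)
+ *	#define haserror(err)		(errval_ == (err))
+ *	#define noerror()		(errval_ == 0)
+ */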
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_terminal_count( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t terminal_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_terminal_count(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + terminal_count = process_group->terminal_count; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_terminal_count invalid argument\n"); + } + return terminal_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_get_pg_load_start_ts( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint32_t pg_load_start_ts = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_pg_load_start_ts(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + pg_load_start_ts = process_group->pg_load_start_ts; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_pg_load_start_ts invalid argument\n"); + } + return pg_load_start_ts; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_get_pg_load_cycles( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint32_t pg_load_cycles = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_pg_load_cycles(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + pg_load_cycles = process_group->pg_load_cycles; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_pg_load_cycles invalid argument\n"); + } + return pg_load_cycles; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_get_pg_init_cycles( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint32_t pg_init_cycles = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_pg_init_cycles(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + pg_init_cycles = process_group->pg_init_cycles; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_pg_init_cycles invalid argument\n"); + } + return pg_init_cycles; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_get_pg_processing_cycles( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint32_t pg_processing_cycles = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_pg_processing_cycles(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + pg_processing_cycles = process_group->pg_processing_cycles; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_pg_processing_cycles invalid argument\n"); + } + return pg_processing_cycles; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_terminal_t *ia_css_process_group_get_terminal_from_type( + const ia_css_process_group_t *process_group, + const ia_css_terminal_type_t terminal_type) +{ + unsigned int proc_cnt; + ia_css_terminal_t *terminal = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_terminal_from_type(): enter:\n"); + + for (proc_cnt = 0; proc_cnt < (unsigned int)ia_css_process_group_get_terminal_count(process_group); proc_cnt++) { + terminal = ia_css_process_group_get_terminal(process_group, proc_cnt); + if (terminal == NULL) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_terminal_from_type() 
Failed to get terminal %d", proc_cnt); + goto EXIT; + } + if (ia_css_terminal_get_type(terminal) == terminal_type) { + return terminal; + } + terminal = NULL; /* If not the expected type, return NULL */ + } +EXIT: + return terminal; +} + +/* Returns the terminal or NULL if it was not found + For some of those maybe valid to not exist at all in the process group */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +const ia_css_terminal_t *ia_css_process_group_get_single_instance_terminal( + const ia_css_process_group_t *process_group, + ia_css_terminal_type_t term_type) +{ + int i, term_count; + + assert(process_group != NULL); + + /* Those below have at most one instance per process group */ + assert(term_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN || + term_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT || + term_type == IA_CSS_TERMINAL_TYPE_PROGRAM || + term_type == IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT); + + term_count = ia_css_process_group_get_terminal_count(process_group); + + for (i = 0; i < term_count; i++) { + const ia_css_terminal_t *terminal = ia_css_process_group_get_terminal(process_group, i); + + if (ia_css_terminal_get_type(terminal) == term_type) { + /* Only one parameter terminal per process group */ + return terminal; + } + } + + return NULL; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_terminal_t *ia_css_process_group_get_terminal( + const ia_css_process_group_t *process_grp, + const unsigned int terminal_num) +{ + DECLARE_ERRVAL + ia_css_terminal_t *terminal_ptr = NULL; + uint16_t *terminal_offset_table; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_terminal(): enter:\n"); + + verifexitval(process_grp != NULL, EFAULT); + verifexitval(terminal_num < process_grp->terminal_count, EINVAL); + + terminal_offset_table = + (uint16_t *)((char *)process_grp + + process_grp->terminals_offset); + terminal_ptr = + (ia_css_terminal_t *)((char *)process_grp + + terminal_offset_table[terminal_num]); + + verifexitval(terminal_ptr != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_terminal invalid argument\n"); + } + return terminal_ptr; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_t *ia_css_process_group_get_process( + const ia_css_process_group_t *process_grp, + const unsigned int process_num) +{ + DECLARE_ERRVAL + ia_css_process_t *process_ptr = NULL; + uint16_t *process_offset_table; + + verifexitval(process_grp != NULL, EFAULT); + verifexitval(process_num < process_grp->process_count, EINVAL); + + process_offset_table = + (uint16_t *)((char *)process_grp + + process_grp->processes_offset); + process_ptr = + (ia_css_process_t *)((char *)process_grp + + process_offset_table[process_num]); + + verifexitval(process_ptr != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_process invalid argument\n"); + } + return process_ptr; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_group_ID_t ia_css_process_group_get_program_group_ID( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + ia_css_program_group_ID_t id = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_program_group_ID(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + id = process_group->ID; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_program_group_ID invalid argument\n"); + } + return id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C 
+vied_nci_resource_bitmap_t ia_css_process_group_get_resource_bitmap( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t resource_bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_resource_bitmap(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + resource_bitmap = process_group->resource_bitmap; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_resource_bitmap invalid argument\n"); + } + return resource_bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_resource_bitmap( + ia_css_process_group_t *process_group, + const vied_nci_resource_bitmap_t resource_bitmap) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_resource_bitmap(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + process_group->resource_bitmap = resource_bitmap; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_resource_bitmap invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_resource_bitmap failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_routing_bitmap( + ia_css_process_group_t *process_group, + const ia_css_rbm_t rbm) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_routing_bitmap(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + process_group->routing_bitmap = rbm; + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_routing_bitmap invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_routing_bitmap failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_compute_cycle_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + DECLARE_ERRVAL + uint32_t cycle_count = 0; + + NOT_USED(manifest); + NOT_USED(param); + + verifexitval(manifest != NULL, EFAULT); + verifexitval(param != NULL, EFAULT); + + cycle_count = 1; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_compute_cycle_count invalid argument\n"); + } + return cycle_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_fragment_state( + ia_css_process_group_t *process_group, + uint16_t fragment_state) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_set_fragment_state(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(fragment_state <= ia_css_process_group_get_fragment_count( + process_group), EINVAL); + + process_group->fragment_state = fragment_state; + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_fragment_state invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_fragment_state failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_get_fragment_state( + const ia_css_process_group_t *process_group, + uint16_t 
*fragment_state) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_fragment_state(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(fragment_state != NULL, EFAULT); + + *fragment_state = process_group->fragment_state; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_fragment_state invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_fragment_state failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_get_ipu_vaddress( + const ia_css_process_group_t *process_group, + vied_vaddress_t *ipu_vaddress) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_ipu_vaddress(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(ipu_vaddress != NULL, EFAULT); + + *ipu_vaddress = process_group->ipu_virtual_address; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_ipu_vaddress invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_ipu_vaddress failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_ipu_vaddress( + ia_css_process_group_t *process_group, + vied_vaddress_t ipu_vaddress) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_ipu_vaddress(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + process_group->ipu_virtual_address = ipu_vaddress; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_ipu_vaddress invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_ipu_vaddress failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_protocol_version( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t protocol_version = IA_CSS_PROCESS_GROUP_N_PROTOCOLS; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_protocol_version(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + protocol_version = process_group->protocol_version; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_protocol_version invalid argument\n"); + } + return protocol_version; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_base_queue_id( + ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t queue_id = IA_CSS_N_PSYS_CMD_QUEUE_ID; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_base_queue_id(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + queue_id = process_group->base_queue_id; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_base_queue_id invalid argument\n"); + } + return queue_id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_base_queue_id( + ia_css_process_group_t *process_group, + uint8_t queue_id) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_base_queue_id(): enter:\n"); + + 
verifexitval(process_group != NULL, EFAULT); + + process_group->base_queue_id = queue_id; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_base_queue_id invalid argument\n"); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_num_queues( + ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t num_queues = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_num_queues(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + num_queues = process_group->num_queues; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_num_queues invalid argument\n"); + } + return num_queues; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_num_queues( + ia_css_process_group_t *process_group, + uint8_t num_queues) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_num_queues(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + process_group->num_queues = num_queues; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_num_queues invalid argument\n"); + } + return retval; +} + + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_process_group_has_vp(const ia_css_process_group_t *process_group) +{ + bool has_vp = false; + uint32_t i; + + uint8_t process_count = ia_css_process_group_get_process_count(process_group); + + for (i = 0; i < process_count; i++) { + ia_css_process_t *process; + vied_nci_cell_ID_t cell_id; + + process = ia_css_process_group_get_process(process_group, i); + cell_id = ia_css_process_get_cell(process); + + if (VIED_NCI_VP_TYPE_ID == vied_nci_cell_get_type(cell_id)) { + has_vp = true; + break; + } + } + + return has_vp; +} + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_impl.h new file mode 100644 index 000000000000..5d0303012700 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_impl.h @@ -0,0 +1,637 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_PROCESS_IMPL_H +#define __IA_CSS_PSYS_PROCESS_IMPL_H + +#include + +#include +#include + +#include +#include +#include + +#include + +#include "ia_css_psys_dynamic_trace.h" +#include "ia_css_psys_process_private_types.h" + +/** Function only to be used in ia_css_psys_process_impl.h and ia_css_psys_process.h */ +STORAGE_CLASS_INLINE vied_nci_cell_ID_t ia_css_process_cells_get_cell(const ia_css_process_t *process, int index) +{ + assert(index < IA_CSS_PROCESS_MAX_CELLS); + if (index >= IA_CSS_PROCESS_MAX_CELLS) { + return VIED_NCI_N_CELL_ID; + } +#if IA_CSS_PROCESS_MAX_CELLS == 1 + return process->cell_id; +#else + return process->cells[index]; +#endif +} + +/** Function only to be used in ia_css_psys_process_impl.h and ia_css_psys_process.h */ +STORAGE_CLASS_INLINE void ia_css_process_cells_set_cell(ia_css_process_t *process, int index, vied_nci_cell_ID_t cell_id) +{ + assert(index < IA_CSS_PROCESS_MAX_CELLS); + if (index >= IA_CSS_PROCESS_MAX_CELLS) { + return; + } +#if IA_CSS_PROCESS_MAX_CELLS == 1 + process->cell_id = cell_id; +#else + process->cells[index] = cell_id; +#endif +} + +/** Function only to be used in ia_css_psys_process_impl.h and ia_css_psys_process */ +STORAGE_CLASS_INLINE void ia_css_process_cells_clear(ia_css_process_t *process) +{ + int i; + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + ia_css_process_cells_set_cell(process, i, VIED_NCI_N_CELL_ID); + } +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_cell_ID_t ia_css_process_get_cell( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + vied_nci_cell_ID_t cell_id = VIED_NCI_N_CELL_ID; + int i = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_cell(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + +#if IA_CSS_PROCESS_MAX_CELLS > 1 + for (i = 1; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + assert(VIED_NCI_N_CELL_ID == ia_css_process_cells_get_cell(process, i)); +#ifdef __HIVECC +#pragma hivecc unroll +#endif + } +#else + (void)i; +#endif + cell_id = ia_css_process_cells_get_cell(process, 0); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_cell invalid argument\n"); + } + return cell_id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_mem_ID_t ia_css_process_get_ext_mem_id( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type) +{ + DECLARE_ERRVAL + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_ext_mem(): enter:\n"); + + verifexitval(process != NULL && mem_type < VIED_NCI_N_DATA_MEM_TYPE_ID, EFAULT); + +EXIT: + if (!noerror()) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_ext_mem invalid argument\n"); + return IA_CSS_PROCESS_INVALID_OFFSET; + } + return process->ext_mem_id[mem_type]; +} + + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_get_program_idx( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_program_idx(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_program_idx invalid argument\n"); + return IA_CSS_PROCESS_INVALID_PROGRAM_IDX; + } + return process->program_idx; +} + + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_size_t ia_css_process_get_dev_chn( + const ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + DECLARE_ERRVAL + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dev_chn(): enter:\n"); + + 
verifexitval(process != NULL && dev_chn_id < VIED_NCI_N_DEV_CHN_ID, EFAULT); + +EXIT: + if (!noerror()) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dev_chn(): invalid arguments\n"); + return IA_CSS_PROCESS_INVALID_OFFSET; + } + return process->dev_chn_offset[dev_chn_id]; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_size_t ia_css_process_get_int_mem_offset( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_id) +{ + DECLARE_ERRVAL + vied_nci_resource_size_t int_mem_offset = IA_CSS_PROCESS_INVALID_OFFSET; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_int_mem_offset(): enter:\n"); + + verifexitval(process != NULL && mem_id < VIED_NCI_N_MEM_TYPE_ID, EFAULT); + +EXIT: + if (noerror()) { + int_mem_offset = process->int_mem_offset[mem_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_int_mem_offset invalid argument\n"); + } + + return int_mem_offset; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_size_t ia_css_process_get_ext_mem_offset( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id) +{ + DECLARE_ERRVAL + vied_nci_resource_size_t ext_mem_offset = IA_CSS_PROCESS_INVALID_OFFSET; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_ext_mem_offset(): enter:\n"); + + verifexitval(process != NULL && mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID, EFAULT); + +EXIT: + if (noerror()) { + ext_mem_offset = process->ext_mem_offset[mem_type_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_ext_mem_offset invalid argument\n"); + } + + return ext_mem_offset; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +size_t ia_css_process_get_size( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_size(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + +EXIT: + if (noerror()) { + size = process->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_size invalid argument\n"); + } + + return size; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_state_t ia_css_process_get_state( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + ia_css_process_state_t state = IA_CSS_N_PROCESS_STATES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_state(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + +EXIT: + if (noerror()) { + state = process->state; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_state invalid argument\n"); + } + + return state; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_set_state( + ia_css_process_t *process, + ia_css_process_state_t state) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_state(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + process->state = state; + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_state invalid argument\n"); + } + + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_get_cell_dependency_count( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + uint8_t cell_dependency_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_cell_dependency_count(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + cell_dependency_count = process->cell_dependency_count; + +EXIT: + if (haserror(EFAULT)) { + 
IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_cell_dependency_count invalid argument\n"); + } + return cell_dependency_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_get_terminal_dependency_count( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + uint8_t terminal_dependency_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_terminal_dependency_count(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + terminal_dependency_count = process->terminal_dependency_count; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_terminal_dependency_count invalid argument process\n"); + } + return terminal_dependency_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_group_t *ia_css_process_get_parent( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + ia_css_process_group_t *parent = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_parent(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + parent = + (ia_css_process_group_t *) ((char *)process + process->parent_offset); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_parent invalid argument process\n"); + } + return parent; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_ID_t ia_css_process_get_program_ID( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + ia_css_program_ID_t id = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_program_ID(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + id = process->ID; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_program_ID invalid argument process\n"); + } + return id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_id_t ia_css_process_get_cell_dependency( + const ia_css_process_t *process, + const unsigned int cell_num) +{ + DECLARE_ERRVAL + vied_nci_resource_id_t cell_dependency = + IA_CSS_PROCESS_INVALID_DEPENDENCY; + vied_nci_resource_id_t *cell_dep_ptr = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_cell_dependency(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + verifexitval(cell_num < process->cell_dependency_count, EFAULT); + + cell_dep_ptr = + (vied_nci_resource_id_t *) + ((char *)process + process->cell_dependencies_offset); + cell_dependency = *(cell_dep_ptr + cell_num); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_cell_dependency invalid argument\n"); + } + return cell_dependency; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_get_terminal_dependency( + const ia_css_process_t *process, + const unsigned int terminal_num) +{ + DECLARE_ERRVAL + uint8_t *ter_dep_ptr = NULL; + uint8_t ter_dep = IA_CSS_PROCESS_INVALID_DEPENDENCY; + + verifexitval(process != NULL, EFAULT); + verifexitval(terminal_num < process->terminal_dependency_count, EFAULT); + + ter_dep_ptr = (uint8_t *) ((char *)process + + process->terminal_dependencies_offset); + + ter_dep = *(ter_dep_ptr + terminal_num); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_terminal_dependency invalid argument\n"); + } + return ter_dep; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_kernel_bitmap_t ia_css_process_get_kernel_bitmap( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + + 
IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_kernel_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + bitmap = process->kernel_bitmap; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_kernel_bitmap invalid argument process\n"); + } + return bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t ia_css_process_get_cells_bitmap( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t bitmap = 0; + vied_nci_cell_ID_t cell_id; + int i = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_cell_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + cell_id = ia_css_process_cells_get_cell(process, i); + if (VIED_NCI_N_CELL_ID != cell_id) { + bitmap |= (1 << cell_id); + } +#ifdef __HIVECC +#pragma hivecc unroll +#endif + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_cells_bitmap invalid argument process\n"); + } + + return bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t* ia_css_process_get_dfm_port_bitmap_ptr( + ia_css_process_t *process) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t *p_bitmap = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dfm_port_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + p_bitmap = &process->dfm_port_bitmap[0]; +#else + p_bitmap = NULL; +#endif +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dfm_port_bitmap invalid argument process\n"); + } + + return p_bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t* ia_css_process_get_dfm_active_port_bitmap_ptr( + ia_css_process_t *process) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t *p_bitmap = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dfm_port_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + p_bitmap = &process->dfm_active_port_bitmap[0]; +#else + p_bitmap = NULL; +#endif +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dfm_port_bitmap invalid argument process\n"); + } + + return p_bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t ia_css_process_get_dfm_port_bitmap( + const ia_css_process_t *process, + vied_nci_dev_dfm_id_t dfm_res_id) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dfm_port_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexitval(dfm_res_id < VIED_NCI_N_DEV_DFM_ID, EFAULT); + bitmap = process->dfm_port_bitmap[dfm_res_id]; +#else + bitmap = 0; + (void)dfm_res_id; +#endif +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dfm_port_bitmap invalid argument process\n"); + } + + return bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t ia_css_process_get_dfm_active_port_bitmap( + const ia_css_process_t *process, + vied_nci_dev_dfm_id_t dfm_res_id) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dfm_active_port_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + 
verifexitval(dfm_res_id < VIED_NCI_N_DEV_DFM_ID, EFAULT); + bitmap = process->dfm_active_port_bitmap[dfm_res_id]; +#else + bitmap = 0; + (void)dfm_res_id; +#endif +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dfm_active_port_bitmap invalid argument process\n"); + } + return bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_process_valid( + const ia_css_process_t *process, + const ia_css_program_manifest_t *p_manifest) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + ia_css_program_ID_t prog_id; + ia_css_kernel_bitmap_t prog_kernel_bitmap; + + verifexitval(NULL != process, EFAULT); + verifexitval(NULL != p_manifest, EFAULT); + + prog_id = ia_css_process_get_program_ID(process); + verifjmpexit(prog_id == + ia_css_program_manifest_get_program_ID(p_manifest)); + + prog_kernel_bitmap = + ia_css_program_manifest_get_kernel_bitmap(p_manifest); + + invalid_flag = (process->size <= process->cell_dependencies_offset) || + (process->size <= process->terminal_dependencies_offset) || + !ia_css_is_kernel_bitmap_subset(prog_kernel_bitmap, + process->kernel_bitmap); + + if (ia_css_has_program_manifest_fixed_cell(p_manifest)) { + vied_nci_cell_ID_t cell_id; + + cell_id = ia_css_program_manifest_get_cell_ID(p_manifest); + invalid_flag = invalid_flag || + (cell_id != (vied_nci_cell_ID_t)(ia_css_process_get_cell(process))); + } + invalid_flag = invalid_flag || + ((process->cell_dependency_count + + process->terminal_dependency_count) == 0) || + (process->cell_dependency_count != + ia_css_program_manifest_get_program_dependency_count(p_manifest)) || + (process->terminal_dependency_count != + ia_css_program_manifest_get_terminal_dependency_count(p_manifest)); + + /* TODO: to be removed once all PGs pass validation */ + if (invalid_flag == true) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_is_process_valid(): false\n"); + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_process_valid() invalid argument\n"); + return false; + } else { + return (!invalid_flag); + } +} + +#endif /* __IA_CSS_PSYS_PROCESS_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_private_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_private_types.h new file mode 100644 index 000000000000..ae0affde9718 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_private_types.h @@ -0,0 +1,87 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_PSYS_PROCESS_PRIVATE_TYPES_H
+#define __IA_CSS_PSYS_PROCESS_PRIVATE_TYPES_H
+
+#include "ia_css_psys_process_types.h"
+#include "vied_nci_psys_resource_model.h"
+
+#define N_UINT32_IN_PROCESS_STRUCT 2
+#define N_UINT16_IN_PROCESS_STRUCT 3
+#define N_UINT8_IN_PROCESS_STRUCT 2
+
+#define SIZE_OF_PROCESS_STRUCT_BITS \
+	(IA_CSS_KERNEL_BITMAP_BITS \
+	+ (N_UINT32_IN_PROCESS_STRUCT * 32) \
+	+ IA_CSS_PROGRAM_ID_BITS \
+	+ (VIED_NCI_RESOURCE_BITMAP_BITS * VIED_NCI_N_DEV_DFM_ID) \
+	+ (VIED_NCI_RESOURCE_BITMAP_BITS * VIED_NCI_N_DEV_DFM_ID) \
+	+ IA_CSS_PROCESS_STATE_BITS \
+	+ (N_UINT16_IN_PROCESS_STRUCT * 16) \
+	+ (VIED_NCI_N_MEM_TYPE_ID * VIED_NCI_RESOURCE_SIZE_BITS) \
+	+ (VIED_NCI_N_DATA_MEM_TYPE_ID * VIED_NCI_RESOURCE_SIZE_BITS) \
+	+ (VIED_NCI_N_DEV_CHN_ID * VIED_NCI_RESOURCE_SIZE_BITS) \
+	+ (IA_CSS_PROCESS_MAX_CELLS * VIED_NCI_RESOURCE_ID_BITS) \
+	+ (VIED_NCI_N_MEM_TYPE_ID * VIED_NCI_RESOURCE_ID_BITS) \
+	+ (VIED_NCI_N_DATA_MEM_TYPE_ID * VIED_NCI_RESOURCE_ID_BITS) \
+	+ (N_UINT8_IN_PROCESS_STRUCT * 8) \
+	+ (N_PADDING_UINT8_IN_PROCESS_STRUCT * 8))
+
+struct ia_css_process_s {
+	/**< Indicate which kernels lead to this process being used */
+	ia_css_kernel_bitmap_t kernel_bitmap;
+	uint32_t size; /**< Size of this structure */
+	ia_css_program_ID_t ID; /**< Referral ID to a specific program FW */
+	uint32_t program_idx; /**< Program index into the PG manifest */
+#if (VIED_NCI_N_DEV_DFM_ID > 0)
+	/**< DFM port allocated to this process */
+	vied_nci_resource_bitmap_t dfm_port_bitmap[VIED_NCI_N_DEV_DFM_ID];
+	/**< Active DFM ports which need a kick */
+	vied_nci_resource_bitmap_t dfm_active_port_bitmap[VIED_NCI_N_DEV_DFM_ID];
+#endif
+	/**< State of the process FSM dependent on the parent FSM */
+	ia_css_process_state_t state;
+	int16_t parent_offset; /**< Reference to the process group */
+	/**< Array[cell_dependency_count] of IDs of the cells that provide input */
+	uint16_t cell_dependencies_offset;
+	/**< Array[terminal_dependency_count] of indices of connected terminals */
+	uint16_t terminal_dependencies_offset;
+	/**< (internal) Memory allocation offset given to this process */
+	vied_nci_resource_size_t int_mem_offset[VIED_NCI_N_MEM_TYPE_ID];
+	/**< (external) Memory allocation offset given to this process */
+	vied_nci_resource_size_t ext_mem_offset[VIED_NCI_N_DATA_MEM_TYPE_ID];
+	/**< Device channel allocation offset given to this process */
+	vied_nci_resource_size_t dev_chn_offset[VIED_NCI_N_DEV_CHN_ID];
+	/**< Cells (VP, ACB) allocated for the process */
+#if IA_CSS_PROCESS_MAX_CELLS == 1
+	vied_nci_resource_id_t cell_id;
+#else
+	vied_nci_resource_id_t cells[IA_CSS_PROCESS_MAX_CELLS];
+#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */
+	/**< (internal) Memory ID; this is redundant, derived from cell_id */
+	vied_nci_resource_id_t int_mem_id[VIED_NCI_N_MEM_TYPE_ID];
+	/**< (external) Memory ID */
+	vied_nci_resource_id_t ext_mem_id[VIED_NCI_N_DATA_MEM_TYPE_ID];
+	/**< Number of processes (mapped on cells) this process depends on */
+	uint8_t cell_dependency_count;
+	/**< Number of terminals this process depends on */
+	uint8_t terminal_dependency_count;
+	/**< Padding bytes for 64-bit alignment */
+#if (N_PADDING_UINT8_IN_PROCESS_STRUCT > 0)
+	uint8_t padding[N_PADDING_UINT8_IN_PROCESS_STRUCT];
+#endif /*(N_PADDING_UINT8_IN_PROCESS_STRUCT > 0)*/
+};
+
+#endif /* __IA_CSS_PSYS_PROCESS_PRIVATE_TYPES_H */
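Since host and firmware builds must agree bit-for-bit on this layout, SIZE_OF_PROCESS_STRUCT_BITS is meant to be compared against the compiled struct size. A sketch of such a compile-time check, mirroring the COMPILATION_ERROR_IF/CHAR_BIT pattern this patch applies to the terminal structures in ia_css_psys_terminal.c below; the function name here is hypothetical, not part of the patch:

/* Illustrative check only; mirrors the terminal alignment checks below */
STORAGE_CLASS_INLINE void __process_dummy_check_alignment(void)
{
	/* Struct must match the declared bit size and stay 64-bit aligned */
	COMPILATION_ERROR_IF(
		SIZE_OF_PROCESS_STRUCT_BITS !=
		(CHAR_BIT * sizeof(struct ia_css_process_s)));

	COMPILATION_ERROR_IF(0 !=
		sizeof(struct ia_css_process_s) % sizeof(uint64_t));
}

diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal.c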
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal.c new file mode 100644 index 000000000000..ea406f229273 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal.c @@ -0,0 +1,604 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_psys_terminal_private_types.h" +#include "ia_css_terminal_types.h" + +/* + * Functions to possibly inline + */ + +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_terminal_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +STORAGE_CLASS_INLINE void __terminal_dummy_check_alignment(void) +{ + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_TERMINAL_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_param_terminal_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_param_terminal_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_SEC_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_param_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_param_section_desc_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_SPATIAL_PARAM_TERM_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_spatial_param_terminal_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_spatial_param_terminal_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAME_GRID_PARAM_SEC_STRUCT_BITS != + (CHAR_BIT * sizeof( + ia_css_frame_grid_param_section_desc_t))); + + COMPILATION_ERROR_IF(0 != sizeof( + ia_css_frame_grid_param_section_desc_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAG_GRID_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_fragment_grid_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_fragment_grid_desc_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_SLICED_PARAM_TERM_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_sliced_param_terminal_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_sliced_param_terminal_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAGMENT_SLICE_DESC_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_fragment_slice_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_fragment_slice_desc_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_SLICE_PARAM_SECTION_DESC_STRUCT_BITS != + (CHAR_BIT * sizeof( + ia_css_slice_param_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_slice_param_section_desc_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PROG_TERM_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_program_terminal_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_program_terminal_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAG_SEQ_INFO_STRUCT_BITS != + (CHAR_BIT * sizeof( + ia_css_kernel_fragment_sequencer_info_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_kernel_fragment_sequencer_info_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAG_SEQ_COMMANDS_STRUCT_BITS != + (CHAR_BIT * sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t))); + + 
COMPILATION_ERROR_IF(0 !=
+		sizeof(ia_css_kernel_fragment_sequencer_command_desc_t) %
+		sizeof(uint64_t));
+
+	COMPILATION_ERROR_IF(
+		SIZE_OF_FRAG_PARAM_SEC_STRUCT_BITS !=
+		(CHAR_BIT * sizeof(ia_css_fragment_param_section_desc_t)));
+
+	COMPILATION_ERROR_IF(0 !=
+		sizeof(ia_css_fragment_param_section_desc_t) % sizeof(uint64_t));
+
+	COMPILATION_ERROR_IF(
+		SIZE_OF_PROG_CONTROL_INIT_LOAD_SECTION_DESC_STRUCT_BITS !=
+		(CHAR_BIT *
+		sizeof(ia_css_program_control_init_load_section_desc_t)));
+
+	COMPILATION_ERROR_IF(0 !=
+		sizeof(ia_css_program_control_init_load_section_desc_t) %
+		sizeof(uint64_t));
+
+	COMPILATION_ERROR_IF(
+		SIZE_OF_PROG_CONTROL_INIT_CONNECT_SECTION_DESC_STRUCT_BITS !=
+		(CHAR_BIT *
+		sizeof(ia_css_program_control_init_connect_section_desc_t)));
+
+	COMPILATION_ERROR_IF(0 !=
+		sizeof(ia_css_program_control_init_connect_section_desc_t) %
+		sizeof(uint64_t));
+
+	COMPILATION_ERROR_IF(
+		SIZE_OF_PROGRAM_DESC_CONTROL_INFO_STRUCT_BITS !=
+		(CHAR_BIT *
+		sizeof(struct ia_css_program_desc_control_info_s)));
+
+	COMPILATION_ERROR_IF(
+		SIZE_OF_PROG_CONTROL_INIT_PROG_DESC_STRUCT_BITS !=
+		(CHAR_BIT *
+		sizeof(ia_css_program_control_init_program_desc_t)));
+
+	COMPILATION_ERROR_IF(0 !=
+		sizeof(ia_css_program_control_init_program_desc_t) %
+		sizeof(uint64_t));
+
+	COMPILATION_ERROR_IF(
+		SIZE_OF_PROG_CONTROL_INIT_TERM_STRUCT_BITS !=
+		(CHAR_BIT * sizeof(ia_css_program_control_init_terminal_t)));
+
+	COMPILATION_ERROR_IF(0 !=
+		sizeof(ia_css_program_control_init_terminal_t) %
+		sizeof(uint64_t));
+}
+
+/*
+ * Functions not to inline
+ */
+
+/*
+ * This source file is created with the intention of being shared and
+ * compiled for both host and firmware. Since there is no native 64-bit
+ * data type support in firmware, this would not compile for the SP
+ * tile. The parts of the file that are not compilable are marked
+ * with the following __VIED_CELL marker and this comment. Once we
+ * come up with a solution to address this issue, this will be
+ * removed.
+ */ +#if !defined(__VIED_CELL) +size_t ia_css_sizeof_terminal( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + size_t size = 0; + uint16_t fragment_count = + ia_css_program_group_param_get_fragment_count(param); + + COMPILATION_ERROR_IF( + SIZE_OF_DATA_TERMINAL_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_data_terminal_t))); + + COMPILATION_ERROR_IF( + 0 != sizeof(ia_css_data_terminal_t)%sizeof(uint64_t)); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_sizeof_terminal(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + if (ia_css_is_terminal_manifest_parameter_terminal(manifest)) { + const ia_css_param_terminal_manifest_t *param_term_man = + (const ia_css_param_terminal_manifest_t *)manifest; + if (ia_css_terminal_manifest_get_type(manifest) == + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN) { + size = ia_css_param_in_terminal_get_descriptor_size( + param_term_man->param_manifest_section_desc_count); + } else if (ia_css_terminal_manifest_get_type(manifest) == + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT) { + size = ia_css_param_out_terminal_get_descriptor_size( + param_term_man->param_manifest_section_desc_count, + fragment_count); + } else { + assert(NULL == "Invalid parameter terminal type"); + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_sizeof_terminal(): Invalid parameter terminal type:\n"); + verifjmpexit(0); + } + } else if (ia_css_is_terminal_manifest_data_terminal(manifest)) { + size += sizeof(ia_css_data_terminal_t); + size += fragment_count * sizeof(ia_css_fragment_descriptor_t); + } else if (ia_css_is_terminal_manifest_program_terminal(manifest)) { + ia_css_program_terminal_manifest_t *prog_term_man = + (ia_css_program_terminal_manifest_t *)manifest; + + size = ia_css_program_terminal_get_descriptor_size( + fragment_count, + prog_term_man-> + fragment_param_manifest_section_desc_count, + prog_term_man-> + kernel_fragment_sequencer_info_manifest_info_count, + (fragment_count * prog_term_man-> + max_kernel_fragment_sequencer_command_desc)); + } else if (ia_css_is_terminal_manifest_spatial_parameter_terminal( + manifest)) { + ia_css_spatial_param_terminal_manifest_t *spatial_param_term = + (ia_css_spatial_param_terminal_manifest_t *)manifest; + size = ia_css_spatial_param_terminal_get_descriptor_size( + spatial_param_term-> + frame_grid_param_manifest_section_desc_count, + fragment_count); + } else if (ia_css_is_terminal_manifest_program_control_init_terminal( + manifest)) { + ia_css_program_control_init_terminal_manifest_t *progctrlinit_term_man = + (ia_css_program_control_init_terminal_manifest_t *)manifest; + + size = ia_css_program_control_init_terminal_get_descriptor_size( + progctrlinit_term_man); + } +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_sizeof_terminal invalid argument\n"); + } + return size; +} + +ia_css_terminal_t *ia_css_terminal_create( + void *raw_mem, + const ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_param_t *terminal_param, + ia_css_kernel_bitmap_t enable_bitmap) +{ + char *terminal_raw_ptr; + ia_css_terminal_t *terminal = NULL; + uint16_t fragment_count; + int i, j; + int retval = -1; + ia_css_program_group_param_t *param; + + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "ia_css_terminal_create(manifest %p, terminal_param %p): enter:\n", + manifest, terminal_param); + + param = ia_css_terminal_param_get_parent(terminal_param); + fragment_count = ia_css_program_group_param_get_fragment_count(param); + + 
verifexit(manifest != NULL); + verifexit(param != NULL); + + terminal_raw_ptr = (char *) raw_mem; + + terminal = (ia_css_terminal_t *) terminal_raw_ptr; + verifexit(terminal != NULL); + + terminal->size = (uint16_t)ia_css_sizeof_terminal(manifest, param); + verifexit(ia_css_terminal_set_type( + terminal, ia_css_terminal_manifest_get_type(manifest)) == 0); + + terminal->ID = ia_css_terminal_manifest_get_ID(manifest); + + verifexit(ia_css_terminal_set_buffer(terminal, + VIED_NULL) == 0); + + if (ia_css_is_terminal_manifest_data_terminal(manifest) == true) { + ia_css_data_terminal_t *dterminal = + (ia_css_data_terminal_t *)terminal; + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame(dterminal); + ia_css_kernel_bitmap_t intersection = + ia_css_kernel_bitmap_intersection(enable_bitmap, + ia_css_data_terminal_manifest_get_kernel_bitmap( + (const ia_css_data_terminal_manifest_t *)manifest)); + + verifexit(frame != NULL); + verifexit(ia_css_frame_set_buffer_state( + frame, IA_CSS_BUFFER_NULL) == 0); + verifexit(ia_css_is_kernel_bitmap_onehot(intersection) == + true); + + terminal_raw_ptr += sizeof(ia_css_data_terminal_t); + dterminal->fragment_descriptor_offset = + (uint16_t) (terminal_raw_ptr - (char *)terminal); + + dterminal->kernel_id = 0; + while (!ia_css_is_kernel_bitmap_empty(intersection)) { + intersection = ia_css_kernel_bitmap_shift( + intersection); + dterminal->kernel_id++; + } + assert(dterminal->kernel_id > 0); + dterminal->kernel_id -= 1; + + /* some terminal and fragment initialization */ + dterminal->frame_descriptor.frame_format_type = + terminal_param->frame_format_type; + for (i = 0; i < IA_CSS_N_DATA_DIMENSION; i++) { + dterminal->frame_descriptor.dimension[i] = + terminal_param->dimensions[i]; + } + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] = + terminal_param->stride; + dterminal->frame_descriptor.bpp = terminal_param->bpp; + dterminal->frame_descriptor.bpe = terminal_param->bpe; + switch (dterminal->frame_descriptor.frame_format_type) { + case IA_CSS_DATA_FORMAT_UYVY: + case IA_CSS_DATA_FORMAT_YUYV: + case IA_CSS_DATA_FORMAT_Y800: + case IA_CSS_DATA_FORMAT_RGB565: + case IA_CSS_DATA_FORMAT_RGBA888: + case IA_CSS_DATA_FORMAT_BAYER_GRBG: + case IA_CSS_DATA_FORMAT_BAYER_RGGB: + case IA_CSS_DATA_FORMAT_BAYER_BGGR: + case IA_CSS_DATA_FORMAT_BAYER_GBRG: + case IA_CSS_DATA_FORMAT_RAW: + case IA_CSS_DATA_FORMAT_RAW_PACKED: + case IA_CSS_DATA_FORMAT_YYUVYY_VECTORIZED: + case IA_CSS_DATA_FORMAT_PAF: + dterminal->frame_descriptor.plane_count = 1; + dterminal->frame_descriptor.plane_offsets[0] = 0; + break; + case IA_CSS_DATA_FORMAT_NV12: + case IA_CSS_DATA_FORMAT_NV21: + case IA_CSS_DATA_FORMAT_NV16: + case IA_CSS_DATA_FORMAT_NV61: + dterminal->frame_descriptor.plane_count = 2; + dterminal->frame_descriptor.plane_offsets[0] = 0; + dterminal->frame_descriptor.plane_offsets[1] = + dterminal->frame_descriptor.plane_offsets[0] + + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] * + dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION]; + break; + case IA_CSS_DATA_FORMAT_YUV444: + case IA_CSS_DATA_FORMAT_RGB888: + case IA_CSS_DATA_FORMAT_YUV420_VECTORIZED: + dterminal->frame_descriptor.plane_count = 3; + dterminal->frame_descriptor.plane_offsets[0] = 0; + dterminal->frame_descriptor.plane_offsets[1] = + dterminal->frame_descriptor.plane_offsets[0] + + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] * + dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION]; + dterminal->frame_descriptor.plane_offsets[2] = + 
dterminal->frame_descriptor.plane_offsets[1] + + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] * + dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION]; + break; + case IA_CSS_DATA_FORMAT_YUV420: + dterminal->frame_descriptor.plane_count = 3; + dterminal->frame_descriptor.plane_offsets[0] = 0; + dterminal->frame_descriptor.plane_offsets[1] = + dterminal->frame_descriptor.plane_offsets[0] + + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] * + dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION]; + dterminal->frame_descriptor.plane_offsets[2] = + dterminal->frame_descriptor.plane_offsets[1] + + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION]/2 * + dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION]/2; + break; + default: + /* Unset, resulting in potential terminal connect issues */ + dterminal->frame_descriptor.plane_count = 1; + dterminal->frame_descriptor.plane_offsets[0] = 0; + break; + } + /* + * Initial solution for single fragment initialization + * TODO: + * where to get the fragment description params from??? + */ + if (fragment_count > 0) { + ia_css_fragment_descriptor_t *fragment_descriptor = + (ia_css_fragment_descriptor_t *) + terminal_raw_ptr; + + fragment_descriptor->index[IA_CSS_COL_DIMENSION] = + terminal_param->index[IA_CSS_COL_DIMENSION]; + fragment_descriptor->index[IA_CSS_ROW_DIMENSION] = + terminal_param->index[IA_CSS_ROW_DIMENSION]; + fragment_descriptor->offset[0] = + terminal_param->offset; + for (i = 0; i < IA_CSS_N_DATA_DIMENSION; i++) { + fragment_descriptor->dimension[i] = + terminal_param->fragment_dimensions[i]; + } + } + /* end fragment stuff */ + } else if (ia_css_is_terminal_manifest_parameter_terminal(manifest) == + true) { + ia_css_param_terminal_t *pterminal = + (ia_css_param_terminal_t *)terminal; + uint16_t section_count = + ((const ia_css_param_terminal_manifest_t *)manifest)-> + param_manifest_section_desc_count; + size_t curr_offset = 0; + + pterminal->param_section_desc_offset = + sizeof(ia_css_param_terminal_t); + + for (i = 0; i < section_count; i++) { + ia_css_param_section_desc_t *section = + ia_css_param_in_terminal_get_param_section_desc( + pterminal, i); + const ia_css_param_manifest_section_desc_t * + man_section = + ia_css_param_terminal_manifest_get_prm_sct_desc( + (const ia_css_param_terminal_manifest_t *)manifest, i); + + verifjmpexit(man_section != NULL); + verifjmpexit(section != NULL); + + section->mem_size = man_section->max_mem_size; + section->mem_offset = curr_offset; + curr_offset += man_section->max_mem_size; + } + } else if (ia_css_is_terminal_manifest_program_terminal(manifest) == + true && + ia_css_terminal_manifest_get_type(manifest) == + IA_CSS_TERMINAL_TYPE_PROGRAM) { /* for program terminal */ + ia_css_program_terminal_t *prog_terminal = + (ia_css_program_terminal_t *)terminal; + const ia_css_program_terminal_manifest_t *prog_terminal_man = + (const ia_css_program_terminal_manifest_t *)manifest; + ia_css_kernel_fragment_sequencer_info_desc_t + *sequencer_info_desc_base = NULL; + uint16_t section_count = prog_terminal_man-> + fragment_param_manifest_section_desc_count; + uint16_t manifest_info_count = + prog_terminal_man-> + kernel_fragment_sequencer_info_manifest_info_count; + /* information needs to come from user or manifest once + * the size sizeof function is updated. 
+ */ + uint16_t nof_command_objs = 0; + size_t curr_offset = 0; + + prog_terminal->kernel_fragment_sequencer_info_desc_offset = + sizeof(ia_css_program_terminal_t); + prog_terminal->fragment_param_section_desc_offset = + prog_terminal-> + kernel_fragment_sequencer_info_desc_offset + + (fragment_count * manifest_info_count * + sizeof(ia_css_kernel_fragment_sequencer_info_desc_t)) + + (nof_command_objs * + sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t)); + + NOT_USED(sequencer_info_desc_base); + for (i = 0; i < fragment_count; i++) { + for (j = 0; j < section_count; j++) { + ia_css_fragment_param_section_desc_t *section = + ia_css_program_terminal_get_frgmnt_prm_sct_desc( + prog_terminal, i, j, section_count); + const ia_css_fragment_param_manifest_section_desc_t * + man_section = +ia_css_program_terminal_manifest_get_frgmnt_prm_sct_desc + (prog_terminal_man, j); + + verifjmpexit(man_section != NULL); + verifjmpexit(section != NULL); + + section->mem_size = man_section->max_mem_size; + section->mem_offset = curr_offset; + curr_offset += man_section->max_mem_size; + } + + sequencer_info_desc_base = + ia_css_program_terminal_get_kernel_frgmnt_seq_info_desc( + prog_terminal, i, 0, + manifest_info_count); + + /* + * This offset cannot be initialized properly + * since the number of commands in every sequencer + * is not known at this point + */ + /*for (j = 0; j < manifest_info_count; j++) { + sequencer_info_desc_base[j]. + command_desc_offset = + prog_terminal-> + kernel_fragment_sequencer_info_desc_offset + + (manifest_info_count * + sizeof( + ia_css_kernel_fragment_sequencer_info_desc_t) + + (nof_command_objs * + sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t + )); + }*/ + } + } else if (ia_css_is_terminal_manifest_spatial_parameter_terminal( + manifest) == true) { + ia_css_spatial_param_terminal_t *spatial_param_terminal = + (ia_css_spatial_param_terminal_t *)terminal; + ia_css_spatial_param_terminal_manifest_t * + spatia_param_terminal_man = + (ia_css_spatial_param_terminal_manifest_t *)manifest; + + /* Initialize the spatial terminal structure */ + spatial_param_terminal->fragment_grid_desc_offset = + sizeof(ia_css_spatial_param_terminal_t); + spatial_param_terminal->frame_grid_param_section_desc_offset = + spatial_param_terminal->fragment_grid_desc_offset + + (fragment_count * sizeof(ia_css_fragment_grid_desc_t)); + spatial_param_terminal->kernel_id = + spatia_param_terminal_man->kernel_id; + } else if (ia_css_is_terminal_manifest_sliced_terminal(manifest) == + true) { + ia_css_sliced_param_terminal_t *sliced_param_terminal = + (ia_css_sliced_param_terminal_t *)terminal; + ia_css_sliced_param_terminal_manifest_t + *sliced_param_terminal_man = + (ia_css_sliced_param_terminal_manifest_t *)manifest; + + /* Initialize the sliced terminal structure */ + sliced_param_terminal->fragment_slice_desc_offset = + sizeof(ia_css_sliced_param_terminal_t); + sliced_param_terminal->kernel_id = + sliced_param_terminal_man->kernel_id; + } else if (ia_css_is_terminal_manifest_program_control_init_terminal( + manifest) == true) { + verifjmpexit(ia_css_program_control_init_terminal_init( + (ia_css_program_control_init_terminal_t *) + terminal, + (const ia_css_program_control_init_terminal_manifest_t *) + manifest) == 0); + } else { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_create failed, not a data or param terminal. 
Returning (%i)\n", + EFAULT); + goto EXIT; + } + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_terminal_create(): Created successfully terminal %p\n", + terminal); + + retval = 0; +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_terminal_create invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_create failed (%i)\n", retval); + terminal = ia_css_terminal_destroy(terminal); + } + return terminal; +} + +ia_css_terminal_t *ia_css_terminal_destroy( + ia_css_terminal_t *terminal) +{ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_terminal_destroy(terminal %p): enter:\n", terminal); + return terminal; +} + +uint16_t ia_css_param_terminal_compute_section_count( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param) /* Delete 2nd argument*/ +{ + uint16_t section_count = 0; + + NOT_USED(param); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_param_terminal_compute_section_count(): enter:\n"); + + verifexit(manifest != NULL); + section_count = ((const ia_css_param_terminal_manifest_t *)manifest)-> + param_manifest_section_desc_count; +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_param_terminal_compute_section_count: invalid argument\n"); + } + return section_count; +} +#endif /* !defined(__VIED_CELL) */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_impl.h new file mode 100644 index 000000000000..36fb0f1d469a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_impl.h @@ -0,0 +1,1868 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_PSYS_TERMINAL_IMPL_H
+#define __IA_CSS_PSYS_TERMINAL_IMPL_H
+
+#include
+
+#include
+#include
+
+#include
+#include
+
+#include
+
+#include
+#include <error_support.h>	/* for verifexit, verifjmpexit */
+#include <assert_support.h>	/* for COMPILATION_ERROR_IF */
+#include <misc_support.h>	/* for NOT_USED */
+#include "ia_css_psys_terminal_private_types.h"
+#include "ia_css_terminal_manifest_types.h"
+#include "ia_css_psys_dynamic_trace.h"
+#include "ia_css_psys_manifest_types.h"
+#include "ia_css_psys_program_group_private.h"
+#include "ia_css_terminal_types.h"
+
+STORAGE_CLASS_INLINE int ia_css_data_terminal_print(
+	const ia_css_terminal_t *terminal, void *fid)
+{
+	DECLARE_ERRVAL
+	int retval = -1;
+	int i;
+	ia_css_data_terminal_t *dterminal = (ia_css_data_terminal_t *)terminal;
+	uint16_t fragment_count =
+		ia_css_data_terminal_get_fragment_count(dterminal);
+
+	verifexitval(fragment_count != 0, EINVAL);
+
+	retval = ia_css_frame_descriptor_print(
+		ia_css_data_terminal_get_frame_descriptor(dterminal),
+		fid);
+	verifexitval(retval == 0, EINVAL);
+
+	retval = ia_css_frame_print(
+		ia_css_data_terminal_get_frame(dterminal), fid);
+	verifexitval(retval == 0, EINVAL);
+
+	for (i = 0; i < (int)fragment_count; i++) {
+		retval = ia_css_fragment_descriptor_print(
+			ia_css_data_terminal_get_fragment_descriptor(
+				dterminal, i), fid);
+		verifexitval(retval == 0, EINVAL);
+	}
+
+	retval = 0;
+EXIT:
+	if (!noerror()) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_data_terminal_print failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+int ia_css_terminal_print(
+	const ia_css_terminal_t *terminal,
+	void *fid)
+{
+	DECLARE_ERRVAL
+	int retval = -1;
+	ia_css_terminal_type_t term_type;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO,
+		"ia_css_terminal_print(): enter:\n");
+
+	verifexitval(terminal != NULL, EFAULT);
+
+	term_type = ia_css_terminal_get_type(terminal);
+
+	IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO,
+		"\tTerminal %p sizeof %d, typeof %d, parent %p\n",
+		terminal,
+		(int)ia_css_terminal_get_size(terminal),
+		(int)ia_css_terminal_get_type(terminal),
+		(void *)ia_css_terminal_get_parent(terminal));
+
+	switch (term_type) {
+	case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT:
+		ia_css_program_control_init_terminal_print(
+			(ia_css_program_control_init_terminal_t *)terminal);
+		break;
+	case IA_CSS_TERMINAL_TYPE_DATA_IN:
+	case IA_CSS_TERMINAL_TYPE_DATA_OUT:
+		ia_css_data_terminal_print(terminal, fid);
+		break;
+	default:
+		/* other terminal prints are currently not supported */
+		break;
+	}
+
+	retval = 0;
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_terminal_print invalid argument terminal\n");
+	}
+	if (!noerror()) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_terminal_print failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+bool ia_css_is_terminal_input(
+	const ia_css_terminal_t *terminal)
+{
+	DECLARE_ERRVAL
+	bool is_input = false;
+	ia_css_terminal_type_t terminal_type;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_is_terminal_input(): enter:\n");
+
+	verifexitval(terminal != NULL, EFAULT);
+
+	terminal_type = ia_css_terminal_get_type(terminal);
+
+	switch (terminal_type) {
+	case IA_CSS_TERMINAL_TYPE_DATA_IN:	/* Fall through */
+	case IA_CSS_TERMINAL_TYPE_STATE_IN:	/* Fall through */
+	case IA_CSS_TERMINAL_TYPE_PARAM_STREAM:	/* Fall through */
+	case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN:
+	case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN:
+	case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN:
+	case 
IA_CSS_TERMINAL_TYPE_PROGRAM: + case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: + is_input = true; + break; + case IA_CSS_TERMINAL_TYPE_DATA_OUT: /* Fall through */ + case IA_CSS_TERMINAL_TYPE_STATE_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + is_input = false; + break; + default: + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_input: Unknown terminal type (%d)\n", + terminal_type); + goto EXIT; + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_input invalid argument\n"); + } + return is_input; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +size_t ia_css_terminal_get_size( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_size(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + size = terminal->size; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_size invalid argument\n"); + } + return size; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_terminal_type_t ia_css_terminal_get_type( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_type(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + terminal_type = terminal->terminal_type; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_type invalid argument\n"); + } + return terminal_type; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_type( + ia_css_terminal_t *terminal, + const ia_css_terminal_type_t terminal_type) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_set_type(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + terminal->terminal_type = terminal_type; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_type invalid argument terminal\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_type failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint16_t ia_css_terminal_get_terminal_manifest_index( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + uint16_t terminal_manifest_index; + + terminal_manifest_index = 0xffff; + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_terminal_manifest_index(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + terminal_manifest_index = terminal->tm_index; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_terminal_manifest_index: invalid argument\n"); + } + return terminal_manifest_index; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_terminal_manifest_index( + ia_css_terminal_t *terminal, + const uint16_t terminal_manifest_index) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_set_terminal_manifest_index(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + terminal->tm_index = terminal_manifest_index; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_terminal_manifest_index: invalid argument terminal\n"); + } + if (!noerror()) { + 
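+		/* As the checks in this file suggest: verifexitval() records
+		 * an errno-style value and jumps to EXIT, haserror() matches
+		 * one specific recorded value (the EFAULT message above), and
+		 * noerror() only holds when nothing was recorded, so this
+		 * branch reports any remaining failure generically.
+		 */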
IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_terminal_manifest_index: failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_terminal_ID_t ia_css_terminal_get_ID( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_ID_t retval = IA_CSS_TERMINAL_INVALID_ID; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_ID(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + retval = terminal->ID; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_ID invalid argument\n"); + retval = 0; + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_data_terminal_get_kernel_id( + const ia_css_data_terminal_t *dterminal) +{ + DECLARE_ERRVAL + uint8_t retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_get_kernel_id(): enter:\n"); + + verifexitval(dterminal != NULL, EFAULT); + + retval = dterminal->kernel_id; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_get_kernel_id: invalid argument\n"); + retval = 0; + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_connection_type_t ia_css_data_terminal_get_connection_type( + const ia_css_data_terminal_t *dterminal) +{ + DECLARE_ERRVAL + ia_css_connection_type_t connection_type = IA_CSS_N_CONNECTION_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_get_connection_type(): enter:\n"); + + verifexitval(dterminal != NULL, EFAULT); + + connection_type = dterminal->connection_type; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_get_connection_type: invalid argument\n"); + } + return connection_type; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_data_terminal_get_link_id( + const ia_css_data_terminal_t *dterminal) +{ + DECLARE_ERRVAL + uint8_t link_id = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_get_link_id(): enter:\n"); + + verifexitval(dterminal != NULL, EFAULT); + + link_id = dterminal->link_id; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_get_link_id: invalid argument\n"); + } + return link_id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_data_terminal_set_link_id( + ia_css_data_terminal_t *dterminal, + const uint8_t link_id) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_set_link_id(): enter:\n"); + + verifexitval(dterminal != NULL, EFAULT); + dterminal->link_id = link_id; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_set_link_id: invalid argument terminal\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_set_link_id: failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_data_terminal_set_connection_type( + ia_css_data_terminal_t *dterminal, + const ia_css_connection_type_t connection_type) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_set_connection_type(): enter:\n"); + + verifexitval(dterminal != NULL, EFAULT); + + dterminal->connection_type = connection_type; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_set_connection_type: invalid argument dterminal\n"); + } + if 
(!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_set_connection_type failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_group_t *ia_css_terminal_get_parent( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_process_group_t *parent = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_parent(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + parent = (ia_css_process_group_t *) ((char *)terminal + + terminal->parent_offset); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_parent invalid argument\n"); + } + return parent; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_parent( + ia_css_terminal_t *terminal, + ia_css_process_group_t *parent) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_set_parent(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + verifexitval(parent != NULL, EFAULT); + + terminal->parent_offset = (uint16_t) ((char *)parent - + (char *)terminal); + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_parent invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_parent failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_frame_t *ia_css_data_terminal_get_frame( + const ia_css_data_terminal_t *dterminal) +{ + DECLARE_ERRVAL + ia_css_frame_t *frame = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_get_frame(): enter:\n"); + + verifexitval(dterminal != NULL, EFAULT); + + frame = (ia_css_frame_t *)(&(dterminal->frame)); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_get_frame invalid argument\n"); + } + return frame; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_frame_descriptor_t *ia_css_data_terminal_get_frame_descriptor( + const ia_css_data_terminal_t *dterminal) +{ + DECLARE_ERRVAL + ia_css_frame_descriptor_t *frame_descriptor = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_get_frame_descriptor(): enter:\n"); + + verifexitval(dterminal != NULL, EFAULT); + + frame_descriptor = + (ia_css_frame_descriptor_t *)(&(dterminal->frame_descriptor)); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_get_frame_descriptor: invalid argument\n"); + } + return frame_descriptor; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_fragment_descriptor_t *ia_css_data_terminal_get_fragment_descriptor( + const ia_css_data_terminal_t *dterminal, + const unsigned int fragment_index) +{ + DECLARE_ERRVAL + ia_css_fragment_descriptor_t *fragment_descriptor = NULL; + uint16_t fragment_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_get_frame_descriptor(): enter:\n"); + + fragment_count = ia_css_data_terminal_get_fragment_count(dterminal); + + verifexitval(dterminal != NULL, EFAULT); + verifexitval(fragment_count != 0, EINVAL); + verifexitval(fragment_index < fragment_count, EINVAL); + + fragment_descriptor = (ia_css_fragment_descriptor_t *) + ((char *)dterminal + dterminal->fragment_descriptor_offset); + + fragment_descriptor += fragment_index; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_get_frame_descriptor: invalid argument\n"); + } + return 
fragment_descriptor; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint16_t ia_css_data_terminal_get_fragment_count( + const ia_css_data_terminal_t *dterminal) +{ + DECLARE_ERRVAL + ia_css_process_group_t *parent; + uint16_t fragment_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_get_fragment_count(): enter:\n"); + + parent = ia_css_terminal_get_parent((ia_css_terminal_t *)dterminal); + + verifexitval(dterminal != NULL, EFAULT); + verifexitval(parent != NULL, EFAULT); + + fragment_count = ia_css_process_group_get_fragment_count(parent); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_get_fragment_count: invalid argument\n"); + } + return fragment_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_parameter_terminal( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_parameter_terminal(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + /* will return an error value on error */ + terminal_type = ia_css_terminal_get_type(terminal); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_parameter_terminal: invalid argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT); +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_data_terminal( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_data_terminal(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + /* will return an error value on error */ + terminal_type = ia_css_terminal_get_type(terminal); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_data_terminal invalid argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_DATA_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_DATA_OUT); +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_program_terminal( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_program_terminal(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + /* will return an error value on error */ + terminal_type = ia_css_terminal_get_type(terminal); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_program_terminal: invalid argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM); +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_program_control_init_terminal( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_program_control_init_terminal(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + /* will return an error value on error */ + terminal_type = ia_css_terminal_get_type(terminal); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_program_control_init_terminal: invalid argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT); +} + 
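+/*
+ * Illustrative sketch (hypothetical helper, not used elsewhere in this
+ * file): every ia_css_is_terminal_*() predicate above reduces to one
+ * comparison on ia_css_terminal_get_type(), so a caller that needs a
+ * human-readable label for its traces can dispatch on the type directly.
+ */
+STORAGE_CLASS_INLINE const char *ia_css_terminal_type_str(
+	const ia_css_terminal_t *terminal)
+{
+	/* ia_css_terminal_get_type() yields IA_CSS_N_TERMINAL_TYPES for a
+	 * NULL terminal, which lands on the default label below.
+	 */
+	switch (ia_css_terminal_get_type(terminal)) {
+	case IA_CSS_TERMINAL_TYPE_DATA_IN:
+	case IA_CSS_TERMINAL_TYPE_DATA_OUT:
+		return "data";
+	case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN:
+	case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT:
+		return "cached parameter";
+	case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN:
+	case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT:
+		return "spatial parameter";
+	case IA_CSS_TERMINAL_TYPE_PROGRAM:
+		return "program";
+	case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT:
+		return "program control init";
+	default:
+		return "other";
+	}
+}
+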
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_spatial_parameter_terminal( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_spatial_parameter_terminal(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + /* will return an error value on error */ + terminal_type = ia_css_terminal_get_type(terminal); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_spatial_param_terminal: invalid argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT); +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_data_terminal_compute_plane_count( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + DECLARE_ERRVAL + uint8_t plane_count = 1; + + NOT_USED(manifest); + NOT_USED(param); + + verifexitval(manifest != NULL, EFAULT); + verifexitval(param != NULL, EFAULT); + /* TODO: Implementation Missing*/ + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_compute_plane_count(): enter:\n"); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_compute_plane_count: invalid argument\n"); + } + return plane_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_terminal_get_buffer( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + vied_vaddress_t buffer = VIED_NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_buffer(): enter:\n"); + + if (ia_css_is_terminal_data_terminal(terminal)) { + ia_css_frame_t *frame = ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + + verifexitval(frame != NULL, EFAULT); + buffer = ia_css_frame_get_buffer(frame); + } else if (ia_css_is_terminal_parameter_terminal(terminal)) { + const ia_css_param_terminal_t *param_terminal = + (const ia_css_param_terminal_t *)terminal; + + buffer = param_terminal->param_payload.buffer; + } else if (ia_css_is_terminal_program_terminal(terminal)) { + const ia_css_program_terminal_t *program_terminal = + (const ia_css_program_terminal_t *)terminal; + + buffer = program_terminal->param_payload.buffer; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal)) { + const ia_css_program_control_init_terminal_t *program_ctrl_init_terminal = + (const ia_css_program_control_init_terminal_t *)terminal; + + buffer = program_ctrl_init_terminal->param_payload.buffer; + } else if (ia_css_is_terminal_spatial_parameter_terminal(terminal)) { + const ia_css_spatial_param_terminal_t *spatial_terminal = + (const ia_css_spatial_param_terminal_t *)terminal; + + buffer = spatial_terminal->param_payload.buffer; + } +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_buffer: invalid argument terminal\n"); + } + return buffer; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_buffer( + ia_css_terminal_t *terminal, + vied_vaddress_t buffer) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_set_buffer(): enter:\n"); + + if (ia_css_is_terminal_data_terminal(terminal) == true) { + /* Currently using Frames inside data terminal , + * TODO: start directly using data. 
+ */ + ia_css_data_terminal_t *dterminal = + (ia_css_data_terminal_t *)terminal; + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame(dterminal); + + verifexitval(frame != NULL, EFAULT); + retval = ia_css_frame_set_buffer(frame, buffer); + verifexitval(retval == 0, EINVAL); + } else if (ia_css_is_terminal_parameter_terminal(terminal) == true) { + ia_css_param_terminal_t *pterminal = + (ia_css_param_terminal_t *)terminal; + + pterminal->param_payload.buffer = buffer; + retval = 0; + } else if (ia_css_is_terminal_program_terminal(terminal) == true) { + ia_css_program_terminal_t *pterminal = + (ia_css_program_terminal_t *)terminal; + + pterminal->param_payload.buffer = buffer; + retval = 0; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal) == true) { + ia_css_program_control_init_terminal_t *pterminal = + (ia_css_program_control_init_terminal_t *)terminal; + + pterminal->param_payload.buffer = buffer; + retval = 0; + } else if (ia_css_is_terminal_spatial_parameter_terminal(terminal) == + true) { + ia_css_spatial_param_terminal_t *pterminal = + (ia_css_spatial_param_terminal_t *)terminal; + + pterminal->param_payload.buffer = buffer; + retval = 0; + } else { + return retval; + } + + retval = 0; +EXIT: + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_buffer failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_get_terminal_index( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + int terminal_index = -1; + + verifexitval(terminal != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_terminal_index(): enter:\n"); + + if (ia_css_is_terminal_data_terminal(terminal)) { + ia_css_frame_t *frame = ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + + verifexitval(frame != NULL, EFAULT); + terminal_index = ia_css_frame_get_data_index(frame); + } else { + if (ia_css_is_terminal_parameter_terminal(terminal)) { + const ia_css_param_terminal_t *param_terminal = + (const ia_css_param_terminal_t *)terminal; + + terminal_index = param_terminal->param_payload.terminal_index; + } else if (ia_css_is_terminal_program_terminal(terminal)) { + const ia_css_program_terminal_t *program_terminal = + (const ia_css_program_terminal_t *)terminal; + + terminal_index = program_terminal->param_payload.terminal_index; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal)) { + const ia_css_program_control_init_terminal_t *program_ctrl_init_terminal = + (const ia_css_program_control_init_terminal_t *)terminal; + + terminal_index = program_ctrl_init_terminal->param_payload.terminal_index; + } else if (ia_css_is_terminal_spatial_parameter_terminal(terminal)) { + const ia_css_spatial_param_terminal_t *spatial_terminal = + (const ia_css_spatial_param_terminal_t *)terminal; + + terminal_index = spatial_terminal->param_payload.terminal_index; + } else { + verifjmpexit(0); + } + } +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_terminal_index: invalid argument\n"); + } + return terminal_index; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_terminal_index( + ia_css_terminal_t *terminal, + unsigned int terminal_index) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_set_terminal_index(): enter:\n"); + + if (ia_css_is_terminal_data_terminal(terminal) == true) { + /* Currently using Frames inside data terminal , + * TODO: 
start directly using data. + */ + ia_css_data_terminal_t *dterminal = + (ia_css_data_terminal_t *)terminal; + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame(dterminal); + + verifexitval(frame != NULL, EFAULT); + retval = ia_css_frame_set_data_index(frame, terminal_index); + verifexitval(retval == 0, EINVAL); + } else { + if (ia_css_is_terminal_parameter_terminal(terminal) == true) { + ia_css_param_terminal_t *pterminal = + (ia_css_param_terminal_t *)terminal; + + pterminal->param_payload.terminal_index = terminal_index; + retval = 0; + } else if (ia_css_is_terminal_program_terminal(terminal) == true) { + ia_css_program_terminal_t *pterminal = + (ia_css_program_terminal_t *)terminal; + + pterminal->param_payload.terminal_index = terminal_index; + retval = 0; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal) + == true) { + ia_css_program_control_init_terminal_t *pterminal = + (ia_css_program_control_init_terminal_t *)terminal; + + pterminal->param_payload.terminal_index = terminal_index; + retval = 0; + } else if (ia_css_is_terminal_spatial_parameter_terminal(terminal) == + true) { + ia_css_spatial_param_terminal_t *pterminal = + (ia_css_spatial_param_terminal_t *)terminal; + + pterminal->param_payload.terminal_index = terminal_index; + retval = 0; + } else { + return retval; + } + } + + retval = 0; +EXIT: + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_terminal_index failed (%i)\n", + retval); + } + return retval; +} + +STORAGE_CLASS_INLINE bool ia_css_is_data_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest, + const uint16_t nof_fragments) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + + const ia_css_data_terminal_t *dterminal = + (ia_css_data_terminal_t *)terminal; + const ia_css_data_terminal_manifest_t *dt_manifest = + (ia_css_data_terminal_manifest_t *)terminal_manifest; + const ia_css_frame_descriptor_t *frame_descriptor; + ia_css_frame_format_bitmap_t man_frame_format_bitmap; + ia_css_frame_format_bitmap_t proc_frame_format_bitmap; + uint16_t max_value[IA_CSS_N_DATA_DIMENSION]; + uint16_t min_value[IA_CSS_N_DATA_DIMENSION]; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_data_terminal_valid enter\n"); + + frame_descriptor = + ia_css_data_terminal_get_frame_descriptor(dterminal); + verifexitval(frame_descriptor != NULL, EFAULT); + man_frame_format_bitmap = + ia_css_data_terminal_manifest_get_frame_format_bitmap( + dt_manifest); + proc_frame_format_bitmap = + ia_css_frame_format_bit_mask( + frame_descriptor->frame_format_type); + /* + * TODO: Replace by 'validation of frame format type'. 
+ * Currently frame format type is not correctly set by manifest, + * waiting for HSD 1804260604 + */ + if (man_frame_format_bitmap > 0) { + if ((man_frame_format_bitmap & + proc_frame_format_bitmap) == 0) { + uint32_t *bitmap_arr = + (uint32_t *)&man_frame_format_bitmap; + + NOT_USED(bitmap_arr); + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Frame format type not defined in manifest\n"); + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + " man bitmap_arr[]: %d,%d\n", + bitmap_arr[1], bitmap_arr[0]); + bitmap_arr = (uint32_t *)&proc_frame_format_bitmap; + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + " proc bitmap_arr[]: %d,%d\n", + bitmap_arr[1], bitmap_arr[0]); + } + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Frame format bitmap not defined in manifest\n"); + } + ia_css_data_terminal_manifest_get_min_size(dt_manifest, min_value); + /* + * TODO: Replace by validation of Minimal frame column dimensions. + * Currently not correctly set by manifest yet, + * waiting for HSD 1804260604 + */ + if ((frame_descriptor->dimension[IA_CSS_COL_DIMENSION] < + min_value[IA_CSS_COL_DIMENSION])) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Minimal frame column dimensions not set correctly (by manifest)\n"); + } + /* + * TODO: Replace by validation of Minimal frame row dimensions. + * Currently not correctly set by manifest yet, + * waiting for HSD 1804260604 + */ + if (frame_descriptor->dimension[IA_CSS_ROW_DIMENSION] < + min_value[IA_CSS_ROW_DIMENSION]) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Minimal frame row dimensions not set correctly (by manifest)\n"); + } + + ia_css_data_terminal_manifest_get_max_size(dt_manifest, max_value); + /* + * TODO: Replace by validation of Maximal frame column dimensions. + * Currently not correctly set by manifest yet, + * waiting for HSD 1804260604 + */ + if (frame_descriptor->dimension[IA_CSS_COL_DIMENSION] > + max_value[IA_CSS_COL_DIMENSION]) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Maximal frame column dimensions not set correctly (by manifest)\n"); + } + /* + * TODO: Replace by validation of Maximal frame row dimensions. + * Currently not correctly set by manifest yet, + * waiting for HSD 1804260604 + */ + if (frame_descriptor->dimension[IA_CSS_ROW_DIMENSION] > + max_value[IA_CSS_ROW_DIMENSION]) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Maximal frame row dimensions not set correctly (by manifest)\n"); + } + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, VERBOSE, "min_value: [%d,%d]\n", + min_value[IA_CSS_COL_DIMENSION], + min_value[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, VERBOSE, "max_value: [%d,%d]\n", + max_value[IA_CSS_COL_DIMENSION], + max_value[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, VERBOSE, "frame dim: [%d,%d]\n", + frame_descriptor->dimension[IA_CSS_COL_DIMENSION], + frame_descriptor->dimension[IA_CSS_ROW_DIMENSION]); + /* + * TODO: Add validation of fragment dimensions. 
+ * Currently not set by manifest yet, waiting for HSD 1804260604 + */ + NOT_USED(nof_fragments); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_data_terminal_valid() invalid argument\n"); + return false; + } else { + return (!invalid_flag); + } +} + +STORAGE_CLASS_INLINE void ia_css_program_terminal_seq_info_print( + const ia_css_kernel_fragment_sequencer_info_manifest_desc_t + *man_seq_info_desc, + const ia_css_kernel_fragment_sequencer_info_desc_t + *term_seq_info_desc) +{ + NOT_USED(man_seq_info_desc); + NOT_USED(term_seq_info_desc); + + /* slice dimension column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_slice_dimension: %d\n", + term_seq_info_desc-> + fragment_grid_slice_dimension[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_slice_dimension: %d\n", + man_seq_info_desc-> + max_fragment_grid_slice_dimension[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_slice_dimension: %d\n", + man_seq_info_desc-> + min_fragment_grid_slice_dimension[IA_CSS_COL_DIMENSION]); + + /* slice dimension row */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_slice_dimension: %d\n", + term_seq_info_desc-> + fragment_grid_slice_dimension[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_slice_dimension: %d\n", + man_seq_info_desc-> + max_fragment_grid_slice_dimension[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_slice_dimension: %d\n", + man_seq_info_desc-> + min_fragment_grid_slice_dimension[IA_CSS_ROW_DIMENSION]); + + /* slice count column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_slice_count: %d\n", + term_seq_info_desc-> + fragment_grid_slice_count[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_slice_count: %d\n", + man_seq_info_desc-> + max_fragment_grid_slice_count[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_slice_count: %d\n", + man_seq_info_desc-> + min_fragment_grid_slice_count[IA_CSS_COL_DIMENSION]); + + /* slice count row */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_slice_count: %d\n", + term_seq_info_desc-> + fragment_grid_slice_count[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_slice_count: %d\n", + man_seq_info_desc-> + max_fragment_grid_slice_count[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_slice_count: %d\n", + man_seq_info_desc-> + min_fragment_grid_slice_count[IA_CSS_ROW_DIMENSION]); + + /* decimation factor column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_point_decimation_factor: %d\n", + term_seq_info_desc-> + fragment_grid_point_decimation_factor[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_point_decimation_factor: %d\n", + man_seq_info_desc-> + max_fragment_grid_point_decimation_factor[IA_CSS_COL_DIMENSION] + ); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_point_decimation_factor: %d\n", + man_seq_info_desc-> + min_fragment_grid_point_decimation_factor[IA_CSS_COL_DIMENSION] + ); + + /* decimation factor row */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_point_decimation_factor: %d\n", + term_seq_info_desc-> + fragment_grid_point_decimation_factor[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + 
"max_fragment_grid_point_decimation_factor: %d\n", + man_seq_info_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_point_decimation_factor: %d\n", + man_seq_info_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + + /* index column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_overlay_pixel_topleft_index: %d\n", + term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_overlay_pixel_topleft_index: %d\n", + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_overlay_pixel_topleft_index: %d\n", + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + + /* index row */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_overlay_pixel_topleft_index: %d\n", + term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_overlay_pixel_topleft_index: %d\n", + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_overlay_pixel_topleft_index: %d\n", + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + + /* dimension column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_overlay_pixel_dimension: %d\n", + term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_overlay_pixel_dimension: %d\n", + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_overlay_pixel_dimension: %d\n", + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + + /* dimension column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_overlay_pixel_dimension: %d\n", + term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_overlay_pixel_dimension: %d\n", + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_overlay_pixel_dimension: %d\n", + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); +} + +STORAGE_CLASS_INLINE bool ia_css_is_program_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest, + const uint16_t nof_fragments) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + uint16_t frag_idx; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_program_terminal_valid enter\n"); + + for (frag_idx = 0; frag_idx < nof_fragments; frag_idx++) { + uint16_t frag_seq_info_count, seq_idx; + const ia_css_program_terminal_t *prog_term; + const ia_css_program_terminal_manifest_t *prog_term_man; + + prog_term = (const ia_css_program_terminal_t *)terminal; + prog_term_man = + (const ia_css_program_terminal_manifest_t *) + terminal_manifest; + frag_seq_info_count = + prog_term_man-> + kernel_fragment_sequencer_info_manifest_info_count; + + for (seq_idx = 0; seq_idx < 
frag_seq_info_count; seq_idx++) { + const ia_css_kernel_fragment_sequencer_info_desc_t + *term_seq_info_desc; + const + ia_css_kernel_fragment_sequencer_info_manifest_desc_t * + man_seq_info_desc; + + term_seq_info_desc = + ia_css_program_terminal_get_kernel_frgmnt_seq_info_desc( + prog_term, frag_idx, seq_idx, + frag_seq_info_count); + verifexitval(term_seq_info_desc != NULL, EFAULT); + man_seq_info_desc = + ia_css_program_terminal_manifest_get_kernel_frgmnt_seq_info_desc + (prog_term_man, seq_idx); + verifexitval(man_seq_info_desc != NULL, EFAULT); + + ia_css_program_terminal_seq_info_print( + man_seq_info_desc, term_seq_info_desc); + /* slice dimension column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION]); + + /* slice dimension row */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION]); + + /* slice count column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION]); + + /* slice count row */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION]); + + /* decimation factor column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION]); + + /* decimation factor row */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + + /* index column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + 
(term_seq_info_desc->
+				fragment_grid_overlay_pixel_topleft_index[
+					IA_CSS_COL_DIMENSION] <
+				man_seq_info_desc->
+				min_fragment_grid_overlay_pixel_topleft_index[
+					IA_CSS_COL_DIMENSION]);
+
+			/* index row */
+			invalid_flag = invalid_flag ||
+				(term_seq_info_desc->
+				fragment_grid_overlay_pixel_topleft_index[
+					IA_CSS_ROW_DIMENSION] >
+				man_seq_info_desc->
+				max_fragment_grid_overlay_pixel_topleft_index[
+					IA_CSS_ROW_DIMENSION]);
+			invalid_flag = invalid_flag ||
+				(term_seq_info_desc->
+				fragment_grid_overlay_pixel_topleft_index[
+					IA_CSS_ROW_DIMENSION] <
+				man_seq_info_desc->
+				min_fragment_grid_overlay_pixel_topleft_index[
+					IA_CSS_ROW_DIMENSION]);
+
+			/* dimension column */
+			invalid_flag = invalid_flag ||
+				(term_seq_info_desc->
+				fragment_grid_overlay_pixel_dimension[
+					IA_CSS_COL_DIMENSION] >
+				man_seq_info_desc->
+				max_fragment_grid_overlay_pixel_dimension[
+					IA_CSS_COL_DIMENSION]);
+			invalid_flag = invalid_flag ||
+				(term_seq_info_desc->
+				fragment_grid_overlay_pixel_dimension[
+					IA_CSS_COL_DIMENSION] <
+				man_seq_info_desc->
+				min_fragment_grid_overlay_pixel_dimension[
+					IA_CSS_COL_DIMENSION]);
+
+			/* dimension row */
+			invalid_flag = invalid_flag ||
+				(term_seq_info_desc->
+				fragment_grid_overlay_pixel_dimension[
+					IA_CSS_ROW_DIMENSION] >
+				man_seq_info_desc->
+				max_fragment_grid_overlay_pixel_dimension[
+					IA_CSS_ROW_DIMENSION]);
+			invalid_flag = invalid_flag ||
+				(term_seq_info_desc->
+				fragment_grid_overlay_pixel_dimension[
+					IA_CSS_ROW_DIMENSION] <
+				man_seq_info_desc->
+				min_fragment_grid_overlay_pixel_dimension[
+					IA_CSS_ROW_DIMENSION]);
+		}
+	}
+
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_is_program_terminal_valid() invalid argument\n");
+		return false;
+	}
+	if (invalid_flag == true) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
+			"ia_css_is_program_terminal_valid(): validation failed\n");
+		/* TODO: program terminal parameters are not correctly defined
+		 * yet; ignore the validation result until the issues have been
+		 * resolved.
+		 */
+		return true;
+	}
+	return (!invalid_flag);
+}
+
+STORAGE_CLASS_INLINE bool ia_css_is_sliced_terminal_valid(
+	const ia_css_terminal_t *terminal,
+	const ia_css_terminal_manifest_t *terminal_manifest,
+	const uint16_t nof_fragments)
+{
+	DECLARE_ERRVAL
+	bool invalid_flag = false;
+	uint16_t frag_idx;
+	uint16_t slice_idx, section_idx;
+
+	const ia_css_sliced_param_terminal_t *sliced_term =
+		(const ia_css_sliced_param_terminal_t *)terminal;
+	const ia_css_sliced_param_terminal_manifest_t *sliced_term_man =
+		(const ia_css_sliced_param_terminal_manifest_t *)
+		terminal_manifest;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_is_sliced_terminal_valid enter\n");
+
+	for (frag_idx = 0; frag_idx < nof_fragments; frag_idx++) {
+		const ia_css_fragment_slice_desc_t *fragment_slice_desc =
+			ia_css_sliced_param_terminal_get_fragment_slice_desc(
+				sliced_term, frag_idx);
+
+		verifexitval(fragment_slice_desc != NULL, EFAULT);
+
+		for (slice_idx = 0;
+		     slice_idx < fragment_slice_desc->slice_count;
+		     slice_idx++) {
+			for (section_idx = 0;
+			     section_idx <
+			     sliced_term_man->sliced_param_section_count;
+			     section_idx++) {
+				const ia_css_sliced_param_manifest_section_desc_t *
+					slice_man_section_desc;
+				const ia_css_slice_param_section_desc_t *
+					slice_section_desc;
+
+				slice_man_section_desc =
+					ia_css_sliced_param_terminal_manifest_get_sliced_prm_sct_desc(
+						sliced_term_man, section_idx);
+				slice_section_desc =
+					ia_css_sliced_param_terminal_get_slice_param_section_desc(
+						sliced_term, frag_idx,
+						slice_idx, section_idx,
+						sliced_term_man->sliced_param_section_count);
+				verifexitval(slice_man_section_desc != NULL, EFAULT);
+				verifexitval(slice_section_desc != NULL, EFAULT);
+
+				invalid_flag = invalid_flag ||
+					(slice_section_desc->mem_size >
+					 slice_man_section_desc->max_mem_size);
+			}
+		}
+	}
+
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_is_sliced_terminal_valid() invalid argument\n");
+		return false;
+	} else {
+		return (!invalid_flag);
+	}
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+bool ia_css_is_terminal_valid(
+	const ia_css_terminal_t *terminal,
+	const ia_css_terminal_manifest_t *terminal_manifest)
+{
+	DECLARE_ERRVAL
+	bool is_valid = false;
+	uint16_t nof_fragments;
+	ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES;
+
+	verifexitval(NULL != terminal, EFAULT);
+	verifexitval(NULL != terminal_manifest, EFAULT);
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_is_terminal_valid enter\n");
+
+	nof_fragments = ia_css_data_terminal_get_fragment_count(
+		(const ia_css_data_terminal_t *)terminal);
+	terminal_type = ia_css_terminal_get_type(terminal);
+
+	switch (terminal_type) {
+	case IA_CSS_TERMINAL_TYPE_DATA_IN:
+	case IA_CSS_TERMINAL_TYPE_DATA_OUT:
+		is_valid = ia_css_is_data_terminal_valid(terminal,
+			terminal_manifest, nof_fragments);
+		break;
+	case IA_CSS_TERMINAL_TYPE_PROGRAM:
+		is_valid = ia_css_is_program_terminal_valid(terminal,
+			terminal_manifest, nof_fragments);
+		break;
+	case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN:
+	case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT:
+	case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN:
+	case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT:
+	case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT:
+		/* Nothing to be validated for cached, spatial and program
+		 * control init parameters; report them as valid.
+		 */
+		is_valid = true;
+		break;
+	case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN:
+	case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT:
+		is_valid = ia_css_is_sliced_terminal_valid(terminal,
+			terminal_manifest, nof_fragments);
+		break;
+	default:
+		/* Terminal type unknown, return invalid */
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, WARNING,
+			"ia_css_is_terminal_valid() Terminal type %x unknown\n",
+			(int)terminal_type);
+		is_valid = false;
+		break;
+	}
+
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_is_terminal_valid() invalid argument\n");
+		return false;
+	}
+	/* TODO: to be removed once all PGs pass validation */
+	if (is_valid == false) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO,
+			"ia_css_is_terminal_valid(): type: %d validation failed\n",
+			terminal_type);
+	}
+	return is_valid;
+}
+
+/* ================= Program Control Init Terminal - START ================= */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+int
+ia_css_program_control_init_terminal_init(
+	ia_css_program_control_init_terminal_t *terminal,
+	const ia_css_program_control_init_terminal_manifest_t *manifest)
+{
+	int retval = -1;
+	unsigned int i;
+	unsigned int base_load_sec;
+	unsigned int base_connect_sec;
+	unsigned int load_index = 0;
+	unsigned int connect_index = 0;
+	unsigned int load_section_count = 0;
+	unsigned int connect_section_count = 0;
+
+	ia_css_program_control_init_manifest_program_desc_t *man_progs;
+
+	verifjmpexit(terminal != NULL);
+	verifjmpexit(manifest != NULL);
+
+	man_progs =
+		ia_css_program_control_init_terminal_manifest_get_program_desc(manifest, 0);
+	verifjmpexit(man_progs != NULL);
+
+	for (i = 0; i < manifest->program_count; i++) {
+		load_section_count += man_progs[i].load_section_count;
+		connect_section_count += man_progs[i].connect_section_count;
+	}
+
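+	/*
+	 * Memory layout written below, one contiguous blob:
+	 *   [terminal header][program_count program descriptors]
+	 *   [all load section descriptors][all connect section descriptors]
+	 * The per-program section offsets are stored relative to each
+	 * program descriptor, hence the "- i * sizeof(...program_desc_t)"
+	 * corrections in the loop that follows.
+	 */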
terminal->program_count = manifest->program_count; + terminal->program_section_desc_offset = + sizeof(ia_css_program_control_init_terminal_t); + + base_load_sec = /* base_load_sec relative to first program */ + terminal->program_count * + sizeof(ia_css_program_control_init_program_desc_t); + + base_connect_sec = base_load_sec + + load_section_count * + sizeof(ia_css_program_control_init_load_section_desc_t); + + for (i = 0; i < terminal->program_count; i++) { + ia_css_program_control_init_program_desc_t *prog; + + prog = ia_css_program_control_init_terminal_get_program_desc( + terminal, i); + verifjmpexit(prog != NULL); + + prog->load_section_count = man_progs[i].load_section_count; + prog->connect_section_count = man_progs[i].connect_section_count; + + prog->load_section_desc_offset = + base_load_sec + + load_index * + sizeof(ia_css_program_control_init_load_section_desc_t) - + i * sizeof(ia_css_program_control_init_program_desc_t); + prog->connect_section_desc_offset = + base_connect_sec + + connect_index * + sizeof(ia_css_program_control_init_connect_section_desc_t) - + i * sizeof(ia_css_program_control_init_program_desc_t); + + load_index += man_progs[i].load_section_count; + connect_index += man_progs[i].connect_section_count; + } + retval = 0; +EXIT: + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +unsigned int +ia_css_program_control_init_terminal_get_descriptor_size( + const ia_css_program_control_init_terminal_manifest_t *manifest) +{ + unsigned int i; + unsigned size = 0; + unsigned load_section_count = 0; + unsigned connect_section_count = 0; + ia_css_program_control_init_manifest_program_desc_t *man_progs; + verifjmpexit(manifest != NULL); + + man_progs = + ia_css_program_control_init_terminal_manifest_get_program_desc( + manifest, 0); + verifjmpexit(man_progs != NULL); + + for (i = 0; i < manifest->program_count; i++) { + load_section_count += man_progs[i].load_section_count; + connect_section_count += man_progs[i].connect_section_count; + } + + size = sizeof(ia_css_program_control_init_terminal_t) + + manifest->program_count * + sizeof(struct ia_css_program_control_init_program_desc_s) + + load_section_count * + sizeof(struct ia_css_program_control_init_load_section_desc_s) + + connect_section_count * + sizeof(struct ia_css_program_control_init_connect_section_desc_s); +EXIT: + return size; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +void ia_css_program_control_init_terminal_print( + const ia_css_program_control_init_terminal_t *terminal) +{ + unsigned int prog_idx, sec_idx; + ia_css_program_control_init_program_desc_t *prog; + ia_css_program_control_init_load_section_desc_t *load_sec; + ia_css_program_control_init_connect_section_desc_t *connect_sec; + + verifjmpexit(terminal != NULL); + + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "program_count: %d, payload_fragment_stride: %d\n", + terminal->program_count, + terminal->payload_fragment_stride); + + for (prog_idx = 0; prog_idx < terminal->program_count; prog_idx++) { + prog = ia_css_program_control_init_terminal_get_program_desc( + terminal, prog_idx); + verifjmpexit(prog != NULL); + + for (sec_idx = 0; sec_idx < prog->load_section_count; sec_idx++) { + load_sec = + ia_css_program_control_init_terminal_get_load_section_desc( + prog, sec_idx); + verifjmpexit(load_sec != NULL); + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "load_section>> device_descriptor_id: 0x%x, mem_offset: %d, " + "mem_size: %d, mode_bitmask: %x\n", + load_sec->device_descriptor_id.data, + load_sec->mem_offset, + load_sec->mem_size, + 
load_sec->mode_bitmask); + } + for (sec_idx = 0; sec_idx < prog->connect_section_count; sec_idx++) { + connect_sec = + ia_css_program_control_init_terminal_get_connect_section_desc( + prog, sec_idx); + verifjmpexit(connect_sec != NULL); + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "connect_section>> device_descriptor_id: 0x%x, " + "connect_terminal_ID: %d, connect_section_idx: %d, " + "mode_bitmask: %x\n", + connect_sec->device_descriptor_id.data, + connect_sec->connect_terminal_ID, + connect_sec->connect_section_idx, + connect_sec->mode_bitmask); + } + } +EXIT: + return; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_control_init_program_desc_t * +ia_css_program_control_init_terminal_get_program_desc( + const ia_css_program_control_init_terminal_t *prog_ctrl_init_terminal, + const unsigned int program_index) +{ + ia_css_program_control_init_program_desc_t *program_desc_base; + ia_css_program_control_init_program_desc_t *program_desc = NULL; + + verifjmpexit(prog_ctrl_init_terminal != NULL); + verifjmpexit(program_index < prog_ctrl_init_terminal->program_count); + + program_desc_base = (ia_css_program_control_init_program_desc_t *) + (((const char *)prog_ctrl_init_terminal) + + prog_ctrl_init_terminal->program_section_desc_offset); + program_desc = &(program_desc_base[program_index]); + +EXIT: + return program_desc; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_id_t ia_css_program_control_init_terminal_get_process_id( + const ia_css_program_control_init_program_desc_t *program_desc) +{ + ia_css_process_id_t process_id = 0; + + verifjmpexit(program_desc != NULL); + + process_id = program_desc->control_info.process_id; + +EXIT: + return process_id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_program_control_init_terminal_get_num_done_events( + const ia_css_program_control_init_program_desc_t *program_desc) +{ + uint8_t num_done_events = 0; + + verifjmpexit(program_desc != NULL); + + num_done_events = program_desc->control_info.num_done_events; + +EXIT: + return num_done_events; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +void ia_css_program_control_init_terminal_set_control_info( + ia_css_program_control_init_program_desc_t *program_desc, + ia_css_process_id_t process_id, + uint8_t num_done_events) +{ + verifjmpexit(program_desc != NULL); + + program_desc->control_info.process_id = process_id; + program_desc->control_info.num_done_events = num_done_events; + +EXIT: + return; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_control_init_load_section_desc_t * +ia_css_program_control_init_terminal_get_load_section_desc( + const ia_css_program_control_init_program_desc_t *program_desc, + const unsigned int load_section_index) +{ + ia_css_program_control_init_load_section_desc_t *load_section_desc_base; + ia_css_program_control_init_load_section_desc_t *load_section_desc = NULL; + + verifjmpexit(program_desc != NULL); + verifjmpexit(load_section_index < program_desc->load_section_count); + + load_section_desc_base = (ia_css_program_control_init_load_section_desc_t *) + (((const char *)program_desc) + + program_desc->load_section_desc_offset); + load_section_desc = &(load_section_desc_base[load_section_index]); + +EXIT: + return load_section_desc; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_control_init_connect_section_desc_t * +ia_css_program_control_init_terminal_get_connect_section_desc( + const ia_css_program_control_init_program_desc_t *program_desc, + const unsigned int connect_section_index) +{ + 
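+	/* Mirrors ..._get_load_section_desc() above: resolve the offset
+	 * stored relative to the program descriptor itself, then index
+	 * into the resulting array of connect section descriptors.
+	 */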
ia_css_program_control_init_connect_section_desc_t *connect_sec_desc_base; + ia_css_program_control_init_connect_section_desc_t *connect_sec_desc = NULL; + + verifjmpexit(program_desc != NULL); + verifjmpexit(connect_section_index < program_desc->connect_section_count); + + connect_sec_desc_base = + (ia_css_program_control_init_connect_section_desc_t *) + (((const char *)program_desc) + + program_desc->connect_section_desc_offset); + connect_sec_desc = &(connect_sec_desc_base[connect_section_index]); + +EXIT: + return connect_sec_desc; +} + +/* ================= Program Control Init Terminal - END ================= */ + +#endif /* __IA_CSS_PSYS_TERMINAL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_private_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_private_types.h new file mode 100644 index 000000000000..68626561acb5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_private_types.h @@ -0,0 +1,186 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_PRIVATE_TYPES_H +#define __IA_CSS_PSYS_TERMINAL_PRIVATE_TYPES_H + +#include "ia_css_terminal_types.h" +#include "ia_css_program_group_data.h" +#include "ia_css_psys_manifest_types.h" + +#define N_UINT16_IN_DATA_TERMINAL_STRUCT 1 +#define N_UINT8_IN_DATA_TERMINAL_STRUCT 3 +#define N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT 3 + +/* ========================= Data terminal - START ========================= */ + +#define SIZE_OF_DATA_TERMINAL_STRUCT_BITS \ + (SIZE_OF_TERMINAL_STRUCT_BITS \ + + IA_CSS_FRAME_DESCRIPTOR_STRUCT_BITS \ + + IA_CSS_FRAME_STRUCT_BITS \ + + IA_CSS_STREAM_STRUCT_BITS \ + + IA_CSS_UINT32_T_BITS \ + + IA_CSS_CONNECTION_TYPE_BITS \ + + (N_UINT16_IN_DATA_TERMINAL_STRUCT * 16) \ + + (N_UINT8_IN_DATA_TERMINAL_STRUCT * 8) \ + + (N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT * 8)) + +/* + * The (data) terminal can be attached to a buffer or a stream. + * The stream interface is not necessarily limited to strict in-order access. + * For a stream the restriction is that contrary to a buffer it cannot be + * addressed directly, i.e. 
it behaves as a port, + * but it may support stream_pos() and/or seek() operations + */ +struct ia_css_data_terminal_s { + /**< Data terminal base */ + ia_css_terminal_t base; + /**< Properties of the data attached to the terminal */ + ia_css_frame_descriptor_t frame_descriptor; + /**< Data buffer handle attached to the terminal */ + ia_css_frame_t frame; + /**< (exclusive) Data stream handle attached to the terminal + * if the data is sourced over a device port + */ + ia_css_stream_t stream; + /**< Reserved */ + uint32_t reserved; + /**< Connection {buffer, stream, ...} */ + ia_css_connection_type_t connection_type; + /**< Array[fragment_count] (fragment_count being equal for all + * terminals in a subgraph) of fragment descriptors + */ + uint16_t fragment_descriptor_offset; + /**< Kernel id where this terminal is connected to */ + uint8_t kernel_id; + /**< Indicate to which subgraph this terminal belongs + * for common constraints + */ + uint8_t subgraph_id; + /* Link ID of the data terminal */ + uint8_t link_id; + /**< Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT]; +}; +/* ========================== Data terminal - END ========================== */ + +/* ================= Program Control Init Terminal - START ================= */ +#define SIZE_OF_PROG_CONTROL_INIT_LOAD_SECTION_DESC_STRUCT_BITS \ + (DEVICE_DESCRIPTOR_ID_BITS \ + + (3 * IA_CSS_UINT32_T_BITS) \ + ) +struct ia_css_program_control_init_load_section_desc_s { + /* Offset of the parameter allocation in memory */ + uint32_t mem_offset; + /* Memory allocation size needs of this parameter */ + uint32_t mem_size; + /* Device descriptor */ + device_descriptor_id_t device_descriptor_id; /* 32 bits */ + /* (Applicable to) mode bitmask */ + uint32_t mode_bitmask; +}; + +#define MODE_BITMASK_MEMORY (1u << IA_CSS_CONNECTION_MEMORY) +#define MODE_BITMASK_MEMORY_STREAM (1u << IA_CSS_CONNECTION_MEMORY_STREAM) +#define MODE_BITMASK_STREAM (1u << IA_CSS_CONNECTION_STREAM) +#define MODE_BITMASK_DONT_CARE (MODE_BITMASK_MEMORY | MODE_BITMASK_MEMORY_STREAM | MODE_BITMASK_STREAM) + +#define N_PADDING_UINT8_IN_PROG_CTRL_INIT_CONNECT_SECT_STRUCT (5) +#define SIZE_OF_PROG_CONTROL_INIT_CONNECT_SECTION_DESC_STRUCT_BITS \ + (DEVICE_DESCRIPTOR_ID_BITS \ + + (1 * IA_CSS_UINT32_T_BITS) \ + + (1 * IA_CSS_UINT16_T_BITS) \ + + IA_CSS_TERMINAL_ID_BITS \ + + (N_PADDING_UINT8_IN_PROG_CTRL_INIT_CONNECT_SECT_STRUCT * \ + IA_CSS_UINT8_T_BITS) \ + ) +struct ia_css_program_control_init_connect_section_desc_s { + /* Device descriptor */ + device_descriptor_id_t device_descriptor_id; /* 32 bits */ + /* (Applicable to) mode bitmask */ + uint32_t mode_bitmask; + /* Connected terminal section (plane) index */ + uint16_t connect_section_idx; + /* Absolute referral ID for the connected terminal */ + ia_css_terminal_ID_t connect_terminal_ID; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PROG_CTRL_INIT_CONNECT_SECT_STRUCT]; +}; + +#define N_PADDING_UINT8_IN_PROG_DESC_CONTROL_INFO (1) +#define N_PADDING_UINT8_IN_PROG_CTRL_INIT_PROGRAM_DESC_STRUCT (4) +#define SIZE_OF_PROGRAM_DESC_CONTROL_INFO_STRUCT_BITS \ + (1 * IA_CSS_UINT16_T_BITS) \ + + (1 * IA_CSS_UINT8_T_BITS) \ + + (N_PADDING_UINT8_IN_PROG_DESC_CONTROL_INFO * IA_CSS_UINT8_T_BITS) + +#define SIZE_OF_PROG_CONTROL_INIT_PROG_DESC_STRUCT_BITS \ + (4 * IA_CSS_UINT16_T_BITS) \ + + (SIZE_OF_PROGRAM_DESC_CONTROL_INFO_STRUCT_BITS) \ + + (N_PADDING_UINT8_IN_PROG_CTRL_INIT_PROGRAM_DESC_STRUCT * \ + IA_CSS_UINT8_T_BITS) + +struct ia_css_program_desc_control_info_s { + /* 12-bit 
process identifier */ + ia_css_process_id_t process_id; + /* number of done acks required to close the process */ + uint8_t num_done_events; + uint8_t padding[N_PADDING_UINT8_IN_PROG_DESC_CONTROL_INFO]; +}; + +struct ia_css_program_control_init_program_desc_s { + /* Number of load sections in this program */ + uint16_t load_section_count; + /* Points to variable size array of + * ia_css_program_control_init_load_section_desc_s + * in relation to its program_desc + */ + uint16_t load_section_desc_offset; + /* Number of connect sections in this program */ + uint16_t connect_section_count; + /* Points to variable size array of + * ia_css_program_control_init_connect_section_desc_s + * in relation to its program_desc + */ + uint16_t connect_section_desc_offset; + struct ia_css_program_desc_control_info_s control_info; + /* align to 64 bits */ + uint8_t padding[N_PADDING_UINT8_IN_PROG_CTRL_INIT_PROGRAM_DESC_STRUCT]; +}; + +#define SIZE_OF_PROG_CONTROL_INIT_TERM_STRUCT_BITS \ + (SIZE_OF_TERMINAL_STRUCT_BITS \ + + IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \ + + (1 * IA_CSS_UINT32_T_BITS) \ + + (2 * IA_CSS_UINT16_T_BITS) \ + ) +struct ia_css_program_control_init_terminal_s { + /* Parameter terminal base */ + ia_css_terminal_t base; + /* Parameter buffer handle attached to the terminal */ + ia_css_param_payload_t param_payload; + /* Fragment stride for the payload, used to find the base + * of the payload for a given fragment + */ + uint32_t payload_fragment_stride; + /* Points to the variable array of + * ia_css_program_control_init_program_desc_s + */ + uint16_t program_section_desc_offset; + /* Number of instantiated programs in program group (processes) */ + uint16_t program_count; +}; +/* ================= Program Control Init Terminal - END ================= */ + +#endif /* __IA_CSS_PSYS_TERMINAL_PRIVATE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi.h new file mode 100644 index 000000000000..4c8fd33b331c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi.h @@ -0,0 +1,23 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYSAPI_H +#define __IA_CSS_PSYSAPI_H + +#include +#include +#include +#include + +#endif /* __IA_CSS_PSYSAPI_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_fw_version.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_fw_version.h new file mode 100644 index 000000000000..5658a2988a08 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_fw_version.h @@ -0,0 +1,33 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __IA_CSS_PSYSAPI_FW_VERSION_H +#define __IA_CSS_PSYSAPI_FW_VERSION_H + +/* PSYSAPI FW VERSION is taken from Makefile for FW tests */ +#define BXT_FW_RELEASE_VERSION PSYS_FIRMWARE_VERSION + +enum ia_css_process_group_protocol_version { + /* + * Legacy protocol + */ + IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY = 0, + /* + * Persistent process group support protocol + */ + IA_CSS_PROCESS_GROUP_PROTOCOL_PPG, + IA_CSS_PROCESS_GROUP_N_PROTOCOLS +}; + +#endif /* __IA_CSS_PSYSAPI_FW_VERSION_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_trace.h new file mode 100644 index 000000000000..e35ec24c77b3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_trace.h @@ -0,0 +1,78 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYSAPI_TRACE_H +#define __IA_CSS_PSYSAPI_TRACE_H + +#include "ia_css_trace.h" + +#define PSYSAPI_TRACE_LOG_LEVEL_OFF 0 +#define PSYSAPI_TRACE_LOG_LEVEL_NORMAL 1 +#define PSYSAPI_TRACE_LOG_LEVEL_DEBUG 2 + +/* PSYSAPI and all the submodules in PSYSAPI will have the default tracing + * level set to the PSYSAPI_TRACE_CONFIG level. 
If not defined in the
+ * psysapi.mk file, it will be set by default to no trace
+ * (PSYSAPI_TRACE_LOG_LEVEL_OFF)
+ */
+#define PSYSAPI_TRACE_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF
+
+#if !defined(PSYSAPI_TRACE_CONFIG)
+	#define PSYSAPI_TRACE_CONFIG PSYSAPI_TRACE_CONFIG_DEFAULT
+#endif
+
+/* The module specific trace settings below are derived from the
+ * PSYSAPI_TRACE_CONFIG level selected above
+ */
+#if (defined(PSYSAPI_TRACE_CONFIG))
+	/* Module specific trace setting */
+	#if PSYSAPI_TRACE_CONFIG == PSYSAPI_TRACE_LOG_LEVEL_OFF
+		/* PSYSAPI_TRACE_LOG_LEVEL_OFF */
+		#define PSYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE
+		#define PSYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED
+	#elif PSYSAPI_TRACE_CONFIG == PSYSAPI_TRACE_LOG_LEVEL_NORMAL
+		/* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */
+		#define PSYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE
+		#define PSYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED
+	#elif PSYSAPI_TRACE_CONFIG == PSYSAPI_TRACE_LOG_LEVEL_DEBUG
+		/* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */
+		#define PSYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE
+		#define PSYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED
+	#else
+		#error "No PSYSAPI_TRACE_CONFIG Tracing level defined"
+	#endif
+#else
+	#error "PSYSAPI_TRACE_CONFIG not defined"
+#endif
+
+/* Overriding submodules in PSYSAPI with a specific tracing level */
+/* #define PSYSAPI_DYNAMIC_TRACING_OVERRIDE TRACE_LOG_LEVEL_VERBOSE */
+
+#endif /* __IA_CSS_PSYSAPI_TRACE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_kernel_bitmap.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_kernel_bitmap.h
new file mode 100644
index 000000000000..3fec775eb019
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_kernel_bitmap.h
@@ -0,0 +1,223 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_KERNEL_BITMAP_H
+#define __IA_CSS_KERNEL_BITMAP_H
+
+/*!
\file */ + +/** @file ia_css_kernel_bitmap.h + * + * The types and operations to make logic decisions given kernel bitmaps + * "ia_css_kernel_bitmap_t" can be larger than native types + */ + +#include +#include "vied_nci_psys_resource_model.h" + +#define IA_CSS_KERNEL_BITMAP_BITS 64 +#define IA_CSS_KERNEL_BITMAP_ELEM_TYPE uint32_t +#define IA_CSS_KERNEL_BITMAP_ELEM_BITS \ + (sizeof(IA_CSS_KERNEL_BITMAP_ELEM_TYPE)*8) +#define IA_CSS_KERNEL_BITMAP_NOF_ELEMS \ + ((IA_CSS_KERNEL_BITMAP_BITS) / (IA_CSS_KERNEL_BITMAP_ELEM_BITS)) + +/** An element is a 32 bit unsigned integer. 64 bit integers might cause + * problems in the compiler. + */ +typedef struct { + IA_CSS_KERNEL_BITMAP_ELEM_TYPE data[IA_CSS_KERNEL_BITMAP_NOF_ELEMS]; +} ia_css_kernel_bitmap_elems_t; + +/** Users should make no assumption about the actual type of + * ia_css_kernel_bitmap_t. + * Users should use IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS in + * case they erroneously assume that this type is uint64_t and they + * cannot change their implementation. + */ +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS +typedef ia_css_kernel_bitmap_elems_t ia_css_kernel_bitmap_t; +#else +typedef uint64_t ia_css_kernel_bitmap_t; +#if IA_CSS_KERNEL_BITMAP_BITS > 64 +#error IA_CSS_KERNEL_BITMAP_BITS > 64 not supported \ + with IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS +#endif +#endif + +/*! Print the bits of a kernel bitmap + + @return < 0 on error + */ +extern int ia_css_kernel_bitmap_print( + const ia_css_kernel_bitmap_t bitmap, + void *fid); + +/*! Create an empty kernel bitmap + + @return bitmap = 0 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_clear(void); + +/*! Creates the complement of a kernel bitmap + * @param bitmap[in] kernel bitmap + * @return ~bitmap + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_complement( + const ia_css_kernel_bitmap_t bitmap); + +/*! Create the union of two kernel bitmaps + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + @return bitmap0 | bitmap1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_union( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! Create the intersection of two kernel bitmaps + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + @return bitmap0 & bitmap1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_intersection( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! Check if the kernel bitmaps is empty + + @param bitmap[in] kernel bitmap + + @return bitmap == 0 + */ +extern bool ia_css_is_kernel_bitmap_empty( + const ia_css_kernel_bitmap_t bitmap); + +/*! Check if the intersection of two kernel bitmaps is empty + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + @return (bitmap0 & bitmap1) == 0 + */ +extern bool ia_css_is_kernel_bitmap_intersection_empty( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! Check if the second kernel bitmap is a subset of the first (or equal) + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + Note: An empty set is always a subset, this function + returns true if bitmap 1 is empty + + @return (bitmap0 & bitmap1) == bitmap1 + */ +extern bool ia_css_is_kernel_bitmap_subset( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! 
Check if the kernel bitmaps are equal
+
+ @param	bitmap0[in]			kernel bitmap 0
+ @param	bitmap1[in]			kernel bitmap 1
+
+ @return bitmap0 == bitmap1
+ */
+extern bool ia_css_is_kernel_bitmap_equal(
+	const ia_css_kernel_bitmap_t			bitmap0,
+	const ia_css_kernel_bitmap_t			bitmap1);
+
+/*! Right shift kernel bitmap
+
+ @param	bitmap[in]			kernel bitmap
+
+ @return bitmap >> 1
+ */
+extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_shift(
+	const ia_css_kernel_bitmap_t			bitmap);
+
+/*! Check if the kernel bitmap contains exactly one set bit
+
+ @param	bitmap[in]			kernel bitmap
+
+ @return weight(bitmap) == 1
+ */
+extern bool ia_css_is_kernel_bitmap_onehot(
+	const ia_css_kernel_bitmap_t			bitmap);
+
+/*! Checks whether a specific kernel bit is set
+ * @return bitmap[index] == 1
+ */
+extern int ia_css_is_kernel_bitmap_set(
+	const ia_css_kernel_bitmap_t			bitmap,
+	const unsigned int				index);
+
+/*! Create the union of a kernel bitmap with a onehot bitmap
+ * with a bit set at index
+
+ @return bitmap[index] |= 1
+ */
+extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_set(
+	const ia_css_kernel_bitmap_t			bitmap,
+	const unsigned int				index);
+
+/*! Creates kernel bitmap using a uint64 value.
+ * @return bitmap with the same bits set as in value (provided that width of bitmap is sufficient).
+ */
+extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_create_from_uint64(
+	const uint64_t	value);
+
+/*! Converts an ia_css_kernel_bitmap_t type to uint64_t. Note that if
+ * ia_css_kernel_bitmap_t contains more than 64 bits, only the lowest 64 bits
+ * are returned.
+ * @return uint64_t representation of value
+*/
+extern uint64_t ia_css_kernel_bitmap_to_uint64(
+	const ia_css_kernel_bitmap_t value);
+
+/*! Creates a kernel bitmap with the bit at index 'index' removed.
+ * @return ~(1 << index) & bitmap
+ */
+extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_unset(
+	const ia_css_kernel_bitmap_t	bitmap,
+	const unsigned int		index);
+
+/*! Set a previously clear field of a kernel bitmap at index
+
+ @return if bitmap[index] == 0, bitmap[index] -> 1, else 0
+ */
+extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_set_unique(
+	const ia_css_kernel_bitmap_t			bitmap,
+	const unsigned int				index);
+
+/*! Create a onehot kernel bitmap with a bit set at index
+
+ @return bitmap[index] = 1
+ */
+extern ia_css_kernel_bitmap_t ia_css_kernel_bit_mask(
+	const unsigned int				index);
+
+/*! Create a random bitmap
+
+ @return kernel bitmap with a random set of bits
+ */
+extern ia_css_kernel_bitmap_t ia_css_kernel_ran_bitmap(void);
+
+#endif /* __IA_CSS_KERNEL_BITMAP_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_psys_kernel_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_psys_kernel_trace.h
new file mode 100644
index 000000000000..1ba29c7ab77e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_psys_kernel_trace.h
@@ -0,0 +1,103 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_KERNEL_TRACE_H
+#define __IA_CSS_PSYS_KERNEL_TRACE_H
+
+#include "ia_css_psysapi_trace.h"
+
+#define PSYS_KERNEL_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF
+
+/* Default sub-module tracing config */
+#if (!defined(PSYSAPI_KERNEL_TRACING_OVERRIDE))
+	#define PSYS_KERNEL_TRACE_LEVEL_CONFIG \
+		PSYS_KERNEL_TRACE_LEVEL_CONFIG_DEFAULT
+#endif
+
+/* If PSYSAPI_KERNEL_TRACING_OVERRIDE is defined, the sub-module
+ * specific trace setting below is used; otherwise the module (PSYSAPI)
+ * trace setting is inherited
+ */
+#if (defined(PSYSAPI_KERNEL_TRACING_OVERRIDE))
+	/* Module/sub-module specific trace setting */
+	#if PSYSAPI_KERNEL_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF
+		/* PSYSAPI_TRACE_LOG_LEVEL_OFF */
+		#define PSYSAPI_KERNEL_TRACE_METHOD \
+			IA_CSS_TRACE_METHOD_NATIVE
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_ASSERT \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_ERROR \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_WARNING \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_INFO \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_DEBUG \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_VERBOSE \
+			IA_CSS_TRACE_LEVEL_DISABLED
+	#elif PSYSAPI_KERNEL_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL
+		/* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */
+		#define PSYSAPI_KERNEL_TRACE_METHOD \
+			IA_CSS_TRACE_METHOD_NATIVE
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_ASSERT \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_ERROR \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_WARNING \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_INFO \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_DEBUG \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_VERBOSE \
+			IA_CSS_TRACE_LEVEL_DISABLED
+	#elif PSYSAPI_KERNEL_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG
+		/* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */
+		#define PSYSAPI_KERNEL_TRACE_METHOD \
+			IA_CSS_TRACE_METHOD_NATIVE
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_ASSERT \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_ERROR \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_WARNING \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_INFO \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_DEBUG \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_KERNEL_TRACE_LEVEL_VERBOSE \
+			IA_CSS_TRACE_LEVEL_ENABLED
+	#else
+		#error "No PSYSAPI_KERNEL Tracing level defined"
+	#endif
+#else
+	/* Inherit Module trace setting */
+	#define PSYSAPI_KERNEL_TRACE_METHOD \
+		PSYSAPI_TRACE_METHOD
+	#define PSYSAPI_KERNEL_TRACE_LEVEL_ASSERT \
+		PSYSAPI_TRACE_LEVEL_ASSERT
+	#define PSYSAPI_KERNEL_TRACE_LEVEL_ERROR \
+		PSYSAPI_TRACE_LEVEL_ERROR
+	#define PSYSAPI_KERNEL_TRACE_LEVEL_WARNING \
+		PSYSAPI_TRACE_LEVEL_WARNING
+	#define PSYSAPI_KERNEL_TRACE_LEVEL_INFO \
+		PSYSAPI_TRACE_LEVEL_INFO
+	#define PSYSAPI_KERNEL_TRACE_LEVEL_DEBUG \
+		PSYSAPI_TRACE_LEVEL_DEBUG
+	#define PSYSAPI_KERNEL_TRACE_LEVEL_VERBOSE \
+		PSYSAPI_TRACE_LEVEL_VERBOSE
+#endif
+
+#endif /* __IA_CSS_PSYS_KERNEL_TRACE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/src/ia_css_kernel_bitmap.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/src/ia_css_kernel_bitmap.c
new file mode 100644
index 000000000000..61ea1fb290a6
--- /dev/null
+++
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/kernel/src/ia_css_kernel_bitmap.c @@ -0,0 +1,414 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include +#include +#include +#include +#include "ia_css_psys_kernel_trace.h" + +static int ia_css_kernel_bitmap_compute_weight( + const ia_css_kernel_bitmap_t bitmap); + +bool ia_css_is_kernel_bitmap_intersection_empty( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + ia_css_kernel_bitmap_t intersection; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_intersection_empty(): enter:\n"); + + intersection = ia_css_kernel_bitmap_intersection(bitmap0, bitmap1); + return ia_css_is_kernel_bitmap_empty(intersection); +} + +bool ia_css_is_kernel_bitmap_empty( + const ia_css_kernel_bitmap_t bitmap) +{ + unsigned int i; + bool is_empty = true; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_empty(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + is_empty &= bitmap.data[i] == 0; + } +#else + NOT_USED(i); + is_empty = (bitmap == 0); +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return is_empty; +} + +bool ia_css_is_kernel_bitmap_equal( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + unsigned int i; + bool is_equal = true; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_equal(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + is_equal = is_equal && (bitmap0.data[i] == bitmap1.data[i]); + } +#else + NOT_USED(i); + is_equal = (bitmap0 == bitmap1); +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return is_equal; +} + +bool ia_css_is_kernel_bitmap_onehot( + const ia_css_kernel_bitmap_t bitmap) +{ + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_onehot(): enter:\n"); + return ia_css_kernel_bitmap_compute_weight(bitmap) == 1; +} + +bool ia_css_is_kernel_bitmap_subset( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + ia_css_kernel_bitmap_t intersection; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_subset(): enter:\n"); + + intersection = ia_css_kernel_bitmap_intersection(bitmap0, bitmap1); + return ia_css_is_kernel_bitmap_equal(intersection, bitmap1); +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_clear(void) +{ + unsigned int i; + ia_css_kernel_bitmap_t bitmap; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_clear(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + bitmap.data[i] = 0; + } +#else + NOT_USED(i); + bitmap = 0; +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return bitmap; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_complement( + const ia_css_kernel_bitmap_t bitmap) +{ + unsigned int i; + ia_css_kernel_bitmap_t result; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + 
"ia_css_kernel_bitmap_complement(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + result.data[i] = ~bitmap.data[i]; + } +#else + NOT_USED(i); + result = ~bitmap; +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return result; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_union( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + unsigned int i; + ia_css_kernel_bitmap_t result; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_union(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + result.data[i] = (bitmap0.data[i] | bitmap1.data[i]); + } +#else + NOT_USED(i); + result = (bitmap0 | bitmap1); +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return result; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_intersection( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + unsigned int i; + ia_css_kernel_bitmap_t result; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_intersection(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + result.data[i] = (bitmap0.data[i] & bitmap1.data[i]); + } +#else + NOT_USED(i); + result = (bitmap0 & bitmap1); +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return result; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_set( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index) +{ + ia_css_kernel_bitmap_t bit_mask; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_set(): enter:\n"); + + bit_mask = ia_css_kernel_bit_mask(index); + return ia_css_kernel_bitmap_union(bitmap, bit_mask); +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_create_from_uint64( + const uint64_t value) +{ + unsigned int i; + ia_css_kernel_bitmap_t result; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_create_from_uint64(): enter:\n"); + +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + result = ia_css_kernel_bitmap_clear(); + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + /* masking is done implictly, the MSB bits of casting will be chopped off */ + result.data[i] = (IA_CSS_KERNEL_BITMAP_ELEM_TYPE) + (value >> (i * IA_CSS_KERNEL_BITMAP_ELEM_BITS)); + } +#if IA_CSS_KERNEL_BITMAP_BITS < 64 + if ((value >> IA_CSS_KERNEL_BITMAP_BITS) != 0) { + IA_CSS_TRACE_0(PSYSAPI_KERNEL, ERROR, + "ia_css_kernel_bitmap_create_from_uint64(): " + "kernel bitmap is not wide enough to encode value\n"); + assert(0); + } +#endif +#else + NOT_USED(i); + result = value; +#endif /* IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS */ + return result; +} + +uint64_t ia_css_kernel_bitmap_to_uint64( + const ia_css_kernel_bitmap_t value) +{ + const unsigned int bits64 = sizeof(uint64_t) * 8; + const unsigned int nof_elems_bits64 = bits64 / IA_CSS_KERNEL_BITMAP_ELEM_BITS; + unsigned int i; + uint64_t res = 0; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_to_uint64(): enter:\n"); + + assert((bits64 % IA_CSS_KERNEL_BITMAP_ELEM_BITS) == 0); + assert(nof_elems_bits64 > 0); + +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < nof_elems_bits64; i++) { + res |= ((uint64_t)(value.data[i]) << (i * IA_CSS_KERNEL_BITMAP_ELEM_BITS)); + } + for (i = nof_elems_bits64; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + assert(value.data[i] == 0); + } + return res; +#else + (void)i; + (void)res; + (void)nof_elems_bits64; + return (uint64_t)value; +#endif /* 
IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS */ +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_unset( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index) +{ + ia_css_kernel_bitmap_t result; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_unset(): enter:\n"); + + result = ia_css_kernel_bit_mask(index); + result = ia_css_kernel_bitmap_complement(result); + return ia_css_kernel_bitmap_intersection(bitmap, result); +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_set_unique( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index) +{ + ia_css_kernel_bitmap_t ret; + ia_css_kernel_bitmap_t bit_mask; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_set_unique(): enter:\n"); + + ret = ia_css_kernel_bitmap_clear(); + bit_mask = ia_css_kernel_bit_mask(index); + + if (ia_css_is_kernel_bitmap_intersection_empty(bitmap, bit_mask) + && !ia_css_is_kernel_bitmap_empty(bit_mask)) { + ret = ia_css_kernel_bitmap_union(bitmap, bit_mask); + } + return ret; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bit_mask( + const unsigned int index) +{ + unsigned int elem_index; + unsigned int elem_bit_index; + ia_css_kernel_bitmap_t bit_mask = ia_css_kernel_bitmap_clear(); + + /* Assert disabled for staging, because some PGs do not satisfy this condition */ + /* assert(index < IA_CSS_KERNEL_BITMAP_BITS); */ + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bit_mask(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + if (index < IA_CSS_KERNEL_BITMAP_BITS) { + elem_index = index / IA_CSS_KERNEL_BITMAP_ELEM_BITS; + elem_bit_index = index % IA_CSS_KERNEL_BITMAP_ELEM_BITS; + assert(elem_index < IA_CSS_KERNEL_BITMAP_NOF_ELEMS); + + bit_mask.data[elem_index] = 1 << elem_bit_index; + } +#else + NOT_USED(elem_index); + NOT_USED(elem_bit_index); + if (index < IA_CSS_KERNEL_BITMAP_BITS) { + bit_mask = (ia_css_kernel_bitmap_t)1 << index; + } +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return bit_mask; +} + + +static int ia_css_kernel_bitmap_compute_weight( + const ia_css_kernel_bitmap_t bitmap) +{ + ia_css_kernel_bitmap_t loc_bitmap; + int weight = 0; + int i; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_compute_weight(): enter:\n"); + + loc_bitmap = bitmap; + + /* In fact; do not need the iterator "i" */ + for (i = 0; (i < IA_CSS_KERNEL_BITMAP_BITS) && + !ia_css_is_kernel_bitmap_empty(loc_bitmap); i++) { + weight += ia_css_is_kernel_bitmap_set(loc_bitmap, 0); + loc_bitmap = ia_css_kernel_bitmap_shift(loc_bitmap); + } + + return weight; +} + +int ia_css_is_kernel_bitmap_set( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index) +{ + unsigned int elem_index; + unsigned int elem_bit_index; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_set(): enter:\n"); + + /* Assert disabled for staging, because some PGs do not satisfy this condition */ + /* assert(index < IA_CSS_KERNEL_BITMAP_BITS); */ + +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + elem_index = index / IA_CSS_KERNEL_BITMAP_ELEM_BITS; + elem_bit_index = index % IA_CSS_KERNEL_BITMAP_ELEM_BITS; + assert(elem_index < IA_CSS_KERNEL_BITMAP_NOF_ELEMS); + return (((bitmap.data[elem_index] >> elem_bit_index) & 0x1) == 1); +#else + NOT_USED(elem_index); + NOT_USED(elem_bit_index); + return (((bitmap >> index) & 0x1) == 1); +#endif /* IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS */ +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_shift( + const ia_css_kernel_bitmap_t bitmap) +{ + int i; + unsigned int lsb_current_elem = 0; + unsigned int 
lsb_previous_elem = 0;
+	ia_css_kernel_bitmap_t loc_bitmap;
+
+	IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE,
+		"ia_css_kernel_bitmap_shift(): enter:\n");
+
+	loc_bitmap = bitmap;
+
+#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS
+	for (i = IA_CSS_KERNEL_BITMAP_NOF_ELEMS - 1; i >= 0; i--) {
+		lsb_current_elem = bitmap.data[i] & 0x01;
+		loc_bitmap.data[i] >>= 1;
+		loc_bitmap.data[i] |= (lsb_previous_elem << (IA_CSS_KERNEL_BITMAP_ELEM_BITS - 1));
+		lsb_previous_elem = lsb_current_elem;
+	}
+#else
+	NOT_USED(i);
+	NOT_USED(lsb_current_elem);
+	NOT_USED(lsb_previous_elem);
+	loc_bitmap >>= 1;
+#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */
+	return loc_bitmap;
+}
+
+int ia_css_kernel_bitmap_print(
+	const ia_css_kernel_bitmap_t			bitmap,
+	void						*fid)
+{
+	int retval = -1;
+	int bit;
+	unsigned int bit_index = 0;
+	ia_css_kernel_bitmap_t loc_bitmap;
+
+	IA_CSS_TRACE_0(PSYSAPI_KERNEL, INFO,
+		"ia_css_kernel_bitmap_print(): enter:\n");
+
+	NOT_USED(fid);
+	NOT_USED(bit);
+
+	IA_CSS_TRACE_0(PSYSAPI_KERNEL, INFO, "kernel bitmap {\n");
+
+	loc_bitmap = bitmap;
+
+	for (bit_index = 0; (bit_index < IA_CSS_KERNEL_BITMAP_BITS) &&
+		!ia_css_is_kernel_bitmap_empty(loc_bitmap); bit_index++) {
+
+		bit = ia_css_is_kernel_bitmap_set(loc_bitmap, 0);
+		loc_bitmap = ia_css_kernel_bitmap_shift(loc_bitmap);
+		IA_CSS_TRACE_2(PSYSAPI_KERNEL, INFO, "\t%d\t = %d\n", bit_index, bit);
+	}
+	IA_CSS_TRACE_0(PSYSAPI_KERNEL, INFO, "}\n");
+
+	retval = 0;
+	return retval;
+}
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.h
new file mode 100644
index 000000000000..8295f3892f86
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.h
@@ -0,0 +1,296 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PROGRAM_GROUP_PARAM_H
+#define __IA_CSS_PROGRAM_GROUP_PARAM_H
+
+/*! \file */
+
+/** @file ia_css_program_group_param.h
+ *
+ * Define the methods on the program group parameter object that are not part
+ * of a single interface
+ */
+#include
+
+#include
+
+#include	/* ia_css_kernel_bitmap_t */
+
+#include
+
+/*! Get the stored size of the program group parameter object
+
+ @param	param[in]	program group parameter object
+
+ @return size, 0 on error
+ */
+extern size_t ia_css_program_group_param_get_size(
+	const ia_css_program_group_param_t		*param);
+
+/*! Initialize program_group_param
+
+ @param	blob[in]	program group parameter object
+ @param	program_count[in]	number of programs.
+ @param	terminal_count[in]	number of terminals.
+ @param	fragment_count[in]	number of fragments.
+ @param	frame_format_types[in]	frame format type for each terminal.
+
+ @return 0 if success, else failure.
+ */
+extern int ia_css_program_group_param_init(
+	ia_css_program_group_param_t	*blob,
+	const uint8_t	program_count,
+	const uint8_t	terminal_count,
+	const uint16_t	fragment_count,
+	const enum ia_css_frame_format_type *frame_format_types);
+/*!
Get the program parameter object from a program group parameter object + + @param program_group_param[in] program group parameter object + @param i[in] program parameter index + + @return program parameter pointer, NULL on error + */ +extern ia_css_program_param_t *ia_css_program_group_param_get_program_param( + const ia_css_program_group_param_t *param, + const int i); + +/*! Get the terminal parameter object from a program group parameter object + + @param program_group_param[in] program group parameter object + @param i[in] terminal parameter index + + @return terminal parameter pointer, NULL on error + */ +extern ia_css_terminal_param_t *ia_css_program_group_param_get_terminal_param( + const ia_css_program_group_param_t *param, + const int i); + +/*! Get the fragment count from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return fragment count, 0 on error + */ +extern uint16_t ia_css_program_group_param_get_fragment_count( + const ia_css_program_group_param_t *param); + +/*! Get the program count from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return program count, 0 on error + */ +extern uint8_t ia_css_program_group_param_get_program_count( + const ia_css_program_group_param_t *param); + +/*! Get the terminal count from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return terminal count, 0 on error + */ +extern uint8_t ia_css_program_group_param_get_terminal_count( + const ia_css_program_group_param_t *param); + +/*! Set the protocol version in a program group parameter object + + @param program_group_param[in] program group parameter object + @param protocol_version[in] protocol version + + @return nonzero on error +*/ +extern int +ia_css_program_group_param_set_protocol_version( + ia_css_program_group_param_t *param, + uint8_t protocol_version); + +/*! Get the protocol version from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return protocol version +*/ +extern uint8_t +ia_css_program_group_param_get_protocol_version( + const ia_css_program_group_param_t *param); + +/*! Set the kernel enable bitmap from a program group parameter object + + @param param[in] program group parameter object + @param bitmap[in] kernel enable bitmap + + @return non-zero on error + */ +extern int ia_css_program_group_param_set_kernel_enable_bitmap( + ia_css_program_group_param_t *param, + const ia_css_kernel_bitmap_t bitmap); + +/*! Get the kernel enable bitmap from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return kernel enable bitmap, 0 on error +*/ +extern ia_css_kernel_bitmap_t +ia_css_program_group_param_get_kernel_enable_bitmap( + const ia_css_program_group_param_t *param); + +/*! Get the stored size of the program parameter object + + @param param[in] program parameter object + + @return size, 0 on error + */ +extern size_t ia_css_program_param_get_size( + const ia_css_program_param_t *param); + +/*! Set the kernel enable bitmap from a program parameter object + + @param program_param[in] program parameter object + @param bitmap[in] kernel enable bitmap + + @return non-zero on error + */ +extern int ia_css_program_param_set_kernel_enable_bitmap( + ia_css_program_param_t *program_param, + const ia_css_kernel_bitmap_t bitmap); + +/*! 
Get the kernel enable bitmap from a program parameter object + + @param program_param[in] program parameter object + + Note: This function returns in fact the kernel enable of the program group + parameters + + @return kernel enable bitmap, 0 on error + */ +extern ia_css_kernel_bitmap_t ia_css_program_param_get_kernel_enable_bitmap( + const ia_css_program_param_t *param); + +/*! Get the stored size of the terminal parameter object + + @param param[in] terminal parameter object + + @return size, 0 on error + */ +extern size_t ia_css_terminal_param_get_size( + const ia_css_terminal_param_t *param); + +/*! Get the kernel enable bitmap from a terminal parameter object + + @param terminal_param[in] terminal parameter object + + Note: This function returns in fact the kernel enable of the program group + parameters + + @return kernel enable bitmap, 0 on error + */ +extern ia_css_kernel_bitmap_t ia_css_terminal_param_get_kernel_enable_bitmap( + const ia_css_terminal_param_t *param); + +/*! Get the parent object for this terminal param. + + @param terminal_param[in] terminal parameter object + + @return parent program group param object + */ +extern ia_css_program_group_param_t *ia_css_terminal_param_get_parent( + const ia_css_terminal_param_t *param); + +/*! Get the data format type associated with the terminal. + + @param terminal_param[in] terminal parameter object + + @return data format type (ia_css_data_format_type_t) + */ +extern ia_css_frame_format_type_t ia_css_terminal_param_get_frame_format_type( + const ia_css_terminal_param_t *terminal_param); + +/*! Set the data format type associated with the terminal. + + @param terminal_param[in] terminal parameter object + @param data_format_type[in] data format type + + @return non-zero on error. + */ +extern int ia_css_terminal_param_set_frame_format_type( + ia_css_terminal_param_t *terminal_param, + const ia_css_frame_format_type_t data_format_type); + +/*! Get bits per pixel on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + + @return bits per pixel + */ +extern uint8_t ia_css_terminal_param_get_bpp( + const ia_css_terminal_param_t *terminal_param); + +/*! Set bits per pixel on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + @param bpp[in] bits per pixel + + @return non-zero on error. + */ +extern int ia_css_terminal_param_set_bpp( + ia_css_terminal_param_t *terminal_param, + const uint8_t bpp); + +/*! Get dimensions on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + @param dimensions[out] dimension array + + @return non-zero on error. + */ +extern int ia_css_terminal_param_get_dimensions( + const ia_css_terminal_param_t *terminal_param, + uint16_t dimensions[IA_CSS_N_DATA_DIMENSION]); + +/*! Set dimensions on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + @param dimensions[in] dimension array + + @return non-zero on error. + */ +extern int ia_css_terminal_param_set_dimensions( + ia_css_terminal_param_t *terminal_param, + const uint16_t dimensions[IA_CSS_N_DATA_DIMENSION]); + +/*! Get stride on the frame associated with the terminal. + + @param terminal_param[in] terminal parameter object + + @return stride of the frame to be attached. + */ +extern uint32_t ia_css_terminal_param_get_stride( + const ia_css_terminal_param_t *terminal_param); + +/*! Set stride on the frame associated with the terminal. 
+ + @param terminal_param[in] terminal parameter object + @param stride[in] stride + + @return non-zero on error. + */ +extern int ia_css_terminal_param_set_stride( + ia_css_terminal_param_t *terminal_param, + const uint32_t stride); + +#endif /* __IA_CSS_PROGRAM_GROUP_PARAM_H */ + + + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.sim.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.sim.h new file mode 100644 index 000000000000..7821f8147a1a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.sim.h @@ -0,0 +1,153 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_PARAM_SIM_H +#define __IA_CSS_PROGRAM_GROUP_PARAM_SIM_H + +/*! \file */ + +/** @file ia_css_program_group_param.sim.h + * + * Define the methods on the program group parameter object: Simulation only + */ +#include + +#include + +#include + +/* Simulation */ + +/*! Create a program group parameter object from specification + + @param specification[in] specification (index) + @param manifest[in] program group manifest + + @return NULL on error + */ +extern ia_css_program_group_param_t *ia_css_program_group_param_create( + const unsigned int specification, + const ia_css_program_group_manifest_t *manifest); + +/*! Destroy the program group parameter object + + @param program_group_param[in] program group parameter object + + @return NULL + */ +extern ia_css_program_group_param_t *ia_css_program_group_param_destroy( + ia_css_program_group_param_t *param); + +/*! Compute the size of storage required for allocating + * the program group parameter object + + @param program_count[in] Number of programs in the process group + @param terminal_count[in] Number of terminals on the process group + @param fragment_count[in] Number of fragments on the terminals of + the process group + + @return 0 on error + */ +size_t ia_css_sizeof_program_group_param( + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count); + +/*! Allocate (the store of) a program group parameter object + + @param program_count[in] Number of programs in the process group + @param terminal_count[in] Number of terminals on the process group + @param fragment_count[in] Number of fragments on the terminals of + the process group + + @return program group parameter pointer, NULL on error + */ +extern ia_css_program_group_param_t *ia_css_program_group_param_alloc( + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count); + +/*! Free (the store of) a program group parameter object + + @param program_group_param[in] program group parameter object + + @return NULL + */ +extern ia_css_program_group_param_t *ia_css_program_group_param_free( + ia_css_program_group_param_t *param); + +/*! 
Print the program group parameter object to file/stream
+
+ @param	param[in]	program group parameter object
+ @param	fid[out]	file/stream handle
+
+ @return < 0 on error
+ */
+extern int ia_css_program_group_param_print(
+	const ia_css_program_group_param_t	*param,
+	void					*fid);
+
+/*! Allocate (the store of) a program parameter object
+
+ @return program parameter pointer, NULL on error
+ */
+extern ia_css_program_param_t *ia_css_program_param_alloc(void);
+
+/*! Free (the store of) a program parameter object
+
+ @param	param[in]	program parameter object
+
+ @return NULL
+ */
+extern ia_css_program_param_t *ia_css_program_param_free(
+	ia_css_program_param_t			*param);
+
+/*! Print the program parameter object to file/stream
+
+ @param	param[in]	program parameter object
+ @param	fid[out]	file/stream handle
+
+ @return < 0 on error
+ */
+extern int ia_css_program_param_print(
+	const ia_css_program_param_t		*param,
+	void					*fid);
+
+/*! Allocate (the store of) a terminal parameter object
+
+ @return terminal parameter pointer, NULL on error
+ */
+extern ia_css_terminal_param_t *ia_css_terminal_param_alloc(void);
+
+/*! Free (the store of) a terminal parameter object
+
+ @param	param[in]	terminal parameter object
+
+ @return NULL
+ */
+extern ia_css_terminal_param_t *ia_css_terminal_param_free(
+	ia_css_terminal_param_t			*param);
+
+/*! Print the terminal parameter object to file/stream
+
+ @param	param[in]	terminal parameter object
+ @param	fid[out]	file/stream handle
+
+ @return < 0 on error
+ */
+extern int ia_css_terminal_param_print(
+	const ia_css_terminal_param_t		*param,
+	void					*fid);
+
+#endif /* __IA_CSS_PROGRAM_GROUP_PARAM_SIM_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param_types.h
new file mode 100644
index 000000000000..d61b94cfb6bc
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param_types.h
@@ -0,0 +1,67 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PROGRAM_GROUP_PARAM_TYPES_H
+#define __IA_CSS_PROGRAM_GROUP_PARAM_TYPES_H
+
+/*! \file */
+
+/** @file ia_css_program_group_param_types.h
+ *
+ * Define the parameter objects that are necessary to create the process
+ * groups i.e. enable parameters and parameters to set-up frame descriptors
+ */
+
+#include
+#include	/* ia_css_kernel_bitmap_t */
+#include
+
+#include
+/*! Make this public so that the driver can populate size, bpp and
+ * dimensions for all terminals.
+ *
+ * Currently one API is provided to get frame_format_type.
+ *
+ * frame_format_type is set during ia_css_terminal_param_init();
+ * its value is const and binary specific.
+ */
+struct ia_css_terminal_param_s {
+	uint32_t size; /**< Size of this structure */
+	/**< Indicates whether this is a generic type or a built-in
+	 * type with a variable size descriptor
+	 */
+	ia_css_frame_format_type_t frame_format_type;
+	/**< Offset to add to reach the parent. This is a negative value. */
+	int32_t parent_offset;
+	uint16_t dimensions[IA_CSS_N_DATA_DIMENSION];/**< Logical dimensions */
+	/**< Mapping to the index field of the terminal descriptor */
+	uint16_t index[IA_CSS_N_DATA_DIMENSION];
+	/**< Logical fragment dimensions;
+	 * TODO: fragment dimensions can be different per fragment
+	 */
+	uint16_t fragment_dimensions[IA_CSS_N_DATA_DIMENSION];
+	uint32_t stride;/**< Stride of a frame */
+	uint16_t offset;/**< Offset in bytes to the first fragment */
+	uint8_t bpp; /**< Bits per pixel */
+	uint8_t bpe; /**< Bits per element */
+};
+
+typedef struct ia_css_program_group_param_s	ia_css_program_group_param_t;
+typedef struct ia_css_program_param_s	ia_css_program_param_t;
+typedef struct ia_css_terminal_param_s	ia_css_terminal_param_t;
+
+#endif /* __IA_CSS_PROGRAM_GROUP_PARAM_TYPES_H */
+
+
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_psys_param_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_psys_param_trace.h
new file mode 100644
index 000000000000..f59dfbf165e4
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/interface/ia_css_psys_param_trace.h
@@ -0,0 +1,102 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_PARAM_TRACE_H
+#define __IA_CSS_PSYS_PARAM_TRACE_H
+
+#include "ia_css_psysapi_trace.h"
+
+#define PSYS_PARAM_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF
+
+/* Default sub-module tracing config */
+#if (!defined(PSYSAPI_PARAM_TRACING_OVERRIDE))
+	#define PSYS_PARAM_TRACE_LEVEL_CONFIG PSYS_PARAM_TRACE_LEVEL_CONFIG_DEFAULT
+#endif
+
+/* If PSYSAPI_PARAM_TRACING_OVERRIDE is defined, the sub-module
+ * specific trace setting below is used; otherwise the module (PSYSAPI)
+ * trace setting is inherited
+ */
+#if (defined(PSYSAPI_PARAM_TRACING_OVERRIDE))
+	/* Module/sub-module specific trace setting */
+	#if PSYSAPI_PARAM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF
+		/* PSYSAPI_TRACE_LOG_LEVEL_OFF */
+		#define PSYSAPI_PARAM_TRACE_METHOD \
+			IA_CSS_TRACE_METHOD_NATIVE
+		#define PSYSAPI_PARAM_TRACE_LEVEL_ASSERT \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_ERROR \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_WARNING \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_INFO \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_DEBUG \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_VERBOSE \
+			IA_CSS_TRACE_LEVEL_DISABLED
+	#elif PSYSAPI_PARAM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL
+		/* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */
+		#define PSYSAPI_PARAM_TRACE_METHOD \
+			IA_CSS_TRACE_METHOD_NATIVE
+		#define PSYSAPI_PARAM_TRACE_LEVEL_ASSERT \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_ERROR \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_WARNING \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_INFO \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_DEBUG \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_VERBOSE \
+			IA_CSS_TRACE_LEVEL_DISABLED
+	#elif PSYSAPI_PARAM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG
+		/* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */
+		#define PSYSAPI_PARAM_TRACE_METHOD \
+			IA_CSS_TRACE_METHOD_NATIVE
+		#define PSYSAPI_PARAM_TRACE_LEVEL_ASSERT \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_ERROR \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_WARNING \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_INFO \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_DEBUG \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_VERBOSE \
+			IA_CSS_TRACE_LEVEL_ENABLED
+	#else
+		#error "No PSYSAPI_PARAM Tracing level defined"
+	#endif
+#else
+	/* Inherit Module trace setting */
+	#define PSYSAPI_PARAM_TRACE_METHOD \
+		PSYSAPI_TRACE_METHOD
+	#define PSYSAPI_PARAM_TRACE_LEVEL_ASSERT \
+		PSYSAPI_TRACE_LEVEL_ASSERT
+	#define PSYSAPI_PARAM_TRACE_LEVEL_ERROR \
+		PSYSAPI_TRACE_LEVEL_ERROR
+	#define PSYSAPI_PARAM_TRACE_LEVEL_WARNING \
+		PSYSAPI_TRACE_LEVEL_WARNING
+	#define PSYSAPI_PARAM_TRACE_LEVEL_INFO \
+		PSYSAPI_TRACE_LEVEL_INFO
+	#define PSYSAPI_PARAM_TRACE_LEVEL_DEBUG \
+		PSYSAPI_TRACE_LEVEL_DEBUG
+	#define PSYSAPI_PARAM_TRACE_LEVEL_VERBOSE \
+		PSYSAPI_TRACE_LEVEL_VERBOSE
+#endif
+
+#endif /* __IA_CSS_PSYS_PARAM_TRACE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param.c
new file mode 100644
index 000000000000..7cffb012ab39
--- /dev/null
+++
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param.c @@ -0,0 +1,772 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ia_css_psys_param_trace.h" + +static int +ia_css_terminal_param_init(ia_css_terminal_param_t *terminal_param, + uint32_t offset, + enum ia_css_frame_format_type frame_format_type); + +static int +ia_css_program_param_init(ia_css_program_param_t *program_param, + int32_t offset); + +size_t ia_css_sizeof_program_group_param( + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_sizeof_program_group_param(): enter:\n"); + + verifexit(program_count != 0); + verifexit(terminal_count != 0); + verifexit(fragment_count != 0); + + size += sizeof(ia_css_program_group_param_t); + size += program_count * fragment_count * sizeof(ia_css_program_param_t); + size += terminal_count * sizeof(ia_css_terminal_param_t); +EXIT: + if (0 == program_count || 0 == terminal_count || 0 == fragment_count) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_sizeof_program_group_param invalid argument\n"); + } + return size; +} + +size_t ia_css_program_group_param_get_size( + const ia_css_program_group_param_t *program_group_param) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_size(): enter:\n"); + + if (program_group_param != NULL) { + size = program_group_param->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_size invalid argument\n"); + } + return size; +} + +size_t ia_css_program_param_get_size( + const ia_css_program_param_t *param) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_param_get_size(): enter:\n"); + + if (param != NULL) { + size = param->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_param_get_size invalid argument\n"); + } + return size; +} + +ia_css_program_param_t *ia_css_program_group_param_get_program_param( + const ia_css_program_group_param_t *param, + const int i) +{ + ia_css_program_param_t *program_param = NULL; + ia_css_program_param_t *program_param_base; + int program_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_program_param(): enter:\n"); + + verifexit(param != NULL); + + program_count = + (int)ia_css_program_group_param_get_program_count(param); + + verifexit(i < program_count); + + program_param_base = (ia_css_program_param_t *) + (((char *)param) + param->program_param_offset); + + program_param = &program_param_base[i]; + +EXIT: + if (NULL == param || i >= program_count) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_program_param invalid argument\n"); + } + return program_param; +} + +size_t ia_css_terminal_param_get_size( + const 
ia_css_terminal_param_t *param) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_size(): enter:\n"); + + if (param != NULL) { + size = param->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_size invalid argument\n"); + } + + return size; +} + +ia_css_terminal_param_t *ia_css_program_group_param_get_terminal_param( + const ia_css_program_group_param_t *param, + const int i) +{ + ia_css_terminal_param_t *terminal_param = NULL; + ia_css_terminal_param_t *terminal_param_base; + int program_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_terminal_param(): enter:\n"); + + verifexit(param != NULL); + + program_count = + (int)ia_css_program_group_param_get_terminal_count(param); + + verifexit(i < program_count); + + terminal_param_base = (ia_css_terminal_param_t *) + (((char *)param) + param->terminal_param_offset); + terminal_param = &terminal_param_base[i]; +EXIT: + if (NULL == param || i >= program_count) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_terminal_param invalid argument\n"); + } + return terminal_param; +} + +uint8_t ia_css_program_group_param_get_program_count( + const ia_css_program_group_param_t *param) +{ + uint8_t program_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_program_count(): enter:\n"); + + if (param != NULL) { + program_count = param->program_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_program_count invalid argument\n"); + } + return program_count; +} + +uint8_t ia_css_program_group_param_get_terminal_count( + const ia_css_program_group_param_t *param) +{ + uint8_t terminal_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_terminal_count(): enter:\n"); + + if (param != NULL) { + terminal_count = param->terminal_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_terminal_count invalid argument\n"); + } + return terminal_count; +} + +uint16_t ia_css_program_group_param_get_fragment_count( + const ia_css_program_group_param_t *param) +{ + uint8_t fragment_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_fragment_count(): enter:\n"); + + if (param != NULL) { + fragment_count = (uint8_t)param->fragment_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_fragment_count invalid argument\n"); + } + return fragment_count; +} + +int ia_css_program_group_param_set_protocol_version( + ia_css_program_group_param_t *param, + uint8_t protocol_version) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_set_protocol_version(): enter:\n"); + + if (param != NULL) { + param->protocol_version = protocol_version; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_group_param_set_protocol_version failed (%i)\n", + retval); + } + return retval; +} + +uint8_t ia_css_program_group_param_get_protocol_version( + const ia_css_program_group_param_t *param) +{ + uint8_t protocol_version = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_protocol_version(): enter:\n"); + + if (param != NULL) { + protocol_version = param->protocol_version; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_protocol_version invalid argument\n"); + } + return protocol_version; +} + 
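+/* Illustrative usage sketch for the accessors implemented in this file
+ * (declared in ia_css_program_group_param.h and, for the bitmap helper,
+ * ia_css_kernel_bitmap.h). It is only an example of how a host-side
+ * caller might size, initialize and fill a program group parameter
+ * object; the counts (2 programs, 2 terminals, 1 fragment), the bitmap
+ * value, the dimensions, the IA_CSS_DATA_FORMAT_YUV420 format and
+ * alloc_blob() are hypothetical stand-ins, and error handling is
+ * elided:
+ *
+ *	uint16_t dim[IA_CSS_N_DATA_DIMENSION] = {1920, 1080};
+ *	enum ia_css_frame_format_type fmt[2] = {
+ *		IA_CSS_DATA_FORMAT_YUV420, IA_CSS_DATA_FORMAT_YUV420};
+ *	size_t size = ia_css_sizeof_program_group_param(2, 2, 1);
+ *	ia_css_program_group_param_t *param = alloc_blob(size);
+ *	ia_css_terminal_param_t *term;
+ *
+ *	ia_css_program_group_param_init(param, 2, 2, 1, fmt);
+ *	ia_css_program_group_param_set_kernel_enable_bitmap(param,
+ *		ia_css_kernel_bitmap_create_from_uint64(0x3ULL));
+ *	term = ia_css_program_group_param_get_terminal_param(param, 0);
+ *	ia_css_terminal_param_set_bpp(term, 8);
+ *	ia_css_terminal_param_set_dimensions(term, dim);
+ *
+ * ia_css_terminal_param_init() sets each terminal's frame_format_type
+ * from the fmt array, so only the frame geometry is set explicitly
+ * here. In simulation builds, ia_css_program_group_param_alloc() from
+ * ia_css_program_group_param.sim.h can play the role of alloc_blob().
+ */
+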
+int ia_css_program_group_param_set_kernel_enable_bitmap( + ia_css_program_group_param_t *param, + const ia_css_kernel_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_set_kernel_enable_bitmap(): enter:\n"); + + if (param != NULL) { + param->kernel_enable_bitmap = bitmap; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_group_param_set_kernel_enable_bitmap failed (%i)\n", + retval); + } + return retval; +} + +ia_css_kernel_bitmap_t ia_css_program_group_param_get_kernel_enable_bitmap( + const ia_css_program_group_param_t *param) +{ + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_kernel_enable_bitmap(): enter:\n"); + + if (param != NULL) { + bitmap = param->kernel_enable_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_kernel_enable_bitmap invalid argument\n"); + } + return bitmap; +} + +int ia_css_program_param_set_kernel_enable_bitmap( + ia_css_program_param_t *program_param, + const ia_css_kernel_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_param_set_kernel_enable_bitmap(): enter:\n"); + + if (program_param != NULL) { + program_param->kernel_enable_bitmap = bitmap; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_param_set_kernel_enable_bitmap failed (%i)\n", + retval); + } + return retval; +} + +ia_css_kernel_bitmap_t ia_css_program_param_get_kernel_enable_bitmap( + const ia_css_program_param_t *program_param) +{ + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + char *base; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_param_get_kernel_enable_bitmap(): enter:\n"); + + verifexit(program_param != NULL); + verifexit(program_param->parent_offset != 0); + + base = (char *)((char *)program_param + program_param->parent_offset); + bitmap = ((ia_css_program_group_param_t *)base)->kernel_enable_bitmap; +EXIT: + if (NULL == program_param || 0 == program_param->parent_offset) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_param_get_kernel_enable_bitmap invalid argument\n"); + } + return bitmap; +} + +ia_css_kernel_bitmap_t ia_css_terminal_param_get_kernel_enable_bitmap( + const ia_css_terminal_param_t *param) +{ + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + char *base; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_kernel_enable_bitmap(): enter:\n"); + + verifexit(param != NULL); + verifexit(param->parent_offset != 0); + + base = (char *)((char *)param + param->parent_offset); + bitmap = ((ia_css_program_group_param_t *)base)->kernel_enable_bitmap; +EXIT: + if (NULL == param || 0 == param->parent_offset) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_kernel_enable_bitmap invalid argument\n"); + } + return bitmap; +} + +ia_css_frame_format_type_t ia_css_terminal_param_get_frame_format_type( + const ia_css_terminal_param_t *param) +{ + ia_css_frame_format_type_t ft = IA_CSS_N_FRAME_FORMAT_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_frame_format_type(): enter:\n"); + + verifexit(param != NULL); + + ft = param->frame_format_type; +EXIT: + if (NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_frame_format_type invalid argument\n"); + } + return ft; +} + +int ia_css_terminal_param_set_frame_format_type( + 
ia_css_terminal_param_t *param, + const ia_css_frame_format_type_t data_format_type) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_set_frame_format_type(): enter:\n"); + + if (param != NULL) { + param->frame_format_type = data_format_type; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_set_frame_format_type failed (%i)\n", + retval); + } + return retval; +} + +uint8_t ia_css_terminal_param_get_bpp( + const ia_css_terminal_param_t *param) +{ + uint8_t bpp = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_bpp(): enter:\n"); + + verifexit(param != NULL); + + bpp = param->bpp; + +EXIT: + if (NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_bpp invalid argument\n"); + } + return bpp; +} + +int ia_css_terminal_param_set_bpp( + ia_css_terminal_param_t *param, + const uint8_t bpp) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_set_bpp(): enter:\n"); + + if (param != NULL) { + param->bpp = bpp; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_set_bpp failed (%i)\n", retval); + } + return retval; +} + +int ia_css_terminal_param_get_dimensions( + const ia_css_terminal_param_t *param, + uint16_t dimensions[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_dimensions(): enter:\n"); + + if (param != NULL) { + dimensions[IA_CSS_COL_DIMENSION] = + param->dimensions[IA_CSS_COL_DIMENSION]; + dimensions[IA_CSS_ROW_DIMENSION] = + param->dimensions[IA_CSS_ROW_DIMENSION]; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_get_dimensions failed (%i)\n", retval); + } + return retval; +} + +int ia_css_terminal_param_set_dimensions( + ia_css_terminal_param_t *param, + const uint16_t dimensions[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_set_dimensions(): enter:\n"); + + if (param != NULL) { + param->dimensions[IA_CSS_COL_DIMENSION] = + dimensions[IA_CSS_COL_DIMENSION]; + param->dimensions[IA_CSS_ROW_DIMENSION] = + dimensions[IA_CSS_ROW_DIMENSION]; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_set_dimensions failed (%i)\n", retval); + } + return retval; +} + +int ia_css_terminal_param_set_stride( + ia_css_terminal_param_t *param, + const uint32_t stride) +{ + int retval = -1; + + verifexit(param != NULL); + param->stride = stride; + retval = 0; + +EXIT: + return retval; +} + +uint32_t ia_css_terminal_param_get_stride( + const ia_css_terminal_param_t *param) +{ + uint32_t stride = 0; + + verifexit(param != NULL); + stride = param->stride; + +EXIT: + return stride; +} + + +static int ia_css_program_param_init( + ia_css_program_param_t *program_param, + int32_t offset) +{ + int retval = -1; + + COMPILATION_ERROR_IF( + SIZE_OF_PROGRAM_PARAM_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_param_t))); + verifexit(program_param != NULL); + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_program_param_init(): enter:\n"); + + program_param->size = sizeof(ia_css_program_param_t); + /* parent is at negative offset from current program.*/ + program_param->parent_offset = -offset; + /*TODO: Kernel_bitmap setting. 
?*/ + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_param_init failed (%i)\n", retval); + } + return retval; +} + +static int +ia_css_terminal_param_init(ia_css_terminal_param_t *terminal_param, + uint32_t offset, + enum ia_css_frame_format_type frame_format_type) +{ + int retval = -1; + + COMPILATION_ERROR_IF( + SIZE_OF_TERMINAL_PARAM_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_terminal_param_t))); + verifexit(terminal_param != NULL); + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_terminal_param_init(): enter:\n"); + + terminal_param->size = sizeof(ia_css_terminal_param_t); + /* parent is at negative offset from current program.*/ + terminal_param->parent_offset = -((int32_t)offset); + /*TODO: Kernel_bitmap setting. ?*/ + terminal_param->frame_format_type = frame_format_type; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_init failed (%i)\n", retval); + } + return retval; +} + +ia_css_program_group_param_t * +ia_css_terminal_param_get_parent( + const ia_css_terminal_param_t *param) +{ + ia_css_program_group_param_t *parent = NULL; + char *base; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_parent(): enter:\n"); + + verifexit(NULL != param); + + base = (char *)((char *)param + param->parent_offset); + + parent = (ia_css_program_group_param_t *)(base); +EXIT: + if (NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_parent invalid argument\n"); + } + return parent; +} + +int ia_css_program_group_param_init( + ia_css_program_group_param_t *blob, + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count, + const enum ia_css_frame_format_type *frame_format_types) +{ + int i = 0; + char *param_base; + uint32_t offset; + int retval = -1; + + COMPILATION_ERROR_IF( + SIZE_OF_PROGRAM_GROUP_PARAM_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_group_param_t))); + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_program_group_param_init(): enter:\n"); + + assert(blob != 0); + + verifexit(blob != NULL); + verifexit(frame_format_types != NULL); + + blob->program_count = program_count; + blob->fragment_count = fragment_count; + blob->terminal_count = terminal_count; + blob->program_param_offset = sizeof(ia_css_program_group_param_t); + blob->terminal_param_offset = blob->program_param_offset + + sizeof(ia_css_program_param_t) * program_count; + + param_base = (char *)((char *)blob + blob->program_param_offset); + offset = blob->program_param_offset; + + for (i = 0; i < program_count; i++) { + ia_css_program_param_init( + (ia_css_program_param_t *)param_base, offset); + offset += sizeof(ia_css_program_param_t); + param_base += sizeof(ia_css_program_param_t); + } + + param_base = (char *)((char *)blob + blob->terminal_param_offset); + offset = blob->terminal_param_offset; + + for (i = 0; i < terminal_count; i++) { + ia_css_terminal_param_init( + (ia_css_terminal_param_t *)param_base, + offset, + frame_format_types[i]); + + offset += sizeof(ia_css_terminal_param_t); + param_base += sizeof(ia_css_terminal_param_t); + } + + /* + * For now, set legacy flow by default. This can be removed as soon + * as all hosts/drivers explicitly set the protocol version. 
+ */ + blob->protocol_version = IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY; + + blob->size = (uint32_t)ia_css_sizeof_program_group_param(program_count, + terminal_count, + fragment_count); + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_group_param_init failed (%i)\n", retval); + } + return retval; +} + +int ia_css_program_group_param_print( + const ia_css_program_group_param_t *param, + void *fid) +{ + int retval = -1; + int i; + uint8_t program_count, terminal_count; + ia_css_kernel_bitmap_t bitmap; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_program_group_param_print(): enter:\n"); + + verifexit(param != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, + "sizeof(program_group_param) = %d\n", + (int)ia_css_program_group_param_get_size(param)); + + program_count = ia_css_program_group_param_get_program_count(param); + terminal_count = ia_css_program_group_param_get_terminal_count(param); + + bitmap = ia_css_program_group_param_get_kernel_enable_bitmap(param); + verifexit(ia_css_kernel_bitmap_print(bitmap, fid) == 0); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, + "%d program params\n", (int)program_count); + for (i = 0; i < (int)program_count; i++) { + ia_css_program_param_t *program_param = + ia_css_program_group_param_get_program_param(param, i); + + retval = ia_css_program_param_print(program_param, fid); + verifjmpexit(retval == 0); + } + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, "%d terminal params\n", + (int)terminal_count); + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_param_t *terminal_param = + ia_css_program_group_param_get_terminal_param(param, i); + + retval = ia_css_terminal_param_print(terminal_param, fid); + verifjmpexit(retval == 0); + } + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_group_param_print failed (%i)\n", retval); + } + return retval; +} + +int ia_css_terminal_param_print( + const ia_css_terminal_param_t *param, + void *fid) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_terminal_param_print(): enter:\n"); + + verifexit(param != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, + "sizeof(terminal_param) = %d\n", + (int)ia_css_terminal_param_get_size(param)); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, + "\tframe_format_type = %d\n", param->frame_format_type); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_print failed (%i)\n", retval); + } + return retval; +} + +int ia_css_program_param_print( + const ia_css_program_param_t *param, + void *fid) +{ + int retval = -1; + ia_css_kernel_bitmap_t bitmap; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_program_param_print(): enter:\n"); + + verifexit(param != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, "sizeof(program_param) = %d\n", + (int)ia_css_program_param_get_size(param)); + + bitmap = ia_css_program_param_get_kernel_enable_bitmap(param); + verifexit(ia_css_kernel_bitmap_print(bitmap, fid) == 0); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_param_print failed (%i)\n", retval); + } + return retval; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param_private.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param_private.h new file mode 100644 index 000000000000..6672737e51a1 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param_private.h @@ -0,0 +1,80 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_PARAM_PRIVATE_H +#define __IA_CSS_PROGRAM_GROUP_PARAM_PRIVATE_H + +#include +#include +#include +#include +#include +#include +#include + +#define N_PADDING_UINT8_IN_PROGRAM_GROUP_PARAM_STRUCT 7 +#define SIZE_OF_PROGRAM_GROUP_PARAM_STRUCT_IN_BITS \ + (IA_CSS_KERNEL_BITMAP_BITS \ + + (3 * IA_CSS_UINT32_T_BITS) \ + + IA_CSS_UINT16_T_BITS \ + + (3 * IA_CSS_UINT8_T_BITS) \ + + (N_PADDING_UINT8_IN_PROGRAM_GROUP_PARAM_STRUCT * IA_CSS_UINT8_T_BITS)) + +/* tentative; co-design with ISP algorithm */ +struct ia_css_program_group_param_s { + /* The enable bits for each individual kernel */ + ia_css_kernel_bitmap_t kernel_enable_bitmap; + /* Size of this structure */ + uint32_t size; + uint32_t program_param_offset; + uint32_t terminal_param_offset; + /* Number of (explicit) fragments to use in a frame */ + uint16_t fragment_count; + /* Number of active programs */ + uint8_t program_count; + /* Number of active terminals */ + uint8_t terminal_count; + /* Program group protocol version */ + uint8_t protocol_version; + uint8_t padding[N_PADDING_UINT8_IN_PROGRAM_GROUP_PARAM_STRUCT]; +}; + +#define SIZE_OF_PROGRAM_PARAM_STRUCT_IN_BITS \ + (IA_CSS_KERNEL_BITMAP_BITS \ + + IA_CSS_UINT32_T_BITS \ + + IA_CSS_INT32_T_BITS) + +/* private */ +struct ia_css_program_param_s { + /* What to use this one for ? */ + ia_css_kernel_bitmap_t kernel_enable_bitmap; + /* Size of this structure */ + uint32_t size; + /* offset to add to reach parent. This is negative value.*/ + int32_t parent_offset; +}; + +#define SIZE_OF_TERMINAL_PARAM_STRUCT_IN_BITS \ + (IA_CSS_UINT32_T_BITS \ + + IA_CSS_FRAME_FORMAT_TYPE_BITS \ + + IA_CSS_INT32_T_BITS \ + + (IA_CSS_UINT16_T_BITS * IA_CSS_N_DATA_DIMENSION) \ + + (IA_CSS_UINT16_T_BITS * IA_CSS_N_DATA_DIMENSION) \ + + (IA_CSS_UINT16_T_BITS * IA_CSS_N_DATA_DIMENSION) \ + + IA_CSS_INT32_T_BITS \ + + IA_CSS_UINT16_T_BITS \ + + IA_CSS_UINT8_T_BITS \ + + (IA_CSS_UINT8_T_BITS * 1)) + +#endif /* __IA_CSS_PROGRAM_GROUP_PARAM_PRIVATE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psys_server_manifest/bxtB0/ia_css_psys_server_manifest.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psys_server_manifest/bxtB0/ia_css_psys_server_manifest.c new file mode 100644 index 000000000000..6fafa1e3f364 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psys_server_manifest/bxtB0/ia_css_psys_server_manifest.c @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_server_manifest.h" + +/** + * Manifest of resources in use by PSYS itself + */ + +const vied_nci_resource_spec_t psys_server_manifest = { + /* internal memory */ + { /* resource id size offset*/ + {VIED_NCI_GMEM_TYPE_ID, 0, 0}, + {VIED_NCI_DMEM_TYPE_ID, VIED_NCI_DMEM0_MAX_SIZE, 0}, + {VIED_NCI_VMEM_TYPE_ID, 0, 0}, + {VIED_NCI_BAMEM_TYPE_ID, 0, 0}, + {VIED_NCI_PMEM_TYPE_ID, 0, 0} + }, + /* external memory */ + { /* resource id size offset*/ + {VIED_NCI_N_MEM_ID, 0, 0}, + {VIED_NCI_N_MEM_ID, 0, 0}, + {VIED_NCI_N_MEM_ID, 0, 0}, + {VIED_NCI_N_MEM_ID, 0, 0}, + }, + /* device channel */ + { /* resource id size offset*/ + {VIED_NCI_DEV_CHN_DMA_EXT0_ID, + PSYS_SERVER_DMA_CHANNEL_SIZE, + PSYS_SERVER_DMA_CHANNEL_OFFSET}, + {VIED_NCI_DEV_CHN_GDC_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_EXT1_READ_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_INTERNAL_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_IPFD_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_ISA_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_FW_ID, 0, 0} + } +}; + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psys_server_manifest/bxtB0/ia_css_psys_server_manifest.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psys_server_manifest/bxtB0/ia_css_psys_server_manifest.h new file mode 100644 index 000000000000..b4c7fbc32d5b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psys_server_manifest/bxtB0/ia_css_psys_server_manifest.h @@ -0,0 +1,29 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_SERVER_MANIFEST_H +#define __IA_CSS_PSYS_SERVER_MANIFEST_H + +#include "vied_nci_psys_resource_model.h" + +/** + * Manifest of resources in use by PSYS itself + */ + +#define PSYS_SERVER_DMA_CHANNEL_SIZE 2 +#define PSYS_SERVER_DMA_CHANNEL_OFFSET 28 + +extern const vied_nci_resource_spec_t psys_server_manifest; + +#endif /* __IA_CSS_PSYS_SERVER_MANIFEST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psysapi.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psysapi.mk new file mode 100644 index 000000000000..e1977cbe2ca2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/psysapi.mk @@ -0,0 +1,122 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for
+# more details.
+#
+#
+# MODULE is PSYSAPI
+#
+ifdef _H_PSYSAPI_MK
+$(error ERROR: psysapi.mk included multiple times, please check makefile)
+else
+_H_PSYSAPI_MK=1
+endif
+
+include $(MODULES_DIR)/config/psys/subsystem_$(IPU_SYSVER).mk
+
+PSYSAPI_DIR = $${MODULES_DIR}/psysapi
+
+PSYSAPI_PROCESS_HOST_FILES = $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_process.c
+PSYSAPI_PROCESS_HOST_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_process_group.c
+PSYSAPI_PROCESS_HOST_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_buffer_set.c
+PSYSAPI_PROCESS_HOST_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_terminal.c
+PSYSAPI_PROCESS_HOST_FILES += $(PSYSAPI_DIR)/param/src/ia_css_program_group_param.c
+
+# Use PSYSAPI_MANIFEST_HOST_FILES when only accessing manifest functions
+PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_program_group_manifest.c
+PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_program_manifest.c
+PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_terminal_manifest.c
+PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/sim/src/vied_nci_psys_system.c
+PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/kernel/src/ia_css_kernel_bitmap.c
+PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/data/src/ia_css_program_group_data.c
+PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/vied_nci_psys_resource_model.c
+PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION)/ia_css_psys_server_manifest.c
+
+# Use only kernel bitmap functionality from PSYS API
+PSYSAPI_KERNEL_BITMAP_FILES += $(PSYSAPI_DIR)/kernel/src/ia_css_kernel_bitmap.c
+PSYSAPI_KERNEL_BITMAP_CPPFLAGS += -I$(PSYSAPI_DIR)/kernel/interface
+PSYSAPI_KERNEL_BITMAP_CPPFLAGS += -I$(PSYSAPI_DIR)/interface
+
+# Use PSYSAPI_HOST_FILES when program and process group are both needed
+PSYSAPI_HOST_FILES = $(PSYSAPI_PROCESS_HOST_FILES) $(PSYSAPI_MANIFEST_HOST_FILES)
+
+# Use PSYSAPI_PROCESS_GROUP_HOST_FILES when program and process group are both needed but there is no
+# implementation (yet) of the user customization functions defined in ia_css_psys_process_group_cmd_impl.h.
+# Dummy implementations are provided in $(PSYSAPI_DIR)/sim/src/ia_css_psys_process_group_cmd_impl.c
+PSYSAPI_PROCESS_GROUP_HOST_FILES = $(PSYSAPI_HOST_FILES)
+PSYSAPI_PROCESS_GROUP_HOST_FILES += $(PSYSAPI_DIR)/sim/src/ia_css_psys_process_group_cmd_impl.c
+
+# Disabled for now; the implementation is currently provided by the PSYS API impl
+#PSYSAPI_HOST_FILES += $(PSYSAPI_DIR)/device/src/ia_css_psys_device.c
+
+PSYSAPI_HOST_CPPFLAGS = -I$(PSYSAPI_DIR)/interface
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/device/interface
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/device/interface/$(IPU_SYSVER)
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/dynamic/interface
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/dynamic/src
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/data/interface
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/data/src
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/static/interface
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/static/src
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/kernel/interface
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/param/interface
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/param/src
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/sim/interface
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/sim/src
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/private
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION)
+
+PSYSAPI_FW_CPPFLAGS = $(PSYSAPI_HOST_CPPFLAGS)
+PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/static/interface
+PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/static/src
+PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)
+PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/private
+PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION)
+
+PSYSAPI_SYSTEM_GLOBAL_CPPFLAGS += -I$(PSYSAPI_DIR)/sim/interface
+PSYSAPI_SYSTEM_GLOBAL_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)
+PSYSAPI_SYSTEM_GLOBAL_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/private
+PSYSAPI_SYSTEM_GLOBAL_CPPFLAGS += -I$(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION)
+
+# Defining the trace level for the PSYSAPI
+PSYSAPI_HOST_CPPFLAGS += -DPSYSAPI_TRACE_CONFIG=PSYSAPI_TRACE_LOG_LEVEL_NORMAL
+# Enable/disable 'late binding' support and its additional queues
+PSYSAPI_HOST_CPPFLAGS += -DHAS_LATE_BINDING_SUPPORT=$(PSYS_HAS_LATE_BINDING_SUPPORT)
+
+# Example: how to switch to a different log level for a sub-module
+#PSYSAPI_HOST_CPPFLAGS += -DPSYSAPI_DYNAMIC_TRACING_OVERRIDE=PSYSAPI_TRACE_LOG_LEVEL_DEBUG
+
+# enable host side implementation
+# TODO: better name for the flag to enable the impl...
+PSYSAPI_HOST_CPPFLAGS += -D__X86_SIM__ + +# Files for Firmware +PSYSAPI_FW_FILES = $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_process.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_process_group.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_terminal.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_buffer_set.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/param/src/ia_css_program_group_param.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/data/src/ia_css_program_group_data.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/sim/src/vied_nci_psys_system.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/sim/src/ia_css_psys_sim_data.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_program_group_manifest.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_program_manifest.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_terminal_manifest.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/vied_nci_psys_resource_model.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION)/ia_css_psys_server_manifest.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/kernel/src/ia_css_kernel_bitmap.c + +# resource model +PSYSAPI_RESOURCE_MODEL_FILES = $(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/vied_nci_psys_resource_model.c + +ifeq ($(PSYS_HAS_DUAL_CMD_CTX_SUPPORT), 1) +PSYSAPI_HOST_CPPFLAGS += -DHAS_DUAL_CMD_CTX_SUPPORT=$(PSYS_HAS_DUAL_CMD_CTX_SUPPORT) +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/resource_model/bxtB0/vied_nci_psys_resource_model.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/resource_model/bxtB0/vied_nci_psys_resource_model.c new file mode 100644 index 000000000000..21be1bc375c9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/resource_model/bxtB0/vied_nci_psys_resource_model.c @@ -0,0 +1,323 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "vied_nci_psys_resource_model.h" + +/* + * Cell types by cell IDs + */ +const vied_nci_cell_type_ID_t vied_nci_cell_type[VIED_NCI_N_CELL_ID] = { + VIED_NCI_SP_CTRL_TYPE_ID, + VIED_NCI_SP_SERVER_TYPE_ID, + VIED_NCI_SP_SERVER_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_ACC_ISA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_OSA_TYPE_ID, + VIED_NCI_GDC_TYPE_ID, + VIED_NCI_GDC_TYPE_ID +}; + +/* + * Memory types by memory IDs + */ +const vied_nci_mem_type_ID_t vied_nci_mem_type[VIED_NCI_N_MEM_ID] = { + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_GMEM_TYPE_ID,/* VMEM4 is GMEM according to vied_nci_cell_mem */ + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID +}; + +/* + * Cell mem count by cell type ID + */ +const uint16_t vied_nci_N_cell_mem[VIED_NCI_N_CELL_TYPE_ID] = { + VIED_NCI_N_SP_CTRL_MEM, + VIED_NCI_N_SP_SERVER_MEM, + VIED_NCI_N_VP_MEM, + VIED_NCI_N_ACC_PSA_MEM, + VIED_NCI_N_ACC_ISA_MEM, + VIED_NCI_N_ACC_OSA_MEM +}; + +/* + * Cell mem type by cell type ID and memory index + */ +const vied_nci_mem_type_ID_t +vied_nci_cell_mem_type[VIED_NCI_N_CELL_TYPE_ID][VIED_NCI_N_MEM_TYPE_ID] = { + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_GMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + } +}; + +/* + * Ext mem ID by memory index + */ +const vied_nci_mem_ID_t +vied_nci_ext_mem[VIED_NCI_N_MEM_TYPE_ID] = { + VIED_NCI_VMEM4_ID, /* VIED_NCI_GMEM_TYPE_ID */ + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID +}; + +/* + * Cell mem ID by cell ID and memory index + */ +const vied_nci_mem_ID_t +vied_nci_cell_mem[VIED_NCI_N_CELL_ID][VIED_NCI_N_MEM_TYPE_ID] = { + { + VIED_NCI_N_MEM_ID, + VIED_NCI_DMEM0_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_DMEM1_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_DMEM2_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_VMEM4_ID, + VIED_NCI_DMEM4_ID, + VIED_NCI_VMEM0_ID, + VIED_NCI_BAMEM0_ID, + VIED_NCI_PMEM0_ID 
+ }, + { + VIED_NCI_VMEM4_ID, + VIED_NCI_DMEM5_ID, + VIED_NCI_VMEM1_ID, + VIED_NCI_BAMEM1_ID, + VIED_NCI_PMEM1_ID + }, + { + VIED_NCI_VMEM4_ID, + VIED_NCI_DMEM6_ID, + VIED_NCI_VMEM2_ID, + VIED_NCI_BAMEM2_ID, + VIED_NCI_PMEM2_ID + }, + { + VIED_NCI_VMEM4_ID, + VIED_NCI_DMEM7_ID, + VIED_NCI_VMEM3_ID, + VIED_NCI_BAMEM3_ID, + VIED_NCI_PMEM3_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + } +}; + +/* + * Memory sizes by mem ID + */ +const uint16_t vied_nci_mem_size[VIED_NCI_N_MEM_ID] = { + VIED_NCI_VMEM0_MAX_SIZE, + VIED_NCI_VMEM1_MAX_SIZE, + VIED_NCI_VMEM2_MAX_SIZE, + VIED_NCI_VMEM3_MAX_SIZE, + VIED_NCI_VMEM4_MAX_SIZE, + VIED_NCI_BAMEM0_MAX_SIZE, + VIED_NCI_BAMEM1_MAX_SIZE, + VIED_NCI_BAMEM2_MAX_SIZE, + VIED_NCI_BAMEM3_MAX_SIZE, + VIED_NCI_DMEM0_MAX_SIZE, + VIED_NCI_DMEM1_MAX_SIZE, + VIED_NCI_DMEM2_MAX_SIZE, + VIED_NCI_DMEM3_MAX_SIZE, + VIED_NCI_DMEM4_MAX_SIZE, + VIED_NCI_DMEM5_MAX_SIZE, + VIED_NCI_DMEM6_MAX_SIZE, + VIED_NCI_DMEM7_MAX_SIZE, + VIED_NCI_PMEM0_MAX_SIZE, + VIED_NCI_PMEM1_MAX_SIZE, + VIED_NCI_PMEM2_MAX_SIZE, + VIED_NCI_PMEM3_MAX_SIZE +}; + +/* + * Memory word sizes by mem type ID + */ +const uint16_t vied_nci_mem_word_size[VIED_NCI_N_DATA_MEM_TYPE_ID] = { + VIED_NCI_GMEM_WORD_SIZE, + VIED_NCI_DMEM_WORD_SIZE, + VIED_NCI_VMEM_WORD_SIZE, + VIED_NCI_BAMEM_WORD_SIZE +}; + +/* + * Number of channels by device ID + */ +const uint16_t vied_nci_dev_chn_size[VIED_NCI_N_DEV_CHN_ID] = { + VIED_NCI_DEV_CHN_DMA_EXT0_MAX_SIZE, + VIED_NCI_DEV_CHN_GDC_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_INTERNAL_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_IPFD_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_ISA_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_FW_MAX_SIZE +}; + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/resource_model/bxtB0/vied_nci_psys_resource_model.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/resource_model/bxtB0/vied_nci_psys_resource_model.h new file mode 100644 index 000000000000..1cb4e010d55d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/resource_model/bxtB0/vied_nci_psys_resource_model.h @@ -0,0 +1,300 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __VIED_NCI_PSYS_RESOURCE_MODEL_H +#define __VIED_NCI_PSYS_RESOURCE_MODEL_H + +#include "type_support.h" +#include "storage_class.h" + +#define HAS_DFM 0 +#define NON_RELOC_RESOURCE_SUPPORT 0 +#define IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + +/* Defines for the routing bitmap in the program group manifest. + */ +#define VIED_NCI_RBM_MAX_MUX_COUNT 0 +#define VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT 0 +#define VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT 0 +#define N_PADDING_UINT8_IN_RBM_MANIFEST 2 + +/* The amount of padding bytes needed to make + * ia_css_process_s structure 64 bit aligned + */ +#define N_PADDING_UINT8_IN_PROCESS_STRUCT 4 +#define N_PADDING_UINT8_IN_PROGRAM_GROUP_MANFEST 4 + +/** + * Resource model for BXT B0 + */ + +/* + * Cell IDs + */ +typedef enum { + VIED_NCI_SP0_ID = 0, + VIED_NCI_SP1_ID, + VIED_NCI_SP2_ID, + VIED_NCI_VP0_ID, + VIED_NCI_VP1_ID, + VIED_NCI_VP2_ID, + VIED_NCI_VP3_ID, + VIED_NCI_ACC0_ID, + VIED_NCI_ACC1_ID, + VIED_NCI_ACC2_ID, + VIED_NCI_ACC3_ID, + VIED_NCI_ACC4_ID, + VIED_NCI_ACC5_ID, + VIED_NCI_ACC6_ID, + VIED_NCI_ACC7_ID, + VIED_NCI_GDC0_ID, + VIED_NCI_GDC1_ID, + VIED_NCI_N_CELL_ID +} vied_nci_cell_ID_t; + +/* + * Barrier bits (to model process group dependencies) + */ +typedef enum { + VIED_NCI_BARRIER0_ID, + VIED_NCI_BARRIER1_ID, + VIED_NCI_BARRIER2_ID, + VIED_NCI_BARRIER3_ID, + VIED_NCI_BARRIER4_ID, + VIED_NCI_BARRIER5_ID, + VIED_NCI_BARRIER6_ID, + VIED_NCI_BARRIER7_ID, + VIED_NCI_N_BARRIER_ID +} vied_nci_barrier_ID_t; + +/* + * Cell types + */ +typedef enum { + VIED_NCI_SP_CTRL_TYPE_ID = 0, + VIED_NCI_SP_SERVER_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_ISA_TYPE_ID, + VIED_NCI_ACC_OSA_TYPE_ID, + VIED_NCI_GDC_TYPE_ID, + VIED_NCI_N_CELL_TYPE_ID +} vied_nci_cell_type_ID_t; + +/* + * Memory IDs + */ +typedef enum { + VIED_NCI_VMEM0_ID = 0, + VIED_NCI_VMEM1_ID, + VIED_NCI_VMEM2_ID, + VIED_NCI_VMEM3_ID, + VIED_NCI_VMEM4_ID, + VIED_NCI_BAMEM0_ID, + VIED_NCI_BAMEM1_ID, + VIED_NCI_BAMEM2_ID, + VIED_NCI_BAMEM3_ID, + VIED_NCI_DMEM0_ID, + VIED_NCI_DMEM1_ID, + VIED_NCI_DMEM2_ID, + VIED_NCI_DMEM3_ID, + VIED_NCI_DMEM4_ID, + VIED_NCI_DMEM5_ID, + VIED_NCI_DMEM6_ID, + VIED_NCI_DMEM7_ID, + VIED_NCI_PMEM0_ID, + VIED_NCI_PMEM1_ID, + VIED_NCI_PMEM2_ID, + VIED_NCI_PMEM3_ID, + VIED_NCI_N_MEM_ID +} vied_nci_mem_ID_t; + +/* + * Memory types + */ +typedef enum { + VIED_NCI_GMEM_TYPE_ID = 0, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID +} vied_nci_mem_type_ID_t; + +/* Excluding PMEM */ +#define VIED_NCI_N_DATA_MEM_TYPE_ID (VIED_NCI_N_MEM_TYPE_ID - 1) + +#define VIED_NCI_N_SP_CTRL_MEM 2 +#define VIED_NCI_N_SP_SERVER_MEM 2 +#define VIED_NCI_N_VP_MEM 4 +#define VIED_NCI_N_ACC_PSA_MEM 0 +#define VIED_NCI_N_ACC_ISA_MEM 0 +#define VIED_NCI_N_ACC_OSA_MEM 0 + +#define VIED_NCI_N_VP_CELL 4 +#define VIED_NCI_N_ACC_CELL 8 + +/* + * Device IDs + */ +typedef enum { + VIED_NCI_DEV_CHN_DMA_EXT0_ID = 0, + VIED_NCI_DEV_CHN_GDC_ID, + VIED_NCI_DEV_CHN_DMA_EXT1_READ_ID, + VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_ID, + 
VIED_NCI_DEV_CHN_DMA_INTERNAL_ID, + VIED_NCI_DEV_CHN_DMA_IPFD_ID, + VIED_NCI_DEV_CHN_DMA_ISA_ID, + VIED_NCI_DEV_CHN_DMA_FW_ID, + VIED_NCI_N_DEV_CHN_ID +} vied_nci_dev_chn_ID_t; + +typedef enum { + DFM_IS_NOT_AVAILABLE +} vied_nci_dev_dfm_id_t; + +#define VIED_NCI_N_DEV_DFM_ID 0 + + +/* + * Memory size (previously in vied_nci_psys_system.c) + * VMEM: in words, 64 Byte per word. + * BAMEM: in words, 64 Byte per word + * DMEM: in words, 4 Byte per word. + * PMEM: in words, 64 Byte per word. + */ +#define VIED_NCI_GMEM_WORD_SIZE 64 +#define VIED_NCI_DMEM_WORD_SIZE 4 +#define VIED_NCI_VMEM_WORD_SIZE 64 +#define VIED_NCI_BAMEM_WORD_SIZE 64 + +#define VIED_NCI_VMEM0_MAX_SIZE (0x0800) +#define VIED_NCI_VMEM1_MAX_SIZE (0x0800) +#define VIED_NCI_VMEM2_MAX_SIZE (0x0800) +#define VIED_NCI_VMEM3_MAX_SIZE (0x0800) +#define VIED_NCI_VMEM4_MAX_SIZE (0x0800) +#define VIED_NCI_BAMEM0_MAX_SIZE (0x0400) +#define VIED_NCI_BAMEM1_MAX_SIZE (0x0400) +#define VIED_NCI_BAMEM2_MAX_SIZE (0x0400) +#define VIED_NCI_BAMEM3_MAX_SIZE (0x0400) +#define VIED_NCI_DMEM0_MAX_SIZE (0x4000) +#define VIED_NCI_DMEM1_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM2_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM3_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM4_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM5_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM6_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM7_MAX_SIZE (0x1000) +#define VIED_NCI_PMEM0_MAX_SIZE (0x0500) +#define VIED_NCI_PMEM1_MAX_SIZE (0x0500) +#define VIED_NCI_PMEM2_MAX_SIZE (0x0500) +#define VIED_NCI_PMEM3_MAX_SIZE (0x0500) + +/* + * Number of channels per device + */ +#define VIED_NCI_DEV_CHN_DMA_EXT0_MAX_SIZE (30) +#define VIED_NCI_DEV_CHN_GDC_MAX_SIZE (4) +#define VIED_NCI_DEV_CHN_DMA_EXT1_READ_MAX_SIZE (30) +#define VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE (20) +#define VIED_NCI_DEV_CHN_DMA_INTERNAL_MAX_SIZE (2) +#define VIED_NCI_DEV_CHN_DMA_IPFD_MAX_SIZE (5) +#define VIED_NCI_DEV_CHN_DMA_ISA_MAX_SIZE (2) +#define VIED_NCI_DEV_CHN_DMA_FW_MAX_SIZE (1) + +/* + * Storage of the resource and resource type enumerators + */ +#define VIED_NCI_RESOURCE_ID_BITS 8 +typedef uint8_t vied_nci_resource_id_t; + +#define VIED_NCI_RESOURCE_SIZE_BITS 16 +typedef uint16_t vied_nci_resource_size_t; + +#define VIED_NCI_RESOURCE_BITMAP_BITS 32 +typedef uint32_t vied_nci_resource_bitmap_t; + +#define IA_CSS_PROCESS_INVALID_DEPENDENCY ((vied_nci_resource_id_t)(-1)) +#define IA_CSS_PROCESS_INVALID_OFFSET ((vied_nci_resource_size_t)(-1)) +#define IA_CSS_PROCESS_MAX_CELLS 1 + +/* + * Resource specifications + * Note that the FAS uses the terminology local/remote memory. In the PSYS API, + * these are called internal/external memory. 
+ */
+
+/* resource spec for internal (local) memory */
+struct vied_nci_resource_spec_int_mem_s {
+	vied_nci_resource_id_t type_id;
+	vied_nci_resource_size_t size;
+	vied_nci_resource_size_t offset;
+};
+
+typedef struct vied_nci_resource_spec_int_mem_s
+	vied_nci_resource_spec_int_mem_t;
+
+/* resource spec for external (remote) memory */
+struct vied_nci_resource_spec_ext_mem_s {
+	vied_nci_resource_id_t type_id;
+	vied_nci_resource_size_t size;
+	vied_nci_resource_size_t offset;
+};
+
+typedef struct vied_nci_resource_spec_ext_mem_s
+	vied_nci_resource_spec_ext_mem_t;
+
+/* resource spec for device channel */
+struct vied_nci_resource_spec_dev_chn_s {
+	vied_nci_resource_id_t type_id;
+	vied_nci_resource_size_t size;
+	vied_nci_resource_size_t offset;
+};
+
+typedef struct vied_nci_resource_spec_dev_chn_s
+	vied_nci_resource_spec_dev_chn_t;
+
+/* resource spec for all contiguous resources */
+struct vied_nci_resource_spec_s {
+	vied_nci_resource_spec_int_mem_t int_mem[VIED_NCI_N_MEM_TYPE_ID];
+	vied_nci_resource_spec_ext_mem_t ext_mem[VIED_NCI_N_DATA_MEM_TYPE_ID];
+	vied_nci_resource_spec_dev_chn_t dev_chn[VIED_NCI_N_DEV_CHN_ID];
+};
+
+typedef struct vied_nci_resource_spec_s vied_nci_resource_spec_t;
+
+#ifndef PIPE_GENERATION
+
+extern const vied_nci_cell_type_ID_t vied_nci_cell_type[VIED_NCI_N_CELL_ID];
+extern const vied_nci_mem_type_ID_t vied_nci_mem_type[VIED_NCI_N_MEM_ID];
+extern const uint16_t vied_nci_N_cell_mem[VIED_NCI_N_CELL_TYPE_ID];
+extern const vied_nci_mem_type_ID_t
+	vied_nci_cell_mem_type[VIED_NCI_N_CELL_TYPE_ID][VIED_NCI_N_MEM_TYPE_ID];
+extern const vied_nci_mem_ID_t
+	vied_nci_ext_mem[VIED_NCI_N_MEM_TYPE_ID];
+extern const vied_nci_mem_ID_t
+	vied_nci_cell_mem[VIED_NCI_N_CELL_ID][VIED_NCI_N_MEM_TYPE_ID];
+extern const uint16_t vied_nci_mem_size[VIED_NCI_N_MEM_ID];
+extern const uint16_t vied_nci_mem_word_size[VIED_NCI_N_DATA_MEM_TYPE_ID];
+extern const uint16_t vied_nci_dev_chn_size[VIED_NCI_N_DEV_CHN_ID];
+
+STORAGE_CLASS_INLINE
+uint32_t vied_nci_mem_is_ext_type(const vied_nci_mem_type_ID_t mem_type_id)
+{
+	return (mem_type_id == VIED_NCI_GMEM_TYPE_ID);
+}
+
+#endif /* PIPE_GENERATION */
+
+#endif /* __VIED_NCI_PSYS_RESOURCE_MODEL_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_data.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_data.h
new file mode 100644
index 000000000000..78f917672672
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_data.h
@@ -0,0 +1,53 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_SIM_DATA_H
+#define __IA_CSS_PSYS_SIM_DATA_H
+
+/*! Set the seed of the random number generator
+
+ @param seed[in]	Random number generator seed
+ */
+extern void ia_css_psys_ran_set_seed(const unsigned int seed);
+
+/*! Generate a random number of a specified bit depth
+
+ @param bit_depth[in]	The number of bits of the random output
+
+ @return out, weight(out) <= bit_depth, 0 on error
+ */
+extern unsigned int ia_css_psys_ran_var(const unsigned int bit_depth);
+
+/*! Generate a random number within a specified range
+
+ @param range[in]	The range of the random output
+
+ @return 0 <= out < range, 0 on error
+ */
+extern unsigned int ia_css_psys_ran_val(const unsigned int range);
+
+/*! Generate a random number in a specified interval
+
+ @param lo[in]	The lower bound of the random output range
+ @param hi[in]	The upper bound of the random output range
+
+ @return lo <= out < hi, 0 on error
+ */
+extern unsigned int ia_css_psys_ran_interval(const unsigned int lo,
+	const unsigned int hi);
+
+#endif /* __IA_CSS_PSYS_SIM_DATA_H */
+
+
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_storage_class.h
new file mode 100644
index 000000000000..61095257ec55
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_storage_class.h
@@ -0,0 +1,28 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_SIM_STORAGE_CLASS_H
+#define __IA_CSS_PSYS_SIM_STORAGE_CLASS_H
+
+#include "storage_class.h"
+
+#ifndef __IA_CSS_PSYS_SIM_INLINE__
+#define IA_CSS_PSYS_SIM_STORAGE_CLASS_H STORAGE_CLASS_EXTERN
+#define IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+#else
+#define IA_CSS_PSYS_SIM_STORAGE_CLASS_H STORAGE_CLASS_INLINE
+#define IA_CSS_PSYS_SIM_STORAGE_CLASS_C STORAGE_CLASS_INLINE
+#endif
+
+#endif /* __IA_CSS_PSYS_SIM_STORAGE_CLASS_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_trace.h
new file mode 100644
index 000000000000..423ff1980270
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_trace.h
@@ -0,0 +1,95 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_SIM_TRACE_H
+#define __IA_CSS_PSYS_SIM_TRACE_H
+
+#include "ia_css_psysapi_trace.h"
+
+#define PSYS_SIM_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF
+
+/* Default sub-module tracing config */
+#if (!defined(PSYSAPI_SIM_TRACING_OVERRIDE))
+	#define PSYS_SIM_TRACE_LEVEL_CONFIG PSYS_SIM_TRACE_LEVEL_CONFIG_DEFAULT
+#endif
+
+/* The sub-module specific trace setting below is used when
+ * PSYSAPI_SIM_TRACING_OVERRIDE is defined; otherwise the module-level
+ * trace setting is inherited.
+ */
+#if (defined(PSYSAPI_SIM_TRACING_OVERRIDE))
+	/* Module/sub-module specific trace setting */
+	#if PSYSAPI_SIM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF
+		/* PSYSAPI_TRACE_LOG_LEVEL_OFF */
+		#define PSYSAPI_SIM_TRACE_METHOD \
+			IA_CSS_TRACE_METHOD_NATIVE
+		#define PSYSAPI_SIM_TRACE_LEVEL_ASSERT \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_SIM_TRACE_LEVEL_ERROR \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_SIM_TRACE_LEVEL_WARNING \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_SIM_TRACE_LEVEL_INFO \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_SIM_TRACE_LEVEL_DEBUG \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_SIM_TRACE_LEVEL_VERBOSE \
+			IA_CSS_TRACE_LEVEL_DISABLED
+	#elif PSYSAPI_SIM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL
+		/* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */
+		#define PSYSAPI_SIM_TRACE_METHOD \
+			IA_CSS_TRACE_METHOD_NATIVE
+		#define PSYSAPI_SIM_TRACE_LEVEL_ASSERT \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_SIM_TRACE_LEVEL_ERROR \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_SIM_TRACE_LEVEL_WARNING \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_SIM_TRACE_LEVEL_INFO \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_SIM_TRACE_LEVEL_DEBUG \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_SIM_TRACE_LEVEL_VERBOSE \
+			IA_CSS_TRACE_LEVEL_DISABLED
+	#elif PSYSAPI_SIM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG
+		/* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */
+		#define PSYSAPI_SIM_TRACE_METHOD \
+			IA_CSS_TRACE_METHOD_NATIVE
+		#define PSYSAPI_SIM_TRACE_LEVEL_ASSERT \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_SIM_TRACE_LEVEL_ERROR \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_SIM_TRACE_LEVEL_WARNING \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_SIM_TRACE_LEVEL_INFO \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_SIM_TRACE_LEVEL_DEBUG \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_SIM_TRACE_LEVEL_VERBOSE \
+			IA_CSS_TRACE_LEVEL_ENABLED
+	#else
+		#error "No PSYSAPI_SIM Tracing level defined"
+	#endif
+#else
+	/* Inherit Module trace setting */
+	#define PSYSAPI_SIM_TRACE_METHOD PSYSAPI_TRACE_METHOD
+	#define PSYSAPI_SIM_TRACE_LEVEL_ASSERT PSYSAPI_TRACE_LEVEL_ASSERT
+	#define PSYSAPI_SIM_TRACE_LEVEL_ERROR PSYSAPI_TRACE_LEVEL_ERROR
+	#define PSYSAPI_SIM_TRACE_LEVEL_WARNING PSYSAPI_TRACE_LEVEL_WARNING
+	#define PSYSAPI_SIM_TRACE_LEVEL_INFO PSYSAPI_TRACE_LEVEL_INFO
+	#define PSYSAPI_SIM_TRACE_LEVEL_DEBUG PSYSAPI_TRACE_LEVEL_DEBUG
+	#define PSYSAPI_SIM_TRACE_LEVEL_VERBOSE PSYSAPI_TRACE_LEVEL_VERBOSE
+#endif
+
+#endif /* __IA_CSS_PSYS_SIM_TRACE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/vied_nci_psys_system_global.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/vied_nci_psys_system_global.h
new file mode 100644
index 000000000000..529bea763cc2
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/interface/vied_nci_psys_system_global.h
@@ -0,0 +1,180 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __VIED_NCI_PSYS_SYSTEM_GLOBAL_H +#define __VIED_NCI_PSYS_SYSTEM_GLOBAL_H + +#include +#include "ia_css_base_types.h" +#include "ia_css_psys_sim_storage_class.h" +#include "vied_nci_psys_resource_model.h" + +/* + * Key system types + */ +/* Subsystem internal physical address */ +#define VIED_ADDRESS_BITS 32 + +/* typedef uint32_t vied_address_t; */ + +/* Subsystem internal virtual address */ + +/* Subsystem internal data bus */ +#define VIED_DATA_BITS 32 +typedef uint32_t vied_data_t; + +#define VIED_NULL ((vied_vaddress_t)0) + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bit_mask( + const unsigned index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_set( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_clear( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_bitmap_empty( + const vied_nci_resource_bitmap_t bitmap); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_bitmap_set( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_bit_set_in_bitmap( + const vied_nci_resource_bitmap_t bitmap, + const unsigned int index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_bitmap_clear( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +int vied_nci_bitmap_compute_weight( + const vied_nci_resource_bitmap_t bitmap); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_union( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_intersection( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_xor( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_set_unique( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitfield_mask( + const unsigned int position, + const unsigned int size); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_set_bitfield( +const vied_nci_resource_bitmap_t bitmap, +const unsigned int index, +const unsigned int size); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bit_mask_set_unique( + const vied_nci_resource_bitmap_t bitmap, + const unsigned index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_cell_bit_mask( + const vied_nci_cell_ID_t cell_id); + 
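The bitmap helpers declared here operate on vied_nci_resource_bitmap_t, a plain 32-bit mask (VIED_NCI_RESOURCE_BITMAP_BITS wide, see the resource model header above). A compact stand-alone approximation of the core operations follows, for reference while reading the declarations; the demo_* names are editorial assumptions, and the inline definitions in psys_system_global_impl.h further below are authoritative:

/* Stand-alone approximation of the resource bitmap helpers declared
 * above. demo_* names are hypothetical; semantics mirror the inline
 * implementations in psys_system_global_impl.h. */
#include <assert.h>
#include <stdint.h>

#define DEMO_BITMAP_BITS 32u            /* VIED_NCI_RESOURCE_BITMAP_BITS */
typedef uint32_t demo_bitmap_t;         /* vied_nci_resource_bitmap_t */

static demo_bitmap_t demo_bit_mask(unsigned int index)
{
	/* out-of-range indices yield an empty mask, cf. vied_nci_bit_mask() */
	return index < DEMO_BITMAP_BITS ? (demo_bitmap_t)1 << index : 0;
}

static demo_bitmap_t demo_bitmap_set(demo_bitmap_t bm, demo_bitmap_t mask)
{
	return bm | mask;               /* cf. vied_nci_bitmap_set() */
}

static int demo_is_bitmap_set(demo_bitmap_t bm, demo_bitmap_t mask)
{
	return (bm & mask) == mask;     /* cf. vied_nci_is_bitmap_set() */
}

int main(void)
{
	demo_bitmap_t bm = 0;

	bm = demo_bitmap_set(bm, demo_bit_mask(3));
	assert(demo_is_bitmap_set(bm, demo_bit_mask(3)));
	assert(demo_bit_mask(99) == 0); /* invalid index is harmless */
	return 0;
}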
+IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_barrier_bit_mask( + const vied_nci_barrier_ID_t barrier_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_cell_type_ID_t vied_nci_cell_get_type( + const vied_nci_cell_ID_t cell_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_mem_type_ID_t vied_nci_mem_get_type( + const vied_nci_mem_ID_t mem_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +uint16_t vied_nci_mem_get_size( + const vied_nci_mem_ID_t mem_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +uint16_t vied_nci_dev_chn_get_size( + const vied_nci_dev_chn_ID_t dev_chn_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_cell_of_type( + const vied_nci_cell_ID_t cell_id, + const vied_nci_cell_type_ID_t cell_type_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_mem_of_type( + const vied_nci_mem_ID_t mem_id, + const vied_nci_mem_type_ID_t mem_type_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_cell_mem_of_type( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index, + const vied_nci_mem_type_ID_t mem_type_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_has_cell_mem_of_id( + const vied_nci_cell_ID_t cell_id, + const vied_nci_mem_ID_t mem_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +uint16_t vied_nci_cell_get_mem_count( + const vied_nci_cell_ID_t cell_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_mem_type_ID_t vied_nci_cell_get_mem_type( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_mem_ID_t vied_nci_cell_get_mem( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_mem_type_ID_t vied_nci_cell_type_get_mem_type( + const vied_nci_cell_type_ID_t cell_type_id, + const uint16_t mem_index); + +#ifdef __IA_CSS_PSYS_SIM_INLINE__ +#include "psys_system_global_impl.h" +#endif /* __IA_CSS_PSYS_SIM_INLINE__ */ + +#endif /* __VIED_NCI_PSYS_SYSTEM_GLOBAL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/ia_css_psys_sim_data.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/ia_css_psys_sim_data.c new file mode 100644 index 000000000000..6dccac823871 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/ia_css_psys_sim_data.c @@ -0,0 +1,91 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#include 
+
+#include "ia_css_psys_sim_trace.h"
+
+static unsigned int ia_css_psys_ran_seed;
+
+void ia_css_psys_ran_set_seed(const unsigned int seed)
+{
+	ia_css_psys_ran_seed = seed;
+
+	IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
+		"ia_css_psys_ran_set_seed(): enter:\n");
+}
+
+/* Linear congruential generator (Numerical Recipes constants); a fixed
+ * seed yields a reproducible sequence
+ */
+static unsigned int ia_css_psys_ran_int(void)
+{
+	ia_css_psys_ran_seed = 1664525UL * ia_css_psys_ran_seed + 1013904223UL;
+	return ia_css_psys_ran_seed;
+}
+
+unsigned int ia_css_psys_ran_var(const unsigned int bit_depth)
+{
+	unsigned int out;
+	unsigned int tmp;
+
+	IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "ia_css_psys_ran_var(): enter:\n");
+
+	tmp = ia_css_psys_ran_int();
+
+	if (bit_depth >= 32)
+		out = tmp;
+	else if (bit_depth == 0)
+		out = 0;
+	else
+		/* Take the bit_depth most significant bits; the former cast
+		 * to unsigned short truncated results to 16 bits
+		 */
+		out = tmp >> (32 - bit_depth);
+
+	return out;
+}
+
+unsigned int ia_css_psys_ran_val(const unsigned int range)
+{
+	unsigned int out;
+	unsigned int tmp;
+
+	IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "ia_css_psys_ran_val(): enter:\n");
+
+	tmp = ia_css_psys_ran_int();
+
+	if (range > 1)
+		out = tmp % range;
+	else
+		out = 0;
+
+	return out;
+}
+
+unsigned int ia_css_psys_ran_interval(const unsigned int lo,
+	const unsigned int hi)
+{
+	unsigned int out;
+	unsigned int tmp;
+	unsigned int range = hi - lo;
+
+	IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
+		"ia_css_psys_ran_interval(): enter:\n");
+
+	tmp = ia_css_psys_ran_int();
+
+	if ((range > 1) && (lo < hi))
+		out = lo + (tmp % range);
+	else
+		out = 0;
+
+	return out;
+}
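+
+/*
+ * Usage sketch (editor's illustration): seed once, then draw values. With a
+ * fixed seed the LCG sequence above is fully reproducible across runs.
+ *
+ *	unsigned int x, y, z;
+ *
+ *	ia_css_psys_ran_set_seed(42);
+ *	x = ia_css_psys_ran_var(8);          // 8-bit value, range 0..255
+ *	y = ia_css_psys_ran_val(10);         // range 0..9
+ *	z = ia_css_psys_ran_interval(5, 10); // range 5..9
+ */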
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/psys_system_global_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/psys_system_global_impl.h
new file mode 100644
index 000000000000..ff51175548ec
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/psys_system_global_impl.h
@@ -0,0 +1,485 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __PSYS_SYSTEM_GLOBAL_IMPL_H
+#define __PSYS_SYSTEM_GLOBAL_IMPL_H
+
+#include 
+
+#include "ia_css_psys_sim_trace.h"
+#include 
+
+/* Use vied_bits instead; however, for test purposes we use explicit type
+ * checking
+ */
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+vied_nci_resource_bitmap_t vied_nci_bit_mask(
+	const unsigned int index)
+{
+	vied_nci_resource_bitmap_t bit_mask = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "vied_nci_bit_mask(): enter:\n");
+
+	if (index < VIED_NCI_RESOURCE_BITMAP_BITS)
+		bit_mask = (vied_nci_resource_bitmap_t)1 << index;
+
+	return bit_mask;
+}
+
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+vied_nci_resource_bitmap_t vied_nci_bitmap_set(
+	const vied_nci_resource_bitmap_t bitmap,
+	const vied_nci_resource_bitmap_t bit_mask)
+{
+	IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "vied_nci_bitmap_set(): enter:\n");
+
+/*
+	assert(vied_nci_is_bitmap_one_hot(bit_mask));
+*/
+	return bitmap | bit_mask;
+}
+
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+vied_nci_resource_bitmap_t vied_nci_bitmap_clear(
+	const vied_nci_resource_bitmap_t bitmap,
+	const vied_nci_resource_bitmap_t bit_mask)
+{
+	IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
+		"vied_nci_bitmap_clear(): enter:\n");
+
+/*
+	assert(vied_nci_is_bitmap_one_hot(bit_mask));
+*/
+	return bitmap & (~bit_mask);
+}
+
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+vied_nci_resource_bitmap_t vied_nci_bitfield_mask(
+	const unsigned int position,
+	const unsigned int size)
+{
+	vied_nci_resource_bitmap_t bit_mask = 0;
+	vied_nci_resource_bitmap_t ones = (vied_nci_resource_bitmap_t)-1;
+
+	IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
+		"vied_nci_bitfield_mask(): enter:\n");
+
+	/* The shift count is in bits; sizeof() would give bytes */
+	if ((position < VIED_NCI_RESOURCE_BITMAP_BITS) &&
+	    (size > 0) && (size <= VIED_NCI_RESOURCE_BITMAP_BITS))
+		bit_mask = (ones >> (VIED_NCI_RESOURCE_BITMAP_BITS - size))
+			<< position;
+
+	return bit_mask;
+}
+
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+vied_nci_resource_bitmap_t vied_nci_bitmap_set_bitfield(
+	const vied_nci_resource_bitmap_t bitmap,
+	const unsigned int index,
+	const unsigned int size)
+{
+	vied_nci_resource_bitmap_t ret = 0;
+	vied_nci_resource_bitmap_t bit_mask = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
+		"vied_nci_bitmap_set_bitfield(): enter:\n");
+
+	bit_mask = vied_nci_bitfield_mask(index, size);
+	ret = vied_nci_bitmap_set(bitmap, bit_mask);
+
+	return ret;
+}
+
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+vied_nci_resource_bitmap_t vied_nci_bitmap_set_unique(
+	const vied_nci_resource_bitmap_t bitmap,
+	const vied_nci_resource_bitmap_t bit_mask)
+{
+	vied_nci_resource_bitmap_t ret = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
+		"vied_nci_bitmap_set_unique(): enter:\n");
+
+	if ((bitmap & bit_mask) == 0)
+		ret = bitmap | bit_mask;
+
+	return ret;
+}
+
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+vied_nci_resource_bitmap_t vied_nci_bit_mask_set_unique(
+	const vied_nci_resource_bitmap_t bitmap,
+	const unsigned int index)
+{
+	vied_nci_resource_bitmap_t ret = 0;
+	vied_nci_resource_bitmap_t bit_mask;
+
+	IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
+		"vied_nci_bit_mask_set_unique(): enter:\n");
+
+	bit_mask = vied_nci_bit_mask(index);
+
+	if (((bitmap & bit_mask) == 0) && (bit_mask != 0))
+		ret = bitmap | bit_mask;
+
+	return ret;
+}
+
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+bool vied_nci_is_bitmap_empty(
+	const vied_nci_resource_bitmap_t bitmap)
+{
+	IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
+		"vied_nci_is_bitmap_empty(): enter:\n");
+
+	return (bitmap == 0);
+}
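+
+/*
+ * Sketch (editor's illustration): reserving a 3-bit wide field starting at
+ * bit 4, then verifying the reservation with the predicates below.
+ *
+ *	vied_nci_resource_bitmap_t bm = 0;
+ *	vied_nci_resource_bitmap_t field = vied_nci_bitfield_mask(4, 3);
+ *
+ *	bm = vied_nci_bitmap_set_bitfield(bm, 4, 3);
+ *	assert(vied_nci_is_bitmap_set(bm, field));
+ */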
"vied_nci_is_bitmap_set(): enter:\n"); + +/* + assert(vied_nci_is_bitmap_one_hot(bit_mask)); +*/ + return !vied_nci_is_bitmap_clear(bitmap, bit_mask); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_bit_set_in_bitmap( + const vied_nci_resource_bitmap_t bitmap, + const unsigned int index) +{ + + vied_nci_resource_bitmap_t bitmask; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_bit_set_in_bitmap(): enter:\n"); + bitmask = vied_nci_bit_mask(index); + return vied_nci_is_bitmap_set(bitmap, bitmask); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_bitmap_clear( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask) +{ + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_bitmap_clear(): enter:\n"); + +/* + assert(vied_nci_is_bitmap_one_hot(bit_mask)); +*/ + return ((bitmap & bit_mask) == 0); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +int vied_nci_bitmap_compute_weight( + const vied_nci_resource_bitmap_t bitmap) +{ + vied_nci_resource_bitmap_t loc_bitmap = bitmap; + int weight = 0; + int i; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bitmap_compute_weight(): enter:\n"); + + /* Do not need the iterator "i" */ + for (i = 0; (i < VIED_NCI_RESOURCE_BITMAP_BITS) && + (loc_bitmap != 0); i++) { + weight += loc_bitmap & 0x01; + loc_bitmap >>= 1; + } + + return weight; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_union( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bitmap_union(): enter:\n"); + return (bitmap0 | bitmap1); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_intersection( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "ia_css_kernel_bitmap_intersection(): enter:\n"); + return (bitmap0 & bitmap1); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_xor( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "vied_nci_bitmap_xor(): enter:\n"); + return (bitmap0 ^ bitmap1); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_cell_bit_mask( + const vied_nci_cell_ID_t cell_id) +{ + vied_nci_resource_bitmap_t bit_mask = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_bit_mask(): enter:\n"); + + if ((cell_id < VIED_NCI_N_CELL_ID) && + (cell_id < VIED_NCI_RESOURCE_BITMAP_BITS)) { + bit_mask = (vied_nci_resource_bitmap_t)1 << cell_id; + } + return bit_mask; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_barrier_bit_mask( + const vied_nci_barrier_ID_t barrier_id) +{ + vied_nci_resource_bitmap_t bit_mask = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_barrier_bit_mask(): enter:\n"); + + if ((barrier_id < VIED_NCI_N_BARRIER_ID) && + ((barrier_id + VIED_NCI_N_CELL_ID) < VIED_NCI_RESOURCE_BITMAP_BITS)) { + bit_mask = (vied_nci_resource_bitmap_t)1 << + (barrier_id + VIED_NCI_N_CELL_ID); + } + return bit_mask; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_cell_type_ID_t vied_nci_cell_get_type( + const vied_nci_cell_ID_t cell_id) +{ + vied_nci_cell_type_ID_t cell_type = VIED_NCI_N_CELL_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_get_type(): enter:\n"); + + if (cell_id < VIED_NCI_N_CELL_ID) { + cell_type = vied_nci_cell_type[cell_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_SIM, WARNING, + 
"vied_nci_cell_get_type(): invalid argument\n"); + } + + return cell_type; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_mem_type_ID_t vied_nci_mem_get_type( + const vied_nci_mem_ID_t mem_id) +{ + vied_nci_mem_type_ID_t mem_type = VIED_NCI_N_MEM_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_mem_get_type(): enter:\n"); + + if (mem_id < VIED_NCI_N_MEM_ID) { + mem_type = vied_nci_mem_type[mem_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_SIM, WARNING, + "vied_nci_mem_get_type(): invalid argument\n"); + } + + return mem_type; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +uint16_t vied_nci_mem_get_size( + const vied_nci_mem_ID_t mem_id) +{ + uint16_t mem_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_mem_get_size(): enter:\n"); + + if (mem_id < VIED_NCI_N_MEM_ID) { + mem_size = vied_nci_mem_size[mem_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_SIM, WARNING, + "vied_nci_mem_get_size(): invalid argument\n"); + } + + return mem_size; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +uint16_t vied_nci_dev_chn_get_size( + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + uint16_t dev_chn_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_dev_chn_get_size(): enter:\n"); + + if (dev_chn_id < VIED_NCI_N_DEV_CHN_ID) { + dev_chn_size = vied_nci_dev_chn_size[dev_chn_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_SIM, WARNING, + "vied_nci_dev_chn_get_size(): invalid argument\n"); + } + + return dev_chn_size; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_cell_of_type( + const vied_nci_cell_ID_t cell_id, + const vied_nci_cell_type_ID_t cell_type_id) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_cell_of_type(): enter:\n"); + + return ((vied_nci_cell_get_type(cell_id) == + cell_type_id) && (cell_type_id != + VIED_NCI_N_CELL_TYPE_ID)); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_mem_of_type( + const vied_nci_mem_ID_t mem_id, + const vied_nci_mem_type_ID_t mem_type_id) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_mem_of_type(): enter:\n"); + + return ((vied_nci_mem_get_type(mem_id) == mem_type_id) && + (mem_type_id != VIED_NCI_N_MEM_TYPE_ID)); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_cell_mem_of_type( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index, + const vied_nci_mem_type_ID_t mem_type_id) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_cell_mem_of_type(): enter:\n"); + + return ((vied_nci_cell_get_mem_type(cell_id, mem_index) == mem_type_id) + && (mem_type_id != VIED_NCI_N_MEM_TYPE_ID)); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_has_cell_mem_of_id( + const vied_nci_cell_ID_t cell_id, + const vied_nci_mem_ID_t mem_id) +{ + uint16_t mem_index; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_has_cell_mem_of_id(): enter:\n"); + + for (mem_index = 0; mem_index < VIED_NCI_N_MEM_TYPE_ID; mem_index++) { + if ((vied_nci_cell_get_mem(cell_id, mem_index) == mem_id) && + (mem_id != VIED_NCI_N_MEM_ID)) { + break; + } + } + + return (mem_index < VIED_NCI_N_MEM_TYPE_ID); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +uint16_t vied_nci_cell_get_mem_count( + const vied_nci_cell_ID_t cell_id) +{ + uint16_t mem_count = 0; + vied_nci_cell_type_ID_t cell_type; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_get_mem_count(): enter:\n"); + + cell_type = vied_nci_cell_get_type(cell_id); + + if (cell_type < VIED_NCI_N_CELL_TYPE_ID) + mem_count = vied_nci_N_cell_mem[cell_type]; + + return mem_count; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_mem_type_ID_t vied_nci_cell_get_mem_type( + const vied_nci_cell_ID_t cell_id, + 
const uint16_t mem_index) +{ + vied_nci_mem_type_ID_t mem_type = VIED_NCI_N_MEM_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_get_mem_type(): enter:\n"); + + if ((cell_id < VIED_NCI_N_CELL_ID) && + (mem_index < VIED_NCI_N_MEM_TYPE_ID)) { + mem_type = vied_nci_cell_mem_type[ + vied_nci_cell_get_type(cell_id)][mem_index]; + } + + return mem_type; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_mem_ID_t vied_nci_cell_get_mem( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index) +{ + vied_nci_mem_ID_t mem_id = VIED_NCI_N_MEM_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_get_mem(): enter:\n"); + + if ((cell_id < VIED_NCI_N_CELL_ID) && + (mem_index < VIED_NCI_N_MEM_TYPE_ID)) { + mem_id = vied_nci_cell_mem[cell_id][mem_index]; + } + + return mem_id; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_mem_type_ID_t vied_nci_cell_type_get_mem_type( + const vied_nci_cell_type_ID_t cell_type_id, + const uint16_t mem_index) +{ + vied_nci_mem_type_ID_t mem_type = VIED_NCI_N_MEM_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_type_get_mem_type(): enter:\n"); + + if ((cell_type_id < VIED_NCI_N_CELL_TYPE_ID) + && (mem_index < VIED_NCI_N_MEM_TYPE_ID)) { + mem_type = vied_nci_cell_mem_type[cell_type_id][mem_index]; + } + + return mem_type; +} + +#endif /* __PSYS_SYSTEM_GLOBAL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/vied_nci_psys_system.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/vied_nci_psys_system.c new file mode 100644 index 000000000000..b0e0aebb6e77 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/sim/src/vied_nci_psys_system.c @@ -0,0 +1,26 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_sim_storage_class.h" + +/* + * Functions to possibly inline + */ + +#ifdef __IA_CSS_PSYS_SIM_INLINE__ +STORAGE_CLASS_INLINE int +__ia_css_psys_system_global_avoid_warning_on_empty_file(void) { return 0; } +#else /* __IA_CSS_PSYS_SIM_INLINE__ */ +#include "psys_system_global_impl.h" +#endif /* __IA_CSS_PSYS_SIM_INLINE__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_manifest_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_manifest_types.h new file mode 100644 index 000000000000..4a2f96e9405e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_manifest_types.h @@ -0,0 +1,102 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_MANIFEST_TYPES_H
+#define __IA_CSS_PSYS_MANIFEST_TYPES_H
+
+/*! \file */
+
+/** @file ia_css_psys_manifest_types.h
+ *
+ * The types belonging to the terminal/program/
+ * program group manifest static module
+ */
+
+#include 
+#include "vied_nci_psys_resource_model.h"
+
+/* This value is used in the manifest to indicate that the resource
+ * offset field must be ignored and the resource is relocatable
+ */
+#define IA_CSS_PROGRAM_MANIFEST_RESOURCE_OFFSET_IS_RELOCATABLE ((vied_nci_resource_size_t)(-1))
+
+/*
+ * Connection type defining the interface source/sink
+ *
+ * Note that the connection type does not define the
+ * real-time configuration of the system, i.e. it
+ * does not describe whether a source and sink
+ * program group or sub-system operate synchronously;
+ * that is a program script property {online, offline}
+ * (see FAS 5.16.3)
+ */
+#define IA_CSS_CONNECTION_BITMAP_BITS 8
+typedef uint8_t ia_css_connection_bitmap_t;
+
+#define IA_CSS_CONNECTION_TYPE_BITS 32
+typedef enum ia_css_connection_type {
+	/** The terminal is in DDR */
+	IA_CSS_CONNECTION_MEMORY = 0,
+	/** The terminal is a (watermark) queued stream over DDR */
+	IA_CSS_CONNECTION_MEMORY_STREAM,
+	/** The terminal is a device port */
+	IA_CSS_CONNECTION_STREAM,
+	IA_CSS_N_CONNECTION_TYPES
+} ia_css_connection_type_t;
+
+#define IA_CSS_PROGRAM_TYPE_BITS 32
+typedef enum ia_css_program_type {
+	IA_CSS_PROGRAM_TYPE_SINGULAR = 0,
+	IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB,
+	IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER,
+	IA_CSS_PROGRAM_TYPE_PARALLEL_SUB,
+	IA_CSS_PROGRAM_TYPE_PARALLEL_SUPER,
+	IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB,
+	IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER,
+/*
+ * Future extension; a bitmap coding starts making more sense
+ *
+	IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB_PARALLEL_SUB,
+	IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB_PARALLEL_SUPER,
+	IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER_PARALLEL_SUB,
+	IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER_PARALLEL_SUPER,
+ */
+	IA_CSS_N_PROGRAM_TYPES
+} ia_css_program_type_t;
+
+#define IA_CSS_PROGRAM_GROUP_ID_BITS 32
+typedef uint32_t ia_css_program_group_ID_t;
+#define IA_CSS_PROGRAM_ID_BITS 32
+typedef uint32_t ia_css_program_ID_t;
+
+#define IA_CSS_PROGRAM_INVALID_ID ((uint32_t)(-1))
+#define IA_CSS_PROGRAM_GROUP_INVALID_ID ((uint32_t)(-1))
+
+typedef struct ia_css_program_group_manifest_s
+ia_css_program_group_manifest_t;
+typedef struct ia_css_program_manifest_s
+ia_css_program_manifest_t;
+typedef struct ia_css_data_terminal_manifest_s
+ia_css_data_terminal_manifest_t;
+
+/* ============ Program Control Init Terminal Manifest - START ============ */
+typedef struct ia_css_program_control_init_manifest_program_desc_s
+	ia_css_program_control_init_manifest_program_desc_t;
+
+typedef struct ia_css_program_control_init_terminal_manifest_s
+	ia_css_program_control_init_terminal_manifest_t;
+/* ============ Program Control Init Terminal Manifest - END ============ */
+
+#endif /* __IA_CSS_PSYS_MANIFEST_TYPES_H */
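+
+/*
+ * Sketch (editor's illustration; the per-type bit layout is an assumption,
+ * not confirmed by this header): ia_css_connection_bitmap_t has room for
+ * one bit per ia_css_connection_type_t value, so a terminal allowing both
+ * memory and device-port connections could be encoded as:
+ *
+ *	ia_css_connection_bitmap_t conn =
+ *		(ia_css_connection_bitmap_t)((1u << IA_CSS_CONNECTION_MEMORY) |
+ *					     (1u << IA_CSS_CONNECTION_STREAM));
+ */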
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.h
new file mode 100644
index 000000000000..7a3b712fb1a9
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.h
@@ -0,0 +1,312 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_H
+#define __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_H
+
+#include "ia_css_psys_static_storage_class.h"
+
+/*! \file */
+
+/** @file ia_css_psys_program_group_manifest.h
+ *
+ * Define the methods on the program group manifest object that are not part of
+ * a single interface
+ */
+
+#include 
+
+#include  /* uint8_t */
+
+#include 
+
+#include 
+
+#include  /* ia_css_kernel_bitmap_t */
+#include "ia_css_terminal_manifest.h"
+#include "ia_css_rbm_manifest_types.h"
+
+#define IA_CSS_PROGRAM_GROUP_INVALID_ALIGNMENT ((uint8_t)(-1))
+
+/*! Get the stored size of the program group manifest object
+
+ @param	manifest[in]	program group manifest object
+
+ @return size, 0 on invalid argument
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+size_t ia_css_program_group_manifest_get_size(
+	const ia_css_program_group_manifest_t *manifest);
+
+/*! Get the program group ID of the program group manifest object
+
+ @param	manifest[in]	program group manifest object
+
+ @return program group ID, IA_CSS_PROGRAM_GROUP_INVALID_ID on invalid argument
+*/
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_program_group_ID_t
+ia_css_program_group_manifest_get_program_group_ID(
+	const ia_css_program_group_manifest_t *manifest);
+
+/*! Set the program group ID of the program group manifest object
+
+ @param	manifest[in]	program group manifest object
+ @param	id[in]		program group ID
+
+ @return 0 on success, -1 on invalid manifest argument
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+int ia_css_program_group_manifest_set_program_group_ID(
+	ia_css_program_group_manifest_t *manifest,
+	ia_css_program_group_ID_t id);
+
+/*! Get the storage alignment constraint of the program group binary data
+
+ @param	manifest[in]	program group manifest object
+
+ @return alignment, IA_CSS_PROGRAM_GROUP_INVALID_ALIGNMENT on invalid manifest
+	argument
+*/
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+uint8_t ia_css_program_group_manifest_get_alignment(
+	const ia_css_program_group_manifest_t *manifest);
+
+/*! Set the storage alignment constraint of the program group binary data
+
+ @param	manifest[in]	program group manifest object
+ @param	alignment[in]	alignment desired
+
+ @return < 0 on invalid manifest argument
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+int ia_css_program_group_manifest_set_alignment(
+	ia_css_program_group_manifest_t *manifest,
+	const uint8_t alignment);
+
+/*! Get the kernel enable bitmap of the program group
+
+ @param	manifest[in]	program group manifest object
+
+ @return bitmap, 0 on invalid manifest argument
+ */
+extern ia_css_kernel_bitmap_t
+ia_css_program_group_manifest_get_kernel_bitmap(
+	const ia_css_program_group_manifest_t *manifest);
+
+/*! Set the kernel enable bitmap of the program group
+
+ @param	manifest[in]	program group manifest object
+ @param	bitmap[in]	kernel enable bitmap
+
+ @return < 0 on invalid manifest argument
+ */
+extern int ia_css_program_group_manifest_set_kernel_bitmap(
+	ia_css_program_group_manifest_t *manifest,
+	const ia_css_kernel_bitmap_t bitmap);
+
+/*! Get the number of programs in the program group manifest object
+
+ @param	manifest[in]	program group manifest object
+
+ @return program count, 0 on invalid manifest argument
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+uint8_t ia_css_program_group_manifest_get_program_count(
+	const ia_css_program_group_manifest_t *manifest);
+
+/*! Get the number of terminals in the program group manifest object
+
+ @param	manifest[in]	program group manifest object
+
+ @return terminal count, 0 on invalid manifest argument
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+uint8_t ia_css_program_group_manifest_get_terminal_count(
+	const ia_css_program_group_manifest_t *manifest);
+
+/*! Get the (pointer to) private data blob in the manifest
+
+ @param	manifest[in]	program group manifest object
+
+ @return private data blob, NULL on invalid manifest argument
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+void *ia_css_program_group_manifest_get_private_data(
+	const ia_css_program_group_manifest_t *manifest);
+
+/*! Get the (pointer to) routing bitmap (rbm) manifest
+
+ @param	manifest[in]	program group manifest object
+
+ @return rbm manifest, NULL on invalid manifest argument
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_rbm_manifest_t *
+ia_css_program_group_manifest_get_rbm_manifest(
+	const ia_css_program_group_manifest_t *manifest);
+
+/*! Get the (pointer to) indexed program manifest in the program group manifest
+ * object
+
+ @param	manifest[in]	program group manifest object
+ @param	program_index[in]	index of the program manifest object
+
+ @return program manifest, NULL on invalid arguments
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_program_manifest_t *
+ia_css_program_group_manifest_get_prgrm_mnfst(
+	const ia_css_program_group_manifest_t *manifest,
+	const unsigned int program_index);
+
+/*! Get the (pointer to) indexed terminal manifest in the program group
+ * manifest object
+
+ @param	manifest[in]	program group manifest object
+ @param	terminal_index[in]	index of the terminal manifest object
+
+ @return terminal manifest, NULL on invalid arguments
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_terminal_manifest_t *
+ia_css_program_group_manifest_get_term_mnfst(
+	const ia_css_program_group_manifest_t *manifest,
+	const unsigned int terminal_index);
+
+/*! Get the (pointer to) indexed data terminal manifest in the program group
+ * manifest object
+
+ @param	manifest[in]	program group manifest object
+ @param	terminal_index[in]	index of the terminal manifest object
+
+ @return data terminal manifest, NULL on invalid arguments
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_data_terminal_manifest_t *
+ia_css_program_group_manifest_get_data_terminal_manifest(
+	const ia_css_program_group_manifest_t *manifest,
+	const unsigned int terminal_index);
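+
+/*
+ * Sketch (editor's illustration): walking every terminal manifest of a
+ * program group manifest. Assumes `manifest` was already validated by the
+ * caller.
+ *
+ *	uint8_t i;
+ *	uint8_t n = ia_css_program_group_manifest_get_terminal_count(manifest);
+ *
+ *	for (i = 0; i < n; i++) {
+ *		ia_css_terminal_manifest_t *term =
+ *		    ia_css_program_group_manifest_get_term_mnfst(manifest, i);
+ *
+ *		if (term == NULL)
+ *			break;	// invalid index or corrupt manifest
+ *	}
+ */
+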
+/*! Get the (pointer to) indexed parameter terminal manifest in the program
+ * group manifest object
+
+ @param	manifest[in]	program group manifest object
+ @param	terminal_index[in]	index of the terminal manifest object
+
+ @return parameter terminal manifest, NULL on invalid arguments
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_param_terminal_manifest_t *
+ia_css_program_group_manifest_get_param_terminal_manifest(
+	const ia_css_program_group_manifest_t *manifest,
+	const unsigned int terminal_index);
+
+/*! Get the (pointer to) indexed spatial param terminal manifest in the program
+ * group manifest object
+
+ @param	manifest[in]	program group manifest object
+ @param	terminal_index[in]	index of the terminal manifest object
+
+ @return spatial param terminal manifest, NULL on invalid arguments
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_spatial_param_terminal_manifest_t *
+ia_css_program_group_manifest_get_spatial_param_terminal_manifest(
+	const ia_css_program_group_manifest_t *manifest,
+	const unsigned int terminal_index);
+
+/*! Get the (pointer to) indexed sliced param terminal manifest in the program
+ * group manifest object
+
+ @param	manifest[in]	program group manifest object
+ @param	terminal_index[in]	index of the terminal manifest object
+
+ @return sliced param terminal manifest, NULL on invalid arguments
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_sliced_param_terminal_manifest_t *
+ia_css_program_group_manifest_get_sliced_param_terminal_manifest(
+	const ia_css_program_group_manifest_t *manifest,
+	const unsigned int terminal_index);
+
+/*! Get the (pointer to) indexed program terminal manifest in the program group
+ * manifest object
+
+ @param	manifest[in]	program group manifest object
+ @param	terminal_index[in]	index of the terminal manifest object
+
+ @return program terminal manifest, NULL on invalid arguments
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_program_terminal_manifest_t *
+ia_css_program_group_manifest_get_program_terminal_manifest(
+	const ia_css_program_group_manifest_t *manifest,
+	const unsigned int terminal_index);
+
+/*! Initialize the program group manifest
+
+ @param	manifest[in]	program group manifest object
+ @param	program_count[in]	number of programs.
+ @param	terminal_count[in]	number of terminals.
+ @param	program_dependencies[in]	program dependencies for programs in pg.
+ @param	terminal_dependencies[in]	terminal dependencies for programs in pg.
+ @param	terminal_type[in]	array of terminal types, binary specific
+				static frame data
+ @param	cached_in_param_section_count[in]	Number of parameter terminal
+				sections
+ @param	cached_out_param_section_count[in]	Number of parameter out
+				terminal sections
+ @param	spatial_param_section_count[in]	Array[spatial_terminal_count]
+				with sections per spatial terminal
+ @param	sliced_in_param_section_count[in]	Array[sliced_in_terminal_count]
+				with sections per sliced in terminal
+ @param	sliced_out_param_section_count[in]	Array[sliced_out_terminal_count]
+				with sections per sliced out terminal
+ @param	fragment_param_section_count[in]	Number of fragment parameter
+				sections of the program init terminal,
+ @param	kernel_fragment_seq_count[in]	Number of kernel fragment
+				sequence info.
+ @param progctrlinit_load_section_counts[in] Number of progctrinit load + sections (size of array is program_count) + @param progctrlinit_connect_section_counts[in] Number of progctrinit connect + sections (size of array is program_count) + @return none; + */ +extern void ia_css_program_group_manifest_init( + ia_css_program_group_manifest_t *blob, + const uint8_t program_count, + const uint8_t terminal_count, + const uint8_t *program_dependencies, + const uint8_t *terminal_dependencies, + const ia_css_terminal_type_t *terminal_type, + const uint16_t cached_in_param_section_count, + const uint16_t cached_out_param_section_count, + const uint16_t *spatial_param_section_count, + const uint16_t fragment_param_section_count, + const uint16_t *sliced_in_param_section_count, + const uint16_t *sliced_out_param_section_count, + const uint16_t kernel_fragment_seq_count, + const uint16_t *progctrlinit_load_section_counts, + const uint16_t *progctrlinit_connect_section_counts); + +#ifdef __IA_CSS_PSYS_STATIC_INLINE__ +#include "ia_css_psys_program_group_manifest_impl.h" +#endif /* __IA_CSS_PSYS_STATIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.hsys.user.h new file mode 100644 index 000000000000..3f9927b27bb0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.hsys.user.h @@ -0,0 +1,72 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_HSYS_USER_H +#define __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_HSYS_USER_H + +/*! \file */ + +/** @file ia_css_psys_program_group_manifest.hsys.user.h + * + * Define the methods on the program group manifest object: Hsys user interface + */ + +#include + +#include /* bool */ + +/*! Print the program group manifest object to file/stream + + @param manifest[in] program group manifest object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_program_group_manifest_print( + const ia_css_program_group_manifest_t *manifest, + void *fid); + +/*! Read the program group manifest object from file/stream + + @param fid[in] file/stream handle + + @return NULL on error + */ +extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_read( + void *fid); + +/*! Write the program group manifest object to file/stream + + @param manifest[in] program group manifest object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_program_group_manifest_write( + const ia_css_program_group_manifest_t *manifest, + void *fid); + +/*! 
Boolean test if the program group manifest is valid + + @param manifest[in] program group manifest + + @return true if program group manifest is correct, false on error + */ +extern bool ia_css_is_program_group_manifest_valid( + const ia_css_program_group_manifest_t *manifest); + +#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_HSYS_USER_H */ + + + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.sim.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.sim.h new file mode 100644 index 000000000000..8220c0612137 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.sim.h @@ -0,0 +1,130 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_SIM_H +#define __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_SIM_H + +/*! \file */ + +/** @file ia_css_psys_program_group_manifest.sim.h + * + * Define the methods on the program group manifest object: Simulation only + */ + +#include + +#include /* uint8_t */ +#include "ia_css_terminal_defs.h" + +/*! Create a program group manifest object from specification + + @param specification[in] specification (index) + + @return NULL on error + */ +extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_create( + const unsigned int specification); + +/*! Destroy the program group manifest object + + @param manifest[in] program group manifest + + @return NULL + */ +extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_destroy( + ia_css_program_group_manifest_t *manifest); + +/*! Compute the size of storage required for allocating + * the program group (PG) manifest object + + @param program_count[in] Number of programs in the PG + @param terminal_count[in] Number of terminals on the PG + @param program_dependency_count[in] Array[program_count] with the PG + @param terminal_dependency_count[in] Array[program_count] with the + terminal dependencies + @param terminal_type[in] Array[terminal_count] with the + terminal type + @param cached_in_param_section_count[in] Number of parameter + in terminal sections + @param cached_out_param_section_count[in] Number of parameter + out terminal sections + @param sliced_param_section_count[in] Array[sliced_terminal_count] + with sections per + sliced in terminal + @param sliced_out_param_section_count[in] Array[sliced_terminal_count] + with sections per + sliced out terminal + @param spatial_param_section_count[in] Array[spatial_terminal_count] + with sections per + spatial terminal + @param fragment_param_section_count[in] Number of fragment parameter + sections of the + program init terminal, + @param kernel_fragment_seq_count[in] Number of + kernel_fragment_seq_count. 
+ @param progctrlinit_load_section_counts[in] Number of progctrinit load + sections (size of array is program_count) + @param progctrlinit_connect_section_counts[in] Number of progctrinit connect + sections (size of array is program_count) + @return 0 on error + */ +size_t ia_css_sizeof_program_group_manifest( + const uint8_t program_count, + const uint8_t terminal_count, + const uint8_t *program_dependency_count, + const uint8_t *terminal_dependency_count, + const ia_css_terminal_type_t *terminal_type, + const uint16_t cached_in_param_section_count, + const uint16_t cached_out_param_section_count, + const uint16_t *spatial_param_section_count, + const uint16_t fragment_param_section_count, + const uint16_t *sliced_param_section_count, + const uint16_t *sliced_out_param_section_count, + const uint16_t kernel_fragment_seq_count, + const uint16_t *progctrlinit_load_section_counts, + const uint16_t *progctrlinit_connect_section_counts); + +/*! Create (the storage for) the program group manifest object + + @param program_count[in] Number of programs in the program group + @param terminal_count[in] Number of terminals on the program group + @param program_dependency_count[in] Array[program_count] with the + program dependencies + @param terminal_dependency_count[in] Array[program_count] with the + terminal dependencies + @param terminal_type[in] Array[terminal_count] with the + terminal type + + @return NULL on error + */ +extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_alloc( + const uint8_t program_count, + const uint8_t terminal_count, + const uint8_t *program_dependency_count, + const uint8_t *terminal_dependency_count, + const ia_css_terminal_type_t *terminal_type); + +/*! Free (the storage of) the program group manifest object + + @param manifest[in] program group manifest + + @return NULL + */ +extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_free( + ia_css_program_group_manifest_t *manifest); + +#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_SIM_H */ + + + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.h new file mode 100644 index 000000000000..b7333671ed4f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.h @@ -0,0 +1,488 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_MANIFEST_H +#define __IA_CSS_PSYS_PROGRAM_MANIFEST_H + +/*! \file */ + +/** @file ia_css_psys_program_manifest.h + * + * Define the methods on the program manifest object that are not part of a + * single interface + */ + +#include + +#include /* uint8_t */ + +#include + +#include + +#include /* ia_css_kernel_bitmap_t */ + +/* + * Resources needs + */ +#include + +#define IA_CSS_PROGRAM_INVALID_DEPENDENCY ((uint8_t)(-1)) + +/*! 
Check if the program manifest object specifies a fixed cell allocation + + @param manifest[in] program manifest object + + @return has_fixed_cell, false on invalid argument + */ +extern bool ia_css_has_program_manifest_fixed_cell( + const ia_css_program_manifest_t *manifest); + +/*! Get the stored size of the program manifest object + + @param manifest[in] program manifest object + + @return size, 0 on invalid argument + */ +extern size_t ia_css_program_manifest_get_size( + const ia_css_program_manifest_t *manifest); + +/*! Get the program ID of the program manifest object + + @param manifest[in] program manifest object + + @return program ID, IA_CSS_PROGRAM_INVALID_ID on invalid argument + */ +extern ia_css_program_ID_t ia_css_program_manifest_get_program_ID( + const ia_css_program_manifest_t *manifest); + +/*! Set the program ID of the program manifest object + + @param manifest[in] program manifest object + + @param program ID + + @return 0 on success, -1 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_program_ID( + ia_css_program_manifest_t *manifest, + ia_css_program_ID_t id); + +/*! Get the (pointer to) the program group manifest parent of the program + * manifest object + + @param manifest[in] program manifest object + + @return the pointer to the parent, NULL on invalid manifest argument + */ +extern ia_css_program_group_manifest_t *ia_css_program_manifest_get_parent( + const ia_css_program_manifest_t *manifest); + +/*! Set the (pointer to) the program group manifest parent of the program + * manifest object + + @param manifest[in] program manifest object + @param program_offset[in] this program's offset from + program_group_manifest's base address. + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_parent_offset( + ia_css_program_manifest_t *manifest, + int32_t program_offset); + +/*! Get the type of the program manifest object + + @param manifest[in] program manifest object + + @return program type, limit value (IA_CSS_N_PROGRAM_TYPES) on invalid manifest + argument +*/ +extern ia_css_program_type_t ia_css_program_manifest_get_type( + const ia_css_program_manifest_t *manifest); + +/*! Set the type of the program manifest object + + @param manifest[in] program manifest object + @param program_type[in] program type + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_type( + ia_css_program_manifest_t *manifest, + const ia_css_program_type_t program_type); + +/*! Set the cell id of the program manifest object + + @param manifest[in] program manifest object + @param program_cell_id[in] program cell id + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_cell_ID( + ia_css_program_manifest_t *manifest, + const vied_nci_cell_ID_t cell_id); + +/*! Set the cell type of the program manifest object + + @param manifest[in] program manifest object + @param program_cell_type[in] program cell type + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_cell_type_ID( + ia_css_program_manifest_t *manifest, + const vied_nci_cell_type_ID_t cell_type_id); + +/*! Set cells bitmap for the program + + @param manifest[in] program manifest object + @param bitmap[in] bitmap + + @return 0 when not applicable and/or invalid arguments + */ +extern int ia_css_program_manifest_set_cells_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_resource_bitmap_t bitmap); + +/*! 
Get cells bitmap for the program + + @param manifest[in] program manifest object + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_bitmap_t ia_css_program_manifest_get_cells_bitmap( + const ia_css_program_manifest_t *manifest); + +/*! Set DFM port bitmap for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + @param bitmap[in] bitmap + + @return 0 when not applicable and/or invalid arguments + */ +extern int ia_css_program_manifest_set_dfm_port_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get bitmap of DFM ports requested for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + + @return DFM port bitmap + */ +extern vied_nci_resource_bitmap_t ia_css_program_manifest_get_dfm_port_bitmap( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id); + + +/*! Set active DFM port specification bitmap for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + @param bitmap[in] bitmap + + @return 0 when not applicable and/or invalid arguments + */ +extern int ia_css_program_manifest_set_dfm_active_port_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get active DFM port specification bitmap for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_bitmap_t ia_css_program_manifest_get_dfm_active_port_bitmap( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id); + +/*! Set DFM device relocatability specification for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + @param is_relocatable[in] 1 if dfm device ports are relocatable, 0 otherwise + + @return 0 when not applicable and/or invalid arguments + */ +extern int ia_css_program_manifest_set_is_dfm_relocatable( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const uint8_t is_relocatable); + +/*! Get DFM device relocatability specification for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + + @return 1 if dfm device ports are relocatable, 0 otherwise + */ +extern uint8_t ia_css_program_manifest_get_is_dfm_relocatable( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id); + + +/*! Get the memory resource (size) specification for a memory + that belongs to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_int_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! 
Set the memory resource (size) specification for a memory + that belongs to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type id + @param int_mem_size[in] internal memory size + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_int_mem_size( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t int_mem_size); + +/*! Get the memory resource (size) specification for a memory + that does not belong to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Set the memory resource (size) specification for a memory + that does not belong to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type id + @param ext_mem_size[in] external memory size + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_ext_mem_size( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t ext_mem_size); + +/*! Get a device channel resource (size) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id); + +/*! Set a device channel resource (size) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + @param dev_chn_size[in] device channel size + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_dev_chn_size( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t dev_chn_size); + +/*! Set a device channel resource (offset) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + @param dev_chn_offset[in] device channel offset + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_dev_chn_offset( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t dev_chn_offset); + + +/*! Set the memory resource (offset) specification for a memory + that does not belong to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type id + @param ext_mem_offset[in] external memory offset + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_ext_mem_offset( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t ext_mem_offset); + +/*! 
Get a device channel resource (offset) specification
+
+ @param	manifest[in]	program manifest object
+ @param	dev_chn_id[in]	device channel ID
+
+ @return Valid fixed offset (if value is greater or equal to 0) or
+	IA_CSS_PROGRAM_MANIFEST_RESOURCE_OFFSET_IS_RELOCATABLE if offset
+	is relocatable
+ */
+extern vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_offset(
+	const ia_css_program_manifest_t *manifest,
+	const vied_nci_dev_chn_ID_t dev_chn_id);
+
+/*! Get the memory resource (offset) specification for a memory
+ that does not belong to the cell where the program will be mapped.
+
+ @param	manifest[in]	program manifest object
+ @param	mem_type_id[in]	mem type ID
+
+ @return Valid fixed offset (if value is greater or equal to 0) or
+	IA_CSS_PROGRAM_MANIFEST_RESOURCE_OFFSET_IS_RELOCATABLE if offset
+	is relocatable
+ */
+extern vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_offset(
+	const ia_css_program_manifest_t *manifest,
+	const vied_nci_mem_type_ID_t mem_type_id);
+
+/*! Get the kernel composition of the program manifest object
+
+ @param	manifest[in]	program manifest object
+
+ @return bitmap, 0 on invalid arguments
+ */
+extern ia_css_kernel_bitmap_t ia_css_program_manifest_get_kernel_bitmap(
+	const ia_css_program_manifest_t *manifest);
+
+/*! Set the kernel composition of the program manifest object
+
+ @param	manifest[in]	program manifest object
+ @param	kernel_bitmap[in]	kernel composition bitmap
+
+ @return < 0 on invalid arguments
+ */
+extern int ia_css_program_manifest_set_kernel_bitmap(
+	ia_css_program_manifest_t *manifest,
+	const ia_css_kernel_bitmap_t kernel_bitmap);
+
+/*! Get the number of programs this program depends on from the program group
+ * manifest object
+
+ @param	manifest[in]	program manifest object
+
+ @return program dependency count
+ */
+extern uint8_t ia_css_program_manifest_get_program_dependency_count(
+	const ia_css_program_manifest_t *manifest);
+
+/*! Get the index of the program on which the program at this index depends,
+ from the program manifest object
+
+ @param	manifest[in]	program manifest object
+
+ @return program dependency,
+	IA_CSS_PROGRAM_INVALID_DEPENDENCY on invalid arguments
+ */
+extern uint8_t ia_css_program_manifest_get_program_dependency(
+	const ia_css_program_manifest_t *manifest,
+	const unsigned int index);
+
+/*! Set the index of the program on which the program at this index depends,
+ in the program manifest object
+
+ @param	manifest[in]	program manifest object
+ @param	program_dependency[in]	program dependency
+ @param	index[in]	dependency slot index
+
+ @return < 0 on invalid arguments
+ */
+extern int ia_css_program_manifest_set_program_dependency(
+	ia_css_program_manifest_t *manifest,
+	const uint8_t program_dependency,
+	const unsigned int index);
+
+/*! Get the number of terminals this program depends on from the program group
+ * manifest object
+
+ @param	manifest[in]	program manifest object
+
+ @return terminal dependency count
+ */
+extern uint8_t ia_css_program_manifest_get_terminal_dependency_count(
+	const ia_css_program_manifest_t *manifest);
+
+/*! Get the index of the terminal on which the program at this index depends,
+ from the program manifest object
+
+ @param	manifest[in]	program manifest object
+
+ @return terminal dependency, IA_CSS_PROGRAM_INVALID_DEPENDENCY on error
+ */
+uint8_t ia_css_program_manifest_get_terminal_dependency(
+	const ia_css_program_manifest_t *manifest,
+	const unsigned int index);
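+
+/*
+ * Sketch (editor's illustration): listing the program dependencies recorded
+ * in a program manifest. Assumes `pm` points to a valid manifest.
+ *
+ *	unsigned int i;
+ *	uint8_t n = ia_css_program_manifest_get_program_dependency_count(pm);
+ *
+ *	for (i = 0; i < n; i++) {
+ *		uint8_t dep =
+ *		    ia_css_program_manifest_get_program_dependency(pm, i);
+ *
+ *		if (dep == IA_CSS_PROGRAM_INVALID_DEPENDENCY)
+ *			break;	// out-of-range index
+ *	}
+ */
+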
+/*! Set the index of the terminal on which the program at this index depends,
+ in the program manifest object
+
+ @param	manifest[in]	program manifest object
+ @param	terminal_dependency[in]	terminal dependency
+ @param	index[in]	dependency slot index
+
+ @return < 0 on invalid arguments
+ */
+extern int ia_css_program_manifest_set_terminal_dependency(
+	ia_css_program_manifest_t *manifest,
+	const uint8_t terminal_dependency,
+	const unsigned int index);
+
+/*! Check if the program manifest object specifies a subnode program
+
+ @param	manifest[in]	program manifest object
+
+ @return is_subnode, false on invalid argument
+ */
+extern bool ia_css_is_program_manifest_subnode_program_type(
+	const ia_css_program_manifest_t *manifest);
+
+/*! Check if the program manifest object specifies a supernode program
+
+ @param	manifest[in]	program manifest object
+
+ @return is_supernode, false on invalid argument
+ */
+extern bool ia_css_is_program_manifest_supernode_program_type(
+	const ia_css_program_manifest_t *manifest);
+
+/*! Check if the program manifest object specifies a singular program
+
+ @param	manifest[in]	program manifest object
+
+ @return is_singular, false on invalid argument
+ */
+extern bool ia_css_is_program_manifest_singular_program_type(
+	const ia_css_program_manifest_t *manifest);
+
+#endif /* __IA_CSS_PSYS_PROGRAM_MANIFEST_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.kernel.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.kernel.h
new file mode 100644
index 000000000000..9d737b75a576
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.kernel.h
@@ -0,0 +1,96 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_KERNEL_H
+#define __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_KERNEL_H
+
+/*! \file */
+
+/** @file ia_css_psys_program_manifest.hsys.kernel.h
+ *
+ * Define the methods on the program manifest object: Hsys kernel interface
+ */
+
+#include 
+
+#include 
+
+#include  /* uint8_t */
+
+/*
+ * Resource needs
+ */
+
+/*! Get the cell ID from the program manifest object
+
+ @param	manifest[in]	program manifest object
+
+ Note: If the cell ID is specified, the program this manifest belongs to
+ must be mapped on that instance. If the cell ID is invalid (limit value)
+ then the cell type ID must be specified instead
+
+ @return cell ID, limit value if not specified
+ */
+extern vied_nci_cell_ID_t ia_css_program_manifest_get_cell_ID(
+	const ia_css_program_manifest_t *manifest);
+
+/*! Get the cell type ID from the program manifest object
+
+ @param	manifest[in]	program manifest object
+
+ Note: If the cell type ID is specified, the program this manifest belongs
+ to can be mapped on any instance of this cell type.
If the cell type ID is + invalid (limit value) then a specific cell ID must be specified instead + + @return cell ID, limit value if not specified + */ +extern vied_nci_cell_type_ID_t ia_css_program_manifest_get_cell_type_ID( + const ia_css_program_manifest_t *manifest); + +/*! Get the memory resource (size) specification for a memory + that belongs to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return 0 when not applicable + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_int_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Get the memory resource (size) specification for a memory + that does not belong to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return 0 when not applicable + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Get a device channel resource (size) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + + @return 0 when not applicable + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id); + +#endif /* __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_KERNEL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.user.h new file mode 100644 index 000000000000..087c84b7106e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.user.h @@ -0,0 +1,38 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_USER_H +#define __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_USER_H + +/*! \file */ + +/** @file ia_css_psys_program_manifest.hsys.user.h + * + * Define the methods on the program manifest object: Hsys user interface + */ + +#include + +/*! 
Print the program manifest object to file/stream + + @param manifest[in] program manifest object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_program_manifest_print( + const ia_css_program_manifest_t *manifest, + void *fid); + +#endif /* __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.sim.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.sim.h new file mode 100644 index 000000000000..0c2cef11f30e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.sim.h @@ -0,0 +1,61 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_MANIFEST_SIM_H +#define __IA_CSS_PSYS_PROGRAM_MANIFEST_SIM_H + +/*! \file */ + +/** @file ia_css_psys_program_manifest.sim.h + * + * Define the methods on the program manifest object: Simulation only + */ + +#include + +#include /* uint8_t */ + +/*! Compute the size of storage required for allocating + * the program manifest object + + @param program_dependency_count[in] Number of programs this one depends on + @param terminal_dependency_count[in] Number of terminals this one depends on + + @return 0 on error + */ +extern size_t ia_css_sizeof_program_manifest( + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count); + +/*! Create (the storage for) the program manifest object + + @param program_dependency_count[in] Number of programs this one depends on + @param terminal_dependency_count[in] Number of terminals this one depends on + + @return NULL on error + */ +extern ia_css_program_manifest_t *ia_css_program_manifest_alloc( + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count); + +/*! Destroy (the storage of) the program manifest object + + @param manifest[in] program manifest + + @return NULL + */ +extern ia_css_program_manifest_t *ia_css_program_manifest_free( + ia_css_program_manifest_t *manifest); + +#endif /* __IA_CSS_PSYS_PROGRAM_MANIFEST_SIM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_storage_class.h new file mode 100644 index 000000000000..f3c832b5a4a3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_storage_class.h @@ -0,0 +1,28 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +#define __IA_CSS_PSYS_STATIC_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_PSYS_STATIC_INLINE__ +#define IA_CSS_PSYS_STATIC_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +#else +#define IA_CSS_PSYS_STATIC_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PSYS_STATIC_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PSYS_STATIC_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_trace.h new file mode 100644 index 000000000000..7c5612cd0969 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_trace.h @@ -0,0 +1,103 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_STATIC_TRACE_H +#define __IA_CSS_PSYS_STATIC_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_STATIC_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_STATIC_TRACING_OVERRIDE)) + #define PSYS_STATIC_TRACE_LEVEL_CONFIG \ + PSYS_STATIC_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_STATIC_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_STATIC_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_STATIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_STATIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_STATIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_STATIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_STATIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_STATIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define 
PSYSAPI_STATIC_TRACE_LEVEL_VERBOSE \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #elif PSYSAPI_STATIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG
+ /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */
+ #define PSYSAPI_STATIC_TRACE_METHOD \
+ IA_CSS_TRACE_METHOD_NATIVE
+ #define PSYSAPI_STATIC_TRACE_LEVEL_ASSERT \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_STATIC_TRACE_LEVEL_ERROR \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_STATIC_TRACE_LEVEL_WARNING \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_STATIC_TRACE_LEVEL_INFO \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_STATIC_TRACE_LEVEL_DEBUG \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_STATIC_TRACE_LEVEL_VERBOSE \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #else
+ #error "No PSYSAPI_STATIC Tracing level defined"
+ #endif
+#else
+ /* Inherit Module trace setting */
+ #define PSYSAPI_STATIC_TRACE_METHOD \
+ PSYSAPI_TRACE_METHOD
+ #define PSYSAPI_STATIC_TRACE_LEVEL_ASSERT \
+ PSYSAPI_TRACE_LEVEL_ASSERT
+ #define PSYSAPI_STATIC_TRACE_LEVEL_ERROR \
+ PSYSAPI_TRACE_LEVEL_ERROR
+ #define PSYSAPI_STATIC_TRACE_LEVEL_WARNING \
+ PSYSAPI_TRACE_LEVEL_WARNING
+ #define PSYSAPI_STATIC_TRACE_LEVEL_INFO \
+ PSYSAPI_TRACE_LEVEL_INFO
+ #define PSYSAPI_STATIC_TRACE_LEVEL_DEBUG \
+ PSYSAPI_TRACE_LEVEL_DEBUG
+ #define PSYSAPI_STATIC_TRACE_LEVEL_VERBOSE \
+ PSYSAPI_TRACE_LEVEL_VERBOSE
+#endif
+
+#endif /* __IA_CSS_PSYS_STATIC_TRACE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.h
new file mode 100644
index 000000000000..0fa62b32e1a7
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.h
@@ -0,0 +1,423 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_TERMINAL_MANIFEST_H
+#define __IA_CSS_PSYS_TERMINAL_MANIFEST_H
+
+/*! \file */
+
+/** @file ia_css_psys_terminal_manifest.h
+ *
+ * Define the methods on the terminal manifest object that are not part of a
+ * single interface
+ */
+
+#include
+
+#include
+
+#include
+
+#include /* ia_css_frame_format_bitmap_t */
+#include /* ia_css_kernel_bitmap_t */
+
+#include /* size_t */
+#include "ia_css_terminal_manifest.h"
+#include "ia_css_terminal_manifest_base_types.h"
+
+
+/*! Check if the terminal manifest object specifies a spatial param terminal
+ * type
+
+ @param manifest[in] terminal manifest object
+
+ @return is_spatial_parameter_terminal, false on invalid manifest argument
+ */
+extern bool ia_css_is_terminal_manifest_spatial_parameter_terminal(
+ const ia_css_terminal_manifest_t *manifest);
+
+/*! Check if the terminal manifest object specifies a program terminal type
+
+ @param manifest[in] terminal manifest object
+
+ @return is_program_terminal, false on invalid manifest argument
+ */
+extern bool ia_css_is_terminal_manifest_program_terminal(
+ const ia_css_terminal_manifest_t *manifest);
+
+
+/*! Check if the terminal manifest object specifies a program control init
+ * terminal type
+ *
+ * @param manifest[in] terminal manifest object
+ *
+ * @return is_program_control_init_terminal, false on invalid manifest argument
+ */
+extern bool ia_css_is_terminal_manifest_program_control_init_terminal(
+ const ia_css_terminal_manifest_t *manifest);
+
+/*! Check if the terminal manifest object specifies a (cached) parameter
+ * terminal type
+
+ @param manifest[in] terminal manifest object
+
+ @return is_parameter_terminal, false on invalid manifest argument
+ */
+extern bool ia_css_is_terminal_manifest_parameter_terminal(
+ const ia_css_terminal_manifest_t *manifest);
+
+/*! Check if the terminal manifest object specifies a (sliced) parameter
+ * terminal type
+
+ @param manifest[in] terminal manifest object
+
+ @return is_sliced_terminal, false on invalid manifest argument
+ */
+extern bool ia_css_is_terminal_manifest_sliced_terminal(
+ const ia_css_terminal_manifest_t *manifest);
+
+/*! Check if the terminal manifest object specifies a data terminal type
+
+ @param manifest[in] terminal manifest object
+
+ @return is_data_terminal, false on invalid manifest argument
+ */
+extern bool ia_css_is_terminal_manifest_data_terminal(
+ const ia_css_terminal_manifest_t *manifest);
+
+/*! Get the stored size of the terminal manifest object
+
+ @param manifest[in] terminal manifest object
+
+ @return size, 0 on invalid manifest argument
+ */
+extern size_t ia_css_terminal_manifest_get_size(
+ const ia_css_terminal_manifest_t *manifest);
+
+/*! Get the (pointer to) the program group manifest parent of the terminal
+ * manifest object
+
+ @param manifest[in] terminal manifest object
+
+ @return the pointer to the parent, NULL on invalid manifest argument
+ */
+extern ia_css_program_group_manifest_t *ia_css_terminal_manifest_get_parent(
+ const ia_css_terminal_manifest_t *manifest);
+
+/*! Set the (pointer to) the program group manifest parent of the terminal
+ * manifest object
+
+ @param manifest[in] terminal manifest object
+ @param terminal_offset[in] this terminal's offset from
+ program_group_manifest base address.
+
+ @return < 0 on invalid arguments
+ */
+extern int ia_css_terminal_manifest_set_parent_offset(
+ ia_css_terminal_manifest_t *manifest,
+ int32_t terminal_offset);
+
+/*! Get the type of the terminal manifest object
+
+ @param manifest[in] terminal manifest object
+
+ @return terminal type, limit value (IA_CSS_N_TERMINAL_TYPES) on invalid
+ manifest argument
+*/
+extern ia_css_terminal_type_t ia_css_terminal_manifest_get_type(
+ const ia_css_terminal_manifest_t *manifest);
+
+/*! Set the type of the terminal manifest object
+
+ @param manifest[in] terminal manifest object
+ @param terminal_type[in] terminal type
+
+ @return < 0 on invalid manifest argument
+ */
+extern int ia_css_terminal_manifest_set_type(
+ ia_css_terminal_manifest_t *manifest,
+ const ia_css_terminal_type_t terminal_type);
+
+/*! Set the ID of the terminal manifest object
+
+ @param manifest[in] terminal manifest object
+ @param ID[in] terminal ID
+
+ @return < 0 on invalid manifest argument
+ */
+int ia_css_terminal_manifest_set_ID(
+ ia_css_terminal_manifest_t *manifest,
+ const ia_css_terminal_ID_t ID);
+
+/*! Get the ID of the terminal manifest object
+
+ @param manifest[in] terminal manifest object
+
+ @return terminal id, IA_CSS_TERMINAL_INVALID_ID on invalid manifest argument
+ */
+extern ia_css_terminal_ID_t ia_css_terminal_manifest_get_ID(
+ const ia_css_terminal_manifest_t *manifest);
+
+/*!
Get the supported frame types of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + + @return frame format bitmap, 0 on invalid manifest argument +*/ +extern ia_css_frame_format_bitmap_t + ia_css_data_terminal_manifest_get_frame_format_bitmap( + const ia_css_data_terminal_manifest_t *manifest); + +/*! Set the chosen frame type for the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param bitmap[in] frame format bitmap + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_frame_format_bitmap( + ia_css_data_terminal_manifest_t *manifest, + ia_css_frame_format_bitmap_t bitmap); + +/*! Check if the (data) terminal manifest object supports compression + + @param manifest[in] (data) terminal manifest object + + @return compression_support, true if compression is supported + */ +extern bool ia_css_data_terminal_manifest_can_support_compression( + const ia_css_data_terminal_manifest_t *manifest); + +/*! Set the compression support feature of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param compression_support[in] set true to support compression + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_compression_support( + ia_css_data_terminal_manifest_t *manifest, + bool compression_support); + +/*! Set the supported connection types of the terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param bitmap[in] connection bitmap + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_connection_bitmap( + ia_css_data_terminal_manifest_t *manifest, ia_css_connection_bitmap_t bitmap); + +/*! Get the connection bitmap of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + + @return connection bitmap, 0 on invalid manifest argument +*/ +extern ia_css_connection_bitmap_t + ia_css_data_terminal_manifest_get_connection_bitmap( + const ia_css_data_terminal_manifest_t *manifest); + +/*! Get the kernel dependency of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + + @return kernel bitmap, 0 on invalid manifest argument + */ +extern ia_css_kernel_bitmap_t ia_css_data_terminal_manifest_get_kernel_bitmap( + const ia_css_data_terminal_manifest_t *manifest); + +/*! Set the kernel dependency of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param kernel_bitmap[in] kernel dependency bitmap + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_kernel_bitmap( + ia_css_data_terminal_manifest_t *manifest, + const ia_css_kernel_bitmap_t kernel_bitmap); + +/*! Set the unique kernel dependency of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param index[in] kernel dependency bitmap index + + @return < 0 on invalid argument(s) + */ +extern int ia_css_data_terminal_manifest_set_kernel_bitmap_unique( + ia_css_data_terminal_manifest_t *manifest, + const unsigned int index); + +/*! 
Set the min size of the (data) terminal manifest object
+
+ @param manifest[in] (data) terminal manifest object
+ @param min_size[in] Minimum size of the frame array
+
+ @return < 0 on invalid manifest argument
+ */
+extern int ia_css_data_terminal_manifest_set_min_size(
+ ia_css_data_terminal_manifest_t *manifest,
+ const uint16_t min_size[IA_CSS_N_DATA_DIMENSION]);
+
+/*! Set the max size of the (data) terminal manifest object
+
+ @param manifest[in] (data) terminal manifest object
+ @param max_size[in] Maximum size of the frame array
+
+ @return < 0 on invalid manifest argument
+ */
+extern int ia_css_data_terminal_manifest_set_max_size(
+ ia_css_data_terminal_manifest_t *manifest,
+ const uint16_t max_size[IA_CSS_N_DATA_DIMENSION]);
+
+/*! Get the min size of the (data) terminal manifest object
+
+ @param manifest[in] (data) terminal manifest object
+ @param min_size[out] Minimum size of the frame array
+
+ @return < 0 on invalid manifest argument
+ */
+extern int ia_css_data_terminal_manifest_get_min_size(
+ const ia_css_data_terminal_manifest_t *manifest,
+ uint16_t min_size[IA_CSS_N_DATA_DIMENSION]);
+
+/*! Get the max size of the (data) terminal manifest object
+
+ @param manifest[in] (data) terminal manifest object
+ @param max_size[out] Maximum size of the frame array
+
+ @return < 0 on invalid manifest argument
+ */
+extern int ia_css_data_terminal_manifest_get_max_size(
+ const ia_css_data_terminal_manifest_t *manifest,
+ uint16_t max_size[IA_CSS_N_DATA_DIMENSION]);
+
+/*! Set the min fragment size of the (data) terminal manifest object
+
+ @param manifest[in] (data) terminal manifest object
+ @param min_size[in] Minimum size of the fragment array
+
+ @return < 0 on invalid manifest argument
+ */
+extern int ia_css_data_terminal_manifest_set_min_fragment_size(
+ ia_css_data_terminal_manifest_t *manifest,
+ const uint16_t min_size[IA_CSS_N_DATA_DIMENSION]);
+
+/*! Set the max fragment size of the (data) terminal manifest object
+
+ @param manifest[in] (data) terminal manifest object
+ @param max_size[in] Maximum size of the fragment array
+
+ @return < 0 on invalid manifest argument
+ */
+extern int ia_css_data_terminal_manifest_set_max_fragment_size(
+ ia_css_data_terminal_manifest_t *manifest,
+ const uint16_t max_size[IA_CSS_N_DATA_DIMENSION]);
+
+/*! Get the min fragment size of the (data) terminal manifest object
+
+ @param manifest[in] (data) terminal manifest object
+ @param min_size[out] Minimum size of the fragment array
+
+ @return < 0 on invalid manifest argument
+ */
+extern int ia_css_data_terminal_manifest_get_min_fragment_size(
+ const ia_css_data_terminal_manifest_t *manifest,
+ uint16_t min_size[IA_CSS_N_DATA_DIMENSION]);
+
+/*! Get the max fragment size of the (data) terminal manifest object
+
+ @param manifest[in] (data) terminal manifest object
+ @param max_size[out] Maximum size of the fragment array
+
+ @return < 0 on invalid manifest argument
+ */
+extern int ia_css_data_terminal_manifest_get_max_fragment_size(
+ const ia_css_data_terminal_manifest_t *manifest,
+ uint16_t max_size[IA_CSS_N_DATA_DIMENSION]);
+
+/*!
+ * Get the program control init connect section count for program prog.
+ * @param prog[in] program control init terminal program desc
+ * @return number of connect sections for program prog.
+ */
+
+extern
+unsigned int ia_css_program_control_init_terminal_manifest_get_connect_section_count(
+ const ia_css_program_control_init_manifest_program_desc_t *prog);
+
+
+/*!
+ * Get the program control init load section count for program prog.
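+ *
+ * (Illustrative note, not part of the original interface text: the
+ * per-program load and connect section counts returned by these two
+ * getters are the same quantities that a caller passes, as arrays, to
+ * ia_css_program_control_init_terminal_manifest_get_size() when sizing
+ * the program control init terminal manifest.)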
+ * @param prog[in] program control init terminal program desc
+ * @return number of load sections for program prog.
+ */
+
+extern
+unsigned int ia_css_program_control_init_terminal_manifest_get_load_section_count(
+ const ia_css_program_control_init_manifest_program_desc_t *prog);
+
+/*!
+ * Get the program control init terminal manifest size.
+ * @param nof_programs[in] Number of programs.
+ * @param nof_load_sections[in] Array of size nof_programs,
+ * encoding the number of load sections.
+ * @param nof_connect_sections[in] Array of size nof_programs,
+ * encoding the number of connect sections.
+ * @return the required manifest size in bytes, 0 on invalid arguments
+ */
+extern
+unsigned int ia_css_program_control_init_terminal_manifest_get_size(
+ const uint16_t nof_programs,
+ const uint16_t *nof_load_sections,
+ const uint16_t *nof_connect_sections);
+
+/*!
+ * Get the program control init terminal manifest program desc.
+ * @param terminal[in] Program control init terminal.
+ * @param program[in] Program index.
+ * @return program control init terminal program desc (or NULL if error).
+ */
+extern
+ia_css_program_control_init_manifest_program_desc_t *
+ia_css_program_control_init_terminal_manifest_get_program_desc(
+ const ia_css_program_control_init_terminal_manifest_t *terminal,
+ unsigned int program);
+
+/*!
+ * Initialize the program control init terminal manifest.
+ * @param terminal[in] Program control init terminal.
+ * @param nof_programs[in] Number of programs
+ * @param nof_load_sections[in] Array of size nof_programs,
+ * encoding the number of load sections.
+ * @param nof_connect_sections[in] Array of size nof_programs,
+ * encoding the number of connect sections.
+ * @return < 0 on invalid manifest argument
+ */
+extern
+int ia_css_program_control_init_terminal_manifest_init(
+ ia_css_program_control_init_terminal_manifest_t *terminal,
+ const uint16_t nof_programs,
+ const uint16_t *nof_load_sections,
+ const uint16_t *nof_connect_sections);
+
+/*!
+ * Pretty prints the program control init terminal manifest.
+ * @param terminal[in] Program control init terminal.
+ */
+extern
+void ia_css_program_control_init_terminal_manifest_print(
+ ia_css_program_control_init_terminal_manifest_t *terminal);
+
+#endif /* __IA_CSS_PSYS_TERMINAL_MANIFEST_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.hsys.user.h
new file mode 100644
index 000000000000..1d2f06f3cbce
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.hsys.user.h
@@ -0,0 +1,38 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_TERMINAL_MANIFEST_HSYS_USER_H
+#define __IA_CSS_PSYS_TERMINAL_MANIFEST_HSYS_USER_H
+
+/*! \file */
+
+/** @file ia_css_psys_terminal_manifest.hsys.user.h
+ *
+ * Define the methods on the terminal manifest object: Hsys user interface
+ */
+
+#include
+
+/*!
Print the terminal manifest object to file/stream + + @param manifest[in] terminal manifest object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_terminal_manifest_print( + const ia_css_terminal_manifest_t *manifest, + void *fid); + +#endif /* __IA_CSS_PSYS_TERMINAL_MANIFEST_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.sim.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.sim.h new file mode 100644 index 000000000000..f7da810d82f1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.sim.h @@ -0,0 +1,48 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_MANIFEST_SIM_H +#define __IA_CSS_PSYS_TERMINAL_MANIFEST_SIM_H + +/*! \file */ + +/** @file ia_css_psys_terminal_manifest.sim.h + * + * Define the methods on the terminal manifest object: Simulation only + */ + +#include /* size_t */ +#include "ia_css_terminal.h" +#include "ia_css_terminal_manifest.h" +#include "ia_css_terminal_defs.h" + +/*! Create (the storage for) the terminal manifest object + + @param terminal_type[in] type of the terminal manifest {parameter, data} + + @return NULL on error + */ +extern ia_css_terminal_manifest_t *ia_css_terminal_manifest_alloc( + const ia_css_terminal_type_t terminal_type); + +/*! Destroy (the storage of) the terminal manifest object + + @param manifest[in] terminal manifest + + @return NULL + */ +extern ia_css_terminal_manifest_t *ia_css_terminal_manifest_free( + ia_css_terminal_manifest_t *manifest); + +#endif /* __IA_CSS_PSYS_TERMINAL_MANIFEST_SIM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest.c new file mode 100644 index 000000000000..5af4de746310 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest.c @@ -0,0 +1,1038 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#include "ia_css_psys_static_storage_class.h"
+#include "ia_css_psys_program_group_manifest.h"
+#include "ia_css_rbm_manifest.h"
+
+/*
+ * Functions to possibly inline
+ */
+
+#ifndef __IA_CSS_PSYS_STATIC_INLINE__
+#include "ia_css_psys_program_group_manifest_impl.h"
+#endif /* __IA_CSS_PSYS_STATIC_INLINE__ */
+
+/*
+ * Functions not to inline
+ */
+
+/*
+ * We need to refactor those files in order to
+ * build in the firmware only what is needed;
+ * the switches below are currently in place to work around
+ * compilation problems in the firmware
+ * (for example, the lack of uint64_t support)
+ */
+#if !defined(__HIVECC)
+size_t ia_css_sizeof_program_group_manifest(
+ const uint8_t program_count,
+ const uint8_t terminal_count,
+ const uint8_t *program_dependency_count,
+ const uint8_t *terminal_dependency_count,
+ const ia_css_terminal_type_t *terminal_type,
+ const uint16_t cached_in_param_section_count,
+ const uint16_t cached_out_param_section_count,
+ const uint16_t *spatial_param_section_count,
+ const uint16_t fragment_param_section_count,
+ const uint16_t *sliced_param_section_count,
+ const uint16_t *sliced_out_param_section_count,
+ const uint16_t kernel_fragment_seq_count,
+ const uint16_t *progctrlinit_load_section_counts,
+ const uint16_t *progctrlinit_connect_section_counts)
+{
+ size_t size = 0;
+ int i = 0;
+ int j = 0;
+ int m = 0;
+ int n = 0;
+
+ IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE,
+ "ia_css_sizeof_program_group_manifest(): enter:\n");
+
+ verifexit(program_count != 0);
+ verifexit(program_dependency_count != NULL);
+ verifexit(terminal_dependency_count != NULL);
+
+ size += sizeof(ia_css_program_group_manifest_t);
+
+ /* Private payload in the program group manifest */
+ size += ceil_mul(sizeof(struct ia_css_psys_private_pg_data),
+ sizeof(uint64_t));
+ /* RBM manifest in the program group manifest */
+ size += ceil_mul(sizeof(ia_css_rbm_manifest_t),
+ sizeof(uint64_t));
+
+ for (i = 0; i < (int)program_count; i++) {
+ size += ia_css_sizeof_program_manifest(
+ program_dependency_count[i],
+ terminal_dependency_count[i]);
+ }
+
+ for (i = 0; i < (int)terminal_count; i++) {
+ switch (terminal_type[i]) {
+ case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN:
+ size += ia_css_param_terminal_manifest_get_size(
+ cached_in_param_section_count);
+ break;
+ case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT:
+ size += ia_css_param_terminal_manifest_get_size(
+ cached_out_param_section_count);
+ break;
+ case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN:
+ case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT:
+ size += ia_css_spatial_param_terminal_manifest_get_size(
+ spatial_param_section_count[j]);
+ j++;
+ break;
+ case IA_CSS_TERMINAL_TYPE_PROGRAM:
+ size += ia_css_program_terminal_manifest_get_size(
+ fragment_param_section_count,
+ kernel_fragment_seq_count);
+ break;
+ case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT:
+ size += ia_css_program_control_init_terminal_manifest_get_size(
+ program_count,
+ progctrlinit_load_section_counts,
+ progctrlinit_connect_section_counts);
+ break;
+ case IA_CSS_TERMINAL_TYPE_DATA_IN:
+ case IA_CSS_TERMINAL_TYPE_DATA_OUT:
+ size += sizeof(ia_css_data_terminal_manifest_t);
+ break;
+ case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN:
+ size += ia_css_sliced_param_terminal_manifest_get_size(
+ sliced_param_section_count[m]);
+ m++;
+ break;
+ case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT:
+ size += ia_css_sliced_param_terminal_manifest_get_size(
+ sliced_out_param_section_count[n]);
+ n++;
+ break;
+ default:
+ IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING,
"ia_css_sizeof_program_group_manifest invalid argument\n"); + } + } + +EXIT: + if (0 == program_count || 0 == terminal_count || + NULL == program_dependency_count || + NULL == terminal_dependency_count) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_sizeof_program_group_manifest invalid argument\n"); + } + return size; +} + +/* + * Currently, the design of XNR kernel inside the *_pregdc program group, + * does not fit the exact model as is being asserted on in + * ia_css_is_program_group_manifest_valid. We therefore disable some checks. + * Further investigation is needed to determine whether *_pregdc program group + * can be canged or that the model must be changed. + * #define USE_SIMPLIFIED_GRAPH_MODEL 1 allows multiple programs to be + * connected to the same terminal, and it allows a kernel be mapped over + * multiple programs. + */ +#define USE_SIMPLIFIED_GRAPH_MODEL 1 + +/* + * Model and/or check refinements + * - Parallel programs do not yet have mutual exclusive alternatives + * - The pgram dependencies do not need to be acyclic + * - Parallel programs need to have an equal kernel requirement + */ +bool ia_css_is_program_group_manifest_valid( + const ia_css_program_group_manifest_t *manifest) +{ + int i; + bool is_valid = false; + uint8_t terminal_count; + uint8_t program_count; + ia_css_kernel_bitmap_t total_bitmap; + ia_css_kernel_bitmap_t check_bitmap; + ia_css_kernel_bitmap_t terminal_bitmap; + /* + * Use a standard bitmap type for the minimum logic to check the DAG, + * generic functions can be used for the kernel enable bitmaps; Later + */ + vied_nci_resource_bitmap_t resource_bitmap; + int terminal_bitmap_weight; + bool has_parameter_terminal_in = false; + bool has_parameter_terminal_out = false; + bool has_program_control_init_terminal = false; + bool has_program_terminal = false; + bool has_program_terminal_sequencer_info = false; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_program_group_manifest_valid(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(ia_css_program_group_manifest_get_size(manifest) != 0); + verifexit(ia_css_program_group_manifest_get_alignment(manifest) != 0); + verifexit(ia_css_program_group_manifest_get_program_group_ID(manifest) != 0); + + terminal_count = + ia_css_program_group_manifest_get_terminal_count(manifest); + program_count = + ia_css_program_group_manifest_get_program_count(manifest); + total_bitmap = + ia_css_program_group_manifest_get_kernel_bitmap(manifest); + check_bitmap = ia_css_kernel_bitmap_clear(); + resource_bitmap = vied_nci_bit_mask(VIED_NCI_RESOURCE_BITMAP_BITS); + terminal_bitmap = ia_css_kernel_bitmap_clear(); + + verifexit(program_count != 0); + verifexit(terminal_count != 0); + verifexit(!ia_css_is_kernel_bitmap_empty(total_bitmap)); + verifexit(vied_nci_is_bitmap_empty(resource_bitmap)); + + /* Check the kernel bitmaps for terminals */ + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_manifest_t *terminal_manifest_i = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + bool is_parameter_in = + (IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN == + ia_css_terminal_manifest_get_type( + terminal_manifest_i)); + bool is_parameter_out = + (IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT == + ia_css_terminal_manifest_get_type( + terminal_manifest_i)); + bool is_data = + ia_css_is_terminal_manifest_data_terminal( + terminal_manifest_i); + bool is_program = + ia_css_is_terminal_manifest_program_terminal( + terminal_manifest_i); + bool is_spatial_param = + 
ia_css_is_terminal_manifest_spatial_parameter_terminal( + terminal_manifest_i); + bool is_program_control_init = + ia_css_is_terminal_manifest_program_control_init_terminal( + terminal_manifest_i); + + if (is_parameter_in) { + /* + * There can be only one cached in parameter terminal + * it serves kernels, not programs + */ + verifexit(!has_parameter_terminal_in); + has_parameter_terminal_in = is_parameter_in; + } else if (is_parameter_out) { + /* + * There can be only one cached out parameter terminal + * it serves kernels, not programs + */ + verifexit(!has_parameter_terminal_out); + has_parameter_terminal_out = is_parameter_out; + } else if (is_data) { + ia_css_data_terminal_manifest_t *dterminal_manifest_i = + (ia_css_data_terminal_manifest_t *) + terminal_manifest_i; + ia_css_kernel_bitmap_t terminal_bitmap_i = + ia_css_data_terminal_manifest_get_kernel_bitmap( + dterminal_manifest_i); + /* + * A terminal must depend on kernels that are a subset + * of the total, correction, it can only depend on one + * kernel + */ + verifexit(!ia_css_is_kernel_bitmap_empty( + terminal_bitmap_i)); + verifexit(ia_css_is_kernel_bitmap_subset( + total_bitmap, terminal_bitmap_i)); + verifexit(ia_css_is_kernel_bitmap_onehot( + terminal_bitmap_i)); + } else if (is_program) { + verifexit(!has_program_terminal); + verifexit(terminal_manifest_i); + has_program_terminal = is_program; + has_program_terminal_sequencer_info = + (((ia_css_program_terminal_manifest_t *) + terminal_manifest_i)-> + kernel_fragment_sequencer_info_manifest_info_count + != 0); + } else if (is_program_control_init) { + has_program_control_init_terminal = is_program_control_init; + } else { + const ia_css_spatial_param_terminal_manifest_t + *spatial_param_man = + (const ia_css_spatial_param_terminal_manifest_t *) + terminal_manifest_i; + verifexit(spatial_param_man); + verifexit(is_spatial_param); + + terminal_bitmap = + ia_css_kernel_bitmap_set(terminal_bitmap, + spatial_param_man->kernel_id); + verifexit(!ia_css_is_kernel_bitmap_empty(terminal_bitmap)); + verifexit(ia_css_is_kernel_bitmap_subset( + total_bitmap, terminal_bitmap)); + } + } + + /* Check the kernel bitmaps for programs */ + for (i = 0; i < (int)program_count; i++) { + int j; + ia_css_program_manifest_t *program_manifest_i = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, i); + ia_css_program_type_t program_type_i = + ia_css_program_manifest_get_type(program_manifest_i); + ia_css_kernel_bitmap_t program_bitmap_i = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest_i); + uint8_t program_dependency_count_i = + ia_css_program_manifest_get_program_dependency_count( + program_manifest_i); + uint8_t terminal_dependency_count_i = + ia_css_program_manifest_get_terminal_dependency_count( + program_manifest_i); + uint8_t program_dependency_i0 = + ia_css_program_manifest_get_program_dependency( + program_manifest_i, 0); + bool is_sub_i = + ia_css_is_program_manifest_subnode_program_type( + program_manifest_i); + bool is_exclusive_sub_i = + (program_type_i == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB); + bool is_virtual_sub_i = + (program_type_i == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB); + bool is_super_i = + ia_css_is_program_manifest_supernode_program_type( + program_manifest_i); + + /* + * A program must have kernels that + * are a subset of the total + */ + verifexit(!ia_css_is_kernel_bitmap_empty( + program_bitmap_i)); + verifexit(ia_css_is_kernel_bitmap_subset( + total_bitmap, program_bitmap_i)); + verifexit((program_type_i != IA_CSS_N_PROGRAM_TYPES)); + 
verifexit((program_dependency_count_i + terminal_dependency_count_i) != 0); + /* + * Checks for subnodes + * - Parallel subnodes cannot depend on terminals + * - Exclusive subnodes must depend on + * fewer terminals than the supernode + * - Subnodes only depend on a supernode of the same type + * - Must have a subset of the supernode's kernels + * (but not equal) + * - This tests only positive cases + * Checks for singular or supernodes + * - Cannot depend on exclusive subnodes + * - No intersection between kernels + * (too strict for multiple instances ?) + */ + if (is_sub_i) { + /* Subnode */ + ia_css_program_manifest_t *program_manifest_k = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, program_dependency_i0); + ia_css_program_type_t program_type_k = + ia_css_program_manifest_get_type( + program_manifest_k); + ia_css_kernel_bitmap_t program_bitmap_k = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest_k); + + verifexit(program_dependency_count_i == 1); + if (is_exclusive_sub_i || is_virtual_sub_i) { + verifexit(terminal_dependency_count_i <= + ia_css_program_manifest_get_terminal_dependency_count( + program_manifest_k)); + } else{ + verifexit(terminal_dependency_count_i == 0); + } + verifexit(program_type_k == + (is_exclusive_sub_i ? + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER : + is_virtual_sub_i ? + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER : + IA_CSS_PROGRAM_TYPE_PARALLEL_SUPER)); + verifexit(!ia_css_is_kernel_bitmap_equal( + program_bitmap_k, program_bitmap_i)); + verifexit(ia_css_is_kernel_bitmap_subset( + program_bitmap_k, program_bitmap_i)); + } else { + /* Singular or Supernode */ + int k; + + for (k = 0; k < program_dependency_count_i; k++) { + uint8_t program_dependency_k = + ia_css_program_manifest_get_program_dependency( + program_manifest_i, k); + ia_css_program_manifest_t *program_manifest_k = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, (int)program_dependency_k); + ia_css_program_type_t program_type_k = + ia_css_program_manifest_get_type( + program_manifest_k); + ia_css_kernel_bitmap_t program_bitmap_k = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest_k); + + verifexit(program_dependency_k < + program_count); + verifexit((program_type_k != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (program_type_k != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB)); +#if USE_SIMPLIFIED_GRAPH_MODEL == 0 + verifexit(ia_css_is_kernel_bitmap_intersection_empty( + program_bitmap_i, program_bitmap_k)); +#else + (void)program_bitmap_k; +#endif + } + } + + /* Check for relations */ + for (j = 0; j < (int)program_count; j++) { + int k; + ia_css_program_manifest_t *program_manifest_j = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, j); + ia_css_program_type_t program_type_j = + ia_css_program_manifest_get_type(program_manifest_j); + ia_css_kernel_bitmap_t program_bitmap_j = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest_j); + uint8_t program_dependency_count_j = + ia_css_program_manifest_get_program_dependency_count( + program_manifest_j); + uint8_t program_dependency_j0 = + ia_css_program_manifest_get_program_dependency( + program_manifest_j, 0); + bool is_sub_j = + ia_css_is_program_manifest_subnode_program_type( + program_manifest_j); + bool is_super_j = + ia_css_is_program_manifest_supernode_program_type( + program_manifest_j); + bool is_virtual_sub_j = + (program_type_j == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB); + bool is_j_subset_i = + ia_css_is_kernel_bitmap_subset( + program_bitmap_i, program_bitmap_j); + bool is_i_subset_j = + 
ia_css_is_kernel_bitmap_subset(
+ program_bitmap_j, program_bitmap_i);
+
+ /* Test below would fail for i==j */
+ if (i == j)
+ continue;
+
+ /* Empty sets are always subsets, but meaningless */
+ verifexit(!ia_css_is_kernel_bitmap_empty(
+ program_bitmap_j));
+
+ /*
+ * Checks for mutual subnodes
+ * - Parallel subnodes must have an equal
+ * set of kernels
+ * - Exclusive and virtual subnodes must
+ * have an unequal set of kernels
+ * Checks for subnodes
+ * - Subnodes must have a subset of kernels
+ */
+ if (((program_type_i ==
+ IA_CSS_PROGRAM_TYPE_PARALLEL_SUB) &&
+ (program_type_j ==
+ IA_CSS_PROGRAM_TYPE_PARALLEL_SUB)) ||
+ ((program_type_i ==
+ IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) &&
+ (program_type_j ==
+ IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB)) ||
+ ((program_type_i ==
+ IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) &&
+ (program_type_j ==
+ IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB))) {
+
+ verifexit(program_dependency_count_j == 1);
+ verifexit(program_dependency_i0 != i);
+ verifexit(program_dependency_j0 != i);
+
+ if (program_dependency_i0 ==
+ program_dependency_j0) {
+ verifexit(is_sub_i);
+ /*
+ * Subnodes are subsets,
+ * not for virtual nodes
+ */
+ if (!is_virtual_sub_i)
+ verifexit(
+ ((is_j_subset_i ||
+ is_i_subset_j)));
+ /*
+ * Kernel sets must be equal for
+ * parallel subnodes,
+ * must be unequal for
+ * exclusive and virtual subnodes
+ */
+ verifexit(
+ ((is_j_subset_i && is_i_subset_j) ^
+ (is_exclusive_sub_i |
+ is_virtual_sub_i)));
+
+ }
+ if (is_j_subset_i || is_i_subset_j) {
+ verifexit(program_dependency_i0 ==
+ program_dependency_j0);
+ }
+ }
+
+ if (((program_type_i ==
+ IA_CSS_PROGRAM_TYPE_PARALLEL_SUPER) &&
+ (program_type_j ==
+ IA_CSS_PROGRAM_TYPE_PARALLEL_SUB)) ||
+ ((program_type_i ==
+ IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER) &&
+ (program_type_j ==
+ IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB)) ||
+ ((program_type_i ==
+ IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER) &&
+ (program_type_j ==
+ IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB))) {
+
+ verifexit(program_dependency_count_j == 1);
+ verifexit(!is_i_subset_j);
+
+ if (program_dependency_j0 == i) {
+ verifexit(program_dependency_i0 !=
+ program_dependency_j0);
+ verifexit(is_super_i);
+ verifexit(is_j_subset_i);
+
+ }
+ if (is_j_subset_i) {
+ verifexit(program_dependency_j0 == i);
+ }
+ }
+
+ /*
+ * Checks for dependent nodes
+ * - Cannot depend on exclusive subnodes
+ * - No intersection between kernels
+ * (too strict for multiple instances ?)
+ * unless a subnode + */ + for (k = 0; k < (int)program_dependency_count_j; k++) { + uint8_t program_dependency_k = + ia_css_program_manifest_get_program_dependency( + program_manifest_j, k); + + verifexit((program_dependency_k < + program_count)); + if (program_dependency_k == i) { + /* program[j] depends on program[i] */ + verifexit((i != j)); + verifexit((program_type_i != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (program_type_i != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB)); + verifexit(USE_SIMPLIFIED_GRAPH_MODEL || + (ia_css_is_kernel_bitmap_intersection_empty( + program_bitmap_i, program_bitmap_j) ^ is_sub_j)); + } + } + + /* + * Checks for supernodes and subnodes + * - Detect nodes that kernel-wise are subsets, + * but not connected to the correct supernode + * - We do not (yet) detect if programs properly + * depend on all parallel nodes + */ + if (!ia_css_is_kernel_bitmap_intersection_empty( + program_bitmap_i, program_bitmap_j)) { + /* + * This test will pass if + * the program manifest is NULL, + * but that's no concern here + */ +#if USE_SIMPLIFIED_GRAPH_MODEL == 0 + verifexit(!ia_css_is_program_manifest_singular_program_type( + program_manifest_i)); + verifexit(!ia_css_is_program_manifest_singular_program_type( + program_manifest_j)); + if (!is_virtual_sub_j) + verifexit((is_j_subset_i || is_i_subset_j)); +#else + (void)is_virtual_sub_j; +#endif + if (is_super_i) { + verifexit(is_sub_j); + verifexit(program_dependency_j0 == i); + } + if (is_super_j) { + verifexit(is_sub_i); + verifexit(program_dependency_i0 == j); + } + } + } + check_bitmap = ia_css_kernel_bitmap_union( + check_bitmap, program_bitmap_i); + /* + * A terminal can be bound to only a single + * (of multiple concurrent) program(s), + * i.e. the one that holds the iterator to control it + * Only singular and super nodes can depend on a terminal. 
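+ * (Clarifying note, not part of the original comment: this relies on
+ * vied_nci_bit_mask_set_unique() below, which is assumed to yield an
+ * empty bitmap when the same terminal index is set a second time, so
+ * the emptiness check compiled in when USE_SIMPLIFIED_GRAPH_MODEL == 0
+ * is what rejects a terminal bound to more than one program.)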
+ * This loop accumulates all terminal + * dependencies over all programs + */ + for (j = 0; j < (int)terminal_dependency_count_i; j++) { + uint8_t terminal_dependency = + ia_css_program_manifest_get_terminal_dependency( + program_manifest_i, j); + + verifexit(terminal_dependency < terminal_count); + if ((program_type_i != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (program_type_i != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB)) { + /* If the subnode always came after the */ + /* supernode we could check for presence */ + resource_bitmap = + vied_nci_bit_mask_set_unique( + resource_bitmap, + terminal_dependency); +#if USE_SIMPLIFIED_GRAPH_MODEL == 0 + verifexit(!vied_nci_is_bitmap_empty( + resource_bitmap)); +#endif + } + } + } + verifexit(ia_css_is_kernel_bitmap_equal( + total_bitmap, check_bitmap)); + + terminal_bitmap_weight = + vied_nci_bitmap_compute_weight(resource_bitmap); + verifexit(terminal_bitmap_weight >= 0); + if (has_parameter_terminal_in || + has_parameter_terminal_out || + has_program_terminal || + has_program_control_init_terminal) { + int skip_terminal_count = 0; + + if (has_parameter_terminal_in) + skip_terminal_count++; + if (has_parameter_terminal_out) + skip_terminal_count++; + if (has_program_control_init_terminal) { + skip_terminal_count++; + } + if (has_program_terminal) + skip_terminal_count++; + if (has_program_terminal_sequencer_info) + skip_terminal_count--; +#if USE_SIMPLIFIED_GRAPH_MODEL == 0 + verifexit((terminal_bitmap_weight == + (terminal_count - skip_terminal_count))); +#endif + } else + verifexit((terminal_bitmap_weight == terminal_count)); + + is_valid = true; +EXIT: + if (is_valid == false) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_is_program_group_manifest_valid: failed\n"); + } + return is_valid; +} + +int ia_css_program_group_manifest_set_kernel_bitmap( + ia_css_program_group_manifest_t *manifest, + const ia_css_kernel_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_set_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + manifest->kernel_bitmap = bitmap; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_set_kernel_bitmap invalid argument\n"); + } + return retval; +} + +ia_css_kernel_bitmap_t ia_css_program_group_manifest_get_kernel_bitmap( + const ia_css_program_group_manifest_t *manifest) +{ + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + bitmap = manifest->kernel_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_kernel_bitmap invalid argument\n"); + } + return bitmap; +} + +void ia_css_program_group_manifest_init( + ia_css_program_group_manifest_t *blob, + const uint8_t program_count, + const uint8_t terminal_count, + const uint8_t *program_dependencies, + const uint8_t *terminal_dependencies, + const ia_css_terminal_type_t *terminal_type, + const uint16_t cached_in_param_section_count, + const uint16_t cached_out_param_section_count, + const uint16_t *spatial_param_section_count, + const uint16_t fragment_param_section_count, + const uint16_t *sliced_in_param_section_count, + const uint16_t *sliced_out_param_section_count, + const uint16_t kernel_fragment_seq_count, + const uint16_t *progctrlinit_load_section_counts, + const uint16_t *progctrlinit_connect_section_counts) +{ + int i = 0; + int j = 0; + int m = 0; + int n = 
0; + int result; + uint32_t offset = 0; + char *prg_manifest_base, *terminal_manifest_base; + size_t program_size = 0; + + /* + * assert(blob != NULL); + */ + COMPILATION_ERROR_IF( + SIZE_OF_DATA_TERMINAL_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_data_terminal_manifest_t))); + COMPILATION_ERROR_IF( + SIZE_OF_PROGRAM_GROUP_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_group_manifest_t))); + COMPILATION_ERROR_IF( + SIZE_OF_PROGRAM_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_manifest_t))); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_program_group_manifest_init(): enter:\n"); + + for (i = 0; i < (int)program_count; i++) { + program_size += + ia_css_sizeof_program_manifest(program_dependencies[i], + terminal_dependencies[i]); + } + + /* A program group ID cannot be zero */ + blob->ID = 1; + blob->program_count = program_count; + blob->terminal_count = terminal_count; + blob->program_manifest_offset = sizeof(ia_css_program_group_manifest_t); + blob->terminal_manifest_offset = + (uint32_t)blob->program_manifest_offset + program_size; + + prg_manifest_base = (char *) + (((char *)blob) + blob->program_manifest_offset); + offset = blob->program_manifest_offset; + for (i = 0; i < (int)program_count; i++) { + ia_css_program_manifest_init( + (ia_css_program_manifest_t *)prg_manifest_base, + program_dependencies[i], terminal_dependencies[i]); + ia_css_program_manifest_set_parent_offset( + (ia_css_program_manifest_t *)prg_manifest_base, offset); + program_size = + ia_css_sizeof_program_manifest(program_dependencies[i], + terminal_dependencies[i]); + prg_manifest_base += program_size; + offset += (uint32_t)program_size; + } + + offset = blob->terminal_manifest_offset; + terminal_manifest_base = (char *) (((char *)blob) + offset); + for (i = 0; i < (int)terminal_count; i++) { + size_t terminal_size = 0; + ia_css_terminal_manifest_t *term_manifest = + (ia_css_terminal_manifest_t *)terminal_manifest_base; + + ia_css_terminal_manifest_set_parent_offset( + (ia_css_terminal_manifest_t *) + terminal_manifest_base, + offset); + switch (terminal_type[i]) { + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN: + result = ia_css_param_terminal_manifest_init( + (ia_css_param_terminal_manifest_t *) + term_manifest, + cached_in_param_section_count); + if (0 == result) { + terminal_size = + ia_css_param_terminal_manifest_get_size( + cached_in_param_section_count); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_param_terminal_manifest_init failed in cached in terminal\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT: + result = ia_css_param_terminal_manifest_init( + (ia_css_param_terminal_manifest_t *) + term_manifest, + cached_out_param_section_count); + if (0 == result) { + terminal_size = + ia_css_param_terminal_manifest_get_size( + cached_out_param_section_count); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_param_terminal_manifest_init failed\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + result = ia_css_spatial_param_terminal_manifest_init( + (ia_css_spatial_param_terminal_manifest_t *) + term_manifest, + spatial_param_section_count[j]); + if (0 == result) { + terminal_size = + ia_css_spatial_param_terminal_manifest_get_size( + spatial_param_section_count[j]); + j++; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_spatial_param_terminal_manifest_init failed in spatial terminal\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PROGRAM: + result = 
ia_css_program_terminal_manifest_init( + (ia_css_program_terminal_manifest_t *) + term_manifest, + fragment_param_section_count, + kernel_fragment_seq_count); + if (0 == result) { + terminal_size = + ia_css_program_terminal_manifest_get_size( + fragment_param_section_count, + kernel_fragment_seq_count); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_program_terminal_manifest_init failed in program terminal\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: + result = ia_css_program_control_init_terminal_manifest_init( + (ia_css_program_control_init_terminal_manifest_t *) + term_manifest, + program_count, + progctrlinit_load_section_counts, + progctrlinit_connect_section_counts); + if (0 == result) { + terminal_size = + ia_css_program_control_init_terminal_manifest_get_size( + program_count, + NULL, + NULL); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_program_control_init_terminal_manifest_init failed\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_DATA_IN: + case IA_CSS_TERMINAL_TYPE_DATA_OUT: + terminal_size = sizeof(ia_css_data_terminal_manifest_t); + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN: + result = ia_css_sliced_param_terminal_manifest_init( + (ia_css_sliced_param_terminal_manifest_t *) + term_manifest, + sliced_in_param_section_count[m]); + if (0 == result) { + terminal_size = + ia_css_sliced_param_terminal_manifest_get_size( + sliced_in_param_section_count[m]); + m++; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_param_terminal_manifest_init in sliced terminal failed\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT: + result = ia_css_sliced_param_terminal_manifest_init( + (ia_css_sliced_param_terminal_manifest_t *) + term_manifest, + sliced_out_param_section_count[n]); + if (0 == result) { + terminal_size = + ia_css_sliced_param_terminal_manifest_get_size( + sliced_out_param_section_count[n]); + n++; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_param_terminal_manifest_init in sliced out terminal failed\n"); + } + break; + default: + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_init invalid argument\n"); + } + term_manifest->size = (uint16_t)terminal_size; + term_manifest->terminal_type = terminal_type[i]; + terminal_manifest_base += terminal_size; + offset += (uint32_t)terminal_size; + } + + /* Set the private program group manifest blob offset */ + blob->private_data_offset = offset; + offset += ceil_mul(sizeof(struct ia_css_psys_private_pg_data), + sizeof(uint64_t)); + + /* Set the RBM manifest blob offset */ + blob->rbm_manifest_offset = offset; + offset += ceil_mul(sizeof(ia_css_rbm_manifest_t), + sizeof(uint64_t)); + + assert(offset <= UINT16_MAX); + blob->size = (uint16_t)offset; +} + +int ia_css_program_group_manifest_print( + const ia_css_program_group_manifest_t *manifest, + void *fid) +{ + int retval = -1; + int i; + uint8_t program_count, terminal_count; + ia_css_kernel_bitmap_t bitmap; + struct ia_css_psys_private_pg_data *priv_data; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_program_group_manifest_print(): enter:\n"); + + NOT_USED(fid); + + verifexit(manifest != NULL); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "sizeof(manifest) = %d\n", + (int)ia_css_program_group_manifest_get_size(manifest)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "alignment(manifest) = %d\n", + (int)ia_css_program_group_manifest_get_alignment(manifest)); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "program group ID = %d\n", + 
(int)ia_css_program_group_manifest_get_program_group_ID( + manifest)); + + program_count = + ia_css_program_group_manifest_get_program_count(manifest); + terminal_count = + ia_css_program_group_manifest_get_terminal_count(manifest); + + bitmap = ia_css_program_group_manifest_get_kernel_bitmap(manifest); + verifexit(ia_css_kernel_bitmap_print(bitmap, fid) == 0); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "%d program manifests\n", (int)program_count); + for (i = 0; i < (int)program_count; i++) { + ia_css_program_manifest_t *program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, i); + + retval = ia_css_program_manifest_print(program_manifest, fid); + verifjmpexit(retval == 0); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "%d terminal manifests\n", (int)terminal_count); + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_manifest_t *terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + + retval = ia_css_terminal_manifest_print( + terminal_manifest, fid); + verifjmpexit(retval == 0); + } + + priv_data = + (struct ia_css_psys_private_pg_data *) + ia_css_program_group_manifest_get_private_data(manifest); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "private_data_offset %d\n", manifest->private_data_offset); + + for (i = 0; i < IPU_DEVICE_GP_PSA_MUX_NUM_MUX; i++) { + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "PSA MUX id %d mux val %d\n", i, + priv_data->psa_mux_conf[i]); + + } + + for (i = 0; i < IPU_DEVICE_GP_ISA_STATIC_MUX_NUM_MUX; i++) { + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "ISA MUX id %d mux val %d\n", i, + priv_data->isa_mux_conf[i]); + + } + + for (i = 0; i < IPU_DEVICE_ACB_NUM_ACB; i++) { + + if (priv_data->acb_route[i].in_select != + NCI_ACB_PORT_INVALID) { + + assert(priv_data->acb_route[i].in_select != + NCI_ACB_PORT_INVALID && + priv_data->acb_route[i].out_select != + NCI_ACB_PORT_INVALID); + + IA_CSS_TRACE_3(PSYSAPI_STATIC, INFO, + "Route Cell id %d In %d Out %d\n", i, + priv_data->acb_route[i].in_select, + priv_data->acb_route[i].out_select); + } + + } + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: buffer_base_addr 0x%x\n", + priv_data->input_buffer_info.buffer_base_addr); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: bpe = %d\n", + priv_data->input_buffer_info.bpe); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: buffer_width = %d\n", + priv_data->input_buffer_info.buffer_width); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: buffer_height = %d\n", + priv_data->input_buffer_info.buffer_height); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: num_of_buffers = %d\n", + priv_data->input_buffer_info.num_of_buffers); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: dfm_port_addr = 0x%x\n", + priv_data->input_buffer_info.dfm_port_addr); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_group_manifest_print failed (%i)\n", + retval); + } + return retval; +} +#endif /* !defined(__HIVECC) */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest_impl.h new file mode 100644 index 000000000000..527b8cc00dd1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest_impl.h @@ -0,0 +1,415 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_IMPL_H +#define __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_IMPL_H + +#include +#include +#include +#include +#include "ia_css_psys_program_group_private.h" +#include "ia_css_terminal_manifest_types.h" +#include "ia_css_psys_private_pg_data.h" +#include /* Safer bit mask functions */ +#include "ia_css_psys_static_trace.h" +#include "ia_css_rbm_manifest_types.h" +#include +#include +#include + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +size_t ia_css_program_group_manifest_get_size( + const ia_css_program_group_manifest_t *manifest) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_size(): enter:\n"); + + if (manifest != NULL) { + size = manifest->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_size invalid argument\n"); + } + return size; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_program_group_ID_t +ia_css_program_group_manifest_get_program_group_ID( + const ia_css_program_group_manifest_t *manifest) +{ + ia_css_program_group_ID_t id = IA_CSS_PROGRAM_GROUP_INVALID_ID; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_program_group_ID(): enter:\n"); + + if (manifest != NULL) { + id = manifest->ID; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_program_group_ID invalid argument\n"); + } + return id; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +int ia_css_program_group_manifest_set_program_group_ID( + ia_css_program_group_manifest_t *manifest, + ia_css_program_group_ID_t id) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_set_program_group_ID(): enter:\n"); + + if (manifest != NULL) { + manifest->ID = id; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_set_program_group_ID invalid argument\n"); + } + return retval; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +int ia_css_program_group_manifest_set_alignment( + ia_css_program_group_manifest_t *manifest, + const uint8_t alignment) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_set_alignment(): enter:\n"); + + if (manifest != NULL) { + manifest->alignment = alignment; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_set_alignment invalid argument\n"); + } + return retval; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +uint8_t ia_css_program_group_manifest_get_alignment( + const ia_css_program_group_manifest_t *manifest) +{ + uint8_t alignment = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_alignment(): enter:\n"); + + if (manifest != NULL) { + alignment = manifest->alignment; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_alignment invalid argument\n"); + } + return alignment; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +void 
*ia_css_program_group_manifest_get_private_data( + const ia_css_program_group_manifest_t *manifest) +{ + void *private_data = NULL; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_private_data(%p): enter:\n", + manifest); + + verifexit(manifest != NULL); + + private_data = (void *)((const char *)manifest + + manifest->private_data_offset); +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_private_data invalid argument\n"); + } + return private_data; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_rbm_manifest_t *ia_css_program_group_manifest_get_rbm_manifest( + const ia_css_program_group_manifest_t *manifest) +{ + ia_css_rbm_manifest_t *rbm_manifest = NULL; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_rbm_manifest(%p): enter:\n", + manifest); + + verifexit(manifest != NULL); + + rbm_manifest = (ia_css_rbm_manifest_t *)((const char *)manifest + + manifest->rbm_manifest_offset); + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_rbm_manifest invalid argument\n"); + } + return rbm_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_program_manifest_t * +ia_css_program_group_manifest_get_prgrm_mnfst( + const ia_css_program_group_manifest_t *manifest, + const unsigned int program_index) +{ + ia_css_program_manifest_t *prg_manifest_base; + uint8_t *program_manifest = NULL; + uint8_t program_count; + unsigned int i; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_prgrm_mnfst(%p,%d): enter:\n", + manifest, program_index); + + program_count = + ia_css_program_group_manifest_get_program_count(manifest); + + verifexit(manifest != NULL); + verifexit(program_index < program_count); + + prg_manifest_base = (ia_css_program_manifest_t *)((char *)manifest + + manifest->program_manifest_offset); + if (program_index < program_count) { + program_manifest = (uint8_t *)prg_manifest_base; + for (i = 0; i < program_index; i++) { + program_manifest += ((ia_css_program_manifest_t *) + program_manifest)->size; + } + } + +EXIT: + if (NULL == manifest || program_index >= program_count) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_prgrm_mnfst invalid argument\n"); + } + return (ia_css_program_manifest_t *)program_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_data_terminal_manifest_t * +ia_css_program_group_manifest_get_data_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_data_terminal_manifest_t *data_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_data_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_data_terminal(terminal_manifest)); + + data_terminal_manifest = + (ia_css_data_terminal_manifest_t *)terminal_manifest; +EXIT: + return data_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_param_terminal_manifest_t * +ia_css_program_group_manifest_get_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_param_terminal_manifest_t *param_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + 
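+	/*
+	 * Editorial sketch, not from the original patch: every typed
+	 * accessor in this file follows the same fetch / type-check /
+	 * downcast pattern, so a hypothetical caller only has to test
+	 * the result for NULL. The names pg_manifest and idx below are
+	 * illustrative, not symbols defined by this code:
+	 *
+	 *	ia_css_param_terminal_manifest_t *param =
+	 *		ia_css_program_group_manifest_get_param_terminal_manifest(
+	 *			pg_manifest, idx);
+	 *	if (param == NULL)
+	 *		return -1;	(terminal idx is not a cached
+	 *				 parameter terminal)
+	 */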
IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_param_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_parameter_terminal( + terminal_manifest)); + param_terminal_manifest = + (ia_css_param_terminal_manifest_t *)terminal_manifest; +EXIT: + return param_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_spatial_param_terminal_manifest_t * +ia_css_program_group_manifest_get_spatial_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_spatial_param_terminal_manifest_t * + spatial_param_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_spatial_param_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_spatial_parameter_terminal( + terminal_manifest)); + + spatial_param_terminal_manifest = + (ia_css_spatial_param_terminal_manifest_t *)terminal_manifest; +EXIT: + return spatial_param_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_sliced_param_terminal_manifest_t * +ia_css_program_group_manifest_get_sliced_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_sliced_param_terminal_manifest_t * + sliced_param_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_sliced_param_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_sliced_terminal( + terminal_manifest)); + + sliced_param_terminal_manifest = + (ia_css_sliced_param_terminal_manifest_t *)terminal_manifest; +EXIT: + return sliced_param_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_program_terminal_manifest_t * +ia_css_program_group_manifest_get_program_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_program_terminal_manifest_t *program_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_program_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_program_terminal( + terminal_manifest)); + + program_terminal_manifest = + (ia_css_program_terminal_manifest_t *)terminal_manifest; + EXIT: + return program_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_terminal_manifest_t * +ia_css_program_group_manifest_get_term_mnfst( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_terminal_manifest_t *terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest_base; + uint8_t terminal_count; + uint8_t i = 0; + uint32_t offset; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_term_mnfst(%p,%d): enter:\n", 
+ manifest, (int)terminal_index); + + verifexit(manifest != NULL); + + terminal_count = + ia_css_program_group_manifest_get_terminal_count(manifest); + + verifexit(terminal_index < terminal_count); + + terminal_manifest_base = + (ia_css_terminal_manifest_t *)((char *)manifest + + manifest->terminal_manifest_offset); + terminal_manifest = terminal_manifest_base; + while (i < terminal_index) { + offset = + (uint32_t)ia_css_terminal_manifest_get_size(terminal_manifest); + terminal_manifest = (ia_css_terminal_manifest_t *) + ((char *)terminal_manifest + offset); + i++; + } +EXIT: + return terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +uint8_t ia_css_program_group_manifest_get_program_count( + const ia_css_program_group_manifest_t *manifest) +{ + uint8_t program_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_program_count(): enter:\n"); + + if (manifest != NULL) { + program_count = manifest->program_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_program_count invalid argument\n"); + } + return program_count; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +uint8_t ia_css_program_group_manifest_get_terminal_count( + const ia_css_program_group_manifest_t *manifest) +{ + uint8_t terminal_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_terminal_count(): enter:\n"); + + if (manifest != NULL) { + terminal_count = manifest->terminal_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_terminal_count invalid argument\n"); + } + return terminal_count; +} + +#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_private.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_private.h new file mode 100644 index 000000000000..502d59def6e9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_private.h @@ -0,0 +1,212 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_PRIVATE_H
+#define __IA_CSS_PSYS_PROGRAM_GROUP_PRIVATE_H
+
+#include "ia_css_psys_manifest_types.h"
+#include "ia_css_terminal_manifest_types.h"
+#include "ia_css_kernel_bitmap.h"
+#include "ia_css_program_group_data.h"
+#include "vied_nci_psys_resource_model.h"
+#include "ia_css_rbm_manifest_types.h"
+#include
+#include
+#include
+
+#define SIZE_OF_PROGRAM_GROUP_MANIFEST_STRUCT_IN_BITS \
+	((IA_CSS_KERNEL_BITMAP_BITS) \
+	+ (IA_CSS_PROGRAM_GROUP_ID_BITS) \
+	+ (5 * IA_CSS_UINT16_T_BITS) \
+	+ (5 * IA_CSS_UINT8_T_BITS) \
+	+ (5 * IA_CSS_UINT8_T_BITS))
+
+struct ia_css_program_group_manifest_s {
+	/**< Indicate which kernels are present in this program group */
+	ia_css_kernel_bitmap_t kernel_bitmap;
+	/**< Referral ID to program group FW */
+	ia_css_program_group_ID_t ID;
+	uint16_t program_manifest_offset;
+	uint16_t terminal_manifest_offset;
+	/**< Offset to private data (not part of the official API) */
+	uint16_t private_data_offset;
+	/**< Offset to RBM manifest */
+	uint16_t rbm_manifest_offset;
+	/**< Size of this structure */
+	uint16_t size;
+	/**< Storage alignment requirement (in uint8_t) */
+	uint8_t alignment;
+	/**< Total number of kernels in this program group */
+	uint8_t kernel_count;
+	/**< Total number of programs in this program group */
+	uint8_t program_count;
+	/**< Total number of terminals in this program group */
+	uint8_t terminal_count;
+	/**< Total number of independent subgraphs in this program group */
+	uint8_t subgraph_count;
+	/**< Padding; ensures that rbm_manifest starts on 64bit alignment */
+	uint8_t reserved[5];
+};
+
+#define SIZE_OF_PROGRAM_MANIFEST_STRUCT_IN_BITS \
+	(IA_CSS_KERNEL_BITMAP_BITS \
+	+ IA_CSS_PROGRAM_ID_BITS \
+	+ IA_CSS_PROGRAM_TYPE_BITS \
+	+ (3 * IA_CSS_UINT32_T_BITS) \
+	+ (VIED_NCI_RESOURCE_BITMAP_BITS * VIED_NCI_N_DEV_DFM_ID) \
+	+ (VIED_NCI_RESOURCE_BITMAP_BITS * VIED_NCI_N_DEV_DFM_ID) \
+	+ IA_CSS_UINT16_T_BITS \
+	+ (VIED_NCI_RESOURCE_SIZE_BITS * VIED_NCI_N_MEM_TYPE_ID) \
+	+ (VIED_NCI_RESOURCE_SIZE_BITS * VIED_NCI_N_DATA_MEM_TYPE_ID * 2) \
+	+ (VIED_NCI_RESOURCE_SIZE_BITS * VIED_NCI_N_DEV_CHN_ID * 2) \
+	+ (IA_CSS_UINT8_T_BITS * VIED_NCI_N_DEV_DFM_ID) \
+	+ (IA_CSS_PROCESS_MAX_CELLS * VIED_NCI_RESOURCE_ID_BITS) \
+	+ (VIED_NCI_RESOURCE_ID_BITS) \
+	+ (2 * IA_CSS_UINT8_T_BITS) \
+	+ (N_PADDING_UINT8_IN_PROGRAM_GROUP_MANFEST * IA_CSS_UINT8_T_BITS))
+/*
+ * This structure contains only the information required for resource
+ * management and construction of the process group.
+ * The header for the program binary load is separate.
+ */
+
+struct ia_css_program_manifest_s {
+	/**< Indicate which kernels lead to this program being used */
+	ia_css_kernel_bitmap_t kernel_bitmap;
+	/**< Referral ID to a specific program FW, valid IDs != 0 */
+	ia_css_program_ID_t ID;
+	/**< Specification for exclusive or parallel programs */
+	ia_css_program_type_t program_type;
+	/**< Offset to add to reach the parent. This is a negative value. */
+	int32_t parent_offset;
+	uint32_t program_dependency_offset;
+	uint32_t terminal_dependency_offset;
+#if (VIED_NCI_N_DEV_DFM_ID > 0)
+	/**< DFM port allocation of this program */
+	vied_nci_resource_bitmap_t dfm_port_bitmap[VIED_NCI_N_DEV_DFM_ID];
+	/**< Active DFM ports which need a kick.
+	 * If an empty port is configured to run in active mode, the empty
+	 * port and the corresponding full port(s) in the stream must be kicked.
+	 * The empty port must always be kicked after the full port.
+ */ + vied_nci_resource_bitmap_t dfm_active_port_bitmap[VIED_NCI_N_DEV_DFM_ID]; +#endif + /**< Size of this structure */ + uint16_t size; + /**< (internal) Memory allocation size needs of this program */ + vied_nci_resource_size_t int_mem_size[VIED_NCI_N_MEM_TYPE_ID]; + /**< (external) Memory allocation size needs of this program */ + vied_nci_resource_size_t ext_mem_size[VIED_NCI_N_DATA_MEM_TYPE_ID]; + vied_nci_resource_size_t ext_mem_offset[VIED_NCI_N_DATA_MEM_TYPE_ID]; + /**< Device channel allocation size needs of this program */ + vied_nci_resource_size_t dev_chn_size[VIED_NCI_N_DEV_CHN_ID]; + vied_nci_resource_size_t dev_chn_offset[VIED_NCI_N_DEV_CHN_ID]; +#if (VIED_NCI_N_DEV_DFM_ID > 0) + /**< DFM ports are relocatable if value is set to 1. + * The flag is per dfm port type. + * This will not be supported for now. + */ + uint8_t is_dfm_relocatable[VIED_NCI_N_DEV_DFM_ID]; +#endif + /** Array of all the cells this program needs */ +#if IA_CSS_PROCESS_MAX_CELLS == 1 + vied_nci_resource_id_t cell_id; +#else + vied_nci_resource_id_t cells[IA_CSS_PROCESS_MAX_CELLS]; +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + /**< (exclusive) indication of a cell type to be used by this program */ + vied_nci_resource_id_t cell_type_id; + + /**< Number of programs this program depends on */ + uint8_t program_dependency_count; + /**< Number of terminals this program depends on */ + uint8_t terminal_dependency_count; + /**< Padding bytes for 64bit alignment*/ +#if N_PADDING_UINT8_IN_PROGRAM_GROUP_MANFEST > 0 + /*hivecc does not allow an array of zero length*/ + uint8_t padding[N_PADDING_UINT8_IN_PROGRAM_GROUP_MANFEST]; +#endif +}; + +/* + *Calculation for manual size check for struct ia_css_data_terminal_manifest_s + */ +#define SIZE_OF_DATA_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + (SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + + IA_CSS_FRAME_FORMAT_BITMAP_BITS \ + + IA_CSS_CONNECTION_BITMAP_BITS \ + + IA_CSS_KERNEL_BITMAP_BITS \ + + (4 * (IA_CSS_UINT16_T_BITS * IA_CSS_N_DATA_DIMENSION)) \ + + IA_CSS_UINT16_T_BITS \ + + IA_CSS_UINT8_T_BITS \ + + (4*IA_CSS_UINT8_T_BITS)) +/* + * Inherited data terminal class + */ +struct ia_css_data_terminal_manifest_s { + /**< Data terminal base */ + ia_css_terminal_manifest_t base; + /**< Supported (4CC / MIPI / parameter) formats */ + ia_css_frame_format_bitmap_t frame_format_bitmap; + /**< Indicate which kernels lead to this terminal being used */ + ia_css_kernel_bitmap_t kernel_bitmap; + /**< Minimum size of the frame */ + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]; + /**< Maximum size of the frame */ + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]; + /**< Minimum size of a fragment that the program port can accept */ + uint16_t min_fragment_size[IA_CSS_N_DATA_DIMENSION]; + /**< Maximum size of a fragment that the program port can accept */ + uint16_t max_fragment_size[IA_CSS_N_DATA_DIMENSION]; + /**< Indicate if this terminal is derived from a principal terminal */ + uint16_t terminal_dependency; + /**< Indicate what (streaming) interface types this terminal supports */ + ia_css_connection_bitmap_t connection_bitmap; + /**< Indicates if compression is supported on the data associated with + * this terminal. 
'1' indicates compression is supported, + * '0' otherwise + */ + uint8_t compression_support; + uint8_t reserved[4]; +}; + +/* ============ Program Control Init Terminal Manifest - START ============ */ +#define N_PADDING_UINT8_IN_PROGCTRLINIT_MANIFEST_PROGRAM_DESC_STRUCT 4 +struct ia_css_program_control_init_manifest_program_desc_s { + uint16_t load_section_count; + uint16_t connect_section_count; + uint8_t padding[N_PADDING_UINT8_IN_PROGCTRLINIT_MANIFEST_PROGRAM_DESC_STRUCT]; +}; + +#define N_PADDING_UINT8_IN_PROGCTRLINIT_TERMINAL_MANIFEST_STRUCT 2 +struct ia_css_program_control_init_terminal_manifest_s { + ia_css_terminal_manifest_t base; + /* Number of programs in program group */ + uint32_t program_count; + /* + * Points to array of ia_css_program_control_init_terminal_program_desc_t + * with size program_count. + */ + uint16_t program_desc_offset; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PROGCTRLINIT_TERMINAL_MANIFEST_STRUCT]; +}; +/* ============ Program Control Init Terminal Manifest - END ============ */ + +extern void ia_css_program_manifest_init( + ia_css_program_manifest_t *blob, + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count); + +#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_PRIVATE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_manifest.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_manifest.c new file mode 100644 index 000000000000..be1ef9676879 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_manifest.c @@ -0,0 +1,1241 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + + +#include +#include +/* for ia_css_kernel_bitmap_t, ia_css_kernel_bitmap_print */ +#include + +#include +#include "ia_css_psys_program_group_private.h" +#include "ia_css_psys_static_trace.h" + +#include +#include + +size_t ia_css_sizeof_program_manifest( + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_sizeof_program_manifest(): enter:\n"); + + size += sizeof(ia_css_program_manifest_t); + size += program_dependency_count * sizeof(uint8_t); + size += terminal_dependency_count * sizeof(uint8_t); + size = ceil_mul(size, sizeof(uint64_t)); + + return size; +} + +bool ia_css_has_program_manifest_fixed_cell( + const ia_css_program_manifest_t *manifest) +{ + bool has_fixed_cell = false; + + vied_nci_cell_ID_t cell_id; + vied_nci_cell_type_ID_t cell_type_id; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_has_program_manifest_fixed_cell(): enter:\n"); + + verifexit(manifest != NULL); + + cell_id = ia_css_program_manifest_get_cell_ID(manifest); + cell_type_id = ia_css_program_manifest_get_cell_type_ID(manifest); + + has_fixed_cell = ((cell_id != VIED_NCI_N_CELL_ID) && + (cell_type_id == VIED_NCI_N_CELL_TYPE_ID)); + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_has_program_manifest_fixed_cell invalid argument\n"); + } + return has_fixed_cell; +} + +size_t ia_css_program_manifest_get_size( + const ia_css_program_manifest_t *manifest) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_size(): enter:\n"); + + if (manifest != NULL) { + size = manifest->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_size invalid argument\n"); + } + + return size; +} + +ia_css_program_ID_t ia_css_program_manifest_get_program_ID( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_ID_t program_id = IA_CSS_PROGRAM_INVALID_ID; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_program_ID(): enter:\n"); + + if (manifest != NULL) { + program_id = manifest->ID; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_program_ID invalid argument\n"); + } + return program_id; +} + +int ia_css_program_manifest_set_program_ID( + ia_css_program_manifest_t *manifest, + ia_css_program_ID_t id) +{ + int ret = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_program_ID(): enter:\n"); + + if (manifest != NULL) { + manifest->ID = id; + ret = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_program_ID failed (%i)\n", ret); + } + return ret; +} + +ia_css_program_group_manifest_t *ia_css_program_manifest_get_parent( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_group_manifest_t *parent = NULL; + char *base; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_parent(): enter:\n"); + + verifexit(manifest != NULL); + + base = (char *)((char *)manifest + manifest->parent_offset); + + parent = (ia_css_program_group_manifest_t *) (base); +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_parent invalid argument\n"); + } + return parent; +} + +int ia_css_program_manifest_set_parent_offset( + ia_css_program_manifest_t *manifest, + int32_t program_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_parent_offset(): enter:\n"); 
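+	/*
+	 * Editorial sketch, not from the original patch: program manifests
+	 * are laid out behind the program group manifest header, so the
+	 * parent always sits at a lower address and the offset is stored
+	 * negated. ia_css_program_manifest_get_parent() then recovers it
+	 * with plain pointer arithmetic; assuming an illustrative
+	 * program_offset of 64 bytes:
+	 *
+	 *	manifest->parent_offset = -64;
+	 *	parent = (ia_css_program_group_manifest_t *)
+	 *		((char *)manifest + manifest->parent_offset);
+	 *
+	 * i.e. the parent is found 64 bytes before this program manifest.
+	 */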
+ + verifexit(manifest != NULL); + + /* parent is at negative offset away from current program offset*/ + manifest->parent_offset = -program_offset; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_parent_offset failed (%i)\n", + retval); + } + return retval; +} + +ia_css_program_type_t ia_css_program_manifest_get_type( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_type_t program_type = IA_CSS_N_PROGRAM_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_type(): enter:\n"); + + if (manifest != NULL) { + program_type = manifest->program_type; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_type invalid argument\n"); + } + return program_type; +} + +int ia_css_program_manifest_set_type( + ia_css_program_manifest_t *manifest, + const ia_css_program_type_t program_type) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_type(): enter:\n"); + + if (manifest != NULL) { + manifest->program_type = program_type; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_type failed (%i)\n", retval); + } + return retval; +} + +ia_css_kernel_bitmap_t ia_css_program_manifest_get_kernel_bitmap( + const ia_css_program_manifest_t *manifest) +{ + ia_css_kernel_bitmap_t kernel_bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + kernel_bitmap = manifest->kernel_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_kernel_bitmap invalid argument\n"); + } + return kernel_bitmap; +} + +int ia_css_program_manifest_set_kernel_bitmap( + ia_css_program_manifest_t *manifest, + const ia_css_kernel_bitmap_t kernel_bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + manifest->kernel_bitmap = kernel_bitmap; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_kernel_bitmap failed (%i)\n", + retval); + } + return retval; +} + +vied_nci_cell_ID_t ia_css_program_manifest_get_cell_ID( + const ia_css_program_manifest_t *manifest) +{ + vied_nci_cell_ID_t cell_id = VIED_NCI_N_CELL_ID; +#if IA_CSS_PROCESS_MAX_CELLS > 1 + int i = 0; +#endif /* IA_CSS_PROCESS_MAX_CELLS > 1 */ + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_cell_ID(): enter:\n"); + + verifexit(manifest != NULL); + +#if IA_CSS_PROCESS_MAX_CELLS == 1 + cell_id = manifest->cell_id; +#else + for (i = 1; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + assert(VIED_NCI_N_CELL_ID == manifest->cells[i]); +#ifdef __HIVECC +#pragma hivecc unroll +#endif + } + cell_id = manifest->cells[0]; +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_cell_ID invalid argument\n"); + } + return cell_id; +} + +int ia_css_program_manifest_set_cell_ID( + ia_css_program_manifest_t *manifest, + const vied_nci_cell_ID_t cell_id) +{ + int retval = -1; +#if IA_CSS_PROCESS_MAX_CELLS > 1 + int i = 0; +#endif /* IA_CSS_PROCESS_MAX_CELLS > 1 */ + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_cell_ID(): enter:\n"); + if (manifest != NULL) { +#if IA_CSS_PROCESS_MAX_CELLS == 1 + manifest->cell_id = cell_id; +#else + 
manifest->cells[0] = cell_id; + for (i = 1; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + manifest->cells[i] = VIED_NCI_N_CELL_ID; + } +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_cell_ID failed (%i)\n", retval); + } + return retval; +} + +vied_nci_cell_type_ID_t ia_css_program_manifest_get_cell_type_ID( + const ia_css_program_manifest_t *manifest) +{ + vied_nci_cell_type_ID_t cell_type_id = VIED_NCI_N_CELL_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_cell_type_ID(): enter:\n"); + + verifexit(manifest != NULL); + + cell_type_id = (vied_nci_cell_type_ID_t)(manifest->cell_type_id); +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_cell_type_ID invalid argument\n"); + } + return cell_type_id; +} + +int ia_css_program_manifest_set_cell_type_ID( + ia_css_program_manifest_t *manifest, + const vied_nci_cell_type_ID_t cell_type_id) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_cell_type_ID(): enter:\n"); + if (manifest != NULL) { + manifest->cell_type_id = cell_type_id; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_cell_type_ID failed (%i)\n", + retval); + } + return retval; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_int_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id) +{ + vied_nci_resource_size_t int_mem_size = 0; + vied_nci_cell_type_ID_t cell_type_id; + int mem_index; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_int_mem_size(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(mem_type_id < VIED_NCI_N_MEM_TYPE_ID); + + if (ia_css_has_program_manifest_fixed_cell(manifest)) { + vied_nci_cell_ID_t cell_id = + ia_css_program_manifest_get_cell_ID(manifest); + + cell_type_id = vied_nci_cell_get_type(cell_id); + } else { + cell_type_id = + ia_css_program_manifest_get_cell_type_ID(manifest); + } + + /* loop over vied_nci_cell_mem_type to verify mem_type_id for a + * specific cell_type_id + */ + for (mem_index = 0; mem_index < VIED_NCI_N_MEM_TYPE_ID; mem_index++) { + if ((int)mem_type_id == + (int)vied_nci_cell_type_get_mem_type( + cell_type_id, mem_index)) { + int_mem_size = manifest->int_mem_size[mem_index]; + } + } + +EXIT: + if (NULL == manifest || mem_type_id >= VIED_NCI_N_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_int_mem_size invalid argument\n"); + } + return int_mem_size; +} + +int ia_css_program_manifest_set_cells_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + int array_index = 0; + int bit_index; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_cells_bitmap(): enter:\n"); + + if (manifest != NULL) { + for (bit_index = 0; bit_index < VIED_NCI_N_CELL_ID; bit_index++) { + if (vied_nci_is_bit_set_in_bitmap(bitmap, bit_index)) { + verifexit(array_index < IA_CSS_PROCESS_MAX_CELLS); +#if IA_CSS_PROCESS_MAX_CELLS == 1 + manifest->cell_id = (vied_nci_cell_ID_t)bit_index; +#else + manifest->cells[array_index] = (vied_nci_cell_ID_t)bit_index; +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + array_index++; + } + } + for (; array_index < IA_CSS_PROCESS_MAX_CELLS; array_index++) { +#if IA_CSS_PROCESS_MAX_CELLS == 1 + manifest->cell_id = VIED_NCI_N_CELL_ID; +#else + manifest->cells[array_index] 
= VIED_NCI_N_CELL_ID; +#endif /* IA_CSS_PROCESS_MAX_CELLS */ + } + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_cells_bitmap invalid argument\n"); + } +EXIT: + return retval; +} + +vied_nci_resource_bitmap_t ia_css_program_manifest_get_cells_bitmap( + const ia_css_program_manifest_t *manifest) +{ + vied_nci_resource_bitmap_t bitmap = 0; +#if IA_CSS_PROCESS_MAX_CELLS > 1 + int i = 0; +#endif /* IA_CSS_PROCESS_MAX_CELLS > 1 */ + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_cells_bitmap(): enter:\n"); + + verifexit(manifest != NULL); + +#if IA_CSS_PROCESS_MAX_CELLS == 1 + bitmap = (1 << manifest->cell_id); +#else + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + if (VIED_NCI_N_CELL_ID != manifest->cells[i]) { + bitmap |= (1 << manifest->cells[i]); + } +#ifdef __HIVECC +#pragma hivecc unroll +#endif + } +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_cells_bitmap invalid argument\n"); + } + return bitmap; +} + +int ia_css_program_manifest_set_dfm_port_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_dfm_port_bitmap(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + manifest->dfm_port_bitmap[dfm_type_id] = bitmap; +#else + (void)bitmap; + (void)dfm_type_id; +#endif + retval = 0; + +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_dfm_port_bitmap invalid argument\n"); + } + return retval; +} + +int ia_css_program_manifest_set_dfm_active_port_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_dfm_active_port_bitmap(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + manifest->dfm_active_port_bitmap[dfm_type_id] = bitmap; +#else + (void)bitmap; + (void)dfm_type_id; +#endif + retval = 0; + +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_dfm_active_port_bitmap invalid argument\n"); + } + return retval; +} + +int ia_css_program_manifest_set_is_dfm_relocatable( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const uint8_t is_relocatable) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_is_dfm_relocatable(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + manifest->is_dfm_relocatable[dfm_type_id] = is_relocatable; +#else + (void)is_relocatable; + (void)dfm_type_id; +#endif + retval = 0; + + EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_is_dfm_relocatable invalid argument\n"); + } + + return retval; +} + +uint8_t ia_css_program_manifest_get_is_dfm_relocatable( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id) +{ + uint8_t ret = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_is_dfm_relocatable(): enter:\n"); + + 
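+	/*
+	 * Editorial note, not from the original patch: all DFM accessors
+	 * here are gated on VIED_NCI_N_DEV_DFM_ID. On resource models
+	 * without DFM devices the bitmap/flag arrays do not exist, so the
+	 * getters fall back to 0 and the setters become successful no-ops.
+	 * A hypothetical caller (the names m and dfm_id are illustrative):
+	 *
+	 *	if (ia_css_program_manifest_get_is_dfm_relocatable(m, dfm_id))
+	 *		. . . remap the DFM port allocation . . .
+	 *
+	 * gets 0 both when the port is fixed and when m is NULL.
+	 */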
verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + ret = manifest->is_dfm_relocatable[dfm_type_id]; +#else + ret = 0; + (void)dfm_type_id; +#endif +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_is_dfm_relocatable invalid argument\n"); + } + return ret; +} + +vied_nci_resource_bitmap_t ia_css_program_manifest_get_dfm_port_bitmap( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id) +{ + vied_nci_resource_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_dfm_port_bitmap(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + bitmap = manifest->dfm_port_bitmap[dfm_type_id]; +#else + bitmap = 0; + (void)dfm_type_id; +#endif +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_dfm_port_bitmap invalid argument\n"); + } + return bitmap; +} + +vied_nci_resource_bitmap_t ia_css_program_manifest_get_dfm_active_port_bitmap( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id) +{ + vied_nci_resource_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_dfm_active_port_bitmap(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + bitmap = manifest->dfm_active_port_bitmap[dfm_type_id]; +#else + bitmap = 0; + (void)dfm_type_id; +#endif +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_dfm_active_port_bitmap invalid argument\n"); + } + return bitmap; +} + +int ia_css_program_manifest_set_int_mem_size( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t int_mem_size) +{ + int retval = -1; + vied_nci_cell_type_ID_t cell_type_id; + int mem_index; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_int_mem_size(): enter:\n"); + + if (ia_css_has_program_manifest_fixed_cell(manifest)) { + vied_nci_cell_ID_t cell_id = + ia_css_program_manifest_get_cell_ID(manifest); + + cell_type_id = vied_nci_cell_get_type(cell_id); + } else { + cell_type_id = + ia_css_program_manifest_get_cell_type_ID(manifest); + } + + if (manifest != NULL && mem_type_id < VIED_NCI_N_MEM_TYPE_ID) { + /* loop over vied_nci_cell_mem_type to verify mem_type_id for + * a specific cell_type_id + */ + for (mem_index = 0; mem_index < VIED_NCI_N_MEM_TYPE_ID; + mem_index++) { + if ((int)mem_type_id == + (int)vied_nci_cell_type_get_mem_type( + cell_type_id, mem_index)) { + manifest->int_mem_size[mem_index] = + int_mem_size; + retval = 0; + } + } + } + if (retval != 0) { + IA_CSS_TRACE_2(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_int_mem_size cell_type_id %d has no mem_type_id %d\n", + (int)cell_type_id, (int)mem_type_id); + } + + return retval; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id) +{ + vied_nci_resource_size_t ext_mem_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_ext_mem_size(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID); + + ext_mem_size = manifest->ext_mem_size[mem_type_id]; +EXIT: + if (NULL == manifest 
|| mem_type_id >= VIED_NCI_N_DATA_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_ext_mem_size invalid argument\n"); + } + return ext_mem_size; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_offset( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id) +{ + vied_nci_resource_size_t ext_mem_offset = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_ext_mem_offset(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID); + + ext_mem_offset = manifest->ext_mem_offset[mem_type_id]; +EXIT: + if (NULL == manifest || mem_type_id >= VIED_NCI_N_DATA_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_ext_mem_offset invalid argument\n"); + } + return ext_mem_offset; +} + +int ia_css_program_manifest_set_ext_mem_size( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t ext_mem_size) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_ext_mem_size(): enter:\n"); + + if (manifest != NULL && mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID) { + manifest->ext_mem_size[mem_type_id] = ext_mem_size; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_ext_mem_size invalid argument\n"); + } + + return retval; +} + +int ia_css_program_manifest_set_ext_mem_offset( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t ext_mem_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_ext_mem_offset(): enter:\n"); + + if (manifest != NULL && mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID) { + manifest->ext_mem_offset[mem_type_id] = ext_mem_offset; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_ext_mem_offset invalid argument\n"); + } + + return retval; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + vied_nci_resource_size_t dev_chn_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_dev_chn_size(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(dev_chn_id < VIED_NCI_N_DEV_CHN_ID); + + dev_chn_size = manifest->dev_chn_size[dev_chn_id]; +EXIT: + if (NULL == manifest || dev_chn_id >= VIED_NCI_N_DEV_CHN_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_dev_chn_size invalid argument\n"); + } + return dev_chn_size; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_offset( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + vied_nci_resource_size_t dev_chn_offset = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_dev_chn_offset(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(dev_chn_id < VIED_NCI_N_DEV_CHN_ID); + + dev_chn_offset = manifest->dev_chn_offset[dev_chn_id]; +EXIT: + if (NULL == manifest || dev_chn_id >= VIED_NCI_N_DEV_CHN_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_dev_chn_offset invalid argument\n"); + } + return dev_chn_offset; +} + +int ia_css_program_manifest_set_dev_chn_size( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t 
dev_chn_size) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_dev_chn_size(): enter:\n"); + + if (manifest != NULL && dev_chn_id < VIED_NCI_N_DEV_CHN_ID) { + manifest->dev_chn_size[dev_chn_id] = dev_chn_size; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_dev_chn_size invalid argument\n"); + } + + return retval; +} + +int ia_css_program_manifest_set_dev_chn_offset( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t dev_chn_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_dev_chn_offset(): enter:\n"); + + if (manifest != NULL && dev_chn_id < VIED_NCI_N_DEV_CHN_ID) { + manifest->dev_chn_offset[dev_chn_id] = dev_chn_offset; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_dev_chn_offset invalid argument\n"); + } + + return retval; +} + +uint8_t ia_css_program_manifest_get_program_dependency_count( + const ia_css_program_manifest_t *manifest) +{ + uint8_t program_dependency_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_program_dependency_count(): enter:\n"); + + if (manifest != NULL) { + program_dependency_count = manifest->program_dependency_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_program_dependency_count invalid argument\n"); + } + return program_dependency_count; +} + +uint8_t ia_css_program_manifest_get_program_dependency( + const ia_css_program_manifest_t *manifest, + const unsigned int index) +{ + uint8_t program_dependency = IA_CSS_PROGRAM_INVALID_DEPENDENCY; + uint8_t *program_dep_ptr; + uint8_t program_dependency_count; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_program_dependency(): enter:\n"); + + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + + if (index < program_dependency_count) { + program_dep_ptr = + (uint8_t *)((uint8_t *)manifest + + manifest->program_dependency_offset + + index * sizeof(uint8_t)); + program_dependency = *program_dep_ptr; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_program_dependency invalid argument\n"); + } + return program_dependency; +} + +int ia_css_program_manifest_set_program_dependency( + ia_css_program_manifest_t *manifest, + const uint8_t program_dependency, + const unsigned int index) +{ + int retval = -1; + uint8_t *program_dep_ptr; + uint8_t program_dependency_count; + uint8_t program_count; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_program_dependency(): enter:\n"); + + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + program_count = + ia_css_program_group_manifest_get_program_count( + ia_css_program_manifest_get_parent(manifest)); + + if ((index < program_dependency_count) && + (program_dependency < program_count)) { + program_dep_ptr = (uint8_t *)((uint8_t *)manifest + + manifest->program_dependency_offset + + index*sizeof(uint8_t)); + *program_dep_ptr = program_dependency; + retval = 0; + } + + if (retval != 0) { + IA_CSS_TRACE_3(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_program_dependency(m, %d, %d) failed (%i)\n", + program_dependency, index, retval); + } + return retval; +} + +uint8_t ia_css_program_manifest_get_terminal_dependency_count( + const ia_css_program_manifest_t 
*manifest) +{ + uint8_t terminal_dependency_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_terminal_dependency_count(): enter:\n"); + + if (manifest != NULL) { + terminal_dependency_count = manifest->terminal_dependency_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_terminal_dependency_count invalid argument\n"); + } + return terminal_dependency_count; +} + +uint8_t ia_css_program_manifest_get_terminal_dependency( + const ia_css_program_manifest_t *manifest, + const unsigned int index) +{ + uint8_t terminal_dependency = IA_CSS_PROGRAM_INVALID_DEPENDENCY; + uint8_t *terminal_dep_ptr; + uint8_t terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_terminal_dependency(): enter:\n"); + + if (index < terminal_dependency_count) { + terminal_dep_ptr = (uint8_t *)((uint8_t *)manifest + + manifest->terminal_dependency_offset + index); + terminal_dependency = *terminal_dep_ptr; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_terminal_dependency invalid argument\n"); + } + return terminal_dependency; +} + +int ia_css_program_manifest_set_terminal_dependency( + ia_css_program_manifest_t *manifest, + const uint8_t terminal_dependency, + const unsigned int index) +{ + int retval = -1; + uint8_t *terminal_dep_ptr; + uint8_t terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + uint8_t terminal_count = + ia_css_program_group_manifest_get_terminal_count( + ia_css_program_manifest_get_parent(manifest)); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_terminal_dependency(): enter:\n"); + + if ((index < terminal_dependency_count) && + (terminal_dependency < terminal_count)) { + terminal_dep_ptr = (uint8_t *)((uint8_t *)manifest + + manifest->terminal_dependency_offset + index); + *terminal_dep_ptr = terminal_dependency; + retval = 0; + } + + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_terminal_dependency failed (%i)\n", + retval); + } + return retval; +} + +bool ia_css_is_program_manifest_subnode_program_type( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_type_t program_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_program_manifest_subnode_program_type(): enter:\n"); + + program_type = ia_css_program_manifest_get_type(manifest); +/* The error return is the limit value, so no need to check on the manifest + * pointer + */ + return (program_type == IA_CSS_PROGRAM_TYPE_PARALLEL_SUB) || + (program_type == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) || + (program_type == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB); +} + +bool ia_css_is_program_manifest_supernode_program_type( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_type_t program_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_program_manifest_supernode_program_type(): enter:\n"); + + program_type = ia_css_program_manifest_get_type(manifest); + +/* The error return is the limit value, so no need to check on the manifest + * pointer + */ + return (program_type == IA_CSS_PROGRAM_TYPE_PARALLEL_SUPER) || + (program_type == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER) || + (program_type == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER); +} + +bool ia_css_is_program_manifest_singular_program_type( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_type_t program_type; + + 
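+	/*
+	 * Editorial note, not from the original patch: together with the
+	 * subnode/supernode predicates above, this sorts the program types
+	 * used here into disjoint sets:
+	 *
+	 *	SINGULAR                           -> singular
+	 *	{PARALLEL,EXCLUSIVE,VIRTUAL}_SUPER -> supernode
+	 *	{PARALLEL,EXCLUSIVE,VIRTUAL}_SUB   -> subnode
+	 *
+	 * A NULL manifest makes the type getter return the limit value
+	 * IA_CSS_N_PROGRAM_TYPES, which is in none of the sets, so all
+	 * three predicates safely return false without a NULL check.
+	 */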
IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_program_manifest_singular_program_type(): enter:\n"); + + program_type = ia_css_program_manifest_get_type(manifest); + +/* The error return is the limit value, so no need to check on the manifest + * pointer + */ + return (program_type == IA_CSS_PROGRAM_TYPE_SINGULAR); +} + +void ia_css_program_manifest_init( + ia_css_program_manifest_t *blob, + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count) +{ + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_program_manifest_init(): enter:\n"); + + /*TODO: add assert*/ + if (!blob) + return; + + blob->ID = 1; + blob->program_dependency_count = program_dependency_count; + blob->terminal_dependency_count = terminal_dependency_count; + blob->program_dependency_offset = sizeof(ia_css_program_manifest_t); + blob->terminal_dependency_offset = blob->program_dependency_offset + + sizeof(uint8_t) * program_dependency_count; + blob->size = + (uint16_t)ia_css_sizeof_program_manifest( + program_dependency_count, + terminal_dependency_count); +} + +/* We need to refactor those files in order to build in the firmware only + what is needed, switches are put current to workaround compilation problems + in the firmware (for example lack of uint64_t support) + supported in the firmware + */ +#if !defined(__HIVECC) + +#if defined(_MSC_VER) +/* WA for a visual studio compiler bug, refer to + developercommunity.visualstudio.com/content/problem/209359/ice-with-fpfast-in-156-and-msvc-daily-1413263051-p.html +*/ +#pragma optimize("", off) +#endif + +int ia_css_program_manifest_print( + const ia_css_program_manifest_t *manifest, + void *fid) +{ + int retval = -1; + int i, mem_index, dev_chn_index; + + vied_nci_cell_type_ID_t cell_type_id; + uint8_t program_dependency_count; + uint8_t terminal_dependency_count; + ia_css_kernel_bitmap_t bitmap; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_program_manifest_print(): enter:\n"); + + verifexit(manifest != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "sizeof(manifest) = %d\n", + (int)ia_css_program_manifest_get_size(manifest)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "program ID = %d\n", + (int)ia_css_program_manifest_get_program_ID(manifest)); + + bitmap = ia_css_program_manifest_get_kernel_bitmap(manifest); + verifexit(ia_css_kernel_bitmap_print(bitmap, fid) == 0); + + if (ia_css_has_program_manifest_fixed_cell(manifest)) { + vied_nci_cell_ID_t cell_id = + ia_css_program_manifest_get_cell_ID(manifest); + + cell_type_id = vied_nci_cell_get_type(cell_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "cell(program) = %d\n", + (int)cell_id); + } else { + cell_type_id = + ia_css_program_manifest_get_cell_type_ID(manifest); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "cell type(program) = %d\n", + (int)cell_type_id); + + for (mem_index = 0; mem_index < (int)VIED_NCI_N_MEM_TYPE_ID; + mem_index++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(internal mem) type = %d\n", + (int)vied_nci_cell_type_get_mem_type(cell_type_id, mem_index)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(internal mem) size = %d\n", + manifest->int_mem_size[mem_index]); + } + + for (mem_index = 0; mem_index < (int)VIED_NCI_N_DATA_MEM_TYPE_ID; + mem_index++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(external mem) type = %d\n", + (int)(vied_nci_mem_type_ID_t)mem_index); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(external mem) size = %d\n", + manifest->ext_mem_size[mem_index]); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(external mem) 
offset = %d\n", + manifest->ext_mem_offset[mem_index]); + } + + for (dev_chn_index = 0; dev_chn_index < (int)VIED_NCI_N_DEV_CHN_ID; + dev_chn_index++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(device channel) type = %d\n", + (int)dev_chn_index); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(device channel) size = %d\n", + manifest->dev_chn_size[dev_chn_index]); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(device channel) offset = %d\n", + manifest->dev_chn_offset[dev_chn_index]); + } +#if HAS_DFM + for (dev_chn_index = 0; dev_chn_index < (int)VIED_NCI_N_DEV_DFM_ID; + dev_chn_index++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(dfm port) type = %d\n", + (int)dev_chn_index); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(dfm port) port_bitmap = %d\n", + manifest->dfm_port_bitmap[dev_chn_index]); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(dfm port) active_port_bitmap = %d\n", + manifest->dfm_active_port_bitmap[dev_chn_index]); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(dfm port) is_dfm_relocatable = %d\n", + manifest->is_dfm_relocatable[dev_chn_index]); + } +#endif + +#if IA_CSS_PROCESS_MAX_CELLS == 1 + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(cells) bitmap = %d\n", + manifest->cell_id); +#else + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(cells) bitmap = %d\n", + manifest->cells[i]); + } +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + if (program_dependency_count == 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "program_dependencies[%d] {};\n", + program_dependency_count); + } else { + uint8_t prog_dep; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "program_dependencies[%d] {\n", + program_dependency_count); + for (i = 0; i < (int)program_dependency_count - 1; i++) { + prog_dep = + ia_css_program_manifest_get_program_dependency( + manifest, i); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t %4d,\n", prog_dep); + } + prog_dep = + ia_css_program_manifest_get_program_dependency(manifest, i); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\t %4d }\n", prog_dep); + (void)prog_dep; + } + + terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + if (terminal_dependency_count == 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "terminal_dependencies[%d] {};\n", + terminal_dependency_count); + } else { + uint8_t term_dep; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "terminal_dependencies[%d] {\n", + terminal_dependency_count); + for (i = 0; i < (int)terminal_dependency_count - 1; i++) { + term_dep = + ia_css_program_manifest_get_terminal_dependency( + manifest, i); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t %4d,\n", term_dep); + } + term_dep = + ia_css_program_manifest_get_terminal_dependency(manifest, i); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\t %4d }\n", term_dep); + (void)term_dep; + } + (void)cell_type_id; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_print failed (%i)\n", retval); + } + return retval; +} + +#if defined(_MSC_VER) +/* WA for a visual studio compiler bug */ +#pragma optimize("", off) +#endif + +#endif + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_terminal_manifest.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_terminal_manifest.c new file mode 100644 index 000000000000..80ff0d5b0080 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_terminal_manifest.c @@ -0,0 +1,1138 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include + +/* Data object types on the terminals */ +#include +/* for ia_css_kernel_bitmap_t, ia_css_kernel_bitmap_clear, ia_css_... */ +#include + +#include "ia_css_psys_program_group_private.h" +#include "ia_css_terminal_manifest.h" +#include "ia_css_terminal_manifest_types.h" + +#include +#include +#include +#include "ia_css_psys_static_trace.h" + +/* We need to refactor these files so that only what is needed is built + into the firmware; these switches are a temporary workaround for + compilation problems in the firmware (for example, lack of uint64_t support). + */ +#if !defined(__HIVECC) +static const char *terminal_type_strings[IA_CSS_N_TERMINAL_TYPES + 1] = { + "IA_CSS_TERMINAL_TYPE_DATA_IN", + "IA_CSS_TERMINAL_TYPE_DATA_OUT", + "IA_CSS_TERMINAL_TYPE_PARAM_STREAM", + /**< Type 1-5 parameter input */ + "IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN", + /**< Type 1-5 parameter output */ + "IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT", + /**< Represent the new type of terminal for + * the "spatial dependent parameters", when params go in + */ + "IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN", + /**< Represent the new type of terminal for + * the "spatial dependent parameters", when params go out + */ + "IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT", + /**< Represent the new type of terminal for + * the explicit slicing, when params go in + */ + "IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN", + /**< Represent the new type of terminal for + * the explicit slicing, when params go out + */ + "IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT", + /**< State (private data) input */ + "IA_CSS_TERMINAL_TYPE_STATE_IN", + /**< State (private data) output */ + "IA_CSS_TERMINAL_TYPE_STATE_OUT", + "IA_CSS_TERMINAL_TYPE_PROGRAM", + "IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT", + "UNDEFINED_TERMINAL_TYPE"}; + +#endif + +bool ia_css_is_terminal_manifest_spatial_parameter_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_spatial_parameter_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return ((terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN) || + (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT)); +} + +bool ia_css_is_terminal_manifest_program_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_program_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM); +} + +bool ia_css_is_terminal_manifest_program_control_init_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + 
"ia_css_is_terminal_manifest_program_control_init_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT); +} + + +bool ia_css_is_terminal_manifest_parameter_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + /* will return an error value on error */ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_parameter_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT); +} + +bool ia_css_is_terminal_manifest_data_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + /* will return an error value on error */ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_data_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return ((terminal_type == IA_CSS_TERMINAL_TYPE_DATA_IN) || + (terminal_type == IA_CSS_TERMINAL_TYPE_DATA_OUT)); +} + +bool ia_css_is_terminal_manifest_sliced_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_sliced_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return ((terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN) || + (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT)); +} + +size_t ia_css_terminal_manifest_get_size( + const ia_css_terminal_manifest_t *manifest) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_get_size(): enter:\n"); + + if (manifest != NULL) { + size = manifest->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_terminal_manifest_get_size: invalid argument\n"); + } + return size; +} + +ia_css_terminal_type_t ia_css_terminal_manifest_get_type( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_get_type(): enter:\n"); + + if (manifest != NULL) { + terminal_type = manifest->terminal_type; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_terminal_manifest_get_type: invalid argument\n"); + } + return terminal_type; +} + +int ia_css_terminal_manifest_set_type( + ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_type_t terminal_type) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_set_type(): enter:\n"); + + if (manifest != NULL) { + manifest->terminal_type = terminal_type; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_set_type failed (%i)\n", + retval); + } + return retval; +} + +int ia_css_terminal_manifest_set_ID( + ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_ID_t ID) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_set_ID(): enter:\n"); + + if (manifest != NULL) { + manifest->ID = ID; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_set_ID failed (%i)\n", + retval); + } + return retval; +} + +ia_css_terminal_ID_t ia_css_terminal_manifest_get_ID( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_ID_t retval; + + 
IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_get_ID(): enter:\n"); + + if (manifest != NULL) { + retval = manifest->ID; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_get_ID failed\n"); + retval = IA_CSS_TERMINAL_INVALID_ID; + } + return retval; +} + +ia_css_program_group_manifest_t *ia_css_terminal_manifest_get_parent( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_program_group_manifest_t *parent = NULL; + char *base; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_get_parent(): enter:\n"); + + verifexit(manifest != NULL); + + base = (char *)((char *)manifest + manifest->parent_offset); + + parent = (ia_css_program_group_manifest_t *)(base); +EXIT: + return parent; +} + +int ia_css_terminal_manifest_set_parent_offset( + ia_css_terminal_manifest_t *manifest, + int32_t terminal_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_set_parent_offset(): enter:\n"); + + verifexit(manifest != NULL); + + /* parent is at negative offset away from current terminal offset*/ + manifest->parent_offset = -terminal_offset; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_set_parent_offset failed (%i)\n", + retval); + } + return retval; +} + +ia_css_frame_format_bitmap_t +ia_css_data_terminal_manifest_get_frame_format_bitmap( + const ia_css_data_terminal_manifest_t *manifest) +{ + ia_css_frame_format_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_frame_format_bitmap(): enter:\n"); + + if (manifest != NULL) { + bitmap = manifest->frame_format_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_data_terminal_manifest_get_frame_format_bitmap invalid argument\n"); + } + return bitmap; +} + +int ia_css_data_terminal_manifest_set_frame_format_bitmap( + ia_css_data_terminal_manifest_t *manifest, + ia_css_frame_format_bitmap_t bitmap) +{ + int ret = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_frame_format_bitmap(): enter:\n"); + + if (manifest != NULL) { + manifest->frame_format_bitmap = bitmap; + ret = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_frame_format_bitmap failed (%i)\n", + ret); + } + + return ret; +} + +bool ia_css_data_terminal_manifest_can_support_compression( + const ia_css_data_terminal_manifest_t *manifest) +{ + bool compression_support = false; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_can_support_compression(): enter:\n"); + + if (manifest != NULL) { + /* compression_support is a boolean encoded in a uint8_t, + * so we only need to check + * whether it is non-zero + */ + compression_support = (manifest->compression_support != 0); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_can_support_compression invalid argument\n"); + } + + return compression_support; +} + +int ia_css_data_terminal_manifest_set_compression_support( + ia_css_data_terminal_manifest_t *manifest, + bool compression_support) +{ + int ret = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_compression_support(): enter:\n"); + + if (manifest != NULL) { + manifest->compression_support = + (compression_support == true) ? 
1 : 0; + ret = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_compression_support failed (%i)\n", + ret); + } + + return ret; +} + +ia_css_connection_bitmap_t ia_css_data_terminal_manifest_get_connection_bitmap( + const ia_css_data_terminal_manifest_t *manifest) +{ + ia_css_connection_bitmap_t connection_bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_connection_bitmap(): enter:\n"); + + if (manifest != NULL) { + connection_bitmap = manifest->connection_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_data_terminal_manifest_get_connection_bitmap invalid argument\n"); + } + return connection_bitmap; +} + +int ia_css_data_terminal_manifest_set_connection_bitmap( + ia_css_data_terminal_manifest_t *manifest, ia_css_connection_bitmap_t bitmap) +{ + int ret = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_connection_bitmap(): enter:\n"); + + if (manifest != NULL) { + assert(bitmap != 0); /* zero means there is no connection; this is invalid. */ + assert((bitmap >> IA_CSS_N_CONNECTION_TYPES) == 0); + + manifest->connection_bitmap = bitmap; + ret = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_data_terminal_manifest_set_connection_bitmap invalid argument\n"); + } + return ret; +} + +/* We need to refactor these files so that only what is needed is built + into the firmware; these switches are a temporary workaround for + compilation problems in the firmware (for example, lack of uint64_t support). + */ +#if !defined(__HIVECC) +ia_css_kernel_bitmap_t ia_css_data_terminal_manifest_get_kernel_bitmap( + const ia_css_data_terminal_manifest_t *manifest) +{ + ia_css_kernel_bitmap_t kernel_bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + kernel_bitmap = manifest->kernel_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_data_terminal_manifest_get_kernel_bitmap: invalid argument\n"); + } + return kernel_bitmap; +} + +int ia_css_data_terminal_manifest_set_kernel_bitmap( + ia_css_data_terminal_manifest_t *manifest, + const ia_css_kernel_bitmap_t kernel_bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + manifest->kernel_bitmap = kernel_bitmap; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_kernel_bitmap: failed (%i)\n", + retval); + } + + return retval; +} + +int ia_css_data_terminal_manifest_set_kernel_bitmap_unique( + ia_css_data_terminal_manifest_t *manifest, + const unsigned int index) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_kernel_bitmap_unique(): enter:\n"); + + if (manifest != NULL) { + ia_css_kernel_bitmap_t kernel_bitmap = + ia_css_kernel_bitmap_clear(); + + kernel_bitmap = ia_css_kernel_bitmap_set(kernel_bitmap, index); + verifexit(!ia_css_is_kernel_bitmap_empty(kernel_bitmap)); + verifexit(ia_css_data_terminal_manifest_set_kernel_bitmap( + manifest, kernel_bitmap) == 0); + retval = 0; + } + +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_kernel_bitmap_unique failed (%i)\n", + retval); + } + return retval; +} +#endif + +int ia_css_data_terminal_manifest_set_min_size( + 
ia_css_data_terminal_manifest_t *manifest, + const uint16_t min_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_min_size(): enter:\n"); + + verifexit(manifest != NULL); + + manifest->min_size[IA_CSS_COL_DIMENSION] = + min_size[IA_CSS_COL_DIMENSION]; + manifest->min_size[IA_CSS_ROW_DIMENSION] = + min_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_min_size: invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_set_max_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t max_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_max_size(): enter:\n"); + + verifexit(manifest != NULL); + + manifest->max_size[IA_CSS_COL_DIMENSION] = + max_size[IA_CSS_COL_DIMENSION]; + manifest->max_size[IA_CSS_ROW_DIMENSION] = + max_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_max_size: invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_get_min_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_min_size(): enter:\n"); + + verifexit(manifest != NULL); + + min_size[IA_CSS_COL_DIMENSION] = + manifest->min_size[IA_CSS_COL_DIMENSION]; + min_size[IA_CSS_ROW_DIMENSION] = + manifest->min_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_get_min_size: invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_get_max_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_max_size(): enter:\n"); + + verifexit(manifest != NULL); + + max_size[IA_CSS_COL_DIMENSION] = + manifest->max_size[IA_CSS_COL_DIMENSION]; + max_size[IA_CSS_ROW_DIMENSION] = + manifest->max_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_get_max_size: invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_set_min_fragment_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t min_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_min_fragment_size(): enter:\n"); + + verifexit(manifest != NULL); + + manifest->min_fragment_size[IA_CSS_COL_DIMENSION] = + min_size[IA_CSS_COL_DIMENSION]; + manifest->min_fragment_size[IA_CSS_ROW_DIMENSION] = + min_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_min_fragment_size invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_set_max_fragment_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t max_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_max_fragment_size(): enter:\n"); + + 
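/* verifexit() jumps to the EXIT label below when its condition fails, + * leaving retval at -1 so the failure is traced and reported. + */ +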
verifexit(manifest != NULL); + + manifest->max_fragment_size[IA_CSS_COL_DIMENSION] = + max_size[IA_CSS_COL_DIMENSION]; + manifest->max_fragment_size[IA_CSS_ROW_DIMENSION] = + max_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_max_fragment_size invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_get_min_fragment_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_min_fragment_size(): enter:\n"); + + verifexit(manifest != NULL); + + min_size[IA_CSS_COL_DIMENSION] = + manifest->min_fragment_size[IA_CSS_COL_DIMENSION]; + min_size[IA_CSS_ROW_DIMENSION] = + manifest->min_fragment_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_get_min_fragment_size invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_get_max_fragment_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_max_fragment_size(): enter:\n"); + + verifexit(manifest != NULL); + + max_size[IA_CSS_COL_DIMENSION] = + manifest->max_fragment_size[IA_CSS_COL_DIMENSION]; + max_size[IA_CSS_ROW_DIMENSION] = + manifest->max_fragment_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_get_max_fragment_size invalid argument\n"); + } + return retval; +} + +/* We need to refactor these files so that only what is needed is built + into the firmware; these switches are a temporary workaround for + compilation problems in the firmware (for example, lack of uint64_t support). + */ +#if !defined(__HIVECC) + +#define PRINT_DIMENSION(name, var) IA_CSS_TRACE_3(PSYSAPI_STATIC, \ + INFO, "%s:\t%d %d\n", \ + (name), \ + (var)[IA_CSS_COL_DIMENSION], \ + (var)[IA_CSS_ROW_DIMENSION]) + +int ia_css_terminal_manifest_print( + const ia_css_terminal_manifest_t *manifest, + void *fid) +{ + int retval = -1; + ia_css_terminal_type_t terminal_type = + ia_css_terminal_manifest_get_type(manifest); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_terminal_manifest_print(): enter:\n"); + + verifexit(manifest != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "sizeof(manifest) = %d\n", + (int)ia_css_terminal_manifest_get_size(manifest)); + + PRINT("typeof(manifest) = %s\n", terminal_type_strings[terminal_type]); + + if (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT) { + ia_css_param_terminal_manifest_t *pterminal_manifest = + (ia_css_param_terminal_manifest_t *)manifest; + uint16_t section_count = + pterminal_manifest->param_manifest_section_desc_count; + int i; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "sections(manifest) = %d\n", (int)section_count); + for (i = 0; i < section_count; i++) { + const ia_css_param_manifest_section_desc_t *manifest = + ia_css_param_terminal_manifest_get_prm_sct_desc( + pterminal_manifest, i); + verifjmpexit(manifest != NULL); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "kernel_id = %d\n", (int)manifest->kernel_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "mem_type_id = %d\n", + 
(int)manifest->mem_type_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "max_mem_size = %d\n", + (int)manifest->max_mem_size); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "region_id = %d\n", + (int)manifest->region_id); + } + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT) { + ia_css_sliced_param_terminal_manifest_t + *sliced_terminal_manifest = + (ia_css_sliced_param_terminal_manifest_t *)manifest; + uint32_t kernel_id; + uint16_t section_count; + uint16_t section_idx; + + kernel_id = sliced_terminal_manifest->kernel_id; + section_count = + sliced_terminal_manifest->sliced_param_section_count; + + NOT_USED(kernel_id); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "kernel_id = %d\n", (int)kernel_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "section_count = %d\n", (int)section_count); + + for (section_idx = 0; section_idx < section_count; + section_idx++) { + ia_css_sliced_param_manifest_section_desc_t + *sliced_param_manifest_section_desc; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "section %d\n", (int)section_idx); + sliced_param_manifest_section_desc = + ia_css_sliced_param_terminal_manifest_get_sliced_prm_sct_desc( + sliced_terminal_manifest, section_idx); + verifjmpexit(sliced_param_manifest_section_desc != + NULL); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "mem_type_id = %d\n", + (int)sliced_param_manifest_section_desc->mem_type_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "region_id = %d\n", + (int)sliced_param_manifest_section_desc->region_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "max_mem_size = %d\n", + (int)sliced_param_manifest_section_desc->max_mem_size); + } + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM) { + ia_css_program_terminal_manifest_t *program_terminal_manifest = + (ia_css_program_terminal_manifest_t *)manifest; + uint32_t sequencer_info_kernel_id; + uint16_t max_kernel_fragment_sequencer_command_desc; + uint16_t kernel_fragment_sequencer_info_manifest_info_count; + uint16_t seq_info_idx; + + sequencer_info_kernel_id = + program_terminal_manifest->sequencer_info_kernel_id; + max_kernel_fragment_sequencer_command_desc = + program_terminal_manifest-> + max_kernel_fragment_sequencer_command_desc; + kernel_fragment_sequencer_info_manifest_info_count = + program_terminal_manifest-> + kernel_fragment_sequencer_info_manifest_info_count; + + NOT_USED(sequencer_info_kernel_id); + NOT_USED(max_kernel_fragment_sequencer_command_desc); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "sequencer_info_kernel_id = %d\n", + (int)sequencer_info_kernel_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "max_kernel_fragment_sequencer_command_desc = %d\n", + (int)max_kernel_fragment_sequencer_command_desc); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "kernel_fragment_sequencer_info_manifest_info_count = %d\n", + (int) + kernel_fragment_sequencer_info_manifest_info_count); + + for (seq_info_idx = 0; seq_info_idx < + kernel_fragment_sequencer_info_manifest_info_count; + seq_info_idx++) { + ia_css_kernel_fragment_sequencer_info_manifest_desc_t + *sequencer_info_manifest_desc; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "sequencer info %d\n", (int)seq_info_idx); + sequencer_info_manifest_desc = + ia_css_program_terminal_manifest_get_kernel_frgmnt_seq_info_desc + (program_terminal_manifest, seq_info_idx); + verifjmpexit(sequencer_info_manifest_desc != NULL); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_slice_dimension[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + 
min_fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_slice_dimension[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_slice_count[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_slice_count[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_point_decimation_factor[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_point_decimation_factor[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_overlay_on_pixel_topleft_index[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_overlay_on_pixel_topleft_index[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_overlay_on_pixel_dimension[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_overlay_on_pixel_dimension[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + } + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT) { + ia_css_program_control_init_terminal_manifest_t *progctrlinit_man = + (ia_css_program_control_init_terminal_manifest_t *)manifest; + ia_css_program_control_init_terminal_manifest_print(progctrlinit_man); + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_DATA_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_DATA_OUT) { + + ia_css_data_terminal_manifest_t *dterminal_manifest = + (ia_css_data_terminal_manifest_t *)manifest; + int i; + + NOT_USED(dterminal_manifest); + + verifexit(ia_css_kernel_bitmap_print( + 
ia_css_data_terminal_manifest_get_kernel_bitmap( + dterminal_manifest), fid) == 0); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "formats(manifest) = %04x\n", + (int)ia_css_data_terminal_manifest_get_frame_format_bitmap( + dterminal_manifest)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "connection(manifest) = %04x\n", + (int)ia_css_data_terminal_manifest_get_connection_bitmap( + dterminal_manifest)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "dependent(manifest) = %d\n", + (int)dterminal_manifest->terminal_dependency); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\tmin_size[%d] = {\n", + IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d,\n", dterminal_manifest->min_size[i]); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d }\n", dterminal_manifest->min_size[i]); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\tmax_size[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d,\n", dterminal_manifest->max_size[i]); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d }\n", dterminal_manifest->max_size[i]); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\tmin_fragment_size[%d] = {\n", + IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d,\n", + dterminal_manifest->min_fragment_size[i]); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d }\n", + dterminal_manifest->min_fragment_size[i]); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\tmax_fragment_size[%d] = {\n", + IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d,\n", + dterminal_manifest->max_fragment_size[i]); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d }\n", + dterminal_manifest->max_fragment_size[i]); + + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT) { + + ia_css_spatial_param_terminal_manifest_t *stm = + (ia_css_spatial_param_terminal_manifest_t *)manifest; + ia_css_frame_grid_param_manifest_section_desc_t *sec; + int sec_count = + stm->frame_grid_param_manifest_section_desc_count; + ia_css_fragment_grid_manifest_desc_t *fragd = + &stm->common_fragment_grid_desc; + ia_css_frame_grid_manifest_desc_t *framed = + &stm->frame_grid_desc; + int sec_index; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "kernel_id:\t\t%d\n", + stm->kernel_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "compute_units_p_elem:\t%d\n", + stm->compute_units_p_elem); + + PRINT_DIMENSION("min_fragment_grid_dimension", + fragd->min_fragment_grid_dimension); + PRINT_DIMENSION("max_fragment_grid_dimension", + fragd->max_fragment_grid_dimension); + PRINT_DIMENSION("min_frame_grid_dimension", + framed->min_frame_grid_dimension); + PRINT_DIMENSION("max_frame_grid_dimension", + framed->max_frame_grid_dimension); + + NOT_USED(framed); + NOT_USED(fragd); + + for (sec_index = 0; sec_index < sec_count; sec_index++) { + sec = ia_css_spatial_param_terminal_manifest_get_frm_grid_prm_sct_desc( + stm, sec_index); + verifjmpexit(sec != NULL); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, "--------------------------\n"); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\tmem_type_id:\t%d\n", + sec->mem_type_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\tregion_id:\t%d\n", + sec->region_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\telem_size:\t%d\n", + sec->elem_size); + 
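/* max_mem_size is the upper bound on the memory used by this section. */ +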
IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\tmax_mem_size:\t%d\n", + sec->max_mem_size); + } + } else if (terminal_type < IA_CSS_N_TERMINAL_TYPES) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "terminal type can not be pretty printed, not supported\n"); + } + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_print failed (%i)\n", + retval); + } + return retval; +} + +/* Program control init Terminal */ +unsigned int ia_css_program_control_init_terminal_manifest_get_connect_section_count( + const ia_css_program_control_init_manifest_program_desc_t *prog) +{ + assert(prog); + return prog->connect_section_count; +} + + +unsigned int ia_css_program_control_init_terminal_manifest_get_load_section_count( + const ia_css_program_control_init_manifest_program_desc_t *prog) +{ + assert(prog); + return prog->load_section_count; +} + +unsigned int ia_css_program_control_init_terminal_manifest_get_size( + const uint16_t nof_programs, + const uint16_t *nof_load_sections, + const uint16_t *nof_connect_sections) +{ + (void)nof_load_sections; /* might be needed in future */ + (void)nof_connect_sections; /* might be needed in future */ + + return sizeof(ia_css_program_control_init_terminal_manifest_t) + + nof_programs * + sizeof(ia_css_program_control_init_manifest_program_desc_t); +} + +ia_css_program_control_init_manifest_program_desc_t * +ia_css_program_control_init_terminal_manifest_get_program_desc( + const ia_css_program_control_init_terminal_manifest_t *terminal, + unsigned int program) +{ + ia_css_program_control_init_manifest_program_desc_t *progs; + + assert(terminal != NULL); + assert(program < terminal->program_count); + + progs = (ia_css_program_control_init_manifest_program_desc_t *) + ((const char *)terminal + terminal->program_desc_offset); + + return &progs[program]; +} + +int ia_css_program_control_init_terminal_manifest_init( + ia_css_program_control_init_terminal_manifest_t *terminal, + const uint16_t nof_programs, + const uint16_t *nof_load_sections, + const uint16_t *nof_connect_sections) +{ + unsigned int i; + ia_css_program_control_init_manifest_program_desc_t *progs; + + if (terminal == NULL) { + return -EFAULT; + } + + terminal->program_count = nof_programs; + terminal->program_desc_offset = + sizeof(ia_css_program_control_init_terminal_manifest_t); + + progs = ia_css_program_control_init_terminal_manifest_get_program_desc( + terminal, 0); + + for (i = 0; i < nof_programs; i++) { + progs[i].load_section_count = nof_load_sections[i]; + progs[i].connect_section_count = nof_connect_sections[i]; + } + return 0; +} + +void ia_css_program_control_init_terminal_manifest_print( + ia_css_program_control_init_terminal_manifest_t *terminal) +{ + unsigned int i; + + ia_css_program_control_init_manifest_program_desc_t *progs; + + progs = ia_css_program_control_init_terminal_manifest_get_program_desc( + terminal, 0); + + assert(progs); + (void)progs; + + for (i = 0; i < terminal->program_count; i++) { + IA_CSS_TRACE_3(PSYSAPI_STATIC, INFO, + "program index: %d, load sec: %d, connect sec: %d\n", + i, + progs[i].load_section_count, + progs[i].connect_section_count); + } +} + +#endif + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/psys/bxtB0_gen_reg_dump/ia_css_debug_dump.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/psys/bxtB0_gen_reg_dump/ia_css_debug_dump.c new file mode 100644 index 000000000000..c51d65c8cb64 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/psys/bxtB0_gen_reg_dump/ia_css_debug_dump.c @@ -0,0 +1,15 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#include "ia_css_debug_dump.h" + void ia_css_debug_dump(void) {} \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/psys/bxtB0_gen_reg_dump/ia_css_debug_dump.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/psys/bxtB0_gen_reg_dump/ia_css_debug_dump.h new file mode 100644 index 000000000000..5dd23ddbd180 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/psys/bxtB0_gen_reg_dump/ia_css_debug_dump.h @@ -0,0 +1,17 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#ifndef __IA_CSS_DEBUG_DUMP_H_ + #define __IA_CSS_DEBUG_DUMP_H_ + void ia_css_debug_dump(void); + #endif /* __IA_CSS_DEBUG_DUMP_H_ */ \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/reg_dump_generic_bridge.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/reg_dump_generic_bridge.c new file mode 100644 index 000000000000..dab9c669c182 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/reg_dump/src/reg_dump_generic_bridge.c @@ -0,0 +1,47 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include +#include "ia_css_trace.h" +#ifdef USE_LOGICAL_SSIDS +/* + Logical names can be used to define the SSID. + In order to resolve these names, the following include file should be provided + and the define above should be enabled. +*/ +#include +#endif + +#define REG_DUMP_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#define REG_DUMP_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED + +#ifdef USE_SSID_BUTTRESS +/* With USE_SSID_BUTTRESS, registers are read through the buttress address space as + * seen from the host; these addresses already contain the ISYS or PSYS offset. 
+ */ +#define REG_DUMP_READ_REGISTER(addr)\ + vied_subsystem_load_32(IPU_DEVICE_BUTTRESS, addr) +#else +/* SSID value is defined in test makefiles as either isys0 or psys0 */ +#define REG_DUMP_READ_REGISTER(addr) vied_subsystem_load_32(SSID, addr) +#endif + +#define REG_DUMP_PRINT_0(...) \ +EXPAND_VA_ARGS(IA_CSS_TRACE_0(REG_DUMP, VERBOSE, __VA_ARGS__)) +#define REG_DUMP_PRINT_1(...) \ +EXPAND_VA_ARGS(IA_CSS_TRACE_1(REG_DUMP, VERBOSE, __VA_ARGS__)) +#define EXPAND_VA_ARGS(x) x + +/* Including generated source code for reg_dump */ +#include "ia_css_debug_dump.c" + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/interface/regmem_access.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/interface/regmem_access.h new file mode 100644 index 000000000000..d4576af936f6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/interface/regmem_access.h @@ -0,0 +1,67 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __REGMEM_ACCESS_H +#define __REGMEM_ACCESS_H + +#include "storage_class.h" + +enum regmem_id { + /* pass pkg_dir address to SPC in non-secure mode */ + PKG_DIR_ADDR_REG = 0, + /* pass syscom configuration to SPC */ + SYSCOM_CONFIG_REG = 1, + /* syscom state - modified by SP */ + SYSCOM_STATE_REG = 2, + /* syscom commands - modified by the host */ + SYSCOM_COMMAND_REG = 3, + /* Store interrupt status - updated by SP */ + SYSCOM_IRQ_REG = 4, + /* Store VTL0_ADDR_MASK in trusted secure region - provided by host.*/ + SYSCOM_VTL0_ADDR_MASK = 5, +#if HAS_DUAL_CMD_CTX_SUPPORT + /* Initialized if trustlet exists - updated by host */ + TRUSTLET_STATUS = 6, + /* identify if SPC access blocker programming is completed - updated by SP */ + AB_SPC_STATUS = 7, + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 8 +#else + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 6 +#endif +}; + +#if HAS_DUAL_CMD_CTX_SUPPORT +/* Bit 0: for untrusted non-secure DRV driver on VTL0 + * Bit 1: for trusted secure TEE driver on VTL1 + */ +#define SYSCOM_IRQ_VTL0_MASK 0x1 +#define SYSCOM_IRQ_VTL1_MASK 0x2 +#endif + +STORAGE_CLASS_INLINE unsigned int +regmem_load_32(unsigned int mem_address, unsigned int reg, unsigned int ssid); + +STORAGE_CLASS_INLINE void +regmem_store_32(unsigned int mem_address, unsigned int reg, unsigned int value, + unsigned int ssid); + +#ifdef __VIED_CELL +#include "regmem_access_cell.h" +#else +#include "regmem_access_host.h" +#endif + +#endif /* __REGMEM_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/regmem.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/regmem.mk new file mode 100644 index 000000000000..24ebc1c325d8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/regmem.mk @@ -0,0 +1,32 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. 
+# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +ifndef REGMEM_MK +REGMEM_MK=1 + +# MODULE is REGMEM + +REGMEM_DIR=$${MODULES_DIR}/regmem + +REGMEM_INTERFACE=$(REGMEM_DIR)/interface +REGMEM_SOURCES=$(REGMEM_DIR)/src + +REGMEM_HOST_FILES = +REGMEM_FW_FILES = $(REGMEM_SOURCES)/regmem.c + +REGMEM_CPPFLAGS = -I$(REGMEM_INTERFACE) -I$(REGMEM_SOURCES) +REGMEM_HOST_CPPFLAGS = $(REGMEM_CPPFLAGS) +REGMEM_FW_CPPFLAGS = $(REGMEM_CPPFLAGS) + +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/src/regmem_access_host.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/src/regmem_access_host.h new file mode 100644 index 000000000000..8878d7074fab --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/src/regmem_access_host.h @@ -0,0 +1,41 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __REGMEM_ACCESS_HOST_H +#define __REGMEM_ACCESS_HOST_H + +#include "regmem_access.h" /* implemented interface */ + +#include "storage_class.h" +#include "regmem_const.h" +#include +#include "ia_css_cmem.h" + +STORAGE_CLASS_INLINE unsigned int +regmem_load_32(unsigned int mem_addr, unsigned int reg, unsigned int ssid) +{ + /* No need to add REGMEM_OFFSET, it is already included in mem_addr. */ + return ia_css_cmem_load_32(ssid, mem_addr + (REGMEM_WORD_BYTES*reg)); +} + +STORAGE_CLASS_INLINE void +regmem_store_32(unsigned int mem_addr, unsigned int reg, + unsigned int value, unsigned int ssid) +{ + /* No need to add REGMEM_OFFSET, it is already included in mem_addr. */ + ia_css_cmem_store_32(ssid, mem_addr + (REGMEM_WORD_BYTES*reg), + value); +} + +#endif /* __REGMEM_ACCESS_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/src/regmem_const.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/src/regmem_const.h new file mode 100644 index 000000000000..ac7e3a98a434 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/regmem/src/regmem_const.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __REGMEM_CONST_H +#define __REGMEM_CONST_H + +#ifndef REGMEM_SIZE +#define REGMEM_SIZE (16) +#endif /* REGMEM_SIZE */ +#ifndef REGMEM_OFFSET +#define REGMEM_OFFSET (0) +#endif /* REGMEM_OFFSET */ +#ifndef REGMEM_WORD_BYTES +#define REGMEM_WORD_BYTES (4) +#endif + +#endif /* __REGMEM_CONST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm.h new file mode 100644 index 000000000000..4a04a9890326 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm.h @@ -0,0 +1,173 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_H +#define __IA_CSS_RBM_H + +#include "ia_css_rbm_storage_class.h" +#include + +#define IA_CSS_RBM_BITS 64 +/** An element is a 32 bit unsigned integer. 64 bit integers might cause + * problems in the compiler. + */ +#define IA_CSS_RBM_ELEM_TYPE uint32_t +#define IA_CSS_RBM_ELEM_BITS \ + (sizeof(IA_CSS_RBM_ELEM_TYPE)*8) +#define IA_CSS_RBM_NOF_ELEMS \ + ((IA_CSS_RBM_BITS) / (IA_CSS_RBM_ELEM_BITS)) + +/** Users should make no assumption about the actual type of + * ia_css_rbm_t. + */ +typedef struct { + IA_CSS_RBM_ELEM_TYPE data[IA_CSS_RBM_NOF_ELEMS]; +} ia_css_rbm_elems_t; +typedef ia_css_rbm_elems_t ia_css_rbm_t; + +/** Print the bits of a routing bitmap + * @return < 0 on error + */ +IA_CSS_RBM_STORAGE_CLASS_H +int ia_css_rbm_print( + const ia_css_rbm_t bitmap, + void *fid); + +/** Create an empty routing bitmap + * @return bitmap = 0 + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_clear(void); + +/** Creates the complement of a routing bitmap + * @param bitmap[in] routing bitmap + * @return ~bitmap + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_complement( + const ia_css_rbm_t bitmap); + +/** Create the union of two routing bitmaps + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * @return bitmap0 | bitmap1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_union( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Create the intersection of two routing bitmaps + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * @return bitmap0 & bitmap1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_intersection( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Check if the routing bitmap is empty + * @param bitmap[in] routing bitmap + * @return bitmap == 0 + */ +IA_CSS_RBM_STORAGE_CLASS_H +bool ia_css_is_rbm_empty( + const ia_css_rbm_t bitmap); + +/** Check if the intersection of two routing bitmaps is empty + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * @return (bitmap0 & bitmap1) == 0 + */ +IA_CSS_RBM_STORAGE_CLASS_H +bool ia_css_is_rbm_intersection_empty( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Check if the second routing bitmap is a 
subset of the first (or equal) + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * Note: An empty set is always a subset, so this function + * returns true if bitmap 1 is empty + * @return (bitmap0 & bitmap1) == bitmap1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +bool ia_css_is_rbm_subset( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Check if the routing bitmaps are equal + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * @return bitmap0 == bitmap1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +bool ia_css_is_rbm_equal( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Checks whether a specific bit is set + * @return bitmap[index] == 1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +int ia_css_is_rbm_set( + const ia_css_rbm_t bitmap, + const unsigned int index); + +/** Create the union of a routing bitmap with a onehot bitmap + * with a bit set at index + * @return bitmap[index] |= 1 +*/ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_set( + const ia_css_rbm_t bitmap, + const unsigned int index); + +/** Creates routing bitmap using a uint64 value. + * @return bitmap with the same bits set as in value (provided that width of bitmap is sufficient). + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_create_from_uint64( + const uint64_t value); + +/** Converts an ia_css_rbm_t type to uint64_t. Note that if + * ia_css_rbm_t contains more than 64 bits, only the lowest 64 bits + * are returned. + * @return uint64_t representation of value + */ +IA_CSS_RBM_STORAGE_CLASS_H +uint64_t ia_css_rbm_to_uint64( + const ia_css_rbm_t value); + +/** Creates a routing bitmap with the bit at index 'index' removed. + * @return ~(1 << index) & bitmap + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_unset( + const ia_css_rbm_t bitmap, + const unsigned int index); + +/** Create a onehot routing bitmap with a bit set at index + * @return bitmap[index] = 1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_bit_mask( + const unsigned int index); + +#ifdef __IA_CSS_RBM_INLINE__ +#include "ia_css_rbm_impl.h" +#endif /* __IA_CSS_RBM_INLINE__ */ + +#endif /* __IA_CSS_RBM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest.h new file mode 100644 index 000000000000..ee700df72dff --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest.h @@ -0,0 +1,134 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_MANIFEST_H +#define __IA_CSS_RBM_MANIFEST_H + +#include "type_support.h" +#include "ia_css_rbm_manifest_types.h" + +/** Returns the descriptor size of the RBM manifest. + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +unsigned int +ia_css_rbm_manifest_get_size(void); + +/** Initializes the RBM manifest. + * @param rbm[in] Routing bitmap. 
+ */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +void +ia_css_rbm_manifest_init(struct ia_css_rbm_manifest_s *rbm); + +/** Returns a pointer to the array of mux descriptors. + * @param manifest[in] Routing bitmap manifest. + * @return NULL on error + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +ia_css_rbm_mux_desc_t * +ia_css_rbm_manifest_get_muxes(const ia_css_rbm_manifest_t *manifest); + +/** Returns the size of the mux descriptor array. + * @param manifest[in] Routing bitmap manifest. + * @return size + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +unsigned int +ia_css_rbm_manifest_get_mux_count(const ia_css_rbm_manifest_t *manifest); + +/** Returns a pointer to the array of validation descriptors. + * @param manifest[in] Routing bitmap manifest. + * @return NULL on error + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +ia_css_rbm_validation_rule_t * +ia_css_rbm_manifest_get_validation_rules(const ia_css_rbm_manifest_t *manifest); + +/** Returns the size of the validation descriptor array. + * @param manifest[in] Routing bitmap manifest. + * @return size + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +unsigned int +ia_css_rbm_manifest_get_validation_rule_count(const ia_css_rbm_manifest_t *manifest); + +/** Returns a pointer to the array of terminal routing descriptors. + * @param manifest[in] Routing bitmap manifest. + * @return NULL on error + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +ia_css_rbm_terminal_routing_desc_t * +ia_css_rbm_manifest_get_terminal_routing_desc(const ia_css_rbm_manifest_t *manifest); + +/** \brief Returns the size of the terminal routing descriptor array. + * @param manifest[in] Routing bitmap manifest. + * @return size + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +unsigned int +ia_css_rbm_manifest_get_terminal_routing_desc_count(const ia_css_rbm_manifest_t *manifest); + +/** Pretty prints the routing bitmap manifest. + * @param manifest[in] Routing bitmap manifest. + */ +void +ia_css_rbm_manifest_print(const ia_css_rbm_manifest_t *manifest); + +/** \brief Pretty prints a RBM (routing bitmap). + * Note: pretty printing differs between host and IPU. + * @param rbm[in] Routing bitmap. + * @param mux[in] List of mux descriptors corresponding to rbm. + * @param mux_desc_count[in] Number of muxes in list mux. + */ +void +ia_css_rbm_pretty_print( + const ia_css_rbm_t *rbm, + const ia_css_rbm_mux_desc_t *mux, + unsigned int mux_desc_count); + +/** \brief Checks the validity of a routing bitmap. + * @param manifest[in] Routing bitmap manifest. + * @param rbm[in] Routing bitmap + * @return true on match. + */ +bool +ia_css_rbm_manifest_check_rbm_validity( + const ia_css_rbm_manifest_t *manifest, + const ia_css_rbm_t *rbm); + +/** \brief Sets, using manifest info, the value of a mux in the routing bitmap. + * @param rbm[in] Routing bitmap. + * @param mux[in] List of mux descriptors corresponding to rbm. + * @param mux_count[in] Number of muxes in list mux. + * @param gp_dev_id[in] ID of sub system (PSA/ISA) where the mux is located. + * @param mux_id[in] ID of mux to set configuration for. + * @param value[in] Value of the mux. + * @return routing bitmap. 
+ */ +ia_css_rbm_t +ia_css_rbm_set_mux( + ia_css_rbm_t rbm, + ia_css_rbm_mux_desc_t *mux, + unsigned int mux_count, + unsigned int gp_dev_id, + unsigned int mux_id, + unsigned int value); + +#ifdef __IA_CSS_RBM_MANIFEST_INLINE__ +#include "ia_css_rbm_manifest_impl.h" +#endif /* __IA_CSS_RBM_MANIFEST_INLINE__ */ + +#endif /* __IA_CSS_RBM_MANIFEST_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest_types.h new file mode 100644 index 000000000000..ade20446b9f6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest_types.h @@ -0,0 +1,95 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_MANIFEST_TYPES_H +#define __IA_CSS_RBM_MANIFEST_TYPES_H + +#include "ia_css_rbm.h" +#include "vied_nci_psys_resource_model.h" + +#ifndef VIED_NCI_RBM_MAX_MUX_COUNT +#error Please define VIED_NCI_RBM_MAX_MUX_COUNT +#endif +#ifndef VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT +#error Please define VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT +#endif +#ifndef VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT +#error Please define VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT +#endif +#ifndef N_PADDING_UINT8_IN_RBM_MANIFEST +#error Please define N_PADDING_UINT8_IN_RBM_MANIFEST +#endif + +#define SIZE_OF_RBM_MUX_DESC_S ( \ + (4 * IA_CSS_UINT8_T_BITS)) + +typedef struct ia_css_rbm_mux_desc_s { + uint8_t gp_dev_id; + uint8_t mux_id; + uint8_t offset; + uint8_t size_bits; +} ia_css_rbm_mux_desc_t; + +#define SIZE_OF_RBM_VALIDATION_RULE_DESC_S ( \ + (2 * IA_CSS_RBM_BITS) \ + + (1 * IA_CSS_UINT32_T_BITS)) + +typedef struct ia_css_rbm_validation_rule_s { + ia_css_rbm_t match; /* RBM is an array of 32 bit elements */ + ia_css_rbm_t mask; + uint32_t expected_value; +} ia_css_rbm_validation_rule_t; + +#define SIZE_OF_RBM_TERMINAL_ROUTING_DESC_S ( \ + (4 * IA_CSS_UINT8_T_BITS)) + +typedef struct ia_css_rbm_terminal_routing_desc_s { + uint8_t terminal_id; + uint8_t connection_state; + uint8_t mux_id; + uint8_t state; +} ia_css_rbm_terminal_routing_desc_t; + +#define SIZE_OF_RBM_MANIFEST_S ( \ + (VIED_NCI_RBM_MAX_MUX_COUNT * SIZE_OF_RBM_MUX_DESC_S) \ + + (VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT * SIZE_OF_RBM_VALIDATION_RULE_DESC_S) \ + + (VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT * SIZE_OF_RBM_TERMINAL_ROUTING_DESC_S) \ + + (3 * IA_CSS_UINT16_T_BITS) \ + + (N_PADDING_UINT8_IN_RBM_MANIFEST * IA_CSS_UINT8_T_BITS)) + +typedef struct ia_css_rbm_manifest_s { +#if VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT > 0 + ia_css_rbm_validation_rule_t + validation_rules[VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT]; +#endif + uint16_t mux_desc_count; + uint16_t validation_rule_count; + uint16_t terminal_routing_desc_count; + +#if VIED_NCI_RBM_MAX_MUX_COUNT > 0 + ia_css_rbm_mux_desc_t + mux_desc[VIED_NCI_RBM_MAX_MUX_COUNT]; +#endif + +#if VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT > 0 + ia_css_rbm_terminal_routing_desc_t + 
terminal_routing_desc[VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT]; +#endif + +#if N_PADDING_UINT8_IN_RBM_MANIFEST > 0 + uint8_t padding[N_PADDING_UINT8_IN_RBM_MANIFEST]; +#endif +} ia_css_rbm_manifest_t; + +#endif /* __IA_CSS_RBM_MANIFEST_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_storage_class.h new file mode 100644 index 000000000000..9548e9a9fabb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_storage_class.h @@ -0,0 +1,36 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_STORAGE_CLASS_H +#define __IA_CSS_RBM_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_RBM_INLINE__ +#define IA_CSS_RBM_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_RBM_STORAGE_CLASS_C +#else +#define IA_CSS_RBM_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_RBM_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#ifndef __IA_CSS_RBM_MANIFEST_INLINE__ +#define IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +#else +#define IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_RBM_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_trace.h new file mode 100644 index 000000000000..dd060323da5c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_trace.h @@ -0,0 +1,77 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_TRACE_H +#define __IA_CSS_RBM_TRACE_H + +#include "ia_css_trace.h" + +/* Not using 0 to identify wrong configuration being passed from the .mk file outside. 
+* Log levels outside the range below will cause a "No RBM_TRACE_CONFIG Tracing level defined" error.
+*/
+#define RBM_TRACE_LOG_LEVEL_OFF 1
+#define RBM_TRACE_LOG_LEVEL_NORMAL 2
+#define RBM_TRACE_LOG_LEVEL_DEBUG 3
+
+#define RBM_TRACE_CONFIG_DEFAULT RBM_TRACE_LOG_LEVEL_NORMAL
+
+#if !defined(RBM_TRACE_CONFIG)
+# define RBM_TRACE_CONFIG RBM_TRACE_CONFIG_DEFAULT
+#endif
+
+/* The RBM module tracing backend is mapped to TUNIT tracing for target platforms */
+#ifdef __HIVECC
+# ifndef HRT_CSIM
+# define RBM_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE
+# else
+# define RBM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE
+# endif
+#else
+# define RBM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE
+#endif
+
+#if (defined(RBM_TRACE_CONFIG))
+/* Module specific trace setting */
+# if RBM_TRACE_CONFIG == RBM_TRACE_LOG_LEVEL_OFF
+/* RBM_TRACE_LOG_LEVEL_OFF */
+# define RBM_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED
+# define RBM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_DISABLED
+# define RBM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED
+# define RBM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_DISABLED
+# define RBM_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED
+# define RBM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED
+# elif RBM_TRACE_CONFIG == RBM_TRACE_LOG_LEVEL_NORMAL
+/* RBM_TRACE_LOG_LEVEL_NORMAL */
+# define RBM_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED
+# define RBM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED
+# define RBM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED
+# define RBM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED
+# define RBM_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED
+# define RBM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED
+# elif RBM_TRACE_CONFIG == RBM_TRACE_LOG_LEVEL_DEBUG
+/* RBM_TRACE_LOG_LEVEL_DEBUG */
+# define RBM_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_ENABLED
+# define RBM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED
+# define RBM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED
+# define RBM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED
+# define RBM_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_ENABLED
+# define RBM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED
+# else
+# error "No RBM_TRACE_CONFIG Tracing level defined"
+# endif
+#else
+# error "RBM_TRACE_CONFIG not defined"
+#endif
+
+#endif /* __IA_CSS_RBM_TRACE_H */
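The level switches above only select which RBM_TRACE_LEVEL_* flags are enabled; output itself goes through the generic IA_CSS_TRACE_* macros. A minimal sketch of how a routing-bitmap source file consumes this configuration (the helper name and messages are illustrative, not part of the patch):

#include "ia_css_rbm_trace.h"

/* Illustrative helper: each call compiles away when its
 * RBM_TRACE_LEVEL_* flag is IA_CSS_TRACE_LEVEL_DISABLED.
 */
static void rbm_trace_sketch(unsigned int mux_id)
{
	IA_CSS_TRACE_0(RBM, INFO, "rbm_trace_sketch(): enter:\n");
	IA_CSS_TRACE_1(RBM, DEBUG, "configuring mux %d\n", mux_id);
}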
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/routing_bitmap.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/routing_bitmap.mk
new file mode 100644
index 000000000000..f4251f9740fd
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/routing_bitmap.mk
@@ -0,0 +1,39 @@
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+#
+
+ifdef _H_ROUTING_BITMAP_MK
+$(error ERROR: routing_bitmap.mk included multiple times, please check makefile)
+else
+_H_ROUTING_BITMAP_MK=1
+endif
+
+ROUTING_BITMAP_DIR = $(MODULES_DIR)/routing_bitmap
+ROUTING_BITMAP_INTERFACE = $(ROUTING_BITMAP_DIR)/interface
+ROUTING_BITMAP_SOURCES = $(ROUTING_BITMAP_DIR)/src
+
+ROUTING_BITMAP_FILES += $(ROUTING_BITMAP_DIR)/src/ia_css_rbm_manifest.c
+
+ROUTING_BITMAP_CPPFLAGS = -I$(ROUTING_BITMAP_INTERFACE)
+ROUTING_BITMAP_CPPFLAGS += -I$(ROUTING_BITMAP_SOURCES)
+
+ifeq ($(ROUTING_BITMAP_INLINE),1)
+ROUTING_BITMAP_CPPFLAGS += -D__IA_CSS_RBM_INLINE__
+else
+ROUTING_BITMAP_FILES += $(ROUTING_BITMAP_DIR)/src/ia_css_rbm.c
+endif
+
+ifeq ($(ROUTING_BITMAP_MANIFEST_INLINE),1)
+ROUTING_BITMAP_CPPFLAGS += -D__IA_CSS_RBM_MANIFEST_INLINE__
+endif
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm.c
new file mode 100644
index 000000000000..bc5bf14efbd7
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm.c
@@ -0,0 +1,17 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_RBM_INLINE__
+#include "ia_css_rbm_impl.h"
+#endif /* __IA_CSS_RBM_INLINE__ */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_impl.h
new file mode 100644
index 000000000000..c8cd78d416a1
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_impl.h
@@ -0,0 +1,338 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#include "ia_css_rbm.h" +#include "type_support.h" +#include "misc_support.h" +#include "assert_support.h" +#include "math_support.h" +#include "ia_css_rbm_trace.h" + +STORAGE_CLASS_INLINE int ia_css_rbm_compute_weight( + const ia_css_rbm_t bitmap); + +STORAGE_CLASS_INLINE ia_css_rbm_t ia_css_rbm_shift( + const ia_css_rbm_t bitmap); + +IA_CSS_RBM_STORAGE_CLASS_C +bool ia_css_is_rbm_intersection_empty( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + ia_css_rbm_t intersection; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_intersection_empty(): enter:\n"); + + intersection = ia_css_rbm_intersection(bitmap0, bitmap1); + return ia_css_is_rbm_empty(intersection); +} + +IA_CSS_RBM_STORAGE_CLASS_C +bool ia_css_is_rbm_empty( + const ia_css_rbm_t bitmap) +{ + unsigned int i; + bool is_empty = true; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_empty(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + is_empty &= bitmap.data[i] == 0; + } + return is_empty; +} + +IA_CSS_RBM_STORAGE_CLASS_C +bool ia_css_is_rbm_equal( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + unsigned int i; + bool is_equal = true; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_equal(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + is_equal = is_equal && (bitmap0.data[i] == bitmap1.data[i]); + } + return is_equal; +} + +IA_CSS_RBM_STORAGE_CLASS_C +bool ia_css_is_rbm_subset( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + ia_css_rbm_t intersection; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_subset(): enter:\n"); + + intersection = ia_css_rbm_intersection(bitmap0, bitmap1); + return ia_css_is_rbm_equal(intersection, bitmap1); +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_clear(void) +{ + unsigned int i; + ia_css_rbm_t bitmap; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_clear(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + bitmap.data[i] = 0; + } + return bitmap; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_complement( + const ia_css_rbm_t bitmap) +{ + unsigned int i; + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_complement(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + result.data[i] = ~bitmap.data[i]; + } + return result; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_union( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + unsigned int i; + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_union(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + result.data[i] = (bitmap0.data[i] | bitmap1.data[i]); + } + return result; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_intersection( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + unsigned int i; + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_intersection(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + result.data[i] = (bitmap0.data[i] & bitmap1.data[i]); + } + return result; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_set( + const ia_css_rbm_t bitmap, + const unsigned int index) +{ + ia_css_rbm_t bit_mask; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_set(): enter:\n"); + + bit_mask = ia_css_rbm_bit_mask(index); + return ia_css_rbm_union(bitmap, bit_mask); +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_create_from_uint64( + const uint64_t value) +{ + unsigned int i; + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + 
"ia_css_rbm_create_from_uint64(): enter:\n"); + + result = ia_css_rbm_clear(); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + /* masking is done implictly, the MSB bits of casting will be chopped off */ + result.data[i] = (IA_CSS_RBM_ELEM_TYPE) + (value >> (i * IA_CSS_RBM_ELEM_BITS)); + } + return result; +} + +IA_CSS_RBM_STORAGE_CLASS_C +uint64_t ia_css_rbm_to_uint64( + const ia_css_rbm_t value) +{ + const unsigned int bits64 = sizeof(uint64_t) * 8; + const unsigned int nof_elems_bits64 = bits64 / IA_CSS_RBM_ELEM_BITS; + unsigned int i; + uint64_t res = 0; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_to_uint64(): enter:\n"); + + assert((bits64 % IA_CSS_RBM_ELEM_BITS) == 0); + assert(nof_elems_bits64 > 0); + + for (i = 0; i < MIN(IA_CSS_RBM_NOF_ELEMS, nof_elems_bits64); i++) { + res |= ((uint64_t)(value.data[i]) << (i * IA_CSS_RBM_ELEM_BITS)); + } + for (i = nof_elems_bits64; i < IA_CSS_RBM_NOF_ELEMS; i++) { + assert(value.data[i] == 0); + } + return res; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_unset( + const ia_css_rbm_t bitmap, + const unsigned int index) +{ + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_unset(): enter:\n"); + + result = ia_css_rbm_bit_mask(index); + result = ia_css_rbm_complement(result); + return ia_css_rbm_intersection(bitmap, result); +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_bit_mask( + const unsigned int index) +{ + unsigned int elem_index; + unsigned int elem_bit_index; + ia_css_rbm_t bit_mask = ia_css_rbm_clear(); + + assert(index < IA_CSS_RBM_BITS); + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_bit_mask(): enter:\n"); + if (index < IA_CSS_RBM_BITS) { + elem_index = index / IA_CSS_RBM_ELEM_BITS; + elem_bit_index = index % IA_CSS_RBM_ELEM_BITS; + assert(elem_index < IA_CSS_RBM_NOF_ELEMS); + + bit_mask.data[elem_index] = 1 << elem_bit_index; + } + return bit_mask; +} + +STORAGE_CLASS_INLINE +int ia_css_rbm_compute_weight( + const ia_css_rbm_t bitmap) +{ + ia_css_rbm_t loc_bitmap; + int weight = 0; + int i; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_compute_weight(): enter:\n"); + + loc_bitmap = bitmap; + + /* In fact; do not need the iterator "i" */ + for (i = 0; (i < IA_CSS_RBM_BITS) && + !ia_css_is_rbm_empty(loc_bitmap); i++) { + weight += ia_css_is_rbm_set(loc_bitmap, 0); + loc_bitmap = ia_css_rbm_shift(loc_bitmap); + } + + return weight; +} + +IA_CSS_RBM_STORAGE_CLASS_C +int ia_css_is_rbm_set( + const ia_css_rbm_t bitmap, + const unsigned int index) +{ + unsigned int elem_index; + unsigned int elem_bit_index; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_set(): enter:\n"); + + assert(index < IA_CSS_RBM_BITS); + + elem_index = index / IA_CSS_RBM_ELEM_BITS; + elem_bit_index = index % IA_CSS_RBM_ELEM_BITS; + assert(elem_index < IA_CSS_RBM_NOF_ELEMS); + return (((bitmap.data[elem_index] >> elem_bit_index) & 0x1) == 1); +} + +STORAGE_CLASS_INLINE +ia_css_rbm_t ia_css_rbm_shift( + const ia_css_rbm_t bitmap) +{ + int i; + unsigned int lsb_current_elem = 0; + unsigned int lsb_previous_elem = 0; + ia_css_rbm_t loc_bitmap; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_shift(): enter:\n"); + + loc_bitmap = bitmap; + + for (i = IA_CSS_RBM_NOF_ELEMS - 1; i >= 0; i--) { + lsb_current_elem = bitmap.data[i] & 0x01; + loc_bitmap.data[i] >>= 1; + loc_bitmap.data[i] |= (lsb_previous_elem << (IA_CSS_RBM_ELEM_BITS - 1)); + lsb_previous_elem = lsb_current_elem; + } + return loc_bitmap; +} + +IA_CSS_RBM_STORAGE_CLASS_C +int ia_css_rbm_print( + const ia_css_rbm_t bitmap, + void *fid) +{ + int retval = -1; + int 
bit; + unsigned int bit_index = 0; + ia_css_rbm_t loc_bitmap; + + IA_CSS_TRACE_0(RBM, INFO, + "ia_css_rbm_print(): enter:\n"); + + NOT_USED(fid); + NOT_USED(bit); + + IA_CSS_TRACE_0(RBM, INFO, "kernel bitmap {\n"); + + loc_bitmap = bitmap; + + for (bit_index = 0; (bit_index < IA_CSS_RBM_BITS) && + !ia_css_is_rbm_empty(loc_bitmap); bit_index++) { + + bit = ia_css_is_rbm_set(loc_bitmap, 0); + loc_bitmap = ia_css_rbm_shift(loc_bitmap); + IA_CSS_TRACE_2(RBM, INFO, "\t%d\t = %d\n", bit_index, bit); + } + IA_CSS_TRACE_0(RBM, INFO, "}\n"); + + retval = 0; + return retval; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest.c new file mode 100644 index 000000000000..ef3beb8760b6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest.c @@ -0,0 +1,224 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_rbm_manifest.h" +#include "ia_css_rbm.h" +#include "type_support.h" +#include "misc_support.h" +#include "assert_support.h" +#include "math_support.h" +#include "ia_css_rbm_trace.h" + +#ifndef __IA_CSS_RBM_MANIFEST_INLINE__ +#include "ia_css_rbm_manifest_impl.h" +#endif /* __IA_CSS_RBM_MANIFEST_INLINE__ */ + +STORAGE_CLASS_INLINE void +ia_css_rbm_print_with_header( + const ia_css_rbm_t *rbm, + const ia_css_rbm_mux_desc_t *mux, + unsigned int mux_desc_count, + bool print_header) +{ +#ifdef __HIVECC + ia_css_rbm_print(*rbm, NULL); + (void)print_header; + (void)mux_desc_count; + (void)mux; +#else + int i, j; + + assert(mux != NULL); + assert(rbm != NULL); + if (mux == NULL || rbm == NULL) + return; + + if (print_header) { + for (i = mux_desc_count - 1; i >= 0; i--) { + PRINT("%*d|", mux[i].size_bits, mux[i].mux_id); + } + PRINT("\n"); + } + for (i = mux_desc_count - 1; i >= 0; i--) { + for (j = mux[i].size_bits - 1; j >= 0; j--) { + PRINT("%d", ia_css_is_rbm_set(*rbm, j + mux[i].offset)); + } + PRINT("|"); + } +#endif +} + +STORAGE_CLASS_INLINE void +ia_css_rbm_validation_rule_print( + ia_css_rbm_validation_rule_t *rule, + ia_css_rbm_mux_desc_t *mux_desc, + unsigned int mux_desc_count, + bool print_header) +{ + ia_css_rbm_print_with_header(&rule->match, mux_desc, mux_desc_count, print_header); +#ifdef __HIVECC + IA_CSS_TRACE_0(RBM, INFO, "Mask\n"); +#else + PRINT("\t"); +#endif + ia_css_rbm_print_with_header(&rule->mask, mux_desc, mux_desc_count, false); +#ifdef __HIVECC + IA_CSS_TRACE_1(RBM, INFO, "Rule expected_value: %d\n", rule->expected_value); +#else + PRINT("\t%d\n", rule->expected_value); +#endif +} + +void +ia_css_rbm_pretty_print( + const ia_css_rbm_t *rbm, + const ia_css_rbm_mux_desc_t *mux, + unsigned int mux_desc_count) +{ + ia_css_rbm_print_with_header(rbm, mux, mux_desc_count, false); +#ifndef __HIVECC + PRINT("\n"); +#endif +} + +void +ia_css_rbm_manifest_print( + const ia_css_rbm_manifest_t *manifest) +{ + int retval = -1; + unsigned int i; + bool 
print_header = true;
+ ia_css_rbm_mux_desc_t *muxes;
+ ia_css_rbm_validation_rule_t *validation_rule;
+ ia_css_rbm_terminal_routing_desc_t *terminal_routing_desc;
+
+ verifjmpexit(manifest != NULL);
+ muxes = ia_css_rbm_manifest_get_muxes(manifest);
+ verifjmpexit(muxes != NULL || manifest->mux_desc_count == 0);
+
+ for (i = 0; i < manifest->mux_desc_count; i++) {
+ IA_CSS_TRACE_4(RBM, INFO, "id: %d.%d offset: %d size_bits: %d\n",
+ muxes[i].gp_dev_id,
+ muxes[i].mux_id,
+ muxes[i].offset,
+ muxes[i].size_bits);
+ }
+#if VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT != 0
+ validation_rule = ia_css_rbm_manifest_get_validation_rules(manifest);
+ verifjmpexit(validation_rule != NULL || manifest->validation_rule_count == 0);
+
+ for (i = 0; i < manifest->validation_rule_count; i++) {
+ ia_css_rbm_validation_rule_print(&validation_rule[i], muxes, manifest->mux_desc_count, print_header);
+ print_header = false;
+ }
+#else
+ (void) validation_rule;
+ (void) print_header;
+#endif
+ terminal_routing_desc = ia_css_rbm_manifest_get_terminal_routing_desc(manifest);
+ verifjmpexit(terminal_routing_desc != NULL || manifest->terminal_routing_desc_count == 0);
+ for (i = 0; i < manifest->terminal_routing_desc_count; i++) {
+ IA_CSS_TRACE_4(RBM, INFO, "terminal_id: %d connection_state: %d mux_id: %d state: %d\n",
+ terminal_routing_desc[i].terminal_id,
+ terminal_routing_desc[i].connection_state,
+ terminal_routing_desc[i].mux_id,
+ terminal_routing_desc[i].state);
+ }
+
+ retval = 0;
+EXIT:
+ if (retval != 0) {
+ IA_CSS_TRACE_0(RBM, ERROR, "ia_css_rbm_manifest_print failed\n");
+ }
+}
+
+bool
+ia_css_rbm_manifest_check_rbm_validity(
+ const ia_css_rbm_manifest_t *manifest,
+ const ia_css_rbm_t *rbm)
+{
+ unsigned int i;
+ ia_css_rbm_t res;
+ ia_css_rbm_t final_rbm = ia_css_rbm_clear();
+ ia_css_rbm_validation_rule_t *rules;
+ bool matches_rules;
+
+ verifjmpexit(manifest != NULL);
+ verifjmpexit(rbm != NULL);
+
+ if (ia_css_is_rbm_empty(*rbm)) {
+ IA_CSS_TRACE_0(RBM, ERROR, "ia_css_rbm_manifest_check_rbm_validity fails: RBM is empty.\n");
+ return false;
+ }
+
+#if VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT != 0
+ rules = ia_css_rbm_manifest_get_validation_rules(manifest);
+ verifjmpexit(rules != NULL || manifest->validation_rule_count == 0);
+
+ for (i = 0; i < manifest->validation_rule_count; i++) {
+ res = ia_css_rbm_intersection(*rbm, rules[i].mask);
+ matches_rules = ia_css_is_rbm_equal(res, rules[i].match);
+
+ if (!matches_rules)
+ continue;
+
+ if (rules[i].expected_value == 1) {
+ final_rbm = ia_css_rbm_union(final_rbm, res);
+ } else {
+ IA_CSS_TRACE_1(RBM, INFO, "ia_css_rbm_manifest_check_rbm_validity fails on rule %d\n", i);
+ return false;
+ }
+ }
+#else
+ (void)matches_rules;
+ (void)i;
+ (void)rules;
+ (void)res;
+#endif
+ return ia_css_is_rbm_equal(final_rbm, *rbm);
+EXIT:
+ return false;
+}
+
+ia_css_rbm_t
+ia_css_rbm_set_mux(
+ ia_css_rbm_t rbm,
+ ia_css_rbm_mux_desc_t *mux,
+ unsigned int mux_count,
+ unsigned int gp_dev_id,
+ unsigned int mux_id,
+ unsigned int value)
+{
+ unsigned int i;
+
+ verifjmpexit(mux != NULL);
+
+ for (i = 0; i < mux_count; i++) {
+ if (mux[i].gp_dev_id == gp_dev_id && mux[i].mux_id == mux_id)
+ break;
+ }
+ if (i >= mux_count) {
+ IA_CSS_TRACE_2(RBM, ERROR,
+ "ia_css_rbm_set_mux mux with mux_id %d.%d not found\n", gp_dev_id, mux_id);
+ return rbm;
+ }
+ if (value >= mux[i].size_bits) {
+ IA_CSS_TRACE_3(RBM, ERROR,
+ "ia_css_rbm_set_mux mux mux_id %d.%d, value %d illegal\n", gp_dev_id, mux_id, value);
+ return rbm;
+ }
+ rbm = ia_css_rbm_set(rbm, mux[i].offset +
value); +EXIT: + return rbm; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest_impl.h new file mode 100644 index 000000000000..7059b6bc898e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest_impl.h @@ -0,0 +1,108 @@ + + +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_rbm_manifest.h" +#include "ia_css_rbm_trace.h" + +#include "type_support.h" +#include "math_support.h" +#include "error_support.h" +#include "assert_support.h" +#include "print_support.h" + +STORAGE_CLASS_INLINE +void __ia_css_rbm_manifest_check_struct(void) +{ + COMPILATION_ERROR_IF( + sizeof(ia_css_rbm_manifest_t) != (SIZE_OF_RBM_MANIFEST_S / IA_CSS_UINT8_T_BITS)); + COMPILATION_ERROR_IF( + (sizeof(ia_css_rbm_manifest_t) % 8 /* 64 bit */) != 0); +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +unsigned int +ia_css_rbm_manifest_get_size(void) +{ + unsigned int size = sizeof(struct ia_css_rbm_manifest_s); + + return ceil_mul(size, sizeof(uint64_t)); +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +void +ia_css_rbm_manifest_init(struct ia_css_rbm_manifest_s *rbm) +{ + rbm->mux_desc_count = 0; + rbm->terminal_routing_desc_count = 0; + rbm->validation_rule_count = 0; +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +ia_css_rbm_mux_desc_t * +ia_css_rbm_manifest_get_muxes(const ia_css_rbm_manifest_t *manifest) +{ +#if VIED_NCI_RBM_MAX_MUX_COUNT == 0 + (void)manifest; + return NULL; +#else + return (ia_css_rbm_mux_desc_t *)manifest->mux_desc; +#endif +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +unsigned int +ia_css_rbm_manifest_get_mux_count(const ia_css_rbm_manifest_t *manifest) +{ + return manifest->mux_desc_count; +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +ia_css_rbm_validation_rule_t * +ia_css_rbm_manifest_get_validation_rules(const ia_css_rbm_manifest_t *manifest) +{ +#if VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT == 0 + (void)manifest; + return NULL; +#else + return (ia_css_rbm_validation_rule_t *)manifest->validation_rules; +#endif +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +unsigned int +ia_css_rbm_manifest_get_validation_rule_count(const ia_css_rbm_manifest_t *manifest) +{ + return manifest->validation_rule_count; +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +ia_css_rbm_terminal_routing_desc_t * +ia_css_rbm_manifest_get_terminal_routing_desc(const ia_css_rbm_manifest_t *manifest) +{ +#if VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT == 0 + (void)manifest; + return NULL; +#else + return (ia_css_rbm_terminal_routing_desc_t *)manifest->terminal_routing_desc; +#endif +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +unsigned int +ia_css_rbm_manifest_get_terminal_routing_desc_count(const ia_css_rbm_manifest_t *manifest) +{ + return manifest->terminal_routing_desc_count; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/assert_support.h 
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/assert_support.h
new file mode 100644
index 000000000000..28aed19409b9
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/assert_support.h
@@ -0,0 +1,200 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __ASSERT_SUPPORT_H
+#define __ASSERT_SUPPORT_H
+
+/* This file provides support for run-time assertions
+ * and compile-time assertions.
+ *
+ * Run-time assertions are provided via the following syntax:
+ * assert(condition)
+ * Run-time assertions are disabled using the NDEBUG flag.
+ *
+ * Compile time assertions are provided via the following syntax:
+ * COMPILATION_ERROR_IF(condition);
+ * A compile-time assertion will fail to compile if the condition is false.
+ * The condition must be constant, such that it can be evaluated
+ * at compile time.
+ *
+ * OP___assert is deprecated.
+ */
+
+#define IA_CSS_ASSERT(expr) assert(expr)
+
+#ifdef __KLOCWORK__
+/* Klocwork does not see that assert will lead to an abort
+ * as there is no good way to tell this to KW and the code
+ * should not depend on assert to function (actually the assert
+ * could be disabled in a release build) it was decided to
+ * disable the assert for KW scans (by defining NDEBUG)
+ * see also:
+ * http://www.klocwork.com/products/documentation/current/
+ * Tuning_C/C%2B%2B_analysis#Assertions
+ */
+#define NDEBUG
+#endif /* __KLOCWORK__ */
+
+/**
+ * The following macro can help to test the size of a struct at compile
+ * time rather than at run-time. It does not work for all compilers; see
+ * below.
+ *
+ * Depending on the value of 'condition', the following macro is expanded to:
+ * - condition==true:
+ * an expression containing an array declaration with negative size,
+ * usually resulting in a compilation error
+ * - condition==false:
+ * (void) 1; // C statement with no effect
+ *
+ * example:
+ * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) !=
+ * SIZE_OF_HOST_SP_QUEUES_STRUCT);
+ *
+ * verify that the macro indeed triggers a compilation error with your compiler:
+ * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) !=
+ * (sizeof(struct host_sp_queues)+1) );
+ *
+ * Not all compilers will trigger an error with this macro;
+ * use a search engine to search for BUILD_BUG_ON to find other methods.
+ */
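Because the macros defined below reduce to array-size expressions, they can be used at file scope or inside any function. A small hedged example (the struct and its expected size are made up for illustration):

#include "type_support.h"
#include "assert_support.h"

struct example_desc {
	uint32_t id;
	uint32_t flags;
};

static void example_size_check(void)
{
	/* Fails to compile if example_desc ever changes size */
	COMPILATION_ERROR_IF(sizeof(struct example_desc) != 8);
	CT_ASSERT((sizeof(struct example_desc) % 4) == 0);
}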
+#define COMPILATION_ERROR_IF(condition) \
+((void)sizeof(char[1 - 2*!!(condition)]))
+
+/* Compile time assertion */
+#ifndef CT_ASSERT
+#define CT_ASSERT(cnd) ((void)sizeof(char[(cnd)?1 : -1]))
+#endif /* CT_ASSERT */
+
+#ifdef NDEBUG
+
+#define assert(cnd) ((void)0)
+
+#else
+
+#include "storage_class.h"
+
+#if defined(_MSC_VER)
+#ifdef _KERNEL_MODE
+/* Windows kernel mode compilation */
+#include
+#define assert(cnd) ASSERT(cnd)
+#else
+/* Windows usermode compilation */
+#include <assert.h>
+#endif
+
+#elif defined(__HIVECC)
+
+/*
+ * target: assert disabled
+ * sched: assert enabled only when DEBUG_SCHED is defined
+ * unsched: assert enabled
+ */
+#if defined(HRT_HW)
+#define assert(cnd) ((void)0)
+#elif defined(HRT_SCHED) && !defined(DEBUG_SCHED)
+#define assert(cnd) ((void)0)
+#elif defined(PIPE_GENERATION)
+#define assert(cnd) ((void)0)
+#else
+#include
+#define assert(cnd) OP___csim_assert(cnd)
+#endif
+
+#elif defined(__KERNEL__)
+#include <linux/bug.h>
+
+#ifndef KERNEL_ASSERT_TO_BUG
+#ifndef KERNEL_ASSERT_TO_BUG_ON
+#ifndef KERNEL_ASSERT_TO_WARN_ON
+#ifndef KERNEL_ASSERT_TO_WARN_ON_INF_LOOP
+#ifndef KERNEL_ASSERT_UNDEFINED
+/* Default */
+#define KERNEL_ASSERT_TO_BUG
+#endif /*KERNEL_ASSERT_UNDEFINED*/
+#endif /*KERNEL_ASSERT_TO_WARN_ON_INF_LOOP*/
+#endif /*KERNEL_ASSERT_TO_WARN_ON*/
+#endif /*KERNEL_ASSERT_TO_BUG_ON*/
+#endif /*KERNEL_ASSERT_TO_BUG*/
+
+#ifdef KERNEL_ASSERT_TO_BUG
+/* TODO: it would be cleaner to use this:
+ * #define assert(cnd) BUG_ON(!(cnd))
+ * but that causes many compiler warnings (==errors) under Android
+ * because it seems that the BUG_ON() macro is not seen as a check by
+ * gcc like the BUG() macro is. */
+#define assert(cnd) \
+ do { \
+ if (!(cnd)) { \
+ BUG(); \
+ } \
+ } while (0)
+#endif /*KERNEL_ASSERT_TO_BUG*/
+
+#ifdef KERNEL_ASSERT_TO_BUG_ON
+#define assert(cnd) BUG_ON(!(cnd))
+#endif /*KERNEL_ASSERT_TO_BUG_ON*/
+
+#ifdef KERNEL_ASSERT_TO_WARN_ON
+#define assert(cnd) WARN_ON(!(cnd))
+#endif /*KERNEL_ASSERT_TO_WARN_ON*/
+
+#ifdef KERNEL_ASSERT_TO_WARN_ON_INF_LOOP
+#define assert(cnd) \
+ do { \
+ int not_cnd = !(cnd); \
+ WARN_ON(not_cnd); \
+ if (not_cnd) { \
+ for (;;) { \
+ } \
+ } \
+ } while (0)
+#endif /*KERNEL_ASSERT_TO_WARN_ON_INF_LOOP*/
+
+#ifdef KERNEL_ASSERT_UNDEFINED
+#include KERNEL_ASSERT_DEFINITION_FILESTRING
+#endif /*KERNEL_ASSERT_UNDEFINED*/
+
+#elif defined(__FIST__) || defined(__GNUC__)
+
+#include "assert.h"
+
+#else /* default is for unknown environments */
+#define assert(cnd) ((void)0)
+#endif
+
+#endif /* NDEBUG */
+
+#ifndef PIPE_GENERATION
+/* Deprecated OP___assert, this is still used in ~1000 places
+ * in the code. This will be removed over time.
+ * The implementation for the pipe generation tool is in see support.isp.h */
+#define OP___assert(cnd) assert(cnd)
+
+#ifdef C_RUN
+#define compile_time_assert(cond) OP___assert(cond)
+#else
+#include "storage_class.h"
+extern void _compile_time_assert(void);
+STORAGE_CLASS_INLINE void compile_time_assert(unsigned cond)
+{
+ /* Call undefined function if cond is false */
+ if (!cond)
+ _compile_time_assert();
+}
+#endif
+#endif /* PIPE_GENERATION */
+
+#endif /* __ASSERT_SUPPORT_H */
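The KERNEL_ASSERT_TO_* switches above are mutually exclusive; only KERNEL_ASSERT_TO_BUG is selected automatically. A sketch of opting into the non-fatal variant, assuming the option is defined before this header is included (normally via a -D compiler flag):

/* Assumption for this sketch: KERNEL_ASSERT_TO_WARN_ON is defined
 * (e.g. with -DKERNEL_ASSERT_TO_WARN_ON), so assert() expands to
 * WARN_ON(!(cnd)) and a failed check logs a backtrace instead of
 * panicking the kernel.
 */
#include "assert_support.h"

static int example_check_arg(int value)
{
	assert(value > 0);
	return value;
}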
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/cpu_mem_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/cpu_mem_support.h
new file mode 100644
index 000000000000..fa349cac4b24
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/cpu_mem_support.h
@@ -0,0 +1,233 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __CPU_MEM_SUPPORT_H
+#define __CPU_MEM_SUPPORT_H
+
+#include "storage_class.h"
+#include "assert_support.h"
+#include "type_support.h"
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_copy(void *dst, const void *src, unsigned int size)
+{
+ /* memcpy cannot be used in Windows (the function is not allowed),
+ * and the safer function memcpy_s is not available on other platforms.
+ * Because usage of ia_css_cpu_mem_copy is minimal, we implement it here in an easy,
+ * but sub-optimal way.
+ */
+ unsigned int i;
+
+ assert(dst != NULL && src != NULL);
+
+ if (!(dst != NULL && src != NULL)) {
+ return NULL;
+ }
+ for (i = 0; i < size; i++) {
+ ((char *)dst)[i] = ((const char *)src)[i];
+ }
+ return dst;
+}
+
+#if defined(__KERNEL__)
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+#include <asm/cacheflush.h>
+
+/* TODO: remove, workaround for issue in hrt file ibuf_ctrl_2600_config.c
+ * error checking code added to SDK that uses calls to exit function
+ */
+#define exit(a) return
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_alloc(unsigned int size)
+{
+ return kmalloc(size, GFP_KERNEL);
+}
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_alloc_page_aligned(unsigned int size)
+{
+ return ia_css_cpu_mem_alloc(size); /* todo: align to page size */
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_protect(void *ptr, unsigned int size, int prot)
+{
+ /* nothing here yet */
+}
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_set_zero(void *dst, unsigned int size)
+{
+ return memset(dst, 0, size); /* available in kernel in linux/string.h */
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_free(void *ptr)
+{
+ kfree(ptr);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size)
+{
+ /* parameter check here */
+ if (ptr == NULL)
+ return;
+
+ clflush_cache_range(ptr, size);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size)
+{
+ /* for now same as flush */
+ ia_css_cpu_mem_cache_flush(ptr, size);
+}
+
+#elif defined(_MSC_VER)
+
+#include <stdlib.h>
+#include <string.h>
+#include <stddef.h>
+
+extern void *hrt_malloc(size_t bytes, int zero_mem);
+extern void *hrt_free(void *ptr);
+extern void hrt_mem_cache_flush(void *ptr, unsigned int size);
+extern void hrt_mem_cache_invalidate(void *ptr, unsigned int size);
+
+#define malloc(a) hrt_malloc(a, 1)
+#define free(a) hrt_free(a)
+
+#define CSS_PAGE_SIZE (1<<12)
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_alloc(unsigned int size)
+{
+ return malloc(size);
+}
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_alloc_page_aligned(unsigned int size)
+{
+ unsigned int buffer_size = size;
+
+ /* Currently hrt_malloc calls the Windows ExAllocatePoolWithTag() routine
+ * to request system memory. If the number of bytes is equal to or larger
+ * than the page size, the returned address is page aligned, but if it is
+ * smaller it is not necessarily page-aligned. We agreed
+ * with the Windows team that we allocate a full page
+ * if it is less than the page size
+ */
+ if (buffer_size < CSS_PAGE_SIZE)
+ buffer_size = CSS_PAGE_SIZE;
+
+ return ia_css_cpu_mem_alloc(buffer_size);
+}
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_set_zero(void *dst, unsigned int size)
+{
+ return memset(dst, 0, size);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_free(void *ptr)
+{
+ free(ptr);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size)
+{
+#ifdef _KERNEL_MODE
+ hrt_mem_cache_flush(ptr, size);
+#else
+ (void)ptr;
+ (void)size;
+#endif
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size)
+{
+#ifdef _KERNEL_MODE
+ hrt_mem_cache_invalidate(ptr, size);
+#else
+ (void)ptr;
+ (void)size;
+#endif
+}
+
+#else
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+/* Needed for the MPROTECT */
+#include <sys/mman.h>
+#include <errno.h>
+#include <unistd.h>
+#include <malloc.h>
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_alloc(unsigned int size)
+{
+ return malloc(size);
+}
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_alloc_page_aligned(unsigned int size)
+{
+ int pagesize;
+
+ pagesize = sysconf(_SC_PAGE_SIZE);
+ return memalign(pagesize, size);
+}
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_set_zero(void *dst, unsigned int size)
+{
+ return memset(dst, 0, size);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_free(void *ptr)
+{
+ free(ptr);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size)
+{
+ /* not needed in simulation */
+ (void)ptr;
+ (void)size;
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size)
+{
+ /* not needed in simulation */
+ (void)ptr;
+ (void)size;
+}
+
+#endif
+
+#endif /* __CPU_MEM_SUPPORT_H */
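Taken together these wrappers give each platform the same allocate/zero/flush vocabulary. A minimal usage sketch (the buffer's role is illustrative, not from the patch):

#include "cpu_mem_support.h"

static void *example_make_buffer(unsigned int size)
{
	/* Page aligned so the buffer can be shared with the subsystem */
	void *buf = ia_css_cpu_mem_alloc_page_aligned(size);

	if (buf == NULL)
		return NULL;

	ia_css_cpu_mem_set_zero(buf, size);
	/* Push the zeroed contents out of the CPU cache */
	ia_css_cpu_mem_cache_flush(buf, size);
	return buf;
}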
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/error_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/error_support.h
new file mode 100644
index 000000000000..9fe1f65125e6
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/error_support.h
@@ -0,0 +1,110 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __ERROR_SUPPORT_H
+#define __ERROR_SUPPORT_H
+
+#if defined(__KERNEL__)
+#include <linux/errno.h>
+#else
+#include <errno.h>
+#endif
+#include <assert.h>
+
+/* OS-independent definition of IA_CSS errno values */
+/* #define IA_CSS_EINVAL 1 */
+/* #define IA_CSS_EFAULT 2 */
+
+#ifdef __HIVECC
+#define ERR_EMBEDDED 1
+#else
+#define ERR_EMBEDDED 0
+#endif
+
+#if ERR_EMBEDDED
+#define DECLARE_ERRVAL
+#else
+#define DECLARE_ERRVAL \
+ int _errval = 0;
+#endif
+
+/* Use a comma expression in the "while" condition to prevent
+ * constant-conditional compiler warnings in Windows */
+#define ALWAYS_FALSE ((void)0, 0)
+
+#define verifret(cond, error_type) \
+do { \
+ if (!(cond)) { \
+ return error_type; \
+ } \
+} while (ALWAYS_FALSE)
+
+#define verifjmp(cond, error_tag) \
+do { \
+ if (!(cond)) { \
+ goto error_tag; \
+ } \
+} while (ALWAYS_FALSE)
+
+#define verifexit(cond) \
+do { \
+ if (!(cond)) { \
+ goto EXIT; \
+ } \
+} while (ALWAYS_FALSE)
+
+#if ERR_EMBEDDED
+#define verifexitval(cond, error_tag) \
+do { \
+ assert(cond); \
+} while (ALWAYS_FALSE)
+#else
+#define verifexitval(cond, error_tag) \
+do { \
+ if (!(cond)) { \
+ _errval = (error_tag); \
+ goto EXIT; \
+ } \
+} while (ALWAYS_FALSE)
+#endif
+
+#if ERR_EMBEDDED
+#define haserror(error_tag) (0)
+#else
+#define haserror(error_tag) \
+ (_errval == (error_tag))
+#endif
+
+#if ERR_EMBEDDED
+#define noerror() (1)
+#else
+#define noerror() \
+ (_errval == 0)
+#endif
+
+#define verifjmpexit(cond) \
+do { \
+ if (!(cond)) { \
+ goto EXIT; \
+ } \
+} while (ALWAYS_FALSE)
+
+#define verifjmpexitsetretval(cond, retval) \
+do { \
+ if (!(cond)) { \
+ retval = -1; \
+ goto EXIT; \
+ } \
+} while (ALWAYS_FALSE)
+
+#endif /* __ERROR_SUPPORT_H */
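The verif* macros assume a local _errval (introduced by DECLARE_ERRVAL) and an EXIT label in the calling function; on embedded builds they degrade to plain asserts. A hedged sketch of the intended calling pattern (the function name and error code are illustrative):

#include "error_support.h"

static int example_validate(const void *cfg)
{
	DECLARE_ERRVAL

	verifexitval(cfg != NULL, EFAULT);
EXIT:
	if (!noerror())
		return -1;
	return 0;
}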
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/math_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/math_support.h
new file mode 100644
index 000000000000..633f86f1a1b0
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/math_support.h
@@ -0,0 +1,314 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __MATH_SUPPORT_H
+#define __MATH_SUPPORT_H
+
+#include "storage_class.h" /* for STORAGE_CLASS_INLINE */
+#include "type_support.h"
+#include "assert_support.h"
+
+/* In case min/max/MIN/MAX macros are already defined, undefine them */
+#ifdef min
+#undef min
+#endif
+#ifdef max
+#undef max
+#endif
+#ifdef MIN /* also defined in include/hrt/numeric.h from SDK */
+#undef MIN
+#endif
+#ifdef MAX
+#undef MAX
+#endif
+
+#ifndef UINT16_MAX
+#define UINT16_MAX (0xffffUL)
+#endif
+
+#ifndef UINT32_MAX
+#define UINT32_MAX (0xffffffffUL)
+#endif
+
+#define IS_ODD(a) ((a) & 0x1)
+#define IS_EVEN(a) (!IS_ODD(a))
+#define IS_POWER2(a) (!((a)&((a)-1)))
+#define IS_MASK_BITS_SET(a, b) ((a & b) != 0)
+
+/* To find the next power-of-2 number from x */
+#define bit2(x) ((x) | ((x) >> 1))
+#define bit4(x) (bit2(x) | (bit2(x) >> 2))
+#define bit8(x) (bit4(x) | (bit4(x) >> 4))
+#define bit16(x) (bit8(x) | (bit8(x) >> 8))
+#define bit32(x) (bit16(x) | (bit16(x) >> 16))
+#define NEXT_POWER_OF_2(x) (bit32(x-1) + 1)
+
+/* force a value to a lower even value */
+#define EVEN_FLOOR(x) ((x) & ~1UL)
+
+/* A => B */
+#define IMPLIES(a, b) (!(a) || (b))
+
+/* The ORIG_BITS-th bit is the sign bit */
+/* Sign extends a ORIG_BITS bits long signed number to a 64-bit signed number */
+/* By type casting it can be re-limited to any valid type size
+ * (32-bit signed or 16-bit or 8-bit)
+ */
+/* By masking it can be transformed to any arbitrary bit size */
+#define SIGN_EXTEND(VAL, ORIG_BITS) \
+((~(((VAL)&(1ULL<<((ORIG_BITS)-1)))-1))|(VAL))
+
+#define EXTRACT_BIT(a, b) ((a >> b) & 1)
+
+/* for preprocessor and array sizing use MIN and MAX
+ otherwise use min and max */
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#define CLIP(a, b, c) MIN((MAX((a), (b))), (c))
+/* Integer round-down division of a with b */
+#define FLOOR_DIV(a, b) ((b) ? ((a) / (b)) : 0)
+/* Align a to the lower multiple of b */
+#define FLOOR_MUL(a, b) (FLOOR_DIV(a, b) * (b))
+/* Integer round-up division of a with b */
+#define CEIL_DIV(a, b) ((b) ? (((a) + (b) - 1) / (b)) : 0)
+/* Align a to the upper multiple of b */
+#define CEIL_MUL(a, b) (CEIL_DIV(a, b) * (b))
+/* Align a to the upper multiple of b - fast implementation
+ * for cases when b=pow(2,n)
+ */
+#define CEIL_MUL2(a, b) (((a) + (b) - 1) & ~((b) - 1))
+/* integer round-up division of a with pow(2,b) */
+#define CEIL_SHIFT(a, b) (((a) + (1UL << (b)) - 1) >> (b))
+/* Align a to the upper multiple of pow(2,b) */
+#define CEIL_SHIFT_MUL(a, b) (CEIL_SHIFT(a, b) << (b))
+/* Absolute difference of a and b */
+#define ABS_DIF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
+#define ABS(a) ABS_DIF(a, 0)
+/* Square of x */
+#define SQR(x) ((x)*(x))
+/* Integer round-half-down division of a and b */
+#define ROUND_HALF_DOWN_DIV(a, b) ((b) ? ((a) + (b / 2) - 1) / (b) : 0)
+/* Align a to the round-half-down multiple of b */
+#define ROUND_HALF_DOWN_MUL(a, b) (ROUND_HALF_DOWN_DIV(a, b) * (b))
+
+#define MAX3(a, b, c) MAX((a), MAX((b), (c)))
+#define MIN3(a, b, c) MIN((a), MIN((b), (c)))
+#define MAX4(a, b, c, d) MAX((MAX((a), (b))), (MAX((c), (d))))
+#define MIN4(a, b, c, d) MIN((MIN((a), (b))), (MIN((c), (d))))
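The CEIL_*/FLOOR_* family is what the firmware uses to size and align buffers without floating point. A short illustrative use (the burst size and byte count are made up):

#include "math_support.h"

/* Align a line of line_bytes to whole 32-byte bursts */
static unsigned int example_line_padding(unsigned int line_bytes)
{
	/* CEIL_DIV(100, 32) == 4 bursts; CEIL_MUL(100, 32) == 128 bytes */
	return CEIL_MUL(line_bytes, 32);
}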
+/* min and max should not be macros as they will evaluate their arguments twice.
+ if you really need a macro (e.g. for CPP or for initializing an array)
+ use MIN() and MAX(), otherwise use min() and max() */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) ((sizeof(a) / sizeof(*(a))))
+#endif
+
+#ifndef BYTES
+#define BYTES(bit) (((bit)+7)/8)
+#endif
+
+#if !defined(PIPE_GENERATION)
+STORAGE_CLASS_INLINE unsigned int max_value_bits(unsigned int bits)
+{
+ return (bits == 0) ? 0 : ((2 * ((1 << ((bits) - 1)) - 1)) + 1);
+}
+STORAGE_CLASS_INLINE unsigned int max_value_bytes(unsigned int bytes)
+{
+ return max_value_bits(IA_CSS_UINT8_T_BITS * bytes);
+}
+STORAGE_CLASS_INLINE int max(int a, int b)
+{
+ return MAX(a, b);
+}
+
+STORAGE_CLASS_INLINE int min(int a, int b)
+{
+ return MIN(a, b);
+}
+
+STORAGE_CLASS_INLINE int clip(int a, int b, int c)
+{
+ return min(max(a, b), c);
+}
+
+STORAGE_CLASS_INLINE unsigned int umax(unsigned int a, unsigned int b)
+{
+ return MAX(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int umin(unsigned int a, unsigned int b)
+{
+ return MIN(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int uclip(unsigned int a, unsigned int b,
+ unsigned int c)
+{
+ return umin(umax(a, b), c);
+}
+
+STORAGE_CLASS_INLINE unsigned int ceil_div(unsigned int a, unsigned int b)
+{
+ return CEIL_DIV(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int ceil_mul(unsigned int a, unsigned int b)
+{
+ return CEIL_MUL(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int ceil_mul2(unsigned int a, unsigned int b)
+{
+ return CEIL_MUL2(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int ceil_shift(unsigned int a, unsigned int b)
+{
+ return CEIL_SHIFT(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int ceil_shift_mul(unsigned int a, unsigned int b)
+{
+ return CEIL_SHIFT_MUL(a, b);
+}
+
+STORAGE_CLASS_INLINE int abs_dif(int a, int b)
+{
+ return ABS_DIF(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int uabs_dif(unsigned int a, unsigned int b)
+{
+ return ABS_DIF(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int round_half_down_div(unsigned int a,
+ unsigned int b)
+{
+ return ROUND_HALF_DOWN_DIV(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int round_half_down_mul(unsigned int a,
+ unsigned int b)
+{
+ return ROUND_HALF_DOWN_MUL(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int ceil_pow2(uint32_t a)
+{
+ unsigned int retval = 0;
+
+ if (IS_POWER2(a)) {
+ retval = (unsigned int)a;
+ } else {
+ unsigned int v = a;
+
+ v |= v>>1;
+ v |= v>>2;
+ v |= v>>4;
+ v |= v>>8;
+ v |= v>>16;
+ retval = (unsigned int)(v+1);
+ }
+ return retval;
+}
+
+STORAGE_CLASS_INLINE unsigned int floor_log2(uint32_t a)
+{
+ static const uint8_t de_bruijn[] = {
+ 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30,
+ 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31
+ };
+ uint32_t v = a;
+
+ v |= v>>1;
+ v |= v>>2;
+ v |= v>>4;
+ v |= v>>8;
+ v |= v>>16;
+ return (unsigned int)de_bruijn[(v*0x07C4ACDDU)>>27];
+}
+
+/* Divide by small power of two */
+STORAGE_CLASS_INLINE unsigned int
+udiv2_small_i(uint32_t a, uint32_t b)
+{
+ assert(b <= 2);
+ return a >> (b-1);
+}
+
+/* optimized divide for small results
+ * a will be divided by b
+ * outbits is the number of bits needed for the result
+ * the smaller it is, the cheaper the function will be.
+ * if the result doesn't fit in the number of output bits
+ * the result is incorrect and the function will assert
+ */
+STORAGE_CLASS_INLINE unsigned int
+udiv_medium(uint32_t a, uint32_t b, unsigned outbits)
+{
+ int bit;
+ unsigned res = 0;
+ unsigned mask;
+
+#ifdef VOLCANO
+#pragma ipu unroll
+#endif
+ for (bit = outbits-1 ; bit >= 0; bit--) {
+ mask = 1<<bit;
+ if (a >= (b<<bit)) {
+ res |= mask;
+ a -= (b<<bit);
+ }
+ }
+ /* the remainder must be smaller than the divisor,
+ * otherwise the result did not fit in outbits */
+ assert(a < b);
+ return res;
+}
+
+#if !defined(__VIED_CELL)
+/*
+ * For SP and ISP, SDK provides the definition of OP_std_modadd.
+ * We need it only for host
+ */
+STORAGE_CLASS_INLINE unsigned int OP_std_modadd(int a, int b, int c)
+{
+ return (a+b >= c ? a+b-c : a+b);
+}
a+b-c : a+b); +} + +/* + * For SP and ISP, SDK provides the definition of OP_asp_slor. + * We need it only for host + */ +STORAGE_CLASS_INLINE unsigned int OP_asp_slor(int a, int b, int c) +{ + return ((a << c) | b); +} +#else +#include "hive/customops.h" +#endif /* !defined(__VIED_CELL) */ + +#endif /* !defined(PIPE_GENERATION) */ + +#if !defined(__KERNEL__) +#define clamp(a, min_val, max_val) MIN(MAX((a), (min_val)), (max_val)) +#endif /* !defined(__KERNEL__) */ + +#endif /* __MATH_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/misc_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/misc_support.h new file mode 100644 index 000000000000..a2c2729e946d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/misc_support.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __MISC_SUPPORT_H +#define __MISC_SUPPORT_H + +/* suppress compiler warnings on unused variables */ +#ifndef NOT_USED +#define NOT_USED(a) ((void)(a)) +#endif + +/* Calculate the total bytes for pow(2) byte alignment */ +#define tot_bytes_for_pow2_align(pow2, cur_bytes) \ + ((cur_bytes + (pow2 - 1)) & ~(pow2 - 1)) + +/* Display the macro value given a string */ +#define _STR(x) #x +#define STR(x) _STR(x) + +/* Concatenate */ +#ifndef CAT /* also defined in */ +#define _CAT(a, b) a ## b +#define CAT(a, b) _CAT(a, b) +#endif + +#define _CAT3(a, b, c) a ## b ## c +#define CAT3(a, b, c) _CAT3(a, b, c) + +/* NO_HOIST, NO_CSE, NO_ALIAS attributes must be ignored for host code */ +#ifndef __HIVECC +#ifndef NO_HOIST +#define NO_HOIST +#endif +#ifndef NO_CSE +#define NO_CSE +#endif +#ifndef NO_ALIAS +#define NO_ALIAS +#endif +#endif + +enum hive_method_id { + HIVE_METHOD_ID_CRUN, + HIVE_METHOD_ID_UNSCHED, + HIVE_METHOD_ID_SCHED, + HIVE_METHOD_ID_TARGET +}; + +/* Derive METHOD */ +#if defined(C_RUN) + #define HIVE_METHOD "crun" + #define HIVE_METHOD_ID HIVE_METHOD_ID_CRUN +#elif defined(HRT_UNSCHED) + #define HIVE_METHOD "unsched" + #define HIVE_METHOD_ID HIVE_METHOD_ID_UNSCHED +#elif defined(HRT_SCHED) + #define HIVE_METHOD "sched" + #define HIVE_METHOD_ID HIVE_METHOD_ID_SCHED +#else + #define HIVE_METHOD "target" + #define HIVE_METHOD_ID HIVE_METHOD_ID_TARGET + #define HRT_TARGET 1 +#endif + +#endif /* __MISC_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/platform_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/platform_support.h new file mode 100644 index 000000000000..1752efc7b4df --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/platform_support.h @@ -0,0 +1,146 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/platform_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/platform_support.h
new file mode 100644
index 000000000000..1752efc7b4df
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/platform_support.h
@@ -0,0 +1,146 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __PLATFORM_SUPPORT_H
+#define __PLATFORM_SUPPORT_H
+
+#include "storage_class.h"
+
+#define MSEC_IN_SEC 1000
+#define NSEC_IN_MSEC 1000000
+
+#if defined(_MSC_VER)
+#include
+
+#define IA_CSS_EXTERN
+#define SYNC_WITH(x)
+#define CSS_ALIGN(d, a) __declspec(align(a)) d
+
+STORAGE_CLASS_INLINE void ia_css_sleep(void)
+{
+ /* Placeholder for driver team */
+}
+
+STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms)
+{
+ /* Placeholder for driver team */
+ (void)delay_time_ms;
+}
+
+#elif defined(__HIVECC)
+#include
+#include
+
+#define IA_CSS_EXTERN extern
+#define CSS_ALIGN(d, a) d __attribute__((aligned(a)))
+STORAGE_CLASS_INLINE void ia_css_sleep(void)
+{
+ OP___schedule();
+}
+
+#elif defined(__KERNEL__)
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+#define IA_CSS_EXTERN
+#define CSS_ALIGN(d, a) d __aligned(a)
+
+STORAGE_CLASS_INLINE void ia_css_sleep(void)
+{
+ usleep_range(1, 50);
+}
+
+#elif defined(__GNUC__)
+#include
+
+#define IA_CSS_EXTERN
+#define CSS_ALIGN(d, a) d __attribute__((aligned(a)))
+
+/* Define some __HIVECC specific macros to nothing to allow host code compilation */
+#ifndef NO_ALIAS
+#define NO_ALIAS
+#endif
+
+#ifndef SYNC_WITH
+#define SYNC_WITH(x)
+#endif
+
+#if defined(HRT_CSIM)
+ #include "hrt/host.h" /* Using hrt_sleep from hrt/host.h */
+ STORAGE_CLASS_INLINE void ia_css_sleep(void)
+ {
+ /* For the SDK still using hrt_sleep */
+ hrt_sleep();
+ }
+ STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms)
+ {
+ /* For the SDK still using hrt_sleep */
+ long unsigned int i = 0;
+ for (i = 0; i < delay_time_ms; i++) {
+ hrt_sleep();
+ }
+ }
+#else
+ #include <time.h>
+ STORAGE_CLASS_INLINE void ia_css_sleep(void)
+ {
+ struct timespec delay_time;
+
+ delay_time.tv_sec = 0;
+ delay_time.tv_nsec = 10;
+ nanosleep(&delay_time, NULL);
+ }
+ STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms)
+ {
+ struct timespec delay_time;
+
+ if (delay_time_ms >= MSEC_IN_SEC) {
+ delay_time.tv_sec = delay_time_ms / MSEC_IN_SEC;
+ delay_time.tv_nsec = (delay_time_ms % MSEC_IN_SEC) * NSEC_IN_MSEC;
+ } else {
+ delay_time.tv_sec = 0;
+ delay_time.tv_nsec = delay_time_ms * NSEC_IN_MSEC;
+ }
+ nanosleep(&delay_time, NULL);
+ }
+#endif
+
+#else
+#include
+#endif
+
+/* needed for the stdint.h include in various environments */
+#include "type_support.h"
+#include "storage_class.h"
+
+#define MAX_ALIGNMENT 8
+#define aligned_uint8(type, obj) CSS_ALIGN(uint8_t obj, 1)
+#define aligned_int8(type, obj) CSS_ALIGN(int8_t obj, 1)
+#define aligned_uint16(type, obj) CSS_ALIGN(uint16_t obj, 2)
+#define aligned_int16(type, obj) CSS_ALIGN(int16_t obj, 2)
+#define aligned_uint32(type, obj) CSS_ALIGN(uint32_t obj, 4)
+#define aligned_int32(type, obj) CSS_ALIGN(int32_t obj, 4)
+
+/* needed as long as hivecc does not define the type (u)int64_t */
+#if defined(__HIVECC)
+#define aligned_uint64(type, obj) CSS_ALIGN(unsigned long long obj, 8)
+#define aligned_int64(type, obj) CSS_ALIGN(signed long long obj, 8)
+#else
+#define aligned_uint64(type, obj) CSS_ALIGN(uint64_t obj, 8)
+#define aligned_int64(type, obj) CSS_ALIGN(int64_t obj, 8)
+#endif
+#define aligned_enum(enum_type, obj) CSS_ALIGN(uint32_t obj, 4)
+#define aligned_struct(struct_type, obj) struct_type obj
+
+#endif /* __PLATFORM_SUPPORT_H */
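CSS_ALIGN gives one alignment spelling across MSVC, hivecc, kernel, and host GCC builds, and the aligned_* wrappers apply it to struct members. A sketch of a host/firmware-shared layout (the struct itself is illustrative, not from the patch):

#include "platform_support.h"
#include "type_support.h"

/* The 64-bit member keeps 8-byte alignment on every toolchain
 * that compiles this header.
 */
struct example_shared_desc {
	aligned_uint64(uint64_t, token);
	aligned_uint32(uint32_t, flags);
	aligned_uint32(uint32_t, reserved);
};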
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/print_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/print_support.h
new file mode 100644
index 000000000000..0b614f7ef12d
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/print_support.h
@@ -0,0 +1,90 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __PRINT_SUPPORT_H
+#define __PRINT_SUPPORT_H
+
+#if defined(_MSC_VER)
+#ifdef _KERNEL_MODE
+
+/* TODO: Windows driver team to provide tracing mechanism for kernel mode
+ * e.g. DbgPrint and DbgPrintEx
+ */
+extern void FwTracePrintPWARN(const char *fmt, ...);
+extern void FwTracePrintPRINT(const char *fmt, ...);
+extern void FwTracePrintPERROR(const char *fmt, ...);
+extern void FwTracePrintPDEBUG(const char *fmt, ...);
+
+#define PWARN(format, ...) FwTracePrintPWARN(format, __VA_ARGS__)
+#define PRINT(format, ...) FwTracePrintPRINT(format, __VA_ARGS__)
+#define PERROR(format, ...) FwTracePrintPERROR(format, __VA_ARGS__)
+#define PDEBUG(format, ...) FwTracePrintPDEBUG(format, __VA_ARGS__)
+
+#else
+/* Windows usermode compilation */
+#include <stdio.h>
+
+/* To change the defines below, communicate with the Windows team first
+ * to ensure they will not get flooded with prints
+ */
+/* This is a temporary workaround to avoid flooding the userspace
+ * Windows driver with prints
+ */
+
+#define PWARN(format, ...)
+#define PRINT(format, ...)
+#define PERROR(format, ...) printf("error: " format, __VA_ARGS__)
+#define PDEBUG(format, ...)
+
+#endif /* _KERNEL_MODE */
+
+#elif defined(__HIVECC)
+#include
+/* To be revised
+
+#define PWARN(format)
+#define PRINT(format) OP___printstring(format)
+#define PERROR(variable) OP___dump(9999, arguments)
+#define PDEBUG(variable) OP___dump(__LINE__, arguments)
+
+*/
+
+#define PRINTSTRING(str) OP___printstring(str)
+
+#elif defined(__KERNEL__)
+#include <linux/kernel.h>
+#include <linux/printk.h>
+
+#define PWARN(format, arguments...) pr_debug(format, ##arguments)
+#define PRINT(format, arguments...) pr_debug(format, ##arguments)
+#define PERROR(format, arguments...) pr_debug(format, ##arguments)
+#define PDEBUG(format, arguments...) pr_debug(format, ##arguments)
+
+#else
+#include <stdio.h>
+
+#define PRINT_HELPER(prefix, format, ...) printf(prefix format "%s", __VA_ARGS__)
+
+/* The trailing "" allows the edge case of printing a single string */
+#define PWARN(...) PRINT_HELPER("warning: ", __VA_ARGS__, "")
+#define PRINT(...) PRINT_HELPER("", __VA_ARGS__, "")
+#define PERROR(...) PRINT_HELPER("error: ", __VA_ARGS__, "")
+#define PDEBUG(...)
PRINT_HELPER("debug: ", __VA_ARGS__, "") + +#define PRINTSTRING(str) PRINT(str) + +#endif + +#endif /* __PRINT_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/storage_class.h new file mode 100644 index 000000000000..af19b4026220 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/storage_class.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __STORAGE_CLASS_H +#define __STORAGE_CLASS_H + +#define STORAGE_CLASS_EXTERN \ +extern + +#if defined(_MSC_VER) +#define STORAGE_CLASS_INLINE \ +static __inline +#elif defined(__HIVECC) +#define STORAGE_CLASS_INLINE \ +static inline +#else +#define STORAGE_CLASS_INLINE \ +static inline +#endif + +/* Register struct */ +#ifndef __register +#if defined(__HIVECC) && !defined(PIPE_GENERATION) +#define __register register +#else +#define __register +#endif +#endif + +/* Memory attribute */ +#ifndef MEM +#ifdef PIPE_GENERATION +#elif defined(__HIVECC) +#include +#else +#define MEM(any_mem) +#endif +#endif + +#endif /* __STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/type_support.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/type_support.h new file mode 100644 index 000000000000..a86da0e78941 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/support/type_support.h @@ -0,0 +1,80 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __TYPE_SUPPORT_H +#define __TYPE_SUPPORT_H + +/* Per the DLI spec, types are in "type_support.h" and + * "platform_support.h" is for unclassified/to be refactored + * platform specific definitions. 
+ */
+#define IA_CSS_UINT8_T_BITS	8
+#define IA_CSS_UINT16_T_BITS	16
+#define IA_CSS_UINT32_T_BITS	32
+#define IA_CSS_INT32_T_BITS	32
+#define IA_CSS_UINT64_T_BITS	64
+
+
+#if defined(_MSC_VER)
+#include
+#include
+#include
+#include
+#if defined(_M_X64)
+#define HOST_ADDRESS(x) (unsigned long long)(x)
+#else
+#define HOST_ADDRESS(x) (unsigned long)(x)
+#endif
+
+#elif defined(PARAM_GENERATION)
+/* Nothing */
+#elif defined(__HIVECC)
+#include
+#include
+#include
+#include
+#define HOST_ADDRESS(x) (unsigned long)(x)
+
+typedef long long int64_t;
+typedef unsigned long long uint64_t;
+
+#elif defined(__KERNEL__)
+#include
+#include
+
+#define CHAR_BIT (8)
+#define HOST_ADDRESS(x) (unsigned long)(x)
+
+#elif defined(__GNUC__)
+#include
+#include
+#include
+#include
+#define HOST_ADDRESS(x) (unsigned long)(x)
+
+#else /* default is for the FIST environment */
+#include
+#include
+#include
+#include
+#define HOST_ADDRESS(x) (unsigned long)(x)
+
+#endif
+
+#if !defined(PIPE_GENERATION) && !defined(IO_GENERATION)
+/* genpipe cannot handle the void* syntax */
+typedef void *HANDLE;
+#endif
+
+#endif /* __TYPE_SUPPORT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom.h new file mode 100644 index 000000000000..5426d6d18e0b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom.h @@ -0,0 +1,247 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_SYSCOM_H
+#define __IA_CSS_SYSCOM_H
+
+
+/*
+ * The CSS Subsystem Communication Interface - Host side
+ *
+ * It provides subsystem initialization, send ports, and receive ports.
+ * The PSYS and ISYS interfaces are implemented on top of this interface.
+ */
+
+#include "ia_css_syscom_config.h"
+
+#define FW_ERROR_INVALID_PARAMETER	(-1)
+#define FW_ERROR_BAD_ADDRESS		(-2)
+#define FW_ERROR_BUSY			(-3)
+#define FW_ERROR_NO_MEMORY		(-4)
+
+struct ia_css_syscom_context;
+
+/**
+ * ia_css_syscom_size() - provide syscom external buffer requirements
+ * @cfg: pointer to the configuration data (read)
+ * @size: pointer to the buffer size (write)
+ *
+ * Purpose:
+ * - Provide external buffer requirements
+ * - To be used for external buffer allocation
+ *
+ */
+extern void
+ia_css_syscom_size(
+	const struct ia_css_syscom_config *cfg,
+	struct ia_css_syscom_size *size
+);
+
+/**
+ * ia_css_syscom_open() - initialize a subsystem context
+ * @config: pointer to the configuration data (read)
+ * @buf: pointer to externally allocated buffers (read)
+ * @returns: struct ia_css_syscom_context* on success, NULL otherwise.
+ *
+ * Purpose:
+ * - initialize host side data structures
+ * - boot the subsystem?
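+ *
+ * A minimal host-side open sequence, as a sketch only (it assumes "cfg"
+ * is a fully populated ia_css_syscom_config; error handling omitted):
+ *
+ *	struct ia_css_syscom_size size;
+ *	struct ia_css_syscom_context *ctx;
+ *
+ *	ia_css_syscom_size(&cfg, &size); // only needed for external allocation
+ *	ctx = ia_css_syscom_open(&cfg, NULL); // NULL: buffers allocated internally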
+ *
+ */
+extern struct ia_css_syscom_context*
+ia_css_syscom_open(
+	struct ia_css_syscom_config *config,
+	struct ia_css_syscom_buf *buf
+);
+
+/**
+ * ia_css_syscom_close() - signal close to cell
+ * @context: pointer to the subsystem context
+ * @returns: 0 on success, -3 (FW_ERROR_BUSY) if the SPC is not ready yet.
+ *
+ * Purpose:
+ * Request the Cell to terminate
+ */
+extern int
+ia_css_syscom_close(
+	struct ia_css_syscom_context *context
+);
+
+/**
+ * ia_css_syscom_release() - free context
+ * @context: pointer to the subsystem context
+ * @force: flag which specifies whether the cell
+ *         state will be checked before freeing the
+ *         context.
+ * @returns: 0 on success, -3 (FW_ERROR_BUSY) if the cell
+ *         is busy and the call was not forced.
+ *
+ * Purpose:
+ * Two modes: with force==true the context is freed
+ * immediately; with force==false the cell state is
+ * verified first and the context is freed only if the
+ * cell is ready, otherwise an error is returned.
+ */
+extern int
+ia_css_syscom_release(
+	struct ia_css_syscom_context *context,
+	unsigned int force
+);
+
+/**
+ * Open a port for sending tokens to the subsystem
+ * @context: pointer to the subsystem context
+ * @port: send port index
+ * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_send_port_open(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Closes a port for sending tokens to the subsystem
+ * @context: pointer to the subsystem context
+ * @port: send port index
+ * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_send_port_close(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Get the number of tokens that can be sent to a port without error.
+ * @context: pointer to the subsystem context
+ * @port: send port index
+ * @returns: number of available tokens on success,
+ *           -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_send_port_available(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Send a token to the subsystem port.
+ * The token size is determined during initialization.
+ * @context: pointer to the subsystem context
+ * @port: send port index
+ * @token: pointer to the token value that is transferred to the subsystem
+ * @returns: number of tokens sent on success,
+ *           -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_send_port_transfer(
+	struct ia_css_syscom_context *context,
+	unsigned int port,
+	const void *token
+);
+
+/**
+ * Open a port for receiving tokens from the subsystem
+ * @context: pointer to the subsystem context
+ * @port: receive port index
+ * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_recv_port_open(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Closes a port for receiving tokens from the subsystem
+ * @context: pointer to the subsystem context
+ * @port: receive port index
+ * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_recv_port_close(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Get the number of tokens that can be received from a port without errors.
+ * @context: pointer to the subsystem context
+ * @port: receive port index
+ * @returns: number of available tokens on success,
+ *           -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
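+ *
+ * Typical polling sketch (assumes "ctx" was opened, the receive port was
+ * opened with ia_css_syscom_recv_port_open, and "token" provides at least
+ * token_size bytes as configured for this queue):
+ *
+ *	while (ia_css_syscom_recv_port_available(ctx, port) > 0)
+ *		ia_css_syscom_recv_port_transfer(ctx, port, &token);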
+ */
+extern int
+ia_css_syscom_recv_port_available(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Receive a token from the subsystem port.
+ * The token size is determined during initialization.
+ * @context: pointer to the subsystem context
+ * @port: receive port index
+ * @token: (output) pointer to (space for) the token to be received
+ * @returns: number of tokens received on success,
+ *           -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_recv_port_transfer(
+	struct ia_css_syscom_context *context,
+	unsigned int port,
+	void *token
+);
+
+#if HAS_DUAL_CMD_CTX_SUPPORT
+/**
+ * ia_css_syscom_store_dmem() - store subsystem context information in DMEM
+ * @context: pointer to the subsystem context
+ * @ssid: subsystem id
+ * @vtl0_addr_mask: VTL0 address mask; only applicable when the passed in context is secure
+ * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_store_dmem(
+	struct ia_css_syscom_context *context,
+	unsigned int ssid,
+	unsigned int vtl0_addr_mask
+);
+
+/**
+ * ia_css_syscom_set_trustlet_status() - store the trustlet configuration setting
+ * @dmem_addr: DMEM address of the subsystem
+ * @ssid: subsystem id
+ * @trustlet_exist: true if a trustlet exists
+ */
+extern void
+ia_css_syscom_set_trustlet_status(
+	unsigned int dmem_addr,
+	unsigned int ssid,
+	bool trustlet_exist
+);
+
+/**
+ * ia_css_syscom_is_ab_spc_ready() - check if SPC access blocker programming is completed
+ * @ctx: pointer to the subsystem context
+ * @returns: 1 when the status is ready, 0 otherwise
+ */
+bool
+ia_css_syscom_is_ab_spc_ready(
+	struct ia_css_syscom_context *ctx
+);
+#endif /* HAS_DUAL_CMD_CTX_SUPPORT */
+
+#endif /* __IA_CSS_SYSCOM_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom_config.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom_config.h new file mode 100644 index 000000000000..8c827c2ba395 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom_config.h @@ -0,0 +1,98 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_SYSCOM_CONFIG_H
+#define __IA_CSS_SYSCOM_CONFIG_H
+
+#include
+#include
+
+/* syscom size struct, output of ia_css_syscom_size,
+ * input for (external) allocation
+ */
+struct ia_css_syscom_size {
+	/* Size of host buffer */
+	unsigned int cpu;
+	/* Size of shared config buffer (host to cell) */
+	unsigned int shm;
+	/* Size of shared input queue buffers (host to cell) */
+	unsigned int ibuf;
+	/* Size of shared output queue buffers (cell to host) */
+	unsigned int obuf;
+};
+
+/* syscom buffer struct, output of (external) allocation,
+ * input for ia_css_syscom_open
+ */
+struct ia_css_syscom_buf {
+	char *cpu; /* host buffer */
+
+	/* shared memory buffer host address */
+	host_virtual_address_t shm_host;
+	/* shared memory buffer cell address */
+	vied_virtual_address_t shm_cell;
+
+	/* input queue shared buffer host address */
+	host_virtual_address_t ibuf_host;
+	/* input queue shared buffer cell address */
+	vied_virtual_address_t ibuf_cell;
+
+	/* output queue shared buffer host address */
+	host_virtual_address_t obuf_host;
+	/* output queue shared buffer cell address */
+	vied_virtual_address_t obuf_cell;
+};
+
+struct ia_css_syscom_queue_config {
+	unsigned int queue_size; /* tokens per queue */
+	unsigned int token_size; /* bytes per token */
+};
+
+/**
+ * Parameter struct for ia_css_syscom_open
+ */
+struct ia_css_syscom_config {
+	/* This member is no longer used by syscom.
+	   It is kept so as not to break any driver builds, and will be
+	   removed when all assignments have been removed from driver code */
+	/* address of firmware in DDR/IMR */
+	unsigned long long host_firmware_address;
+
+	/* address of firmware in DDR, seen from SPC */
+	unsigned int vied_firmware_address;
+
+	unsigned int ssid;
+	unsigned int mmid;
+
+	unsigned int num_input_queues;
+	unsigned int num_output_queues;
+	struct ia_css_syscom_queue_config *input;
+	struct ia_css_syscom_queue_config *output;
+
+	unsigned int regs_addr;
+	unsigned int dmem_addr;
+
+	/* firmware-specific configuration data */
+	void *specific_addr;
+	unsigned int specific_size;
+
+	/* if true, secure syscom (VTIO case);
+	 * if false, non-secure syscom
+	 */
+	bool secure;
+	unsigned int vtl0_addr_mask; /* only applicable in 'secure' case */
+};
+
+#endif /* __IA_CSS_SYSCOM_CONFIG_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom_trace.h new file mode 100644 index 000000000000..1a0191d37102 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/interface/ia_css_syscom_trace.h @@ -0,0 +1,52 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SYSCOM_TRACE_H
+#define __IA_CSS_SYSCOM_TRACE_H
+
+#include "ia_css_trace.h"
+
+#define SYSCOM_TRACE_LEVEL_DEFAULT	1
+#define SYSCOM_TRACE_LEVEL_DEBUG	2
+
+/* Set to default level if no level is defined */
+#ifndef SYSCOM_TRACE_LEVEL
+#define SYSCOM_TRACE_LEVEL SYSCOM_TRACE_LEVEL_DEFAULT
+#endif /* SYSCOM_TRACE_LEVEL */
+
+/* SYSCOM Module tracing backend is mapped to TUNIT tracing for target platforms */
+#ifdef __HIVECC
+# ifndef HRT_CSIM
+# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE
+# else
+# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE
+# endif
+#else
+# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE
+#endif
+
+#define SYSCOM_TRACE_LEVEL_INFO		IA_CSS_TRACE_LEVEL_ENABLED
+#define SYSCOM_TRACE_LEVEL_WARNING	IA_CSS_TRACE_LEVEL_ENABLED
+#define SYSCOM_TRACE_LEVEL_ERROR	IA_CSS_TRACE_LEVEL_ENABLED
+
+#if (SYSCOM_TRACE_LEVEL == SYSCOM_TRACE_LEVEL_DEFAULT)
+# define SYSCOM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED
+#elif (SYSCOM_TRACE_LEVEL == SYSCOM_TRACE_LEVEL_DEBUG)
+# define SYSCOM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED
+#else
+# error "Syscom trace level not defined!"
+#endif /* SYSCOM_TRACE_LEVEL */
+
+#endif /* __IA_CSS_SYSCOM_TRACE_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom.c new file mode 100644 index 000000000000..cdf9df0531ff --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom.c @@ -0,0 +1,650 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#include "ia_css_syscom.h" + +#include "ia_css_syscom_context.h" +#include "ia_css_syscom_config_fw.h" +#include "ia_css_syscom_trace.h" + +#include "queue.h" +#include "send_port.h" +#include "recv_port.h" +#include "regmem_access.h" + +#include "error_support.h" +#include "cpu_mem_support.h" + +#include "queue_struct.h" +#include "send_port_struct.h" +#include "recv_port_struct.h" + +#include "type_support.h" +#include +#include +#include "platform_support.h" + +#include "ia_css_cell.h" + +/* struct of internal buffer sizes */ +struct ia_css_syscom_size_intern { + unsigned int context; + unsigned int input_queue; + unsigned int output_queue; + unsigned int input_port; + unsigned int output_port; + + unsigned int fw_config; + unsigned int specific; + + unsigned int input_buffer; + unsigned int output_buffer; +}; + +/* Allocate buffers internally, when no buffers are provided */ +static int +ia_css_syscom_alloc( + unsigned int ssid, + unsigned int mmid, + const struct ia_css_syscom_size *size, + struct ia_css_syscom_buf *buf) +{ + /* zero the buffer to set all pointers to zero */ + memset(buf, 0, sizeof(*buf)); + + /* allocate cpu_mem */ + buf->cpu = (char *)ia_css_cpu_mem_alloc(size->cpu); + if (!buf->cpu) + goto EXIT7; + + /* allocate and map shared config buffer */ + buf->shm_host = shared_memory_alloc(mmid, size->shm); + if (!buf->shm_host) + goto EXIT6; + buf->shm_cell = shared_memory_map(ssid, mmid, buf->shm_host); + if (!buf->shm_cell) + goto EXIT5; + + /* allocate and map input queue buffer */ + buf->ibuf_host = shared_memory_alloc(mmid, size->ibuf); + if (!buf->ibuf_host) + goto EXIT4; + buf->ibuf_cell = shared_memory_map(ssid, mmid, buf->ibuf_host); + if (!buf->ibuf_cell) + goto EXIT3; + + /* allocate and map output queue buffer */ + buf->obuf_host = shared_memory_alloc(mmid, size->obuf); + if (!buf->obuf_host) + goto EXIT2; + buf->obuf_cell = shared_memory_map(ssid, mmid, buf->obuf_host); + if (!buf->obuf_cell) + goto EXIT1; + + return 0; + +EXIT1: shared_memory_free(mmid, buf->obuf_host); +EXIT2: shared_memory_unmap(ssid, mmid, buf->ibuf_cell); +EXIT3: shared_memory_free(mmid, buf->ibuf_host); +EXIT4: shared_memory_unmap(ssid, mmid, buf->shm_cell); +EXIT5: shared_memory_free(mmid, buf->shm_host); +EXIT6: ia_css_cpu_mem_free(buf->cpu); +EXIT7: return FW_ERROR_NO_MEMORY; +} + +static void +ia_css_syscom_size_intern( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size_intern *size) +{ + /* convert syscom config into syscom internal size struct */ + + unsigned int i; + + size->context = sizeof(struct ia_css_syscom_context); + size->input_queue = cfg->num_input_queues * sizeof(struct sys_queue); + size->output_queue = cfg->num_output_queues * sizeof(struct sys_queue); + size->input_port = cfg->num_input_queues * sizeof(struct send_port); + size->output_port = cfg->num_output_queues * sizeof(struct recv_port); + + size->fw_config = sizeof(struct ia_css_syscom_config_fw); + size->specific = cfg->specific_size; + + /* accumulate input queue buffer sizes */ + size->input_buffer = 0; + for (i = 0; i < cfg->num_input_queues; i++) { + size->input_buffer += + sys_queue_buf_size(cfg->input[i].queue_size, + cfg->input[i].token_size); + } + + /* accumulate outut queue buffer sizes */ + size->output_buffer = 0; + for (i = 0; i < cfg->num_output_queues; i++) { + size->output_buffer += + sys_queue_buf_size(cfg->output[i].queue_size, + cfg->output[i].token_size); + } +} + +static void +ia_css_syscom_size_extern( + const struct ia_css_syscom_size_intern *i, + struct 
ia_css_syscom_size *e) +{ + /* convert syscom internal size struct into external size struct */ + + e->cpu = i->context + i->input_queue + i->output_queue + + i->input_port + i->output_port; + e->shm = i->fw_config + i->input_queue + i->output_queue + i->specific; + e->ibuf = i->input_buffer; + e->obuf = i->output_buffer; +} + +/* Function that provides buffer sizes to be allocated */ +void +ia_css_syscom_size( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size *size) +{ + struct ia_css_syscom_size_intern i; + + ia_css_syscom_size_intern(cfg, &i); + ia_css_syscom_size_extern(&i, size); +} + +static struct ia_css_syscom_context* +ia_css_syscom_assign_buf( + const struct ia_css_syscom_size_intern *i, + const struct ia_css_syscom_buf *buf) +{ + struct ia_css_syscom_context *ctx; + char *cpu_mem_buf; + host_virtual_address_t shm_buf_host; + vied_virtual_address_t shm_buf_cell; + + /* host context */ + cpu_mem_buf = buf->cpu; + + ctx = (struct ia_css_syscom_context *)cpu_mem_buf; + ia_css_cpu_mem_set_zero(ctx, i->context); + cpu_mem_buf += i->context; + + ctx->input_queue = (struct sys_queue *) cpu_mem_buf; + cpu_mem_buf += i->input_queue; + + ctx->output_queue = (struct sys_queue *) cpu_mem_buf; + cpu_mem_buf += i->output_queue; + + ctx->send_port = (struct send_port *) cpu_mem_buf; + cpu_mem_buf += i->input_port; + + ctx->recv_port = (struct recv_port *) cpu_mem_buf; + + + /* cell config */ + shm_buf_host = buf->shm_host; + shm_buf_cell = buf->shm_cell; + + ctx->config_host_addr = shm_buf_host; + shm_buf_host += i->fw_config; + ctx->config_vied_addr = shm_buf_cell; + shm_buf_cell += i->fw_config; + + ctx->input_queue_host_addr = shm_buf_host; + shm_buf_host += i->input_queue; + ctx->input_queue_vied_addr = shm_buf_cell; + shm_buf_cell += i->input_queue; + + ctx->output_queue_host_addr = shm_buf_host; + shm_buf_host += i->output_queue; + ctx->output_queue_vied_addr = shm_buf_cell; + shm_buf_cell += i->output_queue; + + ctx->specific_host_addr = shm_buf_host; + ctx->specific_vied_addr = shm_buf_cell; + + ctx->ibuf_host_addr = buf->ibuf_host; + ctx->ibuf_vied_addr = buf->ibuf_cell; + + ctx->obuf_host_addr = buf->obuf_host; + ctx->obuf_vied_addr = buf->obuf_cell; + + return ctx; +} + +struct ia_css_syscom_context* +ia_css_syscom_open( + struct ia_css_syscom_config *cfg, + struct ia_css_syscom_buf *buf_extern +) +{ + struct ia_css_syscom_size_intern size_intern; + struct ia_css_syscom_size size; + struct ia_css_syscom_buf buf_intern; + struct ia_css_syscom_buf *buf; + struct ia_css_syscom_context *ctx; + struct ia_css_syscom_config_fw fw_cfg; + unsigned int i; + struct sys_queue_res res; + + IA_CSS_TRACE_0(SYSCOM, INFO, "Entered: ia_css_syscom_open\n"); + + /* error handling */ + if (cfg == NULL) + return NULL; + + IA_CSS_TRACE_1(SYSCOM, INFO, "ia_css_syscom_open (secure %d) start\n", cfg->secure); + + /* check members of cfg: TBD */ + + /* + * Check if SP is in valid state, have to wait if not ready. 
+ * In some platform (Such as VP), it will need more time to wait due to system performance; + * If return NULL without wait for SPC0 ready, Driver load FW will failed + */ + ia_css_cell_wait(cfg->ssid, SPC0); + + ia_css_syscom_size_intern(cfg, &size_intern); + ia_css_syscom_size_extern(&size_intern, &size); + + if (buf_extern) { + /* use externally allocated buffers */ + buf = buf_extern; + } else { + /* use internally allocated buffers */ + buf = &buf_intern; + if (ia_css_syscom_alloc(cfg->ssid, cfg->mmid, &size, buf) != 0) + return NULL; + } + + /* assign buffer pointers */ + ctx = ia_css_syscom_assign_buf(&size_intern, buf); + /* only need to free internally allocated buffers */ + ctx->free_buf = !buf_extern; + + ctx->cell_regs_addr = cfg->regs_addr; + /* regmem is at cell_dmem_addr + REGMEM_OFFSET */ + ctx->cell_dmem_addr = cfg->dmem_addr; + + ctx->num_input_queues = cfg->num_input_queues; + ctx->num_output_queues = cfg->num_output_queues; + + ctx->env.mmid = cfg->mmid; + ctx->env.ssid = cfg->ssid; + ctx->env.mem_addr = cfg->dmem_addr; + + ctx->regmem_idx = SYSCOM_QPR_BASE_REG; + + /* initialize input queues */ + res.reg = SYSCOM_QPR_BASE_REG; + res.host_address = ctx->ibuf_host_addr; + res.vied_address = ctx->ibuf_vied_addr; + for (i = 0; i < cfg->num_input_queues; i++) { + sys_queue_init(ctx->input_queue + i, + cfg->input[i].queue_size, + cfg->input[i].token_size, &res); + } + + /* initialize output queues */ + res.host_address = ctx->obuf_host_addr; + res.vied_address = ctx->obuf_vied_addr; + for (i = 0; i < cfg->num_output_queues; i++) { + sys_queue_init(ctx->output_queue + i, + cfg->output[i].queue_size, + cfg->output[i].token_size, &res); + } + + /* fill shared queue structs */ + shared_memory_store(cfg->mmid, ctx->input_queue_host_addr, + ctx->input_queue, + cfg->num_input_queues * sizeof(struct sys_queue)); + ia_css_cpu_mem_cache_flush( + (void *)HOST_ADDRESS(ctx->input_queue_host_addr), + cfg->num_input_queues * sizeof(struct sys_queue)); + shared_memory_store(cfg->mmid, ctx->output_queue_host_addr, + ctx->output_queue, + cfg->num_output_queues * sizeof(struct sys_queue)); + ia_css_cpu_mem_cache_flush( + (void *)HOST_ADDRESS(ctx->output_queue_host_addr), + cfg->num_output_queues * sizeof(struct sys_queue)); + + /* Zero the queue buffers. Is this really needed? 
*/ + shared_memory_zero(cfg->mmid, buf->ibuf_host, size.ibuf); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(buf->ibuf_host), + size.ibuf); + shared_memory_zero(cfg->mmid, buf->obuf_host, size.obuf); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(buf->obuf_host), + size.obuf); + + /* copy firmware specific data */ + if (cfg->specific_addr && cfg->specific_size) { + shared_memory_store(cfg->mmid, ctx->specific_host_addr, + cfg->specific_addr, cfg->specific_size); + ia_css_cpu_mem_cache_flush( + (void *)HOST_ADDRESS(ctx->specific_host_addr), + cfg->specific_size); + } + + fw_cfg.num_input_queues = cfg->num_input_queues; + fw_cfg.num_output_queues = cfg->num_output_queues; + fw_cfg.input_queue = ctx->input_queue_vied_addr; + fw_cfg.output_queue = ctx->output_queue_vied_addr; + fw_cfg.specific_addr = ctx->specific_vied_addr; + fw_cfg.specific_size = cfg->specific_size; + + shared_memory_store(cfg->mmid, ctx->config_host_addr, + &fw_cfg, sizeof(struct ia_css_syscom_config_fw)); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(ctx->config_host_addr), + sizeof(struct ia_css_syscom_config_fw)); + +#if !HAS_DUAL_CMD_CTX_SUPPORT + /* store syscom uninitialized state */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store STATE_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_STATE_UNINIT, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + SYSCOM_STATE_UNINIT, cfg->ssid); + /* store syscom uninitialized command */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store COMMAND_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_COMMAND_UNINIT, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_UNINIT, cfg->ssid); + /* store firmware configuration address */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store CONFIG_REG (%#x) @ dmem_addr %#x ssid %d\n", + ctx->config_vied_addr, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_CONFIG_REG, + ctx->config_vied_addr, cfg->ssid); +#endif + + /* Indicate if ctx is created for secure stream purpose */ + ctx->secure = cfg->secure; + + IA_CSS_TRACE_1(SYSCOM, INFO, "ia_css_syscom_open (secure %d) completed\n", cfg->secure); + return ctx; +} + + +int +ia_css_syscom_close( + struct ia_css_syscom_context *ctx +) { + int state; + + state = regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle close request yet */ + return FW_ERROR_BUSY; + } + + /* set close request flag */ + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_INACTIVE, ctx->env.ssid); + + return 0; +} + +static void +ia_css_syscom_free(struct ia_css_syscom_context *ctx) +{ + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, ctx->ibuf_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->ibuf_host_addr); + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, ctx->obuf_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->obuf_host_addr); + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, + ctx->config_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->config_host_addr); + ia_css_cpu_mem_free(ctx); +} + +int +ia_css_syscom_release( + struct ia_css_syscom_context *ctx, + unsigned int force +) { + /* check if release is forced, an verify cell state if it is not */ + if (!force) { + if (!ia_css_cell_is_ready(ctx->env.ssid, SPC0)) + return FW_ERROR_BUSY; + } + + /* Reset the regmem idx */ + ctx->regmem_idx = 0; + + if (ctx->free_buf) + ia_css_syscom_free(ctx); + 
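+	/* If the buffers were allocated internally, ctx itself was freed
+	 * just above (it lives inside the cpu buffer), so the caller must
+	 * not dereference ctx after a successful release.
+	 */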
+ return 0; +} + +int ia_css_syscom_send_port_open( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + int state; + + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + /* check if SP syscom is ready to open the queue */ + state = regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle messages yet */ + return FW_ERROR_BUSY; + } + + /* initialize the port */ + send_port_open(ctx->send_port + port, + ctx->input_queue + port, &(ctx->env)); + + return 0; +} + +int ia_css_syscom_send_port_close( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return 0; +} + +int ia_css_syscom_send_port_available( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return send_port_available(ctx->send_port + port); +} + +int ia_css_syscom_send_port_transfer( + struct ia_css_syscom_context *ctx, + unsigned int port, + const void *token +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return send_port_transfer(ctx->send_port + port, token); +} + +int ia_css_syscom_recv_port_open( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + int state; + + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + /* check if SP syscom is ready to open the queue */ + state = regmem_load_32(ctx->cell_dmem_addr, + SYSCOM_STATE_REG, ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle messages yet */ + return FW_ERROR_BUSY; + } + + /* initialize the port */ + recv_port_open(ctx->recv_port + port, + ctx->output_queue + port, &(ctx->env)); + + return 0; +} + +int ia_css_syscom_recv_port_close( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return 0; +} + +/* + * Get the number of responses in the response queue + */ +int +ia_css_syscom_recv_port_available( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return recv_port_available(ctx->recv_port + port); +} + + +/* + * Dequeue the head of the response queue + * returns an error when the response queue is empty + */ +int +ia_css_syscom_recv_port_transfer( + struct ia_css_syscom_context *ctx, + unsigned int port, + void *token +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return recv_port_transfer(ctx->recv_port + port, token); +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +/* + * store subsystem context information in DMEM + */ +int +ia_css_syscom_store_dmem( + struct ia_css_syscom_context *ctx, + unsigned int ssid, + unsigned int vtl0_addr_mask +) +{ + unsigned int read_back; + + NOT_USED(vtl0_addr_mask); + NOT_USED(read_back); + + if (ctx->secure) { + 
/* store VTL0 address mask in 'secure' context */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem VTL0_ADDR_MASK (%#x) @ dmem_addr %#x ssid %d\n", + vtl0_addr_mask, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_VTL0_ADDR_MASK, vtl0_addr_mask, ssid); + } + /* store firmware configuration address */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem CONFIG_REG (%#x) @ dmem_addr %#x ssid %d\n", + ctx->config_vied_addr, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_CONFIG_REG, + ctx->config_vied_addr, ssid); + /* store syscom uninitialized state */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem STATE_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_STATE_UNINIT, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + SYSCOM_STATE_UNINIT, ssid); + /* store syscom uninitialized command */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem COMMAND_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_COMMAND_UNINIT, ctx->cell_dmem_addr, ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_UNINIT, ssid); + + return 0; +} + +/* + * store truslet configuration status setting + */ +void +ia_css_syscom_set_trustlet_status( + unsigned int dmem_addr, + unsigned int ssid, + bool trustlet_exist +) +{ + unsigned int value; + + value = trustlet_exist ? TRUSTLET_EXIST : TRUSTLET_NOT_EXIST; + IA_CSS_TRACE_3(SYSCOM, INFO, + "ia_css_syscom_set_trustlet_status TRUSTLET_STATUS (%#x) @ dmem_addr %#x ssid %d\n", + value, dmem_addr, ssid); + regmem_store_32(dmem_addr, TRUSTLET_STATUS, value, ssid); +} + +/* + * check if SPC access blocker programming is completed + */ +bool +ia_css_syscom_is_ab_spc_ready( + struct ia_css_syscom_context *ctx +) +{ + unsigned int value; + + /* We only expect the call from non-secure context only */ + if (ctx->secure) { + IA_CSS_TRACE_0(SYSCOM, ERROR, "ia_css_syscom_is_spc_ab_ready - Please call from non-secure context\n"); + return false; + } + + value = regmem_load_32(ctx->cell_dmem_addr, AB_SPC_STATUS, ctx->env.ssid); + IA_CSS_TRACE_3(SYSCOM, INFO, + "ia_css_syscom_is_spc_ab_ready AB_SPC_STATUS @ dmem_addr %#x ssid %d - value %#x\n", + ctx->cell_dmem_addr, ctx->env.ssid, value); + + return (value == AB_SPC_READY); +} +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom_config_fw.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom_config_fw.h new file mode 100644 index 000000000000..0cacd5a34934 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom_config_fw.h @@ -0,0 +1,69 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_SYSCOM_CONFIG_FW_H +#define __IA_CSS_SYSCOM_CONFIG_FW_H + +#include "type_support.h" + +enum { + /* Program load or explicit host setting should init to this */ + SYSCOM_STATE_UNINIT = 0x57A7E000, + /* SP Syscom sets this when it is ready for use */ + SYSCOM_STATE_READY = 0x57A7E001, + /* SP Syscom sets this when no more syscom accesses will happen */ + SYSCOM_STATE_INACTIVE = 0x57A7E002 +}; + +enum { + /* Program load or explicit host setting should init to this */ + SYSCOM_COMMAND_UNINIT = 0x57A7F000, + /* Host Syscom requests syscom to become inactive */ + SYSCOM_COMMAND_INACTIVE = 0x57A7F001 +}; + +#if HAS_DUAL_CMD_CTX_SUPPORT +enum { + /* Program load or explicit host setting should init to this */ + TRUSTLET_UNINIT = 0x57A8E000, + /* Host Syscom informs SP that Trustlet exists */ + TRUSTLET_EXIST = 0x57A8E001, + /* Host Syscom informs SP that Trustlet does not exist */ + TRUSTLET_NOT_EXIST = 0x57A8E002 +}; + +enum { + /* Program load or explicit setting initialized by SP */ + AB_SPC_NOT_READY = 0x57A8F000, + /* SP informs host that SPC access programming is completed */ + AB_SPC_READY = 0x57A8F001 +}; +#endif + +/* firmware config: data that sent from the host to SP via DDR */ +/* Cell copies data into a context */ + +struct ia_css_syscom_config_fw { + unsigned int firmware_address; + + unsigned int num_input_queues; + unsigned int num_output_queues; + unsigned int input_queue; /* hmm_ptr / struct queue* */ + unsigned int output_queue; /* hmm_ptr / struct queue* */ + + unsigned int specific_addr; /* vied virtual address */ + unsigned int specific_size; +}; + +#endif /* __IA_CSS_SYSCOM_CONFIG_FW_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom_context.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom_context.h new file mode 100644 index 000000000000..ecf22f6b7ac5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/src/ia_css_syscom_context.h @@ -0,0 +1,65 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_SYSCOM_CONTEXT_H +#define __IA_CSS_SYSCOM_CONTEXT_H + +#include + +#include "port_env_struct.h" +#include + +/* host context */ +struct ia_css_syscom_context { + vied_virtual_address_t cell_firmware_addr; + unsigned int cell_regs_addr; + unsigned int cell_dmem_addr; + + struct port_env env; + + unsigned int num_input_queues; + unsigned int num_output_queues; + + /* array of input queues (from host to SP) */ + struct sys_queue *input_queue; + /* array of output queues (from SP to host) */ + struct sys_queue *output_queue; + + struct send_port *send_port; + struct recv_port *recv_port; + + unsigned int regmem_idx; + unsigned int free_buf; + + host_virtual_address_t config_host_addr; + host_virtual_address_t input_queue_host_addr; + host_virtual_address_t output_queue_host_addr; + host_virtual_address_t specific_host_addr; + host_virtual_address_t ibuf_host_addr; + host_virtual_address_t obuf_host_addr; + + vied_virtual_address_t config_vied_addr; + vied_virtual_address_t input_queue_vied_addr; + vied_virtual_address_t output_queue_vied_addr; + vied_virtual_address_t specific_vied_addr; + vied_virtual_address_t ibuf_vied_addr; + vied_virtual_address_t obuf_vied_addr; + + /* if true; secure syscom object as in VTIO Case + * if false, non-secure syscom + */ + bool secure; +}; + +#endif /* __IA_CSS_SYSCOM_CONTEXT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/syscom.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/syscom.mk new file mode 100644 index 000000000000..8d36b8928af5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/syscom/syscom.mk @@ -0,0 +1,42 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is SYSCOM + +SYSCOM_DIR=$${MODULES_DIR}/syscom + +SYSCOM_INTERFACE=$(SYSCOM_DIR)/interface +SYSCOM_SOURCES1=$(SYSCOM_DIR)/src + +SYSCOM_HOST_FILES += $(SYSCOM_SOURCES1)/ia_css_syscom.c + +SYSCOM_HOST_CPPFLAGS += -I$(SYSCOM_INTERFACE) +SYSCOM_HOST_CPPFLAGS += -I$(SYSCOM_SOURCES1) +SYSCOM_HOST_CPPFLAGS += -I$${MODULES_DIR}/devices +ifdef REGMEM_SECURE_OFFSET +SYSCOM_HOST_CPPFLAGS += -DREGMEM_SECURE_OFFSET=$(REGMEM_SECURE_OFFSET) +else +SYSCOM_HOST_CPPFLAGS += -DREGMEM_SECURE_OFFSET=0 +endif + +SYSCOM_FW_FILES += $(SYSCOM_SOURCES1)/ia_css_syscom_fw.c + +SYSCOM_FW_CPPFLAGS += -I$(SYSCOM_INTERFACE) +SYSCOM_FW_CPPFLAGS += -I$(SYSCOM_SOURCES1) +SYSCOM_FW_CPPFLAGS += -DREGMEM_OFFSET=$(REGMEM_OFFSET) +ifdef REGMEM_SECURE_OFFSET +SYSCOM_FW_CPPFLAGS += -DREGMEM_SECURE_OFFSET=$(REGMEM_SECURE_OFFSET) +else +SYSCOM_FW_CPPFLAGS += -DREGMEM_SECURE_OFFSET=0 +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/trace/interface/ia_css_trace.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/trace/interface/ia_css_trace.h new file mode 100644 index 000000000000..b85b1810f107 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/trace/interface/ia_css_trace.h @@ -0,0 +1,883 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +/*! \file */ + +#ifndef __IA_CSS_TRACE_H +#define __IA_CSS_TRACE_H + +/* +** Configurations +*/ + +/** + * STEP 1: Define {Module Name}_TRACE_METHOD to one of the following. + * Where: + * {Module Name} is the name of the targeted module. + * + * Example: + * #define NCI_DMA_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + */ + +/**< Use whatever method of tracing that best suits the platform + * this code is compiled for. + */ +#define IA_CSS_TRACE_METHOD_NATIVE 1 +/**< Use the Tracing NCI. */ +#define IA_CSS_TRACE_METHOD_TRACE 2 + +/** + * STEP 2: Define {Module Name}_TRACE_LEVEL_{Level} to one of the following. + * Where: + * {Module Name} is the name of the targeted module. + * {Level}, in decreasing order of severity, is one of the + * following values: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * + * Example: + * #define NCI_DMA_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + * #define NCI_DMA_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + */ +/**< Disables the corresponding trace level. */ +#define IA_CSS_TRACE_LEVEL_DISABLED 0 +/**< Enables the corresponding trace level. */ +#define IA_CSS_TRACE_LEVEL_ENABLED 1 + +/* + * Used in macro definition with do-while loop + * for removing checkpatch warnings + */ +#define IA_CSS_TRACE_FILE_DUMMY_DEFINE + +/** + * STEP 3: Define IA_CSS_TRACE_PRINT_FILE_LINE to have file name and + * line printed with every log message. + * + * Example: + * #define IA_CSS_TRACE_PRINT_FILE_LINE + */ + +/* +** Interface +*/ + +/* +** Static +*/ + +/** + * Logs a message with zero arguments if the targeted severity level is enabled + * at compile-time. + * @param module The targeted module. + * @param severity The severity level of the trace message. In decreasing order: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * @param format The message to be traced. + */ +#define IA_CSS_TRACE_0(module, severity, format) \ + IA_CSS_TRACE_IMPL(module, 0, severity, format) + +/** + * Logs a message with one argument if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_1(module, severity, format, a1) \ + IA_CSS_TRACE_IMPL(module, 1, severity, format, a1) + +/** + * Logs a message with two arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_2(module, severity, format, a1, a2) \ + IA_CSS_TRACE_IMPL(module, 2, severity, format, a1, a2) + +/** + * Logs a message with three arguments if the targeted severity level + * is enabled at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_3(module, severity, format, a1, a2, a3) \ + IA_CSS_TRACE_IMPL(module, 3, severity, format, a1, a2, a3) + +/** + * Logs a message with four arguments if the targeted severity level is enabled + * at compile-time. 
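+ *
+ * Example (a sketch only, reusing the hypothetical NCI_DMA module from
+ * the configuration steps above):
+ *	IA_CSS_TRACE_4(NCI_DMA, DEBUG, "chan %d: %d bytes, %x -> %x\n",
+ *		chan, size, src, dst);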
+ * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_4(module, severity, format, a1, a2, a3, a4) \ + IA_CSS_TRACE_IMPL(module, 4, severity, format, a1, a2, a3, a4) + +/** + * Logs a message with five arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_5(module, severity, format, a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_IMPL(module, 5, severity, format, a1, a2, a3, a4, a5) + +/** + * Logs a message with six arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_6(module, severity, format, a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_IMPL(module, 6, severity, format, a1, a2, a3, a4, a5, a6) + +/** + * Logs a message with seven arguments if the targeted severity level + * is enabled at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_7(module, severity, format, a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_IMPL(module, 7, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) + +/* +** Dynamic +*/ + +/** +* Declares, but does not define, dynamic tracing functions and variables +* for module \p module. For each module, place an instance of this macro +* in the compilation unit in which you want to use dynamic tracing facility +* so as to inform the compiler of the declaration of the available functions. +* An invocation of this function does not enable any of the available tracing +* levels. Do not place a semicolon after a call to this macro. +* @see IA_CSS_TRACE_DYNAMIC_DEFINE +*/ +#define IA_CSS_TRACE_DYNAMIC_DECLARE(module) \ + IA_CSS_TRACE_DYNAMIC_DECLARE_IMPL(module) +/** +* Declares the configuration function for the dynamic api seperatly, if one +* wants to use it. +*/ +#define IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC(module) \ + IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC_IMPL(module) + +/** +* Defines dynamic tracing functions and variables for module \p module. +* For each module, place an instance of this macro in one, and only one, +* of your SOURCE files so as to allow the linker resolve the related symbols. +* An invocation of this macro does not enable any of the available tracing +* levels. Do not place a semicolon after a call to this macro. +* @see IA_CSS_TRACE_DYNAMIC_DECLARE +*/ +#define IA_CSS_TRACE_DYNAMIC_DEFINE(module) \ + IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(module) +/** +* Defines the configuration function for the dynamic api seperatly, if one +* wants to use it. +*/ +#define IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC(module) \ + IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(module) + +/** + * Logs a message with zero arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @param module The targeted module. + * @param severity The severity level of the trace message. In decreasing order: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * @param format The message to be traced. + */ +#define IA_CSS_TRACE_DYNAMIC_0(module, severity, format) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 0, severity, format) + +/** + * Logs a message with one argument if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_1(module, severity, format, a1) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 1, severity, format, a1) + +/** + * Logs a message with two arguments if the targeted severity level is enabled + * both at compile-time, and run-time. 
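+ *
+ * Example (a sketch only; at run-time the message is emitted only while
+ * the module's NCI_DMA_trace_level_DEBUG variable is nonzero):
+ *	IA_CSS_TRACE_DYNAMIC_2(NCI_DMA, DEBUG, "status %x, count %d\n",
+ *		status, count);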
+ * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_2(module, severity, format, a1, a2) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 2, severity, format, a1, a2) + +/** + * Logs a message with three arguments if the targeted severity level + * is enabled both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_3(module, severity, format, a1, a2, a3) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 3, severity, format, a1, a2, a3) + +/** + * Logs a message with four arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_4(module, severity, format, a1, a2, a3, a4) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 4, severity, format, a1, a2, a3, a4) + +/** + * Logs a message with five arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_5(module, severity, format, a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 5, severity, format, \ + a1, a2, a3, a4, a5) + +/** + * Logs a message with six arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_6(module, severity, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 6, severity, format, \ + a1, a2, a3, a4, a5, a6) + +/** + * Logs a message with seven arguments if the targeted severity level + * is enabled both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_7(module, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 7, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) + +/* +** Implementation +*/ + +/* CAT */ +#define IA_CSS_TRACE_CAT_IMPL(a, b) a ## b +#define IA_CSS_TRACE_CAT(a, b) IA_CSS_TRACE_CAT_IMPL(a, b) + +/* Bridge */ +#if defined(__HIVECC) || defined(__GNUC__) +#define IA_CSS_TRACE_IMPL(module, argument_count, severity, arguments ...) \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_, \ + argument_count \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_LEVEL_ \ + ), \ + severity \ + ) \ + ( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_SEVERITY_, \ + severity \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + #module, \ + ## arguments \ + ) \ + ) + +/* Bridge */ +#define IA_CSS_TRACE_DYNAMIC_IMPL(module, argument_count, severity, \ + arguments ...) \ + do { \ + if (IA_CSS_TRACE_CAT(IA_CSS_TRACE_CAT(module, _trace_level_), \ + severity)) { \ + IA_CSS_TRACE_IMPL(module, argument_count, severity, \ + ## arguments); \ + } \ + } while (0) +#elif defined(_MSC_VER) +#define IA_CSS_TRACE_IMPL(module, argument_count, severity, ...) 
\ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_, \ + argument_count \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_LEVEL_ \ + ), \ + severity \ + ) \ + ( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_SEVERITY_, \ + severity \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + #module, \ + __VA_ARGS__ \ + ) \ + ) + +/* Bridge */ +#define IA_CSS_TRACE_DYNAMIC_IMPL(module, argument_count, severity, ...) \ + do { \ + if (IA_CSS_TRACE_CAT(IA_CSS_TRACE_CAT(module, _trace_level_), \ + severity)) { \ + IA_CSS_TRACE_IMPL(module, argument_count, severity, \ + __VA_ARGS__); \ + } \ + } while (0) +#endif + +/* +** Native Backend +*/ + +#if defined(__HIVECC) + #define IA_CSS_TRACE_PLATFORM_CELL +#elif defined(__GNUC__) + #define IA_CSS_TRACE_PLATFORM_HOST + + #define IA_CSS_TRACE_NATIVE(severity, module, format, arguments ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, module, \ + format), ## arguments); \ + } while (0) + /* TODO: In case Host Side tracing is needed to be mapped to the + * Tunit, the following "IA_CSS_TRACE_TRACE" needs to be modified from + * PRINT to vied_nci_tunit_print function calls + */ + #define IA_CSS_TRACE_TRACE(severity, module, format, arguments ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, module, \ + format), ## arguments); \ + } while (0) + +#elif defined(_MSC_VER) + #define IA_CSS_TRACE_PLATFORM_HOST + + #define IA_CSS_TRACE_NATIVE(severity, module, format, ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, \ + module, format), __VA_ARGS__); \ + } while (0) + /* TODO: In case Host Side tracing is needed to be mapped to the + * Tunit, the following "IA_CSS_TRACE_TRACE" needs to be modified from + * PRINT to vied_nci_tunit_print function calls + */ + #define IA_CSS_TRACE_TRACE(severity, module, format, ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, \ + module, format), __VA_ARGS__); \ + } while (0) +#else + #error Unsupported platform! 
+#endif /* Platform */ + +#if defined(IA_CSS_TRACE_PLATFORM_CELL) + #include /* VOLATILE */ + + #ifdef IA_CSS_TRACE_PRINT_FILE_LINE + #define IA_CSS_TRACE_FILE_PRINT_COMMAND \ + do { \ + OP___printstring(__FILE__":") VOLATILE; \ + OP___printdec(__LINE__) VOLATILE; \ + OP___printstring("\n") VOLATILE; \ + } while (0) + #else + #define IA_CSS_TRACE_FILE_PRINT_COMMAND + #endif + + #define IA_CSS_TRACE_MODULE_SEVERITY_PRINT(module, severity) \ + do { \ + IA_CSS_TRACE_FILE_DUMMY_DEFINE; \ + OP___printstring("["module"]:["severity"]:") \ + VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_MSG_NATIVE(severity, module, format) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + OP___printstring("["module"]:["severity"]: "format) \ + VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_ARG_NATIVE(module, severity, i, value) \ + do { \ + IA_CSS_TRACE_MODULE_SEVERITY_PRINT(module, severity); \ + OP___dump(i, value) VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_0(severity, module, format) \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format) + + #define IA_CSS_TRACE_NATIVE_1(severity, module, format, a1) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_2(severity, module, format, a1, a2) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_3(severity, module, format, a1, a2, a3) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_4(severity, module, format, \ + a1, a2, a3, a4) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 6, a6); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + 
IA_CSS_TRACE_ARG_NATIVE(module, severity, 6, a6); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 7, a7); \ + } while (0) + /* + ** Tracing Backend + */ +#if !defined(HRT_CSIM) && !defined(NO_TUNIT) + #include "vied_nci_tunit.h" +#endif + #define IA_CSS_TRACE_AUG_FORMAT_TRACE(format, module) \ + "[" module "]" format " : PID = %x : Timestamp = %d : PC = %x" + + #define IA_CSS_TRACE_TRACE_0(severity, module, format) \ + vied_nci_tunit_print(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity) + + #define IA_CSS_TRACE_TRACE_1(severity, module, format, a1) \ + vied_nci_tunit_print1i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1) + + #define IA_CSS_TRACE_TRACE_2(severity, module, format, a1, a2) \ + vied_nci_tunit_print2i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2) + + #define IA_CSS_TRACE_TRACE_3(severity, module, format, a1, a2, a3) \ + vied_nci_tunit_print3i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3) + + #define IA_CSS_TRACE_TRACE_4(severity, module, format, a1, a2, a3, a4) \ + vied_nci_tunit_print4i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4) + + #define IA_CSS_TRACE_TRACE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + vied_nci_tunit_print5i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_TRACE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + vied_nci_tunit_print6i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_TRACE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + vied_nci_tunit_print7i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5, a6, a7) + +#elif defined(IA_CSS_TRACE_PLATFORM_HOST) + #include "print_support.h" + + #ifdef IA_CSS_TRACE_PRINT_FILE_LINE + #define IA_CSS_TRACE_FILE_PRINT_COMMAND \ + PRINT("%s:%d:\n", __FILE__, __LINE__) + #else + #define IA_CSS_TRACE_FILE_PRINT_COMMAND + #endif + + #define IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, module, format) \ + "[" module "]:[" severity "]: " format + + #define IA_CSS_TRACE_NATIVE_0(severity, module, format) \ + IA_CSS_TRACE_NATIVE(severity, module, format) + + #define IA_CSS_TRACE_NATIVE_1(severity, module, format, a1) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1) + + #define IA_CSS_TRACE_NATIVE_2(severity, module, format, a1, a2) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2) + + #define IA_CSS_TRACE_NATIVE_3(severity, module, format, a1, a2, a3) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2, a3) + + #define IA_CSS_TRACE_NATIVE_4(severity, module, format, \ + a1, a2, a3, a4) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2, a3, a4) + + #define IA_CSS_TRACE_NATIVE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_NATIVE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_NATIVE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) + + #define IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, module, format) \ + "["module"]:["severity"]: "format + + #define IA_CSS_TRACE_TRACE_0(severity, module, format) \ + IA_CSS_TRACE_TRACE(severity, module, format) + + #define 
IA_CSS_TRACE_TRACE_1(severity, module, format, a1) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1) + + #define IA_CSS_TRACE_TRACE_2(severity, module, format, a1, a2) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2) + + #define IA_CSS_TRACE_TRACE_3(severity, module, format, a1, a2, a3) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2, a3) + + #define IA_CSS_TRACE_TRACE_4(severity, module, format, \ + a1, a2, a3, a4) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2, a3, a4) + + #define IA_CSS_TRACE_TRACE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_TRACE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_TRACE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) +#endif + +/* Disabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_1_0(severity, module, format) +#define IA_CSS_TRACE_1_1_0(severity, module, format, arg1) +#define IA_CSS_TRACE_2_1_0(severity, module, format, arg1, arg2) +#define IA_CSS_TRACE_3_1_0(severity, module, format, arg1, arg2, arg3) +#define IA_CSS_TRACE_4_1_0(severity, module, format, arg1, arg2, arg3, arg4) +#define IA_CSS_TRACE_5_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5) +#define IA_CSS_TRACE_6_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6) +#define IA_CSS_TRACE_7_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7) + +/* Enabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_1_1 IA_CSS_TRACE_NATIVE_0 +#define IA_CSS_TRACE_1_1_1 IA_CSS_TRACE_NATIVE_1 +#define IA_CSS_TRACE_2_1_1 IA_CSS_TRACE_NATIVE_2 +#define IA_CSS_TRACE_3_1_1 IA_CSS_TRACE_NATIVE_3 +#define IA_CSS_TRACE_4_1_1 IA_CSS_TRACE_NATIVE_4 +#define IA_CSS_TRACE_5_1_1 IA_CSS_TRACE_NATIVE_5 +#define IA_CSS_TRACE_6_1_1 IA_CSS_TRACE_NATIVE_6 +#define IA_CSS_TRACE_7_1_1 IA_CSS_TRACE_NATIVE_7 + +/* Enabled */ +/* Legend: IA_CSS_TRACE_SEVERITY_{Severity Level}_{Backend ID} */ +#define IA_CSS_TRACE_SEVERITY_ASSERT_1 "Assert" +#define IA_CSS_TRACE_SEVERITY_ERROR_1 "Error" +#define IA_CSS_TRACE_SEVERITY_WARNING_1 "Warning" +#define IA_CSS_TRACE_SEVERITY_INFO_1 "Info" +#define IA_CSS_TRACE_SEVERITY_DEBUG_1 "Debug" +#define IA_CSS_TRACE_SEVERITY_VERBOSE_1 "Verbose" + +/* Disabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_2_0(severity, module, format) +#define IA_CSS_TRACE_1_2_0(severity, module, format, arg1) +#define IA_CSS_TRACE_2_2_0(severity, module, format, arg1, arg2) +#define IA_CSS_TRACE_3_2_0(severity, module, format, arg1, arg2, arg3) +#define IA_CSS_TRACE_4_2_0(severity, module, format, arg1, arg2, arg3, arg4) +#define IA_CSS_TRACE_5_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5) +#define IA_CSS_TRACE_6_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6) +#define IA_CSS_TRACE_7_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7) + +/* Enabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_2_1 IA_CSS_TRACE_TRACE_0 +#define IA_CSS_TRACE_1_2_1 IA_CSS_TRACE_TRACE_1 +#define IA_CSS_TRACE_2_2_1 IA_CSS_TRACE_TRACE_2 +#define IA_CSS_TRACE_3_2_1 IA_CSS_TRACE_TRACE_3 +#define 
IA_CSS_TRACE_4_2_1 IA_CSS_TRACE_TRACE_4 +#define IA_CSS_TRACE_5_2_1 IA_CSS_TRACE_TRACE_5 +#define IA_CSS_TRACE_6_2_1 IA_CSS_TRACE_TRACE_6 +#define IA_CSS_TRACE_7_2_1 IA_CSS_TRACE_TRACE_7 + +/* Enabled */ +/* Legend: IA_CSS_TRACE_SEVERITY_{Severity Level}_{Backend ID} */ +#define IA_CSS_TRACE_SEVERITY_ASSERT_2 VIED_NCI_TUNIT_MSG_SEVERITY_FATAL +#define IA_CSS_TRACE_SEVERITY_ERROR_2 VIED_NCI_TUNIT_MSG_SEVERITY_ERROR +#define IA_CSS_TRACE_SEVERITY_WARNING_2 VIED_NCI_TUNIT_MSG_SEVERITY_WARNING +#define IA_CSS_TRACE_SEVERITY_INFO_2 VIED_NCI_TUNIT_MSG_SEVERITY_NORMAL +#define IA_CSS_TRACE_SEVERITY_DEBUG_2 VIED_NCI_TUNIT_MSG_SEVERITY_USER1 +#define IA_CSS_TRACE_SEVERITY_VERBOSE_2 VIED_NCI_TUNIT_MSG_SEVERITY_USER2 + +/* +** Dynamicism +*/ + +#define IA_CSS_TRACE_DYNAMIC_DECLARE_IMPL(module) \ + do { \ + void IA_CSS_TRACE_CAT(module, _trace_assert_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_assert_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_error_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_error_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_warning_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_warning_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_info_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_info_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_debug_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_debug_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_disable)(void); \ + } while (0) + +#define IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC_IMPL(module) \ + do { \ + IA_CSS_TRACE_FILE_DUMMY_DEFINE; \ + void IA_CSS_TRACE_CAT(module, _trace_configure)\ + (int argc, const char *const *argv); \ + } while (0) + +#include "platform_support.h" +#include "type_support.h" + +#define IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(module) \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_assert); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_error); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_warning); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_info); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_debug); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_verbose); \ + \ + void IA_CSS_TRACE_CAT(module, _trace_assert_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_assert) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_assert_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_assert) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_error_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_error) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_error_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_error) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_warning_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_warning) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_warning_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_warning) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_info_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_info) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_info_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_info) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_debug_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_debug) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, 
_trace_debug_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_debug) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_verbose) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_verbose) = 0; \ + } + +#define IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(module) \ +void IA_CSS_TRACE_CAT(module, _trace_configure)(const int argc, \ + const char *const *const argv) \ +{ \ + int i = 1; \ + const char *levels = 0; \ + \ + while (i < argc) { \ + if (!strcmp(argv[i], "-" #module "_trace")) { \ + ++i; \ + \ + if (i < argc) { \ + levels = argv[i]; \ + \ + while (*levels) { \ + switch (*levels++) { \ + case 'a': \ + IA_CSS_TRACE_CAT \ + (module, _trace_assert_enable)(); \ + break; \ + \ + case 'e': \ + IA_CSS_TRACE_CAT \ + (module, _trace_error_enable)(); \ + break; \ + \ + case 'w': \ + IA_CSS_TRACE_CAT \ + (module, _trace_warning_enable)(); \ + break; \ + \ + case 'i': \ + IA_CSS_TRACE_CAT \ + (module, _trace_info_enable)(); \ + break; \ + \ + case 'd': \ + IA_CSS_TRACE_CAT \ + (module, _trace_debug_enable)(); \ + break; \ + \ + case 'v': \ + IA_CSS_TRACE_CAT \ + (module, _trace_verbose_enable)(); \ + break; \ + \ + default: \ + break; \ + } \ + } \ + } \ + } \ + \ + ++i; \ + } \ +} + +#endif /* __IA_CSS_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/trace/trace.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/trace/trace.mk new file mode 100644 index 000000000000..b232880b882b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/trace/trace.mk @@ -0,0 +1,40 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# +# MODULE Trace + +# Dependencies +IA_CSS_TRACE_SUPPORT = $${MODULES_DIR}/support + +# API +IA_CSS_TRACE = $${MODULES_DIR}/trace +IA_CSS_TRACE_INTERFACE = $(IA_CSS_TRACE)/interface + +# +# Host +# + +# Host CPP Flags +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE_SUPPORT) +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE_INTERFACE) +IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE)/trace_modules + +# +# Firmware +# + +# Firmware CPP Flags +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE_SUPPORT) +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE_INTERFACE) +IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE)/trace_modules
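Illustrative usage of the trace facility defined above (not part of the patch). The IA_CSS_TRACE_{argc}_{backend}_{enabled} names are the dispatch points that a module's trace wrappers resolve to, and the IA_CSS_TRACE_DYNAMIC_* macros generate the per-module runtime switches. A minimal sketch, assuming a hypothetical module "mymod", host backend 1 (native prints), and that strcmp() is available to the generated configure function:

/* Hypothetical sketch -- not part of the patch. */
IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(mymod)             /* per-level flag storage */
IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(mymod) /* parses "-mymod_trace" */

static void mymod_trace_example(int argc, const char *const *argv)
{
	/* e.g. argv = { "prog", "-mymod_trace", "ew" } enables the
	 * Error and Warning levels for module "mymod" at run time */
	mymod_trace_configure(argc, argv);

	/* Backend 1 (native print), one argument, statically enabled */
	IA_CSS_TRACE_1_1_1(IA_CSS_TRACE_SEVERITY_ERROR_1, "mymod",
		"unexpected value %d\n", -1);
}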
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/shared_memory_access.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/shared_memory_access.h new file mode 100755 index 000000000000..1e81bad9f4ee --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/shared_memory_access.h @@ -0,0 +1,139 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _SHARED_MEMORY_ACCESS_H +#define _SHARED_MEMORY_ACCESS_H + +#include +#include +#include + +typedef enum { + sm_esuccess, + sm_enomem, + sm_ezeroalloc, + sm_ebadvaddr, + sm_einternalerror, + sm_ecorruption, + sm_enocontiguousmem, + sm_enolocmem, + sm_emultiplefree, +} shared_memory_error; + +/** + * \brief Virtual address of (DDR) shared memory space as seen from the VIED subsystem + */ +typedef uint32_t vied_virtual_address_t; + +/** + * \brief Virtual address of (DDR) shared memory space as seen from the host + */ +typedef unsigned long long host_virtual_address_t; + +/** + * \brief List of physical addresses of (DDR) shared memory space. This is used to represent a list of physical pages. + */ +typedef struct shared_memory_physical_page_list_s *shared_memory_physical_page_list; +typedef struct shared_memory_physical_page_list_s +{ + shared_memory_physical_page_list next; + vied_physical_address_t address; +} shared_memory_physical_page_list_s; + + +/** + * \brief Initialize the shared memory interface administration on the host. + * \param idm: id of ddr memory + * \param host_ddr_addr: physical address of memory as seen from host + * \param memory_size: size of ddr memory in bytes + * \param ps: size of page in bytes (for instance 4096) + */ +int shared_memory_allocation_initialize(vied_memory_t idm, vied_physical_address_t host_ddr_addr, size_t memory_size, size_t ps); + +/** + * \brief De-initialize the shared memory interface administration on the host. + * + */ +void shared_memory_allocation_uninitialize(vied_memory_t idm); + +/** + * \brief Allocate (DDR) shared memory space and return a host virtual address. Returns 0 when insufficient memory is available. + */ +host_virtual_address_t shared_memory_alloc(vied_memory_t idm, size_t bytes); + +/** + * \brief Free (DDR) shared memory space. +*/ +void shared_memory_free(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Translate a virtual host address to a physical address. +*/ +vied_physical_address_t shared_memory_virtual_host_to_physical_address(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Return the allocated physical pages for a virtual host address. +*/ +shared_memory_physical_page_list shared_memory_virtual_host_to_physical_pages(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Destroy a shared_memory_physical_page_list.
+*/ +void shared_memory_physical_pages_list_destroy(shared_memory_physical_page_list ppl); + +/** + * \brief Store a byte into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_8(vied_memory_t idm, host_virtual_address_t addr, uint8_t data); + +/** + * \brief Store a 16-bit word into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_16(vied_memory_t idm, host_virtual_address_t addr, uint16_t data); + +/** + * \brief Store a 32-bit word into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_32(vied_memory_t idm, host_virtual_address_t addr, uint32_t data); + +/** + * \brief Store a number of bytes into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store(vied_memory_t idm, host_virtual_address_t addr, const void *data, size_t bytes); + +/** + * \brief Set a number of bytes of (DDR) shared memory space to 0 using a host virtual address + */ +void shared_memory_zero(vied_memory_t idm, host_virtual_address_t addr, size_t bytes); + +/** + * \brief Load a byte from (DDR) shared memory space using a host virtual address + */ +uint8_t shared_memory_load_8(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a 16-bit word from (DDR) shared memory space using a host virtual address + */ +uint16_t shared_memory_load_16(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a 32-bit word from (DDR) shared memory space using a host virtual address + */ +uint32_t shared_memory_load_32(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a number of bytes from (DDR) shared memory space using a host virtual address + */ +void shared_memory_load(vied_memory_t idm, host_virtual_address_t addr, void *data, size_t bytes); + +#endif /* _SHARED_MEMORY_ACCESS_H */
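Illustrative use of the allocation and store/load interface above (not part of the patch): allocate a buffer in shared DDR, write from the host, and read back. The memory id and sizes are made-up values:

/* Hypothetical sketch -- not part of the patch. */
static int sm_access_example(void)
{
	const vied_memory_t ddr = 0;	/* assumed DDR memory id */
	host_virtual_address_t buf;

	buf = shared_memory_alloc(ddr, 4096);
	if (!buf)
		return -1;	/* allocation failed */

	shared_memory_zero(ddr, buf, 4096);
	shared_memory_store_32(ddr, buf, 0xcafef00d);

	if (shared_memory_load_32(ddr, buf) != 0xcafef00d)
		return -1;	/* unexpected readback */

	shared_memory_free(ddr, buf);
	return 0;
}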
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/shared_memory_map.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/shared_memory_map.h new file mode 100755 index 000000000000..1bbedcf9e7fd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/shared_memory_map.h @@ -0,0 +1,53 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _SHARED_MEMORY_MAP_H +#define _SHARED_MEMORY_MAP_H + +#include +#include +#include + +typedef void (*shared_memory_invalidate_mmu_tlb)(void); +typedef void (*shared_memory_set_page_table_base_address)(vied_physical_address_t); + +typedef void (*shared_memory_invalidate_mmu_tlb_ssid)(vied_subsystem_t id); +typedef void (*shared_memory_set_page_table_base_address_ssid)(vied_subsystem_t id, vied_physical_address_t); + +/** + * \brief Initialize the CSS virtual address system and MMU. The subsystem id will NOT be taken into account. +*/ +int shared_memory_map_initialize(vied_subsystem_t id, vied_memory_t idm, size_t mmu_ps, size_t mmu_pnrs, vied_physical_address_t ddr_addr, shared_memory_invalidate_mmu_tlb inv_tlb, shared_memory_set_page_table_base_address sbt); + +/** + * \brief Initialize the CSS virtual address system and MMU. The subsystem id will be taken into account. +*/ +int shared_memory_map_initialize_ssid(vied_subsystem_t id, vied_memory_t idm, size_t mmu_ps, size_t mmu_pnrs, vied_physical_address_t ddr_addr, shared_memory_invalidate_mmu_tlb_ssid inv_tlb, shared_memory_set_page_table_base_address_ssid sbt); + +/** + * \brief De-initialize the CSS virtual address system and MMU. +*/ +void shared_memory_map_uninitialize(vied_subsystem_t id, vied_memory_t idm); + +/** + * \brief Convert a host virtual address to a CSS virtual address and update the MMU. +*/ +vied_virtual_address_t shared_memory_map(vied_subsystem_t id, vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Free a CSS virtual address and update the MMU. +*/ +void shared_memory_unmap(vied_subsystem_t id, vied_memory_t idm, vied_virtual_address_t addr); + + +#endif /* _SHARED_MEMORY_MAP_H */
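Illustrative use of the mapping interface above (not part of the patch): a host-side allocation only becomes visible to the subsystem once it is mapped into CSS virtual space. Subsystem and memory ids are made-up values:

/* Hypothetical sketch -- not part of the patch. */
static vied_virtual_address_t sm_map_example(host_virtual_address_t buf)
{
	const vied_subsystem_t sid = 0;	/* assumed subsystem id */
	const vied_memory_t ddr = 0;	/* assumed DDR memory id */
	vied_virtual_address_t css_addr;

	/* Returns the address firmware should use; the MMU page
	 * table is updated as a side effect. */
	css_addr = shared_memory_map(sid, ddr, buf);

	/* ... hand css_addr to firmware; when done:
	 * shared_memory_unmap(sid, ddr, css_addr); */
	return css_addr;
}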
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_config.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_config.h new file mode 100755 index 000000000000..912f016ead24 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_config.h @@ -0,0 +1,33 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_CONFIG_H +#define _HRT_VIED_CONFIG_H + +/* Defines from the compiler: + * HRT_HOST - this is code running on the host + * HRT_CELL - this is code running on a cell + */ +#ifdef HRT_HOST +# define CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL 1 +# undef CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL + +#elif defined (HRT_CELL) +# undef CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL +# define CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL 1 + +#else /* !HRT_CELL */ +/* Allow neither HRT_HOST nor HRT_CELL for testing purposes */ +#endif /* !HRT_CELL */ + +#endif /* _HRT_VIED_CONFIG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_memory_access_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_memory_access_types.h new file mode 100755 index 000000000000..0b44492789e3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_memory_access_types.h @@ -0,0 +1,36 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_MEMORY_ACCESS_TYPES_H +#define _HRT_VIED_MEMORY_ACCESS_TYPES_H + +/** Types for the VIED memory access interface */ + +#include "vied_types.h" + +/** + * \brief An identifier for a system memory. + * + * This identifier must be a compile-time constant. It is used in + * access to system memory. + */ +typedef unsigned int vied_memory_t; + +#ifndef __HIVECC +/** + * \brief The type for a physical address + */ +typedef unsigned long long vied_physical_address_t; +#endif + +#endif /* _HRT_VIED_MEMORY_ACCESS_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access.h new file mode 100755 index 000000000000..674f5fb5b0f9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access.h @@ -0,0 +1,70 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_H + +#include +#include "vied_config.h" +#include "vied_subsystem_access_types.h" + +#if !defined(CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL) && \ + !defined(CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL) +#error Implementation selection macro for vied subsystem access not defined +#endif + +#if defined(CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL) +#ifndef __HIVECC +#error "Inline implementation of subsystem access not supported for host" +#endif +#define _VIED_SUBSYSTEM_ACCESS_INLINE static __inline +#include "vied_subsystem_access_impl.h" +#else +#define _VIED_SUBSYSTEM_ACCESS_INLINE +#endif + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_8(vied_subsystem_t dev, + vied_subsystem_address_t addr, uint8_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_16(vied_subsystem_t dev, + vied_subsystem_address_t addr, uint16_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_32(vied_subsystem_t dev, + vied_subsystem_address_t addr, uint32_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store(vied_subsystem_t dev, + vied_subsystem_address_t addr, + const void *data, unsigned int size); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint8_t vied_subsystem_load_8(vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint16_t vied_subsystem_load_16(vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint32_t vied_subsystem_load_32(vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_load(vied_subsystem_t dev, + vied_subsystem_address_t addr, + void *data, unsigned int size); + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_H */
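Illustrative use of the subsystem load/store interface above (not part of the patch): MMIO-style access to a device register inside a subsystem. The subsystem id and register offset are made-up values:

/* Hypothetical sketch -- not part of the patch. */
static uint32_t ss_access_example(void)
{
	const vied_subsystem_t psys = 0;		/* assumed subsystem id */
	const vied_subsystem_address_t reg = 0x100;	/* assumed register offset */

	vied_subsystem_store_32(psys, reg, 0x1);	/* e.g. set an enable bit */
	return vied_subsystem_load_32(psys, reg);	/* read it back */
}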
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access_initialization.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access_initialization.h new file mode 100755 index 000000000000..81f4d08d5ae0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access_initialization.h @@ -0,0 +1,44 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H + +#include "vied_subsystem_access_types.h" + +/** @brief Initialises the access of a subsystem. + * @param[in] system The subsystem for which the access has to be initialised. + * + * vied_subsystem_access_initialize initialises the access to a subsystem. + * It sets the base address of the subsystem. This base address is extracted from the hsd file. + * + */ +void +vied_subsystem_access_initialize(vied_subsystem_t system); + + +/** @brief Initialises the access of multiple subsystems. + * @param[in] nr_subsystems The number of subsystems for which the access has to be initialised. + * @param[in] base_addresses A pointer to an array of base addresses of subsystems. + * The size of this array must be "nr_subsystems". + * This array must be available during the accesses of the subsystems. + * + * vied_subsystems_access_initialize initialises the access to multiple subsystems. + * It sets the base addresses of the subsystems that are provided by the array base_addresses. + * + */ +void +vied_subsystems_access_initialize(unsigned int nr_subsystems, + const vied_subsystem_base_address_t *base_addresses); + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H */
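Illustrative use of the initialization interface above (not part of the patch). The base addresses are made-up values; note the documented requirement that the array stay valid for as long as subsystem accesses are made:

/* Hypothetical sketch -- not part of the patch. */
static const vied_subsystem_base_address_t subsys_bases[2] = {
	0xe0000000ull,	/* assumed base of subsystem 0 as seen from host */
	0xe8000000ull,	/* assumed base of subsystem 1 as seen from host */
};

static void ss_init_example(void)
{
	/* subsys_bases must outlive all vied_subsystem_load/store calls */
	vied_subsystems_access_initialize(2, subsys_bases);
}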
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access_types.h new file mode 100755 index 000000000000..75fef6c4ddba --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_subsystem_access_types.h @@ -0,0 +1,34 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H + +/** Types for the VIED subsystem access interface */ +#include + +/** \brief An identifier for a VIED subsystem. + * + * This identifier must be a compile-time constant. It is used in + * access to a VIED subsystem. + */ +typedef unsigned int vied_subsystem_t; + + +/** \brief An address within a VIED subsystem */ +typedef uint32_t vied_subsystem_address_t; + +/** \brief A base address of a VIED subsystem seen from the host */ +typedef unsigned long long vied_subsystem_base_address_t; + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_types.h new file mode 100755 index 000000000000..0acfdbb00cfa --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied/vied/vied_types.h @@ -0,0 +1,45 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_TYPES_H +#define _HRT_VIED_TYPES_H + +/** Types shared by VIED interfaces */ + +#include + +/** \brief An address within a VIED subsystem + * + * This will eventually replace the vied_memory_address_t and vied_subsystem_address_t + */ +typedef uint32_t vied_address_t; + +/** \brief Memory address type + * + * A memory address is an offset within a memory. + */ +typedef uint32_t vied_memory_address_t; + +/** \brief Master port id */ +typedef int vied_master_port_id_t; + +/** + * \brief Require the existence of a certain type + * + * This macro can be used in interface header files to ensure that + * an implementation-defined type with a specified name exists. + */ +#define _VIED_REQUIRE_TYPE(T) enum { _VIED_SIZEOF_##T = sizeof(T) } + + +#endif /* _HRT_VIED_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_nci_acb/interface/vied_nci_acb_route_type.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_nci_acb/interface/vied_nci_acb_route_type.h new file mode 100644 index 000000000000..b09d9f4d5d42 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_nci_acb/interface/vied_nci_acb_route_type.h @@ -0,0 +1,39 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+ */ + +#ifndef VIED_NCI_ACB_ROUTE_TYPE_H_ +#define VIED_NCI_ACB_ROUTE_TYPE_H_ + +#include "type_support.h" + +typedef enum { + NCI_ACB_PORT_ISP = 0, + NCI_ACB_PORT_ACC = 1, + NCI_ACB_PORT_INVALID = 0xFF +} nci_acb_port_t; + +typedef struct { + /* 0 = ISP, 1 = Acc */ + nci_acb_port_t in_select; + /* 0 = ISP, 1 = Acc */ + nci_acb_port_t out_select; + /* When set, Ack will be sent only when EOF arrives */ + uint32_t ignore_line_num; + /* Fork adapter to enable streaming to both outputs + * (next acb out and isp out) + */ + uint32_t fork_acb_output; +} nci_acb_route_t; + +#endif /* VIED_NCI_ACB_ROUTE_TYPE_H_ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_param_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_param_storage_class.h new file mode 100644 index 000000000000..1ea7e729078c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_param_storage_class.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PARAM_STORAGE_CLASS_H +#define __IA_CSS_PARAM_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __INLINE_PARAMETERS__ +#define IA_CSS_PARAMETERS_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PARAMETERS_STORAGE_CLASS_C +#else +#define IA_CSS_PARAMETERS_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PARAMETERS_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PARAM_STORAGE_CLASS_H */
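Illustrative pairing of the storage-class macros above (not part of the patch): a header declares with the _H macro, an out-of-line build defines the function with the _C macro, and an inline build (__INLINE_PARAMETERS__) turns both into STORAGE_CLASS_INLINE so the implementation can be pulled into the header. The names and the size rule below are made up:

/* Hypothetical sketch -- not part of the patch. */
/* in some_params.h */
IA_CSS_PARAMETERS_STORAGE_CLASS_H
unsigned int some_params_get_size(unsigned int nof_sections);

/* in some_params.c (lib build) or some_params_impl.h (inline build) */
IA_CSS_PARAMETERS_STORAGE_CLASS_C
unsigned int some_params_get_size(unsigned int nof_sections)
{
	return nof_sections * 64U;	/* made-up size rule */
}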
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal.h new file mode 100644 index 000000000000..4cc71be3fc38 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal.h @@ -0,0 +1,188 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_H +#define __IA_CSS_TERMINAL_H + +#include "type_support.h" +#include "ia_css_terminal_types.h" +#include "ia_css_param_storage_class.h" + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_param_in_terminal_get_descriptor_size( + const unsigned int nof_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_param_section_desc_t * +ia_css_param_in_terminal_get_param_section_desc( + const ia_css_param_terminal_t *param_terminal, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_param_out_terminal_get_descriptor_size( + const unsigned int nof_sections, + const unsigned int nof_fragments +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_param_section_desc_t * +ia_css_param_out_terminal_get_param_section_desc( + const ia_css_param_terminal_t *param_terminal, + const unsigned int section_index, + const unsigned int nof_sections, + const unsigned int fragment_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_param_terminal_create( + ia_css_param_terminal_t *param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal +); + + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_spatial_param_terminal_get_descriptor_size( + const unsigned int nof_frame_param_sections, + const unsigned int nof_fragments +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_fragment_grid_desc_t * +ia_css_spatial_param_terminal_get_fragment_grid_desc( + const ia_css_spatial_param_terminal_t *spatial_param_terminal, + const unsigned int fragment_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_frame_grid_param_section_desc_t * +ia_css_spatial_param_terminal_get_frame_grid_param_section_desc( + const ia_css_spatial_param_terminal_t *spatial_param_terminal, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_spatial_param_terminal_create( + ia_css_spatial_param_terminal_t *spatial_param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal, + const unsigned int nof_fragments, + const uint32_t kernel_id +); + + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_sliced_param_terminal_get_descriptor_size( + const unsigned int nof_slice_param_sections, + const unsigned int nof_slices[], + const unsigned int nof_fragments +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_fragment_slice_desc_t * +ia_css_sliced_param_terminal_get_fragment_slice_desc( + const ia_css_sliced_param_terminal_t *sliced_param_terminal, + const unsigned int fragment_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_slice_param_section_desc_t * +ia_css_sliced_param_terminal_get_slice_param_section_desc( + const ia_css_sliced_param_terminal_t *sliced_param_terminal, + const unsigned int fragment_index, + const unsigned int slice_index, + const unsigned int section_index, + const unsigned int nof_slice_param_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_sliced_param_terminal_create( + ia_css_sliced_param_terminal_t *sliced_param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal, + const unsigned int nof_slice_param_sections, + const unsigned int nof_slices[], + const unsigned int nof_fragments, + const uint32_t kernel_id +); + + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_program_terminal_get_descriptor_size( + const unsigned int nof_fragments, + const unsigned int nof_fragment_param_sections, + const unsigned int
nof_kernel_fragment_sequencer_infos, + const unsigned int nof_command_objs +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_fragment_param_section_desc_t * +ia_css_program_terminal_get_frgmnt_prm_sct_desc( + const ia_css_program_terminal_t *program_terminal, + const unsigned int fragment_index, + const unsigned int section_index, + const unsigned int nof_fragment_param_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_kernel_fragment_sequencer_info_desc_t * +ia_css_program_terminal_get_kernel_frgmnt_seq_info_desc( + const ia_css_program_terminal_t *program_terminal, + const unsigned int fragment_index, + const unsigned int info_index, + const unsigned int nof_kernel_fragment_sequencer_infos +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_program_terminal_create( + ia_css_program_terminal_t *program_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const unsigned int nof_fragments, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int nof_command_objs +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_program_terminal_get_command_base_offset( + const ia_css_program_terminal_t *program_terminal, + const unsigned int nof_fragments, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int commands_slots_used, + uint16_t *command_desc_offset +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +uint16_t *ia_css_program_terminal_get_line_count( + const ia_css_kernel_fragment_sequencer_command_desc_t + *kernel_fragment_sequencer_command_desc_base, + const unsigned int set_count +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_spatial_param_terminal_get_descriptor_size( + const unsigned int nof_frame_param_sections, + const unsigned int nof_fragments +); + +#ifdef __INLINE_PARAMETERS__ +#include "ia_css_terminal_impl.h" +#endif /* __INLINE_PARAMETERS__ */ + +#endif /* __IA_CSS_TERMINAL_H */
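Illustrative use of the terminal API above (not part of the patch), showing the descriptor-size/create pattern this interface follows: the *_get_descriptor_size() helper sizes the variable-length layout, the caller provides the storage, and *_create() initializes it in place. The host-side allocator and the parameter values are assumptions:

/* Hypothetical sketch -- not part of the patch. */
#include <stdlib.h>	/* host-side example only */

static ia_css_param_terminal_t *make_in_param_terminal(
	unsigned int nof_sections, uint16_t terminal_offset)
{
	unsigned int size =
		ia_css_param_in_terminal_get_descriptor_size(nof_sections);
	ia_css_param_terminal_t *terminal = malloc(size);

	if (terminal == NULL)
		return NULL;
	if (ia_css_param_terminal_create(terminal, terminal_offset,
			(uint16_t)size, 1 /* is_input_terminal */) != 0) {
		free(terminal);
		return NULL;
	}
	return terminal;
}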
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest.h new file mode 100644 index 000000000000..ca0a436082cf --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest.h @@ -0,0 +1,109 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_H +#define __IA_CSS_TERMINAL_MANIFEST_H + +#include "type_support.h" +#include "ia_css_param_storage_class.h" +#include "ia_css_terminal_manifest_types.h" + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_param_terminal_manifest_get_size( + const unsigned int nof_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_param_terminal_manifest_init( + ia_css_param_terminal_manifest_t *param_terminal, + const uint16_t section_count +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_param_manifest_section_desc_t * +ia_css_param_terminal_manifest_get_prm_sct_desc( + const ia_css_param_terminal_manifest_t *param_terminal_manifest, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_spatial_param_terminal_manifest_get_size( + const unsigned int nof_frame_param_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_spatial_param_terminal_manifest_init( + ia_css_spatial_param_terminal_manifest_t *spatial_param_terminal, + const uint16_t section_count +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_frame_grid_param_manifest_section_desc_t * +ia_css_spatial_param_terminal_manifest_get_frm_grid_prm_sct_desc( + const ia_css_spatial_param_terminal_manifest_t * + spatial_param_terminal_manifest, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_sliced_param_terminal_manifest_get_size( + const unsigned int nof_slice_param_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_sliced_param_terminal_manifest_init( + ia_css_sliced_param_terminal_manifest_t *sliced_param_terminal, + const uint16_t section_count +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_sliced_param_manifest_section_desc_t * +ia_css_sliced_param_terminal_manifest_get_sliced_prm_sct_desc( + const ia_css_sliced_param_terminal_manifest_t * + sliced_param_terminal_manifest, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_program_terminal_manifest_get_size( + const unsigned int nof_fragment_param_sections, + const unsigned int nof_kernel_fragment_sequencer_infos +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_program_terminal_manifest_init( + ia_css_program_terminal_manifest_t *program_terminal, + const uint16_t fragment_param_section_count, + const uint16_t kernel_fragment_seq_info_section_count +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_fragment_param_manifest_section_desc_t * +ia_css_program_terminal_manifest_get_frgmnt_prm_sct_desc( + const ia_css_program_terminal_manifest_t *program_terminal_manifest, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_kernel_fragment_sequencer_info_manifest_desc_t * +ia_css_program_terminal_manifest_get_kernel_frgmnt_seq_info_desc( + const ia_css_program_terminal_manifest_t *program_terminal_manifest, + const unsigned int info_index +); + +#ifdef __INLINE_PARAMETERS__ +#include "ia_css_terminal_manifest_impl.h" +#endif /* __INLINE_PARAMETERS__ */ + +#endif /* __IA_CSS_TERMINAL_MANIFEST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest_types.h new file mode 100644 index 000000000000..fe146395a8f4 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest_types.h @@ -0,0 +1,342 @@ +/** +* Support for Intel
Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_TYPES_H +#define __IA_CSS_TERMINAL_MANIFEST_TYPES_H + + +#include "ia_css_terminal_defs.h" +#include "type_support.h" +#include "ia_css_base_types.h" +#include "ia_css_terminal_manifest_base_types.h" + +#define N_PADDING_UINT8_IN_PARAM_TERMINAL_MANIFEST_SEC_STRUCT 1 +#define SIZE_OF_PARAM_TERMINAL_MANIFEST_SEC_STRUCT_IN_BITS \ + (1 * IA_CSS_UINT32_T_BITS \ + + 3 * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_PARAM_TERMINAL_MANIFEST_SEC_STRUCT * IA_CSS_UINT8_T_BITS) + +/* =============== Cached Param Terminal Manifest - START ============== */ +struct ia_css_param_manifest_section_desc_s { + /* Maximum size of the related parameter region */ + uint32_t max_mem_size; + /* Indication of the kernel this parameter belongs to */ + uint8_t kernel_id; + /* Memory targeted by this section + * (Register MMIO Interface/DMEM/VMEM/GMEM etc) + */ + uint8_t mem_type_id; + /* Region id within the specified memory */ + uint8_t region_id; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PARAM_TERMINAL_MANIFEST_SEC_STRUCT]; +}; + +typedef struct ia_css_param_manifest_section_desc_s + ia_css_param_manifest_section_desc_t; + + +#define N_PADDING_UINT8_IN_PARAM_TERMINAL_MAN_STRUCT 4 +#define SIZE_OF_PARAM_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + (SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + + (2*IA_CSS_UINT16_T_BITS) \ + + (N_PADDING_UINT8_IN_PARAM_TERMINAL_MAN_STRUCT * IA_CSS_UINT8_T_BITS)) + +/* Frame constant parameters terminal manifest */ +struct ia_css_param_terminal_manifest_s { + /* Parameter terminal manifest base */ + ia_css_terminal_manifest_t base; + /* + * Number of cached parameter sections, coming from manifest + * but also shared by the terminal + */ + uint16_t param_manifest_section_desc_count; + /* + * Points to the variable array of + * struct ia_css_param_section_desc_s + */ + uint16_t param_manifest_section_desc_offset; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PARAM_TERMINAL_MAN_STRUCT]; +}; + +typedef struct ia_css_param_terminal_manifest_s + ia_css_param_terminal_manifest_t; +/* ================= Cached Param Terminal Manifest - End ================ */ + + +/* ================= Spatial Param Terminal Manifest - START ============= */ + +#define SIZE_OF_FRAG_GRID_MAN_STRUCT_IN_BITS \ + ((IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS) \ + + (IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS)) + +struct ia_css_fragment_grid_manifest_desc_s { + /* Min resolution width/height of the spatial parameters + * for the fragment measured in compute units + */ + uint16_t min_fragment_grid_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Max resolution width/height of the spatial parameters + * for the fragment measured in compute units + */ + uint16_t max_fragment_grid_dimension[IA_CSS_N_DATA_DIMENSION]; +}; + +typedef struct ia_css_fragment_grid_manifest_desc_s + ia_css_fragment_grid_manifest_desc_t; + +#define N_PADDING_UINT8_IN_FRAME_GRID_PARAM_MAN_SEC_STRUCT 1 +#define SIZE_OF_FRAME_GRID_PARAM_MAN_SEC_STRUCT_IN_BITS \ + 
(1 * IA_CSS_UINT32_T_BITS \ + + 3 * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_FRAME_GRID_PARAM_MAN_SEC_STRUCT * IA_CSS_UINT8_T_BITS) + +struct ia_css_frame_grid_param_manifest_section_desc_s { + /* Maximum buffer total size allowed for + * this frame of parameters + */ + uint32_t max_mem_size; + /* Memory space targeted by this section + * (Register MMIO Interface/DMEM/VMEM/GMEM etc) + */ + uint8_t mem_type_id; + /* Region id within the specified memory space */ + uint8_t region_id; + /* size in bytes of each compute unit for + * the specified memory space and region + */ + uint8_t elem_size; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_FRAME_GRID_PARAM_MAN_SEC_STRUCT]; +}; + +typedef struct ia_css_frame_grid_param_manifest_section_desc_s + ia_css_frame_grid_param_manifest_section_desc_t; + +#define SIZE_OF_FRAME_GRID_MAN_STRUCT_IN_BITS \ + ((IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS) \ + + (IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS)) + +struct ia_css_frame_grid_manifest_desc_s { + /* Min resolution width/height of the spatial parameters for + * the frame measured in compute units + */ + uint16_t min_frame_grid_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Max resolution width/height of the spatial parameters for + * the frame measured in compute units + */ + uint16_t max_frame_grid_dimension[IA_CSS_N_DATA_DIMENSION]; +}; + +typedef struct ia_css_frame_grid_manifest_desc_s + ia_css_frame_grid_manifest_desc_t; + +#define N_PADDING_UINT8_IN_SPATIAL_PARAM_TERM_MAN_STRUCT 2 +#define SIZE_OF_SPATIAL_PARAM_TERM_MAN_STRUCT_IN_BITS \ + ((SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS) \ + + (SIZE_OF_FRAME_GRID_MAN_STRUCT_IN_BITS) \ + + (SIZE_OF_FRAG_GRID_MAN_STRUCT_IN_BITS) \ + + (2 * IA_CSS_UINT16_T_BITS) \ + + (2 * IA_CSS_UINT8_T_BITS) \ + + (N_PADDING_UINT8_IN_SPATIAL_PARAM_TERM_MAN_STRUCT * \ + IA_CSS_UINT8_T_BITS)) + +struct ia_css_spatial_param_terminal_manifest_s { + /* Spatial Parameter terminal manifest base */ + ia_css_terminal_manifest_t base; + /* Contains limits for the frame spatial parameters */ + ia_css_frame_grid_manifest_desc_t frame_grid_desc; + /* + * Contains limits for the fragment spatial parameters + * - COMMON AMONG FRAGMENTS + */ + ia_css_fragment_grid_manifest_desc_t common_fragment_grid_desc; + /* + * Number of frame spatial parameter sections, they are set + * in slice-steps through frame processing + */ + uint16_t frame_grid_param_manifest_section_desc_count; + /* + * Points to the variable array of + * ia_css_frame_spatial_param_manifest_section_desc_t + */ + uint16_t frame_grid_param_manifest_section_desc_offset; + /* + * Indication of the kernel this spatial parameter terminal belongs to + * SHOULD MATCH TO INDEX AND BE USED ONLY FOR CHECK + */ + uint8_t kernel_id; + /* + * Groups together compute units in order to achieve alignment + * requirements for transfers and to achieve canonical frame + * representation + */ + uint8_t compute_units_p_elem; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_SPATIAL_PARAM_TERM_MAN_STRUCT]; +}; + +typedef struct ia_css_spatial_param_terminal_manifest_s + ia_css_spatial_param_terminal_manifest_t; + +/* ================= Spatial Param Terminal Manifest - END ================ */ + +/* ================= Sliced Param Terminal Manifest - START =============== */ + +#define N_PADDING_UINT8_IN_SLICED_TERMINAL_MAN_SECTION_STRUCT (2) +#define SIZE_OF_SLICED_PARAM_MAN_SEC_STRUCT_IN_BITS \ + (1 * IA_CSS_UINT32_T_BITS \ + + 2 * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_SLICED_TERMINAL_MAN_SECTION_STRUCT *
IA_CSS_UINT8_T_BITS) + +struct ia_css_sliced_param_manifest_section_desc_s { + /* Maximum size of the related parameter region */ + uint32_t max_mem_size; + /* + * Memory targeted by this section + * (Register MMIO Interface/DMEM/VMEM/GMEM etc) + */ + uint8_t mem_type_id; + /* Region id within the specified memory */ + uint8_t region_id; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_SLICED_TERMINAL_MAN_SECTION_STRUCT]; +}; + +typedef struct ia_css_sliced_param_manifest_section_desc_s + ia_css_sliced_param_manifest_section_desc_t; + +#define N_PADDING_UINT8_IN_SLICED_TERMINAL_MANIFEST_STRUCT 3 +#define SIZE_OF_SLICED_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + (SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + + 2 * IA_CSS_UINT16_T_BITS \ + + 1 * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_SLICED_TERMINAL_MANIFEST_STRUCT * IA_CSS_UINT8_T_BITS) + +/* Frame constant parameters terminal manifest */ +struct ia_css_sliced_param_terminal_manifest_s { + /* Spatial Parameter terminal base */ + ia_css_terminal_manifest_t base; + /* + * Number of the array elements + * sliced_param_section_offset points to + */ + uint16_t sliced_param_section_count; + /* + * Points to array of ia_css_sliced_param_manifest_section_desc_s + * which contain info for the slicing of the parameters + */ + uint16_t sliced_param_section_offset; + /* Kernel identifier */ + uint8_t kernel_id; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_SLICED_TERMINAL_MANIFEST_STRUCT]; +}; + +typedef struct ia_css_sliced_param_terminal_manifest_s + ia_css_sliced_param_terminal_manifest_t; + +/* ================= Slice Param Terminal Manifest - End =============== */ + +/* ================= Program Terminal Manifest - START ================= */ + +#define N_PADDING_UINT8_IN_FRAG_PARAM_MAN_SEC_STRUCT 1 +#define SIZE_OF_FRAG_PARAM_MAN_SEC_STRUCT_IN_BITS \ + (1 * IA_CSS_UINT32_T_BITS \ + + 3 * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_FRAG_PARAM_MAN_SEC_STRUCT * IA_CSS_UINT8_T_BITS) + +/* Fragment constant parameters manifest */ +struct ia_css_fragment_param_manifest_section_desc_s { + /* Maximum size of the related parameter region */ + uint32_t max_mem_size; + /* Indication of the kernel this parameter belongs to */ + uint8_t kernel_id; + /* Memory targeted by this section + * (Register MMIO Interface/DMEM/VMEM/GMEM etc) + */ + uint8_t mem_type_id; + /* Region id within the specified memory space */ + uint8_t region_id; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_FRAG_PARAM_MAN_SEC_STRUCT]; +}; + +typedef struct ia_css_fragment_param_manifest_section_desc_s + ia_css_fragment_param_manifest_section_desc_t; + +#define SIZE_OF_KERNEL_FRAG_SEQ_INFO_MAN_STRUCT_IN_BITS \ + (10*IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS) + +struct ia_css_kernel_fragment_sequencer_info_manifest_desc_s { + /* Slice dimensions */ + uint16_t min_fragment_grid_slice_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Slice dimensions */ + uint16_t max_fragment_grid_slice_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Nof slices */ + uint16_t min_fragment_grid_slice_count[IA_CSS_N_DATA_DIMENSION]; + /* Nof slices */ + uint16_t max_fragment_grid_slice_count[IA_CSS_N_DATA_DIMENSION]; + /* Grid point decimation factor */ + uint16_t + min_fragment_grid_point_decimation_factor[IA_CSS_N_DATA_DIMENSION]; + /* Grid point decimation factor */ + uint16_t + max_fragment_grid_point_decimation_factor[IA_CSS_N_DATA_DIMENSION]; + /* Relative position of grid origin to pixel origin */ + int16_t + min_fragment_grid_overlay_pixel_topleft_index[IA_CSS_N_DATA_DIMENSION]; + /*
Relative position of grid origin to pixel origin */ + int16_t + max_fragment_grid_overlay_pixel_topleft_index[IA_CSS_N_DATA_DIMENSION]; + /* Dimension of grid */ + int16_t + min_fragment_grid_overlay_pixel_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Dimension of grid */ + int16_t + max_fragment_grid_overlay_pixel_dimension[IA_CSS_N_DATA_DIMENSION]; +}; + +typedef struct ia_css_kernel_fragment_sequencer_info_manifest_desc_s + ia_css_kernel_fragment_sequencer_info_manifest_desc_t; + +#define N_PADDING_UINT8_IN_PROGRAM_TERM_MAN_STRUCT 2 +#define SIZE_OF_PROG_TERM_MAN_STRUCT_IN_BITS \ + ((SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS) \ + + (IA_CSS_UINT32_T_BITS) \ + + (5*IA_CSS_UINT16_T_BITS) \ + + (N_PADDING_UINT8_IN_PROGRAM_TERM_MAN_STRUCT * IA_CSS_UINT8_T_BITS)) + +struct ia_css_program_terminal_manifest_s { + ia_css_terminal_manifest_t base; + /* Connection manager passes seq info as single blob at the moment */ + uint32_t sequencer_info_kernel_id; + /* Maximum number of command secriptors supported + * by the program group + */ + uint16_t max_kernel_fragment_sequencer_command_desc; + uint16_t fragment_param_manifest_section_desc_count; + uint16_t fragment_param_manifest_section_desc_offset; + uint16_t kernel_fragment_sequencer_info_manifest_info_count; + uint16_t kernel_fragment_sequencer_info_manifest_info_offset; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PROGRAM_TERM_MAN_STRUCT]; +}; + +typedef struct ia_css_program_terminal_manifest_s + ia_css_program_terminal_manifest_t; + +/* ==================== Program Terminal Manifest - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_MANIFEST_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_types.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_types.h new file mode 100644 index 000000000000..c5c89fb7ec91 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_types.h @@ -0,0 +1,351 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_TERMINAL_TYPES_H +#define __IA_CSS_TERMINAL_TYPES_H + +#include "type_support.h" +#include "ia_css_base_types.h" +#include "ia_css_terminal_base_types.h" + + +typedef struct ia_css_program_control_init_load_section_desc_s + ia_css_program_control_init_load_section_desc_t; +typedef struct ia_css_program_control_init_connect_section_desc_s + ia_css_program_control_init_connect_section_desc_t; +typedef struct ia_css_program_control_init_program_desc_s + ia_css_program_control_init_program_desc_t; +typedef struct ia_css_program_control_init_terminal_s + ia_css_program_control_init_terminal_t; + +typedef struct ia_css_program_terminal_s ia_css_program_terminal_t; +typedef struct ia_css_fragment_param_section_desc_s + ia_css_fragment_param_section_desc_t; +typedef struct ia_css_kernel_fragment_sequencer_info_desc_s + ia_css_kernel_fragment_sequencer_info_desc_t; +typedef struct ia_css_kernel_fragment_sequencer_command_desc_s + ia_css_kernel_fragment_sequencer_command_desc_t; + +typedef struct ia_css_sliced_param_terminal_s ia_css_sliced_param_terminal_t; +typedef struct ia_css_fragment_slice_desc_s ia_css_fragment_slice_desc_t; +typedef struct ia_css_slice_param_section_desc_s + ia_css_slice_param_section_desc_t; + +typedef struct ia_css_spatial_param_terminal_s ia_css_spatial_param_terminal_t; +typedef struct ia_css_frame_grid_desc_s ia_css_frame_grid_desc_t; +typedef struct ia_css_frame_grid_param_section_desc_s + ia_css_frame_grid_param_section_desc_t; +typedef struct ia_css_fragment_grid_desc_s ia_css_fragment_grid_desc_t; + +typedef struct ia_css_param_terminal_s ia_css_param_terminal_t; +typedef struct ia_css_param_section_desc_s ia_css_param_section_desc_t; + +typedef struct ia_css_param_payload_s ia_css_param_payload_t; +typedef struct ia_css_terminal_s ia_css_terminal_t; + +/* =================== Generic Parameter Payload - START =================== */ +#define N_UINT64_IN_PARAM_PAYLOAD_STRUCT 1 +#define N_UINT32_IN_PARAM_PAYLOAD_STRUCT 1 + +#define IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \ + (N_UINT64_IN_PARAM_PAYLOAD_STRUCT * IA_CSS_UINT64_T_BITS \ + + VIED_VADDRESS_BITS \ + + N_UINT32_IN_PARAM_PAYLOAD_STRUCT * IA_CSS_UINT32_T_BITS) + +struct ia_css_param_payload_s { + /* + * Temporary variable holding the host address of the parameter buffer + * as PSYS is handling the parameters on the host side for the moment + */ + uint64_t host_buffer; + /* + * Base virtual addresses to parameters in subsystem virtual + * memory space + * NOTE: Used in legacy pg flow + */ + vied_vaddress_t buffer; + /* + * Offset to buffer address within external buffer set structure + * NOTE: Used in ppg flow + */ + uint32_t terminal_index; +}; +/* =================== Generic Parameter Payload - End ==================== */ + + +/* ==================== Cached Param Terminal - START ==================== */ +#define N_UINT32_IN_PARAM_SEC_STRUCT 2 + +#define SIZE_OF_PARAM_SEC_STRUCT_BITS \ + (N_UINT32_IN_PARAM_SEC_STRUCT * IA_CSS_UINT32_T_BITS) + +/* Frame constant parameters section */ +struct ia_css_param_section_desc_s { + /* Offset of the parameter allocation in memory */ + uint32_t mem_offset; + /* Memory allocation size needs of this parameter */ + uint32_t mem_size; +}; + +#define N_UINT16_IN_PARAM_TERMINAL_STRUCT 1 +#define N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT 6 + +#define SIZE_OF_PARAM_TERMINAL_STRUCT_BITS \ + (SIZE_OF_TERMINAL_STRUCT_BITS \ + + IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \ + + N_UINT16_IN_PARAM_TERMINAL_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT 
* IA_CSS_UINT8_T_BITS)
+
+/* Frame constant parameters terminal */
+struct ia_css_param_terminal_s {
+ /* Parameter terminal base */
+ ia_css_terminal_t base;
+ /* Parameter buffer handle attached to the terminal */
+ ia_css_param_payload_t param_payload;
+ /* Points to the variable array of ia_css_param_section_desc_t */
+ uint16_t param_section_desc_offset;
+ uint8_t padding[N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT];
+};
+/* ==================== Cached Param Terminal - End ==================== */
+
+
+/* ==================== Spatial Param Terminal - START ==================== */
+#define N_UINT16_IN_FRAG_GRID_STRUCT (2 * IA_CSS_N_DATA_DIMENSION)
+
+#define SIZE_OF_FRAG_GRID_STRUCT_BITS \
+ (N_UINT16_IN_FRAG_GRID_STRUCT * IA_CSS_UINT16_T_BITS)
+
+struct ia_css_fragment_grid_desc_s {
+ /*
+ * Offset width/height of the top-left compute unit of the
+ * fragment compared to the frame
+ */
+ uint16_t fragment_grid_index[IA_CSS_N_DATA_DIMENSION];
+ /*
+ * Resolution width/height of the spatial parameters that
+ * correspond to the fragment measured in compute units
+ */
+ uint16_t fragment_grid_dimension[IA_CSS_N_DATA_DIMENSION];
+};
+
+#define N_UINT32_IN_FRAME_GRID_PARAM_SEC_STRUCT 3
+#define N_PADDING_UINT8_IN_FRAME_GRID_PARAM_SEC_STRUCT 4
+
+#define SIZE_OF_FRAME_GRID_PARAM_SEC_STRUCT_BITS \
+ (N_UINT32_IN_FRAME_GRID_PARAM_SEC_STRUCT * IA_CSS_UINT32_T_BITS \
+ + N_PADDING_UINT8_IN_FRAME_GRID_PARAM_SEC_STRUCT * IA_CSS_UINT8_T_BITS)
+
+/*
+ * A plane of parameters with spatial aspect
+ * (compute units correlated to pixel data)
+ */
+struct ia_css_frame_grid_param_section_desc_s {
+ /* Offset of the parameter allocation in memory */
+ uint32_t mem_offset;
+ /* Memory allocation size needs of this parameter */
+ uint32_t mem_size;
+ /*
+ * stride in bytes of each line of compute units for
+ * the specified memory space and region
+ */
+ uint32_t stride;
+ uint8_t padding[N_PADDING_UINT8_IN_FRAME_GRID_PARAM_SEC_STRUCT];
+};
+
+#define N_UINT16_IN_FRAME_GRID_STRUCT_STRUCT IA_CSS_N_DATA_DIMENSION
+#define N_PADDING_UINT8_IN_FRAME_GRID_STRUCT 4
+
+#define SIZE_OF_FRAME_GRID_STRUCT_BITS \
+ (N_UINT16_IN_FRAME_GRID_STRUCT_STRUCT * IA_CSS_UINT16_T_BITS \
+ + N_PADDING_UINT8_IN_FRAME_GRID_STRUCT * IA_CSS_UINT8_T_BITS)
+
+struct ia_css_frame_grid_desc_s {
+ /* Resolution width/height of the frame of
+ * spatial parameters measured in compute units
+ */
+ uint16_t frame_grid_dimension[IA_CSS_N_DATA_DIMENSION];
+ uint8_t padding[N_PADDING_UINT8_IN_FRAME_GRID_STRUCT];
+};
+
+#define N_UINT32_IN_SPATIAL_PARAM_TERM_STRUCT 1
+#define N_UINT16_IN_SPATIAL_PARAM_TERM_STRUCT 2
+
+#define SIZE_OF_SPATIAL_PARAM_TERM_STRUCT_BITS \
+ (SIZE_OF_TERMINAL_STRUCT_BITS \
+ + IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \
+ + SIZE_OF_FRAME_GRID_STRUCT_BITS \
+ + N_UINT32_IN_SPATIAL_PARAM_TERM_STRUCT * IA_CSS_UINT32_T_BITS \
+ + N_UINT16_IN_SPATIAL_PARAM_TERM_STRUCT * IA_CSS_UINT16_T_BITS)
+
+struct ia_css_spatial_param_terminal_s {
+ /* Spatial Parameter terminal base */
+ ia_css_terminal_t base;
+ /* Spatial Parameter buffer handle attached to the terminal */
+ ia_css_param_payload_t param_payload;
+ /* Contains info for the frame of spatial parameters */
+ ia_css_frame_grid_desc_t frame_grid_desc;
+ /* Kernel identifier */
+ uint32_t kernel_id;
+ /*
+ * Points to the variable array of
+ * ia_css_frame_grid_param_section_desc_t
+ */
+ uint16_t frame_grid_param_section_desc_offset;
+ /*
+ * Points to array of ia_css_fragment_grid_desc_t
+ * which contain info for the fragments of spatial parameters
+ */
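+ /*
+ * Illustrative sketch of the offset convention used by these
+ * terminals: a variable-length array is reached by adding the
+ * stored byte offset to the terminal base address, e.g.
+ *
+ *   const char *base = (const char *)terminal;
+ *   ia_css_fragment_grid_desc_t *frag =
+ *       (ia_css_fragment_grid_desc_t *)
+ *       (base + terminal->fragment_grid_desc_offset);
+ *
+ * "terminal" is a hypothetical ia_css_spatial_param_terminal_t
+ * pointer used only for illustration; the accessors in
+ * ia_css_terminal_impl.h perform exactly this arithmetic and
+ * should be used instead of open-coding it.
+ */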
+ uint16_t fragment_grid_desc_offset;
+};
+/* ==================== Spatial Param Terminal - END ==================== */
+
+
+/* ==================== Sliced Param Terminal - START ==================== */
+#define N_UINT32_IN_SLICE_PARAM_SECTION_DESC_STRUCT 2
+
+#define SIZE_OF_SLICE_PARAM_SECTION_DESC_STRUCT_BITS \
+ (N_UINT32_IN_SLICE_PARAM_SECTION_DESC_STRUCT * IA_CSS_UINT32_T_BITS)
+
+/* A Slice of parameters ready to be transferred from/to registers */
+struct ia_css_slice_param_section_desc_s {
+ /* Offset of the parameter allocation in memory */
+ uint32_t mem_offset;
+ /* Memory allocation size needs of this parameter */
+ uint32_t mem_size;
+};
+
+#define N_UINT16_IN_FRAGMENT_SLICE_DESC_STRUCT 2
+#define N_PADDING_UINT8_FRAGMENT_SLICE_DESC_STRUCT 4
+
+#define SIZE_OF_FRAGMENT_SLICE_DESC_STRUCT_BITS \
+ (N_UINT16_IN_FRAGMENT_SLICE_DESC_STRUCT * IA_CSS_UINT16_T_BITS \
+ + N_PADDING_UINT8_FRAGMENT_SLICE_DESC_STRUCT * IA_CSS_UINT8_T_BITS)
+
+struct ia_css_fragment_slice_desc_s {
+ /*
+ * Points to array of ia_css_slice_param_section_desc_t
+ * which contain info for each parameter slice
+ */
+ uint16_t slice_section_desc_offset;
+ /* Number of slices for the parameters for this fragment */
+ uint16_t slice_count;
+ uint8_t padding[N_PADDING_UINT8_FRAGMENT_SLICE_DESC_STRUCT];
+};
+
+#define N_UINT32_IN_SLICED_PARAM_TERMINAL_STRUCT 1
+#define N_UINT16_IN_SLICED_PARAM_TERMINAL_STRUCT 1
+#define N_PADDING_UINT8_SLICED_PARAM_TERMINAL_STRUCT 2
+
+#define SIZE_OF_SLICED_PARAM_TERM_STRUCT_BITS \
+ (SIZE_OF_TERMINAL_STRUCT_BITS \
+ + IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \
+ + N_UINT32_IN_SLICED_PARAM_TERMINAL_STRUCT * IA_CSS_UINT32_T_BITS \
+ + N_UINT16_IN_SLICED_PARAM_TERMINAL_STRUCT * IA_CSS_UINT16_T_BITS \
+ + N_PADDING_UINT8_SLICED_PARAM_TERMINAL_STRUCT * IA_CSS_UINT8_T_BITS)
+
+struct ia_css_sliced_param_terminal_s {
+ /* Spatial Parameter terminal base */
+ ia_css_terminal_t base;
+ /* Spatial Parameter buffer handle attached to the terminal */
+ ia_css_param_payload_t param_payload;
+ /* Kernel identifier */
+ uint32_t kernel_id;
+ /*
+ * Points to array of ia_css_fragment_slice_desc_t
+ * which contain info for the slicing of the parameters
+ */
+ uint16_t fragment_slice_desc_offset;
+ uint8_t padding[N_PADDING_UINT8_SLICED_PARAM_TERMINAL_STRUCT];
+};
+/* ==================== Sliced Param Terminal - END ==================== */
+
+
+/* ==================== Program Terminal - START ==================== */
+
+#define N_UINT32_IN_FRAG_PARAM_SEC_STRUCT 2
+
+#define SIZE_OF_FRAG_PARAM_SEC_STRUCT_BITS \
+ (N_UINT32_IN_FRAG_PARAM_SEC_STRUCT * IA_CSS_UINT32_T_BITS)
+
+/* Fragment constant parameters section */
+struct ia_css_fragment_param_section_desc_s {
+ /* Offset of the parameter allocation in memory */
+ uint32_t mem_offset;
+ /* Memory allocation size needs of this parameter */
+ uint32_t mem_size;
+};
+
+#define N_UINT16_IN_FRAG_SEQ_COMMAND_STRUCT IA_CSS_N_COMMAND_COUNT
+
+#define SIZE_OF_FRAG_SEQ_COMMANDS_STRUCT_BITS \
+ (N_UINT16_IN_FRAG_SEQ_COMMAND_STRUCT * IA_CSS_UINT16_T_BITS)
+
+/* 4 commands packed together to save memory space */
+struct ia_css_kernel_fragment_sequencer_command_desc_s {
+ /* Contains the "(command_index%4) == index" command desc */
+ uint16_t line_count[IA_CSS_N_COMMAND_COUNT];
+};
+
+#define N_UINT16_IN_FRAG_SEQ_INFO_STRUCT (5 * IA_CSS_N_DATA_DIMENSION + 2)
+
+#define SIZE_OF_FRAG_SEQ_INFO_STRUCT_BITS \
+ (N_UINT16_IN_FRAG_SEQ_INFO_STRUCT * IA_CSS_UINT16_T_BITS)
+
+struct ia_css_kernel_fragment_sequencer_info_desc_s {
+ /* Slice dimensions */
+ uint16_t
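+ /*
+ * Note on the command packing defined above: assuming
+ * IA_CSS_N_COMMAND_COUNT is 4, as the "4 commands packed
+ * together" comment suggests, command k of a sequencer is
+ * reached as
+ *
+ *   desc_base[k >> 2].line_count[k & 0x3]
+ *
+ * which is the indexing that
+ * ia_css_program_terminal_get_line_count() implements in
+ * ia_css_terminal_impl.h.
+ */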
fragment_grid_slice_dimension[IA_CSS_N_DATA_DIMENSION];
+ /* Nof slices */
+ uint16_t fragment_grid_slice_count[IA_CSS_N_DATA_DIMENSION];
+ /* Grid point decimation factor */
+ uint16_t
+ fragment_grid_point_decimation_factor[IA_CSS_N_DATA_DIMENSION];
+ /* Relative position of grid origin to pixel origin */
+ int16_t
+ fragment_grid_overlay_pixel_topleft_index[IA_CSS_N_DATA_DIMENSION];
+ /* Size of active fragment region */
+ int16_t
+ fragment_grid_overlay_pixel_dimension[IA_CSS_N_DATA_DIMENSION];
+ /* If >0 it overrides the standard fragment sequencer info */
+ uint16_t command_count;
+ /*
+ * To be used only if command_count>0, points to the descriptors
+ * for the commands (ia_css_kernel_fragment_sequencer_command_desc_s)
+ */
+ uint16_t command_desc_offset;
+};
+
+#define N_UINT16_IN_PROG_TERM_STRUCT 2
+#define N_PADDING_UINT8_IN_PROG_TERM_STRUCT 4
+
+#define SIZE_OF_PROG_TERM_STRUCT_BITS \
+ (SIZE_OF_TERMINAL_STRUCT_BITS \
+ + IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \
+ + N_UINT16_IN_PROG_TERM_STRUCT * IA_CSS_UINT16_T_BITS \
+ + N_PADDING_UINT8_IN_PROG_TERM_STRUCT * IA_CSS_UINT8_T_BITS)
+
+struct ia_css_program_terminal_s {
+ /* Program terminal base */
+ ia_css_terminal_t base;
+ /* Program terminal buffer handle attached to the terminal */
+ ia_css_param_payload_t param_payload;
+ /* Points to array of ia_css_fragment_param_section_desc_s */
+ uint16_t fragment_param_section_desc_offset;
+ /* Points to array of ia_css_kernel_fragment_sequencer_info_desc_s */
+ uint16_t kernel_fragment_sequencer_info_desc_offset;
+ /* align to 64 */
+ uint8_t padding[N_PADDING_UINT8_IN_PROG_TERM_STRUCT];
+};
+/* ==================== Program Terminal - END ==================== */
+
+#endif /* __IA_CSS_TERMINAL_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal.c
new file mode 100644
index 000000000000..683fb3a88cd8
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal.c
@@ -0,0 +1,20 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifdef __INLINE_PARAMETERS__
+#include "storage_class.h"
+STORAGE_CLASS_INLINE int __ia_css_param_avoid_warning_on_empty_file(void) { return 0; }
+#else /* __INLINE_PARAMETERS__ */
+#include "ia_css_terminal_impl.h"
+#endif /* __INLINE_PARAMETERS__ */
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_impl.h
new file mode 100644
index 000000000000..9ccf3931e8e3
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_impl.h
@@ -0,0 +1,495 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_IMPL_H +#define __IA_CSS_TERMINAL_IMPL_H + +#include "ia_css_terminal.h" +#include "ia_css_terminal_types.h" +#include "error_support.h" +#include "assert_support.h" +#include "storage_class.h" + +/* Param Terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_param_in_terminal_get_descriptor_size( + const unsigned int nof_sections) +{ + return sizeof(ia_css_param_terminal_t) + + nof_sections*sizeof(ia_css_param_section_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_param_section_desc_t *ia_css_param_in_terminal_get_param_section_desc( + const ia_css_param_terminal_t *param_terminal, + const unsigned int section_index) +{ + ia_css_param_section_desc_t *param_section_base; + ia_css_param_section_desc_t *param_section_desc = NULL; + + verifjmpexit(param_terminal != NULL); + + param_section_base = + (ia_css_param_section_desc_t *) + (((const char *)param_terminal) + + param_terminal->param_section_desc_offset); + param_section_desc = &(param_section_base[section_index]); + +EXIT: + return param_section_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_param_out_terminal_get_descriptor_size( + const unsigned int nof_sections, + const unsigned int nof_fragments) +{ + return sizeof(ia_css_param_terminal_t) + + nof_fragments*nof_sections*sizeof(ia_css_param_section_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_param_section_desc_t *ia_css_param_out_terminal_get_param_section_desc( + const ia_css_param_terminal_t *param_terminal, + const unsigned int section_index, + const unsigned int nof_sections, + const unsigned int fragment_index) +{ + ia_css_param_section_desc_t *param_section_base; + ia_css_param_section_desc_t *param_section_desc = NULL; + + verifjmpexit(param_terminal != NULL); + + param_section_base = + (ia_css_param_section_desc_t *) + (((const char *)param_terminal) + + param_terminal->param_section_desc_offset); + param_section_desc = + &(param_section_base[(nof_sections * fragment_index) + + section_index]); + +EXIT: + return param_section_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_param_terminal_create( + ia_css_param_terminal_t *param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal) +{ + if (param_terminal == NULL) { + return -EFAULT; + } + + if (terminal_offset > (1<<15)) { + return -EINVAL; + } + + param_terminal->base.terminal_type = + is_input_terminal ? 
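+ /*
+ * Usage sketch (hypothetical caller, names not defined here): a
+ * cached-in parameter terminal with n sections is built as
+ *
+ *   size = ia_css_param_in_terminal_get_descriptor_size(n);
+ *   ret = ia_css_param_terminal_create(t, offset, size, 1);
+ *   sec = ia_css_param_in_terminal_get_param_section_desc(t, 0);
+ *
+ * where "t" points to caller-allocated storage of at least "size"
+ * bytes and "offset" is the terminal's offset within its parent
+ * structure.
+ */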
+ IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN : + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT; + param_terminal->base.parent_offset = + 0 - ((int16_t)terminal_offset); + param_terminal->base.size = terminal_size; + param_terminal->param_section_desc_offset = + sizeof(ia_css_param_terminal_t); + + return 0; +} + +/* Spatial Param Terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_spatial_param_terminal_get_descriptor_size( + const unsigned int nof_frame_param_sections, + const unsigned int nof_fragments) +{ + return sizeof(ia_css_spatial_param_terminal_t) + + nof_frame_param_sections * sizeof( + ia_css_frame_grid_param_section_desc_t) + + nof_fragments * sizeof(ia_css_fragment_grid_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_fragment_grid_desc_t * +ia_css_spatial_param_terminal_get_fragment_grid_desc( + const ia_css_spatial_param_terminal_t *spatial_param_terminal, + const unsigned int fragment_index) +{ + ia_css_fragment_grid_desc_t *fragment_grid_desc_base; + ia_css_fragment_grid_desc_t *fragment_grid_desc = NULL; + + verifjmpexit(spatial_param_terminal != NULL); + + fragment_grid_desc_base = + (ia_css_fragment_grid_desc_t *) + (((const char *)spatial_param_terminal) + + spatial_param_terminal->fragment_grid_desc_offset); + fragment_grid_desc = &(fragment_grid_desc_base[fragment_index]); + +EXIT: + return fragment_grid_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_frame_grid_param_section_desc_t * +ia_css_spatial_param_terminal_get_frame_grid_param_section_desc( + const ia_css_spatial_param_terminal_t *spatial_param_terminal, + const unsigned int section_index) +{ + ia_css_frame_grid_param_section_desc_t * + frame_grid_param_section_base; + ia_css_frame_grid_param_section_desc_t * + frame_grid_param_section_desc = NULL; + + verifjmpexit(spatial_param_terminal != NULL); + + frame_grid_param_section_base = + (ia_css_frame_grid_param_section_desc_t *) + (((const char *)spatial_param_terminal) + + spatial_param_terminal->frame_grid_param_section_desc_offset); + frame_grid_param_section_desc = + &(frame_grid_param_section_base[section_index]); + +EXIT: + return frame_grid_param_section_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_spatial_param_terminal_create( + ia_css_spatial_param_terminal_t *spatial_param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal, + const unsigned int nof_fragments, + const uint32_t kernel_id) +{ + if (spatial_param_terminal == NULL) { + return -EFAULT; + } + + if (terminal_offset > (1<<15)) { + return -EINVAL; + } + + spatial_param_terminal->base.terminal_type = + is_input_terminal ? 
+ IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN :
+ IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT;
+ spatial_param_terminal->base.parent_offset =
+ 0 - ((int16_t)terminal_offset);
+ spatial_param_terminal->base.size = terminal_size;
+ spatial_param_terminal->kernel_id = kernel_id;
+ spatial_param_terminal->fragment_grid_desc_offset =
+ sizeof(ia_css_spatial_param_terminal_t);
+ spatial_param_terminal->frame_grid_param_section_desc_offset =
+ spatial_param_terminal->fragment_grid_desc_offset +
+ (nof_fragments * sizeof(ia_css_fragment_grid_desc_t));
+
+ return 0;
+}
+
+/* Sliced terminal */
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+unsigned int ia_css_sliced_param_terminal_get_descriptor_size(
+ const unsigned int nof_slice_param_sections,
+ const unsigned int nof_slices[],
+ const unsigned int nof_fragments)
+{
+ unsigned int descriptor_size = 0;
+ unsigned int fragment_index;
+ unsigned int nof_slices_total = 0;
+
+ verifjmpexit(nof_slices != NULL);
+
+ for (fragment_index = 0;
+ fragment_index < nof_fragments; fragment_index++) {
+ nof_slices_total += nof_slices[fragment_index];
+ }
+
+ descriptor_size =
+ sizeof(ia_css_sliced_param_terminal_t) +
+ nof_fragments*sizeof(ia_css_fragment_slice_desc_t) +
+ nof_slices_total*nof_slice_param_sections*sizeof(
+ ia_css_slice_param_section_desc_t);
+
+EXIT:
+ return descriptor_size;
+}
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+ia_css_fragment_slice_desc_t *
+ia_css_sliced_param_terminal_get_fragment_slice_desc(
+ const ia_css_sliced_param_terminal_t *sliced_param_terminal,
+ const unsigned int fragment_index
+)
+{
+ ia_css_fragment_slice_desc_t *fragment_slice_desc_base;
+ ia_css_fragment_slice_desc_t *fragment_slice_desc = NULL;
+
+ verifjmpexit(sliced_param_terminal != NULL);
+
+ fragment_slice_desc_base =
+ (ia_css_fragment_slice_desc_t *)
+ (((const char *)sliced_param_terminal) +
+ sliced_param_terminal->fragment_slice_desc_offset);
+ fragment_slice_desc = &(fragment_slice_desc_base[fragment_index]);
+
+EXIT:
+ return fragment_slice_desc;
+}
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+ia_css_slice_param_section_desc_t *
+ia_css_sliced_param_terminal_get_slice_param_section_desc(
+ const ia_css_sliced_param_terminal_t *sliced_param_terminal,
+ const unsigned int fragment_index,
+ const unsigned int slice_index,
+ const unsigned int section_index,
+ const unsigned int nof_slice_param_sections)
+{
+ ia_css_fragment_slice_desc_t *fragment_slice_desc;
+ ia_css_slice_param_section_desc_t *slice_param_section_desc_base;
+ ia_css_slice_param_section_desc_t *slice_param_section_desc = NULL;
+
+ fragment_slice_desc =
+ ia_css_sliced_param_terminal_get_fragment_slice_desc(
+ sliced_param_terminal,
+ fragment_index
+ );
+ verifjmpexit(fragment_slice_desc != NULL);
+
+ slice_param_section_desc_base =
+ (ia_css_slice_param_section_desc_t *)
+ (((const char *)sliced_param_terminal) +
+ fragment_slice_desc->slice_section_desc_offset);
+ slice_param_section_desc =
+ &(slice_param_section_desc_base[(
+ slice_index * nof_slice_param_sections) +
+ section_index]);
+
+EXIT:
+ return slice_param_section_desc;
+}
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+int ia_css_sliced_param_terminal_create(
+ ia_css_sliced_param_terminal_t *sliced_param_terminal,
+ const uint16_t terminal_offset,
+ const uint16_t terminal_size,
+ const uint16_t is_input_terminal,
+ const unsigned int nof_slice_param_sections,
+ const unsigned int nof_slices[],
+ const unsigned int nof_fragments,
+ const uint32_t kernel_id)
+{
+ unsigned int fragment_index;
+ unsigned int nof_slices_total = 0;
+
+ if (sliced_param_terminal ==
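+ /*
+ * Layout sketch: the descriptor initialised below is
+ *
+ *   [ia_css_sliced_param_terminal_t header]
+ *   [nof_fragments x ia_css_fragment_slice_desc_t]
+ *   [per fragment, nof_slices[i] * nof_slice_param_sections
+ *    ia_css_slice_param_section_desc_t entries]
+ *
+ * which matches ia_css_sliced_param_terminal_get_descriptor_size()
+ * above.
+ */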
NULL) { + return -EFAULT; + } + + if (terminal_offset > (1<<15)) { + return -EINVAL; + } + + sliced_param_terminal->base.terminal_type = + is_input_terminal ? + IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN : + IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT; + sliced_param_terminal->base.parent_offset = + 0 - ((int16_t)terminal_offset); + sliced_param_terminal->base.size = terminal_size; + sliced_param_terminal->kernel_id = kernel_id; + /* set here to use below to find the pointer */ + sliced_param_terminal->fragment_slice_desc_offset = + sizeof(ia_css_sliced_param_terminal_t); + for (fragment_index = 0; + fragment_index < nof_fragments; fragment_index++) { + ia_css_fragment_slice_desc_t *fragment_slice_desc = + ia_css_sliced_param_terminal_get_fragment_slice_desc( + sliced_param_terminal, + fragment_index); + /* + * Error handling not required at this point + * since everything has been constructed/validated just above + */ + fragment_slice_desc->slice_count = nof_slices[fragment_index]; + fragment_slice_desc->slice_section_desc_offset = + sliced_param_terminal->fragment_slice_desc_offset + + (nof_fragments * sizeof( + ia_css_fragment_slice_desc_t)) + + (nof_slices_total * nof_slice_param_sections * sizeof( + ia_css_slice_param_section_desc_t)); + nof_slices_total += nof_slices[fragment_index]; + } + + return 0; +} + +/* Program terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_program_terminal_get_descriptor_size( + const unsigned int nof_fragments, + const unsigned int nof_fragment_param_sections, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int nof_command_objs) +{ + return sizeof(ia_css_program_terminal_t) + + nof_fragments * nof_fragment_param_sections * + sizeof(ia_css_fragment_param_section_desc_t) + + nof_fragments * nof_kernel_fragment_sequencer_infos * + sizeof(ia_css_kernel_fragment_sequencer_info_desc_t) + + nof_command_objs * sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_fragment_param_section_desc_t * +ia_css_program_terminal_get_frgmnt_prm_sct_desc( + const ia_css_program_terminal_t *program_terminal, + const unsigned int fragment_index, + const unsigned int section_index, + const unsigned int nof_fragment_param_sections) +{ + ia_css_fragment_param_section_desc_t * + fragment_param_section_desc_base; + ia_css_fragment_param_section_desc_t * + fragment_param_section_desc = NULL; + + verifjmpexit(program_terminal != NULL); + verifjmpexit(section_index < nof_fragment_param_sections); + + fragment_param_section_desc_base = + (ia_css_fragment_param_section_desc_t *) + (((const char *)program_terminal) + + program_terminal->fragment_param_section_desc_offset); + fragment_param_section_desc = + &(fragment_param_section_desc_base[(fragment_index * + nof_fragment_param_sections) + section_index]); + +EXIT: + return fragment_param_section_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_kernel_fragment_sequencer_info_desc_t * +ia_css_program_terminal_get_kernel_frgmnt_seq_info_desc( + const ia_css_program_terminal_t *program_terminal, + const unsigned int fragment_index, + const unsigned int info_index, + const unsigned int nof_kernel_fragment_sequencer_infos) +{ + ia_css_kernel_fragment_sequencer_info_desc_t * + kernel_fragment_sequencer_info_desc_base; + ia_css_kernel_fragment_sequencer_info_desc_t * + kernel_fragment_sequencer_info_desc = NULL; + + verifjmpexit(program_terminal != NULL); + if (nof_kernel_fragment_sequencer_infos > 0) { + verifjmpexit(info_index < 
nof_kernel_fragment_sequencer_infos); + } + + kernel_fragment_sequencer_info_desc_base = + (ia_css_kernel_fragment_sequencer_info_desc_t *) + (((const char *)program_terminal) + + program_terminal->kernel_fragment_sequencer_info_desc_offset); + kernel_fragment_sequencer_info_desc = + &(kernel_fragment_sequencer_info_desc_base[(fragment_index * + nof_kernel_fragment_sequencer_infos) + info_index]); + +EXIT: + return kernel_fragment_sequencer_info_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_program_terminal_create( + ia_css_program_terminal_t *program_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const unsigned int nof_fragments, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int nof_command_objs) +{ + if (program_terminal == NULL) { + return -EFAULT; + } + + if (terminal_offset > (1<<15)) { + return -EINVAL; + } + + program_terminal->base.terminal_type = IA_CSS_TERMINAL_TYPE_PROGRAM; + program_terminal->base.parent_offset = 0-((int16_t)terminal_offset); + program_terminal->base.size = terminal_size; + program_terminal->kernel_fragment_sequencer_info_desc_offset = + sizeof(ia_css_program_terminal_t); + program_terminal->fragment_param_section_desc_offset = + program_terminal->kernel_fragment_sequencer_info_desc_offset + + (nof_fragments * nof_kernel_fragment_sequencer_infos * + sizeof(ia_css_kernel_fragment_sequencer_info_desc_t)) + + (nof_command_objs * sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t)); + + return 0; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_program_terminal_get_command_base_offset( + const ia_css_program_terminal_t *program_terminal, + const unsigned int nof_fragments, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int commands_slots_used, + uint16_t *command_desc_offset) +{ + if (command_desc_offset == NULL) { + return -EFAULT; + } + + *command_desc_offset = 0; + + if (program_terminal == NULL) { + return -EFAULT; + } + + *command_desc_offset = + program_terminal->kernel_fragment_sequencer_info_desc_offset + + (nof_fragments * nof_kernel_fragment_sequencer_infos * + sizeof(ia_css_kernel_fragment_sequencer_info_desc_t)) + + (commands_slots_used * sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t)); + + return 0; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +uint16_t *ia_css_program_terminal_get_line_count( + const ia_css_kernel_fragment_sequencer_command_desc_t + *kernel_fragment_sequencer_command_desc_base, + const unsigned int set_count) +{ + uint16_t *line_count = NULL; + + verifjmpexit(kernel_fragment_sequencer_command_desc_base != NULL); + line_count = + (uint16_t *)&(kernel_fragment_sequencer_command_desc_base[ + set_count >> 2].line_count[set_count & 0x00000003]); +EXIT: + return line_count; +} + +#endif /* __IA_CSS_TERMINAL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest.c b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest.c new file mode 100644 index 000000000000..53c4708c7fc9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest.c @@ -0,0 +1,20 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifdef __INLINE_PARAMETERS__ +#include "storage_class.h" +STORAGE_CLASS_INLINE int __ia_css_param_avoid_warning_on_empty_file(void) { return 0; } +#else /* __INLINE_PARAMETERS__ */ +#include "ia_css_terminal_manifest_impl.h" +#endif /* __INLINE_PARAMETERS__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest_impl.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest_impl.h new file mode 100644 index 000000000000..39734136b117 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest_impl.h @@ -0,0 +1,347 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_IMPL_H +#define __IA_CSS_TERMINAL_MANIFEST_IMPL_H + +#include "ia_css_terminal_manifest.h" +#include "error_support.h" +#include "assert_support.h" +#include "storage_class.h" + +STORAGE_CLASS_INLINE void __terminal_manifest_dummy_check_alignment(void) +{ + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_TERMINAL_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_param_terminal_manifest_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_param_terminal_manifest_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_TERMINAL_MANIFEST_SEC_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_param_manifest_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_param_manifest_section_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_SPATIAL_PARAM_TERM_MAN_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_spatial_param_terminal_manifest_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_spatial_param_terminal_manifest_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAME_GRID_PARAM_MAN_SEC_STRUCT_IN_BITS != + (CHAR_BIT * sizeof( + ia_css_frame_grid_param_manifest_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_frame_grid_param_manifest_section_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PROG_TERM_MAN_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_terminal_manifest_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_program_terminal_manifest_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAG_PARAM_MAN_SEC_STRUCT_IN_BITS != + (CHAR_BIT * sizeof( + ia_css_fragment_param_manifest_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_fragment_param_manifest_section_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_KERNEL_FRAG_SEQ_INFO_MAN_STRUCT_IN_BITS 
!=
+ (CHAR_BIT * sizeof(
+ ia_css_kernel_fragment_sequencer_info_manifest_desc_t))
+ );
+
+ COMPILATION_ERROR_IF(0 != sizeof(
+ ia_css_kernel_fragment_sequencer_info_manifest_desc_t) %
+ sizeof(uint64_t));
+
+ COMPILATION_ERROR_IF(
+ SIZE_OF_SLICED_TERMINAL_MANIFEST_STRUCT_IN_BITS !=
+ (CHAR_BIT * sizeof(ia_css_sliced_param_terminal_manifest_t)));
+
+ COMPILATION_ERROR_IF(0 !=
+ sizeof(ia_css_sliced_param_terminal_manifest_t) %
+ sizeof(uint64_t));
+
+ COMPILATION_ERROR_IF(
+ SIZE_OF_SLICED_PARAM_MAN_SEC_STRUCT_IN_BITS !=
+ (CHAR_BIT * sizeof
+ (ia_css_sliced_param_manifest_section_desc_t)));
+
+ COMPILATION_ERROR_IF(0 !=
+ sizeof(ia_css_sliced_param_manifest_section_desc_t) %
+ sizeof(uint64_t));
+}
+
+/* Parameter Terminal */
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+unsigned int ia_css_param_terminal_manifest_get_size(
+ const unsigned int nof_sections)
+{
+
+ return sizeof(ia_css_param_terminal_manifest_t) +
+ nof_sections*sizeof(ia_css_param_manifest_section_desc_t);
+}
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+int ia_css_param_terminal_manifest_init(
+ ia_css_param_terminal_manifest_t *param_terminal,
+ const uint16_t section_count)
+{
+ if (param_terminal == NULL) {
+ return -EFAULT;
+ }
+
+ param_terminal->param_manifest_section_desc_count = section_count;
+ param_terminal->param_manifest_section_desc_offset = sizeof(
+ ia_css_param_terminal_manifest_t);
+
+ return 0;
+}
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+ia_css_param_manifest_section_desc_t *
+ia_css_param_terminal_manifest_get_prm_sct_desc(
+ const ia_css_param_terminal_manifest_t *param_terminal_manifest,
+ const unsigned int section_index)
+{
+ ia_css_param_manifest_section_desc_t *param_manifest_section_base;
+ ia_css_param_manifest_section_desc_t *
+ param_manifest_section_desc = NULL;
+
+ verifjmpexit(param_terminal_manifest != NULL);
+
+ param_manifest_section_base =
+ (ia_css_param_manifest_section_desc_t *)
+ (((const char *)param_terminal_manifest) +
+ param_terminal_manifest->param_manifest_section_desc_offset);
+
+ param_manifest_section_desc =
+ &(param_manifest_section_base[section_index]);
+
+EXIT:
+ return param_manifest_section_desc;
+}
+
+/* Spatial Parameter Terminal */
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+unsigned int ia_css_spatial_param_terminal_manifest_get_size(
+ const unsigned int nof_frame_param_sections)
+{
+ return sizeof(ia_css_spatial_param_terminal_manifest_t) +
+ nof_frame_param_sections * sizeof(
+ ia_css_frame_grid_param_manifest_section_desc_t);
+}
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+int ia_css_spatial_param_terminal_manifest_init(
+ ia_css_spatial_param_terminal_manifest_t *spatial_param_terminal,
+ const uint16_t section_count)
+{
+ if (spatial_param_terminal == NULL) {
+ return -EFAULT;
+ }
+
+ spatial_param_terminal->
+ frame_grid_param_manifest_section_desc_count = section_count;
+ spatial_param_terminal->
+ frame_grid_param_manifest_section_desc_offset =
+ sizeof(ia_css_spatial_param_terminal_manifest_t);
+
+ return 0;
+}
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+ia_css_frame_grid_param_manifest_section_desc_t *
+ia_css_spatial_param_terminal_manifest_get_frm_grid_prm_sct_desc(
+ const ia_css_spatial_param_terminal_manifest_t *
+ spatial_param_terminal_manifest,
+ const unsigned int section_index)
+{
+ ia_css_frame_grid_param_manifest_section_desc_t *
+ frame_param_manifest_section_base;
+ ia_css_frame_grid_param_manifest_section_desc_t *
+ frame_param_manifest_section_desc = NULL;
+
+ verifjmpexit(spatial_param_terminal_manifest != NULL);
+
+ frame_param_manifest_section_base =
+ (ia_css_frame_grid_param_manifest_section_desc_t *)
+ (((const char *)spatial_param_terminal_manifest) +
+ spatial_param_terminal_manifest->
+ frame_grid_param_manifest_section_desc_offset);
+ frame_param_manifest_section_desc =
+ &(frame_param_manifest_section_base[section_index]);
+
+EXIT:
+ return frame_param_manifest_section_desc;
+}
+
+/* Sliced Terminal */
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+unsigned int ia_css_sliced_param_terminal_manifest_get_size(
+ const unsigned int nof_slice_param_sections)
+{
+ return sizeof(ia_css_sliced_param_terminal_manifest_t) +
+ nof_slice_param_sections *
+ sizeof(ia_css_sliced_param_manifest_section_desc_t);
+}
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+int ia_css_sliced_param_terminal_manifest_init(
+ ia_css_sliced_param_terminal_manifest_t *sliced_param_terminal,
+ const uint16_t section_count)
+{
+ if (sliced_param_terminal == NULL) {
+ return -EFAULT;
+ }
+
+ sliced_param_terminal->sliced_param_section_count = section_count;
+ sliced_param_terminal->sliced_param_section_offset =
+ sizeof(ia_css_sliced_param_terminal_manifest_t);
+
+ return 0;
+}
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+ia_css_sliced_param_manifest_section_desc_t *
+ia_css_sliced_param_terminal_manifest_get_sliced_prm_sct_desc(
+ const ia_css_sliced_param_terminal_manifest_t *
+ sliced_param_terminal_manifest,
+ const unsigned int section_index)
+{
+ ia_css_sliced_param_manifest_section_desc_t *
+ sliced_param_manifest_section_base;
+ ia_css_sliced_param_manifest_section_desc_t *
+ sliced_param_manifest_section_desc = NULL;
+
+ verifjmpexit(sliced_param_terminal_manifest != NULL);
+
+ sliced_param_manifest_section_base =
+ (ia_css_sliced_param_manifest_section_desc_t *)
+ (((const char *)sliced_param_terminal_manifest) +
+ sliced_param_terminal_manifest->
+ sliced_param_section_offset);
+ sliced_param_manifest_section_desc =
+ &(sliced_param_manifest_section_base[section_index]);
+
+EXIT:
+ return sliced_param_manifest_section_desc;
+}
+
+/* Program Terminal */
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+unsigned int ia_css_program_terminal_manifest_get_size(
+ const unsigned int nof_fragment_param_sections,
+ const unsigned int nof_kernel_fragment_sequencer_infos)
+{
+ return sizeof(ia_css_program_terminal_manifest_t) +
+ nof_fragment_param_sections *
+ sizeof(ia_css_fragment_param_manifest_section_desc_t) +
+ nof_kernel_fragment_sequencer_infos *
+ sizeof(ia_css_kernel_fragment_sequencer_info_manifest_desc_t);
+}
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+int ia_css_program_terminal_manifest_init(
+ ia_css_program_terminal_manifest_t *program_terminal,
+ const uint16_t fragment_param_section_count,
+ const uint16_t kernel_fragment_seq_info_section_count)
+{
+ if (program_terminal == NULL) {
+ return -EFAULT;
+ }
+
+ program_terminal->fragment_param_manifest_section_desc_count =
+ fragment_param_section_count;
+ program_terminal->fragment_param_manifest_section_desc_offset =
+ sizeof(ia_css_program_terminal_manifest_t);
+
+ program_terminal->kernel_fragment_sequencer_info_manifest_info_count =
+ kernel_fragment_seq_info_section_count;
+ program_terminal->kernel_fragment_sequencer_info_manifest_info_offset =
+ sizeof(ia_css_program_terminal_manifest_t) +
+ fragment_param_section_count*sizeof(
+ ia_css_fragment_param_manifest_section_desc_t);
+
+ return 0;
+}
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+ia_css_fragment_param_manifest_section_desc_t *
+ia_css_program_terminal_manifest_get_frgmnt_prm_sct_desc(
+ const ia_css_program_terminal_manifest_t *program_terminal_manifest,
+ const unsigned int
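+ /*
+ * Layout recap (illustration): after
+ * ia_css_program_terminal_manifest_init(m, n_sec, n_seq) the
+ * manifest is laid out as
+ *
+ *   [ia_css_program_terminal_manifest_t header]
+ *   [n_sec x ia_css_fragment_param_manifest_section_desc_t]
+ *   [n_seq x ia_css_kernel_fragment_sequencer_info_manifest_desc_t]
+ *
+ * and this accessor and the one below simply index into those
+ * arrays.
+ */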
section_index) +{ + ia_css_fragment_param_manifest_section_desc_t * + fragment_param_manifest_section_base; + ia_css_fragment_param_manifest_section_desc_t * + fragment_param_manifest_section = NULL; + + verifjmpexit(program_terminal_manifest != NULL); + + fragment_param_manifest_section_base = + (ia_css_fragment_param_manifest_section_desc_t *) + (((const char *)program_terminal_manifest) + + program_terminal_manifest-> + fragment_param_manifest_section_desc_offset); + fragment_param_manifest_section = + &(fragment_param_manifest_section_base[section_index]); + +EXIT: + return fragment_param_manifest_section; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_kernel_fragment_sequencer_info_manifest_desc_t * +ia_css_program_terminal_manifest_get_kernel_frgmnt_seq_info_desc( + const ia_css_program_terminal_manifest_t *program_terminal_manifest, + const unsigned int info_index) +{ + ia_css_kernel_fragment_sequencer_info_manifest_desc_t * + kernel_manifest_fragment_sequencer_info_manifest_desc_base; + ia_css_kernel_fragment_sequencer_info_manifest_desc_t * + kernel_manifest_fragment_sequencer_info_manifest_desc = NULL; + + verifjmpexit(program_terminal_manifest != NULL); + + kernel_manifest_fragment_sequencer_info_manifest_desc_base = + (ia_css_kernel_fragment_sequencer_info_manifest_desc_t *) + (((const char *)program_terminal_manifest) + + program_terminal_manifest-> + kernel_fragment_sequencer_info_manifest_info_offset); + + kernel_manifest_fragment_sequencer_info_manifest_desc = + &(kernel_manifest_fragment_sequencer_info_manifest_desc_base[ + info_index]); + +EXIT: + return kernel_manifest_fragment_sequencer_info_manifest_desc; +} + +#endif /* __IA_CSS_TERMINAL_MANIFEST_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/vied_parameters.mk b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/vied_parameters.mk new file mode 100644 index 000000000000..5e8903340f2e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/lib/vied_parameters/vied_parameters.mk @@ -0,0 +1,77 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for
+# more details.
+#
+#
+# MODULE is VIED_PARAMETERS
+
+VIED_PARAMETERS_DIR=$${MODULES_DIR}/vied_parameters
+
+VIED_PARAMETERS_INTERFACE=$(VIED_PARAMETERS_DIR)/interface
+VIED_PARAMETERS_SOURCES=$(VIED_PARAMETERS_DIR)/src
+VIED_PARAMETERS_EXTINCLUDE = $${MODULES_DIR}/support
+
+VIED_PARAMETERS_DYNAMIC_HOST_FILES += $(VIED_PARAMETERS_SOURCES)/ia_css_terminal.c
+VIED_PARAMETERS_STATIC_HOST_FILES += $(VIED_PARAMETERS_SOURCES)/ia_css_terminal_manifest.c
+
+VIED_PARAMETERS_HOST_FILES = $(VIED_PARAMETERS_DYNAMIC_HOST_FILES)
+VIED_PARAMETERS_HOST_FILES += $(VIED_PARAMETERS_STATIC_HOST_FILES)
+
+VIED_PARAMETERS_ISA_CLIENT_HOST_FILES = $(VIED_PARAMETERS_SOURCES)/ia_css_isys_process_group.c
+VIED_PARAMETERS_ISA_CLIENT_HOST_FILES += $(VIED_PARAMETERS_DIR)/client/ia_css_isys_parameter_client.c
+
+VIED_PARAMETERS_DYNAMIC_FW_FILES += $(VIED_PARAMETERS_SOURCES)/ia_css_terminal.c
+VIED_PARAMETERS_STATIC_FW_FILES += $(VIED_PARAMETERS_SOURCES)/ia_css_terminal_manifest.c
+
+VIED_PARAMETERS_FW_FILES = $(VIED_PARAMETERS_DYNAMIC_HOST_FILES)
+VIED_PARAMETERS_FW_FILES += $(VIED_PARAMETERS_STATIC_HOST_FILES)
+VIED_PARAMETERS_SUPPORT_CPPFLAGS = -I$(VIED_PARAMETERS_DIR)/support
+VIED_PARAMETERS_SUPPORT_CPPFLAGS += -I$(VIED_PARAMETERS_DIR)/support/$(IPU_SYSVER)
+VIED_PARAMETERS_ISA_CLIENT_HOST_CPPFLAGS = -I$(VIED_PARAMETERS_DIR)/client
+VIED_PARAMETERS_PSA_UTILS_HOST_FILES = $(MODULES_DIR)/vied_parameters/support/ia_css_psys_parameter_utils.c
+VIED_PARAMETERS_PSA_UTILS_HOST_FILES += $(MODULES_DIR)/vied_parameters/support/$(IPU_SYSVER)/ia_css_psys_parameter_utils_dep.c
+
+VIED_PARAMETERS_UTILS_HOST_CPPFLAGS = $(VIED_PARAMETERS_SUPPORT_CPPFLAGS)
+
+VIED_PARAMETERS_ISA_UTILS_HOST_FILES = $(MODULES_DIR)/vied_parameters/support/ia_css_isys_parameter_utils.c
+VIED_PARAMETERS_ISA_UTILS_HOST_FILES += $(MODULES_DIR)/vied_parameters/support/$(IPU_SYSVER)/ia_css_isys_parameter_utils_dep.c
+
+VIED_PARAMETERS_PRINT_CPPFLAGS += -I$(VIED_PARAMETERS_DIR)/print/interface
+VIED_PARAMETERS_PRINT_FILES += $(VIED_PARAMETERS_DIR)/print/src/ia_css_terminal_print.c
+
+# VIED_PARAMETERS Trace Log Level = VIED_PARAMETERS_TRACE_LOG_LEVEL_NORMAL
+# Other options are [VIED_PARAMETERS_TRACE_LOG_LEVEL_OFF, VIED_PARAMETERS_TRACE_LOG_LEVEL_DEBUG]
+ifndef VIED_PARAMETERS_TRACE_CONFIG_HOST
+ VIED_PARAMETERS_TRACE_CONFIG_HOST=VIED_PARAMETERS_TRACE_LOG_LEVEL_NORMAL
+endif
+ifndef VIED_PARAMETERS_TRACE_CONFIG_FW
+ VIED_PARAMETERS_TRACE_CONFIG_FW=VIED_PARAMETERS_TRACE_LOG_LEVEL_NORMAL
+endif
+
+VIED_PARAMETERS_HOST_CPPFLAGS += -DVIED_PARAMETERS_TRACE_CONFIG=$(VIED_PARAMETERS_TRACE_CONFIG_HOST)
+VIED_PARAMETERS_FW_CPPFLAGS += -DVIED_PARAMETERS_TRACE_CONFIG=$(VIED_PARAMETERS_TRACE_CONFIG_FW)
+
+VIED_PARAMETERS_HOST_CPPFLAGS += -I$(VIED_PARAMETERS_INTERFACE)
+VIED_PARAMETERS_HOST_CPPFLAGS += -I$(VIED_PARAMETERS_SOURCES)
+VIED_PARAMETERS_HOST_CPPFLAGS += -I$(VIED_PARAMETERS_EXTINCLUDE)
+VIED_PARAMETERS_HOST_CPPFLAGS += $(VIED_PARAMETERS_SUPPORT_CPPFLAGS)
+VIED_PARAMETERS_FW_CPPFLAGS += -I$(VIED_PARAMETERS_INTERFACE)
+VIED_PARAMETERS_FW_CPPFLAGS += -I$(VIED_PARAMETERS_SOURCES)
+VIED_PARAMETERS_FW_CPPFLAGS += -I$(VIED_PARAMETERS_EXTINCLUDE)
+VIED_PARAMETERS_FW_CPPFLAGS += $(VIED_PARAMETERS_SUPPORT_CPPFLAGS)
+
+#For IPU interface
+include $(MODULES_DIR)/fw_abi_common_types/cpu/fw_abi_cpu_types.mk
+VIED_PARAMETERS_HOST_CPPFLAGS += $(FW_ABI_COMMON_TYPES_HOST_CPPFLAGS)
+
+VIED_PARAMETERS_FW_CPPFLAGS += $(FW_ABI_COMMON_TYPES_FW_CPPFLAGS)
diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/libcsspsys2600.c
b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/libcsspsys2600.c new file mode 100644 index 000000000000..ee13b8e61cbb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/libcsspsys2600.c @@ -0,0 +1,472 @@ +/* + * Copyright (c) 2015--2018 Intel Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include + +#include "ipu.h" +#include "ipu-mmu.h" +#include "ipu-psys.h" +#include "ipu-fw-psys.h" +#include "ipu-wrapper.h" +#include "libcsspsys2600.h" + +#include +#include +#include +#include +#include + +int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd) +{ + return -ia_css_process_group_start((ia_css_process_group_t *) + kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_start); + +int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd) +{ + return -ia_css_process_group_disown((ia_css_process_group_t *) + kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_disown); + +int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd) +{ + int rval; + + rval = ia_css_process_group_stop((ia_css_process_group_t *) + kcmd->kpg->pg); + if (rval) { + dev_err(&kcmd->fh->psys->adev->dev, + "failed to abort kcmd!\n"); + kcmd->pg_user = NULL; + rval = -EIO; + /* TODO: need to reset PSYS by power cycling it */ + } + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_abort); + +int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd) +{ + return -ia_css_process_group_submit((ia_css_process_group_t *) + kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_submit); + +static void *syscom_buffer; +static struct ia_css_syscom_config *syscom_config; +static struct ia_css_psys_server_init *server_init; + +int ipu_fw_psys_rcv_event(struct ipu_psys *psys, + struct ipu_fw_psys_event *event) +{ + return ia_css_psys_event_queue_receive(psys_syscom, + IA_CSS_PSYS_EVENT_QUEUE_MAIN_ID, + (struct ia_css_psys_event_s *)event); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_rcv_event); + +int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, + int terminal_idx, + struct ipu_psys_kcmd *kcmd, + u32 buffer, + unsigned size) +{ + ia_css_terminal_type_t type; + u32 buffer_state; + + type = ia_css_terminal_get_type((ia_css_terminal_t *)terminal); + + switch (type) { + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT: + case IA_CSS_TERMINAL_TYPE_PROGRAM: + buffer_state = IA_CSS_BUFFER_UNDEFINED; + break; + case IA_CSS_TERMINAL_TYPE_PARAM_STREAM: + case IA_CSS_TERMINAL_TYPE_DATA_IN: + case IA_CSS_TERMINAL_TYPE_STATE_IN: + buffer_state = IA_CSS_BUFFER_FULL; + break; + case IA_CSS_TERMINAL_TYPE_DATA_OUT: + case IA_CSS_TERMINAL_TYPE_STATE_OUT: + buffer_state = IA_CSS_BUFFER_EMPTY; + break; + default: + dev_err(&kcmd->fh->psys->adev->dev, + "unknown terminal type: 0x%x\n", type); + return -EAGAIN; + } + + if (type == IA_CSS_TERMINAL_TYPE_DATA_IN || + type == IA_CSS_TERMINAL_TYPE_DATA_OUT) { + ia_css_frame_t *frame; + + if 
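+ /*
+ * Data terminals carry a frame object: the connection type is
+ * switched to memory and the frame is sized to the attached
+ * buffer before the buffer is handed to the process group.
+ */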
(ia_css_data_terminal_set_connection_type( + (ia_css_data_terminal_t *)terminal, + IA_CSS_CONNECTION_MEMORY)) + return -EIO; + frame = ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + if (!frame) + return -EIO; + + if (ia_css_frame_set_data_bytes(frame, size)) + return -EIO; + } + + return -ia_css_process_group_attach_buffer( + (ia_css_process_group_t *)kcmd->kpg->pg, buffer, + buffer_state, terminal_idx); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_terminal_set); + +void ipu_fw_psys_pg_dump(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd, + const char *note) +{ + ia_css_process_group_t *pg = (ia_css_process_group_t *)kcmd->kpg->pg; + ia_css_program_group_ID_t pgid = + ia_css_process_group_get_program_group_ID(pg); + uint8_t processes = ia_css_process_group_get_process_count( + (ia_css_process_group_t *)kcmd->kpg->pg); + unsigned int p; + + dev_dbg(&psys->adev->dev, "%s %s pgid %i processes %i\n", + __func__, note, pgid, processes); + for (p = 0; p < processes; p++) { + ia_css_process_t *process = + ia_css_process_group_get_process(pg, p); + + dev_dbg(&psys->adev->dev, + "%s pgid %i process %i cell %i dev_chn: ext0 %i ext1r %i ext1w %i int %i ipfd %i isa %i\n", + __func__, pgid, p, + ia_css_process_get_cell(process), + ia_css_process_get_dev_chn(process, + VIED_NCI_DEV_CHN_DMA_EXT0_ID), + ia_css_process_get_dev_chn(process, + VIED_NCI_DEV_CHN_DMA_EXT1_READ_ID), + ia_css_process_get_dev_chn(process, + VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_ID), + ia_css_process_get_dev_chn(process, + VIED_NCI_DEV_CHN_DMA_INTERNAL_ID), + ia_css_process_get_dev_chn(process, + VIED_NCI_DEV_CHN_DMA_IPFD_ID), + ia_css_process_get_dev_chn(process, + VIED_NCI_DEV_CHN_DMA_ISA_ID)); + } +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_dump); + +int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_program_group_ID( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_id); + +int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_terminal_count( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_terminal_count); + +int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_size((ia_css_process_group_t *) + kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_size); + +int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, + dma_addr_t vaddress) +{ + return ia_css_process_group_set_ipu_vaddress((ia_css_process_group_t *) + kcmd->kpg->pg, vaddress); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_set_ipu_vaddress); + +int ipu_fw_psys_pg_load_cycles(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_pg_load_cycles( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_load_cycles); + +int ipu_fw_psys_pg_init_cycles(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_pg_init_cycles( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_init_cycles); + +int ipu_fw_psys_pg_processing_cycles(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_pg_processing_cycles( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_processing_cycles); + +struct ipu_fw_psys_terminal * +ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd *kcmd, int index) +{ + return (struct ipu_fw_psys_terminal *)ia_css_process_group_get_terminal( + (ia_css_process_group_t *)kcmd->kpg->pg, index); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_terminal); + +void 
ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token) +{ + ia_css_process_group_set_token((ia_css_process_group_t *)kcmd->kpg->pg, + token); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_set_token); + +int ipu_fw_psys_pg_get_protocol( + struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_protocol_version( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_protocol); + +int ipu_fw_psys_open(struct ipu_psys *psys) +{ + bool opened; + int retry = IPU_PSYS_OPEN_RETRY; + + ipu_wrapper_init(PSYS_MMID, &psys->adev->dev, + psys->pdata->base); + + server_init->icache_prefetch_sp = psys->icache_prefetch_sp; + server_init->icache_prefetch_isp = psys->icache_prefetch_isp; + + psys_syscom = ia_css_psys_open(syscom_buffer, syscom_config); + if (!psys_syscom) { + dev_err(&psys->adev->dev, + "psys library open failed\n"); + return -ENODEV; + } + do { + opened = ia_css_psys_open_is_ready(psys_syscom); + if (opened) + break; + usleep_range(IPU_PSYS_OPEN_TIMEOUT_US, + IPU_PSYS_OPEN_TIMEOUT_US + 10); + retry--; + } while (retry > 0); + + if (!retry && !opened) { + dev_err(&psys->adev->dev, + "psys library open ready failed\n"); + ia_css_psys_close(psys_syscom); + ia_css_psys_release(psys_syscom, 1); + psys_syscom = NULL; + return -ENODEV; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_open); + +int ipu_fw_psys_close(struct ipu_psys *psys) +{ + int rval; + unsigned int retry = IPU_PSYS_CLOSE_TIMEOUT; + + if (!psys_syscom) + return 0; + + if (ia_css_psys_close(psys_syscom)) { + dev_err(&psys->adev->dev, + "psys library close ready failed\n"); + return 0; + } + + do { + rval = ia_css_psys_release(psys_syscom, 0); + if (rval && rval != -EBUSY) { + dev_dbg(&psys->adev->dev, "psys library release failed\n"); + break; + } + usleep_range(IPU_PSYS_CLOSE_TIMEOUT_US, + IPU_PSYS_CLOSE_TIMEOUT_US + 10); + } while (rval && --retry); + + psys_syscom = NULL; + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_close); + +u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd) +{ + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_token); + +static const struct ipu_fw_resource_definitions default_defs = { + .cells = vied_nci_cell_type, + .num_cells = VIED_NCI_N_CELL_ID, + .num_cells_type = VIED_NCI_N_CELL_TYPE_ID, + .dev_channels = vied_nci_dev_chn_size, + .num_dev_channels = VIED_NCI_N_DEV_CHN_ID, + + .num_ext_mem_types = VIED_NCI_N_DATA_MEM_TYPE_ID, + .num_ext_mem_ids = VIED_NCI_N_MEM_ID, + .ext_mem_ids = vied_nci_mem_size, + + .cell_mem_row = VIED_NCI_N_MEM_TYPE_ID, + .cell_mem = (enum ipu_mem_id *)vied_nci_cell_mem, +}; + +const struct ipu_fw_resource_definitions *res_defs = &default_defs; +EXPORT_SYMBOL_GPL(res_defs); + +int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, + u8 value) +{ + return ia_css_process_set_cell((ia_css_process_t *)ptr, + (vied_nci_cell_ID_t)value); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_set_process_cell_id); + +u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index) +{ + return ia_css_process_get_cell((ia_css_process_t *)ptr); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_get_process_cell_id); + +int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr) +{ + return ia_css_process_clear_cell((ia_css_process_t *)ptr); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_clear_process_cell); + +int ipu_fw_psys_set_process_dev_chn_offset(struct ipu_fw_psys_process *ptr, + u16 offset, u16 value) +{ + return ia_css_process_set_dev_chn((ia_css_process_t *)ptr, + (vied_nci_dev_chn_ID_t)offset, + (vied_nci_resource_size_t)value); +} 
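+/*
+ * Illustration (hypothetical caller): the wrappers in this block are
+ * thin casts over the ia_css_process_* setters, so programming a DMA
+ * channel for a process looks like
+ *
+ *   struct ipu_fw_psys_process *p;   /- obtained from the process group
+ *   ipu_fw_psys_set_process_dev_chn_offset(p, chn, value);
+ *
+ * with "chn" bounded by res_defs->num_dev_channels.
+ */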
+EXPORT_SYMBOL_GPL(ipu_fw_psys_set_process_dev_chn_offset); + +int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, + u16 type_id, u16 mem_id, u16 offset) +{ + return ia_css_process_set_ext_mem((ia_css_process_t *)ptr, mem_id, offset); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_set_process_ext_mem); + +int ipu_fw_psys_get_program_manifest_by_process( + struct ipu_fw_generic_program_manifest *gen_pm, + const struct ipu_fw_psys_program_group_manifest *pg_manifest, + struct ipu_fw_psys_process *process) +{ + ia_css_program_ID_t process_id = + ia_css_process_get_program_ID( + (const ia_css_process_t *)process); + int programs = + ia_css_program_group_manifest_get_program_count( + (const ia_css_program_group_manifest_t *)pg_manifest); + int i; + + for (i = 0; i < programs; i++) { + ia_css_program_ID_t program_id; + ia_css_program_manifest_t *pm = + ia_css_program_group_manifest_get_prgrm_mnfst( + (const ia_css_program_group_manifest_t *) + pg_manifest, i); + if (!pm) + continue; + program_id = ia_css_program_manifest_get_program_ID(pm); + if (program_id == process_id) { + gen_pm->dev_chn_size = (u16 *)pm->dev_chn_size; + gen_pm->ext_mem_size = (u16 *)pm->ext_mem_size; + gen_pm->cell_id = pm->cell_id; + gen_pm->cell_type_id = pm->cell_type_id; + return 0; + } + } + return -ENOENT; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_get_program_manifest_by_process); + +static int __init libcsspsys2600_init(void) +{ + int rval; + + syscom_buffer = kzalloc(ia_css_sizeof_psys(NULL), GFP_KERNEL); + if (!syscom_buffer) + return -ENOMEM; + + syscom_config = kzalloc(sizeof(struct ia_css_syscom_config), + GFP_KERNEL); + if (!syscom_config) { + rval = -ENOMEM; + goto out_syscom_buffer_free; + } + + server_init = kzalloc(sizeof(struct ia_css_psys_server_init), + GFP_KERNEL); + if (!server_init) { + rval = -ENOMEM; + goto out_syscom_config_free; + } + + server_init->ddr_pkg_dir_address = 0; + server_init->host_ddr_pkg_dir = 0; + server_init->pkg_dir_size = 0; + + *syscom_config = *ia_css_psys_specify(); + syscom_config->specific_addr = server_init; + syscom_config->specific_size = sizeof(struct ia_css_psys_server_init); + syscom_config->ssid = PSYS_SSID; + syscom_config->mmid = PSYS_MMID; + syscom_config->regs_addr = ipu_device_cell_memory_address(SPC0, + IPU_DEVICE_SP2600_CONTROL_REGS); + syscom_config->dmem_addr = ipu_device_cell_memory_address(SPC0, + IPU_DEVICE_SP2600_CONTROL_DMEM); + + return 0; + +out_syscom_config_free: + kfree(syscom_config); +out_syscom_buffer_free: + kfree(syscom_buffer); + + return rval; +} + +static void __exit libcsspsys2600_exit(void) +{ + kfree(syscom_buffer); + kfree(syscom_config); + kfree(server_init); +} + +module_init(libcsspsys2600_init); +module_exit(libcsspsys2600_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu psys css library"); diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/libcsspsys2600.h b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/libcsspsys2600.h new file mode 100644 index 000000000000..b8d790f56180 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/lib2600psys/libcsspsys2600.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2015--2018 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef LIBCSSPSYS2600_H +#define LIBCSSPSYS2600_H + +#include +#include +#include +#include +#include +#include +#include + +extern struct ia_css_syscom_context *psys_syscom; +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4-css/libintel-ipu4.c b/drivers/media/pci/intel/ipu4/ipu4-css/libintel-ipu4.c new file mode 100644 index 000000000000..a7128898e449 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-css/libintel-ipu4.c @@ -0,0 +1,392 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2014 - 2018 Intel Corporation + +#include +#include +#include +#include "ipu-isys.h" +#include "ipu-wrapper.h" +#include + +#include "ipu-platform.h" + +#define ipu_lib_call_notrace_unlocked(func, isys, ...) \ + ({ \ + int rval; \ + \ + rval = -ia_css_isys_##func((isys)->fwcom, ##__VA_ARGS__); \ + \ + rval; \ + }) + +#define ipu_lib_call_notrace(func, isys, ...) \ + ({ \ + int rval; \ + \ + mutex_lock(&(isys)->lib_mutex); \ + \ + rval = ipu_lib_call_notrace_unlocked( \ + func, isys, ##__VA_ARGS__); \ + \ + mutex_unlock(&(isys)->lib_mutex); \ + \ + rval; \ + }) + +#define ipu_lib_call(func, isys, ...) \ + ({ \ + int rval; \ + dev_dbg(&(isys)->adev->dev, "hostlib: libcall %s\n", #func); \ + rval = ipu_lib_call_notrace(func, isys, ##__VA_ARGS__); \ + \ + rval; \ + }) + +static int wrapper_init_done; + +int ipu_fw_isys_close(struct ipu_isys *isys) +{ + struct device *dev = &isys->adev->dev; + int timeout = IPU_ISYS_TURNOFF_TIMEOUT; + int rval; + unsigned long flags; + + /* + * Ask library to stop the isys fw. Actual close takes + * some time as the FW must stop its actions including code fetch + * to SP icache. + */ + spin_lock_irqsave(&isys->power_lock, flags); + rval = ipu_lib_call(device_close, isys); + spin_unlock_irqrestore(&isys->power_lock, flags); + if (rval) + dev_err(dev, "Device close failure: %d\n", rval); + + /* release probably fails if the close failed. Let's try still */ + do { + usleep_range(IPU_ISYS_TURNOFF_DELAY_US, + 2 * IPU_ISYS_TURNOFF_DELAY_US); + rval = ipu_lib_call_notrace(device_release, isys, 0); + timeout--; + } while (rval != 0 && timeout); + + /* Spin lock to wait the interrupt handler to be finished */ + spin_lock_irqsave(&isys->power_lock, flags); + if (!rval) + isys->fwcom = NULL; /* No further actions needed */ + else + dev_err(dev, "Device release time out %d\n", rval); + spin_unlock_irqrestore(&isys->power_lock, flags); + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_close); + +int ipu_fw_isys_init(struct ipu_isys *isys, + unsigned int num_streams) +{ + int retry = IPU_ISYS_OPEN_RETRY; + unsigned int i; + + struct ia_css_isys_device_cfg_data isys_cfg = { + .driver_sys = { + .ssid = ISYS_SSID, + .mmid = ISYS_MMID, + .num_send_queues = clamp_t( + unsigned int, num_streams, 1, + IPU_ISYS_NUM_STREAMS), + .num_recv_queues = IPU_ISYS_NUM_RECV_QUEUE, + .send_queue_size = IPU_ISYS_SIZE_SEND_QUEUE, + .recv_queue_size = IPU_ISYS_SIZE_RECV_QUEUE, + .icache_prefetch = isys->icache_prefetch, + }, + }; + struct device *dev = &isys->adev->dev; + int rval; + + if (!wrapper_init_done) { + wrapper_init_done = true; + ipu_wrapper_init(ISYS_MMID, &isys->adev->dev, + isys->pdata->base); + } + + /* + * SRAM partitioning. 
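+	 * The budget is IPU_DEVICE_GDA_NR_PAGES * IPU_DEVICE_GDA_VIRT_FACTOR
+	 * virtual GDA pages, shared between the firmware send queues.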
Initially, equal partitioning is set.
+	 * TODO: Fine-tune the partitioning based on the stream pixel load
+	 */
+	for (i = 0; i < min(IPU_NOF_SRAM_BLOCKS_MAX, NOF_SRAM_BLOCKS_MAX); i++) {
+		if (i < isys_cfg.driver_sys.num_send_queues)
+			isys_cfg.buffer_partition.num_gda_pages[i] =
+				(IPU_DEVICE_GDA_NR_PAGES *
+				 IPU_DEVICE_GDA_VIRT_FACTOR) /
+				isys_cfg.driver_sys.num_send_queues;
+		else
+			isys_cfg.buffer_partition.num_gda_pages[i] = 0;
+	}
+
+	rval = -ia_css_isys_device_open(&isys->fwcom, &isys_cfg);
+	if (rval < 0) {
+		dev_err(dev, "isys device open failed %d\n", rval);
+		return rval;
+	}
+
+	do {
+		usleep_range(IPU_ISYS_OPEN_TIMEOUT_US,
+			     IPU_ISYS_OPEN_TIMEOUT_US + 10);
+		rval = ipu_lib_call(device_open_ready, isys);
+		if (!rval)
+			break;
+		retry--;
+	} while (retry > 0);
+
+	if (!retry && rval) {
+		dev_err(dev, "isys device open ready failed %d\n", rval);
+		ipu_fw_isys_close(isys);
+	}
+
+	return rval;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_isys_init);
+
+void ipu_fw_isys_cleanup(struct ipu_isys *isys)
+{
+	ipu_lib_call(device_release, isys, 1);
+	isys->fwcom = NULL;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_isys_cleanup);
+
+struct ipu_fw_isys_resp_info_abi *ipu_fw_isys_get_resp(
+	void *context, unsigned int queue,
+	struct ipu_fw_isys_resp_info_abi *response)
+{
+	struct ia_css_isys_resp_info apiresp;
+	int rval;
+
+	rval = -ia_css_isys_stream_handle_response(context, &apiresp);
+	if (rval < 0)
+		return NULL;
+
+	response->buf_id = 0;
+	response->type = apiresp.type;
+	response->timestamp[0] = apiresp.timestamp[0];
+	response->timestamp[1] = apiresp.timestamp[1];
+	response->stream_handle = apiresp.stream_handle;
+	response->error_info.error = apiresp.error;
+	response->error_info.error_details = apiresp.error_details;
+	response->pin.out_buf_id = apiresp.pin.out_buf_id;
+	response->pin.addr = apiresp.pin.addr;
+	response->pin_id = apiresp.pin_id;
+	response->process_group_light.param_buf_id =
+		apiresp.process_group_light.param_buf_id;
+	response->process_group_light.addr =
+		apiresp.process_group_light.addr;
+	response->acc_id = apiresp.acc_id;
+#ifdef IPU_OTF_SUPPORT
+	response->frame_counter = apiresp.frame_counter;
+	response->written_direct = apiresp.written_direct;
+#endif
+
+	return response;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_isys_get_resp);
+
+void ipu_fw_isys_put_resp(void *context, unsigned int queue)
+{
+	/* Nothing to do here really */
+}
+EXPORT_SYMBOL_GPL(ipu_fw_isys_put_resp);
+
+int ipu_fw_isys_simple_cmd(struct ipu_isys *isys,
+			   const unsigned int stream_handle,
+			   enum ipu_fw_isys_send_type send_type)
+{
+	int rval = -1;
+
+	switch (send_type) {
+	case IPU_FW_ISYS_SEND_TYPE_STREAM_START:
+		rval = ipu_lib_call(stream_start, isys, stream_handle,
+				    NULL);
+		break;
+	case IPU_FW_ISYS_SEND_TYPE_STREAM_FLUSH:
+		rval = ipu_lib_call(stream_flush, isys, stream_handle);
+		break;
+	case IPU_FW_ISYS_SEND_TYPE_STREAM_STOP:
+		rval = ipu_lib_call(stream_stop, isys, stream_handle);
+		break;
+	case IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE:
+		rval = ipu_lib_call(stream_close, isys, stream_handle);
+		break;
+	default:
+		WARN_ON(1);
+	}
+
+	return rval;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_isys_simple_cmd);
+
+static void resolution_abi_to_api(const struct ipu_fw_isys_resolution_abi *abi,
+				  struct ia_css_isys_resolution *api)
+{
+	api->width = abi->width;
+	api->height = abi->height;
+}
+
+static void output_pin_payload_abi_to_api(
+	struct ipu_fw_isys_output_pin_payload_abi *abi,
+	struct ia_css_isys_output_pin_payload *api)
+{
+	api->out_buf_id = abi->out_buf_id;
+	api->addr = abi->addr;
+}
+
+static void
output_pin_info_abi_to_api( + struct ipu_fw_isys_output_pin_info_abi *abi, + struct ia_css_isys_output_pin_info *api) +{ + api->input_pin_id = abi->input_pin_id; + resolution_abi_to_api(&abi->output_res, &api->output_res); + api->stride = abi->stride; + api->pt = abi->pt; + api->watermark_in_lines = abi->watermark_in_lines; + api->payload_buf_size = abi->payload_buf_size; + api->send_irq = abi->send_irq; + api->ft = abi->ft; +#ifdef IPU_OTF_SUPPORT + api->link_id = abi->link_id; +#endif + api->reserve_compression = abi->reserve_compression; +} + +static void param_pin_abi_to_api(struct ipu_fw_isys_param_pin_abi *abi, + struct ia_css_isys_param_pin *api) +{ + api->param_buf_id = abi->param_buf_id; + api->addr = abi->addr; +} + +static void input_pin_info_abi_to_api( + struct ipu_fw_isys_input_pin_info_abi *abi, + struct ia_css_isys_input_pin_info *api) +{ + resolution_abi_to_api(&abi->input_res, &api->input_res); + api->dt = abi->dt; + api->mipi_store_mode = abi->mipi_store_mode; + api->mapped_dt = abi->mapped_dt; +} + +static void isa_cfg_abi_to_api(const struct ipu_fw_isys_isa_cfg_abi *abi, + struct ia_css_isys_isa_cfg *api) +{ + unsigned int i; + + for (i = 0; i < N_IA_CSS_ISYS_RESOLUTION_INFO; i++) + resolution_abi_to_api(&abi->isa_res[i], &api->isa_res[i]); + + api->blc_enabled = abi->cfg.blc; + api->lsc_enabled = abi->cfg.lsc; + api->dpc_enabled = abi->cfg.dpc; + api->downscaler_enabled = abi->cfg.downscaler; + api->awb_enabled = abi->cfg.awb; + api->af_enabled = abi->cfg.af; + api->ae_enabled = abi->cfg.ae; + api->paf_type = abi->cfg.paf; + api->send_irq_stats_ready = abi->cfg.send_irq_stats_ready; + api->send_resp_stats_ready = abi->cfg.send_irq_stats_ready; +} + +static void cropping_abi_to_api(struct ipu_fw_isys_cropping_abi *abi, + struct ia_css_isys_cropping *api) +{ + api->top_offset = abi->top_offset; + api->left_offset = abi->left_offset; + api->bottom_offset = abi->bottom_offset; + api->right_offset = abi->right_offset; +} + +static void stream_cfg_abi_to_api(struct ipu_fw_isys_stream_cfg_data_abi *abi, + struct ia_css_isys_stream_cfg_data *api) +{ + unsigned int i; + + api->src = abi->src; + api->vc = abi->vc; + api->isl_use = abi->isl_use; + api->compfmt = abi->compfmt; + isa_cfg_abi_to_api(&abi->isa_cfg, &api->isa_cfg); + for (i = 0; i < N_IA_CSS_ISYS_CROPPING_LOCATION; i++) + cropping_abi_to_api(&abi->crop[i], &api->crop[i]); + + api->send_irq_sof_discarded = abi->send_irq_sof_discarded; + api->send_irq_eof_discarded = abi->send_irq_eof_discarded; + api->send_resp_sof_discarded = abi->send_irq_sof_discarded; + api->send_resp_eof_discarded = abi->send_irq_eof_discarded; + api->nof_input_pins = abi->nof_input_pins; + api->nof_output_pins = abi->nof_output_pins; + for (i = 0; i < abi->nof_input_pins; i++) + input_pin_info_abi_to_api(&abi->input_pins[i], + &api->input_pins[i]); + + for (i = 0; i < abi->nof_output_pins; i++) + output_pin_info_abi_to_api(&abi->output_pins[i], + &api->output_pins[i]); +} + +static void frame_buff_set_abi_to_api( + struct ipu_fw_isys_frame_buff_set_abi *abi, + struct ia_css_isys_frame_buff_set *api) +{ + int i; + + for (i = 0; i < min(IPU_MAX_OPINS, MAX_OPINS); i++) + output_pin_payload_abi_to_api(&abi->output_pins[i], + &api->output_pins[i]); + + param_pin_abi_to_api(&abi->process_group_light, + &api->process_group_light); + + api->send_irq_sof = abi->send_irq_sof; + api->send_irq_eof = abi->send_irq_eof; + api->send_irq_capture_ack = abi->send_irq_capture_ack; + api->send_irq_capture_done = abi->send_irq_capture_done; +} + +int 
ipu_fw_isys_complex_cmd(struct ipu_isys *isys, + const unsigned int stream_handle, + void *cpu_mapped_buf, + dma_addr_t dma_mapped_buf, + size_t size, + enum ipu_fw_isys_send_type send_type) +{ + union { + struct ia_css_isys_stream_cfg_data stream_cfg; + struct ia_css_isys_frame_buff_set buf; + } param; + int rval = -1; + + memset(¶m, 0, sizeof(param)); + + switch (send_type) { + case IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE: + frame_buff_set_abi_to_api(cpu_mapped_buf, ¶m.buf); + rval = ipu_lib_call(stream_capture_indication, + isys, stream_handle, ¶m.buf); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_OPEN: + stream_cfg_abi_to_api(cpu_mapped_buf, ¶m.stream_cfg); + rval = ipu_lib_call(stream_open, isys, stream_handle, + ¶m.stream_cfg); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE: + frame_buff_set_abi_to_api(cpu_mapped_buf, ¶m.buf); + rval = ipu_lib_call(stream_start, isys, stream_handle, + ¶m.buf); + break; + default: + WARN_ON(1); + } + + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_complex_cmd); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu library"); diff --git a/drivers/media/pci/intel/ipu4/ipu4-fw-resources.c b/drivers/media/pci/intel/ipu4/ipu4-fw-resources.c new file mode 100644 index 000000000000..581b7379cef8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-fw-resources.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2015 - 2018 Intel Corporation + +#include "ipu-fw-psys.h" + +#include + +/* resources table */ +/* + * Cell types by cell IDs + */ +const u32 ipu_fw_psys_cell_types[IPU_FW_PSYS_N_CELL_ID] = { + IPU_FW_PSYS_SP_CTRL_TYPE_ID, + IPU_FW_PSYS_SP_SERVER_TYPE_ID, + IPU_FW_PSYS_SP_SERVER_TYPE_ID, + IPU_FW_PSYS_VP_TYPE_ID, + IPU_FW_PSYS_VP_TYPE_ID, + IPU_FW_PSYS_VP_TYPE_ID, + IPU_FW_PSYS_VP_TYPE_ID, + IPU_FW_PSYS_ACC_ISA_TYPE_ID, + IPU_FW_PSYS_ACC_PSA_TYPE_ID, + IPU_FW_PSYS_ACC_PSA_TYPE_ID, + IPU_FW_PSYS_ACC_PSA_TYPE_ID, + IPU_FW_PSYS_ACC_PSA_TYPE_ID, + IPU_FW_PSYS_ACC_PSA_TYPE_ID, + IPU_FW_PSYS_ACC_PSA_TYPE_ID, + IPU_FW_PSYS_ACC_OSA_TYPE_ID, + IPU_FW_PSYS_GDC_TYPE_ID, + IPU_FW_PSYS_GDC_TYPE_ID +}; + +const u16 ipu_fw_num_dev_channels[IPU_FW_PSYS_N_DEV_CHN_ID] = { + IPU_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE, + IPU_FW_PSYS_DEV_CHN_GDC_MAX_SIZE, + IPU_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, + IPU_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, + IPU_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE, + IPU_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE, + IPU_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE, + IPU_FW_PSYS_DEV_CHN_DMA_FW_MAX_SIZE, +#ifdef CONFIG_VIDEO_INTEL_IPU4P + IPU_FW_PSYS_DEV_CHN_DMA_CMPRS_MAX_SIZE +#endif +}; + +const u16 ipu_fw_psys_mem_size[IPU_FW_PSYS_N_MEM_ID] = { + IPU_FW_PSYS_VMEM0_MAX_SIZE, + IPU_FW_PSYS_VMEM1_MAX_SIZE, + IPU_FW_PSYS_VMEM2_MAX_SIZE, + IPU_FW_PSYS_VMEM3_MAX_SIZE, + IPU_FW_PSYS_VMEM4_MAX_SIZE, + IPU_FW_PSYS_BAMEM0_MAX_SIZE, + IPU_FW_PSYS_BAMEM1_MAX_SIZE, + IPU_FW_PSYS_BAMEM2_MAX_SIZE, + IPU_FW_PSYS_BAMEM3_MAX_SIZE, + IPU_FW_PSYS_DMEM0_MAX_SIZE, + IPU_FW_PSYS_DMEM1_MAX_SIZE, + IPU_FW_PSYS_DMEM2_MAX_SIZE, + IPU_FW_PSYS_DMEM3_MAX_SIZE, + IPU_FW_PSYS_DMEM4_MAX_SIZE, + IPU_FW_PSYS_DMEM5_MAX_SIZE, + IPU_FW_PSYS_DMEM6_MAX_SIZE, + IPU_FW_PSYS_DMEM7_MAX_SIZE, + IPU_FW_PSYS_PMEM0_MAX_SIZE, + IPU_FW_PSYS_PMEM1_MAX_SIZE, + IPU_FW_PSYS_PMEM2_MAX_SIZE, + IPU_FW_PSYS_PMEM3_MAX_SIZE +}; + +const enum ipu_mem_id +ipu_fw_psys_cell_mem[IPU_FW_PSYS_N_CELL_ID][IPU_FW_PSYS_N_MEM_TYPE_ID] = { + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_DMEM0_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + 
IPU_FW_PSYS_DMEM1_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_DMEM2_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_VMEM4_ID, + IPU_FW_PSYS_DMEM4_ID, + IPU_FW_PSYS_VMEM0_ID, + IPU_FW_PSYS_BAMEM0_ID, + IPU_FW_PSYS_PMEM0_ID + }, + { + IPU_FW_PSYS_VMEM4_ID, + IPU_FW_PSYS_DMEM5_ID, + IPU_FW_PSYS_VMEM1_ID, + IPU_FW_PSYS_BAMEM1_ID, + IPU_FW_PSYS_PMEM1_ID + }, + { + IPU_FW_PSYS_VMEM4_ID, + IPU_FW_PSYS_DMEM6_ID, + IPU_FW_PSYS_VMEM2_ID, + IPU_FW_PSYS_BAMEM2_ID, + IPU_FW_PSYS_PMEM2_ID, + }, + { + IPU_FW_PSYS_VMEM4_ID, + IPU_FW_PSYS_DMEM7_ID, + IPU_FW_PSYS_VMEM3_ID, + IPU_FW_PSYS_BAMEM3_ID, + IPU_FW_PSYS_PMEM3_ID, + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + }, + { + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID, + IPU_FW_PSYS_N_MEM_ID + } +}; + +static const struct ipu_fw_resource_definitions default_defs = { + .cells = ipu_fw_psys_cell_types, + .num_cells = IPU_FW_PSYS_N_CELL_ID, + .num_cells_type = IPU_FW_PSYS_N_CELL_TYPE_ID, + + .dev_channels = ipu_fw_num_dev_channels, + .num_dev_channels = IPU_FW_PSYS_N_DEV_CHN_ID, + + .num_ext_mem_types = IPU_FW_PSYS_N_DATA_MEM_TYPE_ID, + .num_ext_mem_ids = IPU_FW_PSYS_N_MEM_ID, + .ext_mem_ids = ipu_fw_psys_mem_size, + + .num_dfm_ids = IPU_FW_PSYS_N_DEV_DFM_ID, + + .cell_mem_row = IPU_FW_PSYS_N_MEM_TYPE_ID, + .cell_mem = &ipu_fw_psys_cell_mem[0][0], + + .process.ext_mem_id = offsetof(struct ipu_fw_psys_process, + ext_mem_id[0]), + .process.ext_mem_offset = offsetof(struct ipu_fw_psys_process, + ext_mem_offset[0]), + .process.dev_chn_offset = offsetof(struct ipu_fw_psys_process, + dev_chn_offset[0]), + .process.cell_id = offsetof(struct ipu_fw_psys_process, cell_id), +}; + +const struct ipu_fw_resource_definitions *res_defs = &default_defs; + +/********** Generic resource handling **********/ + +/* + * Extension library gives byte offsets to its internal structures. + * use those offsets to update fields. Without extension lib access + * structures directly. 
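+ * E.g. res_defs->process.cell_id holds
+ * offsetof(struct ipu_fw_psys_process, cell_id), so generic resource
+ * code can update the field as (a sketch; 'process' and 'cell' are
+ * illustrative names):
+ *	*((u8 *)process + res_defs->process.cell_id) = cell;
+ * without knowing the concrete firmware structure layout.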
+ */ +int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, + u8 value) +{ + struct ipu_fw_psys_process_group *parent = + (struct ipu_fw_psys_process_group *) ((char *)ptr + + ptr->parent_offset); + + ptr->cell_id = value; + parent->resource_bitmap |= 1 << value; + + return 0; +} + +u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index) +{ + return ptr->cell_id; +} + +int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr) +{ + struct ipu_fw_psys_process_group *parent; + u8 cell_id = ipu_fw_psys_get_process_cell_id(ptr, 0); + int retval = -1; + + parent = (struct ipu_fw_psys_process_group *) ((char *)ptr + + ptr->parent_offset); + if ((1 << cell_id) && ((1 << cell_id) & parent->resource_bitmap)) { + ipu_fw_psys_set_process_cell_id(ptr, 0, IPU_FW_PSYS_N_CELL_ID); + parent->resource_bitmap &= ~(1 << cell_id); + retval = 0; + } + + return retval; +} + +int ipu_fw_psys_set_process_dev_chn_offset(struct ipu_fw_psys_process *ptr, + u16 offset, u16 value) +{ + ptr->dev_chn_offset[offset] = value; + + return 0; +} + +int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, + u16 type_id, u16 mem_id, u16 offset) +{ + ptr->ext_mem_offset[type_id] = offset; + ptr->ext_mem_id[type_id] = mem_id; + + return 0; +} + +static struct ipu_fw_psys_program_manifest * +ipu_resource_get_program_manifest( + const struct ipu_fw_psys_program_group_manifest *manifest, + const unsigned int program_index) +{ + struct ipu_fw_psys_program_manifest *prg_manifest_base; + u8 *program_manifest = NULL; + u8 program_count; + unsigned int i; + + program_count = manifest->program_count; + + prg_manifest_base = (struct ipu_fw_psys_program_manifest *) + ((char *)manifest + manifest->program_manifest_offset); + if (program_index < program_count) { + program_manifest = (u8 *) prg_manifest_base; + for (i = 0; i < program_index; i++) + program_manifest += + ((struct ipu_fw_psys_program_manifest *) + program_manifest)->size; + } + + return (struct ipu_fw_psys_program_manifest *)program_manifest; +} + +int ipu_fw_psys_get_program_manifest_by_process( + struct ipu_fw_generic_program_manifest *gen_pm, + const struct ipu_fw_psys_program_group_manifest *pg_manifest, + struct ipu_fw_psys_process *process) +{ + u32 process_id = process->ID; + int programs = pg_manifest->program_count; + int i; + + for (i = 0; i < programs; i++) { + u32 program_id; + struct ipu_fw_psys_program_manifest *pm = + ipu_resource_get_program_manifest(pg_manifest, i); + if (!pm) + continue; + program_id = pm->ID; + if (program_id == process_id) { + gen_pm->dev_chn_size = pm->dev_chn_size; + gen_pm->dev_chn_offset = NULL; + gen_pm->ext_mem_offset = NULL; + gen_pm->cell_id = pm->cell_id; + gen_pm->cell_type_id = pm->cell_type_id; + gen_pm->ext_mem_size = pm->ext_mem_size; + return 0; + } + } + return -ENOENT; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4-isys-csi2.c b/drivers/media/pci/intel/ipu4/ipu4-isys-csi2.c new file mode 100644 index 000000000000..11e409187216 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-isys-csi2.c @@ -0,0 +1,695 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2013 - 2018 Intel Corporation + +#include "ipu.h" +#include "ipu-buttress.h" +#include "ipu-isys.h" +#include "ipu-isys-csi2.h" +#include "ipu-platform-isys-csi2-reg.h" +#include "ipu-platform-regs.h" +#include "ipu-trace.h" +#include "ipu-isys-csi2.h" + +#define CSE_IPC_CMDPHYWRITEL 35 +#define CSE_IPC_CMDPHYWRITEH 36 +#define CSE_IPC_CMDLEGACYPHYWRITEL 39 +#define CSE_IPC_CMDLEGACYPHYWRITEH 
40
+
+#define NBR_BULK_MSGS 30	/* Space reservation for IPC messages */
+
+#define CSI2_UPDATE_TIME_TRY_NUM 3
+#define CSI2_UPDATE_TIME_MAX_DIFF 20
+
+static u32
+build_cse_ipc_commands(struct ipu_ipc_buttress_bulk_msg *target,
+		       u32 nbr_msgs, u32 opcodel, u32 reg, u32 data)
+{
+	struct ipu_ipc_buttress_bulk_msg *msgs = &target[nbr_msgs];
+	u32 opcodeh = opcodel == CSE_IPC_CMDPHYWRITEL ?
+		CSE_IPC_CMDPHYWRITEH : CSE_IPC_CMDLEGACYPHYWRITEH;
+
+	/*
+	 * Writing a 32-bit register consists of two 16-bit IPC messages
+	 * to the CSE. The messages must be sent in low-high order, with
+	 * nothing else between them.
+	 * The register index is carried in bits 8..15 (register address
+	 * divided by 4).
+	 */
+	msgs->cmd = opcodel | (reg << (8 - 2)) | ((data & 0xffff) << 16);
+	msgs->expected_resp = opcodel;
+	msgs->require_resp = true;
+	msgs->cmd_size = 4;
+	msgs++;
+
+	msgs->cmd = opcodeh | (reg << (8 - 2)) | (data & 0xffff0000);
+	msgs->expected_resp = opcodeh;
+	msgs->require_resp = true;
+	msgs->cmd_size = 4;
+
+	nbr_msgs += 2;
+
+	/* Hits only if a code change introduces too many new IPC messages */
+	WARN_ON(nbr_msgs > NBR_BULK_MSGS);
+
+	return nbr_msgs;
+}
+
+static int csi2_ev_correction_params(struct ipu_isys_csi2 *csi2,
+				     unsigned int lanes)
+{
+	struct ipu_device *isp = csi2->isys->adev->isp;
+	struct ipu_ipc_buttress_bulk_msg *messages;
+	const struct ipu_receiver_electrical_params *ev_params;
+	const struct ipu_isys_internal_csi2_pdata *csi2_pdata;
+
+	__s64 link_freq;
+	unsigned int i;
+	u32 val;
+	u32 nbr_msgs = 0;
+	int rval;
+	bool conf_set0;
+	bool conf_set1;
+	bool conf_combined = false;
+
+	csi2_pdata = &csi2->isys->pdata->ipdata->csi2;
+	ev_params = csi2_pdata->evparams;
+	if (!ev_params)
+		return 0;
+
+	if (csi2->isys->csi2_cse_ipc_not_supported)
+		return 0;
+
+	rval = ipu_isys_csi2_get_link_freq(csi2, &link_freq);
+	if (rval)
+		return rval;
+
+	i = 0;
+	while (ev_params[i].device) {
+		if (ev_params[i].device == isp->pdev->device &&
+		    ev_params[i].revision == isp->pdev->revision &&
+		    ev_params[i].min_freq < link_freq &&
+		    ev_params[i].max_freq >= link_freq)
+			break;
+		i++;
+	}
+
+	if (!ev_params[i].device) {
+		dev_info(&csi2->isys->adev->dev,
+			 "No rcomp value override for this HW revision\n");
+		return 0;
+	}
+
+	messages = kcalloc(NBR_BULK_MSGS, sizeof(*messages), GFP_KERNEL);
+	if (!messages)
+		return -ENOMEM;
+
+	conf_set0 = csi2_pdata->evsetmask0 & (1 << csi2->index);
+	conf_set1 = csi2_pdata->evsetmask1 & (1 << csi2->index);
+	if (csi2_pdata->evlanecombine[csi2->index]) {
+		conf_combined =
+			lanes > csi2_pdata->evlanecombine[csi2->index] ? 1 : 0;
+	}
+	conf_set1 |= conf_combined;
+
+	/*
+	 * Note: There is no way to do a R-M-W on these.
Possible non-zero reset + * default is OR'd with the values + */ + val = 1 << CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_PORT1_SHIFT | + 1 << CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_PORT2_SHIFT | + 1 << CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_PORT3_SHIFT | + 1 << CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_PORT4_SHIFT | + 1 << CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_ENABLE_SHIFT | + ev_params[i].rcomp_val_legacy << + CSI2_SB_CSI_RCOMP_CONTROL_LEGACY_OVR_CODE_SHIFT; + + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDLEGACYPHYWRITEL, + CSI2_SB_CSI_RCOMP_CONTROL_LEGACY, + val); + + val = 2 << CSI2_SB_CSI_RCOMP_UPDATE_MODE_SHIFT | + 1 << CSI2_SB_CSI_RCOMP_OVR_ENABLE_SHIFT | + ev_params[i].rcomp_val_combo << CSI2_SB_CSI_RCOMP_OVR_CODE_SHIFT; + + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_CSI_RCOMP_CONTROL_COMBO, val); + + if (conf_set0) { + val = 0x380078 | ev_params[i].ports[0].ctle_val << + CSI2_SB_CPHY0_RX_CONTROL1_EQ_LANE0_SHIFT; + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_CPHY0_RX_CONTROL1, + val); + val = 0x10000; + if (ev_params[i].ports[0].crc_val != IPU_EV_AUTO) + val |= ev_params[i].ports[0].crc_val << + CSI2_SB_CPHY0_DLL_OVRD_CRCDC_FSM_DLANE0_SHIFT | + CSI2_SB_CPHY0_DLL_OVRD_LDEN_CRCDC_FSM_DLANE0; + + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_CPHY0_DLL_OVRD, val); + } + + if (conf_set1) { + val = 0x380078 | ev_params[i].ports[1].ctle_val << + CSI2_SB_CPHY2_RX_CONTROL1_EQ_LANE1_SHIFT; + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_CPHY2_RX_CONTROL1, + val); + + val = 0x10000; + if (ev_params[i].ports[1].crc_val != IPU_EV_AUTO) + val |= ev_params[i].ports[1].crc_val << + CSI2_SB_CPHY2_DLL_OVRD_CRCDC_FSM_DLANE1_SHIFT | + CSI2_SB_CPHY2_DLL_OVRD_LDEN_CRCDC_FSM_DLANE1; + + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_CPHY2_DLL_OVRD, val); + } + + mutex_lock(&csi2->isys->mutex); + /* This register is shared between two receivers */ + val = csi2->isys->csi2_rx_ctrl_cached; + if (conf_set0) { + val &= ~CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE01_MASK; + if (ev_params[i].ports[0].drc_val != IPU_EV_AUTO) + val |= + CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE01_MASK; + } + + if (conf_set1) { + val &= ~CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE23_MASK; + if (ev_params[i].ports[1].drc_val != IPU_EV_AUTO) + val |= + CSI2_SB_DPHY0_RX_CNTRL_SKEWCAL_CR_SEL_DLANE23_MASK; + } + csi2->isys->csi2_rx_ctrl_cached = val; + + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_DPHY0_RX_CNTRL, val); + mutex_unlock(&csi2->isys->mutex); + + if (conf_set0 && ev_params[i].ports[0].drc_val != IPU_EV_AUTO) { + /* Write value with FSM disabled */ + val = (conf_combined ? 
+ ev_params[i].ports[0].drc_val_combined : + ev_params[i].ports[0].drc_val) << + CSI2_SB_DPHY0_DLL_OVRD_DRC_FSM_OVRD_SHIFT; + + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_DPHY0_DLL_OVRD, val); + + /* Write value with FSM enabled */ + val |= 1 << CSI2_SB_DPHY1_DLL_OVRD_LDEN_DRC_FSM_SHIFT; + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_DPHY0_DLL_OVRD, val); + } else if (conf_set0 && ev_params[i].ports[0].drc_val == IPU_EV_AUTO) { + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_DPHY0_DLL_OVRD, 0); + } + + if (conf_set1 && ev_params[i].ports[1].drc_val != IPU_EV_AUTO) { + val = (conf_combined ? + ev_params[i].ports[1].drc_val_combined : + ev_params[i].ports[1].drc_val) << + CSI2_SB_DPHY0_DLL_OVRD_DRC_FSM_OVRD_SHIFT; + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_DPHY1_DLL_OVRD, val); + + val |= 1 << CSI2_SB_DPHY1_DLL_OVRD_LDEN_DRC_FSM_SHIFT; + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_DPHY1_DLL_OVRD, val); + } else if (conf_set1 && ev_params[i].ports[1].drc_val == IPU_EV_AUTO) { + nbr_msgs = build_cse_ipc_commands(messages, nbr_msgs, + CSE_IPC_CMDPHYWRITEL, + CSI2_SB_DPHY1_DLL_OVRD, 0); + } + + rval = ipu_buttress_ipc_send_bulk(isp, + IPU_BUTTRESS_IPC_CSE, + messages, nbr_msgs); + + if (rval == -ENODEV) + csi2->isys->csi2_cse_ipc_not_supported = true; + + kfree(messages); + return 0; +} + +static void ipu_isys_register_errors(struct ipu_isys_csi2 *csi2) +{ + u32 status = readl(csi2->base + CSI2_REG_CSIRX_IRQ_STATUS); + + writel(status, csi2->base + CSI2_REG_CSIRX_IRQ_CLEAR); + csi2->receiver_errors |= status; +} + +void ipu_isys_csi2_error(struct ipu_isys_csi2 *csi2) +{ + /* + * Strings corresponding to CSI-2 receiver errors are here. + * Corresponding macros are defined in the header file. 
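+	 * The table below is indexed by the bit position of the error in
+	 * the CSI2_REG_CSIRX_IRQ_STATUS register, e.g. bit 3 set in the
+	 * status word reports "FIFO overflow".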
+ */ + static const struct ipu_isys_csi2_error { + const char *error_string; + bool is_info_only; + } errors[] = { + {"Single packet header error corrected", true}, + {"Multiple packet header errors detected", true}, + {"Payload checksum (CRC) error", true}, + {"FIFO overflow", false}, + {"Reserved short packet data type detected", true}, + {"Reserved long packet data type detected", true}, + {"Incomplete long packet detected", false}, + {"Frame sync error", false}, + {"Line sync error", false}, + {"DPHY recoverable synchronization error", true}, + {"DPHY non-recoverable synchronization error", false}, + {"Escape mode error", true}, + {"Escape mode trigger event", true}, + {"Escape mode ultra-low power state for data lane(s)", true}, + {"Escape mode ultra-low power state exit for clock lane", true}, + {"Inter-frame short packet discarded", true}, + {"Inter-frame long packet discarded", true}, + }; + u32 status; + unsigned int i; + + /* Register errors once more in case of error interrupts are disabled */ + ipu_isys_register_errors(csi2); + status = csi2->receiver_errors; + csi2->receiver_errors = 0; + + for (i = 0; i < ARRAY_SIZE(errors); i++) { + if (!(status & BIT(i))) + continue; + + if (errors[i].is_info_only) + dev_dbg(&csi2->isys->adev->dev, + "csi2-%i info: %s\n", + csi2->index, errors[i].error_string); + else + dev_err_ratelimited(&csi2->isys->adev->dev, + "csi2-%i error: %s\n", + csi2->index, + errors[i].error_string); + } +} + +static u64 tunit_time_to_us(struct ipu_isys *isys, u64 time) +{ + struct ipu_bus_device *adev = to_ipu_bus_device(isys->adev->iommu); + u64 isys_clk = IS_FREQ_SOURCE / adev->ctrl->divisor / 1000000; + + do_div(time, isys_clk); + + return time; +} + +static int update_timer_base(struct ipu_isys *isys) +{ + int rval, i; + u64 time; + + for (i = 0; i < CSI2_UPDATE_TIME_TRY_NUM; i++) { + rval = ipu_trace_get_timer(&isys->adev->dev, &time); + if (rval) { + dev_err(&isys->adev->dev, + "Failed to read Tunit timer.\n"); + return rval; + } + rval = ipu_buttress_tsc_read(isys->adev->isp, + &isys->tsc_timer_base); + if (rval) { + dev_err(&isys->adev->dev, + "Failed to read TSC timer.\n"); + return rval; + } + rval = ipu_trace_get_timer(&isys->adev->dev, + &isys->tunit_timer_base); + if (rval) { + dev_err(&isys->adev->dev, + "Failed to read Tunit timer.\n"); + return rval; + } + if (tunit_time_to_us(isys, isys->tunit_timer_base - time) < + CSI2_UPDATE_TIME_MAX_DIFF) + return 0; + } + dev_dbg(&isys->adev->dev, "Timer base values may not be accurate.\n"); + return 0; +} + +static int +ipu_isys_csi2_configure_tunit(struct ipu_isys_csi2 *csi2, bool enable) +{ + struct ipu_isys *isys = csi2->isys; + void __iomem *isys_base = isys->pdata->base; + void __iomem *tunit_base = isys_base + TRACE_REG_IS_TRACE_UNIT_BASE; + int i, ret = 0; + + mutex_lock(&isys->short_packet_tracing_mutex); + if (!enable) { + isys->short_packet_tracing_count--; + if (isys->short_packet_tracing_count == 0) + writel(0, tunit_base + TRACE_REG_TUN_DDR_ENABLE); + goto out_release_mutex; + } + + isys->short_packet_tracing_count++; + if (isys->short_packet_tracing_count > 1) + goto out_release_mutex; + + memset(isys->short_packet_trace_buffer, 0, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE); + dma_sync_single_for_device(&isys->adev->dev, + isys->short_packet_trace_buffer_dma_addr, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + DMA_BIDIRECTIONAL); + + /* ring buffer base */ + writel(isys->short_packet_trace_buffer_dma_addr, + tunit_base + TRACE_REG_TUN_DRAM_BASE_ADDR); + + /* ring buffer end */ + 
writel(isys->short_packet_trace_buffer_dma_addr + + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE - + IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE, + tunit_base + TRACE_REG_TUN_DRAM_END_ADDR); + + /* Infobits for ddr trace */ + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + tunit_base + TRACE_REG_TUN_DDR_INFO_VAL); + + /* Remove reset from trace timers */ + writel(TRACE_REG_GPREG_TRACE_TIMER_RST_OFF, + isys_base + TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N); + + /* Reset CSI2 monitors */ + writel(1, isys->pdata->base + TRACE_REG_CSI2_TM_BASE + + TRACE_REG_CSI2_TM_RESET_REG_IDX); + writel(1, isys->pdata->base + TRACE_REG_CSI2_3PH_TM_BASE + + TRACE_REG_CSI2_TM_RESET_REG_IDX); + + /* Set trace address register. */ + writel(TRACE_REG_CSI2_TM_TRACE_ADDRESS_VAL, + isys->pdata->base + TRACE_REG_CSI2_TM_BASE + + TRACE_REG_CSI2_TM_TRACE_ADDRESS_REG_IDX); + writel(TRACE_REG_CSI2_TM_TRACE_HEADER_VAL, + isys->pdata->base + TRACE_REG_CSI2_TM_BASE + + TRACE_REG_CSI2_TM_TRACE_HEADER_REG_IDX); + writel(TRACE_REG_CSI2_3PH_TM_TRACE_ADDRESS_VAL, + isys->pdata->base + TRACE_REG_CSI2_3PH_TM_BASE + + TRACE_REG_CSI2_TM_TRACE_ADDRESS_REG_IDX); + writel(TRACE_REG_CSI2_TM_TRACE_HEADER_VAL, + isys->pdata->base + TRACE_REG_CSI2_3PH_TM_BASE + + TRACE_REG_CSI2_TM_TRACE_HEADER_REG_IDX); + + /* Enable DDR trace. */ + writel(1, tunit_base + TRACE_REG_TUN_DDR_ENABLE); + + /* Enable trace for CSI2 port. */ + for (i = 0; i < IPU_ISYS_MAX_CSI2_LEGACY_PORTS + + IPU_ISYS_MAX_CSI2_COMBO_PORTS; i++) { + void __iomem *event_mask_reg = + (i < IPU_ISYS_MAX_CSI2_LEGACY_PORTS) ? + isys->pdata->base + TRACE_REG_CSI2_TM_BASE + + TRACE_REG_CSI2_TM_TRACE_DDR_EN_REG_IDX_P(i) : + isys->pdata->base + TRACE_REG_CSI2_3PH_TM_BASE + + TRACE_REG_CSI2_3PH_TM_TRACE_DDR_EN_REG_IDX_P(i); + + writel(IPU_ISYS_SHORT_PACKET_TRACE_EVENT_MASK, + event_mask_reg); + } + + /* Enable CSI2 receiver monitor */ + writel(1, isys->pdata->base + TRACE_REG_CSI2_TM_BASE + + TRACE_REG_CSI2_TM_OVERALL_ENABLE_REG_IDX); + writel(1, isys->pdata->base + TRACE_REG_CSI2_3PH_TM_BASE + + TRACE_REG_CSI2_TM_OVERALL_ENABLE_REG_IDX); + + ret = update_timer_base(isys); + +out_release_mutex: + mutex_unlock(&isys->short_packet_tracing_mutex); + + return ret; +} + +int ipu_isys_csi2_set_stream(struct v4l2_subdev *sd, + struct ipu_isys_csi2_timing timing, + unsigned int nlanes, int enable) +{ + struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); + struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe, + struct ipu_isys_pipeline, + pipe); + unsigned int i; + int rval; + u32 val, csi2part = 0, csi2csirx; + + dev_dbg(&csi2->isys->adev->dev, "csi2 s_stream %d\n", enable); + + if (!enable) { + ipu_isys_csi2_error(csi2); + + val = readl(csi2->base + CSI2_REG_CSI_RX_CONFIG); + val &= ~(CSI2_CSI_RX_CONFIG_DISABLE_BYTE_CLK_GATING | + CSI2_CSI_RX_CONFIG_RELEASE_LP11); + writel(val, csi2->base + CSI2_REG_CSI_RX_CONFIG); + + writel(0, csi2->base + CSI2_REG_CSI_RX_ENABLE); + + /* Disable interrupts */ + writel(0, csi2->base + CSI2_REG_CSI2S2M_IRQ_MASK); + writel(0, csi2->base + CSI2_REG_CSI2S2M_IRQ_ENABLE); + writel(0, csi2->base + CSI2_REG_CSI2PART_IRQ_MASK); + writel(0, csi2->base + CSI2_REG_CSI2PART_IRQ_ENABLE); + if (ip->interlaced) + ipu_isys_csi2_configure_tunit(csi2, 0); + return 0; + } + + csi2_ev_correction_params(csi2, nlanes); + + writel(timing.ctermen, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_TERMEN_CLANE); + writel(timing.csettle, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_SETTLE_CLANE); + + for (i = 0; i < nlanes; i++) { + writel(timing.dtermen, + csi2->base + + CSI2_REG_CSI_RX_DLY_CNT_TERMEN_DLANE(i)); + 
writel(timing.dsettle,
+		       csi2->base +
+		       CSI2_REG_CSI_RX_DLY_CNT_SETTLE_DLANE(i));
+	}
+
+	val = readl(csi2->base + CSI2_REG_CSI_RX_CONFIG);
+	val |= CSI2_CSI_RX_CONFIG_DISABLE_BYTE_CLK_GATING |
+		CSI2_CSI_RX_CONFIG_RELEASE_LP11;
+	writel(val, csi2->base + CSI2_REG_CSI_RX_CONFIG);
+
+	writel(nlanes, csi2->base + CSI2_REG_CSI_RX_NOF_ENABLED_LANES);
+	writel(CSI2_CSI_RX_ENABLE_ENABLE,
+	       csi2->base + CSI2_REG_CSI_RX_ENABLE);
+
+	/* SOF/EOF of VC0-VC3 enabled from CSI2PART register in B0 */
+	for (i = 0; i < NR_OF_CSI2_VC; i++)
+		csi2part |= CSI2_IRQ_FS_VC(i) | CSI2_IRQ_FE_VC(i);
+
+	/* Enable csi2 receiver error interrupts */
+	csi2csirx = BIT(CSI2_CSIRX_NUM_ERRORS) - 1;
+	writel(csi2csirx, csi2->base + CSI2_REG_CSIRX_IRQ_EDGE);
+	writel(0, csi2->base + CSI2_REG_CSIRX_IRQ_LEVEL_NOT_PULSE);
+	writel(csi2csirx, csi2->base + CSI2_REG_CSIRX_IRQ_CLEAR);
+	writel(csi2csirx, csi2->base + CSI2_REG_CSIRX_IRQ_MASK);
+	writel(csi2csirx, csi2->base + CSI2_REG_CSIRX_IRQ_ENABLE);
+
+	/* Enable csi2 error and SOF-related irqs */
+	writel(csi2part, csi2->base + CSI2_REG_CSI2PART_IRQ_EDGE);
+	writel(0, csi2->base + CSI2_REG_CSI2PART_IRQ_LEVEL_NOT_PULSE);
+	writel(csi2part, csi2->base + CSI2_REG_CSI2PART_IRQ_CLEAR);
+	writel(csi2part, csi2->base + CSI2_REG_CSI2PART_IRQ_MASK);
+	writel(csi2part, csi2->base + CSI2_REG_CSI2PART_IRQ_ENABLE);
+	if (ip->interlaced) {
+		writel(CSI2_RX_SYNC_COUNTER_EXTERNAL,
+		       csi2->base + CSI2_REG_CSI_RX_SYNC_COUNTER_SEL);
+		rval = ipu_isys_csi2_configure_tunit(csi2, 1);
+		if (rval)
+			return rval;
+	}
+
+	return 0;
+}
+
+void ipu_isys_csi2_isr(struct ipu_isys_csi2 *csi2)
+{
+	u32 status = readl(csi2->base + CSI2_REG_CSI2PART_IRQ_STATUS);
+	unsigned int i;
+
+	writel(status, csi2->base + CSI2_REG_CSI2PART_IRQ_CLEAR);
+
+	if (status & CSI2_CSI2PART_IRQ_CSIRX)
+		ipu_isys_register_errors(csi2);
+
+	for (i = 0; i < NR_OF_CSI2_VC; i++) {
+		if ((status & CSI2_IRQ_FS_VC(i)))
+			ipu_isys_csi2_sof_event(csi2, i);
+
+		if ((status & CSI2_IRQ_FE_VC(i)))
+			ipu_isys_csi2_eof_event(csi2, i);
+	}
+}
+
+static u64 tsc_time_to_tunit_time(struct ipu_isys *isys,
+				  u64 tsc_base, u64 tunit_base, u64 tsc_time)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(isys->adev->iommu);
+	u64 isys_clk = IS_FREQ_SOURCE / adev->ctrl->divisor / 100000;
+	u64 tsc_clk = IPU_BUTTRESS_TSC_CLK / 100000;
+	u64 tunit_time;
+
+	tunit_time = (tsc_time - tsc_base) * isys_clk;
+	do_div(tunit_time, tsc_clk);
+
+	return tunit_time + tunit_base;
+}
+
+/*
+ * Extract the timestamp from a trace message.
+ * The timestamp in the trace message consists of two parts: the lower
+ * part holds bits 0..15 of the full 64-bit timestamp, the higher part
+ * holds bits 14..63. The two parts are sampled at different times, so
+ * the two overlapping bits (14..15) are used to detect whether the
+ * lower part rolled over between the two samples.
+ * If the overlapping bits do not match, a correction is needed to
+ * handle the rollover.
+ */
+static u64
+extract_time_from_short_packet_msg(struct ipu_isys_csi2_monitor_message *msg)
+{
+	u64 time_h = msg->timestamp_h << 14;
+	u64 time_l = msg->timestamp_l;
+	u64 time_h_ovl = time_h & 0xc000;
+	u64 time_h_h = time_h & (~0xffff);
+
+	/* Fix possible rollovers.
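+	 * If the overlap bits of the high part are behind those of the
+	 * low part, the low part wrapped between the two samples and the
+	 * high part already includes the carry, so one wrap period
+	 * (0x10000) is subtracted before merging with the older low bits.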
*/ + if (time_h_ovl >= (time_l & 0xc000)) + return time_h_h | time_l; + else + return (time_h_h - 0x10000) | time_l; +} + +unsigned int +ipu_isys_csi2_get_current_field(struct ipu_isys_pipeline *ip, + unsigned int *timestamp) +{ + struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip); + struct ipu_isys *isys = av->isys; + unsigned int field = V4L2_FIELD_TOP; + + /* + * Find the nearest message that has matched msg type, + * port id, virtual channel and packet type. + */ + unsigned int i = ip->short_packet_trace_index; + bool msg_matched = false; + unsigned int monitor_id; + + if (ip->csi2->index >= IPU_ISYS_MAX_CSI2_LEGACY_PORTS) + monitor_id = TRACE_REG_CSI2_3PH_TM_MONITOR_ID; + else + monitor_id = TRACE_REG_CSI2_TM_MONITOR_ID; + + dma_sync_single_for_cpu(&isys->adev->dev, + isys->short_packet_trace_buffer_dma_addr, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + DMA_BIDIRECTIONAL); + + do { + struct ipu_isys_csi2_monitor_message msg = + isys->short_packet_trace_buffer[i]; + u64 sof_time = tsc_time_to_tunit_time(isys, + isys->tsc_timer_base, + isys->tunit_timer_base, + (((u64) timestamp[1]) << + 32) | timestamp[0]); + u64 trace_time = extract_time_from_short_packet_msg(&msg); + u64 delta_time_us = tunit_time_to_us(isys, + (sof_time > trace_time) ? + sof_time - trace_time : + trace_time - sof_time); + + i = (i + 1) % IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER; + + if (msg.cmd == TRACE_REG_CMD_TYPE_D64MTS && + msg.monitor_id == monitor_id && + msg.fs == 1 && + msg.port == ip->csi2->index && + msg.vc == ip->vc && + delta_time_us < IPU_ISYS_SHORT_PACKET_TRACE_MAX_TIMESHIFT) { + field = (msg.sequence % 2) ? + V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM; + ip->short_packet_trace_index = i; + msg_matched = true; + dev_dbg(&isys->adev->dev, + "Interlaced field ready. field = %d\n", field); + break; + } + } while (i != ip->short_packet_trace_index); + if (!msg_matched) + /* We have walked through the whole buffer. */ + dev_dbg(&isys->adev->dev, "No matched trace message found.\n"); + return field; +} + +bool ipu_isys_csi2_skew_cal_required(struct ipu_isys_csi2 *csi2) +{ + __s64 link_freq; + int rval; + + if (!csi2) + return false; + + /* Not yet ? 
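+	 * Skew calibration is deferred until every remote stream of this
+	 * receiver has started, i.e. remote_streams == stream_count.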
*/ + if (csi2->remote_streams != csi2->stream_count) + return false; + + rval = ipu_isys_csi2_get_link_freq(csi2, &link_freq); + if (rval) + return false; + + if (link_freq <= IPU_SKEW_CAL_LIMIT_HZ) + return false; + + return true; +} + +int ipu_isys_csi2_set_skew_cal(struct ipu_isys_csi2 *csi2, int enable) +{ + u32 val; + + val = readl(csi2->base + CSI2_REG_CSI_RX_CONFIG); + + if (enable) + val |= CSI2_CSI_RX_CONFIG_SKEWCAL_ENABLE; + else + val &= ~CSI2_CSI_RX_CONFIG_SKEWCAL_ENABLE; + + writel(val, csi2->base + CSI2_REG_CSI_RX_CONFIG); + + return 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-isys-isa.c b/drivers/media/pci/intel/ipu4/ipu4-isys-isa.c new file mode 100644 index 000000000000..e5af62af6622 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-isys-isa.c @@ -0,0 +1,1060 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2014 - 2018 Intel Corporation + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "ipu.h" +#include "ipu-bus.h" +#include "ipu-isys.h" +#include "ipu4-isys-isa.h" +#include "ipu-isys-subdev.h" +#include "ipu-isys-video.h" + +static const u32 isa_supported_codes_pad_sink[] = { + MEDIA_BUS_FMT_SBGGR14_1X14, + MEDIA_BUS_FMT_SGBRG14_1X14, + MEDIA_BUS_FMT_SGRBG14_1X14, + MEDIA_BUS_FMT_SRGGB14_1X14, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + 0, +}; + +/* Regardless of the input mode ISA always produces 16 bit output */ +static const u32 isa_supported_codes_pad_source[] = { + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + 0, +}; + +/* ISA configuration */ +struct ipu_isys_pixelformat isa_config_pfmts[] = { + {V4L2_FMT_IPU_ISA_CFG, 8, 8, 0, MEDIA_BUS_FMT_FIXED, 0}, + {}, +}; + +static const u32 isa_supported_codes_pad_cfg[] = { + MEDIA_BUS_FMT_FIXED, + 0, +}; + +static const u32 isa_supported_codes_pad_3a[] = { + MEDIA_BUS_FMT_FIXED, + 0, +}; + +static const u32 isa_supported_codes_pad_source_scaled[] = { + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_YUYV12_1X24, + 0, +}; + +static const u32 *isa_supported_codes[] = { + isa_supported_codes_pad_sink, + isa_supported_codes_pad_source, + isa_supported_codes_pad_cfg, + isa_supported_codes_pad_3a, + isa_supported_codes_pad_source_scaled, +}; + +static struct v4l2_subdev_internal_ops isa_sd_internal_ops = { + .open = ipu_isys_subdev_open, + .close = ipu_isys_subdev_close, +}; + +static int isa_config_vidioc_g_fmt_vid_out_mplane(struct file *file, void *fh, + struct v4l2_format *fmt) +{ + struct ipu_isys_video *av = video_drvdata(file); + + fmt->fmt.pix_mp = av->mpix; + + return 0; +} + +static const struct ipu_isys_pixelformat * +isa_config_try_fmt_vid_out_mplane(struct ipu_isys_video *av, + struct v4l2_pix_format_mplane *mpix) +{ + const struct ipu_isys_pixelformat *pfmt = + ipu_isys_get_pixelformat(av, mpix->pixelformat); + + if (!pfmt) + return NULL; + mpix->pixelformat = pfmt->pixelformat; + mpix->num_planes = ISA_CFG_BUF_PLANES; + + mpix->plane_fmt[ISA_CFG_BUF_PLANE_PG].bytesperline = 0; + mpix->plane_fmt[ISA_CFG_BUF_PLANE_PG].sizeimage = + ALIGN(max_t(u32, 
sizeof(struct ia_css_process_group_light), + mpix->plane_fmt[ISA_CFG_BUF_PLANE_PG].sizeimage), + av->isys->line_align); + + mpix->plane_fmt[ISA_CFG_BUF_PLANE_DATA].bytesperline = 0; + mpix->plane_fmt[ISA_CFG_BUF_PLANE_DATA].sizeimage = + ALIGN(max(1U, + mpix->plane_fmt[ISA_CFG_BUF_PLANE_DATA].sizeimage), + av->isys->line_align); + + return pfmt; +} + +static int isa_config_vidioc_s_fmt_vid_out_mplane(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu_isys_video *av = video_drvdata(file); + + if (av->aq.vbq.streaming) + return -EBUSY; + + av->pfmt = isa_config_try_fmt_vid_out_mplane(av, &f->fmt.pix_mp); + av->mpix = f->fmt.pix_mp; + + return 0; +} + +static int isa_config_vidioc_try_fmt_vid_out_mplane(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct ipu_isys_video *av = video_drvdata(file); + + isa_config_try_fmt_vid_out_mplane(av, &f->fmt.pix_mp); + return 0; +} + +static const struct v4l2_ioctl_ops isa_config_ioctl_ops = { + .vidioc_querycap = ipu_isys_vidioc_querycap, + .vidioc_enum_fmt_vid_cap = ipu_isys_vidioc_enum_fmt, + .vidioc_g_fmt_vid_out_mplane = isa_config_vidioc_g_fmt_vid_out_mplane, + .vidioc_s_fmt_vid_out_mplane = isa_config_vidioc_s_fmt_vid_out_mplane, + .vidioc_try_fmt_vid_out_mplane = + isa_config_vidioc_try_fmt_vid_out_mplane, + .vidioc_g_fmt_vid_cap_mplane = isa_config_vidioc_g_fmt_vid_out_mplane, + .vidioc_s_fmt_vid_cap_mplane = isa_config_vidioc_s_fmt_vid_out_mplane, + .vidioc_try_fmt_vid_cap_mplane = + isa_config_vidioc_try_fmt_vid_out_mplane, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_expbuf = vb2_ioctl_expbuf, +}; + +static const struct v4l2_subdev_core_ops isa_sd_core_ops = { + .subscribe_event = v4l2_ctrl_subdev_subscribe_event, + .unsubscribe_event = v4l2_event_subdev_unsubscribe, +}; + +static int set_stream(struct v4l2_subdev *sd, int enable) +{ + struct ipu_isys_isa *isa = to_ipu_isys_isa(sd); + unsigned int i; + + if (enable) + return 0; + + for (i = 0; i < ISA_CFG_BUF_PLANES; i++) + isa->next_param[i] = NULL; + + return 0; +} + +static const struct v4l2_subdev_video_ops isa_sd_video_ops = { + .s_stream = set_stream, +}; + +static const struct v4l2_subdev_pad_ops isa_sd_pad_ops = { + .link_validate = ipu_isys_subdev_link_validate, + .get_fmt = ipu_isys_subdev_get_ffmt, + .set_fmt = ipu_isys_subdev_set_ffmt, + .get_selection = ipu_isys_subdev_get_sel, + .set_selection = ipu_isys_subdev_set_sel, + .enum_mbus_code = ipu_isys_subdev_enum_mbus_code, +}; + +static struct v4l2_subdev_ops isa_sd_ops = { + .core = &isa_sd_core_ops, + .video = &isa_sd_video_ops, + .pad = &isa_sd_pad_ops, +}; + +static int isa_link_validate(struct media_link *link) +{ + struct ipu_isys_pipeline *ip; + struct media_pipeline *pipe; + + /* Non-video node source */ + if (is_media_entity_v4l2_subdev(link->source->entity)) + return v4l2_subdev_link_validate(link); + + pipe = link->sink->entity->pipe; + ip = to_ipu_isys_pipeline(pipe); + ip->nr_queues++; + + return 0; +} + +static struct media_entity_operations isa_entity_ops = { + .link_validate = isa_link_validate, +}; + +void ipu_isys_isa_cleanup(struct ipu_isys_isa *isa) +{ + v4l2_device_unregister_subdev(&isa->asd.sd); + ipu_isys_subdev_cleanup(&isa->asd); + ipu_isys_video_cleanup(&isa->av_scaled); + 
ipu_isys_video_cleanup(&isa->av_config); + ipu_isys_video_cleanup(&isa->av_3a); + ipu_isys_video_cleanup(&isa->av); +} + +static void isa_set_ffmt(struct v4l2_subdev *sd, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0) + struct v4l2_subdev_fh *cfg, +#else + struct v4l2_subdev_pad_config *cfg, +#endif + struct v4l2_subdev_format *fmt) +{ + struct v4l2_mbus_framefmt *ffmt = + __ipu_isys_get_ffmt(sd, cfg, fmt->pad, fmt->stream, + fmt->which); + enum ipu_isys_subdev_pixelorder order; + enum isys_subdev_prop_tgt tgt; + + switch (fmt->pad) { + case ISA_PAD_SINK: + fmt->format.field = V4L2_FIELD_NONE; + *ffmt = fmt->format; + tgt = IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT; + ipu_isys_subdev_fmt_propagate(sd, cfg, &fmt->format, + NULL, tgt, fmt->pad, fmt->which); + return; + case ISA_PAD_SOURCE: { + struct v4l2_mbus_framefmt *sink_ffmt = + __ipu_isys_get_ffmt(sd, cfg, ISA_PAD_SINK, + fmt->stream, fmt->which); + struct v4l2_rect *r = + __ipu_isys_get_selection(sd, cfg, + V4L2_SEL_TGT_CROP, + ISA_PAD_SOURCE, + fmt->which); + + ffmt->width = r->width; + ffmt->height = r->height; + ffmt->field = sink_ffmt->field; + order = ipu_isys_subdev_get_pixelorder(sink_ffmt->code); + ffmt->code = isa_supported_codes_pad_source[order]; + return; + } + case ISA_PAD_CONFIG: + case ISA_PAD_3A: + ffmt->code = MEDIA_BUS_FMT_FIXED; + ffmt->width = 0; + ffmt->height = 0; + fmt->format = *ffmt; + return; + case ISA_PAD_SOURCE_SCALED: { + struct v4l2_mbus_framefmt *sink_ffmt = + __ipu_isys_get_ffmt(sd, cfg, ISA_PAD_SINK, + fmt->stream, fmt->which); + struct v4l2_rect *r = + __ipu_isys_get_selection(sd, cfg, + V4L2_SEL_TGT_CROP, + ISA_PAD_SOURCE_SCALED, + fmt->which); + + ffmt->width = r->width; + ffmt->height = r->height; + ffmt->field = sink_ffmt->field; + order = ipu_isys_subdev_get_pixelorder(sink_ffmt->code); + ffmt->code = + isa_supported_codes_pad_source_scaled[order]; + if (fmt->format.code == MEDIA_BUS_FMT_YUYV12_1X24) + ffmt->code = MEDIA_BUS_FMT_YUYV12_1X24; + + return; + } + default: + WARN_ON(1); + } +} + +static int isa_s_ctrl(struct v4l2_ctrl *ctrl) +{ + return 0; +} + +static const struct v4l2_ctrl_ops isa_ctrl_ops = { + .s_ctrl = isa_s_ctrl, +}; + +static void isa_capture_done(struct ipu_isys_pipeline *ip, + struct ipu_fw_isys_resp_info_abi *info) +{ + struct ipu_isys_isa *isa = &ip->isys->isa; + struct ipu_isys_queue *aq = &isa->av_config.aq; + struct ipu_isys_buffer *ib; + unsigned long flags; + + if (WARN_ON_ONCE(list_empty(&aq->active))) + return; + + spin_lock_irqsave(&aq->lock, flags); + ib = list_last_entry(&aq->active, struct ipu_isys_buffer, head); + list_del(&ib->head); + dev_dbg(&ip->isys->adev->dev, "isa cfg: dequeued buffer %p", ib); + spin_unlock_irqrestore(&aq->lock, flags); + + ipu_isys_buf_calc_sequence_time(ib, info); + ipu_isys_queue_buf_done(ib); + + aq = &isa->av_3a.aq; + + if (isa->av_3a.vdev.entity.pipe != isa->av_config.vdev.entity.pipe) { + dev_dbg(&ip->isys->adev->dev, "3a disabled\n"); + return; + } + + if (WARN_ON_ONCE(list_empty(&aq->active))) + return; + + spin_lock_irqsave(&aq->lock, flags); + ib = list_last_entry(&aq->active, struct ipu_isys_buffer, head); + list_del(&ib->head); + dev_dbg(&ip->isys->adev->dev, "isa 3a: dequeued buffer %p", ib); + spin_unlock_irqrestore(&aq->lock, flags); + + ipu_isys_buf_calc_sequence_time(ib, info); + ipu_isys_queue_buf_done(ib); +} + +/* Maximum size of the buffer-specific process group. 
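+ * A single page is assumed to be sufficient; isa_import_pg() rejects
+ * light process groups that do not fit.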
*/ +#define PGL_SIZE PAGE_SIZE + +static int isa_3a_buf_init(struct vb2_buffer *vb) +{ + struct ipu_isys_isa_buffer *isa_buf = + vb2_buffer_to_ipu_isys_isa_buffer(vb); + + isa_buf->pgl.pg = kzalloc(PGL_SIZE, GFP_KERNEL); + if (!isa_buf->pgl.pg) + return -ENOMEM; + + return 0; +} + +static void isa_3a_buf_cleanup(struct vb2_buffer *vb) +{ + struct ipu_isys_isa_buffer *isa_buf = + vb2_buffer_to_ipu_isys_isa_buffer(vb); + + kfree(isa_buf->pgl.pg); +} + +static int isa_config_buf_init(struct vb2_buffer *vb) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_isa_buffer *isa_buf = + vb2_buffer_to_ipu_isys_isa_buffer(vb); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; +#else + unsigned long attrs; +#endif + int rval; + + rval = isa_3a_buf_init(vb); + if (rval) + return rval; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); +#else + attrs = DMA_ATTR_NON_CONSISTENT; +#endif + + isa_buf->pgl.common_pg = + dma_alloc_attrs(&av->isys->adev->dev, PGL_SIZE << 1, + &isa_buf->pgl.iova, GFP_KERNEL | __GFP_ZERO, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + &attrs +#else + attrs +#endif + ); + + dev_dbg(&av->isys->adev->dev, + "buf_init: index %u, cpu addr %p, dma addr %pad\n", +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.index, +#else + vb->index, +#endif + isa_buf->pgl.common_pg, &isa_buf->pgl.iova); + + if (!isa_buf->pgl.common_pg) { + isa_3a_buf_cleanup(vb); + return -ENOMEM; + } + + return 0; +} + +static void isa_config_buf_cleanup(struct vb2_buffer *vb) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_isa_buffer *isa_buf = + vb2_buffer_to_ipu_isys_isa_buffer(vb); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + struct dma_attrs attrs; +#else + unsigned long attrs; +#endif + + dev_dbg(&av->isys->adev->dev, + "buf_cleanup: index %u, cpu addr %p, dma addr %pad\n", +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_buf.index, +#else + vb->index, +#endif + isa_buf->pgl.pg, &isa_buf->pgl.iova); + if (!isa_buf->pgl.pg) + return; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + init_dma_attrs(&attrs); + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); +#else + attrs = DMA_ATTR_NON_CONSISTENT; +#endif + + dma_free_attrs(&av->isys->adev->dev, PGL_SIZE << 1, + isa_buf->pgl.common_pg, isa_buf->pgl.iova, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + &attrs +#else + attrs +#endif + ); + + isa_3a_buf_cleanup(vb); +} + +static void +isa_prepare_firmware_stream_cfg(struct ipu_isys_video *av, + struct ipu_fw_isys_stream_cfg_data_abi *cfg) +{ + struct v4l2_rect *r; + unsigned int pad, cropping_location, res_info; + + if (av == &av->isys->isa.av) { + pad = ISA_PAD_SOURCE; + cropping_location = + IPU_FW_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED; + res_info = IPU_FW_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED; + } else if (av == &av->isys->isa.av_scaled) { + pad = ISA_PAD_SOURCE_SCALED; + cropping_location = + IPU_FW_ISYS_CROPPING_LOCATION_POST_ISA_SCALED; + res_info = IPU_FW_ISYS_RESOLUTION_INFO_POST_ISA_SCALED; + } else { + WARN_ON(1); + return; + } + + r = __ipu_isys_get_selection(&av->isys->isa.asd.sd, NULL, + V4L2_SEL_TGT_CROP, pad, + V4L2_SUBDEV_FORMAT_ACTIVE); + + cfg->crop[cropping_location].top_offset = r->top; + cfg->crop[cropping_location].left_offset = 
r->left; + cfg->crop[cropping_location].bottom_offset = r->top + r->height; + cfg->crop[cropping_location].right_offset = r->left + r->width; + + r = __ipu_isys_get_selection(&av->isys->isa.asd.sd, NULL, + V4L2_SEL_TGT_COMPOSE, pad, + V4L2_SUBDEV_FORMAT_ACTIVE); + + cfg->isa_cfg.isa_res[res_info].height = r->height; + cfg->isa_cfg.isa_res[res_info].width = r->width; + ipu_isys_prepare_firmware_stream_cfg_default(av, cfg); +} + +static void +isa_prepare_firmware_stream_cfg_param(struct ipu_isys_video *av, + struct ipu_fw_isys_stream_cfg_data_abi + *cfg) +{ + struct ipu_isys_isa *isa = &av->isys->isa; + struct ipu_isys_pipeline *ip = + to_ipu_isys_pipeline(av->vdev.entity.pipe); + + cfg->isa_cfg.cfg.blc = !!(isa->isa_en->val & V4L2_IPU_ISA_EN_BLC); + cfg->isa_cfg.cfg.lsc = !!(isa->isa_en->val & V4L2_IPU_ISA_EN_LSC); + cfg->isa_cfg.cfg.dpc = !!(isa->isa_en->val & V4L2_IPU_ISA_EN_DPC); + cfg->isa_cfg.cfg.downscaler = + !!(isa->isa_en->val & V4L2_IPU_ISA_EN_SCALER); + cfg->isa_cfg.cfg.awb = !!(isa->isa_en->val & V4L2_IPU_ISA_EN_AWB); + cfg->isa_cfg.cfg.af = !!(isa->isa_en->val & V4L2_IPU_ISA_EN_AF); + cfg->isa_cfg.cfg.ae = !!(isa->isa_en->val & V4L2_IPU_ISA_EN_AE); + + cfg->isa_cfg.cfg.send_irq_stats_ready = 1; + cfg->isa_cfg.cfg.send_resp_stats_ready = 1; + ipu_isys_video_add_capture_done(ip, isa_capture_done); +} + +static bool is_capture_terminal(struct ia_css_terminal *t) +{ + switch (t->terminal_type) { + case IPU_FW_TERMINAL_TYPE_PARAM_CACHED_OUT: + case IPU_FW_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + case IPU_FW_TERMINAL_TYPE_PARAM_SLICED_OUT: + return true; + default: + return false; + } +} + +/* Return the pointer to the terminal payload's IOVA. */ +static int isa_terminal_get_iova(struct device *dev, struct ia_css_terminal *t, + u32 **iova) +{ + switch (t->terminal_type) { + case IPU_FW_TERMINAL_TYPE_PARAM_CACHED_IN: + case IPU_FW_TERMINAL_TYPE_PARAM_CACHED_OUT:{ + struct ia_css_param_terminal *tpterm = (void *)t; + + *iova = &tpterm->param_payload.buffer; + break; + } + case IPU_FW_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IPU_FW_TERMINAL_TYPE_PARAM_SPATIAL_OUT:{ + struct ia_css_spatial_param_terminal *tpterm = + (void *)t; + + *iova = &tpterm->param_payload.buffer; + break; + } + case IPU_FW_TERMINAL_TYPE_PARAM_SLICED_IN: + case IPU_FW_TERMINAL_TYPE_PARAM_SLICED_OUT:{ + struct ia_css_sliced_param_terminal *tpterm = (void *)t; + + *iova = &tpterm->param_payload.buffer; + break; + } + case IPU_FW_TERMINAL_TYPE_PROGRAM:{ + struct ia_css_program_terminal *tpterm = (void *)t; + + *iova = &tpterm->param_payload.buffer; + break; + } + default: + dev_dbg(dev, "unhandled terminal type %u\n", t->terminal_type); + return -EINVAL; + } + + return 0; +} + +/* + * Validate a process group, and add the IOVA of the data plane to the + * offsets related to the start of the data plane. 
+ */ +static int isa_import_pg(struct vb2_buffer *vb) +{ + void *__pg = vb2_plane_vaddr(vb, ISA_CFG_BUF_PLANE_PG); + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_isa_buffer *isa_buf = + vb2_buffer_to_ipu_isys_isa_buffer(vb); + struct ia_css_process_group_light *pg = isa_buf->pgl.pg; + bool capture = aq->vbq.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + u32 addr = vb2_dma_contig_plane_dma_addr(vb, + ISA_CFG_BUF_PLANE_DATA); + unsigned int i; + + if (!__pg) { + dev_warn(&av->isys->adev->dev, + "virtual mapping of the buffer failed\n"); + return -EINVAL; + } + + if (vb2_plane_size(vb, ISA_CFG_BUF_PLANE_PG) > PGL_SIZE) { + dev_dbg(&av->isys->adev->dev, + "process group too large, max %lu\n", PGL_SIZE); + return -EINVAL; + } + + /* + * Copy the light process group to a kernel buffer so that it + * cannot be modified by user space. + */ + memcpy(pg, __pg, vb2_plane_size(vb, ISA_CFG_BUF_PLANE_PG)); + + if (pg->size > vb2_plane_size(vb, ISA_CFG_BUF_PLANE_PG)) { + dev_dbg(&av->isys->adev->dev, + "process group size too large (%u bytes, %lu bytes available)\n", + pg->size, vb2_plane_size(vb, ISA_CFG_BUF_PLANE_PG)); + return -EINVAL; + } + + if (!pg->terminal_count) { + dev_dbg(&av->isys->adev->dev, "no terminals defined\n"); + return -EINVAL; + } + + if ((void *)(ia_css_terminal_offsets(pg) + + pg->terminal_count * sizeof(uint16_t)) - (void *)pg + > pg->size) { + dev_dbg(&av->isys->adev->dev, + "terminal offsets do not fit in the buffer\n"); + return -EINVAL; + } + + for (i = 0; i < pg->terminal_count; i++) { + struct ia_css_terminal *t = to_ia_css_terminal(pg, i); + u32 *iova; + int rval; + + if ((void *)t + sizeof(*t) - (void *)pg > pg->size) { + dev_dbg(&av->isys->adev->dev, + "terminal %u does not fit in the buffer\n", i); + return -EINVAL; + } + + dev_dbg(&av->isys->adev->dev, + "terminal: terminal %u, size %u, capture %u / %u\n", + i, t->size, capture, is_capture_terminal(t)); + + if (capture != is_capture_terminal(t)) + continue; + + dev_dbg(&av->isys->adev->dev, "terminal: %u offset %u\n", i, + ia_css_terminal_offsets(pg)[i]); + + rval = isa_terminal_get_iova(&av->isys->adev->dev, t, &iova); + if (rval) + return rval; + + dev_dbg(&av->isys->adev->dev, + "terminal: offset 0x%x, address 0x%8.8x\n", + *iova, (u32) addr + *iova); + + if (addr + *iova < addr) { + dev_dbg(&av->isys->adev->dev, + "address space overflow\n"); + return -EINVAL; + } + + if (*iova > vb2_plane_size(vb, ISA_CFG_BUF_PLANE_DATA)) { + dev_dbg(&av->isys->adev->dev, + "offset outside the buffer\n"); + return -EINVAL; + } + + /* + * Add the IOVA of the data plane to the terminal + * payload's offset. + */ + *iova += addr; + } + + return 0; +} + +static int isa_terminal_buf_prepare(struct vb2_buffer *vb) +{ + struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + unsigned int i; + + for (i = 0; i < ISA_CFG_BUF_PLANES; i++) { + vb2_set_plane_payload(vb, i, av->mpix.plane_fmt[i].sizeimage); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + vb->v4l2_planes[i].data_offset = 0; +#else + vb->planes[i].data_offset = 0; +#endif + } + + return isa_import_pg(vb); +} + +/* + * Count relevant terminals in a light process group and add the + * number found to the common light process group. 
+ */ +static void +isa_config_count_valid_terminals(struct device *dev, + struct ia_css_process_group_light *cpg, + struct ia_css_process_group_light *pg, + bool capture) +{ + unsigned int i; + + for (i = 0; i < pg->terminal_count; i++) + if (capture == is_capture_terminal(to_ia_css_terminal(pg, i))) + cpg->terminal_count++; +} + +static void +isa_config_prepare_frame_buff_set_one(struct device *dev, + struct ia_css_process_group_light *cpg, + struct ia_css_process_group_light *pg, + dma_addr_t addr, bool capture, + unsigned int *terminal_count) +{ + unsigned int i; + + dev_dbg(dev, "terminal: size %u, count %u, offset %u\n", + pg->size, pg->terminal_count, pg->terminals_offset_offset); + + dev_dbg(dev, "terminal: copying %u terminal offsets to %p from %p\n", + pg->terminal_count, ia_css_terminal_offsets(cpg), + ia_css_terminal_offsets(pg)); + + for (i = 0; i < pg->terminal_count; i++) { + struct ia_css_terminal *t = to_ia_css_terminal(pg, i), *ct; + + dev_dbg(dev, + "terminal: parsing %u, size %u, capture %u / %u\n", + i, t->size, capture, is_capture_terminal(t)); + + if (capture != is_capture_terminal(t)) + continue; + + ia_css_terminal_offsets(cpg)[*terminal_count] = + ia_css_terminal_offset(cpg, *terminal_count); + + dev_dbg(dev, "terminal: %u offset %u\n", *terminal_count, + ia_css_terminal_offsets(cpg)[*terminal_count]); + + ct = to_ia_css_terminal(cpg, *terminal_count); + + dev_dbg(dev, + "terminal: copying terminal %p to %p (%u bytes)\n", + t, ct, t->size); + memcpy(ct, t, t->size); + + (*terminal_count)++; + } +} + +/* + * Move the terminals from a read-only or write-only light process + * group to a common process group. + */ +static void isa_config_prepare_frame_buff_set(struct vb2_buffer *__vb) +{ + struct ipu_isys_queue *aq = + vb2_queue_to_ipu_isys_queue(__vb->vb2_queue); + struct ipu_isys_video *av = ipu_isys_queue_to_video(aq); + struct ipu_isys_isa *isa = &av->isys->isa; + struct vb2_buffer *vb[ISA_PARAM_QUEUES]; + struct ia_css_process_group_light *pg[ISA_PARAM_QUEUES]; + dma_addr_t addr[ISA_PARAM_QUEUES]; + struct ia_css_process_group_light *cpg; + struct ipu_isys_isa_buffer *__isa_buf; + unsigned int terminal_count = 0, i; + bool capture = &av->isys->isa.av_3a.aq == aq; + + dev_dbg(&av->isys->adev->dev, "%s: capture %u\n", av->vdev.name, + capture); + + isa->next_param[capture] = __vb; + + /* Proceed only when both cfg and stats buffers are available. 
*/ + if (!isa->next_param[!capture]) + return; + + /* Obtain common process group light buffer from config buffer */ + __isa_buf = vb2_buffer_to_ipu_isys_isa_buffer( + isa->next_param[ISA_CFG_BUF_PLANE_PG]); + + for (i = 0; i < ISA_PARAM_QUEUES; i++) { + struct ipu_isys_isa_buffer *isa_buf; + + vb[i] = isa->next_param[i]; + isa_buf = vb2_buffer_to_ipu_isys_isa_buffer(vb[i]); + pg[i] = isa_buf->pgl.pg; + addr[i] = vb2_dma_contig_plane_dma_addr(vb[i], + ISA_CFG_BUF_PLANE_DATA); + + dma_sync_single_for_device(&av->isys->adev->dev, + addr[i], vb2_plane_size(vb[i], + ISA_CFG_BUF_PLANE_DATA), + DMA_TO_DEVICE); + + dev_dbg(&av->isys->adev->dev, + "terminal: queue %u, plane 0: vaddr %p, dma_addr %pad program group size %u program group terminals %u\n", + i, pg[i], &addr[i], pg[i]->size, pg[i]->terminal_count); + } + + cpg = __isa_buf->pgl.common_pg; + cpg->terminal_count = 0; + cpg->terminals_offset_offset = sizeof(*cpg); + + if (cpg->size > PGL_SIZE << 1) { + dev_err(&av->isys->adev->dev, + "not enough room for terms, %lu found, %u needed\n", + PGL_SIZE << 1, cpg->size); + return; + } + + for (i = 0; i < ISA_PARAM_QUEUES; i++) + isa_config_count_valid_terminals(&av->isys->adev->dev, + cpg, pg[i], i); + + for (i = 0; i < ISA_PARAM_QUEUES; i++) { + isa_config_prepare_frame_buff_set_one(&av->isys->adev->dev, cpg, + pg[i], addr[i], i, + &terminal_count); + + isa->next_param[i] = NULL; + } + + cpg->size = ia_css_terminal_offset(cpg, cpg->terminal_count); + + dev_dbg(&av->isys->adev->dev, "common pg size 0x%x count %d\n", + cpg->size, cpg->terminal_count); + + dma_sync_single_for_device(&av->isys->adev->dev, __isa_buf->pgl.iova, + PGL_SIZE << 1, DMA_TO_DEVICE); +} + +static void +isa_config_fill_frame_buff_set_pin(struct vb2_buffer *vb, + struct ipu_fw_isys_frame_buff_set_abi *set) +{ + struct ipu_isys_isa_buffer *isa_buf = + vb2_buffer_to_ipu_isys_isa_buffer(vb); + + set->process_group_light.addr = isa_buf->pgl.iova; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + set->process_group_light.param_buf_id = vb->v4l2_buf.index + 1; +#else + set->process_group_light.param_buf_id = vb->index + 1; +#endif +} + +static void isa_ctrl_init(struct v4l2_subdev *sd) +{ + struct ipu_isys_isa *isa = to_ipu_isys_isa(sd); + static const struct v4l2_ctrl_config cfg = { + .ops = &isa_ctrl_ops, + .id = V4L2_CID_IPU_ISA_EN, + .name = "ISA enable", + .type = V4L2_CTRL_TYPE_BITMASK, + .max = V4L2_IPU_ISA_EN_BLC + | V4L2_IPU_ISA_EN_LSC + | V4L2_IPU_ISA_EN_DPC + | V4L2_IPU_ISA_EN_SCALER + | V4L2_IPU_ISA_EN_AWB + | V4L2_IPU_ISA_EN_AF | V4L2_IPU_ISA_EN_AE, + }; + + isa->isa_en = v4l2_ctrl_new_custom(&isa->asd.ctrl_handler, &cfg, NULL); +} + +int ipu_isys_isa_init(struct ipu_isys_isa *isa, + struct ipu_isys *isys, void __iomem *base) +{ + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = ISA_PAD_SINK, + .format = { + .width = 4096, + .height = 3072, + }, + }; + struct v4l2_subdev_format fmt_config = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = ISA_PAD_CONFIG, + }; + struct v4l2_subdev_format fmt_3a = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = ISA_PAD_3A, + }; + int rval; + + isa->base = base; + + isa->asd.sd.entity.ops = &isa_entity_ops; + isa->asd.ctrl_init = isa_ctrl_init; + isa->asd.isys = isys; + + rval = ipu_isys_subdev_init(&isa->asd, &isa_sd_ops, 1, + NR_OF_ISA_PADS, + NR_OF_ISA_STREAMS, + NR_OF_ISA_SOURCE_PADS, + NR_OF_ISA_SINK_PADS, + V4L2_SUBDEV_FL_HAS_EVENTS); + if (rval) + goto fail; + + isa->asd.pad[ISA_PAD_SINK].flags = MEDIA_PAD_FL_SINK + | MEDIA_PAD_FL_MUST_CONNECT; + 
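+ /* + * Pad layout (see ipu4-isys-isa.h): pad 0 is the image sink, pad 1 + * the non-scaled source, pad 2 the parameter (config) sink, pad 3 + * the 3A statistics source and pad 4 the scaled source. The two + * sink pads are marked MUST_CONNECT. + */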
isa->asd.pad[ISA_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; + isa->asd.valid_tgts[ISA_PAD_SOURCE].crop = true; + isa->asd.pad[ISA_PAD_CONFIG].flags = MEDIA_PAD_FL_SINK + | MEDIA_PAD_FL_MUST_CONNECT; + isa->asd.pad[ISA_PAD_3A].flags = MEDIA_PAD_FL_SOURCE; + isa->asd.pad[ISA_PAD_SOURCE_SCALED].flags = MEDIA_PAD_FL_SOURCE; + isa->asd.valid_tgts[ISA_PAD_SOURCE_SCALED].compose = true; + isa->asd.valid_tgts[ISA_PAD_SOURCE_SCALED].crop = true; + + isa->asd.isl_mode = IPU_ISL_ISA; + isa->asd.supported_codes = isa_supported_codes; + isa->asd.set_ffmt = isa_set_ffmt; + ipu_isys_subdev_set_ffmt(&isa->asd.sd, NULL, &fmt); + ipu_isys_subdev_set_ffmt(&isa->asd.sd, NULL, &fmt_config); + ipu_isys_subdev_set_ffmt(&isa->asd.sd, NULL, &fmt_3a); + + isa->asd.sd.internal_ops = &isa_sd_internal_ops; + snprintf(isa->asd.sd.name, sizeof(isa->asd.sd.name), + IPU_ISYS_ENTITY_PREFIX " ISA"); + v4l2_set_subdevdata(&isa->asd.sd, &isa->asd); + rval = v4l2_device_register_subdev(&isys->v4l2_dev, &isa->asd.sd); + if (rval) { + dev_info(&isys->adev->dev, "can't register v4l2 subdev\n"); + goto fail; + } + + snprintf(isa->av.vdev.name, sizeof(isa->av.vdev.name), + IPU_ISYS_ENTITY_PREFIX " ISA capture"); + isa->av.isys = isys; + isa->av.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_RAW_NS; + isa->av.pfmts = ipu_isys_pfmts; + isa->av.try_fmt_vid_mplane = ipu_isys_video_try_fmt_vid_mplane_default; + isa->av.prepare_firmware_stream_cfg = isa_prepare_firmware_stream_cfg; + isa->av.aq.buf_prepare = ipu_isys_buf_prepare; + isa->av.aq.fill_frame_buff_set_pin = + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin; + isa->av.aq.link_fmt_validate = ipu_isys_link_fmt_validate; + isa->av.aq.vbq.buf_struct_size = sizeof(struct ipu_isys_video_buffer); + + rval = ipu_isys_video_init(&isa->av, &isa->asd.sd.entity, + ISA_PAD_SOURCE, MEDIA_PAD_FL_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, "can't init video node\n"); + goto fail; + } + + snprintf(isa->av_config.vdev.name, sizeof(isa->av_config.vdev.name), + IPU_ISYS_ENTITY_PREFIX " ISA config"); + isa->av_config.isys = isys; + isa->av_config.pfmts = isa_config_pfmts; + isa->av_config.try_fmt_vid_mplane = isa_config_try_fmt_vid_out_mplane; + isa->av_config.prepare_firmware_stream_cfg = + isa_prepare_firmware_stream_cfg_param; + isa->av_config.vdev.ioctl_ops = &isa_config_ioctl_ops; + isa->av_config.aq.buf_init = isa_config_buf_init; + isa->av_config.aq.buf_cleanup = isa_config_buf_cleanup; + isa->av_config.aq.buf_prepare = isa_terminal_buf_prepare; + isa->av_config.aq.prepare_frame_buff_set = + isa_config_prepare_frame_buff_set; + isa->av_config.aq.fill_frame_buff_set_pin = + isa_config_fill_frame_buff_set_pin; + isa->av_config.aq.link_fmt_validate = ipu_isys_link_fmt_validate; + isa->av_config.aq.vbq.io_modes = VB2_MMAP | VB2_DMABUF; + isa->av_config.aq.vbq.buf_struct_size = + sizeof(struct ipu_isys_isa_buffer); + + rval = ipu_isys_video_init(&isa->av_config, &isa->asd.sd.entity, + ISA_PAD_CONFIG, MEDIA_PAD_FL_SOURCE, 0); + if (rval) { + dev_info(&isys->adev->dev, "can't init video node\n"); + goto fail; + } + + snprintf(isa->av_3a.vdev.name, sizeof(isa->av_3a.vdev.name), + IPU_ISYS_ENTITY_PREFIX " ISA 3A stats"); + isa->av_3a.isys = isys; + isa->av_3a.pfmts = isa_config_pfmts; + isa->av_3a.try_fmt_vid_mplane = isa_config_try_fmt_vid_out_mplane; + isa->av_3a.prepare_firmware_stream_cfg = + isa_prepare_firmware_stream_cfg_param; + isa->av_3a.vdev.ioctl_ops = &isa_config_ioctl_ops; + isa->av_3a.aq.buf_init = isa_3a_buf_init; + isa->av_3a.aq.buf_cleanup = isa_3a_buf_cleanup; + 
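+ /* + * Note: unlike the config node set up above, the 3A node allocates + * only the kernel-side copy of the process group (isa_3a_buf_init); + * the DMA-able common page pair is owned by the config node's + * isa_config_buf_init. + */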
isa->av_3a.aq.buf_prepare = isa_terminal_buf_prepare; + isa->av_3a.aq.prepare_frame_buff_set = + isa_config_prepare_frame_buff_set; + isa->av_3a.aq.link_fmt_validate = ipu_isys_link_fmt_validate; + isa->av_3a.aq.vbq.io_modes = VB2_MMAP | VB2_DMABUF; + isa->av_3a.aq.vbq.buf_struct_size = sizeof(struct ipu_isys_isa_buffer); + isa->av_3a.line_header_length = 4; /* Set to non-zero to force mplane */ + + rval = ipu_isys_video_init(&isa->av_3a, &isa->asd.sd.entity, + ISA_PAD_3A, MEDIA_PAD_FL_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, "can't init video node\n"); + goto fail; + } + + snprintf(isa->av_scaled.vdev.name, sizeof(isa->av_scaled.vdev.name), + IPU_ISYS_ENTITY_PREFIX " ISA scaled capture"); + isa->av_scaled.isys = isys; + isa->av_scaled.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_RAW_S; + isa->av_scaled.pfmts = isa->av.pfmts; + isa->av_scaled.try_fmt_vid_mplane = + ipu_isys_video_try_fmt_vid_mplane_default; + isa->av_scaled.prepare_firmware_stream_cfg = + isa_prepare_firmware_stream_cfg; + isa->av_scaled.aq.buf_prepare = ipu_isys_buf_prepare; + isa->av_scaled.aq.fill_frame_buff_set_pin = + ipu_isys_buffer_list_to_ipu_fw_isys_frame_buff_set_pin; + isa->av_scaled.aq.link_fmt_validate = ipu_isys_link_fmt_validate; + isa->av_scaled.aq.vbq.buf_struct_size = + sizeof(struct ipu_isys_video_buffer); + + rval = ipu_isys_video_init(&isa->av_scaled, &isa->asd.sd.entity, + ISA_PAD_SOURCE_SCALED, MEDIA_PAD_FL_SINK, 0); + if (rval) { + dev_info(&isys->adev->dev, "can't init video node\n"); + goto fail; + } + + return 0; + +fail: + ipu_isys_isa_cleanup(isa); + + return rval; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-isys-isa.h b/drivers/media/pci/intel/ipu4/ipu4-isys-isa.h new file mode 100644 index 000000000000..649714dca2f4 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-isys-isa.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2014 - 2018 Intel Corporation */ + +#ifndef IPU_ISYS_ISA_H +#define IPU_ISYS_ISA_H + +#include +#include + +#include "ipu-isys-queue.h" +#include "ipu-isys-subdev.h" +#include "ipu-isys-video.h" + +#define ISA_PAD_SINK 0 +#define ISA_PAD_SOURCE 1 +#define ISA_PAD_CONFIG 2 +#define ISA_PAD_3A 3 +#define ISA_PAD_SOURCE_SCALED 4 + +#define NR_OF_ISA_PADS 5 +#define NR_OF_ISA_SINK_PADS 2 +#define NR_OF_ISA_SOURCE_PADS 3 +#define NR_OF_ISA_STREAMS 1 + +struct ipu_isys; +struct ia_css_process_group_light; + +/* + * struct ipu_isys_isa_buffer + * + * @ivb: Base buffer type which provides inheritance of + * isys buffer and vb2 buffer. 
+ * @pgl: program group light DMA buffer + * @pgl.pg: process group, copy of the buffer's plane 0 + * but not mapped to user space + * @pgl.common_pg: A combined process group from both video buffers + * @pgl.iova: IOVA of common_pg + */ +struct ipu_isys_isa_buffer { + struct ipu_isys_video_buffer ivb; + struct { + struct ia_css_process_group_light *pg; + struct ia_css_process_group_light *common_pg; + dma_addr_t iova; + } pgl; +}; + +/* ISA CFG will use multiplanar buffers */ +#define ISA_CFG_BUF_PLANE_PG 0 +#define ISA_CFG_BUF_PLANE_DATA 1 +#define ISA_CFG_BUF_PLANES 2 + +#define ISA_PARAM_QUEUES 2 + +/* + * struct ipu_isys_isa + */ +struct ipu_isys_isa { + struct ipu_isys_subdev asd; + struct ipu_isys_video av; + struct ipu_isys_video av_config; + struct ipu_isys_video av_3a; + struct ipu_isys_video av_scaled; + + void __iomem *base; + + struct v4l2_ctrl *isa_en; + + struct vb2_buffer *next_param[ISA_PARAM_QUEUES]; /* config and 3a */ +}; + +#define to_ipu_isys_isa(sd) \ + container_of(to_ipu_isys_subdev(sd), \ + struct ipu_isys_isa, asd) + +#define vb2_buffer_to_ipu_isys_isa_buffer(__vb) \ + container_of(vb2_buffer_to_ipu_isys_video_buffer(__vb), \ + struct ipu_isys_isa_buffer, ivb) + +int ipu_isys_isa_init(struct ipu_isys_isa *isa, + struct ipu_isys *isys, void __iomem *base); +void ipu_isys_isa_cleanup(struct ipu_isys_isa *isa); +void ipu_isys_isa_isr(struct ipu_isys_isa *isa); + +#endif /* IPU_ISYS_ISA_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4-isys.c b/drivers/media/pci/intel/ipu4/ipu4-isys.c new file mode 100644 index 000000000000..30cc42db49c7 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-isys.c @@ -0,0 +1,412 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Intel Corporation + +#include + +#include "ipu.h" +#include "ipu-platform-regs.h" +#include "ipu-platform-buttress-regs.h" +#include "ipu-trace.h" +#include "ipu-isys.h" +#include "ipu-isys-video.h" +#include "ipu-isys-tpg.h" + +#ifndef V4L2_PIX_FMT_SBGGR14V32 +/* + * Non-vectorized 14-bit definitions have been upstreamed. + * To keep the various ipu4 builds compilable, use local + * definitions when the global ones don't exist. + */ +#define V4L2_PIX_FMT_SBGGR14V32 v4l2_fourcc('b', 'V', '0', 'M') +#define V4L2_PIX_FMT_SGBRG14V32 v4l2_fourcc('b', 'V', '0', 'N') +#define V4L2_PIX_FMT_SGRBG14V32 v4l2_fourcc('b', 'V', '0', 'O') +#define V4L2_PIX_FMT_SRGGB14V32 v4l2_fourcc('b', 'V', '0', 'P') +#endif + +const struct ipu_isys_pixelformat ipu_isys_pfmts[] = { + /* YUV vector format */ + {V4L2_PIX_FMT_YUYV420_V32, 24, 24, 0, MEDIA_BUS_FMT_YUYV12_1X24, + IPU_FW_ISYS_FRAME_FORMAT_YUV420_16}, + /* Raw bayer vector formats. 
*/ + {V4L2_PIX_FMT_SBGGR14V32, 16, 14, 0, MEDIA_BUS_FMT_SBGGR14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGBRG14V32, 16, 14, 0, MEDIA_BUS_FMT_SGBRG14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGRBG14V32, 16, 14, 0, MEDIA_BUS_FMT_SGRBG14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SRGGB14V32, 16, 14, 0, MEDIA_BUS_FMT_SRGGB14_1X14, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SBGGR12V32, 16, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGBRG12V32, 16, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGRBG12V32, 16, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SRGGB12V32, 16, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SBGGR10V32, 16, 10, 0, MEDIA_BUS_FMT_SBGGR10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGBRG10V32, 16, 10, 0, MEDIA_BUS_FMT_SGBRG10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGRBG10V32, 16, 10, 0, MEDIA_BUS_FMT_SGRBG10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SRGGB10V32, 16, 10, 0, MEDIA_BUS_FMT_SRGGB10_1X10, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SBGGR8_16V32, 16, 8, 0, MEDIA_BUS_FMT_SBGGR8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGBRG8_16V32, 16, 8, 0, MEDIA_BUS_FMT_SGBRG8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SGRBG8_16V32, 16, 8, 0, MEDIA_BUS_FMT_SGRBG8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_PIX_FMT_SRGGB8_16V32, 16, 8, 0, MEDIA_BUS_FMT_SRGGB8_1X8, + IPU_FW_ISYS_FRAME_FORMAT_RAW16}, + {V4L2_FMT_IPU_ISYS_META, 8, 8, 0, MEDIA_BUS_FMT_FIXED, + IPU_FW_ISYS_MIPI_DATA_TYPE_EMBEDDED}, + {} +}; + +struct ipu_trace_block isys_trace_blocks[] = { + { + .offset = TRACE_REG_IS_TRACE_UNIT_BASE, + .type = IPU_TRACE_BLOCK_TUN, + }, + { + .offset = TRACE_REG_IS_SP_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, + { + .offset = TRACE_REG_IS_SP_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_IS_ISL_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_IS_MMU_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_CSI2_TM_BASE, + .type = IPU_TRACE_CSI2, + }, + { + .offset = TRACE_REG_CSI2_3PH_TM_BASE, + .type = IPU_TRACE_CSI2_3PH, + }, + { + /* Note! this covers all 9 blocks */ + .offset = TRACE_REG_CSI2_SIG2SIO_GR_BASE(0), + .type = IPU_TRACE_SIG2CIOS, + }, + { + /* Note! 
this covers all 9 blocks */ + .offset = TRACE_REG_CSI2_PH3_SIG2SIO_GR_BASE(0), + .type = IPU_TRACE_SIG2CIOS, + }, + { + .offset = TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N, + .type = IPU_TRACE_TIMER_RST, + }, + { + .type = IPU_TRACE_BLOCK_END, + } +}; + +#ifdef CONFIG_VIDEO_INTEL_IPU4 +void isys_setup_hw(struct ipu_isys *isys) +{ + void __iomem *base = isys->pdata->base; + const u8 *thd = isys->pdata->ipdata->hw_variant.cdc_fifo_threshold; + u32 irqs; + unsigned int i; + + /* Enable irqs for all MIPI busses */ + irqs = IPU_ISYS_UNISPART_IRQ_CSI2(0) | + IPU_ISYS_UNISPART_IRQ_CSI2(1) | + IPU_ISYS_UNISPART_IRQ_CSI2(2) | + IPU_ISYS_UNISPART_IRQ_CSI2(3) | + IPU_ISYS_UNISPART_IRQ_CSI2(4) | IPU_ISYS_UNISPART_IRQ_CSI2(5); + + irqs |= IPU_ISYS_UNISPART_IRQ_SW; + + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_EDGE); + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE); + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_CLEAR); + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_MASK); + writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_ENABLE); + + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG); + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_MUX_REG); + + /* Write CDC FIFO threshold values for isys */ + for (i = 0; i < isys->pdata->ipdata->hw_variant.cdc_fifos; i++) + writel(thd[i], base + IPU_REG_ISYS_CDC_THRESHOLD(i)); +} +#endif + +#ifdef CONFIG_VIDEO_INTEL_IPU4P +static void ipu4p_isys_irq_cfg(struct ipu_isys *isys) +{ + void __iomem *base = isys->pdata->base; + int i, j; + struct { + u32 base; + u32 mask; + } irq_config[] = { + {IPU_REG_ISYS_UNISPART_IRQ_EDGE, 0x400018}, + {IPU_REG_ISYS_ISA_ACC_IRQ_CTRL_BASE, 0x0}, + {IPU_REG_ISYS_A_IRQ_CTRL_BASE, 0x0}, + {IPU_REG_ISYS_SIP0_IRQ_CTRL_BASE, 0xf}, + {IPU_REG_ISYS_SIP1_IRQ_CTRL_BASE, 0xf}, + }; + unsigned int offsets[4] = { + 0x0, 0x4, 0x10, 0x14 + }; + + for (i = 0; i < ARRAY_SIZE(irq_config); i++) { + for (j = 0; j < ARRAY_SIZE(offsets); j++) + writel(irq_config[i].mask, + base + irq_config[i].base + offsets[j]); + writel(0xffffffff, base + irq_config[i].base + 0xc); + } + + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG); + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_MUX_REG); +} + +static void ipu4p_isys_bb_cfg(struct ipu_isys *isys) +{ + void __iomem *isp_base = isys->adev->isp->base; + unsigned int i, val; + unsigned int bbconfig[4][3] = { + {4, 15, 0xf}, + {6, 15, 0x15}, + {12, 15, 0xf}, + {14, 15, 0x15}, + }; + + /* Config building block */ + for (i = 0; i < 4; i++) { + unsigned int bb = bbconfig[i][0]; + unsigned int crc = bbconfig[i][1]; + unsigned int afe = bbconfig[i][2]; + + val = readl(isp_base + BUTTRESS_REG_CPHYX_DLL_OVRD(bb)); + val &= ~0x7e; + val |= crc << 1; + val |= 1; + writel(val, isp_base + BUTTRESS_REG_CPHYX_DLL_OVRD(bb)); + val = readl(isp_base + BUTTRESS_REG_DPHYX_DLL_OVRD(bb)); + val |= 1; + writel(val, isp_base + BUTTRESS_REG_DPHYX_DLL_OVRD(bb)); + val &= ~1; + writel(val, isp_base + BUTTRESS_REG_DPHYX_DLL_OVRD(bb)); + val = afe | (2 << 29); + writel(val, isp_base + BUTTRESS_REG_BBX_AFE_CONFIG(bb)); + } +} + +static void ipu4p_isys_port_cfg(struct ipu_isys *isys) +{ + void __iomem *base = isys->pdata->base; + void __iomem *isp_base = isys->adev->isp->base; + + /* Port config */ + writel(0x3895, base + IPU_GPOFFSET + 0x14); + writel(0x3895, base + IPU_COMBO_GPOFFSET + 0x14); + writel((0x100 << 1) | (0x100 << 10) | (0x100 << 19), isp_base + + BUTTRESS_REG_CSI_BSCAN_EXCLUDE); +} + +void isys_setup_hw(struct ipu_isys *isys) +{ + ipu4p_isys_irq_cfg(isys); + ipu4p_isys_port_cfg(isys); + ipu4p_isys_bb_cfg(isys); +} 
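+ /* + * Illustrative sketch, not part of the original patch: a debug helper + * along these lines could read back the SIP IRQ status registers that + * ipu4p_isys_irq_cfg() above configures; the register names are the + * ones the IPU4P isys_isr() below uses. + */ +static void __maybe_unused ipu4p_isys_dump_irq_status(struct ipu_isys *isys) +{ + void __iomem *base = isys->pdata->base; + + dev_dbg(&isys->adev->dev, "sip0 status 0x%x, sip1 status 0x%x\n", + readl(base + IPU_REG_ISYS_SIP0_IRQ_CTRL_STATUS), + readl(base + IPU_REG_ISYS_SIP1_IRQ_CTRL_STATUS)); +}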
+#endif + +#ifdef CONFIG_VIDEO_INTEL_IPU4 +irqreturn_t isys_isr(struct ipu_bus_device *adev) +{ + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + void __iomem *base = isys->pdata->base; + u32 status; + + spin_lock(&isys->power_lock); + if (!isys->power) { + spin_unlock(&isys->power_lock); + return IRQ_NONE; + } + + status = readl(isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_STATUS); + do { + writel(status, isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_CLEAR); + + if (isys->isr_csi2_bits & status) { + unsigned int i; + + for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) { + if (IPU_ISYS_UNISPART_IRQ_CSI2(i) & status) + ipu_isys_csi2_isr(&isys->csi2[i]); + } + } + + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG); + + /* + * Handle a single FW event per checking the CSI-2 + * receiver SOF status. This is done in order to avoid + * the case where events arrive to the event queue and + * one of them is a SOF event which then could be + * handled before the SOF interrupt. This would pose + * issues in sequence numbering which is based on SOF + * interrupts, always assumed to arrive before FW SOF + * events. + */ + if (status & IPU_ISYS_UNISPART_IRQ_SW && !isys_isr_one(adev)) + status = IPU_ISYS_UNISPART_IRQ_SW; + else + status = 0; + + status |= readl(isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_STATUS); + } while (status & (isys->isr_csi2_bits + | IPU_ISYS_UNISPART_IRQ_SW) && + !isys->adev->isp->flr_done); + spin_unlock(&isys->power_lock); + + return IRQ_HANDLED; +} +#endif + +#ifdef CONFIG_VIDEO_INTEL_IPU4P +irqreturn_t isys_isr(struct ipu_bus_device *adev) +{ + struct ipu_isys *isys = ipu_bus_get_drvdata(adev); + void __iomem *base = isys->pdata->base; + u32 status; + unsigned int i; + u32 sip0_status, sip1_status; + struct { + u32 *status; + u32 mask; + } csi2_irq_mask[] = { + {&sip0_status, IPU_ISYS_CSI2_D_IRQ_MASK}, + {&sip1_status, IPU_ISYS_CSI2_A_IRQ_MASK}, + {&sip1_status, IPU_ISYS_CSI2_B_IRQ_MASK}, + {&sip1_status, IPU_ISYS_CSI2_C_IRQ_MASK}, + {&sip1_status, IPU_ISYS_CSI2_D_IRQ_MASK}, + }; + + spin_lock(&isys->power_lock); + if (!isys->power) { + spin_unlock(&isys->power_lock); + return IRQ_NONE; + } + + /* read unis sw irq */ + status = readl(isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_STATUS); + dev_dbg(&adev->dev, "isys irq status - unis sw irq = 0x%x", status); + + do { + /* clear unis sw irqs */ + writel(status, isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_CLEAR); + + /* read and clear sip irq status */ + sip0_status = readl(isys->pdata->base + + IPU_REG_ISYS_SIP0_IRQ_CTRL_STATUS); + sip1_status = readl(isys->pdata->base + + IPU_REG_ISYS_SIP1_IRQ_CTRL_STATUS); + dev_dbg(&adev->dev, "isys irq status - sip0 = 0x%x sip1 = 0x%x", + sip0_status, sip1_status); + writel(sip0_status, isys->pdata->base + + IPU_REG_ISYS_SIP0_IRQ_CTRL_CLEAR); + writel(sip1_status, isys->pdata->base + + IPU_REG_ISYS_SIP1_IRQ_CTRL_CLEAR); + + for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) { + if (*csi2_irq_mask[i].status & csi2_irq_mask[i].mask) + ipu_isys_csi2_isr(&isys->csi2[i]); + } + + writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG); + + /* + * Handle a single FW event per checking the CSI-2 + * receiver SOF status. This is done in order to avoid + * the case where events arrive to the event queue and + * one of them is a SOF event which then could be + * handled before the SOF interrupt. This would pose + * issues in sequence numbering which is based on SOF + * interrupts, always assumed to arrive before FW SOF + * events. 
+ */ + if (status & IPU_ISYS_UNISPART_IRQ_SW && !isys_isr_one(adev)) + status = IPU_ISYS_UNISPART_IRQ_SW; + else + status = 0; + + status |= readl(isys->pdata->base + + IPU_REG_ISYS_UNISPART_IRQ_STATUS); + } while (status & (isys->isr_csi2_bits + | IPU_ISYS_UNISPART_IRQ_SW) && + !isys->adev->isp->flr_done); + spin_unlock(&isys->power_lock); + + return IRQ_HANDLED; +} +#endif + +int tpg_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct ipu_isys_tpg *tpg = to_ipu_isys_tpg(sd); + __u32 code = tpg->asd.ffmt[TPG_PAD_SOURCE][0].code; + unsigned int bpp = ipu_isys_mbus_code_to_bpp(code); + + /* + * MIPI_GEN block is CSI2 FB. Need to enable/disable TPG selection + * register to control the TPG streaming. + */ + if (tpg->sel) + writel(enable ? 1 : 0, tpg->sel); + + if (!enable) { + writel(0, tpg->base + MIPI_GEN_REG_COM_ENABLE); + return 0; + } + + writel(MIPI_GEN_COM_DTYPE_RAW(bpp), + tpg->base + MIPI_GEN_REG_COM_DTYPE); + writel(ipu_isys_mbus_code_to_mipi(code), + tpg->base + MIPI_GEN_REG_COM_VTYPE); + writel(0, tpg->base + MIPI_GEN_REG_COM_VCHAN); + + writel(0, tpg->base + MIPI_GEN_REG_SYNG_NOF_FRAMES); + + writel(DIV_ROUND_UP(tpg->asd.ffmt[TPG_PAD_SOURCE][0].width * + bpp, BITS_PER_BYTE), + tpg->base + MIPI_GEN_REG_COM_WCOUNT); + writel(DIV_ROUND_UP(tpg->asd.ffmt[TPG_PAD_SOURCE][0].width, + MIPI_GEN_PPC), + tpg->base + MIPI_GEN_REG_SYNG_NOF_PIXELS); + writel(tpg->asd.ffmt[TPG_PAD_SOURCE][0].height, + tpg->base + MIPI_GEN_REG_SYNG_NOF_LINES); + + writel(0, tpg->base + MIPI_GEN_REG_TPG_MODE); + writel(-1, tpg->base + MIPI_GEN_REG_TPG_HCNT_MASK); + writel(-1, tpg->base + MIPI_GEN_REG_TPG_VCNT_MASK); + writel(-1, tpg->base + MIPI_GEN_REG_TPG_XYCNT_MASK); + writel(0, tpg->base + MIPI_GEN_REG_TPG_HCNT_DELTA); + writel(0, tpg->base + MIPI_GEN_REG_TPG_VCNT_DELTA); + + v4l2_ctrl_handler_setup(&tpg->asd.ctrl_handler); + + writel(2, tpg->base + MIPI_GEN_REG_COM_ENABLE); + return 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-psys.c b/drivers/media/pci/intel/ipu4/ipu4-psys.c new file mode 100644 index 000000000000..82e80d1ccbd3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-psys.c @@ -0,0 +1,988 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Intel Corporation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) +#include +#else +#include +#endif +#include +#include + +#include "ipu.h" +#include "ipu-psys.h" +#include "ipu-platform-regs.h" +#include "ipu-trace.h" +#define CREATE_TRACE_POINTS +#define IPU_PG_KCMD_TRACE +#include "ipu-trace-event.h" + +static bool early_pg_transfer; +static bool enable_concurrency = true; +module_param(early_pg_transfer, bool, 0664); +module_param(enable_concurrency, bool, 0664); +MODULE_PARM_DESC(early_pg_transfer, + "Copy PGs back to user after resource allocation"); +MODULE_PARM_DESC(enable_concurrency, + "Enable concurrent execution of program groups"); + +struct ipu_trace_block psys_trace_blocks[] = { + { + .offset = TRACE_REG_PS_TRACE_UNIT_BASE, + .type = IPU_TRACE_BLOCK_TUN, + }, + { + .offset = TRACE_REG_PS_SPC_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, + { + .offset = TRACE_REG_PS_SPP0_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, + { + .offset = TRACE_REG_PS_SPP1_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, + { + .offset = TRACE_REG_PS_ISP0_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, + { + .offset = TRACE_REG_PS_ISP1_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, + { + .offset = TRACE_REG_PS_ISP2_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, 
+ { + .offset = TRACE_REG_PS_ISP3_EVQ_BASE, + .type = IPU_TRACE_BLOCK_TM, + }, + { + .offset = TRACE_REG_PS_SPC_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_SPP0_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_SPP1_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_MMU_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_ISL_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_ISP0_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_ISP1_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_ISP2_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_ISP3_GPC_BASE, + .type = IPU_TRACE_BLOCK_GPC, + }, + { + .offset = TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N, + .type = IPU_TRACE_TIMER_RST, + }, + { + .type = IPU_TRACE_BLOCK_END, + } +}; + +static void set_sp_info_bits(void *base) +{ + int i; + + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + base + IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); + + for (i = 0; i < 4; i++) + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + base + IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(i)); + for (i = 0; i < 4; i++) + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + base + IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(i)); +} + +static void set_isp_info_bits(void *base) +{ + int i; + + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + base + IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); + + for (i = 0; i < 4; i++) + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + base + IPU_REG_PSYS_INFO_SEG_DATA_MASTER(i)); +} + +void ipu_psys_setup_hw(struct ipu_psys *psys) +{ + void __iomem *base = psys->pdata->base; + void __iomem *spc_regs_base = + base + psys->pdata->ipdata->hw_variant.spc_offset; + void *psys_iommu0_ctrl = base + + psys->pdata->ipdata->hw_variant.mmu_hw[0].offset + + IPU_PSYS_MMU0_CTRL_OFFSET; + const u8 *thd = psys->pdata->ipdata->hw_variant.cdc_fifo_threshold; + u32 irqs; + unsigned int i; + + /* Configure PSYS info bits */ + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, psys_iommu0_ctrl); + + set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL); + set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPP0_STATUS_CTRL); + set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPP1_STATUS_CTRL); + set_isp_info_bits(spc_regs_base + IPU_PSYS_REG_ISP0_STATUS_CTRL); + set_isp_info_bits(spc_regs_base + IPU_PSYS_REG_ISP1_STATUS_CTRL); + set_isp_info_bits(spc_regs_base + IPU_PSYS_REG_ISP2_STATUS_CTRL); + set_isp_info_bits(spc_regs_base + IPU_PSYS_REG_ISP3_STATUS_CTRL); + + /* Enable FW interrupt #0 */ + writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0)); + irqs = IPU_PSYS_GPDEV_IRQ_FWIRQ(0); + writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_EDGE); + /* + * With the pulse setting, the driver misses interrupts. The IUNIT + * integration HAS (v1.26) suggests using pulse, but this seems to + * be an error in the documentation. + */ + writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE); + writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR); + writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_MASK); + writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_ENABLE); + + /* Write CDC FIFO threshold values for psys */ + for (i = 0; i < psys->pdata->ipdata->hw_variant.cdc_fifos; i++) + writel(thd[i], base + IPU_REG_PSYS_CDC_THRESHOLD(i)); +} + +/* + * Called to free up all resources associated with a kcmd. + * After this the kcmd no longer exists in the driver. 
+ */ +void ipu_psys_kcmd_free(struct ipu_psys_kcmd *kcmd) +{ + struct ipu_psys *psys; + unsigned long flags; + + if (!kcmd) + return; + + psys = kcmd->fh->psys; + + if (!list_empty(&kcmd->list)) + list_del(&kcmd->list); + + spin_lock_irqsave(&psys->pgs_lock, flags); + if (kcmd->kpg) + kcmd->kpg->pg_size = 0; + spin_unlock_irqrestore(&psys->pgs_lock, flags); + + mutex_lock(&kcmd->fh->bs_mutex); + if (kcmd->kbuf_set) + kcmd->kbuf_set->buf_set_size = 0; + mutex_unlock(&kcmd->fh->bs_mutex); + + kfree(kcmd->pg_manifest); + kfree(kcmd->kbufs); + kfree(kcmd->buffers); + kfree(kcmd); +} + +static struct ipu_psys_kcmd *ipu_psys_copy_cmd(struct ipu_psys_command *cmd, + struct ipu_psys_fh *fh) +{ + struct ipu_psys *psys = fh->psys; + struct ipu_psys_kcmd *kcmd; + struct ipu_psys_kbuffer *kpgbuf; + unsigned int i; + int ret, prevfd = 0; + + if (cmd->bufcount > IPU_MAX_PSYS_CMD_BUFFERS) + return NULL; + + if (!cmd->pg_manifest_size || + cmd->pg_manifest_size > KMALLOC_MAX_CACHE_SIZE) + return NULL; + + kcmd = kzalloc(sizeof(*kcmd), GFP_KERNEL); + if (!kcmd) + return NULL; + + kcmd->state = KCMD_STATE_NEW; + kcmd->fh = fh; + INIT_LIST_HEAD(&kcmd->list); + INIT_LIST_HEAD(&kcmd->started_list); + + mutex_lock(&fh->mutex); + kpgbuf = ipu_psys_lookup_kbuffer(fh, cmd->pg); + mutex_unlock(&fh->mutex); + if (!kpgbuf || !kpgbuf->sgt) + goto error; + + kcmd->pg_user = kpgbuf->kaddr; + kcmd->kpg = __get_pg_buf(psys, kpgbuf->len); + if (!kcmd->kpg) + goto error; + + memcpy(kcmd->kpg->pg, kcmd->pg_user, kcmd->kpg->pg_size); + + kcmd->pg_manifest = kzalloc(cmd->pg_manifest_size, GFP_KERNEL); + if (!kcmd->pg_manifest) + goto error; + + ret = copy_from_user(kcmd->pg_manifest, cmd->pg_manifest, + cmd->pg_manifest_size); + if (ret) + goto error; + + kcmd->pg_manifest_size = cmd->pg_manifest_size; + + kcmd->user_token = cmd->user_token; + kcmd->issue_id = cmd->issue_id; + kcmd->priority = cmd->priority; + if (kcmd->priority >= IPU_PSYS_CMD_PRIORITY_NUM) + goto error; + + kcmd->nbuffers = ipu_fw_psys_pg_get_terminal_count(kcmd); + kcmd->buffers = kcalloc(kcmd->nbuffers, sizeof(*kcmd->buffers), + GFP_KERNEL); + if (!kcmd->buffers) + goto error; + + kcmd->kbufs = kcalloc(kcmd->nbuffers, sizeof(kcmd->kbufs[0]), + GFP_KERNEL); + if (!kcmd->kbufs) + goto error; + + + if (!cmd->bufcount || kcmd->nbuffers > cmd->bufcount) + goto error; + + ret = copy_from_user(kcmd->buffers, cmd->buffers, + kcmd->nbuffers * sizeof(*kcmd->buffers)); + if (ret) + goto error; + + for (i = 0; i < kcmd->nbuffers; i++) { + struct ipu_fw_psys_terminal *terminal; + + terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); + if (!terminal) + continue; + + + mutex_lock(&fh->mutex); + kcmd->kbufs[i] = ipu_psys_lookup_kbuffer(fh, + kcmd->buffers[i].base.fd); + mutex_unlock(&fh->mutex); + if (!kcmd->kbufs[i] || !kcmd->kbufs[i]->sgt || + kcmd->kbufs[i]->len < kcmd->buffers[i].bytes_used) + goto error; + if ((kcmd->kbufs[i]->flags & + IPU_BUFFER_FLAG_NO_FLUSH) || + (kcmd->buffers[i].flags & + IPU_BUFFER_FLAG_NO_FLUSH) || + prevfd == kcmd->buffers[i].base.fd) + continue; + + prevfd = kcmd->buffers[i].base.fd; + dma_sync_sg_for_device(&psys->adev->dev, + kcmd->kbufs[i]->sgt->sgl, + kcmd->kbufs[i]->sgt->orig_nents, + DMA_BIDIRECTIONAL); + } + + + return kcmd; +error: + ipu_psys_kcmd_free(kcmd); + + dev_dbg(&psys->adev->dev, "failed to copy cmd\n"); + + return NULL; +} + +static void ipu_psys_kcmd_run(struct ipu_psys *psys) +{ + struct ipu_psys_kcmd *kcmd = list_first_entry(&psys->started_kcmds_list, + struct ipu_psys_kcmd, + started_list); + int ret; + + ret = 
ipu_psys_move_resources(&psys->adev->dev, + &kcmd->kpg->resource_alloc, + &psys->resource_pool_started, + &psys->resource_pool_running); + if (!ret) { + psys->started_kcmds--; + psys->active_kcmds++; + kcmd->state = KCMD_STATE_RUNNING; + list_del(&kcmd->started_list); + kcmd->watchdog.expires = jiffies + + msecs_to_jiffies(psys->timeout); + add_timer(&kcmd->watchdog); + return; + } + + if (ret != -ENOSPC || !psys->active_kcmds) { + dev_err(&psys->adev->dev, + "kcmd %p failed to alloc resources (running (%d, psys->active_kcmds = %d))\n", + kcmd, ret, psys->active_kcmds); + ipu_psys_kcmd_abort(psys, kcmd, ret); + return; + } +} + +/* + * Move kcmd into completed state (due to running finished or failure). + * Fill up the event struct and notify waiters. + */ +void ipu_psys_kcmd_complete(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd, int error) +{ + struct ipu_psys_fh *fh = kcmd->fh; + + trace_ipu_pg_kcmd(__func__, kcmd->user_token, kcmd->issue_id, + kcmd->priority, + ipu_fw_psys_pg_get_id(kcmd), + ipu_fw_psys_pg_load_cycles(kcmd), + ipu_fw_psys_pg_init_cycles(kcmd), + ipu_fw_psys_pg_processing_cycles(kcmd)); + + switch (kcmd->state) { + case KCMD_STATE_RUNNING: + if (try_to_del_timer_sync(&kcmd->watchdog) < 0) { + dev_err(&psys->adev->dev, + "could not cancel kcmd timer\n"); + return; + } + /* Fall through on purpose */ + case KCMD_STATE_RUN_PREPARED: + ipu_psys_free_resources(&kcmd->kpg->resource_alloc, + &psys->resource_pool_running); + if (psys->started_kcmds) + ipu_psys_kcmd_run(psys); + if (kcmd->state == KCMD_STATE_RUNNING) + psys->active_kcmds--; + break; + case KCMD_STATE_STARTED: + psys->started_kcmds--; + list_del(&kcmd->started_list); + /* Fall through on purpose */ + case KCMD_STATE_START_PREPARED: + ipu_psys_free_resources(&kcmd->kpg->resource_alloc, + &psys->resource_pool_started); + break; + default: + break; + } + + kcmd->ev.type = IPU_PSYS_EVENT_TYPE_CMD_COMPLETE; + kcmd->ev.user_token = kcmd->user_token; + kcmd->ev.issue_id = kcmd->issue_id; + kcmd->ev.error = error; + + if (kcmd->constraint.min_freq) + ipu_buttress_remove_psys_constraint(psys->adev->isp, + &kcmd->constraint); + + if (!early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) { + struct ipu_psys_kbuffer *kbuf; + + kbuf = ipu_psys_lookup_kbuffer_by_kaddr(kcmd->fh, + kcmd->pg_user); + + if (kbuf && kbuf->valid) + memcpy(kcmd->pg_user, + kcmd->kpg->pg, kcmd->kpg->pg_size); + else + dev_dbg(&psys->adev->dev, + "Skipping already unmapped buffer\n"); + } + + if (kcmd->state == KCMD_STATE_RUNNING || + kcmd->state == KCMD_STATE_STARTED) { + pm_runtime_mark_last_busy(&psys->adev->dev); + pm_runtime_put_autosuspend(&psys->adev->dev); + } + + kcmd->state = KCMD_STATE_COMPLETE; + + wake_up_interruptible(&fh->wait); +} + +/* + * Schedule next kcmd by finding a runnable kcmd from the highest + * priority queue in a round-robin fashion versus the client + * queues and running it. + * Any kcmds which fail to start are completed with an error. + */ +void ipu_psys_run_next(struct ipu_psys *psys) +{ + int p; + + /* + * Code below will crash if fhs is empty. Normally this + * shouldn't happen. + */ + if (list_empty(&psys->fhs)) { + WARN_ON(1); + return; + } + + for (p = 0; p < IPU_PSYS_CMD_PRIORITY_NUM; p++) { + int removed; + + do { + struct ipu_psys_fh *fh = list_first_entry(&psys->fhs, + struct + ipu_psys_fh, + list); + struct ipu_psys_fh *fh_last = + list_last_entry(&psys->fhs, + struct ipu_psys_fh, + list); + /* + * When a kcmd is scheduled from a fh, it might expose + * more runnable kcmds behind it in the same queue. 
+ * Therefore loop running kcmds as long as some were + * scheduled. + */ + removed = 0; + do { + struct ipu_psys_fh *fh_next = + list_next_entry(fh, list); + struct ipu_psys_kcmd *kcmd; + int ret; + + mutex_lock(&fh->mutex); + + kcmd = fh->new_kcmd_tail[p]; + /* + * If concurrency is disabled and there are + * already commands running on the PSYS, do not + * run new commands. + */ + if (!enable_concurrency && + psys->active_kcmds > 0) { + mutex_unlock(&fh->mutex); + return; + } + + /* Are there new kcmds available for running? */ + if (!kcmd) + goto next; + + ret = ipu_psys_kcmd_queue(psys, kcmd); + if (ret == -ENOSPC) + goto next; + + /* Update pointer to the first new kcmd */ + fh->new_kcmd_tail[p] = NULL; + while (kcmd != list_last_entry(&fh->kcmds[p], + struct + ipu_psys_kcmd, + list)) { + kcmd = list_next_entry(kcmd, list); + if (kcmd->state == KCMD_STATE_NEW) { + fh->new_kcmd_tail[p] = kcmd; + break; + } + } + + list_move_tail(&fh->list, &psys->fhs); + removed++; +next: + mutex_unlock(&fh->mutex); + if (fh == fh_last) + break; + fh = fh_next; + } while (1); + } while (removed > 0); + } +} + +/* + * Move kcmd into completed state. If kcmd is currently running, + * abort it. + */ +int ipu_psys_kcmd_abort(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd, int error) +{ + int ret = 0; + + if (kcmd->state == KCMD_STATE_COMPLETE) + return 0; + + if ((kcmd->state == KCMD_STATE_RUNNING || + kcmd->state == KCMD_STATE_STARTED)) { + ret = ipu_fw_psys_pg_abort(kcmd); + if (ret) { + dev_err(&psys->adev->dev, "failed to abort kcmd!\n"); + goto out; + } + } + +out: + ipu_psys_kcmd_complete(psys, kcmd, ret); + + return ret; +} + +/* + * Submit kcmd into psys queue. If running fails, complete the kcmd + * with an error. + */ +static int ipu_psys_kcmd_start(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd) +{ + /* + * Found a runnable PG. Move queue to the list tail for round-robin + * scheduling and run the PG. Start the watchdog timer if the PG was + * started successfully. Enable PSYS power if requested. + */ + int ret; + + if (psys->adev->isp->flr_done) { + ipu_psys_kcmd_complete(psys, kcmd, -EIO); + return -EIO; + } + + ret = pm_runtime_get_sync(&psys->adev->dev); + if (ret < 0) { + dev_err(&psys->adev->dev, "failed to power on PSYS\n"); + ipu_psys_kcmd_complete(psys, kcmd, -EIO); + pm_runtime_put_noidle(&psys->adev->dev); + return ret; + } + + if (early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) + memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size); + + ret = ipu_fw_psys_pg_start(kcmd); + if (ret) { + dev_err(&psys->adev->dev, "failed to start kcmd!\n"); + goto error; + } + + ipu_fw_psys_pg_dump(psys, kcmd, "run"); + + /* + * Starting from scci_master_20151228_1800, pg start api is split into + * two different calls, making driver responsible to flush pg between + * start and disown library calls. 
+ */ + clflush_cache_range(kcmd->kpg->pg, kcmd->kpg->pg_size); + ret = ipu_fw_psys_pg_disown(kcmd); + if (ret) { + dev_err(&psys->adev->dev, "failed to disown kcmd!\n"); + goto error; + } + + trace_ipu_pg_kcmd(__func__, kcmd->user_token, kcmd->issue_id, + kcmd->priority, + ipu_fw_psys_pg_get_id(kcmd), + ipu_fw_psys_pg_load_cycles(kcmd), + ipu_fw_psys_pg_init_cycles(kcmd), + ipu_fw_psys_pg_processing_cycles(kcmd)); + + switch (kcmd->state) { + case KCMD_STATE_RUN_PREPARED: + kcmd->state = KCMD_STATE_RUNNING; + psys->active_kcmds++; + kcmd->watchdog.expires = jiffies + + msecs_to_jiffies(psys->timeout); + add_timer(&kcmd->watchdog); + break; + case KCMD_STATE_START_PREPARED: + kcmd->state = KCMD_STATE_STARTED; + psys->started_kcmds++; + list_add_tail(&kcmd->started_list, &psys->started_kcmds_list); + break; + default: + WARN_ON(1); + ret = -EINVAL; + goto error; + } + return 0; + +error: + dev_err(&psys->adev->dev, "failed to start process group\n"); + ipu_psys_kcmd_complete(psys, kcmd, -EIO); + return ret; +} + +/* + * Move all kcmds in all queues forcibly into completed state. + */ +static void ipu_psys_flush_kcmds(struct ipu_psys *psys, int error) +{ + struct ipu_psys_fh *fh; + struct ipu_psys_kcmd *kcmd; + int p; + + dev_err(&psys->dev, "flushing all commands with error: %d\n", error); + + list_for_each_entry(fh, &psys->fhs, list) { + mutex_lock(&fh->mutex); + for (p = 0; p < IPU_PSYS_CMD_PRIORITY_NUM; p++) { + fh->new_kcmd_tail[p] = NULL; + list_for_each_entry(kcmd, &fh->kcmds[p], list) { + if (kcmd->state == KCMD_STATE_COMPLETE) + continue; + ipu_psys_kcmd_complete(psys, kcmd, error); + } + } + mutex_unlock(&fh->mutex); + } +} + +/* + * Abort all currently running process groups and reset PSYS + * by power cycling it. PSYS power must not be acquired + * except by running kcmds when calling this. 
+ */ +static void ipu_psys_reset(struct ipu_psys *psys) +{ +#ifdef CONFIG_PM + struct device *d = &psys->adev->isp->psys_iommu->dev; + int r; + + pm_runtime_dont_use_autosuspend(&psys->adev->dev); + r = pm_runtime_get_sync(d); + if (r < 0) { + pm_runtime_put_noidle(d); + dev_err(&psys->adev->dev, "power management failed\n"); + return; + } + + ipu_psys_flush_kcmds(psys, -EIO); + flush_workqueue(pm_wq); + r = pm_runtime_put_sync(d); /* Turn big red power knob off here */ + /* Power was successfully turned off if and only if zero was returned */ + if (r) + dev_warn(&psys->adev->dev, + "power management failed, PSYS reset may be incomplete\n"); + pm_runtime_use_autosuspend(&psys->adev->dev); + ipu_psys_run_next(psys); +#else + dev_err(&psys->adev->dev, + "power management disabled, can not reset PSYS\n"); +#endif +} + +void ipu_psys_watchdog_work(struct work_struct *work) +{ + struct ipu_psys *psys = container_of(work, + struct ipu_psys, watchdog_work); + struct ipu_psys_fh *fh; + + mutex_lock(&psys->mutex); + + /* Loop over all running kcmds */ + list_for_each_entry(fh, &psys->fhs, list) { + int p, r; + + mutex_lock(&fh->mutex); + for (p = 0; p < IPU_PSYS_CMD_PRIORITY_NUM; p++) { + struct ipu_psys_kcmd *kcmd; + + list_for_each_entry(kcmd, &fh->kcmds[p], list) { + if (fh->new_kcmd_tail[p] == kcmd) + break; + if (kcmd->state != KCMD_STATE_RUNNING) + continue; + + if (timer_pending(&kcmd->watchdog)) + continue; + /* Found an expired but running command */ + dev_err(&psys->adev->dev, + "kcmd:0x%llx[0x%llx] taking too long\n", + kcmd->user_token, kcmd->issue_id); + r = ipu_psys_kcmd_abort(psys, kcmd, -ETIME); + if (r) + goto stop_failed; + } + } + mutex_unlock(&fh->mutex); + } + + /* Kick command scheduler thread */ + atomic_set(&psys->wakeup_sched_thread_count, 1); + wake_up_interruptible(&psys->sched_cmd_wq); + mutex_unlock(&psys->mutex); + return; + +stop_failed: + mutex_unlock(&fh->mutex); + ipu_psys_reset(psys); + mutex_unlock(&psys->mutex); +} + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(4, 14, 2) +static void ipu_psys_watchdog(unsigned long data) +{ + struct ipu_psys_kcmd *kcmd = (struct ipu_psys_kcmd *)data; +#else +static void ipu_psys_watchdog(struct timer_list *t) +{ + struct ipu_psys_kcmd *kcmd = from_timer(kcmd, t, watchdog); +#endif + struct ipu_psys *psys = kcmd->fh->psys; + + queue_work(IPU_PSYS_WORK_QUEUE, &psys->watchdog_work); +} + +static int ipu_psys_config_legacy_pg(struct ipu_psys_kcmd *kcmd) +{ + struct ipu_psys *psys = kcmd->fh->psys; + unsigned int i; + int ret; + + ret = ipu_fw_psys_pg_set_ipu_vaddress(kcmd, kcmd->kpg->pg_dma_addr); + if (ret) { + ret = -EIO; + goto error; + } + + for (i = 0; i < kcmd->nbuffers; i++) { + struct ipu_fw_psys_terminal *terminal; + u32 buffer; + + terminal = ipu_fw_psys_pg_get_terminal(kcmd, i); + if (!terminal) + continue; + + buffer = (u32) kcmd->kbufs[i]->dma_addr + + kcmd->buffers[i].data_offset; + + ret = ipu_fw_psys_terminal_set(terminal, i, kcmd, + buffer, kcmd->kbufs[i]->len); + if (ret == -EAGAIN) + continue; + + if (ret) { + dev_err(&psys->adev->dev, "Unable to set terminal\n"); + goto error; + } + } + + ipu_fw_psys_pg_set_token(kcmd, (uintptr_t) kcmd); + + ret = ipu_fw_psys_pg_submit(kcmd); + if (ret) { + dev_err(&psys->adev->dev, "failed to submit kcmd!\n"); + goto error; + } + + return 0; + +error: + dev_err(&psys->adev->dev, "failed to config legacy pg\n"); + return ret; +} + +static bool ipu_psys_kcmd_is_valid(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd) +{ + struct ipu_psys_fh *fh; + struct ipu_psys_kcmd *kcmd0; + int 
p; + + list_for_each_entry(fh, &psys->fhs, list) { + mutex_lock(&fh->mutex); + for (p = 0; p < IPU_PSYS_CMD_PRIORITY_NUM; p++) { + list_for_each_entry(kcmd0, &fh->kcmds[p], list) { + if (kcmd0 == kcmd) { + mutex_unlock(&fh->mutex); + return true; + } + } + } + mutex_unlock(&fh->mutex); + } + + return false; +} + +int ipu_psys_kcmd_queue(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd) +{ + int ret; + + if (kcmd->state != KCMD_STATE_NEW) { + WARN_ON(1); + return -EINVAL; + } + + if (!psys->started_kcmds) { + ret = ipu_psys_allocate_resources(&psys->adev->dev, + kcmd->kpg->pg, + kcmd->pg_manifest, + &kcmd->kpg->resource_alloc, + &psys->resource_pool_running); + if (!ret) { + if (kcmd->state == KCMD_STATE_NEW) + kcmd->state = KCMD_STATE_RUN_PREPARED; + return ipu_psys_kcmd_start(psys, kcmd); + } + + if (ret != -ENOSPC || !psys->active_kcmds) { + dev_err(&psys->adev->dev, + "kcmd %p failed to alloc resources (running)\n", + kcmd); + ipu_psys_kcmd_complete(psys, kcmd, ret); + /* kcmd_complete doesn't handle PM for KCMD_STATE_NEW */ + pm_runtime_put(&psys->adev->dev); + return -EINVAL; + } + } + + ret = ipu_psys_allocate_resources(&psys->adev->dev, + kcmd->kpg->pg, + kcmd->pg_manifest, + &kcmd->kpg->resource_alloc, + &psys->resource_pool_started); + if (!ret) { + kcmd->state = KCMD_STATE_START_PREPARED; + return ipu_psys_kcmd_start(psys, kcmd); + } + + if (ret != -ENOSPC || !psys->started_kcmds) { + dev_err(&psys->adev->dev, + "kcmd %p failed to alloc resources (started)\n", kcmd); + ipu_psys_kcmd_complete(psys, kcmd, ret); + /* kcmd_complete doesn't handle PM for KCMD_STATE_NEW */ + pm_runtime_put(&psys->adev->dev); + ret = -EINVAL; + } + return ret; +} + +int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh) +{ + struct ipu_psys *psys = fh->psys; + struct ipu_psys_kcmd *kcmd; + size_t pg_size; + int ret; + + if (psys->adev->isp->flr_done) + return -EIO; + + kcmd = ipu_psys_copy_cmd(cmd, fh); + if (!kcmd) + return -EINVAL; + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(4, 14, 2) + init_timer(&kcmd->watchdog); + kcmd->watchdog.data = (unsigned long)kcmd; + kcmd->watchdog.function = &ipu_psys_watchdog; +#else + timer_setup(&kcmd->watchdog, ipu_psys_watchdog, 0); +#endif + + if (cmd->min_psys_freq) { + kcmd->constraint.min_freq = cmd->min_psys_freq; + ipu_buttress_add_psys_constraint(psys->adev->isp, + &kcmd->constraint); + } + + pg_size = ipu_fw_psys_pg_get_size(kcmd); + if (pg_size > kcmd->kpg->pg_size) { + dev_dbg(&psys->adev->dev, "pg size mismatch %zu %zu\n", + pg_size, kcmd->kpg->pg_size); + ret = -EINVAL; + goto error; + } + + ret = ipu_psys_config_legacy_pg(kcmd); + if (ret) + goto error; + + mutex_lock(&fh->mutex); + list_add_tail(&kcmd->list, &fh->kcmds[cmd->priority]); + if (!fh->new_kcmd_tail[cmd->priority] && kcmd->state == KCMD_STATE_NEW) { + fh->new_kcmd_tail[cmd->priority] = kcmd; + /* Kick command scheduler thread */ + atomic_set(&psys->wakeup_sched_thread_count, 1); + wake_up_interruptible(&psys->sched_cmd_wq); + } + mutex_unlock(&fh->mutex); + + dev_dbg(&psys->adev->dev, + "IOC_QCMD: user_token:%llx issue_id:0x%llx pri:%d\n", + cmd->user_token, cmd->issue_id, cmd->priority); + + return 0; + +error: + ipu_psys_kcmd_free(kcmd); + + return ret; +} + +void ipu_psys_handle_events(struct ipu_psys *psys) +{ + struct ipu_psys_kcmd *kcmd = NULL; + struct ipu_fw_psys_event event; + bool error; + + do { + memset(&event, 0, sizeof(event)); + if (!ipu_fw_psys_rcv_event(psys, &event)) + break; + + error = false; + kcmd = (struct ipu_psys_kcmd *)(unsigned long)event.token; 
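+ /* + * The token was handed to the firmware in + * ipu_psys_config_legacy_pg() as + * ipu_fw_psys_pg_set_token(kcmd, (uintptr_t)kcmd), so it is + * cast straight back to a kcmd pointer here and re-validated + * against the open file handles before being dereferenced. + */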
+ error = IS_ERR_OR_NULL(kcmd) ? true : false; + + dev_dbg(&psys->adev->dev, "psys received event status:%d\n", + event.status); + + if (error) { + dev_err(&psys->adev->dev, + "no token received, command unknown\n"); + pm_runtime_put(&psys->adev->dev); + ipu_psys_reset(psys); + pm_runtime_get(&psys->adev->dev); + break; + } + + if (ipu_psys_kcmd_is_valid(psys, kcmd)) + ipu_psys_kcmd_complete(psys, kcmd, + event.status == + IPU_PSYS_EVENT_CMD_COMPLETE || + event.status == + IPU_PSYS_EVENT_FRAGMENT_COMPLETE + ? 0 : -EIO); + /* Kick command scheduler thread */ + atomic_set(&psys->wakeup_sched_thread_count, 1); + wake_up_interruptible(&psys->sched_cmd_wq); + } while (1); +} diff --git a/drivers/media/pci/intel/ipu4/ipu4-resources.c b/drivers/media/pci/intel/ipu4/ipu4-resources.c new file mode 100644 index 000000000000..097ea1bb7ed9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4-resources.c @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2015 - 2018 Intel Corporation + +#include +#include +#include +#include +#include + +#include + +#include "ipu-fw-psys.h" +#include "ipu-psys.h" + +static int ipu_resource_init(struct ipu_resource *res, u32 id, int elements) +{ + if (elements <= 0) { + res->bitmap = NULL; + return 0; + } + + res->bitmap = kcalloc(BITS_TO_LONGS(elements), sizeof(long), + GFP_KERNEL); + if (!res->bitmap) + return -ENOMEM; + res->elements = elements; + res->id = id; + return 0; +} + +static unsigned long +ipu_resource_alloc(struct ipu_resource *res, int n, + struct ipu_resource_alloc *alloc, + enum ipu_resource_type type) +{ + unsigned long p; + + if (n <= 0) { + alloc->elements = 0; + return 0; + } + + if (!res->bitmap) + return (unsigned long)(-ENOSPC); + + p = bitmap_find_next_zero_area(res->bitmap, res->elements, 0, n, 0); + alloc->resource = NULL; + + if (p >= res->elements) + return (unsigned long)(-ENOSPC); + bitmap_set(res->bitmap, p, n); + alloc->resource = res; + alloc->elements = n; + alloc->pos = p; + alloc->type = type; + + return p; +} + +static void ipu_resource_free(struct ipu_resource_alloc *alloc) +{ + if (alloc->elements <= 0) + return; + + if (alloc->type == IPU_RESOURCE_DFM) + *alloc->resource->bitmap &= ~(unsigned long)(alloc->elements); + else + bitmap_clear(alloc->resource->bitmap, alloc->pos, + alloc->elements); + alloc->resource = NULL; +} + +static void ipu_resource_cleanup(struct ipu_resource *res) +{ + kfree(res->bitmap); + res->bitmap = NULL; +} + +/********** IPU PSYS-specific resource handling **********/ + +int ipu_psys_resource_pool_init(struct ipu_psys_resource_pool + *pool) +{ + int i, j, k, ret; + + pool->cells = 0; + + for (i = 0; i < res_defs->num_dev_channels; i++) { + ret = ipu_resource_init(&pool->dev_channels[i], i, + res_defs->dev_channels[i]); + if (ret) + goto error; + } + + for (j = 0; j < res_defs->num_ext_mem_ids; j++) { + ret = ipu_resource_init(&pool->ext_memory[j], j, + res_defs->ext_mem_ids[j]); + if (ret) + goto memory_error; + } + + for (k = 0; k < res_defs->num_dfm_ids; k++) { + ret = ipu_resource_init(&pool->dfms[k], k, res_defs->dfms[k]); + if (ret) + goto dfm_error; + } + + return 0; + +dfm_error: + for (k--; k >= 0; k--) + ipu_resource_cleanup(&pool->dfms[k]); + +memory_error: + for (j--; j >= 0; j--) + ipu_resource_cleanup(&pool->ext_memory[j]); + +error: + for (i--; i >= 0; i--) + ipu_resource_cleanup(&pool->dev_channels[i]); + return ret; +} + + +void ipu_psys_resource_pool_cleanup(struct ipu_psys_resource_pool + *pool) +{ + u32 i; + + for (i = 0; i < res_defs->num_dev_channels; i++) + 
ipu_resource_cleanup(&pool->dev_channels[i]);
+
+	for (i = 0; i < res_defs->num_ext_mem_ids; i++)
+		ipu_resource_cleanup(&pool->ext_memory[i]);
+
+	for (i = 0; i < res_defs->num_dfm_ids; i++)
+		ipu_resource_cleanup(&pool->dfms[i]);
+}
+
+static int ipu_psys_allocate_one_resource(const struct device *dev,
+					  struct ipu_fw_psys_process *process,
+					  struct ipu_resource *resource,
+					  struct ipu_fw_generic_program_manifest *pm,
+					  u32 resource_id,
+					  struct ipu_psys_resource_alloc *alloc)
+{
+	const u16 resource_req = pm->dev_chn_size[resource_id];
+	unsigned long retl;
+
+	if (resource_req <= 0)
+		return 0;
+
+	if (alloc->resources >= IPU_MAX_RESOURCES) {
+		dev_err(dev, "out of resource handles\n");
+		return -ENOSPC;
+	}
+	retl = ipu_resource_alloc
+	    (resource, resource_req,
+	     &alloc->resource_alloc[alloc->resources],
+	     IPU_RESOURCE_DEV_CHN);
+	if (IS_ERR_VALUE(retl)) {
+		dev_dbg(dev, "out of device channel resources\n");
+		return (int)retl;
+	}
+	alloc->resources++;
+
+	return 0;
+}
+
+/*
+ * ext_mem_type_id is a generic type id for memory (like DMEM, VMEM)
+ * ext_mem_bank_id is a detailed type id for memory (like DMEM0, DMEM1 etc.)
+ */
+static int ipu_psys_allocate_memory_resource(
+				const struct device *dev,
+				struct ipu_fw_psys_process *process,
+				struct ipu_resource *resource,
+				struct ipu_fw_generic_program_manifest *pm,
+				u32 ext_mem_type_id, u32 ext_mem_bank_id,
+				struct ipu_psys_resource_alloc *alloc)
+{
+	const u16 memory_resource_req = pm->ext_mem_size[ext_mem_type_id];
+	unsigned long retl;
+
+	if (memory_resource_req <= 0)
+		return 0;
+
+	if (alloc->resources >= IPU_MAX_RESOURCES) {
+		dev_err(dev, "out of resource handles\n");
+		return -ENOSPC;
+	}
+	retl = ipu_resource_alloc
+	    (resource, memory_resource_req,
+	     &alloc->resource_alloc[alloc->resources],
+	     IPU_RESOURCE_EXT_MEM);
+	if (IS_ERR_VALUE(retl)) {
+		dev_dbg(dev, "out of memory resources\n");
+		return (int)retl;
+	}
+
+	alloc->resources++;
+
+	return 0;
+}
+
+/*
+ * Allocate resources for pg from `pool'. Mark the allocated
+ * resources into `alloc'. Returns 0 on success, -ENOSPC if there
+ * are not enough resources (in which case no resources are
+ * allocated at all), or another negative error code on other
+ * failures.
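+ *
+ * A minimal usage sketch (editorial addition; names are taken from this
+ * driver, locking and error handling elided):
+ *
+ *	struct ipu_psys_resource_alloc *alloc = &kcmd->kpg->resource_alloc;
+ *	int ret;
+ *
+ *	ret = ipu_psys_allocate_resources(&psys->adev->dev, kcmd->kpg->pg,
+ *					  kcmd->pg_manifest, alloc,
+ *					  &psys->resource_pool_running);
+ *	if (!ret) {
+ *		... run the process group; when it completes: ...
+ *		ipu_psys_free_resources(alloc, &psys->resource_pool_running);
+ *	}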
+ */ +int ipu_psys_allocate_resources(const struct device *dev, + struct ipu_fw_psys_process_group *pg, + void *pg_manifest, + struct ipu_psys_resource_alloc + *alloc, struct ipu_psys_resource_pool + *pool) +{ + u32 resid; + u32 mem_type_id; + int ret, i; + u16 *process_offset_table; + u8 processes; + u32 cells = 0; + + if (!pg) + return -EINVAL; + process_offset_table = (u16 *)((u8 *) pg + pg->processes_offset); + processes = pg->process_count; + + for (i = 0; i < processes; i++) { + u32 cell; + struct ipu_fw_psys_process *process = + (struct ipu_fw_psys_process *) + ((char *)pg + process_offset_table[i]); + struct ipu_fw_generic_program_manifest pm; + + memset(&pm, 0, sizeof(pm)); + if (!process) { + dev_err(dev, "can not get process\n"); + ret = -ENOENT; + goto free_out; + } + + ret = ipu_fw_psys_get_program_manifest_by_process(&pm, + pg_manifest, + process); + if (ret < 0) { + dev_err(dev, "can not get manifest\n"); + goto free_out; + } + + if (pm.cell_id == res_defs->num_cells && + pm.cell_type_id == res_defs->num_cells_type) { + dev_dbg(dev, "ignore the cell requirement\n"); + cell = res_defs->num_cells; + } else if ((pm.cell_id != res_defs->num_cells && + pm.cell_type_id == res_defs->num_cells_type)) { + cell = ipu_fw_psys_get_process_cell_id(process, 0); + } else { + /* Find a free cell of desired type */ + u32 type = pm.cell_type_id; + + for (cell = 0; cell < res_defs->num_cells; cell++) + if (res_defs->cells[cell] == type && + ((pool->cells | cells) & (1 << cell)) == 0) + break; + if (cell >= res_defs->num_cells) { + dev_dbg(dev, "no free cells of right type\n"); + ret = -ENOSPC; + goto free_out; + } + ret = ipu_fw_psys_set_process_cell_id(process, 0, cell); + if (ret) + goto free_out; + } + if (cell < res_defs->num_cells) + cells |= 1 << cell; + if (pool->cells & cells) { + dev_dbg(dev, "out of cell resources\n"); + ret = -ENOSPC; + goto free_out; + } + if (pm.dev_chn_size) { + for (resid = 0; resid < res_defs->num_dev_channels; resid++) { + ret = ipu_psys_allocate_one_resource + (dev, process, + &pool->dev_channels[resid], &pm, resid, alloc); + if (ret) + goto free_out; + ret = ipu_fw_psys_set_process_dev_chn_offset(process, resid, + alloc->resource_alloc[alloc->resources - 1].pos); + if (ret) + goto free_out; + } + } + + if (pm.ext_mem_size) { + for (mem_type_id = 0; + mem_type_id < res_defs->num_ext_mem_types; mem_type_id++) { + u32 mem_bank_id = res_defs->num_ext_mem_ids; + + if (cell != res_defs->num_cells) + mem_bank_id = + res_defs->cell_mem[res_defs->cell_mem_row * + cell + mem_type_id]; + if (mem_bank_id == res_defs->num_ext_mem_ids) + continue; + + ret = ipu_psys_allocate_memory_resource + (dev, process, + &pool->ext_memory[mem_bank_id], + &pm, mem_type_id, mem_bank_id, alloc); + if (ret) + goto free_out; + /* no return value check here because fw api will + * do some checks, and would return non-zero + * except mem_type_id == 0. 
Such a failure is most likely caused by an
+			 * improper mem_bank_id selected in the flow above.
+			 */
+			ipu_fw_psys_set_process_ext_mem
+			    (process, mem_type_id, mem_bank_id,
+			     alloc->resource_alloc[alloc->resources - 1].pos);
+			}
+		}
+	}
+	alloc->cells |= cells;
+	pool->cells |= cells;
+	return 0;
+
+free_out:
+	for (; i >= 0; i--) {
+		struct ipu_fw_psys_process *process =
+		    (struct ipu_fw_psys_process *)
+		    ((char *)pg + process_offset_table[i]);
+		struct ipu_fw_generic_program_manifest pm;
+		int retval;
+
+		if (!process)
+			break;
+
+		retval = ipu_fw_psys_get_program_manifest_by_process
+		    (&pm, pg_manifest, process);
+		if (retval < 0)
+			break;
+		if ((pm.cell_id != res_defs->num_cells &&
+		     pm.cell_type_id == res_defs->num_cells_type))
+			continue;
+		/* No return value check here: if finding a free cell failed
+		 * earlier, the process cell was never set, so clear_cell is
+		 * expected to return non-zero.
+		 */
+		ipu_fw_psys_clear_process_cell(process);
+	}
+	dev_dbg(dev, "failed to allocate resources, ret %d\n", ret);
+	ipu_psys_free_resources(alloc, pool);
+	return ret;
+}
+
+int ipu_psys_move_resources(const struct device *dev,
+			    struct ipu_psys_resource_alloc *alloc,
+			    struct ipu_psys_resource_pool *source_pool,
+			    struct ipu_psys_resource_pool *target_pool)
+{
+	int i;
+
+	if (target_pool->cells & alloc->cells) {
+		dev_dbg(dev, "out of cell resources\n");
+		return -ENOSPC;
+	}
+
+	for (i = 0; i < alloc->resources; i++) {
+		unsigned long bitmap = 0;
+		unsigned int id = alloc->resource_alloc[i].resource->id;
+		unsigned long fbit, end;
+
+		switch (alloc->resource_alloc[i].type) {
+		case IPU_RESOURCE_DEV_CHN:
+			bitmap_set(&bitmap, alloc->resource_alloc[i].pos,
+				   alloc->resource_alloc[i].elements);
+			if (*target_pool->dev_channels[id].bitmap & bitmap)
+				return -ENOSPC;
+			break;
+		case IPU_RESOURCE_EXT_MEM:
+			end = alloc->resource_alloc[i].elements +
+			    alloc->resource_alloc[i].pos;
+
+			fbit = find_next_bit(target_pool->ext_memory[id].bitmap,
+					     end, alloc->resource_alloc[i].pos);
+			/* find_next_bit returns "end" when no bit is set in
+			 * the range, i.e. the target range is free
+			 */
+			if (end != fbit)
+				return -ENOSPC;
+			break;
+		case IPU_RESOURCE_DFM:
+			bitmap = alloc->resource_alloc[i].elements;
+			if (*target_pool->dfms[id].bitmap & bitmap)
+				return -ENOSPC;
+			break;
+		default:
+			dev_err(dev, "Illegal resource type\n");
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < alloc->resources; i++) {
+		u32 id = alloc->resource_alloc[i].resource->id;
+
+		switch (alloc->resource_alloc[i].type) {
+		case IPU_RESOURCE_DEV_CHN:
+			bitmap_set(target_pool->dev_channels[id].bitmap,
+				   alloc->resource_alloc[i].pos,
+				   alloc->resource_alloc[i].elements);
+			ipu_resource_free(&alloc->resource_alloc[i]);
+			alloc->resource_alloc[i].resource =
+			    &target_pool->dev_channels[id];
+			break;
+		case IPU_RESOURCE_EXT_MEM:
+			bitmap_set(target_pool->ext_memory[id].bitmap,
+				   alloc->resource_alloc[i].pos,
+				   alloc->resource_alloc[i].elements);
+			ipu_resource_free(&alloc->resource_alloc[i]);
+			alloc->resource_alloc[i].resource =
+			    &target_pool->ext_memory[id];
+			break;
+		case IPU_RESOURCE_DFM:
+			*target_pool->dfms[id].bitmap |=
+			    alloc->resource_alloc[i].elements;
+			*alloc->resource_alloc[i].resource->bitmap &=
+			    ~(alloc->resource_alloc[i].elements);
+			alloc->resource_alloc[i].resource =
+			    &target_pool->dfms[id];
+			break;
+		default:
+			/*
+			 * Just keep the compiler happy; this case already
+			 * failed in the loop above.
+ */ + break; + } + } + + target_pool->cells |= alloc->cells; + source_pool->cells &= ~alloc->cells; + + return 0; +} + +/* Free resources marked in `alloc' from `resources' */ +void ipu_psys_free_resources(struct ipu_psys_resource_alloc + *alloc, struct ipu_psys_resource_pool *pool) +{ + unsigned int i; + + pool->cells &= ~alloc->cells; + alloc->cells = 0; + for (i = 0; i < alloc->resources; i++) + ipu_resource_free(&alloc->resource_alloc[i]); + alloc->resources = 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4.c b/drivers/media/pci/intel/ipu4/ipu4.c new file mode 100644 index 000000000000..c3615d225431 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4.c @@ -0,0 +1,572 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Intel Corporation + +#include +#include +#include +#include +#include + +#include "ipu.h" +#include "ipu-cpd.h" +#include "ipu-isys.h" +#include "ipu-buttress.h" +#include "ipu-psys.h" +#include "ipu-platform.h" +#include "ipu-platform-regs.h" +#include "ipu-platform-buttress-regs.h" + +#ifdef CONFIG_VIDEO_INTEL_IPU4 +static struct ipu_receiver_electrical_params ipu4_ev_params[] = { + {0, 1500000000ul / 2, IPU_PCI_ID, IPU_HW_BXT_P_B1_REV, + .rcomp_val_combo = 11, + .rcomp_val_legacy = 11, + .ports[0].crc_val = 18, + .ports[0].drc_val = 29, + .ports[0].drc_val_combined = 29, + .ports[0].ctle_val = 4, + .ports[1].crc_val = 18, + .ports[1].drc_val = 29, + .ports[1].drc_val_combined = 31, + .ports[1].ctle_val = 4 + }, + {0, 1500000000ul / 2, IPU_PCI_ID, IPU_HW_BXT_P_D0_REV, + .rcomp_val_combo = 11, + .rcomp_val_legacy = 11, + .ports[0].crc_val = 18, + .ports[0].drc_val = 29, + .ports[0].drc_val_combined = 29, + .ports[0].ctle_val = 4, + .ports[1].crc_val = 18, + .ports[1].drc_val = 29, + .ports[1].drc_val_combined = 31, + .ports[1].ctle_val = 4 + }, + {0, 1500000000ul / 2, IPU_PCI_ID, IPU_HW_BXT_P_E0_REV, + .rcomp_val_combo = 11, + .rcomp_val_legacy = 11, + .ports[0].crc_val = 18, + .ports[0].drc_val = 29, + .ports[0].drc_val_combined = 29, + .ports[0].ctle_val = 4, + .ports[1].crc_val = 18, + .ports[1].drc_val = 29, + .ports[1].drc_val_combined = 31, + .ports[1].ctle_val = 4 + }, + {}, +}; + +static unsigned int ipu4_csi_offsets[] = { + 0x64000, 0x65000, 0x66000, 0x67000, 0x6C000, 0x6C800 +}; + +static unsigned char ipu4_csi_evlanecombine[] = { + 0, 0, 0, 0, 2, 0 +}; + +static unsigned int ipu4_tpg_offsets[] = { + IPU_TPG0_ADDR_OFFSET, + IPU_TPG1_ADDR_OFFSET +}; + +static unsigned int ipu4_tpg_sels[] = { + IPU_GPOFFSET + IPU_GPREG_MIPI_PKT_GEN0_SEL, + IPU_COMBO_GPOFFSET + IPU_GPREG_MIPI_PKT_GEN1_SEL +}; + +const struct ipu_isys_internal_pdata isys_ipdata = { + .csi2 = { + .nports = ARRAY_SIZE(ipu4_csi_offsets), + .offsets = ipu4_csi_offsets, + .evparams = ipu4_ev_params, + .evlanecombine = ipu4_csi_evlanecombine, + .evsetmask0 = 1 << 4, /* CSI port 4 */ + .evsetmask1 = 1 << 5, /* CSI port 5 */ + }, + .tpg = { + .ntpgs = ARRAY_SIZE(ipu4_tpg_offsets), + .offsets = ipu4_tpg_offsets, + .sels = ipu4_tpg_sels, + }, + .hw_variant = { + .offset = IPU_ISYS_OFFSET, + .nr_mmus = 2, + .mmu_hw = { + { + .offset = IPU_ISYS_IOMMU0_OFFSET, + .info_bits = + IPU_INFO_REQUEST_DESTINATION_PRIMARY, + .nr_l1streams = 0, + .nr_l2streams = 0, + .insert_read_before_invalidate = true, + }, + { + .offset = IPU_ISYS_IOMMU1_OFFSET, + .info_bits = IPU_INFO_STREAM_ID_SET(0), + .nr_l1streams = IPU_MMU_MAX_TLB_L1_STREAMS, + .l1_block_sz = { + 8, 16, 16, 16, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 8 + }, + .l1_zlw_en = { + 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + }, + .l1_zlw_1d_mode 
= { + 0, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 + }, + .l1_ins_zlw_ahead_pages = { + 0, 3, 3, 3, 0, 0, + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0 + }, + .l1_zlw_2d_mode = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + }, + .nr_l2streams = IPU_MMU_MAX_TLB_L2_STREAMS, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .zlw_invalidate = true, + .l1_stream_id_reg_offset = + IPU_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU_MMU_L2_STREAM_ID_REG_OFFSET, + }, + }, + .dmem_offset = IPU_ISYS_DMEM_OFFSET, + .spc_offset = IPU_ISYS_SPC_OFFSET, + }, + .num_parallel_streams = IPU_ISYS_NUM_STREAMS, + .isys_dma_overshoot = IPU_ISYS_OVERALLOC_MIN, +}; + +const struct ipu_psys_internal_pdata psys_ipdata = { + .hw_variant = { + .offset = IPU_PSYS_OFFSET, + .nr_mmus = 3, + .mmu_hw = { + { + .offset = IPU_PSYS_IOMMU0_OFFSET, + .info_bits = + IPU_INFO_REQUEST_DESTINATION_PRIMARY, + .nr_l1streams = 0, + .nr_l2streams = 0, + .insert_read_before_invalidate = true, + }, + { + .offset = IPU_PSYS_IOMMU1_OFFSET, + .info_bits = IPU_INFO_STREAM_ID_SET(0), + .nr_l1streams = IPU_MMU_MAX_TLB_L1_STREAMS, + .l1_block_sz = { + 0, 0, 0, 0, 10, 8, 10, 8, 0, + 4, 4, 12, 0, 0, 0, 8 + }, + .l1_zlw_en = { + 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, + 1, 1, 0, 0, 0, 0 + }, + .l1_zlw_1d_mode = { + 0, 0, 0, 0, 1, 1, 1, 1, 0, + 1, 1, 1, 0, 0, 0, 0 + }, + .l1_ins_zlw_ahead_pages = { + 0, 0, 0, 0, 3, 3, + 3, 3, 0, 3, 1, 3, + 0, 0, 0, 0 + }, + .l1_zlw_2d_mode = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + }, + .nr_l2streams = IPU_MMU_MAX_TLB_L2_STREAMS, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .zlw_invalidate = true, + .l1_stream_id_reg_offset = + IPU_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU_MMU_L2_STREAM_ID_REG_OFFSET, + }, + { + .offset = IPU_PSYS_IOMMU1R_OFFSET, + .info_bits = IPU_INFO_STREAM_ID_SET(0), + .nr_l1streams = IPU_MMU_MAX_TLB_L1_STREAMS, + .l1_block_sz = { + 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, + 0, 0, 16, 12, 12, 16 + }, + .l1_zlw_en = { + 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 1, 1, 1, 1 + }, + .l1_zlw_1d_mode = { + 0, 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 1, 1, 1 + }, + .l1_ins_zlw_ahead_pages = { + 0, 0, 0, 0, 0, 0, + 0, 0, 3, 0, 0, 0, + 0, 0, 0, 0 + }, + .l1_zlw_2d_mode = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1 + }, + .nr_l2streams = IPU_MMU_MAX_TLB_L2_STREAMS, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .zlw_invalidate = true, + .l1_stream_id_reg_offset = + IPU_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU_MMU_L2_STREAM_ID_REG_OFFSET, + }, + }, + .dmem_offset = IPU_PSYS_DMEM_OFFSET, + .spc_offset = IPU_PSYS_SPC_OFFSET, + }, +}; + +/* + * This is meant only as reference for initialising the buttress control, + * because the different HW stepping can have different initial values + * + * There is a HW bug and IS_PWR and PS_PWR fields cannot be used to + * detect if power on/off is ready. Using IS_PWR_FSM and PS_PWR_FSM + * fields instead. 
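+ *
+ * Editorial sketch (assumption, not part of the original comment): the
+ * pwr_sts_* fields below are presumably consumed by the buttress power
+ * polling code roughly as follows, where BUTTRESS_REG_PWR_STATE is a
+ * hypothetical register name used only for illustration:
+ *
+ *	u32 val = readl(isp->base + BUTTRESS_REG_PWR_STATE);
+ *	u32 fsm = (val & ctrl->pwr_sts_mask) >> ctrl->pwr_sts_shift;
+ *	bool powered_on = (fsm == ctrl->pwr_sts_on);
+ *
+ * i.e. the FSM state is extracted with the mask/shift pair and compared
+ * against pwr_sts_on/pwr_sts_off rather than the raw IS_PWR/PS_PWR bits.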
+ */ +const struct ipu_buttress_ctrl isys_buttress_ctrl = { + .divisor = IS_FREQ_CTL_DIVISOR, + .qos_floor = 0, + .freq_ctl = BUTTRESS_REG_IS_FREQ_CTL, + .pwr_sts_shift = BUTTRESS_PWR_STATE_IS_PWR_FSM_SHIFT, + .pwr_sts_mask = BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK, + .pwr_sts_on = BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY, + .pwr_sts_off = BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE, +}; + +/* + * This is meant only as reference for initialising the buttress control, + * because the different HW stepping can have different initial values + */ + +const struct ipu_buttress_ctrl psys_buttress_ctrl = { + .divisor = PS_FREQ_CTL_DEFAULT_RATIO, + .qos_floor = PS_FREQ_CTL_DEFAULT_RATIO, + .freq_ctl = BUTTRESS_REG_PS_FREQ_CTL, + .pwr_sts_shift = BUTTRESS_PWR_STATE_PS_PWR_FSM_SHIFT, + .pwr_sts_mask = BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK, + .pwr_sts_on = BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP, + .pwr_sts_off = BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE, +}; +#endif + +#ifdef CONFIG_VIDEO_INTEL_IPU4P + +/* + * ipu4p available hw ports start from sip0 port3 + * available ports are: + * s0p3, s1p0, s1p1, s1p2, s1p3 + */ +static unsigned int ipu4p_csi_offsets[] = { + 0x64300, 0x6c000, 0x6c100, 0x6c200, 0x6c300 +}; + +static unsigned char ipu4p_csi_evlanecombine[] = { + 0, 0, 0, 0, 0, 0 +}; + +static unsigned int ipu4p_tpg_offsets[] = { + IPU_TPG0_ADDR_OFFSET, + IPU_TPG1_ADDR_OFFSET +}; + +static unsigned int ipu4p_tpg_sels[] = { + IPU_GPOFFSET + IPU_GPREG_MIPI_PKT_GEN0_SEL, + IPU_COMBO_GPOFFSET + IPU_GPREG_MIPI_PKT_GEN1_SEL +}; + +const struct ipu_isys_internal_pdata isys_ipdata = { + .csi2 = { + .nports = ARRAY_SIZE(ipu4p_csi_offsets), + .offsets = ipu4p_csi_offsets, + .evlanecombine = ipu4p_csi_evlanecombine, + }, + .tpg = { + .ntpgs = ARRAY_SIZE(ipu4p_tpg_offsets), + .offsets = ipu4p_tpg_offsets, + .sels = ipu4p_tpg_sels, + }, + .hw_variant = { + .offset = IPU_ISYS_OFFSET, + .nr_mmus = 2, + .mmu_hw = { + { + .offset = IPU_ISYS_IOMMU0_OFFSET, + .info_bits = + IPU_INFO_REQUEST_DESTINATION_PRIMARY, + .nr_l1streams = 0, + .nr_l2streams = 0, + .insert_read_before_invalidate = true, + }, + { + .offset = IPU_ISYS_IOMMU1_OFFSET, + .info_bits = IPU_INFO_STREAM_ID_SET(0), + .nr_l1streams = IPU_MMU_MAX_TLB_L1_STREAMS, + .l1_block_sz = { + 5, 16, 6, 6, 6, 6, 6, 8, 0, + 0, 0, 0, 0, 0, 0, 5 + }, + .l1_zlw_en = { + 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0 + }, + .l1_zlw_1d_mode = { + 0, 1, 1, 1, 1, 1, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0 + }, + .l1_ins_zlw_ahead_pages = { + 0, 3, 3, 3, 3, 3, + 3, 3, 0, 0, 0, 0, + 0, 0, 0, 0 + }, + .l1_zlw_2d_mode = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + }, + .nr_l2streams = IPU_MMU_MAX_TLB_L2_STREAMS, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .zlw_invalidate = true, + .l1_stream_id_reg_offset = + IPU_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU_MMU_L2_STREAM_ID_REG_OFFSET, + }, + }, + .dmem_offset = IPU_ISYS_DMEM_OFFSET, + .spc_offset = IPU_ISYS_SPC_OFFSET, + }, + .num_parallel_streams = IPU_ISYS_NUM_STREAMS, + .isys_dma_overshoot = IPU_ISYS_OVERALLOC_MIN, +}; + +const struct ipu_psys_internal_pdata psys_ipdata = { + .hw_variant = { + .offset = IPU_PSYS_OFFSET, + .nr_mmus = 3, + .mmu_hw = { + { + .offset = IPU_PSYS_IOMMU0_OFFSET, + .info_bits = + IPU_INFO_REQUEST_DESTINATION_PRIMARY, + .nr_l1streams = 0, + .nr_l2streams = 0, + .insert_read_before_invalidate = true, + }, + { + .offset = IPU_PSYS_IOMMU1_OFFSET, + .info_bits = IPU_INFO_STREAM_ID_SET(0), + .nr_l1streams = IPU_MMU_MAX_TLB_L1_STREAMS, + 
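/*
+				 * Editorial note (assumption): each entry in
+				 * the per-stream arrays below configures one
+				 * of the IPU_MMU_MAX_TLB_L1_STREAMS L1 TLB
+				 * streams: l1_block_sz the block size,
+				 * l1_zlw_en plus the 1d/2d mode flags the
+				 * zero-length-write (ZLW) prefetch mode, and
+				 * l1_ins_zlw_ahead_pages how many pages ahead
+				 * to prefetch.
+				 */
+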
.l1_block_sz = { + 2, 5, 4, 2, 2, 10, 5, 16, 10, + 5, 0, 0, 0, 0, 0, 3 + }, + .l1_zlw_en = { + 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0 + }, + .l1_zlw_1d_mode = { + 0, 0, 1, 1, 1, 1, 1, 1, 1, + 1, 0, 0, 0, 0, 0, 0 + }, + .l1_ins_zlw_ahead_pages = { + 0, 0, 3, 3, 3, 3, + 3, 3, 3, 3, 0, 0, + 0, 0, 0, 0 + }, + .l1_zlw_2d_mode = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + }, + .nr_l2streams = IPU_MMU_MAX_TLB_L2_STREAMS, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .zlw_invalidate = true, + .l1_stream_id_reg_offset = + IPU_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU_MMU_L2_STREAM_ID_REG_OFFSET, + }, + { + .offset = IPU_PSYS_IOMMU1R_OFFSET, + .info_bits = IPU_INFO_STREAM_ID_SET(0), + .nr_l1streams = IPU_MMU_MAX_TLB_L1_STREAMS, + .l1_block_sz = { + 2, 6, 5, 16, 16, 8, 8, 0, 0, + 0, 0, 0, 0, 0, 0, 3 + }, + .l1_zlw_en = { + 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + }, + .l1_zlw_1d_mode = { + 0, 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + }, + .l1_ins_zlw_ahead_pages = { + 0, 0, 3, 3, 0, 0, + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0 + }, + .l1_zlw_2d_mode = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + }, + .nr_l2streams = IPU_MMU_MAX_TLB_L2_STREAMS, + .l2_block_sz = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2 + }, + .insert_read_before_invalidate = false, + .zlw_invalidate = true, + .l1_stream_id_reg_offset = + IPU_MMU_L1_STREAM_ID_REG_OFFSET, + .l2_stream_id_reg_offset = + IPU_MMU_L2_STREAM_ID_REG_OFFSET, + }, + }, + .dmem_offset = IPU_PSYS_DMEM_OFFSET, + .spc_offset = IPU_PSYS_SPC_OFFSET, + }, +}; + +const struct ipu_buttress_ctrl isys_buttress_ctrl = { + .divisor = IS_FREQ_CTL_DIVISOR, + .qos_floor = 0, + .ovrd = 0, + .freq_ctl = BUTTRESS_REG_IS_FREQ_CTL, + .divisor_shift = BUTTRESS_REG_IS_FREQ_CTL_RATIO_SHIFT, + .pwr_sts_shift = BUTTRESS_PWR_STATE_IS_PWR_FSM_SHIFT, + .pwr_sts_mask = BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK, + .pwr_sts_on = BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY, + .pwr_sts_off = BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE, +}; + +const struct ipu_buttress_ctrl psys_buttress_ctrl = { + .divisor = PS_FREQ_CTL_DEFAULT_RATIO, + .qos_floor = PS_FREQ_CTL_DEFAULT_RATIO, + .ovrd = 1, + .freq_ctl = BUTTRESS_REG_PS_FREQ_CTL, + .divisor_shift = BUTTRESS_REG_PS_FREQ_CTL_RATIO_SHIFT, + .ovrd_shift = BUTTRESS_REG_PS_FREQ_CTL_OVRD_SHIFT, + .pwr_sts_shift = BUTTRESS_PWR_STATE_PS_PWR_FSM_SHIFT, + .pwr_sts_mask = BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK, + .pwr_sts_on = BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP, + .pwr_sts_off = BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE, +}; +#endif + +void ipu_configure_spc(struct ipu_device *isp, + const struct ipu_hw_variants *hw_variant, + int pkg_dir_idx, void __iomem *base, u64 *pkg_dir, + dma_addr_t pkg_dir_dma_addr) +{ + u32 val; + void __iomem *dmem_base = base + hw_variant->dmem_offset; + void __iomem *spc_regs_base = base + hw_variant->spc_offset; + + val = readl(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL); + val |= IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE; + writel(val, spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL); + + if (isp->secure_mode) { + writel(IPU_PKG_DIR_IMR_OFFSET, dmem_base); + } else { + u32 server_addr; + + server_addr = ipu_cpd_pkg_dir_get_address(pkg_dir, pkg_dir_idx); + + writel(server_addr + + ipu_cpd_get_pg_icache_base(isp, pkg_dir_idx, + isp->cpd_fw->data, + isp->cpd_fw->size), + spc_regs_base + IPU_PSYS_REG_SPC_ICACHE_BASE); + writel(ipu_cpd_get_pg_entry_point(isp, pkg_dir_idx, + isp->cpd_fw->data, + isp->cpd_fw->size), + 
spc_regs_base + IPU_PSYS_REG_SPC_START_PC); + writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY, + spc_regs_base + + IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER); + writel(pkg_dir_dma_addr, dmem_base); + } +} +EXPORT_SYMBOL(ipu_configure_spc); + +int ipu_buttress_psys_freq_get(void *data, u64 *val) +{ + struct ipu_device *isp = data; + u32 reg_val, ratio; + int rval; + + rval = pm_runtime_get_sync(&isp->psys->dev); + if (rval < 0) { + pm_runtime_put(&isp->psys->dev); + dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", rval); + return rval; + } + + reg_val = readl(isp->base + BUTTRESS_REG_PS_FREQ_CAPABILITIES); + + pm_runtime_put(&isp->psys->dev); + + ratio = (reg_val & + BUTTRESS_PS_FREQ_CAPABILITIES_LAST_RESOLVED_RATIO_MASK) >> + BUTTRESS_PS_FREQ_CAPABILITIES_LAST_RESOLVED_RATIO_SHIFT; + + *val = BUTTRESS_PS_FREQ_STEP * ratio; + + return 0; +} + +int ipu_buttress_isys_freq_get(void *data, u64 *val) +{ + struct ipu_device *isp = data; + u32 reg_val; + int rval; + + rval = pm_runtime_get_sync(&isp->isys->dev); + if (rval < 0) { + pm_runtime_put(&isp->isys->dev); + dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", rval); + return rval; + } + + reg_val = readl(isp->base + BUTTRESS_REG_IS_FREQ_CTL); + + pm_runtime_put(&isp->isys->dev); + + /* Input system frequency specified as 1600MHz/divisor */ + *val = 1600 / (reg_val & BUTTRESS_IS_FREQ_CTL_DIVISOR_MASK); + + return 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4pisys_inc b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4pisys_inc new file mode 100644 index 000000000000..90a2ab46510c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4pisys_inc @@ -0,0 +1,26 @@ +IPU_ISYSLIB_INC = \ + -I$(IPU_ISYSLIB_ROOT)/buffer/interface \ + -I$(IPU_ISYSLIB_ROOT)/cell/interface \ + -I$(IPU_ISYSLIB_ROOT)/cell/src \ + -I$(IPU_ISYSLIB_ROOT)/device_access/interface \ + -I$(IPU_ISYSLIB_ROOT)/device_access/src \ + -I$(IPU_ISYSLIB_ROOT)/devices \ + -I$(IPU_ISYSLIB_ROOT)/devices/interface \ + -I$(IPU_ISYSLIB_ROOT)/devices/isys/cnlB0 \ + -I$(IPU_ISYSLIB_ROOT)/devices/src \ + -I$(IPU_ISYSLIB_ROOT)/fw_abi_common_types \ + -I$(IPU_ISYSLIB_ROOT)/fw_abi_common_types/cpu \ + -I$(IPU_ISYSLIB_ROOT)/isysapi/interface \ + -I$(IPU_ISYSLIB_ROOT)/pkg_dir/interface \ + -I$(IPU_ISYSLIB_ROOT)/pkg_dir/src \ + -I$(IPU_ISYSLIB_ROOT)/port/interface \ + -I$(IPU_ISYSLIB_ROOT)/reg_dump/src/isys/cnlB0_gen_reg_dump \ + -I$(IPU_ISYSLIB_ROOT)/regmem/interface \ + -I$(IPU_ISYSLIB_ROOT)/regmem/src \ + -I$(IPU_ISYSLIB_ROOT)/support \ + -I$(IPU_ISYSLIB_ROOT)/syscom/interface \ + -I$(IPU_ISYSLIB_ROOT)/syscom/src \ + -I$(IPU_ISYSLIB_ROOT)/trace/interface \ + -I$(IPU_ISYSLIB_ROOT)/utils/system_defs/ \ + -I$(IPU_ISYSLIB_ROOT)/vied \ + -I$(IPU_ISYSLIB_ROOT)/vied/vied/ \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4pisys_src b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4pisys_src new file mode 100644 index 000000000000..c20760bdb5f1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4pisys_src @@ -0,0 +1,19 @@ +IPU_ISYSLIB_SRC = \ + $(IPU_ISYSLIB_ROOT_REL)/isysapi/src/ia_css_isys_private.o \ + $(IPU_ISYSLIB_ROOT_REL)/isysapi/src/ia_css_isys_public.o \ + $(IPU_ISYSLIB_ROOT_REL)/isysapi/src/ia_css_isys_public_trace.o + +ifeq ($(CONFIG_VIDEO_INTEL_IPU), m) +IPU_ISYSLIB_SRC += \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/buffer_access.o \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_buffer.o \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_input_buffer.o \ + 
$(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_output_buffer.o \ + $(IPU_ISYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_shared_buffer.o \ + $(IPU_ISYSLIB_ROOT_REL)/pkg_dir/src/ia_css_pkg_dir.o \ + $(IPU_ISYSLIB_ROOT_REL)/port/src/queue.o \ + $(IPU_ISYSLIB_ROOT_REL)/port/src/recv_port.o \ + $(IPU_ISYSLIB_ROOT_REL)/port/src/send_port.o \ + $(IPU_ISYSLIB_ROOT_REL)/reg_dump/src/reg_dump_generic_bridge.o \ + $(IPU_ISYSLIB_ROOT_REL)/syscom/src/ia_css_syscom.o +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4ppsys_inc b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4ppsys_inc new file mode 100644 index 000000000000..fb01678242ee --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4ppsys_inc @@ -0,0 +1,52 @@ +IPU_PSYSLIB_INC = \ + -I$(IPU_PSYSLIB_ROOT)/buffer/interface \ + -I$(IPU_PSYSLIB_ROOT)/cell/interface \ + -I$(IPU_PSYSLIB_ROOT)/cell/src \ + -I$(IPU_PSYSLIB_ROOT)/client_pkg/interface \ + -I$(IPU_PSYSLIB_ROOT)/client_pkg/src \ + -I$(IPU_PSYSLIB_ROOT)/cpd/ \ + -I$(IPU_PSYSLIB_ROOT)/cpd/cpd_component/interface \ + -I$(IPU_PSYSLIB_ROOT)/cpd/cpd_metadata/interface \ + -I$(IPU_PSYSLIB_ROOT)/device_access/interface \ + -I$(IPU_PSYSLIB_ROOT)/device_access/src \ + -I$(IPU_PSYSLIB_ROOT)/devices \ + -I$(IPU_PSYSLIB_ROOT)/devices/interface \ + -I$(IPU_PSYSLIB_ROOT)/devices/psys/cnlB0 \ + -I$(IPU_PSYSLIB_ROOT)/devices/src \ + -I$(IPU_PSYSLIB_ROOT)/fw_abi_common_types \ + -I$(IPU_PSYSLIB_ROOT)/fw_abi_common_types/cpu \ + -I$(IPU_PSYSLIB_ROOT)/pkg_dir/interface \ + -I$(IPU_PSYSLIB_ROOT)/pkg_dir/src \ + -I$(IPU_PSYSLIB_ROOT)/port/interface \ + -I$(IPU_PSYSLIB_ROOT)/psys_private_pg/interface \ + -I$(IPU_PSYSLIB_ROOT)/psys_server/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/data/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/data/src \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/device/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/device/interface/cnlB0 \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/dynamic/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/dynamic/src \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/kernel/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/param/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/param/src \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/psys_server_manifest/cnlB0 \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/resource_model/cnlB0 \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/sim/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/sim/src \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/static/interface \ + -I$(IPU_PSYSLIB_ROOT)/psysapi/static/src \ + -I$(IPU_PSYSLIB_ROOT)/reg_dump/src/psys/cnlB0_gen_reg_dump \ + -I$(IPU_PSYSLIB_ROOT)/regmem/interface \ + -I$(IPU_PSYSLIB_ROOT)/regmem/src \ + -I$(IPU_PSYSLIB_ROOT)/routing_bitmap/interface \ + -I$(IPU_PSYSLIB_ROOT)/routing_bitmap/src \ + -I$(IPU_PSYSLIB_ROOT)/support \ + -I$(IPU_PSYSLIB_ROOT)/syscom/interface \ + -I$(IPU_PSYSLIB_ROOT)/syscom/src \ + -I$(IPU_PSYSLIB_ROOT)/trace/interface \ + -I$(IPU_PSYSLIB_ROOT)/vied \ + -I$(IPU_PSYSLIB_ROOT)/vied/vied/ \ + -I$(IPU_PSYSLIB_ROOT)/vied_nci_acb/interface \ + -I$(IPU_PSYSLIB_ROOT)/vied_parameters/interface \ + -I$(IPU_PSYSLIB_ROOT)/vied_parameters/src \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4ppsys_src b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4ppsys_src new file mode 100644 index 000000000000..3ed88d455bab --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.ipu4ppsys_src @@ -0,0 +1,32 @@ +IPU_PSYSLIB_SRC = \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/buffer_access.o \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_buffer.o \ + 
$(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_input_buffer.o \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_output_buffer.o \ + $(IPU_PSYSLIB_ROOT_REL)/buffer/src/cpu/ia_css_shared_buffer.o \ + $(IPU_PSYSLIB_ROOT_REL)/client_pkg/src/ia_css_client_pkg.o \ + $(IPU_PSYSLIB_ROOT_REL)/pkg_dir/src/ia_css_pkg_dir.o \ + $(IPU_PSYSLIB_ROOT_REL)/port/src/queue.o \ + $(IPU_PSYSLIB_ROOT_REL)/port/src/recv_port.o \ + $(IPU_PSYSLIB_ROOT_REL)/port/src/send_port.o \ + $(IPU_PSYSLIB_ROOT_REL)/psys_server/src/bxt_spctrl_process_group_cmd_impl.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/data/src/ia_css_program_group_data.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/device/src/ia_css_psys_device.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/dynamic/src/ia_css_psys_buffer_set.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/dynamic/src/ia_css_psys_process.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/dynamic/src/ia_css_psys_process_group.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/dynamic/src/ia_css_psys_terminal.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/kernel/src/ia_css_kernel_bitmap.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/param/src/ia_css_program_group_param.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/psys_server_manifest/cnlB0/ia_css_psys_server_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/resource_model/cnlB0/vied_nci_psys_resource_model.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/sim/src/vied_nci_psys_system.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/static/src/ia_css_psys_program_group_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/static/src/ia_css_psys_program_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/psysapi/static/src/ia_css_psys_terminal_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/reg_dump/src/reg_dump_generic_bridge.o \ + $(IPU_PSYSLIB_ROOT_REL)/routing_bitmap/src/ia_css_rbm.o \ + $(IPU_PSYSLIB_ROOT_REL)/routing_bitmap/src/ia_css_rbm_manifest.o \ + $(IPU_PSYSLIB_ROOT_REL)/syscom/src/ia_css_syscom.o \ + $(IPU_PSYSLIB_ROOT_REL)/vied_parameters/src/ia_css_terminal.o \ + $(IPU_PSYSLIB_ROOT_REL)/vied_parameters/src/ia_css_terminal_manifest.o \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.isyslib b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.isyslib new file mode 100644 index 000000000000..f0b540d78d2b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.isyslib @@ -0,0 +1,47 @@ +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +PROGRAMS = isys_fw +SYSTEM = input_system_system +IPU_ISYSLIB_ROOT_REL = ipu4p-css/lib2600 +IPU_ISYSLIB_ROOT = $(srcpath)/$(src)/$(IPU_ISYSLIB_ROOT_REL) + +include $(srcpath)/$(src)/ipu4p-css/Makefile.ipu4pisys_inc +include $(srcpath)/$(src)/ipu4p-css/Makefile.ipu4pisys_src + +# +# copy wrapper here only for isys usage, psys would use the original one +# +$(shell cp -f $(srcpath)/$(src)/../ipu-wrapper.c $(srcpath)/$(src)/ipu4p-css/ipu-wrapper.c) + +intel-ipu4p-isys-csslib-objs := \ + ipu4p-css/libintel-ipu4p.o \ + $(IPU_ISYSLIB_SRC) + +ifeq ($(CONFIG_VIDEO_INTEL_IPU), m) +intel-ipu4p-isys-csslib-objs += ipu4p-css/ipu-wrapper.o +endif +obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4p-isys-csslib.o + +INCLUDES := -I$(srcpath)/$(src)/$(IPU_ISYSLIB_ROOT_REL) \ + -I$(srcpath)/$(src) \ + $(IPU_ISYSLIB_INC) + +DEFINES:= -D__HOST__ -D__KERNEL__ -DISYS_FPGA -DPSYS_FPGA + +DEFINES += -DSSID=1 +DEFINES += -DMMID=1 +DEFINES += -DPROGNAME=isys_fw +DEFINES += -DPROGMAP=\"isys_fw.map.h\" +DEFINES += -DSUBSYSTEM_INCLUDE=\ +DEFINES += -DCELL=input_system_unis_logic_sp_control_tile_sp +DEFINES += -DSPMAIN=isys_fw +DEFINES += -DRUN_INTEGRATION +DEFINES += -DDEBUG_SP_NCI +DEFINES += 
-DCFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL=1 +DEFINES += -DHRT_ON_VIED_SUBSYSTEM_ACCESS=0 +DEFINES += -DHRT_USE_VIR_ADDRS +DEFINES += -DHRT_HW + +ccflags-y += $(INCLUDES) $(DEFINES) -fno-common diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.psyslib b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.psyslib new file mode 100644 index 000000000000..dc43b771c27c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/Makefile.psyslib @@ -0,0 +1,15 @@ +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +# note: this file only defines INCLUDES paths for lib2600psys +include $(srcpath)/$(src)/ipu4p-css/Makefile.ipu4ppsys_inc + +IPU_PSYSLIB_ROOT = $(srcpath)/$(src)/ipu4p-css/lib2600psys/lib +HOST_DEFINES += -DPSYS_SERVER_ON_SPC +HOST_DEFINES += -DCFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL=1 + +ccflags-y += $(IPU_PSYSLIB_INC) $(HOST_DEFINES) + +obj-$(CONFIG_VIDEO_INTEL_IPU) += ipu4p-css/lib2600psys/ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/ia_css_fw_pkg_release.h b/drivers/media/pci/intel/ipu4/ipu4p-css/ia_css_fw_pkg_release.h new file mode 100644 index 000000000000..cb20a688b7c3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/ia_css_fw_pkg_release.h @@ -0,0 +1,14 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#define IA_CSS_FW_PKG_RELEASE 0x20180615 diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/buffer.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/buffer.mk new file mode 100644 index 000000000000..c00a1133b440 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/buffer.mk @@ -0,0 +1,43 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is BUFFER + +ifdef _H_BUFFER_MK +$(error ERROR: buffer.mk included multiple times, please check makefile) +else +_H_BUFFER_MK=1 +endif + +BUFFER_DIR=$${MODULES_DIR}/buffer + +BUFFER_INTERFACE=$(BUFFER_DIR)/interface +BUFFER_SOURCES_CPU=$(BUFFER_DIR)/src/cpu +BUFFER_SOURCES_CSS=$(BUFFER_DIR)/src/css + +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_output_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_input_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_shared_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/buffer_access.c +BUFFER_HOST_CPPFLAGS += -I$(BUFFER_INTERFACE) +BUFFER_HOST_CPPFLAGS += -I$${MODULES_DIR}/support + +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_input_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_output_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_shared_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/buffer_access.c + +BUFFER_FW_CPPFLAGS += -I$(BUFFER_INTERFACE) +BUFFER_FW_CPPFLAGS += -I$${MODULES_DIR}/support diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/buffer_access.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/buffer_access.h new file mode 100644 index 000000000000..e5fe647742c9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/buffer_access.h @@ -0,0 +1,36 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __BUFFER_ACCESS_H +#define __BUFFER_ACCESS_H + +#include "buffer_type.h" +/* #def to keep consistent the buffer load interfaces for host and css */ +#define IDM 0 + +void +buffer_load( + buffer_address address, + void *data, + unsigned int size, + unsigned int mm_id); + +void +buffer_store( + buffer_address address, + const void *data, + unsigned int size, + unsigned int mm_id); + +#endif /* __BUFFER_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/buffer_type.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/buffer_type.h new file mode 100644 index 000000000000..de51f2394158 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/buffer_type.h @@ -0,0 +1,29 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __BUFFER_TYPE_H
+#define __BUFFER_TYPE_H
+
+/* portable access to buffers in DDR */
+
+#ifdef __VIED_CELL
+typedef unsigned int buffer_address;
+#else
+/* workaround needed because shared_memory_access.h uses size_t */
+#include "type_support.h"
+#include "vied/shared_memory_access.h"
+typedef host_virtual_address_t buffer_address;
+#endif
+
+#endif /* __BUFFER_TYPE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_buffer_address.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_buffer_address.h
new file mode 100644
index 000000000000..2530297e8e36
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_buffer_address.h
@@ -0,0 +1,25 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_BUFFER_ADDRESS_H
+#define __IA_CSS_BUFFER_ADDRESS_H
+
+#include "type_support.h"
+
+typedef uint32_t ia_css_buffer_address;	/* CSS virtual address */
+
+#define ia_css_buffer_address_null	((ia_css_buffer_address)0)
+
+#endif /* __IA_CSS_BUFFER_ADDRESS_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_input_buffer.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_input_buffer.h
new file mode 100644
index 000000000000..b8e7a6ac4648
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_input_buffer.h
@@ -0,0 +1,52 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_INPUT_BUFFER_H
+#define __IA_CSS_INPUT_BUFFER_H
+
+
+/* Input Buffers */
+
+/* A CSS input buffer is a buffer in DDR that can be written by the CPU,
+ * and that can be read by CSS hardware, after the buffer has been handed over.
+ * Examples: command buffer, input frame buffer, parameter buffer
+ * An input buffer must be mapped into the CPU address space before it can be
+ * written by the CPU.
+ * After mapping, writing, and unmapping, the buffer can be handed over to the
+ * firmware. An input buffer is handed over to the CSS by mapping it to the
+ * CSS address space (by the CPU), and by passing the resulting CSS (virtual)
+ * address of the input buffer to the CSS hardware.
+ * The firmware can read from an input buffer as soon as it has received the
+ * CSS virtual address.
+ * The firmware should not write into an input buffer.
+ * The firmware hands over the input buffer (back to the CPU) by sending the
+ * buffer handle via a response.
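+ *
+ * A minimal CPU-side lifecycle sketch (editorial addition; the functions
+ * are declared in ia_css_input_buffer_cpu.h, and sid/mid/size are assumed
+ * to be a valid subsystem id, memory id and byte count):
+ *
+ *	ia_css_input_buffer b = ia_css_input_buffer_alloc(sid, mid, size);
+ *	void *p = ia_css_input_buffer_cpu_map(b);
+ *	... the CPU fills the buffer through p ...
+ *	ia_css_input_buffer_cpu_unmap(b);
+ *	ia_css_input_buffer_css_address css =
+ *		ia_css_input_buffer_css_map(mid, b);
+ *	... pass css to the firmware and wait for the response ...
+ *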
The host should unmap the buffer, + * before reusing it. + * The firmware should not read from the input buffer after returning the + * buffer handle to the CPU. + * + * A buffer may be pre-mapped to the CPU and/or to the CSS upon allocation, + * depending on the allocator's preference. In case of pre-mapped buffers, + * the map and unmap functions will only manage read and write access. + */ + +#include "ia_css_buffer_address.h" + +typedef struct ia_css_buffer_s *ia_css_input_buffer; /* input buffer handle */ +typedef void *ia_css_input_buffer_cpu_address; /* CPU virtual address */ +/* CSS virtual address */ +typedef ia_css_buffer_address ia_css_input_buffer_css_address; + +#endif /* __IA_CSS_INPUT_BUFFER_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_input_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_input_buffer_cpu.h new file mode 100644 index 000000000000..d3d01353ce43 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_input_buffer_cpu.h @@ -0,0 +1,49 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_INPUT_BUFFER_CPU_H +#define __IA_CSS_INPUT_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_input_buffer.h" + +ia_css_input_buffer +ia_css_input_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_input_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_input_buffer b); + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_map(ia_css_input_buffer b); + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_unmap(ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map(vied_memory_t mid, ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map_no_invalidate(vied_memory_t mid, ia_css_input_buffer b); + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_unmap(ia_css_input_buffer b); + + +#endif /* __IA_CSS_INPUT_BUFFER_CPU_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_output_buffer.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_output_buffer.h new file mode 100644 index 000000000000..a8c0f9e8554e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_output_buffer.h @@ -0,0 +1,31 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_OUTPUT_BUFFER_H
+#define __IA_CSS_OUTPUT_BUFFER_H
+
+/* Output Buffers */
+/* A CSS output buffer is a buffer in DDR that can be written by CSS hardware
+ * and that can be read by the host, after the buffer has been handed over.
+ * Examples: output frame buffer
+ */
+
+#include "ia_css_buffer_address.h"
+
+typedef struct ia_css_buffer_s *ia_css_output_buffer;
+typedef void *ia_css_output_buffer_cpu_address;
+typedef ia_css_buffer_address ia_css_output_buffer_css_address;
+
+#endif /* __IA_CSS_OUTPUT_BUFFER_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_output_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_output_buffer_cpu.h
new file mode 100644
index 000000000000..0299fc3b7eb6
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_output_buffer_cpu.h
@@ -0,0 +1,48 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_OUTPUT_BUFFER_CPU_H
+#define __IA_CSS_OUTPUT_BUFFER_CPU_H
+
+#include "vied/shared_memory_map.h"
+#include "ia_css_output_buffer.h"
+
+ia_css_output_buffer
+ia_css_output_buffer_alloc(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	unsigned int size);
+
+void
+ia_css_output_buffer_free(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	ia_css_output_buffer b);
+
+ia_css_output_buffer_css_address
+ia_css_output_buffer_css_map(ia_css_output_buffer b);
+
+ia_css_output_buffer_css_address
+ia_css_output_buffer_css_unmap(ia_css_output_buffer b);
+
+ia_css_output_buffer_cpu_address
+ia_css_output_buffer_cpu_map(vied_memory_t mid, ia_css_output_buffer b);
+
+ia_css_output_buffer_cpu_address
+ia_css_output_buffer_cpu_map_no_invalidate(vied_memory_t mid, ia_css_output_buffer b);
+
+ia_css_output_buffer_cpu_address
+ia_css_output_buffer_cpu_unmap(ia_css_output_buffer b);
+
+
+#endif /* __IA_CSS_OUTPUT_BUFFER_CPU_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_return_token.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_return_token.h
new file mode 100644
index 000000000000..440161d2f32b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_return_token.h
@@ -0,0 +1,54 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_RETURN_TOKEN_H
+#define __IA_CSS_RETURN_TOKEN_H
+
+#include "storage_class.h"
+#include "assert_support.h"	/* For CT_ASSERT */
+
+/* ia_css_return_token: data item of exactly 8 bytes (64 bits)
+ * which can be used to pass a return token back to the host
+*/
+typedef unsigned long long ia_css_return_token;
+
+STORAGE_CLASS_INLINE void
+ia_css_return_token_copy(ia_css_return_token *to,
+			 const ia_css_return_token *from)
+{
+	/* copy a return token on VIED processor */
+	int *dst = (int *)to;
+	int *src = (int *)from;
+
+	dst[0] = src[0];
+	dst[1] = src[1];
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_return_token_zero(ia_css_return_token *to)
+{
+	/* zero return token on VIED processor */
+	int *dst = (int *)to;
+
+	dst[0] = 0;
+	dst[1] = 0;
+}
+
+STORAGE_CLASS_INLINE void _check_return_token_size(void)
+{
+	CT_ASSERT(sizeof(int) == 4);
+	CT_ASSERT(sizeof(ia_css_return_token) == 8);
+}
+
+#endif /* __IA_CSS_RETURN_TOKEN_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_shared_buffer.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_shared_buffer.h
new file mode 100644
index 000000000000..558ec679f98a
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_shared_buffer.h
@@ -0,0 +1,32 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_SHARED_BUFFER_H
+#define __IA_CSS_SHARED_BUFFER_H
+
+/* Shared Buffers */
+/* A CSS shared buffer is a buffer in DDR that can be read and written by the
+ * CPU and CSS.
+ * Both the CPU and CSS can have the buffer mapped simultaneously.
+ * Access rights are not managed by this interface; this could be done by
+ * means of the read and write pointers of a queue, for example.
+ */
+
+#include "ia_css_buffer_address.h"
+
+typedef struct ia_css_buffer_s *ia_css_shared_buffer;
+typedef void *ia_css_shared_buffer_cpu_address;
+typedef ia_css_buffer_address ia_css_shared_buffer_css_address;
+
+#endif /* __IA_CSS_SHARED_BUFFER_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_shared_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_shared_buffer_cpu.h
new file mode 100644
index 000000000000..ff62914f99dc
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/interface/ia_css_shared_buffer_cpu.h
@@ -0,0 +1,51 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IA_CSS_SHARED_BUFFER_CPU_H +#define __IA_CSS_SHARED_BUFFER_CPU_H + +#include "vied/shared_memory_map.h" +#include "ia_css_shared_buffer.h" + +ia_css_shared_buffer +ia_css_shared_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size); + +void +ia_css_shared_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_shared_buffer b); + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_map(ia_css_shared_buffer b); + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_unmap(ia_css_shared_buffer b); + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_map(ia_css_shared_buffer b); + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_unmap(ia_css_shared_buffer b); + +ia_css_shared_buffer +ia_css_shared_buffer_css_update(vied_memory_t mid, ia_css_shared_buffer b); + +ia_css_shared_buffer +ia_css_shared_buffer_cpu_update(vied_memory_t mid, ia_css_shared_buffer b); + +#endif /* __IA_CSS_SHARED_BUFFER_CPU_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/buffer_access.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/buffer_access.c new file mode 100644 index 000000000000..83cbda5a9ff5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/buffer_access.c @@ -0,0 +1,40 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +/* implementation of buffer access from the CPU */ +/* using shared_memory interface */ + +#include "buffer_access.h" +#include "vied/shared_memory_access.h" + +void +buffer_load( + buffer_address address, + void *data, + unsigned int bytes, + unsigned int mm_id) +{ + shared_memory_load(mm_id, address, data, bytes); +} + +void +buffer_store( + buffer_address address, + const void *data, + unsigned int bytes, + unsigned int mm_id) +{ + shared_memory_store(mm_id, address, data, bytes); +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_buffer.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_buffer.c new file mode 100644 index 000000000000..3828b186ddac --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_buffer.c @@ -0,0 +1,52 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+/* provided interface */
+#include "ia_css_buffer.h"
+
+/* used interfaces */
+#include "vied/shared_memory_access.h"
+#include "vied/shared_memory_map.h"
+#include "cpu_mem_support.h"
+
+ia_css_buffer_t
+ia_css_buffer_alloc(vied_subsystem_t sid, vied_memory_t mid, unsigned int size)
+{
+	ia_css_buffer_t b;
+
+	b = ia_css_cpu_mem_alloc(sizeof(*b));
+	if (b == NULL)
+		return NULL;
+
+	b->mem = shared_memory_alloc(mid, size);
+
+	if (b->mem == 0) {
+		ia_css_cpu_mem_free(b);
+		return NULL;
+	}
+
+	b->css_address = shared_memory_map(sid, mid, b->mem);
+	b->size = size;
+	return b;
+}
+
+
+void
+ia_css_buffer_free(vied_subsystem_t sid, vied_memory_t mid, ia_css_buffer_t b)
+{
+	shared_memory_unmap(sid, mid, b->css_address);
+	shared_memory_free(mid, b->mem);
+	ia_css_cpu_mem_free(b);
+}
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_buffer.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_buffer.h
new file mode 100644
index 000000000000..0f99a06e9a89
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_buffer.h
@@ -0,0 +1,58 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_BUFFER_H
+#define __IA_CSS_BUFFER_H
+
+/* workaround: needed because shared_memory_map.h uses size_t */
+#include "type_support.h"
+#include "vied/shared_memory_map.h"
+
+typedef enum {
+	buffer_unmapped,	/* buffer is not accessible by cpu, nor css */
+	buffer_write,		/* output buffer: css has write access */
+				/* input buffer: cpu has write access */
+	buffer_read,		/* input buffer: css has read access */
+				/* output buffer: cpu has read access */
+	buffer_cpu,		/* shared buffer: cpu has read/write access */
+	buffer_css		/* shared buffer: css has read/write access */
+} buffer_state;
+
+struct ia_css_buffer_s {
+	/* number of bytes allocated */
+	unsigned int size;
+	/* allocated virtual memory object */
+	host_virtual_address_t mem;
+	/* virtual address to be used on css/firmware */
+	vied_virtual_address_t css_address;
+	/* virtual address to be used on cpu/host */
+	void *cpu_address;
+	buffer_state state;
+};
+
+typedef struct ia_css_buffer_s *ia_css_buffer_t;
+
+ia_css_buffer_t
+ia_css_buffer_alloc(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	unsigned int size);
+
+void
+ia_css_buffer_free(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	ia_css_buffer_t b);
+
+#endif /* __IA_CSS_BUFFER_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_input_buffer.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_input_buffer.c
new file mode 100644
index 000000000000..2bd754062a0e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_input_buffer.c
@@ -0,0 +1,185 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include "ia_css_input_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_input_buffer +ia_css_input_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_input_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_input_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_input_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_map(ia_css_input_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map input buffer to CPU address space, acquire write access */ + b->state = buffer_write; + + /* return pre-mapped buffer */ + return b->cpu_address; +} + + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_unmap(ia_css_input_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_write) + return NULL; + + /* unmap input buffer from CPU address space, release write access */ + b->state = buffer_unmapped; + + /* return pre-mapped buffer */ + return b->cpu_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map(vied_memory_t mid, ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map input buffer to CSS address space, acquire read access */ + b->state = buffer_read; + + /* now flush the cache */ + ia_css_cpu_mem_cache_flush(b->cpu_address, b->size); +#ifndef HRT_HW + /* only copy in case of simulation, otherwise it should just work */ + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return (ia_css_input_buffer_css_address)b->css_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map_no_invalidate(vied_memory_t mid, ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if 
(b->state != buffer_unmapped) + return 0; + + /* map input buffer to CSS address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only copy in case of simulation, otherwise it should just work */ + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return (ia_css_input_buffer_css_address)b->css_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_unmap(ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_read) + return 0; + + /* unmap input buffer from CSS address space, release read access */ + b->state = buffer_unmapped; + + /* input buffer only, no need to invalidate cache */ + + return (ia_css_input_buffer_css_address)b->css_address; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_output_buffer.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_output_buffer.c new file mode 100644 index 000000000000..892dcbd49825 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_output_buffer.c @@ -0,0 +1,182 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include "ia_css_output_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_output_buffer +ia_css_output_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_output_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_output_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_output_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_map(ia_css_output_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map output buffer 
to CSS address space, acquire write access */ + b->state = buffer_write; + + return (ia_css_output_buffer_css_address)b->css_address; +} + + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_unmap(ia_css_output_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_write) + return 0; + + /* unmap output buffer from CSS address space, release write access */ + b->state = buffer_unmapped; + + return (ia_css_output_buffer_css_address)b->css_address; +} + + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map(vied_memory_t mid, ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map output buffer to CPU address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only in simulation */ + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + /* now invalidate the cache */ + ia_css_cpu_mem_cache_invalidate(b->cpu_address, b->size); + + return b->cpu_address; +} + + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map_no_invalidate(vied_memory_t mid, ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map output buffer to CPU address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only in simulation */ + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return b->cpu_address; +} + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_unmap(ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_read) + return NULL; + + /* unmap output buffer from CPU address space, release read access */ + b->state = buffer_unmapped; + + /* output only, no need to flush cache */ + + return b->cpu_address; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_shared_buffer.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_shared_buffer.c new file mode 100644 index 000000000000..1041bd07721b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/buffer/src/cpu/ia_css_shared_buffer.c @@ -0,0 +1,188 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + + +#include "ia_css_shared_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_shared_buffer +ia_css_shared_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_shared_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_shared_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_shared_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_map(ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map shared buffer to CPU address space */ + b->state = buffer_cpu; + + return b->cpu_address; +} + + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_unmap(ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_cpu) + return NULL; + + /* unmap shared buffer from CPU address space */ + b->state = buffer_unmapped; + + return b->cpu_address; +} + + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_map(ia_css_shared_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map shared buffer to CSS address space */ + b->state = buffer_css; + + return (ia_css_shared_buffer_css_address)b->css_address; +} + + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_unmap(ia_css_shared_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_css) + return 0; + + /* unmap shared buffer from CSS address space */ + b->state = buffer_unmapped; + + return (ia_css_shared_buffer_css_address)b->css_address; +} + + +ia_css_shared_buffer +ia_css_shared_buffer_css_update(vied_memory_t mid, ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + + /* flush the buffer to CSS after it was modified by the CPU */ + /* flush cache to ddr */ + ia_css_cpu_mem_cache_flush(b->cpu_address, b->size); +#ifndef HRT_HW + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return b; +} + + +ia_css_shared_buffer +ia_css_shared_buffer_cpu_update(vied_memory_t mid, ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + + /* flush the buffer to the CPU after it has been modified by CSS */ +#ifndef HRT_HW + /* copy data from CSS address space to CPU address space */ + 
shared_memory_load(mid, b->mem, b->cpu_address, b->size);
+#else
+	(void)mid;
+#endif
+	/* invalidate the cache so the cpu reads the updated data from ddr */
+	ia_css_cpu_mem_cache_invalidate(b->cpu_address, b->size);
+
+	return b;
+}
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/cell.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/cell.mk
new file mode 100644
index 000000000000..fa5e65022601
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/cell.mk
@@ -0,0 +1,43 @@
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details
+#
+#
+ifndef _CELL_MK_
+_CELL_MK_ = 1
+
+
+CELL_DIR=$${MODULES_DIR}/cell
+CELL_INTERFACE=$(CELL_DIR)/interface
+CELL_SOURCES=$(CELL_DIR)/src
+
+CELL_HOST_FILES =
+CELL_FW_FILES =
+
+CELL_HOST_CPPFLAGS = \
+	-I$(CELL_INTERFACE) \
+	-I$(CELL_SOURCES)
+
+CELL_FW_CPPFLAGS = \
+	-I$(CELL_INTERFACE) \
+	-I$(CELL_SOURCES)
+
+ifdef 0
+# Disabled until it is decided to go this way or not
+include $(MODULES_DIR)/device_access/device_access.mk
+CELL_HOST_FILES += $(DEVICE_ACCESS_HOST_FILES)
+CELL_FW_FILES += $(DEVICE_ACCESS_FW_FILES)
+CELL_HOST_CPPFLAGS += $(DEVICE_ACCESS_HOST_CPPFLAGS)
+CELL_FW_CPPFLAGS += $(DEVICE_ACCESS_FW_CPPFLAGS)
+endif
+
+endif
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/interface/ia_css_cell.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/interface/ia_css_cell.h
new file mode 100644
index 000000000000..3fac3c791b6e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/interface/ia_css_cell.h
@@ -0,0 +1,112 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
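+*/
+
+/* A typical load/start/wait sequence with this interface looks roughly
+ * as follows (illustrative only; ssid, cell_id and pc are placeholder
+ * values, not constants defined by this header):
+ *
+ *	ia_css_cell_set_start_pc(ssid, cell_id, pc);
+ *	ia_css_cell_start(ssid, cell_id);
+ *	... the cell program runs ...
+ *	ia_css_cell_wait(ssid, cell_id);
+ */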
+
+#ifndef __IA_CSS_CELL_H
+#define __IA_CSS_CELL_H
+
+#include "storage_class.h"
+#include "type_support.h"
+
+STORAGE_CLASS_INLINE unsigned int
+ia_css_cell_get_stat_ctrl(unsigned int ssid, unsigned int cell_id);
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_stat_ctrl(unsigned int ssid, unsigned int cell_id,
+	unsigned int value);
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_start_pc(unsigned int ssid, unsigned int cell_id,
+	unsigned int pc);
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_icache_base_address(unsigned int ssid, unsigned int cell_id,
+	unsigned int value);
+
+#if 0 /* To be implemented after completing cell device properties */
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_icache_info_bits(unsigned int ssid, unsigned int cell_id,
+	unsigned int value);
+
+STORAGE_CLASS_INLINE unsigned int
+ia_css_cell_get_debug_pc(unsigned int ssid, unsigned int cell_id);
+
+STORAGE_CLASS_INLINE unsigned int
+ia_css_cell_get_stall_bits(unsigned int ssid, unsigned int cell_id);
+#endif
+
+/* configure master ports */
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_base_address(unsigned int ssid, unsigned int cell_id,
+	unsigned int master, unsigned int value);
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_segment_base_address(unsigned int ssid,
+	unsigned int cell_id,
+	unsigned int master, unsigned int segment, unsigned int value);
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_info_bits(unsigned int ssid, unsigned int cell_id,
+	unsigned int master, unsigned int value);
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_segment_info_bits(unsigned int ssid,
+	unsigned int cell_id,
+	unsigned int master, unsigned int segment, unsigned int value);
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_info_override_bits(unsigned int ssid, unsigned int cell,
+	unsigned int master, unsigned int value);
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_segment_info_override_bits(unsigned int ssid,
+	unsigned int cell,
+	unsigned int master, unsigned int segment, unsigned int value);
+
+/* Access memories */
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_mem_store_32(unsigned int ssid, unsigned int cell_id,
+	unsigned int mem_id, unsigned int addr, unsigned int value);
+
+STORAGE_CLASS_INLINE unsigned int
+ia_css_cell_mem_load_32(unsigned int ssid, unsigned int cell_id,
+	unsigned int mem_id, unsigned int addr);
+
+/***********************************************************************/
+
+STORAGE_CLASS_INLINE unsigned int
+ia_css_cell_is_ready(unsigned int ssid, unsigned int cell_id);
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_start_bit(unsigned int ssid, unsigned int cell_id);
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_run_bit(unsigned int ssid, unsigned int cell_id,
+	unsigned int value);
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_start(unsigned int ssid, unsigned int cell_id);
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_start_prefetch(unsigned int ssid, unsigned int cell_id,
+	bool prefetch);
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_wait(unsigned int ssid, unsigned int cell_id);
+
+/* include inline implementation */
+#include "ia_css_cell_impl.h"
+
+#endif /* __IA_CSS_CELL_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/src/ia_css_cell_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/src/ia_css_cell_impl.h
new file mode 100644
index 000000000000..60b2e234da1a
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cell/src/ia_css_cell_impl.h
@@ -0,0 +1,272 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CELL_IMPL_H +#define __IA_CSS_CELL_IMPL_H + +#include "ia_css_cell.h" + +#include "ia_css_cmem.h" +#include "ipu_device_cell_properties.h" +#include "storage_class.h" +#include "assert_support.h" +#include "platform_support.h" +#include "misc_support.h" + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_regs_addr(unsigned int cell_id) +{ + /* mem_id 0 is for registers */ + return ipu_device_cell_memory_address(cell_id, 0); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_dmem_addr(unsigned int cell_id) +{ + /* mem_id 1 is for DMEM */ + return ipu_device_cell_memory_address(cell_id, 1); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_mem_store_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr, unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ipu_device_cell_memory_address( + cell_id, mem_id) + addr, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_mem_load_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr) +{ + return ia_css_cmem_load_32( + ssid, ipu_device_cell_memory_address(cell_id, mem_id) + addr); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stat_ctrl(unsigned int ssid, unsigned int cell_id) +{ + return ia_css_cmem_load_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_stat_ctrl(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_is_ready(unsigned int ssid, unsigned int cell_id) +{ + unsigned int reg; + + reg = ia_css_cell_get_stat_ctrl(ssid, cell_id); + /* READY must be 1, START must be 0 */ + return (reg & (1 << IPU_DEVICE_CELL_STAT_CTRL_READY_BIT)) && + ((~reg) & (1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT)); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_pc(unsigned int ssid, unsigned int cell_id, + unsigned int pc) +{ + /* set start PC */ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_START_PC_REG_ADDRESS, pc); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_bit(unsigned int ssid, unsigned int cell_id) +{ + unsigned int reg; + + reg = 1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT; + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_run_bit(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + unsigned int reg; + + reg = value << IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT; + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_start(unsigned int ssid, unsigned int cell_id) +{ + ia_css_cell_start_prefetch(ssid, cell_id, 0); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_start_prefetch(unsigned int ssid, unsigned int cell_id, + bool prefetch) +{ + unsigned int reg = 0; + + /* Set run bit and start bit */ + reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT); + reg |= (1 << 
IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT);
+	/* Invalidate the icache */
+	reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT);
+	/* Optionally enable prefetching */
+	reg |= (prefetch ?
+		(1 << IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT) :
+		0);
+
+	/* store into register */
+	ia_css_cell_set_stat_ctrl(ssid, cell_id, reg);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_wait(unsigned int ssid, unsigned int cell_id)
+{
+	do {
+		ia_css_sleep();
+	} while (!ia_css_cell_is_ready(ssid, cell_id));
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_icache_base_address(unsigned int ssid, unsigned int cell_id,
+	unsigned int value)
+{
+	ia_css_cmem_store_32(
+		ssid, ia_css_cell_regs_addr(cell_id) +
+		IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS, value);
+}
+
+/* master port configuration */
+
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_segment_info_bits(unsigned int ssid, unsigned int cell,
+	unsigned int master, unsigned int segment, unsigned int value)
+{
+	unsigned int addr;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < ipu_device_cell_num_masters(cell));
+	assert(segment < ipu_device_cell_master_num_segments(cell, master));
+
+	addr = ipu_device_cell_memory_address(cell, 0);
+	addr += ipu_device_cell_master_info_reg(cell, master);
+	addr += segment * ipu_device_cell_master_stride(cell, master);
+	ia_css_cmem_store_32(ssid, addr, value);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_segment_info_override_bits(unsigned int ssid,
+	unsigned int cell,
+	unsigned int master, unsigned int segment, unsigned int value)
+{
+	unsigned int addr;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < ipu_device_cell_num_masters(cell));
+	assert(segment < ipu_device_cell_master_num_segments(cell, master));
+
+	addr = ipu_device_cell_memory_address(cell, 0);
+	addr += ipu_device_cell_master_info_override_reg(cell, master);
+	addr += segment * ipu_device_cell_master_stride(cell, master);
+	ia_css_cmem_store_32(ssid, addr, value);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_segment_base_address(unsigned int ssid,
+	unsigned int cell,
+	unsigned int master, unsigned int segment, unsigned int value)
+{
+	unsigned int addr;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < ipu_device_cell_num_masters(cell));
+	assert(segment < ipu_device_cell_master_num_segments(cell, master));
+
+	addr = ipu_device_cell_memory_address(cell, 0);
+	addr += ipu_device_cell_master_base_reg(cell, master);
+	addr += segment * ipu_device_cell_master_stride(cell, master);
+	ia_css_cmem_store_32(ssid, addr, value);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_info_bits(unsigned int ssid, unsigned int cell,
+	unsigned int master, unsigned int value)
+{
+	unsigned int addr, s, stride, num_segments;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < ipu_device_cell_num_masters(cell));
+
+	addr = ipu_device_cell_memory_address(cell, 0);
+	addr += ipu_device_cell_master_info_reg(cell, master);
+	stride = ipu_device_cell_master_stride(cell, master);
+	num_segments = ipu_device_cell_master_num_segments(cell, master);
+	for (s = 0; s < num_segments; s++) {
+		ia_css_cmem_store_32(ssid, addr, value);
+		addr += stride;
+	}
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_info_override_bits(unsigned int ssid, unsigned int cell,
+	unsigned int master, unsigned int value)
+{
+	unsigned int addr, s, stride, num_segments;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master <
ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_override_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_base_address(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments, segment_size; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_base_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + segment_size = ipu_device_cell_master_segment_size(cell, master); + + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + value += segment_size; + } +} + +#endif /* __IA_CSS_CELL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/config/isys/subsystem_cnlB0.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/config/isys/subsystem_cnlB0.mk new file mode 100644 index 000000000000..d02690b384d2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/config/isys/subsystem_cnlB0.mk @@ -0,0 +1,78 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +############################################################################ +# This file is used to specify versions and properties of ISYS firmware +# components. Please note that these are subsystem specific. System specific +# properties should go to system_$IPU_SYSVER.mk. Also the device versions +# should be defined under "devices" or should be taken from the SDK. 
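+# A component makefile would typically consume the switches below along
+# these lines (illustrative sketch only; the consuming flag variables
+# are defined by the individual components, not in this file):
+#
+#   ifeq ($(ISYS_USE_ISA_DMA),1)
+#   ISYS_FW_CPPFLAGS += -DISYS_USE_ISA_DMA=1
+#   endif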
+############################################################################
+
+############################################################################
+# FIRMWARE RELATED VARIABLES
+############################################################################
+
+# Activate loading params and storing stats DDR<->REGs with DMA
+ISYS_USE_ISA_DMA = 1
+# DMA does not work with AF due to a known bug
+DISABLE_AF_STAT_DMA = 1
+# Used in ISA module
+ISYS_ISL_DPC_DPC_V2 = 1
+
+# Specification for Isys server's fixed globals' locations
+REGMEM_OFFSET = 0 # Starting from 0
+REGMEM_SECURE_OFFSET = 4096
+REGMEM_SIZE = 36
+REGMEM_WORD_BYTES = 4
+FW_LOAD_NO_OF_REQUEST_OFFSET = 144 # Taken from REGMEM_OFFSET + REGMEM_SIZE*REGMEM_WORD_BYTES = 0 + 36*4
+FW_LOAD_NO_OF_REQUEST_SIZE_BYTES = 4
+# Total Used (@ REGMEM_OFFSET) = 148 # FW_LOAD_NO_OF_REQUEST_OFFSET + FW_LOAD_NO_OF_REQUEST_SIZE_BYTES
+# Total Used (@ REGMEM_SECURE_OFFSET) = 144 # FW_LOAD_NO_OF_REQUEST_OFFSET
+
+# Workarounds:
+
+# This WA avoids pipelining store-frame commands for SID processors that control a Str2Vec (ISA output)
+WA_HSD1304553438 = 1
+
+# FW workaround for HSD 1404347241. Disable clock gating for CSI2 DPHY Receiver ports
+# This is also specified in the CNLB0 clock gating document:
+# https://sharepoint.ger.ith.intel.com/sites/ICG_Arch/Shared%20Documents/
+# IPU%20Specs/IPU4-P/HAS/CNL%20B0%20clock%20gating%20registers.xlsx
+DISABLE_CSI2_RX_DPHY_CLK_GATE = 1
+
+# WA for larger-than-specified frames that complete mid-line
+WA_HSD1209062354 = 0
+
+# WA to disable clock gating for the devices in the CSI receivers, needed for using the mipi_pkt_gen device
+WA_HSD1805168877 = 0
+
+# Support IBUF soft-reset at stream start
+SOFT_RESET_IBUF_STREAM_START_SUPPORT = 0
+
+############################################################################
+# TESTING RELATED VARIABLES
+############################################################################
+
+# Cannot remove this define
+# Used in mipi_capture, isys_utils.mk, and stream_controller.mk
+ISYS_DISABLE_VERIFY_RECEIVED_SOF_EOF = 0
+
+ISYS_ACCESS_BLOCKER_VERSION = v1
+
+HAS_SPC = 1
+
+# Support dual command context for VTIO - concurrent secure and non-secure streams
+ISYS_HAS_DUAL_CMD_CTX_SUPPORT = 1
+
+AB_CONFIG_ARRAY_SIZE = 50
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/config/system_cnlB0.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/config/system_cnlB0.mk
new file mode 100644
index 000000000000..667282b519c4
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/config/system_cnlB0.mk
@@ -0,0 +1,96 @@
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details
+#
+
+#--- DEFINES REQUIRED TO COMPILE USING LLVM ---
+# Enable LLVM/Volcano for IPU4P, SPs only.
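+# (Illustrative: a consumer is expected to gate its toolchain choice on
+# these switches, e.g. "ifeq ($(VOLCANO_SP2601),1) ... endif"; the exact
+# consumers live outside this file.)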
+VOLCANO_IPU4P = 1 +VOLCANO_SP2601 = 1 +#---------------------------------------------- + +# enable NO_ALIAS for LLVM +ENABLE_NO_ALIAS_FOR_LLVM = 1 + +LOGICAL_FW_INPUT_SYSTEM = input_system_system +LOGICAL_FW_PROCESSING_SYSTEM = processing_system_system +LOGICAL_FW_IPU_SYSTEM = ipu_system +LOGICAL_FW_ISP_SYSTEM = isp2601_default_system +SP_CONTROL_CELL = sp2601_control +SP_PROXY_CELL = sp2601_proxy +ISP_CELL = isp2601 +# The non-capital define isp2601 is used in the sdk, in order to distinguish +# between different isp versions the ISP_CELL_IDENTIFIER define is added. +ISP_CELL_IDENTIFIER = ISP2601 +HAS_IPFD = 1 +HAS_S2M_IN_ISYS_ISL_NONSOC_PATH = 0 +HAS_S2V_IN_ISYS_ISL_NONSOC_PATH = 1 +# ISL-IS non-SoC path has ISA with PAF and DPC-Pext support for IPU4P-B0 +HAS_ISA_IN_ISYS_ISL = 1 +HAS_PAF_IN_ISYS_ISL = 1 +HAS_DPC_PEXT_IN_ISYS_ISL = 1 +HAS_PMA_IF = 1 + +HAS_MIPIBE_IN_PSYS_ISL = 1 + +HAS_VPLESS_SUPPORT = 0 + +DLI_SYSTEM = hive_isp_css_2600_system +RESOURCE_MANAGER_VERSION = v2 +MEM_RESOURCE_VALIDATION_ERROR = 0 +OFS_SCALER_1_4K_TILEY_422_SUPPORT= 1 +PROGDESC_ACC_SYMBOLS_VERSION = v1 +DEVPROXY_INTERFACE_VERSION = v1 +FW_ABI_IPU_TYPES_VERSION = v1 + +HAS_ONLINE_MODE_SUPPORT_IN_ISYS_PSYS = 0 + +MMU_INTERFACE_VERSION = v2 +DEVICE_ACCESS_VERSION = v2 +PSYS_SERVER_VERSION = v3 +PSYS_SERVER_LOADER_VERSION = v1 +PSYS_HW_VERSION = CNL_B0_HW + +# Enable FW_DMA for loading firmware +PSYS_SERVER_ENABLE_FW_LOAD_DMA = 1 + +NCI_SPA_VERSION = v1 +MANIFEST_TOOL_VERSION = v2 +PSYS_CON_MGR_TOOL_VERSION = v1 +# TODO: Should be removed after performance issues OTF are solved +PSYS_PROC_MGR_VERSION = v1 +IPU_RESOURCES_VERSION = v2 + +HAS_ACC_CLUSTER_PAF_PAL = 1 +HAS_ACC_CLUSTER_PEXT_PAL = 1 +HAS_ACC_CLUSTER_GBL_PAL = 1 + +# TODO use version naming scheme "v#" to decouple +# IPU_SYSVER from version. +PARAMBINTOOL_ISA_INIT_VERSION = cnlB0 + +# Select EQC2EQ version +# Version 1: uniform address space, equal EQ addresses regardless of EQC device +# Version 2: multiple addresses per EQ, depending on location of EQC device +EQC2EQ_VERSION = v1 + +# Select DMA instance for fw_load +FW_LOAD_DMA_INSTANCE = NCI_DMA_FW + +HAS_DMA_FW = 1 + +HAS_SIS = 0 +HAS_IDS = 1 + +PSYS_SERVER_ENABLE_TPROXY = 1 +PSYS_SERVER_ENABLE_DEVPROXY = 1 +NCI_OFS_VERSION = v1 diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cpd_binary/ia_css_fw_pkg_release.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cpd_binary/ia_css_fw_pkg_release.h new file mode 100644 index 000000000000..cb20a688b7c3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/cpd_binary/ia_css_fw_pkg_release.h @@ -0,0 +1,14 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. 
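+*/
+
+/* The release id below appears to be a date code (2018-06-15). */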
+#define IA_CSS_FW_PKG_RELEASE 0x20180615
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/device_access.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/device_access.mk
new file mode 100644
index 000000000000..1629d9af803b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/device_access.mk
@@ -0,0 +1,40 @@
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details
+#
+#
+
+ifndef _DEVICE_ACCESS_MK_
+_DEVICE_ACCESS_MK_ = 1
+
+# DEVICE_ACCESS_VERSION=
+include $(MODULES_DIR)/config/system_$(IPU_SYSVER).mk
+
+DEVICE_ACCESS_DIR=$${MODULES_DIR}/device_access
+DEVICE_ACCESS_INTERFACE=$(DEVICE_ACCESS_DIR)/interface
+DEVICE_ACCESS_SOURCES=$(DEVICE_ACCESS_DIR)/src
+
+DEVICE_ACCESS_HOST_FILES =
+
+DEVICE_ACCESS_FW_FILES =
+
+DEVICE_ACCESS_HOST_CPPFLAGS = \
+	-I$(DEVICE_ACCESS_INTERFACE) \
+	-I$(DEVICE_ACCESS_SOURCES)
+
+DEVICE_ACCESS_FW_CPPFLAGS = \
+	-I$(DEVICE_ACCESS_INTERFACE) \
+	-I$(DEVICE_ACCESS_SOURCES)
+
+DEVICE_ACCESS_FW_CPPFLAGS += \
+	-I$(DEVICE_ACCESS_SOURCES)/$(DEVICE_ACCESS_VERSION)
+endif
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_cmem.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_cmem.h
new file mode 100644
index 000000000000..3dc47c29fcab
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_cmem.h
@@ -0,0 +1,58 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IA_CSS_CMEM_H +#define __IA_CSS_CMEM_H + +#include "type_support.h" +#include "storage_class.h" + +#ifdef __VIED_CELL +typedef unsigned int ia_css_cmem_address_t; +#else +#include +typedef vied_subsystem_address_t ia_css_cmem_address_t; +#endif + +STORAGE_CLASS_INLINE uint32_t +ia_css_cmem_load_32(unsigned int ssid, ia_css_cmem_address_t address); + +STORAGE_CLASS_INLINE void +ia_css_cmem_store_32(unsigned int ssid, ia_css_cmem_address_t address, + uint32_t value); + +STORAGE_CLASS_INLINE void +ia_css_cmem_load(unsigned int ssid, ia_css_cmem_address_t address, void *data, + unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_cmem_store(unsigned int ssid, ia_css_cmem_address_t address, + const void *data, unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_cmem_zero(unsigned int ssid, ia_css_cmem_address_t address, + unsigned int size); + +STORAGE_CLASS_INLINE ia_css_cmem_address_t +ia_css_cmem_get_cmem_addr_from_dmem(unsigned int base_addr, void *p); + +/* Include inline implementation */ + +#ifdef __VIED_CELL +#include "ia_css_cmem_cell.h" +#else +#include "ia_css_cmem_host.h" +#endif + +#endif /* __IA_CSS_CMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_xmem.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_xmem.h new file mode 100644 index 000000000000..de2b94d8af54 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_xmem.h @@ -0,0 +1,65 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_XMEM_H +#define __IA_CSS_XMEM_H + +#include "type_support.h" +#include "storage_class.h" + +#ifdef __VIED_CELL +typedef unsigned int ia_css_xmem_address_t; +#else +#include +typedef host_virtual_address_t ia_css_xmem_address_t; +#endif + +STORAGE_CLASS_INLINE uint8_t +ia_css_xmem_load_8(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE uint16_t +ia_css_xmem_load_16(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE uint32_t +ia_css_xmem_load_32(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE void +ia_css_xmem_load(unsigned int mmid, ia_css_xmem_address_t address, void *data, + unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_8(unsigned int mmid, ia_css_xmem_address_t address, + uint8_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_16(unsigned int mmid, ia_css_xmem_address_t address, + uint16_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_32(unsigned int mmid, ia_css_xmem_address_t address, + uint32_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store(unsigned int mmid, ia_css_xmem_address_t address, + const void *data, unsigned int bytes); + +/* Include inline implementation */ + +#ifdef __VIED_CELL +#include "ia_css_xmem_cell.h" +#else +#include "ia_css_xmem_host.h" +#endif + +#endif /* __IA_CSS_XMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_xmem_cmem.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_xmem_cmem.h new file mode 100644 index 000000000000..57aab3323c73 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/interface/ia_css_xmem_cmem.h @@ -0,0 +1,35 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_XMEM_CMEM_H +#define __IA_CSS_XMEM_CMEM_H + +#include "ia_css_cmem.h" +#include "ia_css_xmem.h" + +/* Copy data from xmem to cmem, e.g., from a program in DDR to a cell's DMEM */ +/* This may also be implemented using DMA */ + +STORAGE_CLASS_INLINE void +ia_css_xmem_to_cmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size); + +/* include inline implementation */ +#include "ia_css_xmem_cmem_impl.h" + +#endif /* __IA_CSS_XMEM_CMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_cmem_host.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_cmem_host.h new file mode 100644 index 000000000000..22799e67214c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_cmem_host.h @@ -0,0 +1,121 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_CMEM_HOST_H
+#define __IA_CSS_CMEM_HOST_H
+
+/* This file is an inline implementation for the interface ia_css_cmem.h
+ * and should only be included there. */
+
+#include "assert_support.h"
+#include "misc_support.h"
+
+STORAGE_CLASS_INLINE uint32_t
+ia_css_cmem_load_32(unsigned int ssid, ia_css_cmem_address_t address)
+{
+	/* Address has to be word aligned */
+	assert(0 == address % 4);
+	return vied_subsystem_load_32(ssid, address);
+}
+
+STORAGE_CLASS_INLINE uint32_t
+ia_css_cond_cmem_load_32(bool cond, unsigned int ssid,
+	ia_css_cmem_address_t address)
+{
+	/* Address has to be word aligned */
+	assert(0 == address % 4);
+	if (cond)
+		return vied_subsystem_load_32(ssid, address);
+	else
+		return 0;
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cmem_store_32(unsigned int ssid, ia_css_cmem_address_t address,
+	uint32_t data)
+{
+	/* Address has to be word aligned */
+	assert(0 == address % 4);
+	vied_subsystem_store_32(ssid, address, data);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cond_cmem_store_32(bool cond, unsigned int ssid,
+	ia_css_cmem_address_t address, uint32_t data)
+{
+	/* Address has to be word aligned */
+	assert(0 == address % 4);
+	if (cond)
+		vied_subsystem_store_32(ssid, address, data);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cmem_load(unsigned int ssid, ia_css_cmem_address_t address, void *data,
+	unsigned int size)
+{
+	uint32_t *data32 = (uint32_t *)data;
+	uint32_t end = address + size;
+
+	assert(size % 4 == 0);
+	assert(address % 4 == 0);
+	assert((long)data % 4 == 0);
+
+	while (address != end) {
+		*data32 = ia_css_cmem_load_32(ssid, address);
+		address += 4;
+		data32 += 1;
+	}
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cmem_store(unsigned int ssid, ia_css_cmem_address_t address,
+	const void *data, unsigned int size)
+{
+	const uint32_t *data32 = (const uint32_t *)data;
+	uint32_t end = address + size;
+
+	assert(size % 4 == 0);
+	assert(address % 4 == 0);
+	assert((long)data % 4 == 0);
+
+	while (address != end) {
+		ia_css_cmem_store_32(ssid, address, *data32);
+		address += 4;
+		data32 += 1;
+	}
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cmem_zero(unsigned int ssid, ia_css_cmem_address_t address,
+	unsigned int size)
+{
+	uint32_t end = address + size;
+
+	assert(size % 4 == 0);
+	assert(address % 4 == 0);
+
+	while (address != end) {
+		ia_css_cmem_store_32(ssid, address, 0);
+		address += 4;
+	}
+}
+
+STORAGE_CLASS_INLINE ia_css_cmem_address_t
+ia_css_cmem_get_cmem_addr_from_dmem(unsigned int base_addr, void *p)
+{
+	NOT_USED(base_addr);
+	return (ia_css_cmem_address_t)(uintptr_t)p;
+}
+
+#endif /* __IA_CSS_CMEM_HOST_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_xmem_cmem_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_xmem_cmem_impl.h
new file mode 100644
index 000000000000..adc178b75059
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_xmem_cmem_impl.h
@@ -0,0 +1,79 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_XMEM_CMEM_IMPL_H
+#define __IA_CSS_XMEM_CMEM_IMPL_H
+
+#include "ia_css_xmem_cmem.h"
+
+#include "ia_css_cmem.h"
+#include "ia_css_xmem.h"
+
+/* Copy data from xmem to cmem, e.g., from a program in DDR to a cell's DMEM */
+/* This may also be implemented using DMA */
+
+STORAGE_CLASS_INLINE void
+ia_css_xmem_to_cmem_copy(
+	unsigned int mmid,
+	unsigned int ssid,
+	ia_css_xmem_address_t src,
+	ia_css_cmem_address_t dst,
+	unsigned int size)
+{
+	/* copy from ddr to subsystem, e.g., cell dmem */
+	ia_css_cmem_address_t end = dst + size;
+
+	assert(size % 4 == 0);
+	assert((uintptr_t) dst % 4 == 0);
+	assert((uintptr_t) src % 4 == 0);
+
+	while (dst != end) {
+		uint32_t data;
+
+		data = ia_css_xmem_load_32(mmid, src);
+		ia_css_cmem_store_32(ssid, dst, data);
+		dst += 4;
+		src += 4;
+	}
+}
+
+/* Copy data from cmem to xmem */
+
+STORAGE_CLASS_INLINE void
+ia_css_cmem_to_xmem_copy(
+	unsigned int mmid,
+	unsigned int ssid,
+	ia_css_cmem_address_t src,
+	ia_css_xmem_address_t dst,
+	unsigned int size)
+{
+	/* copy from subsystem, e.g., cell dmem, to ddr */
+	ia_css_xmem_address_t end = dst + size;
+
+	assert(size % 4 == 0);
+	assert((uintptr_t) dst % 4 == 0);
+	assert((uintptr_t) src % 4 == 0);
+
+	while (dst != end) {
+		uint32_t data;
+
+		/* cmem accesses take the subsystem id, xmem accesses take
+		 * the memory id
+		 */
+		data = ia_css_cmem_load_32(ssid, src);
+		ia_css_xmem_store_32(mmid, dst, data);
+		dst += 4;
+		src += 4;
+	}
+}
+
+
+#endif /* __IA_CSS_XMEM_CMEM_IMPL_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_xmem_host.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_xmem_host.h
new file mode 100644
index 000000000000..d94991fc1114
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/device_access/src/ia_css_xmem_host.h
@@ -0,0 +1,84 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IA_CSS_XMEM_HOST_H +#define __IA_CSS_XMEM_HOST_H + +#include "ia_css_xmem.h" +#include +#include "assert_support.h" +#include + +STORAGE_CLASS_INLINE uint8_t +ia_css_xmem_load_8(unsigned int mmid, ia_css_xmem_address_t address) +{ + return shared_memory_load_8(mmid, address); +} + +STORAGE_CLASS_INLINE uint16_t +ia_css_xmem_load_16(unsigned int mmid, ia_css_xmem_address_t address) +{ + /* Address has to be half-word aligned */ + assert(0 == (uintptr_t) address % 2); + return shared_memory_load_16(mmid, address); +} + +STORAGE_CLASS_INLINE uint32_t +ia_css_xmem_load_32(unsigned int mmid, ia_css_xmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == (uintptr_t) address % 4); + return shared_memory_load_32(mmid, address); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_load(unsigned int mmid, ia_css_xmem_address_t address, void *data, + unsigned int size) +{ + shared_memory_load(mmid, address, data, size); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_8(unsigned int mmid, ia_css_xmem_address_t address, + uint8_t value) +{ + shared_memory_store_8(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_16(unsigned int mmid, ia_css_xmem_address_t address, + uint16_t value) +{ + /* Address has to be half-word aligned */ + assert(0 == (uintptr_t) address % 2); + shared_memory_store_16(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_32(unsigned int mmid, ia_css_xmem_address_t address, + uint32_t value) +{ + /* Address has to be word aligned */ + assert(0 == (uintptr_t) address % 4); + shared_memory_store_32(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store(unsigned int mmid, ia_css_xmem_address_t address, + const void *data, unsigned int bytes) +{ + shared_memory_store(mmid, address, data, bytes); +} + +#endif /* __IA_CSS_XMEM_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/cnlB0/ipu_device_buttress_properties_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/cnlB0/ipu_device_buttress_properties_struct.h new file mode 100644 index 000000000000..5102f6e44d2f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/cnlB0/ipu_device_buttress_properties_struct.h @@ -0,0 +1,68 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
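+*/
+
+/* Example (illustrative): a snoopable request to DDR on master port 0
+ * carries the info bits
+ *
+ *	IA_CSS_INFO_BITS_M0_DDR | IA_CSS_INFO_BITS_M0_SNOOPABLE
+ *
+ * which, with the definitions below, evaluates to
+ * (DEST_IS_DDR << 4) | (1 << 0) == 0x11.
+ */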
+
+#ifndef __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H
+#define __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H
+
+/* Destination values for master port 0 and bitfield "request_dest" */
+enum cio_M0_btrs_dest {
+	DEST_IS_BUT_REGS = 0,
+	DEST_IS_DDR,
+	RESERVED,
+	DEST_IS_SUBSYSTEM,
+	N_BTRS_DEST
+};
+
+/* Bit-field positions for M0 info bits */
+enum ia_css_info_bits_m0_pos {
+	IA_CSS_INFO_BITS_M0_SNOOPABLE_POS = 0,
+	IA_CSS_INFO_BITS_M0_IMR_DESTINED_POS = 1,
+	IA_CSS_INFO_BITS_M0_REQUEST_DEST_POS = 4
+};
+
+#define IA_CSS_INFO_BITS_M0_DDR \
+	(DEST_IS_DDR << IA_CSS_INFO_BITS_M0_REQUEST_DEST_POS)
+#define IA_CSS_INFO_BITS_M0_SNOOPABLE (1 << IA_CSS_INFO_BITS_M0_SNOOPABLE_POS)
+
+/* Info bits as expected by the buttress */
+/* Deprecated because bit fields are not portable */
+
+/* For master port 0 */
+union cio_M0_t {
+	struct {
+		unsigned int snoopable : 1;
+		unsigned int imr_destined : 1;
+		unsigned int spare0 : 2;
+		unsigned int request_dest : 2;
+		unsigned int spare1 : 26;
+	} as_bitfield;
+	unsigned int as_word;
+};
+
+/* For master port 1 */
+union cio_M1_t {
+	struct {
+		unsigned int spare0 : 1;
+		unsigned int deadline_pointer : 1;
+		unsigned int reserved : 1;
+		unsigned int zlw : 1;
+		unsigned int stream_id : 4;
+		unsigned int address_swizzling : 1;
+		unsigned int spare1 : 23;
+	} as_bitfield;
+	unsigned int as_word;
+};
+
+
+#endif /* __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties.h
new file mode 100644
index 000000000000..e6e1e9dcbe80
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties.h
@@ -0,0 +1,76 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_H +#define __IPU_DEVICE_CELL_PROPERTIES_H + +#include "storage_class.h" +#include "ipu_device_cell_type_properties.h" + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_devices(void); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_memories(const unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_size(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_address(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_databus_memory_address(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_masters(const unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_bits(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_num_segments(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_size(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_stride(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_base_reg(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_reg(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_icache_align(unsigned int cell_id); + +#ifdef C_RUN +STORAGE_CLASS_INLINE int +ipu_device_cell_id_crun(int cell_id); +#endif + +#include "ipu_device_cell_properties_func.h" + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties_func.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties_func.h new file mode 100644 index 000000000000..481b0504a237 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties_func.h @@ -0,0 +1,164 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
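+*/
+
+/* Worked example for the segment arithmetic below: a master with
+ * segment_bits == 2 has 1 << 2 == 4 segments, each covering
+ * 1 << (32 - 2) bytes == 1 GiB, so the segments together span the
+ * 32-bit master address space. (The value 2 is illustrative.)
+ */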
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_FUNC_H +#define __IPU_DEVICE_CELL_PROPERTIES_FUNC_H + +/* define properties for all cells uses in ISYS */ + +#include "ipu_device_cell_properties_impl.h" +#include "ipu_device_cell_devices.h" +#include "assert_support.h" +#include "storage_class.h" + +enum {IA_CSS_CELL_MASTER_ADDRESS_WIDTH = 32}; + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_devices(void) +{ + return NUM_CELLS; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_memories(const unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + num_memories; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_size(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + return ipu_device_cell_properties[cell_id].type_properties-> + mem_size[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_address(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + return ipu_device_cell_properties[cell_id].mem_address[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_databus_memory_address(const unsigned int cell_id, + const unsigned int mem_id) +{ + assert(cell_id < NUM_CELLS); + assert(mem_id < ipu_device_cell_num_memories(cell_id)); + assert(mem_id != 0); + return ipu_device_cell_properties[cell_id].mem_databus_address[mem_id]; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_masters(const unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + num_master_ports; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_bits(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].segment_bits; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_num_segments(const unsigned int cell_id, + const unsigned int master_id) +{ + return 1u << ipu_device_cell_master_segment_bits(cell_id, master_id); +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_size(const unsigned int cell_id, + const unsigned int master_id) +{ + return 1u << (IA_CSS_CELL_MASTER_ADDRESS_WIDTH - + ipu_device_cell_master_segment_bits(cell_id, master_id)); +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_stride(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].stride; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_base_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].base_address_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + 
master[master_id].info_bits_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_override_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].info_override_bits_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_icache_align(unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + icache_align; +} + +#ifdef C_RUN +STORAGE_CLASS_INLINE int +ipu_device_cell_id_crun(int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_map_cell_id_to_crun_proc_id[cell_id]; +} +#endif + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_FUNC_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties_struct.h new file mode 100644 index 000000000000..63397dc0b7fe --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_properties_struct.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H +#define __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H + +/* definitions for all cell types */ + +struct ipu_device_cell_count_s { + unsigned int num_memories; + unsigned int num_master_ports; + unsigned int num_stall_bits; + unsigned int icache_align; +}; + +struct ipu_device_cell_master_properties_s { + unsigned int segment_bits; + unsigned int stride; /* offset to register of next segment */ + unsigned int base_address_register; /* address of first base address + register */ + unsigned int info_bits_register; + unsigned int info_override_bits_register; +}; + +struct ipu_device_cell_type_properties_s { + const struct ipu_device_cell_count_s *count; + const struct ipu_device_cell_master_properties_s *master; + const unsigned int *reg_offset; /* offsets of registers, some depend + on cell type */ + const unsigned int *mem_size; +}; + +struct ipu_device_cell_properties_s { + const struct ipu_device_cell_type_properties_s *type_properties; + const unsigned int *mem_address; + const unsigned int *mem_databus_address; + /* const cell_master_port_properties_s* master_port_properties; */ +}; + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_type_properties.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_type_properties.h new file mode 100644 index 000000000000..72caed3eef0c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/interface/ipu_device_cell_type_properties.h @@ -0,0 +1,69 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_TYPE_PROPERTIES_H +#define __IPU_DEVICE_CELL_TYPE_PROPERTIES_H + +#define IPU_DEVICE_INVALID_MEM_ADDRESS 0xFFFFFFFF + +enum ipu_device_cell_stat_ctrl_bit { + IPU_DEVICE_CELL_STAT_CTRL_RESET_BIT = 0, + IPU_DEVICE_CELL_STAT_CTRL_START_BIT = 1, + IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT = 3, + IPU_DEVICE_CELL_STAT_CTRL_READY_BIT = 5, + IPU_DEVICE_CELL_STAT_CTRL_SLEEP_BIT = 6, + IPU_DEVICE_CELL_STAT_CTRL_STALL_BIT = 7, + IPU_DEVICE_CELL_STAT_CTRL_CLEAR_IRQ_MASK_FLAG_BIT = 8, + IPU_DEVICE_CELL_STAT_CTRL_BROKEN_IRQ_MASK_FLAG_BIT = 9, + IPU_DEVICE_CELL_STAT_CTRL_READY_IRQ_MASK_FLAG_BIT = 10, + IPU_DEVICE_CELL_STAT_CTRL_SLEEP_IRQ_MASK_FLAG_BIT = 11, + IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT = 12, + IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT = 13 +}; + +enum ipu_device_cell_reg_addr { + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS = 0x0, + IPU_DEVICE_CELL_START_PC_REG_ADDRESS = 0x4, + IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS = 0x10, + IPU_DEVICE_CELL_ICACHE_INFO_BITS_REG_ADDRESS = 0x14 +}; + +enum ipu_device_cell_reg { + IPU_DEVICE_CELL_STAT_CTRL_REG, + IPU_DEVICE_CELL_START_PC_REG, + IPU_DEVICE_CELL_ICACHE_BASE_REG, + IPU_DEVICE_CELL_DEBUG_PC_REG, + IPU_DEVICE_CELL_STALL_REG, + IPU_DEVICE_CELL_NUM_REGS +}; + +enum ipu_device_cell_mem { + IPU_DEVICE_CELL_REGS, /* memory id of registers */ + IPU_DEVICE_CELL_PMEM, /* memory id of pmem */ + IPU_DEVICE_CELL_DMEM, /* memory id of dmem */ + IPU_DEVICE_CELL_BAMEM, /* memory id of bamem */ + IPU_DEVICE_CELL_VMEM /* memory id of vmem */ +}; +#define IPU_DEVICE_CELL_NUM_MEMORIES (IPU_DEVICE_CELL_VMEM + 1) + +enum ipu_device_cell_master { + IPU_DEVICE_CELL_MASTER_ICACHE, /* master port id of icache */ + IPU_DEVICE_CELL_MASTER_QMEM, + IPU_DEVICE_CELL_MASTER_CMEM, + IPU_DEVICE_CELL_MASTER_XMEM, + IPU_DEVICE_CELL_MASTER_XVMEM +}; +#define IPU_DEVICE_CELL_MASTER_NUM_MASTERS (IPU_DEVICE_CELL_MASTER_XVMEM + 1) + +#endif /* __IPU_DEVICE_CELL_TYPE_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_devices.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_devices.h new file mode 100644 index 000000000000..274c9518fd3d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_devices.h @@ -0,0 +1,27 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
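
The stat_ctrl bit positions above are meant to be composed into a single control word. A hedged sketch (not the FW's actual cell-boot code) of what a "start with a cold icache" word would look like:

/* Illustrative only: a stat_ctrl word that starts the cell after
 * invalidating its icache, with prefetch enabled. Writing it to
 * IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS is left to the real driver. */
static unsigned int example_cell_start_word(void)
{
	return (1u << IPU_DEVICE_CELL_STAT_CTRL_START_BIT) |
	       (1u << IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT) |
	       (1u << IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT);
}
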
+*/ + +#ifndef __IPU_DEVICE_CELL_DEVICES_H +#define __IPU_DEVICE_CELL_DEVICES_H + +/* define cell instances in ISYS */ + +#define SPC0_CELL input_system_unis_logic_sp_control_tile_sp + +enum ipu_device_isys_cell_id { + SPC0 +}; +#define NUM_CELLS (SPC0 + 1) + +#endif /* __IPU_DEVICE_CELL_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_properties_defs.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_properties_defs.h new file mode 100644 index 000000000000..d7841599aaf5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_properties_defs.h @@ -0,0 +1,23 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +/* Generated file - please do not edit. */ + +#ifndef _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ +#define _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ +#define SPC0_REGS_CBUS_ADDRESS 0x0 +#define SPC0_DMEM_CBUS_ADDRESS 0x8000 +#define SPC0_DMEM_DBUS_ADDRESS 0x8000 +#define SPC0_DMEM_DMA_M0_ADDRESS 0x1010000 +#endif /* _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_properties_impl.h new file mode 100644 index 000000000000..f350ae74b94d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/isys/cnlB0/ipu_device_cell_properties_impl.h @@ -0,0 +1,57 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_IMPL_H +#define __IPU_DEVICE_CELL_PROPERTIES_IMPL_H + +/* define properties for all cells uses in ISYS */ + +#include "ipu_device_sp2600_control_properties_impl.h" +#include "ipu_device_cell_properties_defs.h" +#include "ipu_device_cell_devices.h" +#include "ipu_device_cell_type_properties.h"/* IPU_DEVICE_INVALID_MEM_ADDRESS */ + +static const unsigned int +ipu_device_spc0_mem_address[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + SPC0_REGS_CBUS_ADDRESS, + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPC0_DMEM_CBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spc0_databus_mem_address[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* regs not accessible from DBUS */ + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPC0_DMEM_DBUS_ADDRESS +}; + +static const struct ipu_device_cell_properties_s +ipu_device_cell_properties[NUM_CELLS] = { + { + &ipu_device_sp2600_control_properties, + ipu_device_spc0_mem_address, + ipu_device_spc0_databus_mem_address + } +}; + +#ifdef C_RUN + +/* Mapping between hrt_hive_processors enum and cell_id's used in FW */ +static const int ipu_device_map_cell_id_to_crun_proc_id[NUM_CELLS] = { + 0 /* SPC0 */ +}; + +#endif + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/src/ipu_device_sp2600_control_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/src/ipu_device_sp2600_control_properties_impl.h new file mode 100644 index 000000000000..430295cd9d94 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/devices/src/ipu_device_sp2600_control_properties_impl.h @@ -0,0 +1,136 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
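
The address tables above use IPU_DEVICE_INVALID_MEM_ADDRESS for memories a bus cannot reach (SPC0 has no PMEM, and its registers are not visible on the data bus), so any generic walker needs a guard; an illustrative helper:

/* Illustrative only: true when mem_id of cell_id is reachable over the
 * data bus. mem_id 0 (the registers) is excluded up front because the
 * databus accessor itself asserts mem_id != 0. */
static int example_mem_on_databus(unsigned int cell_id, unsigned int mem_id)
{
	return mem_id != 0 &&
	       ipu_device_cell_databus_memory_address(cell_id, mem_id) !=
	       IPU_DEVICE_INVALID_MEM_ADDRESS;
}
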
+*/ + +#ifndef __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H +#define __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H + +/* sp2600_control definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_sp2600_control_registers { + /* control registers */ + IPU_DEVICE_SP2600_CONTROL_STAT_CTRL = 0x0, + IPU_DEVICE_SP2600_CONTROL_START_PC = 0x4, + + /* master port registers */ + IPU_DEVICE_SP2600_CONTROL_ICACHE_BASE = 0x10, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO = 0x14, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_SP2600_CONTROL_QMEM_BASE = 0x1C, + + IPU_DEVICE_SP2600_CONTROL_CMEM_BASE = 0x28, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO = 0x2C, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO_OVERRIDE = 0x30, + + IPU_DEVICE_SP2600_CONTROL_XMEM_BASE = 0x58, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO = 0x5C, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO_OVERRIDE = 0x60, + + /* debug registers */ + IPU_DEVICE_SP2600_CONTROL_DEBUG_PC = 0x9C, + IPU_DEVICE_SP2600_CONTROL_STALL = 0xA0 +}; + +enum ipu_device_sp2600_control_mems { + IPU_DEVICE_SP2600_CONTROL_REGS, + IPU_DEVICE_SP2600_CONTROL_PMEM, + IPU_DEVICE_SP2600_CONTROL_DMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_sp2600_control_mem_size[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + 0x000AC, + 0x00000, + 0x10000 +}; + +enum ipu_device_sp2600_control_masters { + IPU_DEVICE_SP2600_CONTROL_ICACHE, + IPU_DEVICE_SP2600_CONTROL_QMEM, + IPU_DEVICE_SP2600_CONTROL_CMEM, + IPU_DEVICE_SP2600_CONTROL_XMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_sp2600_control_masters[IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_SP2600_CONTROL_ICACHE_BASE, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_SP2600_CONTROL_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_CONTROL_CMEM_BASE, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO_OVERRIDE + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_CONTROL_XMEM_BASE, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO_OVERRIDE + } +}; + +enum ipu_device_sp2600_control_stall_bits { + IPU_DEVICE_SP2600_CONTROL_STALL_ICACHE, + IPU_DEVICE_SP2600_CONTROL_STALL_DMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_QMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_CMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_XMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_STALL_BITS +}; + +/* 32 bits per instruction */ +#define IPU_DEVICE_SP2600_CONTROL_ICACHE_WORD_SIZE 4 +/* 32 instructions per burst */ +#define IPU_DEVICE_SP2600_CONTROL_ICACHE_BURST_SIZE 32 + +static const struct ipu_device_cell_count_s ipu_device_sp2600_control_count = { + IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES, + IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS, + IPU_DEVICE_SP2600_CONTROL_NUM_STALL_BITS, + IPU_DEVICE_SP2600_CONTROL_ICACHE_WORD_SIZE * + IPU_DEVICE_SP2600_CONTROL_ICACHE_BURST_SIZE +}; + +static const unsigned int +ipu_device_sp2600_control_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0x9C, 0xA0 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_sp2600_control_properties = { + &ipu_device_sp2600_control_count, + ipu_device_sp2600_control_masters, + ipu_device_sp2600_control_reg_offset, + ipu_device_sp2600_control_mem_size +}; + +#endif /* __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/fw_abi_cpu_types.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/fw_abi_cpu_types.mk new file mode 100644 index 000000000000..b1ffbf7ea21f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/fw_abi_cpu_types.mk @@ -0,0 +1,24 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +# MODULE is FW ABI COMMON TYPES + +FW_ABI_COMMON_TYPES_DIRS = -I$${MODULES_DIR}/fw_abi_common_types +FW_ABI_COMMON_TYPES_DIRS += -I$${MODULES_DIR}/fw_abi_common_types/cpu + +FW_ABI_COMMON_TYPES_HOST_FILES = +FW_ABI_COMMON_TYPES_HOST_CPPFLAGS = $(FW_ABI_COMMON_TYPES_DIRS) + +FW_ABI_COMMON_TYPES_FW_FILES = +FW_ABI_COMMON_TYPES_FW_CPPFLAGS = $(FW_ABI_COMMON_TYPES_DIRS) diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_base_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_base_types.h new file mode 100644 index 000000000000..73062e9db87b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_base_types.h @@ -0,0 +1,43 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_TERMINAL_BASE_TYPES_H +#define __IA_CSS_TERMINAL_BASE_TYPES_H + + +#include "type_support.h" +#include "ia_css_terminal_defs.h" + +#define N_UINT16_IN_TERMINAL_STRUCT 3 +#define N_PADDING_UINT8_IN_TERMINAL_STRUCT 5 + +#define SIZE_OF_TERMINAL_STRUCT_BITS \ + (IA_CSS_TERMINAL_TYPE_BITS \ + + IA_CSS_TERMINAL_ID_BITS \ + + N_UINT16_IN_TERMINAL_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_PADDING_UINT8_IN_TERMINAL_STRUCT * IA_CSS_UINT8_T_BITS) + +/* ==================== Base Terminal - START ==================== */ +struct ia_css_terminal_s { /**< Base terminal */ + ia_css_terminal_type_t terminal_type; /**< Type ia_css_terminal_type_t */ + int16_t parent_offset; /**< Offset to the process group */ + uint16_t size; /**< Size of this whole terminal layout-structure */ + uint16_t tm_index; /**< Index of the terminal manifest object */ + ia_css_terminal_ID_t ID; /**< Absolute referal ID for this terminal, valid ID's != 0 */ + uint8_t padding[N_PADDING_UINT8_IN_TERMINAL_STRUCT]; +}; +/* ==================== Base Terminal - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_BASE_TYPES_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h new file mode 100644 index 000000000000..24ad04fe8720 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h @@ -0,0 +1,43 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
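
The explicit padding in ia_css_terminal_s above exists precisely so that the layout matches SIZE_OF_TERMINAL_STRUCT_BITS (128 bits, i.e. 16 bytes, with the counts given) on every toolchain. A C11 host build could pin that down at compile time; illustrative only, and it assumes the enum really occupies the 32 bits that IA_CSS_TERMINAL_TYPE_BITS declares:

#include <assert.h> /* static_assert */

/* Illustrative only: fail the build if the struct drifts from its ABI size. */
static_assert(sizeof(struct ia_css_terminal_s) * 8 ==
	      SIZE_OF_TERMINAL_STRUCT_BITS,
	      "ia_css_terminal_s no longer matches SIZE_OF_TERMINAL_STRUCT_BITS");
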
+*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H +#define __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H + +#include "ia_css_terminal_defs.h" + +#define N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT 5 +#define SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + (IA_CSS_UINT16_T_BITS \ + + IA_CSS_TERMINAL_ID_BITS \ + + IA_CSS_TERMINAL_TYPE_BITS \ + + IA_CSS_UINT32_T_BITS \ + + (N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT*IA_CSS_UINT8_T_BITS)) + +/* ==================== Base Terminal Manifest - START ==================== */ +struct ia_css_terminal_manifest_s { + ia_css_terminal_type_t terminal_type; /**< Type ia_css_terminal_type_t */ + int16_t parent_offset; /**< Offset to the program group manifest */ + uint16_t size; /**< Size of this whole terminal-manifest layout-structure */ + ia_css_terminal_ID_t ID; + uint8_t padding[N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT]; +}; + +typedef struct ia_css_terminal_manifest_s + ia_css_terminal_manifest_t; + +/* ==================== Base Terminal Manifest - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/ia_css_base_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/ia_css_base_types.h new file mode 100644 index 000000000000..cd508f05ed40 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/ia_css_base_types.h @@ -0,0 +1,39 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_BASE_TYPES_H +#define __IA_CSS_BASE_TYPES_H + +#include "type_support.h" + +#define VIED_VADDRESS_BITS 32 +typedef uint32_t vied_vaddress_t; + +#define DEVICE_DESCRIPTOR_ID_BITS 32 +typedef struct { + uint8_t device_id; + uint8_t instance_id; + uint8_t channel_id; + uint8_t section_id; +} device_descriptor_fields_t; + +typedef union { + device_descriptor_fields_t fields; + uint32_t data; +} device_descriptor_id_t; + +typedef uint16_t ia_css_process_id_t; + +#endif /* __IA_CSS_BASE_TYPES_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/ia_css_terminal_defs.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/ia_css_terminal_defs.h new file mode 100644 index 000000000000..3a7b333d3bf5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/fw_abi_common_types/ia_css_terminal_defs.h @@ -0,0 +1,109 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
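
device_descriptor_id_t above is a classic pack/unpack union: four 8-bit routing fields travel through the queues as a single 32-bit word. An illustrative pack helper (the byte order of .data follows whatever ABI the host and FW share):

/* Illustrative only: build the one-word queue view of a descriptor. */
static uint32_t example_pack_descriptor_id(uint8_t device_id,
					   uint8_t instance_id,
					   uint8_t channel_id,
					   uint8_t section_id)
{
	device_descriptor_id_t id;

	id.fields.device_id = device_id;
	id.fields.instance_id = instance_id;
	id.fields.channel_id = channel_id;
	id.fields.section_id = section_id;
	return id.data;
}
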
+*/ + +#ifndef __IA_CSS_TERMINAL_DEFS_H +#define __IA_CSS_TERMINAL_DEFS_H + + +#include "type_support.h" + +#define IA_CSS_TERMINAL_ID_BITS 8 +typedef uint8_t ia_css_terminal_ID_t; +#define IA_CSS_TERMINAL_INVALID_ID ((ia_css_terminal_ID_t)(-1)) + +/* + * Terminal Base Type + */ +typedef enum ia_css_terminal_type { + /**< Data input */ + IA_CSS_TERMINAL_TYPE_DATA_IN = 0, + /**< Data output */ + IA_CSS_TERMINAL_TYPE_DATA_OUT, + /**< Type 6 parameter input */ + IA_CSS_TERMINAL_TYPE_PARAM_STREAM, + /**< Type 1-5 parameter input */ + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN, + /**< Type 1-5 parameter output */ + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT, + /**< Represent the new type of terminal for the + * "spatial dependent parameters", when params go in + */ + IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN, + /**< Represent the new type of terminal for the + * "spatial dependent parameters", when params go out + */ + IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT, + /**< Represent the new type of terminal for the + * explicit slicing, when params go in + */ + IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN, + /**< Represent the new type of terminal for the + * explicit slicing, when params go out + */ + IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT, + /**< State (private data) input */ + IA_CSS_TERMINAL_TYPE_STATE_IN, + /**< State (private data) output */ + IA_CSS_TERMINAL_TYPE_STATE_OUT, + IA_CSS_TERMINAL_TYPE_PROGRAM, + IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT, + IA_CSS_N_TERMINAL_TYPES +} ia_css_terminal_type_t; + +#define IA_CSS_TERMINAL_TYPE_BITS 32 + +/* Temporary redirection needed to facilicate merging with the drivers + in a backwards compatible manner */ +#define IA_CSS_TERMINAL_TYPE_PARAM_CACHED IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN + +/* + * Dimensions of the data objects. Note that a C-style + * data order is assumed. Data stored by row. + */ +/* A strange problem with hivecc compiler which is described + * here https://icggerrit.ir.intel.com/#/c/51630/1 forces this + * enum to be explicitly initialized for the moment + */ +typedef enum ia_css_dimension { + /**< The number of columns, i.e. the size of the row */ + IA_CSS_COL_DIMENSION = 0, + /**< The number of rows, i.e. the size of the column */ + IA_CSS_ROW_DIMENSION = 1, + IA_CSS_N_DATA_DIMENSION = 2 +} ia_css_dimension_t; + +#define IA_CSS_N_COMMAND_COUNT (4) + +#ifndef PIPE_GENERATION +/* Don't include these complex enum structures in Genpipe, it can't handle and it does not need them */ +/* + * enum ia_css_isys_link_id. Lists the link IDs used by the FW for On The Fly feature + */ +typedef enum ia_css_isys_link_id { + IA_CSS_ISYS_LINK_OFFLINE = 0, + IA_CSS_ISYS_LINK_MAIN_OUTPUT = 1, + IA_CSS_ISYS_LINK_PDAF_OUTPUT = 2 +} ia_css_isys_link_id_t; +#define N_IA_CSS_ISYS_LINK_ID (IA_CSS_ISYS_LINK_PDAF_OUTPUT + 1) + +/* + * enum ia_css_data_barrier_link_id. 
Lists the link IDs used by the FW for data barrier feature + */ +typedef enum ia_css_data_barrier_link_id { + IA_CSS_DATA_BARRIER_LINK_MEMORY = N_IA_CSS_ISYS_LINK_ID, + N_IA_CSS_DATA_BARRIER_LINK_ID +} ia_css_data_barrier_link_id_t; + +#endif /* #ifndef PIPE_GENERATION */ +#endif /* __IA_CSS_TERMINAL_DEFS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isys_fw_bridged_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isys_fw_bridged_types.h new file mode 100644 index 000000000000..5e47fe7026bd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isys_fw_bridged_types.h @@ -0,0 +1,402 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYS_FW_BRIDGED_TYPES_H +#define __IA_CSS_ISYS_FW_BRIDGED_TYPES_H + +#include "platform_support.h" + +#include "ia_css_isysapi_fw_types.h" + +/** + * struct ia_css_isys_buffer_partition_comm - buffer partition information + * @num_gda_pages: Number of virtual gda pages available for each + * virtual stream + */ +struct ia_css_isys_buffer_partition_comm { + aligned_uint32(unsigned int, num_gda_pages[STREAM_ID_MAX]); +}; + +/** + * struct ia_css_isys_fw_config - contains the parts from + * ia_css_isys_device_cfg_data + * we need to transfer to the cell + * @num_send_queues: Number of send queues per queue + * type(N_IA_CSS_ISYS_QUEUE_TYPE) + * @num_recv_queues: Number of receive queues per queue + * type(N_IA_CSS_ISYS_QUEUE_TYPE) + */ +struct ia_css_isys_fw_config { + aligned_struct(struct ia_css_isys_buffer_partition_comm, + buffer_partition); + aligned_uint32(unsigned int, + num_send_queues[N_IA_CSS_ISYS_QUEUE_TYPE]); + aligned_uint32(unsigned int, + num_recv_queues[N_IA_CSS_ISYS_QUEUE_TYPE]); +}; + +/** + * struct ia_css_isys_resolution_comm: Generic resolution structure. + * @Width + * @Height + */ +struct ia_css_isys_resolution_comm { + aligned_uint32(unsigned int, width); + aligned_uint32(unsigned int, height); +}; + +/** + * struct ia_css_isys_output_pin_payload_comm + * @out_buf_id: Points to output pin buffer - buffer identifier + * @addr: Points to output pin buffer - CSS Virtual Address + * @compress: Request frame compression (1), or not (0) + * This must be the same as ia_css_isys_output_pin_info_comm::reserve_compression + */ +struct ia_css_isys_output_pin_payload_comm { + aligned_uint64(ia_css_return_token, out_buf_id); + aligned_uint32(ia_css_output_buffer_css_address, addr); + aligned_uint32(unsigned int, compress); +}; + +/** + * struct ia_css_isys_output_pin_info_comm + * @input_pin_id: input pin id/index which is source of + * the data for this output pin + * @output_res: output pin resolution + * @stride: output stride in Bytes (not valid for statistics) + * @watermark_in_lines: pin watermark level in lines + * @payload_buf_size: Size in Bytes of all buffers that will be supplied for capture + * on this pin (i.e. 
addressed by ia_css_isys_output_pin_payload::addr)
+ * @send_irq: assert if pin event should trigger irq
+ * @pt: pin type
+ * @ft: frame format type
+ * @link_id: identifies PPG to connect to, link_id = 0 implies offline
+ * while link_id > 0 implies buffer_chasing or online mode
+ * can be entered.
+ * @reserve_compression: Reserve compression resources for pin.
+ */
+struct ia_css_isys_output_pin_info_comm {
+	aligned_struct(struct ia_css_isys_resolution_comm, output_res);
+	aligned_uint32(unsigned int, stride);
+	aligned_uint32(unsigned int, watermark_in_lines);
+	aligned_uint32(unsigned int, payload_buf_size);
+	aligned_uint8(unsigned int, send_irq);
+	aligned_uint8(unsigned int, input_pin_id);
+	aligned_uint8(enum ia_css_isys_pin_type, pt);
+	aligned_uint8(enum ia_css_isys_frame_format_type, ft);
+	aligned_uint8(enum ia_css_isys_link_id, link_id);
+	aligned_uint8(unsigned int, reserve_compression);
+};
+
+/**
+ * struct ia_css_isys_param_pin_comm
+ * @param_buf_id: Points to param port buffer - buffer identifier
+ * @addr: Points to param pin buffer - CSS Virtual Address
+ */
+struct ia_css_isys_param_pin_comm {
+	aligned_uint64(ia_css_return_token, param_buf_id);
+	aligned_uint32(ia_css_input_buffer_css_address, addr);
+};
+
+/**
+ * struct ia_css_isys_input_pin_info_comm
+ * @input_res: input resolution
+ * @dt: MIPI data type
+ * @mipi_store_mode: defines whether the legacy long packet header will be
+ * stored or discarded; if discarded, the output pin type for this
+ * input pin can only be MIPI
+ * @bits_per_pix: native bits per pixel
+ * @mapped_dt: mapped (renamed) MIPI data type
+ */
+struct ia_css_isys_input_pin_info_comm {
+	aligned_struct(struct ia_css_isys_resolution_comm, input_res);
+	aligned_uint8(enum ia_css_isys_mipi_data_type, dt);
+	aligned_uint8(enum ia_css_isys_mipi_store_mode, mipi_store_mode);
+	aligned_uint8(unsigned int, bits_per_pix);
+	aligned_uint8(unsigned int, mapped_dt);
+};
+
+/**
+ * ISA configuration fields: definitions and helper macros
+ */
+#define ISA_CFG_FIELD_BLC_EN_LEN 1
+#define ISA_CFG_FIELD_BLC_EN_SHIFT 0
+
+#define ISA_CFG_FIELD_LSC_EN_LEN 1
+#define ISA_CFG_FIELD_LSC_EN_SHIFT 1
+
+#define ISA_CFG_FIELD_DPC_EN_LEN 1
+#define ISA_CFG_FIELD_DPC_EN_SHIFT 2
+
+#define ISA_CFG_FIELD_DOWNSCALER_EN_LEN 1
+#define ISA_CFG_FIELD_DOWNSCALER_EN_SHIFT 3
+
+#define ISA_CFG_FIELD_AWB_EN_LEN 1
+#define ISA_CFG_FIELD_AWB_EN_SHIFT 4
+
+#define ISA_CFG_FIELD_AF_EN_LEN 1
+#define ISA_CFG_FIELD_AF_EN_SHIFT 5
+
+#define ISA_CFG_FIELD_AE_EN_LEN 1
+#define ISA_CFG_FIELD_AE_EN_SHIFT 6
+
+#define ISA_CFG_FIELD_PAF_TYPE_LEN 8
+#define ISA_CFG_FIELD_PAF_TYPE_SHIFT 7
+
+#define ISA_CFG_FIELD_SEND_IRQ_STATS_READY_LEN 1
+#define ISA_CFG_FIELD_SEND_IRQ_STATS_READY_SHIFT 15
+
+#define ISA_CFG_FIELD_SEND_RESP_STATS_READY_LEN 1
+#define ISA_CFG_FIELD_SEND_RESP_STATS_READY_SHIFT 16
+
+/* Helper macros */
+#define ISA_CFG_GET_MASK_FROM_LEN(len) ((1 << (len)) - 1)
+#define ISA_CFG_GET_MASK_FROM_TAG(tag) \
+	(ISA_CFG_GET_MASK_FROM_LEN(ISA_CFG_FIELD_##tag##_LEN))
+#define ISA_CFG_GET_SHIFT_FROM_TAG(tag) \
+	(ISA_CFG_FIELD_##tag##_SHIFT)
+/* Get/Set macros */
+#define ISA_CFG_FIELD_GET(tag, word) \
+	( \
+	((word) >> (ISA_CFG_GET_SHIFT_FROM_TAG(tag))) &\
+	ISA_CFG_GET_MASK_FROM_TAG(tag) \
+	)
+#define ISA_CFG_FIELD_SET(tag, word, value) \
+	word |= ( \
+	((value) & ISA_CFG_GET_MASK_FROM_TAG(tag)) << \
+	ISA_CFG_GET_SHIFT_FROM_TAG(tag) \
+	)
+
+/**
+ * struct ia_css_isys_isa_cfg_comm.
Describes the ISA cfg + */ +struct ia_css_isys_isa_cfg_comm { + aligned_struct(struct ia_css_isys_resolution_comm, + isa_res[N_IA_CSS_ISYS_RESOLUTION_INFO]); + aligned_uint32(/* multi-field packing */, cfg_fields); +}; + + /** + * struct ia_css_isys_cropping_comm - cropping coordinates + */ +struct ia_css_isys_cropping_comm { + aligned_int32(int, top_offset); + aligned_int32(int, left_offset); + aligned_int32(int, bottom_offset); + aligned_int32(int, right_offset); +}; + + /** + * struct ia_css_isys_stream_cfg_data_comm + * ISYS stream configuration data structure + * @isa_cfg: details about what ACCs are active if ISA is used + * @crop: defines cropping resolution for the + * maximum number of input pins which can be cropped, + * it is directly mapped to the HW devices + * @input_pins: input pin descriptors + * @output_pins: output pin descriptors + * @compfmt: de-compression setting for User Defined Data + * @nof_input_pins: number of input pins + * @nof_output_pins: number of output pins + * @send_irq_sof_discarded: send irq on discarded frame sof response + * - if '1' it will override the send_resp_sof_discarded and send + * the response + * - if '0' the send_resp_sof_discarded will determine whether to + * send the response + * @send_irq_eof_discarded: send irq on discarded frame eof response + * - if '1' it will override the send_resp_eof_discarded and send + * the response + * - if '0' the send_resp_eof_discarded will determine whether to + * send the response + * @send_resp_sof_discarded: send response for discarded frame sof detected, + * used only when send_irq_sof_discarded is '0' + * @send_resp_eof_discarded: send response for discarded frame eof detected, + * used only when send_irq_eof_discarded is '0' + * @src: Stream source index e.g. 
MIPI_generator_0, CSI2-rx_1 + * @vc: MIPI Virtual Channel (up to 4 virtual per physical channel) + * @isl_use: indicates whether stream requires ISL and how + */ +struct ia_css_isys_stream_cfg_data_comm { + aligned_struct(struct ia_css_isys_isa_cfg_comm, isa_cfg); + aligned_struct(struct ia_css_isys_cropping_comm, + crop[N_IA_CSS_ISYS_CROPPING_LOCATION]); + aligned_struct(struct ia_css_isys_input_pin_info_comm, + input_pins[MAX_IPINS]); + aligned_struct(struct ia_css_isys_output_pin_info_comm, + output_pins[MAX_OPINS]); + aligned_uint32(unsigned int, compfmt); + aligned_uint8(unsigned int, nof_input_pins); + aligned_uint8(unsigned int, nof_output_pins); + aligned_uint8(unsigned int, send_irq_sof_discarded); + aligned_uint8(unsigned int, send_irq_eof_discarded); + aligned_uint8(unsigned int, send_resp_sof_discarded); + aligned_uint8(unsigned int, send_resp_eof_discarded); + aligned_uint8(enum ia_css_isys_stream_source, src); + aligned_uint8(enum ia_css_isys_mipi_vc, vc); + aligned_uint8(enum ia_css_isys_isl_use, isl_use); +}; + +/** + * struct ia_css_isys_frame_buff_set - frame buffer set + * @output_pins: output pin addresses + * @process_group_light: process_group_light buffer address + * @send_irq_sof: send irq on frame sof response + * - if '1' it will override the send_resp_sof and send the + * response + * - if '0' the send_resp_sof will determine whether to send the + * response + * @send_irq_eof: send irq on frame eof response + * - if '1' it will override the send_resp_eof and send the + * response + * - if '0' the send_resp_eof will determine whether to send the + * response + * @send_resp_sof: send response for frame sof detected, used only when + * send_irq_sof is '0' + * @send_resp_eof: send response for frame eof detected, used only when + * send_irq_eof is '0' + * @frame_counter: frame number associated with this buffer set. 
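
For the ISA_CFG_FIELD_* helpers defined earlier in this header, a short illustrative sketch of how the cfg_fields word of struct ia_css_isys_isa_cfg_comm gets packed; the token-pasting macros resolve tags such as BLC_EN, AWB_EN and PAF_TYPE to their _LEN/_SHIFT pairs:

/* Illustrative only: enable BLC and AWB, select a hypothetical PAF type 2,
 * then read one field back. ISA_CFG_FIELD_GET(PAF_TYPE, cfg) returns 2. */
static unsigned int example_isa_cfg_fields(void)
{
	unsigned int cfg = 0;

	ISA_CFG_FIELD_SET(BLC_EN, cfg, 1);   /* bit 0 */
	ISA_CFG_FIELD_SET(AWB_EN, cfg, 1);   /* bit 4 */
	ISA_CFG_FIELD_SET(PAF_TYPE, cfg, 2); /* 8-bit field at bits 7..14 */
	return ISA_CFG_FIELD_GET(PAF_TYPE, cfg);
}
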
+ */ +struct ia_css_isys_frame_buff_set_comm { + aligned_struct(struct ia_css_isys_output_pin_payload_comm, + output_pins[MAX_OPINS]); + aligned_struct(struct ia_css_isys_param_pin_comm, process_group_light); + aligned_uint8(unsigned int, send_irq_sof); + aligned_uint8(unsigned int, send_irq_eof); + aligned_uint8(unsigned int, send_irq_capture_ack); + aligned_uint8(unsigned int, send_irq_capture_done); + aligned_uint8(unsigned int, send_resp_sof); + aligned_uint8(unsigned int, send_resp_eof); + aligned_uint8(unsigned int, frame_counter); +}; + +/** + * struct ia_css_isys_error_info_comm + * @error: error code if something went wrong + * @error_details: depending on error code, it may contain additional + * error info + */ +struct ia_css_isys_error_info_comm { + aligned_enum(enum ia_css_isys_error, error); + aligned_uint32(unsigned int, error_details); +}; + +/** + * struct ia_css_isys_resp_info_comm + * @pin: this var is only valid for pin event related responses, + * contains pin addresses + * @process_group_light: this var is valid for stats ready related responses, + * contains process group addresses + * @error_info: error information from the FW + * @timestamp: Time information for event if available + * @stream_handle: stream id the response corresponds to + * @type: response type + * @pin_id: pin id that the pin payload corresponds to + * @acc_id: this var is valid for stats ready related responses, + * contains accelerator id that finished producing + * all related statistics + * @frame_counter: valid for STREAM_START_AND_CAPTURE_DONE, + * STREAM_CAPTURE_DONE and STREAM_CAPTURE_DISCARDED, + * @written_direct: indicates if frame was written direct (online mode) or not. + * + */ + +struct ia_css_isys_resp_info_comm { + aligned_uint64(ia_css_return_token, buf_id); /* Used internally only */ + aligned_struct(struct ia_css_isys_output_pin_payload_comm, pin); + aligned_struct(struct ia_css_isys_param_pin_comm, process_group_light); + aligned_struct(struct ia_css_isys_error_info_comm, error_info); + aligned_uint32(unsigned int, timestamp[2]); + aligned_uint8(unsigned int, stream_handle); + aligned_uint8(enum ia_css_isys_resp_type, type); + aligned_uint8(unsigned int, pin_id); + aligned_uint8(unsigned int, acc_id); + aligned_uint8(unsigned int, frame_counter); + aligned_uint8(unsigned int, written_direct); +}; + +/** + * struct ia_css_isys_proxy_error_info_comm + * @proxy_error: error code if something went wrong + * @proxy_error_details: depending on error code, it may contain additional + * error info + */ +struct ia_css_isys_proxy_error_info_comm { + aligned_enum(enum ia_css_proxy_error, error); + aligned_uint32(unsigned int, error_details); +}; + +/** + * struct ia_css_isys_proxy_resp_info_comm + * @request_id: Unique identifier for the write request + * (in case multiple write requests are issued for same register) + * @error_info: details in struct definition + */ +struct ia_css_isys_proxy_resp_info_comm { + aligned_uint32(uint32_t, request_id); + aligned_struct(struct ia_css_isys_proxy_error_info_comm, error_info); +}; + +/** + * struct ia_css_proxy_write_queue_token + * @request_id: update id for the specific proxy write request + * @region_index: Region id for the proxy write request + * @offset: Offset of the write request according to the base address of the + * region + * @value: Value that is requested to be written with the proxy write request + */ +struct ia_css_proxy_write_queue_token { + aligned_uint32(uint32_t, request_id); + aligned_uint32(uint32_t, region_index); + 
aligned_uint32(uint32_t, offset); + aligned_uint32(uint32_t, value); +}; + +/* From here on type defines not coming from the ISYSAPI interface */ + +/** + * struct resp_queue_token + */ +struct resp_queue_token { + aligned_struct(struct ia_css_isys_resp_info_comm, resp_info); +}; + +/** + * struct send_queue_token + */ +struct send_queue_token { + aligned_uint64(ia_css_return_token, buf_handle); + aligned_uint32(ia_css_input_buffer_css_address, payload); + aligned_uint16(enum ia_css_isys_send_type, send_type); + aligned_uint16(unsigned int, stream_id); +}; + +/** + * struct proxy_resp_queue_token + */ +struct proxy_resp_queue_token { + aligned_struct(struct ia_css_isys_proxy_resp_info_comm, + proxy_resp_info); +}; + +/** + * struct proxy_send_queue_token + */ +struct proxy_send_queue_token { + aligned_uint32(uint32_t, request_id); + aligned_uint32(uint32_t, region_index); + aligned_uint32(uint32_t, offset); + aligned_uint32(uint32_t, value); +}; + +#endif /* __IA_CSS_ISYS_FW_BRIDGED_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi.h new file mode 100644 index 000000000000..5f10f72c0974 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi.h @@ -0,0 +1,326 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_ISYSAPI_H
+#define __IA_CSS_ISYSAPI_H
+
+/**
+ * errno.h specified error codes to be used
+ * URL: http://man7.org/linux/man-pages/man3/errno.3.html
+ */
+
+/* The following is needed for the function arguments */
+#include "ia_css_isysapi_types.h"
+
+/* To define the HANDLE */
+#include "type_support.h"
+
+/**
+ * ia_css_isys_device_open() - configure ISYS device
+ * @context: device handle output parameter
+ * @config: device configuration data struct ptr as input parameter,
+ * read only by CSS FW until function return
+ * Ownership: ISYS will only read-access config during the function call
+ * Prepares and sends to the PG server (SP) the syscom and isys context
+ * Executes the host level 0 and 1 boot sequence and starts the PG server (SP)
+ * All streams must be stopped when calling ia_css_isys_device_open()
+ *
+ * Return: int type error code (errno.h)
+ */
+#if HAS_DUAL_CMD_CTX_SUPPORT
+extern int ia_css_isys_context_create(
+	HANDLE *context,
+	const struct ia_css_isys_device_cfg_data *config
+);
+extern int ia_css_isys_context_store_dmem(
+	const HANDLE *context,
+	const struct ia_css_isys_device_cfg_data *config
+);
+extern bool ia_css_isys_ab_spc_ready(
+	HANDLE *context
+);
+extern int ia_css_isys_device_open(
+	const struct ia_css_isys_device_cfg_data *config
+);
+#else
+extern int ia_css_isys_device_open(
+	HANDLE *context,
+	const struct ia_css_isys_device_cfg_data *config
+);
+#endif
+
+/**
+ * ia_css_isys_device_open_ready() - complete ISYS device configuration
+ * @context: device handle,
+ * read only by CSS FW until function return
+ * Requires the boot sequence to be completed before it can return
+ * successfully (includes syscom and isys context)
+ * Initialises Host/ISYS messaging queues
+ * Must be called multiple times until it succeeds or it is determined by
+ * the driver that the boot sequence has failed.
+ * All streams must be stopped when calling ia_css_isys_device_open()
+ *
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_device_open_ready(
+	HANDLE context
+);
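
An illustrative bring-up sketch for the two calls above (non-dual-context build; the retry bound is hypothetical driver policy, not part of this API):

/* Illustrative only: open the device, then poll until boot completes. */
static int example_isys_bring_up(HANDLE *context,
				 const struct ia_css_isys_device_cfg_data *cfg)
{
	int retries = 100; /* hypothetical bound, tune per platform */
	int ret = ia_css_isys_device_open(context, cfg);

	if (ret != 0)
		return ret;
	do {
		ret = ia_css_isys_device_open_ready(*context);
	} while (ret != 0 && --retries > 0);
	return ret; /* nonzero here means the driver gave up on the boot */
}
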
+
+/**
+ * ia_css_isys_stream_open() - open and configure a virtual stream
+ * @stream_handle: stream handle
+ * @stream_cfg: stream configuration data struct pointer, which is
+ * "read only" by ISYS until function return
+ * Ownership: ISYS will only read-access stream_cfg during the function call
+ * Pre-conditions:
+ * Any Isys/Ssys interface changes must call ia_css_isys_stream_open()
+ * Post-condition:
+ * On successful call, ISYS hardware resources (IBFctrl, ISL, DMAs)
+ * are acquired and the ISYS server is able to handle stream-specific commands
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_stream_open(
+	HANDLE context,
+	const unsigned int stream_handle,
+	const struct ia_css_isys_stream_cfg_data *stream_cfg
+);
+
+/**
+ * ia_css_isys_stream_close() - close virtual stream
+ * @stream_handle: stream identifier
+ * Releases ISYS resources by freeing up stream HW resources;
+ * output pin buffer ownership is returned to the driver
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_stream_close(
+	HANDLE context,
+	const unsigned int stream_handle
+);
+
+/**
+ * ia_css_isys_stream_start() - start handling a MIPI virtual stream
+ * @stream_handle: stream identifier
+ * @next_frame:
+ * if next_frame != NULL: apply next_frame
+ * settings asynchronously and start the stream.
+ * This mode ensures that the first frame is captured,
+ * and thus minimal start-up latency
+ * (precondition: sensor streaming must be switched off)
+ *
+ * if next_frame == NULL: sensor can be in a streaming state;
+ * all capture indication commands will be
+ * processed synchronously (e.g. on MIPI SOF events)
+ *
+ * To be called once ia_css_isys_stream_open() is successfully called
+ * On success, the stream's HW resources are in active state
+ *
+ * Object ownership: During this function call,
+ * the next_frame struct must be read but not modified by the ISYS,
+ * and in addition the driver is not allowed to modify it;
+ * on function exit next_frame ownership is returned to
+ * the driver and is no longer accessed by ISYS
+ * next_frame contains a collection of
+ * ia_css_isys_output_pin * and ia_css_isys_input_pin *
+ * which point to the frame's "output/input pin info & data buffers"
+ *
+ * Upon the ia_css_isys_stream_start() call,
+ * ia_css_isys_output_pin* or ia_css_isys_input_pin*
+ * will now be owned by the ISYS;
+ * these ptrs enable runtime/dynamic ISYS configuration and are also
+ * used to store and write captured payload data
+ * at the address specified in ia_css_isys_output_pin_payload
+ * These ptrs should no longer be accessed by any other
+ * code until (ia_css_isys_output_pin) gets handed
+ * back to the driver via the response mechanism
+ * ia_css_isys_stream_handle_response();
+ * the driver is responsible for providing valid
+ * ia_css_isys_output_pin* or ia_css_isys_input_pin*
+ * Pointers set to NULL will simply not be used by the ISYS
+ *
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_stream_start(
+	HANDLE context,
+	const unsigned int stream_handle,
+	const struct ia_css_isys_frame_buff_set *next_frame
+);
+
+/**
+ * ia_css_isys_stream_stop() - stop a MIPI virtual stream
+ * @stream_handle: stream identifier
+ * Stops both accepting new commands and processing
+ * submitted capture indication commands
+ * Support for Secure Touch
+ * Precondition: stream must be started
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_stream_stop(
+	HANDLE context,
+	const unsigned int stream_handle
+);
+
+/**
+ * ia_css_isys_stream_flush() - stop a MIPI virtual stream but
+ * complete processing of the command backlog
+ * @stream_handle: stream identifier
+ * Stops accepting new commands, but processes
+ * the already submitted capture indications
+ * Precondition: stream must be started
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_stream_flush(
+	HANDLE context,
+	const unsigned int stream_handle
+);
+
+/**
+ * ia_css_isys_stream_capture_indication()
+ * captures the "next frame" on stream_handle
+ * @stream_handle: stream identifier
+ * @next_frame: frame pin payloads are provided atomically
+ * Purpose: stream capture new frame command; successful calls will
+ * result in frame output pins being captured
+ *
+ * To be called once ia_css_isys_stream_start() is successfully called
+ * On success, the stream's HW resources are in active state
+ *
+ * Object ownership: During this function call,
+ * the next_frame struct must be read but not modified by the ISYS,
+ * and in addition the driver is not allowed to modify it;
+ * on function exit next_frame ownership is returned to
+ * the driver and is no longer accessed by ISYS
+ * next_frame contains a collection of
+ * ia_css_isys_output_pin * and ia_css_isys_input_pin *
+ * which point to the frame's "output/input pin info & data buffers"
+ *
+ * Upon the ia_css_isys_stream_capture_indication() call,
+ * ia_css_isys_output_pin* or ia_css_isys_input_pin*
+ * will now be owned by the ISYS;
+ * these ptrs enable runtime/dynamic ISYS configuration and are also
+ * used to store and write captured payload data
+ * at the address specified in ia_css_isys_output_pin_payload
+ * These ptrs should no longer be accessed by any other
+ * code until (ia_css_isys_output_pin) gets handed
+ * back to the driver via the response mechanism
+ * ia_css_isys_stream_handle_response();
+ * the driver is responsible for providing valid
+ * ia_css_isys_output_pin* or ia_css_isys_input_pin*
+ * Pointers set to NULL will simply not be used by the ISYS, and this
+ * refers specifically to the following cases:
+ * - output pins from the SOC path if the same datatype is also passed into
+ * the ISAPF path or it has an active MIPI output (not NULL)
+ * - full resolution pin from ISA (but not when bypassing ISA)
+ * - scaled pin from ISA (bypassing ISA for the scaled pin is impossible)
+ * - output pins from the MIPI path, but only when the same datatype is also
+ * either forwarded to the ISAPF path based on the stream configuration
+ * (it is ok if the second output pin of this datatype is also skipped)
+ * or it has an active SOC output (not NULL)
+ *
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_stream_capture_indication(
+	HANDLE context,
+	const unsigned int stream_handle,
+	const struct ia_css_isys_frame_buff_set *next_frame
+);
+
+/**
+ * ia_css_isys_stream_handle_response() - handle ISYS responses
+ * @received_response: provides response info from the
+ * "next response element" from the ISYS server;
+ * received_response will be written to during the function call and
+ * can be read by the driver once the function has returned
+ *
+ * Purpose: Allows the client to handle received ISYS responses
+ * Upon an IRQ event, the driver will call ia_css_isys_stream_handle_response()
+ * until the queue is emptied
+ * Responses returning IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY to the driver will
+ * hand ia_css_isys_output_pin ownership back to the driver;
+ * ISYS FW will not write/read access ia_css_isys_output_pin
+ * once it belongs to the driver
+ * Pre-conditions: the ISYS client must have sent CMDs to the ISYS server
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_stream_handle_response(
+	HANDLE context,
+	struct ia_css_isys_resp_info *received_response
+);
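
An illustrative IRQ-path sketch of the drain loop the description above prescribes. The field names follow the _comm mirror of struct ia_css_isys_resp_info, and a nonzero return is assumed to mean the queue is empty; a real driver should check the specific errno codes it expects:

/* Illustrative only: drain all pending ISYS responses after an IRQ. */
static void example_drain_responses(HANDLE context)
{
	struct ia_css_isys_resp_info resp;

	while (ia_css_isys_stream_handle_response(context, &resp) == 0) {
		if (resp.type == IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY) {
			/* resp.pin ownership is back with the driver here */
		}
	}
}
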
+
+/**
+ * ia_css_isys_device_close() - close ISYS device
+ * @context: device handle
+ * Purpose: Request for the cell to close
+ * All streams must be stopped when calling ia_css_isys_device_close()
+ *
+ * Return: int type error code (errno.h)
+ */
+#if HAS_DUAL_CMD_CTX_SUPPORT
+extern int ia_css_isys_context_destroy(
+	HANDLE context
+);
+extern void ia_css_isys_device_close(
+	void
+);
+#else
+extern int ia_css_isys_device_close(
+	HANDLE context
+);
+#endif
+
+/**
+ * ia_css_isys_device_release() - release ISYS device
+ * @context: device handle
+ * @force: forces release, or verifies the state before releasing
+ * Purpose: Free the context, forcibly or not
+ * Must be called after ia_css_isys_device_close()
+ *
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_device_release(
+	HANDLE context,
+	unsigned int force
+);
+
+/**
+ * ia_css_isys_proxy_write_req() - issue an isys proxy write request
+ * @context: device handle
+ * Purpose: Issues a write request for the regions that are exposed
+ * by the proxy interface
+ * Can be called any time between ia_css_isys_device_open and
+ * ia_css_isys_device_close
+ *
+ * Return: int type error code (errno.h)
+ */
+extern int ia_css_isys_proxy_write_req(
+	HANDLE context,
+	const struct ia_css_proxy_write_req_val *write_req_val
+);
+
+/**
+ * ia_css_isys_proxy_handle_write_response()
+ * - Handles isys proxy write request responses
+ *
@context : device handle output parameter + * Purpose: Handling the responses that are created by FW upon the completion + * proxy interface write request + * + * Return: int type error code (errno.h) + */ +extern int ia_css_isys_proxy_handle_write_response( + HANDLE context, + struct ia_css_proxy_write_req_resp *received_response +); + +#endif /* __IA_CSS_ISYSAPI_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_fw_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_fw_types.h new file mode 100644 index 000000000000..938f726d1cfb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_fw_types.h @@ -0,0 +1,512 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYSAPI_FW_TYPES_H +#define __IA_CSS_ISYSAPI_FW_TYPES_H + + +/* Max number of Input/Output Pins */ +#define MAX_IPINS (4) +/* worst case is ISA use where a single input pin produces: +* Mipi output, NS Pixel Output, and Scaled Pixel Output. +* This is how the 2 is calculated +*/ +#define MAX_OPINS ((MAX_IPINS) + 2) + +/* Max number of supported virtual streams */ +#define STREAM_ID_MAX (8) + +/* Aligned with the approach of having one dedicated per stream */ +#define N_MAX_MSG_SEND_QUEUES (STREAM_ID_MAX) +/* Single return queue for all streams/commands type */ +#define N_MAX_MSG_RECV_QUEUES (1) +/* Single device queue for high priority commands (bypass in-order queue) */ +#define N_MAX_DEV_SEND_QUEUES (1) +/* Single dedicated send queue for proxy interface */ +#define N_MAX_PROXY_SEND_QUEUES (1) +/* Single dedicated recv queue for proxy interface */ +#define N_MAX_PROXY_RECV_QUEUES (1) +/* Send queues layout */ +#define BASE_PROXY_SEND_QUEUES (0) +#define BASE_DEV_SEND_QUEUES (BASE_PROXY_SEND_QUEUES + N_MAX_PROXY_SEND_QUEUES) +#define BASE_MSG_SEND_QUEUES (BASE_DEV_SEND_QUEUES + N_MAX_DEV_SEND_QUEUES) +#define N_MAX_SEND_QUEUES (BASE_MSG_SEND_QUEUES + N_MAX_MSG_SEND_QUEUES) +/* Recv queues layout */ +#define BASE_PROXY_RECV_QUEUES (0) +#define BASE_MSG_RECV_QUEUES (BASE_PROXY_RECV_QUEUES + N_MAX_PROXY_RECV_QUEUES) +#define N_MAX_RECV_QUEUES (BASE_MSG_RECV_QUEUES + N_MAX_MSG_RECV_QUEUES) + +#define MAX_QUEUE_SIZE (256) +#define MIN_QUEUE_SIZE (1) + +/* Consider 1 slot per stream since driver is not expected to pipeline + * device commands for the same stream */ +#define DEV_SEND_QUEUE_SIZE (STREAM_ID_MAX) + +/* Max number of supported SRAM buffer partitions */ +/* It refers to the size of stream partitions */ +/* These partitions are further subpartitioned internally */ +/* by the FW, but by declaring statically the stream */ +/* partitions we solve the buffer fragmentation issue */ +#define NOF_SRAM_BLOCKS_MAX (STREAM_ID_MAX) + +/* Max number of supported input pins routed in ISL */ +#define MAX_IPINS_IN_ISL (2) + +/* Max number of planes for frame formats supported by the FW */ +#define PIN_PLANES_MAX (4) + +/** + * enum ia_css_isys_resp_type + */ +enum 
+enum ia_css_isys_resp_type {
+	IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_START_ACK,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_STOP_ACK,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_FLUSH_ACK,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_CLOSE_ACK,
+	IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY,
+	IA_CSS_ISYS_RESP_TYPE_PIN_DATA_WATERMARK,
+	IA_CSS_ISYS_RESP_TYPE_FRAME_SOF,
+	IA_CSS_ISYS_RESP_TYPE_FRAME_EOF,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE,
+	IA_CSS_ISYS_RESP_TYPE_PIN_DATA_SKIPPED,
+	IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED,
+	IA_CSS_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED,
+	IA_CSS_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED,
+	IA_CSS_ISYS_RESP_TYPE_STATS_DATA_READY,
+	N_IA_CSS_ISYS_RESP_TYPE
+};
+
+/**
+ * enum ia_css_isys_send_type
+ */
+enum ia_css_isys_send_type {
+	IA_CSS_ISYS_SEND_TYPE_STREAM_OPEN = 0,
+	IA_CSS_ISYS_SEND_TYPE_STREAM_START,
+	IA_CSS_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE,
+	IA_CSS_ISYS_SEND_TYPE_STREAM_CAPTURE,
+	IA_CSS_ISYS_SEND_TYPE_STREAM_STOP,
+	IA_CSS_ISYS_SEND_TYPE_STREAM_FLUSH,
+	IA_CSS_ISYS_SEND_TYPE_STREAM_CLOSE,
+	N_IA_CSS_ISYS_SEND_TYPE
+};
+
+/**
+ * enum ia_css_isys_queue_type
+ */
+enum ia_css_isys_queue_type {
+	IA_CSS_ISYS_QUEUE_TYPE_PROXY = 0,
+	IA_CSS_ISYS_QUEUE_TYPE_DEV,
+	IA_CSS_ISYS_QUEUE_TYPE_MSG,
+	N_IA_CSS_ISYS_QUEUE_TYPE
+};
+
+/**
+ * enum ia_css_isys_stream_source: Specifies a source for a stream
+ */
+enum ia_css_isys_stream_source {
+	IA_CSS_ISYS_STREAM_SRC_PORT_0 = 0,
+	IA_CSS_ISYS_STREAM_SRC_PORT_1,
+	IA_CSS_ISYS_STREAM_SRC_PORT_2,
+	IA_CSS_ISYS_STREAM_SRC_PORT_3,
+	IA_CSS_ISYS_STREAM_SRC_PORT_4,
+	IA_CSS_ISYS_STREAM_SRC_PORT_5,
+	IA_CSS_ISYS_STREAM_SRC_PORT_6,
+	IA_CSS_ISYS_STREAM_SRC_PORT_7,
+	IA_CSS_ISYS_STREAM_SRC_PORT_8,
+	IA_CSS_ISYS_STREAM_SRC_PORT_9,
+	IA_CSS_ISYS_STREAM_SRC_PORT_10,
+	IA_CSS_ISYS_STREAM_SRC_PORT_11,
+	IA_CSS_ISYS_STREAM_SRC_PORT_12,
+	IA_CSS_ISYS_STREAM_SRC_PORT_13,
+	IA_CSS_ISYS_STREAM_SRC_PORT_14,
+	IA_CSS_ISYS_STREAM_SRC_PORT_15,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_0,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_1,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_2,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_3,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_4,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_5,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_6,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_7,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_8,
+	IA_CSS_ISYS_STREAM_SRC_MIPIGEN_9,
+	N_IA_CSS_ISYS_STREAM_SRC
+};
+
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_PORT0 IA_CSS_ISYS_STREAM_SRC_PORT_0
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_PORT1 IA_CSS_ISYS_STREAM_SRC_PORT_1
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_PORT2 IA_CSS_ISYS_STREAM_SRC_PORT_2
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_PORT3 IA_CSS_ISYS_STREAM_SRC_PORT_3
+
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_PORTA IA_CSS_ISYS_STREAM_SRC_PORT_4
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_PORTB IA_CSS_ISYS_STREAM_SRC_PORT_5
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT0 IA_CSS_ISYS_STREAM_SRC_PORT_6
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT1 IA_CSS_ISYS_STREAM_SRC_PORT_7
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT2 IA_CSS_ISYS_STREAM_SRC_PORT_8
+#define IA_CSS_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT3 IA_CSS_ISYS_STREAM_SRC_PORT_9
+
+#define IA_CSS_ISYS_STREAM_SRC_MIPIGEN_PORT0 IA_CSS_ISYS_STREAM_SRC_MIPIGEN_0
+#define IA_CSS_ISYS_STREAM_SRC_MIPIGEN_PORT1 IA_CSS_ISYS_STREAM_SRC_MIPIGEN_1
+
+/**
+ * enum ia_css_isys_mipi_vc: the MIPI CSI-2 spec
+ * supports up to 4 virtual channels per physical channel
+ */
+enum ia_css_isys_mipi_vc {
+	
IA_CSS_ISYS_MIPI_VC_0 = 0, + IA_CSS_ISYS_MIPI_VC_1, + IA_CSS_ISYS_MIPI_VC_2, + IA_CSS_ISYS_MIPI_VC_3, + N_IA_CSS_ISYS_MIPI_VC +}; + +/** + * Supported Pixel Frame formats. Expandable if needed + */ +enum ia_css_isys_frame_format_type { + IA_CSS_ISYS_FRAME_FORMAT_NV11 = 0,/* 12 bit YUV 411, Y, UV plane */ + IA_CSS_ISYS_FRAME_FORMAT_NV12,/* 12 bit YUV 420, Y, UV plane */ + IA_CSS_ISYS_FRAME_FORMAT_NV12_16,/* 16 bit YUV 420, Y, UV plane */ + IA_CSS_ISYS_FRAME_FORMAT_NV12_TILEY,/* 12 bit YUV 420, Intel + proprietary tiled format, + TileY + */ + IA_CSS_ISYS_FRAME_FORMAT_NV16,/* 16 bit YUV 422, Y, UV plane */ + IA_CSS_ISYS_FRAME_FORMAT_NV21,/* 12 bit YUV 420, Y, VU plane */ + IA_CSS_ISYS_FRAME_FORMAT_NV61,/* 16 bit YUV 422, Y, VU plane */ + IA_CSS_ISYS_FRAME_FORMAT_YV12,/* 12 bit YUV 420, Y, V, U plane */ + IA_CSS_ISYS_FRAME_FORMAT_YV16,/* 16 bit YUV 422, Y, V, U plane */ + IA_CSS_ISYS_FRAME_FORMAT_YUV420,/* 12 bit YUV 420, Y, U, V plane */ + IA_CSS_ISYS_FRAME_FORMAT_YUV420_10,/* yuv420, 10 bits per subpixel */ + IA_CSS_ISYS_FRAME_FORMAT_YUV420_12,/* yuv420, 12 bits per subpixel */ + IA_CSS_ISYS_FRAME_FORMAT_YUV420_14,/* yuv420, 14 bits per subpixel */ + IA_CSS_ISYS_FRAME_FORMAT_YUV420_16,/* yuv420, 16 bits per subpixel */ + IA_CSS_ISYS_FRAME_FORMAT_YUV422,/* 16 bit YUV 422, Y, U, V plane */ + IA_CSS_ISYS_FRAME_FORMAT_YUV422_16,/* yuv422, 16 bits per subpixel */ + IA_CSS_ISYS_FRAME_FORMAT_UYVY,/* 16 bit YUV 422, UYVY interleaved */ + IA_CSS_ISYS_FRAME_FORMAT_YUYV,/* 16 bit YUV 422, YUYV interleaved */ + IA_CSS_ISYS_FRAME_FORMAT_YUV444,/* 24 bit YUV 444, Y, U, V plane */ + IA_CSS_ISYS_FRAME_FORMAT_YUV_LINE,/* Internal format, 2 y lines + followed by a uvinterleaved line + */ + IA_CSS_ISYS_FRAME_FORMAT_RAW8, /* RAW8, 1 plane */ + IA_CSS_ISYS_FRAME_FORMAT_RAW10, /* RAW10, 1 plane */ + IA_CSS_ISYS_FRAME_FORMAT_RAW12, /* RAW12, 1 plane */ + IA_CSS_ISYS_FRAME_FORMAT_RAW14, /* RAW14, 1 plane */ + IA_CSS_ISYS_FRAME_FORMAT_RAW16, /* RAW16, 1 plane */ + IA_CSS_ISYS_FRAME_FORMAT_RGB565,/* 16 bit RGB, 1 plane. Each 3 sub + pixels are packed into one 16 bit + value, 5 bits for R, 6 bits for G + and 5 bits for B. + */ + IA_CSS_ISYS_FRAME_FORMAT_PLANAR_RGB888, /* 24 bit RGB, 3 planes */ + IA_CSS_ISYS_FRAME_FORMAT_RGBA888,/* 32 bit RGBA, 1 plane, + A=Alpha (alpha is unused) + */ + IA_CSS_ISYS_FRAME_FORMAT_QPLANE6,/* Internal, for advanced ISP */ + IA_CSS_ISYS_FRAME_FORMAT_BINARY_8,/* byte stream, used for jpeg. */ + N_IA_CSS_ISYS_FRAME_FORMAT +}; +/* Temporary for driver compatibility */ +#define IA_CSS_ISYS_FRAME_FORMAT_RAW (IA_CSS_ISYS_FRAME_FORMAT_RAW16) + + +/** + * Supported MIPI data type. Keep in sync array in ia_css_isys_private.c + */ +enum ia_css_isys_mipi_data_type { + /** SYNCHRONIZATION SHORT PACKET DATA TYPES */ + IA_CSS_ISYS_MIPI_DATA_TYPE_FRAME_START_CODE = 0x00, + IA_CSS_ISYS_MIPI_DATA_TYPE_FRAME_END_CODE = 0x01, + IA_CSS_ISYS_MIPI_DATA_TYPE_LINE_START_CODE = 0x02, /* Optional */ + IA_CSS_ISYS_MIPI_DATA_TYPE_LINE_END_CODE = 0x03, /* Optional */ + /** Reserved 0x04-0x07 */ + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x04 = 0x04, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x05 = 0x05, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x06 = 0x06, + IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x07 = 0x07, + /** GENERIC SHORT PACKET DATA TYPES */ + /** They are used to keep the timing information for the + * opening/closing of shutters, triggering of flashes and etc. 
+ */
+	/* Generic Short Packet Code 1 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT1 = 0x08,
+	/* Generic Short Packet Code 2 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT2 = 0x09,
+	/* Generic Short Packet Code 3 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT3 = 0x0A,
+	/* Generic Short Packet Code 4 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT4 = 0x0B,
+	/* Generic Short Packet Code 5 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT5 = 0x0C,
+	/* Generic Short Packet Code 6 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT6 = 0x0D,
+	/* Generic Short Packet Code 7 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT7 = 0x0E,
+	/* Generic Short Packet Code 8 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT8 = 0x0F,
+	/** GENERIC LONG PACKET DATA TYPES */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_NULL = 0x10,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_BLANKING_DATA = 0x11,
+	/* Embedded 8-bit non Image Data */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_EMBEDDED = 0x12,
+	/** Reserved 0x13-0x17 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x13 = 0x13,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x14 = 0x14,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x15 = 0x15,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x16 = 0x16,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x17 = 0x17,
+	/** YUV DATA TYPES */
+	/* 8 bits per subpixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_8 = 0x18,
+	/* 10 bits per subpixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_10 = 0x19,
+	/* 8 bits per subpixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_8_LEGACY = 0x1A,
+	/** Reserved 0x1B */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x1B = 0x1B,
+	/* YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_8_SHIFT = 0x1C,
+	/* YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_YUV420_10_SHIFT = 0x1D,
+	/* UYVY..UVYV, 8 bits per subpixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_YUV422_8 = 0x1E,
+	/* UYVY..UVYV, 10 bits per subpixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_YUV422_10 = 0x1F,
+	/** RGB DATA TYPES */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_444 = 0x20,
+	/* BGR..BGR, 5 bits per subpixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_555 = 0x21,
+	/* BGR..BGR, 5 bits B and R, 6 bits G */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_565 = 0x22,
+	/* BGR..BGR, 6 bits per subpixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_666 = 0x23,
+	/* BGR..BGR, 8 bits per subpixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RGB_888 = 0x24,
+	/** Reserved 0x25-0x27 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x25 = 0x25,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x26 = 0x26,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x27 = 0x27,
+	/** RAW DATA TYPES */
+	/* RAW data, 6 bits per pixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_6 = 0x28,
+	/* RAW data, 7 bits per pixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_7 = 0x29,
+	/* RAW data, 8 bits per pixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_8 = 0x2A,
+	/* RAW data, 10 bits per pixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_10 = 0x2B,
+	/* RAW data, 12 bits per pixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_12 = 0x2C,
+	/* RAW data, 14 bits per pixel */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_14 = 0x2D,
+	/** Reserved codes 0x2E-0x2F are used here with an assigned meaning */
+	/* RAW data, 16 bits per pixel, not specified in CSI-MIPI standard */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_16 = 0x2E,
+	/* Binary byte stream, which is targeted at JPEG, not specified in
+	 * CSI-MIPI standard
+	 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_BINARY_8 = 0x2F,
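+	/*
+	 * Illustrative note (the values follow from the codes above):
+	 * the RAW_n types encode n bits per pixel, so a 4096-pixel
+	 * RAW_10 line occupies 4096 * 10 / 8 = 5120 payload bytes in
+	 * its long packet.
+	 */
+	/** USER DEFINED 8-BIT DATA TYPES */
+	/** For example, the data transmitter (e.g. the SoC sensor) can keep
+	 * the JPEG data as the User Defined Data Type 4 and the MPEG data as
+	 * the User Defined Data Type 7. 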
+ */
+	/* User defined 8-bit data type 1 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF1 = 0x30,
+	/* User defined 8-bit data type 2 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF2 = 0x31,
+	/* User defined 8-bit data type 3 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF3 = 0x32,
+	/* User defined 8-bit data type 4 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF4 = 0x33,
+	/* User defined 8-bit data type 5 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF5 = 0x34,
+	/* User defined 8-bit data type 6 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF6 = 0x35,
+	/* User defined 8-bit data type 7 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF7 = 0x36,
+	/* User defined 8-bit data type 8 */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF8 = 0x37,
+	/** Reserved 0x38-0x3F */
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x38 = 0x38,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x39 = 0x39,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3A = 0x3A,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3B = 0x3B,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3C = 0x3C,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3D = 0x3D,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3E = 0x3E,
+	IA_CSS_ISYS_MIPI_DATA_TYPE_RESERVED_0x3F = 0x3F,
+
+	/* Keep always last and max value */
+	N_IA_CSS_ISYS_MIPI_DATA_TYPE = 0x40
+};
+
+/** enum ia_css_isys_pin_type: output pin buffer types.
+ * Buffers can be queued and de-queued to hand them over between IA and ISYS
+ */
+enum ia_css_isys_pin_type {
+	/* Captured as MIPI packets */
+	IA_CSS_ISYS_PIN_TYPE_MIPI = 0,
+	/* Captured through the ISApf (with/without ISA)
+	 * and the non-scaled output path
+	 */
+	IA_CSS_ISYS_PIN_TYPE_RAW_NS,
+	/* Captured through the ISApf + ISA and the scaled output path */
+	IA_CSS_ISYS_PIN_TYPE_RAW_S,
+	/* Captured through the SoC path */
+	IA_CSS_ISYS_PIN_TYPE_RAW_SOC,
+	/* Reserved for future use, maybe short packets */
+	IA_CSS_ISYS_PIN_TYPE_METADATA_0,
+	/* Reserved for future use */
+	IA_CSS_ISYS_PIN_TYPE_METADATA_1,
+	/* Legacy (non-PIV2), used for the AWB stats */
+	IA_CSS_ISYS_PIN_TYPE_AWB_STATS,
+	/* Legacy (non-PIV2), used for the AF stats */
+	IA_CSS_ISYS_PIN_TYPE_AF_STATS,
+	/* Legacy (non-PIV2), used for the AE stats */
+	IA_CSS_ISYS_PIN_TYPE_HIST_STATS,
+	/* Used for the PAF FF */
+	IA_CSS_ISYS_PIN_TYPE_PAF_FF,
+	/* Keep always last and max value */
+	N_IA_CSS_ISYS_PIN_TYPE
+};
+
+/**
+ * enum ia_css_isys_isl_use. Describes the ISL/ISA use
+ * (the ISAPF path in IPUs after BXT A0)
+ */
+enum ia_css_isys_isl_use {
+	IA_CSS_ISYS_USE_NO_ISL_NO_ISA = 0,
+	IA_CSS_ISYS_USE_SINGLE_DUAL_ISL,
+	IA_CSS_ISYS_USE_SINGLE_ISA,
+	N_IA_CSS_ISYS_USE
+};
+
+/**
+ * enum ia_css_isys_mipi_store_mode. Describes whether long MIPI packets
+ * reach MIPI SRAM with the long packet header or not.
+ * If not, then the only option is to capture them with pin type MIPI.
+ */
+enum ia_css_isys_mipi_store_mode {
+	IA_CSS_ISYS_MIPI_STORE_MODE_NORMAL = 0,
+	IA_CSS_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER,
+	N_IA_CSS_ISYS_MIPI_STORE_MODE
+};
+
+/**
+ * enum ia_css_isys_mipi_dt_rename_mode. Describes whether long MIPI packets
+ * carry their DT renamed to some other DT format.
+ */
+enum ia_css_isys_mipi_dt_rename_mode {
+	IA_CSS_ISYS_MIPI_DT_NO_RENAME = 0,
+	IA_CSS_ISYS_MIPI_DT_RENAMED_MODE,
+	N_IA_CSS_ISYS_MIPI_DT_MODE
+};
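+
+/*
+ * Illustrative rename-mode use (the sensor behaviour is an assumption
+ * for the example): a sensor that wraps RAW12 pixels in USER_DEF1 long
+ * packets would be described on its input pin as
+ *
+ *	pin->dt             = IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_12;
+ *	pin->dt_rename_mode = IA_CSS_ISYS_MIPI_DT_RENAMED_MODE;
+ *	pin->mapped_dt      = IA_CSS_ISYS_MIPI_DATA_TYPE_USER_DEF1;
+ */
+
+/**
+ * enum ia_css_isys_type_paf. 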
Describes the type of PAF enabled
+ * (PAF path in IPUs after cnlB0)
+ */
+enum ia_css_isys_type_paf {
+	/* PAF data not present */
+	IA_CSS_ISYS_TYPE_NO_PAF = 0,
+	/* Type 2 sensor types, PAF coming separately from Image Frame */
+	/* PAF data in interleaved format (RLRL or LRLR) */
+	IA_CSS_ISYS_TYPE_INTERLEAVED_PAF,
+	/* PAF data in non-interleaved format (LL/RR or RR/LL) */
+	IA_CSS_ISYS_TYPE_NON_INTERLEAVED_PAF,
+	/* Type 3 sensor types, PAF data embedded in Image Frame */
+	/* Frame Embedded PAF in interleaved format (RLRL or LRLR) */
+	IA_CSS_ISYS_TYPE_FRAME_EMB_INTERLEAVED_PAF,
+	/* Frame Embedded PAF in non-interleaved format (LL/RR or RR/LL) */
+	IA_CSS_ISYS_TYPE_FRAME_EMB_NON_INTERLEAVED_PAF,
+	N_IA_CSS_ISYS_TYPE_PAF
+};
+
+/**
+ * enum ia_css_isys_cropping_location. Enumerates the cropping locations
+ * in ISYS
+ */
+enum ia_css_isys_cropping_location {
+	/* Cropping executed in ISAPF (mainly), ISAPF preproc (odd column) and
+	 * MIPI STR2MMIO (odd row)
+	 */
+	IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA = 0,
+	/* BXT A0 legacy mode which will never be implemented */
+	IA_CSS_ISYS_CROPPING_LOCATION_RESERVED_1,
+	/* Cropping executed in StreamPifConv in the ISA output for
+	 * RAW_NS pin
+	 */
+	IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED,
+	/* Cropping executed in StreamScaledPifConv in the ISA output for
+	 * RAW_S pin
+	 */
+	IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED,
+	N_IA_CSS_ISYS_CROPPING_LOCATION
+};
+
+/**
+ * enum ia_css_isys_resolution_info. Describes the resolution, required to
+ * set up the various ISA GP registers.
+ */
+enum ia_css_isys_resolution_info {
+	/* Non-scaled ISA output resolution before the
+	 * StreamPifConv cropping
+	 */
+	IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED = 0,
+	/* Scaled ISA output resolution before the
+	 * StreamScaledPifConv cropping
+	 */
+	IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED,
+	N_IA_CSS_ISYS_RESOLUTION_INFO
+};
+
+/**
+ * enum ia_css_isys_error. Describes the error type detected by the FW
+ */
+enum ia_css_isys_error {
+	IA_CSS_ISYS_ERROR_NONE = 0, /* No details */
+	IA_CSS_ISYS_ERROR_FW_INTERNAL_CONSISTENCY, /* enum */
+	IA_CSS_ISYS_ERROR_HW_CONSISTENCY, /* enum */
+	IA_CSS_ISYS_ERROR_DRIVER_INVALID_COMMAND_SEQUENCE, /* enum */
+	IA_CSS_ISYS_ERROR_DRIVER_INVALID_DEVICE_CONFIGURATION, /* enum */
+	IA_CSS_ISYS_ERROR_DRIVER_INVALID_STREAM_CONFIGURATION, /* enum */
+	IA_CSS_ISYS_ERROR_DRIVER_INVALID_FRAME_CONFIGURATION, /* enum */
+	IA_CSS_ISYS_ERROR_INSUFFICIENT_RESOURCES, /* enum */
+	IA_CSS_ISYS_ERROR_HW_REPORTED_STR2MMIO, /* HW code */
+	IA_CSS_ISYS_ERROR_HW_REPORTED_SIG2CIO, /* HW code */
+	IA_CSS_ISYS_ERROR_SENSOR_FW_SYNC, /* enum */
+	IA_CSS_ISYS_ERROR_STREAM_IN_SUSPENSION, /* FW code */
+	IA_CSS_ISYS_ERROR_RESPONSE_QUEUE_FULL, /* FW code */
+	N_IA_CSS_ISYS_ERROR
+};
+
+/**
+ * enum ia_css_proxy_error. Describes the error type for the proxy detected by
+ * the FW
+ */
+enum ia_css_proxy_error {
+	IA_CSS_PROXY_ERROR_NONE = 0,
+	IA_CSS_PROXY_ERROR_INVALID_WRITE_REGION,
+	IA_CSS_PROXY_ERROR_INVALID_WRITE_OFFSET,
+	N_IA_CSS_PROXY_ERROR
+};
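+
+/*
+ * Illustrative response-error check (not a definition from this
+ * header); 'resp' is assumed to be an ia_css_isys_resp_info filled in
+ * by ia_css_isys_stream_handle_response(), and handle_stream_error()
+ * a hypothetical driver helper:
+ *
+ *	if (resp.error != IA_CSS_ISYS_ERROR_NONE)
+ *		handle_stream_error(resp.stream_handle, resp.error,
+ *				    resp.error_details);
+ */
+
+#endif /* __IA_CSS_ISYSAPI_FW_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_fw_version.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_fw_version.h
new file mode 100644
index 000000000000..bc056157cedb
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_fw_version.h
@@ -0,0 +1,21 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem. 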
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYSAPI_FW_VERSION_H +#define __IA_CSS_ISYSAPI_FW_VERSION_H + +/* ISYSAPI FW VERSION is taken from Makefile for FW tests */ +#define BXT_FW_RELEASE_VERSION ISYS_FIRMWARE_VERSION + +#endif /* __IA_CSS_ISYSAPI_FW_VERSION_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_defs.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_defs.h new file mode 100644 index 000000000000..0fb8310d7367 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_defs.h @@ -0,0 +1,114 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYSAPI_PROXY_REGION_DEFS_H +#define __IA_CSS_ISYSAPI_PROXY_REGION_DEFS_H + +#include "ia_css_isysapi_proxy_region_types.h" + +/* + * Definitions for IPU4_B0_PROXY_INT + */ + +#if defined(IPU4_B0_PROXY_INT) + +/** + * enum ipu4_b0_ia_css_proxy_write_region. 
Provides the list of regions for ipu4B0 that + * can be accessed (for writing purpose) through the proxy interface + */ +enum ipu4_b0_ia_css_proxy_write_region { + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_0_ERROR_FILL_RATE = 0, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_1_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_2_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_3_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_4_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_5_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_6_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_7_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_8_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_9_ERROR_FILL_RATE, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IRQ_URGENT_THRESHOLD, + IPU4_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IRQ_CRITICAL_THRESHOLD, + N_IPU4_B0_IA_CSS_PROXY_WRITE_REGION +}; + +struct ia_css_proxy_write_region_description ipu4_b0_reg_write_desc[N_IPU4_B0_IA_CSS_PROXY_WRITE_REGION] = { + /* base_addr, offset */ + {0x64128, /*input_system_csi2_logic_s2m_a_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_0_ERROR_FILL_RATE*/ + {0x65128, /*input_system_csi2_logic_s2m_b_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_1_ERROR_FILL_RATE*/ + {0x66128, /*input_system_csi2_logic_s2m_c_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_2_ERROR_FILL_RATE*/ + {0x67128, /*input_system_csi2_logic_s2m_d_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_3_ERROR_FILL_RATE*/ + {0x6C128, /*input_system_csi2_3ph_logic_s2m_a_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_4_ERROR_FILL_RATE*/ + {0x6C928, /*input_system_csi2_3ph_logic_s2m_b_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_5_ERROR_FILL_RATE*/ + {0x6D128, /*input_system_csi2_3ph_logic_s2m_0_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_6_ERROR_FILL_RATE*/ + {0x6D928, /*input_system_csi2_3ph_logic_s2m_1_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_7_ERROR_FILL_RATE*/ + {0x6E128, /*input_system_csi2_3ph_logic_s2m_2_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_8_ERROR_FILL_RATE*/ + {0x6E928, /*input_system_csi2_3ph_logic_s2m_3_stream2mmio_err_mode_dc_ctrl_reg_id*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_STR2MMIO_MIPI_9_ERROR_FILL_RATE*/ + {0x7800C, /*input_system_unis_logic_gda_irq_urgent_threshold*/ 4}, /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IRQ_URGENT_THRESHOLD*/ + {0x78010, /*input_system_unis_logic_gda_irq_critical_threshold*/ 4} /*IPU4_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IRQ_CRITICAL_THRESHOLD*/ +}; + +#endif /*defined(IPU4_B0_PROXY_INT)*/ + +/* + * Definitions for IPU4P_A0_PROXY_INT + */ + +#if defined(IPU4P_A0_PROXY_INT) + +/** + * enum ipu4p_a0_ia_css_proxy_write_region. 
Provides the list of regions for ipu4pA0 that + * can be accessed (for writing purpose) through the proxy interface + */ +enum ipu4p_a0_ia_css_proxy_write_region { + N_IPU4P_A0_IA_CSS_PROXY_WRITE_REGION +}; + +#define IPU4P_A0_NO_PROXY_WRITE_REGION_AVAILABLE + +#ifndef IPU4P_A0_NO_PROXY_WRITE_REGION_AVAILABLE +struct ia_css_proxy_write_region_description ipu4p_a0_reg_write_desc[N_IPU4P_A0_IA_CSS_PROXY_WRITE_REGION] = { +} +#endif /*IPU4P_A0_NO_PROXY_WRITE_REGION_AVAILABLE*/ + +#endif /*defined(IPU4P_A0_PROXY_INT)*/ + +/* + * Definitions for IPU4P_B0_PROXY_INT + */ + +#if defined(IPU4P_B0_PROXY_INT) + +/** + * enum ipu4p_b0_ia_css_proxy_write_region. Provides the list of regions for ipu4pB0 that + * can be accessed (for writing purpose) through the proxy interface + */ +enum ipu4p_b0_ia_css_proxy_write_region { + IPU4P_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IWAKE_THRESHOLD = 0, + IPU4P_B0_IA_CSS_PROXY_WRITE_REGION_GDA_ENABLE_IWAKE, + N_IPU4P_B0_IA_CSS_PROXY_WRITE_REGION +}; + +struct ia_css_proxy_write_region_description ipu4p_b0_reg_write_desc[N_IPU4P_B0_IA_CSS_PROXY_WRITE_REGION] = { + /* base_addr, max_offset */ + /*input_system_unis_logic_gda_iwake_threshold*/ + {0x78014, 4}, /*IPU4P_B0_IA_CSS_PROXY_WRITE_REGION_GDA_IWAKE_THRESHOLD*/ + /*input_system_unis_logic_gda_enable_iwake*/ + {0x7801C, 4} /*IPU4P_B0_IA_CSS_PROXY_WRITE_REGION_GDA_ENABLE_IWAKE*/ +}; + +#endif /*defined(IPU4P_B0_PROXY_INT)*/ + + +#endif /* __IA_CSS_ISYSAPI_PROXY_REGION_DEFS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_types.h new file mode 100644 index 000000000000..045f089e5a4c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_proxy_region_types.h @@ -0,0 +1,24 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_ISYSAPI_PROXY_REGION_TYPES_H +#define __IA_CSS_ISYSAPI_PROXY_REGION_TYPES_H + + +struct ia_css_proxy_write_region_description { + uint32_t base_addr; + uint32_t offset; +}; + +#endif /* __IA_CSS_ISYSAPI_PROXY_REGION_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_types.h new file mode 100644 index 000000000000..481a7dc7b481 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/interface/ia_css_isysapi_types.h @@ -0,0 +1,349 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_ISYSAPI_TYPES_H
+#define __IA_CSS_ISYSAPI_TYPES_H
+
+#include "ia_css_isysapi_fw_types.h"
+#include "type_support.h"
+
+#include "ia_css_return_token.h"
+#include "ia_css_output_buffer.h"
+#include "ia_css_input_buffer.h"
+#include "ia_css_terminal_defs.h"
+
+/**
+ * struct ia_css_isys_buffer_partition - buffer partition information
+ * @num_gda_pages: Number of virtual gda pages available for each virtual stream
+ */
+struct ia_css_isys_buffer_partition {
+	unsigned int num_gda_pages[STREAM_ID_MAX];
+};
+
+/**
+ * This should contain the driver specified info for sys
+ */
+struct ia_css_driver_sys_config {
+	unsigned int ssid;
+	unsigned int mmid;
+	unsigned int num_send_queues; /* # of MSG send queues */
+	unsigned int num_recv_queues; /* # of MSG recv queues */
+	unsigned int send_queue_size; /* max # tokens per queue */
+	unsigned int recv_queue_size; /* max # tokens per queue */
+
+	unsigned int icache_prefetch; /* enable prefetching for SPC */
+};
+
+/**
+ * This should contain the driver specified info for proxy write queues
+ */
+struct ia_css_driver_proxy_config {
+	/* max # tokens per PROXY send/recv queue.
+	 * Proxy queues are used for write access purposes
+	 */
+	unsigned int proxy_write_queue_size;
+};
+
+/**
+ * struct ia_css_isys_device_cfg_data - ISYS device configuration data
+ * @driver_sys: system configuration provided by the driver
+ * @buffer_partition: Information required for the virtual SRAM
+ *	space partition of the streams.
+ * @driver_proxy: proxy queue configuration provided by the driver
+ * @secure: Driver needs to set 'secure' to indicate the intention
+ *	when invoking ia_css_isys_context_create() in the
+ *	HAS_DUAL_CMD_CTX_SUPPORT case. If 'true', it's for the
+ *	secure case.
+ */
+struct ia_css_isys_device_cfg_data {
+	struct ia_css_driver_sys_config driver_sys;
+	struct ia_css_isys_buffer_partition buffer_partition;
+	struct ia_css_driver_proxy_config driver_proxy;
+	bool secure;
+	unsigned int vtl0_addr_mask; /* only applicable in the 'secure' case */
+};
+
+/**
+ * struct ia_css_isys_resolution: Generic resolution structure.
+ * @width
+ * @height
+ */
+struct ia_css_isys_resolution {
+	unsigned int width;
+	unsigned int height;
+};
+
+/**
+ * struct ia_css_isys_output_pin_payload
+ * @out_buf_id: Points to output pin buffer - buffer identifier
+ * @addr: Points to output pin buffer - CSS Virtual Address
+ * @compress: Request frame compression (1), or not (0)
+ *	This must be the same as ia_css_isys_output_pin_info::reserve_compression
+ */
+struct ia_css_isys_output_pin_payload {
+	ia_css_return_token out_buf_id;
+	ia_css_output_buffer_css_address addr;
+	unsigned int compress;
+};
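+
+/*
+ * Illustrative device-configuration sketch (field values are
+ * assumptions for the example, not mandated by this interface):
+ *
+ *	struct ia_css_isys_device_cfg_data dev_cfg = {0};
+ *
+ *	dev_cfg.driver_sys.num_send_queues = 1;
+ *	dev_cfg.driver_sys.num_recv_queues = 1;
+ *	dev_cfg.driver_sys.send_queue_size = MAX_QUEUE_SIZE;
+ *	dev_cfg.driver_sys.recv_queue_size = MAX_QUEUE_SIZE;
+ *	dev_cfg.driver_proxy.proxy_write_queue_size = 1;
+ *	dev_cfg.secure = false;
+ */
+
+/**
+ * struct ia_css_isys_output_pin_info
+ * @input_pin_id: input pin id/index which is source of
+ *	the data for this output pin
+ * @output_res: output pin resolution
+ * @stride: output stride in Bytes (not valid for statistics)
+ * @pt: pin type
+ * @ft: frame format type
+ * @watermark_in_lines: pin watermark level in lines
+ * @send_irq: assert if pin event should trigger irq
+ * @link_id: identifies PPG to connect to, link_id = 0 implies offline
+ *	while link_id > 0 implies buffer_chasing or online mode
+ *	can be entered.
+ * @reserve_compression: Reserve compression resources for pin.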
+ * @payload_buf_size: Minimum size in Bytes of all buffers that will be supplied for capture
+ *	on this pin (i.e. addressed by ia_css_isys_output_pin_payload::addr)
+ */
+struct ia_css_isys_output_pin_info {
+	unsigned int input_pin_id;
+	struct ia_css_isys_resolution output_res;
+	unsigned int stride;
+	enum ia_css_isys_pin_type pt;
+	enum ia_css_isys_frame_format_type ft;
+	unsigned int watermark_in_lines;
+	unsigned int send_irq;
+	enum ia_css_isys_link_id link_id;
+	unsigned int reserve_compression;
+	unsigned int payload_buf_size;
+};
+
+/**
+ * struct ia_css_isys_param_pin
+ * @param_buf_id: Points to param buffer - buffer identifier
+ * @addr: Points to param buffer - CSS Virtual Address
+ */
+struct ia_css_isys_param_pin {
+	ia_css_return_token param_buf_id;
+	ia_css_input_buffer_css_address addr;
+};
+
+/**
+ * struct ia_css_isys_input_pin_info
+ * @input_res: input resolution
+ * @dt: mipi data type
+ * @mipi_store_mode: defines whether the legacy long packet header will be
+ *	stored or discarded; if discarded, the output pin type for this
+ *	input pin can only be MIPI
+ * @dt_rename_mode: defines if MIPI data is encapsulated in some other
+ *	data type
+ * @mapped_dt: encapsulating MIPI data type (what the sensor sends)
+ */
+struct ia_css_isys_input_pin_info {
+	struct ia_css_isys_resolution input_res;
+	enum ia_css_isys_mipi_data_type dt;
+	enum ia_css_isys_mipi_store_mode mipi_store_mode;
+	enum ia_css_isys_mipi_dt_rename_mode dt_rename_mode;
+	enum ia_css_isys_mipi_data_type mapped_dt;
+};
+
+/**
+ * struct ia_css_isys_isa_cfg. Describes the ISA cfg
+ */
+struct ia_css_isys_isa_cfg {
+	/* The following sets the resolution information needed by the IS GP
+	 * registers.
+	 * For index IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED,
+	 * it is needed when there is a RAW_NS pin
+	 * For index IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED,
+	 * it is needed when there is a RAW_S pin
+	 */
+	struct ia_css_isys_resolution isa_res[N_IA_CSS_ISYS_RESOLUTION_INFO];
+	/* acc id 0, set if process required */
+	unsigned int blc_enabled;
+	/* acc id 1, set if process required */
+	unsigned int lsc_enabled;
+	/* acc id 2, set if process required */
+	unsigned int dpc_enabled;
+	/* acc id 3, set if process required */
+	unsigned int downscaler_enabled;
+	/* acc id 4, set if process required */
+	unsigned int awb_enabled;
+	/* acc id 5, set if process required */
+	unsigned int af_enabled;
+	/* acc id 6, set if process required */
+	unsigned int ae_enabled;
+	/* acc id 7, disabled, or type of paf enabled */
+	enum ia_css_isys_type_paf paf_type;
+	/* Send irq for any statistics buffers which got completed */
+	unsigned int send_irq_stats_ready;
+	/* Send response for any statistics buffers which got completed */
+	unsigned int send_resp_stats_ready;
+};
+
+/**
+ * struct ia_css_isys_cropping - cropping coordinates
+ * Left/Top offsets are INCLUDED
+ * Right/Bottom offsets are EXCLUDED
+ * Horizontal: [left_offset,right_offset)
+ * Vertical: [top_offset,bottom_offset)
+ * Padding is supported
+ */
+struct ia_css_isys_cropping {
+	int top_offset;
+	int left_offset;
+	int bottom_offset;
+	int right_offset;
+};
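+
+/*
+ * Worked example of the half-open crop convention above (numbers are
+ * illustrative): cropping a 1928x1088 input down to 1920x1080 with a
+ * 4-pixel/4-line border on each side would use
+ *
+ *	struct ia_css_isys_cropping crop = {
+ *		.top_offset = 4,
+ *		.left_offset = 4,
+ *		.bottom_offset = 1084,
+ *		.right_offset = 1924,
+ *	};
+ *
+ * giving right_offset - left_offset = 1920 columns and
+ * bottom_offset - top_offset = 1080 lines.
+ */
+
+/**
+ * struct ia_css_isys_stream_cfg_data
+ * ISYS stream configuration data structure
+ * @src: Stream source index e.g. 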
MIPI_generator_0, CSI2-rx_1
+ * @vc: MIPI Virtual Channel (up to 4 virtual channels per physical channel)
+ * @isl_use: indicates whether the stream requires ISL and how
+ * @compfmt: de-compression setting for User Defined Data
+ * @isa_cfg: details about which ACCs are active if ISA is used
+ * @crop: defines the cropping resolution for the
+ *	maximum number of input pins which can be cropped,
+ *	it is directly mapped to the HW devices
+ * @send_irq_sof_discarded: send irq on discarded frame sof response
+ *	- if '1' it will override the send_resp_sof_discarded and send
+ *	the response
+ *	- if '0' the send_resp_sof_discarded will determine whether to
+ *	send the response
+ * @send_irq_eof_discarded: send irq on discarded frame eof response
+ *	- if '1' it will override the send_resp_eof_discarded and send
+ *	the response
+ *	- if '0' the send_resp_eof_discarded will determine whether to
+ *	send the response
+ * @send_resp_sof_discarded: send response for discarded frame sof detected,
+ *	used only when send_irq_sof_discarded is '0'
+ * @send_resp_eof_discarded: send response for discarded frame eof detected,
+ *	used only when send_irq_eof_discarded is '0'
+ * @the rest: input/output pin counts and descriptors
+ */
+struct ia_css_isys_stream_cfg_data {
+	enum ia_css_isys_stream_source src;
+	enum ia_css_isys_mipi_vc vc;
+	enum ia_css_isys_isl_use isl_use;
+	unsigned int compfmt;
+	struct ia_css_isys_isa_cfg isa_cfg;
+	struct ia_css_isys_cropping crop[N_IA_CSS_ISYS_CROPPING_LOCATION];
+	unsigned int send_irq_sof_discarded;
+	unsigned int send_irq_eof_discarded;
+	unsigned int send_resp_sof_discarded;
+	unsigned int send_resp_eof_discarded;
+	unsigned int nof_input_pins;
+	unsigned int nof_output_pins;
+	struct ia_css_isys_input_pin_info input_pins[MAX_IPINS];
+	struct ia_css_isys_output_pin_info output_pins[MAX_OPINS];
+};
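+
+/*
+ * Illustrative stream-configuration sketch (all values are assumptions
+ * for the example, not requirements): a single RAW10 input pin on
+ * CSI-2 port 0, captured through the SoC path:
+ *
+ *	struct ia_css_isys_stream_cfg_data scfg = {0};
+ *
+ *	scfg.src = IA_CSS_ISYS_STREAM_SRC_CSI2_PORT0;
+ *	scfg.vc = IA_CSS_ISYS_MIPI_VC_0;
+ *	scfg.isl_use = IA_CSS_ISYS_USE_NO_ISL_NO_ISA;
+ *	scfg.nof_input_pins = 1;
+ *	scfg.input_pins[0].dt = IA_CSS_ISYS_MIPI_DATA_TYPE_RAW_10;
+ *	scfg.input_pins[0].input_res.width = 1920;
+ *	scfg.input_pins[0].input_res.height = 1080;
+ *	scfg.nof_output_pins = 1;
+ *	scfg.output_pins[0].input_pin_id = 0;
+ *	scfg.output_pins[0].pt = IA_CSS_ISYS_PIN_TYPE_RAW_SOC;
+ *	scfg.output_pins[0].ft = IA_CSS_ISYS_FRAME_FORMAT_RAW16;
+ */
+
+/**
+ * struct ia_css_isys_frame_buff_set - frame buffer set
+ * @output_pins: output pin addresses
+ * @process_group_light: process_group_light buffer address
+ * @send_irq_sof: send irq on frame sof response
+ *	- if '1' it will override the send_resp_sof and send
+ *	the response
+ *	- if '0' the send_resp_sof will determine whether to send
+ *	the response
+ * @send_irq_eof: send irq on frame eof response
+ *	- if '1' it will override the send_resp_eof and send
+ *	the response
+ *	- if '0' the send_resp_eof will determine whether to send
+ *	the response
+ * @send_irq_capture_ack: send irq on capture ack response
+ * @send_irq_capture_done: send irq on capture done response
+ * @send_resp_sof: send response for frame sof detected,
+ *	used only when send_irq_sof is '0'
+ * @send_resp_eof: send response for frame eof detected,
+ *	used only when send_irq_eof is '0'
+ * @frame_counter: frame number associated with this buffer set. 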
+ */ +struct ia_css_isys_frame_buff_set { + struct ia_css_isys_output_pin_payload output_pins[MAX_OPINS]; + struct ia_css_isys_param_pin process_group_light; + unsigned int send_irq_sof; + unsigned int send_irq_eof; + unsigned int send_irq_capture_ack; + unsigned int send_irq_capture_done; + unsigned int send_resp_sof; + unsigned int send_resp_eof; + uint8_t frame_counter; +}; + +/** + * struct ia_css_isys_resp_info + * @type: response type + * @stream_handle: stream id the response corresponds to + * @timestamp: Time information for event if available + * @error: error code if something went wrong + * @error_details: depending on error code, it may contain additional + * error info + * @pin: this var is valid for pin event related responses, + * contains pin addresses + * @pin_id: this var is valid for pin event related responses, + * contains pin id that the pin payload corresponds to + * @process_group_light: this var is valid for stats ready related responses, + * contains process group addresses + * @acc_id: this var is valid for stats ready related responses, + * contains accelerator id that finished producing + * all related statistics + * @frame_counter: valid for STREAM_START_AND_CAPTURE_DONE, + * STREAM_CAPTURE_DONE and STREAM_CAPTURE_DISCARDED + * @written_direct: indicates if frame was written direct (online mode) or to DDR. + */ +struct ia_css_isys_resp_info { + enum ia_css_isys_resp_type type; + unsigned int stream_handle; + unsigned int timestamp[2]; + enum ia_css_isys_error error; + unsigned int error_details; + struct ia_css_isys_output_pin_payload pin; + unsigned int pin_id; + struct ia_css_isys_param_pin process_group_light; + unsigned int acc_id; + uint8_t frame_counter; + uint8_t written_direct; +}; + +/** + * struct ia_css_proxy_write_req_val + * @request_id: Unique identifier for the write request + * (in case multiple write requests are issued for same register) + * @region_index: region id for the write request + * @offset: Offset to the specific register within the region + * @value: Value to be written to register + */ +struct ia_css_proxy_write_req_val { + uint32_t request_id; + uint32_t region_index; + uint32_t offset; + uint32_t value; +}; + +/** + * struct ia_css_proxy_write_req_resp + * @request_id: Unique identifier for the write request + * (in case multiple write requests are issued for same register) + * @error: error code if something went wrong + * @error_details: error detail includes either offset or region index + * information which caused proxy request to be rejected + * (invalid access request) + */ +struct ia_css_proxy_write_req_resp { + uint32_t request_id; + enum ia_css_proxy_error error; + uint32_t error_details; +}; + + +#endif /* __IA_CSS_ISYSAPI_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/isysapi.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/isysapi.mk new file mode 100644 index 000000000000..0d06298f9acb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/isysapi.mk @@ -0,0 +1,77 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. 
+# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is ISYSAPI + +include $(MODULES_DIR)/config/isys/subsystem_$(IPU_SYSVER).mk + +ISYSAPI_DIR=$${MODULES_DIR}/isysapi + +ISYSAPI_INTERFACE=$(ISYSAPI_DIR)/interface +ISYSAPI_SOURCES=$(ISYSAPI_DIR)/src +ISYSAPI_EXTINCLUDE=$${MODULES_DIR}/support +ISYSAPI_EXTINTERFACE=$${MODULES_DIR}/syscom/interface + +ISYSAPI_HOST_FILES += $(ISYSAPI_SOURCES)/ia_css_isys_public.c + +ISYSAPI_HOST_FILES += $(ISYSAPI_SOURCES)/ia_css_isys_private.c + +# ISYSAPI Trace Log Level = ISYSAPI_TRACE_LOG_LEVEL_NORMAL +# Other options are [ISYSAPI_TRACE_LOG_LEVEL_OFF, ISYSAPI_TRACE_LOG_LEVEL_DEBUG] +ifndef ISYSAPI_TRACE_CONFIG_HOST + ISYSAPI_TRACE_CONFIG_HOST=ISYSAPI_TRACE_LOG_LEVEL_NORMAL +endif +ifndef ISYSAPI_TRACE_CONFIG_FW + ISYSAPI_TRACE_CONFIG_FW=ISYSAPI_TRACE_LOG_LEVEL_NORMAL +endif + +ISYSAPI_HOST_CPPFLAGS += -DISYSAPI_TRACE_CONFIG=$(ISYSAPI_TRACE_CONFIG_HOST) +ISYSAPI_FW_CPPFLAGS += -DISYSAPI_TRACE_CONFIG=$(ISYSAPI_TRACE_CONFIG_FW) + +ISYSAPI_HOST_FILES += $(ISYSAPI_SOURCES)/ia_css_isys_public_trace.c + +ISYSAPI_HOST_CPPFLAGS += -I$(ISYSAPI_INTERFACE) +ISYSAPI_HOST_CPPFLAGS += -I$(ISYSAPI_EXTINCLUDE) +ISYSAPI_HOST_CPPFLAGS += -I$(ISYSAPI_EXTINTERFACE) +ISYSAPI_HOST_CPPFLAGS += -I$(HIVESDK)/systems/ipu_system/dai/include +ISYSAPI_HOST_CPPFLAGS += -I$(HIVESDK)/systems/ipu_system/dai/include/default_system +ISYSAPI_HOST_CPPFLAGS += -I$(HIVESDK)/include/ipu/dai +ISYSAPI_HOST_CPPFLAGS += -I$(HIVESDK)/include/ipu + +ISYSAPI_FW_FILES += $(ISYSAPI_SOURCES)/isys_fw.c +ISYSAPI_FW_FILES += $(ISYSAPI_SOURCES)/isys_fw_utils.c + +ISYSAPI_FW_CPPFLAGS += -I$(ISYSAPI_INTERFACE) +ISYSAPI_FW_CPPFLAGS += -I$(ISYSAPI_SOURCES)/$(IPU_SYSVER) +ISYSAPI_FW_CPPFLAGS += -I$(ISYSAPI_EXTINCLUDE) +ISYSAPI_FW_CPPFLAGS += -I$(ISYSAPI_EXTINTERFACE) +ISYSAPI_FW_CPPFLAGS += -I$(HIVESDK)/systems/ipu_system/dai/include +ISYSAPI_FW_CPPFLAGS += -I$(HIVESDK)/systems/ipu_system/dai/include/default_system +ISYSAPI_FW_CPPFLAGS += -I$(HIVESDK)/include/ipu/dai +ISYSAPI_FW_CPPFLAGS += -I$(HIVESDK)/include/ipu + +ISYSAPI_FW_CPPFLAGS += -DWA_HSD1805168877=$(WA_HSD1805168877) + +ISYSAPI_HOST_CPPFLAGS += -DREGMEM_OFFSET=$(REGMEM_OFFSET) + +ifeq ($(ISYS_HAS_DUAL_CMD_CTX_SUPPORT), 1) +ISYSAPI_HOST_CPPFLAGS += -DHAS_DUAL_CMD_CTX_SUPPORT=$(ISYS_HAS_DUAL_CMD_CTX_SUPPORT) +ISYSAPI_FW_CPPFLAGS += -DHAS_DUAL_CMD_CTX_SUPPORT=$(ISYS_HAS_DUAL_CMD_CTX_SUPPORT) +endif + +ifdef AB_CONFIG_ARRAY_SIZE +ISYSAPI_FW_CPPFLAGS += -DAB_CONFIG_ARRAY_SIZE=$(AB_CONFIG_ARRAY_SIZE) +else +ISYSAPI_FW_CPPFLAGS += -DAB_CONFIG_ARRAY_SIZE=1 +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_private.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_private.c new file mode 100644 index 000000000000..ec92f14ee238 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_private.c @@ -0,0 +1,981 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#include "ia_css_isys_private.h"
+/* The following is needed for the contained data types */
+#include "ia_css_isys_fw_bridged_types.h"
+#include "ia_css_isysapi_types.h"
+#include "ia_css_syscom_config.h"
+/*
+ * The following header file is needed for
+ * stddef.h (NULL),
+ * limits.h (CHAR_BIT definition).
+ */
+#include "type_support.h"
+#include "error_support.h"
+#include "ia_css_isysapi_trace.h"
+#include "misc_support.h"
+#include "cpu_mem_support.h"
+#include "storage_class.h"
+
+#include "ia_css_shared_buffer_cpu.h"
+
+/*
+ * Defines how many stream cfgs the host may send concurrently
+ * before receiving the stream ack
+ */
+#define STREAM_CFG_BUFS_PER_MSG_QUEUE (1)
+#define NEXT_FRAME_BUFS_PER_MSG_QUEUE \
+	(ctx->send_queue_size[IA_CSS_ISYS_QUEUE_TYPE_MSG] + 4 + 1)
+/*
+ * There is an edge case where the host has filled the full queue
+ * with capture requests (ctx->send_queue_size),
+ * SP reads and HW-queues all of them (4),
+ * while in the meantime the host continues queueing capture requests
+ * without checking for the responses which SP will have sent with each
+ * HW-queued capture request (if it does check, then the 4 is much more
+ * improbable to appear, but still not impossible).
+ * After this, the host tries to queue an extra capture request
+ * even though there is no space in the msg queue, because the msg queue
+ * is checked at a later point, so +1 is needed
+ */
+
+/*
+ * A DT is supported under the assumption that its MIPI packets
+ * have the same size; even when even/odd lines differ,
+ * the size listed is the average per line
+ */
+#define IA_CSS_UNSUPPORTED_DATA_TYPE (0)
+static const uint32_t
+ia_css_isys_extracted_bits_per_pixel_per_mipi_data_type[
+					N_IA_CSS_ISYS_MIPI_DATA_TYPE] = {
+	/*
+	 * Remove Prefix "IA_CSS_ISYS_MIPI_DATA_TYPE_" in comments
+	 * to align with Checkpatch 80 characters requirements
+	 * For detailed comments of each field, please refer to
+	 * definition of enum ia_css_isys_mipi_data_type{} in
+	 * isysapi/interface/ia_css_isysapi_fw_types.h
+	 */
+	64, /* [0x00] FRAME_START_CODE */
+	64, /* [0x01] FRAME_END_CODE */
+	64, /* [0x02] LINE_START_CODE Optional */
+	64, /* [0x03] LINE_END_CODE Optional */
+	IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x04] RESERVED_0x04 */
+	IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x05] RESERVED_0x05 */
+	IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x06] RESERVED_0x06 */
+	IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x07] RESERVED_0x07 */
+	64, /* [0x08] GENERIC_SHORT1 */
+	64, /* [0x09] GENERIC_SHORT2 */
+	64, /* [0x0A] GENERIC_SHORT3 */
+	64, /* [0x0B] GENERIC_SHORT4 */
+	64, /* [0x0C] GENERIC_SHORT5 */
+	64, /* [0x0D] GENERIC_SHORT6 */
+	64, /* [0x0E] GENERIC_SHORT7 */
+	64, /* [0x0F] GENERIC_SHORT8 */
+	IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x10] NULL To be ignored */
+	IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x11] BLANKING_DATA To be ignored */
+	8, /* [0x12] EMBEDDED non Image Data */
+	IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x13] RESERVED_0x13 */
+	IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x14] RESERVED_0x14 */
+	IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x15] RESERVED_0x15 */
+	IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x16] RESERVED_0x16 */
+	IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x17] RESERVED_0x17 */
+	12, /* [0x18] YUV420_8 */
+	15, /* [0x19] YUV420_10 */
+	12, /* [0x1A] YUV420_8_LEGACY */
+	IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x1B] RESERVED_0x1B */
+	12, /* 
[0x1C] YUV420_8_SHIFT */ + 15, /* [0x1D] YUV420_10_SHIFT */ + 16, /* [0x1E] YUV422_8 */ + 20, /* [0x1F] YUV422_10 */ + 16, /* [0x20] RGB_444 */ + 16, /* [0x21] RGB_555 */ + 16, /* [0x22] RGB_565 */ + 18, /* [0x23] RGB_666 */ + 24, /* [0x24] RGB_888 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x25] RESERVED_0x25 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x26] RESERVED_0x26 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x27] RESERVED_0x27 */ + 6, /* [0x28] RAW_6 */ + 7, /* [0x29] RAW_7 */ + 8, /* [0x2A] RAW_8 */ + 10, /* [0x2B] RAW_10 */ + 12, /* [0x2C] RAW_12 */ + 14, /* [0x2D] RAW_14 */ + 16, /* [0x2E] RAW_16 */ + 8, /* [0x2F] BINARY_8 */ + 8, /* [0x30] USER_DEF1 */ + 8, /* [0x31] USER_DEF2 */ + 8, /* [0x32] USER_DEF3 */ + 8, /* [0x33] USER_DEF4 */ + 8, /* [0x34] USER_DEF5 */ + 8, /* [0x35] USER_DEF6 */ + 8, /* [0x36] USER_DEF7 */ + 8, /* [0x37] USER_DEF8 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x38] RESERVED_0x38 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x39] RESERVED_0x39 */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3A] RESERVED_0x3A */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3B] RESERVED_0x3B */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3C] RESERVED_0x3C */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3D] RESERVED_0x3D */ + IA_CSS_UNSUPPORTED_DATA_TYPE, /* [0x3E] RESERVED_0x3E */ + IA_CSS_UNSUPPORTED_DATA_TYPE /* [0x3F] RESERVED_0x3F */ +}; + +STORAGE_CLASS_INLINE int get_stream_cfg_buff_slot( + struct ia_css_isys_context *ctx, + int stream_handle, + int stream_cfg_buff_counter) +{ + NOT_USED(ctx); + return (stream_handle * STREAM_CFG_BUFS_PER_MSG_QUEUE) + + stream_cfg_buff_counter; +} + +STORAGE_CLASS_INLINE int get_next_frame_buff_slot( + struct ia_css_isys_context *ctx, + int stream_handle, + int next_frame_buff_counter) +{ + NOT_USED(ctx); + return (stream_handle * NEXT_FRAME_BUFS_PER_MSG_QUEUE) + + next_frame_buff_counter; +} + +STORAGE_CLASS_INLINE void free_comm_buff_shared_mem( + struct ia_css_isys_context *ctx, + int stream_handle, + int stream_cfg_buff_counter, + int next_frame_buff_counter) +{ + int buff_slot; + + /* Initialiser is the current value of stream_handle */ + for (; stream_handle >= 0; stream_handle--) { + /* + * Initialiser is the current value of stream_cfg_buff_counter + */ + for (; stream_cfg_buff_counter >= 0; + stream_cfg_buff_counter--) { + buff_slot = get_stream_cfg_buff_slot( + ctx, stream_handle, stream_cfg_buff_counter); + ia_css_shared_buffer_free( + ctx->ssid, ctx->mmid, + ctx->isys_comm_buffer_queue. + pstream_cfg_buff_id[buff_slot]); + } + /* Set for the next iteration */ + stream_cfg_buff_counter = STREAM_CFG_BUFS_PER_MSG_QUEUE - 1; + /* + * Initialiser is the current value of next_frame_buff_counter + */ + for (; next_frame_buff_counter >= 0; + next_frame_buff_counter--) { + buff_slot = get_next_frame_buff_slot( + ctx, stream_handle, next_frame_buff_counter); + ia_css_shared_buffer_free( + ctx->ssid, ctx->mmid, + ctx->isys_comm_buffer_queue. 
+ pnext_frame_buff_id[buff_slot]); + } + next_frame_buff_counter = NEXT_FRAME_BUFS_PER_MSG_QUEUE - 1; + } +} + +/* + * ia_css_isys_constr_comm_buff_queue() + */ +int ia_css_isys_constr_comm_buff_queue( + struct ia_css_isys_context *ctx) +{ + int stream_handle; + int stream_cfg_buff_counter; + int next_frame_buff_counter; + int buff_slot; + + verifret(ctx, EFAULT); /* Host Consistency */ + + ctx->isys_comm_buffer_queue.pstream_cfg_buff_id = + (ia_css_shared_buffer *) + ia_css_cpu_mem_alloc(ctx-> + num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] * + STREAM_CFG_BUFS_PER_MSG_QUEUE * + sizeof(ia_css_shared_buffer)); + verifret(ctx->isys_comm_buffer_queue.pstream_cfg_buff_id != NULL, + EFAULT); + + ctx->isys_comm_buffer_queue.pnext_frame_buff_id = + (ia_css_shared_buffer *) + ia_css_cpu_mem_alloc(ctx-> + num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] * + NEXT_FRAME_BUFS_PER_MSG_QUEUE * + sizeof(ia_css_shared_buffer)); + if (ctx->isys_comm_buffer_queue.pnext_frame_buff_id == NULL) { + ia_css_cpu_mem_free( + ctx->isys_comm_buffer_queue.pstream_cfg_buff_id); + verifret(0, EFAULT); /* return EFAULT; equivalent */ + } + + for (stream_handle = 0; stream_handle < + (int)ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + stream_handle++) { + /* Initialisation needs to happen here for both loops */ + stream_cfg_buff_counter = 0; + next_frame_buff_counter = 0; + + for (; stream_cfg_buff_counter < STREAM_CFG_BUFS_PER_MSG_QUEUE; + stream_cfg_buff_counter++) { + buff_slot = get_stream_cfg_buff_slot( + ctx, stream_handle, stream_cfg_buff_counter); + ctx->isys_comm_buffer_queue. + pstream_cfg_buff_id[buff_slot] = + ia_css_shared_buffer_alloc( + ctx->ssid, ctx->mmid, + sizeof(struct + ia_css_isys_stream_cfg_data_comm)); + if (ctx->isys_comm_buffer_queue.pstream_cfg_buff_id[ + buff_slot] == 0) { + goto SHARED_BUFF_ALLOC_FAILURE; + } + } + ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle] = 0; + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle] = 0; + for (; next_frame_buff_counter < + (int)NEXT_FRAME_BUFS_PER_MSG_QUEUE; + next_frame_buff_counter++) { + buff_slot = get_next_frame_buff_slot( + ctx, stream_handle, + next_frame_buff_counter); + ctx->isys_comm_buffer_queue. + pnext_frame_buff_id[buff_slot] = + ia_css_shared_buffer_alloc( + ctx->ssid, ctx->mmid, + sizeof(struct + ia_css_isys_frame_buff_set_comm)); + if (ctx->isys_comm_buffer_queue. + pnext_frame_buff_id[buff_slot] == 0) { + goto SHARED_BUFF_ALLOC_FAILURE; + } + } + ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle] = 0; + ctx->isys_comm_buffer_queue. 
+ next_frame_queue_tail[stream_handle] = 0; + } + + return 0; + +SHARED_BUFF_ALLOC_FAILURE: + /* stream_handle has correct value for calling the free function */ + /* prepare stream_cfg_buff_counter for calling the free function */ + stream_cfg_buff_counter--; + /* prepare next_frame_buff_counter for calling the free function */ + next_frame_buff_counter--; + free_comm_buff_shared_mem( + ctx, + stream_handle, + stream_cfg_buff_counter, + next_frame_buff_counter); + + verifret(0, EFAULT); /* return EFAULT; equivalent */ +} + +/* + * ia_css_isys_force_unmap_comm_buff_queue() + */ +int ia_css_isys_force_unmap_comm_buff_queue( + struct ia_css_isys_context *ctx) +{ + int stream_handle; + int buff_slot; + + verifret(ctx, EFAULT); /* Host Consistency */ + + IA_CSS_TRACE_0(ISYSAPI, WARNING, + "ia_css_isys_force_unmap_comm_buff_queue() called\n"); + for (stream_handle = 0; stream_handle < + (int)ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + stream_handle++) { + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle] - + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle]) <= + STREAM_CFG_BUFS_PER_MSG_QUEUE, EPROTO); + for (; ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle] < + ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle]; + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle]++) { + IA_CSS_TRACE_1(ISYSAPI, WARNING, + "CSS forced unmapping stream_cfg %d\n", + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle]); + buff_slot = get_stream_cfg_buff_slot( + ctx, stream_handle, + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle] % + STREAM_CFG_BUFS_PER_MSG_QUEUE); + ia_css_shared_buffer_css_unmap( + ctx->isys_comm_buffer_queue. + pstream_cfg_buff_id[buff_slot]); + } + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle] - + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle]) <= + NEXT_FRAME_BUFS_PER_MSG_QUEUE, EPROTO); + for (; ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle] < + ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle]; + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle]++) { + IA_CSS_TRACE_1(ISYSAPI, WARNING, + "CSS forced unmapping next_frame %d\n", + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle]); + buff_slot = get_next_frame_buff_slot( + ctx, stream_handle, + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle] % + NEXT_FRAME_BUFS_PER_MSG_QUEUE); + ia_css_shared_buffer_css_unmap( + ctx->isys_comm_buffer_queue. 
+ pnext_frame_buff_id[buff_slot]); + } + } + + return 0; +} + +/* + * ia_css_isys_destr_comm_buff_queue() + */ +int ia_css_isys_destr_comm_buff_queue( + struct ia_css_isys_context *ctx) +{ + verifret(ctx, EFAULT); /* Host Consistency */ + + free_comm_buff_shared_mem( + ctx, + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] - 1, + STREAM_CFG_BUFS_PER_MSG_QUEUE - 1, + NEXT_FRAME_BUFS_PER_MSG_QUEUE - 1); + + ia_css_cpu_mem_free(ctx->isys_comm_buffer_queue.pnext_frame_buff_id); + ia_css_cpu_mem_free(ctx->isys_comm_buffer_queue.pstream_cfg_buff_id); + + return 0; +} + +STORAGE_CLASS_INLINE void resolution_host_to_css( + const struct ia_css_isys_resolution *resolution_host, + struct ia_css_isys_resolution_comm *resolution_css) +{ + resolution_css->width = resolution_host->width; + resolution_css->height = resolution_host->height; +} + +STORAGE_CLASS_INLINE void output_pin_payload_host_to_css( + const struct ia_css_isys_output_pin_payload *output_pin_payload_host, + struct ia_css_isys_output_pin_payload_comm *output_pin_payload_css) +{ + output_pin_payload_css->out_buf_id = + output_pin_payload_host->out_buf_id; + output_pin_payload_css->addr = output_pin_payload_host->addr; +#ifdef ENABLE_DEC400 + output_pin_payload_css->compress = output_pin_payload_host->compress; +#else + output_pin_payload_css->compress = 0; +#endif /* ENABLE_DEC400 */ +} + +STORAGE_CLASS_INLINE void output_pin_info_host_to_css( + const struct ia_css_isys_output_pin_info *output_pin_info_host, + struct ia_css_isys_output_pin_info_comm *output_pin_info_css) +{ + output_pin_info_css->input_pin_id = output_pin_info_host->input_pin_id; + resolution_host_to_css( + &output_pin_info_host->output_res, + &output_pin_info_css->output_res); + output_pin_info_css->stride = output_pin_info_host->stride; + output_pin_info_css->pt = output_pin_info_host->pt; + output_pin_info_css->watermark_in_lines = + output_pin_info_host->watermark_in_lines; + output_pin_info_css->send_irq = output_pin_info_host->send_irq; + output_pin_info_css->ft = output_pin_info_host->ft; + output_pin_info_css->link_id = output_pin_info_host->link_id; +#ifdef ENABLE_DEC400 + output_pin_info_css->reserve_compression = output_pin_info_host->reserve_compression; + output_pin_info_css->payload_buf_size = output_pin_info_host->payload_buf_size; +#else + output_pin_info_css->reserve_compression = 0; + /* Though payload_buf_size was added for compression, set sane value for + * payload_buf_size, just in case... 
+ */ + output_pin_info_css->payload_buf_size = + output_pin_info_host->stride * output_pin_info_host->output_res.height; +#endif /* ENABLE_DEC400 */ +} + +STORAGE_CLASS_INLINE void param_pin_host_to_css( + const struct ia_css_isys_param_pin *param_pin_host, + struct ia_css_isys_param_pin_comm *param_pin_css) +{ + param_pin_css->param_buf_id = param_pin_host->param_buf_id; + param_pin_css->addr = param_pin_host->addr; +} + +STORAGE_CLASS_INLINE void input_pin_info_host_to_css( + const struct ia_css_isys_input_pin_info *input_pin_info_host, + struct ia_css_isys_input_pin_info_comm *input_pin_info_css) +{ + resolution_host_to_css( + &input_pin_info_host->input_res, + &input_pin_info_css->input_res); + if (input_pin_info_host->dt >= N_IA_CSS_ISYS_MIPI_DATA_TYPE) { + IA_CSS_TRACE_0(ISYSAPI, ERROR, + "input_pin_info_host->dt out of range\n"); + return; + } + if (input_pin_info_host->dt_rename_mode >= N_IA_CSS_ISYS_MIPI_DT_MODE) { + IA_CSS_TRACE_0(ISYSAPI, ERROR, + "input_pin_info_host->dt_rename_mode out of range\n"); + return; + } + /* Mapped DT check if data type renaming is being used*/ + if (input_pin_info_host->dt_rename_mode == IA_CSS_ISYS_MIPI_DT_RENAMED_MODE && + input_pin_info_host->mapped_dt >= N_IA_CSS_ISYS_MIPI_DATA_TYPE) { + IA_CSS_TRACE_0(ISYSAPI, ERROR, + "input_pin_info_host->mapped_dt out of range\n"); + return; + } + input_pin_info_css->dt = input_pin_info_host->dt; + input_pin_info_css->mipi_store_mode = + input_pin_info_host->mipi_store_mode; + input_pin_info_css->bits_per_pix = + ia_css_isys_extracted_bits_per_pixel_per_mipi_data_type[ + input_pin_info_host->dt]; + if (input_pin_info_host->dt_rename_mode == IA_CSS_ISYS_MIPI_DT_RENAMED_MODE) { + input_pin_info_css->mapped_dt = input_pin_info_host->mapped_dt; + } + else { + input_pin_info_css->mapped_dt = N_IA_CSS_ISYS_MIPI_DATA_TYPE; + } +} + +STORAGE_CLASS_INLINE void isa_cfg_host_to_css( + const struct ia_css_isys_isa_cfg *isa_cfg_host, + struct ia_css_isys_isa_cfg_comm *isa_cfg_css) +{ + unsigned int i; + + for (i = 0; i < N_IA_CSS_ISYS_RESOLUTION_INFO; i++) { + resolution_host_to_css(&isa_cfg_host->isa_res[i], + &isa_cfg_css->isa_res[i]); + } + isa_cfg_css->cfg_fields = 0; + ISA_CFG_FIELD_SET(BLC_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->blc_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(LSC_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->lsc_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(DPC_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->dpc_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(DOWNSCALER_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->downscaler_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(AWB_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->awb_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(AF_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->af_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(AE_EN, isa_cfg_css->cfg_fields, + isa_cfg_host->ae_enabled ? 1 : 0); + ISA_CFG_FIELD_SET(PAF_TYPE, isa_cfg_css->cfg_fields, + isa_cfg_host->paf_type); + ISA_CFG_FIELD_SET(SEND_IRQ_STATS_READY, isa_cfg_css->cfg_fields, + isa_cfg_host->send_irq_stats_ready ? 1 : 0); + ISA_CFG_FIELD_SET(SEND_RESP_STATS_READY, isa_cfg_css->cfg_fields, + (isa_cfg_host->send_irq_stats_ready || + isa_cfg_host->send_resp_stats_ready) ? 
1 : 0); +} + +STORAGE_CLASS_INLINE void cropping_host_to_css( + const struct ia_css_isys_cropping *cropping_host, + struct ia_css_isys_cropping_comm *cropping_css) +{ + cropping_css->top_offset = cropping_host->top_offset; + cropping_css->left_offset = cropping_host->left_offset; + cropping_css->bottom_offset = cropping_host->bottom_offset; + cropping_css->right_offset = cropping_host->right_offset; + +} + +STORAGE_CLASS_INLINE int stream_cfg_data_host_to_css( + const struct ia_css_isys_stream_cfg_data *stream_cfg_data_host, + struct ia_css_isys_stream_cfg_data_comm *stream_cfg_data_css) +{ + unsigned int i; + + stream_cfg_data_css->src = stream_cfg_data_host->src; + stream_cfg_data_css->vc = stream_cfg_data_host->vc; + stream_cfg_data_css->isl_use = stream_cfg_data_host->isl_use; + stream_cfg_data_css->compfmt = stream_cfg_data_host->compfmt; + stream_cfg_data_css->isa_cfg.cfg_fields = 0; + + switch (stream_cfg_data_host->isl_use) { + case IA_CSS_ISYS_USE_SINGLE_ISA: + isa_cfg_host_to_css(&stream_cfg_data_host->isa_cfg, + &stream_cfg_data_css->isa_cfg); + /* deliberate fall-through */ + case IA_CSS_ISYS_USE_SINGLE_DUAL_ISL: + for (i = 0; i < N_IA_CSS_ISYS_CROPPING_LOCATION; i++) { + cropping_host_to_css(&stream_cfg_data_host->crop[i], + &stream_cfg_data_css->crop[i]); + } + break; + case IA_CSS_ISYS_USE_NO_ISL_NO_ISA: + break; + default: + break; + } + + stream_cfg_data_css->send_irq_sof_discarded = + stream_cfg_data_host->send_irq_sof_discarded ? 1 : 0; + stream_cfg_data_css->send_irq_eof_discarded = + stream_cfg_data_host->send_irq_eof_discarded ? 1 : 0; + stream_cfg_data_css->send_resp_sof_discarded = + stream_cfg_data_host->send_irq_sof_discarded ? + 1 : stream_cfg_data_host->send_resp_sof_discarded; + stream_cfg_data_css->send_resp_eof_discarded = + stream_cfg_data_host->send_irq_eof_discarded ? + 1 : stream_cfg_data_host->send_resp_eof_discarded; + stream_cfg_data_css->nof_input_pins = + stream_cfg_data_host->nof_input_pins; + stream_cfg_data_css->nof_output_pins = + stream_cfg_data_host->nof_output_pins; + for (i = 0; i < stream_cfg_data_host->nof_input_pins; i++) { + input_pin_info_host_to_css( + &stream_cfg_data_host->input_pins[i], + &stream_cfg_data_css->input_pins[i]); + verifret(stream_cfg_data_css->input_pins[i].bits_per_pix, + EINVAL); + } + for (i = 0; i < stream_cfg_data_host->nof_output_pins; i++) { + output_pin_info_host_to_css( + &stream_cfg_data_host->output_pins[i], + &stream_cfg_data_css->output_pins[i]); + } + return 0; +} + +STORAGE_CLASS_INLINE void frame_buff_set_host_to_css( + const struct ia_css_isys_frame_buff_set *frame_buff_set_host, + struct ia_css_isys_frame_buff_set_comm *frame_buff_set_css) +{ + int i; + + for (i = 0; i < MAX_OPINS; i++) { + output_pin_payload_host_to_css( + &frame_buff_set_host->output_pins[i], + &frame_buff_set_css->output_pins[i]); + } + + param_pin_host_to_css(&frame_buff_set_host->process_group_light, + &frame_buff_set_css->process_group_light); + frame_buff_set_css->send_irq_sof = + frame_buff_set_host->send_irq_sof ? 1 : 0; + frame_buff_set_css->send_irq_eof = + frame_buff_set_host->send_irq_eof ? 1 : 0; + frame_buff_set_css->send_irq_capture_done = + (uint8_t)frame_buff_set_host->send_irq_capture_done; + frame_buff_set_css->send_irq_capture_ack = + frame_buff_set_host->send_irq_capture_ack ? 1 : 0; + frame_buff_set_css->send_resp_sof = + frame_buff_set_host->send_irq_sof ? + 1 : frame_buff_set_host->send_resp_sof; + frame_buff_set_css->send_resp_eof = + frame_buff_set_host->send_irq_eof ? 
+ 1 : frame_buff_set_host->send_resp_eof; + frame_buff_set_css->frame_counter = + frame_buff_set_host->frame_counter; +} + +STORAGE_CLASS_INLINE void buffer_partition_host_to_css( + const struct ia_css_isys_buffer_partition *buffer_partition_host, + struct ia_css_isys_buffer_partition_comm *buffer_partition_css) +{ + int i; + + for (i = 0; i < STREAM_ID_MAX; i++) { + buffer_partition_css->num_gda_pages[i] = + buffer_partition_host->num_gda_pages[i]; + } +} + +STORAGE_CLASS_INLINE void output_pin_payload_css_to_host( + const struct ia_css_isys_output_pin_payload_comm * + output_pin_payload_css, + struct ia_css_isys_output_pin_payload *output_pin_payload_host) +{ + output_pin_payload_host->out_buf_id = + output_pin_payload_css->out_buf_id; + output_pin_payload_host->addr = output_pin_payload_css->addr; +#ifdef ENABLE_DEC400 + output_pin_payload_host->compress = output_pin_payload_css->compress; +#else + output_pin_payload_host->compress = 0; +#endif /* ENABLE_DEC400 */ +} + +STORAGE_CLASS_INLINE void param_pin_css_to_host( + const struct ia_css_isys_param_pin_comm *param_pin_css, + struct ia_css_isys_param_pin *param_pin_host) +{ + param_pin_host->param_buf_id = param_pin_css->param_buf_id; + param_pin_host->addr = param_pin_css->addr; + +} + +STORAGE_CLASS_INLINE void resp_info_css_to_host( + const struct ia_css_isys_resp_info_comm *resp_info_css, + struct ia_css_isys_resp_info *resp_info_host) +{ + resp_info_host->type = resp_info_css->type; + resp_info_host->timestamp[0] = resp_info_css->timestamp[0]; + resp_info_host->timestamp[1] = resp_info_css->timestamp[1]; + resp_info_host->stream_handle = resp_info_css->stream_handle; + resp_info_host->error = resp_info_css->error_info.error; + resp_info_host->error_details = + resp_info_css->error_info.error_details; + output_pin_payload_css_to_host( + &resp_info_css->pin, &resp_info_host->pin); + resp_info_host->pin_id = resp_info_css->pin_id; + param_pin_css_to_host(&resp_info_css->process_group_light, + &resp_info_host->process_group_light); + resp_info_host->acc_id = resp_info_css->acc_id; + resp_info_host->frame_counter = resp_info_css->frame_counter; + resp_info_host->written_direct = resp_info_css->written_direct; +} + +/* + * ia_css_isys_constr_fw_stream_cfg() + */ +int ia_css_isys_constr_fw_stream_cfg( + struct ia_css_isys_context *ctx, + const unsigned int stream_handle, + ia_css_shared_buffer_css_address *pstream_cfg_fw, + ia_css_shared_buffer *pbuf_stream_cfg_id, + const struct ia_css_isys_stream_cfg_data *stream_cfg) +{ + ia_css_shared_buffer_cpu_address stream_cfg_cpu_addr; + ia_css_shared_buffer_css_address stream_cfg_css_addr; + int buff_slot; + int retval = 0; + unsigned int wrap_compensation; + const unsigned int wrap_condition = 0xFFFFFFFF; + + verifret(ctx, EFAULT); /* Host Consistency */ + verifret(pstream_cfg_fw, EFAULT); /* Host Consistency */ + verifret(pbuf_stream_cfg_id, EFAULT); /* Host Consistency */ + verifret(stream_cfg, EFAULT); /* Host Consistency */ + + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle] - + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle]) < + STREAM_CFG_BUFS_PER_MSG_QUEUE, EPROTO); + buff_slot = get_stream_cfg_buff_slot(ctx, stream_handle, + ctx->isys_comm_buffer_queue. 
+ stream_cfg_queue_head[stream_handle] % + STREAM_CFG_BUFS_PER_MSG_QUEUE); + *pbuf_stream_cfg_id = + ctx->isys_comm_buffer_queue.pstream_cfg_buff_id[buff_slot]; + /* Host-FW Consistency */ + verifret(*pbuf_stream_cfg_id, EADDRNOTAVAIL); + + stream_cfg_cpu_addr = + ia_css_shared_buffer_cpu_map(*pbuf_stream_cfg_id); + /* Host-FW Consistency */ + verifret(stream_cfg_cpu_addr, EADDRINUSE); + + retval = stream_cfg_data_host_to_css(stream_cfg, stream_cfg_cpu_addr); + if (retval) + return retval; + + stream_cfg_cpu_addr = + ia_css_shared_buffer_cpu_unmap(*pbuf_stream_cfg_id); + /* Host Consistency */ + verifret(stream_cfg_cpu_addr, EADDRINUSE); + + stream_cfg_css_addr = + ia_css_shared_buffer_css_map(*pbuf_stream_cfg_id); + /* Host Consistency */ + verifret(stream_cfg_css_addr, EADDRINUSE); + + ia_css_shared_buffer_css_update(ctx->mmid, *pbuf_stream_cfg_id); + + *pstream_cfg_fw = stream_cfg_css_addr; + + /* + * cover head wrap around extreme case, + * in which case force tail to wrap around too + * while maintaining diff and modulo + */ + if (ctx->isys_comm_buffer_queue.stream_cfg_queue_head[stream_handle] == + wrap_condition) { + /* Value to be added to both head and tail */ + wrap_compensation = + /* + * Distance of wrap_condition to 0, + * will need to be added for wrapping around head to 0 + */ + (0 - wrap_condition) + + /* + * To force tail to also wrap around, + * since it has to happen concurrently + */ + STREAM_CFG_BUFS_PER_MSG_QUEUE + + /* To preserve the same modulo, + * since the previous will result in head modulo 0 + */ + (wrap_condition % STREAM_CFG_BUFS_PER_MSG_QUEUE); + ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[stream_handle] += + wrap_compensation; + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[stream_handle] += + wrap_compensation; + } + ctx->isys_comm_buffer_queue.stream_cfg_queue_head[stream_handle]++; + + return 0; +} + +/* + * ia_css_isys_constr_fw_next_frame() + */ +int ia_css_isys_constr_fw_next_frame( + struct ia_css_isys_context *ctx, + const unsigned int stream_handle, + ia_css_shared_buffer_css_address *pnext_frame_fw, + ia_css_shared_buffer *pbuf_next_frame_id, + const struct ia_css_isys_frame_buff_set *next_frame) +{ + ia_css_shared_buffer_cpu_address next_frame_cpu_addr; + ia_css_shared_buffer_css_address next_frame_css_addr; + int buff_slot; + unsigned int wrap_compensation; + const unsigned int wrap_condition = 0xFFFFFFFF; + + verifret(ctx, EFAULT); /* Host Consistency */ + verifret(pnext_frame_fw, EFAULT); /* Host Consistency */ + verifret(next_frame, EFAULT); /* Host Consistency */ + verifret(pbuf_next_frame_id, EFAULT); /* Host Consistency */ + + /* For some reason responses are not dequeued in time */ + verifret((ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle] - + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle]) < + NEXT_FRAME_BUFS_PER_MSG_QUEUE, EPERM); + buff_slot = get_next_frame_buff_slot(ctx, stream_handle, + ctx->isys_comm_buffer_queue. 
+ next_frame_queue_head[stream_handle] % + NEXT_FRAME_BUFS_PER_MSG_QUEUE); + *pbuf_next_frame_id = + ctx->isys_comm_buffer_queue.pnext_frame_buff_id[buff_slot]; + /* Host-FW Consistency */ + verifret(*pbuf_next_frame_id, EADDRNOTAVAIL); + + /* map it in cpu */ + next_frame_cpu_addr = + ia_css_shared_buffer_cpu_map(*pbuf_next_frame_id); + /* Host-FW Consistency */ + verifret(next_frame_cpu_addr, EADDRINUSE); + + frame_buff_set_host_to_css(next_frame, next_frame_cpu_addr); + + /* unmap the buffer from cpu */ + next_frame_cpu_addr = + ia_css_shared_buffer_cpu_unmap(*pbuf_next_frame_id); + /* Host Consistency */ + verifret(next_frame_cpu_addr, EADDRINUSE); + + /* map it to css */ + next_frame_css_addr = + ia_css_shared_buffer_css_map(*pbuf_next_frame_id); + /* Host Consistency */ + verifret(next_frame_css_addr, EADDRINUSE); + + ia_css_shared_buffer_css_update(ctx->mmid, *pbuf_next_frame_id); + + *pnext_frame_fw = next_frame_css_addr; + + /* + * cover head wrap around extreme case, + * in which case force tail to wrap around too + * while maintaining diff and modulo + */ + if (ctx->isys_comm_buffer_queue.next_frame_queue_head[stream_handle] == + wrap_condition) { + /* Value to be added to both head and tail */ + wrap_compensation = + /* + * Distance of wrap_condition to 0, + * will need to be added for wrapping around head to 0 + */ + (0 - wrap_condition) + + /* + * To force tail to also wrap around, + * since it has to happen concurrently + */ + NEXT_FRAME_BUFS_PER_MSG_QUEUE + + /* + * To preserve the same modulo, + * since the previous will result in head modulo 0 + */ + (wrap_condition % NEXT_FRAME_BUFS_PER_MSG_QUEUE); + ctx->isys_comm_buffer_queue. + next_frame_queue_head[stream_handle] += + wrap_compensation; + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[stream_handle] += + wrap_compensation; + } + ctx->isys_comm_buffer_queue.next_frame_queue_head[stream_handle]++; + + return 0; +} + +/* + * ia_css_isys_extract_fw_response() + */ +int ia_css_isys_extract_fw_response( + struct ia_css_isys_context *ctx, + const struct resp_queue_token *token, + struct ia_css_isys_resp_info *received_response) +{ + int buff_slot; + unsigned int css_address; + + verifret(ctx, EFAULT); /* Host Consistency */ + verifret(token, EFAULT); /* Host Consistency */ + verifret(received_response, EFAULT); /* Host Consistency */ + + resp_info_css_to_host(&(token->resp_info), received_response); + + switch (token->resp_info.type) { + case IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE: + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. + stream_cfg_queue_head[token->resp_info.stream_handle] - + ctx->isys_comm_buffer_queue.stream_cfg_queue_tail[ + token->resp_info.stream_handle]) > 0, EPROTO); + buff_slot = get_stream_cfg_buff_slot(ctx, + token->resp_info.stream_handle, + ctx->isys_comm_buffer_queue. + stream_cfg_queue_tail[ + token->resp_info.stream_handle] % + STREAM_CFG_BUFS_PER_MSG_QUEUE); + verifret((ia_css_shared_buffer)HOST_ADDRESS( + token->resp_info.buf_id) == + ctx->isys_comm_buffer_queue. + pstream_cfg_buff_id[buff_slot], EIO); + ctx->isys_comm_buffer_queue.stream_cfg_queue_tail[ + token->resp_info.stream_handle]++; + css_address = ia_css_shared_buffer_css_unmap( + (ia_css_shared_buffer) + HOST_ADDRESS(token->resp_info.buf_id)); + verifret(css_address, EADDRINUSE); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK: + /* Host-FW Consistency */ + verifret((ctx->isys_comm_buffer_queue. 
+ next_frame_queue_head[token->resp_info.stream_handle] - + ctx->isys_comm_buffer_queue.next_frame_queue_tail[ + token->resp_info.stream_handle]) > 0, EPROTO); + buff_slot = get_next_frame_buff_slot(ctx, + token->resp_info.stream_handle, + ctx->isys_comm_buffer_queue. + next_frame_queue_tail[ + token->resp_info.stream_handle] % + NEXT_FRAME_BUFS_PER_MSG_QUEUE); + verifret((ia_css_shared_buffer)HOST_ADDRESS( + token->resp_info.buf_id) == + ctx->isys_comm_buffer_queue. + pnext_frame_buff_id[buff_slot], EIO); + ctx->isys_comm_buffer_queue.next_frame_queue_tail[ + token->resp_info.stream_handle]++; + css_address = ia_css_shared_buffer_css_unmap( + (ia_css_shared_buffer) + HOST_ADDRESS(token->resp_info.buf_id)); + verifret(css_address, EADDRINUSE); + break; + default: + break; + } + + return 0; +} + +/* + * ia_css_isys_extract_proxy_response() + */ +int ia_css_isys_extract_proxy_response( + const struct proxy_resp_queue_token *token, + struct ia_css_proxy_write_req_resp *preceived_response) +{ + verifret(token, EFAULT); /* Host Consistency */ + verifret(preceived_response, EFAULT); /* Host Consistency */ + + preceived_response->request_id = token->proxy_resp_info.request_id; + preceived_response->error = token->proxy_resp_info.error_info.error; + preceived_response->error_details = + token->proxy_resp_info.error_info.error_details; + + return 0; +} + +/* + * ia_css_isys_prepare_param() + */ +int ia_css_isys_prepare_param( + struct ia_css_isys_fw_config *isys_fw_cfg, + const struct ia_css_isys_buffer_partition *buf_partition, + const unsigned int num_send_queues[], + const unsigned int num_recv_queues[]) +{ + unsigned int i; + + verifret(isys_fw_cfg, EFAULT); /* Host Consistency */ + verifret(buf_partition, EFAULT); /* Host Consistency */ + verifret(num_send_queues, EFAULT); /* Host Consistency */ + verifret(num_recv_queues, EFAULT); /* Host Consistency */ + + buffer_partition_host_to_css(buf_partition, + &isys_fw_cfg->buffer_partition); + for (i = 0; i < N_IA_CSS_ISYS_QUEUE_TYPE; i++) { + isys_fw_cfg->num_send_queues[i] = num_send_queues[i]; + isys_fw_cfg->num_recv_queues[i] = num_recv_queues[i]; + } + + return 0; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_private.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_private.h new file mode 100644 index 000000000000..d53fa53c9a81 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_private.h @@ -0,0 +1,156 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_ISYS_PRIVATE_H +#define __IA_CSS_ISYS_PRIVATE_H + + +#include "type_support.h" +/* Needed for the structure member ia_css_sys_context * sys */ +#include "ia_css_syscom.h" +/* Needed for the definitions of STREAM_ID_MAX */ +#include "ia_css_isysapi.h" +/* The following is needed for the function arguments */ +#include "ia_css_isys_fw_bridged_types.h" + +#include "ia_css_shared_buffer.h" + + +/* Set for the respective error handling */ +#define VERIFY_DEVSTATE 1 + +#if (VERIFY_DEVSTATE != 0) +/** + * enum device_state + */ +enum device_state { + IA_CSS_ISYS_DEVICE_STATE_IDLE = 0, + IA_CSS_ISYS_DEVICE_STATE_CONFIGURED = 1, + IA_CSS_ISYS_DEVICE_STATE_READY = 2 +}; +#endif /* VERIFY_DEVSTATE */ + +/** + * enum stream_state + */ +enum stream_state { + IA_CSS_ISYS_STREAM_STATE_IDLE = 0, + IA_CSS_ISYS_STREAM_STATE_OPENED = 1, + IA_CSS_ISYS_STREAM_STATE_STARTED = 2 +}; + + +/** + * struct ia_css_isys_comm_buffer_queue + */ +struct ia_css_isys_comm_buffer_queue { + ia_css_shared_buffer *pstream_cfg_buff_id; + unsigned int stream_cfg_queue_head[STREAM_ID_MAX]; + unsigned int stream_cfg_queue_tail[STREAM_ID_MAX]; + ia_css_shared_buffer *pnext_frame_buff_id; + unsigned int next_frame_queue_head[STREAM_ID_MAX]; + unsigned int next_frame_queue_tail[STREAM_ID_MAX]; +}; + + +/** + * struct ia_css_isys_context + */ +struct ia_css_isys_context { + struct ia_css_syscom_context *sys; + /* add here any isys specific members that need + to be passed into the isys api functions as input */ + unsigned int ssid; + unsigned int mmid; + unsigned int num_send_queues[N_IA_CSS_ISYS_QUEUE_TYPE]; + unsigned int num_recv_queues[N_IA_CSS_ISYS_QUEUE_TYPE]; + unsigned int send_queue_size[N_IA_CSS_ISYS_QUEUE_TYPE]; + struct ia_css_isys_comm_buffer_queue isys_comm_buffer_queue; + unsigned int stream_nof_output_pins[STREAM_ID_MAX]; +#if (VERIFY_DEVSTATE != 0) + enum device_state dev_state; +#endif /* VERIFY_DEVSTATE */ + enum stream_state stream_state_array[STREAM_ID_MAX]; + /* If true, this context is created based on secure config */ + bool secure; +}; + + +/** + * ia_css_isys_constr_comm_buff_queue() + */ +extern int ia_css_isys_constr_comm_buff_queue( + struct ia_css_isys_context *ctx +); + +/** + * ia_css_isys_force_unmap_comm_buff_queue() + */ +extern int ia_css_isys_force_unmap_comm_buff_queue( + struct ia_css_isys_context *ctx +); + +/** + * ia_css_isys_destr_comm_buff_queue() + */ +extern int ia_css_isys_destr_comm_buff_queue( + struct ia_css_isys_context *ctx +); + +/** + * ia_css_isys_constr_fw_stream_cfg() + */ +extern int ia_css_isys_constr_fw_stream_cfg( + struct ia_css_isys_context *ctx, + const unsigned int stream_handle, + ia_css_shared_buffer_css_address *pstream_cfg_fw, + ia_css_shared_buffer *pbuf_stream_cfg_id, + const struct ia_css_isys_stream_cfg_data *stream_cfg +); + +/** + * ia_css_isys_constr_fw_next_frame() + */ +extern int ia_css_isys_constr_fw_next_frame( + struct ia_css_isys_context *ctx, + const unsigned int stream_handle, + ia_css_shared_buffer_css_address *pnext_frame_fw, + ia_css_shared_buffer *pbuf_next_frame_id, + const struct ia_css_isys_frame_buff_set *next_frame +); + +/** + * ia_css_isys_extract_fw_response() + */ +extern int ia_css_isys_extract_fw_response( + struct ia_css_isys_context *ctx, + const struct resp_queue_token *token, + struct ia_css_isys_resp_info *received_response +); +extern int ia_css_isys_extract_proxy_response( + const struct proxy_resp_queue_token *token, + struct ia_css_proxy_write_req_resp *received_response +); + +/** + * 
ia_css_isys_prepare_param() + */ +extern int ia_css_isys_prepare_param( + struct ia_css_isys_fw_config *isys_fw_cfg, + const struct ia_css_isys_buffer_partition *buf_partition, + const unsigned int num_send_queues[], + const unsigned int num_recv_queues[] +); + +#endif /* __IA_CSS_ISYS_PRIVATE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public.c new file mode 100644 index 000000000000..f7b132527249 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public.c @@ -0,0 +1,1284 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +/* TODO: REMOVE --> START IF EXTERNALLY INCLUDED/DEFINED */ +/* These are temporary, the correct numbers need to be inserted/linked */ +/* Until this happens, the following definitions stay here */ +#define INPUT_MIN_WIDTH 1 +#define INPUT_MAX_WIDTH 16384 +#define INPUT_MIN_HEIGHT 1 +#define INPUT_MAX_HEIGHT 16384 +#define OUTPUT_MIN_WIDTH 1 +#define OUTPUT_MAX_WIDTH 16384 +#define OUTPUT_MIN_HEIGHT 1 +#define OUTPUT_MAX_HEIGHT 16384 +/* REMOVE --> END IF EXTERNALLY INCLUDED/DEFINED */ + + +/* The FW bridged types are included through the following */ +#include "ia_css_isysapi.h" +/* The following provides the isys-sys context */ +#include "ia_css_isys_private.h" +/* The following provides the sys layer functions */ +#include "ia_css_syscom.h" + +#include "ia_css_cell.h" +#include "ipu_device_cell_properties.h" + +/* The following provides the tracing functions */ +#include "ia_css_isysapi_trace.h" +#include "ia_css_isys_public_trace.h" + +#include "ia_css_shared_buffer_cpu.h" +/* The following is needed for the + * stddef.h (NULL), + * limits.h (CHAR_BIT definition). + */ +#include "type_support.h" +#include "error_support.h" +#include "cpu_mem_support.h" +#include "math_support.h" +#include "misc_support.h" +#include "system_const.h" + +static int isys_context_create( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config); +static int isys_start_server( + const struct ia_css_isys_device_cfg_data *config); + +static int isys_context_create( + HANDLE * context, + const struct ia_css_isys_device_cfg_data *config) +{ + int retval; + unsigned int stream_handle; + struct ia_css_isys_context *ctx; + struct ia_css_syscom_config sys; + /* Needs to be updated in case new type of queues are introduced */ + struct ia_css_syscom_queue_config input_queue_cfg[N_MAX_SEND_QUEUES]; + /* Needs to be updated in case new type of queues are introduced */ + struct ia_css_syscom_queue_config output_queue_cfg[N_MAX_RECV_QUEUES]; + struct ia_css_isys_fw_config isys_fw_cfg; + unsigned int proxy_write_queue_size; + unsigned int ssid; + unsigned int mmid; + unsigned int i; + + /* Printing "ENTRY isys_context_create" + * if tracing level = VERBOSE. 
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY isys_context_create\n"); + + verifret(config != NULL, EFAULT); + + /* Printing configuration information if tracing level = VERBOSE. */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_device_config_data(config); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + + /* Runtime check for # of send and recv MSG queues */ + verifret(config->driver_sys.num_send_queues <= + N_MAX_MSG_SEND_QUEUES/*=STREAM_ID_MAX*/, EINVAL); + verifret(config->driver_sys.num_recv_queues <= + N_MAX_MSG_RECV_QUEUES, EINVAL); + + /* Runtime check for send and recv MSG queue sizes */ + verifret(config->driver_sys.send_queue_size <= MAX_QUEUE_SIZE, EINVAL); + verifret(config->driver_sys.recv_queue_size <= MAX_QUEUE_SIZE, EINVAL); + + /* TODO: return an error in case MAX_QUEUE_SIZE is exceeded + * (Similar to runtime check on MSG queue sizes) + */ + proxy_write_queue_size = uclip( + config->driver_proxy.proxy_write_queue_size, + MIN_QUEUE_SIZE, + MAX_QUEUE_SIZE); + + ctx = (struct ia_css_isys_context *) + ia_css_cpu_mem_alloc(sizeof(struct ia_css_isys_context)); + verifret(ctx != NULL, EFAULT); + *context = (HANDLE)ctx; + + /* Copy to the sys config the driver_sys config, + * and add the internal info (token sizes) + */ + ssid = config->driver_sys.ssid; + mmid = config->driver_sys.mmid; + sys.ssid = ssid; + sys.mmid = mmid; + + ctx->secure = config->secure; + /* Following operations need to be aligned with + * "enum ia_css_isys_queue_type" list (list of queue types) + */ + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] = + N_MAX_PROXY_SEND_QUEUES; + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] = + N_MAX_DEV_SEND_QUEUES; + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] = + config->driver_sys.num_send_queues; + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] = + N_MAX_PROXY_RECV_QUEUES; + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] = + 0; /* Common msg/dev return queue */ + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] = + config->driver_sys.num_recv_queues; + + sys.num_input_queues = + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] + + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + sys.num_output_queues = + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] + + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + + sys.input = input_queue_cfg; + for (i = 0; + i < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY]; + i++) { + input_queue_cfg[BASE_PROXY_SEND_QUEUES + i].queue_size = + proxy_write_queue_size; + input_queue_cfg[BASE_PROXY_SEND_QUEUES + i].token_size = + sizeof(struct proxy_send_queue_token); + } + for (i = 0; + i < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV]; + i++) { + input_queue_cfg[BASE_DEV_SEND_QUEUES + i].queue_size = + DEV_SEND_QUEUE_SIZE; + input_queue_cfg[BASE_DEV_SEND_QUEUES + i].token_size = + sizeof(struct send_queue_token); + } + for (i = 0; + i < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + i++) { + input_queue_cfg[BASE_MSG_SEND_QUEUES + i].queue_size = + config->driver_sys.send_queue_size; + input_queue_cfg[BASE_MSG_SEND_QUEUES + i].token_size = + sizeof(struct send_queue_token); + } + + ctx->send_queue_size[IA_CSS_ISYS_QUEUE_TYPE_PROXY] = + proxy_write_queue_size; + ctx->send_queue_size[IA_CSS_ISYS_QUEUE_TYPE_DEV] = + DEV_SEND_QUEUE_SIZE; + ctx->send_queue_size[IA_CSS_ISYS_QUEUE_TYPE_MSG] = + config->driver_sys.send_queue_size; + + sys.output = output_queue_cfg; + 
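+	/* The recv-side queues configured below follow the same contiguous
+	 * index layout as the send queues above: [PROXY | MSG], starting at
+	 * BASE_PROXY_RECV_QUEUES and BASE_MSG_RECV_QUEUES. There is no DEV
+	 * recv queue; DEV responses come back on the common MSG return path
+	 * (num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV] is 0 above).
+	 */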
for (i = 0; + i < ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY]; + i++) { + output_queue_cfg[BASE_PROXY_RECV_QUEUES + i].queue_size = + proxy_write_queue_size; + output_queue_cfg[BASE_PROXY_RECV_QUEUES + i].token_size = + sizeof(struct proxy_resp_queue_token); + } + /* There is no recv DEV queue */ + for (i = 0; + i < ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]; + i++) { + output_queue_cfg[BASE_MSG_RECV_QUEUES + i].queue_size = + config->driver_sys.recv_queue_size; + output_queue_cfg[BASE_MSG_RECV_QUEUES + i].token_size = + sizeof(struct resp_queue_token); + } + + sys.regs_addr = ipu_device_cell_memory_address(SPC0, + IPU_DEVICE_SP2600_CONTROL_REGS); + sys.dmem_addr = ipu_device_cell_memory_address(SPC0, + IPU_DEVICE_SP2600_CONTROL_DMEM); + +#if HAS_DUAL_CMD_CTX_SUPPORT + sys.dmem_addr += config->secure ? REGMEM_SECURE_OFFSET : REGMEM_OFFSET; +#endif + + /* Prepare the param */ + ia_css_isys_prepare_param( + &isys_fw_cfg, + &config->buffer_partition, + ctx->num_send_queues, + ctx->num_recv_queues); + + /* parameter struct to be passed to fw */ + sys.specific_addr = &isys_fw_cfg; + /* parameters size */ + sys.specific_size = sizeof(isys_fw_cfg); + sys.secure = config->secure; + if (config->secure) { + sys.vtl0_addr_mask = config->vtl0_addr_mask; + } + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "isys_context_create || call ia_css_syscom_open()\n"); + /* The allocation of the queues will take place within this call and + * info will be stored in sys_context output + */ + ctx->sys = ia_css_syscom_open(&sys, NULL); + if (!ctx->sys) { + ia_css_cpu_mem_free(ctx); + return -EFAULT; + } + + /* Update the context with the id's */ + ctx->ssid = ssid; + ctx->mmid = mmid; + + for (stream_handle = 0; stream_handle < STREAM_ID_MAX; + stream_handle++) { + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_IDLE; + } + + retval = ia_css_isys_constr_comm_buff_queue(ctx); + if (retval) { + ia_css_syscom_close(ctx->sys); + ia_css_syscom_release(ctx->sys, 1); + ia_css_cpu_mem_free(ctx); + return retval; + } + +#if (VERIFY_DEVSTATE != 0) + ctx->dev_state = IA_CSS_ISYS_DEVICE_STATE_CONFIGURED; +#endif /* VERIFY_DEVSTATE */ + + /* Printing device configuration and device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + + /* Printing "LEAVE isys_context_create" message + * if tracing level = VERBOSE. 
+	 */
+	IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE isys_context_create\n");
+	return 0;
+}
+
+static int isys_start_server(
+	const struct ia_css_isys_device_cfg_data *config)
+{
+	verifret(config != NULL, EFAULT);
+	IA_CSS_TRACE_0(ISYSAPI, VERBOSE,
+		"isys_start_server || start SPC\n");
+	/* The firmware is loaded and syscom is ready, start the SPC */
+	ia_css_cell_start_prefetch(config->driver_sys.ssid, SPC0,
+		config->driver_sys.icache_prefetch);
+	IA_CSS_TRACE_1(ISYSAPI, VERBOSE, "SPC prefetch: %d\n",
+		config->driver_sys.icache_prefetch);
+	return 0;
+}
+
+/**
+ * ia_css_isys_device_open() - open and configure ISYS device
+ */
+#if HAS_DUAL_CMD_CTX_SUPPORT
+int ia_css_isys_context_create(
+	HANDLE * context,
+	const struct ia_css_isys_device_cfg_data *config)
+{
+	return isys_context_create(context, config);
+}
+
+/* push context information to DMEM for FW to access */
+int ia_css_isys_context_store_dmem(
+	const HANDLE *context,
+	const struct ia_css_isys_device_cfg_data *config)
+{
+	struct ia_css_isys_context *ctx = (struct ia_css_isys_context *) *context;
+
+	return ia_css_syscom_store_dmem(ctx->sys, config->driver_sys.ssid, config->vtl0_addr_mask);
+}
+
+bool ia_css_isys_ab_spc_ready(
+	HANDLE *context)
+{
+	struct ia_css_isys_context *ctx = (struct ia_css_isys_context *) *context;
+
+	return ia_css_syscom_is_ab_spc_ready(ctx->sys);
+}
+
+int ia_css_isys_device_open(
+	const struct ia_css_isys_device_cfg_data *config)
+{
+	return isys_start_server(config);
+}
+#else
+int ia_css_isys_device_open(
+	HANDLE * context,
+	const struct ia_css_isys_device_cfg_data *config)
+{
+	int retval;
+
+	retval = isys_context_create(context, config);
+	if (retval) {
+		IA_CSS_TRACE_1(ISYSAPI, ERROR, "ia_css_isys_device_open() failed (retval %d)\n", retval);
+		return retval;
+	}
+
+	return isys_start_server(config);
+}
+#endif
+
+/**
+ * ia_css_isys_device_open_ready() - open the ISYS device queue ports
+ */
+int ia_css_isys_device_open_ready(HANDLE context)
+{
+	struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context;
+	unsigned int i;
+	int retval;
+
+	/* Printing "ENTRY IA_CSS_ISYS_DEVICE_OPEN_READY"
+	 * if tracing level = VERBOSE.
+	 */
+	IA_CSS_TRACE_0(ISYSAPI, VERBOSE,
+		"ENTRY IA_CSS_ISYS_DEVICE_OPEN_READY\n");
+
+	verifret(ctx, EFAULT);
+
+	/* Printing device handle context information
+	 * if tracing level = VERBOSE.
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_CONFIGURED, EPERM); +#endif /* VERIFY_DEVSTATE */ + + /* Open the ports for all the non-MSG send queues (PROXY + DEV) */ + for (i = 0; + i < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV]; + i++) { + retval = ia_css_syscom_send_port_open(ctx->sys, i); + verifret(retval != FW_ERROR_BUSY, EBUSY); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + } + + /* Open the ports for all the recv queues (PROXY + MSG) */ + for (i = 0; + i < (ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] + + ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG]); + i++) { + retval = ia_css_syscom_recv_port_open(ctx->sys, i); + verifret(retval != FW_ERROR_BUSY, EBUSY); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + } + +#if (VERIFY_DEVSTATE != 0) + ctx->dev_state = IA_CSS_ISYS_DEVICE_STATE_READY; +#endif /* VERIFY_DEVSTATE */ + + /* Printing "LEAVE IA_CSS_ISYS_DEVICE_OPEN_READY" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "LEAVE IA_CSS_ISYS_DEVICE_OPEN_READY\n"); + return 0; +} + + + /** + * ia_css_isys_stream_open() - open and configure a virtual stream + */ +int ia_css_isys_stream_open( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_stream_cfg_data *stream_cfg) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + unsigned int i; + int retval = 0; + int packets; + struct send_queue_token token; + ia_css_shared_buffer_css_address stream_cfg_fw = 0; + ia_css_shared_buffer buf_stream_cfg_id = (ia_css_shared_buffer)NULL; + /* Printing "ENTRY IA_CSS_ISYS_STREAM_OPEN" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_OPEN\n"); + + verifret(ctx, EFAULT); + + /* Printing stream configuration and device handle context information + * if tracing level = VERBOSE. 
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); + print_stream_config_data(stream_cfg); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_IDLE, EPERM); + + verifret(stream_cfg != NULL, EFAULT); + verifret(stream_cfg->src < N_IA_CSS_ISYS_STREAM_SRC, EINVAL); + verifret(stream_cfg->vc < N_IA_CSS_ISYS_MIPI_VC, EINVAL); + verifret(stream_cfg->isl_use < N_IA_CSS_ISYS_USE, EINVAL); + if (stream_cfg->isl_use != IA_CSS_ISYS_USE_NO_ISL_NO_ISA) { + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].bottom_offset >= + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].top_offset + + OUTPUT_MIN_HEIGHT, EINVAL); + + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].bottom_offset <= + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].top_offset + + OUTPUT_MAX_HEIGHT, EINVAL); + + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].right_offset >= + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].left_offset + + OUTPUT_MIN_WIDTH, EINVAL); + + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].right_offset <= + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].left_offset + + OUTPUT_MAX_WIDTH, EINVAL); + } + verifret(stream_cfg->nof_input_pins <= MAX_IPINS, EINVAL); + verifret(stream_cfg->nof_output_pins <= MAX_OPINS, EINVAL); + for (i = 0; i < stream_cfg->nof_input_pins; i++) { + /* Verify input pin */ + verifret( + stream_cfg->input_pins[i].input_res.width >= + INPUT_MIN_WIDTH && + stream_cfg->input_pins[i].input_res.width <= + INPUT_MAX_WIDTH && + stream_cfg->input_pins[i].input_res.height >= + INPUT_MIN_HEIGHT && + stream_cfg->input_pins[i].input_res.height <= + INPUT_MAX_HEIGHT, EINVAL); + verifret(stream_cfg->input_pins[i].dt < + N_IA_CSS_ISYS_MIPI_DATA_TYPE, EINVAL); +/* #ifdef To be removed when driver inits the value */ +#ifdef DRIVER_INIT_MIPI_STORE_MODE + verifret(stream_cfg->input_pins[i].mipi_store_mode < + N_IA_CSS_ISYS_MIPI_STORE_MODE, EINVAL); +#endif /* DRIVER_INIT_MIPI_STORE_MODE */ + } + for (i = 0; i < stream_cfg->nof_output_pins; i++) { + /* Verify output pin */ + verifret(stream_cfg->output_pins[i].input_pin_id < + stream_cfg->nof_input_pins, EINVAL); + verifret(stream_cfg->output_pins[i].pt < + N_IA_CSS_ISYS_PIN_TYPE, EINVAL); + verifret(stream_cfg->output_pins[i].ft < + N_IA_CSS_ISYS_FRAME_FORMAT, EINVAL); + /* Verify that the stride is aligned to 64 bytes: HW spec */ + verifret(stream_cfg->output_pins[i].stride%(XMEM_WIDTH/8) == + 0, EINVAL); + verifret((stream_cfg->output_pins[i].output_res.width >= + OUTPUT_MIN_WIDTH) && + (stream_cfg->output_pins[i].output_res.width <= + OUTPUT_MAX_WIDTH) && + (stream_cfg->output_pins[i].output_res.height >= + OUTPUT_MIN_HEIGHT) && + (stream_cfg->output_pins[i].output_res.height <= + OUTPUT_MAX_HEIGHT), EINVAL); + verifret((stream_cfg->output_pins[i].pt == + IA_CSS_ISYS_PIN_TYPE_MIPI) || + (stream_cfg-> + input_pins[stream_cfg->output_pins[i].input_pin_id].mipi_store_mode != + IA_CSS_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER), EINVAL); + if (stream_cfg->isl_use == IA_CSS_ISYS_USE_SINGLE_ISA) { + switch (stream_cfg->output_pins[i].pt) { 
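+			/*
+			 * The per-pin-type checks below enforce exact geometry.
+			 * Illustration (assumed numbers): a 1280x720 RAW_NS pin
+			 * with POST_ISA_NONSCALED crop offsets top = 0, left = 0
+			 * must have bottom_offset == 0 + 720 and
+			 * right_offset == 0 + 1280, and the POST_ISA_NONSCALED
+			 * isa_res must be at least 1280x720, since padding is
+			 * not supported.
+			 */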
+ case IA_CSS_ISYS_PIN_TYPE_RAW_NS: + /* Ensure the PIFCONV cropped resolution + * matches the RAW_NS output pin resolution + */ + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED].bottom_offset == + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED].top_offset + + (int)stream_cfg->output_pins[i].output_res.height, EINVAL); + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED].right_offset == + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_NONSCALED].left_offset + + (int)stream_cfg->output_pins[i].output_res.width, EINVAL); + /* Ensure the ISAPF cropped resolution matches + * the Non-scaled ISA output resolution before + * the PIFCONV cropping, since nothing can + * modify the resolution in that part of + * the pipe + */ + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].bottom_offset == + stream_cfg->crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].top_offset + + (int)stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED].height, + EINVAL); + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].right_offset == + stream_cfg->crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].left_offset + + (int)stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED].width, + EINVAL); + /* Ensure the Non-scaled ISA output resolution + * before the PIFCONV cropping bounds the + * RAW_NS pin output resolution since padding + * is not supported + */ + verifret(stream_cfg-> +isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED].height >= +stream_cfg->output_pins[i].output_res.height, EINVAL); + verifret(stream_cfg-> +isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_NONSCALED].width >= +stream_cfg->output_pins[i].output_res.width, EINVAL); + break; + case IA_CSS_ISYS_PIN_TYPE_RAW_S: + /* Ensure the ScaledPIFCONV cropped resolution + * matches the RAW_S output pin resolution + */ + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED].bottom_offset == + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED].top_offset + + (int)stream_cfg->output_pins[i].output_res.height, EINVAL); + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED].right_offset == + stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_POST_ISA_SCALED].left_offset + + (int)stream_cfg->output_pins[i].output_res.width, EINVAL); + /* Ensure the ISAPF cropped resolution bounds + * the Scaled ISA output resolution before the + * ScaledPIFCONV cropping, since only IDS can + * modify the resolution, and this only to + * make it smaller + */ + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].bottom_offset >= + stream_cfg->crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].top_offset + + (int)stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED].height, + EINVAL); + verifret(stream_cfg-> + crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].right_offset >= + stream_cfg->crop[IA_CSS_ISYS_CROPPING_LOCATION_PRE_ISA].left_offset + + (int)stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED].width, + EINVAL); + /* Ensure the Scaled ISA output resolution + * before the ScaledPIFCONV cropping bounds + * the RAW_S pin output resolution since + * padding is not supported + */ + verifret(stream_cfg-> + isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED].height >= + stream_cfg->output_pins[i].output_res.height, EINVAL); + verifret(stream_cfg-> + 
isa_cfg.isa_res[IA_CSS_ISYS_RESOLUTION_INFO_POST_ISA_SCALED].width >=
+				stream_cfg->output_pins[i].output_res.width, EINVAL);
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	/* Open one send queue per stream, and the single shared receive
+	 * queue if it does not exist yet
+	 */
+	retval = ia_css_syscom_send_port_open(ctx->sys,
+			(BASE_MSG_SEND_QUEUES + stream_handle));
+	verifret(retval != FW_ERROR_BUSY, EBUSY);
+	verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT);
+	verifret(retval == 0, EINVAL);
+
+	packets = ia_css_syscom_send_port_available(ctx->sys,
+			(BASE_MSG_SEND_QUEUES + stream_handle));
+	verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT);
+	verifret(packets >= 0, EINVAL);
+	verifret(packets > 0, EPERM);
+	token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_OPEN;
+	retval = ia_css_isys_constr_fw_stream_cfg(ctx, stream_handle,
+			&stream_cfg_fw, &buf_stream_cfg_id, stream_cfg);
+	verifret(retval == 0, retval);
+	token.payload = stream_cfg_fw;
+	token.buf_handle = HOST_ADDRESS(buf_stream_cfg_id);
+	retval = ia_css_syscom_send_port_transfer(ctx->sys,
+			(BASE_MSG_SEND_QUEUES + stream_handle), &token);
+	verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT);
+	verifret(retval >= 0, EINVAL);
+
+	ctx->stream_nof_output_pins[stream_handle] =
+		stream_cfg->nof_output_pins;
+	ctx->stream_state_array[stream_handle] =
+		IA_CSS_ISYS_STREAM_STATE_OPENED;
+
+	/* Printing "LEAVE IA_CSS_ISYS_STREAM_OPEN" message
+	 * if tracing level = VERBOSE.
+	 */
+	IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_OPEN\n");
+
+	return 0;
+}
+
+
+/**
+ * ia_css_isys_stream_close() - close virtual stream
+ */
+int ia_css_isys_stream_close(
+	HANDLE context,
+	const unsigned int stream_handle)
+{
+	struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context;
+	int retval = 0;
+	int packets;
+	struct send_queue_token token;
+
+	/* Printing "ENTRY IA_CSS_ISYS_STREAM_CLOSE" message
+	 * if tracing level = VERBOSE.
+	 */
+	IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_CLOSE\n");
+
+	verifret(ctx, EFAULT);
+
+	/* Printing device handle context information
+	 * if tracing level = VERBOSE.
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_OPENED, EPERM); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_CLOSE; + token.stream_id = stream_handle; + token.payload = 0; + token.buf_handle = 0; + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + /* close 1 send queue/stream and the single receive queue + * if none is using it + */ + retval = ia_css_syscom_send_port_close(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval == 0, EINVAL); + + ctx->stream_state_array[stream_handle] = IA_CSS_ISYS_STREAM_STATE_IDLE; + /* Printing "LEAVE IA_CSS_ISYS_STREAM_CLOSE" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_CLOSE\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_start() - starts handling a mipi virtual stream + */ +int ia_css_isys_stream_start( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_frame_buff_set *next_frame) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + ia_css_shared_buffer_css_address next_frame_fw = 0; + ia_css_shared_buffer buf_next_frame_id = (ia_css_shared_buffer)NULL; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_START" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_START\n"); + + verifret(ctx, EFAULT); + + /* Printing frame configuration and device handle context information + * if tracing level = VERBOSE. 
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); + print_isys_frame_buff_set(next_frame, + ctx->stream_nof_output_pins[stream_handle]); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_OPENED, EPERM); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + if (next_frame != NULL) { + token.send_type = + IA_CSS_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE; + retval = ia_css_isys_constr_fw_next_frame(ctx, stream_handle, + &next_frame_fw, &buf_next_frame_id, next_frame); + verifret(retval == 0, retval); + token.payload = next_frame_fw; + token.buf_handle = HOST_ADDRESS(buf_next_frame_id); + } else { + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_START; + token.payload = 0; + token.buf_handle = 0; + } + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_STARTED; + /* Printing "LEAVE IA_CSS_ISYS_STREAM_START" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_START\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_stop() - Stops a mipi virtual stream + */ +int ia_css_isys_stream_stop( + HANDLE context, + const unsigned int stream_handle) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_STOP" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_STOP\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_STARTED, EPERM); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_DEV_SEND_QUEUES)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_STOP; + token.stream_id = stream_handle; + token.payload = 0; + token.buf_handle = 0; + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_DEV_SEND_QUEUES), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_OPENED; + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_STOP" message + * if tracing level = VERBOSE. 
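+	 * Note that, unlike capture and flush tokens, the STREAM_STOP token
+	 * above is sent on the shared DEV send queue (BASE_DEV_SEND_QUEUES),
+	 * so it can overtake capture requests still pending on this stream's
+	 * MSG send queue; ia_css_isys_stream_flush() drains those instead.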
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_STOP\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_flush() - stops a mipi virtual stream but + * completes processing cmd backlog + */ +int ia_css_isys_stream_flush( + HANDLE context, + const unsigned int stream_handle) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_FLUSH" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_STREAM_FLUSH\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_STARTED, EPERM); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_FLUSH; + token.payload = 0; + token.buf_handle = 0; + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + ctx->stream_state_array[stream_handle] = + IA_CSS_ISYS_STREAM_STATE_OPENED; + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_FLUSH" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_STREAM_FLUSH\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_capture_indication() + * - captures "next frame" on stream_handle + */ +int ia_css_isys_stream_capture_indication( + HANDLE context, + const unsigned int stream_handle, + const struct ia_css_isys_frame_buff_set *next_frame) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct send_queue_token token; + ia_css_shared_buffer_css_address next_frame_fw = 0; + ia_css_shared_buffer buf_next_frame_id = (ia_css_shared_buffer)NULL; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_CAPTURE_INDICATION" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "ENTRY IA_CSS_ISYS_STREAM_CAPTURE_INDICATION\n"); + + verifret(ctx, EFAULT); + + /* Printing frame configuration and device handle context information + *if tracing level = VERBOSE. 
+ */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); + print_isys_frame_buff_set(next_frame, + ctx->stream_nof_output_pins[stream_handle]); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(stream_handle < STREAM_ID_MAX, EINVAL); + verifret(stream_handle < + ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG], EINVAL); + verifret(ctx->stream_state_array[stream_handle] == + IA_CSS_ISYS_STREAM_STATE_STARTED, EPERM); + verifret(next_frame != NULL, EFAULT); + + packets = ia_css_syscom_send_port_available(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle)); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + { + token.send_type = IA_CSS_ISYS_SEND_TYPE_STREAM_CAPTURE; + retval = ia_css_isys_constr_fw_next_frame(ctx, stream_handle, + &next_frame_fw, &buf_next_frame_id, next_frame); + verifret(retval == 0, retval); + token.payload = next_frame_fw; + token.buf_handle = HOST_ADDRESS(buf_next_frame_id); + } + retval = ia_css_syscom_send_port_transfer(ctx->sys, + (BASE_MSG_SEND_QUEUES + stream_handle), &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + /* Printing "LEAVE IA_CSS_ISYS_STREAM_CAPTURE_INDICATION" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "LEAVE IA_CSS_ISYS_STREAM_CAPTURE_INDICATION\n"); + + return 0; +} + + +/** + * ia_css_isys_stream_handle_response() - handle ISYS responses + */ +int ia_css_isys_stream_handle_response( + HANDLE context, + struct ia_css_isys_resp_info *received_response) +{ + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + int retval = 0; + int packets; + struct resp_queue_token token; + + /* Printing "ENTRY IA_CSS_ISYS_STREAM_HANDLE_RESPONSE" message + * if tracing level = VERBOSE. + */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "ENTRY IA_CSS_ISYS_STREAM_HANDLE_RESPONSE\n"); + + verifret(ctx, EFAULT); + + /* Printing device handle context information + * if tracing level = VERBOSE. + */ +#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + print_handle_context(ctx); +#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */ + +#if (VERIFY_DEVSTATE != 0) + verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM); +#endif /* VERIFY_DEVSTATE */ + + verifret(received_response != NULL, EFAULT); + + packets = ia_css_syscom_recv_port_available( + ctx->sys, BASE_MSG_RECV_QUEUES); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + + retval = ia_css_syscom_recv_port_transfer( + ctx->sys, BASE_MSG_RECV_QUEUES, &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + retval = ia_css_isys_extract_fw_response( + ctx, &token, received_response); + verifret(retval == 0, retval); + + /* Printing received response information + * if tracing level = VERBOSE. 
+	 */
+#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG
+	print_isys_resp_info(received_response);
+#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */
+
+	verifret(received_response->type < N_IA_CSS_ISYS_RESP_TYPE, EINVAL);
+	verifret(received_response->stream_handle < STREAM_ID_MAX, EINVAL);
+
+	if (received_response->type == IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY ||
+	received_response->type == IA_CSS_ISYS_RESP_TYPE_PIN_DATA_WATERMARK ||
+	received_response->type == IA_CSS_ISYS_RESP_TYPE_PIN_DATA_SKIPPED) {
+		verifret(received_response->pin.addr != 0, EFAULT);
+		verifret(received_response->pin.out_buf_id != 0, EFAULT);
+		verifret(received_response->pin_id <
+			ctx->stream_nof_output_pins[received_response->stream_handle],
+			EINVAL);
+	}
+
+	/* Printing "LEAVE IA_CSS_ISYS_STREAM_HANDLE_RESPONSE" message
+	 * if tracing level = VERBOSE.
+	 */
+	IA_CSS_TRACE_0(ISYSAPI, VERBOSE,
+		"LEAVE IA_CSS_ISYS_STREAM_HANDLE_RESPONSE\n");
+
+	return 0;
+}
+
+
+/*
+ * isys_context_destroy() - close syscom ports and tear down the context
+ */
+static int isys_context_destroy(HANDLE context)
+{
+	struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context;
+	unsigned int stream_handle;
+	unsigned int queue_id;
+	unsigned int nof_recv_queues;
+	int retval = 0;
+
+	/* Printing "ENTRY isys_context_destroy" message
+	 * if tracing level = VERBOSE.
+	 */
+	IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY isys_context_destroy\n");
+
+	verifret(ctx, EFAULT);
+
+	/* Printing device handle context information
+	 * if tracing level = VERBOSE.
+	 */
+#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG
+	print_handle_context(ctx);
+#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */
+
+#if (VERIFY_DEVSTATE != 0)
+	verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_READY, EPERM);
+#endif /* VERIFY_DEVSTATE */
+
+	nof_recv_queues = ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_MSG] +
+		ctx->num_recv_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY];
+	/* Close the ports for all the recv queues (MSG and PROXY) */
+	for (queue_id = 0; queue_id < nof_recv_queues; queue_id++) {
+		retval = ia_css_syscom_recv_port_close(
+			ctx->sys, queue_id);
+		verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT);
+		verifret(retval == 0, EINVAL);
+	}
+
+	/* Close the ports for the PROXY and DEV send queues */
+	for (queue_id = 0;
+	queue_id < ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_PROXY] +
+		ctx->num_send_queues[IA_CSS_ISYS_QUEUE_TYPE_DEV];
+	queue_id++) {
+		retval = ia_css_syscom_send_port_close(
+			ctx->sys, queue_id);
+		verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT);
+		verifret(retval == 0, EINVAL);
+	}
+
+	for (stream_handle = 0; stream_handle < STREAM_ID_MAX;
+	stream_handle++) {
+		verifret(ctx->stream_state_array[stream_handle] ==
+			IA_CSS_ISYS_STREAM_STATE_IDLE, EPERM);
+	}
+
+	retval = ia_css_syscom_close(ctx->sys);
+	verifret(retval == 0, EBUSY);
+
+#if (VERIFY_DEVSTATE != 0)
+	ctx->dev_state = IA_CSS_ISYS_DEVICE_STATE_CONFIGURED;
+#endif /* VERIFY_DEVSTATE */
+
+	/* Printing "LEAVE isys_context_destroy" message
+	 * if tracing level = VERBOSE.
+ */
+ IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE isys_context_destroy\n");
+
+ return 0;
+}
+/**
+ * ia_css_isys_device_close() - close ISYS device
+ */
+#if HAS_DUAL_CMD_CTX_SUPPORT
+int ia_css_isys_context_destroy(HANDLE context)
+{
+ return isys_context_destroy(context);
+}
+
+void ia_css_isys_device_close(void)
+{
+ /* Kept for legacy compatibility, nothing to perform here */
+}
+
+#else
+int ia_css_isys_device_close(HANDLE context)
+{
+ return isys_context_destroy(context);
+}
+#endif
+
+/**
+ * ia_css_isys_device_release() - release ISYS device
+ */
+int ia_css_isys_device_release(HANDLE context, unsigned int force)
+{
+ struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context;
+ int retval = 0;
+
+ /* Printing "ENTRY IA_CSS_ISYS_DEVICE_RELEASE" message
+ * if tracing level = VERBOSE.
+ */
+ IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_DEVICE_RELEASE\n");
+
+ verifret(ctx, EFAULT);
+
+ /* Printing device handle context information
+ * if tracing level = VERBOSE.
+ */
+#if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG
+ print_handle_context(ctx);
+#endif /* ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG */
+
+#if (VERIFY_DEVSTATE != 0)
+ verifret(ctx->dev_state == IA_CSS_ISYS_DEVICE_STATE_CONFIGURED, EPERM);
+#endif /* VERIFY_DEVSTATE */
+
+ retval = ia_css_syscom_release(ctx->sys, force);
+ verifret(retval == 0, EBUSY);
+
+ /* If ia_css_isys_device_release() is called with force==1, this
+ * happens after a timeout, so there are no active transfers.
+ * If it is called with force==0, this happens after the SP has
+ * gone idle, so there are no active transfers either.
+ */
+ ia_css_isys_force_unmap_comm_buff_queue(ctx);
+ ia_css_isys_destr_comm_buff_queue(ctx);
+
+#if (VERIFY_DEVSTATE != 0)
+ ctx->dev_state = IA_CSS_ISYS_DEVICE_STATE_IDLE;
+#endif /* VERIFY_DEVSTATE */
+
+ ia_css_cpu_mem_free(ctx);
+
+ /* Printing "LEAVE IA_CSS_ISYS_DEVICE_RELEASE" message
+ * if tracing level = VERBOSE.
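+ *
+ * ctx was freed just above, so nothing below may dereference it and
+ * the caller's handle is no longer valid.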
+ */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_DEVICE_RELEASE\n"); + + return 0; +} + +/** + * ia_css_isys_proxy_write_req() - send ISYS proxy write requests + */ +int ia_css_isys_proxy_write_req( + HANDLE context, + const struct ia_css_proxy_write_req_val *write_req_val) +{ + + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + struct proxy_send_queue_token token; + int packets; + int retval = 0; + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ENTRY IA_CSS_ISYS_PROXY_WRITE_REQ\n"); + verifret(ctx, EFAULT); + verifret(write_req_val != NULL, EFAULT); + + packets = ia_css_syscom_send_port_available(ctx->sys, 0); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + + token.request_id = write_req_val->request_id; + token.region_index = write_req_val->region_index; + token.offset = write_req_val->offset; + token.value = write_req_val->value; + + retval = ia_css_syscom_send_port_transfer(ctx->sys, 0, &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "LEAVE IA_CSS_ISYS_PROXY_WRITE_REQ\n"); + + return 0; +} + +/** + * ia_css_isys_proxy_handle_write_response() - handle ISYS proxy responses + */ +int ia_css_isys_proxy_handle_write_response( + HANDLE context, + struct ia_css_proxy_write_req_resp *received_response) +{ + + struct ia_css_isys_context *ctx = (struct ia_css_isys_context *)context; + struct proxy_resp_queue_token token; + int retval = 0; + int packets; + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "ENTRY IA_CSS_ISYS_PROXY_HANDLE_WRITE_RESPONSE\n"); + verifret(ctx, EFAULT); + verifret(received_response != NULL, EFAULT); + + packets = ia_css_syscom_recv_port_available(ctx->sys, 0); + verifret(packets != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(packets >= 0, EINVAL); + verifret(packets > 0, EPERM); + + retval = ia_css_syscom_recv_port_transfer(ctx->sys, 0, &token); + verifret(retval != FW_ERROR_BAD_ADDRESS, EFAULT); + verifret(retval >= 0, EINVAL); + + + retval = ia_css_isys_extract_proxy_response(&token, received_response); + verifret(retval == 0, retval); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "LEAVE IA_CSS_ISYS_PROXY_HANDLE_WRITE_RESPONSE\n"); + + return 0; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public_trace.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public_trace.c new file mode 100644 index 000000000000..d6500a0cb605 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public_trace.c @@ -0,0 +1,379 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_isysapi_trace.h" +#include "ia_css_isys_public_trace.h" +#include "ia_css_isysapi_types.h" +#include "ia_css_isysapi.h" +#include "ia_css_isys_private.h" +#include "error_support.h" +#include "ia_css_syscom.h" + +/** + * print_handle_context - formatted print function for + * struct ia_css_isys_context *ctx variable + */ +int print_handle_context(struct ia_css_isys_context *ctx) +{ + unsigned int i; + + verifret(ctx != NULL, EFAULT); + /* Print ctx->(ssid, mmid, dev_state) */ + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "Print ia_css_isys_context *ctx\n" + "-------------------------------------------------------\n"); + IA_CSS_TRACE_3(ISYSAPI, VERBOSE, + "\tia_css_isys_context->ssid = %d\n" + "\t\t\tia_css_isys_context->mmid = %d\n" + "\t\t\tia_css_isys_context->device_state = %d\n" + , ctx->ssid + , ctx->mmid + , ctx->dev_state); + /* Print ctx->(stream_state_array, stream_nof_output_pins) */ + for (i = 0; i < STREAM_ID_MAX; i++) { + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_context->stream_state[i = %d] = %d\n" + "\t\t\tia_css_isys_context->stream_nof_output_pins[i = %d] = %d\n" + , i + , ctx->stream_state_array[i] + , i + , ctx->stream_nof_output_pins[i]); + } + /* Print ctx->ia_css_syscom_context */ + IA_CSS_TRACE_1(ISYSAPI, VERBOSE, + "\tia_css_isys_context->ia_css_syscom_context = %p\n" + , (struct ia_css_syscom_context *)(ctx->sys)); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "-------------------------------------------------------\n"); + return 0; +} + +/** + * print_device_config_data - formatted print function for + * struct ia_css_isys_device_cfg_data *config variable + */ +int print_device_config_data(const struct ia_css_isys_device_cfg_data *config) +{ + verifret(config != NULL, EFAULT); + IA_CSS_TRACE_0(ISYSAPI, + VERBOSE, + "Print ia_css_isys_device_cfg_data *config\n" + "-------------------------------------------------------\n"); + IA_CSS_TRACE_7(ISYSAPI, + VERBOSE, + "\tia_css_isys_device_cfg_data->driver_sys.ssid = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.mmid = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.num_send_queues = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.num_recv_queues = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.send_queue_size = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_sys.recv_queue_size = %d\n" + "\t\t\tia_css_isys_device_cfg_data->driver_proxy.proxy_write_queue_size = %d\n", + config->driver_sys.ssid, + config->driver_sys.mmid, + config->driver_sys.num_send_queues, + config->driver_sys.num_recv_queues, + config->driver_sys.send_queue_size, + config->driver_sys.recv_queue_size, + config->driver_proxy.proxy_write_queue_size); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "-------------------------------------------------------\n"); + return 0; +} + +/** + * print_stream_config_data - formatted print function for + * ia_css_isys_stream_cfg_data stream_cfg variable + */ +int print_stream_config_data( + const struct ia_css_isys_stream_cfg_data *stream_cfg) +{ + unsigned int i; + + verifret(stream_cfg != NULL, EFAULT); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "Print ia_css_isys_stream_cfg_data stream_cfg\n" + "-------------------------------------------------------\n"); + IA_CSS_TRACE_5(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_isl_use = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_stream_source = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_mipi_vc = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->nof_input_pins = %d\n" + 
"\t\t\tia_css_isys_stream_cfg_data->nof_output_pins = %d\n" + , stream_cfg->isl_use + , stream_cfg->src + , stream_cfg->vc + , stream_cfg->nof_input_pins + , stream_cfg->nof_output_pins); + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->send_irq_sof_discarded = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->send_irq_eof_discarded = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->send_resp_sof_discarded = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->send_resp_eof_discarded = %d\n" + , stream_cfg->send_irq_sof_discarded + , stream_cfg->send_irq_eof_discarded + , stream_cfg->send_resp_sof_discarded + , stream_cfg->send_resp_eof_discarded); + for (i = 0; i < stream_cfg->nof_input_pins; i++) { + IA_CSS_TRACE_6(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_input_pin_info[i = %d].ia_css_isys_mipi_data_type = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_input_pin_info[i = %d].ia_css_isys_resolution.width = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_input_pin_info[i = %d].ia_css_isys_resolution.height = %d\n" + , i + , stream_cfg->input_pins[i].dt + , i + , stream_cfg->input_pins[i].input_res.width + , i + , stream_cfg->input_pins[i].input_res.height); + IA_CSS_TRACE_2(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_input_pin_info[i = %d].ia_css_isys_mipi_store_mode = %d\n" + , i + , stream_cfg->input_pins[i].mipi_store_mode); + } + for (i = 0; i < N_IA_CSS_ISYS_CROPPING_LOCATION; i++) { + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_cropping[i = %d].top_offset = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_cropping[i = %d].left_offset = %d\n" + , i + , stream_cfg->crop[i].top_offset + , i + , stream_cfg->crop[i].left_offset); + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_cropping[i = %d].bottom_offset = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_cropping[i = %d].right_offset = %d\n" + , i + , stream_cfg->crop[i].bottom_offset + , i + , stream_cfg->crop[i].right_offset); + } + for (i = 0; i < stream_cfg->nof_output_pins; i++) { + IA_CSS_TRACE_6(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].ia_css_isys_pin_type = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].ia_css_isys_frame_format_type = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].input_pin_id = %d\n" + , i + , stream_cfg->output_pins[i].pt + , i + , stream_cfg->output_pins[i].ft + , i + , stream_cfg->output_pins[i].input_pin_id); + IA_CSS_TRACE_6(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].watermark_in_lines = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].send_irq = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].stride = %d\n" + , i + , stream_cfg->output_pins[i].watermark_in_lines + , i + , stream_cfg->output_pins[i].send_irq + , i + , stream_cfg->output_pins[i].stride); + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].ia_css_isys_resolution.width = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_output_pin_info[i = %d].ia_css_isys_resolution.height = %d\n" + , i + , stream_cfg->output_pins[i].output_res.width + , i + , stream_cfg->output_pins[i].output_res.height); + } + for (i = 0; i < N_IA_CSS_ISYS_RESOLUTION_INFO; i++) { + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + 
"\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.ia_css_isys_resolution[i = %d].width = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.ia_css_isys_resolution[i = %d].height = %d\n" + , i + , stream_cfg->isa_cfg.isa_res[i].width + , i + , stream_cfg->isa_cfg.isa_res[i].height); + } + IA_CSS_TRACE_7(ISYSAPI, VERBOSE, + "\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.blc_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.lsc_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.dpc_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.downscaler_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.awb_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.af_enabled = %d\n" + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.ae_enabled = %d\n" + , stream_cfg->isa_cfg.blc_enabled + , stream_cfg->isa_cfg.lsc_enabled + , stream_cfg->isa_cfg.dpc_enabled + , stream_cfg->isa_cfg.downscaler_enabled + , stream_cfg->isa_cfg.awb_enabled + , stream_cfg->isa_cfg.af_enabled + , stream_cfg->isa_cfg.ae_enabled); + + IA_CSS_TRACE_1(ISYSAPI, VERBOSE, + "\t\t\tia_css_isys_stream_cfg_data->ia_css_isys_isa_cfg.paf_type = %d\n" + , stream_cfg->isa_cfg.paf_type); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "-------------------------------------------------------\n"); + return 0; +} + +/** + * print_isys_frame_buff_set - formatted print function for + * struct ia_css_isys_frame_buff_set *next_frame variable + */ +int print_isys_frame_buff_set( + const struct ia_css_isys_frame_buff_set *next_frame, + const unsigned int nof_output_pins) +{ + unsigned int i; + + verifret(next_frame != NULL, EFAULT); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "Print ia_css_isys_frame_buff_set *next_frame\n" + "-------------------------------------------------------\n"); + for (i = 0; i < nof_output_pins; i++) { + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_frame_buff_set->ia_css_isys_output_pin_payload[i = %d].ia_css_return_token = %016lxu\n" + "\t\t\tia_css_isys_frame_buff_set->ia_css_isys_output_pin_payload[i = %d].ia_css_input_buffer_css_address = %08xu\n" + , i + , (unsigned long int) + next_frame->output_pins[i].out_buf_id + , i + , next_frame->output_pins[i].addr); + } + IA_CSS_TRACE_2(ISYSAPI, VERBOSE, + "\tia_css_isys_frame_buff_set->process_group_light.ia_css_return_token = %016lxu\n" + "\t\t\tia_css_isys_frame_buff_set->process_group_light.ia_css_input_buffer_css_address = %08xu\n" + , (unsigned long int) + next_frame->process_group_light.param_buf_id + , next_frame->process_group_light.addr); + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_frame_buff_set->send_irq_sof = %d\n" + "\t\t\tia_css_isys_frame_buff_set->send_irq_eof = %d\n" + "\t\t\tia_css_isys_frame_buff_set->send_resp_sof = %d\n" + "\t\t\tia_css_isys_frame_buff_set->send_resp_eof = %d\n" + , (int) next_frame->send_irq_sof + , (int) next_frame->send_irq_eof + , (int) next_frame->send_resp_sof + , (int) next_frame->send_resp_eof); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "-------------------------------------------------------\n"); + return 0; +} + +/** + * print_isys_resp_info - formatted print function for + * struct ia_css_isys_frame_buff_set *next_frame variable + */ +int print_isys_resp_info(struct ia_css_isys_resp_info *received_response) +{ + verifret(received_response != NULL, EFAULT); + + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, "ISYS_RESPONSE_INFO\n" + "-------------------------------------------------------\n"); + switch 
(received_response->type) { + case IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_OPEN_DONE\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_START_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_STOP_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_STOP_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_FLUSH_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_FLUSH_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CLOSE_ACK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_CLOSE_ACK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_PIN_DATA_READY\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_PIN_DATA_WATERMARK: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_PIN_DATA_WATERMARK\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_SOF: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_FRAME_SOF\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_EOF: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_FRAME_EOF\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_PIN_DATA_SKIPPED: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_PIN_DATA_SKIPPED\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED\n"); + break; + case IA_CSS_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED: + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = IA_CSS_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED\n"); + break; + default: + IA_CSS_TRACE_0(ISYSAPI, ERROR, + "\tia_css_isys_resp_info.ia_css_isys_resp_type = INVALID\n"); + break; + } + + IA_CSS_TRACE_4(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.type = %d\n" + "\t\t\tia_css_isys_resp_info.stream_handle = %d\n" + "\t\t\tia_css_isys_resp_info.time_stamp[0] = %d\n" + 
"\t\t\tia_css_isys_resp_info.time_stamp[1] = %d\n", + received_response->type, + received_response->stream_handle, + received_response->timestamp[0], + received_response->timestamp[1]); + IA_CSS_TRACE_7(ISYSAPI, VERBOSE, + "\tia_css_isys_resp_info.error = %d\n" + "\t\t\tia_css_isys_resp_info.error_details = %d\n" + "\t\t\tia_css_isys_resp_info.pin.out_buf_id = %016llxu\n" + "\t\t\tia_css_isys_resp_info.pin.addr = %016llxu\n" + "\t\t\tia_css_isys_resp_info.pin_id = %d\n" + "\t\t\tia_css_isys_resp_info.frame_counter = %d\n," + "\t\t\tia_css_isys_resp_info.written_direct = %d\n", + received_response->error, + received_response->error_details, + (unsigned long long)received_response->pin.out_buf_id, + (unsigned long long)received_response->pin.addr, + received_response->pin_id, + received_response->frame_counter, + received_response->written_direct); + IA_CSS_TRACE_0(ISYSAPI, VERBOSE, + "------------------------------------------------------\n"); + + return 0; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public_trace.h new file mode 100644 index 000000000000..5b6508058fd6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isys_public_trace.h @@ -0,0 +1,55 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_ISYS_PUBLIC_TRACE_H
+#define __IA_CSS_ISYS_PUBLIC_TRACE_H
+
+#include "ia_css_isysapi_trace.h"
+
+#include "ia_css_isysapi_types.h"
+
+#include "ia_css_isysapi.h"
+
+#include "ia_css_isys_private.h"
+/**
+ * print_handle_context - formatted print function for
+ * struct ia_css_isys_context *ctx variable
+ */
+int print_handle_context(struct ia_css_isys_context *ctx);
+
+/**
+ * print_device_config_data - formatted print function for
+ * struct ia_css_isys_device_cfg_data *config variable
+ */
+int print_device_config_data(const struct ia_css_isys_device_cfg_data *config);
+/**
+ * print_stream_config_data - formatted print function for
+ * ia_css_isys_stream_cfg_data stream_cfg variable
+ */
+int print_stream_config_data(
+ const struct ia_css_isys_stream_cfg_data *stream_cfg);
+/**
+ * print_isys_frame_buff_set - formatted print function for
+ * struct ia_css_isys_frame_buff_set *next_frame variable
+ */
+int print_isys_frame_buff_set(
+ const struct ia_css_isys_frame_buff_set *next_frame,
+ const unsigned int nof_output_pins);
+/**
+ * print_isys_resp_info - formatted print function for
+ * struct ia_css_isys_resp_info *received_response variable
+ */
+int print_isys_resp_info(struct ia_css_isys_resp_info *received_response);
+
+#endif /* __IA_CSS_ISYS_PUBLIC_TRACE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isysapi_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isysapi_trace.h
new file mode 100644
index 000000000000..c6b944f245b1
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/isysapi/src/ia_css_isysapi_trace.h
@@ -0,0 +1,79 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IA_CSS_ISYSAPI_TRACE_H +#define __IA_CSS_ISYSAPI_TRACE_H + +#include "ia_css_trace.h" + +#define ISYSAPI_TRACE_LOG_LEVEL_OFF 0 +#define ISYSAPI_TRACE_LOG_LEVEL_NORMAL 1 +#define ISYSAPI_TRACE_LOG_LEVEL_DEBUG 2 + +/* ISYSAPI and all the submodules in ISYSAPI will have + * the default tracing level set to this level + */ +#define ISYSAPI_TRACE_CONFIG_DEFAULT ISYSAPI_TRACE_LOG_LEVEL_NORMAL + +/* In case ISYSAPI_TRACE_CONFIG is not defined, set it to default level */ +#if !defined(ISYSAPI_TRACE_CONFIG) + #define ISYSAPI_TRACE_CONFIG ISYSAPI_TRACE_CONFIG_DEFAULT +#endif + +/* ISYSAPI Module tracing backend is mapped to + * TUNIT tracing for target platforms + */ +#ifdef IA_CSS_TRACE_PLATFORM_CELL + #ifndef HRT_CSIM + #define ISYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE + #else + #define ISYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + #endif +#else + #define ISYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#endif + +#if (defined(ISYSAPI_TRACE_CONFIG)) + /* TRACE_OFF */ + #if ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_OFF + #define ISYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED + /* TRACE_NORMAL */ + #elif ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_NORMAL + #define ISYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED + #define ISYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED + /* TRACE_DEBUG */ + #elif ISYSAPI_TRACE_CONFIG == ISYSAPI_TRACE_LOG_LEVEL_DEBUG + #define ISYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_ENABLED + #define ISYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No ISYSAPI_TRACE_CONFIG Tracing level defined" + #endif +#else + #error "ISYSAPI_TRACE_CONFIG not defined" +#endif + +#endif /* __IA_CSS_ISYSAPI_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir.h new file mode 100644 index 000000000000..6bc2fa708d43 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir.h @@ -0,0 +1,100 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_PKG_DIR_H
+#define __IA_CSS_PKG_DIR_H
+
+#include "ia_css_pkg_dir_storage_class.h"
+#include "ia_css_pkg_dir_types.h"
+#include "type_support.h"
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+const ia_css_pkg_dir_entry_t *ia_css_pkg_dir_get_entry(
+ const ia_css_pkg_dir_t *pkg_dir,
+ uint32_t index
+);
+
+/* The user is expected to call the verify function manually;
+ * the other functions do not call it internally
+ */
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+int ia_css_pkg_dir_verify_header(
+ const ia_css_pkg_dir_entry_t *pkg_dir_header
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_get_num_entries(
+ const ia_css_pkg_dir_entry_t *pkg_dir_header
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_get_size_in_bytes(
+ const ia_css_pkg_dir_entry_t *pkg_dir_header
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+enum ia_css_pkg_dir_version ia_css_pkg_dir_get_version(
+ const ia_css_pkg_dir_entry_t *pkg_dir_header
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint16_t ia_css_pkg_dir_set_version(
+ ia_css_pkg_dir_entry_t *pkg_dir_header,
+ enum ia_css_pkg_dir_version version
+);
+
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_entry_get_address_lo(
+ const ia_css_pkg_dir_entry_t *entry
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_entry_get_address_hi(
+ const ia_css_pkg_dir_entry_t *entry
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_entry_get_size(
+ const ia_css_pkg_dir_entry_t *entry
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint16_t ia_css_pkg_dir_entry_get_version(
+ const ia_css_pkg_dir_entry_t *entry
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint8_t ia_css_pkg_dir_entry_get_type(
+ const ia_css_pkg_dir_entry_t *entry
+);
+
+/* Get the address of the specified entry in the PKG_DIR
+ * Note: This function expects the complete PKG_DIR in the same memory space
+ * and the entries to contain offsets, not addresses.
+ */
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+void *ia_css_pkg_dir_get_entry_address(
+ const ia_css_pkg_dir_t *pkg_dir,
+ uint32_t index
+);
+
+#ifdef __IA_CSS_PKG_DIR_INLINE__
+
+#include "ia_css_pkg_dir_impl.h"
+
+#endif
+
+#endif /* __IA_CSS_PKG_DIR_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_iunit.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_iunit.h
new file mode 100644
index 000000000000..2e45eaa52727
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_iunit.h
@@ -0,0 +1,47 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PKG_DIR_IUNIT_H
+#define __IA_CSS_PKG_DIR_IUNIT_H
+
+/* In the boot flow, pkg_dir supports up to 16 entries:
+ * pkg_dir_header + Psys_server pg + Isys_server pg + 13 Client pg
+ */
+
+enum {
+ IA_CSS_PKG_DIR_SIZE = 16,
+ IA_CSS_PKG_DIR_ENTRIES = IA_CSS_PKG_DIR_SIZE - 1
+};
+
+#define IUNIT_MAX_CLIENT_PKG_ENTRIES 13
+
+/* Example assignment of unique identifiers for the FW components.
+ * This should match the identifiers in the manifest
+ */
+enum ia_css_pkg_dir_entry_type {
+ IA_CSS_PKG_DIR_HEADER = 0,
+ IA_CSS_PKG_DIR_PSYS_SERVER_PG,
+ IA_CSS_PKG_DIR_ISYS_SERVER_PG,
+ IA_CSS_PKG_DIR_CLIENT_PG
+};
+
+/* Fixed entries in the package directory */
+enum ia_css_pkg_dir_index {
+ IA_CSS_PKG_DIR_PSYS_INDEX = 0,
+ IA_CSS_PKG_DIR_ISYS_INDEX = 1,
+ IA_CSS_PKG_DIR_CLIENT_0 = 2
+};
+
+#endif /* __IA_CSS_PKG_DIR_IUNIT_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_storage_class.h
new file mode 100644
index 000000000000..27e87d1e6774
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_storage_class.h
@@ -0,0 +1,30 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PKG_DIR_STORAGE_CLASS_H
+#define __IA_CSS_PKG_DIR_STORAGE_CLASS_H
+
+
+#include "storage_class.h"
+
+#ifndef __IA_CSS_PKG_DIR_INLINE__
+#define IA_CSS_PKG_DIR_STORAGE_CLASS_H STORAGE_CLASS_EXTERN
+#define IA_CSS_PKG_DIR_STORAGE_CLASS_C
+#else
+#define IA_CSS_PKG_DIR_STORAGE_CLASS_H STORAGE_CLASS_INLINE
+#define IA_CSS_PKG_DIR_STORAGE_CLASS_C STORAGE_CLASS_INLINE
+#endif
+
+#endif /* __IA_CSS_PKG_DIR_STORAGE_CLASS_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_types.h
new file mode 100644
index 000000000000..ec0ee18b41e1
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/interface/ia_css_pkg_dir_types.h
@@ -0,0 +1,42 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IA_CSS_PKG_DIR_TYPES_H +#define __IA_CSS_PKG_DIR_TYPES_H + +#include "type_support.h" + +struct ia_css_pkg_dir_entry { + uint32_t address[2]; + uint32_t size; + uint16_t version; + uint8_t type; + uint8_t unused; +}; + +typedef void ia_css_pkg_dir_t; +typedef struct ia_css_pkg_dir_entry ia_css_pkg_dir_entry_t; + +/* The version field of the pkg_dir header defines + * if entries contain offsets or pointers + */ +/* This is temporary, until all pkg_dirs use pointers */ +enum ia_css_pkg_dir_version { + IA_CSS_PKG_DIR_POINTER, + IA_CSS_PKG_DIR_OFFSET +}; + + +#endif /* __IA_CSS_PKG_DIR_TYPES_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/pkg_dir.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/pkg_dir.mk new file mode 100644 index 000000000000..a4b4aaa4995e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/pkg_dir.mk @@ -0,0 +1,30 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is PKG DIR + +PKG_DIR_DIR = $${MODULES_DIR}/pkg_dir +PKG_DIR_INTERFACE = $(PKG_DIR_DIR)/interface +PKG_DIR_SOURCES = $(PKG_DIR_DIR)/src + +PKG_DIR_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir.c +PKG_DIR_CPPFLAGS = -I$(PKG_DIR_INTERFACE) +PKG_DIR_CPPFLAGS += -I$(PKG_DIR_SOURCES) +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/../isp/kernels/io_ls/common +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/fw_abi_common_types/ipu +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/fw_abi_common_types/ipu/$(FW_ABI_IPU_TYPES_VERSION) + +PKG_DIR_CREATE_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir_create.c +PKG_DIR_UPDATE_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir_update.c + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir.c new file mode 100644 index 000000000000..348b56833e06 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir.c @@ -0,0 +1,27 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifdef __IA_CSS_PKG_DIR_INLINE__ + +#include "storage_class.h" + +STORAGE_CLASS_INLINE int __ia_css_pkg_dir_avoid_warning_on_empty_file(void) +{ + return 0; +} + +#else +#include "ia_css_pkg_dir_impl.h" + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir_impl.h new file mode 100644 index 000000000000..ca5564c7d990 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir_impl.h @@ -0,0 +1,202 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PKG_DIR_IMPL_H +#define __IA_CSS_PKG_DIR_IMPL_H + +#include "ia_css_pkg_dir.h" +#include "ia_css_pkg_dir_int.h" +#include "error_support.h" +#include "type_support.h" +#include "assert_support.h" + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +const ia_css_pkg_dir_entry_t *ia_css_pkg_dir_get_entry( + const ia_css_pkg_dir_t *pkg_dir, + uint32_t index) +{ + DECLARE_ERRVAL + struct ia_css_pkg_dir_entry *pkg_dir_header = NULL; + + verifexitval(pkg_dir != NULL, EFAULT); + + pkg_dir_header = (struct ia_css_pkg_dir_entry *)pkg_dir; + + /* First entry of the structure is the header, skip that */ + index++; + verifexitval(index < pkg_dir_header->size, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + return NULL; + } + return &(pkg_dir_header[index]); +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +int ia_css_pkg_dir_verify_header(const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + verifexitval(pkg_dir_header != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + return -1; + } + return ((pkg_dir_header->address[0] == PKG_DIR_MAGIC_VAL_0) + && (pkg_dir_header->address[1] == PKG_DIR_MAGIC_VAL_1)) ? 
+ 0 : -1; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_get_num_entries( + const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + uint32_t size = 0; + + verifexitval(pkg_dir_header != NULL, EFAULT); + size = pkg_dir_header->size; + verifexitval(size > 0, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return size - 1; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +enum ia_css_pkg_dir_version +ia_css_pkg_dir_get_version(const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + assert(pkg_dir_header != NULL); + return pkg_dir_header->version; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint16_t ia_css_pkg_dir_set_version(ia_css_pkg_dir_entry_t *pkg_dir_header, + enum ia_css_pkg_dir_version version) +{ + DECLARE_ERRVAL + + verifexitval(pkg_dir_header != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 1; + } + pkg_dir_header->version = version; + return 0; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_get_size_in_bytes( + const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + + verifexitval(pkg_dir_header != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return sizeof(struct ia_css_pkg_dir_entry) * pkg_dir_header->size; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_address_lo( + const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->address[0]; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_address_hi( + const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->address[1]; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_size(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->size; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint16_t ia_css_pkg_dir_entry_get_version(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->version; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint8_t ia_css_pkg_dir_entry_get_type(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->type; +} + + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +void *ia_css_pkg_dir_get_entry_address(const ia_css_pkg_dir_t *pkg_dir, + uint32_t index) +{ + void *entry_blob = NULL; + const ia_css_pkg_dir_entry_t *pkg_dir_entry = + ia_css_pkg_dir_get_entry(pkg_dir, index-1); + + if ((pkg_dir_entry != NULL) && + (ia_css_pkg_dir_entry_get_size(pkg_dir_entry) > 0)) { + assert(ia_css_pkg_dir_entry_get_address_hi(pkg_dir_entry) == 0); + entry_blob = (void *)((char *)pkg_dir + + ia_css_pkg_dir_entry_get_address_lo(pkg_dir_entry)); + } + return entry_blob; +} + +#endif /* __IA_CSS_PKG_DIR_IMPL_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir_int.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir_int.h new file mode 100644 index 000000000000..3a50245261e5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/pkg_dir/src/ia_css_pkg_dir_int.h @@ -0,0 +1,50 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PKG_DIR_INT_H +#define __IA_CSS_PKG_DIR_INT_H + +/* + * Package Dir structure as specified in CSE FAS + * + * PKG DIR Header + * Qword 63:56 55 54:48 47:32 31:24 23:0 + * 0 "_IUPKDR_" + * 1 Rsvd Rsvd Type Version Rsvd Size + * + * Version: Version of the Structure + * Size: Size of the entire table (including header) in 16 byte chunks + * Type: Must be 0 for header + * + * Figure 13: PKG DIR Header + * + * + * PKG DIR Entry + * Qword 63:56 55 54:48 47:32 31:24 23:0 + * N Address/Offset + * N+1 Rsvd Rsvd Type Version Rsvd Size + * + * Version: Version # of the Component + * Size: Size of the component in bytes + * Type: Component Identifier + */ + +#define PKG_DIR_SIZE_BITS 24 +#define PKG_DIR_TYPE_BITS 7 + +#define PKG_DIR_MAGIC_VAL_1 (('_' << 24) | ('I' << 16) | ('U' << 8) | 'P') +#define PKG_DIR_MAGIC_VAL_0 (('K' << 24) | ('D' << 16) | ('R' << 8) | '_') + +#endif /* __IA_CSS_PKG_DIR_INT_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/port_env_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/port_env_struct.h new file mode 100644 index 000000000000..4d39a4739a8b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/port_env_struct.h @@ -0,0 +1,24 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PORT_ENV_STRUCT_H +#define __PORT_ENV_STRUCT_H + +struct port_env { + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; +}; + +#endif /* __PORT_ENV_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/queue.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/queue.h new file mode 100644 index 000000000000..b233ab3baf01 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/queue.h @@ -0,0 +1,40 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __QUEUE_H +#define __QUEUE_H + +#include "queue_struct.h" +#include "port_env_struct.h" + +/* + * SYS queues are created by the host + * SYS queues cannot be accessed through the queue interface + * To send data into a queue a send_port must be opened. + * To receive data from a queue, a recv_port must be opened. + */ + +/* return required buffer size for queue */ +unsigned int +sys_queue_buf_size(unsigned int size, unsigned int token_size); + +/* + * initialize a queue that can hold at least 'size' tokens of + * 'token_size' bytes. + */ +void +sys_queue_init(struct sys_queue *q, unsigned int size, + unsigned int token_size, struct sys_queue_res *res); + +#endif /* __QUEUE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/queue_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/queue_struct.h new file mode 100644 index 000000000000..ef48fcfded2b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/queue_struct.h @@ -0,0 +1,47 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __QUEUE_STRUCT_H +#define __QUEUE_STRUCT_H + +/* queue description, shared between sender and receiver */ + +#include "type_support.h" + +#ifdef __VIED_CELL +typedef struct {uint32_t v[2]; } host_buffer_address_t; +#else +typedef uint64_t host_buffer_address_t; +#endif + +typedef uint32_t vied_buffer_address_t; + + +struct sys_queue { + host_buffer_address_t host_address; + vied_buffer_address_t vied_address; + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* reg no in subsystem's regmem */ + unsigned int rd_reg; + unsigned int _align; +}; + +struct sys_queue_res { + host_buffer_address_t host_address; + vied_buffer_address_t vied_address; + unsigned int reg; +}; + +#endif /* __QUEUE_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/recv_port.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/recv_port.h new file mode 100644 index 000000000000..cce253b26668 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/recv_port.h @@ -0,0 +1,34 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __RECV_PORT_H
+#define __RECV_PORT_H
+
+
+struct recv_port;
+struct sys_queue;
+struct port_env;
+
+void
+recv_port_open(struct recv_port *p, const struct sys_queue *q,
+ const struct port_env *env);
+
+unsigned int
+recv_port_available(const struct recv_port *p);
+
+unsigned int
+recv_port_transfer(const struct recv_port *p, void *data);
+
+
+#endif /* __RECV_PORT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/recv_port_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/recv_port_struct.h
new file mode 100644
index 000000000000..52ec563b13cf
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/recv_port_struct.h
@@ -0,0 +1,32 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __RECV_PORT_STRUCT_H
+#define __RECV_PORT_STRUCT_H
+
+#include "buffer_type.h"
+
+struct recv_port {
+ buffer_address buffer; /* address of buffer in DDR */
+ unsigned int size;
+ unsigned int token_size;
+ unsigned int wr_reg; /* index of write pointer located in regmem */
+ unsigned int rd_reg; /* index of read pointer located in regmem */
+
+ unsigned int mmid;
+ unsigned int ssid;
+ unsigned int mem_addr; /* address of memory containing regmem */
+};
+
+#endif /* __RECV_PORT_STRUCT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/send_port.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/send_port.h
new file mode 100644
index 000000000000..04a160f3f019
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/send_port.h
@@ -0,0 +1,59 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __SEND_PORT_H
+#define __SEND_PORT_H
+
+
+/*
+ * A send port can be used to send tokens into a queue.
+ * The interface can be used on any type of processor (host, SP, ...)
+ */
+
+struct send_port;
+struct sys_queue;
+struct port_env;
+
+/*
+ * Open a send port on a queue. After the port is opened, tokens can be sent
+ */
+void
+send_port_open(struct send_port *p, const struct sys_queue *q,
+ const struct port_env *env);
+
+/*
+ * Determine how many tokens can be sent
+ */
+unsigned int
+send_port_available(const struct send_port *p);
+
+/*
+ * Send a token via a send port. The function returns the number of
+ * tokens that have been sent:
+ * 1: the token was accepted
+ * 0: the token was not accepted (full queue)
+ * The size of a token is determined at initialization.
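+ *
+ * Illustrative usage (a sketch only; 'p' is a send_port opened with
+ * send_port_open() and 'tok' is a caller-defined token of
+ * token_size bytes):
+ *
+ *   if (send_port_transfer(&p, &tok) == 0)
+ *           ; /* queue full: try again later */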
+ */ +unsigned int +send_port_transfer(const struct send_port *p, const void *data); + + +#endif /* __SEND_PORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/send_port_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/send_port_struct.h new file mode 100644 index 000000000000..f834c62bc3db --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/interface/send_port_struct.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __SEND_PORT_STRUCT_H +#define __SEND_PORT_STRUCT_H + +#include "buffer_type.h" + +struct send_port { + buffer_address buffer; + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* index of write pointer in regmem */ + unsigned int rd_reg; /* index of read pointer in regmem */ + + unsigned int mmid; + unsigned int ssid; + unsigned int mem_addr; +}; + +#endif /* __SEND_PORT_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/port.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/port.mk new file mode 100644 index 000000000000..b3801247802e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/port.mk @@ -0,0 +1,31 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is PORT + +PORT_DIR=$${MODULES_DIR}/port + +PORT_INTERFACE=$(PORT_DIR)/interface +PORT_SOURCES1=$(PORT_DIR)/src + +PORT_HOST_FILES += $(PORT_SOURCES1)/send_port.c +PORT_HOST_FILES += $(PORT_SOURCES1)/recv_port.c +PORT_HOST_FILES += $(PORT_SOURCES1)/queue.c + +PORT_HOST_CPPFLAGS += -I$(PORT_INTERFACE) + +PORT_FW_FILES += $(PORT_SOURCES1)/send_port.c +PORT_FW_FILES += $(PORT_SOURCES1)/recv_port.c + +PORT_FW_CPPFLAGS += -I$(PORT_INTERFACE) diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/queue.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/queue.c new file mode 100644 index 000000000000..eeec99dfe2d0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/queue.c @@ -0,0 +1,47 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#include "queue.h"
+
+#include "regmem_access.h"
+#include "port_env_struct.h"
+
+unsigned int sys_queue_buf_size(unsigned int size, unsigned int token_size)
+{
+ return (size + 1) * token_size;
+}
+
+void
+sys_queue_init(struct sys_queue *q, unsigned int size, unsigned int token_size,
+ struct sys_queue_res *res)
+{
+ unsigned int buf_size;
+
+ q->size = size + 1;
+ q->token_size = token_size;
+ buf_size = sys_queue_buf_size(size, token_size);
+
+ /* acquire the shared buffer space */
+ q->host_address = res->host_address;
+ res->host_address += buf_size;
+ q->vied_address = res->vied_address;
+ res->vied_address += buf_size;
+
+ /* acquire the shared read and write pointers */
+ q->wr_reg = res->reg;
+ res->reg++;
+ q->rd_reg = res->reg;
+ res->reg++;
+
+}
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/recv_port.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/recv_port.c
new file mode 100644
index 000000000000..31b36e9ceafb
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/recv_port.c
@@ -0,0 +1,95 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#include "recv_port.h" +#include "port_env_struct.h" /* for port_env */ +#include "queue_struct.h" /* for sys_queue */ +#include "recv_port_struct.h" /* for recv_port */ +#include "buffer_access.h" /* for buffer_load, buffer_address */ +#include "regmem_access.h" /* for regmem_load_32, regmem_store_32 */ +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "math_support.h" /* for OP_std_modadd */ +#include "type_support.h" /* for HOST_ADDRESS */ + +#ifndef __VIED_CELL +#include "cpu_mem_support.h" /* for ia_css_cpu_mem_cache_invalidate */ +#endif + +void +recv_port_open(struct recv_port *p, const struct sys_queue *q, + const struct port_env *env) +{ + p->mmid = env->mmid; + p->ssid = env->ssid; + p->mem_addr = env->mem_addr; + + p->size = q->size; + p->token_size = q->token_size; + p->wr_reg = q->wr_reg; + p->rd_reg = q->rd_reg; + +#ifdef __VIED_CELL + p->buffer = q->vied_address; +#else + p->buffer = q->host_address; +#endif +} + +STORAGE_CLASS_INLINE unsigned int +recv_port_index(const struct recv_port *p, unsigned int i) +{ + unsigned int rd = regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + + return OP_std_modadd(rd, i, p->size); +} + +unsigned int +recv_port_available(const struct recv_port *p) +{ + int wr = (int)regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + int rd = (int)regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + + return OP_std_modadd(wr, -rd, p->size); +} + +STORAGE_CLASS_INLINE void +recv_port_copy(const struct recv_port *p, unsigned int i, void *data) +{ + unsigned int rd = recv_port_index(p, i); + unsigned int token_size = p->token_size; + buffer_address addr = p->buffer + (rd * token_size); +#ifndef __VIED_CELL + ia_css_cpu_mem_cache_invalidate((void *)HOST_ADDRESS(p->buffer), + token_size*p->size); +#endif + buffer_load(addr, data, token_size, p->mmid); +} + +STORAGE_CLASS_INLINE void +recv_port_release(const struct recv_port *p, unsigned int i) +{ + unsigned int rd = recv_port_index(p, i); + + regmem_store_32(p->mem_addr, p->rd_reg, rd, p->ssid); +} + +unsigned int +recv_port_transfer(const struct recv_port *p, void *data) +{ + if (!recv_port_available(p)) + return 0; + recv_port_copy(p, 0, data); + recv_port_release(p, 1); + return 1; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/send_port.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/send_port.c new file mode 100644 index 000000000000..8d1fba08c5d5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/port/src/send_port.c @@ -0,0 +1,94 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "send_port.h" +#include "queue_struct.h" /* for sys_queue */ +#include "send_port_struct.h" /* for send_port */ +#include "port_env_struct.h" /* for port_env */ +#include "regmem_access.h" /* for regmem_load_32, regmem_store_32 */ +#include "buffer_access.h" /* for buffer_store, buffer_address */ +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "math_support.h" /* for OP_std_modadd */ +#include "type_support.h" /* for HOST_ADDRESS */ + +#ifndef __VIED_CELL +#include "cpu_mem_support.h" /* for ia_css_cpu_mem_cache_flush */ +#endif + +void +send_port_open(struct send_port *p, const struct sys_queue *q, + const struct port_env *env) +{ + p->mmid = env->mmid; + p->ssid = env->ssid; + p->mem_addr = env->mem_addr; + + p->size = q->size; + p->token_size = q->token_size; + p->wr_reg = q->wr_reg; + p->rd_reg = q->rd_reg; +#ifdef __VIED_CELL + p->buffer = q->vied_address; +#else + p->buffer = q->host_address; +#endif +} + +STORAGE_CLASS_INLINE unsigned int +send_port_index(const struct send_port *p, unsigned int i) +{ + unsigned int wr = regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + + return OP_std_modadd(wr, i, p->size); +} + +unsigned int +send_port_available(const struct send_port *p) +{ + int rd = (int)regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + int wr = (int)regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + + return OP_std_modadd(rd, -(wr+1), p->size); +} + +STORAGE_CLASS_INLINE void +send_port_copy(const struct send_port *p, unsigned int i, const void *data) +{ + unsigned int wr = send_port_index(p, i); + unsigned int token_size = p->token_size; + buffer_address addr = p->buffer + (wr * token_size); + + buffer_store(addr, data, token_size, p->mmid); +#ifndef __VIED_CELL + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(addr), token_size); +#endif +} + +STORAGE_CLASS_INLINE void +send_port_release(const struct send_port *p, unsigned int i) +{ + unsigned int wr = send_port_index(p, i); + + regmem_store_32(p->mem_addr, p->wr_reg, wr, p->ssid); +} + +unsigned int +send_port_transfer(const struct send_port *p, const void *data) +{ + if (!send_port_available(p)) + return 0; + send_port_copy(p, 0, data); + send_port_release(p, 1); + return 1; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/isys/cnlB0_gen_reg_dump/ia_css_debug_dump.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/isys/cnlB0_gen_reg_dump/ia_css_debug_dump.c new file mode 100644 index 000000000000..c51d65c8cb64 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/isys/cnlB0_gen_reg_dump/ia_css_debug_dump.c @@ -0,0 +1,15 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. 
+*/
+#include "ia_css_debug_dump.h"
+
+void ia_css_debug_dump(void) {}
\ No newline at end of file
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/isys/cnlB0_gen_reg_dump/ia_css_debug_dump.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/isys/cnlB0_gen_reg_dump/ia_css_debug_dump.h
new file mode 100644
index 000000000000..5dd23ddbd180
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/isys/cnlB0_gen_reg_dump/ia_css_debug_dump.h
@@ -0,0 +1,17 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+* Copyright (c) 2010 - 2018, Intel Corporation.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms and conditions of the GNU General Public License,
+* version 2, as published by the Free Software Foundation.
+*
+* This program is distributed in the hope it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*/
+#ifndef __IA_CSS_DEBUG_DUMP_H_
+#define __IA_CSS_DEBUG_DUMP_H_
+void ia_css_debug_dump(void);
+#endif /* __IA_CSS_DEBUG_DUMP_H_ */
\ No newline at end of file
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/reg_dump_generic_bridge.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/reg_dump_generic_bridge.c
new file mode 100644
index 000000000000..18d0c9806eda
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/reg_dump/src/reg_dump_generic_bridge.c
@@ -0,0 +1,40 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#include <vied/vied_subsystem_access.h> /* for vied_subsystem_load_32 */
+#include "ia_css_trace.h"
+#ifdef USE_LOGICAL_SSIDS
+/*
+  Logical names can be used to define the SSID.
+  In order to resolve these names the following include file should be
+  provided and the define above should be enabled.
+*/
+#include
+#endif
+
+#define REG_DUMP_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE
+#define REG_DUMP_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED
+
+/* SSID value is defined in test makefiles as either isys0 or psys0 */
+#define REG_DUMP_READ_REGISTER(addr) vied_subsystem_load_32(SSID, addr)
+
+#define REG_DUMP_PRINT_0(...) \
+EXPAND_VA_ARGS(IA_CSS_TRACE_0(REG_DUMP, VERBOSE, __VA_ARGS__))
+#define REG_DUMP_PRINT_1(...) \
+EXPAND_VA_ARGS(IA_CSS_TRACE_1(REG_DUMP, VERBOSE, __VA_ARGS__))
+#define EXPAND_VA_ARGS(x) x
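+
+/*
+ * As an illustrative sketch (register offset hypothetical):
+ * REG_DUMP_PRINT_1("reg 0x%x\n", REG_DUMP_READ_REGISTER(0x10)) routes to
+ * the IA_CSS_TRACE_1 backend selected above. The extra EXPAND_VA_ARGS
+ * pass forces a rescan of __VA_ARGS__, which keeps variadic expansion
+ * consistent across compilers (notably MSVC).
+ */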
+
+/* Including generated source code for reg_dump */
+#include "ia_css_debug_dump.c"
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/interface/regmem_access.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/interface/regmem_access.h
new file mode 100644
index 000000000000..d4576af936f6
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/interface/regmem_access.h
@@ -0,0 +1,67 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __REGMEM_ACCESS_H
+#define __REGMEM_ACCESS_H
+
+#include "storage_class.h"
+
+enum regmem_id {
+	/* pass pkg_dir address to SPC in non-secure mode */
+	PKG_DIR_ADDR_REG = 0,
+	/* pass syscom configuration to SPC */
+	SYSCOM_CONFIG_REG = 1,
+	/* syscom state - modified by SP */
+	SYSCOM_STATE_REG = 2,
+	/* syscom commands - modified by the host */
+	SYSCOM_COMMAND_REG = 3,
+	/* Store interrupt status - updated by SP */
+	SYSCOM_IRQ_REG = 4,
+	/* Store VTL0_ADDR_MASK in trusted secure region - provided by host. */
+	SYSCOM_VTL0_ADDR_MASK = 5,
+#if HAS_DUAL_CMD_CTX_SUPPORT
+	/* Initialized if trustlet exists - updated by host */
+	TRUSTLET_STATUS = 6,
+	/* identify if SPC access blocker programming is completed - updated by SP */
+	AB_SPC_STATUS = 7,
+	/* first syscom queue pointer register */
+	SYSCOM_QPR_BASE_REG = 8
+#else
+	/* first syscom queue pointer register */
+	SYSCOM_QPR_BASE_REG = 6
+#endif
+};
+
+#if HAS_DUAL_CMD_CTX_SUPPORT
+/* Bit 0: for untrusted non-secure DRV driver on VTL0
+ * Bit 1: for trusted secure TEE driver on VTL1
+ */
+#define SYSCOM_IRQ_VTL0_MASK 0x1
+#define SYSCOM_IRQ_VTL1_MASK 0x2
+#endif
+
+STORAGE_CLASS_INLINE unsigned int
+regmem_load_32(unsigned int mem_address, unsigned int reg, unsigned int ssid);
+
+STORAGE_CLASS_INLINE void
+regmem_store_32(unsigned int mem_address, unsigned int reg, unsigned int value,
+	unsigned int ssid);
+
+#ifdef __VIED_CELL
+#include "regmem_access_cell.h"
+#else
+#include "regmem_access_host.h"
+#endif
+
+#endif /* __REGMEM_ACCESS_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/regmem.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/regmem.mk
new file mode 100644
index 000000000000..24ebc1c325d8
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/regmem.mk
@@ -0,0 +1,32 @@
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for +# more details +# +# +ifndef REGMEM_MK +REGMEM_MK=1 + +# MODULE is REGMEM + +REGMEM_DIR=$${MODULES_DIR}/regmem + +REGMEM_INTERFACE=$(REGMEM_DIR)/interface +REGMEM_SOURCES=$(REGMEM_DIR)/src + +REGMEM_HOST_FILES = +REGMEM_FW_FILES = $(REGMEM_SOURCES)/regmem.c + +REGMEM_CPPFLAGS = -I$(REGMEM_INTERFACE) -I$(REGMEM_SOURCES) +REGMEM_HOST_CPPFLAGS = $(REGMEM_CPPFLAGS) +REGMEM_FW_CPPFLAGS = $(REGMEM_CPPFLAGS) + +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/src/regmem_access_host.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/src/regmem_access_host.h new file mode 100644 index 000000000000..8878d7074fab --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/src/regmem_access_host.h @@ -0,0 +1,41 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __REGMEM_ACCESS_HOST_H +#define __REGMEM_ACCESS_HOST_H + +#include "regmem_access.h" /* implemented interface */ + +#include "storage_class.h" +#include "regmem_const.h" +#include +#include "ia_css_cmem.h" + +STORAGE_CLASS_INLINE unsigned int +regmem_load_32(unsigned int mem_addr, unsigned int reg, unsigned int ssid) +{ + /* No need to add REGMEM_OFFSET, it is already included in mem_addr. */ + return ia_css_cmem_load_32(ssid, mem_addr + (REGMEM_WORD_BYTES*reg)); +} + +STORAGE_CLASS_INLINE void +regmem_store_32(unsigned int mem_addr, unsigned int reg, + unsigned int value, unsigned int ssid) +{ + /* No need to add REGMEM_OFFSET, it is already included in mem_addr. */ + ia_css_cmem_store_32(ssid, mem_addr + (REGMEM_WORD_BYTES*reg), + value); +} + +#endif /* __REGMEM_ACCESS_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/src/regmem_const.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/src/regmem_const.h new file mode 100644 index 000000000000..ac7e3a98a434 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/regmem/src/regmem_const.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __REGMEM_CONST_H
+#define __REGMEM_CONST_H
+
+#ifndef REGMEM_SIZE
+#define REGMEM_SIZE (16)
+#endif /* REGMEM_SIZE */
+#ifndef REGMEM_OFFSET
+#define REGMEM_OFFSET (0)
+#endif /* REGMEM_OFFSET */
+#ifndef REGMEM_WORD_BYTES
+#define REGMEM_WORD_BYTES (4)
+#endif
+
+#endif /* __REGMEM_CONST_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/assert_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/assert_support.h
new file mode 100644
index 000000000000..28aed19409b9
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/assert_support.h
@@ -0,0 +1,200 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __ASSERT_SUPPORT_H
+#define __ASSERT_SUPPORT_H
+
+/* This file provides support for run-time assertions
+ * and compile-time assertions.
+ *
+ * Run-time assertions are provided via the following syntax:
+ *	assert(condition)
+ * Run-time assertions are disabled using the NDEBUG flag.
+ *
+ * Compile-time assertions are provided via the following syntax:
+ *	COMPILATION_ERROR_IF(condition);
+ * A compile-time assertion fails to compile if the condition is true.
+ * The condition must be constant, such that it can be evaluated
+ * at compile time.
+ *
+ * OP___assert is deprecated.
+ */
+
+#define IA_CSS_ASSERT(expr) assert(expr)
+
+#ifdef __KLOCWORK__
+/* Klocwork does not see that assert will lead to an abort, and there is
+ * no good way to tell this to KW. Since the code should not depend on
+ * assert to function (the assert could be disabled in a release build),
+ * it was decided to disable the assert for KW scans (by defining NDEBUG).
+ * see also:
+ * http://www.klocwork.com/products/documentation/current/
+ * Tuning_C/C%2B%2B_analysis#Assertions
+ */
+#define NDEBUG
+#endif /* __KLOCWORK__ */
+
+/**
+ * The following macro can help to test the size of a struct at compile
+ * time rather than at run-time. It does not work for all compilers; see
+ * below.
+ *
+ * Depending on the value of 'condition', the following macro is expanded to:
+ * - condition==true:
+ *	an expression containing an array declaration with negative size,
+ *	usually resulting in a compilation error
+ * - condition==false:
+ *	(void) 1; // C statement with no effect
+ *
+ * example:
+ * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) !=
+ *	SIZE_OF_HOST_SP_QUEUES_STRUCT);
+ *
+ * verify that the macro indeed triggers a compilation error with your compiler:
+ * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) !=
+ *	(sizeof(struct host_sp_queues)+1) );
+ *
+ * Not all compilers will trigger an error with this macro;
+ * use a search engine to search for BUILD_BUG_ON to find other methods.
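+ *
+ * Note the polarity difference between the two compile-time helpers
+ * defined below: COMPILATION_ERROR_IF(condition) breaks the build when
+ * its condition IS true, e.g. COMPILATION_ERROR_IF(sizeof(uint32_t) != 4)
+ * compiles only where uint32_t is 4 bytes wide, whereas CT_ASSERT(cnd)
+ * breaks the build when its condition does NOT hold.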
+ */
+#define COMPILATION_ERROR_IF(condition) \
+((void)sizeof(char[1 - 2*!!(condition)]))
+
+/* Compile time assertion */
+#ifndef CT_ASSERT
+#define CT_ASSERT(cnd) ((void)sizeof(char[(cnd)?1 : -1]))
+#endif /* CT_ASSERT */
+
+#ifdef NDEBUG
+
+#define assert(cnd) ((void)0)
+
+#else
+
+#include "storage_class.h"
+
+#if defined(_MSC_VER)
+#ifdef _KERNEL_MODE
+/* Windows kernel mode compilation */
+#include <wdm.h> /* for ASSERT */
+#define assert(cnd) ASSERT(cnd)
+#else
+/* Windows usermode compilation */
+#include <assert.h>
+#endif
+
+#elif defined(__HIVECC)
+
+/*
+ * target: assert disabled
+ * sched: assert enabled only when DEBUG_SCHED is defined
+ * unsched: assert enabled
+ */
+#if defined(HRT_HW)
+#define assert(cnd) ((void)0)
+#elif defined(HRT_SCHED) && !defined(DEBUG_SCHED)
+#define assert(cnd) ((void)0)
+#elif defined(PIPE_GENERATION)
+#define assert(cnd) ((void)0)
+#else
+#include <hive/support.h> /* for OP___csim_assert */
+#define assert(cnd) OP___csim_assert(cnd)
+#endif
+
+#elif defined(__KERNEL__)
+#include <linux/bug.h> /* for BUG, BUG_ON, WARN_ON */
+
+#ifndef KERNEL_ASSERT_TO_BUG
+#ifndef KERNEL_ASSERT_TO_BUG_ON
+#ifndef KERNEL_ASSERT_TO_WARN_ON
+#ifndef KERNEL_ASSERT_TO_WARN_ON_INF_LOOP
+#ifndef KERNEL_ASSERT_UNDEFINED
+/* Default */
+#define KERNEL_ASSERT_TO_BUG
+#endif /*KERNEL_ASSERT_UNDEFINED*/
+#endif /*KERNEL_ASSERT_TO_WARN_ON_INF_LOOP*/
+#endif /*KERNEL_ASSERT_TO_WARN_ON*/
+#endif /*KERNEL_ASSERT_TO_BUG_ON*/
+#endif /*KERNEL_ASSERT_TO_BUG*/
+
+#ifdef KERNEL_ASSERT_TO_BUG
+/* TODO: it would be cleaner to use this:
+ * #define assert(cnd) BUG_ON(cnd)
+ * but that causes many compiler warnings (==errors) under Android
+ * because it seems that the BUG_ON() macro is not seen as a check by
+ * gcc like the BUG() macro is. */
+#define assert(cnd) \
+	do { \
+		if (!(cnd)) { \
+			BUG(); \
+		} \
+	} while (0)
+#endif /*KERNEL_ASSERT_TO_BUG*/
+
+#ifdef KERNEL_ASSERT_TO_BUG_ON
+#define assert(cnd) BUG_ON(!(cnd))
+#endif /*KERNEL_ASSERT_TO_BUG_ON*/
+
+#ifdef KERNEL_ASSERT_TO_WARN_ON
+#define assert(cnd) WARN_ON(!(cnd))
+#endif /*KERNEL_ASSERT_TO_WARN_ON*/
+
+#ifdef KERNEL_ASSERT_TO_WARN_ON_INF_LOOP
+#define assert(cnd) \
+	do { \
+		int not_cnd = !(cnd); \
+		WARN_ON(not_cnd); \
+		if (not_cnd) { \
+			for (;;) { \
+			} \
+		} \
+	} while (0)
+#endif /*KERNEL_ASSERT_TO_WARN_ON_INF_LOOP*/
+
+#ifdef KERNEL_ASSERT_UNDEFINED
+#include KERNEL_ASSERT_DEFINITION_FILESTRING
+#endif /*KERNEL_ASSERT_UNDEFINED*/
+
+#elif defined(__FIST__) || defined(__GNUC__)
+
+#include "assert.h"
+
+#else /* default is for unknown environments */
+#define assert(cnd) ((void)0)
+#endif
+
+#endif /* NDEBUG */
+
+#ifndef PIPE_GENERATION
+/* Deprecated OP___assert, this is still used in ~1000 places
+ * in the code. This will be removed over time.
+ * The implementation for the pipe generation tool is in support.isp.h */
+#define OP___assert(cnd) assert(cnd)
+
+#ifdef C_RUN
+#define compile_time_assert(cond) OP___assert(cond)
+#else
+#include "storage_class.h"
+extern void _compile_time_assert(void);
+STORAGE_CLASS_INLINE void compile_time_assert(unsigned cond)
+{
+	/* Call undefined function if cond is false */
+	if (!cond)
+		_compile_time_assert();
+}
+#endif
+#endif /* PIPE_GENERATION */
+
+#endif /* __ASSERT_SUPPORT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/cpu_mem_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/cpu_mem_support.h
new file mode 100644
index 000000000000..fa349cac4b24
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/cpu_mem_support.h
@@ -0,0 +1,233 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __CPU_MEM_SUPPORT_H
+#define __CPU_MEM_SUPPORT_H
+
+#include "storage_class.h"
+#include "assert_support.h"
+#include "type_support.h"
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_copy(void *dst, const void *src, unsigned int size)
+{
+	/* memcpy cannot be used in Windows (the function is not allowed),
+	 * and the safer function memcpy_s is not available on other platforms.
+	 * Because usage of ia_css_cpu_mem_copy is minimal, we implement it
+	 * here in an easy, but sub-optimal way.
+	 */
+	unsigned int i;
+
+	assert(dst != NULL && src != NULL);
+
+	if (!(dst != NULL && src != NULL)) {
+		return NULL;
+	}
+	for (i = 0; i < size; i++) {
+		((char *)dst)[i] = ((char *)src)[i];
+	}
+	return dst;
+}
+
+#if defined(__KERNEL__)
+
+#include <linux/slab.h> /* for kmalloc, kfree */
+#include <linux/gfp.h> /* for GFP_KERNEL */
+#include <linux/string.h> /* for memset */
+#include <asm/cacheflush.h> /* for clflush_cache_range */
+
+/* TODO: remove, workaround for issue in hrt file ibuf_ctrl_2600_config.c
+ * error checking code added to SDK that uses calls to exit function
+ */
+#define exit(a) return
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_alloc(unsigned int size)
+{
+	return kmalloc(size, GFP_KERNEL);
+}
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_alloc_page_aligned(unsigned int size)
+{
+	return ia_css_cpu_mem_alloc(size); /* todo: align to page size */
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_protect(void *ptr, unsigned int size, int prot)
+{
+	/* nothing here yet */
+}
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_set_zero(void *dst, unsigned int size)
+{
+	return memset(dst, 0, size); /* available in kernel in linux/string.h */
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_free(void *ptr)
+{
+	kfree(ptr);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size)
+{
+	/* parameter check here */
+	if (ptr == NULL)
+		return;
+
+	clflush_cache_range(ptr, size);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size)
+{
+	/* for now same as flush */
+	ia_css_cpu_mem_cache_flush(ptr, size);
+}
+
+#elif defined(_MSC_VER)
+
+#include <stddef.h> /* for size_t */
+#include <stdlib.h>
+#include <string.h> /* for memset */
+
+extern void *hrt_malloc(size_t bytes, int zero_mem);
+extern void *hrt_free(void *ptr);
+extern void hrt_mem_cache_flush(void *ptr, unsigned int size);
+extern void hrt_mem_cache_invalidate(void *ptr, unsigned int size);
+
+#define malloc(a) hrt_malloc(a, 1)
+#define free(a) hrt_free(a)
+
+#define CSS_PAGE_SIZE (1<<12)
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_alloc(unsigned int size)
+{
+	return malloc(size);
+}
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_alloc_page_aligned(unsigned int size)
+{
+	unsigned int buffer_size = size;
+
+	/* Currently hrt_malloc calls the Windows ExAllocatePoolWithTag()
+	 * routine to request system memory. If the number of bytes is equal
+	 * to or bigger than the page size, then the returned address is page
+	 * aligned, but if it is smaller it is not necessarily page aligned.
+	 * We agreed with the Windows team that we allocate a full page
+	 * if it is less than the page size.
+	 */
+	if (buffer_size < CSS_PAGE_SIZE)
+		buffer_size = CSS_PAGE_SIZE;
+
+	return ia_css_cpu_mem_alloc(buffer_size);
+}
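+
+/*
+ * e.g. ia_css_cpu_mem_alloc_page_aligned(100) therefore requests a full
+ * CSS_PAGE_SIZE (4 KiB) block, so the returned address is page aligned.
+ */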
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_set_zero(void *dst, unsigned int size)
+{
+	return memset(dst, 0, size);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_free(void *ptr)
+{
+	free(ptr);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size)
+{
+#ifdef _KERNEL_MODE
+	hrt_mem_cache_flush(ptr, size);
+#else
+	(void)ptr;
+	(void)size;
+#endif
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size)
+{
+#ifdef _KERNEL_MODE
+	hrt_mem_cache_invalidate(ptr, size);
+#else
+	(void)ptr;
+	(void)size;
+#endif
+}
+
+#else
+
+#include <stdlib.h> /* for malloc, free */
+#include <string.h> /* for memset */
+#include <stdint.h>
+/* Needed for the MPROTECT */
+#include <sys/mman.h> /* for mprotect */
+#include <malloc.h> /* for memalign */
+#include <unistd.h> /* for sysconf */
+
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_alloc(unsigned int size)
+{
+	return malloc(size);
+}
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_alloc_page_aligned(unsigned int size)
+{
+	int pagesize;
+
+	pagesize = sysconf(_SC_PAGE_SIZE);
+	return memalign(pagesize, size);
+}
+
+STORAGE_CLASS_INLINE void*
+ia_css_cpu_mem_set_zero(void *dst, unsigned int size)
+{
+	return memset(dst, 0, size);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_free(void *ptr)
+{
+	free(ptr);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size)
+{
+	/* not needed in simulation */
+	(void)ptr;
+	(void)size;
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size)
+{
+	/* not needed in simulation */
+	(void)ptr;
+	(void)size;
+}
+
+#endif
+
+#endif /* __CPU_MEM_SUPPORT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/error_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/error_support.h
new file mode 100644
index 000000000000..9fe1f65125e6
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/error_support.h
@@ -0,0 +1,110 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __ERROR_SUPPORT_H
+#define __ERROR_SUPPORT_H
+
+#if defined(__KERNEL__)
+#include <linux/errno.h>
+#else
+#include <errno.h>
+#endif
+#include
+
+/* OS-independent definition of IA_CSS errno values */
+/* #define IA_CSS_EINVAL 1 */
+/* #define IA_CSS_EFAULT 2 */
+
+#ifdef __HIVECC
+#define ERR_EMBEDDED 1
+#else
+#define ERR_EMBEDDED 0
+#endif
+
+#if ERR_EMBEDDED
+#define DECLARE_ERRVAL
+#else
+#define DECLARE_ERRVAL \
+	int _errval = 0;
+#endif
+
+/* Use ALWAYS_FALSE in while() to prevent compiler warnings in Windows */
+#define ALWAYS_FALSE ((void)0, 0)
+
+#define verifret(cond, error_type) \
+do { \
+	if (!(cond)) { \
+		return error_type; \
+	} \
+} while (ALWAYS_FALSE)
+
+#define verifjmp(cond, error_tag) \
+do { \
+	if (!(cond)) { \
+		goto error_tag; \
+	} \
+} while (ALWAYS_FALSE)
+
+#define verifexit(cond) \
+do { \
+	if (!(cond)) { \
+		goto EXIT; \
+	} \
+} while (ALWAYS_FALSE)
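+
+/*
+ * Usage sketch (names hypothetical): verifret(p != NULL, -1) returns -1
+ * from the enclosing function when p is NULL, while verifexit(p != NULL)
+ * jumps to a local EXIT label instead; see also DECLARE_ERRVAL above and
+ * verifexitval below for the _errval bookkeeping.
+ */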
+
+#if ERR_EMBEDDED
+#define verifexitval(cond, error_tag) \
+do { \
+	assert(cond); \
+} while (ALWAYS_FALSE)
+#else
+#define verifexitval(cond, error_tag) \
+do { \
+	if (!(cond)) { \
+		_errval = (error_tag); \
+		goto EXIT; \
+	} \
+} while (ALWAYS_FALSE)
+#endif
+
+#if ERR_EMBEDDED
+#define haserror(error_tag) (0)
+#else
+#define haserror(error_tag) \
+	(_errval == (error_tag))
+#endif
+
+#if ERR_EMBEDDED
+#define noerror() (1)
+#else
+#define noerror() \
+	(_errval == 0)
+#endif
+
+#define verifjmpexit(cond) \
+do { \
+	if (!(cond)) { \
+		goto EXIT; \
+	} \
+} while (ALWAYS_FALSE)
+
+#define verifjmpexitsetretval(cond, retval) \
+do { \
+	if (!(cond)) { \
+		retval = -1; \
+		goto EXIT; \
+	} \
+} while (ALWAYS_FALSE)
+
+#endif /* __ERROR_SUPPORT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/math_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/math_support.h
new file mode 100644
index 000000000000..633f86f1a1b0
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/math_support.h
@@ -0,0 +1,314 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __MATH_SUPPORT_H
+#define __MATH_SUPPORT_H
+
+#include "storage_class.h" /* for STORAGE_CLASS_INLINE */
+#include "type_support.h"
+#include "assert_support.h"
+
+/* in case we have min/max/MIN/MAX macros, undefine them */
+#ifdef min
+#undef min
+#endif
+#ifdef max
+#undef max
+#endif
+#ifdef MIN /* also defined in include/hrt/numeric.h from SDK */
+#undef MIN
+#endif
+#ifdef MAX
+#undef MAX
+#endif
+
+#ifndef UINT16_MAX
+#define UINT16_MAX (0xffffUL)
+#endif
+
+#ifndef UINT32_MAX
+#define UINT32_MAX (0xffffffffUL)
+#endif
+
+#define IS_ODD(a) ((a) & 0x1)
+#define IS_EVEN(a) (!IS_ODD(a))
+#define IS_POWER2(a) (!((a)&((a)-1)))
+#define IS_MASK_BITS_SET(a, b) (((a) & (b)) != 0)
+
+/* To find the next power-of-2 number from x */
+#define bit2(x) ((x) | ((x) >> 1))
+#define bit4(x) (bit2(x) | (bit2(x) >> 2))
+#define bit8(x) (bit4(x) | (bit4(x) >> 4))
+#define bit16(x) (bit8(x) | (bit8(x) >> 8))
+#define bit32(x) (bit16(x) | (bit16(x) >> 16))
+#define NEXT_POWER_OF_2(x) (bit32(x-1) + 1)
+
+/* force a value to a lower even value */
+#define EVEN_FLOOR(x) ((x) & ~1UL)
+
+/* A => B */
+#define IMPLIES(a, b) (!(a) || (b))
+
+/* The ORIG_BITS'th bit is the sign bit */
+/* Sign-extends an ORIG_BITS bits long signed number to a 64-bit signed number */
+/* By type casting it can be limited to any valid type-size
+ * (32-bit signed or 16-bit or 8-bit)
+ */
+/* By masking it can be transformed to any arbitrary bit size */
+#define SIGN_EXTEND(VAL, ORIG_BITS) \
+((~(((VAL)&(1ULL<<((ORIG_BITS)-1)))-1))|(VAL))
+
+#define EXTRACT_BIT(a, b) (((a) >> (b)) & 1)
+
+/* for preprocessor and array sizing use MIN and MAX
+   otherwise use min and max */
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#define CLIP(a, b, c) MIN((MAX((a), (b))), (c))
+/* Integer round-down division of a with b */
+#define FLOOR_DIV(a, b) ((b) ? ((a) / (b)) : 0)
+/* Align a to the lower multiple of b */
+#define FLOOR_MUL(a, b) (FLOOR_DIV(a, b) * (b))
+/* Integer round-up division of a with b */
+#define CEIL_DIV(a, b) ((b) ? (((a) + (b) - 1) / (b)) : 0)
+/* Align a to the upper multiple of b */
+#define CEIL_MUL(a, b) (CEIL_DIV(a, b) * (b))
+/* Align a to the upper multiple of b - fast implementation
+ * for cases when b=pow(2,n)
+ */
+#define CEIL_MUL2(a, b) (((a) + (b) - 1) & ~((b) - 1))
+/* integer round-up division of a with pow(2,b) */
+#define CEIL_SHIFT(a, b) (((a) + (1UL << (b)) - 1) >> (b))
+/* Align a to the upper multiple of pow(2,b) */
+#define CEIL_SHIFT_MUL(a, b) (CEIL_SHIFT(a, b) << (b))
+/* Absolute difference of a and b */
+#define ABS_DIF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
+#define ABS(a) ABS_DIF(a, 0)
+/* Square of x */
+#define SQR(x) ((x)*(x))
+/* Integer round-half-down division of a and b */
+#define ROUND_HALF_DOWN_DIV(a, b) ((b) ? ((a) + (b / 2) - 1) / (b) : 0)
+/* Align a to the round-half-down multiple of b */
+#define ROUND_HALF_DOWN_MUL(a, b) (ROUND_HALF_DOWN_DIV(a, b) * (b))
+
+#define MAX3(a, b, c) MAX((a), MAX((b), (c)))
+#define MIN3(a, b, c) MIN((a), MIN((b), (c)))
+#define MAX4(a, b, c, d) MAX((MAX((a), (b))), (MAX((c), (d))))
+#define MIN4(a, b, c, d) MIN((MIN((a), (b))), (MIN((c), (d))))
+
+/* min and max should not be macros as they will evaluate their arguments
+   twice. If you really need a macro (e.g. for CPP or for initializing an
+   array) use MIN() and MAX(), otherwise use min() and max() */
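+
+/*
+ * Worked example: CEIL_DIV(10, 4) == 3, CEIL_MUL(10, 4) == 12,
+ * FLOOR_MUL(10, 4) == 8 and CEIL_MUL2(10, 4) == 12 (CEIL_MUL2 requires
+ * b to be a power of two).
+ */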
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) ((sizeof(a) / sizeof(*(a))))
+#endif
+
+#ifndef BYTES
+#define BYTES(bit) (((bit)+7)/8)
+#endif
+
+#if !defined(PIPE_GENERATION)
+STORAGE_CLASS_INLINE unsigned int max_value_bits(unsigned int bits)
+{
+	return (bits == 0) ? 0 : ((2 * ((1 << ((bits) - 1)) - 1)) + 1);
+}
+STORAGE_CLASS_INLINE unsigned int max_value_bytes(unsigned int bytes)
+{
+	return max_value_bits(IA_CSS_UINT8_T_BITS * bytes);
+}
+STORAGE_CLASS_INLINE int max(int a, int b)
+{
+	return MAX(a, b);
+}
+
+STORAGE_CLASS_INLINE int min(int a, int b)
+{
+	return MIN(a, b);
+}
+
+STORAGE_CLASS_INLINE int clip(int a, int b, int c)
+{
+	return min(max(a, b), c);
+}
+
+STORAGE_CLASS_INLINE unsigned int umax(unsigned int a, unsigned int b)
+{
+	return MAX(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int umin(unsigned int a, unsigned int b)
+{
+	return MIN(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int uclip(unsigned int a, unsigned int b,
+	unsigned int c)
+{
+	return umin(umax(a, b), c);
+}
+
+STORAGE_CLASS_INLINE unsigned int ceil_div(unsigned int a, unsigned int b)
+{
+	return CEIL_DIV(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int ceil_mul(unsigned int a, unsigned int b)
+{
+	return CEIL_MUL(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int ceil_mul2(unsigned int a, unsigned int b)
+{
+	return CEIL_MUL2(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int ceil_shift(unsigned int a, unsigned int b)
+{
+	return CEIL_SHIFT(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int ceil_shift_mul(unsigned int a, unsigned int b)
+{
+	return CEIL_SHIFT_MUL(a, b);
+}
+
+STORAGE_CLASS_INLINE int abs_dif(int a, int b)
+{
+	return ABS_DIF(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int uabs_dif(unsigned int a, unsigned int b)
+{
+	return ABS_DIF(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int round_half_down_div(unsigned int a,
+	unsigned int b)
+{
+	return ROUND_HALF_DOWN_DIV(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int round_half_down_mul(unsigned int a,
+	unsigned int b)
+{
+	return ROUND_HALF_DOWN_MUL(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int ceil_pow2(uint32_t a)
+{
+	unsigned int retval = 0;
+
+	if (IS_POWER2(a)) {
+		retval = (unsigned int)a;
+	} else {
+		unsigned int v = a;
+
+		v |= v>>1;
+		v |= v>>2;
+		v |= v>>4;
+		v |= v>>8;
+		v |= v>>16;
+		retval = (unsigned int)(v+1);
+	}
+	return retval;
+}
+
+STORAGE_CLASS_INLINE unsigned int floor_log2(uint32_t a)
+{
+	static const uint8_t de_bruijn[] = {
+		0,  9,  1, 10, 13, 21,  2, 29, 11, 14, 16, 18, 22, 25,  3, 30,
+		8, 12, 20, 28, 15, 17, 24,  7, 19, 27, 23,  6, 26,  5,  4, 31
+	};
+	uint32_t v = a;
+
+	v |= v>>1;
+	v |= v>>2;
+	v |= v>>4;
+	v |= v>>8;
+	v |= v>>16;
+	return (unsigned int)de_bruijn[(v*0x07C4ACDDU)>>27];
+}
+
+/* Divide by small power of two */
+STORAGE_CLASS_INLINE unsigned int
+udiv2_small_i(uint32_t a, uint32_t b)
+{
+	assert(b <= 2);
+	return a >> (b-1);
+}
+
+/* optimized divide for small results
+ * a will be divided by b
+ * outbits is the number of bits needed for the result
+ * the smaller the cheaper the function will be.
+ * if the result doesn't fit in the number of output bits
+ * the result is incorrect and the function will assert
+ */
+STORAGE_CLASS_INLINE unsigned int
+udiv_medium(uint32_t a, uint32_t b, unsigned outbits)
+{
+	int bit;
+	unsigned res = 0;
+	unsigned mask;
+
+#ifdef VOLCANO
+#pragma ipu unroll
+#endif
+	for (bit = outbits-1 ; bit >= 0; bit--) {
+		mask = 1<<bit;
+		if (a >= (b<<bit)) {
+			a -= (b<<bit);
+			res |= mask;
+		}
+	}
+	/* if the quotient did not fit in outbits, a remainder of at
+	 * least b is left over
+	 */
+	assert(a < b);
+	return res;
+}
+
+#if !defined(__VIED_CELL)
+/*
+ * For SP and ISP, SDK provides the definition of OP_std_modadd.
+ * We need it only for host
+ */
+STORAGE_CLASS_INLINE unsigned int OP_std_modadd(int a, int b, int c)
+{
+	return (a+b < 0 ? a+b+c : a+b >= c ? a+b-c : a+b);
+}
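+
+/*
+ * e.g. OP_std_modadd(4, 1, 5) == 0 and OP_std_modadd(1, -4, 5) == 2;
+ * the send/recv ports rely on this wrap-around when advancing and
+ * comparing their ring buffer indices.
+ */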
+
+/*
+ * For SP and ISP, SDK provides the definition of OP_asp_slor.
+ * We need it only for host
+ */
+STORAGE_CLASS_INLINE unsigned int OP_asp_slor(int a, int b, int c)
+{
+	return ((a << c) | b);
+}
+#else
+#include "hive/customops.h"
+#endif /* !defined(__VIED_CELL) */
+
+#endif /* !defined(PIPE_GENERATION) */
+
+#if !defined(__KERNEL__)
+#define clamp(a, min_val, max_val) MIN(MAX((a), (min_val)), (max_val))
+#endif /* !defined(__KERNEL__) */
+
+#endif /* __MATH_SUPPORT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/misc_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/misc_support.h
new file mode 100644
index 000000000000..a2c2729e946d
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/misc_support.h
@@ -0,0 +1,76 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __MISC_SUPPORT_H
+#define __MISC_SUPPORT_H
+
+/* suppress compiler warnings on unused variables */
+#ifndef NOT_USED
+#define NOT_USED(a) ((void)(a))
+#endif
+
+/* Calculate the total bytes for pow(2) byte alignment */
+#define tot_bytes_for_pow2_align(pow2, cur_bytes) \
+	(((cur_bytes) + ((pow2) - 1)) & ~((pow2) - 1))
+
+/* Display the macro value given a string */
+#define _STR(x) #x
+#define STR(x) _STR(x)
+
+/* Concatenate */
+#ifndef CAT /* also defined in */
+#define _CAT(a, b) a ## b
+#define CAT(a, b) _CAT(a, b)
+#endif
+
+#define _CAT3(a, b, c) a ## b ## c
+#define CAT3(a, b, c) _CAT3(a, b, c)
+
+/* NO_HOIST, NO_CSE, NO_ALIAS attributes must be ignored for host code */
+#ifndef __HIVECC
+#ifndef NO_HOIST
+#define NO_HOIST
+#endif
+#ifndef NO_CSE
+#define NO_CSE
+#endif
+#ifndef NO_ALIAS
+#define NO_ALIAS
+#endif
+#endif
+
+enum hive_method_id {
+	HIVE_METHOD_ID_CRUN,
+	HIVE_METHOD_ID_UNSCHED,
+	HIVE_METHOD_ID_SCHED,
+	HIVE_METHOD_ID_TARGET
+};
+
+/* Derive METHOD */
+#if defined(C_RUN)
+	#define HIVE_METHOD "crun"
+	#define HIVE_METHOD_ID HIVE_METHOD_ID_CRUN
+#elif defined(HRT_UNSCHED)
+	#define HIVE_METHOD "unsched"
+	#define HIVE_METHOD_ID HIVE_METHOD_ID_UNSCHED
+#elif defined(HRT_SCHED)
+	#define HIVE_METHOD "sched"
+	#define HIVE_METHOD_ID HIVE_METHOD_ID_SCHED
+#else
+	#define HIVE_METHOD "target"
+	#define HIVE_METHOD_ID HIVE_METHOD_ID_TARGET
+	#define HRT_TARGET 1
+#endif
+
+#endif /* __MISC_SUPPORT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/platform_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/platform_support.h
new file mode 100644
index 000000000000..1752efc7b4df
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/platform_support.h
@@ -0,0 +1,146 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __PLATFORM_SUPPORT_H
+#define __PLATFORM_SUPPORT_H
+
+#include "storage_class.h"
+
+#define MSEC_IN_SEC 1000
+#define NSEC_IN_MSEC 1000000
+
+#if defined(_MSC_VER)
+#include
+
+#define IA_CSS_EXTERN
+#define SYNC_WITH(x)
+#define CSS_ALIGN(d, a) __declspec(align(a)) d
+
+STORAGE_CLASS_INLINE void ia_css_sleep(void)
+{
+	/* Placeholder for driver team */
+}
+
+STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms)
+{
+	/* Placeholder for driver team */
+	(void)delay_time_ms;
+}
+
+#elif defined(__HIVECC)
+#include
+#include
+
+#define IA_CSS_EXTERN extern
+#define CSS_ALIGN(d, a) d __attribute__((aligned(a)))
+
+STORAGE_CLASS_INLINE void ia_css_sleep(void)
+{
+	OP___schedule();
+}
+
+#elif defined(__KERNEL__)
+#include <linux/kernel.h>
+#include <linux/delay.h> /* for usleep_range */
+
+#define IA_CSS_EXTERN
+#define CSS_ALIGN(d, a) d __aligned(a)
+
+STORAGE_CLASS_INLINE void ia_css_sleep(void)
+{
+	usleep_range(1, 50);
+}
+
+#elif defined(__GNUC__)
+#include
+
+#define IA_CSS_EXTERN
+#define CSS_ALIGN(d, a) d __attribute__((aligned(a)))
+
+/* Define some __HIVECC specific macros to nothing to allow host code compilation */
+#ifndef NO_ALIAS
+#define NO_ALIAS
+#endif
+
+#ifndef SYNC_WITH
+#define SYNC_WITH(x)
+#endif
+
+#if defined(HRT_CSIM)
+	#include "hrt/host.h" /* Using hrt_sleep from hrt/host.h */
+	STORAGE_CLASS_INLINE void ia_css_sleep(void)
+	{
+		/* For the SDK still using hrt_sleep */
+		hrt_sleep();
+	}
+	STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms)
+	{
+		/* For the SDK still using hrt_sleep */
+		long unsigned int i = 0;
+
+		for (i = 0; i < delay_time_ms; i++) {
+			hrt_sleep();
+		}
+	}
#else
	#include <time.h> /* for nanosleep */
	STORAGE_CLASS_INLINE void ia_css_sleep(void)
	{
		struct timespec delay_time;

		delay_time.tv_sec = 0;
		delay_time.tv_nsec = 10;
		nanosleep(&delay_time, NULL);
	}
	STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms)
	{
		struct timespec delay_time;

		if (delay_time_ms >= MSEC_IN_SEC) {
			delay_time.tv_sec = delay_time_ms / MSEC_IN_SEC;
			delay_time.tv_nsec = (delay_time_ms % MSEC_IN_SEC) * NSEC_IN_MSEC;
		} else {
			delay_time.tv_sec = 0;
			delay_time.tv_nsec = delay_time_ms * NSEC_IN_MSEC;
		}
		nanosleep(&delay_time, NULL);
	}
#endif

#else
#include
#endif

/* needed for the stdint.h include in various environments */
#include "type_support.h"
#include "storage_class.h"

#define MAX_ALIGNMENT 8
#define aligned_uint8(type, obj) CSS_ALIGN(uint8_t obj, 1)
#define aligned_int8(type, obj) CSS_ALIGN(int8_t obj, 1)
#define aligned_uint16(type, obj) CSS_ALIGN(uint16_t obj, 2)
#define aligned_int16(type, obj) CSS_ALIGN(int16_t obj, 2)
#define aligned_uint32(type, obj) CSS_ALIGN(uint32_t obj, 4)
#define aligned_int32(type, obj) CSS_ALIGN(int32_t obj, 4)

/* needed as long as hivecc does not define the type (u)int64_t */
#if defined(__HIVECC)
#define aligned_uint64(type, obj) CSS_ALIGN(unsigned long long obj, 8)
#define aligned_int64(type, obj) CSS_ALIGN(signed long long obj, 8)
#else
#define aligned_uint64(type, obj) CSS_ALIGN(uint64_t obj, 8)
#define aligned_int64(type, obj) CSS_ALIGN(int64_t obj, 8)
#endif
#define aligned_enum(enum_type, obj) CSS_ALIGN(uint32_t obj, 4)
#define aligned_struct(struct_type, obj) struct_type obj
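+
+/*
+ * e.g. aligned_uint32(uint32_t, counter) expands to a declaration of
+ * "counter" with 4-byte alignment through CSS_ALIGN, whichever toolchain
+ * branch above was selected.
+ */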
+
+#endif /* __PLATFORM_SUPPORT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/print_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/print_support.h
new file mode 100644
index 000000000000..0b614f7ef12d
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/print_support.h
@@ -0,0 +1,90 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __PRINT_SUPPORT_H
+#define __PRINT_SUPPORT_H
+
+#if defined(_MSC_VER)
+#ifdef _KERNEL_MODE
+
+/* TODO: Windows driver team to provide tracing mechanism for kernel mode,
+ * e.g. DbgPrint and DbgPrintEx
+ */
+extern void FwTracePrintPWARN(const char *fmt, ...);
+extern void FwTracePrintPRINT(const char *fmt, ...);
+extern void FwTracePrintPERROR(const char *fmt, ...);
+extern void FwTracePrintPDEBUG(const char *fmt, ...);
+
+#define PWARN(format, ...) FwTracePrintPWARN(format, __VA_ARGS__)
+#define PRINT(format, ...) FwTracePrintPRINT(format, __VA_ARGS__)
+#define PERROR(format, ...) FwTracePrintPERROR(format, __VA_ARGS__)
+#define PDEBUG(format, ...) FwTracePrintPDEBUG(format, __VA_ARGS__)
+
+#else
+/* Windows usermode compilation */
+#include <stdio.h>
+
+/* To change the defines below, communicate with the Windows team first
+ * to ensure they will not get flooded with prints
+ */
+/* This is a temporary workaround to avoid flooding the userspace
+ * Windows driver with prints
+ */
+
+#define PWARN(format, ...)
+#define PRINT(format, ...)
+#define PERROR(format, ...) printf("error: " format, __VA_ARGS__)
+#define PDEBUG(format, ...)
+
+#endif /* _KERNEL_MODE */
+
+#elif defined(__HIVECC)
+#include
+/* To be revised
+
+#define PWARN(format)
+#define PRINT(format) OP___printstring(format)
+#define PERROR(variable) OP___dump(9999, arguments)
+#define PDEBUG(variable) OP___dump(__LINE__, arguments)
+
+*/
+
+#define PRINTSTRING(str) OP___printstring(str)
+
+#elif defined(__KERNEL__)
+#include <linux/kernel.h>
+#include <linux/printk.h> /* for pr_debug */
+
+#define PWARN(format, arguments...) pr_debug(format, ##arguments)
+#define PRINT(format, arguments...) pr_debug(format, ##arguments)
+#define PERROR(format, arguments...) pr_debug(format, ##arguments)
+#define PDEBUG(format, arguments...) pr_debug(format, ##arguments)
+
+#else
+#include <stdio.h>
+
+#define PRINT_HELPER(prefix, format, ...) printf(prefix format "%s", __VA_ARGS__)
+
+/* The trailing "" allows the edge case of printing a single string */
+#define PWARN(...) PRINT_HELPER("warning: ", __VA_ARGS__, "")
+#define PRINT(...) PRINT_HELPER("", __VA_ARGS__, "")
+#define PERROR(...) PRINT_HELPER("error: ", __VA_ARGS__, "")
+#define PDEBUG(...) PRINT_HELPER("debug: ", __VA_ARGS__, "")
+
+#define PRINTSTRING(str) PRINT(str)
+
+#endif
+
+#endif /* __PRINT_SUPPORT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/storage_class.h
new file mode 100644
index 000000000000..af19b4026220
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/storage_class.h
@@ -0,0 +1,51 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __STORAGE_CLASS_H +#define __STORAGE_CLASS_H + +#define STORAGE_CLASS_EXTERN \ +extern + +#if defined(_MSC_VER) +#define STORAGE_CLASS_INLINE \ +static __inline +#elif defined(__HIVECC) +#define STORAGE_CLASS_INLINE \ +static inline +#else +#define STORAGE_CLASS_INLINE \ +static inline +#endif + +/* Register struct */ +#ifndef __register +#if defined(__HIVECC) && !defined(PIPE_GENERATION) +#define __register register +#else +#define __register +#endif +#endif + +/* Memory attribute */ +#ifndef MEM +#ifdef PIPE_GENERATION +#elif defined(__HIVECC) +#include +#else +#define MEM(any_mem) +#endif +#endif + +#endif /* __STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/type_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/type_support.h new file mode 100644 index 000000000000..a86da0e78941 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/support/type_support.h @@ -0,0 +1,80 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __TYPE_SUPPORT_H +#define __TYPE_SUPPORT_H + +/* Per the DLI spec, types are in "type_support.h" and + * "platform_support.h" is for unclassified/to be refactored + * platform specific definitions. 
+ */
+#define IA_CSS_UINT8_T_BITS 8
+#define IA_CSS_UINT16_T_BITS 16
+#define IA_CSS_UINT32_T_BITS 32
+#define IA_CSS_INT32_T_BITS 32
+#define IA_CSS_UINT64_T_BITS 64
+
+
+#if defined(_MSC_VER)
+#include <stdint.h>
+#include <stddef.h>
+#include <limits.h>
+#include <stdbool.h>
+#if defined(_M_X64)
+#define HOST_ADDRESS(x) (unsigned long long)(x)
+#else
+#define HOST_ADDRESS(x) (unsigned long)(x)
+#endif
+
+#elif defined(PARAM_GENERATION)
+/* Nothing */
+#elif defined(__HIVECC)
+#include
+#include
+#include
+#include
+#define HOST_ADDRESS(x) (unsigned long)(x)
+
+typedef long long int64_t;
+typedef unsigned long long uint64_t;
+
+#elif defined(__KERNEL__)
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#define CHAR_BIT (8)
+#define HOST_ADDRESS(x) (unsigned long)(x)
+
+#elif defined(__GNUC__)
+#include <stdint.h>
+#include <stddef.h>
+#include <limits.h>
+#include <stdbool.h>
+#define HOST_ADDRESS(x) (unsigned long)(x)
+
+#else /* default is for the FIST environment */
+#include <stdint.h>
+#include <stddef.h>
+#include <limits.h>
+#include <stdbool.h>
+#define HOST_ADDRESS(x) (unsigned long)(x)
+
+#endif
+
+#if !defined(PIPE_GENERATION) && !defined(IO_GENERATION)
+/* genpipe cannot handle the void* syntax */
+typedef void *HANDLE;
+#endif
+
+#endif /* __TYPE_SUPPORT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom.h
new file mode 100644
index 000000000000..5426d6d18e0b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom.h
@@ -0,0 +1,247 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_SYSCOM_H
+#define __IA_CSS_SYSCOM_H
+
+
+/*
+ * The CSS Subsystem Communication Interface - Host side
+ *
+ * It provides subsystem initialization, send ports and receive ports.
+ * The PSYS and ISYS interfaces are implemented on top of this interface.
+ */
+
+#include "ia_css_syscom_config.h"
+
+#define FW_ERROR_INVALID_PARAMETER (-1)
+#define FW_ERROR_BAD_ADDRESS (-2)
+#define FW_ERROR_BUSY (-3)
+#define FW_ERROR_NO_MEMORY (-4)
+
+struct ia_css_syscom_context;
+
+/**
+ * ia_css_syscom_size() - provide syscom external buffer requirements
+ * @cfg: pointer to the configuration data (read)
+ * @size: pointer to the buffer size (write)
+ *
+ * Purpose:
+ * - Provide external buffer requirements
+ * - To be used for external buffer allocation
+ *
+ */
+extern void
+ia_css_syscom_size(
+	const struct ia_css_syscom_config *cfg,
+	struct ia_css_syscom_size *size
+);
+
+/**
+ * ia_css_syscom_open() - initialize a subsystem context
+ * @config: pointer to the configuration data (read)
+ * @buf: pointer to externally allocated buffers (read)
+ * @returns: struct ia_css_syscom_context* on success, NULL otherwise.
+ *
+ * Purpose:
+ * - initialize host side data structures
+ * - boot the subsystem?
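+ *
+ * A minimal host-side sketch (allocation details omitted, names
+ * hypothetical):
+ *
+ *	struct ia_css_syscom_size sz;
+ *	struct ia_css_syscom_buf buf;
+ *
+ *	ia_css_syscom_size(cfg, &sz);
+ *	... fill buf with buffers of at least sz.cpu/shm/ibuf/obuf bytes ...
+ *	ctx = ia_css_syscom_open(cfg, &buf);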
+ *
+ */
+extern struct ia_css_syscom_context*
+ia_css_syscom_open(
+	struct ia_css_syscom_config *config,
+	struct ia_css_syscom_buf *buf
+);
+
+/**
+ * ia_css_syscom_close() - signal close to cell
+ * @context: pointer to the subsystem context
+ * @returns: 0 on success, -3 (FW_ERROR_BUSY) if SPC is not ready yet.
+ *
+ * Purpose:
+ * Request the cell to terminate
+ */
+extern int
+ia_css_syscom_close(
+	struct ia_css_syscom_context *context
+);
+
+/**
+ * ia_css_syscom_release() - free context
+ * @context: pointer to the subsystem context
+ * @force: flag which specifies whether cell
+ *	state will be checked before freeing the
+ *	context.
+ * @returns: 0 on success, -3 (FW_ERROR_BUSY) if cell
+ *	is busy and call was not forced.
+ *
+ * Purpose:
+ * Two modes: with force==true, immediately free the context;
+ * with force==false, verify that the cell state is OK and free
+ * the context if so, returning an error otherwise.
+ */
+extern int
+ia_css_syscom_release(
+	struct ia_css_syscom_context *context,
+	unsigned int force
+);
+
+/**
+ * Open a port for sending tokens to the subsystem
+ * @context: pointer to the subsystem context
+ * @port: send port index
+ * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_send_port_open(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Close a port for sending tokens to the subsystem
+ * @context: pointer to the subsystem context
+ * @port: send port index
+ * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_send_port_close(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Get the number of tokens that can be sent to a port without error.
+ * @context: pointer to the subsystem context
+ * @port: send port index
+ * @returns: number of available tokens on success,
+ *	-1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_send_port_available(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Send a token to the subsystem port.
+ * The token size is determined during initialization.
+ * @context: pointer to the subsystem context
+ * @port: send port index
+ * @token: pointer to the token value that is transferred to the subsystem
+ * @returns: number of tokens sent on success,
+ *	-1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_send_port_transfer(
+	struct ia_css_syscom_context *context,
+	unsigned int port,
+	const void *token
+);
+
+/**
+ * Open a port for receiving tokens from the subsystem
+ * @context: pointer to the subsystem context
+ * @port: receive port index
+ * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_recv_port_open(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Close a port for receiving tokens from the subsystem
+ * @context: pointer to the subsystem context
+ * @port: receive port index
+ * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_recv_port_close(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Get the number of tokens that can be received from a port without errors.
+ * @context: pointer to the subsystem context
+ * @port: receive port index
+ * @returns: number of available tokens on success,
+ *	-1 (FW_ERROR_INVALID_PARAMETER) otherwise.
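+ *
+ * e.g. a host-side polling loop (sketch):
+ *
+ *	while (ia_css_syscom_recv_port_available(ctx, port) > 0)
+ *		ia_css_syscom_recv_port_transfer(ctx, port, &token);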
+ */
+extern int
+ia_css_syscom_recv_port_available(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Receive a token from the subsystem port
+ * The token size is determined during initialization
+ * @context: pointer to the subsystem context
+ * @port: receive port index
+ * @token (output): pointer to (space for) the token to be received
+ * @returns: number of tokens received on success,
+ *	-1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_recv_port_transfer(
+	struct ia_css_syscom_context *context,
+	unsigned int port,
+	void *token
+);
+
+#if HAS_DUAL_CMD_CTX_SUPPORT
+/**
+ * ia_css_syscom_store_dmem() - store subsystem context information in DMEM
+ * @context: pointer to the subsystem context
+ * @ssid: subsystem id
+ * @vtl0_addr_mask: VTL0 address mask; only applicable when the passed in context is secure
+ * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_store_dmem(
+	struct ia_css_syscom_context *context,
+	unsigned int ssid,
+	unsigned int vtl0_addr_mask
+);
+
+/**
+ * ia_css_syscom_set_trustlet_status() - store trustlet configuration setting
+ * @dmem_addr: cell DMEM address
+ * @ssid: subsystem id
+ * @trustlet_exist: 1 if trustlet exists
+ */
+extern void
+ia_css_syscom_set_trustlet_status(
+	unsigned int dmem_addr,
+	unsigned int ssid,
+	bool trustlet_exist
+);
+
+/**
+ * ia_css_syscom_is_ab_spc_ready() - check if SPC access blocker programming is completed
+ * @ctx: pointer to the subsystem context
+ * @returns: 1 when status is ready, 0 otherwise
+ */
+bool
+ia_css_syscom_is_ab_spc_ready(
+	struct ia_css_syscom_context *ctx
+);
+#endif /* HAS_DUAL_CMD_CTX_SUPPORT */
+
+#endif /* __IA_CSS_SYSCOM_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom_config.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom_config.h
new file mode 100644
index 000000000000..8c827c2ba395
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom_config.h
@@ -0,0 +1,98 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_SYSCOM_CONFIG_H
+#define __IA_CSS_SYSCOM_CONFIG_H
+
+#include
+#include
+
+/* syscom size struct, output of ia_css_syscom_size,
+ * input for (external) allocation
+ */
+struct ia_css_syscom_size {
+	/* Size of host buffer */
+	unsigned int cpu;
+	/* Size of shared config buffer (host to cell) */
+	unsigned int shm;
+	/* Size of shared input queue buffers (host to cell) */
+	unsigned int ibuf;
+	/* Size of shared output queue buffers (cell to host) */
+	unsigned int obuf;
+};
+
+/* syscom buffer struct, output of (external) allocation,
+ * input for ia_css_syscom_open
+ */
+struct ia_css_syscom_buf {
+	char *cpu; /* host buffer */
+
+	/* shared memory buffer host address */
+	host_virtual_address_t shm_host;
+	/* shared memory buffer cell address */
+	vied_virtual_address_t shm_cell;
+
+	/* input queue shared buffer host address */
+	host_virtual_address_t ibuf_host;
+	/* input queue shared buffer cell address */
+	vied_virtual_address_t ibuf_cell;
+
+	/* output queue shared buffer host address */
+	host_virtual_address_t obuf_host;
+	/* output queue shared buffer cell address */
+	vied_virtual_address_t obuf_cell;
+};
+
+struct ia_css_syscom_queue_config {
+	unsigned int queue_size; /* tokens per queue */
+	unsigned int token_size; /* bytes per token */
+};
+
+/**
+ * Parameter struct for ia_css_syscom_open
+ */
+struct ia_css_syscom_config {
+	/* This member is no longer used by syscom.
+	 * It is kept so as not to break any driver builds, and will be
+	 * removed once all assignments have been removed from driver code.
+	 */
+	/* address of firmware in DDR/IMR */
+	unsigned long long host_firmware_address;
+
+	/* address of firmware in DDR, seen from SPC */
+	unsigned int vied_firmware_address;
+
+	unsigned int ssid;
+	unsigned int mmid;
+
+	unsigned int num_input_queues;
+	unsigned int num_output_queues;
+	struct ia_css_syscom_queue_config *input;
+	struct ia_css_syscom_queue_config *output;
+
+	unsigned int regs_addr;
+	unsigned int dmem_addr;
+
+	/* firmware-specific configuration data */
+	void *specific_addr;
+	unsigned int specific_size;
+
+	/* if true: secure syscom (VTIO case);
+	 * if false: non-secure syscom
+	 */
+	bool secure;
+	unsigned int vtl0_addr_mask; /* only applicable in 'secure' case */
+};
+
+#endif /* __IA_CSS_SYSCOM_CONFIG_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom_trace.h
new file mode 100644
index 000000000000..1a0191d37102
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/interface/ia_css_syscom_trace.h
@@ -0,0 +1,52 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SYSCOM_TRACE_H
+#define __IA_CSS_SYSCOM_TRACE_H
+
+#include "ia_css_trace.h"
+
+#define SYSCOM_TRACE_LEVEL_DEFAULT 1
+#define SYSCOM_TRACE_LEVEL_DEBUG 2
+
+/* Set to default level if no level is defined */
+#ifndef SYSCOM_TRACE_LEVEL
+#define SYSCOM_TRACE_LEVEL SYSCOM_TRACE_LEVEL_DEFAULT
+#endif /* SYSCOM_TRACE_LEVEL */
+
+/* SYSCOM Module tracing backend is mapped to TUNIT tracing for target platforms */
+#ifdef __HIVECC
+# ifndef HRT_CSIM
+# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE
+# else
+# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE
+# endif
+#else
+# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE
+#endif
+
+#define SYSCOM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED
+#define SYSCOM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED
+#define SYSCOM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED
+
+#if (SYSCOM_TRACE_LEVEL == SYSCOM_TRACE_LEVEL_DEFAULT)
+# define SYSCOM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED
+#elif (SYSCOM_TRACE_LEVEL == SYSCOM_TRACE_LEVEL_DEBUG)
+# define SYSCOM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED
+#else
+# error "Syscom trace level not defined!"
+#endif /* SYSCOM_TRACE_LEVEL */
+
+#endif /* __IA_CSS_SYSCOM_TRACE_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom.c
new file mode 100644
index 000000000000..cdf9df0531ff
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom.c
@@ -0,0 +1,650 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#include "ia_css_syscom.h"
+
+#include "ia_css_syscom_context.h"
+#include "ia_css_syscom_config_fw.h"
+#include "ia_css_syscom_trace.h"
+
+#include "queue.h"
+#include "send_port.h"
+#include "recv_port.h"
+#include "regmem_access.h"
+
+#include "error_support.h"
+#include "cpu_mem_support.h"
+
+#include "queue_struct.h"
+#include "send_port_struct.h"
+#include "recv_port_struct.h"
+
+#include "type_support.h"
+#include
+#include
+#include "platform_support.h"
+
+#include "ia_css_cell.h"
+
+/* struct of internal buffer sizes */
+struct ia_css_syscom_size_intern {
+	unsigned int context;
+	unsigned int input_queue;
+	unsigned int output_queue;
+	unsigned int input_port;
+	unsigned int output_port;
+
+	unsigned int fw_config;
+	unsigned int specific;
+
+	unsigned int input_buffer;
+	unsigned int output_buffer;
+};
+
+/* Allocate buffers internally, when no buffers are provided */
+static int
+ia_css_syscom_alloc(
+	unsigned int ssid,
+	unsigned int mmid,
+	const struct ia_css_syscom_size *size,
+	struct ia_css_syscom_buf *buf)
+{
+	/* zero the buffer to set all pointers to zero */
+	memset(buf, 0, sizeof(*buf));
+
+	/* allocate cpu_mem */
+	buf->cpu = (char *)ia_css_cpu_mem_alloc(size->cpu);
+	if (!buf->cpu)
+		goto EXIT7;
+
+	/* allocate and map shared config buffer */
+	buf->shm_host = shared_memory_alloc(mmid, size->shm);
+	if (!buf->shm_host)
+		goto EXIT6;
+	buf->shm_cell = shared_memory_map(ssid, mmid, buf->shm_host);
+	if (!buf->shm_cell)
+		goto EXIT5;
+
+	/* allocate and map input queue buffer */
+	buf->ibuf_host = shared_memory_alloc(mmid, size->ibuf);
+	if (!buf->ibuf_host)
+		goto EXIT4;
+	buf->ibuf_cell = shared_memory_map(ssid, mmid, buf->ibuf_host);
+	if (!buf->ibuf_cell)
+		goto EXIT3;
+
+	/* allocate and map output queue buffer */
+	buf->obuf_host = shared_memory_alloc(mmid, size->obuf);
+	if (!buf->obuf_host)
+		goto EXIT2;
+	buf->obuf_cell = shared_memory_map(ssid, mmid, buf->obuf_host);
+	if (!buf->obuf_cell)
+		goto EXIT1;
+
+	return 0;
+
+EXIT1: shared_memory_free(mmid, buf->obuf_host);
+EXIT2: shared_memory_unmap(ssid, mmid, buf->ibuf_cell);
+EXIT3: shared_memory_free(mmid, buf->ibuf_host);
+EXIT4: shared_memory_unmap(ssid, mmid, buf->shm_cell);
+EXIT5: shared_memory_free(mmid, buf->shm_host);
+EXIT6: ia_css_cpu_mem_free(buf->cpu);
+EXIT7: return FW_ERROR_NO_MEMORY;
+}
+
+static void
+ia_css_syscom_size_intern(
+	const struct ia_css_syscom_config *cfg,
+	struct ia_css_syscom_size_intern *size)
+{
+	/* convert syscom config into syscom internal size struct */
+
+	unsigned int i;
+
+	size->context = sizeof(struct ia_css_syscom_context);
+	size->input_queue = cfg->num_input_queues * sizeof(struct sys_queue);
+	size->output_queue = cfg->num_output_queues * sizeof(struct sys_queue);
+	size->input_port = cfg->num_input_queues * sizeof(struct send_port);
+	size->output_port = cfg->num_output_queues * sizeof(struct recv_port);
+
+	size->fw_config = sizeof(struct ia_css_syscom_config_fw);
+	size->specific = cfg->specific_size;
+
+	/* accumulate input queue buffer sizes */
+	size->input_buffer = 0;
+	for (i = 0; i < cfg->num_input_queues; i++) {
+		size->input_buffer +=
+			sys_queue_buf_size(cfg->input[i].queue_size,
+				cfg->input[i].token_size);
+	}
+
+	/* accumulate output queue buffer sizes */
+	size->output_buffer = 0;
+	for (i = 0; i < cfg->num_output_queues; i++) {
+		size->output_buffer +=
+			sys_queue_buf_size(cfg->output[i].queue_size,
+				cfg->output[i].token_size);
+	}
+}
+
+static void
+ia_css_syscom_size_extern(
+	const struct ia_css_syscom_size_intern *i,
+	struct
ia_css_syscom_size *e) +{ + /* convert syscom internal size struct into external size struct */ + + e->cpu = i->context + i->input_queue + i->output_queue + + i->input_port + i->output_port; + e->shm = i->fw_config + i->input_queue + i->output_queue + i->specific; + e->ibuf = i->input_buffer; + e->obuf = i->output_buffer; +} + +/* Function that provides buffer sizes to be allocated */ +void +ia_css_syscom_size( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size *size) +{ + struct ia_css_syscom_size_intern i; + + ia_css_syscom_size_intern(cfg, &i); + ia_css_syscom_size_extern(&i, size); +} + +static struct ia_css_syscom_context* +ia_css_syscom_assign_buf( + const struct ia_css_syscom_size_intern *i, + const struct ia_css_syscom_buf *buf) +{ + struct ia_css_syscom_context *ctx; + char *cpu_mem_buf; + host_virtual_address_t shm_buf_host; + vied_virtual_address_t shm_buf_cell; + + /* host context */ + cpu_mem_buf = buf->cpu; + + ctx = (struct ia_css_syscom_context *)cpu_mem_buf; + ia_css_cpu_mem_set_zero(ctx, i->context); + cpu_mem_buf += i->context; + + ctx->input_queue = (struct sys_queue *) cpu_mem_buf; + cpu_mem_buf += i->input_queue; + + ctx->output_queue = (struct sys_queue *) cpu_mem_buf; + cpu_mem_buf += i->output_queue; + + ctx->send_port = (struct send_port *) cpu_mem_buf; + cpu_mem_buf += i->input_port; + + ctx->recv_port = (struct recv_port *) cpu_mem_buf; + + + /* cell config */ + shm_buf_host = buf->shm_host; + shm_buf_cell = buf->shm_cell; + + ctx->config_host_addr = shm_buf_host; + shm_buf_host += i->fw_config; + ctx->config_vied_addr = shm_buf_cell; + shm_buf_cell += i->fw_config; + + ctx->input_queue_host_addr = shm_buf_host; + shm_buf_host += i->input_queue; + ctx->input_queue_vied_addr = shm_buf_cell; + shm_buf_cell += i->input_queue; + + ctx->output_queue_host_addr = shm_buf_host; + shm_buf_host += i->output_queue; + ctx->output_queue_vied_addr = shm_buf_cell; + shm_buf_cell += i->output_queue; + + ctx->specific_host_addr = shm_buf_host; + ctx->specific_vied_addr = shm_buf_cell; + + ctx->ibuf_host_addr = buf->ibuf_host; + ctx->ibuf_vied_addr = buf->ibuf_cell; + + ctx->obuf_host_addr = buf->obuf_host; + ctx->obuf_vied_addr = buf->obuf_cell; + + return ctx; +} + +struct ia_css_syscom_context* +ia_css_syscom_open( + struct ia_css_syscom_config *cfg, + struct ia_css_syscom_buf *buf_extern +) +{ + struct ia_css_syscom_size_intern size_intern; + struct ia_css_syscom_size size; + struct ia_css_syscom_buf buf_intern; + struct ia_css_syscom_buf *buf; + struct ia_css_syscom_context *ctx; + struct ia_css_syscom_config_fw fw_cfg; + unsigned int i; + struct sys_queue_res res; + + IA_CSS_TRACE_0(SYSCOM, INFO, "Entered: ia_css_syscom_open\n"); + + /* error handling */ + if (cfg == NULL) + return NULL; + + IA_CSS_TRACE_1(SYSCOM, INFO, "ia_css_syscom_open (secure %d) start\n", cfg->secure); + + /* check members of cfg: TBD */ + + /* + * Check if SP is in valid state, have to wait if not ready. 
+ * On some platforms (such as VP) this can take longer due to system
+ * performance; if we returned NULL here without waiting for SPC0 to
+ * become ready, the driver's firmware load would fail.
+ */
+	ia_css_cell_wait(cfg->ssid, SPC0);
+
+	ia_css_syscom_size_intern(cfg, &size_intern);
+	ia_css_syscom_size_extern(&size_intern, &size);
+
+	if (buf_extern) {
+		/* use externally allocated buffers */
+		buf = buf_extern;
+	} else {
+		/* use internally allocated buffers */
+		buf = &buf_intern;
+		if (ia_css_syscom_alloc(cfg->ssid, cfg->mmid, &size, buf) != 0)
+			return NULL;
+	}
+
+	/* assign buffer pointers */
+	ctx = ia_css_syscom_assign_buf(&size_intern, buf);
+	/* only need to free internally allocated buffers */
+	ctx->free_buf = !buf_extern;
+
+	ctx->cell_regs_addr = cfg->regs_addr;
+	/* regmem is at cell_dmem_addr + REGMEM_OFFSET */
+	ctx->cell_dmem_addr = cfg->dmem_addr;
+
+	ctx->num_input_queues = cfg->num_input_queues;
+	ctx->num_output_queues = cfg->num_output_queues;
+
+	ctx->env.mmid = cfg->mmid;
+	ctx->env.ssid = cfg->ssid;
+	ctx->env.mem_addr = cfg->dmem_addr;
+
+	ctx->regmem_idx = SYSCOM_QPR_BASE_REG;
+
+	/* initialize input queues */
+	res.reg = SYSCOM_QPR_BASE_REG;
+	res.host_address = ctx->ibuf_host_addr;
+	res.vied_address = ctx->ibuf_vied_addr;
+	for (i = 0; i < cfg->num_input_queues; i++) {
+		sys_queue_init(ctx->input_queue + i,
+			cfg->input[i].queue_size,
+			cfg->input[i].token_size, &res);
+	}
+
+	/* initialize output queues */
+	res.host_address = ctx->obuf_host_addr;
+	res.vied_address = ctx->obuf_vied_addr;
+	for (i = 0; i < cfg->num_output_queues; i++) {
+		sys_queue_init(ctx->output_queue + i,
+			cfg->output[i].queue_size,
+			cfg->output[i].token_size, &res);
+	}
+
+	/* fill shared queue structs */
+	shared_memory_store(cfg->mmid, ctx->input_queue_host_addr,
+		ctx->input_queue,
+		cfg->num_input_queues * sizeof(struct sys_queue));
+	ia_css_cpu_mem_cache_flush(
+		(void *)HOST_ADDRESS(ctx->input_queue_host_addr),
+		cfg->num_input_queues * sizeof(struct sys_queue));
+	shared_memory_store(cfg->mmid, ctx->output_queue_host_addr,
+		ctx->output_queue,
+		cfg->num_output_queues * sizeof(struct sys_queue));
+	ia_css_cpu_mem_cache_flush(
+		(void *)HOST_ADDRESS(ctx->output_queue_host_addr),
+		cfg->num_output_queues * sizeof(struct sys_queue));
+
+	/* Zero the queue buffers. Is this really needed?
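+	 * Probably only for reproducibility: the queue read/write state
+	 * lives in regmem, so stale buffer contents are never interpreted
+	 * as tokens, but zeroed buffers give a deterministic start.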
*/
+	shared_memory_zero(cfg->mmid, buf->ibuf_host, size.ibuf);
+	ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(buf->ibuf_host),
+		size.ibuf);
+	shared_memory_zero(cfg->mmid, buf->obuf_host, size.obuf);
+	ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(buf->obuf_host),
+		size.obuf);
+
+	/* copy firmware specific data */
+	if (cfg->specific_addr && cfg->specific_size) {
+		shared_memory_store(cfg->mmid, ctx->specific_host_addr,
+			cfg->specific_addr, cfg->specific_size);
+		ia_css_cpu_mem_cache_flush(
+			(void *)HOST_ADDRESS(ctx->specific_host_addr),
+			cfg->specific_size);
+	}
+
+	fw_cfg.num_input_queues = cfg->num_input_queues;
+	fw_cfg.num_output_queues = cfg->num_output_queues;
+	fw_cfg.input_queue = ctx->input_queue_vied_addr;
+	fw_cfg.output_queue = ctx->output_queue_vied_addr;
+	fw_cfg.specific_addr = ctx->specific_vied_addr;
+	fw_cfg.specific_size = cfg->specific_size;
+
+	shared_memory_store(cfg->mmid, ctx->config_host_addr,
+		&fw_cfg, sizeof(struct ia_css_syscom_config_fw));
+	ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(ctx->config_host_addr),
+		sizeof(struct ia_css_syscom_config_fw));
+
+#if !HAS_DUAL_CMD_CTX_SUPPORT
+	/* store syscom uninitialized state */
+	IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store STATE_REG (%#x) @ dmem_addr %#x ssid %d\n",
+		SYSCOM_STATE_UNINIT, ctx->cell_dmem_addr, cfg->ssid);
+	regmem_store_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG,
+		SYSCOM_STATE_UNINIT, cfg->ssid);
+	/* store syscom uninitialized command */
+	IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store COMMAND_REG (%#x) @ dmem_addr %#x ssid %d\n",
+		SYSCOM_COMMAND_UNINIT, ctx->cell_dmem_addr, cfg->ssid);
+	regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG,
+		SYSCOM_COMMAND_UNINIT, cfg->ssid);
+	/* store firmware configuration address */
+	IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store CONFIG_REG (%#x) @ dmem_addr %#x ssid %d\n",
+		ctx->config_vied_addr, ctx->cell_dmem_addr, cfg->ssid);
+	regmem_store_32(ctx->cell_dmem_addr, SYSCOM_CONFIG_REG,
+		ctx->config_vied_addr, cfg->ssid);
+#endif
+
+	/* Indicate if ctx is created for secure stream purpose */
+	ctx->secure = cfg->secure;
+
+	IA_CSS_TRACE_1(SYSCOM, INFO, "ia_css_syscom_open (secure %d) completed\n", cfg->secure);
+	return ctx;
+}
+
+
+int
+ia_css_syscom_close(
+	struct ia_css_syscom_context *ctx
+) {
+	int state;
+
+	state = regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG,
+		ctx->env.ssid);
+	if (state != SYSCOM_STATE_READY) {
+		/* SPC is not ready to handle close request yet */
+		return FW_ERROR_BUSY;
+	}
+
+	/* set close request flag */
+	regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG,
+		SYSCOM_COMMAND_INACTIVE, ctx->env.ssid);
+
+	return 0;
+}
+
+static void
+ia_css_syscom_free(struct ia_css_syscom_context *ctx)
+{
+	shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, ctx->ibuf_vied_addr);
+	shared_memory_free(ctx->env.mmid, ctx->ibuf_host_addr);
+	shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, ctx->obuf_vied_addr);
+	shared_memory_free(ctx->env.mmid, ctx->obuf_host_addr);
+	shared_memory_unmap(ctx->env.ssid, ctx->env.mmid,
+		ctx->config_vied_addr);
+	shared_memory_free(ctx->env.mmid, ctx->config_host_addr);
+	ia_css_cpu_mem_free(ctx);
+}
+
+int
+ia_css_syscom_release(
+	struct ia_css_syscom_context *ctx,
+	unsigned int force
+) {
+	/* check if release is forced, and verify cell state if it is not */
+	if (!force) {
+		if (!ia_css_cell_is_ready(ctx->env.ssid, SPC0))
+			return FW_ERROR_BUSY;
+	}
+
+	/* Reset the regmem idx */
+	ctx->regmem_idx = 0;
+
+	if (ctx->free_buf)
+		ia_css_syscom_free(ctx);
+
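+	/* note: the context memory may already be freed at this point;
+	 * callers must not use ctx after a successful release
+	 */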
+ return 0; +} + +int ia_css_syscom_send_port_open( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + int state; + + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + /* check if SP syscom is ready to open the queue */ + state = regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle messages yet */ + return FW_ERROR_BUSY; + } + + /* initialize the port */ + send_port_open(ctx->send_port + port, + ctx->input_queue + port, &(ctx->env)); + + return 0; +} + +int ia_css_syscom_send_port_close( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return 0; +} + +int ia_css_syscom_send_port_available( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return send_port_available(ctx->send_port + port); +} + +int ia_css_syscom_send_port_transfer( + struct ia_css_syscom_context *ctx, + unsigned int port, + const void *token +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return send_port_transfer(ctx->send_port + port, token); +} + +int ia_css_syscom_recv_port_open( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + int state; + + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + /* check if SP syscom is ready to open the queue */ + state = regmem_load_32(ctx->cell_dmem_addr, + SYSCOM_STATE_REG, ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle messages yet */ + return FW_ERROR_BUSY; + } + + /* initialize the port */ + recv_port_open(ctx->recv_port + port, + ctx->output_queue + port, &(ctx->env)); + + return 0; +} + +int ia_css_syscom_recv_port_close( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return 0; +} + +/* + * Get the number of responses in the response queue + */ +int +ia_css_syscom_recv_port_available( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return recv_port_available(ctx->recv_port + port); +} + + +/* + * Dequeue the head of the response queue + * returns an error when the response queue is empty + */ +int +ia_css_syscom_recv_port_transfer( + struct ia_css_syscom_context *ctx, + unsigned int port, + void *token +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return recv_port_transfer(ctx->recv_port + port, token); +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +/* + * store subsystem context information in DMEM + */ +int +ia_css_syscom_store_dmem( + struct ia_css_syscom_context *ctx, + unsigned int ssid, + unsigned int vtl0_addr_mask +) +{ + unsigned int read_back; + + NOT_USED(vtl0_addr_mask); + NOT_USED(read_back); + + if (ctx->secure) { + 
/* store VTL0 address mask in 'secure' context */
+		IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem VTL0_ADDR_MASK (%#x) @ dmem_addr %#x ssid %d\n",
+			vtl0_addr_mask, ctx->cell_dmem_addr, ssid);
+		regmem_store_32(ctx->cell_dmem_addr, SYSCOM_VTL0_ADDR_MASK, vtl0_addr_mask, ssid);
+	}
+	/* store firmware configuration address */
+	IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem CONFIG_REG (%#x) @ dmem_addr %#x ssid %d\n",
+		ctx->config_vied_addr, ctx->cell_dmem_addr, ssid);
+	regmem_store_32(ctx->cell_dmem_addr, SYSCOM_CONFIG_REG,
+		ctx->config_vied_addr, ssid);
+	/* store syscom uninitialized state */
+	IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem STATE_REG (%#x) @ dmem_addr %#x ssid %d\n",
+		SYSCOM_STATE_UNINIT, ctx->cell_dmem_addr, ssid);
+	regmem_store_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG,
+		SYSCOM_STATE_UNINIT, ssid);
+	/* store syscom uninitialized command */
+	IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem COMMAND_REG (%#x) @ dmem_addr %#x ssid %d\n",
+		SYSCOM_COMMAND_UNINIT, ctx->cell_dmem_addr, ssid);
+	regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG,
+		SYSCOM_COMMAND_UNINIT, ssid);
+
+	return 0;
+}
+
+/*
+ * store trustlet configuration status setting
+ */
+void
+ia_css_syscom_set_trustlet_status(
+	unsigned int dmem_addr,
+	unsigned int ssid,
+	bool trustlet_exist
+)
+{
+	unsigned int value;
+
+	value = trustlet_exist ? TRUSTLET_EXIST : TRUSTLET_NOT_EXIST;
+	IA_CSS_TRACE_3(SYSCOM, INFO,
+		"ia_css_syscom_set_trustlet_status TRUSTLET_STATUS (%#x) @ dmem_addr %#x ssid %d\n",
+		value, dmem_addr, ssid);
+	regmem_store_32(dmem_addr, TRUSTLET_STATUS, value, ssid);
+}
+
+/*
+ * check if SPC access blocker programming is completed
+ */
+bool
+ia_css_syscom_is_ab_spc_ready(
+	struct ia_css_syscom_context *ctx
+)
+{
+	unsigned int value;
+
+	/* we expect this call from the non-secure context only */
+	if (ctx->secure) {
+		IA_CSS_TRACE_0(SYSCOM, ERROR, "ia_css_syscom_is_ab_spc_ready - please call from non-secure context\n");
+		return false;
+	}
+
+	value = regmem_load_32(ctx->cell_dmem_addr, AB_SPC_STATUS, ctx->env.ssid);
+	IA_CSS_TRACE_3(SYSCOM, INFO,
+		"ia_css_syscom_is_ab_spc_ready AB_SPC_STATUS @ dmem_addr %#x ssid %d - value %#x\n",
+		ctx->cell_dmem_addr, ctx->env.ssid, value);
+
+	return (value == AB_SPC_READY);
+}
+#endif /* HAS_DUAL_CMD_CTX_SUPPORT */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom_config_fw.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom_config_fw.h
new file mode 100644
index 000000000000..0cacd5a34934
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom_config_fw.h
@@ -0,0 +1,69 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_SYSCOM_CONFIG_FW_H
+#define __IA_CSS_SYSCOM_CONFIG_FW_H
+
+#include "type_support.h"
+
+enum {
+	/* Program load or explicit host setting should init to this */
+	SYSCOM_STATE_UNINIT = 0x57A7E000,
+	/* SP Syscom sets this when it is ready for use */
+	SYSCOM_STATE_READY = 0x57A7E001,
+	/* SP Syscom sets this when no more syscom accesses will happen */
+	SYSCOM_STATE_INACTIVE = 0x57A7E002
+};
+
+enum {
+	/* Program load or explicit host setting should init to this */
+	SYSCOM_COMMAND_UNINIT = 0x57A7F000,
+	/* Host Syscom requests syscom to become inactive */
+	SYSCOM_COMMAND_INACTIVE = 0x57A7F001
+};
+
+#if HAS_DUAL_CMD_CTX_SUPPORT
+enum {
+	/* Program load or explicit host setting should init to this */
+	TRUSTLET_UNINIT = 0x57A8E000,
+	/* Host Syscom informs SP that Trustlet exists */
+	TRUSTLET_EXIST = 0x57A8E001,
+	/* Host Syscom informs SP that Trustlet does not exist */
+	TRUSTLET_NOT_EXIST = 0x57A8E002
+};
+
+enum {
+	/* Program load or explicit setting initialized by SP */
+	AB_SPC_NOT_READY = 0x57A8F000,
+	/* SP informs host that SPC access programming is completed */
+	AB_SPC_READY = 0x57A8F001
+};
+#endif
+
+/* firmware config: data that is sent from the host to the SP via DDR */
+/* Cell copies data into a context */
+
+struct ia_css_syscom_config_fw {
+	unsigned int firmware_address;
+
+	unsigned int num_input_queues;
+	unsigned int num_output_queues;
+	unsigned int input_queue; /* hmm_ptr / struct queue* */
+	unsigned int output_queue; /* hmm_ptr / struct queue* */
+
+	unsigned int specific_addr; /* vied virtual address */
+	unsigned int specific_size;
+};
+
+#endif /* __IA_CSS_SYSCOM_CONFIG_FW_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom_context.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom_context.h
new file mode 100644
index 000000000000..ecf22f6b7ac5
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/src/ia_css_syscom_context.h
@@ -0,0 +1,65 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IA_CSS_SYSCOM_CONTEXT_H +#define __IA_CSS_SYSCOM_CONTEXT_H + +#include + +#include "port_env_struct.h" +#include + +/* host context */ +struct ia_css_syscom_context { + vied_virtual_address_t cell_firmware_addr; + unsigned int cell_regs_addr; + unsigned int cell_dmem_addr; + + struct port_env env; + + unsigned int num_input_queues; + unsigned int num_output_queues; + + /* array of input queues (from host to SP) */ + struct sys_queue *input_queue; + /* array of output queues (from SP to host) */ + struct sys_queue *output_queue; + + struct send_port *send_port; + struct recv_port *recv_port; + + unsigned int regmem_idx; + unsigned int free_buf; + + host_virtual_address_t config_host_addr; + host_virtual_address_t input_queue_host_addr; + host_virtual_address_t output_queue_host_addr; + host_virtual_address_t specific_host_addr; + host_virtual_address_t ibuf_host_addr; + host_virtual_address_t obuf_host_addr; + + vied_virtual_address_t config_vied_addr; + vied_virtual_address_t input_queue_vied_addr; + vied_virtual_address_t output_queue_vied_addr; + vied_virtual_address_t specific_vied_addr; + vied_virtual_address_t ibuf_vied_addr; + vied_virtual_address_t obuf_vied_addr; + + /* if true; secure syscom object as in VTIO Case + * if false, non-secure syscom + */ + bool secure; +}; + +#endif /* __IA_CSS_SYSCOM_CONTEXT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/syscom.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/syscom.mk new file mode 100644 index 000000000000..8d36b8928af5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/syscom/syscom.mk @@ -0,0 +1,42 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is SYSCOM + +SYSCOM_DIR=$${MODULES_DIR}/syscom + +SYSCOM_INTERFACE=$(SYSCOM_DIR)/interface +SYSCOM_SOURCES1=$(SYSCOM_DIR)/src + +SYSCOM_HOST_FILES += $(SYSCOM_SOURCES1)/ia_css_syscom.c + +SYSCOM_HOST_CPPFLAGS += -I$(SYSCOM_INTERFACE) +SYSCOM_HOST_CPPFLAGS += -I$(SYSCOM_SOURCES1) +SYSCOM_HOST_CPPFLAGS += -I$${MODULES_DIR}/devices +ifdef REGMEM_SECURE_OFFSET +SYSCOM_HOST_CPPFLAGS += -DREGMEM_SECURE_OFFSET=$(REGMEM_SECURE_OFFSET) +else +SYSCOM_HOST_CPPFLAGS += -DREGMEM_SECURE_OFFSET=0 +endif + +SYSCOM_FW_FILES += $(SYSCOM_SOURCES1)/ia_css_syscom_fw.c + +SYSCOM_FW_CPPFLAGS += -I$(SYSCOM_INTERFACE) +SYSCOM_FW_CPPFLAGS += -I$(SYSCOM_SOURCES1) +SYSCOM_FW_CPPFLAGS += -DREGMEM_OFFSET=$(REGMEM_OFFSET) +ifdef REGMEM_SECURE_OFFSET +SYSCOM_FW_CPPFLAGS += -DREGMEM_SECURE_OFFSET=$(REGMEM_SECURE_OFFSET) +else +SYSCOM_FW_CPPFLAGS += -DREGMEM_SECURE_OFFSET=0 +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/trace/interface/ia_css_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/trace/interface/ia_css_trace.h new file mode 100644 index 000000000000..b85b1810f107 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/trace/interface/ia_css_trace.h @@ -0,0 +1,883 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +/*! \file */ + +#ifndef __IA_CSS_TRACE_H +#define __IA_CSS_TRACE_H + +/* +** Configurations +*/ + +/** + * STEP 1: Define {Module Name}_TRACE_METHOD to one of the following. + * Where: + * {Module Name} is the name of the targeted module. + * + * Example: + * #define NCI_DMA_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + */ + +/**< Use whatever method of tracing that best suits the platform + * this code is compiled for. + */ +#define IA_CSS_TRACE_METHOD_NATIVE 1 +/**< Use the Tracing NCI. */ +#define IA_CSS_TRACE_METHOD_TRACE 2 + +/** + * STEP 2: Define {Module Name}_TRACE_LEVEL_{Level} to one of the following. + * Where: + * {Module Name} is the name of the targeted module. + * {Level}, in decreasing order of severity, is one of the + * following values: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * + * Example: + * #define NCI_DMA_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + * #define NCI_DMA_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + */ +/**< Disables the corresponding trace level. */ +#define IA_CSS_TRACE_LEVEL_DISABLED 0 +/**< Enables the corresponding trace level. */ +#define IA_CSS_TRACE_LEVEL_ENABLED 1 + +/* + * Used in macro definition with do-while loop + * for removing checkpatch warnings + */ +#define IA_CSS_TRACE_FILE_DUMMY_DEFINE + +/** + * STEP 3: Define IA_CSS_TRACE_PRINT_FILE_LINE to have file name and + * line printed with every log message. + * + * Example: + * #define IA_CSS_TRACE_PRINT_FILE_LINE + */ + +/* +** Interface +*/ + +/* +** Static +*/ + +/** + * Logs a message with zero arguments if the targeted severity level is enabled + * at compile-time. + * @param module The targeted module. + * @param severity The severity level of the trace message. In decreasing order: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * @param format The message to be traced. + */ +#define IA_CSS_TRACE_0(module, severity, format) \ + IA_CSS_TRACE_IMPL(module, 0, severity, format) + +/** + * Logs a message with one argument if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_1(module, severity, format, a1) \ + IA_CSS_TRACE_IMPL(module, 1, severity, format, a1) + +/** + * Logs a message with two arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_2(module, severity, format, a1, a2) \ + IA_CSS_TRACE_IMPL(module, 2, severity, format, a1, a2) + +/** + * Logs a message with three arguments if the targeted severity level + * is enabled at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_3(module, severity, format, a1, a2, a3) \ + IA_CSS_TRACE_IMPL(module, 3, severity, format, a1, a2, a3) + +/** + * Logs a message with four arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_4(module, severity, format, a1, a2, a3, a4) \ + IA_CSS_TRACE_IMPL(module, 4, severity, format, a1, a2, a3, a4) + +/** + * Logs a message with five arguments if the targeted severity level is enabled + * at compile-time. 
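+ *
+ * Example (module SYSCOM, whose INFO severity is enabled in
+ * ia_css_syscom_trace.h):
+ *	IA_CSS_TRACE_5(SYSCOM, INFO, "%d %d %d %d %d\n", a, b, c, d, e);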
+ * @see IA_CSS_TRACE_0
+ */
+#define IA_CSS_TRACE_5(module, severity, format, a1, a2, a3, a4, a5) \
+	IA_CSS_TRACE_IMPL(module, 5, severity, format, a1, a2, a3, a4, a5)
+
+/**
+ * Logs a message with six arguments if the targeted severity level is enabled
+ * at compile-time.
+ * @see IA_CSS_TRACE_0
+ */
+#define IA_CSS_TRACE_6(module, severity, format, a1, a2, a3, a4, a5, a6) \
+	IA_CSS_TRACE_IMPL(module, 6, severity, format, a1, a2, a3, a4, a5, a6)
+
+/**
+ * Logs a message with seven arguments if the targeted severity level
+ * is enabled at compile-time.
+ * @see IA_CSS_TRACE_0
+ */
+#define IA_CSS_TRACE_7(module, severity, format, a1, a2, a3, a4, a5, a6, a7) \
+	IA_CSS_TRACE_IMPL(module, 7, severity, format, \
+		a1, a2, a3, a4, a5, a6, a7)
+
+/*
+** Dynamic
+*/
+
+/**
+* Declares, but does not define, dynamic tracing functions and variables
+* for module \p module. For each module, place an instance of this macro
+* in the compilation unit in which you want to use the dynamic tracing
+* facility, so as to inform the compiler of the declaration of the available
+* functions. An invocation of this macro does not enable any of the available
+* tracing levels. Do not place a semicolon after a call to this macro.
+* @see IA_CSS_TRACE_DYNAMIC_DEFINE
+*/
+#define IA_CSS_TRACE_DYNAMIC_DECLARE(module) \
+	IA_CSS_TRACE_DYNAMIC_DECLARE_IMPL(module)
+/**
+* Declares the configuration function for the dynamic API separately, if one
+* wants to use it.
+*/
+#define IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC(module) \
+	IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC_IMPL(module)
+
+/**
+* Defines dynamic tracing functions and variables for module \p module.
+* For each module, place an instance of this macro in one, and only one,
+* of your SOURCE files so as to allow the linker to resolve the related
+* symbols. An invocation of this macro does not enable any of the available
+* tracing levels. Do not place a semicolon after a call to this macro.
+* @see IA_CSS_TRACE_DYNAMIC_DECLARE
+*/
+#define IA_CSS_TRACE_DYNAMIC_DEFINE(module) \
+	IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(module)
+/**
+* Defines the configuration function for the dynamic API separately, if one
+* wants to use it.
+*/
+#define IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC(module) \
+	IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(module)
+
+/**
+ * Logs a message with zero arguments if the targeted severity level is enabled
+ * both at compile-time, and run-time.
+ * @param module The targeted module.
+ * @param severity The severity level of the trace message. In decreasing order:
+ *	{ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}.
+ * @param format The message to be traced.
+ */
+#define IA_CSS_TRACE_DYNAMIC_0(module, severity, format) \
+	IA_CSS_TRACE_DYNAMIC_IMPL(module, 0, severity, format)
+
+/**
+ * Logs a message with one argument if the targeted severity level is enabled
+ * both at compile-time, and run-time.
+ * @see IA_CSS_TRACE_DYNAMIC_0
+ */
+#define IA_CSS_TRACE_DYNAMIC_1(module, severity, format, a1) \
+	IA_CSS_TRACE_DYNAMIC_IMPL(module, 1, severity, format, a1)
+
+/**
+ * Logs a message with two arguments if the targeted severity level is enabled
+ * both at compile-time, and run-time.
+ * @see IA_CSS_TRACE_DYNAMIC_0
+ */
+#define IA_CSS_TRACE_DYNAMIC_2(module, severity, format, a1, a2) \
+	IA_CSS_TRACE_DYNAMIC_IMPL(module, 2, severity, format, a1, a2)
+
+/**
+ * Logs a message with three arguments if the targeted severity level
+ * is enabled both at compile-time, and run-time.
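+ *
+ * Example (hypothetical module FOO that has placed
+ * IA_CSS_TRACE_DYNAMIC_DEFINE(FOO) in a source file and enabled its
+ * 'info' level at run-time):
+ *	IA_CSS_TRACE_DYNAMIC_3(FOO, INFO, "x=%d y=%d z=%d\n", x, y, z);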
+ * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_3(module, severity, format, a1, a2, a3) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 3, severity, format, a1, a2, a3) + +/** + * Logs a message with four arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_4(module, severity, format, a1, a2, a3, a4) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 4, severity, format, a1, a2, a3, a4) + +/** + * Logs a message with five arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_5(module, severity, format, a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 5, severity, format, \ + a1, a2, a3, a4, a5) + +/** + * Logs a message with six arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_6(module, severity, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 6, severity, format, \ + a1, a2, a3, a4, a5, a6) + +/** + * Logs a message with seven arguments if the targeted severity level + * is enabled both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_7(module, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 7, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) + +/* +** Implementation +*/ + +/* CAT */ +#define IA_CSS_TRACE_CAT_IMPL(a, b) a ## b +#define IA_CSS_TRACE_CAT(a, b) IA_CSS_TRACE_CAT_IMPL(a, b) + +/* Bridge */ +#if defined(__HIVECC) || defined(__GNUC__) +#define IA_CSS_TRACE_IMPL(module, argument_count, severity, arguments ...) \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_, \ + argument_count \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_LEVEL_ \ + ), \ + severity \ + ) \ + ( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_SEVERITY_, \ + severity \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + #module, \ + ## arguments \ + ) \ + ) + +/* Bridge */ +#define IA_CSS_TRACE_DYNAMIC_IMPL(module, argument_count, severity, \ + arguments ...) \ + do { \ + if (IA_CSS_TRACE_CAT(IA_CSS_TRACE_CAT(module, _trace_level_), \ + severity)) { \ + IA_CSS_TRACE_IMPL(module, argument_count, severity, \ + ## arguments); \ + } \ + } while (0) +#elif defined(_MSC_VER) +#define IA_CSS_TRACE_IMPL(module, argument_count, severity, ...) \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_, \ + argument_count \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_LEVEL_ \ + ), \ + severity \ + ) \ + ( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_SEVERITY_, \ + severity \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + #module, \ + __VA_ARGS__ \ + ) \ + ) + +/* Bridge */ +#define IA_CSS_TRACE_DYNAMIC_IMPL(module, argument_count, severity, ...) 
\ + do { \ + if (IA_CSS_TRACE_CAT(IA_CSS_TRACE_CAT(module, _trace_level_), \ + severity)) { \ + IA_CSS_TRACE_IMPL(module, argument_count, severity, \ + __VA_ARGS__); \ + } \ + } while (0) +#endif + +/* +** Native Backend +*/ + +#if defined(__HIVECC) + #define IA_CSS_TRACE_PLATFORM_CELL +#elif defined(__GNUC__) + #define IA_CSS_TRACE_PLATFORM_HOST + + #define IA_CSS_TRACE_NATIVE(severity, module, format, arguments ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, module, \ + format), ## arguments); \ + } while (0) + /* TODO: In case Host Side tracing is needed to be mapped to the + * Tunit, the following "IA_CSS_TRACE_TRACE" needs to be modified from + * PRINT to vied_nci_tunit_print function calls + */ + #define IA_CSS_TRACE_TRACE(severity, module, format, arguments ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, module, \ + format), ## arguments); \ + } while (0) + +#elif defined(_MSC_VER) + #define IA_CSS_TRACE_PLATFORM_HOST + + #define IA_CSS_TRACE_NATIVE(severity, module, format, ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, \ + module, format), __VA_ARGS__); \ + } while (0) + /* TODO: In case Host Side tracing is needed to be mapped to the + * Tunit, the following "IA_CSS_TRACE_TRACE" needs to be modified from + * PRINT to vied_nci_tunit_print function calls + */ + #define IA_CSS_TRACE_TRACE(severity, module, format, ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, \ + module, format), __VA_ARGS__); \ + } while (0) +#else + #error Unsupported platform! +#endif /* Platform */ + +#if defined(IA_CSS_TRACE_PLATFORM_CELL) + #include /* VOLATILE */ + + #ifdef IA_CSS_TRACE_PRINT_FILE_LINE + #define IA_CSS_TRACE_FILE_PRINT_COMMAND \ + do { \ + OP___printstring(__FILE__":") VOLATILE; \ + OP___printdec(__LINE__) VOLATILE; \ + OP___printstring("\n") VOLATILE; \ + } while (0) + #else + #define IA_CSS_TRACE_FILE_PRINT_COMMAND + #endif + + #define IA_CSS_TRACE_MODULE_SEVERITY_PRINT(module, severity) \ + do { \ + IA_CSS_TRACE_FILE_DUMMY_DEFINE; \ + OP___printstring("["module"]:["severity"]:") \ + VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_MSG_NATIVE(severity, module, format) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + OP___printstring("["module"]:["severity"]: "format) \ + VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_ARG_NATIVE(module, severity, i, value) \ + do { \ + IA_CSS_TRACE_MODULE_SEVERITY_PRINT(module, severity); \ + OP___dump(i, value) VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_0(severity, module, format) \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format) + + #define IA_CSS_TRACE_NATIVE_1(severity, module, format, a1) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_2(severity, module, format, a1, a2) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_3(severity, module, format, a1, a2, a3) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + } while (0) + + #define 
IA_CSS_TRACE_NATIVE_4(severity, module, format, \ + a1, a2, a3, a4) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 6, a6); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 6, a6); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 7, a7); \ + } while (0) + /* + ** Tracing Backend + */ +#if !defined(HRT_CSIM) && !defined(NO_TUNIT) + #include "vied_nci_tunit.h" +#endif + #define IA_CSS_TRACE_AUG_FORMAT_TRACE(format, module) \ + "[" module "]" format " : PID = %x : Timestamp = %d : PC = %x" + + #define IA_CSS_TRACE_TRACE_0(severity, module, format) \ + vied_nci_tunit_print(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity) + + #define IA_CSS_TRACE_TRACE_1(severity, module, format, a1) \ + vied_nci_tunit_print1i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1) + + #define IA_CSS_TRACE_TRACE_2(severity, module, format, a1, a2) \ + vied_nci_tunit_print2i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2) + + #define IA_CSS_TRACE_TRACE_3(severity, module, format, a1, a2, a3) \ + vied_nci_tunit_print3i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3) + + #define IA_CSS_TRACE_TRACE_4(severity, module, format, a1, a2, a3, a4) \ + vied_nci_tunit_print4i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4) + + #define IA_CSS_TRACE_TRACE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + vied_nci_tunit_print5i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_TRACE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + vied_nci_tunit_print6i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_TRACE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + vied_nci_tunit_print7i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5, a6, a7) + +#elif 
defined(IA_CSS_TRACE_PLATFORM_HOST) + #include "print_support.h" + + #ifdef IA_CSS_TRACE_PRINT_FILE_LINE + #define IA_CSS_TRACE_FILE_PRINT_COMMAND \ + PRINT("%s:%d:\n", __FILE__, __LINE__) + #else + #define IA_CSS_TRACE_FILE_PRINT_COMMAND + #endif + + #define IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, module, format) \ + "[" module "]:[" severity "]: " format + + #define IA_CSS_TRACE_NATIVE_0(severity, module, format) \ + IA_CSS_TRACE_NATIVE(severity, module, format) + + #define IA_CSS_TRACE_NATIVE_1(severity, module, format, a1) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1) + + #define IA_CSS_TRACE_NATIVE_2(severity, module, format, a1, a2) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2) + + #define IA_CSS_TRACE_NATIVE_3(severity, module, format, a1, a2, a3) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2, a3) + + #define IA_CSS_TRACE_NATIVE_4(severity, module, format, \ + a1, a2, a3, a4) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2, a3, a4) + + #define IA_CSS_TRACE_NATIVE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_NATIVE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_NATIVE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) + + #define IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, module, format) \ + "["module"]:["severity"]: "format + + #define IA_CSS_TRACE_TRACE_0(severity, module, format) \ + IA_CSS_TRACE_TRACE(severity, module, format) + + #define IA_CSS_TRACE_TRACE_1(severity, module, format, a1) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1) + + #define IA_CSS_TRACE_TRACE_2(severity, module, format, a1, a2) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2) + + #define IA_CSS_TRACE_TRACE_3(severity, module, format, a1, a2, a3) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2, a3) + + #define IA_CSS_TRACE_TRACE_4(severity, module, format, \ + a1, a2, a3, a4) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2, a3, a4) + + #define IA_CSS_TRACE_TRACE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_TRACE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_TRACE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) +#endif + +/* Disabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_1_0(severity, module, format) +#define IA_CSS_TRACE_1_1_0(severity, module, format, arg1) +#define IA_CSS_TRACE_2_1_0(severity, module, format, arg1, arg2) +#define IA_CSS_TRACE_3_1_0(severity, module, format, arg1, arg2, arg3) +#define IA_CSS_TRACE_4_1_0(severity, module, format, arg1, arg2, arg3, arg4) +#define IA_CSS_TRACE_5_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5) +#define IA_CSS_TRACE_6_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6) +#define IA_CSS_TRACE_7_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7) + +/* Enabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_1_1 IA_CSS_TRACE_NATIVE_0 
+#define IA_CSS_TRACE_1_1_1 IA_CSS_TRACE_NATIVE_1 +#define IA_CSS_TRACE_2_1_1 IA_CSS_TRACE_NATIVE_2 +#define IA_CSS_TRACE_3_1_1 IA_CSS_TRACE_NATIVE_3 +#define IA_CSS_TRACE_4_1_1 IA_CSS_TRACE_NATIVE_4 +#define IA_CSS_TRACE_5_1_1 IA_CSS_TRACE_NATIVE_5 +#define IA_CSS_TRACE_6_1_1 IA_CSS_TRACE_NATIVE_6 +#define IA_CSS_TRACE_7_1_1 IA_CSS_TRACE_NATIVE_7 + +/* Enabled */ +/* Legend: IA_CSS_TRACE_SEVERITY_{Severity Level}_{Backend ID} */ +#define IA_CSS_TRACE_SEVERITY_ASSERT_1 "Assert" +#define IA_CSS_TRACE_SEVERITY_ERROR_1 "Error" +#define IA_CSS_TRACE_SEVERITY_WARNING_1 "Warning" +#define IA_CSS_TRACE_SEVERITY_INFO_1 "Info" +#define IA_CSS_TRACE_SEVERITY_DEBUG_1 "Debug" +#define IA_CSS_TRACE_SEVERITY_VERBOSE_1 "Verbose" + +/* Disabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_2_0(severity, module, format) +#define IA_CSS_TRACE_1_2_0(severity, module, format, arg1) +#define IA_CSS_TRACE_2_2_0(severity, module, format, arg1, arg2) +#define IA_CSS_TRACE_3_2_0(severity, module, format, arg1, arg2, arg3) +#define IA_CSS_TRACE_4_2_0(severity, module, format, arg1, arg2, arg3, arg4) +#define IA_CSS_TRACE_5_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5) +#define IA_CSS_TRACE_6_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6) +#define IA_CSS_TRACE_7_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7) + +/* Enabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_2_1 IA_CSS_TRACE_TRACE_0 +#define IA_CSS_TRACE_1_2_1 IA_CSS_TRACE_TRACE_1 +#define IA_CSS_TRACE_2_2_1 IA_CSS_TRACE_TRACE_2 +#define IA_CSS_TRACE_3_2_1 IA_CSS_TRACE_TRACE_3 +#define IA_CSS_TRACE_4_2_1 IA_CSS_TRACE_TRACE_4 +#define IA_CSS_TRACE_5_2_1 IA_CSS_TRACE_TRACE_5 +#define IA_CSS_TRACE_6_2_1 IA_CSS_TRACE_TRACE_6 +#define IA_CSS_TRACE_7_2_1 IA_CSS_TRACE_TRACE_7 + +/* Enabled */ +/* Legend: IA_CSS_TRACE_SEVERITY_{Severity Level}_{Backend ID} */ +#define IA_CSS_TRACE_SEVERITY_ASSERT_2 VIED_NCI_TUNIT_MSG_SEVERITY_FATAL +#define IA_CSS_TRACE_SEVERITY_ERROR_2 VIED_NCI_TUNIT_MSG_SEVERITY_ERROR +#define IA_CSS_TRACE_SEVERITY_WARNING_2 VIED_NCI_TUNIT_MSG_SEVERITY_WARNING +#define IA_CSS_TRACE_SEVERITY_INFO_2 VIED_NCI_TUNIT_MSG_SEVERITY_NORMAL +#define IA_CSS_TRACE_SEVERITY_DEBUG_2 VIED_NCI_TUNIT_MSG_SEVERITY_USER1 +#define IA_CSS_TRACE_SEVERITY_VERBOSE_2 VIED_NCI_TUNIT_MSG_SEVERITY_USER2 + +/* +** Dynamicism +*/ + +#define IA_CSS_TRACE_DYNAMIC_DECLARE_IMPL(module) \ + do { \ + void IA_CSS_TRACE_CAT(module, _trace_assert_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_assert_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_error_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_error_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_warning_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_warning_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_info_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_info_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_debug_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_debug_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_disable)(void); \ + } while (0) + +#define IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC_IMPL(module) \ + do { \ + IA_CSS_TRACE_FILE_DUMMY_DEFINE; \ + void IA_CSS_TRACE_CAT(module, _trace_configure)\ + (int argc, const char *const *argv); \ + } while (0) + +#include "platform_support.h" +#include 
"type_support.h" + +#define IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(module) \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_assert); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_error); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_warning); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_info); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_debug); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_verbose); \ + \ + void IA_CSS_TRACE_CAT(module, _trace_assert_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_assert) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_assert_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_assert) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_error_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_error) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_error_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_error) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_warning_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_warning) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_warning_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_warning) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_info_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_info) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_info_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_info) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_debug_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_debug) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_debug_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_debug) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_verbose) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_verbose) = 0; \ + } + +#define IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(module) \ +void IA_CSS_TRACE_CAT(module, _trace_configure)(const int argc, \ + const char *const *const argv) \ +{ \ + int i = 1; \ + const char *levels = 0; \ + \ + while (i < argc) { \ + if (!strcmp(argv[i], "-" #module "_trace")) { \ + ++i; \ + \ + if (i < argc) { \ + levels = argv[i]; \ + \ + while (*levels) { \ + switch (*levels++) { \ + case 'a': \ + IA_CSS_TRACE_CAT \ + (module, _trace_assert_enable)(); \ + break; \ + \ + case 'e': \ + IA_CSS_TRACE_CAT \ + (module, _trace_error_enable)(); \ + break; \ + \ + case 'w': \ + IA_CSS_TRACE_CAT \ + (module, _trace_warning_enable)(); \ + break; \ + \ + case 'i': \ + IA_CSS_TRACE_CAT \ + (module, _trace_info_enable)(); \ + break; \ + \ + case 'd': \ + IA_CSS_TRACE_CAT \ + (module, _trace_debug_enable)(); \ + break; \ + \ + case 'v': \ + IA_CSS_TRACE_CAT \ + (module, _trace_verbose_enable)(); \ + break; \ + \ + default: \ + } \ + } \ + } \ + } \ + \ + ++i; \ + } \ +} + +#endif /* __IA_CSS_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/trace/trace.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/trace/trace.mk new file mode 100644 index 000000000000..b232880b882b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/trace/trace.mk @@ -0,0 +1,40 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. 
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/trace/trace.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/trace/trace.mk
new file mode 100644
index 000000000000..b232880b882b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/trace/trace.mk
@@ -0,0 +1,40 @@
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details
+#
+#
+# MODULE Trace
+
+# Dependencies
+IA_CSS_TRACE_SUPPORT = $${MODULES_DIR}/support
+
+# API
+IA_CSS_TRACE = $${MODULES_DIR}/trace
+IA_CSS_TRACE_INTERFACE = $(IA_CSS_TRACE)/interface
+
+#
+# Host
+#
+
+# Host CPP Flags
+IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE_SUPPORT)
+IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE_INTERFACE)
+IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE)/trace_modules
+
+#
+# Firmware
+#
+
+# Firmware CPP Flags
+IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE_SUPPORT)
+IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE_INTERFACE)
+IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE)/trace_modules
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/utils/system_defs/system_const.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/utils/system_defs/system_const.h
new file mode 100644
index 000000000000..161f28fced97
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/utils/system_defs/system_const.h
@@ -0,0 +1,26 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __SYSTEM_CONST_H
+#define __SYSTEM_CONST_H
+
+/* The values included in this file should have been
+ * taken from system/device properties, which
+ * are not currently available in the SDK.
+ */
+
+#define XMEM_WIDTH (512)
+#define MG_PPC (4)
+
+#endif /* __SYSTEM_CONST_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/shared_memory_access.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/shared_memory_access.h
new file mode 100644
index 000000000000..1e81bad9f4ee
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/shared_memory_access.h
@@ -0,0 +1,139 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+#ifndef _SHARED_MEMORY_ACCESS_H
+#define _SHARED_MEMORY_ACCESS_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <vied/vied_memory_access_types.h>
+
+typedef enum {
+	sm_esuccess,
+	sm_enomem,
+	sm_ezeroalloc,
+	sm_ebadvaddr,
+	sm_einternalerror,
+	sm_ecorruption,
+	sm_enocontiguousmem,
+	sm_enolocmem,
+	sm_emultiplefree,
+} shared_memory_error;
+
+/**
+ * \brief Virtual address of (DDR) shared memory space as seen from the VIED subsystem
+ */
+typedef uint32_t vied_virtual_address_t;
+
+/**
+ * \brief Virtual address of (DDR) shared memory space as seen from the host
+ */
+typedef unsigned long long host_virtual_address_t;
+
+/**
+ * \brief List of physical addresses of (DDR) shared memory space. This is used to represent a list of physical pages.
+ */
+typedef struct shared_memory_physical_page_list_s *shared_memory_physical_page_list;
+typedef struct shared_memory_physical_page_list_s
+{
+	shared_memory_physical_page_list next;
+	vied_physical_address_t address;
+} shared_memory_physical_page_list_s;
+
+
+/**
+ * \brief Initialize the shared memory interface administration on the host.
+ * \param idm: id of ddr memory
+ * \param host_ddr_addr: physical address of memory as seen from host
+ * \param memory_size: size of ddr memory in bytes
+ * \param ps: size of page in bytes (for instance 4096)
+ */
+int shared_memory_allocation_initialize(vied_memory_t idm, vied_physical_address_t host_ddr_addr, size_t memory_size, size_t ps);
+
+/**
+ * \brief De-initialize the shared memory interface administration on the host.
+ *
+ */
+void shared_memory_allocation_uninitialize(vied_memory_t idm);
+
+/**
+ * \brief Allocate (DDR) shared memory space and return a host virtual address. Returns 0 when insufficient memory is available.
+ */
+host_virtual_address_t shared_memory_alloc(vied_memory_t idm, size_t bytes);
+
+/**
+ * \brief Free (DDR) shared memory space.
+*/
+void shared_memory_free(vied_memory_t idm, host_virtual_address_t addr);
+
+/**
+ * \brief Translate a host virtual address to a physical address.
+*/
+vied_physical_address_t shared_memory_virtual_host_to_physical_address(vied_memory_t idm, host_virtual_address_t addr);
+
+/**
+ * \brief Return the allocated physical pages for a host virtual address.
+*/
+shared_memory_physical_page_list shared_memory_virtual_host_to_physical_pages(vied_memory_t idm, host_virtual_address_t addr);
+
+/**
+ * \brief Destroy a shared_memory_physical_page_list.
+*/
+void shared_memory_physical_pages_list_destroy(shared_memory_physical_page_list ppl);
+
+/**
+ * \brief Store a byte into (DDR) shared memory space using a host virtual address
+ */
+void shared_memory_store_8 (vied_memory_t idm, host_virtual_address_t addr, uint8_t data);
+
+/**
+ * \brief Store a 16-bit word into (DDR) shared memory space using a host virtual address
+ */
+void shared_memory_store_16(vied_memory_t idm, host_virtual_address_t addr, uint16_t data);
+
+/**
+ * \brief Store a 32-bit word into (DDR) shared memory space using a host virtual address
+ */
+void shared_memory_store_32(vied_memory_t idm, host_virtual_address_t addr, uint32_t data);
+
+/**
+ * \brief Store a number of bytes into (DDR) shared memory space using a host virtual address
+ */
+void shared_memory_store(vied_memory_t idm, host_virtual_address_t addr, const void *data, size_t bytes);
+
+/**
+ * \brief Set a number of bytes of (DDR) shared memory space to 0 using a host virtual address
+ */
+void shared_memory_zero(vied_memory_t idm, host_virtual_address_t addr, size_t bytes);
+
+/**
+ * \brief Load a byte from (DDR) shared memory space using a host virtual address
+ */
+uint8_t shared_memory_load_8 (vied_memory_t idm, host_virtual_address_t addr);
+
+/**
+ * \brief Load a 16-bit word from (DDR) shared memory space using a host virtual address
+ */
+uint16_t shared_memory_load_16(vied_memory_t idm, host_virtual_address_t addr);
+
+/**
+ * \brief Load a 32-bit word from (DDR) shared memory space using a host virtual address
+ */
+uint32_t shared_memory_load_32(vied_memory_t idm, host_virtual_address_t addr);
+
+/**
+ * \brief Load a number of bytes from (DDR) shared memory space using a host virtual address
+ */
+void shared_memory_load(vied_memory_t idm, host_virtual_address_t addr, void *data, size_t bytes);
+
+#endif /* _SHARED_MEMORY_ACCESS_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/shared_memory_map.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/shared_memory_map.h
new file mode 100644
index 000000000000..1bbedcf9e7fd
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/shared_memory_map.h
@@ -0,0 +1,53 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+#ifndef _SHARED_MEMORY_MAP_H
+#define _SHARED_MEMORY_MAP_H
+
+#include <vied/shared_memory_access.h>
+#include <vied/vied_subsystem_access_types.h>
+#include <vied/vied_memory_access_types.h>
+
+typedef void (*shared_memory_invalidate_mmu_tlb)(void);
+typedef void (*shared_memory_set_page_table_base_address)(vied_physical_address_t);
+
+typedef void (*shared_memory_invalidate_mmu_tlb_ssid)(vied_subsystem_t id);
+typedef void (*shared_memory_set_page_table_base_address_ssid)(vied_subsystem_t id, vied_physical_address_t);
+
+/**
+ * \brief Initialize the CSS virtual address system and MMU. The subsystem id will NOT be taken into account.
+*/
+int shared_memory_map_initialize(vied_subsystem_t id, vied_memory_t idm, size_t mmu_ps, size_t mmu_pnrs, vied_physical_address_t ddr_addr, shared_memory_invalidate_mmu_tlb inv_tlb, shared_memory_set_page_table_base_address sbt);
+
+/**
+ * \brief Initialize the CSS virtual address system and MMU. The subsystem id will be taken into account.
+*/
+int shared_memory_map_initialize_ssid(vied_subsystem_t id, vied_memory_t idm, size_t mmu_ps, size_t mmu_pnrs, vied_physical_address_t ddr_addr, shared_memory_invalidate_mmu_tlb_ssid inv_tlb, shared_memory_set_page_table_base_address_ssid sbt);
+
+/**
+ * \brief De-initialize the CSS virtual address system and MMU.
+*/
+void shared_memory_map_uninitialize(vied_subsystem_t id, vied_memory_t idm);
+
+/**
+ * \brief Convert a host virtual address to a CSS virtual address and update the MMU.
+*/
+vied_virtual_address_t shared_memory_map(vied_subsystem_t id, vied_memory_t idm, host_virtual_address_t addr);
+
+/**
+ * \brief Free a CSS virtual address and update the MMU.
+*/
+void shared_memory_unmap(vied_subsystem_t id, vied_memory_t idm, vied_virtual_address_t addr);
+
+
+#endif /* _SHARED_MEMORY_MAP_H */
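For illustration, a minimal host-side sketch combining the two interfaces above: allocate backing store, write through the host view, hand the CSS view to the firmware, then tear everything down. The subsystem/memory ids (0) and the 4096-byte size are illustrative only, and the sketch assumes the initialization routines have already been called:

    #include "vied/shared_memory_access.h"
    #include "vied/shared_memory_map.h"

    static int example_shared_memory_roundtrip(void)
    {
        vied_subsystem_t sid = 0;   /* illustrative subsystem id */
        vied_memory_t mid = 0;      /* illustrative memory id */
        host_virtual_address_t host;
        vied_virtual_address_t css;

        host = shared_memory_alloc(mid, 4096);
        if (host == 0)
            return -1;  /* allocation failed (sm_enomem) */

        shared_memory_store_32(mid, host, 0x12345678);  /* host-side write */
        css = shared_memory_map(sid, mid, host);        /* CSS view for firmware */

        /* ... pass 'css' to the subsystem and wait for completion ... */

        shared_memory_unmap(sid, mid, css);
        shared_memory_free(mid, host);
        return 0;
    }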
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_config.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_config.h
new file mode 100644
index 000000000000..912f016ead24
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_config.h
@@ -0,0 +1,33 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+#ifndef _HRT_VIED_CONFIG_H
+#define _HRT_VIED_CONFIG_H
+
+/* Defines from the compiler:
+ * HRT_HOST - this is code running on the host
+ * HRT_CELL - this is code running on a cell
+ */
+#ifdef HRT_HOST
+# define CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL 1
+# undef CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL
+
+#elif defined (HRT_CELL)
+# undef CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL
+# define CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL 1
+
+#else /* !HRT_CELL */
+/* Neither HRT_HOST nor HRT_CELL is defined; this is allowed for testing purposes. */
+#endif /* !HRT_CELL */
+
+#endif /* _HRT_VIED_CONFIG_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_memory_access_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_memory_access_types.h
new file mode 100644
index 000000000000..0b44492789e3
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_memory_access_types.h
@@ -0,0 +1,36 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+#ifndef _HRT_VIED_MEMORY_ACCESS_TYPES_H
+#define _HRT_VIED_MEMORY_ACCESS_TYPES_H
+
+/** Types for the VIED memory access interface */
+
+#include "vied_types.h"
+
+/**
+ * \brief An identifier for a system memory.
+ *
+ * This identifier must be a compile-time constant. It is used in
+ * access to system memory.
+ */
+typedef unsigned int vied_memory_t;
+
+#ifndef __HIVECC
+/**
+ * \brief The type for a physical address
+ */
+typedef unsigned long long vied_physical_address_t;
+#endif
+
+#endif /* _HRT_VIED_MEMORY_ACCESS_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access.h
new file mode 100644
index 000000000000..674f5fb5b0f9
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access.h
@@ -0,0 +1,70 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_H
+#define _HRT_VIED_SUBSYSTEM_ACCESS_H
+
+#include <stdint.h>
+#include "vied_config.h"
+#include "vied_subsystem_access_types.h"
+
+#if !defined(CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL) && \
+	!defined(CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL)
+#error Implementation selection macro for vied subsystem access not defined
+#endif
+
+#if defined(CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL)
+#ifndef __HIVECC
+#error "Inline implementation of subsystem access not supported for host"
+#endif
+#define _VIED_SUBSYSTEM_ACCESS_INLINE static __inline
+#include "vied_subsystem_access_impl.h"
+#else
+#define _VIED_SUBSYSTEM_ACCESS_INLINE
+#endif
+
+_VIED_SUBSYSTEM_ACCESS_INLINE
+void vied_subsystem_store_8 (vied_subsystem_t dev,
+			vied_subsystem_address_t addr, uint8_t data);
+
+_VIED_SUBSYSTEM_ACCESS_INLINE
+void vied_subsystem_store_16(vied_subsystem_t dev,
+			vied_subsystem_address_t addr, uint16_t data);
+
+_VIED_SUBSYSTEM_ACCESS_INLINE
+void vied_subsystem_store_32(vied_subsystem_t dev,
+			vied_subsystem_address_t addr, uint32_t data);
+
+_VIED_SUBSYSTEM_ACCESS_INLINE
+void vied_subsystem_store(vied_subsystem_t dev,
+			vied_subsystem_address_t addr,
+			const void *data, unsigned int size);
+
+_VIED_SUBSYSTEM_ACCESS_INLINE
+uint8_t vied_subsystem_load_8 (vied_subsystem_t dev,
+			vied_subsystem_address_t addr);
+
+_VIED_SUBSYSTEM_ACCESS_INLINE
+uint16_t vied_subsystem_load_16(vied_subsystem_t dev,
+			vied_subsystem_address_t addr);
+
+_VIED_SUBSYSTEM_ACCESS_INLINE
+uint32_t vied_subsystem_load_32(vied_subsystem_t dev,
+			vied_subsystem_address_t addr);
+
+_VIED_SUBSYSTEM_ACCESS_INLINE
+void vied_subsystem_load(vied_subsystem_t dev,
+			vied_subsystem_address_t addr,
+			void *data, unsigned int size);
+
+#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access_initialization.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access_initialization.h
new file mode 100644
index 000000000000..81f4d08d5ae0
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access_initialization.h
@@ -0,0 +1,44 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H
+#define _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H
+
+#include "vied_subsystem_access_types.h"
+
+/** @brief Initialises the access of a subsystem.
+ * @param[in] system The subsystem for which the access has to be initialised.
+ *
+ * vied_subsystem_access_initialize initialises the access to a subsystem.
+ * It sets the base address of the subsystem. This base address is extracted from the hsd file.
+ *
+ */
+void
+vied_subsystem_access_initialize(vied_subsystem_t system);
+
+
+/** @brief Initialises the access of multiple subsystems.
+ * @param[in] nr_subsystems The number of subsystems for which the access has to be initialised.
+ * @param[in] dev_base_addresses A pointer to an array of base addresses of subsystems.
+ *            The size of this array must be "nr_subsystems".
+ *            This array must be available during the accesses of the subsystem.
+ *
+ * vied_subsystems_access_initialize initialises the access to multiple subsystems.
+ * It sets the base addresses of the subsystems that are provided by the array dev_base_addresses.
+ *
+ */
+void
+vied_subsystems_access_initialize( unsigned int nr_subsystems
+                                 , const vied_subsystem_base_address_t *base_addresses);
+
+#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access_types.h
new file mode 100644
index 000000000000..75fef6c4ddba
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_subsystem_access_types.h
@@ -0,0 +1,34 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H
+#define _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H
+
+/** Types for the VIED subsystem access interface */
+#include <stdint.h>
+
+/** \brief An identifier for a VIED subsystem.
+ *
+ * This identifier must be a compile-time constant. It is used in
+ * access to a VIED subsystem.
+ */
+typedef unsigned int vied_subsystem_t;
+
+
+/** \brief An address within a VIED subsystem */
+typedef uint32_t vied_subsystem_address_t;
+
+/** \brief A base address of a VIED subsystem seen from the host */
+typedef unsigned long long vied_subsystem_base_address_t;
+
+#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_types.h
new file mode 100644
index 000000000000..0acfdbb00cfa
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600/vied/vied/vied_types.h
@@ -0,0 +1,45 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+#ifndef _HRT_VIED_TYPES_H
+#define _HRT_VIED_TYPES_H
+
+/** Types shared by VIED interfaces */
+
+#include <stdint.h>
+
+/** \brief An address within a VIED subsystem
+ *
+ * This will eventually replace the vied_memory_address_t and vied_subsystem_address_t
+ */
+typedef uint32_t vied_address_t;
+
+/** \brief Memory address type
+ *
+ * A memory address is an offset within a memory.
+ */
+typedef uint32_t vied_memory_address_t;
+
+/** \brief Master port id */
+typedef int vied_master_port_id_t;
+
+/**
+ * \brief Require the existence of a certain type
+ *
+ * This macro can be used in interface header files to ensure that
+ * an implementation-defined type with a specified name exists.
+ */
+#define _VIED_REQUIRE_TYPE(T) enum { _VIED_SIZEOF_##T = sizeof(T) }
+
+
+#endif /* _HRT_VIED_TYPES_H */
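For illustration, a small register-style access sketch built on the declarations above. The register offset 0x100 and the subsystem id 0 are purely illustrative, and the sketch assumes the access layer is initialised first, as required by vied_subsystem_access_initialization.h:

    #include "vied_subsystem_access.h"
    #include "vied_subsystem_access_initialization.h"

    static int example_subsystem_reg(void)
    {
        vied_subsystem_t dev = 0;               /* illustrative subsystem id */
        vied_subsystem_address_t reg = 0x100;   /* illustrative register offset */

        vied_subsystem_access_initialize(dev);  /* set the subsystem base address */
        vied_subsystem_store_32(dev, reg, 0x1); /* write a control word */

        /* read back and verify */
        return (vied_subsystem_load_32(dev, reg) == 0x1) ? 0 : -1;
    }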
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/Makefile b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/Makefile
new file mode 100644
index 000000000000..629899d78897
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/Makefile
@@ -0,0 +1,53 @@
+#
+# Copyright (c) 2010 - 2018 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+
+ifneq ($(EXTERNAL_BUILD), 1)
+srcpath := $(srctree)
+endif
+
+include $(srcpath)/$(src)/../Makefile.ipu4ppsys_src
+include $(srcpath)/$(src)/../Makefile.ipu4ppsys_inc
+
+SSID = 0
+MMID = 0
+IPU_SYSVER = cnl
+
+IPU_PSYSLIB_ROOT_REL = lib
+IPU_PSYSLIB_ROOT = $(srcpath)/$(src)/$(IPU_PSYSLIB_ROOT_REL)
+
+ccflags-y += -I$(srcpath)/$(src)/../../../
+ccflags-y += -I$(srcpath)/$(src)/../../
+ccflags-y += -DHAS_DUAL_CMD_CTX_SUPPORT=0 -DHAS_LATE_BINDING_SUPPORT=0 -DIPU_PSYS_LEGACY
+
+IPU_PSYSLIB_SRC += libcsspsys2600.o
+
+#CFLAGS = -W -Wall -Wstrict-prototypes -Wmissing-prototypes -O2 -fomit-frame-pointer -Wno-unused-variable
+HOST_DEFINES += -DSSID=$(SSID)
+HOST_DEFINES += -DMMID=$(MMID)
+HOST_DEFINES += -DHRT_ON_VIED_SUBSYSTEM_ACCESS=$(SSID)
+HOST_DEFINES += -DCFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL
+HOST_DEFINES += -DHRT_USE_VIR_ADDRS
+HOST_DEFINES += -DHRT_HW
+HOST_DEFINES += -DVIED_NCI_TUNIT_PSYS
+HOST_DEFINES += -DFIRMWARE_RELEASE_VERSION
+HOST_DEFINES += -DPSYS_SERVER_ON_SPC
+HOST_DEFINES += -DAPI_SPLIT_START_STATE_UPDATE
+HOST_DEFINES += -DHAS_DUAL_CMD_CTX_SUPPORT=0
+HOST_DEFINES += -DHAS_LATE_BINDING_SUPPORT=0
+
+intel-ipu4p-psys-csslib-objs := ../../../ipu-wrapper.o \
+	$(IPU_PSYSLIB_SRC)
+obj-$(CONFIG_VIDEO_INTEL_IPU) += intel-ipu4p-psys-csslib.o
+
+ccflags-y += $(IPU_PSYSLIB_INC) $(HOST_DEFINES) -fno-common -v
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/CNL_program_group/ia_css_fw_pkg_release.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/CNL_program_group/ia_css_fw_pkg_release.h
new file mode 100644
index 000000000000..cb20a688b7c3
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/CNL_program_group/ia_css_fw_pkg_release.h
@@ -0,0 +1,14 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+* Copyright (c) 2010 - 2018, Intel Corporation.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms and conditions of the GNU General Public License,
+* version 2, as published by the Free Software Foundation.
+*
+* This program is distributed in the hope it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*/
+#define IA_CSS_FW_PKG_RELEASE 0x20180615
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/ICL_program_group/ia_css_fw_pkg_release.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/ICL_program_group/ia_css_fw_pkg_release.h
new file mode 100644
index 000000000000..cb20a688b7c3
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/ICL_program_group/ia_css_fw_pkg_release.h
@@ -0,0 +1,14 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+* Copyright (c) 2010 - 2018, Intel Corporation.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms and conditions of the GNU General Public License,
+* version 2, as published by the Free Software Foundation.
+*
+* This program is distributed in the hope it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*/ +#define IA_CSS_FW_PKG_RELEASE 0x20180615 diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/buffer.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/buffer.mk new file mode 100644 index 000000000000..c00a1133b440 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/buffer.mk @@ -0,0 +1,43 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is BUFFER + +ifdef _H_BUFFER_MK +$(error ERROR: buffer.mk included multiple times, please check makefile) +else +_H_BUFFER_MK=1 +endif + +BUFFER_DIR=$${MODULES_DIR}/buffer + +BUFFER_INTERFACE=$(BUFFER_DIR)/interface +BUFFER_SOURCES_CPU=$(BUFFER_DIR)/src/cpu +BUFFER_SOURCES_CSS=$(BUFFER_DIR)/src/css + +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_output_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_input_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/ia_css_shared_buffer.c +BUFFER_HOST_FILES += $(BUFFER_SOURCES_CPU)/buffer_access.c +BUFFER_HOST_CPPFLAGS += -I$(BUFFER_INTERFACE) +BUFFER_HOST_CPPFLAGS += -I$${MODULES_DIR}/support + +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_input_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_output_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/ia_css_shared_buffer.c +BUFFER_FW_FILES += $(BUFFER_SOURCES_CSS)/buffer_access.c + +BUFFER_FW_CPPFLAGS += -I$(BUFFER_INTERFACE) +BUFFER_FW_CPPFLAGS += -I$${MODULES_DIR}/support diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/buffer_access.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/buffer_access.h new file mode 100644 index 000000000000..e5fe647742c9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/buffer_access.h @@ -0,0 +1,36 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __BUFFER_ACCESS_H
+#define __BUFFER_ACCESS_H
+
+#include "buffer_type.h"
+/* #define to keep the buffer load interfaces consistent between host and css */
+#define IDM 0
+
+void
+buffer_load(
+	buffer_address address,
+	void *data,
+	unsigned int size,
+	unsigned int mm_id);
+
+void
+buffer_store(
+	buffer_address address,
+	const void *data,
+	unsigned int size,
+	unsigned int mm_id);
+
+#endif /* __BUFFER_ACCESS_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/buffer_type.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/buffer_type.h
new file mode 100644
index 000000000000..de51f2394158
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/buffer_type.h
@@ -0,0 +1,29 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __BUFFER_TYPE_H
+#define __BUFFER_TYPE_H
+
+/* portable access to buffers in DDR */
+
+#ifdef __VIED_CELL
+typedef unsigned int buffer_address;
+#else
+/* workaround needed because shared_memory_access.h uses size_t */
+#include "type_support.h"
+#include "vied/shared_memory_access.h"
+typedef host_virtual_address_t buffer_address;
+#endif
+
+#endif /* __BUFFER_TYPE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_buffer_address.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_buffer_address.h
new file mode 100644
index 000000000000..2530297e8e36
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_buffer_address.h
@@ -0,0 +1,25 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_BUFFER_ADDRESS_H
+#define __IA_CSS_BUFFER_ADDRESS_H
+
+#include "type_support.h"
+
+typedef uint32_t ia_css_buffer_address; /* CSS virtual address */
+
+#define ia_css_buffer_address_null ((ia_css_buffer_address)0)
+
+#endif /* __IA_CSS_BUFFER_ADDRESS_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer.h
new file mode 100644
index 000000000000..b8e7a6ac4648
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer.h
@@ -0,0 +1,52 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_INPUT_BUFFER_H
+#define __IA_CSS_INPUT_BUFFER_H
+
+
+/* Input Buffers */
+
+/* A CSS input buffer is a buffer in DDR that can be written by the CPU,
+ * and that can be read by CSS hardware, after the buffer has been handed over.
+ * Examples: command buffer, input frame buffer, parameter buffer
+ * An input buffer must be mapped into the CPU address space before it can be
+ * written by the CPU.
+ * After mapping, writing, and unmapping, the buffer can be handed over to the
+ * firmware. An input buffer is handed over to the CSS by mapping it to the
+ * CSS address space (by the CPU), and by passing the resulting CSS (virtual)
+ * address of the input buffer to the CSS hardware.
+ * The firmware can read from an input buffer as soon as it has received the
+ * CSS virtual address.
+ * The firmware should not write into an input buffer.
+ * The firmware hands over the input buffer (back to the CPU) by sending the
+ * buffer handle via a response. The host should unmap the buffer
+ * before reusing it.
+ * The firmware should not read from the input buffer after returning the
+ * buffer handle to the CPU.
+ *
+ * A buffer may be pre-mapped to the CPU and/or to the CSS upon allocation,
+ * depending on the allocator's preference. In case of pre-mapped buffers,
+ * the map and unmap functions will only manage read and write access.
+ */
+
+#include "ia_css_buffer_address.h"
+
+typedef struct ia_css_buffer_s *ia_css_input_buffer; /* input buffer handle */
+typedef void *ia_css_input_buffer_cpu_address; /* CPU virtual address */
+/* CSS virtual address */
+typedef ia_css_buffer_address ia_css_input_buffer_css_address;
+
+#endif /* __IA_CSS_INPUT_BUFFER_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer_cpu.h
new file mode 100644
index 000000000000..d3d01353ce43
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_input_buffer_cpu.h
@@ -0,0 +1,49 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_INPUT_BUFFER_CPU_H
+#define __IA_CSS_INPUT_BUFFER_CPU_H
+
+#include "vied/shared_memory_map.h"
+#include "ia_css_input_buffer.h"
+
+ia_css_input_buffer
+ia_css_input_buffer_alloc(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	unsigned int size);
+
+void
+ia_css_input_buffer_free(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	ia_css_input_buffer b);
+
+ia_css_input_buffer_cpu_address
+ia_css_input_buffer_cpu_map(ia_css_input_buffer b);
+
+ia_css_input_buffer_cpu_address
+ia_css_input_buffer_cpu_unmap(ia_css_input_buffer b);
+
+ia_css_input_buffer_css_address
+ia_css_input_buffer_css_map(vied_memory_t mid, ia_css_input_buffer b);
+
+ia_css_input_buffer_css_address
+ia_css_input_buffer_css_map_no_invalidate(vied_memory_t mid, ia_css_input_buffer b);
+
+ia_css_input_buffer_css_address
+ia_css_input_buffer_css_unmap(ia_css_input_buffer b);
+
+
+#endif /* __IA_CSS_INPUT_BUFFER_CPU_H */
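For illustration, a hypothetical CPU-side producer built on this interface, following the hand-over protocol documented in ia_css_input_buffer.h. The sid/mid arguments and the 1024-byte size are illustrative:

    #include <string.h>
    #include "ia_css_input_buffer_cpu.h"

    static int example_send_command(vied_subsystem_t sid, vied_memory_t mid)
    {
        ia_css_input_buffer buf;
        ia_css_input_buffer_cpu_address cpu;
        ia_css_input_buffer_css_address css;

        buf = ia_css_input_buffer_alloc(sid, mid, 1024);
        if (buf == NULL)
            return -1;

        cpu = ia_css_input_buffer_cpu_map(buf);   /* acquire CPU write access */
        memset(cpu, 0, 1024);                     /* fill in the command data */
        ia_css_input_buffer_cpu_unmap(buf);       /* release CPU access */

        css = ia_css_input_buffer_css_map(mid, buf); /* hand over to CSS */
        /* ... send 'css' to the firmware and wait for the response ... */
        ia_css_input_buffer_css_unmap(buf);       /* firmware returned the handle */

        ia_css_input_buffer_free(sid, mid, buf);
        return 0;
    }

Note that the map/unmap calls drive the internal buffer state machine, so a buffer must be back in the unmapped state before it can be freed.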
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer.h
new file mode 100644
index 000000000000..a8c0f9e8554e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer.h
@@ -0,0 +1,31 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_OUTPUT_BUFFER_H
+#define __IA_CSS_OUTPUT_BUFFER_H
+
+/* Output Buffers */
+/* A CSS output buffer is a buffer in DDR that can be written by CSS hardware
+ * and that can be read by the host, after the buffer has been handed over.
+ * Examples: output frame buffer
+ */
+
+#include "ia_css_buffer_address.h"
+
+typedef struct ia_css_buffer_s *ia_css_output_buffer;
+typedef void *ia_css_output_buffer_cpu_address;
+typedef ia_css_buffer_address ia_css_output_buffer_css_address;
+
+#endif /* __IA_CSS_OUTPUT_BUFFER_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer_cpu.h
new file mode 100644
index 000000000000..0299fc3b7eb6
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_output_buffer_cpu.h
@@ -0,0 +1,48 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_OUTPUT_BUFFER_CPU_H
+#define __IA_CSS_OUTPUT_BUFFER_CPU_H
+
+#include "vied/shared_memory_map.h"
+#include "ia_css_output_buffer.h"
+
+ia_css_output_buffer
+ia_css_output_buffer_alloc(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	unsigned int size);
+
+void
+ia_css_output_buffer_free(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	ia_css_output_buffer b);
+
+ia_css_output_buffer_css_address
+ia_css_output_buffer_css_map(ia_css_output_buffer b);
+
+ia_css_output_buffer_css_address
+ia_css_output_buffer_css_unmap(ia_css_output_buffer b);
+
+ia_css_output_buffer_cpu_address
+ia_css_output_buffer_cpu_map(vied_memory_t mid, ia_css_output_buffer b);
+
+ia_css_output_buffer_cpu_address
+ia_css_output_buffer_cpu_map_no_invalidate(vied_memory_t mid, ia_css_output_buffer b);
+
+ia_css_output_buffer_cpu_address
+ia_css_output_buffer_cpu_unmap(ia_css_output_buffer b);
+
+
+#endif /* __IA_CSS_OUTPUT_BUFFER_CPU_H */
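For illustration, the mirror-image consumer flow for output buffers, per the declarations above. Values are illustrative; the cpu_map variant is used here because it invalidates the CPU cache before the freshly written data is read:

    #include "ia_css_output_buffer_cpu.h"

    static void example_receive_frame(vied_subsystem_t sid, vied_memory_t mid)
    {
        ia_css_output_buffer buf;
        ia_css_output_buffer_cpu_address cpu;

        buf = ia_css_output_buffer_alloc(sid, mid, 4096);
        if (buf == NULL)
            return;

        ia_css_output_buffer_css_map(buf);   /* CSS acquires write access */
        /* ... firmware fills the buffer and returns the handle ... */
        ia_css_output_buffer_css_unmap(buf);

        cpu = ia_css_output_buffer_cpu_map(mid, buf); /* CPU read access */
        /* ... consume the frame data at 'cpu' ... */
        ia_css_output_buffer_cpu_unmap(buf);

        ia_css_output_buffer_free(sid, mid, buf);
    }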
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer.h
new file mode 100644
index 000000000000..558ec679f98a
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer.h
@@ -0,0 +1,32 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_SHARED_BUFFER_H
+#define __IA_CSS_SHARED_BUFFER_H
+
+/* Shared Buffers */
+/* A CSS shared buffer is a buffer in DDR that can be read and written by the
+ * CPU and CSS.
+ * Both the CPU and CSS can have the buffer mapped simultaneously.
+ * Access rights are not managed by this interface; this could be done by means
+ * of the read and write pointers of a queue, for example.
+ */
+
+#include "ia_css_buffer_address.h"
+
+typedef struct ia_css_buffer_s *ia_css_shared_buffer;
+typedef void *ia_css_shared_buffer_cpu_address;
+typedef ia_css_buffer_address ia_css_shared_buffer_css_address;
+
+#endif /* __IA_CSS_SHARED_BUFFER_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer_cpu.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer_cpu.h
new file mode 100644
index 000000000000..ff62914f99dc
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/interface/ia_css_shared_buffer_cpu.h
@@ -0,0 +1,51 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_SHARED_BUFFER_CPU_H
+#define __IA_CSS_SHARED_BUFFER_CPU_H
+
+#include "vied/shared_memory_map.h"
+#include "ia_css_shared_buffer.h"
+
+ia_css_shared_buffer
+ia_css_shared_buffer_alloc(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	unsigned int size);
+
+void
+ia_css_shared_buffer_free(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	ia_css_shared_buffer b);
+
+ia_css_shared_buffer_cpu_address
+ia_css_shared_buffer_cpu_map(ia_css_shared_buffer b);
+
+ia_css_shared_buffer_cpu_address
+ia_css_shared_buffer_cpu_unmap(ia_css_shared_buffer b);
+
+ia_css_shared_buffer_css_address
+ia_css_shared_buffer_css_map(ia_css_shared_buffer b);
+
+ia_css_shared_buffer_css_address
+ia_css_shared_buffer_css_unmap(ia_css_shared_buffer b);
+
+ia_css_shared_buffer
+ia_css_shared_buffer_css_update(vied_memory_t mid, ia_css_shared_buffer b);
+
+ia_css_shared_buffer
+ia_css_shared_buffer_cpu_update(vied_memory_t mid, ia_css_shared_buffer b);
+
+#endif /* __IA_CSS_SHARED_BUFFER_CPU_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/buffer_access.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/buffer_access.c
new file mode 100644
index 000000000000..83cbda5a9ff5
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/buffer_access.c
@@ -0,0 +1,40 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+/* implementation of buffer access from the CPU */
+/* using shared_memory interface */
+
+#include "buffer_access.h"
+#include "vied/shared_memory_access.h"
+
+void
+buffer_load(
+	buffer_address address,
+	void *data,
+	unsigned int bytes,
+	unsigned int mm_id)
+{
+	shared_memory_load(mm_id, address, data, bytes);
+}
+
+void
+buffer_store(
+	buffer_address address,
+	const void *data,
+	unsigned int bytes,
+	unsigned int mm_id)
+{
+	shared_memory_store(mm_id, address, data, bytes);
+}
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.c
new file mode 100644
index 000000000000..3828b186ddac
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.c
@@ -0,0 +1,52 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+/* provided interface */
+#include "ia_css_buffer.h"
+
+/* used interfaces */
+#include "vied/shared_memory_access.h"
+#include "vied/shared_memory_map.h"
+#include "cpu_mem_support.h"
+
+ia_css_buffer_t
+ia_css_buffer_alloc(vied_subsystem_t sid, vied_memory_t mid, unsigned int size)
+{
+	ia_css_buffer_t b;
+
+	b = ia_css_cpu_mem_alloc(sizeof(*b));
+	if (b == NULL)
+		return NULL;
+
+	b->mem = shared_memory_alloc(mid, size);
+
+	if (b->mem == 0) {
+		ia_css_cpu_mem_free(b);
+		return NULL;
+	}
+
+	b->css_address = shared_memory_map(sid, mid, b->mem);
+	b->size = size;
+	return b;
+}
+
+
+void
+ia_css_buffer_free(vied_subsystem_t sid, vied_memory_t mid, ia_css_buffer_t b)
+{
+	shared_memory_unmap(sid, mid, b->css_address);
+	shared_memory_free(mid, b->mem);
+	ia_css_cpu_mem_free(b);
+}
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.h
new file mode 100644
index 000000000000..0f99a06e9a89
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_buffer.h
@@ -0,0 +1,58 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_BUFFER_H
+#define __IA_CSS_BUFFER_H
+
+/* workaround: needed because shared_memory_map.h uses size_t */
+#include "type_support.h"
+#include "vied/shared_memory_map.h"
+
+typedef enum {
+	buffer_unmapped, /* buffer is not accessible by cpu, nor css */
+	buffer_write,    /* output buffer: css has write access */
+	                 /* input buffer: cpu has write access */
+	buffer_read,     /* input buffer: css has read access */
+	                 /* output buffer: cpu has read access */
+	buffer_cpu,      /* shared buffer: cpu has read/write access */
+	buffer_css       /* shared buffer: css has read/write access */
+} buffer_state;
+
+struct ia_css_buffer_s {
+	/* number of bytes allocated */
+	unsigned int size;
+	/* allocated virtual memory object */
+	host_virtual_address_t mem;
+	/* virtual address to be used on css/firmware */
+	vied_virtual_address_t css_address;
+	/* virtual address to be used on cpu/host */
+	void *cpu_address;
+	buffer_state state;
+};
+
+typedef struct ia_css_buffer_s *ia_css_buffer_t;
+
+ia_css_buffer_t
+ia_css_buffer_alloc(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	unsigned int size);
+
+void
+ia_css_buffer_free(
+	vied_subsystem_t sid,
+	vied_memory_t mid,
+	ia_css_buffer_t b);
+
+#endif /* __IA_CSS_BUFFER_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_input_buffer.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_input_buffer.c
new file mode 100644
index 000000000000..2bd754062a0e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_input_buffer.c
@@ -0,0 +1,185 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include "ia_css_input_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_input_buffer +ia_css_input_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_input_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_input_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_input_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_map(ia_css_input_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map input buffer to CPU address space, acquire write access */ + b->state = buffer_write; + + /* return pre-mapped buffer */ + return b->cpu_address; +} + + +ia_css_input_buffer_cpu_address +ia_css_input_buffer_cpu_unmap(ia_css_input_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_write) + return NULL; + + /* unmap input buffer from CPU address space, release write access */ + b->state = buffer_unmapped; + + /* return pre-mapped buffer */ + return b->cpu_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map(vied_memory_t mid, ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map input buffer to CSS address space, acquire read access */ + b->state = buffer_read; + + /* now flush the cache */ + ia_css_cpu_mem_cache_flush(b->cpu_address, b->size); +#ifndef HRT_HW + /* only copy in case of simulation, otherwise it should just work */ + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return (ia_css_input_buffer_css_address)b->css_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_map_no_invalidate(vied_memory_t mid, ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if 
(b->state != buffer_unmapped) + return 0; + + /* map input buffer to CSS address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only copy in case of simulation, otherwise it should just work */ + /* copy data from CPU address space to CSS address space */ + shared_memory_store(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return (ia_css_input_buffer_css_address)b->css_address; +} + + +ia_css_input_buffer_css_address +ia_css_input_buffer_css_unmap(ia_css_input_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_read) + return 0; + + /* unmap input buffer from CSS address space, release read access */ + b->state = buffer_unmapped; + + /* input buffer only, no need to invalidate cache */ + + return (ia_css_input_buffer_css_address)b->css_address; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_output_buffer.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_output_buffer.c new file mode 100644 index 000000000000..892dcbd49825 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_output_buffer.c @@ -0,0 +1,182 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + + +#include "ia_css_output_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_output_buffer +ia_css_output_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_output_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_output_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_output_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_map(ia_css_output_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map output buffer to CSS address space, acquire write access */ + b->state = buffer_write; + + return (ia_css_output_buffer_css_address)b->css_address; +} + + +ia_css_output_buffer_css_address +ia_css_output_buffer_css_unmap(ia_css_output_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_write) + return 0; + + /* unmap output buffer from CSS address space, release write access */ + b->state = buffer_unmapped; + + return (ia_css_output_buffer_css_address)b->css_address; +} + + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map(vied_memory_t mid, ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map output buffer to CPU address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only in simulation */ + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + /* now invalidate the cache */ + ia_css_cpu_mem_cache_invalidate(b->cpu_address, b->size); + + return b->cpu_address; +} + + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_map_no_invalidate(vied_memory_t mid, ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map output buffer to CPU address space, acquire read access */ + b->state = buffer_read; + +#ifndef HRT_HW + /* only in simulation */ + /* copy data from CSS address space to CPU address space */ + shared_memory_load(mid, b->mem, b->cpu_address, b->size); +#else + (void)mid; +#endif + + return b->cpu_address; +} + +ia_css_output_buffer_cpu_address +ia_css_output_buffer_cpu_unmap(ia_css_output_buffer b) +{ + if (b == NULL) + return NULL; + if 
(b->state != buffer_read) + return NULL; + + /* unmap output buffer from CPU address space, release read access */ + b->state = buffer_unmapped; + + /* output only, no need to flush cache */ + + return b->cpu_address; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_shared_buffer.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_shared_buffer.c new file mode 100644 index 000000000000..1041bd07721b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/buffer/src/cpu/ia_css_shared_buffer.c @@ -0,0 +1,188 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + + +#include "ia_css_shared_buffer_cpu.h" +#include "ia_css_buffer.h" +#include "vied/shared_memory_access.h" +#include "vied/shared_memory_map.h" +#include "cpu_mem_support.h" + + +ia_css_shared_buffer +ia_css_shared_buffer_alloc( + vied_subsystem_t sid, + vied_memory_t mid, + unsigned int size) +{ + ia_css_shared_buffer b; + + /* allocate buffer container */ + b = ia_css_cpu_mem_alloc(sizeof(*b)); + if (b == NULL) + return NULL; + + b->mem = shared_memory_alloc(mid, size); + if (b->mem == 0) { + ia_css_cpu_mem_free(b); + return NULL; + } + +#ifndef HRT_HW + /* initialize the buffer to avoid warnings when copying */ + shared_memory_zero(mid, b->mem, size); + + /* in simulation, we need to allocate a shadow host buffer */ + b->cpu_address = ia_css_cpu_mem_alloc_page_aligned(size); + if (b->cpu_address == NULL) { + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); + return NULL; + } +#else + /* on hw / real platform we can use the pointer from + * shared memory alloc + */ + b->cpu_address = (void *)HOST_ADDRESS(b->mem); +#endif + + b->css_address = shared_memory_map(sid, mid, b->mem); + + b->size = size; + b->state = buffer_unmapped; + + return b; +} + + +void +ia_css_shared_buffer_free( + vied_subsystem_t sid, + vied_memory_t mid, + ia_css_shared_buffer b) +{ + if (b == NULL) + return; + if (b->state != buffer_unmapped) + return; + +#ifndef HRT_HW + /* only free if we actually allocated it separately */ + ia_css_cpu_mem_free(b->cpu_address); +#endif + shared_memory_unmap(sid, mid, b->css_address); + shared_memory_free(mid, b->mem); + ia_css_cpu_mem_free(b); +} + + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_map(ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_unmapped) + return NULL; + + /* map shared buffer to CPU address space */ + b->state = buffer_cpu; + + return b->cpu_address; +} + + +ia_css_shared_buffer_cpu_address +ia_css_shared_buffer_cpu_unmap(ia_css_shared_buffer b) +{ + if (b == NULL) + return NULL; + if (b->state != buffer_cpu) + return NULL; + + /* unmap shared buffer from CPU address space */ + b->state = buffer_unmapped; + + return b->cpu_address; +} + + +ia_css_shared_buffer_css_address +ia_css_shared_buffer_css_map(ia_css_shared_buffer b) +{ + if (b == NULL) + return 0; + if (b->state != buffer_unmapped) + return 0; + + /* map shared buffer to CSS address 
space */
+	b->state = buffer_css;
+
+	return (ia_css_shared_buffer_css_address)b->css_address;
+}
+
+
+ia_css_shared_buffer_css_address
+ia_css_shared_buffer_css_unmap(ia_css_shared_buffer b)
+{
+	if (b == NULL)
+		return 0;
+	if (b->state != buffer_css)
+		return 0;
+
+	/* unmap shared buffer from CSS address space */
+	b->state = buffer_unmapped;
+
+	return (ia_css_shared_buffer_css_address)b->css_address;
+}
+
+
+ia_css_shared_buffer
+ia_css_shared_buffer_css_update(vied_memory_t mid, ia_css_shared_buffer b)
+{
+	if (b == NULL)
+		return NULL;
+
+	/* flush the buffer to CSS after it was modified by the CPU */
+	/* flush cache to ddr */
+	ia_css_cpu_mem_cache_flush(b->cpu_address, b->size);
+#ifndef HRT_HW
+	/* copy data from CPU address space to CSS address space */
+	shared_memory_store(mid, b->mem, b->cpu_address, b->size);
+#else
+	(void)mid;
+#endif
+
+	return b;
+}
+
+
+ia_css_shared_buffer
+ia_css_shared_buffer_cpu_update(vied_memory_t mid, ia_css_shared_buffer b)
+{
+	if (b == NULL)
+		return NULL;
+
+	/* update the buffer for the CPU after it has been modified by CSS */
+#ifndef HRT_HW
+	/* copy data from CSS address space to CPU address space */
+	shared_memory_load(mid, b->mem, b->cpu_address, b->size);
+#else
+	(void)mid;
+#endif
+	/* invalidate the cache so the CPU reads the fresh data from ddr */
+	ia_css_cpu_mem_cache_invalidate(b->cpu_address, b->size);
+
+	return b;
+}
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/cell.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/cell.mk
new file mode 100644
index 000000000000..fa5e65022601
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/cell.mk
@@ -0,0 +1,43 @@
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+#
+ifndef _CELL_MK_
+_CELL_MK_ = 1
+
+
+CELL_DIR=$${MODULES_DIR}/cell
+CELL_INTERFACE=$(CELL_DIR)/interface
+CELL_SOURCES=$(CELL_DIR)/src
+
+CELL_HOST_FILES =
+CELL_FW_FILES =
+
+CELL_HOST_CPPFLAGS = \
+	-I$(CELL_INTERFACE) \
+	-I$(CELL_SOURCES)
+
+CELL_FW_CPPFLAGS = \
+	-I$(CELL_INTERFACE) \
+	-I$(CELL_SOURCES)
+
+ifeq (0,1)
+# Disabled until it is decided to go this way or not
+include $(MODULES_DIR)/device_access/device_access.mk
+CELL_HOST_FILES += $(DEVICE_ACCESS_HOST_FILES)
+CELL_FW_FILES += $(DEVICE_ACCESS_FW_FILES)
+CELL_HOST_CPPFLAGS += $(DEVICE_ACCESS_HOST_CPPFLAGS)
+CELL_FW_CPPFLAGS += $(DEVICE_ACCESS_FW_CPPFLAGS)
+endif
+
+endif
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/interface/ia_css_cell.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/interface/ia_css_cell.h
new file mode 100644
index 000000000000..3fac3c791b6e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/interface/ia_css_cell.h
@@ -0,0 +1,112 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CELL_H +#define __IA_CSS_CELL_H + +#include "storage_class.h" +#include "type_support.h" + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stat_ctrl(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_stat_ctrl(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_pc(unsigned int ssid, unsigned int cell_id, + unsigned int pc); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +#if 0 /* To be implemented after completing cell device properties */ +STORAGE_CLASS_INLINE void +ia_css_cell_set_icache_info_bits(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_debug_pc(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stall_bits(unsigned int ssid, unsigned int cell_id); +#endif + +/* configure master ports */ + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_base_address(unsigned int ssid, unsigned int cell_id, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_base_address(unsigned int ssid, + unsigned int cell_id, + unsigned int master, unsigned int segment, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_bits(unsigned int ssid, unsigned int cell_id, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_bits(unsigned int ssid, + unsigned int cell_id, + unsigned int master, unsigned int segment, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_override_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_segment_info_override_bits(unsigned int ssid, + unsigned int cell, + unsigned int master, unsigned int segment, unsigned int value); + +/* Access memories */ + +STORAGE_CLASS_INLINE void +ia_css_cell_mem_store_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr, unsigned int value); + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_mem_load_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr); + +/***********************************************************************/ + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_is_ready(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_bit(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_set_run_bit(unsigned int ssid, unsigned int cell_id, + unsigned int value); + +STORAGE_CLASS_INLINE void +ia_css_cell_start(unsigned int ssid, unsigned int cell_id); + +STORAGE_CLASS_INLINE void +ia_css_cell_start_prefetch(unsigned int ssid, unsigned int cell_id, + bool prefetch); + +STORAGE_CLASS_INLINE void +ia_css_cell_wait(unsigned int ssid, unsigned int cell_id); + +/* include inline implementation */ 
+#include "ia_css_cell_impl.h" + +#endif /* __IA_CSS_CELL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/src/ia_css_cell_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/src/ia_css_cell_impl.h new file mode 100644 index 000000000000..60b2e234da1a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell/src/ia_css_cell_impl.h @@ -0,0 +1,272 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CELL_IMPL_H +#define __IA_CSS_CELL_IMPL_H + +#include "ia_css_cell.h" + +#include "ia_css_cmem.h" +#include "ipu_device_cell_properties.h" +#include "storage_class.h" +#include "assert_support.h" +#include "platform_support.h" +#include "misc_support.h" + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_regs_addr(unsigned int cell_id) +{ + /* mem_id 0 is for registers */ + return ipu_device_cell_memory_address(cell_id, 0); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_dmem_addr(unsigned int cell_id) +{ + /* mem_id 1 is for DMEM */ + return ipu_device_cell_memory_address(cell_id, 1); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_mem_store_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr, unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ipu_device_cell_memory_address( + cell_id, mem_id) + addr, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_mem_load_32(unsigned int ssid, unsigned int cell_id, + unsigned int mem_id, unsigned int addr) +{ + return ia_css_cmem_load_32( + ssid, ipu_device_cell_memory_address(cell_id, mem_id) + addr); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_get_stat_ctrl(unsigned int ssid, unsigned int cell_id) +{ + return ia_css_cmem_load_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_stat_ctrl(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_is_ready(unsigned int ssid, unsigned int cell_id) +{ + unsigned int reg; + + reg = ia_css_cell_get_stat_ctrl(ssid, cell_id); + /* READY must be 1, START must be 0 */ + return (reg & (1 << IPU_DEVICE_CELL_STAT_CTRL_READY_BIT)) && + ((~reg) & (1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT)); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_pc(unsigned int ssid, unsigned int cell_id, + unsigned int pc) +{ + /* set start PC */ + ia_css_cmem_store_32( + ssid, ia_css_cell_regs_addr(cell_id) + + IPU_DEVICE_CELL_START_PC_REG_ADDRESS, pc); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_start_bit(unsigned int ssid, unsigned int cell_id) +{ + unsigned int reg; + + reg = 1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT; + ia_css_cell_set_stat_ctrl(ssid, cell_id, reg); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_run_bit(unsigned int ssid, unsigned int cell_id, + unsigned int value) +{ + unsigned int reg; + + reg = value << 
IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT;
+	ia_css_cell_set_stat_ctrl(ssid, cell_id, reg);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_start(unsigned int ssid, unsigned int cell_id)
+{
+	ia_css_cell_start_prefetch(ssid, cell_id, 0);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_start_prefetch(unsigned int ssid, unsigned int cell_id,
+	bool prefetch)
+{
+	unsigned int reg = 0;
+
+	/* Set run bit and start bit */
+	reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_START_BIT);
+	reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT);
+	/* Invalidate the icache */
+	reg |= (1 << IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT);
+	/* Optionally enable prefetching */
+	reg |= ((prefetch == 1) ?
+		(1 << IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT) :
+		0);
+
+	/* store into register */
+	ia_css_cell_set_stat_ctrl(ssid, cell_id, reg);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_wait(unsigned int ssid, unsigned int cell_id)
+{
+	do {
+		ia_css_sleep();
+	} while (!ia_css_cell_is_ready(ssid, cell_id));
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_icache_base_address(unsigned int ssid, unsigned int cell_id,
+	unsigned int value)
+{
+	ia_css_cmem_store_32(
+		ssid, ia_css_cell_regs_addr(cell_id) +
+		IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS, value);
+}
+
+/* master port configuration */
+
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_segment_info_bits(unsigned int ssid, unsigned int cell,
+	unsigned int master, unsigned int segment, unsigned int value)
+{
+	unsigned int addr;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < ipu_device_cell_num_masters(cell));
+	assert(segment < ipu_device_cell_master_num_segments(cell, master));
+
+	addr = ipu_device_cell_memory_address(cell, 0);
+	addr += ipu_device_cell_master_info_reg(cell, master);
+	addr += segment * ipu_device_cell_master_stride(cell, master);
+	ia_css_cmem_store_32(ssid, addr, value);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_segment_info_override_bits(unsigned int ssid,
+	unsigned int cell,
+	unsigned int master, unsigned int segment, unsigned int value)
+{
+	unsigned int addr;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < ipu_device_cell_num_masters(cell));
+	assert(segment < ipu_device_cell_master_num_segments(cell, master));
+
+	addr = ipu_device_cell_memory_address(cell, 0);
+	addr += ipu_device_cell_master_info_override_reg(cell, master);
+	addr += segment * ipu_device_cell_master_stride(cell, master);
+	ia_css_cmem_store_32(ssid, addr, value);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_segment_base_address(unsigned int ssid,
+	unsigned int cell,
+	unsigned int master, unsigned int segment, unsigned int value)
+{
+	unsigned int addr;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < ipu_device_cell_num_masters(cell));
+	assert(segment < ipu_device_cell_master_num_segments(cell, master));
+
+	addr = ipu_device_cell_memory_address(cell, 0);
+	addr += ipu_device_cell_master_base_reg(cell, master);
+	addr += segment * ipu_device_cell_master_stride(cell, master);
+	ia_css_cmem_store_32(ssid, addr, value);
+}
+
+STORAGE_CLASS_INLINE void
+ia_css_cell_set_master_info_bits(unsigned int ssid, unsigned int cell,
+	unsigned int master, unsigned int value)
+{
+	unsigned int addr, s, stride, num_segments;
+
+	assert(cell < ipu_device_cell_num_devices());
+	assert(master < ipu_device_cell_num_masters(cell));
+
+	addr = ipu_device_cell_memory_address(cell, 0);
+	addr += ipu_device_cell_master_info_reg(cell, master);
+	stride = ipu_device_cell_master_stride(cell, master);
+	
num_segments = ipu_device_cell_master_num_segments(cell, master); + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_info_override_bits(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_info_override_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cell_set_master_base_address(unsigned int ssid, unsigned int cell, + unsigned int master, unsigned int value) +{ + unsigned int addr, s, stride, num_segments, segment_size; + + assert(cell < ipu_device_cell_num_devices()); + assert(master < ipu_device_cell_num_masters(cell)); + + addr = ipu_device_cell_memory_address(cell, 0); + addr += ipu_device_cell_master_base_reg(cell, master); + stride = ipu_device_cell_master_stride(cell, master); + num_segments = ipu_device_cell_master_num_segments(cell, master); + segment_size = ipu_device_cell_master_segment_size(cell, master); + + for (s = 0; s < num_segments; s++) { + ia_css_cmem_store_32(ssid, addr, value); + addr += stride; + value += segment_size; + } +} + +#endif /* __IA_CSS_CELL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/cell_program_load.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/cell_program_load.mk new file mode 100644 index 000000000000..ec5389aff4a0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/cell_program_load.mk @@ -0,0 +1,39 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for
+# more details.
+#
+
+ifndef _CELL_PROGRAM_LOAD_MK_
+_CELL_PROGRAM_LOAD_MK_ = 1
+
+CELL_PROGRAM_LOAD_DIR=$${MODULES_DIR}/cell_program_load
+CELL_PROGRAM_LOAD_INTERFACE=$(CELL_PROGRAM_LOAD_DIR)/interface
+CELL_PROGRAM_LOAD_SOURCES=$(CELL_PROGRAM_LOAD_DIR)/src
+
+CELL_PROGRAM_LOAD_HOST_FILES = $(CELL_PROGRAM_LOAD_SOURCES)/ia_css_cell_program_load.c
+
+CELL_PROGRAM_LOAD_FW_FILES = $(CELL_PROGRAM_LOAD_SOURCES)/ia_css_cell_program_load.c
+
+CELL_PROGRAM_LOAD_HOST_CPPFLAGS = \
+	-I$(CELL_PROGRAM_LOAD_INTERFACE) \
+	-I$(CELL_PROGRAM_LOAD_SOURCES)
+
+CELL_PROGRAM_LOAD_FW_CPPFLAGS = \
+	-I$(CELL_PROGRAM_LOAD_INTERFACE) \
+	-I$(CELL_PROGRAM_LOAD_SOURCES)
+
+ifeq ($(CRUN_DYNAMIC_LINK_PROGRAMS), 1)
+CELL_PROGRAM_LOAD_HOST_CPPFLAGS += -DCRUN_DYNAMIC_LINK_PROGRAMS=1
+CELL_PROGRAM_LOAD_FW_CPPFLAGS += -DCRUN_DYNAMIC_LINK_PROGRAMS=1
+endif
+
+endif
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_group_load.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_group_load.h
new file mode 100644
index 000000000000..812dd4ea09a8
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_group_load.h
@@ -0,0 +1,76 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_CELL_PROGRAM_GROUP_LOAD_H
+#define __IA_CSS_CELL_PROGRAM_GROUP_LOAD_H
+
+#include "ia_css_cell_program_load_storage_class.h"
+#include "ia_css_xmem.h"
+#include "ia_css_cell_program_struct.h"
+
+/* Load all programs in a program group.
+ * Return 0 on success, -1 on incorrect magic number,
+ * -2 on incorrect release tag
+ */
+
+IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H
+int
+ia_css_cell_program_group_load(
+	unsigned int ssid,
+	unsigned int mmid,
+	/* program address as seen from caller */
+	ia_css_xmem_address_t program_addr,
+	/* program address as seen from cell's icache */
+	unsigned int program_addr_icache
+);
+
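+/* A minimal usage sketch, not part of this interface: load a complete
+ * program group, then start the cell. It assumes the program group blob
+ * is already resident in DDR; fw_host_addr, fw_icache_addr and CELL0 are
+ * illustrative names only:
+ *
+ *	int err = ia_css_cell_program_group_load(ssid, mmid,
+ *			fw_host_addr, fw_icache_addr);
+ *	if (!err)
+ *		ia_css_cell_start(ssid, CELL0);	// see ia_css_cell.h
+ */
+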
+/* Load all programs in a program group.
+ * Each group may have multiple entry functions; this function returns
+ * the info for each entry function, so the caller can start any of them.
+ * Return 0 on success, -1 on incorrect magic number,
+ * -2 on incorrect release tag
+ */
+
+IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H
+int
+ia_css_cell_program_group_load_multi_entry(
+	unsigned int ssid,
+	unsigned int mmid,
+	/* program address as seen from caller */
+	ia_css_xmem_address_t program_addr,
+	/* program address as seen from cell's icache */
+	unsigned int program_addr_icache,
+	struct ia_css_cell_program_entry_func_info_s *entry_info,
+	unsigned int num_entry_info
+);
+
+/* Load all programs in a program group, except the icache of the
+ * first program
+ */
+
+IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H
+int
+ia_css_cell_program_group_load_mem(
+	unsigned int ssid,
+	unsigned int mmid,
+	/* program address as seen from caller */
+	ia_css_xmem_address_t program_addr,
+	/* program address as seen from cell's icache */
+	unsigned int program_addr_icache
+);
+
+#ifdef __INLINE_IA_CSS_CELL_PROGRAM_LOAD__
+#include "ia_css_cell_program_group_load_impl.h"
+#endif
+
+#endif /* __IA_CSS_CELL_PROGRAM_GROUP_LOAD_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load.h
new file mode 100644
index 000000000000..d7e689e9d569
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load.h
@@ -0,0 +1,114 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_CELL_PROGRAM_LOAD_H
+#define __IA_CSS_CELL_PROGRAM_LOAD_H
+
+#include "ia_css_cell_program_load_storage_class.h"
+#include "ia_css_cell_program_struct.h"
+#include "ia_css_xmem.h"
+
+/* Perform full program load:
+ * - load program header
+ * - initialize icache and start PC of exec entry function
+ * - initialize PMEM and DMEM
+ * Return 0 on success, -1 on incorrect magic number,
+ * -2 on incorrect release tag
+ */
+
+IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H
+int
+ia_css_cell_program_load(
+	unsigned int ssid,
+	unsigned int mmid,
+	/* program address as seen from caller */
+	ia_css_xmem_address_t program_addr,
+	/* program address as seen from cell's icache */
+	unsigned int program_addr_icache
+);
+
+/* Perform full program load:
+ * - load program header
+ * - initialize icache and start PC of exec entry function
+ * - initialize info of all entry functions
+ * - initialize PMEM and DMEM
+ * Return 0 on success, -1 on incorrect magic number,
+ * -2 on incorrect release tag
+ */
+
+IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H
+int
+ia_css_cell_program_load_multi_entry(
+	unsigned int ssid,
+	unsigned int mmid,
+	/* program address as seen from caller */
+	ia_css_xmem_address_t program_addr,
+	/* program address as seen from cell's icache */
+	unsigned int program_addr_icache,
+	struct ia_css_cell_program_entry_func_info_s *entry_info
+);
+
+/* Load program header, and initialize icache and start PC.
+ * After this, the cell may be started, but the entry function may not yet use
+ * global data, nor may code from PMEM be executed.
+ * Before accessing global data or executing code from PMEM,
+ * the function ia_css_cell_program_load_mem must be executed.
+ */
+
+IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H
+int
+ia_css_cell_program_load_icache(
+	unsigned int ssid,
+	unsigned int mmid,
+	ia_css_xmem_address_t program_addr,
+	unsigned int program_addr_icache);
+
+/* Load program header and finish the program load by
+ * initializing PMEM and DMEM.
+ * After this, any code from the program may be executed on the cell.
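+ *
+ * A sketch of the resulting two-phase sequence; fw_host_addr and
+ * fw_icache_addr are illustrative names, not part of this interface:
+ *
+ *	if (ia_css_cell_program_load_icache(ssid, mmid,
+ *			fw_host_addr, fw_icache_addr))
+ *		return -1;
+ *	// the cell may be started here, before PMEM/DMEM are loaded
+ *	if (ia_css_cell_program_load_mem(ssid, mmid,
+ *			fw_host_addr, fw_icache_addr))
+ *		return -1;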
+ */ +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_load_mem( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t program_addr, + unsigned int program_addr_icache); + +/* set cell start PC to program init entry function */ +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +void +ia_css_cell_program_load_set_init_start_pc( + unsigned int ssid, + const struct ia_css_cell_program_entry_func_info_s *entry_info); + +/* set cell start PC to program exec entry function */ +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +void +ia_css_cell_program_load_set_exec_start_pc( + unsigned int ssid, + const struct ia_css_cell_program_entry_func_info_s *entry_info); + +/* set cell start PC to program done entry function */ +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +void +ia_css_cell_program_load_set_done_start_pc( + unsigned int ssid, + const struct ia_css_cell_program_entry_func_info_s *entry_info); + +#ifdef __INLINE_IA_CSS_CELL_PROGRAM_LOAD__ +#include "ia_css_cell_program_load_impl.h" +#endif + +#endif /* __IA_CSS_CELL_PROGRAM_LOAD_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load_prog.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load_prog.h new file mode 100644 index 000000000000..0f8f1852449c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load_prog.h @@ -0,0 +1,84 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CELL_PROGRAM_LOAD_PROG_H +#define __IA_CSS_CELL_PROGRAM_LOAD_PROG_H + +/* basic functions needed to implement all program(group) loads */ + +#include "ia_css_cell_program_load_storage_class.h" +#include "ia_css_cell_program_struct.h" +#include "ia_css_xmem.h" + + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +void +ia_css_cell_program_load_encode_entry_info( + struct ia_css_cell_program_entry_func_info_s *entry_info, + const struct ia_css_cell_program_s *prog); + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +void +ia_css_cell_program_load_set_start_pc( + unsigned int ssid, + const struct ia_css_cell_program_entry_func_info_s *entry_info, + enum ia_css_cell_program_entry_func_id func_id); + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_load_header( + unsigned int mmid, + ia_css_xmem_address_t host_addr, + struct ia_css_cell_program_s *prog); + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_load_icache_prog( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr, + const struct ia_css_cell_program_s *prog); + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_load_entry_prog( + unsigned int ssid, + unsigned int mmid, + enum ia_css_cell_program_entry_func_id entry_func_id, + const struct ia_css_cell_program_s *prog); + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_load_mem_prog( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr, + const struct ia_css_cell_program_s *prog); + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +int +ia_css_cell_program_load_prog( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr, + struct ia_css_cell_program_s *prog); + +#ifdef __INLINE_IA_CSS_CELL_PROGRAM_LOAD__ +#include "ia_css_cell_program_load_prog_impl.h" +#endif + +#endif /* __IA_CSS_CELL_PROGRAM_LOAD_PROG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load_storage_class.h new file mode 100644 index 000000000000..8691e1402eaf --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_load_storage_class.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H +#define __IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifdef __INLINE_IA_CSS_CELL_PROGRAM_LOAD__ +#define IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#else +#define IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +#endif + +#endif /* __IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_struct.h new file mode 100644 index 000000000000..de3c3682ff8d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/interface/ia_css_cell_program_struct.h @@ -0,0 +1,114 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CELL_PROGRAM_STRUCT_H +#define __IA_CSS_CELL_PROGRAM_STRUCT_H + +#define IA_CSS_CELL_ID_UNDEFINED 0xFFFFFFFF +#define IA_CSS_CELL_PROGRAM_MAGIC_NUMBER 0xF1A30002 + +#define CSIM_PROGRAM_NAME_SIZE 64 + +enum ia_css_cell_program_entry_func_id { + IA_CSS_CELL_PROGRAM_INIT_FUNC_ID, + IA_CSS_CELL_PROGRAM_EXEC_FUNC_ID, + IA_CSS_CELL_PROGRAM_DONE_FUNC_ID, + IA_CSS_CELL_PROGRAM_NUM_FUNC_ID, +}; + +struct ia_css_cell_program_entry_func_info_s { + /* start PC value of program entry functions */ + unsigned int start[IA_CSS_CELL_PROGRAM_NUM_FUNC_ID]; + +#if defined(C_RUN) + /* entry function names */ + char func_name[IA_CSS_CELL_PROGRAM_NUM_FUNC_ID][CSIM_PROGRAM_NAME_SIZE]; + /* for crun use only */ + unsigned int cell_id; +#endif + /* base address for cell's registers */ + unsigned int regs_addr; + +}; + +struct ia_css_cell_program_s { + /* must be equal to IA_CSS_CELL_PROGRAM_MAGIC_NUMBER */ + unsigned int magic_number; + + /* offset of blob relative to start of this struct */ + unsigned int blob_offset; + /* size of the blob, not used */ + unsigned int blob_size; + + /* start PC value of program entry functions */ + unsigned int start[IA_CSS_CELL_PROGRAM_NUM_FUNC_ID]; + +#if defined(C_RUN) || defined(HRT_UNSCHED) || defined(HRT_SCHED) + /* program name */ + char prog_name[CSIM_PROGRAM_NAME_SIZE]; +#if defined(C_RUN) + /* entry function names */ + char func_name[IA_CSS_CELL_PROGRAM_NUM_FUNC_ID][CSIM_PROGRAM_NAME_SIZE]; +#endif +#endif + + /* offset of icache section in blob */ + unsigned int icache_source; + /* offset in the instruction space, not used */ + unsigned int icache_target; + /* icache section size, not used */ + unsigned int icache_size; + + /* offset of pmem section in blob */ + unsigned int pmem_source; + /* offset in the pmem, typically 0 */ + unsigned int pmem_target; + /* pmem section size, 0 if not used */ + unsigned int pmem_size; + + /* offset of data section in blob */ + unsigned int data_source; + /* offset of data section in dmem */ + unsigned int 
data_target;
+	/* size of dmem data section */
+	unsigned int data_size;
+
+	/* offset of bss section in dmem, to be zeroed */
+	unsigned int bss_target;
+	/* size of bss section in dmem */
+	unsigned int bss_size;
+
+	/* for checking */
+	unsigned int cell_id;
+	/* base address for cell's registers */
+	unsigned int regs_addr;
+
+	/* pmem data bus address */
+	unsigned int cell_pmem_data_bus_addres;
+	/* dmem data bus address */
+	unsigned int cell_dmem_data_bus_addres;
+	/* pmem config bus address */
+	unsigned int cell_pmem_control_bus_addres;
+	/* dmem config bus address */
+	unsigned int cell_dmem_control_bus_addres;
+
+	/* offset to header of next program */
+	unsigned int next;
+	/* Temporary workaround for a dma bug where it fails to transfer
+	 * data whose size is not a multiple of 64 bytes
+	 */
+	unsigned int dummy[2];
+};
+
+#endif /* __IA_CSS_CELL_PROGRAM_STRUCT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_group_load_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_group_load_impl.h
new file mode 100644
index 000000000000..20d71bb25d49
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_group_load_impl.h
@@ -0,0 +1,128 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_CELL_PROGRAM_GROUP_LOAD_IMPL_H
+#define __IA_CSS_CELL_PROGRAM_GROUP_LOAD_IMPL_H
+
+#include "ia_css_cell_program_group_load.h"
+
+#include "ia_css_cell_program_load_storage_class.h"
+#include "ia_css_cell_program_load_prog.h"
+#include "ia_css_cell_program_struct.h"
+
+IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C
+int
+ia_css_cell_program_group_load(
+	unsigned int ssid,
+	unsigned int mmid,
+	ia_css_xmem_address_t host_addr,
+	unsigned int vied_addr)
+{
+	struct ia_css_cell_program_s prog;
+	unsigned int next;
+	int status = 0;
+
+	do {
+		status = ia_css_cell_program_load_prog(
+			ssid, mmid, host_addr, vied_addr, &prog);
+		if (status)
+			return status;
+
+		next = prog.next;
+		host_addr =
+		(ia_css_xmem_address_t)((unsigned long long)host_addr + next);
+		vied_addr += next;
+	} while (next);
+
+	return status;
+}
+
+IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C
+int
+ia_css_cell_program_group_load_multi_entry(
+	unsigned int ssid,
+	unsigned int mmid,
+	ia_css_xmem_address_t host_addr,
+	unsigned int vied_addr,
+	struct ia_css_cell_program_entry_func_info_s *entry_info,
+	unsigned int num_entry_info)
+{
+	struct ia_css_cell_program_s prog;
+	unsigned int next;
+	int status = 0;
+	unsigned int i = 0;
+
+	do {
+		status = ia_css_cell_program_load_prog(
+			ssid, mmid, host_addr, vied_addr, &prog);
+		if (status)
+			return status;
+		if (i >= num_entry_info) {
+			/* more programs than entry_info slots;
+			 * stop to avoid an out-of-bounds access.
+ */ + return 1; + } + ia_css_cell_program_load_encode_entry_info( + &entry_info[i], &prog); + + next = prog.next; + host_addr = + (ia_css_xmem_address_t)((unsigned long long)host_addr + next); + vied_addr += next; + i++; + } while (next); + + return status; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +int +ia_css_cell_program_group_load_mem( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr) +{ + struct ia_css_cell_program_s prog; + unsigned int next; + int status = 0; + + status = ia_css_cell_program_load_header(mmid, host_addr, &prog); + if (status) + return status; + + /* load memories of first program */ + status = ia_css_cell_program_load_mem_prog( + ssid, mmid, host_addr, vied_addr, &prog); + if (status) + return status; + + /* return next from ia_css_cell_program_load_mem_prog? */ + next = prog.next; + + /* load next programs, if any */ + if (next) { + host_addr = + (ia_css_xmem_address_t)((unsigned long long)host_addr + next); + status = ia_css_cell_program_group_load( + ssid, mmid, host_addr, vied_addr + next); + if (status) + return status; + } + + return status; +} + +#endif /* __IA_CSS_CELL_PROGRAM_GROUP_LOAD_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load.c new file mode 100644 index 000000000000..0a1ea1ac2ed1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load.c @@ -0,0 +1,31 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifdef __INLINE_IA_CSS_CELL_PROGRAM_LOAD__ + +#include "storage_class.h" +STORAGE_CLASS_INLINE void __dummy(void) { } + +#else + +/* low-level functions */ +#include "ia_css_cell_program_load_prog_impl.h" + +/* functions for single, unmapped program load */ +#include "ia_css_cell_program_load_impl.h" + +/* functions for program group load */ +#include "ia_css_cell_program_group_load_impl.h" + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_bin.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_bin.h new file mode 100644 index 000000000000..523ce536cb09 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_bin.h @@ -0,0 +1,193 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CELL_PROGRAM_LOAD_BIN_H +#define __IA_CSS_CELL_PROGRAM_LOAD_BIN_H + +#include "ia_css_cell_program_load_prog.h" + +#include "ia_css_cell_program_load_storage_class.h" +#include "ia_css_cell_program_struct.h" +#include "ia_css_cell_regs.h" +#include "misc_support.h" +#include "ia_css_fw_load.h" +#include "platform_support.h" +#include "ipu_device_buttress_properties_struct.h" + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +void +ia_css_cell_program_load_encode_entry_info( + struct ia_css_cell_program_entry_func_info_s *entry_info, + const struct ia_css_cell_program_s *prog) +{ + unsigned int i; + + for (i = 0; i < IA_CSS_CELL_PROGRAM_NUM_FUNC_ID; i++) + entry_info->start[i] = prog->start[i]; + + entry_info->regs_addr = prog->regs_addr; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +void +ia_css_cell_program_load_set_start_pc( + unsigned int ssid, + const struct ia_css_cell_program_entry_func_info_s *entry_info, + enum ia_css_cell_program_entry_func_id func_id) +{ + unsigned int start_pc; + + start_pc = entry_info->start[func_id]; + /* set start address */ + ia_css_cell_regs_set_start_pc(ssid, entry_info->regs_addr, start_pc); +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +int +ia_css_cell_program_load_icache_prog( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr, + const struct ia_css_cell_program_s *prog) +{ + unsigned int regs_addr; + struct ia_css_cell_program_entry_func_info_s entry_info; + + NOT_USED(mmid); + NOT_USED(host_addr); + + if (prog->cell_id == IA_CSS_CELL_ID_UNDEFINED) + return -1; + + regs_addr = prog->regs_addr; + + /* set icache base address */ + ia_css_cell_regs_set_icache_base_address(ssid, regs_addr, + vied_addr + prog->blob_offset + prog->icache_source); + + /* set icache info bits */ + ia_css_cell_regs_set_icache_info_bits( + ssid, regs_addr, IA_CSS_INFO_BITS_M0_DDR); + + /* by default we set to start PC of exec entry function */ + ia_css_cell_program_load_encode_entry_info(&entry_info, prog); + ia_css_cell_program_load_set_start_pc( + ssid, &entry_info, IA_CSS_CELL_PROGRAM_EXEC_FUNC_ID); + + return 0; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +int +ia_css_cell_program_load_entry_prog( + unsigned int ssid, + unsigned int mmid, + enum ia_css_cell_program_entry_func_id entry_func_id, + const struct ia_css_cell_program_s *prog) +{ + struct ia_css_cell_program_entry_func_info_s entry_info; + + NOT_USED(mmid); + + if (prog->cell_id == IA_CSS_CELL_ID_UNDEFINED) + return -1; + + ia_css_cell_program_load_encode_entry_info(&entry_info, prog); + ia_css_cell_program_load_set_start_pc(ssid, &entry_info, entry_func_id); + + return 0; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C int +ia_css_cell_program_load_mem_prog( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr, + const struct ia_css_cell_program_s *prog) +{ + unsigned int transferred = 0; + unsigned int pending = 0; + unsigned int dmem_addr; + unsigned int pmem_addr; + + NOT_USED(vied_addr); + +#ifdef ENABLE_FW_LOAD_DMA + pmem_addr = prog->cell_pmem_data_bus_addres; + dmem_addr = prog->cell_dmem_data_bus_addres; +#else + pmem_addr = prog->cell_pmem_control_bus_addres; + dmem_addr = prog->cell_dmem_control_bus_addres; +#endif + + /* Copy text section from ddr to pmem. 
*/
+	if (prog->pmem_size) {
+		transferred = ia_css_fw_copy_begin(mmid,
+				ssid,
+				host_addr + prog->blob_offset +
+				prog->pmem_source,
+				pmem_addr + prog->pmem_target,
+				prog->pmem_size);
+
+		assert(prog->pmem_size == transferred);
+		/* If fewer bytes were transferred than requested, signal an
+		 * error. This architecture enforces DMA xfer size > pmem_size,
+		 * so a DMA transfer request should be transferable in one go.
+		 */
+		if (transferred != prog->pmem_size)
+			return 1;
+		pending++;
+	}
+
+	/* Copy data section from ddr to dmem. */
+	if (prog->data_size) {
+		transferred = ia_css_fw_copy_begin(mmid,
+				ssid,
+				host_addr + prog->blob_offset +
+				prog->data_source,
+				dmem_addr + prog->data_target,
+				prog->data_size);
+		assert(prog->data_size == transferred);
+		/* If fewer bytes were transferred than requested, signal an
+		 * error. This architecture enforces DMA xfer size > data_size,
+		 * so a DMA transfer request should be transferable in one go.
+		 */
+		if (transferred != prog->data_size)
+			return 1;
+		pending++;
+	}
+
+	/* Zero bss section in dmem. */
+	if (prog->bss_size) {
+		transferred = ia_css_fw_zero_begin(ssid,
+				dmem_addr + prog->bss_target,
+				prog->bss_size);
+		assert(prog->bss_size == transferred);
+		/* If fewer bytes were transferred than requested, signal an
+		 * error. This architecture enforces DMA xfer size > bss_size,
+		 * so a DMA transfer request should be transferable in one go.
+		 */
+		if (transferred != prog->bss_size)
+			return 1;
+		pending++;
+	}
+
+	/* Wait for all fw loads to complete */
+	while (pending) {
+		pending -= ia_css_fw_end(pending);
+		ia_css_sleep();
+	}
+	return 0; /* success */
+}
+
+#endif /* __IA_CSS_CELL_PROGRAM_LOAD_BIN_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_impl.h
new file mode 100644
index 000000000000..6201fd583482
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_impl.h
@@ -0,0 +1,134 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IA_CSS_CELL_PROGRAM_LOAD_IMPL_H +#define __IA_CSS_CELL_PROGRAM_LOAD_IMPL_H + +#include "ia_css_cell_program_load.h" + +#include "ia_css_cell_program_load_storage_class.h" +#include "ia_css_cell_program_load_prog.h" +#include "ia_css_cell_program_struct.h" + + + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C int +ia_css_cell_program_load( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr) +{ + struct ia_css_cell_program_s prog; + int status; + + status = ia_css_cell_program_load_prog( + ssid, mmid, host_addr, vied_addr, &prog); + + return status; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C int +ia_css_cell_program_load_multi_entry( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr, + struct ia_css_cell_program_entry_func_info_s *entry_info) +{ + struct ia_css_cell_program_s prog; + int status; + + status = ia_css_cell_program_load_prog( + ssid, mmid, host_addr, vied_addr, &prog); + if (status) + return status; + + ia_css_cell_program_load_encode_entry_info(entry_info, &prog); + + return status; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C int +ia_css_cell_program_load_icache( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr) +{ + struct ia_css_cell_program_s prog; + int status; + + status = ia_css_cell_program_load_header(mmid, host_addr, &prog); + if (status) + return status; + + status = ia_css_cell_program_load_icache_prog( + ssid, mmid, host_addr, vied_addr, &prog); + return status; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C int +ia_css_cell_program_load_mem( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr) +{ + struct ia_css_cell_program_s prog; + int status; + + status = ia_css_cell_program_load_header(mmid, host_addr, &prog); + if (status) + return status; + + status = ia_css_cell_program_load_mem_prog( + ssid, mmid, host_addr, vied_addr, &prog); + return status; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C void +ia_css_cell_program_load_set_init_start_pc( + unsigned int ssid, + const struct ia_css_cell_program_entry_func_info_s *entry_info) +{ + assert(entry_info != NULL); + + ia_css_cell_program_load_set_start_pc(ssid, entry_info, + IA_CSS_CELL_PROGRAM_INIT_FUNC_ID); +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C void +ia_css_cell_program_load_set_exec_start_pc( + unsigned int ssid, + const struct ia_css_cell_program_entry_func_info_s *entry_info) +{ + assert(entry_info != NULL); + + ia_css_cell_program_load_set_start_pc(ssid, entry_info, + IA_CSS_CELL_PROGRAM_EXEC_FUNC_ID); +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C void +ia_css_cell_program_load_set_done_start_pc( + unsigned int ssid, + const struct ia_css_cell_program_entry_func_info_s *entry_info) +{ + assert(entry_info != NULL); + + ia_css_cell_program_load_set_start_pc(ssid, entry_info, + IA_CSS_CELL_PROGRAM_DONE_FUNC_ID); +} + +#endif /* __IA_CSS_CELL_PROGRAM_LOAD_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_prog_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_prog_impl.h new file mode 100644 index 000000000000..f20bc2f6da52 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_program_load_prog_impl.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CELL_PROGRAM_LOAD_PROG_IMPL_H +#define __IA_CSS_CELL_PROGRAM_LOAD_PROG_IMPL_H + +#include "ia_css_cell_program_load_prog.h" +#include "ia_css_fw_load.h" + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +int +ia_css_cell_program_load_prog( + unsigned int ssid, + unsigned int mmid, + ia_css_xmem_address_t host_addr, + unsigned int vied_addr, + struct ia_css_cell_program_s *prog) +{ + int status; + + status = ia_css_cell_program_load_header(mmid, host_addr, prog); + if (status) + return status; + + status = ia_css_cell_program_load_icache_prog( + ssid, mmid, host_addr, vied_addr, prog); + if (status) + return status; + + status = ia_css_cell_program_load_mem_prog( + ssid, mmid, host_addr, vied_addr, prog); + if (status) + return status; + + return status; +} + +IA_CSS_CELL_PROGRAM_LOAD_STORAGE_CLASS_C +int +ia_css_cell_program_load_header( + unsigned int mmid, + ia_css_xmem_address_t host_addr, + struct ia_css_cell_program_s *prog) +{ + + /* read the program header from DDR */ + ia_css_fw_load(mmid, + host_addr, + prog, + sizeof(struct ia_css_cell_program_s)); + + /* check magic number */ + if (prog->magic_number != IA_CSS_CELL_PROGRAM_MAGIC_NUMBER) + return -1; + + return 0; +} + +#if defined(C_RUN) || defined(HRT_UNSCHED) || defined(HRT_SCHED) +#include "ia_css_cell_program_load_csim.h" +#else +#include "ia_css_cell_program_load_bin.h" +#endif + +#endif /* __IA_CSS_CELL_PROGRAM_LOAD_PROG_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_regs.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_regs.h new file mode 100644 index 000000000000..4eb283b58de6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cell_program_load/src/ia_css_cell_regs.h @@ -0,0 +1,78 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CELL_REGS_H +#define __IA_CSS_CELL_REGS_H + +#include "storage_class.h" +#include "ipu_device_cell_type_properties.h" +#include "ia_css_cmem.h" + +STORAGE_CLASS_INLINE void +ia_css_cell_regs_set_stat_ctrl(unsigned int ssid, unsigned int regs_addr, + unsigned int value) +{ + ia_css_cmem_store_32(ssid, + regs_addr + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS, value); +} + +STORAGE_CLASS_INLINE unsigned int +ia_css_cell_regs_get_stat_ctrl(unsigned int ssid, unsigned int regs_addr) +{ + return ia_css_cmem_load_32(ssid, + regs_addr + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_icache_invalidate(unsigned int ssid, unsigned int regs_addr) +{ + ia_css_cell_regs_set_stat_ctrl(ssid, regs_addr, + 1u << IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_regs_set_start_pc(unsigned int ssid, unsigned int regs_addr, + unsigned int pc) +{ + ia_css_cmem_store_32(ssid, + regs_addr + IPU_DEVICE_CELL_START_PC_REG_ADDRESS, pc); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_regs_set_icache_base_address(unsigned int ssid, + unsigned int regs_addr, + unsigned int value) +{ + ia_css_cmem_store_32(ssid, + regs_addr + IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS, value); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_regs_set_icache_info_bits(unsigned int ssid, + unsigned int regs_addr, + unsigned int value) +{ + ia_css_cmem_store_32(ssid, + regs_addr + IPU_DEVICE_CELL_ICACHE_INFO_BITS_REG_ADDRESS, + value); +} + +STORAGE_CLASS_INLINE void +ia_css_cell_regs_icache_invalidate(unsigned int ssid, unsigned int regs_addr) +{ + ia_css_cell_regs_set_stat_ctrl(ssid, regs_addr, + 1u << IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT); +} + +#endif /* __IA_CSS_CELL_REGS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg.h new file mode 100644 index 000000000000..e8b0a48b27e3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg.h @@ -0,0 +1,60 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CLIENT_PKG_H +#define __IA_CSS_CLIENT_PKG_H + +#include "type_support.h" +#include "ia_css_client_pkg_storage_class.h" +/* for ia_css_client_pkg_header_s (ptr only), ia_css_client_pkg_t */ +#include "ia_css_client_pkg_types.h" + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_pg_manifest_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size); + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_prog_list_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size); + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_prog_desc_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size); + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_prog_bin_entry_offset_size( + const ia_css_client_pkg_t *client_pkg, + uint32_t program_id, + uint32_t *offset, + uint32_t *size); + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +int ia_css_client_pkg_get_indexed_prog_desc_entry_offset_size( + const ia_css_client_pkg_t *client_pkg, + uint32_t program_id, + uint32_t program_index, + uint32_t *offset, + uint32_t *size); + +#ifdef __INLINE_CLIENT_PKG__ +#include "ia_css_client_pkg_impl.h" +#endif + +#endif /* __IA_CSS_CLIENT_PKG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_storage_class.h new file mode 100644 index 000000000000..98af98d5d824 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_storage_class.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CLIENT_PKG_STORAGE_CLASS_H +#define __IA_CSS_CLIENT_PKG_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __INLINE_CLIENT_PKG__ +#define IA_CSS_CLIENT_PKG_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +#else +#define IA_CSS_CLIENT_PKG_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_CLIENT_PKG_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_CLIENT_PKG_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_types.h new file mode 100644 index 000000000000..ff5bf01358f1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/interface/ia_css_client_pkg_types.h @@ -0,0 +1,44 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CLIENT_PKG_TYPES_H +#define __IA_CSS_CLIENT_PKG_TYPES_H + +#include "type_support.h" + +typedef void ia_css_client_pkg_t; + +struct ia_css_client_pkg_header_s { + uint32_t prog_list_offset; + uint32_t prog_list_size; + uint32_t prog_desc_offset; + uint32_t prog_desc_size; + uint32_t pg_manifest_offset; + uint32_t pg_manifest_size; + uint32_t prog_bin_offset; + uint32_t prog_bin_size; +}; + +struct ia_css_client_pkg_prog_s { + uint32_t prog_id; + uint32_t prog_offset; + uint32_t prog_size; +}; + +struct ia_css_client_pkg_prog_list_s { + uint32_t prog_desc_count; + uint32_t prog_bin_count; +}; + +#endif /* __IA_CSS_CLIENT_PKG_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg.c new file mode 100644 index 000000000000..0b2fd86d09f3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg.c @@ -0,0 +1,20 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifdef __INLINE_CLIENT_PKG__ +#include "storage_class.h" +STORAGE_CLASS_INLINE int __ia_css_client_pkg_avoid_warning_on_empty_file(void) { return 0; } +#else /* __INLINE_CLIENT_PKG__ */ +#include "ia_css_client_pkg_impl.h" +#endif /* __INLINE_CLIENT_PKG__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg_impl.h new file mode 100644 index 000000000000..b79e5de02b89 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/client_pkg/src/ia_css_client_pkg_impl.h @@ -0,0 +1,161 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CLIENT_PKG_IMPL_H +#define __IA_CSS_CLIENT_PKG_IMPL_H + +#include "ia_css_client_pkg.h" +#include "ia_css_client_pkg_types.h" +#include "error_support.h" + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +int ia_css_client_pkg_get_pg_manifest_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size) +{ + int ret_val = -1; + + verifjmpexit(NULL != client_pkg_header); + verifjmpexit(NULL != offset); + verifjmpexit(NULL != size); + + *(offset) = client_pkg_header->pg_manifest_offset; + *(size) = client_pkg_header->pg_manifest_size; + ret_val = 0; +EXIT: + return ret_val; +} + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +int ia_css_client_pkg_get_prog_list_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size) +{ + int ret_val = -1; + + verifjmpexit(NULL != client_pkg_header); + verifjmpexit(NULL != offset); + verifjmpexit(NULL != size); + + *(offset) = client_pkg_header->prog_list_offset; + *(size) = client_pkg_header->prog_list_size; + ret_val = 0; +EXIT: + return ret_val; +} + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +int ia_css_client_pkg_get_prog_desc_offset_size( + const struct ia_css_client_pkg_header_s *client_pkg_header, + uint32_t *offset, + uint32_t *size) +{ + int ret_val = -1; + + verifjmpexit(NULL != client_pkg_header); + verifjmpexit(NULL != offset); + verifjmpexit(NULL != size); + + *(offset) = client_pkg_header->prog_desc_offset; + *(size) = client_pkg_header->prog_desc_size; + ret_val = 0; +EXIT: + return ret_val; +} + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +int ia_css_client_pkg_get_prog_bin_entry_offset_size( + const ia_css_client_pkg_t *client_pkg, + uint32_t program_id, + uint32_t *offset, + uint32_t *size) +{ + uint8_t i; + int ret_val = -1; + struct ia_css_client_pkg_header_s *client_pkg_header = NULL; + const struct ia_css_client_pkg_prog_list_s *pkg_prog_list = NULL; + const struct ia_css_client_pkg_prog_s *pkg_prog_bin_entry = NULL; + + verifjmpexit(NULL != client_pkg); + verifjmpexit(NULL != offset); + verifjmpexit(NULL != size); + + client_pkg_header = + (struct ia_css_client_pkg_header_s *)((uint8_t *)client_pkg); + pkg_prog_list = + (struct ia_css_client_pkg_prog_list_s *)((uint8_t *)client_pkg + + client_pkg_header->prog_list_offset); + pkg_prog_bin_entry = + (struct ia_css_client_pkg_prog_s *)((uint8_t *)pkg_prog_list + + sizeof(struct ia_css_client_pkg_prog_list_s)); + pkg_prog_bin_entry += pkg_prog_list->prog_desc_count; + + for (i = 0; i < pkg_prog_list->prog_bin_count; i++) { + if (program_id == pkg_prog_bin_entry->prog_id) { + *(offset) = pkg_prog_bin_entry->prog_offset; + *(size) = pkg_prog_bin_entry->prog_size; + ret_val = 0; + break; + } else if (0 == pkg_prog_bin_entry->prog_size) { + /* We can have a variable number of program descriptors. 
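+			 * (Illustration with hypothetical values: for two
+			 * valid binaries the table reads {idA, off, size},
+			 * {idB, off, size}, {0, 0, 0}; the loop stops at
+			 * the zero-sized third entry.)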
+ * The first non-valid one will have size set to 0 + */ + break; + } + pkg_prog_bin_entry++; + } +EXIT: + return ret_val; +} + +IA_CSS_CLIENT_PKG_STORAGE_CLASS_C +int ia_css_client_pkg_get_indexed_prog_desc_entry_offset_size( + const ia_css_client_pkg_t *client_pkg, + uint32_t program_id, + uint32_t program_index, + uint32_t *offset, + uint32_t *size) +{ + int ret_val = -1; + struct ia_css_client_pkg_header_s *client_pkg_header = NULL; + const struct ia_css_client_pkg_prog_list_s *pkg_prog_list = NULL; + const struct ia_css_client_pkg_prog_s *pkg_prog_desc_entry = NULL; + + verifjmpexit(NULL != client_pkg); + verifjmpexit(NULL != offset); + verifjmpexit(NULL != size); + + client_pkg_header = + (struct ia_css_client_pkg_header_s *)((uint8_t *)client_pkg); + pkg_prog_list = + (struct ia_css_client_pkg_prog_list_s *)((uint8_t *)client_pkg + + client_pkg_header->prog_list_offset); + pkg_prog_desc_entry = + (struct ia_css_client_pkg_prog_s *)((uint8_t *)pkg_prog_list + + sizeof(struct ia_css_client_pkg_prog_list_s)); + + verifjmpexit(program_index < pkg_prog_list->prog_desc_count); + verifjmpexit(program_id == pkg_prog_desc_entry[program_index].prog_id); + verifjmpexit(pkg_prog_desc_entry[program_index].prog_size > 0); + *(offset) = pkg_prog_desc_entry[program_index].prog_offset; + *(size) = pkg_prog_desc_entry[program_index].prog_size; + ret_val = 0; + +EXIT: + return ret_val; +} + +#endif /* __IA_CSS_CLIENT_PKG_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/config/psys/subsystem_cnlB0.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/config/psys/subsystem_cnlB0.mk new file mode 100644 index 000000000000..521613a2e6b1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/config/psys/subsystem_cnlB0.mk @@ -0,0 +1,144 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +############################################################################ +# This file is used to specify versions and properties of PSYS firmware +# components. Please note that these are subsystem specific. System specific +# properties should go to system_$IPU_SYSVER.mk. Also the device versions +# should be defined under "devices" or should be taken from the SDK. +############################################################################ + +# define for DPCM Compression/ Decompression module +HAS_DPCM = 1 + +# See HSD 1805169230 +HAS_FWDMA_ALIGNMENT_ISSUE_SIGHTING = 1 + +# Activate loading params and storing stats DDR<->REGs with DMA. 
+PSYS_USE_ISA_DMA = 1
+
+# Used in ISA module
+PSYS_ISL_DPC_DPC_V2 = 0
+
+# Use the DMA for terminal loading in Psys server
+PSYS_SERVER_ENABLE_TERMINAL_LOAD_DMA = 1
+
+# Assume OFS will be running concurrently with IPF, and prioritize according to rates of services on devproxy
+CONCURRENT_OFS_IPF_PRIORITY_OPTIMIZATION_ENABLED = 1
+
+# Enable clock gating of input feeder ibufctrl as specified in:
+# https://sharepoint.ger.ith.intel.com/sites/ICG_Arch/Shared%20Documents/
+# IPU%20Specs/IPU4-P/HAS/CNL%20B0%20clock%20gating%20registers.xlsx
+# (see register: processing_system_psa_psa_logic_ipfd_ibufctrl_2600_inst_enable_clk_gate)
+ENABLE_IPFD_IBUFCTRL_CLK_GATE = 1
+
+# Enable clock gating of input slice light ibufctrl as specified in:
+# https://sharepoint.ger.ith.intel.com/sites/ICG_Arch/Shared%20Documents/
+# IPU%20Specs/IPU4-P/HAS/CNL%20B0%20clock%20gating%20registers.xlsx
+# (see register: processing_system_input_slice_light_logic_ibuf_ctrl_enable_clk_gate)
+ENABLE_ISL_IBUFCTRL_CLK_GATE = 1
+
+# Enable clock gating of GDC0 as specified in:
+# https://sharepoint.ger.ith.intel.com/sites/ICG_Arch/Shared%20Documents/
+# IPU%20Specs/IPU4-P/HAS/CNL%20B0%20clock%20gating%20registers.xlsx
+# (see register: processing_system_gdc_logic_gdc0_nr_of_lut_parts)
+ENABLE_GDC0_CLK_GATE = 1
+
+
+# define for VCA_VCR2_FF
+HAS_VCA_VCR2_FF = 1
+
+HAS_GMEM = 1
+HAS_64KB_GDC_MEM = 1
+
+# define for enabling mmu_stream_id_lut support
+ENABLE_MMU_STREAM_ID_LUT = 1
+
+# Specification for Psys server's fixed globals' locations
+REGMEM_OFFSET = 0
+REGMEM_SECURE_OFFSET = 4096
+REGMEM_SIZE = 20
+REGMEM_WORD_BYTES = 4
+REGMEM_SIZE_BYTES = 80
+GPC_ISP_PERF_DATA_OFFSET = 80 # Taken from REGMEM_OFFSET + REGMEM_SIZE_BYTES
+GPC_ISP_PERF_DATA_SIZE_BYTES = 80
+FW_LOAD_NO_OF_REQUEST_OFFSET = 160 # Taken from GPC_ISP_PERF_DATA_OFFSET + GPC_ISP_PERF_DATA_SIZE_BYTES
+FW_LOAD_NO_OF_REQUEST_SIZE_BYTES = 4
+DISPATCHER_SCRATCH_SPACE_OFFSET = 4176 # Taken from REGMEM_SECURE_OFFSET + REGMEM_SIZE_BYTES
+# Total Used (@ REGMEM_OFFSET) = 164 # FW_LOAD_NO_OF_REQUEST_OFFSET + FW_LOAD_NO_OF_REQUEST_SIZE_BYTES
+# Total Used (@ REGMEM_SECURE_OFFSET) = 80 # REGMEM_SIZE_BYTES
+
+# use DMA NCI for OFS Service to reduce load in tproxy
+DMA_NCI_IN_OFS_SERVICE = 1
+# TODO use version naming scheme "v#" to decouple
+# IPU_SYSVER from version.
+PSYS_SERVER_MANIFEST_VERSION = cnlB0
+PSYS_RESOURCE_MODEL_VERSION = cnlB0
+PSYS_ACCESS_BLOCKER_VERSION = v1
+
+# Disable support for PPG protocol to save codesize
+PSYS_HAS_PPG_SUPPORT = 0
+# Disable support for late binding
+PSYS_HAS_LATE_BINDING_SUPPORT = 0
+
+# Specify PSYS server context spaces for caching context from DDR
+PSYS_SERVER_NOF_CACHES = 4
+PSYS_SERVER_MAX_NUM_PROC_GRP = $(PSYS_SERVER_NOF_CACHES)
+PSYS_SERVER_MAX_NUM_EXEC_PROC_GRP = 8 # Max PGs running: 4 running on cores, 4 being updated on the host while executing.
+PSYS_SERVER_MAX_PROC_GRP_SIZE = 3352
+PSYS_SERVER_MAX_MANIFEST_SIZE = 3420
+PSYS_SERVER_MAX_CLIENT_PKG_SIZE = 2360
+PSYS_SERVER_MAX_BUFFER_SET_SIZE = 0
+PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_SECTIONS = 90
+PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS = 1
+# The caching scheme for this subsystem suits the method of queueing ahead separate PGs for frames in an interleaved
+# fashion. As such there should be enough caches to support the heaviest two concurrent PGs, times two. This results
+# in the following distribution of caches: two large ones for the maximum sized PG, two smaller ones for the
+# second-largest sized PG.
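+# (Illustrative sketch, derived from the values in this file: caches 0 and 1
+# take the PSYS_SERVER_MAX_* sizes of the largest PG, while caches 2 and 3
+# take the second-largest PG's sizes, e.g. 1624-byte PGs with 1248-byte
+# manifests, so each of the two heaviest concurrent PG types gets two slots.)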
+PSYS_SERVER_CACHE_0_PROC_GRP_SIZE = $(PSYS_SERVER_MAX_PROC_GRP_SIZE) +PSYS_SERVER_CACHE_0_MANIFEST_SIZE = $(PSYS_SERVER_MAX_MANIFEST_SIZE) +PSYS_SERVER_CACHE_0_CLIENT_PKG_SIZE = $(PSYS_SERVER_MAX_CLIENT_PKG_SIZE) +PSYS_SERVER_CACHE_0_BUFFER_SET_SIZE = $(PSYS_SERVER_MAX_BUFFER_SET_SIZE) +PSYS_SERVER_CACHE_0_NUMBER_OF_TERMINAL_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_SECTIONS) +PSYS_SERVER_CACHE_0_NUMBER_OF_TERMINAL_STORE_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS) +PSYS_SERVER_CACHE_1_PROC_GRP_SIZE = $(PSYS_SERVER_CACHE_0_PROC_GRP_SIZE) +PSYS_SERVER_CACHE_1_MANIFEST_SIZE = $(PSYS_SERVER_CACHE_0_MANIFEST_SIZE) +PSYS_SERVER_CACHE_1_CLIENT_PKG_SIZE = $(PSYS_SERVER_CACHE_0_CLIENT_PKG_SIZE) +PSYS_SERVER_CACHE_1_BUFFER_SET_SIZE = $(PSYS_SERVER_CACHE_0_BUFFER_SET_SIZE) +PSYS_SERVER_CACHE_1_NUMBER_OF_TERMINAL_SECTIONS = $(PSYS_SERVER_CACHE_0_NUMBER_OF_TERMINAL_SECTIONS) +PSYS_SERVER_CACHE_1_NUMBER_OF_TERMINAL_STORE_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS) +PSYS_SERVER_CACHE_2_PROC_GRP_SIZE = 1624 +PSYS_SERVER_CACHE_2_MANIFEST_SIZE = 1248 +PSYS_SERVER_CACHE_2_CLIENT_PKG_SIZE = 1040 +PSYS_SERVER_CACHE_2_BUFFER_SET_SIZE = 0 +PSYS_SERVER_CACHE_2_NUMBER_OF_TERMINAL_SECTIONS = 43 +PSYS_SERVER_CACHE_2_NUMBER_OF_TERMINAL_STORE_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS) +PSYS_SERVER_CACHE_3_PROC_GRP_SIZE = $(PSYS_SERVER_CACHE_2_PROC_GRP_SIZE) +PSYS_SERVER_CACHE_3_MANIFEST_SIZE = $(PSYS_SERVER_CACHE_2_MANIFEST_SIZE) +PSYS_SERVER_CACHE_3_CLIENT_PKG_SIZE = $(PSYS_SERVER_CACHE_2_CLIENT_PKG_SIZE) +PSYS_SERVER_CACHE_3_BUFFER_SET_SIZE = $(PSYS_SERVER_CACHE_2_BUFFER_SET_SIZE) +PSYS_SERVER_CACHE_3_NUMBER_OF_TERMINAL_SECTIONS = $(PSYS_SERVER_CACHE_2_NUMBER_OF_TERMINAL_SECTIONS) +PSYS_SERVER_CACHE_3_NUMBER_OF_TERMINAL_STORE_SECTIONS = $(PSYS_SERVER_MAX_NUMBER_OF_TERMINAL_STORE_SECTIONS) +# Support dual command context for VTIO - concurrent secure and non-secure streams +PSYS_HAS_DUAL_CMD_CTX_SUPPORT = 1 + +HAS_SPC = 1 +HAS_SPP0 = 1 +HAS_SPP1 = 1 +HAS_ISP0 = 1 +HAS_ISP1 = 1 +HAS_ISP2 = 1 +HAS_ISP3 = 1 + +AB_CONFIG_ARRAY_SIZE = 50 diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/config/system_cnlB0.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/config/system_cnlB0.mk new file mode 100644 index 000000000000..667282b519c4 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/config/system_cnlB0.mk @@ -0,0 +1,96 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# + +#--- DEFINES REQUIRED TO COMPILE USING LLVM --- +# Enable LLVM/Volcano for IPU4P, SPs only. 
VOLCANO_IPU4P = 1
+VOLCANO_SP2601 = 1
+#----------------------------------------------
+
+# enable NO_ALIAS for LLVM
+ENABLE_NO_ALIAS_FOR_LLVM = 1
+
+LOGICAL_FW_INPUT_SYSTEM = input_system_system
+LOGICAL_FW_PROCESSING_SYSTEM = processing_system_system
+LOGICAL_FW_IPU_SYSTEM = ipu_system
+LOGICAL_FW_ISP_SYSTEM = isp2601_default_system
+SP_CONTROL_CELL = sp2601_control
+SP_PROXY_CELL = sp2601_proxy
+ISP_CELL = isp2601
+# The non-capital define isp2601 is used in the SDK; in order to distinguish
+# between different ISP versions, the ISP_CELL_IDENTIFIER define is added.
+ISP_CELL_IDENTIFIER = ISP2601
+HAS_IPFD = 1
+HAS_S2M_IN_ISYS_ISL_NONSOC_PATH = 0
+HAS_S2V_IN_ISYS_ISL_NONSOC_PATH = 1
+# ISL-IS non-SoC path has ISA with PAF and DPC-Pext support for IPU4P-B0
+HAS_ISA_IN_ISYS_ISL = 1
+HAS_PAF_IN_ISYS_ISL = 1
+HAS_DPC_PEXT_IN_ISYS_ISL = 1
+HAS_PMA_IF = 1
+
+HAS_MIPIBE_IN_PSYS_ISL = 1
+
+HAS_VPLESS_SUPPORT = 0
+
+DLI_SYSTEM = hive_isp_css_2600_system
+RESOURCE_MANAGER_VERSION = v2
+MEM_RESOURCE_VALIDATION_ERROR = 0
+OFS_SCALER_1_4K_TILEY_422_SUPPORT = 1
+PROGDESC_ACC_SYMBOLS_VERSION = v1
+DEVPROXY_INTERFACE_VERSION = v1
+FW_ABI_IPU_TYPES_VERSION = v1
+
+HAS_ONLINE_MODE_SUPPORT_IN_ISYS_PSYS = 0
+
+MMU_INTERFACE_VERSION = v2
+DEVICE_ACCESS_VERSION = v2
+PSYS_SERVER_VERSION = v3
+PSYS_SERVER_LOADER_VERSION = v1
+PSYS_HW_VERSION = CNL_B0_HW
+
+# Enable FW_DMA for loading firmware
+PSYS_SERVER_ENABLE_FW_LOAD_DMA = 1
+
+NCI_SPA_VERSION = v1
+MANIFEST_TOOL_VERSION = v2
+PSYS_CON_MGR_TOOL_VERSION = v1
+# TODO: should be removed after the OTF performance issues are solved
+PSYS_PROC_MGR_VERSION = v1
+IPU_RESOURCES_VERSION = v2
+
+HAS_ACC_CLUSTER_PAF_PAL = 1
+HAS_ACC_CLUSTER_PEXT_PAL = 1
+HAS_ACC_CLUSTER_GBL_PAL = 1
+
+# TODO use version naming scheme "v#" to decouple
+# IPU_SYSVER from version.
+PARAMBINTOOL_ISA_INIT_VERSION = cnlB0
+
+# Select EQC2EQ version
+# Version 1: uniform address space, equal EQ addresses regardless of EQC device
+# Version 2: multiple addresses per EQ, depending on location of EQC device
+EQC2EQ_VERSION = v1
+
+# Select DMA instance for fw_load
+FW_LOAD_DMA_INSTANCE = NCI_DMA_FW
+
+HAS_DMA_FW = 1
+
+HAS_SIS = 0
+HAS_IDS = 1
+
+PSYS_SERVER_ENABLE_TPROXY = 1
+PSYS_SERVER_ENABLE_DEVPROXY = 1
+NCI_OFS_VERSION = v1
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_component/cpd_component.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_component/cpd_component.mk
new file mode 100644
index 000000000000..8ecc3e42e55d
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_component/cpd_component.mk
@@ -0,0 +1,28 @@
+##
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details
+#
+##
+
+# MODULE is cpd/cpd_component
+
+CPD_DIR = $${MODULES_DIR}/cpd
+CPD_COMPONENT_DIR = $${MODULES_DIR}/cpd/cpd_component
+CPD_COMPONENT_INTERFACE = $(CPD_COMPONENT_DIR)/interface
+CPD_COMPONENT_SOURCES = $(CPD_COMPONENT_DIR)/src
+
+CPD_COMPONENT_FILES = $(CPD_COMPONENT_SOURCES)/ia_css_cpd_component_create.c
+CPD_COMPONENT_FILES += $(CPD_COMPONENT_SOURCES)/ia_css_cpd_component.c
+CPD_COMPONENT_CPPFLAGS = -I$(CPD_COMPONENT_INTERFACE)
+CPD_COMPONENT_CPPFLAGS += -I$(CPD_COMPONENT_SOURCES)
+CPD_COMPONENT_CPPFLAGS += -I$(CPD_DIR)
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_component/interface/ia_css_cpd_component_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_component/interface/ia_css_cpd_component_types.h
new file mode 100644
index 000000000000..7ad3070b2fd7
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_component/interface/ia_css_cpd_component_types.h
@@ -0,0 +1,90 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CPD_COMPONENT_TYPES_H
+#define __IA_CSS_CPD_COMPONENT_TYPES_H
+
+/** @file
+ * This file contains the data structures related to generation of the CPD file
+ */
+
+#include "type_support.h"
+
+#define SIZE_OF_FW_ARCH_VERSION 7
+#define SIZE_OF_SYSTEM_VERSION 11
+#define SIZE_OF_COMPONENT_NAME 12
+
+enum ia_css_cpd_component_endianness {
+	IA_CSSCPD_COMP_ENDIAN_RSVD,
+	IA_CSS_CPD_COMP_LITTLE_ENDIAN,
+	IA_CSS_CPD_COMP_BIG_ENDIAN
+};
+
+/** Module Data (components) Header
+ * The following data structure has been created using FAS section 5.25
+ * Open question: should we add padding at the end of the module directory?
+ * (the component must be 512-byte aligned)
+ */
+typedef struct {
+	uint32_t header_size;
+	/**< Specifies endianness of the binary data */
+	unsigned int endianness;
+	/**< fw_pkg_date is the current date, stored in 'binary decimal'
+	 * representation e.g. 538248729 (0x20150619)
+	 */
+	uint32_t fw_pkg_date;
+	/**< hive_sdk_date is the date of the HIVE_SDK, stored in
+	 * 'binary decimal' representation
+	 */
+	uint32_t hive_sdk_date;
+	/**< compiler_date is the date of ptools, stored in
+	 * 'binary decimal' representation
+	 */
+	uint32_t compiler_date;
+	/**< UNSCHED / SCHED / TARGET / CRUN */
+	unsigned int target_platform_type;
+	/**< specifies the system version stored as string
+	 * e.g. BXTB0_IPU4'\0'
+	 */
+	uint8_t system_version[SIZE_OF_SYSTEM_VERSION];
+	/**< specifies fw architecture version e.g. for BXT CSS3.0'\0' */
+	uint8_t fw_arch_version[SIZE_OF_FW_ARCH_VERSION];
+	uint8_t rsvd[2];
+} ia_css_header_component_t;
+
+/** Module Data Directory = Directory Header + Directory Entry (0..n)
+ * The following two data structures have been taken from CSE Storage FAS (CPD design)
+ * Module Data Directory Header
+ */
+typedef struct {
+	uint32_t header_marker;
+	uint32_t number_of_entries;
+	uint8_t header_version;
+	uint8_t entry_version;
+	uint8_t header_length; /**< 0x10 (16) Fixed for this version */
+	uint8_t checksum;
+	uint32_t partition_name;
+} ia_css_directory_header_component_t;
+
+/** Module Data Directory Entry
+ */
+typedef struct {
+	/**< character string describing the component name */
+	uint8_t entry_name[SIZE_OF_COMPONENT_NAME];
+	uint32_t offset;
+	uint32_t length;
+	uint32_t rsvd; /**< Must be 0 */
+} ia_css_directory_entry_component_t;
+
+#endif /* __IA_CSS_CPD_COMPONENT_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_metadata/cpd_metadata.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_metadata/cpd_metadata.mk
new file mode 100644
index 000000000000..ac78815dfbd8
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_metadata/cpd_metadata.mk
@@ -0,0 +1,29 @@
+##
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details
+#
+##
+
+
+# MODULE is CPD UTL (Metadata File Extension)
+
+CPD_DIR = $${MODULES_DIR}/cpd/
+CPD_METADATA_DIR = $${MODULES_DIR}/cpd/cpd_metadata
+CPD_METADATA_INTERFACE = $(CPD_METADATA_DIR)/interface
+CPD_METADATA_SOURCES = $(CPD_METADATA_DIR)/src
+
+CPD_METADATA_FILES = $(CPD_METADATA_SOURCES)/ia_css_cpd_metadata_create.c
+CPD_METADATA_FILES += $(CPD_METADATA_SOURCES)/ia_css_cpd_metadata.c
+CPD_METADATA_CPPFLAGS = -I$(CPD_METADATA_INTERFACE) \
+	-I$(CPD_METADATA_SOURCES) \
+	-I$(CPD_DIR)
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_metadata/interface/ia_css_cpd_metadata_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_metadata/interface/ia_css_cpd_metadata_types.h
new file mode 100644
index 000000000000..a88c6aede08c
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/cpd/cpd_metadata/interface/ia_css_cpd_metadata_types.h
@@ -0,0 +1,111 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CPD_METADATA_TYPES_H
+#define __IA_CSS_CPD_METADATA_TYPES_H
+
+/** @file
+ * This file contains data structures related to generation of the
+ * metadata file extension
+ */
+#include
+
+/* As per v0.2 manifest document
+ * Header = Extension Type (4) + Extension Length (4) +
+ * iUnit Image Type (4) + Reserved (16)
+ */
+#define IPU_METADATA_HEADER_RSVD_SIZE 16
+#define IPU_METADATA_HEADER_FIELDS_SIZE 12
+#define IPU_METADATA_HEADER_SIZE \
+	(IPU_METADATA_HEADER_FIELDS_SIZE + IPU_METADATA_HEADER_RSVD_SIZE)
+
+/* iUnit metadata extension type value */
+#define IPU_METADATA_EXTENSION_TYPE 16
+
+/* Unique id for level 0 bootloader component */
+#define IA_CSS_IUNIT_BTLDR_ID 0
+/* Unique id for psys server program group component */
+#define IA_CSS_IUNIT_PSYS_SERVER_ID 1
+/* Unique id for isys server program group component */
+#define IA_CSS_IUNIT_ISYS_SERVER_ID 2
+/* Initial identifier for client program group component */
+#define IA_CSS_IUNIT_CLIENT_ID 3
+
+/* Use this to parse the date from the release version of the iUnit component
+ * e.g. 20150701
+ */
+#define IA_CSS_IUNIT_COMP_DATE_SIZE 8
+/* offset of the release version in the program group binary
+ * e.g. release_version = "scci_gerrit_20150716_2117"
+ * In the cpd file we only use the date/version for the component
+ */
+#define IA_CSS_IUNIT_DATE_OFFSET 12
+
+#define IPU_METADATA_HASH_KEY_SIZE 32
+#define IPU_METADATA_ATTRIBUTE_SIZE 16
+#define IA_CSE_METADATA_COMPONENT_ID_MAX 127
+
+typedef enum {
+	IA_CSS_CPD_METADATA_IMAGE_TYPE_RESERVED,
+	IA_CSS_CPD_METADATA_IMAGE_TYPE_BOOTLOADER,
+	IA_CSS_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE
+} ia_css_cpd_metadata_image_type_t;
+
+typedef enum {
+	IA_CSS_CPD_MAIN_FW_TYPE_RESERVED,
+	IA_CSS_CPD_MAIN_FW_TYPE_PSYS_SERVER,
+	IA_CSS_CPD_MAIN_FW_TYPE_ISYS_SERVER,
+	IA_CSS_CPD_MAIN_FW_TYPE_CLIENT
+} ia_css_cpd_iunit_main_fw_type_t;
+
+/** Data structure for component specific information
+ * The following data structure has been taken from CSE Manifest v0.2
+ */
+typedef struct {
+	/**< Component ID - unique for each component */
+	uint32_t id;
+	/**< Size of the component */
+	uint32_t size;
+	/**< Version/date of when the component was generated/created */
+	uint32_t version;
+	/**< SHA 256 Hash Key for component */
+	uint8_t sha2_hash[IPU_METADATA_HASH_KEY_SIZE];
+	/**< component sp entry point
+	 * - Only valid for btldr/psys/isys server component
+	 */
+	uint32_t entry_point;
+	/**< component icache base address
+	 * - Only valid for btldr/psys/isys server component
+	 */
+	uint32_t icache_base_offset;
+	/**< Reserved - must be 0 */
+	uint8_t attributes[IPU_METADATA_ATTRIBUTE_SIZE];
+} ia_css_cpd_metadata_component_t;
+
+/** Data structure for Metadata File Extension Header
+ */
+typedef struct {
+	/**< Specifies the binary image type
+	 * - could be bootloader or main firmware
+	 */
+	ia_css_cpd_metadata_image_type_t image_type;
+	/**< Number of components available in the metadata file extension
+	 * (For btldr always 1)
+	 */
+	uint32_t component_count;
+	/**< Component specific information */
+	ia_css_cpd_metadata_component_t *components;
+} ia_css_cpd_metadata_desc_t;
+
+#endif /* __IA_CSS_CPD_METADATA_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/device_access.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/device_access.mk
new file mode 100644
index 000000000000..1629d9af803b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/device_access.mk
@@ -0,0 +1,40 @@
+# # #
+# Support for
Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# + +ifndef _DEVICE_ACCESS_MK_ +_DEVICE_ACCESS_MK_ = 1 + +# DEVICE_ACCESS_VERSION= +include $(MODULES_DIR)/config/system_$(IPU_SYSVER).mk + +DEVICE_ACCESS_DIR=$${MODULES_DIR}/device_access +DEVICE_ACCESS_INTERFACE=$(DEVICE_ACCESS_DIR)/interface +DEVICE_ACCESS_SOURCES=$(DEVICE_ACCESS_DIR)/src + +DEVICE_ACCESS_HOST_FILES = + +DEVICE_ACCESS_FW_FILES = + +DEVICE_ACCESS_HOST_CPPFLAGS = \ + -I$(DEVICE_ACCESS_INTERFACE) \ + -I$(DEVICE_ACCESS_SOURCES) + +DEVICE_ACCESS_FW_CPPFLAGS = \ + -I$(DEVICE_ACCESS_INTERFACE) \ + -I$(DEVICE_ACCESS_SOURCES) + +DEVICE_ACCESS_FW_CPPFLAGS += \ + -I$(DEVICE_ACCESS_SOURCES)/$(DEVICE_ACCESS_VERSION) +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_cmem.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_cmem.h new file mode 100644 index 000000000000..3dc47c29fcab --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_cmem.h @@ -0,0 +1,58 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_CMEM_H +#define __IA_CSS_CMEM_H + +#include "type_support.h" +#include "storage_class.h" + +#ifdef __VIED_CELL +typedef unsigned int ia_css_cmem_address_t; +#else +#include +typedef vied_subsystem_address_t ia_css_cmem_address_t; +#endif + +STORAGE_CLASS_INLINE uint32_t +ia_css_cmem_load_32(unsigned int ssid, ia_css_cmem_address_t address); + +STORAGE_CLASS_INLINE void +ia_css_cmem_store_32(unsigned int ssid, ia_css_cmem_address_t address, + uint32_t value); + +STORAGE_CLASS_INLINE void +ia_css_cmem_load(unsigned int ssid, ia_css_cmem_address_t address, void *data, + unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_cmem_store(unsigned int ssid, ia_css_cmem_address_t address, + const void *data, unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_cmem_zero(unsigned int ssid, ia_css_cmem_address_t address, + unsigned int size); + +STORAGE_CLASS_INLINE ia_css_cmem_address_t +ia_css_cmem_get_cmem_addr_from_dmem(unsigned int base_addr, void *p); + +/* Include inline implementation */ + +#ifdef __VIED_CELL +#include "ia_css_cmem_cell.h" +#else +#include "ia_css_cmem_host.h" +#endif + +#endif /* __IA_CSS_CMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_xmem.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_xmem.h new file mode 100644 index 000000000000..de2b94d8af54 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_xmem.h @@ -0,0 +1,65 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_XMEM_H +#define __IA_CSS_XMEM_H + +#include "type_support.h" +#include "storage_class.h" + +#ifdef __VIED_CELL +typedef unsigned int ia_css_xmem_address_t; +#else +#include +typedef host_virtual_address_t ia_css_xmem_address_t; +#endif + +STORAGE_CLASS_INLINE uint8_t +ia_css_xmem_load_8(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE uint16_t +ia_css_xmem_load_16(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE uint32_t +ia_css_xmem_load_32(unsigned int mmid, ia_css_xmem_address_t address); + +STORAGE_CLASS_INLINE void +ia_css_xmem_load(unsigned int mmid, ia_css_xmem_address_t address, void *data, + unsigned int size); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_8(unsigned int mmid, ia_css_xmem_address_t address, + uint8_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_16(unsigned int mmid, ia_css_xmem_address_t address, + uint16_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_32(unsigned int mmid, ia_css_xmem_address_t address, + uint32_t value); + +STORAGE_CLASS_INLINE void +ia_css_xmem_store(unsigned int mmid, ia_css_xmem_address_t address, + const void *data, unsigned int bytes); + +/* Include inline implementation */ + +#ifdef __VIED_CELL +#include "ia_css_xmem_cell.h" +#else +#include "ia_css_xmem_host.h" +#endif + +#endif /* __IA_CSS_XMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_xmem_cmem.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_xmem_cmem.h new file mode 100644 index 000000000000..57aab3323c73 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/interface/ia_css_xmem_cmem.h @@ -0,0 +1,35 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_XMEM_CMEM_H +#define __IA_CSS_XMEM_CMEM_H + +#include "ia_css_cmem.h" +#include "ia_css_xmem.h" + +/* Copy data from xmem to cmem, e.g., from a program in DDR to a cell's DMEM */ +/* This may also be implemented using DMA */ + +STORAGE_CLASS_INLINE void +ia_css_xmem_to_cmem_copy( + unsigned int mmid, + unsigned int ssid, + ia_css_xmem_address_t src, + ia_css_cmem_address_t dst, + unsigned int size); + +/* include inline implementation */ +#include "ia_css_xmem_cmem_impl.h" + +#endif /* __IA_CSS_XMEM_CMEM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_cmem_host.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_cmem_host.h new file mode 100644 index 000000000000..22799e67214c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_cmem_host.h @@ -0,0 +1,121 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_CMEM_HOST_H +#define __IA_CSS_CMEM_HOST_H + +/* This file is an inline implementation for the interface ia_css_cmem.h + * and should only be included there. */ + +#include "assert_support.h" +#include "misc_support.h" + +STORAGE_CLASS_INLINE uint32_t +ia_css_cmem_load_32(unsigned int ssid, ia_css_cmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + return vied_subsystem_load_32(ssid, address); +} + +STORAGE_CLASS_INLINE uint32_t +ia_css_cond_cmem_load_32(bool cond, unsigned int ssid, + ia_css_cmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + if (cond) + return vied_subsystem_load_32(ssid, address); + else + return 0; +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_store_32(unsigned int ssid, ia_css_cmem_address_t address, + uint32_t data) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + vied_subsystem_store_32(ssid, address, data); +} + +STORAGE_CLASS_INLINE void +ia_css_cond_cmem_store_32(bool cond, unsigned int ssid, + ia_css_cmem_address_t address, uint32_t data) +{ + /* Address has to be word aligned */ + assert(0 == address % 4); + if (cond) + vied_subsystem_store_32(ssid, address, data); +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_load(unsigned int ssid, ia_css_cmem_address_t address, void *data, + unsigned int size) +{ + uint32_t *data32 = (uint32_t *)data; + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + assert((long)data % 4 == 0); + + while (address != end) { + *data32 = ia_css_cmem_load_32(ssid, address); + address += 4; + data32 += 1; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_store(unsigned int ssid, ia_css_cmem_address_t address, + const void *data, unsigned int size) +{ + uint32_t *data32 = (uint32_t *)data; + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + assert((long)data % 4 == 0); + + while (address != end) { + ia_css_cmem_store_32(ssid, address, *data32); + address += 4; + data32 += 1; + } +} + +STORAGE_CLASS_INLINE void +ia_css_cmem_zero(unsigned int ssid, ia_css_cmem_address_t address, + unsigned int size) +{ + uint32_t end = address + size; + + assert(size % 4 == 0); + assert(address % 4 == 0); + + while (address != end) { + ia_css_cmem_store_32(ssid, address, 0); + address += 4; + } +} + +STORAGE_CLASS_INLINE ia_css_cmem_address_t +ia_css_cmem_get_cmem_addr_from_dmem(unsigned int base_addr, void *p) +{ + NOT_USED(base_addr); + return (ia_css_cmem_address_t)(uintptr_t)p; +} + +#endif /* __IA_CSS_CMEM_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_xmem_cmem_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_xmem_cmem_impl.h new file mode 100644 index 000000000000..adc178b75059 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_xmem_cmem_impl.h @@ -0,0 +1,79 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_XMEM_CMEM_IMPL_H
+#define __IA_CSS_XMEM_CMEM_IMPL_H
+
+#include "ia_css_xmem_cmem.h"
+
+#include "ia_css_cmem.h"
+#include "ia_css_xmem.h"
+
+/* Copy data from xmem to cmem, e.g., from a program in DDR to a cell's DMEM */
+/* This may also be implemented using DMA */
+
+STORAGE_CLASS_INLINE void
+ia_css_xmem_to_cmem_copy(
+	unsigned int mmid,
+	unsigned int ssid,
+	ia_css_xmem_address_t src,
+	ia_css_cmem_address_t dst,
+	unsigned int size)
+{
+	/* copy from ddr to subsystem, e.g., cell dmem */
+	ia_css_cmem_address_t end = dst + size;
+
+	assert(size % 4 == 0);
+	assert((uintptr_t) dst % 4 == 0);
+	assert((uintptr_t) src % 4 == 0);
+
+	while (dst != end) {
+		uint32_t data;
+
+		data = ia_css_xmem_load_32(mmid, src);
+		ia_css_cmem_store_32(ssid, dst, data);
+		dst += 4;
+		src += 4;
+	}
+}
+
+/* Copy data from cmem to xmem */
+
+STORAGE_CLASS_INLINE void
+ia_css_cmem_to_xmem_copy(
+	unsigned int mmid,
+	unsigned int ssid,
+	ia_css_cmem_address_t src,
+	ia_css_xmem_address_t dst,
+	unsigned int size)
+{
+	/* copy from subsystem, e.g., cell dmem, to ddr */
+	ia_css_xmem_address_t end = dst + size;
+
+	assert(size % 4 == 0);
+	assert((uintptr_t) dst % 4 == 0);
+	assert((uintptr_t) src % 4 == 0);
+
+	while (dst != end) {
+		uint32_t data;
+
+		data = ia_css_cmem_load_32(ssid, src);
+		ia_css_xmem_store_32(mmid, dst, data);
+		dst += 4;
+		src += 4;
+	}
+}
+
+
+#endif /* __IA_CSS_XMEM_CMEM_IMPL_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_xmem_host.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_xmem_host.h
new file mode 100644
index 000000000000..d94991fc1114
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/device_access/src/ia_css_xmem_host.h
@@ -0,0 +1,84 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IA_CSS_XMEM_HOST_H +#define __IA_CSS_XMEM_HOST_H + +#include "ia_css_xmem.h" +#include +#include "assert_support.h" +#include + +STORAGE_CLASS_INLINE uint8_t +ia_css_xmem_load_8(unsigned int mmid, ia_css_xmem_address_t address) +{ + return shared_memory_load_8(mmid, address); +} + +STORAGE_CLASS_INLINE uint16_t +ia_css_xmem_load_16(unsigned int mmid, ia_css_xmem_address_t address) +{ + /* Address has to be half-word aligned */ + assert(0 == (uintptr_t) address % 2); + return shared_memory_load_16(mmid, address); +} + +STORAGE_CLASS_INLINE uint32_t +ia_css_xmem_load_32(unsigned int mmid, ia_css_xmem_address_t address) +{ + /* Address has to be word aligned */ + assert(0 == (uintptr_t) address % 4); + return shared_memory_load_32(mmid, address); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_load(unsigned int mmid, ia_css_xmem_address_t address, void *data, + unsigned int size) +{ + shared_memory_load(mmid, address, data, size); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_8(unsigned int mmid, ia_css_xmem_address_t address, + uint8_t value) +{ + shared_memory_store_8(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_16(unsigned int mmid, ia_css_xmem_address_t address, + uint16_t value) +{ + /* Address has to be half-word aligned */ + assert(0 == (uintptr_t) address % 2); + shared_memory_store_16(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store_32(unsigned int mmid, ia_css_xmem_address_t address, + uint32_t value) +{ + /* Address has to be word aligned */ + assert(0 == (uintptr_t) address % 4); + shared_memory_store_32(mmid, address, value); +} + +STORAGE_CLASS_INLINE void +ia_css_xmem_store(unsigned int mmid, ia_css_xmem_address_t address, + const void *data, unsigned int bytes) +{ + shared_memory_store(mmid, address, data, bytes); +} + +#endif /* __IA_CSS_XMEM_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/cnlB0/ipu_device_buttress_properties_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/cnlB0/ipu_device_buttress_properties_struct.h new file mode 100644 index 000000000000..5102f6e44d2f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/cnlB0/ipu_device_buttress_properties_struct.h @@ -0,0 +1,68 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H +#define __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H + +/* Destination values for master port 0 and bitfield "request_dest" */ +enum cio_M0_btrs_dest { + DEST_IS_BUT_REGS = 0, + DEST_IS_DDR, + RESERVED, + DEST_IS_SUBSYSTEM, + N_BTRS_DEST +}; + +/* Bit-field positions for M0 info bits */ +enum ia_css_info_bits_m0_pos { + IA_CSS_INFO_BITS_M0_SNOOPABLE_POS = 0, + IA_CSS_INFO_BITS_M0_IMR_DESTINED_POS = 1, + IA_CSS_INFO_BITS_M0_REQUEST_DEST_POS = 4 +}; + +#define IA_CSS_INFO_BITS_M0_DDR \ + (DEST_IS_DDR << IA_CSS_INFO_BITS_M0_REQUEST_DEST_POS) +#define IA_CSS_INFO_BITS_M0_SNOOPABLE (1 << IA_CSS_INFO_BITS_M0_SNOOPABLE_POS) + +/* Info bits as expected by the buttress */ +/* Deprecated because bit fields are not portable */ + +/* For master port 0*/ +union cio_M0_t { + struct { + unsigned int snoopable : 1; + unsigned int imr_destined : 1; + unsigned int spare0 : 2; + unsigned int request_dest : 2; + unsigned int spare1 : 26; + } as_bitfield; + unsigned int as_word; +}; + +/* For master port 1*/ +union cio_M1_t { + struct { + unsigned int spare0 : 1; + unsigned int deadline_pointer : 1; + unsigned int reserved : 1; + unsigned int zlw : 1; + unsigned int stream_id : 4; + unsigned int address_swizzling : 1; + unsigned int spare1 : 23; + } as_bitfield; + unsigned int as_word; +}; + + +#endif /* __IPU_DEVICE_BUTTRESS_PROPERTIES_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties.h new file mode 100644 index 000000000000..e6e1e9dcbe80 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_H +#define __IPU_DEVICE_CELL_PROPERTIES_H + +#include "storage_class.h" +#include "ipu_device_cell_type_properties.h" + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_devices(void); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_memories(const unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_size(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_memory_address(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_databus_memory_address(const unsigned int cell_id, + const unsigned int mem_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_num_masters(const unsigned int cell_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_bits(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_num_segments(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_segment_size(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_stride(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_base_reg(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_reg(const unsigned int cell_id, + const unsigned int master_id); + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_icache_align(unsigned int cell_id); + +#ifdef C_RUN +STORAGE_CLASS_INLINE int +ipu_device_cell_id_crun(int cell_id); +#endif + +#include "ipu_device_cell_properties_func.h" + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_func.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_func.h new file mode 100644 index 000000000000..481b0504a237 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_func.h @@ -0,0 +1,164 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IPU_DEVICE_CELL_PROPERTIES_FUNC_H
+#define __IPU_DEVICE_CELL_PROPERTIES_FUNC_H
+
+/* define properties for all cells used in ISYS */
+
+#include "ipu_device_cell_properties_impl.h"
+#include "ipu_device_cell_devices.h"
+#include "assert_support.h"
+#include "storage_class.h"
+
+enum {IA_CSS_CELL_MASTER_ADDRESS_WIDTH = 32};
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_num_devices(void)
+{
+	return NUM_CELLS;
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_num_memories(const unsigned int cell_id)
+{
+	assert(cell_id < NUM_CELLS);
+	return ipu_device_cell_properties[cell_id].type_properties->count->
+		num_memories;
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_memory_size(const unsigned int cell_id,
+			const unsigned int mem_id)
+{
+	assert(cell_id < NUM_CELLS);
+	assert(mem_id < ipu_device_cell_num_memories(cell_id));
+	return ipu_device_cell_properties[cell_id].type_properties->
+		mem_size[mem_id];
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_memory_address(const unsigned int cell_id,
+			const unsigned int mem_id)
+{
+	assert(cell_id < NUM_CELLS);
+	assert(mem_id < ipu_device_cell_num_memories(cell_id));
+	return ipu_device_cell_properties[cell_id].mem_address[mem_id];
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_databus_memory_address(const unsigned int cell_id,
+			const unsigned int mem_id)
+{
+	assert(cell_id < NUM_CELLS);
+	assert(mem_id < ipu_device_cell_num_memories(cell_id));
+	assert(mem_id != 0);
+	return ipu_device_cell_properties[cell_id].mem_databus_address[mem_id];
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_num_masters(const unsigned int cell_id)
+{
+	assert(cell_id < NUM_CELLS);
+	return ipu_device_cell_properties[cell_id].type_properties->count->
+		num_master_ports;
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_master_segment_bits(const unsigned int cell_id,
+				const unsigned int master_id)
+{
+	assert(cell_id < NUM_CELLS);
+	assert(master_id < ipu_device_cell_num_masters(cell_id));
+	return ipu_device_cell_properties[cell_id].type_properties->
+		master[master_id].segment_bits;
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_master_num_segments(const unsigned int cell_id,
+				const unsigned int master_id)
+{
+	return 1u << ipu_device_cell_master_segment_bits(cell_id, master_id);
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_master_segment_size(const unsigned int cell_id,
+				const unsigned int master_id)
+{
+	return 1u << (IA_CSS_CELL_MASTER_ADDRESS_WIDTH -
+		ipu_device_cell_master_segment_bits(cell_id, master_id));
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_master_stride(const unsigned int cell_id,
+			const unsigned int master_id)
+{
+	assert(cell_id < NUM_CELLS);
+	assert(master_id < ipu_device_cell_num_masters(cell_id));
+	return
+		ipu_device_cell_properties[cell_id].type_properties->
+		master[master_id].stride;
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_master_base_reg(const unsigned int cell_id,
+				const unsigned int master_id)
+{
+	assert(cell_id < NUM_CELLS);
+	assert(master_id < ipu_device_cell_num_masters(cell_id));
+	return
+		ipu_device_cell_properties[cell_id].type_properties->
+		master[master_id].base_address_register;
+}
+
+STORAGE_CLASS_INLINE unsigned int
+ipu_device_cell_master_info_reg(const unsigned int cell_id,
+				const unsigned int master_id)
+{
+	assert(cell_id < NUM_CELLS);
+	assert(master_id < ipu_device_cell_num_masters(cell_id));
+	return
+		ipu_device_cell_properties[cell_id].type_properties->
+		
master[master_id].info_bits_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_master_info_override_reg(const unsigned int cell_id, + const unsigned int master_id) +{ + assert(cell_id < NUM_CELLS); + assert(master_id < ipu_device_cell_num_masters(cell_id)); + return + ipu_device_cell_properties[cell_id].type_properties-> + master[master_id].info_override_bits_register; +} + +STORAGE_CLASS_INLINE unsigned int +ipu_device_cell_icache_align(unsigned int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_cell_properties[cell_id].type_properties->count-> + icache_align; +} + +#ifdef C_RUN +STORAGE_CLASS_INLINE int +ipu_device_cell_id_crun(int cell_id) +{ + assert(cell_id < NUM_CELLS); + return ipu_device_map_cell_id_to_crun_proc_id[cell_id]; +} +#endif + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_FUNC_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_struct.h new file mode 100644 index 000000000000..63397dc0b7fe --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_properties_struct.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H +#define __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H + +/* definitions for all cell types */ + +struct ipu_device_cell_count_s { + unsigned int num_memories; + unsigned int num_master_ports; + unsigned int num_stall_bits; + unsigned int icache_align; +}; + +struct ipu_device_cell_master_properties_s { + unsigned int segment_bits; + unsigned int stride; /* offset to register of next segment */ + unsigned int base_address_register; /* address of first base address + register */ + unsigned int info_bits_register; + unsigned int info_override_bits_register; +}; + +struct ipu_device_cell_type_properties_s { + const struct ipu_device_cell_count_s *count; + const struct ipu_device_cell_master_properties_s *master; + const unsigned int *reg_offset; /* offsets of registers, some depend + on cell type */ + const unsigned int *mem_size; +}; + +struct ipu_device_cell_properties_s { + const struct ipu_device_cell_type_properties_s *type_properties; + const unsigned int *mem_address; + const unsigned int *mem_databus_address; + /* const cell_master_port_properties_s* master_port_properties; */ +}; + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_type_properties.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_type_properties.h new file mode 100644 index 000000000000..72caed3eef0c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_cell_type_properties.h @@ -0,0 +1,69 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_CELL_TYPE_PROPERTIES_H +#define __IPU_DEVICE_CELL_TYPE_PROPERTIES_H + +#define IPU_DEVICE_INVALID_MEM_ADDRESS 0xFFFFFFFF + +enum ipu_device_cell_stat_ctrl_bit { + IPU_DEVICE_CELL_STAT_CTRL_RESET_BIT = 0, + IPU_DEVICE_CELL_STAT_CTRL_START_BIT = 1, + IPU_DEVICE_CELL_STAT_CTRL_RUN_BIT = 3, + IPU_DEVICE_CELL_STAT_CTRL_READY_BIT = 5, + IPU_DEVICE_CELL_STAT_CTRL_SLEEP_BIT = 6, + IPU_DEVICE_CELL_STAT_CTRL_STALL_BIT = 7, + IPU_DEVICE_CELL_STAT_CTRL_CLEAR_IRQ_MASK_FLAG_BIT = 8, + IPU_DEVICE_CELL_STAT_CTRL_BROKEN_IRQ_MASK_FLAG_BIT = 9, + IPU_DEVICE_CELL_STAT_CTRL_READY_IRQ_MASK_FLAG_BIT = 10, + IPU_DEVICE_CELL_STAT_CTRL_SLEEP_IRQ_MASK_FLAG_BIT = 11, + IPU_DEVICE_CELL_STAT_CTRL_INVALIDATE_ICACHE_BIT = 12, + IPU_DEVICE_CELL_STAT_CTRL_ICACHE_ENABLE_PREFETCH_BIT = 13 +}; + +enum ipu_device_cell_reg_addr { + IPU_DEVICE_CELL_STAT_CTRL_REG_ADDRESS = 0x0, + IPU_DEVICE_CELL_START_PC_REG_ADDRESS = 0x4, + IPU_DEVICE_CELL_ICACHE_BASE_REG_ADDRESS = 0x10, + IPU_DEVICE_CELL_ICACHE_INFO_BITS_REG_ADDRESS = 0x14 +}; + +enum ipu_device_cell_reg { + IPU_DEVICE_CELL_STAT_CTRL_REG, + IPU_DEVICE_CELL_START_PC_REG, + IPU_DEVICE_CELL_ICACHE_BASE_REG, + IPU_DEVICE_CELL_DEBUG_PC_REG, + IPU_DEVICE_CELL_STALL_REG, + IPU_DEVICE_CELL_NUM_REGS +}; + +enum ipu_device_cell_mem { + IPU_DEVICE_CELL_REGS, /* memory id of registers */ + IPU_DEVICE_CELL_PMEM, /* memory id of pmem */ + IPU_DEVICE_CELL_DMEM, /* memory id of dmem */ + IPU_DEVICE_CELL_BAMEM, /* memory id of bamem */ + IPU_DEVICE_CELL_VMEM /* memory id of vmem */ +}; +#define IPU_DEVICE_CELL_NUM_MEMORIES (IPU_DEVICE_CELL_VMEM + 1) + +enum ipu_device_cell_master { + IPU_DEVICE_CELL_MASTER_ICACHE, /* master port id of icache */ + IPU_DEVICE_CELL_MASTER_QMEM, + IPU_DEVICE_CELL_MASTER_CMEM, + IPU_DEVICE_CELL_MASTER_XMEM, + IPU_DEVICE_CELL_MASTER_XVMEM +}; +#define IPU_DEVICE_CELL_MASTER_NUM_MASTERS (IPU_DEVICE_CELL_MASTER_XVMEM + 1) + +#endif /* __IPU_DEVICE_CELL_TYPE_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties.h new file mode 100644 index 000000000000..fd0c5a586c94 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties.h @@ -0,0 +1,26 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_GP_PROPERTIES_H +#define __IPU_DEVICE_GP_PROPERTIES_H + +#include "storage_class.h" +#include "ipu_device_gp_properties_types.h" + +STORAGE_CLASS_INLINE unsigned int +ipu_device_gp_mux_addr(const unsigned int device_id, const unsigned int mux_id); + +#include "ipu_device_gp_properties_func.h" + +#endif /* __IPU_DEVICE_GP_PROPERTIES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties_types.h new file mode 100644 index 000000000000..acd91117ee27 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/interface/ipu_device_gp_properties_types.h @@ -0,0 +1,104 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_GP_PROPERTIES_TYPES_H +#define __IPU_DEVICE_GP_PROPERTIES_TYPES_H + +enum ipu_device_gp_isa_value { + /* ISA_MUX_SEL options */ + IPU_DEVICE_GP_ISA_MUX_SEL_ICA = 0, /* Enable output after FF ICA */ + IPU_DEVICE_GP_ISA_MUX_SEL_LSC = 1, /* Enable output after FF LSC */ + IPU_DEVICE_GP_ISA_MUX_SEL_DPC = 2, /* Enable output after FF DPC */ + /* ICA stream block options */ + /* UNBLOCK signal received from ICA */ + IPU_DEVICE_GP_ISA_ICA_UNBLOCK = 0, + /* BLOCK signal received from ICA */ + IPU_DEVICE_GP_ISA_ICA_BLOCK = 1, + /* LSC stream block options */ + /* UNBLOCK signal received from LSC */ + IPU_DEVICE_GP_ISA_LSC_UNBLOCK = 0, + /* BLOCK signal received from LSC */ + IPU_DEVICE_GP_ISA_LSC_BLOCK = 1, + /* DPC stream block options */ + /* UNBLOCK signal received from DPC */ + IPU_DEVICE_GP_ISA_DPC_UNBLOCK = 0, + /* BLOCK signal received from DPC */ + IPU_DEVICE_GP_ISA_DPC_BLOCK = 1, + /* Defines needed only for bxtB0 */ + /* ISA_AWB_MUX_SEL options */ + /* Input Correction input */ + IPU_DEVICE_GP_ISA_AWB_MUX_SEL_ICA = 0, + /* DPC input */ + IPU_DEVICE_GP_ISA_AWB_MUX_SEL_DPC = 1, + /* ISA_AWB_MUX_SEL options */ + /* UNBLOCK DPC input */ + IPU_DEVICE_GP_ISA_AWB_MUX_ICA_UNBLOCK = 0, + /* BLOCK DPC input */ + IPU_DEVICE_GP_ISA_AWB_MUX_ICA_BLOCK = 1, + /* ISA_AWB_MUX_SEL options */ + /* UNBLOCK Input Correction input */ + IPU_DEVICE_GP_ISA_AWB_MUX_DPC_UNBLOCK = 0, + /* BLOCK Input Correction input */ + IPU_DEVICE_GP_ISA_AWB_MUX_DPC_BLOCK = 1, + + /* PAF STRM options */ + /* Disable streaming to PAF FF*/ + IPU_DEVICE_GP_ISA_PAF_DISABLE_STREAM = 0, + /* Enable stream0 to PAF FF*/ + IPU_DEVICE_GP_ISA_PAF_ENABLE_STREAM0 = 1, + /* Enable stream1 to PAF FF*/ + IPU_DEVICE_GP_ISA_PAF_ENABLE_STREAM1 = 2, + /* PAF SRC SEL options */ + /* External channel input */ + IPU_DEVICE_GP_ISA_PAF_SRC_SEL0 = 0, + /* DPC extracted input */ + IPU_DEVICE_GP_ISA_PAF_SRC_SEL1 = 1, + /* PAF_GDDPC_BLK options */ + IPU_DEVICE_GP_ISA_PAF_GDDPC_PORT_BLK0 = 0, + IPU_DEVICE_GP_ISA_PAF_GDDPC_PORT_BLK1 = 1, + /* PAF ISA STR_PORT options */ + IPU_DEVICE_GP_ISA_PAF_STR_PORT0 = 0, + IPU_DEVICE_GP_ISA_PAF_STR_PORT1 = 1, + + /* sis port block options */ + 
IPU_DEVICE_GP_ISA_SIS_PORT_UNBLOCK = 0, + IPU_DEVICE_GP_ISA_SIS_PORT_BLOCK = 1, + IPU_DEVICE_GP_ISA_CONF_INVALID = 0xFF +}; + +enum ipu_device_gp_psa_value { + /* Defines needed for bxtB0 */ + /* PSA_STILLS_MODE_MUX */ + IPU_DEVICE_GP_PSA_MUX_POST_RYNR_ROUTE_WO_DM = 0, + IPU_DEVICE_GP_PSA_MUX_POST_RYNR_ROUTE_W_DM = 1, + /* PSA_ACM_DEMUX */ + IPU_DEVICE_GP_PSA_DEMUX_PRE_ACM_ROUTE_TO_ACM = 0, + IPU_DEVICE_GP_PSA_DEMUX_PRE_ACM_ROUTE_TO_S2V = 1, + /* PSA_S2V_RGB_F_MUX */ + IPU_DEVICE_GP_PSA_MUX_PRE_S2V_RGB_F_FROM_ACM = 0, + IPU_DEVICE_GP_PSA_MUX_PRE_S2V_RGB_F_FROM_DM_OR_SPLITTER = 1, + /* PSA_V2S_RGB_4_DEMUX */ + IPU_DEVICE_GP_PSA_DEMUX_POST_V2S_RGB_4_TO_GTM = 0, + IPU_DEVICE_GP_PSA_DEMUX_POST_V2S_RGB_4_TO_ACM = 1, +}; + +enum ipu_device_gp_isl_value { + /* choose and route pixel stream to CSI BE */ + IPU_DEVICE_GP_ISL_CSI_BE_IN_USE = 0, + /* choose and route pixel stream bypass CSI BE */ + IPU_DEVICE_GP_ISL_CSI_BE_BYPASS +}; + +#endif /* __IPU_DEVICE_GP_PROPERTIES_TYPES_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_acb_devices.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_acb_devices.h new file mode 100644 index 000000000000..4898fbb2e875 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_acb_devices.h @@ -0,0 +1,43 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __IPU_DEVICE_ACB_DEVICES_H +#define __IPU_DEVICE_ACB_DEVICES_H + +enum ipu_device_acb_id { + /* PSA accelerators */ + IPU_DEVICE_ACB_WBA_ID = 0, + IPU_DEVICE_ACB_RYNR_ID, + IPU_DEVICE_ACB_DEMOSAIC_ID, + IPU_DEVICE_ACB_ACM_ID, /* In CNLB0 ACM is called VCA in HW */ + IPU_DEVICE_ACB_GTC_ID, + IPU_DEVICE_ACB_YUV1_ID, + IPU_DEVICE_ACB_DVS_ID, + IPU_DEVICE_ACB_LACE_ID, + /* ISA accelerators */ + IPU_DEVICE_ACB_ICA_ID, + IPU_DEVICE_ACB_LSC_ID, + IPU_DEVICE_ACB_DPC_ID, + IPU_DEVICE_ACB_IDS_ID, + IPU_DEVICE_ACB_AWB_ID, + IPU_DEVICE_ACB_AF_ID, + IPU_DEVICE_ACB_AE_ID, + IPU_DEVICE_ACB_NUM_ACB +}; + +#define IPU_DEVICE_ACB_NUM_PSA_ACB (IPU_DEVICE_ACB_LACE_ID + 1) +#define IPU_DEVICE_ACB_NUM_ISA_ACB \ + (IPU_DEVICE_ACB_NUM_ACB - IPU_DEVICE_ACB_NUM_PSA_ACB) + +#endif /* __IPU_DEVICE_ACB_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_devices.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_devices.h new file mode 100644 index 000000000000..0c923d139638 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_devices.h @@ -0,0 +1,38 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __IPU_DEVICE_CELL_DEVICES_H +#define __IPU_DEVICE_CELL_DEVICES_H + +#define SPC0_CELL processing_system_sp_cluster_sp_cluster_logic_spc_tile_sp +#define SPP0_CELL processing_system_sp_cluster_sp_cluster_logic_spp_tile0_sp +#define SPP1_CELL processing_system_sp_cluster_sp_cluster_logic_spp_tile1_sp +#define ISP0_CELL processing_system_isp_tile0_logic_isp +#define ISP1_CELL processing_system_isp_tile1_logic_isp +#define ISP2_CELL processing_system_isp_tile2_logic_isp +#define ISP3_CELL processing_system_isp_tile3_logic_isp + +enum ipu_device_psys_cell_id { + SPC0, + SPP0, + SPP1, + ISP0, + ISP1, + ISP2, + ISP3 +}; +#define NUM_CELLS (ISP3 + 1) +#define NUM_ISP_CELLS 4 + +#endif /* __IPU_DEVICE_CELL_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_properties_defs.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_properties_defs.h new file mode 100644 index 000000000000..09241bea7250 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_properties_defs.h @@ -0,0 +1,66 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +/* Generated file - please do not edit. 
*/ + +#ifndef _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ +#define _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ +#define SPC0_REGS_CBUS_ADDRESS 0x00000000 +#define SPC0_DMEM_CBUS_ADDRESS 0x00008000 +#define SPC0_DMEM_DBUS_ADDRESS 0x02000000 +#define SPC0_DMEM_DMA_M0_ADDRESS SPC0_DMEM_DBUS_ADDRESS +#define SPC0_DMEM_INT_DMA_M0_ADDRESS SPC0_DMEM_DBUS_ADDRESS +#define SPP0_REGS_CBUS_ADDRESS 0x00020000 +#define SPP0_DMEM_CBUS_ADDRESS 0x00028000 +#define SPP0_DMEM_DBUS_ADDRESS 0x02020000 +#define SPP1_REGS_CBUS_ADDRESS 0x00030000 +#define SPP1_DMEM_CBUS_ADDRESS 0x00038000 +#define SPP1_DMEM_DBUS_ADDRESS 0x02030000 +#define ISP0_REGS_CBUS_ADDRESS 0x001C0000 +#define ISP0_PMEM_CBUS_ADDRESS 0x001D0000 +#define ISP0_DMEM_CBUS_ADDRESS 0x001F0000 +#define ISP0_BAMEM_CBUS_ADDRESS 0x00200000 +#define ISP0_VMEM_CBUS_ADDRESS 0x00220000 +#define ISP1_REGS_CBUS_ADDRESS 0x00240000 +#define ISP1_PMEM_CBUS_ADDRESS 0x00250000 +#define ISP1_DMEM_CBUS_ADDRESS 0x00270000 +#define ISP1_BAMEM_CBUS_ADDRESS 0x00280000 +#define ISP1_VMEM_CBUS_ADDRESS 0x002A0000 +#define ISP2_REGS_CBUS_ADDRESS 0x002C0000 +#define ISP2_PMEM_CBUS_ADDRESS 0x002D0000 +#define ISP2_DMEM_CBUS_ADDRESS 0x002F0000 +#define ISP2_BAMEM_CBUS_ADDRESS 0x00300000 +#define ISP2_VMEM_CBUS_ADDRESS 0x00320000 +#define ISP3_REGS_CBUS_ADDRESS 0x00340000 +#define ISP3_PMEM_CBUS_ADDRESS 0x00350000 +#define ISP3_DMEM_CBUS_ADDRESS 0x00370000 +#define ISP3_BAMEM_CBUS_ADDRESS 0x00380000 +#define ISP3_VMEM_CBUS_ADDRESS 0x003A0000 +#define ISP0_PMEM_DBUS_ADDRESS 0x08000000 +#define ISP0_DMEM_DBUS_ADDRESS 0x08400000 +#define ISP0_BAMEM_DBUS_ADDRESS 0x09000000 +#define ISP0_VMEM_DBUS_ADDRESS 0x08800000 +#define ISP1_PMEM_DBUS_ADDRESS 0x0A000000 +#define ISP1_DMEM_DBUS_ADDRESS 0x0A400000 +#define ISP1_BAMEM_DBUS_ADDRESS 0x0B000000 +#define ISP1_VMEM_DBUS_ADDRESS 0x0A800000 +#define ISP2_PMEM_DBUS_ADDRESS 0x0C000000 +#define ISP2_DMEM_DBUS_ADDRESS 0x0C400000 +#define ISP2_BAMEM_DBUS_ADDRESS 0x0D000000 +#define ISP2_VMEM_DBUS_ADDRESS 0x0C800000 +#define ISP3_PMEM_DBUS_ADDRESS 0x0E000000 +#define ISP3_DMEM_DBUS_ADDRESS 0x0E400000 +#define ISP3_BAMEM_DBUS_ADDRESS 0x0F000000 +#define ISP3_VMEM_DBUS_ADDRESS 0x0E800000 +#endif /* _IPU_DEVICE_CELL_PROPERTIES_DEFS_H_ */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_properties_impl.h new file mode 100644 index 000000000000..428a394e8136 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_cell_properties_impl.h @@ -0,0 +1,193 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IPU_DEVICE_CELL_PROPERTIES_IMPL_H +#define __IPU_DEVICE_CELL_PROPERTIES_IMPL_H + +#include "ipu_device_sp2600_control_properties_impl.h" +#include "ipu_device_sp2600_proxy_properties_impl.h" +#include "ipu_device_isp2600_properties_impl.h" +#include "ipu_device_cell_properties_defs.h" +#include "ipu_device_cell_devices.h" +#include "ipu_device_cell_type_properties.h"/* IPU_DEVICE_INVALID_MEM_ADDRESS */ + +static const unsigned int +ipu_device_spc0_mem_address[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + SPC0_REGS_CBUS_ADDRESS, + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPC0_DMEM_CBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spp0_mem_address[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + SPP0_REGS_CBUS_ADDRESS, + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPP0_DMEM_CBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spp1_mem_address[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + SPP1_REGS_CBUS_ADDRESS, + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPP1_DMEM_CBUS_ADDRESS +}; + +static const unsigned int +ipu_device_isp0_mem_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + ISP0_REGS_CBUS_ADDRESS, /* reg addr */ + ISP0_PMEM_CBUS_ADDRESS, /* pmem addr */ + ISP0_DMEM_CBUS_ADDRESS, /* dmem addr */ + ISP0_BAMEM_CBUS_ADDRESS,/* bamem addr */ + ISP0_VMEM_CBUS_ADDRESS /* vmem addr */ +}; + +static const unsigned int +ipu_device_isp1_mem_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + ISP1_REGS_CBUS_ADDRESS, /* reg addr */ + ISP1_PMEM_CBUS_ADDRESS, /* pmem addr */ + ISP1_DMEM_CBUS_ADDRESS, /* dmem addr */ + ISP1_BAMEM_CBUS_ADDRESS,/* bamem addr */ + ISP1_VMEM_CBUS_ADDRESS /* vmem addr */ +}; + +static const unsigned int +ipu_device_isp2_mem_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + ISP2_REGS_CBUS_ADDRESS, /* reg addr */ + ISP2_PMEM_CBUS_ADDRESS, /* pmem addr */ + ISP2_DMEM_CBUS_ADDRESS, /* dmem addr */ + ISP2_BAMEM_CBUS_ADDRESS,/* bamem addr */ + ISP2_VMEM_CBUS_ADDRESS /* vmem addr */ +}; + +static const unsigned int +ipu_device_isp3_mem_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + ISP3_REGS_CBUS_ADDRESS, /* reg addr */ + ISP3_PMEM_CBUS_ADDRESS, /* pmem addr */ + ISP3_DMEM_CBUS_ADDRESS, /* dmem addr */ + ISP3_BAMEM_CBUS_ADDRESS,/* bamem addr */ + ISP3_VMEM_CBUS_ADDRESS /* vmem addr */ +}; + +static const unsigned int +ipu_device_spc0_mem_databus_address[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPC0_DMEM_DBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spp0_mem_databus_address[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPP0_DMEM_DBUS_ADDRESS +}; + +static const unsigned int +ipu_device_spp1_mem_databus_address[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no pmem */ + SPP1_DMEM_DBUS_ADDRESS +}; + +static const unsigned int +ipu_device_isp0_mem_databus_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + ISP0_PMEM_DBUS_ADDRESS, /* pmem databus addr */ + ISP0_DMEM_DBUS_ADDRESS, /* dmem databus addr */ + ISP0_BAMEM_DBUS_ADDRESS, /* bamem databus addr */ + ISP0_VMEM_DBUS_ADDRESS /* vmem databus addr */ +}; + +static const unsigned int +ipu_device_isp1_mem_databus_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + ISP1_PMEM_DBUS_ADDRESS, /* pmem databus addr */ + 
ISP1_DMEM_DBUS_ADDRESS, /* dmem databus addr */ + ISP1_BAMEM_DBUS_ADDRESS, /* bamem databus addr */ + ISP1_VMEM_DBUS_ADDRESS /* vmem databus addr */ +}; + +static const unsigned int +ipu_device_isp2_mem_databus_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + ISP2_PMEM_DBUS_ADDRESS, /* pmem databus addr */ + ISP2_DMEM_DBUS_ADDRESS, /* dmem databus addr */ + ISP2_BAMEM_DBUS_ADDRESS, /* bamem databus addr */ + ISP2_VMEM_DBUS_ADDRESS /* vmem databus addr */ +}; + +static const unsigned int +ipu_device_isp3_mem_databus_address[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + IPU_DEVICE_INVALID_MEM_ADDRESS, /* no reg addr */ + ISP3_PMEM_DBUS_ADDRESS, /* pmem databus addr */ + ISP3_DMEM_DBUS_ADDRESS, /* dmem databus addr */ + ISP3_BAMEM_DBUS_ADDRESS, /* bamem databus addr */ + ISP3_VMEM_DBUS_ADDRESS /* vmem databus addr */ +}; + +static const struct ipu_device_cell_properties_s +ipu_device_cell_properties[NUM_CELLS] = { + { + &ipu_device_sp2600_control_properties, + ipu_device_spc0_mem_address, + ipu_device_spc0_mem_databus_address + }, + { + &ipu_device_sp2600_proxy_properties, + ipu_device_spp0_mem_address, + ipu_device_spp0_mem_databus_address + }, + { + &ipu_device_sp2600_proxy_properties, + ipu_device_spp1_mem_address, + ipu_device_spp1_mem_databus_address + }, + { + &ipu_device_isp2600_properties, + ipu_device_isp0_mem_address, + ipu_device_isp0_mem_databus_address + }, + { + &ipu_device_isp2600_properties, + ipu_device_isp1_mem_address, + ipu_device_isp1_mem_databus_address + }, + { + &ipu_device_isp2600_properties, + ipu_device_isp2_mem_address, + ipu_device_isp2_mem_databus_address + }, + { + &ipu_device_isp2600_properties, + ipu_device_isp3_mem_address, + ipu_device_isp3_mem_databus_address + } +}; + +#ifdef C_RUN + +/* Mapping between hrt_hive_processors enum and cell_id's used in FW */ +static const int ipu_device_map_cell_id_to_crun_proc_id[NUM_CELLS] = { + 4, /* SPC0 */ + 5, /* SPP0 */ + 6, /* SPP1 */ + 0, /* ISP0 */ + 1, /* ISP1 */ + 2, /* ISP2 */ + 3 /* ISP3 */ +}; + +#endif + +#endif /* __IPU_DEVICE_CELL_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_ff_devices.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_ff_devices.h new file mode 100644 index 000000000000..d784fb47ffaa --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_ff_devices.h @@ -0,0 +1,57 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IPU_DEVICE_FF_DEVICES_H +#define __IPU_DEVICE_FF_DEVICES_H + +enum ipu_device_ff_id { + /* Names (shortened) as used in */ + /* PSA fixed functions */ /* ipu_device_ff_hrt.txt */ + IPU_DEVICE_FF_WBA_WBA = 0, /* WBA_WBA */ + IPU_DEVICE_FF_RYNR_SPLITTER, /* RYNR_RYNR_SPLITTER */ + IPU_DEVICE_FF_RYNR_COLLECTOR, /* RYNR_RYNR_COLLECTOR */ + IPU_DEVICE_FF_RYNR_BNLM, /* RYNR_BNLM */ + IPU_DEVICE_FF_RYNR_VCUD, /* RYNR_VCUD */ + IPU_DEVICE_FF_DEMOSAIC_DEMOSAIC,/* DEMOSAIC_DEMOSAIC */ + IPU_DEVICE_FF_ACM_CCM, /* VCA_VCR, name as used in CNLB0 HW */ + IPU_DEVICE_FF_ACM_ACM, /* VCA_ACM, name as used in CNLB0 HW */ + IPU_DEVICE_FF_VCA_VCR2, /* VCA_VCR, part of ACM */ + IPU_DEVICE_FF_GTC_CSC_CDS, /* GTC_CSC_CDS */ + IPU_DEVICE_FF_GTC_GTM, /* GTC_GTM */ + IPU_DEVICE_FF_YUV1_SPLITTER, /* YUV1_Processing_YUV_SPLITTER */ + IPU_DEVICE_FF_YUV1_IEFD, /* YUV1_Processing_IEFD*/ + IPU_DEVICE_FF_YUV1_YDS, /* YUV1_Processing_YDS */ + IPU_DEVICE_FF_YUV1_TCC, /* YUV1_Processing_TCC */ + IPU_DEVICE_FF_DVS_YBIN, /* DVS_YBIN */ + IPU_DEVICE_FF_DVS_DVS, /* DVS_DVS */ + IPU_DEVICE_FF_LACE_LACE, /* Lace_Stat_LACE_STAT */ + /* ISA fixed functions */ + IPU_DEVICE_FF_ICA_INL, /* Input_Corr_INL */ + IPU_DEVICE_FF_ICA_GBL, /* Input_Corr_GBL */ + IPU_DEVICE_FF_ICA_PCLN, /* Input_Corr_PCLN */ + IPU_DEVICE_FF_LSC_LSC, /* Bayer_Lsc_LSC */ + IPU_DEVICE_FF_DPC_DPC, /* Bayer_Dpc_GDDPC */ + IPU_DEVICE_FF_IDS_SCALER, /* Bayer_Scaler_SCALER */ + IPU_DEVICE_FF_AWB_AWRG, /* Stat_AWB_AWRG */ + IPU_DEVICE_FF_AF_AF, /* Stat_AF_AWB_FR_AF_AWB_FR_GRD */ + IPU_DEVICE_FF_AE_WGHT_HIST, /* Stat_AE_WGHT_HIST */ + IPU_DEVICE_FF_AE_CCM, /* Stat_AE_AE_CCM */ + IPU_DEVICE_FF_NUM_FF +}; + +#define IPU_DEVICE_FF_NUM_PSA_FF (IPU_DEVICE_FF_LACE_LACE + 1) +#define IPU_DEVICE_FF_NUM_ISA_FF \ + (IPU_DEVICE_FF_NUM_FF - IPU_DEVICE_FF_NUM_PSA_FF) + +#endif /* __IPU_DEVICE_FF_DEVICES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_gp_devices.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_gp_devices.h new file mode 100644 index 000000000000..ab8cd6a783ce --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/psys/cnlB0/ipu_device_gp_devices.h @@ -0,0 +1,67 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IPU_DEVICE_GP_DEVICES_H
+#define __IPU_DEVICE_GP_DEVICES_H
+#include "math_support.h"
+#include "type_support.h"
+
+enum ipu_device_gp_id {
+	IPU_DEVICE_GP_PSA = 0, /* PSA */
+	IPU_DEVICE_GP_ISA_STATIC, /* ISA Static */
+	IPU_DEVICE_GP_ISA_RUNTIME, /* ISA Runtime */
+	IPU_DEVICE_GP_ISL, /* ISL */
+	IPU_DEVICE_GP_NUM_GP
+};
+
+enum ipu_device_gp_psa_mux_id {
+	/* Post RYNR/CCN: 0-To ACM (Video), 1-To Demosaic (Stills) */
+	IPU_DEVICE_GP_PSA_STILLS_MODE_MUX = 0,
+	/* Post Vec2Str 4: 0-To GTC, 1-To ACM */
+	IPU_DEVICE_GP_PSA_V2S_RGB_4_DEMUX,
+	/* Post DM and pre ACM: 0-CCM/ACM, 1-DM Component Splitter */
+	IPU_DEVICE_GP_PSA_S2V_RGB_F_MUX,
+	/* Pre ACM/CCM: 0-To CCM/ACM, 1-To str2vec id_f */
+	IPU_DEVICE_GP_PSA_ACM_DEMUX,
+	IPU_DEVICE_GP_PSA_MUX_NUM_MUX
+};
+
+enum ipu_device_gp_isa_static_mux_id {
+	IPU_DEVICE_GP_ISA_STATIC_MUX_SEL = 0,
+	IPU_DEVICE_GP_ISA_STATIC_PORTA_BLK,
+	IPU_DEVICE_GP_ISA_STATIC_PORTB_BLK,
+	IPU_DEVICE_GP_ISA_STATIC_PORTC_BLK,
+	IPU_DEVICE_GP_ISA_STATIC_AWB_MUX_SEL,
+	IPU_DEVICE_GP_ISA_STATIC_AWB_MUX_INPUT_CORR_PORT_BLK,
+	IPU_DEVICE_GP_ISA_STATIC_AWB_MUX_DPC_PORT_BLK,
+	IPU_DEVICE_GP_ISA_STATIC_MUX_NUM_MUX
+};
+
+enum ipu_device_gp_isa_runtime_mux_id {
+	IPU_DEVICE_GP_ISA_RUNTIME_FRAME_SIZE = 0,
+	IPU_DEVICE_GP_ISA_RUNTIME_SCALED_FRAME_SIZE,
+	IPU_DEVICE_GP_ISA_RUNTIME_MUX_NUM_MUX
+};
+
+enum ipu_device_gp_isl_mux_id {
+	IPU_DEVICE_GP_ISL_MIPI_BE_MUX = 0,
+	IPU_DEVICE_GP_ISL_MUX_NUM_MUX
+};
+
+#define IPU_DEVICE_GP_MAX_NUM MAX4((uint32_t)IPU_DEVICE_GP_PSA_MUX_NUM_MUX, \
+	(uint32_t)IPU_DEVICE_GP_ISA_STATIC_MUX_NUM_MUX, \
+	(uint32_t)IPU_DEVICE_GP_ISA_RUNTIME_MUX_NUM_MUX, \
+	(uint32_t)IPU_DEVICE_GP_ISL_MUX_NUM_MUX)
+
+#endif /* __IPU_DEVICE_GP_DEVICES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_isp2600_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_isp2600_properties_impl.h
new file mode 100644
index 000000000000..de733be67998
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_isp2600_properties_impl.h
@@ -0,0 +1,151 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IPU_DEVICE_ISP2600_PROPERTIES_IMPL_H +#define __IPU_DEVICE_ISP2600_PROPERTIES_IMPL_H + +/* isp2600 definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_isp2600_registers { + /* control registers */ + IPU_DEVICE_ISP2600_STAT_CTRL = 0x0, + IPU_DEVICE_ISP2600_START_PC = 0x4, + + /* master port registers */ + IPU_DEVICE_ISP2600_ICACHE_BASE = 0x10, + IPU_DEVICE_ISP2600_ICACHE_INFO = 0x14, + IPU_DEVICE_ISP2600_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_ISP2600_QMEM_BASE = 0x1C, + + IPU_DEVICE_ISP2600_CMEM_BASE = 0x28, + + IPU_DEVICE_ISP2600_XMEM_BASE = 0x88, + IPU_DEVICE_ISP2600_XMEM_INFO = 0x8C, + IPU_DEVICE_ISP2600_XMEM_INFO_OVERRIDE = 0x90, + + IPU_DEVICE_ISP2600_XVMEM_BASE = 0xB8, + + /* debug registers */ + IPU_DEVICE_ISP2600_DEBUG_PC = 0x130, + IPU_DEVICE_ISP2600_STALL = 0x134 +}; + + +enum ipu_device_isp2600_memories { + IPU_DEVICE_ISP2600_REGS, + IPU_DEVICE_ISP2600_PMEM, + IPU_DEVICE_ISP2600_DMEM, + IPU_DEVICE_ISP2600_BAMEM, + IPU_DEVICE_ISP2600_VMEM, + IPU_DEVICE_ISP2600_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_isp2600_mem_size[IPU_DEVICE_ISP2600_NUM_MEMORIES] = { + 0x00140, + 0x14000, + 0x04000, + 0x20000, + 0x20000 +}; + + +enum ipu_device_isp2600_masters { + IPU_DEVICE_ISP2600_ICACHE, + IPU_DEVICE_ISP2600_QMEM, + IPU_DEVICE_ISP2600_CMEM, + IPU_DEVICE_ISP2600_XMEM, + IPU_DEVICE_ISP2600_XVMEM, + IPU_DEVICE_ISP2600_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_isp2600_masters[IPU_DEVICE_ISP2600_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_ISP2600_ICACHE_BASE, + IPU_DEVICE_ISP2600_ICACHE_INFO, + IPU_DEVICE_ISP2600_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_ISP2600_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 3, + 0xC, + IPU_DEVICE_ISP2600_CMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 2, + 0xC, + IPU_DEVICE_ISP2600_XMEM_BASE, + IPU_DEVICE_ISP2600_XMEM_INFO, + IPU_DEVICE_ISP2600_XMEM_INFO_OVERRIDE + }, + { + 3, + 0xC, + IPU_DEVICE_ISP2600_XVMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + } +}; + +enum ipu_device_isp2600_stall_bits { + IPU_DEVICE_ISP2600_STALL_ICACHE0, + IPU_DEVICE_ISP2600_STALL_ICACHE1, + IPU_DEVICE_ISP2600_STALL_DMEM, + IPU_DEVICE_ISP2600_STALL_QMEM, + IPU_DEVICE_ISP2600_STALL_CMEM, + IPU_DEVICE_ISP2600_STALL_XMEM, + IPU_DEVICE_ISP2600_STALL_BAMEM, + IPU_DEVICE_ISP2600_STALL_VMEM, + IPU_DEVICE_ISP2600_STALL_XVMEM, + IPU_DEVICE_ISP2600_NUM_STALL_BITS +}; + +#define IPU_DEVICE_ISP2600_ICACHE_WORD_SIZE 64 /* 512 bits per instruction */ +#define IPU_DEVICE_ISP2600_ICACHE_BURST_SIZE 8 /* 8 instructions per burst */ + +static const struct ipu_device_cell_count_s ipu_device_isp2600_count = { + IPU_DEVICE_ISP2600_NUM_MEMORIES, + IPU_DEVICE_ISP2600_NUM_MASTERS, + IPU_DEVICE_ISP2600_NUM_STALL_BITS, + IPU_DEVICE_ISP2600_ICACHE_WORD_SIZE * + IPU_DEVICE_ISP2600_ICACHE_BURST_SIZE +}; + +static const unsigned int ipu_device_isp2600_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0x130, 0x134 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_isp2600_properties = { + &ipu_device_isp2600_count, + ipu_device_isp2600_masters, + ipu_device_isp2600_reg_offset, + ipu_device_isp2600_mem_size +}; + +#endif /* __IPU_DEVICE_ISP2600_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_control_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_control_properties_impl.h new file mode 100644 index 000000000000..430295cd9d94 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_control_properties_impl.h @@ -0,0 +1,136 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H +#define __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H + +/* sp2600_control definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_sp2600_control_registers { + /* control registers */ + IPU_DEVICE_SP2600_CONTROL_STAT_CTRL = 0x0, + IPU_DEVICE_SP2600_CONTROL_START_PC = 0x4, + + /* master port registers */ + IPU_DEVICE_SP2600_CONTROL_ICACHE_BASE = 0x10, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO = 0x14, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_SP2600_CONTROL_QMEM_BASE = 0x1C, + + IPU_DEVICE_SP2600_CONTROL_CMEM_BASE = 0x28, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO = 0x2C, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO_OVERRIDE = 0x30, + + IPU_DEVICE_SP2600_CONTROL_XMEM_BASE = 0x58, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO = 0x5C, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO_OVERRIDE = 0x60, + + /* debug registers */ + IPU_DEVICE_SP2600_CONTROL_DEBUG_PC = 0x9C, + IPU_DEVICE_SP2600_CONTROL_STALL = 0xA0 +}; + +enum ipu_device_sp2600_control_mems { + IPU_DEVICE_SP2600_CONTROL_REGS, + IPU_DEVICE_SP2600_CONTROL_PMEM, + IPU_DEVICE_SP2600_CONTROL_DMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_sp2600_control_mem_size[IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES] = { + 0x000AC, + 0x00000, + 0x10000 +}; + +enum ipu_device_sp2600_control_masters { + IPU_DEVICE_SP2600_CONTROL_ICACHE, + IPU_DEVICE_SP2600_CONTROL_QMEM, + IPU_DEVICE_SP2600_CONTROL_CMEM, + IPU_DEVICE_SP2600_CONTROL_XMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_sp2600_control_masters[IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_SP2600_CONTROL_ICACHE_BASE, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO, + IPU_DEVICE_SP2600_CONTROL_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_SP2600_CONTROL_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_CONTROL_CMEM_BASE, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO, + IPU_DEVICE_SP2600_CONTROL_CMEM_INFO_OVERRIDE + }, + { + 2, + 0xC, + IPU_DEVICE_SP2600_CONTROL_XMEM_BASE, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO, + IPU_DEVICE_SP2600_CONTROL_XMEM_INFO_OVERRIDE + } +}; + +enum ipu_device_sp2600_control_stall_bits { + IPU_DEVICE_SP2600_CONTROL_STALL_ICACHE, + IPU_DEVICE_SP2600_CONTROL_STALL_DMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_QMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_CMEM, + IPU_DEVICE_SP2600_CONTROL_STALL_XMEM, + IPU_DEVICE_SP2600_CONTROL_NUM_STALL_BITS +}; + +/* 32 bits per instruction */ +#define IPU_DEVICE_SP2600_CONTROL_ICACHE_WORD_SIZE 4 +/* 32 instructions per burst */ +#define IPU_DEVICE_SP2600_CONTROL_ICACHE_BURST_SIZE 32 + +static const struct ipu_device_cell_count_s ipu_device_sp2600_control_count = { + IPU_DEVICE_SP2600_CONTROL_NUM_MEMORIES, + 
IPU_DEVICE_SP2600_CONTROL_NUM_MASTERS, + IPU_DEVICE_SP2600_CONTROL_NUM_STALL_BITS, + IPU_DEVICE_SP2600_CONTROL_ICACHE_WORD_SIZE * + IPU_DEVICE_SP2600_CONTROL_ICACHE_BURST_SIZE +}; + +static const unsigned int +ipu_device_sp2600_control_reg_offset[/* CELL_NUM_REGS */] = { + 0x0, 0x4, 0x10, 0x9C, 0xA0 +}; + +static const struct ipu_device_cell_type_properties_s +ipu_device_sp2600_control_properties = { + &ipu_device_sp2600_control_count, + ipu_device_sp2600_control_masters, + ipu_device_sp2600_control_reg_offset, + ipu_device_sp2600_control_mem_size +}; + +#endif /* __IPU_DEVICE_SP2600_CONTROL_PROPERTIES_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_fp_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_fp_properties_impl.h new file mode 100644 index 000000000000..b3f120f9fea8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_fp_properties_impl.h @@ -0,0 +1,140 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IPU_DEVICE_SP2600_FP_PROPERTIES_IMPL_H +#define __IPU_DEVICE_SP2600_FP_PROPERTIES_IMPL_H + +/* sp2600_fp definition */ + +#include "ipu_device_cell_properties_struct.h" + +enum ipu_device_sp2600_fp_registers { + /* control registers */ + IPU_DEVICE_SP2600_FP_STAT_CTRL = 0x0, + IPU_DEVICE_SP2600_FP_START_PC = 0x4, + + /* master port registers */ + IPU_DEVICE_SP2600_FP_ICACHE_BASE = 0x10, + IPU_DEVICE_SP2600_FP_ICACHE_INFO = 0x14, + IPU_DEVICE_SP2600_FP_ICACHE_INFO_OVERRIDE = 0x18, + + IPU_DEVICE_SP2600_FP_QMEM_BASE = 0x1C, + + IPU_DEVICE_SP2600_FP_CMEM_BASE = 0x28, + IPU_DEVICE_SP2600_FP_CMEM_INFO = 0x2C, + IPU_DEVICE_SP2600_FP_CMEM_INFO_OVERRIDE = 0x30, + + IPU_DEVICE_SP2600_FP_XMEM_BASE = 0x88, + IPU_DEVICE_SP2600_FP_XMEM_INFO = 0x8C, + IPU_DEVICE_SP2600_FP_XMEM_INFO_OVERRIDE = 0x90, + + /* debug registers */ + IPU_DEVICE_SP2600_FP_DEBUG_PC = 0xCC, + IPU_DEVICE_SP2600_FP_STALL = 0xD0 +}; + + +enum ipu_device_sp2600_fp_memories { + IPU_DEVICE_SP2600_FP_REGS, + IPU_DEVICE_SP2600_FP_PMEM, + IPU_DEVICE_SP2600_FP_DMEM, + IPU_DEVICE_SP2600_FP_DMEM1, + IPU_DEVICE_SP2600_FP_NUM_MEMORIES +}; + +static const unsigned int +ipu_device_sp2600_fp_mem_size[IPU_DEVICE_SP2600_FP_NUM_MEMORIES] = { + 0x000DC, + 0x00000, + 0x10000, + 0x08000 +}; + +enum ipu_device_sp2600_fp_masters { + IPU_DEVICE_SP2600_FP_ICACHE, + IPU_DEVICE_SP2600_FP_QMEM, + IPU_DEVICE_SP2600_FP_CMEM, + IPU_DEVICE_SP2600_FP_XMEM, + IPU_DEVICE_SP2600_FP_NUM_MASTERS +}; + +static const struct ipu_device_cell_master_properties_s +ipu_device_sp2600_fp_masters[IPU_DEVICE_SP2600_FP_NUM_MASTERS] = { + { + 0, + 0xC, + IPU_DEVICE_SP2600_FP_ICACHE_BASE, + IPU_DEVICE_SP2600_FP_ICACHE_INFO, + IPU_DEVICE_SP2600_FP_ICACHE_INFO_OVERRIDE + }, + { + 0, + 0xC, + IPU_DEVICE_SP2600_FP_QMEM_BASE, + 0xFFFFFFFF, + 0xFFFFFFFF + }, + { + 3, + 0xC, + IPU_DEVICE_SP2600_FP_CMEM_BASE, + IPU_DEVICE_SP2600_FP_CMEM_INFO, + IPU_DEVICE_SP2600_FP_CMEM_INFO_OVERRIDE + }, + { + 
2,
+		0xC,
+		IPU_DEVICE_SP2600_FP_XMEM_BASE,
+		IPU_DEVICE_SP2600_FP_XMEM_INFO,
+		IPU_DEVICE_SP2600_FP_XMEM_INFO_OVERRIDE
+	}
+};
+
+enum ipu_device_sp2600_fp_stall_bits {
+	IPU_DEVICE_SP2600_FP_STALL_ICACHE,
+	IPU_DEVICE_SP2600_FP_STALL_DMEM,
+	IPU_DEVICE_SP2600_FP_STALL_QMEM,
+	IPU_DEVICE_SP2600_FP_STALL_CMEM,
+	IPU_DEVICE_SP2600_FP_STALL_XMEM,
+	IPU_DEVICE_SP2600_FP_STALL_DMEM1,
+	IPU_DEVICE_SP2600_FP_NUM_STALL_BITS
+};
+
+/* 32 bits per instruction */
+#define IPU_DEVICE_SP2600_FP_ICACHE_WORD_SIZE 4
+/* 32 instructions per burst */
+#define IPU_DEVICE_SP2600_FP_ICACHE_BURST_SIZE 32
+
+static const struct ipu_device_cell_count_s ipu_device_sp2600_fp_count = {
+	IPU_DEVICE_SP2600_FP_NUM_MEMORIES,
+	IPU_DEVICE_SP2600_FP_NUM_MASTERS,
+	IPU_DEVICE_SP2600_FP_NUM_STALL_BITS,
+	IPU_DEVICE_SP2600_FP_ICACHE_WORD_SIZE *
+	IPU_DEVICE_SP2600_FP_ICACHE_BURST_SIZE
+};
+
+static const unsigned int
+ipu_device_sp2600_fp_reg_offset[/* CELL_NUM_REGS */] = {
+	0x0, 0x4, 0x10, 0xCC, 0xD0 /* DEBUG_PC/STALL per the fp map above */
+};
+
+static const struct ipu_device_cell_type_properties_s
+ipu_device_sp2600_fp_properties = {
+	&ipu_device_sp2600_fp_count,
+	ipu_device_sp2600_fp_masters,
+	ipu_device_sp2600_fp_reg_offset,
+	ipu_device_sp2600_fp_mem_size
+};
+
+#endif /* __IPU_DEVICE_SP2600_FP_PROPERTIES_IMPL_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_proxy_properties_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_proxy_properties_impl.h
new file mode 100644
index 000000000000..6fdcd7faea9b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/devices/src/ipu_device_sp2600_proxy_properties_impl.h
@@ -0,0 +1,138 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IPU_DEVICE_SP2600_PROXY_PROPERTIES_IMPL_H
+#define __IPU_DEVICE_SP2600_PROXY_PROPERTIES_IMPL_H
+
+/* sp2600_proxy definition */
+
+#include "ipu_device_cell_properties_struct.h"
+
+enum ipu_device_sp2600_proxy_registers {
+	/* control registers */
+	IPU_DEVICE_SP2600_PROXY_STAT_CTRL = 0x0,
+	IPU_DEVICE_SP2600_PROXY_START_PC = 0x4,
+
+	/* THESE ADDRESSES NEED TO BE CHECKED !!!!
+	 */
+	/* master port registers */
+	IPU_DEVICE_SP2600_PROXY_ICACHE_BASE = 0x10,
+	IPU_DEVICE_SP2600_PROXY_ICACHE_INFO = 0x14,
+	IPU_DEVICE_SP2600_PROXY_ICACHE_INFO_OVERRIDE = 0x18,
+
+	IPU_DEVICE_SP2600_PROXY_QMEM_BASE = 0x1C,
+
+	IPU_DEVICE_SP2600_PROXY_CMEM_BASE = 0x28,
+	IPU_DEVICE_SP2600_PROXY_CMEM_INFO = 0x2C,
+	IPU_DEVICE_SP2600_PROXY_CMEM_INFO_OVERRIDE = 0x30,
+
+	IPU_DEVICE_SP2600_PROXY_XMEM_BASE = 0x58,
+	IPU_DEVICE_SP2600_PROXY_XMEM_INFO = 0x5C,
+	IPU_DEVICE_SP2600_PROXY_XMEM_INFO_OVERRIDE = 0x60,
+
+	/* debug registers */
+	IPU_DEVICE_SP2600_PROXY_DEBUG_PC = 0x9C,
+	IPU_DEVICE_SP2600_PROXY_STALL = 0xA0
+};
+
+
+enum ipu_device_sp2600_proxy_memories {
+	IPU_DEVICE_SP2600_PROXY_REGS,
+	IPU_DEVICE_SP2600_PROXY_PMEM,
+	IPU_DEVICE_SP2600_PROXY_DMEM,
+	IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES
+};
+
+static const unsigned int
+ipu_device_sp2600_proxy_mem_size[IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES] = {
+	0x00AC,
+	0x0000,
+	0x4000
+};
+
+enum ipu_device_sp2600_proxy_masters {
+	IPU_DEVICE_SP2600_PROXY_ICACHE,
+	IPU_DEVICE_SP2600_PROXY_QMEM,
+	IPU_DEVICE_SP2600_PROXY_CMEM,
+	IPU_DEVICE_SP2600_PROXY_XMEM,
+	IPU_DEVICE_SP2600_PROXY_NUM_MASTERS
+};
+
+static const struct ipu_device_cell_master_properties_s
+ipu_device_sp2600_proxy_masters[IPU_DEVICE_SP2600_PROXY_NUM_MASTERS] = {
+	{
+		0,
+		0xC,
+		IPU_DEVICE_SP2600_PROXY_ICACHE_BASE,
+		IPU_DEVICE_SP2600_PROXY_ICACHE_INFO,
+		IPU_DEVICE_SP2600_PROXY_ICACHE_INFO_OVERRIDE
+	},
+	{
+		0,
+		0xC,
+		IPU_DEVICE_SP2600_PROXY_QMEM_BASE,
+		0xFFFFFFFF,
+		0xFFFFFFFF
+	},
+	{
+		2,
+		0xC,
+		IPU_DEVICE_SP2600_PROXY_CMEM_BASE,
+		IPU_DEVICE_SP2600_PROXY_CMEM_INFO,
+		IPU_DEVICE_SP2600_PROXY_CMEM_INFO_OVERRIDE
+	},
+	{
+		2,
+		0xC,
+		IPU_DEVICE_SP2600_PROXY_XMEM_BASE,
+		IPU_DEVICE_SP2600_PROXY_XMEM_INFO,
+		IPU_DEVICE_SP2600_PROXY_XMEM_INFO_OVERRIDE
+	}
+};
+
+enum ipu_device_sp2600_proxy_stall_bits {
+	IPU_DEVICE_SP2600_PROXY_STALL_ICACHE,
+	IPU_DEVICE_SP2600_PROXY_STALL_DMEM,
+	IPU_DEVICE_SP2600_PROXY_STALL_QMEM,
+	IPU_DEVICE_SP2600_PROXY_STALL_CMEM,
+	IPU_DEVICE_SP2600_PROXY_STALL_XMEM,
+	IPU_DEVICE_SP2600_PROXY_NUM_STALL_BITS
+};
+
+/* 32 bits per instruction */
+#define IPU_DEVICE_SP2600_PROXY_ICACHE_WORD_SIZE 4
+/* 32 instructions per burst */
+#define IPU_DEVICE_SP2600_PROXY_ICACHE_BURST_SIZE 32
+
+static const struct ipu_device_cell_count_s ipu_device_sp2600_proxy_count = {
+	IPU_DEVICE_SP2600_PROXY_NUM_MEMORIES,
+	IPU_DEVICE_SP2600_PROXY_NUM_MASTERS,
+	IPU_DEVICE_SP2600_PROXY_NUM_STALL_BITS,
+	IPU_DEVICE_SP2600_PROXY_ICACHE_WORD_SIZE *
+	IPU_DEVICE_SP2600_PROXY_ICACHE_BURST_SIZE
+};
+
+static const unsigned int
+ipu_device_sp2600_proxy_reg_offset[/* CELL_NUM_REGS */] = {
+	0x0, 0x4, 0x10, 0x9C, 0xA0 /* DEBUG_PC/STALL per the proxy map above */
+};
+
+static const struct ipu_device_cell_type_properties_s
+ipu_device_sp2600_proxy_properties = {
+	&ipu_device_sp2600_proxy_count,
+	ipu_device_sp2600_proxy_masters,
+	ipu_device_sp2600_proxy_reg_offset,
+	ipu_device_sp2600_proxy_mem_size
+};
+
+#endif /* __IPU_DEVICE_SP2600_PROXY_PROPERTIES_IMPL_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/fw_abi_cpu_types.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/fw_abi_cpu_types.mk
new file mode 100644
index 000000000000..b1ffbf7ea21f
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/fw_abi_cpu_types.mk
@@ -0,0 +1,24 @@
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details
+#
+
+# MODULE is FW ABI COMMON TYPES
+
+FW_ABI_COMMON_TYPES_DIRS = -I$${MODULES_DIR}/fw_abi_common_types
+FW_ABI_COMMON_TYPES_DIRS += -I$${MODULES_DIR}/fw_abi_common_types/cpu
+
+FW_ABI_COMMON_TYPES_HOST_FILES =
+FW_ABI_COMMON_TYPES_HOST_CPPFLAGS = $(FW_ABI_COMMON_TYPES_DIRS)
+
+FW_ABI_COMMON_TYPES_FW_FILES =
+FW_ABI_COMMON_TYPES_FW_CPPFLAGS = $(FW_ABI_COMMON_TYPES_DIRS)
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_base_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_base_types.h
new file mode 100644
index 000000000000..73062e9db87b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_base_types.h
@@ -0,0 +1,43 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_TERMINAL_BASE_TYPES_H
+#define __IA_CSS_TERMINAL_BASE_TYPES_H
+
+
+#include "type_support.h"
+#include "ia_css_terminal_defs.h"
+
+#define N_UINT16_IN_TERMINAL_STRUCT 3
+#define N_PADDING_UINT8_IN_TERMINAL_STRUCT 5
+
+#define SIZE_OF_TERMINAL_STRUCT_BITS \
+	(IA_CSS_TERMINAL_TYPE_BITS \
+	+ IA_CSS_TERMINAL_ID_BITS \
+	+ N_UINT16_IN_TERMINAL_STRUCT * IA_CSS_UINT16_T_BITS \
+	+ N_PADDING_UINT8_IN_TERMINAL_STRUCT * IA_CSS_UINT8_T_BITS)
+
+/* ==================== Base Terminal - START ==================== */
+struct ia_css_terminal_s { /**< Base terminal */
+	ia_css_terminal_type_t terminal_type; /**< Type ia_css_terminal_type_t */
+	int16_t parent_offset; /**< Offset to the process group */
+	uint16_t size; /**< Size of this whole terminal layout-structure */
+	uint16_t tm_index; /**< Index of the terminal manifest object */
+	ia_css_terminal_ID_t ID; /**< Absolute referral ID for this terminal; valid IDs != 0 */
+	uint8_t padding[N_PADDING_UINT8_IN_TERMINAL_STRUCT];
+};
+/* ==================== Base Terminal - END ==================== */
+
+#endif /* __IA_CSS_TERMINAL_BASE_TYPES_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h
new file mode 100644
index 000000000000..24ad04fe8720
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/cpu/ia_css_terminal_manifest_base_types.h
@@ -0,0 +1,43 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H +#define __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H + +#include "ia_css_terminal_defs.h" + +#define N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT 5 +#define SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + (IA_CSS_UINT16_T_BITS \ + + IA_CSS_TERMINAL_ID_BITS \ + + IA_CSS_TERMINAL_TYPE_BITS \ + + IA_CSS_UINT32_T_BITS \ + + (N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT*IA_CSS_UINT8_T_BITS)) + +/* ==================== Base Terminal Manifest - START ==================== */ +struct ia_css_terminal_manifest_s { + ia_css_terminal_type_t terminal_type; /**< Type ia_css_terminal_type_t */ + int16_t parent_offset; /**< Offset to the program group manifest */ + uint16_t size; /**< Size of this whole terminal-manifest layout-structure */ + ia_css_terminal_ID_t ID; + uint8_t padding[N_PADDING_UINT8_IN_TERMINAL_MAN_STRUCT]; +}; + +typedef struct ia_css_terminal_manifest_s + ia_css_terminal_manifest_t; + +/* ==================== Base Terminal Manifest - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_MANIFEST_BASE_TYPES_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/ia_css_base_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/ia_css_base_types.h new file mode 100644 index 000000000000..cd508f05ed40 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/ia_css_base_types.h @@ -0,0 +1,39 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_BASE_TYPES_H +#define __IA_CSS_BASE_TYPES_H + +#include "type_support.h" + +#define VIED_VADDRESS_BITS 32 +typedef uint32_t vied_vaddress_t; + +#define DEVICE_DESCRIPTOR_ID_BITS 32 +typedef struct { + uint8_t device_id; + uint8_t instance_id; + uint8_t channel_id; + uint8_t section_id; +} device_descriptor_fields_t; + +typedef union { + device_descriptor_fields_t fields; + uint32_t data; +} device_descriptor_id_t; + +typedef uint16_t ia_css_process_id_t; + +#endif /* __IA_CSS_BASE_TYPES_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/ia_css_terminal_defs.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/ia_css_terminal_defs.h new file mode 100644 index 000000000000..3a7b333d3bf5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_abi_common_types/ia_css_terminal_defs.h @@ -0,0 +1,109 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_TERMINAL_DEFS_H
+#define __IA_CSS_TERMINAL_DEFS_H
+
+
+#include "type_support.h"
+
+#define IA_CSS_TERMINAL_ID_BITS 8
+typedef uint8_t ia_css_terminal_ID_t;
+#define IA_CSS_TERMINAL_INVALID_ID ((ia_css_terminal_ID_t)(-1))
+
+/*
+ * Terminal Base Type
+ */
+typedef enum ia_css_terminal_type {
+	/**< Data input */
+	IA_CSS_TERMINAL_TYPE_DATA_IN = 0,
+	/**< Data output */
+	IA_CSS_TERMINAL_TYPE_DATA_OUT,
+	/**< Type 6 parameter input */
+	IA_CSS_TERMINAL_TYPE_PARAM_STREAM,
+	/**< Type 1-5 parameter input */
+	IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN,
+	/**< Type 1-5 parameter output */
+	IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT,
+	/**< Represent the new type of terminal for the
+	 * "spatial dependent parameters", when params go in
+	 */
+	IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN,
+	/**< Represent the new type of terminal for the
+	 * "spatial dependent parameters", when params go out
+	 */
+	IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT,
+	/**< Represent the new type of terminal for the
+	 * explicit slicing, when params go in
+	 */
+	IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN,
+	/**< Represent the new type of terminal for the
+	 * explicit slicing, when params go out
+	 */
+	IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT,
+	/**< State (private data) input */
+	IA_CSS_TERMINAL_TYPE_STATE_IN,
+	/**< State (private data) output */
+	IA_CSS_TERMINAL_TYPE_STATE_OUT,
+	IA_CSS_TERMINAL_TYPE_PROGRAM,
+	IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT,
+	IA_CSS_N_TERMINAL_TYPES
+} ia_css_terminal_type_t;
+
+#define IA_CSS_TERMINAL_TYPE_BITS 32
+
+/* Temporary redirection needed to facilitate merging with the drivers
+   in a backwards-compatible manner */
+#define IA_CSS_TERMINAL_TYPE_PARAM_CACHED IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN
+
+/*
+ * Dimensions of the data objects. Note that a C-style
+ * data order is assumed. Data stored by row.
+ */
+/* A problem with the hivecc compiler, described at
+ * https://icggerrit.ir.intel.com/#/c/51630/1, forces this
+ * enum to be explicitly initialized for the moment
+ */
+typedef enum ia_css_dimension {
+	/**< The number of columns, i.e. the size of the row */
+	IA_CSS_COL_DIMENSION = 0,
+	/**< The number of rows, i.e. the size of the column */
+	IA_CSS_ROW_DIMENSION = 1,
+	IA_CSS_N_DATA_DIMENSION = 2
+} ia_css_dimension_t;
+
+#define IA_CSS_N_COMMAND_COUNT (4)
+
+#ifndef PIPE_GENERATION
+/* Don't include these complex enum structures in Genpipe; it can't handle them and does not need them */
+/*
+ * enum ia_css_isys_link_id. Lists the link IDs used by the FW for the On The Fly feature
+ */
+typedef enum ia_css_isys_link_id {
+	IA_CSS_ISYS_LINK_OFFLINE = 0,
+	IA_CSS_ISYS_LINK_MAIN_OUTPUT = 1,
+	IA_CSS_ISYS_LINK_PDAF_OUTPUT = 2
+} ia_css_isys_link_id_t;
+#define N_IA_CSS_ISYS_LINK_ID (IA_CSS_ISYS_LINK_PDAF_OUTPUT + 1)
+
+/*
+ * enum ia_css_data_barrier_link_id.
Lists the link IDs used by the FW for the data barrier feature
+ */
+typedef enum ia_css_data_barrier_link_id {
+	IA_CSS_DATA_BARRIER_LINK_MEMORY = N_IA_CSS_ISYS_LINK_ID,
+	N_IA_CSS_DATA_BARRIER_LINK_ID
+} ia_css_data_barrier_link_id_t;
+
+#endif /* #ifndef PIPE_GENERATION */
+#endif /* __IA_CSS_TERMINAL_DEFS_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/fw_load.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/fw_load.mk
new file mode 100644
index 000000000000..0af62100cba8
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/fw_load.mk
@@ -0,0 +1,59 @@
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details
+#
+#
+# MODULE is FW_LOAD
+
+# select implementation for fw_load
+ifeq ($(FW_LOAD_DMA), 1)
+FW_LOAD_IMPL = fwdma
+else
+FW_LOAD_IMPL = xmem
+endif
+
+FW_LOAD_FW_CPPFLAGS =
+
+# select DMA instance for fw_load
+ifeq ($(FW_LOAD_DMA_INSTANCE),)
+$(error FW_LOAD_DMA_INSTANCE not specified)
+else
+ifeq ($(FW_LOAD_DMA_INSTANCE), NCI_DMA_EXT0)
+FW_LOAD_FW_CPPFLAGS += -DFW_LOAD_INSTANCE_USE_DMA_EXT0
+else
+ifeq ($(FW_LOAD_DMA_INSTANCE), NCI_DMA_FW)
+FW_LOAD_FW_CPPFLAGS += -DFW_LOAD_INSTANCE_USE_DMA_FW
+else
+$(error FW_LOAD_DMA_INSTANCE $(FW_LOAD_DMA_INSTANCE) not supported)
+endif
+endif
+endif
+
+FW_LOAD_DIR = $${MODULES_DIR}/fw_load
+FW_LOAD_INTERFACE = $(FW_LOAD_DIR)/interface
+FW_LOAD_SOURCES = $(FW_LOAD_DIR)/src/$(FW_LOAD_IMPL)
+
+# XMEM and FWDMA are supported on the SP side
+FW_LOAD_FW_FILES = $(FW_LOAD_SOURCES)/ia_css_fw_load.c
+FW_LOAD_FW_CPPFLAGS += -I$(FW_LOAD_INTERFACE) \
+	-I$(FW_LOAD_SOURCES) \
+	-I$(FW_LOAD_DIR)/src
+
+# Only XMEM is supported on the host side
+FW_LOAD_HOST_FILES = $(FW_LOAD_DIR)/src/xmem/ia_css_fw_load.c
+FW_LOAD_HOST_CPPFLAGS = -I$(FW_LOAD_INTERFACE) \
+	-I$(FW_LOAD_DIR)/src/xmem \
+	-I$(FW_LOAD_DIR)/src
+
+ifdef FW_LOAD_NO_OF_REQUEST_OFFSET
+FW_LOAD_FW_CPPFLAGS += -DFW_LOAD_NO_OF_REQUEST_ADDRESS=$(FW_LOAD_NO_OF_REQUEST_OFFSET)
+endif # FW_LOAD_NO_OF_REQUEST_OFFSET
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/interface/ia_css_fw_load.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/interface/ia_css_fw_load.h
new file mode 100644
index 000000000000..d1f7926f39c6
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/interface/ia_css_fw_load.h
@@ -0,0 +1,155 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_FW_LOAD_H
+#define __IA_CSS_FW_LOAD_H
+
+#include "ia_css_fw_load_storage_class.h"
+#include "ia_css_xmem.h"
+#include "ia_css_cmem.h"
+
+enum ia_css_fw_load_mode {
+	IA_CSS_DBUS_ADDRESS = 0,
+	IA_CSS_CBUS_ADDRESS
+};
+
+/* Perform initialization for fw_load.
+   The client must call init before it calls any other API.
+ */
+
+IA_CSS_FW_LOAD_STORAGE_CLASS_H void
+ia_css_fw_load_init(void);
+
+/* This notifies the user what address has to be passed into the 'dst' parameter
+ * of the ia_css_fw_copy function and the ia_css_fw_zero function.
+ * When this function returns IA_CSS_DBUS_ADDRESS, the user must pass a data-bus
+ * address; when the function returns IA_CSS_CBUS_ADDRESS, the user must pass a
+ * control-bus address.
+ * The XMEM implementation requires control-bus addresses, while the
+ * DMA implementation requires data-bus addresses.
+*/
+IA_CSS_FW_LOAD_STORAGE_CLASS_H unsigned int
+ia_css_fw_load_get_mode(void);
+
+/***************** FW LOAD BLOCKING FUNCTIONS *******************************/
+/* NOTE: The user cannot call blocking functions immediately after calling any
+ * non-blocking request functions. The user must finish all load requests
+ * before calling any blocking function.
+ * e.g. the following is an invalid use case:
+ * - ia_css_fw_copy_begin (non-blocking) is called and, without ending this
+ *   request, the client calls ia_css_fw_copy (blocking). The client should
+ *   not do this. Before calling ia_css_fw_copy, it should finish all
+ *   requests by calling ia_css_fw_end().
+ */
+
+/* Perform a single data transfer from DDR/IMR (src) to a local variable (dst).
+   All arguments are multiples of 4.
+   The function returns when the transfer has completed.
+   The function may block.
+ */
+IA_CSS_FW_LOAD_STORAGE_CLASS_H void
+ia_css_fw_load(
+	unsigned int mmid,
+	ia_css_xmem_address_t src,
+	void *dst,
+	unsigned int size
+);
+
+/* Perform a single data transfer from DDR/IMR (src) to the subsystem (dst).
+   All arguments are multiples of 4.
+   The function returns when the transfer has completed.
+   The function may block.
+ */
+IA_CSS_FW_LOAD_STORAGE_CLASS_H void
+ia_css_fw_copy(
+	unsigned int mmid,
+	unsigned int ssid,
+	ia_css_xmem_address_t src,
+	ia_css_cmem_address_t dst,
+	unsigned int size
+);
+
+/* Zero a region of memory in the subsystem (dst).
+   The function returns when all transfers have completed.
+   The function may block.
+ */
+IA_CSS_FW_LOAD_STORAGE_CLASS_H void
+ia_css_fw_zero(
+	unsigned int ssid,
+	ia_css_cmem_address_t dst,
+	unsigned int size);
+
+/***************** FW LOAD NON_BLOCKING FUNCTIONS ****************************/
+
+/* Start a single data transfer from DDR/IMR (src) to a local variable (dst).
+   All arguments are multiples of 4.
+   The function returns as soon as the transfer has been issued; the request
+   must be completed later via ia_css_fw_end().
+   The function will not block.
+ */
+IA_CSS_FW_LOAD_STORAGE_CLASS_H unsigned int
+ia_css_fw_load_begin(
+	unsigned int mmid,
+	ia_css_xmem_address_t src,
+	void *dst,
+	unsigned int size
+);
+
+/* START OF TRANSFER / SUBMIT */
+/* Start a single data transfer from DDR/IMR (src) to the subsystem (dst).
+   The function returns non-zero when the transfer has been issued
+   successfully. When the transfer cannot be issued, the function returns 0.
+   The function will not block.
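+
+   A minimal polling sketch of the intended begin/end pairing (illustrative
+   only; mmid, ssid, src, dst and size are assumed to have been set up by
+   the caller):
+
+     if (ia_css_fw_copy_begin(mmid, ssid, src, dst, size) != 0) {
+         while (ia_css_fw_copy_end_available() == 0)
+             ;                               - spin until an ack is available
+         ia_css_fw_end(1);                   - retire the finished transfer
+     }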
+ */
+IA_CSS_FW_LOAD_STORAGE_CLASS_H unsigned int
+ia_css_fw_copy_begin(
+	unsigned int mmid,
+	unsigned int ssid,
+	ia_css_xmem_address_t src,
+	ia_css_cmem_address_t dst,
+	unsigned int size
+);
+
+/* Start zeroing the subsystem (dst) memory.
+   This function will not block.
+ */
+IA_CSS_FW_LOAD_STORAGE_CLASS_H unsigned int
+ia_css_fw_zero_begin(
+	unsigned int ssid,
+	ia_css_cmem_address_t dst,
+	unsigned int size);
+
+/* END OF TRANSFER / ACKNOWLEDGES */
+/* Complete at most n transfers;
+   returns the number of transfers that could be completed
+ */
+IA_CSS_FW_LOAD_STORAGE_CLASS_H unsigned int
+ia_css_fw_end(unsigned int n);
+
+/* OPTIONALLY USED FUNCTIONS */
+/* Return the number of transactions that may be submitted without blocking */
+IA_CSS_FW_LOAD_STORAGE_CLASS_H unsigned int
+ia_css_fw_copy_begin_available(void);
+
+/* Return the number of transactions that may be ended */
+IA_CSS_FW_LOAD_STORAGE_CLASS_H unsigned int
+ia_css_fw_copy_end_available(void);
+
+#ifdef __INLINE_IA_CSS_FW_LOAD__
+#include "ia_css_fw_load_blocking_impl.h"
+#include "ia_css_fw_load_non_blocking_impl.h"
+#include "ia_css_fw_load_impl.h"
+#endif
+
+
+#endif /* __IA_CSS_FW_LOAD_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/interface/ia_css_fw_load_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/interface/ia_css_fw_load_storage_class.h
new file mode 100644
index 000000000000..5849461b73f9
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/interface/ia_css_fw_load_storage_class.h
@@ -0,0 +1,29 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_FW_LOAD_STORAGE_CLASS_H
+#define __IA_CSS_FW_LOAD_STORAGE_CLASS_H
+
+#include "storage_class.h"
+
+#ifndef __INLINE_IA_CSS_FW_LOAD__
+#define IA_CSS_FW_LOAD_STORAGE_CLASS_H STORAGE_CLASS_EXTERN
+#define IA_CSS_FW_LOAD_STORAGE_CLASS_C
+#else
+#define IA_CSS_FW_LOAD_STORAGE_CLASS_H STORAGE_CLASS_INLINE
+#define IA_CSS_FW_LOAD_STORAGE_CLASS_C STORAGE_CLASS_INLINE
+#endif
+
+#endif /* __IA_CSS_FW_LOAD_STORAGE_CLASS_H */
+
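The storage-class header above is what lets the fw_load interface be compiled either as an ordinary object file or fully inlined: without __INLINE_IA_CSS_FW_LOAD__ the declarations in ia_css_fw_load.h are extern and the definitions come from ia_css_fw_load.c, while with it the header pulls in the *_impl.h files as inline definitions. A hedged sketch of a consumer built both ways (the function name and transfer size are illustrative assumptions):

	#include "ia_css_fw_load.h"

	/* builds both ways:
	 *   cc -c consumer.c                             -> link against ia_css_fw_load.c
	 *   cc -c -D__INLINE_IA_CSS_FW_LOAD__ consumer.c -> everything inlined
	 */
	void consumer_load(unsigned int mmid, ia_css_xmem_address_t src, void *dst)
	{
		ia_css_fw_load_init();
		ia_css_fw_load(mmid, src, dst, 64); /* blocking 64-byte load */
	}
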
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load.c
new file mode 100644
index 000000000000..37492d4b2ea2
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load.c
@@ -0,0 +1,30 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+/* C file with (optionally) inlined files */
+
+/* Global variable for tracking the number of fw_load transactions */
+/* Needed in the host-side implementation */
+#ifndef __VIED_CELL
+unsigned int started;
+#endif
+
+#ifdef __INLINE_IA_CSS_FW_LOAD__
+static inline int __avoid_warning_on_empty_file(void) { return 0; }
+#else
+#include "ia_css_fw_load_blocking_impl.h"
+#include "ia_css_fw_load_non_blocking_impl.h"
+#include "ia_css_fw_load_impl.h"
+#endif /* __INLINE_IA_CSS_FW_LOAD__ */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_blocking_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_blocking_impl.h
new file mode 100644
index 000000000000..02ad9c36156e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_blocking_impl.h
@@ -0,0 +1,54 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_FW_LOAD_BLOCKING_IMPL_H
+#define __IA_CSS_FW_LOAD_BLOCKING_IMPL_H
+
+#include "ia_css_fw_load.h"
+#include "ia_css_fw_load_storage_class.h"
+#include "ia_css_xmem_cmem.h"
+#include "ia_css_xmem.h"
+#include "ia_css_cmem.h"
+
+IA_CSS_FW_LOAD_STORAGE_CLASS_C void
+ia_css_fw_load(
+	unsigned int mmid,
+	ia_css_xmem_address_t src,
+	void *dst,
+	unsigned int size)
+{
+	ia_css_xmem_load(mmid, src, dst, size);
+}
+
+IA_CSS_FW_LOAD_STORAGE_CLASS_C void
+ia_css_fw_copy(
+	unsigned int mmid,
+	unsigned int ssid,
+	ia_css_xmem_address_t src,
+	ia_css_cmem_address_t dst,
+	unsigned int size)
+{
+	ia_css_xmem_to_cmem_copy(mmid, ssid, src, dst, size);
+}
+
+IA_CSS_FW_LOAD_STORAGE_CLASS_C void
+ia_css_fw_zero(
+	unsigned int ssid,
+	ia_css_cmem_address_t dst,
+	unsigned int size)
+{
+	ia_css_cmem_zero(ssid, dst, size);
+}
+
+#endif /* __IA_CSS_FW_LOAD_BLOCKING_IMPL_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_impl.h
new file mode 100644
index 000000000000..a9b6db8a5f55
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_impl.h
@@ -0,0 +1,26 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_FW_LOAD_IMPL_H
+#define __IA_CSS_FW_LOAD_IMPL_H
+
+#include "ia_css_fw_load.h"
+
+IA_CSS_FW_LOAD_STORAGE_CLASS_C unsigned int
+ia_css_fw_load_get_mode(void)
+{
+	return IA_CSS_CBUS_ADDRESS;
+}
+
+#endif /* __IA_CSS_FW_LOAD_IMPL_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_host_state.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_host_state.h
new file mode 100644
index 000000000000..1691e4522f78
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_host_state.h
@@ -0,0 +1,21 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_FW_LOAD_NON_BLOCKING_HOST_STATE_H
+#define __IA_CSS_FW_LOAD_NON_BLOCKING_HOST_STATE_H
+/* Global variable for tracking the number of fw_load transactions */
+/* Used in the xmem non-blocking host-side implementation */
+extern unsigned int started;
+
+#endif /* __IA_CSS_FW_LOAD_NON_BLOCKING_HOST_STATE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_impl.h
new file mode 100644
index 000000000000..c8949aa49370
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_impl.h
@@ -0,0 +1,125 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_FW_LOAD_NON_BLOCKING_IMPL_H
+#define __IA_CSS_FW_LOAD_NON_BLOCKING_IMPL_H
+
+#include "type_support.h"
+#include "ia_css_fw_load.h"
+#include "ia_css_fw_load_storage_class.h"
+#include "math_support.h"
+#include "error_support.h"
+
+#ifdef __VIED_CELL
+#include "ia_css_fw_load_non_blocking_impl_sp.h"
+#else
+#include "ia_css_fw_load_non_blocking_impl_host.h"
+#endif
+
+#define FW_LOAD_MAX_NB_TRANS UINT_MAX
+#define FW_LOAD_XMEM_MAX_TRANSACTION_SUPPORT \
+	umin(FW_LOAD_MAX_NB_TRANS, FW_LOAD_MAX_TRANS_SUPPORTED)
+
+
+IA_CSS_FW_LOAD_STORAGE_CLASS_C void
+ia_css_fw_load_init(void)
+{
+	fw_load_transaction_init();
+}
+
+/* START OF TRANSFER */
+IA_CSS_FW_LOAD_STORAGE_CLASS_C unsigned int
+ia_css_fw_load_begin(
+	unsigned int mmid,
+	ia_css_xmem_address_t src,
+	void *dst,
+	unsigned int size
+)
+{
+	if (!ia_css_fw_copy_begin_available())
+		return 0;
+	ia_css_fw_load(mmid, src, dst, size);
+	fw_load_transaction_add();
+	return size;
+}
+
+IA_CSS_FW_LOAD_STORAGE_CLASS_C unsigned int
+ia_css_fw_copy_begin(
+	unsigned int mmid,
+	unsigned int ssid,
+	ia_css_xmem_address_t src,
+	ia_css_cmem_address_t dst,
+	unsigned int size)
+{
+	/* Check if there is space to hold the ack event in the queue */
+	if (!ia_css_fw_copy_begin_available())
+		return 0;
+	ia_css_fw_copy(mmid, ssid, src, dst, size);
+	fw_load_transaction_add();
+	return size;
+}
+
+
+IA_CSS_FW_LOAD_STORAGE_CLASS_C unsigned int
+ia_css_fw_zero_begin(
+	unsigned int ssid,
+	ia_css_cmem_address_t dst,
+	unsigned int size)
+{
+	if (!ia_css_fw_copy_begin_available())
+		return 0; /* quota exceeded */
+
+	ia_css_fw_zero(ssid, dst, size);
+	fw_load_transaction_add();
+	return size;
+}
+
+/* END OF TRANSFER */
+IA_CSS_FW_LOAD_STORAGE_CLASS_C unsigned int
+ia_css_fw_end(unsigned int n)
+{
+	int no_of_ack_received;
+	int fw_end_count;
+	int transaction_done;
+	bool success;
+
+	no_of_ack_received = ia_css_fw_copy_end_available();
+	fw_end_count = min(n, no_of_ack_received);
+
+	transaction_done = 0;
+
+	while (transaction_done < fw_end_count) {
+		success = fw_load_transaction_remove();
+		assert(success == true);
+		transaction_done++;
+	}
+	return fw_end_count;
+}
+
+/* OPTIONALLY USED */
+IA_CSS_FW_LOAD_STORAGE_CLASS_C unsigned int
+ia_css_fw_copy_begin_available(void)
+{
+	return (FW_LOAD_XMEM_MAX_TRANSACTION_SUPPORT -
+		ia_css_fw_copy_end_available());
+}
+
+IA_CSS_FW_LOAD_STORAGE_CLASS_C unsigned int
+ia_css_fw_copy_end_available(void)
+{
+	/* check how many transactions are ready to be ended */
+	return fw_load_transaction_get_finished();
+}
+
+#endif /* __IA_CSS_FW_LOAD_NON_BLOCKING_IMPL_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_impl_host.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_impl_host.h
new file mode 100644
index 000000000000..25a05cce2576
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/fw_load/src/xmem/ia_css_fw_load_non_blocking_impl_host.h
@@ -0,0 +1,45 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_FW_LOAD_NON_BLOCKING_IMPL_HOST_H
+#define __IA_CSS_FW_LOAD_NON_BLOCKING_IMPL_HOST_H
+
+#include "storage_class.h"
+#include "type_support.h"
+#include "ia_css_fw_load_non_blocking_host_state.h"
+
+#define FW_LOAD_MAX_TRANS_SUPPORTED UINT_MAX
+
+STORAGE_CLASS_INLINE void fw_load_transaction_init(void)
+{
+	started = 0;
+}
+
+STORAGE_CLASS_INLINE bool fw_load_transaction_add(void)
+{
+	started++;
+	return true;
+}
+
+STORAGE_CLASS_INLINE bool fw_load_transaction_remove(void)
+{
+	started--;
+	return true;
+}
+
+STORAGE_CLASS_INLINE unsigned int fw_load_transaction_get_finished(void)
+{
+	return started;
+}
+#endif /* __IA_CSS_FW_LOAD_NON_BLOCKING_IMPL_HOST_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir.h
new file mode 100644
index 000000000000..6bc2fa708d43
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir.h
@@ -0,0 +1,100 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PKG_DIR_H
+#define __IA_CSS_PKG_DIR_H
+
+#include "ia_css_pkg_dir_storage_class.h"
+#include "ia_css_pkg_dir_types.h"
+#include "type_support.h"
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+const ia_css_pkg_dir_entry_t *ia_css_pkg_dir_get_entry(
+	const ia_css_pkg_dir_t *pkg_dir,
+	uint32_t index
+);
+
+/* The user is expected to call the verify function manually;
+ * other functions do not call it internally
+ */
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+int ia_css_pkg_dir_verify_header(
+	const ia_css_pkg_dir_entry_t *pkg_dir_header
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_get_num_entries(
+	const ia_css_pkg_dir_entry_t *pkg_dir_header
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_get_size_in_bytes(
+	const ia_css_pkg_dir_entry_t *pkg_dir_header
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+enum ia_css_pkg_dir_version ia_css_pkg_dir_get_version(
+	const ia_css_pkg_dir_entry_t *pkg_dir_header
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint16_t ia_css_pkg_dir_set_version(
+	ia_css_pkg_dir_entry_t *pkg_dir_header,
+	enum ia_css_pkg_dir_version version
+);
+
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_entry_get_address_lo(
+	const ia_css_pkg_dir_entry_t *entry
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_entry_get_address_hi(
+	const ia_css_pkg_dir_entry_t *entry
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint32_t ia_css_pkg_dir_entry_get_size(
+	const ia_css_pkg_dir_entry_t *entry
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint16_t ia_css_pkg_dir_entry_get_version(
+	const ia_css_pkg_dir_entry_t *entry
+);
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+uint8_t ia_css_pkg_dir_entry_get_type(
+	const ia_css_pkg_dir_entry_t *entry
+);
+
+/* Get the address of the specified entry in the PKG_DIR
+ * Note: This function expects the complete PKG_DIR in the same memory space
+ * and the entries
contain offsets, not addresses.
+ */
+IA_CSS_PKG_DIR_STORAGE_CLASS_H
+void *ia_css_pkg_dir_get_entry_address(
+	const ia_css_pkg_dir_t *pkg_dir,
+	uint32_t index
+);
+
+#ifdef __IA_CSS_PKG_DIR_INLINE__
+
+#include "ia_css_pkg_dir_impl.h"
+
+#endif
+
+#endif /* __IA_CSS_PKG_DIR_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_iunit.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_iunit.h
new file mode 100644
index 000000000000..2e45eaa52727
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_iunit.h
@@ -0,0 +1,47 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PKG_DIR_IUNIT_H
+#define __IA_CSS_PKG_DIR_IUNIT_H
+
+/* In the boot flow, the pkg_dir supports up to 16 entries:
+ * pkg_dir_header + Psys_server pg + Isys_server pg + 13 Client pg
+ */
+
+enum {
+	IA_CSS_PKG_DIR_SIZE = 16,
+	IA_CSS_PKG_DIR_ENTRIES = IA_CSS_PKG_DIR_SIZE - 1
+};
+
+#define IUNIT_MAX_CLIENT_PKG_ENTRIES 13
+
+/* Example assignment of unique identifiers for the FW components.
+ * This should match the identifiers in the manifest
+ */
+enum ia_css_pkg_dir_entry_type {
+	IA_CSS_PKG_DIR_HEADER = 0,
+	IA_CSS_PKG_DIR_PSYS_SERVER_PG,
+	IA_CSS_PKG_DIR_ISYS_SERVER_PG,
+	IA_CSS_PKG_DIR_CLIENT_PG
+};
+
+/* Fixed entries in the package directory */
+enum ia_css_pkg_dir_index {
+	IA_CSS_PKG_DIR_PSYS_INDEX = 0,
+	IA_CSS_PKG_DIR_ISYS_INDEX = 1,
+	IA_CSS_PKG_DIR_CLIENT_0 = 2
+};
+
+#endif /* __IA_CSS_PKG_DIR_IUNIT_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_storage_class.h
new file mode 100644
index 000000000000..27e87d1e6774
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_storage_class.h
@@ -0,0 +1,30 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IA_CSS_PKG_DIR_STORAGE_CLASS_H +#define __IA_CSS_PKG_DIR_STORAGE_CLASS_H + + +#include "storage_class.h" + +#ifndef __IA_CSS_PKG_DIR_INLINE__ +#define IA_CSS_PKG_DIR_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PKG_DIR_STORAGE_CLASS_C +#else +#define IA_CSS_PKG_DIR_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PKG_DIR_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PKG_DIR_STORAGE_CLASS_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_types.h new file mode 100644 index 000000000000..ec0ee18b41e1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/interface/ia_css_pkg_dir_types.h @@ -0,0 +1,42 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PKG_DIR_TYPES_H +#define __IA_CSS_PKG_DIR_TYPES_H + +#include "type_support.h" + +struct ia_css_pkg_dir_entry { + uint32_t address[2]; + uint32_t size; + uint16_t version; + uint8_t type; + uint8_t unused; +}; + +typedef void ia_css_pkg_dir_t; +typedef struct ia_css_pkg_dir_entry ia_css_pkg_dir_entry_t; + +/* The version field of the pkg_dir header defines + * if entries contain offsets or pointers + */ +/* This is temporary, until all pkg_dirs use pointers */ +enum ia_css_pkg_dir_version { + IA_CSS_PKG_DIR_POINTER, + IA_CSS_PKG_DIR_OFFSET +}; + + +#endif /* __IA_CSS_PKG_DIR_TYPES_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/pkg_dir.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/pkg_dir.mk new file mode 100644 index 000000000000..a4b4aaa4995e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/pkg_dir.mk @@ -0,0 +1,30 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is PKG DIR + +PKG_DIR_DIR = $${MODULES_DIR}/pkg_dir +PKG_DIR_INTERFACE = $(PKG_DIR_DIR)/interface +PKG_DIR_SOURCES = $(PKG_DIR_DIR)/src + +PKG_DIR_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir.c +PKG_DIR_CPPFLAGS = -I$(PKG_DIR_INTERFACE) +PKG_DIR_CPPFLAGS += -I$(PKG_DIR_SOURCES) +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/../isp/kernels/io_ls/common +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/fw_abi_common_types/ipu +PKG_DIR_CPPFLAGS += -I$${MODULES_DIR}/fw_abi_common_types/ipu/$(FW_ABI_IPU_TYPES_VERSION) + +PKG_DIR_CREATE_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir_create.c +PKG_DIR_UPDATE_FILES = $(PKG_DIR_DIR)/src/ia_css_pkg_dir_update.c + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir.c new file mode 100644 index 000000000000..348b56833e06 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir.c @@ -0,0 +1,27 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifdef __IA_CSS_PKG_DIR_INLINE__ + +#include "storage_class.h" + +STORAGE_CLASS_INLINE int __ia_css_pkg_dir_avoid_warning_on_empty_file(void) +{ + return 0; +} + +#else +#include "ia_css_pkg_dir_impl.h" + +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_impl.h new file mode 100644 index 000000000000..ca5564c7d990 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_impl.h @@ -0,0 +1,202 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PKG_DIR_IMPL_H +#define __IA_CSS_PKG_DIR_IMPL_H + +#include "ia_css_pkg_dir.h" +#include "ia_css_pkg_dir_int.h" +#include "error_support.h" +#include "type_support.h" +#include "assert_support.h" + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +const ia_css_pkg_dir_entry_t *ia_css_pkg_dir_get_entry( + const ia_css_pkg_dir_t *pkg_dir, + uint32_t index) +{ + DECLARE_ERRVAL + struct ia_css_pkg_dir_entry *pkg_dir_header = NULL; + + verifexitval(pkg_dir != NULL, EFAULT); + + pkg_dir_header = (struct ia_css_pkg_dir_entry *)pkg_dir; + + /* First entry of the structure is the header, skip that */ + index++; + verifexitval(index < pkg_dir_header->size, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + return NULL; + } + return &(pkg_dir_header[index]); +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +int ia_css_pkg_dir_verify_header(const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + verifexitval(pkg_dir_header != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + return -1; + } + return ((pkg_dir_header->address[0] == PKG_DIR_MAGIC_VAL_0) + && (pkg_dir_header->address[1] == PKG_DIR_MAGIC_VAL_1)) ? + 0 : -1; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_get_num_entries( + const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + uint32_t size = 0; + + verifexitval(pkg_dir_header != NULL, EFAULT); + size = pkg_dir_header->size; + verifexitval(size > 0, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return size - 1; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +enum ia_css_pkg_dir_version +ia_css_pkg_dir_get_version(const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + assert(pkg_dir_header != NULL); + return pkg_dir_header->version; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint16_t ia_css_pkg_dir_set_version(ia_css_pkg_dir_entry_t *pkg_dir_header, + enum ia_css_pkg_dir_version version) +{ + DECLARE_ERRVAL + + verifexitval(pkg_dir_header != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 1; + } + pkg_dir_header->version = version; + return 0; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_get_size_in_bytes( + const ia_css_pkg_dir_entry_t *pkg_dir_header) +{ + DECLARE_ERRVAL + + verifexitval(pkg_dir_header != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return sizeof(struct ia_css_pkg_dir_entry) * pkg_dir_header->size; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_address_lo( + const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->address[0]; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_address_hi( + const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->address[1]; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint32_t ia_css_pkg_dir_entry_get_size(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->size; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint16_t ia_css_pkg_dir_entry_get_version(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) { + return 0; + } + return entry->version; +} + +IA_CSS_PKG_DIR_STORAGE_CLASS_C +uint8_t ia_css_pkg_dir_entry_get_type(const ia_css_pkg_dir_entry_t *entry) +{ + DECLARE_ERRVAL + + verifexitval(entry != NULL, EFAULT); +EXIT: + if (haserror(EFAULT)) 
{
+		return 0;
+	}
+	return entry->type;
+}
+
+
+IA_CSS_PKG_DIR_STORAGE_CLASS_C
+void *ia_css_pkg_dir_get_entry_address(const ia_css_pkg_dir_t *pkg_dir,
+					uint32_t index)
+{
+	void *entry_blob = NULL;
+	const ia_css_pkg_dir_entry_t *pkg_dir_entry =
+		ia_css_pkg_dir_get_entry(pkg_dir, index-1);
+
+	if ((pkg_dir_entry != NULL) &&
+	    (ia_css_pkg_dir_entry_get_size(pkg_dir_entry) > 0)) {
+		assert(ia_css_pkg_dir_entry_get_address_hi(pkg_dir_entry) == 0);
+		entry_blob = (void *)((char *)pkg_dir +
+			ia_css_pkg_dir_entry_get_address_lo(pkg_dir_entry));
+	}
+	return entry_blob;
+}
+
+#endif /* __IA_CSS_PKG_DIR_IMPL_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_int.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_int.h
new file mode 100644
index 000000000000..3a50245261e5
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/pkg_dir/src/ia_css_pkg_dir_int.h
@@ -0,0 +1,50 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PKG_DIR_INT_H
+#define __IA_CSS_PKG_DIR_INT_H
+
+/*
+ * Package Dir structure as specified in CSE FAS
+ *
+ * PKG DIR Header
+ * Qword	63:56	55	54:48	47:32	31:24	23:0
+ *   0		"_IUPKDR_"
+ *   1		Rsvd	Rsvd	Type	Version	Rsvd	Size
+ *
+ * Version:	Version of the Structure
+ * Size:	Size of the entire table (including header) in 16 byte chunks
+ * Type:	Must be 0 for header
+ *
+ * Figure 13: PKG DIR Header
+ *
+ *
+ * PKG DIR Entry
+ * Qword	63:56	55	54:48	47:32	31:24	23:0
+ *   N		Address/Offset
+ *   N+1	Rsvd	Rsvd	Type	Version	Rsvd	Size
+ *
+ * Version:	Version # of the Component
+ * Size:	Size of the component in bytes
+ * Type:	Component Identifier
+ */
+
+#define PKG_DIR_SIZE_BITS 24
+#define PKG_DIR_TYPE_BITS 7
+
+#define PKG_DIR_MAGIC_VAL_1 (('_' << 24) | ('I' << 16) | ('U' << 8) | 'P')
+#define PKG_DIR_MAGIC_VAL_0 (('K' << 24) | ('D' << 16) | ('R' << 8) | '_')
+
+#endif /* __IA_CSS_PKG_DIR_INT_H */
+
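The accessors declared in ia_css_pkg_dir.h are thin wrappers over this layout. A hedged sketch of how a host-side loader might validate and walk an offset-based package directory with that interface (the blob pointer, the dump function and the printf reporting are illustrative assumptions, not part of this patch):

	#include <stdio.h>
	#include "ia_css_pkg_dir.h"

	/* 'blob' is assumed to point at a complete PKG_DIR resident in one
	   memory space, as ia_css_pkg_dir_get_entry_address() requires */
	static void dump_pkg_dir(const ia_css_pkg_dir_t *blob)
	{
		const ia_css_pkg_dir_entry_t *hdr =
			(const ia_css_pkg_dir_entry_t *)blob;
		uint32_t i;

		if (ia_css_pkg_dir_verify_header(hdr) != 0)
			return; /* magic "_IUPKDR_" not found */

		for (i = 0; i < ia_css_pkg_dir_get_num_entries(hdr); i++) {
			const ia_css_pkg_dir_entry_t *e =
				ia_css_pkg_dir_get_entry(blob, i);

			if (e != NULL)
				printf("entry %u: type %u, %u bytes\n",
				       (unsigned)i,
				       (unsigned)ia_css_pkg_dir_entry_get_type(e),
				       (unsigned)ia_css_pkg_dir_entry_get_size(e));
		}
	}
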
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/port_env_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/port_env_struct.h
new file mode 100644
index 000000000000..4d39a4739a8b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/port_env_struct.h
@@ -0,0 +1,24 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __PORT_ENV_STRUCT_H
+#define __PORT_ENV_STRUCT_H
+
+struct port_env {
+	unsigned int mmid;
+	unsigned int ssid;
+	unsigned int mem_addr;
+};
+
+#endif /* __PORT_ENV_STRUCT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/queue.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/queue.h
new file mode 100644
index 000000000000..b233ab3baf01
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/queue.h
@@ -0,0 +1,40 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __QUEUE_H
+#define __QUEUE_H
+
+#include "queue_struct.h"
+#include "port_env_struct.h"
+
+/*
+ * SYS queues are created by the host
+ * SYS queues cannot be accessed through the queue interface
+ * To send data into a queue a send_port must be opened.
+ * To receive data from a queue, a recv_port must be opened.
+ */
+
+/* return required buffer size for queue */
+unsigned int
+sys_queue_buf_size(unsigned int size, unsigned int token_size);
+
+/*
+ * initialize a queue that can hold at least 'size' tokens of
+ * 'token_size' bytes.
+ */
+void
+sys_queue_init(struct sys_queue *q, unsigned int size,
+	unsigned int token_size, struct sys_queue_res *res);
+
+#endif /* __QUEUE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/queue_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/queue_struct.h
new file mode 100644
index 000000000000..ef48fcfded2b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/queue_struct.h
@@ -0,0 +1,47 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __QUEUE_STRUCT_H +#define __QUEUE_STRUCT_H + +/* queue description, shared between sender and receiver */ + +#include "type_support.h" + +#ifdef __VIED_CELL +typedef struct {uint32_t v[2]; } host_buffer_address_t; +#else +typedef uint64_t host_buffer_address_t; +#endif + +typedef uint32_t vied_buffer_address_t; + + +struct sys_queue { + host_buffer_address_t host_address; + vied_buffer_address_t vied_address; + unsigned int size; + unsigned int token_size; + unsigned int wr_reg; /* reg no in subsystem's regmem */ + unsigned int rd_reg; + unsigned int _align; +}; + +struct sys_queue_res { + host_buffer_address_t host_address; + vied_buffer_address_t vied_address; + unsigned int reg; +}; + +#endif /* __QUEUE_STRUCT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/recv_port.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/recv_port.h new file mode 100644 index 000000000000..cce253b26668 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/recv_port.h @@ -0,0 +1,34 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __RECV_PORT_H +#define __RECV_PORT_H + + +struct recv_port; +struct sys_queue; +struct port_env; + +void +recv_port_open(struct recv_port *p, const struct sys_queue *q, + const struct port_env *env); + +unsigned int +recv_port_available(const struct recv_port *p); + +unsigned int +recv_port_transfer(const struct recv_port *p, void *data); + + +#endif /* __RECV_PORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/recv_port_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/recv_port_struct.h new file mode 100644 index 000000000000..52ec563b13cf --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/recv_port_struct.h @@ -0,0 +1,32 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __RECV_PORT_STRUCT_H
+#define __RECV_PORT_STRUCT_H
+
+#include "buffer_type.h"
+
+struct recv_port {
+	buffer_address buffer; /* address of buffer in DDR */
+	unsigned int size;
+	unsigned int token_size;
+	unsigned int wr_reg; /* index of write pointer located in regmem */
+	unsigned int rd_reg; /* index of read pointer located in regmem */
+
+	unsigned int mmid;
+	unsigned int ssid;
+	unsigned int mem_addr; /* address of memory containing regmem */
+};
+
+#endif /* __RECV_PORT_STRUCT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/send_port.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/send_port.h
new file mode 100644
index 000000000000..04a160f3f019
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/send_port.h
@@ -0,0 +1,52 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __SEND_PORT_H
+#define __SEND_PORT_H
+
+
+/*
+ * A send port can be used to send tokens into a queue.
+ * The interface can be used on any type of processor (host, SP, ...)
+ */
+
+struct send_port;
+struct sys_queue;
+struct port_env;
+
+/*
+ * Open a send port on a queue. After the port is opened, tokens can be sent
+ */
+void
+send_port_open(struct send_port *p, const struct sys_queue *q,
+		const struct port_env *env);
+
+/*
+ * Determine how many tokens can be sent
+ */
+unsigned int
+send_port_available(const struct send_port *p);
+
+/*
+ * Send a token via a send port. The function returns the number of
+ * tokens that have been sent:
+ * 1: the token was accepted
+ * 0: the token was not accepted (full queue)
+ * The size of a token is determined at initialization.
+ */
+unsigned int
+send_port_transfer(const struct send_port *p, const void *data);
+
+
+#endif /* __SEND_PORT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/send_port_struct.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/send_port_struct.h
new file mode 100644
index 000000000000..f834c62bc3db
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/interface/send_port_struct.h
@@ -0,0 +1,32 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __SEND_PORT_STRUCT_H
+#define __SEND_PORT_STRUCT_H
+
+#include "buffer_type.h"
+
+struct send_port {
+	buffer_address buffer;
+	unsigned int size;
+	unsigned int token_size;
+	unsigned int wr_reg; /* index of write pointer in regmem */
+	unsigned int rd_reg; /* index of read pointer in regmem */
+
+	unsigned int mmid;
+	unsigned int ssid;
+	unsigned int mem_addr;
+};
+
+#endif /* __SEND_PORT_STRUCT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/port.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/port.mk
new file mode 100644
index 000000000000..b3801247802e
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/port.mk
@@ -0,0 +1,31 @@
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details
+#
+#
+# MODULE is PORT
+
+PORT_DIR=$${MODULES_DIR}/port
+
+PORT_INTERFACE=$(PORT_DIR)/interface
+PORT_SOURCES1=$(PORT_DIR)/src
+
+PORT_HOST_FILES += $(PORT_SOURCES1)/send_port.c
+PORT_HOST_FILES += $(PORT_SOURCES1)/recv_port.c
+PORT_HOST_FILES += $(PORT_SOURCES1)/queue.c
+
+PORT_HOST_CPPFLAGS += -I$(PORT_INTERFACE)
+
+PORT_FW_FILES += $(PORT_SOURCES1)/send_port.c
+PORT_FW_FILES += $(PORT_SOURCES1)/recv_port.c
+
+PORT_FW_CPPFLAGS += -I$(PORT_INTERFACE)
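The split between struct sys_queue (the shared ring descriptor) and the per-side send/recv ports is easiest to see in a single-process sketch. In real use the two ends live on different processors; the round trip below, with env and res assumed to be prepared by platform init code (buffer space reserved, the regmem read/write registers zeroed), is meant only to show the call sequence:

	#include "type_support.h"
	#include "queue.h"
	#include "queue_struct.h"
	#include "port_env_struct.h"
	#include "send_port.h"
	#include "send_port_struct.h"
	#include "recv_port.h"
	#include "recv_port_struct.h"

	static int roundtrip_one_token(struct port_env *env, struct sys_queue_res *res)
	{
		struct sys_queue q;
		struct send_port sp;
		struct recv_port rp;
		uint32_t in = 0xcafe, out = 0;

		sys_queue_init(&q, 8, sizeof(uint32_t), res); /* 8 tokens of 4 bytes */
		send_port_open(&sp, &q, env);
		recv_port_open(&rp, &q, env);

		if (send_port_transfer(&sp, &in) != 1)
			return -1; /* queue full */
		if (recv_port_transfer(&rp, &out) != 1)
			return -1; /* queue empty */
		return (out == in) ? 0 : -1;
	}
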
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/queue.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/queue.c
new file mode 100644
index 000000000000..eeec99dfe2d0
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/queue.c
@@ -0,0 +1,47 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#include "queue.h"
+
+#include "regmem_access.h"
+#include "port_env_struct.h"
+
+unsigned int sys_queue_buf_size(unsigned int size, unsigned int token_size)
+{
+	return (size + 1) * token_size;
+}
+
+void
+sys_queue_init(struct sys_queue *q, unsigned int size, unsigned int token_size,
+	struct sys_queue_res *res)
+{
+	unsigned int buf_size;
+
+	/* one extra slot keeps a full ring distinguishable from an empty one */
+	q->size = size + 1;
+	q->token_size = token_size;
+	buf_size = sys_queue_buf_size(size, token_size);
+
+	/* acquire the shared buffer space */
+	q->host_address = res->host_address;
+	res->host_address += buf_size;
+	q->vied_address = res->vied_address;
+	res->vied_address += buf_size;
+
+	/* acquire the shared read and write pointers */
+	q->wr_reg = res->reg;
+	res->reg++;
+	q->rd_reg = res->reg;
+	res->reg++;
+
+}
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/recv_port.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/recv_port.c
new file mode 100644
index 000000000000..31b36e9ceafb
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/recv_port.c
@@ -0,0 +1,95 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#include "recv_port.h"
+#include "port_env_struct.h" /* for port_env */
+#include "queue_struct.h" /* for sys_queue */
+#include "recv_port_struct.h" /* for recv_port */
+#include "buffer_access.h" /* for buffer_load, buffer_address */
+#include "regmem_access.h" /* for regmem_load_32, regmem_store_32 */
+#include "storage_class.h" /* for STORAGE_CLASS_INLINE */
+#include "math_support.h" /* for OP_std_modadd */
+#include "type_support.h" /* for HOST_ADDRESS */
+
+#ifndef __VIED_CELL
+#include "cpu_mem_support.h" /* for ia_css_cpu_mem_cache_invalidate */
+#endif
+
+void
+recv_port_open(struct recv_port *p, const struct sys_queue *q,
+		const struct port_env *env)
+{
+	p->mmid = env->mmid;
+	p->ssid = env->ssid;
+	p->mem_addr = env->mem_addr;
+
+	p->size = q->size;
+	p->token_size = q->token_size;
+	p->wr_reg = q->wr_reg;
+	p->rd_reg = q->rd_reg;
+
+#ifdef __VIED_CELL
+	p->buffer = q->vied_address;
+#else
+	p->buffer = q->host_address;
+#endif
+}
+
+STORAGE_CLASS_INLINE unsigned int
+recv_port_index(const struct recv_port *p, unsigned int i)
+{
+	unsigned int rd = regmem_load_32(p->mem_addr, p->rd_reg, p->ssid);
+
+	return OP_std_modadd(rd, i, p->size);
+}
+
+unsigned int
+recv_port_available(const struct recv_port *p)
+{
+	int wr = (int)regmem_load_32(p->mem_addr, p->wr_reg, p->ssid);
+	int rd = (int)regmem_load_32(p->mem_addr, p->rd_reg, p->ssid);
+
+	return OP_std_modadd(wr, -rd, p->size);
+}
+
+STORAGE_CLASS_INLINE void
+recv_port_copy(const struct recv_port *p, unsigned int i, void *data)
+{
+	unsigned int rd = recv_port_index(p, i);
+	unsigned int token_size = p->token_size;
+	buffer_address addr = p->buffer + (rd * token_size);
+#ifndef __VIED_CELL
+	ia_css_cpu_mem_cache_invalidate((void *)HOST_ADDRESS(p->buffer),
+		token_size*p->size);
+#endif
+	buffer_load(addr, data, token_size, p->mmid);
+}
+
+STORAGE_CLASS_INLINE void
+recv_port_release(const struct recv_port *p, unsigned int i) +{ + unsigned int rd = recv_port_index(p, i); + + regmem_store_32(p->mem_addr, p->rd_reg, rd, p->ssid); +} + +unsigned int +recv_port_transfer(const struct recv_port *p, void *data) +{ + if (!recv_port_available(p)) + return 0; + recv_port_copy(p, 0, data); + recv_port_release(p, 1); + return 1; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/send_port.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/send_port.c new file mode 100644 index 000000000000..8d1fba08c5d5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/port/src/send_port.c @@ -0,0 +1,94 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "send_port.h" +#include "queue_struct.h" /* for sys_queue */ +#include "send_port_struct.h" /* for send_port */ +#include "port_env_struct.h" /* for port_env */ +#include "regmem_access.h" /* for regmem_load_32, regmem_store_32 */ +#include "buffer_access.h" /* for buffer_store, buffer_address */ +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "math_support.h" /* for OP_std_modadd */ +#include "type_support.h" /* for HOST_ADDRESS */ + +#ifndef __VIED_CELL +#include "cpu_mem_support.h" /* for ia_css_cpu_mem_cache_flush */ +#endif + +void +send_port_open(struct send_port *p, const struct sys_queue *q, + const struct port_env *env) +{ + p->mmid = env->mmid; + p->ssid = env->ssid; + p->mem_addr = env->mem_addr; + + p->size = q->size; + p->token_size = q->token_size; + p->wr_reg = q->wr_reg; + p->rd_reg = q->rd_reg; +#ifdef __VIED_CELL + p->buffer = q->vied_address; +#else + p->buffer = q->host_address; +#endif +} + +STORAGE_CLASS_INLINE unsigned int +send_port_index(const struct send_port *p, unsigned int i) +{ + unsigned int wr = regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + + return OP_std_modadd(wr, i, p->size); +} + +unsigned int +send_port_available(const struct send_port *p) +{ + int rd = (int)regmem_load_32(p->mem_addr, p->rd_reg, p->ssid); + int wr = (int)regmem_load_32(p->mem_addr, p->wr_reg, p->ssid); + + return OP_std_modadd(rd, -(wr + 1), p->size); +} + +STORAGE_CLASS_INLINE void +send_port_copy(const struct send_port *p, unsigned int i, const void *data) +{ + unsigned int wr = send_port_index(p, i); + unsigned int token_size = p->token_size; + buffer_address addr = p->buffer + (wr * token_size); + + buffer_store(addr, data, token_size, p->mmid); +#ifndef __VIED_CELL + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(addr), token_size); +#endif +} + +STORAGE_CLASS_INLINE void +send_port_release(const struct send_port *p, unsigned int i) +{ + unsigned int wr = send_port_index(p, i); + + regmem_store_32(p->mem_addr, p->wr_reg, wr, p->ssid); +} + +unsigned int +send_port_transfer(const struct send_port *p, const void *data) +{ + if (!send_port_available(p)) + return 0; + send_port_copy(p, 0, data); + send_port_release(p, 1); + return 1; +}
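The send/recv pair above forms a single-producer, single-consumer ring: sys_queue_init() reserves size + 1 slots so that wr == rd means empty, recv_port_available() measures (wr - rd) mod size, and send_port_available() keeps the writer one slot behind the reader. A minimal host-side model of the same index arithmetic follows; modadd(), send(), recv() and the plain arrays are illustrative stand-ins for OP_std_modadd and the regmem/buffer accessors, and are not part of this patch.

#include <stdio.h>
#include <string.h>

#define TOKENS		4		/* requested queue capacity */
#define SLOTS		(TOKENS + 1)	/* one slot sacrificed to tell full from empty */
#define TOKEN_SIZE	sizeof(int)

static unsigned char buf[SLOTS * TOKEN_SIZE];	/* stands in for the shared buffer */
static unsigned int wr_reg, rd_reg;		/* stand in for the regmem index registers */

/* analogue of OP_std_modadd(); the + m keeps the sum non-negative */
static unsigned int modadd(unsigned int a, int b, unsigned int m)
{
	return (unsigned int)(((int)a + b + (int)m) % (int)m);
}

/* same full test as send_port_available(): (rd - wr - 1) mod SLOTS */
static unsigned int send(const int *token)
{
	if (modadd(rd_reg, -(int)(wr_reg + 1), SLOTS) == 0)
		return 0;
	memcpy(buf + wr_reg * TOKEN_SIZE, token, TOKEN_SIZE);
	wr_reg = modadd(wr_reg, 1, SLOTS);	/* release: publish the write index */
	return 1;
}

/* same empty test as recv_port_available(): (wr - rd) mod SLOTS */
static unsigned int recv(int *token)
{
	if (modadd(wr_reg, -(int)rd_reg, SLOTS) == 0)
		return 0;
	memcpy(token, buf + rd_reg * TOKEN_SIZE, TOKEN_SIZE);
	rd_reg = modadd(rd_reg, 1, SLOTS);	/* release: publish the read index */
	return 1;
}

int main(void)
{
	int in, out;

	for (in = 0; send(&in); in++)
		;				/* accepts TOKENS tokens, then reports full */
	printf("queued %d tokens\n", in);	/* queued 4 tokens */
	while (recv(&out))
		printf("got %d\n", out);	/* got 0 .. got 3 */
	return 0;
}

diff --git 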
a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/interface/psys_infobits.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/interface/psys_infobits.h new file mode 100644 index 000000000000..11029a180531 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/interface/psys_infobits.h @@ -0,0 +1,20 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PSYS_INFOBITS_H +#define __PSYS_INFOBITS_H + +void ia_css_psys_set_master_port_regs(unsigned int ssid); + +#endif /* __PSYS_INFOBITS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/psys_infobits.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/psys_infobits.mk new file mode 100644 index 000000000000..8734afe80ac4 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/psys_infobits.mk @@ -0,0 +1,30 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# PSYS_INFOBITS +# + +PSYS_INFOBITS_DIR = $${MODULES_DIR}/psys_infobits + +PSYS_INFOBITS_INTERFACE = $(PSYS_INFOBITS_DIR)/interface +PSYS_INFOBITS_SOURCES = $(PSYS_INFOBITS_DIR)/src + +PSYS_INFOBITS_CPPFLAGS := \ + -I$(PSYS_INFOBITS_INTERFACE) + +PSYS_INFOBITS_HOST_FILES = \ + $(PSYS_INFOBITS_SOURCES)/psys_infobits.c + +PSYS_INFOBITS_FW_FILES = $(PSYS_INFOBITS_HOST_FILES) + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/src/psys_infobits.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/src/psys_infobits.c new file mode 100644 index 000000000000..5c43583f6193 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_infobits/src/psys_infobits.c @@ -0,0 +1,107 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "psys_infobits.h" + +#include "assert_support.h" +#include "ia_css_cell.h" +#include "ipu_device_cell_properties.h" +#include "ipu_device_cell_properties_impl.h" +#include "ipu_device_buttress_properties_struct.h" + +/* +** According to BXT CSS HAS PS the info bits as expected by buttress are +** Field---------Description---------------------Encoding---------------| + | 0 | CIOM0: Snoopable | 0 - non snoopable | + | | | 1 - snoopable | + ----------------------------------------------------------------------| + | 1 | CIOM0: VC0_RS_for_IMR | Deadline | + | | CIOM1: VC1_deadline_pointer | 0 - regular deadline | + | | | 1 - urgent deadline | + ----------------------------------------------------------------------| + | 2 | Deadline pointer reserved | | + ----------------------------------------------------------------------| + | 3 | CIOM1: Zero-length write (ZLW)| 0 - NOP | + | | | 1 - Convert transaction as ZLW + ----------------------------------------------------------------------| + | 5:4 | CIOM0: Request destination | Destination | + | | CIOM1: Stream_ID[1:0] | 00 - Buttress registers| + | | | 01 - Primary | + | | | 10 - Reserved | + | | | 11 - Input system | + ----------------------------------------------------------------------| + | 7:6 | CIOM1: Stream_ID[3:2] | For data prefetch | + ----------------------------------------------------------------------| + | 8 | CIOM1: Address swizzeling | | + ----------------------------------------------------------------------| + + ** As PSYS devices use MO port and the request destination is DDR + ** then bit 4 (Request destination) should be 1 (Primary), thus 0x10 +*/ + + +void ia_css_psys_set_master_port_regs(unsigned int ssid) +{ + /* set primary destination(DDR) */ + unsigned int info_bits = IA_CSS_INFO_BITS_M0_DDR; + enum ipu_device_psys_cell_id cell_id; + + COMPILATION_ERROR_IF(0 != SPC0); + + /* Configure SPC */ + cell_id = SPC0; + ia_css_cell_set_master_info_bits(ssid, cell_id, + IPU_DEVICE_SP2600_CONTROL_ICACHE, info_bits); + ia_css_cell_set_master_info_bits(ssid, cell_id, + IPU_DEVICE_SP2600_CONTROL_XMEM, info_bits); + ia_css_cell_set_master_base_address(ssid, cell_id, + IPU_DEVICE_SP2600_CONTROL_XMEM, 0); + +#if defined(HAS_SPP0) + /* Configure SPP0 proxy */ + cell_id = SPP0; + ia_css_cell_set_master_info_bits(ssid, cell_id, + IPU_DEVICE_SP2600_PROXY_ICACHE, info_bits); + ia_css_cell_set_master_info_bits(ssid, cell_id, + IPU_DEVICE_SP2600_PROXY_XMEM, info_bits); + ia_css_cell_set_master_base_address(ssid, cell_id, + IPU_DEVICE_SP2600_PROXY_XMEM, 0); + COMPILATION_ERROR_IF(SPP0 < SPC0); +#endif + +#if defined(HAS_SPP1) + /* Configure SPP1 proxy */ + cell_id = SPP1; + ia_css_cell_set_master_info_bits(ssid, cell_id, + IPU_DEVICE_SP2600_PROXY_ICACHE, info_bits); + ia_css_cell_set_master_info_bits(ssid, cell_id, + IPU_DEVICE_SP2600_PROXY_XMEM, info_bits); + ia_css_cell_set_master_base_address(ssid, cell_id, + IPU_DEVICE_SP2600_PROXY_XMEM, 0); + COMPILATION_ERROR_IF(SPP1 < SPC0); +#endif + +#if defined(HAS_ISP0) + /* Configure ISP(s) */ + for (cell_id = ISP0; cell_id < NUM_CELLS; cell_id++) { + ia_css_cell_set_master_info_bits(ssid, cell_id, + IPU_DEVICE_CELL_MASTER_ICACHE, info_bits); + ia_css_cell_set_master_info_bits(ssid, cell_id, + IPU_DEVICE_CELL_MASTER_XMEM, info_bits); + ia_css_cell_set_master_base_address(ssid, cell_id, + IPU_DEVICE_CELL_MASTER_XMEM, 0); + } + COMPILATION_ERROR_IF(ISP0 < SPP0); +#endif +} diff --git 
a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_private_pg/interface/ia_css_psys_private_pg_data.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_private_pg/interface/ia_css_psys_private_pg_data.h new file mode 100644 index 000000000000..b36dbbca96ca --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_private_pg/interface/ia_css_psys_private_pg_data.h @@ -0,0 +1,44 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PRIVATE_PG_DATA_H +#define __IA_CSS_PSYS_PRIVATE_PG_DATA_H + +#include "ipu_device_acb_devices.h" +#include "ipu_device_gp_devices.h" +#include "type_support.h" +#include "vied_nci_acb_route_type.h" + +#define PRIV_CONF_INVALID 0xFF + +struct ia_css_psys_pg_buffer_information_s { + unsigned int buffer_base_addr; + unsigned int bpe; + unsigned int buffer_width; + unsigned int buffer_height; + unsigned int num_of_buffers; + unsigned int dfm_port_addr; +}; + +typedef struct ia_css_psys_pg_buffer_information_s ia_css_psys_pg_buffer_information_t; + +struct ia_css_psys_private_pg_data { + nci_acb_route_t acb_route[IPU_DEVICE_ACB_NUM_ACB]; + uint8_t psa_mux_conf[IPU_DEVICE_GP_PSA_MUX_NUM_MUX]; + uint8_t isa_mux_conf[IPU_DEVICE_GP_ISA_STATIC_MUX_NUM_MUX]; + ia_css_psys_pg_buffer_information_t input_buffer_info; +}; + +#endif /* __IA_CSS_PSYS_PRIVATE_PG_DATA_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/interface/ia_css_bxt_spctrl_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/interface/ia_css_bxt_spctrl_trace.h new file mode 100644 index 000000000000..eee1d6ab0a49 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/interface/ia_css_bxt_spctrl_trace.h @@ -0,0 +1,107 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_BXT_SPCTRL_TRACE_H +#define __IA_CSS_BXT_SPCTRL_TRACE_H + +#include "ia_css_trace.h" + +/* Not using 0 to identify wrong configuration being passed from + * the .mk file outside. + * Log levels not in the range below will cause a + * "No BXT_SPCTRL_TRACE_CONFIG Tracing level defined" + */ +#define BXT_SPCTRL_TRACE_LOG_LEVEL_OFF 1 +#define BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL 2 +#define BXT_SPCTRL_TRACE_LOG_LEVEL_DEBUG 3 + +/* BXT_SPCTRL and all the submodules in BXT_SPCTRL will have the + * default tracing level set to the BXT_SPCTRL_TRACE_CONFIG level. 
+ * If not defined in the psysapi.mk file it will be set by + * default to BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL + */ +#define BXT_SPCTRL_TRACE_CONFIG_DEFAULT BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL + +#if !defined(BXT_SPCTRL_TRACE_CONFIG) +# define BXT_SPCTRL_TRACE_CONFIG BXT_SPCTRL_TRACE_CONFIG_DEFAULT +#endif + +/* BXT_SPCTRL Module tracing backend is mapped to TUNIT tracing for + * target platforms + */ +#ifdef __HIVECC +# ifndef HRT_CSIM +# define BXT_SPCTRL_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE +# else +# define BXT_SPCTRL_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +# endif +#else +# define BXT_SPCTRL_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#endif + +#if (defined(BXT_SPCTRL_TRACE_CONFIG)) + /* Module specific trace setting */ +# if BXT_SPCTRL_TRACE_CONFIG == BXT_SPCTRL_TRACE_LOG_LEVEL_OFF + /* BXT_SPCTRL_TRACE_LOG_LEVEL_OFF */ +# define BXT_SPCTRL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED +# elif BXT_SPCTRL_TRACE_CONFIG == BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL + /* BXT_SPCTRL_TRACE_LOG_LEVEL_NORMAL */ +# define BXT_SPCTRL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED +# define BXT_SPCTRL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED +# elif BXT_SPCTRL_TRACE_CONFIG == BXT_SPCTRL_TRACE_LOG_LEVEL_DEBUG + /* BXT_SPCTRL_TRACE_LOG_LEVEL_DEBUG */ +# define BXT_SPCTRL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED +# define BXT_SPCTRL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED +# else +# error "No BXT_SPCTRL_TRACE_CONFIG Tracing level defined" +# endif +#else +# error "BXT_SPCTRL_TRACE_CONFIG not defined" +#endif + +/* Overriding submodules in BXT_SPCTRL with a specific tracing level */ +/* #define BXT_SPCTRL_DYNAMIC_TRACING_OVERRIDE TRACE_LOG_LEVEL_VERBOSE */ + +#endif /* __IA_CSS_BXT_SPCTRL_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/psys_server.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/psys_server.mk new file mode 100644 index 000000000000..c4462c984793 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/psys_server.mk @@ -0,0 +1,81 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for +# more details +# +# +# MODULE is PSYS_SERVER + +include $(MODULES_DIR)/config/system_$(IPU_SYSVER).mk +include $(MODULES_DIR)/config/$(SUBSYSTEM)/subsystem_$(IPU_SYSVER).mk + +PSYS_SERVER_DIR=${MODULES_DIR}/psys_server + +# The watchdog should never be merged enabled +PSYS_SERVER_WATCHDOG_ENABLE ?= 0 + +PSYS_SERVER_INTERFACE=$(PSYS_SERVER_DIR)/interface +PSYS_SERVER_SOURCES=$(PSYS_SERVER_DIR)/src + +# PSYS API implementation files. Consider a new module for those to avoid +# having them together with firmware. +PSYS_SERVER_HOST_FILES += $${MODULES_DIR}/psysapi/device/src/ia_css_psys_device.c +PSYS_SERVER_HOST_FILES += $(PSYS_SERVER_SOURCES)/bxt_spctrl_process_group_cmd_impl.c + +PSYS_SERVER_HOST_CPPFLAGS += -I$(PSYS_SERVER_INTERFACE) + +PSYS_SERVER_HOST_CPPFLAGS += -DSSID=$(SSID) +PSYS_SERVER_HOST_CPPFLAGS += -DMMID=$(MMID) + + +PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/psys_cmd_queue_fw.c +PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/psys_event_queue_fw.c +PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/psys_init_fw.c +PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/psys_process_group_fw.c + +# Files that server modules need to use +PSYS_SERVER_SUPPORT_FILES = $(PSYS_SERVER_SOURCES)/dev_access_conv/$(IPU_SYSVER)/ia_css_psys_server_dev_access_type_conv.c +PSYS_SERVER_SUPPORT_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_config.c + +# Include those to build the release firmware. Otherwise replace by test code. +PSYS_SERVER_RELEASE_FW_FILES = $(PSYS_SERVER_SOURCES)/psys_server.c +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_proxy.c +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_dev_access.c +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_terminal_load.c +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_remote_obj_access.c +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_dma_access.c +ifeq ($(HAS_DEC400), 1) +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_dec400_access.c +endif +PSYS_SERVER_RELEASE_FW_FILES += $(PSYS_SERVER_SUPPORT_FILES) + +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_INTERFACE) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/$(IPU_SYSVER) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/$(PSYS_SERVER_VERSION) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/loader/$(PSYS_SERVER_LOADER_VERSION) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/access_blocker/$(PSYS_ACCESS_BLOCKER_VERSION) +PSYS_SERVER_FW_CPPFLAGS += -I$(PSYS_SERVER_SOURCES)/access_blocker/src + +PSYS_SERVER_FW_CPPFLAGS += -DSSID=$(SSID) +PSYS_SERVER_FW_CPPFLAGS += -DMMID=$(MMID) +PSYS_SERVER_FW_CPPFLAGS += -DHAS_DPCM=$(if $(HAS_DPCM),1,0) + +# PSYS server watchdog for debugging +ifeq ($(PSYS_SERVER_WATCHDOG_ENABLE), 1) + PSYS_SERVER_FW_FILES += $(PSYS_SERVER_SOURCES)/ia_css_psys_server_watchdog.c + PSYS_SERVER_FW_CPPFLAGS += -DPSYS_SERVER_WATCHDOG_DEBUG +endif + +PSYS_SERVER_FW_CPPFLAGS += -D$(PSYS_HW_VERSION) + +PSYS_SERVER_FW_CPPFLAGS += -DENABLE_TPROXY=$(PSYS_SERVER_ENABLE_TPROXY) +PSYS_SERVER_FW_CPPFLAGS += -DENABLE_DEVPROXY=$(PSYS_SERVER_ENABLE_DEVPROXY) diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/src/bxt_spctrl_process_group_cmd_impl.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/src/bxt_spctrl_process_group_cmd_impl.c new file mode 100644 index 000000000000..6f8aea782464 --- 
/dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psys_server/src/bxt_spctrl_process_group_cmd_impl.c @@ -0,0 +1,332 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_device.h" +#include "ia_css_psys_process_group_cmd_impl.h" +#include "ia_css_psysapi.h" +#include "ia_css_psys_terminal.h" +#include "ia_css_psys_process.h" +#include "ia_css_psys_process.psys.h" +#include "ia_css_psys_process_group.h" +#include "ia_css_psys_process_group.psys.h" +#include "ia_css_psys_program_group_manifest.h" +#include "type_support.h" +#include "error_support.h" +#include "misc_support.h" +#include "cpu_mem_support.h" +#include "ia_css_bxt_spctrl_trace.h" + +#if HAS_DUAL_CMD_CTX_SUPPORT +#define MAX_CLIENT_PGS 8 /* same as test_params.h */ +struct ia_css_process_group_context { + ia_css_process_group_t *pg; + bool secure; +}; +struct ia_css_process_group_context pg_contexts[MAX_CLIENT_PGS]; +static unsigned int num_of_pgs; + +STORAGE_CLASS_INLINE +struct ia_css_syscom_context *ia_css_process_group_get_context(ia_css_process_group_t *process_group) +{ + unsigned int i; + bool secure = false; + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_get_context(): enter:\n"); + + for (i = 0; i < num_of_pgs; i++) { + if (pg_contexts[i].pg == process_group) { + secure = pg_contexts[i].secure; + break; + } + } + + IA_CSS_TRACE_1(BXT_SPCTRL, INFO, + "ia_css_process_group_get_context(): secure %d\n", secure); + return secure ? 
psys_syscom_secure : psys_syscom; +} + +int ia_css_process_group_store(ia_css_process_group_t *process_group, bool secure) +{ + IA_CSS_TRACE_2(BXT_SPCTRL, INFO, + "ia_css_process_group_store(): pg instance %d secure %d\n", num_of_pgs, secure); + + pg_contexts[num_of_pgs].pg = process_group; + pg_contexts[num_of_pgs].secure = secure; + num_of_pgs++; + return 0; +} +#else /* HAS_DUAL_CMD_CTX_SUPPORT */ +STORAGE_CLASS_INLINE +struct ia_css_syscom_context *ia_css_process_group_get_context(ia_css_process_group_t *process_group) +{ + NOT_USED(process_group); + + return psys_syscom; +} + +int ia_css_process_group_store(ia_css_process_group_t *process_group, bool secure) +{ + NOT_USED(process_group); + NOT_USED(secure); + + return 0; +} +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +int ia_css_process_group_on_create( + ia_css_process_group_t *process_group, + const ia_css_program_group_manifest_t *program_group_manifest, + const ia_css_program_group_param_t *program_group_param) +{ + NOT_USED(process_group); + NOT_USED(program_group_manifest); + NOT_USED(program_group_param); + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_on_create(): enter:\n"); + + return 0; +} + +int ia_css_process_group_on_destroy( + ia_css_process_group_t *process_group) +{ + NOT_USED(process_group); + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_on_destroy(): enter:\n"); + + return 0; +} + +int ia_css_process_group_exec_cmd( + ia_css_process_group_t *process_group, + const ia_css_process_group_cmd_t cmd) +{ + int retval = -1; + ia_css_process_group_state_t state; + struct ia_css_psys_cmd_s psys_cmd; + bool cmd_queue_full; + unsigned int queue_id; + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): enter:\n"); + + verifexit(process_group != NULL); + + state = ia_css_process_group_get_state(process_group); + + verifexit(state != IA_CSS_PROCESS_GROUP_ERROR); + verifexit(state < IA_CSS_N_PROCESS_GROUP_STATES); + + switch (cmd) { + case IA_CSS_PROCESS_GROUP_CMD_SUBMIT: + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_SUBMIT:\n"); + verifexit(state == IA_CSS_PROCESS_GROUP_READY); + + /* External resource availability checks */ + verifexit(ia_css_can_process_group_submit(process_group)); + + process_group->state = IA_CSS_PROCESS_GROUP_BLOCKED; + break; + case IA_CSS_PROCESS_GROUP_CMD_START: + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_START:\n"); + verifexit(state == IA_CSS_PROCESS_GROUP_BLOCKED); + + /* External resource state checks */ + verifexit(ia_css_can_process_group_start(process_group)); + + process_group->state = IA_CSS_PROCESS_GROUP_STARTED; + break; + case IA_CSS_PROCESS_GROUP_CMD_DISOWN: + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_DISOWN:\n"); + verifexit(state == IA_CSS_PROCESS_GROUP_STARTED); + + cmd_queue_full = ia_css_is_psys_cmd_queue_full(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID); + retval = EBUSY; + verifexit(cmd_queue_full == false); + + psys_cmd.command = IA_CSS_PROCESS_GROUP_CMD_START; + psys_cmd.msg = 0; + psys_cmd.context_handle = process_group->ipu_virtual_address; + + verifexit(ia_css_process_group_print(process_group, NULL) == 0); + + retval = ia_css_psys_cmd_queue_send(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID, &psys_cmd); + verifexit(retval > 0); + break; + case IA_CSS_PROCESS_GROUP_CMD_STOP: + + 
IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_STOP:\n"); + + cmd_queue_full = ia_css_is_psys_cmd_queue_full(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID); + retval = EBUSY; + verifexit(cmd_queue_full == false); + + psys_cmd.command = IA_CSS_PROCESS_GROUP_CMD_STOP; + psys_cmd.msg = 0; + psys_cmd.context_handle = process_group->ipu_virtual_address; + + queue_id = ia_css_process_group_get_base_queue_id(process_group); + verifexit(queue_id < IA_CSS_N_PSYS_CMD_QUEUE_ID); + + retval = ia_css_psys_cmd_queue_send(ia_css_process_group_get_context(process_group), + queue_id, &psys_cmd); + verifexit(retval > 0); + break; + case IA_CSS_PROCESS_GROUP_CMD_ABORT: + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_process_group_exec_cmd(): IA_CSS_PROCESS_GROUP_CMD_ABORT:\n"); + + /* Once the flushing of shared buffers is fixed this verifexit + * should be changed to be state = IA_CSS_PROCESS_GROUP_STARTED + */ + verifexit(state == IA_CSS_PROCESS_GROUP_BLOCKED); + + cmd_queue_full = ia_css_is_psys_cmd_queue_full(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID); + retval = EBUSY; + verifexit(cmd_queue_full == false); + + psys_cmd.command = IA_CSS_PROCESS_GROUP_CMD_ABORT; + psys_cmd.msg = 0; + psys_cmd.context_handle = process_group->ipu_virtual_address; + + retval = ia_css_psys_cmd_queue_send(ia_css_process_group_get_context(process_group), + IA_CSS_PSYS_CMD_QUEUE_DEVICE_ID, &psys_cmd); + verifexit(retval > 0); + break; + default: + verifexit(false); + break; + } + + retval = 0; +EXIT: + if (0 != retval) { + IA_CSS_TRACE_1(BXT_SPCTRL, ERROR, + "ia_css_process_group_exec_cmd failed (%i)\n", retval); + } + return retval; +} + +STORAGE_CLASS_INLINE int enqueue_buffer_set_cmd( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *buffer_set, + unsigned int queue_offset, + uint16_t command + ) +{ + int retval = -1; + struct ia_css_psys_cmd_s psys_cmd; + bool cmd_queue_full; + unsigned int queue_id; + + verifexit(ia_css_process_group_get_state(process_group) + == IA_CSS_PROCESS_GROUP_STARTED); + + verifexit(queue_offset < + ia_css_process_group_get_num_queues(process_group)); + + queue_id = + ia_css_process_group_get_base_queue_id(process_group) + + queue_offset; + verifexit(queue_id < IA_CSS_N_PSYS_CMD_QUEUE_ID); + + cmd_queue_full = ia_css_is_psys_cmd_queue_full(ia_css_process_group_get_context(process_group), queue_id); + retval = EBUSY; + verifexit(cmd_queue_full == false); + + psys_cmd.command = command; + psys_cmd.msg = 0; + psys_cmd.context_handle = + ia_css_buffer_set_get_ipu_address(buffer_set); + + retval = ia_css_psys_cmd_queue_send(ia_css_process_group_get_context(process_group), queue_id, &psys_cmd); + verifexit(retval > 0); + + retval = 0; + +EXIT: + if (0 != retval) { + IA_CSS_TRACE_1(BXT_SPCTRL, ERROR, + "enqueue_buffer_set failed (%i)\n", retval); + } + return retval; +} + +int ia_css_enqueue_buffer_set( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *buffer_set, + unsigned int queue_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_enqueue_buffer_set():\n"); + retval = enqueue_buffer_set_cmd( + process_group, + buffer_set, + queue_offset, + IA_CSS_PROCESS_GROUP_CMD_RUN); + + if (0 != retval) { + IA_CSS_TRACE_1(BXT_SPCTRL, ERROR, + "ia_css_enqueue_buffer_set failed (%i)\n", retval); + } + return retval; +} + +int ia_css_enqueue_param_buffer_set( + ia_css_process_group_t *process_group, + ia_css_buffer_set_t *param_buffer_set) 
+{ +#if (HAS_LATE_BINDING_SUPPORT == 1) + int retval = -1; + + IA_CSS_TRACE_0(BXT_SPCTRL, INFO, + "ia_css_enqueue_param_buffer_set():\n"); + + retval = enqueue_buffer_set_cmd( + process_group, + param_buffer_set, + IA_CSS_PSYS_LATE_BINDING_QUEUE_OFFSET, + IA_CSS_PROCESS_GROUP_CMD_SUBMIT); + + if (0 != retval) { + IA_CSS_TRACE_1(BXT_SPCTRL, ERROR, + "ia_css_enqueue_param_buffer_set failed (%i)\n", retval); + } +#else + int retval = -1; + + NOT_USED(process_group); + NOT_USED(param_buffer_set); + IA_CSS_TRACE_0(BXT_SPCTRL, ERROR, + "ia_css_enqueue_param_buffer_set failed, no late binding supported\n"); +#endif /* (HAS_LATE_BINDING_SUPPORT == 1) */ + return retval; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data.h new file mode 100644 index 000000000000..6ccca1d9b69e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data.h @@ -0,0 +1,418 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_DATA_H +#define __IA_CSS_PROGRAM_GROUP_DATA_H + +#include "ia_css_psys_data_storage_class.h" + +/*! \file */ + +/** @file ia_css_program_group_data.h + * + * Define the data objects that are passed to the process groups + * i.e. 
frames and matrices with their sub-structures + * + * The data objects are separate from the process group terminal, + * although they are stored by value rather than by reference and + * make the process group terminal dependent on its definition + * + * This frame definition overloads the current CSS frame definition; + * they are the same object, just a slightly different implementation + */ + +#include /* vied_vaddress_t */ + +#include +#include "ia_css_program_group_data_defs.h" /* ia_css_frame_format_type */ + +#include "ia_css_terminal_defs.h" + +/* + * Frame buffer state used for sequencing + * (see FAS 5.5.3) + * + * The buffer can be in DDR or a handle to a stream + */ +typedef enum ia_css_buffer_state { + IA_CSS_BUFFER_NULL = 0, + IA_CSS_BUFFER_UNDEFINED, + IA_CSS_BUFFER_EMPTY, + IA_CSS_BUFFER_NONEMPTY, + IA_CSS_BUFFER_FULL, + IA_CSS_N_BUFFER_STATES +} ia_css_buffer_state_t; + +#define IA_CSS_BUFFER_STATE_IN_BITS 32 + +/* + * Pointer state used to signal MMU invalidation + */ +typedef enum ia_css_pointer_state { + IA_CSS_POINTER_INVALID = 0, + IA_CSS_POINTER_VALID, + IA_CSS_N_POINTER_STATES +} ia_css_pointer_state_t; + +#define IA_CSS_POINTER_STATE_IN_BITS 32 + +/* + * Access direction needed to select the access port + */ +typedef enum ia_css_access_type { + IA_CSS_ACCESS_LOCKED = 0, + IA_CSS_ACCESS_READ, + IA_CSS_ACCESS_WRITE, + IA_CSS_ACCESS_MODIFY, + IA_CSS_N_ACCESS_TYPES +} ia_css_access_type_t; + +#define IA_CSS_ACCESS_TYPE_IN_BITS 32 + +/* + * Access attribute needed to select the access port + * - public : snooped + * - private: non-snooped + * Naming is a bit awkward, lack of inspiration + */ +typedef enum ia_css_access_scope { + IA_CSS_ACCESS_PRIVATE = 0, + IA_CSS_ACCESS_PUBLIC, + IA_CSS_N_ACCESS_SCOPES +} ia_css_access_scopes_t; + +#define IA_CSS_ACCESS_SCOPES_IN_BITS 32 + +#define IA_CSS_N_FRAME_PLANES 6 + +#define IA_CSS_FRAME_FORMAT_BITMAP_BITS 64 +typedef uint64_t ia_css_frame_format_bitmap_t; + +typedef struct ia_css_param_frame_descriptor_s ia_css_param_frame_descriptor_t; +typedef struct ia_css_param_frame_s ia_css_param_frame_t; + +typedef struct ia_css_frame_descriptor_s ia_css_frame_descriptor_t; +typedef struct ia_css_frame_s ia_css_frame_t; +typedef struct ia_css_fragment_descriptor_s ia_css_fragment_descriptor_t; + +typedef struct ia_css_stream_s ia_css_stream_t; + + +#define N_UINT64_IN_STREAM_STRUCT 1 + +#define IA_CSS_STREAM_STRUCT_BITS \ + (N_UINT64_IN_STREAM_STRUCT * 64) + +struct ia_css_stream_s { + uint64_t dummy; +}; + +struct ia_css_param_frame_descriptor_s { + uint16_t size; /**< Size of the descriptor */ + uint32_t buffer_count; /**< Number of parameter buffers */ +}; + +struct ia_css_param_frame_s { + /**< Base virtual addresses to parameters in subsystem virtual + * memory space + */ + vied_vaddress_t *data; +}; + +#define N_UINT32_IN_FRAME_DESC_STRUCT \ + (1 + IA_CSS_N_FRAME_PLANES + (IA_CSS_N_DATA_DIMENSION - 1)) +#define N_UINT16_IN_FRAME_DESC_STRUCT (1 + IA_CSS_N_DATA_DIMENSION) +#define N_UINT8_IN_FRAME_DESC_STRUCT 3 +#define N_PADDING_UINT8_IN_FRAME_DESC_STRUCT 3 + +#define IA_CSS_FRAME_DESCRIPTOR_STRUCT_BITS \ + (IA_CSS_FRAME_FORMAT_TYPE_BITS \ + + (N_UINT32_IN_FRAME_DESC_STRUCT * 32) \ + + (N_UINT16_IN_FRAME_DESC_STRUCT * 16) \ + + (N_UINT8_IN_FRAME_DESC_STRUCT * 8) \ + + (N_PADDING_UINT8_IN_FRAME_DESC_STRUCT * 8)) + +/* + * Structure defining the frame (size and access) properties for + * built-in types only. + * + * The built-in types like FourCC, MIPI and CSS private types are supported + * by FW; all other types are custom types whose interpretation must be encoded + * on the buffer itself or known by the source and sink + */ +struct ia_css_frame_descriptor_s { + /**< Indicates if this is a generic type or built-in with + * variable size descriptor + */ + ia_css_frame_format_type_t frame_format_type; + /**< Number of data planes (pointers) */ + uint32_t plane_count; + /**< Plane offsets accounting for fragments */ + uint32_t plane_offsets[IA_CSS_N_FRAME_PLANES]; + /**< Physical size aspects */ + uint32_t stride[IA_CSS_N_DATA_DIMENSION - 1]; + /**< Logical dimensions */ + uint16_t dimension[IA_CSS_N_DATA_DIMENSION]; + /**< Size of this descriptor */ + uint16_t size; + /**< Bits per pixel */ + uint8_t bpp; + /**< Bits per element */ + uint8_t bpe; + /**< 1 if terminal uses compressed datatype, 0 otherwise */ + uint8_t is_compressed; + /**< Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_FRAME_DESC_STRUCT]; +}; + +#define N_UINT32_IN_FRAME_STRUCT 2 +#define N_PADDING_UINT8_IN_FRAME_STRUCT 4 + +#define IA_CSS_FRAME_STRUCT_BITS \ + (IA_CSS_BUFFER_STATE_IN_BITS \ + + IA_CSS_ACCESS_TYPE_IN_BITS \ + + IA_CSS_POINTER_STATE_IN_BITS \ + + IA_CSS_ACCESS_SCOPES_IN_BITS \ + + VIED_VADDRESS_BITS \ + + (N_UINT32_IN_FRAME_STRUCT * 32) \ + + (N_PADDING_UINT8_IN_FRAME_STRUCT * 8)) + + +/* + * Main frame structure holding the main store and auxiliary access properties; + * the "pointer_state" and "access_scope" should be encoded on the + * "vied_vaddress_t" type + */ +struct ia_css_frame_s { + /**< State of the frame for purpose of sequencing */ + ia_css_buffer_state_t buffer_state; + /**< Access direction, may change when buffer state changes */ + ia_css_access_type_t access_type; + /**< State of the pointer for purpose of embedded MMU coherency */ + ia_css_pointer_state_t pointer_state; + /**< Access to the pointer for purpose of host cache coherency */ + ia_css_access_scopes_t access_scope; + /**< Base virtual address to data in subsystem virtual memory space */ + vied_vaddress_t data; + /**< Offset to buffer address within external buffer set structure */ + uint32_t data_index; + /**< Total allocation size in bytes */ + uint32_t data_bytes; + /**< Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_FRAME_STRUCT]; +}; + +#define N_UINT16_IN_FRAGMENT_DESC_STRUCT (3 * IA_CSS_N_DATA_DIMENSION) +#define N_PADDING_UINT8_IN_FRAGMENT_DESC_STRUCT 4 + +#define IA_CSS_FRAGMENT_DESCRIPTOR_STRUCT_BITS \ + ((N_UINT16_IN_FRAGMENT_DESC_STRUCT * 16) \ + + (N_PADDING_UINT8_IN_FRAGMENT_DESC_STRUCT * 8)) + +/* + * Structure defining the fragment (size and access) properties. + * + * All cropping and padding effects are described by the difference between + * the frame size and its location and the fragment size(s) and location(s) + */ +struct ia_css_fragment_descriptor_s { + /**< Logical dimensions of the fragment */ + uint16_t dimension[IA_CSS_N_DATA_DIMENSION]; + /**< Logical location of the fragment in the frame */ + uint16_t index[IA_CSS_N_DATA_DIMENSION]; + /**< Fractional start (phase) of the fragment in the access unit */ + uint16_t offset[IA_CSS_N_DATA_DIMENSION]; + /**< Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_FRAGMENT_DESC_STRUCT]; +}; + + +/*!
Print the frame object to file/stream + + @param frame[in] frame object + @param fid[out] file/stream handle + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_print( + const ia_css_frame_t *frame, void *fid); + +/*! Get the host virtual address of the data buffer handle in the frame object + + @param frame[in] frame object + + @return pointer to the buffer handle, NULL on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +const vied_vaddress_t *ia_css_frame_get_buffer_host_virtual_address( + const ia_css_frame_t *frame); + +/*! Get the data buffer handle from the frame object + + @param frame[in] frame object + + @return buffer pointer, VIED_NULL on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +vied_vaddress_t ia_css_frame_get_buffer(const ia_css_frame_t *frame); + +/*! Set the data buffer handle on the frame object + + @param frame[in] frame object + @param buffer[in] buffer pointer + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_set_buffer( + ia_css_frame_t *frame, vied_vaddress_t buffer); + +/*! Get the data buffer index in the frame object + + @param frame[in] frame object + + @return data buffer index on success, -1 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_get_data_index( + const ia_css_frame_t *frame); + +/*! Set the data buffer index in the frame object + + @param frame[in] frame object + @param data_index[in] data buffer index + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_set_data_index( + ia_css_frame_t *frame, + unsigned int data_index); + +/*! Set the data buffer size on the frame object + + @param frame[in] frame object + @param size[in] number of data bytes + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_set_data_bytes( + ia_css_frame_t *frame, unsigned size); + +/*! Get the data buffer state from the frame object + + @param frame[in] frame object + + @return buffer state, limit value on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +ia_css_buffer_state_t ia_css_frame_get_buffer_state( + const ia_css_frame_t *frame); + +/*! Set the data buffer state of the frame object + + @param frame[in] frame object + @param buffer_state[in] buffer state + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_set_buffer_state(ia_css_frame_t *frame, + const ia_css_buffer_state_t buffer_state); + +/*! Get the data pointer state from the frame object + + @param frame[in] frame object + + @return pointer state, limit value on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +ia_css_pointer_state_t ia_css_frame_get_pointer_state( + const ia_css_frame_t *frame); + +/*! Set the data pointer state of the frame object + + @param frame[in] frame object + @param pointer_state[in] pointer state + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_set_pointer_state(ia_css_frame_t *frame, + const ia_css_pointer_state_t pointer_state); + +/*! Print the frame descriptor object to file/stream + + @param frame_descriptor[in] frame descriptor object + @param fid[out] file/stream handle + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_frame_descriptor_print( + const ia_css_frame_descriptor_t *frame_descriptor, void *fid); + +/*! Print the fragment descriptor object to file/stream + + @param fragment_descriptor[in] fragment descriptor object + @param fid[out] file/stream handle + + @return < 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +int ia_css_fragment_descriptor_print( + const ia_css_fragment_descriptor_t *fragment_descriptor, void *fid); + +/*! Compute the bitmap for the frame format type + + @param frame_format_type[in] frame format type + + @return 0 on error + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +ia_css_frame_format_bitmap_t ia_css_frame_format_bit_mask( + const ia_css_frame_format_type_t frame_format_type); + +/*! Clear the frame format bitmap + + @return cleared bitmap + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +ia_css_frame_format_bitmap_t ia_css_frame_format_bitmap_clear(void); + + +/*! Compute the size of storage required for the data descriptor object + * on a terminal + * @param plane_count[in] The number of data planes in the buffer + */ +IA_CSS_PSYS_DATA_STORAGE_CLASS_H +size_t ia_css_sizeof_frame_descriptor( + const uint8_t plane_count); + +/*! Compute the size of storage required for the kernel parameter descriptor + * object on a terminal + + @param section_count[in] The number of parameter sections in the buffer + + @return 0 on error + */ +extern size_t ia_css_sizeof_kernel_param_descriptor( + const uint16_t section_count); + +#ifdef __IA_CSS_PSYS_DATA_INLINE__ +#include "ia_css_program_group_data_impl.h" +#endif /* __IA_CSS_PSYS_DATA_INLINE__ */ + +#endif /* __IA_CSS_PROGRAM_GROUP_DATA_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data_defs.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data_defs.h new file mode 100644 index 000000000000..3f177a19b98b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_program_group_data_defs.h @@ -0,0 +1,196 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_DATA_DEFS_H +#define __IA_CSS_PROGRAM_GROUP_DATA_DEFS_H + + +/* + * Pre-defined frame format + * + * These formats have built-in support for traffic + * and access functions + * + * Note that the formats are for terminals, so there + * is no distinction between input and output formats + * - Custom formats with or without descriptor + * - 4CC formats such as YUV variants + * - MIPI (line) formats as produced by CSI receivers + * - MIPI (sensor) formats such as Bayer or RGBC + * - CSS internal formats (private types) + * - CSS parameters (type 1 - 6) + */ +#define IA_CSS_FRAME_FORMAT_TYPE_BITS 32 +typedef enum ia_css_frame_format_type { + IA_CSS_DATA_CUSTOM_NO_DESCRIPTOR = 0, + IA_CSS_DATA_CUSTOM, + + /* 12 bit YUV 411, Y, UV 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV11, + /* bpp bit YUV 420, Y, U, V 3-plane (bpp/1.5 bpe) */ + IA_CSS_DATA_FORMAT_YUV420, + /* 12 bit YUV 420, Y, V, U 3-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_YV12, + /* 12 bit YUV 420, Y, UV 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV12, + /* 16 bit YUV 420, Y, UV 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV12_16, + /* 12 bit YUV 420, Intel proprietary tiled format, TileY */ + IA_CSS_DATA_FORMAT_NV12_TILEY, + /* 12 bit YUV 420, Y, VU 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV21, + /* bpp bit YUV 422, Y, U, V 3-plane (bpp/2 bpe) */ + IA_CSS_DATA_FORMAT_YUV422, + /* 16 bit YUV 422, Y, V, U 3-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_YV16, + /* 16 bit YUV 422, Y, UV 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV16, + /* 16 bit YUV 422, Y, VU 2-plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_NV61, + /* 16 bit YUV 422, UYVY 1-plane interleaved (8 bit per element) */ + IA_CSS_DATA_FORMAT_UYVY, + /* 16 bit YUV 422, YUYV 1-plane interleaved (8 bit per element) */ + IA_CSS_DATA_FORMAT_YUYV, + /* bpp bit YUV 444, Y, U, V 3-plane (bpp/3 bpe) */ + IA_CSS_DATA_FORMAT_YUV444, + /* 8 bit monochrome plane */ + IA_CSS_DATA_FORMAT_Y800, + + /* 5-6-5 bit packed (1-plane) RGB (16bpp, ~5 bpe) */ + IA_CSS_DATA_FORMAT_RGB565, + /* 24 bit RGB, 3 planes (8 bit per element) */ + IA_CSS_DATA_FORMAT_RGB888, + /* 32 bit RGB-Alpha, 1 plane (8 bit per element) */ + IA_CSS_DATA_FORMAT_RGBA888, + + /* bpp bit raw, [[Gr, R];[B, Gb]] 1-plane (bpp == bpe) */ + IA_CSS_DATA_FORMAT_BAYER_GRBG, + /* bpp bit raw, [[R, Gr];[Gb, B]] 1-plane (bpp == bpe) */ + IA_CSS_DATA_FORMAT_BAYER_RGGB, + /* bpp bit raw, [[B, Gb];[Gr, R]] 1-plane (bpp == bpe) */ + IA_CSS_DATA_FORMAT_BAYER_BGGR, + /* bpp bit raw, [[Gb, B];[R, Gr]] 1-plane (bpp == bpe) */ + IA_CSS_DATA_FORMAT_BAYER_GBRG, + + /* bpp bit (NV12) YUV 420, Y, UV 2-plane derived 3-line, + * 2-Y, 1-UV (bpp/1.5 bpe): M420 format + */ + IA_CSS_DATA_FORMAT_YUV420_LINE, + /* Deprecated RAW, 1 plane */ + IA_CSS_DATA_FORMAT_RAW, + /* Deprecated RAW, 1 plane, packed */ + IA_CSS_DATA_FORMAT_RAW_PACKED, + /* Internal, for advanced ISP */ + IA_CSS_DATA_FORMAT_QPLANE6, + /* 1D byte stream, used for jpeg 1-plane */ + IA_CSS_DATA_FORMAT_BINARY_8, + /* Deprecated MIPI frame, 1D byte stream 1 plane */ + IA_CSS_DATA_FORMAT_MIPI, + /* 12 bit [[YY];[UYVY]] 1-plane interleaved 2-line + * (8 bit per element) + */ + IA_CSS_DATA_FORMAT_MIPI_YUV420_8, + /* 15 bit [[YY];[UYVY]] 1-plane interleaved 2-line + * (10 bit per element) + */ + IA_CSS_DATA_FORMAT_MIPI_YUV420_10, + /* 12 bit [[UY];[VY]] 1-plane interleaved 2-line (8 bit per element) */ + IA_CSS_DATA_FORMAT_MIPI_LEGACY_YUV420_8, + + /* Type 1-5 parameter, not fragmentable */ + IA_CSS_DATA_GENERIC_PARAMETER, + /* Video stabilisation Type 6 parameter, fragmentable */ + IA_CSS_DATA_DVS_PARAMETER, + /* Video stabilisation Type 6 parameter, coordinates */ + IA_CSS_DATA_DVS_COORDINATES, + /* Dead Pixel correction Type 6 parameter, fragmentable */ + IA_CSS_DATA_DPC_PARAMETER, + /* Lens Shading Correction Type 6 parameter, fragmentable */ + IA_CSS_DATA_LSC_PARAMETER, + /* 3A statistics output HI. */ + IA_CSS_DATA_S3A_STATISTICS_HI, + /* 3A statistics output LO. */ + IA_CSS_DATA_S3A_STATISTICS_LO, + /* histogram output */ + IA_CSS_DATA_S3A_HISTOGRAM, + /* GammaStar grid */ + IA_CSS_DATA_GAMMASTAR_GRID, + + /* Gr R B Gb Gr R B Gb in PIXELS (also called isys interleaved) */ + IA_CSS_DATA_FORMAT_BAYER_LINE_INTERLEAVED, + /* Gr R B Gb Gr R B Gb in VECTORS (VCC IMAGE, ISP NWAY dependent) */ + IA_CSS_DATA_FORMAT_BAYER_VECTORIZED, + /* Gr R Gr R ... | B Gb B Gb .. in VECTORS (ISP NWAY dependent) */ + IA_CSS_DATA_FORMAT_BAYER_GRBG_VECTORIZED, + + /* 16 bit YUV 420, Y even plane, Y uneven plane, + * UV plane vector interleaved + */ + IA_CSS_DATA_FORMAT_YUV420_VECTORIZED, + /* 16 bit YUV 420, YYUVYY vector interleaved */ + IA_CSS_DATA_FORMAT_YYUVYY_VECTORIZED, + + /* 12 bit YUV 420, Intel proprietary tiled format, TileYf */ + IA_CSS_DATA_FORMAT_NV12_TILEYF, + + /* Y samples appear first in the memory. All Y samples are an array + * of WORDs; even number of lines; + * Surface stride can be larger than the width of Y plane. + * This array is followed immediately by the chroma array. + * The chroma array is an array of WORDs, with interleaved U/V samples. + * If the interleaved U/V plane is addressed as an array of DWORDs, + * the least significant word contains the U sample. The stride of the + * interleaved U/V plane is equal to the Y plane. 10 bit data. + */ + IA_CSS_DATA_FORMAT_P010, + + /* MSB aligned version of P010 */ + IA_CSS_DATA_FORMAT_P010_MSB, + + /* P016/P012 Y samples appear first in the memory. + * All Y samples are an array of WORDs; even number of lines; + * Surface stride can be larger than the width of Y plane. + * This array is followed immediately by the chroma array. + * The chroma array is an array of WORDs, with interleaved U/V samples. + * If the interleaved U/V plane is addressed as an array of DWORDs, + * the least significant word contains the U sample. The stride of the + * interleaved U/V plane is equal to the Y plane. 12 bit data. + */ + IA_CSS_DATA_FORMAT_P016, + + /* MSB aligned version of P016 */ + IA_CSS_DATA_FORMAT_P016_MSB, + + /* TILEYYf representation of P010 */ + IA_CSS_DATA_FORMAT_P010_TILEYF, + + /* TILEYYf representation of P010 MSB aligned */ + IA_CSS_DATA_FORMAT_P010_MSB_TILEYF, + + /* TILEYYf representation of P016 */ + IA_CSS_DATA_FORMAT_P016_TILEYF, + + /* TILEYYf representation of P016 MSB aligned */ + IA_CSS_DATA_FORMAT_P016_MSB_TILEYF, + + /* Consists of L and R PDAF pixel pairs. + * L and R can be interleaved or not. 1-plane (bpp == bpe) */ + IA_CSS_DATA_FORMAT_PAF, + + IA_CSS_N_FRAME_FORMAT_TYPES +} ia_css_frame_format_type_t; + + +#endif /* __IA_CSS_PROGRAM_GROUP_DATA_DEFS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_storage_class.h new file mode 100644 index 000000000000..6a4e3a28e533 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_storage_class.h @@ -0,0 +1,28 @@ +/* +* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_DATA_STORAGE_CLASS_H +#define __IA_CSS_PSYS_DATA_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_PSYS_DATA_INLINE__ +#define IA_CSS_PSYS_DATA_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PSYS_DATA_STORAGE_CLASS_C +#else +#define IA_CSS_PSYS_DATA_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PSYS_DATA_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PSYS_DATA_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_trace.h new file mode 100644 index 000000000000..49afed9ce9df --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/interface/ia_css_psys_data_trace.h @@ -0,0 +1,102 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_DATA_TRACE_H +#define __IA_CSS_PSYS_DATA_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_DATA_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_DATA_TRACING_OVERRIDE)) + #define PSYS_DATA_TRACE_LEVEL_CONFIG PSYS_DATA_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* The module/sub-module specific trace setting below is used when + * PSYSAPI_DATA_TRACING_OVERRIDE is defined; otherwise the module + * trace setting is inherited + */ +#if (defined(PSYSAPI_DATA_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_DATA_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_DATA_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DATA_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DATA_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_DATA_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DATA_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DATA_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DATA_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_DATA_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DATA_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DATA_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_DATA Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_DATA_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_DATA_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_DATA_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_DATA_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_DATA_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_DATA_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_DATA_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYS_DATA_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data.c new file mode 100644 index 000000000000..edf3e55e6c39 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data.c @@ -0,0 +1,26 @@ +/* +* Support for
Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_data_storage_class.h" + +/* + * Functions to possibly inline + */ + +#ifdef __IA_CSS_PSYS_DATA_INLINE__ +STORAGE_CLASS_INLINE int +__ia_css_program_group_data_avoid_warning_on_empty_file(void) { return 0; } +#else /* __IA_CSS_PSYS_DATA_INLINE__ */ +#include "ia_css_program_group_data_impl.h" +#endif /* __IA_CSS_PSYS_DATA_INLINE__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data_impl.h new file mode 100644 index 000000000000..f08a057e4480 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/data/src/ia_css_program_group_data_impl.h @@ -0,0 +1,455 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_DATA_IMPL_H +#define __IA_CSS_PROGRAM_GROUP_DATA_IMPL_H + +#include "ia_css_program_group_data.h" +#include "ia_css_psys_data_trace.h" +#include "ia_css_terminal_defs.h" +#include /* for verifexit */ +#include /* for COMPILATION_ERROR_IF */ +#include /* for NOT_USED */ + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_print( + const ia_css_frame_t *frame, void *fid) +{ + int retval = -1; + + NOT_USED(fid); + + IA_CSS_TRACE_0(PSYSAPI_DATA, INFO, "ia_css_frame_print(): enter:\n"); + + verifexit(frame != NULL); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tbuffer = %d\n", ia_css_frame_get_buffer(frame)); + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tbuffer_state = %d\n", ia_css_frame_get_buffer_state(frame)); + /* IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, "\tbuffer_state = %s\n", + * ia_css_buffer_state_string(ia_css_frame_get_buffer_state(frame))); + */ + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tpointer_state = %d\n", ia_css_frame_get_pointer_state(frame)); + /* IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, "\tpointer_state = %s\n", + * ia_css_pointer_state_string(ia_css_frame_get_pointer_state(frame))); + */ + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tdata_bytes = %d\n", frame->data_bytes); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_print failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +const vied_vaddress_t *ia_css_frame_get_buffer_host_virtual_address( + const ia_css_frame_t *frame) { + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_buffer_host_virtual_address(): enter:\n"); + + verifexit(frame != NULL); + return &(frame->data); + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_buffer_host_virtual_address invalid argument\n"); + } + return NULL; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +vied_vaddress_t ia_css_frame_get_buffer( + const ia_css_frame_t *frame) +{ + vied_vaddress_t buffer = VIED_NULL; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_buffer(): enter:\n"); + + verifexit(frame != NULL); + buffer = frame->data; + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_buffer invalid argument\n"); + } + return buffer; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_buffer( + ia_css_frame_t *frame, + vied_vaddress_t buffer) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_buffer(): enter:\n"); + + verifexit(frame != NULL); + frame->data = buffer; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_set_buffer failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_get_data_index( + const ia_css_frame_t *frame) +{ + int data_index = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_data_index(): enter:\n"); + + verifexit(frame != NULL); + + data_index = frame->data_index; + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_data_index invalid argument\n"); + } + return data_index; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_data_index( + ia_css_frame_t *frame, + unsigned int data_index) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_data_index(): enter:\n"); + + verifexit(frame != NULL); + + frame->data_index = data_index; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_set_data_index 
failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_data_bytes( + ia_css_frame_t *frame, + unsigned int size) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_data_bytes(): enter:\n"); + + verifexit(frame != NULL); + frame->data_bytes = size; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_set_data_bytes failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +ia_css_buffer_state_t ia_css_frame_get_buffer_state( + const ia_css_frame_t *frame) +{ + ia_css_buffer_state_t buffer_state = IA_CSS_N_BUFFER_STATES; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_buffer_state(): enter:\n"); + + verifexit(frame != NULL); + buffer_state = frame->buffer_state; + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_buffer_state invalid argument\n"); + } + return buffer_state; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_buffer_state( + ia_css_frame_t *frame, + const ia_css_buffer_state_t buffer_state) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_buffer_state(): enter:\n"); + + verifexit(frame != NULL); + frame->buffer_state = buffer_state; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_set_buffer_state failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +ia_css_pointer_state_t ia_css_frame_get_pointer_state( + const ia_css_frame_t *frame) +{ + ia_css_pointer_state_t pointer_state = IA_CSS_N_POINTER_STATES; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_get_pointer_state(): enter:\n"); + + verifexit(frame != NULL); + pointer_state = frame->pointer_state; + +EXIT: + if (NULL == frame) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_get_pointer_state invalid argument\n"); + } + return pointer_state; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_set_pointer_state( + ia_css_frame_t *frame, + const ia_css_pointer_state_t pointer_state) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_set_pointer_state(): enter:\n"); + + verifexit(frame != NULL); + frame->pointer_state = pointer_state; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_set_pointer_state failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_frame_descriptor_print( + const ia_css_frame_descriptor_t *frame_descriptor, + void *fid) +{ + int retval = -1; + int i; + uint8_t frame_plane_count; + + NOT_USED(fid); + + IA_CSS_TRACE_0(PSYSAPI_DATA, INFO, + "ia_css_frame_descriptor_print(): enter:\n"); + + COMPILATION_ERROR_IF(IA_CSS_N_DATA_DIMENSION <= 0); + + verifexit(frame_descriptor != NULL); + + IA_CSS_TRACE_0(PSYSAPI_DATA, INFO, + "ia_css_frame_descriptor_print(): enter:\n"); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tframe_format_type = %d\n", + frame_descriptor->frame_format_type); + /* IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, "\tframe_format_type = %s\n", + * ia_css_frame_format_string(frame_descriptor->frame_format_type)); + */ + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tbpp = %d\n", frame_descriptor->bpp); + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tbpe = %d\n", frame_descriptor->bpe); + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tis_compressed = %d\n", frame_descriptor->is_compressed); + + frame_plane_count = IA_CSS_N_FRAME_PLANES; + /* 
frame_plane_count = + * ia_css_frame_plane_count(frame_descriptor->frame_format_type); + */ + + verifexit(frame_plane_count > 0); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tplane_offsets[%d]: [\n", frame_plane_count); + for (i = 0; i < (int)frame_plane_count - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", frame_descriptor->plane_offsets[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d ]\n", frame_descriptor->plane_offsets[i]); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tdimension[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", frame_descriptor->dimension[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d }\n", frame_descriptor->dimension[i]); + + COMPILATION_ERROR_IF(0 > (IA_CSS_N_DATA_DIMENSION - 2)); + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\tstride[%d] = {\n", IA_CSS_N_DATA_DIMENSION - 1); + i = 0; + if (IA_CSS_N_DATA_DIMENSION > 2) { + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 2; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", frame_descriptor->stride[i]); + } + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d }\n", frame_descriptor->stride[i]); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_frame_descriptor_print failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +int ia_css_fragment_descriptor_print( + const ia_css_fragment_descriptor_t *fragment_descriptor, + void *fid) +{ + int retval = -1; + int i; + + NOT_USED(fid); + + IA_CSS_TRACE_0(PSYSAPI_DATA, INFO, + "ia_css_fragment_descriptor_print(): enter:\n"); + + verifexit(fragment_descriptor != NULL); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "dimension[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", fragment_descriptor->dimension[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d }\n", fragment_descriptor->dimension[i]); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "index[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", fragment_descriptor->index[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d }\n", fragment_descriptor->index[i]); + + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "offset[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, + "\t%4d,\n", fragment_descriptor->offset[i]); + } + IA_CSS_TRACE_1(PSYSAPI_DATA, INFO, "\t%4d }\n", + fragment_descriptor->offset[i]); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DATA, ERROR, + "ia_css_fragment_descriptor_print failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +ia_css_frame_format_bitmap_t ia_css_frame_format_bit_mask( + const ia_css_frame_format_type_t frame_format_type) +{ + ia_css_frame_format_bitmap_t bit_mask = 0; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_format_bit_mask(): enter:\n"); + + if ((frame_format_type < IA_CSS_N_FRAME_FORMAT_TYPES) && + (frame_format_type < IA_CSS_FRAME_FORMAT_BITMAP_BITS)) { + bit_mask = (ia_css_frame_format_bitmap_t)1 << frame_format_type; + } else { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_frame_format_bit_mask invalid argument\n"); + } + + return bit_mask; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +ia_css_frame_format_bitmap_t ia_css_frame_format_bitmap_clear(void) +{ + 
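+	/*
+	 * Returns the empty bitmap. Illustrative usage sketch only: a
+	 * caller would typically build a set of supported formats from
+	 * the empty bitmap, e.g.
+	 *
+	 *	bitmap = ia_css_frame_format_bitmap_clear() |
+	 *		ia_css_frame_format_bit_mask(frame_format_type);
+	 */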
IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_frame_format_bitmap_clear(): enter:\n"); + + return 0; +} + +IA_CSS_PSYS_DATA_STORAGE_CLASS_C +size_t ia_css_sizeof_frame_descriptor( + const uint8_t plane_count) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DATA, VERBOSE, + "ia_css_sizeof_frame_descriptor(): enter:\n"); + + verifexit(plane_count > 0); + size += sizeof(ia_css_frame_descriptor_t); + size += plane_count * sizeof(uint32_t); + +EXIT: + if (0 == plane_count) { + IA_CSS_TRACE_0(PSYSAPI_DATA, WARNING, + "ia_css_sizeof_frame_descriptor invalid argument\n"); + } + return size; +} + +#endif /* __IA_CSS_PROGRAM_GROUP_DATA_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/cnlB0/ia_css_psys_transport_dep.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/cnlB0/ia_css_psys_transport_dep.h new file mode 100644 index 000000000000..7bb145c1b183 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/cnlB0/ia_css_psys_transport_dep.h @@ -0,0 +1,35 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TRANSPORT_DEP_H +#define __IA_CSS_PSYS_TRANSPORT_DEP_H + +/* + * The ID's of the Psys specific queues. + */ +typedef enum ia_css_psys_cmd_queues { + /**< The in-order queue for scheduled process groups */ + IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID = 0, + /**< The in-order queue for commands changing psys or + * process group state + */ + IA_CSS_PSYS_CMD_QUEUE_DEVICE_ID, + /**< An in-order queue for dedicated PPG commands */ + IA_CSS_PSYS_CMD_QUEUE_PPG0_COMMAND_ID, + /**< An in-order queue for dedicated PPG commands */ + IA_CSS_PSYS_CMD_QUEUE_PPG1_COMMAND_ID, + IA_CSS_N_PSYS_CMD_QUEUE_ID +} ia_css_psys_cmd_queue_ID_t; + +#endif /* __IA_CSS_PSYS_TRANSPORT_DEP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device.h new file mode 100644 index 000000000000..dc8fa531b11e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device.h @@ -0,0 +1,516 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_DEVICE_H +#define __IA_CSS_PSYS_DEVICE_H + +#include "ia_css_psys_init.h" +#include "ia_css_psys_transport.h" + +/*! 
\file */
+
+/** @file ia_css_psys_device.h
+ *
+ * Define the interface to open the psys specific communication layer
+ * instance
+ */
+
+#include /* vied_vaddress_t */
+
+#include
+#include
+
+#include
+#include
+
+#define IA_CSS_PSYS_STATE_READY_PATTERN		(0xF7F7F7F7)
+#define IA_CSS_PSYS_STATE_RUNNING_PATTERN	(0xE6E6E6E6)
+#define IA_CSS_PSYS_STATE_STARTING_PATTERN	(0xD5D5D5D5)
+#define IA_CSS_PSYS_STATE_STARTED_PATTERN	(0xC4C4C4C4)
+#define IA_CSS_PSYS_STATE_INITIALIZING_PATTERN	(0xB3B3B3B3)
+#define IA_CSS_PSYS_STATE_INITIALIZED_PATTERN	(0xA0A0A0A0)
+
+/*
+ * Defines the state of psys:
+ * - IA_CSS_PSYS_STATE_UNKNOWN = psys status is unknown (or not recognized)
+ * - IA_CSS_PSYS_STATE_INITIALIZING = some of the psys components are
+ *   not initialized yet
+ * - IA_CSS_PSYS_STATE_INITIALIZED = psys components are initialized
+ * - IA_CSS_PSYS_STATE_STARTING = some of the psys components are initialized
+ *   but not started yet
+ * - IA_CSS_PSYS_STATE_STARTED = psys components are started
+ * - IA_CSS_PSYS_STATE_RUNNING = some of the psys components are started
+ *   but not ready yet
+ * - IA_CSS_PSYS_STATE_READY = psys is ready
+ * The state of psys can be obtained by calling ia_css_psys_check_state()
+ */
+typedef enum ia_css_psys_state {
+	IA_CSS_PSYS_STATE_UNKNOWN = 0, /**< psys state is unknown */
+	/**< some of the psys components are not initialized yet */
+	IA_CSS_PSYS_STATE_INITIALIZING = IA_CSS_PSYS_STATE_INITIALIZING_PATTERN,
+	/**< psys components are initialized */
+	IA_CSS_PSYS_STATE_INITIALIZED = IA_CSS_PSYS_STATE_INITIALIZED_PATTERN,
+	/**< some of the psys components are not started yet */
+	IA_CSS_PSYS_STATE_STARTING = IA_CSS_PSYS_STATE_STARTING_PATTERN,
+	/**< psys components are started */
+	IA_CSS_PSYS_STATE_STARTED = IA_CSS_PSYS_STATE_STARTED_PATTERN,
+	/**< some of the psys components are not ready yet */
+	IA_CSS_PSYS_STATE_RUNNING = IA_CSS_PSYS_STATE_RUNNING_PATTERN,
+	/**< psys is ready */
+	IA_CSS_PSYS_STATE_READY = IA_CSS_PSYS_STATE_READY_PATTERN,
+} ia_css_psys_state_t;
+
+extern struct ia_css_syscom_context *psys_syscom;
+#if HAS_DUAL_CMD_CTX_SUPPORT
+extern struct ia_css_syscom_context *psys_syscom_secure;
+#endif
+
+/*! Print the syscom creation descriptor to file/stream
+
+ @param	config[in]			Psys syscom descriptor
+ @param	fid[out]			file/stream handle
+
+ @return < 0 on error
+ */
+extern int ia_css_psys_config_print(
+	const struct ia_css_syscom_config *config, void *fid);
+
+/*! Print the Psys syscom object to file/stream
+
+ @param	context[in]			Psys syscom object
+ @param	fid[out]			file/stream handle
+
+ @return < 0 on error
+ */
+extern int ia_css_psys_print(
+	const struct ia_css_syscom_context *context, void *fid);
+
+/*! Create the syscom creation descriptor
+
+ @return NULL on error
+ */
+extern struct ia_css_syscom_config *ia_css_psys_specify(void);
+
+#if HAS_DUAL_CMD_CTX_SUPPORT
+/*! Create the syscom creation descriptor for secure stream
+
+ @param	vtl0_addr_mask[in]	VTL0 address mask that will be stored in 'secure' ctx
+ @return NULL on error
+ */
+extern struct ia_css_syscom_config *ia_css_psys_specify_secure(unsigned int vtl0_addr_mask);
+#endif
+
+/*! Compute the size of storage required for allocating the Psys syscom object
+
+ @param	config[in]			Psys syscom descriptor
+
+ @return 0 on error
+ */
+extern size_t ia_css_sizeof_psys(
+	struct ia_css_syscom_config *config);
+
+#if HAS_DUAL_CMD_CTX_SUPPORT
+/*! Open (and map the storage for) the Psys syscom object
+   This is the same as ia_css_psys_open() excluding server start.
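+
+   A bring-up sketch, illustrative only (a single non-secure context is
+   assumed; error handling and the busy-wait policy are placeholders):
+
+	config = ia_css_psys_specify();
+	context = ia_css_psys_context_create(NULL, config);
+	ia_css_psys_open(config);
+	ia_css_psys_context_store_dmem(context, config);
+	while (!ia_css_psys_open_is_ready(context))
+		;
+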
+   Target for VTIO usage where multiple syscom objects need to be
+   created first before this API is invoked.
+
+ @param	buffer[in]			storage buffers for the syscom object
+					in the kernel virtual memory space and
+					its Psys mapped version
+ @param	config[in]			Psys syscom descriptor
+ @return NULL on error
+ */
+extern struct ia_css_syscom_context *ia_css_psys_context_create(
+	const struct ia_css_psys_buffer_s *buffer,
+	struct ia_css_syscom_config *config);
+
+/*! Store the parameters of the Psys syscom object in DMEM, so
+   they can be communicated with FW. This step needs to be invoked
+   after SPC starts in ia_css_psys_open(), so the SPC DMEM access blocker
+   programming already takes effect.
+
+ @param	context[in]			Psys syscom object
+ @param	config[in]			Psys syscom descriptor
+ @return 0 if successful
+ */
+extern int ia_css_psys_context_store_dmem(
+	struct ia_css_syscom_context *context,
+	struct ia_css_syscom_config *config);
+
+/*! Start PSYS Server. The Psys syscom object must have been created already.
+   Target for VTIO usage where multiple syscom objects need to be
+   created first before this API is invoked.
+ @param	config[in]			Psys syscom descriptor
+
+ @return 0 if psys open started successfully
+ */
+extern int ia_css_psys_open(
+	struct ia_css_syscom_config *config);
+#else
+/*! Open (and map the storage for) the Psys syscom object
+
+ @param	buffer[in]			storage buffers for the syscom object
+					in the kernel virtual memory space and
+					its Psys mapped version
+ @param	config[in]			Psys syscom descriptor
+
+ Precondition(1): The buffer must be large enough to hold the syscom object.
+ Its size must be computed with the function "ia_css_sizeof_psys()".
+ The buffer must be created in the kernel memory space.
+
+ Precondition(2): If buffer == NULL, the storage allocation and mapping
+ are performed in this function. Config must hold the handle to the Psys
+ virtual memory space.
+
+ Postcondition: The context is initialised in the provided/created buffer.
+ The syscom context pointer is the kernel space handle to the syscom object.
+
+ @return NULL on error
+ */
+extern struct ia_css_syscom_context *ia_css_psys_open(
+	const struct ia_css_psys_buffer_s *buffer,
+	struct ia_css_syscom_config *config);
+#endif /* HAS_DUAL_CMD_CTX_SUPPORT */
+
+/*! Completes the psys open procedure. Must be called repeatedly
+    until it succeeds or the driver determines the boot sequence has failed.
+
+ @param	context[in]			Psys syscom object
+
+ @return false if psys open has not completed successfully
+ */
+extern bool ia_css_psys_open_is_ready(
+	struct ia_css_syscom_context *context);
+
+#if HAS_DUAL_CMD_CTX_SUPPORT
+/*! Request close of a PSYS context
+ * The functionality is the same as ia_css_psys_close(), which closes
+ * the PSYS syscom object.
+ * Counterpart of ia_css_psys_context_create()
+ * @param context[in]: Psys context
+ * @return NULL if close is successful, the context otherwise
+ */
+extern struct ia_css_syscom_context *ia_css_psys_context_destroy(
+	struct ia_css_syscom_context *context);
+
+/*! Request close of a PSYS device for VTIO case
+ * @param None
+ * @return 0 if successful
+ */
+extern int ia_css_psys_close(void);
+#else
+/*! Request close of a PSYS context
+ * @param context[in]: Psys context
+ * @return NULL if close is successful, the context otherwise
+ */
+extern struct ia_css_syscom_context *ia_css_psys_close(
+	struct ia_css_syscom_context *context);
+#endif /* HAS_DUAL_CMD_CTX_SUPPORT */
+
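+/*
+ * Illustrative only: in the non-dual-context build the open/close pair
+ * above is expected to be used roughly as follows (hypothetical caller
+ * code, error handling omitted; a NULL result indicates a successful
+ * close):
+ *
+ *	context = ia_css_psys_open(NULL, ia_css_psys_specify());
+ *	while (!ia_css_psys_open_is_ready(context))
+ *		;
+ *	...
+ *	context = ia_css_psys_close(context);
+ */
+
+/*!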
Unmap and free the storage of the PSYS context + * @param context[in] Psys context + * @param force[in] Force release even if device is busy + * @return 0 if release is successful + * EINVAL if context is invalid + * EBUSY if device is not yet idle, and force==0 + */ +extern int ia_css_psys_release( + struct ia_css_syscom_context *context, + bool force); + +/*! Checks the state of the Psys syscom object + + @param context[in] Psys syscom object + + @return State of the syscom object + */ +extern ia_css_psys_state_t ia_css_psys_check_state( + struct ia_css_syscom_context *context); + +/*!Indicate if the designated cmd queue in the Psys syscom object is full + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + @return false if the cmd queue is not full or on error + */ + +extern bool ia_css_is_psys_cmd_queue_full( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Indicate if the designated cmd queue in the Psys syscom object is notfull + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + @return false if the cmd queue is full on error + */ +extern bool ia_css_is_psys_cmd_queue_not_full( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Indicate if the designated cmd queue in the Psys syscom object holds N space + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + @param N[in] Number of messages + + @return false if the cmd queue space is unavailable or on error + */ +extern bool ia_css_has_psys_cmd_queue_N_space( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const unsigned int N); + +/*!Return the free space count in the designated cmd queue in the + * Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + @return the space, < 0 on error + */ +extern int ia_css_psys_cmd_queue_get_available_space( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Indicate if there are any messages pending in the Psys syscom + * object event queues + + @param context[in] Psys syscom object + + @return false if there are no messages or on error + */ +extern bool ia_css_any_psys_event_queue_not_empty( + struct ia_css_syscom_context *context); + +/*!Indicate if the designated event queue in the Psys syscom object is empty + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + + @return false if the event queue is not empty or on error + */ +extern bool ia_css_is_psys_event_queue_empty( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +/*!Indicate if the designated event queue in the Psys syscom object is not empty + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + + @return false if the receive queue is empty or on error + */ +extern bool ia_css_is_psys_event_queue_not_empty( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +/*!Indicate if the designated event queue + * in the Psys syscom object holds N items + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + @param N[in] Number of messages + + @return false if the event queue has insufficient messages + available or on error +*/ +extern bool ia_css_has_psys_event_queue_N_msgs( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + const unsigned int N); + +/*!Return the message count in the designated event 
queue in the
+ * Psys syscom object
+
+ @param	context[in]			Psys syscom object
+ @param	id[in]				Psys syscom event queue ID
+
+ @return the messages, < 0 on error
+ */
+extern int ia_css_psys_event_queue_get_available_msgs(
+	struct ia_css_syscom_context *context,
+	ia_css_psys_event_queue_ID_t id);
+
+/*! Send (pass by value) a command on a queue in the Psys syscom object
+
+ @param	context[in]			Psys syscom object
+ @param	id[in]				Psys syscom cmd queue ID
+ @param	cmd_msg_buffer[in]		pointer to the command message buffer
+
+ Precondition: The command message buffer must be large enough
+ to hold the command
+
+ Postcondition: Either 0 or 1 commands have been sent
+
+ Note: The message size is fixed and determined on creation
+
+ @return the number of sent commands (1), <= 0 on error
+ */
+extern int ia_css_psys_cmd_queue_send(
+	struct ia_css_syscom_context *context,
+	ia_css_psys_cmd_queue_ID_t id,
+	const void *cmd_msg_buffer);
+
+/*! Send (pass by value) N commands on a queue in the Psys syscom object
+
+ @param	context[in]			Psys syscom object
+ @param	id[in]				Psys syscom cmd queue ID
+ @param	cmd_msg_buffer[in]		Pointer to the command message buffer
+ @param	N[in]				Number of commands
+
+ Precondition: The command message buffer must be large enough
+ to hold the commands
+
+ Postcondition: Either 0 or up to and including N commands have been sent
+
+ Note: The message size is fixed and determined on creation
+
+ @return the number of sent commands, <= 0 on error
+ */
+extern int ia_css_psys_cmd_queue_send_N(
+	struct ia_css_syscom_context *context,
+	ia_css_psys_cmd_queue_ID_t id,
+	const void *cmd_msg_buffer,
+	const unsigned int N);
+
+/*! Receive (pass by value) an event from an event queue in the
+ * Psys syscom object
+
+ @param	context[in]			Psys syscom object
+ @param	id[in]				Psys syscom event queue ID
+ @param	event_msg_buffer[out]		pointer to the event message buffer
+
+ Precondition: The event message buffer must be large enough to hold the event
+
+ Postcondition: Either 0 or 1 events have been received
+
+ Note: The event size is fixed and determined on creation
+
+ @return the number of received events (1), <= 0 on error
+ */
+extern int ia_css_psys_event_queue_receive(
+	struct ia_css_syscom_context *context,
+	ia_css_psys_event_queue_ID_t id,
+	void *event_msg_buffer);
+
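+/*
+ * A minimal command/event round trip, sketched under the assumption of
+ * an opened context and the queue IDs from ia_css_psys_transport_dep.h
+ * and ia_css_psys_transport.h (illustrative only, no error handling):
+ *
+ *	struct ia_css_psys_cmd_s cmd = { command, msg, handle };
+ *	struct ia_css_psys_event_s event;
+ *
+ *	if (ia_css_is_psys_cmd_queue_not_full(context,
+ *			IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID))
+ *		ia_css_psys_cmd_queue_send(context,
+ *			IA_CSS_PSYS_CMD_QUEUE_COMMAND_ID, &cmd);
+ *
+ *	while (!ia_css_is_psys_event_queue_not_empty(context,
+ *			IA_CSS_PSYS_EVENT_QUEUE_MAIN_ID))
+ *		;
+ *	ia_css_psys_event_queue_receive(context,
+ *			IA_CSS_PSYS_EVENT_QUEUE_MAIN_ID, &event);
+ */
+
+/*!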
Receive (pass by value) N events from an event queue in the + * Psys syscom object + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + @param event_msg_buffer[out] pointer to the event message buffer + @param N[in] Number of events + + Precondition: The event buffer must be large enough to hold the events + + Postcondition: Either 0 or up to and including N events have been received + + Note: The message size is fixed and determined on creation + + @return the number of received event messages, <= 0 on error + */ +extern int ia_css_psys_event_queue_receive_N( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + void *event_msg_buffer, + const unsigned int N); + + +/* + * Access functions to query the object stats + */ + + +/*!Return the size of the Psys syscom object + + @param context[in] Psys syscom object + + @return 0 on error + */ +extern size_t ia_css_psys_get_size( + const struct ia_css_syscom_context *context); + +/*!Return the number of cmd queues in the Psys syscom object + + @param context[in] Psys syscom object + + @return 0 on error + */ +extern unsigned int ia_css_psys_get_cmd_queue_count( + const struct ia_css_syscom_context *context); + +/*!Return the number of event queues in the Psys syscom object + + @param context[in] Psys syscom object + + @return 0 on error + */ +extern unsigned int ia_css_psys_get_event_queue_count( + const struct ia_css_syscom_context *context); + +/*!Return the size of the indicated Psys command queue + + @param context[in] Psys syscom object + @param id[in] Psys syscom cmd queue ID + + Note: The queue size is expressed in the number of fields + + @return 0 on error + */ +extern size_t ia_css_psys_get_cmd_queue_size( + const struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Return the size of the indicated Psys event queue + + @param context[in] Psys syscom object + @param id[in] Psys syscom event queue ID + + Note: The queue size is expressed in the number of fields + + @return 0 on error + */ +extern size_t ia_css_psys_get_event_queue_size( + const struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +/*!Return the command message size of the indicated Psys command queue + + @param context[in] Psys syscom object + + Note: The message size is expressed in uint8_t + + @return 0 on error + */ +extern size_t ia_css_psys_get_cmd_msg_size( + const struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id); + +/*!Return the event message size of the indicated Psys event queue + + @param context[in] Psys syscom object + + Note: The message size is expressed in uint8_t + + @return 0 on error + */ +extern size_t ia_css_psys_get_event_msg_size( + const struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id); + +#endif /* __IA_CSS_PSYS_DEVICE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device_trace.h new file mode 100644 index 000000000000..8e5899bc66db --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_device_trace.h @@ -0,0 +1,103 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_DEVICE_TRACE_H +#define __IA_CSS_PSYS_DEVICE_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_DEVICE_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_DEVICE_TRACING_OVERRIDE)) + #define PSYS_DEVICE_TRACE_LEVEL_CONFIG \ + PSYS_DEVICE_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_DEVICE_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_DEVICE_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_DEVICE_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_DEVICE_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DEVICE_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DEVICE_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_DEVICE_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DEVICE_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DEVICE_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_DEVICE_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DEVICE_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DEVICE_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_DATA Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_DEVICE_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_DEVICE_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_DEVICE_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_DEVICE_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_DEVICE_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_DEVICE_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define 
PSYSAPI_DEVICE_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYSAPI_DEVICE_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_init.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_init.h new file mode 100644 index 000000000000..1120b357632c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_init.h @@ -0,0 +1,37 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_INIT_H +#define __IA_CSS_PSYS_INIT_H + +#include /* vied_vaddress_t */ + +/* Init parameters passed to the fw on device open (non secure mode) */ +typedef struct ia_css_psys_server_init { + /* These members are used in PSS only and will be removed */ + /* Shared memory host address of pkg dir */ + unsigned long long host_ddr_pkg_dir; + /* Address of pkg_dir structure in DDR */ + vied_vaddress_t ddr_pkg_dir_address; + /* Size of Package dir in DDR */ + uint32_t pkg_dir_size; + + /* Prefetch configiration */ + /* enable prefetching on SPC, SPP0 and SPP1 */ + uint32_t icache_prefetch_sp; + /* enable prefetching on ISP0..N */ + uint32_t icache_prefetch_isp; +} ia_css_psys_server_init_t; + +#endif /* __IA_CSS_PSYS_INIT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_transport.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_transport.h new file mode 100644 index 000000000000..e0d1e935c221 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/interface/ia_css_psys_transport.h @@ -0,0 +1,92 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TRANSPORT_H +#define __IA_CSS_PSYS_TRANSPORT_H + +#include /* ia_css_psys_cmd_queues */ +#include /* vied_vaddress_t */ + +#include + +typedef enum ia_css_psys_event_queues { + /**< The in-order queue for event returns */ + IA_CSS_PSYS_EVENT_QUEUE_MAIN_ID, + IA_CSS_N_PSYS_EVENT_QUEUE_ID +} ia_css_psys_event_queue_ID_t; + +typedef enum ia_css_psys_event_types { + /**< No error to report. */ + IA_CSS_PSYS_EVENT_TYPE_SUCCESS = 0, + /**< Unknown unhandled error */ + IA_CSS_PSYS_EVENT_TYPE_UNKNOWN_ERROR = 1, + /* Retrieving remote object: */ + /**< Object ID not found */ + IA_CSS_PSYS_EVENT_TYPE_RET_REM_OBJ_NOT_FOUND = 2, + /**< Objects too big, or size is zero. 
*/ + IA_CSS_PSYS_EVENT_TYPE_RET_REM_OBJ_TOO_BIG = 3, + /**< Failed to load whole process group from tproxy/dma */ + IA_CSS_PSYS_EVENT_TYPE_RET_REM_OBJ_DDR_TRANS_ERR = 4, + /**< The proper package could not be found */ + IA_CSS_PSYS_EVENT_TYPE_RET_REM_OBJ_NULL_PKG_DIR_ADDR = 5, + /* Process group: */ + /**< Failed to run, error while loading frame */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAME_ERR = 6, + /**< Failed to run, error while loading fragment */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAGMENT_ERR = 7, + /**< The process count of the process group is zero */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_COUNT_ZERO = 8, + /**< Process(es) initialization */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_INIT_ERR = 9, + /**< Aborted (after host request) */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_ABORT = 10, + /**< NULL pointer in the process group */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_NULL = 11, + /**< Process group validation failed */ + IA_CSS_PSYS_EVENT_TYPE_PROC_GRP_VALIDATION_ERR = 12 +} ia_css_psys_event_type_t; + +#define IA_CSS_PSYS_CMD_BITS 64 +struct ia_css_psys_cmd_s { + /**< The command issued to the process group */ + uint16_t command; + /**< Message field of the command */ + uint16_t msg; + /**< The context reference (process group/buffer set/...) */ + uint32_t context_handle; +}; + +#define IA_CSS_PSYS_EVENT_BITS 128 +struct ia_css_psys_event_s { + /**< The (return) status of the command issued to + * the process group this event refers to + */ + uint16_t status; + /**< The command issued to the process group this event refers to */ + uint16_t command; + /**< The context reference (process group/buffer set/...) */ + uint32_t context_handle; + /**< This token (size) must match the token registered + * in a process group + */ + uint64_t token; +}; + +struct ia_css_psys_buffer_s { + /**< The in-order queue for scheduled process groups */ + void *host_buffer; + vied_vaddress_t *isp_buffer; +}; + +#endif /* __IA_CSS_PSYS_TRANSPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/src/ia_css_psys_device.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/src/ia_css_psys_device.c new file mode 100644 index 000000000000..c3ed98add7d8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/device/src/ia_css_psys_device.c @@ -0,0 +1,854 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#include "ia_css_psys_device.h"
+#include "ia_css_psys_device_trace.h"
+#include "ia_css_psys_init.h"
+#include "regmem_access.h"
+
+#include
+#include
+#include
+
+#include "ia_css_cell.h"
+
+#define IA_CSS_PSYS_CMD_QUEUE_SIZE	0x20
+#define IA_CSS_PSYS_EVENT_QUEUE_SIZE	0x40
+
+static struct ia_css_syscom_queue_config ia_css_psys_cmd_queue_cfg[IA_CSS_N_PSYS_CMD_QUEUE_ID];
+
+static struct ia_css_syscom_queue_config
+	ia_css_psys_event_queue_cfg[IA_CSS_N_PSYS_EVENT_QUEUE_ID] = {
+	{IA_CSS_PSYS_EVENT_QUEUE_SIZE, IA_CSS_PSYS_EVENT_BITS/8},
+};
+
+static struct ia_css_syscom_config psys_syscom_config;
+struct ia_css_syscom_context *psys_syscom;
+#if HAS_DUAL_CMD_CTX_SUPPORT
+static struct ia_css_syscom_config psys_syscom_config_secure;
+struct ia_css_syscom_context *psys_syscom_secure;
+#endif
+static bool external_alloc = true;
+
+int ia_css_psys_config_print(
+	const struct ia_css_syscom_config *config,
+	void *fh)
+{
+	int retval = -1;
+
+	NOT_USED(fh);
+
+	IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_config_print(): enter:\n");
+
+	verifexit(config != NULL);
+
+	retval = 0;
+EXIT:
+	if (retval != 0) {
+		IA_CSS_TRACE_1(PSYSAPI_DEVICE, ERROR,
+			"ia_css_psys_config_print failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+int ia_css_psys_print(
+	const struct ia_css_syscom_context *context,
+	void *fh)
+{
+	int retval = -1;
+
+	NOT_USED(fh);
+
+	IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_print(): enter:\n");
+
+	verifexit(context != NULL);
+
+	retval = 0;
+EXIT:
+	if (retval != 0) {
+		IA_CSS_TRACE_1(PSYSAPI_DEVICE, ERROR,
+			"ia_css_psys_print failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+static void set_syscom_config(struct ia_css_syscom_config *config)
+{
+	int i;
+
+	config->num_input_queues = IA_CSS_N_PSYS_CMD_QUEUE_ID;
+	config->num_output_queues = IA_CSS_N_PSYS_EVENT_QUEUE_ID;
+	/* The number of queues differs between platforms,
+	 * so the array is initialized here
+	 */
+	for (i = 0; i < IA_CSS_N_PSYS_CMD_QUEUE_ID; i++) {
+		ia_css_psys_cmd_queue_cfg[i].queue_size = IA_CSS_PSYS_CMD_QUEUE_SIZE;
+		ia_css_psys_cmd_queue_cfg[i].token_size = IA_CSS_PSYS_CMD_BITS/8;
+	}
+	config->input = ia_css_psys_cmd_queue_cfg;
+	config->output = ia_css_psys_event_queue_cfg;
+	config->vtl0_addr_mask = 0;
+}
+
+struct ia_css_syscom_config *ia_css_psys_specify(void)
+{
+	struct ia_css_syscom_config *config = &psys_syscom_config;
+
+	IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_specify(): enter:\n");
+	set_syscom_config(config);
+	config->secure = false;
+
+	return config;
+}
+
+#if HAS_DUAL_CMD_CTX_SUPPORT
+struct ia_css_syscom_config *ia_css_psys_specify_secure(unsigned int vtl0_addr_mask)
+{
+	struct ia_css_syscom_config *config = &psys_syscom_config_secure;
+
+	IA_CSS_TRACE_1(PSYSAPI_DEVICE, INFO, "ia_css_psys_specify_secure(mask %#x): enter:\n", vtl0_addr_mask);
+	set_syscom_config(config);
+	config->secure = true;
+	config->vtl0_addr_mask = vtl0_addr_mask;
+	return config;
+}
+#endif
+
+size_t ia_css_sizeof_psys(
+	struct ia_css_syscom_config *config)
+{
+	size_t size = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE,
+		"ia_css_sizeof_psys(): enter:\n");
+
+	NOT_USED(config);
+
+	return size;
+}
+
+/* Internal function to create syscom_context */
+static struct ia_css_syscom_context *psys_context_create(
+	const struct ia_css_psys_buffer_s *buffer,
+	struct ia_css_syscom_config *config)
+{
+	struct ia_css_syscom_context *context;
+
+	IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "psys_context_create(): enter:\n");
+
+	if (config == NULL)
+		goto EXIT;
+
+	if (buffer == NULL) {
+		/*
Allocate locally */ + external_alloc = false; + } + + /* + * Here we would like to pass separately the sub-system ID + * and optionally the user pointer to be mapped, depending on + * where this open is called, and which virtual memory handles + * we see here. + */ + /* context = ia_css_syscom_open(get_virtual_memory_handle(vied_psys_ID), + * buffer, config); + */ + context = ia_css_syscom_open(config, NULL); + if (context == NULL) + goto EXIT; + + return context; + +EXIT: + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, "psys_context_create failed\n"); + return NULL; +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +struct ia_css_syscom_context *ia_css_psys_context_create( + const struct ia_css_psys_buffer_s *buffer, + struct ia_css_syscom_config *config) +{ + return psys_context_create(buffer, config); +} + +/* push context information to DMEM for FW to access */ +int ia_css_psys_context_store_dmem( + struct ia_css_syscom_context *context, + struct ia_css_syscom_config *config) +{ + return ia_css_syscom_store_dmem(context, config->ssid, config->vtl0_addr_mask); +} +#endif + +/* Internal function to start psys server */ +static int psys_start_server( + struct ia_css_syscom_config *config) +{ + ia_css_psys_server_init_t *server_config; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "psys_start_server(): enter:\n"); + + /* Configure SPC icache prefetching and start SPC */ + server_config = (ia_css_psys_server_init_t *)config->specific_addr; + IA_CSS_TRACE_1(PSYSAPI_DEVICE, INFO, "SPC prefetch: %d\n", + server_config->icache_prefetch_sp); + ia_css_cell_start_prefetch(config->ssid, SPC0, + server_config->icache_prefetch_sp); + return 0; +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +int ia_css_psys_open( + struct ia_css_syscom_config *config) +{ + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_open(): enter:\n"); + return psys_start_server(config); +} +#else +struct ia_css_syscom_context *ia_css_psys_open( + const struct ia_css_psys_buffer_s *buffer, + struct ia_css_syscom_config *config) +{ + struct ia_css_syscom_context *context; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "ia_css_psys_open(): enter:\n"); + + context = psys_context_create(buffer, config); + + /* Configure SPC icache prefetching and start SPC */ + psys_start_server(config); + + return context; +} +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +bool ia_css_psys_open_is_ready( + struct ia_css_syscom_context *context) +{ + int retval = -1; + bool ready = 0; + unsigned int i; + int syscom_retval; + + verifexit(context != NULL); + + for (i = 0; i < IA_CSS_N_PSYS_CMD_QUEUE_ID; i++) { + syscom_retval = ia_css_syscom_send_port_open(context, i); + if (syscom_retval != 0) { + if (syscom_retval == FW_ERROR_BUSY) { + /* Do not print error */ + retval = 0; + } + /* Not ready yet */ + goto EXIT; + } + } + + for (i = 0; i < IA_CSS_N_PSYS_EVENT_QUEUE_ID; i++) { + syscom_retval = ia_css_syscom_recv_port_open(context, i); + if (syscom_retval != 0) { + if (syscom_retval == FW_ERROR_BUSY) { + /* Do not print error */ + retval = 0; + } + /* Not ready yet */ + goto EXIT; + } + } + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, + "ia_css_psys_open_is_ready(): complete:\n"); + + /* If this point reached, do not print error */ + retval = 0; + /* If this point reached, ready */ + ready = 1; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_open_is_ready failed\n"); + } + return ready; +} + +/* Internal function to close syscom_context */ +static struct ia_css_syscom_context *psys_context_destroy( + struct ia_css_syscom_context *context) +{ + /* Success: return NULL, 
Error: return context pointer value + * Intention is to change return type to int (errno), + * see commented values. + */ + + unsigned int i; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, "psys_context_destroy(): enter:\n"); + + /* NULL pointer check disabled, since there is no proper return value */ + + for (i = 0; i < IA_CSS_N_PSYS_CMD_QUEUE_ID; i++) { + if (ia_css_syscom_send_port_close(context, i) != 0) + return context; /* EINVAL */ + } + + for (i = 0; i < IA_CSS_N_PSYS_EVENT_QUEUE_ID; i++) { + if (ia_css_syscom_recv_port_close(context, i) != 0) + return context; /* EINVAL */ + } + + /* request device close */ + if (ia_css_syscom_close(context) != 0) + return context; /* EBUSY */ + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, + "psys_context_destroy(): leave: OK\n"); + return NULL; +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +struct ia_css_syscom_context *ia_css_psys_context_destroy( + struct ia_css_syscom_context *context) +{ + return psys_context_destroy(context); +} + +int ia_css_psys_close() +{ + /* Intentionally left blank for now since syscom objects should have + * been destroyed already by prior ia_css_psys_context_destroy() calls. + */ + return 0; +} +#else +struct ia_css_syscom_context *ia_css_psys_close( + struct ia_css_syscom_context *context) +{ + return psys_context_destroy(context); +} +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +int ia_css_psys_release( + struct ia_css_syscom_context *context, + bool force) +{ + if (context == NULL) + return -EFAULT; + + /* try to free resources */ + if (ia_css_syscom_release(context, force) != 0) + return -EBUSY; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, INFO, + "ia_css_psys_release(): leave: OK\n"); + return 0; +} + +ia_css_psys_state_t ia_css_psys_check_state( + struct ia_css_syscom_context *context) +{ + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_check_state(): enter:\n"); + + NOT_USED(context); + + /* For the time being, return the READY state to be used by SPC test */ + return IA_CSS_PSYS_STATE_READY; +} + +bool ia_css_is_psys_cmd_queue_full( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + bool is_full = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_is_psys_cmd_queue_full(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_send_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + is_full = (num_tokens == 0); + retval = 0; +EXIT: + if (retval != 0) { + is_full = true; + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_is_psys_cmd_queue_full failed\n"); + } + return is_full; +} + +bool ia_css_is_psys_cmd_queue_not_full( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + bool is_not_full = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_is_psys_cmd_queue_not_full(): enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_send_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + is_not_full = (num_tokens != 0); + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_is_psys_cmd_queue_not_full failed\n"); + } + return is_not_full; +} + +bool ia_css_has_psys_cmd_queue_N_space( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const unsigned int N) +{ + bool has_N_space = false; + int num_tokens; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_has_psys_cmd_queue_N_space(): enter:\n"); + verifexit(context != NULL); + + 
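+	/*
+	 * Illustrative pairing only: a caller typically guards a burst
+	 * submission with this check, e.g.
+	 *
+	 *	if (ia_css_has_psys_cmd_queue_N_space(context, id, n))
+	 *		ia_css_psys_cmd_queue_send_N(context, id, cmds, n);
+	 */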
+	num_tokens = ia_css_syscom_send_port_available(context,
+		(unsigned int)id);
+	verifexit(num_tokens >= 0);
+
+	has_N_space = ((unsigned int)num_tokens >= N);
+	retval = 0;
+EXIT:
+	if (retval != 0) {
+		IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR,
+			"ia_css_has_psys_cmd_queue_N_space failed\n");
+	}
+	return has_N_space;
+}
+
+int ia_css_psys_cmd_queue_get_available_space(
+	struct ia_css_syscom_context *context,
+	ia_css_psys_cmd_queue_ID_t id)
+{
+	int N_space = -1;
+	int num_tokens;
+
+	IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE,
+		"ia_css_psys_cmd_queue_get_available_space(): enter:\n");
+	verifexit(context != NULL);
+
+	num_tokens = ia_css_syscom_send_port_available(context,
+		(unsigned int)id);
+	verifexit(num_tokens >= 0);
+
+	N_space = (int)(num_tokens);
+EXIT:
+	if (N_space < 0) {
+		IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR,
+			"ia_css_psys_cmd_queue_get_available_space failed\n");
+	}
+	return N_space;
+}
+
+bool ia_css_any_psys_event_queue_not_empty(
+	struct ia_css_syscom_context *context)
+{
+	ia_css_psys_event_queue_ID_t i;
+	bool any_msg = false;
+
+	IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE,
+		"ia_css_any_psys_event_queue_not_empty(): enter:\n");
+	verifexit(context != NULL);
+
+	for (i = (ia_css_psys_event_queue_ID_t)0;
+		i < IA_CSS_N_PSYS_EVENT_QUEUE_ID; i++) {
+		any_msg =
+			any_msg || ia_css_is_psys_event_queue_not_empty(context, i);
+	}
+
+EXIT:
+	return any_msg;
+}
+
+bool ia_css_is_psys_event_queue_empty(
+	struct ia_css_syscom_context *context,
+	ia_css_psys_event_queue_ID_t id)
+{
+	bool is_empty = false;
+	int num_tokens;
+	int retval = -1;
+
+	IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE,
+		"ia_css_is_psys_event_queue_empty(): enter:\n");
+	verifexit(context != NULL);
+
+	num_tokens = ia_css_syscom_recv_port_available(context, (unsigned int)id);
+	verifexit(num_tokens >= 0);
+
+	is_empty = (num_tokens == 0);
+	retval = 0;
+EXIT:
+	if (retval != 0) {
+		is_empty = true;
+		IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR,
+			"ia_css_is_psys_event_queue_empty failed\n");
+	}
+	return is_empty;
+}
+
+bool ia_css_is_psys_event_queue_not_empty(
+	struct ia_css_syscom_context *context,
+	ia_css_psys_event_queue_ID_t id)
+{
+	bool is_not_empty = false;
+	int num_tokens;
+	int retval = -1;
+
+	IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE,
+		"ia_css_is_psys_event_queue_not_empty(): enter:\n");
+	verifexit(context != NULL);
+
+	num_tokens = ia_css_syscom_recv_port_available(context,
+		(unsigned int)id);
+	verifexit(num_tokens >= 0);
+
+	is_not_empty = (num_tokens != 0);
+	retval = 0;
+EXIT:
+	if (retval != 0) {
+		IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR,
+			"ia_css_is_psys_event_queue_not_empty failed\n");
+	}
+	return is_not_empty;
+}
+
+bool ia_css_has_psys_event_queue_N_msgs(
+	struct ia_css_syscom_context *context,
+	ia_css_psys_event_queue_ID_t id,
+	const unsigned int N)
+{
+	bool has_N_msgs = false;
+	int num_tokens;
+	int retval = -1;
+
+	IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE,
+		"ia_css_has_psys_event_queue_N_msgs(): enter:\n");
+	verifexit(context != NULL);
+
+	num_tokens = ia_css_syscom_recv_port_available(context,
+		(unsigned int)id);
+	verifexit(num_tokens >= 0);
+
+	has_N_msgs = ((unsigned int)num_tokens >= N);
+	retval = 0;
+EXIT:
+	if (retval != 0) {
+		IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR,
+			"ia_css_has_psys_event_queue_N_msgs failed\n");
+	}
+	return has_N_msgs;
+}
+
+int ia_css_psys_event_queue_get_available_msgs(
+	struct ia_css_syscom_context *context,
+	ia_css_psys_event_queue_ID_t id)
+{
+	int N_msgs = -1;
+	int num_tokens;
+
+	IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE,
+		"ia_css_psys_event_queue_get_available_msgs(): 
enter:\n"); + verifexit(context != NULL); + + num_tokens = ia_css_syscom_recv_port_available(context, + (unsigned int)id); + verifexit(num_tokens >= 0); + + N_msgs = (int)(num_tokens); +EXIT: + if (N_msgs < 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_event_queue_get_available_msgs failed\n"); + } + return N_msgs; +} + +int ia_css_psys_cmd_queue_send( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const void *cmd_msg_buffer) +{ + int count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_cmd_queue_send(): enter:\n"); + verifexit(context != NULL); + + verifexit(context != NULL); + /* The ~full check fails on receive queues */ + verifexit(ia_css_is_psys_cmd_queue_not_full(context, id)); + verifexit(cmd_msg_buffer != NULL); + + verifexit(ia_css_syscom_send_port_transfer(context, (unsigned int)id, + cmd_msg_buffer) >= 0); + + count = 1; +EXIT: + if (count == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_cmd_queue_send failed\n"); + } + return count; +} + +int ia_css_psys_cmd_queue_send_N( + struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id, + const void *cmd_msg_buffer, + const unsigned int N) +{ + struct ia_css_psys_cmd_s *cmd_msg_buffer_loc = + (struct ia_css_psys_cmd_s *)cmd_msg_buffer; + int count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_cmd_queue_send_N(): enter:\n"); + verifexit(context != NULL); + + for (count = 0; count < (int)N; count++) { + int count_loc = ia_css_psys_cmd_queue_send(context, id, + (void *)(&cmd_msg_buffer_loc[count])); + + verifexit(count_loc == 1); + } + +EXIT: + if ((unsigned int) count < N) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_cmd_queue_send_N failed\n"); + } + return count; +} + +int ia_css_psys_event_queue_receive( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + void *event_msg_buffer) +{ + int count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_event_queue_receive(): enter:\n"); + + verifexit(context != NULL); + /* The ~empty check fails on send queues */ + verifexit(ia_css_is_psys_event_queue_not_empty(context, id)); + verifexit(event_msg_buffer != NULL); + + verifexit(ia_css_syscom_recv_port_transfer(context, (unsigned int)id, + event_msg_buffer) >= 0); + + count = 1; +EXIT: + if (count == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_event_queue_receive failed\n"); + } + return count; +} + +int ia_css_psys_event_queue_receive_N( + struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id, + void *event_msg_buffer, + const unsigned int N) +{ + struct ia_css_psys_event_s *event_msg_buffer_loc; + int count; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_event_queue_receive_N(): enter:\n"); + + event_msg_buffer_loc = (struct ia_css_psys_event_s *)event_msg_buffer; + + for (count = 0; count < (int)N; count++) { + int count_loc = ia_css_psys_event_queue_receive(context, id, + (void *)(&event_msg_buffer_loc[count])); + + verifexit(count_loc == 1); + } + +EXIT: + if ((unsigned int) count < N) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_event_queue_receive_N failed\n"); + } + return count; +} + +size_t ia_css_psys_get_size( + const struct ia_css_syscom_context *context) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? 
*/ +EXIT: + if (size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_size failed\n"); + } + return size; +} + +unsigned int ia_css_psys_get_cmd_queue_count( + const struct ia_css_syscom_context *context) +{ + unsigned int count = 0; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_cmd_queue_count(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + count = (unsigned int)IA_CSS_N_PSYS_CMD_QUEUE_ID; + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_cmd_queue_count failed\n"); + } + return count; +} + +unsigned int ia_css_psys_get_event_queue_count( + const struct ia_css_syscom_context *context) +{ + unsigned int count = 0; + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_event_queue_count(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + count = (unsigned int)IA_CSS_N_PSYS_EVENT_QUEUE_ID; + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_event_queue_count failed\n"); + } + return count; +} + +size_t ia_css_psys_get_cmd_queue_size( + const struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + size_t queue_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_cmd_queue_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + queue_size = ia_css_psys_cmd_queue_cfg[id].queue_size; +EXIT: + if (queue_size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_cmd_queue_size failed\n"); + } + return queue_size; +} + +size_t ia_css_psys_get_event_queue_size( + const struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + size_t queue_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_event_queue_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + queue_size = ia_css_psys_event_queue_cfg[id].queue_size; +EXIT: + if (queue_size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_event_queue_size failed\n"); + } + return queue_size; +} + +size_t ia_css_psys_get_cmd_msg_size( + const struct ia_css_syscom_context *context, + ia_css_psys_cmd_queue_ID_t id) +{ + size_t msg_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_cmd_msg_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? */ + NOT_USED(context); + msg_size = ia_css_psys_cmd_queue_cfg[id].token_size; +EXIT: + if (msg_size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_cmd_msg_size failed\n"); + } + return msg_size; +} + +size_t ia_css_psys_get_event_msg_size( + const struct ia_css_syscom_context *context, + ia_css_psys_event_queue_ID_t id) +{ + size_t msg_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DEVICE, VERBOSE, + "ia_css_psys_get_event_msg_size(): enter:\n"); + + verifexit(context != NULL); + /* How can I query the context ? 
*/ + NOT_USED(context); + msg_size = ia_css_psys_event_queue_cfg[id].token_size; +EXIT: + if (msg_size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DEVICE, ERROR, + "ia_css_psys_get_event_msg_size failed\n"); + } + return msg_size; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_buffer_set.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_buffer_set.h new file mode 100644 index 000000000000..392b4359353f --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_buffer_set.h @@ -0,0 +1,174 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __IA_CSS_PSYS_BUFFER_SET_H +#define __IA_CSS_PSYS_BUFFER_SET_H + +#include "ia_css_base_types.h" +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_psys_process_types.h" +#include "ia_css_terminal_types.h" + +#define N_UINT64_IN_BUFFER_SET_STRUCT 1 +#define N_UINT16_IN_BUFFER_SET_STRUCT 1 +#define N_UINT8_IN_BUFFER_SET_STRUCT 1 +#define N_PADDING_UINT8_IN_BUFFER_SET_STRUCT 5 +#define SIZE_OF_BUFFER_SET \ + (N_UINT64_IN_BUFFER_SET_STRUCT * IA_CSS_UINT64_T_BITS \ + + VIED_VADDRESS_BITS \ + + VIED_VADDRESS_BITS \ + + N_UINT16_IN_BUFFER_SET_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_UINT8_IN_BUFFER_SET_STRUCT * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_BUFFER_SET_STRUCT * IA_CSS_UINT8_T_BITS) + +typedef struct ia_css_buffer_set_s ia_css_buffer_set_t; + +struct ia_css_buffer_set_s { + /* Token for user context reference */ + uint64_t token; + /* IPU virtual address of this buffer set */ + vied_vaddress_t ipu_virtual_address; + /* IPU virtual address of the process group corresponding to this buffer set */ + vied_vaddress_t process_group_handle; + /* Number of terminal buffer addresses in this structure */ + uint16_t terminal_count; + /* Frame id to associate with this buffer set */ + uint8_t frame_counter; + /* Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_BUFFER_SET_STRUCT]; +}; + + +/*! Construct a buffer set object at specified location + + @param buffer_set_mem[in] memory location to create buffer set object + @param process_group[in] process group corresponding to this buffer set + @param frame_counter[in] frame number for this buffer set object + + @return pointer to buffer set object on success, NULL on error + */ +ia_css_buffer_set_t *ia_css_buffer_set_create( + void *buffer_set_mem, + const ia_css_process_group_t *process_group, + const unsigned int frame_counter); + +/*! Compute size (in bytes) required for full buffer set object + + @param process_group[in] process group corresponding to this buffer set + + @return size in bytes of buffer set object on success, 0 on error + */ +size_t ia_css_sizeof_buffer_set( + const ia_css_process_group_t *process_group); + +/*! 
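+ A hedged usage sketch for the two functions above (illustrative only;
+ the allocator my_shared_alloc() and all variable names are assumptions,
+ not part of this interface):
+
+	size_t size = ia_css_sizeof_buffer_set(process_group);
+	void *mem = my_shared_alloc(size);
+	ia_css_buffer_set_t *bset = NULL;
+
+	if (mem != NULL)
+		bset = ia_css_buffer_set_create(mem, process_group,
+			frame_counter);
+
+ Since ia_css_buffer_set_create() returns NULL on error, the result
+ must be checked before use.
+ */
+
+/*! 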
Set a buffer address in a buffer set object + + @param buffer_set[in] buffer set object to set buffer in + @param terminal_index[in] terminal index to use as a reference between + buffer and terminal + @param buffer[in] buffer address to store + + @return 0 on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_buffer_set_set_buffer( + ia_css_buffer_set_t *buffer_set, + const unsigned int terminal_index, + const vied_vaddress_t buffer); + +/*! Get virtual buffer address from a buffer set object and terminal object by + resolving the index used + + @param buffer_set[in] buffer set object to get buffer from + @param terminal[in] terminal object to get buffer of + + @return virtual buffer address on success, VIED_NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_buffer_set_get_buffer( + const ia_css_buffer_set_t *buffer_set, + const ia_css_terminal_t *terminal); + +/*! Set ipu virtual address of a buffer set object within the buffer set object + + @param buffer_set[in] buffer set object to set ipu address in + @param ipu_vaddress[in] ipu virtual address of the buffer set object + + @return 0 on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_buffer_set_set_ipu_address( + ia_css_buffer_set_t *buffer_set, + const vied_vaddress_t ipu_vaddress); + +/*! Get ipu virtual address from a buffer set object + + @param buffer_set[in] buffer set object to get ipu address from + + @return virtual buffer set address on success, VIED_NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_buffer_set_get_ipu_address( + const ia_css_buffer_set_t *buffer_set); + +/*! Set process group handle in a buffer set object + + @param buffer_set[in] buffer set object to set handle in + @param process_group_handle[in] process group handle of the buffer set + object + + @return 0 on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_buffer_set_set_process_group_handle( + ia_css_buffer_set_t *buffer_set, + const vied_vaddress_t process_group_handle); + +/*! Get process group handle from a buffer set object + + @param buffer_set[in] buffer set object to get handle from + + @return virtual process group address on success, VIED_NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_buffer_set_get_process_group_handle( + const ia_css_buffer_set_t *buffer_set); + +/*! Set token of a buffer set object within the buffer set object + + @param buffer_set[in] buffer set object to set token in + @param token[in] token of the buffer set object + + @return 0 on success, -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_buffer_set_set_token( + ia_css_buffer_set_t *buffer_set, + const uint64_t token); + +/*! 
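+ Taken together, the setters above populate a buffer set before it is
+ queued to firmware. A non-normative sketch (the terminal count, buffer
+ table, addresses and token value are placeholder assumptions):
+
+	unsigned int i;
+
+	for (i = 0; i < my_terminal_count; i++)
+		ia_css_buffer_set_set_buffer(bset, i, my_buffer_table[i]);
+	ia_css_buffer_set_set_ipu_address(bset, my_bset_ipu_vaddr);
+	ia_css_buffer_set_set_token(bset, my_user_token);
+ */
+
+/*! 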
Get token from a buffer set object + + @param buffer_set[in] buffer set object to get token from + + @return token on success, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint64_t ia_css_buffer_set_get_token( + const ia_css_buffer_set_t *buffer_set); + +#ifdef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_buffer_set_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_BUFFER_SET_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_storage_class.h new file mode 100644 index 000000000000..9a1e3a7a1294 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_storage_class.h @@ -0,0 +1,28 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +#define __IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#define IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +#else +#define IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_trace.h new file mode 100644 index 000000000000..e8a979dfce0b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_dynamic_trace.h @@ -0,0 +1,103 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_DYNAMIC_TRACE_H +#define __IA_CSS_PSYS_DYNAMIC_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_DYNAMIC_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_DYNAMIC_TRACING_OVERRIDE)) + #define PSYS_DYNAMIC_TRACE_LEVEL_CONFIG \ + PSYS_DYNAMIC_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + * PSYSAPI_DYNAMIC_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_DYNAMIC_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_DYNAMIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_DYNAMIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DYNAMIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_DYNAMIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_DYNAMIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_DYNAMIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_DYNAMIC tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_DYNAMIC_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_DYNAMIC_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYS_DYNAMIC_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.h new file mode 100644 index 000000000000..f4ef80f74213 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.h @@ -0,0 +1,396 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_H +#define __IA_CSS_PSYS_PROCESS_H + +/*! \file */ + +/** @file ia_css_psys_process.h + * + * Define the methods on the process object that are not part of + * a single interface + */ + +#include +#include + +#include + +#include /* uint8_t */ + +/* + * Creation + */ +#include + +/* + * Internal resources + */ +#include + +/* + * Process manager + */ +#include + +/* + * Command processor + */ + +/*! Execute a command locally or send it to be processed remotely + + @param process[in] process object + @param cmd[in] command + + @return < 0 on invalid argument(s) or process state + */ +extern int ia_css_process_cmd( + ia_css_process_t *process, + const ia_css_process_cmd_t cmd); + +/*! Get the internal memory offset of the process object + + @param process[in] process object + @param mem_id[in] memory id + + @return internal memory offset, + IA_CSS_PROCESS_INVALID_OFFSET on invalid argument(s) +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_size_t ia_css_process_get_int_mem_offset( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_id); + + +/*! Get the external memory offset of the process object + + @param process[in] process object + @param mem_type_id[in] memory type id + + @return external memory offset, + IA_CSS_PROCESS_INVALID_OFFSET on invalid argument(s) +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_size_t ia_css_process_get_ext_mem_offset( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id); + + +/*! Get the stored size of the process object + + @param process[in] process object + + @return size, 0 on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +size_t ia_css_process_get_size(const ia_css_process_t *process); + +/*! Get the (pointer to) the process group parent of the process object + + @param process[in] process object + + @return the pointer to the parent, NULL on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_group_t *ia_css_process_get_parent( + const ia_css_process_t *process); + +/*! Set the (pointer to) the process group parent of the process object + + @param process[in] process object + @param parent[in] (pointer to the) process group parent object + + @return < 0 on invalid argument(s) + */ +extern int ia_css_process_set_parent( + ia_css_process_t *process, + ia_css_process_group_t *parent); + +/*! Get the unique ID of program used by the process object + + @param process[in] process object + + @return ID, 0 on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_ID_t ia_css_process_get_program_ID( + const ia_css_process_t *process); + +/*! 
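+ The getters above can be combined to inspect a process, for example
+ when tracing scheduling decisions. A small sketch (proc is assumed to
+ be a valid process object):
+
+	ia_css_process_group_t *parent = ia_css_process_get_parent(proc);
+	ia_css_program_ID_t prog_id = ia_css_process_get_program_ID(proc);
+	size_t stored_size = ia_css_process_get_size(proc);
+ */
+
+/*! 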
Get the state of the process object + + @param process[in] process object + + @return state, limit value (IA_CSS_N_PROCESS_STATES) on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_state_t ia_css_process_get_state( + const ia_css_process_t *process); + +/*! Set the state of the process object + + @param process[in] process object + @param state[in] state of the process + + @return < 0 on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_set_state( + ia_css_process_t *process, + ia_css_process_state_t state); + +/*! Get the assigned cell of the process object + + @param process[in] process object + + @return cell ID, limit value (VIED_NCI_N_CELL_ID) on invalid argument + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_cell_ID_t ia_css_process_get_cell( + const ia_css_process_t *process); + +/*! Get the number of cells the process object depends on + + @param process[in] process object + + @return number of cells + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_get_cell_dependency_count( + const ia_css_process_t *process); + +/*! Get the number of terminals the process object depends on + + @param process[in] process object + + @return number of terminals + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_get_terminal_dependency_count( + const ia_css_process_t *process); + +/*! Set n-th cell dependency of a process object + + @param process[in] Process object + @param dep_index[in] dep index + @param id[in] dep id + + @return < 0 on invalid process argument + */ +extern int ia_css_process_set_cell_dependency( + const ia_css_process_t *process, + const unsigned int dep_index, + const vied_nci_resource_id_t id); + +/*! Get n-th cell dependency of a process object + + @param process[in] Process object + @param cell_num[in] n-th cell + + @return n-th cell dependency, + IA_CSS_PROCESS_INVALID_DEPENDENCY on invalid argument(s) +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_id_t ia_css_process_get_cell_dependency( + const ia_css_process_t *process, + const unsigned int cell_num); + +/*! Set n-th terminal dependency of a process object + + @param process[in] Process object + @param dep_index[in] dep index + @param id[in] dep id + + @return < 0 on invalid argument(s) + */ +extern int ia_css_process_set_terminal_dependency( + const ia_css_process_t *process, + const unsigned int dep_index, + const vied_nci_resource_id_t id); + +/*! Get n-th terminal dependency of a process object + + @param process[in] Process object + @param terminal_num[in] n-th terminal + + @return n-th terminal dependency, + IA_CSS_PROCESS_INVALID_DEPENDENCY on invalid argument(s) +*/ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_get_terminal_dependency( + const ia_css_process_t *process, + const unsigned int terminal_num); + +/*! Get the kernel bitmap of the process object + + @param process[in] process object + + @return process kernel bitmap + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_kernel_bitmap_t ia_css_process_get_kernel_bitmap( + const ia_css_process_t *process); + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t* ia_css_process_get_dfm_port_bitmap_ptr( + ia_css_process_t *process); + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t* ia_css_process_get_dfm_active_port_bitmap_ptr( + ia_css_process_t *process); + + +/*! 
Get the cells bitmap of the process object + + @param process[in] process object + + @return process cells bitmap + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t ia_css_process_get_cells_bitmap( + const ia_css_process_t *process); + +/*! Sets the dfm device resource allocation bitmap of + * the process object + + @param process[in] process object + @param dfm_dev_id[in] dfm device id + @param bitmap[in] resource bitmap + + @return < 0 on invalid argument(s) or process state + */ +int ia_css_process_set_dfm_port_bitmap( + ia_css_process_t *process, + const vied_nci_dev_dfm_id_t dfm_dev_id, + const vied_nci_resource_bitmap_t bitmap); + + +/*! Sets the active dfm ports bitmap of + * the process object + + @param process[in] process object + @param dfm_dev_id[in] dfm device id + @param bitmap[in] active ports bitmap + + @return < 0 on invalid argument(s) or process state + */ +int ia_css_process_set_dfm_active_port_bitmap( + ia_css_process_t *process, + const vied_nci_dev_dfm_id_t dfm_dev_id, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get the dfm port bitmap of the process object + + @param process[in] process object + @param dfm_res_id[in] dfm resource id + + @return bitmap of all DFM ports used by process, corresponding to the input dfm resource id + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t ia_css_process_get_dfm_port_bitmap( + const ia_css_process_t *process, + vied_nci_dev_dfm_id_t dfm_res_id); + +/*! Get the dfm active port bitmap of the process object + + @param process[in] process object + @param dfm_res_id[in] dfm resource id + + @return bitmap of all active DFM ports used by the process, corresponding to the input + dfm resource id + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t ia_css_process_get_dfm_active_port_bitmap( + const ia_css_process_t *process, + vied_nci_dev_dfm_id_t dfm_res_id); + + +/*! Sets the cells bitmap of + * the process object + + @param process[in] process object + @param bitmap[in] bitmap + + @return < 0 on invalid argument(s) or process state + */ +int ia_css_process_set_cells_bitmap( + ia_css_process_t *process, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get the device channel id-n resource allocation offset of the process object + + @param process[in] process object + @param dev_chn_id[in] channel id + + @return resource offset, IA_CSS_PROCESS_INVALID_OFFSET on invalid argument(s) + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_size_t ia_css_process_get_dev_chn( + const ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id); + +/*! Get the ext mem type-n resource id of the process object + + @param process[in] process object + @param mem_type[in] mem type + + @return resource offset, IA_CSS_PROCESS_INVALID_OFFSET on invalid argument(s) + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_mem_ID_t ia_css_process_get_ext_mem_id( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type); + + +/*! Sets the device channel id-n resource allocation offset of + * the process object + + @param process[in] process object + @param dev_chn_id[in] channel id + @param offset[in] resource offset + + @return < 0 on invalid argument(s) or process state + */ +int ia_css_process_set_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t offset); + +/*! 
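+ A resource-programming sketch using the accessors above (illustrative
+ only; the channel id, memory type and offset values are assumptions):
+
+	if (ia_css_process_set_dev_chn(proc, my_chn_id, my_offset) < 0)
+		return -1;
+	my_mem_id = ia_css_process_get_ext_mem_id(proc, my_mem_type);
+	my_mem_offset = ia_css_process_get_ext_mem_offset(proc, my_mem_type);
+ */
+
+/*! 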
Boolean test if the process object type is valid + + @param process[in] process object + @param p_manifest[in] program manifest + + @return true if the process object is correct, false on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_is_process_valid( + const ia_css_process_t *process, + const ia_css_program_manifest_t *p_manifest); + +/*! Gets the program_idx from the process object + + @param process[in] process object + + @return program index + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_get_program_idx( + const ia_css_process_t *process); + +#ifdef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_process_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_PROCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.kernel.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.kernel.h new file mode 100644 index 000000000000..cab796560414 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.kernel.h @@ -0,0 +1,144 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_HSYS_KERNEL_H +#define __IA_CSS_PSYS_PROCESS_HSYS_KERNEL_H + +/*! \file */ + +/** @file ia_css_psys_process.hsys.kernel.h + * + * Define the methods on the process object: Hsys kernel interface + */ + +#include + +#include + +/* + * Internal resources + */ + +/*! Clear all resource (offset) specifications + + @param process[in] process object + + @return < 0 on error + */ +extern int ia_css_process_clear_all(ia_css_process_t *process); + +/*! Set the cell ID resource specification + + @param process[in] process object + @param cell_id[in] cell ID + + @return < 0 on error + */ +extern int ia_css_process_set_cell( + ia_css_process_t *process, + const vied_nci_cell_ID_t cell_id); + +/*! Clear cell ID resource specification + + @param process[in] process object + + @return < 0 on error + */ +extern int ia_css_process_clear_cell(ia_css_process_t *process); + +/*! Set the memory resource (offset) specification for a memory + that belongs to the cell that is assigned to the process + + @param process[in] process object + @param mem_type_id[in] mem type ID + @param offset[in] offset + + Precondition: The cell ID must be set + + @return < 0 on error + */ +extern int ia_css_process_set_int_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t offset); + +/*! Clear the memory resource (offset) specification for a memory + type that belongs to the cell that is assigned to the process + + @param process[in] process object + @param mem_type_id[in] mem type ID + + Precondition: The cell ID must be set + + @return < 0 on error + */ +extern int ia_css_process_clear_int_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! 
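+ Because the internal-memory setter above requires the cell ID to be
+ assigned first, a minimal sequence looks like this (sketch; the cell,
+ memory type and offset values are placeholders):
+
+	if (ia_css_process_set_cell(proc, my_cell_id) < 0)
+		return -1;
+	if (ia_css_process_set_int_mem(proc, my_mem_type_id, my_offset) < 0) {
+		(void)ia_css_process_clear_cell(proc);
+		return -1;
+	}
+ */
+
+/*! 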
Set the memory resource (offset) specification for a memory + that does not belong to the cell that is assigned to the process + + @param process[in] process object + @param mem_id[in] mem ID + @param offset[in] offset + + Precondition: The cell ID must be set + + @return < 0 on error + */ +extern int ia_css_process_set_ext_mem( + ia_css_process_t *process, + const vied_nci_mem_ID_t mem_id, + const vied_nci_resource_size_t offset); + +/*! Clear the memory resource (offset) specification for a memory + type that does not belong to the cell that is assigned to the process + + @param process[in] process object + @param mem_type_id[in] mem type ID + + Precondition: The cell ID must be set + + @return < 0 on error + */ +extern int ia_css_process_clear_ext_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Set a device channel resource (offset) specification + + @param process[in] process object + @param dev_chn_id[in] device channel ID + @param offset[in] offset + + @return < 0 on error + */ +extern int ia_css_process_set_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t offset); + +/*! Clear a device channel resource (offset) specification + + @param process[in] process object + @param dev_chn_id[in] device channel ID + + @return < 0 on error + */ +extern int ia_css_process_clear_dev_chn( + ia_css_process_t *process, + const vied_nci_dev_chn_ID_t dev_chn_id); + +#endif /* __IA_CSS_PSYS_PROCESS_HSYS_KERNEL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.user.h new file mode 100644 index 000000000000..015a60b0e1af --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.hsys.user.h @@ -0,0 +1,85 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_HSYS_USER_H +#define __IA_CSS_PSYS_PROCESS_HSYS_USER_H + +/*! \file */ + +/** @file ia_css_psys_process.hsys.user.h + * + * Define the methods on the process object: Hsys user interface + */ + +#include /* ia_css_program_param_t */ + +#include +#include + +#include /* uint8_t */ + +/* + * Creation + */ + +/*! Compute the size of storage required for allocating the process object + + @param manifest[in] program manifest + @param param[in] program parameters + + @return 0 on error + */ +extern size_t ia_css_sizeof_process( + const ia_css_program_manifest_t *manifest, + const ia_css_program_param_t *param); + +/*! 
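+ ia_css_sizeof_process() pairs with the create/destroy functions below.
+ A hedged lifecycle sketch (my_malloc() and my_free() are hypothetical
+ host allocators, not part of this interface):
+
+	size_t size = ia_css_sizeof_process(manifest, param);
+	void *raw = my_malloc(size);
+	ia_css_process_t *proc =
+		ia_css_process_create(raw, manifest, param, program_idx);
+
+ and, once the process is no longer needed:
+
+	proc = ia_css_process_destroy(proc);
+	my_free(raw);
+ */
+
+/*! 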
Create the process object + + @param raw_mem[in] pre-allocated memory + @param manifest[in] program manifest + @param param[in] program parameters + @param program_idx[in] program index + + @return NULL on error + */ +extern ia_css_process_t *ia_css_process_create( + void *raw_mem, + const ia_css_program_manifest_t *manifest, + const ia_css_program_param_t *param, + const uint32_t program_idx); + +/*! Destroy (the storage of) the process object + + @param process[in] process object + + @return NULL + */ +extern ia_css_process_t *ia_css_process_destroy( + ia_css_process_t *process); + +/* + * Access functions + */ + +/*! Print the process object to file/stream + + @param process[in] process object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_process_print( + const ia_css_process_t *process, + void *fid); + +#endif /* __IA_CSS_PSYS_PROCESS_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.psys.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.psys.h new file mode 100644 index 000000000000..ba1db574a438 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process.psys.h @@ -0,0 +1,53 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_PSYS_H +#define __IA_CSS_PSYS_PROCESS_PSYS_H + +/*! \file */ + +/** @file ia_css_psys_process.psys.h + * + * Define the methods on the process object: Psys embedded interface + */ + +#include + +/* + * Process manager + */ + +/*! Acquire the resources specified in process object + + @param process[in] process object + + Postcondition: This is a try operation: if any of the + resources is not available, all successfully acquired + ones will be released and the function will return an + error + + @return < 0 on error + */ +extern int ia_css_process_acquire(ia_css_process_t *process); + +/*! Release the resources specified in process object + + @param process[in] process object + + @return < 0 on error + */ +extern int ia_css_process_release(ia_css_process_t *process); + + +#endif /* __IA_CSS_PSYS_PROCESS_PSYS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.h new file mode 100644 index 000000000000..c0f6901adeb0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.h @@ -0,0 +1,366 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_H +#define __IA_CSS_PSYS_PROCESS_GROUP_H + +/*! \file */ + +/** @file ia_css_psys_process_group.h + * + * Define the methods on the process object that are not part of + * a single interface + */ +#include "ia_css_rbm.h" + +#include +#include + +#include /* uint8_t */ + +/* + * Creation + */ +#include + +/* + * Registration of user contexts / callback info + * External resources + * Sequencing resources + */ +#include + +/* + * Dispatcher + */ +#include + +/* + * Access to sub-structure handles / fields + */ + +#include "ia_css_terminal.h" + +/*! Get the number of fragments on the process group + + @param process_group[in] process group object + + Note: Future change is to have a fragment count per + independent subgraph + + @return the fragment count, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint16_t ia_css_process_group_get_fragment_count( + const ia_css_process_group_t *process_group); + + +/*! Get the fragment state on the process group + + @param process_group[in] process group object + @param fragment_state[out] current fragment of processing + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_get_fragment_state( + const ia_css_process_group_t *process_group, + uint16_t *fragment_state); + +/*! Set the fragment state on the process group + + @param process_group[in] process group object + @param fragment_state[in] current fragment of processing + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_fragment_state( + ia_css_process_group_t *process_group, + uint16_t fragment_state); + +/*! Get the number of processes on the process group + + @param process_group[in] process group object + + @return the process count, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_process_count( + const ia_css_process_group_t *process_group); + +/*! Get the number of terminals on the process group + + @param process_group[in] process group object + + Note: Future change is to have a terminal count per + independent subgraph + + @return the terminal count, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_terminal_count( + const ia_css_process_group_t *process_group); + +/*! Get the PG load start timestamp + + @param process_group[in] process group object + + @return PG load start timestamp, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_get_pg_load_start_ts( + const ia_css_process_group_t *process_group); + +/*! Get the PG load time in cycles + + @param process_group[in] process group object + + @return PG load time in cycles, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_get_pg_load_cycles( + const ia_css_process_group_t *process_group); + +/*! Get the PG init time in cycles + + @param process_group[in] process group object + + @return PG init time in cycles, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_get_pg_init_cycles( + const ia_css_process_group_t *process_group); + +/*! 
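+ Together with the processing-time counter below, the timestamp and
+ cycle getters above allow simple profiling of a completed process
+ group; a sketch (pg is assumed to be a valid process group):
+
+	uint32_t load_start = ia_css_process_group_get_pg_load_start_ts(pg);
+	uint32_t load_cycles = ia_css_process_group_get_pg_load_cycles(pg);
+	uint32_t init_cycles = ia_css_process_group_get_pg_init_cycles(pg);
+ */
+
+/*! 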
Get the PG processing time in cycles + + @param process_group[in] process group object + + @return PG processing time in cycles, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_get_pg_processing_cycles( + const ia_css_process_group_t *process_group); + +/*! Get the (pointer to) the terminal of the process group object + + @param process_group[in] process group object + @param terminal_type[in] terminal type of terminal + + @return the pointer to the terminal, NULL on error + */ + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_terminal_t *ia_css_process_group_get_terminal_from_type( + const ia_css_process_group_t *process_group, + const ia_css_terminal_type_t terminal_type); + +/*! Get the (pointer to) the terminal of the process group object + * for terminals which have only a single instance + * (cached in, cached out, program, program_ctrl_init) + + @param process_group[in] process group object + @param terminal_type[in] terminal type of terminal + + @return the pointer to the terminal, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +const ia_css_terminal_t *ia_css_process_group_get_single_instance_terminal( + const ia_css_process_group_t *process_group, + ia_css_terminal_type_t term_type); + +/*! Get the (pointer to) the indexed terminal of the process group object + + @param process_group[in] process group object + @param terminal_index[in] index of the terminal + + @return the pointer to the terminal, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_terminal_t *ia_css_process_group_get_terminal( + const ia_css_process_group_t *process_group, + const unsigned int terminal_index); + +/*! Get the (pointer to) the indexed process of the process group object + + @param process_group[in] process group object + @param process_index[in] index of the process + + @return the pointer to the process, NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_t *ia_css_process_group_get_process( + const ia_css_process_group_t *process_group, + const unsigned int process_index); + +/*! Get the stored size of the process group object + + @param process_group[in] process group object + + @return size, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +size_t ia_css_process_group_get_size( + const ia_css_process_group_t *process_group); + +/*! Get the state of the process group object + + @param process_group[in] process group object + + @return state, limit value on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_process_group_state_t ia_css_process_group_get_state( + const ia_css_process_group_t *process_group); + +/*! Get the unique ID of program group used by the process group object + + @param process_group[in] process group object + + @return ID, 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +ia_css_program_group_ID_t ia_css_process_group_get_program_group_ID( + const ia_css_process_group_t *process_group); + +/*! Get the resource bitmap of the process group + + @param process_group[in] process group object + + @return the resource bitmap + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_nci_resource_bitmap_t ia_css_process_group_get_resource_bitmap( + const ia_css_process_group_t *process_group); + +/*! 
Set the resource bitmap of the process group + + @param process_group[in] process group object + @param resource_bitmap[in] the resource bitmap + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_resource_bitmap( + ia_css_process_group_t *process_group, + const vied_nci_resource_bitmap_t resource_bitmap); + +/*! Get the routing bitmap of the process group + + @param process_group[in] process group object + + @return routing bitmap (pointer) + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +const ia_css_rbm_t *ia_css_process_group_get_routing_bitmap( + const ia_css_process_group_t *process_group); + +/*! Set the routing bitmap of the process group + + @param process_group[in] process group object + @param rbm[in] routing bitmap + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_routing_bitmap( + ia_css_process_group_t *process_group, + const ia_css_rbm_t rbm); + +/*! Get IPU virtual address of process group + + @param process_group[in] process group object + @param ipu_vaddress[out] process group ipu virtual address + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_get_ipu_vaddress( + const ia_css_process_group_t *process_group, + vied_vaddress_t *ipu_vaddress); + +/*! Set IPU virtual address of process group + + @param process_group[in] process group object + @param ipu_vaddress[in] process group ipu address + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_ipu_vaddress( + ia_css_process_group_t *process_group, + vied_vaddress_t ipu_vaddress); + +/*! Get protocol version used by a process group + + @param process_group[in] process group object + + @return invalid protocol version on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_protocol_version( + const ia_css_process_group_t *process_group); + +/*! Get base queue id used by a process group + + @param process_group[in] process group object + + @return invalid queue id on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_base_queue_id( + ia_css_process_group_t *process_group); + +/*! Set base queue id used by a process group + + @param process_group[in] process group object + @param queue_id[in] process group queue id + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_base_queue_id( + ia_css_process_group_t *process_group, + uint8_t queue_id); + +/*! Get number of queues used by a process group + + @param process_group[in] process group object + + @return invalid number of queues (0) on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint8_t ia_css_process_group_get_num_queues( + ia_css_process_group_t *process_group); + +/*! 
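+ The queue accessors above are typically exercised together when
+ configuring a process group; a hedged sketch (the base queue id value
+ is a placeholder):
+
+	if (ia_css_process_group_set_base_queue_id(pg, my_base_qid) < 0)
+		return -1;
+	uint8_t qid = ia_css_process_group_get_base_queue_id(pg);
+	uint8_t num = ia_css_process_group_get_num_queues(pg);
+ */
+
+/*! 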
Set number of queues used by a process group + + @param process_group[in] process group object + @param num_queues[in] process group number of queues + + @return -1 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_num_queues( + ia_css_process_group_t *process_group, + uint8_t num_queues); + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_process_group_has_vp(const ia_css_process_group_t *process_group); + +#ifdef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_process_group_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.kernel.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.kernel.h new file mode 100644 index 000000000000..93cce2555de9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.kernel.h @@ -0,0 +1,324 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_HSYS_KERNEL_H +#define __IA_CSS_PSYS_PROCESS_GROUP_HSYS_KERNEL_H + +/*! \file */ + +/** @file ia_css_psys_process_group.hsys.kernel.h + * + * Define the methods on the process group object: Hsys kernel interface + */ + +#include + +#include +#include + +#include /* uint8_t */ + +/* + * Registration of user contexts / callback info + */ + +/*! Get the user (callback) token as registered in the process group + + @param process_group[in] process group object + + @return 0 on error + */ +extern uint64_t ia_css_process_group_get_token( + ia_css_process_group_t *process_group); + +/*! Set (register) a user (callback) token in the process group + + @param process_group[in] process group object + @param token[in] user token + + Note: The token value shall be non-zero. This token is + returned in each return message related to the process + group the token is registered with. + + @return < 0 on error + */ +extern int ia_css_process_group_set_token( + ia_css_process_group_t *process_group, + const uint64_t token); + +/* + * Passing of a (fragment) watermark + */ + +/*! Get the fragment progress limit of the process group + + @param process_group[in] process group object + + @return 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint16_t ia_css_process_group_get_fragment_limit( + const ia_css_process_group_t *process_group); + +/*! Set the new fragment progress limit of the process group + + @param process_group[in] process group object + @param fragment_limit[in] New limit value + + Note: The limit value must be less or equal to the fragment + count value. The process group will not make progress beyond + the limit value. The limit value can be modified asynchronously. + If the limit value is reached before an update happens, the + process group will suspend and will not automatically resume. 
+ +  The limit is monotonically increasing. The default value is + equal to the fragment count + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_fragment_limit( + ia_css_process_group_t *process_group, + const uint16_t fragment_limit); + +/*! Clear the fragment progress limit of the process group + + @param process_group[in] process group object + + Note: This function sets the fragment limit to zero. + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_clear_fragment_limit( + ia_css_process_group_t *process_group); + +/* + * Commands + */ + +/*! Perform the start command on the process group + + @param process_group[in] process group object + + Note: Start is an action of the l-Scheduler; it makes the + process group eligible for execution + + Precondition: The external resources that are attached to + the process group must be in the correct state, i.e. input + buffers are not-empty and output buffers not-full + + @return < 0 on error + */ +extern int ia_css_process_group_start( + ia_css_process_group_t *process_group); + +/*! Perform the suspend command on the process group + + @param process_group[in] process group object + + Note: Suspend indicates that the process group execution + is halted at the next fragment boundary. The process group + will not automatically resume + + Precondition: The process group must be running + + @return < 0 on error + */ +extern int ia_css_process_group_suspend( + ia_css_process_group_t *process_group); + +/*! Perform the resume command on the process group + + @param process_group[in] process group object + + Note: Resume indicates that the process group is again + eligible for execution + + Precondition: The process group must be started + + @return < 0 on error + */ +extern int ia_css_process_group_resume( + ia_css_process_group_t *process_group); + +/*! Perform the reset command on the process group + + @param process_group[in] process group object + + Note: Return the process group to the started state + + Precondition: The process group must be running or stopped + + @return < 0 on error + */ +extern int ia_css_process_group_reset( + ia_css_process_group_t *process_group); + +/*! Perform the abort command on the process group + + @param process_group[in] process group object + + Note: Force the process group to the stopped state + + Precondition: The process group must be running or started + + @return < 0 on error + */ +extern int ia_css_process_group_abort( + ia_css_process_group_t *process_group); + +/*! Release ownership of the process group + + @param process_group[in] process group object + + Note: Release notifies PSYS and hands over ownership of the + process group from SW to FW + + Precondition: The process group must be in the started state + + @return < 0 on error + */ +extern int ia_css_process_group_disown( + ia_css_process_group_t *process_group); + +/* + * External resources + */ + +/*! 
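+ The command functions above are normally issued in a fixed order from
+ the host. A hedged sketch of the nominal flow (error handling elided;
+ pg is assumed to be a submitted process group):
+
+	if (ia_css_process_group_start(pg) == 0)
+		(void)ia_css_process_group_disown(pg);
+
+ After disown, firmware owns the process group; suspend, resume and
+ abort remain available to intervene at fragment boundaries.
+ */
+
+/*! 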
Set (register) a data buffer to the indexed terminal in the process group + + @param process_group[in] process group object + @param buffer[in] buffer handle + @param buffer_state[in] state of the buffer + @param terminal_index[in] index of the terminal + + Note: The buffer handle shall not be VIED_NULL, the buffer + state can be undefined; BUFFER_UNDEFINED + + Note: The buffer can be in memory or streaming over memory + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_attach_buffer( + ia_css_process_group_t *process_group, + vied_vaddress_t buffer, + const ia_css_buffer_state_t buffer_state, + const unsigned int terminal_index); + +/*! Get (unregister) the data buffer on the indexed terminal of + * the process group + + @param process_group[in] process group object + @param terminal_index[in] index of the terminal + + Precondition: The process group must be stopped + + Postcondition: The buffer handle shall be reset to VIED_NULL, the buffer + state to BUFFER_NULL + + @return VIED_NULL on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +vied_vaddress_t ia_css_process_group_detach_buffer( + ia_css_process_group_t *process_group, + const unsigned int terminal_index); + +/*! Set (register) a data buffer to the indexed terminal in the process group + + @param process_group[in] process group object + @param stream[in] stream handle + @param buffer_state[in] state of the buffer + @param terminal_index[in] index of the terminal + + Note: The stream handle shall not be zero, the buffer + state can be undefined; BUFFER_UNDEFINED + + Note: The stream is used exclusively, in place of a buffer; the + latter can be in memory or streaming over memory + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_attach_stream( + ia_css_process_group_t *process_group, + uint32_t stream, + const ia_css_buffer_state_t buffer_state, + const unsigned int terminal_index); + +/*! Get (unregister) the stream handle on the indexed terminal of + * the process group + + @param process_group[in] process group object + @param terminal_index[in] index of the terminal + + Precondition: The process group must be stopped + + Postcondition: The stream handle shall be reset to zero, the buffer + state to BUFFER_NULL + + @return 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +uint32_t ia_css_process_group_detach_stream( + ia_css_process_group_t *process_group, + const unsigned int terminal_index); + +/* + * Sequencing resources + */ + +/*! Set a(n artificial) blocking resource (barrier) in + * the process group resource map + + @param process_group[in] process group object + @param barrier_index[in] index of the barrier + + Note: The barriers have to be set to force sequencing between started + process groups + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_set_barrier( + ia_css_process_group_t *process_group, + const vied_nci_barrier_ID_t barrier_index); + +/*! Clear a previously set blocking resource (barrier) in + * the process group resource map + + @param process_group[in] process group object + @param barrier_index[in] index of the barrier + + Precondition: The barriers must have been set + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_clear_barrier( + ia_css_process_group_t *process_group, + const vied_nci_barrier_ID_t barrier_index); + +/*! 
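+ A barrier usage sketch for the two functions above (illustrative; the
+ barrier index is a placeholder):
+
+	if (ia_css_process_group_set_barrier(pg, my_barrier_id) < 0)
+		return -1;
+
+ and, once the forced ordering is no longer required:
+
+	(void)ia_css_process_group_clear_barrier(pg, my_barrier_id);
+ */
+
+/*! 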
Boolean test if the process group preconditions for start are satisfied + + @param process_group[in] process group object + + @return true if the process group can be started + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +bool ia_css_can_process_group_start( + const ia_css_process_group_t *process_group); + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_HSYS_KERNEL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.user.h new file mode 100644 index 000000000000..dfbcc8815c1e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.hsys.user.h @@ -0,0 +1,199 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_HSYS_USER_H +#define __IA_CSS_PSYS_PROCESS_GROUP_HSYS_USER_H + +/*! \file */ + +/** @file ia_css_psys_process_group.hsys.user.h + * + * Define the methods on the process group object: Hsys user interface + */ + +#include /* ia_css_program_group_param_t */ + +#include +#include +#include + +#include "ia_css_psys_dynamic_storage_class.h" + +#include /* uint8_t */ + +/* + * Creation + */ + +/*! Compute the size of storage required for allocating the process group object + + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return 0 on error + */ +extern size_t ia_css_sizeof_process_group( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Create (the storage for) the process group object + + @param process_grp_mem[in/out] raw memory for process group + @param manifest[in] program group manifest + @param param[in] program group parameters + + @return NULL on error + */ +extern ia_css_process_group_t *ia_css_process_group_create( + void *process_grp_mem, + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param); + +/*! Destroy (the storage of) the process group object + + @param process_group[in] process group object + + @return NULL + */ +extern ia_css_process_group_t *ia_css_process_group_destroy( + ia_css_process_group_t *process_group); + +/*! Print the process group object to file/stream + + @param process_group[in] process group object + @param fid[out] file/stream handle + + @return < 0 on error + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H +int ia_css_process_group_print( + const ia_css_process_group_t *process_group, + void *fid); + +/* + * Commands + */ + +/*! 
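+ A minimal host-side flow through this header (hedged sketch; the raw
+ memory allocation, manifest and param objects are assumptions):
+
+	size_t size = ia_css_sizeof_process_group(manifest, param);
+	ia_css_process_group_t *pg =
+		ia_css_process_group_create(my_raw_mem, manifest, param);
+
+	if (pg != NULL && ia_css_can_process_group_submit(pg))
+		(void)ia_css_process_group_submit(pg);
+ */
+
+/*! 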
Perform the submit command on the process group
+
+ @param process_group[in] process group object
+
+ Note: Submit is an action of the h-Scheduler; it makes the
+ process group eligible for the l-Scheduler
+
+ Precondition: The external resources must be attached to
+ the process group
+
+ @return < 0 on error
+ */
+extern int ia_css_process_group_submit(
+	ia_css_process_group_t *process_group);
+
+/*! Boolean test if the process group object type is valid
+
+ @param process_group[in] process group object
+ @param manifest[in] program group manifest
+ @param param[in] program group parameters
+
+ @return true if the process group is correct, false on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+bool ia_css_is_process_group_valid(
+	const ia_css_process_group_t *process_group,
+	const ia_css_program_group_manifest_t *manifest,
+	const ia_css_program_group_param_t *param);
+
+/*! Boolean test if the process group preconditions for submit are satisfied
+
+ @param process_group[in] process group object
+
+ @return true if the process group can be submitted
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+bool ia_css_can_process_group_submit(
+	const ia_css_process_group_t *process_group);
+
+/*! Boolean test if the preconditions on process group and buffer set are
+ satisfied for enqueuing a buffer set
+
+ @param process_group[in] process group object
+ @param buffer_set[in] buffer set object
+
+ @return true if the buffer set can be enqueued
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+bool ia_css_can_enqueue_buffer_set(
+	const ia_css_process_group_t *process_group,
+	const ia_css_buffer_set_t *buffer_set);
+
+/*! Compute the cycle count required for executing the process group object
+
+ @param manifest[in] program group manifest
+ @param param[in] program group parameters
+
+ @return 0 on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+uint32_t ia_css_process_group_compute_cycle_count(
+	const ia_css_program_group_manifest_t *manifest,
+	const ia_css_program_group_param_t *param);
+
+/*! Compute the number of processes required for
+ * executing the process group object
+
+ @param manifest[in] program group manifest
+ @param param[in] program group parameters
+
+ @return 0 on error
+ */
+extern uint8_t ia_css_process_group_compute_process_count(
+	const ia_css_program_group_manifest_t *manifest,
+	const ia_css_program_group_param_t *param);
+
+/*! Compute the number of terminals required for
+ * executing the process group object
+
+ @param manifest[in] program group manifest
+ @param param[in] program group parameters
+
+ @return 0 on error
+ */
+extern uint8_t ia_css_process_group_compute_terminal_count(
+	const ia_css_program_group_manifest_t *manifest,
+	const ia_css_program_group_param_t *param);
+
+/*! Get private token as registered in the process group by the implementation
+
+ @param process_group[in] process group object
+
+ @return 0 on error
+ */
+extern uint64_t ia_css_process_group_get_private_token(
+	ia_css_process_group_t *process_group);
+
+/*! Set private token in the process group as needed by the implementation
+
+ @param process_group[in] process group object
+ @param token[in] private token
+
+ Note: The token value shall be non-zero. This token is private
+ to the implementation. This is in addition to the user token
+
+ @return < 0 on error, 0 on success
+ */
+extern int ia_css_process_group_set_private_token(
+	ia_css_process_group_t *process_group,
+	const uint64_t token);
+
+#endif /* __IA_CSS_PSYS_PROCESS_GROUP_HSYS_USER_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.psys.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.psys.h
new file mode 100644
index 000000000000..6ceccfc2f9bc
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group.psys.h
@@ -0,0 +1,60 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_PROCESS_GROUP_PSYS_H
+#define __IA_CSS_PSYS_PROCESS_GROUP_PSYS_H
+
+/*! \file */
+
+/** @file ia_css_psys_process_group.psys.h
+ *
+ * Define the methods on the process group object: Psys embedded interface
+ */
+
+#include
+
+/*
+ * Dispatcher
+ */
+
+/*! Perform the run command on the process group
+
+ @param process_group[in] process group object
+
+ Note: Run indicates that the process group will execute
+
+ Precondition: The process group must be started or
+ suspended, and the processes must have acquired the necessary
+ internal resources
+
+ @return < 0 on error
+ */
+extern int ia_css_process_group_run(
+	ia_css_process_group_t *process_group);
+
+/*! Perform the stop command on the process group
+
+ @param process_group[in] process group object
+
+ Note: Stop indicates that the process group has completed execution
+
+ Postcondition: The external resources can now be detached
+
+ @return < 0 on error
+ */
+extern int ia_css_process_group_stop(
+	ia_css_process_group_t *process_group);
+
+
+#endif /* __IA_CSS_PSYS_PROCESS_GROUP_PSYS_H */
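A short sketch of the dispatcher sequence above (illustrative only, not part of the patch; it assumes the process group was started beforehand):

static int example_dispatch(ia_css_process_group_t *pg)
{
	/* Run may only follow start (or resume); < 0 signals failure */
	if (ia_css_process_group_run(pg) < 0)
		return -1;

	/* Stop marks completion; external resources can be detached next */
	return ia_css_process_group_stop(pg);
}

diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group_cmd_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group_cmd_impl.h
new file mode 100644
index 000000000000..530f93ef6ce0
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_group_cmd_impl.h
@@ -0,0 +1,178 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.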
+*/ + +#ifndef __IA_CSS_PSYS_PROCESS_GROUP_CMD_IMPL_H +#define __IA_CSS_PSYS_PROCESS_GROUP_CMD_IMPL_H + +#include "type_support.h" +#include "ia_css_psys_process_group.h" +#include "ia_css_rbm_manifest_types.h" + +#define N_UINT64_IN_PROCESS_GROUP_STRUCT 2 +#define N_UINT32_IN_PROCESS_GROUP_STRUCT 5 +#define N_UINT16_IN_PROCESS_GROUP_STRUCT 5 +#define N_UINT8_IN_PROCESS_GROUP_STRUCT 7 +#define N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT 3 + +#define SIZE_OF_PROCESS_GROUP_STRUCT_BITS \ + (IA_CSS_RBM_BITS \ + + N_UINT64_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT64_T_BITS \ + + N_UINT32_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT32_T_BITS \ + + IA_CSS_PROGRAM_GROUP_ID_BITS \ + + IA_CSS_PROCESS_GROUP_STATE_BITS \ + + VIED_VADDRESS_BITS \ + + VIED_NCI_RESOURCE_BITMAP_BITS \ + + N_UINT16_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_UINT8_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT * IA_CSS_UINT8_T_BITS) + +struct ia_css_process_group_s { + /**< User (callback) token / user context reference, + * zero is an error value + */ + uint64_t token; + /**< private token / context reference, zero is an error value */ + uint64_t private_token; + /**< PG routing bitmap used to set connection between programs >*/ + ia_css_rbm_t routing_bitmap; + /**< Size of this structure */ + uint32_t size; + /**< The timestamp when PG load starts */ + uint32_t pg_load_start_ts; + /**< PG load time in cycles */ + uint32_t pg_load_cycles; + /**< PG init time in cycles */ + uint32_t pg_init_cycles; + /**< PG processing time in cycles */ + uint32_t pg_processing_cycles; + /**< Referral ID to program group FW */ + ia_css_program_group_ID_t ID; + /**< State of the process group FSM */ + ia_css_process_group_state_t state; + /**< Virtual address of process group in IPU */ + vied_vaddress_t ipu_virtual_address; + /**< Bitmap of the compute resources used by the process group */ + vied_nci_resource_bitmap_t resource_bitmap; + /**< Number of fragments offered on each terminal */ + uint16_t fragment_count; + /**< Current fragment of processing */ + uint16_t fragment_state; + /**< Watermark to control fragment processing */ + uint16_t fragment_limit; + /**< Array[process_count] of process addresses in this process group */ + uint16_t processes_offset; + /**< Array[terminal_count] of terminal addresses on this process group */ + uint16_t terminals_offset; + /**< Parameter dependent number of processes in this process group */ + uint8_t process_count; + /**< Parameter dependent number of terminals on this process group */ + uint8_t terminal_count; + /**< Parameter dependent number of independent subgraphs in + * this process group + */ + uint8_t subgraph_count; + /**< Process group protocol version */ + uint8_t protocol_version; + /**< Dedicated base queue id used for enqueueing payload buffer sets */ + uint8_t base_queue_id; + /**< Number of dedicated queues used */ + uint8_t num_queues; + /**< Mask the send_pg_done IRQ */ + uint8_t mask_irq; + /**< Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT]; +}; + +/*! Callback after process group is created. Implementations can provide + * suitable actions needed when process group is created. 
+
+ @param process_group[in] process group object
+ @param program_group_manifest[in] program group manifest
+ @param program_group_param[in] program group parameters
+
+ @return 0 on success and non-zero on failure
+ */
+extern int ia_css_process_group_on_create(
+	ia_css_process_group_t *process_group,
+	const ia_css_program_group_manifest_t *program_group_manifest,
+	const ia_css_program_group_param_t *program_group_param);
+
+/*! Callback before process group is about to be destroyed. Any
+ * implementation-specific cleanups can be done here.
+
+ @param process_group[in] process group object
+
+ @return 0 on success and non-zero on failure
+ */
+extern int ia_css_process_group_on_destroy(
+	ia_css_process_group_t *process_group);
+
+/*
+ * Command processor
+ */
+
+/*! Execute a command locally or send it to be processed remotely
+
+ @param process_group[in] process group object
+ @param cmd[in] command
+
+ @return < 0 on error
+ */
+extern int ia_css_process_group_exec_cmd(
+	ia_css_process_group_t *process_group,
+	const ia_css_process_group_cmd_t cmd);
+
+
+/*! Enqueue a buffer set corresponding to a persistent program group by
+ * sending a command to subsystem.
+
+ @param process_group[in] process group object
+ @param buffer_set[in] buffer set
+ @param queue_offset[in] offset to be used from the queue id
+ specified in the process group object
+ (0 for first buffer set for frame, 1
+ for late binding)
+
+ @return < 0 on error
+ */
+extern int ia_css_enqueue_buffer_set(
+	ia_css_process_group_t *process_group,
+	ia_css_buffer_set_t *buffer_set,
+	unsigned int queue_offset);
+
+/*! Enqueue a parameter buffer set corresponding to a persistent program
+ * group by sending a command to subsystem.
+
+ @param process_group[in] process group object
+ @param buffer_set[in] parameter buffer set
+
+ @return < 0 on error
+ */
+extern int ia_css_enqueue_param_buffer_set(
+	ia_css_process_group_t *process_group,
+	ia_css_buffer_set_t *buffer_set);
+
+/*! Need to store the 'secure' mode for each PG for FW test app only
+ *
+ * @param process_group[in] process group object
+ * @param secure[in] secure mode flag
+ *
+ * @return < 0 on error
+ */
+extern int ia_css_process_group_store(
+	ia_css_process_group_t *process_group,
+	bool secure);
+
+
+#endif /* __IA_CSS_PSYS_PROCESS_GROUP_CMD_IMPL_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_types.h
new file mode 100644
index 000000000000..b424fb9631fd
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_process_types.h
@@ -0,0 +1,98 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_PROCESS_TYPES_H
+#define __IA_CSS_PSYS_PROCESS_TYPES_H
+
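A sketch of driving the command processor declared in the previous header (illustrative only, not part of the patch; the command values are the IA_CSS_PROCESS_GROUP_CMD_* enumerators defined below):

static int example_start_then_run(ia_css_process_group_t *pg)
{
	/* Commands walk the process group FSM one step at a time */
	if (ia_css_process_group_exec_cmd(pg,
			IA_CSS_PROCESS_GROUP_CMD_START) < 0)
		return -1;

	return ia_css_process_group_exec_cmd(pg,
			IA_CSS_PROCESS_GROUP_CMD_RUN);
}

+/*!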
\file */ + +/** @file ia_css_psys_process_types.h + * + * The types belonging to the terminal/process/process group dynamic module + */ + +#include +#include + +#include + +#define IA_CSS_PROCESS_INVALID_PROGRAM_IDX ((uint32_t)-1) + +/* private */ +typedef enum ia_css_process_group_cmd { + IA_CSS_PROCESS_GROUP_CMD_NOP = 0, + IA_CSS_PROCESS_GROUP_CMD_SUBMIT, + IA_CSS_PROCESS_GROUP_CMD_ATTACH, + IA_CSS_PROCESS_GROUP_CMD_DETACH, + IA_CSS_PROCESS_GROUP_CMD_START, + IA_CSS_PROCESS_GROUP_CMD_DISOWN, + IA_CSS_PROCESS_GROUP_CMD_RUN, + IA_CSS_PROCESS_GROUP_CMD_STOP, + IA_CSS_PROCESS_GROUP_CMD_SUSPEND, + IA_CSS_PROCESS_GROUP_CMD_RESUME, + IA_CSS_PROCESS_GROUP_CMD_ABORT, + IA_CSS_PROCESS_GROUP_CMD_RESET, + IA_CSS_N_PROCESS_GROUP_CMDS +} ia_css_process_group_cmd_t; + +/* private */ +#define IA_CSS_PROCESS_GROUP_STATE_BITS 32 +typedef enum ia_css_process_group_state { + IA_CSS_PROCESS_GROUP_ERROR = 0, + IA_CSS_PROCESS_GROUP_CREATED, + IA_CSS_PROCESS_GROUP_READY, + IA_CSS_PROCESS_GROUP_BLOCKED, + IA_CSS_PROCESS_GROUP_STARTED, + IA_CSS_PROCESS_GROUP_RUNNING, + IA_CSS_PROCESS_GROUP_STALLED, + IA_CSS_PROCESS_GROUP_STOPPED, + IA_CSS_N_PROCESS_GROUP_STATES +} ia_css_process_group_state_t; + +/* private */ +typedef enum ia_css_process_cmd { + IA_CSS_PROCESS_CMD_NOP = 0, + IA_CSS_PROCESS_CMD_ACQUIRE, + IA_CSS_PROCESS_CMD_RELEASE, + IA_CSS_PROCESS_CMD_START, + IA_CSS_PROCESS_CMD_LOAD, + IA_CSS_PROCESS_CMD_STOP, + IA_CSS_PROCESS_CMD_SUSPEND, + IA_CSS_PROCESS_CMD_RESUME, + IA_CSS_N_PROCESS_CMDS +} ia_css_process_cmd_t; + +/* private */ +#define IA_CSS_PROCESS_STATE_BITS 32 +typedef enum ia_css_process_state { + IA_CSS_PROCESS_ERROR = 0, + IA_CSS_PROCESS_CREATED, + IA_CSS_PROCESS_READY, + IA_CSS_PROCESS_STARTED, + IA_CSS_PROCESS_RUNNING, + IA_CSS_PROCESS_STOPPED, + IA_CSS_PROCESS_SUSPENDED, + IA_CSS_N_PROCESS_STATES +} ia_css_process_state_t; + +/* public */ +typedef struct ia_css_process_group_s ia_css_process_group_t; +typedef struct ia_css_process_s ia_css_process_t; + +typedef struct ia_css_data_terminal_s ia_css_data_terminal_t; + +#endif /* __IA_CSS_PSYS_PROCESS_TYPES_H */ + + + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.h new file mode 100644 index 000000000000..abf398299d16 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.h @@ -0,0 +1,316 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_H +#define __IA_CSS_PSYS_TERMINAL_H + +/*! 
\file */
+
+/** @file ia_css_psys_terminal.h
+ *
+ * Define the methods on the terminal object that are not part of
+ * a single interface
+ */
+
+#include /* ia_css_frame_t */
+#include /* ia_css_program_group_param_t */
+
+#include
+#include
+
+#include /* bool */
+#include /* FILE */
+#include "ia_css_psys_dynamic_storage_class.h"
+#include "ia_css_terminal.h"
+#include "ia_css_terminal_manifest_base_types.h"
+
+/*
+ * Creation
+ */
+#include
+
+/*! Boolean test if the terminal object type is input
+
+ @param terminal[in] terminal object
+
+ @return true if the terminal is input, false otherwise or on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+bool ia_css_is_terminal_input(
+	const ia_css_terminal_t *terminal);
+
+/*! Get the stored size of the terminal object
+
+ @param terminal[in] terminal object
+
+ @return size, 0 on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+size_t ia_css_terminal_get_size(
+	const ia_css_terminal_t *terminal);
+
+/*! Get the type of the terminal object
+
+ @param terminal[in] terminal object
+
+ @return the type of the terminal, limit value on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+ia_css_terminal_type_t ia_css_terminal_get_type(
+	const ia_css_terminal_t *terminal);
+
+/*! Set the type of the terminal object
+
+ @param terminal[in] terminal object
+ @param terminal_type[in] type of the terminal
+
+ @return < 0 on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+int ia_css_terminal_set_type(
+	ia_css_terminal_t *terminal,
+	const ia_css_terminal_type_t terminal_type);
+
+/*! Get the index of the terminal manifest object
+
+ @param terminal[in] terminal object
+
+ @return the index of the terminal manifest object, limit value on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+uint16_t ia_css_terminal_get_terminal_manifest_index(
+	const ia_css_terminal_t *terminal);
+
+/*! Set the index of the terminal manifest object
+
+ @param terminal[in] terminal object
+ @param tm_index[in] terminal manifest index
+
+ @return < 0 on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+int ia_css_terminal_set_terminal_manifest_index(
+	ia_css_terminal_t *terminal,
+	const uint16_t tm_index);
+
+/*! Get id of the terminal object
+
+ @param terminal[in] terminal object
+
+ @return id of terminal
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+ia_css_terminal_ID_t ia_css_terminal_get_ID(
+	const ia_css_terminal_t *terminal);
+
+/*! Get kernel id of the data terminal object
+
+ @param dterminal[in] data terminal object
+
+ @return kernel id of terminal
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+uint8_t ia_css_data_terminal_get_kernel_id(
+	const ia_css_data_terminal_t *dterminal);
+
+/*! Get the connection type from the terminal object
+
+ @param terminal[in] terminal object
+
+ @return connection type, limit value on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+ia_css_connection_type_t ia_css_data_terminal_get_connection_type(
+	const ia_css_data_terminal_t *dterminal);
+
+/*! Set the connection type of the terminal object
+
+ @param terminal[in] terminal object
+ @param connection_type[in] connection type
+
+ @return < 0 on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+int ia_css_data_terminal_set_connection_type(
+	ia_css_data_terminal_t *dterminal,
+	const ia_css_connection_type_t connection_type);
+
+/*! Get link id of the data terminal object
+
+ @param dterminal[in] data terminal object
+
+ @return link id of terminal
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+uint8_t ia_css_data_terminal_get_link_id(
+	const ia_css_data_terminal_t *dterminal);
+
+
+/*!
Set link id of the terminal object
+
+ @param terminal[in] data terminal object
+ @param link_id[in] synchronization link id
+
+ @return < 0 on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+int ia_css_data_terminal_set_link_id(
+	ia_css_data_terminal_t *dterminal,
+	const uint8_t link_id);
+
+/*! Get the (pointer to) the process group parent of the terminal object
+
+ @param terminal[in] terminal object
+
+ @return the pointer to the parent, NULL on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+ia_css_process_group_t *ia_css_terminal_get_parent(
+	const ia_css_terminal_t *terminal);
+
+/*! Set the (pointer to) the process group parent of the terminal object
+
+ @param terminal[in] terminal object
+ @param parent[in] (pointer to the) process group parent object
+
+ @return < 0 on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+int ia_css_terminal_set_parent(
+	ia_css_terminal_t *terminal,
+	ia_css_process_group_t *parent);
+
+/*! Boolean test if the terminal object type is valid
+
+ @param terminal[in] process terminal object
+ @param terminal_manifest[in] program terminal manifest
+
+ @return true if the process terminal object is correct, false on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+bool ia_css_is_terminal_valid(
+	const ia_css_terminal_t *terminal,
+	const ia_css_terminal_manifest_t *terminal_manifest);
+
+/* ================= Program Control Init Terminal - START ================= */
+
+/*!
+ * Gets the program init terminal descriptor size
+ * @param manifest[in] program control init terminal manifest
+ * @return size, error if < 0.
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+unsigned int
+ia_css_program_control_init_terminal_get_descriptor_size(
+	const ia_css_program_control_init_terminal_manifest_t *manifest);
+
+/*!
+ * Initialize program control init terminal
+ * @param terminal[in] program control init terminal
+ * @param manifest[in] program control init terminal manifest
+ * @return < 0 on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+int
+ia_css_program_control_init_terminal_init(
+	ia_css_program_control_init_terminal_t *terminal,
+	const ia_css_program_control_init_terminal_manifest_t *manifest);
+
+/*!
+ * Get a program desc for a program control init terminal
+ * @param prog_ctrl_init_terminal[in] program control init terminal
+ * @param program_index[in] program index
+ * @return program descriptor, NULL on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+ia_css_program_control_init_program_desc_t *
+ia_css_program_control_init_terminal_get_program_desc(
+	const ia_css_program_control_init_terminal_t *prog_ctrl_init_terminal,
+	const unsigned int program_index
+);
+
+/*!
+ * Pretty prints the program control init terminal
+ * @param terminal[in] program control init terminal
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+void ia_css_program_control_init_terminal_print(
+	const ia_css_program_control_init_terminal_t *terminal);
+
+/*!
+ * Gets a load section desc for a program desc
+ * of a program control init terminal
+ * @param program_desc[in] program control init terminal program desc
+ * @param load_section_index[in] section index
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+ia_css_program_control_init_load_section_desc_t *
+ia_css_program_control_init_terminal_get_load_section_desc(
+	const ia_css_program_control_init_program_desc_t *program_desc,
+	const unsigned int load_section_index
+);
+
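A sketch of walking the program descriptors declared above (illustrative only, not part of the patch; the zero indices are hypothetical and assume at least one program with one load section):

static void example_first_load_section(
	const ia_css_program_control_init_terminal_t *terminal)
{
	ia_css_program_control_init_program_desc_t *prog;
	ia_css_program_control_init_load_section_desc_t *sec;

	/* Hypothetical indices: first program, first load section */
	prog = ia_css_program_control_init_terminal_get_program_desc(
		terminal, 0);
	if (prog == NULL)
		return;

	sec = ia_css_program_control_init_terminal_get_load_section_desc(
		prog, 0);
	(void)sec;
}

+/*!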
+ * Gets process_id from program desc
+ * of a program control init terminal
+ * @param program_desc[in] program control init terminal program desc
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+ia_css_process_id_t ia_css_program_control_init_terminal_get_process_id(
+	const ia_css_program_control_init_program_desc_t *program_desc);
+
+/*!
+ * Set control info of program desc
+ * of a program control init terminal
+ * @param program_desc[in] program control init terminal program desc
+ * @param process_id unique process id used to identify the process
+ * among all active processes
+ * @param num_done_events number of events required to close the process
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+void ia_css_program_control_init_terminal_set_control_info(
+	ia_css_program_control_init_program_desc_t *program_desc,
+	ia_css_process_id_t process_id,
+	uint8_t num_done_events);
+
+/*!
+ * Gets num_done_events value from program desc
+ * of a program control init terminal
+ * @param program_desc[in] program control init terminal program desc
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+uint8_t ia_css_program_control_init_terminal_get_num_done_events(
+	const ia_css_program_control_init_program_desc_t *program_desc);
+
+/*!
+ * Gets a connect section desc for a program desc
+ * of a program control init terminal
+ * @param program_desc[in] program control init terminal program desc
+ * @param connect_section_index[in] section index
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+ia_css_program_control_init_connect_section_desc_t *
+ia_css_program_control_init_terminal_get_connect_section_desc(
	const ia_css_program_control_init_program_desc_t *program_desc,
+	const unsigned int connect_section_index
+);
+
+/* ================= Program Control Init Terminal - END ================= */
+
+#ifdef __IA_CSS_PSYS_DYNAMIC_INLINE__
+#include "ia_css_psys_terminal_impl.h"
+#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */
+
+#endif /* __IA_CSS_PSYS_TERMINAL_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.hsys.user.h
new file mode 100644
index 000000000000..b8aa08c19754
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/interface/ia_css_psys_terminal.hsys.user.h
@@ -0,0 +1,255 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_TERMINAL_HSYS_USER_H
+#define __IA_CSS_PSYS_TERMINAL_HSYS_USER_H
+
+/*!
\file */
+
+/** @file ia_css_psys_terminal.hsys.user.h
+ *
+ * Define the methods on the terminal object: Hsys user interface
+ */
+
+#include /* ia_css_frame_t */
+#include /* ia_css_program_group_param_t */
+
+#include
+#include
+
+#include /* bool */
+#include "ia_css_psys_dynamic_storage_class.h"
+#include "ia_css_terminal.h"
+#include "ia_css_terminal_manifest.h"
+#include "ia_css_kernel_bitmap.h"
+
+/*
+ * Creation
+ */
+
+/*
+ * This source file is created with the intention of being shared and
+ * compiled for host and firmware. Since there is no native 64bit
+ * data type support for firmware, this wouldn't compile for the SP
+ * tile. The parts of the file that are not compilable are marked
+ * with the following __VIED_CELL marker and this comment. Once we
+ * come up with a solution to address this issue, this will be
+ * removed.
+ */
+#if !defined(__VIED_CELL)
+/*! Compute the size of storage required for allocating the terminal object
+
+ @param manifest[in] terminal manifest
+ @param param[in] program group parameters
+
+ @return 0 on error
+ */
+extern size_t ia_css_sizeof_terminal(
+	const ia_css_terminal_manifest_t *manifest,
+	const ia_css_program_group_param_t *param);
+
+/*! Create the terminal object
+
+ @param raw_mem[in] pre-allocated memory
+ @param manifest[in] terminal manifest
+ @param terminal_param[in] terminal parameter
+ @param enable_bitmap[in] program group enable bitmap
+
+ @return NULL on error
+ */
+extern ia_css_terminal_t *ia_css_terminal_create(
+	void *raw_mem,
+	const ia_css_terminal_manifest_t *manifest,
+	const ia_css_terminal_param_t *terminal_param,
+	ia_css_kernel_bitmap_t enable_bitmap);
+
+/*! Destroy (the storage of) the terminal object
+
+ @param terminal[in] terminal object
+
+ @return NULL
+ */
+extern ia_css_terminal_t *ia_css_terminal_destroy(
+	ia_css_terminal_t *terminal);
+#endif /* !defined(__VIED_CELL) */
+
+/*! Print the terminal object to file/stream
+
+ @param terminal[in] terminal object
+ @param fid[out] file/stream handle
+
+ @return < 0 on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+int ia_css_terminal_print(
+	const ia_css_terminal_t *terminal,
+	void *fid);
+
+/*! Get the (pointer to) the frame object in the terminal object
+
+ @param terminal[in] terminal object
+
+ @return the pointer to the frame, NULL on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+ia_css_frame_t *ia_css_data_terminal_get_frame(
+	const ia_css_data_terminal_t *terminal);
+
+/*! Get the (pointer to) the frame descriptor object in the terminal object
+
+ @param terminal[in] terminal object
+
+ @return the pointer to the frame descriptor, NULL on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+ia_css_frame_descriptor_t *ia_css_data_terminal_get_frame_descriptor(
+	const ia_css_data_terminal_t *dterminal);
+
+/*! Get the (pointer to) the fragment descriptor object in the terminal object
+
+ @param terminal[in] terminal object
+
+@return the pointer to the fragment descriptor, NULL on error
+*/
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+ia_css_fragment_descriptor_t
+	*ia_css_data_terminal_get_fragment_descriptor(
+	const ia_css_data_terminal_t *dterminal,
+	const unsigned int fragment_index);
+
+/*! Get the number of fragments on the terminal
+
+ @param terminal[in] terminal object
+
+ @return the fragment count, 0 on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+uint16_t ia_css_data_terminal_get_fragment_count(
+	const ia_css_data_terminal_t *dterminal);
+
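A host-side sketch of the creation flow above (illustrative only, not part of the patch; malloc stands in for whichever host allocator is actually used, and the manifest/param objects are assumed to exist):

static ia_css_terminal_t *example_terminal_alloc(
	const ia_css_terminal_manifest_t *manifest,
	const ia_css_program_group_param_t *param,
	const ia_css_terminal_param_t *terminal_param,
	ia_css_kernel_bitmap_t enable_bitmap)
{
	size_t size = ia_css_sizeof_terminal(manifest, param);
	void *raw_mem;

	if (size == 0)
		return NULL;

	raw_mem = malloc(size);	/* requires <stdlib.h> */
	if (raw_mem == NULL)
		return NULL;

	/* NULL indicates a creation failure; the caller frees raw_mem */
	return ia_css_terminal_create(raw_mem, manifest, terminal_param,
		enable_bitmap);
}

+/*!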
Get the number of sections on the (param)terminal
+ @param manifest[in] terminal manifest
+ @param param[in] program group parameters
+
+ @return the section count, 0 on error
+ */
+extern uint16_t ia_css_param_terminal_compute_section_count(
+	const ia_css_terminal_manifest_t *manifest,
+	const ia_css_program_group_param_t *param);
+
+/*! Get the number of planes on the (data)terminal
+ @param manifest[in] terminal manifest
+ @param param[in] program group parameters
+
+ @return the plane count, 1 (default) on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+uint8_t ia_css_data_terminal_compute_plane_count(
+	const ia_css_terminal_manifest_t *manifest,
+	const ia_css_program_group_param_t *param);
+
+/*! Check if the given terminal is a parameter terminal.
+
+ @param terminal[in] (base)terminal object
+
+ @return true on success, false on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+bool ia_css_is_terminal_parameter_terminal(
+	const ia_css_terminal_t *terminal);
+
+/*! Check if the given terminal is a program terminal.
+
+ @param terminal[in] (base)terminal object
+
+ @return true on success, false on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+bool ia_css_is_terminal_program_terminal(
+	const ia_css_terminal_t *terminal);
+
+/*! Check if the given terminal is a program control init terminal.
+
+ @param terminal[in] (base)terminal object
+
+ @return true on success, false on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+bool ia_css_is_terminal_program_control_init_terminal(
+	const ia_css_terminal_t *terminal);
+
+/*! Check if the given terminal is a spatial parameter terminal.
+
+ @param terminal[in] (base)terminal object
+
+ @return true on success, false on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+bool ia_css_is_terminal_spatial_parameter_terminal(
+	const ia_css_terminal_t *terminal);
+
+/*! Check if the given terminal is a data terminal.
+
+ @param terminal[in] (base)terminal object
+
+ @return true on success, false on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+bool ia_css_is_terminal_data_terminal(
+	const ia_css_terminal_t *terminal);
+
+/*! Obtain the buffer out of the terminal (both data & param terminals can
+ call this)
+
+ @param terminal[in] (base)terminal object of either data or param terminal.
+
+ @return vied address of buffer stored in terminal
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+vied_vaddress_t ia_css_terminal_get_buffer(
+	const ia_css_terminal_t *terminal);
+
+/*! Store a buffer in the terminal.
+
+ @param terminal[in] (base)terminal object of either data or param terminal.
+ @param buffer[in] buffer in vied (hrt address) space.
+
+ @return 0 on success
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_H
+int ia_css_terminal_set_buffer(ia_css_terminal_t *terminal,
+	vied_vaddress_t buffer);
+
+/*! Obtain terminal buffer index out of terminal object
+
+ @param terminal[in] (base)terminal object of either data or param terminal.
+
+ @return terminal buffer index stored in terminal object on success, -1 on error
+ */
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+int ia_css_terminal_get_terminal_index(
+	const ia_css_terminal_t *terminal);
+
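A sketch pairing the buffer setter above with the index setter below (illustrative only, not part of the patch; the index value is hypothetical):

static int example_bind_terminal(ia_css_terminal_t *terminal,
	vied_vaddress_t buffer, unsigned int terminal_index)
{
	/* Record which buffer-set slot this terminal uses */
	if (ia_css_terminal_set_terminal_index(terminal, terminal_index) != 0)
		return -1;

	/* Then store the buffer (vied address space) on the terminal */
	return ia_css_terminal_set_buffer(terminal, buffer);
}

+/*! Store a terminal buffer index in the terminal object
+
+ @param terminal[in] (base)terminal object of either data or param terminal.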
+ @param terminal_index[in] terminal buffer index + + @return 0 on success + */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_terminal_index( + ia_css_terminal_t *terminal, + unsigned int terminal_index); + +#endif /* __IA_CSS_PSYS_TERMINAL_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set.c new file mode 100644 index 000000000000..82d53831f9a9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set.c @@ -0,0 +1,111 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "assert_support.h" +#include "ia_css_psys_dynamic_trace.h" +#include "ia_css_psys_buffer_set.h" +#include "ia_css_psys_process_group.h" + +/* + * Functions to possibly inline + */ +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_buffer_set_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +STORAGE_CLASS_INLINE void __buffer_set_dummy_check_alignment(void) +{ + COMPILATION_ERROR_IF(SIZE_OF_BUFFER_SET != + CHAR_BIT * sizeof(ia_css_buffer_set_t)); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_buffer_set_t) % sizeof(uint64_t)); +} + +/* + * Functions not to inline + */ + +/* The below functions are not to be compiled for firmware */ +#if !defined(__HIVECC) + +ia_css_buffer_set_t *ia_css_buffer_set_create( + void *buffer_set_mem, + const ia_css_process_group_t *process_group, + const unsigned int frame_counter) +{ + ia_css_buffer_set_t *buffer_set = NULL; + unsigned int i; + int ret = -1; + + verifexit(buffer_set_mem != NULL); + verifexit(process_group != NULL); + + buffer_set = (ia_css_buffer_set_t *)buffer_set_mem; + + /* + * Set base struct members + */ + buffer_set->ipu_virtual_address = VIED_NULL; + ia_css_process_group_get_ipu_vaddress(process_group, + &buffer_set->process_group_handle); + buffer_set->frame_counter = frame_counter; + buffer_set->terminal_count = + ia_css_process_group_get_terminal_count(process_group); + + /* + * Initialize adjacent buffer addresses + */ + for (i = 0; i < buffer_set->terminal_count; i++) { + vied_vaddress_t *buffer = + (vied_vaddress_t *)( + (char *)buffer_set + + sizeof(ia_css_buffer_set_t) + + sizeof(vied_vaddress_t) * i); + + *buffer = VIED_NULL; + } + ret = 0; + +EXIT: + if (ret != 0) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_create failed\n"); + } + return buffer_set; +} + +size_t ia_css_sizeof_buffer_set( + const ia_css_process_group_t *process_group) +{ + size_t size = 0; + + verifexit(process_group != NULL); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_sizeof_buffer_set(): enter:\n"); + + size = sizeof(ia_css_buffer_set_t) + + ia_css_process_group_get_terminal_count(process_group) * + sizeof(vied_vaddress_t); + +EXIT: + if (size == 0) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_sizeof_buffer_set failed\n"); + } + return size; +} + +#endif diff --git 
a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set_impl.h new file mode 100644 index 000000000000..0399d76f3331 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_buffer_set_impl.h @@ -0,0 +1,241 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __IA_CSS_PSYS_BUFFER_SET_IMPL_H +#define __IA_CSS_PSYS_BUFFER_SET_IMPL_H + +#include "error_support.h" +#include "ia_css_psys_dynamic_trace.h" +#include "vied_nci_psys_system_global.h" +#include "ia_css_psys_terminal.hsys.user.h" + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_buffer_set_set_buffer( + ia_css_buffer_set_t *buffer_set, + const unsigned int terminal_index, + const vied_vaddress_t buffer) +{ + DECLARE_ERRVAL + vied_vaddress_t *buffer_ptr; + int ret = -1; + + verifexitval(buffer_set != NULL, EFAULT); + verifexitval(terminal_index < buffer_set->terminal_count, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_set_buffer(): enter:\n"); + + /* + * Set address in buffer set object + */ + buffer_ptr = + (vied_vaddress_t *)( + (char *)buffer_set + + sizeof(ia_css_buffer_set_t) + + terminal_index * sizeof(vied_vaddress_t)); + *buffer_ptr = buffer; + + ret = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_set_buffer: invalid argument\n"); + } + return ret; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_buffer_set_get_buffer( + const ia_css_buffer_set_t *buffer_set, + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + vied_vaddress_t buffer = VIED_NULL; + vied_vaddress_t *buffer_ptr; + int terminal_index; + + verifexitval(buffer_set != NULL, EFAULT); + verifexitval(terminal != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_get_buffer(): enter:\n"); + + /* + * Retrieve terminal index from terminal object + */ + terminal_index = ia_css_terminal_get_terminal_index(terminal); + verifexitval(terminal_index >= 0, EFAULT); + verifexitval(terminal_index < buffer_set->terminal_count, EFAULT); + + /* + * Retrieve address from buffer set object + */ + buffer_ptr = + (vied_vaddress_t *)( + (char *)buffer_set + + sizeof(ia_css_buffer_set_t) + + terminal_index * sizeof(vied_vaddress_t)); + buffer = *buffer_ptr; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_get_buffer: invalid argument\n"); + } + return buffer; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_buffer_set_set_ipu_address( + ia_css_buffer_set_t *buffer_set, + const vied_vaddress_t ipu_vaddress) +{ + DECLARE_ERRVAL + int ret = -1; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_set_ipu_address(): enter:\n"); + + buffer_set->ipu_virtual_address = ipu_vaddress; + + ret = 0; + +EXIT: + if (haserror(EFAULT)) { + 
IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_set_ipu_address invalid argument\n"); + } + return ret; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_buffer_set_get_ipu_address( + const ia_css_buffer_set_t *buffer_set) +{ + DECLARE_ERRVAL + vied_vaddress_t ipu_virtual_address = VIED_NULL; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_get_ipu_address(): enter:\n"); + + ipu_virtual_address = buffer_set->ipu_virtual_address; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_get_ipu_address: invalid argument\n"); + } + return ipu_virtual_address; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_buffer_set_set_process_group_handle( + ia_css_buffer_set_t *buffer_set, + const vied_vaddress_t process_group_handle) +{ + DECLARE_ERRVAL + int ret = -1; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_set_process_group_context(): enter:\n"); + + buffer_set->process_group_handle = process_group_handle; + + ret = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_set_process_group_context invalid argument\n"); + } + return ret; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_buffer_set_get_process_group_handle( + const ia_css_buffer_set_t *buffer_set) +{ + DECLARE_ERRVAL + vied_vaddress_t process_group_handle = VIED_NULL; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_get_process_group_handle(): enter:\n"); + + process_group_handle = buffer_set->process_group_handle; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_get_process_group_handle: invalid argument\n"); + } + return process_group_handle; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_buffer_set_set_token( + ia_css_buffer_set_t *buffer_set, + const uint64_t token) +{ + DECLARE_ERRVAL + int ret = -1; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_set_token(): enter:\n"); + + buffer_set->token = token; + + ret = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_set_token invalid argument\n"); + } + return ret; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint64_t ia_css_buffer_set_get_token( + const ia_css_buffer_set_t *buffer_set) +{ + DECLARE_ERRVAL + uint64_t token = 0; + + verifexitval(buffer_set != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_buffer_set_get_token(): enter:\n"); + + token = buffer_set->token; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_buffer_set_get_token: invalid argument\n"); + } + return token; +} + +#endif /* __IA_CSS_PSYS_BUFFER_SET_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process.c new file mode 100644 index 000000000000..f9e060f62ead --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process.c @@ -0,0 +1,1148 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_process.h" +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_psys_process_private_types.h" +#include /* for NOT_USED */ + +/* + * Functions to possibly inline + */ + +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_process_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +/* + * Functions not to inline + */ + +/* This source file is created with the intention of sharing and + * compiled for host and firmware. Since there is no native 64bit + * data type support for firmware this wouldn't compile for SP + * tile. The part of the file that is not compilable are marked + * with the following __HIVECC marker and this comment. Once we + * come up with a solution to address this issue this will be + * removed. + */ +#if !defined(__HIVECC) +size_t ia_css_sizeof_process( + const ia_css_program_manifest_t *manifest, + const ia_css_program_param_t *param) +{ + size_t size = 0, tmp_size; + + uint8_t program_dependency_count; + uint8_t terminal_dependency_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_sizeof_process(): enter:\n"); + + COMPILATION_ERROR_IF( + SIZE_OF_PROCESS_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_process_t))); + + COMPILATION_ERROR_IF(0 != sizeof(ia_css_process_t)%sizeof(uint64_t)); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + size += sizeof(ia_css_process_t); + + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + + tmp_size = program_dependency_count*sizeof(vied_nci_resource_id_t); + size += tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + tmp_size = terminal_dependency_count*sizeof(uint8_t); + size += tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_sizeof_process invalid argument\n"); + } + return size; +} + +ia_css_process_t *ia_css_process_create( + void *raw_mem, + const ia_css_program_manifest_t *manifest, + const ia_css_program_param_t *param, + const uint32_t program_idx) +{ + size_t tmp_size; + int retval = -1; + ia_css_process_t *process = NULL; + char *process_raw_ptr = (char *) raw_mem; + + /* size_t size = ia_css_sizeof_process(manifest, param); */ + uint8_t program_dependency_count; + uint8_t terminal_dependency_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_create(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + verifexit(process_raw_ptr != NULL); + + process = (ia_css_process_t *) process_raw_ptr; + verifexit(process != NULL); + + process->kernel_bitmap = + ia_css_program_manifest_get_kernel_bitmap(manifest); + process->state = IA_CSS_PROCESS_CREATED; + + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + + /* A process requires at least one input or output */ + 
verifexit((program_dependency_count + + terminal_dependency_count) != 0); + + process_raw_ptr += sizeof(ia_css_process_t); + if (program_dependency_count != 0) { + process->cell_dependencies_offset = + (uint16_t) (process_raw_ptr - (char *)process); + tmp_size = + program_dependency_count * sizeof(vied_nci_resource_id_t); + process_raw_ptr += + tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + } else { + process->cell_dependencies_offset = 0; + } + + if (terminal_dependency_count != 0) { + process->terminal_dependencies_offset = + (uint16_t) (process_raw_ptr - (char *)process); + } + + process->size = (uint32_t)ia_css_sizeof_process(manifest, param); + + process->ID = ia_css_program_manifest_get_program_ID(manifest); + verifexit(process->ID != 0); + process->program_idx = program_idx; + + process->cell_dependency_count = program_dependency_count; + process->terminal_dependency_count = terminal_dependency_count; + + process->parent_offset = 0; + + verifexit(ia_css_process_clear_all(process) == 0); + + process->state = IA_CSS_PROCESS_READY; + retval = 0; + + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_create(): Created successfully process %p ID 0x%x\n", + process, process->ID); + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_create invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_create failed (%i)\n", retval); + process = ia_css_process_destroy(process); + } + return process; +} + +ia_css_process_t *ia_css_process_destroy( + ia_css_process_t *process) +{ + + return process; +} +#endif + +int ia_css_process_set_cell( + ia_css_process_t *process, + const vied_nci_cell_ID_t cell_id) +{ + int retval = -1; + vied_nci_resource_bitmap_t bit_mask; + vied_nci_resource_bitmap_t resource_bitmap; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_cell(): enter:\n"); + + verifexit(process != NULL); + + parent = ia_css_process_get_parent(process); + + verifexit(parent != NULL); + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + +/* Some programs are mapped on a fixed cell, + * when the process group is created + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_CREATED) || + /* If the process group has already been created, but no VP cell + * has been assigned to this process (i.e. 
not fixed in
+	 * manifest), then we need to set the cell of this process
+	 * while its parent state is READY (the ready state is set at
+	 * the end of ia_css_process_group_create)
+	 */
+	(parent_state == IA_CSS_PROCESS_GROUP_READY)));
+	verifexit(state == IA_CSS_PROCESS_READY);
+
+/* Some programs are mapped on a fixed cell, thus the check is not secure,
+ * but it will detect a preset; the process manager will do the secure check
+ */
+	verifexit(ia_css_process_get_cell(process) ==
+		VIED_NCI_N_CELL_ID);
+
+	bit_mask = vied_nci_cell_bit_mask(cell_id);
+	resource_bitmap = ia_css_process_group_get_resource_bitmap(parent);
+
+	verifexit(bit_mask != 0);
+	verifexit(vied_nci_is_bitmap_clear(bit_mask, resource_bitmap));
+
+	ia_css_process_cells_clear(process);
+	ia_css_process_cells_set_cell(process, 0, cell_id);
+
+	resource_bitmap = vied_nci_bitmap_set(resource_bitmap, bit_mask);
+
+	retval = ia_css_process_group_set_resource_bitmap(
+		parent, resource_bitmap);
+EXIT:
+	if (NULL == process) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
+			"ia_css_process_set_cell invalid argument process\n");
+	}
+	if (retval != 0) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_set_cell failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+int ia_css_process_clear_cell(
+	ia_css_process_t *process)
+{
+	int retval = -1;
+	vied_nci_cell_ID_t cell_id;
+	ia_css_process_group_t *parent;
+	vied_nci_resource_bitmap_t resource_bitmap;
+	vied_nci_resource_bitmap_t bit_mask;
+	ia_css_process_group_state_t parent_state;
+	ia_css_process_state_t state;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_clear_cell(): enter:\n");
+	verifexit(process != NULL);
+
+	cell_id = ia_css_process_get_cell(process);
+	parent = ia_css_process_get_parent(process);
+
+	verifexit(parent != NULL);
+
+	parent_state = ia_css_process_group_get_state(parent);
+	state = ia_css_process_get_state(process);
+
+	verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED)
+		|| (parent_state == IA_CSS_PROCESS_GROUP_STARTED)));
+	verifexit(state == IA_CSS_PROCESS_READY);
+
+	bit_mask = vied_nci_cell_bit_mask(cell_id);
+	resource_bitmap = ia_css_process_group_get_resource_bitmap(parent);
+
+	verifexit(bit_mask != 0);
+	verifexit(vied_nci_is_bitmap_set(bit_mask, resource_bitmap));
+
+	ia_css_process_cells_clear(process);
+
+	resource_bitmap = vied_nci_bitmap_clear(resource_bitmap, bit_mask);
+
+	retval = ia_css_process_group_set_resource_bitmap(
+		parent, resource_bitmap);
+EXIT:
+	if (NULL == process) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
+			"ia_css_process_clear_cell invalid argument process\n");
+	}
+	if (retval != 0) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_clear_cell failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+int ia_css_process_set_int_mem(
+	ia_css_process_t *process,
+	const vied_nci_mem_type_ID_t mem_type_id,
+	const vied_nci_resource_size_t offset)
+{
+	int retval = -1;
+	ia_css_process_group_t *parent;
+	vied_nci_cell_ID_t cell_id;
+	ia_css_process_group_state_t parent_state;
+	ia_css_process_state_t state;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_set_int_mem(): enter:\n");
+
+	verifexit(process != NULL);
+	verifexit(mem_type_id < VIED_NCI_N_MEM_TYPE_ID);
+
+	parent = ia_css_process_get_parent(process);
+	cell_id = ia_css_process_get_cell(process);
+
+	parent_state = ia_css_process_group_get_state(parent);
+	state = ia_css_process_get_state(process);
+
+	/* TODO : separate process group start and run from
+	 * process_group_exec_cmd()
+	 */
+	verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) ||
+		(parent_state == IA_CSS_PROCESS_GROUP_STARTED) ||
+		(parent_state == IA_CSS_PROCESS_GROUP_RUNNING)));
+	verifexit(state == IA_CSS_PROCESS_READY);
+
+	if (vied_nci_is_cell_mem_of_type(cell_id, mem_type_id, mem_type_id)) {
+		vied_nci_mem_ID_t mem_id =
+			vied_nci_cell_get_mem(cell_id, mem_type_id);
+
+		process->int_mem_id[mem_type_id] = mem_id;
+		process->int_mem_offset[mem_type_id] = offset;
+		retval = 0;
+	}
+EXIT:
+	if (retval != 0) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_set_int_mem failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+int ia_css_process_clear_int_mem(
+	ia_css_process_t *process,
+	const vied_nci_mem_type_ID_t mem_type_id)
+{
+	int retval = -1;
+	uint16_t mem_index;
+	ia_css_process_group_t *parent;
+	vied_nci_cell_ID_t cell_id;
+	ia_css_process_group_state_t parent_state;
+	ia_css_process_state_t state;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_clear_int_mem(): enter:\n");
+
+	verifexit(process != NULL);
+	verifexit(mem_type_id < VIED_NCI_N_MEM_TYPE_ID);
+
+	parent = ia_css_process_get_parent(process);
+	cell_id = ia_css_process_get_cell(process);
+
+	/* We should have a check on NULL != parent but if parent is NULL
+	 * ia_css_process_group_get_state will return
+	 * IA_CSS_N_PROCESS_GROUP_STATES so it will be filtered anyway later.
+	 */
+
+	/* verifexit(parent != NULL); */
+
+	parent_state = ia_css_process_group_get_state(parent);
+	state = ia_css_process_get_state(process);
+
+	verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED)
+		|| (parent_state == IA_CSS_PROCESS_GROUP_STARTED)));
+	verifexit(state == IA_CSS_PROCESS_READY);
+
+/* We could just clear the field, but let's check the state for
+ * consistency first
+ */
+	for (mem_index = 0; mem_index < (int)VIED_NCI_N_MEM_TYPE_ID;
+		mem_index++) {
+		if (vied_nci_is_cell_mem_of_type(
+			cell_id, mem_index, mem_type_id)) {
+			vied_nci_mem_ID_t mem_id =
+				vied_nci_cell_get_mem(cell_id, mem_index);
+			int mem_of_type;
+
+			mem_of_type =
+				vied_nci_is_mem_of_type(mem_id, mem_type_id);
+
+			assert(mem_of_type);
+			assert((process->int_mem_id[mem_type_id] == mem_id) ||
+				(process->int_mem_id[mem_type_id] ==
+				VIED_NCI_N_MEM_ID));
+			process->int_mem_id[mem_type_id] = VIED_NCI_N_MEM_ID;
+			process->int_mem_offset[mem_type_id] =
+				IA_CSS_PROCESS_INVALID_OFFSET;
+			retval = 0;
+		}
+	}
+
+EXIT:
+	if (NULL == process || mem_type_id >= VIED_NCI_N_MEM_TYPE_ID) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
+			"ia_css_process_clear_int_mem invalid argument\n");
+	}
+	if (retval != 0) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_clear_int_mem failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+int ia_css_process_set_ext_mem(
+	ia_css_process_t *process,
+	const vied_nci_mem_ID_t mem_id,
+	const vied_nci_resource_size_t offset)
+{
+	int retval = -1;
+	ia_css_process_group_t *parent;
+	vied_nci_cell_ID_t cell_id;
+	ia_css_process_group_state_t parent_state;
+	ia_css_process_state_t state;
+	vied_nci_mem_type_ID_t mem_type_id;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_set_ext_mem(): enter:\n");
+
+	verifexit(process != NULL);
+
+	parent = ia_css_process_get_parent(process);
+	cell_id = ia_css_process_get_cell(process);
+
+	/* We should have a check on NULL != parent but if parent is NULL
+	 * ia_css_process_group_get_state will return
+	 * IA_CSS_N_PROCESS_GROUP_STATES so it will be filtered anyway later.
+ */ + + /* verifexit(parent != NULL); */ + + parent_state = ia_css_process_group_get_state(parent); + state = ia_css_process_get_state(process); + + /* TODO : separate process group start and run from + * process_group_exec_cmd() + */ + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_RUNNING))); + verifexit(state == IA_CSS_PROCESS_READY); + + /* Check that the memory actually exists, "vied_nci_has_cell_mem_of_id()" + * will return false on error + */ + + mem_type_id = vied_nci_mem_get_type(mem_id); + if (((!vied_nci_has_cell_mem_of_id(cell_id, mem_id) && + (mem_type_id != VIED_NCI_PMEM_TYPE_ID)) + || vied_nci_mem_is_ext_type(mem_type_id)) && + (mem_id < VIED_NCI_N_MEM_ID)) { + + verifexit(mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID); + process->ext_mem_id[mem_type_id] = mem_id; + process->ext_mem_offset[mem_type_id] = offset; + retval = 0; + } + +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_ext_mem invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_ext_mem failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_clear_ext_mem( + ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_clear_ext_mem(): enter:\n"); + + verifexit(process != NULL); + verifexit(mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID); + + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + verifexit(parent != NULL); + verifexit(state == IA_CSS_PROCESS_READY); + + parent_state = ia_css_process_group_get_state(parent); + + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED))); + + process->ext_mem_id[mem_type_id] = VIED_NCI_N_MEM_ID; + process->ext_mem_offset[mem_type_id] = IA_CSS_PROCESS_INVALID_OFFSET; + + retval = 0; +EXIT: + if (NULL == process || mem_type_id >= VIED_NCI_N_DATA_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_clear_ext_mem invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_clear_ext_mem failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_cells_bitmap( + ia_css_process_t *process, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + ia_css_process_group_t *parent; + ia_css_process_group_state_t parent_state; + ia_css_process_state_t state; + int array_index = 0; + int bit_index; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_cells_bitmap(): enter:\n"); + + verifexit(process != NULL); + parent = ia_css_process_get_parent(process); + state = ia_css_process_get_state(process); + + parent_state = ia_css_process_group_get_state(parent); + + verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) || + (parent_state == IA_CSS_PROCESS_GROUP_STARTED) || + (parent_state == IA_CSS_PROCESS_GROUP_CREATED) || + (parent_state == IA_CSS_PROCESS_GROUP_READY))); + verifexit(state == IA_CSS_PROCESS_READY); + + for (bit_index = 0; bit_index < VIED_NCI_N_CELL_ID; bit_index++) { + if (vied_nci_is_bit_set_in_bitmap(bitmap, bit_index)) { + verifexit(array_index < IA_CSS_PROCESS_MAX_CELLS); + ia_css_process_cells_set_cell(process, + array_index, 
(vied_nci_cell_ID_t)bit_index);
+			array_index++;
+		}
+	}
+	for (; array_index < IA_CSS_PROCESS_MAX_CELLS; array_index++) {
+		ia_css_process_cells_set_cell(process,
+			array_index, VIED_NCI_N_CELL_ID);
+	}
+
+	retval = 0;
+EXIT:
+	if (NULL == process) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
+			"ia_css_process_set_cells_bitmap invalid argument\n");
+	}
+	if (retval != 0) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_set_cells_bitmap failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+int ia_css_process_set_dev_chn(
+	ia_css_process_t *process,
+	const vied_nci_dev_chn_ID_t dev_chn_id,
+	const vied_nci_resource_size_t offset)
+{
+	int retval = -1;
+	ia_css_process_group_t *parent;
+	ia_css_process_group_state_t parent_state;
+	ia_css_process_state_t state;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_set_dev_chn(): enter:\n");
+
+	verifexit(process != NULL);
+	verifexit(dev_chn_id < VIED_NCI_N_DEV_CHN_ID);
+
+	parent = ia_css_process_get_parent(process);
+	state = ia_css_process_get_state(process);
+
+	parent_state = ia_css_process_group_get_state(parent);
+
+	/* TODO : separate process group start and run from
+	 * process_group_exec_cmd()
+	 */
+	verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) ||
+		(parent_state == IA_CSS_PROCESS_GROUP_STARTED) ||
+		(parent_state == IA_CSS_PROCESS_GROUP_RUNNING)));
+	verifexit(state == IA_CSS_PROCESS_READY);
+
+	process->dev_chn_offset[dev_chn_id] = offset;
+
+	retval = 0;
+EXIT:
+	if (NULL == process || dev_chn_id >= VIED_NCI_N_DEV_CHN_ID) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
+			"ia_css_process_set_dev_chn invalid argument\n");
+	}
+	if (retval != 0) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_set_dev_chn failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+int ia_css_process_set_dfm_port_bitmap(
+	ia_css_process_t *process,
+	const vied_nci_dev_dfm_id_t dfm_dev_id,
+	const vied_nci_resource_bitmap_t bitmap)
+{
+	int retval = -1;
+	ia_css_process_group_t *parent;
+	ia_css_process_group_state_t parent_state;
+	ia_css_process_state_t state;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_set_dfm_port_bitmap(): enter:\n");
+
+	verifexit(process != NULL);
+
+	parent = ia_css_process_get_parent(process);
+	state = ia_css_process_get_state(process);
+
+	parent_state = ia_css_process_group_get_state(parent);
+
+	/* TODO : separate process group start and run from
+	 * process_group_exec_cmd()
+	 */
+	verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) ||
+		(parent_state == IA_CSS_PROCESS_GROUP_STARTED) ||
+		(parent_state == IA_CSS_PROCESS_GROUP_RUNNING)));
+	verifexit(state == IA_CSS_PROCESS_READY);
+
+#if (VIED_NCI_N_DEV_DFM_ID > 0)
+	verifexit(dfm_dev_id < VIED_NCI_N_DEV_DFM_ID);
+	process->dfm_port_bitmap[dfm_dev_id] = bitmap;
+#else
+	(void)bitmap;
+	(void)dfm_dev_id;
+#endif
+	retval = 0;
+EXIT:
+	if (retval != 0) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_set_dfm_port_bitmap invalid argument\n");
+	}
+	return retval;
+}
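+
+/*
+ * Editor's sketch (not part of the original patch): the reserved and the
+ * active DFM port bitmaps are programmed together, reserved first, then
+ * the actively driven subset.  The helper name is hypothetical and the
+ * sketch assumes the usual psys process API declarations are in scope
+ * (ia_css_process_set_dfm_active_port_bitmap is defined below).
+ */
+static __attribute__((unused)) int ia_css_example_assign_dfm_ports(
+	ia_css_process_t *process,
+	const vied_nci_dev_dfm_id_t dfm_dev_id,
+	const vied_nci_resource_bitmap_t reserved,
+	const vied_nci_resource_bitmap_t active)
+{
+	/* Reserve the full set of ports for this process first ... */
+	int retval = ia_css_process_set_dfm_port_bitmap(
+			process, dfm_dev_id, reserved);
+
+	if (retval != 0)
+		return retval;
+	/* ... then mark the subset of ports that is actively driven */
+	return ia_css_process_set_dfm_active_port_bitmap(
+			process, dfm_dev_id, active);
+}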
+int ia_css_process_set_dfm_active_port_bitmap(
+	ia_css_process_t *process,
+	const vied_nci_dev_dfm_id_t dfm_dev_id,
+	const vied_nci_resource_bitmap_t bitmap)
+{
+	int retval = -1;
+	ia_css_process_group_t *parent;
+	ia_css_process_group_state_t parent_state;
+	ia_css_process_state_t state;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_set_dfm_active_port_bitmap(): enter:\n");
+
+	verifexit(process != NULL);
+
+	parent = ia_css_process_get_parent(process);
+	state = ia_css_process_get_state(process);
+
+	parent_state = ia_css_process_group_get_state(parent);
+
+	/* TODO : separate process group start and run from
+	 * process_group_exec_cmd()
+	 */
+	verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED) ||
+		(parent_state == IA_CSS_PROCESS_GROUP_STARTED) ||
+		(parent_state == IA_CSS_PROCESS_GROUP_RUNNING)));
+	verifexit(state == IA_CSS_PROCESS_READY);
+#if (VIED_NCI_N_DEV_DFM_ID > 0)
+	verifexit(dfm_dev_id < VIED_NCI_N_DEV_DFM_ID);
+	process->dfm_active_port_bitmap[dfm_dev_id] = bitmap;
+#else
+	(void)bitmap;
+	(void)dfm_dev_id;
+#endif
+	retval = 0;
+EXIT:
+	if (retval != 0) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
+			"ia_css_process_set_dfm_active_port_bitmap invalid argument\n");
+	}
+	return retval;
+}
+
+int ia_css_process_clear_dev_chn(
+	ia_css_process_t *process,
+	const vied_nci_dev_chn_ID_t dev_chn_id)
+{
+	int retval = -1;
+	ia_css_process_group_t *parent;
+	ia_css_process_group_state_t parent_state;
+	ia_css_process_state_t state;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_clear_dev_chn(): enter:\n");
+
+	verifexit(process != NULL);
+
+	parent = ia_css_process_get_parent(process);
+
+	/* We should have a check on parent != NULL, but if parent is NULL
+	 * ia_css_process_group_get_state will return
+	 * IA_CSS_N_PROCESS_GROUP_STATES, so it will be filtered out later
+	 * anyway.
+	 */
+
+	/* verifexit(parent != NULL); */
+
+	parent_state = ia_css_process_group_get_state(parent);
+	state = ia_css_process_get_state(process);
+
+	verifexit(((parent_state == IA_CSS_PROCESS_GROUP_BLOCKED)
+		|| (parent_state == IA_CSS_PROCESS_GROUP_STARTED)));
+	verifexit(state == IA_CSS_PROCESS_READY);
+
+	verifexit(dev_chn_id < VIED_NCI_N_DEV_CHN_ID);
+
+	process->dev_chn_offset[dev_chn_id] = IA_CSS_PROCESS_INVALID_OFFSET;
+
+	retval = 0;
+EXIT:
+	if (NULL == process) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
+			"ia_css_process_clear_dev_chn invalid argument process\n");
+	}
+	if (retval != 0) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_clear_dev_chn failed (%i)\n", retval);
+	}
+	return retval;
+}
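+
+/*
+ * Editor's sketch (not part of the original patch): releasing every
+ * device-channel assignment through the per-channel API above.
+ * ia_css_process_clear_all below subsumes this; the loop is shown only
+ * to illustrate the valid index range 0 .. VIED_NCI_N_DEV_CHN_ID - 1.
+ * The helper name is hypothetical.
+ */
+static __attribute__((unused)) int ia_css_example_clear_all_dev_chn(
+	ia_css_process_t *process)
+{
+	int chn;
+
+	for (chn = 0; chn < (int)VIED_NCI_N_DEV_CHN_ID; chn++) {
+		int retval = ia_css_process_clear_dev_chn(process,
+				(vied_nci_dev_chn_ID_t)chn);
+
+		if (retval != 0)
+			return retval;
+	}
+	return 0;
+}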
+int ia_css_process_clear_all(
+	ia_css_process_t *process)
+{
+	int retval = -1;
+	ia_css_process_group_t *parent;
+	ia_css_process_group_state_t parent_state;
+	ia_css_process_state_t state;
+	int mem_index;
+	int dev_chn_index;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_clear_all(): enter:\n");
+
+	verifexit(process != NULL);
+
+	parent = ia_css_process_get_parent(process);
+	state = ia_css_process_get_state(process);
+
+	/* We should have a check on parent != NULL, but if parent is NULL
+	 * ia_css_process_group_get_state will return
+	 * IA_CSS_N_PROCESS_GROUP_STATES, so it will be filtered out below.
+	 */
+
+	/* verifexit(parent != NULL); */
+
+	parent_state = ia_css_process_group_get_state(parent);
+
+/* Unlike resource set, resource clear is accepted in any parent state except
+ * running; also reject the invalid state that signals a NULL parent
+ */
+	verifexit((parent_state != IA_CSS_PROCESS_GROUP_RUNNING) &&
+		(parent_state != IA_CSS_N_PROCESS_GROUP_STATES));
+	verifexit((state == IA_CSS_PROCESS_CREATED) ||
+		(state == IA_CSS_PROCESS_READY));
+
+	for (dev_chn_index = 0; dev_chn_index < VIED_NCI_N_DEV_CHN_ID;
+		dev_chn_index++) {
+		process->dev_chn_offset[dev_chn_index] =
+			IA_CSS_PROCESS_INVALID_OFFSET;
+	}
+/* No difference whether a cell_id has been set or not, clear all */
+	for (mem_index = 0; mem_index < VIED_NCI_N_DATA_MEM_TYPE_ID;
+		mem_index++) {
+		process->ext_mem_id[mem_index] = VIED_NCI_N_MEM_ID;
+		process->ext_mem_offset[mem_index] =
+			IA_CSS_PROCESS_INVALID_OFFSET;
+	}
+	for (mem_index = 0; mem_index < VIED_NCI_N_MEM_TYPE_ID; mem_index++) {
+		process->int_mem_id[mem_index] = VIED_NCI_N_MEM_ID;
+		process->int_mem_offset[mem_index] =
+			IA_CSS_PROCESS_INVALID_OFFSET;
+	}
+
+	ia_css_process_cells_clear(process);
+
+	retval = 0;
+EXIT:
+	if (NULL == process) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
+			"ia_css_process_clear_all invalid argument process\n");
+	}
+	if (retval != 0) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_clear_all failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+int ia_css_process_acquire(
+	ia_css_process_t *process)
+{
+	int retval = -1;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_acquire(): enter:\n");
+
+	verifexit(process != NULL);
+
+	retval = 0;
+EXIT:
+	if (NULL == process) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
+			"ia_css_process_acquire invalid argument process\n");
+	}
+	if (retval != 0) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_acquire failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+int ia_css_process_release(
+	ia_css_process_t *process)
+{
+	int retval = -1;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO,
+		"ia_css_process_release(): enter:\n");
+
+	verifexit(process != NULL);
+
+	retval = 0;
+EXIT:
+	if (NULL == process) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING,
+			"ia_css_process_release invalid argument process\n");
+	}
+	if (retval != 0) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_release failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+int ia_css_process_print(const ia_css_process_t *process, void *fid)
+{
+	int retval = -1;
+	int i, dev_chn_index;
+	uint16_t mem_index;
+	uint8_t cell_dependency_count, terminal_dependency_count;
+	vied_nci_cell_ID_t cell_id = ia_css_process_get_cell(process);
+	NOT_USED(fid);
+
+	IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO,
+		"ia_css_process_print(process %p): enter:\n", process);
+
+	verifexit(process != NULL);
+
+	IA_CSS_TRACE_6(PSYSAPI_DYNAMIC, INFO,
+		"\tprocess %p, sizeof %d, programID %d, state %d, parent %p, cell %d\n",
+		process,
+		(int)ia_css_process_get_size(process),
+		(int)ia_css_process_get_program_ID(process),
+		(int)ia_css_process_get_state(process),
+		(void *)ia_css_process_get_parent(process),
+		(int)ia_css_process_get_cell(process));
+
+	for (mem_index = 0; mem_index < (int)VIED_NCI_N_MEM_TYPE_ID;
+		mem_index++) {
+		vied_nci_mem_ID_t mem_id =
+			(vied_nci_mem_ID_t)(process->int_mem_id[mem_index]);
+		if (cell_id == VIED_NCI_N_CELL_ID) {
+			verifexit(mem_id == VIED_NCI_N_MEM_ID);
+			continue;
+		}
+		verifexit(((mem_id == vied_nci_cell_get_mem(cell_id, mem_index))
+			|| (mem_id == VIED_NCI_N_MEM_ID)));
+
+		IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO,
+			"\tinternal index %d, type %d, id %d offset 0x%x\n",
+ mem_index, + (int)vied_nci_cell_get_mem_type(cell_id, mem_index), + (int)mem_id, + process->int_mem_offset[mem_index]); + } + + for (mem_index = 0; mem_index < (int)VIED_NCI_N_DATA_MEM_TYPE_ID; + mem_index++) { + vied_nci_mem_ID_t mem_id = + (vied_nci_mem_ID_t)(process->ext_mem_id[mem_index]); + /* TODO: in case of an cells_bitmap = [], + * vied_nci_cell_get_mem_type will return a wrong result. + */ + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "\texternal index %d, type %d, id %d offset 0x%x\n", + mem_index, + (int)vied_nci_cell_get_mem_type(cell_id, mem_index), + (int)mem_id, + process->ext_mem_offset[mem_index]); + NOT_USED(mem_id); + } + for (dev_chn_index = 0; dev_chn_index < (int)VIED_NCI_N_DEV_CHN_ID; + dev_chn_index++) { + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "\tdevice channel index %d, type %d, offset 0x%x\n", + dev_chn_index, + (int)dev_chn_index, + process->dev_chn_offset[dev_chn_index]); + } +#if HAS_DFM + for (dev_chn_index = 0; dev_chn_index < (int)VIED_NCI_N_DEV_DFM_ID; + dev_chn_index++) { + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "\tdfm device index %d, type %d, bitmap 0x%x active_ports_bitmap 0x%x\n", + dev_chn_index, dev_chn_index, + process->dfm_port_bitmap[dev_chn_index], + process->dfm_active_port_bitmap[dev_chn_index]); + } +#endif + + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "\tcells[%d] = 0x%x\n", + i, ia_css_process_cells_get_cell(process, i)); + } + + cell_dependency_count = + ia_css_process_get_cell_dependency_count(process); + if (cell_dependency_count == 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tcell_dependencies[%d] {};\n", cell_dependency_count); + } else { + vied_nci_resource_id_t cell_dependency; + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tcell_dependencies[%d] {", cell_dependency_count); + for (i = 0; i < (int)cell_dependency_count - 1; i++) { + cell_dependency = + ia_css_process_get_cell_dependency(process, i); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "%4d, ", cell_dependency); + } + cell_dependency = + ia_css_process_get_cell_dependency(process, i); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "%4d}\n", cell_dependency); + (void)cell_dependency; + } + + terminal_dependency_count = + ia_css_process_get_terminal_dependency_count(process); + if (terminal_dependency_count == 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tterminal_dependencies[%d] {};\n", + terminal_dependency_count); + } else { + uint8_t terminal_dependency; + + terminal_dependency_count = + ia_css_process_get_terminal_dependency_count(process); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tterminal_dependencies[%d] {", + terminal_dependency_count); + for (i = 0; i < (int)terminal_dependency_count - 1; i++) { + terminal_dependency = + ia_css_process_get_terminal_dependency(process, i); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "%4d, ", terminal_dependency); + } + terminal_dependency = + ia_css_process_get_terminal_dependency(process, i); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "%4d}\n", terminal_dependency); + (void)terminal_dependency; + } + + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_print invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_print failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_parent( + ia_css_process_t *process, + ia_css_process_group_t *parent) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_parent(): enter:\n"); + + 
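+	/*
+	 * Editor's note: the parent link is stored below as a self-relative
+	 * byte offset rather than a pointer, so a process group stays valid
+	 * when the blob is copied or mapped at a different (IPU) address.
+	 * A reader would recover the parent roughly as follows (sketch;
+	 * assumes the offset is reinterpreted as signed on the way back,
+	 * since the group header precedes its processes in memory and the
+	 * raw difference is therefore negative):
+	 *
+	 *   parent = (ia_css_process_group_t *)
+	 *       ((char *)process + (int16_t)process->parent_offset);
+	 */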
verifexit(process != NULL); + verifexit(parent != NULL); + + process->parent_offset = (uint16_t) ((char *)parent - (char *)process); + retval = 0; +EXIT: + if (NULL == process || NULL == parent) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_set_parent invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_parent failed (%i)\n", retval); + } + return retval; +} + +int ia_css_process_set_cell_dependency( + const ia_css_process_t *process, + const unsigned int dep_index, + const vied_nci_resource_id_t id) +{ + int retval = -1; + uint8_t *process_dep_ptr; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_cell_dependency(): enter:\n"); + verifexit(process != NULL); + + process_dep_ptr = + (uint8_t *)process + process->cell_dependencies_offset + + dep_index*sizeof(vied_nci_resource_id_t); + + + *process_dep_ptr = id; + retval = 0; +EXIT: + return retval; +} + +int ia_css_process_set_terminal_dependency( + const ia_css_process_t *process, + const unsigned int dep_index, + const vied_nci_resource_id_t id) +{ + int retval = -1; + uint8_t *terminal_dep_ptr; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_terminal_dependency(): enter:\n"); + verifexit(process != NULL); + verifexit(ia_css_process_get_terminal_dependency_count(process) > dep_index); + + terminal_dep_ptr = + (uint8_t *)process + process->terminal_dependencies_offset + + dep_index*sizeof(uint8_t); + + *terminal_dep_ptr = id; + retval = 0; +EXIT: + return retval; +} + +int ia_css_process_cmd( + ia_css_process_t *process, + const ia_css_process_cmd_t cmd) +{ + int retval = -1; + ia_css_process_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, "ia_css_process_cmd(): enter:\n"); + + verifexit(process != NULL); + + state = ia_css_process_get_state(process); + + verifexit(state != IA_CSS_PROCESS_ERROR); + verifexit(state < IA_CSS_N_PROCESS_STATES); + + switch (cmd) { + case IA_CSS_PROCESS_CMD_NOP: + break; + case IA_CSS_PROCESS_CMD_ACQUIRE: + verifexit(state == IA_CSS_PROCESS_READY); + break; + case IA_CSS_PROCESS_CMD_RELEASE: + verifexit(state == IA_CSS_PROCESS_READY); + break; + case IA_CSS_PROCESS_CMD_START: + verifexit((state == IA_CSS_PROCESS_READY) + || (state == IA_CSS_PROCESS_STOPPED)); + process->state = IA_CSS_PROCESS_STARTED; + break; + case IA_CSS_PROCESS_CMD_LOAD: + verifexit(state == IA_CSS_PROCESS_STARTED); + process->state = IA_CSS_PROCESS_RUNNING; + break; + case IA_CSS_PROCESS_CMD_STOP: + verifexit((state == IA_CSS_PROCESS_RUNNING) + || (state == IA_CSS_PROCESS_SUSPENDED)); + process->state = IA_CSS_PROCESS_STOPPED; + break; + case IA_CSS_PROCESS_CMD_SUSPEND: + verifexit(state == IA_CSS_PROCESS_RUNNING); + process->state = IA_CSS_PROCESS_SUSPENDED; + break; + case IA_CSS_PROCESS_CMD_RESUME: + verifexit(state == IA_CSS_PROCESS_SUSPENDED); + process->state = IA_CSS_PROCESS_RUNNING; + break; + case IA_CSS_N_PROCESS_CMDS: /* Fall through */ + default: + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_cmd invalid cmd (0x%x)\n", cmd); + goto EXIT; + } + retval = 0; +EXIT: + if (NULL == process) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_cmd invalid argument process\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_cmd failed (%i)\n", retval); + } + return retval; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group.c 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group.c new file mode 100644 index 000000000000..46bb82804153 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group.c @@ -0,0 +1,886 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_process_group.h" +#include "ia_css_psys_dynamic_storage_class.h" + +/* + * Functions to possibly inline + */ + +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_process_group_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +/* + * Functions not to inline + */ + +/* This header is need for cpu memset to 0 +* and process groups are not created in SP +*/ +#if !defined(__VIED_CELL) +#include "cpu_mem_support.h" +#endif + +/* This source file is created with the intention of sharing and +* compiled for host and firmware. Since there is no native 64bit +* data type support for firmware this wouldn't compile for SP +* tile. The part of the file that is not compilable are marked +* with the following __VIED_CELL marker and this comment. Once we +* come up with a solution to address this issue this will be +* removed. +*/ +#if !defined(__VIED_CELL) +static bool ia_css_process_group_is_program_enabled( + const ia_css_program_manifest_t *program_manifest, + ia_css_kernel_bitmap_t enable_bitmap) +{ + ia_css_kernel_bitmap_t program_bitmap = + ia_css_program_manifest_get_kernel_bitmap(program_manifest); + ia_css_program_type_t program_type = + ia_css_program_manifest_get_type(program_manifest); + ia_css_kernel_bitmap_t program_enable_bitmap; + + if (!ia_css_is_kernel_bitmap_intersection_empty(enable_bitmap, + program_bitmap)) { + + if (program_type == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB || + program_type == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER || + program_type == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) { + /* + * EXCLUSIVE_SUB programs are subsets of + * EXCLUSIVE_SUPER so the bits of the enable_bitmap + * that refer to those are those of their + * EXCLUSIVE_SUPER program (on which the depend) and + * not the subset that their own program_bitmap has + */ + if (program_type == + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB || + program_type == + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) { + ia_css_kernel_bitmap_t super_program_bitmap; + + const ia_css_program_group_manifest_t * + prog_group_manifest = + ia_css_program_manifest_get_parent(program_manifest); + uint8_t super_prog_idx = + ia_css_program_manifest_get_program_dependency( + program_manifest, 0); + const ia_css_program_manifest_t * + super_program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + prog_group_manifest, super_prog_idx); + + verifexit(super_program_manifest != NULL); + if (((program_type == + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (ia_css_program_manifest_get_type( + super_program_manifest) != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER)) + || ((program_type == + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) && + (ia_css_program_manifest_get_type( + 
super_program_manifest) != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER))) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_is_program_enabled(): Error\n"); + verifexit(0); + } + + super_program_bitmap = + ia_css_program_manifest_get_kernel_bitmap( + super_program_manifest); + program_enable_bitmap = + ia_css_kernel_bitmap_intersection( + enable_bitmap, + super_program_bitmap); + } else { + program_enable_bitmap = + ia_css_kernel_bitmap_intersection( + enable_bitmap, program_bitmap); + } + + if (ia_css_is_kernel_bitmap_equal( + program_enable_bitmap, program_bitmap)) { + return true; + } + } else if (program_type == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER) { + /* + * Virtual super programs are not selectable + * only the virtual sub programs + */ + return false; + } else { + return true; + } + } + +EXIT: + return false; +} + +static bool ia_css_process_group_is_terminal_enabled( + const ia_css_terminal_manifest_t *terminal_manifest, + ia_css_kernel_bitmap_t enable_bitmap) +{ + ia_css_terminal_type_t terminal_type; + + verifjmpexit(NULL != terminal_manifest); + terminal_type = ia_css_terminal_manifest_get_type(terminal_manifest); + + if (ia_css_is_terminal_manifest_data_terminal(terminal_manifest)) { + ia_css_data_terminal_manifest_t *data_term_manifest = + (ia_css_data_terminal_manifest_t *)terminal_manifest; + ia_css_kernel_bitmap_t term_bitmap = + ia_css_data_terminal_manifest_get_kernel_bitmap( + data_term_manifest); + /* + * Terminals depend on a kernel, + * if the kernel is present the program it contains and + * the terminal the program depends on are active + */ + if (!ia_css_is_kernel_bitmap_intersection_empty( + enable_bitmap, term_bitmap)) { + return true; + } + } else if (ia_css_is_terminal_manifest_spatial_parameter_terminal( + terminal_manifest)) { + ia_css_kernel_bitmap_t term_kernel_bitmap = ia_css_kernel_bitmap_clear(); + ia_css_spatial_param_terminal_manifest_t *spatial_term_man = + (ia_css_spatial_param_terminal_manifest_t *) + terminal_manifest; + + term_kernel_bitmap = + ia_css_kernel_bitmap_set( + term_kernel_bitmap, + spatial_term_man->kernel_id); + if (!ia_css_is_kernel_bitmap_intersection_empty( + enable_bitmap, term_kernel_bitmap)) { + return true; + } + + } else if (ia_css_is_terminal_manifest_parameter_terminal( + terminal_manifest) && terminal_type == + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN) { + return true; + + } else if (ia_css_is_terminal_manifest_parameter_terminal( + terminal_manifest) && terminal_type == + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT) { + /* + * For parameter out terminals, we disable the terminals + * if ALL the corresponding kernels are disabled, + * for parameter in terminals we cannot do this; + * even if kernels are disabled, it may be required that + * (HW) parameters must be supplied via the parameter + * in terminal (e.g. bypass bits). 
+ */ + ia_css_kernel_bitmap_t term_kernel_bitmap = ia_css_kernel_bitmap_clear(); + ia_css_param_terminal_manifest_t *param_term_man = + (ia_css_param_terminal_manifest_t *)terminal_manifest; + ia_css_param_manifest_section_desc_t *section_desc; + unsigned int section = 0; + + for (section = 0; section < param_term_man-> + param_manifest_section_desc_count; section++) { + section_desc = + ia_css_param_terminal_manifest_get_prm_sct_desc( + param_term_man, section); + verifjmpexit(section_desc != NULL); + term_kernel_bitmap = ia_css_kernel_bitmap_set( + term_kernel_bitmap, + section_desc->kernel_id); + } + + if (!ia_css_is_kernel_bitmap_intersection_empty( + enable_bitmap, term_kernel_bitmap)) { + return true; + } + } else if (ia_css_is_terminal_manifest_program_terminal( + terminal_manifest)) { + return true; + } else if (ia_css_is_terminal_manifest_program_control_init_terminal( + terminal_manifest)) { + return true; + } +EXIT: + return false; +} + +size_t ia_css_sizeof_process_group( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + size_t size = 0, tmp_size; + int i, error_val = -1; + uint8_t process_count, process_num; + uint8_t terminal_count; + ia_css_kernel_bitmap_t enable_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_sizeof_process_group(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + COMPILATION_ERROR_IF( + SIZE_OF_PROCESS_GROUP_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_process_group_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_process_group_t) % sizeof(uint64_t)); + + process_count = + ia_css_process_group_compute_process_count(manifest, param); + terminal_count = + ia_css_process_group_compute_terminal_count(manifest, param); + + verifexit(process_count != 0); + verifexit(terminal_count != 0); + + size += sizeof(ia_css_process_group_t); + + tmp_size = process_count * sizeof(uint16_t); + size += tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + + tmp_size = terminal_count * sizeof(uint16_t); + size += tot_bytes_for_pow2_align(sizeof(uint64_t), tmp_size); + + enable_bitmap = + ia_css_program_group_param_get_kernel_enable_bitmap(param); + process_num = 0; + for (i = 0; i < (int)ia_css_program_group_manifest_get_program_count( + manifest); i++) { + ia_css_program_manifest_t *program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst(manifest, i); + ia_css_program_param_t *program_param = + ia_css_program_group_param_get_program_param(param, i); + + if (ia_css_process_group_is_program_enabled( + program_manifest, enable_bitmap)) { + verifexit(process_num < process_count); + size += ia_css_sizeof_process( + program_manifest, program_param); + process_num++; + } + } + + verifexit(process_num == process_count); + + for (i = 0; i < (int)ia_css_program_group_manifest_get_terminal_count( + manifest); i++) { + ia_css_terminal_manifest_t *terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + + if (ia_css_process_group_is_terminal_enabled( + terminal_manifest, enable_bitmap)) { + size += ia_css_sizeof_terminal( + terminal_manifest, param); + } + } + + error_val = 0; + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_sizeof_process_group invalid argument\n"); + } + if (error_val != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_sizeof_process_group ERROR(%d)\n", error_val); + } + return size; +} + +ia_css_process_group_t *ia_css_process_group_create( + void *process_grp_mem, + const 
ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + size_t size = ia_css_sizeof_process_group(manifest, param); + int retval = -1; + int ret; + int i; + ia_css_process_group_t *process_group = NULL; + uint8_t process_count, process_num; + uint8_t terminal_count, terminal_num; + uint16_t fragment_count; + char *process_grp_raw_ptr; + uint16_t *process_tab_ptr, *terminal_tab_ptr; + ia_css_kernel_bitmap_t enable_bitmap; + uint8_t manifest_terminal_count; + + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create(process_grp_mem %p, manifest %p, group_param %p): enter:\n", + process_grp_mem, manifest, param); + + verifexit(process_grp_mem != NULL); + verifexit(manifest != NULL); + verifexit(param != NULL); + verifexit(ia_css_is_program_group_manifest_valid(manifest)); + + process_group = (ia_css_process_group_t *)process_grp_mem; + ia_css_cpu_mem_set_zero(process_group, size); + process_grp_raw_ptr = (char *) process_group; + + process_group->state = IA_CSS_PROCESS_GROUP_CREATED; + + process_group->protocol_version = + ia_css_program_group_param_get_protocol_version(param); + + fragment_count = ia_css_program_group_param_get_fragment_count(param); + process_count = + ia_css_process_group_compute_process_count(manifest, param); + terminal_count = + ia_css_process_group_compute_terminal_count(manifest, param); + enable_bitmap = + ia_css_program_group_param_get_kernel_enable_bitmap(param); + + process_group->fragment_count = fragment_count; + process_group->process_count = process_count; + process_group->terminal_count = terminal_count; + + process_grp_raw_ptr += sizeof(ia_css_process_group_t); + process_tab_ptr = (uint16_t *) process_grp_raw_ptr; + process_group->processes_offset = + (uint16_t)(process_grp_raw_ptr - (char *)process_group); + + process_grp_raw_ptr += tot_bytes_for_pow2_align( + sizeof(uint64_t), process_count * sizeof(uint16_t)); + terminal_tab_ptr = (uint16_t *) process_grp_raw_ptr; + process_group->terminals_offset = + (uint16_t)(process_grp_raw_ptr - (char *)process_group); + + /* Move raw pointer to the first process */ + process_grp_raw_ptr += tot_bytes_for_pow2_align( + sizeof(uint64_t), terminal_count * sizeof(uint16_t)); + + /* Set default */ + verifexit(ia_css_process_group_set_fragment_limit( + process_group, fragment_count) == 0); + + /* Set process group terminal dependency list */ + /* This list is used during creating the process dependency list */ + manifest_terminal_count = + ia_css_program_group_manifest_get_terminal_count(manifest); + + terminal_num = 0; + for (i = 0; i < (int)manifest_terminal_count; i++) { + ia_css_terminal_manifest_t *t_manifest = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + + verifexit(NULL != t_manifest); + if (ia_css_process_group_is_terminal_enabled( + t_manifest, enable_bitmap)) { + ia_css_terminal_t *terminal = NULL; + ia_css_terminal_param_t *terminal_param = + ia_css_program_group_param_get_terminal_param( + param, i); + + verifexit(NULL != terminal_param); + terminal_tab_ptr[terminal_num] = + (uint16_t)(process_grp_raw_ptr - + (char *)process_group); + terminal = ia_css_terminal_create( + process_grp_raw_ptr, t_manifest, + terminal_param, enable_bitmap); + verifexit(terminal != NULL); + verifexit((ia_css_terminal_set_parent( + terminal, process_group) == 0)); + verifexit((ia_css_terminal_set_terminal_manifest_index( + terminal, i) == 0)); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create: terminal_manifest_index %d\n", + i); + + 
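+			/*
+			 * Editor's note: the process group is one contiguous
+			 * allocation and the raw pointer walks it front to
+			 * back:
+			 *
+			 *   [group header][process offset table]
+			 *   [terminal offset table][terminals][processes]
+			 *
+			 * so terminal instances are laid out before process
+			 * instances.  Each table entry is the 16-bit byte
+			 * offset of the object relative to the start of the
+			 * group, which is why only offsets, never pointers,
+			 * are stored.
+			 */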
process_grp_raw_ptr += ia_css_terminal_get_size( + terminal); + terminal_num++; + } + } + verifexit(terminal_num == terminal_count); + + process_num = 0; + for (i = 0; i < (int)ia_css_program_group_manifest_get_program_count( + manifest); i++) { + ia_css_process_t *process = NULL; + ia_css_program_manifest_t *program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, i); + ia_css_program_param_t *program_param = + ia_css_program_group_param_get_program_param(param, i); + unsigned int prog_dep_index, proc_dep_index; + unsigned int term_dep_index, term_index; + + if (ia_css_process_group_is_program_enabled( + program_manifest, enable_bitmap)) { + + verifexit(process_num < process_count); + + process_tab_ptr[process_num] = + (uint16_t)(process_grp_raw_ptr - + (char *)process_group); + process = ia_css_process_create( + process_grp_raw_ptr, + program_manifest, + program_param, + i); + verifexit(process != NULL); + + ia_css_process_set_parent(process, process_group); + if (ia_css_has_program_manifest_fixed_cell( + program_manifest)) { + vied_nci_cell_ID_t cell_id = + ia_css_program_manifest_get_cell_ID( + program_manifest); + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create: cell_id %d\n", + cell_id); + ia_css_process_set_cell(process, cell_id); + } + + process_grp_raw_ptr += ia_css_process_get_size( + process); + /* + * Set process dependencies of process derived + * from program manifest + */ + for (prog_dep_index = 0; prog_dep_index < + ia_css_program_manifest_get_program_dependency_count( + program_manifest); prog_dep_index++) { + uint8_t dep_prog_idx = + ia_css_program_manifest_get_program_dependency( + program_manifest, prog_dep_index); + const ia_css_program_manifest_t * + dep_prg_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, dep_prog_idx); + ia_css_program_ID_t id = + ia_css_program_manifest_get_program_ID( + dep_prg_manifest); + + verifexit(id != 0); + for (proc_dep_index = 0; + proc_dep_index < process_num; + proc_dep_index++) { + ia_css_process_t *dep_process = + ia_css_process_group_get_process( + process_group, + proc_dep_index); + + ia_css_process_set_cell_dependency( + process, + prog_dep_index, 0); + + if (ia_css_process_get_program_ID( + dep_process) == id) { + ia_css_process_set_cell_dependency( + process, + prog_dep_index, + proc_dep_index); + break; + } + } + } + process_num++; + + /* + * Set terminal dependencies of process derived + * from program manifest + */ + for (term_dep_index = 0; term_dep_index < + ia_css_program_manifest_get_terminal_dependency_count( + program_manifest); term_dep_index++) { + uint8_t pm_term_index = + ia_css_program_manifest_get_terminal_dependency + (program_manifest, term_dep_index); + + verifexit(pm_term_index < manifest_terminal_count); + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create(): term_dep_index: %d, pm_term_index: %d\n", + term_dep_index, pm_term_index); + for (term_index = 0; + term_index < terminal_count; + term_index++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal( + process_group, + term_index); + + if (ia_css_terminal_get_terminal_manifest_index + (terminal) == pm_term_index) { + ia_css_process_set_terminal_dependency( + process, + term_dep_index, + term_index); + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create() set_terminal_dependency(process: %d, dep_idx: %d, term_idx: %d)\n", + i, term_dep_index, term_index); + + break; + } + } + } + } + } + verifexit(process_num == process_count); + + 
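+	/*
+	 * Editor's note: the loops above translate manifest-level
+	 * dependencies into process-group-local indices.  For example, if
+	 * the manifest says program 3 depends on program 1, and program 1
+	 * became process 0 because disabled programs are skipped, the
+	 * process created for program 3 gets cell dependency 0, not 1.
+	 * Terminal dependencies are remapped the same way, via the
+	 * terminal_manifest_index stored on each created terminal.
+	 */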
process_group->size = + (uint32_t)ia_css_sizeof_process_group(manifest, param); + process_group->ID = + ia_css_program_group_manifest_get_program_group_ID(manifest); + + /* Initialize performance measurement fields to zero */ + process_group->pg_load_start_ts = 0; + process_group->pg_load_cycles = 0; + process_group->pg_init_cycles = 0; + process_group->pg_processing_cycles = 0; + + verifexit(process_group->ID != 0); + + ret = ia_css_process_group_on_create(process_group, manifest, param); + verifexit(ret == 0); + + process_group->state = IA_CSS_PROCESS_GROUP_READY; + retval = 0; + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_create(): Created successfully process group ID 0x%x\n", + process_group->ID); + +EXIT: + if (NULL == process_grp_mem || NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_create invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_create failed (%i)\n", retval); + process_group = ia_css_process_group_destroy(process_group); + } + return process_group; +} + +ia_css_process_group_t *ia_css_process_group_destroy( + ia_css_process_group_t *process_group) +{ + if (process_group != NULL) { + ia_css_process_group_on_destroy(process_group); + process_group = NULL; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_destroy invalid argument\n"); + } + return process_group; +} + +int ia_css_process_group_submit( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_submit(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_SUBMIT); +} + +int ia_css_process_group_start( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_start(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_START); +} + +int ia_css_process_group_stop( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_stop(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_STOP); +} + +int ia_css_process_group_run( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_run(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_RUN); +} + +int ia_css_process_group_suspend( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_suspend(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_SUSPEND); +} + +int ia_css_process_group_resume( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_resume(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_RESUME); +} + +int ia_css_process_group_reset( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_reset(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_RESET); +} + +int ia_css_process_group_abort( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_abort(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_ABORT); +} + +int 
ia_css_process_group_disown( + ia_css_process_group_t *process_group) +{ + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_disown(): enter:\n"); + + return ia_css_process_group_exec_cmd(process_group, + IA_CSS_PROCESS_GROUP_CMD_DISOWN); +} + +extern uint64_t ia_css_process_group_get_token( + ia_css_process_group_t *process_group) +{ + uint64_t token = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_token(): enter:\n"); + + verifexit(process_group != NULL); + + token = process_group->token; + +EXIT: + if (NULL == process_group) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_get_token invalid argument\n"); + } + return token; +} + +int ia_css_process_group_set_token( + ia_css_process_group_t *process_group, + const uint64_t token) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_token(): enter:\n"); + + verifexit(process_group != NULL); + verifexit(token != 0); + + process_group->token = token; + + retval = 0; +EXIT: + if (NULL == process_group || 0 == token) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_set_token invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_token failed (%i)\n", + retval); + } + return retval; +} + +extern uint64_t ia_css_process_group_get_private_token( + ia_css_process_group_t *process_group) +{ + uint64_t token = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_private_token(): enter:\n"); + + verifexit(process_group != NULL); + + token = process_group->private_token; + +EXIT: + if (NULL == process_group) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_get_private_token invalid argument\n"); + } + return token; +} + +int ia_css_process_group_set_private_token( + ia_css_process_group_t *process_group, + const uint64_t token) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_private_token(): enter:\n"); + + verifexit(process_group != NULL); + verifexit(token != 0); + + process_group->private_token = token; + + retval = 0; +EXIT: + if (NULL == process_group || 0 == token) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_set_private_token invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_private_token failed (%i)\n", + retval); + } + return retval; +} + +uint8_t ia_css_process_group_compute_process_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + uint8_t process_count = 0; + ia_css_kernel_bitmap_t total_bitmap; + ia_css_kernel_bitmap_t enable_bitmap; + int i; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_compute_process_count(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + total_bitmap = + ia_css_program_group_manifest_get_kernel_bitmap(manifest); + enable_bitmap = + ia_css_program_group_param_get_kernel_enable_bitmap(param); + + verifexit(ia_css_is_program_group_manifest_valid(manifest)); + verifexit(ia_css_is_kernel_bitmap_subset(total_bitmap, enable_bitmap)); + verifexit(!ia_css_is_kernel_bitmap_empty(enable_bitmap)); + + for (i = 0; i < + (int)ia_css_program_group_manifest_get_program_count(manifest); + i++) { + ia_css_program_manifest_t *program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, i); + ia_css_kernel_bitmap_t program_bitmap = + 
ia_css_program_manifest_get_kernel_bitmap( + program_manifest); + /* + * Programs can be orthogonal, + * a mutually exclusive subset, + * or a concurrent subset + */ + if (!ia_css_is_kernel_bitmap_intersection_empty(enable_bitmap, + program_bitmap)) { + ia_css_program_type_t program_type = + ia_css_program_manifest_get_type( + program_manifest); + /* + * An exclusive subnode < exclusive supernode, + * so simply don't count it + */ + if (program_type != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB && + program_type != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) { + process_count++; + } + } + } + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_compute_process_count invalid argument\n"); + } + return process_count; +} + +uint8_t ia_css_process_group_compute_terminal_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + uint8_t terminal_count = 0; + ia_css_kernel_bitmap_t total_bitmap, enable_bitmap; + int i; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_compute_terminal_count(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + total_bitmap = + ia_css_program_group_manifest_get_kernel_bitmap(manifest); + enable_bitmap = + ia_css_program_group_param_get_kernel_enable_bitmap(param); + + verifexit(ia_css_is_program_group_manifest_valid(manifest)); + verifexit(ia_css_is_kernel_bitmap_subset(total_bitmap, enable_bitmap)); + verifexit(!ia_css_is_kernel_bitmap_empty(enable_bitmap)); + + for (i = 0; i < + (int)ia_css_program_group_manifest_get_terminal_count( + manifest); i++) { + ia_css_terminal_manifest_t *tmanifest = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + + if (ia_css_process_group_is_terminal_enabled( + tmanifest, enable_bitmap)) { + terminal_count++; + } + } + +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_process_group_compute_terminal_count invalid argument\n"); + } + return terminal_count; +} +#endif /* !defined(__VIED_CELL) */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group_impl.h new file mode 100644 index 000000000000..f99602dc3c9e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_group_impl.h @@ -0,0 +1,1538 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_PSYS_PROCESS_GROUP_IMPL_H
+#define __IA_CSS_PSYS_PROCESS_GROUP_IMPL_H
+
+#include
+#include
+#include "ia_css_psys_process_group_cmd_impl.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include "ia_css_terminal_manifest_types.h"
+
+#include "ia_css_rbm.h"
+
+#include /* ia_css_kernel_bitmap_t */
+
+#include
+#include
+#include "ia_css_rbm_manifest_types.h"
+#include
+#include
+#include
+
+#include "ia_css_psys_dynamic_trace.h"
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+uint16_t ia_css_process_group_get_fragment_limit(
+	const ia_css_process_group_t *process_group)
+{
+	DECLARE_ERRVAL
+	uint16_t fragment_limit = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_group_get_fragment_limit(): enter:\n");
+
+	verifexitval(process_group != NULL, EFAULT);
+
+	fragment_limit = process_group->fragment_limit;
+
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_group_get_fragment_limit invalid argument\n");
+	}
+	return fragment_limit;
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+int ia_css_process_group_set_fragment_limit(
+	ia_css_process_group_t *process_group,
+	const uint16_t fragment_limit)
+{
+	DECLARE_ERRVAL
+	int retval = -1;
+	uint16_t fragment_state;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_group_set_fragment_limit(): enter:\n");
+
+	verifexitval(process_group != NULL, EFAULT);
+
+	retval = ia_css_process_group_get_fragment_state(process_group,
+		&fragment_state);
+
+	verifexitval(retval == 0, EINVAL);
+	verifexitval(fragment_limit > fragment_state, EINVAL);
+	verifexitval(fragment_limit <= ia_css_process_group_get_fragment_count(
+		process_group), EINVAL);
+
+	process_group->fragment_limit = fragment_limit;
+
+	retval = 0;
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_group_set_fragment_limit invalid argument process_group\n");
+	}
+	if (!noerror()) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_group_set_fragment_limit failed (%i)\n",
+			retval);
+	}
+	return retval;
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+int ia_css_process_group_clear_fragment_limit(
+	ia_css_process_group_t *process_group)
+{
+	DECLARE_ERRVAL
+	int retval = -1;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_group_clear_fragment_limit(): enter:\n");
+
+	verifexitval(process_group != NULL, EFAULT);
+	process_group->fragment_limit = 0;
+
+	retval = 0;
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_group_clear_fragment_limit invalid argument process_group\n");
+	}
+	if (!noerror()) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_group_clear_fragment_limit failed (%i)\n",
+			retval);
+	}
+	return retval;
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+int ia_css_process_group_attach_buffer(
+	ia_css_process_group_t *process_group,
+	vied_vaddress_t buffer,
+	const ia_css_buffer_state_t buffer_state,
+	const unsigned int terminal_index)
+{
+	DECLARE_ERRVAL
+	int retval = -1;
+	ia_css_terminal_t *terminal = NULL;
+
+	NOT_USED(buffer_state);
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO,
+		"ia_css_process_group_attach_buffer(): enter:\n");
+
+	verifexitval(process_group != NULL, EFAULT);
+
+	terminal = ia_css_process_group_get_terminal(
+		process_group, terminal_index);
+
+	verifexitval(terminal != NULL, EINVAL);
+	verifexitval(ia_css_process_group_get_state(process_group) ==
+		IA_CSS_PROCESS_GROUP_READY, EINVAL);
+	verifexitval(process_group->protocol_version ==
IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY || + process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_PPG, EINVAL); + + if (process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY) { + /* + * Legacy flow: + * Terminal address is part of the process group structure + */ + retval = ia_css_terminal_set_buffer( + terminal, buffer); + } else if (process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_PPG) { + /* + * PPG flow: + * Terminal address is part of external buffer set structure + */ + retval = ia_css_terminal_set_terminal_index( + terminal, terminal_index); + } + verifexitval(retval == 0, EFAULT); + + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "\tTerminal %p has buffer 0x%x\n", terminal, buffer); + + if (ia_css_is_terminal_data_terminal(terminal) == true) { + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + verifexitval(frame != NULL, EINVAL); + + retval = ia_css_frame_set_buffer_state(frame, buffer_state); + verifexitval(retval == 0, EINVAL); + } + + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_attach_buffer invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_attach_buffer failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_process_group_detach_buffer( + ia_css_process_group_t *process_group, + const unsigned int terminal_index) +{ + DECLARE_ERRVAL + int retval = -1; + vied_vaddress_t buffer = VIED_NULL; + + ia_css_terminal_t *terminal = NULL; + ia_css_process_group_state_t state; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_detach_buffer(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + terminal = + ia_css_process_group_get_terminal( + process_group, terminal_index); + state = ia_css_process_group_get_state(process_group); + + verifexitval(terminal != NULL, EINVAL); + verifexitval(state == IA_CSS_PROCESS_GROUP_READY, EINVAL); + + buffer = ia_css_terminal_get_buffer(terminal); + + if (ia_css_is_terminal_data_terminal(terminal) == true) { + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + verifexitval(frame != NULL, EINVAL); + + retval = ia_css_frame_set_buffer_state(frame, IA_CSS_BUFFER_NULL); + verifexitval(retval == 0, EINVAL); + } + ia_css_terminal_set_buffer(terminal, VIED_NULL); + + retval = 0; +EXIT: + /* + * buffer pointer will appear on output, + * regardless of subsequent fails to avoid memory leaks + */ + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_detach_buffer invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_detach_buffer failed (%i)\n", + retval); + } + return buffer; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_attach_stream( + ia_css_process_group_t *process_group, + uint32_t stream, + const ia_css_buffer_state_t buffer_state, + const unsigned int terminal_index) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_attach_stream(): enter:\n"); + + NOT_USED(process_group); + NOT_USED(stream); + NOT_USED(buffer_state); + NOT_USED(terminal_index); + + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_attach_stream failed (%i)\n", + retval); + } + return retval; +} + 
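+
+/*
+ * Editor's sketch (not part of the original patch): a host-side caller
+ * attaches one buffer per terminal while the group is in the READY state.
+ * For PPG protocol groups attach_buffer records the terminal index (the
+ * address travels in the external buffer set); for legacy groups it
+ * stores the address in the terminal itself.  The helper name is
+ * hypothetical; the buffer state is left to the caller to avoid assuming
+ * enum values beyond those used in this file.
+ */
+static __attribute__((unused)) int ia_css_example_attach_all_buffers(
+	ia_css_process_group_t *process_group,
+	const vied_vaddress_t *buffers, /* one entry per terminal */
+	const ia_css_buffer_state_t state)
+{
+	unsigned int i;
+	uint8_t count =
+		ia_css_process_group_get_terminal_count(process_group);
+
+	for (i = 0; i < count; i++) {
+		int retval = ia_css_process_group_attach_buffer(
+			process_group, buffers[i], state, i);
+
+		if (retval != 0)
+			return retval;
+	}
+	return 0;
+}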
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_detach_stream( + ia_css_process_group_t *process_group, + const unsigned int terminal_index) +{ + int retval = -1; + uint32_t stream = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_detach_stream(): enter:\n"); + + NOT_USED(process_group); + NOT_USED(terminal_index); + + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_detach_stream failed (%i)\n", + retval); + } + return stream; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_barrier( + ia_css_process_group_t *process_group, + const vied_nci_barrier_ID_t barrier_index) +{ + DECLARE_ERRVAL + int retval = -1; + vied_nci_resource_bitmap_t bit_mask; + vied_nci_resource_bitmap_t resource_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_barrier(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + resource_bitmap = + ia_css_process_group_get_resource_bitmap(process_group); + + bit_mask = vied_nci_barrier_bit_mask(barrier_index); + + verifexitval(bit_mask != 0, EINVAL); + verifexitval(vied_nci_is_bitmap_clear(bit_mask, resource_bitmap), EINVAL); + + resource_bitmap = vied_nci_bitmap_set(resource_bitmap, bit_mask); + + retval = + ia_css_process_group_set_resource_bitmap( + process_group, resource_bitmap); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_barrier invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_barrier failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_clear_barrier( + ia_css_process_group_t *process_group, + const vied_nci_barrier_ID_t barrier_index) +{ + DECLARE_ERRVAL + int retval = -1; + vied_nci_resource_bitmap_t bit_mask, resource_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_clear_barrier(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + resource_bitmap = + ia_css_process_group_get_resource_bitmap(process_group); + + bit_mask = vied_nci_barrier_bit_mask(barrier_index); + + verifexitval(bit_mask != 0, EINVAL); + verifexitval(vied_nci_is_bitmap_set(bit_mask, resource_bitmap), EINVAL); + + resource_bitmap = vied_nci_bitmap_clear(resource_bitmap, bit_mask); + + retval = + ia_css_process_group_set_resource_bitmap( + process_group, resource_bitmap); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_clear_barrier invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_clear_barrier failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_print( + const ia_css_process_group_t *process_group, + void *fid) +{ + DECLARE_ERRVAL + int retval = -1; + int i; + + uint8_t process_count; + uint8_t terminal_count; + vied_vaddress_t ipu_vaddress = VIED_NULL; + ia_css_rbm_t routing_bitmap; + + NOT_USED(fid); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_print(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + retval = ia_css_process_group_get_ipu_vaddress(process_group, &ipu_vaddress); + verifexitval(retval == 0, EINVAL); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "=============== Process group print start ===============\n"); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tprocess_group cpu address = 
%p\n", process_group); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tipu_virtual_address = %#x\n", ipu_vaddress); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tsizeof(process_group) = %d\n", + (int)ia_css_process_group_get_size(process_group)); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tfragment_count = %d\n", + (int)ia_css_process_group_get_fragment_count(process_group)); + + routing_bitmap = *ia_css_process_group_get_routing_bitmap(process_group); + for (i = 0; i < (int)IA_CSS_RBM_NOF_ELEMS; i++) { + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "\trouting_bitmap[index = %d] = 0x%X\n", + i, (int)routing_bitmap.data[i]); + } + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\tprogram_group(process_group) = %d\n", + (int)ia_css_process_group_get_program_group_ID(process_group)); + process_count = ia_css_process_group_get_process_count(process_group); + terminal_count = + ia_css_process_group_get_terminal_count(process_group); + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\t%d processes\n", (int)process_count); + for (i = 0; i < (int)process_count; i++) { + ia_css_process_t *process = + ia_css_process_group_get_process(process_group, i); + + retval = ia_css_process_print(process, fid); + verifjmpexit(retval == 0); + } + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "\t%d terminals\n", (int)terminal_count); + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal(process_group, i); + + retval = ia_css_terminal_print(terminal, fid); + verifjmpexit(retval == 0); + } + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "=============== Process group print end ===============\n"); + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_print invalid argument\n"); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_process_group_valid( + const ia_css_process_group_t *process_group, + const ia_css_program_group_manifest_t *pg_manifest, + const ia_css_program_group_param_t *param) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + uint8_t proc_idx; + uint8_t prog_idx; + uint8_t proc_term_idx; + uint8_t process_count; + uint8_t program_count; + uint8_t terminal_count; + uint8_t man_terminal_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_process_group_valid(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(pg_manifest != NULL, EFAULT); + NOT_USED(param); + + process_count = process_group->process_count; + terminal_count = process_group->terminal_count; + program_count = + ia_css_program_group_manifest_get_program_count(pg_manifest); + man_terminal_count = + ia_css_program_group_manifest_get_terminal_count(pg_manifest); + + /* Validate process group */ + invalid_flag = invalid_flag || + !(program_count >= process_count) || + !(man_terminal_count >= terminal_count) || + !(process_group->size > process_group->processes_offset) || + !(process_group->size > process_group->terminals_offset); + + /* Validate processes */ + for (proc_idx = 0; proc_idx < process_count; proc_idx++) { + const ia_css_process_t *process; + ia_css_program_ID_t prog_id; + bool no_match_found = true; + + process = ia_css_process_group_get_process( + process_group, proc_idx); + verifexitval(NULL != process, EFAULT); + prog_id = ia_css_process_get_program_ID(process); + for (prog_idx = 0; prog_idx < program_count; prog_idx++) { + ia_css_program_manifest_t *p_manifest = NULL; + + p_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + pg_manifest, prog_idx); + if (prog_id 
== + ia_css_program_manifest_get_program_ID( + p_manifest)) { + invalid_flag = invalid_flag || + !ia_css_is_process_valid( + process, p_manifest); + no_match_found = false; + break; + } + } + invalid_flag = invalid_flag || no_match_found; + } + + /* Validate terminals */ + for (proc_term_idx = 0; proc_term_idx < terminal_count; + proc_term_idx++) { + int man_term_idx; + const ia_css_terminal_t *terminal; + const ia_css_terminal_manifest_t *terminal_manifest; + + terminal = + ia_css_process_group_get_terminal( + process_group, proc_term_idx); + verifexitval(NULL != terminal, EFAULT); + man_term_idx = + ia_css_terminal_get_terminal_manifest_index(terminal); + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst( + pg_manifest, man_term_idx); + invalid_flag = invalid_flag || + !ia_css_is_terminal_valid(terminal, terminal_manifest); + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_process_group_valid() invalid argument\n"); + return false; + } else { + return (!invalid_flag); + } +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_can_process_group_submit( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + int i; + bool can_submit = false; + int retval = -1; + uint8_t terminal_count = + ia_css_process_group_get_terminal_count(process_group); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_can_process_group_submit(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal(process_group, i); + vied_vaddress_t buffer; + ia_css_buffer_state_t buffer_state; + + verifexitval(terminal != NULL, EINVAL); + + if (process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY) { + /* + * For legacy pg flow, buffer addresses are contained inside + * the process group structure, so these need to be validated + * on process group submission. 
+ */ + buffer = ia_css_terminal_get_buffer(terminal); + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "\tH: Terminal number(%d) is %p having buffer 0x%x\n", + i, terminal, buffer); + } + + /* buffer_state is applicable only for data terminals*/ + if (ia_css_is_terminal_data_terminal(terminal) == true) { + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + + verifexitval(frame != NULL, EINVAL); + buffer_state = ia_css_frame_get_buffer_state(frame); + if ((buffer_state == IA_CSS_BUFFER_NULL) || + (buffer_state == IA_CSS_N_BUFFER_STATES)) { + break; + } + } else if ( + (ia_css_is_terminal_parameter_terminal(terminal) + != true) && + (ia_css_is_terminal_program_terminal(terminal) + != true) && + (ia_css_is_terminal_program_control_init_terminal(terminal) + != true) && + (ia_css_is_terminal_spatial_parameter_terminal( + terminal) != true)) { + /* neither data nor parameter terminal, so error.*/ + break; + } + + } + /* Only true if no check failed */ + can_submit = (i == terminal_count); + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_process_group_submit invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_process_group_submit failed (%i)\n", + retval); + } + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_can_process_group_submit(): leave:\n"); + return can_submit; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_can_enqueue_buffer_set( + const ia_css_process_group_t *process_group, + const ia_css_buffer_set_t *buffer_set) +{ + DECLARE_ERRVAL + int i; + bool can_enqueue = false; + int retval = -1; + uint8_t terminal_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_can_enqueue_buffer_set(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(buffer_set != NULL, EFAULT); + + terminal_count = + ia_css_process_group_get_terminal_count(process_group); + + /* + * For ppg flow, buffer addresses are contained in the + * external buffer set structure, so these need to be + * validated before enqueueing. 
+ */ + verifexitval(process_group->protocol_version == + IA_CSS_PROCESS_GROUP_PROTOCOL_PPG, EFAULT); + + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal(process_group, i); + vied_vaddress_t buffer; + ia_css_buffer_state_t buffer_state; + + verifexitval(terminal != NULL, EINVAL); + + buffer = ia_css_buffer_set_get_buffer(buffer_set, terminal); + IA_CSS_TRACE_3(PSYSAPI_DYNAMIC, INFO, + "\tH: Terminal number(%d) is %p having buffer 0x%x\n", + i, terminal, buffer); + + /* buffer_state is applicable only for data terminals*/ + if (ia_css_is_terminal_data_terminal(terminal) == true) { + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + + verifexitval(frame != NULL, EINVAL); + buffer_state = ia_css_frame_get_buffer_state(frame); + if ((buffer_state == IA_CSS_BUFFER_NULL) || + (buffer_state == IA_CSS_N_BUFFER_STATES)) { + break; + } + } else if ( + (ia_css_is_terminal_parameter_terminal(terminal) + != true) && + (ia_css_is_terminal_program_terminal(terminal) + != true) && + (ia_css_is_terminal_program_control_init_terminal(terminal) + != true) && + (ia_css_is_terminal_spatial_parameter_terminal( + terminal) != true)) { + /* neither data nor parameter terminal, so error.*/ + break; + } + } + /* Only true if no check failed */ + can_enqueue = (i == terminal_count); + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_enqueue_buffer_set invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_can_enqueue_buffer_set failed (%i)\n", + retval); + } + return can_enqueue; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_can_process_group_start( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + int i; + bool can_start = false; + int retval = -1; + uint8_t terminal_count; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_can_process_group_start(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + terminal_count = + ia_css_process_group_get_terminal_count(process_group); + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_t *terminal = + ia_css_process_group_get_terminal(process_group, i); + ia_css_buffer_state_t buffer_state; + bool ok = false; + + verifexitval(terminal != NULL, EINVAL); + if (ia_css_is_terminal_data_terminal(terminal) == true) { + /* + * buffer_state is applicable only for data terminals + */ + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + bool is_input = ia_css_is_terminal_input(terminal); + /* + * check for NULL here. + * then invoke next 2 statements + */ + verifexitval(frame != NULL, EINVAL); + IA_CSS_TRACE_5(PSYSAPI_DYNAMIC, VERBOSE, + "\tTerminal %d: buffer_state %u, access_type %u, data_bytes %u, data %u\n", + i, frame->buffer_state, frame->access_type, + frame->data_bytes, frame->data); + buffer_state = ia_css_frame_get_buffer_state(frame); + + ok = ((is_input && + (buffer_state == IA_CSS_BUFFER_FULL)) || + (!is_input && (buffer_state == + IA_CSS_BUFFER_EMPTY))); + + } else if (ia_css_is_terminal_parameter_terminal(terminal) == + true) { + /* + * FIXME: + * is there any pre-requisite for param_terminal? 
+			 */
+			ok = true;
+		} else if (ia_css_is_terminal_program_terminal(terminal) ==
+				true) {
+			ok = true;
+		} else if (ia_css_is_terminal_program_control_init_terminal(terminal) ==
+				true) {
+			ok = true;
+		} else if (ia_css_is_terminal_spatial_parameter_terminal(
+				terminal) == true) {
+			ok = true;
+		} else {
+			/* neither data nor parameter terminal, so error. */
+			break;
+		}
+
+		if (!ok)
+			break;
+	}
+	/* Only true if no check failed */
+	can_start = (i == terminal_count);
+
+	retval = 0;
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_can_process_group_start invalid argument process_group\n");
+	}
+	if (!noerror()) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_can_process_group_start failed (%i)\n",
+			retval);
+	}
+	return can_start;
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+size_t ia_css_process_group_get_size(
+	const ia_css_process_group_t *process_group)
+{
+	DECLARE_ERRVAL
+	size_t size = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_group_get_size(): enter:\n");
+
+	verifexitval(process_group != NULL, EFAULT);
+
+	size = process_group->size;
+
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_group_get_size invalid argument\n");
+	}
+	return size;
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+ia_css_process_group_state_t ia_css_process_group_get_state(
+	const ia_css_process_group_t *process_group)
+{
+	DECLARE_ERRVAL
+	ia_css_process_group_state_t state = IA_CSS_N_PROCESS_GROUP_STATES;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_group_get_state(): enter:\n");
+
+	verifexitval(process_group != NULL, EFAULT);
+
+	state = process_group->state;
+
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_group_get_state invalid argument\n");
+	}
+	return state;
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+const ia_css_rbm_t *ia_css_process_group_get_routing_bitmap(
+	const ia_css_process_group_t *process_group)
+{
+	DECLARE_ERRVAL
+	const ia_css_rbm_t *rbm = NULL;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_group_get_routing_bitmap(): enter:\n");
+
+	verifexitval(process_group != NULL, EFAULT);
+
+	rbm = &(process_group->routing_bitmap);
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_group_get_routing_bitmap invalid argument\n");
+	}
+	return rbm;
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+uint16_t ia_css_process_group_get_fragment_count(
+	const ia_css_process_group_t *process_group)
+{
+	DECLARE_ERRVAL
+	uint16_t fragment_count = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_group_get_fragment_count(): enter:\n");
+
+	verifexitval(process_group != NULL, EFAULT);
+
+	fragment_count = process_group->fragment_count;
+
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_group_get_fragment_count invalid argument\n");
+	}
+	return fragment_count;
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+uint8_t ia_css_process_group_get_process_count(
+	const ia_css_process_group_t *process_group)
+{
+	DECLARE_ERRVAL
+	uint8_t process_count = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_group_get_process_count(): enter:\n");
+
+	verifexitval(process_group != NULL, EFAULT);
+
+	process_count = process_group->process_count;
+
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_group_get_process_count invalid argument\n");
+	}
+	return process_count;
+}
+
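All fields of the process group are reached through accessors like the ones above; the offsets inside the packed structure are private, so host code should never dereference it directly. As a usage illustration, a pre-submission gate can be built entirely from these accessors; pg_ready_for_submit below is a hypothetical helper, not part of the psys API.

/* Sketch: gate submission on the group state plus the per-terminal
 * buffer checks that ia_css_can_process_group_submit() performs.
 */
static bool pg_ready_for_submit(ia_css_process_group_t *pg)
{
	if (pg == NULL)
		return false;

	/* The group must still be in the READY state... */
	if (ia_css_process_group_get_state(pg) != IA_CSS_PROCESS_GROUP_READY)
		return false;

	/* ...and every terminal must carry a usable buffer. */
	return ia_css_can_process_group_submit(pg);
}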
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_terminal_count( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t terminal_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_terminal_count(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + terminal_count = process_group->terminal_count; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_terminal_count invalid argument\n"); + } + return terminal_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_get_pg_load_start_ts( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint32_t pg_load_start_ts = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_pg_load_start_ts(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + pg_load_start_ts = process_group->pg_load_start_ts; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_pg_load_start_ts invalid argument\n"); + } + return pg_load_start_ts; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_get_pg_load_cycles( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint32_t pg_load_cycles = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_pg_load_cycles(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + pg_load_cycles = process_group->pg_load_cycles; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_pg_load_cycles invalid argument\n"); + } + return pg_load_cycles; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_get_pg_init_cycles( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint32_t pg_init_cycles = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_pg_init_cycles(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + pg_init_cycles = process_group->pg_init_cycles; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_pg_init_cycles invalid argument\n"); + } + return pg_init_cycles; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_get_pg_processing_cycles( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint32_t pg_processing_cycles = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_pg_processing_cycles(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + pg_processing_cycles = process_group->pg_processing_cycles; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_pg_processing_cycles invalid argument\n"); + } + return pg_processing_cycles; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_terminal_t *ia_css_process_group_get_terminal_from_type( + const ia_css_process_group_t *process_group, + const ia_css_terminal_type_t terminal_type) +{ + unsigned int proc_cnt; + ia_css_terminal_t *terminal = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_terminal_from_type(): enter:\n"); + + for (proc_cnt = 0; proc_cnt < (unsigned int)ia_css_process_group_get_terminal_count(process_group); proc_cnt++) { + terminal = ia_css_process_group_get_terminal(process_group, proc_cnt); + if (terminal == NULL) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_terminal_from_type() 
Failed to get terminal %d", proc_cnt); + goto EXIT; + } + if (ia_css_terminal_get_type(terminal) == terminal_type) { + return terminal; + } + terminal = NULL; /* If not the expected type, return NULL */ + } +EXIT: + return terminal; +} + +/* Returns the terminal or NULL if it was not found + For some of those maybe valid to not exist at all in the process group */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +const ia_css_terminal_t *ia_css_process_group_get_single_instance_terminal( + const ia_css_process_group_t *process_group, + ia_css_terminal_type_t term_type) +{ + int i, term_count; + + assert(process_group != NULL); + + /* Those below have at most one instance per process group */ + assert(term_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN || + term_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT || + term_type == IA_CSS_TERMINAL_TYPE_PROGRAM || + term_type == IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT); + + term_count = ia_css_process_group_get_terminal_count(process_group); + + for (i = 0; i < term_count; i++) { + const ia_css_terminal_t *terminal = ia_css_process_group_get_terminal(process_group, i); + + if (ia_css_terminal_get_type(terminal) == term_type) { + /* Only one parameter terminal per process group */ + return terminal; + } + } + + return NULL; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_terminal_t *ia_css_process_group_get_terminal( + const ia_css_process_group_t *process_grp, + const unsigned int terminal_num) +{ + DECLARE_ERRVAL + ia_css_terminal_t *terminal_ptr = NULL; + uint16_t *terminal_offset_table; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_terminal(): enter:\n"); + + verifexitval(process_grp != NULL, EFAULT); + verifexitval(terminal_num < process_grp->terminal_count, EINVAL); + + terminal_offset_table = + (uint16_t *)((char *)process_grp + + process_grp->terminals_offset); + terminal_ptr = + (ia_css_terminal_t *)((char *)process_grp + + terminal_offset_table[terminal_num]); + + verifexitval(terminal_ptr != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_terminal invalid argument\n"); + } + return terminal_ptr; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_t *ia_css_process_group_get_process( + const ia_css_process_group_t *process_grp, + const unsigned int process_num) +{ + DECLARE_ERRVAL + ia_css_process_t *process_ptr = NULL; + uint16_t *process_offset_table; + + verifexitval(process_grp != NULL, EFAULT); + verifexitval(process_num < process_grp->process_count, EINVAL); + + process_offset_table = + (uint16_t *)((char *)process_grp + + process_grp->processes_offset); + process_ptr = + (ia_css_process_t *)((char *)process_grp + + process_offset_table[process_num]); + + verifexitval(process_ptr != NULL, EFAULT); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_process invalid argument\n"); + } + return process_ptr; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_group_ID_t ia_css_process_group_get_program_group_ID( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + ia_css_program_group_ID_t id = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_program_group_ID(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + id = process_group->ID; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_program_group_ID invalid argument\n"); + } + return id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C 
+vied_nci_resource_bitmap_t ia_css_process_group_get_resource_bitmap( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t resource_bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_resource_bitmap(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + resource_bitmap = process_group->resource_bitmap; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_resource_bitmap invalid argument\n"); + } + return resource_bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_resource_bitmap( + ia_css_process_group_t *process_group, + const vied_nci_resource_bitmap_t resource_bitmap) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_resource_bitmap(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + process_group->resource_bitmap = resource_bitmap; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_resource_bitmap invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_resource_bitmap failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_routing_bitmap( + ia_css_process_group_t *process_group, + const ia_css_rbm_t rbm) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_routing_bitmap(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + process_group->routing_bitmap = rbm; + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_routing_bitmap invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_routing_bitmap failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint32_t ia_css_process_group_compute_cycle_count( + const ia_css_program_group_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + DECLARE_ERRVAL + uint32_t cycle_count = 0; + + NOT_USED(manifest); + NOT_USED(param); + + verifexitval(manifest != NULL, EFAULT); + verifexitval(param != NULL, EFAULT); + + cycle_count = 1; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_compute_cycle_count invalid argument\n"); + } + return cycle_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_fragment_state( + ia_css_process_group_t *process_group, + uint16_t fragment_state) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_process_group_set_fragment_state(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(fragment_state <= ia_css_process_group_get_fragment_count( + process_group), EINVAL); + + process_group->fragment_state = fragment_state; + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_fragment_state invalid argument process_group\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_fragment_state failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_get_fragment_state( + const ia_css_process_group_t *process_group, + uint16_t 
*fragment_state) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_fragment_state(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(fragment_state != NULL, EFAULT); + + *fragment_state = process_group->fragment_state; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_fragment_state invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_fragment_state failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_get_ipu_vaddress( + const ia_css_process_group_t *process_group, + vied_vaddress_t *ipu_vaddress) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_ipu_vaddress(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + verifexitval(ipu_vaddress != NULL, EFAULT); + + *ipu_vaddress = process_group->ipu_virtual_address; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_ipu_vaddress invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_ipu_vaddress failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_ipu_vaddress( + ia_css_process_group_t *process_group, + vied_vaddress_t ipu_vaddress) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_ipu_vaddress(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + process_group->ipu_virtual_address = ipu_vaddress; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_ipu_vaddress invalid argument\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_ipu_vaddress failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_protocol_version( + const ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t protocol_version = IA_CSS_PROCESS_GROUP_N_PROTOCOLS; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_protocol_version(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + protocol_version = process_group->protocol_version; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_protocol_version invalid argument\n"); + } + return protocol_version; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_base_queue_id( + ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t queue_id = IA_CSS_N_PSYS_CMD_QUEUE_ID; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_base_queue_id(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + queue_id = process_group->base_queue_id; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_base_queue_id invalid argument\n"); + } + return queue_id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_base_queue_id( + ia_css_process_group_t *process_group, + uint8_t queue_id) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_base_queue_id(): enter:\n"); + + 
verifexitval(process_group != NULL, EFAULT); + + process_group->base_queue_id = queue_id; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_base_queue_id invalid argument\n"); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_group_get_num_queues( + ia_css_process_group_t *process_group) +{ + DECLARE_ERRVAL + uint8_t num_queues = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_get_num_queues(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + num_queues = process_group->num_queues; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_get_num_queues invalid argument\n"); + } + return num_queues; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_group_set_num_queues( + ia_css_process_group_t *process_group, + uint8_t num_queues) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_group_set_num_queues(): enter:\n"); + + verifexitval(process_group != NULL, EFAULT); + + process_group->num_queues = num_queues; + retval = 0; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_group_set_num_queues invalid argument\n"); + } + return retval; +} + + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_process_group_has_vp(const ia_css_process_group_t *process_group) +{ + bool has_vp = false; + uint32_t i; + + uint8_t process_count = ia_css_process_group_get_process_count(process_group); + + for (i = 0; i < process_count; i++) { + ia_css_process_t *process; + vied_nci_cell_ID_t cell_id; + + process = ia_css_process_group_get_process(process_group, i); + cell_id = ia_css_process_get_cell(process); + + if (VIED_NCI_VP_TYPE_ID == vied_nci_cell_get_type(cell_id)) { + has_vp = true; + break; + } + } + + return has_vp; +} + +#endif /* __IA_CSS_PSYS_PROCESS_GROUP_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_impl.h new file mode 100644 index 000000000000..5d0303012700 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_impl.h @@ -0,0 +1,637 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_PSYS_PROCESS_IMPL_H
+#define __IA_CSS_PSYS_PROCESS_IMPL_H
+
+#include <ia_css_psys_process.h>
+
+#include <ia_css_psys_process_group.h>
+#include <ia_css_psys_program_manifest.h>
+
+#include <error_support.h>
+#include <misc_support.h>
+#include <assert_support.h>
+
+#include <vied_nci_psys_system_global.h>
+
+#include "ia_css_psys_dynamic_trace.h"
+#include "ia_css_psys_process_private_types.h"
+
+/** Function only to be used in ia_css_psys_process_impl.h and ia_css_psys_process.h */
+STORAGE_CLASS_INLINE vied_nci_cell_ID_t ia_css_process_cells_get_cell(const ia_css_process_t *process, int index)
+{
+	assert(index < IA_CSS_PROCESS_MAX_CELLS);
+	if (index >= IA_CSS_PROCESS_MAX_CELLS) {
+		return VIED_NCI_N_CELL_ID;
+	}
+#if IA_CSS_PROCESS_MAX_CELLS == 1
+	return process->cell_id;
+#else
+	return process->cells[index];
+#endif
+}
+
+/** Function only to be used in ia_css_psys_process_impl.h and ia_css_psys_process.h */
+STORAGE_CLASS_INLINE void ia_css_process_cells_set_cell(ia_css_process_t *process, int index, vied_nci_cell_ID_t cell_id)
+{
+	assert(index < IA_CSS_PROCESS_MAX_CELLS);
+	if (index >= IA_CSS_PROCESS_MAX_CELLS) {
+		return;
+	}
+#if IA_CSS_PROCESS_MAX_CELLS == 1
+	process->cell_id = cell_id;
+#else
+	process->cells[index] = cell_id;
+#endif
+}
+
+/** Function only to be used in ia_css_psys_process_impl.h and ia_css_psys_process.h */
+STORAGE_CLASS_INLINE void ia_css_process_cells_clear(ia_css_process_t *process)
+{
+	int i;
+	for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) {
+		ia_css_process_cells_set_cell(process, i, VIED_NCI_N_CELL_ID);
+	}
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+vied_nci_cell_ID_t ia_css_process_get_cell(
+	const ia_css_process_t *process)
+{
+	DECLARE_ERRVAL
+	vied_nci_cell_ID_t cell_id = VIED_NCI_N_CELL_ID;
+	int i = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_get_cell(): enter:\n");
+
+	verifexitval(process != NULL, EFAULT);
+
+#if IA_CSS_PROCESS_MAX_CELLS > 1
+	for (i = 1; i < IA_CSS_PROCESS_MAX_CELLS; i++) {
+		assert(VIED_NCI_N_CELL_ID == ia_css_process_cells_get_cell(process, i));
+#ifdef __HIVECC
+#pragma hivecc unroll
+#endif
+	}
+#else
+	(void)i;
+#endif
+	cell_id = ia_css_process_cells_get_cell(process, 0);
+
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_get_cell invalid argument\n");
+	}
+	return cell_id;
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+vied_nci_mem_ID_t ia_css_process_get_ext_mem_id(
+	const ia_css_process_t *process,
+	const vied_nci_mem_type_ID_t mem_type)
+{
+	DECLARE_ERRVAL
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_get_ext_mem_id(): enter:\n");
+
+	verifexitval(process != NULL && mem_type < VIED_NCI_N_DATA_MEM_TYPE_ID, EFAULT);
+
+EXIT:
+	if (!noerror()) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_get_ext_mem_id invalid argument\n");
+		return IA_CSS_PROCESS_INVALID_OFFSET;
+	}
+	return process->ext_mem_id[mem_type];
+}
+
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+uint32_t ia_css_process_get_program_idx(
+	const ia_css_process_t *process)
+{
+	DECLARE_ERRVAL
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_get_program_idx(): enter:\n");
+
+	verifexitval(process != NULL, EFAULT);
+
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_process_get_program_idx invalid argument\n");
+		return IA_CSS_PROCESS_INVALID_PROGRAM_IDX;
+	}
+	return process->program_idx;
+}
+
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+vied_nci_resource_size_t ia_css_process_get_dev_chn(
+	const ia_css_process_t *process,
+	const vied_nci_dev_chn_ID_t dev_chn_id)
+{
+	DECLARE_ERRVAL
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_process_get_dev_chn(): enter:\n");
+
+	
verifexitval(process != NULL && dev_chn_id < VIED_NCI_N_DEV_CHN_ID, EFAULT); + +EXIT: + if (!noerror()) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dev_chn(): invalid arguments\n"); + return IA_CSS_PROCESS_INVALID_OFFSET; + } + return process->dev_chn_offset[dev_chn_id]; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_size_t ia_css_process_get_int_mem_offset( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_id) +{ + DECLARE_ERRVAL + vied_nci_resource_size_t int_mem_offset = IA_CSS_PROCESS_INVALID_OFFSET; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_int_mem_offset(): enter:\n"); + + verifexitval(process != NULL && mem_id < VIED_NCI_N_MEM_TYPE_ID, EFAULT); + +EXIT: + if (noerror()) { + int_mem_offset = process->int_mem_offset[mem_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_int_mem_offset invalid argument\n"); + } + + return int_mem_offset; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_size_t ia_css_process_get_ext_mem_offset( + const ia_css_process_t *process, + const vied_nci_mem_type_ID_t mem_type_id) +{ + DECLARE_ERRVAL + vied_nci_resource_size_t ext_mem_offset = IA_CSS_PROCESS_INVALID_OFFSET; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_ext_mem_offset(): enter:\n"); + + verifexitval(process != NULL && mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID, EFAULT); + +EXIT: + if (noerror()) { + ext_mem_offset = process->ext_mem_offset[mem_type_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_ext_mem_offset invalid argument\n"); + } + + return ext_mem_offset; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +size_t ia_css_process_get_size( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_size(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + +EXIT: + if (noerror()) { + size = process->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_size invalid argument\n"); + } + + return size; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_state_t ia_css_process_get_state( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + ia_css_process_state_t state = IA_CSS_N_PROCESS_STATES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_state(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + +EXIT: + if (noerror()) { + state = process->state; + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_state invalid argument\n"); + } + + return state; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_process_set_state( + ia_css_process_t *process, + ia_css_process_state_t state) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_set_state(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + process->state = state; + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_set_state invalid argument\n"); + } + + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_get_cell_dependency_count( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + uint8_t cell_dependency_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_cell_dependency_count(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + cell_dependency_count = process->cell_dependency_count; + +EXIT: + if (haserror(EFAULT)) { + 
IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_cell_dependency_count invalid argument\n"); + } + return cell_dependency_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_get_terminal_dependency_count( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + uint8_t terminal_dependency_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_terminal_dependency_count(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + terminal_dependency_count = process->terminal_dependency_count; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_terminal_dependency_count invalid argument process\n"); + } + return terminal_dependency_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_group_t *ia_css_process_get_parent( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + ia_css_process_group_t *parent = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_parent(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + parent = + (ia_css_process_group_t *) ((char *)process + process->parent_offset); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_parent invalid argument process\n"); + } + return parent; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_ID_t ia_css_process_get_program_ID( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + ia_css_program_ID_t id = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_program_ID(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + id = process->ID; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_program_ID invalid argument process\n"); + } + return id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_id_t ia_css_process_get_cell_dependency( + const ia_css_process_t *process, + const unsigned int cell_num) +{ + DECLARE_ERRVAL + vied_nci_resource_id_t cell_dependency = + IA_CSS_PROCESS_INVALID_DEPENDENCY; + vied_nci_resource_id_t *cell_dep_ptr = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_cell_dependency(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + verifexitval(cell_num < process->cell_dependency_count, EFAULT); + + cell_dep_ptr = + (vied_nci_resource_id_t *) + ((char *)process + process->cell_dependencies_offset); + cell_dependency = *(cell_dep_ptr + cell_num); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_cell_dependency invalid argument\n"); + } + return cell_dependency; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_process_get_terminal_dependency( + const ia_css_process_t *process, + const unsigned int terminal_num) +{ + DECLARE_ERRVAL + uint8_t *ter_dep_ptr = NULL; + uint8_t ter_dep = IA_CSS_PROCESS_INVALID_DEPENDENCY; + + verifexitval(process != NULL, EFAULT); + verifexitval(terminal_num < process->terminal_dependency_count, EFAULT); + + ter_dep_ptr = (uint8_t *) ((char *)process + + process->terminal_dependencies_offset); + + ter_dep = *(ter_dep_ptr + terminal_num); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_terminal_dependency invalid argument\n"); + } + return ter_dep; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_kernel_bitmap_t ia_css_process_get_kernel_bitmap( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + + 
IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_kernel_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + bitmap = process->kernel_bitmap; + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_kernel_bitmap invalid argument process\n"); + } + return bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t ia_css_process_get_cells_bitmap( + const ia_css_process_t *process) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t bitmap = 0; + vied_nci_cell_ID_t cell_id; + int i = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_cell_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); + + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + cell_id = ia_css_process_cells_get_cell(process, i); + if (VIED_NCI_N_CELL_ID != cell_id) { + bitmap |= (1 << cell_id); + } +#ifdef __HIVECC +#pragma hivecc unroll +#endif + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_cells_bitmap invalid argument process\n"); + } + + return bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t* ia_css_process_get_dfm_port_bitmap_ptr( + ia_css_process_t *process) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t *p_bitmap = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dfm_port_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + p_bitmap = &process->dfm_port_bitmap[0]; +#else + p_bitmap = NULL; +#endif +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dfm_port_bitmap invalid argument process\n"); + } + + return p_bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t* ia_css_process_get_dfm_active_port_bitmap_ptr( + ia_css_process_t *process) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t *p_bitmap = NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dfm_port_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + p_bitmap = &process->dfm_active_port_bitmap[0]; +#else + p_bitmap = NULL; +#endif +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dfm_port_bitmap invalid argument process\n"); + } + + return p_bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t ia_css_process_get_dfm_port_bitmap( + const ia_css_process_t *process, + vied_nci_dev_dfm_id_t dfm_res_id) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dfm_port_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexitval(dfm_res_id < VIED_NCI_N_DEV_DFM_ID, EFAULT); + bitmap = process->dfm_port_bitmap[dfm_res_id]; +#else + bitmap = 0; + (void)dfm_res_id; +#endif +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dfm_port_bitmap invalid argument process\n"); + } + + return bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_nci_resource_bitmap_t ia_css_process_get_dfm_active_port_bitmap( + const ia_css_process_t *process, + vied_nci_dev_dfm_id_t dfm_res_id) +{ + DECLARE_ERRVAL + vied_nci_resource_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_process_get_dfm_active_port_bitmap(): enter:\n"); + + verifexitval(process != NULL, EFAULT); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + 
verifexitval(dfm_res_id < VIED_NCI_N_DEV_DFM_ID, EFAULT); + bitmap = process->dfm_active_port_bitmap[dfm_res_id]; +#else + bitmap = 0; + (void)dfm_res_id; +#endif +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_process_get_dfm_active_port_bitmap invalid argument process\n"); + } + return bitmap; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_process_valid( + const ia_css_process_t *process, + const ia_css_program_manifest_t *p_manifest) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + ia_css_program_ID_t prog_id; + ia_css_kernel_bitmap_t prog_kernel_bitmap; + + verifexitval(NULL != process, EFAULT); + verifexitval(NULL != p_manifest, EFAULT); + + prog_id = ia_css_process_get_program_ID(process); + verifjmpexit(prog_id == + ia_css_program_manifest_get_program_ID(p_manifest)); + + prog_kernel_bitmap = + ia_css_program_manifest_get_kernel_bitmap(p_manifest); + + invalid_flag = (process->size <= process->cell_dependencies_offset) || + (process->size <= process->terminal_dependencies_offset) || + !ia_css_is_kernel_bitmap_subset(prog_kernel_bitmap, + process->kernel_bitmap); + + if (ia_css_has_program_manifest_fixed_cell(p_manifest)) { + vied_nci_cell_ID_t cell_id; + + cell_id = ia_css_program_manifest_get_cell_ID(p_manifest); + invalid_flag = invalid_flag || + (cell_id != (vied_nci_cell_ID_t)(ia_css_process_get_cell(process))); + } + invalid_flag = invalid_flag || + ((process->cell_dependency_count + + process->terminal_dependency_count) == 0) || + (process->cell_dependency_count != + ia_css_program_manifest_get_program_dependency_count(p_manifest)) || + (process->terminal_dependency_count != + ia_css_program_manifest_get_terminal_dependency_count(p_manifest)); + + /* TODO: to be removed once all PGs pass validation */ + if (invalid_flag == true) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_is_process_valid(): false\n"); + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_process_valid() invalid argument\n"); + return false; + } else { + return (!invalid_flag); + } +} + +#endif /* __IA_CSS_PSYS_PROCESS_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_private_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_private_types.h new file mode 100644 index 000000000000..ae0affde9718 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_process_private_types.h @@ -0,0 +1,87 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_PSYS_PROCESS_PRIVATE_TYPES_H
+#define __IA_CSS_PSYS_PROCESS_PRIVATE_TYPES_H
+
+#include "ia_css_psys_process_types.h"
+#include "vied_nci_psys_resource_model.h"
+
+#define N_UINT32_IN_PROCESS_STRUCT 2
+#define N_UINT16_IN_PROCESS_STRUCT 3
+#define N_UINT8_IN_PROCESS_STRUCT 2
+
+#define SIZE_OF_PROCESS_STRUCT_BITS \
+	(IA_CSS_KERNEL_BITMAP_BITS \
+	+ (N_UINT32_IN_PROCESS_STRUCT * 32) \
+	+ IA_CSS_PROGRAM_ID_BITS \
+	+ (VIED_NCI_RESOURCE_BITMAP_BITS * VIED_NCI_N_DEV_DFM_ID) \
+	+ (VIED_NCI_RESOURCE_BITMAP_BITS * VIED_NCI_N_DEV_DFM_ID) \
+	+ IA_CSS_PROCESS_STATE_BITS \
+	+ (N_UINT16_IN_PROCESS_STRUCT * 16) \
+	+ (VIED_NCI_N_MEM_TYPE_ID * VIED_NCI_RESOURCE_SIZE_BITS) \
+	+ (VIED_NCI_N_DATA_MEM_TYPE_ID * VIED_NCI_RESOURCE_SIZE_BITS) \
+	+ (VIED_NCI_N_DEV_CHN_ID * VIED_NCI_RESOURCE_SIZE_BITS) \
+	+ (IA_CSS_PROCESS_MAX_CELLS * VIED_NCI_RESOURCE_ID_BITS) \
+	+ (VIED_NCI_N_MEM_TYPE_ID * VIED_NCI_RESOURCE_ID_BITS) \
+	+ (VIED_NCI_N_DATA_MEM_TYPE_ID * VIED_NCI_RESOURCE_ID_BITS) \
+	+ (N_UINT8_IN_PROCESS_STRUCT * 8) \
+	+ (N_PADDING_UINT8_IN_PROCESS_STRUCT * 8))
+
+struct ia_css_process_s {
+	/**< Indicate which kernels lead to this process being used */
+	ia_css_kernel_bitmap_t kernel_bitmap;
+	uint32_t size; /**< Size of this structure */
+	ia_css_program_ID_t ID; /**< Referral ID to a specific program FW */
+	uint32_t program_idx; /**< Program index into the PG manifest */
+#if (VIED_NCI_N_DEV_DFM_ID > 0)
+	/**< DFM port allocated to this process */
+	vied_nci_resource_bitmap_t dfm_port_bitmap[VIED_NCI_N_DEV_DFM_ID];
+	/**< Active DFM ports which need a kick */
+	vied_nci_resource_bitmap_t dfm_active_port_bitmap[VIED_NCI_N_DEV_DFM_ID];
+#endif
+	/**< State of the process FSM dependent on the parent FSM */
+	ia_css_process_state_t state;
+	int16_t parent_offset; /**< Reference to the process group */
+	/**< Array[cell_dependency_count] of IDs of the cells that provide input */
+	uint16_t cell_dependencies_offset;
+	/**< Array[terminal_dependency_count] of indices of connected terminals */
+	uint16_t terminal_dependencies_offset;
+	/**< (internal) Memory allocation offset given to this process */
+	vied_nci_resource_size_t int_mem_offset[VIED_NCI_N_MEM_TYPE_ID];
+	/**< (external) Memory allocation offset given to this process */
+	vied_nci_resource_size_t ext_mem_offset[VIED_NCI_N_DATA_MEM_TYPE_ID];
+	/**< Device channel allocation offset given to this process */
+	vied_nci_resource_size_t dev_chn_offset[VIED_NCI_N_DEV_CHN_ID];
+	/**< Cells (VP, ACB) allocated for the process */
+#if IA_CSS_PROCESS_MAX_CELLS == 1
+	vied_nci_resource_id_t cell_id;
+#else
+	vied_nci_resource_id_t cells[IA_CSS_PROCESS_MAX_CELLS];
+#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */
+	/**< (internal) Memory ID; this is redundant, derived from cell_id */
+	vied_nci_resource_id_t int_mem_id[VIED_NCI_N_MEM_TYPE_ID];
+	/**< (external) Memory ID */
+	vied_nci_resource_id_t ext_mem_id[VIED_NCI_N_DATA_MEM_TYPE_ID];
+	/**< Number of processes (mapped on cells) this process depends on */
+	uint8_t cell_dependency_count;
+	/**< Number of terminals this process depends on */
+	uint8_t terminal_dependency_count;
+	/**< Padding bytes for 64-bit alignment */
+#if (N_PADDING_UINT8_IN_PROCESS_STRUCT > 0)
+	uint8_t padding[N_PADDING_UINT8_IN_PROCESS_STRUCT];
+#endif /* (N_PADDING_UINT8_IN_PROCESS_STRUCT > 0) */
+};
+
+#endif /* __IA_CSS_PSYS_PROCESS_PRIVATE_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal.c 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal.c new file mode 100644 index 000000000000..ea406f229273 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal.c @@ -0,0 +1,604 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_dynamic_storage_class.h" +#include "ia_css_psys_terminal_private_types.h" +#include "ia_css_terminal_types.h" + +/* + * Functions to possibly inline + */ + +#ifndef __IA_CSS_PSYS_DYNAMIC_INLINE__ +#include "ia_css_psys_terminal_impl.h" +#endif /* __IA_CSS_PSYS_DYNAMIC_INLINE__ */ + +STORAGE_CLASS_INLINE void __terminal_dummy_check_alignment(void) +{ + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_TERMINAL_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_param_terminal_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_param_terminal_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_SEC_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_param_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_param_section_desc_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_SPATIAL_PARAM_TERM_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_spatial_param_terminal_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_spatial_param_terminal_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAME_GRID_PARAM_SEC_STRUCT_BITS != + (CHAR_BIT * sizeof( + ia_css_frame_grid_param_section_desc_t))); + + COMPILATION_ERROR_IF(0 != sizeof( + ia_css_frame_grid_param_section_desc_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAG_GRID_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_fragment_grid_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_fragment_grid_desc_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_SLICED_PARAM_TERM_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_sliced_param_terminal_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_sliced_param_terminal_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAGMENT_SLICE_DESC_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_fragment_slice_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_fragment_slice_desc_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_SLICE_PARAM_SECTION_DESC_STRUCT_BITS != + (CHAR_BIT * sizeof( + ia_css_slice_param_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_slice_param_section_desc_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PROG_TERM_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_program_terminal_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_program_terminal_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAG_SEQ_INFO_STRUCT_BITS != + (CHAR_BIT * sizeof( + ia_css_kernel_fragment_sequencer_info_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_kernel_fragment_sequencer_info_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAG_SEQ_COMMANDS_STRUCT_BITS != + (CHAR_BIT * sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t))); + + 
COMPILATION_ERROR_IF(0 != + sizeof(ia_css_kernel_fragment_sequencer_command_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAG_PARAM_SEC_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_fragment_param_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_fragment_param_section_desc_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PROG_CONTROL_INIT_LOAD_SECTION_DESC_STRUCT_BITS != + (CHAR_BIT * + sizeof(ia_css_program_control_init_load_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_program_control_init_load_section_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PROG_CONTROL_INIT_CONNECT_SECTION_DESC_STRUCT_BITS != + (CHAR_BIT * + sizeof(ia_css_program_control_init_connect_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_program_control_init_connect_section_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PROGRAM_DESC_CONTROL_INFO_STRUCT_BITS != + (CHAR_BIT * + sizeof(struct ia_css_program_desc_control_info_s))); + + COMPILATION_ERROR_IF( + SIZE_OF_PROG_CONTROL_INIT_PROG_DESC_STRUCT_BITS != + (CHAR_BIT * + sizeof(ia_css_program_control_init_program_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_program_control_init_program_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PROG_CONTROL_INIT_TERM_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_program_control_init_terminal_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_program_control_init_terminal_t) % + sizeof(uint64_t)); +} + +/* + * Functions not to inline + */ + +/* + * This source file is created with the intention of sharing and + * compiled for host and firmware. Since there is no native 64bit + * data type support for firmware this wouldn't compile for SP + * tile. The part of the file that is not compilable are marked + * with the following __VIED_CELL marker and this comment. Once we + * come up with a solution to address this issue this will be + * removed. 
+ */ +#if !defined(__VIED_CELL) +size_t ia_css_sizeof_terminal( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + size_t size = 0; + uint16_t fragment_count = + ia_css_program_group_param_get_fragment_count(param); + + COMPILATION_ERROR_IF( + SIZE_OF_DATA_TERMINAL_STRUCT_BITS != + (CHAR_BIT * sizeof(ia_css_data_terminal_t))); + + COMPILATION_ERROR_IF( + 0 != sizeof(ia_css_data_terminal_t)%sizeof(uint64_t)); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_sizeof_terminal(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(param != NULL); + + if (ia_css_is_terminal_manifest_parameter_terminal(manifest)) { + const ia_css_param_terminal_manifest_t *param_term_man = + (const ia_css_param_terminal_manifest_t *)manifest; + if (ia_css_terminal_manifest_get_type(manifest) == + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN) { + size = ia_css_param_in_terminal_get_descriptor_size( + param_term_man->param_manifest_section_desc_count); + } else if (ia_css_terminal_manifest_get_type(manifest) == + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT) { + size = ia_css_param_out_terminal_get_descriptor_size( + param_term_man->param_manifest_section_desc_count, + fragment_count); + } else { + assert(NULL == "Invalid parameter terminal type"); + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_sizeof_terminal(): Invalid parameter terminal type:\n"); + verifjmpexit(0); + } + } else if (ia_css_is_terminal_manifest_data_terminal(manifest)) { + size += sizeof(ia_css_data_terminal_t); + size += fragment_count * sizeof(ia_css_fragment_descriptor_t); + } else if (ia_css_is_terminal_manifest_program_terminal(manifest)) { + ia_css_program_terminal_manifest_t *prog_term_man = + (ia_css_program_terminal_manifest_t *)manifest; + + size = ia_css_program_terminal_get_descriptor_size( + fragment_count, + prog_term_man-> + fragment_param_manifest_section_desc_count, + prog_term_man-> + kernel_fragment_sequencer_info_manifest_info_count, + (fragment_count * prog_term_man-> + max_kernel_fragment_sequencer_command_desc)); + } else if (ia_css_is_terminal_manifest_spatial_parameter_terminal( + manifest)) { + ia_css_spatial_param_terminal_manifest_t *spatial_param_term = + (ia_css_spatial_param_terminal_manifest_t *)manifest; + size = ia_css_spatial_param_terminal_get_descriptor_size( + spatial_param_term-> + frame_grid_param_manifest_section_desc_count, + fragment_count); + } else if (ia_css_is_terminal_manifest_program_control_init_terminal( + manifest)) { + ia_css_program_control_init_terminal_manifest_t *progctrlinit_term_man = + (ia_css_program_control_init_terminal_manifest_t *)manifest; + + size = ia_css_program_control_init_terminal_get_descriptor_size( + progctrlinit_term_man); + } +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_sizeof_terminal invalid argument\n"); + } + return size; +} + +ia_css_terminal_t *ia_css_terminal_create( + void *raw_mem, + const ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_param_t *terminal_param, + ia_css_kernel_bitmap_t enable_bitmap) +{ + char *terminal_raw_ptr; + ia_css_terminal_t *terminal = NULL; + uint16_t fragment_count; + int i, j; + int retval = -1; + ia_css_program_group_param_t *param; + + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "ia_css_terminal_create(manifest %p, terminal_param %p): enter:\n", + manifest, terminal_param); + + param = ia_css_terminal_param_get_parent(terminal_param); + fragment_count = ia_css_program_group_param_get_fragment_count(param); + + 
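+	/*
+	 * The argument checks below follow the file-wide verifexit()
+	 * pattern: on failure control jumps to the EXIT label with retval
+	 * still -1, where the error is traced and ia_css_terminal_destroy()
+	 * is invoked on the partially initialized terminal.
+	 */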
verifexit(manifest != NULL); + verifexit(param != NULL); + + terminal_raw_ptr = (char *) raw_mem; + + terminal = (ia_css_terminal_t *) terminal_raw_ptr; + verifexit(terminal != NULL); + + terminal->size = (uint16_t)ia_css_sizeof_terminal(manifest, param); + verifexit(ia_css_terminal_set_type( + terminal, ia_css_terminal_manifest_get_type(manifest)) == 0); + + terminal->ID = ia_css_terminal_manifest_get_ID(manifest); + + verifexit(ia_css_terminal_set_buffer(terminal, + VIED_NULL) == 0); + + if (ia_css_is_terminal_manifest_data_terminal(manifest) == true) { + ia_css_data_terminal_t *dterminal = + (ia_css_data_terminal_t *)terminal; + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame(dterminal); + ia_css_kernel_bitmap_t intersection = + ia_css_kernel_bitmap_intersection(enable_bitmap, + ia_css_data_terminal_manifest_get_kernel_bitmap( + (const ia_css_data_terminal_manifest_t *)manifest)); + + verifexit(frame != NULL); + verifexit(ia_css_frame_set_buffer_state( + frame, IA_CSS_BUFFER_NULL) == 0); + verifexit(ia_css_is_kernel_bitmap_onehot(intersection) == + true); + + terminal_raw_ptr += sizeof(ia_css_data_terminal_t); + dterminal->fragment_descriptor_offset = + (uint16_t) (terminal_raw_ptr - (char *)terminal); + + dterminal->kernel_id = 0; + while (!ia_css_is_kernel_bitmap_empty(intersection)) { + intersection = ia_css_kernel_bitmap_shift( + intersection); + dterminal->kernel_id++; + } + assert(dterminal->kernel_id > 0); + dterminal->kernel_id -= 1; + + /* some terminal and fragment initialization */ + dterminal->frame_descriptor.frame_format_type = + terminal_param->frame_format_type; + for (i = 0; i < IA_CSS_N_DATA_DIMENSION; i++) { + dterminal->frame_descriptor.dimension[i] = + terminal_param->dimensions[i]; + } + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] = + terminal_param->stride; + dterminal->frame_descriptor.bpp = terminal_param->bpp; + dterminal->frame_descriptor.bpe = terminal_param->bpe; + switch (dterminal->frame_descriptor.frame_format_type) { + case IA_CSS_DATA_FORMAT_UYVY: + case IA_CSS_DATA_FORMAT_YUYV: + case IA_CSS_DATA_FORMAT_Y800: + case IA_CSS_DATA_FORMAT_RGB565: + case IA_CSS_DATA_FORMAT_RGBA888: + case IA_CSS_DATA_FORMAT_BAYER_GRBG: + case IA_CSS_DATA_FORMAT_BAYER_RGGB: + case IA_CSS_DATA_FORMAT_BAYER_BGGR: + case IA_CSS_DATA_FORMAT_BAYER_GBRG: + case IA_CSS_DATA_FORMAT_RAW: + case IA_CSS_DATA_FORMAT_RAW_PACKED: + case IA_CSS_DATA_FORMAT_YYUVYY_VECTORIZED: + case IA_CSS_DATA_FORMAT_PAF: + dterminal->frame_descriptor.plane_count = 1; + dterminal->frame_descriptor.plane_offsets[0] = 0; + break; + case IA_CSS_DATA_FORMAT_NV12: + case IA_CSS_DATA_FORMAT_NV21: + case IA_CSS_DATA_FORMAT_NV16: + case IA_CSS_DATA_FORMAT_NV61: + dterminal->frame_descriptor.plane_count = 2; + dterminal->frame_descriptor.plane_offsets[0] = 0; + dterminal->frame_descriptor.plane_offsets[1] = + dterminal->frame_descriptor.plane_offsets[0] + + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] * + dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION]; + break; + case IA_CSS_DATA_FORMAT_YUV444: + case IA_CSS_DATA_FORMAT_RGB888: + case IA_CSS_DATA_FORMAT_YUV420_VECTORIZED: + dterminal->frame_descriptor.plane_count = 3; + dterminal->frame_descriptor.plane_offsets[0] = 0; + dterminal->frame_descriptor.plane_offsets[1] = + dterminal->frame_descriptor.plane_offsets[0] + + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] * + dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION]; + dterminal->frame_descriptor.plane_offsets[2] = + 
dterminal->frame_descriptor.plane_offsets[1] + + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] * + dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION]; + break; + case IA_CSS_DATA_FORMAT_YUV420: + dterminal->frame_descriptor.plane_count = 3; + dterminal->frame_descriptor.plane_offsets[0] = 0; + dterminal->frame_descriptor.plane_offsets[1] = + dterminal->frame_descriptor.plane_offsets[0] + + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION] * + dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION]; + dterminal->frame_descriptor.plane_offsets[2] = + dterminal->frame_descriptor.plane_offsets[1] + + dterminal->frame_descriptor.stride[IA_CSS_COL_DIMENSION]/2 * + dterminal->frame_descriptor.dimension[IA_CSS_ROW_DIMENSION]/2; + break; + default: + /* Unset, resulting in potential terminal connect issues */ + dterminal->frame_descriptor.plane_count = 1; + dterminal->frame_descriptor.plane_offsets[0] = 0; + break; + } + /* + * Initial solution for single fragment initialization + * TODO: + * where to get the fragment description params from??? + */ + if (fragment_count > 0) { + ia_css_fragment_descriptor_t *fragment_descriptor = + (ia_css_fragment_descriptor_t *) + terminal_raw_ptr; + + fragment_descriptor->index[IA_CSS_COL_DIMENSION] = + terminal_param->index[IA_CSS_COL_DIMENSION]; + fragment_descriptor->index[IA_CSS_ROW_DIMENSION] = + terminal_param->index[IA_CSS_ROW_DIMENSION]; + fragment_descriptor->offset[0] = + terminal_param->offset; + for (i = 0; i < IA_CSS_N_DATA_DIMENSION; i++) { + fragment_descriptor->dimension[i] = + terminal_param->fragment_dimensions[i]; + } + } + /* end fragment stuff */ + } else if (ia_css_is_terminal_manifest_parameter_terminal(manifest) == + true) { + ia_css_param_terminal_t *pterminal = + (ia_css_param_terminal_t *)terminal; + uint16_t section_count = + ((const ia_css_param_terminal_manifest_t *)manifest)-> + param_manifest_section_desc_count; + size_t curr_offset = 0; + + pterminal->param_section_desc_offset = + sizeof(ia_css_param_terminal_t); + + for (i = 0; i < section_count; i++) { + ia_css_param_section_desc_t *section = + ia_css_param_in_terminal_get_param_section_desc( + pterminal, i); + const ia_css_param_manifest_section_desc_t * + man_section = + ia_css_param_terminal_manifest_get_prm_sct_desc( + (const ia_css_param_terminal_manifest_t *)manifest, i); + + verifjmpexit(man_section != NULL); + verifjmpexit(section != NULL); + + section->mem_size = man_section->max_mem_size; + section->mem_offset = curr_offset; + curr_offset += man_section->max_mem_size; + } + } else if (ia_css_is_terminal_manifest_program_terminal(manifest) == + true && + ia_css_terminal_manifest_get_type(manifest) == + IA_CSS_TERMINAL_TYPE_PROGRAM) { /* for program terminal */ + ia_css_program_terminal_t *prog_terminal = + (ia_css_program_terminal_t *)terminal; + const ia_css_program_terminal_manifest_t *prog_terminal_man = + (const ia_css_program_terminal_manifest_t *)manifest; + ia_css_kernel_fragment_sequencer_info_desc_t + *sequencer_info_desc_base = NULL; + uint16_t section_count = prog_terminal_man-> + fragment_param_manifest_section_desc_count; + uint16_t manifest_info_count = + prog_terminal_man-> + kernel_fragment_sequencer_info_manifest_info_count; + /* information needs to come from user or manifest once + * the size sizeof function is updated. 
+ */ + uint16_t nof_command_objs = 0; + size_t curr_offset = 0; + + prog_terminal->kernel_fragment_sequencer_info_desc_offset = + sizeof(ia_css_program_terminal_t); + prog_terminal->fragment_param_section_desc_offset = + prog_terminal-> + kernel_fragment_sequencer_info_desc_offset + + (fragment_count * manifest_info_count * + sizeof(ia_css_kernel_fragment_sequencer_info_desc_t)) + + (nof_command_objs * + sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t)); + + NOT_USED(sequencer_info_desc_base); + for (i = 0; i < fragment_count; i++) { + for (j = 0; j < section_count; j++) { + ia_css_fragment_param_section_desc_t *section = + ia_css_program_terminal_get_frgmnt_prm_sct_desc( + prog_terminal, i, j, section_count); + const ia_css_fragment_param_manifest_section_desc_t * + man_section = +ia_css_program_terminal_manifest_get_frgmnt_prm_sct_desc + (prog_terminal_man, j); + + verifjmpexit(man_section != NULL); + verifjmpexit(section != NULL); + + section->mem_size = man_section->max_mem_size; + section->mem_offset = curr_offset; + curr_offset += man_section->max_mem_size; + } + + sequencer_info_desc_base = + ia_css_program_terminal_get_kernel_frgmnt_seq_info_desc( + prog_terminal, i, 0, + manifest_info_count); + + /* + * This offset cannot be initialized properly + * since the number of commands in every sequencer + * is not known at this point + */ + /*for (j = 0; j < manifest_info_count; j++) { + sequencer_info_desc_base[j]. + command_desc_offset = + prog_terminal-> + kernel_fragment_sequencer_info_desc_offset + + (manifest_info_count * + sizeof( + ia_css_kernel_fragment_sequencer_info_desc_t) + + (nof_command_objs * + sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t + )); + }*/ + } + } else if (ia_css_is_terminal_manifest_spatial_parameter_terminal( + manifest) == true) { + ia_css_spatial_param_terminal_t *spatial_param_terminal = + (ia_css_spatial_param_terminal_t *)terminal; + ia_css_spatial_param_terminal_manifest_t * + spatia_param_terminal_man = + (ia_css_spatial_param_terminal_manifest_t *)manifest; + + /* Initialize the spatial terminal structure */ + spatial_param_terminal->fragment_grid_desc_offset = + sizeof(ia_css_spatial_param_terminal_t); + spatial_param_terminal->frame_grid_param_section_desc_offset = + spatial_param_terminal->fragment_grid_desc_offset + + (fragment_count * sizeof(ia_css_fragment_grid_desc_t)); + spatial_param_terminal->kernel_id = + spatia_param_terminal_man->kernel_id; + } else if (ia_css_is_terminal_manifest_sliced_terminal(manifest) == + true) { + ia_css_sliced_param_terminal_t *sliced_param_terminal = + (ia_css_sliced_param_terminal_t *)terminal; + ia_css_sliced_param_terminal_manifest_t + *sliced_param_terminal_man = + (ia_css_sliced_param_terminal_manifest_t *)manifest; + + /* Initialize the sliced terminal structure */ + sliced_param_terminal->fragment_slice_desc_offset = + sizeof(ia_css_sliced_param_terminal_t); + sliced_param_terminal->kernel_id = + sliced_param_terminal_man->kernel_id; + } else if (ia_css_is_terminal_manifest_program_control_init_terminal( + manifest) == true) { + verifjmpexit(ia_css_program_control_init_terminal_init( + (ia_css_program_control_init_terminal_t *) + terminal, + (const ia_css_program_control_init_terminal_manifest_t *) + manifest) == 0); + } else { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_create failed, not a data or param terminal. 
Returning (%i)\n", + EFAULT); + goto EXIT; + } + + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_terminal_create(): Created successfully terminal %p\n", + terminal); + + retval = 0; +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_terminal_create invalid argument\n"); + } + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_create failed (%i)\n", retval); + terminal = ia_css_terminal_destroy(terminal); + } + return terminal; +} + +ia_css_terminal_t *ia_css_terminal_destroy( + ia_css_terminal_t *terminal) +{ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_terminal_destroy(terminal %p): enter:\n", terminal); + return terminal; +} + +uint16_t ia_css_param_terminal_compute_section_count( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param) /* Delete 2nd argument*/ +{ + uint16_t section_count = 0; + + NOT_USED(param); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_param_terminal_compute_section_count(): enter:\n"); + + verifexit(manifest != NULL); + section_count = ((const ia_css_param_terminal_manifest_t *)manifest)-> + param_manifest_section_desc_count; +EXIT: + if (NULL == manifest || NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_param_terminal_compute_section_count: invalid argument\n"); + } + return section_count; +} +#endif /* !defined(__VIED_CELL) */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_impl.h new file mode 100644 index 000000000000..36fb0f1d469a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_impl.h @@ -0,0 +1,1868 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_IMPL_H +#define __IA_CSS_PSYS_TERMINAL_IMPL_H + +#include + +#include +#include + +#include +#include + +#include + + +#include +#include /* for verifexit, verifjmpexit */ +#include /* for COMPILATION_ERROR_IF */ +#include /* for NOT_USED */ +#include "ia_css_psys_terminal_private_types.h" +#include "ia_css_terminal_manifest_types.h" +#include "ia_css_psys_dynamic_trace.h" +#include "ia_css_psys_manifest_types.h" +#include "ia_css_psys_program_group_private.h" +#include "ia_css_terminal_types.h" + +STORAGE_CLASS_INLINE int ia_css_data_terminal_print(const ia_css_terminal_t *terminal, + void *fid) { + + DECLARE_ERRVAL + int retval = -1; + int i; + ia_css_data_terminal_t *dterminal = (ia_css_data_terminal_t *)terminal; + uint16_t fragment_count = + ia_css_data_terminal_get_fragment_count(dterminal); + verifexitval(fragment_count != 0, EINVAL); + + retval = ia_css_frame_descriptor_print( + ia_css_data_terminal_get_frame_descriptor(dterminal), + fid); + verifexitval(retval == 0, EINVAL); + + retval = ia_css_frame_print( + ia_css_data_terminal_get_frame(dterminal), fid); + verifexitval(retval == 0, EINVAL); + + for (i = 0; i < (int)fragment_count; i++) { + retval = ia_css_fragment_descriptor_print( + ia_css_data_terminal_get_fragment_descriptor( + dterminal, i), fid); + verifexitval(retval == 0, EINVAL); + } + + retval = 0; +EXIT: + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_print failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_print( + const ia_css_terminal_t *terminal, + void *fid) +{ + DECLARE_ERRVAL + int retval = -1; + ia_css_terminal_type_t term_type = ia_css_terminal_get_type(terminal); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, INFO, + "ia_css_terminal_print(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "\tTerminal %p sizeof %d, typeof %d, parent %p\n", + terminal, + (int)ia_css_terminal_get_size(terminal), + (int)ia_css_terminal_get_type(terminal), + (void *)ia_css_terminal_get_parent(terminal)); + + switch (term_type) { + case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: + ia_css_program_control_init_terminal_print( + (ia_css_program_control_init_terminal_t *)terminal); + break; + case IA_CSS_TERMINAL_TYPE_DATA_IN: + case IA_CSS_TERMINAL_TYPE_DATA_OUT: + ia_css_data_terminal_print(terminal, fid); + break; + default: + /* other terminal prints are currently not supported */ + break; + } + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_print invalid argument terminal\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_print failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_input( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + bool is_input = false; + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_input(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + terminal_type = ia_css_terminal_get_type(terminal); + + switch (terminal_type) { + case IA_CSS_TERMINAL_TYPE_DATA_IN: /* Fall through */ + case IA_CSS_TERMINAL_TYPE_STATE_IN: /* Fall through */ + case IA_CSS_TERMINAL_TYPE_PARAM_STREAM: /* Fall through */ + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN: + case 
IA_CSS_TERMINAL_TYPE_PROGRAM: + case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: + is_input = true; + break; + case IA_CSS_TERMINAL_TYPE_DATA_OUT: /* Fall through */ + case IA_CSS_TERMINAL_TYPE_STATE_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + is_input = false; + break; + default: + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_input: Unknown terminal type (%d)\n", + terminal_type); + goto EXIT; + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_input invalid argument\n"); + } + return is_input; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +size_t ia_css_terminal_get_size( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_size(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + size = terminal->size; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_size invalid argument\n"); + } + return size; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_terminal_type_t ia_css_terminal_get_type( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_type(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + terminal_type = terminal->terminal_type; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_type invalid argument\n"); + } + return terminal_type; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_type( + ia_css_terminal_t *terminal, + const ia_css_terminal_type_t terminal_type) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_set_type(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + terminal->terminal_type = terminal_type; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_type invalid argument terminal\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_type failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint16_t ia_css_terminal_get_terminal_manifest_index( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + uint16_t terminal_manifest_index; + + terminal_manifest_index = 0xffff; + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_terminal_manifest_index(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + terminal_manifest_index = terminal->tm_index; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_terminal_manifest_index: invalid argument\n"); + } + return terminal_manifest_index; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_terminal_manifest_index( + ia_css_terminal_t *terminal, + const uint16_t terminal_manifest_index) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_set_terminal_manifest_index(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + terminal->tm_index = terminal_manifest_index; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_terminal_manifest_index: invalid argument terminal\n"); + } + if (!noerror()) { + 
IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_terminal_manifest_index: failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_terminal_ID_t ia_css_terminal_get_ID( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_ID_t retval = IA_CSS_TERMINAL_INVALID_ID; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_ID(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + retval = terminal->ID; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_ID invalid argument\n"); + retval = 0; + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_data_terminal_get_kernel_id( + const ia_css_data_terminal_t *dterminal) +{ + DECLARE_ERRVAL + uint8_t retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_get_kernel_id(): enter:\n"); + + verifexitval(dterminal != NULL, EFAULT); + + retval = dterminal->kernel_id; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_get_kernel_id: invalid argument\n"); + retval = 0; + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_connection_type_t ia_css_data_terminal_get_connection_type( + const ia_css_data_terminal_t *dterminal) +{ + DECLARE_ERRVAL + ia_css_connection_type_t connection_type = IA_CSS_N_CONNECTION_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_get_connection_type(): enter:\n"); + + verifexitval(dterminal != NULL, EFAULT); + + connection_type = dterminal->connection_type; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_get_connection_type: invalid argument\n"); + } + return connection_type; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_data_terminal_get_link_id( + const ia_css_data_terminal_t *dterminal) +{ + DECLARE_ERRVAL + uint8_t link_id = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_get_link_id(): enter:\n"); + + verifexitval(dterminal != NULL, EFAULT); + + link_id = dterminal->link_id; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_get_link_id: invalid argument\n"); + } + return link_id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_data_terminal_set_link_id( + ia_css_data_terminal_t *dterminal, + const uint8_t link_id) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_set_link_id(): enter:\n"); + + verifexitval(dterminal != NULL, EFAULT); + dterminal->link_id = link_id; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_set_link_id: invalid argument terminal\n"); + } + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_set_link_id: failed (%i)\n", + retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_data_terminal_set_connection_type( + ia_css_data_terminal_t *dterminal, + const ia_css_connection_type_t connection_type) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_set_connection_type(): enter:\n"); + + verifexitval(dterminal != NULL, EFAULT); + + dterminal->connection_type = connection_type; + + retval = 0; +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_set_connection_type: invalid argument dterminal\n"); + } + if 
(!noerror()) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_data_terminal_set_connection_type failed (%i)\n",
+			retval);
+	}
+	return retval;
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+ia_css_process_group_t *ia_css_terminal_get_parent(
+	const ia_css_terminal_t *terminal)
+{
+	DECLARE_ERRVAL
+	ia_css_process_group_t *parent = NULL;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_terminal_get_parent(): enter:\n");
+
+	verifexitval(terminal != NULL, EFAULT);
+
+	parent = (ia_css_process_group_t *) ((char *)terminal +
+		terminal->parent_offset);
+
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_terminal_get_parent invalid argument\n");
+	}
+	return parent;
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+int ia_css_terminal_set_parent(
+	ia_css_terminal_t *terminal,
+	ia_css_process_group_t *parent)
+{
+	DECLARE_ERRVAL
+	int retval = -1;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_terminal_set_parent(): enter:\n");
+
+	verifexitval(terminal != NULL, EFAULT);
+	verifexitval(parent != NULL, EFAULT);
+
+	terminal->parent_offset = (uint16_t) ((char *)parent -
+		(char *)terminal);
+
+	retval = 0;
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_terminal_set_parent invalid argument\n");
+	}
+	if (!noerror()) {
+		IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_terminal_set_parent failed (%i)\n", retval);
+	}
+	return retval;
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+ia_css_frame_t *ia_css_data_terminal_get_frame(
+	const ia_css_data_terminal_t *dterminal)
+{
+	DECLARE_ERRVAL
+	ia_css_frame_t *frame = NULL;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_data_terminal_get_frame(): enter:\n");
+
+	verifexitval(dterminal != NULL, EFAULT);
+
+	frame = (ia_css_frame_t *)(&(dterminal->frame));
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_data_terminal_get_frame invalid argument\n");
+	}
+	return frame;
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+ia_css_frame_descriptor_t *ia_css_data_terminal_get_frame_descriptor(
+	const ia_css_data_terminal_t *dterminal)
+{
+	DECLARE_ERRVAL
+	ia_css_frame_descriptor_t *frame_descriptor = NULL;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_data_terminal_get_frame_descriptor(): enter:\n");
+
+	verifexitval(dterminal != NULL, EFAULT);
+
+	frame_descriptor =
+		(ia_css_frame_descriptor_t *)(&(dterminal->frame_descriptor));
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_data_terminal_get_frame_descriptor: invalid argument\n");
+	}
+	return frame_descriptor;
+}
+
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C
+ia_css_fragment_descriptor_t *ia_css_data_terminal_get_fragment_descriptor(
+	const ia_css_data_terminal_t *dterminal,
+	const unsigned int fragment_index)
+{
+	DECLARE_ERRVAL
+	ia_css_fragment_descriptor_t *fragment_descriptor = NULL;
+	uint16_t fragment_count = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE,
+		"ia_css_data_terminal_get_fragment_descriptor(): enter:\n");
+
+	verifexitval(dterminal != NULL, EFAULT);
+
+	fragment_count = ia_css_data_terminal_get_fragment_count(dterminal);
+
+	verifexitval(fragment_count != 0, EINVAL);
+	verifexitval(fragment_index < fragment_count, EINVAL);
+
+	fragment_descriptor = (ia_css_fragment_descriptor_t *)
+		((char *)dterminal + dterminal->fragment_descriptor_offset);
+
+	fragment_descriptor += fragment_index;
+EXIT:
+	if (haserror(EFAULT)) {
+		IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR,
+			"ia_css_data_terminal_get_fragment_descriptor: invalid argument\n");
+	}
+	return
fragment_descriptor; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint16_t ia_css_data_terminal_get_fragment_count( + const ia_css_data_terminal_t *dterminal) +{ + DECLARE_ERRVAL + ia_css_process_group_t *parent; + uint16_t fragment_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_get_fragment_count(): enter:\n"); + + parent = ia_css_terminal_get_parent((ia_css_terminal_t *)dterminal); + + verifexitval(dterminal != NULL, EFAULT); + verifexitval(parent != NULL, EFAULT); + + fragment_count = ia_css_process_group_get_fragment_count(parent); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_get_fragment_count: invalid argument\n"); + } + return fragment_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_parameter_terminal( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_parameter_terminal(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + /* will return an error value on error */ + terminal_type = ia_css_terminal_get_type(terminal); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_parameter_terminal: invalid argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT); +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_data_terminal( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_data_terminal(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + /* will return an error value on error */ + terminal_type = ia_css_terminal_get_type(terminal); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_data_terminal invalid argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_DATA_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_DATA_OUT); +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_program_terminal( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_program_terminal(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + /* will return an error value on error */ + terminal_type = ia_css_terminal_get_type(terminal); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_program_terminal: invalid argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM); +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_program_control_init_terminal( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_program_control_init_terminal(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + /* will return an error value on error */ + terminal_type = ia_css_terminal_get_type(terminal); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_program_control_init_terminal: invalid argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT); +} + 
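+/*
+ * Usage sketch for the type predicates above: callers normally branch on
+ * the terminal kind once, rather than comparing raw ia_css_terminal_type_t
+ * values (ia_css_terminal_get_buffer() below follows the same pattern).
+ * The helper name is hypothetical and for illustration only; it relies
+ * solely on the predicates defined earlier in this header.
+ */
+STORAGE_CLASS_INLINE const char *ia_css_terminal_kind_to_string_example(
+	const ia_css_terminal_t *terminal)
+{
+	if (ia_css_is_terminal_data_terminal(terminal))
+		return "data";
+	if (ia_css_is_terminal_parameter_terminal(terminal))
+		return "cached parameter";
+	if (ia_css_is_terminal_program_terminal(terminal))
+		return "program";
+	if (ia_css_is_terminal_program_control_init_terminal(terminal))
+		return "program control init";
+	return "other";
+}
+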
+IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_spatial_parameter_terminal( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_spatial_parameter_terminal(): enter:\n"); + + verifexitval(terminal != NULL, EFAULT); + + /* will return an error value on error */ + terminal_type = ia_css_terminal_get_type(terminal); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_spatial_param_terminal: invalid argument\n"); + } + return (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT); +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_data_terminal_compute_plane_count( + const ia_css_terminal_manifest_t *manifest, + const ia_css_program_group_param_t *param) +{ + DECLARE_ERRVAL + uint8_t plane_count = 1; + + NOT_USED(manifest); + NOT_USED(param); + + verifexitval(manifest != NULL, EFAULT); + verifexitval(param != NULL, EFAULT); + /* TODO: Implementation Missing*/ + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_data_terminal_compute_plane_count(): enter:\n"); +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_data_terminal_compute_plane_count: invalid argument\n"); + } + return plane_count; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +vied_vaddress_t ia_css_terminal_get_buffer( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + vied_vaddress_t buffer = VIED_NULL; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_buffer(): enter:\n"); + + if (ia_css_is_terminal_data_terminal(terminal)) { + ia_css_frame_t *frame = ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + + verifexitval(frame != NULL, EFAULT); + buffer = ia_css_frame_get_buffer(frame); + } else if (ia_css_is_terminal_parameter_terminal(terminal)) { + const ia_css_param_terminal_t *param_terminal = + (const ia_css_param_terminal_t *)terminal; + + buffer = param_terminal->param_payload.buffer; + } else if (ia_css_is_terminal_program_terminal(terminal)) { + const ia_css_program_terminal_t *program_terminal = + (const ia_css_program_terminal_t *)terminal; + + buffer = program_terminal->param_payload.buffer; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal)) { + const ia_css_program_control_init_terminal_t *program_ctrl_init_terminal = + (const ia_css_program_control_init_terminal_t *)terminal; + + buffer = program_ctrl_init_terminal->param_payload.buffer; + } else if (ia_css_is_terminal_spatial_parameter_terminal(terminal)) { + const ia_css_spatial_param_terminal_t *spatial_terminal = + (const ia_css_spatial_param_terminal_t *)terminal; + + buffer = spatial_terminal->param_payload.buffer; + } +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_buffer: invalid argument terminal\n"); + } + return buffer; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_buffer( + ia_css_terminal_t *terminal, + vied_vaddress_t buffer) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_set_buffer(): enter:\n"); + + if (ia_css_is_terminal_data_terminal(terminal) == true) { + /* Currently using Frames inside data terminal , + * TODO: start directly using data. 
+ */ + ia_css_data_terminal_t *dterminal = + (ia_css_data_terminal_t *)terminal; + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame(dterminal); + + verifexitval(frame != NULL, EFAULT); + retval = ia_css_frame_set_buffer(frame, buffer); + verifexitval(retval == 0, EINVAL); + } else if (ia_css_is_terminal_parameter_terminal(terminal) == true) { + ia_css_param_terminal_t *pterminal = + (ia_css_param_terminal_t *)terminal; + + pterminal->param_payload.buffer = buffer; + retval = 0; + } else if (ia_css_is_terminal_program_terminal(terminal) == true) { + ia_css_program_terminal_t *pterminal = + (ia_css_program_terminal_t *)terminal; + + pterminal->param_payload.buffer = buffer; + retval = 0; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal) == true) { + ia_css_program_control_init_terminal_t *pterminal = + (ia_css_program_control_init_terminal_t *)terminal; + + pterminal->param_payload.buffer = buffer; + retval = 0; + } else if (ia_css_is_terminal_spatial_parameter_terminal(terminal) == + true) { + ia_css_spatial_param_terminal_t *pterminal = + (ia_css_spatial_param_terminal_t *)terminal; + + pterminal->param_payload.buffer = buffer; + retval = 0; + } else { + return retval; + } + + retval = 0; +EXIT: + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_buffer failed (%i)\n", retval); + } + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_get_terminal_index( + const ia_css_terminal_t *terminal) +{ + DECLARE_ERRVAL + int terminal_index = -1; + + verifexitval(terminal != NULL, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_get_terminal_index(): enter:\n"); + + if (ia_css_is_terminal_data_terminal(terminal)) { + ia_css_frame_t *frame = ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + + verifexitval(frame != NULL, EFAULT); + terminal_index = ia_css_frame_get_data_index(frame); + } else { + if (ia_css_is_terminal_parameter_terminal(terminal)) { + const ia_css_param_terminal_t *param_terminal = + (const ia_css_param_terminal_t *)terminal; + + terminal_index = param_terminal->param_payload.terminal_index; + } else if (ia_css_is_terminal_program_terminal(terminal)) { + const ia_css_program_terminal_t *program_terminal = + (const ia_css_program_terminal_t *)terminal; + + terminal_index = program_terminal->param_payload.terminal_index; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal)) { + const ia_css_program_control_init_terminal_t *program_ctrl_init_terminal = + (const ia_css_program_control_init_terminal_t *)terminal; + + terminal_index = program_ctrl_init_terminal->param_payload.terminal_index; + } else if (ia_css_is_terminal_spatial_parameter_terminal(terminal)) { + const ia_css_spatial_param_terminal_t *spatial_terminal = + (const ia_css_spatial_param_terminal_t *)terminal; + + terminal_index = spatial_terminal->param_payload.terminal_index; + } else { + verifjmpexit(0); + } + } +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_get_terminal_index: invalid argument\n"); + } + return terminal_index; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int ia_css_terminal_set_terminal_index( + ia_css_terminal_t *terminal, + unsigned int terminal_index) +{ + DECLARE_ERRVAL + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_terminal_set_terminal_index(): enter:\n"); + + if (ia_css_is_terminal_data_terminal(terminal) == true) { + /* Currently using Frames inside data terminal , + * TODO: 
start directly using data. + */ + ia_css_data_terminal_t *dterminal = + (ia_css_data_terminal_t *)terminal; + ia_css_frame_t *frame = + ia_css_data_terminal_get_frame(dterminal); + + verifexitval(frame != NULL, EFAULT); + retval = ia_css_frame_set_data_index(frame, terminal_index); + verifexitval(retval == 0, EINVAL); + } else { + if (ia_css_is_terminal_parameter_terminal(terminal) == true) { + ia_css_param_terminal_t *pterminal = + (ia_css_param_terminal_t *)terminal; + + pterminal->param_payload.terminal_index = terminal_index; + retval = 0; + } else if (ia_css_is_terminal_program_terminal(terminal) == true) { + ia_css_program_terminal_t *pterminal = + (ia_css_program_terminal_t *)terminal; + + pterminal->param_payload.terminal_index = terminal_index; + retval = 0; + } else if (ia_css_is_terminal_program_control_init_terminal(terminal) + == true) { + ia_css_program_control_init_terminal_t *pterminal = + (ia_css_program_control_init_terminal_t *)terminal; + + pterminal->param_payload.terminal_index = terminal_index; + retval = 0; + } else if (ia_css_is_terminal_spatial_parameter_terminal(terminal) == + true) { + ia_css_spatial_param_terminal_t *pterminal = + (ia_css_spatial_param_terminal_t *)terminal; + + pterminal->param_payload.terminal_index = terminal_index; + retval = 0; + } else { + return retval; + } + } + + retval = 0; +EXIT: + if (!noerror()) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, ERROR, + "ia_css_terminal_set_terminal_index failed (%i)\n", + retval); + } + return retval; +} + +STORAGE_CLASS_INLINE bool ia_css_is_data_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest, + const uint16_t nof_fragments) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + + const ia_css_data_terminal_t *dterminal = + (ia_css_data_terminal_t *)terminal; + const ia_css_data_terminal_manifest_t *dt_manifest = + (ia_css_data_terminal_manifest_t *)terminal_manifest; + const ia_css_frame_descriptor_t *frame_descriptor; + ia_css_frame_format_bitmap_t man_frame_format_bitmap; + ia_css_frame_format_bitmap_t proc_frame_format_bitmap; + uint16_t max_value[IA_CSS_N_DATA_DIMENSION]; + uint16_t min_value[IA_CSS_N_DATA_DIMENSION]; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_data_terminal_valid enter\n"); + + frame_descriptor = + ia_css_data_terminal_get_frame_descriptor(dterminal); + verifexitval(frame_descriptor != NULL, EFAULT); + man_frame_format_bitmap = + ia_css_data_terminal_manifest_get_frame_format_bitmap( + dt_manifest); + proc_frame_format_bitmap = + ia_css_frame_format_bit_mask( + frame_descriptor->frame_format_type); + /* + * TODO: Replace by 'validation of frame format type'. 
+ * Currently frame format type is not correctly set by manifest, + * waiting for HSD 1804260604 + */ + if (man_frame_format_bitmap > 0) { + if ((man_frame_format_bitmap & + proc_frame_format_bitmap) == 0) { + uint32_t *bitmap_arr = + (uint32_t *)&man_frame_format_bitmap; + + NOT_USED(bitmap_arr); + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Frame format type not defined in manifest\n"); + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + " man bitmap_arr[]: %d,%d\n", + bitmap_arr[1], bitmap_arr[0]); + bitmap_arr = (uint32_t *)&proc_frame_format_bitmap; + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + " proc bitmap_arr[]: %d,%d\n", + bitmap_arr[1], bitmap_arr[0]); + } + } else { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Frame format bitmap not defined in manifest\n"); + } + ia_css_data_terminal_manifest_get_min_size(dt_manifest, min_value); + /* + * TODO: Replace by validation of Minimal frame column dimensions. + * Currently not correctly set by manifest yet, + * waiting for HSD 1804260604 + */ + if ((frame_descriptor->dimension[IA_CSS_COL_DIMENSION] < + min_value[IA_CSS_COL_DIMENSION])) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Minimal frame column dimensions not set correctly (by manifest)\n"); + } + /* + * TODO: Replace by validation of Minimal frame row dimensions. + * Currently not correctly set by manifest yet, + * waiting for HSD 1804260604 + */ + if (frame_descriptor->dimension[IA_CSS_ROW_DIMENSION] < + min_value[IA_CSS_ROW_DIMENSION]) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Minimal frame row dimensions not set correctly (by manifest)\n"); + } + + ia_css_data_terminal_manifest_get_max_size(dt_manifest, max_value); + /* + * TODO: Replace by validation of Maximal frame column dimensions. + * Currently not correctly set by manifest yet, + * waiting for HSD 1804260604 + */ + if (frame_descriptor->dimension[IA_CSS_COL_DIMENSION] > + max_value[IA_CSS_COL_DIMENSION]) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Maximal frame column dimensions not set correctly (by manifest)\n"); + } + /* + * TODO: Replace by validation of Maximal frame row dimensions. + * Currently not correctly set by manifest yet, + * waiting for HSD 1804260604 + */ + if (frame_descriptor->dimension[IA_CSS_ROW_DIMENSION] > + max_value[IA_CSS_ROW_DIMENSION]) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "Maximal frame row dimensions not set correctly (by manifest)\n"); + } + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, VERBOSE, "min_value: [%d,%d]\n", + min_value[IA_CSS_COL_DIMENSION], + min_value[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, VERBOSE, "max_value: [%d,%d]\n", + max_value[IA_CSS_COL_DIMENSION], + max_value[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, VERBOSE, "frame dim: [%d,%d]\n", + frame_descriptor->dimension[IA_CSS_COL_DIMENSION], + frame_descriptor->dimension[IA_CSS_ROW_DIMENSION]); + /* + * TODO: Add validation of fragment dimensions. 
+ * Currently not set by manifest yet, waiting for HSD 1804260604 + */ + NOT_USED(nof_fragments); + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_data_terminal_valid() invalid argument\n"); + return false; + } else { + return (!invalid_flag); + } +} + +STORAGE_CLASS_INLINE void ia_css_program_terminal_seq_info_print( + const ia_css_kernel_fragment_sequencer_info_manifest_desc_t + *man_seq_info_desc, + const ia_css_kernel_fragment_sequencer_info_desc_t + *term_seq_info_desc) +{ + NOT_USED(man_seq_info_desc); + NOT_USED(term_seq_info_desc); + + /* slice dimension column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_slice_dimension: %d\n", + term_seq_info_desc-> + fragment_grid_slice_dimension[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_slice_dimension: %d\n", + man_seq_info_desc-> + max_fragment_grid_slice_dimension[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_slice_dimension: %d\n", + man_seq_info_desc-> + min_fragment_grid_slice_dimension[IA_CSS_COL_DIMENSION]); + + /* slice dimension row */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_slice_dimension: %d\n", + term_seq_info_desc-> + fragment_grid_slice_dimension[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_slice_dimension: %d\n", + man_seq_info_desc-> + max_fragment_grid_slice_dimension[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_slice_dimension: %d\n", + man_seq_info_desc-> + min_fragment_grid_slice_dimension[IA_CSS_ROW_DIMENSION]); + + /* slice count column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_slice_count: %d\n", + term_seq_info_desc-> + fragment_grid_slice_count[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_slice_count: %d\n", + man_seq_info_desc-> + max_fragment_grid_slice_count[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_slice_count: %d\n", + man_seq_info_desc-> + min_fragment_grid_slice_count[IA_CSS_COL_DIMENSION]); + + /* slice count row */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_slice_count: %d\n", + term_seq_info_desc-> + fragment_grid_slice_count[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_slice_count: %d\n", + man_seq_info_desc-> + max_fragment_grid_slice_count[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_slice_count: %d\n", + man_seq_info_desc-> + min_fragment_grid_slice_count[IA_CSS_ROW_DIMENSION]); + + /* decimation factor column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_point_decimation_factor: %d\n", + term_seq_info_desc-> + fragment_grid_point_decimation_factor[IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_point_decimation_factor: %d\n", + man_seq_info_desc-> + max_fragment_grid_point_decimation_factor[IA_CSS_COL_DIMENSION] + ); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_point_decimation_factor: %d\n", + man_seq_info_desc-> + min_fragment_grid_point_decimation_factor[IA_CSS_COL_DIMENSION] + ); + + /* decimation factor row */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_point_decimation_factor: %d\n", + term_seq_info_desc-> + fragment_grid_point_decimation_factor[IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + 
"max_fragment_grid_point_decimation_factor: %d\n", + man_seq_info_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_point_decimation_factor: %d\n", + man_seq_info_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + + /* index column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_overlay_pixel_topleft_index: %d\n", + term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_overlay_pixel_topleft_index: %d\n", + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_overlay_pixel_topleft_index: %d\n", + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + + /* index row */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_overlay_pixel_topleft_index: %d\n", + term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_overlay_pixel_topleft_index: %d\n", + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_overlay_pixel_topleft_index: %d\n", + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + + /* dimension column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_overlay_pixel_dimension: %d\n", + term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_overlay_pixel_dimension: %d\n", + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_overlay_pixel_dimension: %d\n", + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + + /* dimension column */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "fragment_grid_overlay_pixel_dimension: %d\n", + term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "max_fragment_grid_overlay_pixel_dimension: %d\n", + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, VERBOSE, + "min_fragment_grid_overlay_pixel_dimension: %d\n", + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); +} + +STORAGE_CLASS_INLINE bool ia_css_is_program_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest, + const uint16_t nof_fragments) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + uint16_t frag_idx; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_program_terminal_valid enter\n"); + + for (frag_idx = 0; frag_idx < nof_fragments; frag_idx++) { + uint16_t frag_seq_info_count, seq_idx; + const ia_css_program_terminal_t *prog_term; + const ia_css_program_terminal_manifest_t *prog_term_man; + + prog_term = (const ia_css_program_terminal_t *)terminal; + prog_term_man = + (const ia_css_program_terminal_manifest_t *) + terminal_manifest; + frag_seq_info_count = + prog_term_man-> + kernel_fragment_sequencer_info_manifest_info_count; + + for (seq_idx = 0; seq_idx < 
frag_seq_info_count; seq_idx++) { + const ia_css_kernel_fragment_sequencer_info_desc_t + *term_seq_info_desc; + const + ia_css_kernel_fragment_sequencer_info_manifest_desc_t * + man_seq_info_desc; + + term_seq_info_desc = + ia_css_program_terminal_get_kernel_frgmnt_seq_info_desc( + prog_term, frag_idx, seq_idx, + frag_seq_info_count); + verifexitval(term_seq_info_desc != NULL, EFAULT); + man_seq_info_desc = + ia_css_program_terminal_manifest_get_kernel_frgmnt_seq_info_desc + (prog_term_man, seq_idx); + verifexitval(man_seq_info_desc != NULL, EFAULT); + + ia_css_program_terminal_seq_info_print( + man_seq_info_desc, term_seq_info_desc); + /* slice dimension column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION]); + + /* slice dimension row */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION]); + + /* slice count column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION]); + + /* slice count row */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION]); + + /* decimation factor column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION]); + + /* decimation factor row */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + + /* index column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + 
(term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION]); + + /* index row */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + + /* dimension column */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION]); + + /* dimension row */ + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION] > + man_seq_info_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + invalid_flag = invalid_flag || + (term_seq_info_desc-> + fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION] < + man_seq_info_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + } + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_program_terminal_valid() invalid argument\n"); + return false; + } + if (invalid_flag == true) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, WARNING, + "ia_css_is_program_terminal_valid(): validation failed\n"); + /* TODO: program terminal parameters not correctly defined, + * disable validation result until the issues have been solved + */ + return true; + } + return (!invalid_flag); +} + +STORAGE_CLASS_INLINE bool ia_css_is_sliced_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest, + const uint16_t nof_fragments) +{ + DECLARE_ERRVAL + bool invalid_flag = false; + uint16_t frag_idx; + + uint16_t slice_idx, section_idx; + + const ia_css_sliced_param_terminal_t *sliced_term = + (const ia_css_sliced_param_terminal_t *)terminal; + const ia_css_sliced_param_terminal_manifest_t *sliced_term_man = + (const ia_css_sliced_param_terminal_manifest_t *) + terminal_manifest; + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_sliced_terminal_valid enter\n"); + + for (frag_idx = 0; frag_idx < nof_fragments; frag_idx++) { + const ia_css_fragment_slice_desc_t *fragment_slice_desc = + ia_css_sliced_param_terminal_get_fragment_slice_desc( + sliced_term, frag_idx); + + verifexitval(fragment_slice_desc != NULL, EFAULT); + + for (slice_idx = 0; + slice_idx < fragment_slice_desc->slice_count; + slice_idx++) { + for (section_idx = 0; + section_idx < + sliced_term_man->sliced_param_section_count; + section_idx++) { + const + ia_css_sliced_param_manifest_section_desc_t * + slice_man_section_desc; + const ia_css_slice_param_section_desc_t * + slice_section_desc; + + slice_man_section_desc = + ia_css_sliced_param_terminal_manifest_get_sliced_prm_sct_desc( + sliced_term_man, section_idx); + slice_section_desc = + ia_css_sliced_param_terminal_get_slice_param_section_desc( + sliced_term, frag_idx, + slice_idx, section_idx, + 
sliced_term_man-> + sliced_param_section_count); + verifexitval(slice_man_section_desc != NULL, EFAULT); + verifexitval(slice_section_desc != NULL, EFAULT); + + invalid_flag = invalid_flag || + (slice_section_desc->mem_size > + slice_man_section_desc->max_mem_size); + } + } + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_sliced_terminal_valid() invalid argument\n"); + return false; + } else { + return (!invalid_flag); + } + +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +bool ia_css_is_terminal_valid( + const ia_css_terminal_t *terminal, + const ia_css_terminal_manifest_t *terminal_manifest) +{ + DECLARE_ERRVAL + bool is_valid = false; + uint16_t nof_fragments; + ia_css_terminal_type_t terminal_type = IA_CSS_TERMINAL_INVALID_ID; + + verifexitval(NULL != terminal, EFAULT); + verifexitval(NULL != terminal_manifest, EFAULT); + + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, VERBOSE, + "ia_css_is_terminal_valid enter\n"); + + nof_fragments = ia_css_data_terminal_get_fragment_count( + (const ia_css_data_terminal_t *)terminal); + terminal_type = ia_css_terminal_get_type(terminal); + + switch (terminal_type) { + case IA_CSS_TERMINAL_TYPE_DATA_IN: + case IA_CSS_TERMINAL_TYPE_DATA_OUT: + is_valid = ia_css_is_data_terminal_valid(terminal, + terminal_manifest, nof_fragments); + break; + case IA_CSS_TERMINAL_TYPE_PROGRAM: + is_valid = ia_css_is_program_terminal_valid(terminal, + terminal_manifest, nof_fragments); + break; + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: + /* Nothing to be validated for cached and spatial + * parameters, return valid + */ + is_valid = true; + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT: + is_valid = ia_css_is_sliced_terminal_valid(terminal, + terminal_manifest, nof_fragments); + break; + default: + /* Terminal type unknown, return invalid */ + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, WARNING, + "ia_css_is_terminal_valid() Terminal type %x unknown\n", + (int)terminal_type); + is_valid = false; + break; + } + +EXIT: + if (haserror(EFAULT)) { + IA_CSS_TRACE_0(PSYSAPI_DYNAMIC, ERROR, + "ia_css_is_terminal_valid() invalid argument\n"); + return false; + } + /* TODO: to be removed once all PGs pass validation */ + if (is_valid == false) { + IA_CSS_TRACE_1(PSYSAPI_DYNAMIC, INFO, + "ia_css_is_terminal_valid(): type: %d validation failed\n", + terminal_type); + } + return is_valid; +} + +/* ================= Program Control Init Terminal - START ================= */ +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +int +ia_css_program_control_init_terminal_init( + ia_css_program_control_init_terminal_t *terminal, + const ia_css_program_control_init_terminal_manifest_t *manifest) +{ + int retval = -1; + unsigned int i; + unsigned int base_load_sec; + unsigned int base_connect_sec; + unsigned int load_index = 0; + unsigned int connect_index = 0; + unsigned int load_section_count = 0; + unsigned int connect_section_count = 0; + + ia_css_program_control_init_manifest_program_desc_t *man_progs; + + verifjmpexit(terminal != NULL); + + man_progs = + ia_css_program_control_init_terminal_manifest_get_program_desc(manifest, 0); + verifjmpexit(man_progs != NULL); + + for (i = 0; i < manifest->program_count; i++) { + load_section_count += man_progs[i].load_section_count; + connect_section_count += man_progs[i].connect_section_count; + } + + 
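Aside: the offset arithmetic that follows assumes the terminal is one contiguous allocation, laid out roughly as below (a sketch inferred from the accessors later in this file, not a normative description):

    /*
     * [ia_css_program_control_init_terminal_t]
     * [program_desc 0 .. program_count - 1]
     * [load sections of all programs, in program order]
     * [connect sections of all programs, in program order]
     */

Each program descriptor stores its section offsets relative to its own address, so a section is reached with pointer arithmetic alone, mirroring ia_css_program_control_init_terminal_get_load_section_desc():

    load_sec = (ia_css_program_control_init_load_section_desc_t *)
        ((const char *)prog + prog->load_section_desc_offset);

This is why the initialization below subtracts i * sizeof(ia_css_program_control_init_program_desc_t): it converts a base offset that is relative to the first program descriptor into one relative to program descriptor i.
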
terminal->program_count = manifest->program_count; + terminal->program_section_desc_offset = + sizeof(ia_css_program_control_init_terminal_t); + + base_load_sec = /* base_load_sec relative to first program */ + terminal->program_count * + sizeof(ia_css_program_control_init_program_desc_t); + + base_connect_sec = base_load_sec + + load_section_count * + sizeof(ia_css_program_control_init_load_section_desc_t); + + for (i = 0; i < terminal->program_count; i++) { + ia_css_program_control_init_program_desc_t *prog; + + prog = ia_css_program_control_init_terminal_get_program_desc( + terminal, i); + verifjmpexit(prog != NULL); + + prog->load_section_count = man_progs[i].load_section_count; + prog->connect_section_count = man_progs[i].connect_section_count; + + prog->load_section_desc_offset = + base_load_sec + + load_index * + sizeof(ia_css_program_control_init_load_section_desc_t) - + i * sizeof(ia_css_program_control_init_program_desc_t); + prog->connect_section_desc_offset = + base_connect_sec + + connect_index * + sizeof(ia_css_program_control_init_connect_section_desc_t) - + i * sizeof(ia_css_program_control_init_program_desc_t); + + load_index += man_progs[i].load_section_count; + connect_index += man_progs[i].connect_section_count; + } + retval = 0; +EXIT: + return retval; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +unsigned int +ia_css_program_control_init_terminal_get_descriptor_size( + const ia_css_program_control_init_terminal_manifest_t *manifest) +{ + unsigned int i; + unsigned size = 0; + unsigned load_section_count = 0; + unsigned connect_section_count = 0; + ia_css_program_control_init_manifest_program_desc_t *man_progs; + verifjmpexit(manifest != NULL); + + man_progs = + ia_css_program_control_init_terminal_manifest_get_program_desc( + manifest, 0); + verifjmpexit(man_progs != NULL); + + for (i = 0; i < manifest->program_count; i++) { + load_section_count += man_progs[i].load_section_count; + connect_section_count += man_progs[i].connect_section_count; + } + + size = sizeof(ia_css_program_control_init_terminal_t) + + manifest->program_count * + sizeof(struct ia_css_program_control_init_program_desc_s) + + load_section_count * + sizeof(struct ia_css_program_control_init_load_section_desc_s) + + connect_section_count * + sizeof(struct ia_css_program_control_init_connect_section_desc_s); +EXIT: + return size; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +void ia_css_program_control_init_terminal_print( + const ia_css_program_control_init_terminal_t *terminal) +{ + unsigned int prog_idx, sec_idx; + ia_css_program_control_init_program_desc_t *prog; + ia_css_program_control_init_load_section_desc_t *load_sec; + ia_css_program_control_init_connect_section_desc_t *connect_sec; + + verifjmpexit(terminal != NULL); + + IA_CSS_TRACE_2(PSYSAPI_DYNAMIC, INFO, + "program_count: %d, payload_fragment_stride: %d\n", + terminal->program_count, + terminal->payload_fragment_stride); + + for (prog_idx = 0; prog_idx < terminal->program_count; prog_idx++) { + prog = ia_css_program_control_init_terminal_get_program_desc( + terminal, prog_idx); + verifjmpexit(prog != NULL); + + for (sec_idx = 0; sec_idx < prog->load_section_count; sec_idx++) { + load_sec = + ia_css_program_control_init_terminal_get_load_section_desc( + prog, sec_idx); + verifjmpexit(load_sec != NULL); + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "load_section>> device_descriptor_id: 0x%x, mem_offset: %d, " + "mem_size: %d, mode_bitmask: %x\n", + load_sec->device_descriptor_id.data, + load_sec->mem_offset, + load_sec->mem_size, + 
load_sec->mode_bitmask); + } + for (sec_idx = 0; sec_idx < prog->connect_section_count; sec_idx++) { + connect_sec = + ia_css_program_control_init_terminal_get_connect_section_desc( + prog, sec_idx); + verifjmpexit(connect_sec != NULL); + IA_CSS_TRACE_4(PSYSAPI_DYNAMIC, INFO, + "connect_section>> device_descriptor_id: 0x%x, " + "connect_terminal_ID: %d, connect_section_idx: %d, " + "mode_bitmask: %x\n", + connect_sec->device_descriptor_id.data, + connect_sec->connect_terminal_ID, + connect_sec->connect_section_idx, + connect_sec->mode_bitmask); + } + } +EXIT: + return; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_control_init_program_desc_t * +ia_css_program_control_init_terminal_get_program_desc( + const ia_css_program_control_init_terminal_t *prog_ctrl_init_terminal, + const unsigned int program_index) +{ + ia_css_program_control_init_program_desc_t *program_desc_base; + ia_css_program_control_init_program_desc_t *program_desc = NULL; + + verifjmpexit(prog_ctrl_init_terminal != NULL); + verifjmpexit(program_index < prog_ctrl_init_terminal->program_count); + + program_desc_base = (ia_css_program_control_init_program_desc_t *) + (((const char *)prog_ctrl_init_terminal) + + prog_ctrl_init_terminal->program_section_desc_offset); + program_desc = &(program_desc_base[program_index]); + +EXIT: + return program_desc; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_process_id_t ia_css_program_control_init_terminal_get_process_id( + const ia_css_program_control_init_program_desc_t *program_desc) +{ + ia_css_process_id_t process_id = 0; + + verifjmpexit(program_desc != NULL); + + process_id = program_desc->control_info.process_id; + +EXIT: + return process_id; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +uint8_t ia_css_program_control_init_terminal_get_num_done_events( + const ia_css_program_control_init_program_desc_t *program_desc) +{ + uint8_t num_done_events = 0; + + verifjmpexit(program_desc != NULL); + + num_done_events = program_desc->control_info.num_done_events; + +EXIT: + return num_done_events; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +void ia_css_program_control_init_terminal_set_control_info( + ia_css_program_control_init_program_desc_t *program_desc, + ia_css_process_id_t process_id, + uint8_t num_done_events) +{ + verifjmpexit(program_desc != NULL); + + program_desc->control_info.process_id = process_id; + program_desc->control_info.num_done_events = num_done_events; + +EXIT: + return; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_control_init_load_section_desc_t * +ia_css_program_control_init_terminal_get_load_section_desc( + const ia_css_program_control_init_program_desc_t *program_desc, + const unsigned int load_section_index) +{ + ia_css_program_control_init_load_section_desc_t *load_section_desc_base; + ia_css_program_control_init_load_section_desc_t *load_section_desc = NULL; + + verifjmpexit(program_desc != NULL); + verifjmpexit(load_section_index < program_desc->load_section_count); + + load_section_desc_base = (ia_css_program_control_init_load_section_desc_t *) + (((const char *)program_desc) + + program_desc->load_section_desc_offset); + load_section_desc = &(load_section_desc_base[load_section_index]); + +EXIT: + return load_section_desc; +} + +IA_CSS_PSYS_DYNAMIC_STORAGE_CLASS_C +ia_css_program_control_init_connect_section_desc_t * +ia_css_program_control_init_terminal_get_connect_section_desc( + const ia_css_program_control_init_program_desc_t *program_desc, + const unsigned int connect_section_index) +{ + 
ia_css_program_control_init_connect_section_desc_t *connect_sec_desc_base; + ia_css_program_control_init_connect_section_desc_t *connect_sec_desc = NULL; + + verifjmpexit(program_desc != NULL); + verifjmpexit(connect_section_index < program_desc->connect_section_count); + + connect_sec_desc_base = + (ia_css_program_control_init_connect_section_desc_t *) + (((const char *)program_desc) + + program_desc->connect_section_desc_offset); + connect_sec_desc = &(connect_sec_desc_base[connect_section_index]); + +EXIT: + return connect_sec_desc; +} + +/* ================= Program Control Init Terminal - END ================= */ + +#endif /* __IA_CSS_PSYS_TERMINAL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_private_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_private_types.h new file mode 100644 index 000000000000..68626561acb5 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/dynamic/src/ia_css_psys_terminal_private_types.h @@ -0,0 +1,186 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_PRIVATE_TYPES_H +#define __IA_CSS_PSYS_TERMINAL_PRIVATE_TYPES_H + +#include "ia_css_terminal_types.h" +#include "ia_css_program_group_data.h" +#include "ia_css_psys_manifest_types.h" + +#define N_UINT16_IN_DATA_TERMINAL_STRUCT 1 +#define N_UINT8_IN_DATA_TERMINAL_STRUCT 3 +#define N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT 3 + +/* ========================= Data terminal - START ========================= */ + +#define SIZE_OF_DATA_TERMINAL_STRUCT_BITS \ + (SIZE_OF_TERMINAL_STRUCT_BITS \ + + IA_CSS_FRAME_DESCRIPTOR_STRUCT_BITS \ + + IA_CSS_FRAME_STRUCT_BITS \ + + IA_CSS_STREAM_STRUCT_BITS \ + + IA_CSS_UINT32_T_BITS \ + + IA_CSS_CONNECTION_TYPE_BITS \ + + (N_UINT16_IN_DATA_TERMINAL_STRUCT * 16) \ + + (N_UINT8_IN_DATA_TERMINAL_STRUCT * 8) \ + + (N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT * 8)) + +/* + * The (data) terminal can be attached to a buffer or a stream. + * The stream interface is not necessarily limited to strict in-order access. + * For a stream the restriction is that contrary to a buffer it cannot be + * addressed directly, i.e. 
it behaves as a port, + * but it may support stream_pos() and/or seek() operations + */ +struct ia_css_data_terminal_s { + /**< Data terminal base */ + ia_css_terminal_t base; + /**< Properties of the data attached to the terminal */ + ia_css_frame_descriptor_t frame_descriptor; + /**< Data buffer handle attached to the terminal */ + ia_css_frame_t frame; + /**< (exclusive) Data stream handle attached to the terminal + * if the data is sourced over a device port + */ + ia_css_stream_t stream; + /**< Reserved */ + uint32_t reserved; + /**< Connection {buffer, stream, ...} */ + ia_css_connection_type_t connection_type; + /**< Array[fragment_count] (fragment_count being equal for all + * terminals in a subgraph) of fragment descriptors + */ + uint16_t fragment_descriptor_offset; + /**< Kernel id where this terminal is connected to */ + uint8_t kernel_id; + /**< Indicate to which subgraph this terminal belongs + * for common constraints + */ + uint8_t subgraph_id; + /* Link ID of the data terminal */ + uint8_t link_id; + /**< Padding for 64bit alignment */ + uint8_t padding[N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT]; +}; +/* ========================== Data terminal - END ========================== */ + +/* ================= Program Control Init Terminal - START ================= */ +#define SIZE_OF_PROG_CONTROL_INIT_LOAD_SECTION_DESC_STRUCT_BITS \ + (DEVICE_DESCRIPTOR_ID_BITS \ + + (3 * IA_CSS_UINT32_T_BITS) \ + ) +struct ia_css_program_control_init_load_section_desc_s { + /* Offset of the parameter allocation in memory */ + uint32_t mem_offset; + /* Memory allocation size needs of this parameter */ + uint32_t mem_size; + /* Device descriptor */ + device_descriptor_id_t device_descriptor_id; /* 32 bits */ + /* (Applicable to) mode bitmask */ + uint32_t mode_bitmask; +}; + +#define MODE_BITMASK_MEMORY (1u << IA_CSS_CONNECTION_MEMORY) +#define MODE_BITMASK_MEMORY_STREAM (1u << IA_CSS_CONNECTION_MEMORY_STREAM) +#define MODE_BITMASK_STREAM (1u << IA_CSS_CONNECTION_STREAM) +#define MODE_BITMASK_DONT_CARE (MODE_BITMASK_MEMORY | MODE_BITMASK_MEMORY_STREAM | MODE_BITMASK_STREAM) + +#define N_PADDING_UINT8_IN_PROG_CTRL_INIT_CONNECT_SECT_STRUCT (5) +#define SIZE_OF_PROG_CONTROL_INIT_CONNECT_SECTION_DESC_STRUCT_BITS \ + (DEVICE_DESCRIPTOR_ID_BITS \ + + (1 * IA_CSS_UINT32_T_BITS) \ + + (1 * IA_CSS_UINT16_T_BITS) \ + + IA_CSS_TERMINAL_ID_BITS \ + + (N_PADDING_UINT8_IN_PROG_CTRL_INIT_CONNECT_SECT_STRUCT * \ + IA_CSS_UINT8_T_BITS) \ + ) +struct ia_css_program_control_init_connect_section_desc_s { + /* Device descriptor */ + device_descriptor_id_t device_descriptor_id; /* 32 bits */ + /* (Applicable to) mode bitmask */ + uint32_t mode_bitmask; + /* Connected terminal section (plane) index */ + uint16_t connect_section_idx; + /* Absolute referral ID for the connected terminal */ + ia_css_terminal_ID_t connect_terminal_ID; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PROG_CTRL_INIT_CONNECT_SECT_STRUCT]; +}; + +#define N_PADDING_UINT8_IN_PROG_DESC_CONTROL_INFO (1) +#define N_PADDING_UINT8_IN_PROG_CTRL_INIT_PROGRAM_DESC_STRUCT (4) +#define SIZE_OF_PROGRAM_DESC_CONTROL_INFO_STRUCT_BITS \ + (1 * IA_CSS_UINT16_T_BITS) \ + + (1 * IA_CSS_UINT8_T_BITS) \ + + (N_PADDING_UINT8_IN_PROG_DESC_CONTROL_INFO * IA_CSS_UINT8_T_BITS) + +#define SIZE_OF_PROG_CONTROL_INIT_PROG_DESC_STRUCT_BITS \ + (4 * IA_CSS_UINT16_T_BITS) \ + + (SIZE_OF_PROGRAM_DESC_CONTROL_INFO_STRUCT_BITS) \ + + (N_PADDING_UINT8_IN_PROG_CTRL_INIT_PROGRAM_DESC_STRUCT * \ + IA_CSS_UINT8_T_BITS) + +struct ia_css_program_desc_control_info_s { + /* 12-bit 
process identifier */ + ia_css_process_id_t process_id; + /* number of done acks required to close the process */ + uint8_t num_done_events; + uint8_t padding[N_PADDING_UINT8_IN_PROG_DESC_CONTROL_INFO]; +}; + +struct ia_css_program_control_init_program_desc_s { + /* Number of load sections in this program */ + uint16_t load_section_count; + /* Points to variable size array of + * ia_css_program_control_init_load_section_desc_s + * in relation to its program_desc + */ + uint16_t load_section_desc_offset; + /* Number of connect sections in this program */ + uint16_t connect_section_count; + /* Points to variable size array of + * ia_css_program_control_init_connect_section_desc_s + * in relation to its program_desc + */ + uint16_t connect_section_desc_offset; + struct ia_css_program_desc_control_info_s control_info; + /* align to 64 bits */ + uint8_t padding[N_PADDING_UINT8_IN_PROG_CTRL_INIT_PROGRAM_DESC_STRUCT]; +}; + +#define SIZE_OF_PROG_CONTROL_INIT_TERM_STRUCT_BITS \ + (SIZE_OF_TERMINAL_STRUCT_BITS \ + + IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \ + + (1 * IA_CSS_UINT32_T_BITS) \ + + (2 * IA_CSS_UINT16_T_BITS) \ + ) +struct ia_css_program_control_init_terminal_s { + /* Parameter terminal base */ + ia_css_terminal_t base; + /* Parameter buffer handle attached to the terminal */ + ia_css_param_payload_t param_payload; + /* Fragment stride for the payload, used to find the base + * of the payload for a given fragment + */ + uint32_t payload_fragment_stride; + /* Points to the variable array of + * ia_css_program_control_init_program_desc_s + */ + uint16_t program_section_desc_offset; + /* Number of instantiated programs in program group (processes) */ + uint16_t program_count; +}; +/* ================= Program Control Init Terminal - END ================= */ + +#endif /* __IA_CSS_PSYS_TERMINAL_PRIVATE_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi.h new file mode 100644 index 000000000000..4c8fd33b331c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi.h @@ -0,0 +1,23 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYSAPI_H +#define __IA_CSS_PSYSAPI_H + +#include +#include +#include +#include + +#endif /* __IA_CSS_PSYSAPI_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_fw_version.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_fw_version.h new file mode 100644 index 000000000000..5658a2988a08 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_fw_version.h @@ -0,0 +1,33 @@ +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __IA_CSS_PSYSAPI_FW_VERSION_H +#define __IA_CSS_PSYSAPI_FW_VERSION_H + +/* PSYSAPI FW VERSION is taken from Makefile for FW tests */ +#define BXT_FW_RELEASE_VERSION PSYS_FIRMWARE_VERSION + +enum ia_css_process_group_protocol_version { + /* + * Legacy protocol + */ + IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY = 0, + /* + * Persistent process group support protocol + */ + IA_CSS_PROCESS_GROUP_PROTOCOL_PPG, + IA_CSS_PROCESS_GROUP_N_PROTOCOLS +}; + +#endif /* __IA_CSS_PSYSAPI_FW_VERSION_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_trace.h new file mode 100644 index 000000000000..e35ec24c77b3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/interface/ia_css_psysapi_trace.h @@ -0,0 +1,78 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYSAPI_TRACE_H +#define __IA_CSS_PSYSAPI_TRACE_H + +#include "ia_css_trace.h" + +#define PSYSAPI_TRACE_LOG_LEVEL_OFF 0 +#define PSYSAPI_TRACE_LOG_LEVEL_NORMAL 1 +#define PSYSAPI_TRACE_LOG_LEVEL_DEBUG 2 + +/* PSYSAPI and all the submodules in PSYSAPI will have the default tracing + * level set to the PSYSAPI_TRACE_CONFIG level. 
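Aside: a minimal sketch of raising the trace level at build time, assuming it is not already set by the build system (PSYSAPI_TRACE_CONFIG is normally expected to come from psysapi.mk, e.g. as -DPSYSAPI_TRACE_CONFIG=2):

    /* hypothetical: define before this header is first included */
    #define PSYSAPI_TRACE_CONFIG PSYSAPI_TRACE_LOG_LEVEL_DEBUG
    #include "ia_css_psysapi_trace.h"
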
If not defined in the + * psysapi.mk file it will be set by default to no trace + * (PSYSAPI_TRACE_LOG_LEVEL_OFF) + */ +#define PSYSAPI_TRACE_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +#if !defined(PSYSAPI_TRACE_CONFIG) + #define PSYSAPI_TRACE_CONFIG PSYSAPI_TRACE_CONFIG_DEFAULT +#endif + +/* Module specific trace setting will be used if + * the trace level is not specified from the module or + * PSYSAPI_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_TRACE_CONFIG)) + /* Module specific trace setting */ + #if PSYSAPI_TRACE_CONFIG == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_TRACE_CONFIG == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_TRACE_CONFIG == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_TRACE_CONFIG Tracing level defined" + #endif +#else + #error "PSYSAPI_TRACE_CONFIG not defined" +#endif + +/* Overriding submodules in PSYSAPI with a specific tracing level */ +/* #define PSYSAPI_DYNAMIC_TRACING_OVERRIDE TRACE_LOG_LEVEL_VERBOSE */ + +#endif /* __IA_CSS_PSYSAPI_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_kernel_bitmap.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_kernel_bitmap.h new file mode 100644 index 000000000000..3fec775eb019 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_kernel_bitmap.h @@ -0,0 +1,223 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_KERNEL_BITMAP_H +#define __IA_CSS_KERNEL_BITMAP_H + +/*! 
\file */ + +/** @file ia_css_kernel_bitmap.h + * + * The types and operations to make logic decisions given kernel bitmaps + * "ia_css_kernel_bitmap_t" can be larger than native types + */ + +#include +#include "vied_nci_psys_resource_model.h" + +#define IA_CSS_KERNEL_BITMAP_BITS 64 +#define IA_CSS_KERNEL_BITMAP_ELEM_TYPE uint32_t +#define IA_CSS_KERNEL_BITMAP_ELEM_BITS \ + (sizeof(IA_CSS_KERNEL_BITMAP_ELEM_TYPE)*8) +#define IA_CSS_KERNEL_BITMAP_NOF_ELEMS \ + ((IA_CSS_KERNEL_BITMAP_BITS) / (IA_CSS_KERNEL_BITMAP_ELEM_BITS)) + +/** An element is a 32 bit unsigned integer. 64 bit integers may not be + * supported by all target compilers. + */ +typedef struct { + IA_CSS_KERNEL_BITMAP_ELEM_TYPE data[IA_CSS_KERNEL_BITMAP_NOF_ELEMS]; +} ia_css_kernel_bitmap_elems_t; + +/** Users should make no assumption about the actual type of + * ia_css_kernel_bitmap_t. + * Users should use IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS in + * case they erroneously assume that this type is uint64_t and they + * cannot change their implementation. + */ +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS +typedef ia_css_kernel_bitmap_elems_t ia_css_kernel_bitmap_t; +#else +typedef uint64_t ia_css_kernel_bitmap_t; +#if IA_CSS_KERNEL_BITMAP_BITS > 64 +#error IA_CSS_KERNEL_BITMAP_BITS > 64 not supported \ + with IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS +#endif +#endif + +/*! Print the bits of a kernel bitmap + + @return < 0 on error + */ +extern int ia_css_kernel_bitmap_print( + const ia_css_kernel_bitmap_t bitmap, + void *fid); + +/*! Create an empty kernel bitmap + + @return bitmap = 0 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_clear(void); + +/*! Creates the complement of a kernel bitmap + * @param bitmap[in] kernel bitmap + * @return ~bitmap + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_complement( + const ia_css_kernel_bitmap_t bitmap); + +/*! Create the union of two kernel bitmaps + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + @return bitmap0 | bitmap1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_union( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! Create the intersection of two kernel bitmaps + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + @return bitmap0 & bitmap1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_intersection( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! Check if the kernel bitmap is empty + + @param bitmap[in] kernel bitmap + + @return bitmap == 0 + */ +extern bool ia_css_is_kernel_bitmap_empty( + const ia_css_kernel_bitmap_t bitmap); + +/*! Check if the intersection of two kernel bitmaps is empty + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + @return (bitmap0 & bitmap1) == 0 + */ +extern bool ia_css_is_kernel_bitmap_intersection_empty( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! Check if the second kernel bitmap is a subset of the first (or equal) + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + Note: An empty set is always a subset, this function + returns true if bitmap 1 is empty + + @return (bitmap0 & bitmap1) == bitmap1 + */ +extern bool ia_css_is_kernel_bitmap_subset( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! 
Check if the kernel bitmaps are equal + + @param bitmap0[in] kernel bitmap 0 + @param bitmap1[in] kernel bitmap 1 + + @return bitmap0 == bitmap1 + */ +extern bool ia_css_is_kernel_bitmap_equal( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1); + +/*! Right shift kernel bitmap + + @param bitmap[in] kernel bitmap + + @return bitmap >> 1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_shift( + const ia_css_kernel_bitmap_t bitmap); + +/*! Check if the kernel bitmap contains only a single element + + @param bitmap[in] kernel bitmap + + @return weight(bitmap) == 1 + */ +extern bool ia_css_is_kernel_bitmap_onehot( + const ia_css_kernel_bitmap_t bitmap); + +/*! Checks whether a specific kernel bit is set + * @return bitmap[index] == 1 + */ +extern int ia_css_is_kernel_bitmap_set( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index); + +/*! Create the union of a kernel bitmap with a onehot bitmap + * with a bit set at index + + @return bitmap[index] |= 1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_set( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index); + +/*! Creates kernel bitmap using a uint64 value. + * @return bitmap with the same bits set as in value (provided that width of bitmap is sufficient). + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_create_from_uint64( + const uint64_t value); + +/*! Converts an ia_css_kernel_bitmap_t type to uint64_t. Note that if + * ia_css_kernel_bitmap_t contains more than 64 bits, only the lowest 64 bits + * are returned. + * @return uint64_t representation of value +*/ +extern uint64_t ia_css_kernel_bitmap_to_uint64( + const ia_css_kernel_bitmap_t value); + +/*! Creates a kernel bitmap with the bit at index 'index' removed. + * @return ~(1 << index) & bitmap + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_unset( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index); + +/*! Set a previously clear field of a kernel bitmap at index + + @return if bitmap[index] == 0, bitmap[index] -> 1, else 0 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bitmap_set_unique( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index); + +/*! Create a onehot kernel bitmap with a bit set at index + + @return bitmap[index] = 1 + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_bit_mask( + const unsigned int index); + +/*! Create a random bitmap + + @return a bitmap with random bits set + */ +extern ia_css_kernel_bitmap_t ia_css_kernel_ran_bitmap(void); + +#endif /* __IA_CSS_KERNEL_BITMAP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_psys_kernel_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_psys_kernel_trace.h new file mode 100644 index 000000000000..1ba29c7ab77e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/interface/ia_css_psys_kernel_trace.h @@ -0,0 +1,103 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
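Aside: a minimal usage sketch of the bitmap API declared above (illustrative only; it assumes nothing beyond the functions in ia_css_kernel_bitmap.h):

    ia_css_kernel_bitmap_t bm = ia_css_kernel_bitmap_clear();
    uint64_t raw = 0;

    bm = ia_css_kernel_bitmap_set(bm, 3);            /* enable kernel 3 */
    if (ia_css_is_kernel_bitmap_set(bm, 3)) {
        raw = ia_css_kernel_bitmap_to_uint64(bm);    /* raw == 0x8, only bit 3 set */
        bm = ia_css_kernel_bitmap_unset(bm, 3);      /* disable it again */
    }
    (void)raw;
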
+*/ + +#ifndef __IA_CSS_PSYS_KERNEL_TRACE_H +#define __IA_CSS_PSYS_KERNEL_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_KERNEL_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_KERNEL_TRACING_OVERRIDE)) + #define PSYS_KERNEL_TRACE_LEVEL_CONFIG \ + PSYS_KERNEL_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + * PSYSAPI_KERNEL_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_KERNEL_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_KERNEL_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_KERNEL_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_KERNEL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_KERNEL_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_KERNEL_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_KERNEL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_KERNEL_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG + /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */ + #define PSYSAPI_KERNEL_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_KERNEL_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_KERNEL_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_ENABLED + #else + #error "No PSYSAPI_KERNEL Tracing level defined" + #endif +#else + /* Inherit Module trace setting */ + #define PSYSAPI_KERNEL_TRACE_METHOD \ + PSYSAPI_TRACE_METHOD + #define PSYSAPI_KERNEL_TRACE_LEVEL_ASSERT \ + PSYSAPI_TRACE_LEVEL_ASSERT + #define PSYSAPI_KERNEL_TRACE_LEVEL_ERROR \ + PSYSAPI_TRACE_LEVEL_ERROR + #define PSYSAPI_KERNEL_TRACE_LEVEL_WARNING \ + PSYSAPI_TRACE_LEVEL_WARNING + #define PSYSAPI_KERNEL_TRACE_LEVEL_INFO \ + PSYSAPI_TRACE_LEVEL_INFO + #define PSYSAPI_KERNEL_TRACE_LEVEL_DEBUG \ + PSYSAPI_TRACE_LEVEL_DEBUG + #define PSYSAPI_KERNEL_TRACE_LEVEL_VERBOSE \ + PSYSAPI_TRACE_LEVEL_VERBOSE +#endif + +#endif /* __IA_CSS_PSYS_KERNEL_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/src/ia_css_kernel_bitmap.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/src/ia_css_kernel_bitmap.c new file mode 100644 index 000000000000..61ea1fb290a6 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/kernel/src/ia_css_kernel_bitmap.c @@ -0,0 +1,414 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include +#include +#include +#include +#include "ia_css_psys_kernel_trace.h" + +static int ia_css_kernel_bitmap_compute_weight( + const ia_css_kernel_bitmap_t bitmap); + +bool ia_css_is_kernel_bitmap_intersection_empty( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + ia_css_kernel_bitmap_t intersection; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_intersection_empty(): enter:\n"); + + intersection = ia_css_kernel_bitmap_intersection(bitmap0, bitmap1); + return ia_css_is_kernel_bitmap_empty(intersection); +} + +bool ia_css_is_kernel_bitmap_empty( + const ia_css_kernel_bitmap_t bitmap) +{ + unsigned int i; + bool is_empty = true; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_empty(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + is_empty &= bitmap.data[i] == 0; + } +#else + NOT_USED(i); + is_empty = (bitmap == 0); +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return is_empty; +} + +bool ia_css_is_kernel_bitmap_equal( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + unsigned int i; + bool is_equal = true; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_equal(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + is_equal = is_equal && (bitmap0.data[i] == bitmap1.data[i]); + } +#else + NOT_USED(i); + is_equal = (bitmap0 == bitmap1); +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return is_equal; +} + +bool ia_css_is_kernel_bitmap_onehot( + const ia_css_kernel_bitmap_t bitmap) +{ + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_onehot(): enter:\n"); + return ia_css_kernel_bitmap_compute_weight(bitmap) == 1; +} + +bool ia_css_is_kernel_bitmap_subset( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + ia_css_kernel_bitmap_t intersection; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_subset(): enter:\n"); + + intersection = ia_css_kernel_bitmap_intersection(bitmap0, bitmap1); + return ia_css_is_kernel_bitmap_equal(intersection, bitmap1); +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_clear(void) +{ + unsigned int i; + ia_css_kernel_bitmap_t bitmap; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_clear(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + bitmap.data[i] = 0; + } +#else + NOT_USED(i); + bitmap = 0; +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return bitmap; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_complement( + const ia_css_kernel_bitmap_t bitmap) +{ + unsigned int i; + ia_css_kernel_bitmap_t result; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + 
"ia_css_kernel_bitmap_complement(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + result.data[i] = ~bitmap.data[i]; + } +#else + NOT_USED(i); + result = ~bitmap; +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return result; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_union( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + unsigned int i; + ia_css_kernel_bitmap_t result; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_union(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + result.data[i] = (bitmap0.data[i] | bitmap1.data[i]); + } +#else + NOT_USED(i); + result = (bitmap0 | bitmap1); +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return result; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_intersection( + const ia_css_kernel_bitmap_t bitmap0, + const ia_css_kernel_bitmap_t bitmap1) +{ + unsigned int i; + ia_css_kernel_bitmap_t result; + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_intersection(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + result.data[i] = (bitmap0.data[i] & bitmap1.data[i]); + } +#else + NOT_USED(i); + result = (bitmap0 & bitmap1); +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return result; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_set( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index) +{ + ia_css_kernel_bitmap_t bit_mask; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_set(): enter:\n"); + + bit_mask = ia_css_kernel_bit_mask(index); + return ia_css_kernel_bitmap_union(bitmap, bit_mask); +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_create_from_uint64( + const uint64_t value) +{ + unsigned int i; + ia_css_kernel_bitmap_t result; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_create_from_uint64(): enter:\n"); + +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + result = ia_css_kernel_bitmap_clear(); + for (i = 0; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + /* masking is done implictly, the MSB bits of casting will be chopped off */ + result.data[i] = (IA_CSS_KERNEL_BITMAP_ELEM_TYPE) + (value >> (i * IA_CSS_KERNEL_BITMAP_ELEM_BITS)); + } +#if IA_CSS_KERNEL_BITMAP_BITS < 64 + if ((value >> IA_CSS_KERNEL_BITMAP_BITS) != 0) { + IA_CSS_TRACE_0(PSYSAPI_KERNEL, ERROR, + "ia_css_kernel_bitmap_create_from_uint64(): " + "kernel bitmap is not wide enough to encode value\n"); + assert(0); + } +#endif +#else + NOT_USED(i); + result = value; +#endif /* IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS */ + return result; +} + +uint64_t ia_css_kernel_bitmap_to_uint64( + const ia_css_kernel_bitmap_t value) +{ + const unsigned int bits64 = sizeof(uint64_t) * 8; + const unsigned int nof_elems_bits64 = bits64 / IA_CSS_KERNEL_BITMAP_ELEM_BITS; + unsigned int i; + uint64_t res = 0; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_to_uint64(): enter:\n"); + + assert((bits64 % IA_CSS_KERNEL_BITMAP_ELEM_BITS) == 0); + assert(nof_elems_bits64 > 0); + +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = 0; i < nof_elems_bits64; i++) { + res |= ((uint64_t)(value.data[i]) << (i * IA_CSS_KERNEL_BITMAP_ELEM_BITS)); + } + for (i = nof_elems_bits64; i < IA_CSS_KERNEL_BITMAP_NOF_ELEMS; i++) { + assert(value.data[i] == 0); + } + return res; +#else + (void)i; + (void)res; + (void)nof_elems_bits64; + return (uint64_t)value; +#endif /* 
IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS */ +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_unset( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index) +{ + ia_css_kernel_bitmap_t result; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_unset(): enter:\n"); + + result = ia_css_kernel_bit_mask(index); + result = ia_css_kernel_bitmap_complement(result); + return ia_css_kernel_bitmap_intersection(bitmap, result); +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_set_unique( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index) +{ + ia_css_kernel_bitmap_t ret; + ia_css_kernel_bitmap_t bit_mask; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_set_unique(): enter:\n"); + + ret = ia_css_kernel_bitmap_clear(); + bit_mask = ia_css_kernel_bit_mask(index); + + if (ia_css_is_kernel_bitmap_intersection_empty(bitmap, bit_mask) + && !ia_css_is_kernel_bitmap_empty(bit_mask)) { + ret = ia_css_kernel_bitmap_union(bitmap, bit_mask); + } + return ret; +} + +ia_css_kernel_bitmap_t ia_css_kernel_bit_mask( + const unsigned int index) +{ + unsigned int elem_index; + unsigned int elem_bit_index; + ia_css_kernel_bitmap_t bit_mask = ia_css_kernel_bitmap_clear(); + + /* Assert disabled for staging, because some PGs do not satisfy this condition */ + /* assert(index < IA_CSS_KERNEL_BITMAP_BITS); */ + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bit_mask(): enter:\n"); +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + if (index < IA_CSS_KERNEL_BITMAP_BITS) { + elem_index = index / IA_CSS_KERNEL_BITMAP_ELEM_BITS; + elem_bit_index = index % IA_CSS_KERNEL_BITMAP_ELEM_BITS; + assert(elem_index < IA_CSS_KERNEL_BITMAP_NOF_ELEMS); + + bit_mask.data[elem_index] = 1 << elem_bit_index; + } +#else + NOT_USED(elem_index); + NOT_USED(elem_bit_index); + if (index < IA_CSS_KERNEL_BITMAP_BITS) { + bit_mask = (ia_css_kernel_bitmap_t)1 << index; + } +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return bit_mask; +} + + +static int ia_css_kernel_bitmap_compute_weight( + const ia_css_kernel_bitmap_t bitmap) +{ + ia_css_kernel_bitmap_t loc_bitmap; + int weight = 0; + int i; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_compute_weight(): enter:\n"); + + loc_bitmap = bitmap; + + /* The iterator "i" is not strictly needed; it only bounds the loop */ + for (i = 0; (i < IA_CSS_KERNEL_BITMAP_BITS) && + !ia_css_is_kernel_bitmap_empty(loc_bitmap); i++) { + weight += ia_css_is_kernel_bitmap_set(loc_bitmap, 0); + loc_bitmap = ia_css_kernel_bitmap_shift(loc_bitmap); + } + + return weight; +} + +int ia_css_is_kernel_bitmap_set( + const ia_css_kernel_bitmap_t bitmap, + const unsigned int index) +{ + unsigned int elem_index; + unsigned int elem_bit_index; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_is_kernel_bitmap_set(): enter:\n"); + + /* Assert disabled for staging, because some PGs do not satisfy this condition */ + /* assert(index < IA_CSS_KERNEL_BITMAP_BITS); */ + +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + elem_index = index / IA_CSS_KERNEL_BITMAP_ELEM_BITS; + elem_bit_index = index % IA_CSS_KERNEL_BITMAP_ELEM_BITS; + assert(elem_index < IA_CSS_KERNEL_BITMAP_NOF_ELEMS); + return (((bitmap.data[elem_index] >> elem_bit_index) & 0x1) == 1); +#else + NOT_USED(elem_index); + NOT_USED(elem_bit_index); + return (((bitmap >> index) & 0x1) == 1); +#endif /* IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS */ +} + +ia_css_kernel_bitmap_t ia_css_kernel_bitmap_shift( + const ia_css_kernel_bitmap_t bitmap) +{ + int i; + unsigned int lsb_current_elem = 0; + unsigned int 
lsb_previous_elem = 0; + ia_css_kernel_bitmap_t loc_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, VERBOSE, + "ia_css_kernel_bitmap_shift(): enter:\n"); + + loc_bitmap = bitmap; + +#ifndef IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + for (i = IA_CSS_KERNEL_BITMAP_NOF_ELEMS - 1; i >= 0; i--) { + lsb_current_elem = bitmap.data[i] & 0x01; + loc_bitmap.data[i] >>= 1; + loc_bitmap.data[i] |= (lsb_previous_elem << (IA_CSS_KERNEL_BITMAP_ELEM_BITS - 1)); + lsb_previous_elem = lsb_current_elem; + } +#else + NOT_USED(i); + NOT_USED(lsb_current_elem); + NOT_USED(lsb_previous_elem); + loc_bitmap >>= 1; +#endif /* IA_CSS_KERNEL_BITMAP_USE_ELEMS */ + return loc_bitmap; +} + +int ia_css_kernel_bitmap_print( + const ia_css_kernel_bitmap_t bitmap, + void *fid) +{ + int retval = -1; + int bit; + unsigned int bit_index = 0; + ia_css_kernel_bitmap_t loc_bitmap; + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, INFO, + "ia_css_kernel_bitmap_print(): enter:\n"); + + NOT_USED(fid); + NOT_USED(bit); + + IA_CSS_TRACE_0(PSYSAPI_KERNEL, INFO, "kernel bitmap {\n"); + + loc_bitmap = bitmap; + + for (bit_index = 0; (bit_index < IA_CSS_KERNEL_BITMAP_BITS) && + !ia_css_is_kernel_bitmap_empty(loc_bitmap); bit_index++) { + + bit = ia_css_is_kernel_bitmap_set(loc_bitmap, 0); + loc_bitmap = ia_css_kernel_bitmap_shift(loc_bitmap); + IA_CSS_TRACE_2(PSYSAPI_KERNEL, INFO, "\t%d\t = %d\n", bit_index, bit); + } + IA_CSS_TRACE_0(PSYSAPI_KERNEL, INFO, "}\n"); + + retval = 0; + return retval; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.h new file mode 100644 index 000000000000..8295f3892f86 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.h @@ -0,0 +1,296 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_PARAM_H +#define __IA_CSS_PROGRAM_GROUP_PARAM_H + +/*! \file */ + +/** @file ia_css_program_group_param.h + * + * Define the methods on the program group parameter object that are not part + * of a single interface + */ +#include + +#include + +#include /* ia_css_kernel_bitmap_t */ + +#include + +/*! Get the stored size of the program group parameter object + + @param param[in] program group parameter object + + @return size, 0 on error + */ +extern size_t ia_css_program_group_param_get_size( + const ia_css_program_group_param_t *param); + +/*! Initialize program_group_param + + @param blob[in] program group parameter object + @param program_count[in] number of programs. + @param terminal_count[in] number of terminals. + @param fragment_count[in] number of fragments. + @param frame_format_types[in] frame format type for each terminal. + + @return 0 if success, else failure. + */ +extern int ia_css_program_group_param_init( + ia_css_program_group_param_t *blob, + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count, + const enum ia_css_frame_format_type *frame_format_types); +/*! 
Get the program parameter object from a program group parameter object + + @param program_group_param[in] program group parameter object + @param i[in] program parameter index + + @return program parameter pointer, NULL on error + */ +extern ia_css_program_param_t *ia_css_program_group_param_get_program_param( + const ia_css_program_group_param_t *param, + const int i); + +/*! Get the terminal parameter object from a program group parameter object + + @param program_group_param[in] program group parameter object + @param i[in] terminal parameter index + + @return terminal parameter pointer, NULL on error + */ +extern ia_css_terminal_param_t *ia_css_program_group_param_get_terminal_param( + const ia_css_program_group_param_t *param, + const int i); + +/*! Get the fragment count from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return fragment count, 0 on error + */ +extern uint16_t ia_css_program_group_param_get_fragment_count( + const ia_css_program_group_param_t *param); + +/*! Get the program count from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return program count, 0 on error + */ +extern uint8_t ia_css_program_group_param_get_program_count( + const ia_css_program_group_param_t *param); + +/*! Get the terminal count from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return terminal count, 0 on error + */ +extern uint8_t ia_css_program_group_param_get_terminal_count( + const ia_css_program_group_param_t *param); + +/*! Set the protocol version in a program group parameter object + + @param program_group_param[in] program group parameter object + @param protocol_version[in] protocol version + + @return nonzero on error +*/ +extern int +ia_css_program_group_param_set_protocol_version( + ia_css_program_group_param_t *param, + uint8_t protocol_version); + +/*! Get the protocol version from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return protocol version +*/ +extern uint8_t +ia_css_program_group_param_get_protocol_version( + const ia_css_program_group_param_t *param); + +/*! Set the kernel enable bitmap from a program group parameter object + + @param param[in] program group parameter object + @param bitmap[in] kernel enable bitmap + + @return non-zero on error + */ +extern int ia_css_program_group_param_set_kernel_enable_bitmap( + ia_css_program_group_param_t *param, + const ia_css_kernel_bitmap_t bitmap); + +/*! Get the kernel enable bitmap from a program group parameter object + + @param program_group_param[in] program group parameter object + + @return kernel enable bitmap, 0 on error +*/ +extern ia_css_kernel_bitmap_t +ia_css_program_group_param_get_kernel_enable_bitmap( + const ia_css_program_group_param_t *param); + +/*! Get the stored size of the program parameter object + + @param param[in] program parameter object + + @return size, 0 on error + */ +extern size_t ia_css_program_param_get_size( + const ia_css_program_param_t *param); + +/*! Set the kernel enable bitmap from a program parameter object + + @param program_param[in] program parameter object + @param bitmap[in] kernel enable bitmap + + @return non-zero on error + */ +extern int ia_css_program_param_set_kernel_enable_bitmap( + ia_css_program_param_t *program_param, + const ia_css_kernel_bitmap_t bitmap); + +/*! 
Get the kernel enable bitmap from a program parameter object
+
+ @param	program_param[in]	program parameter object
+
+ Note: this function in fact returns the kernel enable bitmap of the
+ parent program group parameters
+
+ @return kernel enable bitmap, 0 on error
+ */
+extern ia_css_kernel_bitmap_t ia_css_program_param_get_kernel_enable_bitmap(
+	const ia_css_program_param_t *param);
+
+/*! Get the stored size of the terminal parameter object
+
+ @param	param[in]	terminal parameter object
+
+ @return size, 0 on error
+ */
+extern size_t ia_css_terminal_param_get_size(
+	const ia_css_terminal_param_t *param);
+
+/*! Get the kernel enable bitmap from a terminal parameter object
+
+ @param	terminal_param[in]	terminal parameter object
+
+ Note: this function in fact returns the kernel enable bitmap of the
+ parent program group parameters
+
+ @return kernel enable bitmap, 0 on error
+ */
+extern ia_css_kernel_bitmap_t ia_css_terminal_param_get_kernel_enable_bitmap(
+	const ia_css_terminal_param_t *param);
+
+/*! Get the parent object for this terminal param.
+
+ @param	terminal_param[in]	terminal parameter object
+
+ @return parent program group param object
+ */
+extern ia_css_program_group_param_t *ia_css_terminal_param_get_parent(
+	const ia_css_terminal_param_t *param);
+
+/*! Get the data format type associated with the terminal.
+
+ @param	terminal_param[in]	terminal parameter object
+
+ @return data format type (ia_css_frame_format_type_t)
+ */
+extern ia_css_frame_format_type_t ia_css_terminal_param_get_frame_format_type(
+	const ia_css_terminal_param_t *terminal_param);
+
+/*! Set the data format type associated with the terminal.
+
+ @param	terminal_param[in]	terminal parameter object
+ @param	data_format_type[in]	data format type
+
+ @return non-zero on error.
+ */
+extern int ia_css_terminal_param_set_frame_format_type(
+	ia_css_terminal_param_t *terminal_param,
+	const ia_css_frame_format_type_t data_format_type);
+
+/*! Get bits per pixel on the frame associated with the terminal.
+
+ @param	terminal_param[in]	terminal parameter object
+
+ @return bits per pixel
+ */
+extern uint8_t ia_css_terminal_param_get_bpp(
+	const ia_css_terminal_param_t *terminal_param);
+
+/*! Set bits per pixel on the frame associated with the terminal.
+
+ @param	terminal_param[in]	terminal parameter object
+ @param	bpp[in]	bits per pixel
+
+ @return non-zero on error.
+ */
+extern int ia_css_terminal_param_set_bpp(
+	ia_css_terminal_param_t *terminal_param,
+	const uint8_t bpp);
+
+/*! Get dimensions on the frame associated with the terminal.
+
+ @param	terminal_param[in]	terminal parameter object
+ @param	dimensions[out]	dimension array
+
+ @return non-zero on error.
+ */
+extern int ia_css_terminal_param_get_dimensions(
+	const ia_css_terminal_param_t *terminal_param,
+	uint16_t dimensions[IA_CSS_N_DATA_DIMENSION]);
+
+/*! Set dimensions on the frame associated with the terminal.
+
+ @param	terminal_param[in]	terminal parameter object
+ @param	dimensions[in]	dimension array
+
+ @return non-zero on error.
+ */
+extern int ia_css_terminal_param_set_dimensions(
+	ia_css_terminal_param_t *terminal_param,
+	const uint16_t dimensions[IA_CSS_N_DATA_DIMENSION]);
+
+/*! Get stride on the frame associated with the terminal.
+
+ @param	terminal_param[in]	terminal parameter object
+
+ @return stride of the frame to be attached.
+ */
+extern uint32_t ia_css_terminal_param_get_stride(
+	const ia_css_terminal_param_t *terminal_param);
+
+/*! Set stride on the frame associated with the terminal.
+ + @param terminal_param[in] terminal parameter object + @param stride[in] stride + + @return non-zero on error. + */ +extern int ia_css_terminal_param_set_stride( + ia_css_terminal_param_t *terminal_param, + const uint32_t stride); + +#endif /* __IA_CSS_PROGRAM_GROUP_PARAM_H */ + + + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.sim.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.sim.h new file mode 100644 index 000000000000..7821f8147a1a --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param.sim.h @@ -0,0 +1,153 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_PARAM_SIM_H +#define __IA_CSS_PROGRAM_GROUP_PARAM_SIM_H + +/*! \file */ + +/** @file ia_css_program_group_param.sim.h + * + * Define the methods on the program group parameter object: Simulation only + */ +#include + +#include + +#include + +/* Simulation */ + +/*! Create a program group parameter object from specification + + @param specification[in] specification (index) + @param manifest[in] program group manifest + + @return NULL on error + */ +extern ia_css_program_group_param_t *ia_css_program_group_param_create( + const unsigned int specification, + const ia_css_program_group_manifest_t *manifest); + +/*! Destroy the program group parameter object + + @param program_group_param[in] program group parameter object + + @return NULL + */ +extern ia_css_program_group_param_t *ia_css_program_group_param_destroy( + ia_css_program_group_param_t *param); + +/*! Compute the size of storage required for allocating + * the program group parameter object + + @param program_count[in] Number of programs in the process group + @param terminal_count[in] Number of terminals on the process group + @param fragment_count[in] Number of fragments on the terminals of + the process group + + @return 0 on error + */ +size_t ia_css_sizeof_program_group_param( + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count); + +/*! Allocate (the store of) a program group parameter object + + @param program_count[in] Number of programs in the process group + @param terminal_count[in] Number of terminals on the process group + @param fragment_count[in] Number of fragments on the terminals of + the process group + + @return program group parameter pointer, NULL on error + */ +extern ia_css_program_group_param_t *ia_css_program_group_param_alloc( + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count); + +/*! Free (the store of) a program group parameter object + + @param program_group_param[in] program group parameter object + + @return NULL + */ +extern ia_css_program_group_param_t *ia_css_program_group_param_free( + ia_css_program_group_param_t *param); + +/*! 
Print the program group parameter object to file/stream + + @param param[in] program group parameter object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_program_group_param_print( + const ia_css_program_group_param_t *param, + void *fid); + +/*! Allocate (the store of) a program parameter object + + @return program parameter pointer, NULL on error + */ +extern ia_css_program_param_t *ia_css_program_param_alloc(void); + +/*! Free (the store of) a program parameter object + + @param param[in] program parameter object + + @return NULL + */ +extern ia_css_program_param_t *ia_css_program_param_free( + ia_css_program_param_t *param); + +/*! Print the program parameter object to file/stream + + @param param[in] program parameter object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_program_param_print( + const ia_css_program_param_t *param, + void *fid); + +/*! Allocate (the store of) a terminal parameter object + + @return terminal parameter pointer, NULL on error + */ +extern ia_css_terminal_param_t *ia_css_terminal_param_alloc(void); + +/*! Free (the store of) a terminal parameter object + + @param param[in] terminal parameter object + + @return NULL + */ +extern ia_css_terminal_param_t *ia_css_terminal_param_free( + ia_css_terminal_param_t *param); + +/*! Print the terminal parameter object to file/stream + + @param param[in] terminal parameter object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_terminal_param_print( + const ia_css_terminal_param_t *param, + void *fid); + +#endif /* __IA_CSS_PROGRAM_GROUP_PARAM_SIM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param_types.h new file mode 100644 index 000000000000..d61b94cfb6bc --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_program_group_param_types.h @@ -0,0 +1,67 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_PARAM_TYPES_H +#define __IA_CSS_PROGRAM_GROUP_PARAM_TYPES_H + +/*! \file */ + +/** @file ia_css_program_group_param_types.h + * + * Define the parameter objects that are necessary to create the process + * groups i.e. enable parameters and parameters to set-up frame descriptors + */ + +#include +#include /* ia_css_kernel_bitmap_t */ +#include + +#include +/*! make this public so that driver can populate, + * size, bpp, dimensions for all terminals. + * + * Currently one API is provided to get frame_format_type. + * + * frame_format_type is set during ia_css_terminal_param_init(). + * Value for that is const and binary specific. 
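 *
 * The remaining descriptor fields (dimensions, bpp, bpe, stride, offset)
 * are the ones the driver is expected to populate, per terminal, before
 * process group creation.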
+ */
+struct ia_css_terminal_param_s {
+	uint32_t size; /**< Size of this structure */
+	/** Indicates whether this is a generic type or a built-in
+	 * type with a variable-size descriptor
+	 */
+	ia_css_frame_format_type_t frame_format_type;
+	/** Offset to add to reach the parent. This is a negative value. */
+	int32_t parent_offset;
+	uint16_t dimensions[IA_CSS_N_DATA_DIMENSION];/**< Logical dimensions */
+	/** Mapping to the index field of the terminal descriptor */
+	uint16_t index[IA_CSS_N_DATA_DIMENSION];
+	/** Logical fragment dimensions;
+	 * TODO: fragment dimensions can be different per fragment
+	 */
+	uint16_t fragment_dimensions[IA_CSS_N_DATA_DIMENSION];
+	uint32_t stride;/**< Stride of a frame */
+	uint16_t offset;/**< Offset in bytes to first fragment */
+	uint8_t bpp; /**< Bits per pixel */
+	uint8_t bpe; /**< Bits per element */
+};
+
+typedef struct ia_css_program_group_param_s ia_css_program_group_param_t;
+typedef struct ia_css_program_param_s ia_css_program_param_t;
+typedef struct ia_css_terminal_param_s ia_css_terminal_param_t;
+
+#endif /* __IA_CSS_PROGRAM_GROUP_PARAM_TYPES_H */
+
+
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_psys_param_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_psys_param_trace.h
new file mode 100644
index 000000000000..f59dfbf165e4
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/interface/ia_css_psys_param_trace.h
@@ -0,0 +1,102 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_PARAM_TRACE_H
+#define __IA_CSS_PSYS_PARAM_TRACE_H
+
+#include "ia_css_psysapi_trace.h"
+
+#define PSYS_PARAM_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF
+
+/* Default sub-module tracing config */
+#if (!defined(PSYSAPI_PARAM_TRACING_OVERRIDE))
+	#define PSYS_PARAM_TRACE_LEVEL_CONFIG PSYS_PARAM_TRACE_LEVEL_CONFIG_DEFAULT
+#endif
+
+/* The sub-module specific trace settings below are used when
+ * PSYSAPI_PARAM_TRACING_OVERRIDE is defined; otherwise the
+ * module-level trace settings are inherited.
+ */
+#if (defined(PSYSAPI_PARAM_TRACING_OVERRIDE))
+	/* Module/sub-module specific trace setting */
+	#if PSYSAPI_PARAM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF
+		/* PSYSAPI_TRACE_LOG_LEVEL_OFF */
+		#define PSYSAPI_PARAM_TRACE_METHOD \
+			IA_CSS_TRACE_METHOD_NATIVE
+		#define PSYSAPI_PARAM_TRACE_LEVEL_ASSERT \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_ERROR \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_WARNING \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_INFO \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_DEBUG \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_VERBOSE \
+			IA_CSS_TRACE_LEVEL_DISABLED
+	#elif PSYSAPI_PARAM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL
+		/* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */
+		#define PSYSAPI_PARAM_TRACE_METHOD \
+			IA_CSS_TRACE_METHOD_NATIVE
+		#define PSYSAPI_PARAM_TRACE_LEVEL_ASSERT \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_ERROR \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_WARNING \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_INFO \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_DEBUG \
+			IA_CSS_TRACE_LEVEL_DISABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_VERBOSE \
+			IA_CSS_TRACE_LEVEL_DISABLED
+	#elif PSYSAPI_PARAM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG
+		/* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */
+		#define PSYSAPI_PARAM_TRACE_METHOD \
+			IA_CSS_TRACE_METHOD_NATIVE
+		#define PSYSAPI_PARAM_TRACE_LEVEL_ASSERT \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_ERROR \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_WARNING \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_INFO \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_DEBUG \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_PARAM_TRACE_LEVEL_VERBOSE \
+			IA_CSS_TRACE_LEVEL_ENABLED
+	#else
+		#error "No PSYSAPI_PARAM tracing level defined"
+	#endif
+#else
+	/* Inherit module trace settings */
+	#define PSYSAPI_PARAM_TRACE_METHOD \
+		PSYSAPI_TRACE_METHOD
+	#define PSYSAPI_PARAM_TRACE_LEVEL_ASSERT \
+		PSYSAPI_TRACE_LEVEL_ASSERT
+	#define PSYSAPI_PARAM_TRACE_LEVEL_ERROR \
+		PSYSAPI_TRACE_LEVEL_ERROR
+	#define PSYSAPI_PARAM_TRACE_LEVEL_WARNING \
+		PSYSAPI_TRACE_LEVEL_WARNING
+	#define PSYSAPI_PARAM_TRACE_LEVEL_INFO \
+		PSYSAPI_TRACE_LEVEL_INFO
+	#define PSYSAPI_PARAM_TRACE_LEVEL_DEBUG \
+		PSYSAPI_TRACE_LEVEL_DEBUG
+	#define PSYSAPI_PARAM_TRACE_LEVEL_VERBOSE \
+		PSYSAPI_TRACE_LEVEL_VERBOSE
+#endif
+
+#endif /* __IA_CSS_PSYS_PARAM_TRACE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param.c
new file mode 100644
index 000000000000..7cffb012ab39
--- /dev/null
+++
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param.c @@ -0,0 +1,772 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ia_css_psys_param_trace.h" + +static int +ia_css_terminal_param_init(ia_css_terminal_param_t *terminal_param, + uint32_t offset, + enum ia_css_frame_format_type frame_format_type); + +static int +ia_css_program_param_init(ia_css_program_param_t *program_param, + int32_t offset); + +size_t ia_css_sizeof_program_group_param( + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_sizeof_program_group_param(): enter:\n"); + + verifexit(program_count != 0); + verifexit(terminal_count != 0); + verifexit(fragment_count != 0); + + size += sizeof(ia_css_program_group_param_t); + size += program_count * fragment_count * sizeof(ia_css_program_param_t); + size += terminal_count * sizeof(ia_css_terminal_param_t); +EXIT: + if (0 == program_count || 0 == terminal_count || 0 == fragment_count) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_sizeof_program_group_param invalid argument\n"); + } + return size; +} + +size_t ia_css_program_group_param_get_size( + const ia_css_program_group_param_t *program_group_param) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_size(): enter:\n"); + + if (program_group_param != NULL) { + size = program_group_param->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_size invalid argument\n"); + } + return size; +} + +size_t ia_css_program_param_get_size( + const ia_css_program_param_t *param) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_param_get_size(): enter:\n"); + + if (param != NULL) { + size = param->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_param_get_size invalid argument\n"); + } + return size; +} + +ia_css_program_param_t *ia_css_program_group_param_get_program_param( + const ia_css_program_group_param_t *param, + const int i) +{ + ia_css_program_param_t *program_param = NULL; + ia_css_program_param_t *program_param_base; + int program_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_program_param(): enter:\n"); + + verifexit(param != NULL); + + program_count = + (int)ia_css_program_group_param_get_program_count(param); + + verifexit(i < program_count); + + program_param_base = (ia_css_program_param_t *) + (((char *)param) + param->program_param_offset); + + program_param = &program_param_base[i]; + +EXIT: + if (NULL == param || i >= program_count) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_program_param invalid argument\n"); + } + return program_param; +} + +size_t ia_css_terminal_param_get_size( + const 
ia_css_terminal_param_t *param)
+{
+	size_t size = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE,
+		"ia_css_terminal_param_get_size(): enter:\n");
+
+	if (param != NULL) {
+		size = param->size;
+	} else {
+		IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING,
+			"ia_css_terminal_param_get_size invalid argument\n");
+	}
+
+	return size;
+}
+
+ia_css_terminal_param_t *ia_css_program_group_param_get_terminal_param(
+	const ia_css_program_group_param_t *param,
+	const int i)
+{
+	ia_css_terminal_param_t *terminal_param = NULL;
+	ia_css_terminal_param_t *terminal_param_base;
+	int terminal_count = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE,
+		"ia_css_program_group_param_get_terminal_param(): enter:\n");
+
+	verifexit(param != NULL);
+
+	terminal_count =
+		(int)ia_css_program_group_param_get_terminal_count(param);
+
+	verifexit(i < terminal_count);
+
+	terminal_param_base = (ia_css_terminal_param_t *)
+		(((char *)param) + param->terminal_param_offset);
+	terminal_param = &terminal_param_base[i];
+EXIT:
+	if (NULL == param || i >= terminal_count) {
+		IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING,
+			"ia_css_program_group_param_get_terminal_param invalid argument\n");
+	}
+	return terminal_param;
+}
+
+uint8_t ia_css_program_group_param_get_program_count(
+	const ia_css_program_group_param_t *param)
+{
+	uint8_t program_count = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE,
+		"ia_css_program_group_param_get_program_count(): enter:\n");
+
+	if (param != NULL) {
+		program_count = param->program_count;
+	} else {
+		IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING,
+			"ia_css_program_group_param_get_program_count invalid argument\n");
+	}
+	return program_count;
+}
+
+uint8_t ia_css_program_group_param_get_terminal_count(
+	const ia_css_program_group_param_t *param)
+{
+	uint8_t terminal_count = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE,
+		"ia_css_program_group_param_get_terminal_count(): enter:\n");
+
+	if (param != NULL) {
+		terminal_count = param->terminal_count;
+	} else {
+		IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING,
+			"ia_css_program_group_param_get_terminal_count invalid argument\n");
+	}
+	return terminal_count;
+}
+
+uint16_t ia_css_program_group_param_get_fragment_count(
+	const ia_css_program_group_param_t *param)
+{
+	uint16_t fragment_count = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE,
+		"ia_css_program_group_param_get_fragment_count(): enter:\n");
+
+	if (param != NULL) {
+		fragment_count = param->fragment_count;
+	} else {
+		IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING,
+			"ia_css_program_group_param_get_fragment_count invalid argument\n");
+	}
+	return fragment_count;
+}
+
+int ia_css_program_group_param_set_protocol_version(
+	ia_css_program_group_param_t *param,
+	uint8_t protocol_version)
+{
+	int retval = -1;
+
+	IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE,
+		"ia_css_program_group_param_set_protocol_version(): enter:\n");
+
+	if (param != NULL) {
+		param->protocol_version = protocol_version;
+		retval = 0;
+	} else {
+		IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR,
+			"ia_css_program_group_param_set_protocol_version failed (%i)\n",
+			retval);
+	}
+	return retval;
+}
+
+uint8_t ia_css_program_group_param_get_protocol_version(
+	const ia_css_program_group_param_t *param)
+{
+	uint8_t protocol_version = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE,
+		"ia_css_program_group_param_get_protocol_version(): enter:\n");
+
+	if (param != NULL) {
+		protocol_version = param->protocol_version;
+	} else {
+		IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING,
+			"ia_css_program_group_param_get_protocol_version invalid argument\n");
+	}
+	return protocol_version;
+}
+
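+/*
+ * Usage sketch (illustrative only, never compiled): a host typically sizes
+ * the parameter blob with ia_css_sizeof_program_group_param(), allocates
+ * backing storage, and initializes it in place. The malloc()/free() calls
+ * and the zero frame format values below are assumptions for the example;
+ * real hosts use their platform's memory services and real format types
+ * (simulation builds may use ia_css_program_group_param_alloc() instead).
+ */
+#if 0
+static ia_css_program_group_param_t *example_param_blob_create(void)
+{
+	const uint8_t program_count = 2;
+	const uint8_t terminal_count = 2;
+	const uint16_t fragment_count = 1;
+	enum ia_css_frame_format_type formats[2];
+	ia_css_program_group_param_t *blob;
+	size_t size;
+
+	/* One frame format type must be supplied per terminal */
+	formats[0] = (enum ia_css_frame_format_type)0; /* placeholder value */
+	formats[1] = (enum ia_css_frame_format_type)0; /* placeholder value */
+
+	size = ia_css_sizeof_program_group_param(program_count,
+						 terminal_count,
+						 fragment_count);
+	if (size == 0)
+		return NULL;
+
+	blob = (ia_css_program_group_param_t *)malloc(size);
+	if (blob == NULL)
+		return NULL;
+
+	if (ia_css_program_group_param_init(blob, program_count,
+					    terminal_count, fragment_count,
+					    formats) != 0) {
+		free(blob);
+		return NULL;
+	}
+	return blob;
+}
+#endif
+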
+int ia_css_program_group_param_set_kernel_enable_bitmap( + ia_css_program_group_param_t *param, + const ia_css_kernel_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_set_kernel_enable_bitmap(): enter:\n"); + + if (param != NULL) { + param->kernel_enable_bitmap = bitmap; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_group_param_set_kernel_enable_bitmap failed (%i)\n", + retval); + } + return retval; +} + +ia_css_kernel_bitmap_t ia_css_program_group_param_get_kernel_enable_bitmap( + const ia_css_program_group_param_t *param) +{ + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_group_param_get_kernel_enable_bitmap(): enter:\n"); + + if (param != NULL) { + bitmap = param->kernel_enable_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_group_param_get_kernel_enable_bitmap invalid argument\n"); + } + return bitmap; +} + +int ia_css_program_param_set_kernel_enable_bitmap( + ia_css_program_param_t *program_param, + const ia_css_kernel_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_param_set_kernel_enable_bitmap(): enter:\n"); + + if (program_param != NULL) { + program_param->kernel_enable_bitmap = bitmap; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_param_set_kernel_enable_bitmap failed (%i)\n", + retval); + } + return retval; +} + +ia_css_kernel_bitmap_t ia_css_program_param_get_kernel_enable_bitmap( + const ia_css_program_param_t *program_param) +{ + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + char *base; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_program_param_get_kernel_enable_bitmap(): enter:\n"); + + verifexit(program_param != NULL); + verifexit(program_param->parent_offset != 0); + + base = (char *)((char *)program_param + program_param->parent_offset); + bitmap = ((ia_css_program_group_param_t *)base)->kernel_enable_bitmap; +EXIT: + if (NULL == program_param || 0 == program_param->parent_offset) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_program_param_get_kernel_enable_bitmap invalid argument\n"); + } + return bitmap; +} + +ia_css_kernel_bitmap_t ia_css_terminal_param_get_kernel_enable_bitmap( + const ia_css_terminal_param_t *param) +{ + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + char *base; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_kernel_enable_bitmap(): enter:\n"); + + verifexit(param != NULL); + verifexit(param->parent_offset != 0); + + base = (char *)((char *)param + param->parent_offset); + bitmap = ((ia_css_program_group_param_t *)base)->kernel_enable_bitmap; +EXIT: + if (NULL == param || 0 == param->parent_offset) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_kernel_enable_bitmap invalid argument\n"); + } + return bitmap; +} + +ia_css_frame_format_type_t ia_css_terminal_param_get_frame_format_type( + const ia_css_terminal_param_t *param) +{ + ia_css_frame_format_type_t ft = IA_CSS_N_FRAME_FORMAT_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_frame_format_type(): enter:\n"); + + verifexit(param != NULL); + + ft = param->frame_format_type; +EXIT: + if (NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_frame_format_type invalid argument\n"); + } + return ft; +} + +int ia_css_terminal_param_set_frame_format_type( + 
ia_css_terminal_param_t *param, + const ia_css_frame_format_type_t data_format_type) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_set_frame_format_type(): enter:\n"); + + if (param != NULL) { + param->frame_format_type = data_format_type; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_set_frame_format_type failed (%i)\n", + retval); + } + return retval; +} + +uint8_t ia_css_terminal_param_get_bpp( + const ia_css_terminal_param_t *param) +{ + uint8_t bpp = 0; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_bpp(): enter:\n"); + + verifexit(param != NULL); + + bpp = param->bpp; + +EXIT: + if (NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_bpp invalid argument\n"); + } + return bpp; +} + +int ia_css_terminal_param_set_bpp( + ia_css_terminal_param_t *param, + const uint8_t bpp) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_set_bpp(): enter:\n"); + + if (param != NULL) { + param->bpp = bpp; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_set_bpp failed (%i)\n", retval); + } + return retval; +} + +int ia_css_terminal_param_get_dimensions( + const ia_css_terminal_param_t *param, + uint16_t dimensions[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_dimensions(): enter:\n"); + + if (param != NULL) { + dimensions[IA_CSS_COL_DIMENSION] = + param->dimensions[IA_CSS_COL_DIMENSION]; + dimensions[IA_CSS_ROW_DIMENSION] = + param->dimensions[IA_CSS_ROW_DIMENSION]; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_get_dimensions failed (%i)\n", retval); + } + return retval; +} + +int ia_css_terminal_param_set_dimensions( + ia_css_terminal_param_t *param, + const uint16_t dimensions[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_set_dimensions(): enter:\n"); + + if (param != NULL) { + param->dimensions[IA_CSS_COL_DIMENSION] = + dimensions[IA_CSS_COL_DIMENSION]; + param->dimensions[IA_CSS_ROW_DIMENSION] = + dimensions[IA_CSS_ROW_DIMENSION]; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_set_dimensions failed (%i)\n", retval); + } + return retval; +} + +int ia_css_terminal_param_set_stride( + ia_css_terminal_param_t *param, + const uint32_t stride) +{ + int retval = -1; + + verifexit(param != NULL); + param->stride = stride; + retval = 0; + +EXIT: + return retval; +} + +uint32_t ia_css_terminal_param_get_stride( + const ia_css_terminal_param_t *param) +{ + uint32_t stride = 0; + + verifexit(param != NULL); + stride = param->stride; + +EXIT: + return stride; +} + + +static int ia_css_program_param_init( + ia_css_program_param_t *program_param, + int32_t offset) +{ + int retval = -1; + + COMPILATION_ERROR_IF( + SIZE_OF_PROGRAM_PARAM_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_param_t))); + verifexit(program_param != NULL); + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_program_param_init(): enter:\n"); + + program_param->size = sizeof(ia_css_program_param_t); + /* parent is at negative offset from current program.*/ + program_param->parent_offset = -offset; + /*TODO: Kernel_bitmap setting. 
?*/ + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_param_init failed (%i)\n", retval); + } + return retval; +} + +static int +ia_css_terminal_param_init(ia_css_terminal_param_t *terminal_param, + uint32_t offset, + enum ia_css_frame_format_type frame_format_type) +{ + int retval = -1; + + COMPILATION_ERROR_IF( + SIZE_OF_TERMINAL_PARAM_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_terminal_param_t))); + verifexit(terminal_param != NULL); + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_terminal_param_init(): enter:\n"); + + terminal_param->size = sizeof(ia_css_terminal_param_t); + /* parent is at negative offset from current program.*/ + terminal_param->parent_offset = -((int32_t)offset); + /*TODO: Kernel_bitmap setting. ?*/ + terminal_param->frame_format_type = frame_format_type; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_init failed (%i)\n", retval); + } + return retval; +} + +ia_css_program_group_param_t * +ia_css_terminal_param_get_parent( + const ia_css_terminal_param_t *param) +{ + ia_css_program_group_param_t *parent = NULL; + char *base; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, VERBOSE, + "ia_css_terminal_param_get_parent(): enter:\n"); + + verifexit(NULL != param); + + base = (char *)((char *)param + param->parent_offset); + + parent = (ia_css_program_group_param_t *)(base); +EXIT: + if (NULL == param) { + IA_CSS_TRACE_0(PSYSAPI_PARAM, WARNING, + "ia_css_terminal_param_get_parent invalid argument\n"); + } + return parent; +} + +int ia_css_program_group_param_init( + ia_css_program_group_param_t *blob, + const uint8_t program_count, + const uint8_t terminal_count, + const uint16_t fragment_count, + const enum ia_css_frame_format_type *frame_format_types) +{ + int i = 0; + char *param_base; + uint32_t offset; + int retval = -1; + + COMPILATION_ERROR_IF( + SIZE_OF_PROGRAM_GROUP_PARAM_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_group_param_t))); + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_program_group_param_init(): enter:\n"); + + assert(blob != 0); + + verifexit(blob != NULL); + verifexit(frame_format_types != NULL); + + blob->program_count = program_count; + blob->fragment_count = fragment_count; + blob->terminal_count = terminal_count; + blob->program_param_offset = sizeof(ia_css_program_group_param_t); + blob->terminal_param_offset = blob->program_param_offset + + sizeof(ia_css_program_param_t) * program_count; + + param_base = (char *)((char *)blob + blob->program_param_offset); + offset = blob->program_param_offset; + + for (i = 0; i < program_count; i++) { + ia_css_program_param_init( + (ia_css_program_param_t *)param_base, offset); + offset += sizeof(ia_css_program_param_t); + param_base += sizeof(ia_css_program_param_t); + } + + param_base = (char *)((char *)blob + blob->terminal_param_offset); + offset = blob->terminal_param_offset; + + for (i = 0; i < terminal_count; i++) { + ia_css_terminal_param_init( + (ia_css_terminal_param_t *)param_base, + offset, + frame_format_types[i]); + + offset += sizeof(ia_css_terminal_param_t); + param_base += sizeof(ia_css_terminal_param_t); + } + + /* + * For now, set legacy flow by default. This can be removed as soon + * as all hosts/drivers explicitly set the protocol version. 
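	 * Hosts can do so through
	 * ia_css_program_group_param_set_protocol_version().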
+ */ + blob->protocol_version = IA_CSS_PROCESS_GROUP_PROTOCOL_LEGACY; + + blob->size = (uint32_t)ia_css_sizeof_program_group_param(program_count, + terminal_count, + fragment_count); + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_group_param_init failed (%i)\n", retval); + } + return retval; +} + +int ia_css_program_group_param_print( + const ia_css_program_group_param_t *param, + void *fid) +{ + int retval = -1; + int i; + uint8_t program_count, terminal_count; + ia_css_kernel_bitmap_t bitmap; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_program_group_param_print(): enter:\n"); + + verifexit(param != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, + "sizeof(program_group_param) = %d\n", + (int)ia_css_program_group_param_get_size(param)); + + program_count = ia_css_program_group_param_get_program_count(param); + terminal_count = ia_css_program_group_param_get_terminal_count(param); + + bitmap = ia_css_program_group_param_get_kernel_enable_bitmap(param); + verifexit(ia_css_kernel_bitmap_print(bitmap, fid) == 0); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, + "%d program params\n", (int)program_count); + for (i = 0; i < (int)program_count; i++) { + ia_css_program_param_t *program_param = + ia_css_program_group_param_get_program_param(param, i); + + retval = ia_css_program_param_print(program_param, fid); + verifjmpexit(retval == 0); + } + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, "%d terminal params\n", + (int)terminal_count); + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_param_t *terminal_param = + ia_css_program_group_param_get_terminal_param(param, i); + + retval = ia_css_terminal_param_print(terminal_param, fid); + verifjmpexit(retval == 0); + } + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_group_param_print failed (%i)\n", retval); + } + return retval; +} + +int ia_css_terminal_param_print( + const ia_css_terminal_param_t *param, + void *fid) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_terminal_param_print(): enter:\n"); + + verifexit(param != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, + "sizeof(terminal_param) = %d\n", + (int)ia_css_terminal_param_get_size(param)); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, + "\tframe_format_type = %d\n", param->frame_format_type); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_terminal_param_print failed (%i)\n", retval); + } + return retval; +} + +int ia_css_program_param_print( + const ia_css_program_param_t *param, + void *fid) +{ + int retval = -1; + ia_css_kernel_bitmap_t bitmap; + + IA_CSS_TRACE_0(PSYSAPI_PARAM, INFO, + "ia_css_program_param_print(): enter:\n"); + + verifexit(param != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_PARAM, INFO, "sizeof(program_param) = %d\n", + (int)ia_css_program_param_get_size(param)); + + bitmap = ia_css_program_param_get_kernel_enable_bitmap(param); + verifexit(ia_css_kernel_bitmap_print(bitmap, fid) == 0); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_PARAM, ERROR, + "ia_css_program_param_print failed (%i)\n", retval); + } + return retval; +} + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param_private.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param_private.h new file mode 100644 index 000000000000..6672737e51a1 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/param/src/ia_css_program_group_param_private.h @@ -0,0 +1,80 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PROGRAM_GROUP_PARAM_PRIVATE_H +#define __IA_CSS_PROGRAM_GROUP_PARAM_PRIVATE_H + +#include +#include +#include +#include +#include +#include +#include + +#define N_PADDING_UINT8_IN_PROGRAM_GROUP_PARAM_STRUCT 7 +#define SIZE_OF_PROGRAM_GROUP_PARAM_STRUCT_IN_BITS \ + (IA_CSS_KERNEL_BITMAP_BITS \ + + (3 * IA_CSS_UINT32_T_BITS) \ + + IA_CSS_UINT16_T_BITS \ + + (3 * IA_CSS_UINT8_T_BITS) \ + + (N_PADDING_UINT8_IN_PROGRAM_GROUP_PARAM_STRUCT * IA_CSS_UINT8_T_BITS)) + +/* tentative; co-design with ISP algorithm */ +struct ia_css_program_group_param_s { + /* The enable bits for each individual kernel */ + ia_css_kernel_bitmap_t kernel_enable_bitmap; + /* Size of this structure */ + uint32_t size; + uint32_t program_param_offset; + uint32_t terminal_param_offset; + /* Number of (explicit) fragments to use in a frame */ + uint16_t fragment_count; + /* Number of active programs */ + uint8_t program_count; + /* Number of active terminals */ + uint8_t terminal_count; + /* Program group protocol version */ + uint8_t protocol_version; + uint8_t padding[N_PADDING_UINT8_IN_PROGRAM_GROUP_PARAM_STRUCT]; +}; + +#define SIZE_OF_PROGRAM_PARAM_STRUCT_IN_BITS \ + (IA_CSS_KERNEL_BITMAP_BITS \ + + IA_CSS_UINT32_T_BITS \ + + IA_CSS_INT32_T_BITS) + +/* private */ +struct ia_css_program_param_s { + /* What to use this one for ? */ + ia_css_kernel_bitmap_t kernel_enable_bitmap; + /* Size of this structure */ + uint32_t size; + /* offset to add to reach parent. This is negative value.*/ + int32_t parent_offset; +}; + +#define SIZE_OF_TERMINAL_PARAM_STRUCT_IN_BITS \ + (IA_CSS_UINT32_T_BITS \ + + IA_CSS_FRAME_FORMAT_TYPE_BITS \ + + IA_CSS_INT32_T_BITS \ + + (IA_CSS_UINT16_T_BITS * IA_CSS_N_DATA_DIMENSION) \ + + (IA_CSS_UINT16_T_BITS * IA_CSS_N_DATA_DIMENSION) \ + + (IA_CSS_UINT16_T_BITS * IA_CSS_N_DATA_DIMENSION) \ + + IA_CSS_INT32_T_BITS \ + + IA_CSS_UINT16_T_BITS \ + + IA_CSS_UINT8_T_BITS \ + + (IA_CSS_UINT8_T_BITS * 1)) + +#endif /* __IA_CSS_PROGRAM_GROUP_PARAM_PRIVATE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psys_server_manifest/cnlB0/ia_css_psys_server_manifest.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psys_server_manifest/cnlB0/ia_css_psys_server_manifest.c new file mode 100644 index 000000000000..fed60fd4df4d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psys_server_manifest/cnlB0/ia_css_psys_server_manifest.c @@ -0,0 +1,52 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_server_manifest.h" + +/** + * Manifest of resources in use by PSYS itself + */ + +const vied_nci_resource_spec_t psys_server_manifest = { + /* internal memory */ + { /* resource id size offset*/ + {VIED_NCI_GMEM_TYPE_ID, 0, 0}, + {VIED_NCI_DMEM_TYPE_ID, VIED_NCI_DMEM0_MAX_SIZE, 0}, + {VIED_NCI_VMEM_TYPE_ID, 0, 0}, + {VIED_NCI_BAMEM_TYPE_ID, 0, 0}, + {VIED_NCI_PMEM_TYPE_ID, 0, 0} + }, + /* external memory */ + { /* resource id size offset*/ + {VIED_NCI_N_MEM_ID, 0, 0}, + {VIED_NCI_N_MEM_ID, 0, 0}, + {VIED_NCI_N_MEM_ID, 0, 0}, + {VIED_NCI_N_MEM_ID, 0, 0} + }, + /* device channel */ + { /* resource id size offset*/ + {VIED_NCI_DEV_CHN_DMA_EXT0_ID, + PSYS_SERVER_DMA_CHANNEL_SIZE, + PSYS_SERVER_DMA_CHANNEL_OFFSET}, + {VIED_NCI_DEV_CHN_GDC_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_EXT1_READ_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_INTERNAL_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_IPFD_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_ISA_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_FW_ID, 0, 0}, + {VIED_NCI_DEV_CHN_DMA_CMPRS_ID, 0, 0} + } +}; + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psys_server_manifest/cnlB0/ia_css_psys_server_manifest.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psys_server_manifest/cnlB0/ia_css_psys_server_manifest.h new file mode 100644 index 000000000000..b4c7fbc32d5b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psys_server_manifest/cnlB0/ia_css_psys_server_manifest.h @@ -0,0 +1,29 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_SERVER_MANIFEST_H +#define __IA_CSS_PSYS_SERVER_MANIFEST_H + +#include "vied_nci_psys_resource_model.h" + +/** + * Manifest of resources in use by PSYS itself + */ + +#define PSYS_SERVER_DMA_CHANNEL_SIZE 2 +#define PSYS_SERVER_DMA_CHANNEL_OFFSET 28 + +extern const vied_nci_resource_spec_t psys_server_manifest; + +#endif /* __IA_CSS_PSYS_SERVER_MANIFEST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psysapi.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psysapi.mk new file mode 100644 index 000000000000..e1977cbe2ca2 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/psysapi.mk @@ -0,0 +1,122 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. 
+# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +# MODULE is PSYSAPI +# +ifdef _H_PSYSAPI_MK +$(error ERROR: psysapi.mk included multiple times, please check makefile) +else +_H_PSYSAPI_MK=1 +endif + +include $(MODULES_DIR)/config/psys/subsystem_$(IPU_SYSVER).mk + +PSYSAPI_DIR = $${MODULES_DIR}/psysapi + +PSYSAPI_PROCESS_HOST_FILES = $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_process.c +PSYSAPI_PROCESS_HOST_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_process_group.c +PSYSAPI_PROCESS_HOST_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_buffer_set.c +PSYSAPI_PROCESS_HOST_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_terminal.c +PSYSAPI_PROCESS_HOST_FILES += $(PSYSAPI_DIR)/param/src/ia_css_program_group_param.c + +# Use PSYS_MANIFEST_HOST_FILES when only accessing manifest functions +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_program_group_manifest.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_program_manifest.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_terminal_manifest.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/sim/src/vied_nci_psys_system.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/kernel/src/ia_css_kernel_bitmap.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/data/src/ia_css_program_group_data.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/vied_nci_psys_resource_model.c +PSYSAPI_MANIFEST_HOST_FILES += $(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION)/ia_css_psys_server_manifest.c + +# Use only kernel bitmap functionality from PSYS API +PSYSAPI_KERNEL_BITMAP_FILES += $(PSYSAPI_DIR)/kernel/src/ia_css_kernel_bitmap.c +PSYSAPI_KERNEL_BITMAP_CPPFLAGS += -I$(PSYSAPI_DIR)/kernel/interface +PSYSAPI_KERNEL_BITMAP_CPPFLAGS += -I$(PSYSAPI_DIR)/interface + +# Use PSYSAPI_HOST_FILES when program and process group are both needed +PSYSAPI_HOST_FILES = $(PSYSAPI_PROCESS_HOST_FILES) $(PSYSAPI_MANIFEST_HOST_FILES) + +# Use PSYSAPI_PROCESS_GROUP_HOST_FILES when program and process group are both needed but there is no +# implementation (yet) of the user customization functions defined in ia_css_psys_process_group_cmd_impl.h. 
+# Dummy implementations are provided in $(PSYSAPI_DIR)/sim/src/ia_css_psys_process_group_cmd_impl.c
+PSYSAPI_PROCESS_GROUP_HOST_FILES = $(PSYSAPI_HOST_FILES)
+PSYSAPI_PROCESS_GROUP_HOST_FILES += $(PSYSAPI_DIR)/sim/src/ia_css_psys_process_group_cmd_impl.c
+
+# Disabled for now; the implementation is currently provided by the psys api impl
+#PSYSAPI_HOST_FILES += $(PSYSAPI_DIR)/device/src/ia_css_psys_device.c
+
+PSYSAPI_HOST_CPPFLAGS = -I$(PSYSAPI_DIR)/interface
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/device/interface
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/device/interface/$(IPU_SYSVER)
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/dynamic/interface
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/dynamic/src
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/data/interface
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/data/src
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/static/interface
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/static/src
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/kernel/interface
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/param/interface
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/param/src
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/sim/interface
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/sim/src
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/private
+PSYSAPI_HOST_CPPFLAGS += -I$(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION)
+
+PSYSAPI_FW_CPPFLAGS = $(PSYSAPI_HOST_CPPFLAGS)
+PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/static/interface
+PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/static/src
+PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)
+PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/private
+PSYSAPI_FW_CPPFLAGS += -I$(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION)
+PSYSAPI_SYSTEM_GLOBAL_CPPFLAGS += -I$(PSYSAPI_DIR)/sim/interface
+PSYSAPI_SYSTEM_GLOBAL_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)
+PSYSAPI_SYSTEM_GLOBAL_CPPFLAGS += -I$(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/private
+PSYSAPI_SYSTEM_GLOBAL_CPPFLAGS += -I$(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION)
+
+# Define the trace level for the PSYSAPI
+PSYSAPI_HOST_CPPFLAGS += -DPSYSAPI_TRACE_CONFIG=PSYSAPI_TRACE_LOG_LEVEL_NORMAL
+# Enable/disable 'late binding' support and its additional queues
+PSYSAPI_HOST_CPPFLAGS += -DHAS_LATE_BINDING_SUPPORT=$(PSYS_HAS_LATE_BINDING_SUPPORT)
+
+# Example: how to switch to a different log level for a sub-module
+#PSYSAPI_HOST_CPPFLAGS += -DPSYSAPI_DYNAMIC_TRACING_OVERRIDE=PSYSAPI_TRACE_LOG_LEVEL_DEBUG
+
+# Enable the host side implementation
+# TODO: better name for the flag to enable the impl...
+PSYSAPI_HOST_CPPFLAGS += -D__X86_SIM__ + +# Files for Firmware +PSYSAPI_FW_FILES = $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_process.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_process_group.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_terminal.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/dynamic/src/ia_css_psys_buffer_set.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/param/src/ia_css_program_group_param.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/data/src/ia_css_program_group_data.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/sim/src/vied_nci_psys_system.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/sim/src/ia_css_psys_sim_data.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_program_group_manifest.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_program_manifest.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/static/src/ia_css_psys_terminal_manifest.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/vied_nci_psys_resource_model.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/psys_server_manifest/$(PSYS_SERVER_MANIFEST_VERSION)/ia_css_psys_server_manifest.c +PSYSAPI_FW_FILES += $(PSYSAPI_DIR)/kernel/src/ia_css_kernel_bitmap.c + +# resource model +PSYSAPI_RESOURCE_MODEL_FILES = $(PSYSAPI_DIR)/resource_model/$(PSYS_RESOURCE_MODEL_VERSION)/vied_nci_psys_resource_model.c + +ifeq ($(PSYS_HAS_DUAL_CMD_CTX_SUPPORT), 1) +PSYSAPI_HOST_CPPFLAGS += -DHAS_DUAL_CMD_CTX_SUPPORT=$(PSYS_HAS_DUAL_CMD_CTX_SUPPORT) +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/resource_model/cnlB0/vied_nci_psys_resource_model.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/resource_model/cnlB0/vied_nci_psys_resource_model.c new file mode 100644 index 000000000000..4d751e0c8467 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/resource_model/cnlB0/vied_nci_psys_resource_model.c @@ -0,0 +1,324 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "vied_nci_psys_resource_model.h" + +/* + * Cell types by cell IDs + */ +const vied_nci_cell_type_ID_t vied_nci_cell_type[VIED_NCI_N_CELL_ID] = { + VIED_NCI_SP_CTRL_TYPE_ID, + VIED_NCI_SP_SERVER_TYPE_ID, + VIED_NCI_SP_SERVER_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_ACC_ISA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_OSA_TYPE_ID, + VIED_NCI_GDC_TYPE_ID, + VIED_NCI_GDC_TYPE_ID +}; + +/* + * Memory types by memory IDs + */ +const vied_nci_mem_type_ID_t vied_nci_mem_type[VIED_NCI_N_MEM_ID] = { + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_GMEM_TYPE_ID,/* VMEM4 is GMEM according to vied_nci_cell_mem */ + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID +}; + +/* + * Cell mem count by cell type ID + */ +const uint16_t vied_nci_N_cell_mem[VIED_NCI_N_CELL_TYPE_ID] = { + VIED_NCI_N_SP_CTRL_MEM, + VIED_NCI_N_SP_SERVER_MEM, + VIED_NCI_N_VP_MEM, + VIED_NCI_N_ACC_PSA_MEM, + VIED_NCI_N_ACC_ISA_MEM, + VIED_NCI_N_ACC_OSA_MEM +}; + +/* + * Cell mem type by cell type ID and memory index + */ +const vied_nci_mem_type_ID_t +vied_nci_cell_mem_type[VIED_NCI_N_CELL_TYPE_ID][VIED_NCI_N_MEM_TYPE_ID] = { + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_GMEM_TYPE_ID, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + }, + { + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID + } +}; + +/* + * Ext mem ID by memory index + */ +const vied_nci_mem_ID_t +vied_nci_ext_mem[VIED_NCI_N_MEM_TYPE_ID] = { + VIED_NCI_VMEM4_ID, /* VIED_NCI_GMEM_TYPE_ID */ + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID +}; + +/* + * Cell mem ID by cell ID and memory index + */ +const vied_nci_mem_ID_t +vied_nci_cell_mem[VIED_NCI_N_CELL_ID][VIED_NCI_N_MEM_TYPE_ID] = { + { + VIED_NCI_N_MEM_ID, + VIED_NCI_DMEM0_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_DMEM1_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_DMEM2_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_VMEM4_ID, + VIED_NCI_DMEM4_ID, + VIED_NCI_VMEM0_ID, + VIED_NCI_BAMEM0_ID, + VIED_NCI_PMEM0_ID 
+ }, + { + VIED_NCI_VMEM4_ID, + VIED_NCI_DMEM5_ID, + VIED_NCI_VMEM1_ID, + VIED_NCI_BAMEM1_ID, + VIED_NCI_PMEM1_ID + }, + { + VIED_NCI_VMEM4_ID, + VIED_NCI_DMEM6_ID, + VIED_NCI_VMEM2_ID, + VIED_NCI_BAMEM2_ID, + VIED_NCI_PMEM2_ID + }, + { + VIED_NCI_VMEM4_ID, + VIED_NCI_DMEM7_ID, + VIED_NCI_VMEM3_ID, + VIED_NCI_BAMEM3_ID, + VIED_NCI_PMEM3_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + }, + { + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID, + VIED_NCI_N_MEM_ID + } +}; + +/* + * Memory sizes by mem ID + */ +const uint16_t vied_nci_mem_size[VIED_NCI_N_MEM_ID] = { + VIED_NCI_VMEM0_MAX_SIZE, + VIED_NCI_VMEM1_MAX_SIZE, + VIED_NCI_VMEM2_MAX_SIZE, + VIED_NCI_VMEM3_MAX_SIZE, + VIED_NCI_VMEM4_MAX_SIZE, + VIED_NCI_BAMEM0_MAX_SIZE, + VIED_NCI_BAMEM1_MAX_SIZE, + VIED_NCI_BAMEM2_MAX_SIZE, + VIED_NCI_BAMEM3_MAX_SIZE, + VIED_NCI_DMEM0_MAX_SIZE, + VIED_NCI_DMEM1_MAX_SIZE, + VIED_NCI_DMEM2_MAX_SIZE, + VIED_NCI_DMEM3_MAX_SIZE, + VIED_NCI_DMEM4_MAX_SIZE, + VIED_NCI_DMEM5_MAX_SIZE, + VIED_NCI_DMEM6_MAX_SIZE, + VIED_NCI_DMEM7_MAX_SIZE, + VIED_NCI_PMEM0_MAX_SIZE, + VIED_NCI_PMEM1_MAX_SIZE, + VIED_NCI_PMEM2_MAX_SIZE, + VIED_NCI_PMEM3_MAX_SIZE +}; + +/* + * Memory word sizes by mem type ID + */ +const uint16_t vied_nci_mem_word_size[VIED_NCI_N_DATA_MEM_TYPE_ID] = { + VIED_NCI_GMEM_WORD_SIZE, + VIED_NCI_DMEM_WORD_SIZE, + VIED_NCI_VMEM_WORD_SIZE, + VIED_NCI_BAMEM_WORD_SIZE +}; + +/* + * Number of channels by device ID + */ +const uint16_t vied_nci_dev_chn_size[VIED_NCI_N_DEV_CHN_ID] = { + VIED_NCI_DEV_CHN_DMA_EXT0_MAX_SIZE, + VIED_NCI_DEV_CHN_GDC_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_EXT1_READ_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_INTERNAL_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_IPFD_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_ISA_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_FW_MAX_SIZE, + VIED_NCI_DEV_CHN_DMA_CMPRS_MAX_SIZE +}; + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/resource_model/cnlB0/vied_nci_psys_resource_model.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/resource_model/cnlB0/vied_nci_psys_resource_model.h new file mode 100644 index 000000000000..6249d8af3eff --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/resource_model/cnlB0/vied_nci_psys_resource_model.h @@ -0,0 +1,300 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __VIED_NCI_PSYS_RESOURCE_MODEL_H +#define __VIED_NCI_PSYS_RESOURCE_MODEL_H + +#include "type_support.h" +#include "storage_class.h" + +#define HAS_DFM 0 +#define NON_RELOC_RESOURCE_SUPPORT 0 +#define IA_CSS_KERNEL_BITMAP_DO_NOT_USE_ELEMS + +/* Defines for the routing bitmap in the program group manifest. + */ +#define VIED_NCI_RBM_MAX_MUX_COUNT 0 +#define VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT 0 +#define VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT 0 +#define N_PADDING_UINT8_IN_RBM_MANIFEST 2 + +/* The amount of padding bytes needed to make + * ia_css_process_s structure 64 bit aligned + */ +#define N_PADDING_UINT8_IN_PROCESS_STRUCT 2 +#define N_PADDING_UINT8_IN_PROGRAM_GROUP_MANFEST 0 + +/** + * Resource model for CNL B0 + */ + +/* + * Cell IDs + */ +typedef enum { + VIED_NCI_SP0_ID = 0, + VIED_NCI_SP1_ID, + VIED_NCI_SP2_ID, + VIED_NCI_VP0_ID, + VIED_NCI_VP1_ID, + VIED_NCI_VP2_ID, + VIED_NCI_VP3_ID, + VIED_NCI_ACC0_ID, + VIED_NCI_ACC1_ID, + VIED_NCI_ACC2_ID, + VIED_NCI_ACC3_ID, + VIED_NCI_ACC4_ID, + VIED_NCI_ACC5_ID, + VIED_NCI_ACC6_ID, + VIED_NCI_ACC7_ID, + VIED_NCI_GDC0_ID, + VIED_NCI_GDC1_ID, + VIED_NCI_N_CELL_ID +} vied_nci_cell_ID_t; + +/* + * Barrier bits (to model process group dependencies) + */ +typedef enum { + VIED_NCI_BARRIER0_ID, + VIED_NCI_BARRIER1_ID, + VIED_NCI_BARRIER2_ID, + VIED_NCI_BARRIER3_ID, + VIED_NCI_BARRIER4_ID, + VIED_NCI_BARRIER5_ID, + VIED_NCI_BARRIER6_ID, + VIED_NCI_BARRIER7_ID, + VIED_NCI_N_BARRIER_ID +} vied_nci_barrier_ID_t; + +/* + * Cell types + */ +typedef enum { + VIED_NCI_SP_CTRL_TYPE_ID = 0, + VIED_NCI_SP_SERVER_TYPE_ID, + VIED_NCI_VP_TYPE_ID, + VIED_NCI_ACC_PSA_TYPE_ID, + VIED_NCI_ACC_ISA_TYPE_ID, + VIED_NCI_ACC_OSA_TYPE_ID, + VIED_NCI_GDC_TYPE_ID, + VIED_NCI_N_CELL_TYPE_ID +} vied_nci_cell_type_ID_t; + +/* + * Memory IDs + */ +typedef enum { + VIED_NCI_VMEM0_ID = 0, + VIED_NCI_VMEM1_ID, + VIED_NCI_VMEM2_ID, + VIED_NCI_VMEM3_ID, + VIED_NCI_VMEM4_ID, + VIED_NCI_BAMEM0_ID, + VIED_NCI_BAMEM1_ID, + VIED_NCI_BAMEM2_ID, + VIED_NCI_BAMEM3_ID, + VIED_NCI_DMEM0_ID, + VIED_NCI_DMEM1_ID, + VIED_NCI_DMEM2_ID, + VIED_NCI_DMEM3_ID, + VIED_NCI_DMEM4_ID, + VIED_NCI_DMEM5_ID, + VIED_NCI_DMEM6_ID, + VIED_NCI_DMEM7_ID, + VIED_NCI_PMEM0_ID, + VIED_NCI_PMEM1_ID, + VIED_NCI_PMEM2_ID, + VIED_NCI_PMEM3_ID, + VIED_NCI_N_MEM_ID +} vied_nci_mem_ID_t; + +/* + * Memory types + */ +typedef enum { + VIED_NCI_GMEM_TYPE_ID = 0, + VIED_NCI_DMEM_TYPE_ID, + VIED_NCI_VMEM_TYPE_ID, + VIED_NCI_BAMEM_TYPE_ID, + VIED_NCI_PMEM_TYPE_ID, + VIED_NCI_N_MEM_TYPE_ID +} vied_nci_mem_type_ID_t; + +/* Excluding PMEM */ +#define VIED_NCI_N_DATA_MEM_TYPE_ID (VIED_NCI_N_MEM_TYPE_ID - 1) + +#define VIED_NCI_N_SP_CTRL_MEM 2 +#define VIED_NCI_N_SP_SERVER_MEM 2 +#define VIED_NCI_N_VP_MEM 4 +#define VIED_NCI_N_ACC_PSA_MEM 0 +#define VIED_NCI_N_ACC_ISA_MEM 0 +#define VIED_NCI_N_ACC_OSA_MEM 0 + +#define VIED_NCI_N_VP_CELL 4 +#define VIED_NCI_N_ACC_CELL 8 + +/* + * Device IDs + */ +typedef enum { + VIED_NCI_DEV_CHN_DMA_EXT0_ID = 0, + VIED_NCI_DEV_CHN_GDC_ID, + VIED_NCI_DEV_CHN_DMA_EXT1_READ_ID, + VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_ID, + 
VIED_NCI_DEV_CHN_DMA_INTERNAL_ID, + VIED_NCI_DEV_CHN_DMA_IPFD_ID, + VIED_NCI_DEV_CHN_DMA_ISA_ID, + VIED_NCI_DEV_CHN_DMA_FW_ID, + VIED_NCI_DEV_CHN_DMA_CMPRS_ID, + VIED_NCI_N_DEV_CHN_ID +} vied_nci_dev_chn_ID_t; + +typedef enum { + DFM_IS_NOT_AVAILABLE +} vied_nci_dev_dfm_id_t; + +#define VIED_NCI_N_DEV_DFM_ID 0 +/* + * Memory size (previously in vied_nci_psys_system.c) + * VMEM: in words, 64 Byte per word. + * BAMEM: in words, 64 Byte per word + * DMEM: in words, 4 Byte per word. + * PMEM: in words, 64 Byte per word. + */ +#define VIED_NCI_GMEM_WORD_SIZE 64 +#define VIED_NCI_DMEM_WORD_SIZE 4 +#define VIED_NCI_VMEM_WORD_SIZE 64 +#define VIED_NCI_BAMEM_WORD_SIZE 64 + +#define VIED_NCI_VMEM0_MAX_SIZE (0x0800) +#define VIED_NCI_VMEM1_MAX_SIZE (0x0800) +#define VIED_NCI_VMEM2_MAX_SIZE (0x0800) +#define VIED_NCI_VMEM3_MAX_SIZE (0x0800) +#define VIED_NCI_VMEM4_MAX_SIZE (0x0800) +#define VIED_NCI_BAMEM0_MAX_SIZE (0x0400) +#define VIED_NCI_BAMEM1_MAX_SIZE (0x0400) +#define VIED_NCI_BAMEM2_MAX_SIZE (0x0400) +#define VIED_NCI_BAMEM3_MAX_SIZE (0x0400) +#define VIED_NCI_DMEM0_MAX_SIZE (0x4000) +#define VIED_NCI_DMEM1_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM2_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM3_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM4_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM5_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM6_MAX_SIZE (0x1000) +#define VIED_NCI_DMEM7_MAX_SIZE (0x1000) +#define VIED_NCI_PMEM0_MAX_SIZE (0x0500) +#define VIED_NCI_PMEM1_MAX_SIZE (0x0500) +#define VIED_NCI_PMEM2_MAX_SIZE (0x0500) +#define VIED_NCI_PMEM3_MAX_SIZE (0x0500) + +/* + * Number of channels per device + */ +#define VIED_NCI_DEV_CHN_DMA_EXT0_MAX_SIZE (30) +#define VIED_NCI_DEV_CHN_GDC_MAX_SIZE (4) +#define VIED_NCI_DEV_CHN_DMA_EXT1_READ_MAX_SIZE (30) +#define VIED_NCI_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE (20) +#define VIED_NCI_DEV_CHN_DMA_INTERNAL_MAX_SIZE (2) +#define VIED_NCI_DEV_CHN_DMA_IPFD_MAX_SIZE (5) +#define VIED_NCI_DEV_CHN_DMA_ISA_MAX_SIZE (2) +#define VIED_NCI_DEV_CHN_DMA_FW_MAX_SIZE (1) +#define VIED_NCI_DEV_CHN_DMA_CMPRS_MAX_SIZE (6) + +/* + * Storage of the resource and resource type enumerators + */ +#define VIED_NCI_RESOURCE_ID_BITS 8 +typedef uint8_t vied_nci_resource_id_t; + +#define VIED_NCI_RESOURCE_SIZE_BITS 16 +typedef uint16_t vied_nci_resource_size_t; + +#define VIED_NCI_RESOURCE_BITMAP_BITS 32 +typedef uint32_t vied_nci_resource_bitmap_t; + +#define IA_CSS_PROCESS_INVALID_DEPENDENCY ((vied_nci_resource_id_t)(-1)) +#define IA_CSS_PROCESS_INVALID_OFFSET ((vied_nci_resource_size_t)(-1)) +#define IA_CSS_PROCESS_MAX_CELLS 1 + +/* + * Resource specifications + * Note that the FAS uses the terminology local/remote memory. In the PSYS API, + * these are called internal/external memory. 
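+ *
+ * Illustrative note (editorial, not part of the original sources): using
+ * the spec types defined below, a request for 0x100 DMEM words whose
+ * placement is left to the resource manager could look like
+ *   { .type_id = VIED_NCI_DMEM_TYPE_ID, .size = 0x100,
+ *     .offset = IA_CSS_PROCESS_INVALID_OFFSET }
+ * with the field names taken from vied_nci_resource_spec_int_mem_s.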
+ */
+
+/* resource spec for internal (local) memory */
+struct vied_nci_resource_spec_int_mem_s {
+ vied_nci_resource_id_t type_id;
+ vied_nci_resource_size_t size;
+ vied_nci_resource_size_t offset;
+};
+
+typedef struct vied_nci_resource_spec_int_mem_s
+ vied_nci_resource_spec_int_mem_t;
+
+/* resource spec for external (remote) memory */
+struct vied_nci_resource_spec_ext_mem_s {
+ vied_nci_resource_id_t type_id;
+ vied_nci_resource_size_t size;
+ vied_nci_resource_size_t offset;
+};
+
+typedef struct vied_nci_resource_spec_ext_mem_s
+ vied_nci_resource_spec_ext_mem_t;
+
+/* resource spec for device channel */
+struct vied_nci_resource_spec_dev_chn_s {
+ vied_nci_resource_id_t type_id;
+ vied_nci_resource_size_t size;
+ vied_nci_resource_size_t offset;
+};
+
+typedef struct vied_nci_resource_spec_dev_chn_s
+ vied_nci_resource_spec_dev_chn_t;
+
+/* resource spec for all contiguous resources */
+struct vied_nci_resource_spec_s {
+ vied_nci_resource_spec_int_mem_t int_mem[VIED_NCI_N_MEM_TYPE_ID];
+ vied_nci_resource_spec_ext_mem_t ext_mem[VIED_NCI_N_DATA_MEM_TYPE_ID];
+ vied_nci_resource_spec_dev_chn_t dev_chn[VIED_NCI_N_DEV_CHN_ID];
+};
+
+typedef struct vied_nci_resource_spec_s vied_nci_resource_spec_t;
+
+#ifndef PIPE_GENERATION
+
+extern const vied_nci_cell_type_ID_t vied_nci_cell_type[VIED_NCI_N_CELL_ID];
+extern const vied_nci_mem_type_ID_t vied_nci_mem_type[VIED_NCI_N_MEM_ID];
+extern const uint16_t vied_nci_N_cell_mem[VIED_NCI_N_CELL_TYPE_ID];
+extern const vied_nci_mem_type_ID_t
+ vied_nci_cell_mem_type[VIED_NCI_N_CELL_TYPE_ID][VIED_NCI_N_MEM_TYPE_ID];
+extern const vied_nci_mem_ID_t
+ vied_nci_ext_mem[VIED_NCI_N_MEM_TYPE_ID];
+extern const vied_nci_mem_ID_t
+ vied_nci_cell_mem[VIED_NCI_N_CELL_ID][VIED_NCI_N_MEM_TYPE_ID];
+extern const uint16_t vied_nci_mem_size[VIED_NCI_N_MEM_ID];
+extern const uint16_t vied_nci_mem_word_size[VIED_NCI_N_DATA_MEM_TYPE_ID];
+extern const uint16_t vied_nci_dev_chn_size[VIED_NCI_N_DEV_CHN_ID];
+
+STORAGE_CLASS_INLINE
+uint32_t vied_nci_mem_is_ext_type(const vied_nci_mem_type_ID_t mem_type_id)
+{
+ return((mem_type_id == VIED_NCI_GMEM_TYPE_ID));
+}
+
+#endif /* PIPE_GENERATION */
+
+#endif /* __VIED_NCI_PSYS_RESOURCE_MODEL_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_data.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_data.h
new file mode 100644
index 000000000000..78f917672672
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_data.h
@@ -0,0 +1,53 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_SIM_DATA_H
+#define __IA_CSS_PSYS_SIM_DATA_H
+
+/*! Set the seed of the random number generator
+
+ @param seed[in] Random number generator seed
+ */
+extern void ia_css_psys_ran_set_seed(const unsigned int seed);
+
+/*!
Generate a random number of a specified bit depth
+
+ @param bit_depth[in] The number of bits of the random output
+
+ @return out, weight(out) <= bit_depth, 0 on error
+ */
+extern unsigned int ia_css_psys_ran_var(const unsigned int bit_depth);
+
+/*! Generate a random number of a specified range
+
+ @param range[in] The range of the random output
+
+ @return 0 <= out < range, 0 on error
+ */
+extern unsigned int ia_css_psys_ran_val(const unsigned int range);
+
+/*! Generate a random number in a specified interval
+
+ @param lo[in] The lower bound of the random output range
+ @param hi[in] The upper bound of the random output range
+
+ @return lo <= out < hi, 0 on error
+ */
+extern unsigned int ia_css_psys_ran_interval(const unsigned int lo,
+ const unsigned int hi);
+
+#endif /* __IA_CSS_PSYS_SIM_DATA_H */
+
+
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_storage_class.h
new file mode 100644
index 000000000000..61095257ec55
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_storage_class.h
@@ -0,0 +1,28 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_SIM_STORAGE_CLASS_H
+#define __IA_CSS_PSYS_SIM_STORAGE_CLASS_H
+
+#include "storage_class.h"
+
+#ifndef __IA_CSS_PSYS_SIM_INLINE__
+#define IA_CSS_PSYS_SIM_STORAGE_CLASS_H STORAGE_CLASS_EXTERN
+#define IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+#else
+#define IA_CSS_PSYS_SIM_STORAGE_CLASS_H STORAGE_CLASS_INLINE
+#define IA_CSS_PSYS_SIM_STORAGE_CLASS_C STORAGE_CLASS_INLINE
+#endif
+
+#endif /* __IA_CSS_PSYS_SIM_STORAGE_CLASS_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_trace.h
new file mode 100644
index 000000000000..423ff1980270
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/ia_css_psys_sim_trace.h
@@ -0,0 +1,95 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_SIM_TRACE_H
+#define __IA_CSS_PSYS_SIM_TRACE_H
+
+#include "ia_css_psysapi_trace.h"
+
+#define PSYS_SIM_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF
+
+/* Default sub-module tracing config */
+#if (!defined(PSYSAPI_SIM_TRACING_OVERRIDE))
+ #define PSYS_SIM_TRACE_LEVEL_CONFIG PSYS_SIM_TRACE_LEVEL_CONFIG_DEFAULT
+#endif
+
+/* The sub-module specific trace settings below are used when
+ * PSYSAPI_SIM_TRACING_OVERRIDE is defined; otherwise the module-wide
+ * trace settings are inherited.
+ */
+#if (defined(PSYSAPI_SIM_TRACING_OVERRIDE))
+ /* Module/sub-module specific trace setting */
+ #if PSYSAPI_SIM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF
+ /* PSYSAPI_TRACE_LOG_LEVEL_OFF */
+ #define PSYSAPI_SIM_TRACE_METHOD \
+ IA_CSS_TRACE_METHOD_NATIVE
+ #define PSYSAPI_SIM_TRACE_LEVEL_ASSERT \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_SIM_TRACE_LEVEL_ERROR \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_SIM_TRACE_LEVEL_WARNING \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_SIM_TRACE_LEVEL_INFO \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_SIM_TRACE_LEVEL_DEBUG \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_SIM_TRACE_LEVEL_VERBOSE \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #elif PSYSAPI_SIM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL
+ /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */
+ #define PSYSAPI_SIM_TRACE_METHOD \
+ IA_CSS_TRACE_METHOD_NATIVE
+ #define PSYSAPI_SIM_TRACE_LEVEL_ASSERT \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_SIM_TRACE_LEVEL_ERROR \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_SIM_TRACE_LEVEL_WARNING \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_SIM_TRACE_LEVEL_INFO \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_SIM_TRACE_LEVEL_DEBUG \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #define PSYSAPI_SIM_TRACE_LEVEL_VERBOSE \
+ IA_CSS_TRACE_LEVEL_DISABLED
+ #elif PSYSAPI_SIM_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG
+ /* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */
+ #define PSYSAPI_SIM_TRACE_METHOD \
+ IA_CSS_TRACE_METHOD_NATIVE
+ #define PSYSAPI_SIM_TRACE_LEVEL_ASSERT \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_SIM_TRACE_LEVEL_ERROR \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_SIM_TRACE_LEVEL_WARNING \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_SIM_TRACE_LEVEL_INFO \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_SIM_TRACE_LEVEL_DEBUG \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #define PSYSAPI_SIM_TRACE_LEVEL_VERBOSE \
+ IA_CSS_TRACE_LEVEL_ENABLED
+ #else
+ #error "No PSYSAPI_SIM tracing level defined"
+ #endif
+#else
+ /* Inherit module trace settings */
+ #define PSYSAPI_SIM_TRACE_METHOD PSYSAPI_TRACE_METHOD
+ #define PSYSAPI_SIM_TRACE_LEVEL_ASSERT PSYSAPI_TRACE_LEVEL_ASSERT
+ #define PSYSAPI_SIM_TRACE_LEVEL_ERROR PSYSAPI_TRACE_LEVEL_ERROR
+ #define PSYSAPI_SIM_TRACE_LEVEL_WARNING PSYSAPI_TRACE_LEVEL_WARNING
+ #define PSYSAPI_SIM_TRACE_LEVEL_INFO PSYSAPI_TRACE_LEVEL_INFO
+ #define PSYSAPI_SIM_TRACE_LEVEL_DEBUG PSYSAPI_TRACE_LEVEL_DEBUG
+ #define PSYSAPI_SIM_TRACE_LEVEL_VERBOSE PSYSAPI_TRACE_LEVEL_VERBOSE
+#endif
+
+#endif /* __IA_CSS_PSYS_SIM_TRACE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/vied_nci_psys_system_global.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/vied_nci_psys_system_global.h
new file mode 100644
index 000000000000..529bea763cc2
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/interface/vied_nci_psys_system_global.h
@@ -0,0 +1,180 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __VIED_NCI_PSYS_SYSTEM_GLOBAL_H +#define __VIED_NCI_PSYS_SYSTEM_GLOBAL_H + +#include +#include "ia_css_base_types.h" +#include "ia_css_psys_sim_storage_class.h" +#include "vied_nci_psys_resource_model.h" + +/* + * Key system types + */ +/* Subsystem internal physical address */ +#define VIED_ADDRESS_BITS 32 + +/* typedef uint32_t vied_address_t; */ + +/* Subsystem internal virtual address */ + +/* Subsystem internal data bus */ +#define VIED_DATA_BITS 32 +typedef uint32_t vied_data_t; + +#define VIED_NULL ((vied_vaddress_t)0) + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bit_mask( + const unsigned index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_set( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_clear( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_bitmap_empty( + const vied_nci_resource_bitmap_t bitmap); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_bitmap_set( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_bit_set_in_bitmap( + const vied_nci_resource_bitmap_t bitmap, + const unsigned int index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_bitmap_clear( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +int vied_nci_bitmap_compute_weight( + const vied_nci_resource_bitmap_t bitmap); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_union( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_intersection( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_xor( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_set_unique( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitfield_mask( + const unsigned int position, + const unsigned int size); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bitmap_set_bitfield( +const vied_nci_resource_bitmap_t bitmap, +const unsigned int index, +const unsigned int size); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_bit_mask_set_unique( + const vied_nci_resource_bitmap_t bitmap, + const unsigned index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_cell_bit_mask( + const vied_nci_cell_ID_t cell_id); + 
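+/* Usage sketch (editorial illustration, not part of the original sources):
+ * reserving a cell in a resource bitmap with the helpers declared above.
+ * vied_nci_bit_mask_set_unique() returns 0 when the bit is already taken,
+ * so the result must be checked before it is stored back:
+ *
+ *   vied_nci_resource_bitmap_t bm = 0, tmp;
+ *
+ *   tmp = vied_nci_bit_mask_set_unique(bm, VIED_NCI_VP0_ID);
+ *   if (!vied_nci_is_bitmap_empty(tmp))
+ *       bm = tmp;                               (cell reserved)
+ *   ...
+ *   bm = vied_nci_bitmap_clear(bm, vied_nci_bit_mask(VIED_NCI_VP0_ID));
+ */
+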
+IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_resource_bitmap_t vied_nci_barrier_bit_mask( + const vied_nci_barrier_ID_t barrier_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_cell_type_ID_t vied_nci_cell_get_type( + const vied_nci_cell_ID_t cell_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_mem_type_ID_t vied_nci_mem_get_type( + const vied_nci_mem_ID_t mem_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +uint16_t vied_nci_mem_get_size( + const vied_nci_mem_ID_t mem_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +uint16_t vied_nci_dev_chn_get_size( + const vied_nci_dev_chn_ID_t dev_chn_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_cell_of_type( + const vied_nci_cell_ID_t cell_id, + const vied_nci_cell_type_ID_t cell_type_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_mem_of_type( + const vied_nci_mem_ID_t mem_id, + const vied_nci_mem_type_ID_t mem_type_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_is_cell_mem_of_type( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index, + const vied_nci_mem_type_ID_t mem_type_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +bool vied_nci_has_cell_mem_of_id( + const vied_nci_cell_ID_t cell_id, + const vied_nci_mem_ID_t mem_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +uint16_t vied_nci_cell_get_mem_count( + const vied_nci_cell_ID_t cell_id); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_mem_type_ID_t vied_nci_cell_get_mem_type( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_mem_ID_t vied_nci_cell_get_mem( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index); + +IA_CSS_PSYS_SIM_STORAGE_CLASS_H +vied_nci_mem_type_ID_t vied_nci_cell_type_get_mem_type( + const vied_nci_cell_type_ID_t cell_type_id, + const uint16_t mem_index); + +#ifdef __IA_CSS_PSYS_SIM_INLINE__ +#include "psys_system_global_impl.h" +#endif /* __IA_CSS_PSYS_SIM_INLINE__ */ + +#endif /* __VIED_NCI_PSYS_SYSTEM_GLOBAL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/ia_css_psys_sim_data.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/ia_css_psys_sim_data.c new file mode 100644 index 000000000000..6dccac823871 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/ia_css_psys_sim_data.c @@ -0,0 +1,91 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+
+#include 
+
+#include "ia_css_psys_sim_trace.h"
+
+static unsigned int ia_css_psys_ran_seed;
+
+void ia_css_psys_ran_set_seed(const unsigned int seed)
+{
+ ia_css_psys_ran_seed = seed;
+
+ IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
+ "ia_css_psys_ran_set_seed(): enter:\n");
+
+}
+
+/* 32-bit linear congruential generator (Numerical Recipes constants) */
+static unsigned int ia_css_psys_ran_int(void)
+{
+ ia_css_psys_ran_seed = 1664525UL * ia_css_psys_ran_seed + 1013904223UL;
+ return ia_css_psys_ran_seed;
+}
+
+unsigned int ia_css_psys_ran_var(const unsigned int bit_depth)
+{
+ unsigned int out;
+ unsigned int tmp;
+
+ IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "ia_css_psys_ran_var(): enter:\n");
+
+ tmp = ia_css_psys_ran_int();
+
+ if (bit_depth > 32)
+ out = tmp;
+ else if (bit_depth == 0)
+ out = 0;
+ else
+ out = tmp >> (32 - bit_depth);
+
+ return out;
+}
+
+unsigned int ia_css_psys_ran_val(const unsigned int range)
+{
+ unsigned int out;
+ unsigned int tmp;
+
+ IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "ia_css_psys_ran_val(): enter:\n");
+
+ tmp = ia_css_psys_ran_int();
+
+ if (range > 1)
+ out = tmp % range;
+ else
+ out = 0;
+
+ return out;
+}
+
+unsigned int ia_css_psys_ran_interval(const unsigned int lo,
+ const unsigned int hi)
+{
+ unsigned int out;
+ unsigned int tmp;
+ unsigned int range = hi - lo;
+
+ IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
+ "ia_css_psys_ran_interval(): enter:\n");
+
+ tmp = ia_css_psys_ran_int();
+
+ if ((range > 1) && (lo < hi))
+ out = lo + (tmp % range);
+ else
+ out = 0;
+
+ return out;
+}
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/psys_system_global_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/psys_system_global_impl.h
new file mode 100644
index 000000000000..ff51175548ec
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/psys_system_global_impl.h
@@ -0,0 +1,485 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __PSYS_SYSTEM_GLOBAL_IMPL_H
+#define __PSYS_SYSTEM_GLOBAL_IMPL_H
+
+#include 
+
+#include "ia_css_psys_sim_trace.h"
+#include 
+
+/* Use vied_bits instead; for test purposes we use explicit type
+ * checking here.
+ */
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+vied_nci_resource_bitmap_t vied_nci_bit_mask(
+ const unsigned int index)
+{
+ vied_nci_resource_bitmap_t bit_mask = 0;
+
+ IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "vied_nci_bit_mask(): enter:\n");
+
+ if (index < VIED_NCI_RESOURCE_BITMAP_BITS)
+ bit_mask = (vied_nci_resource_bitmap_t)1 << index;
+
+ return bit_mask;
+}
+
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+vied_nci_resource_bitmap_t vied_nci_bitmap_set(
+ const vied_nci_resource_bitmap_t bitmap,
+ const vied_nci_resource_bitmap_t bit_mask)
+{
+
+ IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "vied_nci_bitmap_set(): enter:\n");
+
+/*
+ assert(vied_nci_is_bitmap_one_hot(bit_mask));
+*/
+ return bitmap | bit_mask;
+}
+
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+vied_nci_resource_bitmap_t vied_nci_bitmap_clear(
+ const vied_nci_resource_bitmap_t bitmap,
+ const vied_nci_resource_bitmap_t bit_mask)
+{
+
+ IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
+ "vied_nci_bitmap_clear(): enter:\n");
+
+/*
+ assert(vied_nci_is_bitmap_one_hot(bit_mask));
+*/
+ return bitmap & (~bit_mask);
+}
+
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+vied_nci_resource_bitmap_t vied_nci_bitfield_mask(
+ const unsigned int position,
+ const unsigned int size)
+{
+ vied_nci_resource_bitmap_t bit_mask = 0;
+ vied_nci_resource_bitmap_t ones = (vied_nci_resource_bitmap_t)-1;
+
+ IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
+ "vied_nci_bitfield_mask(): enter:\n");
+
+ /* The shift count is in bits, not bytes; guard against a zero or
+ * oversized field, which would shift by more than the type width.
+ */
+ if ((position < VIED_NCI_RESOURCE_BITMAP_BITS) &&
+ (size > 0) && (size <= VIED_NCI_RESOURCE_BITMAP_BITS))
+ bit_mask = (ones >> (VIED_NCI_RESOURCE_BITMAP_BITS - size))
+ << position;
+
+ return bit_mask;
+}
+
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+vied_nci_resource_bitmap_t vied_nci_bitmap_set_bitfield(
+ const vied_nci_resource_bitmap_t bitmap,
+ const unsigned int index,
+ const unsigned int size)
+{
+ vied_nci_resource_bitmap_t ret = 0;
+ vied_nci_resource_bitmap_t bit_mask = 0;
+
+ IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
+ "vied_nci_bitmap_set_bitfield(): enter:\n");
+
+ bit_mask = vied_nci_bitfield_mask(index, size);
+ ret = vied_nci_bitmap_set(bitmap, bit_mask);
+
+ return ret;
+}
+
+
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+vied_nci_resource_bitmap_t vied_nci_bitmap_set_unique(
+ const vied_nci_resource_bitmap_t bitmap,
+ const vied_nci_resource_bitmap_t bit_mask)
+{
+ vied_nci_resource_bitmap_t ret = 0;
+
+ IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
+ "vied_nci_bitmap_set_unique(): enter:\n");
+
+ if ((bitmap & bit_mask) == 0)
+ ret = bitmap | bit_mask;
+
+ return ret;
+}
+
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+vied_nci_resource_bitmap_t vied_nci_bit_mask_set_unique(
+ const vied_nci_resource_bitmap_t bitmap,
+ const unsigned int index)
+{
+ vied_nci_resource_bitmap_t ret = 0;
+ vied_nci_resource_bitmap_t bit_mask;
+
+ IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
+ "vied_nci_bit_mask_set_unique(): enter:\n");
+
+ bit_mask = vied_nci_bit_mask(index);
+
+ if (((bitmap & bit_mask) == 0) && (bit_mask != 0))
+ ret = bitmap | bit_mask;
+
+ return ret;
+}
+
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+bool vied_nci_is_bitmap_empty(
+ const vied_nci_resource_bitmap_t bitmap)
+{
+
+ IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
+ "vied_nci_is_bitmap_empty(): enter:\n");
+
+ return (bitmap == 0);
+}
+
+IA_CSS_PSYS_SIM_STORAGE_CLASS_C
+bool vied_nci_is_bitmap_set(
+ const vied_nci_resource_bitmap_t bitmap,
+ const vied_nci_resource_bitmap_t bit_mask)
+{
+
+ IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE,
"vied_nci_is_bitmap_set(): enter:\n"); + +/* + assert(vied_nci_is_bitmap_one_hot(bit_mask)); +*/ + return !vied_nci_is_bitmap_clear(bitmap, bit_mask); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_bit_set_in_bitmap( + const vied_nci_resource_bitmap_t bitmap, + const unsigned int index) +{ + + vied_nci_resource_bitmap_t bitmask; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_bit_set_in_bitmap(): enter:\n"); + bitmask = vied_nci_bit_mask(index); + return vied_nci_is_bitmap_set(bitmap, bitmask); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_bitmap_clear( + const vied_nci_resource_bitmap_t bitmap, + const vied_nci_resource_bitmap_t bit_mask) +{ + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_bitmap_clear(): enter:\n"); + +/* + assert(vied_nci_is_bitmap_one_hot(bit_mask)); +*/ + return ((bitmap & bit_mask) == 0); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +int vied_nci_bitmap_compute_weight( + const vied_nci_resource_bitmap_t bitmap) +{ + vied_nci_resource_bitmap_t loc_bitmap = bitmap; + int weight = 0; + int i; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bitmap_compute_weight(): enter:\n"); + + /* Do not need the iterator "i" */ + for (i = 0; (i < VIED_NCI_RESOURCE_BITMAP_BITS) && + (loc_bitmap != 0); i++) { + weight += loc_bitmap & 0x01; + loc_bitmap >>= 1; + } + + return weight; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_union( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_bitmap_union(): enter:\n"); + return (bitmap0 | bitmap1); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_intersection( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "ia_css_kernel_bitmap_intersection(): enter:\n"); + return (bitmap0 & bitmap1); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_bitmap_xor( + const vied_nci_resource_bitmap_t bitmap0, + const vied_nci_resource_bitmap_t bitmap1) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, "vied_nci_bitmap_xor(): enter:\n"); + return (bitmap0 ^ bitmap1); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_cell_bit_mask( + const vied_nci_cell_ID_t cell_id) +{ + vied_nci_resource_bitmap_t bit_mask = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_bit_mask(): enter:\n"); + + if ((cell_id < VIED_NCI_N_CELL_ID) && + (cell_id < VIED_NCI_RESOURCE_BITMAP_BITS)) { + bit_mask = (vied_nci_resource_bitmap_t)1 << cell_id; + } + return bit_mask; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_resource_bitmap_t vied_nci_barrier_bit_mask( + const vied_nci_barrier_ID_t barrier_id) +{ + vied_nci_resource_bitmap_t bit_mask = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_barrier_bit_mask(): enter:\n"); + + if ((barrier_id < VIED_NCI_N_BARRIER_ID) && + ((barrier_id + VIED_NCI_N_CELL_ID) < VIED_NCI_RESOURCE_BITMAP_BITS)) { + bit_mask = (vied_nci_resource_bitmap_t)1 << + (barrier_id + VIED_NCI_N_CELL_ID); + } + return bit_mask; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_cell_type_ID_t vied_nci_cell_get_type( + const vied_nci_cell_ID_t cell_id) +{ + vied_nci_cell_type_ID_t cell_type = VIED_NCI_N_CELL_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_get_type(): enter:\n"); + + if (cell_id < VIED_NCI_N_CELL_ID) { + cell_type = vied_nci_cell_type[cell_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_SIM, WARNING, + 
"vied_nci_cell_get_type(): invalid argument\n"); + } + + return cell_type; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_mem_type_ID_t vied_nci_mem_get_type( + const vied_nci_mem_ID_t mem_id) +{ + vied_nci_mem_type_ID_t mem_type = VIED_NCI_N_MEM_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_mem_get_type(): enter:\n"); + + if (mem_id < VIED_NCI_N_MEM_ID) { + mem_type = vied_nci_mem_type[mem_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_SIM, WARNING, + "vied_nci_mem_get_type(): invalid argument\n"); + } + + return mem_type; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +uint16_t vied_nci_mem_get_size( + const vied_nci_mem_ID_t mem_id) +{ + uint16_t mem_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_mem_get_size(): enter:\n"); + + if (mem_id < VIED_NCI_N_MEM_ID) { + mem_size = vied_nci_mem_size[mem_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_SIM, WARNING, + "vied_nci_mem_get_size(): invalid argument\n"); + } + + return mem_size; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +uint16_t vied_nci_dev_chn_get_size( + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + uint16_t dev_chn_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_dev_chn_get_size(): enter:\n"); + + if (dev_chn_id < VIED_NCI_N_DEV_CHN_ID) { + dev_chn_size = vied_nci_dev_chn_size[dev_chn_id]; + } else { + IA_CSS_TRACE_0(PSYSAPI_SIM, WARNING, + "vied_nci_dev_chn_get_size(): invalid argument\n"); + } + + return dev_chn_size; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_cell_of_type( + const vied_nci_cell_ID_t cell_id, + const vied_nci_cell_type_ID_t cell_type_id) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_cell_of_type(): enter:\n"); + + return ((vied_nci_cell_get_type(cell_id) == + cell_type_id) && (cell_type_id != + VIED_NCI_N_CELL_TYPE_ID)); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_mem_of_type( + const vied_nci_mem_ID_t mem_id, + const vied_nci_mem_type_ID_t mem_type_id) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_mem_of_type(): enter:\n"); + + return ((vied_nci_mem_get_type(mem_id) == mem_type_id) && + (mem_type_id != VIED_NCI_N_MEM_TYPE_ID)); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_is_cell_mem_of_type( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index, + const vied_nci_mem_type_ID_t mem_type_id) +{ + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_is_cell_mem_of_type(): enter:\n"); + + return ((vied_nci_cell_get_mem_type(cell_id, mem_index) == mem_type_id) + && (mem_type_id != VIED_NCI_N_MEM_TYPE_ID)); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +bool vied_nci_has_cell_mem_of_id( + const vied_nci_cell_ID_t cell_id, + const vied_nci_mem_ID_t mem_id) +{ + uint16_t mem_index; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_has_cell_mem_of_id(): enter:\n"); + + for (mem_index = 0; mem_index < VIED_NCI_N_MEM_TYPE_ID; mem_index++) { + if ((vied_nci_cell_get_mem(cell_id, mem_index) == mem_id) && + (mem_id != VIED_NCI_N_MEM_ID)) { + break; + } + } + + return (mem_index < VIED_NCI_N_MEM_TYPE_ID); +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +uint16_t vied_nci_cell_get_mem_count( + const vied_nci_cell_ID_t cell_id) +{ + uint16_t mem_count = 0; + vied_nci_cell_type_ID_t cell_type; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_get_mem_count(): enter:\n"); + + cell_type = vied_nci_cell_get_type(cell_id); + + if (cell_type < VIED_NCI_N_CELL_TYPE_ID) + mem_count = vied_nci_N_cell_mem[cell_type]; + + return mem_count; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_mem_type_ID_t vied_nci_cell_get_mem_type( + const vied_nci_cell_ID_t cell_id, + 
const uint16_t mem_index) +{ + vied_nci_mem_type_ID_t mem_type = VIED_NCI_N_MEM_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_get_mem_type(): enter:\n"); + + if ((cell_id < VIED_NCI_N_CELL_ID) && + (mem_index < VIED_NCI_N_MEM_TYPE_ID)) { + mem_type = vied_nci_cell_mem_type[ + vied_nci_cell_get_type(cell_id)][mem_index]; + } + + return mem_type; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_mem_ID_t vied_nci_cell_get_mem( + const vied_nci_cell_ID_t cell_id, + const uint16_t mem_index) +{ + vied_nci_mem_ID_t mem_id = VIED_NCI_N_MEM_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_get_mem(): enter:\n"); + + if ((cell_id < VIED_NCI_N_CELL_ID) && + (mem_index < VIED_NCI_N_MEM_TYPE_ID)) { + mem_id = vied_nci_cell_mem[cell_id][mem_index]; + } + + return mem_id; +} + +IA_CSS_PSYS_SIM_STORAGE_CLASS_C +vied_nci_mem_type_ID_t vied_nci_cell_type_get_mem_type( + const vied_nci_cell_type_ID_t cell_type_id, + const uint16_t mem_index) +{ + vied_nci_mem_type_ID_t mem_type = VIED_NCI_N_MEM_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_SIM, VERBOSE, + "vied_nci_cell_type_get_mem_type(): enter:\n"); + + if ((cell_type_id < VIED_NCI_N_CELL_TYPE_ID) + && (mem_index < VIED_NCI_N_MEM_TYPE_ID)) { + mem_type = vied_nci_cell_mem_type[cell_type_id][mem_index]; + } + + return mem_type; +} + +#endif /* __PSYS_SYSTEM_GLOBAL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/vied_nci_psys_system.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/vied_nci_psys_system.c new file mode 100644 index 000000000000..b0e0aebb6e77 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/sim/src/vied_nci_psys_system.c @@ -0,0 +1,26 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_psys_sim_storage_class.h" + +/* + * Functions to possibly inline + */ + +#ifdef __IA_CSS_PSYS_SIM_INLINE__ +STORAGE_CLASS_INLINE int +__ia_css_psys_system_global_avoid_warning_on_empty_file(void) { return 0; } +#else /* __IA_CSS_PSYS_SIM_INLINE__ */ +#include "psys_system_global_impl.h" +#endif /* __IA_CSS_PSYS_SIM_INLINE__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_manifest_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_manifest_types.h new file mode 100644 index 000000000000..4a2f96e9405e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_manifest_types.h @@ -0,0 +1,102 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_MANIFEST_TYPES_H +#define __IA_CSS_PSYS_MANIFEST_TYPES_H + +/*! \file */ + +/** @file ia_css_psys_manifest_types.h + * + * The types belonging to the terminal/program/ + * program group manifest static module + */ + +#include +#include "vied_nci_psys_resource_model.h" + + +/* This value is used in the manifest to indicate that the resource + * offset field must be ignored and the resource is relocatable + */ +#define IA_CSS_PROGRAM_MANIFEST_RESOURCE_OFFSET_IS_RELOCATABLE ((vied_nci_resource_size_t)(-1)) + +/* + * Connection type defining the interface source/sink + * + * Note that the connection type does not define the + * real-time configuration of the system, i.e. it + * does not describe whether a source and sink + * program group or sub-system operate synchronously + * that is a program script property {online, offline} + * (see FAS 5.16.3) + */ +#define IA_CSS_CONNECTION_BITMAP_BITS 8 +typedef uint8_t ia_css_connection_bitmap_t; + +#define IA_CSS_CONNECTION_TYPE_BITS 32 +typedef enum ia_css_connection_type { + /**< The terminal is in DDR */ + IA_CSS_CONNECTION_MEMORY = 0, + /**< The terminal is a (watermark) queued stream over DDR */ + IA_CSS_CONNECTION_MEMORY_STREAM, + /* The terminal is a device port */ + IA_CSS_CONNECTION_STREAM, + IA_CSS_N_CONNECTION_TYPES +} ia_css_connection_type_t; + +#define IA_CSS_PROGRAM_TYPE_BITS 32 +typedef enum ia_css_program_type { + IA_CSS_PROGRAM_TYPE_SINGULAR = 0, + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB, + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER, + IA_CSS_PROGRAM_TYPE_PARALLEL_SUB, + IA_CSS_PROGRAM_TYPE_PARALLEL_SUPER, + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB, + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER, +/* + * Future extension; A bitmap coding starts making more sense + * + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB_PARALLEL_SUB, + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB_PARALLEL_SUPER, + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER_PARALLEL_SUB, + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER_PARALLEL_SUPER, + */ + IA_CSS_N_PROGRAM_TYPES +} ia_css_program_type_t; + +#define IA_CSS_PROGRAM_GROUP_ID_BITS 32 +typedef uint32_t ia_css_program_group_ID_t; +#define IA_CSS_PROGRAM_ID_BITS 32 +typedef uint32_t ia_css_program_ID_t; + +#define IA_CSS_PROGRAM_INVALID_ID ((uint32_t)(-1)) +#define IA_CSS_PROGRAM_GROUP_INVALID_ID ((uint32_t)(-1)) + +typedef struct ia_css_program_group_manifest_s +ia_css_program_group_manifest_t; +typedef struct ia_css_program_manifest_s +ia_css_program_manifest_t; +typedef struct ia_css_data_terminal_manifest_s +ia_css_data_terminal_manifest_t; + +/* ============ Program Control Init Terminal Manifest - START ============ */ +typedef struct ia_css_program_control_init_manifest_program_desc_s + ia_css_program_control_init_manifest_program_desc_t; + +typedef struct ia_css_program_control_init_terminal_manifest_s + ia_css_program_control_init_terminal_manifest_t; +/* ============ Program Control Init Terminal Manifest - END ============ */ + +#endif /* __IA_CSS_PSYS_MANIFEST_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.h new file mode 100644 index 000000000000..7a3b712fb1a9 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.h @@ -0,0 +1,312 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_H +#define __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_H + +#include "ia_css_psys_static_storage_class.h" + +/*! \file */ + +/** @file ia_css_psys_program_group_manifest.h + * + * Define the methods on the program group manifest object that are not part of + * a single interface + */ + +#include + +#include /* uint8_t */ + +#include + +#include + +#include /* ia_css_kernel_bitmap_t */ +#include "ia_css_terminal_manifest.h" +#include "ia_css_rbm_manifest_types.h" + +#define IA_CSS_PROGRAM_GROUP_INVALID_ALIGNMENT ((uint8_t)(-1)) + +/*! Get the stored size of the program group manifest object + + @param manifest[in] program group manifest object + + @return size, 0 on invalid argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +size_t ia_css_program_group_manifest_get_size( + const ia_css_program_group_manifest_t *manifest); + +/*! Get the program group ID of the program group manifest object + + @param manifest[in] program group manifest object + + @return program group ID, IA_CSS_PROGRAM_GROUP_INVALID_ID on invalid argument +*/ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +ia_css_program_group_ID_t +ia_css_program_group_manifest_get_program_group_ID( + const ia_css_program_group_manifest_t *manifest); + +/*! Set the program group ID of the program group manifest object + + @param manifest[in] program group manifest object + + @param program group ID + + @return 0 on success, -1 on invalid manifest argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +int ia_css_program_group_manifest_set_program_group_ID( + ia_css_program_group_manifest_t *manifest, + ia_css_program_group_ID_t id); + +/*! Get the storage alignment constraint of the program group binary data + + @param manifest[in] program group manifest object + + @return alignment, IA_CSS_PROGRAM_GROUP_INVALID_ALIGNMENT on invalid manifest + argument +*/ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +uint8_t ia_css_program_group_manifest_get_alignment( + const ia_css_program_group_manifest_t *manifest); + +/*! Set the storage alignment constraint of the program group binary data + + @param manifest[in] program group manifest object + @param alignment[in] alignment desired + + @return < 0 on invalid manifest argument + */ +IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +int ia_css_program_group_manifest_set_alignment( + ia_css_program_group_manifest_t *manifest, + const uint8_t alignment); + +/*! Get the kernel enable bitmap of the program group + + @param manifest[in] program group manifest object + + @return bitmap, 0 on invalid manifest argument + */ +extern ia_css_kernel_bitmap_t +ia_css_program_group_manifest_get_kernel_bitmap( + const ia_css_program_group_manifest_t *manifest); + +/*! 
Set the kernel enable bitmap of the program group
+
+ @param manifest[in] program group manifest object
+ @param bitmap[in] kernel enable bitmap
+
+ @return < 0 on invalid manifest argument
+ */
+extern int ia_css_program_group_manifest_set_kernel_bitmap(
+ ia_css_program_group_manifest_t *manifest,
+ const ia_css_kernel_bitmap_t bitmap);
+
+/*! Get the number of programs in the program group manifest object
+
+ @param manifest[in] program group manifest object
+
+ @return program count, 0 on invalid manifest argument
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+uint8_t ia_css_program_group_manifest_get_program_count(
+ const ia_css_program_group_manifest_t *manifest);
+
+/*! Get the number of terminals in the program group manifest object
+
+ @param manifest[in] program group manifest object
+
+ @return terminal count, 0 on invalid manifest argument
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+uint8_t ia_css_program_group_manifest_get_terminal_count(
+ const ia_css_program_group_manifest_t *manifest);
+
+/*! Get the (pointer to) private data blob in the manifest
+
+ @param manifest[in] program group manifest object
+
+ @return private data blob, NULL on invalid manifest argument
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+void *ia_css_program_group_manifest_get_private_data(
+ const ia_css_program_group_manifest_t *manifest);
+
+/*! Get the (pointer to) routing bitmap (rbm) manifest
+
+ @param manifest[in] program group manifest object
+
+ @return rbm manifest, NULL on invalid manifest argument
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_rbm_manifest_t *
+ia_css_program_group_manifest_get_rbm_manifest(
+ const ia_css_program_group_manifest_t *manifest);
+
+/*! Get the (pointer to) indexed program manifest in the program group manifest
+ * object
+
+ @param manifest[in] program group manifest object
+ @param program_index[in] index of the program manifest object
+
+ @return program manifest, NULL on invalid arguments
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_program_manifest_t *
+ia_css_program_group_manifest_get_prgrm_mnfst(
+ const ia_css_program_group_manifest_t *manifest,
+ const unsigned int program_index);
+
+/*! Get the (pointer to) indexed terminal manifest in the program group
+ * manifest object
+
+ @param manifest[in] program group manifest object
+ @param terminal_index[in] index of the terminal manifest object
+
+ @return terminal manifest, NULL on invalid arguments
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_terminal_manifest_t *
+ia_css_program_group_manifest_get_term_mnfst(
+ const ia_css_program_group_manifest_t *manifest,
+ const unsigned int terminal_index);
+
+/*! Get the (pointer to) indexed data terminal manifest in the program group
+ * manifest object
+
+ @param manifest[in] program group manifest object
+ @param terminal_index[in] index of the terminal manifest object
+
+ @return data terminal manifest, NULL on invalid arguments
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_data_terminal_manifest_t *
+ia_css_program_group_manifest_get_data_terminal_manifest(
+ const ia_css_program_group_manifest_t *manifest,
+ const unsigned int terminal_index);
+
+/*!
Get the (pointer to) indexed parameter terminal manifest in the program
+ * group manifest object
+
+ @param manifest[in] program group manifest object
+ @param terminal_index[in] index of the terminal manifest object
+
+ @return parameter terminal manifest, NULL on invalid arguments
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_param_terminal_manifest_t *
+ia_css_program_group_manifest_get_param_terminal_manifest(
+ const ia_css_program_group_manifest_t *manifest,
+ const unsigned int terminal_index);
+
+/*! Get the (pointer to) indexed spatial param terminal manifest in the program
+ * group manifest object
+
+ @param manifest[in] program group manifest object
+ @param terminal_index[in] index of the terminal manifest object
+
+ @return spatial param terminal manifest, NULL on invalid arguments
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_spatial_param_terminal_manifest_t *
+ia_css_program_group_manifest_get_spatial_param_terminal_manifest(
+ const ia_css_program_group_manifest_t *manifest,
+ const unsigned int terminal_index);
+
+/*! Get the (pointer to) indexed sliced param terminal manifest in the program
+ * group manifest object
+
+ @param manifest[in] program group manifest object
+ @param terminal_index[in] index of the terminal manifest object
+
+ @return sliced param terminal manifest, NULL on invalid arguments
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_sliced_param_terminal_manifest_t *
+ia_css_program_group_manifest_get_sliced_param_terminal_manifest(
+ const ia_css_program_group_manifest_t *manifest,
+ const unsigned int terminal_index);
+
+/*! Get the (pointer to) indexed program terminal manifest in the program group
+ * manifest object
+
+ @param manifest[in] program group manifest object
+ @param terminal_index[in] index of the terminal manifest object
+
+ @return program terminal manifest, NULL on invalid arguments
+ */
+IA_CSS_PSYS_STATIC_STORAGE_CLASS_H
+ia_css_program_terminal_manifest_t *
+ia_css_program_group_manifest_get_program_terminal_manifest(
+ const ia_css_program_group_manifest_t *manifest,
+ const unsigned int terminal_index);
+
+/*! Initialize the program group manifest
+
+ @param manifest[in] program group manifest object
+ @param program_count[in] number of programs.
+ @param terminal_count[in] number of terminals.
+ @param program_deps[in] program dependencies for programs in pg.
+ @param terminal_deps[in] terminal dependencies for programs in pg.
+ @param terminal_type[in] array of terminal types, binary specific
+ static frame data
+ @param cached_in_param_section_count[in] Number of parameter terminal sections
+ @param cached_out_param_section_count[in] Number of parameter out terminal
+ sections
+ @param spatial_param_section_count[in] Array[spatial_terminal_count]
+ with sections per spatial
+ terminal
+ @param sliced_in_param_section_count[in] Array[sliced_in_terminal_count]
+ with sections per sliced in
+ terminal
+ @param sliced_out_param_section_count[in] Array[sliced_out_terminal_count]
+ with sections per sliced out
+ terminal
+ @param fragment_param_section_count[in] Number of fragment parameter
+ sections of the program init
+ terminal.
+ @param kernel_fragment_seq_count[in] Number of kernel fragment
+ sequence info.
+ @param progctrlinit_load_section_counts[in] Number of progctrlinit load
+ sections (size of array is program_count)
+ @param progctrlinit_connect_section_counts[in] Number of progctrlinit connect
+ sections (size of array is program_count)
+ @return none
+ */
+extern void ia_css_program_group_manifest_init(
+ ia_css_program_group_manifest_t *blob,
+ const uint8_t program_count,
+ const uint8_t terminal_count,
+ const uint8_t *program_dependencies,
+ const uint8_t *terminal_dependencies,
+ const ia_css_terminal_type_t *terminal_type,
+ const uint16_t cached_in_param_section_count,
+ const uint16_t cached_out_param_section_count,
+ const uint16_t *spatial_param_section_count,
+ const uint16_t fragment_param_section_count,
+ const uint16_t *sliced_in_param_section_count,
+ const uint16_t *sliced_out_param_section_count,
+ const uint16_t kernel_fragment_seq_count,
+ const uint16_t *progctrlinit_load_section_counts,
+ const uint16_t *progctrlinit_connect_section_counts);
+
+#ifdef __IA_CSS_PSYS_STATIC_INLINE__
+#include "ia_css_psys_program_group_manifest_impl.h"
+#endif /* __IA_CSS_PSYS_STATIC_INLINE__ */
+
+#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.hsys.user.h
new file mode 100644
index 000000000000..3f9927b27bb0
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.hsys.user.h
@@ -0,0 +1,72 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_HSYS_USER_H
+#define __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_HSYS_USER_H
+
+/*! \file */
+
+/** @file ia_css_psys_program_group_manifest.hsys.user.h
+ *
+ * Define the methods on the program group manifest object: Hsys user interface
+ */
+
+#include 
+
+#include /* bool */
+
+/*! Print the program group manifest object to file/stream
+
+ @param manifest[in] program group manifest object
+ @param fid[out] file/stream handle
+
+ @return < 0 on error
+ */
+extern int ia_css_program_group_manifest_print(
+ const ia_css_program_group_manifest_t *manifest,
+ void *fid);
+
+/*! Read the program group manifest object from file/stream
+
+ @param fid[in] file/stream handle
+
+ @return NULL on error
+ */
+extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_read(
+ void *fid);
+
+/*! Write the program group manifest object to file/stream
+
+ @param manifest[in] program group manifest object
+ @param fid[out] file/stream handle
+
+ @return < 0 on error
+ */
+extern int ia_css_program_group_manifest_write(
+ const ia_css_program_group_manifest_t *manifest,
+ void *fid);
+
+/*!
Boolean test if the program group manifest is valid
+
+ @param manifest[in] program group manifest
+
+ @return true if the program group manifest is correct, false on error
+ */
+extern bool ia_css_is_program_group_manifest_valid(
+ const ia_css_program_group_manifest_t *manifest);
+
+#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_HSYS_USER_H */
+
+
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.sim.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.sim.h
new file mode 100644
index 000000000000..8220c0612137
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_group_manifest.sim.h
@@ -0,0 +1,130 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_SIM_H
+#define __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_SIM_H
+
+/*! \file */
+
+/** @file ia_css_psys_program_group_manifest.sim.h
+ *
+ * Define the methods on the program group manifest object: Simulation only
+ */
+
+#include 
+
+#include /* uint8_t */
+#include "ia_css_terminal_defs.h"
+
+/*! Create a program group manifest object from specification
+
+ @param specification[in] specification (index)
+
+ @return NULL on error
+ */
+extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_create(
+ const unsigned int specification);
+
+/*! Destroy the program group manifest object
+
+ @param manifest[in] program group manifest
+
+ @return NULL
+ */
+extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_destroy(
+ ia_css_program_group_manifest_t *manifest);
+
+/*! Compute the size of storage required for allocating
+ * the program group (PG) manifest object
+
+ @param program_count[in] Number of programs in the PG
+ @param terminal_count[in] Number of terminals on the PG
+ @param program_dependency_count[in] Array[program_count] with the
+ program dependencies
+ @param terminal_dependency_count[in] Array[program_count] with the
+ terminal dependencies
+ @param terminal_type[in] Array[terminal_count] with the
+ terminal type
+ @param cached_in_param_section_count[in] Number of parameter
+ in terminal sections
+ @param cached_out_param_section_count[in] Number of parameter
+ out terminal sections
+ @param sliced_param_section_count[in] Array[sliced_terminal_count]
+ with sections per
+ sliced in terminal
+ @param sliced_out_param_section_count[in] Array[sliced_terminal_count]
+ with sections per
+ sliced out terminal
+ @param spatial_param_section_count[in] Array[spatial_terminal_count]
+ with sections per
+ spatial terminal
+ @param fragment_param_section_count[in] Number of fragment parameter
+ sections of the
+ program init terminal.
+ @param kernel_fragment_seq_count[in] Number of kernel fragment
+ sequence entries.
+ @param progctrlinit_load_section_counts[in] Number of progctrlinit load
+	sections (size of array is program_count)
+ @param progctrlinit_connect_section_counts[in] Number of progctrlinit connect
+	sections (size of array is program_count)
+ @return 0 on error
+ */
+size_t ia_css_sizeof_program_group_manifest(
+	const uint8_t program_count,
+	const uint8_t terminal_count,
+	const uint8_t *program_dependency_count,
+	const uint8_t *terminal_dependency_count,
+	const ia_css_terminal_type_t *terminal_type,
+	const uint16_t cached_in_param_section_count,
+	const uint16_t cached_out_param_section_count,
+	const uint16_t *spatial_param_section_count,
+	const uint16_t fragment_param_section_count,
+	const uint16_t *sliced_param_section_count,
+	const uint16_t *sliced_out_param_section_count,
+	const uint16_t kernel_fragment_seq_count,
+	const uint16_t *progctrlinit_load_section_counts,
+	const uint16_t *progctrlinit_connect_section_counts);
+
+/*! Create (the storage for) the program group manifest object
+
+ @param program_count[in] Number of programs in the program group
+ @param terminal_count[in] Number of terminals on the program group
+ @param program_dependency_count[in] Array[program_count] with the
+	program dependencies
+ @param terminal_dependency_count[in] Array[program_count] with the
+	terminal dependencies
+ @param terminal_type[in] Array[terminal_count] with the
+	terminal type
+
+ @return NULL on error
+ */
+extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_alloc(
+	const uint8_t program_count,
+	const uint8_t terminal_count,
+	const uint8_t *program_dependency_count,
+	const uint8_t *terminal_dependency_count,
+	const ia_css_terminal_type_t *terminal_type);
+
+/*! Free (the storage of) the program group manifest object
+
+ @param manifest[in] program group manifest
+
+ @return NULL
+ */
+extern ia_css_program_group_manifest_t *ia_css_program_group_manifest_free(
+	ia_css_program_group_manifest_t *manifest);
+
+#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_SIM_H */
+
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.h
new file mode 100644
index 000000000000..b7333671ed4f
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.h
@@ -0,0 +1,488 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_PROGRAM_MANIFEST_H
+#define __IA_CSS_PSYS_PROGRAM_MANIFEST_H
+
+/*! \file */
+
+/** @file ia_css_psys_program_manifest.h
+ *
+ * Define the methods on the program manifest object that are not part of a
+ * single interface
+ */
+
+#include
+
+#include /* uint8_t */
+
+#include
+
+#include
+
+#include /* ia_css_kernel_bitmap_t */
+
+/*
+ * Resources needs
+ */
+#include
+
+#define IA_CSS_PROGRAM_INVALID_DEPENDENCY ((uint8_t)(-1))
+
+/*!
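+ An illustrative lifecycle sketch for the simulation-only allocation
+ methods above (hypothetical; the count and type arrays are placeholder
+ inputs supplied by the caller):
+
+	ia_css_program_group_manifest_t *m =
+		ia_css_program_group_manifest_alloc(program_count,
+			terminal_count, program_dependency_count,
+			terminal_dependency_count, terminal_type);
+
+	if (m != NULL) {
+		// ... fill in program and terminal manifests ...
+		m = ia_css_program_group_manifest_free(m); // returns NULL
+	}
+ */
+
+/*!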
Check if the program manifest object specifies a fixed cell allocation + + @param manifest[in] program manifest object + + @return has_fixed_cell, false on invalid argument + */ +extern bool ia_css_has_program_manifest_fixed_cell( + const ia_css_program_manifest_t *manifest); + +/*! Get the stored size of the program manifest object + + @param manifest[in] program manifest object + + @return size, 0 on invalid argument + */ +extern size_t ia_css_program_manifest_get_size( + const ia_css_program_manifest_t *manifest); + +/*! Get the program ID of the program manifest object + + @param manifest[in] program manifest object + + @return program ID, IA_CSS_PROGRAM_INVALID_ID on invalid argument + */ +extern ia_css_program_ID_t ia_css_program_manifest_get_program_ID( + const ia_css_program_manifest_t *manifest); + +/*! Set the program ID of the program manifest object + + @param manifest[in] program manifest object + + @param program ID + + @return 0 on success, -1 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_program_ID( + ia_css_program_manifest_t *manifest, + ia_css_program_ID_t id); + +/*! Get the (pointer to) the program group manifest parent of the program + * manifest object + + @param manifest[in] program manifest object + + @return the pointer to the parent, NULL on invalid manifest argument + */ +extern ia_css_program_group_manifest_t *ia_css_program_manifest_get_parent( + const ia_css_program_manifest_t *manifest); + +/*! Set the (pointer to) the program group manifest parent of the program + * manifest object + + @param manifest[in] program manifest object + @param program_offset[in] this program's offset from + program_group_manifest's base address. + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_parent_offset( + ia_css_program_manifest_t *manifest, + int32_t program_offset); + +/*! Get the type of the program manifest object + + @param manifest[in] program manifest object + + @return program type, limit value (IA_CSS_N_PROGRAM_TYPES) on invalid manifest + argument +*/ +extern ia_css_program_type_t ia_css_program_manifest_get_type( + const ia_css_program_manifest_t *manifest); + +/*! Set the type of the program manifest object + + @param manifest[in] program manifest object + @param program_type[in] program type + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_type( + ia_css_program_manifest_t *manifest, + const ia_css_program_type_t program_type); + +/*! Set the cell id of the program manifest object + + @param manifest[in] program manifest object + @param program_cell_id[in] program cell id + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_cell_ID( + ia_css_program_manifest_t *manifest, + const vied_nci_cell_ID_t cell_id); + +/*! Set the cell type of the program manifest object + + @param manifest[in] program manifest object + @param program_cell_type[in] program cell type + + @return < 0 on invalid manifest argument + */ +extern int ia_css_program_manifest_set_cell_type_ID( + ia_css_program_manifest_t *manifest, + const vied_nci_cell_type_ID_t cell_type_id); + +/*! Set cells bitmap for the program + + @param manifest[in] program manifest object + @param bitmap[in] bitmap + + @return 0 when not applicable and/or invalid arguments + */ +extern int ia_css_program_manifest_set_cells_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_resource_bitmap_t bitmap); + +/*! 
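+ An illustrative setup sketch for the setters above (hypothetical;
+ err aggregation and the id, program_type and cell_id values are
+ placeholders chosen by the caller):
+
+	int err = 0;
+
+	err |= ia_css_program_manifest_set_program_ID(prog, id);
+	err |= ia_css_program_manifest_set_type(prog, program_type);
+	// bind the program to one specific cell instance
+	err |= ia_css_program_manifest_set_cell_ID(prog, cell_id);
+ */
+
+/*!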
Get cells bitmap for the program + + @param manifest[in] program manifest object + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_bitmap_t ia_css_program_manifest_get_cells_bitmap( + const ia_css_program_manifest_t *manifest); + +/*! Set DFM port bitmap for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + @param bitmap[in] bitmap + + @return 0 when not applicable and/or invalid arguments + */ +extern int ia_css_program_manifest_set_dfm_port_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get bitmap of DFM ports requested for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + + @return DFM port bitmap + */ +extern vied_nci_resource_bitmap_t ia_css_program_manifest_get_dfm_port_bitmap( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id); + + +/*! Set active DFM port specification bitmap for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + @param bitmap[in] bitmap + + @return 0 when not applicable and/or invalid arguments + */ +extern int ia_css_program_manifest_set_dfm_active_port_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const vied_nci_resource_bitmap_t bitmap); + +/*! Get active DFM port specification bitmap for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_bitmap_t ia_css_program_manifest_get_dfm_active_port_bitmap( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id); + +/*! Set DFM device relocatability specification for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + @param is_relocatable[in] 1 if dfm device ports are relocatable, 0 otherwise + + @return 0 when not applicable and/or invalid arguments + */ +extern int ia_css_program_manifest_set_is_dfm_relocatable( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const uint8_t is_relocatable); + +/*! Get DFM device relocatability specification for the program + + @param manifest[in] program manifest object + @param dfm_type_id[in] DFM resource type ID + + @return 1 if dfm device ports are relocatable, 0 otherwise + */ +extern uint8_t ia_css_program_manifest_get_is_dfm_relocatable( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id); + + +/*! Get the memory resource (size) specification for a memory + that belongs to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_int_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! 
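+ An illustrative DFM configuration sketch for the accessors above
+ (hypothetical; dfm_id, port_bitmap and active_bitmap are placeholder
+ values):
+
+	int err = 0;
+
+	err |= ia_css_program_manifest_set_dfm_port_bitmap(prog,
+		dfm_id, port_bitmap);
+	err |= ia_css_program_manifest_set_dfm_active_port_bitmap(prog,
+		dfm_id, active_bitmap);
+	// 1 = the DFM device ports are relocatable, 0 = fixed
+	err |= ia_css_program_manifest_set_is_dfm_relocatable(prog,
+		dfm_id, 1);
+ */
+
+/*!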
Set the memory resource (size) specification for a memory + that belongs to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type id + @param int_mem_size[in] internal memory size + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_int_mem_size( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t int_mem_size); + +/*! Get the memory resource (size) specification for a memory + that does not belong to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Set the memory resource (size) specification for a memory + that does not belong to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type id + @param ext_mem_size[in] external memory size + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_ext_mem_size( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t ext_mem_size); + +/*! Get a device channel resource (size) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + + @return 0 when not applicable and/or invalid arguments + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id); + +/*! Set a device channel resource (size) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + @param dev_chn_size[in] device channel size + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_dev_chn_size( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t dev_chn_size); + +/*! Set a device channel resource (offset) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + @param dev_chn_offset[in] device channel offset + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_dev_chn_offset( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t dev_chn_offset); + + +/*! Set the memory resource (offset) specification for a memory + that does not belong to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type id + @param ext_mem_offset[in] external memory offset + + @return < 0 on invalid arguments + */ +extern int ia_css_program_manifest_set_ext_mem_offset( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t ext_mem_offset); + +/*! 
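+ An illustrative resource-sizing sketch for the setters above
+ (hypothetical; the id and size values are placeholders):
+
+	if (ia_css_program_manifest_set_ext_mem_size(prog,
+			mem_type_id, ext_mem_size) < 0)
+		return -1;
+	if (ia_css_program_manifest_set_dev_chn_size(prog,
+			dev_chn_id, dev_chn_size) < 0)
+		return -1;
+ */
+
+/*!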
Get a device channel resource (offset) specification
+
+ @param manifest[in] program manifest object
+ @param dev_chn_id[in] device channel ID
+
+ @return Valid fixed offset (if value is greater or equal to 0) or
+	IA_CSS_PROGRAM_MANIFEST_RESOURCE_OFFSET_IS_RELOCATABLE if offset
+	is relocatable
+ */
+extern vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_offset(
+	const ia_css_program_manifest_t *manifest,
+	const vied_nci_dev_chn_ID_t dev_chn_id);
+
+/*! Get the memory resource (offset) specification for a memory
+ that does not belong to the cell where the program will be mapped.
+
+ @param manifest[in] program manifest object
+ @param mem_type_id[in] mem type ID
+
+ @return Valid fixed offset (if value is greater or equal to 0) or
+	IA_CSS_PROGRAM_MANIFEST_RESOURCE_OFFSET_IS_RELOCATABLE if offset
+	is relocatable
+ */
+extern vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_offset(
+	const ia_css_program_manifest_t *manifest,
+	const vied_nci_mem_type_ID_t mem_type_id);
+
+/*! Get the kernel composition of the program manifest object
+
+ @param manifest[in] program manifest object
+
+ @return bitmap, 0 on invalid arguments
+ */
+extern ia_css_kernel_bitmap_t ia_css_program_manifest_get_kernel_bitmap(
+	const ia_css_program_manifest_t *manifest);
+
+/*! Set the kernel composition of the program manifest object
+
+ @param manifest[in] program manifest object
+ @param kernel_bitmap[in] kernel composition bitmap
+
+ @return < 0 on invalid arguments
+ */
+extern int ia_css_program_manifest_set_kernel_bitmap(
+	ia_css_program_manifest_t *manifest,
+	const ia_css_kernel_bitmap_t kernel_bitmap);
+
+/*! Get the number of programs this program depends on from the program
+ * manifest object
+
+ @param manifest[in] program manifest object
+
+ @return program dependency count
+ */
+extern uint8_t ia_css_program_manifest_get_program_dependency_count(
+	const ia_css_program_manifest_t *manifest);
+
+/*! Get the index of the program which this program depends on
+ (at dependency slot index) from the program manifest object
+
+ @param manifest[in] program manifest object
+ @param index[in] dependency slot index
+
+ @return program dependency,
+	IA_CSS_PROGRAM_INVALID_DEPENDENCY on invalid arguments
+ */
+extern uint8_t ia_css_program_manifest_get_program_dependency(
+	const ia_css_program_manifest_t *manifest,
+	const unsigned int index);
+
+/*! Set the index of the program which this program depends on
+ (at dependency slot index) in the program manifest object
+
+ @param manifest[in] program manifest object
+ @param program_dependency[in] program dependency
+ @param index[in] dependency slot index
+
+ @return < 0 on invalid arguments
+ */
+extern int ia_css_program_manifest_set_program_dependency(
+	ia_css_program_manifest_t *manifest,
+	const uint8_t program_dependency,
+	const unsigned int index);
+
+/*! Get the number of terminals this program depends on from the program
+ * manifest object
+
+ @param manifest[in] program manifest object
+
+ @return terminal dependency count
+ */
+extern uint8_t ia_css_program_manifest_get_terminal_dependency_count(
+	const ia_css_program_manifest_t *manifest);
+
+/*! Get the index of the terminal which this program depends on
+ (at dependency slot index) from the program manifest object
+
+ @param manifest[in] program manifest object
+ @param index[in] dependency slot index
+
+ @return terminal dependency, IA_CSS_PROGRAM_INVALID_DEPENDENCY on error
+ */
+uint8_t ia_css_program_manifest_get_terminal_dependency(
+	const ia_css_program_manifest_t *manifest,
+	const unsigned int index);
+
+/*!
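+ An illustrative iteration sketch over the dependency getters above
+ (hypothetical; prog is a placeholder manifest pointer):
+
+	uint8_t i, n;
+
+	n = ia_css_program_manifest_get_program_dependency_count(prog);
+	for (i = 0; i < n; i++) {
+		uint8_t dep =
+			ia_css_program_manifest_get_program_dependency(prog, i);
+
+		if (dep == IA_CSS_PROGRAM_INVALID_DEPENDENCY)
+			break; // invalid index or invalid manifest
+		// dep is the index of a program this program depends on
+	}
+ */
+
+/*!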
Set the index of the terminal which this program depends on
+ (at dependency slot index) in the program manifest object
+
+ @param manifest[in] program manifest object
+ @param terminal_dependency[in] terminal dependency
+ @param index[in] dependency slot index
+
+ @return < 0 on invalid arguments
+ */
+extern int ia_css_program_manifest_set_terminal_dependency(
+	ia_css_program_manifest_t *manifest,
+	const uint8_t terminal_dependency,
+	const unsigned int index);
+
+/*! Check if the program manifest object specifies a subnode program
+
+ @param manifest[in] program manifest object
+
+ @return is_subnode, false on invalid argument
+ */
+extern bool ia_css_is_program_manifest_subnode_program_type(
+	const ia_css_program_manifest_t *manifest);
+
+/*! Check if the program manifest object specifies a supernode program
+
+ @param manifest[in] program manifest object
+
+ @return is_supernode, false on invalid argument
+ */
+extern bool ia_css_is_program_manifest_supernode_program_type(
+	const ia_css_program_manifest_t *manifest);
+
+/*! Check if the program manifest object specifies a singular program
+
+ @param manifest[in] program manifest object
+
+ @return is_singular, false on invalid argument
+ */
+extern bool ia_css_is_program_manifest_singular_program_type(
+	const ia_css_program_manifest_t *manifest);
+
+#endif /* __IA_CSS_PSYS_PROGRAM_MANIFEST_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.kernel.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.kernel.h
new file mode 100644
index 000000000000..9d737b75a576
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.kernel.h
@@ -0,0 +1,96 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_KERNEL_H
+#define __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_KERNEL_H
+
+/*! \file */
+
+/** @file ia_css_psys_program_manifest.hsys.kernel.h
+ *
+ * Define the methods on the program manifest object: Hsys kernel interface
+ */
+
+#include
+
+#include
+
+#include /* uint8_t */
+
+/*
+ * Resources needs
+ */
+
+/*! Get the cell ID from the program manifest object
+
+ @param manifest[in] program manifest object
+
+ Note: If the cell ID is specified, the program this manifest belongs to
+ must be mapped on that instance. If the cell ID is invalid (limit value)
+ then the cell type ID must be specified instead
+
+ @return cell ID, limit value if not specified
+ */
+extern vied_nci_cell_ID_t ia_css_program_manifest_get_cell_ID(
+	const ia_css_program_manifest_t *manifest);
+
+/*! Get the cell type ID from the program manifest object
+
+ @param manifest[in] program manifest object
+
+ Note: If the cell type ID is specified, the program this manifest belongs
+ to can be mapped on any instance of this cell type.
If the cell type ID is + invalid (limit value) then a specific cell ID must be specified instead + + @return cell ID, limit value if not specified + */ +extern vied_nci_cell_type_ID_t ia_css_program_manifest_get_cell_type_ID( + const ia_css_program_manifest_t *manifest); + +/*! Get the memory resource (size) specification for a memory + that belongs to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return 0 when not applicable + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_int_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Get the memory resource (size) specification for a memory + that does not belong to the cell where the program will be mapped + + @param manifest[in] program manifest object + @param mem_type_id[in] mem type ID + + @return 0 when not applicable + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id); + +/*! Get a device channel resource (size) specification + + @param manifest[in] program manifest object + @param dev_chn_id[in] device channel ID + + @return 0 when not applicable + */ +extern vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id); + +#endif /* __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_KERNEL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.user.h new file mode 100644 index 000000000000..087c84b7106e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.hsys.user.h @@ -0,0 +1,38 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_USER_H +#define __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_USER_H + +/*! \file */ + +/** @file ia_css_psys_program_manifest.hsys.user.h + * + * Define the methods on the program manifest object: Hsys user interface + */ + +#include + +/*! 
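+ An illustrative mapping sketch for the cell getters in the Hsys kernel
+ interface header above (hypothetical; VIED_NCI_N_CELL_ID stands in for
+ the unspecified "limit value" and is an assumed name, prog is a
+ placeholder pointer):
+
+	vied_nci_cell_ID_t cell = ia_css_program_manifest_get_cell_ID(prog);
+
+	if (cell == VIED_NCI_N_CELL_ID) { // assumed limit value
+		// no fixed cell: map on any instance of the cell type
+		vied_nci_cell_type_ID_t type =
+			ia_css_program_manifest_get_cell_type_ID(prog);
+		// ... pick a free cell of this type ...
+	}
+ */
+
+/*!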
Print the program manifest object to file/stream + + @param manifest[in] program manifest object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_program_manifest_print( + const ia_css_program_manifest_t *manifest, + void *fid); + +#endif /* __IA_CSS_PSYS_PROGRAM_MANIFEST_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.sim.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.sim.h new file mode 100644 index 000000000000..0c2cef11f30e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_program_manifest.sim.h @@ -0,0 +1,61 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_MANIFEST_SIM_H +#define __IA_CSS_PSYS_PROGRAM_MANIFEST_SIM_H + +/*! \file */ + +/** @file ia_css_psys_program_manifest.sim.h + * + * Define the methods on the program manifest object: Simulation only + */ + +#include + +#include /* uint8_t */ + +/*! Compute the size of storage required for allocating + * the program manifest object + + @param program_dependency_count[in] Number of programs this one depends on + @param terminal_dependency_count[in] Number of terminals this one depends on + + @return 0 on error + */ +extern size_t ia_css_sizeof_program_manifest( + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count); + +/*! Create (the storage for) the program manifest object + + @param program_dependency_count[in] Number of programs this one depends on + @param terminal_dependency_count[in] Number of terminals this one depends on + + @return NULL on error + */ +extern ia_css_program_manifest_t *ia_css_program_manifest_alloc( + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count); + +/*! Destroy (the storage of) the program manifest object + + @param manifest[in] program manifest + + @return NULL + */ +extern ia_css_program_manifest_t *ia_css_program_manifest_free( + ia_css_program_manifest_t *manifest); + +#endif /* __IA_CSS_PSYS_PROGRAM_MANIFEST_SIM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_storage_class.h new file mode 100644 index 000000000000..f3c832b5a4a3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_storage_class.h @@ -0,0 +1,28 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_STATIC_STORAGE_CLASS_H +#define __IA_CSS_PSYS_STATIC_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_PSYS_STATIC_INLINE__ +#define IA_CSS_PSYS_STATIC_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +#else +#define IA_CSS_PSYS_STATIC_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_PSYS_STATIC_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_PSYS_STATIC_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_trace.h new file mode 100644 index 000000000000..7c5612cd0969 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_static_trace.h @@ -0,0 +1,103 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_STATIC_TRACE_H +#define __IA_CSS_PSYS_STATIC_TRACE_H + +#include "ia_css_psysapi_trace.h" + +#define PSYS_STATIC_TRACE_LEVEL_CONFIG_DEFAULT PSYSAPI_TRACE_LOG_LEVEL_OFF + +/* Default sub-module tracing config */ +#if (!defined(PSYSAPI_STATIC_TRACING_OVERRIDE)) + #define PSYS_STATIC_TRACE_LEVEL_CONFIG \ + PSYS_STATIC_TRACE_LEVEL_CONFIG_DEFAULT +#endif + +/* Module/sub-module specific trace setting will be used if + * the trace level is not specified from the module or + PSYSAPI_STATIC_TRACING_OVERRIDE is defined + */ +#if (defined(PSYSAPI_STATIC_TRACING_OVERRIDE)) + /* Module/sub-module specific trace setting */ + #if PSYSAPI_STATIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_OFF + /* PSYSAPI_TRACE_LOG_LEVEL_OFF */ + #define PSYSAPI_STATIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_STATIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_VERBOSE \ + IA_CSS_TRACE_LEVEL_DISABLED + #elif PSYSAPI_STATIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_NORMAL + /* PSYSAPI_TRACE_LOG_LEVEL_NORMAL */ + #define PSYSAPI_STATIC_TRACE_METHOD \ + IA_CSS_TRACE_METHOD_NATIVE + #define PSYSAPI_STATIC_TRACE_LEVEL_ASSERT \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_ERROR \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_WARNING \ + IA_CSS_TRACE_LEVEL_DISABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_INFO \ + IA_CSS_TRACE_LEVEL_ENABLED + #define PSYSAPI_STATIC_TRACE_LEVEL_DEBUG \ + IA_CSS_TRACE_LEVEL_DISABLED + 
#define PSYSAPI_STATIC_TRACE_LEVEL_VERBOSE \
+		IA_CSS_TRACE_LEVEL_DISABLED
+	#elif PSYSAPI_STATIC_TRACING_OVERRIDE == PSYSAPI_TRACE_LOG_LEVEL_DEBUG
+		/* PSYSAPI_TRACE_LOG_LEVEL_DEBUG */
+		#define PSYSAPI_STATIC_TRACE_METHOD \
+			IA_CSS_TRACE_METHOD_NATIVE
+		#define PSYSAPI_STATIC_TRACE_LEVEL_ASSERT \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_STATIC_TRACE_LEVEL_ERROR \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_STATIC_TRACE_LEVEL_WARNING \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_STATIC_TRACE_LEVEL_INFO \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_STATIC_TRACE_LEVEL_DEBUG \
+			IA_CSS_TRACE_LEVEL_ENABLED
+		#define PSYSAPI_STATIC_TRACE_LEVEL_VERBOSE \
+			IA_CSS_TRACE_LEVEL_ENABLED
+	#else
+		#error "No PSYSAPI_STATIC tracing level defined"
+	#endif
+#else
+	/* Inherit Module trace setting */
+	#define PSYSAPI_STATIC_TRACE_METHOD \
+		PSYSAPI_TRACE_METHOD
+	#define PSYSAPI_STATIC_TRACE_LEVEL_ASSERT \
+		PSYSAPI_TRACE_LEVEL_ASSERT
+	#define PSYSAPI_STATIC_TRACE_LEVEL_ERROR \
+		PSYSAPI_TRACE_LEVEL_ERROR
+	#define PSYSAPI_STATIC_TRACE_LEVEL_WARNING \
+		PSYSAPI_TRACE_LEVEL_WARNING
+	#define PSYSAPI_STATIC_TRACE_LEVEL_INFO \
+		PSYSAPI_TRACE_LEVEL_INFO
+	#define PSYSAPI_STATIC_TRACE_LEVEL_DEBUG \
+		PSYSAPI_TRACE_LEVEL_DEBUG
+	#define PSYSAPI_STATIC_TRACE_LEVEL_VERBOSE \
+		PSYSAPI_TRACE_LEVEL_VERBOSE
+#endif
+
+#endif /* __IA_CSS_PSYS_STATIC_TRACE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.h
new file mode 100644
index 000000000000..0fa62b32e1a7
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.h
@@ -0,0 +1,423 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_TERMINAL_MANIFEST_H
+#define __IA_CSS_PSYS_TERMINAL_MANIFEST_H
+
+/*! \file */
+
+/** @file ia_css_psys_terminal_manifest.h
+ *
+ * Define the methods on the terminal manifest object that are not part of a
+ * single interface
+ */
+
+#include
+
+#include
+
+#include
+
+#include /* ia_css_frame_format_bitmap_t */
+#include /* ia_css_kernel_bitmap_t */
+
+#include /* size_t */
+#include "ia_css_terminal_manifest.h"
+#include "ia_css_terminal_manifest_base_types.h"
+
+/*! Check if the terminal manifest object specifies a spatial param terminal
+ * type
+
+ @param manifest[in] terminal manifest object
+
+ @return is_spatial_parameter_terminal, false on invalid manifest argument
+ */
+extern bool ia_css_is_terminal_manifest_spatial_parameter_terminal(
+	const ia_css_terminal_manifest_t *manifest);
+
+/*! Check if the terminal manifest object specifies a program terminal type
+
+ @param manifest[in] terminal manifest object
+
+ @return is_program_terminal, false on invalid manifest argument
+ */
+extern bool ia_css_is_terminal_manifest_program_terminal(
+	const ia_css_terminal_manifest_t *manifest);
+
+/*!
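+ An illustrative dispatch sketch over the terminal-manifest predicates
+ in this header (hypothetical; term is a placeholder pointer, and the
+ casts follow the pattern used in the implementation file):
+
+	if (ia_css_is_terminal_manifest_data_terminal(term)) {
+		// cast to ia_css_data_terminal_manifest_t *
+	} else if (ia_css_is_terminal_manifest_program_terminal(term)) {
+		// cast to ia_css_program_terminal_manifest_t *
+	} else if (ia_css_is_terminal_manifest_spatial_parameter_terminal(
+			term)) {
+		// cast to ia_css_spatial_param_terminal_manifest_t *
+	}
+ */
+
+/*!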
Check if the terminal manifest object specifies a program control init terminal type + * + * @param manifest[in] terminal manifest object + * + * @return is_parameter_terminal, false on invalid manifest argument + */ +extern bool ia_css_is_terminal_manifest_program_control_init_terminal( + const ia_css_terminal_manifest_t *manifest); + +/*! Check if the terminal manifest object specifies a (cached) parameter + * terminal type + + @param manifest[in] terminal manifest object + + @return is_parameter_terminal, false on invalid manifest argument + */ +extern bool ia_css_is_terminal_manifest_parameter_terminal( + const ia_css_terminal_manifest_t *manifest); + +/*! Check if the terminal manifest object specifies a (sliced) parameter + * terminal type + + @param manifest[in] terminal manifest object + + @return is_parameter_terminal, false on invalid manifest argument + */ +extern bool ia_css_is_terminal_manifest_sliced_terminal( + const ia_css_terminal_manifest_t *manifest); + +/*! Check if the terminal manifest object specifies a data terminal type + + @param manifest[in] terminal manifest object + + @return is_data_terminal, false on invalid manifest argument + */ +extern bool ia_css_is_terminal_manifest_data_terminal( + const ia_css_terminal_manifest_t *manifest); + +/*! Get the stored size of the terminal manifest object + + @param manifest[in] terminal manifest object + + @return size, 0 on invalid manifest argument + */ +extern size_t ia_css_terminal_manifest_get_size( + const ia_css_terminal_manifest_t *manifest); + +/*! Get the (pointer to) the program group manifest parent of the terminal + * manifest object + + @param manifest[in] terminal manifest object + + @return the pointer to the parent, NULL on invalid manifest argument + */ +extern ia_css_program_group_manifest_t *ia_css_terminal_manifest_get_parent( + const ia_css_terminal_manifest_t *manifest); + +/*! Set the (pointer to) the program group manifest parent of the terminal + * manifest object + + @param manifest[in] terminal manifest object + @param terminal_offset[in] this terminal's offset from + program_group_manifest base address. + + @return < 0 on invalid arguments + */ +extern int ia_css_terminal_manifest_set_parent_offset( + ia_css_terminal_manifest_t *manifest, + int32_t terminal_offset); + +/*! Get the type of the terminal manifest object + + @param manifest[in] terminal manifest object + + @return terminal type, limit value (IA_CSS_N_TERMINAL_TYPES) on invalid + manifest argument +*/ +extern ia_css_terminal_type_t ia_css_terminal_manifest_get_type( + const ia_css_terminal_manifest_t *manifest); + +/*! Set the type of the terminal manifest object + + @param manifest[in] terminal manifest object + @param terminal_type[in] terminal type + + @return < 0 on invalid manifest argument + */ +extern int ia_css_terminal_manifest_set_type( + ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_type_t terminal_type); + +/*! Set the ID of the terminal manifest object + + @param manifest[in] terminal manifest object + @param ID[in] terminal ID + + @return < 0 on invalid manifest argument + */ +int ia_css_terminal_manifest_set_ID( + ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_ID_t ID); + +/*! Get the type of the terminal manifest object + + @param manifest[in] terminal manifest object + + @return terminal id, IA_CSS_TERMINAL_INVALID_ID on invalid manifest argument + */ +extern ia_css_terminal_ID_t ia_css_terminal_manifest_get_ID( + const ia_css_terminal_manifest_t *manifest); + +/*! 
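+ An illustrative type-switch sketch for ia_css_terminal_manifest_get_type
+ above (hypothetical; the enumerators are the ones used elsewhere in this
+ patch, term is a placeholder pointer):
+
+	switch (ia_css_terminal_manifest_get_type(term)) {
+	case IA_CSS_TERMINAL_TYPE_DATA_IN:
+	case IA_CSS_TERMINAL_TYPE_DATA_OUT:
+		// handle data terminals
+		break;
+	case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN:
+	case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT:
+		// handle cached parameter terminals
+		break;
+	default:
+		break;
+	}
+ */
+
+/*!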
Get the supported frame types of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + + @return frame format bitmap, 0 on invalid manifest argument +*/ +extern ia_css_frame_format_bitmap_t + ia_css_data_terminal_manifest_get_frame_format_bitmap( + const ia_css_data_terminal_manifest_t *manifest); + +/*! Set the chosen frame type for the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param bitmap[in] frame format bitmap + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_frame_format_bitmap( + ia_css_data_terminal_manifest_t *manifest, + ia_css_frame_format_bitmap_t bitmap); + +/*! Check if the (data) terminal manifest object supports compression + + @param manifest[in] (data) terminal manifest object + + @return compression_support, true if compression is supported + */ +extern bool ia_css_data_terminal_manifest_can_support_compression( + const ia_css_data_terminal_manifest_t *manifest); + +/*! Set the compression support feature of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param compression_support[in] set true to support compression + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_compression_support( + ia_css_data_terminal_manifest_t *manifest, + bool compression_support); + +/*! Set the supported connection types of the terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param bitmap[in] connection bitmap + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_connection_bitmap( + ia_css_data_terminal_manifest_t *manifest, ia_css_connection_bitmap_t bitmap); + +/*! Get the connection bitmap of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + + @return connection bitmap, 0 on invalid manifest argument +*/ +extern ia_css_connection_bitmap_t + ia_css_data_terminal_manifest_get_connection_bitmap( + const ia_css_data_terminal_manifest_t *manifest); + +/*! Get the kernel dependency of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + + @return kernel bitmap, 0 on invalid manifest argument + */ +extern ia_css_kernel_bitmap_t ia_css_data_terminal_manifest_get_kernel_bitmap( + const ia_css_data_terminal_manifest_t *manifest); + +/*! Set the kernel dependency of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param kernel_bitmap[in] kernel dependency bitmap + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_kernel_bitmap( + ia_css_data_terminal_manifest_t *manifest, + const ia_css_kernel_bitmap_t kernel_bitmap); + +/*! Set the unique kernel dependency of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param index[in] kernel dependency bitmap index + + @return < 0 on invalid argument(s) + */ +extern int ia_css_data_terminal_manifest_set_kernel_bitmap_unique( + ia_css_data_terminal_manifest_t *manifest, + const unsigned int index); + +/*! 
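+ An illustrative data-terminal configuration sketch for the setters
+ above (hypothetical; format_bitmap and kernel_index are placeholders):
+
+	int err = 0;
+
+	err |= ia_css_data_terminal_manifest_set_frame_format_bitmap(dterm,
+		format_bitmap);
+	err |= ia_css_data_terminal_manifest_set_compression_support(dterm,
+		false);
+	// mark the single kernel this data terminal depends on
+	err |= ia_css_data_terminal_manifest_set_kernel_bitmap_unique(dterm,
+		kernel_index);
+ */
+
+/*!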
Set the min size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param min_size[in] Minimum size of the frame array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_min_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t min_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Set the max size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param max_size[in] Maximum size of the frame array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_max_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t max_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Get the min size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param min_size[in] Minimum size of the frame array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_get_min_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Get the max size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param max_size[in] Maximum size of the frame array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_get_max_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Set the min fragment size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param min_size[in] Minimum size of the fragment array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_min_fragment_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t min_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Set the max fragment size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param max_size[in] Maximum size of the fragment array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_set_max_fragment_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t max_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Get the min fragment size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param min_size[in] Minimum size of the fragment array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_get_min_fragment_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]); + +/*! Get the max fragment size of the (data) terminal manifest object + + @param manifest[in] (data) terminal manifest object + @param max_size[in] Maximum size of the fragment array + + @return < 0 on invalid manifest argument + */ +extern int ia_css_data_terminal_manifest_get_max_fragment_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]); + +/*! + * Get the program control init connect section count for program prog. + * @param prog[in] program control init terminal program desc + * @return number of connect section for program prog. + */ + +extern +unsigned int ia_css_program_control_init_terminal_manifest_get_connect_section_count( + const ia_css_program_control_init_manifest_program_desc_t *prog); + + +/*! + * Get the program control init load section count for program prog. 
+ * @param prog[in] program control init terminal program desc
+ * @return number of load sections for program prog.
+ */
+
+extern
+unsigned int ia_css_program_control_init_terminal_manifest_get_load_section_count(
+	const ia_css_program_control_init_manifest_program_desc_t *prog);
+
+/*!
+ * Get the program control init terminal manifest size.
+ * @param nof_programs[in] Number of programs.
+ * @param nof_load_sections[in] Array of size nof_programs,
+ * encoding the number of load sections.
+ * @param nof_connect_sections[in] Array of size nof_programs,
+ * encoding the number of connect sections.
+ * @return the required storage size, 0 on invalid arguments
+ */
+extern
+unsigned int ia_css_program_control_init_terminal_manifest_get_size(
+	const uint16_t nof_programs,
+	const uint16_t *nof_load_sections,
+	const uint16_t *nof_connect_sections);
+
+/*!
+ * Get the program control init terminal manifest program desc.
+ * @param terminal[in] Program control init terminal.
+ * @param program[in] Program index.
+ * @return program control init terminal program desc (or NULL if error).
+ */
+extern
+ia_css_program_control_init_manifest_program_desc_t *
+ia_css_program_control_init_terminal_manifest_get_program_desc(
+	const ia_css_program_control_init_terminal_manifest_t *terminal,
+	unsigned int program);
+
+/*!
+ * Initialize the program control init terminal manifest.
+ * @param terminal[in] Program control init terminal.
+ * @param nof_programs[in] Number of programs.
+ * @param nof_load_sections[in] Array of size nof_programs,
+ * encoding the number of load sections.
+ * @param nof_connect_sections[in] Array of size nof_programs,
+ * encoding the number of connect sections.
+ * @return < 0 on invalid manifest argument
+ */
+extern
+int ia_css_program_control_init_terminal_manifest_init(
+	ia_css_program_control_init_terminal_manifest_t *terminal,
+	const uint16_t nof_programs,
+	const uint16_t *nof_load_sections,
+	const uint16_t *nof_connect_sections);
+
+/*!
+ * Pretty prints the program control init terminal manifest.
+ * @param terminal[in] Program control init terminal.
+ */
+extern
+void ia_css_program_control_init_terminal_manifest_print(
+	ia_css_program_control_init_terminal_manifest_t *terminal);
+
+#endif /* __IA_CSS_PSYS_TERMINAL_MANIFEST_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.hsys.user.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.hsys.user.h
new file mode 100644
index 000000000000..1d2f06f3cbce
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.hsys.user.h
@@ -0,0 +1,38 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PSYS_TERMINAL_MANIFEST_HSYS_USER_H
+#define __IA_CSS_PSYS_TERMINAL_MANIFEST_HSYS_USER_H
+
+/*! \file */
+
+/** @file ia_css_psys_terminal_manifest.hsys.user.h
+ *
+ * Define the methods on the terminal manifest object: Hsys user interface
+ */
+
+#include
+
+/*!
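+ An illustrative sizing/init sketch for the program control init
+ terminal manifest methods declared in the header above (hypothetical;
+ the section counts are placeholder values and term points to
+ caller-provided storage):
+
+	uint16_t loads[2] = { 3, 1 };
+	uint16_t connects[2] = { 2, 2 };
+	unsigned int size;
+	int err;
+
+	size = ia_css_program_control_init_terminal_manifest_get_size(2,
+		loads, connects);
+	// ... allocate 'size' bytes for 'term', then:
+	err = ia_css_program_control_init_terminal_manifest_init(term, 2,
+		loads, connects);
+ */
+
+/*!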
Print the terminal manifest object to file/stream + + @param manifest[in] terminal manifest object + @param fid[out] file/stream handle + + @return < 0 on error + */ +extern int ia_css_terminal_manifest_print( + const ia_css_terminal_manifest_t *manifest, + void *fid); + +#endif /* __IA_CSS_PSYS_TERMINAL_MANIFEST_HSYS_USER_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.sim.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.sim.h new file mode 100644 index 000000000000..f7da810d82f1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/interface/ia_css_psys_terminal_manifest.sim.h @@ -0,0 +1,48 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_TERMINAL_MANIFEST_SIM_H +#define __IA_CSS_PSYS_TERMINAL_MANIFEST_SIM_H + +/*! \file */ + +/** @file ia_css_psys_terminal_manifest.sim.h + * + * Define the methods on the terminal manifest object: Simulation only + */ + +#include /* size_t */ +#include "ia_css_terminal.h" +#include "ia_css_terminal_manifest.h" +#include "ia_css_terminal_defs.h" + +/*! Create (the storage for) the terminal manifest object + + @param terminal_type[in] type of the terminal manifest {parameter, data} + + @return NULL on error + */ +extern ia_css_terminal_manifest_t *ia_css_terminal_manifest_alloc( + const ia_css_terminal_type_t terminal_type); + +/*! Destroy (the storage of) the terminal manifest object + + @param manifest[in] terminal manifest + + @return NULL + */ +extern ia_css_terminal_manifest_t *ia_css_terminal_manifest_free( + ia_css_terminal_manifest_t *manifest); + +#endif /* __IA_CSS_PSYS_TERMINAL_MANIFEST_SIM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest.c new file mode 100644 index 000000000000..5af4de746310 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest.c @@ -0,0 +1,1038 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#include "ia_css_psys_static_storage_class.h"
+#include "ia_css_psys_program_group_manifest.h"
+#include "ia_css_rbm_manifest.h"
+
+/*
+ * Functions to possibly inline
+ */
+
+#ifndef __IA_CSS_PSYS_STATIC_INLINE__
+#include "ia_css_psys_program_group_manifest_impl.h"
+#endif /* __IA_CSS_PSYS_STATIC_INLINE__ */
+
+/*
+ * Functions not to inline
+ */
+
+/*
+ * These files need refactoring so that only what is actually needed is
+ * built into the firmware; the switches below are currently in place to
+ * work around compilation problems in the firmware (for example, the
+ * lack of uint64_t support).
+ */
+#if !defined(__HIVECC)
+size_t ia_css_sizeof_program_group_manifest(
+	const uint8_t program_count,
+	const uint8_t terminal_count,
+	const uint8_t *program_dependency_count,
+	const uint8_t *terminal_dependency_count,
+	const ia_css_terminal_type_t *terminal_type,
+	const uint16_t cached_in_param_section_count,
+	const uint16_t cached_out_param_section_count,
+	const uint16_t *spatial_param_section_count,
+	const uint16_t fragment_param_section_count,
+	const uint16_t *sliced_param_section_count,
+	const uint16_t *sliced_out_param_section_count,
+	const uint16_t kernel_fragment_seq_count,
+	const uint16_t *progctrlinit_load_section_counts,
+	const uint16_t *progctrlinit_connect_section_counts)
+{
+	size_t size = 0;
+	int i = 0;
+	int j = 0;
+	int m = 0;
+	int n = 0;
+
+	IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE,
+		"ia_css_sizeof_program_group_manifest(): enter:\n");
+
+	verifexit(program_count != 0);
+	verifexit(program_dependency_count != NULL);
+	verifexit(terminal_dependency_count != NULL);
+
+	size += sizeof(ia_css_program_group_manifest_t);
+
+	/* Private payload in the program group manifest */
+	size += ceil_mul(sizeof(struct ia_css_psys_private_pg_data),
+			sizeof(uint64_t));
+	/* RBM manifest in the program group manifest */
+	size += ceil_mul(sizeof(ia_css_rbm_manifest_t),
+			sizeof(uint64_t));
+
+	for (i = 0; i < (int)program_count; i++) {
+		size += ia_css_sizeof_program_manifest(
+				program_dependency_count[i],
+				terminal_dependency_count[i]);
+	}
+
+	for (i = 0; i < (int)terminal_count; i++) {
+		switch (terminal_type[i]) {
+		case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN:
+			size += ia_css_param_terminal_manifest_get_size(
+					cached_in_param_section_count);
+			break;
+		case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT:
+			size += ia_css_param_terminal_manifest_get_size(
+					cached_out_param_section_count);
+			break;
+		case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN:
+		case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT:
+			size += ia_css_spatial_param_terminal_manifest_get_size(
+					spatial_param_section_count[j]);
+			j++;
+			break;
+		case IA_CSS_TERMINAL_TYPE_PROGRAM:
+			size += ia_css_program_terminal_manifest_get_size(
+					fragment_param_section_count,
+					kernel_fragment_seq_count);
+			break;
+		case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT:
+			size += ia_css_program_control_init_terminal_manifest_get_size(
+					program_count,
+					progctrlinit_load_section_counts,
+					progctrlinit_connect_section_counts);
+			break;
+		case IA_CSS_TERMINAL_TYPE_DATA_IN:
+		case IA_CSS_TERMINAL_TYPE_DATA_OUT:
+			size += sizeof(ia_css_data_terminal_manifest_t);
+			break;
+		case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN:
+			size += ia_css_sliced_param_terminal_manifest_get_size(
+					sliced_param_section_count[m]);
+			m++;
+			break;
+		case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT:
+			size += ia_css_sliced_param_terminal_manifest_get_size(
+					sliced_out_param_section_count[n]);
+			n++;
+			break;
+		default:
+			IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING,
+				"ia_css_sizeof_program_group_manifest invalid argument\n");
+		}
+	}
+
+EXIT:
+	if (0 == program_count || 0 == terminal_count ||
+			NULL == program_dependency_count ||
+			NULL == terminal_dependency_count) {
+		IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING,
+			"ia_css_sizeof_program_group_manifest invalid argument\n");
+	}
+	return size;
+}
+
+/*
+ * Currently, the design of the XNR kernel inside the *_pregdc program group
+ * does not fit the exact model that is being asserted on in
+ * ia_css_is_program_group_manifest_valid. We therefore disable some checks.
+ * Further investigation is needed to determine whether the *_pregdc program
+ * group can be changed or whether the model must be changed.
+ * #define USE_SIMPLIFIED_GRAPH_MODEL 1 allows multiple programs to be
+ * connected to the same terminal, and it allows a kernel to be mapped over
+ * multiple programs.
+ */
+#define USE_SIMPLIFIED_GRAPH_MODEL 1
+
+/*
+ * Model and/or check refinements
+ * - Parallel programs do not yet have mutually exclusive alternatives
+ * - The program dependencies do not need to be acyclic
+ * - Parallel programs need to have an equal kernel requirement
+ */
+bool ia_css_is_program_group_manifest_valid(
+	const ia_css_program_group_manifest_t *manifest)
+{
+	int i;
+	bool is_valid = false;
+	uint8_t terminal_count;
+	uint8_t program_count;
+	ia_css_kernel_bitmap_t total_bitmap;
+	ia_css_kernel_bitmap_t check_bitmap;
+	ia_css_kernel_bitmap_t terminal_bitmap;
+	/*
+	 * Use a standard bitmap type for the minimum logic to check the DAG,
+	 * generic functions can be used for the kernel enable bitmaps; Later
+	 */
+	vied_nci_resource_bitmap_t resource_bitmap;
+	int terminal_bitmap_weight;
+	bool has_parameter_terminal_in = false;
+	bool has_parameter_terminal_out = false;
+	bool has_program_control_init_terminal = false;
+	bool has_program_terminal = false;
+	bool has_program_terminal_sequencer_info = false;
+
+	IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE,
+		"ia_css_is_program_group_manifest_valid(): enter:\n");
+
+	verifexit(manifest != NULL);
+	verifexit(ia_css_program_group_manifest_get_size(manifest) != 0);
+	verifexit(ia_css_program_group_manifest_get_alignment(manifest) != 0);
+	verifexit(ia_css_program_group_manifest_get_program_group_ID(manifest) != 0);
+
+	terminal_count =
+		ia_css_program_group_manifest_get_terminal_count(manifest);
+	program_count =
+		ia_css_program_group_manifest_get_program_count(manifest);
+	total_bitmap =
+		ia_css_program_group_manifest_get_kernel_bitmap(manifest);
+	check_bitmap = ia_css_kernel_bitmap_clear();
+	resource_bitmap = vied_nci_bit_mask(VIED_NCI_RESOURCE_BITMAP_BITS);
+	terminal_bitmap = ia_css_kernel_bitmap_clear();
+
+	verifexit(program_count != 0);
+	verifexit(terminal_count != 0);
+	verifexit(!ia_css_is_kernel_bitmap_empty(total_bitmap));
+	verifexit(vied_nci_is_bitmap_empty(resource_bitmap));
+
+	/* Check the kernel bitmaps for terminals */
+	for (i = 0; i < (int)terminal_count; i++) {
+		ia_css_terminal_manifest_t *terminal_manifest_i =
+			ia_css_program_group_manifest_get_term_mnfst(
+					manifest, i);
+		bool is_parameter_in =
+			(IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN ==
+				ia_css_terminal_manifest_get_type(
+					terminal_manifest_i));
+		bool is_parameter_out =
+			(IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT ==
+				ia_css_terminal_manifest_get_type(
+					terminal_manifest_i));
+		bool is_data =
+			ia_css_is_terminal_manifest_data_terminal(
+					terminal_manifest_i);
+		bool is_program =
+			ia_css_is_terminal_manifest_program_terminal(
+					terminal_manifest_i);
+		bool is_spatial_param =
+ /* Check the kernel bitmaps for programs */
+ for (i = 0; i < (int)program_count; i++) {
+ int j;
+ ia_css_program_manifest_t *program_manifest_i =
+ ia_css_program_group_manifest_get_prgrm_mnfst(
+ manifest, i);
+ ia_css_program_type_t program_type_i =
+ ia_css_program_manifest_get_type(program_manifest_i);
+ ia_css_kernel_bitmap_t program_bitmap_i =
+ ia_css_program_manifest_get_kernel_bitmap(
+ program_manifest_i);
+ uint8_t program_dependency_count_i =
+ ia_css_program_manifest_get_program_dependency_count(
+ program_manifest_i);
+ uint8_t terminal_dependency_count_i =
+ ia_css_program_manifest_get_terminal_dependency_count(
+ program_manifest_i);
+ uint8_t program_dependency_i0 =
+ ia_css_program_manifest_get_program_dependency(
+ program_manifest_i, 0);
+ bool is_sub_i =
+ ia_css_is_program_manifest_subnode_program_type(
+ program_manifest_i);
+ bool is_exclusive_sub_i =
+ (program_type_i == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB);
+ bool is_virtual_sub_i =
+ (program_type_i == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB);
+ bool is_super_i =
+ ia_css_is_program_manifest_supernode_program_type(
+ program_manifest_i);
+
+ /*
+ * A program must have kernels that
+ * are a subset of the total
+ */
+ verifexit(!ia_css_is_kernel_bitmap_empty(
+ program_bitmap_i));
+ verifexit(ia_css_is_kernel_bitmap_subset(
+ total_bitmap, program_bitmap_i));
+ verifexit((program_type_i != IA_CSS_N_PROGRAM_TYPES));
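+ /*
+ * For reference, on a plain integer bitmap the subset test above
+ * amounts to (hypothetical helper, assuming uint32_t bitmaps):
+ *
+ * static inline bool bitmap_is_subset(uint32_t super, uint32_t sub)
+ * {
+ *         return (sub & ~super) == 0;
+ * }
+ *
+ * ia_css_is_kernel_bitmap_subset(total, b) applies the same logic
+ * to ia_css_kernel_bitmap_t.
+ */
+ 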
verifexit((program_dependency_count_i + terminal_dependency_count_i) != 0); + /* + * Checks for subnodes + * - Parallel subnodes cannot depend on terminals + * - Exclusive subnodes must depend on + * fewer terminals than the supernode + * - Subnodes only depend on a supernode of the same type + * - Must have a subset of the supernode's kernels + * (but not equal) + * - This tests only positive cases + * Checks for singular or supernodes + * - Cannot depend on exclusive subnodes + * - No intersection between kernels + * (too strict for multiple instances ?) + */ + if (is_sub_i) { + /* Subnode */ + ia_css_program_manifest_t *program_manifest_k = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, program_dependency_i0); + ia_css_program_type_t program_type_k = + ia_css_program_manifest_get_type( + program_manifest_k); + ia_css_kernel_bitmap_t program_bitmap_k = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest_k); + + verifexit(program_dependency_count_i == 1); + if (is_exclusive_sub_i || is_virtual_sub_i) { + verifexit(terminal_dependency_count_i <= + ia_css_program_manifest_get_terminal_dependency_count( + program_manifest_k)); + } else{ + verifexit(terminal_dependency_count_i == 0); + } + verifexit(program_type_k == + (is_exclusive_sub_i ? + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER : + is_virtual_sub_i ? + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER : + IA_CSS_PROGRAM_TYPE_PARALLEL_SUPER)); + verifexit(!ia_css_is_kernel_bitmap_equal( + program_bitmap_k, program_bitmap_i)); + verifexit(ia_css_is_kernel_bitmap_subset( + program_bitmap_k, program_bitmap_i)); + } else { + /* Singular or Supernode */ + int k; + + for (k = 0; k < program_dependency_count_i; k++) { + uint8_t program_dependency_k = + ia_css_program_manifest_get_program_dependency( + program_manifest_i, k); + ia_css_program_manifest_t *program_manifest_k = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, (int)program_dependency_k); + ia_css_program_type_t program_type_k = + ia_css_program_manifest_get_type( + program_manifest_k); + ia_css_kernel_bitmap_t program_bitmap_k = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest_k); + + verifexit(program_dependency_k < + program_count); + verifexit((program_type_k != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (program_type_k != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB)); +#if USE_SIMPLIFIED_GRAPH_MODEL == 0 + verifexit(ia_css_is_kernel_bitmap_intersection_empty( + program_bitmap_i, program_bitmap_k)); +#else + (void)program_bitmap_k; +#endif + } + } + + /* Check for relations */ + for (j = 0; j < (int)program_count; j++) { + int k; + ia_css_program_manifest_t *program_manifest_j = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, j); + ia_css_program_type_t program_type_j = + ia_css_program_manifest_get_type(program_manifest_j); + ia_css_kernel_bitmap_t program_bitmap_j = + ia_css_program_manifest_get_kernel_bitmap( + program_manifest_j); + uint8_t program_dependency_count_j = + ia_css_program_manifest_get_program_dependency_count( + program_manifest_j); + uint8_t program_dependency_j0 = + ia_css_program_manifest_get_program_dependency( + program_manifest_j, 0); + bool is_sub_j = + ia_css_is_program_manifest_subnode_program_type( + program_manifest_j); + bool is_super_j = + ia_css_is_program_manifest_supernode_program_type( + program_manifest_j); + bool is_virtual_sub_j = + (program_type_j == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB); + bool is_j_subset_i = + ia_css_is_kernel_bitmap_subset( + program_bitmap_i, program_bitmap_j); + bool is_i_subset_j = + 
ia_css_is_kernel_bitmap_subset(
+ program_bitmap_j, program_bitmap_i);
+
+ /* Test below would fail for i==j */
+ if (i == j)
+ continue;
+
+ /* Empty sets are always subsets, but meaningless */
+ verifexit(!ia_css_is_kernel_bitmap_empty(
+ program_bitmap_j));
+
+ /*
+ * Checks for mutual subnodes
+ * - Parallel subnodes must have an equal
+ * set of kernels
+ * - Exclusive and virtual subnodes must
+ * have an unequal set of kernels
+ * Checks for subnodes
+ * - Subnodes must have a subset of kernels
+ */
+ if (((program_type_i ==
+ IA_CSS_PROGRAM_TYPE_PARALLEL_SUB) &&
+ (program_type_j ==
+ IA_CSS_PROGRAM_TYPE_PARALLEL_SUB)) ||
+ ((program_type_i ==
+ IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) &&
+ (program_type_j ==
+ IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB)) ||
+ ((program_type_i ==
+ IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB) &&
+ (program_type_j ==
+ IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB))) {
+
+ verifexit(program_dependency_count_j == 1);
+ verifexit(program_dependency_i0 != i);
+ verifexit(program_dependency_j0 != i);
+
+ if (program_dependency_i0 ==
+ program_dependency_j0) {
+ verifexit(is_sub_i);
+ /*
+ * Subnodes are subsets,
+ * but not for virtual nodes
+ */
+ if (!is_virtual_sub_i)
+ verifexit(
+ ((is_j_subset_i ||
+ is_i_subset_j)));
+ /*
+ * The subsets must be equal for
+ * parallel subnodes, and
+ * must be unequal for
+ * exclusive and virtual subnodes
+ */
+ verifexit(
+ ((is_j_subset_i && is_i_subset_j) ^
+ (is_exclusive_sub_i |
+ is_virtual_sub_i)));
+
+ }
+ if (is_j_subset_i || is_i_subset_j) {
+ verifexit(program_dependency_i0 ==
+ program_dependency_j0);
+ }
+ }
+
+ if (((program_type_i ==
+ IA_CSS_PROGRAM_TYPE_PARALLEL_SUPER) &&
+ (program_type_j ==
+ IA_CSS_PROGRAM_TYPE_PARALLEL_SUB)) ||
+ ((program_type_i ==
+ IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER) &&
+ (program_type_j ==
+ IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB)) ||
+ ((program_type_i ==
+ IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER) &&
+ (program_type_j ==
+ IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB))) {
+
+ verifexit(program_dependency_count_j == 1);
+ verifexit(!is_i_subset_j);
+
+ if (program_dependency_j0 == i) {
+ verifexit(program_dependency_i0 !=
+ program_dependency_j0);
+ verifexit(is_super_i);
+ verifexit(is_j_subset_i);
+
+ }
+ if (is_j_subset_i) {
+ verifexit(program_dependency_j0 == i);
+ }
+ }
+
+ /*
+ * Checks for dependent nodes
+ * - Cannot depend on exclusive subnodes
+ * - No intersection between kernels
+ * (too strict for multiple instances ?)
+ * unless a subnode + */ + for (k = 0; k < (int)program_dependency_count_j; k++) { + uint8_t program_dependency_k = + ia_css_program_manifest_get_program_dependency( + program_manifest_j, k); + + verifexit((program_dependency_k < + program_count)); + if (program_dependency_k == i) { + /* program[j] depends on program[i] */ + verifexit((i != j)); + verifexit((program_type_i != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (program_type_i != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB)); + verifexit(USE_SIMPLIFIED_GRAPH_MODEL || + (ia_css_is_kernel_bitmap_intersection_empty( + program_bitmap_i, program_bitmap_j) ^ is_sub_j)); + } + } + + /* + * Checks for supernodes and subnodes + * - Detect nodes that kernel-wise are subsets, + * but not connected to the correct supernode + * - We do not (yet) detect if programs properly + * depend on all parallel nodes + */ + if (!ia_css_is_kernel_bitmap_intersection_empty( + program_bitmap_i, program_bitmap_j)) { + /* + * This test will pass if + * the program manifest is NULL, + * but that's no concern here + */ +#if USE_SIMPLIFIED_GRAPH_MODEL == 0 + verifexit(!ia_css_is_program_manifest_singular_program_type( + program_manifest_i)); + verifexit(!ia_css_is_program_manifest_singular_program_type( + program_manifest_j)); + if (!is_virtual_sub_j) + verifexit((is_j_subset_i || is_i_subset_j)); +#else + (void)is_virtual_sub_j; +#endif + if (is_super_i) { + verifexit(is_sub_j); + verifexit(program_dependency_j0 == i); + } + if (is_super_j) { + verifexit(is_sub_i); + verifexit(program_dependency_i0 == j); + } + } + } + check_bitmap = ia_css_kernel_bitmap_union( + check_bitmap, program_bitmap_i); + /* + * A terminal can be bound to only a single + * (of multiple concurrent) program(s), + * i.e. the one that holds the iterator to control it + * Only singular and super nodes can depend on a terminal. 
+ * This loop accumulates all terminal + * dependencies over all programs + */ + for (j = 0; j < (int)terminal_dependency_count_i; j++) { + uint8_t terminal_dependency = + ia_css_program_manifest_get_terminal_dependency( + program_manifest_i, j); + + verifexit(terminal_dependency < terminal_count); + if ((program_type_i != + IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) && + (program_type_i != + IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB)) { + /* If the subnode always came after the */ + /* supernode we could check for presence */ + resource_bitmap = + vied_nci_bit_mask_set_unique( + resource_bitmap, + terminal_dependency); +#if USE_SIMPLIFIED_GRAPH_MODEL == 0 + verifexit(!vied_nci_is_bitmap_empty( + resource_bitmap)); +#endif + } + } + } + verifexit(ia_css_is_kernel_bitmap_equal( + total_bitmap, check_bitmap)); + + terminal_bitmap_weight = + vied_nci_bitmap_compute_weight(resource_bitmap); + verifexit(terminal_bitmap_weight >= 0); + if (has_parameter_terminal_in || + has_parameter_terminal_out || + has_program_terminal || + has_program_control_init_terminal) { + int skip_terminal_count = 0; + + if (has_parameter_terminal_in) + skip_terminal_count++; + if (has_parameter_terminal_out) + skip_terminal_count++; + if (has_program_control_init_terminal) { + skip_terminal_count++; + } + if (has_program_terminal) + skip_terminal_count++; + if (has_program_terminal_sequencer_info) + skip_terminal_count--; +#if USE_SIMPLIFIED_GRAPH_MODEL == 0 + verifexit((terminal_bitmap_weight == + (terminal_count - skip_terminal_count))); +#endif + } else + verifexit((terminal_bitmap_weight == terminal_count)); + + is_valid = true; +EXIT: + if (is_valid == false) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_is_program_group_manifest_valid: failed\n"); + } + return is_valid; +} + +int ia_css_program_group_manifest_set_kernel_bitmap( + ia_css_program_group_manifest_t *manifest, + const ia_css_kernel_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_set_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + manifest->kernel_bitmap = bitmap; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_set_kernel_bitmap invalid argument\n"); + } + return retval; +} + +ia_css_kernel_bitmap_t ia_css_program_group_manifest_get_kernel_bitmap( + const ia_css_program_group_manifest_t *manifest) +{ + ia_css_kernel_bitmap_t bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + bitmap = manifest->kernel_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_kernel_bitmap invalid argument\n"); + } + return bitmap; +} + +void ia_css_program_group_manifest_init( + ia_css_program_group_manifest_t *blob, + const uint8_t program_count, + const uint8_t terminal_count, + const uint8_t *program_dependencies, + const uint8_t *terminal_dependencies, + const ia_css_terminal_type_t *terminal_type, + const uint16_t cached_in_param_section_count, + const uint16_t cached_out_param_section_count, + const uint16_t *spatial_param_section_count, + const uint16_t fragment_param_section_count, + const uint16_t *sliced_in_param_section_count, + const uint16_t *sliced_out_param_section_count, + const uint16_t kernel_fragment_seq_count, + const uint16_t *progctrlinit_load_section_counts, + const uint16_t *progctrlinit_connect_section_counts) +{ + int i = 0; + int j = 0; + int m = 0; + int n = 
0; + int result; + uint32_t offset = 0; + char *prg_manifest_base, *terminal_manifest_base; + size_t program_size = 0; + + /* + * assert(blob != NULL); + */ + COMPILATION_ERROR_IF( + SIZE_OF_DATA_TERMINAL_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_data_terminal_manifest_t))); + COMPILATION_ERROR_IF( + SIZE_OF_PROGRAM_GROUP_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_group_manifest_t))); + COMPILATION_ERROR_IF( + SIZE_OF_PROGRAM_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_manifest_t))); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_program_group_manifest_init(): enter:\n"); + + for (i = 0; i < (int)program_count; i++) { + program_size += + ia_css_sizeof_program_manifest(program_dependencies[i], + terminal_dependencies[i]); + } + + /* A program group ID cannot be zero */ + blob->ID = 1; + blob->program_count = program_count; + blob->terminal_count = terminal_count; + blob->program_manifest_offset = sizeof(ia_css_program_group_manifest_t); + blob->terminal_manifest_offset = + (uint32_t)blob->program_manifest_offset + program_size; + + prg_manifest_base = (char *) + (((char *)blob) + blob->program_manifest_offset); + offset = blob->program_manifest_offset; + for (i = 0; i < (int)program_count; i++) { + ia_css_program_manifest_init( + (ia_css_program_manifest_t *)prg_manifest_base, + program_dependencies[i], terminal_dependencies[i]); + ia_css_program_manifest_set_parent_offset( + (ia_css_program_manifest_t *)prg_manifest_base, offset); + program_size = + ia_css_sizeof_program_manifest(program_dependencies[i], + terminal_dependencies[i]); + prg_manifest_base += program_size; + offset += (uint32_t)program_size; + } + + offset = blob->terminal_manifest_offset; + terminal_manifest_base = (char *) (((char *)blob) + offset); + for (i = 0; i < (int)terminal_count; i++) { + size_t terminal_size = 0; + ia_css_terminal_manifest_t *term_manifest = + (ia_css_terminal_manifest_t *)terminal_manifest_base; + + ia_css_terminal_manifest_set_parent_offset( + (ia_css_terminal_manifest_t *) + terminal_manifest_base, + offset); + switch (terminal_type[i]) { + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN: + result = ia_css_param_terminal_manifest_init( + (ia_css_param_terminal_manifest_t *) + term_manifest, + cached_in_param_section_count); + if (0 == result) { + terminal_size = + ia_css_param_terminal_manifest_get_size( + cached_in_param_section_count); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_param_terminal_manifest_init failed in cached in terminal\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT: + result = ia_css_param_terminal_manifest_init( + (ia_css_param_terminal_manifest_t *) + term_manifest, + cached_out_param_section_count); + if (0 == result) { + terminal_size = + ia_css_param_terminal_manifest_get_size( + cached_out_param_section_count); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_param_terminal_manifest_init failed\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + result = ia_css_spatial_param_terminal_manifest_init( + (ia_css_spatial_param_terminal_manifest_t *) + term_manifest, + spatial_param_section_count[j]); + if (0 == result) { + terminal_size = + ia_css_spatial_param_terminal_manifest_get_size( + spatial_param_section_count[j]); + j++; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_spatial_param_terminal_manifest_init failed in spatial terminal\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PROGRAM: + result = 
ia_css_program_terminal_manifest_init( + (ia_css_program_terminal_manifest_t *) + term_manifest, + fragment_param_section_count, + kernel_fragment_seq_count); + if (0 == result) { + terminal_size = + ia_css_program_terminal_manifest_get_size( + fragment_param_section_count, + kernel_fragment_seq_count); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_program_terminal_manifest_init failed in program terminal\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT: + result = ia_css_program_control_init_terminal_manifest_init( + (ia_css_program_control_init_terminal_manifest_t *) + term_manifest, + program_count, + progctrlinit_load_section_counts, + progctrlinit_connect_section_counts); + if (0 == result) { + terminal_size = + ia_css_program_control_init_terminal_manifest_get_size( + program_count, + NULL, + NULL); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_program_control_init_terminal_manifest_init failed\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_DATA_IN: + case IA_CSS_TERMINAL_TYPE_DATA_OUT: + terminal_size = sizeof(ia_css_data_terminal_manifest_t); + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN: + result = ia_css_sliced_param_terminal_manifest_init( + (ia_css_sliced_param_terminal_manifest_t *) + term_manifest, + sliced_in_param_section_count[m]); + if (0 == result) { + terminal_size = + ia_css_sliced_param_terminal_manifest_get_size( + sliced_in_param_section_count[m]); + m++; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_param_terminal_manifest_init in sliced terminal failed\n"); + } + break; + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT: + result = ia_css_sliced_param_terminal_manifest_init( + (ia_css_sliced_param_terminal_manifest_t *) + term_manifest, + sliced_out_param_section_count[n]); + if (0 == result) { + terminal_size = + ia_css_sliced_param_terminal_manifest_get_size( + sliced_out_param_section_count[n]); + n++; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_param_terminal_manifest_init in sliced out terminal failed\n"); + } + break; + default: + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_init invalid argument\n"); + } + term_manifest->size = (uint16_t)terminal_size; + term_manifest->terminal_type = terminal_type[i]; + terminal_manifest_base += terminal_size; + offset += (uint32_t)terminal_size; + } + + /* Set the private program group manifest blob offset */ + blob->private_data_offset = offset; + offset += ceil_mul(sizeof(struct ia_css_psys_private_pg_data), + sizeof(uint64_t)); + + /* Set the RBM manifest blob offset */ + blob->rbm_manifest_offset = offset; + offset += ceil_mul(sizeof(ia_css_rbm_manifest_t), + sizeof(uint64_t)); + + assert(offset <= UINT16_MAX); + blob->size = (uint16_t)offset; +} + +int ia_css_program_group_manifest_print( + const ia_css_program_group_manifest_t *manifest, + void *fid) +{ + int retval = -1; + int i; + uint8_t program_count, terminal_count; + ia_css_kernel_bitmap_t bitmap; + struct ia_css_psys_private_pg_data *priv_data; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_program_group_manifest_print(): enter:\n"); + + NOT_USED(fid); + + verifexit(manifest != NULL); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "sizeof(manifest) = %d\n", + (int)ia_css_program_group_manifest_get_size(manifest)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "alignment(manifest) = %d\n", + (int)ia_css_program_group_manifest_get_alignment(manifest)); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "program group ID = %d\n", + 
(int)ia_css_program_group_manifest_get_program_group_ID( + manifest)); + + program_count = + ia_css_program_group_manifest_get_program_count(manifest); + terminal_count = + ia_css_program_group_manifest_get_terminal_count(manifest); + + bitmap = ia_css_program_group_manifest_get_kernel_bitmap(manifest); + verifexit(ia_css_kernel_bitmap_print(bitmap, fid) == 0); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "%d program manifests\n", (int)program_count); + for (i = 0; i < (int)program_count; i++) { + ia_css_program_manifest_t *program_manifest = + ia_css_program_group_manifest_get_prgrm_mnfst( + manifest, i); + + retval = ia_css_program_manifest_print(program_manifest, fid); + verifjmpexit(retval == 0); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "%d terminal manifests\n", (int)terminal_count); + for (i = 0; i < (int)terminal_count; i++) { + ia_css_terminal_manifest_t *terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst( + manifest, i); + + retval = ia_css_terminal_manifest_print( + terminal_manifest, fid); + verifjmpexit(retval == 0); + } + + priv_data = + (struct ia_css_psys_private_pg_data *) + ia_css_program_group_manifest_get_private_data(manifest); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "private_data_offset %d\n", manifest->private_data_offset); + + for (i = 0; i < IPU_DEVICE_GP_PSA_MUX_NUM_MUX; i++) { + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "PSA MUX id %d mux val %d\n", i, + priv_data->psa_mux_conf[i]); + + } + + for (i = 0; i < IPU_DEVICE_GP_ISA_STATIC_MUX_NUM_MUX; i++) { + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "ISA MUX id %d mux val %d\n", i, + priv_data->isa_mux_conf[i]); + + } + + for (i = 0; i < IPU_DEVICE_ACB_NUM_ACB; i++) { + + if (priv_data->acb_route[i].in_select != + NCI_ACB_PORT_INVALID) { + + assert(priv_data->acb_route[i].in_select != + NCI_ACB_PORT_INVALID && + priv_data->acb_route[i].out_select != + NCI_ACB_PORT_INVALID); + + IA_CSS_TRACE_3(PSYSAPI_STATIC, INFO, + "Route Cell id %d In %d Out %d\n", i, + priv_data->acb_route[i].in_select, + priv_data->acb_route[i].out_select); + } + + } + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: buffer_base_addr 0x%x\n", + priv_data->input_buffer_info.buffer_base_addr); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: bpe = %d\n", + priv_data->input_buffer_info.bpe); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: buffer_width = %d\n", + priv_data->input_buffer_info.buffer_width); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: buffer_height = %d\n", + priv_data->input_buffer_info.buffer_height); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: num_of_buffers = %d\n", + priv_data->input_buffer_info.num_of_buffers); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "Input Buffer: dfm_port_addr = 0x%x\n", + priv_data->input_buffer_info.dfm_port_addr); + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_group_manifest_print failed (%i)\n", + retval); + } + return retval; +} +#endif /* !defined(__HIVECC) */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest_impl.h new file mode 100644 index 000000000000..527b8cc00dd1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_manifest_impl.h @@ -0,0 +1,415 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. 
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_IMPL_H +#define __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_IMPL_H + +#include +#include +#include +#include +#include "ia_css_psys_program_group_private.h" +#include "ia_css_terminal_manifest_types.h" +#include "ia_css_psys_private_pg_data.h" +#include /* Safer bit mask functions */ +#include "ia_css_psys_static_trace.h" +#include "ia_css_rbm_manifest_types.h" +#include +#include +#include + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +size_t ia_css_program_group_manifest_get_size( + const ia_css_program_group_manifest_t *manifest) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_size(): enter:\n"); + + if (manifest != NULL) { + size = manifest->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_size invalid argument\n"); + } + return size; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_program_group_ID_t +ia_css_program_group_manifest_get_program_group_ID( + const ia_css_program_group_manifest_t *manifest) +{ + ia_css_program_group_ID_t id = IA_CSS_PROGRAM_GROUP_INVALID_ID; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_program_group_ID(): enter:\n"); + + if (manifest != NULL) { + id = manifest->ID; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_program_group_ID invalid argument\n"); + } + return id; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +int ia_css_program_group_manifest_set_program_group_ID( + ia_css_program_group_manifest_t *manifest, + ia_css_program_group_ID_t id) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_set_program_group_ID(): enter:\n"); + + if (manifest != NULL) { + manifest->ID = id; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_set_program_group_ID invalid argument\n"); + } + return retval; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +int ia_css_program_group_manifest_set_alignment( + ia_css_program_group_manifest_t *manifest, + const uint8_t alignment) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_set_alignment(): enter:\n"); + + if (manifest != NULL) { + manifest->alignment = alignment; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_set_alignment invalid argument\n"); + } + return retval; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +uint8_t ia_css_program_group_manifest_get_alignment( + const ia_css_program_group_manifest_t *manifest) +{ + uint8_t alignment = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_alignment(): enter:\n"); + + if (manifest != NULL) { + alignment = manifest->alignment; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_alignment invalid argument\n"); + } + return alignment; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +void 
*ia_css_program_group_manifest_get_private_data( + const ia_css_program_group_manifest_t *manifest) +{ + void *private_data = NULL; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_private_data(%p): enter:\n", + manifest); + + verifexit(manifest != NULL); + + private_data = (void *)((const char *)manifest + + manifest->private_data_offset); +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_private_data invalid argument\n"); + } + return private_data; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_rbm_manifest_t *ia_css_program_group_manifest_get_rbm_manifest( + const ia_css_program_group_manifest_t *manifest) +{ + ia_css_rbm_manifest_t *rbm_manifest = NULL; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_rbm_manifest(%p): enter:\n", + manifest); + + verifexit(manifest != NULL); + + rbm_manifest = (ia_css_rbm_manifest_t *)((const char *)manifest + + manifest->rbm_manifest_offset); + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_rbm_manifest invalid argument\n"); + } + return rbm_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_program_manifest_t * +ia_css_program_group_manifest_get_prgrm_mnfst( + const ia_css_program_group_manifest_t *manifest, + const unsigned int program_index) +{ + ia_css_program_manifest_t *prg_manifest_base; + uint8_t *program_manifest = NULL; + uint8_t program_count; + unsigned int i; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_prgrm_mnfst(%p,%d): enter:\n", + manifest, program_index); + + program_count = + ia_css_program_group_manifest_get_program_count(manifest); + + verifexit(manifest != NULL); + verifexit(program_index < program_count); + + prg_manifest_base = (ia_css_program_manifest_t *)((char *)manifest + + manifest->program_manifest_offset); + if (program_index < program_count) { + program_manifest = (uint8_t *)prg_manifest_base; + for (i = 0; i < program_index; i++) { + program_manifest += ((ia_css_program_manifest_t *) + program_manifest)->size; + } + } + +EXIT: + if (NULL == manifest || program_index >= program_count) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_prgrm_mnfst invalid argument\n"); + } + return (ia_css_program_manifest_t *)program_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_data_terminal_manifest_t * +ia_css_program_group_manifest_get_data_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_data_terminal_manifest_t *data_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_data_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_data_terminal(terminal_manifest)); + + data_terminal_manifest = + (ia_css_data_terminal_manifest_t *)terminal_manifest; +EXIT: + return data_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_param_terminal_manifest_t * +ia_css_program_group_manifest_get_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_param_terminal_manifest_t *param_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + 
IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_param_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_parameter_terminal( + terminal_manifest)); + param_terminal_manifest = + (ia_css_param_terminal_manifest_t *)terminal_manifest; +EXIT: + return param_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_spatial_param_terminal_manifest_t * +ia_css_program_group_manifest_get_spatial_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_spatial_param_terminal_manifest_t * + spatial_param_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_spatial_param_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_spatial_parameter_terminal( + terminal_manifest)); + + spatial_param_terminal_manifest = + (ia_css_spatial_param_terminal_manifest_t *)terminal_manifest; +EXIT: + return spatial_param_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_sliced_param_terminal_manifest_t * +ia_css_program_group_manifest_get_sliced_param_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_sliced_param_terminal_manifest_t * + sliced_param_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_sliced_param_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_sliced_terminal( + terminal_manifest)); + + sliced_param_terminal_manifest = + (ia_css_sliced_param_terminal_manifest_t *)terminal_manifest; +EXIT: + return sliced_param_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_program_terminal_manifest_t * +ia_css_program_group_manifest_get_program_terminal_manifest( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_program_terminal_manifest_t *program_terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_program_terminal_manifest(%p, %d): enter:\n", + manifest, (int)terminal_index); + + terminal_manifest = + ia_css_program_group_manifest_get_term_mnfst(manifest, + terminal_index); + + verifexit(ia_css_is_terminal_manifest_program_terminal( + terminal_manifest)); + + program_terminal_manifest = + (ia_css_program_terminal_manifest_t *)terminal_manifest; + EXIT: + return program_terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +ia_css_terminal_manifest_t * +ia_css_program_group_manifest_get_term_mnfst( + const ia_css_program_group_manifest_t *manifest, + const unsigned int terminal_index) +{ + ia_css_terminal_manifest_t *terminal_manifest = NULL; + ia_css_terminal_manifest_t *terminal_manifest_base; + uint8_t terminal_count; + uint8_t i = 0; + uint32_t offset; + + IA_CSS_TRACE_2(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_term_mnfst(%p,%d): enter:\n", 
+ manifest, (int)terminal_index); + + verifexit(manifest != NULL); + + terminal_count = + ia_css_program_group_manifest_get_terminal_count(manifest); + + verifexit(terminal_index < terminal_count); + + terminal_manifest_base = + (ia_css_terminal_manifest_t *)((char *)manifest + + manifest->terminal_manifest_offset); + terminal_manifest = terminal_manifest_base; + while (i < terminal_index) { + offset = + (uint32_t)ia_css_terminal_manifest_get_size(terminal_manifest); + terminal_manifest = (ia_css_terminal_manifest_t *) + ((char *)terminal_manifest + offset); + i++; + } +EXIT: + return terminal_manifest; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +uint8_t ia_css_program_group_manifest_get_program_count( + const ia_css_program_group_manifest_t *manifest) +{ + uint8_t program_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_program_count(): enter:\n"); + + if (manifest != NULL) { + program_count = manifest->program_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_program_count invalid argument\n"); + } + return program_count; +} + +IA_CSS_PSYS_STATIC_STORAGE_CLASS_C +uint8_t ia_css_program_group_manifest_get_terminal_count( + const ia_css_program_group_manifest_t *manifest) +{ + uint8_t terminal_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_group_manifest_get_terminal_count(): enter:\n"); + + if (manifest != NULL) { + terminal_count = manifest->terminal_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_group_manifest_get_terminal_count invalid argument\n"); + } + return terminal_count; +} + +#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_MANIFEST_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_private.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_private.h new file mode 100644 index 000000000000..502d59def6e9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_group_private.h @@ -0,0 +1,212 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_PSYS_PROGRAM_GROUP_PRIVATE_H
+#define __IA_CSS_PSYS_PROGRAM_GROUP_PRIVATE_H
+
+#include "ia_css_psys_manifest_types.h"
+#include "ia_css_terminal_manifest_types.h"
+#include "ia_css_kernel_bitmap.h"
+#include "ia_css_program_group_data.h"
+#include "vied_nci_psys_resource_model.h"
+#include "ia_css_rbm_manifest_types.h"
+#include
+#include
+#include
+
+#define SIZE_OF_PROGRAM_GROUP_MANIFEST_STRUCT_IN_BITS \
+ ((IA_CSS_KERNEL_BITMAP_BITS) \
+ + (IA_CSS_PROGRAM_GROUP_ID_BITS) \
+ + (5 * IA_CSS_UINT16_T_BITS) \
+ + (5 * IA_CSS_UINT8_T_BITS) \
+ + (5 * IA_CSS_UINT8_T_BITS))
+
+struct ia_css_program_group_manifest_s {
+ /**< Indicate which kernels are present in this program group */
+ ia_css_kernel_bitmap_t kernel_bitmap;
+ /**< Referral ID to program group FW */
+ ia_css_program_group_ID_t ID;
+ uint16_t program_manifest_offset;
+ uint16_t terminal_manifest_offset;
+ /**< Offset to private data (not part of the official API) */
+ uint16_t private_data_offset;
+ /**< Offset to RBM manifest */
+ uint16_t rbm_manifest_offset;
+ /**< Size of this structure */
+ uint16_t size;
+ /**< Storage alignment requirement (in uint8_t) */
+ uint8_t alignment;
+ /**< Total number of kernels in this program group */
+ uint8_t kernel_count;
+ /**< Total number of programs in this program group */
+ uint8_t program_count;
+ /**< Total number of terminals on this program group */
+ uint8_t terminal_count;
+ /**< Total number of independent subgraphs in this program group */
+ uint8_t subgraph_count;
+ /**< Padding; ensures that rbm_manifest starts on 64bit alignment */
+ uint8_t reserved[5];
+};
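+
+/*
+ * Note: the bit-count macro above is meant to be cross-checked against
+ * the C struct at compile time, as ia_css_program_group_manifest_init()
+ * in this library does:
+ *
+ * COMPILATION_ERROR_IF(
+ *         SIZE_OF_PROGRAM_GROUP_MANIFEST_STRUCT_IN_BITS !=
+ *         (CHAR_BIT * sizeof(ia_css_program_group_manifest_t)));
+ */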
+#define SIZE_OF_PROGRAM_MANIFEST_STRUCT_IN_BITS \
+ (IA_CSS_KERNEL_BITMAP_BITS \
+ + IA_CSS_PROGRAM_ID_BITS \
+ + IA_CSS_PROGRAM_TYPE_BITS \
+ + (3 * IA_CSS_UINT32_T_BITS) \
+ + (VIED_NCI_RESOURCE_BITMAP_BITS * VIED_NCI_N_DEV_DFM_ID) \
+ + (VIED_NCI_RESOURCE_BITMAP_BITS * VIED_NCI_N_DEV_DFM_ID) \
+ + IA_CSS_UINT16_T_BITS \
+ + (VIED_NCI_RESOURCE_SIZE_BITS * VIED_NCI_N_MEM_TYPE_ID) \
+ + (VIED_NCI_RESOURCE_SIZE_BITS * VIED_NCI_N_DATA_MEM_TYPE_ID * 2) \
+ + (VIED_NCI_RESOURCE_SIZE_BITS * VIED_NCI_N_DEV_CHN_ID * 2) \
+ + (IA_CSS_UINT8_T_BITS * VIED_NCI_N_DEV_DFM_ID) \
+ + (IA_CSS_PROCESS_MAX_CELLS * VIED_NCI_RESOURCE_ID_BITS) \
+ + (VIED_NCI_RESOURCE_ID_BITS) \
+ + (2 * IA_CSS_UINT8_T_BITS) \
+ + (N_PADDING_UINT8_IN_PROGRAM_GROUP_MANFEST * IA_CSS_UINT8_T_BITS))
+/*
+ * This structure contains only the information required for resource
+ * management and construction of the process group.
+ * The header for the program binary load is separate
+ */
+
+struct ia_css_program_manifest_s {
+ /**< Indicate which kernels lead to this program being used */
+ ia_css_kernel_bitmap_t kernel_bitmap;
+ /**< Referral ID to a specific program FW, valid ID's != 0 */
+ ia_css_program_ID_t ID;
+ /**< Specification for exclusive or parallel programs */
+ ia_css_program_type_t program_type;
+ /**< Offset to add to reach the parent. This is a negative value. */
+ int32_t parent_offset;
+ uint32_t program_dependency_offset;
+ uint32_t terminal_dependency_offset;
+#if (VIED_NCI_N_DEV_DFM_ID > 0)
+ /**< DFM port allocation of this program */
+ vied_nci_resource_bitmap_t dfm_port_bitmap[VIED_NCI_N_DEV_DFM_ID];
+ /**< Active DFM ports which need a kick
+ * If an empty port is configured to run in active mode, the empty
+ * port and the corresponding full port(s) in the stream must be kicked.
+ * The empty port must always be kicked after the full port.
+ */
+ vied_nci_resource_bitmap_t dfm_active_port_bitmap[VIED_NCI_N_DEV_DFM_ID];
+#endif
+ /**< Size of this structure */
+ uint16_t size;
+ /**< (internal) Memory allocation size needs of this program */
+ vied_nci_resource_size_t int_mem_size[VIED_NCI_N_MEM_TYPE_ID];
+ /**< (external) Memory allocation size needs of this program */
+ vied_nci_resource_size_t ext_mem_size[VIED_NCI_N_DATA_MEM_TYPE_ID];
+ vied_nci_resource_size_t ext_mem_offset[VIED_NCI_N_DATA_MEM_TYPE_ID];
+ /**< Device channel allocation size needs of this program */
+ vied_nci_resource_size_t dev_chn_size[VIED_NCI_N_DEV_CHN_ID];
+ vied_nci_resource_size_t dev_chn_offset[VIED_NCI_N_DEV_CHN_ID];
+#if (VIED_NCI_N_DEV_DFM_ID > 0)
+ /**< DFM ports are relocatable if the value is set to 1.
+ * The flag is per DFM port type.
+ * This is not supported for now.
+ */
+ uint8_t is_dfm_relocatable[VIED_NCI_N_DEV_DFM_ID];
+#endif
+ /** Array of all the cells this program needs */
+#if IA_CSS_PROCESS_MAX_CELLS == 1
+ vied_nci_resource_id_t cell_id;
+#else
+ vied_nci_resource_id_t cells[IA_CSS_PROCESS_MAX_CELLS];
+#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */
+ /**< (exclusive) indication of a cell type to be used by this program */
+ vied_nci_resource_id_t cell_type_id;
+
+ /**< Number of programs this program depends on */
+ uint8_t program_dependency_count;
+ /**< Number of terminals this program depends on */
+ uint8_t terminal_dependency_count;
+ /**< Padding bytes for 64bit alignment */
+#if N_PADDING_UINT8_IN_PROGRAM_GROUP_MANFEST > 0
+ /* hivecc does not allow an array of zero length */
+ uint8_t padding[N_PADDING_UINT8_IN_PROGRAM_GROUP_MANFEST];
+#endif
+};
+
+/*
+ * Calculation for manual size check for struct ia_css_data_terminal_manifest_s
+ */
+#define SIZE_OF_DATA_TERMINAL_MANIFEST_STRUCT_IN_BITS \
+ (SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \
+ + IA_CSS_FRAME_FORMAT_BITMAP_BITS \
+ + IA_CSS_CONNECTION_BITMAP_BITS \
+ + IA_CSS_KERNEL_BITMAP_BITS \
+ + (4 * (IA_CSS_UINT16_T_BITS * IA_CSS_N_DATA_DIMENSION)) \
+ + IA_CSS_UINT16_T_BITS \
+ + IA_CSS_UINT8_T_BITS \
+ + (4*IA_CSS_UINT8_T_BITS))
+/*
+ * Inherited data terminal class
+ */
+struct ia_css_data_terminal_manifest_s {
+ /**< Data terminal base */
+ ia_css_terminal_manifest_t base;
+ /**< Supported (4CC / MIPI / parameter) formats */
+ ia_css_frame_format_bitmap_t frame_format_bitmap;
+ /**< Indicate which kernels lead to this terminal being used */
+ ia_css_kernel_bitmap_t kernel_bitmap;
+ /**< Minimum size of the frame */
+ uint16_t min_size[IA_CSS_N_DATA_DIMENSION];
+ /**< Maximum size of the frame */
+ uint16_t max_size[IA_CSS_N_DATA_DIMENSION];
+ /**< Minimum size of a fragment that the program port can accept */
+ uint16_t min_fragment_size[IA_CSS_N_DATA_DIMENSION];
+ /**< Maximum size of a fragment that the program port can accept */
+ uint16_t max_fragment_size[IA_CSS_N_DATA_DIMENSION];
+ /**< Indicate if this terminal is derived from a principal terminal */
+ uint16_t terminal_dependency;
+ /**< Indicate what (streaming) interface types this terminal supports */
+ ia_css_connection_bitmap_t connection_bitmap;
+ /**< Indicates if compression is supported on the data associated with
+ * this terminal. 
'1' indicates compression is supported, + * '0' otherwise + */ + uint8_t compression_support; + uint8_t reserved[4]; +}; + +/* ============ Program Control Init Terminal Manifest - START ============ */ +#define N_PADDING_UINT8_IN_PROGCTRLINIT_MANIFEST_PROGRAM_DESC_STRUCT 4 +struct ia_css_program_control_init_manifest_program_desc_s { + uint16_t load_section_count; + uint16_t connect_section_count; + uint8_t padding[N_PADDING_UINT8_IN_PROGCTRLINIT_MANIFEST_PROGRAM_DESC_STRUCT]; +}; + +#define N_PADDING_UINT8_IN_PROGCTRLINIT_TERMINAL_MANIFEST_STRUCT 2 +struct ia_css_program_control_init_terminal_manifest_s { + ia_css_terminal_manifest_t base; + /* Number of programs in program group */ + uint32_t program_count; + /* + * Points to array of ia_css_program_control_init_terminal_program_desc_t + * with size program_count. + */ + uint16_t program_desc_offset; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PROGCTRLINIT_TERMINAL_MANIFEST_STRUCT]; +}; +/* ============ Program Control Init Terminal Manifest - END ============ */ + +extern void ia_css_program_manifest_init( + ia_css_program_manifest_t *blob, + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count); + +#endif /* __IA_CSS_PSYS_PROGRAM_GROUP_PRIVATE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_manifest.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_manifest.c new file mode 100644 index 000000000000..be1ef9676879 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_program_manifest.c @@ -0,0 +1,1241 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + + +#include +#include +/* for ia_css_kernel_bitmap_t, ia_css_kernel_bitmap_print */ +#include + +#include +#include "ia_css_psys_program_group_private.h" +#include "ia_css_psys_static_trace.h" + +#include +#include + +size_t ia_css_sizeof_program_manifest( + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_sizeof_program_manifest(): enter:\n"); + + size += sizeof(ia_css_program_manifest_t); + size += program_dependency_count * sizeof(uint8_t); + size += terminal_dependency_count * sizeof(uint8_t); + size = ceil_mul(size, sizeof(uint64_t)); + + return size; +} + +bool ia_css_has_program_manifest_fixed_cell( + const ia_css_program_manifest_t *manifest) +{ + bool has_fixed_cell = false; + + vied_nci_cell_ID_t cell_id; + vied_nci_cell_type_ID_t cell_type_id; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_has_program_manifest_fixed_cell(): enter:\n"); + + verifexit(manifest != NULL); + + cell_id = ia_css_program_manifest_get_cell_ID(manifest); + cell_type_id = ia_css_program_manifest_get_cell_type_ID(manifest); + + has_fixed_cell = ((cell_id != VIED_NCI_N_CELL_ID) && + (cell_type_id == VIED_NCI_N_CELL_TYPE_ID)); + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_has_program_manifest_fixed_cell invalid argument\n"); + } + return has_fixed_cell; +} + +size_t ia_css_program_manifest_get_size( + const ia_css_program_manifest_t *manifest) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_size(): enter:\n"); + + if (manifest != NULL) { + size = manifest->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_size invalid argument\n"); + } + + return size; +} + +ia_css_program_ID_t ia_css_program_manifest_get_program_ID( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_ID_t program_id = IA_CSS_PROGRAM_INVALID_ID; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_program_ID(): enter:\n"); + + if (manifest != NULL) { + program_id = manifest->ID; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_program_ID invalid argument\n"); + } + return program_id; +} + +int ia_css_program_manifest_set_program_ID( + ia_css_program_manifest_t *manifest, + ia_css_program_ID_t id) +{ + int ret = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_program_ID(): enter:\n"); + + if (manifest != NULL) { + manifest->ID = id; + ret = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_program_ID failed (%i)\n", ret); + } + return ret; +} + +ia_css_program_group_manifest_t *ia_css_program_manifest_get_parent( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_group_manifest_t *parent = NULL; + char *base; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_parent(): enter:\n"); + + verifexit(manifest != NULL); + + base = (char *)((char *)manifest + manifest->parent_offset); + + parent = (ia_css_program_group_manifest_t *) (base); +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_parent invalid argument\n"); + } + return parent; +} + +int ia_css_program_manifest_set_parent_offset( + ia_css_program_manifest_t *manifest, + int32_t program_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_parent_offset(): enter:\n"); 
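+
+ /*
+ * Worked example (the offset value 64 is hypothetical): a program
+ * manifest placed 64 bytes after the program group manifest header
+ * is initialized with program_offset == 64; parent_offset is then
+ * stored as -64, and ia_css_program_manifest_get_parent() recovers
+ * the parent via (char *)manifest + manifest->parent_offset.
+ */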
+ + verifexit(manifest != NULL); + + /* parent is at negative offset away from current program offset*/ + manifest->parent_offset = -program_offset; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_parent_offset failed (%i)\n", + retval); + } + return retval; +} + +ia_css_program_type_t ia_css_program_manifest_get_type( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_type_t program_type = IA_CSS_N_PROGRAM_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_type(): enter:\n"); + + if (manifest != NULL) { + program_type = manifest->program_type; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_type invalid argument\n"); + } + return program_type; +} + +int ia_css_program_manifest_set_type( + ia_css_program_manifest_t *manifest, + const ia_css_program_type_t program_type) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_type(): enter:\n"); + + if (manifest != NULL) { + manifest->program_type = program_type; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_type failed (%i)\n", retval); + } + return retval; +} + +ia_css_kernel_bitmap_t ia_css_program_manifest_get_kernel_bitmap( + const ia_css_program_manifest_t *manifest) +{ + ia_css_kernel_bitmap_t kernel_bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + kernel_bitmap = manifest->kernel_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_kernel_bitmap invalid argument\n"); + } + return kernel_bitmap; +} + +int ia_css_program_manifest_set_kernel_bitmap( + ia_css_program_manifest_t *manifest, + const ia_css_kernel_bitmap_t kernel_bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + manifest->kernel_bitmap = kernel_bitmap; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_kernel_bitmap failed (%i)\n", + retval); + } + return retval; +} + +vied_nci_cell_ID_t ia_css_program_manifest_get_cell_ID( + const ia_css_program_manifest_t *manifest) +{ + vied_nci_cell_ID_t cell_id = VIED_NCI_N_CELL_ID; +#if IA_CSS_PROCESS_MAX_CELLS > 1 + int i = 0; +#endif /* IA_CSS_PROCESS_MAX_CELLS > 1 */ + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_cell_ID(): enter:\n"); + + verifexit(manifest != NULL); + +#if IA_CSS_PROCESS_MAX_CELLS == 1 + cell_id = manifest->cell_id; +#else + for (i = 1; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + assert(VIED_NCI_N_CELL_ID == manifest->cells[i]); +#ifdef __HIVECC +#pragma hivecc unroll +#endif + } + cell_id = manifest->cells[0]; +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_cell_ID invalid argument\n"); + } + return cell_id; +} + +int ia_css_program_manifest_set_cell_ID( + ia_css_program_manifest_t *manifest, + const vied_nci_cell_ID_t cell_id) +{ + int retval = -1; +#if IA_CSS_PROCESS_MAX_CELLS > 1 + int i = 0; +#endif /* IA_CSS_PROCESS_MAX_CELLS > 1 */ + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_cell_ID(): enter:\n"); + if (manifest != NULL) { +#if IA_CSS_PROCESS_MAX_CELLS == 1 + manifest->cell_id = cell_id; +#else + 
manifest->cells[0] = cell_id; + for (i = 1; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + manifest->cells[i] = VIED_NCI_N_CELL_ID; + } +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_cell_ID failed (%i)\n", retval); + } + return retval; +} + +vied_nci_cell_type_ID_t ia_css_program_manifest_get_cell_type_ID( + const ia_css_program_manifest_t *manifest) +{ + vied_nci_cell_type_ID_t cell_type_id = VIED_NCI_N_CELL_TYPE_ID; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_cell_type_ID(): enter:\n"); + + verifexit(manifest != NULL); + + cell_type_id = (vied_nci_cell_type_ID_t)(manifest->cell_type_id); +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_cell_type_ID invalid argument\n"); + } + return cell_type_id; +} + +int ia_css_program_manifest_set_cell_type_ID( + ia_css_program_manifest_t *manifest, + const vied_nci_cell_type_ID_t cell_type_id) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_cell_type_ID(): enter:\n"); + if (manifest != NULL) { + manifest->cell_type_id = cell_type_id; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_cell_type_ID failed (%i)\n", + retval); + } + return retval; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_int_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id) +{ + vied_nci_resource_size_t int_mem_size = 0; + vied_nci_cell_type_ID_t cell_type_id; + int mem_index; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_int_mem_size(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(mem_type_id < VIED_NCI_N_MEM_TYPE_ID); + + if (ia_css_has_program_manifest_fixed_cell(manifest)) { + vied_nci_cell_ID_t cell_id = + ia_css_program_manifest_get_cell_ID(manifest); + + cell_type_id = vied_nci_cell_get_type(cell_id); + } else { + cell_type_id = + ia_css_program_manifest_get_cell_type_ID(manifest); + } + + /* loop over vied_nci_cell_mem_type to verify mem_type_id for a + * specific cell_type_id + */ + for (mem_index = 0; mem_index < VIED_NCI_N_MEM_TYPE_ID; mem_index++) { + if ((int)mem_type_id == + (int)vied_nci_cell_type_get_mem_type( + cell_type_id, mem_index)) { + int_mem_size = manifest->int_mem_size[mem_index]; + } + } + +EXIT: + if (NULL == manifest || mem_type_id >= VIED_NCI_N_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_int_mem_size invalid argument\n"); + } + return int_mem_size; +} + +int ia_css_program_manifest_set_cells_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + int array_index = 0; + int bit_index; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_cells_bitmap(): enter:\n"); + + if (manifest != NULL) { + for (bit_index = 0; bit_index < VIED_NCI_N_CELL_ID; bit_index++) { + if (vied_nci_is_bit_set_in_bitmap(bitmap, bit_index)) { + verifexit(array_index < IA_CSS_PROCESS_MAX_CELLS); +#if IA_CSS_PROCESS_MAX_CELLS == 1 + manifest->cell_id = (vied_nci_cell_ID_t)bit_index; +#else + manifest->cells[array_index] = (vied_nci_cell_ID_t)bit_index; +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + array_index++; + } + } + for (; array_index < IA_CSS_PROCESS_MAX_CELLS; array_index++) { +#if IA_CSS_PROCESS_MAX_CELLS == 1 + manifest->cell_id = VIED_NCI_N_CELL_ID; +#else + manifest->cells[array_index] 
= VIED_NCI_N_CELL_ID; +#endif /* IA_CSS_PROCESS_MAX_CELLS */ + } + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_cells_bitmap invalid argument\n"); + } +EXIT: + return retval; +} + +vied_nci_resource_bitmap_t ia_css_program_manifest_get_cells_bitmap( + const ia_css_program_manifest_t *manifest) +{ + vied_nci_resource_bitmap_t bitmap = 0; +#if IA_CSS_PROCESS_MAX_CELLS > 1 + int i = 0; +#endif /* IA_CSS_PROCESS_MAX_CELLS > 1 */ + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_cells_bitmap(): enter:\n"); + + verifexit(manifest != NULL); + +#if IA_CSS_PROCESS_MAX_CELLS == 1 + bitmap = (1 << manifest->cell_id); +#else + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + if (VIED_NCI_N_CELL_ID != manifest->cells[i]) { + bitmap |= (1 << manifest->cells[i]); + } +#ifdef __HIVECC +#pragma hivecc unroll +#endif + } +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_cells_bitmap invalid argument\n"); + } + return bitmap; +} + +int ia_css_program_manifest_set_dfm_port_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_dfm_port_bitmap(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + manifest->dfm_port_bitmap[dfm_type_id] = bitmap; +#else + (void)bitmap; + (void)dfm_type_id; +#endif + retval = 0; + +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_dfm_port_bitmap invalid argument\n"); + } + return retval; +} + +int ia_css_program_manifest_set_dfm_active_port_bitmap( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const vied_nci_resource_bitmap_t bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_dfm_active_port_bitmap(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + manifest->dfm_active_port_bitmap[dfm_type_id] = bitmap; +#else + (void)bitmap; + (void)dfm_type_id; +#endif + retval = 0; + +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_dfm_active_port_bitmap invalid argument\n"); + } + return retval; +} + +int ia_css_program_manifest_set_is_dfm_relocatable( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id, + const uint8_t is_relocatable) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_is_dfm_relocatable(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + manifest->is_dfm_relocatable[dfm_type_id] = is_relocatable; +#else + (void)is_relocatable; + (void)dfm_type_id; +#endif + retval = 0; + + EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_is_dfm_relocatable invalid argument\n"); + } + + return retval; +} + +uint8_t ia_css_program_manifest_get_is_dfm_relocatable( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id) +{ + uint8_t ret = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_is_dfm_relocatable(): enter:\n"); + + 
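+	/* A NULL manifest or an out-of-range dfm_type_id jumps to EXIT below and returns the default of 0 (not relocatable). */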
verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + ret = manifest->is_dfm_relocatable[dfm_type_id]; +#else + ret = 0; + (void)dfm_type_id; +#endif +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_is_dfm_relocatable invalid argument\n"); + } + return ret; +} + +vied_nci_resource_bitmap_t ia_css_program_manifest_get_dfm_port_bitmap( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id) +{ + vied_nci_resource_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_dfm_port_bitmap(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + bitmap = manifest->dfm_port_bitmap[dfm_type_id]; +#else + bitmap = 0; + (void)dfm_type_id; +#endif +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_dfm_port_bitmap invalid argument\n"); + } + return bitmap; +} + +vied_nci_resource_bitmap_t ia_css_program_manifest_get_dfm_active_port_bitmap( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_dfm_id_t dfm_type_id) +{ + vied_nci_resource_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_dfm_active_port_bitmap(): enter:\n"); + + verifexit(manifest != NULL); +#if (VIED_NCI_N_DEV_DFM_ID > 0) + verifexit(dfm_type_id < VIED_NCI_N_DEV_DFM_ID); + bitmap = manifest->dfm_active_port_bitmap[dfm_type_id]; +#else + bitmap = 0; + (void)dfm_type_id; +#endif +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_dfm_active_port_bitmap invalid argument\n"); + } + return bitmap; +} + +int ia_css_program_manifest_set_int_mem_size( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t int_mem_size) +{ + int retval = -1; + vied_nci_cell_type_ID_t cell_type_id; + int mem_index; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_int_mem_size(): enter:\n"); + + if (ia_css_has_program_manifest_fixed_cell(manifest)) { + vied_nci_cell_ID_t cell_id = + ia_css_program_manifest_get_cell_ID(manifest); + + cell_type_id = vied_nci_cell_get_type(cell_id); + } else { + cell_type_id = + ia_css_program_manifest_get_cell_type_ID(manifest); + } + + if (manifest != NULL && mem_type_id < VIED_NCI_N_MEM_TYPE_ID) { + /* loop over vied_nci_cell_mem_type to verify mem_type_id for + * a specific cell_type_id + */ + for (mem_index = 0; mem_index < VIED_NCI_N_MEM_TYPE_ID; + mem_index++) { + if ((int)mem_type_id == + (int)vied_nci_cell_type_get_mem_type( + cell_type_id, mem_index)) { + manifest->int_mem_size[mem_index] = + int_mem_size; + retval = 0; + } + } + } + if (retval != 0) { + IA_CSS_TRACE_2(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_int_mem_size cell_type_id %d has no mem_type_id %d\n", + (int)cell_type_id, (int)mem_type_id); + } + + return retval; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id) +{ + vied_nci_resource_size_t ext_mem_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_ext_mem_size(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID); + + ext_mem_size = manifest->ext_mem_size[mem_type_id]; +EXIT: + if (NULL == manifest 
|| mem_type_id >= VIED_NCI_N_DATA_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_ext_mem_size invalid argument\n"); + } + return ext_mem_size; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_ext_mem_offset( + const ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id) +{ + vied_nci_resource_size_t ext_mem_offset = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_ext_mem_offset(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID); + + ext_mem_offset = manifest->ext_mem_offset[mem_type_id]; +EXIT: + if (NULL == manifest || mem_type_id >= VIED_NCI_N_DATA_MEM_TYPE_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_ext_mem_offset invalid argument\n"); + } + return ext_mem_offset; +} + +int ia_css_program_manifest_set_ext_mem_size( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t ext_mem_size) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_ext_mem_size(): enter:\n"); + + if (manifest != NULL && mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID) { + manifest->ext_mem_size[mem_type_id] = ext_mem_size; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_ext_mem_size invalid argument\n"); + } + + return retval; +} + +int ia_css_program_manifest_set_ext_mem_offset( + ia_css_program_manifest_t *manifest, + const vied_nci_mem_type_ID_t mem_type_id, + const vied_nci_resource_size_t ext_mem_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_ext_mem_offset(): enter:\n"); + + if (manifest != NULL && mem_type_id < VIED_NCI_N_DATA_MEM_TYPE_ID) { + manifest->ext_mem_offset[mem_type_id] = ext_mem_offset; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_ext_mem_offset invalid argument\n"); + } + + return retval; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_size( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + vied_nci_resource_size_t dev_chn_size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_dev_chn_size(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(dev_chn_id < VIED_NCI_N_DEV_CHN_ID); + + dev_chn_size = manifest->dev_chn_size[dev_chn_id]; +EXIT: + if (NULL == manifest || dev_chn_id >= VIED_NCI_N_DEV_CHN_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_dev_chn_size invalid argument\n"); + } + return dev_chn_size; +} + +vied_nci_resource_size_t ia_css_program_manifest_get_dev_chn_offset( + const ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id) +{ + vied_nci_resource_size_t dev_chn_offset = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_dev_chn_offset(): enter:\n"); + + verifexit(manifest != NULL); + verifexit(dev_chn_id < VIED_NCI_N_DEV_CHN_ID); + + dev_chn_offset = manifest->dev_chn_offset[dev_chn_id]; +EXIT: + if (NULL == manifest || dev_chn_id >= VIED_NCI_N_DEV_CHN_ID) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_dev_chn_offset invalid argument\n"); + } + return dev_chn_offset; +} + +int ia_css_program_manifest_set_dev_chn_size( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t 
dev_chn_size) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_dev_chn_size(): enter:\n"); + + if (manifest != NULL && dev_chn_id < VIED_NCI_N_DEV_CHN_ID) { + manifest->dev_chn_size[dev_chn_id] = dev_chn_size; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_dev_chn_size invalid argument\n"); + } + + return retval; +} + +int ia_css_program_manifest_set_dev_chn_offset( + ia_css_program_manifest_t *manifest, + const vied_nci_dev_chn_ID_t dev_chn_id, + const vied_nci_resource_size_t dev_chn_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_dev_chn_offset(): enter:\n"); + + if (manifest != NULL && dev_chn_id < VIED_NCI_N_DEV_CHN_ID) { + manifest->dev_chn_offset[dev_chn_id] = dev_chn_offset; + retval = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_set_dev_chn_offset invalid argument\n"); + } + + return retval; +} + +uint8_t ia_css_program_manifest_get_program_dependency_count( + const ia_css_program_manifest_t *manifest) +{ + uint8_t program_dependency_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_program_dependency_count(): enter:\n"); + + if (manifest != NULL) { + program_dependency_count = manifest->program_dependency_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_program_dependency_count invalid argument\n"); + } + return program_dependency_count; +} + +uint8_t ia_css_program_manifest_get_program_dependency( + const ia_css_program_manifest_t *manifest, + const unsigned int index) +{ + uint8_t program_dependency = IA_CSS_PROGRAM_INVALID_DEPENDENCY; + uint8_t *program_dep_ptr; + uint8_t program_dependency_count; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_program_dependency(): enter:\n"); + + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + + if (index < program_dependency_count) { + program_dep_ptr = + (uint8_t *)((uint8_t *)manifest + + manifest->program_dependency_offset + + index * sizeof(uint8_t)); + program_dependency = *program_dep_ptr; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_program_dependency invalid argument\n"); + } + return program_dependency; +} + +int ia_css_program_manifest_set_program_dependency( + ia_css_program_manifest_t *manifest, + const uint8_t program_dependency, + const unsigned int index) +{ + int retval = -1; + uint8_t *program_dep_ptr; + uint8_t program_dependency_count; + uint8_t program_count; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_program_dependency(): enter:\n"); + + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + program_count = + ia_css_program_group_manifest_get_program_count( + ia_css_program_manifest_get_parent(manifest)); + + if ((index < program_dependency_count) && + (program_dependency < program_count)) { + program_dep_ptr = (uint8_t *)((uint8_t *)manifest + + manifest->program_dependency_offset + + index*sizeof(uint8_t)); + *program_dep_ptr = program_dependency; + retval = 0; + } + + if (retval != 0) { + IA_CSS_TRACE_3(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_program_dependency(m, %d, %d) failed (%i)\n", + program_dependency, index, retval); + } + return retval; +} + +uint8_t ia_css_program_manifest_get_terminal_dependency_count( + const ia_css_program_manifest_t 
*manifest) +{ + uint8_t terminal_dependency_count = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_terminal_dependency_count(): enter:\n"); + + if (manifest != NULL) { + terminal_dependency_count = manifest->terminal_dependency_count; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_terminal_dependency_count invalid argument\n"); + } + return terminal_dependency_count; +} + +uint8_t ia_css_program_manifest_get_terminal_dependency( + const ia_css_program_manifest_t *manifest, + const unsigned int index) +{ + uint8_t terminal_dependency = IA_CSS_PROGRAM_INVALID_DEPENDENCY; + uint8_t *terminal_dep_ptr; + uint8_t terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_get_terminal_dependency(): enter:\n"); + + if (index < terminal_dependency_count) { + terminal_dep_ptr = (uint8_t *)((uint8_t *)manifest + + manifest->terminal_dependency_offset + index); + terminal_dependency = *terminal_dep_ptr; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_program_manifest_get_terminal_dependency invalid argument\n"); + } + return terminal_dependency; +} + +int ia_css_program_manifest_set_terminal_dependency( + ia_css_program_manifest_t *manifest, + const uint8_t terminal_dependency, + const unsigned int index) +{ + int retval = -1; + uint8_t *terminal_dep_ptr; + uint8_t terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + uint8_t terminal_count = + ia_css_program_group_manifest_get_terminal_count( + ia_css_program_manifest_get_parent(manifest)); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_program_manifest_set_terminal_dependency(): enter:\n"); + + if ((index < terminal_dependency_count) && + (terminal_dependency < terminal_count)) { + terminal_dep_ptr = (uint8_t *)((uint8_t *)manifest + + manifest->terminal_dependency_offset + index); + *terminal_dep_ptr = terminal_dependency; + retval = 0; + } + + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_set_terminal_dependency failed (%i)\n", + retval); + } + return retval; +} + +bool ia_css_is_program_manifest_subnode_program_type( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_type_t program_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_program_manifest_subnode_program_type(): enter:\n"); + + program_type = ia_css_program_manifest_get_type(manifest); +/* The error return is the limit value, so no need to check on the manifest + * pointer + */ + return (program_type == IA_CSS_PROGRAM_TYPE_PARALLEL_SUB) || + (program_type == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUB) || + (program_type == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUB); +} + +bool ia_css_is_program_manifest_supernode_program_type( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_type_t program_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_program_manifest_supernode_program_type(): enter:\n"); + + program_type = ia_css_program_manifest_get_type(manifest); + +/* The error return is the limit value, so no need to check on the manifest + * pointer + */ + return (program_type == IA_CSS_PROGRAM_TYPE_PARALLEL_SUPER) || + (program_type == IA_CSS_PROGRAM_TYPE_EXCLUSIVE_SUPER) || + (program_type == IA_CSS_PROGRAM_TYPE_VIRTUAL_SUPER); +} + +bool ia_css_is_program_manifest_singular_program_type( + const ia_css_program_manifest_t *manifest) +{ + ia_css_program_type_t program_type; + + 
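+	/* Unlike the sub/supernode checks above, a singular program matches exactly one type: IA_CSS_PROGRAM_TYPE_SINGULAR. */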
IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_program_manifest_singular_program_type(): enter:\n"); + + program_type = ia_css_program_manifest_get_type(manifest); + +/* The error return is the limit value, so no need to check on the manifest + * pointer + */ + return (program_type == IA_CSS_PROGRAM_TYPE_SINGULAR); +} + +void ia_css_program_manifest_init( + ia_css_program_manifest_t *blob, + const uint8_t program_dependency_count, + const uint8_t terminal_dependency_count) +{ + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_program_manifest_init(): enter:\n"); + + /*TODO: add assert*/ + if (!blob) + return; + + blob->ID = 1; + blob->program_dependency_count = program_dependency_count; + blob->terminal_dependency_count = terminal_dependency_count; + blob->program_dependency_offset = sizeof(ia_css_program_manifest_t); + blob->terminal_dependency_offset = blob->program_dependency_offset + + sizeof(uint8_t) * program_dependency_count; + blob->size = + (uint16_t)ia_css_sizeof_program_manifest( + program_dependency_count, + terminal_dependency_count); +} + +/* We need to refactor those files in order to build in the firmware only + what is needed, switches are put current to workaround compilation problems + in the firmware (for example lack of uint64_t support) + supported in the firmware + */ +#if !defined(__HIVECC) + +#if defined(_MSC_VER) +/* WA for a visual studio compiler bug, refer to + developercommunity.visualstudio.com/content/problem/209359/ice-with-fpfast-in-156-and-msvc-daily-1413263051-p.html +*/ +#pragma optimize("", off) +#endif + +int ia_css_program_manifest_print( + const ia_css_program_manifest_t *manifest, + void *fid) +{ + int retval = -1; + int i, mem_index, dev_chn_index; + + vied_nci_cell_type_ID_t cell_type_id; + uint8_t program_dependency_count; + uint8_t terminal_dependency_count; + ia_css_kernel_bitmap_t bitmap; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_program_manifest_print(): enter:\n"); + + verifexit(manifest != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "sizeof(manifest) = %d\n", + (int)ia_css_program_manifest_get_size(manifest)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "program ID = %d\n", + (int)ia_css_program_manifest_get_program_ID(manifest)); + + bitmap = ia_css_program_manifest_get_kernel_bitmap(manifest); + verifexit(ia_css_kernel_bitmap_print(bitmap, fid) == 0); + + if (ia_css_has_program_manifest_fixed_cell(manifest)) { + vied_nci_cell_ID_t cell_id = + ia_css_program_manifest_get_cell_ID(manifest); + + cell_type_id = vied_nci_cell_get_type(cell_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "cell(program) = %d\n", + (int)cell_id); + } else { + cell_type_id = + ia_css_program_manifest_get_cell_type_ID(manifest); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "cell type(program) = %d\n", + (int)cell_type_id); + + for (mem_index = 0; mem_index < (int)VIED_NCI_N_MEM_TYPE_ID; + mem_index++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(internal mem) type = %d\n", + (int)vied_nci_cell_type_get_mem_type(cell_type_id, mem_index)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(internal mem) size = %d\n", + manifest->int_mem_size[mem_index]); + } + + for (mem_index = 0; mem_index < (int)VIED_NCI_N_DATA_MEM_TYPE_ID; + mem_index++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(external mem) type = %d\n", + (int)(vied_nci_mem_type_ID_t)mem_index); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(external mem) size = %d\n", + manifest->ext_mem_size[mem_index]); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(external mem) 
offset = %d\n", + manifest->ext_mem_offset[mem_index]); + } + + for (dev_chn_index = 0; dev_chn_index < (int)VIED_NCI_N_DEV_CHN_ID; + dev_chn_index++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(device channel) type = %d\n", + (int)dev_chn_index); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(device channel) size = %d\n", + manifest->dev_chn_size[dev_chn_index]); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(device channel) offset = %d\n", + manifest->dev_chn_offset[dev_chn_index]); + } +#if HAS_DFM + for (dev_chn_index = 0; dev_chn_index < (int)VIED_NCI_N_DEV_DFM_ID; + dev_chn_index++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(dfm port) type = %d\n", + (int)dev_chn_index); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(dfm port) port_bitmap = %d\n", + manifest->dfm_port_bitmap[dev_chn_index]); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(dfm port) active_port_bitmap = %d\n", + manifest->dfm_active_port_bitmap[dev_chn_index]); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(dfm port) is_dfm_relocatable = %d\n", + manifest->is_dfm_relocatable[dev_chn_index]); + } +#endif + +#if IA_CSS_PROCESS_MAX_CELLS == 1 + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(cells) bitmap = %d\n", + manifest->cell_id); +#else + for (i = 0; i < IA_CSS_PROCESS_MAX_CELLS; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\ttype(cells) bitmap = %d\n", + manifest->cells[i]); + } +#endif /* IA_CSS_PROCESS_MAX_CELLS == 1 */ + program_dependency_count = + ia_css_program_manifest_get_program_dependency_count(manifest); + if (program_dependency_count == 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "program_dependencies[%d] {};\n", + program_dependency_count); + } else { + uint8_t prog_dep; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "program_dependencies[%d] {\n", + program_dependency_count); + for (i = 0; i < (int)program_dependency_count - 1; i++) { + prog_dep = + ia_css_program_manifest_get_program_dependency( + manifest, i); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t %4d,\n", prog_dep); + } + prog_dep = + ia_css_program_manifest_get_program_dependency(manifest, i); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\t %4d }\n", prog_dep); + (void)prog_dep; + } + + terminal_dependency_count = + ia_css_program_manifest_get_terminal_dependency_count(manifest); + if (terminal_dependency_count == 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "terminal_dependencies[%d] {};\n", + terminal_dependency_count); + } else { + uint8_t term_dep; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "terminal_dependencies[%d] {\n", + terminal_dependency_count); + for (i = 0; i < (int)terminal_dependency_count - 1; i++) { + term_dep = + ia_css_program_manifest_get_terminal_dependency( + manifest, i); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t %4d,\n", term_dep); + } + term_dep = + ia_css_program_manifest_get_terminal_dependency(manifest, i); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\t %4d }\n", term_dep); + (void)term_dep; + } + (void)cell_type_id; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_program_manifest_print failed (%i)\n", retval); + } + return retval; +} + +#if defined(_MSC_VER) +/* WA for a visual studio compiler bug */ +#pragma optimize("", off) +#endif + +#endif + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_terminal_manifest.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_terminal_manifest.c new file mode 100644 index 000000000000..80ff0d5b0080 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/psysapi/static/src/ia_css_psys_terminal_manifest.c
@@ -0,0 +1,1138 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+
+#include
+
+/* Data object types on the terminals */
+#include
+/* for ia_css_kernel_bitmap_t, ia_css_kernel_bitmap_clear, ia_css_... */
+#include
+
+#include "ia_css_psys_program_group_private.h"
+#include "ia_css_terminal_manifest.h"
+#include "ia_css_terminal_manifest_types.h"
+
+#include
+#include
+#include
+#include "ia_css_psys_static_trace.h"
+
+/* We need to refactor these files in order to build only what is needed in
+ * the firmware. The switches below are currently a workaround for
+ * compilation problems in the firmware (for example, the lack of uint64_t
+ * support).
+ */
+#if !defined(__HIVECC)
+static const char *terminal_type_strings[IA_CSS_N_TERMINAL_TYPES + 1] = {
+	"IA_CSS_TERMINAL_TYPE_DATA_IN",
+	"IA_CSS_TERMINAL_TYPE_DATA_OUT",
+	"IA_CSS_TERMINAL_TYPE_PARAM_STREAM",
+	/**< Type 1-5 parameter input */
+	"IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN",
+	/**< Type 1-5 parameter output */
+	"IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT",
+	/**< Represents the new type of terminal for
+	 * the "spatial dependent parameters", when params go in
+	 */
+	"IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN",
+	/**< Represents the new type of terminal for
+	 * the "spatial dependent parameters", when params go out
+	 */
+	"IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT",
+	/**< Represents the new type of terminal for
+	 * the explicit slicing, when params go in
+	 */
+	"IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN",
+	/**< Represents the new type of terminal for
+	 * the explicit slicing, when params go out
+	 */
+	"IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT",
+	/**< State (private data) input */
+	"IA_CSS_TERMINAL_TYPE_STATE_IN",
+	/**< State (private data) output */
+	"IA_CSS_TERMINAL_TYPE_STATE_OUT",
+	"IA_CSS_TERMINAL_TYPE_PROGRAM",
+	"IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT",
+	"UNDEFINED_TERMINAL_TYPE"};
+
+#endif
+
+bool ia_css_is_terminal_manifest_spatial_parameter_terminal(
+	const ia_css_terminal_manifest_t *manifest)
+{
+	ia_css_terminal_type_t terminal_type;
+
+	IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE,
+		"ia_css_is_terminal_manifest_spatial_parameter_terminal(): enter:\n");
+
+	terminal_type = ia_css_terminal_manifest_get_type(manifest);
+
+	return ((terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN) ||
+		(terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT));
+}
+
+bool ia_css_is_terminal_manifest_program_terminal(
+	const ia_css_terminal_manifest_t *manifest)
+{
+	ia_css_terminal_type_t terminal_type;
+
+	IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE,
+		"ia_css_is_terminal_manifest_program_terminal(): enter:\n");
+
+	terminal_type = ia_css_terminal_manifest_get_type(manifest);
+
+	return (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM);
+}
+
+bool ia_css_is_terminal_manifest_program_control_init_terminal(
+	const ia_css_terminal_manifest_t *manifest)
+{
+	ia_css_terminal_type_t terminal_type;
+
+	IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE,
"ia_css_is_terminal_manifest_program_control_init_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT); +} + + +bool ia_css_is_terminal_manifest_parameter_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + /* will return an error value on error */ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_parameter_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT); +} + +bool ia_css_is_terminal_manifest_data_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + /* will return an error value on error */ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_data_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return ((terminal_type == IA_CSS_TERMINAL_TYPE_DATA_IN) || + (terminal_type == IA_CSS_TERMINAL_TYPE_DATA_OUT)); +} + +bool ia_css_is_terminal_manifest_sliced_terminal( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_is_terminal_manifest_sliced_terminal(): enter:\n"); + + terminal_type = ia_css_terminal_manifest_get_type(manifest); + + return ((terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN) || + (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT)); +} + +size_t ia_css_terminal_manifest_get_size( + const ia_css_terminal_manifest_t *manifest) +{ + size_t size = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_get_size(): enter:\n"); + + if (manifest != NULL) { + size = manifest->size; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_terminal_manifest_get_size: invalid argument\n"); + } + return size; +} + +ia_css_terminal_type_t ia_css_terminal_manifest_get_type( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_type_t terminal_type = IA_CSS_N_TERMINAL_TYPES; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_get_type(): enter:\n"); + + if (manifest != NULL) { + terminal_type = manifest->terminal_type; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_terminal_manifest_get_type: invalid argument\n"); + } + return terminal_type; +} + +int ia_css_terminal_manifest_set_type( + ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_type_t terminal_type) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_set_type(): enter:\n"); + + if (manifest != NULL) { + manifest->terminal_type = terminal_type; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_set_type failed (%i)\n", + retval); + } + return retval; +} + +int ia_css_terminal_manifest_set_ID( + ia_css_terminal_manifest_t *manifest, + const ia_css_terminal_ID_t ID) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_set_ID(): enter:\n"); + + if (manifest != NULL) { + manifest->ID = ID; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_set_ID failed (%i)\n", + retval); + } + return retval; +} + +ia_css_terminal_ID_t ia_css_terminal_manifest_get_ID( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_terminal_ID_t retval; + + 
IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_get_ID(): enter:\n"); + + if (manifest != NULL) { + retval = manifest->ID; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_get_ID failed\n"); + retval = IA_CSS_TERMINAL_INVALID_ID; + } + return retval; +} + +ia_css_program_group_manifest_t *ia_css_terminal_manifest_get_parent( + const ia_css_terminal_manifest_t *manifest) +{ + ia_css_program_group_manifest_t *parent = NULL; + char *base; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_get_parent(): enter:\n"); + + verifexit(manifest != NULL); + + base = (char *)((char *)manifest + manifest->parent_offset); + + parent = (ia_css_program_group_manifest_t *)(base); +EXIT: + return parent; +} + +int ia_css_terminal_manifest_set_parent_offset( + ia_css_terminal_manifest_t *manifest, + int32_t terminal_offset) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_terminal_manifest_set_parent_offset(): enter:\n"); + + verifexit(manifest != NULL); + + /* parent is at negative offset away from current terminal offset*/ + manifest->parent_offset = -terminal_offset; + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_set_parent_offset failed (%i)\n", + retval); + } + return retval; +} + +ia_css_frame_format_bitmap_t +ia_css_data_terminal_manifest_get_frame_format_bitmap( + const ia_css_data_terminal_manifest_t *manifest) +{ + ia_css_frame_format_bitmap_t bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_frame_format_bitmap(): enter:\n"); + + if (manifest != NULL) { + bitmap = manifest->frame_format_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_data_terminal_manifest_get_frame_format_bitmap invalid argument\n"); + } + return bitmap; +} + +int ia_css_data_terminal_manifest_set_frame_format_bitmap( + ia_css_data_terminal_manifest_t *manifest, + ia_css_frame_format_bitmap_t bitmap) +{ + int ret = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_frame_format_bitmap(): enter:\n"); + + if (manifest != NULL) { + manifest->frame_format_bitmap = bitmap; + ret = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_frame_format_bitmap failed (%i)\n", + ret); + } + + return ret; +} + +bool ia_css_data_terminal_manifest_can_support_compression( + const ia_css_data_terminal_manifest_t *manifest) +{ + bool compression_support = false; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_compression_support(): enter:\n"); + + if (manifest != NULL) { + /* compression_support is used boolean encoded in uint8_t. + * So we only need to check + * if this is non-zero + */ + compression_support = (manifest->compression_support != 0); + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_can_support_compression invalid argument\n"); + } + + return compression_support; +} + +int ia_css_data_terminal_manifest_set_compression_support( + ia_css_data_terminal_manifest_t *manifest, + bool compression_support) +{ + int ret = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_compression_support(): enter:\n"); + + if (manifest != NULL) { + manifest->compression_support = + (compression_support == true) ? 
1 : 0; + ret = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_compression_support failed (%i)\n", + ret); + } + + return ret; +} + +ia_css_connection_bitmap_t ia_css_data_terminal_manifest_get_connection_bitmap( + const ia_css_data_terminal_manifest_t *manifest) +{ + ia_css_connection_bitmap_t connection_bitmap = 0; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_connection_bitmap(): enter:\n"); + + if (manifest != NULL) { + connection_bitmap = manifest->connection_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_data_terminal_manifest_get_connection_bitmap invalid argument\n"); + } + return connection_bitmap; +} + +int ia_css_data_terminal_manifest_set_connection_bitmap( + ia_css_data_terminal_manifest_t *manifest, ia_css_connection_bitmap_t bitmap) +{ + int ret = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_connection_bitmap(): enter:\n"); + + if (manifest != NULL) { + assert(bitmap != 0); /* zero means there is no connection, this is invalid. */ + assert((bitmap >> IA_CSS_N_CONNECTION_TYPES) == 0); + + manifest->connection_bitmap = bitmap; + ret = 0; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_data_terminal_manifest_set_connection_bitmap invalid argument\n"); + } + return ret; +} + +/* We need to refactor those files in order to build in the firmware only + what is needed, switches are put current to workaround compilation problems + in the firmware (for example lack of uint64_t support) + supported in the firmware + */ +#if !defined(__HIVECC) +ia_css_kernel_bitmap_t ia_css_data_terminal_manifest_get_kernel_bitmap( + const ia_css_data_terminal_manifest_t *manifest) +{ + ia_css_kernel_bitmap_t kernel_bitmap = ia_css_kernel_bitmap_clear(); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + kernel_bitmap = manifest->kernel_bitmap; + } else { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "ia_css_data_terminal_manifest_get_kernel_bitmap: invalid argument\n"); + } + return kernel_bitmap; +} + +int ia_css_data_terminal_manifest_set_kernel_bitmap( + ia_css_data_terminal_manifest_t *manifest, + const ia_css_kernel_bitmap_t kernel_bitmap) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_kernel_bitmap(): enter:\n"); + + if (manifest != NULL) { + manifest->kernel_bitmap = kernel_bitmap; + retval = 0; + } else { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_kernel_bitmap: failed (%i)\n", + retval); + } + + return retval; +} + +int ia_css_data_terminal_manifest_set_kernel_bitmap_unique( + ia_css_data_terminal_manifest_t *manifest, + const unsigned int index) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_kernel_bitmap_unique(): enter:\n"); + + if (manifest != NULL) { + ia_css_kernel_bitmap_t kernel_bitmap = + ia_css_kernel_bitmap_clear(); + + kernel_bitmap = ia_css_kernel_bitmap_set(kernel_bitmap, index); + verifexit(!ia_css_is_kernel_bitmap_empty(kernel_bitmap)); + verifexit(ia_css_data_terminal_manifest_set_kernel_bitmap( + manifest, kernel_bitmap) == 0); + retval = 0; + } + +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_kernel_bitmap_unique failed (%i)\n", + retval); + } + return retval; +} +#endif + +int ia_css_data_terminal_manifest_set_min_size( + 
ia_css_data_terminal_manifest_t *manifest, + const uint16_t min_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_min_size(): enter:\n"); + + verifexit(manifest != NULL); + + manifest->min_size[IA_CSS_COL_DIMENSION] = + min_size[IA_CSS_COL_DIMENSION]; + manifest->min_size[IA_CSS_ROW_DIMENSION] = + min_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_min_size: invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_set_max_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t max_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_max_size(): enter:\n"); + + verifexit(manifest != NULL); + + manifest->max_size[IA_CSS_COL_DIMENSION] = + max_size[IA_CSS_COL_DIMENSION]; + manifest->max_size[IA_CSS_ROW_DIMENSION] = + max_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_max_size: invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_get_min_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_min_size(): enter:\n"); + + verifexit(manifest != NULL); + + min_size[IA_CSS_COL_DIMENSION] = + manifest->min_size[IA_CSS_COL_DIMENSION]; + min_size[IA_CSS_ROW_DIMENSION] = + manifest->min_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_get_min_size: invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_get_max_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_max_size(): enter:\n"); + + verifexit(manifest != NULL); + + max_size[IA_CSS_COL_DIMENSION] = + manifest->max_size[IA_CSS_COL_DIMENSION]; + max_size[IA_CSS_ROW_DIMENSION] = + manifest->max_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_get_max_size: invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_set_min_fragment_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t min_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_min_fragment_size(): enter:\n"); + + verifexit(manifest != NULL); + + manifest->min_fragment_size[IA_CSS_COL_DIMENSION] = + min_size[IA_CSS_COL_DIMENSION]; + manifest->min_fragment_size[IA_CSS_ROW_DIMENSION] = + min_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_min_fragment_size invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_set_max_fragment_size( + ia_css_data_terminal_manifest_t *manifest, + const uint16_t max_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_set_max_fragment_size(): enter:\n"); + + 
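+	/* Same pattern as the min-fragment setter above: NULL-check, then copy the column and row dimensions. */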
verifexit(manifest != NULL); + + manifest->max_fragment_size[IA_CSS_COL_DIMENSION] = + max_size[IA_CSS_COL_DIMENSION]; + manifest->max_fragment_size[IA_CSS_ROW_DIMENSION] = + max_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_set_max_fragment_size invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_get_min_fragment_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t min_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_min_fragment_size(): enter:\n"); + + verifexit(manifest != NULL); + + min_size[IA_CSS_COL_DIMENSION] = + manifest->min_fragment_size[IA_CSS_COL_DIMENSION]; + min_size[IA_CSS_ROW_DIMENSION] = + manifest->min_fragment_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_get_min_fragment_size invalid argument\n"); + } + return retval; +} + +int ia_css_data_terminal_manifest_get_max_fragment_size( + const ia_css_data_terminal_manifest_t *manifest, + uint16_t max_size[IA_CSS_N_DATA_DIMENSION]) +{ + int retval = -1; + + IA_CSS_TRACE_0(PSYSAPI_STATIC, VERBOSE, + "ia_css_data_terminal_manifest_get_max_fragment_size(): enter:\n"); + + verifexit(manifest != NULL); + + max_size[IA_CSS_COL_DIMENSION] = + manifest->max_fragment_size[IA_CSS_COL_DIMENSION]; + max_size[IA_CSS_ROW_DIMENSION] = + manifest->max_fragment_size[IA_CSS_ROW_DIMENSION]; + retval = 0; + +EXIT: + if (NULL == manifest) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, ERROR, + "ia_css_data_terminal_manifest_get_max_fragment_size invalid argument\n"); + } + return retval; +} + +/* We need to refactor those files in order to build in the firmware only + what is needed, switches are put current to workaround compilation problems + in the firmware (for example lack of uint64_t support) + supported in the firmware + */ +#if !defined(__HIVECC) + +#define PRINT_DIMENSION(name, var) IA_CSS_TRACE_3(PSYSAPI_STATIC, \ + INFO, "%s:\t%d %d\n", \ + (name), \ + (var)[IA_CSS_COL_DIMENSION], \ + (var)[IA_CSS_ROW_DIMENSION]) + +int ia_css_terminal_manifest_print( + const ia_css_terminal_manifest_t *manifest, + void *fid) +{ + int retval = -1; + ia_css_terminal_type_t terminal_type = + ia_css_terminal_manifest_get_type(manifest); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, + "ia_css_terminal_manifest_print(): enter:\n"); + + verifexit(manifest != NULL); + NOT_USED(fid); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "sizeof(manifest) = %d\n", + (int)ia_css_terminal_manifest_get_size(manifest)); + + PRINT("typeof(manifest) = %s\n", terminal_type_strings[terminal_type]); + + if (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT) { + ia_css_param_terminal_manifest_t *pterminal_manifest = + (ia_css_param_terminal_manifest_t *)manifest; + uint16_t section_count = + pterminal_manifest->param_manifest_section_desc_count; + int i; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "sections(manifest) = %d\n", (int)section_count); + for (i = 0; i < section_count; i++) { + const ia_css_param_manifest_section_desc_t *manifest = + ia_css_param_terminal_manifest_get_prm_sct_desc( + pterminal_manifest, i); + verifjmpexit(manifest != NULL); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "kernel_id = %d\n", (int)manifest->kernel_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "mem_type_id = %d\n", + 
(int)manifest->mem_type_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "max_mem_size = %d\n", + (int)manifest->max_mem_size); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "region_id = %d\n", + (int)manifest->region_id); + } + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT) { + ia_css_sliced_param_terminal_manifest_t + *sliced_terminal_manifest = + (ia_css_sliced_param_terminal_manifest_t *)manifest; + uint32_t kernel_id; + uint16_t section_count; + uint16_t section_idx; + + kernel_id = sliced_terminal_manifest->kernel_id; + section_count = + sliced_terminal_manifest->sliced_param_section_count; + + NOT_USED(kernel_id); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "kernel_id = %d\n", (int)kernel_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "section_count = %d\n", (int)section_count); + + for (section_idx = 0; section_idx < section_count; + section_idx++) { + ia_css_sliced_param_manifest_section_desc_t + *sliced_param_manifest_section_desc; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "section %d\n", (int)section_idx); + sliced_param_manifest_section_desc = + ia_css_sliced_param_terminal_manifest_get_sliced_prm_sct_desc( + sliced_terminal_manifest, section_idx); + verifjmpexit(sliced_param_manifest_section_desc != + NULL); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "mem_type_id = %d\n", + (int)sliced_param_manifest_section_desc->mem_type_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "region_id = %d\n", + (int)sliced_param_manifest_section_desc->region_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "max_mem_size = %d\n", + (int)sliced_param_manifest_section_desc->max_mem_size); + } + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM) { + ia_css_program_terminal_manifest_t *program_terminal_manifest = + (ia_css_program_terminal_manifest_t *)manifest; + uint32_t sequencer_info_kernel_id; + uint16_t max_kernel_fragment_sequencer_command_desc; + uint16_t kernel_fragment_sequencer_info_manifest_info_count; + uint16_t seq_info_idx; + + sequencer_info_kernel_id = + program_terminal_manifest->sequencer_info_kernel_id; + max_kernel_fragment_sequencer_command_desc = + program_terminal_manifest-> + max_kernel_fragment_sequencer_command_desc; + kernel_fragment_sequencer_info_manifest_info_count = + program_terminal_manifest-> + kernel_fragment_sequencer_info_manifest_info_count; + + NOT_USED(sequencer_info_kernel_id); + NOT_USED(max_kernel_fragment_sequencer_command_desc); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "sequencer_info_kernel_id = %d\n", + (int)sequencer_info_kernel_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "max_kernel_fragment_sequencer_command_desc = %d\n", + (int)max_kernel_fragment_sequencer_command_desc); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "kernel_fragment_sequencer_info_manifest_info_count = %d\n", + (int) + kernel_fragment_sequencer_info_manifest_info_count); + + for (seq_info_idx = 0; seq_info_idx < + kernel_fragment_sequencer_info_manifest_info_count; + seq_info_idx++) { + ia_css_kernel_fragment_sequencer_info_manifest_desc_t + *sequencer_info_manifest_desc; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "sequencer info %d\n", (int)seq_info_idx); + sequencer_info_manifest_desc = + ia_css_program_terminal_manifest_get_kernel_frgmnt_seq_info_desc + (program_terminal_manifest, seq_info_idx); + verifjmpexit(sequencer_info_manifest_desc != NULL); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_slice_dimension[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + 
min_fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_slice_dimension[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_slice_dimension[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_slice_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_slice_count[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_slice_count[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_slice_count[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_slice_count[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_point_decimation_factor[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_point_decimation_factor[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_point_decimation_factor[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_overlay_on_pixel_topleft_index[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_overlay_on_pixel_topleft_index[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_overlay_pixel_topleft_index[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "min_fragment_grid_overlay_on_pixel_dimension[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + min_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + IA_CSS_TRACE_2(PSYSAPI_STATIC, INFO, + "max_fragment_grid_overlay_on_pixel_dimension[] = {%d, %d}\n", + (int)sequencer_info_manifest_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_COL_DIMENSION], + (int)sequencer_info_manifest_desc-> + max_fragment_grid_overlay_pixel_dimension[ + IA_CSS_ROW_DIMENSION]); + } + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT) { + ia_css_program_control_init_terminal_manifest_t *progctrlinit_man = + (ia_css_program_control_init_terminal_manifest_t *)manifest; + ia_css_program_control_init_terminal_manifest_print(progctrlinit_man); + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_DATA_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_DATA_OUT) { + + ia_css_data_terminal_manifest_t *dterminal_manifest = + (ia_css_data_terminal_manifest_t *)manifest; + int i; + + NOT_USED(dterminal_manifest); + + verifexit(ia_css_kernel_bitmap_print( + 
ia_css_data_terminal_manifest_get_kernel_bitmap( + dterminal_manifest), fid) == 0); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "formats(manifest) = %04x\n", + (int)ia_css_data_terminal_manifest_get_frame_format_bitmap( + dterminal_manifest)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "connection(manifest) = %04x\n", + (int)ia_css_data_terminal_manifest_get_connection_bitmap( + dterminal_manifest)); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "dependent(manifest) = %d\n", + (int)dterminal_manifest->terminal_dependency); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\tmin_size[%d] = {\n", + IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d,\n", dterminal_manifest->min_size[i]); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d }\n", dterminal_manifest->min_size[i]); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\tmax_size[%d] = {\n", IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d,\n", dterminal_manifest->max_size[i]); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d }\n", dterminal_manifest->max_size[i]); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\tmin_fragment_size[%d] = {\n", + IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d,\n", + dterminal_manifest->min_fragment_size[i]); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d }\n", + dterminal_manifest->min_fragment_size[i]); + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\tmax_fragment_size[%d] = {\n", + IA_CSS_N_DATA_DIMENSION); + for (i = 0; i < (int)IA_CSS_N_DATA_DIMENSION - 1; i++) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d,\n", + dterminal_manifest->max_fragment_size[i]); + } + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, + "\t\t%4d }\n", + dterminal_manifest->max_fragment_size[i]); + + } else if (terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN || + terminal_type == IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT) { + + ia_css_spatial_param_terminal_manifest_t *stm = + (ia_css_spatial_param_terminal_manifest_t *)manifest; + ia_css_frame_grid_param_manifest_section_desc_t *sec; + int sec_count = + stm->frame_grid_param_manifest_section_desc_count; + ia_css_fragment_grid_manifest_desc_t *fragd = + &stm->common_fragment_grid_desc; + ia_css_frame_grid_manifest_desc_t *framed = + &stm->frame_grid_desc; + int sec_index; + + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "kernel_id:\t\t%d\n", + stm->kernel_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "compute_units_p_elem:\t%d\n", + stm->compute_units_p_elem); + + PRINT_DIMENSION("min_fragment_grid_dimension", + fragd->min_fragment_grid_dimension); + PRINT_DIMENSION("max_fragment_grid_dimension", + fragd->max_fragment_grid_dimension); + PRINT_DIMENSION("min_frame_grid_dimension", + framed->min_frame_grid_dimension); + PRINT_DIMENSION("max_frame_grid_dimension", + framed->max_frame_grid_dimension); + + NOT_USED(framed); + NOT_USED(fragd); + + for (sec_index = 0; sec_index < sec_count; sec_index++) { + sec = ia_css_spatial_param_terminal_manifest_get_frm_grid_prm_sct_desc( + stm, sec_index); + verifjmpexit(sec != NULL); + + IA_CSS_TRACE_0(PSYSAPI_STATIC, INFO, "--------------------------\n"); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\tmem_type_id:\t%d\n", + sec->mem_type_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\tregion_id:\t%d\n", + sec->region_id); + IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\telem_size:\t%d\n", + sec->elem_size); + 
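+			/* max_mem_size is the last field dumped for each frame grid param section */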
IA_CSS_TRACE_1(PSYSAPI_STATIC, INFO, "\tmax_mem_size:\t%d\n", + sec->max_mem_size); + } + } else if (terminal_type < IA_CSS_N_TERMINAL_TYPES) { + IA_CSS_TRACE_0(PSYSAPI_STATIC, WARNING, + "terminal type can not be pretty printed, not supported\n"); + } + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_1(PSYSAPI_STATIC, ERROR, + "ia_css_terminal_manifest_print failed (%i)\n", + retval); + } + return retval; +} + +/* Program control init Terminal */ +unsigned int ia_css_program_control_init_terminal_manifest_get_connect_section_count( + const ia_css_program_control_init_manifest_program_desc_t *prog) +{ + assert(prog); + return prog->connect_section_count; +} + + +unsigned int ia_css_program_control_init_terminal_manifest_get_load_section_count( + const ia_css_program_control_init_manifest_program_desc_t *prog) +{ + assert(prog); + return prog->load_section_count; +} + +unsigned int ia_css_program_control_init_terminal_manifest_get_size( + const uint16_t nof_programs, + const uint16_t *nof_load_sections, + const uint16_t *nof_connect_sections) +{ + (void)nof_load_sections; /* might be needed in future */ + (void)nof_connect_sections; /* might be needed in future */ + + return sizeof(ia_css_program_control_init_terminal_manifest_t) + + nof_programs * + sizeof(ia_css_program_control_init_manifest_program_desc_t); +} + +ia_css_program_control_init_manifest_program_desc_t * +ia_css_program_control_init_terminal_manifest_get_program_desc( + const ia_css_program_control_init_terminal_manifest_t *terminal, + unsigned int program) +{ + ia_css_program_control_init_manifest_program_desc_t *progs; + + assert(terminal != NULL); + assert(program < terminal->program_count); + + progs = (ia_css_program_control_init_manifest_program_desc_t *) + ((const char *)terminal + terminal->program_desc_offset); + + return &progs[program]; +} + +int ia_css_program_control_init_terminal_manifest_init( + ia_css_program_control_init_terminal_manifest_t *terminal, + const uint16_t nof_programs, + const uint16_t *nof_load_sections, + const uint16_t *nof_connect_sections) +{ + unsigned int i; + ia_css_program_control_init_manifest_program_desc_t *progs; + + if (terminal == NULL) { + return -EFAULT; + } + + terminal->program_count = nof_programs; + terminal->program_desc_offset = + sizeof(ia_css_program_control_init_terminal_manifest_t); + + progs = ia_css_program_control_init_terminal_manifest_get_program_desc( + terminal, 0); + + for (i = 0; i < nof_programs; i++) { + progs[i].load_section_count = nof_load_sections[i]; + progs[i].connect_section_count = nof_connect_sections[i]; + } + return 0; +} + +void ia_css_program_control_init_terminal_manifest_print( + ia_css_program_control_init_terminal_manifest_t *terminal) +{ + unsigned int i; + + ia_css_program_control_init_manifest_program_desc_t *progs; + + progs = ia_css_program_control_init_terminal_manifest_get_program_desc( + terminal, 0); + + assert(progs); + (void)progs; + + for (i = 0; i < terminal->program_count; i++) { + IA_CSS_TRACE_3(PSYSAPI_STATIC, INFO, + "program index: %d, load sec: %d, connect sec: %d\n", + i, + progs[i].load_section_count, + progs[i].connect_section_count); + } +} + +#endif + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/psys/cnlB0_gen_reg_dump/ia_css_debug_dump.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/psys/cnlB0_gen_reg_dump/ia_css_debug_dump.c new file mode 100644 index 000000000000..c51d65c8cb64 --- /dev/null +++ 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/psys/cnlB0_gen_reg_dump/ia_css_debug_dump.c @@ -0,0 +1,15 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#include "ia_css_debug_dump.h" + void ia_css_debug_dump(void) {} \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/psys/cnlB0_gen_reg_dump/ia_css_debug_dump.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/psys/cnlB0_gen_reg_dump/ia_css_debug_dump.h new file mode 100644 index 000000000000..5dd23ddbd180 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/psys/cnlB0_gen_reg_dump/ia_css_debug_dump.h @@ -0,0 +1,17 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. +* Copyright (c) 2010 - 2018, Intel Corporation. +* +* This program is free software; you can redistribute it and/or modify it +* under the terms and conditions of the GNU General Public License, +* version 2, as published by the Free Software Foundation. +* +* This program is distributed in the hope it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +* more details. +*/ +#ifndef __IA_CSS_DEBUG_DUMP_H_ + #define __IA_CSS_DEBUG_DUMP_H_ + void ia_css_debug_dump(void); + #endif /* __IA_CSS_DEBUG_DUMP_H_ */ \ No newline at end of file diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/reg_dump_generic_bridge.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/reg_dump_generic_bridge.c new file mode 100644 index 000000000000..18d0c9806eda --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/reg_dump/src/reg_dump_generic_bridge.c @@ -0,0 +1,40 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include +#include "ia_css_trace.h" +#ifdef USE_LOGICAL_SSIDS +/* + Logical names can be used to define the SSID + In order to resolve these names the following include file should be provided + and the define above should be enabled +*/ +#include +#endif + +#define REG_DUMP_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#define REG_DUMP_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED + +/* SSID value is defined in test makefiles as either isys0 or psys0 */ +#define REG_DUMP_READ_REGISTER(addr) vied_subsystem_load_32(SSID, addr) + +#define REG_DUMP_PRINT_0(...) 
\ +EXPAND_VA_ARGS(IA_CSS_TRACE_0(REG_DUMP, VERBOSE, __VA_ARGS__)) +#define REG_DUMP_PRINT_1(...) \ +EXPAND_VA_ARGS(IA_CSS_TRACE_1(REG_DUMP, VERBOSE, __VA_ARGS__)) +#define EXPAND_VA_ARGS(x) x + +/* Including generated source code for reg_dump */ +#include "ia_css_debug_dump.c" + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/interface/regmem_access.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/interface/regmem_access.h new file mode 100644 index 000000000000..d4576af936f6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/interface/regmem_access.h @@ -0,0 +1,67 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __REGMEM_ACCESS_H +#define __REGMEM_ACCESS_H + +#include "storage_class.h" + +enum regmem_id { + /* pass pkg_dir address to SPC in non-secure mode */ + PKG_DIR_ADDR_REG = 0, + /* pass syscom configuration to SPC */ + SYSCOM_CONFIG_REG = 1, + /* syscom state - modified by SP */ + SYSCOM_STATE_REG = 2, + /* syscom commands - modified by the host */ + SYSCOM_COMMAND_REG = 3, + /* Store interrupt status - updated by SP */ + SYSCOM_IRQ_REG = 4, + /* Store VTL0_ADDR_MASK in trusted secure region - provided by host.*/ + SYSCOM_VTL0_ADDR_MASK = 5, +#if HAS_DUAL_CMD_CTX_SUPPORT + /* Initialized if trustlet exists - updated by host */ + TRUSTLET_STATUS = 6, + /* identify if SPC access blocker programming is completed - updated by SP */ + AB_SPC_STATUS = 7, + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 8 +#else + /* first syscom queue pointer register */ + SYSCOM_QPR_BASE_REG = 6 +#endif +}; + +#if HAS_DUAL_CMD_CTX_SUPPORT +/* Bit 0: for untrusted non-secure DRV driver on VTL0 + * Bit 1: for trusted secure TEE driver on VTL1 + */ +#define SYSCOM_IRQ_VTL0_MASK 0x1 +#define SYSCOM_IRQ_VTL1_MASK 0x2 +#endif + +STORAGE_CLASS_INLINE unsigned int +regmem_load_32(unsigned int mem_address, unsigned int reg, unsigned int ssid); + +STORAGE_CLASS_INLINE void +regmem_store_32(unsigned int mem_address, unsigned int reg, unsigned int value, + unsigned int ssid); + +#ifdef __VIED_CELL +#include "regmem_access_cell.h" +#else +#include "regmem_access_host.h" +#endif + +#endif /* __REGMEM_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/regmem.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/regmem.mk new file mode 100644 index 000000000000..24ebc1c325d8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/regmem.mk @@ -0,0 +1,32 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation.
+# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details +# +# +ifndef REGMEM_MK +REGMEM_MK=1 + +# MODULE is REGMEM + +REGMEM_DIR=$${MODULES_DIR}/regmem + +REGMEM_INTERFACE=$(REGMEM_DIR)/interface +REGMEM_SOURCES=$(REGMEM_DIR)/src + +REGMEM_HOST_FILES = +REGMEM_FW_FILES = $(REGMEM_SOURCES)/regmem.c + +REGMEM_CPPFLAGS = -I$(REGMEM_INTERFACE) -I$(REGMEM_SOURCES) +REGMEM_HOST_CPPFLAGS = $(REGMEM_CPPFLAGS) +REGMEM_FW_CPPFLAGS = $(REGMEM_CPPFLAGS) + +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/src/regmem_access_host.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/src/regmem_access_host.h new file mode 100644 index 000000000000..8878d7074fab --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/src/regmem_access_host.h @@ -0,0 +1,41 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __REGMEM_ACCESS_HOST_H +#define __REGMEM_ACCESS_HOST_H + +#include "regmem_access.h" /* implemented interface */ + +#include "storage_class.h" +#include "regmem_const.h" +#include +#include "ia_css_cmem.h" + +STORAGE_CLASS_INLINE unsigned int +regmem_load_32(unsigned int mem_addr, unsigned int reg, unsigned int ssid) +{ + /* No need to add REGMEM_OFFSET, it is already included in mem_addr. */ + return ia_css_cmem_load_32(ssid, mem_addr + (REGMEM_WORD_BYTES*reg)); +} + +STORAGE_CLASS_INLINE void +regmem_store_32(unsigned int mem_addr, unsigned int reg, + unsigned int value, unsigned int ssid) +{ + /* No need to add REGMEM_OFFSET, it is already included in mem_addr. */ + ia_css_cmem_store_32(ssid, mem_addr + (REGMEM_WORD_BYTES*reg), + value); +} + +#endif /* __REGMEM_ACCESS_HOST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/src/regmem_const.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/src/regmem_const.h new file mode 100644 index 000000000000..ac7e3a98a434 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/regmem/src/regmem_const.h @@ -0,0 +1,28 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __REGMEM_CONST_H +#define __REGMEM_CONST_H + +#ifndef REGMEM_SIZE +#define REGMEM_SIZE (16) +#endif /* REGMEM_SIZE */ +#ifndef REGMEM_OFFSET +#define REGMEM_OFFSET (0) +#endif /* REGMEM_OFFSET */ +#ifndef REGMEM_WORD_BYTES +#define REGMEM_WORD_BYTES (4) +#endif + +#endif /* __REGMEM_CONST_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm.h new file mode 100644 index 000000000000..4a04a9890326 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm.h @@ -0,0 +1,173 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_H +#define __IA_CSS_RBM_H + +#include "ia_css_rbm_storage_class.h" +#include + +#define IA_CSS_RBM_BITS 64 +/** An element is a 32 bit unsigned integer. 64 bit integers might cause + * problems in the compiler. + */ +#define IA_CSS_RBM_ELEM_TYPE uint32_t +#define IA_CSS_RBM_ELEM_BITS \ + (sizeof(IA_CSS_RBM_ELEM_TYPE)*8) +#define IA_CSS_RBM_NOF_ELEMS \ + ((IA_CSS_RBM_BITS) / (IA_CSS_RBM_ELEM_BITS)) + +/** Users should make no assumption about the actual type of + * ia_css_rbm_t. + */ +typedef struct { + IA_CSS_RBM_ELEM_TYPE data[IA_CSS_RBM_NOF_ELEMS]; +} ia_css_rbm_elems_t; +typedef ia_css_rbm_elems_t ia_css_rbm_t; + +/** Print the bits of a routing bitmap + * @return < 0 on error + */ +IA_CSS_RBM_STORAGE_CLASS_H +int ia_css_rbm_print( + const ia_css_rbm_t bitmap, + void *fid); + +/** Create an empty routing bitmap + * @return bitmap = 0 + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_clear(void); + +/** Creates the complement of a routing bitmap + * @param bitmap[in] routing bitmap + * @return ~bitmap + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_complement( + const ia_css_rbm_t bitmap); + +/** Create the union of two routing bitmaps + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * @return bitmap0 | bitmap1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_union( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Create the intersection of two routing bitmaps + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * @return bitmap0 & bitmap1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_intersection( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Check if the routing bitmap is empty + * @param bitmap[in] routing bitmap + * @return bitmap == 0 + */ +IA_CSS_RBM_STORAGE_CLASS_H +bool ia_css_is_rbm_empty( + const ia_css_rbm_t bitmap); + +/** Check if the intersection of two routing bitmaps is empty + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * @return (bitmap0 & bitmap1) == 0 + */ +IA_CSS_RBM_STORAGE_CLASS_H +bool ia_css_is_rbm_intersection_empty( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Check if the second routing bitmap is a subset of the first (or equal) + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * Note: An empty set is always a subset; this function + * returns true if bitmap 1 is empty + * @return (bitmap0 & bitmap1) == bitmap1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +bool ia_css_is_rbm_subset( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Check if the routing bitmaps are equal + * @param bitmap0[in] routing bitmap 0 + * @param bitmap1[in] routing bitmap 1 + * @return bitmap0 == bitmap1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +bool ia_css_is_rbm_equal( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1); + +/** Checks whether a specific bit is set + * @return bitmap[index] == 1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +int ia_css_is_rbm_set( + const ia_css_rbm_t bitmap, + const unsigned int index); + +/** Create the union of a routing bitmap with a onehot bitmap + * with a bit set at index + * @return bitmap[index] |= 1 +*/ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_set( + const ia_css_rbm_t bitmap, + const unsigned int index); + +/** Creates a routing bitmap from a uint64 value. + * @return bitmap with the same bits set as in value (provided the width of the bitmap is sufficient). + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_create_from_uint64( + const uint64_t value); + +/** Converts an ia_css_rbm_t type to uint64_t. Note that if + * ia_css_rbm_t contains more than 64 bits, only the lowest 64 bits + * are returned. + * @return uint64_t representation of value + */ +IA_CSS_RBM_STORAGE_CLASS_H +uint64_t ia_css_rbm_to_uint64( + const ia_css_rbm_t value); + +/** Creates a routing bitmap with the bit at index 'index' removed. + * @return ~(1 << index) & bitmap + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_unset( + const ia_css_rbm_t bitmap, + const unsigned int index); + +/** Create a onehot routing bitmap with a bit set at index + * @return bitmap[index] = 1 + */ +IA_CSS_RBM_STORAGE_CLASS_H +ia_css_rbm_t ia_css_rbm_bit_mask( + const unsigned int index); + +#ifdef __IA_CSS_RBM_INLINE__ +#include "ia_css_rbm_impl.h" +#endif /* __IA_CSS_RBM_INLINE__ */ + +#endif /* __IA_CSS_RBM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest.h new file mode 100644 index 000000000000..ee700df72dff --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest.h @@ -0,0 +1,134 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_MANIFEST_H +#define __IA_CSS_RBM_MANIFEST_H + +#include "type_support.h" +#include "ia_css_rbm_manifest_types.h" + +/** Returns the descriptor size of the RBM manifest. + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +unsigned int +ia_css_rbm_manifest_get_size(void); + +/** Initializes the RBM manifest. + * @param rbm[in] Routing bitmap manifest.
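 *
 * A minimal host-side set-up sketch (illustrative only; my_zalloc is a
 * hypothetical zero-initializing allocator, not part of this interface):
 *   ia_css_rbm_manifest_t *m = my_zalloc(ia_css_rbm_manifest_get_size());
 *   if (m != NULL)
 *           ia_css_rbm_manifest_init(m);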
+ */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +void +ia_css_rbm_manifest_init(struct ia_css_rbm_manifest_s *rbm); + +/** Returns a pointer to the array of mux descriptors. + * @param manifest[in] Routing bitmap manifest. + * @return NULL on error + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +ia_css_rbm_mux_desc_t * +ia_css_rbm_manifest_get_muxes(const ia_css_rbm_manifest_t *manifest); + +/** Returns the size of the mux descriptor array. + * @param manifest[in] Routing bitmap manifest. + * @return size + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +unsigned int +ia_css_rbm_manifest_get_mux_count(const ia_css_rbm_manifest_t *manifest); + +/** Returns a pointer to the array of validation descriptors. + * @param manifest[in] Routing bitmap manifest. + * @return NULL on error + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +ia_css_rbm_validation_rule_t * +ia_css_rbm_manifest_get_validation_rules(const ia_css_rbm_manifest_t *manifest); + +/** Returns the size of the validation descriptor array. + * @param manifest[in] Routing bitmap manifest. + * @return size + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +unsigned int +ia_css_rbm_manifest_get_validation_rule_count(const ia_css_rbm_manifest_t *manifest); + +/** Returns a pointer to the array of terminal routing descriptors. + * @param manifest[in] Routing bitmap manifest. + * @return NULL on error + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +ia_css_rbm_terminal_routing_desc_t * +ia_css_rbm_manifest_get_terminal_routing_desc(const ia_css_rbm_manifest_t *manifest); + +/** \brief Returns the size of the terminal routing descriptor array. + * Note: pretty printing differs between host and IPU. + * @param manifest[in] Routing bitmap manifest. + * @return size + */ +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H +unsigned int +ia_css_rbm_manifest_get_terminal_routing_desc_count(const ia_css_rbm_manifest_t *manifest); + +/** Pretty prints the routing bitmap manifest. + * @param manifest[in] Routing bitmap manifest. + */ +void +ia_css_rbm_manifest_print(const ia_css_rbm_manifest_t *manifest); + +/** \brief Pretty prints an RBM (routing bitmap). + * Note: pretty printing differs between host and IPU. + * @param rbm[in] Routing bitmap. + * @param mux[in] List of mux descriptors corresponding to rbm. + * @param mux_desc_count[in] Number of muxes in list mux. + */ +void +ia_css_rbm_pretty_print( + const ia_css_rbm_t *rbm, + const ia_css_rbm_mux_desc_t *mux, + unsigned int mux_desc_count); + +/** \brief Check the validity of a routing bitmap. + * @param manifest[in] Routing bitmap manifest. + * @param rbm[in] Routing bitmap + * @return true on match. + */ +bool +ia_css_rbm_manifest_check_rbm_validity( + const ia_css_rbm_manifest_t *manifest, + const ia_css_rbm_t *rbm); + +/** \brief Sets, using manifest info, the value of a mux in the routing bitmap. + * @param rbm[in] Routing bitmap. + * @param mux[in] List of mux descriptors corresponding to rbm. + * @param mux_count[in] Number of muxes in list mux. + * @param gp_dev_id[in] ID of the subsystem (PSA/ISA) where the mux is located. + * @param mux_id[in] ID of mux to set configuration for. + * @param value[in] Value of the mux. + * @return routing bitmap.
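 *
 * Illustrative call (the descriptor list and IDs below are hypothetical,
 * not taken from a real manifest): selecting value 1 on mux 2 of gp
 * device 0 sets the bit at that mux's offset + 1:
 *   rbm = ia_css_rbm_set_mux(rbm, muxes, mux_count, 0, 2, 1);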
+ */ +ia_css_rbm_t +ia_css_rbm_set_mux( + ia_css_rbm_t rbm, + ia_css_rbm_mux_desc_t *mux, + unsigned int mux_count, + unsigned int gp_dev_id, + unsigned int mux_id, + unsigned int value); + +#ifdef __IA_CSS_RBM_MANIFEST_INLINE__ +#include "ia_css_rbm_manifest_impl.h" +#endif /* __IA_CSS_RBM_MANIFEST_INLINE__ */ + +#endif /* __IA_CSS_RBM_MANIFEST_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest_types.h new file mode 100644 index 000000000000..ade20446b9f6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_manifest_types.h @@ -0,0 +1,95 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_MANIFEST_TYPES_H +#define __IA_CSS_RBM_MANIFEST_TYPES_H + +#include "ia_css_rbm.h" +#include "vied_nci_psys_resource_model.h" + +#ifndef VIED_NCI_RBM_MAX_MUX_COUNT +#error Please define VIED_NCI_RBM_MAX_MUX_COUNT +#endif +#ifndef VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT +#error Please define VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT +#endif +#ifndef VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT +#error Please define VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT +#endif +#ifndef N_PADDING_UINT8_IN_RBM_MANIFEST +#error Please define N_PADDING_UINT8_IN_RBM_MANIFEST +#endif + +#define SIZE_OF_RBM_MUX_DESC_S ( \ + (4 * IA_CSS_UINT8_T_BITS)) + +typedef struct ia_css_rbm_mux_desc_s { + uint8_t gp_dev_id; + uint8_t mux_id; + uint8_t offset; + uint8_t size_bits; +} ia_css_rbm_mux_desc_t; + +#define SIZE_OF_RBM_VALIDATION_RULE_DESC_S ( \ + (2 * IA_CSS_RBM_BITS) \ + + (1 * IA_CSS_UINT32_T_BITS)) + +typedef struct ia_css_rbm_validation_rule_s { + ia_css_rbm_t match; /* RBM is an array of 32 bit elements */ + ia_css_rbm_t mask; + uint32_t expected_value; +} ia_css_rbm_validation_rule_t; + +#define SIZE_OF_RBM_TERMINAL_ROUTING_DESC_S ( \ + (4 * IA_CSS_UINT8_T_BITS)) + +typedef struct ia_css_rbm_terminal_routing_desc_s { + uint8_t terminal_id; + uint8_t connection_state; + uint8_t mux_id; + uint8_t state; +} ia_css_rbm_terminal_routing_desc_t; + +#define SIZE_OF_RBM_MANIFEST_S ( \ + (VIED_NCI_RBM_MAX_MUX_COUNT * SIZE_OF_RBM_MUX_DESC_S) \ + + (VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT * SIZE_OF_RBM_VALIDATION_RULE_DESC_S) \ + + (VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT * SIZE_OF_RBM_TERMINAL_ROUTING_DESC_S) \ + + (3 * IA_CSS_UINT16_T_BITS) \ + + (N_PADDING_UINT8_IN_RBM_MANIFEST * IA_CSS_UINT8_T_BITS)) + +typedef struct ia_css_rbm_manifest_s { +#if VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT > 0 + ia_css_rbm_validation_rule_t + validation_rules[VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT]; +#endif + uint16_t mux_desc_count; + uint16_t validation_rule_count; + uint16_t terminal_routing_desc_count; + +#if VIED_NCI_RBM_MAX_MUX_COUNT > 0 + ia_css_rbm_mux_desc_t + mux_desc[VIED_NCI_RBM_MAX_MUX_COUNT]; +#endif + +#if VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT > 0 + ia_css_rbm_terminal_routing_desc_t + 
terminal_routing_desc[VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT]; +#endif + +#if N_PADDING_UINT8_IN_RBM_MANIFEST > 0 + uint8_t padding[N_PADDING_UINT8_IN_RBM_MANIFEST]; +#endif +} ia_css_rbm_manifest_t; + +#endif /* __IA_CSS_RBM_MANIFEST_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_storage_class.h new file mode 100644 index 000000000000..9548e9a9fabb --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_storage_class.h @@ -0,0 +1,36 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_STORAGE_CLASS_H +#define __IA_CSS_RBM_STORAGE_CLASS_H + +#include "storage_class.h" + +#ifndef __IA_CSS_RBM_INLINE__ +#define IA_CSS_RBM_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_RBM_STORAGE_CLASS_C +#else +#define IA_CSS_RBM_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_RBM_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#ifndef __IA_CSS_RBM_MANIFEST_INLINE__ +#define IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H STORAGE_CLASS_EXTERN +#define IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +#else +#define IA_CSS_RBM_MANIFEST_STORAGE_CLASS_H STORAGE_CLASS_INLINE +#define IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C STORAGE_CLASS_INLINE +#endif + +#endif /* __IA_CSS_RBM_STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_trace.h new file mode 100644 index 000000000000..dd060323da5c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/interface/ia_css_rbm_trace.h @@ -0,0 +1,77 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_TRACE_H +#define __IA_CSS_RBM_TRACE_H + +#include "ia_css_trace.h" + +/* Not using 0 to identify wrong configuration being passed from the .mk file outside. 
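+* (For example, a platform .mk fragment could select debug tracing by adding
+* -DRBM_TRACE_CONFIG=RBM_TRACE_LOG_LEVEL_DEBUG to this module's CPPFLAGS; the
+* exact flag placement is illustrative, not prescribed by this header.)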
+* Log levels not in the range below will cause a "No RBM_TRACE_CONFIG Tracing level defined" error. +*/ +#define RBM_TRACE_LOG_LEVEL_OFF 1 +#define RBM_TRACE_LOG_LEVEL_NORMAL 2 +#define RBM_TRACE_LOG_LEVEL_DEBUG 3 + +#define RBM_TRACE_CONFIG_DEFAULT RBM_TRACE_LOG_LEVEL_NORMAL + +#if !defined(RBM_TRACE_CONFIG) +# define RBM_TRACE_CONFIG RBM_TRACE_CONFIG_DEFAULT +#endif + +/* IPU_RESOURCE Module tracing backend is mapped to TUNIT tracing for target platforms */ +#ifdef __HIVECC +# ifndef HRT_CSIM +# define RBM_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE +# else +# define RBM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +# endif +#else +# define RBM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#endif + +#if (defined(RBM_TRACE_CONFIG)) +/* Module specific trace setting */ +# if RBM_TRACE_CONFIG == RBM_TRACE_LOG_LEVEL_OFF +/* RBM_TRACE_LOG_LEVEL_OFF */ +# define RBM_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED +# elif RBM_TRACE_CONFIG == RBM_TRACE_LOG_LEVEL_NORMAL +/* RBM_TRACE_LOG_LEVEL_NORMAL */ +# define RBM_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_DISABLED +# define RBM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED +# elif RBM_TRACE_CONFIG == RBM_TRACE_LOG_LEVEL_DEBUG +/* RBM_TRACE_LOG_LEVEL_DEBUG */ +# define RBM_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_DEBUG IA_CSS_TRACE_LEVEL_ENABLED +# define RBM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED +# else +# error "No RBM_TRACE_CONFIG Tracing level defined" +# endif +#else +# error "RBM_TRACE_CONFIG not defined" +#endif + +#endif /* __IA_CSS_RBM_TRACE_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/routing_bitmap.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/routing_bitmap.mk new file mode 100644 index 000000000000..f4251f9740fd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/routing_bitmap.mk @@ -0,0 +1,39 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for +# more details +# +# + +ifdef _H_ROUTING_BITMAP_MK +$(error ERROR: routing_bitmap.mk included multiple times, please check makefile) +else +_H_ROUTING_BITMAP_MK=1 +endif + +ROUTING_BITMAP_FILES += $(ROUTING_BITMAP_DIR)/src/ia_css_rbm_manifest.c + +ROUTING_BITMAP_DIR = $(MODULES_DIR)/routing_bitmap +ROUTING_BITMAP_INTERFACE = $(ROUTING_BITMAP_DIR)/interface +ROUTING_BITMAP_SOURCES = $(ROUTING_BITMAP_DIR)/src + +ROUTING_BITMAP_CPPFLAGS = -I$(ROUTING_BITMAP_INTERFACE) +ROUTING_BITMAP_CPPFLAGS += -I$(ROUTING_BITMAP_SOURCES) + +ifeq ($(ROUTING_BITMAP_INLINE),1) +ROUTING_BITMAP_CPPFLAGS += -D__IA_CSS_RBM_INLINE__ +else +ROUTING_BITMAP_FILES += $(ROUTING_BITMAP_DIR)/src/ia_css_rbm.c +endif + +ifeq ($(ROUTING_BITMAP_MANIFEST_INLINE),1) +ROUTING_BITMAP_CPPFLAGS += -D__IA_CSS_RBM_MANIFEST_INLINE__ +endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm.c new file mode 100644 index 000000000000..bc5bf14efbd7 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm.c @@ -0,0 +1,17 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_RBM_INLINE__ +#include "ia_css_rbm_impl.h" +#endif /* __IA_CSS_RBM_INLINE__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_impl.h new file mode 100644 index 000000000000..c8cd78d416a1 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_impl.h @@ -0,0 +1,338 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_rbm.h" +#include "type_support.h" +#include "misc_support.h" +#include "assert_support.h" +#include "math_support.h" +#include "ia_css_rbm_trace.h" + +STORAGE_CLASS_INLINE int ia_css_rbm_compute_weight( + const ia_css_rbm_t bitmap); + +STORAGE_CLASS_INLINE ia_css_rbm_t ia_css_rbm_shift( + const ia_css_rbm_t bitmap); + +IA_CSS_RBM_STORAGE_CLASS_C +bool ia_css_is_rbm_intersection_empty( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + ia_css_rbm_t intersection; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_intersection_empty(): enter:\n"); + + intersection = ia_css_rbm_intersection(bitmap0, bitmap1); + return ia_css_is_rbm_empty(intersection); +} + +IA_CSS_RBM_STORAGE_CLASS_C +bool ia_css_is_rbm_empty( + const ia_css_rbm_t bitmap) +{ + unsigned int i; + bool is_empty = true; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_empty(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + is_empty &= bitmap.data[i] == 0; + } + return is_empty; +} + +IA_CSS_RBM_STORAGE_CLASS_C +bool ia_css_is_rbm_equal( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + unsigned int i; + bool is_equal = true; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_equal(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + is_equal = is_equal && (bitmap0.data[i] == bitmap1.data[i]); + } + return is_equal; +} + +IA_CSS_RBM_STORAGE_CLASS_C +bool ia_css_is_rbm_subset( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + ia_css_rbm_t intersection; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_subset(): enter:\n"); + + intersection = ia_css_rbm_intersection(bitmap0, bitmap1); + return ia_css_is_rbm_equal(intersection, bitmap1); +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_clear(void) +{ + unsigned int i; + ia_css_rbm_t bitmap; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_clear(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + bitmap.data[i] = 0; + } + return bitmap; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_complement( + const ia_css_rbm_t bitmap) +{ + unsigned int i; + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_complement(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + result.data[i] = ~bitmap.data[i]; + } + return result; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_union( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + unsigned int i; + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_union(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + result.data[i] = (bitmap0.data[i] | bitmap1.data[i]); + } + return result; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_intersection( + const ia_css_rbm_t bitmap0, + const ia_css_rbm_t bitmap1) +{ + unsigned int i; + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_intersection(): enter:\n"); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + result.data[i] = (bitmap0.data[i] & bitmap1.data[i]); + } + return result; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_set( + const ia_css_rbm_t bitmap, + const unsigned int index) +{ + ia_css_rbm_t bit_mask; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_set(): enter:\n"); + + bit_mask = ia_css_rbm_bit_mask(index); + return ia_css_rbm_union(bitmap, bit_mask); +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_create_from_uint64( + const uint64_t value) +{ + unsigned int i; + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + 
"ia_css_rbm_create_from_uint64(): enter:\n"); + + result = ia_css_rbm_clear(); + for (i = 0; i < IA_CSS_RBM_NOF_ELEMS; i++) { + /* masking is done implictly, the MSB bits of casting will be chopped off */ + result.data[i] = (IA_CSS_RBM_ELEM_TYPE) + (value >> (i * IA_CSS_RBM_ELEM_BITS)); + } + return result; +} + +IA_CSS_RBM_STORAGE_CLASS_C +uint64_t ia_css_rbm_to_uint64( + const ia_css_rbm_t value) +{ + const unsigned int bits64 = sizeof(uint64_t) * 8; + const unsigned int nof_elems_bits64 = bits64 / IA_CSS_RBM_ELEM_BITS; + unsigned int i; + uint64_t res = 0; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_to_uint64(): enter:\n"); + + assert((bits64 % IA_CSS_RBM_ELEM_BITS) == 0); + assert(nof_elems_bits64 > 0); + + for (i = 0; i < MIN(IA_CSS_RBM_NOF_ELEMS, nof_elems_bits64); i++) { + res |= ((uint64_t)(value.data[i]) << (i * IA_CSS_RBM_ELEM_BITS)); + } + for (i = nof_elems_bits64; i < IA_CSS_RBM_NOF_ELEMS; i++) { + assert(value.data[i] == 0); + } + return res; +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_unset( + const ia_css_rbm_t bitmap, + const unsigned int index) +{ + ia_css_rbm_t result; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_unset(): enter:\n"); + + result = ia_css_rbm_bit_mask(index); + result = ia_css_rbm_complement(result); + return ia_css_rbm_intersection(bitmap, result); +} + +IA_CSS_RBM_STORAGE_CLASS_C +ia_css_rbm_t ia_css_rbm_bit_mask( + const unsigned int index) +{ + unsigned int elem_index; + unsigned int elem_bit_index; + ia_css_rbm_t bit_mask = ia_css_rbm_clear(); + + assert(index < IA_CSS_RBM_BITS); + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_bit_mask(): enter:\n"); + if (index < IA_CSS_RBM_BITS) { + elem_index = index / IA_CSS_RBM_ELEM_BITS; + elem_bit_index = index % IA_CSS_RBM_ELEM_BITS; + assert(elem_index < IA_CSS_RBM_NOF_ELEMS); + + bit_mask.data[elem_index] = 1 << elem_bit_index; + } + return bit_mask; +} + +STORAGE_CLASS_INLINE +int ia_css_rbm_compute_weight( + const ia_css_rbm_t bitmap) +{ + ia_css_rbm_t loc_bitmap; + int weight = 0; + int i; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_compute_weight(): enter:\n"); + + loc_bitmap = bitmap; + + /* In fact; do not need the iterator "i" */ + for (i = 0; (i < IA_CSS_RBM_BITS) && + !ia_css_is_rbm_empty(loc_bitmap); i++) { + weight += ia_css_is_rbm_set(loc_bitmap, 0); + loc_bitmap = ia_css_rbm_shift(loc_bitmap); + } + + return weight; +} + +IA_CSS_RBM_STORAGE_CLASS_C +int ia_css_is_rbm_set( + const ia_css_rbm_t bitmap, + const unsigned int index) +{ + unsigned int elem_index; + unsigned int elem_bit_index; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_is_rbm_set(): enter:\n"); + + assert(index < IA_CSS_RBM_BITS); + + elem_index = index / IA_CSS_RBM_ELEM_BITS; + elem_bit_index = index % IA_CSS_RBM_ELEM_BITS; + assert(elem_index < IA_CSS_RBM_NOF_ELEMS); + return (((bitmap.data[elem_index] >> elem_bit_index) & 0x1) == 1); +} + +STORAGE_CLASS_INLINE +ia_css_rbm_t ia_css_rbm_shift( + const ia_css_rbm_t bitmap) +{ + int i; + unsigned int lsb_current_elem = 0; + unsigned int lsb_previous_elem = 0; + ia_css_rbm_t loc_bitmap; + + IA_CSS_TRACE_0(RBM, VERBOSE, + "ia_css_rbm_shift(): enter:\n"); + + loc_bitmap = bitmap; + + for (i = IA_CSS_RBM_NOF_ELEMS - 1; i >= 0; i--) { + lsb_current_elem = bitmap.data[i] & 0x01; + loc_bitmap.data[i] >>= 1; + loc_bitmap.data[i] |= (lsb_previous_elem << (IA_CSS_RBM_ELEM_BITS - 1)); + lsb_previous_elem = lsb_current_elem; + } + return loc_bitmap; +} + +IA_CSS_RBM_STORAGE_CLASS_C +int ia_css_rbm_print( + const ia_css_rbm_t bitmap, + void *fid) +{ + int retval = -1; + int 
bit; + unsigned int bit_index = 0; + ia_css_rbm_t loc_bitmap; + + IA_CSS_TRACE_0(RBM, INFO, + "ia_css_rbm_print(): enter:\n"); + + NOT_USED(fid); + NOT_USED(bit); + + IA_CSS_TRACE_0(RBM, INFO, "kernel bitmap {\n"); + + loc_bitmap = bitmap; + + for (bit_index = 0; (bit_index < IA_CSS_RBM_BITS) && + !ia_css_is_rbm_empty(loc_bitmap); bit_index++) { + + bit = ia_css_is_rbm_set(loc_bitmap, 0); + loc_bitmap = ia_css_rbm_shift(loc_bitmap); + IA_CSS_TRACE_2(RBM, INFO, "\t%d\t = %d\n", bit_index, bit); + } + IA_CSS_TRACE_0(RBM, INFO, "}\n"); + + retval = 0; + return retval; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest.c new file mode 100644 index 000000000000..ef3beb8760b6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest.c @@ -0,0 +1,224 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_rbm_manifest.h" +#include "ia_css_rbm.h" +#include "type_support.h" +#include "misc_support.h" +#include "assert_support.h" +#include "math_support.h" +#include "ia_css_rbm_trace.h" + +#ifndef __IA_CSS_RBM_MANIFEST_INLINE__ +#include "ia_css_rbm_manifest_impl.h" +#endif /* __IA_CSS_RBM_MANIFEST_INLINE__ */ + +STORAGE_CLASS_INLINE void +ia_css_rbm_print_with_header( + const ia_css_rbm_t *rbm, + const ia_css_rbm_mux_desc_t *mux, + unsigned int mux_desc_count, + bool print_header) +{ +#ifdef __HIVECC + ia_css_rbm_print(*rbm, NULL); + (void)print_header; + (void)mux_desc_count; + (void)mux; +#else + int i, j; + + assert(mux != NULL); + assert(rbm != NULL); + if (mux == NULL || rbm == NULL) + return; + + if (print_header) { + for (i = mux_desc_count - 1; i >= 0; i--) { + PRINT("%*d|", mux[i].size_bits, mux[i].mux_id); + } + PRINT("\n"); + } + for (i = mux_desc_count - 1; i >= 0; i--) { + for (j = mux[i].size_bits - 1; j >= 0; j--) { + PRINT("%d", ia_css_is_rbm_set(*rbm, j + mux[i].offset)); + } + PRINT("|"); + } +#endif +} + +STORAGE_CLASS_INLINE void +ia_css_rbm_validation_rule_print( + ia_css_rbm_validation_rule_t *rule, + ia_css_rbm_mux_desc_t *mux_desc, + unsigned int mux_desc_count, + bool print_header) +{ + ia_css_rbm_print_with_header(&rule->match, mux_desc, mux_desc_count, print_header); +#ifdef __HIVECC + IA_CSS_TRACE_0(RBM, INFO, "Mask\n"); +#else + PRINT("\t"); +#endif + ia_css_rbm_print_with_header(&rule->mask, mux_desc, mux_desc_count, false); +#ifdef __HIVECC + IA_CSS_TRACE_1(RBM, INFO, "Rule expected_value: %d\n", rule->expected_value); +#else + PRINT("\t%d\n", rule->expected_value); +#endif +} + +void +ia_css_rbm_pretty_print( + const ia_css_rbm_t *rbm, + const ia_css_rbm_mux_desc_t *mux, + unsigned int mux_desc_count) +{ + ia_css_rbm_print_with_header(rbm, mux, mux_desc_count, false); +#ifndef __HIVECC + PRINT("\n"); +#endif +} + +void +ia_css_rbm_manifest_print( + const ia_css_rbm_manifest_t *manifest) +{ + int retval = -1; + unsigned int i; + bool 
print_header = true; + ia_css_rbm_mux_desc_t *muxes; + ia_css_rbm_validation_rule_t *validation_rule; + ia_css_rbm_terminal_routing_desc_t *terminal_routing_desc; + + verifjmpexit(manifest != NULL); + muxes = ia_css_rbm_manifest_get_muxes(manifest); + verifjmpexit(muxes != NULL || manifest->mux_desc_count == 0); + + for (i = 0; i < manifest->mux_desc_count; i++) { + IA_CSS_TRACE_4(RBM, INFO, "id: %d.%d offstet: %d size_bits: %d\n", + muxes[i].gp_dev_id, + muxes[i].mux_id, + muxes[i].offset, + muxes[i].size_bits); + } +#if VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT != 0 + validation_rule = ia_css_rbm_manifest_get_validation_rules(manifest); + verifjmpexit(validation_rule != NULL || manifest->validation_rule_count == 0); + + for (i = 0; i < manifest->validation_rule_count; i++) { + ia_css_rbm_validation_rule_print(&validation_rule[i], muxes, manifest->mux_desc_count, print_header); + print_header = false; + } +#else + (void) validation_rule; + (void) print_header; +#endif + terminal_routing_desc = ia_css_rbm_manifest_get_terminal_routing_desc(manifest); + verifjmpexit(terminal_routing_desc != NULL || manifest->terminal_routing_desc_count == 0); + for (i = 0; i < manifest->terminal_routing_desc_count; i++) { + IA_CSS_TRACE_4(RBM, INFO, "terminal_id: %d connection_state: %d mux_id: %d state: %d\n", + terminal_routing_desc[i].terminal_id, + terminal_routing_desc[i].connection_state, + terminal_routing_desc[i].mux_id, + terminal_routing_desc[i].state); + } + + retval = 0; +EXIT: + if (retval != 0) { + IA_CSS_TRACE_0(RBM, ERROR, "ia_css_rbm_manifest_print failed\n"); + } +} + +bool +ia_css_rbm_manifest_check_rbm_validity( + const ia_css_rbm_manifest_t *manifest, + const ia_css_rbm_t *rbm) +{ + unsigned int i; + ia_css_rbm_t res; + ia_css_rbm_t final_rbm = ia_css_rbm_clear(); + ia_css_rbm_validation_rule_t *rules; + bool matches_rules; + + verifjmpexit(manifest != NULL); + verifjmpexit(rbm != NULL); + + if (ia_css_is_rbm_empty(*rbm)) { + IA_CSS_TRACE_0(RBM, ERROR, "ia_css_rbm_manifest_check_rbm_validity failes: RBM is empty.\n"); + return false; + } + +#if VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT != 0 + rules = ia_css_rbm_manifest_get_validation_rules(manifest); + verifjmpexit(rules != NULL || manifest->validation_rule_count == 0); + + for (i = 0; i < manifest->validation_rule_count; i++) { + res = ia_css_rbm_intersection(*rbm, rules[i].mask); + matches_rules = ia_css_is_rbm_equal(res, rules[i].match); + + if (!matches_rules) + continue; + + if (rules[i].expected_value == 1) { + final_rbm = ia_css_rbm_union(final_rbm, res); + } else { + IA_CSS_TRACE_1(RBM, INFO, "ia_css_rbm_manifest_check_rbm_validity failes on rule %d\n", 1); + return false; + } + } +#else + (void)matches_rules; + (void)i; + (void)rules; + (void)res; +#endif + return ia_css_is_rbm_equal(final_rbm, *rbm); +EXIT: + return false; +} + +ia_css_rbm_t +ia_css_rbm_set_mux( + ia_css_rbm_t rbm, + ia_css_rbm_mux_desc_t *mux, + unsigned int mux_count, + unsigned int gp_dev_id, + unsigned int mux_id, + unsigned int value) +{ + unsigned int i; + + verifjmpexit(mux != NULL); + + for (i = 0; i < mux_count; i++) { + if (mux[i].gp_dev_id == gp_dev_id && mux[i].mux_id == mux_id) + break; + } + if (i >= mux_count) { + IA_CSS_TRACE_2(RBM, ERROR, + "ia_css_rbm_set_mux mux with mux_id %d.%d not found\n", gp_dev_id, mux_id); + return rbm; + } + if (value >= mux[i].size_bits) { + IA_CSS_TRACE_3(RBM, ERROR, + "ia_css_rbm_set_mux mux mux_id %d.%d, value %d illegal\n", gp_dev_id, mux_id, value); + return rbm; + } + rbm = ia_css_rbm_set(rbm, mux[i].offset + 
value); +EXIT: + return rbm; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest_impl.h new file mode 100644 index 000000000000..7059b6bc898e --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/routing_bitmap/src/ia_css_rbm_manifest_impl.h @@ -0,0 +1,108 @@ + + +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#include "ia_css_rbm_manifest.h" +#include "ia_css_rbm_trace.h" + +#include "type_support.h" +#include "math_support.h" +#include "error_support.h" +#include "assert_support.h" +#include "print_support.h" + +STORAGE_CLASS_INLINE +void __ia_css_rbm_manifest_check_struct(void) +{ + COMPILATION_ERROR_IF( + sizeof(ia_css_rbm_manifest_t) != (SIZE_OF_RBM_MANIFEST_S / IA_CSS_UINT8_T_BITS)); + COMPILATION_ERROR_IF( + (sizeof(ia_css_rbm_manifest_t) % 8 /* 64 bit */) != 0); +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +unsigned int +ia_css_rbm_manifest_get_size(void) +{ + unsigned int size = sizeof(struct ia_css_rbm_manifest_s); + + return ceil_mul(size, sizeof(uint64_t)); +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +void +ia_css_rbm_manifest_init(struct ia_css_rbm_manifest_s *rbm) +{ + rbm->mux_desc_count = 0; + rbm->terminal_routing_desc_count = 0; + rbm->validation_rule_count = 0; +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +ia_css_rbm_mux_desc_t * +ia_css_rbm_manifest_get_muxes(const ia_css_rbm_manifest_t *manifest) +{ +#if VIED_NCI_RBM_MAX_MUX_COUNT == 0 + (void)manifest; + return NULL; +#else + return (ia_css_rbm_mux_desc_t *)manifest->mux_desc; +#endif +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +unsigned int +ia_css_rbm_manifest_get_mux_count(const ia_css_rbm_manifest_t *manifest) +{ + return manifest->mux_desc_count; +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +ia_css_rbm_validation_rule_t * +ia_css_rbm_manifest_get_validation_rules(const ia_css_rbm_manifest_t *manifest) +{ +#if VIED_NCI_RBM_MAX_VALIDATION_RULE_COUNT == 0 + (void)manifest; + return NULL; +#else + return (ia_css_rbm_validation_rule_t *)manifest->validation_rules; +#endif +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +unsigned int +ia_css_rbm_manifest_get_validation_rule_count(const ia_css_rbm_manifest_t *manifest) +{ + return manifest->validation_rule_count; +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +ia_css_rbm_terminal_routing_desc_t * +ia_css_rbm_manifest_get_terminal_routing_desc(const ia_css_rbm_manifest_t *manifest) +{ +#if VIED_NCI_RBM_MAX_TERMINAL_DESC_COUNT == 0 + (void)manifest; + return NULL; +#else + return (ia_css_rbm_terminal_routing_desc_t *)manifest->terminal_routing_desc; +#endif +} + +IA_CSS_RBM_MANIFEST_STORAGE_CLASS_C +unsigned int +ia_css_rbm_manifest_get_terminal_routing_desc_count(const ia_css_rbm_manifest_t *manifest) +{ + return manifest->terminal_routing_desc_count; +} diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/assert_support.h 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/assert_support.h new file mode 100644 index 000000000000..28aed19409b9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/assert_support.h @@ -0,0 +1,200 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __ASSERT_SUPPORT_H +#define __ASSERT_SUPPORT_H + +/* This file provides support for run-time assertions + * and compile-time assertions. + * + * Run-time assertions are provided via the following syntax: + * assert(condition) + * Run-time assertions are disabled using the NDEBUG flag. + * + * Compile-time assertions are provided via the following syntax: + * COMPILATION_ERROR_IF(condition); + * A compile-time assertion will fail to compile if the condition is false. + * The condition must be constant, such that it can be evaluated + * at compile time. + * + * OP___assert is deprecated. + */ + +#define IA_CSS_ASSERT(expr) assert(expr) + +#ifdef __KLOCWORK__ +/* Klocwork does not see that assert will lead to an abort; + * as there is no good way to tell this to KW and the code + * should not depend on assert to function (actually the assert + * could be disabled in a release build), it was decided to + * disable the assert for KW scans (by defining NDEBUG) + * see also: + * http://www.klocwork.com/products/documentation/current/ + * Tuning_C/C%2B%2B_analysis#Assertions + */ +#define NDEBUG +#endif /* __KLOCWORK__ */ + +/** + * The following macro can help to test the size of a struct at compile + * time rather than at run-time. It does not work for all compilers; see + * below. + * + * Depending on the value of 'condition', the following macro is expanded to: + * - condition==true: + * an expression containing an array declaration with negative size, + * usually resulting in a compilation error + * - condition==false: + * (void) 1; // C statement with no effect + * + * example: + * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != + * SIZE_OF_HOST_SP_QUEUES_STRUCT); + * + * verify that the macro indeed triggers a compilation error with your compiler: + * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != + * (sizeof(struct host_sp_queues)+1) ); + * + * Not all compilers will trigger an error with this macro; + * use a search engine to search for BUILD_BUG_ON to find other methods.
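 *
 * (On C11 toolchains an equivalent, more readable check would be a static
 * assertion, e.g.:
 * _Static_assert(sizeof(struct host_sp_queues) == SIZE_OF_HOST_SP_QUEUES_STRUCT,
 * "unexpected struct size");
 * the array trick below is presumably kept for pre-C11 compilers.)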
+ */ +#define COMPILATION_ERROR_IF(condition) \ +((void)sizeof(char[1 - 2*!!(condition)])) + +/* Compile time assertion */ +#ifndef CT_ASSERT +#define CT_ASSERT(cnd) ((void)sizeof(char[(cnd)?1 : -1])) +#endif /* CT_ASSERT */ + +#ifdef NDEBUG + +#define assert(cnd) ((void)0) + +#else + +#include "storage_class.h" + +#if defined(_MSC_VER) +#ifdef _KERNEL_MODE +/* Windows kernel mode compilation */ +#include +#define assert(cnd) ASSERT(cnd) +#else +/* Windows usermode compilation */ +#include +#endif + +#elif defined(__HIVECC) + +/* + * target: assert disabled + * sched: assert enabled only when DEBUG_SCHED is defined + * unsched: assert enabled + */ +#if defined(HRT_HW) +#define assert(cnd) ((void)0) +#elif defined(HRT_SCHED) && !defined(DEBUG_SCHED) +#define assert(cnd) ((void)0) +#elif defined(PIPE_GENERATION) +#define assert(cnd) ((void)0) +#else +#include +#define assert(cnd) OP___csim_assert(cnd) +#endif + +#elif defined(__KERNEL__) +#include + +#ifndef KERNEL_ASSERT_TO_BUG +#ifndef KERNEL_ASSERT_TO_BUG_ON +#ifndef KERNEL_ASSERT_TO_WARN_ON +#ifndef KERNEL_ASSERT_TO_WARN_ON_INF_LOOP +#ifndef KERNEL_ASSERT_UNDEFINED +/* Default */ +#define KERNEL_ASSERT_TO_BUG +#endif /*KERNEL_ASSERT_UNDEFINED*/ +#endif /*KERNEL_ASSERT_TO_WARN_ON_INF_LOOP*/ +#endif /*KERNEL_ASSERT_TO_WARN_ON*/ +#endif /*KERNEL_ASSERT_TO_BUG_ON*/ +#endif /*KERNEL_ASSERT_TO_BUG*/ + +#ifdef KERNEL_ASSERT_TO_BUG +/* TODO: it would be cleaner to use this: + * #define assert(cnd) BUG_ON(cnd) + * but that causes many compiler warnings (==errors) under Android + * because it seems that the BUG_ON() macro is not seen as a check by + * gcc like the BUG() macro is. */ +#define assert(cnd) \ + do { \ + if (!(cnd)) { \ + BUG(); \ + } \ + } while (0) +#endif /*KERNEL_ASSERT_TO_BUG*/ + +#ifdef KERNEL_ASSERT_TO_BUG_ON +#define assert(cnd) BUG_ON(!(cnd)) +#endif /*KERNEL_ASSERT_TO_BUG_ON*/ + +#ifdef KERNEL_ASSERT_TO_WARN_ON +#define assert(cnd) WARN_ON(!(cnd)) +#endif /*KERNEL_ASSERT_TO_WARN_ON*/ + +#ifdef KERNEL_ASSERT_TO_WARN_ON_INF_LOOP +#define assert(cnd) \ + do { \ + int not_cnd = !(cnd); \ + WARN_ON(not_cnd); \ + if (not_cnd) { \ + for (;;) { \ + } \ + } \ + } while (0) +#endif /*KERNEL_ASSERT_TO_WARN_ON_INF_LOOP*/ + +#ifdef KERNEL_ASSERT_UNDEFINED +#include KERNEL_ASSERT_DEFINITION_FILESTRING +#endif /*KERNEL_ASSERT_UNDEFINED*/ + +#elif defined(__FIST__) || defined(__GNUC__) + +#include "assert.h" + +#else /* default is for unknown environments */ +#define assert(cnd) ((void)0) +#endif + +#endif /* NDEBUG */ + +#ifndef PIPE_GENERATION +/* Deprecated OP___assert, this is still used in ~1000 places + * in the code. This will be removed over time. + * The implementation for the pipe generation tool is in support.isp.h */ +#define OP___assert(cnd) assert(cnd) + +#ifdef C_RUN +#define compile_time_assert(cond) OP___assert(cond) +#else +#include "storage_class.h" +extern void _compile_time_assert(void); +STORAGE_CLASS_INLINE void compile_time_assert(unsigned cond) +{ + /* Call undefined function if cond is false */ + if (!cond) + _compile_time_assert(); +} +#endif +#endif /* PIPE_GENERATION */ + +#endif /* __ASSERT_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/cpu_mem_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/cpu_mem_support.h new file mode 100644 index 000000000000..fa349cac4b24 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/cpu_mem_support.h @@ -0,0 +1,233 @@ +/** +* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __CPU_MEM_SUPPORT_H +#define __CPU_MEM_SUPPORT_H + +#include "storage_class.h" +#include "assert_support.h" +#include "type_support.h" + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_copy(void *dst, const void *src, unsigned int size) +{ + /* memcpy cannot be used in in Windows (function is not allowed), + * and the safer function memcpy_s is not available on other platforms. + * Because usage of ia_css_cpu_mem_copy is minimal, we implement it here in an easy, + * but sub-optimal way. + */ + unsigned int i; + + assert(dst != NULL && src != NULL); + + if (!(dst != NULL && src != NULL)) { + return NULL; + } + for (i = 0; i < size; i++) { + ((char *)dst)[i] = ((char *)src)[i]; + } + return dst; +} + +#if defined(__KERNEL__) + +#include +#include +#include +#include + +/* TODO: remove, workaround for issue in hrt file ibuf_ctrl_2600_config.c + * error checking code added to SDK that uses calls to exit function + */ +#define exit(a) return + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return kmalloc(size, GFP_KERNEL); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + return ia_css_cpu_mem_alloc(size); /* todo: align to page size */ +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_protect(void *ptr, unsigned int size, int prot) +{ + /* nothing here yet */ +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); /* available in kernel in linux/string.h */ +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + kfree(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ + /* parameter check here */ + if (ptr == NULL) + return; + + clflush_cache_range(ptr, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ + /* for now same as flush */ + ia_css_cpu_mem_cache_flush(ptr, size); +} + +#elif defined(_MSC_VER) + +#include +#include +#include + +extern void *hrt_malloc(size_t bytes, int zero_mem); +extern void *hrt_free(void *ptr); +extern void hrt_mem_cache_flush(void *ptr, unsigned int size); +extern void hrt_mem_cache_invalidate(void *ptr, unsigned int size); + +#define malloc(a) hrt_malloc(a, 1) +#define free(a) hrt_free(a) + +#define CSS_PAGE_SIZE (1<<12) + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return malloc(size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + unsigned int buffer_size = size; + + /* Currently hrt_malloc calls Windows ExAllocatePoolWithTag() routine + * to request system memory. 
If the number of bytes is equal or bigger + * than the page size, then the returned address is page aligned, + * but if it's smaller it's not necessarily page-aligned We agreed + * with Windows team that we allocate a full page + * if it's less than page size + */ + if (buffer_size < CSS_PAGE_SIZE) + buffer_size = CSS_PAGE_SIZE; + + return ia_css_cpu_mem_alloc(buffer_size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + free(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ +#ifdef _KERNEL_MODE + hrt_mem_cache_flush(ptr, size); +#else + (void)ptr; + (void)size; +#endif +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ +#ifdef _KERNEL_MODE + hrt_mem_cache_invalidate(ptr, size); +#else + (void)ptr; + (void)size; +#endif +} + +#else + +#include +#include +#include +/* Needed for the MPROTECT */ +#include +#include +#include +#include + + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc(unsigned int size) +{ + return malloc(size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_alloc_page_aligned(unsigned int size) +{ + int pagesize; + + pagesize = sysconf(_SC_PAGE_SIZE); + return memalign(pagesize, size); +} + +STORAGE_CLASS_INLINE void* +ia_css_cpu_mem_set_zero(void *dst, unsigned int size) +{ + return memset(dst, 0, size); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_free(void *ptr) +{ + free(ptr); +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_flush(void *ptr, unsigned int size) +{ + /* not needed in simulation */ + (void)ptr; + (void)size; +} + +STORAGE_CLASS_INLINE void +ia_css_cpu_mem_cache_invalidate(void *ptr, unsigned int size) +{ + /* not needed in simulation */ + (void)ptr; + (void)size; +} + +#endif + +#endif /* __CPU_MEM_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/error_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/error_support.h new file mode 100644 index 000000000000..9fe1f65125e6 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/error_support.h @@ -0,0 +1,110 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __ERROR_SUPPORT_H +#define __ERROR_SUPPORT_H + +#if defined(__KERNEL__) +#include +#else +#include +#endif +#include + +/* OS-independent definition of IA_CSS errno values */ +/* #define IA_CSS_EINVAL 1 */ +/* #define IA_CSS_EFAULT 2 */ + +#ifdef __HIVECC +#define ERR_EMBEDDED 1 +#else +#define ERR_EMBEDDED 0 +#endif + +#if ERR_EMBEDDED +#define DECLARE_ERRVAL +#else +#define DECLARE_ERRVAL \ + int _errval = 0; +#endif + +/* Use "owl" in while to prevent compiler warnings in Windows */ +#define ALWAYS_FALSE ((void)0, 0) + +#define verifret(cond, error_type) \ +do { \ + if (!(cond)) { \ + return error_type; \ + } \ +} while (ALWAYS_FALSE) + +#define verifjmp(cond, error_tag) \ +do { \ + if (!(cond)) { \ + goto error_tag; \ + } \ +} while (ALWAYS_FALSE) + +#define verifexit(cond) \ +do { \ + if (!(cond)) { \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#if ERR_EMBEDDED +#define verifexitval(cond, error_tag) \ +do { \ + assert(cond); \ +} while (ALWAYS_FALSE) +#else +#define verifexitval(cond, error_tag) \ +do { \ + if (!(cond)) { \ + _errval = (error_tag); \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) +#endif + +#if ERR_EMBEDDED +#define haserror(error_tag) (0) +#else +#define haserror(error_tag) \ + (_errval == (error_tag)) +#endif + +#if ERR_EMBEDDED +#define noerror() (1) +#else +#define noerror() \ + (_errval == 0) +#endif + +#define verifjmpexit(cond) \ +do { \ + if (!(cond)) { \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#define verifjmpexitsetretval(cond, retval) \ +do { \ + if (!(cond)) { \ + retval = -1; \ + goto EXIT; \ + } \ +} while (ALWAYS_FALSE) + +#endif /* __ERROR_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/math_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/math_support.h new file mode 100644 index 000000000000..633f86f1a1b0 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/math_support.h @@ -0,0 +1,314 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#ifndef __MATH_SUPPORT_H +#define __MATH_SUPPORT_H + +#include "storage_class.h" /* for STORAGE_CLASS_INLINE */ +#include "type_support.h" +#include "assert_support.h" + +/* in case we have min/max/MIN/MAX macro's undefine them */ +#ifdef min +#undef min +#endif +#ifdef max +#undef max +#endif +#ifdef MIN /* also defined in include/hrt/numeric.h from SDK */ +#undef MIN +#endif +#ifdef MAX +#undef MAX +#endif + +#ifndef UINT16_MAX +#define UINT16_MAX (0xffffUL) +#endif + +#ifndef UINT32_MAX +#define UINT32_MAX (0xffffffffUL) +#endif + +#define IS_ODD(a) ((a) & 0x1) +#define IS_EVEN(a) (!IS_ODD(a)) +#define IS_POWER2(a) (!((a)&((a)-1))) +#define IS_MASK_BITS_SET(a, b) ((a & b) != 0) + +/*To Find next power of 2 number from x */ +#define bit2(x) ((x) | ((x) >> 1)) +#define bit4(x) (bit2(x) | (bit2(x) >> 2)) +#define bit8(x) (bit4(x) | (bit4(x) >> 4)) +#define bit16(x) (bit8(x) | (bit8(x) >> 8)) +#define bit32(x) (bit16(x) | (bit16(x) >> 16)) +#define NEXT_POWER_OF_2(x) (bit32(x-1) + 1) + +/* force a value to a lower even value */ +#define EVEN_FLOOR(x) ((x) & ~1UL) + +/* A => B */ +#define IMPLIES(a, b) (!(a) || (b)) + +/* The ORIG_BITS th bit is the sign bit */ +/* Sign extends a ORIG_BITS bits long signed number to a 64-bit signed number */ +/* By type casting it can relimited to any valid type-size + * (32-bit signed or 16-bit or 8-bit) + */ +/* By masking it can be transformed to any arbitrary bit size */ +#define SIGN_EXTEND(VAL, ORIG_BITS) \ +((~(((VAL)&(1ULL<<((ORIG_BITS)-1)))-1))|(VAL)) + +#define EXTRACT_BIT(a, b) ((a >> b) & 1) + +/* for preprocessor and array sizing use MIN and MAX + otherwise use min and max */ +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#define CLIP(a, b, c) MIN((MAX((a), (b))), (c)) +/* Integer round-down division of a with b */ +#define FLOOR_DIV(a, b) ((b) ? ((a) / (b)) : 0) +/* Align a to the lower multiple of b */ +#define FLOOR_MUL(a, b) (FLOOR_DIV(a, b) * (b)) +/* Integer round-up division of a with b */ +#define CEIL_DIV(a, b) ((b) ? (((a) + (b) - 1) / (b)) : 0) +/* Align a to the upper multiple of b */ +#define CEIL_MUL(a, b) (CEIL_DIV(a, b) * (b)) +/* Align a to the upper multiple of b - fast implementation + * for cases when b=pow(2,n) + */ +#define CEIL_MUL2(a, b) (((a) + (b) - 1) & ~((b) - 1)) +/* integer round-up division of a with pow(2,b) */ +#define CEIL_SHIFT(a, b) (((a) + (1UL << (b)) - 1) >> (b)) +/* Align a to the upper multiple of pow(2,b) */ +#define CEIL_SHIFT_MUL(a, b) (CEIL_SHIFT(a, b) << (b)) +/* Absolute difference of a and b */ +#define ABS_DIF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a))) +#define ABS(a) ABS_DIF(a, 0) +/* Square of x */ +#define SQR(x) ((x)*(x)) +/* Integer round-half-down division of a nad b */ +#define ROUND_HALF_DOWN_DIV(a, b) ((b) ? ((a) + (b / 2) - 1) / (b) : 0) +/* Align a to the round-half-down multiple of b */ +#define ROUND_HALF_DOWN_MUL(a, b) (ROUND_HALF_DOWN_DIV(a, b) * (b)) + +#define MAX3(a, b, c) MAX((a), MAX((b), (c))) +#define MIN3(a, b, c) MIN((a), MIN((b), (c))) +#define MAX4(a, b, c, d) MAX((MAX((a), (b))), (MAX((c), (d)))) +#define MIN4(a, b, c, d) MIN((MIN((a), (b))), (MIN((c), (d)))) + +/* min and max should not be macros as they will evaluate their arguments twice. + if you really need a macro (e.g. 
for CPP or for initializing an array) + use MIN() and MAX(), otherwise use min() and max() */ + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(a) ((sizeof(a) / sizeof(*(a)))) +#endif + +#ifndef BYTES +#define BYTES(bit) (((bit)+7)/8) +#endif + +#if !defined(PIPE_GENERATION) +STORAGE_CLASS_INLINE unsigned int max_value_bits(unsigned int bits) +{ + return (bits == 0) ? 0 : ((2 * ((1 << ((bits) - 1)) - 1)) + 1); +} +STORAGE_CLASS_INLINE unsigned int max_value_bytes(unsigned int bytes) +{ + return max_value_bits(IA_CSS_UINT8_T_BITS * bytes); +} +STORAGE_CLASS_INLINE int max(int a, int b) +{ + return MAX(a, b); +} + +STORAGE_CLASS_INLINE int min(int a, int b) +{ + return MIN(a, b); +} + +STORAGE_CLASS_INLINE int clip(int a, int b, int c) +{ + return min(max(a, b), c); +} + +STORAGE_CLASS_INLINE unsigned int umax(unsigned int a, unsigned int b) +{ + return MAX(a, b); +} + +STORAGE_CLASS_INLINE unsigned int umin(unsigned int a, unsigned int b) +{ + return MIN(a, b); +} + +STORAGE_CLASS_INLINE unsigned int uclip(unsigned int a, unsigned int b, + unsigned int c) +{ + return umin(umax(a, b), c); +} + +STORAGE_CLASS_INLINE unsigned int ceil_div(unsigned int a, unsigned int b) +{ + return CEIL_DIV(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_mul(unsigned int a, unsigned int b) +{ + return CEIL_MUL(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_mul2(unsigned int a, unsigned int b) +{ + return CEIL_MUL2(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_shift(unsigned int a, unsigned int b) +{ + return CEIL_SHIFT(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_shift_mul(unsigned int a, unsigned int b) +{ + return CEIL_SHIFT_MUL(a, b); +} + +STORAGE_CLASS_INLINE int abs_dif(int a, int b) +{ + return ABS_DIF(a, b); +} + +STORAGE_CLASS_INLINE unsigned int uabs_dif(unsigned int a, unsigned int b) +{ + return ABS_DIF(a, b); +} + +STORAGE_CLASS_INLINE unsigned int round_half_down_div(unsigned int a, + unsigned int b) +{ + return ROUND_HALF_DOWN_DIV(a, b); +} + +STORAGE_CLASS_INLINE unsigned int round_half_down_mul(unsigned int a, + unsigned int b) +{ + return ROUND_HALF_DOWN_MUL(a, b); +} + +STORAGE_CLASS_INLINE unsigned int ceil_pow2(uint32_t a) +{ + unsigned int retval = 0; + + if (IS_POWER2(a)) { + retval = (unsigned int)a; + } else { + unsigned int v = a; + + v |= v>>1; + v |= v>>2; + v |= v>>4; + v |= v>>8; + v |= v>>16; + retval = (unsigned int)(v+1); + } + return retval; +} + +STORAGE_CLASS_INLINE unsigned int floor_log2(uint32_t a) +{ + static const uint8_t de_bruijn[] = { + 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 + }; + uint32_t v = a; + + v |= v>>1; + v |= v>>2; + v |= v>>4; + v |= v>>8; + v |= v>>16; + return (unsigned int)de_bruijn[(v*0x07C4ACDDU)>>27]; +} + +/* Divide by small power of two */ +STORAGE_CLASS_INLINE unsigned int +udiv2_small_i(uint32_t a, uint32_t b) +{ + assert(b <= 2); + return a >> (b-1); +} + +/* optimized divide for small results + * a will be divided by b + * outbits is the number of bits needed for the result + * the smaller the cheaper the function will be. + * if the result doesn't fit in the number of output bits + * the result is incorrect and the function will assert + */ +STORAGE_CLASS_INLINE unsigned int +udiv_medium(uint32_t a, uint32_t b, unsigned outbits) +{ + int bit; + unsigned res = 0; + unsigned mask; + +#ifdef VOLCANO +#pragma ipu unroll +#endif + for (bit = outbits-1 ; bit >= 0; bit--) { + mask = 1<= (b<= c ? 
a+b-c : a+b); +} + +/* + * For SP and ISP, SDK provides the definition of OP_asp_slor. + * We need it only for host + */ +STORAGE_CLASS_INLINE unsigned int OP_asp_slor(int a, int b, int c) +{ + return ((a << c) | b); +} +#else +#include "hive/customops.h" +#endif /* !defined(__VIED_CELL) */ + +#endif /* !defined(PIPE_GENERATION) */ + +#if !defined(__KERNEL__) +#define clamp(a, min_val, max_val) MIN(MAX((a), (min_val)), (max_val)) +#endif /* !defined(__KERNEL__) */ + +#endif /* __MATH_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/misc_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/misc_support.h new file mode 100644 index 000000000000..a2c2729e946d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/misc_support.h @@ -0,0 +1,76 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __MISC_SUPPORT_H +#define __MISC_SUPPORT_H + +/* suppress compiler warnings on unused variables */ +#ifndef NOT_USED +#define NOT_USED(a) ((void)(a)) +#endif + +/* Calculate the total bytes for pow(2) byte alignment */ +#define tot_bytes_for_pow2_align(pow2, cur_bytes) \ + ((cur_bytes + (pow2 - 1)) & ~(pow2 - 1)) + +/* Display the macro value given a string */ +#define _STR(x) #x +#define STR(x) _STR(x) + +/* Concatenate */ +#ifndef CAT /* also defined in */ +#define _CAT(a, b) a ## b +#define CAT(a, b) _CAT(a, b) +#endif + +#define _CAT3(a, b, c) a ## b ## c +#define CAT3(a, b, c) _CAT3(a, b, c) + +/* NO_HOIST, NO_CSE, NO_ALIAS attributes must be ignored for host code */ +#ifndef __HIVECC +#ifndef NO_HOIST +#define NO_HOIST +#endif +#ifndef NO_CSE +#define NO_CSE +#endif +#ifndef NO_ALIAS +#define NO_ALIAS +#endif +#endif + +enum hive_method_id { + HIVE_METHOD_ID_CRUN, + HIVE_METHOD_ID_UNSCHED, + HIVE_METHOD_ID_SCHED, + HIVE_METHOD_ID_TARGET +}; + +/* Derive METHOD */ +#if defined(C_RUN) + #define HIVE_METHOD "crun" + #define HIVE_METHOD_ID HIVE_METHOD_ID_CRUN +#elif defined(HRT_UNSCHED) + #define HIVE_METHOD "unsched" + #define HIVE_METHOD_ID HIVE_METHOD_ID_UNSCHED +#elif defined(HRT_SCHED) + #define HIVE_METHOD "sched" + #define HIVE_METHOD_ID HIVE_METHOD_ID_SCHED +#else + #define HIVE_METHOD "target" + #define HIVE_METHOD_ID HIVE_METHOD_ID_TARGET + #define HRT_TARGET 1 +#endif + +#endif /* __MISC_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/platform_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/platform_support.h new file mode 100644 index 000000000000..1752efc7b4df --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/platform_support.h @@ -0,0 +1,146 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PLATFORM_SUPPORT_H +#define __PLATFORM_SUPPORT_H + +#include "storage_class.h" + +#define MSEC_IN_SEC 1000 +#define NSEC_IN_MSEC 1000000 + +#if defined(_MSC_VER) +#include + +#define IA_CSS_EXTERN +#define SYNC_WITH(x) +#define CSS_ALIGN(d, a) _declspec(align(a)) d + +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + /* Placeholder for driver team*/ +} + +STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) +{ + /* Placeholder for driver team*/ + (void)delay_time_ms; +} + +#elif defined(__HIVECC) +#include +#include + +#define IA_CSS_EXTERN extern +#define CSS_ALIGN(d, a) d __attribute__((aligned(a))) +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + OP___schedule(); +} + +#elif defined(__KERNEL__) +#include +#include + +#define IA_CSS_EXTERN +#define CSS_ALIGN(d, a) d __aligned(a) + +STORAGE_CLASS_INLINE void ia_css_sleep(void) +{ + usleep_range(1, 50); +} + +#elif defined(__GNUC__) +#include + +#define IA_CSS_EXTERN +#define CSS_ALIGN(d, a) d __attribute__((aligned(a))) + +/* Define some __HIVECC specific macros to nothing to allow host code compilation */ +#ifndef NO_ALIAS +#define NO_ALIAS +#endif + +#ifndef SYNC_WITH +#define SYNC_WITH(x) +#endif + +#if defined(HRT_CSIM) + #include "hrt/host.h" /* Using hrt_sleep from hrt/host.h */ + STORAGE_CLASS_INLINE void ia_css_sleep(void) + { + /* For the SDK still using hrt_sleep */ + hrt_sleep(); + } + STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) + { + /* For the SDK still using hrt_sleep */ + long unsigned int i = 0; + for (i = 0; i < delay_time_ms; i++) { + hrt_sleep(); + } + } +#else + #include + STORAGE_CLASS_INLINE void ia_css_sleep(void) + { + struct timespec delay_time; + + delay_time.tv_sec = 0; + delay_time.tv_nsec = 10; + nanosleep(&delay_time, NULL); + } + STORAGE_CLASS_INLINE void ia_css_sleep_msec(long unsigned int delay_time_ms) + { + struct timespec delay_time; + + if (delay_time_ms >= MSEC_IN_SEC) { + delay_time.tv_sec = delay_time_ms / MSEC_IN_SEC; + delay_time.tv_nsec = (delay_time_ms % MSEC_IN_SEC) * NSEC_IN_MSEC; + } else { + delay_time.tv_sec = 0; + delay_time.tv_nsec = delay_time_ms * NSEC_IN_MSEC; + } + nanosleep(&delay_time, NULL); + } +#endif + +#else +#include +#endif + +/*needed for the include in stdint.h for various environments */ +#include "type_support.h" +#include "storage_class.h" + +#define MAX_ALIGNMENT 8 +#define aligned_uint8(type, obj) CSS_ALIGN(uint8_t obj, 1) +#define aligned_int8(type, obj) CSS_ALIGN(int8_t obj, 1) +#define aligned_uint16(type, obj) CSS_ALIGN(uint16_t obj, 2) +#define aligned_int16(type, obj) CSS_ALIGN(int16_t obj, 2) +#define aligned_uint32(type, obj) CSS_ALIGN(uint32_t obj, 4) +#define aligned_int32(type, obj) CSS_ALIGN(int32_t obj, 4) + +/* needed as long as hivecc does not define the type (u)int64_t */ +#if defined(__HIVECC) +#define aligned_uint64(type, obj) CSS_ALIGN(unsigned long long obj, 8) +#define aligned_int64(type, obj) CSS_ALIGN(signed long long obj, 8) +#else +#define aligned_uint64(type, obj) CSS_ALIGN(uint64_t obj, 8) +#define aligned_int64(type, obj) CSS_ALIGN(int64_t obj, 8) +#endif +#define aligned_enum(enum_type, obj) CSS_ALIGN(uint32_t obj, 4) +#define aligned_struct(struct_type, obj) struct_type obj + +#endif /* __PLATFORM_SUPPORT_H */ diff --git 
a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/print_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/print_support.h new file mode 100644 index 000000000000..0b614f7ef12d --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/print_support.h @@ -0,0 +1,90 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __PRINT_SUPPORT_H +#define __PRINT_SUPPORT_H + +#if defined(_MSC_VER) +#ifdef _KERNEL_MODE + +/* TODO: Windows driver team to provide tracing mechanism for kernel mode + * e.g. DbgPrint and DbgPrintEx + */ +extern void FwTracePrintPWARN(const char *fmt, ...); +extern void FwTracePrintPRINT(const char *fmt, ...); +extern void FwTracePrintPERROR(const char *fmt, ...); +extern void FwTracePrintPDEBUG(const char *fmt, ...); + +#define PWARN(format, ...) FwTracePrintPWARN(format, __VA_ARGS__) +#define PRINT(format, ...) FwTracePrintPRINT(format, __VA_ARGS__) +#define PERROR(format, ...) FwTracePrintPERROR(format, __VA_ARGS__) +#define PDEBUG(format, ...) FwTracePrintPDEBUG(format, __VA_ARGS__) + +#else +/* Windows usermode compilation */ +#include + +/* To change the defines below, communicate with Windows team first + * to ensure they will not get flooded with prints + */ +/* This is temporary workaround to avoid flooding userspace + * Windows driver with prints + */ + +#define PWARN(format, ...) +#define PRINT(format, ...) +#define PERROR(format, ...) printf("error: " format, __VA_ARGS__) +#define PDEBUG(format, ...) + +#endif /* _KERNEL_MODE */ + +#elif defined(__HIVECC) +#include +/* To be revised + +#define PWARN(format) +#define PRINT(format) OP___printstring(format) +#define PERROR(variable) OP___dump(9999, arguments) +#define PDEBUG(variable) OP___dump(__LINE__, arguments) + +*/ + +#define PRINTSTRING(str) OP___printstring(str) + +#elif defined(__KERNEL__) +#include +#include + + +#define PWARN(format, arguments...) pr_debug(format, ##arguments) +#define PRINT(format, arguments...) pr_debug(format, ##arguments) +#define PERROR(format, arguments...) pr_debug(format, ##arguments) +#define PDEBUG(format, arguments...) pr_debug(format, ##arguments) + +#else +#include + +#define PRINT_HELPER(prefix, format, ...) printf(prefix format "%s", __VA_ARGS__) + +/* The trailing "" allows the edge case of printing single string */ +#define PWARN(...) PRINT_HELPER("warning: ", __VA_ARGS__, "") +#define PRINT(...) PRINT_HELPER("", __VA_ARGS__, "") +#define PERROR(...) PRINT_HELPER("error: ", __VA_ARGS__, "") +#define PDEBUG(...) 
PRINT_HELPER("debug: ", __VA_ARGS__, "") + +#define PRINTSTRING(str) PRINT(str) + +#endif + +#endif /* __PRINT_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/storage_class.h new file mode 100644 index 000000000000..af19b4026220 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/storage_class.h @@ -0,0 +1,51 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __STORAGE_CLASS_H +#define __STORAGE_CLASS_H + +#define STORAGE_CLASS_EXTERN \ +extern + +#if defined(_MSC_VER) +#define STORAGE_CLASS_INLINE \ +static __inline +#elif defined(__HIVECC) +#define STORAGE_CLASS_INLINE \ +static inline +#else +#define STORAGE_CLASS_INLINE \ +static inline +#endif + +/* Register struct */ +#ifndef __register +#if defined(__HIVECC) && !defined(PIPE_GENERATION) +#define __register register +#else +#define __register +#endif +#endif + +/* Memory attribute */ +#ifndef MEM +#ifdef PIPE_GENERATION +#elif defined(__HIVECC) +#include +#else +#define MEM(any_mem) +#endif +#endif + +#endif /* __STORAGE_CLASS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/type_support.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/type_support.h new file mode 100644 index 000000000000..a86da0e78941 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/support/type_support.h @@ -0,0 +1,80 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __TYPE_SUPPORT_H +#define __TYPE_SUPPORT_H + +/* Per the DLI spec, types are in "type_support.h" and + * "platform_support.h" is for unclassified/to be refactored + * platform specific definitions. 
+ */ +#define IA_CSS_UINT8_T_BITS 8 +#define IA_CSS_UINT16_T_BITS 16 +#define IA_CSS_UINT32_T_BITS 32 +#define IA_CSS_INT32_T_BITS 32 +#define IA_CSS_UINT64_T_BITS 64 + + +#if defined(_MSC_VER) +#include +#include +#include +#include +#if defined(_M_X64) +#define HOST_ADDRESS(x) (unsigned long long)(x) +#else +#define HOST_ADDRESS(x) (unsigned long)(x) +#endif + +#elif defined(PARAM_GENERATION) +/* Nothing */ +#elif defined(__HIVECC) +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +typedef long long int64_t; +typedef unsigned long long uint64_t; + +#elif defined(__KERNEL__) +#include +#include + +#define CHAR_BIT (8) +#define HOST_ADDRESS(x) (unsigned long)(x) + +#elif defined(__GNUC__) +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +#else /* default is for the FIST environment */ +#include +#include +#include +#include +#define HOST_ADDRESS(x) (unsigned long)(x) + +#endif + +#if !defined(PIPE_GENERATION) && !defined(IO_GENERATION) +/* genpipe cannot handle the void* syntax */ +typedef void *HANDLE; +#endif + +#endif /* __TYPE_SUPPORT_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom.h new file mode 100644 index 000000000000..5426d6d18e0b --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom.h @@ -0,0 +1,247 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_SYSCOM_H +#define __IA_CSS_SYSCOM_H + + +/* + * The CSS Subsystem Communication Interface - Host side + * + * It provides subsystem initialzation, send ports and receive ports + * The PSYS and ISYS interfaces are implemented on top of this interface. + */ + +#include "ia_css_syscom_config.h" + +#define FW_ERROR_INVALID_PARAMETER (-1) +#define FW_ERROR_BAD_ADDRESS (-2) +#define FW_ERROR_BUSY (-3) +#define FW_ERROR_NO_MEMORY (-4) + +struct ia_css_syscom_context; + +/** + * ia_css_syscom_size() - provide syscom external buffer requirements + * @config: pointer to the configuration data (read) + * @size: pointer to the buffer size (write) + * + * Purpose: + * - Provide external buffer requirements + * - To be used for external buffer allocation + * + */ +extern void +ia_css_syscom_size( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size *size +); + +/** + * ia_css_syscom_open() - initialize a subsystem context + * @config: pointer to the configuration data (read) + * @buf: pointer to externally allocated buffers (read) + * @returns: struct ia_css_syscom_context* on success, 0 otherwise. + * + * Purpose: + * - initialize host side data structures + * - boot the subsystem? 
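+ *
+ * A minimal call-sequence sketch (illustrative only; the local variable
+ * names are not part of the API):
+ *	struct ia_css_syscom_size size;
+ *	struct ia_css_syscom_buf buf;
+ *	struct ia_css_syscom_context *ctx;
+ *
+ *	ia_css_syscom_size(&cfg, &size);
+ *	... allocate the buffers in 'buf' using the sizes returned above ...
+ *	ctx = ia_css_syscom_open(&cfg, &buf);
+ * (As implemented, passing buf == NULL makes syscom allocate the buffers
+ * internally.)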
+ *
+ */
+extern struct ia_css_syscom_context*
+ia_css_syscom_open(
+	struct ia_css_syscom_config *config,
+	struct ia_css_syscom_buf *buf
+);
+
+/**
+ * ia_css_syscom_close() - signal close to cell
+ * @context: pointer to the subsystem context
+ * @returns: 0 on success, -3 (FW_ERROR_BUSY) if the SPC is not ready yet.
+ *
+ * Purpose:
+ * Request the cell to terminate
+ */
+extern int
+ia_css_syscom_close(
+	struct ia_css_syscom_context *context
+);
+
+/**
+ * ia_css_syscom_release() - free context
+ * @context: pointer to the subsystem context
+ * @force: flag which specifies whether the cell
+ *	state will be checked before freeing the
+ *	context.
+ * @returns: 0 on success, -3 (FW_ERROR_BUSY) if the cell
+ *	is busy and the call was not forced.
+ *
+ * Purpose:
+ * Two modes: with force==true the context is freed immediately;
+ * with force==false the cell state is verified first and the
+ * context is freed only if the cell state is ok, otherwise an
+ * error is returned.
+ */
+extern int
+ia_css_syscom_release(
+	struct ia_css_syscom_context *context,
+	unsigned int force
+);
+
+/**
+ * Open a port for sending tokens to the subsystem
+ * @context: pointer to the subsystem context
+ * @port: send port index
+ * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_send_port_open(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Close a port for sending tokens to the subsystem
+ * @context: pointer to the subsystem context
+ * @port: send port index
+ * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_send_port_close(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Get the number of tokens that can be sent to a port without error.
+ * @context: pointer to the subsystem context
+ * @port: send port index
+ * @returns: number of available tokens on success,
+ *	-1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_send_port_available(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Send a token to the subsystem port
+ * The token size is determined during initialization
+ * @context: pointer to the subsystem context
+ * @port: send port index
+ * @token: pointer to the token value that is transferred to the subsystem
+ * @returns: number of tokens sent on success,
+ *	-1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_send_port_transfer(
+	struct ia_css_syscom_context *context,
+	unsigned int port,
+	const void *token
+);
+
+/**
+ * Open a port for receiving tokens from the subsystem
+ * @context: pointer to the subsystem context
+ * @port: receive port index
+ * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_recv_port_open(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Close a port for receiving tokens from the subsystem
+ * @context: pointer to the subsystem context
+ * @port: receive port index
+ * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise.
+ */
+extern int
+ia_css_syscom_recv_port_close(
+	struct ia_css_syscom_context *context,
+	unsigned int port
+);
+
+/**
+ * Get the number of tokens that can be received from a port without errors.
+ * @context: pointer to the subsystem context
+ * @port: receive port index
+ * @returns: number of available tokens on success,
+ *	-1 (FW_ERROR_INVALID_PARAMETER) otherwise.
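+ *
+ * A typical polling sketch (illustrative only; 'token' must point to a
+ * buffer of the token size configured for this queue):
+ *	while (ia_css_syscom_recv_port_available(ctx, port) > 0)
+ *		ia_css_syscom_recv_port_transfer(ctx, port, token);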
+ */ +extern int +ia_css_syscom_recv_port_available( + struct ia_css_syscom_context *context, + unsigned int port +); + +/** + * Receive a token from the subsystem port + * The token size is determined during initialization + * @context: pointer to the subsystem context + * @port: receive port index + * @token (output): pointer to (space for) the token to be received + * @returns: number of tokens received on success, + * -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_recv_port_transfer( + struct ia_css_syscom_context *context, + unsigned int port, + void *token +); + +#if HAS_DUAL_CMD_CTX_SUPPORT +/** + * ia_css_syscom_store_dmem() - store subsystem context information in DMEM + * @context: pointer to the subsystem context + * @ssid: subsystem id + * @vtl0_addr_mask: VTL0 address mask; only applicable when the passed in context is secure + * @returns: 0 on success, -1 (FW_ERROR_INVALID_PARAMETER) otherwise. + */ +extern int +ia_css_syscom_store_dmem( + struct ia_css_syscom_context *context, + unsigned int ssid, + unsigned int vtl0_addr_mask +); + +/** + * ia_css_syscom_set_trustlet_status() - store truslet configuration setting + * @context: pointer to the subsystem context + * @trustlet_exist: 1 if trustlet exists + */ +extern void +ia_css_syscom_set_trustlet_status( + unsigned int dmem_addr, + unsigned int ssid, + bool trustlet_exist +); + +/** + * ia_css_syscom_is_ab_spc_ready() - check if SPC access blocker programming is completed + * @context: pointer to the subsystem context + * @returns: 1 when status is ready. 0 otherwise + */ +bool +ia_css_syscom_is_ab_spc_ready( + struct ia_css_syscom_context *ctx +); +#endif /* HAS_DUAL_CMD_CTX_SUPPORT */ + +#endif /* __IA_CSS_SYSCOM_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom_config.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom_config.h new file mode 100644 index 000000000000..8c827c2ba395 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom_config.h @@ -0,0 +1,98 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/
+
+#ifndef __IA_CSS_SYSCOM_CONFIG_H
+#define __IA_CSS_SYSCOM_CONFIG_H
+
+#include
+#include
+
+/* syscom size struct, output of ia_css_syscom_size,
+ * input for (external) allocation
+ */
+struct ia_css_syscom_size {
+	/* Size of host buffer */
+	unsigned int cpu;
+	/* Size of shared config buffer (host to cell) */
+	unsigned int shm;
+	/* Size of shared input queue buffers (host to cell) */
+	unsigned int ibuf;
+	/* Size of shared output queue buffers (cell to host) */
+	unsigned int obuf;
+};
+
+/* syscom buffer struct, output of (external) allocation,
+ * input for ia_css_syscom_open
+ */
+struct ia_css_syscom_buf {
+	char *cpu; /* host buffer */
+
+	/* shared memory buffer host address */
+	host_virtual_address_t shm_host;
+	/* shared memory buffer cell address */
+	vied_virtual_address_t shm_cell;
+
+	/* input queue shared buffer host address */
+	host_virtual_address_t ibuf_host;
+	/* input queue shared buffer cell address */
+	vied_virtual_address_t ibuf_cell;
+
+	/* output queue shared buffer host address */
+	host_virtual_address_t obuf_host;
+	/* output queue shared buffer cell address */
+	vied_virtual_address_t obuf_cell;
+};
+
+struct ia_css_syscom_queue_config {
+	unsigned int queue_size; /* tokens per queue */
+	unsigned int token_size; /* bytes per token */
+};
+
+/**
+ * Parameter struct for ia_css_syscom_open
+ */
+struct ia_css_syscom_config {
+	/* This member is no longer used in syscom.
+	   It is kept so as not to break any driver builds, and will be
+	   removed when all assignments have been removed from driver code */
+	/* address of firmware in DDR/IMR */
+	unsigned long long host_firmware_address;
+
+	/* address of firmware in DDR, seen from SPC */
+	unsigned int vied_firmware_address;
+
+	unsigned int ssid;
+	unsigned int mmid;
+
+	unsigned int num_input_queues;
+	unsigned int num_output_queues;
+	struct ia_css_syscom_queue_config *input;
+	struct ia_css_syscom_queue_config *output;
+
+	unsigned int regs_addr;
+	unsigned int dmem_addr;
+
+	/* firmware-specific configuration data */
+	void *specific_addr;
+	unsigned int specific_size;
+
+	/* if true, secure syscom in the VTIO case;
+	 * if false, non-secure syscom
+	 */
+	bool secure;
+	unsigned int vtl0_addr_mask; /* only applicable in 'secure' case */
+};
+
+#endif /* __IA_CSS_SYSCOM_CONFIG_H */
+
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom_trace.h
new file mode 100644
index 000000000000..1a0191d37102
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/interface/ia_css_syscom_trace.h
@@ -0,0 +1,52 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */ + +#ifndef __IA_CSS_SYSCOM_TRACE_H +#define __IA_CSS_SYSCOM_TRACE_H + +#include "ia_css_trace.h" + +#define SYSCOM_TRACE_LEVEL_DEFAULT 1 +#define SYSCOM_TRACE_LEVEL_DEBUG 2 + +/* Set to default level if no level is defined */ +#ifndef SYSCOM_TRACE_LEVEL +#define SYSCOM_TRACE_LEVEL SYSCOM_TRACE_LEVEL_DEFAULT +#endif /* SYSCOM_TRACE_LEVEL */ + +/* SYSCOM Module tracing backend is mapped to TUNIT tracing for target platforms */ +#ifdef __HIVECC +# ifndef HRT_CSIM +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_TRACE +# else +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +# endif +#else +# define SYSCOM_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE +#endif + +#define SYSCOM_TRACE_LEVEL_INFO IA_CSS_TRACE_LEVEL_ENABLED +#define SYSCOM_TRACE_LEVEL_WARNING IA_CSS_TRACE_LEVEL_ENABLED +#define SYSCOM_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + +#if (SYSCOM_TRACE_LEVEL == SYSCOM_TRACE_LEVEL_DEFAULT) +# define SYSCOM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_DISABLED +#elif (SYSCOM_TRACE_LEVEL == SYSCOM_TRACE_LEVEL_DEBUG) +# define SYSCOM_TRACE_LEVEL_VERBOSE IA_CSS_TRACE_LEVEL_ENABLED +#else +# error "Connection manager trace level not defined!" +#endif /* SYSCOM_TRACE_LEVEL */ + +#endif /* __IA_CSS_SYSCOM_TRACE_H */ + diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom.c new file mode 100644 index 000000000000..cdf9df0531ff --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom.c @@ -0,0 +1,650 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+*/ + +#include "ia_css_syscom.h" + +#include "ia_css_syscom_context.h" +#include "ia_css_syscom_config_fw.h" +#include "ia_css_syscom_trace.h" + +#include "queue.h" +#include "send_port.h" +#include "recv_port.h" +#include "regmem_access.h" + +#include "error_support.h" +#include "cpu_mem_support.h" + +#include "queue_struct.h" +#include "send_port_struct.h" +#include "recv_port_struct.h" + +#include "type_support.h" +#include +#include +#include "platform_support.h" + +#include "ia_css_cell.h" + +/* struct of internal buffer sizes */ +struct ia_css_syscom_size_intern { + unsigned int context; + unsigned int input_queue; + unsigned int output_queue; + unsigned int input_port; + unsigned int output_port; + + unsigned int fw_config; + unsigned int specific; + + unsigned int input_buffer; + unsigned int output_buffer; +}; + +/* Allocate buffers internally, when no buffers are provided */ +static int +ia_css_syscom_alloc( + unsigned int ssid, + unsigned int mmid, + const struct ia_css_syscom_size *size, + struct ia_css_syscom_buf *buf) +{ + /* zero the buffer to set all pointers to zero */ + memset(buf, 0, sizeof(*buf)); + + /* allocate cpu_mem */ + buf->cpu = (char *)ia_css_cpu_mem_alloc(size->cpu); + if (!buf->cpu) + goto EXIT7; + + /* allocate and map shared config buffer */ + buf->shm_host = shared_memory_alloc(mmid, size->shm); + if (!buf->shm_host) + goto EXIT6; + buf->shm_cell = shared_memory_map(ssid, mmid, buf->shm_host); + if (!buf->shm_cell) + goto EXIT5; + + /* allocate and map input queue buffer */ + buf->ibuf_host = shared_memory_alloc(mmid, size->ibuf); + if (!buf->ibuf_host) + goto EXIT4; + buf->ibuf_cell = shared_memory_map(ssid, mmid, buf->ibuf_host); + if (!buf->ibuf_cell) + goto EXIT3; + + /* allocate and map output queue buffer */ + buf->obuf_host = shared_memory_alloc(mmid, size->obuf); + if (!buf->obuf_host) + goto EXIT2; + buf->obuf_cell = shared_memory_map(ssid, mmid, buf->obuf_host); + if (!buf->obuf_cell) + goto EXIT1; + + return 0; + +EXIT1: shared_memory_free(mmid, buf->obuf_host); +EXIT2: shared_memory_unmap(ssid, mmid, buf->ibuf_cell); +EXIT3: shared_memory_free(mmid, buf->ibuf_host); +EXIT4: shared_memory_unmap(ssid, mmid, buf->shm_cell); +EXIT5: shared_memory_free(mmid, buf->shm_host); +EXIT6: ia_css_cpu_mem_free(buf->cpu); +EXIT7: return FW_ERROR_NO_MEMORY; +} + +static void +ia_css_syscom_size_intern( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size_intern *size) +{ + /* convert syscom config into syscom internal size struct */ + + unsigned int i; + + size->context = sizeof(struct ia_css_syscom_context); + size->input_queue = cfg->num_input_queues * sizeof(struct sys_queue); + size->output_queue = cfg->num_output_queues * sizeof(struct sys_queue); + size->input_port = cfg->num_input_queues * sizeof(struct send_port); + size->output_port = cfg->num_output_queues * sizeof(struct recv_port); + + size->fw_config = sizeof(struct ia_css_syscom_config_fw); + size->specific = cfg->specific_size; + + /* accumulate input queue buffer sizes */ + size->input_buffer = 0; + for (i = 0; i < cfg->num_input_queues; i++) { + size->input_buffer += + sys_queue_buf_size(cfg->input[i].queue_size, + cfg->input[i].token_size); + } + + /* accumulate outut queue buffer sizes */ + size->output_buffer = 0; + for (i = 0; i < cfg->num_output_queues; i++) { + size->output_buffer += + sys_queue_buf_size(cfg->output[i].queue_size, + cfg->output[i].token_size); + } +} + +static void +ia_css_syscom_size_extern( + const struct ia_css_syscom_size_intern *i, + struct 
ia_css_syscom_size *e) +{ + /* convert syscom internal size struct into external size struct */ + + e->cpu = i->context + i->input_queue + i->output_queue + + i->input_port + i->output_port; + e->shm = i->fw_config + i->input_queue + i->output_queue + i->specific; + e->ibuf = i->input_buffer; + e->obuf = i->output_buffer; +} + +/* Function that provides buffer sizes to be allocated */ +void +ia_css_syscom_size( + const struct ia_css_syscom_config *cfg, + struct ia_css_syscom_size *size) +{ + struct ia_css_syscom_size_intern i; + + ia_css_syscom_size_intern(cfg, &i); + ia_css_syscom_size_extern(&i, size); +} + +static struct ia_css_syscom_context* +ia_css_syscom_assign_buf( + const struct ia_css_syscom_size_intern *i, + const struct ia_css_syscom_buf *buf) +{ + struct ia_css_syscom_context *ctx; + char *cpu_mem_buf; + host_virtual_address_t shm_buf_host; + vied_virtual_address_t shm_buf_cell; + + /* host context */ + cpu_mem_buf = buf->cpu; + + ctx = (struct ia_css_syscom_context *)cpu_mem_buf; + ia_css_cpu_mem_set_zero(ctx, i->context); + cpu_mem_buf += i->context; + + ctx->input_queue = (struct sys_queue *) cpu_mem_buf; + cpu_mem_buf += i->input_queue; + + ctx->output_queue = (struct sys_queue *) cpu_mem_buf; + cpu_mem_buf += i->output_queue; + + ctx->send_port = (struct send_port *) cpu_mem_buf; + cpu_mem_buf += i->input_port; + + ctx->recv_port = (struct recv_port *) cpu_mem_buf; + + + /* cell config */ + shm_buf_host = buf->shm_host; + shm_buf_cell = buf->shm_cell; + + ctx->config_host_addr = shm_buf_host; + shm_buf_host += i->fw_config; + ctx->config_vied_addr = shm_buf_cell; + shm_buf_cell += i->fw_config; + + ctx->input_queue_host_addr = shm_buf_host; + shm_buf_host += i->input_queue; + ctx->input_queue_vied_addr = shm_buf_cell; + shm_buf_cell += i->input_queue; + + ctx->output_queue_host_addr = shm_buf_host; + shm_buf_host += i->output_queue; + ctx->output_queue_vied_addr = shm_buf_cell; + shm_buf_cell += i->output_queue; + + ctx->specific_host_addr = shm_buf_host; + ctx->specific_vied_addr = shm_buf_cell; + + ctx->ibuf_host_addr = buf->ibuf_host; + ctx->ibuf_vied_addr = buf->ibuf_cell; + + ctx->obuf_host_addr = buf->obuf_host; + ctx->obuf_vied_addr = buf->obuf_cell; + + return ctx; +} + +struct ia_css_syscom_context* +ia_css_syscom_open( + struct ia_css_syscom_config *cfg, + struct ia_css_syscom_buf *buf_extern +) +{ + struct ia_css_syscom_size_intern size_intern; + struct ia_css_syscom_size size; + struct ia_css_syscom_buf buf_intern; + struct ia_css_syscom_buf *buf; + struct ia_css_syscom_context *ctx; + struct ia_css_syscom_config_fw fw_cfg; + unsigned int i; + struct sys_queue_res res; + + IA_CSS_TRACE_0(SYSCOM, INFO, "Entered: ia_css_syscom_open\n"); + + /* error handling */ + if (cfg == NULL) + return NULL; + + IA_CSS_TRACE_1(SYSCOM, INFO, "ia_css_syscom_open (secure %d) start\n", cfg->secure); + + /* check members of cfg: TBD */ + + /* + * Check if SP is in valid state, have to wait if not ready. 
+	 * On some platforms (such as VP) the wait can take longer due to
+	 * system performance; if we returned NULL without waiting for SPC0
+	 * to become ready, the driver's firmware load would fail.
+	 */
+	ia_css_cell_wait(cfg->ssid, SPC0);
+
+	ia_css_syscom_size_intern(cfg, &size_intern);
+	ia_css_syscom_size_extern(&size_intern, &size);
+
+	if (buf_extern) {
+		/* use externally allocated buffers */
+		buf = buf_extern;
+	} else {
+		/* use internally allocated buffers */
+		buf = &buf_intern;
+		if (ia_css_syscom_alloc(cfg->ssid, cfg->mmid, &size, buf) != 0)
+			return NULL;
+	}
+
+	/* assign buffer pointers */
+	ctx = ia_css_syscom_assign_buf(&size_intern, buf);
+	/* only need to free internally allocated buffers */
+	ctx->free_buf = !buf_extern;
+
+	ctx->cell_regs_addr = cfg->regs_addr;
+	/* regmem is at cell_dmem_addr + REGMEM_OFFSET */
+	ctx->cell_dmem_addr = cfg->dmem_addr;
+
+	ctx->num_input_queues = cfg->num_input_queues;
+	ctx->num_output_queues = cfg->num_output_queues;
+
+	ctx->env.mmid = cfg->mmid;
+	ctx->env.ssid = cfg->ssid;
+	ctx->env.mem_addr = cfg->dmem_addr;
+
+	ctx->regmem_idx = SYSCOM_QPR_BASE_REG;
+
+	/* initialize input queues */
+	res.reg = SYSCOM_QPR_BASE_REG;
+	res.host_address = ctx->ibuf_host_addr;
+	res.vied_address = ctx->ibuf_vied_addr;
+	for (i = 0; i < cfg->num_input_queues; i++) {
+		sys_queue_init(ctx->input_queue + i,
+			cfg->input[i].queue_size,
+			cfg->input[i].token_size, &res);
+	}
+
+	/* initialize output queues */
+	res.host_address = ctx->obuf_host_addr;
+	res.vied_address = ctx->obuf_vied_addr;
+	for (i = 0; i < cfg->num_output_queues; i++) {
+		sys_queue_init(ctx->output_queue + i,
+			cfg->output[i].queue_size,
+			cfg->output[i].token_size, &res);
+	}
+
+	/* fill shared queue structs */
+	shared_memory_store(cfg->mmid, ctx->input_queue_host_addr,
+		ctx->input_queue,
+		cfg->num_input_queues * sizeof(struct sys_queue));
+	ia_css_cpu_mem_cache_flush(
+		(void *)HOST_ADDRESS(ctx->input_queue_host_addr),
+		cfg->num_input_queues * sizeof(struct sys_queue));
+	shared_memory_store(cfg->mmid, ctx->output_queue_host_addr,
+		ctx->output_queue,
+		cfg->num_output_queues * sizeof(struct sys_queue));
+	ia_css_cpu_mem_cache_flush(
+		(void *)HOST_ADDRESS(ctx->output_queue_host_addr),
+		cfg->num_output_queues * sizeof(struct sys_queue));
+
+	/* Zero the queue buffers. Is this really needed?
*/ + shared_memory_zero(cfg->mmid, buf->ibuf_host, size.ibuf); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(buf->ibuf_host), + size.ibuf); + shared_memory_zero(cfg->mmid, buf->obuf_host, size.obuf); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(buf->obuf_host), + size.obuf); + + /* copy firmware specific data */ + if (cfg->specific_addr && cfg->specific_size) { + shared_memory_store(cfg->mmid, ctx->specific_host_addr, + cfg->specific_addr, cfg->specific_size); + ia_css_cpu_mem_cache_flush( + (void *)HOST_ADDRESS(ctx->specific_host_addr), + cfg->specific_size); + } + + fw_cfg.num_input_queues = cfg->num_input_queues; + fw_cfg.num_output_queues = cfg->num_output_queues; + fw_cfg.input_queue = ctx->input_queue_vied_addr; + fw_cfg.output_queue = ctx->output_queue_vied_addr; + fw_cfg.specific_addr = ctx->specific_vied_addr; + fw_cfg.specific_size = cfg->specific_size; + + shared_memory_store(cfg->mmid, ctx->config_host_addr, + &fw_cfg, sizeof(struct ia_css_syscom_config_fw)); + ia_css_cpu_mem_cache_flush((void *)HOST_ADDRESS(ctx->config_host_addr), + sizeof(struct ia_css_syscom_config_fw)); + +#if !HAS_DUAL_CMD_CTX_SUPPORT + /* store syscom uninitialized state */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store STATE_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_STATE_UNINIT, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + SYSCOM_STATE_UNINIT, cfg->ssid); + /* store syscom uninitialized command */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store COMMAND_REG (%#x) @ dmem_addr %#x ssid %d\n", + SYSCOM_COMMAND_UNINIT, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_UNINIT, cfg->ssid); + /* store firmware configuration address */ + IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_open store CONFIG_REG (%#x) @ dmem_addr %#x ssid %d\n", + ctx->config_vied_addr, ctx->cell_dmem_addr, cfg->ssid); + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_CONFIG_REG, + ctx->config_vied_addr, cfg->ssid); +#endif + + /* Indicate if ctx is created for secure stream purpose */ + ctx->secure = cfg->secure; + + IA_CSS_TRACE_1(SYSCOM, INFO, "ia_css_syscom_open (secure %d) completed\n", cfg->secure); + return ctx; +} + + +int +ia_css_syscom_close( + struct ia_css_syscom_context *ctx +) { + int state; + + state = regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle close request yet */ + return FW_ERROR_BUSY; + } + + /* set close request flag */ + regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG, + SYSCOM_COMMAND_INACTIVE, ctx->env.ssid); + + return 0; +} + +static void +ia_css_syscom_free(struct ia_css_syscom_context *ctx) +{ + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, ctx->ibuf_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->ibuf_host_addr); + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, ctx->obuf_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->obuf_host_addr); + shared_memory_unmap(ctx->env.ssid, ctx->env.mmid, + ctx->config_vied_addr); + shared_memory_free(ctx->env.mmid, ctx->config_host_addr); + ia_css_cpu_mem_free(ctx); +} + +int +ia_css_syscom_release( + struct ia_css_syscom_context *ctx, + unsigned int force +) { + /* check if release is forced, an verify cell state if it is not */ + if (!force) { + if (!ia_css_cell_is_ready(ctx->env.ssid, SPC0)) + return FW_ERROR_BUSY; + } + + /* Reset the regmem idx */ + ctx->regmem_idx = 0; + + if (ctx->free_buf) + ia_css_syscom_free(ctx); + 
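+	/* Note: ia_css_syscom_free() also releases the cpu buffer that
+	 * holds ctx itself (see ia_css_syscom_assign_buf), so ctx must
+	 * not be dereferenced after this point.
+	 */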
+ return 0; +} + +int ia_css_syscom_send_port_open( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + int state; + + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + /* check if SP syscom is ready to open the queue */ + state = regmem_load_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG, + ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle messages yet */ + return FW_ERROR_BUSY; + } + + /* initialize the port */ + send_port_open(ctx->send_port + port, + ctx->input_queue + port, &(ctx->env)); + + return 0; +} + +int ia_css_syscom_send_port_close( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return 0; +} + +int ia_css_syscom_send_port_available( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return send_port_available(ctx->send_port + port); +} + +int ia_css_syscom_send_port_transfer( + struct ia_css_syscom_context *ctx, + unsigned int port, + const void *token +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_input_queues, FW_ERROR_INVALID_PARAMETER); + + return send_port_transfer(ctx->send_port + port, token); +} + +int ia_css_syscom_recv_port_open( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + int state; + + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + /* check if SP syscom is ready to open the queue */ + state = regmem_load_32(ctx->cell_dmem_addr, + SYSCOM_STATE_REG, ctx->env.ssid); + if (state != SYSCOM_STATE_READY) { + /* SPC is not ready to handle messages yet */ + return FW_ERROR_BUSY; + } + + /* initialize the port */ + recv_port_open(ctx->recv_port + port, + ctx->output_queue + port, &(ctx->env)); + + return 0; +} + +int ia_css_syscom_recv_port_close( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check parameters */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return 0; +} + +/* + * Get the number of responses in the response queue + */ +int +ia_css_syscom_recv_port_available( + struct ia_css_syscom_context *ctx, + unsigned int port +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return recv_port_available(ctx->recv_port + port); +} + + +/* + * Dequeue the head of the response queue + * returns an error when the response queue is empty + */ +int +ia_css_syscom_recv_port_transfer( + struct ia_css_syscom_context *ctx, + unsigned int port, + void *token +) +{ + /* check params */ + verifret(ctx != NULL, FW_ERROR_BAD_ADDRESS); + verifret(port < ctx->num_output_queues, FW_ERROR_INVALID_PARAMETER); + + return recv_port_transfer(ctx->recv_port + port, token); +} + +#if HAS_DUAL_CMD_CTX_SUPPORT +/* + * store subsystem context information in DMEM + */ +int +ia_css_syscom_store_dmem( + struct ia_css_syscom_context *ctx, + unsigned int ssid, + unsigned int vtl0_addr_mask +) +{ + unsigned int read_back; + + NOT_USED(vtl0_addr_mask); + NOT_USED(read_back); + + if (ctx->secure) { + 
/* store VTL0 address mask in 'secure' context */
+		IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem VTL0_ADDR_MASK (%#x) @ dmem_addr %#x ssid %d\n",
+			vtl0_addr_mask, ctx->cell_dmem_addr, ssid);
+		regmem_store_32(ctx->cell_dmem_addr, SYSCOM_VTL0_ADDR_MASK, vtl0_addr_mask, ssid);
+	}
+	/* store firmware configuration address */
+	IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem CONFIG_REG (%#x) @ dmem_addr %#x ssid %d\n",
+		ctx->config_vied_addr, ctx->cell_dmem_addr, ssid);
+	regmem_store_32(ctx->cell_dmem_addr, SYSCOM_CONFIG_REG,
+		ctx->config_vied_addr, ssid);
+	/* store syscom uninitialized state */
+	IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem STATE_REG (%#x) @ dmem_addr %#x ssid %d\n",
+		SYSCOM_STATE_UNINIT, ctx->cell_dmem_addr, ssid);
+	regmem_store_32(ctx->cell_dmem_addr, SYSCOM_STATE_REG,
+		SYSCOM_STATE_UNINIT, ssid);
+	/* store syscom uninitialized command */
+	IA_CSS_TRACE_3(SYSCOM, INFO, "ia_css_syscom_store_dmem COMMAND_REG (%#x) @ dmem_addr %#x ssid %d\n",
+		SYSCOM_COMMAND_UNINIT, ctx->cell_dmem_addr, ssid);
+	regmem_store_32(ctx->cell_dmem_addr, SYSCOM_COMMAND_REG,
+		SYSCOM_COMMAND_UNINIT, ssid);
+
+	return 0;
+}
+
+/*
+ * store trustlet configuration status
+ */
+void
+ia_css_syscom_set_trustlet_status(
+	unsigned int dmem_addr,
+	unsigned int ssid,
+	bool trustlet_exist
+)
+{
+	unsigned int value;
+
+	value = trustlet_exist ? TRUSTLET_EXIST : TRUSTLET_NOT_EXIST;
+	IA_CSS_TRACE_3(SYSCOM, INFO,
+		"ia_css_syscom_set_trustlet_status TRUSTLET_STATUS (%#x) @ dmem_addr %#x ssid %d\n",
+		value, dmem_addr, ssid);
+	regmem_store_32(dmem_addr, TRUSTLET_STATUS, value, ssid);
+}
+
+/*
+ * check if SPC access blocker programming is completed
+ */
+bool
+ia_css_syscom_is_ab_spc_ready(
+	struct ia_css_syscom_context *ctx
+)
+{
+	unsigned int value;
+
+	/* We expect this call from the non-secure context only */
+	if (ctx->secure) {
+		IA_CSS_TRACE_0(SYSCOM, ERROR, "ia_css_syscom_is_ab_spc_ready - Please call from non-secure context\n");
+		return false;
+	}
+
+	value = regmem_load_32(ctx->cell_dmem_addr, AB_SPC_STATUS, ctx->env.ssid);
+	IA_CSS_TRACE_3(SYSCOM, INFO,
+		"ia_css_syscom_is_ab_spc_ready AB_SPC_STATUS @ dmem_addr %#x ssid %d - value %#x\n",
+		ctx->cell_dmem_addr, ctx->env.ssid, value);
+
+	return (value == AB_SPC_READY);
+}
+#endif /* HAS_DUAL_CMD_CTX_SUPPORT */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom_config_fw.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom_config_fw.h
new file mode 100644
index 000000000000..0cacd5a34934
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom_config_fw.h
@@ -0,0 +1,69 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
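For clarity, the send-port half of the API above is meant to be driven open-check-transfer: ia_css_syscom_send_port_open() keeps returning FW_ERROR_BUSY until the SPC has published SYSCOM_STATE_READY, and a token may be queued once send_port_available() reports space. A minimal host-side sketch of that sequence (the single-queue setup, port index 0 and the busy-wait loop are illustrative assumptions, not part of the API contract):

/* Sketch: push one token into input queue 0 of an existing context */
static int send_one_token(struct ia_css_syscom_context *ctx, const void *token)
{
	int ret;

	do {	/* wait until the SPC firmware reports SYSCOM_STATE_READY */
		ret = ia_css_syscom_send_port_open(ctx, 0);
	} while (ret == FW_ERROR_BUSY);
	if (ret)
		return ret;

	if (ia_css_syscom_send_port_available(ctx, 0) <= 0)
		return FW_ERROR_BUSY;	/* illustrative: caller retries later */

	ret = ia_css_syscom_send_port_transfer(ctx, 0, token);
	ia_css_syscom_send_port_close(ctx, 0);
	return ret;
}

A production caller would bound or sleep the open loop rather than spin; the sketch only shows the call order.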
+*/
+
+#ifndef __IA_CSS_SYSCOM_CONFIG_FW_H
+#define __IA_CSS_SYSCOM_CONFIG_FW_H
+
+#include "type_support.h"
+
+enum {
+	/* Program load or explicit host setting should init to this */
+	SYSCOM_STATE_UNINIT = 0x57A7E000,
+	/* SP Syscom sets this when it is ready for use */
+	SYSCOM_STATE_READY = 0x57A7E001,
+	/* SP Syscom sets this when no more syscom accesses will happen */
+	SYSCOM_STATE_INACTIVE = 0x57A7E002
+};
+
+enum {
+	/* Program load or explicit host setting should init to this */
+	SYSCOM_COMMAND_UNINIT = 0x57A7F000,
+	/* Host Syscom requests syscom to become inactive */
+	SYSCOM_COMMAND_INACTIVE = 0x57A7F001
+};
+
+#if HAS_DUAL_CMD_CTX_SUPPORT
+enum {
+	/* Program load or explicit host setting should init to this */
+	TRUSTLET_UNINIT = 0x57A8E000,
+	/* Host Syscom informs SP that Trustlet exists */
+	TRUSTLET_EXIST = 0x57A8E001,
+	/* Host Syscom informs SP that Trustlet does not exist */
+	TRUSTLET_NOT_EXIST = 0x57A8E002
+};
+
+enum {
+	/* Program load or explicit setting initialized by SP */
+	AB_SPC_NOT_READY = 0x57A8F000,
+	/* SP informs host that SPC access programming is completed */
+	AB_SPC_READY = 0x57A8F001
+};
+#endif
+
+/* firmware config: data that is sent from the host to SP via DDR */
+/* Cell copies data into a context */
+
+struct ia_css_syscom_config_fw {
+	unsigned int firmware_address;
+
+	unsigned int num_input_queues;
+	unsigned int num_output_queues;
+	unsigned int input_queue;	/* hmm_ptr / struct queue* */
+	unsigned int output_queue;	/* hmm_ptr / struct queue* */
+
+	unsigned int specific_addr;	/* vied virtual address */
+	unsigned int specific_size;
+};
+
+#endif /* __IA_CSS_SYSCOM_CONFIG_FW_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom_context.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom_context.h
new file mode 100644
index 000000000000..ecf22f6b7ac5
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/src/ia_css_syscom_context.h
@@ -0,0 +1,65 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
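The state and command words above implement a small mailbox handshake: the host seeds both registers with the *_UNINIT patterns via ia_css_syscom_store_dmem(), the SP flips the state to READY, and shutdown is requested by writing SYSCOM_COMMAND_INACTIVE and waiting for SYSCOM_STATE_INACTIVE. A hedged sketch of the host-side shutdown poll, reusing the regmem accessors seen earlier (the retry bound is an arbitrary illustrative choice, not an API value):

/* Sketch: request that the SP syscom go inactive and wait for the ack */
static int syscom_request_inactive(unsigned int dmem_addr, unsigned int ssid)
{
	int retries = 1000;	/* illustrative bound */

	regmem_store_32(dmem_addr, SYSCOM_COMMAND_REG,
			SYSCOM_COMMAND_INACTIVE, ssid);
	while (regmem_load_32(dmem_addr, SYSCOM_STATE_REG, ssid) !=
	       SYSCOM_STATE_INACTIVE) {
		if (--retries == 0)
			return FW_ERROR_BUSY;
	}
	return 0;
}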
+*/
+
+#ifndef __IA_CSS_SYSCOM_CONTEXT_H
+#define __IA_CSS_SYSCOM_CONTEXT_H
+
+#include
+
+#include "port_env_struct.h"
+#include
+
+/* host context */
+struct ia_css_syscom_context {
+	vied_virtual_address_t cell_firmware_addr;
+	unsigned int cell_regs_addr;
+	unsigned int cell_dmem_addr;
+
+	struct port_env env;
+
+	unsigned int num_input_queues;
+	unsigned int num_output_queues;
+
+	/* array of input queues (from host to SP) */
+	struct sys_queue *input_queue;
+	/* array of output queues (from SP to host) */
+	struct sys_queue *output_queue;
+
+	struct send_port *send_port;
+	struct recv_port *recv_port;
+
+	unsigned int regmem_idx;
+	unsigned int free_buf;
+
+	host_virtual_address_t config_host_addr;
+	host_virtual_address_t input_queue_host_addr;
+	host_virtual_address_t output_queue_host_addr;
+	host_virtual_address_t specific_host_addr;
+	host_virtual_address_t ibuf_host_addr;
+	host_virtual_address_t obuf_host_addr;
+
+	vied_virtual_address_t config_vied_addr;
+	vied_virtual_address_t input_queue_vied_addr;
+	vied_virtual_address_t output_queue_vied_addr;
+	vied_virtual_address_t specific_vied_addr;
+	vied_virtual_address_t ibuf_vied_addr;
+	vied_virtual_address_t obuf_vied_addr;
+
+	/* if true, secure syscom object as in the VTIO case
+	 * if false, non-secure syscom
+	 */
+	bool secure;
+};
+
+#endif /* __IA_CSS_SYSCOM_CONTEXT_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/syscom.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/syscom.mk
new file mode 100644
index 000000000000..8d36b8928af5
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/syscom/syscom.mk
@@ -0,0 +1,42 @@
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+#
+# MODULE is SYSCOM
+
+SYSCOM_DIR=$${MODULES_DIR}/syscom
+
+SYSCOM_INTERFACE=$(SYSCOM_DIR)/interface
+SYSCOM_SOURCES1=$(SYSCOM_DIR)/src
+
+SYSCOM_HOST_FILES += $(SYSCOM_SOURCES1)/ia_css_syscom.c
+
+SYSCOM_HOST_CPPFLAGS += -I$(SYSCOM_INTERFACE)
+SYSCOM_HOST_CPPFLAGS += -I$(SYSCOM_SOURCES1)
+SYSCOM_HOST_CPPFLAGS += -I$${MODULES_DIR}/devices
+ifdef REGMEM_SECURE_OFFSET
+SYSCOM_HOST_CPPFLAGS += -DREGMEM_SECURE_OFFSET=$(REGMEM_SECURE_OFFSET)
+else
+SYSCOM_HOST_CPPFLAGS += -DREGMEM_SECURE_OFFSET=0
+endif
+
+SYSCOM_FW_FILES += $(SYSCOM_SOURCES1)/ia_css_syscom_fw.c
+
+SYSCOM_FW_CPPFLAGS += -I$(SYSCOM_INTERFACE)
+SYSCOM_FW_CPPFLAGS += -I$(SYSCOM_SOURCES1)
+SYSCOM_FW_CPPFLAGS += -DREGMEM_OFFSET=$(REGMEM_OFFSET)
+ifdef REGMEM_SECURE_OFFSET
+SYSCOM_FW_CPPFLAGS += -DREGMEM_SECURE_OFFSET=$(REGMEM_SECURE_OFFSET)
+else
+SYSCOM_FW_CPPFLAGS += -DREGMEM_SECURE_OFFSET=0
+endif
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/trace/interface/ia_css_trace.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/trace/interface/ia_css_trace.h
new file mode 100644
index 000000000000..b85b1810f107
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/trace/interface/ia_css_trace.h
@@ -0,0 +1,883 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +/*! \file */ + +#ifndef __IA_CSS_TRACE_H +#define __IA_CSS_TRACE_H + +/* +** Configurations +*/ + +/** + * STEP 1: Define {Module Name}_TRACE_METHOD to one of the following. + * Where: + * {Module Name} is the name of the targeted module. + * + * Example: + * #define NCI_DMA_TRACE_METHOD IA_CSS_TRACE_METHOD_NATIVE + */ + +/**< Use whatever method of tracing that best suits the platform + * this code is compiled for. + */ +#define IA_CSS_TRACE_METHOD_NATIVE 1 +/**< Use the Tracing NCI. */ +#define IA_CSS_TRACE_METHOD_TRACE 2 + +/** + * STEP 2: Define {Module Name}_TRACE_LEVEL_{Level} to one of the following. + * Where: + * {Module Name} is the name of the targeted module. + * {Level}, in decreasing order of severity, is one of the + * following values: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * + * Example: + * #define NCI_DMA_TRACE_LEVEL_ASSERT IA_CSS_TRACE_LEVEL_DISABLED + * #define NCI_DMA_TRACE_LEVEL_ERROR IA_CSS_TRACE_LEVEL_ENABLED + */ +/**< Disables the corresponding trace level. */ +#define IA_CSS_TRACE_LEVEL_DISABLED 0 +/**< Enables the corresponding trace level. */ +#define IA_CSS_TRACE_LEVEL_ENABLED 1 + +/* + * Used in macro definition with do-while loop + * for removing checkpatch warnings + */ +#define IA_CSS_TRACE_FILE_DUMMY_DEFINE + +/** + * STEP 3: Define IA_CSS_TRACE_PRINT_FILE_LINE to have file name and + * line printed with every log message. + * + * Example: + * #define IA_CSS_TRACE_PRINT_FILE_LINE + */ + +/* +** Interface +*/ + +/* +** Static +*/ + +/** + * Logs a message with zero arguments if the targeted severity level is enabled + * at compile-time. + * @param module The targeted module. + * @param severity The severity level of the trace message. In decreasing order: + * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}. + * @param format The message to be traced. + */ +#define IA_CSS_TRACE_0(module, severity, format) \ + IA_CSS_TRACE_IMPL(module, 0, severity, format) + +/** + * Logs a message with one argument if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_1(module, severity, format, a1) \ + IA_CSS_TRACE_IMPL(module, 1, severity, format, a1) + +/** + * Logs a message with two arguments if the targeted severity level is enabled + * at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_2(module, severity, format, a1, a2) \ + IA_CSS_TRACE_IMPL(module, 2, severity, format, a1, a2) + +/** + * Logs a message with three arguments if the targeted severity level + * is enabled at compile-time. + * @see IA_CSS_TRACE_0 + */ +#define IA_CSS_TRACE_3(module, severity, format, a1, a2, a3) \ + IA_CSS_TRACE_IMPL(module, 3, severity, format, a1, a2, a3) + +/** + * Logs a message with four arguments if the targeted severity level is enabled + * at compile-time. 
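Following STEP 1 and STEP 2 above, a client module selects a backend and per-level switches before including this header, and then logs through the fixed-arity macros; the FOO module name below is purely illustrative:

/* Hypothetical module configuration (normally in the module's own header) */
#define FOO_TRACE_METHOD	IA_CSS_TRACE_METHOD_NATIVE
#define FOO_TRACE_LEVEL_ERROR	IA_CSS_TRACE_LEVEL_ENABLED
#define FOO_TRACE_LEVEL_INFO	IA_CSS_TRACE_LEVEL_ENABLED
#include "ia_css_trace.h"

static void foo_start(int status)
{
	IA_CSS_TRACE_0(FOO, INFO, "foo_start entered\n");
	if (status != 0)
		IA_CSS_TRACE_1(FOO, ERROR, "unexpected status %d\n", status);
}

Only severities actually used in calls need a level define; a level defined as IA_CSS_TRACE_LEVEL_DISABLED resolves to an empty macro body, so the call compiles away.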
+ * @see IA_CSS_TRACE_0
+ */
+#define IA_CSS_TRACE_4(module, severity, format, a1, a2, a3, a4) \
+	IA_CSS_TRACE_IMPL(module, 4, severity, format, a1, a2, a3, a4)
+
+/**
+ * Logs a message with five arguments if the targeted severity level is enabled
+ * at compile-time.
+ * @see IA_CSS_TRACE_0
+ */
+#define IA_CSS_TRACE_5(module, severity, format, a1, a2, a3, a4, a5) \
+	IA_CSS_TRACE_IMPL(module, 5, severity, format, a1, a2, a3, a4, a5)
+
+/**
+ * Logs a message with six arguments if the targeted severity level is enabled
+ * at compile-time.
+ * @see IA_CSS_TRACE_0
+ */
+#define IA_CSS_TRACE_6(module, severity, format, a1, a2, a3, a4, a5, a6) \
+	IA_CSS_TRACE_IMPL(module, 6, severity, format, a1, a2, a3, a4, a5, a6)
+
+/**
+ * Logs a message with seven arguments if the targeted severity level
+ * is enabled at compile-time.
+ * @see IA_CSS_TRACE_0
+ */
+#define IA_CSS_TRACE_7(module, severity, format, a1, a2, a3, a4, a5, a6, a7) \
+	IA_CSS_TRACE_IMPL(module, 7, severity, format, \
+		a1, a2, a3, a4, a5, a6, a7)
+
+/*
+** Dynamic
+*/
+
+/**
+* Declares, but does not define, dynamic tracing functions and variables
+* for module \p module. For each module, place an instance of this macro
+* in the compilation unit in which you want to use the dynamic tracing
+* facility so as to inform the compiler of the declaration of the available
+* functions. An invocation of this macro does not enable any of the available
+* tracing levels. Do not place a semicolon after a call to this macro.
+* @see IA_CSS_TRACE_DYNAMIC_DEFINE
+*/
+#define IA_CSS_TRACE_DYNAMIC_DECLARE(module) \
+	IA_CSS_TRACE_DYNAMIC_DECLARE_IMPL(module)
+/**
+* Declares the configuration function for the dynamic API separately, if one
+* wants to use it.
+*/
+#define IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC(module) \
+	IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC_IMPL(module)
+
+/**
+* Defines dynamic tracing functions and variables for module \p module.
+* For each module, place an instance of this macro in one, and only one,
+* of your SOURCE files so as to allow the linker to resolve the related
+* symbols. An invocation of this macro does not enable any of the available
+* tracing levels. Do not place a semicolon after a call to this macro.
+* @see IA_CSS_TRACE_DYNAMIC_DECLARE
+*/
+#define IA_CSS_TRACE_DYNAMIC_DEFINE(module) \
+	IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(module)
+/**
+* Defines the configuration function for the dynamic API separately, if one
+* wants to use it.
+*/
+#define IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC(module) \
+	IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(module)
+
+/**
+ * Logs a message with zero arguments if the targeted severity level is enabled
+ * both at compile-time, and run-time.
+ * @param module The targeted module.
+ * @param severity The severity level of the trace message. In decreasing order:
+ * {ASSERT, ERROR, WARNING, INFO, DEBUG, VERBOSE}.
+ * @param format The message to be traced.
+ */
+#define IA_CSS_TRACE_DYNAMIC_0(module, severity, format) \
+	IA_CSS_TRACE_DYNAMIC_IMPL(module, 0, severity, format)
+
+/**
+ * Logs a message with one argument if the targeted severity level is enabled
+ * both at compile-time, and run-time.
+ * @see IA_CSS_TRACE_DYNAMIC_0
+ */
+#define IA_CSS_TRACE_DYNAMIC_1(module, severity, format, a1) \
+	IA_CSS_TRACE_DYNAMIC_IMPL(module, 1, severity, format, a1)
+
+/**
+ * Logs a message with two arguments if the targeted severity level is enabled
+ * both at compile-time, and run-time.
+ * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_2(module, severity, format, a1, a2) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 2, severity, format, a1, a2) + +/** + * Logs a message with three arguments if the targeted severity level + * is enabled both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_3(module, severity, format, a1, a2, a3) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 3, severity, format, a1, a2, a3) + +/** + * Logs a message with four arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_4(module, severity, format, a1, a2, a3, a4) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 4, severity, format, a1, a2, a3, a4) + +/** + * Logs a message with five arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_5(module, severity, format, a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 5, severity, format, \ + a1, a2, a3, a4, a5) + +/** + * Logs a message with six arguments if the targeted severity level is enabled + * both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_6(module, severity, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 6, severity, format, \ + a1, a2, a3, a4, a5, a6) + +/** + * Logs a message with seven arguments if the targeted severity level + * is enabled both at compile-time, and run-time. + * @see IA_CSS_TRACE_DYNAMIC_0 + */ +#define IA_CSS_TRACE_DYNAMIC_7(module, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_DYNAMIC_IMPL(module, 7, severity, format, \ + a1, a2, a3, a4, a5, a6, a7) + +/* +** Implementation +*/ + +/* CAT */ +#define IA_CSS_TRACE_CAT_IMPL(a, b) a ## b +#define IA_CSS_TRACE_CAT(a, b) IA_CSS_TRACE_CAT_IMPL(a, b) + +/* Bridge */ +#if defined(__HIVECC) || defined(__GNUC__) +#define IA_CSS_TRACE_IMPL(module, argument_count, severity, arguments ...) \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_, \ + argument_count \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_LEVEL_ \ + ), \ + severity \ + ) \ + ( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_SEVERITY_, \ + severity \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + #module, \ + ## arguments \ + ) \ + ) + +/* Bridge */ +#define IA_CSS_TRACE_DYNAMIC_IMPL(module, argument_count, severity, \ + arguments ...) \ + do { \ + if (IA_CSS_TRACE_CAT(IA_CSS_TRACE_CAT(module, _trace_level_), \ + severity)) { \ + IA_CSS_TRACE_IMPL(module, argument_count, severity, \ + ## arguments); \ + } \ + } while (0) +#elif defined(_MSC_VER) +#define IA_CSS_TRACE_IMPL(module, argument_count, severity, ...) 
\ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_, \ + argument_count \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_LEVEL_ \ + ), \ + severity \ + ) \ + ( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_CAT( \ + IA_CSS_TRACE_SEVERITY_, \ + severity \ + ), \ + _ \ + ), \ + IA_CSS_TRACE_CAT( \ + module, \ + _TRACE_METHOD \ + ) \ + ), \ + #module, \ + __VA_ARGS__ \ + ) \ + ) + +/* Bridge */ +#define IA_CSS_TRACE_DYNAMIC_IMPL(module, argument_count, severity, ...) \ + do { \ + if (IA_CSS_TRACE_CAT(IA_CSS_TRACE_CAT(module, _trace_level_), \ + severity)) { \ + IA_CSS_TRACE_IMPL(module, argument_count, severity, \ + __VA_ARGS__); \ + } \ + } while (0) +#endif + +/* +** Native Backend +*/ + +#if defined(__HIVECC) + #define IA_CSS_TRACE_PLATFORM_CELL +#elif defined(__GNUC__) + #define IA_CSS_TRACE_PLATFORM_HOST + + #define IA_CSS_TRACE_NATIVE(severity, module, format, arguments ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, module, \ + format), ## arguments); \ + } while (0) + /* TODO: In case Host Side tracing is needed to be mapped to the + * Tunit, the following "IA_CSS_TRACE_TRACE" needs to be modified from + * PRINT to vied_nci_tunit_print function calls + */ + #define IA_CSS_TRACE_TRACE(severity, module, format, arguments ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, module, \ + format), ## arguments); \ + } while (0) + +#elif defined(_MSC_VER) + #define IA_CSS_TRACE_PLATFORM_HOST + + #define IA_CSS_TRACE_NATIVE(severity, module, format, ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, \ + module, format), __VA_ARGS__); \ + } while (0) + /* TODO: In case Host Side tracing is needed to be mapped to the + * Tunit, the following "IA_CSS_TRACE_TRACE" needs to be modified from + * PRINT to vied_nci_tunit_print function calls + */ + #define IA_CSS_TRACE_TRACE(severity, module, format, ...) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + PRINT(IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, \ + module, format), __VA_ARGS__); \ + } while (0) +#else + #error Unsupported platform! 
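To make the token-pasting bridge above concrete: with NCI_DMA_TRACE_METHOD defined as IA_CSS_TRACE_METHOD_NATIVE (1) and NCI_DMA_TRACE_LEVEL_ERROR enabled (1), a call such as

	IA_CSS_TRACE_1(NCI_DMA, ERROR, "bad status %d\n", s);

concatenates to IA_CSS_TRACE_1_1_1 (one argument, backend 1, enabled), i.e. IA_CSS_TRACE_NATIVE_1, invoked with IA_CSS_TRACE_SEVERITY_ERROR_1 ("Error") and the stringized module name "NCI_DMA". With the level disabled it resolves to the empty IA_CSS_TRACE_1_1_0 instead, so the call disappears at compile time.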
+#endif /* Platform */ + +#if defined(IA_CSS_TRACE_PLATFORM_CELL) + #include /* VOLATILE */ + + #ifdef IA_CSS_TRACE_PRINT_FILE_LINE + #define IA_CSS_TRACE_FILE_PRINT_COMMAND \ + do { \ + OP___printstring(__FILE__":") VOLATILE; \ + OP___printdec(__LINE__) VOLATILE; \ + OP___printstring("\n") VOLATILE; \ + } while (0) + #else + #define IA_CSS_TRACE_FILE_PRINT_COMMAND + #endif + + #define IA_CSS_TRACE_MODULE_SEVERITY_PRINT(module, severity) \ + do { \ + IA_CSS_TRACE_FILE_DUMMY_DEFINE; \ + OP___printstring("["module"]:["severity"]:") \ + VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_MSG_NATIVE(severity, module, format) \ + do { \ + IA_CSS_TRACE_FILE_PRINT_COMMAND; \ + OP___printstring("["module"]:["severity"]: "format) \ + VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_ARG_NATIVE(module, severity, i, value) \ + do { \ + IA_CSS_TRACE_MODULE_SEVERITY_PRINT(module, severity); \ + OP___dump(i, value) VOLATILE; \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_0(severity, module, format) \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format) + + #define IA_CSS_TRACE_NATIVE_1(severity, module, format, a1) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_2(severity, module, format, a1, a2) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_3(severity, module, format, a1, a2, a3) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_4(severity, module, format, \ + a1, a2, a3, a4) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 6, a6); \ + } while (0) + + #define IA_CSS_TRACE_NATIVE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + do { \ + IA_CSS_TRACE_MSG_NATIVE(severity, module, format); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 1, a1); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 2, a2); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 3, a3); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 4, a4); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 5, a5); \ + 
IA_CSS_TRACE_ARG_NATIVE(module, severity, 6, a6); \ + IA_CSS_TRACE_ARG_NATIVE(module, severity, 7, a7); \ + } while (0) + /* + ** Tracing Backend + */ +#if !defined(HRT_CSIM) && !defined(NO_TUNIT) + #include "vied_nci_tunit.h" +#endif + #define IA_CSS_TRACE_AUG_FORMAT_TRACE(format, module) \ + "[" module "]" format " : PID = %x : Timestamp = %d : PC = %x" + + #define IA_CSS_TRACE_TRACE_0(severity, module, format) \ + vied_nci_tunit_print(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity) + + #define IA_CSS_TRACE_TRACE_1(severity, module, format, a1) \ + vied_nci_tunit_print1i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1) + + #define IA_CSS_TRACE_TRACE_2(severity, module, format, a1, a2) \ + vied_nci_tunit_print2i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2) + + #define IA_CSS_TRACE_TRACE_3(severity, module, format, a1, a2, a3) \ + vied_nci_tunit_print3i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3) + + #define IA_CSS_TRACE_TRACE_4(severity, module, format, a1, a2, a3, a4) \ + vied_nci_tunit_print4i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4) + + #define IA_CSS_TRACE_TRACE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + vied_nci_tunit_print5i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_TRACE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + vied_nci_tunit_print6i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_TRACE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + vied_nci_tunit_print7i(IA_CSS_TRACE_AUG_FORMAT_TRACE(format, \ + module), \ + severity, a1, a2, a3, a4, a5, a6, a7) + +#elif defined(IA_CSS_TRACE_PLATFORM_HOST) + #include "print_support.h" + + #ifdef IA_CSS_TRACE_PRINT_FILE_LINE + #define IA_CSS_TRACE_FILE_PRINT_COMMAND \ + PRINT("%s:%d:\n", __FILE__, __LINE__) + #else + #define IA_CSS_TRACE_FILE_PRINT_COMMAND + #endif + + #define IA_CSS_TRACE_FORMAT_AUG_NATIVE(severity, module, format) \ + "[" module "]:[" severity "]: " format + + #define IA_CSS_TRACE_NATIVE_0(severity, module, format) \ + IA_CSS_TRACE_NATIVE(severity, module, format) + + #define IA_CSS_TRACE_NATIVE_1(severity, module, format, a1) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1) + + #define IA_CSS_TRACE_NATIVE_2(severity, module, format, a1, a2) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2) + + #define IA_CSS_TRACE_NATIVE_3(severity, module, format, a1, a2, a3) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2, a3) + + #define IA_CSS_TRACE_NATIVE_4(severity, module, format, \ + a1, a2, a3, a4) \ + IA_CSS_TRACE_NATIVE(severity, module, format, a1, a2, a3, a4) + + #define IA_CSS_TRACE_NATIVE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_NATIVE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_NATIVE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_NATIVE(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) + + #define IA_CSS_TRACE_FORMAT_AUG_TRACE(severity, module, format) \ + "["module"]:["severity"]: "format + + #define IA_CSS_TRACE_TRACE_0(severity, module, format) \ + IA_CSS_TRACE_TRACE(severity, module, format) + + #define 
IA_CSS_TRACE_TRACE_1(severity, module, format, a1) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1) + + #define IA_CSS_TRACE_TRACE_2(severity, module, format, a1, a2) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2) + + #define IA_CSS_TRACE_TRACE_3(severity, module, format, a1, a2, a3) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2, a3) + + #define IA_CSS_TRACE_TRACE_4(severity, module, format, \ + a1, a2, a3, a4) \ + IA_CSS_TRACE_TRACE(severity, module, format, a1, a2, a3, a4) + + #define IA_CSS_TRACE_TRACE_5(severity, module, format, \ + a1, a2, a3, a4, a5) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5) + + #define IA_CSS_TRACE_TRACE_6(severity, module, format, \ + a1, a2, a3, a4, a5, a6) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5, a6) + + #define IA_CSS_TRACE_TRACE_7(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) \ + IA_CSS_TRACE_TRACE(severity, module, format, \ + a1, a2, a3, a4, a5, a6, a7) +#endif + +/* Disabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_1_0(severity, module, format) +#define IA_CSS_TRACE_1_1_0(severity, module, format, arg1) +#define IA_CSS_TRACE_2_1_0(severity, module, format, arg1, arg2) +#define IA_CSS_TRACE_3_1_0(severity, module, format, arg1, arg2, arg3) +#define IA_CSS_TRACE_4_1_0(severity, module, format, arg1, arg2, arg3, arg4) +#define IA_CSS_TRACE_5_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5) +#define IA_CSS_TRACE_6_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6) +#define IA_CSS_TRACE_7_1_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7) + +/* Enabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_1_1 IA_CSS_TRACE_NATIVE_0 +#define IA_CSS_TRACE_1_1_1 IA_CSS_TRACE_NATIVE_1 +#define IA_CSS_TRACE_2_1_1 IA_CSS_TRACE_NATIVE_2 +#define IA_CSS_TRACE_3_1_1 IA_CSS_TRACE_NATIVE_3 +#define IA_CSS_TRACE_4_1_1 IA_CSS_TRACE_NATIVE_4 +#define IA_CSS_TRACE_5_1_1 IA_CSS_TRACE_NATIVE_5 +#define IA_CSS_TRACE_6_1_1 IA_CSS_TRACE_NATIVE_6 +#define IA_CSS_TRACE_7_1_1 IA_CSS_TRACE_NATIVE_7 + +/* Enabled */ +/* Legend: IA_CSS_TRACE_SEVERITY_{Severity Level}_{Backend ID} */ +#define IA_CSS_TRACE_SEVERITY_ASSERT_1 "Assert" +#define IA_CSS_TRACE_SEVERITY_ERROR_1 "Error" +#define IA_CSS_TRACE_SEVERITY_WARNING_1 "Warning" +#define IA_CSS_TRACE_SEVERITY_INFO_1 "Info" +#define IA_CSS_TRACE_SEVERITY_DEBUG_1 "Debug" +#define IA_CSS_TRACE_SEVERITY_VERBOSE_1 "Verbose" + +/* Disabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_2_0(severity, module, format) +#define IA_CSS_TRACE_1_2_0(severity, module, format, arg1) +#define IA_CSS_TRACE_2_2_0(severity, module, format, arg1, arg2) +#define IA_CSS_TRACE_3_2_0(severity, module, format, arg1, arg2, arg3) +#define IA_CSS_TRACE_4_2_0(severity, module, format, arg1, arg2, arg3, arg4) +#define IA_CSS_TRACE_5_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5) +#define IA_CSS_TRACE_6_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6) +#define IA_CSS_TRACE_7_2_0(severity, module, format, arg1, arg2, arg3, arg4, \ + arg5, arg6, arg7) + +/* Enabled */ +/* Legend: IA_CSS_TRACE_{Argument Count}_{Backend ID}_{Enabled} */ +#define IA_CSS_TRACE_0_2_1 IA_CSS_TRACE_TRACE_0 +#define IA_CSS_TRACE_1_2_1 IA_CSS_TRACE_TRACE_1 +#define IA_CSS_TRACE_2_2_1 IA_CSS_TRACE_TRACE_2 +#define IA_CSS_TRACE_3_2_1 IA_CSS_TRACE_TRACE_3 +#define 
IA_CSS_TRACE_4_2_1 IA_CSS_TRACE_TRACE_4 +#define IA_CSS_TRACE_5_2_1 IA_CSS_TRACE_TRACE_5 +#define IA_CSS_TRACE_6_2_1 IA_CSS_TRACE_TRACE_6 +#define IA_CSS_TRACE_7_2_1 IA_CSS_TRACE_TRACE_7 + +/* Enabled */ +/* Legend: IA_CSS_TRACE_SEVERITY_{Severity Level}_{Backend ID} */ +#define IA_CSS_TRACE_SEVERITY_ASSERT_2 VIED_NCI_TUNIT_MSG_SEVERITY_FATAL +#define IA_CSS_TRACE_SEVERITY_ERROR_2 VIED_NCI_TUNIT_MSG_SEVERITY_ERROR +#define IA_CSS_TRACE_SEVERITY_WARNING_2 VIED_NCI_TUNIT_MSG_SEVERITY_WARNING +#define IA_CSS_TRACE_SEVERITY_INFO_2 VIED_NCI_TUNIT_MSG_SEVERITY_NORMAL +#define IA_CSS_TRACE_SEVERITY_DEBUG_2 VIED_NCI_TUNIT_MSG_SEVERITY_USER1 +#define IA_CSS_TRACE_SEVERITY_VERBOSE_2 VIED_NCI_TUNIT_MSG_SEVERITY_USER2 + +/* +** Dynamicism +*/ + +#define IA_CSS_TRACE_DYNAMIC_DECLARE_IMPL(module) \ + do { \ + void IA_CSS_TRACE_CAT(module, _trace_assert_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_assert_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_error_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_error_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_warning_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_warning_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_info_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_info_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_debug_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_debug_disable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_enable)(void); \ + void IA_CSS_TRACE_CAT(module, _trace_verbose_disable)(void); \ + } while (0) + +#define IA_CSS_TRACE_DYNAMIC_DECLARE_CONFIG_FUNC_IMPL(module) \ + do { \ + IA_CSS_TRACE_FILE_DUMMY_DEFINE; \ + void IA_CSS_TRACE_CAT(module, _trace_configure)\ + (int argc, const char *const *argv); \ + } while (0) + +#include "platform_support.h" +#include "type_support.h" + +#define IA_CSS_TRACE_DYNAMIC_DEFINE_IMPL(module) \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_assert); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_error); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_warning); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_info); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_debug); \ + static uint8_t IA_CSS_TRACE_CAT(module, _trace_level_verbose); \ + \ + void IA_CSS_TRACE_CAT(module, _trace_assert_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_assert) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_assert_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_assert) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_error_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_error) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_error_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_error) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_warning_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_warning) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_warning_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_warning) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_info_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_info) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_info_disable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_info) = 0; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, _trace_debug_enable)(void) \ + { \ + IA_CSS_TRACE_CAT(module, _trace_level_debug) = 1; \ + } \ + \ + void IA_CSS_TRACE_CAT(module, 
_trace_debug_disable)(void) \
+	{ \
+		IA_CSS_TRACE_CAT(module, _trace_level_debug) = 0; \
+	} \
+	\
+	void IA_CSS_TRACE_CAT(module, _trace_verbose_enable)(void) \
+	{ \
+		IA_CSS_TRACE_CAT(module, _trace_level_verbose) = 1; \
+	} \
+	\
+	void IA_CSS_TRACE_CAT(module, _trace_verbose_disable)(void) \
+	{ \
+		IA_CSS_TRACE_CAT(module, _trace_level_verbose) = 0; \
+	}
+
+#define IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC_IMPL(module) \
+void IA_CSS_TRACE_CAT(module, _trace_configure)(const int argc, \
+	const char *const *const argv) \
+{ \
+	int i = 1; \
+	const char *levels = 0; \
+	\
+	while (i < argc) { \
+		if (!strcmp(argv[i], "-" #module "_trace")) { \
+			++i; \
+			\
+			if (i < argc) { \
+				levels = argv[i]; \
+				\
+				while (*levels) { \
+					switch (*levels++) { \
+					case 'a': \
+						IA_CSS_TRACE_CAT \
+						(module, _trace_assert_enable)(); \
+						break; \
+					\
+					case 'e': \
+						IA_CSS_TRACE_CAT \
+						(module, _trace_error_enable)(); \
+						break; \
+					\
+					case 'w': \
+						IA_CSS_TRACE_CAT \
+						(module, _trace_warning_enable)(); \
+						break; \
+					\
+					case 'i': \
+						IA_CSS_TRACE_CAT \
+						(module, _trace_info_enable)(); \
+						break; \
+					\
+					case 'd': \
+						IA_CSS_TRACE_CAT \
+						(module, _trace_debug_enable)(); \
+						break; \
+					\
+					case 'v': \
+						IA_CSS_TRACE_CAT \
+						(module, _trace_verbose_enable)(); \
+						break; \
+					\
+					default: \
+						break; \
+					} \
+				} \
+			} \
+		} \
+		\
+		++i; \
+	} \
+}
+
+#endif /* __IA_CSS_TRACE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/trace/trace.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/trace/trace.mk
new file mode 100644
index 000000000000..b232880b882b
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/trace/trace.mk
@@ -0,0 +1,40 @@
+# # #
+# Support for Intel Camera Imaging ISP subsystem.
+# Copyright (c) 2010 - 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+#
+# MODULE Trace
+
+# Dependencies
+IA_CSS_TRACE_SUPPORT = $${MODULES_DIR}/support
+
+# API
+IA_CSS_TRACE = $${MODULES_DIR}/trace
+IA_CSS_TRACE_INTERFACE = $(IA_CSS_TRACE)/interface
+
+#
+# Host
+#
+
+# Host CPP Flags
+IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE_SUPPORT)
+IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE_INTERFACE)
+IA_CSS_TRACE_HOST_CPPFLAGS += -I$(IA_CSS_TRACE)/trace_modules
+
+#
+# Firmware
+#
+
+# Firmware CPP Flags
+IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE_SUPPORT)
+IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE_INTERFACE)
+IA_CSS_TRACE_FW_CPPFLAGS += -I$(IA_CSS_TRACE)/trace_modules
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/shared_memory_access.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/shared_memory_access.h
new file mode 100644
index 000000000000..1e81bad9f4ee
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/shared_memory_access.h
@@ -0,0 +1,139 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
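The generated <module>_trace_configure() parser above consumes a conventional "-<module>_trace <letters>" argument pair, one letter per severity (a, e, w, i, d, v). As a usage sketch, for a module instantiated elsewhere with IA_CSS_TRACE_DYNAMIC_DEFINE(psys) and IA_CSS_TRACE_DYNAMIC_DEFINE_CONFIG_FUNC(psys) (the psys name is illustrative), the following enables the error and warning levels at start-up:

/* Sketch: run-time trace configuration for a hypothetical 'psys' module */
static const char *const trace_argv[] = { "fw", "-psys_trace", "ew" };

static void configure_psys_tracing(void)
{
	psys_trace_configure(3, trace_argv);
	/* IA_CSS_TRACE_DYNAMIC_n(psys, ERROR/WARNING, ...) calls now pass
	 * the run-time level check in IA_CSS_TRACE_DYNAMIC_IMPL */
}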
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+#ifndef _SHARED_MEMORY_ACCESS_H
+#define _SHARED_MEMORY_ACCESS_H
+
+#include
+#include
+#include
+
+typedef enum {
+	sm_esuccess,
+	sm_enomem,
+	sm_ezeroalloc,
+	sm_ebadvaddr,
+	sm_einternalerror,
+	sm_ecorruption,
+	sm_enocontiguousmem,
+	sm_enolocmem,
+	sm_emultiplefree,
+} shared_memory_error;
+
+/**
+ * \brief Virtual address of (DDR) shared memory space as seen from the VIED subsystem
+ */
+typedef uint32_t vied_virtual_address_t;
+
+/**
+ * \brief Virtual address of (DDR) shared memory space as seen from the host
+ */
+typedef unsigned long long host_virtual_address_t;
+
+/**
+ * \brief List of physical addresses of (DDR) shared memory space. This is used to represent a list of physical pages.
+ */
+typedef struct shared_memory_physical_page_list_s *shared_memory_physical_page_list;
+typedef struct shared_memory_physical_page_list_s
+{
+	shared_memory_physical_page_list next;
+	vied_physical_address_t address;
+} shared_memory_physical_page_list_s;
+
+
+/**
+ * \brief Initialize the shared memory interface administration on the host.
+ * \param idm: id of ddr memory
+ * \param host_ddr_addr: physical address of memory as seen from host
+ * \param memory_size: size of ddr memory in bytes
+ * \param ps: size of page in bytes (for instance 4096)
+ */
+int shared_memory_allocation_initialize(vied_memory_t idm, vied_physical_address_t host_ddr_addr, size_t memory_size, size_t ps);
+
+/**
+ * \brief De-initialize the shared memory interface administration on the host.
+ *
+ */
+void shared_memory_allocation_uninitialize(vied_memory_t idm);
+
+/**
+ * \brief Allocate (DDR) shared memory space and return a host virtual address. Returns NULL when insufficient memory available
+ */
+host_virtual_address_t shared_memory_alloc(vied_memory_t idm, size_t bytes);
+
+/**
+ * \brief Free (DDR) shared memory space.
+*/
+void shared_memory_free(vied_memory_t idm, host_virtual_address_t addr);
+
+/**
+ * \brief Translate a virtual host address to a physical address.
+*/
+vied_physical_address_t shared_memory_virtual_host_to_physical_address (vied_memory_t idm, host_virtual_address_t addr);
+
+/**
+ * \brief Return the allocated physical pages for a virtual host address.
+*/
+shared_memory_physical_page_list shared_memory_virtual_host_to_physical_pages (vied_memory_t idm, host_virtual_address_t addr);
+
+/**
+ * \brief Destroy a shared_memory_physical_page_list.
+*/ +void shared_memory_physical_pages_list_destroy (shared_memory_physical_page_list ppl); + +/** + * \brief Store a byte into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_8 (vied_memory_t idm, host_virtual_address_t addr, uint8_t data); + +/** + * \brief Store a 16-bit word into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_16(vied_memory_t idm, host_virtual_address_t addr, uint16_t data); + +/** + * \brief Store a 32-bit word into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store_32(vied_memory_t idm, host_virtual_address_t addr, uint32_t data); + +/** + * \brief Store a number of bytes into (DDR) shared memory space using a host virtual address + */ +void shared_memory_store(vied_memory_t idm, host_virtual_address_t addr, const void *data, size_t bytes); + +/** + * \brief Set a number of bytes of (DDR) shared memory space to 0 using a host virtual address + */ +void shared_memory_zero(vied_memory_t idm, host_virtual_address_t addr, size_t bytes); + +/** + * \brief Load a byte from (DDR) shared memory space using a host virtual address + */ +uint8_t shared_memory_load_8 (vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a 16-bit word from (DDR) shared memory space using a host virtual address + */ +uint16_t shared_memory_load_16(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a 32-bit word from (DDR) shared memory space using a host virtual address + */ +uint32_t shared_memory_load_32(vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Load a number of bytes from (DDR) shared memory space using a host virtual address + */ +void shared_memory_load(vied_memory_t idm, host_virtual_address_t addr, void *data, size_t bytes); + +#endif /* _SHARED_MEMORY_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/shared_memory_map.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/shared_memory_map.h new file mode 100644 index 000000000000..1bbedcf9e7fd --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/shared_memory_map.h @@ -0,0 +1,53 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _SHARED_MEMORY_MAP_H +#define _SHARED_MEMORY_MAP_H + +#include +#include +#include + +typedef void (*shared_memory_invalidate_mmu_tlb)(void); +typedef void (*shared_memory_set_page_table_base_address)(vied_physical_address_t); + +typedef void (*shared_memory_invalidate_mmu_tlb_ssid)(vied_subsystem_t id); +typedef void (*shared_memory_set_page_table_base_address_ssid)(vied_subsystem_t id, vied_physical_address_t); + +/** + * \brief Initialize the CSS virtual address system and MMU. The subsystem id will NOT be taken into account. 
+*/ +int shared_memory_map_initialize(vied_subsystem_t id, vied_memory_t idm, size_t mmu_ps, size_t mmu_pnrs, vied_physical_address_t ddr_addr, shared_memory_invalidate_mmu_tlb inv_tlb, shared_memory_set_page_table_base_address sbt); + +/** + * \brief Initialize the CSS virtual address system and MMU. The subsystem id will be taken into account. +*/ +int shared_memory_map_initialize_ssid(vied_subsystem_t id, vied_memory_t idm, size_t mmu_ps, size_t mmu_pnrs, vied_physical_address_t ddr_addr, shared_memory_invalidate_mmu_tlb_ssid inv_tlb, shared_memory_set_page_table_base_address_ssid sbt); + +/** + * \brief De-initialize the CSS virtual address system and MMU. +*/ +void shared_memory_map_uninitialize(vied_subsystem_t id, vied_memory_t idm); + +/** + * \brief Convert a host virtual address to a CSS virtual address and update the MMU. +*/ +vied_virtual_address_t shared_memory_map(vied_subsystem_t id, vied_memory_t idm, host_virtual_address_t addr); + +/** + * \brief Free a CSS virtual address and update the MMU. +*/ +void shared_memory_unmap(vied_subsystem_t id, vied_memory_t idm, vied_virtual_address_t addr); + + +#endif /* _SHARED_MEMORY_MAP_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_config.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_config.h new file mode 100644 index 000000000000..912f016ead24 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_config.h @@ -0,0 +1,33 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_CONFIG_H +#define _HRT_VIED_CONFIG_H + +/* Defines from the compiler: + * HRT_HOST - this is code running on the host + * HRT_CELL - this is code running on a cell + */ +#ifdef HRT_HOST +# define CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL 1 +# undef CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL + +#elif defined (HRT_CELL) +# undef CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL +# define CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL 1 + +#else /* !HRT_CELL */ +/* Allow neither HRT_HOST nor HRT_CELL for testing purposes */ +#endif /* !HRT_CELL */ + +#endif /* _HRT_VIED_CONFIG_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_memory_access_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_memory_access_types.h new file mode 100644 index 000000000000..0b44492789e3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_memory_access_types.h @@ -0,0 +1,36 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
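Combined with the allocator in shared_memory_access.h, the mapping API above gives the usual host flow: allocate backing store, write it through the host handle, then map it to obtain the CSS virtual address the firmware will see. A hedged end-to-end sketch (the subsystem/memory ids and the 16-byte size are placeholders, not values from this patch):

/* Sketch: allocate, fill and publish one 32-bit word to the subsystem */
static int publish_word(vied_subsystem_t sid, vied_memory_t mid, uint32_t word)
{
	host_virtual_address_t host = shared_memory_alloc(mid, 16);
	vied_virtual_address_t css;

	if (!host)
		return -1;
	shared_memory_store_32(mid, host, word);
	css = shared_memory_map(sid, mid, host);
	/* ... hand 'css' to the cell, e.g. inside a syscom token ... */
	shared_memory_unmap(sid, mid, css);
	shared_memory_free(mid, host);
	return 0;
}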
See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_MEMORY_ACCESS_TYPES_H +#define _HRT_VIED_MEMORY_ACCESS_TYPES_H + +/** Types for the VIED memory access interface */ + +#include "vied_types.h" + +/** + * \brief An identifier for a system memory. + * + * This identifier must be a compile-time constant. It is used in + * access to system memory. + */ +typedef unsigned int vied_memory_t; + +#ifndef __HIVECC +/** + * \brief The type for a physical address + */ +typedef unsigned long long vied_physical_address_t; +#endif + +#endif /* _HRT_VIED_MEMORY_ACCESS_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access.h new file mode 100644 index 000000000000..674f5fb5b0f9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access.h @@ -0,0 +1,70 @@ +/* +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ +#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_H +#define _HRT_VIED_SUBSYSTEM_ACCESS_H + +#include +#include "vied_config.h" +#include "vied_subsystem_access_types.h" + +#if !defined(CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL) && \ + !defined(CFG_VIED_SUBSYSTEM_ACCESS_LIB_IMPL) +#error Implementation selection macro for vied subsystem access not defined +#endif + +#if defined(CFG_VIED_SUBSYSTEM_ACCESS_INLINE_IMPL) +#ifndef __HIVECC +#error "Inline implementation of subsystem access not supported for host" +#endif +#define _VIED_SUBSYSTEM_ACCESS_INLINE static __inline +#include "vied_subsystem_access_impl.h" +#else +#define _VIED_SUBSYSTEM_ACCESS_INLINE +#endif + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_8 (vied_subsystem_t dev, + vied_subsystem_address_t addr, uint8_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_16(vied_subsystem_t dev, + vied_subsystem_address_t addr, uint16_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store_32(vied_subsystem_t dev, + vied_subsystem_address_t addr, uint32_t data); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_store(vied_subsystem_t dev, + vied_subsystem_address_t addr, + const void *data, unsigned int size); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint8_t vied_subsystem_load_8 (vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint16_t vied_subsystem_load_16(vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +uint32_t vied_subsystem_load_32(vied_subsystem_t dev, + vied_subsystem_address_t addr); + +_VIED_SUBSYSTEM_ACCESS_INLINE +void vied_subsystem_load(vied_subsystem_t dev, + vied_subsystem_address_t addr, + void *data, unsigned int size); + +#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access_initialization.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access_initialization.h new file mode 100644 
index 000000000000..81f4d08d5ae0
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access_initialization.h
@@ -0,0 +1,44 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H
+#define _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H
+
+#include "vied_subsystem_access_types.h"
+
+/** @brief Initialises the access of a subsystem.
+ * @param[in] system The subsystem for which the access has to be initialised.
+ *
+ * vied_subsystem_access_initialize initialises the access to a subsystem.
+ * It sets the base address of the subsystem. This base address is extracted from the hsd file.
+ *
+ */
+void
+vied_subsystem_access_initialize(vied_subsystem_t system);
+
+
+/** @brief Initialises the access of multiple subsystems.
+ * @param[in] nr_subsystems The number of subsystems for which the access has to be initialised.
+ * @param[in] base_addresses A pointer to an array of base addresses of subsystems.
+ * The size of this array must be "nr_subsystems".
+ * This array must be available during the accesses of the subsystem.
+ *
+ * vied_subsystems_access_initialize initialises the access to multiple subsystems.
+ * It sets the base addresses of the subsystems that are provided by the array base_addresses.
+ *
+ */
+void
+vied_subsystems_access_initialize( unsigned int nr_subsystems
+ , const vied_subsystem_base_address_t *base_addresses);
+
+#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_INITIALIZE_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access_types.h
new file mode 100644
index 000000000000..75fef6c4ddba
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_subsystem_access_types.h
@@ -0,0 +1,34 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+#ifndef _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H
+#define _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H
+
+/** Types for the VIED subsystem access interface */
+#include
+
+/** \brief An identifier for a VIED subsystem.
+ *
+ * This identifier must be a compile-time constant. It is used in
+ * access to a VIED subsystem.
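Tying the two headers together: the host registers the subsystem base addresses once, then issues load/store accesses against subsystem-relative offsets. A short sketch (the base address and register offset are invented for illustration; the base-address array must stay live while accesses happen, as the comment above requires):

/* Sketch: single subsystem, one 32-bit register read-modify-write */
static const vied_subsystem_base_address_t bases[] = { 0xE0000000ULL };

static uint32_t toggle_bit0(vied_subsystem_t dev)
{
	const vied_subsystem_address_t reg = 0x10;	/* illustrative offset */
	uint32_t v;

	vied_subsystems_access_initialize(1, bases);
	v = vied_subsystem_load_32(dev, reg);
	vied_subsystem_store_32(dev, reg, v ^ 1u);
	return v;
}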
+ */
+typedef unsigned int vied_subsystem_t;
+
+
+/** \brief An address within a VIED subsystem */
+typedef uint32_t vied_subsystem_address_t;
+
+/** \brief A base address of a VIED subsystem seen from the host */
+typedef unsigned long long vied_subsystem_base_address_t;
+
+#endif /* _HRT_VIED_SUBSYSTEM_ACCESS_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_types.h
new file mode 100644
index 000000000000..0acfdbb00cfa
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied/vied/vied_types.h
@@ -0,0 +1,45 @@
+/*
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+#ifndef _HRT_VIED_TYPES_H
+#define _HRT_VIED_TYPES_H
+
+/** Types shared by VIED interfaces */
+
+#include
+
+/** \brief An address within a VIED subsystem
+ *
+ * This will eventually replace the vied_memory_address_t and vied_subsystem_address_t
+ */
+typedef uint32_t vied_address_t;
+
+/** \brief Memory address type
+ *
+ * A memory address is an offset within a memory.
+ */
+typedef uint32_t vied_memory_address_t;
+
+/** \brief Master port id */
+typedef int vied_master_port_id_t;
+
+/**
+ * \brief Require the existence of a certain type
+ *
+ * This macro can be used in interface header files to ensure that
+ * an implementation-defined type with a specified name exists.
+ */
+#define _VIED_REQUIRE_TYPE(T) enum { _VIED_SIZEOF_##T = sizeof(T) }
+
+
+#endif /* _HRT_VIED_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_nci_acb/interface/vied_nci_acb_route_type.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_nci_acb/interface/vied_nci_acb_route_type.h
new file mode 100644
index 000000000000..b09d9f4d5d42
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_nci_acb/interface/vied_nci_acb_route_type.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
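The _VIED_REQUIRE_TYPE helper above turns a missing type into a compile-time error by forcing a sizeof evaluation inside an enum; an interface header would use it like this (vied_physical_address_t is just an example of a type the implementation must supply):

/* Fails to compile if vied_physical_address_t was never defined:
 * expands to enum { _VIED_SIZEOF_vied_physical_address_t =
 *                   sizeof(vied_physical_address_t) };
 */
_VIED_REQUIRE_TYPE(vied_physical_address_t);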
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_nci_acb/interface/vied_nci_acb_route_type.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_nci_acb/interface/vied_nci_acb_route_type.h
new file mode 100644
index 000000000000..b09d9f4d5d42
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_nci_acb/interface/vied_nci_acb_route_type.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef VIED_NCI_ACB_ROUTE_TYPE_H_
+#define VIED_NCI_ACB_ROUTE_TYPE_H_
+
+#include "type_support.h"
+
+typedef enum {
+	NCI_ACB_PORT_ISP = 0,
+	NCI_ACB_PORT_ACC = 1,
+	NCI_ACB_PORT_INVALID = 0xFF
+} nci_acb_port_t;
+
+typedef struct {
+	/* 0 = ISP, 1 = Acc */
+	nci_acb_port_t in_select;
+	/* 0 = ISP, 1 = Acc */
+	nci_acb_port_t out_select;
+	/* When set, Ack will be sent only when Eof arrives */
+	uint32_t ignore_line_num;
+	/* Fork adapter to enable streaming to both outputs
+	 * (next acb out and isp out)
+	 */
+	uint32_t fork_acb_output;
+} nci_acb_route_t;
+
+#endif /* VIED_NCI_ACB_ROUTE_TYPE_H_ */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_param_storage_class.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_param_storage_class.h
new file mode 100644
index 000000000000..1ea7e729078c
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_param_storage_class.h
@@ -0,0 +1,28 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_PARAM_STORAGE_CLASS_H
+#define __IA_CSS_PARAM_STORAGE_CLASS_H
+
+#include "storage_class.h"
+
+#ifndef __INLINE_PARAMETERS__
+#define IA_CSS_PARAMETERS_STORAGE_CLASS_H STORAGE_CLASS_EXTERN
+#define IA_CSS_PARAMETERS_STORAGE_CLASS_C
+#else
+#define IA_CSS_PARAMETERS_STORAGE_CLASS_H STORAGE_CLASS_INLINE
+#define IA_CSS_PARAMETERS_STORAGE_CLASS_C STORAGE_CLASS_INLINE
+#endif
+
+#endif /* __IA_CSS_PARAM_STORAGE_CLASS_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal.h
new file mode 100644
index 000000000000..4cc71be3fc38
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal.h
@@ -0,0 +1,188 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IA_CSS_TERMINAL_H +#define __IA_CSS_TERMINAL_H + +#include "type_support.h" +#include "ia_css_terminal_types.h" +#include "ia_css_param_storage_class.h" + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_param_in_terminal_get_descriptor_size( + const unsigned int nof_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_param_section_desc_t * +ia_css_param_in_terminal_get_param_section_desc( + const ia_css_param_terminal_t *param_terminal, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_param_out_terminal_get_descriptor_size( + const unsigned int nof_sections, + const unsigned int nof_fragments +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_param_section_desc_t * +ia_css_param_out_terminal_get_param_section_desc( + const ia_css_param_terminal_t *param_terminal, + const unsigned int section_index, + const unsigned int nof_sections, + const unsigned int fragment_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_param_terminal_create( + ia_css_param_terminal_t *param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal +); + + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_spatial_param_terminal_get_descriptor_size( + const unsigned int nof_frame_param_sections, + const unsigned int nof_fragments +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_fragment_grid_desc_t * +ia_css_spatial_param_terminal_get_fragment_grid_desc( + const ia_css_spatial_param_terminal_t *spatial_param_terminal, + const unsigned int fragment_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_frame_grid_param_section_desc_t * +ia_css_spatial_param_terminal_get_frame_grid_param_section_desc( + const ia_css_spatial_param_terminal_t *spatial_param_terminal, + const unsigned int section_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_spatial_param_terminal_create( + ia_css_spatial_param_terminal_t *spatial_param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal, + const unsigned int nof_fragments, + const uint32_t kernel_id +); + + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_sliced_param_terminal_get_descriptor_size( + const unsigned int nof_slice_param_sections, + const unsigned int nof_slices[], + const unsigned int nof_fragments +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_fragment_slice_desc_t * +ia_css_sliced_param_terminal_get_fragment_slice_desc( + const ia_css_sliced_param_terminal_t *sliced_param_terminal, + const unsigned int fragment_index +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +ia_css_slice_param_section_desc_t * +ia_css_sliced_param_terminal_get_slice_param_section_desc( + const ia_css_sliced_param_terminal_t *sliced_param_terminal, + const unsigned int fragment_index, + const unsigned int slice_index, + const unsigned int section_index, + const unsigned int nof_slice_param_sections +); + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +int ia_css_sliced_param_terminal_create( + ia_css_sliced_param_terminal_t *sliced_param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal, + const unsigned int nof_slice_param_sections, + const unsigned int nof_slices[], + const unsigned int nof_fragments, + const uint32_t kernel_id +); + + +IA_CSS_PARAMETERS_STORAGE_CLASS_H +unsigned int ia_css_program_terminal_get_descriptor_size( + const unsigned int nof_fragments, + const unsigned int nof_fragment_param_sections, + const unsigned int 
nof_kernel_fragment_sequencer_infos,
+	const unsigned int nof_command_objs
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+ia_css_fragment_param_section_desc_t *
+ia_css_program_terminal_get_frgmnt_prm_sct_desc(
+	const ia_css_program_terminal_t *program_terminal,
+	const unsigned int fragment_index,
+	const unsigned int section_index,
+	const unsigned int nof_fragment_param_sections
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+ia_css_kernel_fragment_sequencer_info_desc_t *
+ia_css_program_terminal_get_kernel_frgmnt_seq_info_desc(
+	const ia_css_program_terminal_t *program_terminal,
+	const unsigned int fragment_index,
+	const unsigned int info_index,
+	const unsigned int nof_kernel_fragment_sequencer_infos
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+int ia_css_program_terminal_create(
+	ia_css_program_terminal_t *program_terminal,
+	const uint16_t terminal_offset,
+	const uint16_t terminal_size,
+	const unsigned int nof_fragments,
+	const unsigned int nof_kernel_fragment_sequencer_infos,
+	const unsigned int nof_command_objs
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+int ia_css_program_terminal_get_command_base_offset(
+	const ia_css_program_terminal_t *program_terminal,
+	const unsigned int nof_fragments,
+	const unsigned int nof_kernel_fragment_sequencer_infos,
+	const unsigned int commands_slots_used,
+	uint16_t *command_desc_offset
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+uint16_t *ia_css_program_terminal_get_line_count(
+	const ia_css_kernel_fragment_sequencer_command_desc_t
+	*kernel_fragment_sequencer_command_desc_base,
+	const unsigned int set_count
+);
+
+#ifdef __INLINE_PARAMETERS__
+#include "ia_css_terminal_impl.h"
+#endif /* __INLINE_PARAMETERS__ */
+
+#endif /* __IA_CSS_TERMINAL_H */
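The cached-parameter entry points above follow a size/create/get pattern: compute the descriptor blob size, allocate it, create the terminal in place, then patch the section descriptors. A minimal host-side sketch (editor's note: the allocator, section count and byte sizes are illustrative assumptions, not part of this interface):

    #include <stdlib.h>
    #include "ia_css_terminal.h"

    int build_cached_in_terminal(void)
    {
    	const unsigned int nof_sections = 2;	/* assumed */
    	unsigned int size =
    		ia_css_param_in_terminal_get_descriptor_size(nof_sections);
    	ia_css_param_terminal_t *term = malloc(size);
    	ia_css_param_section_desc_t *sec;

    	if (term == NULL)
    		return -1;
    	/* offset 0 is a sketch; real code passes the terminal's offset
    	 * within the enclosing process-group blob */
    	if (ia_css_param_terminal_create(term, 0, (uint16_t)size, 1) != 0) {
    		free(term);
    		return -1;
    	}
    	sec = ia_css_param_in_terminal_get_param_section_desc(term, 0);
    	sec->mem_offset = 0;
    	sec->mem_size = 64;	/* assumed payload size */
    	free(term);
    	return 0;
    }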
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest.h
new file mode 100644
index 000000000000..ca0a436082cf
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest.h
@@ -0,0 +1,109 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/
+
+#ifndef __IA_CSS_TERMINAL_MANIFEST_H
+#define __IA_CSS_TERMINAL_MANIFEST_H
+
+#include "type_support.h"
+#include "ia_css_param_storage_class.h"
+#include "ia_css_terminal_manifest_types.h"
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+unsigned int ia_css_param_terminal_manifest_get_size(
+	const unsigned int nof_sections
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+int ia_css_param_terminal_manifest_init(
+	ia_css_param_terminal_manifest_t *param_terminal,
+	const uint16_t section_count
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+ia_css_param_manifest_section_desc_t *
+ia_css_param_terminal_manifest_get_prm_sct_desc(
+	const ia_css_param_terminal_manifest_t *param_terminal_manifest,
+	const unsigned int section_index
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+unsigned int ia_css_spatial_param_terminal_manifest_get_size(
+	const unsigned int nof_frame_param_sections
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+int ia_css_spatial_param_terminal_manifest_init(
+	ia_css_spatial_param_terminal_manifest_t *spatial_param_terminal,
+	const uint16_t section_count
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+ia_css_frame_grid_param_manifest_section_desc_t *
+ia_css_spatial_param_terminal_manifest_get_frm_grid_prm_sct_desc(
+	const ia_css_spatial_param_terminal_manifest_t *
+	spatial_param_terminal_manifest,
+	const unsigned int section_index
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+unsigned int ia_css_sliced_param_terminal_manifest_get_size(
+	const unsigned int nof_slice_param_sections
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+int ia_css_sliced_param_terminal_manifest_init(
+	ia_css_sliced_param_terminal_manifest_t *sliced_param_terminal,
+	const uint16_t section_count
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+ia_css_sliced_param_manifest_section_desc_t *
+ia_css_sliced_param_terminal_manifest_get_sliced_prm_sct_desc(
+	const ia_css_sliced_param_terminal_manifest_t *
+	sliced_param_terminal_manifest,
+	const unsigned int section_index
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+unsigned int ia_css_program_terminal_manifest_get_size(
+	const unsigned int nof_fragment_param_sections,
+	const unsigned int nof_kernel_fragment_sequencer_infos
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+int ia_css_program_terminal_manifest_init(
+	ia_css_program_terminal_manifest_t *program_terminal,
+	const uint16_t fragment_param_section_count,
+	const uint16_t kernel_fragment_seq_info_section_count
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+ia_css_fragment_param_manifest_section_desc_t *
+ia_css_program_terminal_manifest_get_frgmnt_prm_sct_desc(
+	const ia_css_program_terminal_manifest_t *program_terminal_manifest,
+	const unsigned int section_index
+);
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_H
+ia_css_kernel_fragment_sequencer_info_manifest_desc_t *
+ia_css_program_terminal_manifest_get_kernel_frgmnt_seq_info_desc(
+	const ia_css_program_terminal_manifest_t *program_terminal_manifest,
+	const unsigned int info_index
+);
+
+#ifdef __INLINE_PARAMETERS__
+#include "ia_css_terminal_manifest_impl.h"
+#endif /* __INLINE_PARAMETERS__ */
+
+#endif /* __IA_CSS_TERMINAL_MANIFEST_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest_types.h
new file mode 100644
index 000000000000..fe146395a8f4
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_manifest_types.h
@@ -0,0 +1,342 @@
+/**
+* Support for
Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_TYPES_H +#define __IA_CSS_TERMINAL_MANIFEST_TYPES_H + + +#include "ia_css_terminal_defs.h" +#include "type_support.h" +#include "ia_css_base_types.h" +#include "ia_css_terminal_manifest_base_types.h" + +#define N_PADDING_UINT8_IN_PARAM_TERMINAL_MANIFEST_SEC_STRUCT 1 +#define SIZE_OF_PARAM_TERMINAL_MANIFEST_SEC_STRUCT_IN_BITS \ + (1 * IA_CSS_UINT32_T_BITS \ + + 3 * IA_CSS_UINT8_T_BITS \ + + N_PADDING_UINT8_IN_PARAM_TERMINAL_MANIFEST_SEC_STRUCT * IA_CSS_UINT8_T_BITS) + +/* =============== Cached Param Terminal Manifest - START ============== */ +struct ia_css_param_manifest_section_desc_s { + /* Maximum size of the related parameter region */ + uint32_t max_mem_size; + /* Indication of the kernel this parameter belongs to */ + uint8_t kernel_id; + /* Memory targeted by this section + * (Register MMIO Interface/DMEM/VMEM/GMEM etc) + */ + uint8_t mem_type_id; + /* Region id within the specified memory */ + uint8_t region_id; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PARAM_TERMINAL_MANIFEST_SEC_STRUCT]; +}; + +typedef struct ia_css_param_manifest_section_desc_s + ia_css_param_manifest_section_desc_t; + + +#define N_PADDING_UINT8_IN_PARAM_TERMINAL_MAN_STRUCT 4 +#define SIZE_OF_PARAM_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + (SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \ + + (2*IA_CSS_UINT16_T_BITS) \ + + (N_PADDING_UINT8_IN_PARAM_TERMINAL_MAN_STRUCT * IA_CSS_UINT8_T_BITS)) + +/* Frame constant parameters terminal manifest */ +struct ia_css_param_terminal_manifest_s { + /* Parameter terminal manifest base */ + ia_css_terminal_manifest_t base; + /* + * Number of cached parameter sections, coming from manifest + * but also shared by the terminal + */ + uint16_t param_manifest_section_desc_count; + /* + * Points to the variable array of + * struct ia_css_param_section_desc_s + */ + uint16_t param_manifest_section_desc_offset; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PARAM_TERMINAL_MAN_STRUCT]; +}; + +typedef struct ia_css_param_terminal_manifest_s + ia_css_param_terminal_manifest_t; +/* ================= Cached Param Terminal Manifest - End ================ */ + + +/* ================= Spatial Param Terminal Manifest - START ============= */ + +#define SIZE_OF_FRAG_GRID_MAN_STRUCT_IN_BITS \ + ((IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS) \ + + (IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS)) + +struct ia_css_fragment_grid_manifest_desc_s { + /* Min resolution width/height of the spatial parameters + * for the fragment measured in compute units + */ + uint16_t min_fragment_grid_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Max resolution width/height of the spatial parameters + * for the fragment measured in compute units + */ + uint16_t max_fragment_grid_dimension[IA_CSS_N_DATA_DIMENSION]; +}; + +typedef struct ia_css_fragment_grid_manifest_desc_s + ia_css_fragment_grid_manifest_desc_t; + +#define N_PADDING_UINT8_IN_FRAME_GRID_PARAM_MAN_SEC_STRUCT 1 +#define SIZE_OF_FRAME_GRID_PARAM_MAN_SEC_STRUCT_IN_BITS 
\
+	(1 * IA_CSS_UINT32_T_BITS \
+	+ 3 * IA_CSS_UINT8_T_BITS \
+	+ N_PADDING_UINT8_IN_FRAME_GRID_PARAM_MAN_SEC_STRUCT * IA_CSS_UINT8_T_BITS)
+
+struct ia_css_frame_grid_param_manifest_section_desc_s {
+	/* Maximum buffer total size allowed for
+	 * this frame of parameters
+	 */
+	uint32_t max_mem_size;
+	/* Memory space targeted by this section
+	 * (Register MMIO Interface/DMEM/VMEM/GMEM etc)
+	 */
+	uint8_t mem_type_id;
+	/* Region id within the specified memory space */
+	uint8_t region_id;
+	/* size in bytes of each compute unit for
+	 * the specified memory space and region
+	 */
+	uint8_t elem_size;
+	/* align to 64 */
+	uint8_t padding[N_PADDING_UINT8_IN_FRAME_GRID_PARAM_MAN_SEC_STRUCT];
+};
+
+typedef struct ia_css_frame_grid_param_manifest_section_desc_s
+	ia_css_frame_grid_param_manifest_section_desc_t;
+
+#define SIZE_OF_FRAME_GRID_MAN_STRUCT_IN_BITS \
+	((IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS) \
+	+ (IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS))
+
+struct ia_css_frame_grid_manifest_desc_s {
+	/* Min resolution width/height of the spatial parameters for
+	 * the frame measured in compute units
+	 */
+	uint16_t min_frame_grid_dimension[IA_CSS_N_DATA_DIMENSION];
+	/* Max resolution width/height of the spatial parameters for
+	 * the frame measured in compute units
+	 */
+	uint16_t max_frame_grid_dimension[IA_CSS_N_DATA_DIMENSION];
+};
+
+typedef struct ia_css_frame_grid_manifest_desc_s
+	ia_css_frame_grid_manifest_desc_t;
+
+#define N_PADDING_UINT8_IN_SPATIAL_PARAM_TERM_MAN_STRUCT 2
+#define SIZE_OF_SPATIAL_PARAM_TERM_MAN_STRUCT_IN_BITS \
+	((SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS) \
+	+ (SIZE_OF_FRAME_GRID_MAN_STRUCT_IN_BITS) \
+	+ (SIZE_OF_FRAG_GRID_MAN_STRUCT_IN_BITS) \
+	+ (2 * IA_CSS_UINT16_T_BITS) \
+	+ (2 * IA_CSS_UINT8_T_BITS) \
+	+ (N_PADDING_UINT8_IN_SPATIAL_PARAM_TERM_MAN_STRUCT * \
+	IA_CSS_UINT8_T_BITS))
+
+struct ia_css_spatial_param_terminal_manifest_s {
+	/* Spatial Parameter terminal manifest base */
+	ia_css_terminal_manifest_t base;
+	/* Contains limits for the frame spatial parameters */
+	ia_css_frame_grid_manifest_desc_t frame_grid_desc;
+	/*
+	 * Contains limits for the fragment spatial parameters
+	 * - COMMON AMONG FRAGMENTS
+	 */
+	ia_css_fragment_grid_manifest_desc_t common_fragment_grid_desc;
+	/*
+	 * Number of frame spatial parameter sections, they are set
+	 * in slice-steps through frame processing
+	 */
+	uint16_t frame_grid_param_manifest_section_desc_count;
+	/*
+	 * Points to the variable array of
+	 * ia_css_frame_spatial_param_manifest_section_desc_t
+	 */
+	uint16_t frame_grid_param_manifest_section_desc_offset;
+	/*
+	 * Indication of the kernel this spatial parameter terminal belongs to
+	 * SHOULD MATCH TO INDEX AND BE USED ONLY FOR CHECK
+	 */
+	uint8_t kernel_id;
+	/*
+	 * Groups together compute units in order to achieve alignment
+	 * requirements for transfers and to achieve canonical frame
+	 * representation
+	 */
+	uint8_t compute_units_p_elem;
+	/* align to 64 */
+	uint8_t padding[N_PADDING_UINT8_IN_SPATIAL_PARAM_TERM_MAN_STRUCT];
+};
+
+typedef struct ia_css_spatial_param_terminal_manifest_s
+	ia_css_spatial_param_terminal_manifest_t;
+
+/* ================= Spatial Param Terminal Manifest - END ================ */
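+
+/*
+ * Size cross-check (editor's note, assuming IA_CSS_N_DATA_DIMENSION == 2):
+ * the spatial manifest adds to the base 64 bits of frame grid limits,
+ * 64 bits of fragment grid limits, 2 * 16 bits of section count/offset and
+ * 4 * 8 bits of kernel id, compute units and padding, i.e. 192 bits in
+ * total, which preserves the 64-bit alignment asserted in
+ * ia_css_terminal_manifest_impl.h.
+ */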
+
+/* ================= Sliced Param Terminal Manifest - START =============== */
+
+#define N_PADDING_UINT8_IN_SLICED_TERMINAL_MAN_SECTION_STRUCT (2)
+#define SIZE_OF_SLICED_PARAM_MAN_SEC_STRUCT_IN_BITS \
+	(1 * IA_CSS_UINT32_T_BITS \
+	+ 2 * IA_CSS_UINT8_T_BITS \
+	+ N_PADDING_UINT8_IN_SLICED_TERMINAL_MAN_SECTION_STRUCT * IA_CSS_UINT8_T_BITS)
+
+struct ia_css_sliced_param_manifest_section_desc_s {
+	/* Maximum size of the related parameter region */
+	uint32_t max_mem_size;
+	/*
+	 * Memory targeted by this section
+	 * (Register MMIO Interface/DMEM/VMEM/GMEM etc)
+	 */
+	uint8_t mem_type_id;
+	/* Region id within the specified memory */
+	uint8_t region_id;
+	/* align to 64 */
+	uint8_t padding[N_PADDING_UINT8_IN_SLICED_TERMINAL_MAN_SECTION_STRUCT];
+};
+
+typedef struct ia_css_sliced_param_manifest_section_desc_s
+	ia_css_sliced_param_manifest_section_desc_t;
+
+#define N_PADDING_UINT8_IN_SLICED_TERMINAL_MANIFEST_STRUCT 3
+#define SIZE_OF_SLICED_TERMINAL_MANIFEST_STRUCT_IN_BITS \
+	(SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS \
+	+ 2 * IA_CSS_UINT16_T_BITS \
+	+ 1 * IA_CSS_UINT8_T_BITS \
+	+ N_PADDING_UINT8_IN_SLICED_TERMINAL_MANIFEST_STRUCT * IA_CSS_UINT8_T_BITS)
+
+/* Sliced parameters terminal manifest */
+struct ia_css_sliced_param_terminal_manifest_s {
+	/* Sliced parameter terminal manifest base */
+	ia_css_terminal_manifest_t base;
+	/*
+	 * Number of the array elements
+	 * sliced_param_section_offset points to
+	 */
+	uint16_t sliced_param_section_count;
+	/*
+	 * Points to array of ia_css_sliced_param_manifest_section_desc_s
+	 * which contain info for the slicing of the parameters
+	 */
+	uint16_t sliced_param_section_offset;
+	/* Kernel identifier */
+	uint8_t kernel_id;
+	/* align to 64 */
+	uint8_t padding[N_PADDING_UINT8_IN_SLICED_TERMINAL_MANIFEST_STRUCT];
+};
+
+typedef struct ia_css_sliced_param_terminal_manifest_s
+	ia_css_sliced_param_terminal_manifest_t;
+
+/* ================= Slice Param Terminal Manifest - End =============== */
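+
+/*
+ * Layout sketch (editor's note): ia_css_sliced_param_terminal_manifest_init
+ * places the variable-length section array directly behind the fixed
+ * struct, so sliced_param_section_offset is always
+ * sizeof(ia_css_sliced_param_terminal_manifest_t).
+ */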
+
+/* ================= Program Terminal Manifest - START ================= */
+
+#define N_PADDING_UINT8_IN_FRAG_PARAM_MAN_SEC_STRUCT 1
+#define SIZE_OF_FRAG_PARAM_MAN_SEC_STRUCT_IN_BITS \
+	(1 * IA_CSS_UINT32_T_BITS \
+	+ 3 * IA_CSS_UINT8_T_BITS \
+	+ N_PADDING_UINT8_IN_FRAG_PARAM_MAN_SEC_STRUCT * IA_CSS_UINT8_T_BITS)
+
+/* Fragment constant parameters manifest */
+struct ia_css_fragment_param_manifest_section_desc_s {
+	/* Maximum size of the related parameter region */
+	uint32_t max_mem_size;
+	/* Indication of the kernel this parameter belongs to */
+	uint8_t kernel_id;
+	/* Memory targeted by this section
+	 * (Register MMIO Interface/DMEM/VMEM/GMEM etc)
+	 */
+	uint8_t mem_type_id;
+	/* Region id within the specified memory space */
+	uint8_t region_id;
+	/* align to 64 */
+	uint8_t padding[N_PADDING_UINT8_IN_FRAG_PARAM_MAN_SEC_STRUCT];
+};
+
+typedef struct ia_css_fragment_param_manifest_section_desc_s
+	ia_css_fragment_param_manifest_section_desc_t;
+
+#define SIZE_OF_KERNEL_FRAG_SEQ_INFO_MAN_STRUCT_IN_BITS \
+	(10*IA_CSS_N_DATA_DIMENSION*IA_CSS_UINT16_T_BITS)
+
+struct ia_css_kernel_fragment_sequencer_info_manifest_desc_s {
+	/* Slice dimensions */
+	uint16_t min_fragment_grid_slice_dimension[IA_CSS_N_DATA_DIMENSION];
+	/* Slice dimensions */
+	uint16_t max_fragment_grid_slice_dimension[IA_CSS_N_DATA_DIMENSION];
+	/* Nof slices */
+	uint16_t min_fragment_grid_slice_count[IA_CSS_N_DATA_DIMENSION];
+	/* Nof slices */
+	uint16_t max_fragment_grid_slice_count[IA_CSS_N_DATA_DIMENSION];
+	/* Grid point decimation factor */
+	uint16_t
+	min_fragment_grid_point_decimation_factor[IA_CSS_N_DATA_DIMENSION];
+	/* Grid point decimation factor */
+	uint16_t
+	max_fragment_grid_point_decimation_factor[IA_CSS_N_DATA_DIMENSION];
+	/* Relative position of grid origin to pixel origin */
+	int16_t
+	min_fragment_grid_overlay_pixel_topleft_index[IA_CSS_N_DATA_DIMENSION];
+	/* Relative position of grid origin to pixel origin */
+	int16_t
+	max_fragment_grid_overlay_pixel_topleft_index[IA_CSS_N_DATA_DIMENSION];
+	/* Dimension of grid */
+	int16_t
+	min_fragment_grid_overlay_pixel_dimension[IA_CSS_N_DATA_DIMENSION];
+	/* Dimension of grid */
+	int16_t
+	max_fragment_grid_overlay_pixel_dimension[IA_CSS_N_DATA_DIMENSION];
+};
+
+typedef struct ia_css_kernel_fragment_sequencer_info_manifest_desc_s
+	ia_css_kernel_fragment_sequencer_info_manifest_desc_t;
+
+#define N_PADDING_UINT8_IN_PROGRAM_TERM_MAN_STRUCT 2
+#define SIZE_OF_PROG_TERM_MAN_STRUCT_IN_BITS \
+	((SIZE_OF_TERMINAL_MANIFEST_STRUCT_IN_BITS) \
+	+ (IA_CSS_UINT32_T_BITS) \
+	+ (5*IA_CSS_UINT16_T_BITS) \
+	+ (N_PADDING_UINT8_IN_PROGRAM_TERM_MAN_STRUCT * IA_CSS_UINT8_T_BITS))
+
+struct ia_css_program_terminal_manifest_s {
+	ia_css_terminal_manifest_t base;
+	/* Connection manager passes seq info as single blob at the moment */
+	uint32_t sequencer_info_kernel_id;
+	/* Maximum number of command descriptors supported
+	 * by the program group
+	 */
+	uint16_t max_kernel_fragment_sequencer_command_desc;
+	uint16_t fragment_param_manifest_section_desc_count;
+	uint16_t fragment_param_manifest_section_desc_offset;
+	uint16_t kernel_fragment_sequencer_info_manifest_info_count;
+	uint16_t kernel_fragment_sequencer_info_manifest_info_offset;
+	/* align to 64 */
+	uint8_t padding[N_PADDING_UINT8_IN_PROGRAM_TERM_MAN_STRUCT];
+};
+
+typedef struct ia_css_program_terminal_manifest_s
+	ia_css_program_terminal_manifest_t;
+
+/* ==================== Program Terminal Manifest - END ==================== */
+
+#endif /* __IA_CSS_TERMINAL_MANIFEST_TYPES_H */
diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_types.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_types.h
new file mode 100644
index 000000000000..c5c89fb7ec91
--- /dev/null
+++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/interface/ia_css_terminal_types.h
@@ -0,0 +1,351 @@
+/**
+* Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2018, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+*/ + +#ifndef __IA_CSS_TERMINAL_TYPES_H +#define __IA_CSS_TERMINAL_TYPES_H + +#include "type_support.h" +#include "ia_css_base_types.h" +#include "ia_css_terminal_base_types.h" + + +typedef struct ia_css_program_control_init_load_section_desc_s + ia_css_program_control_init_load_section_desc_t; +typedef struct ia_css_program_control_init_connect_section_desc_s + ia_css_program_control_init_connect_section_desc_t; +typedef struct ia_css_program_control_init_program_desc_s + ia_css_program_control_init_program_desc_t; +typedef struct ia_css_program_control_init_terminal_s + ia_css_program_control_init_terminal_t; + +typedef struct ia_css_program_terminal_s ia_css_program_terminal_t; +typedef struct ia_css_fragment_param_section_desc_s + ia_css_fragment_param_section_desc_t; +typedef struct ia_css_kernel_fragment_sequencer_info_desc_s + ia_css_kernel_fragment_sequencer_info_desc_t; +typedef struct ia_css_kernel_fragment_sequencer_command_desc_s + ia_css_kernel_fragment_sequencer_command_desc_t; + +typedef struct ia_css_sliced_param_terminal_s ia_css_sliced_param_terminal_t; +typedef struct ia_css_fragment_slice_desc_s ia_css_fragment_slice_desc_t; +typedef struct ia_css_slice_param_section_desc_s + ia_css_slice_param_section_desc_t; + +typedef struct ia_css_spatial_param_terminal_s ia_css_spatial_param_terminal_t; +typedef struct ia_css_frame_grid_desc_s ia_css_frame_grid_desc_t; +typedef struct ia_css_frame_grid_param_section_desc_s + ia_css_frame_grid_param_section_desc_t; +typedef struct ia_css_fragment_grid_desc_s ia_css_fragment_grid_desc_t; + +typedef struct ia_css_param_terminal_s ia_css_param_terminal_t; +typedef struct ia_css_param_section_desc_s ia_css_param_section_desc_t; + +typedef struct ia_css_param_payload_s ia_css_param_payload_t; +typedef struct ia_css_terminal_s ia_css_terminal_t; + +/* =================== Generic Parameter Payload - START =================== */ +#define N_UINT64_IN_PARAM_PAYLOAD_STRUCT 1 +#define N_UINT32_IN_PARAM_PAYLOAD_STRUCT 1 + +#define IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \ + (N_UINT64_IN_PARAM_PAYLOAD_STRUCT * IA_CSS_UINT64_T_BITS \ + + VIED_VADDRESS_BITS \ + + N_UINT32_IN_PARAM_PAYLOAD_STRUCT * IA_CSS_UINT32_T_BITS) + +struct ia_css_param_payload_s { + /* + * Temporary variable holding the host address of the parameter buffer + * as PSYS is handling the parameters on the host side for the moment + */ + uint64_t host_buffer; + /* + * Base virtual addresses to parameters in subsystem virtual + * memory space + * NOTE: Used in legacy pg flow + */ + vied_vaddress_t buffer; + /* + * Offset to buffer address within external buffer set structure + * NOTE: Used in ppg flow + */ + uint32_t terminal_index; +}; +/* =================== Generic Parameter Payload - End ==================== */ + + +/* ==================== Cached Param Terminal - START ==================== */ +#define N_UINT32_IN_PARAM_SEC_STRUCT 2 + +#define SIZE_OF_PARAM_SEC_STRUCT_BITS \ + (N_UINT32_IN_PARAM_SEC_STRUCT * IA_CSS_UINT32_T_BITS) + +/* Frame constant parameters section */ +struct ia_css_param_section_desc_s { + /* Offset of the parameter allocation in memory */ + uint32_t mem_offset; + /* Memory allocation size needs of this parameter */ + uint32_t mem_size; +}; + +#define N_UINT16_IN_PARAM_TERMINAL_STRUCT 1 +#define N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT 6 + +#define SIZE_OF_PARAM_TERMINAL_STRUCT_BITS \ + (SIZE_OF_TERMINAL_STRUCT_BITS \ + + IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \ + + N_UINT16_IN_PARAM_TERMINAL_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT 
* IA_CSS_UINT8_T_BITS)
+
+/* Frame constant parameters terminal */
+struct ia_css_param_terminal_s {
+	/* Parameter terminal base */
+	ia_css_terminal_t base;
+	/* Parameter buffer handle attached to the terminal */
+	ia_css_param_payload_t param_payload;
+	/* Points to the variable array of ia_css_param_section_desc_t */
+	uint16_t param_section_desc_offset;
+	uint8_t padding[N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT];
+};
+/* ==================== Cached Param Terminal - End ==================== */
+
+
+/* ==================== Spatial Param Terminal - START ==================== */
+#define N_UINT16_IN_FRAG_GRID_STRUCT (2 * IA_CSS_N_DATA_DIMENSION)
+
+#define SIZE_OF_FRAG_GRID_STRUCT_BITS \
+	(N_UINT16_IN_FRAG_GRID_STRUCT * IA_CSS_UINT16_T_BITS)
+
+struct ia_css_fragment_grid_desc_s {
+	/*
+	 * Offset width/height of the top-left compute unit of the
+	 * fragment compared to the frame
+	 */
+	uint16_t fragment_grid_index[IA_CSS_N_DATA_DIMENSION];
+	/*
+	 * Resolution width/height of the spatial parameters that
+	 * correspond to the fragment measured in compute units
+	 */
+	uint16_t fragment_grid_dimension[IA_CSS_N_DATA_DIMENSION];
+};
+
+#define N_UINT32_IN_FRAME_GRID_PARAM_SEC_STRUCT 3
+#define N_PADDING_UINT8_IN_FRAME_GRID_PARAM_SEC_STRUCT 4
+
+#define SIZE_OF_FRAME_GRID_PARAM_SEC_STRUCT_BITS \
+	(N_UINT32_IN_FRAME_GRID_PARAM_SEC_STRUCT * IA_CSS_UINT32_T_BITS \
+	+ N_PADDING_UINT8_IN_FRAME_GRID_PARAM_SEC_STRUCT * IA_CSS_UINT8_T_BITS)
+
+/*
+ * A plane of parameters with spatial aspect
+ * (compute units correlated to pixel data)
+ */
+struct ia_css_frame_grid_param_section_desc_s {
+	/* Offset of the parameter allocation in memory */
+	uint32_t mem_offset;
+	/* Memory allocation size needs of this parameter */
+	uint32_t mem_size;
+	/*
+	 * stride in bytes of each line of compute units for
+	 * the specified memory space and region
+	 */
+	uint32_t stride;
+	uint8_t padding[N_PADDING_UINT8_IN_FRAME_GRID_PARAM_SEC_STRUCT];
+};
+
+#define N_UINT16_IN_FRAME_GRID_STRUCT_STRUCT IA_CSS_N_DATA_DIMENSION
+#define N_PADDING_UINT8_IN_FRAME_GRID_STRUCT 4
+
+#define SIZE_OF_FRAME_GRID_STRUCT_BITS \
+	(N_UINT16_IN_FRAME_GRID_STRUCT_STRUCT * IA_CSS_UINT16_T_BITS \
+	+ N_PADDING_UINT8_IN_FRAME_GRID_STRUCT * IA_CSS_UINT8_T_BITS)
+
+struct ia_css_frame_grid_desc_s {
+	/* Resolution width/height of the frame of
+	 * spatial parameters measured in compute units
+	 */
+	uint16_t frame_grid_dimension[IA_CSS_N_DATA_DIMENSION];
+	uint8_t padding[N_PADDING_UINT8_IN_FRAME_GRID_STRUCT];
+};
+
+#define N_UINT32_IN_SPATIAL_PARAM_TERM_STRUCT 1
+#define N_UINT16_IN_SPATIAL_PARAM_TERM_STRUCT 2
+
+#define SIZE_OF_SPATIAL_PARAM_TERM_STRUCT_BITS \
+	(SIZE_OF_TERMINAL_STRUCT_BITS \
+	+ IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \
+	+ SIZE_OF_FRAME_GRID_STRUCT_BITS \
+	+ N_UINT32_IN_SPATIAL_PARAM_TERM_STRUCT * IA_CSS_UINT32_T_BITS \
+	+ N_UINT16_IN_SPATIAL_PARAM_TERM_STRUCT * IA_CSS_UINT16_T_BITS)
+
+struct ia_css_spatial_param_terminal_s {
+	/* Spatial Parameter terminal base */
+	ia_css_terminal_t base;
+	/* Spatial Parameter buffer handle attached to the terminal */
+	ia_css_param_payload_t param_payload;
+	/* Contains info for the frame of spatial parameters */
+	ia_css_frame_grid_desc_t frame_grid_desc;
+	/* Kernel identifier */
+	uint32_t kernel_id;
+	/*
+	 * Points to the variable array of
+	 * ia_css_frame_grid_param_section_desc_t
+	 */
+	uint16_t frame_grid_param_section_desc_offset;
+	/*
+	 * Points to array of ia_css_fragment_grid_desc_t
+	 * which contain info for the fragments of spatial parameters
+	 */
+	uint16_t fragment_grid_desc_offset;
+};
+/* ==================== Spatial Param Terminal - END ==================== */
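+
+/*
+ * Layout sketch (editor's note, mirroring ia_css_spatial_param_terminal_create
+ * further below in this patch): the descriptor blob is
+ *
+ *	[ia_css_spatial_param_terminal_t]
+ *	[nof_fragments x ia_css_fragment_grid_desc_t]  <- fragment_grid_desc_offset
+ *	[ia_css_frame_grid_param_section_desc_t array] <- frame_grid_param_section_desc_offset
+ */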
+
+
+/* ==================== Sliced Param Terminal - START ==================== */
+#define N_UINT32_IN_SLICE_PARAM_SECTION_DESC_STRUCT 2
+
+#define SIZE_OF_SLICE_PARAM_SECTION_DESC_STRUCT_BITS \
+	(N_UINT32_IN_SLICE_PARAM_SECTION_DESC_STRUCT * IA_CSS_UINT32_T_BITS)
+
+/* A Slice of parameters ready to be transferred from/to registers */
+struct ia_css_slice_param_section_desc_s {
+	/* Offset of the parameter allocation in memory */
+	uint32_t mem_offset;
+	/* Memory allocation size needs of this parameter */
+	uint32_t mem_size;
+};
+
+#define N_UINT16_IN_FRAGMENT_SLICE_DESC_STRUCT 2
+#define N_PADDING_UINT8_FRAGMENT_SLICE_DESC_STRUCT 4
+
+#define SIZE_OF_FRAGMENT_SLICE_DESC_STRUCT_BITS \
+	(N_UINT16_IN_FRAGMENT_SLICE_DESC_STRUCT * IA_CSS_UINT16_T_BITS \
+	+ N_PADDING_UINT8_FRAGMENT_SLICE_DESC_STRUCT * IA_CSS_UINT8_T_BITS)
+
+struct ia_css_fragment_slice_desc_s {
+	/*
+	 * Points to array of ia_css_slice_param_section_desc_t
+	 * which contain info for each parameter slice
+	 */
+	uint16_t slice_section_desc_offset;
+	/* Number of slices for the parameters for this fragment */
+	uint16_t slice_count;
+	uint8_t padding[N_PADDING_UINT8_FRAGMENT_SLICE_DESC_STRUCT];
+};
+
+#define N_UINT32_IN_SLICED_PARAM_TERMINAL_STRUCT 1
+#define N_UINT16_IN_SLICED_PARAM_TERMINAL_STRUCT 1
+#define N_PADDING_UINT8_SLICED_PARAM_TERMINAL_STRUCT 2
+
+#define SIZE_OF_SLICED_PARAM_TERM_STRUCT_BITS \
+	(SIZE_OF_TERMINAL_STRUCT_BITS \
+	+ IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \
+	+ N_UINT32_IN_SLICED_PARAM_TERMINAL_STRUCT * IA_CSS_UINT32_T_BITS \
+	+ N_UINT16_IN_SLICED_PARAM_TERMINAL_STRUCT * IA_CSS_UINT16_T_BITS \
+	+ N_PADDING_UINT8_SLICED_PARAM_TERMINAL_STRUCT * IA_CSS_UINT8_T_BITS)
+
+struct ia_css_sliced_param_terminal_s {
+	/* Sliced Parameter terminal base */
+	ia_css_terminal_t base;
+	/* Sliced Parameter buffer handle attached to the terminal */
+	ia_css_param_payload_t param_payload;
+	/* Kernel identifier */
+	uint32_t kernel_id;
+	/*
+	 * Points to array of ia_css_fragment_slice_desc_t
+	 * which contain info for the slicing of the parameters
+	 */
+	uint16_t fragment_slice_desc_offset;
+	uint8_t padding[N_PADDING_UINT8_SLICED_PARAM_TERMINAL_STRUCT];
+};
+/* ==================== Sliced Param Terminal - END ==================== */
+
+
+/* ==================== Program Terminal - START ==================== */
+
+#define N_UINT32_IN_FRAG_PARAM_SEC_STRUCT 2
+
+#define SIZE_OF_FRAG_PARAM_SEC_STRUCT_BITS \
+	(N_UINT32_IN_FRAG_PARAM_SEC_STRUCT * IA_CSS_UINT32_T_BITS)
+
+/* Fragment constant parameters section */
+struct ia_css_fragment_param_section_desc_s {
+	/* Offset of the parameter allocation in memory */
+	uint32_t mem_offset;
+	/* Memory allocation size needs of this parameter */
+	uint32_t mem_size;
+};
+
+#define N_UINT16_IN_FRAG_SEQ_COMMAND_STRUCT IA_CSS_N_COMMAND_COUNT
+
+#define SIZE_OF_FRAG_SEQ_COMMANDS_STRUCT_BITS \
+	(N_UINT16_IN_FRAG_SEQ_COMMAND_STRUCT * IA_CSS_UINT16_T_BITS)
+
+/* 4 commands packed together to save memory space */
+struct ia_css_kernel_fragment_sequencer_command_desc_s {
+	/* Contains the "(command_index%4) == index" command desc */
+	uint16_t line_count[IA_CSS_N_COMMAND_COUNT];
+};
+
+#define N_UINT16_IN_FRAG_SEQ_INFO_STRUCT (5 * IA_CSS_N_DATA_DIMENSION + 2)
+
+#define SIZE_OF_FRAG_SEQ_INFO_STRUCT_BITS \
+	(N_UINT16_IN_FRAG_SEQ_INFO_STRUCT * IA_CSS_UINT16_T_BITS)
+
+struct ia_css_kernel_fragment_sequencer_info_desc_s {
+	/* Slice dimensions */
+	uint16_t
fragment_grid_slice_dimension[IA_CSS_N_DATA_DIMENSION]; + /* Nof slices */ + uint16_t fragment_grid_slice_count[IA_CSS_N_DATA_DIMENSION]; + /* Grid point decimation factor */ + uint16_t + fragment_grid_point_decimation_factor[IA_CSS_N_DATA_DIMENSION]; + /* Relative position of grid origin to pixel origin */ + int16_t + fragment_grid_overlay_pixel_topleft_index[IA_CSS_N_DATA_DIMENSION]; + /* Size of active fragment region */ + int16_t + fragment_grid_overlay_pixel_dimension[IA_CSS_N_DATA_DIMENSION]; + /* If >0 it overrides the standard fragment sequencer info */ + uint16_t command_count; + /* + * To be used only if command_count>0, points to the descriptors + * for the commands (ia_css_kernel_fragment_sequencer_command_desc_s) + */ + uint16_t command_desc_offset; +}; + +#define N_UINT16_IN_PROG_TERM_STRUCT 2 +#define N_PADDING_UINT8_IN_PROG_TERM_STRUCT 4 + +#define SIZE_OF_PROG_TERM_STRUCT_BITS \ + (SIZE_OF_TERMINAL_STRUCT_BITS \ + + IA_CSS_PARAM_PAYLOAD_STRUCT_BITS \ + + N_UINT16_IN_PROG_TERM_STRUCT * IA_CSS_UINT16_T_BITS \ + + N_PADDING_UINT8_IN_PROG_TERM_STRUCT * IA_CSS_UINT8_T_BITS) + +struct ia_css_program_terminal_s { + /* Program terminal base */ + ia_css_terminal_t base; + /* Program terminal buffer handle attached to the terminal */ + ia_css_param_payload_t param_payload; + /* Points to array of ia_css_fragment_param_desc_s */ + uint16_t fragment_param_section_desc_offset; + /* Points to array of ia_css_kernel_fragment_sequencer_info_s */ + uint16_t kernel_fragment_sequencer_info_desc_offset; + /* align to 64 */ + uint8_t padding[N_PADDING_UINT8_IN_PROG_TERM_STRUCT]; +}; +/* ==================== Program Terminal - END ==================== */ + +#endif /* __IA_CSS_TERMINAL_TYPES_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal.c new file mode 100644 index 000000000000..683fb3a88cd8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal.c @@ -0,0 +1,20 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifdef __INLINE_PARAMETERS__ +#include "storage_class.h" +STORAGE_CLASS_INLINE int __ia_css_param_avoid_warning_on_empty_file(void) { return 0; } +#else /* __INLINE_PARAMETERS__ */ +#include "ia_css_terminal_impl.h" +#endif /* __INLINE_PARAMETERS__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_impl.h new file mode 100644 index 000000000000..9ccf3931e8e3 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_impl.h @@ -0,0 +1,495 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_IMPL_H +#define __IA_CSS_TERMINAL_IMPL_H + +#include "ia_css_terminal.h" +#include "ia_css_terminal_types.h" +#include "error_support.h" +#include "assert_support.h" +#include "storage_class.h" + +/* Param Terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_param_in_terminal_get_descriptor_size( + const unsigned int nof_sections) +{ + return sizeof(ia_css_param_terminal_t) + + nof_sections*sizeof(ia_css_param_section_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_param_section_desc_t *ia_css_param_in_terminal_get_param_section_desc( + const ia_css_param_terminal_t *param_terminal, + const unsigned int section_index) +{ + ia_css_param_section_desc_t *param_section_base; + ia_css_param_section_desc_t *param_section_desc = NULL; + + verifjmpexit(param_terminal != NULL); + + param_section_base = + (ia_css_param_section_desc_t *) + (((const char *)param_terminal) + + param_terminal->param_section_desc_offset); + param_section_desc = &(param_section_base[section_index]); + +EXIT: + return param_section_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_param_out_terminal_get_descriptor_size( + const unsigned int nof_sections, + const unsigned int nof_fragments) +{ + return sizeof(ia_css_param_terminal_t) + + nof_fragments*nof_sections*sizeof(ia_css_param_section_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_param_section_desc_t *ia_css_param_out_terminal_get_param_section_desc( + const ia_css_param_terminal_t *param_terminal, + const unsigned int section_index, + const unsigned int nof_sections, + const unsigned int fragment_index) +{ + ia_css_param_section_desc_t *param_section_base; + ia_css_param_section_desc_t *param_section_desc = NULL; + + verifjmpexit(param_terminal != NULL); + + param_section_base = + (ia_css_param_section_desc_t *) + (((const char *)param_terminal) + + param_terminal->param_section_desc_offset); + param_section_desc = + &(param_section_base[(nof_sections * fragment_index) + + section_index]); + +EXIT: + return param_section_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_param_terminal_create( + ia_css_param_terminal_t *param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal) +{ + if (param_terminal == NULL) { + return -EFAULT; + } + + if (terminal_offset > (1<<15)) { + return -EINVAL; + } + + param_terminal->base.terminal_type = + is_input_terminal ? 
+ IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN : + IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT; + param_terminal->base.parent_offset = + 0 - ((int16_t)terminal_offset); + param_terminal->base.size = terminal_size; + param_terminal->param_section_desc_offset = + sizeof(ia_css_param_terminal_t); + + return 0; +} + +/* Spatial Param Terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_spatial_param_terminal_get_descriptor_size( + const unsigned int nof_frame_param_sections, + const unsigned int nof_fragments) +{ + return sizeof(ia_css_spatial_param_terminal_t) + + nof_frame_param_sections * sizeof( + ia_css_frame_grid_param_section_desc_t) + + nof_fragments * sizeof(ia_css_fragment_grid_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_fragment_grid_desc_t * +ia_css_spatial_param_terminal_get_fragment_grid_desc( + const ia_css_spatial_param_terminal_t *spatial_param_terminal, + const unsigned int fragment_index) +{ + ia_css_fragment_grid_desc_t *fragment_grid_desc_base; + ia_css_fragment_grid_desc_t *fragment_grid_desc = NULL; + + verifjmpexit(spatial_param_terminal != NULL); + + fragment_grid_desc_base = + (ia_css_fragment_grid_desc_t *) + (((const char *)spatial_param_terminal) + + spatial_param_terminal->fragment_grid_desc_offset); + fragment_grid_desc = &(fragment_grid_desc_base[fragment_index]); + +EXIT: + return fragment_grid_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_frame_grid_param_section_desc_t * +ia_css_spatial_param_terminal_get_frame_grid_param_section_desc( + const ia_css_spatial_param_terminal_t *spatial_param_terminal, + const unsigned int section_index) +{ + ia_css_frame_grid_param_section_desc_t * + frame_grid_param_section_base; + ia_css_frame_grid_param_section_desc_t * + frame_grid_param_section_desc = NULL; + + verifjmpexit(spatial_param_terminal != NULL); + + frame_grid_param_section_base = + (ia_css_frame_grid_param_section_desc_t *) + (((const char *)spatial_param_terminal) + + spatial_param_terminal->frame_grid_param_section_desc_offset); + frame_grid_param_section_desc = + &(frame_grid_param_section_base[section_index]); + +EXIT: + return frame_grid_param_section_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_spatial_param_terminal_create( + ia_css_spatial_param_terminal_t *spatial_param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal, + const unsigned int nof_fragments, + const uint32_t kernel_id) +{ + if (spatial_param_terminal == NULL) { + return -EFAULT; + } + + if (terminal_offset > (1<<15)) { + return -EINVAL; + } + + spatial_param_terminal->base.terminal_type = + is_input_terminal ? 
+ IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN : + IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT; + spatial_param_terminal->base.parent_offset = + 0 - ((int16_t)terminal_offset); + spatial_param_terminal->base.size = terminal_size; + spatial_param_terminal->kernel_id = kernel_id; + spatial_param_terminal->fragment_grid_desc_offset = + sizeof(ia_css_spatial_param_terminal_t); + spatial_param_terminal->frame_grid_param_section_desc_offset = + spatial_param_terminal->fragment_grid_desc_offset + + (nof_fragments * sizeof(ia_css_fragment_grid_desc_t)); + + return 0; +} + +/* Sliced terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_sliced_param_terminal_get_descriptor_size( + const unsigned int nof_slice_param_sections, + const unsigned int nof_slices[], + const unsigned int nof_fragments) +{ + unsigned int descriptor_size = 0; + unsigned int fragment_index; + unsigned int nof_slices_total = 0; + + verifjmpexit(nof_slices != NULL); + + for (fragment_index = 0; + fragment_index < nof_fragments; fragment_index++) { + nof_slices_total += nof_slices[fragment_index]; + } + + descriptor_size = + sizeof(ia_css_sliced_param_terminal_t) + + nof_fragments*sizeof(ia_css_fragment_slice_desc_t) + + nof_slices_total*nof_slice_param_sections*sizeof( + ia_css_fragment_param_section_desc_t); + +EXIT: + return descriptor_size; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_fragment_slice_desc_t * +ia_css_sliced_param_terminal_get_fragment_slice_desc( + const ia_css_sliced_param_terminal_t *sliced_param_terminal, + const unsigned int fragment_index +) +{ + ia_css_fragment_slice_desc_t *fragment_slice_desc_base; + ia_css_fragment_slice_desc_t *fragment_slice_desc = NULL; + + verifjmpexit(sliced_param_terminal != NULL); + + fragment_slice_desc_base = + (ia_css_fragment_slice_desc_t *) + (((const char *)sliced_param_terminal) + + sliced_param_terminal->fragment_slice_desc_offset); + fragment_slice_desc = &(fragment_slice_desc_base[fragment_index]); + +EXIT: + return fragment_slice_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_slice_param_section_desc_t * +ia_css_sliced_param_terminal_get_slice_param_section_desc( + const ia_css_sliced_param_terminal_t *sliced_param_terminal, + const unsigned int fragment_index, + const unsigned int slice_index, + const unsigned int section_index, + const unsigned int nof_slice_param_sections) +{ + ia_css_fragment_slice_desc_t *fragment_slice_desc; + ia_css_slice_param_section_desc_t *slice_param_section_desc_base; + ia_css_slice_param_section_desc_t *slice_param_section_desc = NULL; + + fragment_slice_desc = + ia_css_sliced_param_terminal_get_fragment_slice_desc( + sliced_param_terminal, + fragment_index + ); + verifjmpexit(fragment_slice_desc != NULL); + + slice_param_section_desc_base = + (ia_css_slice_param_section_desc_t *) + (((const char *)sliced_param_terminal) + + fragment_slice_desc->slice_section_desc_offset); + slice_param_section_desc = + &(slice_param_section_desc_base[( + slice_index * nof_slice_param_sections) + + section_index]); + +EXIT: + return slice_param_section_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_sliced_param_terminal_create( + ia_css_sliced_param_terminal_t *sliced_param_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const uint16_t is_input_terminal, + const unsigned int nof_slice_param_sections, + const unsigned int nof_slices[], + const unsigned int nof_fragments, + const uint32_t kernel_id) +{ + unsigned int fragment_index; + unsigned int nof_slices_total = 0; + + if (sliced_param_terminal == 
NULL) { + return -EFAULT; + } + + if (terminal_offset > (1<<15)) { + return -EINVAL; + } + + sliced_param_terminal->base.terminal_type = + is_input_terminal ? + IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN : + IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT; + sliced_param_terminal->base.parent_offset = + 0 - ((int16_t)terminal_offset); + sliced_param_terminal->base.size = terminal_size; + sliced_param_terminal->kernel_id = kernel_id; + /* set here to use below to find the pointer */ + sliced_param_terminal->fragment_slice_desc_offset = + sizeof(ia_css_sliced_param_terminal_t); + for (fragment_index = 0; + fragment_index < nof_fragments; fragment_index++) { + ia_css_fragment_slice_desc_t *fragment_slice_desc = + ia_css_sliced_param_terminal_get_fragment_slice_desc( + sliced_param_terminal, + fragment_index); + /* + * Error handling not required at this point + * since everything has been constructed/validated just above + */ + fragment_slice_desc->slice_count = nof_slices[fragment_index]; + fragment_slice_desc->slice_section_desc_offset = + sliced_param_terminal->fragment_slice_desc_offset + + (nof_fragments * sizeof( + ia_css_fragment_slice_desc_t)) + + (nof_slices_total * nof_slice_param_sections * sizeof( + ia_css_slice_param_section_desc_t)); + nof_slices_total += nof_slices[fragment_index]; + } + + return 0; +} + +/* Program terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_program_terminal_get_descriptor_size( + const unsigned int nof_fragments, + const unsigned int nof_fragment_param_sections, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int nof_command_objs) +{ + return sizeof(ia_css_program_terminal_t) + + nof_fragments * nof_fragment_param_sections * + sizeof(ia_css_fragment_param_section_desc_t) + + nof_fragments * nof_kernel_fragment_sequencer_infos * + sizeof(ia_css_kernel_fragment_sequencer_info_desc_t) + + nof_command_objs * sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_fragment_param_section_desc_t * +ia_css_program_terminal_get_frgmnt_prm_sct_desc( + const ia_css_program_terminal_t *program_terminal, + const unsigned int fragment_index, + const unsigned int section_index, + const unsigned int nof_fragment_param_sections) +{ + ia_css_fragment_param_section_desc_t * + fragment_param_section_desc_base; + ia_css_fragment_param_section_desc_t * + fragment_param_section_desc = NULL; + + verifjmpexit(program_terminal != NULL); + verifjmpexit(section_index < nof_fragment_param_sections); + + fragment_param_section_desc_base = + (ia_css_fragment_param_section_desc_t *) + (((const char *)program_terminal) + + program_terminal->fragment_param_section_desc_offset); + fragment_param_section_desc = + &(fragment_param_section_desc_base[(fragment_index * + nof_fragment_param_sections) + section_index]); + +EXIT: + return fragment_param_section_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_kernel_fragment_sequencer_info_desc_t * +ia_css_program_terminal_get_kernel_frgmnt_seq_info_desc( + const ia_css_program_terminal_t *program_terminal, + const unsigned int fragment_index, + const unsigned int info_index, + const unsigned int nof_kernel_fragment_sequencer_infos) +{ + ia_css_kernel_fragment_sequencer_info_desc_t * + kernel_fragment_sequencer_info_desc_base; + ia_css_kernel_fragment_sequencer_info_desc_t * + kernel_fragment_sequencer_info_desc = NULL; + + verifjmpexit(program_terminal != NULL); + if (nof_kernel_fragment_sequencer_infos > 0) { + verifjmpexit(info_index < 
nof_kernel_fragment_sequencer_infos); + } + + kernel_fragment_sequencer_info_desc_base = + (ia_css_kernel_fragment_sequencer_info_desc_t *) + (((const char *)program_terminal) + + program_terminal->kernel_fragment_sequencer_info_desc_offset); + kernel_fragment_sequencer_info_desc = + &(kernel_fragment_sequencer_info_desc_base[(fragment_index * + nof_kernel_fragment_sequencer_infos) + info_index]); + +EXIT: + return kernel_fragment_sequencer_info_desc; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_program_terminal_create( + ia_css_program_terminal_t *program_terminal, + const uint16_t terminal_offset, + const uint16_t terminal_size, + const unsigned int nof_fragments, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int nof_command_objs) +{ + if (program_terminal == NULL) { + return -EFAULT; + } + + if (terminal_offset > (1<<15)) { + return -EINVAL; + } + + program_terminal->base.terminal_type = IA_CSS_TERMINAL_TYPE_PROGRAM; + program_terminal->base.parent_offset = 0-((int16_t)terminal_offset); + program_terminal->base.size = terminal_size; + program_terminal->kernel_fragment_sequencer_info_desc_offset = + sizeof(ia_css_program_terminal_t); + program_terminal->fragment_param_section_desc_offset = + program_terminal->kernel_fragment_sequencer_info_desc_offset + + (nof_fragments * nof_kernel_fragment_sequencer_infos * + sizeof(ia_css_kernel_fragment_sequencer_info_desc_t)) + + (nof_command_objs * sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t)); + + return 0; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_program_terminal_get_command_base_offset( + const ia_css_program_terminal_t *program_terminal, + const unsigned int nof_fragments, + const unsigned int nof_kernel_fragment_sequencer_infos, + const unsigned int commands_slots_used, + uint16_t *command_desc_offset) +{ + if (command_desc_offset == NULL) { + return -EFAULT; + } + + *command_desc_offset = 0; + + if (program_terminal == NULL) { + return -EFAULT; + } + + *command_desc_offset = + program_terminal->kernel_fragment_sequencer_info_desc_offset + + (nof_fragments * nof_kernel_fragment_sequencer_infos * + sizeof(ia_css_kernel_fragment_sequencer_info_desc_t)) + + (commands_slots_used * sizeof( + ia_css_kernel_fragment_sequencer_command_desc_t)); + + return 0; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +uint16_t *ia_css_program_terminal_get_line_count( + const ia_css_kernel_fragment_sequencer_command_desc_t + *kernel_fragment_sequencer_command_desc_base, + const unsigned int set_count) +{ + uint16_t *line_count = NULL; + + verifjmpexit(kernel_fragment_sequencer_command_desc_base != NULL); + line_count = + (uint16_t *)&(kernel_fragment_sequencer_command_desc_base[ + set_count >> 2].line_count[set_count & 0x00000003]); +EXIT: + return line_count; +} + +#endif /* __IA_CSS_TERMINAL_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest.c b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest.c new file mode 100644 index 000000000000..53c4708c7fc9 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest.c @@ -0,0 +1,20 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifdef __INLINE_PARAMETERS__ +#include "storage_class.h" +STORAGE_CLASS_INLINE int __ia_css_param_avoid_warning_on_empty_file(void) { return 0; } +#else /* __INLINE_PARAMETERS__ */ +#include "ia_css_terminal_manifest_impl.h" +#endif /* __INLINE_PARAMETERS__ */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest_impl.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest_impl.h new file mode 100644 index 000000000000..39734136b117 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/src/ia_css_terminal_manifest_impl.h @@ -0,0 +1,347 @@ +/** +* Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2010 - 2018, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. +*/ + +#ifndef __IA_CSS_TERMINAL_MANIFEST_IMPL_H +#define __IA_CSS_TERMINAL_MANIFEST_IMPL_H + +#include "ia_css_terminal_manifest.h" +#include "error_support.h" +#include "assert_support.h" +#include "storage_class.h" + +STORAGE_CLASS_INLINE void __terminal_manifest_dummy_check_alignment(void) +{ + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_TERMINAL_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_param_terminal_manifest_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_param_terminal_manifest_t) % sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_TERMINAL_MANIFEST_SEC_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_param_manifest_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_param_manifest_section_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_SPATIAL_PARAM_TERM_MAN_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_spatial_param_terminal_manifest_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_spatial_param_terminal_manifest_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAME_GRID_PARAM_MAN_SEC_STRUCT_IN_BITS != + (CHAR_BIT * sizeof( + ia_css_frame_grid_param_manifest_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_frame_grid_param_manifest_section_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PROG_TERM_MAN_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_program_terminal_manifest_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_program_terminal_manifest_t)%sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_FRAG_PARAM_MAN_SEC_STRUCT_IN_BITS != + (CHAR_BIT * sizeof( + ia_css_fragment_param_manifest_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_fragment_param_manifest_section_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + 
SIZE_OF_KERNEL_FRAG_SEQ_INFO_MAN_STRUCT_IN_BITS != + (CHAR_BIT * sizeof( + ia_css_kernel_fragment_sequencer_info_manifest_desc_t)) + ); + + COMPILATION_ERROR_IF(0 != sizeof( + ia_css_kernel_fragment_sequencer_info_manifest_desc_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_PARAM_TERMINAL_MANIFEST_STRUCT_IN_BITS != + (CHAR_BIT * sizeof(ia_css_sliced_param_terminal_manifest_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_sliced_param_terminal_manifest_t) % + sizeof(uint64_t)); + + COMPILATION_ERROR_IF( + SIZE_OF_SLICED_PARAM_MAN_SEC_STRUCT_IN_BITS != + (CHAR_BIT * sizeof + (ia_css_sliced_param_manifest_section_desc_t))); + + COMPILATION_ERROR_IF(0 != + sizeof(ia_css_sliced_param_manifest_section_desc_t) % + sizeof(uint64_t)); +} + +/* Parameter Terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_param_terminal_manifest_get_size( + const unsigned int nof_sections) +{ + + return sizeof(ia_css_param_terminal_manifest_t) + + nof_sections*sizeof(ia_css_param_manifest_section_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_param_terminal_manifest_init( + ia_css_param_terminal_manifest_t *param_terminal, + const uint16_t section_count) +{ + if (param_terminal == NULL) { + return -EFAULT; + } + + param_terminal->param_manifest_section_desc_count = section_count; + param_terminal->param_manifest_section_desc_offset = sizeof( + ia_css_param_terminal_manifest_t); + + return 0; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_param_manifest_section_desc_t * +ia_css_param_terminal_manifest_get_prm_sct_desc( + const ia_css_param_terminal_manifest_t *param_terminal_manifest, + const unsigned int section_index) +{ + ia_css_param_manifest_section_desc_t *param_manifest_section_base; + ia_css_param_manifest_section_desc_t * + param_manifest_section_desc = NULL; + + verifjmpexit(param_terminal_manifest != NULL); + + param_manifest_section_base = + (ia_css_param_manifest_section_desc_t *) + (((const char *)param_terminal_manifest) + + param_terminal_manifest->param_manifest_section_desc_offset); + + param_manifest_section_desc = + &(param_manifest_section_base[section_index]); + +EXIT: + return param_manifest_section_desc; +} + +/* Spatial Parameter Terminal */ +IA_CSS_PARAMETERS_STORAGE_CLASS_C +unsigned int ia_css_spatial_param_terminal_manifest_get_size( + const unsigned int nof_frame_param_sections) +{ + return sizeof(ia_css_spatial_param_terminal_manifest_t) + + nof_frame_param_sections * sizeof( + ia_css_frame_grid_param_manifest_section_desc_t); +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +int ia_css_spatial_param_terminal_manifest_init( + ia_css_spatial_param_terminal_manifest_t *spatial_param_terminal, + const uint16_t section_count) +{ + if (spatial_param_terminal == NULL) { + return -EFAULT; + } + + spatial_param_terminal-> + frame_grid_param_manifest_section_desc_count = section_count; + spatial_param_terminal-> + frame_grid_param_manifest_section_desc_offset = + sizeof(ia_css_spatial_param_terminal_manifest_t); + + return 0; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_frame_grid_param_manifest_section_desc_t * +ia_css_spatial_param_terminal_manifest_get_frm_grid_prm_sct_desc( + const ia_css_spatial_param_terminal_manifest_t * + spatial_param_terminal_manifest, + const unsigned int section_index) +{ + ia_css_frame_grid_param_manifest_section_desc_t * + frame_param_manifest_section_base; + ia_css_frame_grid_param_manifest_section_desc_t * + frame_param_manifest_section_desc = NULL; + + verifjmpexit(spatial_param_terminal_manifest != NULL); + + 
frame_param_manifest_section_base =
+		(ia_css_frame_grid_param_manifest_section_desc_t *)
+		(((const char *)spatial_param_terminal_manifest) +
+		spatial_param_terminal_manifest->
+		frame_grid_param_manifest_section_desc_offset);
+	frame_param_manifest_section_desc =
+		&(frame_param_manifest_section_base[section_index]);
+
+EXIT:
+	return frame_param_manifest_section_desc;
+}
+
+/* Sliced Terminal */
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+unsigned int ia_css_sliced_param_terminal_manifest_get_size(
+	const unsigned int nof_slice_param_sections)
+{
+	return sizeof(ia_css_sliced_param_terminal_manifest_t) +
+		nof_slice_param_sections *
+		sizeof(ia_css_sliced_param_manifest_section_desc_t);
+}
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+int ia_css_sliced_param_terminal_manifest_init(
+	ia_css_sliced_param_terminal_manifest_t *sliced_param_terminal,
+	const uint16_t section_count)
+{
+	if (sliced_param_terminal == NULL) {
+		return -EFAULT;
+	}
+
+	sliced_param_terminal->sliced_param_section_count = section_count;
+	sliced_param_terminal->sliced_param_section_offset =
+		sizeof(ia_css_sliced_param_terminal_manifest_t);
+
+	return 0;
+}
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+ia_css_sliced_param_manifest_section_desc_t *
+ia_css_sliced_param_terminal_manifest_get_sliced_prm_sct_desc(
+	const ia_css_sliced_param_terminal_manifest_t *
+	sliced_param_terminal_manifest,
+	const unsigned int section_index)
+{
+	ia_css_sliced_param_manifest_section_desc_t *
+		sliced_param_manifest_section_base;
+	ia_css_sliced_param_manifest_section_desc_t *
+		sliced_param_manifest_section_desc = NULL;
+
+	verifjmpexit(sliced_param_terminal_manifest != NULL);
+
+	sliced_param_manifest_section_base =
+		(ia_css_sliced_param_manifest_section_desc_t *)
+		(((const char *)sliced_param_terminal_manifest) +
+		sliced_param_terminal_manifest->
+		sliced_param_section_offset);
+	sliced_param_manifest_section_desc =
+		&(sliced_param_manifest_section_base[section_index]);
+
+EXIT:
+	return sliced_param_manifest_section_desc;
+}
+
+/* Program Terminal */
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+unsigned int ia_css_program_terminal_manifest_get_size(
+	const unsigned int nof_fragment_param_sections,
+	const unsigned int nof_kernel_fragment_sequencer_infos)
+{
+	return sizeof(ia_css_program_terminal_manifest_t) +
+		nof_fragment_param_sections *
+		sizeof(ia_css_fragment_param_manifest_section_desc_t) +
+		nof_kernel_fragment_sequencer_infos *
+		sizeof(ia_css_kernel_fragment_sequencer_info_manifest_desc_t);
+}
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+int ia_css_program_terminal_manifest_init(
+	ia_css_program_terminal_manifest_t *program_terminal,
+	const uint16_t fragment_param_section_count,
+	const uint16_t kernel_fragment_seq_info_section_count)
+{
+	if (program_terminal == NULL) {
+		return -EFAULT;
+	}
+
+	program_terminal->fragment_param_manifest_section_desc_count =
+		fragment_param_section_count;
+	program_terminal->fragment_param_manifest_section_desc_offset =
+		sizeof(ia_css_program_terminal_manifest_t);
+
+	program_terminal->kernel_fragment_sequencer_info_manifest_info_count =
+		kernel_fragment_seq_info_section_count;
+	program_terminal->kernel_fragment_sequencer_info_manifest_info_offset =
+		sizeof(ia_css_program_terminal_manifest_t) +
+		fragment_param_section_count * sizeof(
+		ia_css_fragment_param_manifest_section_desc_t);
+
+	return 0;
+}
+
+IA_CSS_PARAMETERS_STORAGE_CLASS_C
+ia_css_fragment_param_manifest_section_desc_t *
+ia_css_program_terminal_manifest_get_frgmnt_prm_sct_desc(
+	const ia_css_program_terminal_manifest_t
*program_terminal_manifest, + const unsigned int section_index) +{ + ia_css_fragment_param_manifest_section_desc_t * + fragment_param_manifest_section_base; + ia_css_fragment_param_manifest_section_desc_t * + fragment_param_manifest_section = NULL; + + verifjmpexit(program_terminal_manifest != NULL); + + fragment_param_manifest_section_base = + (ia_css_fragment_param_manifest_section_desc_t *) + (((const char *)program_terminal_manifest) + + program_terminal_manifest-> + fragment_param_manifest_section_desc_offset); + fragment_param_manifest_section = + &(fragment_param_manifest_section_base[section_index]); + +EXIT: + return fragment_param_manifest_section; +} + +IA_CSS_PARAMETERS_STORAGE_CLASS_C +ia_css_kernel_fragment_sequencer_info_manifest_desc_t * +ia_css_program_terminal_manifest_get_kernel_frgmnt_seq_info_desc( + const ia_css_program_terminal_manifest_t *program_terminal_manifest, + const unsigned int info_index) +{ + ia_css_kernel_fragment_sequencer_info_manifest_desc_t * + kernel_manifest_fragment_sequencer_info_manifest_desc_base; + ia_css_kernel_fragment_sequencer_info_manifest_desc_t * + kernel_manifest_fragment_sequencer_info_manifest_desc = NULL; + + verifjmpexit(program_terminal_manifest != NULL); + + kernel_manifest_fragment_sequencer_info_manifest_desc_base = + (ia_css_kernel_fragment_sequencer_info_manifest_desc_t *) + (((const char *)program_terminal_manifest) + + program_terminal_manifest-> + kernel_fragment_sequencer_info_manifest_info_offset); + + kernel_manifest_fragment_sequencer_info_manifest_desc = + &(kernel_manifest_fragment_sequencer_info_manifest_desc_base[ + info_index]); + +EXIT: + return kernel_manifest_fragment_sequencer_info_manifest_desc; +} + +#endif /* __IA_CSS_TERMINAL_MANIFEST_IMPL_H */ diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/vied_parameters.mk b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/vied_parameters.mk new file mode 100644 index 000000000000..834a1a4b2bab --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/lib/vied_parameters/vied_parameters.mk @@ -0,0 +1,76 @@ +# # # +# Support for Intel Camera Imaging ISP subsystem. +# Copyright (c) 2010 - 2018, Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for +# more details +# +# +# MODULE is VIED_PARAMETERS + +VIED_PARAMETERS_DIR=$${MODULES_DIR}/vied_parameters + +VIED_PARAMETERS_INTERFACE=$(VIED_PARAMETERS_DIR)/interface +VIED_PARAMETERS_SOURCES=$(VIED_PARAMETERS_DIR)/src +VIED_PARAMETERS_EXTINCLUDE = $${MODULES_DIR}/support + +VIED_PARAMETERS_DYNAMIC_HOST_FILES += $(VIED_PARAMETERS_SOURCES)/ia_css_terminal.c +VIED_PARAMETERS_STATIC_HOST_FILES += $(VIED_PARAMETERS_SOURCES)/ia_css_terminal_manifest.c + +VIED_PARAMETERS_HOST_FILES = $(VIED_PARAMETERS_DYNAMIC_HOST_FILES) +VIED_PARAMETERS_HOST_FILES += $(VIED_PARAMETERS_STATIC_HOST_FILES) + +VIED_PARAMETERS_ISA_CLIENT_HOST_FILES = $(VIED_PARAMETERS_SOURCES)/ia_css_isys_process_group.c +VIED_PARAMETERS_ISA_CLIENT_HOST_FILES += $(VIED_PARAMETERS_DIR)/client/ia_css_isys_parameter_client.c + +VIED_PARAMETERS_DYNAMIC_FW_FILES += $(VIED_PARAMETERS_SOURCES)/ia_css_terminal.c +VIED_PARAMETERS_STATIC_FW_FILES += $(VIED_PARAMETERS_SOURCES)/ia_css_terminal_manifest.c + +VIED_PARAMETERS_FW_FILES = $(VIED_PARAMETERS_DYNAMIC_HOST_FILES) +VIED_PARAMETERS_FW_FILES += $(VIED_PARAMETERS_STATIC_HOST_FILES) +VIED_PARAMETERS_SUPPORT_CPPFLAGS = -I$(VIED_PARAMETERS_DIR)/support +VIED_PARAMETERS_SUPPORT_CPPFLAGS += -I$(VIED_PARAMETERS_DIR)/support/$(IPU_SYSVER) +VIED_PARAMETERS_ISA_CLIENT_HOST_CPPFLAGS = -I$(VIED_PARAMETERS_DIR)/client +VIED_PARAMETERS_PSA_UTILS_HOST_FILES = $(MODULES_DIR)/vied_parameters/support/ia_css_psys_parameter_utils.c +VIED_PARAMETERS_PSA_UTILS_HOST_FILES += $(MODULES_DIR)/vied_parameters/support/$(IPU_SYSVER)/ia_css_psys_parameter_utils_dep.c + +VIED_PARAMETERS_UTILS_HOST_CPPFLAGS = $(VIED_PARAMETERS_SUPPORT_CPPFLAGS) + +VIED_PARAMETERS_ISA_UTILS_HOST_FILES = $(MODULES_DIR)/vied_parameters/support/ia_css_isys_parameter_utils.c +VIED_PARAMETERS_ISA_UTILS_HOST_FILES += $(MODULES_DIR)/vied_parameters/support/$(IPU_SYSVER)/ia_css_isys_parameter_utils_dep.c + +VIED_PARAMETERS_PRINT_CPPFLAGS += -I$(VIED_PARAMETERS_DIR)/print/interface +VIED_PARAMETERS_PRINT_FILES += $(VIED_PARAMETERS_DIR)/print/src/ia_css_terminal_print.c + +# VIED_PARAMETERS Trace Log Level = VIED_PARAMETERS_TRACE_LOG_LEVEL_NORMAL +# Other options are [VIED_PARAMETERS_TRACE_LOG_LEVEL_OFF, VIED_PARAMETERS_TRACE_LOG_LEVEL_DEBUG] +ifndef VIED_PARAMETERS_TRACE_CONFIG_HOST + VIED_PARAMETERS_TRACE_CONFIG_HOST=VIED_PARAMETERS_TRACE_LOG_LEVEL_NORMAL +endif +ifndef VIED_PARAMETERS_TRACE_CONFIG_FW + VIED_PARAMETERS_TRACE_CONFIG_FW=VIED_PARAMETERS_TRACE_LOG_LEVEL_NORMAL +endif + +VIED_PARAMETERS_HOST_CPPFLAGS += -DVIED_PARAMETERS_TRACE_CONFIG=$(VIED_PARAMETERS_TRACE_CONFIG_HOST) +VIED_PARAMETERS_FW_CPPFLAGS += -DVIED_PARAMETERS_TRACE_CONFIG=$(VIED_PARAMETERS_TRACE_CONFIG_FW) + +VIED_PARAMETERS_HOST_CPPFLAGS += -I$(VIED_PARAMETERS_INTERFACE) +VIED_PARAMETERS_HOST_CPPFLAGS += -I$(VIED_PARAMETERS_SOURCES) +VIED_PARAMETERS_HOST_CPPFLAGS += -I$(VIED_PARAMETERS_EXTINCLUDE) +VIED_PARAMETERS_HOST_CPPFLAGS += $(VIED_PARAMETERS_SUPPORT_CPPFLAGS) +VIED_PARAMETERS_FW_CPPFLAGS += -I$(VIED_PARAMETERS_INTERFACE) +VIED_PARAMETERS_FW_CPPFLAGS += -I$(VIED_PARAMETERS_SOURCES) +VIED_PARAMETERS_FW_CPPFLAGS += -I$(VIED_PARAMETERS_EXTINCLUDE) +VIED_PARAMETERS_FW_CPPFLAGS += $(VIED_PARAMETERS_SUPPORT_CPPFLAGS) + +#For IPU interface +include $(MODULES_DIR)/fw_abi_common_types/cpu/fw_abi_cpu_types.mk +VIED_PARAMETERS_HOST_CPPFLAGS += $(FW_ABI_COMMON_TYPES_HOST_CPPFLAGS) + +VIED_PARAMETERS_FW_CPPFLAGS += $(FW_ABI_COMMON_TYPES_FW_CPPFLAGS) diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/libcsspsys2600.c 
b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/libcsspsys2600.c new file mode 100644 index 000000000000..af0e41e8f6dc --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/libcsspsys2600.c @@ -0,0 +1,480 @@ +/* + * Copyright (c) 2015--2018 Intel Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include + +#include "ipu.h" +#include "ipu-mmu.h" +#include "ipu-psys.h" +#include "ipu-wrapper.h" +#include "ipu-fw-psys.h" +#include "libcsspsys2600.h" + +#include +#include +#include +#include +#include + +int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd) +{ + return -ia_css_process_group_start((ia_css_process_group_t *) + kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_start); + +int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd) +{ + return -ia_css_process_group_disown((ia_css_process_group_t *) + kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_disown); + +int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd) +{ + int rval; + + rval = ia_css_process_group_stop((ia_css_process_group_t *) + kcmd->kpg->pg); + if (rval) { + dev_err(&kcmd->fh->psys->adev->dev, + "failed to abort kcmd!\n"); + kcmd->pg_user = NULL; + rval = -EIO; + /* TODO: need to reset PSYS by power cycling it */ + } + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_abort); + +int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd) +{ + return -ia_css_process_group_submit((ia_css_process_group_t *) + kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_submit); + +static void *syscom_buffer; +static struct ia_css_syscom_config *syscom_config; +static struct ia_css_psys_server_init *server_init; + +int ipu_fw_psys_rcv_event(struct ipu_psys *psys, + struct ipu_fw_psys_event *event) +{ + return ia_css_psys_event_queue_receive(psys_syscom, + IA_CSS_PSYS_EVENT_QUEUE_MAIN_ID, + (struct ia_css_psys_event_s *)event); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_rcv_event); + +int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal, + int terminal_idx, + struct ipu_psys_kcmd *kcmd, + u32 buffer, + unsigned size) +{ + ia_css_terminal_type_t type; + u32 buffer_state; + + type = ia_css_terminal_get_type((ia_css_terminal_t *)terminal); + + switch (type) { + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_CACHED_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SPATIAL_OUT: + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_IN: + case IA_CSS_TERMINAL_TYPE_PARAM_SLICED_OUT: + case IA_CSS_TERMINAL_TYPE_PROGRAM: + buffer_state = IA_CSS_BUFFER_UNDEFINED; + break; + case IA_CSS_TERMINAL_TYPE_PARAM_STREAM: + case IA_CSS_TERMINAL_TYPE_DATA_IN: + case IA_CSS_TERMINAL_TYPE_STATE_IN: + buffer_state = IA_CSS_BUFFER_FULL; + break; + case IA_CSS_TERMINAL_TYPE_DATA_OUT: + case IA_CSS_TERMINAL_TYPE_STATE_OUT: + buffer_state = IA_CSS_BUFFER_EMPTY; + break; + default: + dev_err(&kcmd->fh->psys->adev->dev, + "unknown terminal type: 0x%x\n", type); + return -EAGAIN; + } + + if (type == IA_CSS_TERMINAL_TYPE_DATA_IN || + type == IA_CSS_TERMINAL_TYPE_DATA_OUT) { + ia_css_frame_t *frame; + + if 
(ia_css_data_terminal_set_connection_type( + (ia_css_data_terminal_t *)terminal, + IA_CSS_CONNECTION_MEMORY)) + return -EIO; + frame = ia_css_data_terminal_get_frame( + (ia_css_data_terminal_t *)terminal); + if (!frame) + return -EIO; + + if (ia_css_frame_set_data_bytes(frame, size)) + return -EIO; + } + + return -ia_css_process_group_attach_buffer( + (ia_css_process_group_t *)kcmd->kpg->pg, buffer, + buffer_state, terminal_idx); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_terminal_set); + +void ipu_fw_psys_pg_dump(struct ipu_psys *psys, + struct ipu_psys_kcmd *kcmd, + const char *note) +{ + ia_css_process_group_t *pg = (ia_css_process_group_t *)kcmd->kpg->pg; + ia_css_program_group_ID_t pgid = + ia_css_process_group_get_program_group_ID(pg); + uint8_t processes = ia_css_process_group_get_process_count( + (ia_css_process_group_t *)kcmd->kpg->pg); + unsigned int p, chn, mem; + + dev_dbg(&psys->adev->dev, "%s %s pgid %i has %i processes\n", + __func__, note, pgid, processes); + for (p = 0; p < processes; p++) { + ia_css_process_t *process = + ia_css_process_group_get_process(pg, p); + int cell = ia_css_process_get_cell(process); + dev_dbg(&psys->adev->dev, + "%s pgid %i process %i cell %i cell_bitmap = 0x%x size = %zu\n", + __func__, pgid, p, + cell, + ia_css_process_get_cells_bitmap(process), + ia_css_process_get_size(process)); + dev_dbg(&psys->adev->dev, + "%s pgid %i process %i kernel bitmap 0x%llx \n", + __func__, pgid, p, + ia_css_process_get_kernel_bitmap(process)); + for (mem = 0; mem < VIED_NCI_N_DATA_MEM_TYPE_ID; mem++ ) { + unsigned int mem_id = process->ext_mem_id[mem]; + dev_dbg(&psys->adev->dev, + "%s pgid %i process %i index %u type %d id %d offset 0x%x \n", + __func__, pgid, p, mem, + vied_nci_cell_get_mem_type(cell, mem), + mem_id, process->ext_mem_offset[mem]); + } + for (chn = 0; chn < VIED_NCI_N_DEV_CHN_ID; chn++ ) { + dev_dbg(&psys->adev->dev, + "%s pgid %i process %i dev_chn[%u] = %i\n", + __func__, pgid, p, chn, + ia_css_process_get_dev_chn(process, chn)); + } + } +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_dump); + +int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_program_group_ID( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_id); + +int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_terminal_count( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_terminal_count); + +int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_size((ia_css_process_group_t *) + kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_size); + +int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd, + dma_addr_t vaddress) +{ + return ia_css_process_group_set_ipu_vaddress((ia_css_process_group_t *) + kcmd->kpg->pg, vaddress); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_set_ipu_vaddress); + +int ipu_fw_psys_pg_load_cycles(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_pg_load_cycles( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_load_cycles); + +int ipu_fw_psys_pg_init_cycles(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_pg_init_cycles( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_init_cycles); + +int ipu_fw_psys_pg_processing_cycles(struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_pg_processing_cycles( + (ia_css_process_group_t *)kcmd->kpg->pg); +} 
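
These wrappers share one convention worth noting: where the CSS library reports a positive library status, the kernel side negates it into a kernel-style error code ("return -ia_css_...()"), while a few helpers (such as ipu_fw_psys_pg_set_ipu_vaddress() above) pass the library status through unchanged. A minimal, hedged sketch of how a caller might chain them when queueing a process group; queue_kcmd_sketch is hypothetical, the kcmd and its DMA handle are assumed to be set up already, and this is not the driver's actual submission path:

	/* Hypothetical caller; illustrates the return-value convention only. */
	static int queue_kcmd_sketch(struct ipu_psys_kcmd *kcmd,
				     dma_addr_t pg_dma)
	{
		int ret;

		if (ipu_fw_psys_pg_get_size(kcmd) <= 0)
			return -EINVAL;	/* malformed process group */

		/* This wrapper returns the raw library status, not -errno */
		ret = ipu_fw_psys_pg_set_ipu_vaddress(kcmd, pg_dma);
		if (ret)
			return -EIO;

		/* These two already return negated (kernel-style) codes */
		ret = ipu_fw_psys_pg_submit(kcmd);
		if (ret)
			return ret;

		return ipu_fw_psys_pg_start(kcmd);
	}
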
+EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_processing_cycles); + +struct ipu_fw_psys_terminal * +ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd *kcmd, int index) +{ + return (struct ipu_fw_psys_terminal *)ia_css_process_group_get_terminal( + (ia_css_process_group_t *)kcmd->kpg->pg, index); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_terminal); + +void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token) +{ + ia_css_process_group_set_token((ia_css_process_group_t *)kcmd->kpg->pg, + token); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_set_token); + +int ipu_fw_psys_pg_get_protocol( + struct ipu_psys_kcmd *kcmd) +{ + return ia_css_process_group_get_protocol_version( + (ia_css_process_group_t *)kcmd->kpg->pg); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_protocol); + +int ipu_fw_psys_open(struct ipu_psys *psys) +{ + bool opened; + int retry = IPU_PSYS_OPEN_RETRY; + + ipu_wrapper_init(PSYS_MMID, &psys->adev->dev, + psys->pdata->base); + + server_init->icache_prefetch_sp = psys->icache_prefetch_sp; + server_init->icache_prefetch_isp = psys->icache_prefetch_isp; + + psys_syscom = ia_css_psys_open(syscom_buffer, syscom_config); + if (!psys_syscom) { + dev_err(&psys->adev->dev, + "psys library open failed\n"); + return -ENODEV; + } + do { + opened = ia_css_psys_open_is_ready(psys_syscom); + if (opened) + break; + usleep_range(IPU_PSYS_OPEN_TIMEOUT_US, + IPU_PSYS_OPEN_TIMEOUT_US + 10); + retry--; + } while (retry > 0); + + if (!retry && !opened) { + dev_err(&psys->adev->dev, + "psys library open ready failed\n"); + ia_css_psys_close(psys_syscom); + ia_css_psys_release(psys_syscom, 1); + psys_syscom = NULL; + return -ENODEV; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_open); + +int ipu_fw_psys_close(struct ipu_psys *psys) +{ + int rval; + unsigned int retry = IPU_PSYS_CLOSE_TIMEOUT; + + if (!psys_syscom) + return 0; + + if (ia_css_psys_close(psys_syscom)) { + dev_err(&psys->adev->dev, + "psys library close ready failed\n"); + return 0; + } + + do { + rval = ia_css_psys_release(psys_syscom, 0); + if (rval && rval != -EBUSY) { + dev_dbg(&psys->adev->dev, "psys library release failed\n"); + break; + } + usleep_range(IPU_PSYS_CLOSE_TIMEOUT_US, + IPU_PSYS_CLOSE_TIMEOUT_US + 10); + } while (rval && --retry); + + psys_syscom = NULL; + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_close); + +u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd) +{ + return 0; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_pg_get_token); + +static const struct ipu_fw_resource_definitions default_defs = { + .cells = vied_nci_cell_type, + .num_cells = VIED_NCI_N_CELL_ID, + .num_cells_type = VIED_NCI_N_CELL_TYPE_ID, + .dev_channels = vied_nci_dev_chn_size, + .num_dev_channels = VIED_NCI_N_DEV_CHN_ID, + + .num_ext_mem_types = VIED_NCI_N_DATA_MEM_TYPE_ID, + .num_ext_mem_ids = VIED_NCI_N_MEM_ID, + .ext_mem_ids = vied_nci_mem_size, + + .cell_mem_row = VIED_NCI_N_MEM_TYPE_ID, + .cell_mem = (enum ipu_mem_id *)vied_nci_cell_mem, +}; + +const struct ipu_fw_resource_definitions *res_defs = &default_defs; +EXPORT_SYMBOL_GPL(res_defs); + +int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index, + u8 value) +{ + return ia_css_process_set_cell((ia_css_process_t *)ptr, + (vied_nci_cell_ID_t)value); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_set_process_cell_id); + +u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index) +{ + return ia_css_process_get_cell((ia_css_process_t *)ptr); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_get_process_cell_id); + +int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr) +{ + return 
ia_css_process_clear_cell((ia_css_process_t *)ptr); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_clear_process_cell); + +int ipu_fw_psys_set_process_dev_chn_offset(struct ipu_fw_psys_process *ptr, + u16 offset, u16 value) +{ + return ia_css_process_set_dev_chn((ia_css_process_t *)ptr, + (vied_nci_dev_chn_ID_t)offset, + (vied_nci_resource_size_t)value); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_set_process_dev_chn_offset); + +int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr, + u16 type_id, u16 mem_id, u16 offset) +{ + return ia_css_process_set_ext_mem((ia_css_process_t *)ptr, mem_id, offset); +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_set_process_ext_mem); + +int ipu_fw_psys_get_program_manifest_by_process( + struct ipu_fw_generic_program_manifest *gen_pm, + const struct ipu_fw_psys_program_group_manifest *pg_manifest, + struct ipu_fw_psys_process *process) +{ + ia_css_program_ID_t process_id = + ia_css_process_get_program_ID( + (const ia_css_process_t *)process); + int programs = + ia_css_program_group_manifest_get_program_count( + (const ia_css_program_group_manifest_t *)pg_manifest); + int i; + + for (i = 0; i < programs; i++) { + ia_css_program_ID_t program_id; + ia_css_program_manifest_t *pm = + ia_css_program_group_manifest_get_prgrm_mnfst( + (const ia_css_program_group_manifest_t *) + pg_manifest, i); + if (!pm) + continue; + program_id = ia_css_program_manifest_get_program_ID(pm); + if (program_id == process_id) { + gen_pm->dev_chn_size = (u16 *)pm->dev_chn_size; + gen_pm->ext_mem_size = (u16 *)pm->ext_mem_size; + gen_pm->cell_id = pm->cell_id; + gen_pm->cell_type_id = pm->cell_type_id; + return 0; + } + } + return -ENOENT; +} +EXPORT_SYMBOL_GPL(ipu_fw_psys_get_program_manifest_by_process); + +static int __init libcsspsys2600_init(void) +{ + int rval; + + syscom_buffer = kzalloc(ia_css_sizeof_psys(NULL), GFP_KERNEL); + if (!syscom_buffer) + return -ENOMEM; + + syscom_config = kzalloc(sizeof(struct ia_css_syscom_config), + GFP_KERNEL); + if (!syscom_config) { + rval = -ENOMEM; + goto out_syscom_buffer_free; + } + + server_init = kzalloc(sizeof(struct ia_css_psys_server_init), + GFP_KERNEL); + if (!server_init) { + rval = -ENOMEM; + goto out_syscom_config_free; + } + + server_init->ddr_pkg_dir_address = 0; + server_init->host_ddr_pkg_dir = 0; + server_init->pkg_dir_size = 0; + + *syscom_config = *ia_css_psys_specify(); + syscom_config->specific_addr = server_init; + syscom_config->specific_size = sizeof(struct ia_css_psys_server_init); + syscom_config->ssid = PSYS_SSID; + syscom_config->mmid = PSYS_MMID; + syscom_config->regs_addr = ipu_device_cell_memory_address(SPC0, + IPU_DEVICE_SP2600_CONTROL_REGS); + syscom_config->dmem_addr = ipu_device_cell_memory_address(SPC0, + IPU_DEVICE_SP2600_CONTROL_DMEM); + + return 0; + +out_syscom_config_free: + kfree(syscom_config); +out_syscom_buffer_free: + kfree(syscom_buffer); + + return rval; +} + +static void __exit libcsspsys2600_exit(void) +{ + kfree(syscom_buffer); + kfree(syscom_config); + kfree(server_init); +} + +module_init(libcsspsys2600_init); +module_exit(libcsspsys2600_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu psys css library"); diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/libcsspsys2600.h b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/libcsspsys2600.h new file mode 100644 index 000000000000..b8d790f56180 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/lib2600psys/libcsspsys2600.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2015--2018 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef LIBCSSPSYS2600_H +#define LIBCSSPSYS2600_H + +#include +#include +#include +#include +#include +#include +#include + +extern struct ia_css_syscom_context *psys_syscom; +#endif diff --git a/drivers/media/pci/intel/ipu4/ipu4p-css/libintel-ipu4p.c b/drivers/media/pci/intel/ipu4/ipu4p-css/libintel-ipu4p.c new file mode 100644 index 000000000000..cb6fd0499c5c --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-css/libintel-ipu4p.c @@ -0,0 +1,393 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2014 - 2018 Intel Corporation + +#include +#include +#include +#include "ipu-isys.h" +#include "ipu-wrapper.h" +#include + +#include "ipu-platform.h" + +#define ipu_lib_call_notrace_unlocked(func, isys, ...) \ + ({ \ + int rval; \ + \ + rval = -ia_css_isys_##func((isys)->fwcom, ##__VA_ARGS__); \ + \ + rval; \ + }) + +#define ipu_lib_call_notrace(func, isys, ...) \ + ({ \ + int rval; \ + \ + mutex_lock(&(isys)->lib_mutex); \ + \ + rval = ipu_lib_call_notrace_unlocked( \ + func, isys, ##__VA_ARGS__); \ + \ + mutex_unlock(&(isys)->lib_mutex); \ + \ + rval; \ + }) + +#define ipu_lib_call(func, isys, ...) \ + ({ \ + int rval; \ + dev_dbg(&(isys)->adev->dev, "hostlib: libcall %s\n", #func); \ + rval = ipu_lib_call_notrace(func, isys, ##__VA_ARGS__); \ + \ + rval; \ + }) + +static int wrapper_init_done; + +int ipu_fw_isys_close(struct ipu_isys *isys) +{ + struct device *dev = &isys->adev->dev; + int timeout = IPU_ISYS_TURNOFF_TIMEOUT; + int rval; + unsigned long flags; + + /* + * Ask library to stop the isys fw. Actual close takes + * some time as the FW must stop its actions including code fetch + * to SP icache. + */ + spin_lock_irqsave(&isys->power_lock, flags); + rval = ipu_lib_call(device_close, isys); + spin_unlock_irqrestore(&isys->power_lock, flags); + if (rval) + dev_err(dev, "Device close failure: %d\n", rval); + + /* release probably fails if the close failed. 
Let's try anyway. */
+	do {
+		usleep_range(IPU_ISYS_TURNOFF_DELAY_US,
+			     2 * IPU_ISYS_TURNOFF_DELAY_US);
+		rval = ipu_lib_call_notrace(device_release, isys, 0);
+		timeout--;
+	} while (rval != 0 && timeout);
+
+	/* Hold the spin lock to wait for the interrupt handler to finish */
+	spin_lock_irqsave(&isys->power_lock, flags);
+	if (!rval)
+		isys->fwcom = NULL;	/* No further actions needed */
+	else
+		dev_err(dev, "Device release timed out: %d\n", rval);
+	spin_unlock_irqrestore(&isys->power_lock, flags);
+	return rval;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_isys_close);
+
+int ipu_fw_isys_init(struct ipu_isys *isys,
+		     unsigned int num_streams)
+{
+	int retry = IPU_ISYS_OPEN_RETRY;
+	unsigned int i;
+
+	struct ia_css_isys_device_cfg_data isys_cfg = {
+		.driver_sys = {
+			.ssid = ISYS_SSID,
+			.mmid = ISYS_MMID,
+			.num_send_queues = clamp_t(
+				unsigned int, num_streams, 1,
+				IPU_ISYS_NUM_STREAMS),
+			.num_recv_queues = IPU_ISYS_NUM_RECV_QUEUE,
+			.send_queue_size = IPU_ISYS_SIZE_SEND_QUEUE,
+			.recv_queue_size = IPU_ISYS_SIZE_RECV_QUEUE,
+			.icache_prefetch = isys->icache_prefetch,
+		},
+	};
+	struct device *dev = &isys->adev->dev;
+	int rval;
+
+	if (!wrapper_init_done) {
+		wrapper_init_done = true;
+		ipu_wrapper_init(ISYS_MMID, &isys->adev->dev,
+				 isys->pdata->base);
+	}
+
+	/*
+	 * SRAM partitioning. Initially equal partitioning is set.
+	 * TODO: Fine-tune the partitioning based on the stream pixel load.
+	 */
+	for (i = 0; i < min(IPU_NOF_SRAM_BLOCKS_MAX,
+			    NOF_SRAM_BLOCKS_MAX); i++) {
+		if (i < isys_cfg.driver_sys.num_send_queues)
+			isys_cfg.buffer_partition.num_gda_pages[i] =
+				(IPU_DEVICE_GDA_NR_PAGES *
+				 IPU_DEVICE_GDA_VIRT_FACTOR) /
+				isys_cfg.driver_sys.num_send_queues;
+		else
+			isys_cfg.buffer_partition.num_gda_pages[i] = 0;
+	}
+
+	rval = -ia_css_isys_device_open(&isys->fwcom, &isys_cfg);
+	if (rval < 0) {
+		dev_err(dev, "isys device open failed %d\n", rval);
+		return rval;
+	}
+
+	do {
+		usleep_range(IPU_ISYS_OPEN_TIMEOUT_US,
+			     IPU_ISYS_OPEN_TIMEOUT_US + 10);
+		rval = ipu_lib_call(device_open_ready, isys);
+		if (!rval)
+			break;
+		retry--;
+	} while (retry > 0);
+
+	if (!retry && rval) {
+		dev_err(dev, "isys device open ready failed %d\n", rval);
+		ipu_fw_isys_close(isys);
+	}
+
+	return rval;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_isys_init);
+
+void ipu_fw_isys_cleanup(struct ipu_isys *isys)
+{
+	ipu_lib_call(device_release, isys, 1);
+	isys->fwcom = NULL;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_isys_cleanup);
+
+struct ipu_fw_isys_resp_info_abi *ipu_fw_isys_get_resp(
+	void *context, unsigned int queue,
+	struct ipu_fw_isys_resp_info_abi *response)
+{
+	struct ia_css_isys_resp_info apiresp;
+	int rval;
+
+	rval = -ia_css_isys_stream_handle_response(context, &apiresp);
+	if (rval < 0)
+		return NULL;
+
+	response->buf_id = 0;
+	response->type = apiresp.type;
+	response->timestamp[0] = apiresp.timestamp[0];
+	response->timestamp[1] = apiresp.timestamp[1];
+	response->stream_handle = apiresp.stream_handle;
+	response->error_info.error = apiresp.error;
+	response->error_info.error_details = apiresp.error_details;
+	response->pin.out_buf_id = apiresp.pin.out_buf_id;
+	response->pin.addr = apiresp.pin.addr;
+	response->pin_id = apiresp.pin_id;
+	response->process_group_light.param_buf_id =
+		apiresp.process_group_light.param_buf_id;
+	response->process_group_light.addr =
+		apiresp.process_group_light.addr;
+	response->acc_id = apiresp.acc_id;
+#ifdef IPU_OTF_SUPPORT
+	response->frame_counter = apiresp.frame_counter;
+	response->written_direct = apiresp.written_direct;
+#endif
+
+	return response;
+}
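
ipu_fw_isys_get_resp() copies one pending library response into the caller's ABI struct and returns NULL once nothing is queued; its counterpart ipu_fw_isys_put_resp(), defined just below, is currently a no-op but keeps the get/put pairing. A hedged sketch of how a response consumer (for example an IRQ bottom half) might drain the queue; drain_isys_responses() is hypothetical and the dispatch step is elided, only the get/put calls come from this file:

	/* Hypothetical consumer; not the driver's actual ISR path. */
	static void drain_isys_responses(struct ipu_isys *isys)
	{
		struct ipu_fw_isys_resp_info_abi resp;

		/* queue id is ignored by these wrappers; pass 0 */
		while (ipu_fw_isys_get_resp(isys->fwcom, 0, &resp)) {
			if (resp.error_info.error)
				pr_debug("isys resp error %d (details %d)\n",
					 resp.error_info.error,
					 resp.error_info.error_details);
			/* ...dispatch on resp.type and resp.pin here... */
			ipu_fw_isys_put_resp(isys->fwcom, 0);
		}
	}
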
+EXPORT_SYMBOL_GPL(ipu_fw_isys_get_resp); + +void ipu_fw_isys_put_resp(void *context, unsigned int queue) +{ + /* Nothing to do here really */ +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_put_resp); + +int ipu_fw_isys_simple_cmd(struct ipu_isys *isys, + const unsigned int stream_handle, + enum ipu_fw_isys_send_type send_type) +{ + int rval = -1; + + switch (send_type) { + case IPU_FW_ISYS_SEND_TYPE_STREAM_START: + rval = ipu_lib_call(stream_start, isys, stream_handle, NULL); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_FLUSH: + rval = ipu_lib_call(stream_flush, isys, stream_handle); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_STOP: + rval = ipu_lib_call(stream_stop, isys, stream_handle); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE: + rval = ipu_lib_call(stream_close, isys, stream_handle); + break; + default: + WARN_ON(1); + } + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_simple_cmd); + +static void resolution_abi_to_api(const struct ipu_fw_isys_resolution_abi *abi, + struct ia_css_isys_resolution *api) +{ + api->width = abi->width; + api->height = abi->height; +} + +static void output_pin_payload_abi_to_api( + struct ipu_fw_isys_output_pin_payload_abi *abi, + struct ia_css_isys_output_pin_payload *api) +{ + api->out_buf_id = abi->out_buf_id; + api->addr = abi->addr; +} + +static void output_pin_info_abi_to_api( + struct ipu_fw_isys_output_pin_info_abi *abi, + struct ia_css_isys_output_pin_info *api) +{ + api->input_pin_id = abi->input_pin_id; + resolution_abi_to_api(&abi->output_res, &api->output_res); + api->stride = abi->stride; + api->pt = abi->pt; + api->watermark_in_lines = abi->watermark_in_lines; + api->payload_buf_size = abi->payload_buf_size; + api->send_irq = abi->send_irq; + api->ft = abi->ft; +#ifdef IPU_OTF_SUPPORT + api->link_id = abi->link_id; +#endif + api->reserve_compression = abi->reserve_compression; +} + +static void param_pin_abi_to_api(struct ipu_fw_isys_param_pin_abi *abi, + struct ia_css_isys_param_pin *api) +{ + api->param_buf_id = abi->param_buf_id; + api->addr = abi->addr; +} + +static void input_pin_info_abi_to_api( + struct ipu_fw_isys_input_pin_info_abi *abi, + struct ia_css_isys_input_pin_info *api) +{ + resolution_abi_to_api(&abi->input_res, &api->input_res); + api->dt = abi->dt; + api->mipi_store_mode = abi->mipi_store_mode; + api->mapped_dt = abi->mapped_dt; +} + +static void isa_cfg_abi_to_api(const struct ipu_fw_isys_isa_cfg_abi *abi, + struct ia_css_isys_isa_cfg *api) +{ + unsigned int i; + + for (i = 0; i < min(N_IPU_FW_ISYS_RESOLUTION_INFO, + N_IA_CSS_ISYS_RESOLUTION_INFO); i++) + resolution_abi_to_api(&abi->isa_res[i], &api->isa_res[i]); + + api->blc_enabled = abi->cfg.blc; + api->lsc_enabled = abi->cfg.lsc; + api->dpc_enabled = abi->cfg.dpc; + api->downscaler_enabled = abi->cfg.downscaler; + api->awb_enabled = abi->cfg.awb; + api->af_enabled = abi->cfg.af; + api->ae_enabled = abi->cfg.ae; + api->paf_type = abi->cfg.paf; + api->send_irq_stats_ready = abi->cfg.send_irq_stats_ready; + api->send_resp_stats_ready = abi->cfg.send_irq_stats_ready; +} + +static void cropping_abi_to_api(struct ipu_fw_isys_cropping_abi *abi, + struct ia_css_isys_cropping *api) +{ + api->top_offset = abi->top_offset; + api->left_offset = abi->left_offset; + api->bottom_offset = abi->bottom_offset; + api->right_offset = abi->right_offset; +} + +static void stream_cfg_abi_to_api(struct ipu_fw_isys_stream_cfg_data_abi *abi, + struct ia_css_isys_stream_cfg_data *api) +{ + unsigned int i; + + api->src = abi->src; + api->vc = abi->vc; + api->isl_use = abi->isl_use; + api->compfmt = 
abi->compfmt; + isa_cfg_abi_to_api(&abi->isa_cfg, &api->isa_cfg); + for (i = 0; i < min(N_IPU_FW_ISYS_CROPPING_LOCATION, + N_IA_CSS_ISYS_CROPPING_LOCATION); i++) + cropping_abi_to_api(&abi->crop[i], &api->crop[i]); + + api->send_irq_sof_discarded = abi->send_irq_sof_discarded; + api->send_irq_eof_discarded = abi->send_irq_eof_discarded; + api->send_resp_sof_discarded = abi->send_irq_sof_discarded; + api->send_resp_eof_discarded = abi->send_irq_eof_discarded; + api->nof_input_pins = abi->nof_input_pins; + api->nof_output_pins = abi->nof_output_pins; + for (i = 0; i < abi->nof_input_pins; i++) + input_pin_info_abi_to_api(&abi->input_pins[i], + &api->input_pins[i]); + + for (i = 0; i < abi->nof_output_pins; i++) + output_pin_info_abi_to_api(&abi->output_pins[i], + &api->output_pins[i]); +} + +static void frame_buff_set_abi_to_api( + struct ipu_fw_isys_frame_buff_set_abi *abi, + struct ia_css_isys_frame_buff_set *api) +{ + int i; + + for (i = 0; i < min(IPU_MAX_OPINS, MAX_OPINS); i++) + output_pin_payload_abi_to_api(&abi->output_pins[i], + &api->output_pins[i]); + + param_pin_abi_to_api(&abi->process_group_light, + &api->process_group_light); + + api->send_irq_sof = abi->send_irq_sof; + api->send_irq_eof = abi->send_irq_eof; + api->send_irq_capture_ack = abi->send_irq_capture_ack; + api->send_irq_capture_done = abi->send_irq_capture_done; +} + +int ipu_fw_isys_complex_cmd(struct ipu_isys *isys, + const unsigned int stream_handle, + void *cpu_mapped_buf, + dma_addr_t dma_mapped_buf, + size_t size, + enum ipu_fw_isys_send_type send_type) +{ + union { + struct ia_css_isys_stream_cfg_data stream_cfg; + struct ia_css_isys_frame_buff_set buf; + } param; + int rval = -1; + + memset(¶m, 0, sizeof(param)); + + switch (send_type) { + case IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE: + frame_buff_set_abi_to_api(cpu_mapped_buf, ¶m.buf); + rval = ipu_lib_call(stream_capture_indication, + isys, stream_handle, ¶m.buf); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_OPEN: + stream_cfg_abi_to_api(cpu_mapped_buf, ¶m.stream_cfg); + rval = ipu_lib_call(stream_open, isys, stream_handle, + ¶m.stream_cfg); + break; + case IPU_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE: + frame_buff_set_abi_to_api(cpu_mapped_buf, ¶m.buf); + rval = ipu_lib_call(stream_start, isys, stream_handle, + ¶m.buf); + break; + default: + WARN_ON(1); + } + + return rval; +} +EXPORT_SYMBOL_GPL(ipu_fw_isys_complex_cmd); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel ipu library"); diff --git a/drivers/media/pci/intel/ipu4/ipu4p-isys-csi2.c b/drivers/media/pci/intel/ipu4/ipu4p-isys-csi2.c new file mode 100644 index 000000000000..0ced354effe8 --- /dev/null +++ b/drivers/media/pci/intel/ipu4/ipu4p-isys-csi2.c @@ -0,0 +1,370 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Intel Corporation + +#include "ipu.h" +#include "ipu-buttress.h" +#include "ipu-isys.h" +#include "ipu-isys-csi2.h" +#include "ipu-platform-isys-csi2-reg.h" +#include "ipu-platform-regs.h" +#include "ipu-trace.h" +#include "ipu-isys-csi2.h" + +#define CSI2_UPDATE_TIME_TRY_NUM 3 +#define CSI2_UPDATE_TIME_MAX_DIFF 20 + +static int ipu4p_csi2_ev_correction_params(struct ipu_isys_csi2 + *csi2, unsigned int lanes) +{ + /* + * TBD: add implementation for ipu4p + * probably re-use ipu4 implementation + */ + return 0; +} + +static void ipu4p_isys_register_errors(struct ipu_isys_csi2 *csi2) +{ + u32 status; + unsigned int index; + struct ipu_isys *isys = csi2->isys; + void __iomem *isys_base = isys->pdata->base; + + index = csi2->index; + status = readl(isys_base + + 
IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(index) + 0x8); + writel(status, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(index) + 0xc); + + status &= 0xffff; + dev_dbg(&isys->adev->dev, "csi %d rxsync status 0x%x", index, status); + csi2->receiver_errors |= status; +} + +void ipu_isys_csi2_error(struct ipu_isys_csi2 *csi2) +{ + /* + * Strings corresponding to CSI-2 receiver errors are here. + * Corresponding macros are defined in the header file. + */ + static const struct ipu_isys_csi2_error { + const char *error_string; + bool is_info_only; + } errors[] = { + {"Single packet header error corrected", true}, + {"Multiple packet header errors detected", true}, + {"Payload checksum (CRC) error", true}, + {"FIFO overflow", false}, + {"Reserved short packet data type detected", true}, + {"Reserved long packet data type detected", true}, + {"Incomplete long packet detected", false}, + {"Frame sync error", false}, + {"Line sync error", false}, + {"DPHY recoverable synchronization error", true}, + {"DPHY non-recoverable synchronization error", false}, + {"Escape mode error", true}, + {"Escape mode trigger event", true}, + {"Escape mode ultra-low power state for data lane(s)", true}, + {"Escape mode ultra-low power state exit for clock lane", true}, + {"Inter-frame short packet discarded", true}, + {"Inter-frame long packet discarded", true}, + }; + u32 status; + unsigned int i; + + /* Register errors once more in case of error interrupts are disabled */ + ipu4p_isys_register_errors(csi2); + status = csi2->receiver_errors; + csi2->receiver_errors = 0; + + for (i = 0; i < ARRAY_SIZE(errors); i++) { + if (status & BIT(i)) { + if (errors[i].is_info_only) + dev_dbg(&csi2->isys->adev->dev, + "csi2-%i info: %s\n", + csi2->index, errors[i].error_string); + else + dev_err_ratelimited(&csi2->isys->adev->dev, + "csi2-%i error: %s\n", + csi2->index, + errors[i].error_string); + } + } +} + +int ipu_isys_csi2_set_stream(struct v4l2_subdev *sd, + struct ipu_isys_csi2_timing timing, + unsigned int nlanes, int enable) +{ + struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd); + struct ipu_isys *isys = csi2->isys; + void __iomem *isys_base = isys->pdata->base; + unsigned int i; + u32 val, csi2part = 0; + + dev_dbg(&csi2->isys->adev->dev, "csi2 s_stream %d\n", enable); + if (!enable) { + ipu_isys_csi2_error(csi2); + + val = readl(csi2->base + CSI2_REG_CSI_RX_CONFIG); + val &= ~(CSI2_CSI_RX_CONFIG_DISABLE_BYTE_CLK_GATING | + CSI2_CSI_RX_CONFIG_RELEASE_LP11); + writel(val, csi2->base + CSI2_REG_CSI_RX_CONFIG); + + writel(0, csi2->base + CSI2_REG_CSI_RX_ENABLE); + + writel(0, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(csi2->index) + 0x4); + writel(0, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(csi2->index) + + 0x10); + writel + (0, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(csi2->index) + 0x4); + writel + (0, isys_base + + IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(csi2->index) + 0x10); + return 0; + } + + ipu4p_csi2_ev_correction_params(csi2, nlanes); + + writel(timing.ctermen, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_TERMEN_CLANE); + writel(timing.csettle, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_SETTLE_CLANE); + + for (i = 0; i < nlanes; i++) { + writel + (timing.dtermen, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_TERMEN_DLANE(i)); + writel + (timing.dsettle, + csi2->base + CSI2_REG_CSI_RX_DLY_CNT_SETTLE_DLANE(i)); + } + + val = readl(csi2->base + CSI2_REG_CSI_RX_CONFIG); + val |= CSI2_CSI_RX_CONFIG_DISABLE_BYTE_CLK_GATING | + CSI2_CSI_RX_CONFIG_RELEASE_LP11; + writel(val, csi2->base + CSI2_REG_CSI_RX_CONFIG); + + writel(nlanes, csi2->base + 
CSI2_REG_CSI_RX_NOF_ENABLED_LANES);
+	writel(CSI2_CSI_RX_ENABLE_ENABLE,
+	       csi2->base + CSI2_REG_CSI_RX_ENABLE);
+
+	/* SOF of VC0-VC3 enabled from CSI2PART register in B0 */
+	for (i = 0; i < NR_OF_CSI2_VC; i++)
+		csi2part |= CSI2_IRQ_FS_VC(i) | CSI2_IRQ_FE_VC(i);
+
+	/* Enable csi2 receiver error interrupts */
+	writel(1, isys_base +
+	       IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(csi2->index));
+	writel(0, isys_base +
+	       IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(csi2->index) + 0x14);
+	writel(0xffffffff, isys_base +
+	       IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(csi2->index) + 0xc);
+	writel(1, isys_base +
+	       IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(csi2->index) + 0x4);
+	writel(1, isys_base +
+	       IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(csi2->index) + 0x10);
+
+	csi2part |= 0xffff;
+	writel(csi2part, isys_base +
+	       IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(csi2->index));
+	writel(0, isys_base +
+	       IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(csi2->index) + 0x14);
+	writel(0xffffffff, isys_base +
+	       IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(csi2->index) + 0xc);
+	writel(csi2part, isys_base +
+	       IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(csi2->index) + 0x4);
+	writel(csi2part, isys_base +
+	       IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(csi2->index) + 0x10);
+
+	return 0;
+}
+
+void ipu_isys_csi2_isr(struct ipu_isys_csi2 *csi2)
+{
+	u32 status = 0;
+	unsigned int i, bus;
+	struct ipu_isys *isys = csi2->isys;
+	void __iomem *isys_base = isys->pdata->base;
+
+	bus = csi2->index;
+	/* handle ctrl and ctrl0 irq */
+	status = readl(isys_base +
+		       IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(bus) + 0x8);
+	writel(status, isys_base +
+	       IPU_REG_ISYS_CSI_IRQ_CTRL_BASE(bus) + 0xc);
+	dev_dbg(&isys->adev->dev, "csi %d irq_ctrl status 0x%x", bus, status);
+
+	if (!(status & BIT(0)))
+		return;
+
+	status = readl(isys_base +
+		       IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(bus) + 0x8);
+	writel(status, isys_base +
+	       IPU_REG_ISYS_CSI_IRQ_CTRL0_BASE(bus) + 0xc);
+	dev_dbg(&isys->adev->dev, "csi %d irq_ctrl0 status 0x%x", bus, status);
+	/* register the csi sync error */
+	csi2->receiver_errors |= status & 0xffff;
+	/* handle sof and eof event */
+	for (i = 0; i < NR_OF_CSI2_VC; i++) {
+		if (status & CSI2_IRQ_FS_VC(i))
+			ipu_isys_csi2_sof_event(csi2, i);
+
+		if (status & CSI2_IRQ_FE_VC(i))
+			ipu_isys_csi2_eof_event(csi2, i);
+	}
+}
+
+static u64 tunit_time_to_us(struct ipu_isys *isys, u64 time)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(isys->adev->iommu);
+	u64 isys_clk = IS_FREQ_SOURCE / adev->ctrl->divisor / 1000000;
+
+	do_div(time, isys_clk);
+
+	return time;
+}
+
+static u64 tsc_time_to_tunit_time(struct ipu_isys *isys,
+				  u64 tsc_base, u64 tunit_base, u64 tsc_time)
+{
+	struct ipu_bus_device *adev = to_ipu_bus_device(isys->adev->iommu);
+	u64 isys_clk = IS_FREQ_SOURCE / adev->ctrl->divisor / 100000;
+	u64 tsc_clk = IPU_BUTTRESS_TSC_CLK / 100000;
+	u64 tunit_time;
+
+	tunit_time = (tsc_time - tsc_base) * isys_clk;
+	do_div(tunit_time, tsc_clk);
+
+	return tunit_time + tunit_base;
+}
+
+/* Extract the timestamp from a trace message.
+ * The timestamp in the trace message consists of two parts:
+ * the lower part holds bits 0-15 of the full 64-bit timestamp,
+ * and the higher part holds bits 14-63 of it.
+ * The two parts are sampled at different times, so two overlapping
+ * bits are used to detect whether the lower part rolled over
+ * between the two samples.
+ * If the two overlapped bits do not match, a correction is needed
+ * to handle the roll over.
+ */ +static u64 extract_time_from_short_packet_msg(struct + ipu_isys_csi2_monitor_message + *msg) +{ + u64 time_h = msg->timestamp_h << 14; + u64 time_l = msg->timestamp_l; + u64 time_h_ovl = time_h & 0xc000; + u64 time_h_h = time_h & (~0xffff); + + /* Fix possible roll overs. */ + if (time_h_ovl >= (time_l & 0xc000)) + return time_h_h | time_l; + else + return (time_h_h - 0x10000) | time_l; +} + +unsigned int ipu_isys_csi2_get_current_field(struct ipu_isys_pipeline *ip, + unsigned int *timestamp) +{ + struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip); + struct ipu_isys *isys = av->isys; + unsigned int field = V4L2_FIELD_TOP; + + /* + * Find the nearest message that has matched msg type, + * port id, virtual channel and packet type. + */ + unsigned int i = ip->short_packet_trace_index; + bool msg_matched = false; + unsigned int monitor_id; + + if (ip->csi2->index >= IPU_ISYS_MAX_CSI2_LEGACY_PORTS) + monitor_id = TRACE_REG_CSI2_3PH_TM_MONITOR_ID; + else + monitor_id = TRACE_REG_CSI2_TM_MONITOR_ID; + + dma_sync_single_for_cpu(&isys->adev->dev, + isys->short_packet_trace_buffer_dma_addr, + IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE, + DMA_BIDIRECTIONAL); + + do { + struct ipu_isys_csi2_monitor_message msg = + isys->short_packet_trace_buffer[i]; + u64 sof_time = tsc_time_to_tunit_time(isys, + isys->tsc_timer_base, + isys->tunit_timer_base, + (((u64) timestamp[1]) << + 32) | timestamp[0]); + u64 trace_time = extract_time_from_short_packet_msg(&msg); + u64 delta_time_us = tunit_time_to_us(isys, + (sof_time > trace_time) ? + sof_time - trace_time : + trace_time - sof_time); + + i = (i + 1) % IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER; + + if (msg.cmd == TRACE_REG_CMD_TYPE_D64MTS && + msg.monitor_id == monitor_id && + msg.fs == 1 && + msg.port == ip->csi2->index && + msg.vc == ip->vc && + delta_time_us < IPU_ISYS_SHORT_PACKET_TRACE_MAX_TIMESHIFT) { + field = (msg.sequence % 2) ? + V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM; + ip->short_packet_trace_index = i; + msg_matched = true; + dev_dbg(&isys->adev->dev, + "Interlaced field ready. field = %d\n", field); + break; + } + } while (i != ip->short_packet_trace_index); + if (!msg_matched) + /* We have walked through the whole buffer. */ + dev_dbg(&isys->adev->dev, "No matched trace message found.\n"); + + return field; +} + +bool ipu_isys_csi2_skew_cal_required(struct ipu_isys_csi2 *csi2) +{ + __s64 link_freq; + int rval; + + if (!csi2) + return false; + + /* Not yet ? 
*/ + if (csi2->remote_streams != csi2->stream_count) + return false; + + rval = ipu_isys_csi2_get_link_freq(csi2, &link_freq); + if (rval) + return false; + + if (link_freq <= IPU_SKEW_CAL_LIMIT_HZ) + return false; + + return true; +} + +int ipu_isys_csi2_set_skew_cal(struct ipu_isys_csi2 *csi2, int enable) +{ + u32 val; + + val = readl(csi2->base + CSI2_REG_CSI_RX_CONFIG); + + if (enable) + val |= CSI2_CSI_RX_CONFIG_SKEWCAL_ENABLE; + else + val &= ~CSI2_CSI_RX_CONFIG_SKEWCAL_ENABLE; + + writel(val, csi2->base + CSI2_REG_CSI_RX_CONFIG); + + return 0; +} diff --git a/drivers/media/pci/intel/virtio/Makefile b/drivers/media/pci/intel/virtio/Makefile new file mode 100644 index 000000000000..0f4eab5addfc --- /dev/null +++ b/drivers/media/pci/intel/virtio/Makefile @@ -0,0 +1,10 @@ +ifneq ($(EXTERNAL_BUILD), 1) +srcpath := $(srctree) +endif + +IPU_STEP = bxtB0 + +include $(srcpath)/$(src)/Makefile.virt + +ccflags-y += -I$(srcpath)/$(src)/../../../../../include/ +ccflags-y += -I$(srcpath)/$(src)/../ diff --git a/drivers/media/pci/intel/virtio/Makefile.virt b/drivers/media/pci/intel/virtio/Makefile.virt new file mode 100644 index 000000000000..c3c30c4bf921 --- /dev/null +++ b/drivers/media/pci/intel/virtio/Makefile.virt @@ -0,0 +1,22 @@ +ifndef IPU_STEP + $(error No IPU_STEP was defined. Stopping.) +endif + +TARGET_MODULE:=intel-ipu-virt-$(IPU_STEP) + +$(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-common.o + + +ifdef CONFIG_VIDEO_INTEL_IPU_VIRTIO_BE + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-be-pipeline.o + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-be-bridge.o + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-be.o + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-be-stream.o +else + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-fe-pipeline.o + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-fe-payload.o + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-virtio-fe.o + $(TARGET_MODULE)-objs += ../virtio/intel-ipu4-para-virt-drv.o +endif + +obj-$(CONFIG_VIDEO_INTEL_IPU_ACRN) := $(TARGET_MODULE).o diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.c b/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.c new file mode 100644 index 000000000000..fda27f4f21b4 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.c @@ -0,0 +1,1330 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "intel-ipu4-virtio-common.h" +#include "intel-ipu4-para-virt-drv.h" +#include "intel-ipu4-virtio-fe-pipeline.h" +#include "intel-ipu4-virtio-fe-payload.h" +#include "./ici/ici-isys-stream.h" +#include "./ici/ici-isys-pipeline-device.h" + + +static dev_t virt_pipeline_dev_t; +static struct class *virt_pipeline_class; +static struct ici_isys_pipeline_device *pipeline_dev; + +static dev_t virt_stream_dev_t; +static struct class *virt_stream_class; +static int virt_stream_devs_registered; +static int stream_dev_init; + +static struct ipu4_virtio_ctx *g_fe_priv; + +struct mutex fop_mutex; + +#ifdef CONFIG_COMPAT +struct timeval32 { + __u32 tv_sec; + __u32 tv_usec; +} __attribute__((__packed__)); + +struct ici_frame_plane32 { + __u32 bytes_used; + __u32 length; + union { + compat_uptr_t userptr; + __s32 dmafd; + } mem; + __u32 data_offset; + __u32 reserved[2]; +} __attribute__((__packed__)); + +struct ici_frame_info32 { + __u32 frame_type; + __u32 field; + __u32 flag; + __u32 
frame_buf_id;
+	struct timeval32 frame_timestamp;
+	__u32 frame_sequence_id;
+	__u32 mem_type;	/* _DMA or _USER_PTR */
+	struct ici_frame_plane32 frame_planes[ICI_MAX_PLANES]; /* multi-planar */
+	__u32 num_planes;	/* =1 single-planar > 1 multi-planar array size */
+	__u32 reserved[2];
+} __attribute__((__packed__));
+
+#define ICI_IOC_GET_BUF32 _IOWR(MAJOR_STREAM, 3, struct ici_frame_info32)
+#define ICI_IOC_PUT_BUF32 _IOWR(MAJOR_STREAM, 4, struct ici_frame_info32)
+
+static void copy_from_user_frame_info32(struct ici_frame_info *kp, struct ici_frame_info32 __user *up)
+{
+	int i;
+	compat_uptr_t userptr;
+
+	get_user(kp->frame_type, &up->frame_type);
+	get_user(kp->field, &up->field);
+	get_user(kp->flag, &up->flag);
+	get_user(kp->frame_buf_id, &up->frame_buf_id);
+	get_user(kp->frame_timestamp.tv_sec, &up->frame_timestamp.tv_sec);
+	get_user(kp->frame_timestamp.tv_usec, &up->frame_timestamp.tv_usec);
+	get_user(kp->frame_sequence_id, &up->frame_sequence_id);
+	get_user(kp->mem_type, &up->mem_type);
+	get_user(kp->num_planes, &up->num_planes);
+	for (i = 0; i < kp->num_planes; i++) {
+		get_user(kp->frame_planes[i].bytes_used, &up->frame_planes[i].bytes_used);
+		get_user(kp->frame_planes[i].length, &up->frame_planes[i].length);
+		if (kp->mem_type == ICI_MEM_USERPTR) {
+			get_user(userptr, &up->frame_planes[i].mem.userptr);
+			kp->frame_planes[i].mem.userptr = (unsigned long) compat_ptr(userptr);
+		} else if (kp->mem_type == ICI_MEM_DMABUF) {
+			get_user(kp->frame_planes[i].mem.dmafd, &up->frame_planes[i].mem.dmafd);
+		}
+		get_user(kp->frame_planes[i].data_offset, &up->frame_planes[i].data_offset);
+	}
+}
+
+static void copy_to_user_frame_info32(struct ici_frame_info *kp, struct ici_frame_info32 __user *up)
+{
+	int i;
+	compat_uptr_t userptr;
+
+	put_user(kp->frame_type, &up->frame_type);
+	put_user(kp->field, &up->field);
+	put_user(kp->flag, &up->flag);
+	put_user(kp->frame_buf_id, &up->frame_buf_id);
+	put_user(kp->frame_timestamp.tv_sec, &up->frame_timestamp.tv_sec);
+	put_user(kp->frame_timestamp.tv_usec, &up->frame_timestamp.tv_usec);
+	put_user(kp->frame_sequence_id, &up->frame_sequence_id);
+	put_user(kp->mem_type, &up->mem_type);
+	put_user(kp->num_planes, &up->num_planes);
+	for (i = 0; i < kp->num_planes; i++) {
+		put_user(kp->frame_planes[i].bytes_used, &up->frame_planes[i].bytes_used);
+		put_user(kp->frame_planes[i].length, &up->frame_planes[i].length);
+		if (kp->mem_type == ICI_MEM_USERPTR) {
+			userptr = (unsigned long)compat_ptr(kp->frame_planes[i].mem.userptr);
+			put_user(userptr, &up->frame_planes[i].mem.userptr);
+		} else if (kp->mem_type == ICI_MEM_DMABUF) {
+			put_user(kp->frame_planes[i].mem.dmafd, &up->frame_planes[i].mem.dmafd);
+		}
+		put_user(kp->frame_planes[i].data_offset, &up->frame_planes[i].data_offset);
+	}
+}
+#endif
+
+static int get_userpages(struct device *dev, struct ici_frame_plane *frame_plane,
+			 struct ici_kframe_plane *kframe_plane)
+{
+	unsigned long start, end, addr;
+	int npages, array_size;
+	struct page **pages;
+	int nr = 0;
+	int ret = 0;
+	struct sg_table *sgt;
+	unsigned int i;
+	u64 page_table_ref;
+	u64 *page_table;
+
+	addr = (unsigned long)frame_plane->mem.userptr;
+	start = addr & PAGE_MASK;
+	end = PAGE_ALIGN(addr + frame_plane->length);
+	npages = (end - start) >> PAGE_SHIFT;
+	array_size = npages * sizeof(struct page *);
+
+	if (!npages)
+		return -EINVAL;
+
+	page_table = kcalloc(npages, sizeof(*page_table), GFP_KERNEL);
+	if (!page_table) {
+		pr_err("Failed to allocate shared page table for mediation\n");
+		return -ENOMEM;
+	}
+
+	pr_debug("%s:%d 
Number of Pages:%d frame_length:%d\n", __func__, __LINE__, npages, frame_plane->length); + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return -ENOMEM; + if (array_size <= PAGE_SIZE) + pages = kzalloc(array_size, GFP_KERNEL); + else + pages = vzalloc(array_size); + if (!pages) + return -ENOMEM; + + down_read(¤t->mm->mmap_sem); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + nr = get_user_pages(current, current->mm, + start, npages, 1, 0, pages, NULL); +#else + nr = get_user_pages(start, npages, FOLL_WRITE, pages, NULL); +#endif + if (nr < npages) + goto error_free_pages; + /* Share physical address of pages */ + for (i = 0; i < npages; i++) + page_table[i] = page_to_phys(pages[i]); + + pr_debug("UOS phy page add %lld offset:%ld\n", page_table[0], addr & ~PAGE_MASK); + page_table_ref = virt_to_phys(page_table); + kframe_plane->page_table_ref = page_table_ref; + kframe_plane->npages = npages; + up_read(¤t->mm->mmap_sem); + return ret; +error_free_pages: + if (pages) { + for (i = 0; i < nr; i++) + put_page(pages[i]); + } + kfree(sgt); + return -ENOMEM; +} + +static struct ici_frame_buf_wrapper *frame_buf_lookup(struct ici_isys_frame_buf_list *buf_list, struct ici_frame_info *user_frame_info) +{ + struct ici_frame_buf_wrapper *buf; + int i; + int mem_type = user_frame_info->mem_type; + + list_for_each_entry(buf, &buf_list->getbuf_list, uos_node) { + for (i = 0; i < user_frame_info->num_planes; i++) { + struct ici_frame_plane *new_plane = &user_frame_info->frame_planes[i]; + struct ici_frame_plane *cur_plane = &buf->frame_info.frame_planes[i]; + + if (buf->state != ICI_BUF_PREPARED && + buf->state != ICI_BUF_DONE) + continue; + + switch (mem_type) { + case ICI_MEM_USERPTR: + if (new_plane->mem.userptr == cur_plane->mem.userptr) + return buf; + break; + case ICI_MEM_DMABUF: + if (new_plane->mem.dmafd == cur_plane->mem.dmafd) + return buf; + break; + } + //TODO: add multiplaner checks + } + } + return NULL; +} +static void put_userpages(struct ici_kframe_plane *kframe_plane) +{ + struct sg_table *sgt = kframe_plane->sgt; + struct scatterlist *sgl; + unsigned int i; + struct mm_struct *mm = current->active_mm; + + if (!mm) { + pr_err("Failed to get active mm_struct ptr from current process.\n"); + return; + } + + down_read(&mm->mmap_sem); + for_each_sg(sgt->sgl, sgl, sgt->orig_nents, i) { + struct page *page = sg_page(sgl); + + unsigned int npages = PAGE_ALIGN(sgl->offset + sgl->length) >> PAGE_SHIFT; + unsigned int page_no; + + for (page_no = 0; page_no < npages; ++page_no, ++page) { + set_page_dirty_lock(page); + put_page(page); + } + } + + kfree(sgt); + kframe_plane->sgt = NULL; + + up_read(&mm->mmap_sem); +} + +static void put_dma(struct ici_kframe_plane *kframe_plane) +{ + struct sg_table *sgt = kframe_plane->sgt; + + if (WARN_ON(!kframe_plane->db_attach)) { + pr_err("trying to unpin a not attached buffer\n"); + return; + } + + if (WARN_ON(!sgt)) { + pr_err("dmabuf buffer is already unpinned\n"); + return; + } + + if (kframe_plane->kaddr) { + dma_buf_vunmap(kframe_plane->db_attach->dmabuf, + kframe_plane->kaddr); + kframe_plane->kaddr = NULL; + } + dma_buf_unmap_attachment(kframe_plane->db_attach, sgt, + DMA_BIDIRECTIONAL); + + kframe_plane->dma_addr = 0; + kframe_plane->sgt = NULL; + +} + +static int map_dma(struct device *dev, struct ici_frame_plane *frame_plane, + struct ici_kframe_plane *kframe_plane) +{ + + int ret = 0; + int fd = frame_plane->mem.dmafd; + + kframe_plane->dbdbuf = dma_buf_get(fd); + if (!kframe_plane->dbdbuf) { + ret = -EINVAL; + goto error; + } + + if 
+
+static int map_dma(struct device *dev, struct ici_frame_plane *frame_plane,
+	struct ici_kframe_plane *kframe_plane)
+{
+	int ret = 0;
+	int fd = frame_plane->mem.dmafd;
+
+	kframe_plane->dbdbuf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(kframe_plane->dbdbuf)) {
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (frame_plane->length == 0)
+		kframe_plane->length = kframe_plane->dbdbuf->size;
+	else
+		kframe_plane->length = frame_plane->length;
+
+	kframe_plane->fd = fd;
+	kframe_plane->db_attach = dma_buf_attach(kframe_plane->dbdbuf, dev);
+	if (IS_ERR(kframe_plane->db_attach)) {
+		ret = PTR_ERR(kframe_plane->db_attach);
+		goto error_put;
+	}
+
+	kframe_plane->sgt = dma_buf_map_attachment(kframe_plane->db_attach,
+		DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(kframe_plane->sgt)) {
+		ret = -EINVAL;
+		kframe_plane->sgt = NULL;
+		pr_err("map attachment failed\n");
+		goto error_detach;
+	}
+
+	kframe_plane->dma_addr = sg_dma_address(kframe_plane->sgt->sgl);
+	kframe_plane->kaddr = dma_buf_vmap(kframe_plane->dbdbuf);
+	if (!kframe_plane->kaddr) {
+		ret = -EINVAL;
+		goto error_detach;
+	}
+
+	pr_debug("MAPBUF: mapped fd %d\n", fd);
+
+	return 0;
+
+error_detach:
+	dma_buf_detach(kframe_plane->dbdbuf, kframe_plane->db_attach);
+error_put:
+	dma_buf_put(kframe_plane->dbdbuf);
+error:
+	return ret;
+}
+
+static void unmap_buf(struct ici_frame_buf_wrapper *buf)
+{
+	int i;
+
+	for (i = 0; i < buf->frame_info.num_planes; i++) {
+		struct ici_kframe_plane *kframe_plane = &buf->kframe_info.planes[i];
+
+		switch (kframe_plane->mem_type) {
+		case ICI_MEM_USERPTR:
+			put_userpages(kframe_plane);
+			break;
+		case ICI_MEM_DMABUF:
+			put_dma(kframe_plane);
+			break;
+		default:
+			pr_debug("not supported memory type: %d\n", kframe_plane->mem_type);
+			break;
+		}
+	}
+}
+
+struct ici_frame_buf_wrapper *get_buf(struct virtual_stream *vstream, struct ici_frame_info *frame_info)
+{
+	int res;
+	unsigned int i;
+	struct ici_frame_buf_wrapper *buf;
+	struct ici_kframe_plane *kframe_plane;
+	struct ici_isys_frame_buf_list *buf_list = &vstream->buf_list;
+	int mem_type = frame_info->mem_type;
+
+	if (mem_type != ICI_MEM_USERPTR && mem_type != ICI_MEM_DMABUF) {
+		pr_err("Memory type not supported\n");
+		return NULL;
+	}
+
+	if (!frame_info->frame_planes[0].length) {
+		pr_err("User length not set\n");
+		return NULL;
+	}
+
+	buf = frame_buf_lookup(buf_list, frame_info);
+	if (buf) {
+		pr_debug("Frame buffer found in the list: %lu\n", buf->frame_info.frame_planes[0].mem.userptr);
+		buf->state = ICI_BUF_PREPARED;
+		return buf;
+	}
+
+	pr_debug("Creating new buffer in the list\n");
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return NULL;
+
+	buf->buf_id = frame_info->frame_buf_id;
+	buf->uos_buf_list = buf_list;
+	memcpy(&buf->frame_info, frame_info, sizeof(buf->frame_info));
+
+	switch (mem_type) {
+	case ICI_MEM_USERPTR:
+		if (!frame_info->frame_planes[0].mem.userptr) {
+			pr_err("User pointer not defined\n");
+			goto err_free;
+		}
+		for (i = 0; i < frame_info->num_planes; i++) {
+			kframe_plane = &buf->kframe_info.planes[i];
+			kframe_plane->mem_type = ICI_MEM_USERPTR;
+			res = get_userpages(&vstream->strm_dev.dev,
+				&frame_info->frame_planes[i], kframe_plane);
+			if (res)
+				goto err_free;
+		}
+		break;
+	case ICI_MEM_DMABUF:
+		for (i = 0; i < frame_info->num_planes; i++) {
+			kframe_plane = &buf->kframe_info.planes[i];
+			kframe_plane->mem_type = ICI_MEM_DMABUF;
+			res = map_dma(&vstream->strm_dev.dev,
+				&frame_info->frame_planes[i], kframe_plane);
+			if (res)
+				goto err_free;
+		}
+		break;
+	}
+
+	mutex_lock(&buf_list->mutex);
+	buf->state = ICI_BUF_PREPARED;
+	list_add_tail(&buf->uos_node, &buf_list->getbuf_list);
+	mutex_unlock(&buf_list->mutex);
+	return buf;
+
+err_free:
+	kfree(buf);
+	return NULL;
+}
+
+/* Called from Stream-OFF and if Stream-ON fails */
+void buf_stream_cancel(struct virtual_stream *vstream)
+{
+	struct ici_isys_frame_buf_list *buf_list = &vstream->buf_list;
+	struct ici_frame_buf_wrapper *buf;
+	struct ici_frame_buf_wrapper *next_buf;
+
+	list_for_each_entry_safe(buf, next_buf, &buf_list->getbuf_list, uos_node) {
+		list_del(&buf->uos_node);
+		unmap_buf(buf);
+	}
+	list_for_each_entry_safe(buf, next_buf, &buf_list->putbuf_list, node) {
+		list_del(&buf->node);
+		unmap_buf(buf);
+	}
+}
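/*
 * For reference, a guest user-space client exercises the path above
 * roughly as follows. This is an illustrative sketch, not part of the
 * patch: ICI_IOC_GET_BUF and struct ici_frame_info are assumed to come
 * from the ICI UAPI headers, the node name follows virt_ici_stream_init()
 * below, and error handling is trimmed.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>

static int queue_userptr_frame(void *buf, unsigned int len)
{
	struct ici_frame_info fi;
	int fd = open("/dev/intel_stream0", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&fi, 0, sizeof(fi));
	fi.mem_type = ICI_MEM_USERPTR;
	fi.num_planes = 1;
	fi.frame_planes[0].mem.userptr = (unsigned long)buf;
	fi.frame_planes[0].length = len;
	/* pins the pages and shares their physical page table with the SOS */
	return ioctl(fd, ICI_IOC_GET_BUF, &fi);
}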
+
+static int virt_isys_set_format(struct file *file, void *fh,
+	struct ici_stream_format *sf)
+{
+	struct ici_stream_device *strm_dev = fh;
+	struct virtual_stream *vstream = dev_to_vstream(strm_dev);
+	struct ipu4_virtio_ctx *fe_ctx = vstream->ctx;
+	struct ipu4_virtio_req *req;
+	int rval = 0;
+	int op[10];
+
+	pr_debug("Calling Set Format\n");
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+	op[0] = vstream->virt_dev_id;
+	op[1] = 0;
+
+	req->payload = virt_to_phys(sf);
+
+	intel_ipu4_virtio_create_req(req, IPU4_CMD_SET_FORMAT, &op[0]);
+
+	rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_0);
+	if (rval)
+		dev_err(&strm_dev->dev, "Failed to set format\n");
+	kfree(req);
+
+	return rval;
+}
+
+static int virt_isys_stream_on(struct file *file, void *fh)
+{
+	struct ici_stream_device *strm_dev = fh;
+	struct virtual_stream *vstream = dev_to_vstream(strm_dev);
+	struct ipu4_virtio_ctx *fe_ctx = vstream->ctx;
+	struct ipu4_virtio_req *req;
+	int rval = 0;
+	int op[10];
+
+	pr_debug("Calling Stream ON\n");
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+	op[0] = vstream->virt_dev_id;
+	op[1] = 0;
+
+	intel_ipu4_virtio_create_req(req, IPU4_CMD_STREAM_ON, &op[0]);
+
+	rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_0);
+	if (rval)
+		dev_err(&strm_dev->dev, "Failed to start stream\n");
+	kfree(req);
+
+	return rval;
+}
+
+static int virt_isys_stream_off(struct file *file, void *fh)
+{
+	struct ici_stream_device *strm_dev = fh;
+	struct virtual_stream *vstream = dev_to_vstream(strm_dev);
+	struct ipu4_virtio_ctx *fe_ctx = vstream->ctx;
+	struct ipu4_virtio_req *req;
+	int rval = 0;
+	int op[10];
+
+	pr_debug("Calling Stream OFF\n");
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+	op[0] = vstream->virt_dev_id;
+	op[1] = 0;
+
+	intel_ipu4_virtio_create_req(req, IPU4_CMD_STREAM_OFF, &op[0]);
+
+	rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_0);
+	if (rval)
+		dev_err(&strm_dev->dev, "Failed to stop stream\n");
+	kfree(req);
+
+/*	buf_stream_cancel(vstream); */
+
+	return rval;
+}
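/*
 * Editorial note: the handlers above (and the buffer handlers below) all
 * repeat the same allocate/fill/send/free sequence. A sketch of a helper
 * that would factor out that boilerplate, assuming only the ipu4_virtio_*
 * interfaces already used in this patch (not part of the patch itself):
 */
static int ipu4_virtio_simple_req(struct virtual_stream *vstream,
				  enum intel_ipu4_virtio_command cmd,
				  u64 payload)
{
	struct ipu4_virtio_ctx *ctx = vstream->ctx;
	struct ipu4_virtio_req *req;
	int op[2] = { vstream->virt_dev_id, 0 };
	int rval;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	req->payload = payload;		/* 0 for payload-less commands */
	intel_ipu4_virtio_create_req(req, cmd, op);
	rval = ctx->bknd_ops->send_req(ctx->domid, req, true, IPU_VIRTIO_QUEUE_0);
	kfree(req);
	return rval;
}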
+
+static int virt_isys_getbuf(struct file *file, void *fh,
+	struct ici_frame_info *user_frame_info)
+{
+	struct ici_stream_device *strm_dev = fh;
+	struct virtual_stream *vstream = dev_to_vstream(strm_dev);
+	struct ipu4_virtio_ctx *fe_ctx = vstream->ctx;
+	struct ipu4_virtio_req *req;
+	struct ici_frame_buf_wrapper *buf;
+	int rval = 0;
+	int op[3];
+
+	pr_debug("Calling Get Buffer\n");
+
+	buf = get_buf(vstream, user_frame_info);
+	if (!buf) {
+		dev_err(&strm_dev->dev, "Failed to map buffer\n");
+		return -ENOMEM;
+	}
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	op[0] = vstream->virt_dev_id;
+	op[1] = 0;
+	op[2] = user_frame_info->mem_type;
+	req->payload = virt_to_phys(buf);
+
+	intel_ipu4_virtio_create_req(req, IPU4_CMD_GET_BUF, &op[0]);
+
+	rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_0);
+	if (rval)
+		dev_err(&strm_dev->dev, "Failed to get buffer\n");
+	kfree(req);
+
+	return rval;
+}
+
+static int virt_isys_putbuf(struct file *file, void *fh,
+	struct ici_frame_info *user_frame_info)
+{
+	struct ici_stream_device *strm_dev = fh;
+	struct virtual_stream *vstream = dev_to_vstream(strm_dev);
+	struct ipu4_virtio_ctx *fe_ctx = vstream->ctx;
+	struct ipu4_virtio_req *req;
+	int rval = 0;
+	int op[2];
+
+	pr_debug("Calling Put Buffer\n");
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	op[0] = vstream->virt_dev_id;
+	op[1] = 0;
+	req->payload = virt_to_phys(user_frame_info);
+
+	intel_ipu4_virtio_create_req(req, IPU4_CMD_PUT_BUF, &op[0]);
+
+	rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_0);
+	if (rval)
+		dev_err(&strm_dev->dev, "Failed to put buffer\n");
+	kfree(req);
+
+	return rval;
+}
+
+static unsigned int stream_fop_poll(struct file *file, struct ici_stream_device *dev)
+{
+	struct ipu4_virtio_req *req;
+	struct virtual_stream *vstream = dev_to_vstream(dev);
+	struct ipu4_virtio_ctx *fe_ctx = vstream->ctx;
+	struct ici_stream_device *strm_dev = file->private_data;
+	int rval = 0;
+	int op[2];
+
+	dev_dbg(&strm_dev->dev, "stream_fop_poll %d\n", vstream->virt_dev_id);
+	get_device(&dev->dev);
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return POLLERR;
+
+	op[0] = vstream->virt_dev_id;
+	op[1] = 0;
+
+	intel_ipu4_virtio_create_req(req, IPU4_CMD_POLL, &op[0]);
+
+	mutex_lock(&fop_mutex);
+	rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_1);
+	mutex_unlock(&fop_mutex);
+
+	if (rval) {
+		dev_err(&strm_dev->dev, "Failed to poll virtual device\n");
+		kfree(req);
+		return POLLERR;
+	}
+
+	rval = req->func_ret;
+	kfree(req);
+
+	return rval;
+}
+
+static int virt_stream_fop_open(struct inode *inode, struct file *file)
+{
+	struct ici_stream_device *strm_dev = inode_to_intel_ipu_stream_device(inode);
+	struct ipu4_virtio_req *req;
+	struct virtual_stream *vstream = dev_to_vstream(strm_dev);
+	struct ipu4_virtio_ctx *fe_ctx = vstream->ctx;
+	int rval = 0;
+	int op[3];
+
+	dev_info(&strm_dev->dev, "virtual stream open\n");
+	get_device(&strm_dev->dev);
+
+	file->private_data = strm_dev;
+
+	if (!fe_ctx)
+		return -EINVAL;
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req) {
+		dev_err(&strm_dev->dev, "Failed to allocate virtio request buffer\n");
+		return -ENOMEM;
+	}
+
+	op[0] = vstream->virt_dev_id;
+	op[1] = 1;
+
+	intel_ipu4_virtio_create_req(req, IPU4_CMD_DEVICE_OPEN, &op[0]);
+
+	mutex_lock(&fop_mutex);
+	rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_1);
+	mutex_unlock(&fop_mutex);
+
+	if (rval)
+		dev_err(&strm_dev->dev, "Failed to open virtual device\n");
+	kfree(req);
+
+	return rval;
+}
+
+static int virt_stream_fop_release(struct inode *inode, struct file *file)
+{
+	struct ici_stream_device *strm_dev = inode_to_intel_ipu_stream_device(inode);
+	struct ipu4_virtio_req *req;
+	struct virtual_stream *vstream = dev_to_vstream(strm_dev);
+	struct ipu4_virtio_ctx *fe_ctx = vstream->ctx;
+	int rval = 0;
+	int op[2];
+
+	dev_info(&strm_dev->dev, "IPU virtual stream close\n");
+	put_device(&strm_dev->dev);
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	op[0] = vstream->virt_dev_id;
+	op[1] = 0;
+
+	intel_ipu4_virtio_create_req(req, IPU4_CMD_DEVICE_CLOSE, &op[0]);
+
+	mutex_lock(&fop_mutex);
+	rval = fe_ctx->bknd_ops->send_req(fe_ctx->domid, req, true, IPU_VIRTIO_QUEUE_1);
+	mutex_unlock(&fop_mutex);
+
+	if (rval)
+		dev_err(&strm_dev->dev, "Failed to close virtual device\n");
+	kfree(req);
+
+	return rval;
+}
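/*
 * Editorial note: stream_fop_poll() does not poll locally. It sends
 * IPU4_CMD_POLL to the backend, which parks the request
 * (IPU4_REQ_NEEDS_FOLLOW_UP in intel-ipu4-virtio-be-bridge.c) until
 * frame_done_callback()/notify_fe() completes it, so the guest's poll()
 * returns once the service OS has a frame. From guest user space this
 * remains an ordinary poll loop (illustrative sketch, error handling
 * trimmed):
 */
#include <poll.h>

static int wait_for_frame(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int n = poll(&pfd, 1, timeout_ms);	/* round-trips to the backend */

	return (n == 1 && (pfd.revents & POLLIN)) ? 0 : -1;
}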
+
+static unsigned int virt_stream_fop_poll(struct file *file,
+	struct poll_table_struct *poll)
+{
+	struct ici_stream_device *as = file->private_data;
+	unsigned int res;
+
+	dev_dbg(&as->dev, "virt_stream_fop_poll for:%s\n", as->name);
+
+	res = stream_fop_poll(file, as);
+
+	dev_dbg(&as->dev, "virt_stream_fop_poll res %u\n", res);
+
+	return res;
+}
+
+static long virt_stream_ioctl32(struct file *file, unsigned int ioctl_cmd,
+	unsigned long ioctl_arg)
+{
+	union isys_ioctl_cmd_args {
+		struct ici_frame_info frame_info;
+		struct ici_stream_format sf;
+	};
+	void __user *up = compat_ptr(ioctl_arg);
+	union isys_ioctl_cmd_args *data = NULL;
+	int err = 0;
+	struct ici_stream_device *dev = file->private_data;
+
+	mutex_lock(dev->mutex);
+	switch (ioctl_cmd) {
+	case ICI_IOC_STREAM_ON:
+		pr_debug("IPU FE IOCTL STREAM_ON\n");
+		err = virt_isys_stream_on(file, dev);
+		break;
+	case ICI_IOC_STREAM_OFF:
+		pr_debug("IPU FE IOCTL STREAM_OFF\n");
+		err = virt_isys_stream_off(file, dev);
+		break;
+	case ICI_IOC_GET_BUF32:
+		pr_debug("IPU FE IOCTL GET_BUF\n");
+		data = kzalloc(sizeof(union isys_ioctl_cmd_args), GFP_KERNEL);
+		if (!data) {
+			err = -ENOMEM;
+			break;
+		}
+		copy_from_user_frame_info32(&data->frame_info, up);
+		err = virt_isys_getbuf(file, dev, &data->frame_info);
+		copy_to_user_frame_info32(&data->frame_info, up);
+		kfree(data);
+		break;
+	case ICI_IOC_PUT_BUF32:
+		pr_debug("IPU FE IOCTL PUT_BUF\n");
+		data = kzalloc(sizeof(union isys_ioctl_cmd_args), GFP_KERNEL);
+		if (!data) {
+			err = -ENOMEM;
+			break;
+		}
+		copy_from_user_frame_info32(&data->frame_info, up);
+		err = virt_isys_putbuf(file, dev, &data->frame_info);
+		copy_to_user_frame_info32(&data->frame_info, up);
+		kfree(data);
+		break;
+	case ICI_IOC_SET_FORMAT:
+		pr_debug("IPU FE IOCTL SET_FORMAT\n");
+		if (_IOC_SIZE(ioctl_cmd) > sizeof(union isys_ioctl_cmd_args)) {
+			err = -ENOTTY;
+			break;
+		}
+		data = kzalloc(sizeof(union isys_ioctl_cmd_args), GFP_KERNEL);
+		if (!data) {
+			err = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(data, up, _IOC_SIZE(ioctl_cmd))) {
+			kfree(data);
+			err = -EFAULT;
+			break;
+		}
+		err = virt_isys_set_format(file, dev, &data->sf);
+		if (!err && copy_to_user(up, data, _IOC_SIZE(ioctl_cmd)))
+			err = -EFAULT;
+		kfree(data);
+		break;
+	default:
+		err = -ENOTTY;
+		break;
+	}
+
+	mutex_unlock(dev->mutex);
+
+	return err;
+}
+
+static long virt_stream_ioctl(struct file *file, unsigned int ioctl_cmd,
+	unsigned long ioctl_arg)
+{
+	union isys_ioctl_cmd_args {
+		struct ici_frame_info frame_info;
+		struct ici_stream_format sf;
+	};
+	int err = 0;
+	union isys_ioctl_cmd_args *data = NULL;
+	struct ici_stream_device *dev = file->private_data;
+	void __user *up = (void __user *)ioctl_arg;
+	bool copy = (ioctl_cmd != ICI_IOC_STREAM_ON &&
+		ioctl_cmd != ICI_IOC_STREAM_OFF);
+
+	if (copy) {
+		if (_IOC_SIZE(ioctl_cmd) > sizeof(union isys_ioctl_cmd_args))
+			return -ENOTTY;
+
+		data = kzalloc(sizeof(union isys_ioctl_cmd_args), GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+		if (_IOC_DIR(ioctl_cmd) & _IOC_WRITE) {
+			if (copy_from_user(data, up, _IOC_SIZE(ioctl_cmd))) {
+				kfree(data);
+				return -EFAULT;
+			}
+		}
+	}
+
+	mutex_lock(dev->mutex);
+	switch (ioctl_cmd) {
+	case ICI_IOC_STREAM_ON:
+		err = virt_isys_stream_on(file, dev);
+		break;
+	case ICI_IOC_STREAM_OFF:
+		err = virt_isys_stream_off(file, dev);
+		break;
+	case ICI_IOC_GET_BUF:
+		err = virt_isys_getbuf(file, dev, &data->frame_info);
+		break;
+	case ICI_IOC_PUT_BUF:
+		err = virt_isys_putbuf(file, dev, &data->frame_info);
+		break;
+	case ICI_IOC_SET_FORMAT:
+		err = virt_isys_set_format(file, dev, &data->sf);
+		break;
+	default:
+		err = -ENOTTY;
+		break;
+	}
+	mutex_unlock(dev->mutex);
+
+	if (copy) {
+		if (!err && (_IOC_DIR(ioctl_cmd) & _IOC_READ) &&
+			copy_to_user(up, data, _IOC_SIZE(ioctl_cmd)))
+			err = -EFAULT;
+		kfree(data);
+	}
+	return err;
+}
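/*
 * Editorial note: apart from the frame_info32 translation, the two ioctl
 * entry points above differ only in how the user pointer is recovered
 * from ioctl_arg. On kernels that provide in_compat_syscall() the
 * pointer recovery could be shared; a sketch under that assumption (not
 * part of this patch):
 */
static void __user *ici_ioctl_ptr(unsigned long ioctl_arg)
{
#ifdef CONFIG_COMPAT
	if (in_compat_syscall())
		return compat_ptr(ioctl_arg);	/* 32-bit caller on 64-bit kernel */
#endif
	return (void __user *)ioctl_arg;
}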
+
+static const struct file_operations virt_stream_fops = {
+	.owner = THIS_MODULE,
+	.open = virt_stream_fop_open, /* calls strm_dev->fops->open() */
+	.unlocked_ioctl = virt_stream_ioctl, /* calls strm_dev->ipu_ioctl_ops->() */
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = virt_stream_ioctl32,
+#endif
+	.release = virt_stream_fop_release, /* calls strm_dev->fops->release() */
+	.poll = virt_stream_fop_poll, /* calls strm_dev->fops->poll() */
+};
+
+/* Called on device_unregister */
+static void base_device_release(struct device *sd)
+{
+}
+
+int virt_frame_buf_init(struct ici_isys_frame_buf_list *buf_list)
+{
+	buf_list->drv_priv = NULL;
+	mutex_init(&buf_list->mutex);
+	spin_lock_init(&buf_list->lock);
+	spin_lock_init(&buf_list->short_packet_queue_lock);
+	INIT_LIST_HEAD(&buf_list->getbuf_list);
+	INIT_LIST_HEAD(&buf_list->putbuf_list);
+	INIT_LIST_HEAD(&buf_list->interlacebuf_list);
+	init_waitqueue_head(&buf_list->wait);
+	return 0;
+}
+
+static int virt_ici_stream_init(struct virtual_stream *vstream,
+	struct ici_stream_device *strm_dev)
+{
+	int rval;
+	int num;
+	struct ipu4_virtio_ctx *fe_ctx;
+
+	if (!stream_dev_init) {
+		virt_stream_dev_t = MKDEV(MAJOR_STREAM, 0);
+
+		rval = register_chrdev_region(virt_stream_dev_t,
+			MAX_STREAM_DEVICES, ICI_STREAM_DEVICE_NAME);
+		if (rval) {
+			pr_err("can't register virt_ici stream chrdev region (%d)\n", rval);
+			return rval;
+		}
+
+		virt_stream_class = class_create(THIS_MODULE, ICI_STREAM_DEVICE_NAME);
+		if (IS_ERR(virt_stream_class)) {
+			unregister_chrdev_region(virt_stream_dev_t, MAX_STREAM_DEVICES);
+			pr_err("Failed to register device class %s\n", ICI_STREAM_DEVICE_NAME);
+			return PTR_ERR(virt_stream_class);
+		}
+		stream_dev_init++;
+	}
+
+	num = virt_stream_devs_registered;
+	strm_dev->minor = -1;
+	cdev_init(&strm_dev->cdev, &virt_stream_fops);
+	strm_dev->cdev.owner = virt_stream_fops.owner;
+
+	rval = cdev_add(&strm_dev->cdev, MKDEV(MAJOR(virt_stream_dev_t), num), 1);
+	if (rval) {
+		pr_err("%s: failed to add cdevice\n", __func__);
+		return rval;
+	}
+
+	strm_dev->dev.class = virt_stream_class;
+	strm_dev->dev.devt = MKDEV(MAJOR(virt_stream_dev_t), num);
+	strm_dev->dev.release = base_device_release;
+	dev_set_name(&strm_dev->dev, "%s%d", ICI_STREAM_DEVICE_NAME, num);
+
+	rval = device_register(&strm_dev->dev);
+	if (rval < 0) {
+		pr_err("%s: device_register failed\n", __func__);
+		cdev_del(&strm_dev->cdev);
+		return rval;
+	}
+	strlcpy(strm_dev->name, strm_dev->dev.kobj.name, sizeof(strm_dev->name));
+	strm_dev->minor = num;
+	vstream->virt_dev_id = num;
+
+
virt_stream_devs_registered++; + + fe_ctx = kcalloc(1, sizeof(struct ipu4_virtio_ctx), + GFP_KERNEL); + + if (!fe_ctx) + return -ENOMEM; + + fe_ctx->bknd_ops = &ipu4_virtio_bknd_ops; + + if (fe_ctx->bknd_ops->init) { + rval = fe_ctx->bknd_ops->init(); + if (rval < 0) { + pr_err("failed to initialize backend.\n"); + return rval; + } + } + + fe_ctx->domid = fe_ctx->bknd_ops->get_vm_id(); + vstream->ctx = fe_ctx; + dev_dbg(&strm_dev->dev, "IPU FE registered with domid:%d\n", fe_ctx->domid); + + return 0; +} + +static void virt_ici_stream_exit(void) +{ + class_unregister(virt_stream_class); + unregister_chrdev_region(virt_stream_dev_t, MAX_STREAM_DEVICES); + + pr_notice("Virtual stream device unregistered\n"); +} + +static int virt_pipeline_fop_open(struct inode *inode, struct file *file) +{ + struct ici_isys_pipeline_device *dev = inode_to_ici_isys_pipeline_device(inode); + struct ipu4_virtio_req *req; + int rval = 0; + int op[2]; + pr_debug("virt pipeline open\n"); + get_device(&dev->dev); + + file->private_data = dev; + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + op[0] = dev->minor; + op[1] = 0; + + intel_ipu4_virtio_create_req(req, IPU4_CMD_PIPELINE_OPEN, &op[0]); + + rval = g_fe_priv->bknd_ops->send_req(g_fe_priv->domid, req, true, IPU_VIRTIO_QUEUE_1); + if (rval) { + pr_err("Failed to open virtual device\n"); + kfree(req); + return rval; + } + kfree(req); + + return rval; +} + +static int virt_pipeline_fop_release(struct inode *inode, struct file *file) +{ + int rval = 0; + int op[2]; + struct ipu4_virtio_req *req; + + struct ici_isys_pipeline_device *pipe_dev = + inode_to_ici_isys_pipeline_device(inode); + + put_device(&pipe_dev->dev); + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + op[0] = pipe_dev->minor; + op[1] = 0; + + intel_ipu4_virtio_create_req(req, IPU4_CMD_PIPELINE_CLOSE, &op[0]); + + rval = g_fe_priv->bknd_ops->send_req(g_fe_priv->domid, req, true, IPU_VIRTIO_QUEUE_1); + if (rval) { + pr_err("Failed to close virtual device\n"); + kfree(req); + return rval; + } + kfree(req); + + return rval; +} + +static long virt_pipeline_ioctl_common(void __user *up, + struct file *file, unsigned int ioctl_cmd, + unsigned long ioctl_arg) +{ + union isys_ioctl_cmd_args { + struct ici_node_desc node_desc; + struct ici_link_desc link; + struct ici_pad_framefmt pad_prop; + struct ici_pad_supported_format_desc + format_desc; + struct ici_links_query links_query; + struct ici_pad_selection pad_sel; + }; + int err = 0; + union isys_ioctl_cmd_args *data = NULL; + struct ici_isys_pipeline_device *dev = file->private_data; + + if (_IOC_SIZE(ioctl_cmd) > sizeof(union isys_ioctl_cmd_args)) + return -ENOTTY; + + data = (union isys_ioctl_cmd_args *) kzalloc(sizeof(union isys_ioctl_cmd_args), GFP_KERNEL); + if (_IOC_DIR(ioctl_cmd) & _IOC_WRITE) { + err = copy_from_user(data, up, + _IOC_SIZE(ioctl_cmd)); + if (err) { + kfree(data); + return -EFAULT; + } + } + mutex_lock(&dev->mutex); + switch (ioctl_cmd) { + case ICI_IOC_ENUM_NODES: + err = process_pipeline(file, g_fe_priv, + (void *)&data->node_desc, IPU4_CMD_ENUM_NODES); + break; + case ICI_IOC_ENUM_LINKS: + pr_debug("virt_pipeline_ioctl: ICI_IOC_ENUM_LINKS\n"); + err = process_pipeline(file, g_fe_priv, (void *)&data->links_query, IPU4_CMD_ENUM_LINKS); + break; + case ICI_IOC_SETUP_PIPE: + pr_debug("virt_pipeline_ioctl: ICI_IOC_SETUP_PIPE\n"); + err = process_pipeline(file, g_fe_priv, + (void *)&data->link, IPU4_CMD_SETUP_PIPE); + break; + case ICI_IOC_SET_FRAMEFMT: + 
pr_debug("virt_pipeline_ioctl: ICI_IOC_SET_FRAMEFMT\n"); + err = process_pipeline(file, g_fe_priv, + (void *)&data->pad_prop, IPU4_CMD_SET_FRAMEFMT); + break; + case ICI_IOC_GET_FRAMEFMT: + pr_debug("virt_pipeline_ioctl: ICI_IOC_GET_FRAMEFMT\n"); + err = process_pipeline(file, g_fe_priv, + (void *)&data->pad_prop, IPU4_CMD_GET_FRAMEFMT); + break; + case ICI_IOC_GET_SUPPORTED_FRAMEFMT: + pr_debug("virt_pipeline_ioctl: ICI_IOC_GET_SUPPORTED_FRAMEFMT\n"); + err = process_pipeline(file, g_fe_priv, + (void *)&data->format_desc, IPU4_CMD_GET_SUPPORTED_FRAMEFMT); + break; + case ICI_IOC_SET_SELECTION: + pr_debug("virt_pipeline_ioctl: ICI_IOC_SET_SELECTION\n"); + err = process_pipeline(file, g_fe_priv, + (void *)&data->pad_sel, IPU4_CMD_SET_SELECTION); + break; + case ICI_IOC_GET_SELECTION: + pr_debug("virt_pipeline_ioctl: ICI_IOC_GET_SELECTION\n"); + err = process_pipeline(file, g_fe_priv, + (void *)&data->pad_sel, IPU4_CMD_GET_SELECTION); + break; + default: + err = -ENOTTY; + break; + } + + mutex_unlock(&dev->mutex); + if (err < 0) { + kfree(data); + return err; + } + + if (_IOC_DIR(ioctl_cmd) & _IOC_READ) { + err = copy_to_user(up, data, + _IOC_SIZE(ioctl_cmd)); + if (err) { + kfree(data); + return -EFAULT; + } + } + kfree(data); + + return 0; +} + +static long virt_pipeline_ioctl(struct file *file, unsigned int ioctl_cmd, + unsigned long ioctl_arg) +{ + void __user *up = (void __user *)ioctl_arg; + return virt_pipeline_ioctl_common(up, file, ioctl_cmd, ioctl_arg); +} + +static long virt_pipeline_ioctl32(struct file *file, unsigned int ioctl_cmd, + unsigned long ioctl_arg) +{ + void __user *up = compat_ptr(ioctl_arg); + return virt_pipeline_ioctl_common(up, file, ioctl_cmd, ioctl_arg); +} + +static const struct file_operations virt_pipeline_fops = { + .owner = THIS_MODULE, + .open = virt_pipeline_fop_open, + .unlocked_ioctl = virt_pipeline_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = virt_pipeline_ioctl32, +#endif + .release = virt_pipeline_fop_release, +}; + +static int virt_fe_init(void) +{ + int rval; + + g_fe_priv = kcalloc(1, sizeof(struct ipu4_virtio_ctx), + GFP_KERNEL); + + if (!g_fe_priv) + return -ENOMEM; + + g_fe_priv->bknd_ops = &ipu4_virtio_bknd_ops; + + if (g_fe_priv->bknd_ops->init) { + rval = g_fe_priv->bknd_ops->init(); + if (rval < 0) { + pr_err("failed to initialize backend.\n"); + return rval; + } + } + + g_fe_priv->domid = g_fe_priv->bknd_ops->get_vm_id(); + + pr_debug("FE registered with domid:%d\n", g_fe_priv->domid); + + return 0; +} + +static int virt_ici_pipeline_init(void) +{ + int rval; + pr_notice("Initializing pipeline\n"); + virt_pipeline_dev_t = MKDEV(MAJOR_PIPELINE, 0); + + rval = register_chrdev_region(virt_pipeline_dev_t, + MAX_PIPELINE_DEVICES, ICI_PIPELINE_DEVICE_NAME); + if (rval) { + pr_err("can't register virt_ici stream chrdev region (%d)\n", + rval); + return rval; + } + + virt_pipeline_class = class_create(THIS_MODULE, ICI_PIPELINE_DEVICE_NAME); + if (IS_ERR(virt_pipeline_class)) { + unregister_chrdev_region(virt_pipeline_dev_t, MAX_PIPELINE_DEVICES); + pr_err("Failed to register device class %s\n", ICI_PIPELINE_DEVICE_NAME); + return PTR_ERR(virt_pipeline_class); + } + + pipeline_dev = kzalloc(sizeof(*pipeline_dev), GFP_KERNEL); + if (!pipeline_dev) + return -ENOMEM; + pipeline_dev->minor = -1; + cdev_init(&pipeline_dev->cdev, &virt_pipeline_fops); + pipeline_dev->cdev.owner = virt_pipeline_fops.owner; + + rval = cdev_add(&pipeline_dev->cdev, MKDEV(MAJOR_PIPELINE, MINOR_PIPELINE), 1); + if (rval) { + pr_err("%s: failed to add cdevice\n", __func__); 
+ return rval; + } + + pipeline_dev->dev.class = virt_pipeline_class; + pipeline_dev->dev.devt = MKDEV(MAJOR_PIPELINE, MINOR_PIPELINE); + dev_set_name(&pipeline_dev->dev, "%s", ICI_PIPELINE_DEVICE_NAME); + + rval = device_register(&pipeline_dev->dev); + if (rval < 0) { + pr_err("%s: device_register failed\n", __func__); + cdev_del(&pipeline_dev->cdev); + return rval; + } + pipeline_dev->dev.release = base_device_release; + strlcpy(pipeline_dev->name, pipeline_dev->dev.kobj.name, sizeof(pipeline_dev->name)); + pipeline_dev->minor = MINOR_PIPELINE; + + return 0; +} + +static int __init virt_ici_init(void) +{ + struct virtual_stream *vstream; + int rval = 0, i; + pr_notice("Initializing IPU Para virtual driver\n"); + for (i = 0; i < MAX_ISYS_VIRT_STREAM; i++) { + + vstream = kzalloc(sizeof(*vstream), GFP_KERNEL); + if (!vstream) + return -ENOMEM; + mutex_init(&vstream->mutex); + mutex_init(&fop_mutex); + vstream->strm_dev.mutex = &vstream->mutex; + + rval = virt_frame_buf_init(&vstream->buf_list); + if (rval) + goto init_fail; + + dev_set_drvdata(&vstream->strm_dev.dev, vstream); + + mutex_lock(&vstream->mutex); + rval = virt_ici_stream_init(vstream, &vstream->strm_dev); + mutex_unlock(&vstream->mutex); + + if (rval) + goto init_fail; + } + + rval = virt_ici_pipeline_init(); + if (rval) + goto init_fail; + + rval = virt_fe_init(); + return rval; + +init_fail: + mutex_destroy(&vstream->mutex); + mutex_destroy(&fop_mutex); + kfree(vstream); + return rval; +} + +static void virt_ici_pipeline_exit(void) +{ + class_unregister(virt_pipeline_class); + unregister_chrdev_region(virt_pipeline_dev_t, MAX_PIPELINE_DEVICES); + if (pipeline_dev) + kfree((void *)pipeline_dev); + if (g_fe_priv) + kfree((void *)g_fe_priv); + + pr_notice("virt_ici pipeline device unregistered\n"); +} + +static void __exit virt_ici_exit(void) +{ + virt_ici_stream_exit(); + virt_ici_pipeline_exit(); +} + +module_init(virt_ici_init); +module_exit(virt_ici_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Intel IPU Para virtualize ici input system driver"); +MODULE_AUTHOR("Kushal Bandi "); + + diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.h b/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.h new file mode 100644 index 000000000000..f44954b03be2 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-para-virt-drv.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef INTEL_IPU4_PARA_VIRT_H +#define INTEL_IPU4_PARA_VIRT_H + +#include +#include +#include +#include +#include +#include +#include + +#include "./ici/ici-isys-stream-device.h" +#include "./ici/ici-isys-frame-buf.h" +#include "intel-ipu4-virtio-common.h" + +#define MAX_STREAM_DEVICES 64 +#define MAX_PIPELINE_DEVICES 1 +#define MAX_ISYS_VIRT_STREAM 34 + +struct virtual_stream { + struct mutex mutex; + struct ici_stream_device strm_dev; + int virt_dev_id; + int actual_fd; + struct ipu4_virtio_ctx *ctx; + struct ici_isys_frame_buf_list buf_list; +}; + + +#define dev_to_vstream(dev) \ + container_of(dev, struct virtual_stream, strm_dev) + +#endif diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.c new file mode 100644 index 000000000000..a43d5e8b2812 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include 
+#include +#include +#include +#include +#include +#include + +#include "intel-ipu4-virtio-be-bridge.h" +#include "./ici/ici-isys-frame-buf.h" +#include "intel-ipu4-virtio-be-pipeline.h" +#include "intel-ipu4-virtio-be-stream.h" + +int intel_ipu4_virtio_msg_parse(int domid, struct ipu4_virtio_req *req) +{ + int ret = 0; + if (!req) { + pr_err("IPU mediator: request is NULL\n"); + return -EINVAL; + } + if ((req->cmd < IPU4_CMD_DEVICE_OPEN) || + (req->cmd >= IPU4_CMD_GET_N)) { + pr_err("IPU mediator: invalid command\n"); + return -EINVAL; + } + switch (req->cmd) { + case IPU4_CMD_POLL: + /* + * Open video device node + * op0 - virtual device node number + * op1 - Actual device fd. By default set to 0 + */ + pr_debug("POLL: virtual_dev_id:%d actual_fd:%d\n", req->op[0], req->op[1]); + req->stat = IPU4_REQ_NEEDS_FOLLOW_UP; + break; + case IPU4_CMD_DEVICE_OPEN: + /* + * Open video device node + * op0 - virtual device node number + * op1 - Actual device fd. By default set to 0 + */ + pr_debug("DEVICE_OPEN: virtual_dev_id:%d actual_fd:%d\n", req->op[0], req->op[1]); + ret = process_device_open(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + case IPU4_CMD_DEVICE_CLOSE: + /* + * Close video device node + * op0 - virtual device node number + * op1 - Actual device fd. By default set to 0 + */ + pr_debug("DEVICE_CLOSE: virtual_dev_id:%d actual_fd:%d\n", req->op[0], req->op[1]); + ret = process_device_close(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + case IPU4_CMD_STREAM_ON: + /* Start Stream + * op0 - virtual device node number + * op1 - Actual device fd. By default set to 0 + */ + pr_debug("STREAM ON: virtual_dev_id:%d actual_fd:%d\n", req->op[0], req->op[1]); + ret = process_stream_on(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + case IPU4_CMD_STREAM_OFF: + /* Stop Stream + * op0 - virtual device node number + * op1 - Actual device fd. By default set to 0 + */ + pr_debug("STREAM OFF: virtual_dev_id:%d actual_fd:%d\n", req->op[0], req->op[1]); + ret = process_stream_off(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + case IPU4_CMD_GET_BUF: + /* Set Format of a given video node + * op0 - virtual device node number + * op1 - Actual device fd. By default set to 0 + * op2 - Memory Type 1: USER_PTR 2: DMA_PTR + * op3 - Number of planes + * op4 - Buffer ID + * op5 - Length of Buffer + */ + + ret = process_get_buf(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + case IPU4_CMD_PUT_BUF: + /* Set Format of a given video node + * op0 - virtual device node number + * op1 - Actual device fd. 
By default set to 0 + * op2 - Memory Type 1: USER_PTR 2: DMA_PTR + */ + ret = process_put_buf(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + case IPU4_CMD_SET_FORMAT: + ret = process_set_format(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + case IPU4_CMD_PIPELINE_OPEN: + ret = process_pipeline_open(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + case IPU4_CMD_PIPELINE_CLOSE: + ret = process_pipeline_close(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + case IPU4_CMD_ENUM_NODES: + ret = process_enum_nodes(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + case IPU4_CMD_ENUM_LINKS: + ret = process_enum_links(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + case IPU4_CMD_SETUP_PIPE: + ret = process_setup_pipe(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + case IPU4_CMD_SET_FRAMEFMT: + ret = process_set_framefmt(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + case IPU4_CMD_GET_FRAMEFMT: + ret = process_get_framefmt(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + case IPU4_CMD_GET_SUPPORTED_FRAMEFMT: + ret = process_get_supported_framefmt(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + case IPU4_CMD_SET_SELECTION: + ret = process_pad_set_sel(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + case IPU4_CMD_GET_SELECTION: + ret = process_pad_get_sel(domid, req); + if (ret) + req->stat = IPU4_REQ_ERROR; + else + req->stat = IPU4_REQ_PROCESSED; + break; + default: + return -EINVAL; + } + + return ret; +} diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.h new file mode 100644 index 000000000000..25238f29bc33 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-bridge.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __IPU4_VIRTIO_BE_BRIDGE__ +#define __IPU4_VIRTIO_BE_BRIDGE__ + +#include +#include +#include +#include + +#include "intel-ipu4-virtio-common.h" + +int intel_ipu4_virtio_msg_parse(int domid, struct ipu4_virtio_req *req); + +void intel_ipu4_virtio_create_req(struct ipu4_virtio_req *req, + enum intel_ipu4_virtio_command cmd, int *op); + +int intel_ipu4_virtio_msg_parse(int domid, struct ipu4_virtio_req *req); + + +#endif + + diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.c new file mode 100644 index 000000000000..3adf5b4c9640 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include +#include +#include "intel-ipu4-virtio-be-pipeline.h" +#include "./ici/ici-isys-pipeline.h" +#include "./ici/ici-isys-pipeline-device.h" + +static struct file *pipeline; +static int guestID = -1; + +int 
process_pipeline_open(int domid, struct ipu4_virtio_req *req) +{ + if (guestID != -1 && guestID != domid) { + pr_err("%s: pipeline device already opened by other guest! %d %d", __func__, guestID, domid); + return -1; + } + + pr_info("process_device_open: /dev/intel_pipeline"); + pipeline = filp_open("/dev/intel_pipeline", O_RDWR | O_NONBLOCK, 0); + guestID = domid; + + return 0; +} + +int process_pipeline_close(int domid, struct ipu4_virtio_req *req) +{ + pr_info("%s: %d", __func__, req->op[0]); + + filp_close(pipeline, 0); + guestID = -1; + + return 0; +} + +int process_enum_nodes(int domid, struct ipu4_virtio_req *req) +{ + int err = 0; + struct ici_isys_pipeline_device *dev = pipeline->private_data; + struct ici_node_desc *host_virt; + + pr_debug("%s\n", __func__); + + host_virt = (struct ici_node_desc *)map_guest_phys(domid, req->payload, PAGE_SIZE); + if (host_virt == NULL) { + pr_err("process_enum_nodes: NULL host_virt"); + return 0; + } + + err = dev->pipeline_ioctl_ops->pipeline_enum_nodes(pipeline, dev, host_virt); + + return err; +} + +int process_enum_links(int domid, struct ipu4_virtio_req *req) +{ + int err = 0; + struct ici_isys_pipeline_device *dev = pipeline->private_data; + struct ici_links_query *host_virt; + + pr_debug("%s\n", __func__); + + host_virt = (struct ici_links_query *)map_guest_phys(domid, req->payload, PAGE_SIZE); + if (host_virt == NULL) { + pr_err("%s: NULL host_virt\n", __func__); + return 0; + } + err = dev->pipeline_ioctl_ops->pipeline_enum_links(pipeline, dev, host_virt); + + return err; +} +int process_get_supported_framefmt(int domid, struct ipu4_virtio_req *req) +{ + int err = 0; + struct ici_isys_pipeline_device *dev = pipeline->private_data; + struct ici_pad_supported_format_desc *host_virt; + + pr_debug("%s\n", __func__); + + host_virt = (struct ici_pad_supported_format_desc *)map_guest_phys(domid, req->payload, PAGE_SIZE); + if (host_virt == NULL) { + pr_err("%s: NULL host_virt\n", __func__); + return 0; + } + err = dev->pipeline_ioctl_ops->pad_get_supported_format(pipeline, dev, host_virt); + + return err; +} + +int process_set_framefmt(int domid, struct ipu4_virtio_req *req) +{ + int err = 0; + struct ici_isys_pipeline_device *dev = pipeline->private_data; + struct ici_pad_framefmt *host_virt; + + pr_debug("%s\n", __func__); + + host_virt = (struct ici_pad_framefmt *)map_guest_phys(domid, req->payload, PAGE_SIZE); + if (host_virt == NULL) { + pr_err("%s: NULL host_virt\n", __func__); + return 0; + } + err = dev->pipeline_ioctl_ops->pad_set_ffmt(pipeline, dev, host_virt); + + return err; +} + +int process_get_framefmt(int domid, struct ipu4_virtio_req *req) +{ + int err = 0; + struct ici_isys_pipeline_device *dev = pipeline->private_data; + struct ici_pad_framefmt *host_virt; + + pr_debug("%s\n", __func__); + + host_virt = (struct ici_pad_framefmt *)map_guest_phys(domid, req->payload, PAGE_SIZE); + if (host_virt == NULL) { + pr_err("%s: NULL host_virt\n", __func__); + return 0; + } + err = dev->pipeline_ioctl_ops->pad_get_ffmt(pipeline, dev, host_virt); + + return err; +} + +int process_setup_pipe(int domid, struct ipu4_virtio_req *req) +{ + int err = 0; + struct ici_isys_pipeline_device *dev = pipeline->private_data; + struct ici_link_desc *host_virt; + + pr_debug("%s\n", __func__); + + host_virt = (struct ici_link_desc *)map_guest_phys(domid, req->payload, PAGE_SIZE); + if (host_virt == NULL) { + pr_err("%s: NULL host_virt\n", __func__); + return 0; + } + err = dev->pipeline_ioctl_ops->pipeline_setup_pipe(pipeline, dev, host_virt); + + return 
err; +} + +int process_pad_set_sel(int domid, struct ipu4_virtio_req *req) +{ + int err = 0; + struct ici_isys_pipeline_device *dev = pipeline->private_data; + struct ici_pad_selection *host_virt; + + pr_debug("%s\n", __func__); + + host_virt = (struct ici_pad_selection *)map_guest_phys(domid, req->payload, PAGE_SIZE); + if (host_virt == NULL) { + pr_err("%s: NULL host_virt\n", __func__); + return 0; + } + err = dev->pipeline_ioctl_ops->pad_set_sel(pipeline, dev, host_virt); + + return err; +} + +int process_pad_get_sel(int domid, struct ipu4_virtio_req *req) +{ + int err = 0; + struct ici_isys_pipeline_device *dev = pipeline->private_data; + struct ici_pad_selection *host_virt; + + pr_debug("%s\n", __func__); + + host_virt = (struct ici_pad_selection *)map_guest_phys(domid, req->payload, PAGE_SIZE); + if (host_virt == NULL) { + pr_err("%s: NULL host_virt\n", __func__); + return 0; + } + err = dev->pipeline_ioctl_ops->pad_get_sel(pipeline, dev, host_virt); + + return err; +} + +/* + union isys_ioctl_cmd_args { + struct ici_node_desc node_desc; + struct ici_link_desc link; + struct ici_pad_framefmt pad_prop; + struct ici_pad_supported_format_desc + format_desc; + struct ici_links_query links_query; + struct ici_pad_selection pad_sel; + }; + + .pipeline_setup_pipe = ici_setup_link, + .pipeline_enum_nodes = pipeline_enum_nodes, + .pipeline_enum_links = pipeline_enum_links, + .pad_set_ffmt = ici_pipeline_set_ffmt, + .pad_get_ffmt = ici_pipeline_get_ffmt, + .pad_get_supported_format = + ici_pipeline_get_supported_format, + .pad_set_sel = ici_pipeline_set_sel, + .pad_get_sel = ici_pipeline_get_sel, + +*/ + diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.h new file mode 100644 index 000000000000..df65e88050ea --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-pipeline.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __IPU4_VIRTIO_BE_PIPELINE__ +#define __IPU4_VIRTIO_BE_PIPELINE__ + +#include +#include + +#include "intel-ipu4-virtio-common.h" + +int process_pipeline_open(int domid, struct ipu4_virtio_req *req); +int process_pipeline_close(int domid, struct ipu4_virtio_req *req); +int process_enum_nodes(int domid, struct ipu4_virtio_req *req); +int process_enum_links(int domid, struct ipu4_virtio_req *req); +int process_get_supported_framefmt(int domid, struct ipu4_virtio_req *req); +int process_set_framefmt(int domid, struct ipu4_virtio_req *req); +int process_get_framefmt(int domid, struct ipu4_virtio_req *req); +int process_pad_set_sel(int domid, struct ipu4_virtio_req *req); +int process_pad_get_sel(int domid, struct ipu4_virtio_req *req); +int process_setup_pipe(int domid, struct ipu4_virtio_req *req); + +#endif + + diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.c new file mode 100644 index 000000000000..5f8b9ac4aea1 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.c @@ -0,0 +1,356 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include "./ici/ici-isys-stream-device.h" +#include "./ici/ici-isys-stream.h" +#include "./ici/ici-isys-frame-buf.h" +#include "intel-ipu4-virtio-be-stream.h" +#include "intel-ipu4-virtio-be.h" + 
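/*
 * Editorial note: every process_*() handler in the stream backend below
 * repeats the same open-coded hashtable walk to find the stream node for
 * a virtual device id. A shared helper would factor that out; a sketch
 * against the declarations that follow in this file (not part of the
 * patch itself):
 */
static struct stream_node *stream_node_find(int dev_id)
{
	struct stream_node *sn;

	if (!hash_initialised)
		return NULL;
	hash_for_each_possible(STREAM_NODE_HASH, sn, node, dev_id)
		return sn;	/* first node hashed under this id, if any */
	return NULL;
}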
+#define MAX_SIZE 6 // max 2^6 + +#define dev_to_stream(dev) \ + container_of(dev, struct ici_isys_stream, strm_dev) + +DECLARE_HASHTABLE(STREAM_NODE_HASH, MAX_SIZE); +static bool hash_initialised; + +struct stream_node { + int client_id; + struct file *f; + struct hlist_node node; +}; + +int frame_done_callback(void) +{ + notify_fe(); + return 0; +} + +int process_device_open(int domid, struct ipu4_virtio_req *req) +{ + char node_name[25]; + struct stream_node *sn = NULL; + + if (!hash_initialised) { + hash_init(STREAM_NODE_HASH); + hash_initialised = true; + } + hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) { + if (sn != NULL) { + if (sn->client_id != domid) { + pr_err("process_device_open: stream device %d already opened by other guest!", sn->client_id); + return -EBUSY; + } + pr_info("process_device_open: stream device %d already opened by client %d", req->op[0], domid); + return 0; + } + } + + sprintf(node_name, "/dev/intel_stream%d", req->op[0]); + pr_info("process_device_open: %s", node_name); + sn = kzalloc(sizeof(struct stream_node), GFP_KERNEL); + sn->f = filp_open(node_name, O_RDWR | O_NONBLOCK, 0); + sn->client_id = domid; + + hash_add(STREAM_NODE_HASH, &sn->node, req->op[0]); + + return 0; +} + +int process_device_close(int domid, struct ipu4_virtio_req *req) +{ + struct stream_node *sn = NULL; + if (!hash_initialised) + return 0; //no node has been opened, do nothing + + pr_info("process_device_close: %d", req->op[0]); + + hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) { + if (sn != NULL) { + pr_err("process_device_close: %d closed", req->op[0]); + hash_del(&sn->node); + filp_close(sn->f, 0); + kfree(sn); + } + } + + return 0; +} + +int process_set_format(int domid, struct ipu4_virtio_req *req) +{ + struct stream_node *sn = NULL; + struct ici_stream_device *strm_dev; + struct ici_stream_format *host_virt; + int err, found; + + pr_debug("process_set_format: %d %d", hash_initialised, req->op[0]); + + if (!hash_initialised) + return -1; + + found = 0; + hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) { + if (sn != NULL) { + pr_err("process_set_format: node %d %p", req->op[0], sn); + found = 1; + break; + } + } + + if (!found) { + pr_debug("%s: stream not found %d\n", __func__, req->op[0]); + return -1; + } + + strm_dev = sn->f->private_data; + if (strm_dev == NULL) { + pr_err("Native IPU stream device not found\n"); + return -1; + } + + host_virt = (struct ici_stream_format *)map_guest_phys(domid, req->payload, PAGE_SIZE); + if (host_virt == NULL) { + pr_err("process_set_format: NULL host_virt"); + return -1; + } + + err = strm_dev->ipu_ioctl_ops->ici_set_format(sn->f, strm_dev, host_virt); + + if (err) + pr_err("intel_ipu4_pvirt: internal set fmt failed\n"); + + return 0; +} + +int process_poll(int domid, struct ipu4_virtio_req *req) +{ + return 0; +} + +int process_put_buf(int domid, struct ipu4_virtio_req *req) +{ + struct stream_node *sn = NULL; + struct ici_stream_device *strm_dev; + struct ici_frame_info *host_virt; + int err, found; + + pr_debug("process_put_buf: %d %d", hash_initialised, req->op[0]); + + if (!hash_initialised) + return -1; + + found = 0; + hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) { + if (sn != NULL) { + pr_debug("process_put_buf: node %d %p", req->op[0], sn); + found = 1; + break; + } + } + + if (!found) { + pr_debug("%s: stream not found %d\n", __func__, req->op[0]); + return -1; + } + + strm_dev = sn->f->private_data; + if (strm_dev == NULL) { + pr_err("Native IPU stream device not 
found\n"); + return -1; + } + + host_virt = (struct ici_frame_info *)map_guest_phys(domid, req->payload, PAGE_SIZE); + if (host_virt == NULL) { + pr_err("process_put_buf: NULL host_virt"); + return -1; + } + err = strm_dev->ipu_ioctl_ops->ici_put_buf(sn->f, strm_dev, host_virt); + + if (err) + pr_err("process_put_buf: ici_put_buf failed\n"); + + return 0; +} + +int process_get_buf(int domid, struct ipu4_virtio_req *req) +{ + struct stream_node *sn = NULL; + struct ici_frame_buf_wrapper *shared_buf; + struct ici_stream_device *strm_dev; + int k, i = 0; + void *pageaddr; + u64 *page_table = NULL; + struct page **data_pages = NULL; + int err, found; + + pr_debug("process_get_buf: %d %d", hash_initialised, req->op[0]); + + if (!hash_initialised) + return -1; + + found = 0; + hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) { + if (sn != NULL) { + pr_debug("process_get_buf: node %d %p", req->op[0], sn); + found = 1; + break; + } + } + + if (!found) { + pr_debug("%s: stream not found %d\n", __func__, req->op[0]); + return -1; + } + + pr_debug("GET_BUF: Mapping buffer\n"); + shared_buf = (struct ici_frame_buf_wrapper *)map_guest_phys(domid, req->payload, PAGE_SIZE); + if (!shared_buf) { + pr_err("SOS Failed to map Buffer from UserOS\n"); + req->stat = IPU4_REQ_ERROR; + } + data_pages = kcalloc(shared_buf->kframe_info.planes[0].npages, sizeof(struct page *), GFP_KERNEL); + if (data_pages == NULL) { + pr_err("SOS Failed alloc data page set\n"); + req->stat = IPU4_REQ_ERROR; + } + pr_debug("Total number of pages:%d\n", shared_buf->kframe_info.planes[0].npages); + + page_table = (u64 *)map_guest_phys(domid, shared_buf->kframe_info.planes[0].page_table_ref, PAGE_SIZE); + + if (page_table == NULL) { + pr_err("SOS Failed to map page table\n"); + req->stat = IPU4_REQ_ERROR; + kfree(data_pages); + return -1; + } + + else { + pr_debug("SOS first page %lld\n", page_table[0]); + k = 0; + for (i = 0; i < shared_buf->kframe_info.planes[0].npages; i++) { + pageaddr = map_guest_phys(domid, page_table[i], PAGE_SIZE); + if (pageaddr == NULL) { + pr_err("Cannot map pages from UOS\n"); + req->stat = IPU4_REQ_ERROR; + break; + } + + data_pages[k] = virt_to_page(pageaddr); + k++; + } + } + + strm_dev = sn->f->private_data; + if (strm_dev == NULL) { + pr_err("Native IPU stream device not found\n"); + kfree(data_pages); + return -1; + } + err = strm_dev->ipu_ioctl_ops->ici_get_buf_virt(sn->f, strm_dev, shared_buf, data_pages); + + if (err) + pr_err("process_get_buf: ici_get_buf_virt failed\n"); + + kfree(data_pages); + return 0; +} + +int process_stream_on(int domid, struct ipu4_virtio_req *req) +{ + struct stream_node *sn = NULL; + struct ici_isys_stream *as; + struct ici_stream_device *strm_dev; + int err, found; + + pr_debug("process_stream_on: %d %d", hash_initialised, req->op[0]); + + if (!hash_initialised) + return -1; + + found = 0; + hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) { + if (sn != NULL) { + pr_err("process_stream_on: node %d %p", req->op[0], sn); + found = 1; + break; + } + } + + if (!found) { + pr_debug("%s: stream not found %d\n", __func__, req->op[0]); + return -1; + } + + strm_dev = sn->f->private_data; + if (strm_dev == NULL) { + pr_err("Native IPU stream device not found\n"); + return -1; + } + + as = dev_to_stream(strm_dev); + as->frame_done_notify_queue = frame_done_callback; + + err = strm_dev->ipu_ioctl_ops->ici_stream_on(sn->f, strm_dev); + + if (err) + pr_err("process_stream_on: stream on failed\n"); + + return 0; +} + +int process_stream_off(int domid, 
struct ipu4_virtio_req *req) +{ + struct stream_node *sn = NULL; + struct ici_stream_device *strm_dev; + struct ici_isys_stream *as; + int err, found; + + pr_debug("process_stream_off: %d %d", hash_initialised, req->op[0]); + + if (!hash_initialised) + return -1; + + found = 0; + hash_for_each_possible(STREAM_NODE_HASH, sn, node, req->op[0]) { + if (sn != NULL) { + pr_err("process_stream_off: node %d %p", req->op[0], sn); + found = 1; + break; + } + } + + if (!found) { + pr_debug("%s: stream not found %d\n", __func__, req->op[0]); + return -1; + } + + strm_dev = sn->f->private_data; + if (strm_dev == NULL) { + pr_err("Native IPU stream device not found\n"); + return -1; + } + + as = dev_to_stream(strm_dev); + as->frame_done_notify_queue = NULL; + + err = strm_dev->ipu_ioctl_ops->ici_stream_off(sn->f, strm_dev); + + if (err) + pr_err("process_stream_off: stream off failed\n"); + + return 0; +} diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.h new file mode 100644 index 000000000000..0d85b3561274 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be-stream.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __IPU4_VIRTIO_BE_STREAM__ +#define __IPU4_VIRTIO_BE_STREAM__ + +#include +#include + +#include "intel-ipu4-virtio-common.h" + +int process_set_format(int domid, struct ipu4_virtio_req *req); +int process_device_open(int domid, struct ipu4_virtio_req *req); +int process_device_close(int domid, struct ipu4_virtio_req *req); +int process_poll(int domid, struct ipu4_virtio_req *req); +int process_put_buf(int domid, struct ipu4_virtio_req *req); +int process_stream_on(int domid, struct ipu4_virtio_req *req); +int process_stream_off(int domid, struct ipu4_virtio_req *req); +int process_get_buf(int domid, struct ipu4_virtio_req *req); + + +#endif + + diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.c new file mode 100644 index 000000000000..aa64d09adb35 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.c @@ -0,0 +1,520 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "intel-ipu4-virtio-common.h" +#include "intel-ipu4-virtio-be-bridge.h" +#include "intel-ipu4-virtio-be.h" + +/** + * struct ipu4_virtio_be_priv - Backend of virtio-rng based on VBS-K + * + * @dev : instance of struct virtio_dev_info + * @vqs : instances of struct virtio_vq_info + * @hwrng : device specific member + * @node : hashtable maintaining multiple connections + * from multiple guests/devices + */ +struct ipu4_virtio_be_priv { + struct virtio_dev_info dev; + struct virtio_vq_info vqs[IPU_VIRTIO_QUEUE_MAX]; + bool busy; + struct ipu4_virtio_req *pending_tx_req; + struct mutex lock; + /* + * Each VBS-K module might serve multiple connections + * from multiple guests/device models/VBS-Us, so better + * to maintain the connections in a list, and here we + * use hashtable as an example. 
+ */ + struct hlist_node node; +}; + +struct vq_request_data { + struct virtio_vq_info *vq; + struct ipu4_virtio_req *req; + int len; + uint16_t idx; +}; + +struct vq_request_data vq_req; + +#define RNG_MAX_HASH_BITS 4 /* MAX is 2^4 */ +#define HASH_NAME vbs_hash + +DECLARE_HASHTABLE(HASH_NAME, RNG_MAX_HASH_BITS); +static int ipu_vbk_hash_initialized; +static int ipu_vbk_connection_cnt; + +/* function declarations */ +static int handle_kick(int client_id, long unsigned int *req_cnt); +static void ipu_vbk_reset(struct ipu4_virtio_be_priv *rng); +static void ipu_vbk_stop(struct ipu4_virtio_be_priv *rng); +static void ipu_vbk_flush(struct ipu4_virtio_be_priv *rng); + +#ifdef RUNTIME_CTRL +static int ipu_vbk_enable_vq(struct ipu4_virtio_be_priv *rng, + struct virtio_vq_info *vq); +static void ipu_vbk_disable_vq(struct ipu4_virtio_be_priv *rng, + struct virtio_vq_info *vq); +static void ipu_vbk_stop_vq(struct ipu4_virtio_be_priv *rng, + struct virtio_vq_info *vq); +static void ipu_vbk_flush_vq(struct ipu4_virtio_be_priv *rng, int index); +#endif + +/* hash table related functions */ +static void ipu_vbk_hash_init(void) +{ + if (ipu_vbk_hash_initialized) + return; + + hash_init(HASH_NAME); + ipu_vbk_hash_initialized = 1; +} + +static int ipu_vbk_hash_add(struct ipu4_virtio_be_priv *entry) +{ + if (!ipu_vbk_hash_initialized) { + pr_err("RNG hash table not initialized!\n"); + return -1; + } + + hash_add(HASH_NAME, &entry->node, virtio_dev_client_id(&entry->dev)); + return 0; +} + +static struct ipu4_virtio_be_priv *ipu_vbk_hash_find(int client_id) +{ + struct ipu4_virtio_be_priv *entry; + int bkt; + + if (!ipu_vbk_hash_initialized) { + pr_err("RNG hash table not initialized!\n"); + return NULL; + } + + hash_for_each(HASH_NAME, bkt, entry, node) + if (virtio_dev_client_id(&entry->dev) == client_id) + return entry; + + pr_err("Not found item matching client_id!\n"); + return NULL; +} + +static int ipu_vbk_hash_del(int client_id) +{ + struct ipu4_virtio_be_priv *entry; + int bkt; + + if (!ipu_vbk_hash_initialized) { + pr_err("RNG hash table not initialized!\n"); + return -1; + } + + hash_for_each(HASH_NAME, bkt, entry, node) + if (virtio_dev_client_id(&entry->dev) == client_id) { + hash_del(&entry->node); + return 0; + } + + pr_err("%s failed, not found matching client_id!\n", + __func__); + return -1; +} + +static int ipu_vbk_hash_del_all(void) +{ + struct ipu4_virtio_be_priv *entry; + int bkt; + + if (!ipu_vbk_hash_initialized) { + pr_err("RNG hash table not initialized!\n"); + return -1; + } + + hash_for_each(HASH_NAME, bkt, entry, node) + hash_del(&entry->node); + + return 0; +} + +static void handle_vq_kick(struct ipu4_virtio_be_priv *priv, int vq_idx) +{ + struct iovec iov; + struct ipu4_virtio_be_priv *be; + struct virtio_vq_info *vq; + struct ipu4_virtio_req *req = NULL; + int len; + int ret; + uint16_t idx; + + pr_debug("%s: vq_idx %d\n", __func__, vq_idx); + + be = priv; + + if (!be) { + pr_err("rng is NULL! 
Cannot proceed!\n"); + return; + } + + vq = &(be->vqs[vq_idx]); + + while (virtio_vq_has_descs(vq)) { + virtio_vq_getchain(vq, &idx, &iov, 1, NULL); + + /* device specific operations, for example: */ + pr_debug("iov base %p len %lx\n", iov.iov_base, iov.iov_len); + + if (iov.iov_len != sizeof(struct ipu4_virtio_req)) { + if (iov.iov_len == sizeof(int)) { + *((int *)iov.iov_base) = 1; + len = iov.iov_len; + printk(KERN_NOTICE "IPU VBK handle kick from vmid:%d\n", 1); + } else { + len = 0; + printk(KERN_WARNING "received request with wrong size"); + printk(KERN_WARNING "%zu != %zu\n", + iov.iov_len, + sizeof(struct ipu4_virtio_req)); + } + + pr_debug("vtrnd: vtrnd_notify(): %d\r\n", len); + virtio_vq_relchain(vq, idx, len); + continue; + } + + req = (struct ipu4_virtio_req *)iov.iov_base; + ret = intel_ipu4_virtio_msg_parse(1, req); + len = iov.iov_len; + + if (req->stat == IPU4_REQ_NEEDS_FOLLOW_UP) { + vq_req.vq = vq; + vq_req.req = req; + vq_req.idx = idx; + vq_req.len = len; + } else + virtio_vq_relchain(vq, idx, len); + } + pr_debug("IPU VBK data process on VQ Done\n"); + if (req && req->stat != IPU4_REQ_NEEDS_FOLLOW_UP) + virtio_vq_endchains(vq, 1); +} + +static int handle_kick(int client_id, long unsigned *ioreqs_map) +{ + int val[IPU_VIRTIO_QUEUE_MAX], i, count; + struct ipu4_virtio_be_priv *priv; + + if (unlikely(bitmap_empty(ioreqs_map, VHM_REQUEST_MAX))) + return -EINVAL; + + pr_debug("%s: IPU VBK handle kick!\n", __func__); + + priv = ipu_vbk_hash_find(client_id); + if (priv == NULL) { + pr_err("%s: client %d not found!\n", + __func__, client_id); + return -EINVAL; + } + + count = ipu_virtio_vqs_index_get(&priv->dev, ioreqs_map, val, IPU_VIRTIO_QUEUE_MAX); + + for (i = 0; i < count; i++) { + if (val[i] >= 0) { + handle_vq_kick(priv, val[i]); + } + } + + return 0; +} + +static int ipu_vbk_open(struct inode *inode, struct file *f) +{ + struct ipu4_virtio_be_priv *priv; + struct virtio_dev_info *dev; + struct virtio_vq_info *vqs; + int i; + + priv = kcalloc(1, sizeof(struct ipu4_virtio_be_priv), + GFP_KERNEL); + + if (priv == NULL) { + pr_err("Failed to allocate memory for ipu4_virtio_be_priv!\n"); + return -ENOMEM; + } + + vqs = &priv->vqs[0]; + + dev = &priv->dev; + + strncpy(dev->name, "vbs_ipu", VBS_NAME_LEN); + dev->dev_notify = handle_kick; + + + for (i = 0; i < IPU_VIRTIO_QUEUE_MAX; i++) { + vqs[i].dev = dev; + vqs[i].vq_notify = NULL; + } + + /* link dev and vqs */ + dev->vqs = vqs; + + virtio_dev_init(dev, vqs, IPU_VIRTIO_QUEUE_MAX); + + priv->pending_tx_req = kcalloc(1, sizeof(struct ipu4_virtio_req), + GFP_KERNEL); + + mutex_init(&priv->lock); + + f->private_data = priv; + + /* init a hash table to maintain multi-connections */ + ipu_vbk_hash_init(); + + return 0; +} + +static int ipu_vbk_release(struct inode *inode, struct file *f) +{ + struct ipu4_virtio_be_priv *priv = f->private_data; + int i; + + if (!priv) + pr_err("%s: UNLIKELY rng NULL!\n", + __func__); + + ipu_vbk_stop(priv); + ipu_vbk_flush(priv); + for (i = 0; i < IPU_VIRTIO_QUEUE_MAX; i++) + virtio_vq_reset(&(priv->vqs[i])); + + /* device specific release */ + ipu_vbk_reset(priv); + + pr_debug("ipu_vbk_connection cnt is %d\n", + ipu_vbk_connection_cnt); + + if (priv && ipu_vbk_connection_cnt--) + ipu_vbk_hash_del(virtio_dev_client_id(&priv->dev)); + if (!ipu_vbk_connection_cnt) { + pr_debug("ipu4_virtio_be_priv remove all hash entries\n"); + ipu_vbk_hash_del_all(); + } + + kfree(priv); + + pr_debug("%s done\n", __func__); + return 0; +} + +static long ipu_vbk_ioctl(struct file *f, unsigned int ioctl, + unsigned 
long arg) +{ + struct ipu4_virtio_be_priv *priv = f->private_data; + void __user *argp = (void __user *)arg; + /*u64 __user *featurep = argp;*/ + /*u64 features;*/ + int r; + + if (priv == NULL) { + pr_err("No IPU backend private data\n"); + return -EINVAL; + } + switch (ioctl) { +/* + * case VHOST_GET_FEATURES: + * features = VHOST_NET_FEATURES; + * if (copy_to_user(featurep, &features, sizeof features)) + * return -EFAULT; + * return 0; + * case VHOST_SET_FEATURES: + * if (copy_from_user(&features, featurep, sizeof features)) + * return -EFAULT; + * if (features & ~VHOST_NET_FEATURES) + * return -EOPNOTSUPP; + * return vhost_net_set_features(n, features); + */ + case VBS_SET_VQ: + /* + * we handle this here because we want to register VHM client + * after handling VBS_K_SET_VQ request + */ + r = virtio_vqs_ioctl(&priv->dev, ioctl, argp); + if (r == -ENOIOCTLCMD) { + pr_err("VBS_K_SET_VQ: virtio_vqs_ioctl failed!\n"); + return -EFAULT; + } + /* Register VHM client */ + if (virtio_dev_register(&priv->dev) < 0) { + pr_err("failed to register VHM client!\n"); + return -EFAULT; + } + /* Added to local hash table */ + if (ipu_vbk_hash_add(priv) < 0) { + pr_err("failed to add to hashtable!\n"); + return -EFAULT; + } + /* Increment counter */ + ipu_vbk_connection_cnt++; + return r; + default: + /*mutex_lock(&n->dev.mutex);*/ + r = virtio_dev_ioctl(&priv->dev, ioctl, argp); + if (r == -ENOIOCTLCMD) + r = virtio_vqs_ioctl(&priv->dev, ioctl, argp); + else + ipu_vbk_flush(priv); + /*mutex_unlock(&n->dev.mutex);*/ + return r; + } +} + +int notify_fe(void) +{ + if (vq_req.vq) { + pr_debug("%s: notifying fe", __func__); + vq_req.req->func_ret = 1; + virtio_vq_relchain(vq_req.vq, vq_req.idx, vq_req.len); + virtio_vq_endchains(vq_req.vq, 1); + vq_req.vq = NULL; + } else + pr_debug("%s: NULL vq!", __func__); + + return 0; +} + +int ipu_virtio_vqs_index_get(struct virtio_dev_info *dev, unsigned long *ioreqs_map, + int *vqs_index, int max_vqs_index) +{ + int idx = 0; + struct vhm_request *req; + int vcpu; + + if (dev == NULL) { + pr_err("%s: dev is NULL!\n", __func__); + return -EINVAL; + } + + while (idx < max_vqs_index) { + vcpu = find_first_bit(ioreqs_map, dev->_ctx.max_vcpu); + if (vcpu == dev->_ctx.max_vcpu) + break; + req = &dev->_ctx.req_buf[vcpu]; + if (atomic_read(&req->processed) == REQ_STATE_PROCESSING && + req->client == dev->_ctx.vhm_client_id) { + if (req->reqs.pio_request.direction == REQUEST_READ) { + /* currently we handle kick only, + * so read will return 0 + */ + pr_debug("%s: read request!\n", __func__); + if (dev->io_range_type == PIO_RANGE) + req->reqs.pio_request.value = 0; + else + req->reqs.mmio_request.value = 0; + } else { + pr_debug("%s: write request! 
type %d\n", + __func__, req->type); + if (dev->io_range_type == PIO_RANGE) + vqs_index[idx++] = req->reqs.pio_request.value; + else + vqs_index[idx++] = req->reqs.mmio_request.value; + } + smp_mb(); + atomic_set(&req->processed, REQ_STATE_COMPLETE); + acrn_ioreq_complete_request(req->client, vcpu); + } + } + + return idx; +} + +/* device specific function to cleanup itself */ +static void ipu_vbk_reset(struct ipu4_virtio_be_priv *rng) +{ +} + +/* device specific function */ +static void ipu_vbk_stop(struct ipu4_virtio_be_priv *rng) +{ + virtio_dev_deregister(&rng->dev); +} + +/* device specific function */ +static void ipu_vbk_flush(struct ipu4_virtio_be_priv *rng) +{ +} + +#ifdef RUNTIME_CTRL +/* device specific function */ +static int ipu_vbk_enable_vq(struct ipu4_virtio_be_priv *rng, + struct virtio_vq_info *vq) +{ + return 0; +} + +/* device specific function */ +static void ipu_vbk_disable_vq(struct ipu4_virtio_be_priv *rng, + struct virtio_vq_info *vq) +{ +} + +/* device specific function */ +static void ipu_vbk_stop_vq(struct ipu4_virtio_be_priv *rng, + struct virtio_vq_info *vq) +{ +} + +/* device specific function */ +static void ipu_vbk_flush_vq(struct ipu4_virtio_be_priv *rng, int index) +{ +} + +/* Set feature bits in kernel side device */ +static int ipu_vbk_set_features(struct ipu4_virtio_be_priv *rng, u64 features) +{ + return 0; +} +#endif + +static const struct file_operations vbs_fops = { + .owner = THIS_MODULE, + .release = ipu_vbk_release, + .unlocked_ioctl = ipu_vbk_ioctl, + .open = ipu_vbk_open, + .llseek = noop_llseek, +}; + +static struct miscdevice vbs_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "vbs_ipu", + .fops = &vbs_fops, +}; + +static int ipu_vbk_init(void) +{ + return misc_register(&vbs_misc); +} +module_init(ipu_vbk_init); + +static void ipu_vbk_exit(void) +{ + misc_deregister(&vbs_misc); +} +module_exit(ipu_vbk_exit); + +MODULE_VERSION("0.1"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("IPU4 virtio driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.h new file mode 100644 index 000000000000..999b543b58f6 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-be.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */ +/* + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef __IPU4_VIRTIO_BE__ +#define __IPU4_VIRTIO_BE__ + +#include + +int notify_fe(void); +int ipu_virtio_vqs_index_get(struct virtio_dev_info *dev, unsigned long *ioreqs_map, + int *vqs_index, int max_vqs_index); + +#endif diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.c new file mode 100644 index 000000000000..17b0f066aef3 --- /dev/null +++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include + +#include "intel-ipu4-virtio-common.h" + +DECLARE_HASHTABLE(ipu4_virtio_fe_hash, MAX_ENTRY_FE); + +void ipu4_virtio_fe_table_init(void) +{ + hash_init(ipu4_virtio_fe_hash); +} + +int ipu4_virtio_fe_add(struct ipu4_virtio_fe_info *fe_info) +{ + struct ipu4_virtio_fe_info_entry *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->info = fe_info; + + hash_add(ipu4_virtio_fe_hash, &info_entry->node, 
+		 info_entry->info->client_id);
+
+	return 0;
+}
+
+struct ipu4_virtio_fe_info *ipu4_virtio_fe_find(int client_id)
+{
+	struct ipu4_virtio_fe_info_entry *info_entry;
+	int bkt;
+
+	hash_for_each(ipu4_virtio_fe_hash, bkt, info_entry, node)
+		if (info_entry->info->client_id == client_id)
+			return info_entry->info;
+
+	return NULL;
+}
+
+struct ipu4_virtio_fe_info *ipu4_virtio_fe_find_by_vmid(int vmid)
+{
+	struct ipu4_virtio_fe_info_entry *info_entry;
+	int bkt;
+
+	hash_for_each(ipu4_virtio_fe_hash, bkt, info_entry, node)
+		if (info_entry->info->vmid == vmid)
+			return info_entry->info;
+
+	return NULL;
+}
+
+int ipu4_virtio_fe_remove(int client_id)
+{
+	struct ipu4_virtio_fe_info_entry *info_entry;
+	int bkt;
+
+	hash_for_each(ipu4_virtio_fe_hash, bkt, info_entry, node)
+		if (info_entry->info->client_id == client_id) {
+			hash_del(&info_entry->node);
+			kfree(info_entry);
+			return 0;
+		}
+
+	return -ENOENT;
+}
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.h
new file mode 100644
index 000000000000..e55685c3fdaf
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-common.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef __IPU4_VIRTIO_COMMON_H__
+#define __IPU4_VIRTIO_COMMON_H__
+
+/*
+ * CWP uses physical addresses for memory sharing,
+ * so the size of one page ref will be 64 bits.
+ */
+#define REFS_PER_PAGE (PAGE_SIZE/sizeof(u64))
+
+/* Defines size of requests circular buffer */
+#define REQ_RING_SIZE 128
+
+#define MAX_NUMBER_OF_OPERANDS 64
+
+#define MAX_ENTRY_FE 7
+
+enum virtio_queue_type {
+	IPU_VIRTIO_QUEUE_0 = 0,
+	IPU_VIRTIO_QUEUE_1,
+	IPU_VIRTIO_QUEUE_MAX
+};
+
+struct ipu4_virtio_req {
+	unsigned int req_id;
+	unsigned int stat;
+	unsigned int cmd;
+	unsigned int func_ret;
+	unsigned int op[MAX_NUMBER_OF_OPERANDS];
+	u64 payload;
+};
+
+struct test_payload {
+	unsigned int data1;
+	long int data2;
+	char name[256];
+};
+
+/* Not used */
+struct ipu4_virtio_resp {
+	unsigned int resp_id;
+	unsigned int stat;
+	unsigned int cmd;
+	unsigned int op[MAX_NUMBER_OF_OPERANDS];
+};
+
+/* Not used */
+struct ipu4_virtio_fe_info {
+	struct ipu4_virtio_be_priv *priv;
+	int client_id;
+	int vmid;
+	int max_vcpu;
+	struct vhm_request *req_buf;
+};
+
+/* Not used */
+struct ipu4_virtio_fe_info_entry {
+	struct ipu4_virtio_fe_info *info;
+	struct hlist_node node;
+};
+
+struct ipu4_bknd_ops {
+	/* backend initialization routine */
+	int (*init)(void);
+
+	/* backend cleanup routine */
+	void (*cleanup)(void);
+
+	/* retrieving the id of the current virtual machine */
+	int (*get_vm_id)(void);
+
+	int (*send_req)(int, struct ipu4_virtio_req *, int, int);
+};
+
+struct ipu4_virtio_ctx {
+	/* VM(domain) id of current VM instance */
+	int domid;
+
+	/* backend ops - hypervisor specific */
+	struct ipu4_bknd_ops *bknd_ops;
+
+	/* flag that shows whether backend is initialized */
+	bool initialized;
+
+	/* device global lock */
+	struct mutex lock;
+};
+
+enum intel_ipu4_virtio_command {
+	IPU4_CMD_DEVICE_OPEN = 0x1,
+	IPU4_CMD_DEVICE_CLOSE,
+	IPU4_CMD_STREAM_ON,
+	IPU4_CMD_STREAM_OFF,
+	IPU4_CMD_GET_BUF,
+	IPU4_CMD_PUT_BUF,
+	IPU4_CMD_SET_FORMAT,
+	IPU4_CMD_ENUM_NODES,
+	IPU4_CMD_ENUM_LINKS,
+	IPU4_CMD_SETUP_PIPE,
+	IPU4_CMD_SET_FRAMEFMT,
+	IPU4_CMD_GET_FRAMEFMT,
+	IPU4_CMD_GET_SUPPORTED_FRAMEFMT,
+	IPU4_CMD_SET_SELECTION,
+	IPU4_CMD_GET_SELECTION,
+	IPU4_CMD_POLL,
+	IPU4_CMD_PIPELINE_OPEN,
+	IPU4_CMD_PIPELINE_CLOSE,
+	IPU4_CMD_GET_N
+};
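To make the request flow concrete before the header continues, here is a brief illustrative sketch of how a guest-side caller could drive the definitions above. It is not part of the patch: example_ipu_open_node() is a hypothetical helper, while intel_ipu4_virtio_create_req() and the send_req backend op are the helpers this patch introduces further down.

static int example_ipu_open_node(struct ipu4_virtio_ctx *ctx, int node)
{
	struct ipu4_virtio_req *req;
	int op[2] = { node, 0 };	/* op0: virtual node number, op1: actual fd (0 by default) */
	int rval;

	req = kzalloc(sizeof(*req), GFP_KERNEL);	/* assumes <linux/slab.h> */
	if (!req)
		return -ENOMEM;

	/* stat starts as IPU4_REQ_NOT_RESPONDED; the backend updates it */
	intel_ipu4_virtio_create_req(req, IPU4_CMD_DEVICE_OPEN, op);

	/* blocking send on the first queue via the hypervisor-specific ops */
	rval = ctx->bknd_ops->send_req(ctx->domid, req, true, IPU_VIRTIO_QUEUE_0);

	kfree(req);
	return rval;
}

This mirrors what process_pipeline() does later in the patch, minus the physical-address payload.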
+
+enum intel_ipu4_virtio_req_feedback {
+	IPU4_REQ_PROCESSED,
+	IPU4_REQ_NEEDS_FOLLOW_UP,
+	IPU4_REQ_ERROR,
+	IPU4_REQ_NOT_RESPONDED
+};
+
+extern struct ipu4_bknd_ops ipu4_virtio_bknd_ops;
+
+void ipu4_virtio_fe_table_init(void);
+
+int ipu4_virtio_fe_add(struct ipu4_virtio_fe_info *fe_info);
+
+int ipu4_virtio_fe_remove(int client_id);
+
+struct ipu4_virtio_fe_info *ipu4_virtio_fe_find(int client_id);
+
+struct ipu4_virtio_fe_info *ipu4_virtio_fe_find_by_vmid(int vmid);
+
+#endif
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.c
new file mode 100644
index 000000000000..44edf7414a15
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "intel-ipu4-virtio-common.h"
+#include "intel-ipu4-virtio-fe-payload.h"
+
+void intel_ipu4_virtio_create_req(struct ipu4_virtio_req *req,
+				  enum intel_ipu4_virtio_command cmd, int *op)
+{
+	int i;
+
+	req->stat = IPU4_REQ_NOT_RESPONDED;
+	req->cmd = cmd;
+
+	switch (cmd) {
+	case IPU4_CMD_POLL:
+	case IPU4_CMD_DEVICE_OPEN:
+	case IPU4_CMD_DEVICE_CLOSE:
+	case IPU4_CMD_STREAM_ON:
+	case IPU4_CMD_STREAM_OFF:
+	case IPU4_CMD_PUT_BUF:
+	case IPU4_CMD_SET_FORMAT:
+	case IPU4_CMD_ENUM_NODES:
+	case IPU4_CMD_ENUM_LINKS:
+	case IPU4_CMD_SETUP_PIPE:
+	case IPU4_CMD_SET_FRAMEFMT:
+	case IPU4_CMD_GET_FRAMEFMT:
+	case IPU4_CMD_GET_SUPPORTED_FRAMEFMT:
+	case IPU4_CMD_SET_SELECTION:
+	case IPU4_CMD_GET_SELECTION:
+		/* Open video device node
+		 * op0 - virtual device node number
+		 * op1 - actual device fd, set to 0 by default
+		 */
+		for (i = 0; i < 2; i++)
+			req->op[i] = op[i];
+		break;
+	case IPU4_CMD_GET_BUF:
+		for (i = 0; i < 3; i++)
+			req->op[i] = op[i];
+		break;
+	default:
+		return;
+	}
+}
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.h
new file mode 100644
index 000000000000..173c31a54692
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-payload.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef __IPU4_VIRTIO_FE_PAYLOAD__
+#define __IPU4_VIRTIO_FE_PAYLOAD__
+
+#include "intel-ipu4-virtio-common.h"
+
+void intel_ipu4_virtio_create_req(struct ipu4_virtio_req *req,
+				  enum intel_ipu4_virtio_command cmd, int *op);
+
+#endif
\ No newline at end of file
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.c
new file mode 100644
index 000000000000..0f5d8b6f83ec
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.c
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include "intel-ipu4-virtio-fe-payload.h"
+#include "intel-ipu4-virtio-fe-pipeline.h"
+
+int process_pipeline(struct file *file, struct ipu4_virtio_ctx *fe_priv,
+		     void *data, int cmd)
+{
+	struct ipu4_virtio_req *req;
+	int rval = 0;
+	int op[10];
+
+	op[0] = 0;
+	op[1] = 0;
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	req->payload = virt_to_phys(data);
+
+	intel_ipu4_virtio_create_req(req, cmd, &op[0]);
+
+	rval = fe_priv->bknd_ops->send_req(fe_priv->domid, req, true,
+					   IPU_VIRTIO_QUEUE_0);
+	if (rval) {
+		pr_err("Failed to send request to BE\n");
+		kfree(req);
+		return rval;
+	}
+
+	kfree(req);
+
+	return rval;
+}
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.h b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.h
new file mode 100644
index 000000000000..d1fbe106beda
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe-pipeline.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) */
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef IPU4_VIRTIO_FE_PIPELINE_H
+#define IPU4_VIRTIO_FE_PIPELINE_H
+
+#include
+
+#include "virtio/intel-ipu4-virtio-common.h"
+
+int process_pipeline(struct file *file,
+		     struct ipu4_virtio_ctx *fe_priv,
+		     void *data,
+		     int cmd);
+
+#endif
diff --git a/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe.c b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe.c
new file mode 100644
index 000000000000..d95e52a09b32
--- /dev/null
+++ b/drivers/media/pci/intel/virtio/intel-ipu4-virtio-fe.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "intel-ipu4-virtio-common.h"
+
+static DEFINE_IDA(index_ida);
+
+struct ipu4_virtio_uos {
+	struct virtqueue *vq[IPU_VIRTIO_QUEUE_MAX];
+	struct completion have_data;
+	char name[25];
+	unsigned int data_avail;
+	int index;
+	bool busy;
+	int vmid;
+};
+
+static struct completion completion_queue[IPU_VIRTIO_QUEUE_MAX];
+
+/* Assuming there will be one FE instance per VM */
+static struct ipu4_virtio_uos *ipu4_virtio_fe;
+
+static void ipu_virtio_fe_tx_done_vq_0(struct virtqueue *vq)
+{
+	struct ipu4_virtio_uos *priv = (struct ipu4_virtio_uos *)vq->vdev->priv;
+
+	/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
+	if (!virtqueue_get_buf(vq, &priv->data_avail))
+		return;
+
+	complete(&completion_queue[0]);
+	pr_debug("IPU FE:%s vmid:%d TX for VQ 0 done\n", __func__, priv->vmid);
+}
+
+static void ipu_virtio_fe_tx_done_vq_1(struct virtqueue *vq)
+{
+	struct ipu4_virtio_uos *priv = (struct ipu4_virtio_uos *)vq->vdev->priv;
+
+	/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
+	if (!virtqueue_get_buf(vq, &priv->data_avail))
+		return;
+
+	complete(&completion_queue[1]);
+	pr_debug("IPU FE:%s vmid:%d TX for VQ 1 done\n", __func__, priv->vmid);
+}
+
+/* Queue a buffer on the given virtqueue and kick the backend. */
+static void ipu_virtio_fe_register_buffer(struct ipu4_virtio_uos *vi, void *buf, size_t size,
+					  int nqueue)
+{
+	struct scatterlist sg;
+
+	if (nqueue >= IPU_VIRTIO_QUEUE_MAX) {
+		pr_debug("Queue index exceeds the max queue number\n");
+		return;
+	}
+
+	sg_init_one(&sg, buf, size);
+
+	/* There should always be room for one buffer.
*/ + virtqueue_add_inbuf(vi->vq[nqueue], &sg, 1, buf, GFP_KERNEL); + + virtqueue_kick(vi->vq[nqueue]); +} + +static int ipu_virtio_fe_probe_common(struct virtio_device *vdev) +{ + int err, index, i; + struct ipu4_virtio_uos *priv = NULL; + vq_callback_t *callbacks[] = {ipu_virtio_fe_tx_done_vq_0, + ipu_virtio_fe_tx_done_vq_1}; + static const char *names[] = {"csi_queue_0", "csi_queue_1"}; + priv = kzalloc(sizeof(struct ipu4_virtio_uos), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->index = index = ida_simple_get(&index_ida, 0, 0, GFP_KERNEL); + if (index < 0) { + err = index; + goto err_ida; + } + sprintf(priv->name, "virtio_.%d", index); + for (i = 0; i < IPU_VIRTIO_QUEUE_MAX; i++) + init_completion(&completion_queue[i]); + priv->vmid = -1; + vdev->priv = priv; + err = virtio_find_vqs(vdev, IPU_VIRTIO_QUEUE_MAX, + priv->vq, callbacks, names, NULL); + if (err) + goto err_find; + + ipu4_virtio_fe = priv; + + return 0; + +err_find: + ida_simple_remove(&index_ida, index); +err_ida: + kfree(priv); + return err; +} + +static void ipu_virtio_fe_remove_common(struct virtio_device *vdev) +{ + struct ipu4_virtio_uos *priv = vdev->priv; + int i; + + priv->data_avail = 0; + for (i = 0; i < IPU_VIRTIO_QUEUE_MAX; i++) + complete(&completion_queue[i]); + vdev->config->reset(vdev); + priv->busy = false; + + vdev->config->del_vqs(vdev); + //ida_simple_remove(&index_ida, priv->index); + kfree(priv); +} + +static int ipu_virtio_fe_send_req(int vmid, struct ipu4_virtio_req *req, + int wait, int idx) +{ + struct ipu4_virtio_uos *priv = ipu4_virtio_fe; + int ret = 0; + pr_debug("IPU FE:%s\n", __func__); + if (priv == NULL) { + pr_err("IPU Backend not connected\n"); + return -ENOENT; + } + + init_completion(&completion_queue[idx]); + ipu_virtio_fe_register_buffer(ipu4_virtio_fe, req, sizeof(*req), idx); + wait_for_completion(&completion_queue[idx]); + + return ret; +} +static int ipu_virtio_fe_get_vmid(void) +{ + struct ipu4_virtio_uos *priv = ipu4_virtio_fe; + + if (ipu4_virtio_fe == NULL) { + pr_err("IPU Backend not connected\n"); + return -1; + } + return priv->vmid; +} + +int ipu_virtio_fe_register(void) +{ + pr_debug("IPU FE:%s\n", __func__); + return 0; +} + +void ipu_virtio_fe_unregister(void) +{ + pr_debug("IPU FE:%s\n", __func__); + return; +} +static int virt_probe(struct virtio_device *vdev) +{ + return ipu_virtio_fe_probe_common(vdev); +} + +static void virt_remove(struct virtio_device *vdev) +{ + ipu_virtio_fe_remove_common(vdev); +} + +static void virt_scan(struct virtio_device *vdev) +{ + struct ipu4_virtio_uos *vi = (struct ipu4_virtio_uos *)vdev->priv; + int timeout = 1000; + + if (vi == NULL) { + pr_err("IPU No frontend private data\n"); + return; + } + ipu_virtio_fe_register_buffer(vi, &vi->vmid, sizeof(vi->vmid), + IPU_VIRTIO_QUEUE_0); + + while (timeout--) { + if (vi->vmid > 0) + break; + usleep_range(100, 120); + } + pr_debug("IPU FE:%s vmid:%d\n", __func__, vi->vmid); + + if (timeout < 0) + pr_err("IPU Cannot query vmid\n"); + +} + +#ifdef CONFIG_PM_SLEEP +static int virt_freeze(struct virtio_device *vdev) +{ + ipu_virtio_fe_remove_common(vdev); + return 0; +} + +static int virt_restore(struct virtio_device *vdev) +{ + return ipu_virtio_fe_probe_common(vdev); +} +#endif + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_IPU, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +struct ipu4_bknd_ops ipu4_virtio_bknd_ops = { + .init = ipu_virtio_fe_register, + .cleanup = ipu_virtio_fe_unregister, + .get_vm_id = ipu_virtio_fe_get_vmid, + .send_req = ipu_virtio_fe_send_req +}; + +static 
struct virtio_driver virtio_driver = { + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virt_probe, + .remove = virt_remove, + .scan = virt_scan, +#ifdef CONFIG_PM_SLEEP + .freeze = virt_freeze, + .restore = virt_restore, +#endif +}; + + +module_virtio_driver(virtio_driver); +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("IPU4 virtio driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c index ef4906406ebf..a50461861133 100644 --- a/drivers/media/pci/saa7164/saa7164-fw.c +++ b/drivers/media/pci/saa7164/saa7164-fw.c @@ -426,7 +426,8 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev) __func__, fw->size); if (fw->size != fwlength) { - printk(KERN_ERR "xc5000: firmware incorrect size\n"); + printk(KERN_ERR "saa7164: firmware incorrect size %zu != %u\n", + fw->size, fwlength); ret = -ENOMEM; goto out; } diff --git a/drivers/media/pci/tw686x/tw686x-core.c b/drivers/media/pci/tw686x/tw686x-core.c index 336e2f9bc1b6..b762e5f0ba1d 100644 --- a/drivers/media/pci/tw686x/tw686x-core.c +++ b/drivers/media/pci/tw686x/tw686x-core.c @@ -72,12 +72,12 @@ static const char *dma_mode_name(unsigned int mode) } } -static int tw686x_dma_mode_get(char *buffer, struct kernel_param *kp) +static int tw686x_dma_mode_get(char *buffer, const struct kernel_param *kp) { return sprintf(buffer, "%s", dma_mode_name(dma_mode)); } -static int tw686x_dma_mode_set(const char *val, struct kernel_param *kp) +static int tw686x_dma_mode_set(const char *val, const struct kernel_param *kp) { if (!strcasecmp(val, dma_mode_name(TW686X_DMA_MODE_MEMCPY))) dma_mode = TW686X_DMA_MODE_MEMCPY; diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c index c3fafa97b2d0..0ea8dd44026c 100644 --- a/drivers/media/pci/tw686x/tw686x-video.c +++ b/drivers/media/pci/tw686x/tw686x-video.c @@ -1228,7 +1228,8 @@ int tw686x_video_init(struct tw686x_dev *dev) vc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; vc->vidq.min_buffers_needed = 2; vc->vidq.lock = &vc->vb_mutex; - vc->vidq.gfp_flags = GFP_DMA32; + vc->vidq.gfp_flags = dev->dma_mode != TW686X_DMA_MODE_MEMCPY ? + GFP_DMA32 : 0; vc->vidq.dev = &dev->pci_dev->dev; err = vb2_queue_init(&vc->vidq); diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 3c4f7fa7b9d8..1e4986bead11 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -144,12 +144,24 @@ config VIDEO_STM32_DCMI To compile this driver as a module, choose M here: the module will be called stm32-dcmi. 
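The tw686x hunk above follows the kernel-wide constification of struct kernel_param in module parameter accessors. For reference, a minimal sketch of the same pattern is shown below; the driver, parameter, and variable names are hypothetical and not taken from this patch.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int demo_mode;

/* the getter now takes a const struct kernel_param pointer */
static int demo_mode_get(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%u", demo_mode);
}

/* ...and so does the setter */
static int demo_mode_set(const char *val, const struct kernel_param *kp)
{
	return kstrtouint(val, 0, &demo_mode);
}

module_param_call(demo_mode, demo_mode_set, demo_mode_get, &demo_mode, 0644);
MODULE_PARM_DESC(demo_mode, "demo mode selector");

The same signatures apply whether the callbacks are wired up through module_param_call(), as tw686x does, or through a struct kernel_param_ops passed to module_param_cb().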
+config VIDEO_AGGREGATOR_STUB
+	tristate "Intel video aggregator stub"
+	---help---
+	  Dummy video aggregator driver that is controlled from outside
+
+config VIDEO_IRIS
+	tristate "Intel camera DC-iris"
+	default n
+	---help---
+	  Camera DC-iris driver
+
 source "drivers/media/platform/soc_camera/Kconfig"
 source "drivers/media/platform/exynos4-is/Kconfig"
 source "drivers/media/platform/am437x/Kconfig"
 source "drivers/media/platform/xilinx/Kconfig"
 source "drivers/media/platform/rcar-vin/Kconfig"
 source "drivers/media/platform/atmel/Kconfig"
+source "drivers/media/platform/intel/Kconfig"
 
 config VIDEO_TI_CAL
 	tristate "TI CAL (Camera Adaptation Layer) driver"
@@ -538,6 +550,21 @@ if DVB_PLATFORM_DRIVERS
 source "drivers/media/platform/sti/c8sectpfe/Kconfig"
 endif #DVB_PLATFORM_DRIVERS
 
+config VIDEO_SENSOR_STUB
+	tristate "Intel dummy sensor stub"
+	---help---
+	  Dummy sensor driver that is controlled from outside (pixter/xactor)
+
+config I2C_ADAPTER_STUB
+	tristate "Intel fpga i2c adapter stub"
+	---help---
+	  Dummy i2c adapter that allows using an i2c sensor without i2c hardware
+
+config VIDEO_SENSOR_STUB_PDATA
+	bool "Intel intel-ipu4 sensor stub platform data"
+	---help---
+	  Platform data for a sensor driver that is controlled from outside (pixter/xactor)
+
 menuconfig CEC_PLATFORM_DRIVERS
 	bool "CEC platform devices"
 	depends on MEDIA_CEC_SUPPORT
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 327f80a6f82c..94ea2e6e7716 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -3,6 +3,7 @@
 # Makefile for the video capture/playback device drivers.
 #
 
+obj-y += intel/
 obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
 
 obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o
@@ -82,6 +83,12 @@
 obj-$(CONFIG_VIDEO_MEDIATEK_VPU) += mtk-vpu/
 obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec/
 
+obj-$(CONFIG_VIDEO_SENSOR_STUB) += video-sensor-stub.o
+obj-$(CONFIG_VIDEO_SENSOR_STUB_PDATA) += video-sensor-stub-pdata.o
+obj-$(CONFIG_I2C_ADAPTER_STUB) += i2c-adapter-stub.o
+obj-$(CONFIG_VIDEO_AGGREGATOR_STUB) += video-aggre-stub.o
+obj-$(CONFIG_VIDEO_IRIS) += video-iris.o
+
 obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp/
 obj-$(CONFIG_VIDEO_MEDIATEK_JPEG) += mtk-jpeg/
 
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 0ef36cec21d1..dc8fc2120b63 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -1397,9 +1397,9 @@ static int vpif_async_bound(struct v4l2_async_notifier *notifier,
 			vpif_obj.config->chan_config->inputs[i].subdev_name =
 				(char *)to_of_node(subdev->fwnode)->full_name;
 			vpif_dbg(2, debug,
-				 "%s: setting input %d subdev_name = %pOF\n",
+				 "%s: setting input %d subdev_name = %s\n",
 				 __func__, i,
-				 to_of_node(subdev->fwnode));
+				 vpif_obj.config->chan_config->inputs[i].subdev_name);
 			return 0;
 		}
 	}
@@ -1545,6 +1545,8 @@ vpif_capture_get_pdata(struct platform_device *pdev)
 			sizeof(*chan->inputs) *
 			VPIF_CAPTURE_NUM_CHANNELS,
 			GFP_KERNEL);
+		if (!chan->inputs)
+			return NULL;
 		chan->input_count++;
 		chan->inputs[i].input.type = V4L2_INPUT_TYPE_CAMERA;
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index 948fe01f6c96..809e7581cf94 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -871,7 +871,7 @@ static int fimc_get_sensor_frame_desc(struct v4l2_subdev *sensor,
 	int pad;
 
 	for (i = 0; i < num_planes; i++)
-		fd.entry[i].length = plane_fmt[i].sizeimage;
+
fd.entry[i].size.length = plane_fmt[i].sizeimage; pad = sensor->entity.num_pads - 1; if (try) @@ -886,11 +886,11 @@ static int fimc_get_sensor_frame_desc(struct v4l2_subdev *sensor, return -EINVAL; for (i = 0; i < num_planes; i++) - plane_fmt[i].sizeimage = fd.entry[i].length; + plane_fmt[i].sizeimage = fd.entry[i].size.length; - if (fd.entry[0].length > FIMC_MAX_JPEG_BUF_SIZE) { + if (fd.entry[0].size.length > FIMC_MAX_JPEG_BUF_SIZE) { v4l2_err(sensor->v4l2_dev, "Unsupported buffer size: %u\n", - fd.entry[0].length); + fd.entry[0].size.length); return -EINVAL; } diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index d4656d5175d7..7937e8a4c3ba 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -1130,7 +1130,7 @@ static int __fimc_md_modify_pipelines(struct media_entity *entity, bool enable, * through active links. This is needed as we cannot power on/off the * subdevs in random order. */ - media_graph_walk_start(graph, entity); + media_graph_walk_start(graph, &entity->pads[0]); while ((entity = media_graph_walk_next(graph))) { if (!is_media_entity_v4l2_video_device(entity)) @@ -1145,7 +1145,7 @@ static int __fimc_md_modify_pipelines(struct media_entity *entity, bool enable, return 0; err: - media_graph_walk_start(graph, entity_err); + media_graph_walk_start(graph, &entity_err->pads[0]); while ((entity_err = media_graph_walk_next(graph))) { if (!is_media_entity_v4l2_video_device(entity_err)) diff --git a/drivers/media/platform/i2c-adapter-stub.c b/drivers/media/platform/i2c-adapter-stub.c new file mode 100644 index 000000000000..579d4c080609 --- /dev/null +++ b/drivers/media/platform/i2c-adapter-stub.c @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2013--2018 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "video-sensor-stub.h"
+
+#define STUB_ADAPTER_NAME "i2c-adapter-stub"
+
+static struct platform_device *stub_platform_device;
+static const struct i2c_algorithm stub_is_i2c_algorithm;
+
+struct stub_i2c {
+	struct i2c_adapter adapter;
+	struct clk *clock;
+};
+
+static int ipu_platform_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct stub_i2c *isp_i2c;
+	struct i2c_adapter *i2c_adap;
+	int rval;
+
+	dev_info(&pdev->dev, "Dummy adapter probe\n");
+
+	isp_i2c = devm_kzalloc(&pdev->dev, sizeof(*isp_i2c), GFP_KERNEL);
+	if (!isp_i2c) {
+		dev_err(&pdev->dev, "Failed to alloc i2c adapter structure\n");
+		rval = -ENOMEM;
+		goto out;
+	}
+
+	i2c_adap = &isp_i2c->adapter;
+	i2c_adap->dev.of_node = node;
+	i2c_adap->dev.parent = &pdev->dev;
+	strlcpy(i2c_adap->name, SENSOR_STUB_NAME, sizeof(i2c_adap->name));
+	i2c_adap->owner = THIS_MODULE;
+	i2c_adap->algo = &stub_is_i2c_algorithm;
+	i2c_adap->class = I2C_CLASS_SPD;
+
+	rval = i2c_add_adapter(i2c_adap);
+	if (rval < 0)
+		goto out;
+
+	platform_set_drvdata(pdev, isp_i2c);
+
+	dev_info(&pdev->dev, "I2C adapter nr = %d\n", i2c_adap->nr);
+	dev_info(&pdev->dev, "Dummy adapter probe ok\n");
+	return 0;
+
+out:
+	dev_err(&pdev->dev, "Dummy adapter probe exit failed %d\n", rval);
+	return rval;
+}
+
+static int ipu_platform_remove(struct platform_device *pdev)
+{
+	struct stub_i2c *isp_i2c = platform_get_drvdata(pdev);
+
+	i2c_del_adapter(&isp_i2c->adapter);
+	return 0;
+}
+
+static const struct of_device_id adapter_stub_of_match[] = {
+	{ .compatible = SENSOR_STUB_NAME },
+	{ },
+};
+
+static struct platform_device_id sensor_id_table[] = {
+	{ STUB_ADAPTER_NAME, 0 },
+	{ },
+};
+
+static struct platform_driver stub_platform_driver = {
+	.driver = {
+		.name = STUB_ADAPTER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = adapter_stub_of_match,
+	},
+	.probe = ipu_platform_probe,
+	.remove = ipu_platform_remove,
+	.id_table = sensor_id_table,
+};
+
+static int __init stub_init(void)
+{
+	int rval;
+
+	stub_platform_device =
+		platform_device_register_simple(STUB_ADAPTER_NAME,
+						-1, NULL, 0);
+
+	rval = platform_driver_register(&stub_platform_driver);
+	if (rval) {
+		pr_err("can't register driver (%d)\n", rval);
+		platform_device_unregister(stub_platform_device);
+	}
+	return rval;
+}
+
+static void __exit stub_exit(void)
+{
+	platform_driver_unregister(&stub_platform_driver);
+	platform_device_unregister(stub_platform_device);
+	stub_platform_device = NULL;
+}
+
+module_init(stub_init);
+module_exit(stub_exit);
+
+MODULE_AUTHOR("Jouni Ukkonen ");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel dummy i2c adapter");
diff --git a/drivers/media/platform/intel/Kconfig b/drivers/media/platform/intel/Kconfig
new file mode 100755
index 000000000000..be7621cead7d
--- /dev/null
+++ b/drivers/media/platform/intel/Kconfig
@@ -0,0 +1,124 @@
+config INTEL_IPU4_BXT_P_PDATA
+	bool "Enable built in platform data for Broxton-P"
+	depends on VIDEO_INTEL_IPU && VIDEO_INTEL_IPU4
+	---help---
+	  Pre-ACPI system platform data is compiled into the kernel
+
+config INTEL_IPU4_BXT_GP_PDATA
+	depends on VIDEO_INTEL_IPU && VIDEO_INTEL_IPU4
+	bool "Enable built in platform data for Gordon Peak"
+	---help---
+	  Pre-ACPI system platform data is compiled into the kernel
+
+config INTEL_IPU4_ICI_BXT_P_PDATA
+	depends on VIDEO_INTEL_IPU && VIDEO_INTEL_ICI
+	bool "Enable built in platform data for Broxton-P ICI driver"
+	---help---
+	  Pre-ACPI system platform data is compiled into the kernel
+
+config INTEL_IPU4P_CNL_RVP_PDATA
+	depends on VIDEO_INTEL_IPU && VIDEO_INTEL_IPU4P
+	bool "Enable built in platform data for ipu4p"
+	---help---
+	  Pre-ACPI system platform data is compiled into the kernel
+
+config INTEL_IPU4_OV13858
+	bool "Compile platform data for OV13858"
+	depends on INTEL_IPU4_BXT_P_PDATA
+	---help---
+	  This option enables the Omnivision 13MP camera sensor,
+	  which is similar to the OV13860. The target OS is Yocto.
+
+config INTEL_IPU4_OV2740
+	bool "Compile platform data for OV2740"
+	depends on INTEL_IPU4_BXT_P_PDATA
+
+config INTEL_IPU4_IMX185
+	bool "Compile platform data for IMX185"
+	depends on INTEL_IPU4_BXT_P_PDATA
+
+config INTEL_IPU4_IMX185_LI
+	bool "Compile platform data for IMX185 by Leopard Imaging"
+	depends on INTEL_IPU4_BXT_P_PDATA
+	---help---
+	  Sony IMX185 camera sensor module produced by Leopard Imaging
+
+config INTEL_IPU4_AR023Z
+	bool "Compile platform data for AR023Z"
+	---help---
+	  Onsemi 2MP AR023Z camera sensor
+
+config INTEL_IPU4_IMX477
+	bool "Compile platform data for IMX477 camera sensor"
+	depends on INTEL_IPU4_BXT_P_PDATA
+	---help---
+	  Sony IMX477 sensor, enabled for dual camera input.
+
+config INTEL_IPU4_OV13860
+	bool "Compile platform data for OV13860"
+	---help---
+	  Omnivision 13MP camera sensor
+
+config INTEL_IPU4_OV9281
+	bool "Compile platform data for OV9281"
+	---help---
+	  Omnivision 1MP camera sensor
+
+config INTEL_IPU4_OV10635
+	bool "Compile platform data for OV10635"
+	---help---
+	  Omnivision 1MP camera sensor
+
+config INTEL_IPU4_AR0231AT
+	bool "Compile platform data for AR0231AT"
+	---help---
+	  AR0231 camera sensor for MAXIM 9286
+
+config INTEL_IPU4_MAGNA
+	bool "Compile platform data for MAGNA"
+	depends on INTEL_IPU4_BXT_P_PDATA
+	---help---
+	  MAGNA camera sensor
+
+config INTEL_IPU4_IMX274
+	bool "Compile platform data for IMX274 camera sensor"
+	depends on INTEL_IPU4_BXT_P_PDATA
+	---help---
+	  Sony 14MP camera sensor, enabled for the HDR function.
+
+config INTEL_IPU4_OV10640
+	bool "Compile platform data for OV10640"
+	---help---
+	  Omnivision 1.4MP camera sensor
+
+config INTEL_IPU4_ADV7481
+	bool "Compile platform data for ADV7481"
+	---help---
+	  HDMI2MIPI converter device ADV7481
+
+config INTEL_IPU4_ADV7481_I2C_ID
+	int "I2C bus ID for ADV7481"
+	range 0 8
+	default 0
+	---help---
+	  I2C bus number of ADV7481 Mondello
+
+config INTEL_IPU4_ADV7481_EVAL
+	bool "Compile platform data for ADV7481 evaluation board"
+	---help---
+	  HDMI2MIPI converter device ADV7481 eval board
+
+config INTEL_IPU4_IMX290
+	bool "Compile platform data for IMX290"
+	depends on INTEL_IPU4_BXT_P_PDATA
+	---help---
+	  Sony 8MP camera sensor, enabled for the HDR function
+
+config INTEL_IPU4_OV2775
+	bool "Compile platform data for OV2775"
+	depends on INTEL_IPU4_BXT_P_PDATA
+	---help---
+	  Omnivision OV2775 camera sensor
diff --git a/drivers/media/platform/intel/Makefile b/drivers/media/platform/intel/Makefile
new file mode 100644
index 000000000000..88ecf076528c
--- /dev/null
+++ b/drivers/media/platform/intel/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2010 - 2018, Intel Corporation.
+
+ifneq ($(EXTERNAL_BUILD), 1)
+srcpath := $(srctree)
+endif
+
+# Force-check compile warnings to make sure there are zero warnings.
+# Note: builds may break when gcc is upgraded.
+ccflags-y := -Wall -Wextra +ccflags-y += $(call cc-disable-warning, unused-parameter) +ccflags-y += $(call cc-disable-warning, implicit-fallthrough) +ccflags-y += $(call cc-disable-warning, missing-field-initializers) +ccflags-$(CONFIG_VIDEO_INTEL_IPU_WERROR) += -Werror + +ccflags-y += -I$(srcpath)/$(src)/../../../../include/ +ccflags-y += -I$(srcpath)/$(src)/../../pci/intel/ + +obj-$(CONFIG_INTEL_IPU4_ICI_BXT_P_PDATA) += ipu4-ici-bxt-p-pdata.o +obj-$(CONFIG_VIDEO_INTEL_IPU) += ipu4-acpi.o +obj-$(CONFIG_INTEL_IPU4_BXT_P_PDATA) += ipu4-bxt-p-pdata.o +obj-$(CONFIG_INTEL_IPU4_BXT_GP_PDATA) += ipu4-bxt-gp-pdata.o +obj-$(CONFIG_INTEL_IPU4P_CNL_RVP_PDATA) += ipu4p-cnl-rvp-pdata.o diff --git a/drivers/media/platform/intel/ipu4-acpi.c b/drivers/media/platform/intel/ipu4-acpi.c new file mode 100644 index 000000000000..ec027b09f587 --- /dev/null +++ b/drivers/media/platform/intel/ipu4-acpi.c @@ -0,0 +1,1067 @@ +/* + * Copyright (c) 2016--2018 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) +#include +#else +#include +#endif +#include +#include + +#define HID_BUFFER_SIZE 32 +#define VCM_BUFFER_SIZE 32 + +/* Data representation as it is in ACPI SSDB buffer */ +struct sensor_bios_data_packed { + u8 version; + u8 sku; + u8 guid_csi2[16]; + u8 devfunction; + u8 bus; + u32 dphylinkenfuses; + u32 clockdiv; + u8 link; + u8 lanes; + u32 csiparams[10]; + u32 maxlanespeed; + u8 sensorcalibfileidx; + u8 sensorcalibfileidxInMBZ[3]; + u8 romtype; + u8 vcmtype; + u8 platforminfo; + u8 platformsubinfo; + u8 flash; + u8 privacyled; + u8 degree; + u8 mipilinkdefined; + u32 mclkspeed; + u8 controllogicid; + u8 reserved1[3]; + u8 mclkport; + u8 reserved2[13]; +} __attribute__((__packed__)); + +/* Fields needed by ipu4 driver */ +struct sensor_bios_data { + struct device *dev; + u8 link; + u8 lanes; + u8 vcmtype; + u8 flash; + u8 degree; + u8 mclkport; + u32 mclkspeed; + u16 xshutdown; +}; + +static LIST_HEAD(devices); +static LIST_HEAD(new_devs); + +struct ipu4_i2c_helper { + int (*fn)(struct device *, void *, + struct ipu_isys_csi2_config *csi2, + bool reprobe); + void *driver_data; +}; + +struct ipu4_i2c_new_dev { + struct list_head list; + struct i2c_board_info info; + unsigned short int bus; +}; + +struct ipu4_camera_module_data { + struct list_head list; + struct ipu_isys_csi2_config csi2; + unsigned int ext_clk; + void *pdata; /* Ptr to generated platform data*/ + void *priv; /* Private for specific subdevice */ +}; + +struct ipu4_i2c_info { + unsigned short bus; + unsigned short addr; +}; + +struct ipu4_acpi_devices { + const char *hid_name; + const char *real_driver; + int (*get_platform_data)(struct i2c_client *client, + struct ipu4_camera_module_data *data, + struct ipu4_i2c_helper *helper, + void *priv, size_t size); + void *priv_data; + size_t priv_size; + const struct ipu_regulator *regulators; +}; + +static uint64_t 
imx132_op_clocks[] = { 312000000, 0 };
+
+/*
+ * Add a request to create new i2c devices later on. i2c_new_device can't be
+ * called directly from functions which are called by the i2c_for_each_dev
+ * function: both take the same mutex inside the i2c core code.
+ */
+static int add_new_i2c(unsigned short addr, unsigned short bus,
+		       unsigned short flags, char *name, void *pdata)
+{
+	struct ipu4_i2c_new_dev *newdev = kzalloc(sizeof(*newdev), GFP_KERNEL);
+
+	if (!newdev)
+		return -ENOMEM;
+
+	newdev->info.flags = flags;
+	newdev->info.addr = addr;
+	newdev->bus = bus;
+	newdev->info.platform_data = pdata;
+	strlcpy(newdev->info.type, name, sizeof(newdev->info.type));
+
+	list_add(&newdev->list, &new_devs);
+	return 0;
+}
+
+static int get_string_dsdt_data(struct device *dev, const u8 *dsdt,
+				int func, char *out, unsigned int size)
+{
+	struct acpi_handle *dev_handle = ACPI_HANDLE(dev);
+	union acpi_object *obj;
+	int ret = -ENODEV;
+
+	obj = acpi_evaluate_dsm(dev_handle, (void *)dsdt, 0, func, NULL);
+	if (!obj) {
+		dev_err(dev, "No dsdt field\n");
+		return -ENODEV;
+	}
+	dev_dbg(dev, "ACPI type %d\n", obj->type);
+
+	if ((obj->type != ACPI_TYPE_STRING) || !obj->string.pointer)
+		goto exit;
+
+	strlcpy(out, obj->string.pointer,
+		min((unsigned int)(obj->string.length + 1), size));
+	dev_info(dev, "DSDT string id: %s\n", out);
+
+	ret = 0;
+exit:
+	ACPI_FREE(obj);
+	return ret;
+}
+
+static int get_integer_dsdt_data(struct device *dev, const u8 *dsdt,
+				 int func, u64 *out)
+{
+	struct acpi_handle *dev_handle = ACPI_HANDLE(dev);
+	union acpi_object *obj;
+
+	obj = acpi_evaluate_dsm(dev_handle, (void *)dsdt, 0, func, NULL);
+	if (!obj) {
+		dev_err(dev, "No dsdt\n");
+		return -ENODEV;
+	}
+	dev_dbg(dev, "ACPI type %d\n", obj->type);
+
+	if (obj->type != ACPI_TYPE_INTEGER) {
+		ACPI_FREE(obj);
+		return -ENODEV;
+	}
+	*out = obj->integer.value;
+	ACPI_FREE(obj);
+	return 0;
+}
+
+static int read_acpi_block(struct device *dev, char *id, void *data, u32 size)
+{
+	union acpi_object *obj;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_handle *dev_handle = ACPI_HANDLE(dev);
+	int status;
+	u32 buffer_length;
+
+	status = acpi_evaluate_object(dev_handle, id, NULL, &buffer);
+	if (!ACPI_SUCCESS(status))
+		return -ENODEV;
+
+	obj = (union acpi_object *)buffer.pointer;
+	if (!obj || obj->type != ACPI_TYPE_BUFFER) {
+		dev_err(dev, "Couldn't read acpi buffer\n");
+		status = -ENODEV;
+		goto err;
+	}
+
+	if (obj->buffer.length > size) {
+		dev_err(dev, "Given buffer is too small\n");
+		status = -ENODEV;
+		goto err;
+	}
+
+	memcpy(data, obj->buffer.pointer, min(size, obj->buffer.length));
+	buffer_length = obj->buffer.length;
+	kfree(buffer.pointer);
+
+	return buffer_length;
+err:
+	kfree(buffer.pointer);
+	return status;
+}
+
+static struct ipu4_camera_module_data *add_device_to_list(
+	struct list_head *devices)
+{
+	struct ipu4_camera_module_data *cam_device;
+
+	cam_device = kzalloc(sizeof(*cam_device), GFP_KERNEL);
+	if (!cam_device)
+		return NULL;
+
+	list_add(&cam_device->list, devices);
+	return cam_device;
+}
+
+static int get_sensor_gpio(struct device *dev, int index)
+{
+	struct gpio_desc *gpiod_gpio;
+	int gpio;
+
+	gpiod_gpio = gpiod_get_index(dev, NULL, index, GPIOD_ASIS);
+	if (IS_ERR(gpiod_gpio)) {
+		dev_err(dev, "No gpio from index %d\n", index);
+		return -ENODEV;
+	}
+	gpio = desc_to_gpio(gpiod_gpio);
+	gpiod_put(gpiod_gpio);
+	return gpio;
+}
+
+static void *get_dsdt_vcm(struct device *dev, char *vcm, char *second)
+{
+	void *pdata = NULL;
+	const u8
dsdt_cam_vcm[] = {
+		0x39, 0xA6, 0xC9, 0x75, 0x8A, 0x5C, 0x00, 0x4A,
+		0x9F, 0x48, 0xA9, 0xC3, 0xB5, 0xDA, 0x78, 0x9F };
+	int ret = get_string_dsdt_data(dev, dsdt_cam_vcm, 0,
+				       vcm, VCM_BUFFER_SIZE);
+	if (ret < 0) {
+		dev_err(dev, "get vcm failed - using override: %s\n", second);
+		strlcpy(vcm, second, VCM_BUFFER_SIZE);
+	}
+	dev_dbg(dev, "vcm: %s\n", vcm);
+
+	if (!strcasecmp(vcm, LC898122_NAME)) {
+		struct lc898122_platform_data *lc_pdata;
+
+		dev_dbg(dev, "Setting up voice coil motor lc898122\n");
+		lc_pdata = kzalloc(sizeof(struct lc898122_platform_data),
+				   GFP_KERNEL);
+		if (lc_pdata)
+			lc_pdata->sensor_device = dev;
+		pdata = lc_pdata;
+		strlcpy(vcm, LC898122_NAME, VCM_BUFFER_SIZE);
+	} else if (!strcasecmp(vcm, DW9714_NAME)) {
+		struct dw9714_platform_data *dw_pdata;
+
+		dev_dbg(dev, "Setting up voice coil motor dw9714\n");
+		dw_pdata = kzalloc(sizeof(struct dw9714_platform_data),
+				   GFP_KERNEL);
+		if (dw_pdata) {
+			dw_pdata->sensor_dev = dev;
+			if (gpiod_count(dev, NULL) > 1)
+				dw_pdata->gpio_xsd = get_sensor_gpio(dev, 1);
+			else
+				dw_pdata->gpio_xsd = -ENODEV;
+		}
+		pdata = dw_pdata;
+		strlcpy(vcm, DW9714_NAME, VCM_BUFFER_SIZE);
+	}
+	return pdata;
+}
+
+static int get_i2c_info(struct device *dev, struct ipu4_i2c_info *i2c, int size)
+{
+	const u8 dsdt_cam_i2c[] = {
+		0x49, 0x75, 0x25, 0x26, 0x71, 0x92, 0xA4, 0x4C,
+		0xBB, 0x43, 0xC4, 0x89, 0x9D, 0x5A, 0x48, 0x81};
+	u64 num_i2c;
+	int i;
+	int rval = get_integer_dsdt_data(dev, dsdt_cam_i2c, 1, &num_i2c);
+
+	if (rval < 0) {
+		dev_err(dev, "Failed to get number of I2C devices\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < num_i2c && i < size; i++) {
+		u64 data;
+
+		rval = get_integer_dsdt_data(dev, dsdt_cam_i2c, i + 2,
+					     &data);
+		if (rval < 0) {
+			dev_err(dev, "No i2c data\n");
+			return -ENODEV;
+		}
+
+		i2c[i].bus = ((data >> 24) & 0xff) - 1;
+		i2c[i].addr = (data >> 8) & 0xff;
+	}
+	return num_i2c;
+}
+
+static int match_depend(struct device *dev, void *data)
+{
+	return (dev && dev->fwnode == data) ? 1 : 0;
+}
+
+#define MAX_CONSUMERS 1
+struct ipu4_gpio_regulator {
+	struct regulator_consumer_supply consumers[MAX_CONSUMERS];
+	struct regulator_init_data init_data;
+	struct gpio_regulator_config info;
+	struct platform_device pdev;
+	struct list_head list;
+};
+static LIST_HEAD(ipu4_gpio_regulator_head);
+
+static int create_gpio_regulator(struct device *dev, int index, const char *name)
+{
+	struct ipu4_gpio_regulator *reg_device;
+	struct platform_device *cam_regs[1];
+	int gpio;
+	int num_consumers = 0;
+
+	gpio = get_sensor_gpio(dev, index);
+	if (gpio < 0)
+		return gpio;
+
+	reg_device = kzalloc(sizeof(*reg_device), GFP_KERNEL);
+	if (!reg_device)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&reg_device->list);
+	reg_device->consumers[num_consumers].supply = "VANA";
+	reg_device->consumers[num_consumers].dev_name = name;
+	num_consumers++;
+
+	reg_device->init_data.constraints.input_uV = 3300000;
+	reg_device->init_data.constraints.min_uV = 2800000;
+	reg_device->init_data.constraints.max_uV = 2800000;
+	reg_device->init_data.constraints.valid_ops_mask =
+		REGULATOR_CHANGE_STATUS;
+	reg_device->init_data.num_consumer_supplies = num_consumers;
+	reg_device->init_data.consumer_supplies = reg_device->consumers;
+
+	reg_device->info.supply_name = dev_name(dev);
+	reg_device->info.enable_gpio = gpio;
+	reg_device->info.enable_high = 1;
+	reg_device->info.enabled_at_boot = 1;
+	reg_device->info.type = REGULATOR_VOLTAGE;
+	reg_device->info.init_data = &reg_device->init_data;
+	reg_device->pdev.name = "gpio-regulator";
+	reg_device->pdev.id = -1;
+	reg_device->pdev.dev.platform_data = &reg_device->info;
+	cam_regs[0] = &reg_device->pdev;
+
+	platform_add_devices(cam_regs, 1);
+	list_add_tail(&reg_device->list, &ipu4_gpio_regulator_head);
+
+	return 0;
+}
+
+static int remove_gpio_regulator(void)
+{
+	struct ipu4_gpio_regulator *reg_device;
+
+	while (!list_empty(&ipu4_gpio_regulator_head)) {
+		reg_device = list_first_entry(&ipu4_gpio_regulator_head,
+					      struct ipu4_gpio_regulator, list);
+		list_del(&reg_device->list);
+
+		platform_device_unregister(&reg_device->pdev);
+		kfree(reg_device);
+	}
+
+	return 0;
+}
+
+static int get_acpi_dep_data(struct device *dev,
+			     struct sensor_bios_data *sensor)
+{
+	struct acpi_handle *dev_handle = ACPI_HANDLE(dev);
+	struct acpi_handle_list dep_devices;
+	acpi_status status;
+	int i;
+
+	if (!acpi_has_method(dev_handle, "_DEP"))
+		return 0;
+
+	status = acpi_evaluate_reference(dev_handle, "_DEP", NULL,
+					 &dep_devices);
+	if (ACPI_FAILURE(status)) {
+		dev_dbg(dev, "Failed to evaluate _DEP.\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < dep_devices.count; i++) {
+		struct acpi_device *device;
+		struct acpi_device_info *info;
+		struct device *p_dev;
+		int match;
+
+		status = acpi_get_object_info(dep_devices.handles[i], &info);
+		if (ACPI_FAILURE(status)) {
+			dev_dbg(dev, "Error reading _DEP device info\n");
+			continue;
+		}
+
+		match = info->valid & ACPI_VALID_HID &&
+			!strcmp(info->hardware_id.string, "INT3472");
+
+		kfree(info);
+
+		if (!match)
+			continue;
+
+		/* Process device INT3472 created by ACPI */
+		if (acpi_bus_get_device(dep_devices.handles[i], &device))
+			return -ENODEV;
+
+		dev_dbg(dev, "Dependent ACPI device found: %s\n",
+			dev_name(&device->dev));
+
+		p_dev = bus_find_device(&platform_bus_type, NULL,
+					&device->fwnode, match_depend);
+		if (p_dev) {
+			dev_dbg(dev, "Dependent platform device found %s\n",
+				dev_name(p_dev));
+			sensor->dev = p_dev;
+			/* GPIO in index 1 is fixed regulator */
+			create_gpio_regulator(p_dev, 1, dev_name(dev));
+		}
+	}
+	return 0;
+}
+
+static int
get_acpi_ssdb_sensor_data(struct device *dev, + struct sensor_bios_data *sensor) +{ + struct sensor_bios_data_packed sensor_data; + int ret = read_acpi_block(dev, "SSDB", &sensor_data, + sizeof(sensor_data)); + if (ret < 0) + return ret; + + get_acpi_dep_data(dev, sensor); + + /* Xshutdown is not part of the ssdb data */ + sensor->link = sensor_data.link; + sensor->lanes = sensor_data.lanes; + sensor->mclkport = sensor_data.mclkport; + sensor->flash = sensor_data.flash; + sensor->mclkspeed = sensor_data.mclkspeed; + dev_dbg(dev, "sensor acpi data: link %d, lanes %d, mclk %d, flash %d, mclkspeed %d\n", + sensor->link, sensor->lanes, sensor->mclkport, sensor->flash, sensor->mclkspeed); + return 0; +} + +static int ipu_acpi_get_sensor_data(struct device *dev, + struct ipu4_camera_module_data *data, + struct sensor_bios_data *sensor) +{ + const u8 mipi_port_dsdt[] = { + 0xD8, 0x7B, 0x3B, 0xEA, 0x9B, 0xE0, 0x39, 0x42, + 0xAD, 0x6E, 0xED, 0x52, 0x5F, 0x3F, 0x26, 0xAB }; + const u8 mclk_out_dsdt[] = { + 0x51, 0x26, 0xBE, 0x8D, 0xC1, 0x70, 0x6F, 0x4C, + 0xAC, 0x87, 0xA3, 0x7C, 0xB4, 0x6E, 0x4A, 0xF6 }; + + int rval; + u64 acpi_data; + + if (sensor) { + /* Sensor data from ssdb block */ + data->csi2.port = sensor->link; + data->csi2.nlanes = sensor->lanes; + acpi_data = sensor->mclkport; + data->ext_clk = sensor->mclkspeed; + } else { + rval = get_integer_dsdt_data(dev, mipi_port_dsdt, 0, + &acpi_data); + if (rval < 0) { + dev_err(dev, "Can't get mipi port\n"); + return rval; + } + data->csi2.port = acpi_data & 0xf; + data->csi2.nlanes = (acpi_data & 0xf0) >> 4; + + rval = get_integer_dsdt_data(dev, mclk_out_dsdt, 0, &acpi_data); + if (rval < 0) { + dev_err(dev, "Can't get mclk info\n"); + return rval; + } + /* we have 24 MHz clock for sensors now */ + data->ext_clk = 286363636; + } + + /* dsdt data currently contains wrong numbers for combo ports */ + if (data->csi2.port >= 6) + data->csi2.port -= 2; + + if (data->csi2.nlanes == 0) + return -ENODEV; + + switch (acpi_data) { + case 0: + clk_add_alias(NULL, dev_name(dev), "ipu4_cam_clk0", NULL); + break; + case 1: + clk_add_alias(NULL, dev_name(dev), "ipu4_cam_clk1", NULL); + break; + case 2: + clk_add_alias(NULL, dev_name(dev), "ipu4_cam_clk2", NULL); + break; + default: + dev_err(dev, "Unknown clk data %u\n", (unsigned int)acpi_data); + break; + } + + dev_dbg(dev, "sensor: lanes %d, port %d, clk out %d, ext_clk %d\n", + data->csi2.nlanes, + data->csi2.port, (int)acpi_data, data->ext_clk); + return 0; +} + +static int get_custom_gpios(struct device *dev, + struct crlmodule_platform_data *pdata) +{ + int i, ret, c = gpiod_count(dev, NULL) - 1; + + for (i = 0; i < c; i++) { + ret = snprintf(pdata->custom_gpio[i].name, + sizeof(pdata->custom_gpio[i].name), + "custom_gpio%d", i); + if (ret < 0 || ret >= sizeof(pdata->custom_gpio[i].name)) { + dev_err(dev, "Failed to set custom gpio name\n"); + return -EINVAL; + } + /* First GPIO is xshutdown */ + pdata->custom_gpio[i].number = get_sensor_gpio(dev, i + 1); + if (pdata->custom_gpio[i].number < 0) { + dev_err(dev, "unable to get custom gpio number\n"); + return -ENODEV; + } + pdata->custom_gpio[i].val = 1; + pdata->custom_gpio[i].undo_val = 0; + } + + return 0; +} + +static int get_crlmodule_pdata(struct i2c_client *client, + struct ipu4_camera_module_data *data, + struct ipu4_i2c_helper *helper, + void *priv, size_t size) +{ + struct sensor_bios_data sensor; + struct crlmodule_platform_data *pdata; + struct ipu4_i2c_info i2c[2]; + void *vcm_pdata; + char vcm[VCM_BUFFER_SIZE]; + int num = 
get_i2c_info(&client->dev, i2c, ARRAY_SIZE(i2c)); + int rval; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + sensor.dev = &client->dev; + + rval = get_acpi_ssdb_sensor_data(&client->dev, &sensor); + + ipu_acpi_get_sensor_data(&client->dev, data, + rval == 0 ? &sensor : NULL); + + data->pdata = pdata; + /* sensor.dev may here point to sensor or dependent device */ + pdata->xshutdown = get_sensor_gpio(sensor.dev, 0); + if (pdata->xshutdown < 0) { + rval = pdata->xshutdown; + goto err_free_pdata; + } + + rval = get_custom_gpios(sensor.dev, pdata); + if (rval) + goto err_free_pdata; + + pdata->lanes = data->csi2.nlanes; + pdata->ext_clk = data->ext_clk; + client->dev.platform_data = pdata; + + helper->fn(&client->dev, helper->driver_data, &data->csi2, true); + + if ((num <= 1) || !priv) + return 0; + + vcm_pdata = get_dsdt_vcm(&client->dev, vcm, priv); + + dev_info(&client->dev, "Creating vcm instance: bus: %d addr 0x%x %s\n", + i2c[1].bus, i2c[1].addr, vcm); + + return add_new_i2c(i2c[1].addr, i2c[1].bus, 0, vcm, vcm_pdata); + +err_free_pdata: + kfree(pdata); + data->pdata = NULL; + return rval; +} + +#if defined (CONFIG_VIDEO_INTEL_ICI) +static int get_crlmodule_lite_pdata(struct i2c_client *client, + struct ipu4_camera_module_data *data, + struct ipu4_i2c_helper *helper, + void *priv, size_t size) +{ + struct sensor_bios_data sensor; + struct crlmodule_lite_platform_data *pdata; + struct ipu4_i2c_info i2c[2]; + void *vcm_pdata; + char vcm[VCM_BUFFER_SIZE]; + int num = get_i2c_info(&client->dev, i2c, ARRAY_SIZE(i2c)); + int rval; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + sensor.dev = &client->dev; + + rval = get_acpi_ssdb_sensor_data(&client->dev, &sensor); + + ipu_acpi_get_sensor_data(&client->dev, data, + rval == 0 ? 
&sensor : NULL); + + data->pdata = pdata; + /* sensor.dev may here point to sensor or dependent device */ +#if !defined(CONFIG_VIDEO_INTEL_UOS) + pdata->xshutdown = get_sensor_gpio(sensor.dev, 0); + if (pdata->xshutdown < 0) { + rval = pdata->xshutdown; + kfree(pdata); + data->pdata = NULL; + return rval; + } +#endif + pdata->lanes = data->csi2.nlanes; + pdata->ext_clk = data->ext_clk; + client->dev.platform_data = pdata; + + helper->fn(&client->dev, helper->driver_data, &data->csi2, true); + + if ((num <= 1) || !priv) + return 0; + + vcm_pdata = get_dsdt_vcm(&client->dev, vcm, priv); + + dev_info(&client->dev, "Creating vcm instance: bus: %d addr 0x%x %s\n", + i2c[1].bus, i2c[1].addr, vcm); + + return add_new_i2c(i2c[1].addr, i2c[1].bus, 0, vcm, vcm_pdata); +} +#endif + +static int get_smiapp_pdata(struct i2c_client *client, + struct ipu4_camera_module_data *data, + struct ipu4_i2c_helper *helper, + void *priv, size_t size) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) + struct smiapp_platform_data *pdata; +#else + struct smiapp_hwconfig *pdata; +#endif + uint64_t *source = priv; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + data->pdata = pdata; + + data->priv = source; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) + pdata->xshutdown = get_sensor_gpio(&client->dev, 0); + if (pdata->xshutdown < 0) + return -ENODEV; +#endif + + ipu_acpi_get_sensor_data(&client->dev, data, NULL); + + pdata->op_sys_clock = source; + pdata->lanes = data->csi2.nlanes; + pdata->ext_clk = data->ext_clk; + + client->dev.platform_data = pdata; + helper->fn(&client->dev, helper->driver_data, &data->csi2, true); + + return 0; +} + +static int get_lm3643_pdata(struct i2c_client *client, + struct ipu4_camera_module_data *data, + struct ipu4_i2c_helper *helper, + void *priv, size_t size) +{ + struct lm3643_platform_data *pdata; + struct ipu4_i2c_info i2c[2]; + struct gpio_desc *gpiod_reset; + int i; + int num = get_i2c_info(&client->dev, i2c, ARRAY_SIZE(i2c)); + + if (num < 0) + return -ENODEV; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + gpiod_reset = gpiod_get_index(&client->dev, NULL, 0, GPIOD_ASIS); + if (IS_ERR(gpiod_reset)) { + pdata->gpio_reset = -1; + dev_info(&client->dev, "No reset for lm3643\n"); + } else { + pdata->gpio_reset = desc_to_gpio(gpiod_reset); + gpiod_put(gpiod_reset); + } + + /* These should be added to ACPI */ + data->pdata = pdata; + pdata->gpio_torch = -1; + pdata->gpio_strobe = -1; + pdata->flash_max_brightness = 500; + pdata->torch_max_brightness = 89; + + client->dev.platform_data = pdata; + helper->fn(&client->dev, helper->driver_data, NULL, true); + + /* + * Same I2C ACPI entry may contain several instances. I2C core + * ACPI code creates only the first one. 
Create rest of the instances.
+	 */
+	dev_info(&client->dev, "Adding rest of lm3643 instances: %d\n", num);
+	for (i = 1; i < num; i++) {
+		int rval = add_new_i2c(i2c[i].addr, i2c[i].bus,
+				       0, client->name, pdata);
+		if (rval < 0)
+			return rval;
+		dev_info(&client->dev, "LM3643 instance: bus: %d addr 0x%x\n",
+			 i2c[i].bus, i2c[i].addr);
+	}
+
+	return 0;
+}
+
+static int get_as3638_pdata(struct i2c_client *client,
+			    struct ipu4_camera_module_data *data,
+			    struct ipu4_i2c_helper *helper,
+			    void *priv, size_t size)
+{
+	struct as3638_platform_data *pdata;
+	struct gpio_desc *gpiod_pin;
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	gpiod_pin = gpiod_get_index(&client->dev, NULL, 0, GPIOD_ASIS);
+	if (IS_ERR(gpiod_pin)) {
+		pdata->gpio_reset = -1;
+		dev_info(&client->dev, "No reset gpio for as3638\n");
+	} else {
+		pdata->gpio_reset = desc_to_gpio(gpiod_pin);
+		gpiod_put(gpiod_pin);
+	}
+
+	gpiod_pin = gpiod_get_index(&client->dev, NULL, 1, GPIOD_ASIS);
+	if (IS_ERR(gpiod_pin)) {
+		pdata->gpio_torch = -1;
+		dev_info(&client->dev, "No torch gpio for as3638\n");
+	} else {
+		pdata->gpio_torch = desc_to_gpio(gpiod_pin);
+		gpiod_put(gpiod_pin);
+	}
+
+	gpiod_pin = gpiod_get_index(&client->dev, NULL, 2, GPIOD_ASIS);
+	if (IS_ERR(gpiod_pin)) {
+		pdata->gpio_strobe = -1;
+		dev_info(&client->dev, "No strobe gpio for as3638\n");
+	} else {
+		pdata->gpio_strobe = desc_to_gpio(gpiod_pin);
+		gpiod_put(gpiod_pin);
+	}
+
+	/* These should be added to ACPI */
+	data->pdata = pdata;
+	pdata->flash_max_brightness[AS3638_LED1] =
+		AS3638_FLASH_MAX_BRIGHTNESS_LED1;
+	pdata->torch_max_brightness[AS3638_LED1] =
+		AS3638_TORCH_MAX_BRIGHTNESS_LED1;
+	pdata->flash_max_brightness[AS3638_LED2] =
+		AS3638_FLASH_MAX_BRIGHTNESS_LED2;
+	pdata->torch_max_brightness[AS3638_LED2] =
+		AS3638_TORCH_MAX_BRIGHTNESS_LED2;
+	pdata->flash_max_brightness[AS3638_LED3] =
+		AS3638_FLASH_MAX_BRIGHTNESS_LED3;
+	pdata->torch_max_brightness[AS3638_LED3] =
+		AS3638_TORCH_MAX_BRIGHTNESS_LED3;
+
+	client->dev.platform_data = pdata;
+	helper->fn(&client->dev, helper->driver_data, NULL, true);
+
+	return 0;
+}
+
+static const struct ipu4_acpi_devices supported_devices[] = {
+	{ "SONY230A", CRLMODULE_NAME, get_crlmodule_pdata, LC898122_NAME, 0,
+	  imx230regulators },
+	{ "INT3477", CRLMODULE_NAME, get_crlmodule_pdata, NULL, 0,
+	  ov8858regulators },
+	{ "INT3471", CRLMODULE_NAME, get_crlmodule_pdata, NULL, 0 },
+	{ "OV5670AA", CRLMODULE_NAME, get_crlmodule_pdata, NULL, 0 },
+	{ "SONY214A", CRLMODULE_NAME, get_crlmodule_pdata, "dw9714", 0 },
+	{ "SONY132A", SMIAPP_NAME, get_smiapp_pdata, imx132_op_clocks,
+	  sizeof(imx132_op_clocks) },
+	{ "TXNW3643", LM3643_NAME, get_lm3643_pdata, NULL, 0 },
+	{ "AMS3638", AS3638_NAME, get_as3638_pdata, NULL, 0 },
+#if defined (CONFIG_VIDEO_INTEL_ICI)
+	{ "ADV7481A", CRLMODULE_LITE_NAME, get_crlmodule_lite_pdata, NULL, 0 },
+	{ "ADV7481B", CRLMODULE_LITE_NAME, get_crlmodule_lite_pdata, NULL, 0 },
+#else
+	{ "ADV7481A", CRLMODULE_NAME, get_crlmodule_pdata, NULL, 0 },
+	{ "ADV7481B", CRLMODULE_NAME, get_crlmodule_pdata, NULL, 0 },
+#endif
+};
+
+static int get_table_index(struct device *device, const char *acpi_name)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
+		if (!strcmp(acpi_name, supported_devices[i].hid_name))
+			return i;
+	}
+
+	return -ENODEV;
+}
+
+/* List of ACPI devices we can handle */
+static const struct acpi_device_id ipu4_acpi_match[] = {
+	{ "SONY230A", 0 },
+	{ "INT3477", 0 },
+	{ "INT3471", 0 },
"TXNW3643", 0 }, + { "AMS3638", 0 }, + { "SONY214A", 0 }, + { "SONY132A", 0 }, + { "OV5670AA", 0 }, + { "ADV7481A", 0 }, + { "ADV7481B", 0 }, + {}, +}; + +static int map_power_rails(char *src_dev_name, char *src_regulator, + struct device *dev, char *dest_rail) +{ + struct device *src_dev; + int rval; + + if (!src_dev_name) { + dev_dbg(dev, "Regulator device name missing"); + return -ENODEV; + } + + src_dev = bus_find_device_by_name(&platform_bus_type, NULL, + src_dev_name); + if (!src_dev) { + dev_dbg(dev, "Regulator device device not found"); + return -ENODEV; + } + + rval = regulator_register_supply_alias(dev, dest_rail, src_dev, + src_regulator); + if (rval < 0) { + dev_err(dev, "Regulator alias mapping fails %s, %s <-> %s, %s", + dev_name(src_dev), src_regulator, + dev_name(dev), dest_rail); + return -ENODEV; + } + return 0; +} + +static int ipu_acpi_pdata(struct i2c_client *client, + const struct acpi_device_id *acpi_id, + struct ipu4_i2c_helper *helper) +{ + struct ipu4_camera_module_data *camdata; + const struct ipu_regulator *regulators; + int index = get_table_index(&client->dev, acpi_id->id); + + if (index < 0) { + dev_err(&client->dev, + "Device is not in supported devices list\n"); + return -ENODEV; + } + + camdata = add_device_to_list(&devices); + if (!camdata) + return -ENOMEM; + + strlcpy(client->name, supported_devices[index].real_driver, + sizeof(client->name)); + + regulators = supported_devices[index].regulators; + while (regulators && regulators->src_dev_name) { + map_power_rails(regulators->src_dev_name, + regulators->src_rail, + &client->dev, + regulators->dest_rail); + regulators++; + } + + supported_devices[index].get_platform_data( + client, camdata, helper, + supported_devices[index].priv_data, + supported_devices[index].priv_size); + + return 0; +} + +static int ipu4_i2c_test(struct device *dev, void *priv) +{ + struct i2c_client *client = i2c_verify_client(dev); + const struct acpi_device_id *acpi_id; + + /* + * Check that we are handling only I2C devices which really has + * ACPI data and are one of the devices which we want to handle + */ + if (!ACPI_COMPANION(dev) || !client) + return 0; + + acpi_id = acpi_match_device(ipu4_acpi_match, dev); + if (!acpi_id) + return 0; + + /* + * Skip if platform data has already been added. + * Probably ACPI data overruled by kernel platform data + */ + if (client->dev.platform_data) { + dev_info(dev, "ACPI device has already platform data\n"); + return 0; + } + + /* Looks that we got what we are looking for */ + if (ipu_acpi_pdata(client, acpi_id, priv)) + dev_err(dev, "Failed to process ACPI data"); + + /* Don't return error since we want to process remaining devices */ + return 0; +} + +/* Scan all i2c devices and pick ones which we can handle */ +int ipu_get_acpi_devices(void *driver_data, + struct device *dev, + int (*fn) + (struct device *, void *, + struct ipu_isys_csi2_config *csi2, + bool reprobe)) +{ + struct ipu4_i2c_helper helper = { + .fn = fn, + .driver_data = driver_data, + }; + struct ipu4_i2c_new_dev *new_i2c_dev, *safe; + int rval; + + if ((!fn) || (!driver_data)) + return -ENODEV; + + rval = i2c_for_each_dev(&helper, ipu4_i2c_test); + if (rval < 0) + return rval; + + /* + * Some ACPI entries may contain several i2c devices. 
+ * Create new devices here if those were added to list during + * ACPI processing + */ + list_for_each_entry_safe(new_i2c_dev, safe, &new_devs, list) { + struct i2c_adapter *adapter; + struct i2c_client *client; + + adapter = i2c_get_adapter(new_i2c_dev->bus); + if (!adapter) { + dev_err(dev, "Failed to get adapter\n"); + list_del(&new_i2c_dev->list); + kfree(new_i2c_dev); + continue; + } + + dev_info(dev, "New i2c device: %s\n", new_i2c_dev->info.type); + request_module(I2C_MODULE_PREFIX "%s", new_i2c_dev->info.type); + + client = i2c_new_device(adapter, &new_i2c_dev->info); + if (client) + fn(&client->dev, driver_data, NULL, false); + else + dev_err(dev, "failed to add I2C device from ACPI\n"); + + i2c_put_adapter(adapter); + list_del(&new_i2c_dev->list); + kfree(new_i2c_dev); + } + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_get_acpi_devices); + +static void __exit ipu_acpi_exit(void) +{ + remove_gpio_regulator(); +} +module_exit(ipu_acpi_exit); + +MODULE_AUTHOR("Samu Onkalo "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("IPU4 ACPI support"); diff --git a/drivers/media/platform/intel/ipu4-bxt-gp-pdata.c b/drivers/media/platform/intel/ipu4-bxt-gp-pdata.c new file mode 100644 index 000000000000..c49ab8d1ab06 --- /dev/null +++ b/drivers/media/platform/intel/ipu4-bxt-gp-pdata.c @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2016--2017 Intel Corporation. + * + * Author: Jouni Ukkonen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ */
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include "ipu.h"
+
+#define ADV7481_HDMI_LANES 4
+#define ADV7481_HDMI_I2C_ADDRESS 0xe0
+
+#define ADV7481_LANES 1
+/*
+ * The i2c address below is a dummy one, to be able to register a
+ * single ADV7481 chip as two sensors
+ */
+#define ADV7481_I2C_ADDRESS 0xe1
+
+#define GPIO_BASE 434
+
+static struct crlmodule_platform_data adv7481_cvbs_pdata = {
+	.ext_clk = 286363636,
+	.xshutdown = GPIO_BASE + 64, /* dummy for now */
+	.lanes = ADV7481_LANES,
+	.module_name = "ADV7481 CVBS"
+};
+
+static struct ipu_isys_csi2_config adv7481_cvbs_csi2_cfg = {
+	.nlanes = ADV7481_LANES,
+	.port = 4,
+};
+
+static struct ipu_isys_subdev_info adv7481_cvbs_crl_sd = {
+	.csi2 = &adv7481_cvbs_csi2_cfg,
+	.i2c = {
+		.board_info = {
+			.type = CRLMODULE_NAME,
+			.flags = I2C_CLIENT_TEN,
+			.addr = ADV7481_I2C_ADDRESS,
+			.platform_data = &adv7481_cvbs_pdata,
+		},
+		.i2c_adapter_id = 0,
+	}
+};
+
+static struct crlmodule_platform_data adv7481_hdmi_pdata = {
+	/* FIXME: may need to revisit */
+	.ext_clk = 286363636,
+	.xshutdown = GPIO_BASE + 30,
+	.lanes = ADV7481_HDMI_LANES,
+	.module_name = "ADV7481 HDMI",
+	.crl_irq_pin = GPIO_BASE + 22,
+	.irq_pin_flags = (IRQF_TRIGGER_RISING | IRQF_ONESHOT),
+	.irq_pin_name = "ADV7481_HDMI_IRQ",
+};
+
+static struct ipu_isys_csi2_config adv7481_hdmi_csi2_cfg = {
+	.nlanes = ADV7481_HDMI_LANES,
+	.port = 0,
+};
+
+static struct ipu_isys_subdev_info adv7481_hdmi_crl_sd = {
+	.csi2 = &adv7481_hdmi_csi2_cfg,
+	.i2c = {
+		.board_info = {
+			.type = CRLMODULE_NAME,
+			.flags = I2C_CLIENT_TEN,
+			.addr = ADV7481_HDMI_I2C_ADDRESS,
+			.platform_data = &adv7481_hdmi_pdata,
+		},
+		.i2c_adapter_id = 0,
+	}
+};
+
+/*
+ * Map buttress output sensor clocks to sensors -
+ * this should be coming from ACPI. On Gordon Peak the
+ * ADV7481 has its own oscillator, so no buttress clock is
+ * needed.
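
For boards where a sensor does consume a buttress clock, a mapping entry pairs the sensor's I2C device name ("bus-address") with the clock output name, as the BXT-P table later in this patch does. A hypothetical one-entry version; the device name is illustrative:

	struct ipu_isys_clk_mapping gp_mapping_example[] = {
		{ CLKDEV_INIT("0-00e0", NULL, NULL), "OSC_CLK_OUT0" },
		{ CLKDEV_INIT(NULL, NULL, NULL), NULL }
	};
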
+ */ +struct ipu_isys_clk_mapping gp_mapping[] = { + { CLKDEV_INIT(NULL, NULL, NULL), NULL } +}; + +static struct ipu_isys_subdev_pdata pdata = { + .subdevs = (struct ipu_isys_subdev_info *[]) { + &adv7481_hdmi_crl_sd, + &adv7481_cvbs_crl_sd, + NULL, + }, + .clk_map = gp_mapping, +}; + +static void ipu4_quirk(struct pci_dev *pci_dev) +{ + pci_dev->dev.platform_data = &pdata; +} + +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, IPU_PCI_ID, + ipu4_quirk); diff --git a/drivers/media/platform/intel/ipu4-bxt-p-pdata.c b/drivers/media/platform/intel/ipu4-bxt-p-pdata.c new file mode 100755 index 000000000000..53c0037295d9 --- /dev/null +++ b/drivers/media/platform/intel/ipu4-bxt-p-pdata.c @@ -0,0 +1,1128 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2015 - 2018 Intel Corporation + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include "ipu.h" +#include +#define GPIO_BASE 422 +#ifdef CONFIG_INTEL_IPU4_OV13858 + +#define DW9714_VCM_ADDR 0x0c +// for port 1 +static struct dw9714_platform_data dw9714_pdata = { + .gpio_xsd = GPIO_BASE + 67, +}; + +static struct ipu_isys_subdev_info dw9714_common_sd = { + .i2c = { + .board_info = { + I2C_BOARD_INFO(DW9714_NAME, DW9714_VCM_ADDR), + .platform_data = &dw9714_pdata, + }, + .i2c_adapter_id = 4, + } +}; + +// for port2 +static struct dw9714_platform_data dw9714_pdata_1 = { + .gpio_xsd = GPIO_BASE + 64, +}; + +static struct ipu_isys_subdev_info dw9714_common_sd_1 = { + .i2c = { + .board_info = { + I2C_BOARD_INFO(DW9714_NAME, DW9714_VCM_ADDR), + .platform_data = &dw9714_pdata_1, + }, + .i2c_adapter_id = 2, + } +}; + +#define OV13858_LANES 4 +#define OV13858_I2C_ADDRESS 0x10 + +//for port1 +static struct crlmodule_platform_data ov13858_pdata = { + .xshutdown = GPIO_BASE + 67, + .lanes = OV13858_LANES, + .ext_clk = 19200000, + .op_sys_clock = (uint64_t []){ 54000000 }, + .module_name = "OV13858", + .id_string = "0xd8 0x55", +}; + +static struct ipu_isys_csi2_config ov13858_csi2_cfg = { + .nlanes = OV13858_LANES, + .port = 4, +}; + +static struct ipu_isys_subdev_info ov13858_crl_sd = { + .csi2 = &ov13858_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, OV13858_I2C_ADDRESS), + .platform_data = &ov13858_pdata, + }, + .i2c_adapter_id = 4, + }, +/* .acpiname = "i2c-OVTIF858:00", */ +}; + +// for port2 +static struct crlmodule_platform_data ov13858_pdata_2 = { + .xshutdown = GPIO_BASE + 64, + .lanes = OV13858_LANES, + .ext_clk = 19200000, + .op_sys_clock = (uint64_t []){ 54000000 }, + .module_name = "OV13858-2", + .id_string = "0xd8 0x55", +}; + +static struct ipu_isys_csi2_config ov13858_csi2_cfg_2 = { + .nlanes = OV13858_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info ov13858_crl_sd_2 = { + .csi2 = &ov13858_csi2_cfg_2, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, OV13858_I2C_ADDRESS), + .platform_data = &ov13858_pdata_2, + }, + .i2c_adapter_id = 2, + }, +}; + +#endif + +#ifdef CONFIG_INTEL_IPU4_OV2740 +#define OV2740_LANES 2 +#define OV2740_I2C_ADDRESS 0x36 +static struct crlmodule_platform_data ov2740_pdata = { + .xshutdown = GPIO_BASE + 64, + .lanes = OV2740_LANES, + .ext_clk = 19200000, + .op_sys_clock = (uint64_t []){ 72000000 }, + .module_name = "INT3474", + .id_string = "0x27 0x40", +}; + +static struct ipu_isys_csi2_config ov2740_csi2_cfg = { + .nlanes = OV2740_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info ov2740_crl_sd = { + .csi2 = &ov2740_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, OV2740_I2C_ADDRESS), + 
.platform_data = &ov2740_pdata, + }, + .i2c_adapter_id = 2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_IMX185 +#define IMX185_LANES 4 +#define IMX185_I2C_ADDRESS 0x1a + +static struct crlmodule_platform_data imx185_pdata = { + .xshutdown = GPIO_BASE + 71, + .lanes = IMX185_LANES, + .ext_clk = 27000000, + .op_sys_clock = (uint64_t []){ 55687500, 111375000, + 111375000, 222750000 }, + .module_name = "IMX185", + .id_string = "0x1 0x85", +}; + +static struct ipu_isys_csi2_config imx185_csi2_cfg = { + .nlanes = IMX185_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info imx185_crl_sd = { + .csi2 = &imx185_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, IMX185_I2C_ADDRESS), + .platform_data = &imx185_pdata, + }, + .i2c_adapter_id = 2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_IMX185_LI +#define IMX185_LI_LANES 4 +#define IMX185_LI_I2C_ADDRESS 0x1a + +static struct crlmodule_platform_data imx185_li_pdata = { + .xshutdown = GPIO_BASE + 67, + .lanes = IMX185_LI_LANES, + .ext_clk = 27000000, + .op_sys_clock = (uint64_t []){ 55687500, 111375000, + 111375000, 222750000 }, + .module_name = "IMX185", + .id_string = "0x1 0x85", +}; + +static struct ipu_isys_csi2_config imx185_li_csi2_cfg = { + .nlanes = IMX185_LI_LANES, + .port = 4, +}; + +static struct ipu_isys_subdev_info imx185_li_crl_sd = { + .csi2 = &imx185_li_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, IMX185_LI_I2C_ADDRESS), + .platform_data = &imx185_li_pdata, + }, + .i2c_adapter_id = 4, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_AR023Z +#define AR023Z_MIPI_LANES 2 +/* Toshiba TC358778 Parallel-MIPI Bridge */ +#define TC358778_I2C_ADDRESS 0x0e + +static struct crlmodule_platform_data ar023z_pdata = { + .xshutdown = GPIO_BASE + 64, + .lanes = AR023Z_MIPI_LANES, + .ext_clk = 27000000, + .op_sys_clock = (uint64_t []){317250000}, + .module_name = "AR023Z", + .id_string = "0x4401 0x64", +}; + +static struct ipu_isys_csi2_config ar023z_csi2_cfg = { + .nlanes = AR023Z_MIPI_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info ar023z_crl_sd = { + .csi2 = &ar023z_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, TC358778_I2C_ADDRESS), + .platform_data = &ar023z_pdata, + }, + .i2c_adapter_id = 2, + } +}; + +static struct crlmodule_platform_data ar023z_b_pdata = { + .xshutdown = GPIO_BASE + 67, + .lanes = AR023Z_MIPI_LANES, + .ext_clk = 27000000, + .op_sys_clock = (uint64_t []){317250000}, + .module_name = "AR023Z", + .id_string = "0x4401 0x64", +}; + +static struct ipu_isys_csi2_config ar023z_b_csi2_cfg = { + .nlanes = AR023Z_MIPI_LANES, + .port = 4, +}; + +static struct ipu_isys_subdev_info ar023z_b_crl_sd = { + .csi2 = &ar023z_b_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, TC358778_I2C_ADDRESS), + .platform_data = &ar023z_b_pdata, + }, + .i2c_adapter_id = 4, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_IMX477 +#define IMX477_LANES 2 + +#define IMX477_I2C_ADDRESS 0x10 + +static struct crlmodule_platform_data imx477_pdata_master = { + .xshutdown = GPIO_BASE + 64, + .lanes = IMX477_LANES, + .ext_clk = 19200000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "IMX477-MASTER", + .id_string = "0x4 0x77", +}; + +static struct ipu_isys_csi2_config imx477_csi2_cfg_master = { + .nlanes = IMX477_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info imx477_crl_sd_master = { + .csi2 = &imx477_csi2_cfg_master, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, IMX477_I2C_ADDRESS), + .platform_data = 
&imx477_pdata_master, + }, + .i2c_adapter_id = 2, + } +}; + +static struct crlmodule_platform_data imx477_pdata_slave_1 = { + .xshutdown = GPIO_BASE + 67, + .lanes = IMX477_LANES, + .ext_clk = 19200000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "IMX477-SLAVE-1", + .id_string = "0x4 0x77", +}; + +static struct ipu_isys_csi2_config imx477_csi2_cfg_slave_1 = { + .nlanes = IMX477_LANES, + .port = 4, +}; + +static struct ipu_isys_subdev_info imx477_crl_sd_slave_1 = { + .csi2 = &imx477_csi2_cfg_slave_1, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, IMX477_I2C_ADDRESS), + .platform_data = &imx477_pdata_slave_1, + }, + .i2c_adapter_id = 4, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_IMX274 + +#define IMX274_LANES 4 +#define IMX274_I2C_ADDRESS 0x1a + +static struct crlmodule_platform_data imx274_pdata = { + .xshutdown = GPIO_BASE + 64, + .lanes = IMX274_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){720000000}, + .module_name = "IMX274", + .id_string = "0x6 0x9", +}; + +static struct ipu_isys_csi2_config imx274_csi2_cfg = { + .nlanes = IMX274_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info imx274_crl_sd = { + .csi2 = &imx274_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, IMX274_I2C_ADDRESS), + .platform_data = &imx274_pdata + }, + .i2c_adapter_id = 2, + } +}; + +static struct crlmodule_platform_data imx274_b_pdata = { + .xshutdown = GPIO_BASE + 67, + .lanes = IMX274_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){720000000}, + .module_name = "IMX274", + .id_string = "0x6 0x9", +}; + +static struct ipu_isys_csi2_config imx274_b_csi2_cfg = { + .nlanes = IMX274_LANES, + .port = 4, +}; + +static struct ipu_isys_subdev_info imx274_b_crl_sd = { + .csi2 = &imx274_b_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, IMX274_I2C_ADDRESS), + .platform_data = &imx274_b_pdata + }, + .i2c_adapter_id = 4, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_IMX290 + +#define IMX290_LANES 4 +#define IMX290_I2C_ADDRESS 0x1a + +static struct crlmodule_platform_data imx290_pdata = { + .xshutdown = GPIO_BASE + 64, + .lanes = IMX290_LANES, + .ext_clk = 37125000, + .op_sys_clock = (uint64_t []){222750000, 445500000}, + .module_name = "IMX290", + .id_string = "0x2 0x90", +}; + +static struct ipu_isys_csi2_config imx290_csi2_cfg = { + .nlanes = IMX290_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info imx290_crl_sd = { + .csi2 = &imx290_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, IMX290_I2C_ADDRESS), + .platform_data = &imx290_pdata + }, + .i2c_adapter_id = 2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_OV13860 + +#define OV13860_LANES 2 +#define OV13860_I2C_ADDRESS 0x10 + +static struct crlmodule_platform_data ov13860_pdata = { + .xshutdown = GPIO_BASE + 71, + .lanes = OV13860_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){ 600000000, 300000000}, + .module_name = "OV13860" +}; + +static struct ipu_isys_csi2_config ov13860_csi2_cfg = { + .nlanes = OV13860_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info ov13860_crl_sd = { + .csi2 = &ov13860_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, OV13860_I2C_ADDRESS), + .platform_data = &ov13860_pdata, + }, + .i2c_adapter_id = 2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_OV9281 + +#define OV9281_LANES 2 +#define OV9281_I2C_ADDRESS 0x10 + +static struct crlmodule_platform_data ov9281_pdata = { + .xshutdown = GPIO_BASE + 71, + .lanes = OV9281_LANES, + .ext_clk = 24000000, + 
.op_sys_clock = (uint64_t []){400000000}, + .module_name = "OV9281", + .id_string = "0x92 0x81", +}; + +static struct ipu_isys_csi2_config ov9281_csi2_cfg = { + .nlanes = OV9281_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info ov9281_crl_sd = { + .csi2 = &ov9281_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, OV9281_I2C_ADDRESS), + .platform_data = &ov9281_pdata, + }, + .i2c_adapter_id = 0, + } +}; +#endif + +#if IS_ENABLED(CONFIG_VIDEO_BU64295) + +#define BU64295_VCM_ADDR 0x0c +#define BU64295_NAME "bu64295" + +static struct ipu_isys_subdev_info bu64295_sd = { + .i2c = { + .board_info = { + I2C_BOARD_INFO(BU64295_NAME, BU64295_VCM_ADDR), + }, + .i2c_adapter_id = 2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_ADV7481 + +#define ADV7481_LANES 4 +#define ADV7481_I2C_ADDRESS 0xe0 +#define ADV7481B_I2C_ADDRESS 0xe2 + +static struct crlmodule_platform_data adv7481_pdata = { + .xshutdown = GPIO_BASE + 63, + .lanes = ADV7481_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "ADV7481" +}; + +static struct ipu_isys_csi2_config adv7481_csi2_cfg = { + .nlanes = ADV7481_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info adv7481_crl_sd = { + .csi2 = &adv7481_csi2_cfg, + .i2c = { + .board_info = { + .type = CRLMODULE_NAME, + .flags = I2C_CLIENT_TEN, + .addr = ADV7481_I2C_ADDRESS, + .platform_data = &adv7481_pdata, + }, + .i2c_adapter_id = 2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_ADV7481_EVAL + +#define ADV7481_LANES 4 +#define ADV7481_I2C_ADDRESS 0xe0 +#define ADV7481B_I2C_ADDRESS 0xe2 + +static struct crlmodule_platform_data adv7481_eval_pdata = { + .xshutdown = GPIO_BASE + 63, + .lanes = ADV7481_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "ADV7481_EVAL" +}; + +static struct ipu_isys_csi2_config adv7481_eval_csi2_cfg = { + .nlanes = ADV7481_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info adv7481_eval_crl_sd = { + .csi2 = &adv7481_eval_csi2_cfg, + .i2c = { + .board_info = { + .type = CRLMODULE_NAME, + .flags = I2C_CLIENT_TEN, + .addr = ADV7481_I2C_ADDRESS, + .platform_data = &adv7481_eval_pdata, + }, + .i2c_adapter_id = 2, + } +}; + +static struct crlmodule_platform_data adv7481b_eval_pdata = { + .xshutdown = GPIO_BASE + 63, + .lanes = ADV7481_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "ADV7481B_EVAL" +}; + +static struct ipu_isys_csi2_config adv7481b_eval_csi2_cfg = { + .nlanes = ADV7481_LANES, + .port = 4, +}; + +static struct ipu_isys_subdev_info adv7481b_eval_crl_sd = { + .csi2 = &adv7481b_eval_csi2_cfg, + .i2c = { + .board_info = { + .type = CRLMODULE_NAME, + .flags = I2C_CLIENT_TEN, + .addr = ADV7481B_I2C_ADDRESS, + .platform_data = &adv7481b_eval_pdata, + }, + .i2c_adapter_id = 2, + } +}; +#endif + +#if IS_ENABLED(CONFIG_VIDEO_AGGREGATOR_STUB) + +#define VIDEO_AGGRE_LANES 4 +#define VIDEO_AGGRE_I2C_ADDRESS 0x3b +#define VIDEO_AGGRE_B_I2C_ADDRESS 0x3c + +static struct ipu_isys_csi2_config video_aggre_csi2_cfg = { + .nlanes = VIDEO_AGGRE_LANES, + .port = 0, +}; + +static struct ipu_isys_subdev_info video_aggre_stub_sd = { + .csi2 = &video_aggre_csi2_cfg, + .i2c = { + .board_info = { + .type = "video-aggre", + .addr = VIDEO_AGGRE_I2C_ADDRESS, + }, + .i2c_adapter_id = 2, + } +}; + +static struct ipu_isys_csi2_config video_aggre_b_csi2_cfg = { + .nlanes = VIDEO_AGGRE_LANES, + .port = 4, +}; + +static struct ipu_isys_subdev_info video_aggre_b_stub_sd = { + .csi2 = &video_aggre_b_csi2_cfg, + .i2c = { + 
.board_info = {
+			.type = "video-aggre",
+			.addr = VIDEO_AGGRE_B_I2C_ADDRESS,
+		},
+		.i2c_adapter_id = 2,
+	}
+};
+#endif
+
+#if IS_ENABLED(CONFIG_INTEL_IPU4_MAGNA)
+#define MAGNA_LANES 4
+#define MAGNA_PHY_ADDR 0x60 /* 0x30 for 7bit addr */
+#define MAGNA_ADDRESS_A 0x61
+
+static struct crlmodule_platform_data magna_pdata = {
+	.lanes = MAGNA_LANES,
+	.ext_clk = 24000000,
+	.op_sys_clock = (uint64_t []){ 400000000 },
+	.module_name = "MAGNA",
+	.id_string = "0xa6 0x35",
+	/*
+	 * The pin number of xshutdown will be determined
+	 * and replaced inside TI964 driver.
+	 * The number here stands for which GPIO to connect with.
+	 * 1 means to connect sensor xshutdown to GPIO1
+	 */
+	.xshutdown = 1,
+	/*
+	 * This flag indicates the expected polarity for the LineValid
+	 * indication received in Raw mode.
+	 * 1 means LineValid is high for the duration of the video frame.
+	 */
+	.high_framevalid_flags = 1,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_INTEL_IPU4_OV10635)
+#define OV10635_LANES 4
+#define OV10635_I2C_PHY_ADDR 0x60 /* 0x30 for 7bit addr */
+#define OV10635A_I2C_ADDRESS 0x61
+#define OV10635B_I2C_ADDRESS 0x62
+#define OV10635C_I2C_ADDRESS 0x63
+#define OV10635D_I2C_ADDRESS 0x64
+
+static struct crlmodule_platform_data ov10635_pdata = {
+	.lanes = OV10635_LANES,
+	.ext_clk = 24000000,
+	.op_sys_clock = (uint64_t []){ 400000000 },
+	.module_name = "OV10635",
+	.id_string = "0xa6 0x35",
+	/*
+	 * The pin number of xshutdown will be determined
+	 * and replaced inside TI964 driver.
+	 * The number here stands for which GPIO to connect with.
+	 * 1 means to connect sensor xshutdown to GPIO1
+	 */
+	.xshutdown = 1,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_INTEL_IPU4_OV10640)
+#define OV10640_LANES 4
+#define OV10640_I2C_PHY_ADDR 0x60 /* 0x30 for 7bit addr */
+#define OV10640A_I2C_ADDRESS 0x61
+#define OV10640B_I2C_ADDRESS 0x62
+#define OV10640C_I2C_ADDRESS 0x63
+#define OV10640D_I2C_ADDRESS 0x64
+
+static struct crlmodule_platform_data ov10640_pdata = {
+	.lanes = OV10640_LANES,
+	.ext_clk = 24000000,
+	.op_sys_clock = (uint64_t []){ 400000000 },
+	.module_name = "OV10640",
+	.id_string = "0xa6 0x40",
+	/*
+	 * The pin number of xshutdown will be determined
+	 * and replaced inside TI964 driver.
+	 * The number here stands for which GPIO to connect with.
+ * 1 means to connect sensor xshutdown to GPIO1 + */ + .xshutdown = 1, +}; +#endif + +#if IS_ENABLED(CONFIG_VIDEO_TI964) +#define TI964_I2C_ADAPTER 0 +#define TI964_I2C_ADAPTER_2 7 +#define TI964_I2C_ADDRESS 0x3d +#define TI964_LANES 4 + +static struct ipu_isys_csi2_config ti964_csi2_cfg = { + .nlanes = TI964_LANES, + .port = 0, +}; + +static struct ipu_isys_csi2_config ti964_csi2_cfg_2 = { + .nlanes = TI964_LANES, + .port = 4, +}; + +static struct ti964_subdev_info ti964_subdevs[] = { +#ifdef CONFIG_INTEL_IPU4_OV10635 + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635A_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 0, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635B_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 1, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635C_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 2, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635D_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 3, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + }, +#endif +#ifdef CONFIG_INTEL_IPU4_OV10640 + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640A_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 0, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640B_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 1, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640C_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 2, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640D_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 3, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + }, +#endif +#ifdef CONFIG_INTEL_IPU4_MAGNA + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = MAGNA_ADDRESS_A, + .platform_data = &magna_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + .rx_port = 0, + .phy_i2c_addr = MAGNA_PHY_ADDR, + }, +#endif +}; + +static struct ti964_subdev_info ti964_subdevs_2[] = { +#ifdef CONFIG_INTEL_IPU4_OV10635 + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635A_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 0, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635B_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 1, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635C_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 2, + .phy_i2c_addr = OV10635_I2C_PHY_ADDR, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10635D_I2C_ADDRESS, + .platform_data = &ov10635_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 3, + .phy_i2c_addr = 
OV10635_I2C_PHY_ADDR, + }, +#endif +#ifdef CONFIG_INTEL_IPU4_OV10640 + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640A_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 0, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640B_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 1, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640C_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 2, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = OV10640D_I2C_ADDRESS, + .platform_data = &ov10640_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + .rx_port = 3, + .phy_i2c_addr = OV10640_I2C_PHY_ADDR, + }, +#endif +}; + +static struct ti964_pdata ti964_pdata = { + .subdev_info = ti964_subdevs, + .subdev_num = ARRAY_SIZE(ti964_subdevs), + .reset_gpio = GPIO_BASE + 63, +}; + +static struct ipu_isys_subdev_info ti964_sd = { + .csi2 = &ti964_csi2_cfg, + .i2c = { + .board_info = { + .type = "ti964", + .addr = TI964_I2C_ADDRESS, + .platform_data = &ti964_pdata, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER, + } +}; + +static struct ti964_pdata ti964_pdata_2 = { + .subdev_info = ti964_subdevs_2, + .subdev_num = ARRAY_SIZE(ti964_subdevs_2), + .reset_gpio = GPIO_BASE + 66, +}; + +static struct ipu_isys_subdev_info ti964_sd_2 = { + .csi2 = &ti964_csi2_cfg_2, + .i2c = { + .board_info = { + .type = "ti964", + .addr = TI964_I2C_ADDRESS, + .platform_data = &ti964_pdata_2, + }, + .i2c_adapter_id = TI964_I2C_ADAPTER_2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_OV2775 +#define OV2775_LANES 2 +#define OV2775_I2C_ADAPTER 3 +#define OV2775_I2C_ADDRESS 0x6C + +static struct crlmodule_platform_data ov2775_pdata = { + .lanes = OV2775_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){ 480000000 }, + .module_name = "OV2775", + .id_string = "0x27 0x70", + /* + * The pin number of xshutdown will be determined + * and replaced inside TI960 driver. + * The number here stands for which GPIO to connect with. 
+ * 1 means to connect sensor xshutdown to GPIO1 + */ + .xshutdown = 1, +}; + +static struct ipu_isys_csi2_config ov2775_csi2_cfg = { + .nlanes = OV2775_LANES, + .port = 4, +}; + +static struct ipu_isys_subdev_info ov2775_crl_sd = { + .csi2 = &ov2775_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_NAME, OV2775_I2C_ADDRESS), + .platform_data = &ov2775_pdata, + }, + .i2c_adapter_id = OV2775_I2C_ADAPTER, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_AR0231AT +#define AR0231AT_LANES 4 +#define AR0231ATA_I2C_ADDRESS 0x11 +#define AR0231ATB_I2C_ADDRESS 0x12 +#define AR0231ATC_I2C_ADDRESS 0x13 +#define AR0231ATD_I2C_ADDRESS 0x14 + +static struct crlmodule_platform_data ar0231at_pdata = { + .lanes = AR0231AT_LANES, + .ext_clk = 27000000, + .op_sys_clock = (uint64_t[]){ 87750000 }, + .module_name = "AR0231AT", +}; +#endif + +#if IS_ENABLED(CONFIG_VIDEO_MAX9286) +#define DS_MAX9286_LANES 4 +#define DS_MAX9286_I2C_ADAPTER 4 +#define DS_MAX9286_I2C_ADDRESS 0x48 + +static struct ipu_isys_csi2_config max9286_csi2_cfg = { + .nlanes = DS_MAX9286_LANES, + .port = 4, +}; + +static struct max9286_subdev_i2c_info max9286_subdevs[] = { +#ifdef CONFIG_INTEL_IPU4_AR0231AT + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = AR0231ATA_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = AR0231ATB_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = AR0231ATC_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = AR0231ATD_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER, + }, +#endif +}; + +static struct max9286_pdata max9286_pdata = { + .subdev_info = max9286_subdevs, + .subdev_num = ARRAY_SIZE(max9286_subdevs), + .reset_gpio = GPIO_BASE + 63, +}; + +static struct ipu_isys_subdev_info max9286_sd = { + .csi2 = &max9286_csi2_cfg, + .i2c = { + .board_info = { + .type = "max9286", + .addr = DS_MAX9286_I2C_ADDRESS, + .platform_data = &max9286_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER, + } +}; +#endif + +/* + * Map buttress output sensor clocks to sensors - + * this should be coming from ACPI + */ +static struct ipu_isys_clk_mapping clk_mapping[] = { + { CLKDEV_INIT("2-0036", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("2-001a", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("4-001a", NULL, NULL), "OSC_CLK_OUT1" }, + { CLKDEV_INIT("2-0010", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("4-0010", NULL, NULL), "OSC_CLK_OUT1" }, + { CLKDEV_INIT("2-a0e0", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("2-a0e2", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("0-0010", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("2-000e", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("4-000e", NULL, NULL), "OSC_CLK_OUT1" }, + { CLKDEV_INIT("0-0048", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("4-0048", NULL, NULL), "OSC_CLK_OUT1" }, + { CLKDEV_INIT(NULL, NULL, NULL), NULL } +}; + +static struct ipu_isys_subdev_pdata pdata = { + .subdevs = (struct ipu_isys_subdev_info *[]) { +#ifdef CONFIG_INTEL_IPU4_OV13858 + &ov13858_crl_sd, + &dw9714_common_sd, + &ov13858_crl_sd_2, + &dw9714_common_sd_1, +#endif +#ifdef CONFIG_INTEL_IPU4_OV2740 + &ov2740_crl_sd, +#endif +#ifdef CONFIG_INTEL_IPU4_IMX185 + &imx185_crl_sd, +#endif +#ifdef 
CONFIG_INTEL_IPU4_IMX185_LI + &imx185_li_crl_sd, +#endif +#ifdef CONFIG_INTEL_IPU4_AR023Z + &ar023z_crl_sd, + &ar023z_b_crl_sd, +#endif +#ifdef CONFIG_INTEL_IPU4_IMX477 + &imx477_crl_sd_slave_1, + &imx477_crl_sd_master, +#endif +#ifdef CONFIG_INTEL_IPU4_IMX274 + &imx274_crl_sd, + &imx274_b_crl_sd, +#endif +#ifdef CONFIG_INTEL_IPU4_IMX290 + &imx290_crl_sd, +#endif +#ifdef CONFIG_INTEL_IPU4_OV13860 + &ov13860_crl_sd, +#endif +#ifdef CONFIG_INTEL_IPU4_OV9281 + &ov9281_crl_sd, +#endif +#if IS_ENABLED(CONFIG_VIDEO_BU64295) + &bu64295_sd, +#endif +#ifdef CONFIG_INTEL_IPU4_ADV7481 + &adv7481_crl_sd, +#endif +#ifdef CONFIG_INTEL_IPU4_ADV7481_EVAL + &adv7481_eval_crl_sd, + &adv7481b_eval_crl_sd, +#endif +#if IS_ENABLED(CONFIG_VIDEO_AGGREGATOR_STUB) + &video_aggre_stub_sd, + &video_aggre_b_stub_sd, +#endif +#if IS_ENABLED(CONFIG_VIDEO_TI964) + &ti964_sd, + &ti964_sd_2, +#endif +#ifdef CONFIG_INTEL_IPU4_OV2775 + &ov2775_crl_sd, +#endif +#if IS_ENABLED(CONFIG_VIDEO_MAX9286) + &max9286_sd, +#endif + NULL, + }, + .clk_map = clk_mapping, +}; + +static void ipu4_quirk(struct pci_dev *pci_dev) +{ + pci_dev->dev.platform_data = &pdata; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, IPU_PCI_ID, ipu4_quirk); diff --git a/drivers/media/platform/intel/ipu4-ici-bxt-p-pdata.c b/drivers/media/platform/intel/ipu4-ici-bxt-p-pdata.c new file mode 100644 index 000000000000..9d2a2a086395 --- /dev/null +++ b/drivers/media/platform/intel/ipu4-ici-bxt-p-pdata.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) +/* + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include "ipu.h" + +#define GPIO_BASE 422 + +#ifdef CONFIG_INTEL_IPU4_ADV7481 + +#define ADV7481_CVBS_LANES 1 +#define ADV7481_HDMI_LANES 4 +#define ADV7481_HDMI_I2C_ADDRESS 0xe0 +#define ADV7481_CVBS_I2C_ADDRESS 0xe1 +static struct crlmodule_lite_platform_data adv7481_hdmi_pdata_lite = { +#if (!IS_ENABLED(CONFIG_VIDEO_INTEL_UOS)) +// xshutdown GPIO pin unavailable on ACRN UOS + .xshutdown = GPIO_BASE + 63, +#endif + .lanes = ADV7481_HDMI_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "ADV7481 HDMI" +}; +static struct ipu_isys_csi2_config adv7481_hdmi_csi2_cfg = { + .nlanes = ADV7481_HDMI_LANES, + .port = 0, +}; +static struct ipu_isys_subdev_info adv7481_hdmi_crl_sd_lite = { + .csi2 = &adv7481_hdmi_csi2_cfg, + .i2c = { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .flags = I2C_CLIENT_TEN, + .addr = ADV7481_HDMI_I2C_ADDRESS, + .platform_data = &adv7481_hdmi_pdata_lite, + }, + .i2c_adapter_id = CONFIG_INTEL_IPU4_ADV7481_I2C_ID, + } +}; + +static struct crlmodule_lite_platform_data adv7481_cvbs_pdata_lite = { +#if (!IS_ENABLED(CONFIG_VIDEO_INTEL_UOS)) +// xshutdown GPIO pin unavailable on ACRN UOS + .xshutdown = GPIO_BASE + 63, +#endif + .lanes = ADV7481_CVBS_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "ADV7481 CVBS" +}; +static struct ipu_isys_csi2_config adv7481_cvbs_csi2_cfg = { + .nlanes = ADV7481_CVBS_LANES, + .port = 4, +}; +static struct ipu_isys_subdev_info adv7481_cvbs_crl_sd_lite = { + .csi2 = &adv7481_cvbs_csi2_cfg, + .i2c = { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .flags = I2C_CLIENT_TEN, + .addr = ADV7481_CVBS_I2C_ADDRESS, + .platform_data = &adv7481_cvbs_pdata_lite, + }, + .i2c_adapter_id = CONFIG_INTEL_IPU4_ADV7481_I2C_ID, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_ADV7481_EVAL + +#define ADV7481_LANES 4 +//below i2c address is dummy one, 
to be able to register single ADV7481 chip as two sensors +#define ADV7481_I2C_ADDRESS 0xe0 +#define ADV7481B_I2C_ADDRESS 0xe2 + +static struct crlmodule_lite_platform_data adv7481_eval_pdata_lite = { + .xshutdown = GPIO_BASE + 63, + .lanes = ADV7481_HDMI_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "ADV7481_EVAL" +}; +static struct ipu_isys_csi2_config adv7481_eval_csi2_cfg = { + .nlanes = ADV7481_LANES, + .port = 0, +}; +static struct ipu_isys_subdev_info adv7481_eval_crl_sd_lite = { + .csi2 = &adv7481_eval_csi2_cfg, + .i2c = { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .flags = I2C_CLIENT_TEN, + .addr = ADV7481_I2C_ADDRESS, + .platform_data = &adv7481_eval_pdata_lite, + }, + .i2c_adapter_id = 2, + } +}; + +static struct crlmodule_lite_platform_data adv7481b_eval_pdata_lite = { + .xshutdown = GPIO_BASE + 63, + .lanes = ADV7481_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){600000000}, + .module_name = "ADV7481B_EVAL" +}; +static struct ipu_isys_csi2_config adv7481b_eval_csi2_cfg = { + .nlanes = ADV7481_LANES, + .port = 4, +}; +static struct ipu_isys_subdev_info adv7481b_eval_crl_sd_lite = { + .csi2 = &adv7481b_eval_csi2_cfg, + .i2c = { + .board_info = { + .type = CRLMODULE_LITE_NAME, + .flags = I2C_CLIENT_TEN, + .addr = ADV7481B_I2C_ADDRESS, + .platform_data = &adv7481b_eval_pdata_lite, + }, + .i2c_adapter_id = 2, + } +}; +#endif + +#ifdef CONFIG_INTEL_IPU4_MAGNA_TI964 + +#define MAGNA_TI964_MIPI_LANES 4 +#define TI964_I2C_ADDRESS 0x3d +static struct crlmodule_lite_platform_data magna_ti964_pdata = { + .xshutdown = GPIO_BASE + 63, + .lanes = MAGNA_TI964_MIPI_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t []){ 400000000 }, + .module_name = "MAGNA_TI964", +}; +static struct ipu_isys_csi2_config magna_ti964_csi2_cfg = { + .nlanes = MAGNA_TI964_MIPI_LANES, + .port = 0, +}; +static struct ipu_isys_subdev_info magna_ti964_crl_sd = { + .csi2 = &magna_ti964_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO(CRLMODULE_LITE_NAME, TI964_I2C_ADDRESS), + .platform_data = &magna_ti964_pdata, + }, + .i2c_adapter_id = 0, + } +}; + +#endif + +/* + * Map buttress output sensor clocks to sensors - + * this should be coming from ACPI + */ +struct ipu_isys_clk_mapping p_mapping[] = { + { CLKDEV_INIT("0-003d", NULL, NULL), "OSC_CLK_OUT1" }, + { CLKDEV_INIT("0-00e1", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("0-00e0", NULL, NULL), "OSC_CLK_OUT1" }, + { CLKDEV_INIT("2-a0e0", NULL, NULL), "OSC_CLK_OUT0" }, + { CLKDEV_INIT("2-a0e2", NULL, NULL), "OSC_CLK_OUT1" }, + { CLKDEV_INIT(NULL, NULL, NULL), NULL } +}; + +static struct ipu_isys_subdev_pdata pdata = { + .subdevs = (struct ipu_isys_subdev_info *[]) { +#ifdef CONFIG_INTEL_IPU4_ADV7481 + &adv7481_cvbs_crl_sd_lite, + &adv7481_hdmi_crl_sd_lite, +#endif +#ifdef CONFIG_INTEL_IPU4_ADV7481_EVAL + &adv7481_eval_crl_sd_lite, + &adv7481b_eval_crl_sd_lite, +#endif +#ifdef CONFIG_INTEL_IPU4_MAGNA_TI964 + &magna_ti964_crl_sd, +#endif + NULL, + }, + .clk_map = p_mapping, +}; + +static void ipu4_quirk(struct pci_dev *pci_dev) +{ + pci_dev->dev.platform_data = &pdata; +} + +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, IPU_PCI_ID, + ipu4_quirk); diff --git a/drivers/media/platform/intel/ipu4p-cnl-rvp-pdata.c b/drivers/media/platform/intel/ipu4p-cnl-rvp-pdata.c new file mode 100644 index 000000000000..74bfe3260d4b --- /dev/null +++ b/drivers/media/platform/intel/ipu4p-cnl-rvp-pdata.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Intel Corporation + +#include 
+#include +#include +#include +#include + +#include +#include +#include "ipu.h" +#include + +#define IMX355_LANES 4 +#define IMX355_I2C_ADDRESS 0x1a +#define IMX319_LANES 4 +#define IMX319_I2C_ADDRESS 0x10 +#define AK7375_I2C_ADDRESS 0xc + +static struct ipu_isys_csi2_config imx355_csi2_cfg = { + .nlanes = IMX355_LANES, + .port = 4, /* WF camera */ +}; + +static struct ipu_isys_subdev_info imx355_sd = { + .csi2 = &imx355_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO("imx355", IMX355_I2C_ADDRESS), + }, + .i2c_adapter_id = 9, + } +}; + +/* FIXME: Remove this after hardware transition. */ +static struct ipu_isys_subdev_info imx355_sd2 = { + .csi2 = &imx355_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO("imx355", 0x10), + }, + .i2c_adapter_id = 9, + } +}; + +static struct ipu_isys_subdev_info ak7375_sd = { + .i2c = { + .board_info = { + I2C_BOARD_INFO("ak7375", AK7375_I2C_ADDRESS), + }, + .i2c_adapter_id = 9, + } +}; + +static struct ipu_isys_csi2_config imx319_csi2_cfg = { + .nlanes = IMX319_LANES, + .port = 0, /* UF camera */ +}; + +static struct ipu_isys_subdev_info imx319_sd = { + .csi2 = &imx319_csi2_cfg, + .i2c = { + .board_info = { + I2C_BOARD_INFO("imx319", IMX319_I2C_ADDRESS), + }, + .i2c_adapter_id = 8, + } +}; + +#ifdef CONFIG_INTEL_IPU4_AR0231AT +#define AR0231AT_LANES 4 +#define AR0231ATA_I2C_ADDRESS 0x11 +#define AR0231ATB_I2C_ADDRESS 0x12 +#define AR0231ATC_I2C_ADDRESS 0x13 +#define AR0231ATD_I2C_ADDRESS 0x14 + +static struct crlmodule_platform_data ar0231at_pdata = { + .lanes = AR0231AT_LANES, + .ext_clk = 24000000, + .op_sys_clock = (uint64_t[]){ 264000000 }, + .module_name = "AR0231AT", +}; +#endif + +#if IS_ENABLED(CONFIG_VIDEO_MAX9286) +#define DS_MAX9286_LANES 4 +#define DS_MAX9286_I2C_ADAPTER_B 3 +#define DS_MAX9286_I2C_ADDRESS 0x48 + +static struct ipu_isys_csi2_config max9286_b_csi2_cfg = { + .nlanes = DS_MAX9286_LANES, + .port = 4, +}; + +struct max9286_subdev_i2c_info max9286_b_subdevs[] = { +#ifdef CONFIG_INTEL_IPU4_AR0231AT + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = AR0231ATA_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER_B, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = AR0231ATB_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER_B, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = AR0231ATC_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER_B, + }, + { + .board_info = { + .type = CRLMODULE_NAME, + .addr = AR0231ATD_I2C_ADDRESS, + .platform_data = &ar0231at_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER_B, + }, +#endif +}; + +static struct max9286_pdata max9286_b_pdata = { + .subdev_info = max9286_b_subdevs, + .subdev_num = ARRAY_SIZE(max9286_b_subdevs), + .reset_gpio = 195, +}; + +static struct ipu_isys_subdev_info max9286_b_sd = { + .csi2 = &max9286_b_csi2_cfg, + .i2c = { + .board_info = { + .type = "max9286", + .addr = DS_MAX9286_I2C_ADDRESS, + .platform_data = &max9286_b_pdata, + }, + .i2c_adapter_id = DS_MAX9286_I2C_ADAPTER_B, + } +}; +#endif + +static struct ipu_isys_clk_mapping clk_mapping[] = { + { CLKDEV_INIT("3-0048", NULL, NULL), "OSC_CLK_OUT1" }, + { CLKDEV_INIT(NULL, NULL, NULL), NULL } +}; + +static struct ipu_isys_subdev_pdata pdata = { + .subdevs = (struct ipu_isys_subdev_info *[]) { + &imx355_sd, + &imx355_sd2, + &imx319_sd, + &ak7375_sd, +#if IS_ENABLED(CONFIG_VIDEO_MAX9286) + &max9286_b_sd, +#endif + NULL, + }, + .clk_map = clk_mapping, 
+}; + +static void ipu4p_quirk(struct pci_dev *pci_dev) +{ + pr_info("Intel platform data PCI quirk for IPU4P CNL\n"); + pci_dev->dev.platform_data = &pdata; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, IPU_PCI_ID, ipu4p_quirk); + +MODULE_AUTHOR("Bingbu Cao "); +MODULE_AUTHOR("Qiu, Tianshu "); +MODULE_AUTHOR("Kun Jiang "); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c index 46768c056193..0c28d0b995cc 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c @@ -115,3 +115,6 @@ struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *dev) return ctx; } EXPORT_SYMBOL(mtk_vcodec_get_curr_ctx); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Mediatek video codec driver"); diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 1a428fe9f070..9f023bc6e1b7 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -1945,6 +1945,7 @@ static int isp_initialize_modules(struct isp_device *isp) static void isp_detach_iommu(struct isp_device *isp) { + arm_iommu_detach_device(isp->dev); arm_iommu_release_mapping(isp->mapping); isp->mapping = NULL; } @@ -1961,8 +1962,7 @@ static int isp_attach_iommu(struct isp_device *isp) mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G); if (IS_ERR(mapping)) { dev_err(isp->dev, "failed to create ARM IOMMU mapping\n"); - ret = PTR_ERR(mapping); - goto error; + return PTR_ERR(mapping); } isp->mapping = mapping; @@ -1977,7 +1977,8 @@ static int isp_attach_iommu(struct isp_device *isp) return 0; error: - isp_detach_iommu(isp); + arm_iommu_release_mapping(isp->mapping); + isp->mapping = NULL; return ret; } diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c index b22d2dfcd3c2..55232a912950 100644 --- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c +++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c @@ -622,6 +622,9 @@ static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output, reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN; if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16) reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA; + } else { + /* On current devices output->wm_num is always <= 2 */ + break; } if (output->wm_idx[i] % 2 == 1) diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h index cba092bcb76d..a0fe80df0cbd 100644 --- a/drivers/media/platform/qcom/venus/core.h +++ b/drivers/media/platform/qcom/venus/core.h @@ -194,7 +194,6 @@ struct venus_buffer { * @fh: a holder of v4l file handle structure * @streamon_cap: stream on flag for capture queue * @streamon_out: stream on flag for output queue - * @cmd_stop: a flag to signal encoder/decoder commands * @width: current capture width * @height: current capture height * @out_width: current output width @@ -258,7 +257,6 @@ struct venus_inst { } controls; struct v4l2_fh fh; unsigned int streamon_cap, streamon_out; - bool cmd_stop; u32 width; u32 height; u32 out_width; diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c index 9b2a401a4891..0ce9559a2924 100644 --- a/drivers/media/platform/qcom/venus/helpers.c +++ b/drivers/media/platform/qcom/venus/helpers.c @@ -623,13 +623,6 @@ void venus_helper_vb2_buf_queue(struct vb2_buffer *vb) mutex_lock(&inst->lock); 
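
The venus hunks here drop the driver-internal cmd_stop flag: rather than faking a LAST buffer in the queue path, the decoder forwards an EOS-flagged empty buffer to the firmware, and userspace sees the end of stream through the standard V4L2 machinery. A rough sketch of the client side, assuming video_fd is an open decoder node:

	struct v4l2_decoder_cmd cmd = { .cmd = V4L2_DEC_CMD_STOP };

	if (ioctl(video_fd, VIDIOC_DECODER_CMD, &cmd) < 0)
		perror("VIDIOC_DECODER_CMD");
	/*
	 * Keep dequeuing CAPTURE buffers; the one carrying
	 * V4L2_BUF_FLAG_LAST ends the sequence, and a V4L2_EVENT_EOS
	 * event is raised as well.
	 */
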
- if (inst->cmd_stop) { - vbuf->flags |= V4L2_BUF_FLAG_LAST; - v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE); - inst->cmd_stop = false; - goto unlock; - } - v4l2_m2m_buf_queue(m2m_ctx, vbuf); if (!(inst->streamon_out & inst->streamon_cap)) diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c index c09490876516..ba29fd4d4984 100644 --- a/drivers/media/platform/qcom/venus/hfi.c +++ b/drivers/media/platform/qcom/venus/hfi.c @@ -484,6 +484,7 @@ int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd) return -EINVAL; } +EXPORT_SYMBOL_GPL(hfi_session_process_buf); irqreturn_t hfi_isr_thread(int irq, void *dev_id) { diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c index 1caae8feaa36..734ce11b0ed0 100644 --- a/drivers/media/platform/qcom/venus/hfi_venus.c +++ b/drivers/media/platform/qcom/venus/hfi_venus.c @@ -344,7 +344,7 @@ static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc, desc->attrs = DMA_ATTR_WRITE_COMBINE; desc->size = ALIGN(size, SZ_4K); - desc->kva = dma_alloc_attrs(dev, size, &desc->da, GFP_KERNEL, + desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL, desc->attrs); if (!desc->kva) return -ENOMEM; @@ -710,10 +710,8 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev) if (ret) return ret; - hdev->ifaceq_table.kva = desc.kva; - hdev->ifaceq_table.da = desc.da; - hdev->ifaceq_table.size = IFACEQ_TABLE_SIZE; - offset = hdev->ifaceq_table.size; + hdev->ifaceq_table = desc; + offset = IFACEQ_TABLE_SIZE; for (i = 0; i < IFACEQ_NUM; i++) { queue = &hdev->queues[i]; @@ -755,9 +753,7 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev) if (ret) { hdev->sfr.da = 0; } else { - hdev->sfr.da = desc.da; - hdev->sfr.kva = desc.kva; - hdev->sfr.size = ALIGNED_SFR_SIZE; + hdev->sfr = desc; sfr = hdev->sfr.kva; sfr->buf_size = ALIGNED_SFR_SIZE; } diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index da611a5eb670..c9e9576bb08a 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -469,8 +469,14 @@ static int vdec_subscribe_event(struct v4l2_fh *fh, static int vdec_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd) { - if (cmd->cmd != V4L2_DEC_CMD_STOP) + switch (cmd->cmd) { + case V4L2_DEC_CMD_STOP: + if (cmd->flags & V4L2_DEC_CMD_STOP_TO_BLACK) + return -EINVAL; + break; + default: return -EINVAL; + } return 0; } @@ -479,6 +485,7 @@ static int vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd) { struct venus_inst *inst = to_inst(file); + struct hfi_frame_data fdata = {0}; int ret; ret = vdec_try_decoder_cmd(file, fh, cmd); @@ -486,12 +493,23 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd) return ret; mutex_lock(&inst->lock); - inst->cmd_stop = true; - mutex_unlock(&inst->lock); - hfi_session_flush(inst); + /* + * Implement V4L2_DEC_CMD_STOP by enqueue an empty buffer on decoder + * input to signal EOS. 
+ */ + if (!(inst->streamon_out & inst->streamon_cap)) + goto unlock; + + fdata.buffer_type = HFI_BUFFER_INPUT; + fdata.flags |= HFI_BUFFERFLAG_EOS; + fdata.device_addr = 0xdeadbeef; - return 0; + ret = hfi_session_process_buf(inst, &fdata); + +unlock: + mutex_unlock(&inst->lock); + return ret; } static const struct v4l2_ioctl_ops vdec_ioctl_ops = { @@ -718,7 +736,6 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count) inst->reconfig = false; inst->sequence_cap = 0; inst->sequence_out = 0; - inst->cmd_stop = false; ret = vdec_init_session(inst); if (ret) @@ -807,11 +824,6 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type, vb->timestamp = timestamp_us * NSEC_PER_USEC; vbuf->sequence = inst->sequence_cap++; - if (inst->cmd_stop) { - vbuf->flags |= V4L2_BUF_FLAG_LAST; - inst->cmd_stop = false; - } - if (vbuf->flags & V4L2_BUF_FLAG_LAST) { const struct v4l2_event ev = { .type = V4L2_EVENT_EOS }; diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c index 6f123a387cf9..3fcf0e9b7b29 100644 --- a/drivers/media/platform/qcom/venus/venc.c +++ b/drivers/media/platform/qcom/venus/venc.c @@ -963,13 +963,12 @@ static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type, if (!vbuf) return; - vb = &vbuf->vb2_buf; - vb->planes[0].bytesused = bytesused; - vb->planes[0].data_offset = data_offset; - vbuf->flags = flags; if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + vb = &vbuf->vb2_buf; + vb2_set_plane_payload(vb, 0, bytesused + data_offset); + vb->planes[0].data_offset = data_offset; vb->timestamp = timestamp_us * NSEC_PER_USEC; vbuf->sequence = inst->sequence_cap++; } else { diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c index 070bac36d766..2e2b8c409150 100644 --- a/drivers/media/platform/rcar_jpu.c +++ b/drivers/media/platform/rcar_jpu.c @@ -1280,7 +1280,7 @@ static int jpu_open(struct file *file) /* ...issue software reset */ ret = jpu_reset(jpu); if (ret) - goto device_prepare_rollback; + goto jpu_reset_rollback; } jpu->ref_count++; @@ -1288,6 +1288,8 @@ static int jpu_open(struct file *file) mutex_unlock(&jpu->mutex); return 0; +jpu_reset_rollback: + clk_disable_unprepare(jpu->clk); device_prepare_rollback: mutex_unlock(&jpu->mutex); v4l_prepare_rollback: diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c index 25c7a7d42292..0f0324a14d51 100644 --- a/drivers/media/platform/s3c-camif/camif-capture.c +++ b/drivers/media/platform/s3c-camif/camif-capture.c @@ -1256,16 +1256,17 @@ static void __camif_subdev_try_format(struct camif_dev *camif, { const struct s3c_camif_variant *variant = camif->variant; const struct vp_pix_limits *pix_lim; - int i = ARRAY_SIZE(camif_mbus_formats); + unsigned int i; /* FIXME: constraints against codec or preview path ? 
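
The s3c-camif hunk below fixes an out-of-bounds read, not just style. Condensed, the old search was:

	int i = ARRAY_SIZE(camif_mbus_formats);

	while (i-- >= 0)	/* final pass: tests 0 >= 0, leaves i == -1 */
		if (camif_mbus_formats[i] == mf->code)
			break;
	mf->code = camif_mbus_formats[i];

When no code matched, the last loop iteration compared camif_mbus_formats[-1], and the exit test decremented i once more, so the assignment read camif_mbus_formats[-2]. The bounded for-loop with an explicit fallback to camif_mbus_formats[0] removes both stray reads.
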
*/
 	pix_lim = &variant->vp_pix_limits[VP_CODEC];
 
-	while (i-- >= 0)
+	for (i = 0; i < ARRAY_SIZE(camif_mbus_formats); i++)
 		if (camif_mbus_formats[i] == mf->code)
 			break;
 
-	mf->code = camif_mbus_formats[i];
+	if (i == ARRAY_SIZE(camif_mbus_formats))
+		mf->code = camif_mbus_formats[0];
 
 	if (pad == CAMIF_SD_PAD_SINK) {
 		v4l_bound_align_image(&mf->width, 8, CAMIF_MAX_PIX_WIDTH,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 1afde5021ca6..8e9531f7f83f 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -1315,6 +1315,12 @@ static int s5p_mfc_probe(struct platform_device *pdev)
 		goto err_dma;
 	}
 
+	/*
+	 * Load fails if fs isn't mounted. Try loading anyway.
+	 * _open() will load it, if it fails now. Ignore failure.
+	 */
+	s5p_mfc_load_firmware(dev);
+
 	mutex_init(&dev->mfc_mutex);
 	init_waitqueue_head(&dev->queue);
 	dev->hw_lock = 0;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 4220914529b2..76119a8cc477 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -290,6 +290,8 @@ struct s5p_mfc_priv_buf {
  * @mfc_cmds:	cmd structure holding HW commands function pointers
  * @mfc_regs:	structure holding MFC registers
  * @fw_ver:	loaded firmware sub-version
+ * @fw_get_done:	flag set when request_firmware() is complete and
+ *			copied into fw_buf
  * risc_on:	flag indicates RISC is on or off
  *
  */
@@ -336,6 +338,7 @@ struct s5p_mfc_dev {
 	struct s5p_mfc_hw_cmds *mfc_cmds;
 	const struct s5p_mfc_regs *mfc_regs;
 	enum s5p_mfc_fw_ver fw_ver;
+	bool fw_get_done;
 	bool risc_on;			/* indicates if RISC is on or off */
 };
 
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index 69ef9c23a99a..d94e59e79fe9 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -55,6 +55,9 @@ int s5p_mfc_load_firmware(struct s5p_mfc_dev *dev)
 	 * into kernel.
*/ mfc_debug_enter(); + if (dev->fw_get_done) + return 0; + for (i = MFC_FW_MAX_VERSIONS - 1; i >= 0; i--) { if (!dev->variant->fw_name[i]) continue; @@ -82,6 +85,7 @@ int s5p_mfc_load_firmware(struct s5p_mfc_dev *dev) } memcpy(dev->fw_buf.virt, fw_blob->data, fw_blob->size); wmb(); + dev->fw_get_done = true; release_firmware(fw_blob); mfc_debug_leave(); return 0; @@ -93,6 +97,7 @@ int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev) /* Before calling this function one has to make sure * that MFC is no longer processing */ s5p_mfc_release_priv_buf(dev, &dev->fw_buf); + dev->fw_get_done = false; return 0; } diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c index 0116097c0c0f..092c73f24589 100644 --- a/drivers/media/platform/soc_camera/soc_scale_crop.c +++ b/drivers/media/platform/soc_camera/soc_scale_crop.c @@ -419,3 +419,7 @@ void soc_camera_calc_client_output(struct soc_camera_device *icd, mf->height = soc_camera_shift_scale(rect->height, shift, scale_v); } EXPORT_SYMBOL(soc_camera_calc_client_output); + +MODULE_DESCRIPTION("soc-camera scaling-cropping functions"); +MODULE_AUTHOR("Guennadi Liakhovetski "); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c index 59280ac31937..23d0cedf4d9d 100644 --- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c +++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c @@ -83,7 +83,7 @@ static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei) static void channel_swdemux_tsklet(unsigned long data) { struct channel_info *channel = (struct channel_info *)data; - struct c8sectpfei *fei = channel->fei; + struct c8sectpfei *fei; unsigned long wp, rp; int pos, num_packets, n, size; u8 *buf; @@ -91,6 +91,8 @@ static void channel_swdemux_tsklet(unsigned long data) if (unlikely(!channel || !channel->irec)) return; + fei = channel->fei; + wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0)); rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0)); diff --git a/drivers/media/platform/video-aggre-stub.c b/drivers/media/platform/video-aggre-stub.c new file mode 100644 index 000000000000..df2d4b188d0f --- /dev/null +++ b/drivers/media/platform/video-aggre-stub.c @@ -0,0 +1,520 @@ +/* + * Copyright (c) 2016--2018 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "video-aggre-stub.h" + +/* + * Order matters. + * + * 1. Bits-per-pixel, descending. + * 2. Bits-per-pixel compressed, descending. + * 3. Pixel order, same as in pixel_order_str. Formats for all four pixel + * orders must be defined. 
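
A practical consequence of rule 3 is that the four Bayer variants of one bit depth occupy consecutive, four-aligned slots in the table below, so code can hop between pixel orders by index arithmetic. A hypothetical helper relying on that layout and on the to_csi_format_idx() macro defined after the table:

	/* sibling format: same bit depth, different Bayer order (0..3) */
	static u32 va_code_with_order(const struct video_aggre_csi_data_format *f,
				      unsigned int order)
	{
		unsigned long base = to_csi_format_idx(f) & ~3UL;

		return va_csi_data_formats[base + order].code;
	}
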
+ */ +static const struct video_aggre_csi_data_format va_csi_data_formats[] = { + { MEDIA_BUS_FMT_RGB888_1X24, 24, 24, PIXEL_ORDER_GBRG, 0x24 }, + { MEDIA_BUS_FMT_RGB565_1X16, 16, 16, PIXEL_ORDER_GBRG, 0x22 }, + { MEDIA_BUS_FMT_YUYV8_1X16, 16, 16, PIXEL_ORDER_GBRG, 0x1e }, + { MEDIA_BUS_FMT_UYVY8_1X16, 16, 16, PIXEL_ORDER_GBRG, 0x1e }, + { MEDIA_BUS_FMT_SGRBG12_1X12, 12, 12, PIXEL_ORDER_GRBG, 0x2c }, + { MEDIA_BUS_FMT_SRGGB12_1X12, 12, 12, PIXEL_ORDER_RGGB, 0x2c }, + { MEDIA_BUS_FMT_SBGGR12_1X12, 12, 12, PIXEL_ORDER_BGGR, 0x2c }, + { MEDIA_BUS_FMT_SGBRG12_1X12, 12, 12, PIXEL_ORDER_GBRG, 0x2c }, + { MEDIA_BUS_FMT_SGRBG10_1X10, 10, 10, PIXEL_ORDER_GRBG, 0x2b }, + { MEDIA_BUS_FMT_SRGGB10_1X10, 10, 10, PIXEL_ORDER_RGGB, 0x2b }, + { MEDIA_BUS_FMT_SBGGR10_1X10, 10, 10, PIXEL_ORDER_BGGR, 0x2b }, + { MEDIA_BUS_FMT_SGBRG10_1X10, 10, 10, PIXEL_ORDER_GBRG, 0x2b }, + { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, 10, 8, PIXEL_ORDER_GRBG, 0x2b }, + { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, 10, 8, PIXEL_ORDER_RGGB, 0x2b }, + { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, 10, 8, PIXEL_ORDER_BGGR, 0x2b }, + { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, 10, 8, PIXEL_ORDER_GBRG, 0x2b }, + { MEDIA_BUS_FMT_SGRBG8_1X8, 8, 8, PIXEL_ORDER_GRBG, 0x2a }, + { MEDIA_BUS_FMT_SRGGB8_1X8, 8, 8, PIXEL_ORDER_RGGB, 0x2a }, + { MEDIA_BUS_FMT_SBGGR8_1X8, 8, 8, PIXEL_ORDER_BGGR, 0x2a }, + { MEDIA_BUS_FMT_SGBRG8_1X8, 8, 8, PIXEL_ORDER_GBRG, 0x2a }, +}; + +static const char * const pixel_order_str[] = { "GRBG", "RGGB", "BGGR", "GBRG" }; + +#define to_csi_format_idx(fmt) (((unsigned long)(fmt) \ + - (unsigned long)va_csi_data_formats) \ + / sizeof(*va_csi_data_formats)) + +static const uint32_t video_aggre_supported_codes_pad[] = { + MEDIA_BUS_FMT_RGB888_1X24, + MEDIA_BUS_FMT_RGB565_1X16, + MEDIA_BUS_FMT_YUYV8_1X16, + MEDIA_BUS_FMT_UYVY8_1X16, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + 0, +}; + +static const uint32_t *video_aggre_supported_codes[] = { + video_aggre_supported_codes_pad, +}; + +static void __dump_v4l2_mbus_framefmt(struct v4l2_subdev *sd, + struct v4l2_mbus_framefmt *ffmt) +{ + dev_dbg(sd->dev, "framefmt: width: %d, height: %d, code: 0x%x.\n", + ffmt->width, ffmt->height, ffmt->code); +} +static void __dump_v4l2_subdev_format(struct v4l2_subdev *sd, + struct v4l2_subdev_format *fmt) +{ + dev_dbg(sd->dev, "subdev_format: which: %s, pad: %d, stream: %d.\n", + fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE ? 
+ "V4L2_SUBDEV_FORMAT_ACTIVE" : "V4L2_SUBDEV_FORMAT_TRY", + fmt->pad, fmt->stream); + + __dump_v4l2_mbus_framefmt(sd, &fmt->format); +} + +static int video_aggre_get_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route) +{ + struct video_aggregator *va = to_video_aggre(sd); + int i, j; + + for (i = 0, j = 0; i < min(va->nstreams, route->num_routes); ++i) { + route->routes[j].sink_pad = 0; + route->routes[j].sink_stream = 0; + route->routes[j].source_pad = VA_PAD_SOURCE; + route->routes[j].source_stream = i; + route->routes[j++].flags = va->flags[i]; + } + + route->num_routes = j; + + return 0; +} + +static int video_aggre_set_routing(struct v4l2_subdev *sd, + struct v4l2_subdev_routing *route) +{ + struct video_aggregator *va = to_video_aggre(sd); + int i, ret = 0; + + for (i = 0; i < min(route->num_routes, va->nstreams); ++i) { + struct v4l2_subdev_route *t = &route->routes[i]; + + if (t->sink_stream > va->nstreams - 1 || + t->source_stream > va->nstreams - 1) { + continue; + } + + if (t->source_pad != VA_PAD_SOURCE) + continue; + + if (t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE) + va->flags[t->source_stream] |= + V4L2_SUBDEV_ROUTE_FL_ACTIVE; + else if (!(t->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) + va->flags[t->source_stream] &= + (~V4L2_SUBDEV_ROUTE_FL_ACTIVE); + } + + return ret; +} + +static int video_aggre_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct video_aggregator *va = to_video_aggre(sd); + const uint32_t *supported_code = video_aggre_supported_codes[code->pad]; + bool next_stream = false; + int i; + + if (code->stream & V4L2_SUBDEV_FLAG_NEXT_STREAM) { + next_stream = true; + code->stream &= ~V4L2_SUBDEV_FLAG_NEXT_STREAM; + } + + if (code->stream > va->nstreams) + return -EINVAL; + + if (next_stream) { + if (code->stream < va->nstreams - 1) { + code->stream++; + return 0; + } else { + return -EINVAL; + } + } + + for (i = 0; supported_code[i]; i++) { + if (i == code->index) { + code->code = supported_code[i]; + return 0; + } + } + + return -EINVAL; +} + +static const struct video_aggre_csi_data_format + *video_aggre_validate_csi_data_format(u32 code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(va_csi_data_formats); i++) { + if (va_csi_data_formats[i].code == code) + return &va_csi_data_formats[i]; + } + + return &va_csi_data_formats[0]; +} + +static int video_aggre_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_frame_desc *desc) +{ + struct video_aggregator *va = to_video_aggre(sd); + struct v4l2_mbus_frame_desc_entry *entry = desc->entry; + u8 vc = 0; + int i; + + desc->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2; + + for (i = 0; i < min_t(unsigned int, va->nstreams, desc->num_entries); + i++) { + struct v4l2_mbus_framefmt *ffmt = + &va->ffmts[i][VA_PAD_SOURCE]; + const struct video_aggre_csi_data_format *csi_format = + video_aggre_validate_csi_data_format(ffmt->code); + + entry->size.two_dim.width = ffmt->width; + entry->size.two_dim.height = ffmt->height; + entry->pixelcode = ffmt->code; + entry->bus.csi2.channel = vc++; + entry->bpp = csi_format->compressed; + entry++; + desc->num_entries++; + } + + return 0; +} + +static struct v4l2_mbus_framefmt * +__video_aggre_get_ffmt(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, unsigned int which, + unsigned int stream) +{ + struct video_aggregator *va = to_video_aggre(subdev); + + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(subdev, cfg, pad); 
+ else + return &va->ffmts[stream][pad]; +} + +static int video_aggre_get_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct video_aggregator *va = to_video_aggre(subdev); + + if (fmt->stream >= va->nstreams) + return -EINVAL; + + mutex_lock(&va->mutex); + fmt->format = *__video_aggre_get_ffmt(subdev, cfg, fmt->pad, fmt->which, + fmt->stream); + mutex_unlock(&va->mutex); + + __dump_v4l2_subdev_format(subdev, fmt); + + return 0; +} + +static int video_aggre_set_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct video_aggregator *va = to_video_aggre(subdev); + const struct video_aggre_csi_data_format *csi_format; + struct v4l2_mbus_framefmt *ffmt; + + if (fmt->stream >= va->nstreams) + return -EINVAL; + + csi_format = video_aggre_validate_csi_data_format(fmt->format.code); + + mutex_lock(&va->mutex); + ffmt = __video_aggre_get_ffmt(subdev, cfg, fmt->pad, fmt->which, + fmt->stream); + + if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) { + ffmt->width = fmt->format.width; + ffmt->height = fmt->format.height; + ffmt->code = csi_format->code; + } + fmt->format = *ffmt; + mutex_unlock(&va->mutex); + + __dump_v4l2_mbus_framefmt(subdev, ffmt); + + return 0; +} + +static int video_aggre_open(struct v4l2_subdev *subdev, + struct v4l2_subdev_fh *fh) +{ + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format(subdev, fh->pad, 0); + + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_TRY, + .pad = VA_PAD_SOURCE, + .format = { + .width = 1920, + .height = 1080, + .code = MEDIA_BUS_FMT_RGB888_1X24, + }, + .stream = 0, + }; + + *try_fmt = fmt.format; + + return 0; +} + +static int stub_set_stream(struct v4l2_subdev *subdev, int enable) +{ + struct video_aggregator *va = to_video_aggre(subdev); + + if (enable) { + dev_dbg(va->sd.dev, "Streaming is ON\n"); + va->streaming = 1; + } else { + dev_dbg(va->sd.dev, "Streaming is OFF\n"); + va->streaming = 0; + } + + return 0; +} + +static struct v4l2_subdev_internal_ops video_aggre_sd_internal_ops = { + .open = video_aggre_open, +}; + +static const struct v4l2_subdev_video_ops video_aggre_sd_video_ops = { + .s_stream = stub_set_stream, +}; + +static const struct v4l2_subdev_core_ops video_aggre_core_subdev_ops = { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + .g_ctrl = v4l2_subdev_g_ctrl, + .s_ctrl = v4l2_subdev_s_ctrl, + .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, + .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, + .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, + .queryctrl = v4l2_subdev_queryctrl, +#endif +}; + +static int video_aggre_s_ctrl(struct v4l2_ctrl *ctrl) +{ + return 0; +} + +static const struct v4l2_ctrl_ops video_aggre_ctrl_ops = { + .s_ctrl = video_aggre_s_ctrl, +}; + +static const s64 video_aggre_op_sys_clock[] = {400000000, }; +static const struct v4l2_ctrl_config video_aggre_controls[] = { + { + .ops = &video_aggre_ctrl_ops, + .id = V4L2_CID_LINK_FREQ, + .name = "V4L2_CID_LINK_FREQ", + .type = V4L2_CTRL_TYPE_INTEGER_MENU, + .max = ARRAY_SIZE(video_aggre_op_sys_clock) - 1, + .min = 0, + .step = 1, + .def = 0, + .qmenu_int = video_aggre_op_sys_clock, + } +}; + +static const struct v4l2_subdev_pad_ops video_aggre_sd_pad_ops = { + .get_fmt = video_aggre_get_format, + .set_fmt = video_aggre_set_format, + .get_frame_desc = video_aggre_get_frame_desc, + .enum_mbus_code = video_aggre_enum_mbus_code, + .set_routing = video_aggre_set_routing, + .get_routing = video_aggre_get_routing, +}; + +static struct
v4l2_subdev_ops video_aggre_sd_ops = { + .video = &video_aggre_sd_video_ops, + .pad = &video_aggre_sd_pad_ops, +}; + +static int video_aggre_register_subdev(struct video_aggregator *va) +{ + int rval; + const struct v4l2_ctrl_config *ctrl = video_aggre_controls; + struct i2c_client *client = v4l2_get_subdevdata(&va->sd); + + v4l2_subdev_init(&va->sd, &video_aggre_sd_ops); + snprintf(va->sd.name, sizeof(va->sd.name), "Video Aggregator 0x%x", + client->addr); + + va->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | + V4L2_SUBDEV_FL_HAS_SUBSTREAMS; + + va->sd.internal_ops = &video_aggre_sd_internal_ops; + + v4l2_set_subdevdata(&va->sd, va); + + v4l2_ctrl_handler_init(&va->ctrl_handler, + ARRAY_SIZE(video_aggre_controls)); + + if (va->ctrl_handler.error) { + dev_err(va->sd.dev, + "Failed to init video aggre controls. ERR: %d!\n", + va->ctrl_handler.error); + return va->ctrl_handler.error; + } + + va->sd.ctrl_handler = &va->ctrl_handler; + v4l2_ctrl_handler_setup(&va->ctrl_handler); + + va->link_freq = v4l2_ctrl_new_int_menu(&va->ctrl_handler, + &video_aggre_ctrl_ops, + ctrl->id, ctrl->max, + ctrl->def, ctrl->qmenu_int); + + va->pad.flags = MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MULTIPLEX; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = media_entity_init(&va->sd.entity, 1, &va->pad, 0); +#else + rval = media_entity_pads_init(&va->sd.entity, 1, &va->pad); +#endif + + if (rval) { + dev_err(va->sd.dev, + "Failed to init media entity video aggre!\n"); + return rval; + } + + return 0; +} + +static int video_aggre_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct video_aggregator *va; + int i, rval = 0; + + va = devm_kzalloc(&client->dev, sizeof(*va), GFP_KERNEL); + if (!va) { + dev_err(&client->dev, "Failed to alloc va structure\n"); + return -ENOMEM; + } + + va->nsources = NR_OF_VA_SOURCE_PADS; + va->nsinks = NR_OF_VA_SINK_PADS; + va->npads = NR_OF_VA_PADS; + va->nstreams = NR_OF_VA_STREAMS; + + for (i = 0; i < va->nstreams; i++) { + va->ffmts[i] = devm_kcalloc(&client->dev, va->npads, + sizeof(struct v4l2_mbus_framefmt), + GFP_KERNEL); + if (!va->ffmts[i]) + return -ENOMEM; + } + + va->crop = devm_kcalloc(&client->dev, va->npads, + sizeof(struct v4l2_rect), GFP_KERNEL); + + va->compose = devm_kcalloc(&client->dev, va->npads, + sizeof(struct v4l2_rect), GFP_KERNEL); + + va->flags = devm_kcalloc(&client->dev, va->nstreams, + sizeof(*va->flags), GFP_KERNEL); + + if (!va->crop || !va->compose || !va->flags) + return -ENOMEM; + + for (i = 0; i < va->nstreams; i++) + va->flags[i] = V4L2_SUBDEV_ROUTE_FL_SOURCE; + + mutex_init(&va->mutex); + v4l2_i2c_subdev_init(&va->sd, client, &video_aggre_sd_ops); + rval = video_aggre_register_subdev(va); + if (rval) { + dev_err(&client->dev, "Failed to register va subdevice!\n"); + return rval; + } + + return 0; +} + +static int video_aggre_remove(struct i2c_client *client) +{ + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct video_aggregator *va = to_video_aggre(subdev); + + mutex_destroy(&va->mutex); + v4l2_device_unregister_subdev(&va->sd); + + return 0; +} + +static const struct i2c_device_id video_aggre_id_table[] = { + { VIDEO_AGGRE_NAME, 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, video_aggre_id_table); + +static struct i2c_driver video_aggre_i2c_driver = { + .driver = { + .name = VIDEO_AGGRE_NAME, + }, + .probe = video_aggre_probe, + .remove = video_aggre_remove, + .id_table = video_aggre_id_table, +}; +module_i2c_driver(video_aggre_i2c_driver); + +MODULE_AUTHOR("Jianxu Zheng "); +MODULE_LICENSE("GPL"); 
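+ +/* + * Userspace usage sketch (illustrative only, not part of the driver): the + * per-stream routes on the multiplexed source pad are toggled through the + * subdev routing ioctls backed by video_aggre_get_routing() and + * video_aggre_set_routing() above: + * + *	struct v4l2_subdev_route r = { + *		.sink_pad = 0, .sink_stream = 0, + *		.source_pad = VA_PAD_SOURCE, .source_stream = 2, + *		.flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE, + *	}; + *	struct v4l2_subdev_routing routing = { .num_routes = 1, .routes = &r }; + *	ioctl(subdev_fd, VIDIOC_SUBDEV_S_ROUTING, &routing); + * + * VIDIOC_SUBDEV_S_ROUTING is assumed to be the ioctl wired to the + * set_routing pad op in this tree; the field names follow the routing + * structures used above. + */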
+MODULE_DESCRIPTION("Intel Dummy CSI2-Aggregator driver"); diff --git a/drivers/media/platform/video-aggre-stub.h b/drivers/media/platform/video-aggre-stub.h new file mode 100644 index 000000000000..cc57ad2e5229 --- /dev/null +++ b/drivers/media/platform/video-aggre-stub.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2016--2018 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef VIDEO_AGGRE_H +#define VIDEO_AGGRE_H + +#include +#include +#include + +#define VIDEO_AGGRE_NAME "video-aggre" + +#define PIXEL_ORDER_GRBG 0 +#define PIXEL_ORDER_RGGB 1 +#define PIXEL_ORDER_BGGR 2 +#define PIXEL_ORDER_GBRG 3 + +#define NR_OF_VA_STREAMS 4 +#define NR_OF_VA_SOURCE_PADS 1 +#define NR_OF_VA_SINK_PADS 0 +#define NR_OF_VA_PADS (NR_OF_VA_SOURCE_PADS + NR_OF_VA_SINK_PADS) + +#define VA_PAD_SOURCE 0 + +#define VIDEO_AGGRE_MIN_WIDTH 640 +#define VIDEO_AGGRE_MIN_HEIGHT 480 +#define VIDEO_AGGRE_MAX_WIDTH 1920 +#define VIDEO_AGGRE_MAX_HEIGHT 1080 + +struct video_aggre_csi_data_format { + u32 code; + u8 width; + u8 compressed; + u8 pixel_order; + u8 mipi_dt_code; +}; + +struct video_aggregator { + struct v4l2_subdev sd; + struct media_pad pad; + struct v4l2_ctrl_handler ctrl_handler; + const char *name; + + bool streaming; + struct mutex mutex; + + struct v4l2_mbus_framefmt *ffmts[NR_OF_VA_STREAMS]; + struct rect *crop; + struct rect *compose; + + unsigned int *flags; + + unsigned int nsinks; + unsigned int nsources; + unsigned int nstreams; + unsigned int npads; + + struct v4l2_ctrl *link_freq; +}; + +#define to_video_aggre(_sd) \ + container_of(_sd, struct video_aggregator, sd) +#endif diff --git a/drivers/media/platform/video-iris.c b/drivers/media/platform/video-iris.c new file mode 100644 index 000000000000..ff9ffcae4498 --- /dev/null +++ b/drivers/media/platform/video-iris.c @@ -0,0 +1,465 @@ +/* + * Copyright (c) 2016--2018 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include "video-iris.h" + +static int camera_iris_set_pwm(struct camera_iris *iris, int val) +{ + struct camera_pwm_info *pwm = &iris->camera_pwm; + struct device *pdev = &iris->pdev->dev; + int rval; + + if (val > CAMERA_MAX_DUTY) { + dev_err(pdev, "Invalid pwm duty (>1000)!\n"); + return -EINVAL; + } + + pwm->duty_period = CAMERA_PWM_PERIOD + - (CAMERA_PWM_PERIOD * val / CAMERA_MAX_DUTY); + + rval = pwm_config(pwm->pwm, pwm->duty_period, CAMERA_PWM_PERIOD); + if (rval) { + dev_err(pdev, "Failed to configure camera PWM!\n"); + return rval; + } + + if (val == 0) { + pwm_disable(pwm->pwm); + return 0; + } + + rval = pwm_enable(pwm->pwm); + if (rval) + dev_err(pdev, "Failed to enable camera PWM!\n"); + + return rval; +} + +static void camera_piris_pwm1_enable(struct camera_iris *iris, int enable) +{ + gpio_set_value(iris->pdata->gpio_pwm1, enable); +} + +static void camera_piris_pwm2_enable(struct camera_iris *iris, int enable) +{ + gpio_set_value(iris->pdata->gpio_pwm2, enable); +} + +static void camera_piris_forward(struct camera_iris *iris, int step) +{ + int i; + + if (step < 0 || step > CAMERA_PIRIS_MAX_STEP) + return; + + for (i = 0; i < step; i++) { + /* Phase 7->5->3->1 (close->open) */ + camera_piris_pwm1_enable(iris, 0); + camera_piris_pwm2_enable(iris, 0); + usleep_range(2000, 3000); + camera_piris_pwm1_enable(iris, 1); + camera_piris_pwm2_enable(iris, 0); + usleep_range(2000, 3000); + camera_piris_pwm1_enable(iris, 1); + camera_piris_pwm2_enable(iris, 1); + usleep_range(2000, 3000); + camera_piris_pwm1_enable(iris, 0); + camera_piris_pwm2_enable(iris, 1); + usleep_range(2000, 3000); + } +} + +static void camera_piris_reverse(struct camera_iris *iris, int step) +{ + int i; + + if (step < 0 || step > CAMERA_PIRIS_MAX_STEP) + return; + + for (i = 0; i < step; i++) { + /* Phase 1->3->5->7 (open->close) */ + camera_piris_pwm1_enable(iris, 0); + camera_piris_pwm2_enable(iris, 1); + usleep_range(2000, 3000); + camera_piris_pwm1_enable(iris, 1); + camera_piris_pwm2_enable(iris, 1); + usleep_range(2000, 3000); + camera_piris_pwm1_enable(iris, 1); + camera_piris_pwm2_enable(iris, 0); + usleep_range(2000, 3000); + camera_piris_pwm1_enable(iris, 0); + camera_piris_pwm2_enable(iris, 0); + usleep_range(2000, 3000); + } +} + +static int camera_iris_set_piris(struct camera_iris *iris, int step) +{ + /* Enable P-Iris first */ + gpio_set_value(iris->pdata->gpio_piris_en, 1); + if (iris->piris_step > step) + camera_piris_forward(iris, iris->piris_step - step); + else + camera_piris_reverse(iris, step - iris->piris_step); + + return 0; +} + +static int camera_iris_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct camera_iris *iris = container_of(ctrl->handler, + struct camera_iris, ctrl_handler); + int rval = 0; + + switch (ctrl->id) { + case V4L2_CID_IRIS_MODE: + dev_info(&iris->pdev->dev, "IRIS Mode(gpio %d): %s\n", + iris->pdata->gpio_iris_set, + ctrl->val == 0 ?
"DC-Iris":"P-Iris"); + gpio_set_value(iris->pdata->gpio_iris_set, ctrl->val); + iris->iris_mode = ctrl->val; + rval = 0; + break; + case V4L2_CID_PWM_DUTY: + if (iris->iris_mode == DC_IRIS_MODE) { + rval = camera_iris_set_pwm(iris, ctrl->val); + if (rval) + dev_err(&iris->pdev->dev, + "Failed to set camera_pwm(%d)\n", rval); + return rval; + } + break; + case V4L2_CID_IRIS_STEP: + if (iris->iris_mode == P_IRIS_MODE) { + rval = camera_iris_set_piris(iris, ctrl->val); + if (rval) + dev_err(&iris->pdev->dev, + "Failed to set piris mode:%d(%d)\n", + ctrl->val, rval); + else + iris->piris_step = ctrl->val; + + return rval; + } + break; + default: + break; + } + + return rval; +} + +static const struct v4l2_ctrl_ops camera_iris_ctrl_ops = { + .s_ctrl = camera_iris_s_ctrl, +}; + +static const struct v4l2_ctrl_config camera_iris_controls[] = { + { + .ops = &camera_iris_ctrl_ops, + .id = V4L2_CID_IRIS_MODE, + .name = "V4L2_CID_IRIS_MODE", + .type = V4L2_CTRL_TYPE_INTEGER, + .max = 1, + .min = 0, + .step = 1, + .def = 0, /* Default to be DC-IRIS mode */ + }, + { + .ops = &camera_iris_ctrl_ops, + .id = V4L2_CID_PWM_DUTY, + .name = "V4L2_CID_PWM_DUTY", + .type = V4L2_CTRL_TYPE_INTEGER, + .max = CAMERA_MAX_DUTY, + .min = 0, + .step = 1, + .def = CAMERA_MAX_DUTY, + }, + { + .ops = &camera_iris_ctrl_ops, + .id = V4L2_CID_IRIS_STEP, + .name = "V4L2_CID_IRIS_STEP", + .type = V4L2_CTRL_TYPE_INTEGER, + .max = CAMERA_PIRIS_MAX_STEP, + .min = 0, + .step = 1, + .def = 0, /*Default full open */ + } +}; + +static const struct v4l2_subdev_core_ops camera_iris_core_subdev_ops = { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) + .g_ctrl = v4l2_subdev_g_ctrl, + .s_ctrl = v4l2_subdev_s_ctrl, + .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, + .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, + .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, + .queryctrl = v4l2_subdev_queryctrl, +#endif +}; + +static const struct v4l2_subdev_ops camera_iris_subdev_ops = { + .core = &camera_iris_core_subdev_ops, +}; + +static int camera_iris_register_subdev(struct camera_iris *iris) +{ + int i, rval; + + v4l2_subdev_init(&iris->sd, &camera_iris_subdev_ops); + snprintf(iris->sd.name, sizeof(iris->sd.name), "camera-pwm"); + + iris->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + + v4l2_ctrl_handler_init(&iris->ctrl_handler, + ARRAY_SIZE(camera_iris_controls)); + + for (i = 0; i < ARRAY_SIZE(camera_iris_controls); i++) + v4l2_ctrl_new_custom(&iris->ctrl_handler, + &camera_iris_controls[i], + NULL); + + if (iris->ctrl_handler.error) { + dev_err(&iris->pdev->dev, + "Failed to init camera iris controls. 
ERR: %d!\n", + iris->ctrl_handler.error); + return iris->ctrl_handler.error; + } + + iris->sd.ctrl_handler = &iris->ctrl_handler; + v4l2_ctrl_handler_setup(&iris->ctrl_handler); + + rval = v4l2_device_register_subdev(&iris->v4l2_dev, &iris->sd); + if (rval) { + dev_err(&iris->pdev->dev, + "Failed to register camera iris subdevice!\n"); + return rval; + } + + rval = v4l2_device_register_subdev_nodes(&iris->v4l2_dev); + if (rval) { + dev_err(&iris->pdev->dev, + "Failed to create camera iris node!\n"); + return rval; + } + + return 0; +} + +static int camera_iris_probe(struct platform_device *pdev) +{ + struct camera_iris *iris; + struct camera_iris_platform_data *pdata = dev_get_platdata(&pdev->dev); + int rval; + + iris = devm_kzalloc(&pdev->dev, + sizeof(struct camera_iris), + GFP_KERNEL); + if (!iris) { + dev_err(&pdev->dev, "Failed to alloc iris structure\n"); + return -ENOMEM; + } + + iris->pdev = pdev; + iris->pdata = pdata; + rval = gpio_request_one(iris->pdata->gpio_iris_set, + GPIOF_INIT_LOW | GPIOF_DIR_OUT, "iris_set"); + if (rval < 0) { + dev_err(&pdev->dev, "Failed to request gpio(%d)\n", + iris->pdata->gpio_iris_set); + return -ENODEV; + } + + rval = gpio_request_one(iris->pdata->gpio_piris_en, + GPIOF_INIT_LOW | GPIOF_DIR_OUT, "piris_en"); + if (rval < 0) { + dev_err(&pdev->dev, "Failed to request gpio(%d)\n", + iris->pdata->gpio_piris_en); + gpio_free(iris->pdata->gpio_iris_set); + return -ENODEV; + } + + rval = gpio_request_one(iris->pdata->gpio_pwm1, + GPIOF_INIT_HIGH | GPIOF_DIR_OUT, "pwm1"); + if (rval < 0) { + dev_err(&pdev->dev, "Failed to request gpio(%d)\n", + iris->pdata->gpio_pwm1); + gpio_free(iris->pdata->gpio_iris_set); + gpio_free(iris->pdata->gpio_piris_en); + return -ENODEV; + } + + rval = gpio_request_one(iris->pdata->gpio_pwm2, + GPIOF_INIT_HIGH | GPIOF_DIR_OUT, "pwm2"); + if (rval < 0) { + dev_err(&pdev->dev, "Failed to request gpio(%d)\n", + iris->pdata->gpio_pwm2); + gpio_free(iris->pdata->gpio_iris_set); + gpio_free(iris->pdata->gpio_piris_en); + gpio_free(iris->pdata->gpio_pwm1); + return -ENODEV; + } + + iris->camera_pwm.pwm = pwm_request(0, CAMERA_IRIS_NAME); + if (IS_ERR(iris->camera_pwm.pwm)) { + dev_err(&pdev->dev, + "Request PWM for camera dc-iris failed!\n"); + rval = -ENODEV; + goto out_free; + } + + iris->camera_pwm.duty_period = CAMERA_PWM_PERIOD; + iris->piris_step = CAMERA_PIRIS_MAX_STEP; + + rval = v4l2_device_register(&pdev->dev, &iris->v4l2_dev); + if (rval) { + dev_err(&pdev->dev, + "Failed to register camera iris!\n"); + goto out_free; + } + + rval = camera_iris_register_subdev(iris); + if (rval) { + v4l2_device_unregister(&iris->v4l2_dev); + dev_err(&pdev->dev, "Failed to register iris subdevice!\n"); + goto out_free; + } + + return 0; + +out_free: + gpio_free(iris->pdata->gpio_iris_set); + gpio_free(iris->pdata->gpio_piris_en); + gpio_free(iris->pdata->gpio_pwm1); + gpio_free(iris->pdata->gpio_pwm2); + return rval; +} + +static int camera_iris_remove(struct platform_device *pdev) +{ + struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev); + struct camera_iris *iris = container_of(v4l2_dev, + struct camera_iris, v4l2_dev); + + pwm_free(iris->camera_pwm.pwm); + gpio_free(iris->pdata->gpio_iris_set); + gpio_free(iris->pdata->gpio_piris_en); + gpio_free(iris->pdata->gpio_pwm1); + gpio_free(iris->pdata->gpio_pwm2); + v4l2_device_unregister_subdev(&iris->sd); + v4l2_ctrl_handler_free(&iris->ctrl_handler); + v4l2_device_unregister(&iris->v4l2_dev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int camera_iris_suspend(struct device 
*dev) +{ + struct v4l2_device *v4l2_dev = dev_get_drvdata(dev); + struct camera_iris *iris = container_of(v4l2_dev, + struct camera_iris, v4l2_dev); + + pwm_disable(iris->camera_pwm.pwm); + return 0; +} + +static int camera_iris_resume(struct device *dev) +{ + struct v4l2_device *v4l2_dev = dev_get_drvdata(dev); + struct camera_iris *iris = container_of(v4l2_dev, + struct camera_iris, v4l2_dev); + int rval; + + if (iris->camera_pwm.duty_period == CAMERA_PWM_PERIOD) + return 0; + + rval = pwm_config(iris->camera_pwm.pwm, + iris->camera_pwm.duty_period, + CAMERA_PWM_PERIOD); + if (rval) { + dev_err(dev, "Failed to configure camera PWM!\n"); + return rval; + } + + return pwm_enable(iris->camera_pwm.pwm); +} +#endif + +static const struct platform_device_id camera_iris_id_table[] = { + { CAMERA_IRIS_NAME, 0 }, + { }, +}; + +static SIMPLE_DEV_PM_OPS(camera_iris_pm_ops, + camera_iris_suspend, camera_iris_resume); + +MODULE_DEVICE_TABLE(platform, camera_iris_id_table); + +static struct camera_iris_platform_data pdata = { + .gpio_iris_set = 449, /* GP_15 */ + .gpio_piris_en = 448, /* GP_14 */ + .gpio_pwm1 = 469, /* PWM1 */ + .gpio_pwm2 = 470, /* PWM2 */ +}; + +static struct platform_driver camera_iris_driver = { + .driver = { + .name = CAMERA_IRIS_NAME, + .pm = &camera_iris_pm_ops, + }, + .probe = camera_iris_probe, + .remove = camera_iris_remove, + .id_table = camera_iris_id_table, +}; + +static struct platform_device camera_iris_device = { + .name = CAMERA_IRIS_NAME, + .dev = { + .platform_data = &pdata, + }, + .id = -1, +}; + +static struct platform_device *devices[] __initdata = { + &camera_iris_device, +}; + +static int __init camera_iris_init(void) +{ + int rval; + + rval = platform_add_devices(devices, ARRAY_SIZE(devices)); + if (rval) + return rval; + + return platform_driver_register(&camera_iris_driver); +} + +static void __exit camera_iris_exit(void) +{ + platform_driver_unregister(&camera_iris_driver); + platform_device_unregister(&camera_iris_device); +} + +module_init(camera_iris_init); +module_exit(camera_iris_exit); + +MODULE_AUTHOR("Shuguang Gong "); +MODULE_AUTHOR("Yunliang Ding "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel camera DC-IRIS/P-IRIS driver"); diff --git a/drivers/media/platform/video-iris.h b/drivers/media/platform/video-iris.h new file mode 100644 index 000000000000..927e9b57639a --- /dev/null +++ b/drivers/media/platform/video-iris.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2016--2018 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + */ +#ifndef CAMERA_IRIS_H +#define CAMERA_IRIS_H + +#include +#include +#include +#include + +#define CAMERA_IRIS_NAME "camera-iris" + +#define CAMERA_PWM_PERIOD 200 /* nseconds, 5MHz */ +#define CAMERA_MAX_DUTY 1000 + +#define CAMERA_PIRIS_MAX_STEP 21 + +enum IRIS_MODE { + DC_IRIS_MODE = 0, + P_IRIS_MODE, +}; + +struct camera_pwm_info { + struct pwm_device *pwm; + unsigned int duty_period; /* duty */ +}; + +struct camera_iris_platform_data { + unsigned int gpio_iris_set; + unsigned int gpio_piris_en; + unsigned int gpio_pwm1; + unsigned int gpio_pwm2; +}; + +struct camera_iris { + struct platform_device *pdev; + struct camera_iris_platform_data *pdata; + struct v4l2_device v4l2_dev; + struct v4l2_subdev sd; + struct v4l2_ctrl_handler ctrl_handler; + struct camera_pwm_info camera_pwm; + int iris_mode; + int piris_step; /* 21 steps for piris in total */ + const char *name; +}; + +#endif + diff --git a/drivers/media/platform/video-sensor-stub-pdata.c b/drivers/media/platform/video-sensor-stub-pdata.c new file mode 100644 index 000000000000..86fa86e207c5 --- /dev/null +++ b/drivers/media/platform/video-sensor-stub-pdata.c @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2014--2018 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include "video-sensor-stub.h" + +static struct ipu_isys_csi2_config stub_csi2_cfg[] = { + { + .nlanes = 4, + .port = 0, + }, + { + .nlanes = 1, + .port = 1, + }, +}; + +static struct ipu_isys_subdev_info stub_sd[] = { + { + .csi2 = &stub_csi2_cfg[0], + .i2c = { + .board_info = { + I2C_BOARD_INFO(SENSOR_STUB_NAME, 0x7C), + }, + .i2c_adapter_id = 0, + } + }, + { + .csi2 = &stub_csi2_cfg[1], + .i2c = { + .board_info = { + I2C_BOARD_INFO(SENSOR_STUB_NAME, 0x7E), + }, + .i2c_adapter_id = 0, + } + } +}; + +static struct ipu_isys_subdev_pdata pdata = { + .subdevs = (struct ipu_isys_subdev_info *[]) { + &stub_sd[0], + &stub_sd[1], + NULL, + }, +}; + +static void ipu_quirk(struct pci_dev *pci_dev) +{ + pr_info("Sensor Stub platform data PCI quirk hack\n"); + pci_dev->dev.platform_data = &pdata; +} + +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x5a88, ipu_quirk); diff --git a/drivers/media/platform/video-sensor-stub.c b/drivers/media/platform/video-sensor-stub.c new file mode 100644 index 000000000000..08195e74c0b6 --- /dev/null +++ b/drivers/media/platform/video-sensor-stub.c @@ -0,0 +1,324 @@ +/* + * Copyright (c) 2013--2018 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "video-sensor-stub.h" + + +/* + * Order matters. + * + * 1. Bits-per-pixel, descending. + * 2. 
Bits-per-pixel compressed, descending. + * 3. Pixel order, same as in pixel_order_str. Formats for all four pixel + * orders must be defined. + */ +static const struct sensor_csi_data_format sensor_csi_data_formats[] = { + { MEDIA_BUS_FMT_UYVY8_1X16, 16, 16, PIXEL_ORDER_GBRG, }, + { MEDIA_BUS_FMT_SGRBG12_1X12, 12, 12, PIXEL_ORDER_GRBG, }, + { MEDIA_BUS_FMT_SRGGB12_1X12, 12, 12, PIXEL_ORDER_RGGB, }, + { MEDIA_BUS_FMT_SBGGR12_1X12, 12, 12, PIXEL_ORDER_BGGR, }, + { MEDIA_BUS_FMT_SGBRG12_1X12, 12, 12, PIXEL_ORDER_GBRG, }, + { MEDIA_BUS_FMT_SGRBG10_1X10, 10, 10, PIXEL_ORDER_GRBG, }, + { MEDIA_BUS_FMT_SRGGB10_1X10, 10, 10, PIXEL_ORDER_RGGB, }, + { MEDIA_BUS_FMT_SBGGR10_1X10, 10, 10, PIXEL_ORDER_BGGR, }, + { MEDIA_BUS_FMT_SGBRG10_1X10, 10, 10, PIXEL_ORDER_GBRG, }, + { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, 10, 8, PIXEL_ORDER_GRBG, }, + { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, 10, 8, PIXEL_ORDER_RGGB, }, + { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, 10, 8, PIXEL_ORDER_BGGR, }, + { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, 10, 8, PIXEL_ORDER_GBRG, }, + { MEDIA_BUS_FMT_SGRBG8_1X8, 8, 8, PIXEL_ORDER_GRBG, }, + { MEDIA_BUS_FMT_SRGGB8_1X8, 8, 8, PIXEL_ORDER_RGGB, }, + { MEDIA_BUS_FMT_SBGGR8_1X8, 8, 8, PIXEL_ORDER_BGGR, }, + { MEDIA_BUS_FMT_SGBRG8_1X8, 8, 8, PIXEL_ORDER_GBRG, }, +}; + +static const char * const pixel_order_str[] = { "GRBG", "RGGB", "BGGR", "GBRG" }; + +#define to_csi_format_idx(fmt) (((unsigned long)(fmt) \ + - (unsigned long)sensor_csi_data_formats) \ + / sizeof(*sensor_csi_data_formats)) + +static const uint32_t sensor_supported_codes_pad[] = { + MEDIA_BUS_FMT_UYVY8_1X16, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + 0, +}; + + +static const uint32_t * const sensor_supported_codes[] = { + sensor_supported_codes_pad, +}; + +static u32 sensor_pixel_order(struct stub_sensor *sensor) +{ + return sensor->default_pixel_order; +} + + +static int sensor_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_frame_desc *desc) +{ + struct stub_sensor *sensor = to_stub_sensor(sd); + struct v4l2_mbus_frame_desc_entry *entry = desc->entry; + + desc->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2; + entry->bpp = sensor->csi_format->compressed; + entry->pixelcode = sensor->csi_format->code; + entry->size.two_dim.width = sensor->width; + entry->size.two_dim.height = sensor->height; + entry->bus.csi2.data_type = sensor->mipi_data_type; + desc->num_entries = 1; + + return 0; +} + + +static u32 __stub_get_mbus_code(struct v4l2_subdev *subdev, unsigned int pad) +{ + struct stub_sensor *sensor = to_stub_sensor(subdev); + + return sensor->csi_format->code; +} + +static int __stub_get_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct stub_sensor *sensor = to_stub_sensor(subdev); + + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + fmt->format = *v4l2_subdev_get_try_format(subdev, + cfg, fmt->pad); + } else { + fmt->format.code = __stub_get_mbus_code(subdev, fmt->pad); + fmt->format.width = sensor->width; + fmt->format.height = sensor->height; + fmt->format.field = V4L2_FIELD_NONE; + } + + return 0; +} + + +static int sensor_stub_get_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct 
stub_sensor *sensor = to_stub_sensor(subdev); + int rval; + + mutex_lock(&sensor->mutex); + rval = __stub_get_format(subdev, cfg, fmt); + mutex_unlock(&sensor->mutex); + return rval; +} +static int stub_get_mbus_formats(struct stub_sensor *sensor) +{ + const struct sensor_csi_data_format *f = &sensor_csi_data_formats[12]; + + sensor->default_pixel_order = PIXEL_ORDER_GRBG; + sensor->mbus_frame_fmts = ~0; + sensor->csi_format = f; + return 0; +} + + +static int stub_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh) +{ + struct stub_sensor *sensor = to_stub_sensor(subdev); + u32 mbus_code = + sensor_csi_data_formats[sensor_pixel_order(sensor)].code; + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format(subdev, fh->pad, 0); + + try_fmt->width = 4096; + try_fmt->height = 3072; + try_fmt->code = mbus_code; + + return 0; +} + +static const struct sensor_csi_data_format +*stub_validate_csi_data_format(struct stub_sensor *sensor, u32 code) +{ + const struct sensor_csi_data_format *csi_format = sensor->csi_format; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(sensor_csi_data_formats); i++) { + if (sensor->mbus_frame_fmts & (1 << i) + && sensor_csi_data_formats[i].code == code) + return &sensor_csi_data_formats[i]; + } + + return csi_format; +} + + +static int sensor_stub_set_format(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct stub_sensor *sensor = to_stub_sensor(subdev); + u32 code = fmt->format.code; + int rval; + + sensor->height = fmt->format.height; + sensor->width = fmt->format.width; + rval = __stub_get_format(subdev, cfg, fmt); + mutex_lock(&sensor->mutex); + if (!rval && subdev == &sensor->sd) { + const struct sensor_csi_data_format *csi_format = + stub_validate_csi_data_format(sensor, code); + if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) { + sensor->csi_format = csi_format; + } + fmt->format.code = csi_format->code; + } + + mutex_unlock(&sensor->mutex); + return rval; +} + +static int stub_set_stream(struct v4l2_subdev *subdev, int enable) +{ + + struct stub_sensor *sensor = to_stub_sensor(subdev); + struct i2c_client *client = v4l2_get_subdevdata(&sensor->sd); + + if (enable) { + dev_dbg(&client->dev, "Streaming is ON\n"); + sensor->streaming = 1; + } else { + dev_dbg(&client->dev, "Streaming is OFF\n"); + sensor->streaming = 0; + } + return 0; +} + +static struct v4l2_subdev_internal_ops sensor_sd_internal_ops = { + .open = stub_open, +}; + +static const struct v4l2_subdev_video_ops sensor_sd_video_ops = { + .s_stream = stub_set_stream, +}; + +static int stub_sensor_s_ctrl(struct v4l2_ctrl *ctrl) +{ + return 0; +} + +static const struct v4l2_ctrl_ops ipu_isys_sensor_ctrl_ops = { + .s_ctrl = stub_sensor_s_ctrl, +}; + +static const struct v4l2_subdev_pad_ops sensor_sd_pad_ops = { + .get_fmt = sensor_stub_get_format, + .set_fmt = sensor_stub_set_format, + .get_frame_desc = sensor_get_frame_desc, +}; + +static struct v4l2_subdev_ops sensor_ops = { + .video = &sensor_sd_video_ops, + .pad = &sensor_sd_pad_ops, +}; + +static const struct i2c_device_id sensor_stub_id_table[] = { + { SENSOR_STUB_NAME, 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, sensor_stub_id_table); + +static int sensor_stub_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct stub_sensor *sensor; + int rval; + + sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL); + if (!sensor) { + dev_err(&client->dev, "Failed to alloc Sensor structure\n"); + return -ENOMEM; + } + mutex_init(&sensor->mutex); + 
v4l2_i2c_subdev_init(&sensor->sd, client, &sensor_ops); + sensor->sd.internal_ops = &sensor_sd_internal_ops; + sensor->csi_format = &sensor_csi_data_formats[12]; + sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + sensor->pad.flags = MEDIA_PAD_FL_SOURCE; + sensor->streaming = false; + snprintf(sensor->sd.name, sizeof(sensor->sd.name), + "pixter %d-%4.4x", i2c_adapter_id(client->adapter), + client->addr); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) + rval = media_entity_init(&sensor->sd.entity, 1, &sensor->pad, 0); +#else + rval = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad); +#endif + if (rval) + return rval; + + return stub_get_mbus_formats(sensor); +} + +static int sensor_stub_remove(struct i2c_client *client) +{ + struct v4l2_subdev *subdev = i2c_get_clientdata(client); + struct stub_sensor *sensor = to_stub_sensor(subdev); + + v4l2_device_unregister_subdev(&sensor->sd); + media_entity_cleanup(&sensor->sd.entity); + + return 0; + +} + +static struct i2c_driver sensor_stub_i2c_driver = { + .driver = { + .name = SENSOR_STUB_NAME, + }, + .probe = sensor_stub_probe, + .remove = sensor_stub_remove, + .id_table = sensor_stub_id_table, +}; + +module_i2c_driver(sensor_stub_i2c_driver); + + +MODULE_AUTHOR("Jouni Ukkonen "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel Dummy Sensor driver"); diff --git a/drivers/media/platform/video-sensor-stub.h b/drivers/media/platform/video-sensor-stub.h new file mode 100644 index 000000000000..7f2b0841e2d5 --- /dev/null +++ b/drivers/media/platform/video-sensor-stub.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2013--2018 Intel Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * + */ + +#ifndef SENSOR_STUB_H +#define SENSOR_STUB_H + +#define SENSOR_STUB_NAME "ipu4-sensor-stub" + +#include +#include +#include +#include +#include + + +#define PIXEL_ORDER_GRBG 0 +#define PIXEL_ORDER_RGGB 1 +#define PIXEL_ORDER_BGGR 2 +#define PIXEL_ORDER_GBRG 3 + +struct sensor_csi_data_format { + u32 code; + u8 width; + u8 compressed; + u8 pixel_order; +}; + +struct stub_sensor { + struct v4l2_subdev sd; + struct media_pad pad; + bool streaming; + struct mutex mutex; + const struct sensor_csi_data_format *csi_format; + int height; + int width; + u32 mbus_frame_fmts; + u32 default_mbus_frame_fmts; + uint32_t mipi_data_type; + int default_pixel_order; + struct v4l2_ctrl *hblank; + struct v4l2_ctrl *vblank; + struct v4l2_ctrl *pixel_rate; + struct v4l2_ctrl *link_freq; + int dev_init_done; + +}; + +#define to_stub_sensor(_sd) \ + container_of(_sd, struct stub_sensor, sd) +#endif diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c index 34731f71cc00..8ca9343b6730 100644 --- a/drivers/media/platform/vivid/vivid-ctrls.c +++ b/drivers/media/platform/vivid/vivid-ctrls.c @@ -1191,6 +1191,7 @@ static int vivid_radio_rx_s_ctrl(struct v4l2_ctrl *ctrl) v4l2_ctrl_activate(dev->radio_rx_rds_ta, dev->radio_rx_rds_controls); v4l2_ctrl_activate(dev->radio_rx_rds_tp, dev->radio_rx_rds_controls); v4l2_ctrl_activate(dev->radio_rx_rds_ms, dev->radio_rx_rds_controls); + dev->radio_rx_dev.device_caps = dev->radio_rx_caps; break; case V4L2_CID_RDS_RECEPTION: dev->radio_rx_rds_enabled = ctrl->val; @@ -1265,6 +1266,7 @@ static int vivid_radio_tx_s_ctrl(struct v4l2_ctrl *ctrl) dev->radio_tx_caps &= ~V4L2_CAP_READWRITE; if (!dev->radio_tx_rds_controls) dev->radio_tx_caps |= V4L2_CAP_READWRITE; + dev->radio_tx_dev.device_caps = dev->radio_tx_caps; break; case V4L2_CID_RDS_TX_PTY: if (dev->radio_rx_rds_controls) diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c index f0f423c7ca41..6f6d4df1e8a8 100644 --- a/drivers/media/platform/vivid/vivid-vid-common.c +++ b/drivers/media/platform/vivid/vivid-vid-common.c @@ -858,7 +858,8 @@ int vidioc_g_edid(struct file *file, void *_fh, return -EINVAL; if (edid->start_block + edid->blocks > dev->edid_blocks) edid->blocks = dev->edid_blocks - edid->start_block; - cec_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr); + if (adap) + cec_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr); memcpy(edid->edid, dev->edid + edid->start_block * 128, edid->blocks * 128); return 0; } diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c index 8b5cbb6b7a70..e5d8d99e124c 100644 --- a/drivers/media/platform/vsp1/vsp1_dl.c +++ b/drivers/media/platform/vsp1/vsp1_dl.c @@ -508,7 +508,8 @@ static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm) return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD); else - return !!(vsp1_read(vsp1, VI6_CMD(dlm->index) & VI6_CMD_UPDHDR)); + return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) + & VI6_CMD_UPDHDR); } static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl) diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c index 4dfbeac8f42c..d3cd57f6ba52 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.c +++ b/drivers/media/platform/vsp1/vsp1_drm.c @@ -504,6 +504,15 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index) struct vsp1_rwpf *rpf = vsp1->rpf[i]; unsigned int j; + /* + * 
Make sure we don't accept more inputs than the hardware can + * handle. This is a temporary fix to avoid display stall, we + * need to instead allocate the BRU or BRS to display pipelines + * dynamically based on the number of planes they each use. + */ + if (pipe->num_inputs >= pipe->bru->source_pad) + pipe->inputs[i] = NULL; + if (!pipe->inputs[i]) continue; diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c index 962e4c304076..eed9516e25e1 100644 --- a/drivers/media/platform/vsp1/vsp1_drv.c +++ b/drivers/media/platform/vsp1/vsp1_drv.c @@ -571,7 +571,13 @@ static int __maybe_unused vsp1_pm_suspend(struct device *dev) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); - vsp1_pipelines_suspend(vsp1); + /* + * When used as part of a display pipeline, the VSP is stopped and + * restarted explicitly by the DU. + */ + if (!vsp1->drm) + vsp1_pipelines_suspend(vsp1); + pm_runtime_force_suspend(vsp1->dev); return 0; @@ -582,7 +588,13 @@ static int __maybe_unused vsp1_pm_resume(struct device *dev) struct vsp1_device *vsp1 = dev_get_drvdata(dev); pm_runtime_force_resume(vsp1->dev); - vsp1_pipelines_resume(vsp1); + + /* + * When used as part of a display pipeline, the VSP is stopped and + * restarted explicitly by the DU. + */ + if (!vsp1->drm) + vsp1_pipelines_resume(vsp1); return 0; } diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c index c2d3b8f0f487..247cb5be9167 100644 --- a/drivers/media/platform/vsp1/vsp1_video.c +++ b/drivers/media/platform/vsp1/vsp1_video.c @@ -585,7 +585,7 @@ static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe, if (ret) return ret; - media_graph_walk_start(&graph, entity); + media_graph_walk_start(&graph, &entity->pads[0]); while ((entity = media_graph_walk_next(&graph))) { struct v4l2_subdev *subdev; @@ -849,9 +849,8 @@ static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe) return 0; } -static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe) +static void vsp1_video_release_buffers(struct vsp1_video *video) { - struct vsp1_video *video = pipe->output->video; struct vsp1_vb2_buffer *buffer; unsigned long flags; @@ -861,12 +860,18 @@ static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe) vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR); INIT_LIST_HEAD(&video->irqqueue); spin_unlock_irqrestore(&video->irqlock, flags); +} + +static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe) +{ + lockdep_assert_held(&pipe->lock); /* Release our partition table allocation */ - mutex_lock(&pipe->lock); kfree(pipe->part_table); pipe->part_table = NULL; - mutex_unlock(&pipe->lock); + + vsp1_dl_list_put(pipe->dl); + pipe->dl = NULL; } static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count) @@ -881,8 +886,9 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count) if (pipe->stream_count == pipe->num_inputs) { ret = vsp1_video_setup_pipeline(pipe); if (ret < 0) { - mutex_unlock(&pipe->lock); + vsp1_video_release_buffers(video); vsp1_video_cleanup_pipeline(pipe); + mutex_unlock(&pipe->lock); return ret; } @@ -932,13 +938,12 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq) if (ret == -ETIMEDOUT) dev_err(video->vsp1->dev, "pipeline stop timeout\n"); - vsp1_dl_list_put(pipe->dl); - pipe->dl = NULL; + vsp1_video_cleanup_pipeline(pipe); } mutex_unlock(&pipe->lock); media_pipeline_stop(&video->video.entity); - vsp1_video_cleanup_pipeline(pipe); + vsp1_video_release_buffers(video); 
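+	/* + * Note the ordering: the display list and partition table are freed + * under pipe->lock in vsp1_video_cleanup_pipeline(), while the vb2 + * buffers are handed back outside the lock, after media_pipeline_stop(). + */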
vsp1_video_pipeline_put(pipe); } diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c index f7f3b4b2c2de..8bd6b2f1af15 100644 --- a/drivers/media/platform/vsp1/vsp1_wpf.c +++ b/drivers/media/platform/vsp1/vsp1_wpf.c @@ -452,7 +452,7 @@ static void wpf_configure(struct vsp1_entity *entity, : VI6_WPF_SRCRPF_RPF_ACT_SUB(input->entity.index); } - if (pipe->bru || pipe->num_inputs > 1) + if (pipe->bru) srcrpf |= pipe->bru->type == VSP1_ENTITY_BRU ? VI6_WPF_SRCRPF_VIRACT_MST : VI6_WPF_SRCRPF_VIRACT2_MST; diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c index 522cdfdd3345..cad9c24a787c 100644 --- a/drivers/media/platform/xilinx/xilinx-dma.c +++ b/drivers/media/platform/xilinx/xilinx-dma.c @@ -193,7 +193,7 @@ static int xvip_pipeline_validate(struct xvip_pipeline *pipe, return ret; } - media_graph_walk_start(&graph, entity); + media_graph_walk_start(&graph, &entity->pads[0]); while ((entity = media_graph_walk_next(&graph))) { struct xvip_dma *dma; diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c index b3034f80163f..8ce6f9cff746 100644 --- a/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/drivers/media/radio/si470x/radio-si470x-i2c.c @@ -92,7 +92,7 @@ MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*"); */ int si470x_get_register(struct si470x_device *radio, int regnr) { - u16 buf[READ_REG_NUM]; + __be16 buf[READ_REG_NUM]; struct i2c_msg msgs[1] = { { .addr = radio->client->addr, @@ -117,7 +117,7 @@ int si470x_get_register(struct si470x_device *radio, int regnr) int si470x_set_register(struct si470x_device *radio, int regnr) { int i; - u16 buf[WRITE_REG_NUM]; + __be16 buf[WRITE_REG_NUM]; struct i2c_msg msgs[1] = { { .addr = radio->client->addr, @@ -147,7 +147,7 @@ int si470x_set_register(struct si470x_device *radio, int regnr) static int si470x_get_all_registers(struct si470x_device *radio) { int i; - u16 buf[READ_REG_NUM]; + __be16 buf[READ_REG_NUM]; struct i2c_msg msgs[1] = { { .addr = radio->client->addr, diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index 7b3f31cc63d2..0c46155a8e9d 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -2517,6 +2517,11 @@ static int imon_probe(struct usb_interface *interface, mutex_lock(&driver_lock); first_if = usb_ifnum_to_if(usbdev, 0); + if (!first_if) { + ret = -ENODEV; + goto fail; + } + first_if_ctx = usb_get_intfdata(first_if); if (ifnum == 0) { diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c index d2223c04e9ad..4c8f456238bc 100644 --- a/drivers/media/rc/ir-lirc-codec.c +++ b/drivers/media/rc/ir-lirc-codec.c @@ -298,11 +298,14 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd, if (!dev->max_timeout) return -ENOTTY; + /* Check for multiply overflow */ + if (val > U32_MAX / 1000) + return -EINVAL; + tmp = val * 1000; - if (tmp < dev->min_timeout || - tmp > dev->max_timeout) - return -EINVAL; + if (tmp < dev->min_timeout || tmp > dev->max_timeout) + return -EINVAL; if (dev->s_timeout) ret = dev->s_timeout(dev, tmp); diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c index 7c572a643656..2a1728edb3c6 100644 --- a/drivers/media/rc/ir-mce_kbd-decoder.c +++ b/drivers/media/rc/ir-mce_kbd-decoder.c @@ -130,6 +130,8 @@ static void mce_kbd_rx_timeout(unsigned long data) for (i = 0; i < MCIR2_MASK_KEYS_START; i++) input_report_key(mce_kbd->idev, kbd_keycodes[i], 0); + + 
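+	/* flush the batched key-release events to the input subsystem */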
input_sync(mce_kbd->idev); } static enum mce_kbd_mode mce_kbd_mode(struct mce_kbd_dec *data) diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c index 817c18f2ddd1..a95d09acc22a 100644 --- a/drivers/media/rc/ir-nec-decoder.c +++ b/drivers/media/rc/ir-nec-decoder.c @@ -87,8 +87,6 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev) data->state = STATE_BIT_PULSE; return 0; } else if (eq_margin(ev.duration, NEC_REPEAT_SPACE, NEC_UNIT / 2)) { - rc_repeat(dev); - IR_dprintk(1, "Repeat last key\n"); data->state = STATE_TRAILER_PULSE; return 0; } @@ -151,19 +149,26 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev) if (!geq_margin(ev.duration, NEC_TRAILER_SPACE, NEC_UNIT / 2)) break; - address = bitrev8((data->bits >> 24) & 0xff); - not_address = bitrev8((data->bits >> 16) & 0xff); - command = bitrev8((data->bits >> 8) & 0xff); - not_command = bitrev8((data->bits >> 0) & 0xff); + if (data->count == NEC_NBITS) { + address = bitrev8((data->bits >> 24) & 0xff); + not_address = bitrev8((data->bits >> 16) & 0xff); + command = bitrev8((data->bits >> 8) & 0xff); + not_command = bitrev8((data->bits >> 0) & 0xff); + + scancode = ir_nec_bytes_to_scancode(address, + not_address, + command, + not_command, + &rc_proto); - scancode = ir_nec_bytes_to_scancode(address, not_address, - command, not_command, - &rc_proto); + if (data->is_nec_x) + data->necx_repeat = true; - if (data->is_nec_x) - data->necx_repeat = true; + rc_keydown(dev, rc_proto, scancode, 0); + } else { + rc_repeat(dev); + } - rc_keydown(dev, rc_proto, scancode, 0); data->state = STATE_INACTIVE; return 0; } diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 981cccd6b988..a22828713c1c 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -38,41 +38,41 @@ static const struct { [RC_PROTO_UNKNOWN] = { .name = "unknown", .repeat_period = 250 }, [RC_PROTO_OTHER] = { .name = "other", .repeat_period = 250 }, [RC_PROTO_RC5] = { .name = "rc-5", - .scancode_bits = 0x1f7f, .repeat_period = 164 }, + .scancode_bits = 0x1f7f, .repeat_period = 250 }, [RC_PROTO_RC5X_20] = { .name = "rc-5x-20", - .scancode_bits = 0x1f7f3f, .repeat_period = 164 }, + .scancode_bits = 0x1f7f3f, .repeat_period = 250 }, [RC_PROTO_RC5_SZ] = { .name = "rc-5-sz", - .scancode_bits = 0x2fff, .repeat_period = 164 }, + .scancode_bits = 0x2fff, .repeat_period = 250 }, [RC_PROTO_JVC] = { .name = "jvc", .scancode_bits = 0xffff, .repeat_period = 250 }, [RC_PROTO_SONY12] = { .name = "sony-12", - .scancode_bits = 0x1f007f, .repeat_period = 100 }, + .scancode_bits = 0x1f007f, .repeat_period = 250 }, [RC_PROTO_SONY15] = { .name = "sony-15", - .scancode_bits = 0xff007f, .repeat_period = 100 }, + .scancode_bits = 0xff007f, .repeat_period = 250 }, [RC_PROTO_SONY20] = { .name = "sony-20", - .scancode_bits = 0x1fff7f, .repeat_period = 100 }, + .scancode_bits = 0x1fff7f, .repeat_period = 250 }, [RC_PROTO_NEC] = { .name = "nec", - .scancode_bits = 0xffff, .repeat_period = 160 }, + .scancode_bits = 0xffff, .repeat_period = 250 }, [RC_PROTO_NECX] = { .name = "nec-x", - .scancode_bits = 0xffffff, .repeat_period = 160 }, + .scancode_bits = 0xffffff, .repeat_period = 250 }, [RC_PROTO_NEC32] = { .name = "nec-32", - .scancode_bits = 0xffffffff, .repeat_period = 160 }, + .scancode_bits = 0xffffffff, .repeat_period = 250 }, [RC_PROTO_SANYO] = { .name = "sanyo", .scancode_bits = 0x1fffff, .repeat_period = 250 }, [RC_PROTO_MCIR2_KBD] = { .name = "mcir2-kbd", - .scancode_bits = 0xffff, .repeat_period = 
150 }, + .scancode_bits = 0xffff, .repeat_period = 250 }, [RC_PROTO_MCIR2_MSE] = { .name = "mcir2-mse", - .scancode_bits = 0x1fffff, .repeat_period = 150 }, + .scancode_bits = 0x1fffff, .repeat_period = 250 }, [RC_PROTO_RC6_0] = { .name = "rc-6-0", - .scancode_bits = 0xffff, .repeat_period = 164 }, + .scancode_bits = 0xffff, .repeat_period = 250 }, [RC_PROTO_RC6_6A_20] = { .name = "rc-6-6a-20", - .scancode_bits = 0xfffff, .repeat_period = 164 }, + .scancode_bits = 0xfffff, .repeat_period = 250 }, [RC_PROTO_RC6_6A_24] = { .name = "rc-6-6a-24", - .scancode_bits = 0xffffff, .repeat_period = 164 }, + .scancode_bits = 0xffffff, .repeat_period = 250 }, [RC_PROTO_RC6_6A_32] = { .name = "rc-6-6a-32", - .scancode_bits = 0xffffffff, .repeat_period = 164 }, + .scancode_bits = 0xffffffff, .repeat_period = 250 }, [RC_PROTO_RC6_MCE] = { .name = "rc-6-mce", - .scancode_bits = 0xffff7fff, .repeat_period = 164 }, + .scancode_bits = 0xffff7fff, .repeat_period = 250 }, [RC_PROTO_SHARP] = { .name = "sharp", .scancode_bits = 0x1fff, .repeat_period = 250 }, [RC_PROTO_XMP] = { .name = "xmp", .repeat_period = 250 }, @@ -1824,11 +1824,11 @@ void rc_unregister_device(struct rc_dev *dev) if (!dev) return; - del_timer_sync(&dev->timer_keyup); - if (dev->driver_type == RC_DRIVER_IR_RAW) ir_raw_event_unregister(dev); + del_timer_sync(&dev->timer_keyup); + rc_free_rx_device(dev); device_del(&dev->dev); diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c index bc906fb128d5..d59918878eb2 100644 --- a/drivers/media/rc/sir_ir.c +++ b/drivers/media/rc/sir_ir.c @@ -57,7 +57,7 @@ static void add_read_queue(int flag, unsigned long val); static irqreturn_t sir_interrupt(int irq, void *dev_id); static void send_space(unsigned long len); static void send_pulse(unsigned long len); -static void init_hardware(void); +static int init_hardware(void); static void drop_hardware(void); /* Initialisation */ @@ -263,11 +263,36 @@ static void send_pulse(unsigned long len) } } -static void init_hardware(void) +static int init_hardware(void) { + u8 scratch, scratch2, scratch3; unsigned long flags; spin_lock_irqsave(&hardware_lock, flags); + + /* + * This is a simple port existence test, borrowed from the autoconfig + * function in drivers/tty/serial/8250/8250_port.c + */ + scratch = sinp(UART_IER); + soutp(UART_IER, 0); +#ifdef __i386__ + outb(0xff, 0x080); +#endif + scratch2 = sinp(UART_IER) & 0x0f; + soutp(UART_IER, 0x0f); +#ifdef __i386__ + outb(0x00, 0x080); +#endif + scratch3 = sinp(UART_IER) & 0x0f; + soutp(UART_IER, scratch); + if (scratch2 != 0 || scratch3 != 0x0f) { + /* we fail, there's nothing here */ + spin_unlock_irqrestore(&hardware_lock, flags); + pr_err("port existence test failed, cannot continue\n"); + return -ENODEV; + } + /* reset UART */ outb(0, io + UART_MCR); outb(0, io + UART_IER); @@ -285,6 +310,8 @@ static void init_hardware(void) /* turn on UART */ outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2, io + UART_MCR); spin_unlock_irqrestore(&hardware_lock, flags); + + return 0; } static void drop_hardware(void) @@ -334,14 +361,19 @@ static int sir_ir_probe(struct platform_device *dev) pr_err("IRQ %d already in use.\n", irq); return retval; } + + retval = init_hardware(); + if (retval) { + del_timer_sync(&timerlist); + return retval; + } + pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq); retval = devm_rc_register_device(&sir_ir_dev->dev, rcdev); if (retval < 0) return retval; - init_hardware(); - return 0; } diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c index 
ba80376a3b86..d097eb04a0e9 100644 --- a/drivers/media/tuners/r820t.c +++ b/drivers/media/tuners/r820t.c @@ -396,9 +396,11 @@ static int r820t_write(struct r820t_priv *priv, u8 reg, const u8 *val, return 0; } -static int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val) +static inline int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val) { - return r820t_write(priv, reg, &val, 1); + u8 tmp = val; /* work around GCC PR81715 with asan-stack=1 */ + + return r820t_write(priv, reg, &tmp, 1); } static int r820t_read_cache_reg(struct r820t_priv *priv, int reg) @@ -411,17 +413,18 @@ static int r820t_read_cache_reg(struct r820t_priv *priv, int reg) return -EINVAL; } -static int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val, +static inline int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val, u8 bit_mask) { + u8 tmp = val; int rc = r820t_read_cache_reg(priv, reg); if (rc < 0) return rc; - val = (rc & ~bit_mask) | (val & bit_mask); + tmp = (rc & ~bit_mask) | (tmp & bit_mask); - return r820t_write(priv, reg, &val, 1); + return r820t_write(priv, reg, &tmp, 1); } static int r820t_read(struct r820t_priv *priv, u8 reg, u8 *val, int len) diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c index 5a28ce3a1d49..38dbc128340d 100644 --- a/drivers/media/usb/as102/as102_fw.c +++ b/drivers/media/usb/as102/as102_fw.c @@ -101,18 +101,23 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, unsigned char *cmd, const struct firmware *firmware) { - struct as10x_fw_pkt_t fw_pkt; + struct as10x_fw_pkt_t *fw_pkt; int total_read_bytes = 0, errno = 0; unsigned char addr_has_changed = 0; + fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL); + if (!fw_pkt) + return -ENOMEM; + + for (total_read_bytes = 0; total_read_bytes < firmware->size; ) { int read_bytes = 0, data_len = 0; /* parse intel hex line */ read_bytes = parse_hex_line( (u8 *) (firmware->data + total_read_bytes), - fw_pkt.raw.address, - fw_pkt.raw.data, + fw_pkt->raw.address, + fw_pkt->raw.data, &data_len, &addr_has_changed); @@ -122,28 +127,28 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, /* detect the end of file */ total_read_bytes += read_bytes; if (total_read_bytes == firmware->size) { - fw_pkt.u.request[0] = 0x00; - fw_pkt.u.request[1] = 0x03; + fw_pkt->u.request[0] = 0x00; + fw_pkt->u.request[1] = 0x03; /* send EOF command */ errno = bus_adap->ops->upload_fw_pkt(bus_adap, (uint8_t *) - &fw_pkt, 2, 0); + fw_pkt, 2, 0); if (errno < 0) goto error; } else { if (!addr_has_changed) { /* prepare command to send */ - fw_pkt.u.request[0] = 0x00; - fw_pkt.u.request[1] = 0x01; + fw_pkt->u.request[0] = 0x00; + fw_pkt->u.request[1] = 0x01; - data_len += sizeof(fw_pkt.u.request); - data_len += sizeof(fw_pkt.raw.address); + data_len += sizeof(fw_pkt->u.request); + data_len += sizeof(fw_pkt->raw.address); /* send cmd to device */ errno = bus_adap->ops->upload_fw_pkt(bus_adap, (uint8_t *) - &fw_pkt, + fw_pkt, data_len, 0); if (errno < 0) @@ -152,6 +157,7 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, } } error: + kfree(fw_pkt); return (errno == 0) ? 
total_read_bytes : errno; } diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c index 3dedd83f0b19..2094f9955a62 100644 --- a/drivers/media/usb/cpia2/cpia2_v4l.c +++ b/drivers/media/usb/cpia2/cpia2_v4l.c @@ -808,7 +808,7 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf) struct camera_data *cam = video_drvdata(file); if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || - buf->index > cam->num_frames) + buf->index >= cam->num_frames) return -EINVAL; buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer; @@ -859,7 +859,7 @@ static int cpia2_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || buf->memory != V4L2_MEMORY_MMAP || - buf->index > cam->num_frames) + buf->index >= cam->num_frames) return -EINVAL; DBG("QBUF #%d\n", buf->index); @@ -948,7 +948,7 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) buf->sequence = cam->buffers[buf->index].seq; buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer; buf->length = cam->frame_size; - buf->reserved2 = 0; + buf->request = 0; buf->reserved = 0; memset(&buf->timecode, 0, sizeof(buf->timecode)); diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c index e0daa9b6c2a0..c30cb0fb165d 100644 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c @@ -918,6 +918,9 @@ struct usb_device_id cx231xx_id_table[] = { .driver_info = CX231XX_BOARD_CNXT_RDE_250}, {USB_DEVICE(0x0572, 0x58A0), .driver_info = CX231XX_BOARD_CNXT_RDU_250}, + /* AverMedia DVD EZMaker 7 */ + {USB_DEVICE(0x07ca, 0xc039), + .driver_info = CX231XX_BOARD_CNXT_VIDEO_GRABBER}, {USB_DEVICE(0x2040, 0xb110), .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL}, {USB_DEVICE(0x2040, 0xb111), @@ -1684,7 +1687,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface, nr = dev->devno; assoc_desc = udev->actconfig->intf_assoc[0]; - if (assoc_desc->bFirstInterface != ifnum) { + if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) { dev_err(d, "Not found matching IAD interface\n"); retval = -ENODEV; goto err_if; diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c index 5e320fa4a795..be26c029546b 100644 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c @@ -494,18 +494,23 @@ static int lme2510_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, static int lme2510_return_status(struct dvb_usb_device *d) { - int ret = 0; + int ret; u8 *data; - data = kzalloc(10, GFP_KERNEL); + data = kzalloc(6, GFP_KERNEL); if (!data) return -ENOMEM; - ret |= usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), - 0x06, 0x80, 0x0302, 0x00, data, 0x0006, 200); - info("Firmware Status: %x (%x)", ret , data[2]); + ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), + 0x06, 0x80, 0x0302, 0x00, + data, 0x6, 200); + if (ret != 6) + ret = -EINVAL; + else + ret = data[2]; + + info("Firmware Status: %6ph", data); - ret = (ret < 0) ? 
-ENODEV : data[2]; kfree(data); return ret; } @@ -1071,8 +1076,6 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap) if (adap->fe[0]) { info("FE Found M88RS2000"); - dvb_attach(ts2020_attach, adap->fe[0], &ts2020_config, - &d->i2c_adap); st->i2c_tuner_gate_w = 5; st->i2c_tuner_gate_r = 5; st->i2c_tuner_addr = 0x60; @@ -1138,17 +1141,18 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap) ret = st->tuner_config; break; case TUNER_RS2000: - ret = st->tuner_config; + if (dvb_attach(ts2020_attach, adap->fe[0], + &ts2020_config, &d->i2c_adap)) + ret = st->tuner_config; break; default: break; } - if (ret) + if (ret) { info("TUN Found %s tuner", tun_msg[ret]); - else { - info("TUN No tuner found --- resetting device"); - lme_coldreset(d); + } else { + info("TUN No tuner found"); return -ENODEV; } @@ -1189,6 +1193,7 @@ static int lme2510_get_adapter_count(struct dvb_usb_device *d) static int lme2510_identify_state(struct dvb_usb_device *d, const char **name) { struct lme2510_state *st = d->priv; + int status; usb_reset_configuration(d->udev); @@ -1197,12 +1202,16 @@ static int lme2510_identify_state(struct dvb_usb_device *d, const char **name) st->dvb_usb_lme2510_firmware = dvb_usb_lme2510_firmware; - if (lme2510_return_status(d) == 0x44) { + status = lme2510_return_status(d); + if (status == 0x44) { *name = lme_firmware_switch(d, 0); return COLD; } - return 0; + if (status != 0x47) + return -EINVAL; + + return WARM; } static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type, diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index 37dea0adc695..cfe86b4864b3 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c @@ -677,6 +677,8 @@ static int dvico_bluebird_xc2028_callback(void *ptr, int component, case XC2028_RESET_CLK: deb_info("%s: XC2028_RESET_CLK %d\n", __func__, arg); break; + case XC2028_I2C_FLUSH: + break; default: deb_info("%s: unknown command %d, arg %d\n", __func__, command, arg); diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c index 6020170fe99a..9be1e658ef47 100644 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c @@ -291,7 +291,7 @@ static int stk7700P2_frontend_attach(struct dvb_usb_adapter *adap) stk7700d_dib7000p_mt2266_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } @@ -325,7 +325,7 @@ static int stk7700d_frontend_attach(struct dvb_usb_adapter *adap) stk7700d_dib7000p_mt2266_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } @@ -430,6 +430,7 @@ static int stk7700ph_xc3028_callback(void *ptr, int component, state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); break; case XC2028_RESET_CLK: + case XC2028_I2C_FLUSH: break; default: err("%s: unknown command %d, arg %d\n", __func__, @@ -478,7 +479,7 @@ static int stk7700ph_frontend_attach(struct dvb_usb_adapter *adap) &stk7700ph_dib7700_xc3028_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. 
Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -1010,7 +1011,7 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap) &dib7070p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -1068,7 +1069,7 @@ static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap) &dib7770p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3056,7 +3057,7 @@ static int nim7090_frontend_attach(struct dvb_usb_adapter *adap) if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &nim7090_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config); @@ -3109,7 +3110,7 @@ static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap) /* initialize IC 0 */ if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, &tfe7090pvr_dib7000p_config[0]) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3139,7 +3140,7 @@ static int tfe7090pvr_frontend1_attach(struct dvb_usb_adapter *adap) i2c = state->dib7000p_ops.get_i2c_master(adap->dev->adapter[0].fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1); if (state->dib7000p_ops.i2c_enumeration(i2c, 1, 0x10, &tfe7090pvr_dib7000p_config[1]) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3214,7 +3215,7 @@ static int tfe7790p_frontend_attach(struct dvb_usb_adapter *adap) 1, 0x10, &tfe7790p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, @@ -3309,7 +3310,7 @@ static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap) stk7070pd_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } @@ -3384,7 +3385,7 @@ static int novatd_frontend_attach(struct dvb_usb_adapter *adap) stk7070pd_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } @@ -3620,7 +3621,7 @@ static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap) if (state->dib7000p_ops.dib7000pc_detection(&adap->dev->i2c_adap) == 0) { /* Demodulator not found for some reason? 
*/ - dvb_detach(&state->dib7000p_ops); + dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c index 8207e6900656..bcacb0f22028 100644 --- a/drivers/media/usb/dvb-usb/dibusb-common.c +++ b/drivers/media/usb/dvb-usb/dibusb-common.c @@ -223,8 +223,20 @@ EXPORT_SYMBOL(dibusb_i2c_algo); int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val) { - u8 wbuf[1] = { offs }; - return dibusb_i2c_msg(d, 0x50, wbuf, 1, val, 1); + u8 *buf; + int rc; + + buf = kmalloc(2, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf[0] = offs; + + rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1); + *val = buf[1]; + kfree(buf); + + return rc; } EXPORT_SYMBOL(dibusb_read_eeprom_byte); diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index 4c57fd7929cb..11a59854a0a6 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -508,8 +508,10 @@ static struct em28xx_reg_seq plex_px_bcud[] = { }; /* - * 2040:0265 Hauppauge WinTV-dualHD DVB - * 2040:026d Hauppauge WinTV-dualHD ATSC/QAM + * 2040:0265 Hauppauge WinTV-dualHD DVB Isoc + * 2040:8265 Hauppauge WinTV-dualHD DVB Bulk + * 2040:026d Hauppauge WinTV-dualHD ATSC/QAM Isoc + * 2040:826d Hauppauge WinTV-dualHD ATSC/QAM Bulk * reg 0x80/0x84: * GPIO_0: Yellow LED tuner 1, 0=on, 1=off * GPIO_1: Green LED tuner 1, 0=on, 1=off @@ -2392,7 +2394,8 @@ struct em28xx_board em28xx_boards[] = { .has_dvb = 1, }, /* - * 2040:0265 Hauppauge WinTV-dualHD (DVB version). + * 2040:0265 Hauppauge WinTV-dualHD (DVB version) Isoc. + * 2040:8265 Hauppauge WinTV-dualHD (DVB version) Bulk. * Empia EM28274, 2x Silicon Labs Si2168, 2x Silicon Labs Si2157 */ [EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB] = { @@ -2407,7 +2410,8 @@ struct em28xx_board em28xx_boards[] = { .leds = hauppauge_dualhd_leds, }, /* - * 2040:026d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM). + * 2040:026d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Isoc. + * 2040:826d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Bulk. 
* Empia EM28274, 2x LG LGDT3306A, 2x Silicon Labs Si2157 */ [EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595] = { @@ -2548,8 +2552,12 @@ struct usb_device_id em28xx_id_table[] = { .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850 }, { USB_DEVICE(0x2040, 0x0265), .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB }, + { USB_DEVICE(0x2040, 0x8265), + .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB }, { USB_DEVICE(0x2040, 0x026d), .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 }, + { USB_DEVICE(0x2040, 0x826d), + .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 }, { USB_DEVICE(0x0438, 0xb002), .driver_info = EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600 }, { USB_DEVICE(0x2001, 0xf112), @@ -2610,7 +2618,11 @@ struct usb_device_id em28xx_id_table[] = { .driver_info = EM28178_BOARD_PCTV_461E }, { USB_DEVICE(0x2013, 0x025f), .driver_info = EM28178_BOARD_PCTV_292E }, - { USB_DEVICE(0x2040, 0x0264), /* Hauppauge WinTV-soloHD */ + { USB_DEVICE(0x2040, 0x0264), /* Hauppauge WinTV-soloHD Isoc */ + .driver_info = EM28178_BOARD_PCTV_292E }, + { USB_DEVICE(0x2040, 0x8264), /* Hauppauge OEM Generic WinTV-soloHD Bulk */ + .driver_info = EM28178_BOARD_PCTV_292E }, + { USB_DEVICE(0x2040, 0x8268), /* Hauppauge Retail WinTV-soloHD Bulk */ .driver_info = EM28178_BOARD_PCTV_292E }, { USB_DEVICE(0x0413, 0x6f07), .driver_info = EM2861_BOARD_LEADTEK_VC100 }, diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h index 88084f24f033..094e83b6908d 100644 --- a/drivers/media/usb/em28xx/em28xx.h +++ b/drivers/media/usb/em28xx/em28xx.h @@ -191,7 +191,7 @@ USB 2.0 spec says bulk packet size is always 512 bytes */ #define EM28XX_BULK_PACKET_MULTIPLIER 384 -#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 384 +#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 94 #define EM28XX_INTERLACED_DEFAULT 1 diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c index dbe29c6c4d8b..1e8cbaf36896 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c @@ -292,7 +292,7 @@ static int hdpvr_probe(struct usb_interface *interface, /* register v4l2_device early so it can be used for printks */ if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { dev_err(&interface->dev, "v4l2_device_register failed\n"); - goto error; + goto error_free_dev; } mutex_init(&dev->io_mutex); @@ -301,7 +301,7 @@ static int hdpvr_probe(struct usb_interface *interface, dev->usbc_buf = kmalloc(64, GFP_KERNEL); if (!dev->usbc_buf) { v4l2_err(&dev->v4l2_dev, "Out of memory\n"); - goto error; + goto error_v4l2_unregister; } init_waitqueue_head(&dev->wait_buffer); @@ -339,13 +339,13 @@ static int hdpvr_probe(struct usb_interface *interface, } if (!dev->bulk_in_endpointAddr) { v4l2_err(&dev->v4l2_dev, "Could not find bulk-in endpoint\n"); - goto error; + goto error_put_usb; } /* init the device */ if (hdpvr_device_init(dev)) { v4l2_err(&dev->v4l2_dev, "device init failed\n"); - goto error; + goto error_put_usb; } mutex_lock(&dev->io_mutex); @@ -353,7 +353,7 @@ static int hdpvr_probe(struct usb_interface *interface, mutex_unlock(&dev->io_mutex); v4l2_err(&dev->v4l2_dev, "allocating transfer buffers failed\n"); - goto error; + goto error_put_usb; } mutex_unlock(&dev->io_mutex); @@ -361,7 +361,7 @@ static int hdpvr_probe(struct usb_interface *interface, retval = hdpvr_register_i2c_adapter(dev); if (retval < 0) { v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n"); - goto error; + goto error_free_buffers; } client = hdpvr_register_ir_rx_i2c(dev); @@ -394,13 
+394,17 @@ static int hdpvr_probe(struct usb_interface *interface, reg_fail: #if IS_ENABLED(CONFIG_I2C) i2c_del_adapter(&dev->i2c_adapter); +error_free_buffers: #endif + hdpvr_free_buffers(dev); +error_put_usb: + usb_put_dev(dev->udev); + kfree(dev->usbc_buf); +error_v4l2_unregister: + v4l2_device_unregister(&dev->v4l2_dev); +error_free_dev: + kfree(dev); error: - if (dev) { - flush_work(&dev->worker); - /* this frees allocated memory */ - hdpvr_delete(dev); - } return retval; } diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c index ad5b25b89699..44975061b953 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c @@ -3642,6 +3642,12 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw, hdw); hdw->ctl_write_urb->actual_length = 0; hdw->ctl_write_pend_flag = !0; + if (usb_urb_ep_type_check(hdw->ctl_write_urb)) { + pvr2_trace( + PVR2_TRACE_ERROR_LEGS, + "Invalid write control endpoint"); + return -EINVAL; + } status = usb_submit_urb(hdw->ctl_write_urb,GFP_KERNEL); if (status < 0) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, @@ -3666,6 +3672,12 @@ status); hdw); hdw->ctl_read_urb->actual_length = 0; hdw->ctl_read_pend_flag = !0; + if (usb_urb_ep_type_check(hdw->ctl_read_urb)) { + pvr2_trace( + PVR2_TRACE_ERROR_LEGS, + "Invalid read control endpoint"); + return -EINVAL; + } status = usb_submit_urb(hdw->ctl_read_urb,GFP_KERNEL); if (status < 0) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c index f06f09a0876e..68df16b3ce72 100644 --- a/drivers/media/usb/usbtv/usbtv-core.c +++ b/drivers/media/usb/usbtv/usbtv-core.c @@ -112,6 +112,8 @@ static int usbtv_probe(struct usb_interface *intf, return 0; usbtv_audio_fail: + /* we must not free at this point */ + usb_get_dev(usbtv->udev); usbtv_video_free(usbtv); usbtv_video_fail: @@ -144,6 +146,7 @@ static void usbtv_disconnect(struct usb_interface *intf) static const struct usb_device_id usbtv_id_table[] = { { USB_DEVICE(0x1b71, 0x3002) }, + { USB_DEVICE(0x1f71, 0x3301) }, {} }; MODULE_DEVICE_TABLE(usb, usbtv_id_table); diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c index 95b5f4319ec2..3668a04359e8 100644 --- a/drivers/media/usb/usbtv/usbtv-video.c +++ b/drivers/media/usb/usbtv/usbtv-video.c @@ -718,8 +718,8 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl) */ if (ctrl->id == V4L2_CID_BRIGHTNESS || ctrl->id == V4L2_CID_CONTRAST) { ret = usb_control_msg(usbtv->udev, - usb_sndctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG, - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + usb_rcvctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, USBTV_BASE + 0x0244, (void *)data, 3, 0); if (ret < 0) goto error; diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 6d22b22cb35b..28b91b7d756f 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -2230,7 +2230,7 @@ static int uvc_reset_resume(struct usb_interface *intf) * Module parameters */ -static int uvc_clock_param_get(char *buffer, struct kernel_param *kp) +static int uvc_clock_param_get(char *buffer, const struct kernel_param *kp) { if (uvc_clock_param == CLOCK_MONOTONIC) return sprintf(buffer, "CLOCK_MONOTONIC"); @@ -2238,7 +2238,7 @@ static int uvc_clock_param_get(char *buffer, struct kernel_param *kp) return sprintf(buffer, "CLOCK_REALTIME"); } -static int uvc_clock_param_set(const char *val, 
struct kernel_param *kp) +static int uvc_clock_param_set(const char *val, const struct kernel_param *kp) { if (strncasecmp(val, "clock_", strlen("clock_")) == 0) val += strlen("clock_"); diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 821f2aa299ae..f5ea97b8004d 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -18,8 +18,18 @@ #include <linux/videodev2.h> #include <linux/v4l2-subdev.h> #include <media/v4l2-dev.h> +#include <media/v4l2-fh.h> +#include <media/v4l2-ctrls.h> #include <media/v4l2-ioctl.h> +/* Use the same argument order as copy_in_user */ +#define assign_in_user(to, from) \ +({ \ + typeof(*from) __assign_tmp; \ + \ + get_user(__assign_tmp, from) || put_user(__assign_tmp, to); \ +}) + static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret = -ENOIOCTLCMD; @@ -46,135 +56,77 @@ struct v4l2_window32 { __u8 global_alpha; }; -static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) -{ - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) || - copy_from_user(&kp->w, &up->w, sizeof(up->w)) || - get_user(kp->field, &up->field) || - get_user(kp->chromakey, &up->chromakey) || - get_user(kp->clipcount, &up->clipcount) || - get_user(kp->global_alpha, &up->global_alpha)) - return -EFAULT; - if (kp->clipcount > 2048) - return -EINVAL; - if (kp->clipcount) { - struct v4l2_clip32 __user *uclips; - struct v4l2_clip __user *kclips; - int n = kp->clipcount; - compat_caddr_t p; - - if (get_user(p, &up->clips)) - return -EFAULT; - uclips = compat_ptr(p); - kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip)); - kp->clips = kclips; - while (--n >= 0) { - if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c))) - return -EFAULT; - if (put_user(n ? kclips + 1 : NULL, &kclips->next)) - return -EFAULT; - uclips += 1; - kclips += 1; - } - } else - kp->clips = NULL; - return 0; -} - -static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up) -{ - if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) || - put_user(kp->field, &up->field) || - put_user(kp->chromakey, &up->chromakey) || - put_user(kp->clipcount, &up->clipcount) || - put_user(kp->global_alpha, &up->global_alpha)) - return -EFAULT; - return 0; -} - -static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format))) - return -EFAULT; - return 0; -} - -static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp, - struct v4l2_pix_format_mplane __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane))) - return -EFAULT; - return 0; -} - -static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up) -{ - if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format))) - return -EFAULT; - return 0; -} - -static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp, - struct v4l2_pix_format_mplane __user *up) -{ - if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane))) - return -EFAULT; - return 0; -} - -static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up) +static int get_v4l2_window32(struct v4l2_window __user *kp, + struct v4l2_window32 __user *up, + void __user *aux_buf, u32 aux_space) { - if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format))) + struct v4l2_clip32 __user *uclips; + struct v4l2_clip __user *kclips; + compat_caddr_t p; + u32 clipcount; + + if (!access_ok(VERIFY_READ,
up, sizeof(*up)) || + copy_in_user(&kp->w, &up->w, sizeof(up->w)) || + assign_in_user(&kp->field, &up->field) || + assign_in_user(&kp->chromakey, &up->chromakey) || + assign_in_user(&kp->global_alpha, &up->global_alpha) || + get_user(clipcount, &up->clipcount) || + put_user(clipcount, &kp->clipcount)) return -EFAULT; - return 0; -} + if (clipcount > 2048) + return -EINVAL; + if (!clipcount) + return put_user(NULL, &kp->clips); -static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up) -{ - if (copy_to_user(up, kp, sizeof(struct v4l2_vbi_format))) + if (get_user(p, &up->clips)) return -EFAULT; - return 0; -} - -static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format))) + uclips = compat_ptr(p); + if (aux_space < clipcount * sizeof(*kclips)) return -EFAULT; - return 0; -} - -static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up) -{ - if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format))) + kclips = aux_buf; + if (put_user(kclips, &kp->clips)) return -EFAULT; - return 0; -} -static inline int get_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_sdr_format))) - return -EFAULT; + while (clipcount--) { + if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c))) + return -EFAULT; + if (put_user(clipcount ? kclips + 1 : NULL, &kclips->next)) + return -EFAULT; + uclips++; + kclips++; + } return 0; } -static inline int put_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sdr_format __user *up) +static int put_v4l2_window32(struct v4l2_window __user *kp, + struct v4l2_window32 __user *up) { - if (copy_to_user(up, kp, sizeof(struct v4l2_sdr_format))) + struct v4l2_clip __user *kclips; + struct v4l2_clip32 __user *uclips; + compat_caddr_t p; + u32 clipcount; + + if (copy_in_user(&up->w, &kp->w, sizeof(kp->w)) || + assign_in_user(&up->field, &kp->field) || + assign_in_user(&up->chromakey, &kp->chromakey) || + assign_in_user(&up->global_alpha, &kp->global_alpha) || + get_user(clipcount, &kp->clipcount) || + put_user(clipcount, &up->clipcount)) return -EFAULT; - return 0; -} + if (!clipcount) + return 0; -static inline int get_v4l2_meta_format(struct v4l2_meta_format *kp, struct v4l2_meta_format __user *up) -{ - if (copy_from_user(kp, up, sizeof(struct v4l2_meta_format))) + if (get_user(kclips, &kp->clips)) return -EFAULT; - return 0; -} - -static inline int put_v4l2_meta_format(struct v4l2_meta_format *kp, struct v4l2_meta_format __user *up) -{ - if (copy_to_user(up, kp, sizeof(struct v4l2_meta_format))) + if (get_user(p, &up->clips)) return -EFAULT; + uclips = compat_ptr(p); + while (clipcount--) { + if (copy_in_user(&uclips->c, &kclips->c, sizeof(uclips->c))) + return -EFAULT; + uclips++; + kclips++; + } return 0; } @@ -209,101 +161,164 @@ struct v4l2_create_buffers32 { __u32 reserved[8]; }; -static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) +static int __bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size) +{ + u32 type; + + if (get_user(type, &up->type)) + return -EFAULT; + + switch (type) { + case V4L2_BUF_TYPE_VIDEO_OVERLAY: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: { + u32 clipcount; + + if (get_user(clipcount, &up->fmt.win.clipcount)) + return -EFAULT; + if (clipcount > 2048) + return -EINVAL; + *size = 
clipcount * sizeof(struct v4l2_clip); + return 0; + } + default: + *size = 0; + return 0; + } +} + +static int bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size) { - if (get_user(kp->type, &up->type)) + if (!access_ok(VERIFY_READ, up, sizeof(*up))) return -EFAULT; + return __bufsize_v4l2_format(up, size); +} + +static int __get_v4l2_format32(struct v4l2_format __user *kp, + struct v4l2_format32 __user *up, + void __user *aux_buf, u32 aux_space) +{ + u32 type; - switch (kp->type) { + if (get_user(type, &up->type) || put_user(type, &kp->type)) + return -EFAULT; + + switch (type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: - return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix); + return copy_in_user(&kp->fmt.pix, &up->fmt.pix, + sizeof(kp->fmt.pix)) ? -EFAULT : 0; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - return get_v4l2_pix_format_mplane(&kp->fmt.pix_mp, - &up->fmt.pix_mp); + return copy_in_user(&kp->fmt.pix_mp, &up->fmt.pix_mp, + sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0; case V4L2_BUF_TYPE_VIDEO_OVERLAY: case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: - return get_v4l2_window32(&kp->fmt.win, &up->fmt.win); + return get_v4l2_window32(&kp->fmt.win, &up->fmt.win, + aux_buf, aux_space); case V4L2_BUF_TYPE_VBI_CAPTURE: case V4L2_BUF_TYPE_VBI_OUTPUT: - return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi); + return copy_in_user(&kp->fmt.vbi, &up->fmt.vbi, + sizeof(kp->fmt.vbi)) ? -EFAULT : 0; case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - return get_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced); + return copy_in_user(&kp->fmt.sliced, &up->fmt.sliced, + sizeof(kp->fmt.sliced)) ? -EFAULT : 0; case V4L2_BUF_TYPE_SDR_CAPTURE: case V4L2_BUF_TYPE_SDR_OUTPUT: - return get_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr); + return copy_in_user(&kp->fmt.sdr, &up->fmt.sdr, + sizeof(kp->fmt.sdr)) ? -EFAULT : 0; case V4L2_BUF_TYPE_META_CAPTURE: - return get_v4l2_meta_format(&kp->fmt.meta, &up->fmt.meta); + return copy_in_user(&kp->fmt.meta, &up->fmt.meta, + sizeof(kp->fmt.meta)) ? 
-EFAULT : 0; default: - pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n", - kp->type); return -EINVAL; } } -static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) +static int get_v4l2_format32(struct v4l2_format __user *kp, + struct v4l2_format32 __user *up, + void __user *aux_buf, u32 aux_space) +{ + if (!access_ok(VERIFY_READ, up, sizeof(*up))) + return -EFAULT; + return __get_v4l2_format32(kp, up, aux_buf, aux_space); +} + +static int bufsize_v4l2_create(struct v4l2_create_buffers32 __user *up, + u32 *size) { - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32))) + if (!access_ok(VERIFY_READ, up, sizeof(*up))) return -EFAULT; - return __get_v4l2_format32(kp, up); + return __bufsize_v4l2_format(&up->format, size); } -static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) +static int get_v4l2_create32(struct v4l2_create_buffers __user *kp, + struct v4l2_create_buffers32 __user *up, + void __user *aux_buf, u32 aux_space) { - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) || - copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format))) + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + copy_in_user(kp, up, + offsetof(struct v4l2_create_buffers32, format))) return -EFAULT; - return __get_v4l2_format32(&kp->format, &up->format); + return __get_v4l2_format32(&kp->format, &up->format, + aux_buf, aux_space); } -static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) +static int __put_v4l2_format32(struct v4l2_format __user *kp, + struct v4l2_format32 __user *up) { - if (put_user(kp->type, &up->type)) + u32 type; + + if (get_user(type, &kp->type)) return -EFAULT; - switch (kp->type) { + switch (type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: - return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix); + return copy_in_user(&up->fmt.pix, &kp->fmt.pix, + sizeof(kp->fmt.pix)) ? -EFAULT : 0; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - return put_v4l2_pix_format_mplane(&kp->fmt.pix_mp, - &up->fmt.pix_mp); + return copy_in_user(&up->fmt.pix_mp, &kp->fmt.pix_mp, + sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0; case V4L2_BUF_TYPE_VIDEO_OVERLAY: case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: return put_v4l2_window32(&kp->fmt.win, &up->fmt.win); case V4L2_BUF_TYPE_VBI_CAPTURE: case V4L2_BUF_TYPE_VBI_OUTPUT: - return put_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi); + return copy_in_user(&up->fmt.vbi, &kp->fmt.vbi, + sizeof(kp->fmt.vbi)) ? -EFAULT : 0; case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - return put_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced); + return copy_in_user(&up->fmt.sliced, &kp->fmt.sliced, + sizeof(kp->fmt.sliced)) ? -EFAULT : 0; case V4L2_BUF_TYPE_SDR_CAPTURE: case V4L2_BUF_TYPE_SDR_OUTPUT: - return put_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr); + return copy_in_user(&up->fmt.sdr, &kp->fmt.sdr, + sizeof(kp->fmt.sdr)) ? -EFAULT : 0; case V4L2_BUF_TYPE_META_CAPTURE: - return put_v4l2_meta_format(&kp->fmt.meta, &up->fmt.meta); + return copy_in_user(&up->fmt.meta, &kp->fmt.meta, + sizeof(kp->fmt.meta)) ? 
-EFAULT : 0; default: - pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n", - kp->type); return -EINVAL; } } -static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) +static int put_v4l2_format32(struct v4l2_format __user *kp, + struct v4l2_format32 __user *up) { - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32))) + if (!access_ok(VERIFY_WRITE, up, sizeof(*up))) return -EFAULT; return __put_v4l2_format32(kp, up); } -static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) +static int put_v4l2_create32(struct v4l2_create_buffers __user *kp, + struct v4l2_create_buffers32 __user *up) { - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) || - copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) || - copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved))) + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || + copy_in_user(up, kp, + offsetof(struct v4l2_create_buffers32, format)) || + copy_in_user(up->reserved, kp->reserved, sizeof(kp->reserved))) return -EFAULT; return __put_v4l2_format32(&kp->format, &up->format); } @@ -317,25 +332,28 @@ struct v4l2_standard32 { __u32 reserved[4]; }; -static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up) +static int get_v4l2_standard32(struct v4l2_standard __user *kp, + struct v4l2_standard32 __user *up) { /* other fields are not set by the user, nor used by the driver */ - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) || - get_user(kp->index, &up->index)) + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + assign_in_user(&kp->index, &up->index)) return -EFAULT; return 0; } -static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up) +static int put_v4l2_standard32(struct v4l2_standard __user *kp, + struct v4l2_standard32 __user *up) { - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) || - put_user(kp->index, &up->index) || - put_user(kp->id, &up->id) || - copy_to_user(up->name, kp->name, 24) || - copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) || - put_user(kp->framelines, &up->framelines) || - copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32))) - return -EFAULT; + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || + assign_in_user(&up->index, &kp->index) || + assign_in_user(&up->id, &kp->id) || + copy_in_user(up->name, kp->name, sizeof(up->name)) || + copy_in_user(&up->frameperiod, &kp->frameperiod, + sizeof(up->frameperiod)) || + assign_in_user(&up->framelines, &kp->framelines) || + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved))) + return -EFAULT; return 0; } @@ -370,140 +388,193 @@ struct v4l2_buffer32 { __s32 fd; } m; __u32 length; - __u32 reserved2; + __u32 request; __u32 reserved; }; -static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, - enum v4l2_memory memory) +static int get_v4l2_plane32(struct v4l2_plane __user *up, + struct v4l2_plane32 __user *up32, + enum v4l2_memory memory) { - void __user *up_pln; - compat_long_t p; + compat_ulong_t p; if (copy_in_user(up, up32, 2 * sizeof(__u32)) || - copy_in_user(&up->data_offset, &up32->data_offset, - sizeof(__u32))) + copy_in_user(&up->data_offset, &up32->data_offset, + sizeof(up->data_offset))) return -EFAULT; - if (memory == V4L2_MEMORY_USERPTR) { - if (get_user(p, &up32->m.userptr)) - return -EFAULT; - up_pln = compat_ptr(p); - if (put_user((unsigned long)up_pln, 
&up->m.userptr)) + switch (memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: + if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset, + sizeof(up32->m.mem_offset))) return -EFAULT; - } else if (memory == V4L2_MEMORY_DMABUF) { - if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int))) + break; + case V4L2_MEMORY_USERPTR: + if (get_user(p, &up32->m.userptr) || + put_user((unsigned long)compat_ptr(p), &up->m.userptr)) return -EFAULT; - } else { - if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset, - sizeof(__u32))) + break; + case V4L2_MEMORY_DMABUF: + if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(up32->m.fd))) return -EFAULT; + break; } return 0; } -static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32, - enum v4l2_memory memory) +static int put_v4l2_plane32(struct v4l2_plane __user *up, + struct v4l2_plane32 __user *up32, + enum v4l2_memory memory) { + unsigned long p; + if (copy_in_user(up32, up, 2 * sizeof(__u32)) || - copy_in_user(&up32->data_offset, &up->data_offset, - sizeof(__u32))) + copy_in_user(&up32->data_offset, &up->data_offset, + sizeof(up->data_offset))) return -EFAULT; - /* For MMAP, driver might've set up the offset, so copy it back. - * USERPTR stays the same (was userspace-provided), so no copying. */ - if (memory == V4L2_MEMORY_MMAP) + switch (memory) { + case V4L2_MEMORY_MMAP: + case V4L2_MEMORY_OVERLAY: if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset, - sizeof(__u32))) + sizeof(up->m.mem_offset))) + return -EFAULT; + break; + case V4L2_MEMORY_USERPTR: + if (get_user(p, &up->m.userptr) || + put_user((compat_ulong_t)ptr_to_compat((__force void *)p), + &up32->m.userptr)) return -EFAULT; - /* For DMABUF, driver might've set up the fd, so copy it back. */ - if (memory == V4L2_MEMORY_DMABUF) - if (copy_in_user(&up32->m.fd, &up->m.fd, - sizeof(int))) + break; + case V4L2_MEMORY_DMABUF: + if (copy_in_user(&up32->m.fd, &up->m.fd, sizeof(up->m.fd))) return -EFAULT; + break; + } return 0; } -static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up) +static int bufsize_v4l2_buffer(struct v4l2_buffer32 __user *up, u32 *size) { + u32 type; + u32 length; + + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + get_user(type, &up->type) || + get_user(length, &up->length)) + return -EFAULT; + + if (V4L2_TYPE_IS_MULTIPLANAR(type)) { + if (length > VIDEO_MAX_PLANES) + return -EINVAL; + + /* + * We don't really care if userspace decides to kill itself + * by passing a very big length value + */ + *size = length * sizeof(struct v4l2_plane); + } else { + *size = 0; + } + return 0; +} + +static int get_v4l2_buffer32(struct v4l2_buffer __user *kp, + struct v4l2_buffer32 __user *up, + void __user *aux_buf, u32 aux_space) +{ + u32 type; + u32 length; + u32 request; + enum v4l2_memory memory; struct v4l2_plane32 __user *uplane32; struct v4l2_plane __user *uplane; compat_caddr_t p; int ret; - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) || - get_user(kp->index, &up->index) || - get_user(kp->type, &up->type) || - get_user(kp->flags, &up->flags) || - get_user(kp->memory, &up->memory) || - get_user(kp->length, &up->length)) - return -EFAULT; + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + assign_in_user(&kp->index, &up->index) || + get_user(type, &up->type) || + put_user(type, &kp->type) || + assign_in_user(&kp->flags, &up->flags) || + get_user(memory, &up->memory) || + put_user(memory, &kp->memory) || + get_user(length, &up->length) || + put_user(length, &kp->length) || + get_user(request, 
&up->request) || + put_user(request, &kp->request)) + return -EFAULT; - if (V4L2_TYPE_IS_OUTPUT(kp->type)) - if (get_user(kp->bytesused, &up->bytesused) || - get_user(kp->field, &up->field) || - get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || - get_user(kp->timestamp.tv_usec, - &up->timestamp.tv_usec)) + if (V4L2_TYPE_IS_OUTPUT(type)) + if (assign_in_user(&kp->bytesused, &up->bytesused) || + assign_in_user(&kp->field, &up->field) || + assign_in_user(&kp->timestamp.tv_sec, + &up->timestamp.tv_sec) || + assign_in_user(&kp->timestamp.tv_usec, + &up->timestamp.tv_usec)) return -EFAULT; - if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) { - unsigned int num_planes; + if (V4L2_TYPE_IS_MULTIPLANAR(type)) { + u32 num_planes = length; - if (kp->length == 0) { - kp->m.planes = NULL; - /* num_planes == 0 is legal, e.g. when userspace doesn't - * need planes array on DQBUF*/ - return 0; - } else if (kp->length > VIDEO_MAX_PLANES) { - return -EINVAL; + if (num_planes == 0) { + /* + * num_planes == 0 is legal, e.g. when userspace doesn't + * need planes array on DQBUF + */ + return put_user(NULL, &kp->m.planes); } + if (num_planes > VIDEO_MAX_PLANES) + return -EINVAL; if (get_user(p, &up->m.planes)) return -EFAULT; uplane32 = compat_ptr(p); if (!access_ok(VERIFY_READ, uplane32, - kp->length * sizeof(struct v4l2_plane32))) + num_planes * sizeof(*uplane32))) return -EFAULT; - /* We don't really care if userspace decides to kill itself - * by passing a very big num_planes value */ - uplane = compat_alloc_user_space(kp->length * - sizeof(struct v4l2_plane)); - kp->m.planes = (__force struct v4l2_plane *)uplane; + /* + * We don't really care if userspace decides to kill itself + * by passing a very big num_planes value + */ + if (aux_space < num_planes * sizeof(*uplane)) + return -EFAULT; + + uplane = aux_buf; + if (put_user((__force struct v4l2_plane *)uplane, + &kp->m.planes)) + return -EFAULT; - for (num_planes = 0; num_planes < kp->length; num_planes++) { - ret = get_v4l2_plane32(uplane, uplane32, kp->memory); + while (num_planes--) { + ret = get_v4l2_plane32(uplane, uplane32, memory); if (ret) return ret; - ++uplane; - ++uplane32; + uplane++; + uplane32++; } } else { - switch (kp->memory) { + switch (memory) { case V4L2_MEMORY_MMAP: - if (get_user(kp->m.offset, &up->m.offset)) + case V4L2_MEMORY_OVERLAY: + if (assign_in_user(&kp->m.offset, &up->m.offset)) return -EFAULT; break; - case V4L2_MEMORY_USERPTR: - { - compat_long_t tmp; + case V4L2_MEMORY_USERPTR: { + compat_ulong_t userptr; - if (get_user(tmp, &up->m.userptr)) - return -EFAULT; - - kp->m.userptr = (unsigned long)compat_ptr(tmp); - } - break; - case V4L2_MEMORY_OVERLAY: - if (get_user(kp->m.offset, &up->m.offset)) + if (get_user(userptr, &up->m.userptr) || + put_user((unsigned long)compat_ptr(userptr), + &kp->m.userptr)) return -EFAULT; break; + } case V4L2_MEMORY_DMABUF: - if (get_user(kp->m.fd, &up->m.fd)) + if (assign_in_user(&kp->m.fd, &up->m.fd)) return -EFAULT; break; } @@ -512,65 +583,70 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user return 0; } -static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up) +static int put_v4l2_buffer32(struct v4l2_buffer __user *kp, + struct v4l2_buffer32 __user *up) { + u32 type; + u32 length; + enum v4l2_memory memory; struct v4l2_plane32 __user *uplane32; struct v4l2_plane __user *uplane; compat_caddr_t p; - int num_planes; int ret; - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) || - put_user(kp->index,
&up->index) || - put_user(kp->type, &up->type) || - put_user(kp->flags, &up->flags) || - put_user(kp->memory, &up->memory)) - return -EFAULT; + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || + assign_in_user(&up->index, &kp->index) || + get_user(type, &kp->type) || + put_user(type, &up->type) || + assign_in_user(&up->flags, &kp->flags) || + get_user(memory, &kp->memory) || + put_user(memory, &up->memory)) + return -EFAULT; - if (put_user(kp->bytesused, &up->bytesused) || - put_user(kp->field, &up->field) || - put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || - put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) || - copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) || - put_user(kp->sequence, &up->sequence) || - put_user(kp->reserved2, &up->reserved2) || - put_user(kp->reserved, &up->reserved) || - put_user(kp->length, &up->length)) - return -EFAULT; + if (assign_in_user(&up->bytesused, &kp->bytesused) || + assign_in_user(&up->field, &kp->field) || + assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) || + assign_in_user(&up->timestamp.tv_usec, &kp->timestamp.tv_usec) || + copy_in_user(&up->timecode, &kp->timecode, sizeof(kp->timecode)) || + assign_in_user(&up->sequence, &kp->sequence) || + assign_in_user(&up->request, &kp->request) || + assign_in_user(&up->reserved, &kp->reserved) || + get_user(length, &kp->length) || + put_user(length, &up->length)) + return -EFAULT; + + if (V4L2_TYPE_IS_MULTIPLANAR(type)) { + u32 num_planes = length; - if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) { - num_planes = kp->length; if (num_planes == 0) return 0; - uplane = (__force struct v4l2_plane __user *)kp->m.planes; + if (get_user(uplane, ((__force struct v4l2_plane __user **)&kp->m.planes))) + return -EFAULT; if (get_user(p, &up->m.planes)) return -EFAULT; uplane32 = compat_ptr(p); - while (--num_planes >= 0) { - ret = put_v4l2_plane32(uplane, uplane32, kp->memory); + while (num_planes--) { + ret = put_v4l2_plane32(uplane, uplane32, memory); if (ret) return ret; ++uplane; ++uplane32; } } else { - switch (kp->memory) { + switch (memory) { case V4L2_MEMORY_MMAP: - if (put_user(kp->m.offset, &up->m.offset)) + case V4L2_MEMORY_OVERLAY: + if (assign_in_user(&up->m.offset, &kp->m.offset)) return -EFAULT; break; case V4L2_MEMORY_USERPTR: - if (put_user(kp->m.userptr, &up->m.userptr)) - return -EFAULT; - break; - case V4L2_MEMORY_OVERLAY: - if (put_user(kp->m.offset, &up->m.offset)) + if (assign_in_user(&up->m.userptr, &kp->m.userptr)) return -EFAULT; break; case V4L2_MEMORY_DMABUF: - if (put_user(kp->m.fd, &up->m.fd)) + if (assign_in_user(&up->m.fd, &kp->m.fd)) return -EFAULT; break; } @@ -595,30 +674,33 @@ struct v4l2_framebuffer32 { } fmt; }; -static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up) +static int get_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp, + struct v4l2_framebuffer32 __user *up) { - u32 tmp; - - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) || - get_user(tmp, &up->base) || - get_user(kp->capability, &up->capability) || - get_user(kp->flags, &up->flags) || - copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt))) - return -EFAULT; - kp->base = (__force void *)compat_ptr(tmp); + compat_caddr_t tmp; + + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + get_user(tmp, &up->base) || + put_user((__force void *)compat_ptr(tmp), &kp->base) || + assign_in_user(&kp->capability, &up->capability) ||
assign_in_user(&kp->flags, &up->flags) || + copy_in_user(&kp->fmt, &up->fmt, sizeof(kp->fmt))) + return -EFAULT; return 0; } -static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up) +static int put_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp, + struct v4l2_framebuffer32 __user *up) { - u32 tmp = (u32)((unsigned long)kp->base); - - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) || - put_user(tmp, &up->base) || - put_user(kp->capability, &up->capability) || - put_user(kp->flags, &up->flags) || - copy_to_user(&up->fmt, &kp->fmt, sizeof(up->fmt))) - return -EFAULT; + void *base; + + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || + get_user(base, &kp->base) || + put_user(ptr_to_compat(base), &up->base) || + assign_in_user(&up->capability, &kp->capability) || + assign_in_user(&up->flags, &kp->flags) || + copy_in_user(&up->fmt, &kp->fmt, sizeof(kp->fmt))) + return -EFAULT; return 0; } @@ -634,18 +716,22 @@ struct v4l2_input32 { __u32 reserved[3]; }; -/* The 64-bit v4l2_input struct has extra padding at the end of the struct. - Otherwise it is identical to the 32-bit version. */ -static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up) +/* + * The 64-bit v4l2_input struct has extra padding at the end of the struct. + * Otherwise it is identical to the 32-bit version. + */ +static inline int get_v4l2_input32(struct v4l2_input __user *kp, + struct v4l2_input32 __user *up) { - if (copy_from_user(kp, up, sizeof(struct v4l2_input32))) + if (copy_in_user(kp, up, sizeof(*up))) return -EFAULT; return 0; } -static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up) +static inline int put_v4l2_input32(struct v4l2_input __user *kp, + struct v4l2_input32 __user *up) { - if (copy_to_user(up, kp, sizeof(struct v4l2_input32))) + if (copy_in_user(up, kp, sizeof(*up))) return -EFAULT; return 0; } @@ -669,60 +755,95 @@ struct v4l2_ext_control32 { }; } __attribute__ ((packed)); -/* The following function really belong in v4l2-common, but that causes - a circular dependency between modules. We need to think about this, but - for now this will do. */ - -/* Return non-zero if this control is a pointer type. Currently only - type STRING is a pointer type. */ -static inline int ctrl_is_pointer(u32 id) +/* Return true if this control is a pointer type. 
*/ +static inline bool ctrl_is_pointer(struct file *file, u32 id) { - switch (id) { - case V4L2_CID_RDS_TX_PS_NAME: - case V4L2_CID_RDS_TX_RADIO_TEXT: - return 1; - default: - return 0; + struct video_device *vdev = video_devdata(file); + struct v4l2_fh *fh = NULL; + struct v4l2_ctrl_handler *hdl = NULL; + struct v4l2_query_ext_ctrl qec = { id }; + const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops; + + if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags)) + fh = file->private_data; + + if (fh && fh->ctrl_handler) + hdl = fh->ctrl_handler; + else if (vdev->ctrl_handler) + hdl = vdev->ctrl_handler; + + if (hdl) { + struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, id); + + return ctrl && ctrl->is_ptr; } + + if (!ops || !ops->vidioc_query_ext_ctrl) + return false; + + return !ops->vidioc_query_ext_ctrl(file, fh, &qec) && + (qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD); +} + +static int bufsize_v4l2_ext_controls(struct v4l2_ext_controls32 __user *up, + u32 *size) +{ + u32 count; + + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + get_user(count, &up->count)) + return -EFAULT; + if (count > V4L2_CID_MAX_CTRLS) + return -EINVAL; + *size = count * sizeof(struct v4l2_ext_control); + return 0; } -static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up) +static int get_v4l2_ext_controls32(struct file *file, + struct v4l2_ext_controls __user *kp, + struct v4l2_ext_controls32 __user *up, + void __user *aux_buf, u32 aux_space) { struct v4l2_ext_control32 __user *ucontrols; struct v4l2_ext_control __user *kcontrols; - unsigned int n; + u32 count; + u32 n; compat_caddr_t p; - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) || - get_user(kp->which, &up->which) || - get_user(kp->count, &up->count) || - get_user(kp->error_idx, &up->error_idx) || - copy_from_user(kp->reserved, up->reserved, - sizeof(kp->reserved))) - return -EFAULT; - if (kp->count == 0) { - kp->controls = NULL; - return 0; - } else if (kp->count > V4L2_CID_MAX_CTRLS) { + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + assign_in_user(&kp->which, &up->which) || + get_user(count, &up->count) || + put_user(count, &kp->count) || + assign_in_user(&kp->error_idx, &up->error_idx) || + copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved))) + return -EFAULT; + + if (count == 0) + return put_user(NULL, &kp->controls); + if (count > V4L2_CID_MAX_CTRLS) return -EINVAL; - } if (get_user(p, &up->controls)) return -EFAULT; ucontrols = compat_ptr(p); - if (!access_ok(VERIFY_READ, ucontrols, - kp->count * sizeof(struct v4l2_ext_control32))) + if (!access_ok(VERIFY_READ, ucontrols, count * sizeof(*ucontrols))) + return -EFAULT; + if (aux_space < count * sizeof(*kcontrols)) return -EFAULT; - kcontrols = compat_alloc_user_space(kp->count * - sizeof(struct v4l2_ext_control)); - kp->controls = (__force struct v4l2_ext_control *)kcontrols; - for (n = 0; n < kp->count; n++) { + kcontrols = aux_buf; + if (put_user((__force struct v4l2_ext_control *)kcontrols, + &kp->controls)) + return -EFAULT; + + for (n = 0; n < count; n++) { u32 id; if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols))) return -EFAULT; + if (get_user(id, &kcontrols->id)) return -EFAULT; - if (ctrl_is_pointer(id)) { + + if (ctrl_is_pointer(file, id)) { void __user *s; if (get_user(p, &ucontrols->string)) @@ -737,43 +858,55 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext return 0; } -static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up) 
+static int put_v4l2_ext_controls32(struct file *file, + struct v4l2_ext_controls __user *kp, + struct v4l2_ext_controls32 __user *up) { struct v4l2_ext_control32 __user *ucontrols; - struct v4l2_ext_control __user *kcontrols = - (__force struct v4l2_ext_control __user *)kp->controls; - int n = kp->count; + struct v4l2_ext_control __user *kcontrols; + u32 count; + u32 n; compat_caddr_t p; - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) || - put_user(kp->which, &up->which) || - put_user(kp->count, &up->count) || - put_user(kp->error_idx, &up->error_idx) || - copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) - return -EFAULT; - if (!kp->count) - return 0; + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || + assign_in_user(&up->which, &kp->which) || + get_user(count, &kp->count) || + put_user(count, &up->count) || + assign_in_user(&up->error_idx, &kp->error_idx) || + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)) || + get_user(kcontrols, &kp->controls)) + return -EFAULT; + if (!count || count > (U32_MAX/sizeof(*ucontrols))) + return 0; if (get_user(p, &up->controls)) return -EFAULT; ucontrols = compat_ptr(p); - if (!access_ok(VERIFY_WRITE, ucontrols, - n * sizeof(struct v4l2_ext_control32))) + if (!access_ok(VERIFY_WRITE, ucontrols, count * sizeof(*ucontrols))) return -EFAULT; - while (--n >= 0) { - unsigned size = sizeof(*ucontrols); + for (n = 0; n < count; n++) { + unsigned int size = sizeof(*ucontrols); u32 id; - if (get_user(id, &kcontrols->id)) + if (get_user(id, &kcontrols->id) || + put_user(id, &ucontrols->id) || + assign_in_user(&ucontrols->size, &kcontrols->size) || + copy_in_user(&ucontrols->reserved2, &kcontrols->reserved2, + sizeof(ucontrols->reserved2))) return -EFAULT; - /* Do not modify the pointer when copying a pointer control. - The contents of the pointer was changed, not the pointer - itself. */ - if (ctrl_is_pointer(id)) + + /* + * Do not modify the pointer when copying a pointer control. + * The contents of the pointer was changed, not the pointer + * itself. 
+ */ + if (ctrl_is_pointer(file, id)) size -= sizeof(ucontrols->value64); + if (copy_in_user(ucontrols, kcontrols, size)) return -EFAULT; + ucontrols++; kcontrols++; } @@ -793,18 +926,67 @@ struct v4l2_event32 { __u32 reserved[8]; }; -static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up) +static int put_v4l2_event32(struct v4l2_event __user *kp, + struct v4l2_event32 __user *up) { - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) || - put_user(kp->type, &up->type) || - copy_to_user(&up->u, &kp->u, sizeof(kp->u)) || - put_user(kp->pending, &up->pending) || - put_user(kp->sequence, &up->sequence) || - put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) || - put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) || - put_user(kp->id, &up->id) || - copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32))) - return -EFAULT; + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || + assign_in_user(&up->type, &kp->type) || + copy_in_user(&up->u, &kp->u, sizeof(kp->u)) || + assign_in_user(&up->pending, &kp->pending) || + assign_in_user(&up->sequence, &kp->sequence) || + assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) || + assign_in_user(&up->timestamp.tv_nsec, &kp->timestamp.tv_nsec) || + assign_in_user(&up->id, &kp->id) || + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved))) + return -EFAULT; + return 0; +} + +struct v4l2_subdev_routing32 { + compat_caddr_t routes; + __u32 num_routes; + __u32 reserved[5]; +}; + +static int get_v4l2_subdev_routing(struct v4l2_subdev_routing *kp, + struct v4l2_subdev_routing32 __user *up) +{ + compat_caddr_t p; + + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + get_user(p, &up->routes) || + get_user(kp->num_routes, &up->num_routes) || + !access_ok(VERIFY_READ, up->reserved, sizeof(*up->reserved)) || + kp->num_routes > U32_MAX / sizeof(*kp->routes)) + return -EFAULT; + + kp->routes = compat_ptr(p); + + if (!access_ok(VERIFY_READ, kp->routes, + kp->num_routes * (u32)sizeof(*kp->routes))) + return -EFAULT; + + return 0; +} + +static int put_v4l2_subdev_routing(struct v4l2_subdev_routing *kp, + struct v4l2_subdev_routing32 __user *up) +{ + struct v4l2_subdev_route __user *uroutes; + compat_caddr_t p; + + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || + get_user(p, &up->routes) || + put_user(kp->num_routes, &up->num_routes) || + !access_ok(VERIFY_WRITE, up->reserved, sizeof(*up->reserved))) + return -EFAULT; + + uroutes = compat_ptr(p); + + if (!access_ok(VERIFY_WRITE, uroutes, + kp->num_routes * sizeof(*kp->routes))) + return -EFAULT; + return 0; } @@ -816,32 +998,35 @@ struct v4l2_edid32 { compat_caddr_t edid; }; -static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) +static int get_v4l2_edid32(struct v4l2_edid __user *kp, + struct v4l2_edid32 __user *up) { - u32 tmp; - - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) || - get_user(kp->pad, &up->pad) || - get_user(kp->start_block, &up->start_block) || - get_user(kp->blocks, &up->blocks) || - get_user(tmp, &up->edid) || - copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved))) - return -EFAULT; - kp->edid = (__force u8 *)compat_ptr(tmp); + compat_uptr_t tmp; + + if (!access_ok(VERIFY_READ, up, sizeof(*up)) || + assign_in_user(&kp->pad, &up->pad) || + assign_in_user(&kp->start_block, &up->start_block) || + assign_in_user(&kp->blocks, &up->blocks) || + get_user(tmp, &up->edid) || + put_user(compat_ptr(tmp), &kp->edid) || + copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved))) + return 
-EFAULT; return 0; } -static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) +static int put_v4l2_edid32(struct v4l2_edid __user *kp, + struct v4l2_edid32 __user *up) { - u32 tmp = (u32)((unsigned long)kp->edid); - - if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) || - put_user(kp->pad, &up->pad) || - put_user(kp->start_block, &up->start_block) || - put_user(kp->blocks, &up->blocks) || - put_user(tmp, &up->edid) || - copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved))) - return -EFAULT; + void *edid; + + if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || + assign_in_user(&up->pad, &kp->pad) || + assign_in_user(&up->start_block, &kp->start_block) || + assign_in_user(&up->blocks, &kp->blocks) || + get_user(edid, &kp->edid) || + put_user(ptr_to_compat(edid), &up->edid) || + copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved))) + return -EFAULT; return 0; } @@ -870,25 +1055,28 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up) #define VIDIOC_STREAMOFF32 _IOW ('V', 19, s32) #define VIDIOC_G_INPUT32 _IOR ('V', 38, s32) #define VIDIOC_S_INPUT32 _IOWR('V', 39, s32) +#define VIDIOC_SUBDEV_G_ROUTING32 _IOWR('V', 38, struct v4l2_subdev_routing32) +#define VIDIOC_SUBDEV_S_ROUTING32 _IOWR('V', 39, struct v4l2_subdev_routing32) #define VIDIOC_G_OUTPUT32 _IOR ('V', 46, s32) #define VIDIOC_S_OUTPUT32 _IOWR('V', 47, s32) +static int alloc_userspace(unsigned int size, u32 aux_space, + void __user **up_native) +{ + *up_native = compat_alloc_user_space(size + aux_space); + if (!*up_native) + return -ENOMEM; + if (clear_user(*up_native, size)) + return -EFAULT; + return 0; +} + static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - union { - struct v4l2_format v2f; - struct v4l2_buffer v2b; - struct v4l2_framebuffer v2fb; - struct v4l2_input v2i; - struct v4l2_standard v2s; - struct v4l2_ext_controls v2ecs; - struct v4l2_event v2ev; - struct v4l2_create_buffers v2crt; - struct v4l2_edid v2edid; - unsigned long vx; - int vi; - } karg; void __user *up = compat_ptr(arg); + void __user *up_native = NULL; + void __user *aux_buf; + u32 aux_space; int compatible_arg = 1; long err = 0; @@ -913,6 +1101,8 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar case VIDIOC_STREAMOFF32: cmd = VIDIOC_STREAMOFF; break; case VIDIOC_G_INPUT32: cmd = VIDIOC_G_INPUT; break; case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break; + case VIDIOC_SUBDEV_G_ROUTING32: cmd = VIDIOC_SUBDEV_G_ROUTING; break; + case VIDIOC_SUBDEV_S_ROUTING32: cmd = VIDIOC_SUBDEV_S_ROUTING; break; case VIDIOC_G_OUTPUT32: cmd = VIDIOC_G_OUTPUT; break; case VIDIOC_S_OUTPUT32: cmd = VIDIOC_S_OUTPUT; break; case VIDIOC_CREATE_BUFS32: cmd = VIDIOC_CREATE_BUFS; break; @@ -927,30 +1117,60 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar case VIDIOC_STREAMOFF: case VIDIOC_S_INPUT: case VIDIOC_S_OUTPUT: - err = get_user(karg.vi, (s32 __user *)up); + err = alloc_userspace(sizeof(unsigned int), 0, &up_native); + if (!err && assign_in_user((unsigned int __user *)up_native, + (compat_uint_t __user *)up)) + err = -EFAULT; compatible_arg = 0; break; case VIDIOC_G_INPUT: case VIDIOC_G_OUTPUT: + err = alloc_userspace(sizeof(unsigned int), 0, &up_native); + compatible_arg = 0; + break; + + case VIDIOC_SUBDEV_G_ROUTING: + case VIDIOC_SUBDEV_S_ROUTING: + err = alloc_userspace(sizeof(struct v4l2_subdev_routing), 0, &up_native); + if (!err) + err = get_v4l2_subdev_routing(up_native, up); 
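	/*
	 * Editor's note (annotation, not part of the original patch): the
	 * refactor in this file replaces the old on-stack union plus
	 * set_fs(KERNEL_DS) trick with a second __user buffer obtained from
	 * compat_alloc_user_space() (see alloc_userspace() above), so every
	 * field is moved with user-to-user helpers. assign_in_user() is
	 * introduced earlier in this patch; a minimal sketch of how such a
	 * helper is typically built, guarded so it cannot clash with the
	 * real definition:
	 */
#ifndef assign_in_user
#define assign_in_user(to, from)					\
({									\
	typeof(*(from)) __assign_tmp;					\
									\
	/* non-zero (i.e. failure) if either user access faults */	\
	get_user(__assign_tmp, (from)) || put_user(__assign_tmp, (to));	\
})
#endif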
compatible_arg = 0; break; case VIDIOC_G_EDID: case VIDIOC_S_EDID: - err = get_v4l2_edid32(&karg.v2edid, up); + err = alloc_userspace(sizeof(struct v4l2_edid), 0, &up_native); + if (!err) + err = get_v4l2_edid32(up_native, up); compatible_arg = 0; break; case VIDIOC_G_FMT: case VIDIOC_S_FMT: case VIDIOC_TRY_FMT: - err = get_v4l2_format32(&karg.v2f, up); + err = bufsize_v4l2_format(up, &aux_space); + if (!err) + err = alloc_userspace(sizeof(struct v4l2_format), + aux_space, &up_native); + if (!err) { + aux_buf = up_native + sizeof(struct v4l2_format); + err = get_v4l2_format32(up_native, up, + aux_buf, aux_space); + } compatible_arg = 0; break; case VIDIOC_CREATE_BUFS: - err = get_v4l2_create32(&karg.v2crt, up); + err = bufsize_v4l2_create(up, &aux_space); + if (!err) + err = alloc_userspace(sizeof(struct v4l2_create_buffers), + aux_space, &up_native); + if (!err) { + aux_buf = up_native + sizeof(struct v4l2_create_buffers); + err = get_v4l2_create32(up_native, up, + aux_buf, aux_space); + } compatible_arg = 0; break; @@ -958,36 +1178,63 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar case VIDIOC_QUERYBUF: case VIDIOC_QBUF: case VIDIOC_DQBUF: - err = get_v4l2_buffer32(&karg.v2b, up); + err = bufsize_v4l2_buffer(up, &aux_space); + if (!err) + err = alloc_userspace(sizeof(struct v4l2_buffer), + aux_space, &up_native); + if (!err) { + aux_buf = up_native + sizeof(struct v4l2_buffer); + err = get_v4l2_buffer32(up_native, up, + aux_buf, aux_space); + } compatible_arg = 0; break; case VIDIOC_S_FBUF: - err = get_v4l2_framebuffer32(&karg.v2fb, up); + err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0, + &up_native); + if (!err) + err = get_v4l2_framebuffer32(up_native, up); compatible_arg = 0; break; case VIDIOC_G_FBUF: + err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0, + &up_native); compatible_arg = 0; break; case VIDIOC_ENUMSTD: - err = get_v4l2_standard32(&karg.v2s, up); + err = alloc_userspace(sizeof(struct v4l2_standard), 0, + &up_native); + if (!err) + err = get_v4l2_standard32(up_native, up); compatible_arg = 0; break; case VIDIOC_ENUMINPUT: - err = get_v4l2_input32(&karg.v2i, up); + err = alloc_userspace(sizeof(struct v4l2_input), 0, &up_native); + if (!err) + err = get_v4l2_input32(up_native, up); compatible_arg = 0; break; case VIDIOC_G_EXT_CTRLS: case VIDIOC_S_EXT_CTRLS: case VIDIOC_TRY_EXT_CTRLS: - err = get_v4l2_ext_controls32(&karg.v2ecs, up); + err = bufsize_v4l2_ext_controls(up, &aux_space); + if (!err) + err = alloc_userspace(sizeof(struct v4l2_ext_controls), + aux_space, &up_native); + if (!err) { + aux_buf = up_native + sizeof(struct v4l2_ext_controls); + err = get_v4l2_ext_controls32(file, up_native, up, + aux_buf, aux_space); + } compatible_arg = 0; break; case VIDIOC_DQEVENT: + err = alloc_userspace(sizeof(struct v4l2_event), 0, &up_native); compatible_arg = 0; break; } @@ -996,28 +1243,34 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar if (compatible_arg) err = native_ioctl(file, cmd, (unsigned long)up); - else { - mm_segment_t old_fs = get_fs(); + else + err = native_ioctl(file, cmd, (unsigned long)up_native); - set_fs(KERNEL_DS); - err = native_ioctl(file, cmd, (unsigned long)&karg); - set_fs(old_fs); - } + if (err == -ENOTTY) + return err; - /* Special case: even after an error we need to put the - results back for these ioctls since the error_idx will - contain information on which control failed. 
*/ + /* + * Special case: even after an error we need to put the + * results back for these ioctls since the error_idx will + * contain information on which control failed. + */ switch (cmd) { case VIDIOC_G_EXT_CTRLS: case VIDIOC_S_EXT_CTRLS: case VIDIOC_TRY_EXT_CTRLS: - if (put_v4l2_ext_controls32(&karg.v2ecs, up)) + if (put_v4l2_ext_controls32(file, up_native, up)) err = -EFAULT; break; case VIDIOC_S_EDID: - if (put_v4l2_edid32(&karg.v2edid, up)) + if (put_v4l2_edid32(up_native, up)) err = -EFAULT; break; + case VIDIOC_SUBDEV_G_ROUTING: + case VIDIOC_SUBDEV_S_ROUTING: + if (put_v4l2_subdev_routing(up_native, up)) + err = -EFAULT; + break; } if (err) return err; @@ -1027,43 +1280,46 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar case VIDIOC_S_OUTPUT: case VIDIOC_G_INPUT: case VIDIOC_G_OUTPUT: - err = put_user(((s32)karg.vi), (s32 __user *)up); + if (assign_in_user((compat_uint_t __user *)up, + ((unsigned int __user *)up_native))) + err = -EFAULT; break; case VIDIOC_G_FBUF: - err = put_v4l2_framebuffer32(&karg.v2fb, up); + err = put_v4l2_framebuffer32(up_native, up); break; case VIDIOC_DQEVENT: - err = put_v4l2_event32(&karg.v2ev, up); + err = put_v4l2_event32(up_native, up); break; case VIDIOC_G_EDID: - err = put_v4l2_edid32(&karg.v2edid, up); + err = put_v4l2_edid32(up_native, up); break; case VIDIOC_G_FMT: case VIDIOC_S_FMT: case VIDIOC_TRY_FMT: - err = put_v4l2_format32(&karg.v2f, up); + err = put_v4l2_format32(up_native, up); break; case VIDIOC_CREATE_BUFS: - err = put_v4l2_create32(&karg.v2crt, up); + err = put_v4l2_create32(up_native, up); break; + case VIDIOC_PREPARE_BUF: case VIDIOC_QUERYBUF: case VIDIOC_QBUF: case VIDIOC_DQBUF: - err = put_v4l2_buffer32(&karg.v2b, up); + err = put_v4l2_buffer32(up_native, up); break; case VIDIOC_ENUMSTD: - err = put_v4l2_standard32(&karg.v2s, up); + err = put_v4l2_standard32(up_native, up); break; case VIDIOC_ENUMINPUT: - err = put_v4l2_input32(&karg.v2i, up); + err = put_v4l2_input32(up_native, up); break; } return err; diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index dd1db678718c..8033d6f73501 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -1227,6 +1227,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, } EXPORT_SYMBOL(v4l2_ctrl_fill); +static u32 user_flags(const struct v4l2_ctrl *ctrl) +{ + u32 flags = ctrl->flags; + + if (ctrl->is_ptr) + flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD; + + return flags; +} + static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes) { memset(ev->reserved, 0, sizeof(ev->reserved)); @@ -1234,7 +1244,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change ev->id = ctrl->id; ev->u.ctrl.changes = changes; ev->u.ctrl.type = ctrl->type; - ev->u.ctrl.flags = ctrl->flags; + ev->u.ctrl.flags = user_flags(ctrl); if (ctrl->is_ptr) ev->u.ctrl.value64 = 0; else @@ -2577,10 +2587,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr else qc->id = ctrl->id; strlcpy(qc->name, ctrl->name, sizeof(qc->name)); - qc->flags = ctrl->flags; + qc->flags = user_flags(ctrl); qc->type = ctrl->type; - if (ctrl->is_ptr) - qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD; qc->elem_size = ctrl->elem_size; qc->elems = ctrl->elems; qc->nr_of_dims = ctrl->nr_of_dims; diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c 
b/drivers/media/v4l2-core/v4l2-ioctl.c index b60a6b0841d1..749240e9bf5e 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -19,6 +19,7 @@ #include #include +#include #include #include @@ -40,6 +41,10 @@ #define is_valid_ioctl(vfd, cmd) test_bit(_IOC_NR(cmd), (vfd)->valid_ioctls) +#define v4l2_buf_type_type(type) ((type) & V4L2_BUF_TYPE_TYPE_MASK) +#define v4l2_buf_type_substream(type) (((type) & V4L2_BUF_TYPE_SUBSTREAM_MASK) \ + >> V4L2_BUF_TYPE_SUBSTREAM_SHIFT) + struct std_descr { v4l2_std_id std; const char *descr; @@ -187,9 +192,11 @@ static void v4l_print_enuminput(const void *arg, bool write_only) { const struct v4l2_input *p = arg; - pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, std=0x%08Lx, status=0x%x, capabilities=0x%x\n", - p->index, (int)sizeof(p->name), p->name, p->type, p->audioset, - p->tuner, (unsigned long long)p->std, p->status, + pr_cont("index=%u, name=%.*s, type=%u, substream=%u, audioset=0x%x, " + "tuner=%u, std=0x%08Lx, status=0x%x, capabilities=0x%x\n", + p->index, (int)sizeof(p->name), p->name, + v4l2_buf_type_type(p->type), v4l2_buf_type_substream(p->type), + p->audioset, p->tuner, (unsigned long long)p->std, p->status, p->capabilities); } @@ -197,9 +204,12 @@ static void v4l_print_enumoutput(const void *arg, bool write_only) { const struct v4l2_output *p = arg; - pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, modulator=%u, std=0x%08Lx, capabilities=0x%x\n", - p->index, (int)sizeof(p->name), p->name, p->type, p->audioset, - p->modulator, (unsigned long long)p->std, p->capabilities); + pr_cont("index=%u, name=%.*s, type=%u, substream=%u, audioset=0x%x, " + "modulator=%u, std=0x%08Lx, capabilities=0x%x\n", + p->index, (int)sizeof(p->name), p->name, + v4l2_buf_type_type(p->type), v4l2_buf_type_substream(p->type), + p->audioset, p->modulator, (unsigned long long)p->std, + p->capabilities); } static void v4l_print_audio(const void *arg, bool write_only) @@ -230,8 +240,11 @@ static void v4l_print_fmtdesc(const void *arg, bool write_only) { const struct v4l2_fmtdesc *p = arg; - pr_cont("index=%u, type=%s, flags=0x%x, pixelformat=%c%c%c%c, description='%.*s'\n", - p->index, prt_names(p->type, v4l2_type_names), + pr_cont("index=%u, type=%s, substream=%u, flags=0x%x, " + "pixelformat=%c%c%c%c, description='%.*s'\n", + p->index, prt_names(v4l2_buf_type_type(p->type), + v4l2_type_names), + v4l2_buf_type_substream(p->type), p->flags, (p->pixelformat & 0xff), (p->pixelformat >> 8) & 0xff, (p->pixelformat >> 16) & 0xff, @@ -251,8 +264,9 @@ static void v4l_print_format(const void *arg, bool write_only) const struct v4l2_meta_format *meta; unsigned i; - pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); - switch (p->type) { + pr_cont("type=%s, substream=%u", prt_names(v4l2_buf_type_type(p->type), + v4l2_type_names), v4l2_buf_type_substream(p->type)); + switch (v4l2_buf_type_type(p->type)) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: pix = &p->fmt.pix; @@ -357,7 +371,8 @@ static void v4l_print_framebuffer(const void *arg, bool write_only) static void v4l_print_buftype(const void *arg, bool write_only) { - pr_cont("type=%s\n", prt_names(*(u32 *)arg, v4l2_type_names)); + pr_cont("type=%s\n", prt_names(v4l2_buf_type_type(*(u32 *)arg), + v4l2_type_names)); } static void v4l_print_modulator(const void *arg, bool write_only) @@ -379,8 +394,12 @@ static void v4l_print_tuner(const void *arg, bool write_only) if (write_only) pr_cont("index=%u, audmode=%u\n", p->index, p->audmode); else - 
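	/*
	 * Editor's note (annotation, not part of the original patch): the
	 * v4l2_buf_type_type()/v4l2_buf_type_substream() helpers added at the
	 * top of this file unpack a single u32: the low bits keep the classic
	 * enum v4l2_buf_type value and the masked high bits carry a substream
	 * index. Assuming, say, an 8-bit substream field, a caller would pass
	 *
	 *	type = V4L2_BUF_TYPE_VIDEO_CAPTURE |
	 *	       (2 << V4L2_BUF_TYPE_SUBSTREAM_SHIFT)
	 *
	 * to address substream 2 of a capture queue; the actual mask and
	 * shift values live in the uapi header changes elsewhere in this
	 * series.
	 */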
pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, rangelow=%u, rangehigh=%u, signal=%u, afc=%d, rxsubchans=0x%x, audmode=%u\n", - p->index, (int)sizeof(p->name), p->name, p->type, + pr_cont("index=%u, name=%.*s, type=%u, substream %u, " + "capability=0x%x, rangelow=%u, rangehigh=%u, " + "signal=%u, afc=%d, rxsubchans=0x%x, audmode=%u\n", + p->index, (int)sizeof(p->name), p->name, + v4l2_buf_type_type(p->type), + v4l2_buf_type_substream(p->type), p->capability, p->rangelow, p->rangehigh, p->signal, p->afc, p->rxsubchans, p->audmode); @@ -390,8 +409,9 @@ static void v4l_print_frequency(const void *arg, bool write_only) { const struct v4l2_frequency *p = arg; - pr_cont("tuner=%u, type=%u, frequency=%u\n", - p->tuner, p->type, p->frequency); + pr_cont("tuner=%u, type=%u, substream=%u, frequency=%u\n", + p->tuner, v4l2_buf_type_type(p->type), + v4l2_buf_type_substream(p->type), p->frequency); } static void v4l_print_standard(const void *arg, bool write_only) @@ -415,18 +435,21 @@ static void v4l_print_hw_freq_seek(const void *arg, bool write_only) { const struct v4l2_hw_freq_seek *p = arg; - pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, rangelow=%u, rangehigh=%u\n", - p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing, - p->rangelow, p->rangehigh); + pr_cont("tuner=%u, type=%u, substream=%u, seek_upward=%u, " + "wrap_around=%u, spacing=%u, rangelow=%u, rangehigh=%u\n", + p->tuner, v4l2_buf_type_type(p->type), + v4l2_buf_type_substream(p->type), p->seek_upward, + p->wrap_around, p->spacing, p->rangelow, p->rangehigh); } static void v4l_print_requestbuffers(const void *arg, bool write_only) { const struct v4l2_requestbuffers *p = arg; - pr_cont("count=%d, type=%s, memory=%s\n", + pr_cont("count=%d, type=i%s, substream=%u, memory=%s\n", p->count, - prt_names(p->type, v4l2_type_names), + prt_names(v4l2_buf_type_type(p->type), v4l2_type_names), + v4l2_buf_type_substream(p->type), prt_names(p->memory, v4l2_memory_names)); } @@ -437,17 +460,20 @@ static void v4l_print_buffer(const void *arg, bool write_only) const struct v4l2_plane *plane; int i; - pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, flags=0x%08x, field=%s, sequence=%d, memory=%s", - p->timestamp.tv_sec / 3600, - (int)(p->timestamp.tv_sec / 60) % 60, - (int)(p->timestamp.tv_sec % 60), - (long)p->timestamp.tv_usec, - p->index, - prt_names(p->type, v4l2_type_names), - p->flags, prt_names(p->field, v4l2_field_names), - p->sequence, prt_names(p->memory, v4l2_memory_names)); + pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, request %u, stream=%u, " + "flags=0x%08x, field=%s, sequence=%d, memory=%s", + p->timestamp.tv_sec / 3600, + (int)(p->timestamp.tv_sec / 60) % 60, + (int)(p->timestamp.tv_sec % 60), + (long)p->timestamp.tv_usec, + p->index, + prt_names(v4l2_buf_type_type(p->type), v4l2_type_names), + p->request, v4l2_buf_type_substream(p->type), + p->flags, prt_names(p->field, v4l2_field_names), + p->sequence, prt_names(p->memory, v4l2_memory_names)); - if (V4L2_TYPE_IS_MULTIPLANAR(p->type) && p->m.planes) { + if (V4L2_TYPE_IS_MULTIPLANAR(v4l2_buf_type_type(p->type)) && + p->m.planes) { pr_cont("\n"); for (i = 0; i < p->length; ++i) { plane = &p->m.planes[i]; @@ -470,9 +496,11 @@ static void v4l_print_exportbuffer(const void *arg, bool write_only) { const struct v4l2_exportbuffer *p = arg; - pr_cont("fd=%d, type=%s, index=%u, plane=%u, flags=0x%08x\n", - p->fd, prt_names(p->type, v4l2_type_names), - p->index, p->plane, p->flags); + pr_cont("fd=%d, type=%s, substream=%u, index=%u, plane=%u, " + 
"flags=0x%08x\n", + p->fd, prt_names(v4l2_buf_type_type(p->type), + v4l2_type_names), + v4l2_buf_type_substream(p->type), p->index, p->plane, p->flags); } static void v4l_print_create_buffers(const void *arg, bool write_only) @@ -488,19 +516,22 @@ static void v4l_print_create_buffers(const void *arg, bool write_only) static void v4l_print_streamparm(const void *arg, bool write_only) { const struct v4l2_streamparm *p = arg; + u32 buf_type = v4l2_buf_type_type(p->type); - pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); + pr_cont("type=%s, substream=%u", prt_names(v4l2_buf_type_type(p->type), + v4l2_type_names), + v4l2_buf_type_substream(p->type)); - if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE || - p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE || + buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { const struct v4l2_captureparm *c = &p->parm.capture; pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, extendedmode=%d, readbuffers=%d\n", c->capability, c->capturemode, c->timeperframe.numerator, c->timeperframe.denominator, c->extendedmode, c->readbuffers); - } else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT || - p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + } else if (buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT || + buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { const struct v4l2_outputparm *c = &p->parm.output; pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, extendedmode=%d, writebuffers=%d\n", @@ -570,8 +601,11 @@ static void v4l_print_cropcap(const void *arg, bool write_only) { const struct v4l2_cropcap *p = arg; - pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, defrect wxh=%dx%d, x,y=%d,%d, pixelaspect %d/%d\n", - prt_names(p->type, v4l2_type_names), + pr_cont("type=%s, substream=%u, bounds wxh=%dx%d, x,y=%d,%d, " + "defrect wxh=%dx%d, x,y=%d,%d, " + "pixelaspect %d/%d\n", + prt_names(v4l2_buf_type_type(p->type), v4l2_type_names), + v4l2_buf_type_substream(p->type), p->bounds.width, p->bounds.height, p->bounds.left, p->bounds.top, p->defrect.width, p->defrect.height, @@ -583,8 +617,9 @@ static void v4l_print_crop(const void *arg, bool write_only) { const struct v4l2_crop *p = arg; - pr_cont("type=%s, wxh=%dx%d, x,y=%d,%d\n", - prt_names(p->type, v4l2_type_names), + pr_cont("type=%s, substream=%u, wxh=%dx%d, x,y=%d,%d\n", + prt_names(v4l2_buf_type_type(p->type), v4l2_type_names), + v4l2_buf_type_substream(p->type), p->c.width, p->c.height, p->c.left, p->c.top); } @@ -593,8 +628,10 @@ static void v4l_print_selection(const void *arg, bool write_only) { const struct v4l2_selection *p = arg; - pr_cont("type=%s, target=%d, flags=0x%x, wxh=%dx%d, x,y=%d,%d\n", - prt_names(p->type, v4l2_type_names), + pr_cont("type=%s, substream=%u, target=%d, flags=0x%x, wxh=%dx%d, " + "x,y=%d,%d\n", + prt_names(v4l2_buf_type_type(p->type), v4l2_type_names), + v4l2_buf_type_substream(p->type), p->target, p->flags, p->r.width, p->r.height, p->r.left, p->r.top); } @@ -822,7 +859,8 @@ static void v4l_print_sliced_vbi_cap(const void *arg, bool write_only) int i; pr_cont("type=%s, service_set=0x%08x\n", - prt_names(p->type, v4l2_type_names), p->service_set); + prt_names(v4l2_buf_type_type(p->type), v4l2_type_names), + p->service_set); for (i = 0; i < 24; i++) printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i, p->service_lines[0][i], @@ -890,7 +928,21 @@ static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv) return 1; } -static int check_fmt(struct file *file, enum v4l2_buf_type type) +static int 
check_buf_type(struct file *file, u32 type) +{ + struct video_device *vfd = video_devdata(file); + + if (type & ~(V4L2_BUF_TYPE_TYPE_MASK | V4L2_BUF_TYPE_SUBSTREAM_MASK)) + return -EINVAL; + + if (!test_bit(V4L2_FL_HAS_SUB_STREAMS, &vfd->flags) && + v4l2_buf_type_substream(type)) + return -EINVAL; + + return 0; +} + +static int check_fmt(struct file *file, u32 type) { struct video_device *vfd = video_devdata(file); const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops; @@ -904,7 +956,10 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type) if (ops == NULL) return -EINVAL; - switch (type) { + if (check_buf_type(file, type)) + return -EINVAL; + + switch (v4l2_buf_type_type(type)) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: if ((is_vid || is_tch) && is_rx && (ops->vidioc_g_fmt_vid_cap || ops->vidioc_g_fmt_vid_cap_mplane)) @@ -1308,52 +1363,50 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_fmtdesc *p = arg; - struct video_device *vfd = video_devdata(file); - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; - bool is_rx = vfd->vfl_dir != VFL_DIR_TX; - bool is_tx = vfd->vfl_dir != VFL_DIR_RX; - int ret = -EINVAL; + int ret = check_fmt(file, p->type); - switch (p->type) { + if (ret) + return ret; + ret = -EINVAL; + + switch (v4l2_buf_type_type(p->type)) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_enum_fmt_vid_cap)) + if (unlikely(!ops->vidioc_enum_fmt_vid_cap)) break; ret = ops->vidioc_enum_fmt_vid_cap(file, fh, arg); break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_cap_mplane)) + if (unlikely(!ops->vidioc_enum_fmt_vid_cap_mplane)) break; ret = ops->vidioc_enum_fmt_vid_cap_mplane(file, fh, arg); break; case V4L2_BUF_TYPE_VIDEO_OVERLAY: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_overlay)) + if (unlikely(!ops->vidioc_enum_fmt_vid_overlay)) break; ret = ops->vidioc_enum_fmt_vid_overlay(file, fh, arg); break; case V4L2_BUF_TYPE_VIDEO_OUTPUT: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_enum_fmt_vid_out)) + if (unlikely(!ops->vidioc_enum_fmt_vid_out)) break; ret = ops->vidioc_enum_fmt_vid_out(file, fh, arg); break; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_enum_fmt_vid_out_mplane)) + if (unlikely(!ops->vidioc_enum_fmt_vid_out_mplane)) break; ret = ops->vidioc_enum_fmt_vid_out_mplane(file, fh, arg); break; case V4L2_BUF_TYPE_SDR_CAPTURE: - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_enum_fmt_sdr_cap)) + if (unlikely(!ops->vidioc_enum_fmt_sdr_cap)) break; ret = ops->vidioc_enum_fmt_sdr_cap(file, fh, arg); break; case V4L2_BUF_TYPE_SDR_OUTPUT: - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_enum_fmt_sdr_out)) + if (unlikely(!ops->vidioc_enum_fmt_sdr_out)) break; ret = ops->vidioc_enum_fmt_sdr_out(file, fh, arg); break; case V4L2_BUF_TYPE_META_CAPTURE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_meta_cap)) + if (unlikely(!ops->vidioc_enum_fmt_meta_cap)) break; ret = ops->vidioc_enum_fmt_meta_cap(file, fh, arg); break; @@ -1367,13 +1420,10 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_format *p = arg; - struct video_device *vfd = video_devdata(file); - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; - bool is_tch = vfd->vfl_type == 
VFL_TYPE_TOUCH; - bool is_rx = vfd->vfl_dir != VFL_DIR_TX; - bool is_tx = vfd->vfl_dir != VFL_DIR_RX; - int ret; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; /* * fmt can't be cleared for these overlay types due to the 'clips' @@ -1399,9 +1449,9 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, break; } - switch (p->type) { + switch (v4l2_buf_type_type(p->type)) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_g_fmt_vid_cap)) + if (unlikely(!ops->vidioc_g_fmt_vid_cap)) break; p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg); @@ -1409,23 +1459,15 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_cap_mplane)) - break; return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OVERLAY: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_overlay)) - break; return ops->vidioc_g_fmt_vid_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_g_fmt_vbi_cap)) - break; return ops->vidioc_g_fmt_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_cap)) - break; return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out)) + if (unlikely(!ops->vidioc_g_fmt_vid_out)) break; p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; ret = ops->vidioc_g_fmt_vid_out(file, fh, arg); @@ -1433,32 +1475,18 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out_mplane)) - break; return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_g_fmt_vid_out_overlay)) - break; return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_vbi_out)) - break; return ops->vidioc_g_fmt_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_g_fmt_sliced_vbi_out)) - break; return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SDR_CAPTURE: - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_g_fmt_sdr_cap)) - break; return ops->vidioc_g_fmt_sdr_cap(file, fh, arg); case V4L2_BUF_TYPE_SDR_OUTPUT: - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_g_fmt_sdr_out)) - break; return ops->vidioc_g_fmt_sdr_out(file, fh, arg); case V4L2_BUF_TYPE_META_CAPTURE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_meta_cap)) - break; return ops->vidioc_g_fmt_meta_cap(file, fh, arg); } return -EINVAL; @@ -1484,51 +1512,49 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, { struct v4l2_format *p = arg; struct video_device *vfd = video_devdata(file); - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; - bool is_rx = vfd->vfl_dir != VFL_DIR_TX; - bool is_tx = vfd->vfl_dir != VFL_DIR_RX; - int ret; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; ret = v4l_enable_media_source(vfd); if (ret) return ret; v4l_sanitize_format(p); - switch 
(p->type) { + switch (v4l2_buf_type_type(p->type)) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_s_fmt_vid_cap)) + if (unlikely(!ops->vidioc_s_fmt_vid_cap)) break; CLEAR_AFTER_FIELD(p, fmt.pix); ret = ops->vidioc_s_fmt_vid_cap(file, fh, arg); /* just in case the driver zeroed it again */ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; - if (is_tch) + if (vfd->vfl_type == VFL_TYPE_TOUCH) v4l_pix_format_touch(&p->fmt.pix); return ret; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_cap_mplane)) + if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OVERLAY: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_overlay)) + if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) break; CLEAR_AFTER_FIELD(p, fmt.win); return ops->vidioc_s_fmt_vid_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_s_fmt_vbi_cap)) + if (unlikely(!ops->vidioc_s_fmt_vbi_cap)) break; CLEAR_AFTER_FIELD(p, fmt.vbi); return ops->vidioc_s_fmt_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_s_fmt_sliced_vbi_cap)) + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap)) break; CLEAR_AFTER_FIELD(p, fmt.sliced); return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out)) + if (unlikely(!ops->vidioc_s_fmt_vid_out)) break; CLEAR_AFTER_FIELD(p, fmt.pix); ret = ops->vidioc_s_fmt_vid_out(file, fh, arg); @@ -1536,37 +1562,37 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_mplane)) + if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_overlay)) + if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) break; CLEAR_AFTER_FIELD(p, fmt.win); return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_s_fmt_vbi_out)) + if (unlikely(!ops->vidioc_s_fmt_vbi_out)) break; CLEAR_AFTER_FIELD(p, fmt.vbi); return ops->vidioc_s_fmt_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_s_fmt_sliced_vbi_out)) + if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out)) break; CLEAR_AFTER_FIELD(p, fmt.sliced); return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SDR_CAPTURE: - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_s_fmt_sdr_cap)) + if (unlikely(!ops->vidioc_s_fmt_sdr_cap)) break; CLEAR_AFTER_FIELD(p, fmt.sdr); return ops->vidioc_s_fmt_sdr_cap(file, fh, arg); case V4L2_BUF_TYPE_SDR_OUTPUT: - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_s_fmt_sdr_out)) + if (unlikely(!ops->vidioc_s_fmt_sdr_out)) break; CLEAR_AFTER_FIELD(p, fmt.sdr); return ops->vidioc_s_fmt_sdr_out(file, fh, arg); case V4L2_BUF_TYPE_META_CAPTURE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_meta_cap)) + if (unlikely(!ops->vidioc_s_fmt_meta_cap)) break; CLEAR_AFTER_FIELD(p, fmt.meta); return ops->vidioc_s_fmt_meta_cap(file, fh, arg); @@ 
-1578,19 +1604,16 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_format *p = arg; - struct video_device *vfd = video_devdata(file); - bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER; - bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR; - bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH; - bool is_rx = vfd->vfl_dir != VFL_DIR_TX; - bool is_tx = vfd->vfl_dir != VFL_DIR_RX; - int ret; + int ret = check_fmt(file, p->type); + + if (ret) + return ret; v4l_sanitize_format(p); - switch (p->type) { + switch (v4l2_buf_type_type(p->type)) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: - if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_try_fmt_vid_cap)) + if (unlikely(!ops->vidioc_try_fmt_vid_cap)) break; CLEAR_AFTER_FIELD(p, fmt.pix); ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg); @@ -1598,27 +1621,27 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_cap_mplane)) + if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OVERLAY: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_overlay)) + if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) break; CLEAR_AFTER_FIELD(p, fmt.win); return ops->vidioc_try_fmt_vid_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_try_fmt_vbi_cap)) + if (unlikely(!ops->vidioc_try_fmt_vbi_cap)) break; CLEAR_AFTER_FIELD(p, fmt.vbi); return ops->vidioc_try_fmt_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: - if (unlikely(!is_rx || is_vid || !ops->vidioc_try_fmt_sliced_vbi_cap)) + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap)) break; CLEAR_AFTER_FIELD(p, fmt.sliced); return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out)) + if (unlikely(!ops->vidioc_try_fmt_vid_out)) break; CLEAR_AFTER_FIELD(p, fmt.pix); ret = ops->vidioc_try_fmt_vid_out(file, fh, arg); @@ -1626,37 +1649,37 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC; return ret; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_mplane)) + if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: - if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_overlay)) + if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) break; CLEAR_AFTER_FIELD(p, fmt.win); return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg); case V4L2_BUF_TYPE_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_try_fmt_vbi_out)) + if (unlikely(!ops->vidioc_try_fmt_vbi_out)) break; CLEAR_AFTER_FIELD(p, fmt.vbi); return ops->vidioc_try_fmt_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: - if (unlikely(!is_tx || is_vid || !ops->vidioc_try_fmt_sliced_vbi_out)) + if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out)) break; CLEAR_AFTER_FIELD(p, fmt.sliced); return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg); case V4L2_BUF_TYPE_SDR_CAPTURE: - if (unlikely(!is_rx || !is_sdr || !ops->vidioc_try_fmt_sdr_cap)) + if (unlikely(!ops->vidioc_try_fmt_sdr_cap)) 
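	/*
	 * Editor's note (annotation, not part of the original patch): the
	 * per-case is_rx/is_tx/is_vid/is_sdr gating that used to live here
	 * is now performed once by check_fmt(), which v4l_try_fmt() -- like
	 * v4l_g_fmt() and v4l_s_fmt() above -- calls on entry, so each case
	 * only has to verify that the driver implements the matching op.
	 */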
break; CLEAR_AFTER_FIELD(p, fmt.sdr); return ops->vidioc_try_fmt_sdr_cap(file, fh, arg); case V4L2_BUF_TYPE_SDR_OUTPUT: - if (unlikely(!is_tx || !is_sdr || !ops->vidioc_try_fmt_sdr_out)) + if (unlikely(!ops->vidioc_try_fmt_sdr_out)) break; CLEAR_AFTER_FIELD(p, fmt.sdr); return ops->vidioc_try_fmt_sdr_out(file, fh, arg); case V4L2_BUF_TYPE_META_CAPTURE: - if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_meta_cap)) + if (unlikely(!ops->vidioc_try_fmt_meta_cap)) break; CLEAR_AFTER_FIELD(p, fmt.meta); return ops->vidioc_try_fmt_meta_cap(file, fh, arg); @@ -1959,9 +1982,13 @@ static int v4l_g_parm(const struct v4l2_ioctl_ops *ops, return ret; if (ops->vidioc_g_parm) return ops->vidioc_g_parm(file, fh, p); - if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && - p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + switch (v4l2_buf_type_type(p->type)) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + break; + default: return -EINVAL; + } p->parm.capture.readbuffers = 2; ret = ops->vidioc_g_std(file, fh, &std); if (ret == 0) @@ -2204,7 +2231,7 @@ static int v4l_g_crop(const struct v4l2_ioctl_ops *ops, /* simulate capture crop using selection api */ /* crop means compose for output devices */ - if (V4L2_TYPE_IS_OUTPUT(p->type)) + if (V4L2_TYPE_IS_OUTPUT(v4l2_buf_type_type(p->type))) s.target = V4L2_SEL_TGT_COMPOSE_ACTIVE; else s.target = V4L2_SEL_TGT_CROP_ACTIVE; @@ -2226,12 +2253,15 @@ static int v4l_s_crop(const struct v4l2_ioctl_ops *ops, .r = p->c, }; + if (check_buf_type(file, p->type)) + return -EINVAL; + if (ops->vidioc_s_crop) return ops->vidioc_s_crop(file, fh, p); /* simulate capture crop using selection api */ /* crop means compose for output devices */ - if (V4L2_TYPE_IS_OUTPUT(p->type)) + if (V4L2_TYPE_IS_OUTPUT(v4l2_buf_type_type(p->type))) s.target = V4L2_SEL_TGT_COMPOSE_ACTIVE; else s.target = V4L2_SEL_TGT_CROP_ACTIVE; @@ -2243,7 +2273,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct v4l2_cropcap *p = arg; - struct v4l2_selection s = { .type = p->type }; + struct v4l2_selection s = { .type = v4l2_buf_type_type(p->type) }; int ret = 0; /* setting trivial pixelaspect */ @@ -2257,6 +2287,9 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops, if (WARN_ON(!ops->vidioc_cropcap && !ops->vidioc_g_selection)) return -ENOTTY; + if (check_buf_type(file, p->type)) + return -EINVAL; + if (ops->vidioc_cropcap) ret = ops->vidioc_cropcap(file, fh, p); @@ -2273,7 +2306,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops, /* Use g_selection() to fill in the bounds and defrect rectangles */ /* obtaining bounds */ - if (V4L2_TYPE_IS_OUTPUT(p->type)) + if (V4L2_TYPE_IS_OUTPUT(v4l2_buf_type_type(p->type))) s.target = V4L2_SEL_TGT_COMPOSE_BOUNDS; else s.target = V4L2_SEL_TGT_CROP_BOUNDS; @@ -2284,7 +2317,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops, p->bounds = s.r; /* obtaining defrect */ - if (V4L2_TYPE_IS_OUTPUT(p->type)) + if (V4L2_TYPE_IS_OUTPUT(v4l2_buf_type_type(p->type))) s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT; else s.target = V4L2_SEL_TGT_CROP_DEFAULT; @@ -2512,11 +2545,8 @@ struct v4l2_ioctl_info { unsigned int ioctl; u32 flags; const char * const name; - union { - u32 offset; - int (*func)(const struct v4l2_ioctl_ops *ops, - struct file *file, void *fh, void *p); - } u; + int (*func)(const struct v4l2_ioctl_ops *ops, struct file *file, + void *fh, void *p); void (*debug)(const void *arg, bool write_only); }; @@ -2524,27 +2554,23 @@ struct v4l2_ioctl_info { #define 
INFO_FL_PRIO (1 << 0) /* This control can be valid if the filehandle passes a control handler. */ #define INFO_FL_CTRL (1 << 1) -/* This is a standard ioctl, no need for special code */ -#define INFO_FL_STD (1 << 2) /* This is ioctl has its own function */ -#define INFO_FL_FUNC (1 << 3) +#define INFO_FL_FUNC (1 << 2) /* Queuing ioctl */ -#define INFO_FL_QUEUE (1 << 4) +#define INFO_FL_QUEUE (1 << 3) /* Always copy back result, even on error */ -#define INFO_FL_ALWAYS_COPY (1 << 5) +#define INFO_FL_ALWAYS_COPY (1 << 4) /* Zero struct from after the field to the end */ #define INFO_FL_CLEAR(v4l2_struct, field) \ ((offsetof(struct v4l2_struct, field) + \ sizeof(((struct v4l2_struct *)0)->field)) << 16) #define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16) -#define IOCTL_INFO_STD(_ioctl, _vidioc, _debug, _flags) \ - [_IOC_NR(_ioctl)] = { \ - .ioctl = _ioctl, \ - .flags = _flags | INFO_FL_STD, \ - .name = #_ioctl, \ - .u.offset = offsetof(struct v4l2_ioctl_ops, _vidioc), \ - .debug = _debug, \ +#define DEFINE_IOCTL_STD_FNC(_vidioc) \ + static int __v4l_ ## _vidioc ## _fnc( \ + const struct v4l2_ioctl_ops *ops, \ + struct file *file, void *fh, void *p) { \ + return ops->_vidioc(file, fh, p); \ } #define IOCTL_INFO_FNC(_ioctl, _func, _debug, _flags) \ @@ -2552,10 +2578,42 @@ struct v4l2_ioctl_info { .ioctl = _ioctl, \ .flags = _flags | INFO_FL_FUNC, \ .name = #_ioctl, \ - .u.func = _func, \ + .func = _func, \ .debug = _debug, \ } +#define IOCTL_INFO_STD(_ioctl, _vidioc, _debug, _flags) \ + IOCTL_INFO_FNC(_ioctl, __v4l_ ## _vidioc ## _fnc, _debug, _flags) + +DEFINE_IOCTL_STD_FNC(vidioc_g_fbuf) +DEFINE_IOCTL_STD_FNC(vidioc_s_fbuf) +DEFINE_IOCTL_STD_FNC(vidioc_expbuf) +DEFINE_IOCTL_STD_FNC(vidioc_g_std) +DEFINE_IOCTL_STD_FNC(vidioc_g_audio) +DEFINE_IOCTL_STD_FNC(vidioc_s_audio) +DEFINE_IOCTL_STD_FNC(vidioc_g_input) +DEFINE_IOCTL_STD_FNC(vidioc_g_edid) +DEFINE_IOCTL_STD_FNC(vidioc_s_edid) +DEFINE_IOCTL_STD_FNC(vidioc_g_output) +DEFINE_IOCTL_STD_FNC(vidioc_g_audout) +DEFINE_IOCTL_STD_FNC(vidioc_s_audout) +DEFINE_IOCTL_STD_FNC(vidioc_g_jpegcomp) +DEFINE_IOCTL_STD_FNC(vidioc_s_jpegcomp) +DEFINE_IOCTL_STD_FNC(vidioc_enumaudio) +DEFINE_IOCTL_STD_FNC(vidioc_enumaudout) +DEFINE_IOCTL_STD_FNC(vidioc_enum_framesizes) +DEFINE_IOCTL_STD_FNC(vidioc_enum_frameintervals) +DEFINE_IOCTL_STD_FNC(vidioc_g_enc_index) +DEFINE_IOCTL_STD_FNC(vidioc_encoder_cmd) +DEFINE_IOCTL_STD_FNC(vidioc_try_encoder_cmd) +DEFINE_IOCTL_STD_FNC(vidioc_decoder_cmd) +DEFINE_IOCTL_STD_FNC(vidioc_try_decoder_cmd) +DEFINE_IOCTL_STD_FNC(vidioc_s_dv_timings) +DEFINE_IOCTL_STD_FNC(vidioc_g_dv_timings) +DEFINE_IOCTL_STD_FNC(vidioc_enum_dv_timings) +DEFINE_IOCTL_STD_FNC(vidioc_query_dv_timings) +DEFINE_IOCTL_STD_FNC(vidioc_dv_timings_cap) + static struct v4l2_ioctl_info v4l2_ioctls[] = { IOCTL_INFO_FNC(VIDIOC_QUERYCAP, v4l_querycap, v4l_print_querycap, 0), IOCTL_INFO_FNC(VIDIOC_ENUM_FMT, v4l_enum_fmt, v4l_print_fmtdesc, INFO_FL_CLEAR(v4l2_fmtdesc, type)), @@ -2740,14 +2798,8 @@ static long __video_do_ioctl(struct file *file, } write_only = _IOC_DIR(cmd) == _IOC_WRITE; - if (info->flags & INFO_FL_STD) { - typedef int (*vidioc_op)(struct file *file, void *fh, void *p); - const void *p = vfd->ioctl_ops; - const vidioc_op *vidioc = p + info->u.offset; - - ret = (*vidioc)(file, fh, arg); - } else if (info->flags & INFO_FL_FUNC) { - ret = info->u.func(ops, file, fh, arg); + if (info->flags & INFO_FL_FUNC) { + ret = info->func(ops, file, fh, arg); } else if (!ops->vidioc_default) { ret = -ENOTTY; } else { @@ -2790,7 +2842,8 @@ static int 
check_array_args(unsigned int cmd, void *parg, size_t *array_size, case VIDIOC_DQBUF: { struct v4l2_buffer *buf = parg; - if (V4L2_TYPE_IS_MULTIPLANAR(buf->type) && buf->length > 0) { + if (V4L2_TYPE_IS_MULTIPLANAR(v4l2_buf_type_type(buf->type)) && + buf->length > 0) { if (buf->length > VIDEO_MAX_PLANES) { ret = -EINVAL; break; @@ -2838,6 +2891,23 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size, } break; } + + case VIDIOC_SUBDEV_G_ROUTING: + case VIDIOC_SUBDEV_S_ROUTING: { + struct v4l2_subdev_routing *route = parg; + + if (route->num_routes > 0) { + if (route->num_routes > 256) + return -EINVAL; + + *user_ptr = (void __user *)route->routes; + *kernel_ptr = (void *)&route->routes; + *array_size = sizeof(struct v4l2_subdev_route) + * route->num_routes; + ret = 1; + } + break; + } } return ret; @@ -2924,8 +2994,11 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, /* Handles IOCTL */ err = func(file, cmd, parg); - if (err == -ENOIOCTLCMD) + if (err == -ENOTTY || err == -ENOIOCTLCMD) { err = -ENOTTY; + goto out; + } + if (err == 0) { if (cmd == VIDIOC_DQBUF) trace_v4l2_dqbuf(video_devdata(file)->minor, parg); diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c index 303980b71aae..7e117cd1c383 100644 --- a/drivers/media/v4l2-core/v4l2-mc.c +++ b/drivers/media/v4l2-core/v4l2-mc.c @@ -265,12 +265,13 @@ EXPORT_SYMBOL_GPL(v4l_vb2q_enable_media_source); * * Return the total number of users of all video device nodes in the pipeline. */ -static int pipeline_pm_use_count(struct media_entity *entity, +static int pipeline_pm_use_count(struct media_pad *pad, struct media_graph *graph) { + struct media_entity *entity = pad->entity; int use = 0; - media_graph_walk_start(graph, entity); + media_graph_walk_start(graph, pad); while ((entity = media_graph_walk_next(graph))) { if (is_media_entity_v4l2_video_device(entity)) @@ -333,7 +334,7 @@ static int pipeline_pm_power(struct media_entity *entity, int change, if (!change) return 0; - media_graph_walk_start(graph, entity); + media_graph_walk_start(graph, &entity->pads[0]); while (!ret && (entity = media_graph_walk_next(graph))) if (is_media_entity_v4l2_subdev(entity)) @@ -342,7 +343,7 @@ static int pipeline_pm_power(struct media_entity *entity, int change, if (!ret) return ret; - media_graph_walk_start(graph, first); + media_graph_walk_start(graph, &first->pads[0]); while ((first = media_graph_walk_next(graph)) && first != entity) @@ -385,14 +386,17 @@ int v4l2_pipeline_link_notify(struct media_link *link, u32 flags, int sink_use; int ret = 0; - source_use = pipeline_pm_use_count(source, graph); - sink_use = pipeline_pm_use_count(sink, graph); + source_use = pipeline_pm_use_count(link->source, graph); + sink_use = pipeline_pm_use_count(link->sink, graph); if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH && !(flags & MEDIA_LNK_FL_ENABLED)) { /* Powering off entities is assumed to never fail. 
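 * (Editor's note, annotation not in the original patch: the walk helpers
 * now start from a specific pad rather than an entity, matching the new
 * pad-based pipeline_pm_use_count() above; the use_count resets added
 * below look intended to keep the entities' PM counters from going stale
 * once the link is disabled.)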
*/ pipeline_pm_power(source, -sink_use, graph); pipeline_pm_power(sink, -source_use, graph); + + source->use_count = 0; + sink->use_count = 0; return 0; } diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c index 43fefa73e0a3..be9d50657d5d 100644 --- a/drivers/media/v4l2-core/v4l2-subdev.c +++ b/drivers/media/v4l2-core/v4l2-subdev.c @@ -135,6 +135,9 @@ static int check_format(struct v4l2_subdev *sd, if (format->pad >= sd->entity.num_pads) return -EINVAL; + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_SUBSTREAMS) && format->stream) + return -EINVAL; + return 0; } @@ -160,6 +163,9 @@ static int check_selection(struct v4l2_subdev *sd, if (sel->pad >= sd->entity.num_pads) return -EINVAL; + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_SUBSTREAMS) && sel->stream) + return -EINVAL; + return 0; } @@ -444,7 +450,42 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg) case VIDIOC_SUBDEV_S_DV_TIMINGS: return v4l2_subdev_call(sd, video, s_dv_timings, arg); + + case VIDIOC_SUBDEV_G_ROUTING: + return v4l2_subdev_call(sd, pad, get_routing, arg); + + case VIDIOC_SUBDEV_S_ROUTING: { + struct v4l2_subdev_routing *route = arg; + unsigned int i; + int rval; + + if (route->num_routes > sd->entity.num_pads) + return -EINVAL; + + for (i = 0; i < route->num_routes; ++i) { + unsigned int sink = route->routes[i].sink_pad; + unsigned int source = route->routes[i].source_pad; + struct media_pad *pads = sd->entity.pads; + + if (sink >= sd->entity.num_pads || + source >= sd->entity.num_pads) + return -EINVAL; + + if ((!(route->routes[i].flags & + V4L2_SUBDEV_ROUTE_FL_SOURCE) && + !(pads[sink].flags & MEDIA_PAD_FL_SINK)) || + !(pads[source].flags & MEDIA_PAD_FL_SOURCE)) + return -EINVAL; + } + + mutex_lock(&sd->entity.graph_obj.mdev->graph_mutex); + rval = v4l2_subdev_call(sd, pad, set_routing, route); + mutex_unlock(&sd->entity.graph_obj.mdev->graph_mutex); + + return rval; + } #endif + default: return v4l2_subdev_call(sd, core, ioctl, cmd, arg); } @@ -517,6 +558,9 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd, sink_fmt->format.field != V4L2_FIELD_NONE) return -EPIPE; + if (source_fmt->stream != sink_fmt->stream) + return -EINVAL; + return 0; } EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default); @@ -541,19 +585,21 @@ v4l2_subdev_link_validate_get_format(struct media_pad *pad, return -EINVAL; } -int v4l2_subdev_link_validate(struct media_link *link) +static int v4l2_subdev_link_validate_one(struct media_link *link, + struct media_pad *source_pad, unsigned int source_stream, + struct media_pad *sink_pad, unsigned int sink_stream) { struct v4l2_subdev *sink; struct v4l2_subdev_format sink_fmt, source_fmt; int rval; - rval = v4l2_subdev_link_validate_get_format( - link->source, &source_fmt); + source_fmt.stream = source_stream; + rval = v4l2_subdev_link_validate_get_format(source_pad, &source_fmt); if (rval < 0) return 0; - rval = v4l2_subdev_link_validate_get_format( - link->sink, &sink_fmt); + sink_fmt.stream = sink_stream; + rval = v4l2_subdev_link_validate_get_format(sink_pad, &sink_fmt); if (rval < 0) return 0; @@ -567,6 +613,129 @@ int v4l2_subdev_link_validate(struct media_link *link) return v4l2_subdev_link_validate_default( sink, link, &source_fmt, &sink_fmt); } + +/* How many routes to assume there can be per a sub-device? 
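 * (Editor's note, annotation not in the original patch: the routing tables
 * below are fixed-size on-stack arrays, so a subdev whose get_routing
 * reports more than this many routes may fail validation here; the
 * constant would need raising, or a dynamic allocation, in that case.)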
*/ +#define LINK_VALIDATE_ROUTES 8 + +int v4l2_subdev_link_validate(struct media_link *link) +{ + struct v4l2_subdev *sink; + struct v4l2_subdev_route sink_routes[LINK_VALIDATE_ROUTES]; + struct v4l2_subdev_routing sink_routing = { + .routes = sink_routes, + .num_routes = ARRAY_SIZE(sink_routes), + }; + struct v4l2_subdev_route src_routes[LINK_VALIDATE_ROUTES]; + struct v4l2_subdev_routing src_routing = { + .routes = src_routes, + .num_routes = ARRAY_SIZE(src_routes), + }; + unsigned int i, j; + int rval; + + sink = media_entity_to_v4l2_subdev(link->sink->entity); + + if (!(link->sink->flags & MEDIA_PAD_FL_MULTIPLEX && + link->source->flags & MEDIA_PAD_FL_MULTIPLEX)) + return v4l2_subdev_link_validate_one(link, link->source, 0, + link->sink, 0); + /* + * multiplex link cannot proceed without route information. + */ + rval = v4l2_subdev_call(sink, pad, get_routing, &sink_routing); + + if (rval) { + dev_err(sink->entity.graph_obj.mdev->dev, + "error %d in get_routing() on %s, sink pad %u\n", rval, + sink->entity.name, link->sink->index); + + return rval; + } + + rval = v4l2_subdev_call(media_entity_to_v4l2_subdev( + link->source->entity), + pad, get_routing, &src_routing); + if (rval) { + dev_dbg(sink->entity.graph_obj.mdev->dev, + "error %d in get_routing() on %s, source pad %u\n", + rval, link->source->entity->name, link->source->index); + + return rval; + } + + dev_dbg(sink->entity.graph_obj.mdev->dev, + "validating multiplexed link \"%s\":%u -> \"%s\":%u; %u/%u routes\n", + link->source->entity->name, link->source->index, + sink->entity.name, link->sink->index, + src_routing.num_routes, sink_routing.num_routes); + + for (i = 0; i < sink_routing.num_routes; i++) { + /* Get the first active route for the sink pad. */ + if (sink_routes[i].sink_pad != link->sink->index || + !(sink_routes[i].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) { + dev_dbg(sink->entity.graph_obj.mdev->dev, + "skipping sink route %u/%u -> %u/%u[%u]\n", + sink_routes[i].sink_pad, + sink_routes[i].sink_stream, + sink_routes[i].source_pad, + sink_routes[i].source_stream, + (bool)(sink_routes[i].flags + & V4L2_SUBDEV_ROUTE_FL_ACTIVE)); + continue; + } + + /* + * Get the corresponding route for the source pad. + * It's ok for the source pad to have routes active + * where the sink pad does not, but the routes that + * are active on the source pad have to be active on + * the sink pad as well. + */ + + for (j = 0; j < src_routing.num_routes; j++) { + if (src_routes[j].source_pad == link->source->index && + src_routes[j].source_stream + == sink_routes[i].sink_stream) + break; + } + + if (j == src_routing.num_routes) { + dev_err(sink->entity.graph_obj.mdev->dev, + "no corresponding source found.\n"); + return -EINVAL; + } + + /* The source route must be active. 
*/ + if (!(src_routes[j].flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE)) { + dev_dbg(sink->entity.graph_obj.mdev->dev, + "source route not active\n"); + return -EINVAL; + } + + dev_dbg(sink->entity.graph_obj.mdev->dev, + "validating link \"%s\": %u/%u => \"%s\" %u/%u\n", + link->source->entity->name, src_routes[j].source_pad, + src_routes[j].source_stream, sink->entity.name, + sink_routes[i].sink_pad, sink_routes[i].sink_stream); + + rval = v4l2_subdev_link_validate_one( + link, link->source, src_routes[j].source_stream, + link->sink, sink_routes[i].sink_stream); + if (rval) { + dev_dbg(sink->entity.graph_obj.mdev->dev, + "error %d in link validation\n", rval); + return rval; + } + } + + if (i < sink_routing.num_routes) { + dev_dbg(sink->entity.graph_obj.mdev->dev, + "not all sink routes verified; out of source routes\n"); + return -EINVAL; + } + + return 0; +} EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate); struct v4l2_subdev_pad_config * diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index 0b5c43f7e020..f412429cf5ba 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -185,12 +185,13 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma, dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n", data, size, dma->nr_pages); - err = get_user_pages(data & PAGE_MASK, dma->nr_pages, + err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages, flags, dma->pages, NULL); if (err != dma->nr_pages) { dma->nr_pages = (err >= 0) ? err : 0; - dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages); + dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err, + dma->nr_pages); return err < 0 ? err : -EINVAL; } return 0; } diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index cb115ba6a1d2..2dbf632c10de 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -332,6 +332,10 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, struct vb2_buffer *vb; int ret; + /* Ensure that q->num_buffers+num_buffers is below VB2_MAX_FRAME */ + num_buffers = min_t(unsigned int, num_buffers, + VB2_MAX_FRAME - q->num_buffers); + for (buffer = 0; buffer < num_buffers; ++buffer) { /* Allocate videobuf buffer structures */ vb = kzalloc(q->buf_struct_size, GFP_KERNEL); @@ -908,9 +912,12 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) dprintk(4, "done processing on buffer %d, state: %d\n", vb->index, state); - /* sync buffers */ - for (plane = 0; plane < vb->num_planes; ++plane) - call_void_memop(vb, finish, vb->planes[plane].mem_priv); + if (state != VB2_BUF_STATE_QUEUED && + state != VB2_BUF_STATE_REQUEUEING) { + /* sync buffers */ + for (plane = 0; plane < vb->num_planes; ++plane) + call_void_memop(vb, finish, vb->planes[plane].mem_priv); + } spin_lock_irqsave(&q->done_lock, flags); if (state == VB2_BUF_STATE_QUEUED || @@ -1685,6 +1692,15 @@ static void __vb2_queue_cancel(struct vb2_queue *q) for (i = 0; i < q->num_buffers; ++i) { struct vb2_buffer *vb = q->bufs[i]; + if (vb->state == VB2_BUF_STATE_PREPARED || + vb->state == VB2_BUF_STATE_QUEUED) { + unsigned int plane; + + for (plane = 0; plane < vb->num_planes; ++plane) + call_void_memop(vb, finish, + vb->planes[plane].mem_priv); + } + if (vb->state != VB2_BUF_STATE_DEQUEUED) { vb->state = VB2_BUF_STATE_PREPARED; call_void_vb_qop(vb, buf_finish, vb); diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c 
b/drivers/media/v4l2-core/videobuf2-v4l2.c index 0c0669976bdc..67f60eb57ede 100644 --- a/drivers/media/v4l2-core/videobuf2-v4l2.c +++ b/drivers/media/v4l2-core/videobuf2-v4l2.c @@ -178,6 +178,11 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b, return -EINVAL; } + if (!q->allow_requests && b->request) { + dprintk(1, "%s: unsupported request ID\n", opname); + return -EINVAL; + } + return __verify_planes_array(q->bufs[b->index], b); } @@ -203,8 +208,8 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb) b->timestamp = ns_to_timeval(vb->timestamp); b->timecode = vbuf->timecode; b->sequence = vbuf->sequence; - b->reserved2 = 0; - b->reserved = 0; + b->request = vbuf->request; + b->reserved = vbuf->reserved; if (q->is_multiplanar) { /* @@ -320,6 +325,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, } vb->timestamp = 0; vbuf->sequence = 0; + vbuf->request = b->request; if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { if (b->memory == VB2_MEMORY_USERPTR) { diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c index 3a7c80cd1a17..359fb9804d16 100644 --- a/drivers/media/v4l2-core/videobuf2-vmalloc.c +++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c @@ -106,7 +106,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr, if (nums[i-1] + 1 != nums[i]) goto fail_map; buf->vaddr = (__force void *) - ioremap_nocache(nums[0] << PAGE_SHIFT, size); + ioremap_nocache(__pfn_to_phys(nums[0]), size + offset); } else { buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1, PAGE_KERNEL); diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c index a4803ac192bb..1d49a8dd4a37 100644 --- a/drivers/memory/tegra/mc.c +++ b/drivers/memory/tegra/mc.c @@ -20,14 +20,6 @@ #include "mc.h" #define MC_INTSTATUS 0x000 -#define MC_INT_DECERR_MTS (1 << 16) -#define MC_INT_SECERR_SEC (1 << 13) -#define MC_INT_DECERR_VPR (1 << 12) -#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11) -#define MC_INT_INVALID_SMMU_PAGE (1 << 10) -#define MC_INT_ARBITRATION_EMEM (1 << 9) -#define MC_INT_SECURITY_VIOLATION (1 << 8) -#define MC_INT_DECERR_EMEM (1 << 6) #define MC_INTMASK 0x004 @@ -248,12 +240,13 @@ static const char *const error_names[8] = { static irqreturn_t tegra_mc_irq(int irq, void *data) { struct tegra_mc *mc = data; - unsigned long status, mask; + unsigned long status; unsigned int bit; /* mask all interrupts to avoid flooding */ - status = mc_readl(mc, MC_INTSTATUS); - mask = mc_readl(mc, MC_INTMASK); + status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask; + if (!status) + return IRQ_NONE; for_each_set_bit(bit, &status, 32) { const char *error = status_names[bit] ?: "unknown"; @@ -346,7 +339,6 @@ static int tegra_mc_probe(struct platform_device *pdev) const struct of_device_id *match; struct resource *res; struct tegra_mc *mc; - u32 value; int err; match = of_match_node(tegra_mc_of_match, pdev->dev.of_node); @@ -414,11 +406,7 @@ static int tegra_mc_probe(struct platform_device *pdev) WARN(!mc->soc->client_id_mask, "Missing client ID mask for this SoC\n"); - value = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | - MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | - MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM; - - mc_writel(mc, value, MC_INTMASK); + mc_writel(mc, mc->soc->intmask, MC_INTMASK); return 0; } diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h index ddb16676c3af..24e020b4609b 100644 --- a/drivers/memory/tegra/mc.h +++ 
b/drivers/memory/tegra/mc.h @@ -14,6 +14,15 @@ #include +#define MC_INT_DECERR_MTS (1 << 16) +#define MC_INT_SECERR_SEC (1 << 13) +#define MC_INT_DECERR_VPR (1 << 12) +#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11) +#define MC_INT_INVALID_SMMU_PAGE (1 << 10) +#define MC_INT_ARBITRATION_EMEM (1 << 9) +#define MC_INT_SECURITY_VIOLATION (1 << 8) +#define MC_INT_DECERR_EMEM (1 << 6) + static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset) { return readl(mc->regs + offset); diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c index ba8fff3d66a6..6d2a5a849d92 100644 --- a/drivers/memory/tegra/tegra114.c +++ b/drivers/memory/tegra/tegra114.c @@ -930,4 +930,6 @@ const struct tegra_mc_soc tegra114_mc_soc = { .atom_size = 32, .client_id_mask = 0x7f, .smmu = &tegra114_smmu_soc, + .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION | + MC_INT_DECERR_EMEM, }; diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c index 5a58e440f4a7..9f68a56f2727 100644 --- a/drivers/memory/tegra/tegra124.c +++ b/drivers/memory/tegra/tegra124.c @@ -1020,6 +1020,9 @@ const struct tegra_mc_soc tegra124_mc_soc = { .smmu = &tegra124_smmu_soc, .emem_regs = tegra124_mc_emem_regs, .num_emem_regs = ARRAY_SIZE(tegra124_mc_emem_regs), + .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | + MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | + MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, }; #endif /* CONFIG_ARCH_TEGRA_124_SOC */ @@ -1042,5 +1045,8 @@ const struct tegra_mc_soc tegra132_mc_soc = { .atom_size = 32, .client_id_mask = 0x7f, .smmu = &tegra132_smmu_soc, + .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | + MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | + MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, }; #endif /* CONFIG_ARCH_TEGRA_132_SOC */ diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c index 5e144abe4c18..47c78a6d8f00 100644 --- a/drivers/memory/tegra/tegra210.c +++ b/drivers/memory/tegra/tegra210.c @@ -1077,4 +1077,7 @@ const struct tegra_mc_soc tegra210_mc_soc = { .atom_size = 64, .client_id_mask = 0xff, .smmu = &tegra210_smmu_soc, + .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | + MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | + MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, }; diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c index b44737840e70..d0689428ea1a 100644 --- a/drivers/memory/tegra/tegra30.c +++ b/drivers/memory/tegra/tegra30.c @@ -952,4 +952,6 @@ const struct tegra_mc_soc tegra30_mc_soc = { .atom_size = 16, .client_id_mask = 0x7f, .smmu = &tegra30_smmu_soc, + .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION | + MC_INT_DECERR_EMEM, }; diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 84eab28665f3..7a93400eea2a 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -99,7 +99,7 @@ module_param(mpt_channel_mapping, int, 0); MODULE_PARM_DESC(mpt_channel_mapping, " Mapping id's to channels (default=0)"); static int mpt_debug_level; -static int mpt_set_debug_level(const char *val, struct kernel_param *kp); +static int mpt_set_debug_level(const char *val, const struct kernel_param *kp); module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int, &mpt_debug_level, 0600); MODULE_PARM_DESC(mpt_debug_level, @@ -242,7 +242,7 @@ pci_enable_io_access(struct pci_dev *pdev) 
pci_write_config_word(pdev, PCI_COMMAND, command_reg); } -static int mpt_set_debug_level(const char *val, struct kernel_param *kp) +static int mpt_set_debug_level(const char *val, const struct kernel_param *kp) { int ret = param_set_int(val, kp); MPT_ADAPTER *ioc; diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index 7b3b41368931..cf6ce9f600ca 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -2698,6 +2698,8 @@ mptctl_hp_targetinfo(unsigned long arg) __FILE__, __LINE__, iocnum); return -ENODEV; } + if (karg.hdr.id >= MPT_MAX_FC_DEVICES) + return -EINVAL; dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n", ioc->name)); diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 345f6035599e..f1d93676b0fc 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -1995,6 +1995,7 @@ static struct scsi_host_template mptsas_driver_template = { .cmd_per_lun = 7, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = mptscsih_host_attrs, + .no_write_same = 1, }; static int mptsas_get_linkerrors(struct sas_phy *phy) diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c index 5fe12961cfe5..e77d3c1a5310 100644 --- a/drivers/mfd/arizona-i2c.c +++ b/drivers/mfd/arizona-i2c.c @@ -23,6 +23,144 @@ #include "arizona.h" +/************************************************************/ +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA +/***********WM8280 1.8V REGULATOR*************/ +static struct regulator_consumer_supply vflorida1_consumer[] = { + REGULATOR_SUPPLY("AVDD", "0-001a"), + REGULATOR_SUPPLY("DBVDD1", "0-001a"), + REGULATOR_SUPPLY("LDOVDD", "0-001a"), + REGULATOR_SUPPLY("CPVDD", "0-001a"), + REGULATOR_SUPPLY("DBVDD2", "0-001a"), + REGULATOR_SUPPLY("DBVDD3", "0-001a"), +}; + +/***********WM8280 5V REGULATOR*************/ +static struct regulator_consumer_supply vflorida2_consumer[] = { + REGULATOR_SUPPLY("SPKVDDL", "0-001a"), + REGULATOR_SUPPLY("SPKVDDR", "0-001a"), +}; +#else +/***********WM8280 1.8V REGULATOR*************/ +static struct regulator_consumer_supply vflorida1_consumer[] = { + REGULATOR_SUPPLY("AVDD", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("DBVDD1", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("LDOVDD", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("CPVDD", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("DBVDD2", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("DBVDD3", "i2c-INT34C1:00"), +}; + +/***********WM8280 5V REGULATOR*************/ +static struct regulator_consumer_supply vflorida2_consumer[] = { + REGULATOR_SUPPLY("SPKVDDL", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("SPKVDDR", "i2c-INT34C1:00"), +}; +#endif + +static struct regulator_init_data vflorida1_data = { + .constraints = { + .always_on = 1, + }, + .num_consumer_supplies = ARRAY_SIZE(vflorida1_consumer), + .consumer_supplies = vflorida1_consumer, +}; + +static struct fixed_voltage_config vflorida1_config = { + .supply_name = "DC_1V8", + .microvolts = 1800000, + .gpio = -EINVAL, + .init_data = &vflorida1_data, +}; + +static struct platform_device vflorida1_device = { + .name = "reg-fixed-voltage", + .id = PLATFORM_DEVID_AUTO, + .dev = { + .platform_data = &vflorida1_config, + }, +}; + +static struct regulator_init_data vflorida2_data = { + .constraints = { + .always_on = 1, + }, + .num_consumer_supplies = ARRAY_SIZE(vflorida2_consumer), + .consumer_supplies = vflorida2_consumer, +}; + +static struct fixed_voltage_config vflorida2_config = { + .supply_name = 
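+ /*
+ * Note on the consumer dev_name strings above: the I2C core names
+ * clients "%d-%04x" (adapter number, then client address), so
+ * "0-001a" is the WM8280 at address 0x1a on bus 0 in the FPGA
+ * board-file case, while "i2c-INT34C1:00" is the name an
+ * ACPI-enumerated client gets. A supply binds only if the string
+ * matches the runtime device name exactly.
+ */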
"DC_5V", + .microvolts = 3700000, + .gpio = -EINVAL, + .init_data = &vflorida2_data, +}; + +static struct platform_device vflorida2_device = { + .name = "reg-fixed-voltage", + .id = PLATFORM_DEVID_AUTO, + .dev = { + .platform_data = &vflorida2_config, + }, +}; + +/***********WM8280 Codec Driver platform data*************/ +static const struct arizona_micd_range micd_ctp_ranges[] = { + { .max = 11, .key = BTN_0 }, + { .max = 28, .key = BTN_1 }, + { .max = 54, .key = BTN_2 }, + { .max = 100, .key = BTN_3 }, + { .max = 186, .key = BTN_4 }, + { .max = 430, .key = BTN_5 }, +}; + +static struct arizona_micd_config micd_modes[] = { + /*{Acc Det on Micdet1, Use Micbias2 for detection, + * Set GPIO to 1 to selecte this polarity}*/ + { 0, 2, 1 }, +}; + +static struct arizona_pdata __maybe_unused florida_pdata = { + .reset = 0, /*No Reset GPIO from AP, use SW reset*/ + .ldo1 = { + .ldoena = 0, /*TODO: Add actual GPIO for LDOEN, use SW Control for now*/ + }, + .irq_flags = IRQF_TRIGGER_LOW | IRQF_ONESHOT, + .clk32k_src = ARIZONA_32KZ_MCLK2, /*Onboard OSC provides 32K on MCLK2*/ + /* + * IN1 uses both MICBIAS1 and MICBIAS2 based on jack polarity, + * the below values in dmic_ref only has meaning for DMIC's and not + * AMIC's + */ +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA + .dmic_ref = {ARIZONA_DMIC_MICBIAS1, ARIZONA_DMIC_MICBIAS3, 0, 0}, + .inmode = {ARIZONA_INMODE_DIFF, ARIZONA_INMODE_DMIC, 0, 0}, +#else + .dmic_ref = {ARIZONA_DMIC_MICBIAS1, 0, ARIZONA_DMIC_MICVDD, 0}, + .inmode = {ARIZONA_INMODE_SE, 0, ARIZONA_INMODE_DMIC, 0}, +#endif + .gpio_base = 0, /* Base allocated by gpio core */ + .micd_pol_gpio = 2, /* GPIO3 (offset 2 from gpio_base) of the codec */ + .micd_configs = micd_modes, + .num_micd_configs = ARRAY_SIZE(micd_modes), + .micd_force_micbias = true, +}; + +/************************************************************/ +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA +static struct i2c_board_info arizona_i2c_device = { + I2C_BOARD_INFO("wm8280", 0x1A), + .platform_data = &florida_pdata, +}; +#endif + static int arizona_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { @@ -31,10 +169,16 @@ static int arizona_i2c_probe(struct i2c_client *i2c, unsigned long type; int ret; + pr_debug("%s:%d\n", __func__, __LINE__); if (i2c->dev.of_node) type = arizona_of_get_type(&i2c->dev); +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA + else + type = WM8280; +#else else type = id->driver_data; +#endif switch (type) { case WM5102: @@ -105,6 +249,13 @@ static const struct i2c_device_id arizona_i2c_id[] = { }; MODULE_DEVICE_TABLE(i2c, arizona_i2c_id); +#ifndef CONFIG_SND_SOC_INTEL_CNL_FPGA +static struct acpi_device_id __maybe_unused arizona_acpi_match[] = { + { "INT34C1", WM8280 }, + { } +}; +#endif + static struct i2c_driver arizona_i2c_driver = { .driver = { .name = "arizona", @@ -116,7 +267,53 @@ static struct i2c_driver arizona_i2c_driver = { .id_table = arizona_i2c_id, }; -module_i2c_driver(arizona_i2c_driver); +static int __init arizona_modinit(void) +{ + int ret = 0; +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA + struct i2c_adapter *adapter; + struct i2c_client *client; +#endif + + pr_debug("%s Entry\n", __func__); + /***********WM8280 Register Regulator*************/ + platform_device_register(&vflorida1_device); + platform_device_register(&vflorida2_device); + +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA + adapter = i2c_get_adapter(0); + pr_debug("%s:%d\n", __func__, __LINE__); + if (adapter) { + client = i2c_new_device(adapter, &arizona_i2c_device); + pr_debug("%s:%d\n", __func__, __LINE__); + if (!client) { + 
pr_err("can't create i2c device %s\n", + arizona_i2c_device.type); + i2c_put_adapter(adapter); + pr_debug("%s:%d\n", __func__, __LINE__); + return -ENODEV; + } + } else { + pr_err("adapter is NULL\n"); + return -ENODEV; + } +#endif + pr_debug("%s:%d\n", __func__, __LINE__); + ret = i2c_add_driver(&arizona_i2c_driver); + + pr_debug("%s Exit\n", __func__); + + return ret; +} + +module_init(arizona_modinit); + +static void __exit arizona_modexit(void) +{ + i2c_del_driver(&arizona_i2c_driver); +} + +module_exit(arizona_modexit); MODULE_DESCRIPTION("Arizona I2C bus interface"); MODULE_AUTHOR("Mark Brown "); diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c index 09cf3699e354..32d4154fb87b 100644 --- a/drivers/mfd/arizona-irq.c +++ b/drivers/mfd/arizona-irq.c @@ -375,7 +375,8 @@ int arizona_irq_init(struct arizona *arizona) ret = request_threaded_irq(arizona->irq, NULL, arizona_irq_thread, flags, "arizona", arizona); - if (ret != 0) { + /* FPGA board doesn't have irq line */ + if (ret != 0 && !IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA)) { dev_err(arizona->dev, "Failed to request primary IRQ %d: %d\n", arizona->irq, ret); goto err_main_irq; diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c index b0ca5a4c841e..c5528ae982f2 100644 --- a/drivers/mfd/cros_ec.c +++ b/drivers/mfd/cros_ec.c @@ -112,7 +112,11 @@ int cros_ec_register(struct cros_ec_device *ec_dev) mutex_init(&ec_dev->lock); - cros_ec_query_all(ec_dev); + err = cros_ec_query_all(ec_dev); + if (err) { + dev_err(dev, "Cannot identify the EC: error %d\n", err); + return err; + } if (ec_dev->irq) { err = request_threaded_irq(ec_dev->irq, NULL, ec_irq_thread, diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c index c9714072e224..a14196e95e9b 100644 --- a/drivers/mfd/cros_ec_spi.c +++ b/drivers/mfd/cros_ec_spi.c @@ -667,6 +667,7 @@ static int cros_ec_spi_probe(struct spi_device *spi) sizeof(struct ec_response_get_protocol_info); ec_dev->dout_size = sizeof(struct ec_host_request); + ec_spi->last_transfer_ns = ktime_get_ns(); err = cros_ec_register(ec_dev); if (err) { diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c index b3767c3141e5..461b0990b56f 100644 --- a/drivers/mfd/fsl-imx25-tsadc.c +++ b/drivers/mfd/fsl-imx25-tsadc.c @@ -180,6 +180,19 @@ static int mx25_tsadc_probe(struct platform_device *pdev) return devm_of_platform_populate(dev); } +static int mx25_tsadc_remove(struct platform_device *pdev) +{ + struct mx25_tsadc *tsadc = platform_get_drvdata(pdev); + int irq = platform_get_irq(pdev, 0); + + if (irq) { + irq_set_chained_handler_and_data(irq, NULL, NULL); + irq_domain_remove(tsadc->domain); + } + + return 0; +} + static const struct of_device_id mx25_tsadc_ids[] = { { .compatible = "fsl,imx25-tsadc" }, { /* Sentinel */ } @@ -192,6 +205,7 @@ static struct platform_driver mx25_tsadc_driver = { .of_match_table = of_match_ptr(mx25_tsadc_ids), }, .probe = mx25_tsadc_probe, + .remove = mx25_tsadc_remove, }; module_platform_driver(mx25_tsadc_driver); diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index d1c46de89eb4..253f359feb1b 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -124,6 +124,11 @@ static const struct intel_lpss_platform_info apl_i2c_info = { .properties = apl_i2c_properties, }; +static const struct intel_lpss_platform_info cnl_i2c_info = { + .clk_rate = 216000000, + .properties = spt_i2c_properties, +}; + static const struct pci_device_id intel_lpss_pci_ids[] = { /* BXT A-Step */ { PCI_VDEVICE(INTEL, 
0x0aac), (kernel_ulong_t)&bxt_i2c_info }, @@ -134,13 +139,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x0ab6), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x0ab8), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x0aba), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x0abc), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x0abe), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x0ac0), (kernel_ulong_t)&bxt_uart_info }, { PCI_VDEVICE(INTEL, 0x0ac2), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x0ac4), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x0ac6), (kernel_ulong_t)&bxt_info }, - { PCI_VDEVICE(INTEL, 0x0aee), (kernel_ulong_t)&bxt_uart_info }, /* BXT B-Step */ { PCI_VDEVICE(INTEL, 0x1aac), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x1aae), (kernel_ulong_t)&bxt_i2c_info }, @@ -150,13 +151,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x1ab6), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x1ab8), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x1aba), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x1abc), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x1abe), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x1ac0), (kernel_ulong_t)&bxt_uart_info }, { PCI_VDEVICE(INTEL, 0x1ac2), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info }, - { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info }, /* GLK */ { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&bxt_i2c_info }, @@ -166,10 +163,6 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x31b6), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x31b8), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x31ba), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x31bc), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x31be), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x31c0), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x31ee), (kernel_ulong_t)&bxt_uart_info }, { PCI_VDEVICE(INTEL, 0x31c2), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x31c4), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x31c6), (kernel_ulong_t)&bxt_info }, @@ -182,16 +175,10 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x5ab6), (kernel_ulong_t)&apl_i2c_info }, { PCI_VDEVICE(INTEL, 0x5ab8), (kernel_ulong_t)&apl_i2c_info }, { PCI_VDEVICE(INTEL, 0x5aba), (kernel_ulong_t)&apl_i2c_info }, - { PCI_VDEVICE(INTEL, 0x5abc), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x5abe), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x5ac0), (kernel_ulong_t)&bxt_uart_info }, { PCI_VDEVICE(INTEL, 0x5ac2), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x5ac4), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x5ac6), (kernel_ulong_t)&bxt_info }, - { PCI_VDEVICE(INTEL, 0x5aee), (kernel_ulong_t)&bxt_uart_info }, /* SPT-LP */ - { PCI_VDEVICE(INTEL, 0x9d27), (kernel_ulong_t)&spt_uart_info }, - { PCI_VDEVICE(INTEL, 0x9d28), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0x9d29), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0x9d2a), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0x9d60), (kernel_ulong_t)&spt_i2c_info }, @@ -200,50 +187,39 @@ static const struct pci_device_id 
intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x9d63), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0x9d64), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0x9d65), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x9d66), (kernel_ulong_t)&spt_uart_info }, /* CNL-LP */ - { PCI_VDEVICE(INTEL, 0x9da8), (kernel_ulong_t)&spt_uart_info }, - { PCI_VDEVICE(INTEL, 0x9da9), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0x9daa), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0x9dab), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0x9dfb), (kernel_ulong_t)&spt_info }, - { PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&cnl_i2c_info }, { PCI_VDEVICE(INTEL, 0x9dc7), (kernel_ulong_t)&spt_uart_info }, - { PCI_VDEVICE(INTEL, 0x9de8), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x9de8), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&cnl_i2c_info }, /* SPT-H */ - { PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info }, - { PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0xa129), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa12a), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa160), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa162), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info }, /* KBL-H */ - { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&spt_uart_info }, - { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&spt_uart_info }, /* CNL-H */ - { PCI_VDEVICE(INTEL, 0xa328), (kernel_ulong_t)&spt_uart_info }, - { PCI_VDEVICE(INTEL, 0xa329), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0xa32a), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa32b), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa347), (kernel_ulong_t)&spt_uart_info }, - { PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&cnl_i2c_info }, { } }; MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids); @@ -258,7 +234,74 @@ static struct pci_driver 
intel_lpss_pci_driver = { }, }; -module_pci_driver(intel_lpss_pci_driver); +static int __init intel_lpss_pci_driver_init(void) +{ + return pci_register_driver(&intel_lpss_pci_driver); +} +fs_initcall_sync(intel_lpss_pci_driver_init); + +static void __exit intel_lpss_pci_driver_exit(void) +{ + pci_unregister_driver(&intel_lpss_pci_driver); +} +module_exit(intel_lpss_pci_driver_exit); + + +static const struct pci_device_id intel_lpss_pci_uart_ids[] = { + /* BXT A-Step */ + { PCI_VDEVICE(INTEL, 0x0abc), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x0abe), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x0ac0), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x0aee), (kernel_ulong_t)&bxt_uart_info }, + /* BXT B-Step */ + { PCI_VDEVICE(INTEL, 0x1abc), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x1abe), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x1ac0), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info }, + /* GLK */ + { PCI_VDEVICE(INTEL, 0x31bc), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x31be), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x31c0), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x31ee), (kernel_ulong_t)&bxt_uart_info }, + /* APL */ + { PCI_VDEVICE(INTEL, 0x5abc), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x5abe), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x5ac0), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x5aee), (kernel_ulong_t)&bxt_uart_info }, + /* SPT-LP */ + { PCI_VDEVICE(INTEL, 0x9d27), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0x9d28), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0x9d66), (kernel_ulong_t)&spt_uart_info }, + /* CNL-LP */ + { PCI_VDEVICE(INTEL, 0x9da8), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0x9da9), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0x9dc7), (kernel_ulong_t)&spt_uart_info }, + /* SPT-H */ + { PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info }, + /* KBL-H */ + { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&spt_uart_info }, + /* CNL-H */ + { PCI_VDEVICE(INTEL, 0xa328), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa329), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa347), (kernel_ulong_t)&spt_uart_info }, + { } +}; +MODULE_DEVICE_TABLE(pci, intel_lpss_pci_uart_ids); + +static struct pci_driver intel_lpss_pci_uart_driver = { + .name = "intel-lpss-uart", + .id_table = intel_lpss_pci_uart_ids, + .probe = intel_lpss_pci_probe, + .remove = intel_lpss_pci_remove, + .driver = { + .pm = &intel_lpss_pci_pm_ops, + }, +}; +module_pci_driver(intel_lpss_pci_uart_driver); MODULE_AUTHOR("Andy Shevchenko "); MODULE_AUTHOR("Mika Westerberg "); diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index 0e0ab9bb1530..40e8d9b59d07 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c @@ -275,11 +275,11 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss) intel_lpss_deassert_reset(lpss); + intel_lpss_set_remap_addr(lpss); + if (!intel_lpss_has_idma(lpss)) return; - intel_lpss_set_remap_addr(lpss); - /* Make sure that SPI multiblock DMA transfers are re-enabled */ if (lpss->type == 
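/*
 * Ordering note: intel_lpss_set_remap_addr() is now called before the
 * intel_lpss_has_idma() early return, so the remap address is
 * programmed even on devices without iDMA; only the SPI multiblock
 * DMA re-enable below remains iDMA-specific.
 */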
LPSS_DEV_SPI) writel(value, lpss->priv + LPSS_PRIV_SSP_REG); diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c index 450ae36645aa..cf1120abbf52 100644 --- a/drivers/mfd/lpc_ich.c +++ b/drivers/mfd/lpc_ich.c @@ -522,6 +522,7 @@ static struct lpc_ich_info lpc_chipset_info[] = { .name = "Avoton SoC", .iTCO_version = 3, .gpio_version = AVOTON_GPIO, + .spi_type = INTEL_SPI_BYT, }, [LPC_BAYTRAIL] = { .name = "Bay Trail SoC", diff --git a/drivers/mfd/mxs-lradc.c b/drivers/mfd/mxs-lradc.c index 630bd19b2c0a..98e732a7ae96 100644 --- a/drivers/mfd/mxs-lradc.c +++ b/drivers/mfd/mxs-lradc.c @@ -196,8 +196,10 @@ static int mxs_lradc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, lradc); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENOMEM; + if (!res) { + ret = -ENOMEM; + goto err_clk; + } switch (lradc->soc) { case IMX23_LRADC: diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c index da16bf45fab4..dc94ffc6321a 100644 --- a/drivers/mfd/twl4030-audio.c +++ b/drivers/mfd/twl4030-audio.c @@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void) EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk); static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata, - struct device_node *node) + struct device_node *parent) { + struct device_node *node; + if (pdata && pdata->codec) return true; - if (of_find_node_by_name(node, "codec")) + node = of_get_child_by_name(parent, "codec"); + if (node) { + of_node_put(node); return true; + } return false; } diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c index d66502d36ba0..dd19f17a1b63 100644 --- a/drivers/mfd/twl6040.c +++ b/drivers/mfd/twl6040.c @@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = { }; -static bool twl6040_has_vibra(struct device_node *node) +static bool twl6040_has_vibra(struct device_node *parent) { -#ifdef CONFIG_OF - if (of_find_node_by_name(node, "vibra")) + struct device_node *node; + + node = of_get_child_by_name(parent, "vibra"); + if (node) { + of_node_put(node); return true; -#endif + } + return false; } diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 8136dc7e863d..ce2449ad1688 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -506,6 +506,27 @@ config PCI_ENDPOINT_TEST Enable this configuration option to enable the host side test driver for PCI Endpoint. 
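The UID_SYS_STATS entry added below only names its /proc entry points. As a smoke test of what the driver exports, a userspace dumper can be a few lines of C; the directory names come from the help text, while the exact file names underneath and the per-line record layout are driver-defined, so treat those as assumptions:

/* Dump the per-UID stats files exported by UID_SYS_STATS. The line
 * format is defined by the driver, so the text is streamed unchanged.
 */
#include <stdio.h>

static void dump(const char *path)
{
	FILE *f = fopen(path, "r");
	char line[512];

	if (!f) {
		perror(path);
		return;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	/* Exact file names under the help-text directories are assumptions. */
	dump("/proc/uid_cputime/show_uid_stat");
	dump("/proc/uid_io/stats");
	return 0;
}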
+config UID_SYS_STATS + bool "Per-UID statistics" + depends on PROFILING && TASK_XACCT && TASK_IO_ACCOUNTING + help + Per UID based cpu time statistics exported to /proc/uid_cputime + Per UID based io statistics exported to /proc/uid_io + Per UID based procstat control in /proc/uid_procstat + +config UID_SYS_STATS_DEBUG + bool "Per-TASK statistics" + depends on UID_SYS_STATS + default n + help + Per TASK based io statistics exported to /proc/uid_io + +config MEMORY_STATE_TIME + tristate "Memory freq/bandwidth time statistics" + depends on PROFILING + help + Memory time statistics exported to /sys/kernel/memory_state_time + source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index ad0e64fdba34..a4ccc7d3f914 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -57,6 +57,9 @@ obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o +obj-$(CONFIG_UID_SYS_STATS) += uid_sys_stats.o +obj-$(CONFIG_MEMORY_STATE_TIME) += memory_state_time.o + lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o lkdtm-$(CONFIG_LKDTM) += lkdtm_heap.o @@ -66,6 +69,7 @@ lkdtm-$(CONFIG_LKDTM) += lkdtm_rodata_objcopy.o lkdtm-$(CONFIG_LKDTM) += lkdtm_usercopy.o KCOV_INSTRUMENT_lkdtm_rodata.o := n +CFLAGS_lkdtm_rodata.o += $(DISABLE_LTO) OBJCOPYFLAGS := OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \ diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c index 1922cb8f6b88..1c5b7aec13d4 100644 --- a/drivers/misc/c2port/core.c +++ b/drivers/misc/c2port/core.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -904,7 +903,6 @@ struct c2port_device *c2port_device_register(char *name, return ERR_PTR(-EINVAL); c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL); - kmemcheck_annotate_bitfield(c2dev, flags); if (unlikely(!c2dev)) return ERR_PTR(-ENOMEM); diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index a0c44d16bf30..c75daba57fd7 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -102,15 +102,15 @@ static struct file *cxl_getfile(const char *name, d_instantiate(path.dentry, inode); file = alloc_file(&path, OPEN_FMODE(flags), fops); - if (IS_ERR(file)) - goto err_dput; + if (IS_ERR(file)) { + path_put(&path); + goto err_fs; + } file->f_flags = flags & (O_ACCMODE | O_NONBLOCK); file->private_data = priv; return file; -err_dput: - path_put(&path); err_inode: iput(inode); err_fs: diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index b1afeccbb97f..c96dcda1111f 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -365,6 +365,9 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; #define CXL_PSL_TFC_An_AE (1ull << (63-30)) /* Restart PSL with address error */ #define CXL_PSL_TFC_An_R (1ull << (63-31)) /* Restart PSL transaction */ +/****** CXL_PSL_DEBUG *****************************************************/ +#define CXL_PSL_DEBUG_CDC (1ull << (63-27)) /* Coherent Data cache support */ + /****** CXL_XSL9_IERAT_ERAT - CAIA 2 **********************************/ #define CXL_XSL9_IERAT_MLPID (1ull << (63-0)) /* Match LPID */ #define CXL_XSL9_IERAT_MPID (1ull << (63-1)) /* Match PID */ @@ -659,6 +662,7 @@ struct cxl_native { irq_hw_number_t err_hwirq; unsigned int err_virq; u64 ps_off; + bool no_data_cache; /* set if no data cache on the card */ const struct cxl_service_layer_ops 
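/*
 * no_data_cache above is derived at probe time from the CXL_PSL_DEBUG
 * register. The PSL defines use IBM MSB-0 bit numbering, where "bit n"
 * maps to (1ull << (63 - n)) in C, so CXL_PSL_DEBUG_CDC (IBM bit 27)
 * is the mask 1ull << 36. When that bit is set, the PSL has no data
 * cache and cxl_data_cache_flush() becomes a no-op.
 */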
*sl_ops; }; diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c index dc9bc1807fdf..562a6803d690 100644 --- a/drivers/misc/cxl/cxllib.c +++ b/drivers/misc/cxl/cxllib.c @@ -207,49 +207,74 @@ int cxllib_get_PE_attributes(struct task_struct *task, } EXPORT_SYMBOL_GPL(cxllib_get_PE_attributes); -int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags) +static int get_vma_info(struct mm_struct *mm, u64 addr, + u64 *vma_start, u64 *vma_end, + unsigned long *page_size) { - int rc; - u64 dar; struct vm_area_struct *vma = NULL; - unsigned long page_size; - - if (mm == NULL) - return -EFAULT; + int rc = 0; down_read(&mm->mmap_sem); vma = find_vma(mm, addr); if (!vma) { - pr_err("Can't find vma for addr %016llx\n", addr); rc = -EFAULT; goto out; } - /* get the size of the pages allocated */ - page_size = vma_kernel_pagesize(vma); - - for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) { - if (dar < vma->vm_start || dar >= vma->vm_end) { - vma = find_vma(mm, addr); - if (!vma) { - pr_err("Can't find vma for addr %016llx\n", addr); - rc = -EFAULT; - goto out; - } - /* get the size of the pages allocated */ - page_size = vma_kernel_pagesize(vma); + *page_size = vma_kernel_pagesize(vma); + *vma_start = vma->vm_start; + *vma_end = vma->vm_end; +out: + up_read(&mm->mmap_sem); + return rc; +} + +int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags) +{ + int rc; + u64 dar, vma_start, vma_end; + unsigned long page_size; + + if (mm == NULL) + return -EFAULT; + + /* + * The buffer we have to process can extend over several pages + * and may also cover several VMAs. + * We iterate over all the pages. The page size could vary + * between VMAs. + */ + rc = get_vma_info(mm, addr, &vma_start, &vma_end, &page_size); + if (rc) + return rc; + + for (dar = (addr & ~(page_size - 1)); dar < (addr + size); + dar += page_size) { + if (dar < vma_start || dar >= vma_end) { + /* + * We don't hold the mm->mmap_sem semaphore + * while iterating, since the semaphore is + * required by one of the lower-level page + * fault processing functions and it could + * create a deadlock. + * + * It means the VMAs can be altered between two + * loop iterations and we could theoretically + * miss a page (however unlikely). But that's + * not really a problem, as the driver will + * retry access, get another page fault on the + * missing page and call us again. + */ + rc = get_vma_info(mm, dar, &vma_start, &vma_end, + &page_size); + if (rc) + return rc; } rc = cxl_handle_mm_fault(mm, flags, dar); - if (rc) { - pr_err("cxl_handle_mm_fault failed %d", rc); - rc = -EFAULT; - goto out; - } + if (rc) + return -EFAULT; } - rc = 0; -out: - up_read(&mm->mmap_sem); - return rc; + return 0; } EXPORT_SYMBOL_GPL(cxllib_handle_fault); diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 4a82c313cf71..9c042b0b8c55 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -352,8 +352,17 @@ int cxl_data_cache_flush(struct cxl *adapter) u64 reg; unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); - pr_devel("Flushing data cache\n"); + /* + * Do a datacache flush only if the datacache is available. + * On PSL9D the datacache is absent, hence the flush operation + * would time out. + */ + if (adapter->native->no_data_cache) { + pr_devel("No PSL data cache. 
Ignoring cache flush req.\n"); + return 0; + } + + pr_devel("Flushing data cache\n"); reg = cxl_p1_read(adapter, CXL_PSL_Control); reg |= CXL_PSL_Control_Fr; cxl_p1_write(adapter, CXL_PSL_Control, reg); diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 3ba04f371380..2b3fd0a51701 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -457,6 +457,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, u64 chipid; u32 phb_index; u64 capp_unit_id; + u64 psl_debug; int rc; rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); @@ -507,6 +508,16 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, if (cxl_is_power9_dd1()) cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0400000000000001ULL); + /* + * Check if the PSL has a data cache. We need to flush the adapter's + * data cache when it is about to be removed. + */ + psl_debug = cxl_p1_read(adapter, CXL_PSL9_DEBUG); + if (psl_debug & CXL_PSL_DEBUG_CDC) { + dev_dbg(&dev->dev, "No data-cache present\n"); + adapter->native->no_data_cache = true; + } + return 0; } @@ -1450,10 +1461,8 @@ int cxl_pci_reset(struct cxl *adapter) /* * The adapter is about to be reset, so ignore errors. - * Not supported on P9 DD1 */ - if ((cxl_is_power8()) || (!(cxl_is_power9_dd1()))) - cxl_data_cache_flush(adapter); + cxl_data_cache_flush(adapter); /* pcie_warm_reset requests a fundamental pci reset which includes a * PERST assert/deassert. PERST triggers a loading of the image @@ -1898,10 +1907,8 @@ static void cxl_pci_remove_adapter(struct cxl *adapter) /* * Flush the adapter's datacache as it is about to be removed. - * Not supported on P9 DD1. */ - if ((cxl_is_power8()) || (!(cxl_is_power9_dd1()))) - cxl_data_cache_flush(adapter); + cxl_data_cache_flush(adapter); cxl_deconfigure_adapter(adapter); @@ -2043,6 +2050,9 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu, /* There should only be one entry, but go through the list * anyway */ + if (afu->phb == NULL) + return result; + list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { if (!afu_dev->driver) continue; @@ -2084,8 +2094,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, * Tell the AFU drivers; but we don't care what they * say, we're going away. */ - if (afu->phb != NULL) - cxl_vphb_error_detected(afu, state); + cxl_vphb_error_detected(afu, state); } return PCI_ERS_RESULT_DISCONNECT; } @@ -2225,6 +2234,9 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev) if (cxl_afu_select_best_mode(afu)) goto err; + if (afu->phb == NULL) + continue; + list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { /* Reset the device context.
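* (afu->phb is NULL whenever the AFU never brought up its virtual
* PHB, e.g. when AFU activation failed, which is why the EEH
* callbacks in this file now check it before walking
* afu->phb->bus->devices.)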
* TODO: make this less disruptive @@ -2287,6 +2299,9 @@ static void cxl_pci_resume(struct pci_dev *pdev) for (i = 0; i < adapter->slices; i++) { afu = adapter->afu[i]; + if (afu->phb == NULL) + continue; + list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { if (afu_dev->driver && afu_dev->driver->err_handler && afu_dev->driver->err_handler->resume) diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index a8b6d6a635e9..393a80bdb846 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c @@ -331,12 +331,20 @@ static ssize_t prefault_mode_store(struct device *device, struct cxl_afu *afu = to_cxl_afu(device); enum prefault_modes mode = -1; - if (!strncmp(buf, "work_element_descriptor", 23)) - mode = CXL_PREFAULT_WED; - if (!strncmp(buf, "all", 3)) - mode = CXL_PREFAULT_ALL; if (!strncmp(buf, "none", 4)) mode = CXL_PREFAULT_NONE; + else { + if (!radix_enabled()) { + + /* only allowed when not in radix mode */ + if (!strncmp(buf, "work_element_descriptor", 23)) + mode = CXL_PREFAULT_WED; + if (!strncmp(buf, "all", 3)) + mode = CXL_PREFAULT_ALL; + } else { + dev_err(device, "Cannot prefault with radix enabled\n"); + } + } if (mode == -1) return -EINVAL; diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 764ff5df0dbc..4cc0b42f2acc 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -365,7 +365,8 @@ static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf, memset(msg, 0, sizeof(msg)); msg[0].addr = client->addr; msg[0].buf = addrbuf; - addrbuf[0] = 0x90 + offset; + /* EUI-48 starts from 0x9a, EUI-64 from 0x98 */ + addrbuf[0] = 0xa0 - at24->chip.byte_len + offset; msg[0].len = 1; msg[1].addr = client->addr; msg[1].flags = I2C_M_RD; @@ -506,6 +507,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count) if (unlikely(!count)) return count; + if (off + count > at24->chip.byte_len) + return -EINVAL; + /* * Read data from chip, protecting against concurrent updates * from this host, but not from other I2C masters. @@ -538,6 +542,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count) if (unlikely(!count)) return -EINVAL; + if (off + count > at24->chip.byte_len) + return -EINVAL; + /* * Write data to chip, protecting against concurrent updates * from this host, but not from other I2C masters. @@ -631,6 +638,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) dev_warn(&client->dev, "page_size looks suspicious (no power of 2)!\n"); + /* + * REVISIT: the size of the EUI-48 byte array is 6 in at24mac402, while + * the call to ilog2() in AT24_DEVICE_MAGIC() rounds it down to 4. + * + * Eventually we'll get rid of the magic values altogether in favor of + * real structs, but for now just manually set the right size. + */ + if (chip.flags & AT24_FLAG_MAC && chip.byte_len == 4) + chip.byte_len = 6; + /* Use I2C operations unless we're stuck with SMBus extensions.
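* Pure-SMBus adapters cannot issue the combined write-then-read
* transfers used by at24_eeprom_read_mac() above, and 16-bit
* addressing (AT24_FLAG_ADDR16) cannot be emulated over the SMBus
* fallback at all.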
*/ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { if (chip.flags & AT24_FLAG_ADDR16) @@ -759,7 +776,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) at24->nvmem_config.reg_read = at24_read; at24->nvmem_config.reg_write = at24_write; at24->nvmem_config.priv = at24; - at24->nvmem_config.stride = 4; + at24->nvmem_config.stride = 1; at24->nvmem_config.word_size = 1; at24->nvmem_config.size = chip.byte_len; diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c index e05c3245930a..fa840666bdd1 100644 --- a/drivers/misc/ibmasm/ibmasmfs.c +++ b/drivers/misc/ibmasm/ibmasmfs.c @@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file) static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { void __iomem *address = (void __iomem *)file->private_data; - unsigned char *page; - int retval; int len = 0; unsigned int value; - - if (*offset < 0) - return -EINVAL; - if (count == 0 || count > 1024) - return 0; - if (*offset != 0) - return 0; - - page = (unsigned char *)__get_free_page(GFP_KERNEL); - if (!page) - return -ENOMEM; + char lbuf[20]; value = readl(address); - len = sprintf(page, "%d\n", value); - - if (copy_to_user(buf, page, len)) { - retval = -EFAULT; - goto exit; - } - *offset += len; - retval = len; + len = snprintf(lbuf, sizeof(lbuf), "%d\n", value); -exit: - free_page((unsigned long)page); - return retval; + return simple_read_from_buffer(buf, count, offset, lbuf, len); } static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset) diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index fc7efedbc4be..24108bfad889 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -1132,7 +1132,8 @@ static void kgdbts_put_char(u8 chr) ts.run_test(0, chr); } -static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp) +static int param_set_kgdbts_var(const char *kmessage, + const struct kernel_param *kp) { int len = strlen(kmessage); diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig index c49e1d2269af..329f14388454 100644 --- a/drivers/misc/mei/Kconfig +++ b/drivers/misc/mei/Kconfig @@ -43,3 +43,16 @@ config INTEL_MEI_TXE Supported SoCs: Intel Bay Trail + +config INTEL_MEI_VIRTIO + tristate "Intel MEI interface emulation with virtio framework" + select INTEL_MEI + depends on X86 && PCI && VIRTIO_PCI + help + This module implements mei hw emulation over virtio transport. + The module will be called mei_virtio. + Enable this if your virtual machine supports virtual mei + device over virtio. 
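The client.c hunks below rework the transmit path around the new DMA ring. The branch structure they introduce is easier to read in isolation; this standalone model mirrors it under simplified assumptions (everything is measured in abstract slot units, the names pick_tx and ext_hdr_slots are illustrative, and the real code derives sizes via mei_data2slots()/mei_slots2data()):

#include <stdio.h>

enum tx_mode { TX_WAIT, TX_COMPLETE, TX_DMA, TX_DMA_PART, TX_PARTIAL };

/* Model of the choice made in mei_cl_write()/mei_cl_irq_write() below. */
static enum tx_mode pick_tx(unsigned int hbuf_slots, unsigned int hbuf_depth,
			    unsigned int dr_slots, unsigned int msg_slots,
			    unsigned int ext_hdr_slots, int dr_supported)
{
	if (hbuf_slots >= msg_slots)
		return TX_COMPLETE;	/* whole message fits the host buffer */
	if (dr_supported && hbuf_slots > ext_hdr_slots && dr_slots)
		/* payload travels through the DMA ring; the host buffer
		 * only carries an extended header with the DMA length */
		return msg_slots < dr_slots ? TX_DMA : TX_DMA_PART;
	if (hbuf_slots == hbuf_depth)
		return TX_PARTIAL;	/* buffer empty: send the largest chunk */
	return TX_WAIT;			/* retry once the buffer drains */
}

int main(void)
{
	printf("%d\n", pick_tx(32, 32, 0, 64, 2, 0));	/* 4: TX_PARTIAL */
	printf("%d\n", pick_tx(8, 32, 256, 64, 2, 1));	/* 2: TX_DMA */
	printf("%d\n", pick_tx(8, 32, 32, 64, 2, 1));	/* 3: TX_DMA_PART */
	return 0;
}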
+ +source "drivers/misc/mei/dal/Kconfig" +source "drivers/misc/mei/spd/Kconfig" diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile index cd6825afa8e1..eed0af3087c0 100644 --- a/drivers/misc/mei/Makefile +++ b/drivers/misc/mei/Makefile @@ -9,6 +9,7 @@ mei-objs += hbm.o mei-objs += interrupt.o mei-objs += client.o mei-objs += main.o +mei-objs += dma-ring.o mei-objs += bus.o mei-objs += bus-fixup.o mei-$(CONFIG_DEBUG_FS) += debugfs.o @@ -21,5 +22,11 @@ obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o mei-txe-objs := pci-txe.o mei-txe-objs += hw-txe.o +obj-$(CONFIG_INTEL_MEI_VIRTIO) += mei-virtio.o +mei-virtio-objs := hw-virtio.o + mei-$(CONFIG_EVENT_TRACING) += mei-trace.o CFLAGS_mei-trace.o = -I$(src) + +obj-$(CONFIG_INTEL_MEI_DAL) += dal/ +obj-$(CONFIG_INTEL_MEI_SPD) += spd/ diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index be64969d986a..81a09e8cd28d 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -460,7 +460,7 @@ struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, if (length == 0) return cb; - cb->buf.data = kmalloc(length, GFP_KERNEL); + cb->buf.data = kmalloc(roundup(length, MEI_DMA_SLOT_SIZE), GFP_KERNEL); if (!cb->buf.data) { mei_io_cb_free(cb); return NULL; @@ -1513,6 +1513,22 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp) return rets; } +static inline void mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, + struct mei_cl_cb *cb) +{ + mei_hdr->host_addr = mei_cl_host_addr(cb->cl); + mei_hdr->me_addr = mei_cl_me_id(cb->cl); + mei_hdr->reserved = 0; + mei_hdr->msg_complete = 0; + mei_hdr->dma_ring = 0; + mei_hdr->internal = cb->internal; +} + +struct mei_msg_hdr_ext { + struct mei_msg_hdr hdr; + u32 dma_len; +}; + /** * mei_cl_irq_write - write a message to device * from the interrupt thread context @@ -1528,12 +1544,15 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, { struct mei_device *dev; struct mei_msg_data *buf; - struct mei_msg_hdr mei_hdr; + struct mei_msg_hdr_ext ext_hdr; + struct mei_msg_hdr *mei_hdr = &ext_hdr.hdr; size_t len; u32 msg_slots; - int slots; + u32 dr_slots; + int hbuf_slots; int rets; bool first_chunk; + const void *data; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; @@ -1553,40 +1572,48 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, return 0; } - slots = mei_hbuf_empty_slots(dev); + mei_msg_hdr_init(mei_hdr, cb); + + hbuf_slots = mei_hbuf_empty_slots(dev); + dr_slots = mei_dma_ring_empty_slots(dev); len = buf->size - cb->buf_idx; + data = buf->data + cb->buf_idx; msg_slots = mei_data2slots(len); - mei_hdr.host_addr = mei_cl_host_addr(cl); - mei_hdr.me_addr = mei_cl_me_id(cl); - mei_hdr.reserved = 0; - mei_hdr.internal = cb->internal; - - if (slots >= msg_slots) { - mei_hdr.length = len; - mei_hdr.msg_complete = 1; - /* Split the message only if we can write the whole host buffer */ - } else if (slots == dev->hbuf_depth) { - msg_slots = slots; - len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); - mei_hdr.length = len; - mei_hdr.msg_complete = 0; + if (hbuf_slots >= msg_slots) { + mei_hdr->length = len; + mei_hdr->msg_complete = 1; + } else if (dev->hbm_f_dr_supported && hbuf_slots > sizeof(ext_hdr) && + dr_slots) { + if (msg_slots < dr_slots) + mei_hdr->msg_complete = 1; + else + len = mei_slots2data(dr_slots); + + mei_hdr->dma_ring = 1; + mei_hdr->length = sizeof(ext_hdr.dma_len); + ext_hdr.dma_len = len; + data = &ext_hdr.dma_len; + + } else if (hbuf_slots == dev->hbuf_depth) { + len = mei_hbuf_max_len(dev); + 
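+ /*
+ * hbuf_slots == dev->hbuf_depth means the host buffer is completely
+ * empty yet still too small for the whole message: send the largest
+ * chunk that fits and let the next host-buffer-empty interrupt
+ * continue the transfer.
+ */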
mei_hdr->length = len; } else { /* wait for next time the host buffer is empty */ return 0; } - cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n", - cb->buf.size, cb->buf_idx); + if (mei_hdr->dma_ring) + mei_dma_ring_write(dev, buf->data + cb->buf_idx, len); - rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx); + rets = mei_write_message(dev, mei_hdr, data); if (rets) goto err; cl->status = 0; cl->writing_state = MEI_WRITING; - cb->buf_idx += mei_hdr.length; - cb->completed = mei_hdr.msg_complete == 1; + cb->buf_idx += len; + cb->completed = mei_hdr->msg_complete == 1; if (first_chunk) { if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) { @@ -1595,7 +1622,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, } } - if (mei_hdr.msg_complete) + if (mei_hdr->msg_complete) list_move_tail(&cb->list, &dev->write_waiting_list); return 0; @@ -1619,10 +1646,13 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) { struct mei_device *dev; struct mei_msg_data *buf; - struct mei_msg_hdr mei_hdr; - int size; + struct mei_msg_hdr_ext ext_hdr; + struct mei_msg_hdr *mei_hdr = &ext_hdr.hdr; int rets; bool blocking; + size_t len; + u32 hbuf_slots, dr_slots, msg_slots; + const void *data; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; @@ -1633,10 +1663,11 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) dev = cl->dev; buf = &cb->buf; - size = buf->size; + len = buf->size; + data = buf->data; blocking = cb->blocking; - cl_dbg(dev, cl, "size=%d\n", size); + cl_dbg(dev, cl, "size = %zu\n", len); rets = pm_runtime_get(dev->dev); if (rets < 0 && rets != -EINPROGRESS) { @@ -1648,11 +1679,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) cb->buf_idx = 0; cl->writing_state = MEI_IDLE; - mei_hdr.host_addr = mei_cl_host_addr(cl); - mei_hdr.me_addr = mei_cl_me_id(cl); - mei_hdr.reserved = 0; - mei_hdr.msg_complete = 0; - mei_hdr.internal = cb->internal; + mei_msg_hdr_init(mei_hdr, cb); rets = mei_cl_tx_flow_ctrl_creds(cl); if (rets < 0) @@ -1660,25 +1687,41 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) if (rets == 0) { cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); - rets = size; goto out; } + if (!mei_hbuf_acquire(dev)) { cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n"); - rets = size; goto out; } - /* Check for a maximum length */ - if (size > mei_hbuf_max_len(dev)) { - mei_hdr.length = mei_hbuf_max_len(dev); - mei_hdr.msg_complete = 0; + hbuf_slots = mei_hbuf_empty_slots(dev); + dr_slots = mei_dma_ring_empty_slots(dev); + msg_slots = mei_data2slots(len); + + if (hbuf_slots >= msg_slots) { + mei_hdr->length = len; + mei_hdr->msg_complete = 1; + } else if (dev->hbm_f_dr_supported && hbuf_slots > sizeof(ext_hdr) && + dr_slots) { + if (msg_slots < dr_slots) + mei_hdr->msg_complete = 1; + else + len = mei_slots2data(dr_slots); + + mei_hdr->dma_ring = 1; + mei_hdr->length = sizeof(ext_hdr.dma_len); + ext_hdr.dma_len = len; + data = &ext_hdr.dma_len; } else { - mei_hdr.length = size; - mei_hdr.msg_complete = 1; + len = mei_hbuf_max_len(dev); + mei_hdr->length = len; } - rets = mei_write_message(dev, &mei_hdr, buf->data); + if (mei_hdr->dma_ring) + mei_dma_ring_write(dev, buf->data, len); + + rets = mei_write_message(dev, mei_hdr, data); if (rets) goto err; @@ -1687,11 +1730,13 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) goto err; cl->writing_state = MEI_WRITING; - cb->buf_idx = mei_hdr.length; - cb->completed = mei_hdr.msg_complete == 1; + cb->buf_idx = len; + cb->completed = mei_hdr->msg_complete == 1; + /* 
reset len to the original size for a function return value */ + len = buf->size; out: - if (mei_hdr.msg_complete) + if (mei_hdr->msg_complete) list_add_tail(&cb->list, &dev->write_waiting_list); else list_add_tail(&cb->list, &dev->write_list); @@ -1716,7 +1761,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) } } - rets = size; + rets = len; err: cl_dbg(dev, cl, "rpm: autosuspend\n"); pm_runtime_mark_last_busy(dev->dev); diff --git a/drivers/misc/mei/dal/Kconfig b/drivers/misc/mei/dal/Kconfig new file mode 100644 index 000000000000..8943d4f7f7e4 --- /dev/null +++ b/drivers/misc/mei/dal/Kconfig @@ -0,0 +1,15 @@ +config INTEL_MEI_DAL + tristate "Dynamic Application Loader for ME" + depends on INTEL_MEI + help + The Dynamic Application Loader enables downloading Java applets + to the DAL firmware and running them in a secure environment. + The DAL module exposes both a user space API and a kernel space API. + +config INTEL_MEI_DAL_TEST + tristate "Test Module for Dynamic Application Loader for ME" + depends on INTEL_MEI_DAL + help + Test module for the Dynamic Application Loader, used to exercise + the kernel space API from a user space client. The test module + calls the kernel space API functions of the DAL module. diff --git a/drivers/misc/mei/dal/Makefile b/drivers/misc/mei/dal/Makefile new file mode 100644 index 000000000000..636fb6027e2c --- /dev/null +++ b/drivers/misc/mei/dal/Makefile @@ -0,0 +1,12 @@ +ccflags-y += -D__CHECK_ENDIAN__ + +obj-$(CONFIG_INTEL_MEI_DAL) += mei_dal.o +mei_dal-objs := dal_class.o +mei_dal-objs += acp_parser.o +mei_dal-objs += bh_external.o +mei_dal-objs += bh_internal.o +mei_dal-objs += dal_cdev.o +mei_dal-objs += dal_kdi.o +mei_dal-objs += dal_ta_access.o + +obj-$(CONFIG_INTEL_MEI_DAL_TEST) += dal_test.o diff --git a/drivers/misc/mei/dal/acp_format.h b/drivers/misc/mei/dal/acp_format.h new file mode 100644 index 000000000000..6f9c1f5b7647 --- /dev/null +++ b/drivers/misc/mei/dal/acp_format.h @@ -0,0 +1,253 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef _ACP_FORMAT_H +#define _ACP_FORMAT_H + +#include + +#define AC_MAX_INS_REASONS_LENGTH 1024 +#define AC_MAX_USED_SERVICES 20 +#define AC_MAX_PROPS_LENGTH 2048 +#define AC_MAX_PACK_HASH_LEN 32 + +/** + * enum ac_cmd_id - acp file command (acp type) + * + * @AC_CMD_INVALID: invalid command + * @AC_INSTALL_SD: install new sub security domain + * @AC_UNINSTALL_SD: uninstall sub security domain + * @AC_INSTALL_JTA: install java ta + * @AC_UNINSTALL_JTA: uninstall java ta + * @AC_INSTALL_NTA: install native ta (currently NOT SUPPORTED) + * @AC_UNINSTALL_NTA: uninstall native ta (currently NOT SUPPORTED) + * @AC_UPDATE_SVL: update the security version list + * @AC_INSTALL_JTA_PROP: ta properties for installation + * @AC_CMD_NUM: number of acp commands + */ +enum ac_cmd_id { + AC_CMD_INVALID, + AC_INSTALL_SD, + AC_UNINSTALL_SD, + AC_INSTALL_JTA, + AC_UNINSTALL_JTA, + AC_INSTALL_NTA, + AC_UNINSTALL_NTA, + AC_UPDATE_SVL, + AC_INSTALL_JTA_PROP, + AC_CMD_NUM +}; + +/** + * struct ac_pack_hash - ta pack hash + * + * @data: ta hash + */ +struct ac_pack_hash { + u8 data[AC_MAX_PACK_HASH_LEN]; +} __packed; + +/** + * struct ac_pack_header - admin comman pack header + * + * @magic: magic string which represents an ACP + * @version: package format version + * @byte_order: byte order of package (0 big endian, 1 little endian) + * @reserved: reserved bytes + * @size: total package size + * @cmd_id: acp command (acp file type) + * @svn: security version number + * + * @idx_num: the number of the indexed sections + * @idx_condition: condition section offset + * @idx_data: data section offset + */ +struct ac_pack_header { + /*ACP Header*/ + u8 magic[4]; + u8 version; + u8 byte_order; + u16 reserved; + u32 size; + u32 cmd_id; + u32 svn; + + /* Index Section */ + u32 idx_num; + u32 idx_condition; + u32 idx_data; +} __packed; + +/** + * struct ac_ta_id_list - A list of ta ids which the ta + * is allowed to communicate with. + * + * @num: ta ids count + * @list: ta ids list + */ +struct ac_ta_id_list { + u32 num; + uuid_t list[0]; +} __packed; + +/** + * struct ac_prop_list - TLV list of acp properties + * + * @num: number of properties + * @len: size of all properties + * @data: acp properties. 
TLV format is "type\0key\0value\0" + * (e.g. string\0name\0Tom\0int\0Age\013\0) + */ +struct ac_prop_list { + u32 num; + u32 len; + s8 data[0]; +} __packed; + +/** + * struct ac_ins_reasons - list of event codes that can be + * received or posted by ta + * + * @len: event codes count + * @data: event codes list + */ +struct ac_ins_reasons { + u32 len; + u32 data[0]; +} __packed; + +/** + * struct ac_pack - general struct to hold parsed acp content + * + * @head: acp pack header + * @data: acp parsed content + */ +struct ac_pack { + struct ac_pack_header *head; + char data[0]; +} __packed; + +/** + * struct ac_ins_ta_header - ta installation header + * + * @ta_id: ta id + * @ta_svn: ta security version number + * @hash_alg_type: ta hash algorithm type + * @ta_reserved: reserved bytes + * @hash: ta pack hash + */ +struct ac_ins_ta_header { + uuid_t ta_id; + u32 ta_svn; + u8 hash_alg_type; + u8 ta_reserved[3]; + struct ac_pack_hash hash; +} __packed; + +/** + * struct ac_ins_jta_pack - ta installation information + * + * @ins_cond: ta install conditions (contains some of the manifest data, + * including security.version, applet.version, applet.platform, + * applet.api.level) + * @head: ta installation header + */ +struct ac_ins_jta_pack { + struct ac_prop_list *ins_cond; + struct ac_ins_ta_header *head; +} __packed; + +/** + * struct ac_ins_jta_prop_header - ta manifest header + * + * @mem_quota: ta heap size + * @ta_encrypted: ta encrypted by provider flag + * @padding: padding + * @allowed_inter_session_num: allowed internal session count + * @ac_groups: ta permission groups + * @timeout: ta timeout in milliseconds + */ +struct ac_ins_jta_prop_header { + u32 mem_quota; + u8 ta_encrypted; + u8 padding; + u16 allowed_inter_session_num; + u64 ac_groups; + u32 timeout; +} __packed; + +/** + * struct ac_ins_jta_prop - ta manifest + * + * @head: manifest header + * @post_reasons: list of event codes that can be posted by ta + * @reg_reasons: list of event codes that can be received by ta + * @prop: all other manifest fields (acp properties) + * @used_service_list: list of ta ids which ta is allowed to communicate with + */ +struct ac_ins_jta_prop { + struct ac_ins_jta_prop_header *head; + struct ac_ins_reasons *post_reasons; + struct ac_ins_reasons *reg_reasons; + struct ac_prop_list *prop; + struct ac_ta_id_list *used_service_list; +} __packed; + +#endif /* _ACP_FORMAT_H */ diff --git a/drivers/misc/mei/dal/acp_parser.c b/drivers/misc/mei/dal/acp_parser.c new file mode 100644 index 000000000000..32e00cf8a74c --- /dev/null +++ b/drivers/misc/mei/dal/acp_parser.c @@ -0,0 +1,562 @@ +/****************************************************************************** + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. 
+ * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include +#include + +#include "acp_format.h" +#include "acp_parser.h" + +#define PR_ALIGN 4 + +/* CSS Header + CSS Crypto Block + * Prefixes each signed ACP package + */ +#define AC_CSS_HEADER_LENGTH (128 + 520) + +/** + * struct ac_pr_state - admin command pack reader state + * + * @cur : current read position + * @head : acp file head + * @total : size of acp file + */ +struct ac_pr_state { + const char *cur; + const char *head; + unsigned int total; +}; + +/** + * ac_pr_init - init pack reader + * + * @pr: pack reader + * @data: acp file content (without CSS header) + * @n: acp file size (without CSS header) + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +static int ac_pr_init(struct ac_pr_state *pr, const char *data, + unsigned int n) +{ + /* check integer overflow */ + if ((size_t)data > SIZE_MAX - n) + return -EINVAL; + + pr->cur = data; + pr->head = data; + pr->total = n; + return 0; +} + +/** + * ac_pr_8b_align_move - update pack reader cur pointer after reading n_move + * bytes. Leave cur aligned to 8 bytes. + * (e.g. 
when n_move is 3, increase cur by 8)
+ *
+ * @pr: pack reader
+ * @n_move: number of bytes to move cur pointer ahead
+ * will be rounded up to keep cur 8 bytes aligned
+ *
+ * Return: 0 on success
+ * -EINVAL on invalid parameters
+ */
+static int ac_pr_8b_align_move(struct ac_pr_state *pr, size_t n_move)
+{
+	unsigned long offset;
+	const char *new_cur = pr->cur + n_move;
+	size_t len_from_head = new_cur - pr->head;
+
+	if ((size_t)pr->cur > SIZE_MAX - n_move || new_cur < pr->head)
+		return -EINVAL;
+
+	offset = ((8 - (len_from_head & 7)) & 7);
+	if ((size_t)new_cur > SIZE_MAX - offset)
+		return -EINVAL;
+
+	new_cur = new_cur + offset;
+	if (new_cur > pr->head + pr->total)
+		return -EINVAL;
+
+	pr->cur = new_cur;
+	return 0;
+}
+
+/**
+ * ac_pr_align_move - update pack reader cur pointer after reading n_move bytes
+ * Leave cur aligned to 4 bytes.
+ * (e.g. when n_move is 1, increase cur by 4)
+ *
+ * @pr: pack reader
+ * @n_move: number of bytes to move cur pointer ahead
+ * will be rounded up to keep cur 4 bytes aligned
+ *
+ * Return: 0 on success
+ * -EINVAL on invalid parameters
+ */
+static int ac_pr_align_move(struct ac_pr_state *pr, size_t n_move)
+{
+	const char *new_cur = pr->cur + n_move;
+	size_t len_from_head = new_cur - pr->head;
+	size_t offset;
+
+	if ((size_t)pr->cur > SIZE_MAX - n_move || new_cur < pr->head)
+		return -EINVAL;
+
+	offset = ((4 - (len_from_head & 3)) & 3);
+	if ((size_t)new_cur > SIZE_MAX - offset)
+		return -EINVAL;
+
+	new_cur = new_cur + offset;
+	if (new_cur > pr->head + pr->total)
+		return -EINVAL;
+
+	pr->cur = new_cur;
+	return 0;
+}
+
+/**
+ * ac_pr_move - update pack reader cur pointer after reading n_move bytes
+ *
+ * @pr: pack reader
+ * @n_move: number of bytes to move cur pointer ahead
+ *
+ * Return: 0 on success
+ * -EINVAL on invalid parameters
+ */
+static int ac_pr_move(struct ac_pr_state *pr, size_t n_move)
+{
+	const char *new_cur = pr->cur + n_move;
+
+	/* integer overflow or out of acp pkg size */
+	if ((size_t)pr->cur > SIZE_MAX - n_move ||
+	    new_cur > pr->head + pr->total)
+		return -EINVAL;
+
+	pr->cur = new_cur;
+
+	return 0;
+}
+
+/**
+ * ac_pr_is_safe_to_read - check whether it is safe to read n_move more
+ * bytes from the acp file
+ *
+ * @pr: pack reader
+ * @n_move: number of bytes to check if it is safe to read
+ *
+ * Return: true when it is safe to read n_move more bytes
+ * false otherwise
+ */
+static bool ac_pr_is_safe_to_read(const struct ac_pr_state *pr, size_t n_move)
+{
+	/* pointer overflow */
+	if ((size_t)pr->cur > SIZE_MAX - n_move)
+		return false;
+
+	if (pr->cur + n_move > pr->head + pr->total)
+		return false;
+
+	return true;
+}
+
+/**
+ * ac_pr_is_end - check if cur is at the end of the acp file
+ *
+ * @pr: pack reader
+ *
+ * Return: true when cur is at the end of the acp
+ * false otherwise
+ */
+static bool ac_pr_is_end(struct ac_pr_state *pr)
+{
+	return (pr->cur == pr->head + pr->total);
+}
+
+/**
+ * acp_load_reasons - load list of event codes that can be
+ * received or posted by ta
+ *
+ * @pr: pack reader
+ * @reasons: out param to hold the list of event codes
+ *
+ * Return: 0 on success
+ * -EINVAL on invalid parameters
+ */
+static int acp_load_reasons(struct ac_pr_state *pr,
+			    struct ac_ins_reasons **reasons)
+{
+	size_t len;
+	struct ac_ins_reasons *r;
+
+	if (!ac_pr_is_safe_to_read(pr, sizeof(*r)))
+		return -EINVAL;
+
+	r = (struct ac_ins_reasons *)pr->cur;
+
+	if (r->len > AC_MAX_INS_REASONS_LENGTH)
+		return -EINVAL;
+
+	len = sizeof(*r) + r->len * sizeof(r->data[0]);
+	if (!ac_pr_is_safe_to_read(pr, len))
+		return -EINVAL;
+
+	*reasons = r;
+	return ac_pr_align_move(pr, len);
+}
+
+/**
+ * acp_load_taid_list - load list of ta ids which ta is allowed
+ * to communicate with
+ *
+ * @pr: pack reader
+ * @taid_list: out param to hold the loaded ta ids
+ *
+ * Return: 0 on success
+ * -EINVAL on invalid parameters
+ */
+static int acp_load_taid_list(struct ac_pr_state *pr,
+			      struct ac_ta_id_list **taid_list)
+{
+	size_t len;
+	struct ac_ta_id_list *t;
+
+	if (!ac_pr_is_safe_to_read(pr, sizeof(*t)))
+		return -EINVAL;
+
+	t = (struct ac_ta_id_list *)pr->cur;
+	if (t->num > AC_MAX_USED_SERVICES)
+		return -EINVAL;
+
+	len = sizeof(*t) + t->num * sizeof(t->list[0]);
+
+	if (!ac_pr_is_safe_to_read(pr, len))
+		return -EINVAL;
+
+	*taid_list = t;
+	return ac_pr_align_move(pr, len);
+}
+
+/**
+ * acp_load_prop - load property from acp
+ *
+ * @pr: pack reader
+ * @prop: out param to hold the loaded property
+ *
+ * Return: 0 on success
+ * -EINVAL on invalid parameters
+ */
+static int acp_load_prop(struct ac_pr_state *pr, struct ac_prop_list **prop)
+{
+	size_t len;
+	struct ac_prop_list *p;
+
+	if (!ac_pr_is_safe_to_read(pr, sizeof(*p)))
+		return -EINVAL;
+
+	p = (struct ac_prop_list *)pr->cur;
+	if (p->len > AC_MAX_PROPS_LENGTH)
+		return -EINVAL;
+
+	len = sizeof(*p) + p->len * sizeof(p->data[0]);
+
+	if (!ac_pr_is_safe_to_read(pr, len))
+		return -EINVAL;
+
+	*prop = p;
+	return ac_pr_align_move(pr, len);
+}
+
+/**
+ * acp_load_ta_pack - load ta pack from acp
+ *
+ * @pr: pack reader
+ * @ta_pack: out param to hold the ta pack
+ *
+ * Return: 0 on success
+ * -EINVAL on invalid parameters
+ */
+static int acp_load_ta_pack(struct ac_pr_state *pr, char **ta_pack)
+{
+	size_t len;
+	char *t;
+
+	/* 8 byte align to obey the JEFF rule */
+	if (ac_pr_8b_align_move(pr, 0))
+		return -EINVAL;
+
+	t = (char *)pr->cur;
+
+	/*
+	 * assume ta pack is the last item of one package,
+	 * move cursor to the end directly
+	 */
+	if (pr->cur > pr->head + pr->total)
+		return -EINVAL;
+
+	len = pr->head + pr->total - pr->cur;
+	if (!ac_pr_is_safe_to_read(pr, len))
+		return -EINVAL;
+
+	*ta_pack = t;
+	return ac_pr_move(pr, len);
+}
+
+/**
+ * acp_load_ins_jta_prop_head - load ta manifest header
+ *
+ * @pr: pack reader
+ * @head: out param to hold manifest header
+ *
+ * Return: 0 on success
+ * -EINVAL on invalid parameters
+ */
+static int acp_load_ins_jta_prop_head(struct ac_pr_state *pr,
+				      struct ac_ins_jta_prop_header **head)
+{
+	if (!ac_pr_is_safe_to_read(pr, sizeof(**head)))
+		return -EINVAL;
+
+	*head = (struct ac_ins_jta_prop_header *)pr->cur;
+	return ac_pr_align_move(pr, sizeof(**head));
+}
+
+/**
+ * acp_load_ins_jta_prop - load ta properties information (ta manifest)
+ *
+ * @pr: pack reader
+ * @pack: out param to hold ta manifest
+ *
+ * Return: 0 on success
+ * -EINVAL on invalid parameters
+ */
+static int acp_load_ins_jta_prop(struct ac_pr_state *pr,
+				 struct ac_ins_jta_prop *pack)
+{
+	int ret;
+
+	ret = acp_load_ins_jta_prop_head(pr, &pack->head);
+	if (ret)
+		return ret;
+
+	ret = acp_load_reasons(pr, &pack->post_reasons);
+	if (ret)
+		return ret;
+
+	ret = acp_load_reasons(pr, &pack->reg_reasons);
+	if (ret)
+		return ret;
+
+	ret = acp_load_prop(pr, &pack->prop);
+	if (ret)
+		return ret;
+
+	ret = acp_load_taid_list(pr, &pack->used_service_list);
+
+	return ret;
+}
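
[Review note — illustrative only, not part of the patch] A minimal sketch of how the "type\0key\0value\0" TLV entries of a struct ac_prop_list could be walked once acp_load_prop() has validated it. The helper name and the pr_debug() reporting are invented for illustration; it assumes strnlen() from <linux/string.h>.

static void ac_prop_list_dump(const struct ac_prop_list *prop)
{
	const char *p = (const char *)prop->data;
	const char *end = p + prop->len;
	u32 i;

	for (i = 0; i < prop->num && p < end; i++) {
		/* each entry is three NUL-terminated strings */
		const char *type = p;
		const char *key = type + strnlen(type, end - type) + 1;
		const char *value;

		if (key >= end)
			break;
		value = key + strnlen(key, end - key) + 1;
		if (value >= end)
			break;
		pr_debug("prop %u: %s %s=%s\n", i, type, key, value);
		p = value + strnlen(value, end - value) + 1;
	}
}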
+
+/**
+ * acp_load_ins_jta_head - load ta installation header
+ *
+ * @pr: pack reader
+ * @head: out param to hold the installation header
+ *
+ * Return: 0 on success
+ * -EINVAL on invalid parameters
+ */
+static int acp_load_ins_jta_head(struct ac_pr_state *pr,
+				 struct ac_ins_ta_header **head)
+{
+	if (!ac_pr_is_safe_to_read(pr, sizeof(**head)))
+		return -EINVAL;
+
+	*head = (struct ac_ins_ta_header *)pr->cur;
+	return ac_pr_align_move(pr, sizeof(**head));
+}
+
+/**
+ * acp_load_ins_jta - load ta installation information from acp
+ *
+ * @pr: pack reader
+ * @pack: out param to hold install information
+ *
+ * Return: 0 on success
+ * -EINVAL on invalid parameters
+ */
+static int acp_load_ins_jta(struct ac_pr_state *pr,
+			    struct ac_ins_jta_pack *pack)
+{
+	int ret;
+
+	ret = acp_load_prop(pr, &pack->ins_cond);
+	if (ret)
+		return ret;
+
+	ret = acp_load_ins_jta_head(pr, &pack->head);
+
+	return ret;
+}
+
+/**
+ * acp_load_pack_head - load acp pack header
+ *
+ * @pr: pack reader
+ * @head: out param to hold the acp header
+ *
+ * Return: 0 on success
+ * -EINVAL on invalid parameters
+ */
+static int acp_load_pack_head(struct ac_pr_state *pr,
+			      struct ac_pack_header **head)
+{
+	if (!ac_pr_is_safe_to_read(pr, sizeof(**head)))
+		return -EINVAL;
+
+	*head = (struct ac_pack_header *)pr->cur;
+	return ac_pr_align_move(pr, sizeof(**head));
+}
+
+/**
+ * acp_load_pack - load and parse pack from acp file
+ *
+ * @raw_pack: acp file content, without the acp CSS header
+ * @size: acp file size (without CSS header)
+ * @cmd_id: command id
+ * @pack: out param to hold the loaded pack
+ *
+ * Return: 0 on success
+ * -EINVAL on invalid parameters
+ */
+static int acp_load_pack(const char *raw_pack, unsigned int size,
+			 unsigned int cmd_id, struct ac_pack *pack)
+{
+	int ret;
+	struct ac_pr_state pr;
+	struct ac_ins_jta_pack_ext *pack_ext;
+	struct ac_ins_jta_prop_ext *prop_ext;
+
+	ret = ac_pr_init(&pr, raw_pack, size);
+	if (ret)
+		return ret;
+
+	if (cmd_id != AC_INSTALL_JTA_PROP) {
+		ret = acp_load_pack_head(&pr, &pack->head);
+		if (ret)
+			return ret;
+	}
+
+	if (cmd_id != AC_INSTALL_JTA_PROP && cmd_id != pack->head->cmd_id)
+		return -EINVAL;
+
+	switch (cmd_id) {
+	case AC_INSTALL_JTA:
+		pack_ext = (struct ac_ins_jta_pack_ext *)pack;
+		ret = acp_load_ins_jta(&pr, &pack_ext->cmd_pack);
+		if (ret)
+			break;
+		ret = acp_load_ta_pack(&pr, &pack_ext->ta_pack);
+		break;
+	case AC_INSTALL_JTA_PROP:
+		prop_ext = (struct ac_ins_jta_prop_ext *)pack;
+		ret = acp_load_ins_jta_prop(&pr, &prop_ext->cmd_pack);
+		if (ret)
+			break;
+		/* Note: the next section is a JEFF file,
+		 * and not a ta_pack (JTA_properties + JEFF file),
+		 * but we could reuse acp_load_ta_pack() here.
+ */ + ret = acp_load_ta_pack(&pr, &prop_ext->jeff_pack); + break; + default: + return -EINVAL; + } + + if (!ac_pr_is_end(&pr)) + return -EINVAL; + + return ret; +} + +/** + * acp_pload_ins_jta - load and parse ta pack from acp file + * + * Exported function in acp parser API + * + * @raw_data: acp file content + * @size: acp file size + * @pack: out param to hold the ta pack + * + * Return: 0 on success + * -EINVAL on invalid parameters + */ +int acp_pload_ins_jta(const void *raw_data, unsigned int size, + struct ac_ins_jta_pack_ext *pack) +{ + int ret; + + if (!raw_data || size <= AC_CSS_HEADER_LENGTH || !pack) + return -EINVAL; + + ret = acp_load_pack((const char *)raw_data + AC_CSS_HEADER_LENGTH, + size - AC_CSS_HEADER_LENGTH, + AC_INSTALL_JTA, (struct ac_pack *)pack); + + return ret; +} diff --git a/drivers/misc/mei/dal/acp_parser.h b/drivers/misc/mei/dal/acp_parser.h new file mode 100644 index 000000000000..11b104ffa71f --- /dev/null +++ b/drivers/misc/mei/dal/acp_parser.h @@ -0,0 +1,94 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef _ACP_PARSER_H +#define _ACP_PARSER_H + +#include "acp_format.h" + +/** + * struct ac_ins_jta_pack_ext - parsed ta pack from acp file + * + * @head: acp pack header + * @cmd_pack: ta installation information pack + * @ta_pack: raw ta pack + */ +struct ac_ins_jta_pack_ext { + struct ac_pack_header *head; + struct ac_ins_jta_pack cmd_pack; + char *ta_pack; +} __packed; + +/** + * struct ac_ins_jta_prop_ext - parsed ta properties information + * from acp file + * + * @cmd_pack: ta installation properties pack + * @jeff_pack: ta jeff pack + */ +struct ac_ins_jta_prop_ext { + struct ac_ins_jta_prop cmd_pack; + char *jeff_pack; +} __packed; + +int acp_pload_ins_jta(const void *raw_data, unsigned int size, + struct ac_ins_jta_pack_ext *pack); + +#endif /* _ACP_PARSER_H */ diff --git a/drivers/misc/mei/dal/bh_cmd_defs.h b/drivers/misc/mei/dal/bh_cmd_defs.h new file mode 100644 index 000000000000..aa86f522628e --- /dev/null +++ b/drivers/misc/mei/dal/bh_cmd_defs.h @@ -0,0 +1,293 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __BH_DAL_H_
+#define __BH_DAL_H_
+
+#include
+#include
+
+/**
+ * enum bh_command_id - bh command ids
+ *
+ * @BHP_CMD_INIT: init command
+ * @BHP_CMD_DEINIT: deinit command
+ * @BHP_CMD_VERIFY_JAVATA: verify ta
+ * @BHP_CMD_DOWNLOAD_JAVATA: download ta to DAL
+ * @BHP_CMD_OPEN_JTASESSION: open session to ta
+ * @BHP_CMD_CLOSE_JTASESSION: close session with ta
+ * @BHP_CMD_FORCECLOSE_JTASESSION: force close session
+ * @BHP_CMD_SENDANDRECV: send and receive messages to ta
+ * @BHP_CMD_SENDANDRECV_INTERNAL: internal send and receive
+ * @BHP_CMD_RUN_NATIVETA: run native trusted application
+ * (currently NOT SUPPORTED)
+ * @BHP_CMD_STOP_NATIVETA: stop running native ta (currently NOT SUPPORTED)
+ * @BHP_CMD_OPEN_SDSESSION: open security domain session
+ * @BHP_CMD_CLOSE_SDSESSION: close security domain session
+ * @BHP_CMD_INSTALL_SD: install new sub security domain
+ * @BHP_CMD_UNINSTALL_SD: uninstall sub security domain
+ * @BHP_CMD_INSTALL_JAVATA: install java ta
+ * @BHP_CMD_UNINSTALL_JAVATA: uninstall java ta
+ * @BHP_CMD_INSTALL_NATIVETA: install native ta (currently NOT SUPPORTED)
+ * @BHP_CMD_UNINSTALL_NATIVETA: uninstall native ta (currently NOT SUPPORTED)
+ * @BHP_CMD_LIST_SD: get list of all security domains
+ * @BHP_CMD_LIST_TA: get list of all installed trusted applications
+ * @BHP_CMD_RESET: reset command
+ * @BHP_CMD_LIST_TA_PROPERTIES: get list of all ta properties (ta manifest)
+ * @BHP_CMD_QUERY_TA_PROPERTY: query specified ta property
+ * @BHP_CMD_LIST_JTA_SESSIONS: get list of all opened ta sessions
+ * @BHP_CMD_LIST_TA_PACKAGES: get list of all ta packages in DAL
+ * @BHP_CMD_GET_ISD: get Intel security domain uuid
+ * @BHP_CMD_GET_SD_BY_TA: get security domain id of ta
+ * @BHP_CMD_LAUNCH_VM: launch IVM
+ * @BHP_CMD_CLOSE_VM: close IVM
+ * @BHP_CMD_QUERY_NATIVETA_STATUS: query specified native ta status
+ * (currently NOT SUPPORTED)
+ * @BHP_CMD_QUERY_SD_STATUS: query specified security domain status
+ * @BHP_CMD_LIST_DOWNLOADED_NTA: get list of all native trusted applications
+ * (currently NOT SUPPORTED)
+ * @BHP_CMD_UPDATE_SVL: update security version list
+ * @BHP_CMD_CHECK_SVL_TA_BLOCKED_STATE: check if ta security version is blocked
+ * @BHP_CMD_QUERY_TEE_METADATA: get DAL metadata (including api_level,
+ * library_version, dal_key_hash and more)
+ *
+ * @BHP_CMD_MAX: max command id
+ */
+
+enum bh_command_id {
+	BHP_CMD_INIT = 0,
+	BHP_CMD_DEINIT,
+	BHP_CMD_VERIFY_JAVATA,
+	BHP_CMD_DOWNLOAD_JAVATA,
+	BHP_CMD_OPEN_JTASESSION,
+	BHP_CMD_CLOSE_JTASESSION,
+	BHP_CMD_FORCECLOSE_JTASESSION,
+	BHP_CMD_SENDANDRECV,
+	BHP_CMD_SENDANDRECV_INTERNAL,
+	BHP_CMD_RUN_NATIVETA,
+	
BHP_CMD_STOP_NATIVETA,
+	BHP_CMD_OPEN_SDSESSION,
+	BHP_CMD_CLOSE_SDSESSION,
+	BHP_CMD_INSTALL_SD,
+	BHP_CMD_UNINSTALL_SD,
+	BHP_CMD_INSTALL_JAVATA,
+	BHP_CMD_UNINSTALL_JAVATA,
+	BHP_CMD_INSTALL_NATIVETA,
+	BHP_CMD_UNINSTALL_NATIVETA,
+	BHP_CMD_LIST_SD,
+	BHP_CMD_LIST_TA,
+	BHP_CMD_RESET,
+	BHP_CMD_LIST_TA_PROPERTIES,
+	BHP_CMD_QUERY_TA_PROPERTY,
+	BHP_CMD_LIST_JTA_SESSIONS,
+	BHP_CMD_LIST_TA_PACKAGES,
+	BHP_CMD_GET_ISD,
+	BHP_CMD_GET_SD_BY_TA,
+	BHP_CMD_LAUNCH_VM,
+	BHP_CMD_CLOSE_VM,
+	BHP_CMD_QUERY_NATIVETA_STATUS,
+	BHP_CMD_QUERY_SD_STATUS,
+	BHP_CMD_LIST_DOWNLOADED_NTA,
+	BHP_CMD_UPDATE_SVL,
+	BHP_CMD_CHECK_SVL_TA_BLOCKED_STATE,
+	BHP_CMD_QUERY_TEE_METADATA,
+	BHP_CMD_MAX
+};
+
+#define BH_MSG_RESP_MAGIC 0x55aaa5ff
+#define BH_MSG_CMD_MAGIC 0x55aaa3ff
+
+/**
+ * struct bh_msg_header - transport header
+ *
+ * @magic: BH_MSG_RESP/CMD_MAGIC
+ * @length: overall message length
+ */
+struct bh_msg_header {
+	u32 magic;
+	u32 length;
+};
+
+/**
+ * struct bh_command_header - bh command header
+ *
+ * @h: transport header
+ * @seq: message sequence number
+ * @id: the command id (enum bh_command_id)
+ * @pad: padded for 64 bit
+ * @cmd: command buffer
+ */
+struct bh_command_header {
+	struct bh_msg_header h;
+	u64 seq;
+	u32 id;
+	u8 pad[4];
+	s8 cmd[0];
+} __packed;
+
+/**
+ * struct bh_response_header - response header (from the DAL)
+ *
+ * @h: transport header
+ * @seq: message sequence number
+ * @ta_session_id: session id (DAL firmware address)
+ * @code: response code
+ * @pad: padded for 64 bit
+ * @data: response buffer
+ */
+struct bh_response_header {
+	struct bh_msg_header h;
+	u64 seq;
+	u64 ta_session_id;
+	s32 code;
+	u8 pad[4];
+	s8 data[0];
+} __packed;
+
+/**
+ * struct bh_download_jta_cmd - download java trusted application.
+ *
+ * @ta_id: trusted application (ta) id
+ * @ta_blob: trusted application blob
+ */
+struct bh_download_jta_cmd {
+	uuid_t ta_id;
+	s8 ta_blob[0];
+} __packed;
+
+/**
+ * struct bh_open_jta_session_cmd - open session to TA command
+ *
+ * @ta_id: trusted application (ta) id
+ * @buffer: session initial parameters (optional)
+ */
+struct bh_open_jta_session_cmd {
+	uuid_t ta_id;
+	s8 buffer[0];
+} __packed;
+
+/**
+ * struct bh_close_jta_session_cmd - close session to TA command
+ *
+ * @ta_session_id: session id
+ */
+struct bh_close_jta_session_cmd {
+	u64 ta_session_id;
+} __packed;
+
+/**
+ * struct bh_cmd - bh command
+ *
+ * @ta_session_id: session id
+ * @command: command id to ta
+ * @outlen: length of output buffer
+ * @buffer: data to send
+ */
+struct bh_cmd {
+	u64 ta_session_id;
+	s32 command;
+	u32 outlen;
+	s8 buffer[0];
+} __packed;
+
+/**
+ * struct bh_check_svl_jta_blocked_state_cmd - command to check if
+ * the trusted application security version is blocked
+ *
+ * @ta_id: trusted application id
+ */
+struct bh_check_svl_jta_blocked_state_cmd {
+	uuid_t ta_id;
+} __packed;
+
+/**
+ * struct bh_resp - bh response
+ *
+ * @response: response code. Originated from java in big endian format
+ * @buffer: response buffer
+ */
+struct bh_resp {
+	__be32 response;
+	s8 buffer[0];
+} __packed;
+
+/**
+ * struct bh_resp_bof - response when output buffer is too small
+ *
+ * @response: response code.
Originated from java in big endian format + * @request_length: the needed output buffer length + */ +struct bh_resp_bof { + __be32 response; + __be32 request_length; +} __packed; + +/** + * struct bh_resp_list_ta_packages - list of ta packages from DAL + * + * @count: count of ta packages + * @ta_ids: ta packages ids + */ +struct bh_resp_list_ta_packages { + u32 count; + uuid_t ta_ids[0]; +} __packed; + +#endif /* __BH_DAL_H_*/ diff --git a/drivers/misc/mei/dal/bh_errcode.h b/drivers/misc/mei/dal/bh_errcode.h new file mode 100644 index 000000000000..127324230c3a --- /dev/null +++ b/drivers/misc/mei/dal/bh_errcode.h @@ -0,0 +1,208 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __BH_SHARED_ERRCODE_H +#define __BH_SHARED_ERRCODE_H + +/* + * BH Error codes numbers across Beihai Host and Firmware. 
+ */
+
+#define BH_SUCCESS 0x000
+
+/* BHP specific error code section */
+
+#define BPE_NOT_INIT 0x001
+#define BPE_SERVICE_UNAVAILABLE 0x002
+#define BPE_INTERNAL_ERROR 0x003
+#define BPE_COMMS_ERROR 0x004
+#define BPE_OUT_OF_MEMORY 0x005
+#define BPE_INVALID_PARAMS 0x006
+#define BPE_MESSAGE_TOO_SHORT 0x007
+#define BPE_MESSAGE_ILLEGAL 0x008
+#define BPE_NO_CONNECTION_TO_FIRMWARE 0x009
+#define BPE_NOT_IMPLEMENT 0x00A
+#define BPE_OUT_OF_RESOURCE 0x00B
+#define BPE_INITIALIZED_ALREADY 0x00C
+#define BPE_CONNECT_FAILED 0x00D
+
+/* General error code section for Beihai on FW: 0x100 */
+
+#define BHE_OUT_OF_MEMORY 0x101
+#define BHE_BAD_PARAMETER 0x102
+#define BHE_INSUFFICIENT_BUFFER 0x103
+#define BHE_MUTEX_INIT_FAIL 0x104
+#define BHE_COND_INIT_FAIL 0x105
+#define BHE_WD_TIMEOUT 0x106
+#define BHE_FAILED 0x107
+#define BHE_INVALID_HANDLE 0x108
+#define BHE_IPC_ERR_DEFAULT 0x109
+#define BHE_IPC_ERR_PLATFORM 0x10A
+#define BHE_IPC_SRV_INIT_FAIL 0x10B
+
+/* VM communication error code section: 0x200 */
+
+#define BHE_MAILBOX_NOT_FOUND 0x201
+#define BHE_APPLET_CRASHED BHE_MAILBOX_NOT_FOUND
+#define BHE_MSG_QUEUE_IS_FULL 0x202
+#define BHE_MAILBOX_DENIED 0x203
+
+/* VM InternalAppletCommunication error 0x240 */
+
+#define BHE_IAC_INTERNAL_SESSION_NUM_EXCEED 0x241
+#define BHE_IAC_CLIENT_SLOT_FULL 0x242
+#define BHE_IAC_SERVICETA_EXITED 0x243
+#define BHE_IAC_EXIST_INTERNAL_SESSION 0x244
+#define BHE_IAC_SERVICETA_UNCAUGHT_EXCEPTION 0x245
+#define BHE_IAC_SERVICE_SESSION_NOT_FOUND 0x246
+#define BHE_IAC_SERVICE_HOST_SESSION_NUM_EXCEED 0x247
+
+/* Firmware thread/mutex error code section: 0x280 */
+#define BHE_THREAD_ERROR 0x281
+#define BHE_THREAD_TIMED_OUT 0x282
+
+/* Applet manager error code section: 0x300 */
+
+#define BHE_LOAD_JEFF_FAIL 0x303
+#define BHE_PACKAGE_NOT_FOUND 0x304
+#define BHE_EXIST_LIVE_SESSION 0x305
+#define BHE_VM_INSTANCE_INIT_FAIL 0x306
+#define BHE_QUERY_PROP_NOT_SUPPORT 0x307
+#define BHE_INVALID_BPK_FILE 0x308
+#define BHE_PACKAGE_EXIST 0x309
+#define BHE_VM_INSTNACE_NOT_FOUND 0x312
+#define BHE_STARTING_JDWP_FAIL 0x313
+#define BHE_GROUP_CHECK_FAIL 0x314
+#define BHE_SDID_UNMATCH 0x315
+#define BHE_APPPACK_UNINITED 0x316
+#define BHE_SESSION_NUM_EXCEED 0x317
+#define BHE_TA_PACKAGE_HASH_VERIFY_FAIL 0x318
+#define BHE_SWITCH_ISD 0x319
+#define BHE_OPERATION_NOT_PERMITTED 0x31A
+
+/* VM Applet instance error code section: 0x400 */
+#define BHE_APPLET_GENERIC 0x400
+#define BHE_UNCAUGHT_EXCEPTION 0x401
+/* Bad parameters to applet */
+#define BHE_APPLET_BAD_PARAMETER 0x402
+/* Small response buffer */
+#define BHE_APPLET_SMALL_BUFFER 0x403
+/* Bad state */
+#define BHE_BAD_STATE 0x404
+
+/* TODO: these UI error codes should be removed when integrating with ME 9 */
+#define BHE_UI_EXCEPTION 0x501
+#define BHE_UI_ILLEGAL_USE 0x502
+#define BHE_UI_ILLEGAL_PARAMETER 0x503
+#define BHE_UI_NOT_INITIALIZED 0x504
+#define BHE_UI_NOT_SUPPORTED 0x505
+#define BHE_UI_OUT_OF_RESOURCES 0x506
+
+/* BeiHai VMInternalError code section: 0x600 */
+#define BHE_UNKNOWN 0x602
+#define BHE_MAGIC_UNMATCH 0x603
+#define BHE_UNIMPLEMENTED 0x604
+#define BHE_INTR 0x605
+#define BHE_CLOSED 0x606
+/* TODO: unused error code, should be removed */
+#define BHE_BUFFER_OVERFLOW 0x607
+#define BHE_NOT_SUPPORTED 0x608
+#define BHE_WEAR_OUT_VIOLATION 0x609
+#define BHE_NOT_FOUND 0x610
+#define BHE_INVALID_PARAMS 0x611
+#define BHE_ACCESS_DENIED 0x612
+#define BHE_INVALID 0x614
+#define BHE_TIMEOUT 0x615
+
+/* SDM specific error code section: 0x800 */
+#define BHE_SDM_FAILED 0x800
+#define BHE_SDM_NOT_FOUND
0x801 +#define BHE_SDM_ALREADY_EXIST 0x803 +#define BHE_SDM_TATYPE_MISMATCH 0x804 +#define BHE_SDM_TA_NUMBER_LIMIT 0x805 +#define BHE_SDM_SIGNAGURE_VERIFY_FAIL 0x806 +#define BHE_SDM_PERMGROUP_CHECK_FAIL 0x807 +#define BHE_SDM_INSTALL_CONDITION_FAIL 0x808 +#define BHE_SDM_SVN_CHECK_FAIL 0x809 +#define BHE_SDM_TA_DB_NO_FREE_SLOT 0x80A +#define BHE_SDM_SD_DB_NO_FREE_SLOT 0x80B +#define BHE_SDM_SVL_DB_NO_FREE_SLOT 0x80C +#define BHE_SDM_SVL_CHECK_FAIL 0x80D +#define BHE_SDM_DB_READ_FAIL 0x80E +#define BHE_SDM_DB_WRITE_FAIL 0x80F + +/* Launcher specific error code section: 0x900 */ +#define BHE_LAUNCHER_INIT_FAILED 0x901 +#define BHE_SD_NOT_INSTALLED 0x902 +#define BHE_NTA_NOT_INSTALLED 0x903 +#define BHE_PROCESS_SPAWN_FAILED 0x904 +#define BHE_PROCESS_KILL_FAILED 0x905 +#define BHE_PROCESS_ALREADY_RUNNING 0x906 +#define BHE_PROCESS_IN_TERMINATING 0x907 +#define BHE_PROCESS_NOT_EXIST 0x908 +#define BHE_PLATFORM_API_ERR 0x909 +#define BHE_PROCESS_NUM_EXCEED 0x09A + +/* + * BeihaiHAL Layer error code section: + * 0x1000,0x2000 reserved here, defined in CSG BeihaiStatusHAL.h + */ + +#endif /* __BH_SHARED_ERRCODE_H */ diff --git a/drivers/misc/mei/dal/bh_external.c b/drivers/misc/mei/dal/bh_external.c new file mode 100644 index 000000000000..282f9c395b11 --- /dev/null +++ b/drivers/misc/mei/dal/bh_external.c @@ -0,0 +1,582 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include +#include +#include +#include + +#include "bh_errcode.h" +#include "bh_external.h" +#include "bh_internal.h" + +/* BH initialization state */ +static atomic_t bh_state = ATOMIC_INIT(0); + +/** + * uuid_is_valid_hyphenless - check if uuid is valid in hyphenless format + * + * @uuid_str: uuid string + * + * Return: true when uuid is valid in hyphenless format + * false when uuid is invalid + */ +static bool uuid_is_valid_hyphenless(const char *uuid_str) +{ + unsigned int i; + + /* exclude (i == 8 || i == 13 || i == 18 || i == 23) */ + for (i = 0; i < UUID_STRING_LEN - 4; i++) + if (!isxdigit(uuid_str[i])) + return false; + + return true; +} + +/** + * uuid_normalize_hyphenless - convert uuid from hyphenless format + * to standard format + * + * @uuid_hl: uuid string in hyphenless format + * @uuid_str: output param to hold uuid string in standard format + */ +static void uuid_normalize_hyphenless(const char *uuid_hl, char *uuid_str) +{ + unsigned int i; + + for (i = 0; i < UUID_STRING_LEN; i++) { + if (i == 8 || i == 13 || i == 18 || i == 23) + uuid_str[i] = '-'; + else + uuid_str[i] = *uuid_hl++; + } + uuid_str[i] = '\0'; +} + +/** + * dal_uuid_parse - convert uuid string to binary form + * + * Input uuid is in either hyphenless or standard format + * + * @uuid_str: uuid string + * @uuid: output param to hold uuid bin + * + * Return: 0 on success + * <0 on failure + */ +int dal_uuid_parse(const char *uuid_str, uuid_t *uuid) +{ + char __uuid_str[UUID_STRING_LEN + 1]; + + if (!uuid_str || !uuid) + return -EINVAL; + + if (uuid_is_valid_hyphenless(uuid_str)) { + uuid_normalize_hyphenless(uuid_str, __uuid_str); + uuid_str = __uuid_str; + } + + return uuid_parse(uuid_str, uuid); +} +EXPORT_SYMBOL(dal_uuid_parse); + +/** + * bh_msg_is_response - check if message is response + * + * @msg: message + * @len: message length + * + * Return: true when message is response + * false otherwise + */ +bool bh_msg_is_response(const void *msg, size_t len) +{ + const struct bh_response_header *r = msg; + + return (len >= sizeof(*r) && r->h.magic == BH_MSG_RESP_MAGIC); +} + +/** + * bh_msg_is_cmd - check if message is command + * + * @msg: message + * @len: message length + * + * Return: true when message is command + * false otherwise + */ +bool bh_msg_is_cmd(const void *msg, size_t len) +{ + const struct bh_command_header *c = msg; + + return (len >= sizeof(*c) && c->h.magic == BH_MSG_CMD_MAGIC); +} + +/** + * bh_msg_cmd_hdr - get the command header if message is command + * + * @msg: message + * @len: message length + * + * Return: pointer to the command header when message is command + * NULL otherwise + */ +const struct bh_command_header *bh_msg_cmd_hdr(const void *msg, size_t len) +{ + if (!bh_msg_is_cmd(msg, len)) + return NULL; + + return msg; +} + +/** + * bh_msg_is_cmd_open_session - check if command is open session command + * + * @hdr: message header + * + * Return: true 
when command is open session command
+ * false otherwise
+ */
+bool bh_msg_is_cmd_open_session(const struct bh_command_header *hdr)
+{
+	return hdr->id == BHP_CMD_OPEN_JTASESSION;
+}
+
+/**
+ * bh_open_session_ta_id - get ta id from open session command
+ *
+ * @hdr: message header
+ * @count: message size
+ *
+ * Return: pointer to ta id when command is valid
+ * NULL otherwise
+ */
+const uuid_t *bh_open_session_ta_id(const struct bh_command_header *hdr,
+				    size_t count)
+{
+	struct bh_open_jta_session_cmd *open_cmd;
+
+	if (count < sizeof(*hdr) + sizeof(*open_cmd))
+		return NULL;
+
+	open_cmd = (struct bh_open_jta_session_cmd *)hdr->cmd;
+
+	return &open_cmd->ta_id;
+}
+
+/**
+ * bh_session_is_killed - check if session is killed
+ *
+ * @code: the session return code
+ *
+ * Return: true when the session is killed
+ * false otherwise
+ */
+static bool bh_session_is_killed(int code)
+{
+	return (code == BHE_WD_TIMEOUT ||
+		code == BHE_UNCAUGHT_EXCEPTION ||
+		code == BHE_APPLET_CRASHED);
+}
+
+/**
+ * bh_ta_session_open - open session to ta
+ *
+ * This function blocks until the VM replies with a response
+ *
+ * @host_id: out param to hold the session host_id
+ * @ta_id: trusted application (ta) id
+ * @ta_pkg: ta binary package
+ * @pkg_len: ta binary package length
+ * @init_param: init parameters to the session (optional)
+ * @init_len: length of the init parameters
+ *
+ * Return: 0 on success
+ * <0 on system failure
+ * >0 on DAL FW failure
+ */
+int bh_ta_session_open(u64 *host_id, const char *ta_id,
+		       const u8 *ta_pkg, size_t pkg_len,
+		       const u8 *init_param, size_t init_len)
+{
+	int ret;
+	uuid_t bin_ta_id;
+	unsigned int conn_idx;
+	unsigned int count;
+	bool found;
+	uuid_t *ta_ids = NULL;
+	int i;
+
+	if (!ta_id || !host_id)
+		return -EINVAL;
+
+	if (!ta_pkg || !pkg_len)
+		return -EINVAL;
+
+	if (!init_param && init_len != 0)
+		return -EINVAL;
+
+	if (dal_uuid_parse(ta_id, &bin_ta_id))
+		return -EINVAL;
+
+	*host_id = 0;
+
+	ret = bh_proxy_check_svl_jta_blocked_state(&bin_ta_id);
+	if (ret)
+		return ret;
+
+	/* 1: vm conn_idx is IVM dal FW client */
+	conn_idx = CONN_IDX_IVM;
+
+	/* 2.1: check whether the ta pkg already exists in the VM */
+	count = 0;
+	ret = bh_proxy_list_jta_packages(conn_idx, &count, &ta_ids);
+	if (ret)
+		return ret;
+
+	found = false;
+	for (i = 0; i < count; i++) {
+		if (uuid_equal(&bin_ta_id, &ta_ids[i])) {
+			found = true;
+			break;
+		}
+	}
+	kfree(ta_ids);
+
+	/* 2.2: download ta pkg if not already present. */
+	if (!found) {
+		ret = bh_proxy_dnload_jta(conn_idx, &bin_ta_id,
+					  ta_pkg, pkg_len);
+		if (ret && ret != BHE_PACKAGE_EXIST)
+			return ret;
+	}
+
+	/* 3: send open session command to VM */
+	ret = bh_proxy_open_jta_session(conn_idx, &bin_ta_id,
+					init_param, init_len,
+					host_id, ta_pkg, pkg_len);
+	return ret;
+}
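
[Review note — illustrative only, not part of the patch] A usage sketch tying bh_ta_session_open() above to bh_ta_session_command() and bh_ta_session_close() below; the ta id string, command id and response size are placeholders, and error handling is reduced to the minimum.

static int example_ta_roundtrip(const u8 *ta_pkg, size_t pkg_len)
{
	const char *ta_id = "01234567-89ab-cdef-0123-456789abcdef";
	void *out = NULL;
	size_t out_len = 64;	/* expected max response size */
	int resp_code;
	u64 host_id;
	int ret;

	ret = bh_ta_session_open(&host_id, ta_id, ta_pkg, pkg_len, NULL, 0);
	if (ret)
		return ret;

	ret = bh_ta_session_command(host_id, 1 /* applet command */,
				    "ping", 4, &out, &out_len, &resp_code);
	kfree(out);	/* output buffer is owned by the caller */

	bh_ta_session_close(host_id);
	return ret;
}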
+
+/**
+ * bh_ta_session_command - send and receive data to/from ta
+ *
+ * This function blocks until the VM replies with a response
+ *
+ * @host_id: session host id
+ * @command_id: command id
+ * @input: message to be sent
+ * @length: sent message size
+ * @output: output param to hold pointer to the buffer which
+ * will contain the received message.
+ * This buffer is allocated by Beihai and freed by the user
+ * @output_length: input and output param -
+ * - input: the expected maximum length of the received message
+ * - output: size of the received message
+ * @response_code: output param to hold the return value from the applet
+ *
+ * Return: 0 on success
+ * <0 on system failure
+ * >0 on DAL FW failure
+ */
+int bh_ta_session_command(u64 host_id, int command_id,
+			  const void *input, size_t length,
+			  void **output, size_t *output_length,
+			  int *response_code)
+{
+	int ret;
+	struct bh_command_header *h;
+	struct bh_cmd *cmd;
+	char cmdbuf[CMD_BUF_SIZE(*cmd)];
+	struct bh_response_header *resp_hdr;
+	unsigned int resp_len;
+	struct bh_session_record *session;
+	struct bh_resp *resp;
+	unsigned int conn_idx = CONN_IDX_IVM;
+	unsigned int len;
+
+	memset(cmdbuf, 0, sizeof(cmdbuf));
+	resp_hdr = NULL;
+
+	if (!bh_is_initialized())
+		return -EFAULT;
+
+	if (!input && length != 0)
+		return -EINVAL;
+
+	if (!output_length)
+		return -EINVAL;
+
+	if (output)
+		*output = NULL;
+
+	session = bh_session_find(conn_idx, host_id);
+	if (!session)
+		return -EINVAL;
+
+	h = (struct bh_command_header *)cmdbuf;
+	cmd = (struct bh_cmd *)h->cmd;
+	h->id = BHP_CMD_SENDANDRECV;
+	cmd->ta_session_id = session->ta_session_id;
+	cmd->command = command_id;
+	cmd->outlen = *output_length;
+
+	ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), input, length,
+			 host_id, (void **)&resp_hdr);
+	if (!resp_hdr)
+		return ret ? ret : -EFAULT;
+
+	if (!ret)
+		ret = resp_hdr->code;
+
+	session->ta_session_id = resp_hdr->ta_session_id;
+	resp_len = resp_hdr->h.length - sizeof(*resp_hdr);
+
+	if (ret == BHE_APPLET_SMALL_BUFFER &&
+	    resp_len == sizeof(struct bh_resp_bof)) {
+		struct bh_resp_bof *bof =
+			(struct bh_resp_bof *)resp_hdr->data;
+
+		if (response_code)
+			*response_code = be32_to_cpu(bof->response);
+
+		*output_length = be32_to_cpu(bof->request_length);
+	}
+
+	if (ret)
+		goto out;
+
+	if (resp_len < sizeof(struct bh_resp)) {
+		ret = -EBADMSG;
+		goto out;
+	}
+
+	resp = (struct bh_resp *)resp_hdr->data;
+
+	if (response_code)
+		*response_code = be32_to_cpu(resp->response);
+
+	len = resp_len - sizeof(*resp);
+
+	if (*output_length < len) {
+		ret = -EMSGSIZE;
+		goto out;
+	}
+
+	if (len && output) {
+		*output = kmemdup(resp->buffer, len, GFP_KERNEL);
+		if (!*output) {
+			ret = -ENOMEM;
+			goto out;
+		}
+	}
+
+	*output_length = len;
+
+out:
+	if (bh_session_is_killed(resp_hdr->code))
+		bh_session_remove(conn_idx, session->host_id);
+
+	kfree(resp_hdr);
+
+	return ret;
+}
+
+/**
+ * bh_ta_session_close - close ta session
+ *
+ * This function blocks until the VM replies with a response
+ *
+ * @host_id: session host id
+ *
+ * Return: 0 on success
+ * <0 on system failure
+ * >0 on DAL FW failure
+ */
+int bh_ta_session_close(u64 host_id)
+{
+	int ret;
+	struct bh_command_header *h;
+	struct bh_close_jta_session_cmd *cmd;
+	char cmdbuf[CMD_BUF_SIZE(*cmd)];
+	struct bh_response_header *resp_hdr;
+	struct bh_session_record *session;
+	unsigned int conn_idx = CONN_IDX_IVM;
+
+	memset(cmdbuf, 0, sizeof(cmdbuf));
+	resp_hdr = NULL;
+
+	session = bh_session_find(conn_idx, host_id);
+	if (!session)
+		return -EINVAL;
+
+	h = (struct bh_command_header *)cmdbuf;
+	cmd = (struct bh_close_jta_session_cmd *)h->cmd;
+	h->id = BHP_CMD_CLOSE_JTASESSION;
+	cmd->ta_session_id = session->ta_session_id;
+
+	ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), NULL, 0, host_id,
+			 (void **)&resp_hdr);
+
+	if (!ret)
+		ret = resp_hdr->code;
+
+	kfree(resp_hdr);
+	/*
+	 * An internal session exists, so we should not close the session.
+	 * The host app should call this API again at an appropriate time.
+	 */
+	if (ret != BHE_IAC_EXIST_INTERNAL_SESSION)
+		bh_session_remove(conn_idx, host_id);
+
+	return ret;
+}
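
[Review note — illustrative only, not part of the patch] A sketch of the NULL-terminated filter table consumed by bh_filter_hdr() below; the open-session policy in filter_open_session() is hypothetical.

static int filter_open_session(const struct bh_command_header *hdr,
			       size_t count, void *ctx)
{
	/* hypothetical policy: reject truncated open-session commands */
	if (bh_msg_is_cmd_open_session(hdr) &&
	    !bh_open_session_ta_id(hdr, count))
		return -EINVAL;
	return 0;
}

static const bh_filter_func example_filter_tbl[] = {
	filter_open_session,
	NULL,	/* table must be NULL-terminated */
};

A caller would then run: ret = bh_filter_hdr(hdr, count, NULL, example_filter_tbl);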
+
+/**
+ * bh_filter_hdr - filter the sent message
+ *
+ * Only valid messages are allowed to be sent.
+ * The filtering is done using the given table of filter functions
+ *
+ * @hdr: message header
+ * @count: message size
+ * @ctx: context to send to the filter functions
+ * @tbl: filter functions table
+ *
+ * Return: 0 when message is valid
+ * <0 otherwise
+ */
+int bh_filter_hdr(const struct bh_command_header *hdr, size_t count, void *ctx,
+		  const bh_filter_func tbl[])
+{
+	int i;
+	int ret;
+
+	for (i = 0; tbl[i]; i++) {
+		ret = tbl[i](hdr, count, ctx);
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
+}
+
+/**
+ * bh_prep_access_denied_response - prepare package with 'access denied'
+ * response code.
+ *
+ * This function is used to send an in-band error to a user who tries to
+ * send a message without the needed permissions
+ *
+ * @cmd: the invalid command message
+ * @res: out param to hold the response header
+ */
+void bh_prep_access_denied_response(const char *cmd,
+				    struct bh_response_header *res)
+{
+	struct bh_command_header *cmd_hdr = (struct bh_command_header *)cmd;
+
+	res->h.magic = BH_MSG_RESP_MAGIC;
+	res->h.length = sizeof(*res);
+	res->code = BHE_OPERATION_NOT_PERMITTED;
+	res->seq = cmd_hdr->seq;
+}
+
+/**
+ * bh_is_initialized - check if bhp is initialized
+ *
+ * Return: true when bhp is initialized
+ * false when bhp is not initialized
+ */
+bool bh_is_initialized(void)
+{
+	return atomic_read(&bh_state) == 1;
+}
+
+/**
+ * bh_init_internal - Beihai init function
+ *
+ * The plugin initialization includes initializing the session lists of all
+ * dal devices (dal fw clients)
+ */
+void bh_init_internal(void)
+{
+	unsigned int i;
+
+	if (atomic_add_unless(&bh_state, 1, 1))
+		for (i = CONN_IDX_START; i < MAX_CONNECTIONS; i++)
+			bh_session_list_init(i);
+}
+
+/**
+ * bh_deinit_internal - Beihai plugin deinit function
+ *
+ * The plugin deinitialization includes deinitializing the session lists of
+ * all dal devices (dal fw clients)
+ */
+void bh_deinit_internal(void)
+{
+	unsigned int i;
+
+	if (atomic_add_unless(&bh_state, -1, 0))
+		for (i = CONN_IDX_START; i < MAX_CONNECTIONS; i++)
+			bh_session_list_free(i);
+}
diff --git a/drivers/misc/mei/dal/bh_external.h b/drivers/misc/mei/dal/bh_external.h
new file mode 100644
index 000000000000..c56809df2560
--- /dev/null
+++ b/drivers/misc/mei/dal/bh_external.h
@@ -0,0 +1,101 @@
+/******************************************************************************
+ * Intel mei_dal Linux driver
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __BH_EXTERNAL_H +#define __BH_EXTERNAL_H + +#include +#include "bh_cmd_defs.h" + +# define MSG_SEQ_START_NUMBER BIT_ULL(32) + +bool bh_is_initialized(void); +void bh_init_internal(void); +void bh_deinit_internal(void); + +int bh_ta_session_open(u64 *host_id, const char *ta_id, const u8 *ta_pkg, + size_t pkg_len, const u8 *init_param, size_t init_len); + +int bh_ta_session_close(u64 host_id); + +int bh_ta_session_command(u64 host_id, int command_id, const void *input, + size_t length, void **output, size_t *output_length, + int *response_code); + +const struct bh_command_header *bh_msg_cmd_hdr(const void *msg, size_t len); + +typedef int (*bh_filter_func)(const struct bh_command_header *hdr, + size_t count, void *ctx); + +int bh_filter_hdr(const struct bh_command_header *hdr, size_t count, void *ctx, + const bh_filter_func tbl[]); + +bool bh_msg_is_cmd_open_session(const struct bh_command_header *hdr); + +const uuid_t *bh_open_session_ta_id(const struct bh_command_header *hdr, + size_t count); + +void bh_prep_access_denied_response(const char *cmd, + struct bh_response_header *res); + +bool bh_msg_is_cmd(const void *msg, size_t len); +bool bh_msg_is_response(const void *msg, size_t len); + +#endif /* __BH_EXTERNAL_H */ diff --git a/drivers/misc/mei/dal/bh_internal.c b/drivers/misc/mei/dal/bh_internal.c new file mode 100644 index 000000000000..d9a2c76e8a0b --- /dev/null +++ b/drivers/misc/mei/dal/bh_internal.c @@ -0,0 +1,660 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include +#include "bh_errcode.h" +#include "bh_external.h" +#include "bh_internal.h" +#include "dal_dev.h" + +static u64 bh_host_id_number = MSG_SEQ_START_NUMBER; + +/* + * dal device session records list (array of list per dal device) + * represents opened sessions to dal fw client + */ +static struct list_head dal_dev_session_list[MAX_CONNECTIONS]; + +/** + * bh_get_msg_host_id - increase the shared variable bh_host_id_number by 1 + * and wrap around if needed + * + * Return: the updated host id number + */ +u64 bh_get_msg_host_id(void) +{ + bh_host_id_number++; + /* wrap around. 
sequence_number must
+	 * not be 0, as required by Firmware VM
+	 */
+	if (bh_host_id_number == 0)
+		bh_host_id_number = MSG_SEQ_START_NUMBER;
+
+	return bh_host_id_number;
+}
+
+/**
+ * bh_session_find - find session record by handle
+ *
+ * @conn_idx: DAL client connection idx
+ * @host_id: session host id
+ *
+ * Return: pointer to bh_session_record if found
+ * NULL if the session wasn't found
+ */
+struct bh_session_record *bh_session_find(unsigned int conn_idx, u64 host_id)
+{
+	struct bh_session_record *pos;
+	struct list_head *session_list = &dal_dev_session_list[conn_idx];
+
+	list_for_each_entry(pos, session_list, link) {
+		if (pos->host_id == host_id)
+			return pos;
+	}
+
+	return NULL;
+}
+
+/**
+ * bh_session_add - add session record to list
+ *
+ * @conn_idx: fw client connection idx
+ * @session: session record
+ */
+void bh_session_add(unsigned int conn_idx, struct bh_session_record *session)
+{
+	list_add_tail(&session->link, &dal_dev_session_list[conn_idx]);
+}
+
+/**
+ * bh_session_remove - remove session record from list, and release its memory
+ *
+ * @conn_idx: fw client connection idx
+ * @host_id: session host id
+ */
+void bh_session_remove(unsigned int conn_idx, u64 host_id)
+{
+	struct bh_session_record *session;
+
+	session = bh_session_find(conn_idx, host_id);
+
+	if (session) {
+		list_del(&session->link);
+		kfree(session);
+	}
+}
+
+static char skip_buffer[DAL_MAX_BUFFER_SIZE] = {0};
+/**
+ * bh_transport_recv - receive message from DAL FW, using kdi callback
+ * 'dal_kdi_recv'
+ *
+ * @conn_idx: fw client connection idx
+ * @buffer: output buffer to hold the received message
+ * @size: output buffer size
+ *
+ * Return: 0 on success
+ * <0 on failure
+ */
+static int bh_transport_recv(unsigned int conn_idx, void *buffer, size_t size)
+{
+	size_t got;
+	unsigned int count;
+	char *buf = buffer;
+	int ret;
+
+	if (conn_idx > DAL_MEI_DEVICE_MAX)
+		return -ENODEV;
+
+	for (count = 0; count < size; count += got) {
+		got = min_t(size_t, size - count, DAL_MAX_BUFFER_SIZE);
+		if (buf)
+			ret = dal_kdi_recv(conn_idx, buf + count, &got);
+		else
+			ret = dal_kdi_recv(conn_idx, skip_buffer, &got);
+
+		if (!got)
+			return -EFAULT;
+
+		if (ret)
+			return ret;
+	}
+
+	if (count != size)
+		return -EFAULT;
+
+	return 0;
+}
+
+/**
+ * bh_transport_send - send message to DAL FW,
+ * using kdi callback 'dal_kdi_send'
+ *
+ * @conn_idx: fw client connection idx
+ * @buffer: message to send
+ * @size: message size
+ * @host_id: message host id
+ *
+ * Return: 0 on success
+ * <0 on failure
+ */
+static int bh_transport_send(unsigned int conn_idx, const void *buffer,
+			     unsigned int size, u64 host_id)
+{
+	size_t chunk_sz;
+	unsigned int count;
+	const char *buf = buffer;
+	int ret;
+
+	if (conn_idx > DAL_MEI_DEVICE_MAX)
+		return -ENODEV;
+
+	for (count = 0; count < size; count += chunk_sz) {
+		chunk_sz = min_t(size_t, size - count, DAL_MAX_BUFFER_SIZE);
+		ret = dal_kdi_send(conn_idx, buf + count, chunk_sz, host_id);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
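
[Review note — illustrative only, not part of the patch] A sketch of the buffer a caller hands to bh_send_message() below, here a close-session command; it assumes buf_len >= sizeof(*h) + sizeof(*cmd). The transport magic, total length and sequence number are filled in by bh_send_message() itself.

static void example_build_close_cmd(char *buf, size_t buf_len,
				    u64 ta_session_id)
{
	struct bh_command_header *h = (struct bh_command_header *)buf;
	struct bh_close_jta_session_cmd *cmd =
			(struct bh_close_jta_session_cmd *)h->cmd;

	memset(buf, 0, buf_len);
	h->id = BHP_CMD_CLOSE_JTASESSION;
	cmd->ta_session_id = ta_session_id;
	/* h->h.magic, h->h.length and h->seq are set by bh_send_message() */
}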
+/**
+ * bh_recv_message - receive and process message from DAL
+ *
+ * @conn_idx: fw client connection idx
+ * @response: output param to hold the response
+ * @out_host_id: output param to hold the received message host id,
+ *               it should be identical to the sent message host id
+ *
+ * Return: 0 on success
+ *         <0 on failure
+ */
+static int bh_recv_message(unsigned int conn_idx, void **response,
+			   u64 *out_host_id)
+{
+	int ret;
+	char *data;
+	struct bh_response_header hdr;
+
+	if (!response)
+		return -EINVAL;
+
+	*response = NULL;
+
+	memset(&hdr, 0, sizeof(hdr));
+	ret = bh_transport_recv(conn_idx, &hdr, sizeof(hdr));
+	if (ret)
+		return ret;
+
+	if (hdr.h.length < sizeof(hdr))
+		return -EBADMSG;
+
+	/* check magic */
+	if (hdr.h.magic != BH_MSG_RESP_MAGIC)
+		return -EBADMSG;
+
+	data = kzalloc(hdr.h.length, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	memcpy(data, &hdr, sizeof(hdr));
+
+	/* message contains hdr only */
+	if (hdr.h.length == sizeof(hdr))
+		goto out;
+
+	ret = bh_transport_recv(conn_idx, data + sizeof(hdr),
+				hdr.h.length - sizeof(hdr));
+out:
+	if (out_host_id)
+		*out_host_id = hdr.seq;
+
+	*response = data;
+
+	return ret;
+}
+
+#define MAX_RETRY_COUNT 3
+/**
+ * bh_request - send request to DAL FW and receive response back
+ *
+ * @conn_idx: fw client connection idx
+ * @cmd_hdr: command header
+ * @cmd_hdr_len: command header length
+ * @cmd_data: command data (message content)
+ * @cmd_data_len: data length
+ * @host_id: message host id
+ * @response: output param to hold the response
+ *
+ * Return: 0 on success
+ *         <0 on failure
+ */
+int bh_request(unsigned int conn_idx, void *cmd_hdr, unsigned int cmd_hdr_len,
+	       const void *cmd_data, unsigned int cmd_data_len,
+	       u64 host_id, void **response)
+{
+	int ret;
+	u32 retry_count;
+	u64 res_host_id;
+
+	if (!cmd_hdr || !response)
+		return -EINVAL;
+
+	ret = bh_send_message(conn_idx, cmd_hdr, cmd_hdr_len, cmd_data,
+			      cmd_data_len, host_id);
+	if (ret)
+		return ret;
+
+	for (retry_count = 0; retry_count < MAX_RETRY_COUNT; retry_count++) {
+		ret = bh_recv_message(conn_idx, response, &res_host_id);
+		if (ret) {
+			pr_debug("failed to recv msg = %d\n", ret);
+			/* don't leak a partial response before retrying */
+			kfree(*response);
+			*response = NULL;
+			continue;
+		}
+
+		if (res_host_id != host_id) {
+			pr_debug("recv message with host_id=%llu != sent host_id=%llu\n",
+				 res_host_id, host_id);
+			/* don't leak a mismatched response before retrying */
+			kfree(*response);
+			*response = NULL;
+			continue;
+		}
+
+		pr_debug("recv message with try=%d host_id=%llu\n",
+			 retry_count, host_id);
+		break;
+	}
+
+	if (retry_count == MAX_RETRY_COUNT) {
+		pr_err("out of retry attempts\n");
+		return -EFAULT;
+	}
+
+	return ret;
+}
+
+/**
+ * bh_session_list_free - free session list of given dal fw client
+ *
+ * @conn_idx: fw client connection idx
+ */
+void bh_session_list_free(unsigned int conn_idx)
+{
+	struct bh_session_record *pos, *next;
+	struct list_head *session_list = &dal_dev_session_list[conn_idx];
+
+	list_for_each_entry_safe(pos, next, session_list, link) {
+		list_del(&pos->link);
+		kfree(pos);
+	}
+
+	INIT_LIST_HEAD(session_list);
+}
+
+/**
+ * bh_session_list_init - initialize session list of given dal fw client
+ *
+ * @conn_idx: fw client connection idx
+ */
+void bh_session_list_init(unsigned int conn_idx)
+{
+	INIT_LIST_HEAD(&dal_dev_session_list[conn_idx]);
+}
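+/*
+ * Typical caller pattern for bh_request() (sketch only; 'struct bh_foo_cmd'
+ * and BHP_CMD_FOO are placeholder names, not real commands): build a zeroed
+ * command buffer, take a fresh host id, send the request, and use the
+ * response header code as the DAL status. The response buffer is owned by
+ * the caller and must be freed:
+ *
+ *	struct bh_command_header *h;
+ *	struct bh_foo_cmd *cmd;
+ *	char cmdbuf[CMD_BUF_SIZE(*cmd)];
+ *	struct bh_response_header *resp_hdr = NULL;
+ *	u64 host_id;
+ *	int ret;
+ *
+ *	memset(cmdbuf, 0, sizeof(cmdbuf));
+ *	h = (struct bh_command_header *)cmdbuf;
+ *	cmd = (struct bh_foo_cmd *)h->cmd;
+ *	h->id = BHP_CMD_FOO;
+ *	host_id = bh_get_msg_host_id();
+ *	ret = bh_request(CONN_IDX_IVM, h, CMD_BUF_SIZE(*cmd), NULL, 0,
+ *			 host_id, (void **)&resp_hdr);
+ *	if (!ret)
+ *		ret = resp_hdr->code;
+ *	kfree(resp_hdr);
+ *
+ * The bh_proxy_* helpers below all follow this pattern.
+ */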
+/**
+ * bh_proxy_check_svl_jta_blocked_state - check if ta security version
+ *                                        is blocked
+ *
+ * When a ta is installed, a minimum security version is given, and DAL
+ * blocks installation of that ta with a lower security version
+ * (even after the ta is uninstalled).
+ *
+ * @ta_id: trusted application (ta) id
+ *
+ * Return: 0 when ta security version isn't blocked
+ *         <0 on system failure
+ *         >0 on DAL FW failure
+ */
+int bh_proxy_check_svl_jta_blocked_state(uuid_t *ta_id)
+{
+	int ret;
+	struct bh_command_header *h;
+	struct bh_check_svl_jta_blocked_state_cmd *cmd;
+	char cmdbuf[CMD_BUF_SIZE(*cmd)];
+	struct bh_response_header *resp_hdr;
+	u64 host_id;
+
+	memset(cmdbuf, 0, sizeof(cmdbuf));
+	resp_hdr = NULL;
+
+	h = (struct bh_command_header *)cmdbuf;
+	cmd = (struct bh_check_svl_jta_blocked_state_cmd *)h->cmd;
+	h->id = BHP_CMD_CHECK_SVL_TA_BLOCKED_STATE;
+	cmd->ta_id = *ta_id;
+
+	host_id = bh_get_msg_host_id();
+	ret = bh_request(CONN_IDX_SDM, h, CMD_BUF_SIZE(*cmd), NULL, 0,
+			 host_id, (void **)&resp_hdr);
+
+	if (!ret)
+		ret = resp_hdr->code;
+
+	kfree(resp_hdr);
+
+	return ret;
+}
+
+/**
+ * bh_proxy_list_jta_packages - get list of ta packages in DAL
+ *
+ * @conn_idx: fw client connection idx
+ * @count: out param to hold the count of ta packages in DAL
+ * @ta_ids: out param to hold a pointer to the ids of ta packages in DAL.
+ *          The buffer which holds the ids is allocated in this function
+ *          and freed by the caller
+ *
+ * Return: 0 on success
+ *         <0 on system failure
+ *         >0 on DAL FW failure
+ */
+int bh_proxy_list_jta_packages(unsigned int conn_idx, unsigned int *count,
+			       uuid_t **ta_ids)
+{
+	int ret;
+	struct bh_command_header h;
+	struct bh_response_header *resp_hdr;
+	unsigned int resp_len;
+	struct bh_resp_list_ta_packages *resp;
+	uuid_t *outbuf;
+	unsigned int i;
+	u64 host_id;
+
+	memset(&h, 0, sizeof(h));
+	resp_hdr = NULL;
+
+	if (!bh_is_initialized())
+		return -EFAULT;
+
+	if (!count || !ta_ids)
+		return -EINVAL;
+
+	*ta_ids = NULL;
+	*count = 0;
+
+	h.id = BHP_CMD_LIST_TA_PACKAGES;
+
+	host_id = bh_get_msg_host_id();
+	ret = bh_request(conn_idx, &h, sizeof(h), NULL, 0, host_id,
+			 (void **)&resp_hdr);
+
+	if (!ret)
+		ret = resp_hdr->code;
+	if (ret)
+		goto out;
+
+	resp_len = resp_hdr->h.length - sizeof(*resp_hdr);
+	if (resp_len < sizeof(*resp)) {
+		ret = -EBADMSG;
+		goto out;
+	}
+
+	resp = (struct bh_resp_list_ta_packages *)resp_hdr->data;
+	if (!resp->count) {
+		ret = -EBADMSG;
+		goto out;
+	}
+
+	if (resp_len != sizeof(uuid_t) * resp->count + sizeof(*resp)) {
+		ret = -EBADMSG;
+		goto out;
+	}
+
+	outbuf = kcalloc(resp->count, sizeof(uuid_t), GFP_KERNEL);
+	if (!outbuf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < resp->count; i++)
+		outbuf[i] = resp->ta_ids[i];
+
+	*ta_ids = outbuf;
+	*count = resp->count;
+
+out:
+	kfree(resp_hdr);
+	return ret;
+}
+
+/**
+ * bh_proxy_dnload_jta - download ta package to DAL
+ *
+ * @conn_idx: fw client connection idx
+ * @ta_id: trusted application (ta) id
+ * @ta_pkg: ta binary package
+ * @pkg_len: ta binary package length
+ *
+ * Return: 0 on success
+ *         <0 on system failure
+ *         >0 on DAL FW failure
+ */
+int bh_proxy_dnload_jta(unsigned int conn_idx, uuid_t *ta_id,
+			const char *ta_pkg, unsigned int pkg_len)
+{
+	struct bh_command_header *h;
+	struct bh_download_jta_cmd *cmd;
+	char cmdbuf[CMD_BUF_SIZE(*cmd)];
+	struct bh_response_header *resp_hdr;
+	u64 host_id;
+	int ret;
+
+	memset(cmdbuf, 0, sizeof(cmdbuf));
+	resp_hdr = NULL;
+
+	if (!ta_pkg || !pkg_len)
+		return -EINVAL;
+
+	h = 
(struct bh_command_header *)cmdbuf; + cmd = (struct bh_download_jta_cmd *)h->cmd; + h->id = BHP_CMD_DOWNLOAD_JAVATA; + cmd->ta_id = *ta_id; + + host_id = bh_get_msg_host_id(); + ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), ta_pkg, pkg_len, + host_id, (void **)&resp_hdr); + + if (!ret) + ret = resp_hdr->code; + + kfree(resp_hdr); + + return ret; +} + +/** + * bh_proxy_open_jta_session - send open session command + * + * @conn_idx: fw client connection idx + * @ta_id: trusted application (ta) id + * @init_buffer: init parameters to the session (optional) + * @init_len: length of the init parameters + * @host_id: out param to hold the session host id + * @ta_pkg: ta binary package + * @pkg_len: ta binary package length + * + * Return: 0 on success + * <0 on system failure + * >0 on DAL FW failure + */ +int bh_proxy_open_jta_session(unsigned int conn_idx, + uuid_t *ta_id, + const char *init_buffer, + unsigned int init_len, + u64 *host_id, + const char *ta_pkg, + unsigned int pkg_len) +{ + int ret; + struct bh_command_header *h; + struct bh_open_jta_session_cmd *cmd; + char cmdbuf[CMD_BUF_SIZE(*cmd)]; + struct bh_response_header *resp_hdr; + struct bh_session_record *session; + + if (!host_id) + return -EINVAL; + + if (!init_buffer && init_len > 0) + return -EINVAL; + + memset(cmdbuf, 0, sizeof(cmdbuf)); + resp_hdr = NULL; + + h = (struct bh_command_header *)cmdbuf; + cmd = (struct bh_open_jta_session_cmd *)h->cmd; + + session = kzalloc(sizeof(*session), GFP_KERNEL); + if (!session) + return -ENOMEM; + + session->host_id = bh_get_msg_host_id(); + bh_session_add(conn_idx, session); + + h->id = BHP_CMD_OPEN_JTASESSION; + cmd->ta_id = *ta_id; + + ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), init_buffer, + init_len, session->host_id, (void **)&resp_hdr); + + if (!ret) + ret = resp_hdr->code; + + if (ret == BHE_PACKAGE_NOT_FOUND) { + /* + * VM might delete the TA pkg when no live session. + * Download the TA pkg and open session again + */ + ret = bh_proxy_dnload_jta(conn_idx, ta_id, ta_pkg, pkg_len); + if (ret) + goto out_err; + + kfree(resp_hdr); + resp_hdr = NULL; + ret = bh_request(conn_idx, h, CMD_BUF_SIZE(*cmd), init_buffer, + init_len, session->host_id, + (void **)&resp_hdr); + + if (!ret) + ret = resp_hdr->code; + } + + if (ret) + goto out_err; + + session->ta_session_id = resp_hdr->ta_session_id; + + kfree(resp_hdr); + + *host_id = session->host_id; + + return 0; + +out_err: + bh_session_remove(conn_idx, session->host_id); + + return ret; +} diff --git a/drivers/misc/mei/dal/bh_internal.h b/drivers/misc/mei/dal/bh_internal.h new file mode 100644 index 000000000000..b15113986257 --- /dev/null +++ b/drivers/misc/mei/dal/bh_internal.h @@ -0,0 +1,133 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __BH_INTERNAL_H +#define __BH_INTERNAL_H + +#include +#include +#include +#include + +#include "bh_cmd_defs.h" + +/** + * struct bh_session_record - session record + * + * @link: link in dal_dev_session_list of dal fw client + * @host_id: message/session host id + * @ta_session_id: session id + */ +struct bh_session_record { + struct list_head link; + u64 host_id; + u64 ta_session_id; +}; + +/* command buffer size */ +#define CMD_BUF_SIZE(cmd) (sizeof(struct bh_command_header) + sizeof(cmd)) + +/** + * enum bh_connection_index - connection index to dal fw clients + * + * @CONN_IDX_START: start idx + * + * @CONN_IDX_IVM: Intel/Issuer Virtual Machine + * @CONN_IDX_SDM: Security Domain Manager + * @CONN_IDX_LAUNCHER: Run Time Manager (Launcher) + * + * @MAX_CONNECTIONS: max connection idx + */ +enum bh_connection_index { + CONN_IDX_START = 0, + + CONN_IDX_IVM = 0, + CONN_IDX_SDM = 1, + CONN_IDX_LAUNCHER = 2, + + MAX_CONNECTIONS +}; + +u64 bh_get_msg_host_id(void); + +struct bh_session_record *bh_session_find(unsigned int conn_idx, u64 host_id); +void bh_session_list_init(unsigned int conn_idx); +void bh_session_list_free(unsigned int conn_idx); +void bh_session_add(unsigned int conn_idx, struct bh_session_record *session); +void bh_session_remove(unsigned int conn_idx, u64 host_id); + +int bh_request(unsigned int conn_idx, + void *hdr, unsigned int hdr_len, + const void *data, unsigned int data_len, + u64 host_id, void **response); + +int bh_proxy_check_svl_jta_blocked_state(uuid_t *ta_id); + +int bh_proxy_list_jta_packages(unsigned int conn_idx, + unsigned int *count, uuid_t **ta_ids); + +int bh_proxy_dnload_jta(unsigned int conn_idx, uuid_t *ta_id, + const char *ta_pkg, unsigned int pkg_len); + +int bh_proxy_open_jta_session(unsigned int conn_idx, uuid_t *ta_id, + const char *init_buffer, unsigned int init_len, + u64 *host_id, const char *ta_pkg, + unsigned int pkg_len); + +#endif /* __BH_INTERNAL_H */ diff --git a/drivers/misc/mei/dal/dal_cdev.c b/drivers/misc/mei/dal/dal_cdev.c new file mode 100644 index 000000000000..c5d6e967d636 --- /dev/null +++ b/drivers/misc/mei/dal/dal_cdev.c @@ -0,0 +1,300 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dal_dev.h" +#include "dal_cdev.h" + +/* KDI user space devices major and minor numbers */ +static dev_t dal_devt; + +/** + * dal_dev_open - dal cdev open function + * + * @inode: pointer to inode structure + * @fp: pointer to file structure + * + * Return: 0 on success + * <0 on failure + */ +static int dal_dev_open(struct inode *inode, struct file *fp) +{ + int ret; + struct dal_device *ddev; + + ddev = container_of(inode->i_cdev, struct dal_device, cdev); + if (!ddev) + return -ENODEV; + + /* single open */ + if (test_and_set_bit(DAL_DEV_OPENED, &ddev->status)) + return -EBUSY; + + ret = dal_dc_setup(ddev, DAL_INTF_CDEV); + if (ret) + goto err; + + fp->private_data = ddev->clients[DAL_INTF_CDEV]; + + return nonseekable_open(inode, fp); + +err: + clear_bit(DAL_DEV_OPENED, &ddev->status); + return ret; +} + +/** + * dal_dev_release - dal cdev release function + * + * @inode: pointer to inode structure + * @fp: pointer to file structure + * + * Return: 0 on success + * <0 on failure + */ +static int dal_dev_release(struct inode *inode, struct file *fp) +{ + struct dal_client *dc = fp->private_data; + struct dal_device *ddev = dc->ddev; + + if (mutex_lock_interruptible(&ddev->context_lock)) { + dev_dbg(&ddev->dev, "signal interrupted\n"); + return -ERESTARTSYS; + } + + dal_dc_destroy(ddev, dc->intf); + + mutex_unlock(&ddev->context_lock); + + clear_bit(DAL_DEV_OPENED, &ddev->status); + + return 0; +} + +/** + * dal_dev_read - dal cdev read function + * + * @fp: pointer to file structure + * @buf: pointer to user buffer + * @count: buffer length + * @off: data offset in buffer + * + * Return: >=0 data length on success + * <0 on failure + */ +static ssize_t dal_dev_read(struct file *fp, char __user *buf, + size_t count, 
loff_t *off)
+{
+	struct dal_client *dc = fp->private_data;
+	struct dal_device *ddev = dc->ddev;
+	int ret;
+	size_t len;
+	unsigned int copied;
+
+	ret = dal_wait_for_read(dc);
+	if (ret != 0)
+		return ret;
+
+	if (kfifo_is_empty(&dc->read_queue))
+		return 0;
+
+	ret = kfifo_out(&dc->read_queue, &len, sizeof(len));
+	if (ret != sizeof(len)) {
+		dev_dbg(&ddev->dev, "could not fetch message length\n");
+		return -EFAULT;
+	}
+
+	if (len > count) {
+		dev_dbg(&ddev->dev, "could not copy buffer: src size = %zd, dest size = %zu\n",
+			len, count);
+		return -EFAULT;
+	}
+
+	/* copy exactly one message ('len' bytes, not 'count'), so the tail
+	 * of the next queued message is not consumed by accident
+	 */
+	ret = kfifo_to_user(&dc->read_queue, buf, len, &copied);
+	if (ret) {
+		dev_dbg(&ddev->dev, "copy_to_user() failed\n");
+		return -EFAULT;
+	}
+
+	/* FIXME: need to drop the rest of the data */
+
+	return copied;
+}
+
+/**
+ * dal_dev_write - dal cdev write function
+ *
+ * @fp: pointer to file structure
+ * @buff: pointer to user buffer
+ * @count: buffer length
+ * @off: data offset in buffer
+ *
+ * Return: >=0 data length on success
+ *         <0 on failure
+ */
+static ssize_t dal_dev_write(struct file *fp, const char __user *buff,
+			     size_t count, loff_t *off)
+{
+	struct dal_device *ddev;
+	struct dal_client *dc = fp->private_data;
+
+	ddev = dc->ddev;
+
+	if (count > DAL_MAX_BUFFER_SIZE) {
+		dev_dbg(&ddev->dev, "count is too big, count = %zu\n", count);
+		return -EMSGSIZE;
+	}
+
+	if (count == 0)
+		return 0;
+
+	if (!buff)
+		return -EINVAL;
+
+	if (copy_from_user(dc->write_buffer, buff, count))
+		return -EFAULT;
+
+	return dal_write(dc, count, 0);
+}
+
+static const struct file_operations mei_dal_fops = {
+	.owner = THIS_MODULE,
+	.open = dal_dev_open,
+	.release = dal_dev_release,
+	.read = dal_dev_read,
+	.write = dal_dev_write,
+	.llseek = no_llseek,
+};
+
+/**
+ * dal_dev_del - delete dal cdev
+ *
+ * @ddev: dal device
+ */
+void dal_dev_del(struct dal_device *ddev)
+{
+	cdev_del(&ddev->cdev);
+}
+
+/**
+ * dal_dev_setup - initialize dal cdev
+ *
+ * @ddev: dal device
+ */
+void dal_dev_setup(struct dal_device *ddev)
+{
+	dev_t devno;
+
+	cdev_init(&ddev->cdev, &mei_dal_fops);
+	devno = MKDEV(MAJOR(dal_devt), ddev->device_id);
+	ddev->cdev.owner = THIS_MODULE;
+	ddev->dev.devt = devno;
+	ddev->cdev.kobj.parent = &ddev->dev.kobj;
+}
+
+/**
+ * dal_dev_add - add dal cdev
+ *
+ * @ddev: dal device
+ *
+ * Return: 0 on success
+ *         <0 on failure
+ */
+int dal_dev_add(struct dal_device *ddev)
+{
+	return cdev_add(&ddev->cdev, ddev->dev.devt, 1);
+}
+
+/**
+ * dal_dev_init - allocate dev_t number
+ *
+ * Return: 0 on success
+ *         <0 on failure
+ */
+int __init dal_dev_init(void)
+{
+	int ret;
+
+	ret = alloc_chrdev_region(&dal_devt, 0, DAL_MEI_DEVICE_MAX, "dal");
+	if (ret < 0)
+		pr_err("failed to allocate chrdev region = %d\n", ret);
+
+	return ret;
+}
+
+/**
+ * dal_dev_exit - unregister allocated dev_t number
+ */
+void dal_dev_exit(void)
+{
+	unregister_chrdev_region(dal_devt, DAL_MEI_DEVICE_MAX);
+}
diff --git a/drivers/misc/mei/dal/dal_cdev.h b/drivers/misc/mei/dal/dal_cdev.h
new file mode 100644
index 000000000000..16b0cc6fb0a4
--- /dev/null
+++ b/drivers/misc/mei/dal/dal_cdev.h
@@ -0,0 +1,68 @@
+/******************************************************************************
+ * Intel mei_dal Linux driver
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __MEI_DAL_DEV_H__ +#define __MEI_DAL_DEV_H__ +void dal_dev_del(struct dal_device *ddev); +void dal_dev_setup(struct dal_device *ddev); +int dal_dev_add(struct dal_device *ddev); +int __init dal_dev_init(void); +void dal_dev_exit(void); +#endif /* __MEI_DAL_DEV_H__ */ diff --git a/drivers/misc/mei/dal/dal_class.c b/drivers/misc/mei/dal/dal_class.c new file mode 100644 index 000000000000..af1a1a31e9ef --- /dev/null +++ b/drivers/misc/mei/dal/dal_class.c @@ -0,0 +1,888 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "bh_external.h" +#include "bh_cmd_defs.h" +#include "bh_errcode.h" +#include "dal_dev.h" +#include "dal_cdev.h" + +/* + * this class contains the 3 mei_cl_device, ivm, sdm, rtm. + * it is initialized during dal_probe and is used by the kernel space kdi + * to send/recv data to/from mei. + * + * this class must be initialized before the kernel space kdi uses it. + */ +struct class *dal_class; + +/** + * dal_dc_print - print client data for debug purpose + * + * @dev: device structure + * @dc: dal client + */ +void dal_dc_print(struct device *dev, struct dal_client *dc) +{ + if (!dc) { + dev_dbg(dev, "dc is null\n"); + return; + } + + dev_dbg(dev, "dc: intf = %d. expected to send: %d, sent: %d. 
expected to receive: %d, received: %d\n",
+		dc->intf,
+		dc->expected_msg_size_to_fw,
+		dc->bytes_sent_to_fw,
+		dc->expected_msg_size_from_fw,
+		dc->bytes_rcvd_from_fw);
+}
+
+/**
+ * dal_dc_update_read_state - update client read state
+ *
+ * @dc: dal client
+ * @len: received message length
+ *
+ * Locking: called under "ddev->context_lock" lock
+ */
+static void dal_dc_update_read_state(struct dal_client *dc, ssize_t len)
+{
+	struct dal_device *ddev = dc->ddev;
+
+	/* check BH msg magic; if it is present, this fragment is the header */
+	if (bh_msg_is_response(ddev->bh_fw_msg.msg, len)) {
+		struct bh_response_header *hdr =
+			(struct bh_response_header *)dc->ddev->bh_fw_msg.msg;
+
+		dc->expected_msg_size_from_fw = hdr->h.length;
+		dev_dbg(&ddev->dev, "expected_msg_size_from_fw = %d bytes read = %zd\n",
+			dc->expected_msg_size_from_fw, len);
+
+		/* clear data from the past */
+		dc->bytes_rcvd_from_fw = 0;
+	}
+
+	/* update number of bytes rcvd */
+	dc->bytes_rcvd_from_fw += len;
+}
+
+/**
+ * dal_get_client_by_sequence_number - find the client interface to which
+ *                                     the received message should be routed
+ *
+ * @ddev: dal device
+ *
+ * Return: kernel space interface or user space interface
+ */
+static enum dal_intf dal_get_client_by_sequence_number(struct dal_device *ddev)
+{
+	struct bh_response_header *head;
+
+	if (!ddev->clients[DAL_INTF_KDI])
+		return DAL_INTF_CDEV;
+
+	head = (struct bh_response_header *)ddev->bh_fw_msg.msg;
+
+	dev_dbg(&ddev->dev, "msg seq = %llu\n", head->seq);
+
+	if (head->seq == ddev->clients[DAL_INTF_KDI]->seq)
+		return DAL_INTF_KDI;
+
+	return DAL_INTF_CDEV;
+}
+
+/**
+ * dal_recv_cb - callback to receive message from DAL FW over mei
+ *
+ * @cldev: mei client device
+ */
+static void dal_recv_cb(struct mei_cl_device *cldev)
+{
+	struct dal_device *ddev;
+	struct dal_client *dc;
+	enum dal_intf intf;
+	ssize_t len;
+	ssize_t ret;
+	bool is_unexpected_msg = false;
+
+	ddev = mei_cldev_get_drvdata(cldev);
+
+	/* read the msg from MEI */
+	len = mei_cldev_recv(cldev, ddev->bh_fw_msg.msg, DAL_MAX_BUFFER_SIZE);
+	if (len < 0) {
+		dev_err(&cldev->dev, "recv failed %zd\n", len);
+		return;
+	}
+
+	/*
+	 * lock to prevent read from MEI while writing to MEI and to
+	 * deal with just one msg at the same time
+	 */
+	mutex_lock(&ddev->context_lock);
+
+	/* save msg len */
+	ddev->bh_fw_msg.len = len;
+
+	/* set to which interface the msg should be sent */
+	if (bh_msg_is_response(ddev->bh_fw_msg.msg, len)) {
+		intf = dal_get_client_by_sequence_number(ddev);
+		dev_dbg(&ddev->dev, "recv_cb(): client set by sequence number\n");
+		dc = ddev->clients[intf];
+	} else if (!ddev->current_read_client) {
+		intf = DAL_INTF_CDEV;
+		dev_dbg(&ddev->dev, "recv_cb(): EXTRA msg received - curr == NULL\n");
+		dc = ddev->clients[intf];
+		is_unexpected_msg = true;
+	} else {
+		dc = ddev->current_read_client;
+		dev_dbg(&ddev->dev, "recv_cb(): FRAGMENT msg received - curr != NULL\n");
+	}
+
+	/* save the current read client */
+	ddev->current_read_client = dc;
+	dev_dbg(&cldev->dev, "read client type %d data from mei client seq = %llu\n",
+		dc->intf, dc->seq);
+
+	/*
+	 * save the new msg in the queue;
+	 * if the queue is full, the new message is dropped
+	 */
+	ret = kfifo_in(&dc->read_queue, &ddev->bh_fw_msg.len, sizeof(len));
+	ret += kfifo_in(&dc->read_queue, ddev->bh_fw_msg.msg, len);
+	if (ret < len + sizeof(len))
+		dev_dbg(&ddev->dev, "queue is full - MSG THROWN\n");
+
+	dal_dc_update_read_state(dc, len);
+
+	/*
+	 * To clear the current client, check whether the whole msg was
+	 * received for the current client
+	 */
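+	/*
+	 * Worked example of the bookkeeping above (illustrative numbers
+	 * only): a 10240 byte response arrives as three MEI reads of 4096,
+	 * 4096 and 2048 bytes. The first fragment carries the response
+	 * header, so expected_msg_size_from_fw is set to 10240 and
+	 * bytes_rcvd_from_fw restarts at 0. After the third fragment
+	 * bytes_rcvd_from_fw reaches 10240, so current_read_client is
+	 * cleared below and the next header can be routed afresh.
+	 */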
+	if (is_unexpected_msg ||
+	    dc->bytes_rcvd_from_fw == dc->expected_msg_size_from_fw) {
+		dev_dbg(&ddev->dev, "recv_cb(): setting CURRENT_READER to NULL\n");
+		ddev->current_read_client = NULL;
+	}
+
+	/* wake up all clients waiting for read or write */
+	wake_up_interruptible(&ddev->wq);
+
+	mutex_unlock(&ddev->context_lock);
+	dev_dbg(&cldev->dev, "recv_cb(): unlock\n");
+}
+
+/**
+ * dal_mei_enable - enable mei cldev
+ *
+ * @ddev: dal device
+ *
+ * Return: 0 on success
+ *         <0 on failure
+ */
+static int dal_mei_enable(struct dal_device *ddev)
+{
+	int ret;
+
+	ret = mei_cldev_enable(ddev->cldev);
+	if (ret < 0) {
+		dev_err(&ddev->cldev->dev, "mei_cldev_enable() failed with ret = %d\n",
+			ret);
+		return ret;
+	}
+
+	/* save pointer to the context in the device */
+	mei_cldev_set_drvdata(ddev->cldev, ddev);
+
+	/* register to mei bus callbacks */
+	ret = mei_cldev_register_rx_cb(ddev->cldev, dal_recv_cb);
+	if (ret) {
+		dev_err(&ddev->cldev->dev, "mei_cldev_register_rx_cb() failed ret = %d\n",
+			ret);
+		mei_cldev_disable(ddev->cldev);
+	}
+
+	return ret;
+}
+
+/**
+ * dal_wait_for_write - wait until the dal client is the first writer
+ *                      in the writers queue
+ *
+ * @ddev: dal device
+ * @dc: dal client
+ *
+ * Return: 0 on success
+ *         -ERESTARTSYS when wait was interrupted
+ *         -ENODEV when the device was removed
+ */
+static int dal_wait_for_write(struct dal_device *ddev, struct dal_client *dc)
+{
+	if (wait_event_interruptible(ddev->wq,
+				     list_first_entry(&ddev->writers,
+						      struct dal_client,
+						      wrlink) == dc ||
+				     ddev->is_device_removed)) {
+		return -ERESTARTSYS;
+	}
+
+	/* if the device was removed indicate that to the caller */
+	if (ddev->is_device_removed)
+		return -ENODEV;
+
+	return 0;
+}
+
+/**
+ * dal_send_error_access_denied - put an 'access denied' response into the
+ *                                client read queue, as an in-band error
+ *                                message
+ *
+ * @dc: dal client
+ *
+ * Return: 0 on success
+ *         -ENOMEM when client read queue is full
+ *
+ * Locking: called under "ddev->write_lock" lock
+ */
+static int dal_send_error_access_denied(struct dal_client *dc)
+{
+	struct dal_device *ddev = dc->ddev;
+	struct bh_response_header res;
+	size_t len;
+	int ret;
+
+	mutex_lock(&ddev->context_lock);
+
+	bh_prep_access_denied_response(dc->write_buffer, &res);
+	len = sizeof(res);
+
+	ret = kfifo_in(&dc->read_queue, &len, sizeof(len));
+	if (ret < sizeof(len)) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = kfifo_in(&dc->read_queue, &res, len);
+	if (ret < len) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	ret = 0;
+
+out:
+	mutex_unlock(&ddev->context_lock);
+	return ret;
+}
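+/*
+ * The write path below runs every first-fragment command header through a
+ * NULL-terminated table of bh_filter_func callbacks (dal_write_filter_tbl,
+ * applied by bh_filter_hdr()). A sketch of such a filter, assuming only the
+ * prototype shared by the two filters below; dal_validate_example is a
+ * made-up name:
+ *
+ *	static int dal_validate_example(const struct bh_command_header *hdr,
+ *					size_t count, void *ctx)
+ *	{
+ *		if (count < sizeof(*hdr))
+ *			return -EINVAL;
+ *		return 0;
+ *	}
+ *
+ * Returning 0 lets the message through; -EPERM makes dal_write() queue an
+ * in-band 'access denied' response via dal_send_error_access_denied()
+ * instead of failing the write.
+ */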
+/**
+ * dal_validate_access - validate that access is permitted
+ *
+ * For an open session command, validate that the client has permission
+ * to open a session to the requested ta
+ *
+ * @hdr: command header
+ * @count: message size
+ * @ctx: context - the dal client
+ *
+ * Return: 0 when command is permitted
+ *         -EINVAL when message is invalid
+ *         -EPERM when access is not permitted
+ *
+ * Locking: called under "ddev->write_lock" lock
+ */
+static int dal_validate_access(const struct bh_command_header *hdr,
+			       size_t count, void *ctx)
+{
+	struct dal_client *dc = ctx;
+	struct dal_device *ddev = dc->ddev;
+	const uuid_t *ta_id;
+
+	if (!bh_msg_is_cmd_open_session(hdr))
+		return 0;
+
+	ta_id = bh_open_session_ta_id(hdr, count);
+	if (!ta_id)
+		return -EINVAL;
+
+	return dal_access_policy_allowed(ddev, ta_id, dc);
+}
+
+/**
+ * dal_is_kdi_msg - check if sequence is in kernel space sequence range
+ *
+ * Each interface (kernel space and user space) has a different range of
+ * sequence numbers. This function checks if a given number is in the
+ * kernel space sequence range
+ *
+ * @hdr: command header
+ *
+ * Return: true when seq fits kernel space intf
+ *         false when seq fits user space intf
+ */
+static bool dal_is_kdi_msg(const struct bh_command_header *hdr)
+{
+	return hdr->seq >= MSG_SEQ_START_NUMBER;
+}
+
+/**
+ * dal_validate_seq - validate that message sequence fits client interface,
+ *                    i.e. prevent a user space client from using a kernel
+ *                    space sequence
+ *
+ * @hdr: command header
+ * @count: message size
+ * @ctx: context - the dal client
+ *
+ * Return: 0 when the sequence matches
+ *         -EPERM when a user space client uses a kernel space sequence
+ *
+ * Locking: called under "ddev->write_lock" lock
+ */
+static int dal_validate_seq(const struct bh_command_header *hdr,
+			    size_t count, void *ctx)
+{
+	struct dal_client *dc = ctx;
+
+	if (dc->intf != DAL_INTF_KDI && dal_is_kdi_msg(hdr))
+		return -EPERM;
+
+	return 0;
+}
+
+/*
+ * dal_write_filter_tbl - filter functions which validate that the message
+ *                        being sent is valid and that the user client has
+ *                        permission to send it
+ */
+static const bh_filter_func dal_write_filter_tbl[] = {
+	dal_validate_access,
+	dal_validate_seq,
+	NULL,
+};
+
+/**
+ * dal_write - write message to DAL FW over mei
+ *
+ * @dc: dal client
+ * @count: message size
+ * @seq: message sequence (if client is kernel space client)
+ *
+ * Return: >=0 data length on success
+ *         <0 on failure
+ */
+ssize_t dal_write(struct dal_client *dc, size_t count, u64 seq)
+{
+	struct dal_device *ddev = dc->ddev;
+	struct device *dev;
+	ssize_t wr;
+	ssize_t ret;
+	enum dal_intf intf = dc->intf;
+
+	dev = &ddev->dev;
+
+	dev_dbg(dev, "client interface %d\n", intf);
+	dal_dc_print(dev, dc);
+
+	/* lock for adding a new client that wants to write to the fifo */
+	mutex_lock(&ddev->write_lock);
+	/* update client with latest msg seq number */
+	dc->seq = seq;
+	dev_dbg(dev, "current_write_client seq = %llu\n", dc->seq);
+
+	/* put dc in the writers queue if not already there */
+	if (list_first_entry_or_null(&ddev->writers,
+				     struct dal_client, wrlink) != dc) {
+		/* adding client to write queue - this is the first fragment */
+		const struct bh_command_header *hdr;
+
+		hdr = bh_msg_cmd_hdr(dc->write_buffer, count);
+		if (!hdr) {
+			dev_dbg(dev, "expected cmd hdr at first fragment\n");
+			ret = -EINVAL;
+			goto out;
+		}
+		ret = bh_filter_hdr(hdr, count, dc, dal_write_filter_tbl);
+		if (ret == -EPERM) {
+			ret = dal_send_error_access_denied(dc);
+			ret = ret ? 
ret : count; + } + if (ret) + goto out; + + dc->bytes_sent_to_fw = 0; + dc->expected_msg_size_to_fw = hdr->h.length; + + list_add_tail(&dc->wrlink, &ddev->writers); + } + + /* wait for current writer to finish his write session */ + mutex_unlock(&ddev->write_lock); + ret = dal_wait_for_write(ddev, dc); + mutex_lock(&ddev->write_lock); + if (ret < 0) + goto out; + + dev_dbg(dev, "before mei_cldev_send - client type %d\n", intf); + + /* send msg via MEI */ + wr = mei_cldev_send(ddev->cldev, dc->write_buffer, count); + if (wr != count) { + /* ENODEV can be issued upon internal reset */ + if (wr != -ENODEV) { + dev_err(dev, "mei_cl_send() failed, write_bytes != count (%zd != %zu)\n", + wr, count); + ret = -EFAULT; + goto out; + } + /* if DAL FW client is disconnected, try to reconnect */ + dev_dbg(dev, "try to reconnect to DAL FW cl\n"); + ret = mei_cldev_disable(ddev->cldev); + if (ret < 0) { + dev_err(&ddev->cldev->dev, "failed to disable mei cl [%zd]\n", + ret); + goto out; + } + ret = dal_mei_enable(ddev); + if (ret < 0) + dev_err(&ddev->cldev->dev, "failed to reconnect to DAL FW client [%zd]\n", + ret); + else + ret = -EAGAIN; + + goto out; + } + + dev_dbg(dev, "wrote %zu bytes to fw - client type %d\n", wr, intf); + + /* update client byte sent */ + dc->bytes_sent_to_fw += count; + ret = wr; + + if (dc->bytes_sent_to_fw != dc->expected_msg_size_to_fw) { + dev_dbg(dev, "expecting to write more data to DAL FW - client type %d\n", + intf); + goto write_more; + } +out: + /* remove current dc from the queue */ + list_del_init(&dc->wrlink); + if (list_empty(&ddev->writers)) + wake_up_interruptible(&ddev->wq); + +write_more: + mutex_unlock(&ddev->write_lock); + return ret; +} + +/** + * dal_wait_for_read - wait until the client (dc) will have data + * in his read queue + * + * @dc: dal client + * + * Return: 0 on success + * -ENODEV when the device was removed + */ +int dal_wait_for_read(struct dal_client *dc) +{ + struct dal_device *ddev = dc->ddev; + struct device *dev = &ddev->dev; + + dal_dc_print(dev, dc); + + dev_dbg(dev, "before wait_for_data_to_read() - client type %d kfifo status %d\n", + dc->intf, kfifo_is_empty(&dc->read_queue)); + + /* wait until there is data in the read_queue */ + wait_event_interruptible(ddev->wq, !kfifo_is_empty(&dc->read_queue) || + ddev->is_device_removed); + + dev_dbg(dev, "after wait_for_data_to_read() - client type %d\n", + dc->intf); + + /* FIXME: use reference counter */ + if (ddev->is_device_removed) { + dev_dbg(dev, "woke up, device was removed\n"); + return -ENODEV; + } + + return 0; +} + +/** + * dal_dc_destroy - destroy dal client + * + * @ddev: dal device + * @intf: device interface + * + * Locking: called under "ddev->context_lock" lock + */ +void dal_dc_destroy(struct dal_device *ddev, enum dal_intf intf) +{ + struct dal_client *dc; + + dc = ddev->clients[intf]; + if (!dc) + return; + + kfifo_free(&dc->read_queue); + kfree(dc); + ddev->clients[intf] = NULL; +} + +/** + * dal_dc_setup - initialize dal client + * + * @ddev: dal device + * @intf: device interface + * + * Return: 0 on success + * -EINVAL when client is already initialized + * -ENOMEM on memory allocation failure + */ +int dal_dc_setup(struct dal_device *ddev, enum dal_intf intf) +{ + int ret; + struct dal_client *dc; + size_t readq_sz; + + if (ddev->clients[intf]) { + dev_err(&ddev->dev, "client already set\n"); + return -EINVAL; + } + + dc = kzalloc(sizeof(*dc), GFP_KERNEL); + if (!dc) + return -ENOMEM; + + /* each buffer contains data and length */ + readq_sz = (DAL_MAX_BUFFER_SIZE 
+ sizeof(ddev->bh_fw_msg.len)) * + DAL_BUFFERS_PER_CLIENT; + ret = kfifo_alloc(&dc->read_queue, readq_sz, GFP_KERNEL); + if (ret) { + kfree(dc); + return ret; + } + + dc->intf = intf; + dc->ddev = ddev; + INIT_LIST_HEAD(&dc->wrlink); + ddev->clients[intf] = dc; + return 0; +} + +/** + * dal_dev_match - match function to find dal device + * + * Used to get dal device from dal_class by device id + * + * @dev: device structure + * @data: the device id + * + * Return: 1 on match + * 0 on mismatch + */ +static int dal_dev_match(struct device *dev, const void *data) +{ + struct dal_device *ddev; + const enum dal_dev_type *device_id = + (enum dal_dev_type *)data; + + ddev = container_of(dev, struct dal_device, dev); + + return ddev->device_id == *device_id; +} + +/** + * dal_find_dev - get dal device from dal_class by device id + * + * @device_id: device id + * + * Return: pointer to the requested device + * NULL if the device wasn't found + */ +struct device *dal_find_dev(enum dal_dev_type device_id) +{ + return class_find_device(dal_class, NULL, &device_id, dal_dev_match); +} + +/** + * dal_remove - dal remove callback in mei_cl_driver + * + * @cldev: mei client device + * + * Return: 0 + */ +static int dal_remove(struct mei_cl_device *cldev) +{ + struct dal_device *ddev = mei_cldev_get_drvdata(cldev); + + if (!ddev) + return 0; + + dal_dev_del(ddev); + + ddev->is_device_removed = true; + /* make sure the above is set */ + smp_mb(); + /* wakeup write waiters so we can unload */ + if (waitqueue_active(&ddev->wq)) + wake_up_interruptible(&ddev->wq); + + device_del(&ddev->dev); + + mei_cldev_set_drvdata(cldev, NULL); + + mei_cldev_disable(cldev); + + put_device(&ddev->dev); + + return 0; +} +/** + * dal_device_release - dal release callback in dev structure + * + * @dev: device structure + */ +static void dal_device_release(struct device *dev) +{ + struct dal_device *ddev = to_dal_device(dev); + + dal_access_list_free(ddev); + kfree(ddev->bh_fw_msg.msg); + kfree(ddev); +} + +/** + * dal_probe - dal probe callback in mei_cl_driver + * + * @cldev: mei client device + * @id: mei client device id + * + * Return: 0 on success + * <0 on failure + */ +static int dal_probe(struct mei_cl_device *cldev, + const struct mei_cl_device_id *id) +{ + struct dal_device *ddev; + struct device *pdev = &cldev->dev; + int ret; + + ddev = kzalloc(sizeof(*ddev), GFP_KERNEL); + if (!ddev) + return -ENOMEM; + + /* initialize the mutex and wait queue */ + mutex_init(&ddev->context_lock); + mutex_init(&ddev->write_lock); + init_waitqueue_head(&ddev->wq); + INIT_LIST_HEAD(&ddev->writers); + ddev->cldev = cldev; + ddev->device_id = id->driver_info; + + ddev->dev.parent = pdev; + ddev->dev.class = dal_class; + ddev->dev.release = dal_device_release; + dev_set_name(&ddev->dev, "dal%d", ddev->device_id); + + dal_dev_setup(ddev); + + ret = device_register(&ddev->dev); + if (ret) { + dev_err(pdev, "unable to register device\n"); + goto err; + } + + ddev->bh_fw_msg.msg = kzalloc(DAL_MAX_BUFFER_SIZE, GFP_KERNEL); + if (!ddev->bh_fw_msg.msg) { + ret = -ENOMEM; + goto err; + } + + ret = dal_access_list_init(ddev); + if (ret) + goto err; + + ret = dal_mei_enable(ddev); + if (ret < 0) + goto err; + + ret = dal_dev_add(ddev); + if (ret) + goto err; + + return 0; + +err: + dal_remove(cldev); + + return ret; +} + +/* DAL FW HECI client GUIDs */ +#define IVM_UUID UUID_LE(0x3c4852d6, 0xd47b, 0x4f46, \ + 0xb0, 0x5e, 0xb5, 0xed, 0xc1, 0xaa, 0x44, 0x0e) +#define SDM_UUID UUID_LE(0xdba4d603, 0xd7ed, 0x4931, \ + 0x88, 0x23, 0x17, 0xad, 0x58, 
0x57, 0x05, 0xd5) +#define RTM_UUID UUID_LE(0x5565a099, 0x7fe2, 0x45c1, \ + 0xa2, 0x2b, 0xd7, 0xe9, 0xdf, 0xea, 0x9a, 0x2e) + +#define DAL_DEV_ID(__uuid, __device_type) \ + {.uuid = __uuid, \ + .version = MEI_CL_VERSION_ANY, \ + .driver_info = __device_type} + +/* + * dal_device_id - ids of dal FW devices, + * for all 3 dal FW clients (IVM, SDM and RTM) + */ +static const struct mei_cl_device_id dal_device_id[] = { + DAL_DEV_ID(IVM_UUID, DAL_MEI_DEVICE_IVM), + DAL_DEV_ID(SDM_UUID, DAL_MEI_DEVICE_SDM), + DAL_DEV_ID(RTM_UUID, DAL_MEI_DEVICE_RTM), + /* required last entry */ + { } +}; +MODULE_DEVICE_TABLE(mei, dal_device_id); + +static struct mei_cl_driver dal_driver = { + .id_table = dal_device_id, + .name = KBUILD_MODNAME, + + .probe = dal_probe, + .remove = dal_remove, +}; + +/** + * mei_dal_exit - module exit function + */ +static void __exit mei_dal_exit(void) +{ + mei_cldev_driver_unregister(&dal_driver); + + dal_dev_exit(); + + dal_kdi_exit(); + + class_destroy(dal_class); +} + +/** + * mei_dal_init - module init function + * + * Return: 0 on success + * <0 on failure + */ +static int __init mei_dal_init(void) +{ + int ret; + + dal_class = class_create(THIS_MODULE, "dal"); + if (IS_ERR(dal_class)) { + pr_err("couldn't create class\n"); + return PTR_ERR(dal_class); + } + + ret = dal_dev_init(); + if (ret < 0) + goto err_class; + + ret = dal_kdi_init(); + if (ret) + goto err_dev; + + ret = mei_cldev_driver_register(&dal_driver); + if (ret < 0) { + pr_err("mei_cl_driver_register failed with status = %d\n", ret); + goto err; + } + + return 0; + +err: + dal_kdi_exit(); +err_dev: + dal_dev_exit(); +err_class: + class_destroy(dal_class); + return ret; +} + +module_init(mei_dal_init); +module_exit(mei_dal_exit); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) MEI Dynamic Application Loader (DAL)"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/mei/dal/dal_dev.h b/drivers/misc/mei/dal/dal_dev.h new file mode 100644 index 000000000000..2ddb490ad560 --- /dev/null +++ b/drivers/misc/mei/dal/dal_dev.h @@ -0,0 +1,219 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef _DAL_KDI_H_ +#define _DAL_KDI_H_ + +#include +#include +#include +#include + +#define DAL_MAX_BUFFER_SIZE 4096 +#define DAL_BUFFERS_PER_CLIENT 10 + +#define DAL_CLIENTS_PER_DEVICE 2 + +extern struct class *dal_class; + +/** + * enum dal_intf - dal interface type + * + * @DAL_INTF_KDI: (kdi) kernel space interface + * @DAL_INTF_CDEV: char device interface + */ +enum dal_intf { + DAL_INTF_KDI, + DAL_INTF_CDEV, +}; + +/** + * enum dal_dev_type - devices that are exposed to userspace + * + * @DAL_MEI_DEVICE_IVM: IVM - Intel/Issuer Virtual Machine + * @DAL_MEI_DEVICE_SDM: SDM - Security Domain Manager + * @DAL_MEI_DEVICE_RTM: RTM - Run Time Manager (Launcher) + * + * @DAL_MEI_DEVICE_MAX: max dal device type + */ +enum dal_dev_type { + DAL_MEI_DEVICE_IVM, + DAL_MEI_DEVICE_SDM, + DAL_MEI_DEVICE_RTM, + + DAL_MEI_DEVICE_MAX +}; + +/** + * struct dal_client - host client + * + * @ddev: dal parent device + * @wrlink: link in the writers list + * @read_queue: queue of received messages from DAL FW + * @write_buffer: buffer to send to DAL FW + * @intf: client interface - user space or kernel space + * + * @seq: the sequence number of the last message sent (in kernel space API only) + * When a message is received from DAL FW, we use this sequence number + * to decide which client should get the message. If the sequence + * number of the message is equals to the kernel space sequence number, + * the kernel space client should get the message. + * Otherwise the user space client will get it. + * @expected_msg_size_from_fw: the expected msg size from DALFW + * @expected_msg_size_to_fw: the expected msg size that will be sent to DAL FW + * @bytes_rcvd_from_fw: number of bytes that were received from DAL FW + * @bytes_sent_to_fw: number of bytes that were sent to DAL FW + */ +struct dal_client { + struct dal_device *ddev; + struct list_head wrlink; + struct kfifo read_queue; + char write_buffer[DAL_MAX_BUFFER_SIZE]; + enum dal_intf intf; + + u64 seq; + u32 expected_msg_size_from_fw; + u32 expected_msg_size_to_fw; + u32 bytes_rcvd_from_fw; + u32 bytes_sent_to_fw; +}; + +/** + * struct dal_bh_msg - msg received from DAL FW. 
+ * + * @len: message length + * @msg: message buffer + */ +struct dal_bh_msg { + size_t len; + char *msg; +}; + +/** + * struct dal_device - DAL private device struct. + * each DAL device has a context (i.e IVM, SDM, RTM) + * + * @dev: device on a bus + * @cdev: character device + * @status: dal device status + * + * @context_lock: big device lock + * @write_lock: lock over write list + * @wq: dal clients wait queue. When client wants to send or receive message, + * he waits in this queue until he is ready + * @writers: write pending list + * @clients: clients on this device (userspace and kernel space) + * @bh_fw_msg: message which was received from DAL FW + * @current_read_client: current reading client (which receives message from + * DAL FW) + * + * @cldev: the MEI CL device which corresponds to a single DAL FW HECI client + * + * @is_device_removed: device removed flag + * + * @device_id: DAL device type + */ +struct dal_device { + struct device dev; + struct cdev cdev; +#define DAL_DEV_OPENED 0 + unsigned long status; + + struct mutex context_lock; /* device lock */ + struct mutex write_lock; /* write lock */ + wait_queue_head_t wq; + struct list_head writers; + struct dal_client *clients[DAL_CLIENTS_PER_DEVICE]; + struct dal_bh_msg bh_fw_msg; + struct dal_client *current_read_client; + + struct mei_cl_device *cldev; + + bool is_device_removed; + + int device_id; +}; + +#define to_dal_device(d) container_of(d, struct dal_device, dev) + +ssize_t dal_write(struct dal_client *dc, size_t count, u64 seq); +int dal_wait_for_read(struct dal_client *dc); + +struct device *dal_find_dev(enum dal_dev_type device_id); + +void dal_dc_print(struct device *dev, struct dal_client *dc); +int dal_dc_setup(struct dal_device *ddev, enum dal_intf intf); +void dal_dc_destroy(struct dal_device *ddev, enum dal_intf intf); + +int dal_kdi_send(unsigned int handle, const unsigned char *buf, + size_t len, u64 seq); +int dal_kdi_recv(unsigned int handle, unsigned char *buf, size_t *count); +int dal_kdi_init(void); +void dal_kdi_exit(void); + +int dal_access_policy_add(struct dal_device *ddev, + const uuid_t *ta_id, void *owner); +int dal_access_policy_remove(struct dal_device *ddev, + const uuid_t *ta_id, void *owner); +int dal_access_policy_allowed(struct dal_device *ddev, + const uuid_t *ta_id, void *owner); +void dal_access_list_free(struct dal_device *ddev); +int dal_access_list_init(struct dal_device *ddev); + +#endif /* _DAL_KDI_H_ */ diff --git a/drivers/misc/mei/dal/dal_kdi.c b/drivers/misc/mei/dal/dal_kdi.c new file mode 100644 index 000000000000..bed43c96b80b --- /dev/null +++ b/drivers/misc/mei/dal/dal_kdi.c @@ -0,0 +1,610 @@ +/****************************************************************************** + * Intel mei_dal Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *
+ *****************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "bh_external.h"
+#include "bh_errcode.h"
+#include "acp_parser.h"
+#include "dal_dev.h"
+
+static DEFINE_MUTEX(dal_kdi_lock);
+
+/**
+ * to_kdi_err - convert error number to kdi error
+ *
+ * Beihai errors (>0) are converted to DAL_KDI errors (those errors came
+ * from DAL FW); system errors and the success value (<=0) stay as is.
+ *
+ * @err: error code to convert (either bh err or system err)
+ *
+ * Return: the converted kdi error number or system error
+ */
+static int to_kdi_err(int err)
+{
+	if (err)
+		pr_debug("got error: %d\n", err);
+
+	if (err <= 0)
+		return err;
+
+	/* err > 0: is error from DAL FW */
+	switch (err) {
+	case BPE_INTERNAL_ERROR:
+		return DAL_KDI_STATUS_INTERNAL_ERROR;
+	case BPE_INVALID_PARAMS:
+	case BHE_INVALID_PARAMS:
+		return DAL_KDI_STATUS_INVALID_PARAMS;
+	case BHE_INVALID_HANDLE:
+		return DAL_KDI_STATUS_INVALID_HANDLE;
+	case BPE_NOT_INIT:
+		return DAL_KDI_STATUS_NOT_INITIALIZED;
+	case BPE_OUT_OF_MEMORY:
+	case BHE_OUT_OF_MEMORY:
+		return DAL_KDI_STATUS_OUT_OF_MEMORY;
+	case BHE_INSUFFICIENT_BUFFER:
+	case BHE_APPLET_SMALL_BUFFER:
+		return DAL_KDI_STATUS_BUFFER_TOO_SMALL;
+	case BPE_OUT_OF_RESOURCE:
+	case BHE_VM_INSTANCE_INIT_FAIL:
+		return DAL_KDI_STATUS_OUT_OF_RESOURCE;
+	case BHE_SESSION_NUM_EXCEED:
+		return DAL_KDI_STATUS_MAX_SESSIONS_REACHED;
+	case BHE_UNCAUGHT_EXCEPTION:
+		return DAL_KDI_STATUS_UNCAUGHT_EXCEPTION;
+	case BHE_WD_TIMEOUT:
+		return DAL_KDI_STATUS_WD_TIMEOUT;
+	case BHE_APPLET_CRASHED:
+		return DAL_KDI_STATUS_APPLET_CRASHED;
+	case BHE_TA_PACKAGE_HASH_VERIFY_FAIL:
+		return DAL_KDI_STATUS_INVALID_ACP;
+	case BHE_PACKAGE_NOT_FOUND:
+		return DAL_KDI_STATUS_TA_NOT_FOUND;
+	case BHE_PACKAGE_EXIST:
+		return DAL_KDI_STATUS_TA_EXIST;
+	default:
+		return DAL_KDI_STATUS_INTERNAL_ERROR;
+	}
+}
+
+/**
+ * dal_kdi_send - a callback which is called from bhp to send msg over mei
+ *
+ * @dev_idx: DAL device type
+ * @buf: message buffer
+ * @len: buffer length
+ * @seq: message sequence
+ *
+ * Return: 0 on success
+ *         -EINVAL on incorrect input
+ *         -ENODEV when the device can't be found
+ *         -EMSGSIZE when the message is too large
+ *         -EFAULT if client is NULL
+ *         <0 on dal_write failure
+ */
+int dal_kdi_send(unsigned int dev_idx, const unsigned char *buf,
+		 size_t len, u64 seq)
+{
+	enum dal_dev_type mei_device;
+	struct dal_device *ddev;
+	struct dal_client *dc;
+	struct device *dev;
+	ssize_t wr;
+	int ret;
+
+	if (!buf)
+		return -EINVAL;
+
+	if (dev_idx >= DAL_MEI_DEVICE_MAX)
+		return -EINVAL;
+
+	if (!len)
+		return 0;
+
+	if (len > DAL_MAX_BUFFER_SIZE)
+		return -EMSGSIZE;
+
+	mei_device = (enum dal_dev_type)dev_idx;
+	dev = dal_find_dev(mei_device);
+	if (!dev) {
+		pr_debug("can't find device\n");
+		return -ENODEV;
+	}
+
+	ddev = to_dal_device(dev);
+	dc = ddev->clients[DAL_INTF_KDI];
+	if (!dc) {
+		dev_dbg(dev, "client is NULL\n");
+		ret = -EFAULT;
+		goto out;
+	}
+
+	/* copy data to client object */
+	memcpy(dc->write_buffer, buf, len);
+	wr = dal_write(dc, len, seq);
+	if (wr > 0)
+		ret = 0;
+	else
+		ret = wr;
+out:
+	put_device(dev);
+	return ret;
+}
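+
+/*
+ * Layout of a client read_queue entry, as consumed by dal_kdi_recv()
+ * below (illustrative):
+ *
+ *   | size_t len | len payload bytes | size_t len | payload | ...
+ *
+ * The length word is popped first and then exactly len payload bytes,
+ * so the two kfifo_out() calls must stay paired.
+ */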
+
+/**
+ * dal_kdi_recv - a callback which is called from bhp to recv msg from DAL FW
+ *
+ * @dev_idx: DAL device type
+ * @buf: buffer of received message
+ * @count: input and output param -
+ *         - input: buffer length
+ *         - output: size of the received message
+ *
+ * Return: 0 on success
+ *         -EINVAL on incorrect input
+ *         -ENODEV when the device can't be found
+ *         -EFAULT when client is NULL or copy failed
+ *         -EMSGSIZE when buffer is too small
+ *         <0 on dal_wait_for_read failure
+ */
+int dal_kdi_recv(unsigned int dev_idx, unsigned char *buf, size_t *count)
+{
+	enum dal_dev_type mei_device;
+	struct dal_device *ddev;
+	struct dal_client *dc;
+	struct device *dev;
+	int ret;
+	size_t len;
+
+	if (!buf || !count)
+		return -EINVAL;
+
+	if (dev_idx >= DAL_MEI_DEVICE_MAX)
+		return -EINVAL;
+
+	mei_device = (enum dal_dev_type)dev_idx;
+	dev = dal_find_dev(mei_device);
+	if (!dev)
+		return -ENODEV;
+
+	ddev = to_dal_device(dev);
+	dc = ddev->clients[DAL_INTF_KDI];
+	if (!dc) {
+		dev_dbg(dev, "client is NULL\n");
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = dal_wait_for_read(dc);
+	if (ret)
+		goto out;
+
+	if (kfifo_is_empty(&dc->read_queue)) {
+		*count = 0;
+		goto out;
+	}
+
+	ret = kfifo_out(&dc->read_queue, &len, sizeof(len));
+	if (ret != sizeof(len)) {
+		dev_err(&ddev->dev, "could not copy buffer: cannot fetch size\n");
+		ret = -EFAULT;
+		goto out;
+	}
+
+	if (len > *count) {
+		dev_dbg(&ddev->dev, "could not copy buffer: src size = %zu > dest size = %zu\n",
+			len, *count);
+		ret = -EMSGSIZE;
+		goto out;
+	}
+
+	ret = kfifo_out(&dc->read_queue, buf, len);
+	if (ret != len) {
+		dev_err(&ddev->dev, "could not copy buffer: src size = %zu, dest size = %d\n",
+			len, ret);
+		ret = -EFAULT;
+		goto out;
+	}
+
+	*count = len;
+	ret = 0;
+out:
+	put_device(dev);
+	return ret;
+}
+
+/**
+ * dal_create_session - create session to an installed trusted application
+ *
+ * @session_handle: output param to hold the session handle
+ * @ta_id: trusted application (ta) id
+ * @acp_pkg: acp file of the ta
+ * @acp_pkg_len: acp file length
+ * @init_param: init parameters to the session (optional)
+ * @init_param_len: length of the init parameters
+ *
+ * Return: 0 on success
+ *         <0 on system failure
+ *         >0 on DAL FW failure
+ */
+int dal_create_session(u64 *session_handle, const char *ta_id,
+		       const u8 *acp_pkg, size_t acp_pkg_len,
+		       const u8 *init_param, size_t init_param_len)
+{
+	struct ac_ins_jta_pack_ext pack;
+	char *ta_pkg;
+	int ta_pkg_size;
+	int ret;
+
+	if (!ta_id || !acp_pkg || !acp_pkg_len || !session_handle)
+		return -EINVAL;
+
+	/* init_param is optional, but if it is absent its length must be 0 */
+	if (!init_param && init_param_len != 0) {
+		pr_debug("INVALID_PARAMS init_param %p init_param_len %zu\n",
+			 init_param, init_param_len);
+		return -EINVAL;
+	}
+
+	mutex_lock(&dal_kdi_lock);
+
+	ret = acp_pload_ins_jta(acp_pkg, acp_pkg_len, &pack);
+	if (ret) {
+		pr_debug("acp_pload_ins_jta() return %d\n", ret);
+		goto out;
+	}
+
+	ta_pkg = pack.ta_pack;
+	if (!ta_pkg) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ta_pkg_size = ta_pkg - (char *)acp_pkg;
+	if (ta_pkg_size < 0 || (unsigned int)ta_pkg_size > acp_pkg_len) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ta_pkg_size = acp_pkg_len - ta_pkg_size;
+
+	ret = bh_ta_session_open(session_handle, ta_id, ta_pkg, ta_pkg_size,
+				 init_param, init_param_len);
+	if (ret)
+		pr_debug("bh_ta_session_open failed = %d\n", ret);
+
+out:
+	mutex_unlock(&dal_kdi_lock);
+
+	return to_kdi_err(ret);
+}
+EXPORT_SYMBOL(dal_create_session);
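+
+/*
+ * Minimal in-kernel usage sketch of the exported session API
+ * (illustrative only; ta_id, acp, acp_len, cmd_id, in and in_len are
+ * hypothetical):
+ *
+ *	u64 handle;
+ *	u8 *out = NULL;
+ *	size_t out_len = 64;
+ *	int rsp, ret;
+ *
+ *	ret = dal_create_session(&handle, ta_id, acp, acp_len, NULL, 0);
+ *	if (!ret) {
+ *		ret = dal_send_and_receive(handle, cmd_id, in, in_len,
+ *					   &out, &out_len, &rsp);
+ *		kfree(out);
+ *		dal_close_session(handle);
+ *	}
+ */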
+
+/**
+ * dal_send_and_receive - send and receive data to/from ta
+ *
+ * @session_handle: session handle
+ * @command_id: command id
+ * @input: message to be sent
+ * @input_len: sent message size
+ * @output: output param to hold a pointer to the buffer which
+ *          will contain the received message.
+ *          This buffer is allocated by the DAL KDI module and freed
+ *          by the user
+ * @output_len: input and output param -
+ *              - input: the expected maximum length of the received message
+ *              - output: size of the received message
+ * @response_code: output param to hold the return value from the applet
+ *
+ * Return: 0 on success
+ *         <0 on system failure
+ *         >0 on DAL FW failure
+ */
+int dal_send_and_receive(u64 session_handle, int command_id, const u8 *input,
+			 size_t input_len, u8 **output, size_t *output_len,
+			 int *response_code)
+{
+	int ret;
+
+	mutex_lock(&dal_kdi_lock);
+
+	ret = bh_ta_session_command(session_handle, command_id,
+				    input, input_len,
+				    (void **)output, output_len,
+				    response_code);
+	if (ret)
+		pr_debug("bh_ta_session_command failed, status = %d\n", ret);
+
+	mutex_unlock(&dal_kdi_lock);
+
+	return to_kdi_err(ret);
+}
+EXPORT_SYMBOL(dal_send_and_receive);
+
+/**
+ * dal_close_session - close ta session
+ *
+ * @session_handle: session handle
+ *
+ * Return: 0 on success
+ *         <0 on system failure
+ *         >0 on DAL FW failure
+ */
+int dal_close_session(u64 session_handle)
+{
+	int ret;
+
+	mutex_lock(&dal_kdi_lock);
+
+	ret = bh_ta_session_close(session_handle);
+	if (ret)
+		pr_debug("bh_ta_session_close failed = %d\n", ret);
+
+	mutex_unlock(&dal_kdi_lock);
+
+	return to_kdi_err(ret);
+}
+EXPORT_SYMBOL(dal_close_session);
+
+/**
+ * dal_set_ta_exclusive_access - set client to be owner of the ta,
+ *                               so no one else (especially a user space
+ *                               client) will be able to open a session to it
+ *
+ * @ta_id: trusted application (ta) id
+ *
+ * Return: 0 on success
+ *         -ENODEV when the device can't be found
+ *         -ENOMEM on memory allocation failure
+ *         -EPERM when ta has a blank owner and cannot be owned
+ *         -EEXIST when ta is already owned
+ */
+int dal_set_ta_exclusive_access(const uuid_t *ta_id)
+{
+	struct dal_device *ddev;
+	struct device *dev;
+	struct dal_client *dc;
+	int ret;
+
+	mutex_lock(&dal_kdi_lock);
+
+	dev = dal_find_dev(DAL_MEI_DEVICE_IVM);
+	if (!dev) {
+		pr_debug("can't find device\n");
+		ret = -ENODEV;
+		goto unlock;
+	}
+
+	ddev = to_dal_device(dev);
+	dc = ddev->clients[DAL_INTF_KDI];
+
+	ret = dal_access_policy_add(ddev, ta_id, dc);
+
+	put_device(dev);
+unlock:
+	mutex_unlock(&dal_kdi_lock);
+	return ret;
+}
+EXPORT_SYMBOL(dal_set_ta_exclusive_access);
+
+/**
+ * dal_unset_ta_exclusive_access - unset client from owning ta
+ *
+ * @ta_id: trusted application (ta) id
+ *
+ * Return: 0 on success
+ *         -ENODEV when the device can't be found
+ *         -ENOENT when ta isn't found in the exclusiveness ta list
+ *         -EPERM when ta has a blank owner or is owned by another client
+ */
+int dal_unset_ta_exclusive_access(const uuid_t *ta_id)
+{
+	struct dal_device *ddev;
+	struct device *dev;
+	struct dal_client *dc;
+	int ret;
+
+	mutex_lock(&dal_kdi_lock);
+
+	dev = dal_find_dev(DAL_MEI_DEVICE_IVM);
+	if (!dev) {
+		pr_debug("can't find device\n");
+		ret = -ENODEV;
+		goto unlock;
+	}
+
+	ddev = to_dal_device(dev);
+	dc = ddev->clients[DAL_INTF_KDI];
+
+	ret = dal_access_policy_remove(ddev, ta_id, dc);
+
+	put_device(dev);
+unlock:
+	mutex_unlock(&dal_kdi_lock);
+	return ret;
+}
+EXPORT_SYMBOL(dal_unset_ta_exclusive_access);
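+
+/*
+ * Exclusive access sketch (illustrative; my_ta_id is a hypothetical
+ * uuid_t of an installed trusted application):
+ *
+ *	if (!dal_set_ta_exclusive_access(&my_ta_id)) {
+ *		... only this client may open sessions to the TA ...
+ *		dal_unset_ta_exclusive_access(&my_ta_id);
+ *	}
+ */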
+
+#define KDI_MAJOR_VER "1"
+#define KDI_MINOR_VER "0"
+#define KDI_HOTFIX_VER "0"
+
+#define KDI_VERSION KDI_MAJOR_VER "." \
+		    KDI_MINOR_VER "." \
+		    KDI_HOTFIX_VER
+
+/**
+ * dal_get_version_info - return DAL driver version
+ *
+ * @version_info: output param to hold DAL driver version information
+ *
+ * Return: 0 on success
+ *         -EINVAL on incorrect input
+ */
+int dal_get_version_info(struct dal_version_info *version_info)
+{
+	if (!version_info)
+		return -EINVAL;
+
+	memset(version_info, 0, sizeof(*version_info));
+	snprintf(version_info->version, DAL_VERSION_LEN, "%s", KDI_VERSION);
+
+	return 0;
+}
+EXPORT_SYMBOL(dal_get_version_info);
+
+/**
+ * dal_kdi_add_dev - add new dal device (one of dal_dev_type)
+ *
+ * @dev: device object which is associated with dal device
+ * @class_intf: class interface
+ *
+ * When a new dal device is added, a new client is created for
+ * this device in the kernel space interface.
+ *
+ * Return: 0 on success
+ *         <0 on failure
+ */
+static int dal_kdi_add_dev(struct device *dev,
+			   struct class_interface *class_intf)
+{
+	int ret;
+	struct dal_device *ddev;
+
+	ddev = to_dal_device(dev);
+	mutex_lock(&ddev->context_lock);
+	ret = dal_dc_setup(ddev, DAL_INTF_KDI);
+	mutex_unlock(&ddev->context_lock);
+	return ret;
+}
+
+/**
+ * dal_kdi_rm_dev - remove dal device (one of dal_dev_type)
+ *
+ * @dev: device object which is associated with dal device
+ * @class_intf: class interface
+ */
+static void dal_kdi_rm_dev(struct device *dev,
+			   struct class_interface *class_intf)
+{
+	struct dal_device *ddev;
+
+	ddev = to_dal_device(dev);
+	mutex_lock(&ddev->context_lock);
+	dal_dc_destroy(ddev, DAL_INTF_KDI);
+	mutex_unlock(&ddev->context_lock);
+}
+
+/*
+ * dal_kdi_interface handles addition/removal of dal devices
+ */
+static struct class_interface dal_kdi_interface __refdata = {
+	.add_dev    = dal_kdi_add_dev,
+	.remove_dev = dal_kdi_rm_dev,
+};
+
+/**
+ * dal_kdi_init - initialize dal kdi
+ *
+ * Return: 0 on success
+ *         <0 on failure
+ */
+int dal_kdi_init(void)
+{
+	int ret;
+
+	bh_init_internal();
+
+	dal_kdi_interface.class = dal_class;
+	ret = class_interface_register(&dal_kdi_interface);
+	if (ret) {
+		pr_err("failed to register class interface = %d\n", ret);
+		goto err;
+	}
+
+	return 0;
+
+err:
+	bh_deinit_internal();
+	return ret;
+}
+
+/**
+ * dal_kdi_exit - dal kdi exit function
+ */
+void dal_kdi_exit(void)
+{
+	bh_deinit_internal();
+	class_interface_unregister(&dal_kdi_interface);
+}
diff --git a/drivers/misc/mei/dal/dal_ta_access.c b/drivers/misc/mei/dal/dal_ta_access.c
new file mode 100644
index 000000000000..1a8f27b6a95d
--- /dev/null
+++ b/drivers/misc/mei/dal/dal_ta_access.c
@@ -0,0 +1,288 @@
+/******************************************************************************
+ * Intel mei_dal Linux driver
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation.
+ * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include +#include +#include + +#include +#include "dal_dev.h" + +/* Spooler UUID */ +static const uuid_t spooler_ta_id = UUID_INIT(0xba8d1643, 0x50b6, 0x49cc, + 0x86, 0x1d, 0x2c, 0x01, + 0xbe, 0xd1, 0x4b, 0xe8); + +/** + * struct dal_access_policy - ta access information node + * + * @list: link in access list + * @ta_id: trusted application id + * @owner: owner of ta + */ +struct dal_access_policy { + struct list_head list; + uuid_t ta_id; + void *owner; +}; + +/** + * dal_dev_get_access_list - get access list of dal device + * + * @ddev: dal device + * + * Return: pointer to access list + */ +static struct list_head *dal_dev_get_access_list(struct dal_device *ddev) +{ + return dev_get_drvdata(&ddev->dev); +} + +/** + * dal_access_policy_alloc - allocate memory and initialize access list node + * + * @ta_id: trusted application id + * @owner: owner of ta + * + * Return: pointer to the new initialized access list node + * + * Locking: called under "kdi_lock" lock + */ +static struct dal_access_policy * +dal_access_policy_alloc(const uuid_t *ta_id, void *owner) +{ + struct dal_access_policy *e; + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + return NULL; + + INIT_LIST_HEAD(&e->list); + e->ta_id = *ta_id; + e->owner = owner; + + return e; +} + +/** + * dal_access_policy_find - find ta id in access list + * + * @access_list: access list + * @ta_id: trusted application id + * + * Return: pointer to access list node of ta + * NULL if ta is not found in access list + */ +static struct dal_access_policy * +dal_access_policy_find(struct list_head *access_list, const uuid_t *ta_id) +{ + struct dal_access_policy *e; + + list_for_each_entry(e, access_list, list) { + if (uuid_equal(&e->ta_id, ta_id)) + return e; + } + return NULL; +} + +/** + * 
dal_access_policy_add - add access information of ta and its owner
+ *
+ * @ddev: dal device
+ * @ta_id: trusted application id
+ * @owner: owner of ta
+ *
+ * Return: 0 on success
+ *         -ENOMEM on memory allocation failure
+ *         -EPERM when ta has a blank owner and cannot be owned
+ *         -EEXIST when ta is already owned
+ *
+ * Locking: called under "kdi_lock" lock
+ */
+int dal_access_policy_add(struct dal_device *ddev,
+			  const uuid_t *ta_id, void *owner)
+{
+	struct list_head *access_list = dal_dev_get_access_list(ddev);
+	struct dal_access_policy *e;
+
+	e = dal_access_policy_find(access_list, ta_id);
+	if (e) {
+		if (!e->owner)
+			return -EPERM;
+
+		return -EEXIST;
+	}
+
+	e = dal_access_policy_alloc(ta_id, owner);
+	if (!e)
+		return -ENOMEM;
+
+	list_add_tail(&e->list, access_list);
+	return 0;
+}
+
+/**
+ * dal_access_policy_remove - remove access information of ta and its owner
+ *
+ * @ddev: dal device
+ * @ta_id: trusted application id
+ * @owner: owner of ta
+ *
+ * Return: 0 on success
+ *         -ENOENT when ta isn't found in access list
+ *         -EPERM when ta has a blank owner or is owned by another client
+ *
+ * Locking: called under "kdi_lock" lock
+ */
+int dal_access_policy_remove(struct dal_device *ddev,
+			     const uuid_t *ta_id, void *owner)
+{
+	struct list_head *access_list = dal_dev_get_access_list(ddev);
+	struct dal_access_policy *e;
+
+	e = dal_access_policy_find(access_list, ta_id);
+	if (!e)
+		return -ENOENT;
+
+	if (!e->owner || e->owner != owner)
+		return -EPERM;
+
+	list_del(&e->list);
+	kfree(e);
+	return 0;
+}
+
+/**
+ * dal_access_policy_allowed - check if owner is allowed to use ta
+ *
+ * @ddev: dal device
+ * @ta_id: trusted application id
+ * @owner: owner
+ *
+ * Return: 0 on success
+ *         -EPERM when owner is not allowed to use ta
+ *
+ * Locking: called under "ddev->write_lock" lock
+ */
+int dal_access_policy_allowed(struct dal_device *ddev,
+			      const uuid_t *ta_id, void *owner)
+{
+	struct list_head *access_list = dal_dev_get_access_list(ddev);
+	struct dal_access_policy *e;
+
+	e = dal_access_policy_find(access_list, ta_id);
+	if (!e)
+		return 0;
+
+	if (e->owner && e->owner != owner)
+		return -EPERM;
+
+	return 0;
+}
+
+/**
+ * dal_access_list_free - free memory of access list
+ *
+ * @ddev: dal device
+ */
+void dal_access_list_free(struct dal_device *ddev)
+{
+	struct list_head *access_list = dal_dev_get_access_list(ddev);
+	struct dal_access_policy *e, *n;
+
+	if (!access_list)
+		return;
+
+	list_for_each_entry_safe(e, n, access_list, list) {
+		list_del(&e->list);
+		kfree(e);
+	}
+
+	kfree(access_list);
+	dev_set_drvdata(&ddev->dev, NULL);
+}
+
+/**
+ * dal_access_list_init - initialize an empty access list
+ *
+ * @ddev: dal device
+ *
+ * Note: Add the spooler ta id with a blank owner to the list.
+ * This prevents any user from setting itself as owner of the spooler,
+ * which would block others from opening a session to it.
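+ *
+ * For example, with the spooler entry in place (the owner pointer is
+ * illustrative):
+ *   dal_access_policy_add(ddev, &spooler_ta_id, owner)     -> -EPERM
+ *   dal_access_policy_remove(ddev, &spooler_ta_id, owner)  -> -EPERM
+ *   dal_access_policy_allowed(ddev, &spooler_ta_id, owner) -> 0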
+ * + * Return: 0 on success + * -ENOMEM on memory allocation failure + */ +int dal_access_list_init(struct dal_device *ddev) +{ + struct list_head *access_list; + + access_list = kzalloc(sizeof(*access_list), GFP_KERNEL); + if (!access_list) + return -ENOMEM; + + INIT_LIST_HEAD(access_list); + dev_set_drvdata(&ddev->dev, access_list); + + /* Nobody can own SPOOLER TA */ + dal_access_policy_add(ddev, &spooler_ta_id, NULL); + + return 0; +} diff --git a/drivers/misc/mei/dal/dal_test.c b/drivers/misc/mei/dal/dal_test.c new file mode 100644 index 000000000000..baa3decab59c --- /dev/null +++ b/drivers/misc/mei/dal/dal_test.c @@ -0,0 +1,829 @@ +/****************************************************************************** + * Intel dal test Linux driver + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Corporation. + * linux-mei@linux.intel.com + * http://www.intel.com + * + * BSD LICENSE + * + * Copyright(c) 2016-2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "uapi/kdi_cmd_defs.h" +#define KDI_MODULE "mei_dal" + +/** + * this is the max data size possible: + * there is no actually max size for acp file, + * but for testing 512k is good enough + */ +#define MAX_DATA_SIZE SZ_512K + +#define KDI_TEST_OPENED 0 + +/** + * struct dal_test_data - dal test cmd and response data + * + * @cmd_data_size: size of cmd got from user space + * @cmd_data: the cmd got from user space + * @cmd_lock: protects cmd_data buffer + * + * @resp_data_size: size of response from kdi + * @resp_data: the response from kdi + * @resp_lock: protects resp_data buffer + */ +struct dal_test_data { + u32 cmd_data_size; + u8 *cmd_data; + struct mutex cmd_lock; /* protects cmd_data buffer */ + + u32 resp_data_size; + u8 *resp_data; + struct mutex resp_lock; /* protects resp_data buffer */ +}; + +/** + * struct dal_test_device - dal test private data + * + * @dev: the device structure + * @cdev: character device + * + * @kdi_test_status: status of test module + * @data: cmd and response data + */ +static struct dal_test_device { + struct device *dev; + struct cdev cdev; + + unsigned long kdi_test_status; + struct dal_test_data *data; +} dal_test_dev; + +#ifdef CONFIG_MODULES +/** + * dal_test_find_module - find the given module + * + * @mod_name: the module name to find + * + * Return: pointer to the module if it is found + * NULL otherwise + */ +static struct module *dal_test_find_module(const char *mod_name) +{ + struct module *mod; + + mutex_lock(&module_mutex); + mod = find_module(mod_name); + mutex_unlock(&module_mutex); + + return mod; +} + +/** + * dal_test_load_kdi - load kdi module + * + * @dev: dal test device + * + * Return: 0 on success + * <0 on failure + */ +static int dal_test_load_kdi(struct dal_test_device *dev) +{ + struct module *mod; + + /* load KDI if it wasn't loaded */ + request_module(KDI_MODULE); + + mod = dal_test_find_module(KDI_MODULE); + if (!mod) { + dev_err(dev->dev, "failed to find KDI module: %s\n", + KDI_MODULE); + return -ENODEV; + } + + if (!try_module_get(mod)) { + dev_err(dev->dev, "failed to get KDI module\n"); + return -EFAULT; + } + + return 0; +} + +/** + * dal_test_unload_kdi - unload kdi module + * + * @dev: dal test device + * + * Return: 0 on success + * <0 on failure + */ +static int dal_test_unload_kdi(struct dal_test_device *dev) +{ + struct module *mod; + + mod = dal_test_find_module(KDI_MODULE); + if (!mod) { + dev_err(dev->dev, "failed to find KDI module: %s\n", + KDI_MODULE); + return -ENODEV; + } + module_put(mod); + + return 0; +} +#else +static inline int dal_test_load_kdi(struct dal_test_device *dev) { return 0; } +static inline int dal_test_unload_kdi(struct dal_test_device *dev) { return 0; } +#endif + +/** + * dal_test_result_set - set data to the result buffer + * + * @test_data: test command and response buffers + * @data: new data + * @size: size of the data buffer + */ +static void dal_test_result_set(struct dal_test_data *test_data, + void *data, u32 size) +{ + memcpy(test_data->resp_data, data, size); + test_data->resp_data_size = size; +} + +/** + * dal_test_result_append - append data to the result buffer + * + * @test_data: test command and response buffers + * @data: new data + * @size: size of the data buffer + */ +static void 
dal_test_result_append(struct dal_test_data *test_data,
+				   void *data, u32 size)
+{
+	size_t offset = test_data->resp_data_size;
+
+	memcpy(test_data->resp_data + offset, data, size);
+	test_data->resp_data_size += size;
+}
+
+/**
+ * dal_test_send_and_recv - call send and receive function of kdi
+ *
+ * @dev: dal test device
+ * @t_cmd: the command to send to kdi
+ * @t_data: test command and response buffers
+ */
+static void dal_test_send_and_recv(struct dal_test_device *dev,
+				   struct kdi_test_command *t_cmd,
+				   struct dal_test_data *t_data)
+{
+	struct send_and_rcv_cmd *cmd;
+	struct send_and_rcv_resp resp;
+	ssize_t data_size;
+	size_t output_len;
+	s32 response_code;
+	u8 *input;
+	u8 *output;
+	s32 status;
+
+	memset(&resp, 0, sizeof(resp));
+
+	cmd = (struct send_and_rcv_cmd *)t_cmd->data;
+	data_size = t_data->cmd_data_size - sizeof(t_cmd->cmd_id) -
+		    sizeof(*cmd);
+	if (data_size < 0) {
+		dev_dbg(dev->dev, "malformed command struct: data_size = %zd\n",
+			data_size);
+		resp.test_mod_status = -EINVAL;
+
+		mutex_lock(&t_data->resp_lock);
+		dal_test_result_set(t_data, &resp, sizeof(resp));
+		mutex_unlock(&t_data->resp_lock);
+		return;
+	}
+
+	response_code = 0;
+	output = NULL;
+	input = (data_size) ? cmd->input : NULL;
+	output_len = (cmd->is_output_len_ptr) ? cmd->output_buf_len : 0;
+
+	dev_dbg(dev->dev, "call dal_send_and_receive: handle=%llu command_id=%d input_len=%zd\n",
+		cmd->session_handle, cmd->command_id, data_size);
+
+	status = dal_send_and_receive(cmd->session_handle, cmd->command_id,
+				      input, data_size,
+				      cmd->is_output_buf ? &output : NULL,
+				      cmd->is_output_len_ptr ?
+				      &output_len : NULL,
+				      cmd->is_response_code_ptr ?
+				      &response_code : NULL);
+
+	dev_dbg(dev->dev, "dal_send_and_receive return: status=%d output_len=%zu response_code=%d\n",
+		status, output_len, response_code);
+
+	resp.output_len = (u32)output_len;
+	resp.response_code = response_code;
+	resp.status = status;
+	resp.test_mod_status = 0;
+
+	/* in case the call failed we don't copy the data */
+	mutex_lock(&t_data->resp_lock);
+	dal_test_result_set(t_data, &resp, sizeof(resp));
+	if (output && resp.output_len)
+		dal_test_result_append(t_data, output, resp.output_len);
+	mutex_unlock(&t_data->resp_lock);
+
+	kfree(output);
+}
+
+/**
+ * dal_test_create_session - call create session function of kdi
+ *
+ * @dev: dal test device
+ * @t_cmd: the command to send to kdi
+ * @t_data: test command and response buffers
+ */
+static void dal_test_create_session(struct dal_test_device *dev,
+				    struct kdi_test_command *t_cmd,
+				    struct dal_test_data *t_data)
+{
+	struct session_create_cmd *cmd;
+	struct session_create_resp resp;
+	u32 data_size;
+	u64 handle;
+	char *app_id;
+	u8 *acp_pkg;
+	u8 *init_params;
+	u32 offset;
+	s32 status;
+
+	memset(&resp, 0, sizeof(resp));
+
+	cmd = (struct session_create_cmd *)t_cmd->data;
+	data_size = t_data->cmd_data_size - sizeof(t_cmd->cmd_id) -
+		    sizeof(*cmd);
+
+	if (cmd->app_id_len + cmd->acp_pkg_len + cmd->init_param_len !=
+	    data_size) {
+		dev_dbg(dev->dev, "malformed command struct: data_size = %u\n",
+			data_size);
+		resp.test_mod_status = -EINVAL;
+
+		mutex_lock(&t_data->resp_lock);
+		dal_test_result_set(t_data, &resp, sizeof(resp));
+		mutex_unlock(&t_data->resp_lock);
+		return;
+	}
+
+	handle = 0;
+
+	offset = 0;
+	app_id = (cmd->app_id_len) ? cmd->data + offset : NULL;
+	offset += cmd->app_id_len;
+
+	acp_pkg = (cmd->acp_pkg_len) ? cmd->data + offset : NULL;
+	offset += cmd->acp_pkg_len;
+
+	init_params = (cmd->init_param_len) ?
cmd->data + offset : NULL; + offset += cmd->init_param_len; + + dev_dbg(dev->dev, "call dal_create_session params: app_id = %s, app_id len = %d, acp pkg len = %d, init params len = %d\n", + app_id, cmd->app_id_len, cmd->acp_pkg_len, cmd->init_param_len); + + status = dal_create_session(cmd->is_session_handle_ptr ? + &handle : NULL, + app_id, acp_pkg, + cmd->acp_pkg_len, + init_params, + cmd->init_param_len); + dev_dbg(dev->dev, "dal_create_session return: status = %d, handle = %llu\n", + status, handle); + + resp.session_handle = handle; + resp.status = status; + resp.test_mod_status = 0; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); +} + +/** + * dal_test_close_session - call close session function of kdi + * + * @dev: dal test device + * @t_cmd: the command to send kdi + * @t_data: test command and response buffers + */ +static void dal_test_close_session(struct dal_test_device *dev, + struct kdi_test_command *t_cmd, + struct dal_test_data *t_data) +{ + struct session_close_cmd *cmd; + struct session_close_resp resp; + + memset(&resp, 0, sizeof(resp)); + + cmd = (struct session_close_cmd *)t_cmd->data; + if (t_data->cmd_data_size != sizeof(t_cmd->cmd_id) + sizeof(*cmd)) { + dev_dbg(dev->dev, "malformed command struct\n"); + resp.test_mod_status = -EINVAL; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); + return; + } + + resp.status = dal_close_session(cmd->session_handle); + resp.test_mod_status = 0; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); +} + +/** + * dal_test_version_info - call get version function of kdi + * + * @dev: dal test device + * @t_cmd: the command to send kdi + * @t_data: test command and response buffers + */ +static void dal_test_version_info(struct dal_test_device *dev, + struct kdi_test_command *t_cmd, + struct dal_test_data *t_data) +{ + struct version_get_info_cmd *cmd; + struct version_get_info_resp resp; + struct dal_version_info *version; + + memset(&resp, 0, sizeof(resp)); + + cmd = (struct version_get_info_cmd *)t_cmd->data; + if (t_data->cmd_data_size != sizeof(t_cmd->cmd_id) + sizeof(*cmd)) { + dev_dbg(dev->dev, "malformed command struct\n"); + resp.test_mod_status = -EINVAL; + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); + return; + } + + version = (cmd->is_version_ptr) ? 
+ (struct dal_version_info *)resp.kdi_version : NULL; + + resp.status = dal_get_version_info(version); + resp.test_mod_status = 0; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); +} + +/** + * dal_test_set_ex_access - call set/remove access function of kdi + * + * @dev: dal test device + * @t_cmd: the command to send kdi + * @t_data: test command and response buffers + * @set_access: true when calling set access function + * false when calling remove access function + */ +static void dal_test_set_ex_access(struct dal_test_device *dev, + struct kdi_test_command *t_cmd, + struct dal_test_data *t_data, + bool set_access) +{ + struct ta_access_set_remove_cmd *cmd; + struct ta_access_set_remove_resp resp; + u32 data_size; + uuid_t app_uuid; + char *app_id; + s32 status; + + memset(&resp, 0, sizeof(resp)); + + cmd = (struct ta_access_set_remove_cmd *)t_cmd->data; + data_size = t_data->cmd_data_size - sizeof(t_cmd->cmd_id) - + sizeof(*cmd); + + if (cmd->app_id_len != data_size) { + dev_dbg(dev->dev, "malformed command struct\n"); + resp.test_mod_status = -EINVAL; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); + return; + } + + app_id = (cmd->app_id_len) ? cmd->data : NULL; + + status = dal_uuid_parse(app_id, &app_uuid); + if (status < 0) + goto out; + + if (set_access) + status = dal_set_ta_exclusive_access(&app_uuid); + else + status = dal_unset_ta_exclusive_access(&app_uuid); + +out: + resp.status = status; + resp.test_mod_status = 0; + + mutex_lock(&t_data->resp_lock); + dal_test_result_set(t_data, &resp, sizeof(resp)); + mutex_unlock(&t_data->resp_lock); +} + +/** + * dal_test_kdi_command - parse and invoke the requested command + * + * @dev: dal test device + */ +static void dal_test_kdi_command(struct dal_test_device *dev) +{ + struct dal_test_data *test_data; + struct kdi_test_command *cmd; + s32 status; + + test_data = dev->data; + cmd = (struct kdi_test_command *)test_data->cmd_data; + + if (test_data->cmd_data_size < sizeof(cmd->cmd_id)) { + dev_dbg(dev->dev, "malformed command struct\n"); + status = -EINVAL; + goto prep_err_test_mod; + } + + switch (cmd->cmd_id) { + case KDI_SESSION_CREATE: { + dev_dbg(dev->dev, "KDI_CREATE_SESSION[%d]\n", cmd->cmd_id); + dal_test_create_session(dev, cmd, test_data); + break; + } + case KDI_SESSION_CLOSE: { + dev_dbg(dev->dev, "KDI_CLOSE_SESSION[%d]\n", cmd->cmd_id); + dal_test_close_session(dev, cmd, test_data); + break; + } + case KDI_SEND_AND_RCV: { + dev_dbg(dev->dev, "KDI_SEND_AND_RCV[%d]\n", cmd->cmd_id); + dal_test_send_and_recv(dev, cmd, test_data); + break; + } + case KDI_VERSION_GET_INFO: { + dev_dbg(dev->dev, "KDI_GET_VERSION_INFO[%d]\n", cmd->cmd_id); + dal_test_version_info(dev, cmd, test_data); + break; + } + case KDI_EXCLUSIVE_ACCESS_SET: + case KDI_EXCLUSIVE_ACCESS_REMOVE: { + dev_dbg(dev->dev, "KDI_SET_EXCLUSIVE_ACCESS or KDI_REMOVE_EXCLUSIVE_ACCESS[%d]\n", + cmd->cmd_id); + dal_test_set_ex_access(dev, cmd, test_data, + cmd->cmd_id == KDI_EXCLUSIVE_ACCESS_SET); + break; + } + default: + dev_dbg(dev->dev, "unknown command %d\n", cmd->cmd_id); + status = -EINVAL; + goto prep_err_test_mod; + } + + return; + +prep_err_test_mod: + mutex_lock(&test_data->resp_lock); + dal_test_result_set(test_data, &status, sizeof(status)); + mutex_unlock(&test_data->resp_lock); +} + +/** + * dal_test_read - dal test read function + * + * @filp: pointer to file structure + * @buff: pointer to user buffer + * 
@count: buffer length + * @offp: data offset in buffer + * + * Return: >=0 data length on success + * <0 on failure + */ +static ssize_t dal_test_read(struct file *filp, char __user *buff, size_t count, + loff_t *offp) +{ + struct dal_test_device *dev; + struct dal_test_data *test_data; + int ret; + + dev = filp->private_data; + test_data = dev->data; + + mutex_lock(&test_data->resp_lock); + + if (test_data->resp_data_size > count) { + ret = -EMSGSIZE; + goto unlock; + } + + dev_dbg(dev->dev, "copying %d bytes to userspace\n", + test_data->resp_data_size); + if (copy_to_user(buff, test_data->resp_data, + test_data->resp_data_size)) { + dev_dbg(dev->dev, "copy_to_user failed\n"); + ret = -EFAULT; + goto unlock; + } + ret = test_data->resp_data_size; + +unlock: + mutex_unlock(&test_data->resp_lock); + + return ret; +} + +/** + * dal_test_write - dal test write function + * + * @filp: pointer to file structure + * @buff: pointer to user buffer + * @count: buffer length + * @offp: data offset in buffer + * + * Return: >=0 data length on success + * <0 on failure + */ +static ssize_t dal_test_write(struct file *filp, const char __user *buff, + size_t count, loff_t *offp) +{ + struct dal_test_device *dev; + struct dal_test_data *test_data; + + dev = filp->private_data; + test_data = dev->data; + + if (count > MAX_DATA_SIZE) + return -EMSGSIZE; + + mutex_lock(&test_data->cmd_lock); + + if (copy_from_user(test_data->cmd_data, buff, count)) { + mutex_unlock(&test_data->cmd_lock); + dev_dbg(dev->dev, "copy_from_user failed\n"); + return -EFAULT; + } + + test_data->cmd_data_size = count; + dev_dbg(dev->dev, "write %zu bytes\n", count); + + dal_test_kdi_command(dev); + + mutex_unlock(&test_data->cmd_lock); + + return count; +} + +/** + * dal_test_open - dal test open function + * + * @inode: pointer to inode structure + * @filp: pointer to file structure + * + * Return: 0 on success + * <0 on failure + */ +static int dal_test_open(struct inode *inode, struct file *filp) +{ + struct dal_test_device *dev; + struct dal_test_data *test_data; + int ret; + + dev = container_of(inode->i_cdev, struct dal_test_device, cdev); + if (!dev) + return -ENODEV; + + /* single open */ + if (test_and_set_bit(KDI_TEST_OPENED, &dev->kdi_test_status)) + return -EBUSY; + + test_data = kzalloc(sizeof(*test_data), GFP_KERNEL); + if (!test_data) { + ret = -ENOMEM; + goto err_clear_bit; + } + + test_data->cmd_data = kzalloc(MAX_DATA_SIZE, GFP_KERNEL); + test_data->resp_data = kzalloc(MAX_DATA_SIZE, GFP_KERNEL); + if (!test_data->cmd_data || !test_data->resp_data) { + ret = -ENOMEM; + goto err_free; + } + + mutex_init(&test_data->cmd_lock); + mutex_init(&test_data->resp_lock); + + ret = dal_test_load_kdi(dev); + if (ret) + goto err_free; + + dev->data = test_data; + filp->private_data = dev; + + return nonseekable_open(inode, filp); + +err_free: + kfree(test_data->cmd_data); + kfree(test_data->resp_data); + kfree(test_data); + +err_clear_bit: + clear_bit(KDI_TEST_OPENED, &dev->kdi_test_status); + + return ret; +} + +/** + * dal_test_release - dal test release function + * + * @inode: pointer to inode structure + * @filp: pointer to file structure + * + * Return: 0 on success + * <0 on failure + */ +static int dal_test_release(struct inode *inode, struct file *filp) +{ + struct dal_test_device *dev; + struct dal_test_data *test_data; + + dev = filp->private_data; + if (!dev) + return -ENODEV; + + dal_test_unload_kdi(dev); + + test_data = dev->data; + if (test_data) { + kfree(test_data->cmd_data); + kfree(test_data->resp_data); + 
kfree(test_data);
+	}
+
+	clear_bit(KDI_TEST_OPENED, &dev->kdi_test_status);
+
+	filp->private_data = NULL;
+
+	return 0;
+}
+
+static const struct file_operations dal_test_fops = {
+	.owner = THIS_MODULE,
+	.open = dal_test_open,
+	.release = dal_test_release,
+	.read = dal_test_read,
+	.write = dal_test_write,
+	.llseek = no_llseek,
+};
+
+/**
+ * dal_test_exit - destroy dal test device
+ */
+static void __exit dal_test_exit(void)
+{
+	struct dal_test_device *dev = &dal_test_dev;
+	struct class *dal_test_class;
+	dev_t devt;
+
+	dal_test_class = dev->dev->class;
+	devt = dev->dev->devt;
+
+	cdev_del(&dev->cdev);
+	device_destroy(dal_test_class, devt);
+	class_destroy(dal_test_class);
+	unregister_chrdev_region(devt, 1);
+}
+
+/**
+ * dal_test_init - initialize dal test device
+ *
+ * Return: 0 on success
+ *         <0 on failure
+ */
+static int __init dal_test_init(void)
+{
+	struct dal_test_device *dev = &dal_test_dev;
+	struct class *dal_test_class;
+	dev_t devt;
+	int ret;
+
+	ret = alloc_chrdev_region(&devt, 0, 1, "mei_dal_test");
+	if (ret)
+		return ret;
+
+	dal_test_class = class_create(THIS_MODULE, "mei_dal_test");
+	if (IS_ERR(dal_test_class)) {
+		ret = PTR_ERR(dal_test_class);
+		dal_test_class = NULL;
+		goto err_unregister_cdev;
+	}
+
+	dev->dev = device_create(dal_test_class, NULL, devt, dev, "dal_test0");
+	if (IS_ERR(dev->dev)) {
+		ret = PTR_ERR(dev->dev);
+		goto err_class_destroy;
+	}
+
+	cdev_init(&dev->cdev, &dal_test_fops);
+	dev->cdev.owner = THIS_MODULE;
+	ret = cdev_add(&dev->cdev, devt, 1);
+	if (ret)
+		goto err_device_destroy;
+
+	return 0;
+
+err_device_destroy:
+	device_destroy(dal_test_class, devt);
+err_class_destroy:
+	class_destroy(dal_test_class);
+err_unregister_cdev:
+	unregister_chrdev_region(devt, 1);
+
+	return ret;
+}
+
+module_init(dal_test_init);
+module_exit(dal_test_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) DAL test");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mei/dal/uapi/kdi_cmd_defs.h b/drivers/misc/mei/dal/uapi/kdi_cmd_defs.h
new file mode 100644
index 000000000000..ed7b8b707b73
--- /dev/null
+++ b/drivers/misc/mei/dal/uapi/kdi_cmd_defs.h
@@ -0,0 +1,230 @@
+/******************************************************************************
+ * Intel mei_dal test Linux driver
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Corporation.
+ * linux-mei@linux.intel.com
+ * http://www.intel.com
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef KDI_CMD_DEFS_H +#define KDI_CMD_DEFS_H + +/** + * enum kdi_command_id - cmd id to invoke in kdi module + * + * @KDI_SESSION_CREATE: call kdi "create session" function + * @KDI_SESSION_CLOSE: call kdi "close session" function + * @KDI_SEND_AND_RCV: call kdi "send and receive" function + * @KDI_VERSION_GET_INFO: call kdi "get version" function + * @KDI_EXCLUSIVE_ACCESS_SET: call kdi "set exclusive access" function + * @KDI_EXCLUSIVE_ACCESS_REMOVE: call kdi "unset exclusive access" function + */ +enum kdi_command_id { + KDI_SESSION_CREATE, + KDI_SESSION_CLOSE, + KDI_SEND_AND_RCV, + KDI_VERSION_GET_INFO, + KDI_EXCLUSIVE_ACCESS_SET, + KDI_EXCLUSIVE_ACCESS_REMOVE +}; + +/** + * struct kdi_test_command - contains the command received from user space + * + * @cmd_id: the command id + * @data: the command data + */ +struct kdi_test_command { + __u8 cmd_id; + unsigned char data[0]; +} __packed; + +/** + * struct session_create_cmd - create session cmd data + * + * @app_id_len: length of app_id arg + * @acp_pkg_len: length of the acp_pkg arg + * @init_param_len: length of init param arg + * @is_session_handle_ptr: either send kdi a valid ptr to hold the + * session handle or NULL + * @data: buffer to hold the cmd arguments + */ +struct session_create_cmd { + __u32 app_id_len; + __u32 acp_pkg_len; + __u32 init_param_len; + __u8 is_session_handle_ptr; + unsigned char data[0]; +} __packed; + +/** + * struct session_create_resp - create session response + * + * @session_handle: the session handle + * @test_mod_status: status returned from the test module + * @status: status returned from kdi + */ +struct session_create_resp { + __u64 session_handle; + __s32 test_mod_status; + __s32 status; +} __packed; + +/** + * struct session_close_cmd - close session cmd + * + * @session_handle: the session handle to close + */ +struct session_close_cmd { + __u64 session_handle; +} __packed; + +/** + * struct session_close_resp - close 
session response + * + * @test_mod_status: status returned from the test module + * @status: status returned from kdi + */ +struct session_close_resp { + __s32 test_mod_status; + __s32 status; +} __packed; + +/** + * struct send_and_rcv_cmd - send and receive cmd + * + * @session_handle: the session handle + * @command_id: the cmd id to send the applet + * @output_buf_len: the size of the output buffer + * @is_output_buf: either send kdi a valid ptr to hold the output buffer or NULL + * @is_output_len_ptr: either send kdi a valid ptr to hold + * the output len or NULL + * @is_response_code_ptr: either send kdi a valid ptr to hold + * the applet response code or NULL + * @input: the input data to send the applet + */ +struct send_and_rcv_cmd { + __u64 session_handle; + __u32 command_id; + __u32 output_buf_len; + __u8 is_output_buf; + __u8 is_output_len_ptr; + __u8 is_response_code_ptr; + unsigned char input[0]; +} __packed; + +/** + * struct send_and_rcv_resp - send and receive response + * + * @test_mod_status: status returned from the test module + * @status: status returned from kdi + * @response_code: response code returned from the applet + * @output_len: length of output from the applet + * @output: the output got from the applet + */ +struct send_and_rcv_resp { + __s32 test_mod_status; + __s32 status; + __s32 response_code; + __u32 output_len; + unsigned char output[0]; +} __packed; + +/** + * struct version_get_info_cmd - get version cmd + * + * @is_version_ptr: either send kdi a valid ptr to hold the version info or NULL + */ +struct version_get_info_cmd { + __u8 is_version_ptr; +} __packed; + +/** + * struct version_get_info_resp - get version response + * + * @kdi_version: kdi version + * @reserved: reserved bytes + * @test_mod_status: status returned from the test module + * @status: status returned from kdi + */ +struct version_get_info_resp { + char kdi_version[32]; + __u32 reserved[4]; + __s32 test_mod_status; + __s32 status; +} __packed; + +/** + * struct ta_access_set_remove_cmd - set/remove access cmd + * + * @app_id_len: length of app_id arg + * @data: the cmd data. contains the app_id + */ +struct ta_access_set_remove_cmd { + __u32 app_id_len; + unsigned char data[0]; +} __packed; + +/** + * struct ta_access_set_remove_resp - set/remove access response + * + * @test_mod_status: status returned from the test module + * @status: status returned from kdi + */ +struct ta_access_set_remove_resp { + __s32 test_mod_status; + __s32 status; +} __packed; + +#endif /* KDI_CMD_DEFS_H */ diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index a617aa5a3ad8..407718faf32f 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c @@ -182,6 +182,8 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf, dev->hbm_f_fa_supported); pos += scnprintf(buf + pos, bufsz - pos, "\tOS: %01d\n", dev->hbm_f_os_supported); + pos += scnprintf(buf + pos, bufsz - pos, "\tDR: %01d\n", + dev->hbm_f_dr_supported); } pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n", diff --git a/drivers/misc/mei/dma-ring.c b/drivers/misc/mei/dma-ring.c new file mode 100644 index 000000000000..f841b9f8c054 --- /dev/null +++ b/drivers/misc/mei/dma-ring.c @@ -0,0 +1,233 @@ +/* + * + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2016, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#include +#include + +#include "mei_dev.h" + +static int mei_dmam_dscr_alloc(struct mei_device *dev, + struct mei_dma_dscr *dscr) +{ + if (!dscr->size) + return 0; + + if (dscr->vaddr) + return 0; + + dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr, + GFP_KERNEL); + if (!dscr->vaddr) + return -ENOMEM; + + return 0; +} + +static void mei_dmam_dscr_free(struct mei_device *dev, + struct mei_dma_dscr *dscr) +{ + if (!dscr->vaddr) + return; + + dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr); + dscr->vaddr = NULL; +} + +/** + * mei_dmam_ring_free - free dma ring buffers + * + * @dev: mei device + */ +void mei_dmam_ring_free(struct mei_device *dev) +{ + int i; + + for (i = 0; i < DMA_DSCR_NUM; i++) + mei_dmam_dscr_free(dev, &dev->dr_dscr[i]); +} + +/** + * mei_dmam_ring_alloc - allocate dma ring buffers + * + * @dev: mei device + * + * Return: -ENOMEM on allocation failure 0 otherwise + */ +int mei_dmam_ring_alloc(struct mei_device *dev) +{ + int i; + + for (i = 0; i < DMA_DSCR_NUM; i++) + if (mei_dmam_dscr_alloc(dev, &dev->dr_dscr[i])) + goto err; + + return 0; + +err: + mei_dmam_ring_free(dev); + return -ENOMEM; +} + +/** + * mei_dma_ring_is_allocated - check if dma ring is allocated + * + * @dev: mei device + * + * Return: true if dma ring is allocated + */ +bool mei_dma_ring_is_allocated(struct mei_device *dev) +{ + return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr; +} + +static inline +struct hbm_dma_ring_ctrl *mei_dma_ring_ctrl(struct mei_device *dev) +{ + return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr; +} + +void mei_dma_ring_reset(struct mei_device *dev) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + + if (!ctrl) + return; + + memset(ctrl, 0, sizeof(*ctrl)); +} + +static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf, + u32 offset, u32 n) +{ + unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr; + + size_t b_offset = offset << 2; + size_t b_n = n << 2; + + memcpy(buf, dbuf + b_offset, b_n); + + return b_n; +} + +static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf, + u32 offset, u32 n) +{ + unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr; + + size_t b_offset = offset << 2; + size_t b_n = n << 2; + + memcpy(hbuf + b_offset, buf, b_n); + + return b_n; +} +void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len) +{ + struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev); + u32 dbuf_depth; + u32 rd_idx, rem, slots; + + if (WARN_ON(!ctrl)) + return; + + dev_dbg(dev->dev, "reading from dma %u bytes\n", len); + dbuf_depth = dev->dr_dscr[DMA_DSCR_DEVICE].size >> 2; + rd_idx = READ_ONCE(ctrl->dbuf_rd_idx) & (dbuf_depth - 1); + slots = DIV_ROUND_UP(len, MEI_DMA_SLOT_SIZE); + if (!buf) + goto out; + + if (rd_idx + slots > dbuf_depth) { + buf += mei_dma_copy_from(dev, buf, rd_idx, dbuf_depth - rd_idx); + rem = slots - (dbuf_depth - rd_idx); + rd_idx = 0; + } else { + rem = slots; + } + + mei_dma_copy_from(dev, buf, rd_idx, rem); +out: + WRITE_ONCE(ctrl->dbuf_rd_idx, ctrl->dbuf_rd_idx + slots); 
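+	/*
+	 * Note: the read index is advanced by the full slot count even
+	 * when @buf is NULL, letting a caller drop a pending message.
+	 * The arithmetic above assumes a power-of-two buffer depth:
+	 * indices grow monotonically and are masked with (depth - 1).
+	 */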
+}
+
+static inline u32 mei_dma_ring_hbuf_depth(struct mei_device *dev)
+{
+	return dev->dr_dscr[DMA_DSCR_HOST].size >> 2;
+}
+
+/**
+ * mei_dma_ring_empty_slots - calculate number of empty slots in dma ring
+ *
+ * @dev: mei_device
+ *
+ * Return: number of empty slots
+ */
+u32 mei_dma_ring_empty_slots(struct mei_device *dev)
+{
+	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
+	u32 wr_idx, rd_idx, hbuf_depth, empty;
+
+	if (!mei_dma_ring_is_allocated(dev))
+		return 0;
+
+	if (WARN_ON(!ctrl))
+		return 0;
+
+	/* easier to work in slots */
+	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
+	rd_idx = READ_ONCE(ctrl->hbuf_rd_idx);
+	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx);
+
+	if (rd_idx > wr_idx)
+		empty = rd_idx - wr_idx;
+	else
+		empty = hbuf_depth - (wr_idx - rd_idx);
+
+	return empty;
+}
+
+/**
+ * mei_dma_ring_write - write data to dma ring host buffer
+ *
+ * @dev: mei_device
+ * @buf: data to be written
+ * @len: data length
+ */
+void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len)
+{
+	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
+	u32 hbuf_depth;
+	u32 wr_idx, rem, slots;
+
+	if (WARN_ON(!ctrl))
+		return;
+
+	dev_dbg(dev->dev, "writing to dma %u bytes\n", len);
+	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
+	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1);
+	slots = DIV_ROUND_UP(len, MEI_DMA_SLOT_SIZE);
+
+	if (wr_idx + slots > hbuf_depth) {
+		buf += mei_dma_copy_to(dev, buf, wr_idx, hbuf_depth - wr_idx);
+		rem = slots - (hbuf_depth - wr_idx);
+		wr_idx = 0;
+	} else {
+		rem = slots;
+	}
+
+	mei_dma_copy_to(dev, buf, wr_idx, rem);
+
+	WRITE_ONCE(ctrl->hbuf_wr_idx, ctrl->hbuf_wr_idx + slots);
+}
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index fe6595fe94f1..81543bfbe194 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -65,6 +65,7 @@ const char *mei_hbm_state_str(enum mei_hbm_state state)
 	MEI_HBM_STATE(IDLE);
 	MEI_HBM_STATE(STARTING);
 	MEI_HBM_STATE(STARTED);
+	MEI_HBM_STATE(DR_SETUP);
 	MEI_HBM_STATE(ENUM_CLIENTS);
 	MEI_HBM_STATE(CLIENT_PROPERTIES);
 	MEI_HBM_STATE(STOPPED);
@@ -131,6 +132,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
 	hdr->me_addr = 0;
 	hdr->length = length;
 	hdr->msg_complete = 1;
+	hdr->dma_ring = 0;
 	hdr->reserved = 0;
 	hdr->internal = 0;
 }
@@ -280,6 +282,49 @@ int mei_hbm_start_req(struct mei_device *dev)
 	return 0;
 }
 
+/**
+ * mei_hbm_dma_setup_req - setup DMA request
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success and < 0 on failure
+ */
+static int mei_hbm_dma_setup_req(struct mei_device *dev)
+{
+	struct mei_msg_hdr mei_hdr;
+	struct hbm_dma_setup_request req;
+	const size_t len = sizeof(struct hbm_dma_setup_request);
+	int i;
+	int ret;
+
+	mei_hbm_hdr(&mei_hdr, len);
+
+	memset(&req, 0, len);
+	req.hbm_cmd = MEI_HBM_DMA_SETUP_REQ_CMD;
+	for (i = 0; i < DMA_DSCR_NUM; i++) {
+		phys_addr_t paddr;
+
+		paddr = dev->dr_dscr[i].daddr;
+		req.dma_dscr[i].addr_hi = upper_32_bits(paddr);
+		req.dma_dscr[i].addr_lo = lower_32_bits(paddr);
+		req.dma_dscr[i].size = dev->dr_dscr[i].size;
+	}
+
+	mei_dma_ring_reset(dev);
+
+	ret = mei_write_message(dev, &mei_hdr, &req);
+	if (ret) {
+		dev_err(dev->dev, "dma setup request write failed: ret = %d.\n",
+			ret);
+		return ret;
+	}
+
+	dev->hbm_state = MEI_HBM_DR_SETUP;
+	dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+	mei_schedule_stall_timer(dev);
+	return 0;
+}
+
 /**
  * mei_hbm_enum_clients_req - sends enumeration client request message.
* @@ -992,6 +1037,12 @@ static void mei_hbm_config_features(struct mei_device *dev) /* OS ver message Support */ if (dev->version.major_version >= HBM_MAJOR_VERSION_OS) dev->hbm_f_os_supported = 1; + + /* DMA Ring Support */ + if (dev->version.major_version > HBM_MAJOR_VERSION_DR || + (dev->version.major_version == HBM_MAJOR_VERSION_DR && + dev->version.minor_version >= HBM_MINOR_VERSION_DR)) + dev->hbm_f_dr_supported = 1; } /** @@ -1023,6 +1074,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) struct hbm_host_version_response *version_res; struct hbm_props_response *props_res; struct hbm_host_enum_response *enum_res; + struct hbm_dma_setup_response *dma_setup_res; struct hbm_add_client_request *add_cl_req; int ret; @@ -1087,14 +1139,52 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) return -EPROTO; } - if (mei_hbm_enum_clients_req(dev)) { - dev_err(dev->dev, "hbm: start: failed to send enumeration request\n"); - return -EIO; + if (dev->hbm_f_dr_supported) { + if (mei_dmam_ring_alloc(dev)) + dev_info(dev->dev, "running w/o dma ring\n"); + if (mei_dma_ring_is_allocated(dev)) { + if (mei_hbm_dma_setup_req(dev)) + return -EIO; + + wake_up(&dev->wait_hbm_start); + break; + } } + dev->hbm_f_dr_supported = 0; + mei_dmam_ring_free(dev); + + if (mei_hbm_enum_clients_req(dev)) + return -EIO; + wake_up(&dev->wait_hbm_start); break; + case MEI_HBM_DMA_SETUP_RES_CMD: + dev_dbg(dev->dev, "hbm: dma setup response: message received.\n"); + + dev->init_clients_timer = 0; + + if (dev->hbm_state != MEI_HBM_DR_SETUP) { + dev_err(dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + dma_setup_res = (struct hbm_dma_setup_response *)mei_msg; + + if (dma_setup_res->status) { + dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n", + dma_setup_res->status, + mei_hbm_status_str(dma_setup_res->status)); + dev->hbm_f_dr_supported = 0; + mei_dmam_ring_free(dev); + } + + if (mei_hbm_enum_clients_req(dev)) + return -EIO; + break; + case CLIENT_CONNECT_RES_CMD: dev_dbg(dev->dev, "hbm: client connect response: message received.\n"); mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_CONNECT); diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h index a2025a5083a3..0171a7e79bab 100644 --- a/drivers/misc/mei/hbm.h +++ b/drivers/misc/mei/hbm.h @@ -26,6 +26,7 @@ struct mei_cl; * * @MEI_HBM_IDLE : protocol not started * @MEI_HBM_STARTING : start request message was sent + * @MEI_HBM_DR_SETUP : dma ring setup request message was sent * @MEI_HBM_ENUM_CLIENTS : enumeration request was sent * @MEI_HBM_CLIENT_PROPERTIES : acquiring clients properties * @MEI_HBM_STARTED : enumeration was completed @@ -34,6 +35,7 @@ struct mei_cl; enum mei_hbm_state { MEI_HBM_IDLE = 0, MEI_HBM_STARTING, + MEI_HBM_DR_SETUP, MEI_HBM_ENUM_CLIENTS, MEI_HBM_CLIENT_PROPERTIES, MEI_HBM_STARTED, diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 0ccccbaf530d..79f6dc63449c 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -126,12 +126,24 @@ #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ +#define MEI_DEV_ID_DNV 0x19D3 /* Denverton (BXT) */ #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */ #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ +#define MEI_DEV_ID_CNP_LP 0x9DE0 /* Cannon Point LP */ +#define MEI_DEV_ID_CNP_LP_2 0x9DE1 /* Cannon Point LP 2 */ +#define 
MEI_DEV_ID_CNP_LP_4 0x9DE4 /* Cannon Point LP 4 (iTouch) */ +#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ +#define MEI_DEV_ID_CNP_H_2 0xA361 /* Cannon Point H 2 */ +#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ + +#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ +#define MEI_DEV_ID_ICP_N 0x38E0 /* Ice Lake Point N */ +#define MEI_DEV_ID_ICP_H 0x3DE0 /* Ice Lake Point H */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 10dcf4ff99a5..d6183537bdf3 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "mei_dev.h" #include "hbm.h" @@ -1375,6 +1376,11 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev) .fw_status.status[4] = PCI_CFG_HFS_5, \ .fw_status.status[5] = PCI_CFG_HFS_6 +#define MEI_CFG_DMA_128 \ + .dma_size[DMA_DSCR_HOST] = SZ_128K, \ + .dma_size[DMA_DSCR_DEVICE] = SZ_128K, \ + .dma_size[DMA_DSCR_CTRL] = PAGE_SIZE + /* ICH Legacy devices */ static const struct mei_cfg mei_me_ich_cfg = { MEI_CFG_ICH_HFS, @@ -1407,6 +1413,12 @@ static const struct mei_cfg mei_me_pch8_sps_cfg = { MEI_CFG_FW_SPS, }; +/* Cannon Lake and newer devices */ +static const struct mei_cfg mei_me_pch12_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_DMA_128, +}; + /* * mei_cfg_list - A list of platform platform specific configurations. * Note: has to be synchronized with enum mei_cfg_idx. @@ -1419,6 +1431,7 @@ static const struct mei_cfg *const mei_cfg_list[] = { [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg, [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg, [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg, + [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg, }; const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx) @@ -1444,15 +1457,21 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev, { struct mei_device *dev; struct mei_me_hw *hw; + int i; dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) + sizeof(struct mei_me_hw), GFP_KERNEL); if (!dev) return NULL; + hw = to_me_hw(dev); + for (i = 0; i < DMA_DSCR_NUM; i++) + dev->dr_dscr[i].size = cfg->dma_size[i]; + mei_device_init(dev, &pdev->dev, &mei_me_hw_ops); hw->cfg = cfg; + return dev; } diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h index 67892533576e..5f0fd8934db7 100644 --- a/drivers/misc/mei/hw-me.h +++ b/drivers/misc/mei/hw-me.h @@ -31,10 +31,12 @@ * * @fw_status: FW status * @quirk_probe: device exclusion quirk + * @dma_size: device DMA buffers size */ struct mei_cfg { const struct mei_fw_status fw_status; bool (*quirk_probe)(struct pci_dev *pdev); + size_t dma_size[DMA_DSCR_NUM]; }; @@ -78,6 +80,7 @@ struct mei_me_hw { * @MEI_ME_PCH8_SPS_CFG: Platform Controller Hub Gen8 and newer * servers platforms with quirk for * SPS firmware exclusion. + * @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer * @MEI_ME_NUM_CFG: Upper Sentinel. */ enum mei_cfg_idx { @@ -88,6 +91,7 @@ enum mei_cfg_idx { MEI_ME_PCH_CPT_PBG_CFG, MEI_ME_PCH8_CFG, MEI_ME_PCH8_SPS_CFG, + MEI_ME_PCH12_CFG, MEI_ME_NUM_CFG, }; diff --git a/drivers/misc/mei/hw-virtio.c b/drivers/misc/mei/hw-virtio.c new file mode 100644 index 000000000000..72386b399d6b --- /dev/null +++ b/drivers/misc/mei/hw-virtio.c @@ -0,0 +1,802 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) +/* + * Intel Management Engine Interface (Intel MEI) Linux driver + * Copyright (c) 2018, Intel Corporation. 
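+ *
+ * Virtio front-end transport for MEI: the memory-mapped register
+ * interface is replaced by two virtqueues ("in" for backend-to-host
+ * data, "out" for host-to-backend data) plus a device config space
+ * carrying buf_depth, hw_ready and host_reset (struct mei_virtio_cfg
+ * below).  Reset and start are driven by config-change interrupts,
+ * data transfer by the virtqueue callbacks.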
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mei_dev.h" +#include "hbm.h" +#include "client.h" + +/* FIXME: move to mei_dev.h */ +static inline u32 mei_len2slots(u32 len) +{ + return DIV_ROUND_UP(len, sizeof(u32)); +} + +/* FIXME: need to adjust backend */ +struct mei_virtio_cfg { + u32 buf_depth; + u8 hw_ready; + u8 host_reset; + u8 reserved[2]; +} __packed; + +struct mei_virtio_hw { + struct virtio_device *vdev; + struct mei_device mdev; + char name[32]; + + struct virtqueue *in; + struct virtqueue *out; + + bool host_ready; + struct work_struct intr_handler; + + /* recv buffer stuff, double/triple buffer? */ + u32 *recv_buf; + size_t recv_sz; + int recv_idx; + u32 recv_len; + bool recv_in_progress; + spinlock_t recv_lock; /* locks receiving buffer */ + + struct mei_virtio_cfg cfg; +}; + +#define to_virtio_hw(_dev) container_of(_dev, struct mei_virtio_hw, mdev) + +/** + * mei_vritio_fw_status - read status register of HECI + * + * @dev: mei device + * @fw_status: fw status register values + * + * Return: 0 on success, error otherwise + */ +static int mei_vritio_fw_status(struct mei_device *dev, + struct mei_fw_status *fw_status) +{ + int i; + + /* TODO: fake FW status in PV mode */ + fw_status->count = MEI_FW_STATUS_MAX; + for (i = 0; i < MEI_FW_STATUS_MAX; i++) + fw_status->status[i] = 0; + return 0; +} + +/** + * mei_vritio_pg_state - translate internal pg state + * to the mei power gating state + * + * @dev: mei device + * + * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise + */ +static inline enum mei_pg_state mei_vritio_pg_state(struct mei_device *dev) +{ + /* TODO: not support power management in PV mode */ + return MEI_PG_OFF; +} + +/** + * mei_vritio_hw_config - configure hw dependent settings + * + * @dev: mei device + */ +static void mei_vritio_hw_config(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + /* + * hbuf_depth is u8 which limit max depth to 255. + */ + dev->hbuf_depth = min_t(int, hw->cfg.buf_depth, 255); +} + +/** + * mei_vritio_hbuf_empty_slots - counts write empty slots. + * + * @dev: the device structure + * + * Return: always return frontend buf size as we use virtio transfer data + */ +static int mei_vritio_hbuf_empty_slots(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + return hw->cfg.buf_depth; +} + +/** + * mei_vritio_hbuf_is_empty - checks if write buffer is empty. + * + * @dev: the device structure + * + * Return: true always + */ +static bool mei_vritio_hbuf_is_empty(struct mei_device *dev) +{ + /* + * We are using synchronous sending and virtio virtqueue, so treat + * the write buffer always be avaiabled. + */ + return true; +} + +/** + * mei_vritio_hbuf_max_len - returns size of FE write buffer. + * + * @dev: the device structure + * + * Return: size of frontend write buffer in bytes + */ +static size_t mei_vritio_hbuf_max_len(const struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + return mei_slots2msglen(hw->cfg.buf_depth); +} + +/** + * mei_vritio_intr_clear - clear and stop interrupts + * + * @dev: the device structure + */ +static void mei_vritio_intr_clear(struct mei_device *dev) +{ + /* + * In our virtio solution, there are two types of interrupts, + * vq interrupt and config change interrupt. + * 1) start/reset rely on virtio config changed interrupt; + * 2) send/recv rely on virtio virtqueue interrupts. + * They are all virtual interrupts. 
So, we don't have corresponding + * operation to do here. + */ +} + +/** + * mei_vritio_intr_enable - enables mei BE virtqueues callbacks + * + * @dev: the device structure + */ +static void mei_vritio_intr_enable(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + /* mostly, the interrupts are for read/write FIFO */ + virtqueue_enable_cb(hw->in); + virtqueue_enable_cb(hw->out); +} + +/** + * mei_vritio_intr_disable - disables mei BE virtqueues callbacks + * + * @dev: the device structure + */ +static void mei_vritio_intr_disable(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + /* mostly, the interrupts are for read/write FIFO */ + virtqueue_disable_cb(hw->in); + virtqueue_disable_cb(hw->out); +} + +/** + * mei_vritio_synchronize_irq - wait for pending IRQ handlers for all virtqueue + * + * @dev: the device structure + */ +static void mei_vritio_synchronize_irq(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + /* + * Now, all IRQ handlers are converted to workqueue. + * Change synchronize irq to flush this work. + */ + flush_work(&hw->intr_handler); +} + +/** + * mei_vritio_write_message - writes a message to mei virtio back-end service. + * + * @dev: the device structure + * @hdr: mei HECI header of message + * @buf: message payload will be written + * + * Return: -EIO if write has failed + */ +static int mei_vritio_write_message(struct mei_device *dev, + struct mei_msg_hdr *hdr, + const unsigned char *buf) +{ + /* FIXME: why we need this to be static */ + static struct mei_msg_hdr header; + struct mei_virtio_hw *hw = to_virtio_hw(dev); + unsigned long length = hdr->length; + struct scatterlist sg[2]; + unsigned int len; + const unsigned char *kbuf; + + /* + * Some bufs are on the stack to satisfy scatter list requirement, + * we need make sure they are all continuous in the physical memory. + */ + header = *hdr; + + if (!virt_addr_valid(buf)) + kbuf = kmemdup(buf, length, GFP_KERNEL); + else + kbuf = buf; + + if (!kbuf) + return -ENOMEM; + + dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(hdr)); + + sg_init_table(sg, 2); + sg_set_buf(&sg[0], &header, sizeof(header)); + sg_set_buf(&sg[1], kbuf, length); + + /* cast to drop the const */ + if (!virtqueue_add_outbuf(hw->out, sg, 2, (void *)kbuf, GFP_KERNEL)) { + virtqueue_kick(hw->out); + /* + * Block sending is here. + * The reason is that the kbuf needs to be handled synchronously + * FIXME: Can be optimized later. + * + * Drain the out buffers after BE processing. + */ + while (!virtqueue_get_buf(hw->out, &len) && + !virtqueue_is_broken(hw->out)) + cpu_relax(); + + /* + * schedule_work after synchronous sending complete, we don't + * rely virtqueue's callback here + */ + schedule_work(&hw->intr_handler); + } + + if (kbuf != buf) + kfree(kbuf); + + return 0; +} + +/** + * mei_vritio_count_full_read_slots - counts read full slots. 
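+ * Slots are u32-sized: the count is the difference between what the
+ * backend delivered into recv_buf (recv_len) and what the read side
+ * has consumed so far (recv_idx).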
+ * + * @dev: the device structure + * + * Return: -EOVERFLOW if overflow, otherwise filled slots count + */ +static int mei_vritio_count_full_read_slots(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + if (hw->recv_idx > hw->recv_len) + return -EOVERFLOW; + + return hw->recv_len - hw->recv_idx; +} + +/** + * mei_vritio_read_hdr - Reads 32bit dword from mei virtio receive buffer + * + * @dev: the device structure + * + * Return: 32bit dword of receive buffer (u32) + */ +static inline u32 mei_vritio_read_hdr(const struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + WARN_ON(hw->cfg.buf_depth < hw->recv_idx + 1); + + return hw->recv_buf[hw->recv_idx++]; +} + +static int mei_vritio_read(struct mei_device *dev, unsigned char *buffer, + unsigned long len) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + size_t slots = mei_len2slots(len); + + if (WARN_ON(hw->cfg.buf_depth < hw->recv_idx + slots)) + return -EOVERFLOW; + + /* + * Assumption: There is only one HECI message in recv_buf each time. + * Backend service need follow this rule too. + * + * TODO: use double/triple buffers for recv_buf + */ + memcpy(buffer, hw->recv_buf + hw->recv_idx, len); + hw->recv_idx += slots; + + return 0; +} + +static bool mei_vritio_pg_is_enabled(struct mei_device *dev) +{ + return false; +} + +static bool mei_vritio_pg_in_transition(struct mei_device *dev) +{ + return false; +} + +/** + * mei_vritio_hw_reset - resets virtio hw. + * + * @dev: the device structure + * @intr_enable: virtio use data/config callbacks + * + * Return: 0 on success an error code otherwise + */ +static int mei_vritio_hw_reset(struct mei_device *dev, bool intr_enable) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + dev->recvd_hw_ready = false; + hw->host_ready = false; + hw->cfg.host_reset = 1; + virtio_cwrite(hw->vdev, struct mei_virtio_cfg, + host_reset, &hw->cfg.host_reset); + return 0; +} + +/** + * mei_vritio_hw_ready_wait - wait until the virtio(hw) has turned ready + * or timeout is reached + * + * @dev: mei device + * Return: 0 on success, error otherwise + */ +static int mei_vritio_hw_ready_wait(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + mutex_unlock(&dev->device_lock); + wait_event_timeout(dev->wait_hw_ready, + dev->recvd_hw_ready, + mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT)); + mutex_lock(&dev->device_lock); + if (!dev->recvd_hw_ready) { + dev_err(dev->dev, "wait hw ready failed\n"); + return -ETIMEDOUT; + } + + hw->cfg.host_reset = 0; + virtio_cwrite(hw->vdev, struct mei_virtio_cfg, + host_reset, &hw->cfg.host_reset); + dev->recvd_hw_ready = false; + return 0; +} + +/** + * mei_vritio_hw_start - hw start routine + * + * @dev: mei device + * Return: 0 on success, error otherwise + */ +static int mei_vritio_hw_start(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + int ret; + + ret = mei_vritio_hw_ready_wait(dev); + if (ret) + return ret; + + dev_dbg(dev->dev, "hw is ready\n"); + hw->host_ready = true; + + return 0; +} + +/** + * mei_vritio_hw_is_ready - check whether the BE(hw) has turned ready + * + * @dev: mei device + * Return: bool + */ +static bool mei_vritio_hw_is_ready(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + virtio_cread(hw->vdev, struct mei_virtio_cfg, + hw_ready, &hw->cfg.hw_ready); + return hw->cfg.hw_ready; +} + +/** + * mei_vritio_host_is_ready - check whether the FE has turned ready + * + * @dev: mei device + * Return: bool + */ +static bool 
mei_vritio_host_is_ready(struct mei_device *dev) +{ + struct mei_virtio_hw *hw = to_virtio_hw(dev); + + return hw->host_ready; +} + +/** + * mei_vritio_data_in - The callback of recv virtqueue of virtio HECI + * + * @vq: receiving virtqueue + */ +static void mei_vritio_data_in(struct virtqueue *vq) +{ + struct mei_virtio_hw *hw = vq->vdev->priv; + void *data; + unsigned int len; + unsigned long flags; + + spin_lock_irqsave(&hw->recv_lock, flags); + if (hw->recv_in_progress) { + spin_unlock_irqrestore(&hw->recv_lock, flags); + return; + } + + /* disable interrupts (enabled again from in the interrupt worker) */ + virtqueue_disable_cb(hw->in); + + hw->recv_in_progress = true; + data = virtqueue_get_buf(hw->in, &len); + if (!data || !len) { + spin_unlock_irqrestore(&hw->recv_lock, flags); + return; + } + WARN_ON(data != hw->recv_buf); + hw->recv_len = mei_len2slots(len); + spin_unlock_irqrestore(&hw->recv_lock, flags); + + schedule_work(&hw->intr_handler); +} + +/** + * mei_vritio_data_out - The callback of send virtqueue of virtio HECI + * + * @vq: transmiting virtqueue + */ +static void mei_vritio_data_out(struct virtqueue *vq) +{ + /* + * As sending is synchronous, we do nothing here for now + */ +} + +static void mei_vritio_add_recv_buf(struct mei_virtio_hw *hw) +{ + struct scatterlist sg; + unsigned long flags; + + /* refill the recv_buf to IN virtqueue to get next message */ + sg_init_one(&sg, hw->recv_buf, mei_slots2data(hw->cfg.buf_depth)); + spin_lock_irqsave(&hw->recv_lock, flags); + virtqueue_add_inbuf(hw->in, &sg, 1, hw->recv_buf, GFP_KERNEL); + hw->recv_len = 0; + hw->recv_idx = 0; + hw->recv_in_progress = false; + spin_unlock_irqrestore(&hw->recv_lock, flags); + virtqueue_kick(hw->in); +} + +static void mei_vritio_intr_handler(struct work_struct *work) +{ + struct mei_virtio_hw *hw = + container_of(work, struct mei_virtio_hw, intr_handler); + struct mei_device *dev = &hw->mdev; + struct list_head complete_list; + s32 slots; + int rets = 0; + bool in_has_pending = false; + bool has_recv_data = false; + + /* initialize our complete list */ + mutex_lock(&dev->device_lock); + INIT_LIST_HEAD(&complete_list); + + /* check if ME wants a reset */ + if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { + dev_warn(dev->dev, "BE service not ready: resetting.\n"); + schedule_work(&dev->reset_work); + goto end; + } + + /* check if we need to start the dev */ + if (!mei_host_is_ready(dev)) { + if (mei_hw_is_ready(dev)) { + dev_info(dev->dev, "we need to start the dev.\n"); + dev->recvd_hw_ready = true; + wake_up(&dev->wait_hw_ready); + } else { + dev_warn(dev->dev, "Spurious Interrupt\n"); + } + goto end; + } + /* check slots available for reading */ + slots = mei_count_full_read_slots(dev); + while (slots > 0) { + has_recv_data = true; + dev_dbg(dev->dev, "slots to read = %08x\n", slots); + rets = mei_irq_read_handler(dev, &complete_list, &slots); + + if (rets && dev->dev_state != MEI_DEV_RESETTING) { + dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n", + rets); + schedule_work(&dev->reset_work); + goto end; + } + } + + /* add the buffer back to the recv virtqueue + * just add after recv is consumed to avoid duplication buffer + */ + if (has_recv_data) + mei_vritio_add_recv_buf(hw); + + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + + mei_irq_write_handler(dev, &complete_list); + + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); + + mei_irq_compl_handler(dev, &complete_list); + +end: + in_has_pending = !virtqueue_enable_cb(hw->in); + dev_dbg(dev->dev, "IN queue pending[%d]\n", 
in_has_pending); + if (in_has_pending) + mei_vritio_data_in(hw->in); + + mutex_unlock(&dev->device_lock); +} + +static void mei_vritio_config_intr(struct virtio_device *vdev) +{ + struct mei_virtio_hw *hw = vdev->priv; + + virtio_cread(hw->vdev, struct mei_virtio_cfg, + hw_ready, &hw->cfg.hw_ready); + /* Run intr handler once to handle reset notify */ + schedule_work(&hw->intr_handler); +} + +/* + * There are two virtqueues, one is for send and another is for recv. + */ +static int mei_vritio_init_vqs(struct mei_virtio_hw *hw) +{ + struct virtqueue *vqs[2]; + vq_callback_t *cbs[] = { + mei_vritio_data_in, + mei_vritio_data_out, + }; + static const char * const names[] = { + "in", + "out", + }; + int ret; + + ret = virtio_find_vqs(hw->vdev, 2, vqs, cbs, names, NULL); + if (ret) + return ret; + + hw->in = vqs[0]; + hw->out = vqs[1]; + + return 0; +} + +static const struct mei_hw_ops mei_virtio_ops = { + .fw_status = mei_vritio_fw_status, + .pg_state = mei_vritio_pg_state, + + .host_is_ready = mei_vritio_host_is_ready, + + .hw_is_ready = mei_vritio_hw_is_ready, + .hw_reset = mei_vritio_hw_reset, + .hw_config = mei_vritio_hw_config, + .hw_start = mei_vritio_hw_start, + + .pg_in_transition = mei_vritio_pg_in_transition, + .pg_is_enabled = mei_vritio_pg_is_enabled, + + .intr_clear = mei_vritio_intr_clear, + .intr_enable = mei_vritio_intr_enable, + .intr_disable = mei_vritio_intr_disable, + .synchronize_irq = mei_vritio_synchronize_irq, + + .hbuf_free_slots = mei_vritio_hbuf_empty_slots, + .hbuf_is_ready = mei_vritio_hbuf_is_empty, + .hbuf_max_len = mei_vritio_hbuf_max_len, + + .write = mei_vritio_write_message, + + .rdbuf_full_slots = mei_vritio_count_full_read_slots, + .read_hdr = mei_vritio_read_hdr, + .read = mei_vritio_read, +}; + +static int mei_vritio_probe(struct virtio_device *vdev) +{ + struct mei_virtio_hw *hw; + int ret; + + hw = kzalloc(sizeof(*hw), GFP_KERNEL); + if (!hw) + return -ENOMEM; + + vdev->priv = hw; + hw->vdev = vdev; + + INIT_WORK(&hw->intr_handler, mei_vritio_intr_handler); + + ret = mei_vritio_init_vqs(hw); + if (ret) + goto vqs_failed; + + virtio_cread(vdev, struct mei_virtio_cfg, + buf_depth, &hw->cfg.buf_depth); + + hw->recv_buf = kzalloc(mei_slots2data(hw->cfg.buf_depth), GFP_KERNEL); + if (!hw->recv_buf) { + ret = -ENOMEM; + goto hbuf_failed; + } + spin_lock_init(&hw->recv_lock); + mei_vritio_add_recv_buf(hw); + + virtio_device_ready(vdev); + virtio_config_enable(vdev); + + mei_device_init(&hw->mdev, &vdev->dev, &mei_virtio_ops); + + ret = mei_start(&hw->mdev); + if (ret) + goto mei_start_failed; + + ret = mei_register(&hw->mdev, &vdev->dev); + if (ret) + goto mei_failed; + + pm_runtime_enable(&vdev->dev); + + dev_info(&vdev->dev, "virtio HECI initialization is successful.\n"); + + return 0; + +mei_failed: + mei_stop(&hw->mdev); +mei_start_failed: + mei_cancel_work(&hw->mdev); + mei_disable_interrupts(&hw->mdev); + kfree(hw->recv_buf); +hbuf_failed: + vdev->config->del_vqs(vdev); +vqs_failed: + kfree(hw); + return ret; +} + +static int __maybe_unused mei_virtio_pm_runtime_idle(struct device *device) +{ + struct virtio_device *vdev = dev_to_virtio(device); + struct mei_virtio_hw *hw = vdev->priv; + + dev_dbg(&vdev->dev, "rpm: mei_virtio : runtime_idle\n"); + + if (!hw) + return -ENODEV; + + if (mei_write_is_idle(&hw->mdev)) + pm_runtime_autosuspend(device); + + return -EBUSY; +} + +static int __maybe_unused mei_virtio_pm_runtime_suspend(struct device *device) +{ + return 0; +} + +static int __maybe_unused mei_virtio_pm_runtime_resume(struct device *device) +{ + 
return 0; +} + +static int __maybe_unused mei_virtio_suspend(struct device *device) +{ + struct virtio_device *vdev = dev_to_virtio(device); + struct mei_virtio_hw *hw = vdev->priv; + + if (!hw) + return -ENODEV; + + dev_dbg(&vdev->dev, "suspend\n"); + + mei_stop(&hw->mdev); + mei_disable_interrupts(&hw->mdev); + return 0; +} + +static int __maybe_unused mei_virtio_resume(struct device *device) +{ + struct virtio_device *vdev = dev_to_virtio(device); + struct mei_virtio_hw *hw = vdev->priv; + int ret; + + if (!hw) + return -ENODEV; + + ret = mei_restart(&hw->mdev); + if (ret) + return ret; + + /* Start timer if stopped in suspend */ + schedule_delayed_work(&hw->mdev.timer_work, HZ); + + return 0; +} + +static const struct dev_pm_ops mei_virtio_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(mei_virtio_suspend, + mei_virtio_resume) + SET_RUNTIME_PM_OPS(mei_virtio_pm_runtime_suspend, + mei_virtio_pm_runtime_resume, + mei_virtio_pm_runtime_idle) +}; + +static void mei_vritio_remove(struct virtio_device *vdev) +{ + struct mei_virtio_hw *hw = vdev->priv; + + mei_stop(&hw->mdev); + mei_cancel_work(&hw->mdev); + mei_disable_interrupts(&hw->mdev); + mei_deregister(&hw->mdev); + vdev->config->reset(vdev); + vdev->config->del_vqs(vdev); + kfree(hw->recv_buf); + kfree(hw); +} + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_HECI, VIRTIO_DEV_ANY_ID }, + { } +}; + +static struct virtio_driver mei_virtio_driver = { + .id_table = id_table, + .probe = mei_vritio_probe, + .remove = mei_vritio_remove, + .config_changed = mei_vritio_config_intr, + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + .pm = &mei_virtio_pm_ops, + }, +}; + +module_virtio_driver(mei_virtio_driver); +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("Virtio HECI frontend driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index 5c8286b40b62..7ed3fe4dbf52 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -37,7 +37,7 @@ /* * MEI Version */ -#define HBM_MINOR_VERSION 0 +#define HBM_MINOR_VERSION 1 #define HBM_MAJOR_VERSION 2 /* @@ -82,6 +82,12 @@ #define HBM_MINOR_VERSION_OS 0 #define HBM_MAJOR_VERSION_OS 2 +/* + * MEI version with dma ring support + */ +#define HBM_MINOR_VERSION_DR 1 +#define HBM_MAJOR_VERSION_DR 2 + /* Host bus message command opcode */ #define MEI_HBM_CMD_OP_MSK 0x7f /* Host bus message command RESPONSE */ @@ -124,6 +130,9 @@ #define MEI_HBM_NOTIFY_RES_CMD 0x90 #define MEI_HBM_NOTIFICATION_CMD 0x11 +#define MEI_HBM_DMA_SETUP_REQ_CMD 0x12 +#define MEI_HBM_DMA_SETUP_RES_CMD 0x92 + /* * MEI Stop Reason * used by hbm_host_stop_request.reason @@ -189,19 +198,29 @@ enum mei_cl_disconnect_status { MEI_CL_DISCONN_SUCCESS = MEI_HBMS_SUCCESS }; -/* - * MEI BUS Interface Section +/** + * struct mei_msg_hdr - MEI BUS Interface Section + * + * @me_addr: device address + * @host_addr: host address + * @length: message length + * @reserved: reserved + * @dma_ring: message is on dma ring + * @internal: message is internal + * @msg_complete: last packet of the message + * @dr_length: [OPTIONAL] length of the message on dma ring */ struct mei_msg_hdr { u32 me_addr:8; u32 host_addr:8; u32 length:9; - u32 reserved:5; + u32 reserved:4; + u32 dma_ring:1; u32 internal:1; u32 msg_complete:1; + u32 dr_length[0]; } __packed; - struct mei_bus_message { u8 hbm_cmd; u8 data[0]; @@ -451,4 +470,75 @@ struct hbm_notification { u8 reserved[1]; } __packed; +/** + * struct hbm_dma_mem_dscr - dma ring + * + * @addr_hi: the high 32bits of 64 bit address + * 
@addr_lo: the low 32bits of 64 bit address + * @size : size in bytes + */ +struct hbm_dma_mem_dscr { + u32 addr_hi; + u32 addr_lo; + u32 size; +} __packed; + +enum { + DMA_DSCR_HOST = 0, + DMA_DSCR_DEVICE = 1, + DMA_DSCR_CTRL = 2, + DMA_DSCR_NUM, +}; + +/** + * struct hbm_dma_setup_request - dma setup request + * + * @hbm_cmd: bus message command header + * @reserved: reserved for alignment + * @dma_dscr: dma descriptor for HOST, DEVICE, and CTRL + */ +struct hbm_dma_setup_request { + u8 hbm_cmd; + u8 reserved[3]; + struct hbm_dma_mem_dscr dma_dscr[DMA_DSCR_NUM]; +} __packed; + +/** + * struct hbm_dma_setup_response - dma setup response + * + * @hbm_cmd: bus message command header + * @status: 0 on success; otherwise DMA setup failed. + * @reserved: reserved for alignment + */ +struct hbm_dma_setup_response { + u8 hbm_cmd; + u8 status; + u8 reserved[2]; +} __packed; + +#define MEI_DMA_SLOT_SIZE 4 + +/** + * struct mei_dma_ring_ctrl - dma ring control block + * + * @hbuf_wr_idx: host circular buffer write index in slots + * @reserved1: reserved for alignment + * @hbuf_rd_idx: host circular buffer read index in slots + * @reserved2: reserved for alignment + * @dbuf_wr_idx: device circular buffer write index in slots + * @reserved3: reserved for alignment + * @dbuf_rd_idx: device circular buffer read index in slots + * @reserved4: reserved for alignment + */ +struct hbm_dma_ring_ctrl { + u32 hbuf_wr_idx; + u32 reserved1; + u32 hbuf_rd_idx; + u32 reserved2; + u32 dbuf_wr_idx; + u32 reserved3; + u32 dbuf_rd_idx; + u32 reserved4; +}; + #endif diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index d2f691424dd1..fe89660762ee 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -151,7 +151,7 @@ int mei_reset(struct mei_device *dev) mei_hbm_reset(dev); - dev->rd_msg_hdr = 0; + memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr)); if (ret) { dev_err(dev->dev, "hw_reset failed ret = %d\n", ret); diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index b0b8f18a85e3..8f2ea7f6d7d9 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -75,6 +75,8 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl, */ static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr) { + if (hdr->dma_ring) + mei_dma_ring_read(dev, NULL, hdr->dr_length[0]); /* * no need to check for size as it is guarantied * that length fits into rd_msg_buf @@ -100,6 +102,7 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_device *dev = cl->dev; struct mei_cl_cb *cb; size_t buf_sz; + u32 length; cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); if (!cb) { @@ -119,25 +122,31 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, goto discard; } - buf_sz = mei_hdr->length + cb->buf_idx; + length = mei_hdr->dma_ring ? mei_hdr->dr_length[0] : mei_hdr->length; + + buf_sz = length + cb->buf_idx; /* catch for integer overflow */ if (buf_sz < cb->buf_idx) { cl_err(dev, cl, "message is too big len %d idx %zu\n", - mei_hdr->length, cb->buf_idx); + length, cb->buf_idx); cb->status = -EMSGSIZE; goto discard; } if (cb->buf.size < buf_sz) { cl_dbg(dev, cl, "message overflow. 
size %zu len %d idx %zu\n", - cb->buf.size, mei_hdr->length, cb->buf_idx); + cb->buf.size, length, cb->buf_idx); cb->status = -EMSGSIZE; goto discard; } + if (mei_hdr->dma_ring) + mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length); + + /* for DMA read 0 length to generate an interrupt to the device */ mei_read_slots(dev, cb->buf.data + cb->buf_idx, mei_hdr->length); - cb->buf_idx += mei_hdr->length; + cb->buf_idx += length; if (mei_hdr->msg_complete) { cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx); @@ -152,6 +161,7 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl, discard: if (cb) list_move_tail(&cb->list, cmpl_list); + mei_irq_discard_msg(dev, mei_hdr); return 0; } @@ -243,6 +253,9 @@ static inline int hdr_is_valid(u32 msg_hdr) if (!msg_hdr || mei_hdr->reserved) return -EBADMSG; + if (mei_hdr->dma_ring && mei_hdr->length != sizeof(u32)) + return -EBADMSG; + return 0; } @@ -263,20 +276,21 @@ int mei_irq_read_handler(struct mei_device *dev, struct mei_cl *cl; int ret; - if (!dev->rd_msg_hdr) { - dev->rd_msg_hdr = mei_read_hdr(dev); + if (!dev->rd_msg_hdr[0]) { + dev->rd_msg_hdr[0] = mei_read_hdr(dev); (*slots)--; dev_dbg(dev->dev, "slots =%08x.\n", *slots); - ret = hdr_is_valid(dev->rd_msg_hdr); + ret = hdr_is_valid(dev->rd_msg_hdr[0]); if (ret) { dev_err(dev->dev, "corrupted message header 0x%08X\n", - dev->rd_msg_hdr); + dev->rd_msg_hdr[0]); goto end; } + dev_dbg(dev->dev, "slots = %08x.\n", *slots); } - mei_hdr = (struct mei_msg_hdr *)&dev->rd_msg_hdr; + mei_hdr = (struct mei_msg_hdr *)dev->rd_msg_hdr; dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); if (mei_slots2data(*slots) < mei_hdr->length) { @@ -287,6 +301,11 @@ int mei_irq_read_handler(struct mei_device *dev, goto end; } + if (mei_hdr->dma_ring) { + dev->rd_msg_hdr[1] = mei_read_hdr(dev); + mei_hdr->length = 0; + } + /* HBM message */ if (hdr_is_hbm(mei_hdr)) { ret = mei_hbm_dispatch(dev, mei_hdr); @@ -317,7 +336,7 @@ int mei_irq_read_handler(struct mei_device *dev, goto reset_slots; } dev_err(dev->dev, "no destination client found 0x%08X\n", - dev->rd_msg_hdr); + dev->rd_msg_hdr[0]); ret = -EBADMSG; goto end; } @@ -327,9 +346,8 @@ int mei_irq_read_handler(struct mei_device *dev, reset_slots: /* reset the number of slots and header */ + memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr)); *slots = mei_count_full_read_slots(dev); - dev->rd_msg_hdr = 0; - if (*slots == -EOVERFLOW) { /* overflow - reset */ dev_err(dev->dev, "resetting due to slots overflow.\n"); diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index e825f013e54e..22efc039f302 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -507,7 +507,6 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) break; default: - dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd); rets = -ENOIOCTLCMD; } diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index ebcd5132e447..2bba52157663 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -26,7 +26,8 @@ #include "hw.h" #include "hbm.h" -#define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32)) +#define MEI_SLOT_SIZE sizeof(u32) +#define MEI_RD_MSG_BUF_SIZE (128 * MEI_SLOT_SIZE) /* * Number of Maximum MEI Clients @@ -121,6 +122,19 @@ struct mei_msg_data { unsigned char *data; }; +/** + * struct mei_dma_dscr - dma address descriptor + * + * @vaddr: dma buffer virtual address + * @daddr: dma buffer physical address + * @size : dma buffer size + */ +struct mei_dma_dscr { + void *vaddr; + dma_addr_t daddr; + size_t 
size;
+};
+
 /* Maximum number of processed FW status registers */
 #define MEI_FW_STATUS_MAX 6
 /* Minimal buffer for FW status string (8 bytes in dw + space or '\0') */
@@ -387,6 +401,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
  *
  * @hbuf_depth : depth of hardware host/write buffer in slots
  * @hbuf_is_ready : query if the host/write buffer is ready
+ * @dr_dscr: DMA ring descriptors: TX, RX, and CTRL
  *
  * @version : HBM protocol version in use
  * @hbm_f_pg_supported : hbm feature pgi protocol
@@ -396,6 +411,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
  * @hbm_f_fa_supported : hbm feature fixed address client
  * @hbm_f_ie_supported : hbm feature immediate reply to enum request
  * @hbm_f_os_supported : hbm feature support OS ver message
+ * @hbm_f_dr_supported : hbm feature dma ring supported
  *
  * @me_clients_rwsem: rw lock over me_clients list
  * @me_clients : list of FW clients
@@ -457,12 +473,14 @@ struct mei_device {
 #endif /* CONFIG_PM */
 	unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE];
-	u32 rd_msg_hdr;
+	u32 rd_msg_hdr[2];
 
 	/* write buffer */
 	u8 hbuf_depth;
 	bool hbuf_is_ready;
 
+	struct mei_dma_dscr dr_dscr[DMA_DSCR_NUM];
+
 	struct hbm_version version;
 	unsigned int hbm_f_pg_supported:1;
 	unsigned int hbm_f_dc_supported:1;
@@ -471,6 +489,7 @@ struct mei_device {
 	unsigned int hbm_f_fa_supported:1;
 	unsigned int hbm_f_ie_supported:1;
 	unsigned int hbm_f_os_supported:1;
+	unsigned int hbm_f_dr_supported:1;
 
 	struct rw_semaphore me_clients_rwsem;
 	struct list_head me_clients;
@@ -526,6 +545,18 @@ static inline u32 mei_slots2data(int slots)
 	return slots * 4;
 }
 
+/**
+ * mei_slots2msglen - return number of bytes available for message data
+ *
+ * @slots: number of slots
+ *
+ * Return: available bytes
+ */
+static inline size_t mei_slots2msglen(u32 slots)
+{
+	return slots * MEI_SLOT_SIZE - sizeof(struct mei_msg_hdr);
+}
+
 /*
  * mei init function prototypes
  */
@@ -538,6 +569,14 @@ int mei_restart(struct mei_device *dev);
 void mei_stop(struct mei_device *dev);
 void mei_cancel_work(struct mei_device *dev);
 
+int mei_dmam_ring_alloc(struct mei_device *dev);
+void mei_dmam_ring_free(struct mei_device *dev);
+bool mei_dma_ring_is_allocated(struct mei_device *dev);
+void mei_dma_ring_reset(struct mei_device *dev);
+void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len);
+void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len);
+u32 mei_dma_ring_empty_slots(struct mei_device *dev);
+
 /*
  * MEI interrupt functions prototype
  */
@@ -675,10 +714,10 @@ static inline void mei_dbgfs_deregister(struct mei_device *dev) {}
 int mei_register(struct mei_device *dev, struct device *parent);
 void mei_deregister(struct mei_device *dev);
 
-#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d"
+#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d dma=%1d internal=%1d comp=%1d"
 #define MEI_HDR_PRM(hdr) \
 	(hdr)->host_addr, (hdr)->me_addr, \
-	(hdr)->length, (hdr)->internal, (hdr)->msg_complete
+	(hdr)->length, (hdr)->dma_ring, (hdr)->internal, (hdr)->msg_complete
 
 ssize_t mei_fw_status2str(struct mei_fw_status *fw_sts, char *buf, size_t len);
 
 /**
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 78b3172c8e6e..2a75dd544a1f 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -92,12 +92,24 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 	{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
 
+	{MEI_PCI_DEVICE(MEI_DEV_ID_DNV, MEI_ME_PCH8_CFG)},
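+	/* entries below using MEI_ME_PCH12_CFG enable the 128K DMA ring */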
 	{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
 
 	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
 
+	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_2, MEI_ME_PCH8_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_2, MEI_ME_PCH8_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
+
+	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_N, MEI_ME_PCH12_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_H, MEI_ME_PCH12_CFG)},
+
 	/* required last entry */
 	{0, }
 };
@@ -238,8 +250,11 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 */
 	mei_me_set_pm_domain(dev);
 
-	if (mei_pg_is_enabled(dev))
+	if (mei_pg_is_enabled(dev)) {
 		pm_runtime_put_noidle(&pdev->dev);
+		if (hw->d0i3_supported)
+			pm_runtime_allow(&pdev->dev);
+	}
 
 	dev_dbg(&pdev->dev, "initialization successful.\n");
 
diff --git a/drivers/misc/mei/spd/Kconfig b/drivers/misc/mei/spd/Kconfig
new file mode 100644
index 000000000000..085f9caa8c66
--- /dev/null
+++ b/drivers/misc/mei/spd/Kconfig
@@ -0,0 +1,12 @@
+#
+# Storage proxy device configuration
+#
+config INTEL_MEI_SPD
+	tristate "Intel MEI Host Storage Proxy Driver"
+	depends on INTEL_MEI && BLOCK && RPMB
+	help
+	  A driver for the host storage proxy ME client.
+	  The driver enables the ME FW to store data on storage devices
+	  that are accessible only from the host.
+
+	  To compile this driver as a module, choose M here.
diff --git a/drivers/misc/mei/spd/Makefile b/drivers/misc/mei/spd/Makefile
new file mode 100644
index 000000000000..72d0bca2974e
--- /dev/null
+++ b/drivers/misc/mei/spd/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the Storage Proxy device driver.
+#
+
+obj-$(CONFIG_INTEL_MEI_SPD) += mei_spd.o
+mei_spd-objs := main.o
+mei_spd-objs += cmd.o
+mei_spd-objs += gpp.o
+mei_spd-objs += rpmb.o
+mei_spd-$(CONFIG_DEBUG_FS) += debugfs.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/misc/mei/spd/cmd.c b/drivers/misc/mei/spd/cmd.c
new file mode 100644
index 000000000000..3f45902e23da
--- /dev/null
+++ b/drivers/misc/mei/spd/cmd.c
@@ -0,0 +1,546 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/*
+ * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved.
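+ *
+ * SPD command handling: framing and dispatch for the request/response
+ * protocol spoken with the ME FW storage proxy client (message layout
+ * in cmd.h, struct spd_cmd).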
+ */ +#include +#include +#include + +#include "cmd.h" +#include "spd.h" + +#define spd_cmd_size(_cmd) \ + (sizeof(struct spd_cmd_hdr) + \ + sizeof(struct spd_cmd_##_cmd)) +#define spd_cmd_rpmb_size(_cmd) \ + (spd_cmd_size(_cmd) + SPD_CLIENT_RPMB_DATA_MAX_SIZE) + +#define to_spd_hdr(_buf) (struct spd_cmd_hdr *)(_buf) +#define to_spd_cmd(_cmd, _buf) \ + (struct spd_cmd_##_cmd *)((_buf) + sizeof(struct spd_cmd_hdr)) + +const char *spd_cmd_str(enum spd_cmd_type cmd) +{ +#define __SPD_CMD(_cmd) SPD_##_cmd##_CMD +#define SPD_CMD(cmd) case __SPD_CMD(cmd): return #cmd + switch (cmd) { + SPD_CMD(NONE); + SPD_CMD(START_STOP); + SPD_CMD(RPMB_WRITE); + SPD_CMD(RPMB_READ); + SPD_CMD(RPMB_GET_COUNTER); + SPD_CMD(GPP_WRITE); + SPD_CMD(GPP_READ); + SPD_CMD(TRIM); + SPD_CMD(INIT); + SPD_CMD(STORAGE_STATUS); + SPD_CMD(MAX); + default: + return "unknown"; + } +#undef SPD_CMD +#undef __SPD_CMD +} + +const char *mei_spd_dev_str(enum spd_storage_type type) +{ +#define SPD_TYPE(type) case SPD_TYPE_##type: return #type + switch (type) { + SPD_TYPE(UNDEF); + SPD_TYPE(EMMC); + SPD_TYPE(UFS); + default: + return "unknown"; + } +#undef SPD_TYPE +} + +const char *mei_spd_state_str(enum mei_spd_state state) +{ +#define SPD_STATE(state) case MEI_SPD_STATE_##state: return #state + switch (state) { + SPD_STATE(INIT); + SPD_STATE(INIT_WAIT); + SPD_STATE(INIT_DONE); + SPD_STATE(RUNNING); + SPD_STATE(STOPPING); + default: + return "unknown"; + } +#undef SPD_STATE +} + +/** + * mei_spd_init_req - send init request + * + * @spd: spd device + * + * Return: 0 on success + * -EPROTO if called in wrong state + * < 0 on write error + */ +int mei_spd_cmd_init_req(struct mei_spd *spd) +{ + const int req_len = sizeof(struct spd_cmd_hdr); + struct spd_cmd_hdr *hdr; + u32 cmd_type = SPD_INIT_CMD; + ssize_t ret; + + spd_dbg(spd, "cmd [%d] %s : state [%d] %s\n", + cmd_type, spd_cmd_str(cmd_type), + spd->state, mei_spd_state_str(spd->state)); + + if (spd->state != MEI_SPD_STATE_INIT) + return -EPROTO; + + memset(spd->buf, 0, req_len); + hdr = to_spd_hdr(spd->buf); + + hdr->command_type = cmd_type; + hdr->is_response = 0; + hdr->len = req_len; + + spd->state = MEI_SPD_STATE_INIT_WAIT; + ret = mei_cldev_send(spd->cldev, spd->buf, req_len); + if (ret != req_len) { + spd_err(spd, "start send failed ret = %zd\n", ret); + return ret; + } + + return 0; +} + +/** + * mei_spd_cmd_init_rsp - handle init response message + * + * @spd: spd device + * @cmd: received spd command + * @cmd_sz: received command size + * + * Return: 0 on success; < 0 otherwise + */ +static int mei_spd_cmd_init_rsp(struct mei_spd *spd, struct spd_cmd *cmd, + ssize_t cmd_sz) +{ + int type; + int gpp_id; + int i; + + if (cmd_sz < spd_cmd_size(init_resp)) { + spd_err(spd, "Wrong init response size\n"); + return -EINVAL; + } + + if (spd->state != MEI_SPD_STATE_INIT_WAIT) + return -EPROTO; + + type = cmd->init_rsp.type; + gpp_id = cmd->init_rsp.gpp_partition_id; + + switch (type) { + case SPD_TYPE_EMMC: + if (gpp_id < 1 || gpp_id > 4) { + spd_err(spd, "%s unsupported gpp id %d\n", + mei_spd_dev_str(type), gpp_id); + return -EINVAL; + } + break; + + case SPD_TYPE_UFS: + if (gpp_id < 1 || gpp_id > 6) { + spd_err(spd, "%s unsupported gpp id %d\n", + mei_spd_dev_str(type), gpp_id); + return -EINVAL; + } + break; + + default: + spd_err(spd, "unsupported storage type %d\n", + cmd->init_rsp.type); + return -EINVAL; + } + + spd->dev_type = type; + spd->gpp_partition_id = gpp_id; + + if (cmd->init_rsp.serial_no_sz != 0) { + if (cmd->init_rsp.serial_no_sz != + cmd_sz - 
spd_cmd_size(init_resp)) { + spd_err(spd, "wrong serial no size %u?=%zu\n", + cmd->init_rsp.serial_no_sz, + cmd_sz - spd_cmd_size(init_resp)); + return -EMSGSIZE; + } + + if (cmd->init_rsp.serial_no_sz > 256) { + spd_err(spd, "serial no is too large %u\n", + cmd->init_rsp.serial_no_sz); + return -EMSGSIZE; + } + + spd->dev_id = kzalloc(cmd->init_rsp.serial_no_sz, GFP_KERNEL); + if (!spd->dev_id) + return -ENOMEM; + + spd->dev_id_sz = cmd->init_rsp.serial_no_sz; + if (type == SPD_TYPE_EMMC) { + /* FW have this in be32 format */ + __be32 *sno = (__be32 *)cmd->init_rsp.serial_no; + u32 *dev_id = (u32 *)spd->dev_id; + + for (i = 0; i < spd->dev_id_sz / sizeof(u32); i++) + dev_id[i] = be32_to_cpu(sno[i]); + } else { + memcpy(spd->dev_id, &cmd->init_rsp.serial_no, + cmd->init_rsp.serial_no_sz); + } + } + + spd->state = MEI_SPD_STATE_INIT_DONE; + + return 0; +} + +/** + * mei_spd_cmd_storage_status_req - send storage status message + * + * @spd: spd device + * + * Return: 0 on success + * -EPROTO if called in wrong state + * < 0 on write error + */ +int mei_spd_cmd_storage_status_req(struct mei_spd *spd) +{ + struct spd_cmd_hdr *hdr; + struct spd_cmd_storage_status_req *req; + const int req_len = spd_cmd_size(storage_status_req); + u32 cmd_type = SPD_STORAGE_STATUS_CMD; + ssize_t ret; + + spd_dbg(spd, "cmd [%d] %s : state [%d] %s\n", + cmd_type, spd_cmd_str(cmd_type), + spd->state, mei_spd_state_str(spd->state)); + + if (spd->state < MEI_SPD_STATE_INIT_DONE) + return -EPROTO; + + memset(spd->buf, 0, req_len); + hdr = to_spd_hdr(spd->buf); + + hdr->command_type = cmd_type; + hdr->is_response = 0; + hdr->len = req_len; + + req = to_spd_cmd(storage_status_req, spd->buf); + req->gpp_on = mei_spd_gpp_is_open(spd); + req->rpmb_on = mei_spd_rpmb_is_open(spd); + + ret = mei_cldev_send(spd->cldev, spd->buf, req_len); + if (ret != req_len) { + spd_err(spd, "send storage status failed ret = %zd\n", ret); + return ret; + } + + if (req->gpp_on || req->rpmb_on) + spd->state = MEI_SPD_STATE_RUNNING; + else + spd->state = MEI_SPD_STATE_INIT_DONE; + + spd_dbg(spd, "cmd [%d] %s : state [%d] %s\n", + cmd_type, spd_cmd_str(cmd_type), + spd->state, mei_spd_state_str(spd->state)); + + return 0; +} + +static int mei_spd_cmd_gpp_write(struct mei_spd *spd, struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + size_t len = SPD_GPP_WRITE_DATA_LEN(*cmd); + int ret; + + if (out_buf_sz < spd_cmd_size(gpp_write_req)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + ret = mei_spd_gpp_write(spd, cmd->gpp_write_req.offset, + cmd->gpp_write_req.data, len); + if (ret) { + spd_err(spd, "Failed to write to gpp ret = %d\n", ret); + return SPD_STATUS_GENERAL_FAILURE; + } + + spd_dbg(spd, "wrote %zd bytes of data\n", len); + + cmd->header.len = spd_cmd_size(gpp_write_rsp); + + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_gpp_read(struct mei_spd *spd, struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + size_t len; + int ret; + + if (out_buf_sz < spd_cmd_size(gpp_read_req)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + len = cmd->gpp_read_req.size_to_read; + if (len > SPD_CLIENT_GPP_DATA_MAX_SIZE) { + spd_err(spd, "Block is to large to read\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + ret = mei_spd_gpp_read(spd, cmd->gpp_read_req.offset, + cmd->gpp_read_resp.data, len); + + if (ret) { + spd_err(spd, "Failed to read from gpp ret = %d\n", ret); + return SPD_STATUS_GENERAL_FAILURE; + } + + spd_dbg(spd, "read %zd bytes of data\n", len); + + cmd->header.len = 
spd_cmd_size(gpp_read_rsp) + len; + + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_rpmb_read(struct mei_spd *spd, + struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + u8 *frame = cmd->rpmb_read.rpmb_frame; + + if (out_buf_sz != spd_cmd_rpmb_size(rpmb_read)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + if (mei_spd_rpmb_cmd_req(spd, RPMB_READ_DATA, frame)) + return SPD_STATUS_GENERAL_FAILURE; + + spd_dbg(spd, "read RPMB frame performed\n"); + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_rpmb_write(struct mei_spd *spd, + struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + u8 *frame = cmd->rpmb_write.rpmb_frame; + + if (out_buf_sz != spd_cmd_rpmb_size(rpmb_write)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + if (mei_spd_rpmb_cmd_req(spd, RPMB_WRITE_DATA, frame)) + return SPD_STATUS_GENERAL_FAILURE; + + spd_dbg(spd, "write RPMB frame performed\n"); + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_rpmb_get_counter(struct mei_spd *spd, + struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + u8 *frame = cmd->rpmb_get_counter.rpmb_frame; + + if (out_buf_sz != spd_cmd_rpmb_size(rpmb_get_counter)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + if (mei_spd_rpmb_cmd_req(spd, RPMB_WRITE_DATA, frame)) + return SPD_STATUS_GENERAL_FAILURE; + + spd_dbg(spd, "get RPMB counter performed\n"); + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_response(struct mei_spd *spd, ssize_t out_buf_sz) +{ + struct spd_cmd *cmd = (struct spd_cmd *)spd->buf; + u32 spd_cmd; + int ret; + + spd_cmd = cmd->header.command_type; + + spd_dbg(spd, "rsp [%d] %s : state [%d] %s\n", + spd_cmd, spd_cmd_str(spd_cmd), + spd->state, mei_spd_state_str(spd->state)); + + switch (spd_cmd) { + case SPD_INIT_CMD: + ret = mei_spd_cmd_init_rsp(spd, cmd, out_buf_sz); + if (ret) + break; + mutex_unlock(&spd->lock); + mei_spd_rpmb_init(spd); + mei_spd_gpp_init(spd); + mutex_lock(&spd->lock); + break; + default: + ret = -EINVAL; + spd_err(spd, "Wrong response command %d\n", spd_cmd); + break; + } + + return ret; +} + +/** + * mei_spd_cmd_request - dispatch command requests from the SPD device + * + * @spd: spd device + * @out_buf_sz: buffer size + * + * Return: (TBD) + */ +static int mei_spd_cmd_request(struct mei_spd *spd, ssize_t out_buf_sz) +{ + struct spd_cmd *cmd = (struct spd_cmd *)spd->buf; + ssize_t written; + u32 spd_cmd; + int ret; + + spd_cmd = cmd->header.command_type; + + spd_dbg(spd, "req [%d] %s : state [%d] %s\n", + spd_cmd, spd_cmd_str(spd_cmd), + spd->state, mei_spd_state_str(spd->state)); + + if (spd->state < MEI_SPD_STATE_RUNNING) { + spd_err(spd, "Wrong state %d\n", spd->state); + ret = SPD_STATUS_INVALID_COMMAND; + goto reply; + } + + switch (spd_cmd) { + case SPD_RPMB_WRITE_CMD: + ret = mei_spd_cmd_rpmb_write(spd, cmd, out_buf_sz); + break; + case SPD_RPMB_READ_CMD: + ret = mei_spd_cmd_rpmb_read(spd, cmd, out_buf_sz); + break; + case SPD_RPMB_GET_COUNTER_CMD: + ret = mei_spd_cmd_rpmb_get_counter(spd, cmd, out_buf_sz); + break; + case SPD_GPP_WRITE_CMD: + ret = mei_spd_cmd_gpp_write(spd, cmd, out_buf_sz); + break; + case SPD_GPP_READ_CMD: + ret = mei_spd_cmd_gpp_read(spd, cmd, out_buf_sz); + break; + case SPD_TRIM_CMD: + spd_err(spd, "Command %d is not supported\n", spd_cmd); + ret = SPD_STATUS_NOT_SUPPORTED; + break; + default: + spd_err(spd, "Wrong request command %d\n", spd_cmd); + ret = SPD_STATUS_INVALID_COMMAND; + break; + } +reply: + cmd->header.is_response = 1; + 
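	/*
+	 * The reply reuses the request buffer in place: only is_response,
+	 * status and, on failure, len are updated before it is sent back.
+	 */
+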
cmd->header.status = ret; + if (ret != SPD_STATUS_SUCCESS) + cmd->header.len = sizeof(struct spd_cmd_hdr); + + written = mei_cldev_send(spd->cldev, spd->buf, cmd->header.len); + if (written != cmd->header.len) { + ret = SPD_STATUS_GENERAL_FAILURE; + spd_err(spd, "Failed to send reply written = %zd\n", written); + } + + /* FIXME: translate ret to errno */ + if (ret) + return -EINVAL; + + return 0; +} + +ssize_t mei_spd_cmd(struct mei_spd *spd) +{ + struct spd_cmd *cmd = (struct spd_cmd *)spd->buf; + ssize_t out_buf_sz; + int ret; + + out_buf_sz = mei_cldev_recv(spd->cldev, spd->buf, spd->buf_sz); + if (out_buf_sz < 0) { + spd_err(spd, "failure in receive ret = %zd\n", out_buf_sz); + return out_buf_sz; + } + + if (out_buf_sz == 0) { + spd_err(spd, "received empty msg\n"); + return 0; + } + + /* check that we've received at least sizeof(header) */ + if (out_buf_sz < sizeof(struct spd_cmd_hdr)) { + spd_err(spd, "Request is too short\n"); + return -EFAULT; + } + + if (cmd->header.is_response) + ret = mei_spd_cmd_response(spd, out_buf_sz); + else + ret = mei_spd_cmd_request(spd, out_buf_sz); + + return ret; +} + +static void mei_spd_status_send_work(struct work_struct *work) +{ + struct mei_spd *spd = + container_of(work, struct mei_spd, status_send_w); + + mutex_lock(&spd->lock); + mei_spd_cmd_storage_status_req(spd); + mutex_unlock(&spd->lock); +} + +void mei_spd_free(struct mei_spd *spd) +{ + if (!spd) + return; + + cancel_work_sync(&spd->status_send_w); + + kfree(spd->buf); + kfree(spd); +} + +struct mei_spd *mei_spd_alloc(struct mei_cl_device *cldev) +{ + struct mei_spd *spd; + u8 *buf; + + spd = kzalloc(sizeof(*spd), GFP_KERNEL); + if (!spd) + return NULL; + + spd->buf_sz = sizeof(struct spd_cmd) + SPD_CLIENT_GPP_DATA_MAX_SIZE; + buf = kmalloc(spd->buf_sz, GFP_KERNEL); + if (!buf) + goto free; + + spd->cldev = cldev; + spd->buf = buf; + spd->state = MEI_SPD_STATE_INIT; + mutex_init(&spd->lock); + INIT_WORK(&spd->status_send_w, mei_spd_status_send_work); + + return spd; +free: + kfree(spd); + return NULL; +} diff --git a/drivers/misc/mei/spd/cmd.h b/drivers/misc/mei/spd/cmd.h new file mode 100644 index 000000000000..3f77550f44ab --- /dev/null +++ b/drivers/misc/mei/spd/cmd.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* + * Copyright (C) 2015-2018 Intel Corp. All rights reserved + */ +#ifndef _SPD_CMD_H +#define _SPD_CMD_H + +#include + +/** + * enum spd_cmd_type - available commands + * + * @SPD_NONE_CMD : Lower command sentinel. + * @SPD_START_STOP_CMD : start stop command (deprecated). [Host -> TEE] + * @SPD_RPMB_WRITE_CMD : RPMB write request. [TEE -> Host] + * @SPD_RPMB_READ_CMD : RPMB read request. [TEE -> Host] + * @SPD_RPMB_GET_COUNTER_CMD: get counter request [TEE -> Host] + * @SPD_GPP_WRITE_CMD : GPP write request. [TEE -> Host] + * @SPD_GPP_READ_CMD : GPP read request. [TEE -> Host] + * @SPD_TRIM_CMD : TRIM command [TEE -> Host] + * @SPD_INIT_CMD : initial handshake between host and fw. [Host -> TEE] + * @SPD_STORAGE_STATUS_CMD : the backing storage status. [Host -> TEE] + * @SPD_MAX_CMD: Upper command sentinel. 
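+ *
+ * Direction legend: [Host -> TEE] commands originate in this driver;
+ * [TEE -> Host] commands arrive from the FW and are served by
+ * mei_spd_cmd_request().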
+ */
+enum spd_cmd_type {
+	SPD_NONE_CMD = 0,
+	SPD_START_STOP_CMD,
+	SPD_RPMB_WRITE_CMD,
+	SPD_RPMB_READ_CMD,
+	SPD_RPMB_GET_COUNTER_CMD,
+	SPD_GPP_WRITE_CMD,
+	SPD_GPP_READ_CMD,
+	SPD_TRIM_CMD,
+	SPD_INIT_CMD,
+	SPD_STORAGE_STATUS_CMD,
+	SPD_MAX_CMD,
+};
+
+enum spd_status {
+	SPD_STATUS_SUCCESS = 0,
+	SPD_STATUS_GENERAL_FAILURE = 1,
+	SPD_STATUS_NOT_READY = 2,
+	SPD_STATUS_NOT_SUPPORTED = 3,
+	SPD_STATUS_INVALID_COMMAND = 4,
+};
+
+/**
+ * enum spd_storage_type - storage device type
+ *
+ * @SPD_TYPE_UNDEF: lower enum sentinel
+ * @SPD_TYPE_EMMC: eMMC device
+ * @SPD_TYPE_UFS: UFS device
+ * @SPD_TYPE_MAX: upper enum sentinel
+ */
+enum spd_storage_type {
+	SPD_TYPE_UNDEF = 0,
+	SPD_TYPE_EMMC = 1,
+	SPD_TYPE_UFS = 2,
+	SPD_TYPE_MAX
+};
+
+/**
+ * struct spd_cmd_hdr - Host Storage command header
+ *
+ * @command_type: a command from &enum spd_cmd_type
+ * @is_response: 1 == response, 0 == request
+ * @len: command length
+ * @status: command status
+ * @reserved: reserved
+ */
+struct spd_cmd_hdr {
+	u32 command_type : 7;
+	u32 is_response  : 1;
+	u32 len          : 13;
+	u32 status       : 8;
+	u32 reserved     : 3;
+} __packed;
+
+/**
+ * RPMB frame size as defined by the JEDEC spec
+ */
+#define SPD_CLIENT_RPMB_DATA_MAX_SIZE (512)
+
+/**
+ * struct spd_cmd_init_resp
+ * command_type == SPD_INIT_CMD
+ *
+ * @gpp_partition_id: GPP partition:
+ *          UFS: LUN number (0-7)
+ *          EMMC: 1-4
+ *          0xff: GPP not supported
+ * @type: storage hw type
+ *          SPD_TYPE_EMMC
+ *          SPD_TYPE_UFS
+ * @serial_no_sz: serial_no size
+ * @serial_no: device serial number
+ */
+struct spd_cmd_init_resp {
+	u32 gpp_partition_id;
+	u32 type;
+	u32 serial_no_sz;
+	u8  serial_no[0];
+};
+
+/**
+ * struct spd_cmd_storage_status_req
+ * command_type == SPD_STORAGE_STATUS_CMD
+ *
+ * @gpp_on: availability of the GPP backing storage
+ *          0 - GP partition is accessible
+ *          1 - GP partition is not accessible
+ * @rpmb_on: availability of the RPMB backing storage
+ *          0 - RPMB partition is accessible
+ *          1 - RPMB partition is not accessible
+ */
+struct spd_cmd_storage_status_req {
+	u32 gpp_on;
+	u32 rpmb_on;
+} __packed;
+
+/**
+ * struct spd_cmd_rpmb_write
+ * command_type == SPD_RPMB_WRITE_CMD
+ *
+ * @rpmb_frame: RPMB frames are a constant size (512 bytes)
+ */
+struct spd_cmd_rpmb_write {
+	u8 rpmb_frame[0];
+} __packed;
+
+/**
+ * struct spd_cmd_rpmb_read
+ * command_type == SPD_RPMB_READ_CMD
+ *
+ * @rpmb_frame: RPMB frames are a constant size (512 bytes)
+ */
+struct spd_cmd_rpmb_read {
+	u8 rpmb_frame[0];
+} __packed;
+
+/**
+ * struct spd_cmd_rpmb_get_counter
+ * command_type == SPD_RPMB_GET_COUNTER_CMD
+ *
+ * @rpmb_frame: frame containing the frame counter
+ */
+struct spd_cmd_rpmb_get_counter {
+	u8 rpmb_frame[0];
+} __packed;
+
+/**
+ * struct spd_cmd_gpp_write_req
+ * command_type == SPD_GPP_WRITE_CMD
+ *
+ * @offset: frame offset in partition
+ * @data: 4K page
+ */
+struct spd_cmd_gpp_write_req {
+	u32 offset;
+	u8  data[0];
+} __packed;
+
+/**
+ * struct spd_cmd_gpp_write_rsp
+ * command_type == SPD_GPP_WRITE_CMD
+ *
+ * @reserved: reserved
+ */
+struct spd_cmd_gpp_write_rsp {
+	u32 reserved[2];
+} __packed;
+
+/**
+ * struct spd_cmd_gpp_read_req
+ * command_type == SPD_GPP_READ_CMD
+ *
+ * @offset: offset of a frame on the GPP partition
+ * @size_to_read: data length to read (must be )
+ */
+struct spd_cmd_gpp_read_req {
+	u32 offset;
+	u32 size_to_read;
+} __packed;
+
+/**
+ * struct spd_cmd_gpp_read_rsp
+ * command_type == SPD_GPP_READ_CMD
+ *
+ * @reserved: reserved
+ * @data: data
+ */
+struct spd_cmd_gpp_read_rsp {
+	u32 reserved;
+	u8  data[0];
+} __packed;
+
+#define SPD_GPP_READ_DATA_LEN(cmd) ((cmd).header.len - \
+	(sizeof(struct spd_cmd_hdr) + \
+	 sizeof(struct spd_cmd_gpp_read_rsp)))
+
+#define SPD_GPP_WRITE_DATA_LEN(cmd) ((cmd).header.len - \
+	(sizeof(struct spd_cmd_hdr) + \
+	 sizeof(struct spd_cmd_gpp_write_req)))
+
+struct spd_cmd {
+	struct spd_cmd_hdr header;
+
+	union {
+		struct spd_cmd_rpmb_write rpmb_write;
+		struct spd_cmd_rpmb_read rpmb_read;
+		struct spd_cmd_rpmb_get_counter rpmb_get_counter;
+
+		struct spd_cmd_gpp_write_req gpp_write_req;
+		struct spd_cmd_gpp_write_rsp gpp_write_rsp;
+
+		struct spd_cmd_gpp_read_req gpp_read_req;
+		struct spd_cmd_gpp_read_rsp gpp_read_resp;
+
+		struct spd_cmd_init_resp init_rsp;
+		struct spd_cmd_storage_status_req status_req;
+	};
+} __packed;
+
+/* GPP Max data 4K */
+#define SPD_CLIENT_GPP_DATA_MAX_SIZE (4096)
+
+const char *spd_cmd_str(enum spd_cmd_type cmd);
+const char *mei_spd_dev_str(enum spd_storage_type type);
+
+#endif /* _SPD_CMD_H */
diff --git a/drivers/misc/mei/spd/debugfs.c b/drivers/misc/mei/spd/debugfs.c
new file mode 100644
index 000000000000..dfbb62a49fcc
--- /dev/null
+++ b/drivers/misc/mei/spd/debugfs.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/*
+ * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved.
+ */
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#include "cmd.h"
+#include "spd.h"
+
+static ssize_t mei_spd_dbgfs_read_info(struct file *fp, char __user *ubuf,
+				       size_t cnt, loff_t *ppos)
+{
+	struct mei_spd *spd = fp->private_data;
+	size_t bufsz = 4095;
+	char *buf;
+	int pos = 0;
+	int ret;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "DEV STATE: [%d] %s\n",
+			 spd->state, mei_spd_state_str(spd->state));
+	pos += scnprintf(buf + pos, bufsz - pos, "DEV TYPE : [%d] %s\n",
+			 spd->dev_type, mei_spd_dev_str(spd->dev_type));
+	pos += scnprintf(buf + pos, bufsz - pos, " ID SIZE : %d\n",
+			 spd->dev_id_sz);
+	pos += scnprintf(buf + pos, bufsz - pos, " ID      : '%s'\n", "N/A");
+	pos += scnprintf(buf + pos, bufsz - pos, "GPP\n");
+	pos += scnprintf(buf + pos, bufsz - pos, " id      : %d\n",
+			 spd->gpp_partition_id);
+	pos += scnprintf(buf + pos, bufsz - pos, " opened  : %1d\n",
+			 mei_spd_gpp_is_open(spd));
+
+	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations mei_spd_dbgfs_fops_info = {
+	.open = simple_open,
+	.read = mei_spd_dbgfs_read_info,
+	.llseek = generic_file_llseek,
+};
+
+void mei_spd_dbgfs_deregister(struct mei_spd *spd)
+{
+	if (!spd->dbgfs_dir)
+		return;
+	debugfs_remove_recursive(spd->dbgfs_dir);
+	spd->dbgfs_dir = NULL;
+}
+
+int mei_spd_dbgfs_register(struct mei_spd *spd, const char *name)
+{
+	struct dentry *dir, *f;
+
+	dir = debugfs_create_dir(name, NULL);
+	if (!dir)
+		return -ENOMEM;
+
+	spd->dbgfs_dir = dir;
+
+	f = debugfs_create_file("info", 0400, dir,
+				spd, &mei_spd_dbgfs_fops_info);
+	if (!f) {
+		spd_err(spd, "info: registration failed\n");
+		goto err;
+	}
+
+	return 0;
+err:
+	mei_spd_dbgfs_deregister(spd);
+	return -ENODEV;
+}
diff --git a/drivers/misc/mei/spd/gpp.c b/drivers/misc/mei/spd/gpp.c
new file mode 100644
index 000000000000..b5d1a27a50ee
--- /dev/null
+++ b/drivers/misc/mei/spd/gpp.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/*
+ * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved.
+ */ +#include +#include +#include +#include +#include +#include + +#include "cmd.h" +#include "spd.h" + +static struct page *page_read(struct address_space *mapping, int index) +{ + return read_mapping_page(mapping, index, NULL); +} + +static int mei_spd_bd_read(struct mei_spd *spd, loff_t from, size_t len, + size_t *retlen, u_char *buf) +{ + struct page *page; + int index = from >> PAGE_SHIFT; + int offset = from & (PAGE_SIZE - 1); + int cpylen; + + while (len) { + if ((offset + len) > PAGE_SIZE) + cpylen = PAGE_SIZE - offset; + else + cpylen = len; + len = len - cpylen; + + page = page_read(spd->gpp->bd_inode->i_mapping, index); + if (IS_ERR(page)) + return PTR_ERR(page); + + memcpy(buf, page_address(page) + offset, cpylen); + put_page(page); + + if (retlen) + *retlen += cpylen; + buf += cpylen; + offset = 0; + index++; + } + return 0; +} + +static int _mei_spd_bd_write(struct block_device *dev, const u_char *buf, + loff_t to, size_t len, size_t *retlen) +{ + struct page *page; + struct address_space *mapping = dev->bd_inode->i_mapping; + int index = to >> PAGE_SHIFT; /* page index */ + int offset = to & ~PAGE_MASK; /* page offset */ + int cpylen; + + while (len) { + if ((offset + len) > PAGE_SIZE) + cpylen = PAGE_SIZE - offset; + else + cpylen = len; + len = len - cpylen; + + page = page_read(mapping, index); + if (IS_ERR(page)) + return PTR_ERR(page); + + if (memcmp(page_address(page) + offset, buf, cpylen)) { + lock_page(page); + memcpy(page_address(page) + offset, buf, cpylen); + set_page_dirty(page); + unlock_page(page); + balance_dirty_pages_ratelimited(mapping); + } + put_page(page); + + if (retlen) + *retlen += cpylen; + + buf += cpylen; + offset = 0; + index++; + } + return 0; +} + +static int mei_spd_bd_write(struct mei_spd *spd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + int ret; + + ret = _mei_spd_bd_write(spd->gpp, buf, to, len, retlen); + if (ret > 0) + ret = 0; + + sync_blockdev(spd->gpp); + + return ret; +} + +static void mei_spd_bd_sync(struct mei_spd *spd) +{ + sync_blockdev(spd->gpp); +} + +#define GPP_FMODE (FMODE_WRITE | FMODE_READ | FMODE_EXCL) + +bool mei_spd_gpp_is_open(struct mei_spd *spd) +{ + struct request_queue *q; + + if (!spd->gpp) + return false; + + q = spd->gpp->bd_queue; + if (q && !blk_queue_stopped(q)) + return true; + + return false; +} + +static int mei_spd_gpp_open(struct mei_spd *spd, struct device *dev) +{ + int ret; + + if (spd->gpp) + return 0; + + spd->gpp = blkdev_get_by_dev(dev->devt, GPP_FMODE, spd); + if (IS_ERR(spd->gpp)) { + ret = PTR_ERR(spd->gpp); + spd->gpp = NULL; + spd_dbg(spd, "Can't get GPP block device %s ret = %d\n", + dev_name(dev), ret); + return ret; + } + + spd_dbg(spd, "gpp partition created\n"); + return 0; +} + +static int mei_spd_gpp_close(struct mei_spd *spd) +{ + if (!spd->gpp) + return 0; + + mei_spd_bd_sync(spd); + blkdev_put(spd->gpp, GPP_FMODE); + spd->gpp = NULL; + + spd_dbg(spd, "gpp partition removed\n"); + return 0; +} + +#define UFSHCD "ufshcd" +static bool mei_spd_lun_ufs_match(struct mei_spd *spd, struct device *dev) +{ + struct gendisk *disk = dev_to_disk(dev); + struct scsi_device *sdev; + + switch (disk->major) { + case SCSI_DISK0_MAJOR: + case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: + case SCSI_DISK8_MAJOR ... 
SCSI_DISK15_MAJOR: + break; + default: + return false; + } + + sdev = to_scsi_device(dev->parent); + + if (!sdev->host || + strncmp(sdev->host->hostt->name, UFSHCD, strlen(UFSHCD))) + return false; + + return sdev->lun == spd->gpp_partition_id; +} + +static bool mei_spd_gpp_mmc_match(struct mei_spd *spd, struct device *dev) +{ + struct gendisk *disk = dev_to_disk(dev); + int idx, part_id; + + if (disk->major != MMC_BLOCK_MAJOR) + return false; + + if (sscanf(disk->disk_name, "mmcblk%dgp%d", &idx, &part_id) != 2) + return false; + + return part_id == spd->gpp_partition_id - 1; +} + +static bool mei_spd_gpp_match(struct mei_spd *spd, struct device *dev) +{ + /* we are only interested in physical partitions */ + if (strncmp(dev->type->name, "disk", sizeof("disk"))) + return false; + + if (spd->dev_type == SPD_TYPE_EMMC) + return mei_spd_gpp_mmc_match(spd, dev); + else if (spd->dev_type == SPD_TYPE_UFS) + return mei_spd_lun_ufs_match(spd, dev); + else + return false; +} + +static int gpp_add_device(struct device *dev, struct class_interface *intf) +{ + struct mei_spd *spd = container_of(intf, struct mei_spd, gpp_interface); + + if (!mei_spd_gpp_match(spd, dev)) + return 0; + + mutex_lock(&spd->lock); + if (mei_spd_gpp_open(spd, dev)) { + mutex_unlock(&spd->lock); + return 0; + } + + schedule_work(&spd->status_send_w); + mutex_unlock(&spd->lock); + + return 0; +} + +static void gpp_remove_device(struct device *dev, struct class_interface *intf) +{ + struct mei_spd *spd = container_of(intf, struct mei_spd, gpp_interface); + + if (!mei_spd_gpp_match(spd, dev)) + return; + + mutex_lock(&spd->lock); + if (mei_spd_gpp_close(spd)) { + mutex_unlock(&spd->lock); + return; + } + + if (spd->state != MEI_SPD_STATE_STOPPING) + schedule_work(&spd->status_send_w); + mutex_unlock(&spd->lock); +} + +int mei_spd_gpp_read(struct mei_spd *spd, size_t off, u8 *data, size_t size) +{ + int ret; + + spd_dbg(spd, "GPP read offset = %zx, size = %zx\n", off, size); + + if (!mei_spd_gpp_is_open(spd)) + return -ENODEV; + + ret = mei_spd_bd_read(spd, off, size, NULL, data); + if (ret) + spd_err(spd, "GPP read failed ret = %d\n", ret); + + return ret; +} + +int mei_spd_gpp_write(struct mei_spd *spd, size_t off, u8 *data, size_t size) +{ + int ret; + + spd_dbg(spd, "GPP write offset = %zx, size = %zx\n", off, size); + + if (!mei_spd_gpp_is_open(spd)) + return -ENODEV; + + ret = mei_spd_bd_write(spd, off, size, NULL, data); + if (ret) + spd_err(spd, "GPP write failed ret = %d\n", ret); + + return ret; +} + +void mei_spd_gpp_prepare(struct mei_spd *spd) +{ + spd->gpp_interface.add_dev = gpp_add_device; + spd->gpp_interface.remove_dev = gpp_remove_device; + spd->gpp_interface.class = &block_class; +} + +int mei_spd_gpp_init(struct mei_spd *spd) +{ + int ret; + + ret = class_interface_register(&spd->gpp_interface); + if (ret) + spd_err(spd, "Can't register interface\n"); + return ret; +} + +void mei_spd_gpp_exit(struct mei_spd *spd) +{ + class_interface_unregister(&spd->gpp_interface); +} diff --git a/drivers/misc/mei/spd/main.c b/drivers/misc/mei/spd/main.c new file mode 100644 index 000000000000..468cceffb7a0 --- /dev/null +++ b/drivers/misc/mei/spd/main.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved. 
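+ *
+ * Storage Proxy (SPD) mei client driver: binds to the SPD firmware
+ * client over the mei bus, registers the rx callback and runs the
+ * initial SPD_INIT_CMD handshake.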
+ */
+#include <linux/module.h>
+
+#include "spd.h"
+
+static void mei_spd_rx_cb(struct mei_cl_device *cldev)
+{
+	struct mei_spd *spd = mei_cldev_get_drvdata(cldev);
+
+	mutex_lock(&spd->lock);
+	mei_spd_cmd(spd);
+	mutex_unlock(&spd->lock);
+}
+
+static int mei_spd_probe(struct mei_cl_device *cldev,
+			 const struct mei_cl_device_id *id)
+{
+	struct mei_spd *spd;
+	u8 ver = mei_cldev_ver(cldev);
+	int ret;
+
+	dev_dbg(&cldev->dev, "probing mei spd ver = %d\n", ver);
+
+	if (ver < 2) {
+		dev_warn(&cldev->dev, "unsupported protocol version %d\n", ver);
+		return -ENODEV;
+	}
+
+	spd = mei_spd_alloc(cldev);
+	if (!spd)
+		return -ENOMEM;
+
+	mei_cldev_set_drvdata(cldev, spd);
+
+	ret = mei_spd_dbgfs_register(spd, "spd");
+	if (ret)
+		goto free;
+
+	ret = mei_cldev_enable(cldev);
+	if (ret < 0) {
+		dev_err(&cldev->dev, "Could not enable device ret = %d\n", ret);
+		goto free;
+	}
+
+	ret = mei_cldev_register_rx_cb(cldev, mei_spd_rx_cb);
+	if (ret) {
+		dev_err(&cldev->dev, "Error registering event %d\n", ret);
+		goto disable;
+	}
+
+	spd_dbg(spd, "protocol version %d\n", ver);
+	mei_spd_gpp_prepare(spd);
+	mei_spd_rpmb_prepare(spd);
+	mutex_lock(&spd->lock);
+	ret = mei_spd_cmd_init_req(spd);
+	mutex_unlock(&spd->lock);
+	if (ret) {
+		dev_err(&cldev->dev, "Could not start ret = %d\n", ret);
+		goto disable;
+	}
+
+	return 0;
+
+disable:
+	mei_cldev_disable(cldev);
+
+free:
+	mei_spd_dbgfs_deregister(spd);
+	mei_cldev_set_drvdata(cldev, NULL);
+	mei_spd_free(spd);
+	return ret;
+}
+
+static int mei_spd_remove(struct mei_cl_device *cldev)
+{
+	struct mei_spd *spd = mei_cldev_get_drvdata(cldev);
+
+	if (spd->state == MEI_SPD_STATE_RUNNING) {
+		spd->state = MEI_SPD_STATE_STOPPING;
+		mei_spd_gpp_exit(spd);
+		mei_spd_rpmb_exit(spd);
+		mutex_lock(&spd->lock);
+		mei_spd_cmd_storage_status_req(spd);
+		mutex_unlock(&spd->lock);
+	}
+
+	mei_cldev_disable(cldev);
+	mei_spd_dbgfs_deregister(spd);
+	mei_cldev_set_drvdata(cldev, NULL);
+	mei_spd_free(spd);
+
+	return 0;
+}
+
+#define MEI_SPD_UUID UUID_LE(0x2a39291f, 0x5551, 0x482f, \
+			     0x99, 0xcb, 0x9e, 0x22, 0x74, 0x97, 0x8c, 0xa8)
+
+static struct mei_cl_device_id mei_spd_tbl[] = {
+	{ .uuid = MEI_SPD_UUID, .version = MEI_CL_VERSION_ANY },
+	/* required last entry */
+	{ }
+};
+MODULE_DEVICE_TABLE(mei, mei_spd_tbl);
+
+static struct mei_cl_driver mei_spd_driver = {
+	.id_table = mei_spd_tbl,
+	.name = "mei_spd",
+
+	.probe = mei_spd_probe,
+	.remove = mei_spd_remove,
+};
+
+module_mei_cl_driver(mei_spd_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Storage Proxy driver based on mei bus");
diff --git a/drivers/misc/mei/spd/rpmb.c b/drivers/misc/mei/spd/rpmb.c
new file mode 100644
index 000000000000..b74d0cd8f802
--- /dev/null
+++ b/drivers/misc/mei/spd/rpmb.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/*
+ * Intel Host Storage Interface Linux driver
+ * Copyright (c) 2015 - 2018, Intel Corporation.
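+ *
+ * RPMB proxy: tracks the backing rpmb_dev through a class interface and
+ * forwards RPMB frames from the firmware with rpmb_cmd_seq().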
+ */ + +#include "cmd.h" +#include "spd.h" +#include + +static int mei_spd_rpmb_start(struct mei_spd *spd, struct rpmb_dev *rdev) +{ + if (spd->rdev == rdev) + return 0; + + if (spd->rdev) { + spd_warn(spd, "rpmb device already registered\n"); + return -EEXIST; + } + + spd->rdev = rpmb_dev_get(rdev); + spd_dbg(spd, "rpmb partition created\n"); + return 0; +} + +static int mei_spd_rpmb_stop(struct mei_spd *spd, struct rpmb_dev *rdev) +{ + if (!spd->rdev) { + spd_dbg(spd, "Already stopped\n"); + return -EPROTO; + } + + if (rdev && spd->rdev != rdev) { + spd_dbg(spd, "Wrong RPMB on stop\n"); + return -EINVAL; + } + + rpmb_dev_put(spd->rdev); + spd->rdev = NULL; + + spd_dbg(spd, "rpmb partition removed\n"); + return 0; +} + +static int mei_spd_rpmb_match(struct mei_spd *spd, struct rpmb_dev *rdev) +{ + if (spd->dev_id_sz && rdev->ops->dev_id) { + if (rdev->ops->dev_id_len != spd->dev_id_sz || + memcmp(rdev->ops->dev_id, spd->dev_id, + rdev->ops->dev_id_len)) { + spd_dbg(spd, "ignore request for another rpmb\n"); + /* return 0; FW sends garbage now, ignore it */ + } + } + + switch (rdev->ops->type) { + case RPMB_TYPE_EMMC: + if (spd->dev_type != SPD_TYPE_EMMC) + return 0; + break; + case RPMB_TYPE_UFS: + if (spd->dev_type != SPD_TYPE_UFS) + return 0; + break; + default: + return 0; + } + + return 1; +} + +static int rpmb_add_device(struct device *dev, struct class_interface *intf) +{ + struct mei_spd *spd = + container_of(intf, struct mei_spd, rpmb_interface); + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + if (!mei_spd_rpmb_match(spd, rdev)) + return 0; + + mutex_lock(&spd->lock); + if (mei_spd_rpmb_start(spd, rdev)) { + mutex_unlock(&spd->lock); + return 0; + } + + schedule_work(&spd->status_send_w); + mutex_unlock(&spd->lock); + + return 0; +} + +static void rpmb_remove_device(struct device *dev, struct class_interface *intf) +{ + struct mei_spd *spd = + container_of(intf, struct mei_spd, rpmb_interface); + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + if (!mei_spd_rpmb_match(spd, rdev)) + return; + + mutex_lock(&spd->lock); + if (mei_spd_rpmb_stop(spd, rdev)) { + mutex_unlock(&spd->lock); + return; + } + + if (spd->state != MEI_SPD_STATE_STOPPING) + schedule_work(&spd->status_send_w); + mutex_unlock(&spd->lock); +} + +void mei_spd_rpmb_prepare(struct mei_spd *spd) +{ + spd->rpmb_interface.add_dev = rpmb_add_device; + spd->rpmb_interface.remove_dev = rpmb_remove_device; + spd->rpmb_interface.class = &rpmb_class; +} + +/** + * mei_spd_rpmb_init - init RPMB connection + * + * @spd: device + * + * Locking: spd->lock should not be held + * Returns: 0 if initialized successfully, <0 otherwise + */ +int mei_spd_rpmb_init(struct mei_spd *spd) +{ + int ret; + + ret = class_interface_register(&spd->rpmb_interface); + if (ret) + spd_err(spd, "Can't register interface\n"); + return ret; +} + +/** + * mei_spd_rpmb_exit - clean RPMB connection + * + * @spd: device + * + * Locking: spd->lock should not be held + */ +void mei_spd_rpmb_exit(struct mei_spd *spd) +{ + class_interface_unregister(&spd->rpmb_interface); +} + +int mei_spd_rpmb_cmd_req(struct mei_spd *spd, u16 req, void *buf) +{ + struct rpmb_cmd cmd[3]; + struct rpmb_frame_jdec *frame_res = NULL; + u32 flags; + unsigned int i; + int ret; + + if (!spd->rdev) { + spd_err(spd, "RPMB not ready\n"); + return -ENODEV; + } + + i = 0; + flags = RPMB_F_WRITE; + if (req == RPMB_WRITE_DATA || req == RPMB_PROGRAM_KEY) + flags |= RPMB_F_REL_WRITE; + cmd[i].flags = flags; + cmd[i].nframes = 1; + cmd[i].frames = buf; + i++; + + if (req == RPMB_WRITE_DATA || req 
== RPMB_PROGRAM_KEY) { + frame_res = kzalloc(sizeof(*frame_res), GFP_KERNEL); + if (!frame_res) + return -ENOMEM; + frame_res->req_resp = cpu_to_be16(RPMB_RESULT_READ); + cmd[i].flags = RPMB_F_WRITE; + cmd[i].nframes = 1; + cmd[i].frames = frame_res; + i++; + } + + cmd[i].flags = 0; + cmd[i].nframes = 1; + cmd[i].frames = buf; + i++; + + ret = rpmb_cmd_seq(spd->rdev, cmd, i); + if (ret) + spd_err(spd, "RPMB req failed ret = %d\n", ret); + + kfree(frame_res); + return ret; +} + +bool mei_spd_rpmb_is_open(struct mei_spd *spd) +{ + return !!spd->rdev; +} diff --git a/drivers/misc/mei/spd/spd.h b/drivers/misc/mei/spd/spd.h new file mode 100644 index 000000000000..b919a5cb7a4c --- /dev/null +++ b/drivers/misc/mei/spd/spd.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* + * Copyright (C) 2015-2018 Intel Corp. All rights reserved + */ +#ifndef _MEI_SPD_H +#define _MEI_SPD_H + +#include +#include +#include + +enum mei_spd_state { + MEI_SPD_STATE_INIT, + MEI_SPD_STATE_INIT_WAIT, + MEI_SPD_STATE_INIT_DONE, + MEI_SPD_STATE_RUNNING, + MEI_SPD_STATE_STOPPING, +}; + +/** + * struct mei_spd - spd device struct + * + * @cldev: client bus device + * @gpp: GPP partition block device + * @gpp_partition_id: GPP partition id (1-6) + * @gpp_interface: gpp class interface for discovery + * @dev_type: storage device type + * @dev_id_sz: device id size + * @dev_id: device id string + * @rdev: RPMB device + * @rpmb_interface: gpp class interface for discovery + * @lock: mutex to sync request processing + * @state: driver state + * @status_send_w: workitem for sending status to the FW + * @buf_sz: receive/transmit buffer allocated size + * @buf: receive/transmit buffer + * @dbgfs_dir: debugfs directory entry + */ +struct mei_spd { + struct mei_cl_device *cldev; + struct block_device *gpp; + u32 gpp_partition_id; + struct class_interface gpp_interface; + u32 dev_type; + u32 dev_id_sz; + u8 *dev_id; + struct rpmb_dev *rdev; + struct class_interface rpmb_interface; + struct mutex lock; /* mutex to sync request processing */ + enum mei_spd_state state; + struct work_struct status_send_w; + size_t buf_sz; + u8 *buf; + +#if IS_ENABLED(CONFIG_DEBUG_FS) + struct dentry *dbgfs_dir; +#endif /* CONFIG_DEBUG_FS */ +}; + +struct mei_spd *mei_spd_alloc(struct mei_cl_device *cldev); +void mei_spd_free(struct mei_spd *spd); + +int mei_spd_cmd_init_req(struct mei_spd *spd); +int mei_spd_cmd_storage_status_req(struct mei_spd *spd); +ssize_t mei_spd_cmd(struct mei_spd *spd); + +void mei_spd_gpp_prepare(struct mei_spd *spd); +bool mei_spd_gpp_is_open(struct mei_spd *spd); +int mei_spd_gpp_init(struct mei_spd *spd); +void mei_spd_gpp_exit(struct mei_spd *spd); +int mei_spd_gpp_read(struct mei_spd *spd, size_t off, u8 *data, size_t size); +int mei_spd_gpp_write(struct mei_spd *spd, size_t off, u8 *data, size_t size); + +void mei_spd_rpmb_prepare(struct mei_spd *spd); +bool mei_spd_rpmb_is_open(struct mei_spd *spd); +int mei_spd_rpmb_init(struct mei_spd *spd); +void mei_spd_rpmb_exit(struct mei_spd *spd); +int mei_spd_rpmb_cmd_req(struct mei_spd *spd, u16 req_type, void *buf); + +#if IS_ENABLED(CONFIG_DEBUG_FS) +int mei_spd_dbgfs_register(struct mei_spd *spd, const char *name); +void mei_spd_dbgfs_deregister(struct mei_spd *spd); +#else +static inline int mei_spd_dbgfs_register(struct mei_spd *spd, const char *name) +{ + return 0; +} + +static inline void mei_spd_dbgfs_deregister(struct mei_spd *spd) +{ +} + +#endif /* CONFIG_DEBUG_FS */ + +const char *mei_spd_state_str(enum mei_spd_state state); + +#define 
spd_err(spd, fmt, ...) \ + dev_err(&(spd)->cldev->dev, fmt, ##__VA_ARGS__) +#define spd_warn(spd, fmt, ...) \ + dev_warn(&(spd)->cldev->dev, fmt, ##__VA_ARGS__) +#define spd_dbg(spd, fmt, ...) \ + dev_dbg(&(spd)->cldev->dev, fmt, ##__VA_ARGS__) + +#endif /* _MEI_SPD_H */ diff --git a/drivers/misc/memory_state_time.c b/drivers/misc/memory_state_time.c new file mode 100644 index 000000000000..ba94dcf09169 --- /dev/null +++ b/drivers/misc/memory_state_time.c @@ -0,0 +1,462 @@ +/* drivers/misc/memory_state_time.c + * + * Copyright (C) 2016 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define KERNEL_ATTR_RO(_name) \ +static struct kobj_attribute _name##_attr = __ATTR_RO(_name) + +#define KERNEL_ATTR_RW(_name) \ +static struct kobj_attribute _name##_attr = \ + __ATTR(_name, 0644, _name##_show, _name##_store) + +#define FREQ_HASH_BITS 4 +DECLARE_HASHTABLE(freq_hash_table, FREQ_HASH_BITS); + +static DEFINE_MUTEX(mem_lock); + +#define TAG "memory_state_time" +#define BW_NODE "/soc/memory-state-time" +#define FREQ_TBL "freq-tbl" +#define BW_TBL "bw-buckets" +#define NUM_SOURCES "num-sources" + +#define LOWEST_FREQ 2 + +static int curr_bw; +static int curr_freq; +static u32 *bw_buckets; +static u32 *freq_buckets; +static int num_freqs; +static int num_buckets; +static int registered_bw_sources; +static u64 last_update; +static bool init_success; +static struct workqueue_struct *memory_wq; +static u32 num_sources = 10; +static int *bandwidths; + +struct freq_entry { + int freq; + u64 *buckets; /* Bandwidth buckets. 
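+			      One counter of accumulated time (in ms) per
+			      bandwidth bucket, indexed by find_bucket().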
*/ + struct hlist_node hash; +}; + +struct queue_container { + struct work_struct update_state; + int value; + u64 time_now; + int id; + struct mutex *lock; +}; + +static int find_bucket(int bw) +{ + int i; + + if (bw_buckets != NULL) { + for (i = 0; i < num_buckets; i++) { + if (bw_buckets[i] > bw) { + pr_debug("Found bucket %d for bandwidth %d\n", + i, bw); + return i; + } + } + return num_buckets - 1; + } + return 0; +} + +static u64 get_time_diff(u64 time_now) +{ + u64 ms; + + ms = time_now - last_update; + last_update = time_now; + return ms; +} + +static ssize_t show_stat_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int i, j; + int len = 0; + struct freq_entry *freq_entry; + + for (i = 0; i < num_freqs; i++) { + hash_for_each_possible(freq_hash_table, freq_entry, hash, + freq_buckets[i]) { + if (freq_entry->freq == freq_buckets[i]) { + len += scnprintf(buf + len, PAGE_SIZE - len, + "%d ", freq_buckets[i]); + if (len >= PAGE_SIZE) + break; + for (j = 0; j < num_buckets; j++) { + len += scnprintf(buf + len, + PAGE_SIZE - len, + "%llu ", + freq_entry->buckets[j]); + } + len += scnprintf(buf + len, PAGE_SIZE - len, + "\n"); + } + } + } + pr_debug("Current Time: %llu\n", ktime_get_boot_ns()); + return len; +} +KERNEL_ATTR_RO(show_stat); + +static void update_table(u64 time_now) +{ + struct freq_entry *freq_entry; + + pr_debug("Last known bw %d freq %d\n", curr_bw, curr_freq); + hash_for_each_possible(freq_hash_table, freq_entry, hash, curr_freq) { + if (curr_freq == freq_entry->freq) { + freq_entry->buckets[find_bucket(curr_bw)] + += get_time_diff(time_now); + break; + } + } +} + +static bool freq_exists(int freq) +{ + int i; + + for (i = 0; i < num_freqs; i++) { + if (freq == freq_buckets[i]) + return true; + } + return false; +} + +static int calculate_total_bw(int bw, int index) +{ + int i; + int total_bw = 0; + + pr_debug("memory_state_time New bw %d for id %d\n", bw, index); + bandwidths[index] = bw; + for (i = 0; i < registered_bw_sources; i++) + total_bw += bandwidths[i]; + return total_bw; +} + +static void freq_update_do_work(struct work_struct *work) +{ + struct queue_container *freq_state_update + = container_of(work, struct queue_container, + update_state); + if (freq_state_update) { + mutex_lock(&mem_lock); + update_table(freq_state_update->time_now); + curr_freq = freq_state_update->value; + mutex_unlock(&mem_lock); + kfree(freq_state_update); + } +} + +static void bw_update_do_work(struct work_struct *work) +{ + struct queue_container *bw_state_update + = container_of(work, struct queue_container, + update_state); + if (bw_state_update) { + mutex_lock(&mem_lock); + update_table(bw_state_update->time_now); + curr_bw = calculate_total_bw(bw_state_update->value, + bw_state_update->id); + mutex_unlock(&mem_lock); + kfree(bw_state_update); + } +} + +static void memory_state_freq_update(struct memory_state_update_block *ub, + int value) +{ + if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) { + if (freq_exists(value) && init_success) { + struct queue_container *freq_container + = kmalloc(sizeof(struct queue_container), + GFP_KERNEL); + if (!freq_container) + return; + INIT_WORK(&freq_container->update_state, + freq_update_do_work); + freq_container->time_now = ktime_get_boot_ns(); + freq_container->value = value; + pr_debug("Scheduling freq update in work queue\n"); + queue_work(memory_wq, &freq_container->update_state); + } else { + pr_debug("Freq does not exist.\n"); + } + } +} + +static void memory_state_bw_update(struct memory_state_update_block *ub, + 
int value) +{ + if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) { + if (init_success) { + struct queue_container *bw_container + = kmalloc(sizeof(struct queue_container), + GFP_KERNEL); + if (!bw_container) + return; + INIT_WORK(&bw_container->update_state, + bw_update_do_work); + bw_container->time_now = ktime_get_boot_ns(); + bw_container->value = value; + bw_container->id = ub->id; + pr_debug("Scheduling bandwidth update in work queue\n"); + queue_work(memory_wq, &bw_container->update_state); + } + } +} + +struct memory_state_update_block *memory_state_register_frequency_source(void) +{ + struct memory_state_update_block *block; + + if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) { + pr_debug("Allocating frequency source\n"); + block = kmalloc(sizeof(struct memory_state_update_block), + GFP_KERNEL); + if (!block) + return NULL; + block->update_call = memory_state_freq_update; + return block; + } + pr_err("Config option disabled.\n"); + return NULL; +} +EXPORT_SYMBOL_GPL(memory_state_register_frequency_source); + +struct memory_state_update_block *memory_state_register_bandwidth_source(void) +{ + struct memory_state_update_block *block; + + if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) { + pr_debug("Allocating bandwidth source %d\n", + registered_bw_sources); + block = kmalloc(sizeof(struct memory_state_update_block), + GFP_KERNEL); + if (!block) + return NULL; + block->update_call = memory_state_bw_update; + if (registered_bw_sources < num_sources) { + block->id = registered_bw_sources++; + } else { + pr_err("Unable to allocate source; max number reached\n"); + kfree(block); + return NULL; + } + return block; + } + pr_err("Config option disabled.\n"); + return NULL; +} +EXPORT_SYMBOL_GPL(memory_state_register_bandwidth_source); + +/* Buckets are designated by their maximum. + * Returns the buckets decided by the capability of the device. + */ +static int get_bw_buckets(struct device *dev) +{ + int ret, lenb; + struct device_node *node = dev->of_node; + + of_property_read_u32(node, NUM_SOURCES, &num_sources); + if (!of_find_property(node, BW_TBL, &lenb)) { + pr_err("Missing %s property\n", BW_TBL); + return -ENODATA; + } + + bandwidths = devm_kzalloc(dev, + sizeof(*bandwidths) * num_sources, GFP_KERNEL); + if (!bandwidths) + return -ENOMEM; + lenb /= sizeof(*bw_buckets); + bw_buckets = devm_kzalloc(dev, lenb * sizeof(*bw_buckets), + GFP_KERNEL); + if (!bw_buckets) { + devm_kfree(dev, bandwidths); + return -ENOMEM; + } + ret = of_property_read_u32_array(node, BW_TBL, bw_buckets, + lenb); + if (ret < 0) { + devm_kfree(dev, bandwidths); + devm_kfree(dev, bw_buckets); + pr_err("Unable to read bandwidth table from device tree.\n"); + return ret; + } + + curr_bw = 0; + num_buckets = lenb; + return 0; +} + +/* Adds struct freq_entry nodes to the hashtable for each compatible frequency. + * Returns the supported number of frequencies. 
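+ * (The count is stored in num_freqs; the function itself returns 0 on
+ * success or a negative errno.)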
+ */ +static int freq_buckets_init(struct device *dev) +{ + struct freq_entry *freq_entry; + int i; + int ret, lenf; + struct device_node *node = dev->of_node; + + if (!of_find_property(node, FREQ_TBL, &lenf)) { + pr_err("Missing %s property\n", FREQ_TBL); + return -ENODATA; + } + + lenf /= sizeof(*freq_buckets); + freq_buckets = devm_kzalloc(dev, lenf * sizeof(*freq_buckets), + GFP_KERNEL); + if (!freq_buckets) + return -ENOMEM; + pr_debug("freqs found len %d\n", lenf); + ret = of_property_read_u32_array(node, FREQ_TBL, freq_buckets, + lenf); + if (ret < 0) { + devm_kfree(dev, freq_buckets); + pr_err("Unable to read frequency table from device tree.\n"); + return ret; + } + pr_debug("ret freq %d\n", ret); + + num_freqs = lenf; + curr_freq = freq_buckets[LOWEST_FREQ]; + + for (i = 0; i < num_freqs; i++) { + freq_entry = devm_kzalloc(dev, sizeof(struct freq_entry), + GFP_KERNEL); + if (!freq_entry) + return -ENOMEM; + freq_entry->buckets = devm_kzalloc(dev, sizeof(u64)*num_buckets, + GFP_KERNEL); + if (!freq_entry->buckets) { + devm_kfree(dev, freq_entry); + return -ENOMEM; + } + pr_debug("memory_state_time Adding freq to ht %d\n", + freq_buckets[i]); + freq_entry->freq = freq_buckets[i]; + hash_add(freq_hash_table, &freq_entry->hash, freq_buckets[i]); + } + return 0; +} + +struct kobject *memory_kobj; +EXPORT_SYMBOL_GPL(memory_kobj); + +static struct attribute *memory_attrs[] = { + &show_stat_attr.attr, + NULL +}; + +static struct attribute_group memory_attr_group = { + .attrs = memory_attrs, +}; + +static int memory_state_time_probe(struct platform_device *pdev) +{ + int error; + + error = get_bw_buckets(&pdev->dev); + if (error) + return error; + error = freq_buckets_init(&pdev->dev); + if (error) + return error; + last_update = ktime_get_boot_ns(); + init_success = true; + + pr_debug("memory_state_time initialized with num_freqs %d\n", + num_freqs); + return 0; +} + +static const struct of_device_id match_table[] = { + { .compatible = "memory-state-time" }, + {} +}; + +static struct platform_driver memory_state_time_driver = { + .probe = memory_state_time_probe, + .driver = { + .name = "memory-state-time", + .of_match_table = match_table, + .owner = THIS_MODULE, + }, +}; + +static int __init memory_state_time_init(void) +{ + int error; + + hash_init(freq_hash_table); + memory_wq = create_singlethread_workqueue("memory_wq"); + if (!memory_wq) { + pr_err("Unable to create workqueue.\n"); + return -EINVAL; + } + /* + * Create sys/kernel directory for memory_state_time. 
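+	 * The stats are then exposed to userspace as
+	 * /sys/kernel/memory_state_time/show_stat.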
+ */ + memory_kobj = kobject_create_and_add(TAG, kernel_kobj); + if (!memory_kobj) { + pr_err("Unable to allocate memory_kobj for sysfs directory.\n"); + error = -ENOMEM; + goto wq; + } + error = sysfs_create_group(memory_kobj, &memory_attr_group); + if (error) { + pr_err("Unable to create sysfs folder.\n"); + goto kobj; + } + + error = platform_driver_register(&memory_state_time_driver); + if (error) { + pr_err("Unable to register memory_state_time platform driver.\n"); + goto group; + } + return 0; + +group: sysfs_remove_group(memory_kobj, &memory_attr_group); +kobj: kobject_put(memory_kobj); +wq: destroy_workqueue(memory_wq); + return error; +} +module_init(memory_state_time_init); diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index deb203026496..e089bb6dde3a 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -533,6 +533,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, test->base = test->bar[test_reg_bar]; if (!test->base) { + err = -ENOMEM; dev_err(dev, "Cannot perform PCI test without BAR%d\n", test_reg_bar); goto err_iounmap; @@ -542,6 +543,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL); if (id < 0) { + err = id; dev_err(dev, "unable to get id\n"); goto err_iounmap; } @@ -588,6 +590,8 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev) if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1) return; + if (id < 0) + return; misc_deregister(&test->miscdev); ida_simple_remove(&pci_endpoint_test_ida, id); diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c index eda38cbe8530..41f2a9f6851d 100644 --- a/drivers/misc/pti.c +++ b/drivers/misc/pti.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c index fc0415771c00..4dd0d868ff88 100644 --- a/drivers/misc/sram.c +++ b/drivers/misc/sram.c @@ -407,13 +407,20 @@ static int sram_probe(struct platform_device *pdev) if (init_func) { ret = init_func(); if (ret) - return ret; + goto err_disable_clk; } dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n", gen_pool_size(sram->pool) / 1024, sram->virt_base); return 0; + +err_disable_clk: + if (sram->clk) + clk_disable_unprepare(sram->clk); + sram_free_partitions(sram); + + return ret; } static int sram_remove(struct platform_device *pdev) diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c new file mode 100644 index 000000000000..88dc1cd3a204 --- /dev/null +++ b/drivers/misc/uid_sys_stats.c @@ -0,0 +1,703 @@ +/* drivers/misc/uid_sys_stats.c + * + * Copyright (C) 2014 - 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define UID_HASH_BITS 10 +DECLARE_HASHTABLE(hash_table, UID_HASH_BITS); + +static DEFINE_RT_MUTEX(uid_lock); +static struct proc_dir_entry *cpu_parent; +static struct proc_dir_entry *io_parent; +static struct proc_dir_entry *proc_parent; + +struct io_stats { + u64 read_bytes; + u64 write_bytes; + u64 rchar; + u64 wchar; + u64 fsync; +}; + +#define UID_STATE_FOREGROUND 0 +#define UID_STATE_BACKGROUND 1 +#define UID_STATE_BUCKET_SIZE 2 + +#define UID_STATE_TOTAL_CURR 2 +#define UID_STATE_TOTAL_LAST 3 +#define UID_STATE_DEAD_TASKS 4 +#define UID_STATE_SIZE 5 + +#define MAX_TASK_COMM_LEN 256 + +struct task_entry { + char comm[MAX_TASK_COMM_LEN]; + pid_t pid; + struct io_stats io[UID_STATE_SIZE]; + struct hlist_node hash; +}; + +struct uid_entry { + uid_t uid; + u64 utime; + u64 stime; + u64 active_utime; + u64 active_stime; + int state; + struct io_stats io[UID_STATE_SIZE]; + struct hlist_node hash; +#ifdef CONFIG_UID_SYS_STATS_DEBUG + DECLARE_HASHTABLE(task_entries, UID_HASH_BITS); +#endif +}; + +static u64 compute_write_bytes(struct task_struct *task) +{ + if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes) + return 0; + + return task->ioac.write_bytes - task->ioac.cancelled_write_bytes; +} + +static void compute_io_bucket_stats(struct io_stats *io_bucket, + struct io_stats *io_curr, + struct io_stats *io_last, + struct io_stats *io_dead) +{ + /* tasks could switch to another uid group, but its io_last in the + * previous uid group could still be positive. + * therefore before each update, do an overflow check first + */ + int64_t delta; + + delta = io_curr->read_bytes + io_dead->read_bytes - + io_last->read_bytes; + io_bucket->read_bytes += delta > 0 ? delta : 0; + delta = io_curr->write_bytes + io_dead->write_bytes - + io_last->write_bytes; + io_bucket->write_bytes += delta > 0 ? delta : 0; + delta = io_curr->rchar + io_dead->rchar - io_last->rchar; + io_bucket->rchar += delta > 0 ? delta : 0; + delta = io_curr->wchar + io_dead->wchar - io_last->wchar; + io_bucket->wchar += delta > 0 ? delta : 0; + delta = io_curr->fsync + io_dead->fsync - io_last->fsync; + io_bucket->fsync += delta > 0 ? 
delta : 0; + + io_last->read_bytes = io_curr->read_bytes; + io_last->write_bytes = io_curr->write_bytes; + io_last->rchar = io_curr->rchar; + io_last->wchar = io_curr->wchar; + io_last->fsync = io_curr->fsync; + + memset(io_dead, 0, sizeof(struct io_stats)); +} + +#ifdef CONFIG_UID_SYS_STATS_DEBUG +static void get_full_task_comm(struct task_entry *task_entry, + struct task_struct *task) +{ + int i = 0, offset = 0, len = 0; + /* save one byte for terminating null character */ + int unused_len = MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1; + char buf[unused_len]; + struct mm_struct *mm = task->mm; + + /* fill the first TASK_COMM_LEN bytes with thread name */ + __get_task_comm(task_entry->comm, TASK_COMM_LEN, task); + i = strlen(task_entry->comm); + while (i < TASK_COMM_LEN) + task_entry->comm[i++] = ' '; + + /* next the executable file name */ + if (mm) { + down_read(&mm->mmap_sem); + if (mm->exe_file) { + char *pathname = d_path(&mm->exe_file->f_path, buf, + unused_len); + + if (!IS_ERR(pathname)) { + len = strlcpy(task_entry->comm + i, pathname, + unused_len); + i += len; + task_entry->comm[i++] = ' '; + unused_len--; + } + } + up_read(&mm->mmap_sem); + } + unused_len -= len; + + /* fill the rest with command line argument + * replace each null or new line character + * between args in argv with whitespace */ + len = get_cmdline(task, buf, unused_len); + while (offset < len) { + if (buf[offset] != '\0' && buf[offset] != '\n') + task_entry->comm[i++] = buf[offset]; + else + task_entry->comm[i++] = ' '; + offset++; + } + + /* get rid of trailing whitespaces in case when arg is memset to + * zero before being reset in userspace + */ + while (task_entry->comm[i-1] == ' ') + i--; + task_entry->comm[i] = '\0'; +} + +static struct task_entry *find_task_entry(struct uid_entry *uid_entry, + struct task_struct *task) +{ + struct task_entry *task_entry; + + hash_for_each_possible(uid_entry->task_entries, task_entry, hash, + task->pid) { + if (task->pid == task_entry->pid) { + /* if thread name changed, update the entire command */ + int len = strnchr(task_entry->comm, ' ', TASK_COMM_LEN) + - task_entry->comm; + + if (strncmp(task_entry->comm, task->comm, len)) + get_full_task_comm(task_entry, task); + return task_entry; + } + } + return NULL; +} + +static struct task_entry *find_or_register_task(struct uid_entry *uid_entry, + struct task_struct *task) +{ + struct task_entry *task_entry; + pid_t pid = task->pid; + + task_entry = find_task_entry(uid_entry, task); + if (task_entry) + return task_entry; + + task_entry = kzalloc(sizeof(struct task_entry), GFP_ATOMIC); + if (!task_entry) + return NULL; + + get_full_task_comm(task_entry, task); + + task_entry->pid = pid; + hash_add(uid_entry->task_entries, &task_entry->hash, (unsigned int)pid); + + return task_entry; +} + +static void remove_uid_tasks(struct uid_entry *uid_entry) +{ + struct task_entry *task_entry; + unsigned long bkt_task; + struct hlist_node *tmp_task; + + hash_for_each_safe(uid_entry->task_entries, bkt_task, + tmp_task, task_entry, hash) { + hash_del(&task_entry->hash); + kfree(task_entry); + } +} + +static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) +{ + struct task_entry *task_entry; + unsigned long bkt_task; + + hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) { + memset(&task_entry->io[UID_STATE_TOTAL_CURR], 0, + sizeof(struct io_stats)); + } +} + +static void add_uid_tasks_io_stats(struct uid_entry *uid_entry, + struct task_struct *task, int slot) +{ + struct task_entry *task_entry = 
find_or_register_task(uid_entry, task); + struct io_stats *task_io_slot = &task_entry->io[slot]; + + task_io_slot->read_bytes += task->ioac.read_bytes; + task_io_slot->write_bytes += compute_write_bytes(task); + task_io_slot->rchar += task->ioac.rchar; + task_io_slot->wchar += task->ioac.wchar; + task_io_slot->fsync += task->ioac.syscfs; +} + +static void compute_io_uid_tasks(struct uid_entry *uid_entry) +{ + struct task_entry *task_entry; + unsigned long bkt_task; + + hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) { + compute_io_bucket_stats(&task_entry->io[uid_entry->state], + &task_entry->io[UID_STATE_TOTAL_CURR], + &task_entry->io[UID_STATE_TOTAL_LAST], + &task_entry->io[UID_STATE_DEAD_TASKS]); + } +} + +static void show_io_uid_tasks(struct seq_file *m, struct uid_entry *uid_entry) +{ + struct task_entry *task_entry; + unsigned long bkt_task; + + hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) { + /* Separated by comma because space exists in task comm */ + seq_printf(m, "task,%s,%lu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n", + task_entry->comm, + (unsigned long)task_entry->pid, + task_entry->io[UID_STATE_FOREGROUND].rchar, + task_entry->io[UID_STATE_FOREGROUND].wchar, + task_entry->io[UID_STATE_FOREGROUND].read_bytes, + task_entry->io[UID_STATE_FOREGROUND].write_bytes, + task_entry->io[UID_STATE_BACKGROUND].rchar, + task_entry->io[UID_STATE_BACKGROUND].wchar, + task_entry->io[UID_STATE_BACKGROUND].read_bytes, + task_entry->io[UID_STATE_BACKGROUND].write_bytes, + task_entry->io[UID_STATE_FOREGROUND].fsync, + task_entry->io[UID_STATE_BACKGROUND].fsync); + } +} +#else +static void remove_uid_tasks(struct uid_entry *uid_entry) {}; +static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) {}; +static void add_uid_tasks_io_stats(struct uid_entry *uid_entry, + struct task_struct *task, int slot) {}; +static void compute_io_uid_tasks(struct uid_entry *uid_entry) {}; +static void show_io_uid_tasks(struct seq_file *m, + struct uid_entry *uid_entry) {} +#endif + +static struct uid_entry *find_uid_entry(uid_t uid) +{ + struct uid_entry *uid_entry; + hash_for_each_possible(hash_table, uid_entry, hash, uid) { + if (uid_entry->uid == uid) + return uid_entry; + } + return NULL; +} + +static struct uid_entry *find_or_register_uid(uid_t uid) +{ + struct uid_entry *uid_entry; + + uid_entry = find_uid_entry(uid); + if (uid_entry) + return uid_entry; + + uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC); + if (!uid_entry) + return NULL; + + uid_entry->uid = uid; +#ifdef CONFIG_UID_SYS_STATS_DEBUG + hash_init(uid_entry->task_entries); +#endif + hash_add(hash_table, &uid_entry->hash, uid); + + return uid_entry; +} + +static int uid_cputime_show(struct seq_file *m, void *v) +{ + struct uid_entry *uid_entry = NULL; + struct task_struct *task, *temp; + struct user_namespace *user_ns = current_user_ns(); + u64 utime; + u64 stime; + unsigned long bkt; + uid_t uid; + + rt_mutex_lock(&uid_lock); + + hash_for_each(hash_table, bkt, uid_entry, hash) { + uid_entry->active_stime = 0; + uid_entry->active_utime = 0; + } + + rcu_read_lock(); + do_each_thread(temp, task) { + uid = from_kuid_munged(user_ns, task_uid(task)); + if (!uid_entry || uid_entry->uid != uid) + uid_entry = find_or_register_uid(uid); + if (!uid_entry) { + rcu_read_unlock(); + rt_mutex_unlock(&uid_lock); + pr_err("%s: failed to find the uid_entry for uid %d\n", + __func__, uid); + return -ENOMEM; + } + task_cputime_adjusted(task, &utime, &stime); + uid_entry->active_utime += utime; + 
uid_entry->active_stime += stime; + } while_each_thread(temp, task); + rcu_read_unlock(); + + hash_for_each(hash_table, bkt, uid_entry, hash) { + u64 total_utime = uid_entry->utime + + uid_entry->active_utime; + u64 total_stime = uid_entry->stime + + uid_entry->active_stime; + seq_printf(m, "%d: %llu %llu\n", uid_entry->uid, + ktime_to_ms(total_utime), ktime_to_ms(total_stime)); + } + + rt_mutex_unlock(&uid_lock); + return 0; +} + +static int uid_cputime_open(struct inode *inode, struct file *file) +{ + return single_open(file, uid_cputime_show, PDE_DATA(inode)); +} + +static const struct file_operations uid_cputime_fops = { + .open = uid_cputime_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int uid_remove_open(struct inode *inode, struct file *file) +{ + return single_open(file, NULL, NULL); +} + +static ssize_t uid_remove_write(struct file *file, + const char __user *buffer, size_t count, loff_t *ppos) +{ + struct uid_entry *uid_entry; + struct hlist_node *tmp; + char uids[128]; + char *start_uid, *end_uid = NULL; + long int uid_start = 0, uid_end = 0; + + if (count >= sizeof(uids)) + count = sizeof(uids) - 1; + + if (copy_from_user(uids, buffer, count)) + return -EFAULT; + + uids[count] = '\0'; + end_uid = uids; + start_uid = strsep(&end_uid, "-"); + + if (!start_uid || !end_uid) + return -EINVAL; + + if (kstrtol(start_uid, 10, &uid_start) != 0 || + kstrtol(end_uid, 10, &uid_end) != 0) { + return -EINVAL; + } + + /* Also remove uids from /proc/uid_time_in_state */ + cpufreq_task_times_remove_uids(uid_start, uid_end); + + rt_mutex_lock(&uid_lock); + + for (; uid_start <= uid_end; uid_start++) { + hash_for_each_possible_safe(hash_table, uid_entry, tmp, + hash, (uid_t)uid_start) { + if (uid_start == uid_entry->uid) { + remove_uid_tasks(uid_entry); + hash_del(&uid_entry->hash); + kfree(uid_entry); + } + } + } + + rt_mutex_unlock(&uid_lock); + return count; +} + +static const struct file_operations uid_remove_fops = { + .open = uid_remove_open, + .release = single_release, + .write = uid_remove_write, +}; + + +static void add_uid_io_stats(struct uid_entry *uid_entry, + struct task_struct *task, int slot) +{ + struct io_stats *io_slot = &uid_entry->io[slot]; + + io_slot->read_bytes += task->ioac.read_bytes; + io_slot->write_bytes += compute_write_bytes(task); + io_slot->rchar += task->ioac.rchar; + io_slot->wchar += task->ioac.wchar; + io_slot->fsync += task->ioac.syscfs; + + add_uid_tasks_io_stats(uid_entry, task, slot); +} + +static void update_io_stats_all_locked(void) +{ + struct uid_entry *uid_entry = NULL; + struct task_struct *task, *temp; + struct user_namespace *user_ns = current_user_ns(); + unsigned long bkt; + uid_t uid; + + hash_for_each(hash_table, bkt, uid_entry, hash) { + memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0, + sizeof(struct io_stats)); + set_io_uid_tasks_zero(uid_entry); + } + + rcu_read_lock(); + do_each_thread(temp, task) { + uid = from_kuid_munged(user_ns, task_uid(task)); + if (!uid_entry || uid_entry->uid != uid) + uid_entry = find_or_register_uid(uid); + if (!uid_entry) + continue; + add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR); + } while_each_thread(temp, task); + rcu_read_unlock(); + + hash_for_each(hash_table, bkt, uid_entry, hash) { + compute_io_bucket_stats(&uid_entry->io[uid_entry->state], + &uid_entry->io[UID_STATE_TOTAL_CURR], + &uid_entry->io[UID_STATE_TOTAL_LAST], + &uid_entry->io[UID_STATE_DEAD_TASKS]); + compute_io_uid_tasks(uid_entry); + } +} + +static void update_io_stats_uid_locked(struct 
uid_entry *uid_entry) +{ + struct task_struct *task, *temp; + struct user_namespace *user_ns = current_user_ns(); + + memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0, + sizeof(struct io_stats)); + set_io_uid_tasks_zero(uid_entry); + + rcu_read_lock(); + do_each_thread(temp, task) { + if (from_kuid_munged(user_ns, task_uid(task)) != uid_entry->uid) + continue; + add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR); + } while_each_thread(temp, task); + rcu_read_unlock(); + + compute_io_bucket_stats(&uid_entry->io[uid_entry->state], + &uid_entry->io[UID_STATE_TOTAL_CURR], + &uid_entry->io[UID_STATE_TOTAL_LAST], + &uid_entry->io[UID_STATE_DEAD_TASKS]); + compute_io_uid_tasks(uid_entry); +} + + +static int uid_io_show(struct seq_file *m, void *v) +{ + struct uid_entry *uid_entry; + unsigned long bkt; + + rt_mutex_lock(&uid_lock); + + update_io_stats_all_locked(); + + hash_for_each(hash_table, bkt, uid_entry, hash) { + seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", + uid_entry->uid, + uid_entry->io[UID_STATE_FOREGROUND].rchar, + uid_entry->io[UID_STATE_FOREGROUND].wchar, + uid_entry->io[UID_STATE_FOREGROUND].read_bytes, + uid_entry->io[UID_STATE_FOREGROUND].write_bytes, + uid_entry->io[UID_STATE_BACKGROUND].rchar, + uid_entry->io[UID_STATE_BACKGROUND].wchar, + uid_entry->io[UID_STATE_BACKGROUND].read_bytes, + uid_entry->io[UID_STATE_BACKGROUND].write_bytes, + uid_entry->io[UID_STATE_FOREGROUND].fsync, + uid_entry->io[UID_STATE_BACKGROUND].fsync); + + show_io_uid_tasks(m, uid_entry); + } + + rt_mutex_unlock(&uid_lock); + return 0; +} + +static int uid_io_open(struct inode *inode, struct file *file) +{ + return single_open(file, uid_io_show, PDE_DATA(inode)); +} + +static const struct file_operations uid_io_fops = { + .open = uid_io_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int uid_procstat_open(struct inode *inode, struct file *file) +{ + return single_open(file, NULL, NULL); +} + +static ssize_t uid_procstat_write(struct file *file, + const char __user *buffer, size_t count, loff_t *ppos) +{ + struct uid_entry *uid_entry; + uid_t uid; + int argc, state; + char input[128]; + + if (count >= sizeof(input)) + return -EINVAL; + + if (copy_from_user(input, buffer, count)) + return -EFAULT; + + input[count] = '\0'; + + argc = sscanf(input, "%u %d", &uid, &state); + if (argc != 2) + return -EINVAL; + + if (state != UID_STATE_BACKGROUND && state != UID_STATE_FOREGROUND) + return -EINVAL; + + rt_mutex_lock(&uid_lock); + + uid_entry = find_or_register_uid(uid); + if (!uid_entry) { + rt_mutex_unlock(&uid_lock); + return -EINVAL; + } + + if (uid_entry->state == state) { + rt_mutex_unlock(&uid_lock); + return count; + } + + update_io_stats_uid_locked(uid_entry); + + uid_entry->state = state; + + rt_mutex_unlock(&uid_lock); + + return count; +} + +static const struct file_operations uid_procstat_fops = { + .open = uid_procstat_open, + .release = single_release, + .write = uid_procstat_write, +}; + +static int process_notifier(struct notifier_block *self, + unsigned long cmd, void *v) +{ + struct task_struct *task = v; + struct uid_entry *uid_entry; + u64 utime, stime; + uid_t uid; + + if (!task) + return NOTIFY_OK; + + rt_mutex_lock(&uid_lock); + uid = from_kuid_munged(current_user_ns(), task_uid(task)); + uid_entry = find_or_register_uid(uid); + if (!uid_entry) { + pr_err("%s: failed to find uid %d\n", __func__, uid); + goto exit; + } + + task_cputime_adjusted(task, &utime, &stime); + uid_entry->utime += utime; + uid_entry->stime += 
stime; + + add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS); + +exit: + rt_mutex_unlock(&uid_lock); + return NOTIFY_OK; +} + +static struct notifier_block process_notifier_block = { + .notifier_call = process_notifier, +}; + +static int __init proc_uid_sys_stats_init(void) +{ + hash_init(hash_table); + + cpu_parent = proc_mkdir("uid_cputime", NULL); + if (!cpu_parent) { + pr_err("%s: failed to create uid_cputime proc entry\n", + __func__); + goto err; + } + + proc_create_data("remove_uid_range", 0222, cpu_parent, + &uid_remove_fops, NULL); + proc_create_data("show_uid_stat", 0444, cpu_parent, + &uid_cputime_fops, NULL); + + io_parent = proc_mkdir("uid_io", NULL); + if (!io_parent) { + pr_err("%s: failed to create uid_io proc entry\n", + __func__); + goto err; + } + + proc_create_data("stats", 0444, io_parent, + &uid_io_fops, NULL); + + proc_parent = proc_mkdir("uid_procstat", NULL); + if (!proc_parent) { + pr_err("%s: failed to create uid_procstat proc entry\n", + __func__); + goto err; + } + + proc_create_data("set", 0222, proc_parent, + &uid_procstat_fops, NULL); + + profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block); + + return 0; + +err: + remove_proc_subtree("uid_cputime", NULL); + remove_proc_subtree("uid_io", NULL); + remove_proc_subtree("uid_procstat", NULL); + return -ENOMEM; +} + +early_initcall(proc_uid_sys_stats_init); diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 1e688bfec567..56c6f79a5c5a 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -467,7 +467,7 @@ static int vmballoon_send_batched_lock(struct vmballoon *b, unsigned int num_pages, bool is_2m_pages, unsigned int *target) { unsigned long status; - unsigned long pfn = page_to_pfn(b->page); + unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page)); STATS_INC(b->stats.lock[is_2m_pages]); @@ -515,7 +515,7 @@ static bool vmballoon_send_batched_unlock(struct vmballoon *b, unsigned int num_pages, bool is_2m_pages, unsigned int *target) { unsigned long status; - unsigned long pfn = page_to_pfn(b->page); + unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page)); STATS_INC(b->stats.unlock[is_2m_pages]); @@ -576,15 +576,9 @@ static void vmballoon_pop(struct vmballoon *b) } } - if (b->batch_page) { - vunmap(b->batch_page); - b->batch_page = NULL; - } - - if (b->page) { - __free_page(b->page); - b->page = NULL; - } + /* Clearing the batch_page unconditionally has no adverse effect */ + free_page((unsigned long)b->batch_page); + b->batch_page = NULL; } /* @@ -991,16 +985,13 @@ static const struct vmballoon_ops vmballoon_batched_ops = { static bool vmballoon_init_batching(struct vmballoon *b) { - b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP); - if (!b->page) - return false; + struct page *page; - b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL); - if (!b->batch_page) { - __free_page(b->page); + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) return false; - } + b->batch_page = page_address(page); return true; } @@ -1271,7 +1262,7 @@ static int __init vmballoon_init(void) * Check if we are running on VMware's hypervisor and bail out * if we are not. 
*/ - if (x86_hyper != &x86_hyper_vmware) + if (x86_hyper_type != X86_HYPER_VMWARE) return -ENODEV; for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES; diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig index 42e89060cd41..53fc4818f4e0 100644 --- a/drivers/mmc/core/Kconfig +++ b/drivers/mmc/core/Kconfig @@ -36,6 +36,7 @@ config PWRSEQ_SIMPLE config MMC_BLOCK tristate "MMC block device driver" depends on BLOCK + select RPMB default y help Say Y here to enable the MMC block device driver support. @@ -80,3 +81,17 @@ config MMC_TEST This driver is only of interest to those developing or testing a host driver. Most people should say N here. +config MMC_EMBEDDED_SDIO + boolean "MMC embedded SDIO device support" + help + If you say Y here, support will be added for embedded SDIO + devices which do not contain the necessary enumeration + support in hardware to be properly detected. + +config MMC_PARANOID_SD_INIT + bool "Enable paranoid SD card initialization" + help + If you say Y here, the MMC layer will be extra paranoid + about re-trying SD init requests. This can be a useful + work-around for buggy controllers and hardware. Enable + if you are experiencing issues with SD detection. diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 2ad7b5c69156..491f9363f849 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -43,6 +43,7 @@ #include #include #include +#include #include @@ -65,6 +66,7 @@ MODULE_ALIAS("mmc:block"); #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ #define MMC_SANITIZE_REQ_TIMEOUT 240000 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) +#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8) #define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ (rq_data_dir(req) == WRITE)) @@ -109,6 +111,7 @@ struct mmc_blk_data { #define MMC_BLK_WRITE BIT(1) #define MMC_BLK_DISCARD BIT(2) #define MMC_BLK_SECDISCARD BIT(3) +#define MMC_BLK_RPMB BIT(4) /* * Only set in main mmc_blk_data associated @@ -119,6 +122,10 @@ struct mmc_blk_data { struct device_attribute force_ro; struct device_attribute power_ro_lock; int area_type; + + /* debugfs files (only in main mmc_blk_data) */ + struct dentry *status_dentry; + struct dentry *ext_csd_dentry; }; static DEFINE_MUTEX(open_lock); @@ -204,9 +211,14 @@ static ssize_t power_ro_lock_store(struct device *dev, /* Dispatch locking to the block layer */ req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM); + if (IS_ERR(req)) { + count = PTR_ERR(req); + goto out_put; + } req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP; blk_execute_rq(mq->queue, NULL, req, 0); ret = req_to_mmc_queue_req(req)->drv_op_result; + blk_put_request(req); if (!ret) { pr_info("%s: Locking boot partition ro until next power on\n", @@ -219,7 +231,7 @@ static ssize_t power_ro_lock_store(struct device *dev, set_disk_ro(part_md->disk, 1); } } - +out_put: mmc_blk_put(md); return count; } @@ -369,8 +381,8 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr, return 0; } -static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status, - u32 retries_max) +static int mmc_blk_rpmb_status_poll(struct mmc_card *card, u32 *status, + u32 retries_max) { int err; u32 retry_count = 0; @@ -529,6 +541,24 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, return data.error; } + /* + * Make sure the cache of the PARTITION_CONFIG register and + * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write + * changed 
it successfully. + */ + if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) && + (cmd.opcode == MMC_SWITCH)) { + struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); + u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg); + + /* + * Update cache so the next mmc_blk_part_switch call operates + * on up-to-date data. + */ + card->ext_csd.part_config = value; + main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK; + } + /* * According to the SD specs, some commands require a delay after * issuing the command. @@ -543,7 +573,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, * Ensure RPMB command has completed by polling CMD13 * "Send Status". */ - err = ioctl_rpmb_card_status_poll(card, &status, 5); + err = mmc_blk_rpmb_status_poll(card, &status, 5); if (err) dev_err(mmc_dev(card->host), "%s: Card Status=0x%08X, error %d\n", @@ -580,6 +610,10 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md, req = blk_get_request(mq->queue, idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto cmd_done; + } idatas[0] = idata; req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL; req_to_mmc_queue_req(req)->drv_op_data = idatas; @@ -643,6 +677,10 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md, req = blk_get_request(mq->queue, idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto cmd_err; + } req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL; req_to_mmc_queue_req(req)->drv_op_data = idata; req_to_mmc_queue_req(req)->ioc_count = num_of_cmds; @@ -1226,6 +1264,273 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK); } +static int mmc_rpmb_send_cmd(struct mmc_card *card, + unsigned int data_type, bool do_rel_wr, + void *buf, u16 blks) +{ + int err; + u32 status; + struct mmc_command sbc = { + .opcode = MMC_SET_BLOCK_COUNT, + .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC, + }; + + struct mmc_command cmd = { + .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC, + }; + + struct mmc_data data = { + .blksz = 512, + }; + struct mmc_request mrq = { + .sbc = &sbc, + .cmd = &cmd, + .data = &data, + .stop = NULL, + }; + struct scatterlist sg; + + /* set CMD23 */ + sbc.arg = blks & 0x0000FFFF; + if (do_rel_wr) + sbc.arg |= MMC_CMD23_ARG_REL_WR; + + /* set CMD25/18 */ + cmd.opcode = (data_type == MMC_DATA_WRITE) ? 
+ MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK; + + sg_init_one(&sg, buf, 512 * blks); + + data.blocks = blks; + data.sg = &sg; + data.sg_len = 1; + data.flags = data_type; + mmc_set_data_timeout(&data, card); + + mmc_wait_for_req(card->host, &mrq); + + if (cmd.error) { + dev_err(mmc_dev(card->host), "cmd error (%d)\n", cmd.error); + return cmd.error; + } + + if (data.error) { + dev_err(mmc_dev(card->host), "data error (%d)\n", data.error); + return data.error; + } + + err = mmc_blk_rpmb_status_poll(card, &status, 5); + if (err) + dev_err(mmc_dev(card->host), "Card Status=0x%08X, error %d\n", + status, err); + return err; +} + +static int mmc_blk_rpmb_sequence(struct mmc_card *card, + struct rpmb_cmd *cmds, u32 ncmds) +{ + int err, i; + struct rpmb_cmd *cmd; + unsigned int data_type; + bool do_rel_wr; + + for (err = 0, i = 0; i < ncmds && !err; i++) { + cmd = &cmds[i]; + if (cmd->flags & RPMB_F_WRITE) { + data_type = MMC_DATA_WRITE; + do_rel_wr = !!(cmd->flags & RPMB_F_REL_WRITE); + } else { + data_type = MMC_DATA_READ; + do_rel_wr = false; + } + + err = mmc_rpmb_send_cmd(card, data_type, do_rel_wr, + cmd->frames, cmd->nframes); + } + + return err; +} + +static int mmc_blk_rpmb_process(struct mmc_blk_data *md, + struct rpmb_cmd *cmds, u32 ncmds) +{ + struct mmc_card *card; + struct mmc_blk_data *main_md; + int ret; + + if (WARN_ON(!md || !cmds || !ncmds)) + return -EINVAL; + + if (!(md->flags & MMC_BLK_CMD23) || + md->part_type != EXT_CSD_PART_CONFIG_ACC_RPMB) + return -EOPNOTSUPP; + + card = md->queue.card; + if (!card || !mmc_card_mmc(card)) + return -ENODEV; + + mmc_get_card(card); + + /* switch to RPMB partition */ + ret = mmc_blk_part_switch(card, md->part_type); + if (ret) { + dev_err(mmc_dev(card->host), "Invalid RPMB partition switch (%d)!\n", + ret); + /* + * In case partition is not in user data area, make + * a force partition switch. 
+ * We need to reset the eMMC card here. + */ + ret = mmc_blk_reset(md, card->host, MMC_BLK_RPMB); + if (!ret) + mmc_blk_reset_success(md, MMC_BLK_RPMB); + else + dev_err(mmc_dev(card->host), "eMMC card reset failed (%d)\n", + ret); + goto out; + } + + ret = mmc_blk_rpmb_sequence(card, cmds, ncmds); + if (ret) + dev_err(mmc_dev(card->host), "failed (%d) to handle RPMB request\n", + ret); + + /* Always switch back to main area after RPMB access */ + + main_md = dev_get_drvdata(&card->dev); + mmc_blk_part_switch(card, main_md->part_type); +out: + mmc_put_card(card); + return ret; +} + +static int mmc_blk_rpmb_cmd_seq(struct device *dev, u8 target, + struct rpmb_cmd *cmds, u32 ncmds) +{ + struct mmc_blk_data *md; + int ret; + + md = mmc_blk_get(dev_to_disk(dev)); + if (!md) + return -ENODEV; + + if (!cmds || !ncmds) + return -EINVAL; + + ret = mmc_blk_rpmb_process(md, cmds, ncmds); + + mmc_blk_put(md); + + return ret; +} + +static int mmc_blk_rpmb_get_capacity(struct device *dev, u8 target) +{ + struct mmc_blk_data *md; + struct mmc_card *card; + + md = mmc_blk_get(dev_to_disk(dev)); + if (!md) + return -ENODEV; + + card = md->queue.card; + return card->ext_csd.raw_rpmb_size_mult; +} + +static struct rpmb_ops mmc_rpmb_dev_ops = { + .cmd_seq = mmc_blk_rpmb_cmd_seq, + .get_capacity = mmc_blk_rpmb_get_capacity, + .type = RPMB_TYPE_EMMC, + .auth_method = RPMB_HMAC_ALGO_SHA_256, +}; + +static struct mmc_blk_data *mmc_blk_rpmb_part_get(struct mmc_blk_data *md) +{ + struct mmc_blk_data *part_md; + + if (!md) + return NULL; + + list_for_each_entry(part_md, &md->part, part) { + if (part_md->area_type == MMC_BLK_DATA_AREA_RPMB) + return part_md; + } + + return NULL; +} + +static void mmc_blk_rpmb_unset_dev_id(struct rpmb_ops *ops) +{ + kfree(ops->dev_id); + ops->dev_id = NULL; +} + +static int mmc_blk_rpmb_set_dev_id(struct rpmb_ops *ops, struct mmc_card *card) +{ + char *id; + + id = kmalloc(sizeof(card->raw_cid), GFP_KERNEL); + if (!id) + return -ENOMEM; + + memcpy(id, card->raw_cid, sizeof(card->raw_cid)); + ops->dev_id = id; + ops->dev_id_len = sizeof(card->raw_cid); + + return 0; +} + +static void mmc_blk_rpmb_set_cap(struct rpmb_ops *ops, + struct mmc_card *card) +{ + u16 rel_wr_cnt; + + /* RPMB blocks are written in half sectors hence '* 2' */ + rel_wr_cnt = card->ext_csd.rel_sectors * 2; + /* eMMC 5.1 may support RPMB 8K (32) frames */ + if (card->ext_csd.rev >= 8) { + if (card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) + rel_wr_cnt = 32; + else + rel_wr_cnt = 2; + } + + ops->wr_cnt_max = rel_wr_cnt; + ops->rd_cnt_max = card->host->max_blk_count; + ops->block_size = 1; /* 256B */ +} + +static void mmc_blk_rpmb_add(struct mmc_card *card) +{ + struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + struct mmc_blk_data *part_md = mmc_blk_rpmb_part_get(md); + struct rpmb_dev *rdev; + + if (!part_md) + return; + + mmc_blk_rpmb_set_dev_id(&mmc_rpmb_dev_ops, card); + mmc_blk_rpmb_set_cap(&mmc_rpmb_dev_ops, card); + + rdev = rpmb_dev_register(disk_to_dev(part_md->disk), 0, + &mmc_rpmb_dev_ops); + if (IS_ERR(rdev)) { + pr_warn("%s: cannot register to rpmb %ld\n", + part_md->disk->disk_name, PTR_ERR(rdev)); + } +} + +static void mmc_blk_rpmb_remove(struct mmc_card *card) +{ + struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + struct mmc_blk_data *part_md = mmc_blk_rpmb_part_get(md); + + if (part_md) + rpmb_dev_unregister_by_device(disk_to_dev(part_md->disk), 0); + + mmc_blk_rpmb_unset_dev_id(&mmc_rpmb_dev_ops); +} + static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) {
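For context, the rpmb_ops registered above are driven by kernel-side RPMB users through the companion RPMB subsystem: a caller builds an array of struct rpmb_cmd entries, and the layer routes the sequence into mmc_blk_rpmb_cmd_seq(). A minimal hypothetical caller sketch follows, assuming that subsystem's rpmb_cmd_seq() helper and a struct rpmb_frame type from its rpmb.h (neither is shown in this patch):

	/*
	 * An authenticated read is the two-command sequence that
	 * mmc_blk_rpmb_sequence() iterates over: one written request
	 * frame (CMD25), then the response frames read back (CMD18).
	 * 'rdev' is the handle obtained when the device registered.
	 */
	struct rpmb_frame req = { /* request type, address, nonce... */ };
	struct rpmb_frame resp[2];
	struct rpmb_cmd cmds[] = {
		{ .flags = RPMB_F_WRITE, .nframes = 1, .frames = &req },
		{ .flags = 0,            .nframes = 2, .frames = resp },
	};
	int err = rpmb_cmd_seq(rdev, cmds, ARRAY_SIZE(cmds));

A key-programming or write-data sequence would additionally set RPMB_F_REL_WRITE on the request entry, so that mmc_rpmb_send_cmd() flags CMD23 with MMC_CMD23_ARG_REL_WR.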
struct mmc_blk_data *md = mq->blkdata; @@ -2314,6 +2619,8 @@ static int mmc_dbg_card_status_get(void *data, u64 *val) /* Ask the block layer about the card status */ req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + if (IS_ERR(req)) + return PTR_ERR(req); req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS; blk_execute_rq(mq->queue, NULL, req, 0); ret = req_to_mmc_queue_req(req)->drv_op_result; @@ -2321,6 +2628,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val) *val = ret; ret = 0; } + blk_put_request(req); return ret; } @@ -2347,10 +2655,15 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp) /* Ask the block layer for the EXT CSD */ req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto out_free; + } req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD; req_to_mmc_queue_req(req)->drv_op_data = &ext_csd; blk_execute_rq(mq->queue, NULL, req, 0); err = req_to_mmc_queue_req(req)->drv_op_result; + blk_put_request(req); if (err) { pr_err("FAILED %d\n", err); goto out_free; @@ -2362,6 +2675,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp) if (n != EXT_CSD_STR_LEN) { err = -EINVAL; + kfree(ext_csd); goto out_free; } @@ -2396,7 +2710,7 @@ static const struct file_operations mmc_dbg_ext_csd_fops = { .llseek = default_llseek, }; -static int mmc_blk_add_debugfs(struct mmc_card *card) +static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) { struct dentry *root; @@ -2406,28 +2720,53 @@ static int mmc_blk_add_debugfs(struct mmc_card *card) root = card->debugfs_root; if (mmc_card_mmc(card) || mmc_card_sd(card)) { - if (!debugfs_create_file("status", S_IRUSR, root, card, - &mmc_dbg_card_status_fops)) + md->status_dentry = + debugfs_create_file("status", S_IRUSR, root, card, + &mmc_dbg_card_status_fops); + if (!md->status_dentry) return -EIO; } if (mmc_card_mmc(card)) { - if (!debugfs_create_file("ext_csd", S_IRUSR, root, card, - &mmc_dbg_ext_csd_fops)) + md->ext_csd_dentry = + debugfs_create_file("ext_csd", S_IRUSR, root, card, + &mmc_dbg_ext_csd_fops); + if (!md->ext_csd_dentry) return -EIO; } return 0; } +static void mmc_blk_remove_debugfs(struct mmc_card *card, + struct mmc_blk_data *md) +{ + if (!card->debugfs_root) + return; + + if (!IS_ERR_OR_NULL(md->status_dentry)) { + debugfs_remove(md->status_dentry); + md->status_dentry = NULL; + } + + if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) { + debugfs_remove(md->ext_csd_dentry); + md->ext_csd_dentry = NULL; + } +} #else -static int mmc_blk_add_debugfs(struct mmc_card *card) +static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) { return 0; } +static void mmc_blk_remove_debugfs(struct mmc_card *card, + struct mmc_blk_data *md) +{ +} + #endif /* CONFIG_DEBUG_FS */ static int mmc_blk_probe(struct mmc_card *card) @@ -2466,8 +2805,11 @@ static int mmc_blk_probe(struct mmc_card *card) goto out; } + /* add rpmb layer */ + mmc_blk_rpmb_add(card); + /* Add two debugfs entries */ - mmc_blk_add_debugfs(card); + mmc_blk_add_debugfs(card, md); pm_runtime_set_autosuspend_delay(&card->dev, 3000); pm_runtime_use_autosuspend(&card->dev); @@ -2493,6 +2835,8 @@ static void mmc_blk_remove(struct mmc_card *card) { struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + mmc_blk_remove_debugfs(card, md); + mmc_blk_rpmb_remove(card); mmc_blk_remove_parts(card, md); pm_runtime_get_sync(&card->dev); mmc_claim_host(card->host); @@ -2521,6 +2865,7 @@ static int _mmc_blk_suspend(struct 
mmc_card *card) static void mmc_blk_shutdown(struct mmc_card *card) { + mmc_blk_rpmb_remove(card); _mmc_blk_suspend(card); } diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 301246513a37..7f428e387de3 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -157,6 +157,9 @@ static int mmc_bus_suspend(struct device *dev) return ret; ret = host->bus_ops->suspend(host); + if (ret) + pm_generic_resume(dev); + return ret; } diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h index f06cd91964ce..9c821eedd156 100644 --- a/drivers/mmc/core/card.h +++ b/drivers/mmc/core/card.h @@ -75,11 +75,14 @@ struct mmc_fixup { #define EXT_CSD_REV_ANY (-1u) #define CID_MANFID_SANDISK 0x2 +#define CID_MANFID_ATP 0x9 #define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 #define CID_MANFID_SAMSUNG 0x15 +#define CID_MANFID_APACER 0x27 #define CID_MANFID_KINGSTON 0x70 #define CID_MANFID_HYNIX 0x90 +#define CID_MANFID_NUMONYX 0xFE #define END_FIXUP { NULL } diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 66c9cf49ad2f..e71e6070ae75 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -2751,6 +2751,14 @@ static int mmc_pm_notify(struct notifier_block *notify_block, if (!err) break; + if (!mmc_card_is_removable(host)) { + dev_warn(mmc_dev(host), + "pre_suspend failed for non-removable host: " + "%d\n", err); + /* Avoid removing non-removable hosts */ + break; + } + /* Calling bus_ops->remove() with a claimed host can deadlock */ host->bus_ops->remove(host); mmc_claim_host(host); @@ -2802,6 +2810,22 @@ void mmc_init_context_info(struct mmc_host *host) init_waitqueue_head(&host->context_info.wait); } +#ifdef CONFIG_MMC_EMBEDDED_SDIO +void mmc_set_embedded_sdio_data(struct mmc_host *host, + struct sdio_cis *cis, + struct sdio_cccr *cccr, + struct sdio_embedded_func *funcs, + int num_funcs) +{ + host->embedded_sdio_data.cis = cis; + host->embedded_sdio_data.cccr = cccr; + host->embedded_sdio_data.funcs = funcs; + host->embedded_sdio_data.num_funcs = num_funcs; +} + +EXPORT_SYMBOL(mmc_set_embedded_sdio_data); +#endif + static int __init mmc_init(void) { int ret; diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index 01e459a34f33..0f4a7d7b2626 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@ -314,4 +314,5 @@ void mmc_add_card_debugfs(struct mmc_card *card) void mmc_remove_card_debugfs(struct mmc_card *card) { debugfs_remove_recursive(card->debugfs_root); + card->debugfs_root = NULL; } diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index ad88deb2e8f3..f469254bf78a 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -429,7 +429,8 @@ int mmc_add_host(struct mmc_host *host) #endif mmc_start_host(host); - mmc_register_pm_notifier(host); + if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY)) + mmc_register_pm_notifier(host); return 0; } @@ -446,7 +447,8 @@ EXPORT_SYMBOL(mmc_add_host); */ void mmc_remove_host(struct mmc_host *host) { - mmc_unregister_pm_notifier(host); + if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY)) + mmc_unregister_pm_notifier(host); mmc_stop_host(host); #ifdef CONFIG_DEBUG_FS diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 36217ad5e9b1..13871e2bcb0a 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -780,7 +780,8 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name); MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid); MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv); 
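The mmc_set_embedded_sdio_data() helper exported above is intended to be called from platform or board code before the host is added, so the SDIO core can skip CCCR/CIS enumeration for a hard-wired device (see the sdio.c changes further down). A hypothetical board-file sketch; the wifi_* names and both ids are illustrative, and struct sdio_embedded_func comes from the companion header changes of this series:

	static struct sdio_cis wifi_cis = {
		.vendor  = 0x02d0,	/* illustrative vendor id */
		.device  = 0x4329,	/* illustrative device id */
		.blksize = 512,
		.max_dtr = 25000000,
	};

	static struct sdio_embedded_func wifi_func = {
		.f_class      = SDIO_CLASS_WLAN,
		.f_maxblksize = 512,
	};

	void board_wifi_init(struct mmc_host *host)
	{
		mmc_set_embedded_sdio_data(host, &wifi_cis, NULL,
					   &wifi_func, 1);
	}

Passing NULL for the CCCR keeps the normal sdio_read_cccr() path while still overriding the CIS and the function table.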
-MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info); +MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev); +MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info); MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n", card->ext_csd.device_life_time_est_typ_a, card->ext_csd.device_life_time_est_typ_b); @@ -790,7 +791,7 @@ MMC_DEV_ATTR(enhanced_area_offset, "%llu\n", MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size); MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult); MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors); -MMC_DEV_ATTR(ocr, "%08x\n", card->ocr); +MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr); MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en); static ssize_t mmc_fwrev_show(struct device *dev, @@ -838,6 +839,7 @@ static struct attribute *mmc_std_attrs[] = { &dev_attr_name.attr, &dev_attr_oemid.attr, &dev_attr_prv.attr, + &dev_attr_rev.attr, &dev_attr_pre_eol_info.attr, &dev_attr_life_time.attr, &dev_attr_serial.attr, diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c index 478869805b96..789afef66fce 100644 --- a/drivers/mmc/core/mmc_test.c +++ b/drivers/mmc/core/mmc_test.c @@ -2328,10 +2328,17 @@ static int mmc_test_reset(struct mmc_test_card *test) int err; err = mmc_hw_reset(host); - if (!err) + if (!err) { + /* + * Reset will re-enable the card's command queue, but tests + * expect it to be disabled. + */ + if (card->ext_csd.cmdq_en) + mmc_cmdq_disable(card); return RESULT_OK; - else if (err == -EOPNOTSUPP) + } else if (err == -EOPNOTSUPP) { return RESULT_UNSUP_HOST; + } return RESULT_FAIL; } diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c index 13ef162cf066..a8b9fee4d62a 100644 --- a/drivers/mmc/core/pwrseq_simple.c +++ b/drivers/mmc/core/pwrseq_simple.c @@ -40,14 +40,18 @@ static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq, struct gpio_descs *reset_gpios = pwrseq->reset_gpios; if (!IS_ERR(reset_gpios)) { - int i; - int values[reset_gpios->ndescs]; + int i, *values; + int nvalues = reset_gpios->ndescs; - for (i = 0; i < reset_gpios->ndescs; i++) + values = kmalloc_array(nvalues, sizeof(int), GFP_KERNEL); + if (!values) + return; + + for (i = 0; i < nvalues; i++) values[i] = value; - gpiod_set_array_value_cansleep( - reset_gpios->ndescs, reset_gpios->desc, values); + gpiod_set_array_value_cansleep(nvalues, reset_gpios->desc, values); + kfree(values); } } diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 0a4e77a5ba33..8c4721f1b4bc 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -17,6 +17,8 @@ #include #include +#include +#include #include "queue.h" #include "block.h" @@ -43,6 +45,11 @@ static int mmc_queue_thread(void *d) struct mmc_queue *mq = d; struct request_queue *q = mq->queue; struct mmc_context_info *cntx = &mq->card->host->context_info; + struct sched_param scheduler_params = {0}; + + scheduler_params.sched_priority = 1; + + sched_setscheduler(current, SCHED_FIFO, &scheduler_params); current->flags |= PF_MEMALLOC; diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index f664e9cbc9f8..5153577754f0 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h @@ -52,6 +52,14 @@ static const struct mmc_fixup mmc_blk_fixups[] = { MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), + /* + * Some SD cards lock up while using CMD23 multiblock transfers.
+ */ + MMC_FIXUP("AF SD", CID_MANFID_ATP, CID_OEMID_ANY, add_quirk_sd, + MMC_QUIRK_BLK_NO_CMD23), + MMC_FIXUP("APUSD", CID_MANFID_APACER, 0x5048, add_quirk_sd, + MMC_QUIRK_BLK_NO_CMD23), + /* * Some MMC cards need longer data read timeout than indicated in CSD. */ @@ -101,6 +109,12 @@ static const struct mmc_fixup mmc_ext_csd_fixups[] = { */ MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX, 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5), + /* + * Certain Micron (Numonyx) eMMC 4.5 cards might get broken when HPI + * feature is used so disable the HPI feature for such buggy cards. + */ + MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_NUMONYX, + 0x014e, add_quirk, MMC_QUIRK_BROKEN_HPI, 6), END_FIXUP }; diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 4fd1620b732d..4bbdfa382e84 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -675,7 +675,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name); MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid); MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial); -MMC_DEV_ATTR(ocr, "%08x\n", card->ocr); +MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr); static ssize_t mmc_dsr_show(struct device *dev, @@ -834,6 +834,9 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, bool reinit) { int err; +#ifdef CONFIG_MMC_PARANOID_SD_INIT + int retries; +#endif if (!reinit) { /* @@ -860,7 +863,26 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, /* * Fetch switch information from card. */ +#ifdef CONFIG_MMC_PARANOID_SD_INIT + for (retries = 1; retries <= 3; retries++) { + err = mmc_read_switch(card); + if (!err) { + if (retries > 1) { + printk(KERN_WARNING + "%s: recovered\n", + mmc_hostname(host)); + } + break; + } else { + printk(KERN_WARNING + "%s: read switch failed (attempt %d)\n", + mmc_hostname(host), retries); + } + } +#else err = mmc_read_switch(card); +#endif + if (err) return err; } @@ -1054,14 +1076,33 @@ static int mmc_sd_alive(struct mmc_host *host) */ static void mmc_sd_detect(struct mmc_host *host) { - int err; + int err = 0; +#ifdef CONFIG_MMC_PARANOID_SD_INIT + int retries = 5; +#endif mmc_get_card(host->card); /* * Just check if our card has been removed. 
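The quirk table additions earlier in this hunk all follow the MMC_FIXUP(name, manfid, oemid, hook, quirk) pattern, matched against the card's CID at init time. For reference, a new entry for another card that misbehaves with CMD23 would look like this sketch (the name and both ids are placeholders, not a real card):

	MMC_FIXUP("XYZ01", 0x99, 0x1234, add_quirk_sd,
		  MMC_QUIRK_BLK_NO_CMD23),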
*/ +#ifdef CONFIG_MMC_PARANOID_SD_INIT + while(retries) { + err = mmc_send_status(host->card, NULL); + if (err) { + retries--; + udelay(5); + continue; + } + break; + } + if (!retries) { + printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n", + __func__, mmc_hostname(host), err); + } +#else err = _mmc_detect_card_removed(host); +#endif mmc_put_card(host->card); @@ -1120,6 +1161,9 @@ static int mmc_sd_suspend(struct mmc_host *host) static int _mmc_sd_resume(struct mmc_host *host) { int err = 0; +#ifdef CONFIG_MMC_PARANOID_SD_INIT + int retries; +#endif mmc_claim_host(host); @@ -1127,7 +1171,23 @@ static int _mmc_sd_resume(struct mmc_host *host) goto out; mmc_power_up(host, host->card->ocr); +#ifdef CONFIG_MMC_PARANOID_SD_INIT + retries = 5; + while (retries) { + err = mmc_sd_init_card(host, host->card->ocr, host->card); + + if (err) { + printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n", + mmc_hostname(host), err, retries); + mdelay(5); + retries--; + continue; + } + break; + } +#else err = mmc_sd_init_card(host, host->card->ocr, host->card); +#endif mmc_card_clr_suspended(host->card); out: @@ -1202,6 +1262,9 @@ int mmc_attach_sd(struct mmc_host *host) { int err; u32 ocr, rocr; +#ifdef CONFIG_MMC_PARANOID_SD_INIT + int retries; +#endif WARN_ON(!host->claimed); @@ -1237,9 +1300,27 @@ int mmc_attach_sd(struct mmc_host *host) /* * Detect and init the card. */ +#ifdef CONFIG_MMC_PARANOID_SD_INIT + retries = 5; + while (retries) { + err = mmc_sd_init_card(host, rocr, NULL); + if (err) { + retries--; + continue; + } + break; + } + + if (!retries) { + printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n", + mmc_hostname(host), err); + goto err; + } +#else err = mmc_sd_init_card(host, rocr, NULL); if (err) goto err; +#endif mmc_release_host(host); err = mmc_add_card(host->card); diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index cc43687ca241..c42e3cd537f2 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -31,6 +31,10 @@ #include "sdio_ops.h" #include "sdio_cis.h" +#ifdef CONFIG_MMC_EMBEDDED_SDIO +#include +#endif + static int sdio_read_fbr(struct sdio_func *func) { int ret; @@ -706,28 +710,44 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, goto finish; } - /* - * Read the common registers. Note that we should try to - * validate whether UHS would work or not. - */ - err = sdio_read_cccr(card, ocr); - if (err) { - mmc_sdio_resend_if_cond(host, card); - if (ocr & R4_18V_PRESENT) { - /* Retry init sequence, but without R4_18V_PRESENT. */ - retries = 0; - goto try_again; - } else { - goto remove; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + if (host->embedded_sdio_data.cccr) + memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr)); + else { +#endif + /* + * Read the common registers. Note that we should try to + * validate whether UHS would work or not. + */ + err = sdio_read_cccr(card, ocr); + if (err) { + mmc_sdio_resend_if_cond(host, card); + if (ocr & R4_18V_PRESENT) { + /* Retry init sequence, but without R4_18V_PRESENT. */ + retries = 0; + goto try_again; + } else { + goto remove; + } } +#ifdef CONFIG_MMC_EMBEDDED_SDIO } +#endif - /* - * Read the common CIS tuples. - */ - err = sdio_read_common_cis(card); - if (err) - goto remove; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + if (host->embedded_sdio_data.cis) + memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis)); + else { +#endif + /* + * Read the common CIS tuples. 
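One design note on the #ifdef blocks introduced in sdio.c: if the embedded_sdio_data member were declared unconditionally in struct mmc_host, the same bypass could be written without conditional compilation inside the function, e.g. the sketch below for the CIS branch. This is only an alternative shape, not what this series does:

	if (IS_ENABLED(CONFIG_MMC_EMBEDDED_SDIO) &&
	    host->embedded_sdio_data.cis) {
		/* trust the platform-provided CIS for the embedded device */
		memcpy(&card->cis, host->embedded_sdio_data.cis,
		       sizeof(struct sdio_cis));
	} else {
		err = sdio_read_common_cis(card);
		if (err)
			goto remove;
	}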
+ */ + err = sdio_read_common_cis(card); + if (err) + goto remove; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + } +#endif if (oldcard) { int same = (card->cis.vendor == oldcard->cis.vendor && @@ -1129,14 +1149,36 @@ int mmc_attach_sdio(struct mmc_host *host) funcs = (ocr & 0x70000000) >> 28; card->sdio_funcs = 0; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + if (host->embedded_sdio_data.funcs) + card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs; +#endif + /* * Initialize (but don't add) all present functions. */ for (i = 0; i < funcs; i++, card->sdio_funcs++) { - err = sdio_init_func(host->card, i + 1); - if (err) - goto remove; - +#ifdef CONFIG_MMC_EMBEDDED_SDIO + if (host->embedded_sdio_data.funcs) { + struct sdio_func *tmp; + + tmp = sdio_alloc_func(host->card); + if (IS_ERR(tmp)) + goto remove; + tmp->num = (i + 1); + card->sdio_func[i] = tmp; + tmp->class = host->embedded_sdio_data.funcs[i].f_class; + tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize; + tmp->vendor = card->cis.vendor; + tmp->device = card->cis.device; + } else { +#endif + err = sdio_init_func(host->card, i + 1); + if (err) + goto remove; +#ifdef CONFIG_MMC_EMBEDDED_SDIO + } +#endif /* * Enable Runtime PM for this func (if supported) */ diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 2b32b88949ba..997d556fccf1 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -29,6 +29,10 @@ #include "sdio_cis.h" #include "sdio_bus.h" +#ifdef CONFIG_MMC_EMBEDDED_SDIO +#include +#endif + #define to_sdio_driver(d) container_of(d, struct sdio_driver, drv) /* show configuration fields */ @@ -264,7 +268,14 @@ static void sdio_release_func(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); - sdio_free_func_cis(func); +#ifdef CONFIG_MMC_EMBEDDED_SDIO + /* + * If this device is embedded then we never allocated + * cis tables for this func + */ + if (!func->card->host->embedded_sdio_data.funcs) +#endif + sdio_free_func_cis(func); kfree(func->info); kfree(func->tmpbuf); diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 229dc18f0581..768972af8b85 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1265,7 +1265,8 @@ static int bcm2835_add_host(struct bcm2835_host *host) char pio_limit_string[20]; int ret; - mmc->f_max = host->max_clk; + if (!mmc->f_max || mmc->f_max > host->max_clk) + mmc->f_max = host->max_clk; mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV; mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000); diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c index 35026795be28..a84aa3f1ae85 100644 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ b/drivers/mmc/host/dw_mmc-exynos.c @@ -165,9 +165,15 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing) static int dw_mci_exynos_runtime_resume(struct device *dev) { struct dw_mci *host = dev_get_drvdata(dev); + int ret; + + ret = dw_mci_runtime_resume(dev); + if (ret) + return ret; dw_mci_exynos_config_smu(host); - return dw_mci_runtime_resume(dev); + + return ret; } /** @@ -487,6 +493,7 @@ static unsigned long exynos_dwmmc_caps[4] = { static const struct dw_mci_drv_data exynos_drv_data = { .caps = exynos_dwmmc_caps, + .num_caps = ARRAY_SIZE(exynos_dwmmc_caps), .init = dw_mci_exynos_priv_init, .set_ios = dw_mci_exynos_set_ios, .parse_dt = dw_mci_exynos_parse_dt, diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c index 64cda84b2302..864e7fcaffaf 100644 --- a/drivers/mmc/host/dw_mmc-k3.c +++ 
b/drivers/mmc/host/dw_mmc-k3.c @@ -135,6 +135,9 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host) if (priv->ctrl_id < 0) priv->ctrl_id = 0; + if (priv->ctrl_id >= TIMING_MODE) + return -EINVAL; + host->priv = priv; return 0; } @@ -207,6 +210,7 @@ static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode) static const struct dw_mci_drv_data hi6220_data = { .caps = dw_mci_hi6220_caps, + .num_caps = ARRAY_SIZE(dw_mci_hi6220_caps), .switch_voltage = dw_mci_hi6220_switch_voltage, .set_ios = dw_mci_hi6220_set_ios, .parse_dt = dw_mci_hi6220_parse_dt, diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c index a3f1c2b30145..339295212935 100644 --- a/drivers/mmc/host/dw_mmc-rockchip.c +++ b/drivers/mmc/host/dw_mmc-rockchip.c @@ -319,6 +319,7 @@ static const struct dw_mci_drv_data rk2928_drv_data = { static const struct dw_mci_drv_data rk3288_drv_data = { .caps = dw_mci_rk3288_dwmmc_caps, + .num_caps = ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps), .set_ios = dw_mci_rk3288_set_ios, .execute_tuning = dw_mci_rk3288_execute_tuning, .parse_dt = dw_mci_rk3288_parse_dt, diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c index d38e94ae2b85..c06b5393312f 100644 --- a/drivers/mmc/host/dw_mmc-zx.c +++ b/drivers/mmc/host/dw_mmc-zx.c @@ -195,6 +195,7 @@ static unsigned long zx_dwmmc_caps[3] = { static const struct dw_mci_drv_data zx_drv_data = { .caps = zx_dwmmc_caps, + .num_caps = ARRAY_SIZE(zx_dwmmc_caps), .execute_tuning = dw_mci_zx_execute_tuning, .prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning, .parse_dt = dw_mci_zx_parse_dt, diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 4f2806720c5c..5252885e5cda 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -165,6 +165,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v) { struct dw_mci *host = s->private; + pm_runtime_get_sync(host->dev); + seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); @@ -172,6 +174,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v) seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); + pm_runtime_put_autosuspend(host->dev); + return 0; } @@ -409,7 +413,9 @@ static inline void dw_mci_set_cto(struct dw_mci *host) cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; if (cto_div == 0) cto_div = 1; - cto_ms = DIV_ROUND_UP(MSEC_PER_SEC * cto_clks * cto_div, host->bus_hz); + + cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div, + host->bus_hz); /* add a bit spare time */ cto_ms += 10; @@ -558,6 +564,7 @@ static int dw_mci_idmac_init(struct dw_mci *host) (sizeof(struct idmac_desc_64addr) * (i + 1))) >> 32; /* Initialize reserved and buffer size fields to "0" */ + p->des0 = 0; p->des1 = 0; p->des2 = 0; p->des3 = 0; @@ -580,6 +587,7 @@ static int dw_mci_idmac_init(struct dw_mci *host) i++, p++) { p->des3 = cpu_to_le32(host->sg_dma + (sizeof(struct idmac_desc) * (i + 1))); + p->des0 = 0; p->des1 = 0; } @@ -1081,8 +1089,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data) * It's used when HS400 mode is enabled. 
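Each dw_mmc variant driver touched above now pairs its caps array with a num_caps count; dw_mci_init_slot_caps(), added further down in dw_mmc.c, uses it to bounds-check the controller id taken from the "mshc" DT alias instead of indexing the array blindly. A new variant would follow the same pattern (names and cap values illustrative):

	static unsigned long my_dwmmc_caps[2] = {
		MMC_CAP_CMD23,				/* controller 0 */
		MMC_CAP_CMD23 | MMC_CAP_8_BIT_DATA,	/* controller 1 */
	};

	static const struct dw_mci_drv_data my_drv_data = {
		.caps		= my_dwmmc_caps,
		.num_caps	= ARRAY_SIZE(my_dwmmc_caps),
	};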
*/ if (data->flags & MMC_DATA_WRITE && - !(host->timing != MMC_TIMING_MMC_HS400)) - return; + host->timing != MMC_TIMING_MMC_HS400) + goto disable; if (data->flags & MMC_DATA_WRITE) enable = SDMMC_CARD_WR_THR_EN; @@ -1090,7 +1098,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data) enable = SDMMC_CARD_RD_THR_EN; if (host->timing != MMC_TIMING_MMC_HS200 && - host->timing != MMC_TIMING_UHS_SDR104) + host->timing != MMC_TIMING_UHS_SDR104 && + host->timing != MMC_TIMING_MMC_HS400) goto disable; blksz_depth = blksz / (1 << host->data_shift); @@ -1246,6 +1255,8 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) if (host->state == STATE_WAITING_CMD11_DONE) sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH; + slot->mmc->actual_clock = 0; + if (!clock) { mci_writel(host, CLKENA, 0); mci_send_cmd(slot, sdmmc_cmd_bits, 0); @@ -1304,6 +1315,8 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) /* keep the last clock value that was requested from core */ slot->__clk_old = clock; + slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) : + host->bus_hz; } host->current_speed = clock; @@ -1795,8 +1808,8 @@ static bool dw_mci_reset(struct dw_mci *host) } if (host->use_dma == TRANS_MODE_IDMAC) - /* It is also recommended that we reset and reprogram idmac */ - dw_mci_idmac_reset(host); + /* It is also required that we reinit idmac */ + dw_mci_idmac_init(host); ret = true; @@ -1943,8 +1956,9 @@ static void dw_mci_set_drto(struct dw_mci *host) drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; if (drto_div == 0) drto_div = 1; - drto_ms = DIV_ROUND_UP(MSEC_PER_SEC * drto_clks * drto_div, - host->bus_hz); + + drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div, + host->bus_hz); /* add a bit spare time */ drto_ms += 10; @@ -2758,12 +2772,57 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +static int dw_mci_init_slot_caps(struct dw_mci_slot *slot) +{ + struct dw_mci *host = slot->host; + const struct dw_mci_drv_data *drv_data = host->drv_data; + struct mmc_host *mmc = slot->mmc; + int ctrl_id; + + if (host->pdata->caps) + mmc->caps = host->pdata->caps; + + /* + * Support MMC_CAP_ERASE by default. + * It needs to use trim/discard/erase commands. + */ + mmc->caps |= MMC_CAP_ERASE; + + if (host->pdata->pm_caps) + mmc->pm_caps = host->pdata->pm_caps; + + if (host->dev->of_node) { + ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); + if (ctrl_id < 0) + ctrl_id = 0; + } else { + ctrl_id = to_platform_device(host->dev)->id; + } + + if (drv_data && drv_data->caps) { + if (ctrl_id >= drv_data->num_caps) { + dev_err(host->dev, "invalid controller id %d\n", + ctrl_id); + return -EINVAL; + } + mmc->caps |= drv_data->caps[ctrl_id]; + } + + if (host->pdata->caps2) + mmc->caps2 = host->pdata->caps2; + + /* Process SDIO IRQs through the sdio_irq_work. */ + if (mmc->caps & MMC_CAP_SDIO_IRQ) + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; + + return 0; +} + static int dw_mci_init_slot(struct dw_mci *host) { struct mmc_host *mmc; struct dw_mci_slot *slot; - const struct dw_mci_drv_data *drv_data = host->drv_data; - int ctrl_id, ret; + int ret; u32 freq[2]; mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); @@ -2797,38 +2856,13 @@ static int dw_mci_init_slot(struct dw_mci *host) if (!mmc->ocr_avail) mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; - if (host->pdata->caps) - mmc->caps = host->pdata->caps; - - /* - * Support MMC_CAP_ERASE by default. - * It needs to use trim/discard/erase commands. 
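The DIV_ROUND_UP_ULL conversions in dw_mci_set_cto() and dw_mci_set_drto() above are overflow fixes: with the 24-bit data timeout field at its maximum and CLKDIV at 0xff, the intermediate product no longer fits in 32 bits. A rough worked example under those assumed register values:

	u32 drto_clks = 0xffffff;	/* data read timeout field at max */
	u32 drto_div = 0xff * 2;	/* CLKDIV 0xff, doubled */
	u32 bus_hz = 200000000;		/* example ciu clock rate */

	/* 1000 * 16777215 * 510 ~= 8.6e12 wraps in 32-bit arithmetic;
	 * the (u64) cast keeps the product intact: ~42.8 s here. */
	u32 drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks *
				       drto_div, bus_hz);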
- */ - mmc->caps |= MMC_CAP_ERASE; - - if (host->pdata->pm_caps) - mmc->pm_caps = host->pdata->pm_caps; - - if (host->dev->of_node) { - ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); - if (ctrl_id < 0) - ctrl_id = 0; - } else { - ctrl_id = to_platform_device(host->dev)->id; - } - if (drv_data && drv_data->caps) - mmc->caps |= drv_data->caps[ctrl_id]; - - if (host->pdata->caps2) - mmc->caps2 = host->pdata->caps2; - ret = mmc_of_parse(mmc); if (ret) goto err_host_allocated; - /* Process SDIO IRQs through the sdio_irq_work. */ - if (mmc->caps & MMC_CAP_SDIO_IRQ) - mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; + ret = dw_mci_init_slot_caps(slot); + if (ret) + goto err_host_allocated; /* Useful defaults if platform data is unset. */ if (host->use_dma == TRANS_MODE_IDMAC) { diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 34474ad731aa..044c87ce6725 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -542,6 +542,7 @@ struct dw_mci_slot { /** * dw_mci driver data - dw-mshc implementation specific driver data. * @caps: mmc subsystem specified capabilities of the controller(s). + * @num_caps: number of capabilities specified by @caps. * @init: early implementation specific initialization. * @set_ios: handle bus specific extensions. * @parse_dt: parse implementation specific device tree properties. @@ -553,6 +554,7 @@ struct dw_mci_slot { */ struct dw_mci_drv_data { unsigned long *caps; + u32 num_caps; int (*init)(struct dw_mci *host); void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios); int (*parse_dt)(struct dw_mci *host); diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 7db8c7a8d38d..48b67f552afe 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -362,9 +362,9 @@ static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host, host->irq_mask &= ~irq; else host->irq_mask |= irq; - spin_unlock_irqrestore(&host->lock, flags); writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK); + spin_unlock_irqrestore(&host->lock, flags); } static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host, diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index 85745ef179e2..08a55c2e96e1 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -716,22 +716,6 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode, static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct meson_host *host = mmc_priv(mmc); - int ret; - - /* - * If this is the initial tuning, try to get a sane Rx starting - * phase before doing the actual tuning. 
- */ - if (!mmc->doing_retune) { - ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); - - if (ret) - return ret; - } - - ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk); - if (ret) - return ret; return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); } @@ -762,9 +746,8 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (!IS_ERR(mmc->supply.vmmc)) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); - /* Reset phases */ + /* Reset rx phase */ clk_set_phase(host->rx_clk, 0); - clk_set_phase(host->tx_clk, 270); break; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index fcf7235d5742..157e1d9e7725 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -667,3 +668,5 @@ int renesas_sdhi_remove(struct platform_device *pdev) return 0; } EXPORT_SYMBOL_GPL(renesas_sdhi_remove); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 85140c9af581..c81de2f25281 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -305,6 +305,15 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) if (imx_data->socdata->flags & ESDHC_FLAG_HS400) val |= SDHCI_SUPPORT_HS400; + + /* + * Do not advertise faster UHS modes if there are no + * pinctrl states for 100MHz/200MHz. + */ + if (IS_ERR_OR_NULL(imx_data->pins_100mhz) || + IS_ERR_OR_NULL(imx_data->pins_200mhz)) + val &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50 + | SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_HS400); } } @@ -687,6 +696,20 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host, return; } + /* For i.MX53 eSDHCv3, SYSCTL.SDCLKFS may not be set to 0. */ + if (is_imx53_esdhc(imx_data)) { + /* + * According to the i.MX53 reference manual, if DLLCTRL[10] can + * be set, then the controller is eSDHCv3, else it is eSDHCv2. 
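The comment above describes the probe trick that the following hunk implements inline: briefly set DLLCTRL bit 10 and see whether it reads back set; the bit is writable only on eSDHCv3, where SYSCTL.SDCLKFS may not be 0, hence pre_div starting at 2. Factored out as a standalone sketch (same registers as the hunk, helper name illustrative):

	static bool esdhc_is_v3(struct sdhci_host *host)
	{
		u32 dll = readl(host->ioaddr + ESDHC_DLL_CTRL);
		u32 probe;

		writel(dll | BIT(10), host->ioaddr + ESDHC_DLL_CTRL);
		probe = readl(host->ioaddr + ESDHC_DLL_CTRL);
		writel(dll, host->ioaddr + ESDHC_DLL_CTRL);	/* restore */

		return probe & BIT(10);	/* bit sticks only on eSDHCv3 */
	}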
+ */ + val = readl(host->ioaddr + ESDHC_DLL_CTRL); + writel(val | BIT(10), host->ioaddr + ESDHC_DLL_CTRL); + temp = readl(host->ioaddr + ESDHC_DLL_CTRL); + writel(val, host->ioaddr + ESDHC_DLL_CTRL); + if (temp & BIT(10)) + pre_div = 2; + } + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); @@ -1121,18 +1144,6 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, ESDHC_PINCTRL_STATE_100MHZ); imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, ESDHC_PINCTRL_STATE_200MHZ); - if (IS_ERR(imx_data->pins_100mhz) || - IS_ERR(imx_data->pins_200mhz)) { - dev_warn(mmc_dev(host->mmc), - "could not get ultra high speed state, work on normal mode\n"); - /* - * fall back to not supporting uhs by specifying no - * 1.8v quirk - */ - host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; - } - } else { - host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; } /* call to generic mmc_of_parse to support additional capabilities */ diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c index 61666d269771..0cfbdb3ab68a 100644 --- a/drivers/mmc/host/sdhci-iproc.c +++ b/drivers/mmc/host/sdhci-iproc.c @@ -33,6 +33,8 @@ struct sdhci_iproc_host { const struct sdhci_iproc_data *data; u32 shadow_cmd; u32 shadow_blk; + bool is_cmd_shadowed; + bool is_blk_shadowed; }; #define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18) @@ -48,8 +50,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg) static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg) { - u32 val = sdhci_iproc_readl(host, (reg & ~3)); - u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host); + u32 val; + u16 word; + + if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) { + /* Get the saved transfer mode */ + val = iproc_host->shadow_cmd; + } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) && + iproc_host->is_blk_shadowed) { + /* Get the saved block info */ + val = iproc_host->shadow_blk; + } else { + val = sdhci_iproc_readl(host, (reg & ~3)); + } + word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff; return word; } @@ -105,13 +121,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg) if (reg == SDHCI_COMMAND) { /* Write the block now as we are issuing a command */ - if (iproc_host->shadow_blk != 0) { + if (iproc_host->is_blk_shadowed) { sdhci_iproc_writel(host, iproc_host->shadow_blk, SDHCI_BLOCK_SIZE); - iproc_host->shadow_blk = 0; + iproc_host->is_blk_shadowed = false; } oldval = iproc_host->shadow_cmd; - } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { + iproc_host->is_cmd_shadowed = false; + } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) && + iproc_host->is_blk_shadowed) { /* Block size and count are stored in shadow reg */ oldval = iproc_host->shadow_blk; } else { @@ -123,9 +141,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg) if (reg == SDHCI_TRANSFER_MODE) { /* Save the transfer mode until the command is issued */ iproc_host->shadow_cmd = newval; + iproc_host->is_cmd_shadowed = true; } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { /* Save the block info until the command is issued */ iproc_host->shadow_blk = newval; + iproc_host->is_blk_shadowed = true; } else { /* Command or other regular 32-bit write */ sdhci_iproc_writel(host, newval, reg & ~3); @@ -166,7 +186,7 @@ static const 
struct sdhci_ops sdhci_iproc_32only_ops = { static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = { .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, - .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, + .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON, .ops = &sdhci_iproc_32only_ops, }; @@ -206,7 +226,6 @@ static const struct sdhci_iproc_data iproc_data = { .caps1 = SDHCI_DRIVER_TYPE_C | SDHCI_DRIVER_TYPE_D | SDHCI_SUPPORT_DDR50, - .mmc_caps = MMC_CAP_1_8V_DDR, }; static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = { diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index fc73e56eb1e2..92c483ec6cb2 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1251,6 +1251,21 @@ static int sdhci_msm_probe(struct platform_device *pdev) CORE_VENDOR_SPEC_CAPABILITIES0); } + /* + * Power on reset state may trigger power irq if previous status of + * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq + * interrupt in GIC, any pending power irq interrupt should be + * acknowledged. Otherwise power irq interrupt handler would be + * fired prematurely. + */ + sdhci_msm_voltage_switch(host); + + /* + * Ensure that above writes are propagated before interrupt enablement + * in GIC. + */ + mb(); + /* Setup IRQ for handling power/voltage tasks with PMIC */ msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq"); if (msm_host->pwr_irq < 0) { @@ -1260,6 +1275,9 @@ static int sdhci_msm_probe(struct platform_device *pdev) goto clk_disable; } + /* Enable pwr irq interrupts */ + writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK); + ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL, sdhci_msm_pwr_irq, IRQF_ONESHOT, dev_name(&pdev->dev), host); diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index d96a057a7db8..4ffa6b173a21 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -458,6 +458,33 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) return clock / 256 / 16; } +static void esdhc_clock_enable(struct sdhci_host *host, bool enable) +{ + u32 val; + ktime_t timeout; + + val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + + if (enable) + val |= ESDHC_CLOCK_SDCLKEN; + else + val &= ~ESDHC_CLOCK_SDCLKEN; + + sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL); + + /* Wait max 20 ms */ + timeout = ktime_add_ms(ktime_get(), 20); + val = ESDHC_CLOCK_STABLE; + while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) { + if (ktime_after(ktime_get(), timeout)) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + break; + } + udelay(10); + } +} + static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -469,8 +496,10 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) host->mmc->actual_clock = 0; - if (clock == 0) + if (clock == 0) { + esdhc_clock_enable(host, false); return; + } /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ if (esdhc->vendor_ver < VENDOR_V_23) @@ -558,39 +587,20 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) sdhci_writel(host, ctrl, ESDHC_PROCTL); } -static void esdhc_clock_enable(struct sdhci_host *host, bool enable) +static void esdhc_reset(struct sdhci_host *host, u8 mask) { u32 val; - ktime_t timeout; - - val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - if (enable) - val |= ESDHC_CLOCK_SDCLKEN; - else - val &= 
~ESDHC_CLOCK_SDCLKEN; - - sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL); - - /* Wait max 20 ms */ - timeout = ktime_add_ms(ktime_get(), 20); - val = ESDHC_CLOCK_STABLE; - while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) { - if (ktime_after(ktime_get(), timeout)) { - pr_err("%s: Internal clock never stabilised.\n", - mmc_hostname(host->mmc)); - break; - } - udelay(10); - } -} - -static void esdhc_reset(struct sdhci_host *host, u8 mask) -{ sdhci_reset(host, mask); sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + + if (mask & SDHCI_RESET_ALL) { + val = sdhci_readl(host, ESDHC_TBCTL); + val &= ~ESDHC_TB_EN; + sdhci_writel(host, val, ESDHC_TBCTL); + } } /* The SCFG, Supplemental Configuration Unit, provides SoC specific diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 67d787fa3306..30e9c2405070 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -594,9 +594,36 @@ static void byt_read_dsm(struct sdhci_pci_slot *slot) slot->chip->rpm_retune = intel_host->d3_retune; } -static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) +static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + int err = sdhci_execute_tuning(mmc, opcode); + struct sdhci_host *host = mmc_priv(mmc); + + if (err) + return err; + + /* + * Tuning can leave the IP in an active state (Buffer Read Enable bit + * set) which prevents the entry to low power states (i.e. S0i3). Data + * reset will clear it. + */ + sdhci_reset(host, SDHCI_RESET_DATA); + + return 0; +} + +static void byt_probe_slot(struct sdhci_pci_slot *slot) { + struct mmc_host_ops *ops = &slot->host->mmc_host_ops; + byt_read_dsm(slot); + + ops->execute_tuning = intel_execute_tuning; +} + +static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) +{ + byt_probe_slot(slot); slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | MMC_CAP_CMD_DURING_TFR | @@ -606,6 +633,9 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 
1 MHz */ slot->host->mmc_host_ops.select_drive_strength = intel_select_drive_strength; + if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_EMMC) { + slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO | MMC_CAP2_NO_SD; + } return 0; } @@ -651,7 +681,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot) { int err; - byt_read_dsm(slot); + byt_probe_slot(slot); err = ni_set_max_freq(slot); if (err) @@ -664,7 +694,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot) static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) { - byt_read_dsm(slot); + byt_probe_slot(slot); slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | MMC_CAP_WAIT_WHILE_BUSY; return 0; @@ -672,7 +702,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) { - byt_read_dsm(slot); + byt_probe_slot(slot); slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE; slot->cd_idx = 0; @@ -779,6 +809,8 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot) slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; break; case INTEL_MRFLD_SDIO: + /* Advertise 2.0v for compatibility with the SDIO card's OCR */ + slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195; slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD; break; @@ -1163,7 +1195,7 @@ static void amd_enable_manual_tuning(struct pci_dev *pdev) pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val); } -static int amd_execute_tuning(struct sdhci_host *host, u32 opcode) +static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode) { struct sdhci_pci_slot *slot = sdhci_priv(host); struct pci_dev *pdev = slot->chip->pdev; @@ -1202,6 +1234,27 @@ static int amd_execute_tuning(struct sdhci_host *host, u32 opcode) return 0; } +static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + + /* AMD requires custom HS200 tuning */ + if (host->timing == MMC_TIMING_MMC_HS200) + return amd_execute_tuning_hs200(host, opcode); + + /* Otherwise perform standard SDHCI tuning */ + return sdhci_execute_tuning(mmc, opcode); +} + +static int amd_probe_slot(struct sdhci_pci_slot *slot) +{ + struct mmc_host_ops *ops = &slot->host->mmc_host_ops; + + ops->execute_tuning = amd_execute_tuning; + + return 0; +} + static int amd_probe(struct sdhci_pci_chip *chip) { struct pci_dev *smbus_dev; @@ -1236,12 +1289,12 @@ static const struct sdhci_ops amd_sdhci_pci_ops = { .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, - .platform_execute_tuning = amd_execute_tuning, }; static const struct sdhci_pci_fixes sdhci_amd = { .probe = amd_probe, .ops = &amd_sdhci_pci_ops, + .probe_slot = amd_probe_slot, }; static const struct pci_device_id pci_ids[] = { diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 0842bbc2d7ad..4d0791f6ec23 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -230,7 +230,14 @@ static void xenon_set_power(struct sdhci_host *host, unsigned char mode, mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); } +static void xenon_voltage_switch(struct sdhci_host *host) +{ + /* Wait for 5ms after set 1.8V signal enable bit */ + usleep_range(5000, 5500); +} + static const struct sdhci_ops sdhci_xenon_ops = { + .voltage_switch = xenon_voltage_switch, .set_clock = sdhci_set_clock, .set_power = xenon_set_power, .set_bus_width = sdhci_set_bus_width, diff --git 
a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 0d5fcca18c9e..d35deb79965d 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include #include #include @@ -501,8 +503,35 @@ static int sdhci_pre_dma_transfer(struct sdhci_host *host, if (data->host_cookie == COOKIE_PRE_MAPPED) return data->sg_count; - sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, - mmc_get_dma_dir(data)); + /* Bounce write requests to the bounce buffer */ + if (host->bounce_buffer) { + unsigned int length = data->blksz * data->blocks; + + if (length > host->bounce_buffer_size) { + pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n", + mmc_hostname(host->mmc), length, + host->bounce_buffer_size); + return -EIO; + } + if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { + /* Copy the data to the bounce buffer */ + sg_copy_to_buffer(data->sg, data->sg_len, + host->bounce_buffer, + length); + } + /* Switch ownership to the DMA */ + dma_sync_single_for_device(host->mmc->parent, + host->bounce_addr, + host->bounce_buffer_size, + mmc_get_dma_dir(data)); + /* Just a dummy value */ + sg_count = 1; + } else { + /* Just access the data directly from memory */ + sg_count = dma_map_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, + mmc_get_dma_dir(data)); + } if (sg_count == 0) return -ENOSPC; @@ -672,6 +701,14 @@ static void sdhci_adma_table_post(struct sdhci_host *host, } } +static u32 sdhci_sdma_address(struct sdhci_host *host) +{ + if (host->bounce_buffer) + return host->bounce_addr; + else + return sg_dma_address(host->data->sg); +} + static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) { u8 count; @@ -857,8 +894,8 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) SDHCI_ADMA_ADDRESS_HI); } else { WARN_ON(sg_cnt != 1); - sdhci_writel(host, sg_dma_address(data->sg), - SDHCI_DMA_ADDRESS); + sdhci_writel(host, sdhci_sdma_address(host), + SDHCI_DMA_ADDRESS); } } @@ -1433,6 +1470,13 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, if (mode != MMC_POWER_OFF) { switch (1 << vdd) { case MMC_VDD_165_195: + /* + * Without a regulator, SDHCI does not support 2.0v + * so we only get here if the driver deliberately + * added the 2.0v range to ocr_avail. Map it to 1.8v + * for the purpose of turning on the power. + */ + case MMC_VDD_20_21: pwr = SDHCI_POWER_180; break; case MMC_VDD_29_30: @@ -2247,7 +2291,12 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) mrq->data->host_cookie = COOKIE_UNMAPPED; - if (host->flags & SDHCI_REQ_USE_DMA) + /* + * No pre-mapping in the pre hook if we're using the bounce buffer, + * for that we would need two bounce buffers since one buffer is + * in flight when this is getting called. 
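The bounce-buffer hunks above replace per-request dma_map_sg() with a single long-lived mapping: the CPU packs the scatterlist into one contiguous buffer, then only ownership is handed over with dma_sync_single_for_device(). Stripped of the sdhci specifics, the write-side pattern is roughly (a sketch; the names mirror the new sdhci_host fields):

	/* CPU fills the premapped contiguous buffer from the sglist... */
	sg_copy_to_buffer(data->sg, data->sg_len,
			  host->bounce_buffer, length);
	/* ...then the device takes ownership of the whole mapping */
	dma_sync_single_for_device(host->mmc->parent, host->bounce_addr,
				   host->bounce_buffer_size,
				   mmc_get_dma_dir(data));

Reads invert the two steps on completion, with dma_sync_single_for_cpu() before sg_copy_from_buffer(), as the sdhci_request_done() hunk just below shows.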
+ */ + if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); } @@ -2351,8 +2400,45 @@ static bool sdhci_request_done(struct sdhci_host *host) struct mmc_data *data = mrq->data; if (data && data->host_cookie == COOKIE_MAPPED) { - dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, - mmc_get_dma_dir(data)); + if (host->bounce_buffer) { + /* + * On reads, copy the bounced data into the + * sglist + */ + if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { + unsigned int length = data->bytes_xfered; + + if (length > host->bounce_buffer_size) { + pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", + mmc_hostname(host->mmc), + host->bounce_buffer_size, + data->bytes_xfered); + /* Cap it down and continue */ + length = host->bounce_buffer_size; + } + dma_sync_single_for_cpu( + host->mmc->parent, + host->bounce_addr, + host->bounce_buffer_size, + DMA_FROM_DEVICE); + sg_copy_from_buffer(data->sg, + data->sg_len, + host->bounce_buffer, + length); + } else { + /* No copying, just switch ownership */ + dma_sync_single_for_cpu( + host->mmc->parent, + host->bounce_addr, + host->bounce_buffer_size, + mmc_get_dma_dir(data)); + } + } else { + /* Unmap the raw data */ + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, + mmc_get_dma_dir(data)); + } data->host_cookie = COOKIE_UNMAPPED; } } @@ -2635,7 +2721,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) */ if (intmask & SDHCI_INT_DMA_END) { u32 dmastart, dmanow; - dmastart = sg_dma_address(host->data->sg); + + dmastart = sdhci_sdma_address(host); dmanow = dmastart + host->data->bytes_xfered; /* * Force update to the next DMA block boundary. @@ -3216,6 +3303,68 @@ void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1) } EXPORT_SYMBOL_GPL(__sdhci_read_caps); +static int sdhci_allocate_bounce_buffer(struct sdhci_host *host) +{ + struct mmc_host *mmc = host->mmc; + unsigned int max_blocks; + unsigned int bounce_size; + int ret; + + /* + * Cap the bounce buffer at 64KB. Using a bigger bounce buffer + * has diminishing returns, this is probably because SD/MMC + * cards are usually optimized to handle this size of requests. + */ + bounce_size = SZ_64K; + /* + * Adjust downwards to maximum request size if this is less + * than our segment size, else hammer down the maximum + * request size to the maximum buffer size. + */ + if (mmc->max_req_size < bounce_size) + bounce_size = mmc->max_req_size; + max_blocks = bounce_size / 512; + + /* + * When we just support one segment, we can get significant + * speedups by the help of a bounce buffer to group scattered + * reads/writes together. + */ + host->bounce_buffer = devm_kmalloc(mmc->parent, + bounce_size, + GFP_KERNEL); + if (!host->bounce_buffer) { + pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n", + mmc_hostname(mmc), + bounce_size); + /* + * Exiting with zero here makes sure we proceed with + * mmc->max_segs == 1. 
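The sizing arithmetic in sdhci_allocate_bounce_buffer() works out as follows for the common case: the buffer is capped at 64 KiB (or at max_req_size if that is smaller), and the advertised segment count is the buffer size in 512-byte blocks. In numbers:

	unsigned int bounce_size = min_t(unsigned int, SZ_64K,
					 mmc->max_req_size);
	unsigned int max_blocks = bounce_size / 512;	/* 128 for 64 KiB */

	/* the core then sees one mergeable 64 KiB window */
	mmc->max_segs     = max_blocks;
	mmc->max_seg_size = bounce_size;
	mmc->max_req_size = bounce_size;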
+ */ + return 0; + } + + host->bounce_addr = dma_map_single(mmc->parent, + host->bounce_buffer, + bounce_size, + DMA_BIDIRECTIONAL); + ret = dma_mapping_error(mmc->parent, host->bounce_addr); + if (ret) + /* Again fall back to max_segs == 1 */ + return 0; + host->bounce_buffer_size = bounce_size; + + /* Lie about this since we're bouncing */ + mmc->max_segs = max_blocks; + mmc->max_seg_size = bounce_size; + mmc->max_req_size = bounce_size; + + pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n", + mmc_hostname(mmc), max_blocks, bounce_size); + + return 0; +} + int sdhci_setup_host(struct sdhci_host *host) { struct mmc_host *mmc; @@ -3650,23 +3799,30 @@ int sdhci_setup_host(struct sdhci_host *host) spin_lock_init(&host->lock); + /* + * Maximum number of sectors in one transfer. Limited by SDMA boundary + * size (512KiB). Note some tuning modes impose a 4MiB limit, but this + * is less anyway. + */ + mmc->max_req_size = 524288; + /* * Maximum number of segments. Depends on if the hardware * can do scatter/gather or not. */ - if (host->flags & SDHCI_USE_ADMA) + if (host->flags & SDHCI_USE_ADMA) { mmc->max_segs = SDHCI_MAX_SEGS; - else if (host->flags & SDHCI_USE_SDMA) + } else if (host->flags & SDHCI_USE_SDMA) { mmc->max_segs = 1; - else /* PIO */ + if (swiotlb_max_segment()) { + unsigned int max_req_size = (1 << IO_TLB_SHIFT) * + IO_TLB_SEGSIZE; + mmc->max_req_size = min(mmc->max_req_size, + max_req_size); + } + } else { /* PIO */ mmc->max_segs = SDHCI_MAX_SEGS; - - /* - * Maximum number of sectors in one transfer. Limited by SDMA boundary - * size (512KiB). Note some tuning modes impose a 4MiB limit, but this - * is less anyway. - */ - mmc->max_req_size = 524288; + } /* * Maximum segment size. Could be one segment with the maximum number @@ -3705,6 +3861,13 @@ int sdhci_setup_host(struct sdhci_host *host) */ mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 
1 : 65535; + if (mmc->max_segs == 1) { + /* This may alter mmc->*_blk_* parameters */ + ret = sdhci_allocate_bounce_buffer(host); + if (ret) + return ret; + } + return 0; unreg: diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 54bc444c317f..1d7d61e25dbf 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -440,6 +440,9 @@ struct sdhci_host { int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ + char *bounce_buffer; /* For packing SDMA reads/writes */ + dma_addr_t bounce_addr; + unsigned int bounce_buffer_size; const struct sdhci_ops *ops; /* Low level hw interface */ diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 9c4e6199b854..de1562f27fdb 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -911,7 +911,7 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host) host->check_scc_error(host); /* If SET_BLOCK_COUNT, continue with main command */ - if (host->mrq) { + if (host->mrq && !mrq->cmd->error) { tmio_process_mrq(host, mrq); return; } @@ -1113,8 +1113,11 @@ static int tmio_mmc_init_ocr(struct tmio_mmc_host *host) { struct tmio_mmc_data *pdata = host->pdata; struct mmc_host *mmc = host->mmc; + int err; - mmc_regulator_get_supply(mmc); + err = mmc_regulator_get_supply(mmc); + if (err) + return err; /* use ocr_mask if no regulator */ if (!mmc->ocr_avail) diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 5e1b68cbcd0a..e1b603ca0170 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -45,6 +45,7 @@ #define I82802AB 0x00ad #define I82802AC 0x00ac #define PF38F4476 0x881c +#define M28F00AP30 0x8963 /* STMicroelectronics chips */ #define M50LPW080 0x002F #define M50FLW080A 0x0080 @@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi, extp->MinorVersion = '1'; } +static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip) +{ + /* + * Micron (formerly Numonyx) 1Gbit bottom-boot parts are buggy w.r.t. + * Erase Suspend on their small (0x8000-byte) erase blocks + */ + if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30) + return 1; + return 0; +} + static inline struct cfi_pri_intelext * read_pri_intelext(struct map_info *map, __u16 adr) { @@ -831,21 +843,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1)))) goto sleep; + /* Do not allow suspend if reading/writing the erasing EB's address */ + if ((adr & chip->in_progress_block_mask) == + chip->in_progress_block_addr) + goto sleep; + + /* do not suspend small EBs on buggy Micron chips */ + if (cfi_is_micron_28F00AP30(cfi, chip) && + (chip->in_progress_block_mask == ~(0x8000-1))) + goto sleep; /* Erase suspend */ - map_write(map, CMD(0xB0), adr); + map_write(map, CMD(0xB0), chip->in_progress_block_addr); /* If the flash has finished erasing, then 'erase suspend' * appears to make some (28F320) flash devices switch to * 'read' mode. Make sure that we switch to 'read status' * mode so we get the right data.
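For the suspend checks just added, the mask arithmetic is worth spelling out. A small sketch (hypothetical helper) of how in_progress_block_mask relates an arbitrary address to the block being erased:

#include <linux/types.h>

static bool addr_in_erasing_block(unsigned long adr,
				  unsigned long block_addr,
				  unsigned long block_mask)
{
	/*
	 * block_mask is ~(len - 1) for a power-of-two block of len
	 * bytes, so masking an address yields its block base; the
	 * address is in the erasing block exactly when that base
	 * equals the recorded block_addr.
	 */
	return (adr & block_mask) == block_addr;
}

For the quirky Micron part, the 32KiB parameter blocks give block_mask == ~(0x8000UL - 1), which is exactly the value the new Micron test compares against.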
--rmk */ - map_write(map, CMD(0x70), adr); + map_write(map, CMD(0x70), chip->in_progress_block_addr); chip->oldstate = FL_ERASING; chip->state = FL_ERASE_SUSPENDING; chip->erase_suspended = 1; for (;;) { - status = map_read(map, adr); + status = map_read(map, chip->in_progress_block_addr); if (map_word_andequal(map, status, status_OK, status_OK)) break; @@ -1041,8 +1062,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad sending the 0x70 (Read Status) command to an erasing chip and expecting it to be ignored, that's what we do. */ - map_write(map, CMD(0xd0), adr); - map_write(map, CMD(0x70), adr); + map_write(map, CMD(0xd0), chip->in_progress_block_addr); + map_write(map, CMD(0x70), chip->in_progress_block_addr); chip->oldstate = FL_READY; chip->state = FL_ERASING; break; @@ -1933,6 +1954,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, map_write(map, CMD(0xD0), adr); chip->state = FL_ERASING; chip->erase_suspended = 0; + chip->in_progress_block_addr = adr; + chip->in_progress_block_mask = ~(len - 1); ret = INVAL_CACHE_AND_WAIT(map, chip, adr, adr, len, diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 56aa6b75213d..af3d207c9cc4 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -42,7 +42,7 @@ #define AMD_BOOTLOC_BUG #define FORCE_WORD_WRITE 0 -#define MAX_WORD_RETRIES 3 +#define MAX_RETRIES 3 #define SST49LF004B 0x0060 #define SST49LF040B 0x0050 @@ -816,9 +816,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) goto sleep; - /* We could check to see if we're trying to access the sector - * that is currently being erased. However, no user will try - * anything like that so we just wait for the timeout. 
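Both command sets now follow the same rule: suspend, status and resume commands are addressed to the block that is actually erasing, never to whatever address the caller happened to pass in. A condensed sketch (hypothetical function; cfi_build_cmd() is the generic helper behind the CMD() macro used in these files):

#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>

static void erase_suspend_sketch(struct map_info *map, struct flchip *chip,
				 struct cfi_private *cfi)
{
	unsigned long erasing = chip->in_progress_block_addr;

	map_write(map, cfi_build_cmd(0xB0, map, cfi), erasing); /* suspend */
	map_write(map, cfi_build_cmd(0x70, map, cfi), erasing); /* read status */

	chip->oldstate = FL_ERASING;
	chip->state = FL_ERASE_SUSPENDING;
}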
*/ + /* Do not allow suspend iff read/write to EB address */ + if ((adr & chip->in_progress_block_mask) == + chip->in_progress_block_addr) + goto sleep; /* Erase suspend */ /* It's harmless to issue the Erase-Suspend and Erase-Resume @@ -1646,7 +1647,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, map_write( map, CMD(0xF0), chip->start ); /* FIXME - should have reset delay before continuing */ - if (++retry_cnt <= MAX_WORD_RETRIES) + if (++retry_cnt <= MAX_RETRIES) goto retry; ret = -EIO; @@ -1879,7 +1880,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, if (time_after(jiffies, timeo) && !chip_ready(map, adr)) break; - if (chip_ready(map, adr)) { + if (chip_good(map, adr, datum)) { xip_enable(map, chip, adr); goto op_done; } @@ -2105,7 +2106,7 @@ static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, map_write(map, CMD(0xF0), chip->start); /* FIXME - should have reset delay before continuing */ - if (++retry_cnt <= MAX_WORD_RETRIES) + if (++retry_cnt <= MAX_RETRIES) goto retry; ret = -EIO; @@ -2240,6 +2241,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) unsigned long int adr; DECLARE_WAITQUEUE(wait, current); int ret = 0; + int retry_cnt = 0; adr = cfi->addr_unlock1; @@ -2257,6 +2259,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) ENABLE_VPP(map); xip_disable(map, chip, adr); + retry: cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); @@ -2267,6 +2270,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) chip->state = FL_ERASING; chip->erase_suspended = 0; chip->in_progress_block_addr = adr; + chip->in_progress_block_mask = ~(map->size - 1); INVALIDATE_CACHE_UDELAY(map, chip, adr, map->size, @@ -2292,12 +2296,13 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) chip->erase_suspended = 0; } - if (chip_ready(map, adr)) + if (chip_good(map, adr, map_word_ff(map))) break; if (time_after(jiffies, timeo)) { printk(KERN_WARNING "MTD %s(): software timeout\n", __func__ ); + ret = -EIO; break; } @@ -2305,12 +2310,15 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) UDELAY(map, chip, adr, 1000000/HZ); } /* Did we succeed? */ - if (!chip_good(map, adr, map_word_ff(map))) { + if (ret) { /* reset on all failures. 
*/ map_write( map, CMD(0xF0), chip->start ); /* FIXME - should have reset delay before continuing */ - ret = -EIO; + if (++retry_cnt <= MAX_RETRIES) { + ret = 0; + goto retry; + } } chip->state = FL_READY; @@ -2329,6 +2337,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long timeo = jiffies + HZ; DECLARE_WAITQUEUE(wait, current); int ret = 0; + int retry_cnt = 0; adr += chip->start; @@ -2346,6 +2355,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, ENABLE_VPP(map); xip_disable(map, chip, adr); + retry: cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); @@ -2356,6 +2366,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, chip->state = FL_ERASING; chip->erase_suspended = 0; chip->in_progress_block_addr = adr; + chip->in_progress_block_mask = ~(len - 1); INVALIDATE_CACHE_UDELAY(map, chip, adr, len, @@ -2381,7 +2392,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, chip->erase_suspended = 0; } - if (chip_ready(map, adr)) { + if (chip_good(map, adr, map_word_ff(map))) { xip_enable(map, chip, adr); break; } @@ -2390,6 +2401,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, xip_enable(map, chip, adr); printk(KERN_WARNING "MTD %s(): software timeout\n", __func__ ); + ret = -EIO; break; } @@ -2397,12 +2409,15 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, UDELAY(map, chip, adr, 1000000/HZ); } /* Did we succeed? */ - if (!chip_good(map, adr, map_word_ff(map))) { + if (ret) { /* reset on all failures. */ map_write( map, CMD(0xF0), chip->start ); /* FIXME - should have reset delay before continuing */ - ret = -EIO; + if (++retry_cnt <= MAX_RETRIES) { + ret = 0; + goto retry; + } } chip->state = FL_READY; @@ -2532,7 +2547,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) struct ppb_lock { struct flchip *chip; - loff_t offset; + unsigned long adr; int locked; }; @@ -2550,8 +2565,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map, unsigned long timeo; int ret; + adr += chip->start; mutex_lock(&chip->mutex); - ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); + ret = get_chip(map, chip, adr, FL_LOCKING); if (ret) { mutex_unlock(&chip->mutex); return ret; @@ -2569,8 +2585,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map, if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { chip->state = FL_LOCKING; - map_write(map, CMD(0xA0), chip->start + adr); - map_write(map, CMD(0x00), chip->start + adr); + map_write(map, CMD(0xA0), adr); + map_write(map, CMD(0x00), adr); } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { /* * Unlocking of one specific sector is not supported, so we @@ -2608,7 +2624,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map, map_write(map, CMD(0x00), chip->start); chip->state = FL_READY; - put_chip(map, chip, adr + chip->start); + put_chip(map, chip, adr); mutex_unlock(&chip->mutex); return ret; @@ -2665,9 +2681,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, * sectors shall be unlocked, so lets keep their locking * status at "unlocked" (locked=0) for the final re-locking. 
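The erase paths above now share the bounded-retry shape that do_write_oneword() already used, which is why MAX_WORD_RETRIES became MAX_RETRIES. The pattern, reduced to a self-contained sketch with a hypothetical callback:

#define MAX_RETRIES 3

/*
 * do_op() must reset the chip itself on failure, as the 0xF0 reset in
 * the real code does, so that each retry starts from a clean state.
 */
static int op_with_retries(int (*do_op)(void *ctx), void *ctx)
{
	int retry_cnt = 0;
	int ret;

retry:
	ret = do_op(ctx);
	if (ret && ++retry_cnt <= MAX_RETRIES)
		goto retry;

	return ret;
}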
*/ - if ((adr < ofs) || (adr >= (ofs + len))) { + if ((offset < ofs) || (offset >= (ofs + len))) { sect[sectors].chip = &cfi->chips[chipnum]; - sect[sectors].offset = offset; + sect[sectors].adr = adr; sect[sectors].locked = do_ppb_xxlock( map, &cfi->chips[chipnum], adr, 0, DO_XXLOCK_ONEBLOCK_GETLOCK); @@ -2681,6 +2697,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, i++; if (adr >> cfi->chipshift) { + if (offset >= (ofs + len)) + break; adr = 0; chipnum++; @@ -2711,7 +2729,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, */ for (i = 0; i < sectors; i++) { if (sect[i].locked) - do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0, + do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0, DO_XXLOCK_ONEBLOCK_LOCK); } diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c index 7c0b27d132b1..b479bd81120b 100644 --- a/drivers/mtd/chips/jedec_probe.c +++ b/drivers/mtd/chips/jedec_probe.c @@ -1889,6 +1889,8 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base, do { uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi); mask = (1 << (cfi->device_type * 8)) - 1; + if (ofs >= map->size) + return 0; result = map_read(map, base + ofs); bank++; } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION); diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index 7c887f111a7d..62fd6905c648 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c @@ -431,7 +431,7 @@ static int block2mtd_setup2(const char *val) } -static int block2mtd_setup(const char *val, struct kernel_param *kp) +static int block2mtd_setup(const char *val, const struct kernel_param *kp) { #ifdef MODULE return block2mtd_setup2(val); diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index 84b16133554b..0806f72102c0 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -1814,8 +1814,13 @@ static void __init doc_dbg_register(struct mtd_info *floor) struct dentry *root = floor->dbg.dfs_dir; struct docg3 *docg3 = floor->priv; - if (IS_ERR_OR_NULL(root)) + if (IS_ERR_OR_NULL(root)) { + if (IS_ENABLED(CONFIG_DEBUG_FS) && + !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) + dev_warn(floor->dev.parent, + "CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n"); return; + } debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3, &flashcontrol_fops); diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index 8b66e52ca3cc..7287696a21f9 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c @@ -266,7 +266,7 @@ static int phram_setup(const char *val) return ret; } -static int phram_param_call(const char *val, struct kernel_param *kp) +static int phram_param_call(const char *val, const struct kernel_param *kp) { #ifdef MODULE return phram_setup(val); diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 3568294d4854..b25f444c5914 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -487,7 +487,7 @@ static int shrink_ecclayout(struct mtd_info *mtd, for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) { u32 eccpos; - ret = mtd_ooblayout_ecc(mtd, section, &oobregion); + ret = mtd_ooblayout_ecc(mtd, section++, &oobregion); if (ret < 0) { if (ret != -ERANGE) return ret; @@ -534,7 +534,7 @@ static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to) for (i = 0; i < ARRAY_SIZE(to->eccpos);) { u32 eccpos; - ret = mtd_ooblayout_ecc(mtd, section, &oobregion); + ret = mtd_ooblayout_ecc(mtd, 
section++, &oobregion); if (ret < 0) { if (ret != -ERANGE) return ret; diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c index f25eca79f4e5..68c9d98a3347 100644 --- a/drivers/mtd/nand/atmel/nand-controller.c +++ b/drivers/mtd/nand/atmel/nand-controller.c @@ -2547,6 +2547,7 @@ static struct platform_driver atmel_nand_controller_driver = { .driver = { .name = "atmel-nand-controller", .of_match_table = of_match_ptr(atmel_nand_controller_of_ids), + .pm = &atmel_nand_controller_pm_ops, }, .probe = atmel_nand_controller_probe, .remove = atmel_nand_controller_remove, diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c index 8268636675ef..4124bf91bee6 100644 --- a/drivers/mtd/nand/atmel/pmecc.c +++ b/drivers/mtd/nand/atmel/pmecc.c @@ -426,7 +426,7 @@ static int get_strength(struct atmel_pmecc_user *user) static int get_sectorsize(struct atmel_pmecc_user *user) { - return user->cache.cfg & PMECC_LOOKUP_TABLE_SIZE_1024 ? 1024 : 512; + return user->cache.cfg & PMECC_CFG_SECTOR1024 ? 1024 : 512; } static void atmel_pmecc_gen_syndrome(struct atmel_pmecc_user *user, int sector) diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c index e0eb51d8c012..2a978d9832a7 100644 --- a/drivers/mtd/nand/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/brcmnand/brcmnand.c @@ -1763,7 +1763,7 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, err = brcmstb_nand_verify_erased_page(mtd, chip, buf, addr); /* erased page bitflips corrected */ - if (err > 0) + if (err >= 0) return err; } @@ -2193,16 +2193,9 @@ static int brcmnand_setup_dev(struct brcmnand_host *host) if (ctrl->nand_version >= 0x0702) tmp |= ACC_CONTROL_RD_ERASED; tmp &= ~ACC_CONTROL_FAST_PGM_RDIN; - if (ctrl->features & BRCMNAND_HAS_PREFETCH) { - /* - * FIXME: Flash DMA + prefetch may see spurious erased-page ECC - * errors - */ - if (has_flash_dma(ctrl)) - tmp &= ~ACC_CONTROL_PREFETCH; - else - tmp |= ACC_CONTROL_PREFETCH; - } + if (ctrl->features & BRCMNAND_HAS_PREFETCH) + tmp &= ~ACC_CONTROL_PREFETCH; + nand_writereg(ctrl, offs, tmp); return 0; diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c index 56e2e177644d..3f4f4aea0e8b 100644 --- a/drivers/mtd/nand/denali_dt.c +++ b/drivers/mtd/nand/denali_dt.c @@ -122,7 +122,11 @@ static int denali_dt_probe(struct platform_device *pdev) if (ret) return ret; - denali->clk_x_rate = clk_get_rate(dt->clk); + /* + * Hardcode the clock rate for the backward compatibility. + * This works for both SOCFPGA and UniPhier. 
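The mtdchar fix above is easy to miss: without the post-increment, mtd_ooblayout_ecc() is asked for section 0 on every pass and the loop never advances. The intended iteration idiom, as a sketch:

#include <linux/mtd/mtd.h>

static int count_ecc_bytes(struct mtd_info *mtd)
{
	struct mtd_oob_region oobregion;
	int section, total = 0, ret;

	for (section = 0;; section++) {
		ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
		if (ret == -ERANGE)
			break;		/* no more ECC regions */
		if (ret < 0)
			return ret;	/* real error */
		total += oobregion.length;
	}

	return total;
}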
+ */ + denali->clk_x_rate = 200000000; ret = denali_init(denali); if (ret) diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c index 81370c79aa48..7ad0db65a6fa 100644 --- a/drivers/mtd/nand/denali_pci.c +++ b/drivers/mtd/nand/denali_pci.c @@ -124,3 +124,7 @@ static struct pci_driver denali_pci_driver = { }; module_pci_driver(denali_pci_driver); + +MODULE_DESCRIPTION("PCI driver for Denali NAND controller"); +MODULE_AUTHOR("Intel Corporation and its suppliers"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c index 9e03bac7f34c..16deba1a2385 100644 --- a/drivers/mtd/nand/fsl_ifc_nand.c +++ b/drivers/mtd/nand/fsl_ifc_nand.c @@ -173,14 +173,9 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob) /* returns nonzero if entire page is blank */ static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl, - u32 *eccstat, unsigned int bufnum) + u32 eccstat, unsigned int bufnum) { - u32 reg = eccstat[bufnum / 4]; - int errors; - - errors = (reg >> ((3 - bufnum % 4) * 8)) & 15; - - return errors; + return (eccstat >> ((3 - bufnum % 4) * 8)) & 15; } /* @@ -193,7 +188,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) struct fsl_ifc_ctrl *ctrl = priv->ctrl; struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - u32 eccstat[4]; + u32 eccstat; int i; /* set the chip select for NAND Transaction */ @@ -228,19 +223,17 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) if (nctrl->eccread) { int errors; int bufnum = nctrl->page & priv->bufnum_mask; - int sector = bufnum * chip->ecc.steps; - int sector_end = sector + chip->ecc.steps - 1; + int sector_start = bufnum * chip->ecc.steps; + int sector_end = sector_start + chip->ecc.steps - 1; __be32 *eccstat_regs; - if (ctrl->version >= FSL_IFC_VERSION_2_0_0) - eccstat_regs = ifc->ifc_nand.v2_nand_eccstat; - else - eccstat_regs = ifc->ifc_nand.v1_nand_eccstat; + eccstat_regs = ifc->ifc_nand.nand_eccstat; + eccstat = ifc_in32(&eccstat_regs[sector_start / 4]); - for (i = sector / 4; i <= sector_end / 4; i++) - eccstat[i] = ifc_in32(&eccstat_regs[i]); + for (i = sector_start; i <= sector_end; i++) { + if (i != sector_start && !(i % 4)) + eccstat = ifc_in32(&eccstat_regs[i / 4]); - for (i = sector; i <= sector_end; i++) { errors = check_read_ecc(mtd, ctrl, eccstat, i); if (errors == 15) { @@ -349,9 +342,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, case NAND_CMD_READID: case NAND_CMD_PARAM: { + /* + * For READID, read 8 bytes that are currently used. + * For PARAM, read all 3 copies of 256-bytes pages. 
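The IFC rework reads one packed status register at a time instead of caching four of them. Each 32-bit register carries four per-buffer status bytes, most significant byte first, with the error count in the low nibble. A sketch of the extraction, matching the new check_read_ecc():

#include <linux/types.h>

static unsigned int ecc_errors_for_buf(u32 eccstat, unsigned int bufnum)
{
	/* bufnum 0 sits in the most significant byte of the register */
	return (eccstat >> ((3 - bufnum % 4) * 8)) & 15;
}

A returned value of 15 is the hardware's marker for an uncorrectable buffer, which the caller then re-checks against the all-0xff erased-page case.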
+ */ + int len = 8; int timing = IFC_FIR_OP_RB; - if (command == NAND_CMD_PARAM) + if (command == NAND_CMD_PARAM) { timing = IFC_FIR_OP_RBCD; + len = 256 * 3; + } ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | @@ -361,12 +361,8 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, &ifc->ifc_nand.nand_fcr0); ifc_out32(column, &ifc->ifc_nand.row3); - /* - * although currently it's 8 bytes for READID, we always read - * the maximum 256 bytes(for PARAM) - */ - ifc_out32(256, &ifc->ifc_nand.nand_fbcr); - ifc_nand_ctrl->read_bytes = 256; + ifc_out32(len, &ifc->ifc_nand.nand_fbcr); + ifc_nand_ctrl->read_bytes = len; set_addr(mtd, 0, 0, 0); fsl_ifc_run_command(mtd); @@ -626,6 +622,7 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) struct fsl_ifc_ctrl *ctrl = priv->ctrl; struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; u32 nand_fsr; + int status; /* Use READ_STATUS command, but wait for the device to be ready */ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | @@ -640,12 +637,12 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) fsl_ifc_run_command(mtd); nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr); - + status = nand_fsr >> 24; /* * The chip always seems to report that it is * write-protected, even when it is not. */ - return nand_fsr | NAND_STATUS_WP; + return status | NAND_STATUS_WP; } /* @@ -916,6 +913,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) if (ctrl->version >= FSL_IFC_VERSION_1_1_0) fsl_ifc_sram_init(priv); + /* + * As IFC version 2.0.0 has 16KB of internal SRAM as compared to older + * versions which had 8KB. Hence bufnum mask needs to be updated. + */ + if (ctrl->version >= FSL_IFC_VERSION_2_0_0) + priv->bufnum_mask = (priv->bufnum_mask * 2) + 1; + return 0; } diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index 50f8d4a1b983..d4d824ef64e9 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -1067,9 +1067,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, return ret; } - /* handle the block mark swapping */ - block_mark_swapping(this, payload_virt, auxiliary_virt); - /* Loop over status bytes, accumulating ECC status. */ status = auxiliary_virt + nfc_geo->auxiliary_status_offset; @@ -1158,6 +1155,9 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, max_bitflips = max_t(unsigned int, max_bitflips, *status); } + /* handle the block mark swapping */ + block_mark_swapping(this, buf, auxiliary_virt); + if (oob_required) { /* * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c index 7f3b065b6b8f..c51d214d169e 100644 --- a/drivers/mtd/nand/mtk_ecc.c +++ b/drivers/mtd/nand/mtk_ecc.c @@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id) op = ECC_DECODE; dec = readw(ecc->regs + ECC_DECDONE); if (dec & ecc->sectors) { + /* + * Clear decode IRQ status once again to ensure that + * there will be no extra IRQ. 
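The MediaTek change relies on the decode-IRQ status register being clear-on-read. The pattern, as a sketch (the register offset parameter stands in for the driver's own ECC_DECIRQ_STA):

#include <linux/io.h>

static void clear_decode_irq(void __iomem *regs, unsigned int decirq_sta_off)
{
	/*
	 * Reading the status register acknowledges the level interrupt;
	 * doing it both on completion and on the timeout path in
	 * mtk_ecc_disable() ensures no stale decode IRQ fires later.
	 */
	readw(regs + decirq_sta_off);
}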
+ */ + readw(ecc->regs + ECC_DECIRQ_STA); ecc->sectors = 0; complete(&ecc->done); } else { @@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id) } } - writel(0, ecc->regs + ECC_IRQ_REG(op)); - return IRQ_HANDLED; } @@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc) /* disable it */ mtk_ecc_wait_idle(ecc, op); + if (op == ECC_DECODE) + /* + * Clear decode IRQ status in case there is a timeout to wait + * decode IRQ. + */ + readw(ecc->regs + ECC_DECIRQ_STA); writew(0, ecc->regs + ECC_IRQ_REG(op)); writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 53e5e0337c3e..fcb575d55b89 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -48,7 +48,7 @@ #define NFC_V1_V2_CONFIG (host->regs + 0x0a) #define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c) #define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e) -#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10) +#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10) #define NFC_V1_V2_WRPROT (host->regs + 0x12) #define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14) #define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16) @@ -1119,6 +1119,9 @@ static void preset_v2(struct mtd_info *mtd) writew(config1, NFC_V1_V2_CONFIG1); /* preset operation */ + /* spare area size in 16-bit half-words */ + writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA); + /* Unlock the internal RAM Buffer */ writew(0x2, NFC_V1_V2_CONFIG); diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 12edaae17d81..d410de331854 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -440,7 +440,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs) for (; page < page_end; page++) { res = chip->ecc.read_oob(mtd, chip, page); - if (res) + if (res < 0) return res; bad = chip->oob_poi[chip->badblockpos]; @@ -710,7 +710,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, chip->cmd_ctrl(mtd, readcmd, ctrl); ctrl &= ~NAND_CTRL_CHANGE; } - chip->cmd_ctrl(mtd, command, ctrl); + if (command != NAND_CMD_NONE) + chip->cmd_ctrl(mtd, command, ctrl); /* Address cycle, when necessary */ ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE; @@ -739,6 +740,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, */ switch (command) { + case NAND_CMD_NONE: case NAND_CMD_PAGEPROG: case NAND_CMD_ERASE1: case NAND_CMD_ERASE2: @@ -832,7 +834,9 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, } /* Command latch cycle */ - chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); + if (command != NAND_CMD_NONE) + chip->cmd_ctrl(mtd, command, + NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); if (column != -1 || page_addr != -1) { int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE; @@ -868,6 +872,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, */ switch (command) { + case NAND_CMD_NONE: case NAND_CMD_CACHEDPROG: case NAND_CMD_PAGEPROG: case NAND_CMD_ERASE1: @@ -1246,6 +1251,7 @@ int nand_reset(struct nand_chip *chip, int chipnr) return 0; } +EXPORT_SYMBOL_GPL(nand_reset); /** * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data @@ -2200,6 +2206,7 @@ EXPORT_SYMBOL(nand_write_oob_syndrome); static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) { + unsigned int max_bitflips = 0; int page, realpage, chipnr; struct nand_chip *chip = mtd_to_nand(mtd); struct mtd_ecc_stats stats; @@ -2257,6 +2264,8 @@ static int 
nand_do_read_oob(struct mtd_info *mtd, loff_t from, nand_wait_ready(mtd); } + max_bitflips = max_t(unsigned int, max_bitflips, ret); + readlen -= len; if (!readlen) break; @@ -2282,7 +2291,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, if (mtd->ecc_stats.failed - stats.failed) return -EBADMSG; - return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; + return max_bitflips; } /** @@ -2799,15 +2808,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const uint8_t *buf) { struct nand_chip *chip = mtd_to_nand(mtd); + int chipnr = (int)(to >> chip->chip_shift); struct mtd_oob_ops ops; int ret; - /* Wait for the device to get ready */ - panic_nand_wait(mtd, chip, 400); - /* Grab the device */ panic_nand_get_device(chip, mtd, FL_WRITING); + chip->select_chip(mtd, chipnr); + + /* Wait for the device to get ready */ + panic_nand_wait(mtd, chip, 400); + memset(&ops, 0, sizeof(ops)); ops.len = len; ops.datbuf = (uint8_t *)buf; diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 246b4393118e..44322a363ba5 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -520,11 +520,16 @@ static int nandsim_debugfs_create(struct nandsim *dev) struct dentry *root = nsmtd->dbg.dfs_dir; struct dentry *dent; - if (!IS_ENABLED(CONFIG_DEBUG_FS)) + /* + * Just skip debugfs initialization when the debugfs directory is + * missing. + */ + if (IS_ERR_OR_NULL(root)) { + if (IS_ENABLED(CONFIG_DEBUG_FS) && + !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) + NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n"); return 0; - - if (IS_ERR_OR_NULL(root)) - return -1; + } dent = debugfs_create_file("nandsim_wear_report", S_IRUSR, root, dev, &dfs_fops); diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 54540c8fa1a2..9f98f74ff221 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2, 0x97, 0x79, 0xe5, 0x24, 0xb5}; /** - * omap_calculate_ecc_bch - Generate bytes of ECC bytes + * _omap_calculate_ecc_bch - Generate ECC bytes for one sector * @mtd: MTD device structure * @dat: The pointer to data on which ecc is computed * @ecc_code: The ecc_code buffer + * @i: The sector number (for a multi sector page) * - * Support calculating of BCH4/8 ecc vectors for the page + * Support calculating of BCH4/8/16 ECC vectors for one sector + * within a page. Sector number is in @i. 
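The nand_do_read_oob() change above gives OOB-only reads the same return convention as data reads: the maximum bitflip count seen, rather than a flat -EUCLEAN. Reduced to a sketch (assuming the NAND core headers of this tree):

#include <linux/kernel.h>
#include <linux/mtd/rawnand.h>

static int read_oob_pages(struct mtd_info *mtd, struct nand_chip *chip,
			  int page, int npages)
{
	unsigned int max_bitflips = 0;
	int ret;

	for (; npages; npages--, page++) {
		ret = chip->ecc.read_oob(mtd, chip, page);
		if (ret < 0)
			return ret;		/* hard failure */

		/* a positive return is this page's bitflip count */
		max_bitflips = max_t(unsigned int, max_bitflips, ret);
	}

	return max_bitflips;
}

The MTD core then compares the returned count against the bitflip threshold to decide whether the caller sees -EUCLEAN.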
*/ -static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd, - const u_char *dat, u_char *ecc_calc) +static int _omap_calculate_ecc_bch(struct mtd_info *mtd, + const u_char *dat, u_char *ecc_calc, int i) { struct omap_nand_info *info = mtd_to_omap(mtd); int eccbytes = info->nand.ecc.bytes; struct gpmc_nand_regs *gpmc_regs = &info->reg; u8 *ecc_code; - unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4; + unsigned long bch_val1, bch_val2, bch_val3, bch_val4; u32 val; - int i, j; + int j; + + ecc_code = ecc_calc; + switch (info->ecc_opt) { + case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: + case OMAP_ECC_BCH8_CODE_HW: + bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); + bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); + bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]); + bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]); + *ecc_code++ = (bch_val4 & 0xFF); + *ecc_code++ = ((bch_val3 >> 24) & 0xFF); + *ecc_code++ = ((bch_val3 >> 16) & 0xFF); + *ecc_code++ = ((bch_val3 >> 8) & 0xFF); + *ecc_code++ = (bch_val3 & 0xFF); + *ecc_code++ = ((bch_val2 >> 24) & 0xFF); + *ecc_code++ = ((bch_val2 >> 16) & 0xFF); + *ecc_code++ = ((bch_val2 >> 8) & 0xFF); + *ecc_code++ = (bch_val2 & 0xFF); + *ecc_code++ = ((bch_val1 >> 24) & 0xFF); + *ecc_code++ = ((bch_val1 >> 16) & 0xFF); + *ecc_code++ = ((bch_val1 >> 8) & 0xFF); + *ecc_code++ = (bch_val1 & 0xFF); + break; + case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: + case OMAP_ECC_BCH4_CODE_HW: + bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); + bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); + *ecc_code++ = ((bch_val2 >> 12) & 0xFF); + *ecc_code++ = ((bch_val2 >> 4) & 0xFF); + *ecc_code++ = ((bch_val2 & 0xF) << 4) | + ((bch_val1 >> 28) & 0xF); + *ecc_code++ = ((bch_val1 >> 20) & 0xFF); + *ecc_code++ = ((bch_val1 >> 12) & 0xFF); + *ecc_code++ = ((bch_val1 >> 4) & 0xFF); + *ecc_code++ = ((bch_val1 & 0xF) << 4); + break; + case OMAP_ECC_BCH16_CODE_HW: + val = readl(gpmc_regs->gpmc_bch_result6[i]); + ecc_code[0] = ((val >> 8) & 0xFF); + ecc_code[1] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result5[i]); + ecc_code[2] = ((val >> 24) & 0xFF); + ecc_code[3] = ((val >> 16) & 0xFF); + ecc_code[4] = ((val >> 8) & 0xFF); + ecc_code[5] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result4[i]); + ecc_code[6] = ((val >> 24) & 0xFF); + ecc_code[7] = ((val >> 16) & 0xFF); + ecc_code[8] = ((val >> 8) & 0xFF); + ecc_code[9] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result3[i]); + ecc_code[10] = ((val >> 24) & 0xFF); + ecc_code[11] = ((val >> 16) & 0xFF); + ecc_code[12] = ((val >> 8) & 0xFF); + ecc_code[13] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result2[i]); + ecc_code[14] = ((val >> 24) & 0xFF); + ecc_code[15] = ((val >> 16) & 0xFF); + ecc_code[16] = ((val >> 8) & 0xFF); + ecc_code[17] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result1[i]); + ecc_code[18] = ((val >> 24) & 0xFF); + ecc_code[19] = ((val >> 16) & 0xFF); + ecc_code[20] = ((val >> 8) & 0xFF); + ecc_code[21] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result0[i]); + ecc_code[22] = ((val >> 24) & 0xFF); + ecc_code[23] = ((val >> 16) & 0xFF); + ecc_code[24] = ((val >> 8) & 0xFF); + ecc_code[25] = ((val >> 0) & 0xFF); + break; + default: + return -EINVAL; + } + + /* ECC scheme specific syndrome customizations */ + switch (info->ecc_opt) { + case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: + /* Add constant polynomial to remainder, so that + * ECC of blank pages results in 0x0 on reading back + */ + for (j = 0; j < eccbytes; j++) + 
ecc_calc[j] ^= bch4_polynomial[j]; + break; + case OMAP_ECC_BCH4_CODE_HW: + /* Set 8th ECC byte as 0x0 for ROM compatibility */ + ecc_calc[eccbytes - 1] = 0x0; + break; + case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: + /* Add constant polynomial to remainder, so that + * ECC of blank pages results in 0x0 on reading back + */ + for (j = 0; j < eccbytes; j++) + ecc_calc[j] ^= bch8_polynomial[j]; + break; + case OMAP_ECC_BCH8_CODE_HW: + /* Set 14th ECC byte as 0x0 for ROM compatibility */ + ecc_calc[eccbytes - 1] = 0x0; + break; + case OMAP_ECC_BCH16_CODE_HW: + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction + * @mtd: MTD device structure + * @dat: The pointer to data on which ecc is computed + * @ecc_code: The ecc_code buffer + * + * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used + * when SW based correction is required as ECC is required for one sector + * at a time. + */ +static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd, + const u_char *dat, u_char *ecc_calc) +{ + return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0); +} + +/** + * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors + * @mtd: MTD device structure + * @dat: The pointer to data on which ecc is computed + * @ecc_code: The ecc_code buffer + * + * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go. + */ +static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd, + const u_char *dat, u_char *ecc_calc) +{ + struct omap_nand_info *info = mtd_to_omap(mtd); + int eccbytes = info->nand.ecc.bytes; + unsigned long nsectors; + int i, ret; nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1; for (i = 0; i < nsectors; i++) { - ecc_code = ecc_calc; - switch (info->ecc_opt) { - case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: - case OMAP_ECC_BCH8_CODE_HW: - bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); - bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); - bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]); - bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]); - *ecc_code++ = (bch_val4 & 0xFF); - *ecc_code++ = ((bch_val3 >> 24) & 0xFF); - *ecc_code++ = ((bch_val3 >> 16) & 0xFF); - *ecc_code++ = ((bch_val3 >> 8) & 0xFF); - *ecc_code++ = (bch_val3 & 0xFF); - *ecc_code++ = ((bch_val2 >> 24) & 0xFF); - *ecc_code++ = ((bch_val2 >> 16) & 0xFF); - *ecc_code++ = ((bch_val2 >> 8) & 0xFF); - *ecc_code++ = (bch_val2 & 0xFF); - *ecc_code++ = ((bch_val1 >> 24) & 0xFF); - *ecc_code++ = ((bch_val1 >> 16) & 0xFF); - *ecc_code++ = ((bch_val1 >> 8) & 0xFF); - *ecc_code++ = (bch_val1 & 0xFF); - break; - case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: - case OMAP_ECC_BCH4_CODE_HW: - bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); - bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); - *ecc_code++ = ((bch_val2 >> 12) & 0xFF); - *ecc_code++ = ((bch_val2 >> 4) & 0xFF); - *ecc_code++ = ((bch_val2 & 0xF) << 4) | - ((bch_val1 >> 28) & 0xF); - *ecc_code++ = ((bch_val1 >> 20) & 0xFF); - *ecc_code++ = ((bch_val1 >> 12) & 0xFF); - *ecc_code++ = ((bch_val1 >> 4) & 0xFF); - *ecc_code++ = ((bch_val1 & 0xF) << 4); - break; - case OMAP_ECC_BCH16_CODE_HW: - val = readl(gpmc_regs->gpmc_bch_result6[i]); - ecc_code[0] = ((val >> 8) & 0xFF); - ecc_code[1] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result5[i]); - ecc_code[2] = ((val >> 24) & 0xFF); - ecc_code[3] = ((val >> 16) & 0xFF); - ecc_code[4] = ((val >> 8) & 0xFF); - ecc_code[5] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result4[i]); - 
ecc_code[6] = ((val >> 24) & 0xFF); - ecc_code[7] = ((val >> 16) & 0xFF); - ecc_code[8] = ((val >> 8) & 0xFF); - ecc_code[9] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result3[i]); - ecc_code[10] = ((val >> 24) & 0xFF); - ecc_code[11] = ((val >> 16) & 0xFF); - ecc_code[12] = ((val >> 8) & 0xFF); - ecc_code[13] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result2[i]); - ecc_code[14] = ((val >> 24) & 0xFF); - ecc_code[15] = ((val >> 16) & 0xFF); - ecc_code[16] = ((val >> 8) & 0xFF); - ecc_code[17] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result1[i]); - ecc_code[18] = ((val >> 24) & 0xFF); - ecc_code[19] = ((val >> 16) & 0xFF); - ecc_code[20] = ((val >> 8) & 0xFF); - ecc_code[21] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result0[i]); - ecc_code[22] = ((val >> 24) & 0xFF); - ecc_code[23] = ((val >> 16) & 0xFF); - ecc_code[24] = ((val >> 8) & 0xFF); - ecc_code[25] = ((val >> 0) & 0xFF); - break; - default: - return -EINVAL; - } - - /* ECC scheme specific syndrome customizations */ - switch (info->ecc_opt) { - case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: - /* Add constant polynomial to remainder, so that - * ECC of blank pages results in 0x0 on reading back */ - for (j = 0; j < eccbytes; j++) - ecc_calc[j] ^= bch4_polynomial[j]; - break; - case OMAP_ECC_BCH4_CODE_HW: - /* Set 8th ECC byte as 0x0 for ROM compatibility */ - ecc_calc[eccbytes - 1] = 0x0; - break; - case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: - /* Add constant polynomial to remainder, so that - * ECC of blank pages results in 0x0 on reading back */ - for (j = 0; j < eccbytes; j++) - ecc_calc[j] ^= bch8_polynomial[j]; - break; - case OMAP_ECC_BCH8_CODE_HW: - /* Set 14th ECC byte as 0x0 for ROM compatibility */ - ecc_calc[eccbytes - 1] = 0x0; - break; - case OMAP_ECC_BCH16_CODE_HW: - break; - default: - return -EINVAL; - } + ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i); + if (ret) + return ret; - ecc_calc += eccbytes; + ecc_calc += eccbytes; } return 0; @@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, chip->write_buf(mtd, buf, mtd->writesize); /* Update ecc vector from GPMC result registers */ - chip->ecc.calculate(mtd, buf, &ecc_calc[0]); + omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]); ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, chip->ecc.total); @@ -1508,6 +1551,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, return 0; } +/** + * omap_write_subpage_bch - BCH hardware ECC based subpage write + * @mtd: mtd info structure + * @chip: nand chip info structure + * @offset: column address of subpage within the page + * @data_len: data length + * @buf: data buffer + * @oob_required: must write chip->oob_poi to OOB + * @page: page number to write + * + * OMAP optimized subpage write method. + */ +static int omap_write_subpage_bch(struct mtd_info *mtd, + struct nand_chip *chip, u32 offset, + u32 data_len, const u8 *buf, + int oob_required, int page) +{ + u8 *ecc_calc = chip->buffers->ecccalc; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + int ecc_steps = chip->ecc.steps; + u32 start_step = offset / ecc_size; + u32 end_step = (offset + data_len - 1) / ecc_size; + int step, ret = 0; + + /* + * Write entire page at one go as it would be optimal + * as ECC is calculated by hardware. + * ECC is calculated for all subpages but we choose + * only what we want. 
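A worked example of the start/end step arithmetic used by the subpage write path below, assuming a 2048-byte page split into four 512-byte ECC steps:

#include <linux/types.h>
#include <linux/bug.h>

static void subpage_step_example(void)
{
	u32 ecc_size = 512;			/* bytes per ECC step */
	u32 offset = 1024, data_len = 512;	/* write only the third step */
	u32 start_step = offset / ecc_size;			/* = 2 */
	u32 end_step = (offset + data_len - 1) / ecc_size;	/* = 2 */

	/* steps 0, 1 and 3 get 0xff-masked ECC; step 2 keeps real ECC */
	BUG_ON(start_step != 2 || end_step != 2);
}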
+ */ + + /* Enable GPMC ECC engine */ + chip->ecc.hwctl(mtd, NAND_ECC_WRITE); + + /* Write data */ + chip->write_buf(mtd, buf, mtd->writesize); + + for (step = 0; step < ecc_steps; step++) { + /* mask ECC of un-touched subpages by padding 0xFF */ + if (step < start_step || step > end_step) + memset(ecc_calc, 0xff, ecc_bytes); + else + ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step); + + if (ret) + return ret; + + buf += ecc_size; + ecc_calc += ecc_bytes; + } + + /* copy calculated ECC for whole page to chip->buffer->oob */ + /* this include masked-value(0xFF) for unwritten subpages */ + ecc_calc = chip->buffers->ecccalc; + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; + + /* write OOB buffer to NAND device */ + chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); + + return 0; +} + /** * omap_read_page_bch - BCH ecc based page read function for entire page * @mtd: mtd info structure @@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip, chip->ecc.total); /* Calculate ecc bytes */ - chip->ecc.calculate(mtd, buf, ecc_calc); + omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc); ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, chip->ecc.total); @@ -2044,7 +2153,7 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 4; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = nand_bch_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; + nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw; mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); /* Reserve one byte for the OMAP marker */ oobbytes_per_step = nand_chip->ecc.bytes + 1; @@ -2066,9 +2175,9 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 4; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = omap_elm_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.write_page = omap_write_page_bch; + nand_chip->ecc.write_subpage = omap_write_subpage_bch; mtd_set_ooblayout(mtd, &omap_ooblayout_ops); oobbytes_per_step = nand_chip->ecc.bytes; @@ -2087,7 +2196,7 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 8; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = nand_bch_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; + nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw; mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); /* Reserve one byte for the OMAP marker */ oobbytes_per_step = nand_chip->ecc.bytes + 1; @@ -2109,9 +2218,9 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 8; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = omap_elm_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.write_page = omap_write_page_bch; + nand_chip->ecc.write_subpage = omap_write_subpage_bch; mtd_set_ooblayout(mtd, &omap_ooblayout_ops); oobbytes_per_step = nand_chip->ecc.bytes; @@ -2131,9 +2240,9 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 16; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = omap_elm_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.write_page = omap_write_page_bch; + 
nand_chip->ecc.write_subpage = omap_write_subpage_bch; mtd_set_ooblayout(mtd, &omap_ooblayout_ops); oobbytes_per_step = nand_chip->ecc.bytes; diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 85cff68643e0..125b744c9c28 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -950,6 +950,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command) switch (command) { case NAND_CMD_READ0: + case NAND_CMD_READOOB: case NAND_CMD_PAGEPROG: info->use_ecc = 1; break; diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c index 3baddfc997d1..b49ca02b399d 100644 --- a/drivers/mtd/nand/qcom_nandc.c +++ b/drivers/mtd/nand/qcom_nandc.c @@ -2544,6 +2544,9 @@ static int qcom_nand_host_init(struct qcom_nand_controller *nandc, nand_set_flash_node(chip, dn); mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs); + if (!mtd->name) + return -ENOMEM; + mtd->owner = THIS_MODULE; mtd->dev.parent = dev; diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index 82244be3e766..958974821582 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c @@ -1853,8 +1853,14 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd, /* Add ECC info retrieval from DT */ for (i = 0; i < ARRAY_SIZE(strengths); i++) { - if (ecc->strength <= strengths[i]) + if (ecc->strength <= strengths[i]) { + /* + * Update ecc->strength value with the actual strength + * that will be used by the ECC engine. + */ + ecc->strength = strengths[i]; break; + } } if (i >= ARRAY_SIZE(strengths)) { diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c index 766906f03943..ce366816a7ef 100644 --- a/drivers/mtd/nand/tango_nand.c +++ b/drivers/mtd/nand/tango_nand.c @@ -654,7 +654,7 @@ static int tango_nand_probe(struct platform_device *pdev) writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE); - clk = clk_get(&pdev->dev, NULL); + clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) return PTR_ERR(clk); diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c index 8037d4b48a05..e2583a539b41 100644 --- a/drivers/mtd/nand/vf610_nfc.c +++ b/drivers/mtd/nand/vf610_nfc.c @@ -752,10 +752,8 @@ static int vf610_nfc_probe(struct platform_device *pdev) if (mtd->oobsize > 64) mtd->oobsize = 64; - /* - * mtd->ecclayout is not specified here because we're using the - * default large page ECC layout defined in NAND core. - */ + /* Use default large page ECC layout defined in NAND core */ + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); if (chip->ecc.strength == 32) { nfc->ecc_mode = ECC_60_BYTE; chip->ecc.bytes = 60; diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index 53c7d8e0327a..8d89204b90d2 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -495,7 +495,9 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, void __iomem *reg_base = cqspi->iobase; void __iomem *ahb_base = cqspi->ahb_base; unsigned int remaining = n_rx; + unsigned int mod_bytes = n_rx % 4; unsigned int bytes_to_read = 0; + u8 *rxbuf_end = rxbuf + n_rx; int ret = 0; writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES); @@ -523,11 +525,24 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, } while (bytes_to_read != 0) { + unsigned int word_remain = round_down(remaining, 4); + bytes_to_read *= cqspi->fifo_width; bytes_to_read = bytes_to_read > remaining ? 
remaining : bytes_to_read; - ioread32_rep(ahb_base, rxbuf, - DIV_ROUND_UP(bytes_to_read, 4)); + bytes_to_read = round_down(bytes_to_read, 4); + /* Read 4 byte word chunks then single bytes */ + if (bytes_to_read) { + ioread32_rep(ahb_base, rxbuf, + (bytes_to_read / 4)); + } else if (!word_remain && mod_bytes) { + unsigned int temp = ioread32(ahb_base); + + bytes_to_read = mod_bytes; + memcpy(rxbuf, &temp, min((unsigned int) + (rxbuf_end - rxbuf), + bytes_to_read)); + } rxbuf += bytes_to_read; remaining -= bytes_to_read; bytes_to_read = cqspi_get_rd_sram_level(cqspi); diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c index 8a596bfeddff..7802ac3ba934 100644 --- a/drivers/mtd/spi-nor/intel-spi.c +++ b/drivers/mtd/spi-nor/intel-spi.c @@ -422,7 +422,7 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf, if (ret < 0) return ret; - val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; + val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; val |= ret << SSFSTS_CTL_COP_SHIFT; val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE; val |= SSFSTS_CTL_SCGO; @@ -432,7 +432,7 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf, if (ret) return ret; - status = readl(ispi->base + SSFSTS_CTL); + status = readl(ispi->sregs + SSFSTS_CTL); if (status & SSFSTS_CTL_FCERR) return -EIO; else if (status & SSFSTS_CTL_AEL) diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c index 86c0931543c5..ad6a3e1844cb 100644 --- a/drivers/mtd/spi-nor/stm32-quadspi.c +++ b/drivers/mtd/spi-nor/stm32-quadspi.c @@ -240,12 +240,12 @@ static int stm32_qspi_tx_poll(struct stm32_qspi *qspi, STM32_QSPI_FIFO_TIMEOUT_US); if (ret) { dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr); - break; + return ret; } tx_fifo(buf++, qspi->io_base + QUADSPI_DR); } - return ret; + return 0; } static int stm32_qspi_tx_mm(struct stm32_qspi *qspi, diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c index 1cb3f7758fb6..766b2c385682 100644 --- a/drivers/mtd/tests/oobtest.c +++ b/drivers/mtd/tests/oobtest.c @@ -193,6 +193,9 @@ static int verify_eraseblock(int ebnum) ops.datbuf = NULL; ops.oobbuf = readbuf; err = mtd_read_oob(mtd, addr, &ops); + if (mtd_is_bitflip(err)) + err = 0; + if (err || ops.oobretlen != use_len) { pr_err("error: readoob failed at %#llx\n", (long long)addr); @@ -227,6 +230,9 @@ static int verify_eraseblock(int ebnum) ops.datbuf = NULL; ops.oobbuf = readbuf; err = mtd_read_oob(mtd, addr, &ops); + if (mtd_is_bitflip(err)) + err = 0; + if (err || ops.oobretlen != mtd->oobavail) { pr_err("error: readoob failed at %#llx\n", (long long)addr); @@ -286,6 +292,9 @@ static int verify_eraseblock_in_one_go(int ebnum) /* read entire block's OOB at one go */ err = mtd_read_oob(mtd, addr, &ops); + if (mtd_is_bitflip(err)) + err = 0; + if (err || ops.oobretlen != len) { pr_err("error: readoob failed at %#llx\n", (long long)addr); @@ -527,6 +536,9 @@ static int __init mtd_oobtest_init(void) pr_info("attempting to start read past end of OOB\n"); pr_info("an error is expected...\n"); err = mtd_read_oob(mtd, addr0, &ops); + if (mtd_is_bitflip(err)) + err = 0; + if (err) { pr_info("error occurred as expected\n"); err = 0; @@ -571,6 +583,9 @@ static int __init mtd_oobtest_init(void) pr_info("attempting to read past end of device\n"); pr_info("an error is expected...\n"); err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops); + if (mtd_is_bitflip(err)) + err = 0; + if (err) { pr_info("error occurred as expected\n"); err = 0; @@ 
-615,6 +630,9 @@ static int __init mtd_oobtest_init(void) pr_info("attempting to read past end of device\n"); pr_info("an error is expected...\n"); err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops); + if (mtd_is_bitflip(err)) + err = 0; + if (err) { pr_info("error occurred as expected\n"); err = 0; @@ -684,6 +702,9 @@ static int __init mtd_oobtest_init(void) ops.datbuf = NULL; ops.oobbuf = readbuf; err = mtd_read_oob(mtd, addr, &ops); + if (mtd_is_bitflip(err)) + err = 0; + if (err) goto out; if (memcmpshow(addr, readbuf, writebuf, diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index b210fdb31c98..d0b63bbf46a7 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -99,6 +99,8 @@ struct ubiblock { /* Linked list of all ubiblock instances */ static LIST_HEAD(ubiblock_devices); +static DEFINE_IDR(ubiblock_minor_idr); +/* Protects ubiblock_devices and ubiblock_minor_idr */ static DEFINE_MUTEX(devices_mutex); static int ubiblock_major; @@ -242,7 +244,7 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode) * in any case. */ if (mode & FMODE_WRITE) { - ret = -EPERM; + ret = -EROFS; goto out_unlock; } @@ -351,8 +353,6 @@ static const struct blk_mq_ops ubiblock_mq_ops = { .init_request = ubiblock_init_request, }; -static DEFINE_IDR(ubiblock_minor_idr); - int ubiblock_create(struct ubi_volume_info *vi) { struct ubiblock *dev; @@ -365,14 +365,15 @@ int ubiblock_create(struct ubi_volume_info *vi) /* Check that the volume isn't already handled */ mutex_lock(&devices_mutex); if (find_dev_nolock(vi->ubi_num, vi->vol_id)) { - mutex_unlock(&devices_mutex); - return -EEXIST; + ret = -EEXIST; + goto out_unlock; } - mutex_unlock(&devices_mutex); dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL); - if (!dev) - return -ENOMEM; + if (!dev) { + ret = -ENOMEM; + goto out_unlock; + } mutex_init(&dev->dev_mutex); @@ -437,14 +438,13 @@ int ubiblock_create(struct ubi_volume_info *vi) goto out_free_queue; } - mutex_lock(&devices_mutex); list_add_tail(&dev->list, &ubiblock_devices); - mutex_unlock(&devices_mutex); /* Must be the last step: anyone can call file ops from now on */ add_disk(dev->gd); dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)", dev->ubi_num, dev->vol_id, vi->name); + mutex_unlock(&devices_mutex); return 0; out_free_queue: @@ -457,6 +457,8 @@ int ubiblock_create(struct ubi_volume_info *vi) put_disk(dev->gd); out_free_dev: kfree(dev); +out_unlock: + mutex_unlock(&devices_mutex); return ret; } @@ -478,30 +480,36 @@ static void ubiblock_cleanup(struct ubiblock *dev) int ubiblock_remove(struct ubi_volume_info *vi) { struct ubiblock *dev; + int ret; mutex_lock(&devices_mutex); dev = find_dev_nolock(vi->ubi_num, vi->vol_id); if (!dev) { - mutex_unlock(&devices_mutex); - return -ENODEV; + ret = -ENODEV; + goto out_unlock; } /* Found a device, let's lock it so we can check if it's busy */ mutex_lock(&dev->dev_mutex); if (dev->refcnt > 0) { - mutex_unlock(&dev->dev_mutex); - mutex_unlock(&devices_mutex); - return -EBUSY; + ret = -EBUSY; + goto out_unlock_dev; } /* Remove from device list */ list_del(&dev->list); - mutex_unlock(&devices_mutex); - ubiblock_cleanup(dev); mutex_unlock(&dev->dev_mutex); + mutex_unlock(&devices_mutex); + kfree(dev); return 0; + +out_unlock_dev: + mutex_unlock(&dev->dev_mutex); +out_unlock: + mutex_unlock(&devices_mutex); + return ret; } static int ubiblock_resize(struct ubi_volume_info *vi) @@ -630,6 +638,7 @@ static void ubiblock_remove_all(void) struct ubiblock *next; struct ubiblock *dev; + 
mutex_lock(&devices_mutex); list_for_each_entry_safe(dev, next, &ubiblock_devices, list) { /* The module is being forcefully removed */ WARN_ON(dev->desc); @@ -638,6 +647,7 @@ static void ubiblock_remove_all(void) ubiblock_cleanup(dev); kfree(dev); } + mutex_unlock(&devices_mutex); } int __init ubiblock_init(void) diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 842550b5712a..21d316fd516e 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -845,6 +845,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, return -EINVAL; } + /* + * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes. + * MLC NAND is different and needs special care, otherwise UBI or UBIFS + * will die soon and you will lose all your data. + */ + if (mtd->type == MTD_MLCNANDFLASH) { + pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n", + mtd->index); + return -EINVAL; + } + if (ubi_num == UBI_DEV_NUM_AUTO) { /* Search for an empty slot in the @ubi_devices array */ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) @@ -1071,6 +1082,9 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) if (ubi->bgt_thread) kthread_stop(ubi->bgt_thread); +#ifdef CONFIG_MTD_UBI_FASTMAP + cancel_work_sync(&ubi->fm_work); +#endif ubi_debugfs_exit_dev(ubi); uif_close(ubi); @@ -1334,7 +1348,7 @@ static int bytes_str_to_int(const char *str) * This function returns zero in case of success and a negative error code in * case of error. */ -static int ubi_mtd_param_parse(const char *val, struct kernel_param *kp) +static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp) { int i, len; struct mtd_dev_param *p; diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 388e46be6ad9..d0884bd9d955 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -490,6 +490,82 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, return err; } +#ifdef CONFIG_MTD_UBI_FASTMAP +/** + * check_mapping - check and fixup a mapping + * @ubi: UBI device description object + * @vol: volume description object + * @lnum: logical eraseblock number + * @pnum: physical eraseblock number + * + * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap + * operations; if such an operation is interrupted the mapping still looks + * good, but upon first read an ECC error is reported to the upper layer. + * Normally this is fixed during the full scan at attach time; for Fastmap + * we have to deal with it while reading. + * If the PEB behind a LEB shows this symptom we change the mapping to + * %UBI_LEB_UNMAPPED and schedule the PEB for erasure. + * + * Returns 0 on success, negative error code in case of failure.
+ */ +static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, + int *pnum) +{ + int err; + struct ubi_vid_io_buf *vidb; + + if (!ubi->fast_attach) + return 0; + + vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); + if (!vidb) + return -ENOMEM; + + err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0); + if (err > 0 && err != UBI_IO_BITFLIPS) { + int torture = 0; + + switch (err) { + case UBI_IO_FF: + case UBI_IO_FF_BITFLIPS: + case UBI_IO_BAD_HDR: + case UBI_IO_BAD_HDR_EBADMSG: + break; + default: + ubi_assert(0); + } + + if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS) + torture = 1; + + down_read(&ubi->fm_eba_sem); + vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED; + up_read(&ubi->fm_eba_sem); + ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture); + + *pnum = UBI_LEB_UNMAPPED; + } else if (err < 0) { + ubi_err(ubi, "unable to read VID header back from PEB %i: %i", + *pnum, err); + + goto out_free; + } + + err = 0; + +out_free: + ubi_free_vid_buf(vidb); + + return err; +} +#else +static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, + int *pnum) +{ + return 0; +} +#endif + /** * ubi_eba_read_leb - read data. * @ubi: UBI device description object @@ -522,7 +598,13 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, return err; pnum = vol->eba_tbl->entries[lnum].pnum; - if (pnum < 0) { + if (pnum >= 0) { + err = check_mapping(ubi, vol, lnum, &pnum); + if (err < 0) + goto out_unlock; + } + + if (pnum == UBI_LEB_UNMAPPED) { /* * The logical eraseblock is not mapped, fill the whole buffer * with 0xFF bytes. The exception is static volumes for which @@ -930,6 +1012,12 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, return err; pnum = vol->eba_tbl->entries[lnum].pnum; + if (pnum >= 0) { + err = check_mapping(ubi, vol, lnum, &pnum); + if (err < 0) + goto out; + } + if (pnum >= 0) { dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d", len, offset, vol_id, lnum, pnum); diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c index 4f0bd6b4422a..69dd21679a30 100644 --- a/drivers/mtd/ubi/fastmap-wl.c +++ b/drivers/mtd/ubi/fastmap-wl.c @@ -362,7 +362,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi) { int i; - flush_work(&ubi->fm_work); return_unused_pool_pebs(ubi, &ubi->fm_pool); return_unused_pool_pebs(ubi, &ubi->fm_wl_pool); diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 85237cf661f9..3fd8d7ff7a02 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -270,6 +270,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) vol->last_eb_bytes = vol->usable_leb_size; } + /* Make volume "available" before it becomes accessible via sysfs */ + spin_lock(&ubi->volumes_lock); + ubi->volumes[vol_id] = vol; + ubi->vol_count += 1; + spin_unlock(&ubi->volumes_lock); + /* Register character device for the volume */ cdev_init(&vol->cdev, &ubi_vol_cdev_operations); vol->cdev.owner = THIS_MODULE; @@ -298,11 +304,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) if (err) goto out_sysfs; - spin_lock(&ubi->volumes_lock); - ubi->volumes[vol_id] = vol; - ubi->vol_count += 1; - spin_unlock(&ubi->volumes_lock); - ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED); self_check_volumes(ubi); return err; @@ -315,6 +316,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) */ cdev_device_del(&vol->cdev, &vol->dev); out_mapping: + spin_lock(&ubi->volumes_lock); + 
ubi->volumes[vol_id] = NULL; + ubi->vol_count -= 1; + spin_unlock(&ubi->volumes_lock); ubi_eba_destroy_table(eba_tbl); out_acc: spin_lock(&ubi->volumes_lock); diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index b5b8cd6f481c..23a6986d512b 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1505,6 +1505,7 @@ int ubi_thread(void *u) } dbg_wl("background thread \"%s\" is killed", ubi->bgt_name); + ubi->thread_enabled = 0; return 0; } @@ -1514,9 +1515,6 @@ int ubi_thread(void *u) */ static void shutdown_work(struct ubi_device *ubi) { -#ifdef CONFIG_MTD_UBI_FASTMAP - flush_work(&ubi->fm_work); -#endif while (!list_empty(&ubi->works)) { struct ubi_work *wrk; @@ -1528,6 +1526,46 @@ static void shutdown_work(struct ubi_device *ubi) } } +/** + * erase_aeb - erase a PEB given in UBI attach info PEB + * @ubi: UBI device description object + * @aeb: UBI attach info PEB + * @sync: If true, erase synchronously. Otherwise schedule for erasure + */ +static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync) +{ + struct ubi_wl_entry *e; + int err; + + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); + if (!e) + return -ENOMEM; + + e->pnum = aeb->pnum; + e->ec = aeb->ec; + ubi->lookuptbl[e->pnum] = e; + + if (sync) { + err = sync_erase(ubi, e, false); + if (err) + goto out_free; + + wl_tree_add(e, &ubi->free); + ubi->free_count++; + } else { + err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false); + if (err) + goto out_free; + } + + return 0; + +out_free: + wl_entry_destroy(ubi, e); + + return err; +} + /** * ubi_wl_init - initialize the WL sub-system using attaching information. * @ubi: UBI device description object @@ -1566,17 +1604,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) { cond_resched(); - e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); - if (!e) - goto out_free; - - e->pnum = aeb->pnum; - e->ec = aeb->ec; - ubi->lookuptbl[e->pnum] = e; - if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) { - wl_entry_destroy(ubi, e); + err = erase_aeb(ubi, aeb, false); + if (err) goto out_free; - } found_pebs++; } @@ -1635,6 +1665,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) ubi_assert(!ubi->lookuptbl[e->pnum]); ubi->lookuptbl[e->pnum] = e; } else { + bool sync = false; + /* * Usually old Fastmap PEBs are scheduled for erasure * and we don't have to care about them but if we face @@ -1644,18 +1676,21 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) if (ubi->lookuptbl[aeb->pnum]) continue; - e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); - if (!e) - goto out_free; + /* + * The fastmap update code might not find a free PEB for + * writing the fastmap anchor to and then reuses the + * current fastmap anchor PEB. When this PEB gets erased + * and a power cut happens before it is written again we + * must make sure that the fastmap attach code doesn't + * find any outdated fastmap anchors, hence we erase the + * outdated fastmap anchor PEBs synchronously here. 
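The power-cut scenario spelled out above is worth restating as code. Below is a condensed, illustrative form of the decision the attach loop makes; it leans on the erase_aeb() helper introduced earlier in this patch and is not a drop-in replacement for the real loop:

/* Illustrative only: why old fastmap-anchor PEBs cannot be erased lazily. */
static int dispose_old_peb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb)
{
	/*
	 * An outdated anchor must be unreadable before the next possible
	 * attach, so it is erased synchronously; ordinary PEBs can wait
	 * for the background thread.
	 */
	bool sync = (aeb->vol_id == UBI_FM_SB_VOLUME_ID);

	return erase_aeb(ubi, aeb, sync);
}

Everything else still goes through the background thread, so the extra attach-time cost is only paid in the rare stale-anchor case.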
+ */ + if (aeb->vol_id == UBI_FM_SB_VOLUME_ID) + sync = true; - e->pnum = aeb->pnum; - e->ec = aeb->ec; - ubi_assert(!ubi->lookuptbl[e->pnum]); - ubi->lookuptbl[e->pnum] = e; - if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) { - wl_entry_destroy(ubi, e); + err = erase_aeb(ubi, aeb, sync); + if (err) goto out_free; - } } found_pebs++; diff --git a/drivers/mux/core.c b/drivers/mux/core.c index 2260063b0ea8..6e5cf9d9cd99 100644 --- a/drivers/mux/core.c +++ b/drivers/mux/core.c @@ -413,6 +413,7 @@ static int of_dev_node_match(struct device *dev, const void *data) return dev->of_node == data; } +/* Note this function returns a reference to the mux_chip dev. */ static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np) { struct device *dev; @@ -466,6 +467,7 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name) (!args.args_count && (mux_chip->controllers > 1))) { dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n", np, args.np); + put_device(&mux_chip->dev); return ERR_PTR(-EINVAL); } @@ -476,10 +478,10 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name) if (controller >= mux_chip->controllers) { dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n", np, controller, args.np); + put_device(&mux_chip->dev); return ERR_PTR(-EINVAL); } - get_device(&mux_chip->dev); return &mux_chip->mux[controller]; } EXPORT_SYMBOL_GPL(mux_control_get); diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index c02cc817a490..60666db31886 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -450,7 +450,7 @@ static void rlb_update_client(struct rlb_client_info *client_info) { int i; - if (!client_info->slave) + if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst)) return; for (i = 0; i < RLB_ARP_BURST_SIZE; i++) { @@ -943,6 +943,10 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], skb->priority = TC_PRIO_CONTROL; skb->dev = slave->dev; + netdev_dbg(slave->bond->dev, + "Send learning packet: dev %s mac %pM vlan %d\n", + slave->dev->name, mac_addr, vid); + if (vid) __vlan_hwaccel_put_tag(skb, vlan_proto, vid); @@ -965,14 +969,13 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data) u8 *mac_addr = data->mac_addr; struct bond_vlan_tag *tags; - if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) { - if (strict_match && - ether_addr_equal_64bits(mac_addr, - upper->dev_addr)) { + if (is_vlan_dev(upper) && + bond->nest_level == vlan_get_encap_level(upper) - 1) { + if (upper->addr_assign_type == NET_ADDR_STOLEN) { alb_send_lp_vid(slave, mac_addr, vlan_dev_vlan_proto(upper), vlan_dev_vlan_id(upper)); - } else if (!strict_match) { + } else { alb_send_lp_vid(slave, upper->dev_addr, vlan_dev_vlan_proto(upper), vlan_dev_vlan_id(upper)); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b2db581131b2..15aedb64a02b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1524,44 +1524,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) goto err_close; } - /* If the mode uses primary, then the following is handled by - * bond_change_active_slave(). 
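The hunk here removes the early promiscuity/allmulti block; it reappears further down, after the point where every later failure can still unwind it, and with a fix: a failed dev_set_allmulti() now drops the promiscuity reference it just took. The underlying ladder pattern, as a self-contained sketch with a hypothetical attach_slave() helper (not bonding code):

#include <linux/netdevice.h>

/* Classic kernel unwind ladder: every successful +1 reference taken
 * before a failure point gets a matching -1 on the error path.
 */
static int attach_slave(struct net_device *master, struct net_device *slave)
{
	int err;

	if (master->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(slave, 1);
		if (err)
			return err;
	}

	if (master->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(slave, 1);
		if (err)
			goto err_unwind_promisc;
	}

	return 0;

err_unwind_promisc:
	if (master->flags & IFF_PROMISC)
		dev_set_promiscuity(slave, -1);
	return err;
}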
- */ - if (!bond_uses_primary(bond)) { - /* set promiscuity level to new slave */ - if (bond_dev->flags & IFF_PROMISC) { - res = dev_set_promiscuity(slave_dev, 1); - if (res) - goto err_close; - } - - /* set allmulti level to new slave */ - if (bond_dev->flags & IFF_ALLMULTI) { - res = dev_set_allmulti(slave_dev, 1); - if (res) - goto err_close; - } - - netif_addr_lock_bh(bond_dev); - - dev_mc_sync_multiple(slave_dev, bond_dev); - dev_uc_sync_multiple(slave_dev, bond_dev); - - netif_addr_unlock_bh(bond_dev); - } - - if (BOND_MODE(bond) == BOND_MODE_8023AD) { - /* add lacpdu mc addr to mc list */ - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; - - dev_mc_add(slave_dev, lacpdu_multicast); - } - res = vlan_vids_add_by_dev(slave_dev, bond_dev); if (res) { netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n", slave_dev->name); - goto err_close; + goto err_hwaddr_unsync; } prev_slave = bond_last_slave(bond); @@ -1689,8 +1656,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) } /* switch(bond_mode) */ #ifdef CONFIG_NET_POLL_CONTROLLER - slave_dev->npinfo = bond->dev->npinfo; - if (slave_dev->npinfo) { + if (bond->dev->npinfo) { if (slave_enable_netpoll(new_slave)) { netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n"); res = -EBUSY; @@ -1721,6 +1687,42 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) goto err_upper_unlink; } + bond->nest_level = dev_get_nest_level(bond_dev) + 1; + + /* If the mode uses primary, then the following is handled by + * bond_change_active_slave(). + */ + if (!bond_uses_primary(bond)) { + /* set promiscuity level to new slave */ + if (bond_dev->flags & IFF_PROMISC) { + res = dev_set_promiscuity(slave_dev, 1); + if (res) + goto err_sysfs_del; + } + + /* set allmulti level to new slave */ + if (bond_dev->flags & IFF_ALLMULTI) { + res = dev_set_allmulti(slave_dev, 1); + if (res) { + if (bond_dev->flags & IFF_PROMISC) + dev_set_promiscuity(slave_dev, -1); + goto err_sysfs_del; + } + } + + netif_addr_lock_bh(bond_dev); + dev_mc_sync_multiple(slave_dev, bond_dev); + dev_uc_sync_multiple(slave_dev, bond_dev); + netif_addr_unlock_bh(bond_dev); + + if (BOND_MODE(bond) == BOND_MODE_8023AD) { + /* add lacpdu mc addr to mc list */ + u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; + + dev_mc_add(slave_dev, lacpdu_multicast); + } + } + bond->slave_cnt++; bond_compute_features(bond); bond_set_carrier(bond); @@ -1734,6 +1736,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) if (bond_mode_uses_xmit_hash(bond)) bond_update_slave_arr(bond, NULL); + netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n", slave_dev->name, bond_is_active_slave(new_slave) ? 
"an active" : "a backup", @@ -1744,6 +1747,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) return 0; /* Undo stages on error */ +err_sysfs_del: + bond_sysfs_slave_del(new_slave); + err_upper_unlink: bond_upper_dev_unlink(bond, new_slave); @@ -1751,9 +1757,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) netdev_rx_handler_unregister(slave_dev); err_detach: - if (!bond_uses_primary(bond)) - bond_hw_addr_flush(bond_dev, slave_dev); - vlan_vids_del_by_dev(slave_dev, bond_dev); if (rcu_access_pointer(bond->primary_slave) == new_slave) RCU_INIT_POINTER(bond->primary_slave, NULL); @@ -1767,6 +1770,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) synchronize_rcu(); slave_disable_netpoll(new_slave); +err_hwaddr_unsync: + if (!bond_uses_primary(bond)) + bond_hw_addr_flush(bond_dev, slave_dev); + err_close: slave_dev->priv_flags &= ~IFF_BONDING; dev_close(slave_dev); @@ -3373,6 +3380,13 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res, } } +static int bond_get_nest_level(struct net_device *bond_dev) +{ + struct bonding *bond = netdev_priv(bond_dev); + + return bond->nest_level; +} + static void bond_get_stats(struct net_device *bond_dev, struct rtnl_link_stats64 *stats) { @@ -3381,7 +3395,7 @@ static void bond_get_stats(struct net_device *bond_dev, struct list_head *iter; struct slave *slave; - spin_lock(&bond->stats_lock); + spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev)); memcpy(stats, &bond->bond_stats, sizeof(*stats)); rcu_read_lock(); @@ -4176,6 +4190,7 @@ static const struct net_device_ops bond_netdev_ops = { .ndo_neigh_setup = bond_neigh_setup, .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, + .ndo_get_lock_subclass = bond_get_nest_level, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_netpoll_setup = bond_netpoll_setup, .ndo_netpoll_cleanup = bond_netpoll_cleanup, @@ -4674,6 +4689,7 @@ static int bond_init(struct net_device *bond_dev) if (!bond->wq) return -ENOMEM; + bond->nest_level = SINGLE_DEPTH_NESTING; netdev_lockdep_set_classes(bond_dev); list_add_tail(&bond->bond_list, &bn->dev_list); diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 5931aa2fe997..3d154eb63dcf 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option) static int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval) { - if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) { - netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n", - newval->string); - /* disable arp monitoring */ - bond->params.arp_interval = 0; - /* set miimon to default value */ - bond->params.miimon = BOND_DEFAULT_MIIMON; - netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n", - bond->params.miimon); + if (!bond_mode_uses_arp(newval->value)) { + if (bond->params.arp_interval) { + netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n", + newval->string); + /* disable arp monitoring */ + bond->params.arp_interval = 0; + } + + if (!bond->params.miimon) { + /* set miimon to default value */ + bond->params.miimon = BOND_DEFAULT_MIIMON; + netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n", + bond->params.miimon); + } } if (newval->value == BOND_MODE_ALB) @@ -1142,6 +1147,7 @@ static int 
bond_option_primary_set(struct bonding *bond, slave->dev->name); rcu_assign_pointer(bond->primary_slave, slave); strcpy(bond->params.primary, slave->dev->name); + bond->force_primary = true; bond_select_active_slave(bond); goto out; } diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index 1e37313054f3..6da69af103e6 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev, return 0; } -static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) +static void cc770_tx(struct net_device *dev, int mo) { struct cc770_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - struct can_frame *cf = (struct can_frame *)skb->data; - unsigned int mo = obj2msgobj(CC770_OBJ_TX); + struct can_frame *cf = (struct can_frame *)priv->tx_skb->data; u8 dlc, rtr; u32 id; int i; - if (can_dropped_invalid_skb(dev, skb)) - return NETDEV_TX_OK; - - if ((cc770_read_reg(priv, - msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { - netdev_err(dev, "TX register is still occupied!\n"); - return NETDEV_TX_BUSY; - } - - netif_stop_queue(dev); - dlc = cf->can_dlc; id = cf->can_id; - if (cf->can_id & CAN_RTR_FLAG) - rtr = 0; - else - rtr = MSGCFG_DIR; + rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR; + + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); cc770_write_reg(priv, msgobj[mo].ctrl1, RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES); - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES); + if (id & CAN_EFF_FLAG) { id &= CAN_EFF_MASK; cc770_write_reg(priv, msgobj[mo].config, @@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < dlc; i++) cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]); - /* Store echo skb before starting the transfer */ - can_put_echo_skb(skb, dev, 0); - cc770_write_reg(priv, msgobj[mo].ctrl1, - RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); + RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC); +} - stats->tx_bytes += dlc; +static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct cc770_priv *priv = netdev_priv(dev); + unsigned int mo = obj2msgobj(CC770_OBJ_TX); + if (can_dropped_invalid_skb(dev, skb)) + return NETDEV_TX_OK; - /* - * HM: We had some cases of repeated IRQs so make sure the - * INT is acknowledged I know it's already further up, but - * doing again fixed the issue - */ - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); + netif_stop_queue(dev); + + if ((cc770_read_reg(priv, + msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { + netdev_err(dev, "TX register is still occupied!\n"); + return NETDEV_TX_BUSY; + } + + priv->tx_skb = skb; + cc770_tx(dev, mo); return NETDEV_TX_OK; } @@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o) struct cc770_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; unsigned int mo = obj2msgobj(o); + struct can_frame *cf; + u8 ctrl1; + + ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1); - /* Nothing more to send, switch off interrupts */ cc770_write_reg(priv, msgobj[mo].ctrl0, MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); - /* - * We had some cases of repeated IRQ so make sure the - * INT is acknowledged + 
cc770_write_reg(priv, msgobj[mo].ctrl1, + RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES); + + if (unlikely(!priv->tx_skb)) { + netdev_err(dev, "missing tx skb in tx interrupt\n"); + return; + } + + if (unlikely(ctrl1 & MSGLST_SET)) { + stats->rx_over_errors++; + stats->rx_errors++; + } + + /* When the CC770 is sending an RTR message and it receives a regular + * message that matches the id of the RTR message, it will overwrite the + * outgoing message in the TX register. When this happens we must + * process the received message and try to transmit the outgoing skb + * again. */ - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); + if (unlikely(ctrl1 & NEWDAT_SET)) { + cc770_rx(dev, mo, ctrl1); + cc770_tx(dev, mo); + return; + } + cf = (struct can_frame *)priv->tx_skb->data; + stats->tx_bytes += cf->can_dlc; stats->tx_packets++; + + can_put_echo_skb(priv->tx_skb, dev, 0); can_get_echo_skb(dev, 0); + priv->tx_skb = NULL; + netif_wake_queue(dev); } @@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv) priv->can.do_set_bittiming = cc770_set_bittiming; priv->can.do_set_mode = cc770_set_mode; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; + priv->tx_skb = NULL; memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags)); diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h index a1739db98d91..95752e1d1283 100644 --- a/drivers/net/can/cc770/cc770.h +++ b/drivers/net/can/cc770/cc770.h @@ -193,6 +193,8 @@ struct cc770_priv { u8 cpu_interface; /* CPU interface register */ u8 clkout; /* Clock out register */ u8 bus_config; /* Bus conffiguration register */ + + struct sk_buff *tx_skb; }; struct net_device *alloc_cc770dev(int sizeof_priv); diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 365a8cc62405..b6a681bce400 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -604,7 +604,7 @@ void can_bus_off(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); - netdev_dbg(dev, "bus-off\n"); + netdev_info(dev, "bus-off\n"); netif_carrier_off(dev); diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index a13a4896a8bd..ed8a2a7ce500 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -189,7 +189,7 @@ * MX35 FlexCAN2 03.00.00.00 no no ? no no * MX53 FlexCAN2 03.00.00.00 yes no no no no * MX6s FlexCAN3 10.00.12.00 yes yes no no yes - * VF610 FlexCAN3 ? no yes ? yes yes? + * VF610 FlexCAN3 ? no yes no yes yes? * * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. 
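The table above is the human-readable form of what the driver encodes as per-SoC quirk bits; the hunk just below adds FLEXCAN_QUIRK_BROKEN_PERR_STATE to the VF610 entry. The mechanism itself is a plain capability bitmask. A minimal sketch of the idiom, using hypothetical MY_QUIRK_* names rather than the driver's real flags:

#include <linux/bitops.h>
#include <linux/types.h>

/* Hypothetical flags; the driver's real ones are FLEXCAN_QUIRK_*. */
#define MY_QUIRK_BROKEN_PERR_STATE	BIT(0)	/* passive-error IRQ unusable */
#define MY_QUIRK_USE_OFF_TIMESTAMP	BIT(1)

struct my_devtype_data {
	u32 quirks;
};

static const struct my_devtype_data my_vf610_data = {
	.quirks = MY_QUIRK_BROKEN_PERR_STATE | MY_QUIRK_USE_OFF_TIMESTAMP,
};

/* At runtime, each workaround reduces to a single bit test: */
static bool my_has_quirk(const struct my_devtype_data *d, u32 flag)
{
	return d->quirks & flag;
}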
*/ @@ -297,7 +297,8 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { static const struct flexcan_devtype_data fsl_vf610_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | - FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, + FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | + FLEXCAN_QUIRK_BROKEN_PERR_STATE, }; static const struct can_bittiming_const flexcan_bittiming_const = { @@ -525,7 +526,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) data = be32_to_cpup((__be32 *)&cf->data[0]); flexcan_write(data, &priv->tx_mb->data[0]); } - if (cf->can_dlc > 3) { + if (cf->can_dlc > 4) { data = be32_to_cpup((__be32 *)&cf->data[4]); flexcan_write(data, &priv->tx_mb->data[1]); } diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 2772d05ff11c..fedd927ba6ed 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -30,6 +30,7 @@ #define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2) #define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3) #define IFI_CANFD_STCMD_BUSOFF BIT(4) +#define IFI_CANFD_STCMD_ERROR_WARNING BIT(5) #define IFI_CANFD_STCMD_BUSMONITOR BIT(16) #define IFI_CANFD_STCMD_LOOPBACK BIT(18) #define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24) @@ -52,7 +53,10 @@ #define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13) #define IFI_CANFD_INTERRUPT 0xc +#define IFI_CANFD_INTERRUPT_ERROR_BUSOFF BIT(0) #define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1) +#define IFI_CANFD_INTERRUPT_ERROR_STATE_CHG BIT(2) +#define IFI_CANFD_INTERRUPT_ERROR_REC_TEC_INC BIT(3) #define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10) #define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16) #define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22) @@ -61,6 +65,10 @@ #define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31)) #define IFI_CANFD_IRQMASK 0x10 +#define IFI_CANFD_IRQMASK_ERROR_BUSOFF BIT(0) +#define IFI_CANFD_IRQMASK_ERROR_WARNING BIT(1) +#define IFI_CANFD_IRQMASK_ERROR_STATE_CHG BIT(2) +#define IFI_CANFD_IRQMASK_ERROR_REC_TEC_INC BIT(3) #define IFI_CANFD_IRQMASK_SET_ERR BIT(7) #define IFI_CANFD_IRQMASK_SET_TS BIT(15) #define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16) @@ -136,6 +144,8 @@ #define IFI_CANFD_SYSCLOCK 0x50 #define IFI_CANFD_VER 0x54 +#define IFI_CANFD_VER_REV_MASK 0xff +#define IFI_CANFD_VER_REV_MIN_SUPPORTED 0x15 #define IFI_CANFD_IP_ID 0x58 #define IFI_CANFD_IP_ID_VALUE 0xD073CAFD @@ -220,7 +230,10 @@ static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable) if (enable) { enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY | - IFI_CANFD_IRQMASK_RXFIFO_NEMPTY; + IFI_CANFD_IRQMASK_RXFIFO_NEMPTY | + IFI_CANFD_IRQMASK_ERROR_STATE_CHG | + IFI_CANFD_IRQMASK_ERROR_WARNING | + IFI_CANFD_IRQMASK_ERROR_BUSOFF; if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER; } @@ -361,12 +374,13 @@ static int ifi_canfd_handle_lost_msg(struct net_device *ndev) return 1; } -static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr) +static int ifi_canfd_handle_lec_err(struct net_device *ndev) { struct ifi_canfd_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; + u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST | IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST | IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST | @@ -449,6 +463,11 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, switch (new_state) { case 
CAN_STATE_ERROR_ACTIVE: + /* error active state */ + priv->can.can_stats.error_warning++; + priv->can.state = CAN_STATE_ERROR_ACTIVE; + break; + case CAN_STATE_ERROR_WARNING: /* error warning state */ priv->can.can_stats.error_warning++; priv->can.state = CAN_STATE_ERROR_WARNING; @@ -477,7 +496,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, ifi_canfd_get_berr_counter(ndev, &bec); switch (new_state) { - case CAN_STATE_ERROR_ACTIVE: + case CAN_STATE_ERROR_WARNING: /* error warning state */ cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (bec.txerr > bec.rxerr) ? @@ -510,22 +529,21 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, return 1; } -static int ifi_canfd_handle_state_errors(struct net_device *ndev, u32 stcmd) +static int ifi_canfd_handle_state_errors(struct net_device *ndev) { struct ifi_canfd_priv *priv = netdev_priv(ndev); + u32 stcmd = readl(priv->base + IFI_CANFD_STCMD); int work_done = 0; - u32 isr; - /* - * The ErrWarn condition is a little special, since the bit is - * located in the INTERRUPT register instead of STCMD register. - */ - isr = readl(priv->base + IFI_CANFD_INTERRUPT); - if ((isr & IFI_CANFD_INTERRUPT_ERROR_WARNING) && + if ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) && + (priv->can.state != CAN_STATE_ERROR_ACTIVE)) { + netdev_dbg(ndev, "Error, entered active state\n"); + work_done += ifi_canfd_handle_state_change(ndev, + CAN_STATE_ERROR_ACTIVE); + } + + if ((stcmd & IFI_CANFD_STCMD_ERROR_WARNING) && (priv->can.state != CAN_STATE_ERROR_WARNING)) { - /* Clear the interrupt */ - writel(IFI_CANFD_INTERRUPT_ERROR_WARNING, - priv->base + IFI_CANFD_INTERRUPT); netdev_dbg(ndev, "Error, entered warning state\n"); work_done += ifi_canfd_handle_state_change(ndev, CAN_STATE_ERROR_WARNING); @@ -552,18 +570,11 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota) { struct net_device *ndev = napi->dev; struct ifi_canfd_priv *priv = netdev_priv(ndev); - const u32 stcmd_state_mask = IFI_CANFD_STCMD_ERROR_PASSIVE | - IFI_CANFD_STCMD_BUSOFF; - int work_done = 0; - - u32 stcmd = readl(priv->base + IFI_CANFD_STCMD); u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD); - u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); + int work_done = 0; /* Handle bus state changes */ - if ((stcmd & stcmd_state_mask) || - ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) == 0)) - work_done += ifi_canfd_handle_state_errors(ndev, stcmd); + work_done += ifi_canfd_handle_state_errors(ndev); /* Handle lost messages on RX */ if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW) @@ -571,7 +582,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota) /* Handle lec errors on the bus */ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) - work_done += ifi_canfd_handle_lec_err(ndev, errctr); + work_done += ifi_canfd_handle_lec_err(ndev); /* Handle normal messages on RX */ if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY)) @@ -592,12 +603,13 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id) struct net_device_stats *stats = &ndev->stats; const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY | IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER | + IFI_CANFD_INTERRUPT_ERROR_COUNTER | + IFI_CANFD_INTERRUPT_ERROR_STATE_CHG | IFI_CANFD_INTERRUPT_ERROR_WARNING | - IFI_CANFD_INTERRUPT_ERROR_COUNTER; + IFI_CANFD_INTERRUPT_ERROR_BUSOFF; const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY | IFI_CANFD_INTERRUPT_TXFIFO_REMOVE; - const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ | - IFI_CANFD_INTERRUPT_ERROR_WARNING)); + const u32 clr_irq_mask = 
~((u32)IFI_CANFD_INTERRUPT_SET_IRQ); u32 isr; isr = readl(priv->base + IFI_CANFD_INTERRUPT); @@ -933,7 +945,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) struct resource *res; void __iomem *addr; int irq, ret; - u32 id; + u32 id, rev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); addr = devm_ioremap_resource(dev, res); @@ -947,6 +959,13 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) return -EINVAL; } + rev = readl(addr + IFI_CANFD_VER) & IFI_CANFD_VER_REV_MASK; + if (rev < IFI_CANFD_VER_REV_MIN_SUPPORTED) { + dev_err(dev, "This block is too old (rev %i), minimum supported is rev %i\n", + rev, IFI_CANFD_VER_REV_MIN_SUPPORTED); + return -EINVAL; + } + ndev = alloc_candev(sizeof(*priv), 1); if (!ndev) return -ENOMEM; diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index f4947a74b65f..ca3fa82316c2 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -25,6 +25,7 @@ #include #include #include +#include /* napi related */ #define M_CAN_NAPI_WEIGHT 64 @@ -246,7 +247,7 @@ enum m_can_mram_cfg { /* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */ #define RXFC_FWM_SHIFT 24 -#define RXFC_FWM_MASK (0x7f < RXFC_FWM_SHIFT) +#define RXFC_FWM_MASK (0x7f << RXFC_FWM_SHIFT) #define RXFC_FS_SHIFT 16 #define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT) @@ -1072,7 +1073,8 @@ static void m_can_chip_config(struct net_device *dev) } else { /* Version 3.1.x or 3.2.x */ - cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE); + cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE | + CCCR_NISO); /* Only 3.2.x has NISO Bit implemented */ if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) @@ -1682,6 +1684,8 @@ static __maybe_unused int m_can_suspend(struct device *dev) m_can_clk_stop(priv); } + pinctrl_pm_select_sleep_state(dev); + priv->can.state = CAN_STATE_SLEEPING; return 0; @@ -1692,6 +1696,8 @@ static __maybe_unused int m_can_resume(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct m_can_priv *priv = netdev_priv(ndev); + pinctrl_pm_select_default_state(dev); + m_can_init_ram(priv); priv->can.state = CAN_STATE_ERROR_ACTIVE; diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 85268be0c913..ed8561d4a90f 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -258,22 +258,19 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, /* if this frame is an echo, */ if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) && !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) { - int n; unsigned long flags; spin_lock_irqsave(&priv->echo_lock, flags); - n = can_get_echo_skb(priv->ndev, msg->client); - spin_unlock_irqrestore(&priv->echo_lock, flags); + can_get_echo_skb(priv->ndev, msg->client); /* count bytes of the echo instead of skb */ stats->tx_bytes += cf_len; stats->tx_packets++; - if (n) { - /* restart tx queue only if a slot is free */ - netif_wake_queue(priv->ndev); - } + /* restart tx queue (a slot is free) */ + netif_wake_queue(priv->ndev); + spin_unlock_irqrestore(&priv->echo_lock, flags); return 0; } @@ -336,7 +333,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */ if (pucan_status_is_rx_barrier(msg)) { - unsigned long flags; if (priv->enable_tx_path) { int err = priv->enable_tx_path(priv); @@ -345,16 +341,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, return err; } - /* restart network queue only if echo skb array is 
free */ - spin_lock_irqsave(&priv->echo_lock, flags); - - if (!priv->can.echo_skb[priv->echo_idx]) { - spin_unlock_irqrestore(&priv->echo_lock, flags); - - netif_wake_queue(ndev); - } else { - spin_unlock_irqrestore(&priv->echo_lock, flags); - } + /* start network queue (echo_skb array is empty) */ + netif_start_queue(ndev); return 0; } @@ -729,11 +717,6 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, */ should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]); - spin_unlock_irqrestore(&priv->echo_lock, flags); - - /* write the skb on the interface */ - priv->write_tx_msg(priv, msg); - /* stop network tx queue if not enough room to save one more msg too */ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) should_stop_tx_queue |= (room_left < @@ -745,6 +728,11 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, if (should_stop_tx_queue) netif_stop_queue(ndev); + spin_unlock_irqrestore(&priv->echo_lock, flags); + + /* write the skb on the interface */ + priv->write_tx_msg(priv, msg); + return NETDEV_TX_OK; } diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index b4efd711f824..fa689854f16b 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2"); #define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */ #define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */ +#define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \ + ((u32)(y) << 16) | \ + ((u32)(z) << 8)) + /* System Control Registers Bits */ #define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */ #define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */ @@ -349,8 +353,12 @@ static irqreturn_t pciefd_irq_handler(int irq, void *arg) priv->tx_pages_free++; spin_unlock_irqrestore(&priv->tx_lock, flags); - /* wake producer up */ - netif_wake_queue(priv->ucan.ndev); + /* wake producer up (only if enough room in echo_skb array) */ + spin_lock_irqsave(&priv->ucan.echo_lock, flags); + if (!priv->ucan.can.echo_skb[priv->ucan.echo_idx]) + netif_wake_queue(priv->ucan.ndev); + + spin_unlock_irqrestore(&priv->ucan.echo_lock, flags); } /* re-enable Rx DMA transfer for this CAN */ @@ -779,6 +787,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev, "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count, hw_ver_major, hw_ver_minor, hw_ver_sub); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + /* FW < v3.3.0 DMA logic doesn't handle correctly the mix of 32-bit and + * 64-bit logical addresses: this workaround forces usage of 32-bit + * DMA addresses only when such a fw is detected. 
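The workaround that follows this comment has a common shape: pack the firmware version into one comparable integer and, if it predates the fix, pin the device to 32-bit DMA. A self-contained sketch under those assumptions (FW_VERSION mirrors the patch's PCIEFD_FW_VERSION macro; restrict_dma_for_old_fw is a hypothetical name):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

#define FW_VERSION(x, y, z)	(((u32)(x) << 24) | ((u32)(y) << 16) | ((u32)(z) << 8))

static void restrict_dma_for_old_fw(struct pci_dev *pdev,
				    u8 major, u8 minor, u8 sub)
{
	/* Pre-v3.3.0 firmware mixes 32- and 64-bit logical addresses, so
	 * cap such devices at 32-bit DMA; newer firmware keeps the default.
	 */
	if (FW_VERSION(major, minor, sub) >= FW_VERSION(3, 3, 0))
		return;

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		dev_warn(&pdev->dev, "can't set 32-bit DMA mask\n");
}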
+ */ + if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) < + PCIEFD_FW_VERSION(3, 3, 0)) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + dev_warn(&pdev->dev, + "warning: can't set DMA mask %llxh (err %d)\n", + DMA_BIT_MASK(32), err); + } +#endif + /* stop system clock */ pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN, PCIEFD_REG_SYS_CTL_CLR); @@ -825,7 +848,10 @@ static int peak_pciefd_probe(struct pci_dev *pdev, err_disable_pci: pci_disable_device(pdev); - return err; + /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while + * the probe() function must return a negative errno in case of failure + * (err is unchanged if negative) */ + return pcibios_err_to_errno(err); } /* free the board structure object, as well as its resources: */ diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 131026fbc2d7..5adc95c922ee 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -717,7 +717,10 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) failure_disable_pci: pci_disable_device(pdev); - return err; + /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while + * the probe() function must return a negative errno in case of failure + * (err is unchanged if negative) */ + return pcibios_err_to_errno(err); } static void peak_pci_remove(struct pci_dev *pdev) diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index 5590c559a8ca..53e320c92a8b 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -91,6 +91,7 @@ #define HI3110_STAT_BUSOFF BIT(2) #define HI3110_STAT_ERRP BIT(3) #define HI3110_STAT_ERRW BIT(4) +#define HI3110_STAT_TXMTY BIT(7) #define HI3110_BTR0_SJW_SHIFT 6 #define HI3110_BTR0_BRP_SHIFT 0 @@ -427,8 +428,10 @@ static int hi3110_get_berr_counter(const struct net_device *net, struct hi3110_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; + mutex_lock(&priv->hi3110_lock); bec->txerr = hi3110_read(spi, HI3110_READ_TEC); bec->rxerr = hi3110_read(spi, HI3110_READ_REC); + mutex_unlock(&priv->hi3110_lock); return 0; } @@ -735,10 +738,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) } } - if (intf == 0) - break; - - if (intf & HI3110_INT_TXCPLT) { + if (priv->tx_len && statf & HI3110_STAT_TXMTY) { net->stats.tx_packets++; net->stats.tx_bytes += priv->tx_len - 1; can_led_event(net, CAN_LED_EVENT_TX); @@ -748,6 +748,9 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) } netif_wake_queue(net); } + + if (intf == 0) + break; } mutex_unlock(&priv->hi3110_lock); return IRQ_HANDLED; diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 4d4941469cfc..db6ea936dc3f 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -637,6 +637,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota) mbx_mask = hecc_read(priv, HECC_CANMIM); mbx_mask |= HECC_TX_MBOX_MASK; hecc_write(priv, HECC_CANMIM, mbx_mask); + } else { + /* repoll is done only if whole budget is used */ + num_pkts = quota; } return num_pkts; diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index b3d02759c226..d0846ae9e0e4 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -288,6 +288,8 @@ static void ems_usb_read_interrupt_callback(struct urb *urb) case -ECONNRESET: /* unlink */ case -ENOENT: + case -EPIPE: + case -EPROTO: case -ESHUTDOWN: return; @@ -1069,6 +1071,7 @@ static void 
ems_usb_disconnect(struct usb_interface *intf) usb_free_urb(dev->intr_urb); kfree(dev->intr_in_buffer); + kfree(dev->tx_msg_buffer); } } diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 9fdb0f0bfa06..c6dcf93675c0 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -393,6 +393,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb) break; case -ENOENT: + case -EPIPE: + case -EPROTO: case -ESHUTDOWN: return; diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 68ac3e88a8ce..8bf80ad9dc44 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev) dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)", rc); - return rc; + return (rc > 0) ? 0 : rc; } static void gs_usb_xmit_callback(struct urb *urb) diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 9b18d96ef526..daed57d3d209 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, } if (pos + tmp->len > actual_len) { - dev_err(dev->udev->dev.parent, - "Format error\n"); + dev_err_ratelimited(dev->udev->dev.parent, + "Format error\n"); break; } @@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, if (err) { netdev_err(netdev, "Error transmitting URB\n"); usb_unanchor_urb(urb); + kfree(buf); usb_free_urb(urb); return err; } @@ -1178,7 +1179,7 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, skb = alloc_can_skb(priv->netdev, &cf); if (!skb) { - stats->tx_dropped++; + stats->rx_dropped++; return; } @@ -1325,6 +1326,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) case 0: break; case -ENOENT: + case -EPIPE: + case -EPROTO: case -ESHUTDOWN: return; default: @@ -1333,7 +1336,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) goto resubmit_urb; } - while (pos <= urb->actual_length - MSG_HEADER_LEN) { + while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) { msg = urb->transfer_buffer + pos; /* The Kvaser firmware can only read and write messages that @@ -1352,7 +1355,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) } if (pos + msg->len > urb->actual_length) { - dev_err(dev->udev->dev.parent, "Format error\n"); + dev_err_ratelimited(dev->udev->dev.parent, + "Format error\n"); break; } @@ -1768,6 +1772,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); usb_unanchor_urb(urb); + kfree(buf); stats->tx_dropped++; diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index 7f0272558bef..e0c24abce16c 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -592,6 +592,8 @@ static void mcba_usb_read_bulk_callback(struct urb *urb) break; case -ENOENT: + case -EPIPE: + case -EPROTO: case -ESHUTDOWN: return; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 7ccdc3e30c98..53d6bb045e9e 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) void *cmd_head = pcan_usb_fd_cmd_buffer(dev); int err = 0; u8 *packet_ptr; - int i, n = 1, packet_len; + int packet_len; ptrdiff_t cmd_len; /* usb device 
unregistered? */ @@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) } packet_ptr = cmd_head; + packet_len = cmd_len; /* firmware is not able to re-assemble 512 bytes buffer in full-speed */ - if ((dev->udev->speed != USB_SPEED_HIGH) && - (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) { - packet_len = PCAN_UFD_LOSPD_PKT_SIZE; - n += cmd_len / packet_len; - } else { - packet_len = cmd_len; - } + if (unlikely(dev->udev->speed != USB_SPEED_HIGH)) + packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE); - for (i = 0; i < n; i++) { + do { err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT), @@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) } packet_ptr += packet_len; - } + cmd_len -= packet_len; + + if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE) + packet_len = cmd_len; + + } while (packet_len > 0); return err; } diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index d000cb62d6ae..27861c417c94 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -524,6 +524,8 @@ static void usb_8dev_read_bulk_callback(struct urb *urb) break; case -ENOENT: + case -EPIPE: + case -EPROTO: case -ESHUTDOWN: return; diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 8404e8852a0f..b4c4a2c76437 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, tbp = peer_tb; } - if (tbp[IFLA_IFNAME]) { + if (ifmp && tbp[IFLA_IFNAME]) { nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); name_assign_type = NET_NAME_USER; } else { diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 89aec07c225f..5a24039733ef 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -2,6 +2,7 @@ * * Copyright (C) 2012 - 2014 Xilinx, Inc. * Copyright (C) 2009 PetaLogix. All rights reserved. + * Copyright (C) 2017 Sandvik Mining and Construction Oy * * Description: * This driver is developed for Axi CAN IP and for Zynq CANPS Controller. @@ -25,8 +26,10 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -101,7 +104,7 @@ enum xcan_reg { #define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\ XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \ XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \ - XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK) + XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK) /* CAN register bit shift - XCAN___SHIFT */ #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ @@ -118,6 +121,7 @@ enum xcan_reg { /** * struct xcan_priv - This definition define CAN driver instance * @can: CAN private data structure. 
+ * @tx_lock: Lock for synchronizing TX interrupt handling * @tx_head: Tx CAN packets ready to send on the queue * @tx_tail: Tx CAN packets successfully sended on the queue * @tx_max: Maximum number packets the driver can send @@ -132,6 +136,7 @@ enum xcan_reg { */ struct xcan_priv { struct can_priv can; + spinlock_t tx_lock; unsigned int tx_head; unsigned int tx_tail; unsigned int tx_max; @@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = { .brp_inc = 1, }; +#define XCAN_CAP_WATERMARK 0x0001 +struct xcan_devtype_data { + unsigned int caps; +}; + /** * xcan_write_reg_le - Write a value to the device register little endian * @priv: Driver private data structure @@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev) usleep_range(500, 10000); } + /* reset clears FIFOs */ + priv->tx_head = 0; + priv->tx_tail = 0; + return 0; } @@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct net_device_stats *stats = &ndev->stats; struct can_frame *cf = (struct can_frame *)skb->data; u32 id, dlc, data[2] = {0, 0}; + unsigned long flags; if (can_dropped_invalid_skb(ndev, skb)) return NETDEV_TX_OK; @@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); + + spin_lock_irqsave(&priv->tx_lock, flags); + priv->tx_head++; /* Write the Frame to Xilinx CAN TX FIFO */ @@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) stats->tx_bytes += cf->can_dlc; } + /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */ + if (priv->tx_max > 1) + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK); + /* Check if the TX buffer is full */ if ((priv->tx_head - priv->tx_tail) == priv->tx_max) netif_stop_queue(ndev); + spin_unlock_irqrestore(&priv->tx_lock, flags); + return NETDEV_TX_OK; } @@ -529,6 +553,123 @@ static int xcan_rx(struct net_device *ndev) return 1; } +/** + * xcan_current_error_state - Get current error state from HW + * @ndev: Pointer to net_device structure + * + * Checks the current CAN error state from the HW. Note that this + * only checks for ERROR_PASSIVE and ERROR_WARNING. + * + * Return: + * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE + * otherwise. + */ +static enum can_state xcan_current_error_state(struct net_device *ndev) +{ + struct xcan_priv *priv = netdev_priv(ndev); + u32 status = priv->read_reg(priv, XCAN_SR_OFFSET); + + if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) + return CAN_STATE_ERROR_PASSIVE; + else if (status & XCAN_SR_ERRWRN_MASK) + return CAN_STATE_ERROR_WARNING; + else + return CAN_STATE_ERROR_ACTIVE; +} + +/** + * xcan_set_error_state - Set new CAN error state + * @ndev: Pointer to net_device structure + * @new_state: The new CAN state to be set + * @cf: Error frame to be populated or NULL + * + * Set new CAN error state for the device, updating statistics and + * populating the error frame if given. 
+ */ +static void xcan_set_error_state(struct net_device *ndev, + enum can_state new_state, + struct can_frame *cf) +{ + struct xcan_priv *priv = netdev_priv(ndev); + u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET); + u32 txerr = ecr & XCAN_ECR_TEC_MASK; + u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT; + + priv->can.state = new_state; + + if (cf) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + + switch (new_state) { + case CAN_STATE_ERROR_PASSIVE: + priv->can.can_stats.error_passive++; + if (cf) + cf->data[1] = (rxerr > 127) ? + CAN_ERR_CRTL_RX_PASSIVE : + CAN_ERR_CRTL_TX_PASSIVE; + break; + case CAN_STATE_ERROR_WARNING: + priv->can.can_stats.error_warning++; + if (cf) + cf->data[1] |= (txerr > rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + break; + case CAN_STATE_ERROR_ACTIVE: + if (cf) + cf->data[1] |= CAN_ERR_CRTL_ACTIVE; + break; + default: + /* non-ERROR states are handled elsewhere */ + WARN_ON(1); + break; + } +} + +/** + * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX + * @ndev: Pointer to net_device structure + * + * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if + * the performed RX/TX has caused it to drop to a lesser state and set + * the interface state accordingly. + */ +static void xcan_update_error_state_after_rxtx(struct net_device *ndev) +{ + struct xcan_priv *priv = netdev_priv(ndev); + enum can_state old_state = priv->can.state; + enum can_state new_state; + + /* changing error state due to successful frame RX/TX can only + * occur from these states + */ + if (old_state != CAN_STATE_ERROR_WARNING && + old_state != CAN_STATE_ERROR_PASSIVE) + return; + + new_state = xcan_current_error_state(ndev); + + if (new_state != old_state) { + struct sk_buff *skb; + struct can_frame *cf; + + skb = alloc_can_err_skb(ndev, &cf); + + xcan_set_error_state(ndev, new_state, skb ? cf : NULL); + + if (skb) { + struct net_device_stats *stats = &ndev->stats; + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } + } +} + /** * xcan_err_interrupt - error frame Isr * @ndev: net_device pointer @@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; - u32 err_status, status, txerr = 0, rxerr = 0; + u32 err_status; skb = alloc_can_err_skb(ndev, &cf); err_status = priv->read_reg(priv, XCAN_ESR_OFFSET); priv->write_reg(priv, XCAN_ESR_OFFSET, err_status); - txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK; - rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) & - XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT); - status = priv->read_reg(priv, XCAN_SR_OFFSET); if (isr & XCAN_IXR_BSOFF_MASK) { priv->can.state = CAN_STATE_BUS_OFF; @@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) can_bus_off(ndev); if (skb) cf->can_id |= CAN_ERR_BUSOFF; - } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) { - priv->can.state = CAN_STATE_ERROR_PASSIVE; - priv->can.can_stats.error_passive++; - if (skb) { - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = (rxerr > 127) ? - CAN_ERR_CRTL_RX_PASSIVE : - CAN_ERR_CRTL_TX_PASSIVE; - cf->data[6] = txerr; - cf->data[7] = rxerr; - } - } else if (status & XCAN_SR_ERRWRN_MASK) { - priv->can.state = CAN_STATE_ERROR_WARNING; - priv->can.can_stats.error_warning++; - if (skb) { - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] |= (txerr > rxerr) ? 
- CAN_ERR_CRTL_TX_WARNING : - CAN_ERR_CRTL_RX_WARNING; - cf->data[6] = txerr; - cf->data[7] = rxerr; - } + } else { + enum can_state new_state = xcan_current_error_state(ndev); + + xcan_set_error_state(ndev, new_state, skb ? cf : NULL); } /* Check for Arbitration lost interrupt */ @@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) if (isr & XCAN_IXR_RXOFLW_MASK) { stats->rx_over_errors++; stats->rx_errors++; - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; @@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) isr = priv->read_reg(priv, XCAN_ISR_OFFSET); while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) { - if (isr & XCAN_IXR_RXOK_MASK) { - priv->write_reg(priv, XCAN_ICR_OFFSET, - XCAN_IXR_RXOK_MASK); - work_done += xcan_rx(ndev); - } else { - priv->write_reg(priv, XCAN_ICR_OFFSET, - XCAN_IXR_RXNEMP_MASK); - break; - } + work_done += xcan_rx(ndev); priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK); isr = priv->read_reg(priv, XCAN_ISR_OFFSET); } - if (work_done) + if (work_done) { can_led_event(ndev, CAN_LED_EVENT_RX); + xcan_update_error_state_after_rxtx(ndev); + } if (work_done < quota) { napi_complete_done(napi, work_done); ier = priv->read_reg(priv, XCAN_IER_OFFSET); - ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK); + ier |= XCAN_IXR_RXNEMP_MASK; priv->write_reg(priv, XCAN_IER_OFFSET, ier); } return work_done; @@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr) { struct xcan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; + unsigned int frames_in_fifo; + int frames_sent = 1; /* TXOK => at least 1 frame was sent */ + unsigned long flags; + int retries = 0; + + /* Synchronize with xmit as we need to know the exact number + * of frames in the FIFO to stay in sync due to the TXFEMP + * handling. + * This also prevents a race between netif_wake_queue() and + * netif_stop_queue(). + */ + spin_lock_irqsave(&priv->tx_lock, flags); + + frames_in_fifo = priv->tx_head - priv->tx_tail; + + if (WARN_ON_ONCE(frames_in_fifo == 0)) { + /* clear TXOK anyway to avoid getting back here */ + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); + spin_unlock_irqrestore(&priv->tx_lock, flags); + return; + } + + /* Check if 2 frames were sent (TXOK only means that at least 1 + * frame was sent). + */ + if (frames_in_fifo > 1) { + WARN_ON(frames_in_fifo > priv->tx_max); + + /* Synchronize TXOK and isr so that after the loop: + * (1) isr variable is up-to-date at least up to TXOK clear + * time. This avoids us clearing a TXOK of a second frame + * but not noticing that the FIFO is now empty and thus + * marking only a single frame as sent. + * (2) No TXOK is left. Having one could mean leaving a + * stray TXOK as we might process the associated frame + * via TXFEMP handling as we read TXFEMP *after* TXOK + * clear to satisfy (1). 
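Reduced to its decision logic, the accounting this comment describes can be modeled as below. This is a model, not the driver's code: txok and txfemp stand for the TXOK/TXFEMP interrupt bits sampled in the order just described, and tx_head/tx_tail are the driver's frame counters.

#include <linux/types.h>

static unsigned int frames_completed(unsigned int tx_head,
				     unsigned int tx_tail,
				     bool txok, bool txfemp)
{
	unsigned int in_fifo = tx_head - tx_tail;

	if (!txok || in_fifo == 0)
		return 0;	/* spurious TXOK */

	/* With more than one frame queued, only an empty FIFO proves
	 * that all of them have left; otherwise exactly one is certain.
	 */
	if (in_fifo > 1 && txfemp)
		return in_fifo;

	return 1;
}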
+ */ + while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) { + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); + isr = priv->read_reg(priv, XCAN_ISR_OFFSET); + } - while ((priv->tx_head - priv->tx_tail > 0) && - (isr & XCAN_IXR_TXOK_MASK)) { + if (isr & XCAN_IXR_TXFEMP_MASK) { + /* nothing in FIFO anymore */ + frames_sent = frames_in_fifo; + } + } else { + /* single frame in fifo, just clear TXOK */ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); + } + + while (frames_sent--) { can_get_echo_skb(ndev, priv->tx_tail % priv->tx_max); priv->tx_tail++; stats->tx_packets++; - isr = priv->read_reg(priv, XCAN_ISR_OFFSET); } - can_led_event(ndev, CAN_LED_EVENT_TX); + netif_wake_queue(ndev); + + spin_unlock_irqrestore(&priv->tx_lock, flags); + + can_led_event(ndev, CAN_LED_EVENT_TX); + xcan_update_error_state_after_rxtx(ndev); } /** @@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) struct net_device *ndev = (struct net_device *)dev_id; struct xcan_priv *priv = netdev_priv(ndev); u32 isr, ier; + u32 isr_errors; /* Get the interrupt status from Xilinx CAN */ isr = priv->read_reg(priv, XCAN_ISR_OFFSET); @@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) xcan_tx_interrupt(ndev, isr); /* Check for the type of error interrupt and Processing it */ - if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | - XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) { - priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK | - XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK | - XCAN_IXR_ARBLST_MASK)); + isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | + XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK); + if (isr_errors) { + priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors); xcan_err_interrupt(ndev, isr); } /* Check for the type of receive interrupt and Processing it */ - if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) { + if (isr & XCAN_IXR_RXNEMP_MASK) { ier = priv->read_reg(priv, XCAN_IER_OFFSET); - ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK); + ier &= ~XCAN_IXR_RXNEMP_MASK; priv->write_reg(priv, XCAN_IER_OFFSET, ier); napi_schedule(&priv->napi); } @@ -819,13 +984,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) static void xcan_chip_stop(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); - u32 ier; /* Disable interrupts and leave the can in configuration mode */ - ier = priv->read_reg(priv, XCAN_IER_OFFSET); - ier &= ~XCAN_INTR_ALL; - priv->write_reg(priv, XCAN_IER_OFFSET, ier); - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); + set_reset_mode(ndev); priv->can.state = CAN_STATE_STOPPED; } @@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = { */ static int __maybe_unused xcan_suspend(struct device *dev) { - if (!device_may_wakeup(dev)) - return pm_runtime_force_suspend(dev); + struct net_device *ndev = dev_get_drvdata(dev); - return 0; + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); + xcan_chip_stop(ndev); + } + + return pm_runtime_force_suspend(dev); } /** @@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev) */ static int __maybe_unused xcan_resume(struct device *dev) { - if (!device_may_wakeup(dev)) - return pm_runtime_force_resume(dev); + struct net_device *ndev = dev_get_drvdata(dev); + int ret; - return 0; + ret = pm_runtime_force_resume(dev); + if (ret) { + dev_err(dev, "pm_runtime_force_resume failed on resume\n"); + return ret; + } + + if (netif_running(ndev)) 
{ + ret = xcan_chip_start(ndev); + if (ret) { + dev_err(dev, "xcan_chip_start failed on resume\n"); + return ret; + } + + netif_device_attach(ndev); + netif_start_queue(ndev); + } + return 0; } /** @@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct xcan_priv *priv = netdev_priv(ndev); - if (netif_running(ndev)) { - netif_stop_queue(ndev); - netif_device_detach(ndev); - } - - priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK); - priv->can.state = CAN_STATE_SLEEPING; - clk_disable_unprepare(priv->bus_clk); clk_disable_unprepare(priv->can_clk); @@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct xcan_priv *priv = netdev_priv(ndev); int ret; - u32 isr, status; ret = clk_prepare_enable(priv->bus_clk); if (ret) { @@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev) return ret; } - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); - isr = priv->read_reg(priv, XCAN_ISR_OFFSET); - status = priv->read_reg(priv, XCAN_SR_OFFSET); - - if (netif_running(ndev)) { - if (isr & XCAN_IXR_BSOFF_MASK) { - priv->can.state = CAN_STATE_BUS_OFF; - priv->write_reg(priv, XCAN_SRR_OFFSET, - XCAN_SRR_RESET_MASK); - } else if ((status & XCAN_SR_ESTAT_MASK) == - XCAN_SR_ESTAT_MASK) { - priv->can.state = CAN_STATE_ERROR_PASSIVE; - } else if (status & XCAN_SR_ERRWRN_MASK) { - priv->can.state = CAN_STATE_ERROR_WARNING; - } else { - priv->can.state = CAN_STATE_ERROR_ACTIVE; - } - netif_device_attach(ndev); - netif_start_queue(ndev); - } - return 0; } @@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = { SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL) }; +static const struct xcan_devtype_data xcan_zynq_data = { + .caps = XCAN_CAP_WATERMARK, +}; + +/* Match table for OF platform binding */ +static const struct of_device_id xcan_of_match[] = { + { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data }, + { .compatible = "xlnx,axi-can-1.00.a", }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(of, xcan_of_match); + /** * xcan_probe - Platform registration call * @pdev: Handle to the platform device structure @@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev) struct resource *res; /* IO mem resources */ struct net_device *ndev; struct xcan_priv *priv; + const struct of_device_id *of_id; + int caps = 0; void __iomem *addr; - int ret, rx_max, tx_max; + int ret, rx_max, tx_max, tx_fifo_depth; /* Get the virtual base address for the device */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev) goto err; } - ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max); + ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", + &tx_fifo_depth); if (ret < 0) goto err; @@ -1094,6 +1261,30 @@ static int xcan_probe(struct platform_device *pdev) if (ret < 0) goto err; + of_id = of_match_device(xcan_of_match, &pdev->dev); + if (of_id) { + const struct xcan_devtype_data *devtype_data = of_id->data; + + if (devtype_data) + caps = devtype_data->caps; + } + + /* There is no way to directly figure out how many frames have been + * sent when the TXOK interrupt is processed. If watermark programming + * is supported, we can have 2 frames in the FIFO and use TXFEMP + * to determine if 1 or 2 frames have been sent. 
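+ * A sketch of that inference (illustrative only, assuming two frames + * were queued and a TXOK was just handled): + * frames_sent = (isr & XCAN_IXR_TXFEMP_MASK) ? 2 : 1; + * i.e. an empty TX FIFO means both frames completed, otherwise one.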
+ * Theoretically we should be able to use TXFWMEMP to determine up + * to 3 frames, but it seems that after putting a second frame in the + * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less + * than 2 frames in FIFO) is set anyway with no TXOK (a frame was + * sent), which is not a sensible state - possibly TXFWMEMP is not + * completely synchronized with the rest of the bits? + */ + if (caps & XCAN_CAP_WATERMARK) + tx_max = min(tx_fifo_depth, 2); + else + tx_max = 1; + /* Create a CAN device instance */ ndev = alloc_candev(sizeof(struct xcan_priv), tx_max); if (!ndev) @@ -1108,6 +1299,7 @@ static int xcan_probe(struct platform_device *pdev) CAN_CTRLMODE_BERR_REPORTING; priv->reg_base = addr; priv->tx_max = tx_max; + spin_lock_init(&priv->tx_lock); /* Get IRQ for the device */ ndev->irq = platform_get_irq(pdev, 0); @@ -1172,9 +1364,9 @@ static int xcan_probe(struct platform_device *pdev) pm_runtime_put(&pdev->dev); - netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n", + netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n", priv->reg_base, ndev->irq, priv->can.clock.freq, - priv->tx_max); + tx_fifo_depth, priv->tx_max); return 0; @@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev) return 0; } -/* Match table for OF platform binding */ -static const struct of_device_id xcan_of_match[] = { - { .compatible = "xlnx,zynq-can-1.0", }, - { .compatible = "xlnx,axi-can-1.00.a", }, - { /* end of list */ }, -}; -MODULE_DEVICE_TABLE(of, xcan_of_match); - static struct platform_driver xcan_driver = { .probe = xcan_probe, .remove = xcan_remove, diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index d040aeb45172..15c2a831edf1 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -1,7 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o -obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o +obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o +ifdef CONFIG_NET_DSA_LOOP +obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o +endif obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 274f3679f33d..acf64d4cd94c 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1549,6 +1549,18 @@ static const struct b53_chip_data b53_switch_chips[] = { .cpu_port = B53_CPU_PORT_25, .duplex_reg = B53_DUPLEX_STAT_FE, }, + { + .chip_id = BCM5389_DEVICE_ID, + .dev_name = "BCM5389", + .vlans = 4096, + .enabled_ports = 0x1f, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, { .chip_id = BCM5395_DEVICE_ID, .dev_name = "BCM5395", @@ -1872,6 +1884,7 @@ int b53_switch_detect(struct b53_device *dev) else dev->chip_id = BCM5365_DEVICE_ID; break; + case BCM5389_DEVICE_ID: case BCM5395_DEVICE_ID: case BCM5397_DEVICE_ID: case BCM5398_DEVICE_ID: diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c index fa7556f5d4fb..a533a90e3904 100644 --- a/drivers/net/dsa/b53/b53_mdio.c +++ b/drivers/net/dsa/b53/b53_mdio.c @@ -285,6 +285,7 @@ static const struct b53_io_ops b53_mdio_ops = { #define B53_BRCM_OUI_1 0x0143bc00 #define B53_BRCM_OUI_2 0x03625c00 #define B53_BRCM_OUI_3 0x00406000 +#define 
B53_BRCM_OUI_4 0x01410c00 static int b53_mdio_probe(struct mdio_device *mdiodev) { @@ -311,7 +312,8 @@ static int b53_mdio_probe(struct mdio_device *mdiodev) */ if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 && (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 && - (phy_id & 0xfffffc00) != B53_BRCM_OUI_3) { + (phy_id & 0xfffffc00) != B53_BRCM_OUI_3 && + (phy_id & 0xfffffc00) != B53_BRCM_OUI_4) { dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id); return -ENODEV; } @@ -360,6 +362,7 @@ static const struct of_device_id b53_of_match[] = { { .compatible = "brcm,bcm53125" }, { .compatible = "brcm,bcm53128" }, { .compatible = "brcm,bcm5365" }, + { .compatible = "brcm,bcm5389" }, { .compatible = "brcm,bcm5395" }, { .compatible = "brcm,bcm5397" }, { .compatible = "brcm,bcm5398" }, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 01bd8cbe9a3f..6b9e39ddaec1 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -48,6 +48,7 @@ struct b53_io_ops { enum { BCM5325_DEVICE_ID = 0x25, BCM5365_DEVICE_ID = 0x65, + BCM5389_DEVICE_ID = 0x89, BCM5395_DEVICE_ID = 0x95, BCM5397_DEVICE_ID = 0x97, BCM5398_DEVICE_ID = 0x98, diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index d7b53d53c116..72d6ffbfd638 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -167,7 +167,7 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable) reg = reg_readl(priv, REG_SPHY_CNTRL); if (enable) { reg |= PHY_RESET; - reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS); + reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS); reg_writel(priv, reg, REG_SPHY_CNTRL); udelay(21); reg = reg_readl(priv, REG_SPHY_CNTRL); diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index b471413d3df9..1e5a69b9d90a 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -569,7 +569,7 @@ static int lan9303_disable_processing(struct lan9303 *chip) { int p; - for (p = 0; p < LAN9303_NUM_PORTS; p++) { + for (p = 1; p < LAN9303_NUM_PORTS; p++) { int ret = lan9303_disable_processing_port(chip, p); if (ret) diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index c142b97add2c..3b073e152237 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -1122,6 +1122,7 @@ static const struct of_device_id mt7530_of_match[] = { { .compatible = "mediatek,mt7530" }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, mt7530_of_match); static struct mdio_driver mt7530_mdio_driver = { .probe = mt7530_probe, diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d74c7335c512..eebda5ec9676 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -339,7 +339,7 @@ static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip) u16 mask; mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &mask); - mask |= GENMASK(chip->g1_irq.nirqs, 0); + mask &= ~GENMASK(chip->g1_irq.nirqs, 0); mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask); free_irq(chip->irq, chip); @@ -395,7 +395,7 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip) return 0; out_disable: - mask |= GENMASK(chip->g1_irq.nirqs, 0); + mask &= ~GENMASK(chip->g1_irq.nirqs, 0); mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask); out_mapping: @@ -2153,6 +2153,19 @@ static const struct of_device_id mv88e6xxx_mdio_external_match[] = { { }, }; +static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip) + +{ + struct mv88e6xxx_mdio_bus *mdio_bus; + struct mii_bus 
*bus; + + list_for_each_entry(mdio_bus, &chip->mdios, list) { + bus = mdio_bus->bus; + + mdiobus_unregister(bus); + } +} + static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip, struct device_node *np) { @@ -2177,27 +2190,16 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip, match = of_match_node(mv88e6xxx_mdio_external_match, child); if (match) { err = mv88e6xxx_mdio_register(chip, child, true); - if (err) + if (err) { + mv88e6xxx_mdios_unregister(chip); return err; + } } } return 0; } -static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip) - -{ - struct mv88e6xxx_mdio_bus *mdio_bus; - struct mii_bus *bus; - - list_for_each_entry(mdio_bus, &chip->mdios, list) { - bus = mdio_bus->bus; - - mdiobus_unregister(bus); - } -} - static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds) { struct mv88e6xxx_chip *chip = ds->priv; diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 5ada7a41449c..9645c8f05c7f 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -473,7 +473,7 @@ qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode) static void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) { - u32 mask = QCA8K_PORT_STATUS_TXMAC; + u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; /* Port 0 and 6 have no internal PHY */ if ((port > 0) && (port < 6)) @@ -490,6 +490,7 @@ qca8k_setup(struct dsa_switch *ds) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; int ret, i, phy_mode = -1; + u32 mask; /* Make sure that port 0 is the cpu port */ if (!dsa_is_cpu_port(ds, 0)) { @@ -515,7 +516,10 @@ qca8k_setup(struct dsa_switch *ds) if (ret < 0) return ret; - /* Enable CPU Port */ + /* Enable CPU Port, force it to maximum bandwidth and full-duplex */ + mask = QCA8K_PORT_STATUS_SPEED_1000 | QCA8K_PORT_STATUS_TXFLOW | + QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_DUPLEX; + qca8k_write(priv, QCA8K_REG_PORT_STATUS(QCA8K_CPU_PORT), mask); qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); qca8k_port_set_status(priv, QCA8K_CPU_PORT, 1); @@ -584,6 +588,47 @@ qca8k_setup(struct dsa_switch *ds) return 0; } +static void +qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy) +{ + struct qca8k_priv *priv = ds->priv; + u32 reg; + + /* Force fixed-link setting for CPU port, skip others. 
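+ * The CPU port is typically described by a fixed-link subnode in the + * device tree, e.g. fixed-link { speed = <1000>; full-duplex; }; + * (illustrative), which is what phy_is_pseudo_fixed_link() detects + * below.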
*/ + if (!phy_is_pseudo_fixed_link(phy)) + return; + + /* Set port speed */ + switch (phy->speed) { + case 10: + reg = QCA8K_PORT_STATUS_SPEED_10; + break; + case 100: + reg = QCA8K_PORT_STATUS_SPEED_100; + break; + case 1000: + reg = QCA8K_PORT_STATUS_SPEED_1000; + break; + default: + dev_dbg(priv->dev, "port%d link speed %dMbps not supported.\n", + port, phy->speed); + return; + } + + /* Set duplex mode */ + if (phy->duplex == DUPLEX_FULL) + reg |= QCA8K_PORT_STATUS_DUPLEX; + + /* Force flow control */ + if (dsa_is_cpu_port(ds, port)) + reg |= QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_TXFLOW; + + /* Force link down before changing MAC options */ + qca8k_port_set_status(priv, port, 0); + qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg); + qca8k_port_set_status(priv, port, 1); +} + static int qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum) { @@ -832,6 +877,7 @@ qca8k_get_tag_protocol(struct dsa_switch *ds) static const struct dsa_switch_ops qca8k_switch_ops = { .get_tag_protocol = qca8k_get_tag_protocol, .setup = qca8k_setup, + .adjust_link = qca8k_adjust_link, .get_strings = qca8k_get_strings, .phy_read = qca8k_phy_read, .phy_write = qca8k_phy_write, @@ -863,6 +909,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev) return -ENOMEM; priv->bus = mdiodev->bus; + priv->dev = &mdiodev->dev; /* read the switches ID register */ id = qca8k_read(priv, QCA8K_REG_MASK_CTRL); @@ -934,6 +981,7 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, qca8k_suspend, qca8k_resume); static const struct of_device_id qca8k_of_match[] = { + { .compatible = "qca,qca8334" }, { .compatible = "qca,qca8337" }, { /* sentinel */ }, }; diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 1cf8a920d4ff..613fe5c50236 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -51,8 +51,10 @@ #define QCA8K_GOL_MAC_ADDR0 0x60 #define QCA8K_GOL_MAC_ADDR1 0x64 #define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) -#define QCA8K_PORT_STATUS_SPEED GENMASK(2, 0) -#define QCA8K_PORT_STATUS_SPEED_S 0 +#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0) +#define QCA8K_PORT_STATUS_SPEED_10 0 +#define QCA8K_PORT_STATUS_SPEED_100 0x1 +#define QCA8K_PORT_STATUS_SPEED_1000 0x2 #define QCA8K_PORT_STATUS_TXMAC BIT(2) #define QCA8K_PORT_STATUS_RXMAC BIT(3) #define QCA8K_PORT_STATUS_TXFLOW BIT(4) @@ -165,6 +167,7 @@ struct qca8k_priv { struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; struct dsa_switch *ds; struct mutex reg_mutex; + struct device *dev; }; struct qca8k_mib_desc { diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 52beba8c7a39..e3b7a71fcad9 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -331,6 +331,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); + io_sq->dma_addr_bits = ena_dev->dma_addr_bits; io_sq->desc_entry_size = (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 
sizeof(struct ena_eth_io_tx_desc) : diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index c6bd5e24005d..67df5053dc30 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1565,7 +1565,7 @@ static int ena_rss_configure(struct ena_adapter *adapter) static int ena_up_complete(struct ena_adapter *adapter) { - int rc, i; + int rc; rc = ena_rss_configure(adapter); if (rc) @@ -1584,17 +1584,6 @@ static int ena_up_complete(struct ena_adapter *adapter) ena_napi_enable_all(adapter); - /* Enable completion queues interrupt */ - for (i = 0; i < adapter->num_queues; i++) - ena_unmask_interrupt(&adapter->tx_ring[i], - &adapter->rx_ring[i]); - - /* schedule napi in case we had pending packets - * from the last time we disable napi - */ - for (i = 0; i < adapter->num_queues; i++) - napi_schedule(&adapter->ena_napi[i].napi); - return 0; } @@ -1731,7 +1720,7 @@ static int ena_create_all_io_rx_queues(struct ena_adapter *adapter) static int ena_up(struct ena_adapter *adapter) { - int rc; + int rc, i; netdev_dbg(adapter->netdev, "%s\n", __func__); @@ -1774,6 +1763,17 @@ static int ena_up(struct ena_adapter *adapter) set_bit(ENA_FLAG_DEV_UP, &adapter->flags); + /* Enable completion queues interrupt */ + for (i = 0; i < adapter->num_queues; i++) + ena_unmask_interrupt(&adapter->tx_ring[i], + &adapter->rx_ring[i]); + + /* schedule napi in case we had pending packets + * from the last time we disable napi + */ + for (i = 0; i < adapter->num_queues; i++) + napi_schedule(&adapter->ena_napi[i].napi); + return rc; err_up: diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 7ea72ef11a55..d272dc6984ac 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -1321,6 +1321,10 @@ #define MDIO_VEND2_AN_STAT 0x8002 #endif +#ifndef MDIO_VEND2_PMA_CDR_CONTROL +#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056 +#endif + #ifndef MDIO_CTRL1_SPEED1G #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) #endif @@ -1369,6 +1373,10 @@ #define XGBE_AN_CL37_TX_CONFIG_MASK 0x08 #define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100 +#define XGBE_PMA_CDR_TRACK_EN_MASK 0x01 +#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00 +#define XGBE_PMA_CDR_TRACK_EN_ON 0x01 + /* Bit setting and getting macros * The get macro will extract the current bit field value from within * the variable diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 7d128be61310..b91143947ed2 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -519,6 +519,22 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata) "debugfs_create_file failed\n"); } + if (pdata->vdata->an_cdr_workaround) { + pfile = debugfs_create_bool("an_cdr_workaround", 0600, + pdata->xgbe_debugfs, + &pdata->debugfs_an_cdr_workaround); + if (!pfile) + netdev_err(pdata->netdev, + "debugfs_create_bool failed\n"); + + pfile = debugfs_create_bool("an_cdr_track_early", 0600, + pdata->xgbe_debugfs, + &pdata->debugfs_an_cdr_track_early); + if (!pfile) + netdev_err(pdata->netdev, + "debugfs_create_bool failed\n"); + } + kfree(buf); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 608693d11bd7..75c4455e2271 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -595,7 +595,7 @@ static void 
xgbe_isr_task(unsigned long data) reissue_mask = 1 << 0; if (!pdata->per_channel_irq) - reissue_mask |= 0xffff < 4; + reissue_mask |= 0xffff << 4; XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index d91fa595be98..e31d9d1fb6a6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -349,6 +349,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); /* Call MDIO/PHY initialization routine */ + pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround; ret = pdata->phy_if.phy_init(pdata); if (ret) return ret; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 072b9f664597..119777986ea4 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -432,11 +432,16 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata) xgbe_an73_set(pdata, false, false); xgbe_an73_disable_interrupts(pdata); + pdata->an_start = 0; + netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n"); } static void xgbe_an_restart(struct xgbe_prv_data *pdata) { + if (pdata->phy_if.phy_impl.an_pre) + pdata->phy_if.phy_impl.an_pre(pdata); + switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: @@ -453,6 +458,9 @@ static void xgbe_an_restart(struct xgbe_prv_data *pdata) static void xgbe_an_disable(struct xgbe_prv_data *pdata) { + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: @@ -505,11 +513,11 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata, XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); - if (pdata->phy_if.phy_impl.kr_training_post) - pdata->phy_if.phy_impl.kr_training_post(pdata); - netif_dbg(pdata, link, pdata->netdev, "KR training initiated\n"); + + if (pdata->phy_if.phy_impl.kr_training_post) + pdata->phy_if.phy_impl.kr_training_post(pdata); } return XGBE_AN_PAGE_RECEIVED; @@ -637,11 +645,11 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata) return XGBE_AN_NO_LINK; } - xgbe_an73_disable(pdata); + xgbe_an_disable(pdata); xgbe_switch_mode(pdata); - xgbe_an73_restart(pdata); + xgbe_an_restart(pdata); return XGBE_AN_INCOMPAT_LINK; } @@ -820,6 +828,9 @@ static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata) pdata->an_result = pdata->an_state; pdata->an_state = XGBE_AN_READY; + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n", xgbe_state_as_string(pdata->an_result)); } @@ -903,6 +914,9 @@ static void xgbe_an73_state_machine(struct xgbe_prv_data *pdata) pdata->kx_state = XGBE_RX_BPA; pdata->an_start = 0; + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n", xgbe_state_as_string(pdata->an_result)); } @@ -1114,14 +1128,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata) if (pdata->tx_pause != pdata->phy.tx_pause) { new_state = 1; - pdata->hw_if.config_tx_flow_control(pdata); pdata->tx_pause = pdata->phy.tx_pause; + pdata->hw_if.config_tx_flow_control(pdata); } if (pdata->rx_pause != pdata->phy.rx_pause) { new_state = 1; - pdata->hw_if.config_rx_flow_control(pdata); pdata->rx_pause = 
pdata->phy.rx_pause; + pdata->hw_if.config_rx_flow_control(pdata); } /* Speed support */ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 3e5833cf1fab..82d1f416ee2a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -426,6 +426,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev) struct net_device *netdev = pdata->netdev; int ret = 0; + XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff); + pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER; XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl); @@ -454,6 +456,7 @@ static const struct xgbe_version_data xgbe_v2a = { .irq_reissue_support = 1, .tx_desc_prefetch = 5, .rx_desc_prefetch = 5, + .an_cdr_workaround = 1, }; static const struct xgbe_version_data xgbe_v2b = { @@ -468,6 +471,7 @@ static const struct xgbe_version_data xgbe_v2b = { .irq_reissue_support = 1, .tx_desc_prefetch = 5, .rx_desc_prefetch = 5, + .an_cdr_workaround = 1, }; static const struct pci_device_id xgbe_pci_table[] = { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 3304a291aa96..aac884314000 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -147,6 +147,14 @@ /* Rate-change complete wait/retry count */ #define XGBE_RATECHANGE_COUNT 500 +/* CDR delay values for KR support (in usec) */ +#define XGBE_CDR_DELAY_INIT 10000 +#define XGBE_CDR_DELAY_INC 10000 +#define XGBE_CDR_DELAY_MAX 100000 + +/* RRC frequency during link status check */ +#define XGBE_RRC_FREQUENCY 10 + enum xgbe_port_mode { XGBE_PORT_MODE_RSVD = 0, XGBE_PORT_MODE_BACKPLANE, @@ -245,6 +253,10 @@ enum xgbe_sfp_speed { #define XGBE_SFP_BASE_VENDOR_SN 4 #define XGBE_SFP_BASE_VENDOR_SN_LEN 16 +#define XGBE_SFP_EXTD_OPT1 1 +#define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1) +#define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3) + #define XGBE_SFP_EXTD_DIAG 28 #define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2) @@ -324,6 +336,7 @@ struct xgbe_phy_data { unsigned int sfp_gpio_address; unsigned int sfp_gpio_mask; + unsigned int sfp_gpio_inputs; unsigned int sfp_gpio_rx_los; unsigned int sfp_gpio_tx_fault; unsigned int sfp_gpio_mod_absent; @@ -355,6 +368,10 @@ struct xgbe_phy_data { unsigned int redrv_addr; unsigned int redrv_lane; unsigned int redrv_model; + + /* KR AN support */ + unsigned int phy_cdr_notrack; + unsigned int phy_cdr_delay; }; /* I2C, MDIO and GPIO lines are muxed, so only one device at a time */ @@ -974,6 +991,49 @@ static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata) phy_data->sfp_phy_avail = 1; } +static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data) +{ + u8 *sfp_extd = phy_data->sfp_eeprom.extd; + + if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS)) + return false; + + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) + return false; + + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los)) + return true; + + return false; +} + +static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data) +{ + u8 *sfp_extd = phy_data->sfp_eeprom.extd; + + if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT)) + return false; + + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) + return false; + + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault)) + return true; + + return false; +} + +static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data) +{ + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) + return false; + 
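+ /* the MOD_ABS GPIO input reads high while no module is seated */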
+ if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent)) + return true; + + return false; +} + static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; @@ -1019,6 +1079,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP) return; + /* Update transceiver signals (eeprom extd/options) */ + phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data); + phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data); + if (xgbe_phy_sfp_parse_quirks(pdata)) return; @@ -1184,7 +1248,6 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata) static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; - unsigned int gpio_input; u8 gpio_reg, gpio_ports[2]; int ret; @@ -1199,23 +1262,9 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) return; } - gpio_input = (gpio_ports[1] << 8) | gpio_ports[0]; - - if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) { - /* No GPIO, just assume the module is present for now */ - phy_data->sfp_mod_absent = 0; - } else { - if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent))) - phy_data->sfp_mod_absent = 0; - } - - if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) && - (gpio_input & (1 << phy_data->sfp_gpio_rx_los))) - phy_data->sfp_rx_los = 1; + phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0]; - if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) && - (gpio_input & (1 << phy_data->sfp_gpio_tx_fault))) - phy_data->sfp_tx_fault = 1; + phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data); } static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata) @@ -2361,7 +2410,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) return 1; /* No link, attempt a receiver reset cycle */ - if (phy_data->rrc_count++) { + if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) { phy_data->rrc_count = 0; xgbe_phy_rrc(pdata); } @@ -2669,6 +2718,103 @@ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata) return true; } +static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if (!pdata->debugfs_an_cdr_workaround) + return; + + if (!phy_data->phy_cdr_notrack) + return; + + usleep_range(phy_data->phy_cdr_delay, + phy_data->phy_cdr_delay + 500); + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + XGBE_PMA_CDR_TRACK_EN_MASK, + XGBE_PMA_CDR_TRACK_EN_ON); + + phy_data->phy_cdr_notrack = 0; +} + +static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if (!pdata->debugfs_an_cdr_workaround) + return; + + if (phy_data->phy_cdr_notrack) + return; + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + XGBE_PMA_CDR_TRACK_EN_MASK, + XGBE_PMA_CDR_TRACK_EN_OFF); + + xgbe_phy_rrc(pdata); + + phy_data->phy_cdr_notrack = 1; +} + +static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata) +{ + if (!pdata->debugfs_an_cdr_track_early) + xgbe_phy_cdr_track(pdata); +} + +static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata) +{ + if (pdata->debugfs_an_cdr_track_early) + xgbe_phy_cdr_track(pdata); +} + +static void xgbe_phy_an_post(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + if 
(phy_data->cur_mode != XGBE_MODE_KR) + break; + + xgbe_phy_cdr_track(pdata); + + switch (pdata->an_result) { + case XGBE_AN_READY: + case XGBE_AN_COMPLETE: + break; + default: + if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX) + phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC; + else + phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; + break; + } + break; + default: + break; + } +} + +static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != XGBE_MODE_KR) + break; + + xgbe_phy_cdr_notrack(pdata); + break; + default: + break; + } +} + static void xgbe_phy_stop(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; @@ -2680,6 +2826,9 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata) xgbe_phy_sfp_reset(phy_data); xgbe_phy_sfp_mod_absent(pdata); + /* Reset CDR support */ + xgbe_phy_cdr_track(pdata); + /* Power off the PHY */ xgbe_phy_power_off(pdata); @@ -2712,6 +2861,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata) /* Start in highest supported mode */ xgbe_phy_set_mode(pdata, phy_data->start_mode); + /* Reset CDR support */ + xgbe_phy_cdr_track(pdata); + /* After starting the I2C controller, we can check for an SFP */ switch (phy_data->port_mode) { case XGBE_PORT_MODE_SFP: @@ -3019,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) } } + phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; + /* Register for driving external PHYs */ mii = devm_mdiobus_alloc(pdata->dev); if (!mii) { @@ -3071,4 +3225,10 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if) phy_impl->an_advertising = xgbe_phy_an_advertising; phy_impl->an_outcome = xgbe_phy_an_outcome; + + phy_impl->an_pre = xgbe_phy_an_pre; + phy_impl->an_post = xgbe_phy_an_post; + + phy_impl->kr_training_pre = xgbe_phy_kr_training_pre; + phy_impl->kr_training_post = xgbe_phy_kr_training_post; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index ad102c8bac7b..95d4b56448c6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -833,6 +833,7 @@ struct xgbe_hw_if { /* This structure represents implementation specific routines for an * implementation of a PHY. All routines are required unless noted below. 
* Optional routines: + * an_pre, an_post * kr_training_pre, kr_training_post */ struct xgbe_phy_impl_if { @@ -875,6 +876,10 @@ struct xgbe_phy_impl_if { /* Process results of auto-negotiation */ enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *); + /* Pre/Post auto-negotiation support */ + void (*an_pre)(struct xgbe_prv_data *); + void (*an_post)(struct xgbe_prv_data *); + /* Pre/Post KR training enablement support */ void (*kr_training_pre)(struct xgbe_prv_data *); void (*kr_training_post)(struct xgbe_prv_data *); @@ -989,6 +994,7 @@ struct xgbe_version_data { unsigned int irq_reissue_support; unsigned int tx_desc_prefetch; unsigned int rx_desc_prefetch; + unsigned int an_cdr_workaround; }; struct xgbe_vxlan_data { @@ -1257,6 +1263,9 @@ struct xgbe_prv_data { unsigned int debugfs_xprop_reg; unsigned int debugfs_xi2c_reg; + + bool debugfs_an_cdr_workaround; + bool debugfs_an_cdr_track_early; }; /* Function prototypes*/ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 0207927dc8a6..4ebd53b3c7da 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -85,7 +85,9 @@ struct aq_hw_ops { void (*destroy)(struct aq_hw_s *self); int (*get_hw_caps)(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps); + struct aq_hw_caps_s *aq_hw_caps, + unsigned short device, + unsigned short subsystem_device); int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, unsigned int frags); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 483e97691eea..cc658a29cc33 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -222,7 +222,7 @@ static struct net_device *aq_nic_ndev_alloc(void) struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, const struct ethtool_ops *et_ops, - struct device *dev, + struct pci_dev *pdev, struct aq_pci_func_s *aq_pci_func, unsigned int port, const struct aq_hw_ops *aq_hw_ops) @@ -242,7 +242,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, ndev->netdev_ops = ndev_ops; ndev->ethtool_ops = et_ops; - SET_NETDEV_DEV(ndev, dev); + SET_NETDEV_DEV(ndev, &pdev->dev); ndev->if_port = port; self->ndev = ndev; @@ -254,7 +254,8 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port, &self->aq_hw_ops); - err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps); + err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps, + pdev->device, pdev->subsystem_device); if (err < 0) goto err_exit; @@ -309,6 +310,8 @@ int aq_nic_ndev_init(struct aq_nic_s *self) self->ndev->hw_features |= aq_hw_caps->hw_features; self->ndev->features = aq_hw_caps->hw_features; + self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM | + NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO; self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 4309983acdd6..3c9f8db03d5f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -71,7 +71,7 @@ struct aq_nic_cfg_s { struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops 
*ndev_ops, const struct ethtool_ops *et_ops, - struct device *dev, + struct pci_dev *pdev, struct aq_pci_func_s *aq_pci_func, unsigned int port, const struct aq_hw_ops *aq_hw_ops); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index cadaa646c89f..58c29d04b186 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -51,7 +51,8 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops, pci_set_drvdata(pdev, self); self->pdev = pdev; - err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps); + err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device, + pdev->subsystem_device); if (err < 0) goto err_exit; @@ -59,7 +60,7 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops, for (port = 0; port < self->ports; ++port) { struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops, - &pdev->dev, self, + pdev, self, port, aq_hw_ops); if (!aq_nic) { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 07b3c49a16a4..b0abd187cead 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -18,9 +18,20 @@ #include "hw_atl_a0_internal.h" static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps) + struct aq_hw_caps_s *aq_hw_caps, + unsigned short device, + unsigned short subsystem_device) { memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps)); + + if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001) + aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G; + + if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) { + aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G; + aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G; + } + return 0; } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index ec68c20efcbd..f4b3554b0b67 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -16,11 +16,23 @@ #include "hw_atl_utils.h" #include "hw_atl_llh.h" #include "hw_atl_b0_internal.h" +#include "hw_atl_llh_internal.h" static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self, - struct aq_hw_caps_s *aq_hw_caps) + struct aq_hw_caps_s *aq_hw_caps, + unsigned short device, + unsigned short subsystem_device) { memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps)); + + if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001) + aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G; + + if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) { + aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G; + aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G; + } + return 0; } @@ -357,6 +369,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, }; int err = 0; + u32 val; self->aq_nic_cfg = aq_nic_cfg; @@ -374,6 +387,16 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); + /* Force limit MRRS on RDM/TDM to 2K */ + val = aq_hw_read_reg(self, pci_reg_control6_adr); + aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404); + + /* TX DMA total request limit. B0 hardware is not capable of + * handling more than (8K-MRRS) of incoming DMA data.
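+ * Budget sketch, assuming the control6 write above does force MRRS + * to 2K: 8K - 2K leaves 6K of headroom for outstanding DMA.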
+ * The value written below is 24, in 256-byte units (24 * 256 = 6K). + */ + aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24); + err = aq_hw_err_from_flags(self); if (err < 0) goto err_exit; @@ -729,7 +752,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self, rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); rpfl2multicast_flr_en_set(self, - IS_FILTER_ENABLED(IFF_MULTICAST), 0); + IS_FILTER_ENABLED(IFF_ALLMULTI), 0); rpfl2_accept_all_mc_packets_set(self, IS_FILTER_ENABLED(IFF_ALLMULTI)); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index 5527fc0e5942..93450ec930e8 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -2343,6 +2343,9 @@ #define tx_dma_desc_base_addrmsw_adr(descriptor) \ (0x00007c04u + (descriptor) * 0x40) +/* tx dma total request limit */ +#define tx_dma_total_req_limit_adr 0x00007b20u + /* tx interrupt moderation control register definitions * Preprocessor definitions for TX Interrupt Moderation Control Register * Base Address: 0x00008980 @@ -2369,6 +2372,9 @@ /* default value of bitfield reg_res_dsbl */ #define pci_reg_res_dsbl_default 0x1 +/* PCI core control register */ +#define pci_reg_control6_adr 0x1014u + /* global microprocessor scratch pad definitions */ #define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4) diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 3241af1ce718..5b422be56165 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -210,39 +210,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget) continue; } - pktlen = info & LEN_MASK; - stats->rx_packets++; - stats->rx_bytes += pktlen; - skb = rx_buff->skb; - skb_put(skb, pktlen); - skb->dev = ndev; - skb->protocol = eth_type_trans(skb, ndev); - - dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), - dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); - - /* Prepare the BD for next cycle */ - rx_buff->skb = netdev_alloc_skb_ip_align(ndev, - EMAC_BUFFER_SIZE); - if (unlikely(!rx_buff->skb)) { + /* Prepare the BD for the next cycle. Call netif_receive_skb() + * only if a new skb was allocated and mapped, to avoid holes + * in the RX FIFO.
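+ * Ordering sketch: allocate and map the replacement skb first; on + * any failure, hand the old buffer back to the EMAC and count a + * drop, so every ring slot always owns a mapped buffer.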
+ */ + skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE); + if (unlikely(!skb)) { + if (net_ratelimit()) + netdev_err(ndev, "cannot allocate skb\n"); + /* Return ownership to EMAC */ + rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); stats->rx_errors++; - /* Because receive_skb is below, increment rx_dropped */ stats->rx_dropped++; continue; } - /* receive_skb only if new skb was allocated to avoid holes */ - netif_receive_skb(skb); - - addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, + addr = dma_map_single(&ndev->dev, (void *)skb->data, EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(&ndev->dev, addr)) { if (net_ratelimit()) - netdev_err(ndev, "cannot dma map\n"); - dev_kfree_skb(rx_buff->skb); + netdev_err(ndev, "cannot map dma buffer\n"); + dev_kfree_skb(skb); + /* Return ownership to EMAC */ + rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); stats->rx_errors++; + stats->rx_dropped++; continue; } + + /* unmap previously mapped skb */ + dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), + dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); + + pktlen = info & LEN_MASK; + stats->rx_packets++; + stats->rx_bytes += pktlen; + skb_put(rx_buff->skb, pktlen); + rx_buff->skb->dev = ndev; + rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev); + + netif_receive_skb(rx_buff->skb); + + rx_buff->skb = skb; dma_unmap_addr_set(rx_buff, addr, addr); dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index e278e3d96ee0..c770ca37c9b2 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct platform_device *pdev) /* Optional regulator for PHY */ priv->regulator = devm_regulator_get_optional(dev, "phy"); if (IS_ERR(priv->regulator)) { - if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto out_clk_disable; + } dev_err(dev, "no regulator found\n"); priv->regulator = NULL; } @@ -220,9 +222,11 @@ static int emac_rockchip_probe(struct platform_device *pdev) /* RMII TX/RX needs always a rate of 25MHz */ err = clk_set_rate(priv->macclk, 25000000); - if (err) + if (err) { dev_err(dev, "failed to change mac clock rate (%d)\n", err); + goto out_clk_disable_macclk; + } } err = arc_emac_probe(ndev, interface); @@ -232,7 +236,8 @@ static int emac_rockchip_probe(struct platform_device *pdev) } return 0; - +out_clk_disable_macclk: + clk_disable_unprepare(priv->macclk); out_regulator_disable: if (priv->regulator) regulator_disable(priv->regulator); diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 567ee54504bc..5e5022fa1d04 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct alx_priv *alx = pci_get_drvdata(pdev); struct alx_hw *hw = &alx->hw; + int err; alx_reset_phy(hw); if (!netif_running(alx->dev)) return 0; netif_device_attach(alx->dev); - return __alx_open(alx, true); + + rtnl_lock(); + err = __alx_open(alx, true); + rtnl_unlock(); + + return err; } static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 4f3845a58126..68470c7c630a 100644 ---
a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1062,7 +1062,8 @@ static int bcm_enet_open(struct net_device *dev) val = enet_readl(priv, ENET_CTL_REG); val |= ENET_CTL_ENABLE_MASK; enet_writel(priv, val, ENET_CTL_REG); - enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); + if (priv->dma_has_sram) + enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); enet_dmac_writel(priv, priv->dma_chan_en_mask, ENETDMAC_CHANCFG, priv->rx_chan); @@ -1773,7 +1774,9 @@ static int bcm_enet_probe(struct platform_device *pdev) ret = PTR_ERR(priv->mac_clk); goto out; } - clk_prepare_enable(priv->mac_clk); + ret = clk_prepare_enable(priv->mac_clk); + if (ret) + goto out_put_clk_mac; /* initialize default and fetch platform data */ priv->rx_ring_size = BCMENET_DEF_RX_DESC; @@ -1805,9 +1808,11 @@ static int bcm_enet_probe(struct platform_device *pdev) if (IS_ERR(priv->phy_clk)) { ret = PTR_ERR(priv->phy_clk); priv->phy_clk = NULL; - goto out_put_clk_mac; + goto out_disable_clk_mac; } - clk_prepare_enable(priv->phy_clk); + ret = clk_prepare_enable(priv->phy_clk); + if (ret) + goto out_put_clk_phy; } /* do minimal hardware init to be able to probe mii bus */ @@ -1901,13 +1906,16 @@ static int bcm_enet_probe(struct platform_device *pdev) out_uninit_hw: /* turn off mdc clock */ enet_writel(priv, 0, ENET_MIISC_REG); - if (priv->phy_clk) { + if (priv->phy_clk) clk_disable_unprepare(priv->phy_clk); + +out_put_clk_phy: + if (priv->phy_clk) clk_put(priv->phy_clk); - } -out_put_clk_mac: +out_disable_clk_mac: clk_disable_unprepare(priv->mac_clk); +out_put_clk_mac: clk_put(priv->mac_clk); out: free_netdev(dev); @@ -2752,7 +2760,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev) ret = PTR_ERR(priv->mac_clk); goto out_unmap; } - clk_enable(priv->mac_clk); + ret = clk_prepare_enable(priv->mac_clk); + if (ret) + goto out_put_clk; priv->rx_chan = 0; priv->tx_chan = 1; @@ -2773,7 +2783,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev) ret = register_netdev(dev); if (ret) - goto out_put_clk; + goto out_disable_clk; netif_carrier_off(dev); platform_set_drvdata(pdev, dev); @@ -2782,6 +2792,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev) return 0; +out_disable_clk: + clk_disable_unprepare(priv->mac_clk); + out_put_clk: clk_put(priv->mac_clk); @@ -2813,6 +2826,9 @@ static int bcm_enetsw_remove(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); + clk_disable_unprepare(priv->mac_clk); + clk_put(priv->mac_clk); + free_netdev(dev); return 0; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index eb441e5e2cd8..0fff2432ab4c 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -855,10 +855,12 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring, static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, struct bcm_sysport_tx_ring *ring) { - unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; unsigned int pkts_compl = 0, bytes_compl = 0; struct net_device *ndev = priv->netdev; + unsigned int txbds_processed = 0; struct bcm_sysport_cb *cb; + unsigned int txbds_ready; + unsigned int c_index; u32 hw_ind; /* Clear status before servicing to reduce spurious interrupts */ @@ -871,29 +873,23 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, /* Compute how many descriptors 
have been processed since last call */ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; - ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); - - last_c_index = ring->c_index; - num_tx_cbs = ring->size; - - c_index &= (num_tx_cbs - 1); - - if (c_index >= last_c_index) - last_tx_cn = c_index - last_c_index; - else - last_tx_cn = num_tx_cbs - last_c_index + c_index; + txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK; netif_dbg(priv, tx_done, ndev, - "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", - ring->index, c_index, last_tx_cn, last_c_index); + "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", + ring->index, ring->c_index, c_index, txbds_ready); - while (last_tx_cn-- > 0) { - cb = ring->cbs + last_c_index; + while (txbds_processed < txbds_ready) { + cb = &ring->cbs[ring->clean_index]; bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl); ring->desc_count++; - last_c_index++; - last_c_index &= (num_tx_cbs - 1); + txbds_processed++; + + if (likely(ring->clean_index < ring->size - 1)) + ring->clean_index++; + else + ring->clean_index = 0; } u64_stats_update_begin(&priv->syncp); @@ -1406,6 +1402,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); ring->index = index; ring->size = size; + ring->clean_index = 0; ring->alloc_size = ring->size; ring->desc_cpu = p; ring->desc_count = ring->size; @@ -1854,8 +1851,8 @@ static int bcm_sysport_open(struct net_device *dev) if (!priv->is_lite) priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); else - priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) & - GIB_FCS_STRIP); + priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) & + GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT); phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, 0, priv->phy_interface); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 82e401df199e..86ae751ccb5c 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -277,7 +277,8 @@ struct bcm_rsb { #define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT) #define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT) #define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT) -#define GIB_FCS_STRIP (1 << 6) +#define GIB_FCS_STRIP_SHIFT 6 +#define GIB_FCS_STRIP (1 << GIB_FCS_STRIP_SHIFT) #define GIB_LCL_LOOP_EN (1 << 7) #define GIB_LCL_LOOP_TXEN (1 << 8) #define GIB_RMT_LOOP_EN (1 << 9) @@ -706,7 +707,7 @@ struct bcm_sysport_tx_ring { unsigned int desc_count; /* Number of descriptors */ unsigned int curr_desc; /* Current descriptor */ unsigned int c_index; /* Last consumer index */ - unsigned int p_index; /* Current producer index */ + unsigned int clean_index; /* Current clean index */ struct bcm_sysport_cb *cbs; /* Transmit control blocks */ struct dma_desc *desc_cpu; /* CPU view of the descriptor */ struct bcm_sysport_priv *priv; /* private context backpointer */ diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 48d672b204a4..a4080f18135c 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -532,7 +532,8 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac, int i; for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) { - int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN; + u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1); + unsigned int len = ctl1 
& BGMAC_DESC_CTL1_LEN; slot = &ring->slots[i]; dev_kfree_skb(slot->skb); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 4040d846da8e..40d02fec2747 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -479,9 +479,9 @@ struct bgmac_rx_header { struct bgmac { union { struct { - void *base; - void *idm_base; - void *nicpm_base; + void __iomem *base; + void __iomem *idm_base; + void __iomem *nicpm_base; } plat; struct { struct bcma_device *core; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 1216c1f1e052..6465414dad74 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3030,7 +3030,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) del_timer_sync(&bp->timer); - if (IS_PF(bp)) { + if (IS_PF(bp) && !BP_NOMCP(bp)) { /* Set ALWAYS_ALIVE bit in shmem */ bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; bnx2x_drv_pulse(bp); @@ -3116,7 +3116,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bp->cnic_loaded = false; /* Clear driver version indication in shmem */ - if (IS_PF(bp)) + if (IS_PF(bp) && !BP_NOMCP(bp)) bnx2x_update_mng_version(bp); /* Check if there are pending parity attentions. If there are - set diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 7dd83d0ef0a0..22243c480a05 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -588,7 +588,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, * slots for the highest priority. */ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS : - NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); + NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); /* Mapping between the CREDIT_WEIGHT registers and actual client * numbers */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c12b4d3e946e..e855a271db48 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9578,6 +9578,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp) do { bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); + + /* If we read all 0xFFs, it means we are in a PCI error state and + * should bail out to avoid crashes on the adapter's FW reads.
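+ * MMIO reads from a device in the PCI error state return all-ones, + * so a shmem_base of 0xFFFFFFFF is the usual surprise-removal + * signature.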
+ */ + if (bp->common.shmem_base == 0xFFFFFFFF) { + bp->flags |= NO_MCP_FLAG; + return -ENODEV; + } + if (bp->common.shmem_base) { val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); if (val & SHR_MEM_VALIDITY_MB) @@ -14315,7 +14324,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) BNX2X_ERR("IO slot reset --> driver unload\n"); /* MCP should have been reset; Need to wait for validity */ - bnx2x_init_shmem(bp); + if (bnx2x_init_shmem(bp)) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { u32 v; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index dc5de275352a..94931318587c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1698,12 +1698,16 @@ static int bnxt_async_event_process(struct bnxt *bp, if (BNXT_VF(bp)) goto async_event_process_exit; - if (data1 & 0x20000) { + + /* print unsupported speed warning in forced speed mode only */ + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && + (data1 & 0x20000)) { u16 fw_speed = link_info->force_link_speed; u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); - netdev_warn(bp->dev, "Link speed %d no longer supported\n", - speed); + if (speed != SPEED_UNKNOWN) + netdev_warn(bp->dev, "Link speed %d no longer supported\n", + speed); } set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); /* fall thru */ @@ -1875,7 +1879,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) * here forever if we consistently cannot allocate * buffers. */ - else if (rc == -ENOMEM) + else if (rc == -ENOMEM && budget) rx_pkts++; else if (rc == -EBUSY) /* partial completion */ break; @@ -1961,7 +1965,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); - if (likely(rc == -EIO)) + if (likely(rc == -EIO) && budget) rx_pkts++; else if (rc == -EBUSY) /* partial completion */ break; @@ -3804,6 +3808,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_tpa_cfg_input req = {0}; + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) + return 0; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); if (tpa_flags) { @@ -5920,6 +5927,9 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) } mutex_unlock(&bp->hwrm_cmd_lock); + if (!BNXT_SINGLE_PF(bp)) + return 0; + diff = link_info->support_auto_speeds ^ link_info->advertising; if ((link_info->support_auto_speeds | diff) != link_info->support_auto_speeds) { @@ -8218,8 +8228,9 @@ static void bnxt_shutdown(struct pci_dev *pdev) if (netif_running(dev)) dev_close(dev); + bnxt_ulp_shutdown(bp); + if (system_state == SYSTEM_POWER_OFF) { - bnxt_ulp_shutdown(bp); bnxt_clear_int_mode(bp); pci_wake_from_d3(pdev, bp->wol); pci_set_power_state(pdev, PCI_D3hot); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 3cbe771b3352..a22336fef66b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2133,8 +2133,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev, /* Read A2 portion of the EEPROM */ if (length) { start -= ETH_MODULE_SFF_8436_LEN; - bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start, - length, data); + rc = bnxt_read_sfp_module_eeprom_info(bp, 
I2C_DEV_ADDR_A2, 1, + start, length, data); } return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 5ee18660bc33..c9617675f934 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); return -EINVAL; } - if (vf_id >= bp->pf.max_vfs) { + if (vf_id >= bp->pf.active_vfs) { netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); return -EINVAL; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 7dd3d131043a..6a185344b378 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -327,7 +327,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, } /* If all IP and L4 fields are wildcarded then this is an L2 flow */ - if (is_wildcard(&l3_mask, sizeof(l3_mask)) && + if (is_wildcard(l3_mask, sizeof(*l3_mask)) && is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) { flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2; } else { diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 656e6af70f0a..1b1d2a67f412 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -8723,14 +8723,15 @@ static void tg3_free_consistent(struct tg3 *tp) tg3_mem_rx_release(tp); tg3_mem_tx_release(tp); - /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */ - tg3_full_lock(tp, 0); + /* tp->hw_stats can be referenced safely: + * 1. under rtnl_lock + * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set. + */ if (tp->hw_stats) { dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), tp->hw_stats, tp->stats_mapping); tp->hw_stats = NULL; } - tg3_full_unlock(tp); } /* @@ -9278,6 +9279,15 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_restore_clk(tp); + /* Increase the core clock speed to fix tx timeout issue for 5762 + * with 100Mbps link speed. + */ + if (tg3_asic_rev(tp) == ASIC_REV_5762) { + val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); + tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | + TG3_CPMU_MAC_ORIDE_ENABLE); + } + /* Reprobe ASF enable state. */ tg3_flag_clear(tp, ENABLE_ASF); tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | @@ -10052,6 +10062,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) tw32(GRC_MODE, tp->grc_mode | val); + /* On one of the AMD platform, MRRS is restricted to 4000 because of + * south bridge limitation. As a workaround, Driver is setting MRRS + * to 2048 instead of default 4096. + */ + if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && + tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) { + val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK; + tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048); + } + /* Setup the timer prescalar register. Clock is always 66Mhz. */ val = tr32(GRC_MISC_CFG); val &= ~0xff; @@ -14157,7 +14177,7 @@ static void tg3_get_stats64(struct net_device *dev, struct tg3 *tp = netdev_priv(dev); spin_lock_bh(&tp->lock); - if (!tp->hw_stats) { + if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) { *stats = tp->net_stats_prev; spin_unlock_bh(&tp->lock); return; @@ -14227,7 +14247,10 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) /* Reset PHY, otherwise the read DMA engine will be in a mode that * breaks all requests to 256 bytes. 
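
The bnxt_tc.c hunk above fixes a classic sizeof bug: with l3_mask already a pointer, is_wildcard(&l3_mask, sizeof(l3_mask)) scans the pointer variable itself rather than the mask it points to. A sketch of the pattern; the helper body here is an assumption consistent with how the call sites use it:

#include <linux/types.h>

/* Plausible helper: true if every byte of the mask is zero. */
static bool is_wildcard(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0)
			return false;
	return true;
}

/* Given "struct flow_l3_mask *l3_mask" (hypothetical type):
 *   is_wildcard(&l3_mask, sizeof(l3_mask))  - checks 8 pointer bytes (bug)
 *   is_wildcard(l3_mask, sizeof(*l3_mask))  - checks the mask itself (fix)
 */
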
*/ - if (tg3_asic_rev(tp) == ASIC_REV_57766) + if (tg3_asic_rev(tp) == ASIC_REV_57766 || + tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) reset_phy = true; err = tg3_restart_hw(tp, reset_phy); diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index c2d02d02d1e6..b057f71aed48 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -96,6 +96,7 @@ #define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106 #define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109 #define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a +#define TG3PCI_SUBDEVICE_ID_DELL_5762 0x07f0 #define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a @@ -281,6 +282,9 @@ #define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */ #define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */ /* 0xa8 --> 0xb8 unused */ +#define TG3PCI_DEV_STATUS_CTRL 0x000000b4 +#define MAX_READ_REQ_SIZE_2048 0x00004000 +#define MAX_READ_REQ_MASK 0x00007000 #define TG3PCI_DUAL_MAC_CTRL 0x000000b8 #define DUAL_MAC_CTRL_CH_MASK 0x00000003 #define DUAL_MAC_CTRL_ID 0x00000004 diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index 2220c771092b..678835136bf8 100755 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -170,10 +170,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) if (delta > TSU_NSEC_MAX_VAL) { gem_tsu_get_time(&bp->ptp_clock_info, &now); - if (sign) - now = timespec64_sub(now, then); - else - now = timespec64_add(now, then); + now = timespec64_add(now, then); gem_tsu_set_time(&bp->ptp_clock_info, (const struct timespec64 *)&now); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 2e993ce43b66..4d2db22e011b 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1289,6 +1289,9 @@ static int liquidio_stop(struct net_device *netdev) struct octeon_device *oct = lio->oct_dev; struct napi_struct *napi, *n; + /* tell Octeon to stop forwarding packets to host */ + send_rx_ctrl_cmd(lio, 0); + if (oct->props[lio->ifidx].napi_enabled) { list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) napi_disable(napi); @@ -1306,9 +1309,6 @@ static int liquidio_stop(struct net_device *netdev) netif_carrier_off(netdev); lio->link_changes++; - /* tell Octeon to stop forwarding packets to host */ - send_rx_ctrl_cmd(lio, 0); - ifstate_reset(lio, LIO_IFSTATE_RUNNING); txqs_stop(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 805ab45e9b5a..2237ef8e4344 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1832,6 +1832,11 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) nic->pdev = pdev; nic->pnicvf = nic; nic->max_queues = qcount; + /* If the number of CPUs is too low, there won't be any queues left + * for XDP_TX, hence double it.
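
In the macb_ptp.c hunk above, the sign branch is redundant because the signed delta is (in the surrounding driver code) converted with ns_to_timespec64(), which already yields a negative timespec for a negative delta; a single timespec64_add() therefore covers both directions. A sketch under that assumption:

#include <linux/time64.h>

/* Sketch: apply a signed nanosecond delta to a TSU timestamp. */
static void sample_tsu_adjust(struct timespec64 *now, s64 delta_ns)
{
	/* ns_to_timespec64() preserves the sign of delta_ns, so one
	 * timespec64_add() handles both directions; a separate
	 * sign/sub branch would apply the sign twice.
	 */
	struct timespec64 then = ns_to_timespec64(delta_ns);

	*now = timespec64_add(*now, then);
}
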
+ */ + if (!nic->t88) + nic->max_queues *= 2; /* MAP VF's configuration registers */ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index d4496e9afcdf..a3d12dbde95b 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -1355,7 +1355,8 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, /* Offload checksum calculation to HW */ if (skb->ip_summed == CHECKSUM_PARTIAL) { - hdr->csum_l3 = 1; /* Enable IP csum calculation */ + if (ip.v4->version == 4) + hdr->csum_l3 = 1; /* Enable IP csum calculation */ hdr->l3_offset = skb_network_offset(skb); hdr->l4_offset = skb_transport_offset(skb); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 6a015362c340..bf291e90cdb0 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -51,6 +51,7 @@ #include #include #include +#include #include "common.h" #include "cxgb3_ioctl.h" @@ -2268,6 +2269,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) if (t.qset_idx >= nqsets) return -EINVAL; + t.qset_idx = array_index_nospec(t.qset_idx, nqsets); q = &adapter->params.sge.qset[q1 + t.qset_idx]; t.rspq_size = q->rspq_size; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 92d9d795d874..44a0d04dd8a0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -815,8 +815,6 @@ static int setup_fw_sge_queues(struct adapter *adap) err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], adap->msi_idx, NULL, fwevtq_handler, NULL, -1); - if (err) - t4_free_sge_resources(adap); return err; } @@ -4679,7 +4677,6 @@ static void dummy_setup(struct net_device *dev) /* Initialize the device structure. */ dev->netdev_ops = &cxgb4_mgmt_netdev_ops; dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; - dev->needs_free_netdev = true; } static int config_mgmt_dev(struct pci_dev *pdev) @@ -5117,6 +5114,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto out_free_dev; + err = setup_fw_sge_queues(adapter); + if (err) { + dev_err(adapter->pdev_dev, + "FW sge queue allocation failed, err %d", err); + goto out_free_dev; + } + /* * The card is now ready to go. 
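
The cxgb3 hunk above is a Spectre-v1 mitigation: qset_idx arrives from an ioctl, and the bounds check alone only protects the architectural path. array_index_nospec() clamps the index on the speculative path as well. A generic sketch of the idiom:

#include <linux/nospec.h>
#include <linux/errno.h>
#include <linux/types.h>

static int sample_get_entry(const u32 *table, size_t n, size_t idx, u32 *out)
{
	if (idx >= n)
		return -EINVAL;
	/* Clamp idx under speculative execution too, so a
	 * mispredicted branch cannot leak out-of-bounds data
	 * through the cache.
	 */
	idx = array_index_nospec(idx, n);
	*out = table[idx];
	return 0;
}
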
If any errors occur during device * registration we do not fail the whole card but rather proceed only @@ -5165,7 +5169,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) cxgb4_ptp_init(adapter); print_adapter_info(adapter); - setup_fw_sge_queues(adapter); return 0; sriov: @@ -5221,6 +5224,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) #endif out_free_dev: + t4_free_sge_resources(adapter); free_some_resources(adapter); if (adapter->flags & USING_MSIX) free_msix_info(adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 71a315bc1409..99a9d5278369 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -342,6 +342,7 @@ static void free_queues_uld(struct adapter *adap, unsigned int uld_type) { struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + adap->sge.uld_rxq_info[uld_type] = NULL; kfree(rxq_info->rspq_id); kfree(rxq_info->uldrxq); kfree(rxq_info); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index b65ce26ff72f..1802debbd3c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2632,7 +2632,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) } #define EEPROM_STAT_ADDR 0x7bfc -#define VPD_SIZE 0x800 #define VPD_BASE 0x400 #define VPD_BASE_OLD 0 #define VPD_LEN 1024 @@ -2670,15 +2669,6 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p) if (!vpd) return -ENOMEM; - /* We have two VPD data structures stored in the adapter VPD area. - * By default, Linux calculates the size of the VPD area by traversing - * the first VPD area at offset 0x0, so we need to tell the OS what - * our real VPD size is. - */ - ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE); - if (ret < 0) - goto out; - /* Card information normally starts at VPD_BASE but early cards had * it at 0. */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 05498e7f2840..6246003f9922 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2619,8 +2619,8 @@ void t4vf_sge_stop(struct adapter *adapter) int t4vf_sge_init(struct adapter *adapter) { struct sge_params *sge_params = &adapter->params.sge; - u32 fl0 = sge_params->sge_fl_buffer_size[0]; - u32 fl1 = sge_params->sge_fl_buffer_size[1]; + u32 fl_small_pg = sge_params->sge_fl_buffer_size[0]; + u32 fl_large_pg = sge_params->sge_fl_buffer_size[1]; struct sge *s = &adapter->sge; /* @@ -2628,9 +2628,20 @@ int t4vf_sge_init(struct adapter *adapter) * the Physical Function Driver. Ideally we should be able to deal * with _any_ configuration. Practice is different ... */ - if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) { + + /* We only bother using the Large Page logic if the Large Page Buffer + * is larger than our Page Size Buffer. + */ + if (fl_large_pg <= fl_small_pg) + fl_large_pg = 0; + + /* The Page Size Buffer must be exactly equal to our Page Size and the + * Large Page Size Buffer should be 0 (per above) or a power of 2. 
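
The t4vf free-list validation just below relies on the classic power-of-two idiom; note that (x & (x - 1)) == 0 also accepts x == 0, which is exactly the "0 (per above) or a power of 2" rule the comment states. A standalone sketch:

#include <linux/types.h>

/* True for 0, 1, 2, 4, 8, ...: a power of two has a single bit
 * set, so clearing its lowest set bit leaves 0; x == 0 passes
 * trivially, matching the "zero or a power of two" rule for the
 * large free-list buffer.
 */
static bool zero_or_pow2(u32 x)
{
	return (x & (x - 1)) == 0;
}
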
+ */ + if (fl_small_pg != PAGE_SIZE || + (fl_large_pg & (fl_large_pg - 1)) != 0) { dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n", - fl0, fl1); + fl_small_pg, fl_large_pg); return -EINVAL; } if ((sge_params->sge_control & RXPKTCPLMODE_F) != @@ -2642,8 +2653,8 @@ int t4vf_sge_init(struct adapter *adapter) /* * Now translate the adapter parameters into our internal forms. */ - if (fl1) - s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; + if (fl_large_pg) + s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64); s->pktshift = PKTSHIFT_G(sge_params->sge_control); diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index d24ee1ad3be1..a03a32a4ffca 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1897,6 +1897,8 @@ static int enic_open(struct net_device *netdev) } for (i = 0; i < enic->rq_count; i++) { + /* enable rq before updating rq desc */ + vnic_rq_enable(&enic->rq[i]); vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); /* Need at least one buffer on ring to get going */ if (vnic_rq_desc_used(&enic->rq[i]) == 0) { @@ -1908,8 +1910,6 @@ static int enic_open(struct net_device *netdev) for (i = 0; i < enic->wq_count; i++) vnic_wq_enable(&enic->wq[i]); - for (i = 0; i < enic->rq_count; i++) - vnic_rq_enable(&enic->rq[i]); if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) enic_dev_add_station_addr(enic); @@ -1935,8 +1935,12 @@ static int enic_open(struct net_device *netdev) return 0; err_out_free_rq: - for (i = 0; i < enic->rq_count; i++) + for (i = 0; i < enic->rq_count; i++) { + err = vnic_rq_disable(&enic->rq[i]); + if (err) + return err; vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); + } enic_dev_notify_unset(enic); err_out_free_intr: enic_unset_affinity_hint(enic); @@ -2699,11 +2703,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); /* Query PCI controller on system for DMA addressing - * limitation for the device. Try 64-bit first, and + * limitation for the device. Try 47-bit first, and * fail to 32-bit. */ - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47)); if (err) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { @@ -2717,10 +2721,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_release_regions; } } else { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47)); if (err) { dev_err(dev, "Unable to obtain %u-bit DMA " - "for consistent allocations, aborting\n", 64); + "for consistent allocations, aborting\n", 47); goto err_out_release_regions; } using_dac = 1; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0e3d9f39a807..7e2b70c2bba3 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3294,7 +3294,9 @@ void be_detect_error(struct be_adapter *adapter) if ((val & POST_STAGE_FAT_LOG_START) != POST_STAGE_FAT_LOG_START && (val & POST_STAGE_ARMFW_UE) - != POST_STAGE_ARMFW_UE) + != POST_STAGE_ARMFW_UE && + (val & POST_STAGE_RECOVERABLE_ERR) + != POST_STAGE_RECOVERABLE_ERR) return; } @@ -4634,6 +4636,15 @@ int be_update_queues(struct be_adapter *adapter) be_schedule_worker(adapter); + /* + * The IF was destroyed and re-created. 
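
The enic hunk above narrows the streaming DMA mask from 64 to 47 bits (the device's real addressing limit) while keeping the usual probe-time fallback to 32-bit. A condensed sketch of that pattern, using the same pci_set_dma_mask()/pci_set_consistent_dma_mask() calls:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int sample_set_dma_masks(struct pci_dev *pdev)
{
	int err;

	/* Try the device's true limit first (47 bits here), then
	 * fall back to 32-bit DMA; keep the coherent mask in step.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
	if (!err)
		return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;
	return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
}
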
We need to clear + * all promiscuous flags valid for the destroyed IF. + * Without this promisc mode is not restored during + * be_open() because the driver thinks that it is + * already enabled in HW. + */ + adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS; + if (netif_running(netdev)) status = be_open(netdev); diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index e92859dab7ae..e191c4ebeaf4 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -257,8 +257,8 @@ enum rx_desc_status_bits { RXFSD = 0x00000800, /* first descriptor */ RXLSD = 0x00000400, /* last descriptor */ ErrorSummary = 0x80, /* error summary */ - RUNT = 0x40, /* runt packet received */ - LONG = 0x20, /* long packet received */ + RUNTPKT = 0x40, /* runt packet received */ + LONGPKT = 0x20, /* long packet received */ FAE = 0x10, /* frame align error */ CRC = 0x08, /* crc error */ RXER = 0x04, /* receive error */ @@ -1632,7 +1632,7 @@ static int netdev_rx(struct net_device *dev) dev->name, rx_status); dev->stats.rx_errors++; /* end of a packet. */ - if (rx_status & (LONG | RUNT)) + if (rx_status & (LONGPKT | RUNTPKT)) dev->stats.rx_length_errors++; if (rx_status & RXER) dev->stats.rx_frame_errors++; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 42258060f142..519a021c0a25 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1930,8 +1930,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, goto csum_failed; } + /* SGT[0] is used by the linear part */ sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom); - qm_sg_entry_set_len(&sgt[0], skb_headlen(skb)); + frag_len = skb_headlen(skb); + qm_sg_entry_set_len(&sgt[0], frag_len); sgt[0].bpid = FSL_DPAA_BPID_INV; sgt[0].offset = 0; addr = dma_map_single(dev, skb->data, @@ -1944,9 +1946,9 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, qm_sg_entry_set64(&sgt[0], addr); /* populate the rest of SGT entries */ - frag = &skb_shinfo(skb)->frags[0]; - frag_len = frag->size; - for (i = 1; i <= nr_frags; i++, frag++) { + for (i = 0; i < nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + frag_len = frag->size; WARN_ON(!skb_frag_page(frag)); addr = skb_frag_dma_map(dev, frag, 0, frag_len, dma_dir); @@ -1956,15 +1958,16 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, goto sg_map_failed; } - qm_sg_entry_set_len(&sgt[i], frag_len); - sgt[i].bpid = FSL_DPAA_BPID_INV; - sgt[i].offset = 0; + qm_sg_entry_set_len(&sgt[i + 1], frag_len); + sgt[i + 1].bpid = FSL_DPAA_BPID_INV; + sgt[i + 1].offset = 0; /* keep the offset in the address */ - qm_sg_entry_set64(&sgt[i], addr); - frag_len = frag->size; + qm_sg_entry_set64(&sgt[i + 1], addr); } - qm_sg_entry_set_f(&sgt[i - 1], frag_len); + + /* Set the final bit in the last used entry of the SGT */ + qm_sg_entry_set_f(&sgt[nr_frags], frag_len); qm_fd_set_sg(fd, priv->tx_headroom, skb->len); @@ -2022,7 +2025,6 @@ static inline int dpaa_xmit(struct dpaa_priv *priv, } if (unlikely(err < 0)) { - percpu_stats->tx_errors++; percpu_stats->tx_fifo_errors++; return err; } @@ -2292,7 +2294,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, vaddr = phys_to_virt(addr); prefetch(vaddr + qm_fd_get_offset(fd)); - fd_format = qm_fd_get_format(fd); /* The only FD types that we may receive are contig and S/G */ WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg)); @@ -2325,8 +2326,10 @@ static enum qman_cb_dqrr_result 
rx_default_dqrr(struct qman_portal *portal, skb_len = skb->len; - if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) + if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) { + percpu_stats->rx_dropped++; return qman_cb_dqrr_consume; + } percpu_stats->rx_packets++; percpu_stats->rx_bytes += skb_len; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index faea674094b9..85306d1b2acf 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -211,7 +211,7 @@ static int dpaa_set_pauseparam(struct net_device *net_dev, if (epause->rx_pause) newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; if (epause->tx_pause) - newadv |= ADVERTISED_Asym_Pause; + newadv ^= ADVERTISED_Asym_Pause; oldadv = phydev->advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 3dc2d771a222..eb2ea231c7ca 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -818,6 +818,12 @@ static void fec_enet_bd_init(struct net_device *dev) for (i = 0; i < txq->bd.ring_size; i++) { /* Initialize the BD for every fragment in the page. */ bdp->cbd_sc = cpu_to_fec16(0); + if (bdp->cbd_bufaddr && + !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), + DMA_TO_DEVICE); if (txq->tx_skbuff[i]) { dev_kfree_skb_any(txq->tx_skbuff[i]); txq->tx_skbuff[i] = NULL; @@ -3452,6 +3458,10 @@ fec_probe(struct platform_device *pdev) goto failed_regulator; } } else { + if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto failed_regulator; + } fep->reg_phy = NULL; } @@ -3533,8 +3543,9 @@ fec_probe(struct platform_device *pdev) failed_clk: if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); -failed_phy: of_node_put(phy_node); +failed_phy: + dev_id--; failed_ioremap: free_netdev(ndev); @@ -3554,6 +3565,8 @@ fec_drv_remove(struct platform_device *pdev) fec_enet_mii_remove(fep); if (fep->reg_phy) regulator_disable(fep->reg_phy); + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(fep->phy_node); diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index ea43b4974149..7af31ddd093f 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -1100,7 +1100,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) set_bucket(dtsec->regs, bucket, true); /* Create element to be added to the driver hash table */ - hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL); + hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC); if (!hash_entry) return -ENOMEM; hash_entry->addr = addr; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 5be52d89b182..63daae120b2d 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1378,9 +1378,11 @@ static int gfar_probe(struct platform_device *ofdev) gfar_init_addr_hash_table(priv); - /* Insert receive time stamps into padding alignment bytes */ + /* Insert receive time stamps into padding alignment bytes, and + * plus 2 bytes padding to ensure the cpu alignment. 
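
The dpaa_eth.c skb_to_sg_fd hunk above fixes off-by-one indexing in the scatter-gather table: entry 0 carries the linear part of the skb, so page fragment i belongs in entry i + 1, and the "final" flag goes on the last entry actually used. A simplified model of the corrected layout (types and names are illustrative):

#include <linux/types.h>

struct sample_sg_entry {
	size_t len;
	bool final;
};

/* Entry 0 maps the linear part, fragment i maps to entry i + 1,
 * and the final flag lands on index nr_frags -- which is also
 * correct for the nr_frags == 0 (linear-only) case.
 */
static void sample_fill_sgt(struct sample_sg_entry *sgt, size_t head_len,
			    const size_t *frag_len, size_t nr_frags)
{
	size_t i;

	sgt[0].len = head_len;
	for (i = 0; i < nr_frags; i++)
		sgt[i + 1].len = frag_len[i];
	sgt[nr_frags].final = true;
}
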
+ */ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) - priv->padding = 8; + priv->padding = 8 + DEFAULT_PADDING; if (dev->features & NETIF_F_IP_CSUM || priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) @@ -1790,6 +1792,7 @@ static int init_phy(struct net_device *dev) GFAR_SUPPORTED_GBIT : 0; phy_interface_t interface; struct phy_device *phydev; + struct ethtool_eee edata; priv->oldlink = 0; priv->oldspeed = 0; @@ -1814,6 +1817,10 @@ static int init_phy(struct net_device *dev) /* Add support for flow control, but don't advertise it by default */ phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); + /* disable EEE autoneg, EEE not supported by eTSEC */ + memset(&edata, 0, sizeof(struct ethtool_eee)); + phy_ethtool_set_eee(phydev, &edata); + return 0; } @@ -2925,7 +2932,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id) static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, struct sk_buff *skb, bool first) { - unsigned int size = lstatus & BD_LENGTH_MASK; + int size = lstatus & BD_LENGTH_MASK; struct page *page = rxb->page; bool last = !!(lstatus & BD_LFLAG(RXBD_LAST)); @@ -2940,11 +2947,16 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, if (last) size -= skb->len; - /* in case the last fragment consisted only of the FCS */ + /* Add the last fragment if it contains something other than + * the FCS, otherwise drop it and trim off any part of the FCS + * that was already received. + */ if (size > 0) skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, rxb->page_offset + RXBUF_ALIGNMENT, size, GFAR_RXB_TRUESIZE); + else if (size < 0) + pskb_trim(skb, skb->len + size); } /* try reuse page */ @@ -3060,9 +3072,6 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) if (ndev->features & NETIF_F_RXCSUM) gfar_rx_checksum(skb, fcb); - /* Tell the skb what kind of packet this is */ - skb->protocol = eth_type_trans(skb, ndev); - /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. * Even if vlan rx accel is disabled, on some chips * RXFCB_VLN is pseudo randomly set. 
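
The gianfar init_phy() hunk above disables EEE by advertising nothing: an all-zero struct ethtool_eee passed to phy_ethtool_set_eee() clears the advertised EEE link modes, so the PHY stops negotiating a feature eTSEC cannot honor. A minimal sketch:

#include <linux/phy.h>
#include <linux/ethtool.h>
#include <linux/string.h>

static void sample_disable_eee(struct phy_device *phydev)
{
	struct ethtool_eee edata;

	/* Advertising no EEE modes stops the PHY from negotiating
	 * EEE with the link partner.
	 */
	memset(&edata, 0, sizeof(edata));
	phy_ethtool_set_eee(phydev, &edata);
}
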
@@ -3133,13 +3142,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) continue; } + gfar_process_frame(ndev, skb); + /* Increment the number of packets */ total_pkts++; total_bytes += skb->len; skb_record_rx_queue(skb, rx_queue->qindex); - gfar_process_frame(ndev, skb); + skb->protocol = eth_type_trans(skb, ndev); /* Send the packet up the stack */ napi_gro_receive(&rx_queue->grp->napi_rx, skb); diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 544114281ea7..9f8d4f8e57e3 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta) now = tmr_cnt_read(etsects); now += delta; tmr_cnt_write(etsects, now); + set_fipers(etsects); spin_unlock_irqrestore(&etsects->lock, flags); - set_fipers(etsects); - return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 3e62692af011..fa5b30f547f6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -87,7 +87,7 @@ do { \ #define HNAE_AE_REGISTER 0x1 -#define RCB_RING_NAME_LEN 16 +#define RCB_RING_NAME_LEN (IFNAMSIZ + 4) #define HNAE_LOWEST_LATENCY_COAL_PARAM 30 #define HNAE_LOW_LATENCY_COAL_PARAM 80 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 86944bc3b273..74bd260ca02a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -666,7 +666,7 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data) static int hns_gmac_get_sset_count(int stringset) { - if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) + if (stringset == ETH_SS_STATS) return ARRAY_SIZE(g_gmac_stats_string); return 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index b62816c1574e..93e71e27401b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -422,7 +422,7 @@ void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb) int hns_ppe_get_sset_count(int stringset) { - if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) + if (stringset == ETH_SS_STATS) return ETH_PPE_STATIC_NUM; return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 6f3570cfb501..e2e28532e4dc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -876,7 +876,7 @@ void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data) */ int hns_rcb_get_ring_sset_count(int stringset) { - if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) + if (stringset == ETH_SS_STATS) return HNS_RING_STATIC_REG_NUM; return 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 7ea7f8a4aa2a..2e14a3ae1d8b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -993,8 +993,10 @@ int hns_get_sset_count(struct net_device *netdev, int stringset) cnt--; return cnt; - } else { + } else if (stringset == ETH_SS_STATS) { return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset)); + } else { + return -EOPNOTSUPP; } } diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c1cdbfd83bdb..c133491ad9fa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1272,8 +1272,11 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) /* We need to alloc a vport for main NIC of PF */ num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; - if (hdev->num_tqps < num_vport) - num_vport = hdev->num_tqps; + if (hdev->num_tqps < num_vport) { + dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)", + hdev->num_tqps, num_vport); + return -EINVAL; + } /* Alloc the same number of TQPs for every vport */ tqp_per_vport = hdev->num_tqps / num_vport; @@ -2092,6 +2095,10 @@ static int hclge_get_autoneg(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (phydev) + return phydev->autoneg; hclge_query_autoneg_result(hdev); @@ -3981,7 +3988,7 @@ static int hclge_init_client_instance(struct hnae3_client *client, vport->roce.client = client; } - if (hdev->roce_client) { + if (hdev->roce_client && hdev->nic_client) { ret = hclge_init_roce_base_info(vport); if (ret) goto err; @@ -4007,13 +4014,19 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { vport = &hdev->vport[i]; - if (hdev->roce_client) + if (hdev->roce_client) { hdev->roce_client->ops->uninit_instance(&vport->roce, 0); + hdev->roce_client = NULL; + vport->roce.client = NULL; + } if (client->type == HNAE3_CLIENT_ROCE) return; - if (client->ops->uninit_instance) + if (client->ops->uninit_instance) { client->ops->uninit_instance(&vport->nic, 0); + hdev->nic_client = NULL; + vport->nic.client = NULL; + } } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 35369e1c8036..69726908e72c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -721,7 +721,7 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) HNS3_TXD_BDTYPE_M, 0); hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); - hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1); + hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); } static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, @@ -1060,6 +1060,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) u64 rx_bytes = 0; u64 tx_pkts = 0; u64 rx_pkts = 0; + u64 tx_drop = 0; + u64 rx_drop = 0; for (idx = 0; idx < queue_num; idx++) { /* fetch the tx stats */ @@ -1068,6 +1070,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) start = u64_stats_fetch_begin_irq(&ring->syncp); tx_bytes += ring->stats.tx_bytes; tx_pkts += ring->stats.tx_pkts; + tx_drop += ring->stats.tx_busy; + tx_drop += ring->stats.sw_err_cnt; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); /* fetch the rx stats */ @@ -1076,6 +1080,9 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) start = u64_stats_fetch_begin_irq(&ring->syncp); rx_bytes += ring->stats.rx_bytes; rx_pkts += ring->stats.rx_pkts; + rx_drop += ring->stats.non_vld_descs; + rx_drop += 
ring->stats.err_pkt_len; + rx_drop += ring->stats.l2_err; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); } @@ -1091,8 +1098,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) stats->rx_missed_errors = netdev->stats.rx_missed_errors; stats->tx_errors = netdev->stats.tx_errors; - stats->rx_dropped = netdev->stats.rx_dropped; - stats->tx_dropped = netdev->stats.tx_dropped; + stats->rx_dropped = rx_drop + netdev->stats.rx_dropped; + stats->tx_dropped = tx_drop + netdev->stats.tx_dropped; stats->collisions = netdev->stats.collisions; stats->rx_over_errors = netdev->stats.rx_over_errors; stats->rx_frame_errors = netdev->stats.rx_frame_errors; @@ -1306,6 +1313,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) return ret; } + netdev->mtu = new_mtu; + /* if the netdev was running earlier, bring it up again */ if (if_running && hns3_nic_net_open(netdev)) ret = -EINVAL; @@ -1546,7 +1555,7 @@ static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, return 0; out_with_buf: - hns3_free_buffers(ring); + hns3_free_buffer(ring, cb); out: return ret; } @@ -1586,9 +1595,10 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, struct hns3_desc_cb *res_cb) { - hns3_map_buffer(ring, &ring->desc_cb[i]); + hns3_unmap_buffer(ring, &ring->desc_cb[i]); ring->desc_cb[i] = *res_cb; ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); + ring->desc[i].rx.bd_base_info = 0; } static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) @@ -1596,6 +1606,7 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) ring->desc_cb[i].reuse_flag = 0; ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + ring->desc_cb[i].page_offset); + ring->desc[i].rx.bd_base_info = 0; } static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, @@ -2460,9 +2471,8 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) (void)irq_set_affinity_hint( priv->tqp_vector[i].vector_irq, NULL); - devm_free_irq(&pdev->dev, - priv->tqp_vector[i].vector_irq, - &priv->tqp_vector[i]); + free_irq(priv->tqp_vector[i].vector_irq, + &priv->tqp_vector[i]); } priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; @@ -2489,16 +2499,16 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, if (ring_type == HNAE3_RING_TYPE_TX) { ring_data[q->tqp_index].ring = ring; + ring_data[q->tqp_index].queue_index = q->tqp_index; ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; } else { ring_data[q->tqp_index + queue_num].ring = ring; + ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; ring->io_base = q->io_base; } hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); - ring_data[q->tqp_index].queue_index = q->tqp_index; - ring->tqp = q; ring->desc = NULL; ring->desc_cb = NULL; @@ -2688,8 +2698,12 @@ static int hns3_uninit_all_ring(struct hns3_nic_priv *priv) h->ae_algo->ops->reset_queue(h, i); hns3_fini_ring(priv->ring_data[i].ring); + devm_kfree(priv->dev, priv->ring_data[i].ring); hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); + devm_kfree(priv->dev, + priv->ring_data[i + h->kinfo.num_tqps].ring); } + devm_kfree(priv->dev, priv->ring_data); return 0; } @@ -2869,6 +2883,8 @@ static int __init hns3_init_module(void) client.ops = &client_ops; + INIT_LIST_HEAD(&client.node); + ret = hnae3_register_client(&client); if (ret) return ret; diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index d636399232fb..a64a5a413d4d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -22,7 +22,8 @@ struct hns3_stats { #define HNS3_TQP_STAT(_string, _member) { \ .stats_string = _string, \ .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \ - .stats_offset = offsetof(struct hns3_enet_ring, stats), \ + .stats_offset = offsetof(struct hns3_enet_ring, stats) +\ + offsetof(struct ring_stats, _member), \ } \ static const struct hns3_stats hns3_txq_stats[] = { @@ -189,13 +190,13 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hns3_enet_ring *ring; u8 *stat; - u32 i; + int i, j; /* get stats for Tx */ for (i = 0; i < kinfo->num_tqps; i++) { ring = nic_priv->ring_data[i].ring; - for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) { - stat = (u8 *)ring + hns3_txq_stats[i].stats_offset; + for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) { + stat = (u8 *)ring + hns3_txq_stats[j].stats_offset; *data++ = *(u64 *)stat; } } @@ -203,8 +204,8 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) /* get stats for Rx */ for (i = 0; i < kinfo->num_tqps; i++) { ring = nic_priv->ring_data[i + kinfo->num_tqps].ring; - for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) { - stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset; + for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) { + stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset; *data++ = *(u64 *)stat; } } @@ -375,6 +376,9 @@ static int hns3_get_link_ksettings(struct net_device *netdev, break; } + if (!cmd->base.autoneg) + advertised_caps &= ~HNS3_LM_AUTONEG_BIT; + /* now, map driver link modes to ethtool link modes */ hns3_driv_to_eth_caps(supported_caps, cmd, false); hns3_driv_to_eth_caps(advertised_caps, cmd, true); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index c66abd476023..98493be7b4af 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -927,11 +927,41 @@ static int ibmvnic_open(struct net_device *netdev) } rc = __ibmvnic_open(netdev); + netif_carrier_on(netdev); mutex_unlock(&adapter->reset_lock); return rc; } +static void clean_rx_pools(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_rx_pool *rx_pool; + u64 rx_entries; + int rx_scrqs; + int i, j; + + if (!adapter->rx_pool) + return; + + rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + rx_entries = adapter->req_rx_add_entries_per_subcrq; + + /* Free any remaining skbs in the rx buffer pools */ + for (i = 0; i < rx_scrqs; i++) { + rx_pool = &adapter->rx_pool[i]; + if (!rx_pool) + continue; + + netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); + for (j = 0; j < rx_entries; j++) { + if (rx_pool->rx_buff[j].skb) { + dev_kfree_skb_any(rx_pool->rx_buff[j].skb); + rx_pool->rx_buff[j].skb = NULL; + } + } + } +} + static void clean_tx_pools(struct ibmvnic_adapter *adapter) { struct ibmvnic_tx_pool *tx_pool; @@ -1009,7 +1039,7 @@ static int __ibmvnic_close(struct net_device *netdev) } } } - + clean_rx_pools(adapter); clean_tx_pools(adapter); adapter->state = VNIC_CLOSED; return rc; @@ -1459,8 +1489,6 @@ static int do_reset(struct ibmvnic_adapter *adapter, return 0; } - netif_carrier_on(netdev); - /* kick napi */ for (i = 0; i < adapter->req_rx_queues; i++) napi_schedule(&adapter->napi[i]); @@ -1468,6 +1496,8 @@ 
static int do_reset(struct ibmvnic_adapter *adapter, if (adapter->reset_reason != VNIC_RESET_FAILOVER) netdev_notify_peers(netdev); + netif_carrier_on(netdev); + return 0; } @@ -1635,6 +1665,12 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget) be16_to_cpu(next->rx_comp.rc)); /* free the entry */ next->rx_comp.first = 0; + dev_kfree_skb_any(rx_buff->skb); + remove_buff_from_pool(adapter, rx_buff); + continue; + } else if (!rx_buff->skb) { + /* free the entry */ + next->rx_comp.first = 0; remove_buff_from_pool(adapter, rx_buff); continue; } @@ -1926,6 +1962,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, } memset(scrq->msgs, 0, 4 * PAGE_SIZE); + atomic_set(&scrq->used, 0); scrq->cur = 0; rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, @@ -2208,6 +2245,12 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) struct ibmvnic_sub_crq_queue *scrq = instance; struct ibmvnic_adapter *adapter = scrq->adapter; + /* When booting a kdump kernel we can hit pending interrupts + * prior to completing driver initialization. + */ + if (unlikely(adapter->state != VNIC_OPEN)) + return IRQ_NONE; + adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { @@ -3899,6 +3942,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) if (rc) goto ibmvnic_init_fail; + netif_carrier_off(netdev); rc = register_netdev(netdev); if (rc) { dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index d7bdea79e9fa..8fd2458060a0 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -331,7 +331,8 @@ struct e1000_adapter { enum e1000_state_t { __E1000_TESTING, __E1000_RESETTING, - __E1000_DOWN + __E1000_DOWN, + __E1000_DISABLED }; #undef pr_fmt diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 1982f7917a8d..3dd4aeb2706d 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter, static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; - struct e1000_adapter *adapter; + struct e1000_adapter *adapter = NULL; struct e1000_hw *hw; static int cards_found; @@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) u16 tmp = 0; u16 eeprom_apme_mask = E1000_EEPROM_APME; int bars, need_ioport; + bool disable_dev = false; /* do not allocate ioport bars when not needed */ need_ioport = e1000_is_need_ioport(pdev); @@ -1259,11 +1260,13 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) iounmap(hw->ce4100_gbe_mdio_base_virt); iounmap(hw->hw_addr); err_ioremap: + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); free_netdev(netdev); err_alloc_etherdev: pci_release_selected_regions(pdev, bars); err_pci_reg: - pci_disable_device(pdev); + if (!adapter || disable_dev) + pci_disable_device(pdev); return err; } @@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + bool disable_dev; e1000_down_and_stop(adapter); e1000_release_manageability(adapter); @@ -1299,9 +1303,11 
@@ static void e1000_remove(struct pci_dev *pdev) iounmap(hw->flash_address); pci_release_selected_regions(pdev, adapter->bars); + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); free_netdev(netdev); - pci_disable_device(pdev); + if (disable_dev) + pci_disable_device(pdev); } /** @@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) if (netif_running(netdev)) e1000_free_irq(adapter); - pci_disable_device(pdev); + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); return 0; } @@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev) pr_err("Cannot enable PCI device from suspend\n"); return err; } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); @@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, if (netif_running(netdev)) e1000_down(adapter); - pci_disable_device(pdev); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); /* Request a slot slot reset. */ return PCI_ERS_RESULT_NEED_RESET; @@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) pr_err("Cannot re-enable PCI device after reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 0641c0098738..afb7ebe20b24 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -398,6 +398,7 @@ #define E1000_ICR_LSC 0x00000004 /* Link Status Change */ #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */ #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ /* If this bit asserted, the driver should claim the interrupt */ diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index d6d4ed7acf03..ff308b05d68c 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1367,6 +1367,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. + * + * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link + * up). **/ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) { @@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) - return 0; + return 1; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex @@ -1599,7 +1602,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * we have already determined whether we have link or not. */ if (!mac->autoneg) - return -E1000_ERR_CONFIG; + return 1; /* Auto-Neg is enabled. 
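
The e1000 hunks above thread a __E1000_DISABLED flag through remove, shutdown, and the AER paths so pci_disable_device() runs at most once however those paths interleave. test_and_set_bit() makes the check-and-mark atomic; a generic sketch:

#include <linux/bitops.h>
#include <linux/pci.h>

#define SAMPLE_DISABLED	0	/* bit index in the adapter's flags word */

static void sample_pci_disable_once(struct pci_dev *pdev,
				    unsigned long *flags)
{
	/* test_and_set_bit() returns the previous value, so only
	 * the first caller sees 0 and actually disables the device;
	 * later teardown paths become harmless no-ops.
	 */
	if (!test_and_set_bit(SAMPLE_DISABLED, flags))
		pci_disable_device(pdev);
}

The resume and slot-reset paths then clear the bit again (after the smp_mb__before_atomic() barrier seen in the hunks above) once the device has been re-enabled.
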
Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to @@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * different link partner. */ ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) + if (ret_val) { e_dbg("Error configuring flow control\n"); + return ret_val; + } - return ret_val; + return 1; } static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 67163ca898ba..00a36df02a3f 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -113,7 +113,8 @@ #define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */ #define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */ #define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ -#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) +#define E1000_TARC0_CB_MULTIQ_3_REQ 0x30000000 +#define E1000_TARC0_CB_MULTIQ_2_REQ 0x20000000 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index b322011ec282..db735644b312 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -410,6 +410,9 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. + * + * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link + * up). **/ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) { @@ -423,7 +426,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) - return 0; + return 1; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex @@ -447,7 +450,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * we have already determined whether we have link or not. */ if (!mac->autoneg) - return -E1000_ERR_CONFIG; + return 1; /* Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to @@ -461,10 +464,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * different link partner. 
*/ ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) + if (ret_val) { e_dbg("Error configuring flow control\n"); + return ret_val; + } - return ret_val; + return 1; } /** diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 327dfe5bedc0..6265ce8915b6 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1910,14 +1910,30 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + u32 icr; + bool enable = true; + + icr = er32(ICR); + if (icr & E1000_ICR_RXO) { + ew32(ICR, E1000_ICR_RXO); + enable = false; + /* napi poll will re-enable Other, make sure it runs */ + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + } + if (icr & E1000_ICR_LSC) { + ew32(ICR, E1000_ICR_LSC); + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } - hw->mac.get_link_status = true; - - /* guard against interrupt when we're going down */ - if (!test_bit(__E1000_DOWN, &adapter->state)) { - mod_timer(&adapter->watchdog_timer, jiffies + 1); + if (enable && !test_bit(__E1000_DOWN, &adapter->state)) ew32(IMS, E1000_IMS_OTHER); - } return IRQ_HANDLED; } @@ -2313,8 +2329,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, { struct pci_dev *pdev = adapter->pdev; - ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, - GFP_KERNEL); + ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma, + GFP_KERNEL); if (!ring->desc) return -ENOMEM; @@ -2687,7 +2703,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight) napi_complete_done(napi, work_done); if (!test_bit(__E1000_DOWN, &adapter->state)) { if (adapter->msix_entries) - ew32(IMS, adapter->rx_ring->ims_val); + ew32(IMS, adapter->rx_ring->ims_val | + E1000_IMS_OTHER); else e1000_irq_enable(adapter); } @@ -3004,8 +3021,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) hw->mac.ops.config_collision_dist(hw); - /* SPT and CNP Si errata workaround to avoid data corruption */ - if (hw->mac.type >= e1000_pch_spt) { + /* SPT and KBL Si errata workaround to avoid data corruption */ + if (hw->mac.type == e1000_pch_spt) { u32 reg_val; reg_val = er32(IOSFPC); @@ -3013,7 +3030,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) ew32(IOSFPC, reg_val); reg_val = er32(TARC(0)); - reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ; + /* SPT and KBL Si errata workaround to avoid Tx hang. + * Dropping the number of outstanding requests from + * 3 to 2 in order to avoid a buffer overrun. 
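
The reworked e1000_msix_other() above uses the standard idiom for kicking NAPI from hard-IRQ context: napi_schedule_prep() atomically claims the instance before __napi_schedule() queues it. A minimal sketch:

#include <linux/netdevice.h>

static void sample_kick_napi(struct napi_struct *napi)
{
	/* napi_schedule_prep() fails if polling is already scheduled
	 * or disabled, so __napi_schedule() runs at most once per
	 * poll cycle.
	 */
	if (napi_schedule_prep(napi))
		__napi_schedule(napi);
}
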
+ */ + reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ; + reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ; ew32(TARC(0), reg_val); } } @@ -3536,15 +3558,12 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) } break; case e1000_pch_spt: - if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { - /* Stable 24MHz frequency */ - incperiod = INCPERIOD_24MHZ; - incvalue = INCVALUE_24MHZ; - shift = INCVALUE_SHIFT_24MHZ; - adapter->cc.shift = shift; - break; - } - return -EINVAL; + /* Stable 24MHz frequency */ + incperiod = INCPERIOD_24MHZ; + incvalue = INCVALUE_24MHZ; + shift = INCVALUE_SHIFT_24MHZ; + adapter->cc.shift = shift; + break; case e1000_pch_cnp: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 24MHz frequency */ @@ -4204,7 +4223,7 @@ static void e1000e_trigger_lsc(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; if (adapter->msix_entries) - ew32(ICS, E1000_ICS_OTHER); + ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER); else ew32(ICS, E1000_ICS_LSC); } @@ -5081,7 +5100,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) case e1000_media_type_copper: if (hw->mac.get_link_status) { ret_val = hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; + link_active = ret_val > 0; } else { link_active = true; } @@ -5099,7 +5118,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) break; } - if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && + if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ e_info("Gigabit has been disabled, downgrading speed\n"); diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index d78d47b41a71..86ff0969efb6 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1744,6 +1744,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, s32 ret_val = 0; u16 i, phy_status; + *success = false; for (i = 0; i < iterations; i++) { /* Some PHYs require the MII_BMSR register to be read * twice due to the link bit being sticky. 
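
The link-check hunks above converge on a tri-state return so I/O errors stop masquerading as "link up"; callers such as e1000e_has_link() then reduce it to a boolean with ret_val > 0. A sketch of the caller side:

#include <linux/types.h>

/* Convention from the hunks above:
 *   < 0 : -E1000_ERR_* (could not query the hardware)
 *     0 : link down
 *     1 : link up
 */
static bool sample_link_active(s32 check_ret)
{
	/* Only a positive return counts as link up, so errors no
	 * longer need the get_link_status side flag to be decoded.
	 */
	return check_ret > 0;
}
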
No harm doing @@ -1763,16 +1764,16 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); if (ret_val) break; - if (phy_status & BMSR_LSTATUS) + if (phy_status & BMSR_LSTATUS) { + *success = true; break; + } if (usec_interval >= 1000) msleep(usec_interval / 1000); else udelay(usec_interval); } - *success = (i < iterations); - return ret_val; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 689c413b7782..d2f9a2dd76a2 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -526,8 +526,8 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid); int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac); int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid, u8 qos, __be16 vlan_proto); -int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate, - int unused); +int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, + int __always_unused min_rate, int max_rate); int fm10k_ndo_get_vf_config(struct net_device *netdev, int vf_idx, struct ifla_vf_info *ivi); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 5f4dac0d36ef..e72fd52bacfe 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -126,6 +126,9 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface) struct fm10k_mbx_info *mbx = &vf_info->mbx; u16 glort = vf_info->glort; + /* process the SM mailbox first to drain outgoing messages */ + hw->mbx.ops.process(hw, &hw->mbx); + /* verify port mapping is valid, if not reset port */ if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) hw->iov.ops.reset_lport(hw, vf_info); @@ -482,7 +485,7 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid, } int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, - int __always_unused unused, int rate) + int __always_unused min_rate, int max_rate) { struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_iov_data *iov_data = interface->iov_data; @@ -493,14 +496,15 @@ int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, return -EINVAL; /* rate limit cannot be less than 10Mbs or greater than link speed */ - if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX)) + if (max_rate && + (max_rate < FM10K_VF_TC_MIN || max_rate > FM10K_VF_TC_MAX)) return -EINVAL; /* store values */ - iov_data->vf_info[vf_idx].rate = rate; + iov_data->vf_info[vf_idx].rate = max_rate; /* update hardware configuration */ - hw->iov.ops.configure_tc(hw, vf_idx, rate); + hw->iov.ops.configure_tc(hw, vf_idx, max_rate); return 0; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 9dffaba85ae6..103c0a742d03 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1229,7 +1229,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE)) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index e69d49d91d67..914258310ddd 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ 
b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -815,8 +815,12 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) if (vid >= VLAN_N_VID) return -EINVAL; - /* Verify we have permission to add VLANs */ - if (hw->mac.vlan_override) + /* Verify that we have permission to add VLANs. If this is a request + * to remove a VLAN, we still want to allow the user to remove the + * VLAN device. In that case, we need to clear the bit in the + * active_vlans bitmask. + */ + if (set && hw->mac.vlan_override) return -EACCES; /* update active_vlans bitmask */ @@ -835,6 +839,12 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) rx_ring->vid &= ~FM10K_VLAN_CLEAR; } + /* If our VLAN has been overridden, there is no reason to send VLAN + * removal requests as they will be silently ignored. + */ + if (hw->mac.vlan_override) + return 0; + /* Do not remove default VLAN ID related entries from VLAN and MAC * tables */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 05e89864f781..ef22793d6a03 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2588,16 +2588,16 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, no_input_set: if (input_set & I40E_L3_SRC_MASK) - fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFF); + fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF); if (input_set & I40E_L3_DST_MASK) - fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFF); + fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF); if (input_set & I40E_L4_SRC_MASK) - fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFFFFFF); + fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF); if (input_set & I40E_L4_DST_MASK) - fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFFFFFF); + fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF); if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET) fsp->ring_cookie = RX_CLS_FLOW_DISC; @@ -3648,6 +3648,16 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi, i40e_write_fd_input_set(pf, index, new_mask); + /* IP_USER_FLOW filters match both IPv4/Other and IPv4/Fragmented + * frames. If we're programming the input set for IPv4/Other, we also + * need to program the IPv4/Fragmented input set. Since we don't have + * separate support, we'll always assume and enforce that the two flow + * types must have matching input sets. + */ + if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) + i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4, + new_mask); + /* Add the new offset and update table, if necessary */ if (new_flex_offset) { err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6498da8806cb..04dbf64fb1cb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1553,11 +1553,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p) else netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); + /* Copy the address first, so that we avoid a possible race with + * .set_rx_mode(). If we copy after changing the address in the filter + * list, we might open ourselves to a narrow race window where + * .set_rx_mode could delete our dev_addr filter and prevent traffic + * from passing. 
+ */ + ether_addr_copy(netdev->dev_addr, addr->sa_data); + spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_del_mac_filter(vsi, netdev->dev_addr); i40e_add_mac_filter(vsi, addr->sa_data); spin_unlock_bh(&vsi->mac_filter_hash_lock); - ether_addr_copy(netdev->dev_addr, addr->sa_data); if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; @@ -1739,6 +1746,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; + /* Under some circumstances, we might receive a request to delete + * our own device address from our uc list. Because we store the + * device address in the VSI's MAC/VLAN filter list, we need to ignore + * such requests and not delete our device address from this list. + */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + i40e_del_mac_filter(vsi, addr); return 0; @@ -2874,14 +2889,15 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) static void i40e_config_xps_tx_ring(struct i40e_ring *ring) { struct i40e_vsi *vsi = ring->vsi; + int cpu; if (!ring->q_vector || !ring->netdev) return; if ((vsi->tc_config.numtc <= 1) && !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) { - netif_set_xps_queue(ring->netdev, - get_cpu_mask(ring->q_vector->v_idx), + cpu = cpumask_local_spread(ring->q_vector->v_idx, -1); + netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), ring->queue_index); } @@ -3471,6 +3487,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) int tx_int_idx = 0; int vector, err; int irq_num; + int cpu; for (vector = 0; vector < q_vectors; vector++) { struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; @@ -3506,10 +3523,14 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) q_vector->affinity_notify.notify = i40e_irq_affinity_notify; q_vector->affinity_notify.release = i40e_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); - /* get_cpu_mask returns a static constant mask with - * a permanent lifetime so it's ok to use here. + /* Spread affinity hints out across online CPUs. + * + * get_cpu_mask returns a static constant mask with + * a permanent lifetime so it's ok to pass to + * irq_set_affinity_hint without making a copy. */ - irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx)); + cpu = cpumask_local_spread(q_vector->v_idx, -1); + irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); } vsi->irqs_ready = true; @@ -3760,7 +3781,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if the descriptor isn't done, no work yet to do */ if (!(eop_desc->cmd_type_offset_bsz & @@ -5807,6 +5828,9 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf) /* Reprogram the default input set for Other/IPv4 */ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, I40E_L3_SRC_MASK | I40E_L3_DST_MASK); + + i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4, + I40E_L3_SRC_MASK | I40E_L3_DST_MASK); } /** @@ -7172,6 +7196,17 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } i40e_get_oem_version(&pf->hw); + if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && + ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) || + hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) { + /* The following delay is necessary for 4.33 firmware and older + * to recover after EMP reset. 
200 ms should suffice, but we + wait 300 ms to be sure that the firmware is ready to operate + after the reset. + */ + mdelay(300); + } + /* re-verify the eeprom if we just had an EMP reset */ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) i40e_verify_eeprom(pf); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index d8456c381c99..ef242dbae116 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -337,6 +337,8 @@ void i40e_ptp_rx_hang(struct i40e_pf *pf) **/ void i40e_ptp_tx_hang(struct i40e_pf *pf) { + struct sk_buff *skb; + if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx) return; @@ -349,9 +351,12 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf) * within a second it is reasonable to assume that we never will. */ if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) { - dev_kfree_skb_any(pf->ptp_tx_skb); + skb = pf->ptp_tx_skb; pf->ptp_tx_skb = NULL; clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); + + /* Free the skb only after we have cleared the bit lock */ + dev_kfree_skb_any(skb); pf->tx_hwtstamp_timeouts++; } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 120c68f78951..542c00b1c823 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -759,7 +759,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* we have caught up to head, no work left to do */ @@ -3048,10 +3048,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb) /* Walk through fragments adding latest fragment, testing it, and * then removing stale fragments from the sum. */ - stale = &skb_shinfo(skb)->frags[0]; - for (;;) { + for (stale = &skb_shinfo(skb)->frags[0];; stale++) { + int stale_size = skb_frag_size(stale); + sum += skb_frag_size(frag++); + /* The stale fragment may present us with a smaller + * descriptor than the actual fragment size. To account + * for that we need to remove all the data on the front and + * figure out what the remainder would be in the last + * descriptor associated with the fragment.
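+ * In other words (a restatement of the code below, not extra + * logic): the oversized stale fragment leaves the running sum in + * the same per-descriptor chunks the hardware would consume it + * in: first the head alignment pad, then whole + * I40E_MAX_DATA_PER_TXD_ALIGNED chunks, and finally the + * remainder via the 'sum -= stale_size' further down.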
+ */ + if (stale_size > I40E_MAX_DATA_PER_TXD) { + int align_pad = -(stale->page_offset) & + (I40E_MAX_READ_REQ_SIZE - 1); + + sum -= align_pad; + stale_size -= align_pad; + + do { + sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; + stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; + } while (stale_size > I40E_MAX_DATA_PER_TXD); + } + /* if sum is negative we failed to make sufficient progress */ if (sum < 0) return true; @@ -3059,7 +3079,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb) if (!nr_frags--) break; - sum -= skb_frag_size(stale++); + sum -= stale_size; } return false; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 4d1e670f490e..4a85a24ced1c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1008,8 +1008,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf) set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); /* Do not notify the client during VF init */ - if (test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, - &vf->vf_states)) + if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, + &vf->vf_states)) i40e_notify_client_of_vf_reset(pf, abs_vf_id); vf->num_vlan = 0; } @@ -2779,7 +2779,9 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) struct i40e_mac_filter *f; struct i40e_vf *vf; int ret = 0; + struct hlist_node *h; int bkt; + u8 i; /* validate the request */ if (vf_id >= pf->num_alloc_vfs) { @@ -2791,6 +2793,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) vf = &(pf->vf[vf_id]); vsi = pf->vsi[vf->lan_vsi_idx]; + + /* When the VF is resetting wait until it is done. + * It can take up to 200 milliseconds, + * but wait for up to 300 milliseconds to be safe. + */ + for (i = 0; i < 15; i++) { + if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) + break; + msleep(20); + } if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", vf_id); @@ -2817,7 +2829,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) /* Delete all the filters for this VSI - we're going to kill it * anyway. */ - hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) + hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) __i40e_del_filter(vsi, f); spin_unlock_bh(&vsi->mac_filter_hash_lock); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index c32c62462c84..7368b0dc3af8 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -179,7 +179,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* if the descriptor isn't done, no work yet to do */ @@ -2014,10 +2014,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb) /* Walk through fragments adding latest fragment, testing it, and * then removing stale fragments from the sum. */ - stale = &skb_shinfo(skb)->frags[0]; - for (;;) { + for (stale = &skb_shinfo(skb)->frags[0];; stale++) { + int stale_size = skb_frag_size(stale); + sum += skb_frag_size(frag++); + /* The stale fragment may present us with a smaller + * descriptor than the actual fragment size. 
To account + * for that we need to remove all the data on the front and + * figure out what the remainder would be in the last + * descriptor associated with the fragment. + */ + if (stale_size > I40E_MAX_DATA_PER_TXD) { + int align_pad = -(stale->page_offset) & + (I40E_MAX_READ_REQ_SIZE - 1); + + sum -= align_pad; + stale_size -= align_pad; + + do { + sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; + stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; + } while (stale_size > I40E_MAX_DATA_PER_TXD); + } + /* if sum is negative we failed to make sufficient progress */ if (sum < 0) return true; @@ -2025,7 +2045,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb) if (!nr_frags--) break; - sum -= skb_frag_size(stale++); + sum -= stale_size; } return false; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 82f69031e5cd..2ef32ab1dfae 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -186,6 +186,7 @@ enum i40evf_state_t { enum i40evf_critical_section_t { __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */ __I40EVF_IN_CLIENT_TASK, + __I40EVF_IN_REMOVE_TASK, /* device being removed */ }; /* board specific private data structure */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 1825d956bb00..1b5d204c57c1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -546,6 +546,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) unsigned int vector, q_vectors; unsigned int rx_int_idx = 0, tx_int_idx = 0; int irq_num, err; + int cpu; i40evf_irq_disable(adapter); /* Decrement for Other and TCP Timer vectors */ @@ -584,10 +585,12 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) q_vector->affinity_notify.release = i40evf_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); - /* get_cpu_mask returns a static constant mask with - * a permanent lifetime so it's ok to use here. + /* Spread the IRQ affinity hints across online CPUs. Note that + * get_cpu_mask returns a mask with a permanent lifetime so + * it's safe to use as a hint for irq_set_affinity_hint. */ - irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx)); + cpu = cpumask_local_spread(q_vector->v_idx, -1); + irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); } return 0; @@ -1772,7 +1775,11 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; - if (netif_running(adapter->netdev)) { + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. + */ + if (adapter->state == __I40EVF_RUNNING) { set_bit(__I40E_VSI_DOWN, adapter->vsi.state); netif_carrier_off(adapter->netdev); netif_tx_disable(adapter->netdev); @@ -1830,6 +1837,13 @@ static void i40evf_reset_task(struct work_struct *work) struct i40evf_mac_filter *f; u32 reg_val; int i = 0, err; + bool running; + + /* When device is being removed it doesn't make sense to run the reset + * task, just return in such a case. 
+ */ + if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section)) + return; while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section)) @@ -1889,7 +1903,13 @@ static void i40evf_reset_task(struct work_struct *work) } continue_reset: - if (netif_running(netdev)) { + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. + */ + running = (adapter->state == __I40EVF_RUNNING); + + if (running) { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); adapter->link_up = false; @@ -1933,7 +1953,10 @@ static void i40evf_reset_task(struct work_struct *work) mod_timer(&adapter->watchdog_timer, jiffies + 2); - if (netif_running(adapter->netdev)) { + /* We were running when the reset started, so we need to restore some + * state here. + */ + if (running) { /* allocate transmit descriptors */ err = i40evf_setup_all_tx_resources(adapter); if (err) @@ -3005,7 +3028,8 @@ static void i40evf_remove(struct pci_dev *pdev) struct i40evf_mac_filter *f, *ftmp; struct i40e_hw *hw = &adapter->hw; int err; - + /* Indicate we are in remove so that reset_task does not run */ + set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section); cancel_delayed_work_sync(&adapter->init_task); cancel_work_sync(&adapter->reset_task); cancel_delayed_work_sync(&adapter->client_task); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 85876f4fb1fb..46bf11afba08 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -937,23 +937,34 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, if (v_opcode == VIRTCHNL_OP_EVENT) { struct virtchnl_pf_event *vpe = (struct virtchnl_pf_event *)msg; + bool link_up = vpe->event_data.link_event.link_status; switch (vpe->event) { case VIRTCHNL_EVENT_LINK_CHANGE: adapter->link_speed = vpe->event_data.link_event.link_speed; - if (adapter->link_up != - vpe->event_data.link_event.link_status) { - adapter->link_up = - vpe->event_data.link_event.link_status; - if (adapter->link_up) { - netif_tx_start_all_queues(netdev); - netif_carrier_on(netdev); - } else { - netif_tx_stop_all_queues(netdev); - netif_carrier_off(netdev); - } - i40evf_print_link_message(adapter); + + /* we've already got the right link status, bail */ + if (adapter->link_up == link_up) + break; + + /* If we get a link up message and start the queues before + * they are configured, it will trigger a TX hang. + * In that case, just ignore the link status message; + * we'll get another one after we enable queues and + * are actually ready to send traffic.
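+ * (For example, under an assumed but typical VF bring-up + * ordering: the PF may send a VIRTCHNL_EVENT_LINK_CHANGE + * reporting link up shortly after VIRTCHNL_OP_GET_VF_RESOURCES + * completes, before this VF has issued + * VIRTCHNL_OP_ENABLE_QUEUES, i.e. while we are not yet in the + * __I40EVF_RUNNING state.)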
+ */ + if (link_up && adapter->state != __I40EVF_RUNNING) + break; + + adapter->link_up = link_up; + if (link_up) { + netif_tx_start_all_queues(netdev); + netif_carrier_on(netdev); + } else { + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); } + i40evf_print_link_message(adapter); break; case VIRTCHNL_EVENT_RESET_IMPENDING: dev_info(&adapter->pdev->dev, "PF reset warning received\n"); diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 1de82f247312..62bc7d85a138 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -353,7 +353,18 @@ #define E1000_RXPBS_CFG_TS_EN 0x80000000 #define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I210_RXPBSIZE_MASK 0x0000003F +#define I210_RXPBSIZE_PB_32KB 0x00000020 #define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +#define I210_TXPBSIZE_MASK 0xC0FFFFFF +#define I210_TXPBSIZE_PB0_8KB (8 << 0) +#define I210_TXPBSIZE_PB1_8KB (8 << 6) +#define I210_TXPBSIZE_PB2_4KB (4 << 12) +#define I210_TXPBSIZE_PB3_4KB (4 << 18) + +#define I210_DTXMXPKTSZ_DEFAULT 0x00000098 + +#define I210_SR_QUEUES_NUM 2 /* SerDes Control */ #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 @@ -479,6 +490,8 @@ * manageability enabled, allowing us room for 15 multicast addresses. */ #define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAH_ASEL_SRC_ADDR 0x00010000 +#define E1000_RAH_QSEL_ENABLE 0x10000000 #define E1000_RAL_MAC_ADDR_LEN 4 #define E1000_RAH_MAC_ADDR_LEN 2 #define E1000_RAH_POOL_MASK 0x03FC0000 @@ -1051,4 +1064,32 @@ #define E1000_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) #define E1000_VLAPQF_QUEUE_MASK 0x03 +/* TX Qav Control fields */ +#define E1000_TQAVCTRL_XMIT_MODE BIT(0) +#define E1000_TQAVCTRL_DATAFETCHARB BIT(4) +#define E1000_TQAVCTRL_DATATRANARB BIT(8) +#define E1000_TQAVCTRL_DATATRANTIM BIT(9) +#define E1000_TQAVCTRL_SP_WAIT_SR BIT(10) +/* Fetch Time Delta - bits 31:16 + * + * This field holds the value to be reduced from the launch time for + * fetch time decision. The FetchTimeDelta value is defined in 32 ns + * granularity. + * + * This field is 16 bits wide, and so the maximum value is: + * + * 65535 * 32 = 2097120 ~= 2.1 msec + * + * XXX: We are configuring the max value here since we couldn't come up + * with a reason for not doing so. 
+ */ +#define E1000_TQAVCTRL_FETCHTIME_DELTA (0xFFFF << 16) + +/* TX Qav Credit Control fields */ +#define E1000_TQAVCC_IDLESLOPE_MASK 0xFFFF +#define E1000_TQAVCC_QUEUEMODE BIT(31) + +/* Transmit Descriptor Control fields */ +#define E1000_TXDCTL_PRIORITY BIT(27) + #endif diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 58adbf234e07..8eee081d395f 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -421,6 +421,14 @@ do { \ #define E1000_I210_FLA 0x1201C +#define E1000_I210_DTXMXPKTSZ 0x355C + +#define E1000_I210_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) + +#define E1000_I210_TQAVCTRL 0x3570 +#define E1000_I210_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) +#define E1000_I210_TQAVHC(_n) (0x300C + ((_n) * 0x40)) + #define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) #define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 06ffb2bc713e..6b23a579bd28 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -281,6 +281,12 @@ struct igb_ring { u16 count; /* number of desc. in the ring */ u8 queue_index; /* logical index of the ring*/ u8 reg_idx; /* physical index of the ring */ + bool launchtime_enable; /* true if LaunchTime is enabled */ + bool cbs_enable; /* indicates if CBS is enabled */ + s32 idleslope; /* idleSlope in kbps */ + s32 sendslope; /* sendSlope in kbps */ + s32 hicredit; /* hiCredit in bytes */ + s32 locredit; /* loCredit in bytes */ /* everything past this point are written often */ u16 next_to_clean; @@ -435,6 +441,8 @@ struct hwmon_buff { enum igb_filter_match_flags { IGB_FILTER_FLAG_ETHER_TYPE = 0x1, IGB_FILTER_FLAG_VLAN_TCI = 0x2, + IGB_FILTER_FLAG_SRC_MAC_ADDR = 0x4, + IGB_FILTER_FLAG_DST_MAC_ADDR = 0x8, }; #define IGB_MAX_RXNFC_FILTERS 16 @@ -449,6 +457,8 @@ struct igb_nfc_input { u8 match_flags; __be16 etype; __be16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; }; struct igb_nfc_filter { @@ -467,6 +477,8 @@ struct igb_mac_addr { #define IGB_MAC_STATE_DEFAULT 0x1 #define IGB_MAC_STATE_IN_USE 0x2 +#define IGB_MAC_STATE_SRC_ADDR 0x4 +#define IGB_MAC_STATE_QUEUE_STEERING 0x8 /* board specific private data structure */ struct igb_adapter { @@ -621,6 +633,7 @@ struct igb_adapter { #define IGB_FLAG_EEE BIT(14) #define IGB_FLAG_VLAN_PROMISC BIT(15) #define IGB_FLAG_RX_LEGACY BIT(16) +#define IGB_FLAG_FQTSS BIT(17) /* Media Auto Sense */ #define IGB_MAS_ENABLE_0 0X0001 @@ -730,4 +743,9 @@ int igb_add_filter(struct igb_adapter *adapter, int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input); +int igb_add_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); +int igb_del_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); + #endif /* _IGB_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index d06a8db514d4..b9d089f19f45 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2494,6 +2494,23 @@ static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter, fsp->h_ext.vlan_tci = rule->filter.vlan_tci; fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); } + if (rule->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + /* As we only support matching by the 
full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + return 0; } return -EINVAL; @@ -2767,14 +2784,41 @@ static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter, int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) { + struct e1000_hw *hw = &adapter->hw; int err = -EINVAL; + if (hw->mac.type == e1000_i210 && + !(input->filter.match_flags & ~IGB_FILTER_FLAG_SRC_MAC_ADDR)) { + dev_err(&adapter->pdev->dev, + "i210 doesn't support flow classification rules specifying only source addresses.\n"); + return -EOPNOTSUPP; + } + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { err = igb_rxnfc_write_etype_filter(adapter, input); if (err) return err; } + if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) { + err = igb_add_mac_steering_filter(adapter, + input->filter.dst_addr, + input->action, 0); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) { + err = igb_add_mac_steering_filter(adapter, + input->filter.src_addr, + input->action, + IGB_MAC_STATE_SRC_ADDR); + err = min_t(int, err, 0); + if (err) + return err; + } + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) err = igb_rxnfc_write_vlan_prio_filter(adapter, input); @@ -2823,6 +2867,15 @@ int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) igb_clear_vlan_prio_filter(adapter, ntohs(input->filter.vlan_tci)); + if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) + igb_del_mac_steering_filter(adapter, input->filter.src_addr, + input->action, + IGB_MAC_STATE_SRC_ADDR); + + if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) + igb_del_mac_steering_filter(adapter, input->filter.dst_addr, + input->action, 0); + return 0; } @@ -2864,7 +2917,7 @@ static int igb_update_ethtool_nfc_entry(struct igb_adapter *adapter, /* add filter to the list */ if (parent) - hlist_add_behind(&parent->nfc_node, &input->nfc_node); + hlist_add_behind(&input->nfc_node, &parent->nfc_node); else hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list); @@ -2904,10 +2957,6 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter, if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) return -EINVAL; - if (fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK && - fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) - return -EINVAL; - input = kzalloc(sizeof(*input), GFP_KERNEL); if (!input) return -ENOMEM; @@ -2917,6 +2966,20 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter, input->filter.match_flags = IGB_FILTER_FLAG_ETHER_TYPE; } + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + input->filter.match_flags |= IGB_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(input->filter.src_addr, + fsp->h_u.ether_spec.h_source); + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + input->filter.match_flags |= IGB_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(input->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { if (fsp->m_ext.vlan_tci != 
htons(VLAN_PRIO_MASK)) { err = -EINVAL; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ea69af267d63..26416201d695 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -62,6 +63,17 @@ #define BUILD 0 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ __stringify(BUILD) "-k" + +enum queue_mode { + QUEUE_MODE_STRICT_PRIORITY, + QUEUE_MODE_STREAM_RESERVATION, +}; + +enum tx_queue_prio { + TX_QUEUE_PRIO_HIGH, + TX_QUEUE_PRIO_LOW, +}; + char igb_driver_name[] = "igb"; char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = @@ -1271,6 +1283,12 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, ring->count = adapter->tx_ring_count; ring->queue_index = txr_idx; + ring->cbs_enable = false; + ring->idleslope = 0; + ring->sendslope = 0; + ring->hicredit = 0; + ring->locredit = 0; + u64_stats_init(&ring->tx_syncp); u64_stats_init(&ring->tx_syncp2); @@ -1598,6 +1616,394 @@ static void igb_get_hw_control(struct igb_adapter *adapter) ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); } +static void enable_fqtss(struct igb_adapter *adapter, bool enable) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + + WARN_ON(hw->mac.type != e1000_i210); + + if (enable) + adapter->flags |= IGB_FLAG_FQTSS; + else + adapter->flags &= ~IGB_FLAG_FQTSS; + + if (netif_running(netdev)) + schedule_work(&adapter->reset_task); +} + +static bool is_fqtss_enabled(struct igb_adapter *adapter) +{ + return (adapter->flags & IGB_FLAG_FQTSS) ? true : false; +} + +static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue, + enum tx_queue_prio prio) +{ + u32 val; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 4); + + val = rd32(E1000_I210_TXDCTL(queue)); + + if (prio == TX_QUEUE_PRIO_HIGH) + val |= E1000_TXDCTL_PRIORITY; + else + val &= ~E1000_TXDCTL_PRIORITY; + + wr32(E1000_I210_TXDCTL(queue), val); +} + +static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode) +{ + u32 val; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 1); + + val = rd32(E1000_I210_TQAVCC(queue)); + + if (mode == QUEUE_MODE_STREAM_RESERVATION) + val |= E1000_TQAVCC_QUEUEMODE; + else + val &= ~E1000_TQAVCC_QUEUEMODE; + + wr32(E1000_I210_TQAVCC(queue), val); +} + +static bool is_any_cbs_enabled(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]->cbs_enable) + return true; + } + + return false; +} + +static bool is_any_txtime_enabled(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]->launchtime_enable) + return true; + } + + return false; +} + +/** + * igb_config_tx_modes - Configure "Qav Tx mode" features on igb + * @adapter: pointer to adapter struct + * @queue: queue number + * + * Configure CBS and Launchtime for a given hardware queue. + * Parameters are retrieved from the correct Tx ring, so + * igb_save_cbs_params() and igb_save_txtime_params() should be used + * for setting those correctly prior to this function being called. 
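+ * + * Note that only the i210's SR-capable queues (0 and 1; see + * I210_SR_QUEUES_NUM) are expected here, which the WARN_ON() + * below enforces.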
+ **/ +static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) +{ + struct igb_ring *ring = adapter->tx_ring[queue]; + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tqavcc, tqavctrl; + u16 value; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 1); + + /* If any of the Qav features is enabled, configure queues as SR and + * with HIGH PRIO. If none is, then configure them with LOW PRIO and + * as SP. + */ + if (ring->cbs_enable || ring->launchtime_enable) { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); + set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); + } else { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW); + set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY); + } + + /* If CBS is enabled, set DataTranARB and configure its parameters. */ + if (ring->cbs_enable || queue == 0) { + /* The i210 does not allow queue 0 to be in Strict + * Priority mode while Qav mode is enabled, so, + * instead of disabling strict priority mode, we give + * queue 0 the maximum credits possible. + * + * See section 8.12.19 of the i210 datasheet, "Note: + * Queue0 QueueMode must be set to 1b when + * TransmitMode is set to Qav." + */ + if (queue == 0 && !ring->cbs_enable) { + /* max "linkspeed" idleslope in kbps */ + ring->idleslope = 1000000; + ring->hicredit = ETH_FRAME_LEN; + } + + /* Always set data transfer arbitration to credit-based + * shaper algorithm on TQAVCTRL if CBS is enabled for any of + * the queues. + */ + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl |= E1000_TQAVCTRL_DATATRANARB; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + + /* According to i210 datasheet section 7.2.7.7, we should set + * the 'idleSlope' field in the TQAVCC register following the + * equation: + * + * For 100 Mbps link speed: + * + * value = BW * 0x7735 * 0.2 (E1) + * + * For 1000 Mbps link speed: + * + * value = BW * 0x7735 * 2 (E2) + * + * E1 and E2 can be merged into one equation as shown below. + * Note that 'link-speed' is in Mbps. + * + * value = BW * 0x7735 * 2 * link-speed / 1000 (E3) + * + * 'BW' is the fraction of full link speed reserved for this + * queue, which can be found with the following equation. Note + * that idleSlope here is the parameter from this function, + * which is in kbps. + * + * BW = idleSlope / (link-speed * 1000) (E4) + * + * That said, we can come up with a generic equation for the + * value we should write to the TQAVCC register by replacing + * 'BW' in E3 with E4. The resulting equation is: + * + * value = (idleSlope / (link-speed * 1000)) * + * 0x7735 * 2 * (link-speed / 1000) (E5) + * + * 'link-speed' appears in both the numerator and the + * denominator, so it cancels out. The final equation is: + * + * value = (idleSlope * 61034) / 1000000 (E6) + * + * NOTE: For i210, given the above, we can see that idleslope + * is represented in 16.38431 kbps units by the value in + * the TQAVCC register (1Gbps / 61034), which reduces + * the granularity of idleslope increments. + * For instance, if you want to configure a 2576kbps + * idleslope, the value to be written to the register + * would have to be 157.23. If rounded down, you end + * up with less bandwidth available than originally + * required (~2572 kbps). If rounded up, you end up + * with a higher bandwidth (~2589 kbps).
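+ * + * As a worked instance of E6 for that example (plain + * arithmetic, not datasheet text): + * DIV_ROUND_UP(2576 * 61034, 1000000) = + * DIV_ROUND_UP(157223584, 1000000) = 158, and + * 158 * 1000000 / 61034 ~= 2589 kbps, which is the + * rounded-up figure quoted above.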
+ * The approach we take below is to always round up the + * calculated value, so the resulting bandwidth might + * be slightly higher for some configurations. + */ + value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000); + + tqavcc = rd32(E1000_I210_TQAVCC(queue)); + tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; + tqavcc |= value; + wr32(E1000_I210_TQAVCC(queue), tqavcc); + + wr32(E1000_I210_TQAVHC(queue), + 0x80000000 + ring->hicredit * 0x7735); + } else { + + /* Set idleSlope to zero. */ + tqavcc = rd32(E1000_I210_TQAVCC(queue)); + tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; + wr32(E1000_I210_TQAVCC(queue), tqavcc); + + /* Set hiCredit to zero. */ + wr32(E1000_I210_TQAVHC(queue), 0); + + /* If CBS is no longer enabled on any queue, then return to + * the default state of Data Transmission Arbitration on + * TQAVCTRL. + */ + if (!is_any_cbs_enabled(adapter)) { + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } + } + + /* If LaunchTime is enabled, set DataTranTIM. */ + if (ring->launchtime_enable) { + /* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled + * for any of the SR queues, and configure the fetchtime delta. + * XXX NOTE: + * - LaunchTime will be enabled for all SR queues. + * - A fixed offset can be added relative to the launch + * time of all packets if configured at reg LAUNCH_OS0. + * We are keeping it as 0 for now (default value). + */ + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl |= E1000_TQAVCTRL_DATATRANTIM | + E1000_TQAVCTRL_FETCHTIME_DELTA; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } else { + /* If Launchtime is no longer enabled on any SR queue, + * then clear DataTranTIM on TQAVCTRL and clear the fetchtime + * delta, effectively disabling Launchtime. + */ + if (!is_any_txtime_enabled(adapter)) { + tqavctrl = rd32(E1000_I210_TQAVCTRL); + tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM; + tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA; + wr32(E1000_I210_TQAVCTRL, tqavctrl); + } + } + + /* XXX: On the i210 controller the sendSlope and loCredit CBS + * parameters are not configurable by software, so we don't do any + * 'controller configuration' with respect to these parameters. + */ + + netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n", + (ring->cbs_enable) ? "enabled" : "disabled", + (ring->launchtime_enable) ? "enabled" : "disabled", queue, + ring->idleslope, ring->sendslope, ring->hicredit, + ring->locredit); +} + +static int igb_save_txtime_params(struct igb_adapter *adapter, int queue, + bool enable) +{ + struct igb_ring *ring; + + if (queue < 0 || queue > adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + ring->launchtime_enable = enable; + + return 0; +} + +static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + struct igb_ring *ring; + + if (queue < 0 || queue > adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +/** + * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable + * @adapter: pointer to adapter struct + * + * Configure the TQAVCTRL register, switching the controller's Tx mode + * when FQTSS mode is enabled or disabled.
Additionally, it will issue + * a call to igb_config_tx_modes() per queue so any previously saved + * Tx parameters are applied. + **/ +static void igb_setup_tx_mode(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 val; + + /* Only the i210 controller supports changing the transmission mode. */ + if (hw->mac.type != e1000_i210) + return; + + if (is_fqtss_enabled(adapter)) { + int i, max_queue; + + /* Configure the TQAVCTRL register: set transmit mode to 'Qav', + * set data fetch arbitration to 'round robin', set SP_WAIT_SR + * so SP queues wait for SR ones. + */ + val = rd32(E1000_I210_TQAVCTRL); + val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR; + val &= ~E1000_TQAVCTRL_DATAFETCHARB; + wr32(E1000_I210_TQAVCTRL, val); + + /* Configure the Tx and Rx packet buffer sizes as described in + * i210 datasheet section 7.2.7.7. + */ + val = rd32(E1000_TXPBS); + val &= ~I210_TXPBSIZE_MASK; + val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB | + I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB; + wr32(E1000_TXPBS, val); + + val = rd32(E1000_RXPBS); + val &= ~I210_RXPBSIZE_MASK; + val |= I210_RXPBSIZE_PB_32KB; + wr32(E1000_RXPBS, val); + + /* Section 8.12.9 states that MAX_TPKT_SIZE from the DTXMXPKTSZ + * register should not exceed the buffer size programmed in + * TXPBS. The smallest buffer size programmed in TXPBS is 4kB, + * so according to the datasheet we should set MAX_TPKT_SIZE to + * 4kB / 64. + * + * However, when we do so, no frames from queues 2 and 3 are + * transmitted. It seems MAX_TPKT_SIZE must be strictly less + * than (not equal to) the buffer size programmed in TXPBS. + * For this reason, we set MAX_TPKT_SIZE to (4kB - 1) / 64, + * i.e. 63 in the register's 64-byte units. + */ + val = (4096 - 1) / 64; + wr32(E1000_I210_DTXMXPKTSZ, val); + + /* Since FQTSS mode is enabled, apply any CBS configuration + * previously set. If no previous CBS configuration has been + * done, then the initial configuration is applied, which means + * CBS is disabled. + */ + max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ? + adapter->num_tx_queues : I210_SR_QUEUES_NUM; + + for (i = 0; i < max_queue; i++) { + igb_config_tx_modes(adapter, i); + } + } else { + wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); + wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); + wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT); + + val = rd32(E1000_I210_TQAVCTRL); + /* According to Section 8.12.21, the other flags we've set when + * enabling FQTSS are not relevant when disabling FQTSS, so we + * don't clear them here. + */ + val &= ~E1000_TQAVCTRL_XMIT_MODE; + wr32(E1000_I210_TQAVCTRL, val); + } + + netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
+ "enabled" : "disabled"); +} + /** * igb_configure - configure the hardware for RX and TX * @adapter: private board structure @@ -1609,6 +2015,7 @@ static void igb_configure(struct igb_adapter *adapter) igb_get_hw_control(adapter); igb_set_rx_mode(netdev); + igb_setup_tx_mode(adapter); igb_restore_vlan(adapter); @@ -2150,6 +2557,82 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +static void igb_offload_apply(struct igb_adapter *adapter, s32 queue) +{ + if (!is_fqtss_enabled(adapter)) { + enable_fqtss(adapter, true); + return; + } + + igb_config_tx_modes(adapter, queue); + + if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter)) + enable_fqtss(adapter, false); +} + +static int igb_offload_cbs(struct igb_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct e1000_hw *hw = &adapter->hw; + int err; + + /* CBS offloading is only supported by i210 controller. */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + /* CBS offloading is only supported by queue 0 and queue 1. */ + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + igb_offload_apply(adapter, qopt->queue); + + return 0; +} + +static int igb_offload_txtime(struct igb_adapter *adapter, + struct tc_etf_qopt_offload *qopt) +{ + struct e1000_hw *hw = &adapter->hw; + int err; + + /* Launchtime offloading is only supported by i210 controller. */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + /* Launchtime offloading is only supported by queues 0 and 1. */ + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable); + if (err) + return err; + + igb_offload_apply(adapter, qopt->queue); + + return 0; +} + +static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct igb_adapter *adapter = netdev_priv(dev); + + switch (type) { + case TC_SETUP_CBS: + return igb_offload_cbs(adapter, type_data); + case TC_SETUP_QDISC_ETF: + return igb_offload_txtime(adapter, type_data); + default: + return -EOPNOTSUPP; + } +} + static const struct net_device_ops igb_netdev_ops = { .ndo_open = igb_open, .ndo_stop = igb_close, @@ -2175,6 +2658,7 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_set_features = igb_set_features, .ndo_fdb_add = igb_ndo_fdb_add, .ndo_features_check = igb_features_check, + .ndo_setup_tc = igb_setup_tc, }; /** @@ -2443,6 +2927,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (hw->mac.type >= e1000_82576) netdev->features |= NETIF_F_SCTP_CRC; + if (hw->mac.type >= e1000_i350) + netdev->features |= NETIF_F_HW_TC; + #define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ NETIF_F_GSO_GRE_CSUM | \ NETIF_F_GSO_IPXIP4 | \ @@ -3162,6 +3649,8 @@ static int igb_sw_init(struct igb_adapter *adapter) /* Setup and initialize a copy of the hw vlan table array */ adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), GFP_ATOMIC); + if (!adapter->shadow_vfta) + return -ENOMEM; /* This call may decrease the number of queues */ if (igb_init_interrupt_scheme(adapter, true)) { @@ -3329,7 +3818,7 @@ static int __igb_close(struct net_device *netdev, bool suspending) int igb_close(struct net_device *netdev) { - if (netif_device_present(netdev)) + if (netif_device_present(netdev) || netdev->dismantle) return __igb_close(netdev, false); 
return 0; } @@ -4948,11 +5437,14 @@ static void igb_set_itr(struct igb_q_vector *q_vector) } } -static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, - u32 type_tucmd, u32 mss_l4len_idx) +static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) { struct e1000_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; + struct timespec64 ts; context_desc = IGB_TX_CTXTDESC(tx_ring, i); @@ -4967,9 +5459,18 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, mss_l4len_idx |= tx_ring->reg_idx << 4; context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = 0; context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + /* We assume there is always a valid tx time available. Invalid times + * should have been handled by the upper layers. + */ + if (tx_ring->launchtime_enable) { + ts = ns_to_timespec64(first->skb->tstamp); + context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); + } else { + context_desc->seqnum_seed = 0; + } } static int igb_tso(struct igb_ring *tx_ring, @@ -5052,7 +5553,8 @@ static int igb_tso(struct igb_ring *tx_ring, vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; - igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); + igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, + type_tucmd, mss_l4len_idx); return 1; } @@ -5107,7 +5609,7 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; - igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); + igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); } #define IGB_SET_FLAG(_input, _flag, _result) \ @@ -5395,8 +5897,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, } } - skb_tx_timestamp(skb); - if (skb_vlan_tag_present(skb)) { tx_flags |= IGB_TX_FLAGS_VLAN; tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); @@ -5412,6 +5912,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, else if (!tso) igb_tx_csum(tx_ring, first); + skb_tx_timestamp(skb); + if (igb_tx_map(tx_ring, first, hdr_len)) goto cleanup_tx_tstamp; @@ -6468,8 +6970,35 @@ static void igb_set_default_mac_filter(struct igb_adapter *adapter) igb_rar_set_index(adapter, 0); } -static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, - const u8 queue) +/* If the filter to be added and an already existing filter express + * the same address and address type, it should be possible to only + * override the other configurations, for example the queue to steer + * traffic. + */ +static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry, + const u8 *addr, const u8 flags) +{ + if (!(entry->state & IGB_MAC_STATE_IN_USE)) + return true; + + if ((entry->state & IGB_MAC_STATE_SRC_ADDR) != + (flags & IGB_MAC_STATE_SRC_ADDR)) + return false; + + if (!ether_addr_equal(addr, entry->addr)) + return false; + + return true; +} + +/* Add a MAC filter for 'addr' directing matching traffic to 'queue', + * 'flags' is used to indicate what kind of match is made, match is by + * default for the destination address, if matching by source address + * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used. 
+ */ +static int igb_add_mac_filter_flags(struct igb_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) { struct e1000_hw *hw = &adapter->hw; int rar_entries = hw->mac.rar_entry_count - @@ -6484,12 +7013,13 @@ static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, * addresses. */ for (i = 0; i < rar_entries; i++) { - if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) + if (!igb_mac_entry_can_be_used(&adapter->mac_table[i], + addr, flags)) continue; ether_addr_copy(adapter->mac_table[i].addr, addr); adapter->mac_table[i].queue = queue; - adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE; + adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags; igb_rar_set_index(adapter, i); return i; @@ -6498,8 +7028,21 @@ static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, return -ENOSPC; } -static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, +static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, const u8 queue) +{ + return igb_add_mac_filter_flags(adapter, addr, queue, 0); +} + +/* Remove a MAC filter for 'addr' directing matching traffic to + * 'queue'. 'flags' indicates what kind of match needs to be removed; + * the match is by default on the destination address, and if a + * source-address match is to be removed the flag + * IGB_MAC_STATE_SRC_ADDR can be used. + */ +static int igb_del_mac_filter_flags(struct igb_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) { struct e1000_hw *hw = &adapter->hw; int rar_entries = hw->mac.rar_entry_count - @@ -6516,14 +7059,26 @@ static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, for (i = 0; i < rar_entries; i++) { if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)) continue; + if ((adapter->mac_table[i].state & flags) != flags) + continue; if (adapter->mac_table[i].queue != queue) continue; if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) continue; - adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); - adapter->mac_table[i].queue = 0; + /* When a filter for the default address is "deleted", + * we return it to its initial configuration + */ + if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) { + adapter->mac_table[i].state = + IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; + adapter->mac_table[i].queue = + adapter->vfs_allocated_count; + } else { + adapter->mac_table[i].state = 0; + adapter->mac_table[i].queue = 0; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + } igb_rar_set_index(adapter, i); return 0; @@ -6532,6 +7087,34 @@ static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, return -ENOENT; } +static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, + const u8 queue) +{ + return igb_del_mac_filter_flags(adapter, addr, queue, 0); +} + +int igb_add_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + struct e1000_hw *hw = &adapter->hw; + + /* In theory, this should be supported on 82575 as well, but + * that part wasn't easily accessible during development.
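+ * + * For reference, this is the kind of filter the ethtool ntuple + * path above ends up requesting (illustrative command only; the + * interface name, MAC address, and queue are made up): + * ethtool -N eth0 flow-type ether dst 00:11:22:33:44:55 action 1 + * i.e. match on the full destination MAC and steer to queue 1.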
+ */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + return igb_add_mac_filter_flags(adapter, addr, queue, + IGB_MAC_STATE_QUEUE_STEERING | flags); +} + +int igb_del_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + return igb_del_mac_filter_flags(adapter, addr, queue, + IGB_MAC_STATE_QUEUE_STEERING | flags); +} + static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr) { struct igb_adapter *adapter = netdev_priv(netdev); @@ -6970,7 +7553,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) @@ -8371,14 +8954,27 @@ static void igb_rar_set_index(struct igb_adapter *adapter, u32 index) /* Indicate to hardware the Address is Valid. */ if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) { - rar_high |= E1000_RAH_AV; + if (is_valid_ether_addr(addr)) + rar_high |= E1000_RAH_AV; + + if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR) + rar_high |= E1000_RAH_ASEL_SRC_ADDR; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_i210: + if (adapter->mac_table[index].state & + IGB_MAC_STATE_QUEUE_STEERING) + rar_high |= E1000_RAH_QSEL_ENABLE; - if (hw->mac.type == e1000_82575) rar_high |= E1000_RAH_POOL_1 * adapter->mac_table[index].queue; - else + break; + default: rar_high |= E1000_RAH_POOL_1 << adapter->mac_table[index].queue; + break; + } } wr32(E1000_RAL(index), rar_low); @@ -8409,17 +9005,36 @@ static int igb_set_vf_mac(struct igb_adapter *adapter, static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct igb_adapter *adapter = netdev_priv(netdev); - if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count)) + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + + /* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC + * flag and allows the MAC to be overwritten via the VF netdev. + * This is necessary to give libvirt a way to restore the original + * MAC after unbinding vfio-pci and reloading igbvf after shutting + * down a VM.
+ */ + if (is_zero_ether_addr(mac)) { + adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, + "remove administratively set MAC on VF %d\n", + vf); + } else if (is_valid_ether_addr(mac)) { + adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", + mac, vf); + dev_info(&adapter->pdev->dev, + "Reload the VF driver to make this change effective."); + /* Generate additional warning if PF is down */ + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF MAC address has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + } else { return -EINVAL; - adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; - dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); - dev_info(&adapter->pdev->dev, - "Reload the VF driver to make this change effective."); - if (test_bit(__IGB_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, - "The VF MAC address has been set, but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, - "Bring the PF device up before attempting to use the VF device.\n"); } return igb_set_vf_mac(adapter, vf, mac); } diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 841c2a083349..0746b19ec6d3 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -643,6 +643,10 @@ static void igb_ptp_tx_work(struct work_struct *work) adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); adapter->tx_hwtstamp_timeouts++; + /* Clear the tx valid bit in TSYNCTXCTL register to enable + * interrupt + */ + rd32(E1000_TXSTMPH); dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); return; } @@ -717,6 +721,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter) */ void igb_ptp_tx_hang(struct igb_adapter *adapter) { + struct e1000_hw *hw = &adapter->hw; bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + IGB_PTP_TX_TIMEOUT); @@ -736,6 +741,10 @@ void igb_ptp_tx_hang(struct igb_adapter *adapter) adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); adapter->tx_hwtstamp_timeouts++; + /* Clear the tx valid bit in TSYNCTXCTL register to enable + * interrupt + */ + rd32(E1000_TXSTMPH); dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); } } diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 1ed556911b14..6f5888bd9194 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -810,7 +810,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 6e6ab6f6875e..64429a14c630 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3781,10 +3781,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, fw_cmd.ver_build = build; fw_cmd.ver_sub = sub; fw_cmd.hdr.checksum = 0; - fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, - (FW_CEM_HDR_LEN + 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 6e6ab6f6875e..64429a14c630 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3781,10 +3781,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
 	fw_cmd.ver_build = build;
 	fw_cmd.ver_sub = sub;
 	fw_cmd.hdr.checksum = 0;
-	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
-				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
 	fw_cmd.pad = 0;
 	fw_cmd.pad2 = 0;
+	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
 
 	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
 		ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
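The hunk above only moves statements: the checksum was being computed before the pad fields it covers were zeroed, so the value sent to firmware depended on stack garbage. The invariant is that a checksum must be computed after every byte it covers has reached its final value. A self-contained illustration with a trivial complement checksum; the struct and routine are stand-ins, not the ixgbe ones:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fw_cmd {
	uint8_t buf_len;
	uint8_t checksum;
	uint8_t pad, pad2;
};

static uint8_t calc_checksum(const uint8_t *p, size_t len)
{
	uint8_t sum = 0;
	while (len--)
		sum += *p++;
	return (uint8_t)(0xFF - sum);    /* complement checksum over the buffer */
}

int main(void)
{
	struct fw_cmd cmd;
	memset(&cmd, 0xAA, sizeof(cmd));  /* simulate uninitialized stack */
	cmd.buf_len = sizeof(cmd);
	cmd.checksum = 0;
	cmd.pad = 0;                      /* finalize ALL covered bytes ... */
	cmd.pad2 = 0;
	cmd.checksum = calc_checksum((uint8_t *)&cmd, sizeof(cmd)); /* ... then sum */

	uint8_t sum = 0;
	for (size_t i = 0; i < sizeof(cmd); i++)
		sum += ((uint8_t *)&cmd)[i];
	printf("verify: 0x%02x\n", sum);  /* 0xFF only when the order is right */
	return 0;
}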
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6d5f31e94358..b68d94b49a8a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1192,7 +1192,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* if DD is not set pending work has not been completed */
 		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
@@ -1877,6 +1877,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
 				     ixgbe_rx_pg_size(rx_ring),
 				     DMA_FROM_DEVICE,
 				     IXGBE_RX_DMA_ATTR);
+	} else if (ring_uses_build_skb(rx_ring)) {
+		unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      IXGBE_CB(skb)->dma,
+					      offset,
+					      skb_headlen(skb),
+					      DMA_FROM_DEVICE);
 	} else {
 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
 
@@ -2203,9 +2211,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
 	return skb;
 }
 
-#define IXGBE_XDP_PASS 0
-#define IXGBE_XDP_CONSUMED 1
-#define IXGBE_XDP_TX 2
+#define IXGBE_XDP_PASS		0
+#define IXGBE_XDP_CONSUMED	BIT(0)
+#define IXGBE_XDP_TX		BIT(1)
+#define IXGBE_XDP_REDIR		BIT(2)
 
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
 			       struct xdp_buff *xdp);
@@ -2234,7 +2243,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
 		if (!err)
-			result = IXGBE_XDP_TX;
+			result = IXGBE_XDP_REDIR;
 		else
 			result = IXGBE_XDP_CONSUMED;
 		break;
@@ -2294,7 +2303,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
 	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
-	bool xdp_xmit = false;
+	unsigned int xdp_xmit = 0;
 
 	while (likely(total_rx_packets < budget)) {
 		union ixgbe_adv_rx_desc *rx_desc;
@@ -2334,8 +2343,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		}
 
 		if (IS_ERR(skb)) {
-			if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
-				xdp_xmit = true;
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+				xdp_xmit |= xdp_res;
 				ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
 			} else {
 				rx_buffer->pagecnt_bias++;
@@ -2407,7 +2418,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		total_rx_packets++;
 	}
 
-	if (xdp_xmit) {
+	if (xdp_xmit & IXGBE_XDP_REDIR)
+		xdp_do_flush_map();
+
+	if (xdp_xmit & IXGBE_XDP_TX) {
 		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
 
 		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
 		 */
 		wmb();
 		writel(ring->next_to_use, ring->tail);
-
-		xdp_do_flush_map();
 	}
 
 	u64_stats_update_begin(&rx_ring->syncp);
@@ -3979,11 +3991,15 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 		rxdctl &= ~0x3FFFFF;
 		rxdctl |=  0x080420;
 #if (PAGE_SIZE < 8192)
-	} else {
+	/* RXDCTL.RLPML does not work on 82599 */
+	} else if (hw->mac.type != ixgbe_mac_82599EB) {
 		rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
 			    IXGBE_RXDCTL_RLPML_EN);
 
-		/* Limit the maximum frame size so we don't overrun the skb */
+		/* Limit the maximum frame size so we don't overrun the skb.
+		 * This can happen in SRIOV mode when the MTU of the VF is
+		 * higher than the MTU of the PF.
+		 */
 		if (ring_uses_build_skb(ring) &&
 		    !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
 			rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
@@ -7646,7 +7662,8 @@ static void ixgbe_service_task(struct work_struct *work)
 
 	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
 		ixgbe_ptp_overflow_check(adapter);
-		ixgbe_ptp_rx_hang(adapter);
+		if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
+			ixgbe_ptp_rx_hang(adapter);
 		ixgbe_ptp_tx_hang(adapter);
 	}
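Turning the XDP verdicts into BIT() flags lets the RX loop OR together every per-packet result seen during one poll cycle and then perform each kind of flush exactly once and in the right order: redirect maps are flushed before the local TX doorbell is rung. A compact sketch of that accumulate-then-flush pattern; the two flush helpers are stubs standing in for xdp_do_flush_map() and the tail-register write:

#include <stdio.h>

#define XDP_PASS     0u
#define XDP_CONSUMED (1u << 0)
#define XDP_TX       (1u << 1)
#define XDP_REDIR    (1u << 2)

static void flush_redirect_maps(void) { puts("flush redirect maps"); }
static void ring_tx_doorbell(void)    { puts("kick local XDP TX ring"); }

int main(void)
{
	unsigned int verdicts[] = { XDP_PASS, XDP_TX, XDP_REDIR, XDP_TX };
	unsigned int xdp_xmit = 0;

	for (unsigned long i = 0; i < sizeof(verdicts) / sizeof(verdicts[0]); i++)
		xdp_xmit |= verdicts[i];    /* accumulate per-packet results */

	/* One flush per mechanism per NAPI poll, not one per packet. */
	if (xdp_xmit & XDP_REDIR)
		flush_redirect_maps();
	if (xdp_xmit & XDP_TX)
		ring_tx_doorbell();
	return 0;
}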
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 19fbb2f28ea4..cf6a245db6d5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -900,6 +900,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
 		/* convert offset from words to bytes */
 		buffer.address = cpu_to_be32((offset + current_word) * 2);
 		buffer.length = cpu_to_be16(words_to_read * 2);
+		buffer.pad2 = 0;
+		buffer.pad3 = 0;
 
 		status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
 					    IXGBE_HI_COMMAND_TIMEOUT);
@@ -3411,6 +3413,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
 		hw->phy.sfp_setup_needed = false;
 	}
 
+	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+		return status;
+
 	/* Reset PHY */
 	if (!hw->phy.reset_disable && hw->phy.ops.reset)
 		hw->phy.ops.reset(hw);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 032f8ac06357..90be4385bf36 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -326,7 +326,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 			break;
 
 		/* prevent any other reads prior to eop_desc */
-		read_barrier_depends();
+		smp_rmb();
 
 		/* if DD is not set pending work has not been completed */
 		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
@@ -3737,6 +3737,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
 		return -EPERM;
 
 	ether_addr_copy(hw->mac.addr, addr->sa_data);
+	ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
 
 	return 0;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index c9798210fa0f..0495487f7b42 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -344,7 +344,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
 			       dev->regs + MVMDIO_ERR_INT_MASK);
 
 	} else if (dev->err_interrupt == -EPROBE_DEFER) {
-		return -EPROBE_DEFER;
+		ret = -EPROBE_DEFER;
+		goto out_mdio;
 	}
 
 	if (pdev->dev.of_node)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 64a04975bcf8..3deaa3413313 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -816,11 +816,14 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
 {
 	u32 val;
 
-	/* Only 255 descriptors can be added at once ; Assume caller
-	 * process TX desriptors in quanta less than 256
-	 */
-	val = pend_desc + txq->pending;
-	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+	pend_desc += txq->pending;
+
+	/* Only 255 Tx descriptors can be added at once */
+	do {
+		val = min(pend_desc, 255);
+		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+		pend_desc -= val;
+	} while (pend_desc > 0);
 	txq->pending = 0;
 }
 
@@ -1109,6 +1112,7 @@ static void mvneta_port_up(struct mvneta_port *pp)
 	}
 	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
 
+	q_map = 0;
 	/* Enable all initialized RXQs. */
 	for (queue = 0; queue < rxq_number; queue++) {
 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
@@ -1211,6 +1215,10 @@ static void mvneta_port_disable(struct mvneta_port *pp)
 	val &= ~MVNETA_GMAC0_PORT_ENABLE;
 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 
+	pp->link = 0;
+	pp->duplex = -1;
+	pp->speed = 0;
+
 	udelay(200);
 }
 
@@ -1951,13 +1959,13 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
 		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
 		index = rx_desc - rxq->descs;
 		data = rxq->buf_virt_addr[index];
-		phys_addr = rx_desc->buf_phys_addr;
+		phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction;
 
 		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
 		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+			mvneta_rx_error(pp, rx_desc);
 err_drop_frame:
 			dev->stats.rx_errors++;
-			mvneta_rx_error(pp, rx_desc);
 			/* leave the descriptor untouched */
 			continue;
 		}
@@ -3008,7 +3016,7 @@ static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
 {
 	int queue;
 
-	for (queue = 0; queue < txq_number; queue++)
+	for (queue = 0; queue < rxq_number; queue++)
 		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
 }
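The mvneta pending-descriptor register field is only 8 bits wide, so the old single write silently truncated any count above 255; the new do/while splits the count into chunks of at most 255. The arithmetic in isolation, with mvreg_write() replaced by a printing stub:

#include <stdio.h>

static void mvreg_write(int reg, unsigned int val)
{
	printf("reg %d += %u\n", reg, val);
}

/* Add an arbitrary number of pending descriptors, at most 255 per
 * register write, because the hardware field is only 8 bits wide. */
static void txq_pend_desc_add(int reg, unsigned int pend_desc)
{
	unsigned int val;

	do {
		val = pend_desc < 255 ? pend_desc : 255;
		mvreg_write(reg, val);
		pend_desc -= val;
	} while (pend_desc > 0);
}

int main(void)
{
	txq_pend_desc_add(0, 612);    /* emitted as 255 + 255 + 102 */
	return 0;
}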
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index fcf9ba5eb8d1..529be74f609d 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -4552,11 +4552,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
 		       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
 		val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
 		writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
-		val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
-		val |= MVPP2_GMAC_DISABLE_PADDING;
-		val &= ~MVPP2_GMAC_FLOW_CTRL_MASK;
-		writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
 	} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
 		val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
 		val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
@@ -4564,10 +4559,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
 		       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
 		val &= ~MVPP22_CTRL4_DP_CLK_SEL;
 		writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
-		val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
-		val &= ~MVPP2_GMAC_DISABLE_PADDING;
-		writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
 	}
 
 	/* The port is connected to a copper PHY */
@@ -5408,7 +5399,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
 	u32 txq_dma;
 
 	/* Allocate memory for TX descriptors */
-	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
+	aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
 				MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
 				&aggr_txq->descs_dma, GFP_KERNEL);
 	if (!aggr_txq->descs)
@@ -5606,7 +5597,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 					   sizeof(*txq_pcpu->buffs),
 					   GFP_KERNEL);
 		if (!txq_pcpu->buffs)
-			goto cleanup;
+			return -ENOMEM;
 
 		txq_pcpu->count = 0;
 		txq_pcpu->reserved_num = 0;
@@ -5619,26 +5610,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 							   &txq_pcpu->tso_headers_dma,
 							   GFP_KERNEL);
 			if (!txq_pcpu->tso_headers)
-				goto cleanup;
+				return -ENOMEM;
 		}
 	}
 
 	return 0;
-cleanup:
-	for_each_present_cpu(cpu) {
-		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
-		kfree(txq_pcpu->buffs);
-
-		dma_free_coherent(port->dev->dev.parent,
-				  txq_pcpu->size * TSO_HEADER_SIZE,
-				  txq_pcpu->tso_headers,
-				  txq_pcpu->tso_headers_dma);
-	}
-
-	dma_free_coherent(port->dev->dev.parent,
-			  txq->size * MVPP2_DESC_ALIGNED_SIZE,
-			  txq->descs, txq->descs_dma);
-
-	return -ENOMEM;
 }
 
 /* Free allocated TXQ resources */
@@ -6913,6 +6888,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
 	int id = port->id;
 	bool allmulti = dev->flags & IFF_ALLMULTI;
 
+retry:
 	mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
 	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
 	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
@@ -6920,9 +6896,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
 	/* Remove all port->id's mcast enries */
 	mvpp2_prs_mcast_del_all(priv, id);
 
-	if (allmulti && !netdev_mc_empty(dev)) {
-		netdev_for_each_mc_addr(ha, dev)
-			mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
+	if (!allmulti) {
+		netdev_for_each_mc_addr(ha, dev) {
+			if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
+				allmulti = true;
+				goto retry;
+			}
+		}
 	}
 }
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 1145cde2274a..b12e3a4f9439 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5087,7 +5087,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	INIT_WORK(&hw->restart_work, sky2_restart);
 
 	pci_set_drvdata(pdev, hw);
-	pdev->d3_delay = 150;
+	pdev->d3_delay = 200;
 
 	return 0;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 5e81a7263654..3fd71cf5cd60 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1959,11 +1959,12 @@ static int mtk_hw_init(struct mtk_eth *eth)
 	/* set GE2 TUNE */
 	regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
 
-	/* GE1, Force 1000M/FD, FC ON */
-	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));
-
-	/* GE2, Force 1000M/FD, FC ON */
-	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
+	/* Set linkdown as the default for each GMAC. Each MAC's own MCR is
+	 * set up with the appropriate value when mtk_phy_link_adjust() is
+	 * invoked.
+	 */
+	for (i = 0; i < MTK_MAC_COUNT; i++)
+		mtk_w32(eth, 0, MTK_MAC_MCR(i));
 
 	/* Indicates CDM to parse the MTK special tag from CPU
 	 * which also is working out for untag packets.
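The retry label in mvpp2_set_rx_mode() implements a graceful fallback: program each multicast address individually, and if the parser table runs out of entries, flip to all-multicast mode and redo the whole setup from the top. The control flow reduced to plain C; the accept function is a stub that fails once a fixed table size is exceeded:

#include <stdbool.h>
#include <stdio.h>

#define TABLE_SLOTS 2

static int mac_da_accept(int idx)
{
	return idx < TABLE_SLOTS ? 0 : -1;    /* table full beyond N entries */
}

static void set_rx_mode(int n_mcast, bool allmulti)
{
retry:
	printf("programming with allmulti=%d\n", allmulti);
	if (!allmulti) {
		for (int i = 0; i < n_mcast; i++) {
			if (mac_da_accept(i)) {
				/* Out of filter entries: fall back to
				 * accepting all multicast and reprogram. */
				allmulti = true;
				goto retry;
			}
		}
	}
}

int main(void)
{
	set_rx_mode(4, false);    /* 4 addresses > 2 slots -> falls back */
	return 0;
}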
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 5f41dc92aa68..752a72499b4f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -156,57 +156,63 @@ static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
 static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(netdev);
+	struct mlx4_en_port_profile *prof = priv->prof;
 	struct mlx4_en_dev *mdev = priv->mdev;
+	u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
 
 	if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
 		return 1;
 
 	if (priv->cee_config.pfc_state) {
 		int tc;
+		rx_ppp = prof->rx_ppp;
+		tx_ppp = prof->tx_ppp;
 
-		priv->prof->rx_pause = 0;
-		priv->prof->tx_pause = 0;
 		for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
 			u8 tc_mask = 1 << tc;
 
 			switch (priv->cee_config.dcb_pfc[tc]) {
 			case pfc_disabled:
-				priv->prof->tx_ppp &= ~tc_mask;
-				priv->prof->rx_ppp &= ~tc_mask;
+				tx_ppp &= ~tc_mask;
+				rx_ppp &= ~tc_mask;
 				break;
 			case pfc_enabled_full:
-				priv->prof->tx_ppp |= tc_mask;
-				priv->prof->rx_ppp |= tc_mask;
+				tx_ppp |= tc_mask;
+				rx_ppp |= tc_mask;
 				break;
 			case pfc_enabled_tx:
-				priv->prof->tx_ppp |= tc_mask;
-				priv->prof->rx_ppp &= ~tc_mask;
+				tx_ppp |= tc_mask;
+				rx_ppp &= ~tc_mask;
 				break;
 			case pfc_enabled_rx:
-				priv->prof->tx_ppp &= ~tc_mask;
-				priv->prof->rx_ppp |= tc_mask;
+				tx_ppp &= ~tc_mask;
+				rx_ppp |= tc_mask;
 				break;
 			default:
 				break;
 			}
 		}
-		en_dbg(DRV, priv, "Set pfc on\n");
+		rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause;
+		tx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->tx_pause;
 	} else {
-		priv->prof->rx_pause = 1;
-		priv->prof->tx_pause = 1;
-		en_dbg(DRV, priv, "Set pfc off\n");
+		rx_ppp = 0;
+		tx_ppp = 0;
+		rx_pause = prof->rx_pause;
+		tx_pause = prof->tx_pause;
 	}
 
 	if (mlx4_SET_PORT_general(mdev->dev, priv->port,
 				  priv->rx_skb_size + ETH_FCS_LEN,
-				  priv->prof->tx_pause,
-				  priv->prof->tx_ppp,
-				  priv->prof->rx_pause,
-				  priv->prof->rx_ppp)) {
+				  tx_pause, tx_ppp, rx_pause, rx_ppp)) {
 		en_err(priv, "Failed setting pause params\n");
 		return 1;
 	}
 
+	prof->tx_ppp = tx_ppp;
+	prof->rx_ppp = rx_ppp;
+	prof->tx_pause = tx_pause;
+	prof->rx_pause = rx_pause;
+
 	return 0;
 }
 
@@ -310,6 +316,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
 		}
 
 		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_VENDOR:
 		case IEEE_8021QAZ_TSA_STRICT:
 			break;
 		case IEEE_8021QAZ_TSA_ETS:
@@ -347,6 +354,10 @@ static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
 	/* higher TC means higher priority => lower pg */
 	for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
 		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_VENDOR:
+			pg[i] = MLX4_EN_TC_VENDOR;
+			tc_tx_bw[i] = MLX4_EN_BW_MAX;
+			break;
 		case IEEE_8021QAZ_TSA_STRICT:
 			pg[i] = num_strict++;
 			tc_tx_bw[i] = MLX4_EN_BW_MAX;
@@ -403,6 +414,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_port_profile *prof = priv->prof;
 	struct mlx4_en_dev *mdev = priv->mdev;
+	u32 tx_pause, tx_ppp, rx_pause, rx_ppp;
 	int err;
 
 	en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
@@ -411,23 +423,26 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
 	       pfc->mbc,
 	       pfc->delay);
 
-	prof->rx_pause = !pfc->pfc_en;
-	prof->tx_pause = !pfc->pfc_en;
-	prof->rx_ppp = pfc->pfc_en;
-	prof->tx_ppp = pfc->pfc_en;
+	rx_pause = prof->rx_pause && !pfc->pfc_en;
+	tx_pause = prof->tx_pause && !pfc->pfc_en;
+	rx_ppp = pfc->pfc_en;
+	tx_ppp = pfc->pfc_en;
 
 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
 				    priv->rx_skb_size + ETH_FCS_LEN,
-				    prof->tx_pause,
-				    prof->tx_ppp,
-				    prof->rx_pause,
-				    prof->rx_ppp);
-	if (err)
+				    tx_pause, tx_ppp, rx_pause, rx_ppp);
+	if (err) {
 		en_err(priv, "Failed setting pause params\n");
-	else
-		mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
-						prof->rx_ppp, prof->rx_pause,
-						prof->tx_ppp, prof->tx_pause);
+		return err;
+	}
+
+	mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+					rx_ppp, rx_pause, tx_ppp, tx_pause);
+
+	prof->tx_ppp = tx_ppp;
+	prof->rx_ppp = rx_ppp;
+	prof->rx_pause = rx_pause;
+	prof->tx_pause = tx_pause;
 
 	return err;
 }
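Throughout this file the pattern is the same: compute the new pause/PFC settings in locals, push them to firmware, and only update the cached profile after the command succeeds, so a failed mlx4_SET_PORT_general() leaves the profile consistent with the hardware. Global pause is also forced off whenever any per-priority flow-control bit is set, since the two modes are mutually exclusive. The decision step alone, using a u8 priority bitmap per direction as the driver does:

#include <stdint.h>
#include <stdio.h>

struct profile { uint8_t rx_pause, tx_pause, rx_ppp, tx_ppp; };

/* Derive effective settings: any PFC bit disables global pause. */
static void resolve(const struct profile *prof, uint8_t rx_ppp, uint8_t tx_ppp,
		    uint8_t *rx_pause, uint8_t *tx_pause)
{
	*rx_pause = (rx_ppp || tx_ppp) ? 0 : prof->rx_pause;
	*tx_pause = (rx_ppp || tx_ppp) ? 0 : prof->tx_pause;
}

int main(void)
{
	struct profile prof = { .rx_pause = 1, .tx_pause = 1 };
	uint8_t rx_pause, tx_pause;

	resolve(&prof, 0x09, 0x09, &rx_pause, &tx_pause); /* PFC on prio 0,3 */
	printf("global pause rx=%u tx=%u\n", rx_pause, tx_pause); /* both 0 */
	return 0;
}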
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 3d4e4a5d00d1..5fe56dc4cfae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1013,6 +1013,22 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
 	if (!coal->tx_max_coalesced_frames_irq)
 		return -EINVAL;
 
+	if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+	    coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+	    coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
+	    coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
+		netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
+			    __func__, MLX4_EN_MAX_COAL_TIME);
+		return -ERANGE;
+	}
+
+	if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
+	    coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
+		netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
+			    __func__, MLX4_EN_MAX_COAL_PKTS);
+		return -ERANGE;
+	}
+
 	priv->rx_frames = (coal->rx_max_coalesced_frames ==
 			   MLX4_EN_AUTO_CONF) ?
 				MLX4_EN_RX_COAL_TARGET :
@@ -1046,27 +1062,32 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
+	u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
 	int err;
 
 	if (pause->autoneg)
 		return -EINVAL;
 
-	priv->prof->tx_pause = pause->tx_pause != 0;
-	priv->prof->rx_pause = pause->rx_pause != 0;
+	tx_pause = !!(pause->tx_pause);
+	rx_pause = !!(pause->rx_pause);
+	rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
+	tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
+
 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
 				    priv->rx_skb_size + ETH_FCS_LEN,
-				    priv->prof->tx_pause,
-				    priv->prof->tx_ppp,
-				    priv->prof->rx_pause,
-				    priv->prof->rx_ppp);
-	if (err)
-		en_err(priv, "Failed setting pause params\n");
-	else
-		mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
-						priv->prof->rx_ppp,
-						priv->prof->rx_pause,
-						priv->prof->tx_ppp,
-						priv->prof->tx_pause);
+				    tx_pause, tx_ppp, rx_pause, rx_ppp);
+	if (err) {
+		en_err(priv, "Failed setting pause params, err = %d\n", err);
+		return err;
+	}
+
+	mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+					rx_ppp, rx_pause, tx_ppp, tx_pause);
+
+	priv->prof->tx_pause = tx_pause;
+	priv->prof->rx_pause = rx_pause;
+	priv->prof->tx_ppp = tx_ppp;
+	priv->prof->rx_ppp = rx_ppp;
 
 	return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 686e18de9a97..6b2f7122b3ab 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -163,9 +163,9 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 		params->udp_rss = 0;
 	}
 	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
-		params->prof[i].rx_pause = 1;
+		params->prof[i].rx_pause = !(pfcrx || pfctx);
 		params->prof[i].rx_ppp = pfcrx;
-		params->prof[i].tx_pause = 1;
+		params->prof[i].tx_pause = !(pfcrx || pfctx);
 		params->prof[i].tx_ppp = pfctx;
 		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
 		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 9c218f1cfc6c..faa4bd21f148 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3318,12 +3318,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 					   MAX_TX_RINGS, GFP_KERNEL);
 		if (!priv->tx_ring[t]) {
 			err = -ENOMEM;
-			goto err_free_tx;
+			goto out;
 		}
 		priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
 					 MAX_TX_RINGS, GFP_KERNEL);
 		if (!priv->tx_cq[t]) {
-			kfree(priv->tx_ring[t]);
 			err = -ENOMEM;
 			goto out;
 		}
@@ -3335,6 +3334,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->msg_enable = MLX4_EN_MSG_LEVEL;
 #ifdef CONFIG_MLX4_EN_DCB
 	if (!mlx4_is_slave(priv->mdev->dev)) {
+		u8 prio;
+
+		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
+			priv->ets.prio_tc[prio] = prio;
+			priv->ets.tc_tsa[prio]  = IEEE_8021QAZ_TSA_VENDOR;
+		}
+
 		priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE |
 				 DCB_CAP_DCBX_HOST |
 				 DCB_CAP_DCBX_VER_IEEE;
 		priv->flags |= MLX4_EN_DCB_ENABLED;
@@ -3569,11 +3575,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 
 	return 0;
 
-err_free_tx:
-	while (t--) {
-		kfree(priv->tx_ring[t]);
-		kfree(priv->tx_cq[t]);
-	}
 out:
 	mlx4_en_destroy_netdev(dev);
 	return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b97a55c827eb..ab2a9dbb46c7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -472,10 +472,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 {
 	const struct mlx4_en_frag_info *frag_info = priv->frag_info;
 	unsigned int truesize = 0;
+	bool release = true;
 	int nr, frag_size;
 	struct page *page;
 	dma_addr_t dma;
-	bool release;
 
 	/* Collect used fragments while replacing them in the HW descriptors */
 	for (nr = 0;; frags++) {
@@ -498,7 +498,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 			release = page_count(page) != 1 ||
 				  page_is_pfmemalloc(page) ||
 				  page_to_nid(page) != numa_mem_id();
-		} else {
+		} else if (!priv->rx_headroom) {
+			/* rx_headroom for a non-XDP setup is always 0.
+			 * When XDP is set, the condition above guarantees
+			 * that the page is always released.
+			 */
 			u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
 
 			frags->page_offset += sz_align;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index e61c99ef741d..c273a3ebb8e8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3007,6 +3007,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
 		mlx4_err(dev, "Failed to create file for port %d\n", port);
 		devlink_port_unregister(&info->devlink_port);
 		info->port = -1;
+		return err;
 	}
 
 	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
@@ -3028,9 +3029,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
 				   &info->port_attr);
 		devlink_port_unregister(&info->devlink_port);
 		info->port = -1;
+		return err;
 	}
 
-	return err;
+	return 0;
 }
 
 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index fdb3ad0cbe54..09f4764a3f39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -131,6 +131,9 @@
 #define MLX4_EN_TX_COAL_PKTS	16
 #define MLX4_EN_TX_COAL_TIME	0x10
 
+#define MLX4_EN_MAX_COAL_PKTS	U16_MAX
+#define MLX4_EN_MAX_COAL_TIME	U16_MAX
+
 #define MLX4_EN_RX_RATE_LOW		400000
 #define MLX4_EN_RX_COAL_TIME_LOW	0
 #define MLX4_EN_RX_RATE_HIGH		450000
@@ -476,6 +479,7 @@ struct mlx4_en_frag_info {
 #define MLX4_EN_BW_MIN 1
 #define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
 
+#define MLX4_EN_TC_VENDOR 0
 #define MLX4_EN_TC_ETS 7
 
 enum dcb_pfc_type {
@@ -546,8 +550,8 @@ struct mlx4_en_priv {
 	u16 rx_usecs_low;
 	u32 pkt_rate_high;
 	u16 rx_usecs_high;
-	u16 sample_interval;
-	u16 adaptive_rx_coal;
+	u32 sample_interval;
+	u32 adaptive_rx_coal;
 	u32 msg_enable;
 	u32 loopback_ok;
 	u32 validate_loopback;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 728a2fb1f5c0..73419224367a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -287,6 +287,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
 	u64 in_param = 0;
 	int err;
 
+	if (!cnt)
+		return;
+
 	if (mlx4_is_mfunc(dev)) {
 		set_param_l(&in_param, base_qpn);
 		set_param_h(&in_param, cnt);
@@ -390,11 +393,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
 {
 	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
 	struct mlx4_qp *qp;
 
-	spin_lock(&qp_table->lock);
+	spin_lock_irq(&qp_table->lock);
 
 	qp = __mlx4_qp_lookup(dev, qpn);
 
-	spin_unlock(&qp_table->lock);
+	spin_unlock_irq(&qp_table->lock);
 
 	return qp;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index fabb53379727..b26da0952a4d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2957,7 +2957,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
 	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
 	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
 	struct res_srq *srq;
-	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+	int local_qpn = vhcr->in_modifier & 0xffffff;
 
 	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
 	if (err)
@@ -5089,6 +5089,7 @@ static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
 						  &tracker->res_tree[RES_FS_RULE]);
 				list_del(&fs_rule->com.list);
 				spin_unlock_irq(mlx4_tlock(dev));
+				kfree(fs_rule->mirr_mbox);
 				kfree(fs_rule);
 				state = 0;
 				break;
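mlx4_qp_lookup() moves to spin_lock_irq() because the qp_table lock is also taken from completion-interrupt context; holding it with interrupts enabled can deadlock the moment that interrupt fires on the same CPU. The closest user-space analogue is masking a signal while holding a lock that the signal handler also contends for. A runnable sketch of that analogue (all names are illustrative; this is not kernel code):

#include <signal.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag lock = ATOMIC_FLAG_INIT;

/* "Interrupt" side: takes the same lock. Would spin forever if it
 * interrupted the thread that currently holds it. */
static void handler(int sig)
{
	(void)sig;
	while (atomic_flag_test_and_set(&lock))
		;
	atomic_flag_clear(&lock);
}

int main(void)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGALRM);
	signal(SIGALRM, handler);

	/* spin_lock_irq() analogue: mask the "interrupt", then lock. */
	sigprocmask(SIG_BLOCK, &set, NULL);
	while (atomic_flag_test_and_set(&lock))
		;
	raise(SIGALRM);               /* would deadlock if not masked */
	atomic_flag_clear(&lock);
	sigprocmask(SIG_UNBLOCK, &set, NULL); /* handler runs now, lock free */

	puts("no deadlock");
	return 0;
}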
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index fdaef00465d7..576b61c119bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -46,7 +46,7 @@ config MLX5_MPFS
 
 config MLX5_ESWITCH
 	bool "Mellanox Technologies MLX5 SRIOV E-Switch support"
-	depends on MLX5_CORE_EN
+	depends on MLX5_CORE_EN && NET_SWITCHDEV
 	default y
 	---help---
 	  Mellanox Technologies Ethernet SRIOV E-Switch support in ConnectX NIC.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 1fffdebbc9e8..cf94fdf25155 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -362,7 +362,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
 	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
 	case MLX5_CMD_OP_QUERY_Q_COUNTER:
-	case MLX5_CMD_OP_SET_RATE_LIMIT:
+	case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
 	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
 	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
 	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
@@ -505,7 +505,7 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
-	MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+	MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
 	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
 	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
 	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
@@ -801,6 +801,7 @@ static void cmd_work_handler(struct work_struct *work)
 	unsigned long flags;
 	bool poll_cmd = ent->polling;
 	int alloc_ret;
+	int cmd_mode;
 
 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 	down(sem);
@@ -847,6 +848,7 @@ static void cmd_work_handler(struct work_struct *work)
 	set_signature(ent, !cmd->checksum_disabled);
 	dump_command(dev, ent, 1);
 	ent->ts1 = ktime_get_ns();
+	cmd_mode = cmd->mode;
 
 	if (ent->callback)
 		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
@@ -871,7 +873,7 @@ static void cmd_work_handler(struct work_struct *work)
 	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
 	mmiowb();
 	/* if not in polling don't use ent after this point */
-	if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
+	if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
 		poll_timeout(ent);
 		/* make sure we read the descriptor after ownership is SW */
 		rmb();
@@ -1272,7 +1274,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 {
 	struct mlx5_core_dev *dev = filp->private_data;
 	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-	char outlen_str[8];
+	char outlen_str[8] = {0};
 	int outlen;
 	void *ptr;
 	int err;
@@ -1287,8 +1289,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 	if (copy_from_user(outlen_str, buf, count))
 		return -EFAULT;
 
-	outlen_str[7] = 0;
-
 	err = sscanf(outlen_str, "%d", &outlen);
 	if (err < 0)
 		return err;
@@ -1802,7 +1802,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 
 	cmd->checksum_disabled = 1;
 	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
-	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
+	cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
 
 	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
 	if (cmd->cmdif_rev > CMD_IF_REV) {
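The 1 to 1UL change in mlx5_cmd_init() matters because max_reg_cmds can reach 31: (1 << 31) shifts into the sign bit of a 32-bit int, which is undefined behavior, while 1UL keeps the arithmetic in a wide unsigned type. A standalone demonstration of the safe mask idiom:

#include <stdio.h>

/* Build an n-bit mask without shifting into the sign bit of int. */
static unsigned long mask_of(unsigned int n)
{
	return (1UL << n) - 1;    /* well defined for n < bits in long */
}

int main(void)
{
	unsigned int n = 31;

	/* (1 << 31) is undefined for 32-bit int; the 1UL form is well
	 * defined and yields the expected 0x7fffffff. */
	printf("mask(%u) = 0x%lx\n", n, mask_of(n));
	return 0;
}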
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 13b5ef9d8703..5fa071620104 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -590,6 +590,7 @@ struct mlx5e_channel {
 	struct mlx5_core_dev      *mdev;
 	struct mlx5e_tstamp       *tstamp;
 	int                        ix;
+	int                        cpu;
 };
 
 struct mlx5e_channels {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 12d3ced61114..e87923e046c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
 	HLIST_HEAD(del_list);
 	spin_lock_bh(&priv->fs.arfs.arfs_lock);
 	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
-		if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
-			break;
 		if (!work_pending(&arfs_rule->arfs_work) &&
 		    rps_may_expire_flow(priv->netdev,
 					arfs_rule->rxq, arfs_rule->flow_id,
 					arfs_rule->filter_id)) {
 			hlist_del_init(&arfs_rule->hlist);
 			hlist_add_head(&arfs_rule->hlist, &del_list);
+			if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+				break;
 		}
 	}
 	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
@@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	    skb->protocol != htons(ETH_P_IPV6))
 		return -EPROTONOSUPPORT;
 
+	if (skb->encapsulation)
+		return -EPROTONOSUPPORT;
+
 	arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
 	if (!arfs_t)
 		return -EPROTONOSUPPORT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 84dd63e74041..27040009d87a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -545,6 +545,7 @@ void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 {
 	struct mlx5e_tstamp *tstamp = &priv->tstamp;
+	u64 overflow_cycles;
 	u64 ns;
 	u64 frac = 0;
 	u32 dev_freq;
@@ -569,10 +570,17 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 
 	/* Calculate period in seconds to call the overflow watchdog - to make
 	 * sure counter is checked at least once every wrap around.
+	 * The period is calculated as the minimum between the max HW cycle
+	 * count (the clock source mask) and the max number of cycles that
+	 * can be multiplied by the clock multiplier without the result
+	 * exceeding 64 bits.
 	 */
-	ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask,
+	overflow_cycles = div64_u64(~0ULL >> 1, tstamp->cycles.mult);
+	overflow_cycles = min(overflow_cycles, tstamp->cycles.mask >> 1);
+
+	ns = cyclecounter_cyc2ns(&tstamp->cycles, overflow_cycles,
 				 frac, &frac);
-	do_div(ns, NSEC_PER_SEC / 2 / HZ);
+	do_div(ns, NSEC_PER_SEC / HZ);
 	tstamp->overflow_period = ns;
 
 	INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
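The new arithmetic bounds how far the cyclecounter may advance between watchdog runs: at most half the counter's wrap period, and never so many cycles that cycles * mult overflows 64 bits inside the conversion. The same computation outside the kernel, with cyclecounter_cyc2ns() reduced to its multiply-shift core; the mask/mult/shift/HZ values are made up for the example:

#include <inttypes.h>
#include <stdio.h>

static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;  /* valid only while cycles*mult fits */
}

int main(void)
{
	uint64_t mask  = (1ULL << 41) - 1; /* 41-bit free-running counter */
	uint32_t mult  = 16777216;         /* example multiplier (2^24) */
	uint32_t shift = 24;
	int      hz    = 100;

	/* Largest cycle count whose product with mult stays below 2^63 ... */
	uint64_t overflow_cycles = (~0ULL >> 1) / mult;
	/* ... further capped at half the counter wrap-around. */
	if (overflow_cycles > mask >> 1)
		overflow_cycles = mask >> 1;

	uint64_t ns = cyc2ns(overflow_cycles, mult, shift);
	printf("check at least every %" PRIu64 " jiffies\n",
	       ns / (1000000000ULL / hz));
	return 0;
}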
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 51c4cc00a186..9d64d0759ee9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -259,6 +259,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
 				    struct ieee_ets *ets)
 {
+	bool have_ets_tc = false;
 	int bw_sum = 0;
 	int i;
 
@@ -273,11 +274,14 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
 	}
 
 	/* Validate Bandwidth Sum */
-	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+			have_ets_tc = true;
 			bw_sum += ets->tc_tx_bw[i];
+		}
+	}
 
-	if (bw_sum != 0 && bw_sum != 100) {
+	if (have_ets_tc && bw_sum != 100) {
 		netdev_err(netdev,
 			   "Failed to validate ETS: BW sum is illegal\n");
 		return -EINVAL;
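The have_ets_tc flag fixes a validation corner case: with no ETS TCs at all there is nothing to validate, but the old "bw_sum != 0" test also let through a config where every ETS TC had 0% bandwidth. As soon as one TC uses ETS, the sum must be exactly 100. A stand-alone version of the check (constants inlined; 2 is the real IEEE_8021QAZ_TSA_ETS value):

#include <stdbool.h>
#include <stdio.h>

#define MAX_TCS 8
#define TSA_ETS 2    /* IEEE_8021QAZ_TSA_ETS */

static bool ets_bw_valid(const int tsa[MAX_TCS], const int bw[MAX_TCS])
{
	bool have_ets_tc = false;
	int bw_sum = 0;

	for (int i = 0; i < MAX_TCS; i++) {
		if (tsa[i] == TSA_ETS) {
			have_ets_tc = true;
			bw_sum += bw[i];
		}
	}
	/* No ETS TC -> nothing to validate; otherwise sum must be 100. */
	return !have_ets_tc || bw_sum == 100;
}

int main(void)
{
	int strict_only[MAX_TCS] = {0};          /* no ETS TCs: valid */
	int bw_zero[MAX_TCS]     = {0};
	int mixed_tsa[MAX_TCS]   = {TSA_ETS, TSA_ETS};
	int mixed_bw[MAX_TCS]    = {60, 40};     /* sums to 100: valid */

	printf("%d %d\n", ets_bw_valid(strict_only, bw_zero),
	       ets_bw_valid(mixed_tsa, mixed_bw));
	return 0;
}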
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index cc11bbbd0309..bf34264c734b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -71,11 +71,6 @@ struct mlx5e_channel_param {
 	struct mlx5e_cq_param      icosq_cq;
 };
 
-static int mlx5e_get_node(struct mlx5e_priv *priv, int ix)
-{
-	return pci_irq_get_node(priv->mdev->pdev, MLX5_EQ_VEC_COMP_BASE + ix);
-}
-
 static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 {
 	return MLX5_CAP_GEN(mdev, striding_rq) &&
@@ -452,17 +447,16 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
 	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
 	int mtt_sz = mlx5e_get_wqe_mtt_sz();
 	int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
-	int node = mlx5e_get_node(c->priv, c->ix);
 	int i;
 
 	rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
-				      GFP_KERNEL, node);
+				      GFP_KERNEL, cpu_to_node(c->cpu));
 	if (!rq->mpwqe.info)
 		goto err_out;
 
 	/* We allocate more than mtt_sz as we will align the pointer */
-	rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz,
-					      GFP_KERNEL, node);
+	rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
+					      cpu_to_node(c->cpu));
 	if (unlikely(!rq->mpwqe.mtt_no_align))
 		goto err_free_wqe_info;
 
@@ -570,7 +564,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	int err;
 	int i;
 
-	rqp->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+	rqp->wq.db_numa_node = cpu_to_node(c->cpu);
 
 	err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
 				&rq->wq_ctrl);
@@ -636,8 +630,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	default: /* MLX5_WQ_TYPE_LINKED_LIST */
 		rq->wqe.frag_info =
 			kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
-				     GFP_KERNEL,
-				     mlx5e_get_node(c->priv, c->ix));
+				     GFP_KERNEL, cpu_to_node(c->cpu));
 		if (!rq->wqe.frag_info) {
 			err = -ENOMEM;
 			goto err_rq_wq_destroy;
@@ -1007,13 +1000,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
 	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
 	sq->min_inline_mode = params->tx_min_inline_mode;
 
-	param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+	param->wq.db_numa_node = cpu_to_node(c->cpu);
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
 	if (err)
 		return err;
 	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
 
-	err = mlx5e_alloc_xdpsq_db(sq, mlx5e_get_node(c->priv, c->ix));
+	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
 	if (err)
 		goto err_sq_wq_destroy;
 
@@ -1060,13 +1053,13 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 	sq->channel   = c;
 	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
 
-	param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+	param->wq.db_numa_node = cpu_to_node(c->cpu);
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
 	if (err)
 		return err;
 	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
 
-	err = mlx5e_alloc_icosq_db(sq, mlx5e_get_node(c->priv, c->ix));
+	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
 	if (err)
 		goto err_sq_wq_destroy;
 
@@ -1132,13 +1125,13 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 	if (MLX5_IPSEC_DEV(c->priv->mdev))
 		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
 
-	param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+	param->wq.db_numa_node = cpu_to_node(c->cpu);
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
 	if (err)
 		return err;
 	sq->wq.db    = &sq->wq.db[MLX5_SND_DBR];
 
-	err = mlx5e_alloc_txqsq_db(sq, mlx5e_get_node(c->priv, c->ix));
+	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
 	if (err)
 		goto err_sq_wq_destroy;
 
@@ -1510,8 +1503,8 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,
 	struct mlx5_core_dev *mdev = c->priv->mdev;
 	int err;
 
-	param->wq.buf_numa_node = mlx5e_get_node(c->priv, c->ix);
-	param->wq.db_numa_node  = mlx5e_get_node(c->priv, c->ix);
+	param->wq.buf_numa_node = cpu_to_node(c->cpu);
+	param->wq.db_numa_node  = cpu_to_node(c->cpu);
 	param->eq_ix   = c->ix;
 
 	err = mlx5e_alloc_cq_common(mdev, param, cq);
@@ -1610,6 +1603,11 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
 	mlx5e_free_cq(cq);
 }
 
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
+
 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
 			     struct mlx5e_params *params,
 			     struct mlx5e_channel_param *cparam)
@@ -1758,12 +1756,13 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 {
 	struct mlx5e_cq_moder icocq_moder = {0, 0};
 	struct net_device *netdev = priv->netdev;
+	int cpu = mlx5e_get_cpu(priv, ix);
 	struct mlx5e_channel *c;
 	unsigned int irq;
 	int err;
 	int eqn;
 
-	c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix));
+	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
 	if (!c)
 		return -ENOMEM;
 
@@ -1771,6 +1770,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	c->mdev     = priv->mdev;
 	c->tstamp   = &priv->tstamp;
 	c->ix       = ix;
+	c->cpu      = cpu;
 	c->pdev     = &priv->mdev->pdev->dev;
 	c->netdev   = priv->netdev;
 	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
@@ -1859,8 +1859,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
 	for (tc = 0; tc < c->num_tc; tc++)
 		mlx5e_activate_txqsq(&c->sq[tc]);
 	mlx5e_activate_rq(&c->rq);
-	netif_set_xps_queue(c->netdev,
-			    mlx5_get_vector_affinity(c->priv->mdev, c->ix), c->ix);
+	netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
 }
 
 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -1919,13 +1918,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
 	param->wq.linear = 1;
 }
 
-static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
+static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
+				      struct mlx5e_rq_param *param)
 {
 	void *rqc = param->rqc;
 	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
 
 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
 	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+
+	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
 }
 
 static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@@ -2624,7 +2626,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
 	mlx5e_activate_channels(&priv->channels);
 	netif_tx_start_all_queues(priv->netdev);
 
-	if (MLX5_VPORT_MANAGER(priv->mdev))
+	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_add_sqs_fwd_rules(priv);
 
 	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2635,7 +2637,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 {
 	mlx5e_redirect_rqts_to_drop(priv);
 
-	if (MLX5_VPORT_MANAGER(priv->mdev))
+	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_remove_sqs_fwd_rules(priv);
 
 	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
@@ -2716,6 +2718,9 @@ int mlx5e_open(struct net_device *netdev)
 		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
 	mutex_unlock(&priv->state_lock);
 
+	if (mlx5e_vxlan_allowed(priv->mdev))
+		udp_tunnel_get_rx_info(netdev);
+
 	return err;
 }
 
@@ -2779,6 +2784,9 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
 			       struct mlx5e_cq *cq,
 			       struct mlx5e_cq_param *param)
 {
+	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
+	param->wq.db_numa_node  = dev_to_node(&mdev->pdev->dev);
+
 	return mlx5e_alloc_cq_common(mdev, param, cq);
 }
 
@@ -2790,7 +2798,7 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
 	struct mlx5e_cq *cq = &drop_rq->cq;
 	int err;
 
-	mlx5e_build_drop_rq_param(&rq_param);
+	mlx5e_build_drop_rq_param(mdev, &rq_param);
 
 	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
 	if (err)
@@ -3554,6 +3562,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
 						     struct sk_buff *skb,
 						     netdev_features_t features)
 {
+	unsigned int offset = 0;
 	struct udphdr *udph;
 	u8 proto;
 	u16 port;
@@ -3563,7 +3572,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
 		proto = ip_hdr(skb)->protocol;
 		break;
 	case htons(ETH_P_IPV6):
-		proto = ipv6_hdr(skb)->nexthdr;
+		proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
 		break;
 	default:
 		goto out;
@@ -4013,7 +4022,7 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
 	}
 }
 
-#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH)
+#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
 static const struct switchdev_ops mlx5e_switchdev_ops = {
 	.switchdev_port_attr_get	= mlx5e_attr_get,
 };
@@ -4117,8 +4126,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 
 	mlx5e_set_netdev_dev_addr(netdev);
 
-#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH)
-	if (MLX5_VPORT_MANAGER(mdev))
+#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
+	if (MLX5_ESWITCH_MANAGER(mdev))
 		netdev->switchdev_ops = &mlx5e_switchdev_ops;
 #endif
 
@@ -4264,19 +4273,12 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
 	mlx5e_enable_async_events(priv);
 
-	if (MLX5_VPORT_MANAGER(priv->mdev))
+	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_register_vport_reps(priv);
 
 	if (netdev->reg_state != NETREG_REGISTERED)
 		return;
 
-	/* Device already registered: sync netdev system state */
-	if (mlx5e_vxlan_allowed(mdev)) {
-		rtnl_lock();
-		udp_tunnel_get_rx_info(netdev);
-		rtnl_unlock();
-	}
-
 	queue_work(priv->wq, &priv->set_rx_mode_work);
 
 	rtnl_lock();
@@ -4298,7 +4300,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 
 	queue_work(priv->wq, &priv->set_rx_mode_work);
 
-	if (MLX5_VPORT_MANAGER(priv->mdev))
+	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_unregister_vport_reps(priv);
 
 	mlx5e_disable_async_events(priv);
@@ -4481,7 +4483,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
 		return NULL;
 
 #ifdef CONFIG_MLX5_ESWITCH
-	if (MLX5_VPORT_MANAGER(mdev)) {
+	if (MLX5_ESWITCH_MANAGER(mdev)) {
 		rpriv = mlx5e_alloc_nic_rep_priv(mdev);
 		if (!rpriv) {
 			mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
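The en_main.c changes replace per-channel NUMA-node lookups with a single mlx5e_get_cpu() decision: each channel is pinned to the first CPU in its IRQ's affinity mask, and every allocation node and the XPS transmit mapping are derived from that one CPU, so memory placement, interrupt handling, and TX steering all agree. A sketch of the mapping logic with stubbed-out topology data; irq_first_cpu() and cpu_node() are illustrative stand-ins, not kernel APIs:

#include <stdio.h>

#define NCPUS 8

/* Stand-in for cpumask_first() of channel ix's IRQ affinity mask. */
static int irq_first_cpu(int ix)
{
	return ix % NCPUS;
}

/* Stand-in for cpu_to_node(): two sockets, 4 CPUs each. */
static int cpu_node(int cpu)
{
	return cpu / 4;
}

int main(void)
{
	for (int ix = 0; ix < 4; ix++) {
		int cpu = irq_first_cpu(ix);    /* decided once per channel */

		/* ring memory goes on cpu's node, XPS queue ix -> cpu */
		printf("channel %d: cpu %d, numa node %d, xps mask 0x%x\n",
		       ix, cpu, cpu_node(cpu), 1 << cpu);
	}
	return 0;
}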
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 45e03c427faf..281911698f72 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -43,6 +43,11 @@
 #include "en_tc.h"
 #include "fs_core.h"
 
+#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
+	max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
+#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \
+	max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)
+
 static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
 
 static void mlx5e_rep_get_drvinfo(struct net_device *dev,
@@ -230,7 +235,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
 static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
 {
 #if IS_ENABLED(CONFIG_IPV6)
-	unsigned long ipv6_interval = NEIGH_VAR(&ipv6_stub->nd_tbl->parms,
+	unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
 						DELAY_PROBE_TIME);
 #else
 	unsigned long ipv6_interval = ~0UL;
@@ -366,7 +371,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
 	case NETEVENT_NEIGH_UPDATE:
 		n = ptr;
 #if IS_ENABLED(CONFIG_IPV6)
-		if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
+		if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
 #else
 		if (n->tbl != &arp_tbl)
 #endif
@@ -414,7 +419,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
 		 * done per device delay prob time parameter.
 		 */
 #if IS_ENABLED(CONFIG_IPV6)
-		if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
+		if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
#else
 		if (!p->dev || p->tbl != &arp_tbl)
 #endif
@@ -610,7 +615,6 @@ static int mlx5e_rep_open(struct net_device *dev)
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep = rpriv->rep;
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	int err;
 
 	mutex_lock(&priv->state_lock);
@@ -618,8 +622,9 @@ static int mlx5e_rep_open(struct net_device *dev)
 	if (err)
 		goto unlock;
 
-	if (!mlx5_eswitch_set_vport_state(esw, rep->vport,
-					  MLX5_ESW_VPORT_ADMIN_STATE_UP))
+	if (!mlx5_modify_vport_admin_state(priv->mdev,
+			MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+			rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
 		netif_carrier_on(dev);
 
 unlock:
@@ -632,11 +637,12 @@ static int mlx5e_rep_close(struct net_device *dev)
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep = rpriv->rep;
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	int ret;
 
 	mutex_lock(&priv->state_lock);
-	(void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+	mlx5_modify_vport_admin_state(priv->mdev,
+			MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+			rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
 	ret = mlx5e_close_locked(dev);
 	mutex_unlock(&priv->state_lock);
 	return ret;
@@ -704,7 +710,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep;
 
-	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
 		return false;
 
 	rep = rpriv->rep;
@@ -718,8 +724,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
 static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
 {
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
-	struct mlx5_eswitch_rep *rep = rpriv->rep;
+	struct mlx5_eswitch_rep *rep;
 
+	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
+		return false;
+
+	rep = rpriv->rep;
 	if (rep && rep->vport != FDB_UPLINK_VPORT)
 		return true;
 
@@ -797,9 +807,9 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
 					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
 					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
 
-	params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+	params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
 	params->rq_wq_type  = MLX5_WQ_TYPE_LINKED_LIST;
-	params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+	params->log_rq_size = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
 
 	params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
 	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
@@ -819,9 +829,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
 
 	netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
 
-#ifdef CONFIG_NET_SWITCHDEV
 	netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
-#endif
 
 	netdev->features	 |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
 	netdev->hw_features      |= NETIF_F_HW_TC;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 91b1b0938931..8285e6d24f30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -36,6 +36,7 @@
 #include <linux/tcp.h>
 #include <linux/bpf_trace.h>
 #include <net/busy_poll.h>
+#include <net/ip6_checksum.h>
 #include "en.h"
 #include "en_tc.h"
 #include "eswitch.h"
@@ -546,20 +547,33 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
 	return true;
 }
 
+static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
+{
+	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+	u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
+		     (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
+
+	tcp->check = 0;
+	tcp->psh = get_cqe_lro_tcppsh(cqe);
+
+	if (tcp_ack) {
+		tcp->ack                = 1;
+		tcp->ack_seq            = cqe->lro_ack_seq_num;
+		tcp->window             = cqe->lro_tcp_win;
+	}
+}
+
 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
 				 u32 cqe_bcnt)
 {
 	struct ethhdr	*eth = (struct ethhdr *)(skb->data);
 	struct tcphdr	*tcp;
 	int network_depth = 0;
+	__wsum check;
 	__be16 proto;
 	u16 tot_len;
 	void *ip_p;
 
-	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
-	u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
-		(l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
-
 	skb->mac_len = ETH_HLEN;
 	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
 
@@ -577,23 +591,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
 		ipv4->check             = 0;
 		ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
 						       ipv4->ihl);
+
+		mlx5e_lro_update_tcp_hdr(cqe, tcp);
+		check = csum_partial(tcp, tcp->doff * 4,
+				     csum_unfold((__force __sum16)cqe->check_sum));
+		/* Almost done, don't forget the pseudo header */
+		tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
+					       tot_len - sizeof(struct iphdr),
+					       IPPROTO_TCP, check);
 	} else {
+		u16 payload_len = tot_len - sizeof(struct ipv6hdr);
 		struct ipv6hdr *ipv6 = ip_p;
 
 		tcp = ip_p + sizeof(struct ipv6hdr);
 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 
 		ipv6->hop_limit         = cqe->lro_min_ttl;
-		ipv6->payload_len       = cpu_to_be16(tot_len -
-						      sizeof(struct ipv6hdr));
-	}
-
-	tcp->psh = get_cqe_lro_tcppsh(cqe);
-
-	if (tcp_ack) {
-		tcp->ack                = 1;
-		tcp->ack_seq            = cqe->lro_ack_seq_num;
-		tcp->window             = cqe->lro_tcp_win;
+		ipv6->payload_len       = cpu_to_be16(payload_len);
+
+		mlx5e_lro_update_tcp_hdr(cqe, tcp);
+		check = csum_partial(tcp, tcp->doff * 4,
+				     csum_unfold((__force __sum16)cqe->check_sum));
+		/* Almost done, don't forget the pseudo header */
+		tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
+					     IPPROTO_TCP, check);
 	}
 }
 
@@ -614,6 +635,45 @@ static inline bool is_first_ethertype_ip(struct sk_buff *skb)
 	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
 }
 
+static __be32 mlx5e_get_fcs(struct sk_buff *skb)
+{
+	int last_frag_sz, bytes_in_prev, nr_frags;
+	u8 *fcs_p1, *fcs_p2;
+	skb_frag_t *last_frag;
+	__be32 fcs_bytes;
+
+	if (!skb_is_nonlinear(skb))
+		return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
+	last_frag_sz = skb_frag_size(last_frag);
+
+	/* If all FCS data is in last frag */
+	if (last_frag_sz >= ETH_FCS_LEN)
+		return *(__be32 *)(skb_frag_address(last_frag) +
+				   last_frag_sz - ETH_FCS_LEN);
+
+	fcs_p2 = (u8 *)skb_frag_address(last_frag);
+	bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
+
+	/* Find where the other part of the FCS is - Linear or another frag */
+	if (nr_frags == 1) {
+		fcs_p1 = skb_tail_pointer(skb);
+	} else {
+		skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
+
+		fcs_p1 = skb_frag_address(prev_frag) +
+			    skb_frag_size(prev_frag);
+	}
+	fcs_p1 -= bytes_in_prev;
+
+	memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
+	memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
+
+	return fcs_bytes;
+}
+
 static inline void mlx5e_handle_csum(struct net_device *netdev,
 				     struct mlx5_cqe64 *cqe,
 				     struct mlx5e_rq *rq,
@@ -632,6 +692,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 	if (is_first_ethertype_ip(skb)) {
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+		if (unlikely(netdev->features & NETIF_F_RXFCS))
+			skb->csum = csum_add(skb->csum,
+					     (__force __wsum)mlx5e_get_fcs(skb));
 		rq->stats.csum_complete++;
 		return;
 	}
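mlx5e_get_fcs() exists because with NETIF_F_RXFCS the four FCS bytes stay in the packet, may straddle the last two fragments, and must be folded into the CHECKSUM_COMPLETE value. The two memcpy() calls splice the possibly split trailer back into one 32-bit word, and csum_add() performs one's-complement addition with end-around carry. A compact model of both steps; csum_add32() is a simplified stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One's-complement add of a 32-bit quantity into a running checksum,
 * end-around carry included (what csum_add() does in the kernel). */
static uint32_t csum_add32(uint32_t csum, uint32_t addend)
{
	uint64_t s = (uint64_t)csum + addend;
	return (uint32_t)(s + (s >> 32));
}

int main(void)
{
	/* FCS split across two fragments: 1 byte at the end of the
	 * previous fragment, 3 bytes in the last one. */
	uint8_t prev_tail[1] = { 0xde };
	uint8_t last_frag[3] = { 0xad, 0xbe, 0xef };
	uint32_t fcs;

	memcpy(&fcs, prev_tail, sizeof(prev_tail));
	memcpy((uint8_t *)&fcs + sizeof(prev_tail), last_frag, sizeof(last_frag));

	uint32_t csum = 0x12345678;    /* hardware CHECKSUM_COMPLETE value */
	printf("adjusted csum: 0x%08x\n", csum_add32(csum, fcs));
	return 0;
}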
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index acf32fe952cd..3d3b1f97dc27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -197,9 +197,15 @@ static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
 		return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
 						   MLX5E_AM_STATS_WORSE;
 
+	if (!prev->ppms)
+		return curr->ppms ? MLX5E_AM_STATS_BETTER :
+				    MLX5E_AM_STATS_SAME;
+
 	if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
 		return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
 						   MLX5E_AM_STATS_WORSE;
 
+	if (!prev->epms)
+		return MLX5E_AM_STATS_SAME;
+
 	if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
 		return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 1f1f8af87d4d..707976482c09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
 	if (iph->protocol != IPPROTO_UDP)
 		goto out;
 
-	udph = udp_hdr(skb);
+	/* Don't assume skb_transport_header() was set */
+	udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl);
 	if (udph->dest != htons(9))
 		goto out;
 
@@ -238,15 +239,19 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
 	int err = 0;
 
 	/* Temporarily enable local_lb */
-	if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
-		mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
-		if (!lbtp->local_lb)
-			mlx5_nic_vport_update_local_lb(priv->mdev, true);
+	err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
+	if (err)
+		return err;
+
+	if (!lbtp->local_lb) {
+		err = mlx5_nic_vport_update_local_lb(priv->mdev, true);
+		if (err)
+			return err;
 	}
 
 	err = mlx5e_refresh_tirs(priv, true);
 	if (err)
-		return err;
+		goto out;
 
 	lbtp->loopback_ok = false;
 	init_completion(&lbtp->comp);
@@ -256,16 +261,21 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
 	lbtp->pt.dev = priv->netdev;
 	lbtp->pt.af_packet_priv = lbtp;
 	dev_add_pack(&lbtp->pt);
+
+	return 0;
+
+out:
+	if (!lbtp->local_lb)
+		mlx5_nic_vport_update_local_lb(priv->mdev, false);
+
 	return err;
 }
 
 static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
 					struct mlx5e_lbt_priv *lbtp)
 {
-	if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
-		if (!lbtp->local_lb)
-			mlx5_nic_vport_update_local_lb(priv->mdev, false);
-	}
+	if (!lbtp->local_lb)
+		mlx5_nic_vport_update_local_lb(priv->mdev, false);
 
 	dev_remove_pack(&lbtp->pt);
 	mlx5e_refresh_tirs(priv, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9ba1f72060aa..e28f9dab9ceb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -484,7 +484,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
 		tbl = &arp_tbl;
 #if IS_ENABLED(CONFIG_IPV6)
 	else if (m_neigh->family == AF_INET6)
-		tbl = ipv6_stub->nd_tbl;
+		tbl = &nd_tbl;
 #endif
 	else
 		return;
@@ -780,6 +780,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 					  f->mask);
 		addr_type = key->addr_type;
 
+		/* the HW doesn't support frag first/later */
+		if (mask->flags & FLOW_DIS_FIRST_FRAG)
+			return -EOPNOTSUPP;
+
 		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
@@ -1383,7 +1387,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
 	}
 
 	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
-	if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
+	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
 		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
 		return false;
 	}
@@ -2013,7 +2018,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
 			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
-				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
+				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
+				    tcf_vlan_push_prio(a))
 					return -EOPNOTSUPP;
 
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
@@ -2091,19 +2097,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
 	if (err != -EAGAIN)
 		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
 
+	if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
+	    !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
+		kvfree(parse_attr);
+
 	err = rhashtable_insert_fast(&tc->ht, &flow->node,
 				     tc->ht_params);
-	if (err)
-		goto err_del_rule;
+	if (err) {
+		mlx5e_tc_del_flow(priv, flow);
+		kfree(flow);
+	}
 
-	if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
-	    !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
-		kvfree(parse_attr);
 	return err;
 
-err_del_rule:
-	mlx5e_tc_del_flow(priv, flow);
-
 err_free:
 	kvfree(parse_attr);
 	kfree(flow);
num_dma, wi, cseg); return NETDEV_TX_OK; -dma_unmap_wqe_err: +err_drop: sq->stats.dropped++; - mlx5e_dma_unmap_wqe_err(sq, wi->num_dma); - dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -594,17 +596,15 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, (struct mlx5_wqe_data_seg *)cseg + ds_cnt); if (unlikely(num_dma < 0)) - goto dma_unmap_wqe_err; + goto err_drop; mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, num_bytes, num_dma, wi, cseg); return NETDEV_TX_OK; -dma_unmap_wqe_err: +err_drop: sq->stats.dropped++; - mlx5e_dma_unmap_wqe_err(sq, wi->num_dma); - dev_kfree_skb_any(skb); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index fc606bfd1d6e..eb91de86202b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -776,7 +776,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) return err; } -int mlx5_stop_eqs(struct mlx5_core_dev *dev) +void mlx5_stop_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = &dev->priv.eq_table; int err; @@ -785,22 +785,26 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev) if (MLX5_CAP_GEN(dev, pg)) { err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq); if (err) - return err; + mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n", + err); } #endif err = mlx5_destroy_unmap_eq(dev, &table->pages_eq); if (err) - return err; + mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n", + err); - mlx5_destroy_unmap_eq(dev, &table->async_eq); + err = mlx5_destroy_unmap_eq(dev, &table->async_eq); + if (err) + mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n", + err); mlx5_cmd_use_polling(dev); err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq); if (err) - mlx5_cmd_use_events(dev); - - return err; + mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n", + err); } int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index c77f4c0c7769..f697084937c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1535,7 +1535,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) if (!ESW_ALLOWED(esw)) return 0; - if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || + if (!MLX5_ESWITCH_MANAGER(esw->dev) || !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); return -EOPNOTSUPP; @@ -1616,7 +1616,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) int vport_num; int err; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; esw_info(dev, @@ -1689,7 +1689,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) { - if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) + if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev)) return; esw_info(esw->dev, "cleanup\n"); @@ -2054,26 +2054,35 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, memset(vf_stats, 0, sizeof(*vf_stats)); vf_stats->rx_packets = MLX5_GET_CTR(out, received_eth_unicast.packets) + + MLX5_GET_CTR(out, received_ib_unicast.packets) + MLX5_GET_CTR(out, received_eth_multicast.packets) + + MLX5_GET_CTR(out, received_ib_multicast.packets) + MLX5_GET_CTR(out, received_eth_broadcast.packets); vf_stats->rx_bytes = 
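
mlx5_stop_eqs() above changes from returning the first error to a void function that logs each failure and carries on. The reasoning, as far as the hunk shows, is that on a teardown path the caller cannot recover anyway, and an early return would leave the remaining EQs and the polling-mode switch undone. The shape in isolation, with destroy_one() as a placeholder for mlx5_destroy_unmap_eq():

/* Best-effort teardown: report every failure, skip nothing. */
static int destroy_one(struct mlx5_core_dev *dev, struct mlx5_eq *eq); /* placeholder */

static void stop_all_eqs(struct mlx5_core_dev *dev, struct mlx5_eq *eqs, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = destroy_one(dev, &eqs[i]);
		if (err)
			mlx5_core_err(dev, "failed to destroy eq %d, err(%d)\n",
				      i, err);
		/* no early return: the remaining EQs must still be destroyed */
	}
}
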
MLX5_GET_CTR(out, received_eth_unicast.octets) + + MLX5_GET_CTR(out, received_ib_unicast.octets) + MLX5_GET_CTR(out, received_eth_multicast.octets) + + MLX5_GET_CTR(out, received_ib_multicast.octets) + MLX5_GET_CTR(out, received_eth_broadcast.octets); vf_stats->tx_packets = MLX5_GET_CTR(out, transmitted_eth_unicast.packets) + + MLX5_GET_CTR(out, transmitted_ib_unicast.packets) + MLX5_GET_CTR(out, transmitted_eth_multicast.packets) + + MLX5_GET_CTR(out, transmitted_ib_multicast.packets) + MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); vf_stats->tx_bytes = MLX5_GET_CTR(out, transmitted_eth_unicast.octets) + + MLX5_GET_CTR(out, transmitted_ib_unicast.octets) + MLX5_GET_CTR(out, transmitted_eth_multicast.octets) + + MLX5_GET_CTR(out, transmitted_ib_multicast.octets) + MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); vf_stats->multicast = - MLX5_GET_CTR(out, received_eth_multicast.packets); + MLX5_GET_CTR(out, received_eth_multicast.packets) + + MLX5_GET_CTR(out, received_ib_multicast.packets); vf_stats->broadcast = MLX5_GET_CTR(out, received_eth_broadcast.packets); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 565c8b7a399a..10bf770675f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -39,6 +39,8 @@ #include #include "lib/mpfs.h" +#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager) + enum { SRIOV_NONE, SRIOV_LEGACY, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index d9fd8570b07c..c699055c0ffd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -912,8 +912,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink) if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) return -EOPNOTSUPP; - if (!MLX5_CAP_GEN(dev, vport_group_manager)) - return -EOPNOTSUPP; + if (!MLX5_ESWITCH_MANAGER(dev)) + return -EPERM; if (dev->priv.eswitch->mode == SRIOV_NONE) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c index 3c11d6e2160a..14962969c5ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c @@ -66,6 +66,9 @@ static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size, u8 actual_size; int err; + if (!size) + return -EINVAL; + if (!fdev->mdev) return -ENOTCONN; @@ -95,6 +98,9 @@ static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size, u8 actual_size; int err; + if (!size) + return -EINVAL; + if (!fdev->mdev) return -ENOTCONN; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 5a7bea688ec8..dd05cf148845 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -36,6 +36,7 @@ #include "mlx5_core.h" #include "fs_core.h" #include "fs_cmd.h" +#include "eswitch.h" #include "diag/fs_tracepoint.h" #define INIT_TREE_NODE_ARRAY_SIZE(...)
(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ @@ -174,6 +175,7 @@ static void del_flow_group(struct fs_node *node); static void del_fte(struct fs_node *node); static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, struct mlx5_flow_destination *d2); +static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns); static struct mlx5_flow_rule * find_flow_rule(struct fs_fte *fte, struct mlx5_flow_destination *dest); @@ -2041,23 +2043,27 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering) static int init_root_ns(struct mlx5_flow_steering *steering) { + int err; + steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); if (!steering->root_ns) - goto cleanup; + return -ENOMEM; - if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) - goto cleanup; + err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node); + if (err) + goto out_err; set_prio_attrs(steering->root_ns); - - if (create_anchor_flow_table(steering)) - goto cleanup; + err = create_anchor_flow_table(steering); + if (err) + goto out_err; return 0; -cleanup: - mlx5_cleanup_fs(steering->dev); - return -ENOMEM; +out_err: + cleanup_root_ns(steering->root_ns); + steering->root_ns = NULL; + return err; } static void clean_tree(struct fs_node *node) @@ -2206,7 +2212,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } - if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { + if (MLX5_ESWITCH_MANAGER(dev)) { if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { err = init_fdb_root_ns(steering); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 2c71557d1cee..d69897a1e2ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -34,6 +34,7 @@ #include #include #include "mlx5_core.h" +#include "eswitch.h" #include "../../mlxfw/mlxfw.h" static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out, @@ -152,13 +153,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) } if (MLX5_CAP_GEN(dev, vport_group_manager) && - MLX5_CAP_GEN(dev, eswitch_flow_table)) { + MLX5_ESWITCH_MANAGER(dev)) { err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE); if (err) return err; } - if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { + if (MLX5_ESWITCH_MANAGER(dev)) { err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c index 7cb67122e8b5..22811ecd8fcd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -34,6 +34,7 @@ #include #include #include "mlx5_core.h" +#include "eswitch.h" #include "lib/mpfs.h" /* HW L2 Table (MPFS) management */ @@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev) int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); struct mlx5_mpfs *mpfs; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL); @@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) { struct mlx5_mpfs *mpfs = dev->priv.mpfs; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return; WARN_ON(!hlist_empty(mpfs->hash)); @@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) u32 index; int err; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; mutex_lock(&mpfs->lock); @@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct 
mlx5_core_dev *dev, u8 *mac) int err = 0; u32 index; - if (!MLX5_VPORT_MANAGER(dev)) + if (!MLX5_ESWITCH_MANAGER(dev)) return 0; mutex_lock(&mpfs->lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 06562c9a6b9c..4ddd632d10f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -316,9 +316,6 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; struct mlx5_eq_table *table = &priv->eq_table; - struct irq_affinity irqdesc = { - .pre_vectors = MLX5_EQ_VEC_COMP_BASE, - }; int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); int nvec; @@ -332,10 +329,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev) if (!priv->irq_info) goto err_free_msix; - nvec = pci_alloc_irq_vectors_affinity(dev->pdev, + nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1, nvec, - PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, - &irqdesc); + PCI_IRQ_MSIX); if (nvec < 0) return nvec; @@ -581,8 +577,7 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev) int ret = 0; /* Disable local_lb by default */ - if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && - MLX5_CAP_GEN(dev, disable_local_lb)) + if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) ret = mlx5_nic_vport_update_local_lb(dev, false); return ret; @@ -621,6 +616,63 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev) return (u64)timer_l | (u64)timer_h1 << 32; } +static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ + struct mlx5_priv *priv = &mdev->priv; + int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i); + + if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { + mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); + return -ENOMEM; + } + + cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), + priv->irq_info[i].mask); + + if (IS_ENABLED(CONFIG_SMP) && + irq_set_affinity_hint(irq, priv->irq_info[i].mask)) + mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); + + return 0; +} + +static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ + struct mlx5_priv *priv = &mdev->priv; + int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i); + + irq_set_affinity_hint(irq, NULL); + free_cpumask_var(priv->irq_info[i].mask); +} + +static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev) +{ + int err; + int i; + + for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) { + err = mlx5_irq_set_affinity_hint(mdev, i); + if (err) + goto err_out; + } + + return 0; + +err_out: + for (i--; i >= 0; i--) + mlx5_irq_clear_affinity_hint(mdev, i); + + return err; +} + +static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev) +{ + int i; + + for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) + mlx5_irq_clear_affinity_hint(mdev, i); +} + int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, unsigned int *irqn) { @@ -1093,6 +1145,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_stop_eqs; } + err = mlx5_irq_set_affinity_hints(dev); + if (err) { + dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n"); + goto err_affinity_hints; + } + err = mlx5_init_fs(dev); if (err) { dev_err(&pdev->dev, "Failed to init flow steering\n"); @@ -1150,6 +1208,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_cleanup_fs(dev); err_fs: + 
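
The main.c hunks above swap the PCI_IRQ_AFFINITY-managed allocation for plain MSI-X vectors plus hand-rolled hints built with cpumask_local_spread(), which returns the i-th CPU while preferring the device's NUMA node. A sketch of the per-vector step, assuming the caller supplies the irq number, the vector index and the node:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

static int hint_one_vector(int irq, int i, int node, cpumask_var_t *mask)
{
	if (!zalloc_cpumask_var(mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpumask_local_spread(i, node), *mask);
	/* The kernel keeps a pointer to *mask, so it must stay allocated
	 * until irq_set_affinity_hint(irq, NULL) clears the hint again. */
	if (irq_set_affinity_hint(irq, *mask))
		pr_warn("irq_set_affinity_hint failed, irq %d\n", irq);

	return 0;
}

On the first failing vector, the patch then unwinds the hints already set in reverse order before propagating the error, as the err_out loop in the hunk shows.
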
mlx5_irq_clear_affinity_hints(dev); + +err_affinity_hints: free_comp_eqs(dev); err_stop_eqs: @@ -1218,6 +1279,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_sriov_detach(dev); mlx5_cleanup_fs(dev); + mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); mlx5_put_uars_page(dev, priv->uar); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index e07061f565d6..ccb6287aeeb7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -641,7 +641,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc); static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(qtct_reg)]; + u32 out[MLX5_ST_SZ_DW(qetc_reg)]; if (!MLX5_CAP_GEN(mdev, ets)) return -EOPNOTSUPP; @@ -653,7 +653,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(qtct_reg)]; + u32 in[MLX5_ST_SZ_DW(qetc_reg)]; if (!MLX5_CAP_GEN(mdev, ets)) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index db9e665ab104..889130edb715 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -213,8 +213,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, err_cmd: memset(din, 0, sizeof(din)); memset(dout, 0, sizeof(dout)); - MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); - MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); + MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP); + MLX5_SET(destroy_qp_in, din, qpn, qp->qpn); mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c index e651e4c02867..d3c33e9eea72 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c @@ -125,16 +125,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table, return ret_entry; } -static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev, +static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev, u32 rate, u16 index) { - u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0}; - MLX5_SET(set_rate_limit_in, in, opcode, - MLX5_CMD_OP_SET_RATE_LIMIT); - MLX5_SET(set_rate_limit_in, in, rate_limit_index, index); - MLX5_SET(set_rate_limit_in, in, rate_limit, rate); + MLX5_SET(set_pp_rate_limit_in, in, opcode, + MLX5_CMD_OP_SET_PP_RATE_LIMIT); + MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index); + MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate); return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } @@ -173,7 +173,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index) entry->refcount++; } else { /* new rate limit */ - err = mlx5_set_rate_limit_cmd(dev, rate, entry->index); + err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index); if (err) { mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n", rate, err); @@ -209,7 +209,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate) entry->refcount--; if (!entry->refcount) { /* need to remove rate */ - mlx5_set_rate_limit_cmd(dev, 0, 
entry->index); + mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index); entry->rate = 0; } @@ -262,8 +262,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev) /* Clear all configured rates */ for (i = 0; i < table->max_size; i++) if (table->rl_entry[i].rate) - mlx5_set_rate_limit_cmd(dev, 0, - table->rl_entry[i].index); + mlx5_set_pp_rate_limit_cmd(dev, 0, + table->rl_entry[i].index); kfree(dev->priv.rl_table.rl_entry); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 2a8b529ce6dd..a0674962f02c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) return -EBUSY; } + if (!MLX5_ESWITCH_MANAGER(dev)) + goto enable_vfs_hca; + err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY); if (err) { mlx5_core_warn(dev, @@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) return err; } +enable_vfs_hca: for (vf = 0; vf < num_vfs; vf++) { err = mlx5_core_enable_hca(dev, vf + 1); if (err) { @@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev) } out: - mlx5_eswitch_disable_sriov(dev->priv.eswitch); + if (MLX5_ESWITCH_MANAGER(dev)) + mlx5_eswitch_disable_sriov(dev->priv.eswitch); if (mlx5_wait_for_vf_pages(dev)) mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index d653b0025b13..71153c0f1605 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -36,6 +36,9 @@ #include #include "mlx5_core.h" +/* Mutex to hold while enabling or disabling RoCE */ +static DEFINE_MUTEX(mlx5_roce_en_lock); + static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u32 *out, int outlen) { @@ -908,23 +911,33 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable) void *in; int err; - mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable"); + if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) && + !MLX5_CAP_GEN(mdev, disable_local_lb_uc)) + return 0; + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; - MLX5_SET(modify_nic_vport_context_in, in, - field_select.disable_mc_local_lb, 1); MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.disable_mc_local_lb, !enable); - - MLX5_SET(modify_nic_vport_context_in, in, - field_select.disable_uc_local_lb, 1); MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.disable_uc_local_lb, !enable); + if (MLX5_CAP_GEN(mdev, disable_local_lb_mc)) + MLX5_SET(modify_nic_vport_context_in, in, + field_select.disable_mc_local_lb, 1); + + if (MLX5_CAP_GEN(mdev, disable_local_lb_uc)) + MLX5_SET(modify_nic_vport_context_in, in, + field_select.disable_uc_local_lb, 1); + err = mlx5_modify_nic_vport_context(mdev, in, inlen); + if (!err) + mlx5_core_dbg(mdev, "%s local_lb\n", + enable ? 
"enable" : "disable"); + kvfree(in); return err; } @@ -988,17 +1001,35 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev, int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev) { - if (atomic_inc_return(&mdev->roce.roce_en) != 1) - return 0; - return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED); + int err = 0; + + mutex_lock(&mlx5_roce_en_lock); + if (!mdev->roce.roce_en) + err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED); + + if (!err) + mdev->roce.roce_en++; + mutex_unlock(&mlx5_roce_en_lock); + + return err; } EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce); int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev) { - if (atomic_dec_return(&mdev->roce.roce_en) != 0) - return 0; - return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED); + int err = 0; + + mutex_lock(&mlx5_roce_en_lock); + if (mdev->roce.roce_en) { + mdev->roce.roce_en--; + if (mdev->roce.roce_en == 0) + err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED); + + if (err) + mdev->roce.roce_en++; + } + mutex_unlock(&mlx5_roce_en_lock); + return err; } EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index 07a9ba6cfc70..2f74953e4561 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port) struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; struct mlx5e_vxlan *vxlan; - spin_lock(&vxlan_db->lock); + spin_lock_bh(&vxlan_db->lock); vxlan = radix_tree_lookup(&vxlan_db->tree, port); - spin_unlock(&vxlan_db->lock); + spin_unlock_bh(&vxlan_db->lock); return vxlan; } @@ -88,8 +88,12 @@ static void mlx5e_vxlan_add_port(struct work_struct *work) struct mlx5e_vxlan *vxlan; int err; - if (mlx5e_vxlan_lookup_port(priv, port)) + mutex_lock(&priv->state_lock); + vxlan = mlx5e_vxlan_lookup_port(priv, port); + if (vxlan) { + atomic_inc(&vxlan->refcount); goto free_work; + } if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port)) goto free_work; @@ -99,10 +103,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work) goto err_delete_port; vxlan->udp_port = port; + atomic_set(&vxlan->refcount, 1); - spin_lock_irq(&vxlan_db->lock); + spin_lock_bh(&vxlan_db->lock); err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan); - spin_unlock_irq(&vxlan_db->lock); + spin_unlock_bh(&vxlan_db->lock); if (err) goto err_free; @@ -113,35 +118,39 @@ static void mlx5e_vxlan_add_port(struct work_struct *work) err_delete_port: mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); free_work: + mutex_unlock(&priv->state_lock); kfree(vxlan_work); } -static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port) +static void mlx5e_vxlan_del_port(struct work_struct *work) { + struct mlx5e_vxlan_work *vxlan_work = + container_of(work, struct mlx5e_vxlan_work, work); + struct mlx5e_priv *priv = vxlan_work->priv; struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; + u16 port = vxlan_work->port; struct mlx5e_vxlan *vxlan; + bool remove = false; - spin_lock_irq(&vxlan_db->lock); - vxlan = radix_tree_delete(&vxlan_db->tree, port); - spin_unlock_irq(&vxlan_db->lock); - + mutex_lock(&priv->state_lock); + spin_lock_bh(&vxlan_db->lock); + vxlan = radix_tree_lookup(&vxlan_db->tree, port); if (!vxlan) - return; - - mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port); - - kfree(vxlan); -} + goto 
out_unlock; -static void mlx5e_vxlan_del_port(struct work_struct *work) -{ - struct mlx5e_vxlan_work *vxlan_work = - container_of(work, struct mlx5e_vxlan_work, work); - struct mlx5e_priv *priv = vxlan_work->priv; - u16 port = vxlan_work->port; + if (atomic_dec_and_test(&vxlan->refcount)) { + radix_tree_delete(&vxlan_db->tree, port); + remove = true; + } - __mlx5e_vxlan_core_del_port(priv, port); +out_unlock: + spin_unlock_bh(&vxlan_db->lock); + if (remove) { + mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); + kfree(vxlan); + } + mutex_unlock(&priv->state_lock); kfree(vxlan_work); } @@ -171,12 +180,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) struct mlx5e_vxlan *vxlan; unsigned int port = 0; - spin_lock_irq(&vxlan_db->lock); + /* Lockless since we are the only radix-tree consumers, wq is disabled */ while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) { port = vxlan->udp_port; - spin_unlock_irq(&vxlan_db->lock); - __mlx5e_vxlan_core_del_port(priv, (u16)port); - spin_lock_irq(&vxlan_db->lock); + radix_tree_delete(&vxlan_db->tree, port); + mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); + kfree(vxlan); } - spin_unlock_irq(&vxlan_db->lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h index 5def12c048e3..5ef6ae7d568a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h @@ -36,6 +36,7 @@ #include "en.h" struct mlx5e_vxlan { + atomic_t refcount; u16 udp_port; }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index f6963b0b4a55..122506daa586 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -107,20 +107,20 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3), MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9), - MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8), - MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2), - MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6), - MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), - MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), - MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8), MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16), MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16), + MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), + MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), + MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), + MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32), + MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8), }; -#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38 +#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40 struct mlxsw_afk_element_inst { /* element instance in actual block */ const struct mlxsw_afk_element_info *info; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 23f7d828cf67..6ef20e5cc77d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ 
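
The vxlan.c rework spread over the last two chunks turns the port database into a refcounted one: adding an already-known UDP port only takes another reference, and the firmware del-port command runs only when the last user drops it. The lifetime rule, stripped of the radix tree and workqueue plumbing (struct port_entry is a placeholder mirroring the patched struct mlx5e_vxlan):

#include <linux/atomic.h>
#include <linux/types.h>

struct port_entry {
	atomic_t refcount;
	u16 udp_port;
};

static void port_get(struct port_entry *e)
{
	atomic_inc(&e->refcount);	/* existing port: no HW command */
}

/* Returns true when the caller must now issue the del-port command and
 * free the entry; only the thread dropping the last reference sees true.
 */
static bool port_put(struct port_entry *e)
{
	return atomic_dec_and_test(&e->refcount);
}
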
-1643,7 +1643,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, return 0; } - wmb(); /* reset needs to be written before we read control register */ + /* Reset needs to be written before we read control register, and + * we must wait for the HW to become responsive once again + */ + wmb(); + msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS); + end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); do { u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index a6441208e9d9..fb082ad21b00 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -59,6 +59,7 @@ #define MLXSW_PCI_SW_RESET 0xF0010 #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 +#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 #define MLXSW_PCI_FW_READY 0xA1844 #define MLXSW_PCI_FW_READY_MASK 0xFFFF #define MLXSW_PCI_FW_READY_MAGIC 0x5E diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 696b99e65a5a..8b48338b4a70 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1417,6 +1417,7 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) } mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; + mlxsw_sp_port_vlan->ref_count = 1; mlxsw_sp_port_vlan->vid = vid; list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); @@ -1444,8 +1445,10 @@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); - if (mlxsw_sp_port_vlan) + if (mlxsw_sp_port_vlan) { + mlxsw_sp_port_vlan->ref_count++; return mlxsw_sp_port_vlan; + } return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid); } @@ -1454,6 +1457,9 @@ void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) { struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; + if (--mlxsw_sp_port_vlan->ref_count != 0) + return; + if (mlxsw_sp_port_vlan->bridge_port) mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); else if (fid) @@ -2974,6 +2980,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, if (IS_ERR(mlxsw_sp_port_vlan)) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", mlxsw_sp_port->local_port); + err = PTR_ERR(mlxsw_sp_port_vlan); goto err_port_vlan_get; } @@ -4163,6 +4170,7 @@ static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) { + u16 vid = 1; int err; err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); @@ -4175,8 +4183,19 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) true, false); if (err) goto err_port_vlan_set; + + for (; vid <= VLAN_N_VID - 1; vid++) { + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, + vid, false); + if (err) + goto err_vid_learning_set; + } + return 0; +err_vid_learning_set: + for (vid--; vid >= 1; vid--) + mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); err_port_vlan_set: mlxsw_sp_port_stp_set(mlxsw_sp_port, false); err_port_stp_set: @@ -4186,6 +4205,12 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) { + u16 vid; + + for (vid = VLAN_N_VID - 1; vid >= 1; vid--) + 
mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, + vid, true); + mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, false, false); mlxsw_sp_port_stp_set(mlxsw_sp_port, false); @@ -4216,7 +4241,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, return -EINVAL; if (!info->linking) break; - if (netdev_has_any_upper_dev(upper_dev)) + if (netdev_has_any_upper_dev(upper_dev) && + (!netif_is_bridge_master(upper_dev) || + !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, + upper_dev))) return -EINVAL; if (netif_is_lag_master(upper_dev) && !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, @@ -4328,6 +4356,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct netdev_notifier_changeupper_info *info = ptr; struct net_device *upper_dev; int err = 0; @@ -4339,7 +4368,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, return -EINVAL; if (!info->linking) break; - if (netdev_has_any_upper_dev(upper_dev)) + if (netdev_has_any_upper_dev(upper_dev) && + (!netif_is_bridge_master(upper_dev) || + !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, + upper_dev))) return -EINVAL; break; case NETDEV_CHANGEUPPER: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 84ce83acdc19..8c4ce0a0cc82 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -194,6 +194,7 @@ struct mlxsw_sp_port_vlan { struct list_head list; struct mlxsw_sp_port *mlxsw_sp_port; struct mlxsw_sp_fid *fid; + unsigned int ref_count; u16 vid; struct mlxsw_sp_bridge_port *bridge_port; struct list_head bridge_vlan_node; @@ -326,6 +327,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *brport_dev, struct net_device *br_dev); +bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev); /* spectrum.c */ int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 93728c694e6d..0a9adc5962fb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -385,13 +385,13 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_SB_CM(10000, 0, 0), + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index bbd238e50f05..54262af4e98f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -112,11 +112,11 @@ static const int 
mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1, [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1, [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1, + [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, }; static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1, - [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, }; static const int *mlxsw_sp_packet_type_sfgc_types[] = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 5189022a1c8c..516e63244606 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -729,26 +729,29 @@ static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr, static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, u32 tb_id) { + struct mlxsw_sp_fib *fib4; + struct mlxsw_sp_fib *fib6; struct mlxsw_sp_vr *vr; int err; vr = mlxsw_sp_vr_find_unused(mlxsw_sp); if (!vr) return ERR_PTR(-EBUSY); - vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4); - if (IS_ERR(vr->fib4)) - return ERR_CAST(vr->fib4); - vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6); - if (IS_ERR(vr->fib6)) { - err = PTR_ERR(vr->fib6); + fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4); + if (IS_ERR(fib4)) + return ERR_CAST(fib4); + fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6); + if (IS_ERR(fib6)) { + err = PTR_ERR(fib6); goto err_fib6_create; } + vr->fib4 = fib4; + vr->fib6 = fib6; vr->tb_id = tb_id; return vr; err_fib6_create: - mlxsw_sp_fib_destroy(vr->fib4); - vr->fib4 = NULL; + mlxsw_sp_fib_destroy(fib4); return ERR_PTR(err); } @@ -1531,11 +1534,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp, dipn = htonl(dip); dev = mlxsw_sp->router->rifs[rif]->dev; n = neigh_lookup(&arp_tbl, &dipn, dev); - if (!n) { - netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n", - &dip); + if (!n) return; - } netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip); neigh_event_send(n, NULL); @@ -1562,11 +1562,8 @@ static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp, dev = mlxsw_sp->router->rifs[rif]->dev; n = neigh_lookup(&nd_tbl, &dip, dev); - if (!n) { - netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n", - &dip); + if (!n) return; - } netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip); neigh_event_send(n, NULL); @@ -2536,7 +2533,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, { if (!removing) nh->should_offload = 1; - else if (nh->offloaded) + else nh->should_offload = 0; nh->update = 1; } @@ -3035,6 +3032,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; int i; + if (!list_is_singular(&nh_grp->fib_list)) + return; + for (i = 0; i < nh_grp->count; i++) { struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index d39ffbfcc436..7924f241e3ad 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -134,6 +134,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge, return NULL; } +bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp 
*mlxsw_sp, + const struct net_device *br_dev) +{ + return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); +} + static struct mlxsw_sp_bridge_device * mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, struct net_device *br_dev) @@ -906,8 +912,10 @@ mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, int err; /* No need to continue if only VLAN flags were changed */ - if (mlxsw_sp_port_vlan->bridge_port) + if (mlxsw_sp_port_vlan->bridge_port) { + mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); return 0; + } err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port); if (err) @@ -1092,6 +1100,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, bool dynamic) { char *sfd_pl; + u8 num_rec; int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@ -1101,9 +1110,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), mac, fid, action, local_port); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); - kfree(sfd_pl); + if (err) + goto out; + + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; +out: + kfree(sfd_pl); return err; } @@ -1128,6 +1144,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, bool adding, bool dynamic) { char *sfd_pl; + u8 num_rec; int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@ -1138,9 +1155,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, lag_vid, lag_id); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); - kfree(sfd_pl); + if (err) + goto out; + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; + +out: + kfree(sfd_pl); return err; } @@ -1185,6 +1209,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, u16 fid, u16 mid, bool adding) { char *sfd_pl; + u8 num_rec; int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); @@ -1194,7 +1219,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, MLXSW_REG_SFD_REC_ACTION_NOP, mid); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); + if (err) + goto out; + + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; + +out: kfree(sfd_pl); return err; } diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index 612c7a44b26c..23821540ab07 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -71,7 +71,7 @@ static int sonic_open(struct net_device *dev) for (i = 0; i < SONIC_NUM_RRS; i++) { dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE), SONIC_RBSIZE, DMA_FROM_DEVICE); - if (!laddr) { + if (dma_mapping_error(lp->device, laddr)) { while(i > 0) { /* free any that were mapped successfully */ i--; dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE); diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index c20dd00a1cae..899e7d53e669 100644 --- 
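
The three switchdev hunks above add the same verification step: the SFD payload is packed with a record count, the register is written, and the device writes back how many records it actually consumed. A mismatch means the entry was not programmed (a full FDB, for instance), so the operation now fails with -EBUSY instead of silently reporting the address as offloaded. Factored out of the hunks, the check looks like this:

static int sfd_write_checked(struct mlxsw_sp *mlxsw_sp, char *sfd_pl)
{
	u8 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	int err;

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		return err;

	/* The device rewrites num_rec to the count it really processed */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		return -EBUSY;

	return 0;
}
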
a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -52,8 +52,7 @@ struct nfp_app; #define NFP_FLOWER_MASK_ELEMENT_RS 1 #define NFP_FLOWER_MASK_HASH_BITS 10 -#define NFP_FL_META_FLAG_NEW_MASK 128 -#define NFP_FL_META_FLAG_LAST_MASK 1 +#define NFP_FL_META_FLAG_MANAGE_MASK BIT(7) #define NFP_FL_MASK_REUSE_TIME_NS 40000 #define NFP_FL_MASK_ID_LOCATION 1 diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 3226ddc55f99..d9582ccc0025 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -282,7 +282,7 @@ nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len, id = nfp_add_mask_table(app, mask_data, mask_len); if (id < 0) return false; - *meta_flags |= NFP_FL_META_FLAG_NEW_MASK; + *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK; } *mask_id = id; @@ -299,6 +299,9 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len, if (!mask_entry) return false; + if (meta_flags) + *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK; + *mask_id = mask_entry->mask_id; mask_entry->ref_cnt--; if (!mask_entry->ref_cnt) { @@ -306,7 +309,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len, nfp_release_mask_id(app, *mask_id); kfree(mask_entry); if (meta_flags) - *meta_flags |= NFP_FL_META_FLAG_LAST_MASK; + *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK; } return true; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index f8fa63b66739..a1a15e0c2245 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -492,6 +492,7 @@ static int nfp_pci_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "Error: %d VFs already enabled, but loaded FW can only support %d\n", pf->num_vfs, pf->limit_vfs); + err = -EINVAL; goto err_fw_unload; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index e118b5f23996..8d53a593fb27 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -568,6 +568,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset, return err; } nn_writeb(nn, ctrl_offset, entry->entry); + nfp_net_irq_unmask(nn, entry->entry); return 0; } @@ -582,6 +583,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset, unsigned int vector_idx) { nn_writeb(nn, ctrl_offset, 0xff); + nn_pci_flush(nn); free_irq(nn->irq_entries[vector_idx].vector, nn); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index dc016dfec64d..8e623d8fa78e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -306,7 +306,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev, ls >= ARRAY_SIZE(ls_to_ethtool)) return 0; - cmd->base.speed = ls_to_ethtool[sts]; + cmd->base.speed = ls_to_ethtool[ls]; cmd->base.duplex = DUPLEX_FULL; return 0; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index d540a9dc77b3..9a7655560629 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -84,16 +84,13 @@ nfp_repr_phy_port_get_stats64(struct nfp_port *port, { u8 __iomem *mem = 
port->eth_stats; - /* TX and RX stats are flipped as we are returning the stats as seen - * at the switch port corresponding to the phys port. - */ - stats->tx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK); - stats->tx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS); - stats->tx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS); + stats->tx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK); + stats->tx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS); + stats->tx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS); - stats->rx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK); - stats->rx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS); - stats->rx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS); + stats->rx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK); + stats->rx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS); + stats->rx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS); } static void @@ -297,6 +294,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, netdev->netdev_ops = &nfp_repr_netdev_ops; netdev->ethtool_ops = &nfp_port_ethtool_ops; + netdev->max_mtu = pf_netdev->max_mtu; + SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops); if (nfp_app_has_tc(app)) { diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 37364555c42b..35d14af235f7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -68,10 +68,11 @@ /* CPP address to retrieve the data from */ #define NSP_BUFFER 0x10 #define NSP_BUFFER_CPP GENMASK_ULL(63, 40) -#define NSP_BUFFER_PCIE GENMASK_ULL(39, 38) -#define NSP_BUFFER_ADDRESS GENMASK_ULL(37, 0) +#define NSP_BUFFER_ADDRESS GENMASK_ULL(39, 0) #define NSP_DFLT_BUFFER 0x18 +#define NSP_DFLT_BUFFER_CPP GENMASK_ULL(63, 40) +#define NSP_DFLT_BUFFER_ADDRESS GENMASK_ULL(39, 0) #define NSP_DFLT_BUFFER_CONFIG 0x20 #define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0) @@ -276,8 +277,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, if ((*reg & mask) == val) return 0; - if (msleep_interruptible(25)) - return -ERESTARTSYS; + msleep(25); if (time_after(start_time, wait_until)) return -ETIMEDOUT; @@ -412,8 +412,8 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, if (err < 0) return err; - cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8; - cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg); + cpp_id = FIELD_GET(NSP_DFLT_BUFFER_CPP, reg) << 8; + cpp_buf = FIELD_GET(NSP_DFLT_BUFFER_ADDRESS, reg); if (in_buf && in_size) { err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size); diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index af106be8cc08..27ba476f761d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -77,7 +77,7 @@ #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET /* ILT entry structure */ -#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL +#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12) #define ILT_ENTRY_PHY_ADDR_SHIFT 0 #define ILT_ENTRY_VALID_MASK 0x1ULL #define ILT_ENTRY_VALID_SHIFT 52 @@ -2471,7 +2471,10 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto) if (rc) return rc; - /* Free Task CXT */ + /* Free Task CXT ( Intentionally RoCE as task-id is shared between + * RoCE and iWARP ) + */ + proto = PROTOCOLID_ROCE; rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0, 
qed_cxt_get_proto_tid_count(p_hwfn, proto)); if (rc) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 8f6ccc0c39e5..b306961b02fd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -700,9 +700,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn, p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE]; memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id, - ARRAY_SIZE(p_local->local_chassis_id)); + sizeof(p_local->local_chassis_id)); memcpy(params->lldp_local.local_port_id, p_local->local_port_id, - ARRAY_SIZE(p_local->local_port_id)); + sizeof(p_local->local_port_id)); } static void @@ -714,9 +714,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn, p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE]; memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id, - ARRAY_SIZE(p_remote->peer_chassis_id)); + sizeof(p_remote->peer_chassis_id)); memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id, - ARRAY_SIZE(p_remote->peer_port_id)); + sizeof(p_remote->peer_port_id)); } static int diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 58a689fb04db..ef2374699726 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1782,7 +1782,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) DP_INFO(p_hwfn, "Failed to update driver state\n"); rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, - QED_OV_ESWITCH_VEB); + QED_OV_ESWITCH_NONE); if (rc) DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 9d989c96278c..e41f28602535 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1663,6 +1663,13 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen); if (eth_type == ETH_P_IP) { + if (iph->protocol != IPPROTO_TCP) { + DP_NOTICE(p_hwfn, + "Unexpected ip protocol on ll2 %x\n", + iph->protocol); + return -EINVAL; + } + cm_info->local_ip[0] = ntohl(iph->daddr); cm_info->remote_ip[0] = ntohl(iph->saddr); cm_info->ip_version = TCP_IPV4; @@ -1671,6 +1678,14 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, *payload_len = ntohs(iph->tot_len) - ip_hlen; } else if (eth_type == ETH_P_IPV6) { ip6h = (struct ipv6hdr *)iph; + + if (ip6h->nexthdr != IPPROTO_TCP) { + DP_NOTICE(p_hwfn, + "Unexpected ip protocol on ll2 %x\n", + ip6h->nexthdr); + return -EINVAL; + } + for (i = 0; i < 4; i++) { cm_info->local_ip[i] = ntohl(ip6h->daddr.in6_u.u6_addr32[i]); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 085338990f49..c5452b445c37 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -115,8 +115,7 @@ int qed_l2_alloc(struct qed_hwfn *p_hwfn) void qed_l2_setup(struct qed_hwfn *p_hwfn) { - if (p_hwfn->hw_info.personality != QED_PCI_ETH && - p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) + if (!QED_IS_L2_PERSONALITY(p_hwfn)) return; mutex_init(&p_hwfn->p_l2_info->lock); @@ -126,8 +125,7 @@ void qed_l2_free(struct qed_hwfn *p_hwfn) { u32 i; - if (p_hwfn->hw_info.personality != QED_PCI_ETH && - p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) + if (!QED_IS_L2_PERSONALITY(p_hwfn)) return; if
(!p_hwfn->p_l2_info) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 27832885a87f..2c958921dfb3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -779,6 +779,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, /* We want a minimum of one slowpath and one fastpath vector per hwfn */ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; + if (is_kdump_kernel()) { + DP_INFO(cdev, + "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", + cdev->int_params.in.min_msix_cnt); + cdev->int_params.in.num_vectors = + cdev->int_params.in.min_msix_cnt; + } + rc = qed_set_int_mode(cdev, false); if (rc) { DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 6fb99518a61f..1b6554866138 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -360,6 +360,7 @@ static void qed_rdma_free(struct qed_hwfn *p_hwfn) DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); qed_rdma_resc_free(p_hwfn); + qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto); } static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 3f40b1de7957..d08fe350ab6c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -4396,6 +4396,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn, static int qed_sriov_enable(struct qed_dev *cdev, int num) { struct qed_iov_vf_init_params params; + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; int i, j, rc; if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { @@ -4408,8 +4410,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) /* Initialize HW for VF access */ for_each_hwfn(cdev, j) { - struct qed_hwfn *hwfn = &cdev->hwfns[j]; - struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + hwfn = &cdev->hwfns[j]; + ptt = qed_ptt_acquire(hwfn); /* Make sure not to use more than 16 queues per VF */ params.num_queues = min_t(int, @@ -4445,6 +4447,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num) goto err; } + hwfn = QED_LEADING_HWFN(cdev); + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_ERR(hwfn, "Failed to acquire ptt\n"); + rc = -EBUSY; + goto err; + } + + rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB); + if (rc) + DP_INFO(cdev, "Failed to update eswitch mode\n"); + qed_ptt_release(hwfn, ptt); + return num; err: diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 6fc854b120b0..d50cc2635477 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -320,13 +320,11 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq) barrier(); writel(txq->tx_db.raw, txq->doorbell_addr); - /* mmiowb is needed to synchronize doorbell writes from more than one - * processor. It guarantees that the write arrives to the device before - * the queue lock is released and another start_xmit is called (possibly - * on another CPU). Without this barrier, the next doorbell can bypass - * this doorbell. This is applicable to IA64/Altix systems. 
+ /* Fence required to flush the write combined buffer, since another + * CPU may write to the same doorbell address and data may be lost + * due to the relaxed ordering nature of the write-combined BAR. */ - mmiowb(); + wmb(); } static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp, @@ -1247,16 +1245,10 @@ static int qede_rx_process_cqe(struct qede_dev *edev, csum_flag = qede_check_csum(parse_flag); if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { - if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) { + if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) rxq->rx_ip_frags++; - } else { - DP_NOTICE(edev, - "CQE has error, flags = %x, dropping incoming packet\n", - parse_flag); + else rxq->rx_hw_errors++; - qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); - return 0; - } } /* Basic validation passed; Need to prepare an SKB. This would also diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index e5ee9f274a71..6eab2c632c75 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -2066,8 +2066,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, link_params.link_up = true; edev->ops->common->set_link(edev->cdev, &link_params); - qede_rdma_dev_event_open(edev); - edev->state = QEDE_STATE_OPEN; DP_INFO(edev, "Ending successfully qede load\n"); @@ -2168,12 +2166,14 @@ static void qede_link_update(void *dev, struct qed_link_output *link) DP_NOTICE(edev, "Link is up\n"); netif_tx_start_all_queues(edev->ndev); netif_carrier_on(edev->ndev); + qede_rdma_dev_event_open(edev); } } else { if (netif_carrier_ok(edev->ndev)) { DP_NOTICE(edev, "Link is down\n"); netif_tx_disable(edev->ndev); netif_carrier_off(edev->ndev); + qede_rdma_dev_event_close(edev); } } } diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 9b2280badaf7..475f6ae5d4b3 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) { struct qede_ptp *ptp = edev->ptp; - if (!ptp) - return -EIO; + if (!ptp) { + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + + return 0; + } info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index 50b142fad6b8..1900bf7e67d1 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -238,7 +238,7 @@ qede_rdma_get_free_event_node(struct qede_dev *edev) } if (!found) { - event_node = kzalloc(sizeof(*event_node), GFP_KERNEL); + event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC); if (!event_node) { DP_NOTICE(edev, "qedr: Could not allocate memory for rdma work\n"); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index f7080d0ab874..46b0372dd032 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -3891,7 +3891,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) struct list_head *head = &mbx->cmd_q; struct qlcnic_cmd_args *cmd = NULL; - spin_lock(&mbx->queue_lock); + spin_lock_bh(&mbx->queue_lock); while (!list_empty(head)) { cmd =
list_entry(head->next, struct qlcnic_cmd_args, list); @@ -3902,7 +3902,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) qlcnic_83xx_notify_cmd_completion(adapter, cmd); } - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); } static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter) @@ -3938,12 +3938,12 @@ static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter, { struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; - spin_lock(&mbx->queue_lock); + spin_lock_bh(&mbx->queue_lock); list_del(&cmd->list); mbx->num_cmds--; - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); qlcnic_83xx_notify_cmd_completion(adapter, cmd); } @@ -4008,7 +4008,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, init_completion(&cmd->completion); cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN; - spin_lock(&mbx->queue_lock); + spin_lock_bh(&mbx->queue_lock); list_add_tail(&cmd->list, &mbx->cmd_q); mbx->num_cmds++; @@ -4016,7 +4016,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT; queue_work(mbx->work_q, &mbx->work); - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); return 0; } @@ -4112,15 +4112,15 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT; spin_unlock_irqrestore(&mbx->aen_lock, flags); - spin_lock(&mbx->queue_lock); + spin_lock_bh(&mbx->queue_lock); if (list_empty(head)) { - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); return; } cmd = list_entry(head->next, struct qlcnic_cmd_args, list); - spin_unlock(&mbx->queue_lock); + spin_unlock_bh(&mbx->queue_lock); mbx_ops->encode_cmd(adapter, cmd); mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST); diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 3ed9033e56db..44f797ab5d15 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -1204,9 +1204,9 @@ void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q) while (tx_q->tpd.consume_idx != hw_consume_idx) { tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx); if (tpbuf->dma_addr) { - dma_unmap_single(adpt->netdev->dev.parent, - tpbuf->dma_addr, tpbuf->length, - DMA_TO_DEVICE); + dma_unmap_page(adpt->netdev->dev.parent, + tpbuf->dma_addr, tpbuf->length, + DMA_TO_DEVICE); tpbuf->dma_addr = 0; } @@ -1363,9 +1363,11 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt, tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); tpbuf->length = mapped_len; - tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, - skb->data, tpbuf->length, - DMA_TO_DEVICE); + tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent, + virt_to_page(skb->data), + offset_in_page(skb->data), + tpbuf->length, + DMA_TO_DEVICE); ret = dma_mapping_error(adpt->netdev->dev.parent, tpbuf->dma_addr); if (ret) @@ -1381,9 +1383,12 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt, if (mapped_len < len) { tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); tpbuf->length = len - mapped_len; - tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, - skb->data + mapped_len, - tpbuf->length, DMA_TO_DEVICE); + tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent, + virt_to_page(skb->data + + mapped_len), + offset_in_page(skb->data + + mapped_len), + tpbuf->length, DMA_TO_DEVICE); ret = 
dma_mapping_error(adpt->netdev->dev.parent, tpbuf->dma_addr); if (ret) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 540c7622dcb1..929fb8d96ec0 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -166,12 +166,12 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, if (skb_headroom(skb) < required_headroom) { if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) - return RMNET_MAP_CONSUMED; + goto fail; } map_header = rmnet_map_add_map_header(skb, additional_header_len, 0); if (!map_header) - return RMNET_MAP_CONSUMED; + goto fail; if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) { if (ep->mux_id == 0xff) @@ -183,6 +183,10 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, skb->protocol = htons(ETH_P_MAP); return RMNET_MAP_SUCCESS; + +fail: + kfree_skb(skb); + return RMNET_MAP_CONSUMED; } /* Ingress / Egress Entry Points */ diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index d24b47b8e0b2..d118da5a10a2 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -2224,7 +2224,7 @@ static void rtl8139_poll_controller(struct net_device *dev) struct rtl8139_private *tp = netdev_priv(dev); const int irq = tp->pci_dev->irq; - disable_irq(irq); + disable_irq_nosync(irq); rtl8139_interrupt(irq, dev); enable_irq(irq); } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index a3c949ea7d1a..b98fcc9e93e5 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1388,7 +1388,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond) { void __iomem *ioaddr = tp->mmio_addr; - return RTL_R8(IBISR0) & 0x02; + return RTL_R8(IBISR0) & 0x20; } static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) @@ -1396,7 +1396,7 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) void __iomem *ioaddr = tp->mmio_addr; RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01); - rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000); + rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000); RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20); RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01); } @@ -2025,21 +2025,6 @@ static int rtl8169_set_speed(struct net_device *dev, return ret; } -static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int ret; - - del_timer_sync(&tp->timer); - - rtl_lock_work(tp); - ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd), - cmd->duplex, cmd->advertising); - rtl_unlock_work(tp); - - return ret; -} - static netdev_features_t rtl8169_fix_features(struct net_device *dev, netdev_features_t features) { @@ -2166,6 +2151,27 @@ static int rtl8169_get_link_ksettings(struct net_device *dev, return rc; } +static int rtl8169_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int rc; + u32 advertising; + + if (!ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising)) + return -EINVAL; + + del_timer_sync(&tp->timer); + + rtl_lock_work(tp); + rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed, + cmd->base.duplex, advertising); + rtl_unlock_work(tp); + + return rc; +} + static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { @@ -2233,19 +2239,14 @@ 
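[Editor's note] The r8169 hunks above drop the legacy .set_settings handler and add a .set_link_ksettings one. The new API carries link modes in a bitmap, so a driver whose internals still use the legacy u32 advertising mask must convert first; the helper reports when modes do not fit. A sketch of that shape (foo_set_link_ksettings is hypothetical, the conversion helper is the stock ethtool one):

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	static int foo_set_link_ksettings(struct net_device *dev,
					  const struct ethtool_link_ksettings *cmd)
	{
		u32 advertising;

		/* Returns false if the bitmap carries link modes that have
		 * no representation in the legacy u32 mask.
		 */
		if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
							     cmd->link_modes.advertising))
			return -EINVAL;

		/* ... apply cmd->base.autoneg, cmd->base.speed,
		 * cmd->base.duplex and the converted advertising mask ...
		 */
		return 0;
	}
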
static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd) void __iomem *ioaddr = tp->mmio_addr; dma_addr_t paddr = tp->counters_phys_addr; u32 cmd; - bool ret; RTL_W32(CounterAddrHigh, (u64)paddr >> 32); + RTL_R32(CounterAddrHigh); cmd = (u64)paddr & DMA_BIT_MASK(32); RTL_W32(CounterAddrLow, cmd); RTL_W32(CounterAddrLow, cmd | counter_cmd); - ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); - - RTL_W32(CounterAddrLow, 0); - RTL_W32(CounterAddrHigh, 0); - - return ret; + return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); } static bool rtl8169_reset_counters(struct net_device *dev) @@ -2367,7 +2368,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_drvinfo = rtl8169_get_drvinfo, .get_regs_len = rtl8169_get_regs_len, .get_link = ethtool_op_get_link, - .set_settings = rtl8169_set_settings, .get_msglevel = rtl8169_get_msglevel, .set_msglevel = rtl8169_set_msglevel, .get_regs = rtl8169_get_regs, @@ -2379,6 +2379,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, .nway_reset = rtl8169_nway_reset, .get_link_ksettings = rtl8169_get_link_ksettings, + .set_link_ksettings = rtl8169_set_link_ksettings, }; static void rtl8169_get_mac_version(struct rtl8169_private *tp, @@ -4885,6 +4886,9 @@ static void rtl_pll_power_down(struct rtl8169_private *tp) static void rtl_pll_power_up(struct rtl8169_private *tp) { rtl_generic_op(tp, tp->pll_power_ops.up); + + /* give MAC/PHY some time to resume */ + msleep(20); } static void rtl_init_pll_power_ops(struct rtl8169_private *tp) @@ -8465,12 +8469,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_msi_5; } + pci_set_drvdata(pdev, dev); + rc = register_netdev(dev); if (rc < 0) goto err_out_cnt_6; - pci_set_drvdata(pdev, dev); - netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n", rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr, (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index d2e88a30f57b..38080e95a82d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -753,6 +753,7 @@ static struct sh_eth_cpu_data sh7757_data = { .rpadir = 1, .rpadir_value = 2 << 16, .rtrate = 1, + .dual_port = 1, }; #define SH_GIGA_ETH_BASE 0xfee00000UL @@ -831,6 +832,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = { .no_trimd = 1, .no_ade = 1, .tsu = 1, + .dual_port = 1, }; /* SH7734 */ @@ -901,6 +903,7 @@ static struct sh_eth_cpu_data sh7763_data = { .tsu = 1, .irq_flags = IRQF_SHARED, .magic = 1, + .dual_port = 1, }; static struct sh_eth_cpu_data sh7619_data = { @@ -933,6 +936,7 @@ static struct sh_eth_cpu_data sh771x_data = { EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, .tsu = 1, + .dual_port = 1, }; static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) @@ -2911,7 +2915,7 @@ static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, /* SuperH's TSU register init function */ static void sh_eth_tsu_init(struct sh_eth_private *mdp) { - if (sh_eth_is_rz_fast_ether(mdp)) { + if (!mdp->cd->dual_port) { sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); /* Enable POST registers */ @@ -3212,18 +3216,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev) /* ioremap the TSU registers */ if (mdp->cd->tsu) { struct resource *rtsu; + rtsu = 
platform_get_resource(pdev, IORESOURCE_MEM, 1); - mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); - if (IS_ERR(mdp->tsu_addr)) { - ret = PTR_ERR(mdp->tsu_addr); + if (!rtsu) { + dev_err(&pdev->dev, "no TSU resource\n"); + ret = -ENODEV; + goto out_release; + } + /* We can only request the TSU region for the first port + * of the two sharing this TSU for the probe to succeed... + */ + if (devno % 2 == 0 && + !devm_request_mem_region(&pdev->dev, rtsu->start, + resource_size(rtsu), + dev_name(&pdev->dev))) { + dev_err(&pdev->dev, "can't request TSU resource.\n"); + ret = -EBUSY; + goto out_release; + } + mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start, + resource_size(rtsu)); + if (!mdp->tsu_addr) { + dev_err(&pdev->dev, "TSU region ioremap() failed.\n"); + ret = -ENOMEM; goto out_release; } mdp->port = devno % 2; ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; } - /* initialize first or needed device */ - if (!devno || pd->needs_init) { + /* Need to init only the first port of the two sharing a TSU */ + if (devno % 2 == 0) { if (mdp->cd->chip_reset) mdp->cd->chip_reset(ndev); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index a6753ccba711..6ab3d46d4f28 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -509,6 +509,7 @@ struct sh_eth_cpu_data { unsigned rmiimode:1; /* EtherC has RMIIMODE register */ unsigned rtrate:1; /* EtherC has RTRATE register */ unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */ + unsigned dual_port:1; /* Dual EtherC/E-DMAC */ }; struct sh_eth_private { diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index fc8f8bdf6579..056cb6093630 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2902,6 +2902,12 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_alloc_ordered_workqueue; } + err = rocker_probe_ports(rocker); + if (err) { + dev_err(&pdev->dev, "failed to probe ports\n"); + goto err_probe_ports; + } + /* Only FIBs pointing to our own netdevs are programmed into * the device, so no need to pass a callback. 
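[Editor's note] The sh_eth probe rework above stops using devm_ioremap_resource() for the TSU window because two ports share that one register block: an exclusive request from the second port would always fail. The fix requests the region only from the even-numbered (first) port but maps it in both. A condensed sketch of the split, assuming the same two-ports-per-TSU layout:

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	static void __iomem *foo_map_shared_window(struct platform_device *pdev,
						   unsigned int port)
	{
		struct resource *res;
		void __iomem *addr;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res)
			return ERR_PTR(-ENODEV);

		/* Only the first of the two sharing ports claims the region. */
		if (port % 2 == 0 &&
		    !devm_request_mem_region(&pdev->dev, res->start,
					     resource_size(res),
					     dev_name(&pdev->dev)))
			return ERR_PTR(-EBUSY);

		/* Both ports map it. */
		addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
		return addr ?: ERR_PTR(-ENOMEM);
	}
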
*/ @@ -2918,22 +2924,16 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) rocker->hw.id = rocker_read64(rocker, SWITCH_ID); - err = rocker_probe_ports(rocker); - if (err) { - dev_err(&pdev->dev, "failed to probe ports\n"); - goto err_probe_ports; - } - dev_info(&pdev->dev, "Rocker switch with id %*phN\n", (int)sizeof(rocker->hw.id), &rocker->hw.id); return 0; -err_probe_ports: - unregister_switchdev_notifier(&rocker_switchdev_notifier); err_register_switchdev_notifier: unregister_fib_notifier(&rocker->fib_nb); err_register_fib_notifier: + rocker_remove_ports(rocker); +err_probe_ports: destroy_workqueue(rocker->rocker_owq); err_alloc_ordered_workqueue: free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); @@ -2961,9 +2961,9 @@ static void rocker_remove(struct pci_dev *pdev) { struct rocker *rocker = pci_get_drvdata(pdev); - rocker_remove_ports(rocker); unregister_switchdev_notifier(&rocker_switchdev_notifier); unregister_fib_notifier(&rocker->fib_nb); + rocker_remove_ports(rocker); rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); destroy_workqueue(rocker->rocker_owq); free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 13f72f5b18d2..09352ee43b55 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -5726,7 +5726,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) * MCFW do not support VFs. */ rc = efx_ef10_vport_set_mac_address(efx); - } else { + } else if (rc) { efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, sizeof(inbuf), NULL, 0, rc); } diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 3df872f56289..37026473cf6d 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -376,7 +376,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) * because generally mcdi responses are fast. 
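[Editor's note] The rocker_probe() reordering above also reorders the error labels, preserving the usual kernel unwind discipline: each label undoes exactly the steps that completed before the failing one, in reverse. A generic sketch of the shape (all foo_* steps hypothetical):

	static int foo_probe(struct foo_dev *fd)
	{
		int err;

		err = foo_setup_a(fd);
		if (err)
			return err;

		err = foo_setup_b(fd);	/* moved earlier, like probe_ports */
		if (err)
			goto err_setup_b;

		err = foo_setup_c(fd);
		if (err)
			goto err_setup_c;

		return 0;

	err_setup_c:
		foo_teardown_b(fd);	/* unwind mirrors setup, reversed */
	err_setup_b:
		foo_teardown_a(fd);
		return err;
	}
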
After that, back off * and poll once a jiffy (approximately) */ - spins = TICK_USEC; + spins = USER_TICK_USEC; finish = jiffies + MCDI_RPC_TIMEOUT; while (1) { diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 32bf1fecf864..9b85cbd5a231 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -77,6 +77,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, } if (buffer->flags & EFX_TX_BUF_SKB) { + EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl); (*pkts_compl)++; (*bytes_compl) += buffer->skb->len; dev_consume_skb_any((struct sk_buff *)buffer->skb); @@ -426,12 +427,14 @@ static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) { struct efx_tx_buffer *buffer; + unsigned int bytes_compl = 0; + unsigned int pkts_compl = 0; /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { --tx_queue->insert_count; buffer = __efx_tx_queue_get_insert_buffer(tx_queue); - efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); } } diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 012fb66eed8d..f0afb88d7bc2 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2335,14 +2335,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev) pdata = netdev_priv(dev); BUG_ON(!pdata); BUG_ON(!pdata->ioaddr); - WARN_ON(dev->phydev); SMSC_TRACE(pdata, ifdown, "Stopping driver"); + unregister_netdev(dev); + mdiobus_unregister(pdata->mii_bus); mdiobus_free(pdata->mii_bus); - unregister_netdev(dev); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smsc911x-memory"); if (!res) diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index e82b4b70b7be..627fec210e2f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -409,7 +409,7 @@ struct stmmac_desc_ops { /* get timestamp value */ u64(*get_timestamp) (void *desc, u32 ats); /* get rx timestamp status */ - int (*get_rx_timestamp_status) (void *desc, u32 ats); + int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats); /* Display ring */ void (*display_ring)(void *head, unsigned int size, bool rx); /* set MSS via context descriptor */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 4404650b32c5..8be4b32544ef 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -116,7 +116,7 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac) snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev)); init.name = clk_name; init.ops = &clk_mux_ops; - init.flags = 0; + init.flags = CLK_SET_RATE_PARENT; init.parent_names = mux_parent_names; init.num_parents = MUX_CLK_NUM_PARENTS; @@ -144,7 +144,9 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac) dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT; dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH; dwmac->m250_div.hw.init = &init; - dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO; + dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | + CLK_DIVIDER_ALLOW_ZERO | + CLK_DIVIDER_ROUND_CLOSEST; dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw); 
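[Editor's note] In the smsc911x_drv_remove() hunk above, unregister_netdev() is moved ahead of the MDIO teardown: until the netdev is unregistered, the stack can still bring the interface up and touch the PHY on the bus being freed. A sketch of the safe ordering (struct foo_priv is hypothetical):

	#include <linux/netdevice.h>
	#include <linux/phy.h>
	#include <linux/platform_device.h>

	struct foo_priv {
		struct mii_bus *mii_bus;
	};

	static int foo_drv_remove(struct platform_device *pdev)
	{
		struct net_device *dev = platform_get_drvdata(pdev);
		struct foo_priv *priv = netdev_priv(dev);

		/* Cut off userspace and the net stack first... */
		unregister_netdev(dev);

		/* ...then release resources the running netdev relied on. */
		mdiobus_unregister(priv->mii_bus);
		mdiobus_free(priv->mii_bus);
		free_netdev(dev);
		return 0;
	}
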
if (WARN_ON(IS_ERR(dwmac->m250_div_clk))) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 2f7d7ec59962..e1d03489ae63 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -562,10 +562,12 @@ static int dwmac4_irq_status(struct mac_device_info *hw, struct stmmac_extra_stats *x) { void __iomem *ioaddr = hw->pcsr; - u32 intr_status; + u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); + u32 intr_enable = readl(ioaddr + GMAC_INT_EN); int ret = 0; - intr_status = readl(ioaddr + GMAC_INT_STATUS); + /* Discard disabled bits */ + intr_status &= intr_enable; /* Not used events (e.g. MMC interrupts) are not handled. */ if ((intr_status & mmc_tx_irq)) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 4b286e27c4ca..7e089bf906b4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -258,7 +258,8 @@ static int dwmac4_rx_check_timestamp(void *desc) return ret; } -static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) +static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc, + u32 ats) { struct dma_desc *p = (struct dma_desc *)desc; int ret = -EINVAL; @@ -270,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) /* Check if timestamp is OK from context descriptor */ do { - ret = dwmac4_rx_check_timestamp(desc); + ret = dwmac4_rx_check_timestamp(next_desc); if (ret < 0) goto exit; i++; diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 7546b3664113..2a828a312814 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -400,7 +400,8 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats) return ns; } -static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats) +static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc, + u32 ats) { if (ats) { struct dma_extended_desc *p = (struct dma_extended_desc *)desc; diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index f817f8f36569..db4cee57bb24 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -265,7 +265,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats) return ns; } -static int ndesc_get_rx_timestamp_status(void *desc, u32 ats) +static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats) { struct dma_desc *p = (struct dma_desc *)desc; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 721b61655261..08c19ebd5306 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -34,6 +34,7 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, { u32 value = readl(ioaddr + PTP_TCR); unsigned long data; + u32 reg_value; /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second * formula = (1/ptp_clock) * 1000000000 @@ -50,10 +51,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, data &= PTP_SSIR_SSINC_MASK; + reg_value = data; if (gmac4) - data = data << GMAC4_PTP_SSIR_SSINC_SHIFT; + reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT; - writel(data, ioaddr + 
PTP_SSIR); + writel(reg_value, ioaddr + PTP_SSIR); return data; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 16bd50929084..1a9a382bf1c4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -51,7 +51,7 @@ #include #include "dwmac1000.h" -#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) +#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) /* Module parameters */ @@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(unsigned long arg) bool stmmac_eee_init(struct stmmac_priv *priv) { struct net_device *ndev = priv->dev; + int interface = priv->plat->interface; unsigned long flags; bool ret = false; + if ((interface != PHY_INTERFACE_MODE_MII) && + (interface != PHY_INTERFACE_MODE_GMII) && + !phy_interface_mode_is_rgmii(interface)) + goto out; + /* Using PCS we cannot dial with the phy registers at this stage * so we do not support extra feature like EEE. */ @@ -483,7 +489,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, desc = np; /* Check if timestamp is available */ - if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) { + if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) { ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); shhwtstamp = skb_hwtstamps(skb); @@ -908,6 +914,7 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv) static int stmmac_init_phy(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + u32 tx_cnt = priv->plat->tx_queues_to_use; struct phy_device *phydev; char phy_id_fmt[MII_BUS_ID_SIZE + 3]; char bus_id[MII_BUS_ID_SIZE]; @@ -948,6 +955,15 @@ static int stmmac_init_phy(struct net_device *dev) phydev->advertising &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); + /* + * Half-duplex mode not supported with multiqueue + * half-duplex can only works with single queue + */ + if (tx_cnt > 1) + phydev->supported &= ~(SUPPORTED_1000baseT_Half | + SUPPORTED_100baseT_Half | + SUPPORTED_10baseT_Half); + /* * Broken HW is sometimes missing the pull-up resistor on the * MDIO line, which results in reads to non-existent devices returning @@ -1823,6 +1839,11 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) if (unlikely(status & tx_dma_own)) break; + /* Make sure descriptor fields are read after reading + * the own bit. + */ + dma_rmb(); + /* Just consider the last segment and ...*/ if (likely(!(status & tx_not_ls))) { /* ... verify the status error condition */ @@ -2362,7 +2383,7 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) continue; packet = priv->plat->rx_queues_cfg[queue].pkt_route; - priv->hw->mac->rx_queue_prio(priv->hw, packet, queue); + priv->hw->mac->rx_queue_routing(priv->hw, packet, queue); } } @@ -2564,6 +2585,7 @@ static int stmmac_open(struct net_device *dev) priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); priv->rx_copybreak = STMMAC_RX_COPYBREAK; + priv->mss = 0; ret = alloc_dma_desc_resources(priv); if (ret < 0) { @@ -2911,8 +2933,15 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); /* If context desc is used to change MSS */ - if (mss_desc) + if (mss_desc) { + /* Make sure that first descriptor has been completely + * written, including its own bit. 
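[Editor's note] Two stmmac hunks in this range pair barriers with the descriptor own bit: stmmac_tx_clean() above now issues dma_rmb() after seeing the own bit cleared and before reading other descriptor fields, and the stmmac_tso_xmit() comment continuing below motivates a dma_wmb() before flipping ownership. Both sides of the idiom, with a hypothetical descriptor layout:

	#include <linux/bitops.h>
	#include <linux/compiler.h>
	#include <linux/types.h>

	#define FOO_DESC_OWN	BIT(31)

	struct foo_desc {		/* hypothetical layout */
		__le32 status;		/* holds FOO_DESC_OWN */
		__le32 len;
		__le32 addr;
	};

	/* Consumer side: load the own bit first, payload fields after. */
	static bool foo_desc_done(struct foo_desc *desc, u32 *len)
	{
		if (le32_to_cpu(READ_ONCE(desc->status)) & FOO_DESC_OWN)
			return false;	/* still owned by the DMA engine */

		dma_rmb();	/* order own-bit load before field loads */
		*len = le32_to_cpu(desc->len);
		return true;
	}

	/* Producer side: write payload fields first, the own bit last. */
	static void foo_desc_queue(struct foo_desc *desc, u32 addr, u32 len)
	{
		desc->addr = cpu_to_le32(addr);
		desc->len = cpu_to_le32(len);

		dma_wmb();	/* fields visible before ownership flips */
		desc->status = cpu_to_le32(FOO_DESC_OWN);
	}
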
This is because MSS is + * actually before first descriptor, so we need to make + * sure that MSS's own bit is the last thing written. + */ + dma_wmb(); priv->hw->desc->set_tx_owner(mss_desc); + } /* The own bit must be the latest setting done when prepare the * descriptor and then barrier is needed to make sure that diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 8d375e51a526..6a393b16a1fc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -257,7 +257,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev, return -ENOMEM; /* Enable pci device */ - ret = pcim_enable_device(pdev); + ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__); @@ -300,9 +300,45 @@ static int stmmac_pci_probe(struct pci_dev *pdev, static void stmmac_pci_remove(struct pci_dev *pdev) { stmmac_dvr_remove(&pdev->dev); + pci_disable_device(pdev); } -static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume); +static int stmmac_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + ret = stmmac_suspend(dev); + if (ret) + return ret; + + ret = pci_save_state(pdev); + if (ret) + return ret; + + pci_disable_device(pdev); + pci_wake_from_d3(pdev, true); + return 0; +} + +static int stmmac_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + pci_restore_state(pdev); + pci_set_power_state(pdev, PCI_D0); + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + return stmmac_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); /* synthetic ID, no official vendor */ #define PCI_VENDOR_ID_STMMAC 0x700 diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 6a4e8e1bbd90..e92f41d20a2c 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -3442,7 +3442,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, len = (val & RCR_ENTRY_L2_LEN) >> RCR_ENTRY_L2_LEN_SHIFT; - len -= ETH_FCS_LEN; + append_size = len + ETH_HLEN + ETH_FCS_LEN; addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << RCR_ENTRY_PKT_BUF_ADDR_SHIFT; @@ -3452,7 +3452,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, RCR_ENTRY_PKTBUFSZ_SHIFT]; off = addr & ~PAGE_MASK; - append_size = rcr_size; if (num_rcr == 1) { int ptype; @@ -3465,7 +3464,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, else skb_checksum_none_assert(skb); } else if (!(val & RCR_ENTRY_MULTI)) - append_size = len - skb->len; + append_size = append_size - skb->len; niu_rx_skb_append(skb, page, off, append_size, rcr_size); if ((page->index + rp->rbr_block_size) - rcr_size == addr) { diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index fa607d062cb3..15cd086e3f47 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -59,8 +59,7 @@ #include #include "sungem.h" -/* Stripping FCS is causing problems, disabled for now */ -#undef STRIP_FCS +#define STRIP_FCS #define DEFAULT_MSG (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ @@ -434,7 +433,7 @@ static int gem_rxmac_reset(struct gem *gp) writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | - ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); + 
(ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128); writel(val, gp->regs + RXDMA_CFG); if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) writel(((5 & RXDMA_BLANK_IPKTS) | @@ -759,7 +758,6 @@ static int gem_rx(struct gem *gp, int work_to_do) struct net_device *dev = gp->dev; int entry, drops, work_done = 0; u32 done; - __sum16 csum; if (netif_msg_rx_status(gp)) printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n", @@ -854,9 +852,13 @@ static int gem_rx(struct gem *gp, int work_to_do) skb = copy_skb; } - csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff); - skb->csum = csum_unfold(csum); - skb->ip_summed = CHECKSUM_COMPLETE; + if (likely(dev->features & NETIF_F_RXCSUM)) { + __sum16 csum; + + csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff); + skb->csum = csum_unfold(csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } skb->protocol = eth_type_trans(skb, gp->dev); napi_gro_receive(&gp->napi, skb); @@ -1760,7 +1762,7 @@ static void gem_init_dma(struct gem *gp) writel(0, gp->regs + TXDMA_KICK); val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | - ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); + (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128); writel(val, gp->regs + RXDMA_CFG); writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); @@ -2986,8 +2988,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); /* We can do scatter/gather and HW checksum */ - dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; - dev->features |= dev->hw_features | NETIF_F_RXCSUM; + dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; + dev->features = dev->hw_features; if (pci_using_dac) dev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 0b95105f7060..65347d2f139b 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -311,7 +311,7 @@ static struct vnet *vnet_new(const u64 *local_mac, dev->ethtool_ops = &vnet_ethtool_ops; dev->watchdog_timeo = VNET_TX_TIMEOUT; - dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | + dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_HW_CSUM | NETIF_F_SG; dev->features = dev->hw_features; diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 18013645e76c..0c1adad7415d 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -177,12 +177,18 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) } dev = bus_find_device(&platform_bus_type, NULL, node, match); - of_node_put(node); + if (!dev) { + dev_err(dev, "unable to find platform device for %pOF\n", node); + goto out; + } + priv = dev_get_drvdata(dev); priv->cpsw_phy_sel(priv, phy_mode, slave); put_device(dev); +out: + of_node_put(node); } EXPORT_SYMBOL_GPL(cpsw_phy_sel); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index db8a4bcfc6c7..8cb44eabc283 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -124,7 +124,7 @@ do { \ #define RX_PRIORITY_MAPPING 0x76543210 #define TX_PRIORITY_MAPPING 0x33221100 -#define CPDMA_TX_PRIORITY_MAP 0x01234567 +#define CPDMA_TX_PRIORITY_MAP 0x76543210 #define CPSW_VLAN_AWARE BIT(1) #define CPSW_ALE_VLAN_AWARE 1 @@ -996,7 +996,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, /* set speed_in input in case RMII mode is used in 100Mbps */ if (phy->speed == 100) mac_control |= BIT(15); - else if (phy->speed 
== 10) + /* in band mode only works in 10Mbps RGMII mode */ + else if ((phy->speed == 10) && phy_interface_is_rgmii(phy)) mac_control |= BIT(18); /* In Band mode */ if (priv->rx_pause) @@ -1259,6 +1260,8 @@ static inline void cpsw_add_dual_emac_def_ale_entries( cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, slave->port_vlan); + cpsw_ale_control_set(cpsw->ale, slave_port, + ALE_PORT_DROP_UNKNOWN_VLAN, 1); } static void soft_reset_slave(struct cpsw_slave *slave) @@ -1618,6 +1621,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, q_idx = q_idx % cpsw->tx_ch_num; txch = cpsw->txv[q_idx].ch; + txq = netdev_get_tx_queue(ndev, q_idx); ret = cpsw_tx_packet_submit(priv, skb, txch); if (unlikely(ret != 0)) { cpsw_err(priv, tx_err, "desc submit failed\n"); @@ -1628,15 +1632,26 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, * tell the kernel to stop sending us tx frames. */ if (unlikely(!cpdma_check_free_tx_desc(txch))) { - txq = netdev_get_tx_queue(ndev, q_idx); netif_tx_stop_queue(txq); + + /* Barrier, so that stop_queue visible to other cpus */ + smp_mb__after_atomic(); + + if (cpdma_check_free_tx_desc(txch)) + netif_tx_wake_queue(txq); } return NETDEV_TX_OK; fail: ndev->stats.tx_dropped++; - txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb)); netif_tx_stop_queue(txq); + + /* Barrier, so that stop_queue visible to other cpus */ + smp_mb__after_atomic(); + + if (cpdma_check_free_tx_desc(txch)) + netif_tx_wake_queue(txq); + return NETDEV_TX_BUSY; } diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig index 6d68c8a8f4f2..da4ec575ccf9 100644 --- a/drivers/net/ethernet/xilinx/Kconfig +++ b/drivers/net/ethernet/xilinx/Kconfig @@ -34,6 +34,7 @@ config XILINX_AXI_EMAC config XILINX_LL_TEMAC tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" depends on (PPC || MICROBLAZE) + depends on !64BIT || BROKEN select PHYLIB ---help--- This driver supports the Xilinx 10/100/1000 LocalLink TEMAC diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index ed51018a813e..cb51448389a1 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -474,7 +474,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } @@ -825,6 +825,13 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (IS_ERR(rt)) return PTR_ERR(rt); + if (skb_dst(skb)) { + int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) - + GENEVE_BASE_HLEN - info->options_len - 14; + + skb_dst_update_pmtu(skb, mtu); + } + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->collect_md) { tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); @@ -864,6 +871,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (IS_ERR(dst)) return PTR_ERR(dst); + if (skb_dst(skb)) { + int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) - + GENEVE_BASE_HLEN - info->options_len - 14; + + skb_dst_update_pmtu(skb, mtu); + } + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->collect_md) { prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); @@ -1503,6 +1517,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); struct ip_tunnel_info *info = &geneve->info; + bool metadata = geneve->collect_md; __u8 tmp_vni[3]; __u32 vni; @@ -1511,32 +1526,24 @@ static int 
geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) if (nla_put_u32(skb, IFLA_GENEVE_ID, vni)) goto nla_put_failure; - if (rtnl_dereference(geneve->sock4)) { + if (!metadata && ip_tunnel_info_af(info) == AF_INET) { if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE, info->key.u.ipv4.dst)) goto nla_put_failure; - if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM, !!(info->key.tun_flags & TUNNEL_CSUM))) goto nla_put_failure; - } - #if IS_ENABLED(CONFIG_IPV6) - if (rtnl_dereference(geneve->sock6)) { + } else if (!metadata) { if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6, &info->key.u.ipv6.dst)) goto nla_put_failure; - if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, !(info->key.tun_flags & TUNNEL_CSUM))) goto nla_put_failure; - - if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, - !geneve->use_udp6_rx_checksums)) - goto nla_put_failure; - } #endif + } if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) || nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) || @@ -1546,10 +1553,13 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst)) goto nla_put_failure; - if (geneve->collect_md) { - if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA)) + if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA)) goto nla_put_failure; - } + + if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, + !geneve->use_udp6_rx_checksums)) + goto nla_put_failure; + return 0; nla_put_failure: diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 71ddadbf2368..d7ba2b813eff 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -1381,8 +1381,8 @@ static int rr_close(struct net_device *dev) rrpriv->info_dma); rrpriv->info = NULL; - free_irq(pdev->irq, dev); spin_unlock_irqrestore(&rrpriv->lock, flags); + free_irq(pdev->irq, dev); return 0; } diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 5176be76ca7d..e33a6c672a0a 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -179,7 +179,7 @@ struct rndis_device { u8 hw_mac_adr[ETH_ALEN]; u8 rss_key[NETVSC_HASH_KEYLEN]; - u16 ind_table[ITAB_NUM]; + u16 rx_table[ITAB_NUM]; }; @@ -192,7 +192,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, const struct netvsc_device_info *info); int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx); void netvsc_device_remove(struct hv_device *device); -int netvsc_send(struct net_device_context *ndc, +int netvsc_send(struct net_device *net, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, struct hv_page_buffer *page_buffer, @@ -207,8 +207,7 @@ int netvsc_recv_callback(struct net_device *net, void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); -void rndis_set_subchannel(struct work_struct *w); -bool rndis_filter_opened(const struct netvsc_device *nvdev); +int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, @@ -635,17 +634,34 @@ struct nvsp_message { #define NETVSC_MTU 65535 #define NETVSC_MTU_MIN ETH_MIN_MTU -#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */ -#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */ -#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */ +/* Max buffer sizes allowed by a host */ +#define 
NETVSC_RECEIVE_BUFFER_SIZE (1024 * 1024 * 31) /* 31MB */ +#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024 * 1024 * 15) /* 15MB */ +#define NETVSC_RECEIVE_BUFFER_DEFAULT (1024 * 1024 * 16) + +#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */ +#define NETVSC_SEND_BUFFER_DEFAULT (1024 * 1024) + #define NETVSC_INVALID_INDEX -1 #define NETVSC_SEND_SECTION_SIZE 6144 #define NETVSC_RECV_SECTION_SIZE 1728 +/* Default size of TX buf: 1MB, RX buf: 16MB */ +#define NETVSC_MIN_TX_SECTIONS 10 +#define NETVSC_DEFAULT_TX (NETVSC_SEND_BUFFER_DEFAULT \ + / NETVSC_SEND_SECTION_SIZE) +#define NETVSC_MIN_RX_SECTIONS 10 +#define NETVSC_DEFAULT_RX (NETVSC_RECEIVE_BUFFER_DEFAULT \ + / NETVSC_RECV_SECTION_SIZE) + #define NETVSC_RECEIVE_BUFFER_ID 0xcafe #define NETVSC_SEND_BUFFER_ID 0 +#define NETVSC_SUPPORTED_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | \ + NETIF_F_TSO | NETIF_F_IPV6_CSUM | \ + NETIF_F_TSO6) + #define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */ #define VRSS_CHANNEL_MAX 64 #define VRSS_CHANNEL_DEFAULT 8 @@ -708,6 +724,8 @@ struct net_device_context { struct hv_device *device_ctx; /* netvsc_device */ struct netvsc_device __rcu *nvdev; + /* list of netvsc net_devices */ + struct list_head list; /* reconfigure work */ struct delayed_work dwork; /* last reconfig time */ @@ -721,7 +739,7 @@ struct net_device_context { u32 tx_checksum_mask; - u32 tx_send_table[VRSS_SEND_TAB_SIZE]; + u32 tx_table[VRSS_SEND_TAB_SIZE]; /* Ethtool settings */ bool udp4_l4_hash; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 8d5077fb0492..806239b89990 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -62,6 +62,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) VM_PKT_DATA_INBAND, 0); } +/* Worker to setup sub channels on initial setup + * Initial hotplug event occurs in softirq context + * and can't wait for channels. 
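[Editor's note] netvsc_subchan_work(), whose opening comment closes just below, runs under RTNL but can race with a device removal that already holds RTNL and is waiting in cancel_work_sync(); a plain rtnl_lock() there would deadlock. The trylock-and-requeue idiom the function uses is worth isolating (foo_work is a hypothetical worker):

	#include <linux/rtnetlink.h>
	#include <linux/workqueue.h>

	static void foo_work(struct work_struct *w)
	{
		/* Removal may hold RTNL while flushing this work; blocking
		 * on rtnl_lock() would deadlock, so back off and requeue.
		 */
		if (!rtnl_trylock()) {
			schedule_work(w);
			return;
		}

		/* ... RTNL-protected setup ... */

		rtnl_unlock();
	}
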
+ */ +static void netvsc_subchan_work(struct work_struct *w) +{ + struct netvsc_device *nvdev = + container_of(w, struct netvsc_device, subchan_work); + struct rndis_device *rdev; + int i, ret; + + /* Avoid deadlock with device removal already under RTNL */ + if (!rtnl_trylock()) { + schedule_work(w); + return; + } + + rdev = nvdev->extension; + if (rdev) { + ret = rndis_set_subchannel(rdev->ndev, nvdev); + if (ret == 0) { + netif_device_attach(rdev->ndev); + } else { + /* fallback to only primary channel */ + for (i = 1; i < nvdev->num_chn; i++) + netif_napi_del(&nvdev->chan_table[i].napi); + + nvdev->max_chn = 1; + nvdev->num_chn = 1; + } + } + + rtnl_unlock(); +} + static struct netvsc_device *alloc_net_device(void) { struct netvsc_device *net_device; @@ -78,7 +113,7 @@ static struct netvsc_device *alloc_net_device(void) init_completion(&net_device->channel_init_wait); init_waitqueue_head(&net_device->subchan_open); - INIT_WORK(&net_device->subchan_work, rndis_set_subchannel); + INIT_WORK(&net_device->subchan_work, netvsc_subchan_work); return net_device; } @@ -89,6 +124,11 @@ static void free_netvsc_device(struct rcu_head *head) = container_of(head, struct netvsc_device, rcu); int i; + kfree(nvdev->extension); + vfree(nvdev->recv_buf); + vfree(nvdev->send_buf); + kfree(nvdev->send_section_map); + for (i = 0; i < VRSS_CHANNEL_MAX; i++) vfree(nvdev->chan_table[i].mrc.slots); @@ -100,12 +140,11 @@ static void free_netvsc_device_rcu(struct netvsc_device *nvdev) call_rcu(&nvdev->rcu, free_netvsc_device); } -static void netvsc_destroy_buf(struct hv_device *device) +static void netvsc_revoke_recv_buf(struct hv_device *device, + struct netvsc_device *net_device) { - struct nvsp_message *revoke_packet; struct net_device *ndev = hv_get_drvdata(device); - struct net_device_context *ndc = netdev_priv(ndev); - struct netvsc_device *net_device = rtnl_dereference(ndc->nvdev); + struct nvsp_message *revoke_packet; int ret; /* @@ -147,28 +186,14 @@ static void netvsc_destroy_buf(struct hv_device *device) } net_device->recv_section_cnt = 0; } +} - /* Teardown the gpadl on the vsp end */ - if (net_device->recv_buf_gpadl_handle) { - ret = vmbus_teardown_gpadl(device->channel, - net_device->recv_buf_gpadl_handle); - - /* If we failed here, we might as well return and have a leak - * rather than continue and a bugchk - */ - if (ret != 0) { - netdev_err(ndev, - "unable to teardown receive buffer's gpadl\n"); - return; - } - net_device->recv_buf_gpadl_handle = 0; - } - - if (net_device->recv_buf) { - /* Free up the receive buffer */ - vfree(net_device->recv_buf); - net_device->recv_buf = NULL; - } +static void netvsc_revoke_send_buf(struct hv_device *device, + struct netvsc_device *net_device) +{ + struct net_device *ndev = hv_get_drvdata(device); + struct nvsp_message *revoke_packet; + int ret; /* Deal with the send buffer we may have setup. 
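[Editor's note] Earlier in this hunk, free_netvsc_device() — the call_rcu() callback — absorbs the buffer and extension frees, so the memory is released only after every RCU reader of the nvdev pointer has finished. The general deferred-free shape (struct foo_dev is hypothetical):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo_dev {
		struct rcu_head rcu;
		void *buf;
	};

	static void foo_free_rcu(struct rcu_head *head)
	{
		struct foo_dev *fd = container_of(head, struct foo_dev, rcu);

		kfree(fd->buf);		/* safe: grace period has elapsed */
		kfree(fd);
	}

	static void foo_destroy(struct foo_dev __rcu **slot, struct foo_dev *fd)
	{
		RCU_INIT_POINTER(*slot, NULL);	/* unpublish first */
		call_rcu(&fd->rcu, foo_free_rcu);
	}
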
* If we got a send section size, it means we received a @@ -210,7 +235,36 @@ static void netvsc_destroy_buf(struct hv_device *device) } net_device->send_section_cnt = 0; } - /* Teardown the gpadl on the vsp end */ +} + +static void netvsc_teardown_recv_gpadl(struct hv_device *device, + struct netvsc_device *net_device) +{ + struct net_device *ndev = hv_get_drvdata(device); + int ret; + + if (net_device->recv_buf_gpadl_handle) { + ret = vmbus_teardown_gpadl(device->channel, + net_device->recv_buf_gpadl_handle); + + /* If we failed here, we might as well return and have a leak + * rather than continue and a bugchk + */ + if (ret != 0) { + netdev_err(ndev, + "unable to teardown receive buffer's gpadl\n"); + return; + } + net_device->recv_buf_gpadl_handle = 0; + } +} + +static void netvsc_teardown_send_gpadl(struct hv_device *device, + struct netvsc_device *net_device) +{ + struct net_device *ndev = hv_get_drvdata(device); + int ret; + if (net_device->send_buf_gpadl_handle) { ret = vmbus_teardown_gpadl(device->channel, net_device->send_buf_gpadl_handle); @@ -225,12 +279,6 @@ static void netvsc_destroy_buf(struct hv_device *device) } net_device->send_buf_gpadl_handle = 0; } - if (net_device->send_buf) { - /* Free up the send buffer */ - vfree(net_device->send_buf); - net_device->send_buf = NULL; - } - kfree(net_device->send_section_map); } int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) @@ -262,6 +310,11 @@ static int netvsc_init_buf(struct hv_device *device, buf_size = device_info->recv_sections * device_info->recv_section_size; buf_size = roundup(buf_size, PAGE_SIZE); + /* Legacy hosts only allow smaller receive buffer */ + if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2) + buf_size = min_t(unsigned int, buf_size, + NETVSC_RECEIVE_BUFFER_SIZE_LEGACY); + net_device->recv_buf = vzalloc(buf_size); if (!net_device->recv_buf) { netdev_err(ndev, @@ -420,7 +473,10 @@ static int netvsc_init_buf(struct hv_device *device, goto exit; cleanup: - netvsc_destroy_buf(device); + netvsc_revoke_recv_buf(device, net_device); + netvsc_revoke_send_buf(device, net_device); + netvsc_teardown_recv_gpadl(device, net_device); + netvsc_teardown_send_gpadl(device, net_device); exit: return ret; @@ -539,11 +595,6 @@ static int netvsc_connect_vsp(struct hv_device *device, return ret; } -static void netvsc_disconnect_vsp(struct hv_device *device) -{ - netvsc_destroy_buf(device); -} - /* * netvsc_device_remove - Callback when the root bus device is removed */ @@ -555,12 +606,24 @@ void netvsc_device_remove(struct hv_device *device) = rtnl_dereference(net_device_ctx->nvdev); int i; - cancel_work_sync(&net_device->subchan_work); + /* + * Revoke receive buffer. If host is pre-Win2016 then tear down + * receive buffer GPADL. Do the same for send buffer. 
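[Editor's note] The netvsc_device_remove() comment above (closing just below) is implemented by splitting the old netvsc_destroy_buf() into revoke and GPADL-teardown halves, so the unmap can straddle vmbus_close() depending on host version. Consolidated from the hunks that follow (RCU unpublish and NAPI teardown omitted), the remove path ends up ordered like this:

	/* netvsc_device_remove(), condensed from the diff below */
	netvsc_revoke_recv_buf(device, net_device);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_recv_gpadl(device, net_device);

	netvsc_revoke_send_buf(device, net_device);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_send_gpadl(device, net_device);

	vmbus_close(device->channel);

	/* Win2016+ hosts want the GPADL unmapped only after the close. */
	if (vmbus_proto_version >= VERSION_WIN10) {
		netvsc_teardown_recv_gpadl(device, net_device);
		netvsc_teardown_send_gpadl(device, net_device);
	}
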
+ */ + netvsc_revoke_recv_buf(device, net_device); + if (vmbus_proto_version < VERSION_WIN10) + netvsc_teardown_recv_gpadl(device, net_device); - netvsc_disconnect_vsp(device); + netvsc_revoke_send_buf(device, net_device); + if (vmbus_proto_version < VERSION_WIN10) + netvsc_teardown_send_gpadl(device, net_device); RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); + /* And disassociate NAPI context from device */ + for (i = 0; i < net_device->num_chn; i++) + netif_napi_del(&net_device->chan_table[i].napi); + /* * At this point, no one should be accessing net_device * except in here @@ -570,9 +633,14 @@ void netvsc_device_remove(struct hv_device *device) /* Now, we can close the channel safely */ vmbus_close(device->channel); - /* And dissassociate NAPI context from device */ - for (i = 0; i < net_device->num_chn; i++) - netif_napi_del(&net_device->chan_table[i].napi); + /* + * If host is Win2016 or higher then we do the GPADL tear down + * here after VMBus is closed. + */ + if (vmbus_proto_version >= VERSION_WIN10) { + netvsc_teardown_recv_gpadl(device, net_device); + netvsc_teardown_send_gpadl(device, net_device); + } /* Release all resources */ free_netvsc_device_rcu(net_device); @@ -638,13 +706,18 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, queue_sends = atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); - if (net_device->destroy && queue_sends == 0) - wake_up(&net_device->wait_drain); + if (unlikely(net_device->destroy)) { + if (queue_sends == 0) + wake_up(&net_device->wait_drain); + } else { + struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); - if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && - (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || - queue_sends < 1)) - netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx)); + if (netif_tx_queue_stopped(txq) && + (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || + queue_sends < 1)) { + netif_tx_wake_queue(txq); + } + } } static void netvsc_send_completion(struct netvsc_device *net_device, @@ -692,13 +765,13 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device) return NETVSC_INVALID_INDEX; } -static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, - unsigned int section_index, - u32 pend_size, - struct hv_netvsc_packet *packet, - struct rndis_message *rndis_msg, - struct hv_page_buffer *pb, - struct sk_buff *skb) +static void netvsc_copy_to_send_buf(struct netvsc_device *net_device, + unsigned int section_index, + u32 pend_size, + struct hv_netvsc_packet *packet, + struct rndis_message *rndis_msg, + struct hv_page_buffer *pb, + bool xmit_more) { char *start = net_device->send_buf; char *dest = start + (section_index * net_device->send_section_size) @@ -711,7 +784,8 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, packet->page_buf_cnt; /* Add padding */ - if (skb->xmit_more && remain && !packet->cp_partial) { + remain = packet->total_data_buflen & (net_device->pkt_align - 1); + if (xmit_more && remain) { padding = net_device->pkt_align - remain; rndis_msg->msg_len += padding; packet->total_data_buflen += padding; @@ -731,8 +805,6 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, memset(dest, 0, padding); msg_size += padding; } - - return msg_size; } static inline int netvsc_send_pkt( @@ -820,12 +892,13 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send, } /* RCU already held by caller */ -int netvsc_send(struct 
net_device_context *ndev_ctx, +int netvsc_send(struct net_device *ndev, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, struct hv_page_buffer *pb, struct sk_buff *skb) { + struct net_device_context *ndev_ctx = netdev_priv(ndev); struct netvsc_device *net_device = rcu_dereference_bh(ndev_ctx->nvdev); struct hv_device *device = ndev_ctx->device_ctx; @@ -836,20 +909,12 @@ int netvsc_send(struct net_device_context *ndev_ctx, struct multi_send_data *msdp; struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL; struct sk_buff *msd_skb = NULL; - bool try_batch; - bool xmit_more = (skb != NULL) ? skb->xmit_more : false; + bool try_batch, xmit_more; /* If device is rescinded, return error and packet will get dropped. */ if (unlikely(!net_device || net_device->destroy)) return -ENODEV; - /* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get - * here before the negotiation with the host is finished and - * send_section_map may not be allocated yet. - */ - if (unlikely(!net_device->send_section_map)) - return -EAGAIN; - nvchan = &net_device->chan_table[packet->q_idx]; packet->send_buf_index = NETVSC_INVALID_INDEX; packet->cp_partial = false; @@ -857,10 +922,8 @@ int netvsc_send(struct net_device_context *ndev_ctx, /* Send control message directly without accessing msd (Multi-Send * Data) field which may be changed during data packet processing. */ - if (!skb) { - cur_send = packet; - goto send_now; - } + if (!skb) + return netvsc_send_pkt(device, packet, net_device, pb, skb); /* batch packets in send buffer if possible */ msdp = &nvchan->msd; @@ -888,10 +951,17 @@ int netvsc_send(struct net_device_context *ndev_ctx, } } + /* Keep aggregating only if stack says more data is coming + * and not doing mixed modes send and not flow blocked + */ + xmit_more = skb->xmit_more && + !packet->cp_partial && + !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx)); + if (section_index != NETVSC_INVALID_INDEX) { netvsc_copy_to_send_buf(net_device, section_index, msd_len, - packet, rndis_msg, pb, skb); + packet, rndis_msg, pb, xmit_more); packet->send_buf_index = section_index; @@ -911,7 +981,7 @@ int netvsc_send(struct net_device_context *ndev_ctx, if (msdp->skb) dev_consume_skb_any(msdp->skb); - if (xmit_more && !packet->cp_partial) { + if (xmit_more) { msdp->skb = skb; msdp->pkt = packet; msdp->count++; @@ -937,7 +1007,6 @@ int netvsc_send(struct net_device_context *ndev_ctx, } } -send_now: if (cur_send) ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb); @@ -1102,7 +1171,7 @@ static void netvsc_send_table(struct hv_device *hdev, nvmsg->msg.v5_msg.send_table.offset); for (i = 0; i < count; i++) - net_device_ctx->tx_send_table[i] = tab[i]; + net_device_ctx->tx_table[i] = tab[i]; } static void netvsc_send_vf(struct net_device_context *net_device_ctx, @@ -1181,6 +1250,7 @@ int netvsc_poll(struct napi_struct *napi, int budget) struct hv_device *device = netvsc_channel_to_device(channel); struct net_device *ndev = hv_get_drvdata(device); int work_done = 0; + int ret; /* If starting a new interval */ if (!nvchan->desc) @@ -1192,18 +1262,21 @@ int netvsc_poll(struct napi_struct *napi, int budget) nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); } - /* If send of pending receive completions suceeded - * and did not exhaust NAPI budget this time - * and not doing busy poll + /* Send any pending receive completions */ + ret = send_recv_completions(ndev, net_device, nvchan); + + /* If it did not exhaust NAPI budget this time + * and not doing busy poll * then 
re-enable host interrupts - * and reschedule if ring is not empty. + * and reschedule if ring is not empty + * or sending receive completion failed. */ - if (send_recv_completions(ndev, net_device, nvchan) == 0 && - work_done < budget && + if (work_done < budget && napi_complete_done(napi, work_done) && - hv_end_read(&channel->inbound)) { + (ret || hv_end_read(&channel->inbound)) && + napi_schedule_prep(napi)) { hv_begin_read(&channel->inbound); - napi_reschedule(napi); + __napi_schedule(napi); } /* Driver may overshoot since multiple packets per descriptor */ @@ -1226,7 +1299,7 @@ void netvsc_channel_cb(void *context) /* disable interupts from host */ hv_begin_read(rbi); - __napi_schedule(&nvchan->napi); + __napi_schedule_irqoff(&nvchan->napi); } } @@ -1247,6 +1320,9 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, if (!net_device) return ERR_PTR(-ENOMEM); + for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) + net_device_ctx->tx_table[i] = 0; + net_device->ring_size = ring_size; /* Because the device uses NAPI, all the interrupt batching and @@ -1281,7 +1357,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, net_device->chan_table); if (ret != 0) { - netif_napi_del(&net_device->chan_table[0].napi); netdev_err(ndev, "unable to open channel: %d\n", ret); goto cleanup; } @@ -1291,11 +1366,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, napi_enable(&net_device->chan_table[0].napi); - /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is - * populated. - */ - rcu_assign_pointer(net_device_ctx->nvdev, net_device); - /* Connect with the NetVsp */ ret = netvsc_connect_vsp(device, net_device, device_info); if (ret != 0) { @@ -1304,6 +1374,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, goto close; } + /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is + * populated. + */ + rcu_assign_pointer(net_device_ctx->nvdev, net_device); + return net_device; close: @@ -1314,6 +1389,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, vmbus_close(device->channel); cleanup: + netif_napi_del(&net_device->chan_table[0].napi); free_netvsc_device(&net_device->rcu); return ERR_PTR(ret); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index a32ae02e1b6c..6a77ef38c549 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -45,11 +45,10 @@ #include "hyperv_net.h" -#define RING_SIZE_MIN 64 -#define NETVSC_MIN_TX_SECTIONS 10 -#define NETVSC_DEFAULT_TX 192 /* ~1M */ -#define NETVSC_MIN_RX_SECTIONS 10 /* ~64K */ -#define NETVSC_DEFAULT_RX 10485 /* Max ~16M */ +#define RING_SIZE_MIN 64 +#define RETRY_US_LO 5000 +#define RETRY_US_HI 10000 +#define RETRY_MAX 2000 /* >10 sec */ #define LINKCHANGE_INT (2 * HZ) #define VF_TAKEOVER_INT (HZ / 10) @@ -67,12 +66,45 @@ static int debug = -1; module_param(debug, int, S_IRUGO); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); -static void netvsc_set_multicast_list(struct net_device *net) +static LIST_HEAD(netvsc_dev_list); + +static void netvsc_change_rx_flags(struct net_device *net, int change) { - struct net_device_context *net_device_ctx = netdev_priv(net); - struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); + struct net_device_context *ndev_ctx = netdev_priv(net); + struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); + int inc; + + if (!vf_netdev) + return; + + if (change & IFF_PROMISC) { + inc = (net->flags & IFF_PROMISC) ? 
1 : -1; + dev_set_promiscuity(vf_netdev, inc); + } + + if (change & IFF_ALLMULTI) { + inc = (net->flags & IFF_ALLMULTI) ? 1 : -1; + dev_set_allmulti(vf_netdev, inc); + } +} - rndis_filter_update(nvdev); +static void netvsc_set_rx_mode(struct net_device *net) +{ + struct net_device_context *ndev_ctx = netdev_priv(net); + struct net_device *vf_netdev; + struct netvsc_device *nvdev; + + rcu_read_lock(); + vf_netdev = rcu_dereference(ndev_ctx->vf_netdev); + if (vf_netdev) { + dev_uc_sync(vf_netdev, net); + dev_mc_sync(vf_netdev, net); + } + + nvdev = rcu_dereference(ndev_ctx->nvdev); + if (nvdev) + rndis_filter_update(nvdev); + rcu_read_unlock(); } static int netvsc_open(struct net_device *net) @@ -92,12 +124,11 @@ static int netvsc_open(struct net_device *net) return ret; } - netif_tx_wake_all_queues(net); - rdev = nvdev->extension; - - if (!rdev->link_state) + if (!rdev->link_state) { netif_carrier_on(net); + netif_tx_wake_all_queues(net); + } if (vf_netdev) { /* Setting synthetic device up transparently sets @@ -113,36 +144,25 @@ static int netvsc_open(struct net_device *net) return 0; } -static int netvsc_close(struct net_device *net) +static int netvsc_wait_until_empty(struct netvsc_device *nvdev) { - struct net_device_context *net_device_ctx = netdev_priv(net); - struct net_device *vf_netdev - = rtnl_dereference(net_device_ctx->vf_netdev); - struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); - int ret = 0; - u32 aread, i, msec = 10, retry = 0, retry_max = 20; - struct vmbus_channel *chn; - - netif_tx_disable(net); - - /* No need to close rndis filter if it is removed already */ - if (!nvdev) - goto out; - - ret = rndis_filter_close(nvdev); - if (ret != 0) { - netdev_err(net, "unable to close device (ret %d).\n", ret); - return ret; - } + unsigned int retry = 0; + int i; /* Ensure pending bytes in ring are read */ - while (true) { - aread = 0; + for (;;) { + u32 aread = 0; + for (i = 0; i < nvdev->num_chn; i++) { - chn = nvdev->chan_table[i].channel; + struct vmbus_channel *chn + = nvdev->chan_table[i].channel; + if (!chn) continue; + /* make sure receive not running now */ + napi_synchronize(&nvdev->chan_table[i].napi); + aread = hv_get_bytes_to_read(&chn->inbound); if (aread) break; @@ -152,22 +172,40 @@ static int netvsc_close(struct net_device *net) break; } - retry++; - if (retry > retry_max || aread == 0) - break; + if (aread == 0) + return 0; - msleep(msec); + if (++retry > RETRY_MAX) + return -ETIMEDOUT; - if (msec < 1000) - msec *= 2; + usleep_range(RETRY_US_LO, RETRY_US_HI); } +} - if (aread) { - netdev_err(net, "Ring buffer not empty after closing rndis\n"); - ret = -ETIMEDOUT; +static int netvsc_close(struct net_device *net) +{ + struct net_device_context *net_device_ctx = netdev_priv(net); + struct net_device *vf_netdev + = rtnl_dereference(net_device_ctx->vf_netdev); + struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); + int ret; + + netif_tx_disable(net); + + /* No need to close rndis filter if it is removed already */ + if (!nvdev) + return 0; + + ret = rndis_filter_close(nvdev); + if (ret != 0) { + netdev_err(net, "unable to close device (ret %d).\n", ret); + return ret; } -out: + ret = netvsc_wait_until_empty(nvdev); + if (ret) + netdev_err(net, "Ring buffer not empty after closing rndis\n"); + if (vf_netdev) dev_close(vf_netdev); @@ -238,8 +276,8 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev, struct sock *sk = skb->sk; int q_idx; - q_idx = ndc->tx_send_table[netvsc_get_hash(skb, ndc) & - (VRSS_SEND_TAB_SIZE - 
1)]; + q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) & + (VRSS_SEND_TAB_SIZE - 1)]; /* If queue index changed record the new value */ if (q_idx != old_idx && @@ -288,8 +326,19 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, rcu_read_lock(); vf_netdev = rcu_dereference(ndc->vf_netdev); if (vf_netdev) { - txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; - qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; + const struct net_device_ops *vf_ops = vf_netdev->netdev_ops; + + if (vf_ops->ndo_select_queue) + txq = vf_ops->ndo_select_queue(vf_netdev, skb, + accel_priv, fallback); + else + txq = fallback(vf_netdev, skb); + + /* Record the queue selected by the VF so that it can be + * used for the common case where the VF has more queues than + * the synthetic device. + */ + qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq; } else { txq = netvsc_pick_tx(ndev, skb); } @@ -618,7 +667,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) /* timestamp packet in software */ skb_tx_timestamp(skb); - ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb); + ret = netvsc_send(net, packet, rndis_msg, pb, skb); if (likely(ret == 0)) return NETDEV_TX_OK; @@ -814,16 +863,93 @@ static void netvsc_get_channels(struct net_device *net, } } +static int netvsc_detach(struct net_device *ndev, + struct netvsc_device *nvdev) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct hv_device *hdev = ndev_ctx->device_ctx; + int ret; + + /* Don't continue trying to set up sub-channels */ + if (cancel_work_sync(&nvdev->subchan_work)) + nvdev->num_chn = 1; + + /* If device was up (receiving) then shut it down */ + if (netif_running(ndev)) { + netif_tx_disable(ndev); + + ret = rndis_filter_close(nvdev); + if (ret) { + netdev_err(ndev, + "unable to close device (ret %d).\n", ret); + return ret; + } + + ret = netvsc_wait_until_empty(nvdev); + if (ret) { + netdev_err(ndev, + "Ring buffer not empty after closing rndis\n"); + return ret; + } + } + + netif_device_detach(ndev); + + rndis_filter_device_remove(hdev, nvdev); + + return 0; +} + +static int netvsc_attach(struct net_device *ndev, + struct netvsc_device_info *dev_info) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct hv_device *hdev = ndev_ctx->device_ctx; + struct netvsc_device *nvdev; + struct rndis_device *rdev; + int ret; + + nvdev = rndis_filter_device_add(hdev, dev_info); + if (IS_ERR(nvdev)) + return PTR_ERR(nvdev); + + if (nvdev->num_chn > 1) { + ret = rndis_set_subchannel(ndev, nvdev); + + /* if unavailable, just proceed with one queue */ + if (ret) { + nvdev->max_chn = 1; + nvdev->num_chn = 1; + } + } + + /* In any case device is now ready */ + netif_device_attach(ndev); + + /* Note: enable and attach happen when sub-channels are set up */ + netif_carrier_off(ndev); + + if (netif_running(ndev)) { + ret = rndis_filter_open(nvdev); + if (ret) + return ret; + + rdev = nvdev->extension; + if (!rdev->link_state) + netif_carrier_on(ndev); + } + + return 0; +} + static int netvsc_set_channels(struct net_device *net, struct ethtool_channels *channels) { struct net_device_context *net_device_ctx = netdev_priv(net); - struct hv_device *dev = net_device_ctx->device_ctx; struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); unsigned int orig, count = channels->combined_count; struct netvsc_device_info device_info; - bool was_opened; - int ret = 0; + int ret; /* We do not support separate count for rx, tx, or other */ if (count == 0 || @@ -840,9 
+966,6 @@ static int netvsc_set_channels(struct net_device *net, return -EINVAL; orig = nvdev->num_chn; - was_opened = rndis_filter_opened(nvdev); - if (was_opened) - rndis_filter_close(nvdev); memset(&device_info, 0, sizeof(device_info)); device_info.num_chn = count; @@ -852,28 +975,17 @@ static int netvsc_set_channels(struct net_device *net, device_info.recv_sections = nvdev->recv_section_cnt; device_info.recv_section_size = nvdev->recv_section_size; - rndis_filter_device_remove(dev, nvdev); + ret = netvsc_detach(net, nvdev); + if (ret) + return ret; - nvdev = rndis_filter_device_add(dev, &device_info); - if (IS_ERR(nvdev)) { - ret = PTR_ERR(nvdev); + ret = netvsc_attach(net, &device_info); + if (ret) { device_info.num_chn = orig; - nvdev = rndis_filter_device_add(dev, &device_info); - - if (IS_ERR(nvdev)) { - netdev_err(net, "restoring channel setting failed: %ld\n", - PTR_ERR(nvdev)); - return ret; - } + if (netvsc_attach(net, &device_info)) + netdev_err(net, "restoring channel setting failed\n"); } - if (was_opened) - rndis_filter_open(nvdev); - - /* We may have missed link change notifications */ - net_device_ctx->last_reconfig = 0; - schedule_delayed_work(&net_device_ctx->dwork, 0); - return ret; } @@ -940,10 +1052,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) struct net_device_context *ndevctx = netdev_priv(ndev); struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); - struct hv_device *hdev = ndevctx->device_ctx; int orig_mtu = ndev->mtu; struct netvsc_device_info device_info; - bool was_opened; int ret = 0; if (!nvdev || nvdev->destroy) @@ -956,11 +1066,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) return ret; } - netif_device_detach(ndev); - was_opened = rndis_filter_opened(nvdev); - if (was_opened) - rndis_filter_close(nvdev); - memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; device_info.num_chn = nvdev->num_chn; @@ -969,35 +1074,27 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) device_info.recv_sections = nvdev->recv_section_cnt; device_info.recv_section_size = nvdev->recv_section_size; - rndis_filter_device_remove(hdev, nvdev); + ret = netvsc_detach(ndev, nvdev); + if (ret) + goto rollback_vf; ndev->mtu = mtu; - nvdev = rndis_filter_device_add(hdev, &device_info); - if (IS_ERR(nvdev)) { - ret = PTR_ERR(nvdev); - - /* Attempt rollback to original MTU */ - ndev->mtu = orig_mtu; - nvdev = rndis_filter_device_add(hdev, &device_info); - - if (vf_netdev) - dev_set_mtu(vf_netdev, orig_mtu); - - if (IS_ERR(nvdev)) { - netdev_err(ndev, "restoring mtu failed: %ld\n", - PTR_ERR(nvdev)); - return ret; - } - } + ret = netvsc_attach(ndev, &device_info); + if (ret) + goto rollback; - if (was_opened) - rndis_filter_open(nvdev); + return 0; - netif_device_attach(ndev); +rollback: + /* Attempt rollback to original MTU */ + ndev->mtu = orig_mtu; - /* We may have missed link change notifications */ - schedule_delayed_work(&ndevctx->dwork, 0); + if (netvsc_attach(ndev, &device_info)) + netdev_err(ndev, "restoring mtu failed\n"); +rollback_vf: + if (vf_netdev) + dev_set_mtu(vf_netdev, orig_mtu); return ret; } @@ -1382,7 +1479,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, rndis_dev = ndev->extension; if (indir) { for (i = 0; i < ITAB_NUM; i++) - indir[i] = rndis_dev->ind_table[i]; + indir[i] = rndis_dev->rx_table[i]; } if (key) @@ -1412,7 +1509,7 @@ static int netvsc_set_rxfh(struct net_device 
*dev, const u32 *indir, return -EINVAL; for (i = 0; i < ITAB_NUM; i++) - rndis_dev->ind_table[i] = indir[i]; + rndis_dev->rx_table[i] = indir[i]; } if (!key) { @@ -1463,11 +1560,9 @@ static int netvsc_set_ringparam(struct net_device *ndev, { struct net_device_context *ndevctx = netdev_priv(ndev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); - struct hv_device *hdev = ndevctx->device_ctx; struct netvsc_device_info device_info; struct ethtool_ringparam orig; u32 new_tx, new_rx; - bool was_opened; int ret = 0; if (!nvdev || nvdev->destroy) @@ -1493,34 +1588,18 @@ static int netvsc_set_ringparam(struct net_device *ndev, device_info.recv_sections = new_rx; device_info.recv_section_size = nvdev->recv_section_size; - netif_device_detach(ndev); - was_opened = rndis_filter_opened(nvdev); - if (was_opened) - rndis_filter_close(nvdev); - - rndis_filter_device_remove(hdev, nvdev); - - nvdev = rndis_filter_device_add(hdev, &device_info); - if (IS_ERR(nvdev)) { - ret = PTR_ERR(nvdev); + ret = netvsc_detach(ndev, nvdev); + if (ret) + return ret; + ret = netvsc_attach(ndev, &device_info); + if (ret) { device_info.send_sections = orig.tx_pending; device_info.recv_sections = orig.rx_pending; - nvdev = rndis_filter_device_add(hdev, &device_info); - if (IS_ERR(nvdev)) { - netdev_err(ndev, "restoring ringparam failed: %ld\n", - PTR_ERR(nvdev)); - return ret; - } - } - - if (was_opened) - rndis_filter_open(nvdev); - netif_device_attach(ndev); - /* We may have missed link change notifications */ - ndevctx->last_reconfig = 0; - schedule_delayed_work(&ndevctx->dwork, 0); + if (netvsc_attach(ndev, &device_info)) + netdev_err(ndev, "restoring ringparam failed"); + } return ret; } @@ -1550,7 +1629,8 @@ static const struct net_device_ops device_ops = { .ndo_open = netvsc_open, .ndo_stop = netvsc_close, .ndo_start_xmit = netvsc_start_xmit, - .ndo_set_rx_mode = netvsc_set_multicast_list, + .ndo_change_rx_flags = netvsc_change_rx_flags, + .ndo_set_rx_mode = netvsc_set_rx_mode, .ndo_change_mtu = netvsc_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = netvsc_set_mac_addr, @@ -1671,13 +1751,10 @@ static void netvsc_link_change(struct work_struct *w) static struct net_device *get_netvsc_bymac(const u8 *mac) { - struct net_device *dev; - - ASSERT_RTNL(); + struct net_device_context *ndev_ctx; - for_each_netdev(&init_net, dev) { - if (dev->netdev_ops != &device_ops) - continue; /* not a netvsc device */ + list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { + struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx); if (ether_addr_equal(mac, dev->perm_addr)) return dev; @@ -1688,25 +1765,18 @@ static struct net_device *get_netvsc_bymac(const u8 *mac) static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) { + struct net_device_context *net_device_ctx; struct net_device *dev; - ASSERT_RTNL(); - - for_each_netdev(&init_net, dev) { - struct net_device_context *net_device_ctx; + dev = netdev_master_upper_dev_get(vf_netdev); + if (!dev || dev->netdev_ops != &device_ops) + return NULL; /* not a netvsc device */ - if (dev->netdev_ops != &device_ops) - continue; /* not a netvsc device */ + net_device_ctx = netdev_priv(dev); + if (!rtnl_dereference(net_device_ctx->nvdev)) + return NULL; /* device is removed */ - net_device_ctx = netdev_priv(dev); - if (!rtnl_dereference(net_device_ctx->nvdev)) - continue; /* device is removed */ - - if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev) - return dev; /* a match */ - } - - return NULL; + return dev; } /* Called 
when VF is injecting data into network stack. @@ -1746,7 +1816,8 @@ static int netvsc_vf_join(struct net_device *vf_netdev, goto rx_handler_failed; } - ret = netdev_upper_dev_link(vf_netdev, ndev); + ret = netdev_master_upper_dev_link(vf_netdev, ndev, + NULL, NULL); if (ret != 0) { netdev_err(vf_netdev, "can not set master device %s (err = %d)\n", @@ -1781,6 +1852,15 @@ static void __netvsc_vf_setup(struct net_device *ndev, netdev_warn(vf_netdev, "unable to change mtu to %u\n", ndev->mtu); + /* set multicast etc flags on VF */ + dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE); + + /* sync address list from ndev to VF */ + netif_addr_lock_bh(ndev); + dev_uc_sync(vf_netdev, ndev); + dev_mc_sync(vf_netdev, ndev); + netif_addr_unlock_bh(ndev); + if (netif_running(ndev)) { ret = dev_open(vf_netdev); if (ret) @@ -1935,6 +2015,12 @@ static int netvsc_probe(struct hv_device *dev, /* We always need headroom for rndis header */ net->needed_headroom = RNDIS_AND_PPI_SIZE; + /* Initialize the number of queues to be 1, we may change it if more + * channels are offered later. + */ + netif_set_real_num_tx_queues(net, 1); + netif_set_real_num_rx_queues(net, 1); + /* Notify the netvsc driver of the new device */ memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; @@ -1953,7 +2039,10 @@ static int netvsc_probe(struct hv_device *dev, memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); - /* hw_features computed in rndis_filter_device_add */ + if (nvdev->num_chn > 1) + schedule_work(&nvdev->subchan_work); + + /* hw_features computed in rndis_netdev_set_hwcaps() */ net->features = net->hw_features | NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; @@ -1968,15 +2057,19 @@ static int netvsc_probe(struct hv_device *dev, else net->max_mtu = ETH_DATA_LEN; - ret = register_netdev(net); + rtnl_lock(); + ret = register_netdevice(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); goto register_failed; } - return ret; + list_add(&net_device_ctx->list, &netvsc_dev_list); + rtnl_unlock(); + return 0; register_failed: + rtnl_unlock(); rndis_filter_device_remove(dev, nvdev); rndis_failed: free_percpu(net_device_ctx->vf_stats); @@ -1990,8 +2083,8 @@ static int netvsc_probe(struct hv_device *dev, static int netvsc_remove(struct hv_device *dev) { struct net_device_context *ndev_ctx; - struct net_device *vf_netdev; - struct net_device *net; + struct net_device *vf_netdev, *net; + struct netvsc_device *nvdev; net = hv_get_drvdata(dev); if (net == NULL) { @@ -2001,10 +2094,14 @@ static int netvsc_remove(struct hv_device *dev) ndev_ctx = netdev_priv(net); - netif_device_detach(net); - cancel_delayed_work_sync(&ndev_ctx->dwork); + rcu_read_lock(); + nvdev = rcu_dereference(ndev_ctx->nvdev); + + if (nvdev) + cancel_work_sync(&nvdev->subchan_work); + /* * Call to the vsc driver to let it know that the device is being * removed. Also blocks mtu and channel changes. 
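The netvsc_detach()/netvsc_attach() pair introduced above gives every reconfiguration path (channel count, MTU, ring size) one shared shutdown/restart sequence with rollback. A minimal sketch of that shared shape, reusing this patch's helpers; the wrapper name netvsc_reconfigure() is hypothetical and only illustrates the pattern the handlers above open-code:

/* Hypothetical wrapper, not part of this patch: the common
 * detach -> attach -> rollback-on-failure sequence shared by
 * netvsc_set_channels(), netvsc_change_mtu() and
 * netvsc_set_ringparam().
 */
static int netvsc_reconfigure(struct net_device *ndev,
			      struct netvsc_device *nvdev,
			      struct netvsc_device_info *new_info,
			      struct netvsc_device_info *old_info)
{
	int ret;

	ret = netvsc_detach(ndev, nvdev);	/* close, drain ring, remove */
	if (ret)
		return ret;			/* detach failed; nvdev still present */

	ret = netvsc_attach(ndev, new_info);	/* re-add; reopen if running */
	if (ret && netvsc_attach(ndev, old_info))
		netdev_err(ndev, "restoring settings failed\n");

	return ret;
}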
@@ -2014,11 +2111,14 @@ static int netvsc_remove(struct hv_device *dev) if (vf_netdev) netvsc_unregister_vf(vf_netdev); + if (nvdev) + rndis_filter_device_remove(dev, nvdev); + unregister_netdevice(net); + list_del(&ndev_ctx->list); - rndis_filter_device_remove(dev, - rtnl_dereference(ndev_ctx->nvdev)); rtnl_unlock(); + rcu_read_unlock(); hv_set_drvdata(dev, NULL); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 065b204d8e17..cb03a6ea076a 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -217,7 +217,6 @@ static int rndis_filter_send_request(struct rndis_device *dev, struct hv_netvsc_packet *packet; struct hv_page_buffer page_buf[2]; struct hv_page_buffer *pb = page_buf; - struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); int ret; /* Setup the packet to send it */ @@ -245,7 +244,7 @@ static int rndis_filter_send_request(struct rndis_device *dev, } rcu_read_lock_bh(); - ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL); + ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL); rcu_read_unlock_bh(); return ret; @@ -267,13 +266,23 @@ static void rndis_set_link_state(struct rndis_device *rdev, } } -static void rndis_filter_receive_response(struct rndis_device *dev, - struct rndis_message *resp) +static void rndis_filter_receive_response(struct net_device *ndev, + struct netvsc_device *nvdev, + const struct rndis_message *resp) { + struct rndis_device *dev = nvdev->extension; struct rndis_request *request = NULL; bool found = false; unsigned long flags; - struct net_device *ndev = dev->ndev; + + /* This should never happen; it means a control message + * response was received after the device was removed. + */ + if (dev->state == RNDIS_DEV_UNINITIALIZED) { + netdev_err(ndev, + "got rndis message uninitialized\n"); + return; + } spin_lock_irqsave(&dev->request_lock, flags); list_for_each_entry(request, &dev->req_list, list_ent) { @@ -354,7 +363,7 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type) } static int rndis_filter_receive_data(struct net_device *ndev, - struct rndis_device *dev, + struct netvsc_device *nvdev, struct rndis_message *msg, struct vmbus_channel *channel, void *data, u32 data_buflen) @@ -374,7 +383,7 @@ static int rndis_filter_receive_data(struct net_device *ndev, * should be the data packet size plus the trailer padding size */ if (unlikely(data_buflen < rndis_pkt->data_len)) { - netdev_err(dev->ndev, "rndis message buffer " + netdev_err(ndev, "rndis message buffer " "overflow detected (got %u, min %u)" "...dropping this message!\n", data_buflen, rndis_pkt->data_len); @@ -402,34 +411,20 @@ int rndis_filter_receive(struct net_device *ndev, void *data, u32 buflen) { struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct rndis_device *rndis_dev = net_dev->extension; struct rndis_message *rndis_msg = data; - /* Make sure the rndis device state is initialized */ - if (unlikely(!rndis_dev)) { - netif_err(net_device_ctx, rx_err, ndev, - "got rndis message but no rndis device!\n"); - return NVSP_STAT_FAIL; - } - - if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) { - netif_err(net_device_ctx, rx_err, ndev, - "got rndis message uninitialized\n"); - return NVSP_STAT_FAIL; - } - if (netif_msg_rx_status(net_device_ctx)) dump_rndis_message(dev, rndis_msg); switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: - return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg, + return rndis_filter_receive_data(ndev, net_dev, rndis_msg, channel, data, 
buflen); case RNDIS_MSG_INIT_C: case RNDIS_MSG_QUERY_C: case RNDIS_MSG_SET_C: /* completion msgs */ - rndis_filter_receive_response(rndis_dev, rndis_msg); + rndis_filter_receive_response(ndev, net_dev, rndis_msg); break; case RNDIS_MSG_INDICATE: @@ -759,7 +754,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, /* Set indirection table entries */ itab = (u32 *)(rssp + 1); for (i = 0; i < ITAB_NUM; i++) - itab[i] = rdev->ind_table[i]; + itab[i] = rdev->rx_table[i]; /* Set hash key values */ keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset); @@ -855,15 +850,19 @@ static void rndis_set_multicast(struct work_struct *w) { struct rndis_device *rdev = container_of(w, struct rndis_device, mcast_work); + u32 filter = NDIS_PACKET_TYPE_DIRECTED; + unsigned int flags = rdev->ndev->flags; - if (rdev->ndev->flags & IFF_PROMISC) - rndis_filter_set_packet_filter(rdev, - NDIS_PACKET_TYPE_PROMISCUOUS); - else - rndis_filter_set_packet_filter(rdev, - NDIS_PACKET_TYPE_BROADCAST | - NDIS_PACKET_TYPE_ALL_MULTICAST | - NDIS_PACKET_TYPE_DIRECTED); + if (flags & IFF_PROMISC) { + filter = NDIS_PACKET_TYPE_PROMISCUOUS; + } else { + if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI)) + filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; + if (flags & IFF_BROADCAST) + filter |= NDIS_PACKET_TYPE_BROADCAST; + } + + rndis_filter_set_packet_filter(rdev, filter); } void rndis_filter_update(struct netvsc_device *nvdev) @@ -1056,29 +1055,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) * This breaks overlap of processing the host message for the * new primary channel with the initialization of sub-channels. */ -void rndis_set_subchannel(struct work_struct *w) +int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) { - struct netvsc_device *nvdev - = container_of(w, struct netvsc_device, subchan_work); struct nvsp_message *init_packet = &nvdev->channel_init_pkt; - struct net_device_context *ndev_ctx; - struct rndis_device *rdev; - struct net_device *ndev; - struct hv_device *hv_dev; + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct hv_device *hv_dev = ndev_ctx->device_ctx; + struct rndis_device *rdev = nvdev->extension; int i, ret; - if (!rtnl_trylock()) { - schedule_work(w); - return; - } - - rdev = nvdev->extension; - if (!rdev) - goto unlock; /* device was removed */ - - ndev = rdev->ndev; - ndev_ctx = netdev_priv(ndev); - hv_dev = ndev_ctx->device_ctx; + ASSERT_RTNL(); memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; @@ -1092,13 +1077,13 @@ void rndis_set_subchannel(struct work_struct *w) VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) { netdev_err(ndev, "sub channel allocate send failed: %d\n", ret); - goto failed; + return ret; } wait_for_completion(&nvdev->channel_init_wait); if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { netdev_err(ndev, "sub channel request failed\n"); - goto failed; + return -EIO; } nvdev->num_chn = 1 + @@ -1114,83 +1099,26 @@ void rndis_set_subchannel(struct work_struct *w) netif_set_real_num_tx_queues(ndev, nvdev->num_chn); netif_set_real_num_rx_queues(ndev, nvdev->num_chn); - rtnl_unlock(); - return; + for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) + ndev_ctx->tx_table[i] = i % nvdev->num_chn; -failed: - /* fallback to only primary channel */ - for (i = 1; i < nvdev->num_chn; i++) - netif_napi_del(&nvdev->chan_table[i].napi); - - nvdev->max_chn = 1; - nvdev->num_chn = 1; -unlock: - rtnl_unlock(); + return 0; } -struct netvsc_device 
*rndis_filter_device_add(struct hv_device *dev, - struct netvsc_device_info *device_info) +static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, + struct netvsc_device *nvdev) { - struct net_device *net = hv_get_drvdata(dev); + struct net_device *net = rndis_device->ndev; struct net_device_context *net_device_ctx = netdev_priv(net); - struct netvsc_device *net_device; - struct rndis_device *rndis_device; struct ndis_offload hwcaps; struct ndis_offload_params offloads; - struct ndis_recv_scale_cap rsscap; - u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); unsigned int gso_max_size = GSO_MAX_SIZE; - u32 mtu, size; - const struct cpumask *node_cpu_mask; - u32 num_possible_rss_qs; - int i, ret; - - rndis_device = get_rndis_device(); - if (!rndis_device) - return ERR_PTR(-ENODEV); - - /* - * Let the inner driver handle this first to create the netvsc channel - * NOTE! Once the channel is created, we may get a receive callback - * (RndisFilterOnReceive()) before this call is completed - */ - net_device = netvsc_device_add(dev, device_info); - if (IS_ERR(net_device)) { - kfree(rndis_device); - return net_device; - } - - /* Initialize the rndis device */ - net_device->max_chn = 1; - net_device->num_chn = 1; - - net_device->extension = rndis_device; - rndis_device->ndev = net; - - /* Send the rndis initialization message */ - ret = rndis_filter_init_device(rndis_device, net_device); - if (ret != 0) - goto err_dev_remv; - - /* Get the MTU from the host */ - size = sizeof(u32); - ret = rndis_filter_query_device(rndis_device, net_device, - RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, - &mtu, &size); - if (ret == 0 && size == sizeof(u32) && mtu < net->mtu) - net->mtu = mtu; - - /* Get the mac address */ - ret = rndis_filter_query_device_mac(rndis_device, net_device); - if (ret != 0) - goto err_dev_remv; - - memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN); + int ret; /* Find HW offload capabilities */ - ret = rndis_query_hwcaps(rndis_device, net_device, &hwcaps); + ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps); if (ret != 0) - goto err_dev_remv; + return ret; /* A value of zero means "no change"; now turn on what we want. */ memset(&offloads, 0, sizeof(struct ndis_offload_params)); @@ -1198,8 +1126,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, /* Linux does not care about IP checksum, always does in kernel */ offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED; + /* Reset previously set hw_features flags */ + net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES; + net_device_ctx->tx_checksum_mask = 0; + /* Compute tx offload settings based on hw capabilities */ - net->hw_features = NETIF_F_RXCSUM; + net->hw_features |= NETIF_F_RXCSUM; if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) { /* Can checksum TCP */ @@ -1243,10 +1175,74 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, } } + /* In case some hw_features disappeared we need to remove them from + * net->features list as they're no longer supported. 
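+ * (The mask below keeps every bit outside NETVSC_SUPPORTED_HW_FEATURES and re-adds only the driver-managed bits still present in net->hw_features.)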
+ */ + net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features; + netif_set_gso_max_size(net, gso_max_size); - ret = rndis_filter_set_offload_params(net, net_device, &offloads); - if (ret) + ret = rndis_filter_set_offload_params(net, nvdev, &offloads); + + return ret; +} + +struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, + struct netvsc_device_info *device_info) +{ + struct net_device *net = hv_get_drvdata(dev); + struct netvsc_device *net_device; + struct rndis_device *rndis_device; + struct ndis_recv_scale_cap rsscap; + u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); + u32 mtu, size; + u32 num_possible_rss_qs; + int i, ret; + + rndis_device = get_rndis_device(); + if (!rndis_device) + return ERR_PTR(-ENODEV); + + /* Let the inner driver handle this first to create the netvsc channel + * NOTE! Once the channel is created, we may get a receive callback + * (RndisFilterOnReceive()) before this call is completed + */ + net_device = netvsc_device_add(dev, device_info); + if (IS_ERR(net_device)) { + kfree(rndis_device); + return net_device; + } + + /* Initialize the rndis device */ + net_device->max_chn = 1; + net_device->num_chn = 1; + + net_device->extension = rndis_device; + rndis_device->ndev = net; + + /* Send the rndis initialization message */ + ret = rndis_filter_init_device(rndis_device, net_device); + if (ret != 0) + goto err_dev_remv; + + /* Get the MTU from the host */ + size = sizeof(u32); + ret = rndis_filter_query_device(rndis_device, net_device, + RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, + &mtu, &size); + if (ret == 0 && size == sizeof(u32) && mtu < net->mtu) + net->mtu = mtu; + + /* Get the mac address */ + ret = rndis_filter_query_device_mac(rndis_device, net_device); + if (ret != 0) + goto err_dev_remv; + + memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN); + + /* Query and set hardware capabilities */ + ret = rndis_netdev_set_hwcaps(rndis_device, net_device); + if (ret != 0) goto err_dev_remv; rndis_filter_query_device_link_status(rndis_device, net_device); @@ -1256,7 +1252,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, rndis_device->link_state ? "down" : "up"); if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5) - return net_device; + goto out; rndis_filter_query_link_speed(rndis_device, net_device); @@ -1268,14 +1264,8 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, if (ret || rsscap.num_recv_que < 2) goto out; - /* - * We will limit the VRSS channels to the number CPUs in the NUMA node - * the primary channel is currently bound to. 
- * - * This also guarantees that num_possible_rss_qs <= num_online_cpus - */ - node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu)); - num_possible_rss_qs = min_t(u32, cpumask_weight(node_cpu_mask), + /* This guarantees that num_possible_rss_qs <= num_online_cpus */ + num_possible_rss_qs = min_t(u32, num_online_cpus(), rsscap.num_recv_que); net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs); @@ -1284,8 +1274,8 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, net_device->num_chn = min(net_device->max_chn, device_info->num_chn); for (i = 0; i < ITAB_NUM; i++) - rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, - net_device->num_chn); + rndis_device->rx_table[i] = ethtool_rxfh_indir_default( + i, net_device->num_chn); atomic_set(&net_device->open_chn, 1); vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); @@ -1303,17 +1293,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, netif_napi_add(net, &net_device->chan_table[i].napi, netvsc_poll, NAPI_POLL_WEIGHT); - if (net_device->num_chn > 1) - schedule_work(&net_device->subchan_work); + return net_device; out: - /* if unavailable, just proceed with one queue */ - if (ret) { - net_device->max_chn = 1; - net_device->num_chn = 1; - } - - return net_device; + /* setting up multiple channels failed */ + net_device->max_chn = 1; + net_device->num_chn = 1; err_dev_remv: rndis_filter_device_remove(dev, net_device); @@ -1331,7 +1316,6 @@ void rndis_filter_device_remove(struct hv_device *dev, net_dev->extension = NULL; netvsc_device_remove(dev); - kfree(rndis_dev); } int rndis_filter_open(struct netvsc_device *nvdev) @@ -1355,8 +1339,3 @@ int rndis_filter_close(struct netvsc_device *nvdev) return rndis_filter_close_device(nvdev->extension); } - -bool rndis_filter_opened(const struct netvsc_device *nvdev) -{ - return atomic_read(&nvdev->open_cnt) > 0; -} diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 3e4c8b21403c..46b42de13d76 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -888,7 +888,7 @@ static const struct ieee802154_ops adf7242_ops = { .set_cca_ed_level = adf7242_set_cca_ed_level, }; -static void adf7242_debug(u8 irq1) +static void adf7242_debug(struct adf7242_local *lp, u8 irq1) { #ifdef DEBUG u8 stat; @@ -932,7 +932,7 @@ static irqreturn_t adf7242_isr(int irq, void *data) dev_err(&lp->spi->dev, "%s :ERROR IRQ1 = 0x%X\n", __func__, irq1); - adf7242_debug(irq1); + adf7242_debug(lp, irq1); xmit = test_bit(FLAG_XMIT, &lp->flags); diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 24a1eabbbc9d..22e466ea919a 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -2493,13 +2493,14 @@ static ssize_t ca8210_test_int_user_write( struct ca8210_priv *priv = filp->private_data; u8 command[CA8210_SPI_BUF_SIZE]; - if (len > CA8210_SPI_BUF_SIZE) { + memset(command, SPI_IDLE, 6); + if (len > CA8210_SPI_BUF_SIZE || len < 2) { dev_warn( &priv->spi->dev, - "userspace requested erroneously long write (%zu)\n", + "userspace requested erroneous write length (%zu)\n", len ); - return -EMSGSIZE; + return -EBADE; } ret = copy_from_user(command, in_buf, len); @@ -2511,6 +2512,13 @@ static ssize_t ca8210_test_int_user_write( ); return -EIO; } + if (len != command[1] + 2) { + dev_err( + &priv->spi->dev, + "write len does not match packet length field\n" + ); + return -EBADE; + } ret = ca8210_test_check_upstream(command, priv->spi); if 
(ret == 0) { diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 1f3295e274d0..71ff6bd4be9f 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -304,6 +304,10 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb, if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS) success = true; } else { + if (!ether_addr_equal_64bits(eth_hdr(skb)->h_dest, + ipvlan->phy_dev->dev_addr)) + skb->pkt_type = PACKET_OTHERHOST; + ret = RX_HANDLER_ANOTHER; success = true; } @@ -375,6 +379,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb) .flowi4_oif = dev->ifindex, .flowi4_tos = RT_TOS(ip4h->tos), .flowi4_flags = FLOWI_FLAG_ANYSRC, + .flowi4_mark = skb->mark, .daddr = ip4h->daddr, .saddr = ip4h->saddr, }; @@ -409,7 +414,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) struct dst_entry *dst; int err, ret = NET_XMIT_DROP; struct flowi6 fl6 = { - .flowi6_iif = dev->ifindex, + .flowi6_oif = dev->ifindex, .daddr = ip6h->daddr, .saddr = ip6h->saddr, .flowi6_flags = FLOWI_FLAG_ANYSRC, diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index c74893c1e620..e7f7a1a002ee 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -546,7 +546,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, ipvlan->dev = dev; ipvlan->port = port; ipvlan->sfeatures = IPVLAN_FEATURES; - ipvlan_adjust_mtu(ipvlan, phy_dev); + if (!tb[IFLA_MTU]) + ipvlan_adjust_mtu(ipvlan, phy_dev); INIT_LIST_HEAD(&ipvlan->addrs); /* If the port-id base is at the MAX value, then wrap it around and diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d2aea961e0f4..963a02c988e9 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -480,7 +480,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) struct macvlan_dev, list); else vlan = macvlan_hash_lookup(port, eth->h_dest); - if (vlan == NULL) + if (!vlan || vlan->mode == MACVLAN_MODE_SOURCE) return RX_HANDLER_PASS; dev = vlan->dev; @@ -1037,7 +1037,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, lowerdev_features &= (features | ~NETIF_F_LRO); features = netdev_increment_features(lowerdev_features, features, mask); features |= ALWAYS_ON_FEATURES; - features &= ~NETIF_F_NETNS_LOCAL; + features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES); return features; } @@ -1441,9 +1441,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, return 0; unregister_netdev: + /* macvlan_uninit would free the macvlan port */ unregister_netdevice(dev); + return err; destroy_macvlan_port: - if (create) + /* The macvlan port may be freed by macvlan_uninit() if registration fails, + * so we destroy the macvlan port only while it is still valid. 
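+ * (macvlan_port_get_rtnl() reads lowerdev->rx_handler_data under RTNL, which is cleared once the port has been destroyed.)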
+ */ + if (create && macvlan_port_get_rtnl(lowerdev)) macvlan_port_destroy(port->dev); return err; } diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index c1e52b9dc58d..e911e4990b20 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -167,7 +167,7 @@ static int at803x_set_wol(struct phy_device *phydev, mac = (const u8 *) ndev->dev_addr; if (!is_valid_ether_addr(mac)) - return -EFAULT; + return -EINVAL; for (i = 0; i < 3; i++) { phy_write(phydev, AT803X_MMD_ACCESS_CONTROL, @@ -239,14 +239,10 @@ static int at803x_resume(struct phy_device *phydev) { int value; - mutex_lock(&phydev->lock); - value = phy_read(phydev, MII_BMCR); value &= ~(BMCR_PDOWN | BMCR_ISOLATE); phy_write(phydev, MII_BMCR, value); - mutex_unlock(&phydev->lock); - return 0; } diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c index 3fe8cc5c177e..9b27ca264c66 100644 --- a/drivers/net/phy/bcm-cygnus.c +++ b/drivers/net/phy/bcm-cygnus.c @@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev) return rc; /* make rcal=100, since rdb default is 000 */ - rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10); + rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10); if (rc < 0) return rc; /* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */ - rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10); + rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10); if (rc < 0) return rc; /* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */ - rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00); + rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00); return 0; } diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index 171010eb4d9c..8d96c6f048d0 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -56,7 +56,7 @@ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum) /* The register must be written to both the Shadow Register Select and * the Shadow Read Register Selector */ - phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | + phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK | regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT); return phy_read(phydev, MII_BCM54XX_AUX_CTL); } diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h index 7c73808cbbde..81cceaa412fe 100644 --- a/drivers/net/phy/bcm-phy-lib.h +++ b/drivers/net/phy/bcm-phy-lib.h @@ -14,11 +14,18 @@ #ifndef _LINUX_BCM_PHY_LIB_H #define _LINUX_BCM_PHY_LIB_H +#include <linux/brcmphy.h> #include <linux/phy.h> int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val); int bcm_phy_read_exp(struct phy_device *phydev, u16 reg); +static inline int bcm_phy_write_exp_sel(struct phy_device *phydev, + u16 reg, u16 val) +{ + return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val); +} + int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val); int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum); diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 8b33f688ac8a..3c5b2a2e2fcc 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -65,10 +65,10 @@ struct bcm7xxx_phy_priv { static void r_rc_cal_reset(struct phy_device *phydev) { /* Reset R_CAL/RC_CAL Engine */ - bcm_phy_write_exp(phydev, 0x00b0, 0x0010); + bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010); /* Disable Reset R_CAL/RC_CAL Engine */ - bcm_phy_write_exp(phydev, 0x00b0, 0x0000); + bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000); } static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device 
*phydev) diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index cbd629822f04..26fbbd3ffe33 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1207,6 +1207,23 @@ static void dp83640_remove(struct phy_device *phydev) kfree(dp83640); } +static int dp83640_soft_reset(struct phy_device *phydev) +{ + int ret; + + ret = genphy_soft_reset(phydev); + if (ret < 0) + return ret; + + /* From DP83640 datasheet: "Software driver code must wait 3 us + * following a software reset before allowing further serial MII + * operations with the DP83640." + */ + udelay(10); /* Taking udelay inaccuracy into account */ + + return 0; +} + static int dp83640_config_init(struct phy_device *phydev) { struct dp83640_private *dp83640 = phydev->priv; @@ -1501,6 +1518,7 @@ static struct phy_driver dp83640_driver = { .flags = PHY_HAS_INTERRUPT, .probe = dp83640_probe, .remove = dp83640_remove, + .soft_reset = dp83640_soft_reset, .config_init = dp83640_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 4d02b27df044..e9e67c22c8bb 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1409,6 +1409,15 @@ static int m88e1318_set_wol(struct phy_device *phydev, if (err < 0) return err; + /* If WOL event happened once, the LED[2] interrupt pin + * will not be cleared unless we reading the interrupt status + * register. If interrupts are in use, the normal interrupt + * handling will clear the WOL event. Clear the WOL event + * before enabling it if !phy_interrupt_is_valid() + */ + if (!phy_interrupt_is_valid(phydev)) + phy_read(phydev, MII_M1011_IEVENT); + /* Enable the WOL interrupt */ temp = phy_read(phydev, MII_88E1318S_PHY_CSIER); temp |= MII_88E1318S_PHY_CSIER_WOL_EIE; @@ -2069,7 +2078,7 @@ static struct phy_driver marvell_drivers[] = { .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1145_config_init, - .config_aneg = &marvell_config_aneg, + .config_aneg = &m88e1101_config_aneg, .read_status = &genphy_read_status, .ack_interrupt = &marvell_ack_interrupt, .config_intr = &marvell_config_intr, diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index aebc08beceba..21b3f36e023a 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -16,6 +16,7 @@ * link takes priority and the other port is completely locked out. 
*/ #include <linux/phy.h> +#include <linux/marvell_phy.h> enum { MV_PCS_BASE_T = 0x0000, @@ -338,7 +339,7 @@ static int mv3310_read_status(struct phy_device *phydev) static struct phy_driver mv3310_drivers[] = { { .phy_id = 0x002b09aa, - .phy_id_mask = 0xffffffff, + .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "mv88x3310", .features = SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | @@ -360,7 +361,7 @@ static struct phy_driver mv3310_drivers[] = { module_phy_driver(mv3310_drivers); static struct mdio_device_id __maybe_unused mv3310_tbl[] = { - { 0x002b09aa, 0xffffffff }, + { 0x002b09aa, MARVELL_PHY_ID_MASK }, { }, }; MODULE_DEVICE_TABLE(mdio, mv3310_tbl); diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c index 0831b7142df7..0c5b68e7da51 100644 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -218,7 +218,7 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev) static int mdio_mux_iproc_remove(struct platform_device *pdev) { - struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev); + struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev); mdio_mux_uninit(md->mux_handle); mdiobus_unregister(md->mii_bus); diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c index 135296508a7e..6425ce04d3f9 100644 --- a/drivers/net/phy/mdio-sun4i.c +++ b/drivers/net/phy/mdio-sun4i.c @@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev) data->regulator = devm_regulator_get(&pdev->dev, "phy"); if (IS_ERR(data->regulator)) { - if (PTR_ERR(data->regulator) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(data->regulator) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto err_out_free_mdiobus; + } dev_info(&pdev->dev, "no regulator found\n"); data->regulator = NULL; diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c index bfd3090fb055..07c6048200c6 100644 --- a/drivers/net/phy/mdio-xgene.c +++ b/drivers/net/phy/mdio-xgene.c @@ -194,8 +194,11 @@ static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata) } ret = xgene_enet_ecc_init(pdata); - if (ret) + if (ret) { + if (pdata->dev->of_node) + clk_disable_unprepare(pdata->clk); return ret; + } xgene_gmac_reset(pdata); return 0; @@ -388,8 +391,10 @@ static int xgene_mdio_probe(struct platform_device *pdev) return ret; mdio_bus = mdiobus_alloc(); - if (!mdio_bus) - return -ENOMEM; + if (!mdio_bus) { + ret = -ENOMEM; + goto out_clk; + } mdio_bus->name = "APM X-Gene MDIO bus"; @@ -418,7 +423,7 @@ static int xgene_mdio_probe(struct platform_device *pdev) mdio_bus->phy_mask = ~0; ret = mdiobus_register(mdio_bus); if (ret) - goto out; + goto out_mdiobus; acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1, acpi_register_phy, NULL, mdio_bus, NULL); @@ -426,16 +431,20 @@ static int xgene_mdio_probe(struct platform_device *pdev) } if (ret) - goto out; + goto out_mdiobus; pdata->mdio_bus = mdio_bus; xgene_mdio_status = true; return 0; -out: +out_mdiobus: mdiobus_free(mdio_bus); +out_clk: + if (dev->of_node) + clk_disable_unprepare(pdata->clk); + return ret; } diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index 1ea69b7585d9..7ddb709f69fc 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -25,27 +25,53 @@ static int meson_gxl_config_init(struct phy_device *phydev) { + int ret; + /* Enable Analog and DSP register Bank access by */ - phy_write(phydev, 0x14, 0x0000); - phy_write(phydev, 0x14, 0x0400); - phy_write(phydev, 0x14, 0x0000); - phy_write(phydev, 0x14, 0x0400); + ret = 
phy_write(phydev, 0x14, 0x0000); + if (ret) + return ret; + ret = phy_write(phydev, 0x14, 0x0400); + if (ret) + return ret; + ret = phy_write(phydev, 0x14, 0x0000); + if (ret) + return ret; + ret = phy_write(phydev, 0x14, 0x0400); + if (ret) + return ret; /* Write Analog register 23 */ - phy_write(phydev, 0x17, 0x8E0D); - phy_write(phydev, 0x14, 0x4417); + ret = phy_write(phydev, 0x17, 0x8E0D); + if (ret) + return ret; + ret = phy_write(phydev, 0x14, 0x4417); + if (ret) + return ret; /* Enable fractional PLL */ - phy_write(phydev, 0x17, 0x0005); - phy_write(phydev, 0x14, 0x5C1B); + ret = phy_write(phydev, 0x17, 0x0005); + if (ret) + return ret; + ret = phy_write(phydev, 0x14, 0x5C1B); + if (ret) + return ret; /* Program fraction FR_PLL_DIV1 */ - phy_write(phydev, 0x17, 0x029A); - phy_write(phydev, 0x14, 0x5C1D); + ret = phy_write(phydev, 0x17, 0x029A); + if (ret) + return ret; + ret = phy_write(phydev, 0x14, 0x5C1D); + if (ret) + return ret; /* Program fraction FR_PLL_DIV1 */ - phy_write(phydev, 0x17, 0xAAAA); - phy_write(phydev, 0x14, 0x5C1C); + ret = phy_write(phydev, 0x17, 0xAAAA); + if (ret) + return ret; + ret = phy_write(phydev, 0x14, 0x5C1C); + if (ret) + return ret; return 0; } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index fdb43dd9b5cd..6c45ff650ec7 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -622,6 +622,7 @@ static int ksz9031_read_status(struct phy_device *phydev) phydev->link = 0; if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) phydev->drv->config_intr(phydev); + return genphy_config_aneg(phydev); } return 0; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 2b1e67bc1e73..47d2ef2fb9b3 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -511,7 +511,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync) * negotiation may already be done and aneg interrupt may not be * generated. 
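+ * (The phydev->irq != PHY_POLL test below also covers PHY_IGNORE_INTERRUPT, where interrupts are handled out of band and no aneg interrupt will arrive.)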
*/ - if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) { + if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) { err = phy_aneg_done(phydev); if (err > 0) { trigger = true; @@ -614,6 +614,91 @@ static void phy_error(struct phy_device *phydev) phy_trigger_machine(phydev, false); } +/** + * phy_disable_interrupts - Disable the PHY interrupts from the PHY side + * @phydev: target phy_device struct + */ +static int phy_disable_interrupts(struct phy_device *phydev) +{ + int err; + + /* Disable PHY interrupts */ + err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); + if (err) + goto phy_err; + + /* Clear the interrupt */ + err = phy_clear_interrupt(phydev); + if (err) + goto phy_err; + + return 0; + +phy_err: + phy_error(phydev); + + return err; +} + +/** + * phy_change - Called by the phy_interrupt to handle PHY changes + * @phydev: phy_device struct that interrupted + */ +static irqreturn_t phy_change(struct phy_device *phydev) +{ + if (phy_interrupt_is_valid(phydev)) { + if (phydev->drv->did_interrupt && + !phydev->drv->did_interrupt(phydev)) + goto ignore; + + if (phy_disable_interrupts(phydev)) + goto phy_err; + } + + mutex_lock(&phydev->lock); + if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) + phydev->state = PHY_CHANGELINK; + mutex_unlock(&phydev->lock); + + if (phy_interrupt_is_valid(phydev)) { + atomic_dec(&phydev->irq_disable); + enable_irq(phydev->irq); + + /* Reenable interrupts */ + if (PHY_HALTED != phydev->state && + phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED)) + goto irq_enable_err; + } + + /* reschedule state queue work to run as soon as possible */ + phy_trigger_machine(phydev, true); + return IRQ_HANDLED; + +ignore: + atomic_dec(&phydev->irq_disable); + enable_irq(phydev->irq); + return IRQ_NONE; + +irq_enable_err: + disable_irq(phydev->irq); + atomic_inc(&phydev->irq_disable); +phy_err: + phy_error(phydev); + return IRQ_NONE; +} + +/** + * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes + * @work: work_struct that describes the work to be done + */ +void phy_change_work(struct work_struct *work) +{ + struct phy_device *phydev = + container_of(work, struct phy_device, phy_queue); + + phy_change(phydev); +} + /** * phy_interrupt - PHY interrupt handler * @irq: interrupt line @@ -632,9 +717,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) disable_irq_nosync(irq); atomic_inc(&phydev->irq_disable); - phy_change(phydev); - - return IRQ_HANDLED; + return phy_change(phydev); } /** @@ -651,32 +734,6 @@ static int phy_enable_interrupts(struct phy_device *phydev) return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); } -/** - * phy_disable_interrupts - Disable the PHY interrupts from the PHY side - * @phydev: target phy_device struct - */ -static int phy_disable_interrupts(struct phy_device *phydev) -{ - int err; - - /* Disable PHY interrupts */ - err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); - if (err) - goto phy_err; - - /* Clear the interrupt */ - err = phy_clear_interrupt(phydev); - if (err) - goto phy_err; - - return 0; - -phy_err: - phy_error(phydev); - - return err; -} - /** * phy_start_interrupts - request and enable interrupts for a PHY device * @phydev: target phy_device struct @@ -727,64 +784,6 @@ int phy_stop_interrupts(struct phy_device *phydev) } EXPORT_SYMBOL(phy_stop_interrupts); -/** - * phy_change - Called by the phy_interrupt to handle PHY changes - * @phydev: phy_device struct that interrupted - */ -void phy_change(struct phy_device *phydev) -{ - if 
(phy_interrupt_is_valid(phydev)) { - if (phydev->drv->did_interrupt && - !phydev->drv->did_interrupt(phydev)) - goto ignore; - - if (phy_disable_interrupts(phydev)) - goto phy_err; - } - - mutex_lock(&phydev->lock); - if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) - phydev->state = PHY_CHANGELINK; - mutex_unlock(&phydev->lock); - - if (phy_interrupt_is_valid(phydev)) { - atomic_dec(&phydev->irq_disable); - enable_irq(phydev->irq); - - /* Reenable interrupts */ - if (PHY_HALTED != phydev->state && - phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED)) - goto irq_enable_err; - } - - /* reschedule state queue work to run as soon as possible */ - phy_trigger_machine(phydev, true); - return; - -ignore: - atomic_dec(&phydev->irq_disable); - enable_irq(phydev->irq); - return; - -irq_enable_err: - disable_irq(phydev->irq); - atomic_inc(&phydev->irq_disable); -phy_err: - phy_error(phydev); -} - -/** - * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes - * @work: work_struct that describes the work to be done - */ -void phy_change_work(struct work_struct *work) -{ - struct phy_device *phydev = - container_of(work, struct phy_device, phy_queue); - - phy_change(phydev); -} - /** * phy_stop - Bring down the PHY link, and stop checking the status * @phydev: target phy_device struct @@ -828,7 +827,6 @@ EXPORT_SYMBOL(phy_stop); */ void phy_start(struct phy_device *phydev) { - bool do_resume = false; int err = 0; mutex_lock(&phydev->lock); @@ -841,25 +839,23 @@ void phy_start(struct phy_device *phydev) phydev->state = PHY_UP; break; case PHY_HALTED: + /* if phy was suspended, bring the physical link up again */ + __phy_resume(phydev); + /* make sure interrupts are re-enabled for the PHY */ - if (phydev->irq != PHY_POLL) { + if (phy_interrupt_is_valid(phydev)) { err = phy_enable_interrupts(phydev); if (err < 0) break; } phydev->state = PHY_RESUMING; - do_resume = true; break; default: break; } mutex_unlock(&phydev->lock); - /* if phy was suspended, bring the physical link up again */ - if (do_resume) - phy_resume(phydev); - phy_trigger_machine(phydev, true); } EXPORT_SYMBOL(phy_start); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 67f25ac29025..a174d05a9752 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -999,10 +999,17 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj, "attached_dev"); if (!err) { - err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj, - "phydev"); - if (err) - goto error; + err = sysfs_create_link_nowarn(&dev->dev.kobj, + &phydev->mdio.dev.kobj, + "phydev"); + if (err) { + dev_err(&dev->dev, "could not add device link to %s err %d\n", + kobject_name(&phydev->mdio.dev.kobj), + err); + /* non-fatal - some net drivers can use one netdevice + * with more than one phy + */ + } phydev->sysfs_links = true; } @@ -1152,11 +1159,13 @@ int phy_suspend(struct phy_device *phydev) } EXPORT_SYMBOL(phy_suspend); -int phy_resume(struct phy_device *phydev) +int __phy_resume(struct phy_device *phydev) { struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); int ret = 0; + WARN_ON(!mutex_is_locked(&phydev->lock)); + if (phydev->drv && phydrv->resume) ret = phydrv->resume(phydev); @@ -1167,6 +1176,18 @@ int phy_resume(struct phy_device *phydev) return ret; } +EXPORT_SYMBOL(__phy_resume); + +int phy_resume(struct phy_device *phydev) +{ + int ret; + + mutex_lock(&phydev->lock); + ret = 
__phy_resume(phydev); + mutex_unlock(&phydev->lock); + + return ret; +} EXPORT_SYMBOL(phy_resume); int phy_loopback(struct phy_device *phydev, bool enable) @@ -1639,13 +1660,9 @@ int genphy_resume(struct phy_device *phydev) { int value; - mutex_lock(&phydev->lock); - value = phy_read(phydev, MII_BMCR); phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); - mutex_unlock(&phydev->lock); - return 0; } EXPORT_SYMBOL(genphy_resume); @@ -1669,11 +1686,8 @@ EXPORT_SYMBOL(genphy_loopback); static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) { - /* The default values for phydev->supported are provided by the PHY - * driver "features" member, we want to reset to sane defaults first - * before supporting higher speeds. - */ - phydev->supported &= PHY_DEFAULT_FEATURES; + phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES | + PHY_10BT_FEATURES); switch (max_speed) { default: diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index bcb4755bcd95..e4a6ed88b9cf 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -525,6 +525,7 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np, pl->link_config.pause = MLO_PAUSE_AN; pl->link_config.speed = SPEED_UNKNOWN; pl->link_config.duplex = DUPLEX_UNKNOWN; + pl->link_config.an_enabled = true; pl->ops = ops; __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); @@ -560,6 +561,8 @@ void phylink_destroy(struct phylink *pl) { if (pl->sfp_bus) sfp_unregister_upstream(pl->sfp_bus); + if (!IS_ERR(pl->link_gpio)) + gpiod_put(pl->link_gpio); cancel_work_sync(&pl->resolve); kfree(pl); @@ -771,6 +774,7 @@ void phylink_stop(struct phylink *pl) sfp_upstream_stop(pl->sfp_bus); set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); + queue_work(system_power_efficient_wq, &pl->resolve); flush_work(&pl->resolve); } EXPORT_SYMBOL_GPL(phylink_stop); @@ -948,6 +952,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, mutex_lock(&pl->state_mutex); /* Configure the MAC to match the new settings */ linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising); + pl->link_config.interface = config.interface; pl->link_config.speed = our_kset.base.speed; pl->link_config.duplex = our_kset.base.duplex; pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE; @@ -1426,9 +1431,8 @@ static void phylink_sfp_link_down(void *upstream) WARN_ON(!lockdep_rtnl_is_held()); set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); + queue_work(system_power_efficient_wq, &pl->resolve); flush_work(&pl->resolve); - - netif_carrier_off(pl->netdev); } static void phylink_sfp_link_up(void *upstream) diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 5cb5384697ea..7ae815bee52d 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -359,7 +359,8 @@ EXPORT_SYMBOL_GPL(sfp_register_upstream); void sfp_unregister_upstream(struct sfp_bus *bus) { rtnl_lock(); - sfp_unregister_bus(bus); + if (bus->sfp) + sfp_unregister_bus(bus); bus->upstream = NULL; bus->netdev = NULL; rtnl_unlock(); @@ -464,7 +465,8 @@ EXPORT_SYMBOL_GPL(sfp_register_socket); void sfp_unregister_socket(struct sfp_bus *bus) { rtnl_lock(); - sfp_unregister_bus(bus); + if (bus->netdev) + sfp_unregister_bus(bus); bus->sfp_dev = NULL; bus->sfp = NULL; bus->socket_ops = NULL; diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index baee371bf767..3165bc7b8e1e 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -318,12 +318,12 @@ static void 
sfp_sm_probe_phy(struct sfp *sfp) msleep(T_PHY_RESET_MS); phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR); - if (IS_ERR(phy)) { - dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy)); + if (phy == ERR_PTR(-ENODEV)) { + dev_info(sfp->dev, "no PHY detected\n"); return; } - if (!phy) { - dev_info(sfp->dev, "no PHY detected\n"); + if (IS_ERR(phy)) { + dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy)); return; } @@ -358,7 +358,7 @@ static void sfp_sm_link_check_los(struct sfp *sfp) * SFP_OPTIONS_LOS_NORMAL are set? For now, we assume * the same as SFP_OPTIONS_LOS_NORMAL set. */ - if (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED) + if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_LOS_INVERTED)) los ^= SFP_F_LOS; if (los) @@ -583,7 +583,8 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) if (event == SFP_E_TX_FAULT) sfp_sm_fault(sfp, true); else if (event == - (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ? + (sfp->id.ext.options & + cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) ? SFP_E_LOS_HIGH : SFP_E_LOS_LOW)) sfp_sm_link_up(sfp); break; @@ -593,7 +594,8 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) sfp_sm_link_down(sfp); sfp_sm_fault(sfp, true); } else if (event == - (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ? + (sfp->id.ext.options & + cpu_to_be16(SFP_OPTIONS_LOS_INVERTED) ? SFP_E_LOS_LOW : SFP_E_LOS_HIGH)) { sfp_sm_link_down(sfp); sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0); @@ -665,20 +667,19 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee, len = min_t(unsigned int, last, ETH_MODULE_SFF_8079_LEN); len -= first; - ret = sfp->read(sfp, false, first, data, len); + ret = sfp_read(sfp, false, first, data, len); if (ret < 0) return ret; first += len; data += len; } - if (first >= ETH_MODULE_SFF_8079_LEN && - first < ETH_MODULE_SFF_8472_LEN) { + if (first < ETH_MODULE_SFF_8472_LEN && last > ETH_MODULE_SFF_8079_LEN) { len = min_t(unsigned int, last, ETH_MODULE_SFF_8472_LEN); len -= first; first -= ETH_MODULE_SFF_8079_LEN; - ret = sfp->read(sfp, true, first, data, len); + ret = sfp_read(sfp, true, first, data, len); if (ret < 0) return ret; } diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig index 1373c6d7278d..282aec4860eb 100644 --- a/drivers/net/ppp/Kconfig +++ b/drivers/net/ppp/Kconfig @@ -149,6 +149,23 @@ config PPPOL2TP tunnels. L2TP is replacing PPTP for VPN uses. if TTY +config PPPOLAC + tristate "PPP on L2TP Access Concentrator" + depends on PPP && INET + help + L2TP (RFC 2661) is a tunneling protocol widely used in virtual private + networks. This driver handles L2TP data packets between a UDP socket + and a PPP channel, but only permits one session per socket. Thus it is + fairly simple and suited for clients. + +config PPPOPNS + tristate "PPP on PPTP Network Server" + depends on PPP && INET + help + PPTP (RFC 2637) is a tunneling protocol widely used in virtual private + networks. This driver handles PPTP data packets between a RAW socket + and a PPP channel. It is fairly simple and easy to use. 
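For context on how these two options are meant to be used: userspace opens the transport socket itself, then connect()s an AF_PPPOX socket that references it, after which the descriptor behaves like any other PPP channel (PPPIOCGCHAN and friends). A minimal sketch of the PPPoLAC case — assuming the sockaddr_pppolac layout that pppolac_connect() below validates, the PX_PROTO_OLAC constant from the patched if_pppox.h uapi header, SOCK_DGRAM as the (unchecked) socket type Android's userspace uses, and tunnel/session IDs in network byte order, since the driver matches them against the wire header verbatim:

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>
    #include <linux/if_pppox.h>

    /* Sketch only: udp_fd must already be connect()ed to the LNS. */
    static int lac_channel(int udp_fd, uint16_t local_tunnel,
                           uint16_t local_session, uint16_t remote_tunnel,
                           uint16_t remote_session)
    {
        struct sockaddr_pppolac addr = {
            .sa_family = AF_PPPOX,
            .sa_protocol = PX_PROTO_OLAC,
            .udp_socket = udp_fd,
            .local = {
                .tunnel = htons(local_tunnel),
                .session = htons(local_session),
            },
            .remote = {
                .tunnel = htons(remote_tunnel),
                .session = htons(remote_session),
            },
        };
        int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OLAC);

        if (fd < 0)
            return -1;
        if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
            close(fd);
            return -1;
        }
        return fd;  /* fetch the channel index with PPPIOCGCHAN next */
    }

The PPPoPNS variant is analogous, except the address carries the control TCP socket plus local/remote call IDs, and the kernel creates the GRE socket itself.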
+ config PPP_ASYNC tristate "PPP support for async serial ports" depends on PPP diff --git a/drivers/net/ppp/Makefile b/drivers/net/ppp/Makefile index 16c457d6b324..879e1663dd48 100644 --- a/drivers/net/ppp/Makefile +++ b/drivers/net/ppp/Makefile @@ -12,3 +12,5 @@ obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o obj-$(CONFIG_PPPOE) += pppox.o pppoe.o obj-$(CONFIG_PPPOL2TP) += pppox.o obj-$(CONFIG_PPTP) += pppox.o pptp.o +obj-$(CONFIG_PPPOLAC) += pppox.o pppolac.o +obj-$(CONFIG_PPPOPNS) += pppox.o pppopns.o diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index e365866600ba..34b24d7e1e2f 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -256,7 +256,7 @@ struct ppp_net { /* Prototypes. */ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, struct file *file, unsigned int cmd, unsigned long arg); -static void ppp_xmit_process(struct ppp *ppp); +static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb); static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); static void ppp_push(struct ppp *ppp); static void ppp_channel_push(struct channel *pch); @@ -512,13 +512,12 @@ static ssize_t ppp_write(struct file *file, const char __user *buf, goto out; } - skb_queue_tail(&pf->xq, skb); - switch (pf->kind) { case INTERFACE: - ppp_xmit_process(PF_TO_PPP(pf)); + ppp_xmit_process(PF_TO_PPP(pf), skb); break; case CHANNEL: + skb_queue_tail(&pf->xq, skb); ppp_channel_push(PF_TO_CHANNEL(pf)); break; } @@ -959,6 +958,7 @@ static __net_exit void ppp_exit_net(struct net *net) unregister_netdevice_many(&list); rtnl_unlock(); + mutex_destroy(&pn->all_ppp_mutex); idr_destroy(&pn->units_idr); } @@ -1002,17 +1002,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set) if (!ifname_is_set) snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index); + mutex_unlock(&pn->all_ppp_mutex); + ret = register_netdevice(ppp->dev); if (ret < 0) goto err_unit; atomic_inc(&ppp_unit_count); - mutex_unlock(&pn->all_ppp_mutex); - return 0; err_unit: + mutex_lock(&pn->all_ppp_mutex); unit_put(&pn->units_idr, ppp->file.index); err: mutex_unlock(&pn->all_ppp_mutex); @@ -1262,8 +1263,8 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) put_unaligned_be16(proto, pp); skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev))); - skb_queue_tail(&ppp->file.xq, skb); - ppp_xmit_process(ppp); + ppp_xmit_process(ppp, skb); + return NETDEV_TX_OK; outf: @@ -1415,13 +1416,14 @@ static void ppp_setup(struct net_device *dev) */ /* Called to do any work queued up on the transmit side that can now be done */ -static void __ppp_xmit_process(struct ppp *ppp) +static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb) { - struct sk_buff *skb; - ppp_xmit_lock(ppp); if (!ppp->closing) { ppp_push(ppp); + + if (skb) + skb_queue_tail(&ppp->file.xq, skb); while (!ppp->xmit_pending && (skb = skb_dequeue(&ppp->file.xq))) ppp_send_frame(ppp, skb); @@ -1435,7 +1437,7 @@ static void __ppp_xmit_process(struct ppp *ppp) ppp_xmit_unlock(ppp); } -static void ppp_xmit_process(struct ppp *ppp) +static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb) { local_bh_disable(); @@ -1443,7 +1445,7 @@ static void ppp_xmit_process(struct ppp *ppp) goto err; (*this_cpu_ptr(ppp->xmit_recursion))++; - __ppp_xmit_process(ppp); + __ppp_xmit_process(ppp, skb); (*this_cpu_ptr(ppp->xmit_recursion))--; local_bh_enable(); @@ -1453,6 +1455,8 @@ static void ppp_xmit_process(struct ppp *ppp) err: local_bh_enable(); + kfree_skb(skb); + if 
(net_ratelimit()) netdev_err(ppp->dev, "recursion detected\n"); } @@ -1937,7 +1941,7 @@ static void __ppp_channel_push(struct channel *pch) if (skb_queue_empty(&pch->file.xq)) { ppp = pch->ppp; if (ppp) - __ppp_xmit_process(ppp); + __ppp_xmit_process(ppp, NULL); } } @@ -3156,6 +3160,15 @@ ppp_connect_channel(struct channel *pch, int unit) goto outl; ppp_lock(ppp); + spin_lock_bh(&pch->downl); + if (!pch->chan) { + /* Don't connect unregistered channels */ + spin_unlock_bh(&pch->downl); + ppp_unlock(ppp); + ret = -ENOTCONN; + goto outl; + } + spin_unlock_bh(&pch->downl); if (pch->file.hdrlen > ppp->file.hdrlen) ppp->file.hdrlen = pch->file.hdrlen; hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 4e1da1645b15..71e2aef6b7a1 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, lock_sock(sk); error = -EINVAL; + + if (sockaddr_len != sizeof(struct sockaddr_pppox)) + goto end; + if (sp->sa_protocol != PX_PROTO_OE) goto end; @@ -842,6 +846,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m, struct pppoe_hdr *ph; struct net_device *dev; char *start; + int hlen; lock_sock(sk); if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) { @@ -860,16 +865,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m, if (total_len > (dev->mtu + dev->hard_header_len)) goto end; - - skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32, - 0, GFP_KERNEL); + hlen = LL_RESERVED_SPACE(dev); + skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len + + dev->needed_tailroom, 0, GFP_KERNEL); if (!skb) { error = -ENOMEM; goto end; } /* Reserve space for headers. */ - skb_reserve(skb, dev->hard_header_len); + skb_reserve(skb, hlen); skb_reset_network_header(skb); skb->dev = dev; @@ -930,7 +935,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) /* Copy the data if there is no space for the header or if it's * read-only. */ - if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len)) + if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph))) goto abort; __skb_push(skb, sizeof(*ph)); diff --git a/drivers/net/ppp/pppolac.c b/drivers/net/ppp/pppolac.c new file mode 100644 index 000000000000..95cc6be54ad5 --- /dev/null +++ b/drivers/net/ppp/pppolac.c @@ -0,0 +1,450 @@ +/* drivers/net/pppolac.c + * + * Driver for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661) + * + * Copyright (C) 2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* This driver handles L2TP data packets between a UDP socket and a PPP channel. + * The socket must stay connected, and only one session per socket is permitted. + * Sequencing of outgoing packets is controlled by the LNS. Incoming packets with + * sequences are reordered within a sliding window of one second. Currently + * reordering only happens when a packet is received. It is done for simplicity + * since no additional locks or threads are required.
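+ * A packet that never arrives therefore stalls its queued successors for at + * most one second (HZ jiffies); after that the queue is flushed past the gap.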
This driver only works on + * IPv4 due to the lack of UDP encapsulation support in IPv6. */ + +#include <linux/module.h> +#include <linux/jiffies.h> +#include <linux/workqueue.h> +#include <linux/skbuff.h> +#include <linux/file.h> +#include <linux/netdevice.h> +#include <linux/net.h> +#include <linux/udp.h> +#include <linux/ppp_defs.h> +#include <linux/if_ppp.h> +#include <linux/if_pppox.h> +#include <linux/ppp_channel.h> +#include <net/tcp_states.h> +#include <asm/uaccess.h> + +#define L2TP_CONTROL_BIT 0x80 +#define L2TP_LENGTH_BIT 0x40 +#define L2TP_SEQUENCE_BIT 0x08 +#define L2TP_OFFSET_BIT 0x02 +#define L2TP_VERSION 0x02 +#define L2TP_VERSION_MASK 0x0F + +#define PPP_ADDR 0xFF +#define PPP_CTRL 0x03 + +union unaligned { + __u32 u32; +} __attribute__((packed)); + +static inline union unaligned *unaligned(void *ptr) +{ + return (union unaligned *)ptr; +} + +struct meta { + __u32 sequence; + __u32 timestamp; +}; + +static inline struct meta *skb_meta(struct sk_buff *skb) +{ + return (struct meta *)skb->cb; +} + +/******************************************************************************/ + +static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb) +{ + struct sock *sk = (struct sock *)sk_udp->sk_user_data; + struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac; + struct meta *meta = skb_meta(skb); + __u32 now = jiffies; + __u8 bits; + __u8 *ptr; + + /* Drop the packet if L2TP header is missing. */ + if (skb->len < sizeof(struct udphdr) + 6) + goto drop; + + /* Put it back if it is a control packet. */ + if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT) + return opt->backlog_rcv(sk_udp, skb); + + /* Skip UDP header. */ + skb_pull(skb, sizeof(struct udphdr)); + + /* Check the version. */ + if ((skb->data[1] & L2TP_VERSION_MASK) != L2TP_VERSION) + goto drop; + bits = skb->data[0]; + ptr = &skb->data[2]; + + /* Check the length if it is present. */ + if (bits & L2TP_LENGTH_BIT) { + if ((ptr[0] << 8 | ptr[1]) != skb->len) + goto drop; + ptr += 2; + } + + /* Skip all fields including optional ones. */ + if (!skb_pull(skb, 6 + (bits & L2TP_SEQUENCE_BIT ? 4 : 0) + + (bits & L2TP_LENGTH_BIT ? 2 : 0) + + (bits & L2TP_OFFSET_BIT ? 2 : 0))) + goto drop; + + /* Skip the offset padding if it is present. */ + if (bits & L2TP_OFFSET_BIT && + !skb_pull(skb, skb->data[-2] << 8 | skb->data[-1])) + goto drop; + + /* Check the tunnel and the session. */ + if (unaligned(ptr)->u32 != opt->local) + goto drop; + + /* Check the sequence if it is present. */ + if (bits & L2TP_SEQUENCE_BIT) { + meta->sequence = ptr[4] << 8 | ptr[5]; + if ((__s16)(meta->sequence - opt->recv_sequence) < 0) + goto drop; + } + + /* Skip PPP address and control if they are present. */ + if (skb->len >= 2 && skb->data[0] == PPP_ADDR && + skb->data[1] == PPP_CTRL) + skb_pull(skb, 2); + + /* Fix PPP protocol if it is compressed. */ + if (skb->len >= 1 && skb->data[0] & 1) + *(u8 *)skb_push(skb, 1) = 0; + + /* Drop the packet if PPP protocol is missing. */ + if (skb->len < 2) + goto drop; + + /* Perform reordering if sequencing is enabled. */ + atomic_set(&opt->sequencing, bits & L2TP_SEQUENCE_BIT); + if (bits & L2TP_SEQUENCE_BIT) { + struct sk_buff *skb1; + + /* Insert the packet into receive queue in order. */ + skb_set_owner_r(skb, sk); + skb_queue_walk(&sk->sk_receive_queue, skb1) { + struct meta *meta1 = skb_meta(skb1); + __s16 order = meta->sequence - meta1->sequence; + if (order == 0) + goto drop; + if (order < 0) { + meta->timestamp = meta1->timestamp; + skb_insert(skb1, skb, &sk->sk_receive_queue); + skb = NULL; + break; + } + } + if (skb) { + meta->timestamp = now; + skb_queue_tail(&sk->sk_receive_queue, skb); + } + + /* Remove packets from receive queue as long as + * 1. the receive buffer is full, + * 2.
they are queued longer than one second, or + * 3. there are no missing packets before them. */ + skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) { + meta = skb_meta(skb); + if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && + now - meta->timestamp < HZ && + meta->sequence != opt->recv_sequence) + break; + skb_unlink(skb, &sk->sk_receive_queue); + opt->recv_sequence = (__u16)(meta->sequence + 1); + skb_orphan(skb); + ppp_input(&pppox_sk(sk)->chan, skb); + } + return NET_RX_SUCCESS; + } + + /* Flush receive queue if sequencing is disabled. */ + skb_queue_purge(&sk->sk_receive_queue); + skb_orphan(skb); + ppp_input(&pppox_sk(sk)->chan, skb); + return NET_RX_SUCCESS; +drop: + kfree_skb(skb); + return NET_RX_DROP; +} + +static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb) +{ + sock_hold(sk_udp); + sk_receive_skb(sk_udp, skb, 0); + return 0; +} + +static struct sk_buff_head delivery_queue; + +static void pppolac_xmit_core(struct work_struct *delivery_work) +{ + mm_segment_t old_fs = get_fs(); + struct sk_buff *skb; + + set_fs(KERNEL_DS); + while ((skb = skb_dequeue(&delivery_queue))) { + struct sock *sk_udp = skb->sk; + struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len}; + struct msghdr msg = { + .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT, + }; + + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, + skb->len); + sk_udp->sk_prot->sendmsg(sk_udp, &msg, skb->len); + kfree_skb(skb); + } + set_fs(old_fs); +} + +static DECLARE_WORK(delivery_work, pppolac_xmit_core); + +static int pppolac_xmit(struct ppp_channel *chan, struct sk_buff *skb) +{ + struct sock *sk_udp = (struct sock *)chan->private; + struct pppolac_opt *opt = &pppox_sk(sk_udp->sk_user_data)->proto.lac; + + /* Install PPP address and control. */ + skb_push(skb, 2); + skb->data[0] = PPP_ADDR; + skb->data[1] = PPP_CTRL; + + /* Install L2TP header. */ + if (atomic_read(&opt->sequencing)) { + skb_push(skb, 10); + skb->data[0] = L2TP_SEQUENCE_BIT; + skb->data[6] = opt->xmit_sequence >> 8; + skb->data[7] = opt->xmit_sequence; + skb->data[8] = 0; + skb->data[9] = 0; + opt->xmit_sequence++; + } else { + skb_push(skb, 6); + skb->data[0] = 0; + } + skb->data[1] = L2TP_VERSION; + unaligned(&skb->data[2])->u32 = opt->remote; + + /* Now send the packet via the delivery queue. */ + skb_set_owner_w(skb, sk_udp); + skb_queue_tail(&delivery_queue, skb); + schedule_work(&delivery_work); + return 1; +} + +/******************************************************************************/ + +static struct ppp_channel_ops pppolac_channel_ops = { + .start_xmit = pppolac_xmit, +}; + +static int pppolac_connect(struct socket *sock, struct sockaddr *useraddr, + int addrlen, int flags) +{ + struct sock *sk = sock->sk; + struct pppox_sock *po = pppox_sk(sk); + struct sockaddr_pppolac *addr = (struct sockaddr_pppolac *)useraddr; + struct socket *sock_udp = NULL; + struct sock *sk_udp; + int error; + + if (addrlen != sizeof(struct sockaddr_pppolac) || + !addr->local.tunnel || !addr->local.session || + !addr->remote.tunnel || !addr->remote.session) { + return -EINVAL; + } + + lock_sock(sk); + error = -EALREADY; + if (sk->sk_state != PPPOX_NONE) + goto out; + + sock_udp = sockfd_lookup(addr->udp_socket, &error); + if (!sock_udp) + goto out; + sk_udp = sock_udp->sk; + lock_sock(sk_udp); + + /* Remove this check when IPv6 supports UDP encapsulation. 
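+ * Until then only AF_INET transport sockets are accepted here; anything + * else fails with -EAFNOSUPPORT.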
*/ + error = -EAFNOSUPPORT; + if (sk_udp->sk_family != AF_INET) + goto out; + error = -EPROTONOSUPPORT; + if (sk_udp->sk_protocol != IPPROTO_UDP) + goto out; + error = -EDESTADDRREQ; + if (sk_udp->sk_state != TCP_ESTABLISHED) + goto out; + error = -EBUSY; + if (udp_sk(sk_udp)->encap_type || sk_udp->sk_user_data) + goto out; + if (!sk_udp->sk_bound_dev_if) { + struct dst_entry *dst = sk_dst_get(sk_udp); + error = -ENODEV; + if (!dst) + goto out; + sk_udp->sk_bound_dev_if = dst->dev->ifindex; + dst_release(dst); + } + + po->chan.hdrlen = 12; + po->chan.private = sk_udp; + po->chan.ops = &pppolac_channel_ops; + po->chan.mtu = PPP_MRU - 80; + po->proto.lac.local = unaligned(&addr->local)->u32; + po->proto.lac.remote = unaligned(&addr->remote)->u32; + atomic_set(&po->proto.lac.sequencing, 1); + po->proto.lac.backlog_rcv = sk_udp->sk_backlog_rcv; + + error = ppp_register_channel(&po->chan); + if (error) + goto out; + + sk->sk_state = PPPOX_CONNECTED; + udp_sk(sk_udp)->encap_type = UDP_ENCAP_L2TPINUDP; + udp_sk(sk_udp)->encap_rcv = pppolac_recv; + sk_udp->sk_backlog_rcv = pppolac_recv_core; + sk_udp->sk_user_data = sk; +out: + if (sock_udp) { + release_sock(sk_udp); + if (error) + sockfd_put(sock_udp); + } + release_sock(sk); + return error; +} + +static int pppolac_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (!sk) + return 0; + + lock_sock(sk); + if (sock_flag(sk, SOCK_DEAD)) { + release_sock(sk); + return -EBADF; + } + + if (sk->sk_state != PPPOX_NONE) { + struct sock *sk_udp = (struct sock *)pppox_sk(sk)->chan.private; + lock_sock(sk_udp); + skb_queue_purge(&sk->sk_receive_queue); + pppox_unbind_sock(sk); + udp_sk(sk_udp)->encap_type = 0; + udp_sk(sk_udp)->encap_rcv = NULL; + sk_udp->sk_backlog_rcv = pppox_sk(sk)->proto.lac.backlog_rcv; + sk_udp->sk_user_data = NULL; + release_sock(sk_udp); + sockfd_put(sk_udp->sk_socket); + } + + sock_orphan(sk); + sock->sk = NULL; + release_sock(sk); + sock_put(sk); + return 0; +} + +/******************************************************************************/ + +static struct proto pppolac_proto = { + .name = "PPPOLAC", + .owner = THIS_MODULE, + .obj_size = sizeof(struct pppox_sock), +}; + +static struct proto_ops pppolac_proto_ops = { + .family = PF_PPPOX, + .owner = THIS_MODULE, + .release = pppolac_release, + .bind = sock_no_bind, + .connect = pppolac_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .getname = sock_no_getname, + .poll = sock_no_poll, + .ioctl = pppox_ioctl, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = sock_no_setsockopt, + .getsockopt = sock_no_getsockopt, + .sendmsg = sock_no_sendmsg, + .recvmsg = sock_no_recvmsg, + .mmap = sock_no_mmap, +}; + +static int pppolac_create(struct net *net, struct socket *sock, int kern) +{ + struct sock *sk; + + sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppolac_proto, kern); + if (!sk) + return -ENOMEM; + + sock_init_data(sock, sk); + sock->state = SS_UNCONNECTED; + sock->ops = &pppolac_proto_ops; + sk->sk_protocol = PX_PROTO_OLAC; + sk->sk_state = PPPOX_NONE; + return 0; +} + +/******************************************************************************/ + +static struct pppox_proto pppolac_pppox_proto = { + .create = pppolac_create, + .owner = THIS_MODULE, +}; + +static int __init pppolac_init(void) +{ + int error; + + error = proto_register(&pppolac_proto, 0); + if (error) + return error; + + error = register_pppox_proto(PX_PROTO_OLAC, &pppolac_pppox_proto); + if (error) + proto_unregister(&pppolac_proto); + else + 
skb_queue_head_init(&delivery_queue); + return error; +} + +static void __exit pppolac_exit(void) +{ + unregister_pppox_proto(PX_PROTO_OLAC); + proto_unregister(&pppolac_proto); +} + +module_init(pppolac_init); +module_exit(pppolac_exit); + +MODULE_DESCRIPTION("PPP on L2TP Access Concentrator (PPPoLAC)"); +MODULE_AUTHOR("Chia-chi Yeh "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ppp/pppopns.c b/drivers/net/ppp/pppopns.c new file mode 100644 index 000000000000..a01a07152c86 --- /dev/null +++ b/drivers/net/ppp/pppopns.c @@ -0,0 +1,429 @@ +/* drivers/net/pppopns.c + * + * Driver for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637) + * + * Copyright (C) 2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* This driver handles PPTP data packets between a RAW socket and a PPP channel. + * The socket is created in kernel space and connected to the same peer + * address as the control socket. Outgoing packets are always sent with sequences but + * without acknowledgements. Incoming packets with sequences are reordered + * within a sliding window of one second. Currently reordering only happens when + * a packet is received. It is done for simplicity since no additional locks or + * threads are required. This driver should work on both IPv4 and IPv6. */ + +#include <linux/module.h> +#include <linux/jiffies.h> +#include <linux/workqueue.h> +#include <linux/skbuff.h> +#include <linux/file.h> +#include <linux/netdevice.h> +#include <linux/net.h> +#include <linux/ppp_defs.h> +#include <linux/if_ppp.h> +#include <linux/if_pppox.h> +#include <linux/ppp_channel.h> +#include <net/tcp_states.h> +#include <asm/uaccess.h> + +#define GRE_HEADER_SIZE 8 + +#define PPTP_GRE_BITS htons(0x2001) +#define PPTP_GRE_BITS_MASK htons(0xEF7F) +#define PPTP_GRE_SEQ_BIT htons(0x1000) +#define PPTP_GRE_ACK_BIT htons(0x0080) +#define PPTP_GRE_TYPE htons(0x880B) + +#define PPP_ADDR 0xFF +#define PPP_CTRL 0x03 + +struct header { + __u16 bits; + __u16 type; + __u16 length; + __u16 call; + __u32 sequence; +} __attribute__((packed)); + +struct meta { + __u32 sequence; + __u32 timestamp; +}; + +static inline struct meta *skb_meta(struct sk_buff *skb) +{ + return (struct meta *)skb->cb; +} + +/******************************************************************************/ + +static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb) +{ + struct sock *sk = (struct sock *)sk_raw->sk_user_data; + struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns; + struct meta *meta = skb_meta(skb); + __u32 now = jiffies; + struct header *hdr; + + /* Skip transport header */ + skb_pull(skb, skb_transport_header(skb) - skb->data); + + /* Drop the packet if GRE header is missing. */ + if (skb->len < GRE_HEADER_SIZE) + goto drop; + hdr = (struct header *)skb->data; + + /* Check the header. */ + if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local || + (hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS) + goto drop; + + /* Skip all fields including optional ones. */ + if (!skb_pull(skb, GRE_HEADER_SIZE + + (hdr->bits & PPTP_GRE_SEQ_BIT ? 4 : 0) + + (hdr->bits & PPTP_GRE_ACK_BIT ? 4 : 0))) + goto drop; + + /* Check the length. */ + if (skb->len != ntohs(hdr->length)) + goto drop; + + /* Check the sequence if it is present.
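+ * The signed 32-bit difference below keeps the comparison correct across + * sequence wrap-around; anything ordered before recv_sequence is stale + * and dropped.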
*/ + if (hdr->bits & PPTP_GRE_SEQ_BIT) { + meta->sequence = ntohl(hdr->sequence); + if ((__s32)(meta->sequence - opt->recv_sequence) < 0) + goto drop; + } + + /* Skip PPP address and control if they are present. */ + if (skb->len >= 2 && skb->data[0] == PPP_ADDR && + skb->data[1] == PPP_CTRL) + skb_pull(skb, 2); + + /* Fix PPP protocol if it is compressed. */ + if (skb->len >= 1 && skb->data[0] & 1) + *(u8 *)skb_push(skb, 1) = 0; + + /* Drop the packet if PPP protocol is missing. */ + if (skb->len < 2) + goto drop; + + /* Perform reordering if sequencing is enabled. */ + if (hdr->bits & PPTP_GRE_SEQ_BIT) { + struct sk_buff *skb1; + + /* Insert the packet into receive queue in order. */ + skb_set_owner_r(skb, sk); + skb_queue_walk(&sk->sk_receive_queue, skb1) { + struct meta *meta1 = skb_meta(skb1); + __s32 order = meta->sequence - meta1->sequence; + if (order == 0) + goto drop; + if (order < 0) { + meta->timestamp = meta1->timestamp; + skb_insert(skb1, skb, &sk->sk_receive_queue); + skb = NULL; + break; + } + } + if (skb) { + meta->timestamp = now; + skb_queue_tail(&sk->sk_receive_queue, skb); + } + + /* Remove packets from receive queue as long as + * 1. the receive buffer is full, + * 2. they are queued longer than one second, or + * 3. there are no missing packets before them. */ + skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) { + meta = skb_meta(skb); + if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && + now - meta->timestamp < HZ && + meta->sequence != opt->recv_sequence) + break; + skb_unlink(skb, &sk->sk_receive_queue); + opt->recv_sequence = meta->sequence + 1; + skb_orphan(skb); + ppp_input(&pppox_sk(sk)->chan, skb); + } + return NET_RX_SUCCESS; + } + + /* Flush receive queue if sequencing is disabled. */ + skb_queue_purge(&sk->sk_receive_queue); + skb_orphan(skb); + ppp_input(&pppox_sk(sk)->chan, skb); + return NET_RX_SUCCESS; +drop: + kfree_skb(skb); + return NET_RX_DROP; +} + +static void pppopns_recv(struct sock *sk_raw) +{ + struct sk_buff *skb; + while ((skb = skb_dequeue(&sk_raw->sk_receive_queue))) { + sock_hold(sk_raw); + sk_receive_skb(sk_raw, skb, 0); + } +} + +static struct sk_buff_head delivery_queue; + +static void pppopns_xmit_core(struct work_struct *delivery_work) +{ + mm_segment_t old_fs = get_fs(); + struct sk_buff *skb; + + set_fs(KERNEL_DS); + while ((skb = skb_dequeue(&delivery_queue))) { + struct sock *sk_raw = skb->sk; + struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len}; + struct msghdr msg = { + .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT, + }; + + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, + skb->len); + sk_raw->sk_prot->sendmsg(sk_raw, &msg, skb->len); + kfree_skb(skb); + } + set_fs(old_fs); +} + +static DECLARE_WORK(delivery_work, pppopns_xmit_core); + +static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb) +{ + struct sock *sk_raw = (struct sock *)chan->private; + struct pppopns_opt *opt = &pppox_sk(sk_raw->sk_user_data)->proto.pns; + struct header *hdr; + __u16 length; + + /* Install PPP address and control. */ + skb_push(skb, 2); + skb->data[0] = PPP_ADDR; + skb->data[1] = PPP_CTRL; + length = skb->len; + + /* Install PPTP GRE header. */ + hdr = (struct header *)skb_push(skb, 12); + hdr->bits = PPTP_GRE_BITS | PPTP_GRE_SEQ_BIT; + hdr->type = PPTP_GRE_TYPE; + hdr->length = htons(length); + hdr->call = opt->remote; + hdr->sequence = htonl(opt->xmit_sequence); + opt->xmit_sequence++; + + /* Now send the packet via the delivery queue. 
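+ * sendmsg() may sleep, so the actual transmission is deferred to the + * shared workqueue instead of being done in the channel's atomic xmit + * context; returning 1 tells the PPP core the skb has been consumed.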
*/ + skb_set_owner_w(skb, sk_raw); + skb_queue_tail(&delivery_queue, skb); + schedule_work(&delivery_work); + return 1; +} + +/******************************************************************************/ + +static struct ppp_channel_ops pppopns_channel_ops = { + .start_xmit = pppopns_xmit, +}; + +static int pppopns_connect(struct socket *sock, struct sockaddr *useraddr, + int addrlen, int flags) +{ + struct sock *sk = sock->sk; + struct pppox_sock *po = pppox_sk(sk); + struct sockaddr_pppopns *addr = (struct sockaddr_pppopns *)useraddr; + struct sockaddr_storage ss; + struct socket *sock_tcp = NULL; + struct socket *sock_raw = NULL; + struct sock *sk_tcp; + struct sock *sk_raw; + int error; + + if (addrlen != sizeof(struct sockaddr_pppopns)) + return -EINVAL; + + lock_sock(sk); + error = -EALREADY; + if (sk->sk_state != PPPOX_NONE) + goto out; + + sock_tcp = sockfd_lookup(addr->tcp_socket, &error); + if (!sock_tcp) + goto out; + sk_tcp = sock_tcp->sk; + error = -EPROTONOSUPPORT; + if (sk_tcp->sk_protocol != IPPROTO_TCP) + goto out; + addrlen = sizeof(struct sockaddr_storage); + error = kernel_getpeername(sock_tcp, (struct sockaddr *)&ss, &addrlen); + if (error) + goto out; + if (!sk_tcp->sk_bound_dev_if) { + struct dst_entry *dst = sk_dst_get(sk_tcp); + error = -ENODEV; + if (!dst) + goto out; + sk_tcp->sk_bound_dev_if = dst->dev->ifindex; + dst_release(dst); + } + + error = sock_create(ss.ss_family, SOCK_RAW, IPPROTO_GRE, &sock_raw); + if (error) + goto out; + sk_raw = sock_raw->sk; + sk_raw->sk_bound_dev_if = sk_tcp->sk_bound_dev_if; + error = kernel_connect(sock_raw, (struct sockaddr *)&ss, addrlen, 0); + if (error) + goto out; + + po->chan.hdrlen = 14; + po->chan.private = sk_raw; + po->chan.ops = &pppopns_channel_ops; + po->chan.mtu = PPP_MRU - 80; + po->proto.pns.local = addr->local; + po->proto.pns.remote = addr->remote; + po->proto.pns.data_ready = sk_raw->sk_data_ready; + po->proto.pns.backlog_rcv = sk_raw->sk_backlog_rcv; + + error = ppp_register_channel(&po->chan); + if (error) + goto out; + + sk->sk_state = PPPOX_CONNECTED; + lock_sock(sk_raw); + sk_raw->sk_data_ready = pppopns_recv; + sk_raw->sk_backlog_rcv = pppopns_recv_core; + sk_raw->sk_user_data = sk; + release_sock(sk_raw); +out: + if (sock_tcp) + sockfd_put(sock_tcp); + if (error && sock_raw) + sock_release(sock_raw); + release_sock(sk); + return error; +} + +static int pppopns_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (!sk) + return 0; + + lock_sock(sk); + if (sock_flag(sk, SOCK_DEAD)) { + release_sock(sk); + return -EBADF; + } + + if (sk->sk_state != PPPOX_NONE) { + struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private; + lock_sock(sk_raw); + skb_queue_purge(&sk->sk_receive_queue); + pppox_unbind_sock(sk); + sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready; + sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv; + sk_raw->sk_user_data = NULL; + release_sock(sk_raw); + sock_release(sk_raw->sk_socket); + } + + sock_orphan(sk); + sock->sk = NULL; + release_sock(sk); + sock_put(sk); + return 0; +} + +/******************************************************************************/ + +static struct proto pppopns_proto = { + .name = "PPPOPNS", + .owner = THIS_MODULE, + .obj_size = sizeof(struct pppox_sock), +}; + +static struct proto_ops pppopns_proto_ops = { + .family = PF_PPPOX, + .owner = THIS_MODULE, + .release = pppopns_release, + .bind = sock_no_bind, + .connect = pppopns_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .getname = 
sock_no_getname, + .poll = sock_no_poll, + .ioctl = pppox_ioctl, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = sock_no_setsockopt, + .getsockopt = sock_no_getsockopt, + .sendmsg = sock_no_sendmsg, + .recvmsg = sock_no_recvmsg, + .mmap = sock_no_mmap, +}; + +static int pppopns_create(struct net *net, struct socket *sock, int kern) +{ + struct sock *sk; + + sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppopns_proto, kern); + if (!sk) + return -ENOMEM; + + sock_init_data(sock, sk); + sock->state = SS_UNCONNECTED; + sock->ops = &pppopns_proto_ops; + sk->sk_protocol = PX_PROTO_OPNS; + sk->sk_state = PPPOX_NONE; + return 0; +} + +/******************************************************************************/ + +static struct pppox_proto pppopns_pppox_proto = { + .create = pppopns_create, + .owner = THIS_MODULE, +}; + +static int __init pppopns_init(void) +{ + int error; + + error = proto_register(&pppopns_proto, 0); + if (error) + return error; + + error = register_pppox_proto(PX_PROTO_OPNS, &pppopns_pppox_proto); + if (error) + proto_unregister(&pppopns_proto); + else + skb_queue_head_init(&delivery_queue); + return error; +} + +static void __exit pppopns_exit(void) +{ + unregister_pppox_proto(PX_PROTO_OPNS); + proto_unregister(&pppopns_proto); +} + +module_init(pppopns_init); +module_exit(pppopns_exit); + +MODULE_DESCRIPTION("PPP on PPTP Network Server (PPPoPNS)"); +MODULE_AUTHOR("Chia-chi Yeh "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 6dde9a0cfe76..9b70a3af678e 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -464,7 +464,6 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.mtu = dst_mtu(&rt->dst); if (!po->chan.mtu) po->chan.mtu = PPP_MRU; - ip_rt_put(rt); po->chan.mtu -= PPTP_HEADER_OVERHEAD; po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header); diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c index 5782733959f0..f4e93f5fc204 100644 --- a/drivers/net/slip/slhc.c +++ b/drivers/net/slip/slhc.c @@ -509,6 +509,10 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize) if(x < 0 || x > comp->rslot_limit) goto bad; + /* Check if the cstate is initialized */ + if (!comp->rstate[x].initialized) + goto bad; + comp->flags &=~ SLF_TOSS; comp->recv_current = x; } else { @@ -673,6 +677,7 @@ slhc_remember(struct slcompress *comp, unsigned char *icp, int isize) if (cs->cs_tcp.doff > 5) memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4); cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2; + cs->initialized = true; /* Put headers back on packet * Neither header checksum is recalculated */ diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 6c0c84c33e1f..773a3fea8f0e 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -777,13 +777,16 @@ static ssize_t tap_put_user(struct tap_queue *q, int total; if (q->flags & IFF_VNET_HDR) { + int vlan_hlen = skb_vlan_tag_present(skb) ? 
VLAN_HLEN : 0; struct virtio_net_hdr vnet_hdr; + vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); if (iov_iter_count(iter) < vnet_hdr_len) return -EINVAL; if (virtio_net_hdr_from_skb(skb, &vnet_hdr, - tap_is_little_endian(q), true)) + tap_is_little_endian(q), true, + vlan_hlen)) BUG(); if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != @@ -829,8 +832,11 @@ static ssize_t tap_do_read(struct tap_queue *q, DEFINE_WAIT(wait); ssize_t ret = 0; - if (!iov_iter_count(to)) + if (!iov_iter_count(to)) { + if (skb) + kfree_skb(skb); return 0; + } if (skb) goto put; @@ -1077,7 +1083,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd, case TUNSETOFFLOAD: /* let the user check for future flags */ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | - TUN_F_TSO_ECN)) + TUN_F_TSO_ECN | TUN_F_UFO)) return -EINVAL; rtnl_lock(); @@ -1154,11 +1160,14 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, int flags) { struct tap_queue *q = container_of(sock, struct tap_queue, sock); + struct sk_buff *skb = m->msg_control; int ret; - if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) + if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) { + if (skb) + kfree_skb(skb); return -EINVAL; - ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, - m->msg_control); + } + ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb); if (ret > total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? ret : total_len; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index ae53e899259f..83c591713837 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team, } } +static bool __team_option_inst_tmp_find(const struct list_head *opts, + const struct team_option_inst *needle) +{ + struct team_option_inst *opt_inst; + + list_for_each_entry(opt_inst, opts, tmp_list) + if (opt_inst == needle) + return true; + return false; +} + static int __team_options_register(struct team *team, const struct team_option *option, size_t option_count) @@ -993,7 +1004,8 @@ static void team_port_disable(struct team *team, static void __team_compute_features(struct team *team) { struct team_port *port; - u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; + netdev_features_t vlan_features = TEAM_VLAN_FEATURES & + NETIF_F_ALL_FOR_ALL; netdev_features_t enc_features = TEAM_ENC_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | @@ -1061,14 +1073,11 @@ static void team_port_leave(struct team *team, struct team_port *port) } #ifdef CONFIG_NET_POLL_CONTROLLER -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int __team_port_enable_netpoll(struct team_port *port) { struct netpoll *np; int err; - if (!team->dev->npinfo) - return 0; - np = kzalloc(sizeof(*np), GFP_KERNEL); if (!np) return -ENOMEM; @@ -1082,6 +1091,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port) return err; } +static int team_port_enable_netpoll(struct team_port *port) +{ + if (!port->team->dev->npinfo) + return 0; + + return __team_port_enable_netpoll(port); +} + static void team_port_disable_netpoll(struct team_port *port) { struct netpoll *np = port->np; @@ -1096,7 +1113,7 @@ static void team_port_disable_netpoll(struct team_port *port) kfree(np); } #else -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int team_port_enable_netpoll(struct team_port *port) { return 0; } @@ 
-1197,11 +1214,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_dev_open; } - netif_addr_lock_bh(dev); - dev_uc_sync_multiple(port_dev, dev); - dev_mc_sync_multiple(port_dev, dev); - netif_addr_unlock_bh(dev); - err = vlan_vids_add_by_dev(port_dev, dev); if (err) { netdev_err(dev, "Failed to add vlan ids to device %s\n", @@ -1209,7 +1221,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_vids_add; } - err = team_port_enable_netpoll(team, port); + err = team_port_enable_netpoll(port); if (err) { netdev_err(dev, "Failed to enable netpoll on device %s\n", portname); @@ -1241,6 +1253,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_option_port_add; } + netif_addr_lock_bh(dev); + dev_uc_sync_multiple(port_dev, dev); + dev_mc_sync_multiple(port_dev, dev); + netif_addr_unlock_bh(dev); + port->index = -1; list_add_tail_rcu(&port->list, &team->port_list); team_port_enable(team, port); @@ -1265,8 +1282,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev) vlan_vids_del_by_dev(port_dev, dev); err_vids_add: - dev_uc_unsync(port_dev, dev); - dev_mc_unsync(port_dev, dev); dev_close(port_dev); err_dev_open: @@ -1903,7 +1918,7 @@ static int team_netpoll_setup(struct net_device *dev, mutex_lock(&team->lock); list_for_each_entry(port, &team->port_list, list) { - err = team_port_enable_netpoll(team, port); + err = __team_port_enable_netpoll(port); if (err) { __team_netpoll_cleanup(team); break; @@ -2394,7 +2409,7 @@ static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq, if (!nlh) { err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) - goto errout; + return err; goto send_done; } @@ -2563,6 +2578,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) if (err) goto team_put; opt_inst->changed = true; + + /* dumb/evil user-space can send us duplicate opt, + * keep only the last one + */ + if (__team_option_inst_tmp_find(&opt_inst_list, + opt_inst)) + continue; + list_add(&opt_inst->tmp_list, &opt_inst_list); } if (!opt_found) { @@ -2680,7 +2703,7 @@ static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq, if (!nlh) { err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) - goto errout; + return err; goto send_done; } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 42bb820a56c9..8f8701682bd5 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -534,6 +534,14 @@ static void tun_queue_purge(struct tun_file *tfile) skb_queue_purge(&tfile->sk.sk_error_queue); } +static void tun_cleanup_tx_array(struct tun_file *tfile) +{ + if (tfile->tx_array.ring.queue) { + skb_array_cleanup(&tfile->tx_array); + memset(&tfile->tx_array, 0, sizeof(tfile->tx_array)); + } +} + static void __tun_detach(struct tun_file *tfile, bool clean) { struct tun_file *ntfile; @@ -575,8 +583,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean) tun->dev->reg_state == NETREG_REGISTERED) unregister_netdevice(tun->dev); } - if (tun) - skb_array_cleanup(&tfile->tx_array); + tun_cleanup_tx_array(tfile); sock_put(&tfile->sk); } } @@ -616,11 +623,13 @@ static void tun_detach_all(struct net_device *dev) /* Drop read queue */ tun_queue_purge(tfile); sock_put(&tfile->sk); + tun_cleanup_tx_array(tfile); } list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { tun_enable_queue(tfile); tun_queue_purge(tfile); sock_put(&tfile->sk); + tun_cleanup_tx_array(tfile); } BUG_ON(tun->numdisabled != 0); 
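The tx_array changes above work by making teardown idempotent rather than by proving a single owner: guard on the queue pointer, release, then zero the struct so any later call is a no-op. Reduced to its essence outside the kernel — plain malloc()/free() standing in for skb_array_init()/skb_array_cleanup(), struct and function names hypothetical:

    #include <stdlib.h>
    #include <string.h>

    struct ring {
        void *queue;                /* NULL until initialized */
    };

    /* Safe to call from every exit path, any number of times. */
    static void ring_cleanup(struct ring *r)
    {
        if (!r->queue)
            return;                 /* never initialized, or already freed */
        free(r->queue);
        memset(r, 0, sizeof(*r));   /* arm for a repeat call */
    }

This is why both __tun_detach() and tun_detach_all() can call tun_cleanup_tx_array() unconditionally without risking a double free.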
@@ -1306,6 +1315,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, else *skb_xdp = 0; + local_bh_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog && !*skb_xdp) { @@ -1324,8 +1334,11 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, get_page(alloc_frag->page); alloc_frag->offset += buflen; err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); + xdp_do_flush_map(); if (err) goto err_redirect; + rcu_read_unlock(); + local_bh_enable(); return NULL; case XDP_TX: xdp_xmit = true; @@ -1347,6 +1360,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, skb = build_skb(buf, buflen); if (!skb) { rcu_read_unlock(); + local_bh_enable(); return ERR_PTR(-ENOMEM); } @@ -1358,11 +1372,13 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, if (xdp_xmit) { skb->dev = tun->dev; generic_xdp_tx(skb, xdp_prog); - rcu_read_lock(); + rcu_read_unlock(); + local_bh_enable(); return NULL; } rcu_read_unlock(); + local_bh_enable(); return skb; @@ -1370,6 +1386,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, put_page(alloc_frag->page); err_xdp: rcu_read_unlock(); + local_bh_enable(); this_cpu_inc(tun->pcpu_stats->rx_dropped); return NULL; } @@ -1539,16 +1556,19 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, struct bpf_prog *xdp_prog; int ret; + local_bh_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) { ret = do_xdp_generic(xdp_prog, skb); if (ret != XDP_PASS) { rcu_read_unlock(); + local_bh_enable(); return total_len; } } rcu_read_unlock(); + local_bh_enable(); } rxhash = __skb_get_hash_symmetric(skb); @@ -1628,7 +1648,8 @@ static ssize_t tun_put_user(struct tun_struct *tun, return -EINVAL; if (virtio_net_hdr_from_skb(skb, &gso, - tun_is_little_endian(tun), true)) { + tun_is_little_endian(tun), true, + vlan_hlen)) { struct skb_shared_info *sinfo = skb_shinfo(skb); pr_err("unexpected GSO type: " "0x%x, gso_size %d, hdr_len %d\n", @@ -1734,8 +1755,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, tun_debug(KERN_INFO, tun, "tun_do_read\n"); - if (!iov_iter_count(to)) + if (!iov_iter_count(to)) { + if (skb) + kfree_skb(skb); return 0; + } if (!skb) { /* Read frames from ring */ @@ -1851,22 +1875,24 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, { struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = __tun_get(tfile); + struct sk_buff *skb = m->msg_control; int ret; - if (!tun) - return -EBADFD; + if (!tun) { + ret = -EBADFD; + goto out_free_skb; + } if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { ret = -EINVAL; - goto out; + goto out_put_tun; } if (flags & MSG_ERRQUEUE) { ret = sock_recv_errqueue(sock->sk, m, total_len, SOL_PACKET, TUN_TX_TIMESTAMP); goto out; } - ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, - m->msg_control); + ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb); if (ret > (ssize_t)total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? 
ret : total_len; @@ -1874,6 +1900,13 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, out: tun_put(tun); return ret; + +out_put_tun: + tun_put(tun); +out_free_skb: + if (skb) + kfree_skb(skb); + return ret; } static int tun_peek_len(struct socket *sock) @@ -2144,6 +2177,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg) features |= NETIF_F_TSO6; arg &= ~(TUN_F_TSO4|TUN_F_TSO6); } + + arg &= ~TUN_F_UFO; } /* This gives the user a way to test for new features in future by @@ -2252,6 +2287,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, int le; int ret; +#ifdef CONFIG_ANDROID_PARANOID_NETWORK + if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN)) { + return -EPERM; + } +#endif + if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) { if (copy_from_user(&ifr, argp, ifreq_len)) return -EFAULT; @@ -2609,6 +2650,8 @@ static int tun_chr_open(struct inode *inode, struct file * file) sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); + memset(&tfile->tx_array, 0, sizeof(tfile->tx_array)); + return 0; } diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 3d4f7959dabb..b1b3d8f7e67d 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -642,10 +642,12 @@ static void ax88772_restore_phy(struct usbnet *dev) priv->presvd_phy_advertise); /* Restore BMCR */ + if (priv->presvd_phy_bmcr & BMCR_ANENABLE) + priv->presvd_phy_bmcr |= BMCR_ANRESTART; + asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR, priv->presvd_phy_bmcr); - mii_nway_restart(&dev->mii); priv->presvd_phy_advertise = 0; priv->presvd_phy_bmcr = 0; } diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 05dca3e5c93d..178b956501a7 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -895,6 +895,12 @@ static const struct usb_device_id products[] = { USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, +}, { + /* Cinterion AHS3 modem by GEMALTO */ + USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0055, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&wwan_info, }, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 7220cd620717..0362acd5cdca 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -609,7 +609,7 @@ static const struct driver_info cdc_mbim_info_ndp_to_end = { */ static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = { .description = "CDC MBIM", - .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN, + .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP, .bind = cdc_mbim_bind, .unbind = cdc_mbim_unbind, .manage_power = cdc_mbim_manage_power, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 47cab1bde065..f5316ab68a0a 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -771,7 +771,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ int err; u8 iface_no; struct usb_cdc_parsed_header hdr; - u16 curr_ntb_format; + __le16 curr_ntb_format; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) @@ -889,7 +889,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ goto error2; } - if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) { + if (curr_ntb_format == cpu_to_le16(USB_CDC_NCM_NTB32_FORMAT)) { 
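+ /* curr_ntb_format holds the raw little-endian wire value, so the + * constant side of the comparison above must be cpu_to_le16() */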
dev_info(&intf->dev, "resetting NTB format to 16-bit"); err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT, USB_TYPE_CLASS | USB_DIR_OUT @@ -1124,7 +1124,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) * accordingly. Otherwise, we should check here. */ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) - delayed_ndp_size = ctx->max_ndp_size; + delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus); else delayed_ndp_size = 0; @@ -1285,7 +1285,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) /* If requested, put NDP at end of frame. */ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data; - cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size); + cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size); nth16->wNdpIndex = cpu_to_le16(skb_out->len); skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 0161f77641fa..9e3f632e22f1 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -928,7 +928,8 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset, offset += 0x100; else ret = -EINVAL; - ret = lan78xx_read_raw_otp(dev, offset, length, data); + if (!ret) + ret = lan78xx_read_raw_otp(dev, offset, length, data); } return ret; @@ -1215,6 +1216,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) mod_timer(&dev->stat_monitor, jiffies + STAT_UPDATE_TIMER); } + + tasklet_schedule(&dev->bh); } return ret; @@ -2082,10 +2085,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) dev->fc_autoneg = phydev->autoneg; - phy_start(phydev); - - netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); - return 0; error: @@ -2351,6 +2350,7 @@ static int lan78xx_reset(struct lan78xx_net *dev) u32 buf; int ret = 0; unsigned long timeout; + u8 sig; ret = lan78xx_read_reg(dev, HW_CFG, &buf); buf |= HW_CFG_LRST_; @@ -2396,6 +2396,7 @@ static int lan78xx_reset(struct lan78xx_net *dev) buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE; dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; dev->rx_qlen = 4; + dev->tx_qlen = 4; } ret = lan78xx_write_reg(dev, BURST_CAP, buf); @@ -2449,6 +2450,15 @@ static int lan78xx_reset(struct lan78xx_net *dev) /* LAN7801 only has RGMII mode */ if (dev->chipid == ID_REV_CHIP_ID_7801_) buf &= ~MAC_CR_GMII_EN_; + + if (dev->chipid == ID_REV_CHIP_ID_7800_) { + ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig); + if (!ret && sig != EEPROM_INDICATOR) { + /* Implies there is no external eeprom. Set mac speed */ + netdev_info(dev->net, "No External EEPROM. 
Setting MAC Speed\n"); + buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_; + } + } ret = lan78xx_write_reg(dev, MAC_CR, buf); ret = lan78xx_read_reg(dev, MAC_TX, &buf); @@ -2511,9 +2521,9 @@ static int lan78xx_open(struct net_device *net) if (ret < 0) goto done; - ret = lan78xx_phy_init(dev); - if (ret < 0) - goto done; + phy_start(net->phydev); + + netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); /* for Link Check */ if (dev->urb_intr) { @@ -2574,13 +2584,8 @@ static int lan78xx_stop(struct net_device *net) if (timer_pending(&dev->stat_monitor)) del_timer_sync(&dev->stat_monitor); - phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); - phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); - - phy_stop(net->phydev); - phy_disconnect(net->phydev); - - net->phydev = NULL; + if (net->phydev) + phy_stop(net->phydev); clear_bit(EVENT_DEV_OPEN, &dev->flags); netif_stop_queue(net); @@ -2862,8 +2867,7 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) if (ret < 0) { netdev_warn(dev->net, "lan78xx_setup_irq_domain() failed : %d", ret); - kfree(pdata); - return ret; + goto out1; } dev->net->hard_header_len += TX_OVERHEAD; @@ -2871,14 +2875,32 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) /* Init all registers */ ret = lan78xx_reset(dev); + if (ret) { + netdev_warn(dev->net, "Registers INIT FAILED...."); + goto out2; + } ret = lan78xx_mdio_init(dev); + if (ret) { + netdev_warn(dev->net, "MDIO INIT FAILED....."); + goto out2; + } dev->net->flags |= IFF_MULTICAST; pdata->wol = WAKE_MAGIC; return ret; + +out2: + lan78xx_remove_irq_domain(dev); + +out1: + netdev_warn(dev->net, "Bind routine FAILED"); + cancel_work_sync(&pdata->set_multicast); + cancel_work_sync(&pdata->set_vlan); + kfree(pdata); + return ret; } static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) @@ -2890,6 +2912,8 @@ static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) lan78xx_remove_mdio(dev); if (pdata) { + cancel_work_sync(&pdata->set_multicast); + cancel_work_sync(&pdata->set_vlan); netif_dbg(dev, ifdown, dev->net, "free pdata"); kfree(pdata); pdata = NULL; @@ -3175,6 +3199,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) pkt_cnt = 0; count = 0; length = 0; + spin_lock_irqsave(&tqp->lock, flags); for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) { if (skb_is_gso(skb)) { if (pkt_cnt) { @@ -3183,7 +3208,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) } count = 1; length = skb->len - TX_OVERHEAD; - skb2 = skb_dequeue(tqp); + __skb_unlink(skb, tqp); + spin_unlock_irqrestore(&tqp->lock, flags); goto gso_skb; } @@ -3192,6 +3218,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32)); pkt_cnt++; } + spin_unlock_irqrestore(&tqp->lock, flags); /* copy to a single skb */ skb = alloc_skb(skb_totallen, GFP_ATOMIC); @@ -3476,8 +3503,13 @@ static void lan78xx_disconnect(struct usb_interface *intf) return; udev = interface_to_usbdev(intf); - net = dev->net; + + phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); + phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); + + phy_disconnect(net->phydev); + unregister_netdev(net); cancel_delayed_work_sync(&dev->wq); @@ -3637,8 +3669,14 @@ static int lan78xx_probe(struct usb_interface *intf, pm_runtime_set_autosuspend_delay(&udev->dev, DEFAULT_AUTOSUSPEND_DELAY); + ret = lan78xx_phy_init(dev); + if (ret < 0) + goto out4; + return 0; +out4: + unregister_netdev(netdev); out3: 
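/* fall-through unwind: each label below undoes one earlier probe step */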
lan78xx_unbind(dev, intf); out2: @@ -3986,7 +4024,7 @@ static int lan78xx_reset_resume(struct usb_interface *intf) lan78xx_reset(dev); - lan78xx_phy_init(dev); + phy_start(dev->net->phydev); return lan78xx_resume(intf); } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 8d4a6f7cba61..6d3811c869fd 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -261,9 +261,11 @@ static void qmi_wwan_netdev_setup(struct net_device *net) net->hard_header_len = 0; net->addr_len = 0; net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; + set_bit(EVENT_NO_IP_ALIGN, &dev->flags); netdev_dbg(net, "mode: raw IP\n"); } else if (!net->header_ops) { /* don't bother if already set */ ether_setup(net); + clear_bit(EVENT_NO_IP_ALIGN, &dev->flags); netdev_dbg(net, "mode: Ethernet\n"); } @@ -824,7 +826,7 @@ static int qmi_wwan_resume(struct usb_interface *intf) static const struct driver_info qmi_wwan_info = { .description = "WWAN/QMI device", - .flags = FLAG_WWAN, + .flags = FLAG_WWAN | FLAG_SEND_ZLP, .bind = qmi_wwan_bind, .unbind = qmi_wwan_unbind, .manage_power = qmi_wwan_manage_power, @@ -833,7 +835,7 @@ static const struct driver_info qmi_wwan_info = { static const struct driver_info qmi_wwan_info_quirk_dtr = { .description = "WWAN/QMI device", - .flags = FLAG_WWAN, + .flags = FLAG_WWAN | FLAG_SEND_ZLP, .bind = qmi_wwan_bind, .unbind = qmi_wwan_unbind, .manage_power = qmi_wwan_manage_power, @@ -1096,11 +1098,17 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x05c6, 0x9080, 8)}, {QMI_FIXED_INTF(0x05c6, 0x9083, 3)}, {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, + {QMI_FIXED_INTF(0x05c6, 0x90b2, 3)}, /* ublox R410M */ {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, + {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, + {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */ {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ + {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ + {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ + {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ @@ -1177,6 +1185,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ + {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ @@ -1202,12 +1211,14 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ + {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK 
MA260 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ + {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ @@ -1234,11 +1245,16 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ + {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ + {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ + {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */ /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ @@ -1330,6 +1346,18 @@ static int qmi_wwan_probe(struct usb_interface *intf, id->driver_info = (unsigned long)&qmi_wwan_info; } + /* There are devices where the same interface number can be + * configured as different functions. We should only bind to + * vendor specific functions when matching on interface number + */ + if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER && + desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) { + dev_dbg(&intf->dev, + "Rejecting interface number match for class %02x\n", + desc->bInterfaceClass); + return -ENODEV; + } + /* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */ if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) { dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n"); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index d51d9abf7986..0fa64cc1a011 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1793,7 +1793,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) tx_data += len; agg->skb_len += len; - agg->skb_num++; + agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1; dev_kfree_skb_any(skb); @@ -3959,7 +3959,8 @@ static int rtl8152_close(struct net_device *netdev) #ifdef CONFIG_PM_SLEEP unregister_pm_notifier(&tp->pm_notifier); #endif - napi_disable(&tp->napi); + if (!test_bit(RTL8152_UNPLUG, &tp->flags)) + napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index d0a113743195..7a6a1fe79309 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -954,10 +954,11 @@ static int smsc75xx_set_features(struct net_device *netdev, /* it's racing here! 
*/ ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); - if (ret < 0) + if (ret < 0) { netdev_warn(dev->net, "Error writing RFE_CTL\n"); - - return ret; + return ret; + } + return 0; } static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 6510e5cc1817..32fc69539126 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -315,6 +315,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev) void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) { struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); + unsigned long flags; int status; if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { @@ -326,10 +327,10 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) if (skb->protocol == 0) skb->protocol = eth_type_trans (skb, dev->net); - u64_stats_update_begin(&stats64->syncp); + flags = u64_stats_update_begin_irqsave(&stats64->syncp); stats64->rx_packets++; stats64->rx_bytes += skb->len; - u64_stats_update_end(&stats64->syncp); + u64_stats_update_end_irqrestore(&stats64->syncp, flags); netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", skb->len + sizeof (struct ethhdr), skb->protocol); @@ -484,7 +485,10 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) return -ENOLINK; } - skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); + if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags)) + skb = __netdev_alloc_skb(dev->net, size, flags); + else + skb = __netdev_alloc_skb_ip_align(dev->net, size, flags); if (!skb) { netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); usbnet_defer_kevent (dev, EVENT_RX_MEMORY); @@ -1247,11 +1251,12 @@ static void tx_complete (struct urb *urb) if (urb->status == 0) { struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); + unsigned long flags; - u64_stats_update_begin(&stats64->syncp); + flags = u64_stats_update_begin_irqsave(&stats64->syncp); stats64->tx_packets += entry->packets; stats64->tx_bytes += entry->length; - u64_stats_update_end(&stats64->syncp); + u64_stats_update_end_irqrestore(&stats64->syncp, flags); } else { dev->net->stats.tx_errors++; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index f5438d0978ca..a69ad39ee57e 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -410,6 +410,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, if (ifmp && (dev->ifindex != 0)) peer->ifindex = ifmp->ifi_index; + peer->gso_max_size = dev->gso_max_size; + peer->gso_max_segs = dev->gso_max_segs; + err = register_netdevice(peer); put_net(net); net = NULL; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 511f8339fa96..910c46b47769 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -116,6 +116,17 @@ struct receive_queue { char name[40]; }; +/* Control VQ buffers: protected by the rtnl lock */ +struct control_buf { + struct virtio_net_ctrl_hdr hdr; + virtio_net_ctrl_ack status; + struct virtio_net_ctrl_mq mq; + u8 promisc; + u8 allmulti; + __virtio16 vid; + u64 offloads; +}; + struct virtnet_info { struct virtio_device *vdev; struct virtqueue *cvq; @@ -164,14 +175,7 @@ struct virtnet_info { struct hlist_node node; struct hlist_node node_dead; - /* Control VQ buffers: protected by the rtnl lock */ - struct virtio_net_ctrl_hdr ctrl_hdr; - virtio_net_ctrl_ack ctrl_status; - struct virtio_net_ctrl_mq ctrl_mq; - u8 ctrl_promisc; - u8 ctrl_allmulti; - u16 ctrl_vid; - u64 ctrl_offloads; + struct control_buf *ctrl; /* Ethtool settings */ u8 
duplex; @@ -260,9 +264,12 @@ static void virtqueue_napi_complete(struct napi_struct *napi, int opaque; opaque = virtqueue_enable_cb_prepare(vq); - if (napi_complete_done(napi, processed) && - unlikely(virtqueue_poll(vq, opaque))) - virtqueue_napi_schedule(napi, vq); + if (napi_complete_done(napi, processed)) { + if (unlikely(virtqueue_poll(vq, opaque))) + virtqueue_napi_schedule(napi, vq); + } else { + virtqueue_disable_cb(vq); + } } static void skb_xmit_done(struct virtqueue *vq) @@ -506,7 +513,7 @@ static struct sk_buff *receive_small(struct net_device *dev, void *orig_data; u32 act; - if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) + if (unlikely(hdr->hdr.gso_type)) goto err_xdp; if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { @@ -625,6 +632,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, void *data; u32 act; + /* Transient failure which in theory could occur if + * in-flight packets from before XDP was enabled reach + * the receive path after XDP is loaded. + */ + if (unlikely(hdr->hdr.gso_type)) + goto err_xdp; + /* This happens when rx buffer size is underestimated */ if (unlikely(num_buf > 1 || headroom < virtnet_get_headroom(vi))) { @@ -640,14 +654,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, xdp_page = page; } - /* Transient failure which in theory could occur if - * in-flight packets from before XDP was enabled reach - * the receive path after XDP is loaded. In practice I - * was not able to create this condition. - */ - if (unlikely(hdr->hdr.gso_type)) - goto err_xdp; - /* Allow consuming headroom but reserve enough space to push * the descriptor on if we get an XDP_TX return code. */ @@ -681,7 +687,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, trace_xdp_exception(vi->dev, xdp_prog, act); ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); if (unlikely(xdp_page != page)) - goto err_xdp; + put_page(page); rcu_read_unlock(); goto xdp_xmit; default: @@ -714,7 +720,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, int num_skb_frags; buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx); - if (unlikely(!ctx)) { + if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers out of %d missing\n", dev->name, num_buf, virtio16_to_cpu(vi->vdev, @@ -770,7 +776,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, rcu_read_unlock(); err_skb: put_page(page); - while (--num_buf) { + while (num_buf-- > 1) { buf = virtqueue_get_buf(rq->vq, &len); if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers missing\n", @@ -1231,7 +1237,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) hdr = skb_vnet_hdr(skb); if (virtio_net_hdr_from_skb(skb, &hdr->hdr, - virtio_is_little_endian(vi->vdev), false)) + virtio_is_little_endian(vi->vdev), false, + 0)) BUG(); if (vi->mergeable_rx_bufs) @@ -1337,25 +1344,25 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, /* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); - vi->ctrl_status = ~0; - vi->ctrl_hdr.class = class; - vi->ctrl_hdr.cmd = cmd; + vi->ctrl->status = ~0; + vi->ctrl->hdr.class = class; + vi->ctrl->hdr.cmd = cmd; /* Add header */ - sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); + sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); sgs[out_num++] = &hdr; if (out) sgs[out_num++] = out; /* Add return status. 
*/ - sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); + sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); sgs[out_num] = &stat; BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); if (unlikely(!virtqueue_kick(vi->cvq))) - return vi->ctrl_status == VIRTIO_NET_OK; + return vi->ctrl->status == VIRTIO_NET_OK; /* Spin for a response, the kick causes an ioport write, trapping * into the hypervisor, so the request should be handled immediately. @@ -1364,7 +1371,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, !virtqueue_is_broken(vi->cvq)) cpu_relax(); - return vi->ctrl_status == VIRTIO_NET_OK; + return vi->ctrl->status == VIRTIO_NET_OK; } static int virtnet_set_mac_address(struct net_device *dev, void *p) @@ -1475,8 +1482,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) return 0; - vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); - sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq)); + vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); + sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { @@ -1534,22 +1541,22 @@ static void virtnet_set_rx_mode(struct net_device *dev) if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) return; - vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); - vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); + vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); + vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); - sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); + sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC, sg)) dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", - vi->ctrl_promisc ? "en" : "dis"); + vi->ctrl->promisc ? "en" : "dis"); - sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); + sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", - vi->ctrl_allmulti ? "en" : "dis"); + vi->ctrl->allmulti ? 
"en" : "dis"); uc_count = netdev_uc_count(dev); mc_count = netdev_mc_count(dev); @@ -1595,8 +1602,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev, struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; - vi->ctrl_vid = vid; - sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); + vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); + sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg)) @@ -1610,8 +1617,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; - vi->ctrl_vid = vid; - sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); + vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); + sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg)) @@ -1909,9 +1916,9 @@ static int virtnet_restore_up(struct virtio_device *vdev) static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) { struct scatterlist sg; - vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads); + vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); - sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads)); + sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { @@ -1995,8 +2002,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, } /* Make sure NAPI is not using any XDP TX queues for RX. */ - for (i = 0; i < vi->max_queue_pairs; i++) - napi_disable(&vi->rq[i].napi); + if (netif_running(dev)) + for (i = 0; i < vi->max_queue_pairs; i++) + napi_disable(&vi->rq[i].napi); netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); err = _virtnet_set_queues(vi, curr_qp + xdp_qp); @@ -2015,7 +2023,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, } if (old_prog) bpf_prog_put(old_prog); - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + if (netif_running(dev)) + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); } return 0; @@ -2129,6 +2138,7 @@ static void virtnet_free_queues(struct virtnet_info *vi) kfree(vi->rq); kfree(vi->sq); + kfree(vi->ctrl); } static void _free_receive_bufs(struct virtnet_info *vi) @@ -2321,6 +2331,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) { int i; + vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); + if (!vi->ctrl) + goto err_ctrl; vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); if (!vi->sq) goto err_sq; @@ -2346,6 +2359,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) err_rq: kfree(vi->sq); err_sq: + kfree(vi->ctrl); +err_ctrl: return -ENOMEM; } @@ -2640,8 +2655,8 @@ static int virtnet_probe(struct virtio_device *vdev) /* Assume link up if device can't report link status, otherwise get link status from config. 
*/ + netif_carrier_off(dev); if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { - netif_carrier_off(dev); schedule_work(&vi->config_work); } else { vi->status = VIRTIO_NET_S_LINK_UP; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index d1c7029ded7c..3628fd7e606f 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -369,6 +369,11 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { + /* Prevent any &gdesc->tcd field from being (speculatively) + * read before (&gdesc->tcd)->gen is read. + */ + dma_rmb(); + completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX( &gdesc->tcd), tq, adapter->pdev, adapter); @@ -1099,6 +1104,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, gdesc->txd.tci = skb_vlan_tag_get(skb); } + /* Ensure that the write to (&gdesc->txd)->gen will be observed after + * all other writes to &gdesc->txd. + */ + dma_wmb(); + /* finally flips the GEN bit of the SOP desc. */ gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^ VMXNET3_TXD_GEN); @@ -1286,6 +1296,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, */ break; } + + /* Prevent any rcd field from being (speculatively) read before + * rcd->gen is read. + */ + dma_rmb(); + BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 && rcd->rqID != rq->dataRingQid); idx = rcd->rxdIdx; @@ -1515,6 +1531,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, ring->next2comp = idx; num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring); ring = rq->rx_ring + ring_idx; + + /* Ensure that the writes to rxd->gen bits will be observed + * after all other writes to rxd objects. 
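
The dma_rmb()/dma_wmb() calls added to vmxnet3 above enforce the classic descriptor-ring contract: the producer must publish the descriptor body before flipping the gen (ownership) bit, and the consumer must read gen before touching the body. Below is a userspace analogue for illustration only, using C11 release/acquire ordering in place of the DMA barriers; the struct and function names are invented for the sketch.

#include <stdatomic.h>
#include <stdio.h>

struct desc {
	int payload;           /* descriptor body */
	_Atomic int gen;       /* ownership/generation bit */
};

static void produce(struct desc *d, int data, int gen)
{
	d->payload = data;
	/* release pairs with dma_wmb(): body visible before gen flips */
	atomic_store_explicit(&d->gen, gen, memory_order_release);
}

static int consume(struct desc *d, int gen, int *data)
{
	/* acquire pairs with dma_rmb(): gen is read before the body */
	if (atomic_load_explicit(&d->gen, memory_order_acquire) != gen)
		return 0;       /* descriptor not ours yet */
	*data = d->payload;
	return 1;
}

int main(void)
{
	struct desc d = { .payload = 0, .gen = 0 };
	int v;

	produce(&d, 42, 1);
	if (consume(&d, 1, &v))
		printf("got %d\n", v);
	return 0;
}
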
+ */ + dma_wmb(); + while (num_to_alloc) { vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd, &rxCmdDesc); @@ -1616,7 +1638,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, rq->rx_ring[i].basePA); rq->rx_ring[i].base = NULL; } - rq->buf_info[i] = NULL; } if (rq->data_ring.base) { @@ -1638,6 +1659,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, (rq->rx_ring[0].size + rq->rx_ring[1].size); dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0], rq->buf_info_pa); + rq->buf_info[0] = rq->buf_info[1] = NULL; } } @@ -2675,7 +2697,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p) /* ==================== initialization and cleanup routines ============ */ static int -vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) +vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter) { int err; unsigned long mmio_start, mmio_len; @@ -2687,30 +2709,12 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) return err; } - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { - dev_err(&pdev->dev, - "pci_set_consistent_dma_mask failed\n"); - err = -EIO; - goto err_set_mask; - } - *dma64 = true; - } else { - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { - dev_err(&pdev->dev, - "pci_set_dma_mask failed\n"); - err = -EIO; - goto err_set_mask; - } - *dma64 = false; - } - err = pci_request_selected_regions(pdev, (1 << 2) - 1, vmxnet3_driver_name); if (err) { dev_err(&pdev->dev, "Failed to request region for adapter: error %d\n", err); - goto err_set_mask; + goto err_enable_device; } pci_set_master(pdev); @@ -2738,7 +2742,7 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) iounmap(adapter->hw_addr0); err_ioremap: pci_release_selected_regions(pdev, (1 << 2) - 1); -err_set_mask: +err_enable_device: pci_disable_device(pdev); return err; } @@ -3243,7 +3247,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, #endif }; int err; - bool dma64 = false; /* stupid gcc */ + bool dma64; u32 ver; struct net_device *netdev; struct vmxnet3_adapter *adapter; @@ -3289,6 +3293,24 @@ vmxnet3_probe_device(struct pci_dev *pdev, adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { + dev_err(&pdev->dev, + "pci_set_consistent_dma_mask failed\n"); + err = -EIO; + goto err_set_mask; + } + dma64 = true; + } else { + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { + dev_err(&pdev->dev, + "pci_set_dma_mask failed\n"); + err = -EIO; + goto err_set_mask; + } + dma64 = false; + } + spin_lock_init(&adapter->cmd_lock); adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, sizeof(struct vmxnet3_adapter), @@ -3296,7 +3318,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { dev_err(&pdev->dev, "Failed to map dma\n"); err = -EFAULT; - goto err_dma_map; + goto err_set_mask; } adapter->shared = dma_alloc_coherent( &adapter->pdev->dev, @@ -3347,7 +3369,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, } #endif /* VMXNET3_RSS */ - err = vmxnet3_alloc_pci_resources(adapter, &dma64); + err = vmxnet3_alloc_pci_resources(adapter); if (err < 0) goto err_alloc_pci; @@ -3493,7 +3515,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, err_alloc_shared: dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, sizeof(struct 
vmxnet3_adapter), PCI_DMA_TODEVICE); -err_dma_map: +err_set_mask: free_netdev(netdev); return err; } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 7dc3bcac3506..5c6a8ef54aec 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -579,12 +579,13 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s if (!IS_ERR(neigh)) { sock_confirm_neigh(skb, neigh); ret = neigh_output(neigh, skb); + rcu_read_unlock_bh(); + return ret; } rcu_read_unlock_bh(); err: - if (unlikely(ret < 0)) - vrf_tx_error(skb->dev, skb); + vrf_tx_error(skb->dev, skb); return ret; } @@ -674,8 +675,9 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, struct sock *sk, struct sk_buff *skb) { - /* don't divert multicast */ - if (ipv4_is_multicast(ip_hdr(skb)->daddr)) + /* don't divert multicast or local broadcast */ + if (ipv4_is_multicast(ip_hdr(skb)->daddr) || + ipv4_is_lbcast(ip_hdr(skb)->daddr)) return skb; if (qdisc_tx_is_default(vrf_dev)) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index d7c49cf1d5e9..13d39a72fe0d 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk, flush = 0; out: - skb_gro_remcsum_cleanup(skb, &grc); - skb->remcsum_offload = 0; - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final_remcsum(skb, pp, flush, &grc); return pp; } @@ -638,8 +636,61 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); } -/* Add new entry to forwarding table -- assumes lock held */ +static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, + const u8 *mac, __u16 state, + __be32 src_vni, __u8 ndm_flags) +{ + struct vxlan_fdb *f; + + f = kmalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return NULL; + f->state = state; + f->flags = ndm_flags; + f->updated = f->used = jiffies; + f->vni = src_vni; + INIT_LIST_HEAD(&f->remotes); + memcpy(f->eth_addr, mac, ETH_ALEN); + + return f; +} + static int vxlan_fdb_create(struct vxlan_dev *vxlan, + const u8 *mac, union vxlan_addr *ip, + __u16 state, __be16 port, __be32 src_vni, + __be32 vni, __u32 ifindex, __u8 ndm_flags, + struct vxlan_fdb **fdb) +{ + struct vxlan_rdst *rd = NULL; + struct vxlan_fdb *f; + int rc; + + if (vxlan->cfg.addrmax && + vxlan->addrcnt >= vxlan->cfg.addrmax) + return -ENOSPC; + + netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); + f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags); + if (!f) + return -ENOMEM; + + rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); + if (rc < 0) { + kfree(f); + return rc; + } + + ++vxlan->addrcnt; + hlist_add_head_rcu(&f->hlist, + vxlan_fdb_head(vxlan, mac, src_vni)); + + *fdb = f; + + return 0; +} + +/* Add new entry to forwarding table -- assumes lock held */ +static int vxlan_fdb_update(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, __u16 state, __u16 flags, __be16 port, __be32 src_vni, __be32 vni, @@ -689,37 +740,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, if (!(flags & NLM_F_CREATE)) return -ENOENT; - if (vxlan->cfg.addrmax && - vxlan->addrcnt >= vxlan->cfg.addrmax) - return -ENOSPC; - /* Disallow replace to add a multicast entry */ if ((flags & NLM_F_REPLACE) && (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) return -EOPNOTSUPP; netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); - f = kmalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - return -ENOMEM; - - notify = 1; - f->state = state; - f->flags = ndm_flags; - f->updated = f->used 
= jiffies; - f->vni = src_vni; - INIT_LIST_HEAD(&f->remotes); - memcpy(f->eth_addr, mac, ETH_ALEN); - - rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); - if (rc < 0) { - kfree(f); + rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni, + vni, ifindex, ndm_flags, &f); + if (rc < 0) return rc; - } - - ++vxlan->addrcnt; - hlist_add_head_rcu(&f->hlist, - vxlan_fdb_head(vxlan, mac, src_vni)); + notify = 1; } if (notify) { @@ -743,13 +774,15 @@ static void vxlan_fdb_free(struct rcu_head *head) kfree(f); } -static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) +static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, + bool do_notify) { netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr); --vxlan->addrcnt; - vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); + if (do_notify) + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); hlist_del_rcu(&f->hlist); call_rcu(&f->rcu, vxlan_fdb_free); @@ -865,7 +898,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return -EAFNOSUPPORT; spin_lock_bh(&vxlan->hash_lock); - err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags, + err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags, port, src_vni, vni, ifindex, ndm->ndm_flags); spin_unlock_bh(&vxlan->hash_lock); @@ -899,7 +932,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, goto out; } - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); out: return 0; @@ -1008,7 +1041,7 @@ static bool vxlan_snoop(struct net_device *dev, /* close off race between vxlan_flush and incoming packets */ if (netif_running(dev)) - vxlan_fdb_create(vxlan, src_mac, src_ip, + vxlan_fdb_update(vxlan, src_mac, src_ip, NUD_REACHABLE, NLM_F_EXCL|NLM_F_CREATE, vxlan->cfg.dst_port, @@ -1623,26 +1656,19 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) { struct vxlan_dev *vxlan = netdev_priv(dev); - struct nd_msg *msg; - const struct ipv6hdr *iphdr; const struct in6_addr *daddr; - struct neighbour *n; + const struct ipv6hdr *iphdr; struct inet6_dev *in6_dev; + struct neighbour *n; + struct nd_msg *msg; in6_dev = __in6_dev_get(dev); if (!in6_dev) goto out; - if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg))) - goto out; - iphdr = ipv6_hdr(skb); daddr = &iphdr->daddr; - msg = (struct nd_msg *)(iphdr + 1); - if (msg->icmph.icmp6_code != 0 || - msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) - goto out; if (ipv6_addr_loopback(daddr) || ipv6_addr_is_multicast(&msg->target)) @@ -2162,6 +2188,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } ndst = &rt->dst; + if (skb_dst(skb)) { + int mtu = dst_mtu(ndst) - VXLAN_HEADROOM; + + skb_dst_update_pmtu(skb, mtu); + } + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), @@ -2197,6 +2229,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto out_unlock; } + if (skb_dst(skb)) { + int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM; + + skb_dst_update_pmtu(skb, mtu); + } + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? 
: ip6_dst_hoplimit(ndst); skb_scrub_packet(skb, xnet); @@ -2240,11 +2278,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_rdst *rdst, *fdst = NULL; const struct ip_tunnel_info *info; - struct ethhdr *eth; bool did_rsc = false; - struct vxlan_rdst *rdst, *fdst = NULL; struct vxlan_fdb *f; + struct ethhdr *eth; __be32 vni = 0; info = skb_tunnel_info(skb); @@ -2269,12 +2307,14 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) if (ntohs(eth->h_proto) == ETH_P_ARP) return arp_reduce(dev, skb, vni); #if IS_ENABLED(CONFIG_IPV6) - else if (ntohs(eth->h_proto) == ETH_P_IPV6) { - struct ipv6hdr *hdr, _hdr; - if ((hdr = skb_header_pointer(skb, - skb_network_offset(skb), - sizeof(_hdr), &_hdr)) && - hdr->nexthdr == IPPROTO_ICMPV6) + else if (ntohs(eth->h_proto) == ETH_P_IPV6 && + pskb_may_pull(skb, sizeof(struct ipv6hdr) + + sizeof(struct nd_msg)) && + ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { + struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1); + + if (m->icmph.icmp6_code == 0 && + m->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) return neigh_reduce(dev, skb, vni); } #endif @@ -2355,7 +2395,7 @@ static void vxlan_cleanup(unsigned long arg) "garbage collect %pM\n", f->eth_addr); f->state = NUD_STALE; - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); } else if (time_before(timeout, next_timer)) next_timer = timeout; } @@ -2406,7 +2446,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni) spin_lock_bh(&vxlan->hash_lock); f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); if (f) - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); spin_unlock_bh(&vxlan->hash_lock); } @@ -2460,7 +2500,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) continue; /* the all_zeros_mac entry is deleted at vxlan_uninit */ if (!is_zero_ether_addr(f->eth_addr)) - vxlan_fdb_destroy(vxlan, f); + vxlan_fdb_destroy(vxlan, f, true); } } spin_unlock_bh(&vxlan->hash_lock); @@ -3110,6 +3150,11 @@ static void vxlan_config_apply(struct net_device *dev, max_mtu = lowerdev->mtu - (use_ipv6 ? 
VXLAN6_HEADROOM : VXLAN_HEADROOM); + if (max_mtu < ETH_MIN_MTU) + max_mtu = ETH_MIN_MTU; + + if (!changelink && !conf->mtu) + dev->mtu = max_mtu; } if (dev->mtu > max_mtu) @@ -3147,6 +3192,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_fdb *f = NULL; int err; err = vxlan_dev_configure(net, dev, conf, false, extack); @@ -3160,24 +3206,35 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, err = vxlan_fdb_create(vxlan, all_zeros_mac, &vxlan->default_dst.remote_ip, NUD_REACHABLE | NUD_PERMANENT, - NLM_F_EXCL | NLM_F_CREATE, vxlan->cfg.dst_port, vxlan->default_dst.remote_vni, vxlan->default_dst.remote_vni, vxlan->default_dst.remote_ifindex, - NTF_SELF); + NTF_SELF, &f); if (err) return err; } err = register_netdevice(dev); + if (err) + goto errout; + + err = rtnl_configure_link(dev, NULL); if (err) { - vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni); - return err; + unregister_netdevice(dev); + goto errout; } + /* notify default fdb entry */ + if (f) + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH); + list_add(&vxlan->next, &vn->vxlan_list); return 0; +errout: + if (f) + vxlan_fdb_destroy(vxlan, f, false); + return err; } static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], @@ -3406,6 +3463,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], struct vxlan_rdst *dst = &vxlan->default_dst; struct vxlan_rdst old_dst; struct vxlan_config conf; + struct vxlan_fdb *f = NULL; int err; err = vxlan_nl2conf(tb, data, @@ -3434,16 +3492,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], err = vxlan_fdb_create(vxlan, all_zeros_mac, &dst->remote_ip, NUD_REACHABLE | NUD_PERMANENT, - NLM_F_CREATE | NLM_F_APPEND, vxlan->cfg.dst_port, dst->remote_vni, dst->remote_vni, dst->remote_ifindex, - NTF_SELF); + NTF_SELF, &f); if (err) { spin_unlock_bh(&vxlan->hash_lock); return err; } + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH); } spin_unlock_bh(&vxlan->hash_lock); } diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 0d2e00ece804..f3c1d5245978 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -574,7 +574,10 @@ static void ppp_timer(unsigned long arg) ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, 0, NULL); proto->restart_counter--; - } else + } else if (netif_carrier_ok(proto->dev)) + ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, + 0, NULL); + else ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0, 0, NULL); break; diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 166920ae23f8..a19fb7b6dc07 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -114,4 +114,6 @@ config USB_NET_RNDIS_WLAN If you choose to build a module, it'll be called rndis_wlan. 
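
The vxlan_config_apply() hunk a little earlier derives the tunnel MTU from the lower device's MTU minus encapsulation headroom, and now floors the result at ETH_MIN_MTU. A small self-contained sketch of that arithmetic; the constants mirror the kernel's definitions at the time (outer IP + UDP + VXLAN + inner Ethernet) but are restated here so the example stands alone.

#include <stdio.h>

#define ETH_MIN_MTU     68
#define VXLAN_HEADROOM  (20 + 8 + 8 + 14)  /* IPv4 + UDP + VXLAN + Ethernet */
#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)  /* IPv6 variant */

static int vxlan_max_mtu(int lowerdev_mtu, int use_ipv6)
{
	int max_mtu = lowerdev_mtu -
		      (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

	if (max_mtu < ETH_MIN_MTU)  /* the added clamp: never advertise < 68 */
		max_mtu = ETH_MIN_MTU;
	return max_mtu;
}

int main(void)
{
	printf("lower 1500, v4: %d\n", vxlan_max_mtu(1500, 0)); /* 1450 */
	printf("lower 100,  v6: %d\n", vxlan_max_mtu(100, 1));  /* clamped to 68 */
	return 0;
}
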
+source "drivers/net/wireless/bcmdhd/Kconfig" + endif # WLAN diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 7fc96306712a..c6c15878aecc 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -27,3 +27,5 @@ obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o obj-$(CONFIG_USB_NET_RNDIS_WLAN) += rndis_wlan.o obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o + +obj-$(CONFIG_BCMDHD) += bcmdhd/ diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 5683f1a5330e..df11bb449988 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -2553,7 +2553,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar, } break; case WMI_VDEV_TYPE_STA: - if (vif->bss_conf.qos) + if (sta->wme) arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; break; case WMI_VDEV_TYPE_IBSS: @@ -5955,9 +5955,8 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) sta->addr, smps, err); } - if (changed & IEEE80211_RC_SUPP_RATES_CHANGED || - changed & IEEE80211_RC_NSS_CHANGED) { - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n", + if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n", sta->addr); err = ath10k_station_assoc(ar, arvif->vif, sta, true); @@ -6183,6 +6182,16 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, "mac vdev %d peer delete %pM sta %pK (sta gone)\n", arvif->vdev_id, sta->addr, sta); + if (sta->tdls) { + ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, + sta, + WMI_TDLS_PEER_STATE_TEARDOWN); + if (ret) + ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n", + sta->addr, + WMI_TDLS_PEER_STATE_TEARDOWN, ret); + } + ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); if (ret) ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", @@ -7050,10 +7059,20 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw, { struct ath10k *ar = hw->priv; struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k_peer *peer; u32 bw, smps; spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); + if (!peer) { + spin_unlock_bh(&ar->data_lock); + ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n", + sta->addr, arvif->vdev_id); + return; + } + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", sta->addr, changed, sta->bandwidth, sta->rx_nss, @@ -7801,6 +7820,7 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = { .max_interfaces = 8, .num_different_channels = 1, .beacon_int_infra_match = true, + .beacon_int_min_gcd = 1, #ifdef CONFIG_ATH10K_DFS_CERTIFIED .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | @@ -7924,6 +7944,7 @@ static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = { .max_interfaces = 16, .num_different_channels = 1, .beacon_int_infra_match = true, + .beacon_int_min_gcd = 1, #ifdef CONFIG_ATH10K_DFS_CERTIFIED .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 195dafb98131..d790ea20b95d 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -2577,9 +2577,13 @@ void ath10k_pci_hif_power_down(struct ath10k *ar) */ } -#ifdef CONFIG_PM - static 
int ath10k_pci_hif_suspend(struct ath10k *ar) +{ + /* Nothing to do; the important stuff is in the driver suspend. */ + return 0; +} + +static int ath10k_pci_suspend(struct ath10k *ar) { /* The grace timer can still be counting down and ar->ps_awake be true. * It is known that the device may be asleep after resuming regardless @@ -2592,6 +2596,12 @@ static int ath10k_pci_hif_suspend(struct ath10k *ar) } static int ath10k_pci_hif_resume(struct ath10k *ar) +{ + /* Nothing to do; the important stuff is in the driver resume. */ + return 0; +} + +static int ath10k_pci_resume(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct pci_dev *pdev = ar_pci->pdev; @@ -2615,7 +2625,6 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) return ret; } -#endif static bool ath10k_pci_validate_cal(void *data, size_t size) { @@ -2770,10 +2779,8 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .power_down = ath10k_pci_hif_power_down, .read32 = ath10k_pci_read32, .write32 = ath10k_pci_write32, -#ifdef CONFIG_PM .suspend = ath10k_pci_hif_suspend, .resume = ath10k_pci_hif_resume, -#endif .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom, }; @@ -3401,11 +3408,7 @@ static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev) struct ath10k *ar = dev_get_drvdata(dev); int ret; - if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, - ar->running_fw->fw_file.fw_features)) - return 0; - - ret = ath10k_hif_suspend(ar); + ret = ath10k_pci_suspend(ar); if (ret) ath10k_warn(ar, "failed to suspend hif: %d\n", ret); @@ -3417,11 +3420,7 @@ static __maybe_unused int ath10k_pci_pm_resume(struct device *dev) struct ath10k *ar = dev_get_drvdata(dev); int ret; - if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, - ar->running_fw->fw_file.fw_features)) - return 0; - - ret = ath10k_hif_resume(ar); + ret = ath10k_pci_resume(ar); if (ret) ath10k_warn(ar, "failed to resume hif: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 7a3606dde227..bab876cf25fe 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -5235,7 +5235,8 @@ enum wmi_10_4_vdev_param { #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3) #define WMI_TXBF_STS_CAP_OFFSET_LSB 4 -#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0 +#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70 +#define WMI_TXBF_CONF_IMPLICIT_BF BIT(7) #define WMI_BF_SOUND_DIM_OFFSET_LSB 8 #define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00 diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index f0439f2d566b..173891b11b2d 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -1112,7 +1112,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp, if (!avp->assoc) return false; - skb = ieee80211_nullfunc_get(sc->hw, vif); + skb = ieee80211_nullfunc_get(sc->hw, vif, false); if (!skb) return false; diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index 5e77fe1f5b0d..a41bcbda1d9e 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -479,14 +479,16 @@ ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv) { int i = 0; int ret = 0; + struct rchan_buf *buf; struct rchan *rc = spec_priv->rfs_chan_spec_scan; - for_each_online_cpu(i) - ret += relay_buf_full(*per_cpu_ptr(rc->buf, i)); - - i = num_online_cpus(); + for_each_possible_cpu(i) { + if ((buf = 
*per_cpu_ptr(rc->buf, i))) { + ret += relay_buf_full(buf); + } + } - if (ret == i) + if (ret) return 1; else return 0; diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c index 49ed1afb913c..fe3a8263b224 100644 --- a/drivers/net/wireless/ath/ath9k/tx99.c +++ b/drivers/net/wireless/ath/ath9k/tx99.c @@ -179,6 +179,9 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf, ssize_t len; int r; + if (count < 1) + return -EINVAL; + if (sc->cur_chan->nvifs > 1) return -EOPNOTSUPP; @@ -186,6 +189,8 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf, if (copy_from_user(buf, user_buf, len)) return -EFAULT; + buf[len] = '\0'; + if (strtobool(buf, &start)) return -EINVAL; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 396bf05c6bf6..d8b041f48ca8 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -2892,6 +2892,8 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) struct ath_txq *txq; int tidno; + rcu_read_lock(); + for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) { tid = ath_node_to_tid(an, tidno); txq = tid->txq; @@ -2909,6 +2911,8 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) if (!an->sta) break; /* just one multicast ath_atx_tid */ } + + rcu_read_unlock(); } #ifdef CONFIG_ATH9K_TX99 diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index 5d80be213fac..869f276cc1d8 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -68,12 +68,14 @@ enum CountryCode { CTRY_AUSTRALIA = 36, CTRY_AUSTRIA = 40, CTRY_AZERBAIJAN = 31, + CTRY_BAHAMAS = 44, CTRY_BAHRAIN = 48, CTRY_BANGLADESH = 50, CTRY_BARBADOS = 52, CTRY_BELARUS = 112, CTRY_BELGIUM = 56, CTRY_BELIZE = 84, + CTRY_BERMUDA = 60, CTRY_BOLIVIA = 68, CTRY_BOSNIA_HERZ = 70, CTRY_BRAZIL = 76, @@ -159,6 +161,7 @@ enum CountryCode { CTRY_ROMANIA = 642, CTRY_RUSSIA = 643, CTRY_SAUDI_ARABIA = 682, + CTRY_SERBIA = 688, CTRY_SERBIA_MONTENEGRO = 891, CTRY_SINGAPORE = 702, CTRY_SLOVAKIA = 703, @@ -170,11 +173,13 @@ enum CountryCode { CTRY_SWITZERLAND = 756, CTRY_SYRIA = 760, CTRY_TAIWAN = 158, + CTRY_TANZANIA = 834, CTRY_THAILAND = 764, CTRY_TRINIDAD_Y_TOBAGO = 780, CTRY_TUNISIA = 788, CTRY_TURKEY = 792, CTRY_UAE = 784, + CTRY_UGANDA = 800, CTRY_UKRAINE = 804, CTRY_UNITED_KINGDOM = 826, CTRY_UNITED_STATES = 840, diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index bdd2b4d61f2f..15bbd1e0d912 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -35,6 +35,7 @@ enum EnumRd { FRANCE_RES = 0x31, FCC3_FCCA = 0x3A, FCC3_WORLD = 0x3B, + FCC3_ETSIC = 0x3F, ETSI1_WORLD = 0x37, ETSI3_ETSIA = 0x32, @@ -44,6 +45,7 @@ enum EnumRd { ETSI4_ETSIC = 0x38, ETSI5_WORLD = 0x39, ETSI6_WORLD = 0x34, + ETSI8_WORLD = 0x3D, ETSI_RESERVED = 0x33, MKK1_MKKA = 0x40, @@ -59,6 +61,7 @@ enum EnumRd { MKK1_MKKA1 = 0x4A, MKK1_MKKA2 = 0x4B, MKK1_MKKC = 0x4C, + APL2_FCCA = 0x4D, APL3_FCCA = 0x50, APL1_WORLD = 0x52, @@ -67,6 +70,7 @@ enum EnumRd { APL1_ETSIC = 0x55, APL2_ETSIC = 0x56, APL5_WORLD = 0x58, + APL13_WORLD = 0x5A, APL6_WORLD = 0x5B, APL7_FCCA = 0x5C, APL8_WORLD = 0x5D, @@ -168,6 +172,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {FCC2_ETSIC, CTL_FCC, CTL_ETSI}, {FCC3_FCCA, CTL_FCC, CTL_FCC}, {FCC3_WORLD, CTL_FCC, CTL_ETSI}, + {FCC3_ETSIC, CTL_FCC, CTL_ETSI}, {FCC4_FCCA, CTL_FCC, CTL_FCC}, 
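
The ath10k wmi.h change earlier in this section narrows WMI_TXBF_STS_CAP_OFFSET_MASK from 0xf0 to 0x70 because bit 7 of the TXBF configuration word is the implicit-beamforming flag; a four-bit mask at offset 4 would fold that flag into the extracted STS capability. A compilable demonstration of the difference, with the relevant constants restated locally:

#include <assert.h>
#include <stdio.h>

#define BIT(n)                      (1u << (n))
#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
#define OLD_STS_MASK                0xf0u /* overlaps bit 7 */
#define NEW_STS_MASK                0x70u /* bits 6:4 only  */
#define WMI_TXBF_CONF_IMPLICIT_BF   BIT(7)

int main(void)
{
	/* STS capability 3, implicit beamforming enabled */
	unsigned int conf = (3u << WMI_TXBF_STS_CAP_OFFSET_LSB) |
			    WMI_TXBF_CONF_IMPLICIT_BF;

	unsigned int old_sts = (conf & OLD_STS_MASK) >> WMI_TXBF_STS_CAP_OFFSET_LSB;
	unsigned int new_sts = (conf & NEW_STS_MASK) >> WMI_TXBF_STS_CAP_OFFSET_LSB;

	printf("old mask reads STS = %u (wrong), new mask reads STS = %u\n",
	       old_sts, new_sts);
	assert(old_sts == 11 && new_sts == 3);
	return 0;
}
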
{FCC5_FCCA, CTL_FCC, CTL_FCC}, {FCC6_FCCA, CTL_FCC, CTL_FCC}, @@ -179,6 +184,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {ETSI4_WORLD, CTL_ETSI, CTL_ETSI}, {ETSI5_WORLD, CTL_ETSI, CTL_ETSI}, {ETSI6_WORLD, CTL_ETSI, CTL_ETSI}, + {ETSI8_WORLD, CTL_ETSI, CTL_ETSI}, /* XXX: For ETSI3_ETSIA, Was NO_CTL meant for the 2 GHz band ? */ {ETSI3_ETSIA, CTL_ETSI, CTL_ETSI}, @@ -188,9 +194,11 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {FCC1_FCCA, CTL_FCC, CTL_FCC}, {APL1_WORLD, CTL_FCC, CTL_ETSI}, {APL2_WORLD, CTL_FCC, CTL_ETSI}, + {APL2_FCCA, CTL_FCC, CTL_FCC}, {APL3_WORLD, CTL_FCC, CTL_ETSI}, {APL4_WORLD, CTL_FCC, CTL_ETSI}, {APL5_WORLD, CTL_FCC, CTL_ETSI}, + {APL13_WORLD, CTL_ETSI, CTL_ETSI}, {APL6_WORLD, CTL_ETSI, CTL_ETSI}, {APL8_WORLD, CTL_ETSI, CTL_ETSI}, {APL9_WORLD, CTL_ETSI, CTL_ETSI}, @@ -298,6 +306,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"}, {CTRY_AUSTRIA, ETSI1_WORLD, "AT"}, {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"}, + {CTRY_BAHAMAS, FCC3_WORLD, "BS"}, {CTRY_BAHRAIN, APL6_WORLD, "BH"}, {CTRY_BANGLADESH, NULL1_WORLD, "BD"}, {CTRY_BARBADOS, FCC2_WORLD, "BB"}, @@ -305,6 +314,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_BELGIUM, ETSI1_WORLD, "BE"}, {CTRY_BELGIUM2, ETSI4_WORLD, "BL"}, {CTRY_BELIZE, APL1_ETSIC, "BZ"}, + {CTRY_BERMUDA, FCC3_FCCA, "BM"}, {CTRY_BOLIVIA, APL1_ETSIC, "BO"}, {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA"}, {CTRY_BRAZIL, FCC3_WORLD, "BR"}, @@ -444,6 +454,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_ROMANIA, NULL1_WORLD, "RO"}, {CTRY_RUSSIA, NULL1_WORLD, "RU"}, {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"}, + {CTRY_SERBIA, ETSI1_WORLD, "RS"}, {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS"}, {CTRY_SINGAPORE, APL6_WORLD, "SG"}, {CTRY_SLOVAKIA, ETSI1_WORLD, "SK"}, @@ -455,10 +466,12 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_SWITZERLAND, ETSI1_WORLD, "CH"}, {CTRY_SYRIA, NULL1_WORLD, "SY"}, {CTRY_TAIWAN, APL3_FCCA, "TW"}, + {CTRY_TANZANIA, APL1_WORLD, "TZ"}, {CTRY_THAILAND, FCC3_WORLD, "TH"}, {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"}, {CTRY_TUNISIA, ETSI3_WORLD, "TN"}, {CTRY_TURKEY, ETSI3_WORLD, "TR"}, + {CTRY_UGANDA, FCC3_WORLD, "UG"}, {CTRY_UKRAINE, NULL1_WORLD, "UA"}, {CTRY_UAE, NULL1_WORLD, "AE"}, {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"}, diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index b83f01d6e3dd..af37c19dbfd7 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -384,6 +384,18 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) } } + if (changed & IEEE80211_CONF_CHANGE_PS) { + list_for_each_entry(tmp, &wcn->vif_list, list) { + vif = wcn36xx_priv_to_vif(tmp); + if (hw->conf.flags & IEEE80211_CONF_PS) { + if (vif->bss_conf.ps) /* ps allowed ? 
*/ + wcn36xx_pmc_enter_bmps_state(wcn, vif); + } else { + wcn36xx_pmc_exit_bmps_state(wcn, vif); + } + } + } + mutex_unlock(&wcn->conf_mutex); return 0; @@ -747,17 +759,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, vif_priv->dtim_period = bss_conf->dtim_period; } - if (changed & BSS_CHANGED_PS) { - wcn36xx_dbg(WCN36XX_DBG_MAC, - "mac bss PS set %d\n", - bss_conf->ps); - if (bss_conf->ps) { - wcn36xx_pmc_enter_bmps_state(wcn, vif); - } else { - wcn36xx_pmc_exit_bmps_state(wcn, vif); - } - } - if (changed & BSS_CHANGED_BSSID) { wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n", bss_conf->bssid); diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c index 589fe5f70971..1976b80c235f 100644 --- a/drivers/net/wireless/ath/wcn36xx/pmc.c +++ b/drivers/net/wireless/ath/wcn36xx/pmc.c @@ -45,8 +45,10 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn, struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); if (WCN36XX_BMPS != vif_priv->pw_state) { - wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n"); - return -EINVAL; + /* Unbalanced call or last BMPS enter failed */ + wcn36xx_dbg(WCN36XX_DBG_PMC, + "Not in BMPS mode, no need to exit\n"); + return -EALREADY; } wcn36xx_smd_exit_bmps(wcn, vif); vif_priv->pw_state = WCN36XX_FULL_POWER; diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index ffdd2fa401b1..d63d7c326801 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1380,8 +1380,14 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie) }; int rc; u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len; - struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL); + struct wmi_set_appie_cmd *cmd; + if (len < ie_len) { + rc = -EINVAL; + goto out; + } + + cmd = kzalloc(len, GFP_KERNEL); if (!cmd) { rc = -ENOMEM; goto out; diff --git a/drivers/net/wireless/bcmdhd/Kconfig b/drivers/net/wireless/bcmdhd/Kconfig new file mode 100755 index 000000000000..8ecbfb248037 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/Kconfig @@ -0,0 +1,44 @@ +config BCMDHD + tristate "Broadcom FullMAC wireless cards support" + ---help--- + This module adds support for wireless adapters based on + Broadcom FullMAC chipset. + + If you choose to build a module, it'll be called dhd. Say M if + unsure. + +config BCMDHD_SDIO + bool "SDIO bus interface support" + depends on BCMDHD && MMC + +config BCMDHD_PCIE + bool "PCIe bus interface support" + depends on BCMDHD && PCI && !BCMDHD_SDIO + +config BCMDHD_FW_PATH + depends on BCMDHD + string "Firmware path" + default "/system/vendor/firmware/fw_bcmdhd.bin" + ---help--- + Path to the firmware file. + +config BCMDHD_NVRAM_PATH + depends on BCMDHD + string "NVRAM path" + default "/system/etc/wifi/bcmdhd.cal" + ---help--- + Path to the calibration file. 
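
The wil6210 wmi_set_ie() hunk above guards a u16 total length against wrap-around: if header size plus ie_len overflows 16 bits, the truncated sum is necessarily smaller than ie_len, which is exactly what the added `if (len < ie_len)` check detects before kzalloc() is called with a too-small size. A self-contained sketch of the same check; HDR_LEN is an arbitrary stand-in for the real command-header size.

#include <stdint.h>
#include <stdio.h>

#define HDR_LEN 10u  /* stands in for sizeof(struct wmi_set_appie_cmd) */

static int checked_total(uint16_t ie_len, uint16_t *total)
{
	uint16_t len = (uint16_t)(HDR_LEN + ie_len);

	if (len < ie_len)       /* u16 wrapped: reject before allocating */
		return -1;
	*total = len;
	return 0;
}

int main(void)
{
	uint16_t total;

	printf("ie_len 100   -> %s\n",
	       checked_total(100, &total) ? "rejected" : "ok");
	printf("ie_len 65530 -> %s\n",
	       checked_total(65530, &total) ? "rejected" : "ok");
	return 0;
}
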
+ +config DHD_USE_STATIC_BUF + bool "Enable memory preallocation" + depends on BCMDHD + default n + ---help--- + Use memory preallocated in platform + +config DHD_USE_SCHED_SCAN + bool "Use CFG80211 sched scan" + depends on BCMDHD && CFG80211 + default n + ---help--- + Use CFG80211 sched scan diff --git a/drivers/net/wireless/bcmdhd/Makefile b/drivers/net/wireless/bcmdhd/Makefile new file mode 100755 index 000000000000..02baabf6e503 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/Makefile @@ -0,0 +1,89 @@ +# bcmdhd +# +# +# +# +# + +DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER \ + -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \ + -DDHDTHREAD -DDHD_DEBUG -DSHOW_EVENTS -DBCMDBG -DCUSTOMER_HW2 -DWLP2P \ + -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT \ + -DKEEP_ALIVE -DGET_CUSTOM_MAC_ENABLE -DPKT_FILTER_SUPPORT \ + -DEMBEDDED_PLATFORM -DPNO_SUPPORT \ + -DDHD_USE_IDLECOUNT -DSET_RANDOM_MAC_SOFTAP -DROAM_ENABLE -DVSDB \ + -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST \ + -DESCAN_RESULT_PATCH -DSUPPORT_PM2_ONLY -DWLTDLS \ + -DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT -DRXFRAME_THREAD \ + -DMIRACAST_AMPDU_SIZE=8 -DROAM_ENABLE -DWL_IFACE_COMB_NUM_CHANNELS \ + -Idrivers/net/wireless/bcmdhd -Idrivers/net/wireless/bcmdhd/include + +DHDOFILES = aiutils.o siutils.o sbutils.o bcmutils.o bcmwifi_channels.o \ + dhd_linux.o dhd_linux_platdev.o dhd_linux_sched.o dhd_pno.o \ + dhd_common.o dhd_ip.o dhd_linux_wq.o dhd_custom_gpio.o \ + bcmevent.o hndpmu.o linux_osl.o wldev_common.o wl_android.o \ + hnd_pktq.o hnd_pktpool.o + +obj-$(CONFIG_BCMDHD) += bcmdhd.o +bcmdhd-objs += $(DHDOFILES) + +ifneq ($(CONFIG_CFG80211),) +bcmdhd-objs += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o dhd_cfg80211.o wl_cfg_btcoex.o +DHDCFLAGS += -DWL_CFG80211 -DWL_CFG80211_STA_EVENT -DWL_ENABLE_P2P_IF +DHDCFLAGS += -DCUSTOM_ROAM_TRIGGER_SETTING=-65 +DHDCFLAGS += -DCUSTOM_ROAM_DELTA_SETTING=15 +DHDCFLAGS += -DCUSTOM_KEEP_ALIVE_SETTING=28000 +DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=7 +endif +ifneq ($(CONFIG_DHD_USE_SCHED_SCAN),) +DHDCFLAGS += -DWL_SCHED_SCAN +endif +EXTRA_CFLAGS = $(DHDCFLAGS) +ifeq ($(CONFIG_BCMDHD),m) +EXTRA_LDFLAGS += --strip-debug +else +DHDCFLAGS += -DENABLE_INSMOD_NO_FW_LOAD +endif + +DHDCFLAGS += -DCONFIG_DTS + +######################### +# Chip dependent feature +######################### +ifneq ($(CONFIG_BCM4339),) +DHDCFLAGS += -DCUSTOM_GLOM_SETTING=8 -DCUSTOM_RXCHAIN=1 +DHDCFLAGS += -DUSE_DYNAMIC_F2_BLKSIZE -DDYNAMIC_F2_BLKSIZE_FOR_NONLEGACY=128 +DHDCFLAGS += -DBCMSDIOH_TXGLOM -DCUSTOM_TXGLOM=1 -DBCMSDIOH_TXGLOM_HIGHSPEED +DHDCFLAGS += -DCUSTOM_SDIO_F2_BLKSIZE=512 +DHDCFLAGS += -DDHDTCPACK_SUPPRESS +DHDCFLAGS += -DUSE_WL_TXBF +DHDCFLAGS += -DUSE_WL_FRAMEBURST +DHDCFLAGS += -DCUSTOM_AMPDU_BA_WSIZE=32 +DHDCFLAGS += -DPROP_TXSTATUS_VSDB +DHDCFLAGS += -DCUSTOM_MAX_TXGLOM_SIZE=32 +DHDCFLAGS += -DREPEAT_READFRAME +DHDCFLAGS += -DROAM_AP_ENV_DETECTION +endif + +bcmdhd-$(CONFIG_BCMDHD_SDIO) += \ + bcmsdh.o \ + bcmsdh_linux.o \ + bcmsdh_sdmmc.o \ + bcmsdh_sdmmc_linux.o \ + dhd_sdio.o \ + dhd_cdc.o \ + dhd_wlfc.o +bcmdhd-$(CONFIG_BCMDHD_PCIE) += \ + dhd_pcie.o \ + dhd_pcie_linux.o \ + dhd_msgbuf.o \ + dhd_log.o \ + circularbuf.o \ + pcie_core.o \ + dhd_flowring.o +ccflags-$(CONFIG_BCMDHD_SDIO) += \ + -DSDTEST -DBDC -DDHD_BCMEVENTS -DPROP_TXSTATUS -DOOB_INTR_ONLY \ + -DHW_OOB -DMMC_SDIO_ABORT -DBCMSDIO -DBCMLXSDMMC -DSDIO_CRC_ERROR_FIX \ + -DCUSTOM_SDIO_F2_BLKSIZE=128 -DUSE_SDIOFIFO_IOVAR +ccflags-$(CONFIG_BCMDHD_PCIE) += \ + -DPCIE_FULL_DONGLE -DBCMPCIE -DCUSTOM_DPC_PRIO_SETTING=-1 diff --git 
a/drivers/net/wireless/bcmdhd/aiutils.c b/drivers/net/wireless/bcmdhd/aiutils.c new file mode 100644 index 000000000000..70b2b6301ecb --- /dev/null +++ b/drivers/net/wireless/bcmdhd/aiutils.c @@ -0,0 +1,1284 @@ +/* + * Misc utility routines for accessing chip-specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: aiutils.c 607900 2015-12-22 13:38:53Z $ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "siutils_priv.h" + +#define BCM47162_DMP() (0) +#define BCM5357_DMP() (0) +#define BCM53573_DMP() (0) +#define BCM4707_DMP() (0) +#define PMU_DMP() (0) +#define GCI_DMP() (0) +#define remap_coreid(sih, coreid) (coreid) +#define remap_corerev(sih, corerev) (corerev) + +/* EROM parsing */ + +static uint32 +get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match) +{ + uint32 ent; + uint inv = 0, nom = 0; + uint32 size = 0; + + while (TRUE) { + ent = R_REG(si_osh(sih), *eromptr); + (*eromptr)++; + + if (mask == 0) + break; + + if ((ent & ER_VALID) == 0) { + inv++; + continue; + } + + if (ent == (ER_END | ER_VALID)) + break; + + if ((ent & mask) == match) + break; + + /* escape condition related EROM size if it has invalid values */ + size += sizeof(*eromptr); + if (size >= ER_SZ_MAX) { + SI_ERROR(("Failed to find end of EROM marker\n")); + break; + } + + nom++; + } + + SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent)); + if (inv + nom) { + SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom)); + } + return ent; +} + +static uint32 +get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh, + uint32 *sizel, uint32 *sizeh) +{ + uint32 asd, sz, szd; + + asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID); + if (((asd & ER_TAG1) != ER_ADD) || + (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) || + ((asd & AD_ST_MASK) != st)) { + /* This is not what we want, "push" it back */ + (*eromptr)--; + return 0; + } + *addrl = asd & AD_ADDR_MASK; + if (asd & AD_AG32) + *addrh = get_erom_ent(sih, eromptr, 0, 0); + else + *addrh = 0; + *sizeh = 0; + sz = asd & AD_SZ_MASK; + if (sz == AD_SZ_SZD) { + szd = get_erom_ent(sih, eromptr, 0, 0); + *sizel = szd & SD_SZ_MASK; + if (szd & SD_SG32) + *sizeh = get_erom_ent(sih, eromptr, 0, 0); + } else + *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT); + + SI_VMSG((" SP %d, ad %d: st = %d, 
0x%08x_0x%08x @ 0x%08x_0x%08x\n", + sp, ad, st, *sizeh, *sizel, *addrh, *addrl)); + + return asd; +} + +static void +ai_hwfixup(si_info_t *sii) +{ +} + + +/* parse the enumeration rom to identify all cores */ +void +ai_scan(si_t *sih, void *regs, uint devid) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + chipcregs_t *cc = (chipcregs_t *)regs; + uint32 erombase, *eromptr, *eromlim; + + erombase = R_REG(sii->osh, &cc->eromptr); + + switch (BUSTYPE(sih->bustype)) { + case SI_BUS: + eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE); + break; + + case PCI_BUS: + /* Set wrappers address */ + sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE); + + /* Now point the window at the erom */ + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase); + eromptr = regs; + break; + +#ifdef BCMSDIO + case SPI_BUS: + case SDIO_BUS: + eromptr = (uint32 *)(uintptr)erombase; + break; +#endif /* BCMSDIO */ + + case PCMCIA_BUS: + default: + SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype)); + ASSERT(0); + return; + } + eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32)); + + SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", + regs, erombase, eromptr, eromlim)); + while (eromptr < eromlim) { + uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp; + uint32 mpd, asd, addrl, addrh, sizel, sizeh; + uint i, j, idx; + bool br; + + br = FALSE; + + /* Grok a component */ + cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI); + if (cia == (ER_END | ER_VALID)) { + SI_VMSG(("Found END of erom after %d cores\n", sii->numcores)); + ai_hwfixup(sii); + return; + } + + cib = get_erom_ent(sih, &eromptr, 0, 0); + + if ((cib & ER_TAG) != ER_CI) { + SI_ERROR(("CIA not followed by CIB\n")); + goto error; + } + + cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT; + mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT; + crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT; + nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT; + nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT; + nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT; + nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT; + +#ifdef BCMDBG_SI + SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " + "nsw = %d, nmp = %d & nsp = %d\n", + mfg, cid, crev, eromptr - 1, nmw, nsw, nmp, nsp)); +#else + BCM_REFERENCE(crev); +#endif + + if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0)) + continue; + if ((nmw + nsw == 0)) { + /* A component which is not a core */ + if (cid == OOB_ROUTER_CORE_ID) { + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, + &addrl, &addrh, &sizel, &sizeh); + if (asd != 0) { + sii->oob_router = addrl; + } + } + if (cid != GMAC_COMMON_4706_CORE_ID && cid != NS_CCB_CORE_ID && + cid != PMU_CORE_ID && cid != GCI_CORE_ID) + continue; + } + + idx = sii->numcores; + + cores_info->cia[idx] = cia; + cores_info->cib[idx] = cib; + cores_info->coreid[idx] = remap_coreid(sih, cid); + + for (i = 0; i < nmp; i++) { + mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID); + if ((mpd & ER_TAG) != ER_MP) { + SI_ERROR(("Not enough MP entries for component 0x%x\n", cid)); + goto error; + } + SI_VMSG((" Master port %d, mp: %d id: %d\n", i, + (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT, + (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT)); + } + + /* First Slave Address Descriptor should be port 0: + * the main register space for the core + */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh); + if (asd == 0) { + do { + /* Try again to see if it is a 
bridge */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh, + &sizel, &sizeh); + if (asd != 0) + br = TRUE; + else { + if (br == TRUE) { + break; + } + else if ((addrh != 0) || (sizeh != 0) || + (sizel != SI_CORE_SIZE)) { + SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t size1 =" + "0x%x\n", addrh, sizeh, sizel)); + SI_ERROR(("First Slave ASD for" + "core 0x%04x malformed " + "(0x%08x)\n", cid, asd)); + goto error; + } + } + } while (1); + } + cores_info->coresba[idx] = addrl; + cores_info->coresba_size[idx] = sizel; + /* Get any more ASDs in port 0 */ + j = 1; + do { + asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) { + cores_info->coresba2[idx] = addrl; + cores_info->coresba2_size[idx] = sizel; + } + j++; + } while (asd != 0); + + /* Go through the ASDs for other slave ports */ + for (i = 1; i < nsp; i++) { + j = 0; + do { + asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + + if (asd == 0) + break; + j++; + } while (1); + if (j == 0) { + SI_ERROR((" SP %d has no address descriptors\n", i)); + goto error; + } + } + + /* Now get master wrappers */ + for (i = 0; i < nmw; i++) { + asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh, + &sizel, &sizeh); + if (asd == 0) { + SI_ERROR(("Missing descriptor for MW %d\n", i)); + goto error; + } + if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { + SI_ERROR(("Master wrapper %d is not 4KB\n", i)); + goto error; + } + if (i == 0) + cores_info->wrapba[idx] = addrl; + else if (i == 1) + cores_info->wrapba2[idx] = addrl; + } + + /* And finally slave wrappers */ + for (i = 0; i < nsw; i++) { + uint fwp = (nsp == 1) ? 0 : 1; + asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh, + &sizel, &sizeh); + + /* cache APB bridge wrapper address for set/clear timeout */ + if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) { + ASSERT(sii->num_br < SI_MAXBR); + sii->br_wrapba[sii->num_br++] = addrl; + } + if (asd == 0) { + SI_ERROR(("Missing descriptor for SW %d\n", i)); + goto error; + } + if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { + SI_ERROR(("Slave wrapper %d is not 4KB\n", i)); + goto error; + } + if ((nmw == 0) && (i == 0)) + cores_info->wrapba[idx] = addrl; + else if ((nmw == 0) && (i == 1)) + cores_info->wrapba2[idx] = addrl; + } + + + /* Don't record bridges */ + if (br) + continue; + + /* Done with core */ + sii->numcores++; + } + + SI_ERROR(("Reached end of erom without finding END")); + +error: + sii->numcores = 0; + return; +} + +#define AI_SETCOREIDX_MAPSIZE(coreid) \ + (((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE) + +/* This function changes the logical "focus" to the indicated core. + * Return the current core's virtual address. + */ +static void * +_ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrap2) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint32 addr, wrap, wrap2; + void *regs; + + if (coreidx >= MIN(sii->numcores, SI_MAXCORES)) + return (NULL); + + addr = cores_info->coresba[coreidx]; + wrap = cores_info->wrapba[coreidx]; + wrap2 = cores_info->wrapba2[coreidx]; + + /* + * If the user has provided an interrupt mask enabled function, + * then assert interrupts are disabled before switching the core. 
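+ * (The switch is not atomic: on PCI the BAR0 window is repointed below,
+ * so an interrupt handler that touched the previously selected core
+ * mid-switch would land on another core's registers.)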
+ */ + ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg)); + + switch (BUSTYPE(sih->bustype)) { + case SI_BUS: + /* map new one */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(addr, + AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx])); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + sii->curmap = regs = cores_info->regs[coreidx]; + if (!cores_info->wrappers[coreidx] && (wrap != 0)) { + cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->wrappers[coreidx])); + } + if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) { + cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->wrappers2[coreidx])); + } + if (use_wrap2) + sii->curwrap = cores_info->wrappers2[coreidx]; + else + sii->curwrap = cores_info->wrappers[coreidx]; + break; + + case PCI_BUS: + /* point bar0 window */ + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr); + regs = sii->curmap; + /* point bar0 2nd 4KB window to the primary wrapper */ + if (use_wrap2) + wrap = wrap2; + if (PCIE_GEN2(sii)) + OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap); + else + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap); + break; + +#ifdef BCMSDIO + case SPI_BUS: + case SDIO_BUS: + sii->curmap = regs = (void *)((uintptr)addr); + if (use_wrap2) + sii->curwrap = (void *)((uintptr)wrap2); + else + sii->curwrap = (void *)((uintptr)wrap); + break; +#endif /* BCMSDIO */ + + case PCMCIA_BUS: + default: + ASSERT(0); + regs = NULL; + break; + } + + sii->curmap = regs; + sii->curidx = coreidx; + + return regs; +} + +void * +ai_setcoreidx(si_t *sih, uint coreidx) +{ + return _ai_setcoreidx(sih, coreidx, 0); +} + +void * +ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx) +{ + return _ai_setcoreidx(sih, coreidx, 1); +} + +void +ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + chipcregs_t *cc = NULL; + uint32 erombase, *eromptr, *eromlim; + uint i, j, cidx; + uint32 cia, cib, nmp, nsp; + uint32 asd, addrl, addrh, sizel, sizeh; + + for (i = 0; i < sii->numcores; i++) { + if (cores_info->coreid[i] == CC_CORE_ID) { + cc = (chipcregs_t *)cores_info->regs[i]; + break; + } + } + if (cc == NULL) + goto error; + + erombase = R_REG(sii->osh, &cc->eromptr); + eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE); + eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32)); + + cidx = sii->curidx; + cia = cores_info->cia[cidx]; + cib = cores_info->cib[cidx]; + + nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT; + nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT; + + /* scan for cores */ + while (eromptr < eromlim) { + if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) && + (get_erom_ent(sih, &eromptr, 0, 0) == cib)) { + break; + } + } + + /* skip master ports */ + for (i = 0; i < nmp; i++) + get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID); + + /* Skip ASDs in port 0 */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh); + if (asd == 0) { + /* Try again to see if it is a bridge */ + asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh, + &sizel, &sizeh); + } + + j = 1; + do { + asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, &sizeh); + j++; + } while (asd != 0); + + /* Go through the ASDs for other slave ports */ + for (i = 1; i < nsp; i++) { + j = 0; + do { + asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh, + &sizel, 
&sizeh); + if (asd == 0) + break; + + if (!asidx--) { + *addr = addrl; + *size = sizel; + return; + } + j++; + } while (1); + + if (j == 0) { + SI_ERROR((" SP %d has no address descriptors\n", i)); + break; + } + } + +error: + *size = 0; + return; +} + +/* Return the number of address spaces in current core */ +int +ai_numaddrspaces(si_t *sih) +{ + return 2; +} + +/* Return the address of the nth address space in the current core */ +uint32 +ai_addrspace(si_t *sih, uint asidx) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint cidx; + + cidx = sii->curidx; + + if (asidx == 0) + return cores_info->coresba[cidx]; + else if (asidx == 1) + return cores_info->coresba2[cidx]; + else { + SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", + __FUNCTION__, asidx)); + return 0; + } +} + +/* Return the size of the nth address space in the current core */ +uint32 +ai_addrspacesize(si_t *sih, uint asidx) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint cidx; + + cidx = sii->curidx; + + if (asidx == 0) + return cores_info->coresba_size[cidx]; + else if (asidx == 1) + return cores_info->coresba2_size[cidx]; + else { + SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", + __FUNCTION__, asidx)); + return 0; + } +} + +uint +ai_flag(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + + if (BCM47162_DMP()) { + SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__)); + return sii->curidx; + } + if (BCM5357_DMP()) { + SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__)); + return sii->curidx; + } + if (BCM4707_DMP()) { + SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n", + __FUNCTION__)); + return sii->curidx; + } + if (BCM53573_DMP()) { + SI_ERROR(("%s: Attempting to read DMP registers on 53573\n", __FUNCTION__)); + return sii->curidx; + } +#ifdef REROUTE_OOBINT + if (PMU_DMP()) { + SI_ERROR(("%s: Attempting to read PMU DMP registers\n", + __FUNCTION__)); + return PMU_OOB_BIT; + } +#else + if (PMU_DMP()) { + uint idx, flag; + idx = sii->curidx; + ai_setcoreidx(sih, SI_CC_IDX); + flag = ai_flag_alt(sih); + ai_setcoreidx(sih, idx); + return flag; + } +#endif /* REROUTE_OOBINT */ + + ai = sii->curwrap; + ASSERT(ai != NULL); + + return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f); +} + +uint +ai_flag_alt(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + + if (BCM47162_DMP()) { + SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__)); + return sii->curidx; + } + if (BCM5357_DMP()) { + SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__)); + return sii->curidx; + } + if (BCM4707_DMP()) { + SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n", + __FUNCTION__)); + return sii->curidx; + } +#ifdef REROUTE_OOBINT + if (PMU_DMP()) { + SI_ERROR(("%s: Attempting to read PMU DMP registers\n", + __FUNCTION__)); + return PMU_OOB_BIT; + } +#endif /* REROUTE_OOBINT */ + + ai = sii->curwrap; + + return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK); +} + +void +ai_setint(si_t *sih, int siflag) +{ +} + +uint +ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + uint32 *map = (uint32 *) sii->curwrap; + + if (mask || val) { + uint32 w = R_REG(sii->osh, map+(offset/4)); + w &= ~mask; + w |= val; + W_REG(sii->osh, map+(offset/4), 
w); + } + + return (R_REG(sii->osh, map+(offset/4))); +} + +uint +ai_corevendor(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint32 cia; + + cia = cores_info->cia[sii->curidx]; + return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT); +} + +uint +ai_corerev(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint32 cib; + + + cib = cores_info->cib[sii->curidx]; + return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT); +} + +bool +ai_iscoreup(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + + ai = sii->curwrap; + + return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) && + ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0)); +} + +/* + * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation, + * switch back to the original core, and return the new value. + * + * When using the silicon backplane, no fiddling with interrupts or core switches is needed. + * + * Also, when using pci/pcie, we can optimize away the core switching for pci registers + * and (on newer pci cores) chipcommon registers. + */ +uint +ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + uint origidx = 0; + uint32 *r = NULL; + uint w; + uint intr_val = 0; + bool fast = FALSE; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + ASSERT((val & ~mask) == 0); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sih->bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sih->bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (uint32 *)((char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (uint32 *)((char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? 
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + INTR_OFF(sii, intr_val); + + /* save current core index */ + origidx = si_coreidx(&sii->pub); + + /* switch core */ + r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff); + } + ASSERT(r != NULL); + + /* mask and set */ + if (mask || val) { + w = (R_REG(sii->osh, r) & ~mask) | val; + W_REG(sii->osh, r, w); + } + + /* readback */ + w = R_REG(sii->osh, r); + + if (!fast) { + /* restore core index */ + if (origidx != coreidx) + ai_setcoreidx(&sii->pub, origidx); + + INTR_RESTORE(sii, intr_val); + } + + return (w); +} + +/* + * If there is no need for fiddling with interrupts or core switches (typically silicon + * back plane registers, pci registers and chipcommon registers), this function + * returns the register offset on this core to a mapped address. This address can + * be used for W_REG/R_REG directly. + * + * For accessing registers that would need a core switch, this function will return + * NULL. + */ +uint32 * +ai_corereg_addr(si_t *sih, uint coreidx, uint regoff) +{ + uint32 *r = NULL; + bool fast = FALSE; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sih->bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sih->bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (uint32 *)((char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (uint32 *)((char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? + PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + ASSERT(sii->curidx == coreidx); + r = (uint32*) ((uchar*)sii->curmap + regoff); + } + + return (r); +} + +void +ai_core_disable(si_t *sih, uint32 bits) +{ + si_info_t *sii = SI_INFO(sih); + volatile uint32 dummy; + uint32 status; + aidmp_t *ai; + + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + /* if core is already in reset, just return */ + if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) + return; + + /* ensure there are no pending backplane operations */ + SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); + + /* if pending backplane ops still, try waiting longer */ + if (status != 0) { + /* 300usecs was sufficient to allow backplane ops to clear for big hammer */ + /* during driver load we may need more time */ + SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000); + /* if still pending ops, continue on and try disable anyway */ + /* this is in big hammer path, so don't call wl_reinit in this case... 
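+	 * (deliberate: in this recovery path, issuing the reset while the
+	 * backplane may still be busy beats leaving a wedged core powered up)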
*/ + } + + W_REG(sii->osh, &ai->resetctrl, AIRC_RESET); + dummy = R_REG(sii->osh, &ai->resetctrl); + BCM_REFERENCE(dummy); + OSL_DELAY(1); + + W_REG(sii->osh, &ai->ioctrl, bits); + dummy = R_REG(sii->osh, &ai->ioctrl); + BCM_REFERENCE(dummy); + OSL_DELAY(10); +} + +/* reset and re-enable a core + * inputs: + * bits - core specific bits that are set during and after reset sequence + * resetbits - core specific bits that are set only during reset sequence + */ +static void +_ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + volatile uint32 dummy; + uint loop_counter = 10; +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: bits: 0x%x, resetbits: 0x%x\n", __FUNCTION__, bits, resetbits); +#endif + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + /* ensure there are no pending backplane operations */ + SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: resetstatus: %p dummy: %x\n", __FUNCTION__, &ai->resetstatus, dummy); +#endif + + + /* put core into reset state */ +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: resetctrl: %p\n", __FUNCTION__, &ai->resetctrl); +#endif + W_REG(sii->osh, &ai->resetctrl, AIRC_RESET); + OSL_DELAY(10); + + /* ensure there are no pending backplane operations */ + SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300); + + W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN)); + dummy = R_REG(sii->osh, &ai->ioctrl); +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: ioctrl: %p dummy: 0x%x\n", __FUNCTION__, &ai->ioctrl, dummy); +#endif + BCM_REFERENCE(dummy); + + /* ensure there are no pending backplane operations */ + SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); + + + while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) { + /* ensure there are no pending backplane operations */ + SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); + + + /* take core out of reset */ + W_REG(sii->osh, &ai->resetctrl, 0); +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: loop_counter: %d resetstatus: %p resetctrl: %p\n", + __FUNCTION__, loop_counter, &ai->resetstatus, &ai->resetctrl); +#endif + + /* ensure there are no pending backplane operations */ + SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300); + } + + + W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN)); + dummy = R_REG(sii->osh, &ai->ioctrl); +#ifdef CUSTOMER_HW4_DEBUG + printf("%s: ioctl: %p dummy: 0x%x\n", __FUNCTION__, &ai->ioctrl, dummy); +#endif + BCM_REFERENCE(dummy); + OSL_DELAY(1); +} + +void +ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint idx = sii->curidx; + + if (cores_info->wrapba2[idx] != 0) { + ai_setcoreidx_2ndwrap(sih, idx); + _ai_core_reset(sih, bits, resetbits); + ai_setcoreidx(sih, idx); + } + + _ai_core_reset(sih, bits, resetbits); +} + +void +ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + uint32 w; + + + if (BCM47162_DMP()) { + SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0", + __FUNCTION__)); + return; + } + if (BCM5357_DMP()) { + SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n", + __FUNCTION__)); + return; + } + if (BCM4707_DMP()) { + SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n", + __FUNCTION__)); + return; + } + if (PMU_DMP()) { + SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n", + __FUNCTION__)); 
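+		/* Note: the *_DMP() guard macros are hard-wired to (0) at the top
+		 * of this file, so for bcmdhd these chip-quirk branches compile
+		 * away and the wrapper register below is always used. */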
+ return; + } + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val); + W_REG(sii->osh, &ai->ioctrl, w); + } +} + +uint32 +ai_core_cflags(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + uint32 w; + + if (BCM47162_DMP()) { + SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0", + __FUNCTION__)); + return 0; + } + if (BCM5357_DMP()) { + SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n", + __FUNCTION__)); + return 0; + } + if (BCM4707_DMP()) { + SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n", + __FUNCTION__)); + return 0; + } + + if (PMU_DMP()) { + SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n", + __FUNCTION__)); + return 0; + } + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val); + W_REG(sii->osh, &ai->ioctrl, w); + } + + return R_REG(sii->osh, &ai->ioctrl); +} + +uint32 +ai_core_sflags(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + uint32 w; + + if (BCM47162_DMP()) { + SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0", + __FUNCTION__)); + return 0; + } + if (BCM5357_DMP()) { + SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n", + __FUNCTION__)); + return 0; + } + if (BCM4707_DMP()) { + SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n", + __FUNCTION__)); + return 0; + } + if (PMU_DMP()) { + SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n", + __FUNCTION__)); + return 0; + } + + ASSERT(GOODREGS(sii->curwrap)); + ai = sii->curwrap; + + ASSERT((val & ~mask) == 0); + ASSERT((mask & ~SISF_CORE_BITS) == 0); + + if (mask || val) { + w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val); + W_REG(sii->osh, &ai->iostatus, w); + } + + return R_REG(sii->osh, &ai->iostatus); +} + +#if defined(BCMDBG_PHYDUMP) +/* print interesting aidmp registers */ +void +ai_dumpregs(si_t *sih, struct bcmstrbuf *b) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + osl_t *osh; + aidmp_t *ai; + uint i; + + osh = sii->osh; + + for (i = 0; i < sii->numcores; i++) { + si_setcoreidx(&sii->pub, i); + ai = sii->curwrap; + + bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]); + if (BCM47162_DMP()) { + bcm_bprintf(b, "Skipping mips74k in 47162a0\n"); + continue; + } + if (BCM5357_DMP()) { + bcm_bprintf(b, "Skipping usb20h in 5357\n"); + continue; + } + if (BCM4707_DMP()) { + bcm_bprintf(b, "Skipping chipcommonb in 4707\n"); + continue; + } + + if (PMU_DMP()) { + bcm_bprintf(b, "Skipping pmu core\n"); + continue; + } + + bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x" + "ioctrlwidth 0x%x iostatuswidth 0x%x\n" + "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n" + "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x" + "errlogaddrlo 0x%x errlogaddrhi 0x%x\n" + "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n" + "intstatus 0x%x config 0x%x itcr 0x%x\n", + R_REG(osh, &ai->ioctrlset), + R_REG(osh, &ai->ioctrlclear), + R_REG(osh, &ai->ioctrl), + R_REG(osh, &ai->iostatus), + R_REG(osh, &ai->ioctrlwidth), + R_REG(osh, &ai->iostatuswidth), + R_REG(osh, &ai->resetctrl), + R_REG(osh, &ai->resetstatus), + R_REG(osh, &ai->resetreadid), + R_REG(osh, &ai->resetwriteid), + R_REG(osh, &ai->errlogctrl), + R_REG(osh, &ai->errlogdone), + 
R_REG(osh, &ai->errlogstatus), + R_REG(osh, &ai->errlogaddrlo), + R_REG(osh, &ai->errlogaddrhi), + R_REG(osh, &ai->errlogid), + R_REG(osh, &ai->errloguser), + R_REG(osh, &ai->errlogflags), + R_REG(osh, &ai->intstatus), + R_REG(osh, &ai->config), + R_REG(osh, &ai->itcr)); + } +} +#endif + + +void +ai_enable_backplane_timeouts(si_t *sih) +{ +#ifdef AXI_TIMEOUTS + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + int i; + + for (i = 0; i < sii->num_br; ++i) { + ai = (aidmp_t *) sii->br_wrapba[i]; + W_REG(sii->osh, &ai->errlogctrl, (1 << AIELC_TO_ENAB_SHIFT) | + ((AXI_TO_VAL << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK)); + } +#endif /* AXI_TIMEOUTS */ +} + +void +ai_clear_backplane_to(si_t *sih) +{ +#ifdef AXI_TIMEOUTS + si_info_t *sii = SI_INFO(sih); + aidmp_t *ai; + int i; + uint32 errlogstatus; + + for (i = 0; i < sii->num_br; ++i) { + ai = (aidmp_t *) sii->br_wrapba[i]; + /* check for backplane timeout & clear backplane hang */ + errlogstatus = R_REG(sii->osh, &ai->errlogstatus); + + if ((errlogstatus & AIELS_TIMEOUT_MASK) != 0) { + /* set ErrDone to clear the condition */ + W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK); + + /* SPINWAIT on errlogstatus timeout status bits */ + while (R_REG(sii->osh, &ai->errlogstatus) & AIELS_TIMEOUT_MASK) + ; + + /* only reset APB Bridge on timeout (not slave error, or dec error) */ + switch (errlogstatus & AIELS_TIMEOUT_MASK) { + case 0x1: + printf("AXI slave error"); + break; + case 0x2: + /* reset APB Bridge */ + OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET); + /* sync write */ + (void)R_REG(sii->osh, &ai->resetctrl); + /* clear Reset bit */ + AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET)); + /* sync write */ + (void)R_REG(sii->osh, &ai->resetctrl); + printf("AXI timeout"); + break; + case 0x3: + printf("AXI decode error"); + break; + default: + ; /* should be impossible */ + } + printf("; APB Bridge %d\n", i); + printf("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x", + R_REG(sii->osh, &ai->errlogaddrlo), + R_REG(sii->osh, &ai->errlogaddrhi), + R_REG(sii->osh, &ai->errlogid), + R_REG(sii->osh, &ai->errlogflags)); + printf(", status 0x%08x\n", errlogstatus); + } + } +#endif /* AXI_TIMEOUTS */ +} diff --git a/drivers/net/wireless/bcmdhd/bcm_app_utils.c b/drivers/net/wireless/bcmdhd/bcm_app_utils.c new file mode 100644 index 000000000000..d138849d65c6 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcm_app_utils.c @@ -0,0 +1,1012 @@ +/* + * Misc utility routines used by kernel or app-level. + * Contents are wifi-specific, used by any kernel or app-level + * software that might want wifi things as it grows. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: bcm_app_utils.c 547371 2015-04-08 12:51:39Z $
+ */
+
+#include <typedefs.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else /* BCMDRIVER */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* BCMDRIVER */
+#include <bcmwifi_channels.h>
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h>	/* For wl/exe/GNUmakefile.brcm_wlu and GNUmakefile.wlm_dll */
+#endif
+
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <wlioctl.h>
+
+#ifndef BCMDRIVER
+/* Take an array of measurements representing a single channel over time and return
+   a summary. Currently implemented as a simple average but could easily evolve
+   into more complex algorithms.
+*/
+cca_congest_channel_req_t *
+cca_per_chan_summary(cca_congest_channel_req_t *input, cca_congest_channel_req_t *avg, bool percent)
+{
+	int sec;
+	cca_congest_t totals;
+
+	totals.duration = 0;
+	totals.congest_ibss = 0;
+	totals.congest_obss = 0;
+	totals.interference = 0;
+	avg->num_secs = 0;
+
+	for (sec = 0; sec < input->num_secs; sec++) {
+		if (input->secs[sec].duration) {
+			totals.duration += input->secs[sec].duration;
+			totals.congest_ibss += input->secs[sec].congest_ibss;
+			totals.congest_obss += input->secs[sec].congest_obss;
+			totals.interference += input->secs[sec].interference;
+			avg->num_secs++;
+		}
+	}
+	avg->chanspec = input->chanspec;
+
+	if (!avg->num_secs || !totals.duration)
+		return (avg);
+
+	if (percent) {
+		avg->secs[0].duration = totals.duration / avg->num_secs;
+		avg->secs[0].congest_ibss = totals.congest_ibss * 100/totals.duration;
+		avg->secs[0].congest_obss = totals.congest_obss * 100/totals.duration;
+		avg->secs[0].interference = totals.interference * 100/totals.duration;
+	} else {
+		avg->secs[0].duration = totals.duration / avg->num_secs;
+		avg->secs[0].congest_ibss = totals.congest_ibss / avg->num_secs;
+		avg->secs[0].congest_obss = totals.congest_obss / avg->num_secs;
+		avg->secs[0].interference = totals.interference / avg->num_secs;
+	}
+
+	return (avg);
+}
+
+static void
+cca_info(uint8 *bitmap, int num_bits, int *left, int *bit_pos)
+{
+	int i;
+	for (*left = 0, i = 0; i < num_bits; i++) {
+		if (isset(bitmap, i)) {
+			(*left)++;
+			*bit_pos = i;
+		}
+	}
+}
+
+static uint8
+spec_to_chan(chanspec_t chspec)
+{
+	uint8 center_ch, edge, primary, sb;
+
+	center_ch = CHSPEC_CHANNEL(chspec);
+
+	if (CHSPEC_IS20(chspec)) {
+		return center_ch;
+	} else {
+		/* the lower edge of the wide channel is half the bw from
+		 * the center channel.
+		 */
+		if (CHSPEC_IS40(chspec)) {
+			edge = center_ch - CH_20MHZ_APART;
+		} else {
+			/* must be 80MHz (until we support more) */
+			ASSERT(CHSPEC_IS80(chspec));
+			edge = center_ch - CH_40MHZ_APART;
+		}
+
+		/* find the channel number of the lowest 20MHz primary channel */
+		primary = edge + CH_10MHZ_APART;
+
+		/* select the actual subband */
+		sb = (chspec & WL_CHANSPEC_CTL_SB_MASK) >> WL_CHANSPEC_CTL_SB_SHIFT;
+		primary = primary + sb * CH_20MHZ_APART;
+
+		return primary;
+	}
+}
+
+/*
+   Take an array of measurements representing summaries of different channels.
+   Return a recommended channel.
+   Interference is evil, get rid of that first.
+   Then hunt for lowest Other bss traffic.
+   Don't forget that channels with low duration times may not have accurate readings.
+   For the moment, do not overwrite input array.
+*/
+int
+cca_analyze(cca_congest_channel_req_t *input[], int num_chans, uint flags, chanspec_t *answer)
+{
+	uint8 *bitmap = NULL;	/* 38 Max channels needs 5 bytes = 40 */
+	int i, left, winner, ret_val = 0;
+	uint32 min_obss = 1 << 30;
+	uint bitmap_sz;
+
+	bitmap_sz = CEIL(num_chans, NBBY);
+	bitmap = (uint8 *)malloc(bitmap_sz);
+	if (bitmap == NULL) {
+		printf("unable to allocate memory\n");
+		return BCME_NOMEM;
+	}
+
+	memset(bitmap, 0, bitmap_sz);
+	/* Initially, all channels are up for consideration */
+	for (i = 0; i < num_chans; i++) {
+		if (input[i]->chanspec)
+			setbit(bitmap, i);
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_TOO_FEW;
+		goto f_exit;
+	}
+
+	/* Filter for 2.4 GHz Band */
+	if (flags & CCA_FLAG_2G_ONLY) {
+		for (i = 0; i < num_chans; i++) {
+			if (!CHSPEC_IS2G(input[i]->chanspec))
+				clrbit(bitmap, i);
+		}
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_BAND;
+		goto f_exit;
+	}
+
+	/* Filter for 5 GHz Band */
+	if (flags & CCA_FLAG_5G_ONLY) {
+		for (i = 0; i < num_chans; i++) {
+			if (!CHSPEC_IS5G(input[i]->chanspec))
+				clrbit(bitmap, i);
+		}
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_BAND;
+		goto f_exit;
+	}
+
+	/* Filter for Duration */
+	if (!(flags & CCA_FLAG_IGNORE_DURATION)) {
+		for (i = 0; i < num_chans; i++) {
+			if (input[i]->secs[0].duration < CCA_THRESH_MILLI)
+				clrbit(bitmap, i);
+		}
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_DURATION;
+		goto f_exit;
+	}
+
+	/* Filter for 1 6 11 on 2.4 Band */
+	if (flags & CCA_FLAGS_PREFER_1_6_11) {
+		for (i = 0; i < num_chans; i++) {
+			/* evaluate each candidate on its own chanspec */
+			int tmp_channel = spec_to_chan(input[i]->chanspec);
+			int is2g = CHSPEC_IS2G(input[i]->chanspec);
+			if (is2g && tmp_channel != 1 && tmp_channel != 6 && tmp_channel != 11)
+				clrbit(bitmap, i);
+		}
+	}
+	cca_info(bitmap, num_chans, &left, &i);
+	if (!left) {
+		ret_val = CCA_ERRNO_PREF_CHAN;
+		goto f_exit;
+	}
+
+	/* Toss high interference */
+	if (!(flags & CCA_FLAG_IGNORE_INTERFER)) {
+		for (i = 0; i < num_chans; i++) {
+			if (input[i]->secs[0].interference > CCA_THRESH_INTERFERE)
+				clrbit(bitmap, i);
+		}
+		cca_info(bitmap, num_chans, &left, &i);
+		if (!left) {
+			ret_val = CCA_ERRNO_INTERFER;
+			goto f_exit;
+		}
+	}
+
+	/* Now find lowest obss */
+	winner = 0;
+	for (i = 0; i < num_chans; i++) {
+		if (isset(bitmap, i) && input[i]->secs[0].congest_obss < min_obss) {
+			winner = i;
+			min_obss = input[i]->secs[0].congest_obss;
+		}
+	}
+	*answer = input[winner]->chanspec;
+f_exit:
+	free(bitmap);	/* free the allocated memory for bitmap */
+	return ret_val;
+}
+#endif /* !BCMDRIVER */
+
+/* Index (in uint32 units) of cntmember relative to the first counter, txframe. */
+#define IDX_IN_WL_CNT_VER_6_T(cntmember) \
+	((OFFSETOF(wl_cnt_ver_6_t, cntmember) - OFFSETOF(wl_cnt_ver_6_t, txframe)) / sizeof(uint32))
+
+#define IDX_IN_WL_CNT_VER_11_T(cntmember) \
+	((OFFSETOF(wl_cnt_ver_11_t, cntmember) - OFFSETOF(wl_cnt_ver_11_t, txframe)) \
+	/ sizeof(uint32))
+
+/* Exclude version and length fields */
+#define NUM_OF_CNT_IN_WL_CNT_VER_6_T \
+	((sizeof(wl_cnt_ver_6_t) - 2 * sizeof(uint16)) / sizeof(uint32))
+/* Exclude macstat cnt variables. wl_cnt_ver_6_t only has 62 macstat cnt variables.
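+ * (WL_CNT_MCST_VAR_NUM is 64; the ver-6 layout carries two fewer, which is
+ * why the define below subtracts WL_CNT_MCST_VAR_NUM - 2 = 62 slots.)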
*/ +#define NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T \ + (NUM_OF_CNT_IN_WL_CNT_VER_6_T - (WL_CNT_MCST_VAR_NUM - 2)) + +/* Exclude version and length fields */ +#define NUM_OF_CNT_IN_WL_CNT_VER_11_T \ + ((sizeof(wl_cnt_ver_11_t) - 2 * sizeof(uint16)) / sizeof(uint32)) +/* Exclude 64 macstat cnt variables. */ +#define NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T \ + (NUM_OF_CNT_IN_WL_CNT_VER_11_T - WL_CNT_MCST_VAR_NUM) + +/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_wlc_t */ +static const uint8 wlcntver6t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T] = { + IDX_IN_WL_CNT_VER_6_T(txframe), + IDX_IN_WL_CNT_VER_6_T(txbyte), + IDX_IN_WL_CNT_VER_6_T(txretrans), + IDX_IN_WL_CNT_VER_6_T(txerror), + IDX_IN_WL_CNT_VER_6_T(txctl), + IDX_IN_WL_CNT_VER_6_T(txprshort), + IDX_IN_WL_CNT_VER_6_T(txserr), + IDX_IN_WL_CNT_VER_6_T(txnobuf), + IDX_IN_WL_CNT_VER_6_T(txnoassoc), + IDX_IN_WL_CNT_VER_6_T(txrunt), + IDX_IN_WL_CNT_VER_6_T(txchit), + IDX_IN_WL_CNT_VER_6_T(txcmiss), + IDX_IN_WL_CNT_VER_6_T(txuflo), + IDX_IN_WL_CNT_VER_6_T(txphyerr), + IDX_IN_WL_CNT_VER_6_T(txphycrs), + IDX_IN_WL_CNT_VER_6_T(rxframe), + IDX_IN_WL_CNT_VER_6_T(rxbyte), + IDX_IN_WL_CNT_VER_6_T(rxerror), + IDX_IN_WL_CNT_VER_6_T(rxctl), + IDX_IN_WL_CNT_VER_6_T(rxnobuf), + IDX_IN_WL_CNT_VER_6_T(rxnondata), + IDX_IN_WL_CNT_VER_6_T(rxbadds), + IDX_IN_WL_CNT_VER_6_T(rxbadcm), + IDX_IN_WL_CNT_VER_6_T(rxfragerr), + IDX_IN_WL_CNT_VER_6_T(rxrunt), + IDX_IN_WL_CNT_VER_6_T(rxgiant), + IDX_IN_WL_CNT_VER_6_T(rxnoscb), + IDX_IN_WL_CNT_VER_6_T(rxbadproto), + IDX_IN_WL_CNT_VER_6_T(rxbadsrcmac), + IDX_IN_WL_CNT_VER_6_T(rxbadda), + IDX_IN_WL_CNT_VER_6_T(rxfilter), + IDX_IN_WL_CNT_VER_6_T(rxoflo), + IDX_IN_WL_CNT_VER_6_T(rxuflo), + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 1, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 2, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 3, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 4, + IDX_IN_WL_CNT_VER_6_T(rxuflo) + 5, + IDX_IN_WL_CNT_VER_6_T(d11cnt_txrts_off), + IDX_IN_WL_CNT_VER_6_T(d11cnt_rxcrc_off), + IDX_IN_WL_CNT_VER_6_T(d11cnt_txnocts_off), + IDX_IN_WL_CNT_VER_6_T(dmade), + IDX_IN_WL_CNT_VER_6_T(dmada), + IDX_IN_WL_CNT_VER_6_T(dmape), + IDX_IN_WL_CNT_VER_6_T(reset), + IDX_IN_WL_CNT_VER_6_T(tbtt), + IDX_IN_WL_CNT_VER_6_T(txdmawar), + IDX_IN_WL_CNT_VER_6_T(pkt_callback_reg_fail), + IDX_IN_WL_CNT_VER_6_T(txfrag), + IDX_IN_WL_CNT_VER_6_T(txmulti), + IDX_IN_WL_CNT_VER_6_T(txfail), + IDX_IN_WL_CNT_VER_6_T(txretry), + IDX_IN_WL_CNT_VER_6_T(txretrie), + IDX_IN_WL_CNT_VER_6_T(rxdup), + IDX_IN_WL_CNT_VER_6_T(txrts), + IDX_IN_WL_CNT_VER_6_T(txnocts), + IDX_IN_WL_CNT_VER_6_T(txnoack), + IDX_IN_WL_CNT_VER_6_T(rxfrag), + IDX_IN_WL_CNT_VER_6_T(rxmulti), + IDX_IN_WL_CNT_VER_6_T(rxcrc), + IDX_IN_WL_CNT_VER_6_T(txfrmsnt), + IDX_IN_WL_CNT_VER_6_T(rxundec), + IDX_IN_WL_CNT_VER_6_T(tkipmicfaill), + IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr), + IDX_IN_WL_CNT_VER_6_T(tkipreplay), + IDX_IN_WL_CNT_VER_6_T(ccmpfmterr), + IDX_IN_WL_CNT_VER_6_T(ccmpreplay), + IDX_IN_WL_CNT_VER_6_T(ccmpundec), + IDX_IN_WL_CNT_VER_6_T(fourwayfail), + IDX_IN_WL_CNT_VER_6_T(wepundec), + IDX_IN_WL_CNT_VER_6_T(wepicverr), + IDX_IN_WL_CNT_VER_6_T(decsuccess), + IDX_IN_WL_CNT_VER_6_T(tkipicverr), + IDX_IN_WL_CNT_VER_6_T(wepexcluded), + IDX_IN_WL_CNT_VER_6_T(txchanrej), + IDX_IN_WL_CNT_VER_6_T(psmwds), + IDX_IN_WL_CNT_VER_6_T(phywatchdog), + IDX_IN_WL_CNT_VER_6_T(prq_entries_handled), + IDX_IN_WL_CNT_VER_6_T(prq_undirected_entries), + IDX_IN_WL_CNT_VER_6_T(prq_bad_entries), + IDX_IN_WL_CNT_VER_6_T(atim_suppress_count), + IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready), + IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready_done), + 
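+	/* Each entry above is a uint32-granular index into the version-6 payload,
+	 * computed purely from struct offsets; wl_copy_wlccnt() below walks it as
+	 *
+	 *   dst[i] = src[wlcntver6t_to_wlcntwlct[i]];
+	 *
+	 * so i == 0 reads src[IDX_IN_WL_CNT_VER_6_T(txframe)] == src[0], the
+	 * first counter after the version/length header. */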
IDX_IN_WL_CNT_VER_6_T(late_tbtt_dpc), + IDX_IN_WL_CNT_VER_6_T(rx1mbps), + IDX_IN_WL_CNT_VER_6_T(rx2mbps), + IDX_IN_WL_CNT_VER_6_T(rx5mbps5), + IDX_IN_WL_CNT_VER_6_T(rx6mbps), + IDX_IN_WL_CNT_VER_6_T(rx9mbps), + IDX_IN_WL_CNT_VER_6_T(rx11mbps), + IDX_IN_WL_CNT_VER_6_T(rx12mbps), + IDX_IN_WL_CNT_VER_6_T(rx18mbps), + IDX_IN_WL_CNT_VER_6_T(rx24mbps), + IDX_IN_WL_CNT_VER_6_T(rx36mbps), + IDX_IN_WL_CNT_VER_6_T(rx48mbps), + IDX_IN_WL_CNT_VER_6_T(rx54mbps), + IDX_IN_WL_CNT_VER_6_T(rx108mbps), + IDX_IN_WL_CNT_VER_6_T(rx162mbps), + IDX_IN_WL_CNT_VER_6_T(rx216mbps), + IDX_IN_WL_CNT_VER_6_T(rx270mbps), + IDX_IN_WL_CNT_VER_6_T(rx324mbps), + IDX_IN_WL_CNT_VER_6_T(rx378mbps), + IDX_IN_WL_CNT_VER_6_T(rx432mbps), + IDX_IN_WL_CNT_VER_6_T(rx486mbps), + IDX_IN_WL_CNT_VER_6_T(rx540mbps), + IDX_IN_WL_CNT_VER_6_T(rfdisable), + IDX_IN_WL_CNT_VER_6_T(txexptime), + IDX_IN_WL_CNT_VER_6_T(txmpdu_sgi), + IDX_IN_WL_CNT_VER_6_T(rxmpdu_sgi), + IDX_IN_WL_CNT_VER_6_T(txmpdu_stbc), + IDX_IN_WL_CNT_VER_6_T(rxmpdu_stbc), + IDX_IN_WL_CNT_VER_6_T(rxundec_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipmicfaill_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipreplay_mcst), + IDX_IN_WL_CNT_VER_6_T(ccmpfmterr_mcst), + IDX_IN_WL_CNT_VER_6_T(ccmpreplay_mcst), + IDX_IN_WL_CNT_VER_6_T(ccmpundec_mcst), + IDX_IN_WL_CNT_VER_6_T(fourwayfail_mcst), + IDX_IN_WL_CNT_VER_6_T(wepundec_mcst), + IDX_IN_WL_CNT_VER_6_T(wepicverr_mcst), + IDX_IN_WL_CNT_VER_6_T(decsuccess_mcst), + IDX_IN_WL_CNT_VER_6_T(tkipicverr_mcst), + IDX_IN_WL_CNT_VER_6_T(wepexcluded_mcst) +}; + +/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_wlc_t */ +static const uint8 wlcntver11t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T] = { + IDX_IN_WL_CNT_VER_11_T(txframe), + IDX_IN_WL_CNT_VER_11_T(txbyte), + IDX_IN_WL_CNT_VER_11_T(txretrans), + IDX_IN_WL_CNT_VER_11_T(txerror), + IDX_IN_WL_CNT_VER_11_T(txctl), + IDX_IN_WL_CNT_VER_11_T(txprshort), + IDX_IN_WL_CNT_VER_11_T(txserr), + IDX_IN_WL_CNT_VER_11_T(txnobuf), + IDX_IN_WL_CNT_VER_11_T(txnoassoc), + IDX_IN_WL_CNT_VER_11_T(txrunt), + IDX_IN_WL_CNT_VER_11_T(txchit), + IDX_IN_WL_CNT_VER_11_T(txcmiss), + IDX_IN_WL_CNT_VER_11_T(txuflo), + IDX_IN_WL_CNT_VER_11_T(txphyerr), + IDX_IN_WL_CNT_VER_11_T(txphycrs), + IDX_IN_WL_CNT_VER_11_T(rxframe), + IDX_IN_WL_CNT_VER_11_T(rxbyte), + IDX_IN_WL_CNT_VER_11_T(rxerror), + IDX_IN_WL_CNT_VER_11_T(rxctl), + IDX_IN_WL_CNT_VER_11_T(rxnobuf), + IDX_IN_WL_CNT_VER_11_T(rxnondata), + IDX_IN_WL_CNT_VER_11_T(rxbadds), + IDX_IN_WL_CNT_VER_11_T(rxbadcm), + IDX_IN_WL_CNT_VER_11_T(rxfragerr), + IDX_IN_WL_CNT_VER_11_T(rxrunt), + IDX_IN_WL_CNT_VER_11_T(rxgiant), + IDX_IN_WL_CNT_VER_11_T(rxnoscb), + IDX_IN_WL_CNT_VER_11_T(rxbadproto), + IDX_IN_WL_CNT_VER_11_T(rxbadsrcmac), + IDX_IN_WL_CNT_VER_11_T(rxbadda), + IDX_IN_WL_CNT_VER_11_T(rxfilter), + IDX_IN_WL_CNT_VER_11_T(rxoflo), + IDX_IN_WL_CNT_VER_11_T(rxuflo), + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 1, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 2, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 3, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 4, + IDX_IN_WL_CNT_VER_11_T(rxuflo) + 5, + IDX_IN_WL_CNT_VER_11_T(d11cnt_txrts_off), + IDX_IN_WL_CNT_VER_11_T(d11cnt_rxcrc_off), + IDX_IN_WL_CNT_VER_11_T(d11cnt_txnocts_off), + IDX_IN_WL_CNT_VER_11_T(dmade), + IDX_IN_WL_CNT_VER_11_T(dmada), + IDX_IN_WL_CNT_VER_11_T(dmape), + IDX_IN_WL_CNT_VER_11_T(reset), + IDX_IN_WL_CNT_VER_11_T(tbtt), + IDX_IN_WL_CNT_VER_11_T(txdmawar), + IDX_IN_WL_CNT_VER_11_T(pkt_callback_reg_fail), + IDX_IN_WL_CNT_VER_11_T(txfrag), + IDX_IN_WL_CNT_VER_11_T(txmulti), + IDX_IN_WL_CNT_VER_11_T(txfail), + 
IDX_IN_WL_CNT_VER_11_T(txretry), + IDX_IN_WL_CNT_VER_11_T(txretrie), + IDX_IN_WL_CNT_VER_11_T(rxdup), + IDX_IN_WL_CNT_VER_11_T(txrts), + IDX_IN_WL_CNT_VER_11_T(txnocts), + IDX_IN_WL_CNT_VER_11_T(txnoack), + IDX_IN_WL_CNT_VER_11_T(rxfrag), + IDX_IN_WL_CNT_VER_11_T(rxmulti), + IDX_IN_WL_CNT_VER_11_T(rxcrc), + IDX_IN_WL_CNT_VER_11_T(txfrmsnt), + IDX_IN_WL_CNT_VER_11_T(rxundec), + IDX_IN_WL_CNT_VER_11_T(tkipmicfaill), + IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr), + IDX_IN_WL_CNT_VER_11_T(tkipreplay), + IDX_IN_WL_CNT_VER_11_T(ccmpfmterr), + IDX_IN_WL_CNT_VER_11_T(ccmpreplay), + IDX_IN_WL_CNT_VER_11_T(ccmpundec), + IDX_IN_WL_CNT_VER_11_T(fourwayfail), + IDX_IN_WL_CNT_VER_11_T(wepundec), + IDX_IN_WL_CNT_VER_11_T(wepicverr), + IDX_IN_WL_CNT_VER_11_T(decsuccess), + IDX_IN_WL_CNT_VER_11_T(tkipicverr), + IDX_IN_WL_CNT_VER_11_T(wepexcluded), + IDX_IN_WL_CNT_VER_11_T(txchanrej), + IDX_IN_WL_CNT_VER_11_T(psmwds), + IDX_IN_WL_CNT_VER_11_T(phywatchdog), + IDX_IN_WL_CNT_VER_11_T(prq_entries_handled), + IDX_IN_WL_CNT_VER_11_T(prq_undirected_entries), + IDX_IN_WL_CNT_VER_11_T(prq_bad_entries), + IDX_IN_WL_CNT_VER_11_T(atim_suppress_count), + IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready), + IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready_done), + IDX_IN_WL_CNT_VER_11_T(late_tbtt_dpc), + IDX_IN_WL_CNT_VER_11_T(rx1mbps), + IDX_IN_WL_CNT_VER_11_T(rx2mbps), + IDX_IN_WL_CNT_VER_11_T(rx5mbps5), + IDX_IN_WL_CNT_VER_11_T(rx6mbps), + IDX_IN_WL_CNT_VER_11_T(rx9mbps), + IDX_IN_WL_CNT_VER_11_T(rx11mbps), + IDX_IN_WL_CNT_VER_11_T(rx12mbps), + IDX_IN_WL_CNT_VER_11_T(rx18mbps), + IDX_IN_WL_CNT_VER_11_T(rx24mbps), + IDX_IN_WL_CNT_VER_11_T(rx36mbps), + IDX_IN_WL_CNT_VER_11_T(rx48mbps), + IDX_IN_WL_CNT_VER_11_T(rx54mbps), + IDX_IN_WL_CNT_VER_11_T(rx108mbps), + IDX_IN_WL_CNT_VER_11_T(rx162mbps), + IDX_IN_WL_CNT_VER_11_T(rx216mbps), + IDX_IN_WL_CNT_VER_11_T(rx270mbps), + IDX_IN_WL_CNT_VER_11_T(rx324mbps), + IDX_IN_WL_CNT_VER_11_T(rx378mbps), + IDX_IN_WL_CNT_VER_11_T(rx432mbps), + IDX_IN_WL_CNT_VER_11_T(rx486mbps), + IDX_IN_WL_CNT_VER_11_T(rx540mbps), + IDX_IN_WL_CNT_VER_11_T(rfdisable), + IDX_IN_WL_CNT_VER_11_T(txexptime), + IDX_IN_WL_CNT_VER_11_T(txmpdu_sgi), + IDX_IN_WL_CNT_VER_11_T(rxmpdu_sgi), + IDX_IN_WL_CNT_VER_11_T(txmpdu_stbc), + IDX_IN_WL_CNT_VER_11_T(rxmpdu_stbc), + IDX_IN_WL_CNT_VER_11_T(rxundec_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipmicfaill_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipreplay_mcst), + IDX_IN_WL_CNT_VER_11_T(ccmpfmterr_mcst), + IDX_IN_WL_CNT_VER_11_T(ccmpreplay_mcst), + IDX_IN_WL_CNT_VER_11_T(ccmpundec_mcst), + IDX_IN_WL_CNT_VER_11_T(fourwayfail_mcst), + IDX_IN_WL_CNT_VER_11_T(wepundec_mcst), + IDX_IN_WL_CNT_VER_11_T(wepicverr_mcst), + IDX_IN_WL_CNT_VER_11_T(decsuccess_mcst), + IDX_IN_WL_CNT_VER_11_T(tkipicverr_mcst), + IDX_IN_WL_CNT_VER_11_T(wepexcluded_mcst), + IDX_IN_WL_CNT_VER_11_T(dma_hang), + IDX_IN_WL_CNT_VER_11_T(reinit), + IDX_IN_WL_CNT_VER_11_T(pstatxucast), + IDX_IN_WL_CNT_VER_11_T(pstatxnoassoc), + IDX_IN_WL_CNT_VER_11_T(pstarxucast), + IDX_IN_WL_CNT_VER_11_T(pstarxbcmc), + IDX_IN_WL_CNT_VER_11_T(pstatxbcmc), + IDX_IN_WL_CNT_VER_11_T(cso_passthrough), + IDX_IN_WL_CNT_VER_11_T(cso_normal), + IDX_IN_WL_CNT_VER_11_T(chained), + IDX_IN_WL_CNT_VER_11_T(chainedsz1), + IDX_IN_WL_CNT_VER_11_T(unchained), + IDX_IN_WL_CNT_VER_11_T(maxchainsz), + IDX_IN_WL_CNT_VER_11_T(currchainsz), + IDX_IN_WL_CNT_VER_11_T(pciereset), + IDX_IN_WL_CNT_VER_11_T(cfgrestore), + IDX_IN_WL_CNT_VER_11_T(reinitreason), + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 1, + IDX_IN_WL_CNT_VER_11_T(reinitreason) 
+ 2, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 3, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 4, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 5, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 6, + IDX_IN_WL_CNT_VER_11_T(reinitreason) + 7, + IDX_IN_WL_CNT_VER_11_T(rxrtry), + IDX_IN_WL_CNT_VER_11_T(rxmpdu_mu), + IDX_IN_WL_CNT_VER_11_T(txbar), + IDX_IN_WL_CNT_VER_11_T(rxbar), + IDX_IN_WL_CNT_VER_11_T(txpspoll), + IDX_IN_WL_CNT_VER_11_T(rxpspoll), + IDX_IN_WL_CNT_VER_11_T(txnull), + IDX_IN_WL_CNT_VER_11_T(rxnull), + IDX_IN_WL_CNT_VER_11_T(txqosnull), + IDX_IN_WL_CNT_VER_11_T(rxqosnull), + IDX_IN_WL_CNT_VER_11_T(txassocreq), + IDX_IN_WL_CNT_VER_11_T(rxassocreq), + IDX_IN_WL_CNT_VER_11_T(txreassocreq), + IDX_IN_WL_CNT_VER_11_T(rxreassocreq), + IDX_IN_WL_CNT_VER_11_T(txdisassoc), + IDX_IN_WL_CNT_VER_11_T(rxdisassoc), + IDX_IN_WL_CNT_VER_11_T(txassocrsp), + IDX_IN_WL_CNT_VER_11_T(rxassocrsp), + IDX_IN_WL_CNT_VER_11_T(txreassocrsp), + IDX_IN_WL_CNT_VER_11_T(rxreassocrsp), + IDX_IN_WL_CNT_VER_11_T(txauth), + IDX_IN_WL_CNT_VER_11_T(rxauth), + IDX_IN_WL_CNT_VER_11_T(txdeauth), + IDX_IN_WL_CNT_VER_11_T(rxdeauth), + IDX_IN_WL_CNT_VER_11_T(txprobereq), + IDX_IN_WL_CNT_VER_11_T(rxprobereq), + IDX_IN_WL_CNT_VER_11_T(txprobersp), + IDX_IN_WL_CNT_VER_11_T(rxprobersp), + IDX_IN_WL_CNT_VER_11_T(txaction), + IDX_IN_WL_CNT_VER_11_T(rxaction) +}; + +/* Index conversion table from wl_cnt_ver_11_t to + * either wl_cnt_ge40mcst_v1_t or wl_cnt_lt40mcst_v1_t + */ +static const uint8 wlcntver11t_to_wlcntXX40mcstv1t[WL_CNT_MCST_VAR_NUM] = { + IDX_IN_WL_CNT_VER_11_T(txallfrm), + IDX_IN_WL_CNT_VER_11_T(txrtsfrm), + IDX_IN_WL_CNT_VER_11_T(txctsfrm), + IDX_IN_WL_CNT_VER_11_T(txackfrm), + IDX_IN_WL_CNT_VER_11_T(txdnlfrm), + IDX_IN_WL_CNT_VER_11_T(txbcnfrm), + IDX_IN_WL_CNT_VER_11_T(txfunfl), + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5, + IDX_IN_WL_CNT_VER_11_T(txfbw), + IDX_IN_WL_CNT_VER_11_T(txmpdu), + IDX_IN_WL_CNT_VER_11_T(txtplunfl), + IDX_IN_WL_CNT_VER_11_T(txphyerror), + IDX_IN_WL_CNT_VER_11_T(pktengrxducast), + IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast), + IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong), + IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt), + IDX_IN_WL_CNT_VER_11_T(rxinvmachdr), + IDX_IN_WL_CNT_VER_11_T(rxbadfcs), + IDX_IN_WL_CNT_VER_11_T(rxbadplcp), + IDX_IN_WL_CNT_VER_11_T(rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxstrt), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxcfrmucast), + IDX_IN_WL_CNT_VER_11_T(rxrtsucast), + IDX_IN_WL_CNT_VER_11_T(rxctsucast), + IDX_IN_WL_CNT_VER_11_T(rxackucast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxrtsocast), + IDX_IN_WL_CNT_VER_11_T(rxctsocast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss), + IDX_IN_WL_CNT_VER_11_T(rxbeaconobss), + IDX_IN_WL_CNT_VER_11_T(rxrsptmout), + IDX_IN_WL_CNT_VER_11_T(bcntxcancl), + IDX_IN_WL_CNT_VER_11_T(rxnodelim), + IDX_IN_WL_CNT_VER_11_T(rxf0ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf1ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf2ovfl), + IDX_IN_WL_CNT_VER_11_T(txsfovfl), + IDX_IN_WL_CNT_VER_11_T(pmqovfl), + IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm), + IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl), + IDX_IN_WL_CNT_VER_11_T(txcgprsfail), + 
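+	/* Entries of the form IDX_IN_WL_CNT_VER_11_T(txfunfl) + n (above) rely
+	 * on txfunfl being a contiguous uint32 array of per-FIFO underflow
+	 * counters, so + n lands on its nth element; the same trick indexes the
+	 * reinitreason[] slots in the previous table. */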
IDX_IN_WL_CNT_VER_11_T(txcgprssuc), + IDX_IN_WL_CNT_VER_11_T(prs_timeout), + IDX_IN_WL_CNT_VER_11_T(rxnack), + IDX_IN_WL_CNT_VER_11_T(frmscons), + IDX_IN_WL_CNT_VER_11_T(txnack), + IDX_IN_WL_CNT_VER_11_T(rxback), + IDX_IN_WL_CNT_VER_11_T(txback), + IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxdrop20s), + IDX_IN_WL_CNT_VER_11_T(rxtoolate), + IDX_IN_WL_CNT_VER_11_T(bphy_badplcp) +}; + +/* For mcst offsets that were not used. (2 Pads) */ +#define INVALID_MCST_IDX ((uint8)(-1)) +/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_v_le10_mcst_t */ +static const uint8 wlcntver11t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = { + IDX_IN_WL_CNT_VER_11_T(txallfrm), + IDX_IN_WL_CNT_VER_11_T(txrtsfrm), + IDX_IN_WL_CNT_VER_11_T(txctsfrm), + IDX_IN_WL_CNT_VER_11_T(txackfrm), + IDX_IN_WL_CNT_VER_11_T(txdnlfrm), + IDX_IN_WL_CNT_VER_11_T(txbcnfrm), + IDX_IN_WL_CNT_VER_11_T(txfunfl), + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4, + IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5, + IDX_IN_WL_CNT_VER_11_T(txfbw), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_11_T(txtplunfl), + IDX_IN_WL_CNT_VER_11_T(txphyerror), + IDX_IN_WL_CNT_VER_11_T(pktengrxducast), + IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast), + IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong), + IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt), + IDX_IN_WL_CNT_VER_11_T(rxinvmachdr), + IDX_IN_WL_CNT_VER_11_T(rxbadfcs), + IDX_IN_WL_CNT_VER_11_T(rxbadplcp), + IDX_IN_WL_CNT_VER_11_T(rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxstrt), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss), + IDX_IN_WL_CNT_VER_11_T(rxcfrmucast), + IDX_IN_WL_CNT_VER_11_T(rxrtsucast), + IDX_IN_WL_CNT_VER_11_T(rxctsucast), + IDX_IN_WL_CNT_VER_11_T(rxackucast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmocast), + IDX_IN_WL_CNT_VER_11_T(rxrtsocast), + IDX_IN_WL_CNT_VER_11_T(rxctsocast), + IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast), + IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss), + IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss), + IDX_IN_WL_CNT_VER_11_T(rxbeaconobss), + IDX_IN_WL_CNT_VER_11_T(rxrsptmout), + IDX_IN_WL_CNT_VER_11_T(bcntxcancl), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_11_T(rxf0ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf1ovfl), + IDX_IN_WL_CNT_VER_11_T(rxf2ovfl), + IDX_IN_WL_CNT_VER_11_T(txsfovfl), + IDX_IN_WL_CNT_VER_11_T(pmqovfl), + IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm), + IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl), + IDX_IN_WL_CNT_VER_11_T(txcgprsfail), + IDX_IN_WL_CNT_VER_11_T(txcgprssuc), + IDX_IN_WL_CNT_VER_11_T(prs_timeout), + IDX_IN_WL_CNT_VER_11_T(rxnack), + IDX_IN_WL_CNT_VER_11_T(frmscons), + IDX_IN_WL_CNT_VER_11_T(txnack), + IDX_IN_WL_CNT_VER_11_T(rxback), + IDX_IN_WL_CNT_VER_11_T(txback), + IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch), + IDX_IN_WL_CNT_VER_11_T(rxdrop20s), + IDX_IN_WL_CNT_VER_11_T(rxtoolate), + IDX_IN_WL_CNT_VER_11_T(bphy_badplcp) +}; + + +/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_v_le10_mcst_t */ +static const uint8 wlcntver6t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = { + IDX_IN_WL_CNT_VER_6_T(txallfrm), + IDX_IN_WL_CNT_VER_6_T(txrtsfrm), + IDX_IN_WL_CNT_VER_6_T(txctsfrm), + IDX_IN_WL_CNT_VER_6_T(txackfrm), + IDX_IN_WL_CNT_VER_6_T(txdnlfrm), + IDX_IN_WL_CNT_VER_6_T(txbcnfrm), + IDX_IN_WL_CNT_VER_6_T(txfunfl), + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 1, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 2, + 
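+	/* The INVALID_MCST_IDX entries in this table and the previous one mark
+	 * slots (txmpdu and rxnodelim in the newer layouts) that the old structs
+	 * never carried; the copy routine below stores INVALID_CNT_VAL for them
+	 * instead of reading a bogus offset. */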
IDX_IN_WL_CNT_VER_6_T(txfunfl) + 3, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 4, + IDX_IN_WL_CNT_VER_6_T(txfunfl) + 5, + IDX_IN_WL_CNT_VER_6_T(txfbw), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_6_T(txtplunfl), + IDX_IN_WL_CNT_VER_6_T(txphyerror), + IDX_IN_WL_CNT_VER_6_T(pktengrxducast), + IDX_IN_WL_CNT_VER_6_T(pktengrxdmcast), + IDX_IN_WL_CNT_VER_6_T(rxfrmtoolong), + IDX_IN_WL_CNT_VER_6_T(rxfrmtooshrt), + IDX_IN_WL_CNT_VER_6_T(rxinvmachdr), + IDX_IN_WL_CNT_VER_6_T(rxbadfcs), + IDX_IN_WL_CNT_VER_6_T(rxbadplcp), + IDX_IN_WL_CNT_VER_6_T(rxcrsglitch), + IDX_IN_WL_CNT_VER_6_T(rxstrt), + IDX_IN_WL_CNT_VER_6_T(rxdfrmucastmbss), + IDX_IN_WL_CNT_VER_6_T(rxmfrmucastmbss), + IDX_IN_WL_CNT_VER_6_T(rxcfrmucast), + IDX_IN_WL_CNT_VER_6_T(rxrtsucast), + IDX_IN_WL_CNT_VER_6_T(rxctsucast), + IDX_IN_WL_CNT_VER_6_T(rxackucast), + IDX_IN_WL_CNT_VER_6_T(rxdfrmocast), + IDX_IN_WL_CNT_VER_6_T(rxmfrmocast), + IDX_IN_WL_CNT_VER_6_T(rxcfrmocast), + IDX_IN_WL_CNT_VER_6_T(rxrtsocast), + IDX_IN_WL_CNT_VER_6_T(rxctsocast), + IDX_IN_WL_CNT_VER_6_T(rxdfrmmcast), + IDX_IN_WL_CNT_VER_6_T(rxmfrmmcast), + IDX_IN_WL_CNT_VER_6_T(rxcfrmmcast), + IDX_IN_WL_CNT_VER_6_T(rxbeaconmbss), + IDX_IN_WL_CNT_VER_6_T(rxdfrmucastobss), + IDX_IN_WL_CNT_VER_6_T(rxbeaconobss), + IDX_IN_WL_CNT_VER_6_T(rxrsptmout), + IDX_IN_WL_CNT_VER_6_T(bcntxcancl), + INVALID_MCST_IDX, + IDX_IN_WL_CNT_VER_6_T(rxf0ovfl), + IDX_IN_WL_CNT_VER_6_T(rxf1ovfl), + IDX_IN_WL_CNT_VER_6_T(rxf2ovfl), + IDX_IN_WL_CNT_VER_6_T(txsfovfl), + IDX_IN_WL_CNT_VER_6_T(pmqovfl), + IDX_IN_WL_CNT_VER_6_T(rxcgprqfrm), + IDX_IN_WL_CNT_VER_6_T(rxcgprsqovfl), + IDX_IN_WL_CNT_VER_6_T(txcgprsfail), + IDX_IN_WL_CNT_VER_6_T(txcgprssuc), + IDX_IN_WL_CNT_VER_6_T(prs_timeout), + IDX_IN_WL_CNT_VER_6_T(rxnack), + IDX_IN_WL_CNT_VER_6_T(frmscons), + IDX_IN_WL_CNT_VER_6_T(txnack), + IDX_IN_WL_CNT_VER_6_T(rxback), + IDX_IN_WL_CNT_VER_6_T(txback), + IDX_IN_WL_CNT_VER_6_T(bphy_rxcrsglitch), + IDX_IN_WL_CNT_VER_6_T(rxdrop20s), + IDX_IN_WL_CNT_VER_6_T(rxtoolate), + IDX_IN_WL_CNT_VER_6_T(bphy_badplcp) +}; + +/* copy wlc layer counters from old type cntbuf to wl_cnt_wlc_t type. */ +static int +wl_copy_wlccnt(uint16 cntver, uint32 *dst, uint32 *src, uint8 src_max_idx) +{ + uint i; + if (dst == NULL || src == NULL) { + return BCME_ERROR; + } + + /* Init wlccnt with invalid value. Unchanged value will not be printed out */ + for (i = 0; i < (sizeof(wl_cnt_wlc_t) / sizeof(uint32)); i++) { + dst[i] = INVALID_CNT_VAL; + } + + if (cntver == WL_CNT_VERSION_6) { + for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T; i++) { + if (wlcntver6t_to_wlcntwlct[i] >= src_max_idx) { + /* src buffer does not have counters from here */ + break; + } + dst[i] = src[wlcntver6t_to_wlcntwlct[i]]; + } + } else { + for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T; i++) { + if (wlcntver11t_to_wlcntwlct[i] >= src_max_idx) { + /* src buffer does not have counters from here */ + break; + } + dst[i] = src[wlcntver11t_to_wlcntwlct[i]]; + } + } + return BCME_OK; +} + +/* copy macstat counters from old type cntbuf to wl_cnt_v_le10_mcst_t type. 
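+ * Ver-6 uses its own index table; every other pre-xtlv version is copied
+ * through the wl_cnt_ver_11_t index table, and slots the source never had
+ * are filled with INVALID_CNT_VAL so "missing" stays distinguishable from
+ * a genuine zero.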
*/ +static int +wl_copy_macstat_upto_ver10(uint16 cntver, uint32 *dst, uint32 *src) +{ + uint i; + + if (dst == NULL || src == NULL) { + return BCME_ERROR; + } + + if (cntver == WL_CNT_VERSION_6) { + for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) { + if (wlcntver6t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) { + /* This mcst counter does not exist in wl_cnt_ver_6_t */ + dst[i] = INVALID_CNT_VAL; + } else { + dst[i] = src[wlcntver6t_to_wlcntvle10mcstt[i]]; + } + } + } else { + for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) { + if (wlcntver11t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) { + /* This mcst counter does not exist in wl_cnt_ver_11_t */ + dst[i] = INVALID_CNT_VAL; + } else { + dst[i] = src[wlcntver11t_to_wlcntvle10mcstt[i]]; + } + } + } + return BCME_OK; +} + +static int +wl_copy_macstat_ver11(uint32 *dst, uint32 *src) +{ + uint i; + + if (dst == NULL || src == NULL) { + return BCME_ERROR; + } + + for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) { + dst[i] = src[wlcntver11t_to_wlcntXX40mcstv1t[i]]; + } + return BCME_OK; +} + +/** + * Translate non-xtlv 'wl counters' IOVar buffer received by old driver/FW to xtlv format. + * Parameters: + * cntbuf: pointer to non-xtlv 'wl counters' IOVar buffer received by old driver/FW. + * Newly translated xtlv format is written to this pointer. + * buflen: length of the "cntbuf" without any padding. + * corerev: chip core revision of the driver/FW. + */ +int +wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, int buflen, uint32 corerev) +{ + wl_cnt_wlc_t *wlccnt = NULL; + uint32 *macstat = NULL; + xtlv_desc_t xtlv_desc[3]; + uint16 mcst_xtlv_id; + int res = BCME_OK; + wl_cnt_info_t *cntinfo = cntbuf; + void *xtlvbuf_p = cntinfo->data; + uint16 ver = cntinfo->version; + uint16 xtlvbuflen = (uint16)buflen; + uint16 src_max_idx; +#ifdef BCMDRIVER + osl_t *osh = ctx; +#else + BCM_REFERENCE(ctx); +#endif + + if (ver == WL_CNT_T_VERSION) { + /* Already in xtlv format. 
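+	 * Newer driver/FW already hand back xtlv-packed counters, so the
+	 * buffer passes through untouched.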
*/
+		goto exit;
+	}
+
+#ifdef BCMDRIVER
+	wlccnt = MALLOC(osh, sizeof(*wlccnt));
+	macstat = MALLOC(osh, WL_CNT_MCST_STRUCT_SZ);
+#else
+	wlccnt = (wl_cnt_wlc_t *)malloc(sizeof(*wlccnt));
+	macstat = (uint32 *)malloc(WL_CNT_MCST_STRUCT_SZ);
+#endif
+	if (!wlccnt || !macstat) {
+		printf("wl_cntbuf_to_xtlv_format malloc fail!\n");
+		res = BCME_NOMEM;
+		goto exit;
+	}
+
+	/* Check if the max idx in the struct exceeds the boundary of uint8 */
+	if (NUM_OF_CNT_IN_WL_CNT_VER_6_T > ((uint8)(-1) + 1) ||
+		NUM_OF_CNT_IN_WL_CNT_VER_11_T > ((uint8)(-1) + 1)) {
+		printf("wlcntverXXt_to_wlcntwlct and src_max_idx need"
+			" to be of uint16 instead of uint8\n");
+		res = BCME_ERROR;
+		goto exit;
+	}
+
+	/* Exclude version and length fields in either wlc_cnt_ver_6_t or wlc_cnt_ver_11_t */
+	src_max_idx = (cntinfo->datalen - OFFSETOF(wl_cnt_info_t, data)) / sizeof(uint32);
+
+	if (src_max_idx > (uint8)(-1)) {
+		printf("wlcntverXXt_to_wlcntwlct and src_max_idx need"
+			" to be of uint16 instead of uint8\n"
+			"Try updating wl utility to the latest.\n");
+		res = BCME_ERROR;
+		goto exit;
+	}
+
+	/* Copy wlc layer counters to wl_cnt_wlc_t */
+	res = wl_copy_wlccnt(ver, (uint32 *)wlccnt, (uint32 *)cntinfo->data, (uint8)src_max_idx);
+	if (res != BCME_OK) {
+		printf("wl_copy_wlccnt fail!\n");
+		goto exit;
+	}
+
+	/* Copy macstat counters into the macstat buffer */
+	if (ver == WL_CNT_VERSION_11) {
+		res = wl_copy_macstat_ver11(macstat, (uint32 *)cntinfo->data);
+		if (res != BCME_OK) {
+			printf("wl_copy_macstat_ver11 fail!\n");
+			goto exit;
+		}
+		if (corerev >= 40) {
+			mcst_xtlv_id = WL_CNT_XTLV_GE40_UCODE_V1;
+		} else {
+			mcst_xtlv_id = WL_CNT_XTLV_LT40_UCODE_V1;
+		}
+	} else {
+		res = wl_copy_macstat_upto_ver10(ver, macstat, (uint32 *)cntinfo->data);
+		if (res != BCME_OK) {
+			printf("wl_copy_macstat_upto_ver10 fail!\n");
+			goto exit;
+		}
+		mcst_xtlv_id = WL_CNT_XTLV_CNTV_LE10_UCODE;
+	}
+
+	xtlv_desc[0].type = WL_CNT_XTLV_WLC;
+	xtlv_desc[0].len = sizeof(*wlccnt);
+	xtlv_desc[0].ptr = wlccnt;
+
+	xtlv_desc[1].type = mcst_xtlv_id;
+	xtlv_desc[1].len = WL_CNT_MCST_STRUCT_SZ;
+	xtlv_desc[1].ptr = macstat;
+
+	xtlv_desc[2].type = 0;
+	xtlv_desc[2].len = 0;
+	xtlv_desc[2].ptr = NULL;
+
+	memset(cntbuf, 0, WL_CNTBUF_MAX_SIZE);
+
+	res = bcm_pack_xtlv_buf_from_mem(&xtlvbuf_p, &xtlvbuflen,
+		xtlv_desc, BCM_XTLV_OPTION_ALIGN32);
+	cntinfo->datalen = (buflen - xtlvbuflen);
+exit:
+#ifdef BCMDRIVER
+	if (wlccnt) {
+		MFREE(osh, wlccnt, sizeof(*wlccnt));
+	}
+	if (macstat) {
+		MFREE(osh, macstat, WL_CNT_MCST_STRUCT_SZ);
+	}
+#else
+	if (wlccnt) {
+		free(wlccnt);
+	}
+	if (macstat) {
+		free(macstat);
+	}
+#endif
+	return res;
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmevent.c b/drivers/net/wireless/bcmdhd/bcmevent.c
new file mode 100644
index 000000000000..1746f47fd613
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmevent.c
@@ -0,0 +1,230 @@
+/*
+ * bcmevent read-only data shared by kernel or app layers
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: bcmevent.c 530174 2015-01-29 09:47:55Z $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+#include <proto/bcmeth.h>
+#include <proto/bcmevent.h>
+
+
+/* Table of event name strings for UIs and debugging dumps */
+typedef struct {
+	uint event;
+	const char *name;
+} bcmevent_name_str_t;
+
+/* Use the actual name for event tracing */
+#define BCMEVENT_NAME(_event) {(_event), #_event}
+
+static const bcmevent_name_str_t bcmevent_names[] = {
+	BCMEVENT_NAME(WLC_E_SET_SSID),
+	BCMEVENT_NAME(WLC_E_JOIN),
+	BCMEVENT_NAME(WLC_E_START),
+	BCMEVENT_NAME(WLC_E_AUTH),
+	BCMEVENT_NAME(WLC_E_AUTH_IND),
+	BCMEVENT_NAME(WLC_E_DEAUTH),
+	BCMEVENT_NAME(WLC_E_DEAUTH_IND),
+	BCMEVENT_NAME(WLC_E_ASSOC),
+	BCMEVENT_NAME(WLC_E_ASSOC_IND),
+	BCMEVENT_NAME(WLC_E_REASSOC),
+	BCMEVENT_NAME(WLC_E_REASSOC_IND),
+	BCMEVENT_NAME(WLC_E_DISASSOC),
+	BCMEVENT_NAME(WLC_E_DISASSOC_IND),
+	BCMEVENT_NAME(WLC_E_QUIET_START),
+	BCMEVENT_NAME(WLC_E_QUIET_END),
+	BCMEVENT_NAME(WLC_E_BEACON_RX),
+	BCMEVENT_NAME(WLC_E_LINK),
+	BCMEVENT_NAME(WLC_E_MIC_ERROR),
+	BCMEVENT_NAME(WLC_E_NDIS_LINK),
+	BCMEVENT_NAME(WLC_E_ROAM),
+	BCMEVENT_NAME(WLC_E_TXFAIL),
+	BCMEVENT_NAME(WLC_E_PMKID_CACHE),
+	BCMEVENT_NAME(WLC_E_RETROGRADE_TSF),
+	BCMEVENT_NAME(WLC_E_PRUNE),
+	BCMEVENT_NAME(WLC_E_AUTOAUTH),
+	BCMEVENT_NAME(WLC_E_EAPOL_MSG),
+	BCMEVENT_NAME(WLC_E_SCAN_COMPLETE),
+	BCMEVENT_NAME(WLC_E_ADDTS_IND),
+	BCMEVENT_NAME(WLC_E_DELTS_IND),
+	BCMEVENT_NAME(WLC_E_BCNSENT_IND),
+	BCMEVENT_NAME(WLC_E_BCNRX_MSG),
+	BCMEVENT_NAME(WLC_E_BCNLOST_MSG),
+	BCMEVENT_NAME(WLC_E_ROAM_PREP),
+	BCMEVENT_NAME(WLC_E_PFN_NET_FOUND),
+	BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE),
+	BCMEVENT_NAME(WLC_E_PFN_NET_LOST),
+#if defined(IBSS_PEER_DISCOVERY_EVENT)
+	BCMEVENT_NAME(WLC_E_IBSS_ASSOC),
+#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */
+	BCMEVENT_NAME(WLC_E_RADIO),
+	BCMEVENT_NAME(WLC_E_PSM_WATCHDOG),
+	BCMEVENT_NAME(WLC_E_PROBREQ_MSG),
+	BCMEVENT_NAME(WLC_E_SCAN_CONFIRM_IND),
+	BCMEVENT_NAME(WLC_E_PSK_SUP),
+	BCMEVENT_NAME(WLC_E_COUNTRY_CODE_CHANGED),
+	BCMEVENT_NAME(WLC_E_EXCEEDED_MEDIUM_TIME),
+	BCMEVENT_NAME(WLC_E_ICV_ERROR),
+	BCMEVENT_NAME(WLC_E_UNICAST_DECODE_ERROR),
+	BCMEVENT_NAME(WLC_E_MULTICAST_DECODE_ERROR),
+	BCMEVENT_NAME(WLC_E_TRACE),
+	BCMEVENT_NAME(WLC_E_IF),
+#ifdef WLP2P
+	BCMEVENT_NAME(WLC_E_P2P_DISC_LISTEN_COMPLETE),
+#endif
+	BCMEVENT_NAME(WLC_E_RSSI),
+	BCMEVENT_NAME(WLC_E_EXTLOG_MSG),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_COMPLETE),
+	BCMEVENT_NAME(WLC_E_ESCAN_RESULT),
+	BCMEVENT_NAME(WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE),
+#ifdef WLP2P
+	BCMEVENT_NAME(WLC_E_PROBRESP_MSG),
+	BCMEVENT_NAME(WLC_E_P2P_PROBREQ_MSG),
+#endif
+#ifdef PROP_TXSTATUS
+	BCMEVENT_NAME(WLC_E_FIFO_CREDIT_MAP),
+#endif
+	BCMEVENT_NAME(WLC_E_WAKE_EVENT),
+	BCMEVENT_NAME(WLC_E_DCS_REQUEST),
+	BCMEVENT_NAME(WLC_E_RM_COMPLETE),
+#ifdef WLMEDIA_HTSF
+	BCMEVENT_NAME(WLC_E_HTSFSYNC),
+#endif
+	BCMEVENT_NAME(WLC_E_OVERLAY_REQ),
+	BCMEVENT_NAME(WLC_E_CSA_COMPLETE_IND),
+	BCMEVENT_NAME(WLC_E_EXCESS_PM_WAKE_EVENT),
+	BCMEVENT_NAME(WLC_E_PFN_SCAN_NONE),
+
BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE),
+#ifdef SOFTAP
+	BCMEVENT_NAME(WLC_E_GTK_PLUMBED),
+#endif
+	BCMEVENT_NAME(WLC_E_ASSOC_REQ_IE),
+	BCMEVENT_NAME(WLC_E_ASSOC_RESP_IE),
+	BCMEVENT_NAME(WLC_E_BEACON_FRAME_RX),
+#ifdef WLTDLS
+	BCMEVENT_NAME(WLC_E_TDLS_PEER_EVENT),
+#endif /* WLTDLS */
+	BCMEVENT_NAME(WLC_E_NATIVE),
+#ifdef WLPKTDLYSTAT
+	BCMEVENT_NAME(WLC_E_PKTDELAY_IND),
+#endif /* WLPKTDLYSTAT */
+	BCMEVENT_NAME(WLC_E_SERVICE_FOUND),
+	BCMEVENT_NAME(WLC_E_GAS_FRAGMENT_RX),
+	BCMEVENT_NAME(WLC_E_GAS_COMPLETE),
+	BCMEVENT_NAME(WLC_E_P2PO_ADD_DEVICE),
+	BCMEVENT_NAME(WLC_E_P2PO_DEL_DEVICE),
+#ifdef WLWNM
+	BCMEVENT_NAME(WLC_E_WNM_STA_SLEEP),
+#endif /* WLWNM */
+#if defined(WL_PROXDETECT)
+	BCMEVENT_NAME(WLC_E_PROXD),
+#endif
+	BCMEVENT_NAME(WLC_E_CCA_CHAN_QUAL),
+	BCMEVENT_NAME(WLC_E_BSSID),
+#ifdef PROP_TXSTATUS
+	BCMEVENT_NAME(WLC_E_BCMC_CREDIT_SUPPORT),
+#endif
+	BCMEVENT_NAME(WLC_E_PSTA_PRIMARY_INTF_IND),
+	BCMEVENT_NAME(WLC_E_TXFAIL_THRESH),
+#ifdef GSCAN_SUPPORT
+	BCMEVENT_NAME(WLC_E_PFN_GSCAN_FULL_RESULT),
+	BCMEVENT_NAME(WLC_E_PFN_SWC),
+#endif /* GSCAN_SUPPORT */
+#ifdef WLBSSLOAD_REPORT
+	BCMEVENT_NAME(WLC_E_BSS_LOAD),
+#endif
+#if defined(BT_WIFI_HANDOVER) || defined(WL_TBOW)
+	BCMEVENT_NAME(WLC_E_BT_WIFI_HANDOVER_REQ),
+#endif
+	BCMEVENT_NAME(WLC_E_AUTHORIZED),
+	BCMEVENT_NAME(WLC_E_PROBREQ_MSG_RX),
+	BCMEVENT_NAME(WLC_E_CSA_START_IND),
+	BCMEVENT_NAME(WLC_E_CSA_DONE_IND),
+	BCMEVENT_NAME(WLC_E_CSA_FAILURE_IND),
+	BCMEVENT_NAME(WLC_E_RMC_EVENT),
+	BCMEVENT_NAME(WLC_E_DPSTA_INTF_IND),
+};
+
+
+const char *bcmevent_get_name(uint event_type)
+{
+	/* note: this was first coded as a static const string, but some
+	 * ROMs already export a symbol called event_name, so it was
+	 * changed to avoid keeping a variable for the 'unknown' string.
+	 */
+	const char *event_name = NULL;
+
+	uint idx;
+	for (idx = 0; idx < (uint)ARRAYSIZE(bcmevent_names); idx++) {
+
+		if (bcmevent_names[idx].event == event_type) {
+			event_name = bcmevent_names[idx].name;
+			break;
+		}
+	}
+
+	/* if we find an event name in the array, return it.
+	 * otherwise return the unknown string.
+	 */
+	return ((event_name) ? event_name : "Unknown Event");
+}
+
+void
+wl_event_to_host_order(wl_event_msg_t *evt)
+{
+	/* Event struct members passed from dongle to host are stored in network
+	 * byte order. Convert all members to host order.
+	 */
+	evt->event_type = ntoh32(evt->event_type);
+	evt->flags = ntoh16(evt->flags);
+	evt->status = ntoh32(evt->status);
+	evt->reason = ntoh32(evt->reason);
+	evt->auth_type = ntoh32(evt->auth_type);
+	evt->datalen = ntoh32(evt->datalen);
+	evt->version = ntoh16(evt->version);
+}
+
+void
+wl_event_to_network_order(wl_event_msg_t *evt)
+{
+	/* Event struct members are carried in network byte order on the bus.
+	 * Convert all members back to network order.
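+	 * A minimal usage sketch (local names are illustrative only):
+	 *
+	 *	wl_event_msg_t evt = *src_evt;	/* host-order copy */
+	 *	wl_event_to_network_order(&evt);
+	 *	... evt is now safe to serialize onto the bus ...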
+	 */
+	evt->event_type = hton32(evt->event_type);
+	evt->flags = hton16(evt->flags);
+	evt->status = hton32(evt->status);
+	evt->reason = hton32(evt->reason);
+	evt->auth_type = hton32(evt->auth_type);
+	evt->datalen = hton32(evt->datalen);
+	evt->version = hton16(evt->version);
+}
diff --git a/drivers/net/wireless/bcmdhd/bcmsdh.c b/drivers/net/wireless/bcmdhd/bcmsdh.c
new file mode 100644
index 000000000000..42b29bd8574e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmsdh.c
@@ -0,0 +1,708 @@
+/*
+ * BCMSDH interface glue
+ * implement bcmsdh API for SDIOH driver
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: bcmsdh.c 514727 2014-11-12 03:02:48Z $
+ */
+
+/**
+ * @file bcmsdh.c
+ */
+
+/* ****************** BCMSDH Interface Functions *************************** */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include	/* BRCM API for SDIO clients (such as wl, dhd) */
+#include	/* common SDIO/controller interface */
+#include	/* SDIO device core hardware definitions. */
+#include	/* SDIO Device and Protocol Specs */
+
+#define SDIOH_API_ACCESS_RETRY_LIMIT	2
+const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL;
+
+/* local copy of the bcmsdh handle */
+bcmsdh_info_t *l_bcmsdh = NULL;
+
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+extern int
+sdioh_enable_hw_oob_intr(void *sdioh, bool enable);
+
+void
+bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable)
+{
+	sdioh_enable_hw_oob_intr(sdh->sdioh, enable);
+}
+#endif
+
+/* Attach BCMSDH layer to SDIO Host Controller Driver
+ *
+ * @param osh OSL Handle.
+ * @param sdioh Handle to the lower-layer SDIO host driver.
+ * @param regsva Out parameter; receives the backplane enumeration base
+ *               (SI_ENUM_BASE) used as the register window address.
+ *
+ * @return bcmsdh_info_t Handle to BCMSDH context.
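+ *
+ * A minimal call sequence, as a sketch (error handling elided; "sdioh" is
+ * assumed to come from a prior sdioh_attach()):
+ *
+ *	ulong regsva = 0;
+ *	bcmsdh_info_t *sdh = bcmsdh_attach(osh, sdioh, &regsva);
+ *	if (sdh != NULL) {
+ *		... issue bcmsdh_cfg_read()/bcmsdh_reg_read() calls ...
+ *		bcmsdh_detach(osh, sdh);
+ *	}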
+ */
+bcmsdh_info_t *
+bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva)
+{
+	bcmsdh_info_t *bcmsdh;
+
+	if ((bcmsdh = (bcmsdh_info_t *)MALLOC(osh, sizeof(bcmsdh_info_t))) == NULL) {
+		BCMSDH_ERROR(("bcmsdh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)bcmsdh, sizeof(bcmsdh_info_t));
+	bcmsdh->sdioh = sdioh;
+	bcmsdh->osh = osh;
+	bcmsdh->init_success = TRUE;
+	*regsva = SI_ENUM_BASE;
+
+	/* Seed the backplane window address with the enumeration base;
+	 * it is re-pointed on demand by bcmsdhsdio_set_sbaddr_window().
+	 */
+	bcmsdh->sbwad = SI_ENUM_BASE;
+
+	/* save the handle locally */
+	l_bcmsdh = bcmsdh;
+
+	return bcmsdh;
+}
+
+int
+bcmsdh_detach(osl_t *osh, void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	if (bcmsdh != NULL) {
+		MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t));
+	}
+
+	l_bcmsdh = NULL;
+
+	return 0;
+}
+
+int
+bcmsdh_iovar_op(void *sdh, const char *name,
+	void *params, int plen, void *arg, int len, bool set)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set);
+}
+
+bool
+bcmsdh_intr_query(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	bool on = FALSE;
+
+	ASSERT(bcmsdh);
+	status = sdioh_interrupt_query(bcmsdh->sdioh, &on);
+	/* report the queried state on success; claim no pending interrupt on failure */
+	if (SDIOH_API_SUCCESS(status))
+		return on;
+	else
+		return FALSE;
+}
+
+int
+bcmsdh_intr_enable(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_disable(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+int
+bcmsdh_intr_dereg(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	ASSERT(bcmsdh);
+
+	status = sdioh_interrupt_deregister(bcmsdh->sdioh);
+	return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
+#if defined(DHD_DEBUG)
+bool
+bcmsdh_intr_pending(void *sdh)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+
+	ASSERT(sdh);
+	return sdioh_interrupt_pending(bcmsdh->sdioh);
+}
+#endif
+
+
+int
+bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+{
+	ASSERT(sdh);
+
+	/* not supported yet */
+	return BCME_UNSUPPORTED;
+}
+
+/**
+ * Read from SDIO Configuration Space
+ * @param sdh SDIO Host context.
+ * @param fnc_num Function number to read from.
+ * @param addr Address to read from.
+ * @param err Error return.
+ * @return value read from SDIO configuration space.
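+ *
+ * Sketch of a typical call (SBSDIO_FUNC1_CHIPCLKCSR is just one example of
+ * a function-1 register; the exact register set depends on the chip):
+ *
+ *	int err = 0;
+ *	uint8 clkcsr = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ *		SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ *	if (err)
+ *		... the read failed and clkcsr is not meaningful ...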
+ */
+uint8
+bcmsdh_cfg_read(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	int32 retry = 0;
+#endif
+	uint8 data = 0;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	do {
+		if (retry)	/* wait 1 ms for the bus to settle before retrying */
+			OSL_DELAY(1000);
+#endif
+		status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+		fnc_num, addr, data));
+
+	return data;
+}
+
+void
+bcmsdh_cfg_write(void *sdh, uint fnc_num, uint32 addr, uint8 data, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	int32 retry = 0;
+#endif
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	do {
+		if (retry)	/* wait 1 ms for the bus to settle before retrying */
+			OSL_DELAY(1000);
+#endif
+		status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
+#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+	} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+#endif
+	if (err)
+		*err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR;
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint8data = 0x%x\n", __FUNCTION__,
+		fnc_num, addr, data));
+}
+
+uint32
+bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+	uint32 data = 0;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ, fnc_num,
+		addr, &data, 4);
+
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__,
+		fnc_num, addr, data));
+
+	return data;
+}
+
+void
+bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+
+	status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, fnc_num,
+		addr, &data, 4);
+
+	if (err)
+		*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
+
+	BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, uint32data = 0x%x\n", __FUNCTION__, fnc_num,
+		addr, data));
+}
+
+
+int
+bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length)
+{
+	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+	SDIOH_API_RC status;
+
+	uint8 *tmp_buf, *tmp_ptr;
+	uint8 *ptr;
+	bool ascii = func & ~0xf;
+	func &= 0x7;
+
+	if (!bcmsdh)
+		bcmsdh = l_bcmsdh;
+
+	ASSERT(bcmsdh->init_success);
+	ASSERT(cis);
+	ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT);
+
+	status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length);
+
+	if (ascii) {
+		/* Move binary bits to tmp and format them into the provided buffer.
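Each byte is emitted as a two-digit hex token followed by a space, with a line break after every 16 bytes, so the caller's buffer ends up holding a human-readable dump of the CIS.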
*/ + if ((tmp_buf = (uint8 *)MALLOC(bcmsdh->osh, length)) == NULL) { + BCMSDH_ERROR(("%s: out of memory\n", __FUNCTION__)); + return BCME_NOMEM; + } + bcopy(cis, tmp_buf, length); + for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) { + ptr += snprintf((char*)ptr, (cis + length - ptr - 4), + "%.2x ", *tmp_ptr & 0xff); + if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0) + ptr += snprintf((char *)ptr, (cis + length - ptr -4), "\n"); + } + MFREE(bcmsdh->osh, tmp_buf, length); + } + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + + +int +bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set) +{ + int err = 0; + uint bar0 = address & ~SBSDIO_SB_OFT_ADDR_MASK; + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (bar0 != bcmsdh->sbwad || force_set) { + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, + (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err); + if (!err) + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, + (address >> 16) & SBSDIO_SBADDRMID_MASK, &err); + if (!err) + bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, + (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err); + + if (!err) + bcmsdh->sbwad = bar0; + else + /* invalidate cached window var */ + bcmsdh->sbwad = 0; + + } + + return err; +} + +uint32 +bcmsdh_reg_read(void *sdh, uint32 addr, uint size) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint32 word = 0; + + BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __FUNCTION__, addr)); + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)) + return 0xFFFFFFFF; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + if (size == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, + SDIOH_READ, SDIO_FUNC_1, addr, &word, size); + + bcmsdh->regfail = !(SDIOH_API_SUCCESS(status)); + + BCMSDH_INFO(("uint32data = 0x%x\n", word)); + + /* if ok, return appropriately masked word */ + if (SDIOH_API_SUCCESS(status)) { + switch (size) { + case sizeof(uint8): + return (word & 0xff); + case sizeof(uint16): + return (word & 0xffff); + case sizeof(uint32): + return word; + default: + bcmsdh->regfail = TRUE; + + } + } + + /* otherwise, bad sdio access or invalid size */ + BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __FUNCTION__, addr, size)); + return 0xFFFFFFFF; +} + +uint32 +bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + int err = 0; + + BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n", + __FUNCTION__, addr, size*8, data)); + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + ASSERT(bcmsdh->init_success); + + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))) + return err; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + if (size == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_WRITE, SDIO_FUNC_1, + addr, &data, size); + bcmsdh->regfail = !(SDIOH_API_SUCCESS(status)); + + if (SDIOH_API_SUCCESS(status)) + return 0; + + BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n", + __FUNCTION__, data, addr, size)); + return 0xFFFFFFFF; +} + +bool +bcmsdh_regfail(void *sdh) +{ + return ((bcmsdh_info_t *)sdh)->regfail; +} + +int +bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle) +{ + bcmsdh_info_t *bcmsdh = 
(bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint incr_fix; + uint width; + int err = 0; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n", + __FUNCTION__, fn, addr, nbytes)); + + /* Async not implemented yet */ + ASSERT(!(flags & SDIO_REQ_ASYNC)); + if (flags & SDIO_REQ_ASYNC) + return BCME_UNSUPPORTED; + + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))) + return err; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + + incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; + width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; + if (width == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix, + SDIOH_READ, fn, addr, width, nbytes, buf, pkt); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR); +} + +int +bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + uint incr_fix; + uint width; + int err = 0; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + + BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n", + __FUNCTION__, fn, addr, nbytes)); + + /* Async not implemented yet */ + ASSERT(!(flags & SDIO_REQ_ASYNC)); + if (flags & SDIO_REQ_ASYNC) + return BCME_UNSUPPORTED; + + if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE))) + return err; + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + + incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; + width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; + if (width == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix, + SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt); + + return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR); +} + +int +bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + SDIOH_API_RC status; + + ASSERT(bcmsdh); + ASSERT(bcmsdh->init_success); + ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0); + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC, + (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1, + addr, 4, nbytes, buf, NULL); + + return (SDIOH_API_SUCCESS(status) ? 
0 : BCME_ERROR); +} + +int +bcmsdh_abort(void *sdh, uint fn) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_abort(bcmsdh->sdioh, fn); +} + +int +bcmsdh_start(void *sdh, int stage) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_start(bcmsdh->sdioh, stage); +} + +int +bcmsdh_stop(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_stop(bcmsdh->sdioh); +} + +int +bcmsdh_waitlockfree(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_waitlockfree(bcmsdh->sdioh); +} + + +int +bcmsdh_query_device(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + bcmsdh->vendevid = (VENDOR_BROADCOM << 16) | 0; + return (bcmsdh->vendevid); +} + +uint +bcmsdh_query_iofnum(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + return (sdioh_query_iofnum(bcmsdh->sdioh)); +} + +int +bcmsdh_reset(bcmsdh_info_t *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + return sdioh_sdio_reset(bcmsdh->sdioh); +} + +void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh) +{ + ASSERT(sdh); + return sdh->sdioh; +} + +/* Function to pass device-status bits to DHD. */ +uint32 +bcmsdh_get_dstatus(void *sdh) +{ + return 0; +} +uint32 +bcmsdh_cur_sbwad(void *sdh) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh; + + if (!bcmsdh) + bcmsdh = l_bcmsdh; + + return (bcmsdh->sbwad); +} + +void +bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev) +{ + return; +} + + +int +bcmsdh_sleep(void *sdh, bool enab) +{ +#ifdef SDIOH_SLEEP_ENABLED + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_sleep(sd, enab); +#else + return BCME_UNSUPPORTED; +#endif +} + +int +bcmsdh_gpio_init(void *sdh) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_gpio_init(sd); +} + +bool +bcmsdh_gpioin(void *sdh, uint32 gpio) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_gpioin(sd, gpio); +} + +int +bcmsdh_gpioouten(void *sdh, uint32 gpio) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_gpioouten(sd, gpio); +} + +int +bcmsdh_gpioout(void *sdh, uint32 gpio, bool enab) +{ + bcmsdh_info_t *p = (bcmsdh_info_t *)sdh; + sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh); + + return sdioh_gpioout(sd, gpio, enab); +} diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c new file mode 100644 index 000000000000..bf882610f263 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c @@ -0,0 +1,468 @@ +/* + * SDIO access interface for drivers - linux specific (pci only) + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdh_linux.c 514727 2014-11-12 03:02:48Z $ + */ + +/** + * @file bcmsdh_linux.c + */ + +#define __UNDEF_NO_VERSION__ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +extern void dhdsdio_isr(void * args); +#include +#include +#include +#if defined(CONFIG_ARCH_ODIN) +#include +#endif /* defined(CONFIG_ARCH_ODIN) */ +#include + +/* driver info, initialized when bcmsdh_register is called */ +static bcmsdh_driver_t drvinfo = {NULL, NULL, NULL, NULL}; + +typedef enum { + DHD_INTR_INVALID = 0, + DHD_INTR_INBAND, + DHD_INTR_HWOOB, + DHD_INTR_SWOOB +} DHD_HOST_INTR_TYPE; + +/* the BCMSDH module comprises the generic part (bcmsdh.c) and OS specific layer (e.g. + * bcmsdh_linux.c). Put all OS specific variables (e.g. irq number and flags) here rather + * than in the common structure bcmsdh_info. bcmsdh_info only keeps a handle (os_ctx) to this + * structure. + */ +typedef struct bcmsdh_os_info { + DHD_HOST_INTR_TYPE intr_type; + int oob_irq_num; /* valid when hardware or software oob in use */ + unsigned long oob_irq_flags; /* valid when hardware or software oob in use */ + bool oob_irq_registered; + bool oob_irq_enabled; + bool oob_irq_wake_enabled; + spinlock_t oob_irq_spinlock; + bcmsdh_cb_fn_t oob_irq_handler; + void *oob_irq_handler_context; + void *context; /* context returned from upper layer */ + void *sdioh; /* handle to lower layer (sdioh) */ + void *dev; /* handle to the underlying device */ + bool dev_wake_enabled; +} bcmsdh_os_info_t; + +/* debugging macros */ +#define SDLX_MSG(x) + +/** + * Checks to see if vendor and device IDs match a supported SDIO Host Controller. + */ +bool +bcmsdh_chipmatch(uint16 vendor, uint16 device) +{ + /* Add other vendors and devices as required */ + +#ifdef BCMSDIOH_STD + /* Check for Arasan host controller */ + if (vendor == VENDOR_SI_IMAGE) { + return (TRUE); + } + /* Check for BRCM 27XX Standard host controller */ + if (device == BCM27XX_SDIOH_ID && vendor == VENDOR_BROADCOM) { + return (TRUE); + } + /* Check for BRCM Standard host controller */ + if (device == SDIOH_FPGA_ID && vendor == VENDOR_BROADCOM) { + return (TRUE); + } + /* Check for TI PCIxx21 Standard host controller */ + if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI) { + return (TRUE); + } + if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI) { + return (TRUE); + } + /* Ricoh R5C822 Standard SDIO Host */ + if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH) { + return (TRUE); + } + /* JMicron Standard SDIO Host */ + if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON) { + return (TRUE); + } + +#endif /* BCMSDIOH_STD */ +#ifdef BCMSDIOH_SPI + /* This is the PciSpiHost. 
 */
+	if (device == SPIH_FPGA_ID && vendor == VENDOR_BROADCOM) {
+		printf("Found PCI SPI Host Controller\n");
+		return (TRUE);
+	}
+
+#endif /* BCMSDIOH_SPI */
+
+	return (FALSE);
+}
+
+void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+	uint bus_num, uint slot_num)
+{
+	ulong regs;
+	bcmsdh_info_t *bcmsdh;
+	uint32 vendevid;
+	bcmsdh_os_info_t *bcmsdh_osinfo = NULL;
+
+	bcmsdh = bcmsdh_attach(osh, sdioh, &regs);
+	if (bcmsdh == NULL) {
+		SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+		goto err;
+	}
+	bcmsdh_osinfo = MALLOC(osh, sizeof(bcmsdh_os_info_t));
+	if (bcmsdh_osinfo == NULL) {
+		SDLX_MSG(("%s: failed to allocate bcmsdh_os_info_t\n", __FUNCTION__));
+		goto err;
+	}
+	bzero((char *)bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+	bcmsdh->os_cxt = bcmsdh_osinfo;
+	bcmsdh_osinfo->sdioh = sdioh;
+	bcmsdh_osinfo->dev = dev;
+	osl_set_bus_handle(osh, bcmsdh);
+
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (dev && device_init_wakeup(dev, true) == 0)
+		bcmsdh_osinfo->dev_wake_enabled = TRUE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+#if defined(OOB_INTR_ONLY)
+	spin_lock_init(&bcmsdh_osinfo->oob_irq_spinlock);
+	/* Get customer specific OOB IRQ parameters: IRQ number and IRQ flags */
+	bcmsdh_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter_info,
+		&bcmsdh_osinfo->oob_irq_flags);
+	if (bcmsdh_osinfo->oob_irq_num < 0) {
+		SDLX_MSG(("%s: Host OOB irq is not defined\n", __FUNCTION__));
+		goto err;
+	}
+#endif /* defined(OOB_INTR_ONLY) */
+
+	/* Read the vendor/device ID from the CIS */
+	vendevid = bcmsdh_query_device(bcmsdh);
+	/* try to attach to the target device */
+	bcmsdh_osinfo->context = drvinfo.probe((vendevid >> 16), (vendevid & 0xFFFF), bus_num,
+		slot_num, 0, bus_type, (void *)regs, osh, bcmsdh);
+	if (bcmsdh_osinfo->context == NULL) {
+		SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	return bcmsdh;
+
+	/* error handling */
+err:
+	if (bcmsdh != NULL)
+		bcmsdh_detach(osh, bcmsdh);
+	if (bcmsdh_osinfo != NULL)
+		MFREE(osh, bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+	return NULL;
+}
+
+int bcmsdh_remove(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+	if (bcmsdh_osinfo->dev)
+		device_init_wakeup(bcmsdh_osinfo->dev, false);
+	bcmsdh_osinfo->dev_wake_enabled = FALSE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+
+	drvinfo.remove(bcmsdh_osinfo->context);
+	MFREE(bcmsdh->osh, bcmsdh->os_cxt, sizeof(bcmsdh_os_info_t));
+	bcmsdh_detach(bcmsdh->osh, bcmsdh);
+
+	return 0;
+}
+
+int bcmsdh_suspend(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	if (drvinfo.suspend && drvinfo.suspend(bcmsdh_osinfo->context))
+		return -EBUSY;
+	return 0;
+}
+
+int bcmsdh_resume(bcmsdh_info_t *bcmsdh)
+{
+	bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+
+	if (drvinfo.resume)
+		return drvinfo.resume(bcmsdh_osinfo->context);
+	return 0;
+}
+
+extern int bcmsdh_register_client_driver(void);
+extern void bcmsdh_unregister_client_driver(void);
+extern int sdio_func_reg_notify(void* semaphore);
+extern void sdio_func_unreg_notify(void);
+
+#if defined(BCMLXSDMMC)
+int bcmsdh_reg_sdio_notify(void* semaphore)
+{
+	return sdio_func_reg_notify(semaphore);
+}
+
+void bcmsdh_unreg_sdio_notify(void)
+{
+	sdio_func_unreg_notify();
+}
+#endif /*
defined(BCMLXSDMMC) */ + +int +bcmsdh_register(bcmsdh_driver_t *driver) +{ + int error = 0; + + drvinfo = *driver; + SDLX_MSG(("%s: register client driver\n", __FUNCTION__)); + error = bcmsdh_register_client_driver(); + if (error) + SDLX_MSG(("%s: failed %d\n", __FUNCTION__, error)); + + return error; +} + +void +bcmsdh_unregister(void) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + if (bcmsdh_pci_driver.node.next == NULL) + return; +#endif + + bcmsdh_unregister_client_driver(); +} + +void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *bcmsdh) +{ +#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + pm_stay_awake(bcmsdh_osinfo->dev); +#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */ +} + +void bcmsdh_dev_relax(bcmsdh_info_t *bcmsdh) +{ +#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + pm_relax(bcmsdh_osinfo->dev); +#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */ +} + +bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *bcmsdh) +{ + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + return bcmsdh_osinfo->dev_wake_enabled; +} + +#if defined(OOB_INTR_ONLY) +void bcmsdh_oob_intr_set(bcmsdh_info_t *bcmsdh, bool enable) +{ + unsigned long flags; + bcmsdh_os_info_t *bcmsdh_osinfo; + + if (!bcmsdh) + return; + + bcmsdh_osinfo = bcmsdh->os_cxt; + spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags); + if (bcmsdh_osinfo->oob_irq_enabled != enable) { + if (enable) + enable_irq(bcmsdh_osinfo->oob_irq_num); + else + disable_irq_nosync(bcmsdh_osinfo->oob_irq_num); + bcmsdh_osinfo->oob_irq_enabled = enable; + } + spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags); +} + +static irqreturn_t wlan_oob_irq(int irq, void *dev_id) +{ + bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)dev_id; + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + bcmsdh_oob_intr_set(bcmsdh, FALSE); + bcmsdh_osinfo->oob_irq_handler(bcmsdh_osinfo->oob_irq_handler_context); + + return IRQ_HANDLED; +} + +int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler, + void* oob_irq_handler_context) +{ + int err = 0; + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + SDLX_MSG(("%s: Enter\n", __FUNCTION__)); + if (bcmsdh_osinfo->oob_irq_registered) { + SDLX_MSG(("%s: irq is already registered\n", __FUNCTION__)); + return -EBUSY; + } + SDLX_MSG(("%s OOB irq=%d flags=%X \n", __FUNCTION__, + (int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags)); + bcmsdh_osinfo->oob_irq_handler = oob_irq_handler; + bcmsdh_osinfo->oob_irq_handler_context = oob_irq_handler_context; +#if defined(CONFIG_ARCH_ODIN) + err = odin_gpio_sms_request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq, + bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh); +#else + err = request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq, + bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh); +#endif /* defined(CONFIG_ARCH_ODIN) */ + if (err) { + SDLX_MSG(("%s: request_irq failed with %d\n", __FUNCTION__, err)); + return err; + } + + err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num); + if (!err) + bcmsdh_osinfo->oob_irq_wake_enabled = TRUE; + bcmsdh_osinfo->oob_irq_enabled = TRUE; + bcmsdh_osinfo->oob_irq_registered = TRUE; + return err; +} + +void bcmsdh_oob_intr_unregister(bcmsdh_info_t *bcmsdh) +{ + int err = 0; + bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt; + + 
SDLX_MSG(("%s: Enter\n", __FUNCTION__)); + if (!bcmsdh_osinfo->oob_irq_registered) { + SDLX_MSG(("%s: irq is not registered\n", __FUNCTION__)); + return; + } + if (bcmsdh_osinfo->oob_irq_wake_enabled) { + err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num); + if (!err) + bcmsdh_osinfo->oob_irq_wake_enabled = FALSE; + } + if (bcmsdh_osinfo->oob_irq_enabled) { + disable_irq(bcmsdh_osinfo->oob_irq_num); + bcmsdh_osinfo->oob_irq_enabled = FALSE; + } + free_irq(bcmsdh_osinfo->oob_irq_num, bcmsdh); + bcmsdh_osinfo->oob_irq_registered = FALSE; +} +#endif + +/* Module parameters specific to each host-controller driver */ + +extern uint sd_msglevel; /* Debug message level */ +module_param(sd_msglevel, uint, 0); + +extern uint sd_power; /* 0 = SD Power OFF, 1 = SD Power ON. */ +module_param(sd_power, uint, 0); + +extern uint sd_clock; /* SD Clock Control, 0 = SD Clock OFF, 1 = SD Clock ON */ +module_param(sd_clock, uint, 0); + +extern uint sd_divisor; /* Divisor (-1 means external clock) */ +module_param(sd_divisor, uint, 0); + +extern uint sd_sdmode; /* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */ +module_param(sd_sdmode, uint, 0); + +extern uint sd_hiok; /* Ok to use hi-speed mode */ +module_param(sd_hiok, uint, 0); + +extern uint sd_f2_blocksize; +module_param(sd_f2_blocksize, int, 0); + +#ifdef BCMSDIOH_STD +extern int sd_uhsimode; +module_param(sd_uhsimode, int, 0); +extern uint sd_tuning_period; +module_param(sd_tuning_period, uint, 0); +extern int sd_delay_value; +module_param(sd_delay_value, uint, 0); + +/* SDIO Drive Strength for UHSI mode specific to SDIO3.0 */ +extern char dhd_sdiod_uhsi_ds_override[2]; +module_param_string(dhd_sdiod_uhsi_ds_override, dhd_sdiod_uhsi_ds_override, 2, 0); + +#endif + +#ifdef BCMSDH_MODULE +EXPORT_SYMBOL(bcmsdh_attach); +EXPORT_SYMBOL(bcmsdh_detach); +EXPORT_SYMBOL(bcmsdh_intr_query); +EXPORT_SYMBOL(bcmsdh_intr_enable); +EXPORT_SYMBOL(bcmsdh_intr_disable); +EXPORT_SYMBOL(bcmsdh_intr_reg); +EXPORT_SYMBOL(bcmsdh_intr_dereg); + +#if defined(DHD_DEBUG) +EXPORT_SYMBOL(bcmsdh_intr_pending); +#endif + +EXPORT_SYMBOL(bcmsdh_devremove_reg); +EXPORT_SYMBOL(bcmsdh_cfg_read); +EXPORT_SYMBOL(bcmsdh_cfg_write); +EXPORT_SYMBOL(bcmsdh_cis_read); +EXPORT_SYMBOL(bcmsdh_reg_read); +EXPORT_SYMBOL(bcmsdh_reg_write); +EXPORT_SYMBOL(bcmsdh_regfail); +EXPORT_SYMBOL(bcmsdh_send_buf); +EXPORT_SYMBOL(bcmsdh_recv_buf); + +EXPORT_SYMBOL(bcmsdh_rwdata); +EXPORT_SYMBOL(bcmsdh_abort); +EXPORT_SYMBOL(bcmsdh_query_device); +EXPORT_SYMBOL(bcmsdh_query_iofnum); +EXPORT_SYMBOL(bcmsdh_iovar_op); +EXPORT_SYMBOL(bcmsdh_register); +EXPORT_SYMBOL(bcmsdh_unregister); +EXPORT_SYMBOL(bcmsdh_chipmatch); +EXPORT_SYMBOL(bcmsdh_reset); +EXPORT_SYMBOL(bcmsdh_waitlockfree); + +EXPORT_SYMBOL(bcmsdh_get_dstatus); +EXPORT_SYMBOL(bcmsdh_cfg_read_word); +EXPORT_SYMBOL(bcmsdh_cfg_write_word); +EXPORT_SYMBOL(bcmsdh_cur_sbwad); +EXPORT_SYMBOL(bcmsdh_chipinfo); + +#endif /* BCMSDH_MODULE */ diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c new file mode 100644 index 000000000000..e1c0fb721176 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c @@ -0,0 +1,1475 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at 
http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdh_sdmmc.c 591104 2015-10-07 04:45:18Z $ + */ +#include + +#include +#include +#include +#include +#include /* SDIO Device and Protocol Specs */ +#include /* Standard SDIO Host Controller Specification */ +#include /* bcmsdh to/from specific controller APIs */ +#include /* ioctl/iovars */ + +#include +#include +#include +#include +#include + +#include +#include + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) +#include +extern volatile bool dhd_mmc_suspend; +#endif +#include "bcmsdh_sdmmc.h" + +#ifndef BCMSDH_MODULE +extern int sdio_function_init(void); +extern void sdio_function_cleanup(void); +#endif /* BCMSDH_MODULE */ + +#if !defined(OOB_INTR_ONLY) +static void IRQHandler(struct sdio_func *func); +static void IRQHandlerF2(struct sdio_func *func); +#endif /* !defined(OOB_INTR_ONLY) */ +static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr); +int __attribute__((weak)) sdio_reset_comm(struct mmc_card *card) +{ + return 0; +} + +#define DEFAULT_SDIO_F2_BLKSIZE 512 +#ifndef CUSTOM_SDIO_F2_BLKSIZE +#define CUSTOM_SDIO_F2_BLKSIZE DEFAULT_SDIO_F2_BLKSIZE +#endif + +#define MAX_IO_RW_EXTENDED_BLK 511 + +uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */ +uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE; +uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */ + +uint sd_power = 1; /* Default to SD Slot powered ON */ +uint sd_clock = 1; /* Default to SD Clock turned ON */ +uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */ +uint sd_msglevel = 0x01; +uint sd_use_dma = TRUE; + +#ifndef CUSTOM_RXCHAIN +#define CUSTOM_RXCHAIN 0 +#endif + +DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait); +DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait); + +#define DMA_ALIGN_MASK 0x03 +#define MMC_SDIO_ABORT_RETRY_LIMIT 5 + +int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data); + +static int +sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd) +{ + int err_ret; + uint32 fbraddr; + uint8 func; + + sd_trace(("%s\n", __FUNCTION__)); + + /* Get the Card's common CIS address */ + sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0); + sd->func_cis_ptr[0] = sd->com_cis_ptr; + sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); + + /* Get the Card's function CIS (for each function) */ + for (fbraddr = SDIOD_FBR_STARTADDR, func = 1; + func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) { + sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr); + sd_info(("%s: Function %d CIS Ptr = 
0x%x\n", + __FUNCTION__, func, sd->func_cis_ptr[func])); + } + + sd->func_cis_ptr[0] = sd->com_cis_ptr; + sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr)); + + /* Enable Function 1 */ + sdio_claim_host(sd->func[1]); + err_ret = sdio_enable_func(sd->func[1]); + sdio_release_host(sd->func[1]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret)); + } + + return FALSE; +} + +/* + * Public entry points & extern's + */ +extern sdioh_info_t * +sdioh_attach(osl_t *osh, struct sdio_func *func) +{ + sdioh_info_t *sd = NULL; + int err_ret; + + sd_trace(("%s\n", __FUNCTION__)); + + if (func == NULL) { + sd_err(("%s: sdio function device is NULL\n", __FUNCTION__)); + return NULL; + } + + if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { + sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); + return NULL; + } + bzero((char *)sd, sizeof(sdioh_info_t)); + sd->osh = osh; + sd->fake_func0.num = 0; + sd->fake_func0.card = func->card; + sd->func[0] = &sd->fake_func0; + sd->func[1] = func->card->sdio_func[0]; + sd->func[2] = func->card->sdio_func[1]; + sd->num_funcs = 2; + sd->sd_blockmode = TRUE; + sd->use_client_ints = TRUE; + sd->client_block_size[0] = 64; + sd->use_rxchain = CUSTOM_RXCHAIN; + if (sd->func[1] == NULL || sd->func[2] == NULL) { + sd_err(("%s: func 1 or 2 is null \n", __FUNCTION__)); + goto fail; + } + sdio_set_drvdata(sd->func[1], sd); + + sdio_claim_host(sd->func[1]); + sd->client_block_size[1] = 64; + err_ret = sdio_set_block_size(sd->func[1], 64); + sdio_release_host(sd->func[1]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret)); + goto fail; + } + + sdio_claim_host(sd->func[2]); + sd->client_block_size[2] = sd_f2_blocksize; + err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize); + sdio_release_host(sd->func[2]); + if (err_ret) { + sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n", + sd_f2_blocksize, err_ret)); + goto fail; + } + + sdioh_sdmmc_card_enablefuncs(sd); + + sd_trace(("%s: Done\n", __FUNCTION__)); + return sd; + +fail: + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + return NULL; +} + + +extern SDIOH_API_RC +sdioh_detach(osl_t *osh, sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + + if (sd) { + + /* Disable Function 2 */ + if (sd->func[2]) { + sdio_claim_host(sd->func[2]); + sdio_disable_func(sd->func[2]); + sdio_release_host(sd->func[2]); + } + + /* Disable Function 1 */ + if (sd->func[1]) { + sdio_claim_host(sd->func[1]); + sdio_disable_func(sd->func[1]); + sdio_release_host(sd->func[1]); + } + + sd->func[1] = NULL; + sd->func[2] = NULL; + + MFREE(sd->osh, sd, sizeof(sdioh_info_t)); + } + return SDIOH_API_RC_SUCCESS; +} + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) + +extern SDIOH_API_RC +sdioh_enable_func_intr(sdioh_info_t *sd) +{ + uint8 reg; + int err; + + if (sd->func[0] == NULL) { + sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + sdio_claim_host(sd->func[0]); + reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err); + if (err) { + sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err)); + sdio_release_host(sd->func[0]); + return SDIOH_API_RC_FAIL; + } + /* Enable F1 and F2 interrupts, clear master enable */ + reg &= ~INTR_CTL_MASTER_EN; + reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN); + sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err); + sdio_release_host(sd->func[0]); + + if (err) { + sd_err(("%s: error for write 
SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_disable_func_intr(sdioh_info_t *sd)
+{
+	uint8 reg;
+	int err;
+
+	if (sd->func[0] == NULL) {
+		sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sdio_claim_host(sd->func[0]);
+	reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+	if (err) {
+		sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		sdio_release_host(sd->func[0]);
+		return SDIOH_API_RC_FAIL;
+	}
+	reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+	/* Disable master interrupt with the last function interrupt */
+	if (!(reg & 0xFE))
+		reg = 0;
+	sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+	sdio_release_host(sd->func[0]);
+
+	if (err) {
+		sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+/* Configure callback to client when we receive a client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	if (fn == NULL) {
+		sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+#if !defined(OOB_INTR_ONLY)
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+
+	/* register and unmask irq */
+	if (sd->func[2]) {
+		sdio_claim_host(sd->func[2]);
+		sdio_claim_irq(sd->func[2], IRQHandlerF2);
+		sdio_release_host(sd->func[2]);
+	}
+
+	if (sd->func[1]) {
+		sdio_claim_host(sd->func[1]);
+		sdio_claim_irq(sd->func[1], IRQHandler);
+		sdio_release_host(sd->func[1]);
+	}
+#elif defined(HW_OOB)
+	sdioh_enable_func_intr(sd);
+#endif /* !defined(OOB_INTR_ONLY) */
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+
+#if !defined(OOB_INTR_ONLY)
+	if (sd->func[1]) {
+		/* release and mask irq */
+		sdio_claim_host(sd->func[1]);
+		sdio_release_irq(sd->func[1]);
+		sdio_release_host(sd->func[1]);
+	}
+
+	if (sd->func[2]) {
+		/* Claim host controller F2 */
+		sdio_claim_host(sd->func[2]);
+		sdio_release_irq(sd->func[2]);
+		/* Release host controller F2 */
+		sdio_release_host(sd->func[2]);
+	}
+
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+#elif defined(HW_OOB)
+	sdioh_disable_func_intr(sd);
+#endif /* !defined(OOB_INTR_ONLY) */
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	return (0);
+}
+#endif
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_CLOCK,
+	IOV_RXCHAIN
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
+	{"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 },
+	{"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
+	{"sd_dma",
IOV_DMA, 0, IOVT_BOOL, 0 }, + {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 }, + {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 }, + {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 }, + {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 }, + {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 }, + {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 }, + {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100}, + {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0 }, + {"sd_rxchain", IOV_RXCHAIN, 0, IOVT_BOOL, 0 }, + {NULL, 0, 0, 0, 0 } +}; + +int +sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + int32 int_val = 0; + bool bool_val; + uint32 actionid; + + ASSERT(name); + ASSERT(len >= 0); + + /* Get must have return space; Set does not take qualifiers */ + ASSERT(set || (arg && len)); + ASSERT(!set || (!params && !plen)); + + sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); + + if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) + goto exit; + + /* Set up params so get and set can share the convenience variables */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + val_size = sizeof(int); + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + BCM_REFERENCE(bool_val); + + actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + switch (actionid) { + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)sd_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + sd_msglevel = int_val; + break; + + case IOV_GVAL(IOV_BLOCKMODE): + int_val = (int32)si->sd_blockmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKMODE): + si->sd_blockmode = (bool)int_val; + /* Haven't figured out how to make non-block mode with DMA */ + break; + + case IOV_GVAL(IOV_BLOCKSIZE): + if ((uint32)int_val > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + int_val = (int32)si->client_block_size[int_val]; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_BLOCKSIZE): + { + uint func = ((uint32)int_val >> 16); + uint blksize = (uint16)int_val; + uint maxsize; + + if (func > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + + switch (func) { + case 0: maxsize = 32; break; + case 1: maxsize = BLOCK_SIZE_4318; break; + case 2: maxsize = BLOCK_SIZE_4328; break; + default: maxsize = 0; + } + if (blksize > maxsize) { + bcmerror = BCME_BADARG; + break; + } + if (!blksize) { + blksize = maxsize; + } + + /* Now set it */ + si->client_block_size[func] = blksize; + +#ifdef USE_DYNAMIC_F2_BLKSIZE + if (si->func[func] == NULL) { + sd_err(("%s: SDIO Device not present\n", __FUNCTION__)); + bcmerror = BCME_NORESOURCE; + break; + } + sdio_claim_host(si->func[func]); + bcmerror = sdio_set_block_size(si->func[func], blksize); + if (bcmerror) + sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n", + __FUNCTION__, func, blksize, bcmerror)); + sdio_release_host(si->func[func]); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + break; + } + + case IOV_GVAL(IOV_RXCHAIN): + int_val = (int32)si->use_rxchain; + bcopy(&int_val, 
arg, val_size); + break; + + case IOV_GVAL(IOV_DMA): + int_val = (int32)si->sd_use_dma; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DMA): + si->sd_use_dma = (bool)int_val; + break; + + case IOV_GVAL(IOV_USEINTS): + int_val = (int32)si->use_client_ints; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_USEINTS): + si->use_client_ints = (bool)int_val; + if (si->use_client_ints) + si->intmask |= CLIENT_INTR; + else + si->intmask &= ~CLIENT_INTR; + + break; + + case IOV_GVAL(IOV_DIVISOR): + int_val = (uint32)sd_divisor; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DIVISOR): + sd_divisor = int_val; + break; + + case IOV_GVAL(IOV_POWER): + int_val = (uint32)sd_power; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POWER): + sd_power = int_val; + break; + + case IOV_GVAL(IOV_CLOCK): + int_val = (uint32)sd_clock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CLOCK): + sd_clock = int_val; + break; + + case IOV_GVAL(IOV_SDMODE): + int_val = (uint32)sd_sdmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDMODE): + sd_sdmode = int_val; + break; + + case IOV_GVAL(IOV_HISPEED): + int_val = (uint32)sd_hiok; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HISPEED): + sd_hiok = int_val; + break; + + case IOV_GVAL(IOV_NUMINTS): + int_val = (int32)si->intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_NUMLOCALINTS): + int_val = (int32)0; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_HOSTREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + + if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { + sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); + bcmerror = BCME_BADARG; + break; + } + + sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__, + (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32), + sd_ptr->offset)); + if (sd_ptr->offset & 1) + int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */ + else if (sd_ptr->offset & 2) + int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */ + else + int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */ + + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_HOSTREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + + if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { + sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); + bcmerror = BCME_BADARG; + break; + } + + sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value, + (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 
16 : 32),
+			sd_ptr->offset));
+		break;
+	}
+
+	case IOV_GVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = 0;
+
+		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+
+		int_val = (int)data;
+		bcopy(&int_val, arg, sizeof(int_val));
+		break;
+	}
+
+	case IOV_SVAL(IOV_DEVREG):
+	{
+		sdreg_t *sd_ptr = (sdreg_t *)params;
+		uint8 data = (uint8)sd_ptr->value;
+
+		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+			bcmerror = BCME_SDIO_ERROR;
+			break;
+		}
+		break;
+	}
+
+	default:
+		bcmerror = BCME_UNSUPPORTED;
+		break;
+	}
+exit:
+
+	return bcmerror;
+}
+
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+
+SDIOH_API_RC
+sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
+{
+	SDIOH_API_RC status;
+	uint8 data;
+
+	if (enable)
+		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
+	else
+		data = SDIO_SEPINT_ACT_HI;	/* disable hw oob interrupt */
+
+	status = sdioh_request_byte(sd, SDIOH_WRITE, 0, SDIOD_CCCR_BRCM_SEPINT, &data);
+	return status;
+}
+#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	SDIOH_API_RC status;
+	/* No lock needed since sdioh_request_byte does locking */
+	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+	return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+	/* No lock needed since sdioh_request_byte does locking */
+	SDIOH_API_RC status;
+	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+	return status;
+}
+
+static int
+sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
+{
+	/* read 24 bits and return valid 17 bit addr */
+	int i;
+	uint32 scratch, regdata;
+	uint8 *ptr = (uint8 *)&scratch;
+	for (i = 0; i < 3; i++) {
+		if ((sdioh_sdmmc_card_regread(sd, 0, regaddr, 1, &regdata)) != SUCCESS)
+			sd_err(("%s: Can't read!\n", __FUNCTION__));
+
+		*ptr++ = (uint8) regdata;
+		regaddr++;
+	}
+
+	/* Only the lower 17-bits are valid */
+	scratch = ltoh32(scratch);
+	scratch &= 0x0001FFFF;
+	return (scratch);
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+	uint32 count;
+	int offset;
+	uint32 foo;
+	uint8 *cis = cisd;
+
+	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));
+
+	if (!sd->func_cis_ptr[func]) {
+		bzero(cis, length);
+		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
+
+	for (count = 0; count < length; count++) {
+		offset = sd->func_cis_ptr[func] + count;
+		if (sdioh_sdmmc_card_regread(sd, 0, offset, 1, &foo) < 0) {
+			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+			return SDIOH_API_RC_FAIL;
+		}
+
+		*cis = (uint8)(foo & 0xff);
+		cis++;
+	}
+
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int err_ret = 0;
+#if defined(MMC_SDIO_ABORT)
+	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+
+	sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	if (rw) { /* CMD52 Write */
+		if (func == 0) {
+			/* Can only directly write to some F0 registers. Handle F2 enable
+			 * as a special case.
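+			 * For reference, a caller would enable F2 through this
+			 * path roughly as follows (a sketch; the real sequence
+			 * lives in the DHD bus layer):
+			 *
+			 *	uint8 en = SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2;
+			 *	sdioh_request_byte(sd, SDIOH_WRITE, 0,
+			 *		SDIOD_CCCR_IOEN, &en);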
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+	int err_ret = 0;
+#if defined(MMC_SDIO_ABORT)
+	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+
+	sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	if (rw) { /* CMD52 Write */
+		if (func == 0) {
+			/* Can only directly write to some F0 registers. Handle F2 enable
+			 * as a special case.
+			 */
+			if (regaddr == SDIOD_CCCR_IOEN) {
+				if (sd->func[2]) {
+					sdio_claim_host(sd->func[2]);
+					if (*byte & SDIO_FUNC_ENABLE_2) {
+						/* Enable Function 2 */
+						err_ret = sdio_enable_func(sd->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
+								err_ret));
+						}
+					} else {
+						/* Disable Function 2 */
+						err_ret = sdio_disable_func(sd->func[2]);
+						if (err_ret) {
+							sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
+								err_ret));
+						}
+					}
+					sdio_release_host(sd->func[2]);
+				}
+			}
+#if defined(MMC_SDIO_ABORT)
+			/* to allow abort command through F1 */
+			else if (regaddr == SDIOD_CCCR_IOABORT) {
+				while (sdio_abort_retry--) {
+					if (sd->func[func]) {
+						sdio_claim_host(sd->func[func]);
+						/*
+						 * this sdio_f0_writeb() can be replaced with
+						 * another api depending upon MMC driver change.
+						 * As of this time, this is a temporary one.
+						 */
+						sdio_writeb(sd->func[func],
+							*byte, regaddr, &err_ret);
+						sdio_release_host(sd->func[func]);
+					}
+					if (!err_ret)
+						break;
+				}
+			}
+#endif /* MMC_SDIO_ABORT */
+			else if (regaddr < 0xF0) {
+				sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
+			} else {
+				/* Claim host controller, perform F0 write, and release */
+				if (sd->func[func]) {
+					sdio_claim_host(sd->func[func]);
+					sdio_f0_writeb(sd->func[func],
+						*byte, regaddr, &err_ret);
+					sdio_release_host(sd->func[func]);
+				}
+			}
+		} else {
+			/* Claim host controller, perform Fn write, and release */
+			if (sd->func[func]) {
+				sdio_claim_host(sd->func[func]);
+				sdio_writeb(sd->func[func], *byte, regaddr, &err_ret);
+				sdio_release_host(sd->func[func]);
+			}
+		}
+	} else { /* CMD52 Read */
+		/* Claim host controller, perform Fn read, and release */
+		if (sd->func[func]) {
+			sdio_claim_host(sd->func[func]);
+			if (func == 0) {
+				*byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret);
+			} else {
+				*byte = sdio_readb(sd->func[func], regaddr, &err_ret);
+			}
+			sdio_release_host(sd->func[func]);
+		}
+	}
+
+	if (err_ret) {
+		if ((regaddr == 0x1001F) && ((err_ret == -ETIMEDOUT) || (err_ret == -EILSEQ))) {
+			/* err_ret deliberately ignored for this register */
+		} else {
+			sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+				rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
+		}
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
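A CMD52 access through this entry point is a single call; a hedged sketch of a caller reading the CCCR I/O-enable register through function 0 (assuming SDIOD_CCCR_IOEN carries the spec-defined CCCR offset, exactly as used above):

	/* sketch: CMD52 byte read of the I/O-enable bitmap via F0 */
	uint8 ioen = 0;

	if (sdioh_request_byte(sd, SDIOH_READ, 0, SDIOD_CCCR_IOEN, &ioen) ==
	    SDIOH_API_RC_SUCCESS)
		sd_info(("IOEx bitmap = 0x%02x\n", ioen));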
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+	uint32 *word, uint nbytes)
+{
+	int err_ret = SDIOH_API_RC_FAIL;
+#if defined(MMC_SDIO_ABORT)
+	int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
+#endif
+
+	if (func == 0) {
+		sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+		__FUNCTION__, cmd_type, rw, func, addr, nbytes));
+
+	DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+	/* Claim host controller */
+	sdio_claim_host(sd->func[func]);
+
+	if (rw) { /* CMD52 Write */
+		if (nbytes == 4) {
+			sdio_writel(sd->func[func], *word, addr, &err_ret);
+		} else if (nbytes == 2) {
+			sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret);
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	} else { /* CMD52 Read */
+		if (nbytes == 4) {
+			*word = sdio_readl(sd->func[func], addr, &err_ret);
+		} else if (nbytes == 2) {
+			*word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF;
+		} else {
+			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
+		}
+	}
+
+	/* Release host controller */
+	sdio_release_host(sd->func[func]);
+
+	if (err_ret) {
+#if defined(MMC_SDIO_ABORT)
+		/* Any error on a CMD53 transaction should abort that function using function 0. */
+		while (sdio_abort_retry--) {
+			if (sd->func[0]) {
+				sdio_claim_host(sd->func[0]);
+				/*
+				 * this sdio_f0_writeb() can be replaced with another api
+				 * depending upon MMC driver change.
+				 * As of this time, this is a temporary one.
+				 */
+				sdio_writeb(sd->func[0],
+					func, SDIOD_CCCR_IOABORT, &err_ret);
+				sdio_release_host(sd->func[0]);
+			}
+			if (!err_ret)
+				break;
+		}
+		if (err_ret)
+#endif /* MMC_SDIO_ABORT */
+		{
+			sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
+				rw ? "Write" : "Read", err_ret));
+		}
+	}
+
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
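The error path above recovers by writing the failing function's number into the CCCR I/O-abort register through function 0, which terminates the outstanding transaction on that function. In isolation, the abort write is just this (a sketch using the same kernel SDIO helpers):

	/* sketch: abort the outstanding CMD53 on 'func' via the F0 I/O-abort register */
	int err = 0;

	sdio_claim_host(sd->func[0]);
	sdio_writeb(sd->func[0], (uint8)func, SDIOD_CCCR_IOABORT, &err);
	sdio_release_host(sd->func[0]);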
+
+static SDIOH_API_RC
+sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+	uint addr, void *pkt)
+{
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	int err_ret = 0;
+	void *pnext;
+	uint ttl_len, pkt_offset;
+	uint blk_num;
+	uint blk_size;
+	uint max_blk_count;
+	uint max_req_size;
+	struct mmc_request mmc_req;
+	struct mmc_command mmc_cmd;
+	struct mmc_data mmc_dat;
+	uint32 sg_count;
+	struct sdio_func *sdio_func = sd->func[func];
+	struct mmc_host *host = sdio_func->card->host;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	ASSERT(pkt);
+	DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+	blk_size = sd->client_block_size[func];
+	max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK);
+	max_req_size = min(max_blk_count * blk_size, host->max_req_size);
+
+	pkt_offset = 0;
+	pnext = pkt;
+
+	while (pnext != NULL) {
+		ttl_len = 0;
+		sg_count = 0;
+		memset(&mmc_req, 0, sizeof(struct mmc_request));
+		memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+		memset(&mmc_dat, 0, sizeof(struct mmc_data));
+		sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list));
+
+		/* Set up scatter-gather DMA descriptors. This loop finds the maximum
+		 * data we can transfer with one CMD53; blocks per command are limited by
+		 * the host max_req_size and the 9-bit max block count. When the total
+		 * length of the packet chain is bigger than max_req_size, use multiple
+		 * SD_IO_RW_EXTENDED commands (each transfer is still block aligned).
+		 */
+		while (pnext != NULL && ttl_len < max_req_size) {
+			int pkt_len;
+			int sg_data_size;
+			uint8 *pdata = (uint8 *)PKTDATA(sd->osh, pnext);
+
+			ASSERT(pdata != NULL);
+			pkt_len = PKTLEN(sd->osh, pnext);
+			sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len));
+			/* sg_count is unlikely to be larger than the array size, and this is
+			 * NOT something we can handle here, but in case it happens, PLEASE put
+			 * a restriction on max tx/glom count (based on host->max_segs).
+			 */
+			if (sg_count >= ARRAYSIZE(sd->sg_list)) {
+				sd_err(("%s: sg list entries exceed limit\n", __FUNCTION__));
+				return (SDIOH_API_RC_FAIL);
+			}
+			pdata += pkt_offset;
+
+			sg_data_size = pkt_len - pkt_offset;
+			if (sg_data_size > max_req_size - ttl_len)
+				sg_data_size = max_req_size - ttl_len;
+			/* some platforms put a restriction on the data size of each scatter-gather
+			 * DMA descriptor; use multiple sg buffers when xfer_size is bigger than
+			 * max_seg_size
+			 */
+			if (sg_data_size > host->max_seg_size)
+				sg_data_size = host->max_seg_size;
+			sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size);
+
+			ttl_len += sg_data_size;
+			pkt_offset += sg_data_size;
+			if (pkt_offset == pkt_len) {
+				pnext = PKTNEXT(sd->osh, pnext);
+				pkt_offset = 0;
+			}
+		}
+
+		if (ttl_len % blk_size != 0) {
+			sd_err(("%s, data length %d not aligned to block size %d\n",
+				__FUNCTION__, ttl_len, blk_size));
+			return SDIOH_API_RC_FAIL;
+		}
+		blk_num = ttl_len / blk_size;
+		mmc_dat.sg = sd->sg_list;
+		mmc_dat.sg_len = sg_count;
+		mmc_dat.blksz = blk_size;
+		mmc_dat.blocks = blk_num;
+		mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+		mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
+		mmc_cmd.arg = write ? 1<<31 : 0;
+		mmc_cmd.arg |= (func & 0x7) << 28;
+		mmc_cmd.arg |= 1<<27;
+		mmc_cmd.arg |= fifo ? 0 : 1<<26;
+		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
+		mmc_cmd.arg |= blk_num & 0x1FF;
+		mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+		mmc_req.cmd = &mmc_cmd;
+		mmc_req.data = &mmc_dat;
+		if (!fifo)
+			addr += ttl_len;
+
+		sdio_claim_host(sdio_func);
+		mmc_set_data_timeout(&mmc_dat, sdio_func->card);
+		mmc_wait_for_req(host, &mmc_req);
+		sdio_release_host(sdio_func);
+
+		err_ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
+		if (0 != err_ret) {
+			sd_err(("%s:CMD53 %s failed with code %d\n",
+				__FUNCTION__, write ? "write" : "read", err_ret));
+			return SDIOH_API_RC_FAIL;
+		}
+	}
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
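The mmc_cmd.arg packing above follows the SD_IO_RW_EXTENDED (CMD53) argument layout from the SDIO specification: bit 31 is the R/W flag, bits 30:28 the function number, bit 27 block mode, bit 26 the OP code (incrementing vs. fixed address), bits 25:9 the register address, and bits 8:0 the block count. A standalone sketch of the same encoding:

	/* sketch: CMD53 argument encoding as used in sdioh_request_packet_chain() */
	static uint32 cmd53_arg(bool write, uint func, bool fifo, uint32 addr, uint32 blocks)
	{
		uint32 arg = write ? 1U << 31 : 0;

		arg |= (func & 0x7) << 28;
		arg |= 1U << 27;		/* block mode */
		arg |= fifo ? 0 : 1U << 26;	/* OP code: fixed vs. incrementing address */
		arg |= (addr & 0x1FFFF) << 9;
		arg |= blocks & 0x1FF;
		return arg;
	}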
+
+static SDIOH_API_RC
+sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+	uint addr, uint8 *buf, uint len)
+{
+	bool fifo = (fix_inc == SDIOH_DATA_FIX);
+	int err_ret = 0;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	ASSERT(buf);
+
+	/* NOTE:
+	 * For all writes, each packet length is aligned to 32 (or 4)
+	 * bytes in dhdsdio_txpkt_preprocess, and for glom the last packet length
+	 * is aligned to the block boundary. If you want to align each packet to
+	 * a custom size, please do it in dhdsdio_txpkt_preprocess, NOT here.
+	 *
+	 * For reads, the alignment is done in sdioh_request_buffer.
+	 */
+	sdio_claim_host(sd->func[func]);
+
+	if (write && !fifo)
+		err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
+	else if (write)
+		err_ret = sdio_writesb(sd->func[func], addr, buf, len); /* FIFO (fixed-address) write */
+	else if (fifo)
+		err_ret = sdio_readsb(sd->func[func], buf, addr, len);
+	else
+		err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len);
+
+	sdio_release_host(sd->func[func]);
+
+	if (err_ret)
+		sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__,
+			(write) ? "TX" : "RX", buf, addr, len, err_ret));
+	else
+		sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__,
+			(write) ? "TX" : "RX", buf, addr, len));
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+
+/*
+ * This function takes a buffer or packet, and fixes everything up so that in the
+ * end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer, and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain. If it is a packet chain,
+ * then all the packets in the chain must be properly aligned. If the packet data is not
+ * aligned, then there may only be one packet, and in this case, it is copied to a new
+ * aligned packet.
+ */
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
+	uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt)
+{
+	SDIOH_API_RC status;
+	void *tmppkt;
+
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
+	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+	if (pkt) {
+		/* packet chain, only used for tx/rx glom; all packet lengths
+		 * are aligned, total length is a block multiple
+		 */
+		if (PKTNEXT(sd->osh, pkt))
+			return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt);
+
+		/* non-glom mode, ignore the buffer parameter and use the packet pointer
+		 * (this shouldn't happen)
+		 */
+		buffer = PKTDATA(sd->osh, pkt);
+		buf_len = PKTLEN(sd->osh, pkt);
+	}
+
+	ASSERT(buffer);
+
+	/* buffer and length are aligned, use it directly so we can avoid a memory copy */
+	if (((ulong)buffer & DMA_ALIGN_MASK) == 0 && (buf_len & DMA_ALIGN_MASK) == 0)
+		return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len);
+
+	sd_err(("%s: [%d] doing memory copy buf=%p, len=%d\n",
+		__FUNCTION__, write, buffer, buf_len));
+
+	/* otherwise, a memory copy is needed as the input buffer is not aligned */
+	tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE);
+	if (tmppkt == NULL) {
+		sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len));
+		return SDIOH_API_RC_FAIL;
+	}
+
+	if (write)
+		bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len);
+
+	status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr,
+		PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1)));
+
+	if (!write)
+		bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len);
+
+	PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
+
+	return status;
+}
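sdioh_request_buffer() only takes the zero-copy path when both the buffer address and the length are DMA aligned; everything else is bounced through a scratch packet. The eligibility test, pulled out as a sketch (DMA_ALIGN_MASK is alignment minus one, as in the code above):

	/* sketch: zero-copy eligibility test used by sdioh_request_buffer() */
	static bool dma_aligned(const void *buf, uint len)
	{
		return (((ulong)buf & DMA_ALIGN_MASK) == 0) &&
			((len & DMA_ALIGN_MASK) == 0);
	}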
+
+/* this function performs an "abort" for both the host and the device */
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+#if defined(MMC_SDIO_ABORT)
+	char t_func = (char)func;
+#endif /* defined(MMC_SDIO_ABORT) */
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+
+#if defined(MMC_SDIO_ABORT)
+	/* issue abort cmd52 command through F1 */
+	sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
+#endif /* defined(MMC_SDIO_ABORT) */
+
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Reset and re-initialize the device */
+int sdioh_sdio_reset(sdioh_info_t *si)
+{
+	sd_trace(("%s: Enter\n", __FUNCTION__));
+	sd_trace(("%s: Exit\n", __FUNCTION__));
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Disable device interrupt */
+void
+sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask &= ~CLIENT_INTR;
+}
+
+/* Enable device interrupt */
+void
+sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
+{
+	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
+	sd->intmask |= CLIENT_INTR;
+}
+
+/* Read client card reg */
+int
+sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp = 0;
+
+		sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+		*data = temp;
+		*data &= 0xff;
+		sd_data(("%s: byte read data=0x%02x\n",
+			__FUNCTION__, *data));
+	} else {
+		sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize);
+		if (regsize == 2)
+			*data &= 0xffff;
+
+		sd_data(("%s: word read data=0x%08x\n",
+			__FUNCTION__, *data));
+	}
+
+	return SUCCESS;
+}
+
+#if !defined(OOB_INTR_ONLY)
+/* bcmsdh_sdmmc interrupt handler */
+static void IRQHandler(struct sdio_func *func)
+{
+	sdioh_info_t *sd;
+
+	sd = sdio_get_drvdata(func);
+
+	ASSERT(sd != NULL);
+	sdio_release_host(sd->func[0]);
+
+	if (sd->use_client_ints) {
+		sd->intrcount++;
+		ASSERT(sd->intr_handler);
+		ASSERT(sd->intr_handler_arg);
+		(sd->intr_handler)(sd->intr_handler_arg);
+	} else {
+		sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
+
+		sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
+			__FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
+	}
+
+	sdio_claim_host(sd->func[0]);
+}
+
+/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
+static void IRQHandlerF2(struct sdio_func *func)
+{
+	sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
+}
+#endif /* !defined(OOB_INTR_ONLY) */
+
+#ifdef NOTUSED
+/* Write client card reg */
+static int
+sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+
+	if ((func == 0) || (regsize == 1)) {
+		uint8 temp;
+
+		temp = data & 0xff;
+		sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
+		sd_data(("%s: byte write data=0x%02x\n",
+			__FUNCTION__, data));
+	} else {
+		if (regsize == 2)
+			data &= 0xffff;
+
+		sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);
+
+		sd_data(("%s: word write data=0x%08x\n",
+			__FUNCTION__, data));
+	}
+
+	return SUCCESS;
+}
+#endif /* NOTUSED */
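IRQHandler() above releases the host before dispatching, so the registered client handler runs without the host claimed and should stay short. A hedged sketch of the shape such a handler might take ('my_ctx' and its work item are hypothetical, not part of this driver):

	#include <linux/workqueue.h>

	struct my_ctx {
		struct work_struct dpc_work;	/* deferred processing */
	};

	/* sketch: client interrupt handler as dispatched by IRQHandler() */
	static void my_client_isr(void *arg)
	{
		struct my_ctx *ctx = arg;

		schedule_work(&ctx->dpc_work);	/* defer the real work */
	}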
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+	int ret;
+
+	if (!sd) {
+		sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
+		return (0);
+	}
+
+	/* This needs to be done in stages: we can't enable the interrupt till
+	   downloading of the firmware is complete, otherwise polled sdio access
+	   will get in the way
+	*/
+	if (sd->func[0]) {
+		if (stage == 0) {
+			/* Since the power to the chip is killed, we will have to
+			   re-enumerate the device again. Set the block size
+			   and enable function 1 in preparation for
+			   downloading the code
+			*/
+			/* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux
+			   2.6.27. The implementation prior to that is buggy, and needs Broadcom's
+			   patch for it
+			*/
+			if ((ret = sdio_reset_comm(sd->func[0]->card))) {
+				sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
+				return ret;
+			}
+			else {
+				sd->num_funcs = 2;
+				sd->sd_blockmode = TRUE;
+				sd->use_client_ints = TRUE;
+				sd->client_block_size[0] = 64;
+
+				if (sd->func[1]) {
+					/* Claim host controller */
+					sdio_claim_host(sd->func[1]);
+
+					sd->client_block_size[1] = 64;
+					ret = sdio_set_block_size(sd->func[1], 64);
+					if (ret) {
+						sd_err(("bcmsdh_sdmmc: Failed to set F1 "
+							"blocksize(%d)\n", ret));
+					}
+
+					/* Release host controller F1 */
+					sdio_release_host(sd->func[1]);
+				}
+
+				if (sd->func[2]) {
+					/* Claim host controller F2 */
+					sdio_claim_host(sd->func[2]);
+
+					sd->client_block_size[2] = sd_f2_blocksize;
+					ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
+					if (ret) {
+						sd_err(("bcmsdh_sdmmc: Failed to set F2 "
+							"blocksize to %d(%d)\n", sd_f2_blocksize, ret));
+					}
+
+					/* Release host controller F2 */
+					sdio_release_host(sd->func[2]);
+				}
+
+				sdioh_sdmmc_card_enablefuncs(sd);
+			}
+		} else {
+#if !defined(OOB_INTR_ONLY)
+			sdio_claim_host(sd->func[0]);
+			if (sd->func[2])
+				sdio_claim_irq(sd->func[2], IRQHandlerF2);
+			if (sd->func[1])
+				sdio_claim_irq(sd->func[1], IRQHandler);
+			sdio_release_host(sd->func[0]);
+#else /* defined(OOB_INTR_ONLY) */
+#if defined(HW_OOB)
+			sdioh_enable_func_intr(sd);
+#endif
+			bcmsdh_oob_intr_set(sd->bcmsdh, TRUE);
+#endif /* !defined(OOB_INTR_ONLY) */
+		}
+	}
+	else
+		sd_err(("%s Failed\n", __FUNCTION__));
+
+	return (0);
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+	/* The MSM7201A Android sdio stack has a bug with interrupts, so internally
+	   the SDIO stack polls, which causes issues when the device is turned off.
So + unregister interrupt with SDIO stack to stop the + polling + */ + if (sd->func[0]) { +#if !defined(OOB_INTR_ONLY) + sdio_claim_host(sd->func[0]); + if (sd->func[1]) + sdio_release_irq(sd->func[1]); + if (sd->func[2]) + sdio_release_irq(sd->func[2]); + sdio_release_host(sd->func[0]); +#else /* defined(OOB_INTR_ONLY) */ +#if defined(HW_OOB) + sdioh_disable_func_intr(sd); +#endif + bcmsdh_oob_intr_set(sd->bcmsdh, FALSE); +#endif /* !defined(OOB_INTR_ONLY) */ + } + else + sd_err(("%s Failed\n", __FUNCTION__)); + return (0); +} + +int +sdioh_waitlockfree(sdioh_info_t *sd) +{ + return (1); +} + + +SDIOH_API_RC +sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio) +{ + return SDIOH_API_RC_FAIL; +} + +SDIOH_API_RC +sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab) +{ + return SDIOH_API_RC_FAIL; +} + +bool +sdioh_gpioin(sdioh_info_t *sd, uint32 gpio) +{ + return FALSE; +} + +SDIOH_API_RC +sdioh_gpio_init(sdioh_info_t *sd) +{ + return SDIOH_API_RC_FAIL; +} diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c new file mode 100644 index 000000000000..19fa8b5c8b17 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c @@ -0,0 +1,399 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsdh_sdmmc_linux.c 591173 2015-10-07 06:24:22Z $ + */ + +#include +#include +#include /* SDIO Device and Protocol Specs */ +#include /* bcmsdh to/from specific controller APIs */ +#include /* to get msglevel bit values */ + +#include /* request_irq() */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#if !defined(SDIO_VENDOR_ID_BROADCOM) +#define SDIO_VENDOR_ID_BROADCOM 0x02d0 +#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */ + +#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000 + +#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) +#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB 0x0492 /* BCM94325SDGWB */ +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4325) +#define SDIO_DEVICE_ID_BROADCOM_4325 0x0493 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4329) +#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4319) +#define SDIO_DEVICE_ID_BROADCOM_4319 0x4319 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4319) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4330) +#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4330) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4334) +#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4334) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_4324) +#define SDIO_DEVICE_ID_BROADCOM_4324 0x4324 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4324) */ +#if !defined(SDIO_DEVICE_ID_BROADCOM_43239) +#define SDIO_DEVICE_ID_BROADCOM_43239 43239 +#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_43239) */ + +extern void wl_cfg80211_set_parent_dev(void *dev); +extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd); +extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd); +extern void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type, + uint bus_num, uint slot_num); +extern int bcmsdh_remove(bcmsdh_info_t *bcmsdh); + +int sdio_function_init(void); +void sdio_function_cleanup(void); + +#define DESCRIPTION "bcmsdh_sdmmc Driver" +#define AUTHOR "Broadcom Corporation" + +/* module param defaults */ +static int clockoverride = 0; + +module_param(clockoverride, int, 0644); +MODULE_PARM_DESC(clockoverride, "SDIO card clock override"); + +/* Maximum number of bcmsdh_sdmmc devices supported by driver */ +#define BCMSDH_SDMMC_MAX_DEVICES 1 + +extern volatile bool dhd_mmc_suspend; + +static int sdioh_probe(struct sdio_func *func) +{ + int host_idx = func->card->host->index; + uint32 rca = func->card->rca; + wifi_adapter_info_t *adapter; + osl_t *osh = NULL; + sdioh_info_t *sdioh = NULL; + + sd_err(("bus num (host idx)=%d, slot num (rca)=%d\n", host_idx, rca)); + adapter = dhd_wifi_platform_get_adapter(SDIO_BUS, host_idx, rca); + if (adapter != NULL) + sd_err(("found adapter info '%s'\n", adapter->name)); + else + sd_err(("can't find adapter info for this chip\n")); + +#ifdef WL_CFG80211 + wl_cfg80211_set_parent_dev(&func->dev); +#endif + + /* allocate SDIO Host Controller state info */ + osh = osl_attach(&func->dev, SDIO_BUS, TRUE); + if (osh == NULL) { + sd_err(("%s: osl_attach failed\n", __FUNCTION__)); + goto fail; + } + osl_static_mem_init(osh, adapter); + sdioh = sdioh_attach(osh, func); + if (sdioh == NULL) { + sd_err(("%s: sdioh_attach failed\n", __FUNCTION__)); + goto fail; + } + sdioh->bcmsdh = bcmsdh_probe(osh, &func->dev, sdioh, adapter, SDIO_BUS, host_idx, rca); + if 
(sdioh->bcmsdh == NULL) { + sd_err(("%s: bcmsdh_probe failed\n", __FUNCTION__)); + goto fail; + } + + sdio_set_drvdata(func, sdioh); + return 0; + +fail: + if (sdioh != NULL) + sdioh_detach(osh, sdioh); + if (osh != NULL) + osl_detach(osh); + return -ENOMEM; +} + +static void sdioh_remove(struct sdio_func *func) +{ + sdioh_info_t *sdioh; + osl_t *osh; + + sdioh = sdio_get_drvdata(func); + if (sdioh == NULL) { + sd_err(("%s: error, no sdioh handler found\n", __FUNCTION__)); + return; + } + + osh = sdioh->osh; + bcmsdh_remove(sdioh->bcmsdh); + sdioh_detach(osh, sdioh); + osl_detach(osh); +} + +static int bcmsdh_sdmmc_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + int ret = 0; + + if (func == NULL) + return -EINVAL; + + sd_err(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__)); + sd_info(("sdio_bcmsdh: func->class=%x\n", func->class)); + sd_info(("sdio_vendor: 0x%04x\n", func->vendor)); + sd_info(("sdio_device: 0x%04x\n", func->device)); + sd_info(("Function#: 0x%04x\n", func->num)); + + /* 4318 doesn't have function 2 */ + if ((func->num == 2) || (func->num == 1 && func->device == 0x4)) + ret = sdioh_probe(func); + + return ret; +} + +static void bcmsdh_sdmmc_remove(struct sdio_func *func) +{ + if (func == NULL) { + sd_err(("%s is called with NULL SDIO function pointer\n", __FUNCTION__)); + return; + } + + sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__)); + sd_info(("sdio_bcmsdh: func->class=%x\n", func->class)); + sd_info(("sdio_vendor: 0x%04x\n", func->vendor)); + sd_info(("sdio_device: 0x%04x\n", func->device)); + sd_info(("Function#: 0x%04x\n", func->num)); + + if ((func->num == 2) || (func->num == 1 && func->device == 0x4)) + sdioh_remove(func); +} + +/* devices we support, null terminated */ +static const struct sdio_device_id bcmsdh_sdmmc_ids[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4324) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43239) }, + { SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) }, + { /* end: all zeroes */ }, +}; + +MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids); + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) +static int bcmsdh_sdmmc_suspend(struct device *pdev) +{ + int err; + sdioh_info_t *sdioh; + struct sdio_func *func = dev_to_sdio_func(pdev); + mmc_pm_flag_t sdio_flags; + + sd_err(("%s Enter\n", __FUNCTION__)); + if (func->num != 2) + return 0; + +#ifdef CONFIG_PM_SLEEP + dhd_mmc_suspend = TRUE; +#endif + + sdioh = sdio_get_drvdata(func); + err = bcmsdh_suspend(sdioh->bcmsdh); + if (err) { +#ifdef CONFIG_PM_SLEEP + dhd_mmc_suspend = FALSE; +#endif + return err; + } + + sdio_flags = sdio_get_host_pm_caps(func); + if (!(sdio_flags & MMC_PM_KEEP_POWER)) { + sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__)); +#ifdef CONFIG_PM_SLEEP + dhd_mmc_suspend = FALSE; +#endif + return -EINVAL; + } + + /* keep power while host suspended */ + err = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (err) { + sd_err(("%s: error while trying to keep 
power\n", __FUNCTION__)); +#ifdef CONFIG_PM_SLEEP + dhd_mmc_suspend = FALSE; +#endif + return err; + } + smp_mb(); + + return 0; +} + +static int bcmsdh_sdmmc_resume(struct device *pdev) +{ + sdioh_info_t *sdioh; + struct sdio_func *func = dev_to_sdio_func(pdev); + + sd_err(("%s Enter\n", __FUNCTION__)); + if (func->num != 2) + return 0; + + sdioh = sdio_get_drvdata(func); +#ifdef CONFIG_PM_SLEEP + dhd_mmc_suspend = FALSE; +#endif + + smp_mb(); + return 0; +} + +static const struct dev_pm_ops bcmsdh_sdmmc_pm_ops = { + .suspend = bcmsdh_sdmmc_suspend, + .resume = bcmsdh_sdmmc_resume, +}; +#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */ + +#if defined(BCMLXSDMMC) +static struct semaphore *notify_semaphore = NULL; + +static int dummy_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + if (func && (func->num != 2)) { + return 0; + } + + if (notify_semaphore) + up(notify_semaphore); + return 0; +} + +static void dummy_remove(struct sdio_func *func) +{ +} + +static struct sdio_driver dummy_sdmmc_driver = { + .probe = dummy_probe, + .remove = dummy_remove, + .name = "dummy_sdmmc", + .id_table = bcmsdh_sdmmc_ids, + }; + +int sdio_func_reg_notify(void* semaphore) +{ + notify_semaphore = semaphore; + return sdio_register_driver(&dummy_sdmmc_driver); +} + +void sdio_func_unreg_notify(void) +{ + OSL_SLEEP(15); + sdio_unregister_driver(&dummy_sdmmc_driver); +} + +#endif /* defined(BCMLXSDMMC) */ + +static struct sdio_driver bcmsdh_sdmmc_driver = { + .probe = bcmsdh_sdmmc_probe, + .remove = bcmsdh_sdmmc_remove, + .name = "bcmsdh_sdmmc", + .id_table = bcmsdh_sdmmc_ids, +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) + .drv = { + .pm = &bcmsdh_sdmmc_pm_ops, + }, +#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM) */ + }; + +struct sdos_info { + sdioh_info_t *sd; + spinlock_t lock; +}; + +/* Interrupt enable/disable */ +SDIOH_API_RC +sdioh_interrupt_set(sdioh_info_t *sd, bool enable) +{ + if (!sd) + return BCME_BADARG; + + sd_trace(("%s: %s\n", __FUNCTION__, enable ? 
"Enabling" : "Disabling")); + return SDIOH_API_RC_SUCCESS; +} + +#ifdef BCMSDH_MODULE +static int __init +bcmsdh_module_init(void) +{ + int error = 0; + error = sdio_function_init(); + return error; +} + +static void __exit +bcmsdh_module_cleanup(void) +{ + sdio_function_cleanup(); +} + +module_init(bcmsdh_module_init); +module_exit(bcmsdh_module_cleanup); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION(DESCRIPTION); +MODULE_AUTHOR(AUTHOR); + +#endif /* BCMSDH_MODULE */ +/* + * module init +*/ +int bcmsdh_register_client_driver(void) +{ + return sdio_register_driver(&bcmsdh_sdmmc_driver); +} + +/* + * module cleanup +*/ +void bcmsdh_unregister_client_driver(void) +{ + sdio_unregister_driver(&bcmsdh_sdmmc_driver); +} diff --git a/drivers/net/wireless/bcmdhd/bcmsdspi_linux.c b/drivers/net/wireless/bcmdhd/bcmsdspi_linux.c new file mode 100644 index 000000000000..139288e73ad8 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmsdspi_linux.c @@ -0,0 +1,252 @@ +/* + * Broadcom SPI Host Controller Driver - Linux Per-port + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsdspi_linux.c 514727 2014-11-12 03:02:48Z $ + */ + +#include +#include + +#include /* bcmsdh to/from specific controller APIs */ +#include /* to get msglevel bit values */ + +#include +#include /* SDIO Device and Protocol Specs */ +#include /* request_irq(), free_irq() */ +#include +#include + +extern uint sd_crc; +module_param(sd_crc, uint, 0); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define KERNEL26 +#endif + +struct sdos_info { + sdioh_info_t *sd; + spinlock_t lock; + wait_queue_head_t intr_wait_queue; +}; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define BLOCKABLE() (!in_atomic()) +#else +#define BLOCKABLE() (!in_interrupt()) +#endif + +/* Interrupt handler */ +static irqreturn_t +sdspi_isr(int irq, void *dev_id +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) +, struct pt_regs *ptregs +#endif +) +{ + sdioh_info_t *sd; + struct sdos_info *sdos; + bool ours; + + sd = (sdioh_info_t *)dev_id; + sd->local_intrcount++; + + if (!sd->card_init_done) { + sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq)); + return IRQ_RETVAL(FALSE); + } else { + ours = spi_check_client_intr(sd, NULL); + + /* For local interrupts, wake the waiting process */ + if (ours && sd->got_hcint) { + sdos = (struct sdos_info *)sd->sdos_info; + wake_up_interruptible(&sdos->intr_wait_queue); + } + + return IRQ_RETVAL(ours); + } +} + + +/* Register with Linux for interrupts */ +int +spi_register_irq(sdioh_info_t *sd, uint irq) +{ + sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq)); + if (request_irq(irq, sdspi_isr, IRQF_SHARED, "bcmsdspi", sd) < 0) { + sd_err(("%s: request_irq() failed\n", __FUNCTION__)); + return ERROR; + } + return SUCCESS; +} + +/* Free Linux irq */ +void +spi_free_irq(uint irq, sdioh_info_t *sd) +{ + free_irq(irq, sd); +} + +/* Map Host controller registers */ +uint32 * +spi_reg_map(osl_t *osh, uintptr addr, int size) +{ + return (uint32 *)REG_MAP(addr, size); +} + +void +spi_reg_unmap(osl_t *osh, uintptr addr, int size) +{ + REG_UNMAP((void*)(uintptr)addr); +} + +int +spi_osinit(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + + sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info)); + sd->sdos_info = (void*)sdos; + if (sdos == NULL) + return BCME_NOMEM; + + sdos->sd = sd; + spin_lock_init(&sdos->lock); + init_waitqueue_head(&sdos->intr_wait_queue); + return BCME_OK; +} + +void +spi_osfree(sdioh_info_t *sd) +{ + struct sdos_info *sdos; + ASSERT(sd && sd->sdos_info); + + sdos = (struct sdos_info *)sd->sdos_info; + MFREE(sd->osh, sdos, sizeof(struct sdos_info)); +} + +/* Interrupt enable/disable */ +SDIOH_API_RC +sdioh_interrupt_set(sdioh_info_t *sd, bool enable) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %s\n", __FUNCTION__, enable ? 
"Enabling" : "Disabling")); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + if (!(sd->host_init_done && sd->card_init_done)) { + sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + if (enable && !(sd->intr_handler && sd->intr_handler_arg)) { + sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__)); + return SDIOH_API_RC_FAIL; + } + + /* Ensure atomicity for enable/disable calls */ + spin_lock_irqsave(&sdos->lock, flags); + + sd->client_intr_enabled = enable; + if (enable && !sd->lockcount) + spi_devintr_on(sd); + else + spi_devintr_off(sd); + + spin_unlock_irqrestore(&sdos->lock, flags); + + return SDIOH_API_RC_SUCCESS; +} + +/* Protect against reentrancy (disable device interrupts while executing) */ +void +spi_lock(sdioh_info_t *sd) +{ + ulong flags; + struct sdos_info *sdos; + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount)); + + spin_lock_irqsave(&sdos->lock, flags); + if (sd->lockcount) { + sd_err(("%s: Already locked!\n", __FUNCTION__)); + ASSERT(sd->lockcount == 0); + } + spi_devintr_off(sd); + sd->lockcount++; + spin_unlock_irqrestore(&sdos->lock, flags); +} + +/* Enable client interrupt */ +void +spi_unlock(sdioh_info_t *sd) +{ + ulong flags; + struct sdos_info *sdos; + + sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled)); + ASSERT(sd->lockcount > 0); + + sdos = (struct sdos_info *)sd->sdos_info; + ASSERT(sdos); + + spin_lock_irqsave(&sdos->lock, flags); + if (--sd->lockcount == 0 && sd->client_intr_enabled) { + spi_devintr_on(sd); + } + spin_unlock_irqrestore(&sdos->lock, flags); +} + +void spi_waitbits(sdioh_info_t *sd, bool yield) +{ +#ifndef BCMSDYIELD + ASSERT(!yield); +#endif + sd_trace(("%s: yield %d canblock %d\n", + __FUNCTION__, yield, BLOCKABLE())); + + /* Clear the "interrupt happened" flag and last intrstatus */ + sd->got_hcint = FALSE; + +#ifdef BCMSDYIELD + if (yield && BLOCKABLE()) { + struct sdos_info *sdos; + sdos = (struct sdos_info *)sd->sdos_info; + /* Wait for the indication, the interrupt will be masked when the ISR fires. */ + wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint)); + } else +#endif /* BCMSDYIELD */ + { + spi_spinbits(sd); + } + +} diff --git a/drivers/net/wireless/bcmdhd/bcmspibrcm.c b/drivers/net/wireless/bcmdhd/bcmspibrcm.c new file mode 100644 index 000000000000..10d982e0f8d8 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmspibrcm.c @@ -0,0 +1,1819 @@ +/* + * Broadcom BCMSDH to gSPI Protocol Conversion Layer + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmspibrcm.c 591086 2015-10-07 02:51:01Z $ + */ + +#define HSMODE + +#include + +#include +#include +#include +#include +#include +#include +#include +#include /* SDIO device core hardware definitions. */ +#include + +#include /* bcmsdh to/from specific controller APIs */ +#include /* ioctl/iovars */ +#include /* SDIO Device and Protocol Specs */ + +#include + + +#include +#include + +/* these are for the older cores... for newer cores we have control for each of them */ +#define F0_RESPONSE_DELAY 16 +#define F1_RESPONSE_DELAY 16 +#define F2_RESPONSE_DELAY F0_RESPONSE_DELAY + + +#define GSPI_F0_RESP_DELAY 0 +#define GSPI_F1_RESP_DELAY F1_RESPONSE_DELAY +#define GSPI_F2_RESP_DELAY 0 +#define GSPI_F3_RESP_DELAY 0 + +#define CMDLEN 4 + +#define DWORDMODE_ON (sd->chip == BCM4329_CHIP_ID) && (sd->chiprev == 2) && (sd->dwordmode == TRUE) + +/* Globals */ +#if defined(DHD_DEBUG) +uint sd_msglevel = SDH_ERROR_VAL; +#else +uint sd_msglevel = 0; +#endif + +uint sd_hiok = FALSE; /* Use hi-speed mode if available? */ +uint sd_sdmode = SDIOH_MODE_SPI; /* Use SD4 mode by default */ +uint sd_f2_blocksize = 64; /* Default blocksize */ + + +uint sd_divisor = 2; +uint sd_power = 1; /* Default to SD Slot powered ON */ +uint sd_clock = 1; /* Default to SD Clock turned ON */ +uint sd_crc = 0; /* Default to SPI CRC Check turned OFF */ +uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */ + +uint8 spi_outbuf[SPI_MAX_PKT_LEN]; +uint8 spi_inbuf[SPI_MAX_PKT_LEN]; + +/* 128bytes buffer is enough to clear data-not-available and program response-delay F0 bits + * assuming we will not exceed F0 response delay > 100 bytes at 48MHz. 
+ */
+#define BUF2_PKT_LEN	128
+uint8 spi_outbuf2[BUF2_PKT_LEN];
+uint8 spi_inbuf2[BUF2_PKT_LEN];
+
+#define SPISWAP_WD4(x) bcmswap32(x);
+#define SPISWAP_WD2(x) (bcmswap16(x & 0xffff)) | \
+		(bcmswap16((x & 0xffff0000) >> 16) << 16);
+
+/* Prototypes */
+static bool bcmspi_test_card(sdioh_info_t *sd);
+static bool bcmspi_host_device_init_adapt(sdioh_info_t *sd);
+static int bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+static int bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg,
+	uint32 *data, uint32 datalen);
+static int bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
+	int regsize, uint32 *data);
+static int bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
+	int regsize, uint32 data);
+static int bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr,
+	uint8 *data);
+static int bcmspi_driver_init(sdioh_info_t *sd);
+static int bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+	uint32 addr, int nbytes, uint32 *data);
+static int bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize,
+	uint32 *data);
+static void bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer);
+static int bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg);
+
+/*
+ * Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+	sdioh_info_t *sd;
+
+	sd_trace(("%s\n", __FUNCTION__));
+	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+		sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+		return NULL;
+	}
+	bzero((char *)sd, sizeof(sdioh_info_t));
+	sd->osh = osh;
+	if (spi_osinit(sd) != 0) {
+		sd_err(("%s: spi_osinit() failed\n", __FUNCTION__));
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return NULL;
+	}
+
+	sd->bar0 = bar0;
+	sd->irq = irq;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+	sd->intr_handler_valid = FALSE;
+
+	/* Set defaults */
+	sd->use_client_ints = TRUE;
+	sd->sd_use_dma = FALSE;	/* DMA Not supported */
+
+	/* The SPI device defaults to 16bit mode; wordlen changes to 4 when the
+	 * device is switched to 32bit mode
+	 */
+	sd->wordlen = 2;
+
+
+	if (!spi_hw_attach(sd)) {
+		sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__));
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	if (bcmspi_driver_init(sd) != SUCCESS) {
+		sd_err(("%s: bcmspi_driver_init() failed\n", __FUNCTION__));
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	if (spi_register_irq(sd, irq) != SUCCESS) {
+		sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+		return (NULL);
+	}
+
+	sd_trace(("%s: Done\n", __FUNCTION__));
+
+	return sd;
+}
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+	sd_trace(("%s\n", __FUNCTION__));
+	if (sd) {
+		sd_err(("%s: detaching from hardware\n", __FUNCTION__));
+		spi_free_irq(sd->irq, sd);
+		spi_hw_detach(sd);
+		spi_osfree(sd);
+		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+	}
+	return SDIOH_API_RC_SUCCESS;
+}
+
+/* Configure callback to client when we receive a client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+#if !defined(OOB_INTR_ONLY)
+	sd->intr_handler = fn;
+	sd->intr_handler_arg = argh;
+	sd->intr_handler_valid = TRUE;
+#endif /* !defined(OOB_INTR_ONLY) */
+	return SDIOH_API_RC_SUCCESS;
+}
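Pairing with the call above, a client typically registers its callback right after attach; a minimal sketch ('my_isr' and 'my_arg' are hypothetical caller-side names):

	/* sketch: hook a client interrupt callback into the gSPI host layer */
	if (sdioh_interrupt_register(sd, my_isr, my_arg) != SDIOH_API_RC_SUCCESS)
		sd_err(("failed to register client interrupt callback\n"));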
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+#if !defined(OOB_INTR_ONLY)
+	sd->intr_handler_valid = FALSE;
+	sd->intr_handler = NULL;
+	sd->intr_handler_arg = NULL;
+#endif /* !defined(OOB_INTR_ONLY) */
+	return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+	sd_trace(("%s: Entering\n", __FUNCTION__));
+	*onoff = sd->client_intr_enabled;
+	return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+	return 0;
+}
+#endif
+
+extern SDIOH_API_RC
+sdioh_query_device(sdioh_info_t *sd)
+{
+	/* Return a BRCM ID appropriate to the dongle class */
+	return (sd->num_funcs > 1) ? BCM4329_D11N_ID : BCM4318_D11G_ID;
+}
+
+/* Provide dstatus bits of spi-transaction for dhd layers. */
+extern uint32
+sdioh_get_dstatus(sdioh_info_t *sd)
+{
+	return sd->card_dstatus;
+}
+
+extern void
+sdioh_chipinfo(sdioh_info_t *sd, uint32 chip, uint32 chiprev)
+{
+	sd->chip = chip;
+	sd->chiprev = chiprev;
+}
+
+extern void
+sdioh_dwordmode(sdioh_info_t *sd, bool set)
+{
+	uint8 reg = 0;
+	int status;
+
+	if ((status = sdioh_request_byte(sd, SDIOH_READ, SPI_FUNC_0, SPID_STATUS_ENABLE, &reg)) !=
+		SUCCESS) {
+		sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__));
+		return;
+	}
+
+	if (set) {
+		reg |= DWORD_PKT_LEN_EN;
+		sd->dwordmode = TRUE;
+		sd->client_block_size[SPI_FUNC_2] = 4096; /* h2spi's limit is 4KB, we support 8KB */
+	} else {
+		reg &= ~DWORD_PKT_LEN_EN;
+		sd->dwordmode = FALSE;
+		sd->client_block_size[SPI_FUNC_2] = 2048;
+	}
+
+	if ((status = sdioh_request_byte(sd, SDIOH_WRITE, SPI_FUNC_0, SPID_STATUS_ENABLE, &reg)) !=
+		SUCCESS) {
+		sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__));
+		return;
+	}
+}
+
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+	return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+	IOV_MSGLEVEL = 1,
+	IOV_BLOCKMODE,
+	IOV_BLOCKSIZE,
+	IOV_DMA,
+	IOV_USEINTS,
+	IOV_NUMINTS,
+	IOV_NUMLOCALINTS,
+	IOV_HOSTREG,
+	IOV_DEVREG,
+	IOV_DIVISOR,
+	IOV_SDMODE,
+	IOV_HISPEED,
+	IOV_HCIREGS,
+	IOV_POWER,
+	IOV_CLOCK,
+	IOV_SPIERRSTATS,
+	IOV_RESP_DELAY_ALL
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+	{"sd_msglevel",	IOV_MSGLEVEL,	0,	IOVT_UINT32,	0 },
+	{"sd_blocksize", IOV_BLOCKSIZE,	0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
+	{"sd_dma",	IOV_DMA,	0,	IOVT_BOOL,	0 },
+	{"sd_ints",	IOV_USEINTS,	0,	IOVT_BOOL,	0 },
+	{"sd_numints",	IOV_NUMINTS,	0,	IOVT_UINT32,	0 },
+	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32,	0 },
+	{"sd_hostreg",	IOV_HOSTREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_devreg",	IOV_DEVREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
+	{"sd_divisor",	IOV_DIVISOR,	0,	IOVT_UINT32,	0 },
+	{"sd_power",	IOV_POWER,	0,	IOVT_UINT32,	0 },
+	{"sd_clock",	IOV_CLOCK,	0,	IOVT_UINT32,	0 },
+	{"sd_mode",	IOV_SDMODE,	0,	IOVT_UINT32,	100},
+	{"sd_highspeed", IOV_HISPEED,	0,	IOVT_UINT32,	0},
+	{"spi_errstats", IOV_SPIERRSTATS, 0,	IOVT_BUFFER,	sizeof(struct spierrstats_t) },
+	{"spi_respdelay", IOV_RESP_DELAY_ALL, 0, IOVT_BOOL,	0 },
+	{NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+	void *params, int plen, void *arg, int len, bool set)
+{
+	const bcm_iovar_t *vi = NULL;
+	int bcmerror = 0;
+	int val_size;
+	int32 int_val = 0;
+	bool bool_val;
+	uint32 actionid;
+/*
+	sdioh_regs_t *regs;
+*/
+
+	ASSERT(name);
+	ASSERT(len >= 0);
+
+	/* Get must have return space; Set does not take qualifiers */
ASSERT(set || (arg && len)); + ASSERT(!set || (!params && !plen)); + + sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name)); + + if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) + goto exit; + + /* Set up params so get and set can share the convenience variables */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + val_size = sizeof(int); + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + switch (actionid) { + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)sd_msglevel; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): + sd_msglevel = int_val; + break; + + case IOV_GVAL(IOV_BLOCKSIZE): + if ((uint32)int_val > si->num_funcs) { + bcmerror = BCME_BADARG; + break; + } + int_val = (int32)si->client_block_size[int_val]; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_DMA): + int_val = (int32)si->sd_use_dma; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DMA): + si->sd_use_dma = (bool)int_val; + break; + + case IOV_GVAL(IOV_USEINTS): + int_val = (int32)si->use_client_ints; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_USEINTS): + break; + + case IOV_GVAL(IOV_DIVISOR): + int_val = (uint32)sd_divisor; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DIVISOR): + sd_divisor = int_val; + if (!spi_start_clock(si, (uint16)sd_divisor)) { + sd_err(("%s: set clock failed\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + } + break; + + case IOV_GVAL(IOV_POWER): + int_val = (uint32)sd_power; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POWER): + sd_power = int_val; + break; + + case IOV_GVAL(IOV_CLOCK): + int_val = (uint32)sd_clock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_CLOCK): + sd_clock = int_val; + break; + + case IOV_GVAL(IOV_SDMODE): + int_val = (uint32)sd_sdmode; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDMODE): + sd_sdmode = int_val; + break; + + case IOV_GVAL(IOV_HISPEED): + int_val = (uint32)sd_hiok; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_HISPEED): + sd_hiok = int_val; + + if (!bcmspi_set_highspeed_mode(si, (bool)sd_hiok)) { + sd_err(("%s: Failed changing highspeed mode to %d.\n", + __FUNCTION__, sd_hiok)); + bcmerror = BCME_ERROR; + return ERROR; + } + break; + + case IOV_GVAL(IOV_NUMINTS): + int_val = (int32)si->intrcount; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_NUMLOCALINTS): + int_val = (int32)si->local_intrcount; + bcopy(&int_val, arg, val_size); + break; + case IOV_GVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data; + + if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + + int_val = (int)data; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_DEVREG): + { + sdreg_t *sd_ptr = (sdreg_t *)params; + uint8 data = (uint8)sd_ptr->value; + + if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) { + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + } + + + case IOV_GVAL(IOV_SPIERRSTATS): + { + bcopy(&si->spierrstats, arg, sizeof(struct spierrstats_t)); + break; + } + + case 
IOV_SVAL(IOV_SPIERRSTATS): + { + bzero(&si->spierrstats, sizeof(struct spierrstats_t)); + break; + } + + case IOV_GVAL(IOV_RESP_DELAY_ALL): + int_val = (int32)si->resp_delay_all; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RESP_DELAY_ALL): + si->resp_delay_all = (bool)int_val; + int_val = STATUS_ENABLE|INTR_WITH_STATUS; + if (si->resp_delay_all) + int_val |= RESP_DELAY_ALL; + else { + if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_RESPONSE_DELAY, 1, + F1_RESPONSE_DELAY) != SUCCESS) { + sd_err(("%s: Unable to set response delay.\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + break; + } + } + + if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, int_val) + != SUCCESS) { + sd_err(("%s: Unable to set response delay.\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + break; + } + break; + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } +exit: + + return bcmerror; +} + +extern SDIOH_API_RC +sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + SDIOH_API_RC status; + /* No lock needed since sdioh_request_byte does locking */ + status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data) +{ + /* No lock needed since sdioh_request_byte does locking */ + SDIOH_API_RC status; + + if ((fnc_num == SPI_FUNC_1) && (addr == SBSDIO_FUNC1_FRAMECTRL)) { + uint8 dummy_data; + status = sdioh_cfg_read(sd, fnc_num, addr, &dummy_data); + if (status) { + sd_err(("sdioh_cfg_read() failed.\n")); + return status; + } + } + + status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data); + return status; +} + +extern SDIOH_API_RC +sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length) +{ + uint32 count; + int offset; + uint32 cis_byte; + uint16 *cis = (uint16 *)cisd; + uint bar0 = SI_ENUM_BASE; + int status; + uint8 data; + + sd_trace(("%s: Func %d\n", __FUNCTION__, func)); + + spi_lock(sd); + + /* Set sb window address to 0x18000000 */ + data = (bar0 >> 8) & SBSDIO_SBADDRLOW_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, &data); + if (status == SUCCESS) { + data = (bar0 >> 16) & SBSDIO_SBADDRMID_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, &data); + } else { + sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + if (status == SUCCESS) { + data = (bar0 >> 24) & SBSDIO_SBADDRHIGH_MASK; + status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, &data); + } else { + sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + + offset = CC_SROM_OTP; /* OTP offset in chipcommon. 
*/ + for (count = 0; count < length/2; count++) { + if (bcmspi_card_regread (sd, SDIO_FUNC_1, offset, 2, &cis_byte) < 0) { + sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__)); + spi_unlock(sd); + return (BCME_ERROR); + } + + *cis = (uint16)cis_byte; + cis++; + offset += 2; + } + + spi_unlock(sd); + + return (BCME_OK); +} + +extern SDIOH_API_RC +sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte) +{ + int status; + uint32 cmd_arg; + uint32 dstatus; + uint32 data = (uint32)(*byte); + + spi_lock(sd); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, rw == SDIOH_READ ? 0 : 1); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1); + + if (rw == SDIOH_READ) { + sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr)); + } else { + sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr, data)); + } + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS) { + spi_unlock(sd); + return status; + } + + if (rw == SDIOH_READ) { + *byte = (uint8)data; + sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *byte)); + } + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus=0x%x\n", dstatus)); + + spi_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +extern SDIOH_API_RC +sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr, + uint32 *word, uint nbytes) +{ + int status; + + spi_lock(sd); + + if (rw == SDIOH_READ) + status = bcmspi_card_regread(sd, func, addr, nbytes, word); + else + status = bcmspi_card_regwrite(sd, func, addr, nbytes, *word); + + spi_unlock(sd); + return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); +} + +extern SDIOH_API_RC +sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func, + uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt) +{ + int len; + int buflen = (int)buflen_u; + bool fifo = (fix_inc == SDIOH_DATA_FIX); + + spi_lock(sd); + + ASSERT(reg_width == 4); + ASSERT(buflen_u < (1 << 30)); + ASSERT(sd->client_block_size[func]); + + sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n", + __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W', + buflen_u, sd->r_cnt, sd->t_cnt, pkt)); + + /* Break buffer down into blocksize chunks. */ + while (buflen > 0) { + len = MIN(sd->client_block_size[func], buflen); + if (bcmspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) { + sd_err(("%s: bcmspi_card_buf %s failed\n", + __FUNCTION__, rw == SDIOH_READ ? "Read" : "Write")); + spi_unlock(sd); + return SDIOH_API_RC_FAIL; + } + buffer += len; + buflen -= len; + if (!fifo) + addr += len; + } + spi_unlock(sd); + return SDIOH_API_RC_SUCCESS; +} + +/* This function allows write to gspi bus when another rd/wr function is deep down the call stack. + * Its main aim is to have simpler spi writes rather than recursive writes. + * e.g. When there is a need to program response delay on the fly after detecting the SPI-func + * this call will allow to program the response delay. 
+ */
+static int
+bcmspi_card_byterewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 byte)
+{
+	uint32 cmd_arg;
+	uint32 datalen = 1;
+	uint32 hostlen;
+
+	cmd_arg = 0;
+
+	cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);	/* Incremental access */
+	cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+	cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+	cmd_arg = SFIELD(cmd_arg, SPI_LEN, datalen);
+
+	sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+
+
+	/* Set up and issue the SPI command. MSByte goes out on the bus first. Increase datalen
+	 * according to the wordlen mode (16/32bit) the device is in.
+	 */
+	ASSERT(sd->wordlen == 4 || sd->wordlen == 2);
+	datalen = ROUNDUP(datalen, sd->wordlen);
+
+	/* Start by copying the command into the spi-outbuffer */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		*(uint32 *)spi_outbuf2 = SPISWAP_WD4(cmd_arg);
+		if (datalen & 0x3)
+			datalen += (4 - (datalen & 0x3));
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		*(uint32 *)spi_outbuf2 = SPISWAP_WD2(cmd_arg);
+		if (datalen & 0x1)
+			datalen++;
+	} else {
+		sd_err(("%s: Host is %d bit spid, could not create SPI command.\n",
+			__FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	/* for Write, put the data into the output buffer */
+	if (datalen != 0) {
+		if (sd->wordlen == 4) { /* 32bit spid */
+			*(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD4(byte);
+		} else if (sd->wordlen == 2) { /* 16bit spid */
+			*(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD2(byte);
+		}
+	}
+
+	/* +4 for cmd, +4 for dstatus */
+	hostlen = datalen + 8;
+	hostlen += (4 - (hostlen & 0x3));
+	spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, hostlen);
+
+	/* Last 4 bytes are dstatus. Device is configured to return status bits. */
+	if (sd->wordlen == 4) { /* 32bit spid */
+		sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN]);
+	} else if (sd->wordlen == 2) { /* 16bit spid */
+		sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN]);
+	} else {
+		sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n",
+			__FUNCTION__, 8 * sd->wordlen));
+		return ERROR;
+	}
+
+	if (sd->card_dstatus)
+		sd_trace(("dstatus after byte rewrite = 0x%x\n", sd->card_dstatus));
+
+	return (BCME_OK);
+}
+
+/* Program the response delay corresponding to the spi function */
+static int
+bcmspi_prog_resp_delay(sdioh_info_t *sd, int func, uint8 resp_delay)
+{
+	if (sd->resp_delay_all == FALSE)
+		return (BCME_OK);
+
+	if (sd->prev_fun == func)
+		return (BCME_OK);
+
+	if (F0_RESPONSE_DELAY == F1_RESPONSE_DELAY)
+		return (BCME_OK);
+
+	bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_RESPONSE_DELAY, resp_delay);
+
+	/* Remember the function, to avoid reprogramming resp-delay in the next iteration */
+	sd->prev_fun = func;
+
+	return (BCME_OK);
+
+}
+
+#define GSPI_RESYNC_PATTERN	0x0
+
+/* A resync pattern is a 32bit MOSI line with all zeros. It's a special command in gSPI.
+ * It resets the spi-bkplane logic so that all F1-related ping-pong buffer logic is
+ * synchronised and all queued requests are cancelled.
+ */
+static int
+bcmspi_resync_f1(sdioh_info_t *sd)
+{
+	uint32 cmd_arg = GSPI_RESYNC_PATTERN, data = 0, datalen = 0;
+
+
+	/* Set up and issue the SPI command. MSByte goes out on the bus first. Increase datalen
+	 * according to the wordlen mode (16/32bit) the device is in.
+ */ + ASSERT(sd->wordlen == 4 || sd->wordlen == 2); + datalen = ROUNDUP(datalen, sd->wordlen); + + /* Start by copying command in the spi-outbuffer */ + *(uint32 *)spi_outbuf2 = cmd_arg; + + /* for Write, put the data into the output buffer */ + *(uint32 *)&spi_outbuf2[CMDLEN] = data; + + /* +4 for cmd, +4 for dstatus */ + spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, datalen + 8); + + /* Last 4bytes are dstatus. Device is configured to return status bits. */ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]); + } else { + sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n", + __FUNCTION__, 8 * sd->wordlen)); + return ERROR; + } + + if (sd->card_dstatus) + sd_trace(("dstatus after resync pattern write = 0x%x\n", sd->card_dstatus)); + + return (BCME_OK); +} + +uint32 dstatus_count = 0; + +static int +bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg) +{ + uint32 dstatus = sd->card_dstatus; + struct spierrstats_t *spierrstats = &sd->spierrstats; + int err = SUCCESS; + + sd_trace(("cmd = 0x%x, dstatus = 0x%x\n", cmd_arg, dstatus)); + + /* Store dstatus of last few gSPI transactions */ + spierrstats->dstatus[dstatus_count % NUM_PREV_TRANSACTIONS] = dstatus; + spierrstats->spicmd[dstatus_count % NUM_PREV_TRANSACTIONS] = cmd_arg; + dstatus_count++; + + if (sd->card_init_done == FALSE) + return err; + + if (dstatus & STATUS_DATA_NOT_AVAILABLE) { + spierrstats->dna++; + sd_trace(("Read data not available on F1 addr = 0x%x\n", + GFIELD(cmd_arg, SPI_REG_ADDR))); + /* Clear dna bit */ + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, DATA_UNAVAILABLE); + } + + if (dstatus & STATUS_UNDERFLOW) { + spierrstats->rdunderflow++; + sd_err(("FIFO underflow happened due to current F2 read command.\n")); + } + + if (dstatus & STATUS_OVERFLOW) { + spierrstats->wroverflow++; + sd_err(("FIFO overflow happened due to current (F1/F2) write command.\n")); + bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, F1_OVERFLOW); + bcmspi_resync_f1(sd); + sd_err(("Recovering from F1 FIFO overflow.\n")); + } + + if (dstatus & STATUS_F2_INTR) { + spierrstats->f2interrupt++; + sd_trace(("Interrupt from F2. SW should clear corresponding IntStatus bits\n")); + } + + if (dstatus & STATUS_F3_INTR) { + spierrstats->f3interrupt++; + sd_err(("Interrupt from F3. SW should clear corresponding IntStatus bits\n")); + } + + if (dstatus & STATUS_HOST_CMD_DATA_ERR) { + spierrstats->hostcmddataerr++; + sd_err(("Error in CMD or Host data, detected by CRC/Checksum (optional)\n")); + } + + if (dstatus & STATUS_F2_PKT_AVAILABLE) { + spierrstats->f2pktavailable++; + sd_trace(("Packet is available/ready in F2 TX FIFO\n")); + sd_trace(("Packet length = %d\n", sd->dwordmode ? 
+			((dstatus & STATUS_F2_PKT_LEN_MASK) >> (STATUS_F2_PKT_LEN_SHIFT - 2)) :
+			((dstatus & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT)));
+	}
+
+	if (dstatus & STATUS_F3_PKT_AVAILABLE) {
+		spierrstats->f3pktavailable++;
+		sd_err(("Packet is available/ready in F3 TX FIFO\n"));
+		sd_err(("Packet length = %d\n",
+			(dstatus & STATUS_F3_PKT_LEN_MASK) >> STATUS_F3_PKT_LEN_SHIFT));
+	}
+
+	return err;
+}
+
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+	return 0;
+}
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+	return SUCCESS;
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+	return SUCCESS;
+}
+
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+	return SUCCESS;
+}
+
+
+/*
+ * Private/Static work routines
+ */
+static int
+bcmspi_host_init(sdioh_info_t *sd)
+{
+
+	/* Default power on mode */
+	sd->sd_mode = SDIOH_MODE_SPI;
+	sd->polled_mode = TRUE;
+	sd->host_init_done = TRUE;
+	sd->card_init_done = FALSE;
+	sd->adapter_slot = 1;
+
+	return (SUCCESS);
+}
+
+static int
+get_client_blocksize(sdioh_info_t *sd)
+{
+	uint32 regdata[2];
+	int status;
+
+	/* Find F1/F2/F3 max packet size */
+	if ((status = bcmspi_card_regread(sd, 0, SPID_F1_INFO_REG,
+	                                  8, regdata)) != SUCCESS) {
+		return status;
+	}
+
+	sd_trace(("pkt_size regdata[0] = 0x%x, regdata[1] = 0x%x\n",
+	          regdata[0], regdata[1]));
+
+	sd->client_block_size[1] = (regdata[0] & F1_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func1 blocksize = %d\n", sd->client_block_size[1]));
+	ASSERT(sd->client_block_size[1] == BLOCK_SIZE_F1);
+
+	sd->client_block_size[2] = ((regdata[0] >> 16) & F2_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func2 blocksize = %d\n", sd->client_block_size[2]));
+	ASSERT(sd->client_block_size[2] == BLOCK_SIZE_F2);
+
+	sd->client_block_size[3] = (regdata[1] & F3_MAX_PKT_SIZE) >> 2;
+	sd_trace(("Func3 blocksize = %d\n", sd->client_block_size[3]));
+	ASSERT(sd->client_block_size[3] == BLOCK_SIZE_F3);
+
+	return 0;
+}
+
+static int
+bcmspi_client_init(sdioh_info_t *sd)
+{
+	uint32 status_en_reg = 0;
+	sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
+
+#ifdef HSMODE
+	if (!spi_start_clock(sd, (uint16)sd_divisor)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+#else
+	/* Start at ~400KHz clock rate for initialization */
+	if (!spi_start_clock(sd, 128)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+#endif /* HSMODE */
+
+	if (!bcmspi_host_device_init_adapt(sd)) {
+		sd_err(("bcmspi_host_device_init_adapt failed\n"));
+		return ERROR;
+	}
+
+	if (!bcmspi_test_card(sd)) {
+		sd_err(("bcmspi_test_card failed\n"));
+		return ERROR;
+	}
+
+	sd->num_funcs = SPI_MAX_IOFUNCS;
+
+	get_client_blocksize(sd);
+
+	/* Apply resync pattern cmd with all zeros to reset spi-bkplane F1 logic */
+	bcmspi_resync_f1(sd);
+
+	sd->dwordmode = FALSE;
+
+	bcmspi_card_regread(sd, 0, SPID_STATUS_ENABLE, 1, &status_en_reg);
+
+	sd_trace(("%s: Enabling interrupt with dstatus\n", __FUNCTION__));
+	status_en_reg |= INTR_WITH_STATUS;
+
+	if (bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_STATUS_ENABLE, 1,
+	    status_en_reg & 0xff) != SUCCESS) {
+		sd_err(("%s: Unable to set up the STATUS_ENABLE register.\n", __FUNCTION__));
+		return ERROR;
+	}
+
+#ifndef HSMODE
+	/* Initialization is done at the slow clock; now set the desired operating clock rate. */
+	if (!spi_start_clock(sd, 4)) {
+		sd_err(("spi_start_clock failed\n"));
+		return ERROR;
+	}
+#endif /* HSMODE */
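+
+	/*
+	 * Illustrative usage (not part of the original sources): with
+	 * INTR_WITH_STATUS enabled above, every gSPI response ends with the
+	 * 32-bit dstatus word, so callers can inspect it cheaply after any
+	 * transfer via the cached copy:
+	 *
+	 *	uint32 dstatus;
+	 *	bcmspi_cmd_getdstatus(sd, &dstatus);
+	 *	if (dstatus & STATUS_F2_PKT_AVAILABLE)
+	 *		-- a packet is ready in the F2 TX FIFO --
+	 */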
+
+	/* check to see if the response delay needs to be programmed properly */
+	{
+		uint32 f1_respdelay = 0;
+		bcmspi_card_regread(sd, 0, SPID_RESP_DELAY_F1, 1, &f1_respdelay);
+		if ((f1_respdelay == 0) || (f1_respdelay == 0xFF)) {
+			/* older sdiodevice core; no separate resp delay per function */
+			sd_err(("older corerev < 4 so use the same resp delay for all funcs\n"));
+			sd->resp_delay_new = FALSE;
+		}
+		else {
+			/* newer sdiodevice core; has a separate resp delay per function */
+			int ret_val;
+			sd->resp_delay_new = TRUE;
+			sd_err(("new corerev >= 4 so set the resp delay for each of the funcs\n"));
+			sd_trace(("resp delay for funcs f0(%d), f1(%d), f2(%d), f3(%d)\n",
+				GSPI_F0_RESP_DELAY, GSPI_F1_RESP_DELAY,
+				GSPI_F2_RESP_DELAY, GSPI_F3_RESP_DELAY));
+			ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F0, 1,
+				GSPI_F0_RESP_DELAY);
+			if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F0\n", __FUNCTION__));
+				return ERROR;
+			}
+			ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F1, 1,
+				GSPI_F1_RESP_DELAY);
+			if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F1\n", __FUNCTION__));
+				return ERROR;
+			}
+			ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F2, 1,
+				GSPI_F2_RESP_DELAY);
+			if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F2\n", __FUNCTION__));
+				return ERROR;
+			}
+			ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F3, 1,
+				GSPI_F3_RESP_DELAY);
+			if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F3\n", __FUNCTION__));
+				return ERROR;
+			}
+		}
+	}
+
+
+	sd->card_init_done = TRUE;
+
+	/* (the device rev was probed above to program the proper resp delays) */
+
+	return SUCCESS;
+}
+
+static int
+bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode)
+{
+	uint32 regdata;
+	int status;
+
+	if ((status = bcmspi_card_regread(sd, 0, SPID_CONFIG,
+	                                  4, &regdata)) != SUCCESS)
+		return status;
+
+	sd_trace(("In %s spih-ctrl = 0x%x\n", __FUNCTION__, regdata));
+
+
+	if (hsmode == TRUE) {
+		sd_trace(("Attempting to enable High-Speed mode.\n"));
+
+		if (regdata & HIGH_SPEED_MODE) {
+			sd_trace(("Device is already in High-Speed mode.\n"));
+			return status;
+		} else {
+			regdata |= HIGH_SPEED_MODE;
+			sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
+			if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
+			                                   4, regdata)) != SUCCESS) {
+				return status;
+			}
+		}
+	} else {
+		sd_trace(("Attempting to disable High-Speed mode.\n"));
+
+		if (regdata & HIGH_SPEED_MODE) {
+			regdata &= ~HIGH_SPEED_MODE;
+			sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
+			if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
+			                                   4, regdata)) != SUCCESS)
+				return status;
+		}
+		else {
+			sd_trace(("Device is already in Low-Speed mode.\n"));
+			return status;
+		}
+	}
+	spi_controller_highspeed_mode(sd, hsmode);
+
+	return TRUE;
+}
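+
+/*
+ * Illustrative note (not part of the original sources): the
+ * bcmspi_find_curr_mode() macro below expands to open-coded statements that
+ * reference local variables named status and regdata and contain a bare
+ * break, so it can only be invoked inside a loop of a function declaring
+ * both locals -- exactly how the adaptation loop in
+ * bcmspi_host_device_init_adapt() uses it:
+ *
+ *	uint32 regdata = 0;
+ *	int status, i;
+ *	for (i = 0; i < INIT_ADAPT_LOOP; i++) {
+ *		bcmspi_find_curr_mode(sd);	(leaves the loop once
+ *						 SPID_TEST_READ returns a
+ *						 known signature byte)
+ *	}
+ */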
+#define bcmspi_find_curr_mode(sd) { \
+	sd->wordlen = 2; \
+	status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
+	regdata &= 0xff; \
+	if ((regdata == 0xad) || (regdata == 0x5b) || \
+	    (regdata == 0x5d) || (regdata == 0x5a)) \
+		break; \
+	sd->wordlen = 4; \
+	status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
+	regdata &= 0xff; \
+	if ((regdata == 0xad) || (regdata == 0x5b) || \
+	    (regdata == 0x5d) || (regdata == 0x5a)) \
+		break; \
+	sd_trace(("Silicon testability issue: regdata = 0x%x." \
+		" Expected 0xad, 0x5a, 0x5b or 0x5d.\n", regdata)); \
+	OSL_DELAY(100000); \
+}
+
+#define INIT_ADAPT_LOOP		100
+
+/* Adapt clock-phase-speed-bitwidth between host and device */
+static bool
+bcmspi_host_device_init_adapt(sdioh_info_t *sd)
+{
+	uint32 wrregdata, regdata = 0;
+	int status;
+	int i;
+
+	/* Due to a silicon testability issue, the first command from the Host
+	 * to the device will get corrupted (first bit will be lost). So the
+	 * Host should poll the device with a safe read request. ie: The Host
+	 * should try to read F0 addr 0x14 using the Fixed address mode
+	 * (This will prevent an unintended write command from being detected
+	 * by the device)
+	 */
+	for (i = 0; i < INIT_ADAPT_LOOP; i++) {
+		/* If the device was not power-cycled it will stay in 32bit mode with
+		 * the response-delay-all bit set. Alternate the iterations so that the
+		 * F0 read succeeds either with or without response-delay.
+		 */
+		bcmspi_find_curr_mode(sd);
+		sd->resp_delay_all = (i & 0x1) ? TRUE : FALSE;
+
+		bcmspi_find_curr_mode(sd);
+		sd->dwordmode = TRUE;
+
+		bcmspi_find_curr_mode(sd);
+		sd->dwordmode = FALSE;
+	}
+
+	/* Bail out, device not detected */
+	if (i == INIT_ADAPT_LOOP)
+		return FALSE;
+
+	/* Softreset the spid logic */
+	if ((sd->dwordmode) || (sd->wordlen == 4)) {
+		bcmspi_card_regwrite(sd, 0, SPID_RESET_BP, 1, RESET_ON_WLAN_BP_RESET|RESET_SPI);
+		bcmspi_card_regread(sd, 0, SPID_RESET_BP, 1, &regdata);
+		sd_trace(("reset reg read = 0x%x\n", regdata));
+		sd_trace(("dwordmode = %d, wordlen = %d, resp_delay_all = %d\n", sd->dwordmode,
+		          sd->wordlen, sd->resp_delay_all));
+		/* Restore default state after softreset */
+		sd->wordlen = 2;
+		sd->dwordmode = FALSE;
+	}
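+
+	/*
+	 * Illustrative aside (not in the original sources): the "safe read"
+	 * polling above relies on the fixed-address variant of the register
+	 * read, which packs SPI_ACCESS = 0 into the command word:
+	 *
+	 *	cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0);	-- fixed access
+	 *
+	 * whereas the normal read/write paths in this file use
+	 * SPI_ACCESS = 1 (incremental). Presumably a fixed-address read with
+	 * a corrupted leading bit cannot be mistaken for a burst write, which
+	 * is why it is the only command considered safe before the adaptation
+	 * completes.
+	 */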
+ " Value read = 0x%x\n", regdata)); + return FALSE; + } + } else { + sd->wordlen = 2; + +#define CTRL_REG_DEFAULT 0x00010430 /* according to the host m/c */ + + wrregdata = (CTRL_REG_DEFAULT); + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) + return FALSE; + sd_trace(("(we are still in 16bit mode) 32bit READ LE regdata = 0x%x\n", regdata)); + +#ifndef HSMODE + wrregdata |= (CLOCK_PHASE | CLOCK_POLARITY); + wrregdata &= ~HIGH_SPEED_MODE; + bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata); +#endif /* HSMODE */ + + for (i = 0; i < INIT_ADAPT_LOOP; i++) { + if ((regdata == 0xfdda7d5b) || (regdata == 0xfdda7d5a)) { + sd_trace(("0xfeedbead was leftshifted by 1-bit.\n")); + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, + ®data)) != SUCCESS) + return FALSE; + } + OSL_DELAY(1000); + } + +#if defined(CHANGE_SPI_INTR_POLARITY_ACTIVE_HIGH) + /* Change to host controller intr-polarity of active-high */ + wrregdata |= INTR_POLARITY; +#else + /* Change to host controller intr-polarity of active-low */ + wrregdata &= ~INTR_POLARITY; +#endif /* CHANGE_SPI_INTR_POLARITY_ACTIVE_HIGH */ + + sd_trace(("(we are still in 16bit mode) 32bit Write LE reg-ctrl-data = 0x%x\n", + wrregdata)); + /* Change to 32bit mode */ + wrregdata |= WORD_LENGTH_32; + bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata); + + /* Change command/data packaging in 32bit LE mode */ + sd->wordlen = 4; + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) + return FALSE; + + if (regdata == TEST_RO_DATA_32BIT_LE) { + sd_trace(("Read spid passed. Value read = 0x%x\n", regdata)); + sd_trace(("Spid had power-on cycle OR spi was soft-resetted \n")); + } else { + sd_err(("Stale spid reg values read as it was kept powered. Value read =" + "0x%x\n", regdata)); + return FALSE; + } + } + + + return TRUE; +} + +static bool +bcmspi_test_card(sdioh_info_t *sd) +{ + uint32 regdata; + int status; + + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, ®data)) != SUCCESS) + return FALSE; + + if (regdata == (TEST_RO_DATA_32BIT_LE)) + sd_trace(("32bit LE regdata = 0x%x\n", regdata)); + else { + sd_trace(("Incorrect 32bit LE regdata = 0x%x\n", regdata)); + return FALSE; + } + + +#define RW_PATTERN1 0xA0A1A2A3 +#define RW_PATTERN2 0x4B5B6B7B + + regdata = RW_PATTERN1; + if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS) + return FALSE; + regdata = 0; + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, ®data)) != SUCCESS) + return FALSE; + if (regdata != RW_PATTERN1) { + sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n", + RW_PATTERN1, regdata)); + return FALSE; + } else + sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata)); + + regdata = RW_PATTERN2; + if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS) + return FALSE; + regdata = 0; + if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, ®data)) != SUCCESS) + return FALSE; + if (regdata != RW_PATTERN2) { + sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n", + RW_PATTERN2, regdata)); + return FALSE; + } else + sd_trace(("R/W spid passed. 
Value read = 0x%x\n", regdata)); + + return TRUE; +} + +static int +bcmspi_driver_init(sdioh_info_t *sd) +{ + sd_trace(("%s\n", __FUNCTION__)); + if ((bcmspi_host_init(sd)) != SUCCESS) { + return ERROR; + } + + if (bcmspi_client_init(sd) != SUCCESS) { + return ERROR; + } + + return SUCCESS; +} + +/* Read device reg */ +static int +bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + int status; + uint32 cmd_arg, dstatus; + + ASSERT(regsize); + + if (func == 2) + sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n")); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize); + + sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n", + __FUNCTION__, cmd_arg, func, regaddr, regsize)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus =0x%x\n", dstatus)); + + return SUCCESS; +} + +static int +bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data) +{ + + int status; + uint32 cmd_arg; + uint32 dstatus; + + ASSERT(regsize); + + if (func == 2) + sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n")); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0); /* Fixed access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize); + + sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n", + __FUNCTION__, cmd_arg, func, regaddr, regsize)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) != SUCCESS) + return status; + + sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *data)); + + bcmspi_cmd_getdstatus(sd, &dstatus); + sd_trace(("dstatus =0x%x\n", dstatus)); + return SUCCESS; +} + +/* write a device register */ +static int +bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data) +{ + int status; + uint32 cmd_arg, dstatus; + + ASSERT(regsize); + + cmd_arg = 0; + + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 
0 : regsize); + + sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d data=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr, regsize, data)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, regsize)) != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus=0x%x\n", dstatus)); + + return SUCCESS; +} + +/* write a device register - 1 byte */ +static int +bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 *byte) +{ + int status; + uint32 cmd_arg; + uint32 dstatus; + uint32 data = (uint32)(*byte); + + cmd_arg = 0; + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */ + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1); + + sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n", + __FUNCTION__, cmd_arg, func, regaddr, data)); + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS) + return status; + + bcmspi_cmd_getdstatus(sd, &dstatus); + if (dstatus) + sd_trace(("dstatus =0x%x\n", dstatus)); + + return SUCCESS; +} + +void +bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer) +{ + *dstatus_buffer = sd->card_dstatus; +} + +/* 'data' is of type uint32 whereas other buffers are of type uint8 */ +static int +bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg, + uint32 *data, uint32 datalen) +{ + uint32 i, j; + uint8 resp_delay = 0; + int err = SUCCESS; + uint32 hostlen; + uint32 spilen = 0; + uint32 dstatus_idx = 0; + uint16 templen, buslen, len, *ptr = NULL; + + sd_trace(("spi cmd = 0x%x\n", cmd_arg)); + + if (DWORDMODE_ON) { + spilen = GFIELD(cmd_arg, SPI_LEN); + if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_0) || + (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_1)) + dstatus_idx = spilen * 3; + + if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) && + (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) { + spilen = spilen << 2; + dstatus_idx = (spilen % 16) ? (16 - (spilen % 16)) : 0; + /* convert len to mod16 size */ + spilen = ROUNDUP(spilen, 16); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2)); + } + } + + /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen + * according to the wordlen mode(16/32bit) the device is in. 
+ */ + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)spi_outbuf = SPISWAP_WD4(cmd_arg); + if (datalen & 0x3) + datalen += (4 - (datalen & 0x3)); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)spi_outbuf = SPISWAP_WD2(cmd_arg); + if (datalen & 0x1) + datalen++; + if (datalen < 4) + datalen = ROUNDUP(datalen, 4); + } else { + sd_err(("Host is %d bit spid, could not create SPI command.\n", + 8 * sd->wordlen)); + return ERROR; + } + + /* for Write, put the data into the output buffer */ + if (GFIELD(cmd_arg, SPI_RW_FLAG) == 1) { + /* We send len field of hw-header always a mod16 size, both from host and dongle */ + if (DWORDMODE_ON) { + if (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) { + ptr = (uint16 *)&data[0]; + templen = *ptr; + /* ASSERT(*ptr == ~*(ptr + 1)); */ + templen = ROUNDUP(templen, 16); + *ptr = templen; + sd_trace(("actual tx len = %d\n", (uint16)(~*(ptr+1)))); + } + } + + if (datalen != 0) { + for (i = 0; i < datalen/4; i++) { + if (sd->wordlen == 4) { /* 32bit spid */ + *(uint32 *)&spi_outbuf[i * 4 + CMDLEN] = + SPISWAP_WD4(data[i]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + *(uint32 *)&spi_outbuf[i * 4 + CMDLEN] = + SPISWAP_WD2(data[i]); + } + } + } + } + + /* Append resp-delay number of bytes and clock them out for F0/1/2 reads. */ + if ((GFIELD(cmd_arg, SPI_RW_FLAG) == 0)) { + int func = GFIELD(cmd_arg, SPI_FUNCTION); + switch (func) { + case 0: + if (sd->resp_delay_new) + resp_delay = GSPI_F0_RESP_DELAY; + else + resp_delay = sd->resp_delay_all ? F0_RESPONSE_DELAY : 0; + break; + case 1: + if (sd->resp_delay_new) + resp_delay = GSPI_F1_RESP_DELAY; + else + resp_delay = F1_RESPONSE_DELAY; + break; + case 2: + if (sd->resp_delay_new) + resp_delay = GSPI_F2_RESP_DELAY; + else + resp_delay = sd->resp_delay_all ? F2_RESPONSE_DELAY : 0; + break; + default: + ASSERT(0); + break; + } + /* Program response delay */ + if (sd->resp_delay_new == FALSE) + bcmspi_prog_resp_delay(sd, func, resp_delay); + } + + /* +4 for cmd and +4 for dstatus */ + hostlen = datalen + 8 + resp_delay; + hostlen += dstatus_idx; + hostlen += (4 - (hostlen & 0x3)); + spi_sendrecv(sd, spi_outbuf, spi_inbuf, hostlen); + + /* for Read, get the data into the input buffer */ + if (datalen != 0) { + if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) { /* if read cmd */ + for (j = 0; j < datalen/4; j++) { + if (sd->wordlen == 4) { /* 32bit spid */ + data[j] = SPISWAP_WD4(*(uint32 *)&spi_inbuf[j * 4 + + CMDLEN + resp_delay]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + data[j] = SPISWAP_WD2(*(uint32 *)&spi_inbuf[j * 4 + + CMDLEN + resp_delay]); + } + } + + if ((DWORDMODE_ON) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) { + ptr = (uint16 *)&data[0]; + templen = *ptr; + buslen = len = ~(*(ptr + 1)); + buslen = ROUNDUP(buslen, 16); + /* populate actual len in hw-header */ + if (templen == buslen) + *ptr = len; + } + } + } + + /* Restore back the len field of the hw header */ + if (DWORDMODE_ON) { + if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) && + (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) { + ptr = (uint16 *)&data[0]; + *ptr = (uint16)(~*(ptr+1)); + } + } + + dstatus_idx += (datalen + CMDLEN + resp_delay); + /* Last 4bytes are dstatus. Device is configured to return status bits. 
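+	 *
+	 * Worked example (illustrative, assuming CMDLEN is the 4-byte command
+	 * echo): for a 4-byte F1 read with a 6-byte response delay,
+	 * dstatus_idx = 4 (data) + 4 (cmd) + 6 (resp delay) = 14, so the
+	 * status word begins at byte 14 of spi_inbuf.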
*/ + if (sd->wordlen == 4) { /* 32bit spid */ + sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf[dstatus_idx]); + } else if (sd->wordlen == 2) { /* 16bit spid */ + sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf[dstatus_idx]); + } else { + sd_err(("Host is %d bit machine, could not read SPI dstatus.\n", + 8 * sd->wordlen)); + return ERROR; + } + if (sd->card_dstatus == 0xffffffff) { + sd_err(("looks like not a GSPI device or device is not powered.\n")); + } + + err = bcmspi_update_stats(sd, cmd_arg); + + return err; + +} + +static int +bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, + uint32 addr, int nbytes, uint32 *data) +{ + int status; + uint32 cmd_arg; + bool write = rw == SDIOH_READ ? 0 : 1; + uint retries = 0; + + bool enable; + uint32 spilen; + + cmd_arg = 0; + + ASSERT(nbytes); + ASSERT(nbytes <= sd->client_block_size[func]); + + if (write) sd->t_cnt++; else sd->r_cnt++; + + if (func == 2) { + /* Frame len check limited by gSPI. */ + if ((nbytes > 2000) && write) { + sd_trace((">2KB write: F2 wr of %d bytes\n", nbytes)); + } + /* ASSERT(nbytes <= 2048); Fix bigger len gspi issue and uncomment. */ + /* If F2 fifo on device is not ready to receive data, don't do F2 transfer */ + if (write) { + uint32 dstatus; + /* check F2 ready with cached one */ + bcmspi_cmd_getdstatus(sd, &dstatus); + if ((dstatus & STATUS_F2_RX_READY) == 0) { + retries = WAIT_F2RXFIFORDY; + enable = 0; + while (retries-- && !enable) { + OSL_DELAY(WAIT_F2RXFIFORDY_DELAY * 1000); + bcmspi_card_regread(sd, SPI_FUNC_0, SPID_STATUS_REG, 4, + &dstatus); + if (dstatus & STATUS_F2_RX_READY) + enable = TRUE; + } + if (!enable) { + struct spierrstats_t *spierrstats = &sd->spierrstats; + spierrstats->f2rxnotready++; + sd_err(("F2 FIFO is not ready to receive data.\n")); + return ERROR; + } + sd_trace(("No of retries on F2 ready %d\n", + (WAIT_F2RXFIFORDY - retries))); + } + } + } + + /* F2 transfers happen on 0 addr */ + addr = (func == 2) ? 0 : addr; + + /* In pio mode buffer is read using fixed address fifo in func 1 */ + if ((func == 1) && (fifo)) + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0); + else + cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); + + cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func); + cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, addr); + cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, write); + spilen = sd->data_xfer_count = MIN(sd->client_block_size[func], nbytes); + if ((sd->dwordmode == TRUE) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) { + /* convert len to mod4 size */ + spilen = spilen + ((spilen & 0x3) ? (4 - (spilen & 0x3)): 0); + cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2)); + } else + cmd_arg = SFIELD(cmd_arg, SPI_LEN, spilen); + + if ((func == 2) && (fifo == 1)) { + sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", + __FUNCTION__, write ? "Wr" : "Rd", func, "INCR", + addr, nbytes, sd->r_cnt, sd->t_cnt)); + } + + sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg)); + sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n", + __FUNCTION__, write ? "Wd" : "Rd", func, "INCR", + addr, nbytes, sd->r_cnt, sd->t_cnt)); + + + if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, nbytes)) != SUCCESS) { + sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, + (write ? 
"write" : "read"))); + return status; + } + + /* gSPI expects that hw-header-len is equal to spi-command-len */ + if ((func == 2) && (rw == SDIOH_WRITE) && (sd->dwordmode == FALSE)) { + ASSERT((uint16)sd->data_xfer_count == (uint16)(*data & 0xffff)); + ASSERT((uint16)sd->data_xfer_count == (uint16)(~((*data & 0xffff0000) >> 16))); + } + + if ((nbytes > 2000) && !write) { + sd_trace((">2KB read: F2 rd of %d bytes\n", nbytes)); + } + + return SUCCESS; +} + +/* Reset and re-initialize the device */ +int +sdioh_sdio_reset(sdioh_info_t *si) +{ + si->card_init_done = FALSE; + return bcmspi_client_init(si); +} + +SDIOH_API_RC +sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio) +{ + return SDIOH_API_RC_FAIL; +} + +SDIOH_API_RC +sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab) +{ + return SDIOH_API_RC_FAIL; +} + +bool +sdioh_gpioin(sdioh_info_t *sd, uint32 gpio) +{ + return FALSE; +} + +SDIOH_API_RC +sdioh_gpio_init(sdioh_info_t *sd) +{ + return SDIOH_API_RC_FAIL; +} diff --git a/drivers/net/wireless/bcmdhd/bcmutils.c b/drivers/net/wireless/bcmdhd/bcmutils.c new file mode 100644 index 000000000000..a4f1f25b0a20 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmutils.c @@ -0,0 +1,3545 @@ +/* + * Driver O/S-independent utility routines + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmutils.c 591286 2015-10-07 11:59:26Z $ + */ + +#include +#include +#include +#include +#ifdef BCMDRIVER + +#include +#include + +#else /* !BCMDRIVER */ + +#include +#include +#include + +#if defined(BCMEXTSUP) +#include +#endif + +#ifndef ASSERT +#define ASSERT(exp) +#endif + +#endif /* !BCMDRIVER */ + +#include +#include +#include +#include +#include +#include +#include + + +void *_bcmutils_dummy_fn = NULL; + + + + +#ifdef BCMDRIVER + + + +/* copy a pkt buffer chain into a buffer */ +uint +pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf) +{ + uint n, ret = 0; + + if (len < 0) + len = 4096; /* "infinite" */ + + /* skip 'offset' bytes */ + for (; p && offset; p = PKTNEXT(osh, p)) { + if (offset < (uint)PKTLEN(osh, p)) + break; + offset -= PKTLEN(osh, p); + } + + if (!p) + return 0; + + /* copy the data */ + for (; p && len; p = PKTNEXT(osh, p)) { + n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); + bcopy(PKTDATA(osh, p) + offset, buf, n); + buf += n; + len -= n; + ret += n; + offset = 0; + } + + return ret; +} + +/* copy a buffer into a pkt buffer chain */ +uint +pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf) +{ + uint n, ret = 0; + + + /* skip 'offset' bytes */ + for (; p && offset; p = PKTNEXT(osh, p)) { + if (offset < (uint)PKTLEN(osh, p)) + break; + offset -= PKTLEN(osh, p); + } + + if (!p) + return 0; + + /* copy the data */ + for (; p && len; p = PKTNEXT(osh, p)) { + n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); + bcopy(buf, PKTDATA(osh, p) + offset, n); + buf += n; + len -= n; + ret += n; + offset = 0; + } + + return ret; +} + + + +/* return total length of buffer chain */ +uint BCMFASTPATH +pkttotlen(osl_t *osh, void *p) +{ + uint total; + int len; + + total = 0; + for (; p; p = PKTNEXT(osh, p)) { + len = PKTLEN(osh, p); + total += len; +#ifdef BCMLFRAG + if (BCMLFRAG_ENAB()) { + if (PKTISFRAG(osh, p)) { + total += PKTFRAGTOTLEN(osh, p); + } + } +#endif + } + + return (total); +} + +/* return the last buffer of chained pkt */ +void * +pktlast(osl_t *osh, void *p) +{ + for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p)) + ; + + return (p); +} + +/* count segments of a chained packet */ +uint BCMFASTPATH +pktsegcnt(osl_t *osh, void *p) +{ + uint cnt; + + for (cnt = 0; p; p = PKTNEXT(osh, p)) { + cnt++; +#ifdef BCMLFRAG + if (BCMLFRAG_ENAB()) { + if (PKTISFRAG(osh, p)) { + cnt += PKTFRAGTOTNUM(osh, p); + } + } +#endif + } + + return cnt; +} + + +/* count segments of a chained packet */ +uint BCMFASTPATH +pktsegcnt_war(osl_t *osh, void *p) +{ + uint cnt; + uint8 *pktdata; + uint len, remain, align64; + + for (cnt = 0; p; p = PKTNEXT(osh, p)) { + cnt++; + len = PKTLEN(osh, p); + if (len > 128) { + pktdata = (uint8 *)PKTDATA(osh, p); /* starting address of data */ + /* Check for page boundary straddle (2048B) */ + if (((uintptr)pktdata & ~0x7ff) != ((uintptr)(pktdata+len) & ~0x7ff)) + cnt++; + + align64 = (uint)((uintptr)pktdata & 0x3f); /* aligned to 64B */ + align64 = (64 - align64) & 0x3f; + len -= align64; /* bytes from aligned 64B to end */ + /* if aligned to 128B, check for MOD 128 between 1 to 4B */ + remain = len % 128; + if (remain > 0 && remain <= 4) + cnt++; /* add extra seg */ + } + } + + return cnt; +} + +uint8 * BCMFASTPATH +pktdataoffset(osl_t *osh, void *p, uint offset) +{ + uint total = pkttotlen(osh, p); + uint pkt_off = 0, len = 0; + uint8 *pdata = (uint8 *) PKTDATA(osh, p); + + if (offset > total) + return NULL; + + for (; p; p = PKTNEXT(osh, p)) { + pdata = (uint8 *) PKTDATA(osh, p); + pkt_off = offset - 
len; + len += PKTLEN(osh, p); + if (len > offset) + break; + } + return (uint8*) (pdata+pkt_off); +} + + +/* given a offset in pdata, find the pkt seg hdr */ +void * +pktoffset(osl_t *osh, void *p, uint offset) +{ + uint total = pkttotlen(osh, p); + uint len = 0; + + if (offset > total) + return NULL; + + for (; p; p = PKTNEXT(osh, p)) { + len += PKTLEN(osh, p); + if (len > offset) + break; + } + return p; +} + +#endif /* BCMDRIVER */ + +#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS) +const unsigned char bcm_ctype[] = { + + _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 0-7 */ + _BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C, + _BCM_C, /* 8-15 */ + _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 16-23 */ + _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 24-31 */ + _BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 32-39 */ + _BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 40-47 */ + _BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D, /* 48-55 */ + _BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 56-63 */ + _BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, + _BCM_U|_BCM_X, _BCM_U, /* 64-71 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 72-79 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 80-87 */ + _BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 88-95 */ + _BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, + _BCM_L|_BCM_X, _BCM_L, /* 96-103 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */ + _BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */ + _BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */ + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, + _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */ + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */ + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U, + _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */ + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */ + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L, + _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */ +}; + +ulong +bcm_strtoul(const char *cp, char **endp, uint base) +{ + ulong result, last_result = 0, value; + bool minus; + + minus = FALSE; + + while (bcm_isspace(*cp)) + cp++; + + if (cp[0] == '+') + cp++; + else if (cp[0] == '-') { + minus = TRUE; + cp++; + } + + if (base == 0) { + if (cp[0] == '0') { + if ((cp[1] == 'x') || (cp[1] == 'X')) { + base = 16; + cp = &cp[2]; + } else { + base = 8; + cp = &cp[1]; + } + } else + base = 10; + } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) { + cp = &cp[2]; + } + + result = 0; + + while (bcm_isxdigit(*cp) && + (value = bcm_isdigit(*cp) ? 
*cp-'0' : bcm_toupper(*cp)-'A'+10) < base) {
+		result = result*base + value;
+		/* Detected overflow */
+		if (result < last_result && !minus)
+			return (ulong)-1;
+		last_result = result;
+		cp++;
+	}
+
+	if (minus)
+		result = (ulong)(-(long)result);
+
+	if (endp)
+		*endp = DISCARD_QUAL(cp, char);
+
+	return (result);
+}
+
+int
+bcm_atoi(const char *s)
+{
+	return (int)bcm_strtoul(s, NULL, 10);
+}
+
+/* return pointer to location of substring 'needle' in 'haystack' */
+char *
+bcmstrstr(const char *haystack, const char *needle)
+{
+	int len, nlen;
+	int i;
+
+	if ((haystack == NULL) || (needle == NULL))
+		return DISCARD_QUAL(haystack, char);
+
+	nlen = (int)strlen(needle);
+	len = (int)strlen(haystack) - nlen + 1;
+
+	for (i = 0; i < len; i++)
+		if (memcmp(needle, &haystack[i], nlen) == 0)
+			return DISCARD_QUAL(&haystack[i], char);
+	return (NULL);
+}
+
+char *
+bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len)
+{
+	for (; s_len >= substr_len; s++, s_len--)
+		if (strncmp(s, substr, substr_len) == 0)
+			return DISCARD_QUAL(s, char);
+
+	return NULL;
+}
+
+char *
+bcmstrcat(char *dest, const char *src)
+{
+	char *p;
+
+	p = dest + strlen(dest);
+
+	while ((*p++ = *src++) != '\0')
+		;
+
+	return (dest);
+}
+
+char *
+bcmstrncat(char *dest, const char *src, uint size)
+{
+	char *endp;
+	char *p;
+
+	p = dest + strlen(dest);
+	endp = p + size;
+
+	while (p != endp && (*p++ = *src++) != '\0')
+		;
+
+	return (dest);
+}
+
+
+/****************************************************************************
+* Function:   bcmstrtok
+*
+* Purpose:
+*  Tokenizes a string. This function is conceptually similar to ANSI C strtok(),
+*  but allows strToken() to be used by different strings or callers at the same
+*  time. Each call modifies '*string' by substituting a NULL character for the
+*  first delimiter that is encountered, and updates 'string' to point to the char
+*  after the delimiter. Leading delimiters are skipped.
+*
+* Parameters:
+*  string      (mod) Ptr to string ptr, updated by token.
+*  delimiters  (in)  Set of delimiter characters.
+*  tokdelim    (out) Character that delimits the returned token. (May
+*                    be set to NULL if token delimiter is not required).
+*
+* Returns:  Pointer to the next token found. NULL when no more tokens are found.
+*****************************************************************************
+*/
+char *
+bcmstrtok(char **string, const char *delimiters, char *tokdelim)
+{
+	unsigned char *str;
+	unsigned long map[8];
+	int count;
+	char *nextoken;
+
+	if (tokdelim != NULL) {
+		/* Prime the token delimiter */
+		*tokdelim = '\0';
+	}
+
+	/* Clear control map */
+	for (count = 0; count < 8; count++) {
+		map[count] = 0;
+	}
+
+	/* Set bits in delimiter table */
+	do {
+		map[*delimiters >> 5] |= (1 << (*delimiters & 31));
+	}
+	while (*delimiters++);
+
+	str = (unsigned char*)*string;
+
+	/* Find beginning of token (skip over leading delimiters). Note that
+	 * there is no token iff this loop sets str to point to the terminal
+	 * null (*str == '\0')
+	 */
+	while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
+		str++;
+	}
+
+	nextoken = (char*)str;
+
+	/* Find the end of the token. If it is not the end of the string,
+	 * put a null there.
+	 */
+	for (; *str; str++) {
+		if (map[*str >> 5] & (1 << (*str & 31))) {
+			if (tokdelim != NULL) {
+				*tokdelim = *str;
+			}
+
+			*str++ = '\0';
+			break;
+		}
+	}
+
+	*string = (char*)str;
+
+	/* Determine if a token has been found. */
+	if (nextoken == (char *) str) {
+		return NULL;
+	}
+	else {
+		return nextoken;
+	}
+}
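+
+/*
+ * Illustrative usage of bcmstrtok() (not from the original sources):
+ *
+ *	char buf[] = "key=value;next";
+ *	char *cursor = buf, *tok;
+ *	char delim;
+ *	tok = bcmstrtok(&cursor, "=;", &delim);	-- "key",   delim = '='
+ *	tok = bcmstrtok(&cursor, "=;", &delim);	-- "value", delim = ';'
+ *	tok = bcmstrtok(&cursor, "=;", &delim);	-- "next",  delim = '\0'
+ */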
+
+
+#define xToLower(C) \
+	((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)
+
+
+/****************************************************************************
+* Function:   bcmstricmp
+*
+* Purpose:    Compare two strings case insensitively.
+*
+* Parameters: s1 (in) First string to compare.
+*             s2 (in) Second string to compare.
+*
+* Returns:    Return 0 if the two strings are equal, -1 if s1 < s2 and 1 if
+*             s1 > s2, when ignoring case sensitivity.
+*****************************************************************************
+*/
+int
+bcmstricmp(const char *s1, const char *s2)
+{
+	char dc, sc;
+
+	while (*s2 && *s1) {
+		dc = xToLower(*s1);
+		sc = xToLower(*s2);
+		if (dc < sc) return -1;
+		if (dc > sc) return 1;
+		s1++;
+		s2++;
+	}
+
+	if (*s1 && !*s2) return 1;
+	if (!*s1 && *s2) return -1;
+	return 0;
+}
+
+
+/****************************************************************************
+* Function:   bcmstrnicmp
+*
+* Purpose:    Compare two strings case insensitively, up to a max of 'cnt'
+*             characters.
+*
+* Parameters: s1  (in) First string to compare.
+*             s2  (in) Second string to compare.
+*             cnt (in) Max characters to compare.
+*
+* Returns:    Return 0 if the two strings are equal, -1 if s1 < s2 and 1 if
+*             s1 > s2, when ignoring case sensitivity.
+*****************************************************************************
+*/
+int
+bcmstrnicmp(const char* s1, const char* s2, int cnt)
+{
+	char dc, sc;
+
+	while (*s2 && *s1 && cnt) {
+		dc = xToLower(*s1);
+		sc = xToLower(*s2);
+		if (dc < sc) return -1;
+		if (dc > sc) return 1;
+		s1++;
+		s2++;
+		cnt--;
+	}
+
+	if (!cnt) return 0;
+	if (*s1 && !*s2) return 1;
+	if (!*s1 && *s2) return -1;
+	return 0;
+}
+
+/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
+int
+bcm_ether_atoe(const char *p, struct ether_addr *ea)
+{
+	int i = 0;
+	char *ep;
+
+	for (;;) {
+		ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16);
+		p = ep;
+		if (!*p++ || i == 6)
+			break;
+	}
+
+	return (i == 6);
+}
+
+int
+bcm_atoipv4(const char *p, struct ipv4_addr *ip)
+{
+
+	int i = 0;
+	char *c;
+	for (;;) {
+		ip->addr[i++] = (uint8)bcm_strtoul(p, &c, 0);
+		if (*c++ != '.' || i == IPV4_ADDR_LEN)
+			break;
+		p = c;
+	}
+	return (i == IPV4_ADDR_LEN);
+}
+#endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
+
+
+#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
+/* registry routine buffer preparation utility functions:
+ * parameter order is like strncpy, but returns count
+ * of bytes copied.
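+ * (A worked example, not from the original sources: with abuflen = 4 and a
+ * five-wchar source string, wchar2ascii() below copies three characters
+ * plus the terminating NUL and returns 4.)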
Minimum bytes copied is null char(1)/wchar(2) + */ +ulong +wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen) +{ + ulong copyct = 1; + ushort i; + + if (abuflen == 0) + return 0; + + /* wbuflen is in bytes */ + wbuflen /= sizeof(ushort); + + for (i = 0; i < wbuflen; ++i) { + if (--abuflen == 0) + break; + *abuf++ = (char) *wbuf++; + ++copyct; + } + *abuf = '\0'; + + return copyct; +} +#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */ + +char * +bcm_ether_ntoa(const struct ether_addr *ea, char *buf) +{ + static const char hex[] = + { + '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' + }; + const uint8 *octet = ea->octet; + char *p = buf; + int i; + + for (i = 0; i < 6; i++, octet++) { + *p++ = hex[(*octet >> 4) & 0xf]; + *p++ = hex[*octet & 0xf]; + *p++ = ':'; + } + + *(p-1) = '\0'; + + return (buf); +} + +char * +bcm_ip_ntoa(struct ipv4_addr *ia, char *buf) +{ + snprintf(buf, 16, "%d.%d.%d.%d", + ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]); + return (buf); +} + +char * +bcm_ipv6_ntoa(void *ipv6, char *buf) +{ + /* Implementing RFC 5952 Sections 4 + 5 */ + /* Not thoroughly tested */ + uint16 tmp[8]; + uint16 *a = &tmp[0]; + char *p = buf; + int i, i_max = -1, cnt = 0, cnt_max = 1; + uint8 *a4 = NULL; + memcpy((uint8 *)&tmp[0], (uint8 *)ipv6, IPV6_ADDR_LEN); + + for (i = 0; i < IPV6_ADDR_LEN/2; i++) { + if (a[i]) { + if (cnt > cnt_max) { + cnt_max = cnt; + i_max = i - cnt; + } + cnt = 0; + } else + cnt++; + } + if (cnt > cnt_max) { + cnt_max = cnt; + i_max = i - cnt; + } + if (i_max == 0 && + /* IPv4-translated: ::ffff:0:a.b.c.d */ + ((cnt_max == 4 && a[4] == 0xffff && a[5] == 0) || + /* IPv4-mapped: ::ffff:a.b.c.d */ + (cnt_max == 5 && a[5] == 0xffff))) + a4 = (uint8*) (a + 6); + + for (i = 0; i < IPV6_ADDR_LEN/2; i++) { + if ((uint8*) (a + i) == a4) { + snprintf(p, 16, ":%u.%u.%u.%u", a4[0], a4[1], a4[2], a4[3]); + break; + } else if (i == i_max) { + *p++ = ':'; + i += cnt_max - 1; + p[0] = ':'; + p[1] = '\0'; + } else { + if (i) + *p++ = ':'; + p += snprintf(p, 8, "%x", ntoh16(a[i])); + } + } + + return buf; +} +#ifdef BCMDRIVER + +void +bcm_mdelay(uint ms) +{ + uint i; + + for (i = 0; i < ms; i++) { + OSL_DELAY(1000); + } +} + + + + + +#if defined(DHD_DEBUG) +/* pretty hex print a pkt buffer chain */ +void +prpkt(const char *msg, osl_t *osh, void *p0) +{ + void *p; + + if (msg && (msg[0] != '\0')) + printf("%s:\n", msg); + + for (p = p0; p; p = PKTNEXT(osh, p)) + prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p)); +} +#endif + +/* Takes an Ethernet frame and sets out-of-bound PKTPRIO. + * Also updates the inplace vlan tag if requested. + * For debugging, it returns an indication of what it did. 
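+ *
+ * (Illustrative decoding, not from the original sources: the function
+ * returns one or more PKTPRIO_* flags OR'ed with the priority it set in the
+ * low bits, so a caller can do
+ *
+ *	rc = pktsetprio(pkt, FALSE);
+ *	prio = rc & MAXPRIO;		-- the 802.1D priority that was set
+ *	if (rc & PKTPRIO_VDSCP)
+ *		-- priority came from DSCP in a VLAN-tagged frame --
+ *
+ * mirroring the rc |= PKTPRIO_* sites in the body below.)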
+ */ +uint BCMFASTPATH +pktsetprio(void *pkt, bool update_vtag) +{ + struct ether_header *eh; + struct ethervlan_header *evh; + uint8 *pktdata; + int priority = 0; + int rc = 0; + + pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt); + ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16))); + + eh = (struct ether_header *) pktdata; + + if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) { + uint16 vlan_tag; + int vlan_prio, dscp_prio = 0; + + evh = (struct ethervlan_header *)eh; + + vlan_tag = ntoh16(evh->vlan_tag); + vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK; + + if ((evh->ether_type == hton16(ETHER_TYPE_IP)) || + (evh->ether_type == hton16(ETHER_TYPE_IPV6))) { + uint8 *ip_body = pktdata + sizeof(struct ethervlan_header); + uint8 tos_tc = IP_TOS46(ip_body); + dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); + } + + /* DSCP priority gets precedence over 802.1P (vlan tag) */ + if (dscp_prio != 0) { + priority = dscp_prio; + rc |= PKTPRIO_VDSCP; + } else { + priority = vlan_prio; + rc |= PKTPRIO_VLAN; + } + /* + * If the DSCP priority is not the same as the VLAN priority, + * then overwrite the priority field in the vlan tag, with the + * DSCP priority value. This is required for Linux APs because + * the VLAN driver on Linux, overwrites the skb->priority field + * with the priority value in the vlan tag + */ + if (update_vtag && (priority != vlan_prio)) { + vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT); + vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT; + evh->vlan_tag = hton16(vlan_tag); + rc |= PKTPRIO_UPD; + } +#ifdef DHD_LOSSLESS_ROAMING + } else if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) { + priority = PRIO_8021D_NC; + rc = PKTPRIO_DSCP; +#endif /* DHD_LOSSLESS_ROAMING */ + } else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) || + (eh->ether_type == hton16(ETHER_TYPE_IPV6))) { + uint8 *ip_body = pktdata + sizeof(struct ether_header); + uint8 tos_tc = IP_TOS46(ip_body); + uint8 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT; + switch (dscp) { + case DSCP_EF: + priority = PRIO_8021D_VO; + break; + case DSCP_AF31: + case DSCP_AF32: + case DSCP_AF33: + priority = PRIO_8021D_CL; + break; + case DSCP_AF21: + case DSCP_AF22: + case DSCP_AF23: + case DSCP_AF11: + case DSCP_AF12: + case DSCP_AF13: + priority = PRIO_8021D_EE; + break; + default: + priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); + break; + } + + rc |= PKTPRIO_DSCP; + } + + ASSERT(priority >= 0 && priority <= MAXPRIO); + PKTSETPRIO(pkt, priority); + return (rc | priority); +} + +/* lookup user priority for specified DSCP */ +static uint8 +dscp2up(uint8 *up_table, uint8 dscp) +{ + uint8 user_priority = 255; + + /* lookup up from table if parameters valid */ + if (up_table != NULL && dscp < UP_TABLE_MAX) { + user_priority = up_table[dscp]; + } + + /* 255 is unused value so return up from dscp */ + if (user_priority == 255) { + user_priority = dscp >> (IPV4_TOS_PREC_SHIFT - IPV4_TOS_DSCP_SHIFT); + } + + return user_priority; +} + +/* set user priority by QoS Map Set table (UP table), table size is UP_TABLE_MAX */ +uint BCMFASTPATH +pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag) +{ + if (up_table) { + uint8 *pktdata; + uint pktlen; + uint8 dscp; + uint user_priority = 0; + uint rc = 0; + + pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt); + pktlen = PKTLEN(OSH_NULL, pkt); + + if (pktgetdscp(pktdata, pktlen, &dscp)) { + rc = PKTPRIO_DSCP; + user_priority = dscp2up(up_table, dscp); + PKTSETPRIO(pkt, user_priority); + } + + return (rc | user_priority); + } else { + return pktsetprio(pkt, update_vtag); + } +} + +/* Returns TRUE 
and DSCP if IP header found, FALSE otherwise. + */ +bool BCMFASTPATH +pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp) +{ + struct ether_header *eh; + struct ethervlan_header *evh; + uint8 *ip_body; + bool rc = FALSE; + + /* minimum length is ether header and IP header */ + if (pktlen < sizeof(struct ether_header) + IPV4_MIN_HEADER_LEN) + return FALSE; + + eh = (struct ether_header *) pktdata; + + if (eh->ether_type == HTON16(ETHER_TYPE_IP)) { + ip_body = pktdata + sizeof(struct ether_header); + *dscp = IP_DSCP46(ip_body); + rc = TRUE; + } + else if (eh->ether_type == HTON16(ETHER_TYPE_8021Q)) { + evh = (struct ethervlan_header *)eh; + + /* minimum length is ethervlan header and IP header */ + if (pktlen >= sizeof(struct ethervlan_header) + IPV4_MIN_HEADER_LEN && + evh->ether_type == HTON16(ETHER_TYPE_IP)) { + ip_body = pktdata + sizeof(struct ethervlan_header); + *dscp = IP_DSCP46(ip_body); + rc = TRUE; + } + } + + return rc; +} + +/* The 0.5KB string table is not removed by compiler even though it's unused */ + +static char bcm_undeferrstr[32]; +static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE; + +/* Convert the error codes into related error strings */ +const char * +bcmerrorstr(int bcmerror) +{ + /* check if someone added a bcmerror code but forgot to add errorstring */ + ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1)); + + if (bcmerror > 0 || bcmerror < BCME_LAST) { + snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror); + return bcm_undeferrstr; + } + + ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN); + + return bcmerrorstrtable[-bcmerror]; +} + + + +/* iovar table lookup */ +/* could mandate sorted tables and do a binary search */ +const bcm_iovar_t* +bcm_iovar_lookup(const bcm_iovar_t *table, const char *name) +{ + const bcm_iovar_t *vi; + const char *lookup_name; + + /* skip any ':' delimited option prefixes */ + lookup_name = strrchr(name, ':'); + if (lookup_name != NULL) + lookup_name++; + else + lookup_name = name; + + ASSERT(table != NULL); + + for (vi = table; vi->name; vi++) { + if (!strcmp(vi->name, lookup_name)) + return vi; + } + /* ran to end of table */ + + return NULL; /* var name not found */ +} + +int +bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set) +{ + int bcmerror = 0; + + /* length check on io buf */ + switch (vi->type) { + case IOVT_BOOL: + case IOVT_INT8: + case IOVT_INT16: + case IOVT_INT32: + case IOVT_UINT8: + case IOVT_UINT16: + case IOVT_UINT32: + /* all integers are int32 sized args at the ioctl interface */ + if (len < (int)sizeof(int)) { + bcmerror = BCME_BUFTOOSHORT; + } + break; + + case IOVT_BUFFER: + /* buffer must meet minimum length requirement */ + if (len < vi->minlen) { + bcmerror = BCME_BUFTOOSHORT; + } + break; + + case IOVT_VOID: + if (!set) { + /* Cannot return nil... 
*/ + bcmerror = BCME_UNSUPPORTED; + } else if (len) { + /* Set is an action w/o parameters */ + bcmerror = BCME_BUFTOOLONG; + } + break; + + default: + /* unknown type for length check in iovar info */ + ASSERT(0); + bcmerror = BCME_UNSUPPORTED; + } + + return bcmerror; +} + +#endif /* BCMDRIVER */ + +#ifdef BCM_OBJECT_TRACE + +#define BCM_OBJECT_MERGE_SAME_OBJ 0 + +/* some place may add / remove the object to trace list for Linux: */ +/* add: osl_alloc_skb dev_alloc_skb skb_realloc_headroom dhd_start_xmit */ +/* remove: osl_pktfree dev_kfree_skb netif_rx */ + +#define BCM_OBJDBG_COUNT (1024 * 100) +static spinlock_t dbgobj_lock; +#define BCM_OBJDBG_LOCK_INIT() spin_lock_init(&dbgobj_lock) +#define BCM_OBJDBG_LOCK_DESTROY() +#define BCM_OBJDBG_LOCK spin_lock_irqsave +#define BCM_OBJDBG_UNLOCK spin_unlock_irqrestore + +#define BCM_OBJDBG_ADDTOHEAD 0 +#define BCM_OBJDBG_ADDTOTAIL 1 + +#define BCM_OBJDBG_CALLER_LEN 32 +struct bcm_dbgobj { + struct bcm_dbgobj *prior; + struct bcm_dbgobj *next; + uint32 flag; + void *obj; + uint32 obj_sn; + uint32 obj_state; + uint32 line; + char caller[BCM_OBJDBG_CALLER_LEN]; +}; + +static struct bcm_dbgobj *dbgobj_freehead = NULL; +static struct bcm_dbgobj *dbgobj_freetail = NULL; +static struct bcm_dbgobj *dbgobj_objhead = NULL; +static struct bcm_dbgobj *dbgobj_objtail = NULL; + +static uint32 dbgobj_sn = 0; +static int dbgobj_count = 0; +static struct bcm_dbgobj bcm_dbg_objs[BCM_OBJDBG_COUNT]; + +void +bcm_object_trace_init(void) +{ + int i = 0; + BCM_OBJDBG_LOCK_INIT(); + memset(&bcm_dbg_objs, 0x00, sizeof(struct bcm_dbgobj) * BCM_OBJDBG_COUNT); + dbgobj_freehead = &bcm_dbg_objs[0]; + dbgobj_freetail = &bcm_dbg_objs[BCM_OBJDBG_COUNT - 1]; + + for (i = 0; i < BCM_OBJDBG_COUNT; ++i) { + bcm_dbg_objs[i].next = (i == (BCM_OBJDBG_COUNT - 1)) ? + dbgobj_freehead : &bcm_dbg_objs[i + 1]; + bcm_dbg_objs[i].prior = (i == 0) ? 
+ dbgobj_freetail : &bcm_dbg_objs[i - 1]; + } +} + +void +bcm_object_trace_deinit(void) +{ + if (dbgobj_objhead || dbgobj_objtail) { + printf("%s: not all objects are released\n", __FUNCTION__); + ASSERT(0); + } + BCM_OBJDBG_LOCK_DESTROY(); +} + +static void +bcm_object_rm_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail, + struct bcm_dbgobj *dbgobj) +{ + if ((dbgobj == *head) && (dbgobj == *tail)) { + *head = NULL; + *tail = NULL; + } else if (dbgobj == *head) { + *head = (*head)->next; + } else if (dbgobj == *tail) { + *tail = (*tail)->prior; + } + dbgobj->next->prior = dbgobj->prior; + dbgobj->prior->next = dbgobj->next; +} + +static void +bcm_object_add_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail, + struct bcm_dbgobj *dbgobj, int addtotail) +{ + if (!(*head) && !(*tail)) { + *head = dbgobj; + *tail = dbgobj; + dbgobj->next = dbgobj; + dbgobj->prior = dbgobj; + } else if ((*head) && (*tail)) { + (*tail)->next = dbgobj; + (*head)->prior = dbgobj; + dbgobj->next = *head; + dbgobj->prior = *tail; + if (addtotail == BCM_OBJDBG_ADDTOTAIL) + *tail = dbgobj; + else + *head = dbgobj; + } else { + ASSERT(0); /* can't be this case */ + } +} + +static INLINE void +bcm_object_movetoend(struct bcm_dbgobj **head, struct bcm_dbgobj **tail, + struct bcm_dbgobj *dbgobj, int movetotail) +{ + if ((*head) && (*tail)) { + if (movetotail == BCM_OBJDBG_ADDTOTAIL) { + if (dbgobj != (*tail)) { + bcm_object_rm_list(head, tail, dbgobj); + bcm_object_add_list(head, tail, dbgobj, movetotail); + } + } else { + if (dbgobj != (*head)) { + bcm_object_rm_list(head, tail, dbgobj); + bcm_object_add_list(head, tail, dbgobj, movetotail); + } + } + } else { + ASSERT(0); /* can't be this case */ + } +} + +void +bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line) +{ + struct bcm_dbgobj *dbgobj; + unsigned long flags; + + BCM_REFERENCE(flags); + BCM_OBJDBG_LOCK(&dbgobj_lock, flags); + + if (opt == BCM_OBJDBG_ADD_PKT || + opt == BCM_OBJDBG_ADD) { + dbgobj = dbgobj_objtail; + while (dbgobj) { + if (dbgobj->obj == obj) { + printf("%s: obj %p allocated from %s(%d)," + " allocate again from %s(%d)\n", + __FUNCTION__, dbgobj->obj, + dbgobj->caller, dbgobj->line, + caller, line); + ASSERT(0); + goto EXIT; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_objtail) + break; + } + +#if BCM_OBJECT_MERGE_SAME_OBJ + dbgobj = dbgobj_freetail; + while (dbgobj) { + if (dbgobj->obj == obj) { + goto FREED_ENTRY_FOUND; + } + dbgobj = dbgobj->prior; + if (dbgobj == dbgobj_freetail) + break; + } +#endif /* BCM_OBJECT_MERGE_SAME_OBJ */ + + dbgobj = dbgobj_freehead; +#if BCM_OBJECT_MERGE_SAME_OBJ +FREED_ENTRY_FOUND: +#endif /* BCM_OBJECT_MERGE_SAME_OBJ */ + if (!dbgobj) { + printf("%s: already got %d objects ?????????????????????\n", + __FUNCTION__, BCM_OBJDBG_COUNT); + ASSERT(0); + goto EXIT; + } + + bcm_object_rm_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj); + dbgobj->obj = obj; + strncpy(dbgobj->caller, caller, BCM_OBJDBG_CALLER_LEN); + dbgobj->caller[BCM_OBJDBG_CALLER_LEN-1] = '\0'; + dbgobj->line = line; + dbgobj->flag = 0; + if (opt == BCM_OBJDBG_ADD_PKT) { + dbgobj->obj_sn = dbgobj_sn++; + dbgobj->obj_state = 0; + /* first 4 bytes is pkt sn */ + if (((unsigned long)PKTTAG(obj)) & 0x3) + printf("pkt tag address not aligned by 4: %p\n", PKTTAG(obj)); + *(uint32*)PKTTAG(obj) = dbgobj->obj_sn; + } + bcm_object_add_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj, + BCM_OBJDBG_ADDTOTAIL); + + dbgobj_count++; + + } else if (opt == BCM_OBJDBG_REMOVE) { + dbgobj = dbgobj_objtail; + while (dbgobj) { + if 
(dbgobj->obj == obj) {
+				if (dbgobj->flag) {
+					printf("%s: rm flagged obj %p flag 0x%08x from %s(%d)\n",
+						__FUNCTION__, obj, dbgobj->flag, caller, line);
+				}
+				bcm_object_rm_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj);
+				memset(dbgobj->caller, 0x00, BCM_OBJDBG_CALLER_LEN);
+				strncpy(dbgobj->caller, caller, BCM_OBJDBG_CALLER_LEN);
+				dbgobj->caller[BCM_OBJDBG_CALLER_LEN-1] = '\0';
+				dbgobj->line = line;
+				bcm_object_add_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj,
+					BCM_OBJDBG_ADDTOTAIL);
+				dbgobj_count--;
+				goto EXIT;
+			}
+			dbgobj = dbgobj->prior;
+			if (dbgobj == dbgobj_objtail)
+				break;
+		}
+
+		dbgobj = dbgobj_freetail;
+		while (dbgobj && dbgobj->obj) {
+			if (dbgobj->obj == obj) {
+				printf("%s: obj %p already freed from %s(%d),"
+					" try free again from %s(%d)\n",
+					__FUNCTION__, obj,
+					dbgobj->caller, dbgobj->line,
+					caller, line);
+				//ASSERT(0); /* release same obj more than one time? */
+				goto EXIT;
+			}
+			dbgobj = dbgobj->prior;
+			if (dbgobj == dbgobj_freetail)
+				break;
+		}
+
+		printf("%s: ################### release non-existing obj %p from %s(%d)\n",
+			__FUNCTION__, obj, caller, line);
+		//ASSERT(0); /* release non-existing obj? */
+
+	}
+
+EXIT:
+	BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+	return;
+}
+
+void
+bcm_object_trace_upd(void *obj, void *obj_new)
+{
+	struct bcm_dbgobj *dbgobj;
+	unsigned long flags;
+
+	BCM_REFERENCE(flags);
+	BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+	dbgobj = dbgobj_objtail;
+	while (dbgobj) {
+		if (dbgobj->obj == obj) {
+			dbgobj->obj = obj_new;
+			if (dbgobj != dbgobj_objtail) {
+				bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+					dbgobj, BCM_OBJDBG_ADDTOTAIL);
+			}
+			goto EXIT;
+		}
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_objtail)
+			break;
+	}
+
+EXIT:
+	BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+	return;
+}
+
+void
+bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn,
+	const char *caller, int line)
+{
+	struct bcm_dbgobj *dbgobj;
+	unsigned long flags;
+
+	BCM_REFERENCE(flags);
+	BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+	dbgobj = dbgobj_objtail;
+	while (dbgobj) {
+		if ((dbgobj->obj == obj) &&
+			((!chksn) || (dbgobj->obj_sn == sn))) {
+			if (dbgobj != dbgobj_objtail) {
+				bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+					dbgobj, BCM_OBJDBG_ADDTOTAIL);
+			}
+			goto EXIT;
+		}
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_objtail)
+			break;
+	}
+
+	dbgobj = dbgobj_freetail;
+	while (dbgobj) {
+		if ((dbgobj->obj == obj) &&
+			((!chksn) || (dbgobj->obj_sn == sn))) {
+			printf("%s: (%s:%d) obj %p (sn %d state %d) was freed from %s(%d)\n",
+				__FUNCTION__, caller, line,
+				dbgobj->obj, dbgobj->obj_sn, dbgobj->obj_state,
+				dbgobj->caller, dbgobj->line);
+			goto EXIT;
+		} else if (dbgobj->obj == NULL) {
+			break;
+		}
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_freetail)
+			break;
+	}
+
+	printf("%s: obj %p not found, check from %s(%d), chksn %s, sn %d\n",
+		__FUNCTION__, obj, caller, line, chksn ? "yes" : "no", sn);
+	dbgobj = dbgobj_objtail;
+	while (dbgobj) {
+		printf("%s: (%s:%d) obj %p sn %d was allocated from %s(%d)\n",
+			__FUNCTION__, caller, line,
+			dbgobj->obj, dbgobj->obj_sn, dbgobj->caller, dbgobj->line);
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_objtail)
+			break;
+	}
+
+EXIT:
+	BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+	return;
+}
+
+void
+bcm_object_feature_set(void *obj, uint32 type, uint32 value)
+{
+	struct bcm_dbgobj *dbgobj;
+	unsigned long flags;
+
+	BCM_REFERENCE(flags);
+	BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+	dbgobj = dbgobj_objtail;
+	while (dbgobj) {
+		if (dbgobj->obj == obj) {
+			if (type == BCM_OBJECT_FEATURE_FLAG) {
+				if (value & BCM_OBJECT_FEATURE_CLEAR)
+					dbgobj->flag &= ~(value);
+				else
+					dbgobj->flag |= (value);
+			} else if (type == BCM_OBJECT_FEATURE_PKT_STATE) {
+				dbgobj->obj_state = value;
+			}
+			if (dbgobj != dbgobj_objtail) {
+				bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+					dbgobj, BCM_OBJDBG_ADDTOTAIL);
+			}
+			goto EXIT;
+		}
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_objtail)
+			break;
+	}
+
+	printf("%s: obj %p not found in active list\n", __FUNCTION__, obj);
+	ASSERT(0);
+
+EXIT:
+	BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+	return;
+}
+
+int
+bcm_object_feature_get(void *obj, uint32 type, uint32 value)
+{
+	int rtn = 0;
+	struct bcm_dbgobj *dbgobj;
+	unsigned long flags;
+
+	BCM_REFERENCE(flags);
+	BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+	dbgobj = dbgobj_objtail;
+	while (dbgobj) {
+		if (dbgobj->obj == obj) {
+			if (type == BCM_OBJECT_FEATURE_FLAG) {
+				rtn = (dbgobj->flag & value) & (~BCM_OBJECT_FEATURE_CLEAR);
+			}
+			if (dbgobj != dbgobj_objtail) {
+				bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+					dbgobj, BCM_OBJDBG_ADDTOTAIL);
+			}
+			goto EXIT;
+		}
+		dbgobj = dbgobj->prior;
+		if (dbgobj == dbgobj_objtail)
+			break;
+	}
+
+	printf("%s: obj %p not found in active list\n", __FUNCTION__, obj);
+	ASSERT(0);
+
+EXIT:
+	BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+	return rtn;
+}
+
+#endif /* BCM_OBJECT_TRACE */
+
+uint8 *
+bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst)
+{
+	uint8 *new_dst = dst;
+	bcm_tlv_t *dst_tlv = (bcm_tlv_t *)dst;
+
+	/* dst buffer should always be valid */
+	ASSERT(dst);
+
+	/* data len must be within valid range */
+	ASSERT((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE));
+
+	/* source data buffer pointer should be valid, unless datalen is 0
+	 * meaning no data with this TLV
+	 */
+	ASSERT((data != NULL) || (datalen == 0));
+
+	/* only do work if the inputs are valid
+	 * - must have a dst to write to AND
+	 * - datalen must be within range AND
+	 * - the source data pointer must be non-NULL if datalen is non-zero
+	 * (this last condition detects datalen > 0 with a NULL data pointer)
+	 */
+	if ((dst != NULL) &&
+		((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) &&
+		((data != NULL) || (datalen == 0))) {
+
+		/* write type, len fields */
+		dst_tlv->id = (uint8)type;
+		dst_tlv->len = (uint8)datalen;
+
+		/* if data is present, copy to the output buffer and update
+		 * pointer to output buffer
+		 */
+		if (datalen > 0) {
+
+			memcpy(dst_tlv->data, data, datalen);
+		}
+
+		/* update the output destination pointer to point past
+		 * the TLV written
+		 */
+		new_dst = dst + BCM_TLV_HDR_SIZE + datalen;
+	}
+
+	return (new_dst);
+}
+
+uint8 *
+bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst, int dst_maxlen)
+{
+	uint8 *new_dst = dst;
+
+	if ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) {
+
+		/* if len + tlv hdr len is more than destlen, don't do anything
+		 * just
return the buffer untouched + */ + if ((int)(datalen + BCM_TLV_HDR_SIZE) <= dst_maxlen) { + + new_dst = bcm_write_tlv(type, data, datalen, dst); + } + } + + return (new_dst); +} + +uint8 * +bcm_copy_tlv(const void *src, uint8 *dst) +{ + uint8 *new_dst = dst; + const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src; + uint totlen; + + ASSERT(dst && src); + if (dst && src) { + + totlen = BCM_TLV_HDR_SIZE + src_tlv->len; + memcpy(dst, src_tlv, totlen); + new_dst = dst + totlen; + } + + return (new_dst); +} + + +uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen) +{ + uint8 *new_dst = dst; + const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src; + + ASSERT(src); + if (src) { + if (bcm_valid_tlv(src_tlv, dst_maxlen)) { + new_dst = bcm_copy_tlv(src, dst); + } + } + + return (new_dst); +} + + +#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS) +/******************************************************************************* + * crc8 + * + * Computes a crc8 over the input data using the polynomial: + * + * x^8 + x^7 +x^6 + x^4 + x^2 + 1 + * + * The caller provides the initial value (either CRC8_INIT_VALUE + * or the previous returned value) to allow for processing of + * discontiguous blocks of data. When generating the CRC the + * caller is responsible for complementing the final return value + * and inserting it into the byte stream. When checking, a final + * return value of CRC8_GOOD_VALUE indicates a valid CRC. + * + * Reference: Dallas Semiconductor Application Note 27 + * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", + * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., + * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt + * + * **************************************************************************** + */ + +static const uint8 crc8_table[256] = { + 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B, + 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21, + 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF, + 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5, + 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14, + 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E, + 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80, + 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA, + 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95, + 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF, + 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01, + 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B, + 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA, + 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0, + 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E, + 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34, + 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0, + 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A, + 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54, + 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E, + 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF, + 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5, + 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B, + 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61, + 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E, + 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74, + 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA, + 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0, + 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41, + 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B, + 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5, + 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F +}; + +#define CRC_INNER_LOOP(n, c, x) \ + (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff] + +uint8 +hndcrc8( 
+ uint8 *pdata, /* pointer to array of data to process */ + uint nbytes, /* number of input data bytes to process */ + uint8 crc /* either CRC8_INIT_VALUE or previous return value */ +) +{ + /* hard code the crc loop instead of using CRC_INNER_LOOP macro + * to avoid the undefined and unnecessary (uint8 >> 8) operation. + */ + while (nbytes-- > 0) + crc = crc8_table[(crc ^ *pdata++) & 0xff]; + + return crc; +} + +/******************************************************************************* + * crc16 + * + * Computes a crc16 over the input data using the polynomial: + * + * x^16 + x^12 +x^5 + 1 + * + * The caller provides the initial value (either CRC16_INIT_VALUE + * or the previous returned value) to allow for processing of + * discontiguous blocks of data. When generating the CRC the + * caller is responsible for complementing the final return value + * and inserting it into the byte stream. When checking, a final + * return value of CRC16_GOOD_VALUE indicates a valid CRC. + * + * Reference: Dallas Semiconductor Application Note 27 + * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", + * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., + * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt + * + * **************************************************************************** + */ + +static const uint16 crc16_table[256] = { + 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF, + 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7, + 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E, + 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876, + 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD, + 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5, + 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C, + 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974, + 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB, + 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3, + 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A, + 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72, + 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9, + 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1, + 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738, + 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70, + 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7, + 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF, + 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036, + 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E, + 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5, + 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD, + 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134, + 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C, + 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3, + 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB, + 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232, + 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A, + 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1, + 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9, + 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330, + 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78 +}; + +uint16 +hndcrc16( + uint8 *pdata, /* 
pointer to array of data to process */ + uint nbytes, /* number of input data bytes to process */ + uint16 crc /* either CRC16_INIT_VALUE or previous return value */ +) +{ + while (nbytes-- > 0) + CRC_INNER_LOOP(16, crc, *pdata++); + return crc; +} + +static const uint32 crc32_table[256] = { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, + 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, + 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, + 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, + 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, + 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, + 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, + 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, + 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, + 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, + 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, + 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, + 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, + 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, + 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, + 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, + 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, + 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, + 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, + 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, + 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, + 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, + 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, + 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, + 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, + 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, + 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, + 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, + 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, + 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, + 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, + 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, + 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, + 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, + 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, + 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, + 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, + 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, + 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, + 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D +}; + +/* + * crc input is CRC32_INIT_VALUE for a fresh start, 
or previous return value if + * accumulating over multiple pieces. + */ +uint32 +hndcrc32(uint8 *pdata, uint nbytes, uint32 crc) +{ + uint8 *pend; + pend = pdata + nbytes; + while (pdata < pend) + CRC_INNER_LOOP(32, crc, *pdata++); + + return crc; +} + +#ifdef notdef +#define CLEN 1499 /* CRC Length */ +#define CBUFSIZ (CLEN+4) +#define CNBUFS 5 /* # of bufs */ + +void +testcrc32(void) +{ + uint j, k, l; + uint8 *buf; + uint len[CNBUFS]; + uint32 crcr; + uint32 crc32tv[CNBUFS] = + {0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110}; + + ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL); + + /* step through all possible alignments */ + for (l = 0; l <= 4; l++) { + for (j = 0; j < CNBUFS; j++) { + len[j] = CLEN; + for (k = 0; k < len[j]; k++) + *(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff; + } + + for (j = 0; j < CNBUFS; j++) { + crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE); + ASSERT(crcr == crc32tv[j]); + } + } + + MFREE(buf, CBUFSIZ*CNBUFS); + return; +} +#endif /* notdef */ + +/* + * Advance from the current 1-byte tag/1-byte length/variable-length value + * triple, to the next, returning a pointer to the next. + * If the current or next TLV is invalid (does not fit in given buffer length), + * NULL is returned. + * *buflen is not modified if the TLV elt parameter is invalid, or is decremented + * by the TLV parameter's length if it is valid. + */ +bcm_tlv_t * +bcm_next_tlv(bcm_tlv_t *elt, int *buflen) +{ + int len; + + /* validate current elt */ + if (!bcm_valid_tlv(elt, *buflen)) { + return NULL; + } + + /* advance to next elt */ + len = elt->len; + elt = (bcm_tlv_t*)(elt->data + len); + *buflen -= (TLV_HDR_LEN + len); + + /* validate next elt */ + if (!bcm_valid_tlv(elt, *buflen)) { + return NULL; + } + + return elt; +} + +/* + * Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag + */ +bcm_tlv_t * +bcm_parse_tlvs(void *buf, int buflen, uint key) +{ + bcm_tlv_t *elt; + int totlen; + + if ((elt = (bcm_tlv_t*)buf) == NULL) { + return NULL; + } + totlen = buflen; + + /* find tagged parameter */ + while (totlen >= TLV_HDR_LEN) { + int len = elt->len; + + /* validate remaining totlen */ + if ((elt->id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) { + + return (elt); + } + + elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN)); + totlen -= (len + TLV_HDR_LEN); + } + + return NULL; +} + +/* + * Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag + * return NULL if not found or length field < min_varlen + */ +bcm_tlv_t * +bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen) +{ + bcm_tlv_t * ret = bcm_parse_tlvs(buf, buflen, key); + if (ret == NULL || ret->len < min_bodylen) { + return NULL; + } + return ret; +} + +/* + * Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag. Stop parsing when we see an element whose ID is greater + * than the target key. 
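+ *
+ * Usage sketch (editorial illustration; 'ies' and 'ies_len' are hypothetical
+ * caller-side names, and the element id constant is assumed to come from the
+ * driver's 802.11 headers):
+ *
+ *   bcm_tlv_t *rsn = bcm_parse_ordered_tlvs(ies, ies_len, DOT11_MNG_RSN_ID);
+ *   if (rsn != NULL)
+ *           the caller may then read rsn->len bytes from rsn->data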
+ */ +bcm_tlv_t * +bcm_parse_ordered_tlvs(void *buf, int buflen, uint key) +{ + bcm_tlv_t *elt; + int totlen; + + elt = (bcm_tlv_t*)buf; + totlen = buflen; + + /* find tagged parameter */ + while (totlen >= TLV_HDR_LEN) { + uint id = elt->id; + int len = elt->len; + + /* Punt if we start seeing IDs > than target key */ + if (id > key) { + return (NULL); + } + + /* validate remaining totlen */ + if ((id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) { + return (elt); + } + + elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN)); + totlen -= (len + TLV_HDR_LEN); + } + return NULL; +} +#endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */ + +#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \ + defined(DHD_DEBUG) +int +bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 flags, char* buf, int len) +{ + int i, slen = 0; + uint32 bit, mask; + const char *name; + mask = bd->mask; + if (len < 2 || !buf) + return 0; + + buf[0] = '\0'; + + for (i = 0; (name = bd->bitfield[i].name) != NULL; i++) { + bit = bd->bitfield[i].bit; + if ((flags & mask) == bit) { + if (len > (int)strlen(name)) { + slen = strlen(name); + strncpy(buf, name, slen+1); + } + break; + } + } + return slen; +} + +int +bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len) +{ + int i; + char* p = buf; + char hexstr[16]; + int slen = 0, nlen = 0; + uint32 bit; + const char* name; + + if (len < 2 || !buf) + return 0; + + buf[0] = '\0'; + + for (i = 0; flags != 0; i++) { + bit = bd[i].bit; + name = bd[i].name; + if (bit == 0 && flags != 0) { + /* print any unnamed bits */ + snprintf(hexstr, 16, "0x%X", flags); + name = hexstr; + flags = 0; /* exit loop */ + } else if ((flags & bit) == 0) + continue; + flags &= ~bit; + nlen = strlen(name); + slen += nlen; + /* count btwn flag space */ + if (flags != 0) + slen += 1; + /* need NULL char as well */ + if (len <= slen) + break; + /* copy NULL char but don't count it */ + strncpy(p, name, nlen + 1); + p += nlen; + /* copy btwn flag space and NULL char */ + if (flags != 0) + p += snprintf(p, 2, " "); + } + + /* indicate the str was too short */ + if (flags != 0) { + p += snprintf(p, 2, ">"); + } + + return (int)(p - buf); +} +#endif + +/* print bytes formatted as hex to a string. 
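+   (an editorial example: the three bytes {0xDE, 0xAD, 0x0F} render as "DEAD0F");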
return the resulting string length */
+int
+bcm_format_hex(char *str, const void *bytes, int len)
+{
+	int i;
+	char *p = str;
+	const uint8 *src = (const uint8*)bytes;
+
+	for (i = 0; i < len; i++) {
+		p += snprintf(p, 3, "%02X", *src);
+		src++;
+	}
+	return (int)(p - str);
+}
+
+/* pretty hex print a contiguous buffer */
+void
+prhex(const char *msg, uchar *buf, uint nbytes)
+{
+	char line[128], *p;
+	int len = sizeof(line);
+	int nchar;
+	uint i;
+
+	if (msg && (msg[0] != '\0'))
+		printf("%s:\n", msg);
+
+	p = line;
+	for (i = 0; i < nbytes; i++) {
+		if (i % 16 == 0) {
+			nchar = snprintf(p, len, " %04x: ", i);	/* line prefix */
+			p += nchar;
+			len -= nchar;
+		}
+		if (len > 0) {
+			nchar = snprintf(p, len, "%02x ", buf[i]);
+			p += nchar;
+			len -= nchar;
+		}
+
+		if (i % 16 == 15) {
+			printf("%s\n", line);	/* flush line */
+			p = line;
+			len = sizeof(line);
+		}
+	}
+
+	/* flush last partial line */
+	if (p != line)
+		printf("%s\n", line);
+}
+
+static const char *crypto_algo_names[] = {
+	"NONE",
+	"WEP1",
+	"TKIP",
+	"WEP128",
+	"AES_CCM",
+	"AES_OCB_MSDU",
+	"AES_OCB_MPDU",
+	"NALG",
+	"UNDEF",
+	"UNDEF",
+	"UNDEF",
+	"UNDEF",
+	"PMK",
+	"BIP",
+	"AES_GCM",
+	"AES_CCM256",
+	"AES_GCM256",
+	"BIP_CMAC256",
+	"BIP_GMAC",
+	"BIP_GMAC256",
+	"UNDEF"
+};
+
+const char *
+bcm_crypto_algo_name(uint algo)
+{
+	return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR";
+}
+
+
+char *
+bcm_chipname(uint chipid, char *buf, uint len)
+{
+	const char *fmt;
+
+	fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+	snprintf(buf, len, fmt, chipid);
+	return buf;
+}
+
+/* Produce a human-readable string for boardrev */
+char *
+bcm_brev_str(uint32 brev, char *buf)
+{
+	if (brev < 0x100)
+		snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+	else
+		snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff);
+
+	return (buf);
+}
+
+#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */
+
+/* dump large strings to console */
+void
+printbig(char *buf)
+{
+	uint len, max_len;
+	char c;
+
+	len = (uint)strlen(buf);
+
+	max_len = BUFSIZE_TODUMP_ATONCE;
+
+	while (len > max_len) {
+		c = buf[max_len];
+		buf[max_len] = '\0';
+		printf("%s", buf);
+		buf[max_len] = c;
+
+		buf += max_len;
+		len -= max_len;
+	}
+	/* print the remaining string */
+	printf("%s\n", buf);
+	return;
+}
+
+/* routine to dump fields in a fielddesc structure */
+uint
+bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array,
+	char *buf, uint32 bufsize)
+{
+	uint filled_len;
+	int len;
+	struct fielddesc *cur_ptr;
+
+	filled_len = 0;
+	cur_ptr = fielddesc_array;
+
+	while (bufsize > 1) {
+		if (cur_ptr->nameandfmt == NULL)
+			break;
+		len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
+			read_rtn(arg0, arg1, cur_ptr->offset));
+		/* check for snprintf overflow or error */
+		if (len < 0 || (uint32)len >= bufsize)
+			len = bufsize - 1;
+		buf += len;
+		bufsize -= len;
+		filled_len += len;
+		cur_ptr++;
+	}
+	return filled_len;
+}
+
+uint
+bcm_mkiovar(const char *name, char *data, uint datalen, char *buf, uint buflen)
+{
+	uint len;
+
+	len = (uint)strlen(name) + 1;
+
+	if ((len + datalen) > buflen)
+		return 0;
+
+	strncpy(buf, name, buflen);
+
+	/* append data onto the end of the name string */
+	memcpy(&buf[len], data, datalen);
+	len += datalen;
+
+	return len;
+}
+
+/* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is the largest mW value that fits in
+ * a uint16.
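+ *
+ * Worked example (editorial note): the table's first entry corresponds to
+ * qdBm = 153, i.e. 153/4 = 38.25 dBm, and 10^(38.25/10) is approximately
+ * 6683 mW, matching the first entry of nqdBm_to_mW_map[] below.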
+ */ + +#define QDBM_OFFSET 153 /* Offset for first entry */ +#define QDBM_TABLE_LEN 40 /* Table size */ + +/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET. + * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2 + */ +#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */ + +/* Largest mW value that will round down to the last table entry, + * QDBM_OFFSET + QDBM_TABLE_LEN-1. + * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2. + */ +#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */ + +static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = { +/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */ +/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000, +/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849, +/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119, +/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811, +/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096 +}; + +uint16 +bcm_qdbm_to_mw(uint8 qdbm) +{ + uint factor = 1; + int idx = qdbm - QDBM_OFFSET; + + if (idx >= QDBM_TABLE_LEN) { + /* clamp to max uint16 mW value */ + return 0xFFFF; + } + + /* scale the qdBm index up to the range of the table 0-40 + * where an offset of 40 qdBm equals a factor of 10 mW. + */ + while (idx < 0) { + idx += 40; + factor *= 10; + } + + /* return the mW value scaled down to the correct factor of 10, + * adding in factor/2 to get proper rounding. + */ + return ((nqdBm_to_mW_map[idx] + factor/2) / factor); +} + +uint8 +bcm_mw_to_qdbm(uint16 mw) +{ + uint8 qdbm; + int offset; + uint mw_uint = mw; + uint boundary; + + /* handle boundary case */ + if (mw_uint <= 1) + return 0; + + offset = QDBM_OFFSET; + + /* move mw into the range of the table */ + while (mw_uint < QDBM_TABLE_LOW_BOUND) { + mw_uint *= 10; + offset -= 40; + } + + for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) { + boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] - + nqdBm_to_mW_map[qdbm])/2; + if (mw_uint < boundary) break; + } + + qdbm += (uint8)offset; + + return (qdbm); +} + + +uint +bcm_bitcount(uint8 *bitmap, uint length) +{ + uint bitcount = 0, i; + uint8 tmp; + for (i = 0; i < length; i++) { + tmp = bitmap[i]; + while (tmp) { + bitcount++; + tmp &= (tmp - 1); + } + } + return bitcount; +} + +#if defined(BCMDRIVER) || defined(WL_UNITTEST) + +/* triggers bcm_bprintf to print to kernel log */ +bool bcm_bprintf_bypass = FALSE; + +/* Initialization of bcmstrbuf structure */ +void +bcm_binit(struct bcmstrbuf *b, char *buf, uint size) +{ + b->origsize = b->size = size; + b->origbuf = b->buf = buf; +} + +/* Buffer sprintf wrapper to guard against buffer overflow */ +int +bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...) +{ + va_list ap; + int r; + + va_start(ap, fmt); + + r = vsnprintf(b->buf, b->size, fmt, ap); + if (bcm_bprintf_bypass == TRUE) { + printf(b->buf); + goto exit; + } + + /* Non Ansi C99 compliant returns -1, + * Ansi compliant return r >= b->size, + * bcmstdlib returns 0, handle all + */ + /* r == 0 is also the case when strlen(fmt) is zero. + * typically the case when "" is passed as argument. 
+ */
+	if ((r == -1) || (r >= (int)b->size)) {
+		b->size = 0;
+	} else {
+		b->size -= r;
+		b->buf += r;
+	}
+
+exit:
+	va_end(ap);
+
+	return r;
+}
+
+void
+bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, const uint8 *buf, int len)
+{
+	int i;
+
+	if (msg != NULL && msg[0] != '\0')
+		bcm_bprintf(b, "%s", msg);
+	for (i = 0; i < len; i++)
+		bcm_bprintf(b, "%02X", buf[i]);
+	if (newline)
+		bcm_bprintf(b, "\n");
+}
+
+void
+bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount)
+{
+	int i;
+
+	for (i = 0; i < num_bytes; i++) {
+		num[i] += amount;
+		if (num[i] >= amount)
+			break;
+		amount = 1;
+	}
+}
+
+int
+bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes)
+{
+	int i;
+
+	for (i = nbytes - 1; i >= 0; i--) {
+		if (arg1[i] != arg2[i])
+			return (arg1[i] - arg2[i]);
+	}
+	return 0;
+}
+
+void
+bcm_print_bytes(const char *name, const uchar *data, int len)
+{
+	int i;
+	int per_line = 0;
+
+	printf("%s: %d \n", name ? name : "", len);
+	for (i = 0; i < len; i++) {
+		printf("%02x ", *data++);
+		per_line++;
+		if (per_line == 16) {
+			per_line = 0;
+			printf("\n");
+		}
+	}
+	printf("\n");
+}
+
+/* Look for vendor-specific IE with specified OUI and optional type */
+bcm_tlv_t *
+bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type, int type_len)
+{
+	bcm_tlv_t *ie;
+	uint8 ie_len;
+
+	ie = (bcm_tlv_t*)tlvs;
+
+	/* make sure we are looking at a valid IE */
+	if (ie == NULL || !bcm_valid_tlv(ie, tlvs_len)) {
+		return NULL;
+	}
+
+	/* Walk through the IEs looking for an OUI match */
+	do {
+		ie_len = ie->len;
+		if ((ie->id == DOT11_MNG_PROPR_ID) &&
+			(ie_len >= (DOT11_OUI_LEN + type_len)) &&
+			!bcmp(ie->data, voui, DOT11_OUI_LEN))
+		{
+			/* compare optional type */
+			if (type_len == 0 ||
+				!bcmp(&ie->data[DOT11_OUI_LEN], type, type_len)) {
+				return (ie);	/* a match */
+			}
+		}
+	} while ((ie = bcm_next_tlv(ie, &tlvs_len)) != NULL);
+
+	return NULL;
+}
+
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+	defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+#define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1)
+
+int
+bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len)
+{
+	uint i, c;
+	char *p = buf;
+	char *endp = buf + SSID_FMT_BUF_LEN;
+
+	if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN;
+
+	for (i = 0; i < ssid_len; i++) {
+		c = (uint)ssid[i];
+		if (c == '\\') {
+			*p++ = '\\';
+			*p++ = '\\';
+		} else if (bcm_isprint((uchar)c)) {
+			*p++ = (char)c;
+		} else {
+			p += snprintf(p, (endp - p), "\\x%02X", c);
+		}
+	}
+	*p = '\0';
+	ASSERT(p < endp);
+
+	return (int)(p - buf);
+}
+#endif
+
+#endif /* BCMDRIVER || WL_UNITTEST */
+
+/*
+ * ProcessVars: Takes a buffer of "<var>=<value>\n" lines read from a file and ending in a NUL.
+ * Also accepts nvram files which are already in the format of <var>=<value>\0\<var>=<value>\0
+ * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs.
+ * Shortens buffer as needed and pads with NULs. End of buffer is marked by two NULs.
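+ *
+ * Example (editorial illustration): an input buffer containing
+ *   "foo=1\r\n# a comment\n\nbar=2\n"
+ * is compacted in place to "foo=1\0bar=2\0" followed by NUL padding.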
+*/ + +unsigned int +process_nvram_vars(char *varbuf, unsigned int len) +{ + char *dp; + bool findNewline; + int column; + unsigned int buf_len, n; + unsigned int pad = 0; + + dp = varbuf; + + findNewline = FALSE; + column = 0; + + for (n = 0; n < len; n++) { + if (varbuf[n] == '\r') + continue; + if (findNewline && varbuf[n] != '\n') + continue; + findNewline = FALSE; + if (varbuf[n] == '#') { + findNewline = TRUE; + continue; + } + if (varbuf[n] == '\n') { + if (column == 0) + continue; + *dp++ = 0; + column = 0; + continue; + } + *dp++ = varbuf[n]; + column++; + } + buf_len = (unsigned int)(dp - varbuf); + if (buf_len % 4) { + pad = 4 - buf_len % 4; + if (pad && (buf_len + pad <= len)) { + buf_len += pad; + } + } + + while (dp < varbuf + n) + *dp++ = 0; + + return buf_len; +} + +/* calculate a * b + c */ +void +bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c) +{ +#define FORMALIZE(var) {cc += (var & 0x80000000) ? 1 : 0; var &= 0x7fffffff;} + uint32 r1, r0; + uint32 a1, a0, b1, b0, t, cc = 0; + + a1 = a >> 16; + a0 = a & 0xffff; + b1 = b >> 16; + b0 = b & 0xffff; + + r0 = a0 * b0; + FORMALIZE(r0); + + t = (a1 * b0) << 16; + FORMALIZE(t); + + r0 += t; + FORMALIZE(r0); + + t = (a0 * b1) << 16; + FORMALIZE(t); + + r0 += t; + FORMALIZE(r0); + + FORMALIZE(c); + + r0 += c; + FORMALIZE(r0); + + r0 |= (cc % 2) ? 0x80000000 : 0; + r1 = a1 * b1 + ((a1 * b0) >> 16) + ((b1 * a0) >> 16) + (cc / 2); + + *r_high = r1; + *r_low = r0; +} + +/* calculate a / b */ +void +bcm_uint64_divide(uint32* r, uint32 a_high, uint32 a_low, uint32 b) +{ + uint32 a1 = a_high, a0 = a_low, r0 = 0; + + if (b < 2) + return; + + while (a1 != 0) { + r0 += (0xffffffff / b) * a1; + bcm_uint64_multiple_add(&a1, &a0, ((0xffffffff % b) + 1) % b, a1, a0); + } + + r0 += a0 / b; + *r = r0; +} + +#ifndef setbit /* As in the header file */ +#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS +/* Set bit in byte array. */ +void +setbit(void *array, uint bit) +{ + ((uint8 *)array)[bit / NBBY] |= 1 << (bit % NBBY); +} + +/* Clear bit in byte array. */ +void +clrbit(void *array, uint bit) +{ + ((uint8 *)array)[bit / NBBY] &= ~(1 << (bit % NBBY)); +} + +/* Test if bit is set in byte array. */ +bool +isset(const void *array, uint bit) +{ + return (((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY))); +} + +/* Test if bit is clear in byte array. */ +bool +isclr(const void *array, uint bit) +{ + return ((((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY))) == 0); +} +#endif /* BCMUTILS_BIT_MACROS_USE_FUNCS */ +#endif /* setbit */ + +void +set_bitrange(void *array, uint start, uint end, uint maxbit) +{ + uint startbyte = start/NBBY; + uint endbyte = end/NBBY; + uint i, startbytelastbit, endbytestartbit; + + if (end >= start) { + if (endbyte - startbyte > 1) + { + startbytelastbit = (startbyte+1)*NBBY - 1; + endbytestartbit = endbyte*NBBY; + for (i = startbyte+1; i < endbyte; i++) + ((uint8 *)array)[i] = 0xFF; + for (i = start; i <= startbytelastbit; i++) + setbit(array, i); + for (i = endbytestartbit; i <= end; i++) + setbit(array, i); + } else { + for (i = start; i <= end; i++) + setbit(array, i); + } + } + else { + set_bitrange(array, start, maxbit, maxbit); + set_bitrange(array, 0, end, maxbit); + } +} + +void +bcm_bitprint32(const uint32 u32arg) +{ + int i; + for (i = NBITS(uint32) - 1; i >= 0; i--) { + isbitset(u32arg, i) ? 
printf("1") : printf("0"); + if ((i % NBBY) == 0) printf(" "); + } + printf("\n"); +} + +/* calculate checksum for ip header, tcp / udp header / data */ +uint16 +bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum) +{ + while (len > 1) { + sum += (buf[0] << 8) | buf[1]; + buf += 2; + len -= 2; + } + + if (len > 0) { + sum += (*buf) << 8; + } + + while (sum >> 16) { + sum = (sum & 0xffff) + (sum >> 16); + } + + return ((uint16)~sum); +} +#if defined(BCMDRIVER) && !defined(_CFEZ_) +/* + * Hierarchical Multiword bitmap based small id allocator. + * + * Multilevel hierarchy bitmap. (maximum 2 levels) + * First hierarchy uses a multiword bitmap to identify 32bit words in the + * second hierarchy that have at least a single bit set. Each bit in a word of + * the second hierarchy represents a unique ID that may be allocated. + * + * BCM_MWBMAP_ITEMS_MAX: Maximum number of IDs managed. + * BCM_MWBMAP_BITS_WORD: Number of bits in a bitmap word word + * BCM_MWBMAP_WORDS_MAX: Maximum number of bitmap words needed for free IDs. + * BCM_MWBMAP_WDMAP_MAX: Maximum number of bitmap wordss identifying first non + * non-zero bitmap word carrying at least one free ID. + * BCM_MWBMAP_SHIFT_OP: Used in MOD, DIV and MUL operations. + * BCM_MWBMAP_INVALID_IDX: Value ~0U is treated as an invalid ID + * + * Design Notes: + * BCM_MWBMAP_USE_CNTSETBITS trades CPU for memory. A runtime count of how many + * bits are computed each time on allocation and deallocation, requiring 4 + * array indexed access and 3 arithmetic operations. When not defined, a runtime + * count of set bits state is maintained. Upto 32 Bytes per 1024 IDs is needed. + * In a 4K max ID allocator, up to 128Bytes are hence used per instantiation. + * In a memory limited system e.g. dongle builds, a CPU for memory tradeoff may + * be used by defining BCM_MWBMAP_USE_CNTSETBITS. + * + * Note: wd_bitmap[] is statically declared and is not ROM friendly ... array + * size is fixed. No intention to support larger than 4K indice allocation. ID + * allocators for ranges smaller than 4K will have a wastage of only 12Bytes + * with savings in not having to use an indirect access, had it been dynamically + * allocated. + */ +#define BCM_MWBMAP_ITEMS_MAX (64 * 1024) /* May increase to 64K */ + +#define BCM_MWBMAP_BITS_WORD (NBITS(uint32)) +#define BCM_MWBMAP_WORDS_MAX (BCM_MWBMAP_ITEMS_MAX / BCM_MWBMAP_BITS_WORD) +#define BCM_MWBMAP_WDMAP_MAX (BCM_MWBMAP_WORDS_MAX / BCM_MWBMAP_BITS_WORD) +#define BCM_MWBMAP_SHIFT_OP (5) +#define BCM_MWBMAP_MODOP(ix) ((ix) & (BCM_MWBMAP_BITS_WORD - 1)) +#define BCM_MWBMAP_DIVOP(ix) ((ix) >> BCM_MWBMAP_SHIFT_OP) +#define BCM_MWBMAP_MULOP(ix) ((ix) << BCM_MWBMAP_SHIFT_OP) + +/* Redefine PTR() and/or HDL() conversion to invoke audit for debugging */ +#define BCM_MWBMAP_PTR(hdl) ((struct bcm_mwbmap *)(hdl)) +#define BCM_MWBMAP_HDL(ptr) ((void *)(ptr)) + +#if defined(BCM_MWBMAP_DEBUG) +#define BCM_MWBMAP_AUDIT(mwb) \ + do { \ + ASSERT((mwb != NULL) && \ + (((struct bcm_mwbmap *)(mwb))->magic == (void *)(mwb))); \ + bcm_mwbmap_audit(mwb); \ + } while (0) +#define MWBMAP_ASSERT(exp) ASSERT(exp) +#define MWBMAP_DBG(x) printf x +#else /* !BCM_MWBMAP_DEBUG */ +#define BCM_MWBMAP_AUDIT(mwb) do {} while (0) +#define MWBMAP_ASSERT(exp) do {} while (0) +#define MWBMAP_DBG(x) +#endif /* !BCM_MWBMAP_DEBUG */ + + +typedef struct bcm_mwbmap { /* Hierarchical multiword bitmap allocator */ + uint16 wmaps; /* Total number of words in free wd bitmap */ + uint16 imaps; /* Total number of words in free id bitmap */ + int32 ifree; /* Count of free indices. 
Used only in audits */ + uint16 total; /* Total indices managed by multiword bitmap */ + + void * magic; /* Audit handle parameter from user */ + + uint32 wd_bitmap[BCM_MWBMAP_WDMAP_MAX]; /* 1st level bitmap of */ +#if !defined(BCM_MWBMAP_USE_CNTSETBITS) + int8 wd_count[BCM_MWBMAP_WORDS_MAX]; /* free id running count, 1st lvl */ +#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ + + uint32 id_bitmap[0]; /* Second level bitmap */ +} bcm_mwbmap_t; + +/* Incarnate a hierarchical multiword bitmap based small index allocator. */ +struct bcm_mwbmap * +bcm_mwbmap_init(osl_t *osh, uint32 items_max) +{ + struct bcm_mwbmap * mwbmap_p; + uint32 wordix, size, words, extra; + + /* Implementation Constraint: Uses 32bit word bitmap */ + MWBMAP_ASSERT(BCM_MWBMAP_BITS_WORD == 32U); + MWBMAP_ASSERT(BCM_MWBMAP_SHIFT_OP == 5U); + MWBMAP_ASSERT(ISPOWEROF2(BCM_MWBMAP_ITEMS_MAX)); + MWBMAP_ASSERT((BCM_MWBMAP_ITEMS_MAX % BCM_MWBMAP_BITS_WORD) == 0U); + + ASSERT(items_max <= BCM_MWBMAP_ITEMS_MAX); + + /* Determine the number of words needed in the multiword bitmap */ + extra = BCM_MWBMAP_MODOP(items_max); + words = BCM_MWBMAP_DIVOP(items_max) + ((extra != 0U) ? 1U : 0U); + + /* Allocate runtime state of multiword bitmap */ + /* Note: wd_count[] or wd_bitmap[] are not dynamically allocated */ + size = sizeof(bcm_mwbmap_t) + (sizeof(uint32) * words); + mwbmap_p = (bcm_mwbmap_t *)MALLOC(osh, size); + if (mwbmap_p == (bcm_mwbmap_t *)NULL) { + ASSERT(0); + goto error1; + } + memset(mwbmap_p, 0, size); + + /* Initialize runtime multiword bitmap state */ + mwbmap_p->imaps = (uint16)words; + mwbmap_p->ifree = (int32)items_max; + mwbmap_p->total = (uint16)items_max; + + /* Setup magic, for use in audit of handle */ + mwbmap_p->magic = BCM_MWBMAP_HDL(mwbmap_p); + + /* Setup the second level bitmap of free indices */ + /* Mark all indices as available */ + for (wordix = 0U; wordix < mwbmap_p->imaps; wordix++) { + mwbmap_p->id_bitmap[wordix] = (uint32)(~0U); +#if !defined(BCM_MWBMAP_USE_CNTSETBITS) + mwbmap_p->wd_count[wordix] = BCM_MWBMAP_BITS_WORD; +#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ + } + + /* Ensure that extra indices are tagged as un-available */ + if (extra) { /* fixup the free ids in last bitmap and wd_count */ + uint32 * bmap_p = &mwbmap_p->id_bitmap[mwbmap_p->imaps - 1]; + *bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */ +#if !defined(BCM_MWBMAP_USE_CNTSETBITS) + mwbmap_p->wd_count[mwbmap_p->imaps - 1] = (int8)extra; /* fixup count */ +#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ + } + + /* Setup the first level bitmap hierarchy */ + extra = BCM_MWBMAP_MODOP(mwbmap_p->imaps); + words = BCM_MWBMAP_DIVOP(mwbmap_p->imaps) + ((extra != 0U) ? 1U : 0U); + + mwbmap_p->wmaps = (uint16)words; + + for (wordix = 0U; wordix < mwbmap_p->wmaps; wordix++) + mwbmap_p->wd_bitmap[wordix] = (uint32)(~0U); + if (extra) { + uint32 * bmap_p = &mwbmap_p->wd_bitmap[mwbmap_p->wmaps - 1]; + *bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */ + } + + return mwbmap_p; + +error1: + return BCM_MWBMAP_INVALID_HDL; +} + +/* Release resources used by multiword bitmap based small index allocator. */ +void +bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl) +{ + bcm_mwbmap_t * mwbmap_p; + + BCM_MWBMAP_AUDIT(mwbmap_hdl); + mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); + + MFREE(osh, mwbmap_p, sizeof(struct bcm_mwbmap) + + (sizeof(uint32) * mwbmap_p->imaps)); + return; +} + +/* Allocate a unique small index using a multiword bitmap index allocator. 
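+ * Typical usage (editorial sketch; 'osh' is a hypothetical OSL handle
+ * supplied by the caller):
+ *
+ *   struct bcm_mwbmap *map = bcm_mwbmap_init(osh, 2048);
+ *   uint32 id = bcm_mwbmap_alloc(map);
+ *   if (id != BCM_MWBMAP_INVALID_IDX)
+ *           bcm_mwbmap_free(map, id);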
*/ +uint32 BCMFASTPATH +bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl) +{ + bcm_mwbmap_t * mwbmap_p; + uint32 wordix, bitmap; + + BCM_MWBMAP_AUDIT(mwbmap_hdl); + mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); + + /* Start with the first hierarchy */ + for (wordix = 0; wordix < mwbmap_p->wmaps; ++wordix) { + + bitmap = mwbmap_p->wd_bitmap[wordix]; /* get the word bitmap */ + + if (bitmap != 0U) { + + uint32 count, bitix, *bitmap_p; + + bitmap_p = &mwbmap_p->wd_bitmap[wordix]; + + /* clear all except trailing 1 */ + bitmap = (uint32)(((int)(bitmap)) & (-((int)(bitmap)))); + MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) == + bcm_count_leading_zeros(bitmap)); + bitix = (BCM_MWBMAP_BITS_WORD - 1) + - bcm_count_leading_zeros(bitmap); /* use asm clz */ + wordix = BCM_MWBMAP_MULOP(wordix) + bitix; + + /* Clear bit if wd count is 0, without conditional branch */ +#if defined(BCM_MWBMAP_USE_CNTSETBITS) + count = bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1; +#else /* ! BCM_MWBMAP_USE_CNTSETBITS */ + mwbmap_p->wd_count[wordix]--; + count = mwbmap_p->wd_count[wordix]; + MWBMAP_ASSERT(count == + (bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1)); +#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */ + MWBMAP_ASSERT(count >= 0); + + /* clear wd_bitmap bit if id_map count is 0 */ + bitmap = (count == 0) << bitix; + + MWBMAP_DBG(( + "Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d", + bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, count)); + + *bitmap_p ^= bitmap; + + /* Use bitix in the second hierarchy */ + bitmap_p = &mwbmap_p->id_bitmap[wordix]; + + bitmap = mwbmap_p->id_bitmap[wordix]; /* get the id bitmap */ + MWBMAP_ASSERT(bitmap != 0U); + + /* clear all except trailing 1 */ + bitmap = (uint32)(((int)(bitmap)) & (-((int)(bitmap)))); + MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) == + bcm_count_leading_zeros(bitmap)); + bitix = BCM_MWBMAP_MULOP(wordix) + + (BCM_MWBMAP_BITS_WORD - 1) + - bcm_count_leading_zeros(bitmap); /* use asm clz */ + + mwbmap_p->ifree--; /* decrement system wide free count */ + MWBMAP_ASSERT(mwbmap_p->ifree >= 0); + + MWBMAP_DBG(( + "Lvl2: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x ifree %d", + bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, + mwbmap_p->ifree)); + + *bitmap_p ^= bitmap; /* mark as allocated = 1b0 */ + + return bitix; + } + } + + ASSERT(mwbmap_p->ifree == 0); + + return BCM_MWBMAP_INVALID_IDX; +} + +/* Force an index at a specified position to be in use */ +void +bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix) +{ + bcm_mwbmap_t * mwbmap_p; + uint32 count, wordix, bitmap, *bitmap_p; + + BCM_MWBMAP_AUDIT(mwbmap_hdl); + mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl); + + ASSERT(bitix < mwbmap_p->total); + + /* Start with second hierarchy */ + wordix = BCM_MWBMAP_DIVOP(bitix); + bitmap = (uint32)(1U << BCM_MWBMAP_MODOP(bitix)); + bitmap_p = &mwbmap_p->id_bitmap[wordix]; + + ASSERT((*bitmap_p & bitmap) == bitmap); + + mwbmap_p->ifree--; /* update free count */ + ASSERT(mwbmap_p->ifree >= 0); + + MWBMAP_DBG(("Lvl2: bitix<%u> wordix<%u>: %08x ^ %08x = %08x ifree %d", + bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, + mwbmap_p->ifree)); + + *bitmap_p ^= bitmap; /* mark as in use */ + + /* Update first hierarchy */ + bitix = wordix; + + wordix = BCM_MWBMAP_DIVOP(bitix); + bitmap_p = &mwbmap_p->wd_bitmap[wordix]; + +#if defined(BCM_MWBMAP_USE_CNTSETBITS) + count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]); +#else /* ! 
BCM_MWBMAP_USE_CNTSETBITS */
+	mwbmap_p->wd_count[bitix]--;
+	count = mwbmap_p->wd_count[bitix];
+	MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+	MWBMAP_ASSERT(count >= 0);
+
+	bitmap = (count == 0) << BCM_MWBMAP_MODOP(bitix);
+
+	MWBMAP_DBG(("Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
+		BCM_MWBMAP_MODOP(bitix), wordix, *bitmap_p, bitmap,
+		(*bitmap_p) ^ bitmap, count));
+
+	*bitmap_p ^= bitmap;	/* mark as in use */
+
+	return;
+}
+
+/* Free a previously allocated index back into the multiword bitmap allocator */
+void BCMFASTPATH
+bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 wordix, bitmap, *bitmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(bitix < mwbmap_p->total);
+
+	/* Start with second level hierarchy */
+	wordix = BCM_MWBMAP_DIVOP(bitix);
+	bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
+	bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+	ASSERT((*bitmap_p & bitmap) == 0U);	/* ASSERT not a double free */
+
+	mwbmap_p->ifree++;	/* update free count */
+	ASSERT(mwbmap_p->ifree <= mwbmap_p->total);
+
+	MWBMAP_DBG(("Lvl2: bitix<%02u> wordix<%02u>: %08x | %08x = %08x ifree %d",
+		bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap,
+		mwbmap_p->ifree));
+
+	*bitmap_p |= bitmap;	/* mark as available */
+
+	/* Now update first level hierarchy */
+
+	bitix = wordix;
+
+	wordix = BCM_MWBMAP_DIVOP(bitix);	/* first level's word index */
+	bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
+	bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+	mwbmap_p->wd_count[bitix]++;
+#endif
+
+#if defined(BCM_MWBMAP_DEBUG)
+	{
+		uint32 count;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+		count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+		count = mwbmap_p->wd_count[bitix];
+		MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+
+		MWBMAP_ASSERT(count <= BCM_MWBMAP_BITS_WORD);
+
+		MWBMAP_DBG(("Lvl1: bitix<%02u> wordix<%02u>: %08x | %08x = %08x wfree %d",
+			bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap, count));
+	}
+#endif /* BCM_MWBMAP_DEBUG */
+
+	*bitmap_p |= bitmap;
+
+	return;
+}
+
+/* Fetch the total number of free indices in the multiword bitmap allocator */
+uint32
+bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(mwbmap_p->ifree >= 0);
+
+	return mwbmap_p->ifree;
+}
+
+/* Determine whether an index is in use or free */
+bool
+bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 wordix, bitmap;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	ASSERT(bitix < mwbmap_p->total);
+
+	wordix = BCM_MWBMAP_DIVOP(bitix);
+	bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
+
+	return ((mwbmap_p->id_bitmap[wordix] & bitmap) != 0U);
+}
+
+/* Debug dump a multiword bitmap allocator */
+void
+bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl)
+{
+	uint32 ix, count;
+	bcm_mwbmap_t * mwbmap_p;
+
+	BCM_MWBMAP_AUDIT(mwbmap_hdl);
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	printf("mwbmap_p %p wmaps %u imaps %u ifree %d total %u\n", mwbmap_p,
+		mwbmap_p->wmaps, mwbmap_p->imaps, mwbmap_p->ifree, mwbmap_p->total);
+	for (ix = 0U; ix < mwbmap_p->wmaps; ix++) {
+		printf("\tWDMAP:%2u. 0x%08x\t", ix, mwbmap_p->wd_bitmap[ix]);
+		bcm_bitprint32(mwbmap_p->wd_bitmap[ix]);
+		printf("\n");
+	}
+	for (ix = 0U; ix < mwbmap_p->imaps; ix++) {
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+		count = bcm_cntsetbits(mwbmap_p->id_bitmap[ix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+		count = mwbmap_p->wd_count[ix];
+		MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+		printf("\tIDMAP:%2u. 0x%08x %02u\t", ix, mwbmap_p->id_bitmap[ix], count);
+		bcm_bitprint32(mwbmap_p->id_bitmap[ix]);
+		printf("\n");
+	}
+
+	return;
+}
+
+/* Audit a hierarchical multiword bitmap */
+void
+bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl)
+{
+	bcm_mwbmap_t * mwbmap_p;
+	uint32 count, free_cnt = 0U, wordix, idmap_ix, bitix, *bitmap_p;
+
+	mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+	for (wordix = 0U; wordix < mwbmap_p->wmaps; ++wordix) {
+
+		bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+		for (bitix = 0U; bitix < BCM_MWBMAP_BITS_WORD; bitix++) {
+			if ((*bitmap_p) & (1 << bitix)) {
+				idmap_ix = BCM_MWBMAP_MULOP(wordix) + bitix;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+				count = bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+				count = mwbmap_p->wd_count[idmap_ix];
+				ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+				ASSERT(count != 0U);
+				free_cnt += count;
+			}
+		}
+	}
+
+	ASSERT((int)free_cnt == mwbmap_p->ifree);
+}
+/* END : Multiword bitmap based 64bit to Unique 32bit Id allocator. */
+
+/* Simple 16bit Id allocator using a stack implementation. */
+typedef struct id16_map {
+	uint32	failures;	/* count of failures */
+	void	*dbg;		/* debug placeholder */
+	uint16	total;		/* total number of ids managed by allocator */
+	uint16	start;		/* start value of 16bit ids to be managed */
+	int	stack_idx;	/* index into stack of available ids */
+	uint16	stack[0];	/* stack of 16 bit ids */
+} id16_map_t;
+
+#define ID16_MAP_SZ(items)	(sizeof(id16_map_t) + \
+					(sizeof(uint16) * (items)))
+
+#if defined(BCM_DBG)
+
+/* Uncomment BCM_DBG_ID16 to debug double free */
+/* #define BCM_DBG_ID16 */
+
+typedef struct id16_map_dbg {
+	uint16	total;
+	bool	avail[0];
+} id16_map_dbg_t;
+#define ID16_MAP_DBG_SZ(items)	(sizeof(id16_map_dbg_t) + \
+					(sizeof(bool) * (items)))
+#define ID16_MAP_MSG(x)		printf x
+#else
+#define ID16_MAP_MSG(x)
+#endif /* BCM_DBG */
+
+void * /* Construct an id16 allocator: [start_val16 .. start_val16+total_ids) */
+id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16)
+{
+	uint16 idx, val16;
+	id16_map_t * id16_map;
+
+	ASSERT(total_ids > 0);
+
+	/* A start_val16 of ID16_UNDEFINED allows the caller to fill the id16 map
+	 * with random values.
+	 */
+	ASSERT((start_val16 == ID16_UNDEFINED) ||
+		(start_val16 + total_ids) < ID16_INVALID);
+
+	id16_map = (id16_map_t *) MALLOC(osh, ID16_MAP_SZ(total_ids));
+	if (id16_map == NULL) {
+		return NULL;
+	}
+
+	id16_map->total = total_ids;
+	id16_map->start = start_val16;
+	id16_map->failures = 0;
+	id16_map->dbg = NULL;
+
+	/*
+	 * Populate stack with 16bit id values, commencing with start_val16.
+	 * If start_val16 is ID16_UNDEFINED, then do not populate the id16 map.
+ */ + id16_map->stack_idx = -1; + + if (id16_map->start != ID16_UNDEFINED) { + val16 = start_val16; + + for (idx = 0; idx < total_ids; idx++, val16++) { + id16_map->stack_idx = idx; + id16_map->stack[id16_map->stack_idx] = val16; + } + } + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + if (id16_map->start != ID16_UNDEFINED) { + id16_map->dbg = MALLOC(osh, ID16_MAP_DBG_SZ(total_ids)); + + if (id16_map->dbg) { + id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; + + id16_map_dbg->total = total_ids; + for (idx = 0; idx < total_ids; idx++) { + id16_map_dbg->avail[idx] = TRUE; + } + } + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + + return (void *)id16_map; +} + +void * /* Destruct an id16 allocator instance */ +id16_map_fini(osl_t *osh, void * id16_map_hndl) +{ + uint16 total_ids; + id16_map_t * id16_map; + + if (id16_map_hndl == NULL) + return NULL; + + id16_map = (id16_map_t *)id16_map_hndl; + + total_ids = id16_map->total; + ASSERT(total_ids > 0); + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + if (id16_map->dbg) { + MFREE(osh, id16_map->dbg, ID16_MAP_DBG_SZ(total_ids)); + id16_map->dbg = NULL; + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + + id16_map->total = 0; + MFREE(osh, id16_map, ID16_MAP_SZ(total_ids)); + + return NULL; +} + +void +id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16) +{ + uint16 idx, val16; + id16_map_t * id16_map; + + ASSERT(total_ids > 0); + /* A start_val16 of ID16_UNDEFINED, allows the caller to fill the id16 map + * with random values. + */ + ASSERT((start_val16 == ID16_UNDEFINED) || + (start_val16 + total_ids) < ID16_INVALID); + + id16_map = (id16_map_t *)id16_map_hndl; + if (id16_map == NULL) { + return; + } + + id16_map->total = total_ids; + id16_map->start = start_val16; + id16_map->failures = 0; + + /* Populate stack with 16bit id values, commencing with start_val16 */ + id16_map->stack_idx = -1; + + if (id16_map->start != ID16_UNDEFINED) { + val16 = start_val16; + + for (idx = 0; idx < total_ids; idx++, val16++) { + id16_map->stack_idx = idx; + id16_map->stack[id16_map->stack_idx] = val16; + } + } + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + if (id16_map->start != ID16_UNDEFINED) { + if (id16_map->dbg) { + id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; + + id16_map_dbg->total = total_ids; + for (idx = 0; idx < total_ids; idx++) { + id16_map_dbg->avail[idx] = TRUE; + } + } + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ +} + +uint16 BCMFASTPATH /* Allocate a unique 16bit id */ +id16_map_alloc(void * id16_map_hndl) +{ + uint16 val16; + id16_map_t * id16_map; + + ASSERT(id16_map_hndl != NULL); + + id16_map = (id16_map_t *)id16_map_hndl; + + ASSERT(id16_map->total > 0); + + if (id16_map->stack_idx < 0) { + id16_map->failures++; + return ID16_INVALID; + } + + val16 = id16_map->stack[id16_map->stack_idx]; + id16_map->stack_idx--; + +#if defined(BCM_DBG) && defined(BCM_DBG_ID16) + ASSERT((id16_map->start == ID16_UNDEFINED) || + (val16 < (id16_map->start + id16_map->total))); + + if (id16_map->dbg) { /* Validate val16 */ + id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg; + + ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == TRUE); + id16_map_dbg->avail[val16 - id16_map->start] = FALSE; + } +#endif /* BCM_DBG && BCM_DBG_ID16 */ + + return val16; +} + + +void BCMFASTPATH /* Free a 16bit id value into the id16 allocator */ +id16_map_free(void * id16_map_hndl, uint16 val16) +{ + id16_map_t * id16_map; + + ASSERT(id16_map_hndl != NULL); + + id16_map = (id16_map_t *)id16_map_hndl; + +#if 
defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	ASSERT((id16_map->start == ID16_UNDEFINED) ||
+		(val16 < (id16_map->start + id16_map->total)));
+
+	if (id16_map->dbg) { /* Validate val16 */
+		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+		ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == FALSE);
+		id16_map_dbg->avail[val16 - id16_map->start] = TRUE;
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+	id16_map->stack_idx++;
+	id16_map->stack[id16_map->stack_idx] = val16;
+}
+
+uint32 /* Returns number of failures to allocate a unique id16 */
+id16_map_failures(void * id16_map_hndl)
+{
+	ASSERT(id16_map_hndl != NULL);
+	return ((id16_map_t *)id16_map_hndl)->failures;
+}
+
+bool
+id16_map_audit(void * id16_map_hndl)
+{
+	int idx;
+	int insane = 0;
+	id16_map_t * id16_map;
+
+	ASSERT(id16_map_hndl != NULL);
+
+	id16_map = (id16_map_t *)id16_map_hndl;
+
+	ASSERT(id16_map->stack_idx >= -1);
+	ASSERT(id16_map->stack_idx < (int)id16_map->total);
+
+	if (id16_map->start == ID16_UNDEFINED)
+		goto done;
+
+	for (idx = 0; idx <= id16_map->stack_idx; idx++) {
+		ASSERT(id16_map->stack[idx] >= id16_map->start);
+		ASSERT(id16_map->stack[idx] < (id16_map->start + id16_map->total));
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+		if (id16_map->dbg) {
+			uint16 val16 = id16_map->stack[idx];
+			id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)(id16_map->dbg);
+			if (id16_map_dbg->avail[val16 - id16_map->start] != TRUE) {
+				insane |= 1;
+				ID16_MAP_MSG(("id16_map<%p>: stack_idx %u invalid val16 %u\n",
+					id16_map_hndl, idx, val16));
+			}
+		}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+	}
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+	if (id16_map->dbg) {
+		id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)(id16_map->dbg);
+		uint16 avail = 0; /* Audit available ids counts */
+		for (idx = 0; idx < id16_map_dbg->total; idx++) {
+			if (id16_map_dbg->avail[idx] == TRUE)
+				avail++;
+		}
+		if (avail && (avail != (id16_map->stack_idx + 1))) {
+			insane |= 1;
+			ID16_MAP_MSG(("id16_map<%p>: avail %u stack_idx %u\n",
+				id16_map_hndl, avail, id16_map->stack_idx));
+		}
+	}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+done:
+	/* invoke any other system audits */
+	return (!!insane);
+}
+/* END: Simple id16 allocator */
+
+
+#endif
+
+/* calculate a >> b and return only the lower 32 bits */
+void
+bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b)
+{
+	uint32 a1 = a_high, a0 = a_low, r0 = 0;
+
+	if (b == 0) {
+		r0 = a_low;
+		*r = r0;
+		return;
+	}
+
+	if (b < 32) {
+		a0 = a0 >> b;
+		a1 = a1 & ((1 << b) - 1);
+		a1 = a1 << (32 - b);
+		r0 = a0 | a1;
+		*r = r0;
+		return;
+	} else {
+		r0 = a1 >> (b - 32);
+		*r = r0;
+		return;
+	}
+
+}
+
+/* calculate a + b where a is a 64 bit number and b is a 32 bit number */
+void
+bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset)
+{
+	uint32 r1_lo = *r_lo;
+	(*r_lo) += offset;
+	if (*r_lo < r1_lo)
+		(*r_hi)++;
+}
+
+/* calculate a - b where a is a 64 bit number and b is a 32 bit number */
+void
+bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset)
+{
+	uint32 r1_lo = *r_lo;
+	(*r_lo) -= offset;
+	if (*r_lo > r1_lo)
+		(*r_hi)--;
+}
+
+#ifdef DEBUG_COUNTER
+#if (OSL_SYSUPTIME_SUPPORT == TRUE)
+void counter_printlog(counter_tbl_t *ctr_tbl)
+{
+	uint32 now;
+
+	if (!ctr_tbl->enabled)
+		return;
+
+	now = OSL_SYSUPTIME();
+
+	if (now - ctr_tbl->prev_log_print > ctr_tbl->log_print_interval) {
+		uint8 i = 0;
+		printf("counter_print(%s %d):", ctr_tbl->name, now - ctr_tbl->prev_log_print);
+
+		for (i = 0; i < ctr_tbl->needed_cnt; i++) {
+			printf(" %u", ctr_tbl->cnt[i]);
+		}
+		printf("\n");
+
+		ctr_tbl->prev_log_print = now;
+		bzero(ctr_tbl->cnt, CNTR_TBL_MAX * sizeof(uint));
+#ifdef DEBUG_COUNTER
+#if (OSL_SYSUPTIME_SUPPORT == TRUE)
+void counter_printlog(counter_tbl_t *ctr_tbl)
+{
+    uint32 now;
+
+    if (!ctr_tbl->enabled)
+        return;
+
+    now = OSL_SYSUPTIME();
+
+    if (now - ctr_tbl->prev_log_print > ctr_tbl->log_print_interval) {
+        uint8 i = 0;
+        printf("counter_print(%s %d):", ctr_tbl->name, now - ctr_tbl->prev_log_print);
+
+        for (i = 0; i < ctr_tbl->needed_cnt; i++) {
+            printf(" %u", ctr_tbl->cnt[i]);
+        }
+        printf("\n");
+
+        ctr_tbl->prev_log_print = now;
+        bzero(ctr_tbl->cnt, CNTR_TBL_MAX * sizeof(uint));
+    }
+}
+#else
+/* OSL_SYSUPTIME is not supported, so there is no way to get the time */
+#define counter_printlog(a) do {} while (0)
+#endif /* OSL_SYSUPTIME_SUPPORT == TRUE */
+#endif /* DEBUG_COUNTER */
+
+#if defined(BCMDRIVER) && !defined(_CFEZ_)
+/* Free the memory backing a pool created by dll_pool_init() */
+void
+dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size)
+{
+    uint32 mem_size;
+    mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+    if (pool)
+        MFREE(osh, pool, mem_size);
+}
+
+/* Allocate a pool of elems_max elements of elem_size bytes each, and place
+ * every element on the pool's free list. Each element must begin with a
+ * dll_t node, which is used to link it into the list.
+ */
+dll_pool_t *
+dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size)
+{
+    uint32 mem_size, i;
+    dll_pool_t * dll_pool_p;
+    dll_t * elem_p;
+
+    ASSERT(elem_size > sizeof(dll_t));
+
+    mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+
+    if ((dll_pool_p = (dll_pool_t *)MALLOCZ(osh, mem_size)) == NULL) {
+        printf("dll_pool_init: elems_max<%u> elem_size<%u> malloc failure\n",
+            elems_max, elem_size);
+        ASSERT(0);
+        return dll_pool_p;
+    }
+
+    dll_init(&dll_pool_p->free_list);
+    dll_pool_p->elems_max = elems_max;
+    dll_pool_p->elem_size = elem_size;
+
+    elem_p = dll_pool_p->elements;
+    for (i = 0; i < elems_max; i++) {
+        dll_append(&dll_pool_p->free_list, elem_p);
+        elem_p = (dll_t *)((uintptr)elem_p + elem_size);
+    }
+
+    dll_pool_p->free_count = elems_max;
+
+    return dll_pool_p;
+}
+
+/* Take an element off the head of the free list, or return NULL if the pool is empty */
+void *
+dll_pool_alloc(dll_pool_t * dll_pool_p)
+{
+    dll_t * elem_p;
+
+    if (dll_pool_p->free_count == 0) {
+        ASSERT(dll_empty(&dll_pool_p->free_list));
+        return NULL;
+    }
+
+    elem_p = dll_head_p(&dll_pool_p->free_list);
+    dll_delete(elem_p);
+    dll_pool_p->free_count -= 1;
+
+    return (void *)elem_p;
+}
+
+/* Return an element to the head of the free list */
+void
+dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p)
+{
+    dll_t * node_p = (dll_t *)elem_p;
+    dll_prepend(&dll_pool_p->free_list, node_p);
+    dll_pool_p->free_count += 1;
+}
+
+/* Return an element to the tail of the free list */
+void
+dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p)
+{
+    dll_t * node_p = (dll_t *)elem_p;
+    dll_append(&dll_pool_p->free_list, node_p);
+    dll_pool_p->free_count += 1;
+}
+
+#endif
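
The dll_pool_* helpers above implement a fixed-size element pool backed by an
intrusive free list: every element must begin with a dll_t node, which the pool
uses to link free elements. A minimal usage sketch (the element type and the
`osh` OSL handle are hypothetical, for illustration only):

    struct my_elem {
        dll_t node;      /* must be first: the pool links elements by this header */
        int payload;
    };

    dll_pool_t *pool = dll_pool_init(osh, 16, sizeof(struct my_elem));
    struct my_elem *e = (struct my_elem *)dll_pool_alloc(pool);
    if (e != NULL) {
        e->payload = 42;
        dll_pool_free(pool, e);    /* back to the head of the free list */
    }
    dll_pool_detach(osh, pool, 16, sizeof(struct my_elem));
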
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_channels.c b/drivers/net/wireless/bcmdhd/bcmwifi_channels.c
new file mode 100644
index 000000000000..be884cc33cc1
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi_channels.c
@@ -0,0 +1,1253 @@
+/*
+ * Misc utility routines used by kernel or app-level.
+ * Contents are wifi-specific, used by any kernel or app-level
+ * software that might want wifi things as it grows.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: bcmwifi_channels.c 591285 2015-10-07 11:56:29Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmutils.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* BCMDRIVER */
+
+#include <bcmwifi_channels.h>
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h>	/* For wl/exe/GNUmakefile.brcm_wlu and GNUmakefile.wlm_dll */
+#endif
+
+/* Definitions for D11AC capable Chanspec type */
+
+/* Chanspec ASCII representation with 802.11ac capability:
+ * [<band> 'g'] <channel> ['/'<bandwidth> [<ctl-sideband>]['/'<1st80channel>'-'<2nd80channel>]]
+ *
+ * <band>:
+ *	(optional) 2, 3, 4, 5 for 2.4GHz, 3GHz, 4GHz, and 5GHz respectively.
+ *	Default value is 2g if channel <= 14, otherwise 5g.
+ * <channel>:
+ *	channel number of the 5MHz, 10MHz, 20MHz channel,
+ *	or primary channel of 40MHz, 80MHz, 160MHz, or 80+80MHz channel.
+ * <bandwidth>:
+ *	(optional) 5, 10, 20, 40, 80, 160, or 80+80. Default value is 20.
+ * <ctl-sideband>:
+ *	(only for 2.4GHz band 40MHz) U for upper sideband primary, L for lower.
+ *
+ *	For 2.4GHz band 40MHz channels, the same primary channel may be the
+ *	upper sideband for one 40MHz channel, and the lower sideband for an
+ *	overlapping 40MHz channel. The U/L disambiguates which 40MHz channel
+ *	is being specified.
+ *
+ *	For 40MHz in the 5GHz band and all channel bandwidths greater than
+ *	40MHz, the U/L specification is not allowed since the channels are
+ *	non-overlapping and the primary sub-band is derived from its
+ *	position in the wide bandwidth channel.
+ *
+ * <1st80Channel>:
+ * <2nd80Channel>:
+ *	Required for 80+80, otherwise not allowed.
+ *	Specifies the center channel of the first and second 80MHz band.
+ *
+ * In its simplest form, it is a 20MHz channel number, with the implied band
+ * of 2.4GHz if channel number <= 14, and 5GHz otherwise.
+ *
+ * To allow for backward compatibility with scripts, the old form for
+ * 40MHz channels is also allowed: <channel><ctl-sideband>
+ *
+ * <channel>:
+ *	primary channel of 40MHz, channel <= 14 is 2GHz, otherwise 5GHz
+ * <ctl-sideband>:
+ *	"U" for upper, "L" for lower (or lower case "u" "l")
+ *
+ * 5 GHz Examples:
+ *	Chanspec	BW	Center Ch	Channel Range	Primary Ch
+ *	5g8		20MHz	8		-		-
+ *	52		20MHz	52		-		-
+ *	52/40		40MHz	54		52-56		52
+ *	56/40		40MHz	54		52-56		56
+ *	52/80		80MHz	58		52-64		52
+ *	56/80		80MHz	58		52-64		56
+ *	60/80		80MHz	58		52-64		60
+ *	64/80		80MHz	58		52-64		64
+ *	52/160		160MHz	50		36-64		52
+ *	36/160		160MHz	50		36-64		36
+ *	36/80+80/42-106	80+80MHz	42,106	36-48,100-112	36
+ *
+ * 2 GHz Examples:
+ *	Chanspec	BW	Center Ch	Channel Range	Primary Ch
+ *	2g8		20MHz	8		-		-
+ *	8		20MHz	8		-		-
+ *	6		20MHz	6		-		-
+ *	6/40l		40MHz	8		6-10		6
+ *	6l		40MHz	8		6-10		6
+ *	6/40u		40MHz	4		2-6		6
+ *	6u		40MHz	4		2-6		6
+ */
+
+/* bandwidth ASCII string */
+static const char *wf_chspec_bw_str[] =
+{
+    "5",
+    "10",
+    "20",
+    "40",
+    "80",
+    "160",
+    "80+80",
+#ifdef WL11ULB
+    "2.5"
+#else /* WL11ULB */
+    "na"
+#endif /* WL11ULB */
+};
+
+static const uint8 wf_chspec_bw_mhz[] =
+{5, 10, 20, 40, 80, 160, 160};
+
+#define WF_NUM_BW \
+    (sizeof(wf_chspec_bw_mhz)/sizeof(uint8))
+
+/* 40MHz channels in 5GHz band */
+static const uint8 wf_5g_40m_chans[] =
+{38, 46, 54, 62, 102, 110, 118, 126, 134, 142, 151, 159};
+#define WF_NUM_5G_40M_CHANS \
+    (sizeof(wf_5g_40m_chans)/sizeof(uint8))
+
+/* 80MHz channels in 5GHz band */
+static const uint8 wf_5g_80m_chans[] =
+{42, 58, 106, 122, 138, 155};
+#define WF_NUM_5G_80M_CHANS \
+    (sizeof(wf_5g_80m_chans)/sizeof(uint8))
+
+/* 160MHz channels in 5GHz band */
+static const uint8 wf_5g_160m_chans[] =
+{50, 114};
+#define WF_NUM_5G_160M_CHANS \
+    (sizeof(wf_5g_160m_chans)/sizeof(uint8))
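+
+/* Illustrative example (an editor's sketch): round-tripping a chanspec through
+ * the string conversion routines below. By the scheme documented above,
+ * "36/80" is the 80MHz channel centered at 42 (range 36-48) whose primary
+ * 20MHz channel is 36:
+ *
+ *    char buf[CHANSPEC_STR_LEN];
+ *    chanspec_t cs = wf_chspec_aton("36/80");
+ *    ASSERT(wf_chspec_valid(cs));
+ *    ASSERT(wf_chspec_ctlchan(cs) == 36);
+ *    wf_chspec_ntoa(cs, buf);    // buf now holds "36/80"
+ */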
+
+
+/* convert bandwidth from chanspec to MHz */
+static uint
+bw_chspec_to_mhz(chanspec_t chspec)
+{
+    uint bw;
+
+    bw = (chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT;
+    return (bw >= WF_NUM_BW ? 0 : wf_chspec_bw_mhz[bw]);
+}
+
+/* bw in MHz, return the channel count from the center channel to the
+ * channel at the edge of the band
+ */
+static uint8
+center_chan_to_edge(uint bw)
+{
+    /* edge channels separated by BW - 10MHz on each side
+     * delta from cf to edge is half of that,
+     * MHz to channel num conversion is 5MHz/channel
+     */
+    return (uint8)(((bw - 20) / 2) / 5);
+}
+
+/* return channel number of the low edge of the band
+ * given the center channel and BW
+ */
+static uint8
+channel_low_edge(uint center_ch, uint bw)
+{
+    return (uint8)(center_ch - center_chan_to_edge(bw));
+}
+
+/* return side band number given center channel and control channel
+ * return -1 on error
+ */
+static int
+channel_to_sb(uint center_ch, uint ctl_ch, uint bw)
+{
+    uint lowest = channel_low_edge(center_ch, bw);
+    uint sb;
+
+    if ((ctl_ch - lowest) % 4) {
+        /* bad ctl channel, not mult 4 */
+        return -1;
+    }
+
+    sb = ((ctl_ch - lowest) / 4);
+
+    /* sb must be an index to a 20MHz channel in range */
+    if (sb >= (bw / 20)) {
+        /* ctl_ch must have been too high for the center_ch */
+        return -1;
+    }
+
+    return sb;
+}
+
+/* return control channel given center channel and side band */
+static uint8
+channel_to_ctl_chan(uint center_ch, uint bw, uint sb)
+{
+    return (uint8)(channel_low_edge(center_ch, bw) + sb * 4);
+}
+
+/* return index of 80MHz channel from channel number
+ * return -1 on error
+ */
+static int
+channel_80mhz_to_id(uint ch)
+{
+    uint i;
+    for (i = 0; i < WF_NUM_5G_80M_CHANS; i ++) {
+        if (ch == wf_5g_80m_chans[i])
+            return i;
+    }
+
+    return -1;
+}
+
+/* wrapper function for wf_chspec_ntoa. In case of an error it puts
+ * the original chanspec in the output buffer, prepended with "invalid".
+ * Can be directly used in print routines as it takes care of null
+ */
+char *
+wf_chspec_ntoa_ex(chanspec_t chspec, char *buf)
+{
+    if (wf_chspec_ntoa(chspec, buf) == NULL)
+        snprintf(buf, CHANSPEC_STR_LEN, "invalid 0x%04x", chspec);
+    return buf;
+}
+
+/* given a chanspec and a string buffer, format the chanspec as a
+ * string, and return the original buffer pointer.
+ * Min buffer length must be CHANSPEC_STR_LEN.
+ * On error return NULL
+ */
+char *
+wf_chspec_ntoa(chanspec_t chspec, char *buf)
+{
+    const char *band;
+    uint ctl_chan;
+
+    if (wf_chspec_malformed(chspec))
+        return NULL;
+
+    band = "";
+
+    /* check for non-default band spec */
+    if ((CHSPEC_IS2G(chspec) && CHSPEC_CHANNEL(chspec) > CH_MAX_2G_CHANNEL) ||
+        (CHSPEC_IS5G(chspec) && CHSPEC_CHANNEL(chspec) <= CH_MAX_2G_CHANNEL))
+        band = (CHSPEC_IS2G(chspec)) ? "2g" : "5g";
+
+    /* ctl channel */
+    ctl_chan = wf_chspec_ctlchan(chspec);
+
+    /* bandwidth and ctl sideband */
+    if (CHSPEC_IS20(chspec)) {
+        snprintf(buf, CHANSPEC_STR_LEN, "%s%d", band, ctl_chan);
+    } else if (!CHSPEC_IS8080(chspec)) {
+        const char *bw;
+        const char *sb = "";
+
+        bw = wf_chspec_bw_str[(chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT];
+
+#ifdef CHANSPEC_NEW_40MHZ_FORMAT
+        /* ctl sideband string if needed for 2g 40MHz */
+        if (CHSPEC_IS40(chspec) && CHSPEC_IS2G(chspec)) {
+            sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
+        }
+
+        snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s%s", band, ctl_chan, bw, sb);
+#else
+        /* ctl sideband string instead of BW for 40MHz */
+        if (CHSPEC_IS40(chspec)) {
+            sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
+            snprintf(buf, CHANSPEC_STR_LEN, "%s%d%s", band, ctl_chan, sb);
+        } else {
+            snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, ctl_chan, bw);
+        }
+#endif /* CHANSPEC_NEW_40MHZ_FORMAT */
+
+    } else {
+        /* 80+80 */
+        uint chan1 = (chspec & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT;
+        uint chan2 = (chspec & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT;
+
+        /* convert to channel number */
+        chan1 = (chan1 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan1] : 0;
+        chan2 = (chan2 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan2] : 0;
+
+        /* Outputs a max of CHANSPEC_STR_LEN chars including '\0' */
+        snprintf(buf, CHANSPEC_STR_LEN, "%d/80+80/%d-%d", ctl_chan, chan1, chan2);
+    }
+
+    return (buf);
+}
"u" : "l"; + snprintf(buf, CHANSPEC_STR_LEN, "%s%d%s", band, ctl_chan, sb); + } else { + snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, ctl_chan, bw); + } +#endif /* CHANSPEC_NEW_40MHZ_FORMAT */ + + } else { + /* 80+80 */ + uint chan1 = (chspec & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT; + uint chan2 = (chspec & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT; + + /* convert to channel number */ + chan1 = (chan1 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan1] : 0; + chan2 = (chan2 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan2] : 0; + + /* Outputs a max of CHANSPEC_STR_LEN chars including '\0' */ + snprintf(buf, CHANSPEC_STR_LEN, "%d/80+80/%d-%d", ctl_chan, chan1, chan2); + } + + return (buf); +} + +static int +read_uint(const char **p, unsigned int *num) +{ + unsigned long val; + char *endp = NULL; + + val = strtoul(*p, &endp, 10); + /* if endp is the initial pointer value, then a number was not read */ + if (endp == *p) + return 0; + + /* advance the buffer pointer to the end of the integer string */ + *p = endp; + /* return the parsed integer */ + *num = (unsigned int)val; + + return 1; +} + +/* given a chanspec string, convert to a chanspec. + * On error return 0 + */ +chanspec_t +wf_chspec_aton(const char *a) +{ + chanspec_t chspec; + uint chspec_ch, chspec_band, bw, chspec_bw, chspec_sb; + uint num, ctl_ch; + uint ch1, ch2; + char c, sb_ul = '\0'; + int i; + + bw = 20; + chspec_sb = 0; + chspec_ch = ch1 = ch2 = 0; + + /* parse channel num or band */ + if (!read_uint(&a, &num)) + return 0; + /* if we are looking at a 'g', then the first number was a band */ + c = tolower((int)a[0]); + if (c == 'g') { + a++; /* consume the char */ + + /* band must be "2" or "5" */ + if (num == 2) + chspec_band = WL_CHANSPEC_BAND_2G; + else if (num == 5) + chspec_band = WL_CHANSPEC_BAND_5G; + else + return 0; + + /* read the channel number */ + if (!read_uint(&a, &ctl_ch)) + return 0; + + c = tolower((int)a[0]); + } + else { + /* first number is channel, use default for band */ + ctl_ch = num; + chspec_band = ((ctl_ch <= CH_MAX_2G_CHANNEL) ? + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G); + } + + if (c == '\0') { + /* default BW of 20MHz */ + chspec_bw = WL_CHANSPEC_BW_20; + goto done_read; + } + + a ++; /* consume the 'u','l', or '/' */ + + /* check 'u'/'l' */ + if (c == 'u' || c == 'l') { + sb_ul = c; + chspec_bw = WL_CHANSPEC_BW_40; + goto done_read; + } + + /* next letter must be '/' */ + if (c != '/') + return 0; + + /* read bandwidth */ + if (!read_uint(&a, &bw)) + return 0; + + /* convert to chspec value */ + if (bw == 2) { + chspec_bw = WL_CHANSPEC_BW_2P5; + } else if (bw == 5) { + chspec_bw = WL_CHANSPEC_BW_5; + } else if (bw == 10) { + chspec_bw = WL_CHANSPEC_BW_10; + } else if (bw == 20) { + chspec_bw = WL_CHANSPEC_BW_20; + } else if (bw == 40) { + chspec_bw = WL_CHANSPEC_BW_40; + } else if (bw == 80) { + chspec_bw = WL_CHANSPEC_BW_80; + } else if (bw == 160) { + chspec_bw = WL_CHANSPEC_BW_160; + } else { + return 0; + } + + /* So far we have g/ + * Can now be followed by u/l if bw = 40, + * or '+80' if bw = 80, to make '80+80' bw, + * or '.5' if bw = 2.5 to make '2.5' bw . 
+
+    c = tolower((int)a[0]);
+
+    /* if we have a 2g/40 channel, we should have a l/u spec now */
+    if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) {
+        if (c == 'u' || c == 'l') {
+            a ++; /* consume the u/l char */
+            sb_ul = c;
+            goto done_read;
+        }
+    }
+
+    /* check for 80+80 */
+    if (c == '+') {
+        /* 80+80 */
+        const char plus80[] = "80/";
+
+        /* must be looking at '+80/'
+         * check and consume this string.
+         */
+        chspec_bw = WL_CHANSPEC_BW_8080;
+
+        a ++; /* consume the char '+' */
+
+        /* consume the '80/' string */
+        for (i = 0; i < 3; i++) {
+            if (*a++ != plus80[i]) {
+                return 0;
+            }
+        }
+
+        /* read primary 80MHz channel */
+        if (!read_uint(&a, &ch1))
+            return 0;
+
+        /* must be followed by '-' */
+        if (a[0] != '-')
+            return 0;
+        a ++; /* consume the char */
+
+        /* read secondary 80MHz channel */
+        if (!read_uint(&a, &ch2))
+            return 0;
+    } else if (c == '.') {
+        /* 2.5 */
+        /* must be looking at '.5'
+         * check and consume this string.
+         */
+        chspec_bw = WL_CHANSPEC_BW_2P5;
+
+        a ++; /* consume the char '.' */
+
+        /* consume the '5' string */
+        if (*a++ != '5') {
+            return 0;
+        }
+    }
+
+done_read:
+    /* skip trailing white space */
+    while (a[0] == ' ') {
+        a ++;
+    }
+
+    /* must be end of string */
+    if (a[0] != '\0')
+        return 0;
+
+    /* Now have all the chanspec string parts read;
+     * chspec_band, ctl_ch, chspec_bw, sb_ul, ch1, ch2.
+     * chspec_band and chspec_bw are chanspec values.
+     * Need to convert ctl_ch, sb_ul, and ch1,ch2 into
+     * a center channel (or two) and sideband.
+     */
+
+    /* if a sb u/l string was given, just use that,
+     * guaranteed to be bw = 40 by string parse.
+     */
+    if (sb_ul != '\0') {
+        if (sb_ul == 'l') {
+            chspec_ch = UPPER_20_SB(ctl_ch);
+            chspec_sb = WL_CHANSPEC_CTL_SB_LLL;
+        } else if (sb_ul == 'u') {
+            chspec_ch = LOWER_20_SB(ctl_ch);
+            chspec_sb = WL_CHANSPEC_CTL_SB_LLU;
+        }
+    }
+    /* if the bw is 20, center and sideband are trivial */
+    else if (BW_LE20(chspec_bw)) {
+        chspec_ch = ctl_ch;
+        chspec_sb = WL_CHANSPEC_CTL_SB_NONE;
+    }
+    /* if the bw is 40/80/160, not 80+80, a single method
+     * can be used to find the center and sideband
+     */
+    else if (chspec_bw != WL_CHANSPEC_BW_8080) {
+        /* figure out ctl sideband based on ctl channel and bandwidth */
+        const uint8 *center_ch = NULL;
+        int num_ch = 0;
+        int sb = -1;
+
+        if (chspec_bw == WL_CHANSPEC_BW_40) {
+            center_ch = wf_5g_40m_chans;
+            num_ch = WF_NUM_5G_40M_CHANS;
+        } else if (chspec_bw == WL_CHANSPEC_BW_80) {
+            center_ch = wf_5g_80m_chans;
+            num_ch = WF_NUM_5G_80M_CHANS;
+        } else if (chspec_bw == WL_CHANSPEC_BW_160) {
+            center_ch = wf_5g_160m_chans;
+            num_ch = WF_NUM_5G_160M_CHANS;
+        } else {
+            return 0;
+        }
+
+        for (i = 0; i < num_ch; i ++) {
+            sb = channel_to_sb(center_ch[i], ctl_ch, bw);
+            if (sb >= 0) {
+                chspec_ch = center_ch[i];
+                chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+                break;
+            }
+        }
+
+        /* check for no matching sb/center */
+        if (sb < 0) {
+            return 0;
+        }
+    }
+    /* Otherwise, bw is 80+80. Figure out channel pair and sb */
+    else {
+        int ch1_id = 0, ch2_id = 0;
+        int sb;
+
+        /* look up the channel ID for the specified channel numbers */
+        ch1_id = channel_80mhz_to_id(ch1);
+        ch2_id = channel_80mhz_to_id(ch2);
+
+        /* validate channels */
+        if (ch1_id < 0 || ch2_id < 0)
+            return 0;
+
+        /* combine 2 channel IDs in channel field of chspec */
+        chspec_ch = (((uint)ch1_id << WL_CHANSPEC_CHAN1_SHIFT) |
+                     ((uint)ch2_id << WL_CHANSPEC_CHAN2_SHIFT));
+
+        /* figure out primary 20 MHz sideband */
+
+        /* is the primary channel contained in the 1st 80MHz channel? */
+        sb = channel_to_sb(ch1, ctl_ch, bw);
+        if (sb < 0) {
+            /* no match for primary channel 'ctl_ch' in segment0 80MHz channel */
+            return 0;
+        }
+
+        chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
+    }
+
+    chspec = (chspec_ch | chspec_band | chspec_bw | chspec_sb);
+
+    if (wf_chspec_malformed(chspec))
+        return 0;
+
+    return chspec;
+}
+
+/*
+ * Verify the chanspec is using a legal set of parameters, i.e. that the
+ * chanspec specified a band, bw, ctl_sb and channel and that the
+ * combination could be legal given any set of circumstances.
+ * RETURNS: TRUE if the chanspec is malformed, FALSE if it looks good.
+ */
+bool
+wf_chspec_malformed(chanspec_t chanspec)
+{
+    uint chspec_bw = CHSPEC_BW(chanspec);
+    uint chspec_ch = CHSPEC_CHANNEL(chanspec);
+
+    /* must be 2G or 5G band */
+    if (CHSPEC_IS2G(chanspec)) {
+        /* must be valid bandwidth */
+        if (!BW_LE40(chspec_bw)) {
+            return TRUE;
+        }
+    } else if (CHSPEC_IS5G(chanspec)) {
+        if (chspec_bw == WL_CHANSPEC_BW_8080) {
+            uint ch1_id, ch2_id;
+
+            /* channel IDs in 80+80 must be in range */
+            ch1_id = CHSPEC_CHAN1(chanspec);
+            ch2_id = CHSPEC_CHAN2(chanspec);
+            if (ch1_id >= WF_NUM_5G_80M_CHANS || ch2_id >= WF_NUM_5G_80M_CHANS)
+                return TRUE;
+
+        } else if (BW_LE160(chspec_bw)) {
+            if (chspec_ch > MAXCHANNEL) {
+                return TRUE;
+            }
+        } else {
+            /* invalid bandwidth */
+            return TRUE;
+        }
+    } else {
+        /* must be 2G or 5G band */
+        return TRUE;
+    }
+
+    /* side band needs to be consistent with bandwidth */
+    if (BW_LE20(chspec_bw)) {
+        if (CHSPEC_CTL_SB(chanspec) != WL_CHANSPEC_CTL_SB_LLL)
+            return TRUE;
+    } else if (chspec_bw == WL_CHANSPEC_BW_40) {
+        if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LLU)
+            return TRUE;
+    } else if (chspec_bw == WL_CHANSPEC_BW_80 ||
+               chspec_bw == WL_CHANSPEC_BW_8080) {
+        if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LUU)
+            return TRUE;
+    }
+    else if (chspec_bw == WL_CHANSPEC_BW_160) {
+        ASSERT(CHSPEC_CTL_SB(chanspec) <= WL_CHANSPEC_CTL_SB_UUU);
+    }
+    return FALSE;
+}
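+
+/* Illustrative example (an editor's sketch): malformed vs. merely invalid.
+ * A 2.4GHz chanspec claiming 80MHz bandwidth can never be legal, so it is
+ * malformed. A well-formed 5G 20MHz chanspec on a nonexistent channel passes
+ * this check but fails wf_chspec_valid() below:
+ *
+ *    chanspec_t bad = 13 | WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_80;
+ *    chanspec_t odd = 35 | WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_20;
+ *    ASSERT(wf_chspec_malformed(bad));    // 2G band cannot carry 80MHz
+ *    ASSERT(!wf_chspec_malformed(odd));   // fields individually plausible
+ *    ASSERT(!wf_chspec_valid(odd));       // but 35 is not a valid 5G channel
+ */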
+
+/*
+ * Verify the chanspec specifies a valid channel according to 802.11.
+ * RETURNS: TRUE if the chanspec is a valid 802.11 channel
+ */
+bool
+wf_chspec_valid(chanspec_t chanspec)
+{
+    uint chspec_bw = CHSPEC_BW(chanspec);
+    uint chspec_ch = CHSPEC_CHANNEL(chanspec);
+
+    if (wf_chspec_malformed(chanspec))
+        return FALSE;
+
+    if (CHSPEC_IS2G(chanspec)) {
+        /* must be valid bandwidth and channel range */
+        if (BW_LE20(chspec_bw)) {
+            if (chspec_ch >= 1 && chspec_ch <= 14)
+                return TRUE;
+        } else if (chspec_bw == WL_CHANSPEC_BW_40) {
+            if (chspec_ch >= 3 && chspec_ch <= 11)
+                return TRUE;
+        }
+    } else if (CHSPEC_IS5G(chanspec)) {
+        if (chspec_bw == WL_CHANSPEC_BW_8080) {
+            uint16 ch1, ch2;
+
+            ch1 = wf_5g_80m_chans[CHSPEC_CHAN1(chanspec)];
+            ch2 = wf_5g_80m_chans[CHSPEC_CHAN2(chanspec)];
+
+            /* the two channels must be separated by more than 80MHz by VHT req */
+            if ((ch2 > ch1 + CH_80MHZ_APART) ||
+                (ch1 > ch2 + CH_80MHZ_APART))
+                return TRUE;
+        } else {
+            const uint8 *center_ch;
+            uint num_ch, i;
+
+            if (BW_LE40(chspec_bw)) {
+                center_ch = wf_5g_40m_chans;
+                num_ch = WF_NUM_5G_40M_CHANS;
+            } else if (chspec_bw == WL_CHANSPEC_BW_80) {
+                center_ch = wf_5g_80m_chans;
+                num_ch = WF_NUM_5G_80M_CHANS;
+            } else if (chspec_bw == WL_CHANSPEC_BW_160) {
+                center_ch = wf_5g_160m_chans;
+                num_ch = WF_NUM_5G_160M_CHANS;
+            } else {
+                /* invalid bandwidth */
+                return FALSE;
+            }
+
+            /* check for a valid center channel */
+            if (BW_LE20(chspec_bw)) {
+                /* We don't have an array of legal 20MHz 5G channels, but they are
+                 * each side of the legal 40MHz channels. Check the chanspec
+                 * channel against either side of the 40MHz channels.
+                 */
+                for (i = 0; i < num_ch; i ++) {
+                    if (chspec_ch == (uint)LOWER_20_SB(center_ch[i]) ||
+                        chspec_ch == (uint)UPPER_20_SB(center_ch[i]))
+                        break; /* match found */
+                }
+
+                if (i == num_ch) {
+                    /* check for channel 165 which is not the side band
+                     * of a 40MHz 5G channel
+                     */
+                    if (chspec_ch == 165)
+                        i = 0;
+
+                    /* check for legacy JP channels on failure */
+                    if (chspec_ch == 34 || chspec_ch == 38 ||
+                        chspec_ch == 42 || chspec_ch == 46)
+                        i = 0;
+                }
+            } else {
+                /* check the chanspec channel to each legal channel */
+                for (i = 0; i < num_ch; i ++) {
+                    if (chspec_ch == center_ch[i])
+                        break; /* match found */
+                }
+            }
+
+            if (i < num_ch) {
+                /* match found */
+                return TRUE;
+            }
+        }
+    }
+
+    return FALSE;
+}
+
+/*
+ * This function returns the channel number that control traffic is being sent on. For 20MHz
+ * channels this is just the channel number; for 40MHz, 80MHz, 160MHz channels it is the 20MHz
+ * sideband depending on the chanspec selected
+ */
+uint8
+wf_chspec_ctlchan(chanspec_t chspec)
+{
+    uint center_chan;
+    uint bw_mhz;
+    uint sb;
+
+    ASSERT(!wf_chspec_malformed(chspec));
+
+    /* Is there a sideband ? */
+    if (CHSPEC_BW_LE20(chspec)) {
+        return CHSPEC_CHANNEL(chspec);
+    } else {
+        sb = CHSPEC_CTL_SB(chspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+
+        if (CHSPEC_IS8080(chspec)) {
+            /* For an 80+80 MHz channel, the sideband 'sb' field is an 80 MHz sideband
+             * (LL, LU, UL, UU) for the 80 MHz frequency segment 0.
+             */
+            uint chan_id = CHSPEC_CHAN1(chspec);
+
+            bw_mhz = 80;
+
+            /* convert from channel index to channel number */
+            center_chan = wf_5g_80m_chans[chan_id];
+        }
+        else {
+            bw_mhz = bw_chspec_to_mhz(chspec);
+            center_chan = CHSPEC_CHANNEL(chspec) >> WL_CHANSPEC_CHAN_SHIFT;
+        }
+
+        return (channel_to_ctl_chan(center_chan, bw_mhz, sb));
+    }
+}
+
+/* given a chanspec, return the bandwidth string */
+char *
+wf_chspec_to_bw_str(chanspec_t chspec)
+{
+    return (char *)wf_chspec_bw_str[(CHSPEC_BW(chspec) >> WL_CHANSPEC_BW_SHIFT)];
+}
+
+/*
+ * This function returns the chanspec of the control channel of a given chanspec
+ */
+chanspec_t
+wf_chspec_ctlchspec(chanspec_t chspec)
+{
+    chanspec_t ctl_chspec = chspec;
+    uint8 ctl_chan;
+
+    ASSERT(!wf_chspec_malformed(chspec));
+
+    /* Is there a sideband ? */
+    if (!CHSPEC_BW_LE20(chspec)) {
+        ctl_chan = wf_chspec_ctlchan(chspec);
+        ctl_chspec = ctl_chan | WL_CHANSPEC_BW_20;
+        ctl_chspec |= CHSPEC_BAND(chspec);
+    }
+    return ctl_chspec;
+}
+
+/* return chanspec given control channel and bandwidth
+ * return 0 on error
+ */
+uint16
+wf_channel2chspec(uint ctl_ch, uint bw)
+{
+    uint16 chspec;
+    const uint8 *center_ch = NULL;
+    int num_ch = 0;
+    int sb = -1;
+    int i = 0;
+
+    chspec = ((ctl_ch <= CH_MAX_2G_CHANNEL) ?
+              WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+
+    chspec |= bw;
+
+    if (bw == WL_CHANSPEC_BW_40) {
+        center_ch = wf_5g_40m_chans;
+        num_ch = WF_NUM_5G_40M_CHANS;
+        bw = 40;
+    } else if (bw == WL_CHANSPEC_BW_80) {
+        center_ch = wf_5g_80m_chans;
+        num_ch = WF_NUM_5G_80M_CHANS;
+        bw = 80;
+    } else if (bw == WL_CHANSPEC_BW_160) {
+        center_ch = wf_5g_160m_chans;
+        num_ch = WF_NUM_5G_160M_CHANS;
+        bw = 160;
+    } else if (BW_LE20(bw)) {
+        chspec |= ctl_ch;
+        return chspec;
+    } else {
+        return 0;
+    }
+
+    for (i = 0; i < num_ch; i ++) {
+        sb = channel_to_sb(center_ch[i], ctl_ch, bw);
+        if (sb >= 0) {
+            chspec |= center_ch[i];
+            chspec |= (sb << WL_CHANSPEC_CTL_SB_SHIFT);
+            break;
+        }
+    }
+
+    /* check for no matching sb/center */
+    if (sb < 0) {
+        return 0;
+    }
+
+    return chspec;
+}
+
+/*
+ * This function returns the chanspec for the primary 40MHz of an 80MHz channel.
+ * The control sideband specifies the same 20MHz channel that the 80MHz channel is using
+ * as the primary 20MHz channel.
+ */
+extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec)
+{
+    chanspec_t chspec40 = chspec;
+    uint center_chan;
+    uint sb;
+
+    ASSERT(!wf_chspec_malformed(chspec));
+
+    /* if the chanspec is > 80MHz, use the helper routine to find the primary 80 MHz channel */
+    if (CHSPEC_IS8080(chspec) || CHSPEC_IS160(chspec)) {
+        chspec = wf_chspec_primary80_chspec(chspec);
+    }
+
+    /* determine primary 40 MHz sub-channel of an 80 MHz chanspec */
+    if (CHSPEC_IS80(chspec)) {
+        center_chan = CHSPEC_CHANNEL(chspec);
+        sb = CHSPEC_CTL_SB(chspec);
+
+        if (sb < WL_CHANSPEC_CTL_SB_UL) {
+            /* Primary 40MHz is on lower side */
+            center_chan -= CH_20MHZ_APART;
+            /* sideband bits are the same for LL/LU and L/U */
+        } else {
+            /* Primary 40MHz is on upper side */
+            center_chan += CH_20MHZ_APART;
+            /* sideband bits need to be adjusted by UL offset */
+            sb -= WL_CHANSPEC_CTL_SB_UL;
+        }
+
+        /* Create primary 40MHz chanspec */
+        chspec40 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 |
+                    sb | center_chan);
+    }
+
+    return chspec40;
+}
+
+/*
+ * Return the channel number for a given frequency and base frequency.
+ * The returned channel number is relative to the given base frequency.
+ * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
+ * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
+ *
+ * Frequency is specified in MHz.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band
+ * and [0, 200] otherwise.
+ * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel, or if the frequency is not an even
+ * multiple of 5 MHz from the base frequency to the base plus 1 GHz.
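+ *
+ * Worked example (illustrative): freq = 5200 MHz with start_factor =
+ * WF_CHAN_FACTOR_5_G (10000) gives base = 10000 / 2 = 5000 MHz,
+ * offset = 200 MHz, and thus channel 200 / 5 = 40.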
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ */
+int
+wf_mhz2channel(uint freq, uint start_factor)
+{
+    int ch = -1;
+    uint base;
+    int offset;
+
+    /* take the default channel start frequency */
+    if (start_factor == 0) {
+        if (freq >= 2400 && freq <= 2500)
+            start_factor = WF_CHAN_FACTOR_2_4_G;
+        else if (freq >= 5000 && freq <= 6000)
+            start_factor = WF_CHAN_FACTOR_5_G;
+    }
+
+    if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G)
+        return 14;
+
+    base = start_factor / 2;
+
+    /* check that the frequency is in 1GHz range of the base */
+    if ((freq < base) || (freq > base + 1000))
+        return -1;
+
+    offset = freq - base;
+    ch = offset / 5;
+
+    /* check that frequency is a 5MHz multiple from the base */
+    if (offset != (ch * 5))
+        return -1;
+
+    /* restricted channel range check for 2.4G */
+    if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13))
+        return -1;
+
+    return ch;
+}
+
+/*
+ * Return the center frequency in MHz of the given channel and base frequency.
+ * The channel number is interpreted relative to the given base frequency.
+ *
+ * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_4_G, and WF_CHAN_FACTOR_5_G
+ * are defined for 2.4 GHz, 4 GHz, and 5 GHz bands.
+ * The channel range of [1, 14] is only checked for a start_factor of
+ * WF_CHAN_FACTOR_2_4_G (4814 = 2407 * 2).
+ * Odd start_factors produce channels on .5 MHz boundaries, in which case
+ * the answer is rounded down to an integral MHz.
+ * -1 is returned for an out of range channel.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ */
+int
+wf_channel2mhz(uint ch, uint start_factor)
+{
+    int freq;
+
+    if ((start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 14)) ||
+        (ch > 200))
+        freq = -1;
+    else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14))
+        freq = 2484;
+    else
+        freq = ch * 5 + start_factor / 2;
+
+    return freq;
+}
+
+static const uint16 sidebands[] = {
+    WL_CHANSPEC_CTL_SB_LLL, WL_CHANSPEC_CTL_SB_LLU,
+    WL_CHANSPEC_CTL_SB_LUL, WL_CHANSPEC_CTL_SB_LUU,
+    WL_CHANSPEC_CTL_SB_ULL, WL_CHANSPEC_CTL_SB_ULU,
+    WL_CHANSPEC_CTL_SB_UUL, WL_CHANSPEC_CTL_SB_UUU
+};
+
+/*
+ * Returns the chanspec 80MHz channel corresponding to the following input
+ * parameters
+ *
+ *    primary_channel - primary 20MHz channel
+ *    center_channel  - center frequency of the 80MHz channel
+ *
+ * The center_channel can be one of {42, 58, 106, 122, 138, 155}
+ *
+ * returns INVCHANSPEC in case of error
+ */
+chanspec_t
+wf_chspec_80(uint8 center_channel, uint8 primary_channel)
+{
+
+    chanspec_t chanspec = INVCHANSPEC;
+    chanspec_t chanspec_cur;
+    uint i;
+
+    for (i = 0; i < WF_NUM_SIDEBANDS_80MHZ; i++) {
+        chanspec_cur = CH80MHZ_CHSPEC(center_channel, sidebands[i]);
+        if (primary_channel == wf_chspec_ctlchan(chanspec_cur)) {
+            chanspec = chanspec_cur;
+            break;
+        }
+    }
+    /* If the loop ended early, we are good; otherwise we did not
+     * find an 80MHz chanspec with the given center_channel that had a primary
+     * channel matching the given primary_channel.
+     */
+    return chanspec;
+}
+
+/*
+ * Returns the 80+80 chanspec corresponding to the following input parameters
+ *
+ *    primary_20mhz - Primary 20 MHz channel
+ *    chan0 - center channel number of one frequency segment
+ *    chan1 - center channel number of the other frequency segment
+ *
+ * Parameters chan0 and chan1 are channel numbers in {42, 58, 106, 122, 138, 155}.
+ * The primary channel must be contained in one of the 80MHz channels. This routine + * will determine which frequency segment is the primary 80 MHz segment. + * + * Returns INVCHANSPEC in case of error. + * + * Refer to IEEE802.11ac section 22.3.14 "Channelization". + */ +chanspec_t +wf_chspec_get8080_chspec(uint8 primary_20mhz, uint8 chan0, uint8 chan1) +{ + int sb = 0; + uint16 chanspec = 0; + int chan0_id = 0, chan1_id = 0; + int seg0, seg1; + + chan0_id = channel_80mhz_to_id(chan0); + chan1_id = channel_80mhz_to_id(chan1); + + /* make sure the channel numbers were valid */ + if (chan0_id == -1 || chan1_id == -1) + return INVCHANSPEC; + + /* does the primary channel fit with the 1st 80MHz channel ? */ + sb = channel_to_sb(chan0, primary_20mhz, 80); + if (sb >= 0) { + /* yes, so chan0 is frequency segment 0, and chan1 is seg 1 */ + seg0 = chan0_id; + seg1 = chan1_id; + } else { + /* no, so does the primary channel fit with the 2nd 80MHz channel ? */ + sb = channel_to_sb(chan1, primary_20mhz, 80); + if (sb < 0) { + /* no match for ctl_ch to either 80MHz center channel */ + return INVCHANSPEC; + } + /* swapped, so chan1 is frequency segment 0, and chan0 is seg 1 */ + seg0 = chan1_id; + seg1 = chan0_id; + } + + chanspec = ((seg0 << WL_CHANSPEC_CHAN1_SHIFT) | + (seg1 << WL_CHANSPEC_CHAN2_SHIFT) | + (sb << WL_CHANSPEC_CTL_SB_SHIFT) | + WL_CHANSPEC_BW_8080 | + WL_CHANSPEC_BAND_5G); + + return chanspec; +} + +/* + * This function returns the 80Mhz channel for the given id. + */ +static uint8 +wf_chspec_get80Mhz_ch(uint8 chan_80Mhz_id) +{ + if (chan_80Mhz_id < WF_NUM_5G_80M_CHANS) + return wf_5g_80m_chans[chan_80Mhz_id]; + + return 0; +} + +/* + * Returns the primary 80 Mhz channel for the provided chanspec + * + * chanspec - Input chanspec for which the 80MHz primary channel has to be retrieved + * + * returns -1 in case the provided channel is 20/40 Mhz chanspec + */ + +uint8 +wf_chspec_primary80_channel(chanspec_t chanspec) +{ + uint8 primary80_chan; + + if (CHSPEC_IS80(chanspec)) { + primary80_chan = CHSPEC_CHANNEL(chanspec); + } + else if (CHSPEC_IS8080(chanspec)) { + /* Channel ID 1 corresponds to frequency segment 0, the primary 80 MHz segment */ + primary80_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chanspec)); + } + else if (CHSPEC_IS160(chanspec)) { + uint8 center_chan = CHSPEC_CHANNEL(chanspec); + uint sb = CHSPEC_CTL_SB(chanspec) >> WL_CHANSPEC_CTL_SB_SHIFT; + + /* based on the sb value primary 80 channel can be retrieved + * if sb is in range 0 to 3 the lower band is the 80Mhz primary band + */ + if (sb < 4) { + primary80_chan = center_chan - CH_40MHZ_APART; + } + /* if sb is in range 4 to 7 the upper band is the 80Mhz primary band */ + else + { + primary80_chan = center_chan + CH_40MHZ_APART; + } + } + else { + /* for 20 and 40 Mhz */ + primary80_chan = -1; + } + return primary80_chan; +} + +/* + * Returns the secondary 80 Mhz channel for the provided chanspec + * + * chanspec - Input chanspec for which the 80MHz secondary channel has to be retrieved + * + * returns -1 in case the provided channel is 20/40/80 Mhz chanspec + */ +uint8 +wf_chspec_secondary80_channel(chanspec_t chanspec) +{ + uint8 secondary80_chan; + + if (CHSPEC_IS8080(chanspec)) { + secondary80_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chanspec)); + } + else if (CHSPEC_IS160(chanspec)) { + uint8 center_chan = CHSPEC_CHANNEL(chanspec); + uint sb = CHSPEC_CTL_SB(chanspec) >> WL_CHANSPEC_CTL_SB_SHIFT; + + /* based on the sb value secondary 80 channel can be retrieved + * if sb is in range 0 to 3 upper band is the 
secondary 80MHz band
+         */
+        if (sb < 4) {
+            secondary80_chan = center_chan + CH_40MHZ_APART;
+        }
+        /* if sb is in range 4 to 7 the lower band is the secondary 80MHz band */
+        else
+        {
+            secondary80_chan = center_chan - CH_40MHZ_APART;
+        }
+    }
+    else {
+        /* for 20, 40, and 80 MHz */
+        secondary80_chan = -1;
+    }
+    return secondary80_chan;
+}
+
+/*
+ * This function returns the chanspec for the primary 80MHz of a 160MHz or 80+80 channel.
+ *
+ * chanspec - Input chanspec for which the primary 80MHz chanspec has to be retrieved
+ *
+ * returns the input chanspec in case the provided chanspec is an 80 MHz chanspec
+ * returns INVCHANSPEC in case the provided channel is a 20/40 MHz chanspec
+ */
+chanspec_t
+wf_chspec_primary80_chspec(chanspec_t chspec)
+{
+    chanspec_t chspec80;
+    uint center_chan;
+    uint sb;
+
+    ASSERT(!wf_chspec_malformed(chspec));
+    if (CHSPEC_IS80(chspec)) {
+        chspec80 = chspec;
+    }
+    else if (CHSPEC_IS8080(chspec)) {
+
+        /* Channel ID 1 corresponds to frequency segment 0, the primary 80 MHz segment */
+        center_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chspec));
+
+        sb = CHSPEC_CTL_SB(chspec);
+
+        /* Create primary 80MHz chanspec */
+        chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+    }
+    else if (CHSPEC_IS160(chspec)) {
+        center_chan = CHSPEC_CHANNEL(chspec);
+        sb = CHSPEC_CTL_SB(chspec);
+
+        if (sb < WL_CHANSPEC_CTL_SB_ULL) {
+            /* Primary 80MHz is on lower side */
+            center_chan -= CH_40MHZ_APART;
+        }
+        else {
+            /* Primary 80MHz is on upper side */
+            center_chan += CH_40MHZ_APART;
+            sb -= WL_CHANSPEC_CTL_SB_ULL;
+        }
+        /* Create primary 80MHz chanspec */
+        chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+    }
+    else {
+        chspec80 = INVCHANSPEC;
+    }
+
+    return chspec80;
+}
+
+#ifdef WL11AC_80P80
+uint8
+wf_chspec_channel(chanspec_t chspec)
+{
+    if (CHSPEC_IS8080(chspec)) {
+        return wf_chspec_primary80_channel(chspec);
+    }
+    else {
+        return ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK));
+    }
+}
+#endif /* WL11AC_80P80 */
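
For a 160MHz chanspec, the primary and secondary 80MHz sub-bands returned by
wf_chspec_primary80_channel() and wf_chspec_secondary80_channel() above sit
CH_40MHZ_APART (8 channel numbers) on either side of the 160MHz center,
selected by the sideband field. A small sketch (illustrative only):

    /* 160MHz channel centered at 50, primary 20MHz channel 36 (sideband 0) */
    chanspec_t cs = wf_chspec_aton("36/160");
    uint8 p80 = wf_chspec_primary80_channel(cs);    /* 50 - 8 == 42 */
    uint8 s80 = wf_chspec_secondary80_channel(cs);  /* 50 + 8 == 58 */
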
diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_channels.h b/drivers/net/wireless/bcmdhd/bcmwifi_channels.h
new file mode 100644
index 000000000000..186c0e18ee92
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/bcmwifi_channels.h
@@ -0,0 +1,631 @@
+/*
+ * Misc utility routines for WL and Apps
+ * This header file houses the defines and function prototypes used by
+ * both the wl driver, tools and apps.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: bcmwifi_channels.h 591285 2015-10-07 11:56:29Z $
+ */
+
+#ifndef	_bcmwifi_channels_h_
+#define	_bcmwifi_channels_h_
+
+
+/* A chanspec holds the channel number, band, bandwidth and control sideband */
+typedef uint16 chanspec_t;
+
+/* channel defines */
+#define CH_UPPER_SB			0x01
+#define CH_LOWER_SB			0x02
+#define CH_EWA_VALID			0x04
+#define CH_80MHZ_APART			16
+#define CH_40MHZ_APART			8
+#define CH_20MHZ_APART			4
+#define CH_10MHZ_APART			2
+#define CH_5MHZ_APART			1	/* 2G band channels are 5 MHz apart */
+#define CH_MAX_2G_CHANNEL		14	/* Max channel in 2G band */
+
+/* maximum # channels the s/w supports */
+#define MAXCHANNEL		224	/* max # supported channels. The max channel no is above,
+					 * this is that + 1 rounded up to a multiple of NBBY (8).
+					 * DO NOT MAKE it > 255: channels are uint8's all over
+					 */
+#define MAXCHANNEL_NUM	(MAXCHANNEL - 1)	/* max channel number */
+
+/* channel bitvec */
+typedef struct {
+	uint8 vec[MAXCHANNEL/8];	/* bitvec of channels */
+} chanvec_t;
+
+/* make sure channel num is within valid range */
+#define CH_NUM_VALID_RANGE(ch_num) ((ch_num) > 0 && (ch_num) <= MAXCHANNEL_NUM)
+
+#define CHSPEC_CTLOVLP(sp1, sp2, sep)	\
+	(ABS(wf_chspec_ctlchan(sp1) - wf_chspec_ctlchan(sp2)) < (sep))
+
+/* All builds use the new 11ac ratespec/chanspec */
+#undef  D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+#define WL_CHANSPEC_CHAN_MASK		0x00ff
+#define WL_CHANSPEC_CHAN_SHIFT		0
+#define WL_CHANSPEC_CHAN1_MASK		0x000f
+#define WL_CHANSPEC_CHAN1_SHIFT		0
+#define WL_CHANSPEC_CHAN2_MASK		0x00f0
+#define WL_CHANSPEC_CHAN2_SHIFT		4
+
+#define WL_CHANSPEC_CTL_SB_MASK		0x0700
+#define WL_CHANSPEC_CTL_SB_SHIFT	8
+#define WL_CHANSPEC_CTL_SB_LLL		0x0000
+#define WL_CHANSPEC_CTL_SB_LLU		0x0100
+#define WL_CHANSPEC_CTL_SB_LUL		0x0200
+#define WL_CHANSPEC_CTL_SB_LUU		0x0300
+#define WL_CHANSPEC_CTL_SB_ULL		0x0400
+#define WL_CHANSPEC_CTL_SB_ULU		0x0500
+#define WL_CHANSPEC_CTL_SB_UUL		0x0600
+#define WL_CHANSPEC_CTL_SB_UUU		0x0700
+#define WL_CHANSPEC_CTL_SB_LL		WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_LU		WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_UL		WL_CHANSPEC_CTL_SB_LUL
+#define WL_CHANSPEC_CTL_SB_UU		WL_CHANSPEC_CTL_SB_LUU
+#define WL_CHANSPEC_CTL_SB_L		WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_U		WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_LOWER	WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_UPPER	WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_NONE		WL_CHANSPEC_CTL_SB_LLL
+
+#define WL_CHANSPEC_BW_MASK		0x3800
+#define WL_CHANSPEC_BW_SHIFT		11
+#define WL_CHANSPEC_BW_5		0x0000
+#define WL_CHANSPEC_BW_10		0x0800
+#define WL_CHANSPEC_BW_20		0x1000
+#define WL_CHANSPEC_BW_40		0x1800
+#define WL_CHANSPEC_BW_80		0x2000
+#define WL_CHANSPEC_BW_160		0x2800
+#define WL_CHANSPEC_BW_8080		0x3000
+#define WL_CHANSPEC_BW_2P5		0x3800
+
+#define WL_CHANSPEC_BAND_MASK		0xc000
+#define WL_CHANSPEC_BAND_SHIFT		14
+#define WL_CHANSPEC_BAND_2G		0x0000
+#define WL_CHANSPEC_BAND_3G		0x4000
+#define WL_CHANSPEC_BAND_4G		0x8000
+#define WL_CHANSPEC_BAND_5G		0xc000
+#define INVCHANSPEC			255
+#define MAX_CHANSPEC			0xFFFF
+
+/* channel defines */
+#define LOWER_20_SB(channel)		(((channel) > CH_10MHZ_APART) ? \
+					((channel) - CH_10MHZ_APART) : 0)
+#define UPPER_20_SB(channel)		(((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
+					((channel) + CH_10MHZ_APART) : 0)
+
+#define LL_20_SB(channel) (((channel) > 3 * CH_10MHZ_APART) ? ((channel) - 3 * CH_10MHZ_APART) : 0)
+#define UU_20_SB(channel) (((channel) < (MAXCHANNEL - 3 * CH_10MHZ_APART)) ?
\ + ((channel) + 3 * CH_10MHZ_APART) : 0) +#define LU_20_SB(channel) LOWER_20_SB(channel) +#define UL_20_SB(channel) UPPER_20_SB(channel) + +#define LOWER_40_SB(channel) ((channel) - CH_20MHZ_APART) +#define UPPER_40_SB(channel) ((channel) + CH_20MHZ_APART) +#define CHSPEC_WLCBANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX) +#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \ + (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) +#define CH2P5MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_2P5 | \ + (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) +#define CH5MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_5 | \ + (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) +#define CH10MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_10 | \ + (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) +#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \ + ((channel) + CH_20MHZ_APART) : 0) +#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ + ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \ + ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \ + WL_CHANSPEC_BAND_5G)) +#define CH80MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ + ((channel) | (ctlsb) | \ + WL_CHANSPEC_BW_80 | WL_CHANSPEC_BAND_5G) +#define CH160MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ + ((channel) | (ctlsb) | \ + WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G) +#define CHBW_CHSPEC(bw, channel) (chanspec_t)((chanspec_t)(channel) | (bw) | \ + (((channel) <= CH_MAX_2G_CHANNEL) ? \ + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) + +/* simple MACROs to get different fields of chanspec */ +#ifdef WL11AC_80P80 +#define CHSPEC_CHANNEL(chspec) wf_chspec_channel(chspec) +#else +#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK)) +#endif +#define CHSPEC_CHAN1(chspec) ((chspec) & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT +#define CHSPEC_CHAN2(chspec) ((chspec) & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT +#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK) +#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK) +#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK) + +#ifdef WL11N_20MHZONLY + +#define CHSPEC_IS2P5(chspec) 0 +#define CHSPEC_IS5(chspec) 0 +#define CHSPEC_IS10(chspec) 0 +#define CHSPEC_IS20(chspec) 1 +#ifndef CHSPEC_IS40 +#define CHSPEC_IS40(chspec) 0 +#endif +#ifndef CHSPEC_IS80 +#define CHSPEC_IS80(chspec) 0 +#endif +#ifndef CHSPEC_IS160 +#define CHSPEC_IS160(chspec) 0 +#endif +#ifndef CHSPEC_IS8080 +#define CHSPEC_IS8080(chspec) 0 +#endif +#define BW_LE20(bw) TRUE +#define CHSPEC_ISLE20(chspec) TRUE +#else /* !WL11N_20MHZONLY */ + +#define CHSPEC_IS2P5(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_2P5) +#define CHSPEC_IS5(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_5) +#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10) +#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) +#ifndef CHSPEC_IS40 +#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40) +#endif +#ifndef CHSPEC_IS80 +#define CHSPEC_IS80(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80) +#endif +#ifndef CHSPEC_IS160 +#define CHSPEC_IS160(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == 
WL_CHANSPEC_BW_160)
+#endif
+#ifndef CHSPEC_IS8080
+#define CHSPEC_IS8080(chspec)	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_8080)
+#endif
+
+#ifdef WL11ULB
+#define BW_LT20(bw)		(((bw) == WL_CHANSPEC_BW_2P5) || \
+				((bw) == WL_CHANSPEC_BW_5) || \
+				((bw) == WL_CHANSPEC_BW_10))
+#define CHSPEC_BW_LT20(chspec)	(BW_LT20(CHSPEC_BW(chspec)))
+/* This MACRO exists strictly to avoid abandons in existing code with the ULB
+ * feature and is in no way optimal to use. It should be replaced with
+ * CHSPEC_BW_LE() instead.
+ */
+#define BW_LE20(bw)		(((bw) == WL_CHANSPEC_BW_2P5) || \
+				((bw) == WL_CHANSPEC_BW_5) || \
+				((bw) == WL_CHANSPEC_BW_10) || \
+				((bw) == WL_CHANSPEC_BW_20))
+#define CHSPEC_ISLE20(chspec)	(BW_LE20(CHSPEC_BW(chspec)))
+
+#else /* WL11ULB */
+#define BW_LE20(bw)		((bw) == WL_CHANSPEC_BW_20)
+#define CHSPEC_ISLE20(chspec)	(CHSPEC_IS20(chspec))
+#endif /* WL11ULB */
+#endif /* !WL11N_20MHZONLY */
+
+#define BW_LE40(bw)		(BW_LE20(bw) || ((bw) == WL_CHANSPEC_BW_40))
+#define BW_LE80(bw)		(BW_LE40(bw) || ((bw) == WL_CHANSPEC_BW_80))
+#define BW_LE160(bw)		(BW_LE80(bw) || ((bw) == WL_CHANSPEC_BW_160))
+#define CHSPEC_BW_LE20(chspec)	(BW_LE20(CHSPEC_BW(chspec)))
+#define CHSPEC_IS5G(chspec)	(((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+#define CHSPEC_IS2G(chspec)	(((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+#define CHSPEC_SB_UPPER(chspec)	\
+	((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) && \
+	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC_SB_LOWER(chspec)	\
+	((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) && \
+	(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC2WLC_BAND(chspec)	(CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G)
+
+/**
+ * Number of chars needed for wf_chspec_ntoa() destination character buffer.
+ */
+#define CHANSPEC_STR_LEN	20
+
+
+#define CHSPEC_IS_BW_160_WIDE(chspec)	(CHSPEC_BW(chspec) == WL_CHANSPEC_BW_160 ||\
+	CHSPEC_BW(chspec) == WL_CHANSPEC_BW_8080)
+
+/* BW inequality comparisons: LE (<=), GE (>=), LT (<), GT (>). Comparisons can be made
+* as simple numeric comparisons, with the exception that 160MHz is the same BW as 80+80MHz
+* but has a different numeric value; (WL_CHANSPEC_BW_160 < WL_CHANSPEC_BW_8080).
+*
+* The LT/LE/GT/GE macros therefore first check whether both the chanspec bandwidth and bw
+* are 160-wide; if not, a plain numeric comparison is made.
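+*
+* For example (illustrative): comparing a 160MHz chanspec against WL_CHANSPEC_BW_8080
+* with CHSPEC_BW_GE() yields TRUE even though 0x2800 < 0x3000 numerically, because
+* both sides are 160-wide.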
+*/ +#ifdef WL11ULB +#define CHSPEC_BW_GE(chspec, bw) \ + (((CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\ + (CHSPEC_BW(chspec) >= (bw))) && \ + (!(CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5 && (bw) != WL_CHANSPEC_BW_2P5))) +#else /* WL11ULB */ +#define CHSPEC_BW_GE(chspec, bw) \ + ((CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\ + (CHSPEC_BW(chspec) >= (bw))) +#endif /* WL11ULB */ + +#ifdef WL11ULB +#define CHSPEC_BW_LE(chspec, bw) \ + (((CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\ + (CHSPEC_BW(chspec) <= (bw))) || \ + (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5)) +#else /* WL11ULB */ +#define CHSPEC_BW_LE(chspec, bw) \ + ((CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\ + (CHSPEC_BW(chspec) <= (bw))) +#endif /* WL11ULB */ + +#ifdef WL11ULB +#define CHSPEC_BW_GT(chspec, bw) \ + ((!(CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\ + (CHSPEC_BW(chspec) > (bw))) && \ + (CHSPEC_BW(chspec) != WL_CHANSPEC_BW_2P5)) +#else /* WL11ULB */ +#define CHSPEC_BW_GT(chspec, bw) \ + (!(CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\ + (CHSPEC_BW(chspec) > (bw))) +#endif /* WL11ULB */ + +#ifdef WL11ULB +#define CHSPEC_BW_LT(chspec, bw) \ + ((!(CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\ + (CHSPEC_BW(chspec) < (bw))) || \ + ((CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5 && (bw) != WL_CHANSPEC_BW_2P5))) +#else /* WL11ULB */ +#define CHSPEC_BW_LT(chspec, bw) \ + (!(CHSPEC_IS_BW_160_WIDE(chspec) &&\ + ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\ + (CHSPEC_BW(chspec) < (bw))) +#endif /* WL11ULB */ + +/* Legacy Chanspec defines + * These are the defines for the previous format of the chanspec_t + */ +#define WL_LCHANSPEC_CHAN_MASK 0x00ff +#define WL_LCHANSPEC_CHAN_SHIFT 0 + +#define WL_LCHANSPEC_CTL_SB_MASK 0x0300 +#define WL_LCHANSPEC_CTL_SB_SHIFT 8 +#define WL_LCHANSPEC_CTL_SB_LOWER 0x0100 +#define WL_LCHANSPEC_CTL_SB_UPPER 0x0200 +#define WL_LCHANSPEC_CTL_SB_NONE 0x0300 + +#define WL_LCHANSPEC_BW_MASK 0x0C00 +#define WL_LCHANSPEC_BW_SHIFT 10 +#define WL_LCHANSPEC_BW_10 0x0400 +#define WL_LCHANSPEC_BW_20 0x0800 +#define WL_LCHANSPEC_BW_40 0x0C00 + +#define WL_LCHANSPEC_BAND_MASK 0xf000 +#define WL_LCHANSPEC_BAND_SHIFT 12 +#define WL_LCHANSPEC_BAND_5G 0x1000 +#define WL_LCHANSPEC_BAND_2G 0x2000 + +#define LCHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_LCHANSPEC_CHAN_MASK)) +#define LCHSPEC_BAND(chspec) ((chspec) & WL_LCHANSPEC_BAND_MASK) +#define LCHSPEC_CTL_SB(chspec) ((chspec) & WL_LCHANSPEC_CTL_SB_MASK) +#define LCHSPEC_BW(chspec) ((chspec) & WL_LCHANSPEC_BW_MASK) +#define LCHSPEC_IS10(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_10) +#define LCHSPEC_IS20(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_20) +#define LCHSPEC_IS40(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40) +#define LCHSPEC_IS5G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_5G) +#define LCHSPEC_IS2G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_2G) + +#define LCHSPEC_SB_UPPER(chspec) \ + ((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_UPPER) && \ + (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40)) +#define LCHSPEC_SB_LOWER(chspec) \ + ((((chspec) 
& WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_LOWER) && \
+	(((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+
+#define LCHSPEC_CREATE(chan, band, bw, sb)  ((uint16)((chan) | (sb) | (bw) | (band)))
+
+#define CH20MHZ_LCHSPEC(channel) \
+	(chanspec_t)((chanspec_t)(channel) | WL_LCHANSPEC_BW_20 | \
+	WL_LCHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
+	WL_LCHANSPEC_BAND_2G : WL_LCHANSPEC_BAND_5G))
+
+/*
+ * WF_CHAN_FACTOR_* constants are used to calculate channel frequency
+ * given a channel number.
+ * chan_freq = chan_factor * 500 kHz + chan_number * 5 MHz
+ */
+
+/**
+ * Channel Factor for the starting frequency of 2.4 GHz channels.
+ * The value corresponds to 2407 MHz.
+ */
+#define WF_CHAN_FACTOR_2_4_G		4814	/* 2.4 GHz band, 2407 MHz */
+
+/**
+ * Channel Factor for the starting frequency of 5 GHz channels.
+ * The value corresponds to 5000 MHz.
+ */
+#define WF_CHAN_FACTOR_5_G		10000	/* 5 GHz band, 5000 MHz */
+
+/**
+ * Channel Factor for the starting frequency of 4.9 GHz channels.
+ * The value corresponds to 4000 MHz.
+ */
+#define WF_CHAN_FACTOR_4_G		8000	/* 4.9 GHz band for Japan */
+
+#define WLC_2G_25MHZ_OFFSET		5	/* 2.4GHz band channel offset */
+
+/**
+ * Number of sidebands for the specified bandwidth chanspec
+ */
+#define WF_NUM_SIDEBANDS_40MHZ   2
+#define WF_NUM_SIDEBANDS_80MHZ   4
+#define WF_NUM_SIDEBANDS_8080MHZ 4
+#define WF_NUM_SIDEBANDS_160MHZ  8
+
+/**
+ * Convert chanspec to ascii string
+ *
+ * @param	chspec	chanspec format
+ * @param	buf	ascii string of chanspec
+ *
+ * @return	pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ *		Original chanspec in case of error
+ *
+ * @see CHANSPEC_STR_LEN
+ */
+extern char * wf_chspec_ntoa_ex(chanspec_t chspec, char *buf);
+
+/**
+ * Convert chanspec to ascii string
+ *
+ * @param	chspec	chanspec format
+ * @param	buf	ascii string of chanspec
+ *
+ * @return	pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ *		NULL in case of error
+ *
+ * @see CHANSPEC_STR_LEN
+ */
+extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf);
+
+/**
+ * Convert ascii string to chanspec
+ *
+ * @param	a	pointer to input string
+ *
+ * @return	the parsed chanspec if successful, 0 otherwise
+ */
+extern chanspec_t wf_chspec_aton(const char *a);
+
+/**
+ * Verify the chanspec fields are valid.
+ *
+ * Verify the chanspec is using a legal set of field values, i.e. that the chanspec
+ * specified a band, bw, ctl_sb and channel and that the combination could be
+ * legal given some set of circumstances.
+ *
+ * @param	chanspec	input chanspec to verify
+ *
+ * @return TRUE if the chanspec is malformed, FALSE if it looks good.
+ */
+extern bool wf_chspec_malformed(chanspec_t chanspec);
+
+/**
+ * Verify the chanspec specifies a valid channel according to 802.11.
+ *
+ * @param	chanspec	input chanspec to verify
+ *
+ * @return TRUE if the chanspec is a valid 802.11 channel
+ */
+extern bool wf_chspec_valid(chanspec_t chanspec);
+
+/**
+ * Return the primary (control) channel.
+ *
+ * This function returns the channel number of the primary 20MHz channel. For
+ * 20MHz channels this is just the channel number. For 40MHz or wider channels
+ * it is the primary 20MHz channel specified by the chanspec.
+ *
+ * @param	chspec	input chanspec
+ *
+ * @return Returns the channel number of the primary 20MHz channel
+ */
+extern uint8 wf_chspec_ctlchan(chanspec_t chspec);
+
+/*
+ * Return the bandwidth string.
+ *
+ * This function returns the bandwidth string for the passed chanspec.
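+ * For example (illustrative), a chanspec parsed from "36/80" yields "80",
+ * and one parsed from "6l" yields "40".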
+ *
+ * @param	chspec	input chanspec
+ *
+ * @return	Returns the bandwidth string
+ */
+extern char * wf_chspec_to_bw_str(chanspec_t chspec);
+
+/**
+ * Return the primary (control) chanspec.
+ *
+ * This function returns the chanspec of the primary 20MHz channel. For 20MHz
+ * channels this is just the chanspec. For 40MHz or wider channels it is the
+ * chanspec of the primary 20MHz channel specified by the chanspec.
+ *
+ * @param	chspec	input chanspec
+ *
+ * @return Returns the chanspec of the primary 20MHz channel
+ */
+extern chanspec_t wf_chspec_ctlchspec(chanspec_t chspec);
+
+/**
+ * Return the chanspec for the primary 40MHz of an 80MHz channel.
+ *
+ * This function returns the chanspec for the primary 40MHz of an 80MHz channel.
+ * The control sideband specifies the same 20MHz channel that the 80MHz channel is using
+ * as the primary 20MHz channel.
+ */
+extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec);
+
+/*
+ * Return the channel number for a given frequency and base frequency.
+ * The returned channel number is relative to the given base frequency.
+ * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
+ * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
+ *
+ * Frequency is specified in MHz.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band
+ * and [0, 200] otherwise.
+ * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel, or if the frequency is not an even
+ * multiple of 5 MHz from the base frequency to the base plus 1 GHz.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ *
+ * @param	freq		frequency in MHz
+ * @param	start_factor	base frequency in 500 kHz units, e.g. 10000 for 5 GHz
+ *
+ * @return Returns a channel number
+ *
+ * @see  WF_CHAN_FACTOR_2_4_G
+ * @see  WF_CHAN_FACTOR_5_G
+ */
+extern int wf_mhz2channel(uint freq, uint start_factor);
+
+/**
+ * Return the center frequency in MHz of the given channel and base frequency.
+ *
+ * The channel number is interpreted relative to the given base frequency.
+ *
+ * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ * The channel range of [1, 14] is only checked for a start_factor of
+ * WF_CHAN_FACTOR_2_4_G (4814).
+ * Odd start_factors produce channels on .5 MHz boundaries, in which case
+ * the answer is rounded down to an integral MHz.
+ * -1 is returned for an out of range channel.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ *
+ * @param	channel		input channel number
+ * @param	start_factor	base frequency in 500 kHz units, e.g. 10000 for 5 GHz
10000 for 5 GHz + * + * @return Returns a frequency in MHz + * + * @see WF_CHAN_FACTOR_2_4_G + * @see WF_CHAN_FACTOR_5_G + */ +extern int wf_channel2mhz(uint channel, uint start_factor); + +/** + * Returns the 80MHz chanspec corresponding to the following input + * parameters + * + * primary_channel - primary 20MHz channel + * center_channel - center frequency of the 80MHz channel + * + * The center_channel can be one of {42, 58, 106, 122, 138, 155} + * + * returns INVCHANSPEC in case of error + */ +extern chanspec_t wf_chspec_80(uint8 center_channel, uint8 primary_channel); + +/** + * Convert ctl chan and bw to chanspec + * + * @param ctl_ch channel + * @param bw bandwidth + * + * @return > 0 if successful or 0 otherwise + * + */ +extern uint16 wf_channel2chspec(uint ctl_ch, uint bw); + +extern uint wf_channel2freq(uint channel); +extern uint wf_freq2channel(uint freq); + +/* + * Returns the 80+80 MHz chanspec corresponding to the following input parameters + * + * primary_20mhz - Primary 20 MHz channel + * chan0_80MHz - center channel number of one frequency segment + * chan1_80MHz - center channel number of the other frequency segment + * + * Parameters chan0_80MHz and chan1_80MHz are channel numbers in {42, 58, 106, 122, 138, 155}. + * The primary channel must be contained in one of the 80MHz channels. This routine + * will determine which frequency segment is the primary 80 MHz segment. + * + * Returns INVCHANSPEC in case of error. + * + * Refer to IEEE802.11ac section 22.3.14 "Channelization". + */ +extern chanspec_t wf_chspec_get8080_chspec(uint8 primary_20mhz, + uint8 chan0_80Mhz, uint8 chan1_80Mhz); + +/* + * Returns the primary 80 MHz channel for the provided chanspec + * + * chanspec - Input chanspec for which the 80MHz primary channel has to be retrieved + * + * returns -1 in case the provided chanspec is a 20/40 MHz chanspec + */ +extern uint8 wf_chspec_primary80_channel(chanspec_t chanspec); + +/* + * Returns the secondary 80 MHz channel for the provided chanspec + * + * chanspec - Input chanspec for which the 80MHz secondary channel has to be retrieved + * + * returns -1 in case the provided chanspec is a 20/40 MHz chanspec + */ +extern uint8 wf_chspec_secondary80_channel(chanspec_t chanspec); + +/* + * This function returns the chanspec for the primary 80MHz of a 160MHz or 80+80 channel. + */ +extern chanspec_t wf_chspec_primary80_chspec(chanspec_t chspec); + +#ifdef WL11AC_80P80 +/* + * This function returns the centre channel for the given chanspec.
+ * In case of 80+80 chanspec it returns the primary 80 Mhz centre channel + */ +extern uint8 wf_chspec_channel(chanspec_t chspec); +#endif +#endif /* _bcmwifi_channels_h_ */ diff --git a/drivers/net/wireless/bcmdhd/bcmwifi_rates.h b/drivers/net/wireless/bcmdhd/bcmwifi_rates.h new file mode 100644 index 000000000000..1329e9bc80da --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmwifi_rates.h @@ -0,0 +1,793 @@ +/* + * Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmwifi_rates.h 591285 2015-10-07 11:56:29Z $ + */ + +#ifndef _bcmwifi_rates_h_ +#define _bcmwifi_rates_h_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + +#define WL_RATESET_SZ_DSSS 4 +#define WL_RATESET_SZ_OFDM 8 +#define WL_RATESET_SZ_VHT_MCS 10 +#define WL_RATESET_SZ_VHT_MCS_P 12 + +#if defined(WLPROPRIETARY_11N_RATES) +#define WL_RATESET_SZ_HT_MCS WL_RATESET_SZ_VHT_MCS +#else +#define WL_RATESET_SZ_HT_MCS 8 +#endif + +#define WL_RATESET_SZ_HT_IOCTL 8 /* MAC histogram, compatibility with wl utility */ + +#define WL_TX_CHAINS_MAX 4 + +#define WL_RATE_DISABLED (-128) /* Power value corresponding to unsupported rate */ + +/* Transmit channel bandwidths */ +typedef enum wl_tx_bw { + WL_TX_BW_20, + WL_TX_BW_40, + WL_TX_BW_80, + WL_TX_BW_20IN40, + WL_TX_BW_20IN80, + WL_TX_BW_40IN80, + WL_TX_BW_160, + WL_TX_BW_20IN160, + WL_TX_BW_40IN160, + WL_TX_BW_80IN160, + WL_TX_BW_ALL, + WL_TX_BW_8080, + WL_TX_BW_8080CHAN2, + WL_TX_BW_20IN8080, + WL_TX_BW_40IN8080, + WL_TX_BW_80IN8080, + WL_TX_BW_2P5, + WL_TX_BW_5, + WL_TX_BW_10 +} wl_tx_bw_t; + + +/* + * Transmit modes. + * Not all modes are listed here, only those required for disambiguation. e.g. 
SPEXP is not listed + */ +typedef enum wl_tx_mode { + WL_TX_MODE_NONE, + WL_TX_MODE_STBC, + WL_TX_MODE_CDD, + WL_TX_MODE_TXBF, + WL_NUM_TX_MODES +} wl_tx_mode_t; + + +/* Number of transmit chains */ +typedef enum wl_tx_chains { + WL_TX_CHAINS_1 = 1, + WL_TX_CHAINS_2, + WL_TX_CHAINS_3, + WL_TX_CHAINS_4 +} wl_tx_chains_t; + + +/* Number of transmit streams */ +typedef enum wl_tx_nss { + WL_TX_NSS_1 = 1, + WL_TX_NSS_2, + WL_TX_NSS_3, + WL_TX_NSS_4 +} wl_tx_nss_t; + + +/* This enum maps each rate to a CLM index */ + +typedef enum clm_rates { + /************ + * 1 chain * + ************ + */ + + /* 1 Stream */ + WL_RATE_1X1_DSSS_1 = 0, + WL_RATE_1X1_DSSS_2 = 1, + WL_RATE_1X1_DSSS_5_5 = 2, + WL_RATE_1X1_DSSS_11 = 3, + + WL_RATE_1X1_OFDM_6 = 4, + WL_RATE_1X1_OFDM_9 = 5, + WL_RATE_1X1_OFDM_12 = 6, + WL_RATE_1X1_OFDM_18 = 7, + WL_RATE_1X1_OFDM_24 = 8, + WL_RATE_1X1_OFDM_36 = 9, + WL_RATE_1X1_OFDM_48 = 10, + WL_RATE_1X1_OFDM_54 = 11, + + WL_RATE_1X1_MCS0 = 12, + WL_RATE_1X1_MCS1 = 13, + WL_RATE_1X1_MCS2 = 14, + WL_RATE_1X1_MCS3 = 15, + WL_RATE_1X1_MCS4 = 16, + WL_RATE_1X1_MCS5 = 17, + WL_RATE_1X1_MCS6 = 18, + WL_RATE_1X1_MCS7 = 19, + WL_RATE_P_1X1_MCS87 = 20, + WL_RATE_P_1X1_MCS88 = 21, + + WL_RATE_1X1_VHT0SS1 = 12, + WL_RATE_1X1_VHT1SS1 = 13, + WL_RATE_1X1_VHT2SS1 = 14, + WL_RATE_1X1_VHT3SS1 = 15, + WL_RATE_1X1_VHT4SS1 = 16, + WL_RATE_1X1_VHT5SS1 = 17, + WL_RATE_1X1_VHT6SS1 = 18, + WL_RATE_1X1_VHT7SS1 = 19, + WL_RATE_1X1_VHT8SS1 = 20, + WL_RATE_1X1_VHT9SS1 = 21, + WL_RATE_P_1X1_VHT10SS1 = 22, + WL_RATE_P_1X1_VHT11SS1 = 23, + + + /************ + * 2 chains * + ************ + */ + + /* 1 Stream expanded + 1 */ + WL_RATE_1X2_DSSS_1 = 24, + WL_RATE_1X2_DSSS_2 = 25, + WL_RATE_1X2_DSSS_5_5 = 26, + WL_RATE_1X2_DSSS_11 = 27, + + WL_RATE_1X2_CDD_OFDM_6 = 28, + WL_RATE_1X2_CDD_OFDM_9 = 29, + WL_RATE_1X2_CDD_OFDM_12 = 30, + WL_RATE_1X2_CDD_OFDM_18 = 31, + WL_RATE_1X2_CDD_OFDM_24 = 32, + WL_RATE_1X2_CDD_OFDM_36 = 33, + WL_RATE_1X2_CDD_OFDM_48 = 34, + WL_RATE_1X2_CDD_OFDM_54 = 35, + + WL_RATE_1X2_CDD_MCS0 = 36, + WL_RATE_1X2_CDD_MCS1 = 37, + WL_RATE_1X2_CDD_MCS2 = 38, + WL_RATE_1X2_CDD_MCS3 = 39, + WL_RATE_1X2_CDD_MCS4 = 40, + WL_RATE_1X2_CDD_MCS5 = 41, + WL_RATE_1X2_CDD_MCS6 = 42, + WL_RATE_1X2_CDD_MCS7 = 43, + WL_RATE_P_1X2_CDD_MCS87 = 44, + WL_RATE_P_1X2_CDD_MCS88 = 45, + + WL_RATE_1X2_VHT0SS1 = 36, + WL_RATE_1X2_VHT1SS1 = 37, + WL_RATE_1X2_VHT2SS1 = 38, + WL_RATE_1X2_VHT3SS1 = 39, + WL_RATE_1X2_VHT4SS1 = 40, + WL_RATE_1X2_VHT5SS1 = 41, + WL_RATE_1X2_VHT6SS1 = 42, + WL_RATE_1X2_VHT7SS1 = 43, + WL_RATE_1X2_VHT8SS1 = 44, + WL_RATE_1X2_VHT9SS1 = 45, + WL_RATE_P_1X2_VHT10SS1 = 46, + WL_RATE_P_1X2_VHT11SS1 = 47, + + /* 2 Streams */ + WL_RATE_2X2_STBC_MCS0 = 48, + WL_RATE_2X2_STBC_MCS1 = 49, + WL_RATE_2X2_STBC_MCS2 = 50, + WL_RATE_2X2_STBC_MCS3 = 51, + WL_RATE_2X2_STBC_MCS4 = 52, + WL_RATE_2X2_STBC_MCS5 = 53, + WL_RATE_2X2_STBC_MCS6 = 54, + WL_RATE_2X2_STBC_MCS7 = 55, + WL_RATE_P_2X2_STBC_MCS87 = 56, + WL_RATE_P_2X2_STBC_MCS88 = 57, + + WL_RATE_2X2_STBC_VHT0SS1 = 48, + WL_RATE_2X2_STBC_VHT1SS1 = 49, + WL_RATE_2X2_STBC_VHT2SS1 = 50, + WL_RATE_2X2_STBC_VHT3SS1 = 51, + WL_RATE_2X2_STBC_VHT4SS1 = 52, + WL_RATE_2X2_STBC_VHT5SS1 = 53, + WL_RATE_2X2_STBC_VHT6SS1 = 54, + WL_RATE_2X2_STBC_VHT7SS1 = 55, + WL_RATE_2X2_STBC_VHT8SS1 = 56, + WL_RATE_2X2_STBC_VHT9SS1 = 57, + WL_RATE_P_2X2_STBC_VHT10SS1 = 58, + WL_RATE_P_2X2_STBC_VHT11SS1 = 59, + + WL_RATE_2X2_SDM_MCS8 = 60, + WL_RATE_2X2_SDM_MCS9 = 61, + WL_RATE_2X2_SDM_MCS10 = 62, + WL_RATE_2X2_SDM_MCS11 = 63, + WL_RATE_2X2_SDM_MCS12 = 64, + WL_RATE_2X2_SDM_MCS13 = 65, + 
WL_RATE_2X2_SDM_MCS14 = 66, + WL_RATE_2X2_SDM_MCS15 = 67, + WL_RATE_P_2X2_SDM_MCS99 = 68, + WL_RATE_P_2X2_SDM_MCS100 = 69, + + WL_RATE_2X2_VHT0SS2 = 60, + WL_RATE_2X2_VHT1SS2 = 61, + WL_RATE_2X2_VHT2SS2 = 62, + WL_RATE_2X2_VHT3SS2 = 63, + WL_RATE_2X2_VHT4SS2 = 64, + WL_RATE_2X2_VHT5SS2 = 65, + WL_RATE_2X2_VHT6SS2 = 66, + WL_RATE_2X2_VHT7SS2 = 67, + WL_RATE_2X2_VHT8SS2 = 68, + WL_RATE_2X2_VHT9SS2 = 69, + WL_RATE_P_2X2_VHT10SS2 = 70, + WL_RATE_P_2X2_VHT11SS2 = 71, + + /**************************** + * TX Beamforming, 2 chains * + **************************** + */ + + /* 1 Stream expanded + 1 */ + WL_RATE_1X2_TXBF_OFDM_6 = 72, + WL_RATE_1X2_TXBF_OFDM_9 = 73, + WL_RATE_1X2_TXBF_OFDM_12 = 74, + WL_RATE_1X2_TXBF_OFDM_18 = 75, + WL_RATE_1X2_TXBF_OFDM_24 = 76, + WL_RATE_1X2_TXBF_OFDM_36 = 77, + WL_RATE_1X2_TXBF_OFDM_48 = 78, + WL_RATE_1X2_TXBF_OFDM_54 = 79, + + WL_RATE_1X2_TXBF_MCS0 = 80, + WL_RATE_1X2_TXBF_MCS1 = 81, + WL_RATE_1X2_TXBF_MCS2 = 82, + WL_RATE_1X2_TXBF_MCS3 = 83, + WL_RATE_1X2_TXBF_MCS4 = 84, + WL_RATE_1X2_TXBF_MCS5 = 85, + WL_RATE_1X2_TXBF_MCS6 = 86, + WL_RATE_1X2_TXBF_MCS7 = 87, + WL_RATE_P_1X2_TXBF_MCS87 = 88, + WL_RATE_P_1X2_TXBF_MCS88 = 89, + + WL_RATE_1X2_TXBF_VHT0SS1 = 80, + WL_RATE_1X2_TXBF_VHT1SS1 = 81, + WL_RATE_1X2_TXBF_VHT2SS1 = 82, + WL_RATE_1X2_TXBF_VHT3SS1 = 83, + WL_RATE_1X2_TXBF_VHT4SS1 = 84, + WL_RATE_1X2_TXBF_VHT5SS1 = 85, + WL_RATE_1X2_TXBF_VHT6SS1 = 86, + WL_RATE_1X2_TXBF_VHT7SS1 = 87, + WL_RATE_1X2_TXBF_VHT8SS1 = 88, + WL_RATE_1X2_TXBF_VHT9SS1 = 89, + WL_RATE_P_1X2_TXBF_VHT10SS1 = 90, + WL_RATE_P_1X2_TXBF_VHT11SS1 = 91, + + /* 2 Streams */ + WL_RATE_2X2_TXBF_SDM_MCS8 = 92, + WL_RATE_2X2_TXBF_SDM_MCS9 = 93, + WL_RATE_2X2_TXBF_SDM_MCS10 = 94, + WL_RATE_2X2_TXBF_SDM_MCS11 = 95, + WL_RATE_2X2_TXBF_SDM_MCS12 = 96, + WL_RATE_2X2_TXBF_SDM_MCS13 = 97, + WL_RATE_2X2_TXBF_SDM_MCS14 = 98, + WL_RATE_2X2_TXBF_SDM_MCS15 = 99, + WL_RATE_P_2X2_TXBF_SDM_MCS99 = 100, + WL_RATE_P_2X2_TXBF_SDM_MCS100 = 101, + + WL_RATE_2X2_TXBF_VHT0SS2 = 92, + WL_RATE_2X2_TXBF_VHT1SS2 = 93, + WL_RATE_2X2_TXBF_VHT2SS2 = 94, + WL_RATE_2X2_TXBF_VHT3SS2 = 95, + WL_RATE_2X2_TXBF_VHT4SS2 = 96, + WL_RATE_2X2_TXBF_VHT5SS2 = 97, + WL_RATE_2X2_TXBF_VHT6SS2 = 98, + WL_RATE_2X2_TXBF_VHT7SS2 = 99, + WL_RATE_2X2_TXBF_VHT8SS2 = 100, + WL_RATE_2X2_TXBF_VHT9SS2 = 101, + WL_RATE_P_2X2_TXBF_VHT10SS2 = 102, + WL_RATE_P_2X2_TXBF_VHT11SS2 = 103, + + + /************ + * 3 chains * + ************ + */ + + /* 1 Stream expanded + 2 */ + WL_RATE_1X3_DSSS_1 = 104, + WL_RATE_1X3_DSSS_2 = 105, + WL_RATE_1X3_DSSS_5_5 = 106, + WL_RATE_1X3_DSSS_11 = 107, + + WL_RATE_1X3_CDD_OFDM_6 = 108, + WL_RATE_1X3_CDD_OFDM_9 = 109, + WL_RATE_1X3_CDD_OFDM_12 = 110, + WL_RATE_1X3_CDD_OFDM_18 = 111, + WL_RATE_1X3_CDD_OFDM_24 = 112, + WL_RATE_1X3_CDD_OFDM_36 = 113, + WL_RATE_1X3_CDD_OFDM_48 = 114, + WL_RATE_1X3_CDD_OFDM_54 = 115, + + WL_RATE_1X3_CDD_MCS0 = 116, + WL_RATE_1X3_CDD_MCS1 = 117, + WL_RATE_1X3_CDD_MCS2 = 118, + WL_RATE_1X3_CDD_MCS3 = 119, + WL_RATE_1X3_CDD_MCS4 = 120, + WL_RATE_1X3_CDD_MCS5 = 121, + WL_RATE_1X3_CDD_MCS6 = 122, + WL_RATE_1X3_CDD_MCS7 = 123, + WL_RATE_P_1X3_CDD_MCS87 = 124, + WL_RATE_P_1X3_CDD_MCS88 = 125, + + WL_RATE_1X3_VHT0SS1 = 116, + WL_RATE_1X3_VHT1SS1 = 117, + WL_RATE_1X3_VHT2SS1 = 118, + WL_RATE_1X3_VHT3SS1 = 119, + WL_RATE_1X3_VHT4SS1 = 120, + WL_RATE_1X3_VHT5SS1 = 121, + WL_RATE_1X3_VHT6SS1 = 122, + WL_RATE_1X3_VHT7SS1 = 123, + WL_RATE_1X3_VHT8SS1 = 124, + WL_RATE_1X3_VHT9SS1 = 125, + WL_RATE_P_1X3_VHT10SS1 = 126, + WL_RATE_P_1X3_VHT11SS1 = 127, + + /* 2 Streams expanded + 1 */ + WL_RATE_2X3_STBC_MCS0 = 128, + 
WL_RATE_2X3_STBC_MCS1 = 129, + WL_RATE_2X3_STBC_MCS2 = 130, + WL_RATE_2X3_STBC_MCS3 = 131, + WL_RATE_2X3_STBC_MCS4 = 132, + WL_RATE_2X3_STBC_MCS5 = 133, + WL_RATE_2X3_STBC_MCS6 = 134, + WL_RATE_2X3_STBC_MCS7 = 135, + WL_RATE_P_2X3_STBC_MCS87 = 136, + WL_RATE_P_2X3_STBC_MCS88 = 137, + + WL_RATE_2X3_STBC_VHT0SS1 = 128, + WL_RATE_2X3_STBC_VHT1SS1 = 129, + WL_RATE_2X3_STBC_VHT2SS1 = 130, + WL_RATE_2X3_STBC_VHT3SS1 = 131, + WL_RATE_2X3_STBC_VHT4SS1 = 132, + WL_RATE_2X3_STBC_VHT5SS1 = 133, + WL_RATE_2X3_STBC_VHT6SS1 = 134, + WL_RATE_2X3_STBC_VHT7SS1 = 135, + WL_RATE_2X3_STBC_VHT8SS1 = 136, + WL_RATE_2X3_STBC_VHT9SS1 = 137, + WL_RATE_P_2X3_STBC_VHT10SS1 = 138, + WL_RATE_P_2X3_STBC_VHT11SS1 = 139, + + WL_RATE_2X3_SDM_MCS8 = 140, + WL_RATE_2X3_SDM_MCS9 = 141, + WL_RATE_2X3_SDM_MCS10 = 142, + WL_RATE_2X3_SDM_MCS11 = 143, + WL_RATE_2X3_SDM_MCS12 = 144, + WL_RATE_2X3_SDM_MCS13 = 145, + WL_RATE_2X3_SDM_MCS14 = 146, + WL_RATE_2X3_SDM_MCS15 = 147, + WL_RATE_P_2X3_SDM_MCS99 = 148, + WL_RATE_P_2X3_SDM_MCS100 = 149, + + WL_RATE_2X3_VHT0SS2 = 140, + WL_RATE_2X3_VHT1SS2 = 141, + WL_RATE_2X3_VHT2SS2 = 142, + WL_RATE_2X3_VHT3SS2 = 143, + WL_RATE_2X3_VHT4SS2 = 144, + WL_RATE_2X3_VHT5SS2 = 145, + WL_RATE_2X3_VHT6SS2 = 146, + WL_RATE_2X3_VHT7SS2 = 147, + WL_RATE_2X3_VHT8SS2 = 148, + WL_RATE_2X3_VHT9SS2 = 149, + WL_RATE_P_2X3_VHT10SS2 = 150, + WL_RATE_P_2X3_VHT11SS2 = 151, + + /* 3 Streams */ + WL_RATE_3X3_SDM_MCS16 = 152, + WL_RATE_3X3_SDM_MCS17 = 153, + WL_RATE_3X3_SDM_MCS18 = 154, + WL_RATE_3X3_SDM_MCS19 = 155, + WL_RATE_3X3_SDM_MCS20 = 156, + WL_RATE_3X3_SDM_MCS21 = 157, + WL_RATE_3X3_SDM_MCS22 = 158, + WL_RATE_3X3_SDM_MCS23 = 159, + WL_RATE_P_3X3_SDM_MCS101 = 160, + WL_RATE_P_3X3_SDM_MCS102 = 161, + + WL_RATE_3X3_VHT0SS3 = 152, + WL_RATE_3X3_VHT1SS3 = 153, + WL_RATE_3X3_VHT2SS3 = 154, + WL_RATE_3X3_VHT3SS3 = 155, + WL_RATE_3X3_VHT4SS3 = 156, + WL_RATE_3X3_VHT5SS3 = 157, + WL_RATE_3X3_VHT6SS3 = 158, + WL_RATE_3X3_VHT7SS3 = 159, + WL_RATE_3X3_VHT8SS3 = 160, + WL_RATE_3X3_VHT9SS3 = 161, + WL_RATE_P_3X3_VHT10SS3 = 162, + WL_RATE_P_3X3_VHT11SS3 = 163, + + + /**************************** + * TX Beamforming, 3 chains * + **************************** + */ + + /* 1 Stream expanded + 2 */ + WL_RATE_1X3_TXBF_OFDM_6 = 164, + WL_RATE_1X3_TXBF_OFDM_9 = 165, + WL_RATE_1X3_TXBF_OFDM_12 = 166, + WL_RATE_1X3_TXBF_OFDM_18 = 167, + WL_RATE_1X3_TXBF_OFDM_24 = 168, + WL_RATE_1X3_TXBF_OFDM_36 = 169, + WL_RATE_1X3_TXBF_OFDM_48 = 170, + WL_RATE_1X3_TXBF_OFDM_54 = 171, + + WL_RATE_1X3_TXBF_MCS0 = 172, + WL_RATE_1X3_TXBF_MCS1 = 173, + WL_RATE_1X3_TXBF_MCS2 = 174, + WL_RATE_1X3_TXBF_MCS3 = 175, + WL_RATE_1X3_TXBF_MCS4 = 176, + WL_RATE_1X3_TXBF_MCS5 = 177, + WL_RATE_1X3_TXBF_MCS6 = 178, + WL_RATE_1X3_TXBF_MCS7 = 179, + WL_RATE_P_1X3_TXBF_MCS87 = 180, + WL_RATE_P_1X3_TXBF_MCS88 = 181, + + WL_RATE_1X3_TXBF_VHT0SS1 = 172, + WL_RATE_1X3_TXBF_VHT1SS1 = 173, + WL_RATE_1X3_TXBF_VHT2SS1 = 174, + WL_RATE_1X3_TXBF_VHT3SS1 = 175, + WL_RATE_1X3_TXBF_VHT4SS1 = 176, + WL_RATE_1X3_TXBF_VHT5SS1 = 177, + WL_RATE_1X3_TXBF_VHT6SS1 = 178, + WL_RATE_1X3_TXBF_VHT7SS1 = 179, + WL_RATE_1X3_TXBF_VHT8SS1 = 180, + WL_RATE_1X3_TXBF_VHT9SS1 = 181, + WL_RATE_P_1X3_TXBF_VHT10SS1 = 182, + WL_RATE_P_1X3_TXBF_VHT11SS1 = 183, + + /* 2 Streams expanded + 1 */ + WL_RATE_2X3_TXBF_SDM_MCS8 = 184, + WL_RATE_2X3_TXBF_SDM_MCS9 = 185, + WL_RATE_2X3_TXBF_SDM_MCS10 = 186, + WL_RATE_2X3_TXBF_SDM_MCS11 = 187, + WL_RATE_2X3_TXBF_SDM_MCS12 = 188, + WL_RATE_2X3_TXBF_SDM_MCS13 = 189, + WL_RATE_2X3_TXBF_SDM_MCS14 = 190, + WL_RATE_2X3_TXBF_SDM_MCS15 = 191, + WL_RATE_P_2X3_TXBF_SDM_MCS99 = 
192, + WL_RATE_P_2X3_TXBF_SDM_MCS100 = 193, + + WL_RATE_2X3_TXBF_VHT0SS2 = 184, + WL_RATE_2X3_TXBF_VHT1SS2 = 185, + WL_RATE_2X3_TXBF_VHT2SS2 = 186, + WL_RATE_2X3_TXBF_VHT3SS2 = 187, + WL_RATE_2X3_TXBF_VHT4SS2 = 188, + WL_RATE_2X3_TXBF_VHT5SS2 = 189, + WL_RATE_2X3_TXBF_VHT6SS2 = 190, + WL_RATE_2X3_TXBF_VHT7SS2 = 191, + WL_RATE_2X3_TXBF_VHT8SS2 = 192, + WL_RATE_2X3_TXBF_VHT9SS2 = 193, + WL_RATE_P_2X3_TXBF_VHT10SS2 = 194, + WL_RATE_P_2X3_TXBF_VHT11SS2 = 195, + + /* 3 Streams */ + WL_RATE_3X3_TXBF_SDM_MCS16 = 196, + WL_RATE_3X3_TXBF_SDM_MCS17 = 197, + WL_RATE_3X3_TXBF_SDM_MCS18 = 198, + WL_RATE_3X3_TXBF_SDM_MCS19 = 199, + WL_RATE_3X3_TXBF_SDM_MCS20 = 200, + WL_RATE_3X3_TXBF_SDM_MCS21 = 201, + WL_RATE_3X3_TXBF_SDM_MCS22 = 202, + WL_RATE_3X3_TXBF_SDM_MCS23 = 203, + WL_RATE_P_3X3_TXBF_SDM_MCS101 = 204, + WL_RATE_P_3X3_TXBF_SDM_MCS102 = 205, + + WL_RATE_3X3_TXBF_VHT0SS3 = 196, + WL_RATE_3X3_TXBF_VHT1SS3 = 197, + WL_RATE_3X3_TXBF_VHT2SS3 = 198, + WL_RATE_3X3_TXBF_VHT3SS3 = 199, + WL_RATE_3X3_TXBF_VHT4SS3 = 200, + WL_RATE_3X3_TXBF_VHT5SS3 = 201, + WL_RATE_3X3_TXBF_VHT6SS3 = 202, + WL_RATE_3X3_TXBF_VHT7SS3 = 203, + WL_RATE_3X3_TXBF_VHT8SS3 = 204, + WL_RATE_3X3_TXBF_VHT9SS3 = 205, + WL_RATE_P_3X3_TXBF_VHT10SS3 = 206, + WL_RATE_P_3X3_TXBF_VHT11SS3 = 207, + + + /************ + * 4 chains * + ************ + */ + + /* 1 Stream expanded + 3 */ + WL_RATE_1X4_DSSS_1 = 208, + WL_RATE_1X4_DSSS_2 = 209, + WL_RATE_1X4_DSSS_5_5 = 210, + WL_RATE_1X4_DSSS_11 = 211, + + WL_RATE_1X4_CDD_OFDM_6 = 212, + WL_RATE_1X4_CDD_OFDM_9 = 213, + WL_RATE_1X4_CDD_OFDM_12 = 214, + WL_RATE_1X4_CDD_OFDM_18 = 215, + WL_RATE_1X4_CDD_OFDM_24 = 216, + WL_RATE_1X4_CDD_OFDM_36 = 217, + WL_RATE_1X4_CDD_OFDM_48 = 218, + WL_RATE_1X4_CDD_OFDM_54 = 219, + + WL_RATE_1X4_CDD_MCS0 = 220, + WL_RATE_1X4_CDD_MCS1 = 221, + WL_RATE_1X4_CDD_MCS2 = 222, + WL_RATE_1X4_CDD_MCS3 = 223, + WL_RATE_1X4_CDD_MCS4 = 224, + WL_RATE_1X4_CDD_MCS5 = 225, + WL_RATE_1X4_CDD_MCS6 = 226, + WL_RATE_1X4_CDD_MCS7 = 227, + WL_RATE_P_1X4_CDD_MCS87 = 228, + WL_RATE_P_1X4_CDD_MCS88 = 229, + + WL_RATE_1X4_VHT0SS1 = 220, + WL_RATE_1X4_VHT1SS1 = 221, + WL_RATE_1X4_VHT2SS1 = 222, + WL_RATE_1X4_VHT3SS1 = 223, + WL_RATE_1X4_VHT4SS1 = 224, + WL_RATE_1X4_VHT5SS1 = 225, + WL_RATE_1X4_VHT6SS1 = 226, + WL_RATE_1X4_VHT7SS1 = 227, + WL_RATE_1X4_VHT8SS1 = 228, + WL_RATE_1X4_VHT9SS1 = 229, + WL_RATE_P_1X4_VHT10SS1 = 230, + WL_RATE_P_1X4_VHT11SS1 = 231, + + /* 2 Streams expanded + 2 */ + WL_RATE_2X4_STBC_MCS0 = 232, + WL_RATE_2X4_STBC_MCS1 = 233, + WL_RATE_2X4_STBC_MCS2 = 234, + WL_RATE_2X4_STBC_MCS3 = 235, + WL_RATE_2X4_STBC_MCS4 = 236, + WL_RATE_2X4_STBC_MCS5 = 237, + WL_RATE_2X4_STBC_MCS6 = 238, + WL_RATE_2X4_STBC_MCS7 = 239, + WL_RATE_P_2X4_STBC_MCS87 = 240, + WL_RATE_P_2X4_STBC_MCS88 = 241, + + WL_RATE_2X4_STBC_VHT0SS1 = 232, + WL_RATE_2X4_STBC_VHT1SS1 = 233, + WL_RATE_2X4_STBC_VHT2SS1 = 234, + WL_RATE_2X4_STBC_VHT3SS1 = 235, + WL_RATE_2X4_STBC_VHT4SS1 = 236, + WL_RATE_2X4_STBC_VHT5SS1 = 237, + WL_RATE_2X4_STBC_VHT6SS1 = 238, + WL_RATE_2X4_STBC_VHT7SS1 = 239, + WL_RATE_2X4_STBC_VHT8SS1 = 240, + WL_RATE_2X4_STBC_VHT9SS1 = 241, + WL_RATE_P_2X4_STBC_VHT10SS1 = 242, + WL_RATE_P_2X4_STBC_VHT11SS1 = 243, + + WL_RATE_2X4_SDM_MCS8 = 244, + WL_RATE_2X4_SDM_MCS9 = 245, + WL_RATE_2X4_SDM_MCS10 = 246, + WL_RATE_2X4_SDM_MCS11 = 247, + WL_RATE_2X4_SDM_MCS12 = 248, + WL_RATE_2X4_SDM_MCS13 = 249, + WL_RATE_2X4_SDM_MCS14 = 250, + WL_RATE_2X4_SDM_MCS15 = 251, + WL_RATE_P_2X4_SDM_MCS99 = 252, + WL_RATE_P_2X4_SDM_MCS100 = 253, + + WL_RATE_2X4_VHT0SS2 = 244, + WL_RATE_2X4_VHT1SS2 = 245, + WL_RATE_2X4_VHT2SS2 
= 246, + WL_RATE_2X4_VHT3SS2 = 247, + WL_RATE_2X4_VHT4SS2 = 248, + WL_RATE_2X4_VHT5SS2 = 249, + WL_RATE_2X4_VHT6SS2 = 250, + WL_RATE_2X4_VHT7SS2 = 251, + WL_RATE_2X4_VHT8SS2 = 252, + WL_RATE_2X4_VHT9SS2 = 253, + WL_RATE_P_2X4_VHT10SS2 = 254, + WL_RATE_P_2X4_VHT11SS2 = 255, + + /* 3 Streams expanded + 1 */ + WL_RATE_3X4_SDM_MCS16 = 256, + WL_RATE_3X4_SDM_MCS17 = 257, + WL_RATE_3X4_SDM_MCS18 = 258, + WL_RATE_3X4_SDM_MCS19 = 259, + WL_RATE_3X4_SDM_MCS20 = 260, + WL_RATE_3X4_SDM_MCS21 = 261, + WL_RATE_3X4_SDM_MCS22 = 262, + WL_RATE_3X4_SDM_MCS23 = 263, + WL_RATE_P_3X4_SDM_MCS101 = 264, + WL_RATE_P_3X4_SDM_MCS102 = 265, + + WL_RATE_3X4_VHT0SS3 = 256, + WL_RATE_3X4_VHT1SS3 = 257, + WL_RATE_3X4_VHT2SS3 = 258, + WL_RATE_3X4_VHT3SS3 = 259, + WL_RATE_3X4_VHT4SS3 = 260, + WL_RATE_3X4_VHT5SS3 = 261, + WL_RATE_3X4_VHT6SS3 = 262, + WL_RATE_3X4_VHT7SS3 = 263, + WL_RATE_3X4_VHT8SS3 = 264, + WL_RATE_3X4_VHT9SS3 = 265, + WL_RATE_P_3X4_VHT10SS3 = 266, + WL_RATE_P_3X4_VHT11SS3 = 267, + + + /* 4 Streams */ + WL_RATE_4X4_SDM_MCS24 = 268, + WL_RATE_4X4_SDM_MCS25 = 269, + WL_RATE_4X4_SDM_MCS26 = 270, + WL_RATE_4X4_SDM_MCS27 = 271, + WL_RATE_4X4_SDM_MCS28 = 272, + WL_RATE_4X4_SDM_MCS29 = 273, + WL_RATE_4X4_SDM_MCS30 = 274, + WL_RATE_4X4_SDM_MCS31 = 275, + WL_RATE_P_4X4_SDM_MCS103 = 276, + WL_RATE_P_4X4_SDM_MCS104 = 277, + + WL_RATE_4X4_VHT0SS4 = 268, + WL_RATE_4X4_VHT1SS4 = 269, + WL_RATE_4X4_VHT2SS4 = 270, + WL_RATE_4X4_VHT3SS4 = 271, + WL_RATE_4X4_VHT4SS4 = 272, + WL_RATE_4X4_VHT5SS4 = 273, + WL_RATE_4X4_VHT6SS4 = 274, + WL_RATE_4X4_VHT7SS4 = 275, + WL_RATE_4X4_VHT8SS4 = 276, + WL_RATE_4X4_VHT9SS4 = 277, + WL_RATE_P_4X4_VHT10SS4 = 278, + WL_RATE_P_4X4_VHT11SS4 = 279, + + + /**************************** + * TX Beamforming, 4 chains * + **************************** + */ + + /* 1 Stream expanded + 3 */ + WL_RATE_1X4_TXBF_OFDM_6 = 280, + WL_RATE_1X4_TXBF_OFDM_9 = 281, + WL_RATE_1X4_TXBF_OFDM_12 = 282, + WL_RATE_1X4_TXBF_OFDM_18 = 283, + WL_RATE_1X4_TXBF_OFDM_24 = 284, + WL_RATE_1X4_TXBF_OFDM_36 = 285, + WL_RATE_1X4_TXBF_OFDM_48 = 286, + WL_RATE_1X4_TXBF_OFDM_54 = 287, + + WL_RATE_1X4_TXBF_MCS0 = 288, + WL_RATE_1X4_TXBF_MCS1 = 289, + WL_RATE_1X4_TXBF_MCS2 = 290, + WL_RATE_1X4_TXBF_MCS3 = 291, + WL_RATE_1X4_TXBF_MCS4 = 292, + WL_RATE_1X4_TXBF_MCS5 = 293, + WL_RATE_1X4_TXBF_MCS6 = 294, + WL_RATE_1X4_TXBF_MCS7 = 295, + WL_RATE_P_1X4_TXBF_MCS87 = 296, + WL_RATE_P_1X4_TXBF_MCS88 = 297, + + WL_RATE_1X4_TXBF_VHT0SS1 = 288, + WL_RATE_1X4_TXBF_VHT1SS1 = 289, + WL_RATE_1X4_TXBF_VHT2SS1 = 290, + WL_RATE_1X4_TXBF_VHT3SS1 = 291, + WL_RATE_1X4_TXBF_VHT4SS1 = 292, + WL_RATE_1X4_TXBF_VHT5SS1 = 293, + WL_RATE_1X4_TXBF_VHT6SS1 = 294, + WL_RATE_1X4_TXBF_VHT7SS1 = 295, + WL_RATE_1X4_TXBF_VHT8SS1 = 296, + WL_RATE_1X4_TXBF_VHT9SS1 = 297, + WL_RATE_P_1X4_TXBF_VHT10SS1 = 298, + WL_RATE_P_1X4_TXBF_VHT11SS1 = 299, + + /* 2 Streams expanded + 2 */ + WL_RATE_2X4_TXBF_SDM_MCS8 = 300, + WL_RATE_2X4_TXBF_SDM_MCS9 = 301, + WL_RATE_2X4_TXBF_SDM_MCS10 = 302, + WL_RATE_2X4_TXBF_SDM_MCS11 = 303, + WL_RATE_2X4_TXBF_SDM_MCS12 = 304, + WL_RATE_2X4_TXBF_SDM_MCS13 = 305, + WL_RATE_2X4_TXBF_SDM_MCS14 = 306, + WL_RATE_2X4_TXBF_SDM_MCS15 = 307, + WL_RATE_P_2X4_TXBF_SDM_MCS99 = 308, + WL_RATE_P_2X4_TXBF_SDM_MCS100 = 309, + + WL_RATE_2X4_TXBF_VHT0SS2 = 300, + WL_RATE_2X4_TXBF_VHT1SS2 = 301, + WL_RATE_2X4_TXBF_VHT2SS2 = 302, + WL_RATE_2X4_TXBF_VHT3SS2 = 303, + WL_RATE_2X4_TXBF_VHT4SS2 = 304, + WL_RATE_2X4_TXBF_VHT5SS2 = 305, + WL_RATE_2X4_TXBF_VHT6SS2 = 306, + WL_RATE_2X4_TXBF_VHT7SS2 = 307, + WL_RATE_2X4_TXBF_VHT8SS2 = 308, + WL_RATE_2X4_TXBF_VHT9SS2 = 309, + 
WL_RATE_P_2X4_TXBF_VHT10SS2 = 310, + WL_RATE_P_2X4_TXBF_VHT11SS2 = 311, + + /* 3 Streams expanded + 1 */ + WL_RATE_3X4_TXBF_SDM_MCS16 = 312, + WL_RATE_3X4_TXBF_SDM_MCS17 = 313, + WL_RATE_3X4_TXBF_SDM_MCS18 = 314, + WL_RATE_3X4_TXBF_SDM_MCS19 = 315, + WL_RATE_3X4_TXBF_SDM_MCS20 = 316, + WL_RATE_3X4_TXBF_SDM_MCS21 = 317, + WL_RATE_3X4_TXBF_SDM_MCS22 = 318, + WL_RATE_3X4_TXBF_SDM_MCS23 = 319, + WL_RATE_P_3X4_TXBF_SDM_MCS101 = 320, + WL_RATE_P_3X4_TXBF_SDM_MCS102 = 321, + + WL_RATE_3X4_TXBF_VHT0SS3 = 312, + WL_RATE_3X4_TXBF_VHT1SS3 = 313, + WL_RATE_3X4_TXBF_VHT2SS3 = 314, + WL_RATE_3X4_TXBF_VHT3SS3 = 315, + WL_RATE_3X4_TXBF_VHT4SS3 = 316, + WL_RATE_3X4_TXBF_VHT5SS3 = 317, + WL_RATE_3X4_TXBF_VHT6SS3 = 318, + WL_RATE_3X4_TXBF_VHT7SS3 = 319, + WL_RATE_P_3X4_TXBF_VHT8SS3 = 320, + WL_RATE_P_3X4_TXBF_VHT9SS3 = 321, + WL_RATE_P_3X4_TXBF_VHT10SS3 = 322, + WL_RATE_P_3X4_TXBF_VHT11SS3 = 323, + + /* 4 Streams */ + WL_RATE_4X4_TXBF_SDM_MCS24 = 324, + WL_RATE_4X4_TXBF_SDM_MCS25 = 325, + WL_RATE_4X4_TXBF_SDM_MCS26 = 326, + WL_RATE_4X4_TXBF_SDM_MCS27 = 327, + WL_RATE_4X4_TXBF_SDM_MCS28 = 328, + WL_RATE_4X4_TXBF_SDM_MCS29 = 329, + WL_RATE_4X4_TXBF_SDM_MCS30 = 330, + WL_RATE_4X4_TXBF_SDM_MCS31 = 331, + WL_RATE_P_4X4_TXBF_SDM_MCS103 = 332, + WL_RATE_P_4X4_TXBF_SDM_MCS104 = 333, + + WL_RATE_4X4_TXBF_VHT0SS4 = 324, + WL_RATE_4X4_TXBF_VHT1SS4 = 325, + WL_RATE_4X4_TXBF_VHT2SS4 = 326, + WL_RATE_4X4_TXBF_VHT3SS4 = 327, + WL_RATE_4X4_TXBF_VHT4SS4 = 328, + WL_RATE_4X4_TXBF_VHT5SS4 = 329, + WL_RATE_4X4_TXBF_VHT6SS4 = 330, + WL_RATE_4X4_TXBF_VHT7SS4 = 331, + WL_RATE_P_4X4_TXBF_VHT8SS4 = 332, + WL_RATE_P_4X4_TXBF_VHT9SS4 = 333, + WL_RATE_P_4X4_TXBF_VHT10SS4 = 334, + WL_RATE_P_4X4_TXBF_VHT11SS4 = 335 + +} clm_rates_t; + +/* Number of rate codes */ +#define WL_NUMRATES 336 + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _bcmwifi_rates_h_ */ diff --git a/drivers/net/wireless/bcmdhd/bcmxtlv.c b/drivers/net/wireless/bcmdhd/bcmxtlv.c new file mode 100644 index 000000000000..26cfb9ac264a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/bcmxtlv.c @@ -0,0 +1,457 @@ +/* + * Driver O/S-independent utility routines + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmxtlv.c 527361 2015-01-17 01:48:34Z $ + */ + +#include + +#include +#include + +#include + +#ifdef BCMDRIVER +#include +#else /* !BCMDRIVER */ + #include /* AS!!! 
*/ +#include +#include +#include +#ifndef ASSERT +#define ASSERT(exp) +#endif +INLINE void* MALLOCZ(void *o, size_t s) { BCM_REFERENCE(o); return calloc(1, s); } +INLINE void MFREE(void *o, void *p, size_t s) { BCM_REFERENCE(o); BCM_REFERENCE(s); free(p); } +#endif /* !BCMDRIVER */ + +#include +#include + +static INLINE int bcm_xtlv_size_for_data(int dlen, bcm_xtlv_opts_t opts) +{ + return ((opts & BCM_XTLV_OPTION_ALIGN32) ? ALIGN_SIZE(dlen + BCM_XTLV_HDR_SIZE, 4) + : (dlen + BCM_XTLV_HDR_SIZE)); +} + +bcm_xtlv_t * +bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen, bcm_xtlv_opts_t opts) +{ + int sz; + /* advance to next elt */ + sz = BCM_XTLV_SIZE(elt, opts); + elt = (bcm_xtlv_t*)((uint8 *)elt + sz); + *buflen -= sz; + + /* validate next elt */ + if (!bcm_valid_xtlv(elt, *buflen, opts)) + return NULL; + + return elt; +} + +int +bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len, bcm_xtlv_opts_t opts) +{ + if (!tlv_buf || !buf || !len) + return BCME_BADARG; + + tlv_buf->opts = opts; + tlv_buf->size = len; + tlv_buf->head = buf; + tlv_buf->buf = buf; + return BCME_OK; +} + +uint16 +bcm_xtlv_buf_len(bcm_xtlvbuf_t *tbuf) +{ + if (tbuf == NULL) return 0; + return (uint16)(tbuf->buf - tbuf->head); +} +uint16 +bcm_xtlv_buf_rlen(bcm_xtlvbuf_t *tbuf) +{ + if (tbuf == NULL) return 0; + return tbuf->size - bcm_xtlv_buf_len(tbuf); +} +uint8 * +bcm_xtlv_buf(bcm_xtlvbuf_t *tbuf) +{ + if (tbuf == NULL) return NULL; + return tbuf->buf; +} +uint8 * +bcm_xtlv_head(bcm_xtlvbuf_t *tbuf) +{ + if (tbuf == NULL) return NULL; + return tbuf->head; +} +int +bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const void *data, uint16 dlen) +{ + bcm_xtlv_t *xtlv; + int size; + + if (tbuf == NULL) + return BCME_BADARG; + size = bcm_xtlv_size_for_data(dlen, tbuf->opts); + if (bcm_xtlv_buf_rlen(tbuf) < size) + return BCME_NOMEM; + xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf); + xtlv->id = htol16(type); + xtlv->len = htol16(dlen); + memcpy(xtlv->data, data, dlen); + tbuf->buf += size; + return BCME_OK; +} +int +bcm_xtlv_put_8(bcm_xtlvbuf_t *tbuf, uint16 type, const int8 data) +{ + bcm_xtlv_t *xtlv; + int size; + + if (tbuf == NULL) + return BCME_BADARG; + size = bcm_xtlv_size_for_data(1, tbuf->opts); + if (bcm_xtlv_buf_rlen(tbuf) < size) + return BCME_NOMEM; + xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf); + xtlv->id = htol16(type); + xtlv->len = htol16(sizeof(data)); + xtlv->data[0] = data; + tbuf->buf += size; + return BCME_OK; +} +int +bcm_xtlv_put_16(bcm_xtlvbuf_t *tbuf, uint16 type, const int16 data) +{ + bcm_xtlv_t *xtlv; + int size; + + if (tbuf == NULL) + return BCME_BADARG; + size = bcm_xtlv_size_for_data(2, tbuf->opts); + if (bcm_xtlv_buf_rlen(tbuf) < size) + return BCME_NOMEM; + + xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf); + xtlv->id = htol16(type); + xtlv->len = htol16(sizeof(data)); + htol16_ua_store(data, xtlv->data); + tbuf->buf += size; + return BCME_OK; +} +int +bcm_xtlv_put_32(bcm_xtlvbuf_t *tbuf, uint16 type, const int32 data) +{ + bcm_xtlv_t *xtlv; + int size; + + if (tbuf == NULL) + return BCME_BADARG; + size = bcm_xtlv_size_for_data(4, tbuf->opts); + if (bcm_xtlv_buf_rlen(tbuf) < size) + return BCME_NOMEM; + xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf); + xtlv->id = htol16(type); + xtlv->len = htol16(sizeof(data)); + htol32_ua_store(data, xtlv->data); + tbuf->buf += size; + return BCME_OK; +} + +/* + * unpacks an xtlv record from buf and checks the type + * copies data to the caller's buffer + * advances tlv pointer to next record + * the caller is responsible for the dst space check + */ +int +bcm_unpack_xtlv_entry(uint8
**tlv_buf, uint16 xpct_type, uint16 xpct_len, void *dst, + bcm_xtlv_opts_t opts) +{ + bcm_xtlv_t *ptlv = (bcm_xtlv_t *)*tlv_buf; + uint16 len; + uint16 type; + + ASSERT(ptlv); + /* tlv header is always packed in LE order */ + len = ltoh16(ptlv->len); + type = ltoh16(ptlv->id); + if (len == 0) { + /* z-len tlv headers: allow, but don't process */ + printf("z-len, skip unpack\n"); + } else { + if ((type != xpct_type) || + (len > xpct_len)) { + printf("xtlv_unpack Error: found[type:%d,len:%d] != xpct[type:%d,len:%d]\n", + type, len, xpct_type, xpct_len); + return BCME_BADARG; + } + /* copy tlv record to caller's buffer */ + memcpy(dst, ptlv->data, len); + } + *tlv_buf += BCM_XTLV_SIZE(ptlv, opts); + return BCME_OK; +} + +/* + * packs user data into tlv record + * advances tlv pointer to next xtlv slot + * buflen is used for tlv_buf space check + */ +int +bcm_pack_xtlv_entry(uint8 **tlv_buf, uint16 *buflen, uint16 type, uint16 len, void *src, + bcm_xtlv_opts_t opts) +{ + bcm_xtlv_t *ptlv = (bcm_xtlv_t *)*tlv_buf; + int size; + + ASSERT(ptlv); + ASSERT(src); + + size = bcm_xtlv_size_for_data(len, opts); + + /* check for space in the tlv buffer */ + if (size > *buflen) { + printf("bcm_pack_xtlv_entry: no space tlv_buf: requested:%d, available:%d\n", + size, *buflen); + return BCME_BADLEN; + } + ptlv->id = htol16(type); + ptlv->len = htol16(len); + + /* copy callers data */ + memcpy(ptlv->data, src, len); + + /* advance callers pointer to tlv buff */ + *tlv_buf += size; + /* decrement the len */ + *buflen -= (uint16)size; + return BCME_OK; +} + +/* + * unpack all xtlv records from the buffer and issue a callback + * to the set function, one call per found tlv record + */ +int +bcm_unpack_xtlv_buf(void *ctx, uint8 *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts, + bcm_xtlv_unpack_cbfn_t *cbfn) +{ + uint16 len; + uint16 type; + int res = BCME_OK; + int size; + bcm_xtlv_t *ptlv; + int sbuflen = buflen; + + ASSERT(!buflen || tlv_buf); + ASSERT(!buflen || cbfn); + + while (sbuflen >= (int)BCM_XTLV_HDR_SIZE) { + ptlv = (bcm_xtlv_t *)tlv_buf; + + /* tlv header is always packed in LE order */ + len = ltoh16(ptlv->len); + type = ltoh16(ptlv->id); + + size = bcm_xtlv_size_for_data(len, opts); + + sbuflen -= size; + /* check for possible buffer overrun */ + if (sbuflen < 0) + break; + + if ((res = cbfn(ctx, ptlv->data, type, len)) != BCME_OK) + break; + tlv_buf += size; + } + return res; +} + +int +bcm_pack_xtlv_buf(void *ctx, void *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts, + bcm_pack_xtlv_next_info_cbfn_t get_next, bcm_pack_xtlv_pack_next_cbfn_t pack_next, + int *outlen) +{ + int res = BCME_OK; + uint16 tlv_id; + uint16 tlv_len; + uint8 *startp; + uint8 *endp; + uint8 *buf; + bool more; + int size; + + ASSERT(get_next && pack_next); + + buf = (uint8 *)tlv_buf; + startp = buf; + endp = (uint8 *)buf + buflen; + more = TRUE; + while (more && (buf < endp)) { + more = get_next(ctx, &tlv_id, &tlv_len); + size = bcm_xtlv_size_for_data(tlv_len, opts); + if ((buf + size) >= endp) { + res = BCME_BUFTOOSHORT; + goto done; + } + + htol16_ua_store(tlv_id, buf); + htol16_ua_store(tlv_len, buf + sizeof(tlv_id)); + pack_next(ctx, tlv_id, tlv_len, buf + BCM_XTLV_HDR_SIZE); + buf += size; + } + + if (more) + res = BCME_BUFTOOSHORT; + +done: + if (outlen) { + *outlen = (int)(buf - startp); + } + return res; +} + +/* + * pack xtlv buffer from memory according to xtlv_desc_t + */ +int +bcm_pack_xtlv_buf_from_mem(void **tlv_buf, uint16 *buflen, xtlv_desc_t *items, + bcm_xtlv_opts_t opts) +{ + int res = BCME_OK; + uint8
*ptlv = (uint8 *)*tlv_buf; + + while (items->type != 0) { + if ((res = bcm_pack_xtlv_entry(&ptlv, + buflen, items->type, + items->len, items->ptr, opts)) != BCME_OK) { + break; + } + items++; + } + *tlv_buf = ptlv; /* update the external pointer */ + return res; +} + +/* + * unpack xtlv buffer to memory according to xtlv_desc_t + * + */ +int +bcm_unpack_xtlv_buf_to_mem(void *tlv_buf, int *buflen, xtlv_desc_t *items, bcm_xtlv_opts_t opts) +{ + int res = BCME_OK; + bcm_xtlv_t *elt; + + elt = bcm_valid_xtlv((bcm_xtlv_t *)tlv_buf, *buflen, opts) ? (bcm_xtlv_t *)tlv_buf : NULL; + if (!elt || !items) { + res = BCME_BADARG; + return res; + } + + for (; elt != NULL && res == BCME_OK; elt = bcm_next_xtlv(elt, buflen, opts)) { + /* find matches in desc_t items */ + xtlv_desc_t *dst_desc = items; + uint16 len = ltoh16(elt->len); + + while (dst_desc->type != 0) { + if (ltoh16(elt->id) == dst_desc->type) { + if (len != dst_desc->len) { + res = BCME_BADLEN; + } else { + memcpy(dst_desc->ptr, elt->data, len); + } + break; + } + dst_desc++; + } + } + + if (res == BCME_OK && *buflen != 0) + res = BCME_BUFTOOSHORT; + + return res; +} + +/* + * return data pointer of a given ID from xtlv buffer. + * If the specified xTLV ID is found, on return *datalen_out will contain + * the data length of the xTLV ID. + */ +void * +bcm_get_data_from_xtlv_buf(uint8 *tlv_buf, uint16 buflen, uint16 id, + uint16 *datalen_out, bcm_xtlv_opts_t opts) +{ + void *retptr = NULL; + uint16 type, len; + int size; + bcm_xtlv_t *ptlv; + int sbuflen = buflen; + + while (sbuflen >= (int)BCM_XTLV_HDR_SIZE) { + ptlv = (bcm_xtlv_t *)tlv_buf; + + /* tlv header is always packed in LE order */ + type = ltoh16(ptlv->id); + len = ltoh16(ptlv->len); + size = bcm_xtlv_size_for_data(len, opts); + + sbuflen -= size; + /* check for possible buffer overrun */ + if (sbuflen < 0) { + printf("%s %d: Invalid sbuflen %d\n", + __FUNCTION__, __LINE__, sbuflen); + break; + } + + if (id == type) { + retptr = ptlv->data; + if (datalen_out) { + *datalen_out = len; + } + break; + } + tlv_buf += size; + } + + return retptr; +} + +int bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts) +{ + int size; /* entire size of the XTLV including header, data, and optional padding */ + int len; /* XTLV's real value length without padding */ + + len = BCM_XTLV_LEN(elt); + + size = bcm_xtlv_size_for_data(len, opts); + + return size; +} diff --git a/drivers/net/wireless/bcmdhd/dhd.h b/drivers/net/wireless/bcmdhd/dhd.h new file mode 100644 index 000000000000..fbc6269a7594 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd.h @@ -0,0 +1,1607 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module.
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd.h 610267 2016-01-06 16:03:53Z $ + */ + +/**************** + * Common types * + */ + +#ifndef _dhd_h_ +#define _dhd_h_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK) +#include +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */ +#include +/* The kernel threading is sdio-specific */ +struct task_struct; +struct sched_param; +int setScheduler(struct task_struct *p, int policy, struct sched_param *param); +int get_scheduler_policy(struct task_struct *p); +#define MAX_EVENT 16 + +#define ALL_INTERFACES 0xff + +#include +#include + +#if defined(BCMWDF) +#include +#include +#endif /* (BCMWDF) */ + +#ifdef DEBUG_DPC_THREAD_WATCHDOG +#define MAX_RESCHED_CNT 600 +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) && LINUX_VERSION_CODE < \ + KERNEL_VERSION(3, 18, 0) || defined(CONFIG_BCMDHD_VENDOR_EXT)) +#define WL_VENDOR_EXT_SUPPORT +#endif /* 3.13.0 <= LINUX_KERNEL_VERSION < 3.18.0 || CONFIG_BCMDHD_VENDOR_EXT */ +#if defined(CONFIG_ANDROID) && defined(WL_VENDOR_EXT_SUPPORT) +#if !defined(GSCAN_SUPPORT) +#define GSCAN_SUPPORT +#endif +#endif /* CONFIG_ANDROID && WL_VENDOR_EXT_SUPPORT */ + +#if defined(KEEP_ALIVE) +/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */ +#define KEEP_ALIVE_PERIOD 55000 +#define NULL_PKT_STR "null_pkt" +#endif /* KEEP_ALIVE */ + +/* Forward decls */ +struct dhd_bus; +struct dhd_prot; +struct dhd_info; +struct dhd_ioctl; + +/* The level of bus communication with the dongle */ +enum dhd_bus_state { + DHD_BUS_DOWN, /* Not ready for frame transfers */ + DHD_BUS_LOAD, /* Download access only (CPU reset) */ + DHD_BUS_DATA, /* Ready for frame transfers */ + DHD_BUS_SUSPEND, /* Bus has been suspended */ + DHD_BUS_DOWN_IN_PROGRESS, /* Bus going Down */ +}; + +/* + * Bit fields to indicate cleanup processes that wait till they are finished.
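+ * + * Illustrative test of one of the bits below (a sketch, not part of this patch, + * assuming a dhd_pub_t *dhdp is in scope): + * (dhdp->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX) != 0 means the TX path is + * still busy and cleanup must keep waiting.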
+ * Future synchronizable processes can add their bit field below and update + * their functionalities accordingly + */ +#define DHD_BUS_BUSY_IN_TX 0x01 +#define DHD_BUS_BUSY_IN_SEND_PKT 0x02 +#define DHD_BUS_BUSY_IN_DPC 0x04 +#define DHD_BUS_BUSY_IN_WD 0x08 +#define DHD_BUS_BUSY_IN_IOVAR 0x10 +#define DHD_BUS_BUSY_IN_DHD_IOVAR 0x20 +#define DHD_BUS_BUSY_IN_SUSPEND 0x40 +#define DHD_BUS_BUSY_IN_RESUME 0x80 +#define DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS 0x100 +#define DHD_BUS_BUSY_RPM_SUSPEND_DONE 0x200 +#define DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS 0x400 +#define DHD_BUS_BUSY_RPM_ALL (DHD_BUS_BUSY_RPM_SUSPEND_DONE | \ + DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS | \ + DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS) + +/* Download Types */ +typedef enum download_type { + FW, + NVRAM +} download_type_t; + + +/* For supporting multiple interfaces */ +#define DHD_MAX_IFS 16 +#define DHD_DEL_IF -0xE +#define DHD_BAD_IF -0xF + +enum dhd_op_flags { +/* Firmware requested operation mode */ + DHD_FLAG_STA_MODE = (1 << (0)), /* STA only */ + DHD_FLAG_HOSTAP_MODE = (1 << (1)), /* SOFTAP only */ + DHD_FLAG_P2P_MODE = (1 << (2)), /* P2P Only */ + /* STA + P2P */ + DHD_FLAG_CONCURR_SINGLE_CHAN_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_P2P_MODE), + DHD_FLAG_CONCURR_MULTI_CHAN_MODE = (1 << (4)), /* STA + P2P */ + /* Current P2P mode for P2P connection */ + DHD_FLAG_P2P_GC_MODE = (1 << (5)), + DHD_FLAG_P2P_GO_MODE = (1 << (6)), + DHD_FLAG_MBSS_MODE = (1 << (7)), /* MBSS in future */ + DHD_FLAG_IBSS_MODE = (1 << (8)), + DHD_FLAG_MFG_MODE = (1 << (9)), + DHD_FLAG_RSDB_MODE = (1 << (10)), + DHD_FLAG_MP2P_MODE = (1 << (11)) +}; + +#define DHD_OPMODE_SUPPORTED(dhd, opmode_flag) \ + (dhd ? ((((dhd_pub_t *)dhd)->op_mode) & opmode_flag) : -1) + +/* Max sequential TX/RX Control timeouts to set HANG event */ +#ifndef MAX_CNTL_TX_TIMEOUT +#define MAX_CNTL_TX_TIMEOUT 2 +#endif /* MAX_CNTL_TX_TIMEOUT */ +#ifndef MAX_CNTL_RX_TIMEOUT +#define MAX_CNTL_RX_TIMEOUT 1 +#endif /* MAX_CNTL_RX_TIMEOUT */ + +#define DHD_SCAN_ASSOC_ACTIVE_TIME 40 /* ms: Embedded default Active setting from DHD */ +#define DHD_SCAN_UNASSOC_ACTIVE_TIME 80 /* ms: Embedded def.
Unassoc Active setting from DHD */ +#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD */ + +#ifndef POWERUP_MAX_RETRY +#define POWERUP_MAX_RETRY 3 /* how many times we retry to power up the chip */ +#endif +#ifndef POWERUP_WAIT_MS +#define POWERUP_WAIT_MS 2000 /* ms: timeout waiting for wifi to come up */ +#endif +#define MAX_NVRAMBUF_SIZE (16 * 1024) /* max nvram buf size */ +#ifdef DHD_DEBUG +#define DHD_JOIN_MAX_TIME_DEFAULT 10000 /* ms: Max timeout for joining AP */ +#define DHD_SCAN_DEF_TIMEOUT 10000 /* ms: Max timeout for scan in progress */ +#endif + +#define FW_VER_STR_LEN 128 + +enum dhd_bus_wake_state { + WAKE_LOCK_OFF, + WAKE_LOCK_PRIV, + WAKE_LOCK_DPC, + WAKE_LOCK_IOCTL, + WAKE_LOCK_DOWNLOAD, + WAKE_LOCK_TMOUT, + WAKE_LOCK_WATCHDOG, + WAKE_LOCK_LINK_DOWN_TMOUT, + WAKE_LOCK_PNO_FIND_TMOUT, + WAKE_LOCK_SOFTAP_SET, + WAKE_LOCK_SOFTAP_STOP, + WAKE_LOCK_SOFTAP_START, + WAKE_LOCK_SOFTAP_THREAD +}; + +enum dhd_prealloc_index { + DHD_PREALLOC_PROT = 0, + DHD_PREALLOC_RXBUF, + DHD_PREALLOC_DATABUF, + DHD_PREALLOC_OSL_BUF, +#if defined(STATIC_WL_PRIV_STRUCT) + DHD_PREALLOC_WIPHY_ESCAN0 = 5, +#endif /* STATIC_WL_PRIV_STRUCT */ + DHD_PREALLOC_DHD_INFO = 7, + DHD_PREALLOC_DHD_WLFC_INFO = 8, + DHD_PREALLOC_IF_FLOW_LKUP = 9, + DHD_PREALLOC_MEMDUMP_BUF = 10, + DHD_PREALLOC_MEMDUMP_RAM = 11, + DHD_PREALLOC_DHD_WLFC_HANGER = 12, + DHD_PREALLOC_PKTID_MAP = 13, + DHD_PREALLOC_PKTID_MAP_IOCTL = 14, + DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15 +}; + +enum dhd_dongledump_mode { + DUMP_DISABLED = 0, + DUMP_MEMONLY, + DUMP_MEMFILE, + DUMP_MEMFILE_BUGON, + DUMP_MEMFILE_MAX +}; + +enum dhd_dongledump_type { + DUMP_TYPE_RESUMED_ON_TIMEOUT = 1, + DUMP_TYPE_D3_ACK_TIMEOUT, + DUMP_TYPE_DONGLE_TRAP, + DUMP_TYPE_MEMORY_CORRUPTION, + DUMP_TYPE_PKTID_AUDIT_FAILURE, + DUMP_TYPE_SCAN_TIMEOUT, + DUMP_TYPE_SCAN_BUSY, + DUMP_TYPE_BY_SYSDUMP, + DUMP_TYPE_BY_LIVELOCK, + DUMP_TYPE_AP_LINKUP_FAILURE +}; + +enum dhd_hang_reason { + HANG_REASON_MASK = 0x8000, + HANG_REASON_IOCTL_RESP_TIMEOUT = 0x8001, + HANG_REASON_DONGLE_TRAP = 0x8002, + HANG_REASON_D3_ACK_TIMEOUT = 0x8003, + HANG_REASON_BUS_DOWN = 0x8004, + HANG_REASON_PCIE_LINK_DOWN = 0x8005, + HANG_REASON_MSGBUF_LIVELOCK = 0x8006, + HANG_REASON_P2P_IFACE_DEL_FAILURE = 0x8007, + HANG_REASON_HT_AVAIL_ERROR = 0x8008, + HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009, + HANG_REASON_MAX = 0x800a +}; + +enum dhd_rsdb_scan_features { + /* Downgraded scan feature for AP active */ + RSDB_SCAN_DOWNGRADED_AP_SCAN = 0x01, + /* Downgraded scan feature for P2P Discovery */ + RSDB_SCAN_DOWNGRADED_P2P_DISC_SCAN = 0x02, + /* Enable channel pruning for ROAM SCAN */ + RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM = 0x10, + /* Enable channel pruning for any SCAN */ + RSDB_SCAN_DOWNGRADED_CH_PRUNE_ALL = 0x20 +}; + +/* Packet alignment for most efficient SDIO (can change based on platform) */ +#ifndef DHD_SDALIGN +#define DHD_SDALIGN 32 +#endif + +/** + * DMA-able buffer parameters + * - dmaaddr_t is 32bits on a 32bit host. + * dhd_dma_buf::pa may not be used as a sh_addr_t, bcm_addr64_t or uintptr + * - dhd_dma_buf::_alloced is ONLY for freeing a DMA-able buffer.
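+ * + * Illustrative invariants (a sketch, not stated explicitly in this patch): after a + * successful allocation one would expect + * buf->va != NULL && buf->len != 0 && buf->_alloced >= buf->len + * since _alloced covers the alignment and padding added on top of the requested len.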
+ */ +typedef struct dhd_dma_buf { + void *va; /* virtual address of buffer */ + uint32 len; /* user requested buffer length */ + dmaaddr_t pa; /* physical address of buffer */ + void *dmah; /* dma mapper handle */ + void *secdma; /* secure dma sec_cma_info handle */ + uint32 _alloced; /* actual size of buffer allocated with align and pad */ +} dhd_dma_buf_t; + +/* host packet reordering logic */ +/* structure to hold the reorder buffers (void **p) */ +typedef struct reorder_info { + void **p; + uint8 flow_id; + uint8 cur_idx; + uint8 exp_idx; + uint8 max_idx; + uint8 pend_pkts; +} reorder_info_t; + +#ifdef DHDTCPACK_SUPPRESS + +enum { + /* TCPACK suppress off */ + TCPACK_SUP_OFF, + /* Replace TCPACK in txq when new coming one has higher ACK number. */ + TCPACK_SUP_REPLACE, + /* TCPACK_SUP_REPLACE + delayed TCPACK TX unless ACK to PSH DATA. + * This will give benefits to Half-Duplex bus interfaces (e.g. SDIO) in that + * 1. we are able to read TCP DATA packets first from the bus + * 2. TCPACKs that do not need to be delivered in a hurry remain longer in the TXQ and so can be suppressed. + */ + TCPACK_SUP_DELAYTX, + TCPACK_SUP_HOLD, + TCPACK_SUP_LAST_MODE +}; +#endif /* DHDTCPACK_SUPPRESS */ + + +/* + * Accumulating the queue lengths of all flowring queues in a parent object, + * to assert flow control when the cumulative queue length crosses an upper + * threshold defined on a parent object. The upper threshold may be maintained + * at a station level, at an interface level, or at a dhd instance. + * + * cumm_ctr_t abstraction: + * The cumm_ctr_t abstraction may be enhanced to use an object with a hysteresis + * pause on/off threshold callback. + * All macros use the address of the cumulative length in the parent objects. + * + * BCM_GMAC3 builds use a single perimeter lock, as opposed to a per queue lock. + * Cumulative counters in parent objects may be updated without spinlocks. + * + * In non BCM_GMAC3, if a cumulative queue length is desired across all flows + * belonging to either of (a station, or an interface or a dhd instance), then + * an atomic operation is required using atomic_t cumulative counters or + * using a spinlock. BCM_ROUTER_DHD uses the Linux atomic_t construct. + */ + +/* Cumulative length not supported.
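+ * The DHD_CUMM_CTR_INIT/INCR/DECR macros below therefore reduce to NULL-pointer + * assertions and perform no arithmetic; e.g. (illustrative) + * DHD_CUMM_CTR_INCR(&dhdp->cumm_ctr) validates the pointer but leaves the + * counter untouched.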
*/ +typedef uint32 cumm_ctr_t; +#define DHD_CUMM_CTR_PTR(clen) ((cumm_ctr_t*)(clen)) +#define DHD_CUMM_CTR(clen) *(DHD_CUMM_CTR_PTR(clen)) /* accessor */ +#define DHD_CUMM_CTR_READ(clen) DHD_CUMM_CTR(clen) /* read access */ +#define DHD_CUMM_CTR_INIT(clen) \ + ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); +#define DHD_CUMM_CTR_INCR(clen) \ + ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); +#define DHD_CUMM_CTR_DECR(clen) \ + ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL)); + +/* DMA'ing r/w indices for rings supported */ +#ifdef BCM_INDX_TCM /* FW gets r/w indices in TCM */ +#define DMA_INDX_ENAB(dma_indxsup) 0 +#elif defined BCM_INDX_DMA /* FW gets r/w indices from Host memory */ +#define DMA_INDX_ENAB(dma_indxsup) 1 +#else /* r/w indices in TCM or host memory based on FW/Host agreement */ +#define DMA_INDX_ENAB(dma_indxsup) dma_indxsup +#endif /* BCM_INDX_TCM */ + +#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) +struct tdls_peer_node { + uint8 addr[ETHER_ADDR_LEN]; + struct tdls_peer_node *next; +}; +typedef struct tdls_peer_node tdls_peer_node_t; +typedef struct { + tdls_peer_node_t *node; + uint8 tdls_peer_count; +} tdls_peer_tbl_t; +#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */ + +#ifdef DHD_LOG_DUMP +/* the structure below describes the ring buffer. */ +struct dhd_log_dump_buf +{ + spinlock_t lock; + unsigned int wraparound; + unsigned long max; + unsigned int remain; + char* present; + char* front; + char* buffer; +}; + +#define DHD_LOG_DUMP_BUFFER_SIZE (1024 * 1024) +#define DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE 256 + +extern void dhd_log_dump_print(const char *fmt, ...); +extern char *dhd_log_dump_get_timestamp(void); +#endif /* DHD_LOG_DUMP */ +#define DHD_COMMON_DUMP_PATH "/data/misc/wifi/log/" + +/* Common structure for module and instance linkage */ +typedef struct dhd_pub { + /* Linkage pointers */ + osl_t *osh; /* OSL handle */ + struct dhd_bus *bus; /* Bus module handle */ + struct dhd_prot *prot; /* Protocol module handle */ + struct dhd_info *info; /* Info module handle */ + + /* to NDIS developer, the structure dhd_common is redundant, + * please do NOT merge it back from other branches !!!
+ */ + + + /* Internal dhd items */ + bool up; /* Driver up/down (to OS) */ + bool txoff; /* Transmit flow-controlled */ + bool dongle_reset; /* TRUE = DEVRESET put dongle into reset */ + enum dhd_bus_state busstate; + uint dhd_bus_busy_state; /* Bus busy state */ + uint hdrlen; /* Total DHD header length (proto + bus) */ + uint maxctl; /* Max size rxctl request from proto to bus */ + uint rxsz; /* Rx buffer size bus module should use */ + uint8 wme_dp; /* wme discard priority */ + + /* Dongle media info */ + bool iswl; /* Dongle-resident driver is wl */ + ulong drv_version; /* Version of dongle-resident driver */ + struct ether_addr mac; /* MAC address obtained from dongle */ + dngl_stats_t dstats; /* Stats for dongle-based data */ + + /* Additional stats for the bus level */ + ulong tx_packets; /* Data packets sent to dongle */ + ulong tx_dropped; /* Data packets dropped in dhd */ + ulong tx_multicast; /* Multicast data packets sent to dongle */ + ulong tx_errors; /* Errors in sending data to dongle */ + ulong tx_ctlpkts; /* Control packets sent to dongle */ + ulong tx_ctlerrs; /* Errors sending control frames to dongle */ + ulong rx_packets; /* Packets sent up the network interface */ + ulong rx_multicast; /* Multicast packets sent up the network interface */ + ulong rx_errors; /* Errors processing rx data packets */ + ulong rx_ctlpkts; /* Control frames processed from dongle */ + ulong rx_ctlerrs; /* Errors in processing rx control frames */ + ulong rx_dropped; /* Packets dropped locally (no memory) */ + ulong rx_flushed; /* Packets flushed due to unscheduled sendup thread */ + ulong wd_dpc_sched; /* Number of times dhd dpc scheduled by watchdog timer */ + ulong rx_pktgetfail; /* Number of PKTGET failures in DHD on RX */ + ulong tx_pktgetfail; /* Number of PKTGET failures in DHD on TX */ + ulong rx_readahead_cnt; /* Number of packets where header read-ahead was used. */ + ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */ + ulong fc_packets; /* Number of flow control pkts recvd */ + + /* Last error return */ + int bcmerror; + uint tickcnt; + + /* Last error from dongle */ + int dongle_error; + + uint8 country_code[WLC_CNTRY_BUF_SZ]; + + /* Suspend disable flag and "in suspend" flag */ + int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */ + int in_suspend; /* flag set to 1 when early suspend called */ +#ifdef PNO_SUPPORT + int pno_enable; /* pno status : "1" is pno enable */ + int pno_suspend; /* pno suspend status : "1" is pno suspended */ +#endif /* PNO_SUPPORT */ + /* DTIM skip value, default 0(or 1) means wake each DTIM + * 3 means skip 2 DTIMs and wake up 3rd DTIM(9th beacon when AP DTIM is 3) + */ + int suspend_bcn_li_dtim; /* bcn_li_dtim value in suspend mode */ +#ifdef PKT_FILTER_SUPPORT + int early_suspended; /* Early suspend status */ + int dhcp_in_progress; /* DHCP period */ +#endif + + /* Pkt filter definition */ + char * pktfilter[100]; + int pktfilter_count; + + wl_country_t dhd_cspec; /* Current Locale info */ +#ifdef CUSTOM_COUNTRY_CODE + u32 dhd_cflags; +#endif /* CUSTOM_COUNTRY_CODE */ + bool force_country_change; + char eventmask[WL_EVENTING_MASK_LEN]; + int op_mode; /* STA, HostAPD, WFD, SoftAP */ + +/* Set this to 1 to use a separate interface (p2p0) for p2p operations.
+ * For ICS MR1 releases it should be disabled to be compatible with ICS MR1 Framework + * see target dhd-cdc-sdmmc-panda-cfg80211-icsmr1-gpl-debug in Makefile + */ +/* #define WL_ENABLE_P2P_IF 1 */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */ + struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */ +#endif + +#ifdef PROP_TXSTATUS + bool wlfc_enabled; + int wlfc_mode; + void* wlfc_state; + /* + Mode in which the dhd flow control shall operate. Must be set before + traffic starts to the device. + 0 - Do not do any proptxstatus flow control + 1 - Use implied credit from a packet status + 2 - Use explicit credit + 3 - Only AMPDU hostreorder used. no wlfc. + */ + uint8 proptxstatus_mode; + bool proptxstatus_txoff; + bool proptxstatus_module_ignore; + bool proptxstatus_credit_ignore; + bool proptxstatus_txstatus_ignore; + + bool wlfc_rxpkt_chk; + /* + * implement below functions in each platform if needed. + */ + /* platform specific function whether to skip flow control */ + bool (*skip_fc)(void); + /* platform specific function for wlfc_enable and wlfc_deinit */ + void (*plat_init)(void *dhd); + void (*plat_deinit)(void *dhd); +#ifdef DHD_WLFC_THREAD + bool wlfc_thread_go; + struct task_struct* wlfc_thread; + wait_queue_head_t wlfc_wqhead; +#endif /* DHD_WLFC_THREAD */ +#endif /* PROP_TXSTATUS */ +#ifdef PNO_SUPPORT + void *pno_state; +#endif +#ifdef RTT_SUPPORT + void *rtt_state; +#endif + bool dongle_isolation; + bool dongle_trap_occured; /* flag for sending HANG event to upper layer */ + int hang_was_sent; + int rxcnt_timeout; /* counter rxcnt timeout to send HANG */ + int txcnt_timeout; /* counter txcnt timeout to send HANG */ +#ifdef BCMPCIE + int d3ackcnt_timeout; /* counter d3ack timeout to send HANG */ +#endif /* BCMPCIE */ + bool hang_report; /* enable hang report by default */ + uint16 hang_reason; /* reason codes for HANG event */ +#ifdef WLMEDIA_HTSF + uint8 htsfdlystat_sz; /* Size of delay stats, max 255B */ +#endif +#ifdef WLTDLS + bool tdls_enable; +#endif + struct reorder_info *reorder_bufs[WLHOST_REORDERDATA_MAXFLOWS]; + #define WLC_IOCTL_MAXBUF_FWCAP 512 + char fw_capabilities[WLC_IOCTL_MAXBUF_FWCAP]; + #define MAXSKBPEND 1024 + void *skbbuf[MAXSKBPEND]; + uint32 store_idx; + uint32 sent_idx; +#ifdef DHDTCPACK_SUPPRESS + uint8 tcpack_sup_mode; /* TCPACK suppress mode */ + void *tcpack_sup_module; /* TCPACK suppress module */ + uint32 tcpack_sup_ratio; + uint32 tcpack_sup_delay; +#endif /* DHDTCPACK_SUPPRESS */ +#if defined(ARP_OFFLOAD_SUPPORT) + uint32 arp_version; +#endif +#ifdef DEBUG_DPC_THREAD_WATCHDOG + bool dhd_bug_on; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ +#ifdef CUSTOM_SET_CPUCORE + struct task_struct * current_dpc; + struct task_struct * current_rxf; + int chan_isvht80; +#endif /* CUSTOM_SET_CPUCORE */ + + + void *sta_pool; /* pre-allocated pool of sta objects */ + void *staid_allocator; /* allocator of sta indexes */ +#ifdef PCIE_FULL_DONGLE + bool flow_rings_inited; /* set this flag after initializing flow rings */ +#endif /* PCIE_FULL_DONGLE */ + void *flowid_allocator; /* unique flowid allocator */ + void *flow_ring_table; /* flow ring table, include prot and bus info */ + void *if_flow_lkup; /* per interface flowid lkup hash table */ + void *flowid_lock; /* per os lock for flowid info protection */ + void *flowring_list_lock; /* per os lock for flowring list protection */ + uint32 num_flow_rings; + cumm_ctr_t cumm_ctr; /* cumm queue length placeholder */
+ uint32 d2h_sync_mode; /* D2H DMA completion sync mode */ + uint8 flow_prio_map[NUMPRIO]; + uint8 flow_prio_map_type; + char enable_log[MAX_EVENT]; + bool dma_d2h_ring_upd_support; + bool dma_h2d_ring_upd_support; + +#ifdef DHD_WMF + bool wmf_ucast_igmp; +#ifdef DHD_IGMP_UCQUERY + bool wmf_ucast_igmp_query; +#endif +#ifdef DHD_UCAST_UPNP + bool wmf_ucast_upnp; +#endif +#endif /* DHD_WMF */ +#ifdef DHD_L2_FILTER + unsigned long l2_filter_cnt; /* for L2_FILTER ARP table timeout */ +#endif /* DHD_L2_FILTER */ + uint8 *soc_ram; + uint32 soc_ram_length; + uint32 memdump_type; +#ifdef DHD_FW_COREDUMP + uint32 memdump_enabled; +#endif /* DHD_FW_COREDUMP */ +#ifdef PCIE_FULL_DONGLE +#ifdef WLTDLS + tdls_peer_tbl_t peer_tbl; +#endif /* WLTDLS */ +#endif /* PCIE_FULL_DONGLE */ +#ifdef CACHE_FW_IMAGES + char *cached_fw; + int cached_fw_length; + char *cached_nvram; + int cached_nvram_length; +#endif +#ifdef WLTDLS + uint32 tdls_mode; +#endif +#ifdef DHD_LOSSLESS_ROAMING + uint8 dequeue_prec_map; +#endif + struct mutex wl_up_lock; + bool is_fw_download_done; +#ifdef DHD_LOG_DUMP + struct dhd_log_dump_buf dld_buf; + unsigned int dld_enable; +#endif /* DHD_LOG_DUMP */ +} dhd_pub_t; + +#if defined(PCIE_FULL_DONGLE) + +/* Packet Tag for PCIE Full Dongle DHD */ +typedef struct dhd_pkttag_fd { + uint16 flowid; /* Flowring Id */ + uint16 dataoff; /* start of packet */ + uint16 dma_len; /* pkt len for DMA_MAP/UNMAP */ + dmaaddr_t pa; /* physical address */ + void *dmah; /* dma mapper handle */ + void *secdma; /* secure dma sec_cma_info handle */ +} dhd_pkttag_fd_t; + +/* Packet Tag for DHD PCIE Full Dongle */ +#define DHD_PKTTAG_FD(pkt) ((dhd_pkttag_fd_t *)(PKTTAG(pkt))) + +#define DHD_PKT_GET_FLOWID(pkt) ((DHD_PKTTAG_FD(pkt))->flowid) +#define DHD_PKT_SET_FLOWID(pkt, pkt_flowid) \ + DHD_PKTTAG_FD(pkt)->flowid = (uint16)(pkt_flowid) + +#define DHD_PKT_GET_DATAOFF(pkt) ((DHD_PKTTAG_FD(pkt))->dataoff) +#define DHD_PKT_SET_DATAOFF(pkt, pkt_dataoff) \ + DHD_PKTTAG_FD(pkt)->dataoff = (uint16)(pkt_dataoff) + +#define DHD_PKT_GET_DMA_LEN(pkt) ((DHD_PKTTAG_FD(pkt))->dma_len) +#define DHD_PKT_SET_DMA_LEN(pkt, pkt_dma_len) \ + DHD_PKTTAG_FD(pkt)->dma_len = (uint16)(pkt_dma_len) + +#define DHD_PKT_GET_PA(pkt) ((DHD_PKTTAG_FD(pkt))->pa) +#define DHD_PKT_SET_PA(pkt, pkt_pa) \ + DHD_PKTTAG_FD(pkt)->pa = (dmaaddr_t)(pkt_pa) + +#define DHD_PKT_GET_DMAH(pkt) ((DHD_PKTTAG_FD(pkt))->dmah) +#define DHD_PKT_SET_DMAH(pkt, pkt_dmah) \ + DHD_PKTTAG_FD(pkt)->dmah = (void *)(pkt_dmah) + +#define DHD_PKT_GET_SECDMA(pkt) ((DHD_PKTTAG_FD(pkt))->secdma) +#define DHD_PKT_SET_SECDMA(pkt, pkt_secdma) \ + DHD_PKTTAG_FD(pkt)->secdma = (void *)(pkt_secdma) +#endif /* PCIE_FULL_DONGLE */ + +#if defined(BCMWDF) +typedef struct { + dhd_pub_t *dhd_pub; +} dhd_workitem_context_t; + +WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_context) +#endif /* (BCMWDF) */ + + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) + + #define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a); + #define _DHD_PM_RESUME_WAIT(a, b) do {\ + int retry = 0; \ + SMP_RD_BARRIER_DEPENDS(); \ + while (dhd_mmc_suspend && retry++ != b) { \ + SMP_RD_BARRIER_DEPENDS(); \ + wait_event_interruptible_timeout(a, !dhd_mmc_suspend, 1); \ + } \ + } while (0) + #define DHD_PM_RESUME_WAIT(a) _DHD_PM_RESUME_WAIT(a, 200) + #define DHD_PM_RESUME_WAIT_FOREVER(a) _DHD_PM_RESUME_WAIT(a, ~0) + #define DHD_PM_RESUME_RETURN_ERROR(a) do { \ + if (dhd_mmc_suspend) { \ + printf("%s[%d]: mmc is still in suspend state!!!\n", \ + 
__FUNCTION__, __LINE__); \ + return a; \ + } \ + } while (0) + #define DHD_PM_RESUME_RETURN do { if (dhd_mmc_suspend) return; } while (0) + + #define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a); + #define SPINWAIT_SLEEP(a, exp, us) do { \ + uint countdown = (us) + 9999; \ + while ((exp) && (countdown >= 10000)) { \ + wait_event_interruptible_timeout(a, FALSE, 1); \ + countdown -= 10000; \ + } \ + } while (0) + + #else + + #define DHD_PM_RESUME_WAIT_INIT(a) + #define DHD_PM_RESUME_WAIT(a) + #define DHD_PM_RESUME_WAIT_FOREVER(a) + #define DHD_PM_RESUME_RETURN_ERROR(a) + #define DHD_PM_RESUME_RETURN + + #define DHD_SPINWAIT_SLEEP_INIT(a) + #define SPINWAIT_SLEEP(a, exp, us) do { \ + uint countdown = (us) + 9; \ + while ((exp) && (countdown >= 10)) { \ + OSL_DELAY(10); \ + countdown -= 10; \ + } \ + } while (0) + + #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ + +#ifndef OSL_SLEEP +#define OSL_SLEEP(ms) OSL_DELAY((ms)*1000) +#endif /* OSL_SLEEP */ + +#define DHD_IF_VIF 0x01 /* Virtual IF (Hidden from user) */ + +#ifdef PNO_SUPPORT +int dhd_pno_clean(dhd_pub_t *dhd); +#endif /* PNO_SUPPORT */ +/* + * Wake locks are an Android power management concept. They are used by applications and services + * to request CPU resources. + */ +extern int dhd_os_wake_lock(dhd_pub_t *pub); +extern int dhd_os_wake_unlock(dhd_pub_t *pub); +extern int dhd_event_wake_lock(dhd_pub_t *pub); +extern int dhd_event_wake_unlock(dhd_pub_t *pub); +extern int dhd_os_wake_lock_waive(dhd_pub_t *pub); +extern int dhd_os_wake_lock_restore(dhd_pub_t *pub); +extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub); +extern int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val); +extern int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val); +extern int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub); +extern int dhd_os_wd_wake_lock(dhd_pub_t *pub); +extern int dhd_os_wd_wake_unlock(dhd_pub_t *pub); +extern void dhd_os_wake_lock_init(struct dhd_info *dhd); +extern void dhd_os_wake_lock_destroy(struct dhd_info *dhd); +#ifdef BCMPCIE_OOB_HOST_WAKE +extern void dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val); +extern void dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK +extern void dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val); +extern void dhd_os_scan_wake_unlock(dhd_pub_t *pub); +#endif /* DHD_USE_SCAN_WAKELOCK */ + +inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_init(&dhdp->wl_softap_lock); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ +} + +inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_lock(&dhdp->wl_softap_lock); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ +} + +inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_unlock(&dhdp->wl_softap_lock); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */ +} + +#define DHD_OS_WAKE_LOCK(pub) dhd_os_wake_lock(pub) +#define DHD_OS_WAKE_UNLOCK(pub) dhd_os_wake_unlock(pub) +#define DHD_EVENT_WAKE_LOCK(pub) dhd_event_wake_lock(pub) +#define DHD_EVENT_WAKE_UNLOCK(pub) dhd_event_wake_unlock(pub) +#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) dhd_os_wake_lock_timeout(pub) +#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \ + dhd_os_wake_lock_rx_timeout_enable(pub, val) +#define 
DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \ + dhd_os_wake_lock_ctrl_timeout_enable(pub, val) +#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \ + dhd_os_wake_lock_ctrl_timeout_cancel(pub) +#define DHD_OS_WAKE_LOCK_WAIVE(pub) dhd_os_wake_lock_waive(pub) +#define DHD_OS_WAKE_LOCK_RESTORE(pub) dhd_os_wake_lock_restore(pub) +#define DHD_OS_WAKE_LOCK_INIT(dhd) dhd_os_wake_lock_init(dhd) +#define DHD_OS_WAKE_LOCK_DESTROY(dhd) dhd_os_wake_lock_destroy(dhd) + +#define DHD_OS_WD_WAKE_LOCK(pub) dhd_os_wd_wake_lock(pub) +#define DHD_OS_WD_WAKE_UNLOCK(pub) dhd_os_wd_wake_unlock(pub) + +#ifdef BCMPCIE_OOB_HOST_WAKE +#define OOB_WAKE_LOCK_TIMEOUT 500 +#define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_oob_irq_wake_lock_timeout(pub, val) +#define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub) dhd_os_oob_irq_wake_unlock(pub) +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK +#ifdef DHD_DEBUG_SCAN_WAKELOCK +#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) \ + do { \ + printf("call wake_lock_scan: %s %d\n", \ + __FUNCTION__, __LINE__); \ + dhd_os_scan_wake_lock_timeout(pub, val); \ + } while (0) +#define DHD_OS_SCAN_WAKE_UNLOCK(pub) \ + do { \ + printf("call wake_unlock_scan: %s %d\n", \ + __FUNCTION__, __LINE__); \ + dhd_os_scan_wake_unlock(pub); \ + } while (0) +#else +#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_scan_wake_lock_timeout(pub, val) +#define DHD_OS_SCAN_WAKE_UNLOCK(pub) dhd_os_scan_wake_unlock(pub) +#endif /* DHD_DEBUG_SCAN_WAKELOCK */ +#else +#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) +#define DHD_OS_SCAN_WAKE_UNLOCK(pub) +#endif /* DHD_USE_SCAN_WAKELOCK */ +#define DHD_PACKET_TIMEOUT_MS 500 +#define DHD_EVENT_TIMEOUT_MS 1500 +#define SCAN_WAKE_LOCK_TIMEOUT 10000 + +/* Enum for IOCTL received status */ +typedef enum dhd_ioctl_recieved_status +{ + IOCTL_WAIT = 0, + IOCTL_RETURN_ON_SUCCESS, + IOCTL_RETURN_ON_TRAP, + IOCTL_RETURN_ON_BUS_STOP +} dhd_ioctl_recieved_status_t; + +/* interface operations (register, remove) should be atomic; use this lock to prevent race + * conditions between Wi-Fi on/off and the interface operation functions + */ +void dhd_net_if_lock(struct net_device *dev); +void dhd_net_if_unlock(struct net_device *dev); + + +typedef enum dhd_attach_states +{ + DHD_ATTACH_STATE_INIT = 0x0, + DHD_ATTACH_STATE_NET_ALLOC = 0x1, + DHD_ATTACH_STATE_DHD_ALLOC = 0x2, + DHD_ATTACH_STATE_ADD_IF = 0x4, + DHD_ATTACH_STATE_PROT_ATTACH = 0x8, + DHD_ATTACH_STATE_WL_ATTACH = 0x10, + DHD_ATTACH_STATE_THREADS_CREATED = 0x20, + DHD_ATTACH_STATE_WAKELOCKS_INIT = 0x40, + DHD_ATTACH_STATE_CFG80211 = 0x80, + DHD_ATTACH_STATE_EARLYSUSPEND_DONE = 0x100, + DHD_ATTACH_STATE_DONE = 0x200 +} dhd_attach_states_t; + +/* Value -1 means we are unsuccessful in creating the kthread. */ +#define DHD_PID_KT_INVALID -1 +/* Value -2 means we are unsuccessful in creating both the kthread and the tasklet */ +#define DHD_PID_KT_TL_INVALID -2 + +/* + * Exported from dhd OS modules (dhd_linux/dhd_ndis) + */ + +/* Indication from bus module regarding presence/insertion of dongle. + * Return dhd_pub_t pointer, used as handle to OS module in later calls. + * Returned structure should have bus and prot pointers filled in. + * bus_hdrlen specifies required headroom for bus module header. 
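+ * (Illustrative note, inferred from the declarations below rather than stated here: a bus + * driver typically calls dhd_attach() from its probe path, then dhd_bus_start() once the + * dongle is up, and finally dhd_register_if() to expose the net_device to the OS.)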
+ */ +extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen); +#if defined(WLP2P) && defined(WL_CFG80211) +/* To allow attach/detach calls corresponding to p2p0 interface */ +extern int dhd_attach_p2p(dhd_pub_t *); +extern int dhd_detach_p2p(dhd_pub_t *); +#endif /* WLP2P && WL_CFG80211 */ +extern int dhd_register_if(dhd_pub_t *dhdp, int idx, bool need_rtnl_lock); + +/* Indication from bus module regarding removal/absence of dongle */ +extern void dhd_detach(dhd_pub_t *dhdp); +extern void dhd_free(dhd_pub_t *dhdp); +extern void dhd_clear(dhd_pub_t *dhdp); + +/* Indication from bus module to change flow-control state */ +extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on); + +/* Store the status of a connection attempt for later retrieval by an iovar */ +extern void dhd_store_conn_status(uint32 event, uint32 status, uint32 reason); + +extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec); + +/* Receive frame for delivery to OS. Callee disposes of rxp. */ +extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt, uint8 chan); + +/* Return pointer to interface name */ +extern char *dhd_ifname(dhd_pub_t *dhdp, int idx); + +/* Request scheduling of the bus dpc */ +extern void dhd_sched_dpc(dhd_pub_t *dhdp); + +/* Notify tx completion */ +extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success); +extern void dhd_dpc_enable(dhd_pub_t *dhdp); + +#define WIFI_FEATURE_INFRA 0x0001 /* Basic infrastructure mode */ +#define WIFI_FEATURE_INFRA_5G 0x0002 /* Support for 5 GHz Band */ +#define WIFI_FEATURE_HOTSPOT 0x0004 /* Support for GAS/ANQP */ +#define WIFI_FEATURE_P2P 0x0008 /* Wi-Fi Direct */ +#define WIFI_FEATURE_SOFT_AP 0x0010 /* Soft AP */ +#define WIFI_FEATURE_GSCAN 0x0020 /* Google-Scan APIs */ +#define WIFI_FEATURE_NAN 0x0040 /* Neighbor Awareness Networking */ +#define WIFI_FEATURE_D2D_RTT 0x0080 /* Device-to-device RTT */ +#define WIFI_FEATURE_D2AP_RTT 0x0100 /* Device-to-AP RTT */ +#define WIFI_FEATURE_BATCH_SCAN 0x0200 /* Batched Scan (legacy) */ +#define WIFI_FEATURE_PNO 0x0400 /* Preferred network offload */ +#define WIFI_FEATURE_ADDITIONAL_STA 0x0800 /* Support for two STAs */ +#define WIFI_FEATURE_TDLS 0x1000 /* Tunneled direct link setup */ +#define WIFI_FEATURE_TDLS_OFFCHANNEL 0x2000 /* Support for TDLS off channel */ +#define WIFI_FEATURE_EPR 0x4000 /* Enhanced power reporting */ +#define WIFI_FEATURE_AP_STA 0x8000 /* Support for AP STA Concurrency */ +#define WIFI_FEATURE_LINKSTAT 0x10000 /* Support for Linkstats */ + +#define MAX_FEATURE_SET_CONCURRRENT_GROUPS 3 + +extern int dhd_dev_get_feature_set(struct net_device *dev); +extern int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num); +#ifdef CUSTOM_FORCE_NODFS_FLAG +extern int dhd_dev_set_nodfs(struct net_device *dev, uint nodfs); +#endif /* CUSTOM_FORCE_NODFS_FLAG */ +/* OS independent layer functions */ +extern void dhd_os_dhdiovar_lock(dhd_pub_t *pub); +extern void dhd_os_dhdiovar_unlock(dhd_pub_t *pub); +extern int dhd_os_proto_block(dhd_pub_t * pub); +extern int dhd_os_proto_unblock(dhd_pub_t * pub); +extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition); +extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub); +extern unsigned int dhd_os_get_ioctl_resp_timeout(void); +extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec); +extern void dhd_os_ioctl_resp_lock(dhd_pub_t * pub); +extern void dhd_os_ioctl_resp_unlock(dhd_pub_t * pub); +extern int dhd_wakeup_ioctl_event(dhd_pub_t *pub, 
dhd_ioctl_recieved_status_t reason); + +#define DHD_OS_IOCTL_RESP_LOCK(x) +#define DHD_OS_IOCTL_RESP_UNLOCK(x) + + +extern int dhd_os_get_image_block(char * buf, int len, void * image); +extern void * dhd_os_open_image(char * filename); +extern void dhd_os_close_image(void * image); +extern void dhd_os_wd_timer(void *bus, uint wdtick); +#ifdef DHD_PCIE_RUNTIMEPM +extern void dhd_os_runtimepm_timer(void *bus, uint tick); +#endif /* DHD_PCIE_RUNTIMEPM */ +extern void dhd_os_sdlock(dhd_pub_t * pub); +extern void dhd_os_sdunlock(dhd_pub_t * pub); +extern void dhd_os_sdlock_txq(dhd_pub_t * pub); +extern void dhd_os_sdunlock_txq(dhd_pub_t * pub); +extern void dhd_os_sdlock_rxq(dhd_pub_t * pub); +extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub); +extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub); +#ifdef DHDTCPACK_SUPPRESS +extern unsigned long dhd_os_tcpacklock(dhd_pub_t *pub); +extern void dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags); +#endif /* DHDTCPACK_SUPPRESS */ + +extern int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr); +extern int dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff); +extern int dhd_custom_get_mac_address(void *adapter, unsigned char *buf); +#ifdef CUSTOM_COUNTRY_CODE +extern void get_customized_country_code(void *adapter, char *country_iso_code, +wl_country_t *cspec, u32 flags); +#else +extern void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec); +#endif /* CUSTOM_COUNTRY_CODE */ +extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub); +extern void dhd_os_sdlock_eventq(dhd_pub_t * pub); +extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub); +extern bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret); +extern int dhd_os_send_hang_message(dhd_pub_t *dhdp); +extern void dhd_set_version_info(dhd_pub_t *pub, char *fw); +extern bool dhd_os_check_if_up(dhd_pub_t *pub); +extern int dhd_os_check_wakelock(dhd_pub_t *pub); +extern int dhd_os_check_wakelock_all(dhd_pub_t *pub); +extern int dhd_get_instance(dhd_pub_t *pub); +#ifdef CUSTOM_SET_CPUCORE +extern void dhd_set_cpucore(dhd_pub_t *dhd, int set); +#endif /* CUSTOM_SET_CPUCORE */ + +#if defined(KEEP_ALIVE) +extern int dhd_keep_alive_onoff(dhd_pub_t *dhd); +#endif /* KEEP_ALIVE */ + +#ifdef SUPPORT_AP_POWERSAVE +extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable); +#endif + +#if defined(DHD_FW_COREDUMP) +void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size); +#endif /* DHD_FW_COREDUMP */ + +#ifdef SUPPORT_AP_POWERSAVE +extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable); +#endif /* SUPPORT_AP_POWERSAVE */ + + +#ifdef PKT_FILTER_SUPPORT +#define DHD_UNICAST_FILTER_NUM 0 +#define DHD_BROADCAST_FILTER_NUM 1 +#define DHD_MULTICAST4_FILTER_NUM 2 +#define DHD_MULTICAST6_FILTER_NUM 3 +#define DHD_MDNS_FILTER_NUM 4 +#define DHD_ARP_FILTER_NUM 5 +extern int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val); +extern void dhd_enable_packet_filter(int value, dhd_pub_t *dhd); +extern int net_os_enable_packet_filter(struct net_device *dev, int val); +extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num); +#endif /* PKT_FILTER_SUPPORT */ + +extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd); +extern bool dhd_support_sta_mode(dhd_pub_t *dhd); + +extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size); + +typedef struct { + uint32 limit; /* Expiration time (usec) */ + uint32 increment; /* Current expiration increment (usec) */ + uint32 elapsed; /* Current elapsed 
time (usec) */ + uint32 tick; /* O/S tick time (usec) */ +} dhd_timeout_t; + +#ifdef SHOW_LOGTRACE +typedef struct { + int num_fmts; + char **fmts; + char *raw_fmts; + char *raw_sstr; + uint32 ramstart; + uint32 rodata_start; + uint32 rodata_end; + char *rom_raw_sstr; + uint32 rom_ramstart; + uint32 rom_rodata_start; + uint32 rom_rodata_end; +} dhd_event_log_t; +#endif /* SHOW_LOGTRACE */ + +extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec); +extern int dhd_timeout_expired(dhd_timeout_t *tmo); + +extern int dhd_ifname2idx(struct dhd_info *dhd, char *name); +extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net); +extern struct net_device * dhd_idx2net(void *pub, int ifidx); +extern int net_os_send_hang_message(struct net_device *dev); +extern int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num); +extern bool dhd_wowl_cap(void *bus); + +extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, size_t pktlen, + wl_event_msg_t *, void **data_ptr, void *); +extern void wl_event_to_host_order(wl_event_msg_t * evt); +extern int wl_host_event_get_data(void *pktdata, wl_event_msg_t *event, void **data_ptr); + +extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len); +extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, + int ifindex); +extern int dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval, + int cmd, uint8 set, int ifidx); +extern int dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val, + int cmd, uint8 set, int ifidx); +extern void dhd_common_init(osl_t *osh); + +extern int dhd_do_driver_init(struct net_device *net); +extern int dhd_event_ifadd(struct dhd_info *dhd, struct wl_event_data_if *ifevent, + char *name, uint8 *mac); +extern int dhd_event_ifdel(struct dhd_info *dhd, struct wl_event_data_if *ifevent, + char *name, uint8 *mac); +extern struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name, + uint8 *mac, uint8 bssidx, bool need_rtnl_lock, char *dngl_name); +extern int dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock); +extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name); +extern void dhd_vif_del(struct dhd_info *dhd, int ifidx); +extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx); +extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len); + +/* Send packet to dongle via data channel */ +extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt); + +/* send up locally generated event */ +extern void dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data); +/* Send event to host */ +extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data); +#ifdef LOG_INTO_TCPDUMP +extern void dhd_sendup_log(dhd_pub_t *dhdp, void *data, int len); +#endif /* LOG_INTO_TCPDUMP */ +extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag); +extern uint dhd_bus_status(dhd_pub_t *dhdp); +extern int dhd_bus_start(dhd_pub_t *dhdp); +extern int dhd_bus_suspend(dhd_pub_t *dhdpub); +extern int dhd_bus_resume(dhd_pub_t *dhdpub, int stage); +extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size); +extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line); +extern bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval); +#if defined(BCMSDIO) || defined(BCMPCIE) +extern uint dhd_bus_chip_id(dhd_pub_t *dhdp); +extern uint 
dhd_bus_chiprev_id(dhd_pub_t *dhdp); +extern uint dhd_bus_chippkg_id(dhd_pub_t *dhdp); +#endif /* defined(BCMSDIO) || defined(BCMPCIE) */ + +#if defined(KEEP_ALIVE) +extern int dhd_keep_alive_onoff(dhd_pub_t *dhd); +#endif /* KEEP_ALIVE */ + +/* OS spin lock API */ +extern void *dhd_os_spin_lock_init(osl_t *osh); +extern void dhd_os_spin_lock_deinit(osl_t *osh, void *lock); +extern unsigned long dhd_os_spin_lock(void *lock); +void dhd_os_spin_unlock(void *lock, unsigned long flags); + +/* + * Manage sta objects in an interface. The interface is identified by an ifindex and + * sta(s) within an interface are managed using the MAC address of the sta. + */ +struct dhd_sta; +extern struct dhd_sta *dhd_findadd_sta(void *pub, int ifidx, void *ea); +extern void dhd_del_sta(void *pub, int ifidx, void *ea); +extern int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val); +#if defined(BCM_GMAC3) +extern int dhd_set_dev_def(dhd_pub_t *dhdp, uint32 idx, int val); +#endif +extern int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx); +extern int dhd_os_d3ack_wait(dhd_pub_t * pub, uint * condition); +extern int dhd_os_d3ack_wake(dhd_pub_t * pub); +extern int dhd_os_busbusy_wait_negation(dhd_pub_t * pub, uint * condition); +extern int dhd_os_busbusy_wake(dhd_pub_t * pub); + +extern bool dhd_is_concurrent_mode(dhd_pub_t *dhd); +extern int dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set); +typedef enum cust_gpio_modes { + WLAN_RESET_ON, + WLAN_RESET_OFF, + WLAN_POWER_ON, + WLAN_POWER_OFF +} cust_gpio_modes_t; + +extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag); +extern int wl_iw_send_priv_event(struct net_device *dev, char *flag); +/* + * Insmod parameters for debug/test + */ + +/* Watchdog timer interval */ +extern uint dhd_watchdog_ms; + +#ifdef DHD_PCIE_RUNTIMEPM +extern uint dhd_runtimepm_ms; +#endif /* DHD_PCIE_RUNTIMEPM */ + +#if defined(DHD_DEBUG) +/* Console output poll interval */ +extern uint dhd_console_ms; +extern uint wl_msg_level; +#endif /* defined(DHD_DEBUG) */ + +extern uint dhd_slpauto; + +/* Use interrupts */ +extern uint dhd_intr; + +/* Use polling */ +extern uint dhd_poll; + +/* ARP offload agent mode */ +extern uint dhd_arp_mode; + +/* ARP offload enable */ +extern uint dhd_arp_enable; + +/* Pkt filter enable control */ +extern uint dhd_pkt_filter_enable; + +/* Pkt filter init setup */ +extern uint dhd_pkt_filter_init; + +/* Pkt filter mode control */ +extern uint dhd_master_mode; + +/* Roaming mode control */ +extern uint dhd_roam_disable; + +/* Radio up/down control */ +extern uint dhd_radio_up; + +/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */ +extern int dhd_idletime; +#ifdef DHD_USE_IDLECOUNT +#define DHD_IDLETIME_TICKS 5 +#else +#define DHD_IDLETIME_TICKS 1 +#endif /* DHD_USE_IDLECOUNT */ + +/* SDIO Drive Strength */ +extern uint dhd_sdiod_drive_strength; + +/* triggers bcm_bprintf to print to kernel log */ +extern bool bcm_bprintf_bypass; + +/* Override to force tx queueing all the time */ +extern uint dhd_force_tx_queueing; +/* Default KEEP_ALIVE period is 55 sec, to prevent the AP from sending keep-alive probe frames */ +#define DEFAULT_KEEP_ALIVE_VALUE 55000 /* msec */ +#ifndef CUSTOM_KEEP_ALIVE_SETTING +#define CUSTOM_KEEP_ALIVE_SETTING DEFAULT_KEEP_ALIVE_VALUE +#endif /* CUSTOM_KEEP_ALIVE_SETTING */ + +#define NULL_PKT_STR "null_pkt" + +/* hooks for custom glom setting option via Makefile */ +#define DEFAULT_GLOM_VALUE -1 
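+/* + * (Illustration, not part of the original source: each CUSTOM_* knob in this block + * follows the same override pattern -- a product Makefile may predefine the value, + * e.g. with a hypothetical DHDCFLAGS += -DCUSTOM_GLOM_SETTING=8, and the #ifndef + * guard below keeps the default otherwise.) + */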
+#ifndef CUSTOM_GLOM_SETTING +#define CUSTOM_GLOM_SETTING DEFAULT_GLOM_VALUE +#endif +#define WL_AUTO_ROAM_TRIGGER -75 +/* hooks for custom Roaming Trigger setting via Makefile */ +#define DEFAULT_ROAM_TRIGGER_VALUE -75 /* dBm default roam trigger all band */ +#define DEFAULT_ROAM_TRIGGER_SETTING -1 +#ifndef CUSTOM_ROAM_TRIGGER_SETTING +#define CUSTOM_ROAM_TRIGGER_SETTING DEFAULT_ROAM_TRIGGER_VALUE +#endif + +/* hooks for custom Roam Delta setting via Makefile */ +#define DEFAULT_ROAM_DELTA_VALUE 10 /* dBm default roam delta all band */ +#define DEFAULT_ROAM_DELTA_SETTING -1 +#ifndef CUSTOM_ROAM_DELTA_SETTING +#define CUSTOM_ROAM_DELTA_SETTING DEFAULT_ROAM_DELTA_VALUE +#endif + +/* hooks for a custom PNO event wake lock, to guarantee enough time + for the platform to detect the event before the system is suspended +*/ +#define DEFAULT_PNO_EVENT_LOCK_xTIME 2 /* multiple of DHD_PACKET_TIMEOUT_MS */ +#ifndef CUSTOM_PNO_EVENT_LOCK_xTIME +#define CUSTOM_PNO_EVENT_LOCK_xTIME DEFAULT_PNO_EVENT_LOCK_xTIME +#endif +/* hooks for custom dhd_dpc_prio setting option via Makefile */ +#define DEFAULT_DHP_DPC_PRIO 1 +#ifndef CUSTOM_DPC_PRIO_SETTING +#define CUSTOM_DPC_PRIO_SETTING DEFAULT_DHP_DPC_PRIO +#endif + +#ifndef CUSTOM_LISTEN_INTERVAL +#define CUSTOM_LISTEN_INTERVAL LISTEN_INTERVAL +#endif /* CUSTOM_LISTEN_INTERVAL */ + +#define DEFAULT_SUSPEND_BCN_LI_DTIM 3 +#ifndef CUSTOM_SUSPEND_BCN_LI_DTIM +#define CUSTOM_SUSPEND_BCN_LI_DTIM DEFAULT_SUSPEND_BCN_LI_DTIM +#endif + +#ifndef CUSTOM_RXF_PRIO_SETTING +#define CUSTOM_RXF_PRIO_SETTING MAX((CUSTOM_DPC_PRIO_SETTING - 1), 1) +#endif + +#define DEFAULT_WIFI_TURNOFF_DELAY 0 +#define WIFI_TURNOFF_DELAY DEFAULT_WIFI_TURNOFF_DELAY + +#define DEFAULT_WIFI_TURNON_DELAY 200 +#ifndef WIFI_TURNON_DELAY +#define WIFI_TURNON_DELAY DEFAULT_WIFI_TURNON_DELAY +#endif /* WIFI_TURNON_DELAY */ + +#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS 10 /* msec */ +#ifndef CUSTOM_DHD_WATCHDOG_MS +#define CUSTOM_DHD_WATCHDOG_MS DEFAULT_DHD_WATCHDOG_INTERVAL_MS +#endif /* CUSTOM_DHD_WATCHDOG_MS */ + +#define DEFAULT_ASSOC_RETRY_MAX 3 +#ifndef CUSTOM_ASSOC_RETRY_MAX +#define CUSTOM_ASSOC_RETRY_MAX DEFAULT_ASSOC_RETRY_MAX +#endif /* CUSTOM_ASSOC_RETRY_MAX */ + + +#ifdef WLTDLS +#ifndef CUSTOM_TDLS_IDLE_MODE_SETTING +#define CUSTOM_TDLS_IDLE_MODE_SETTING 60000 /* 60 sec to tear down TDLS link if not active */ +#endif +#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_HIGH +#define CUSTOM_TDLS_RSSI_THRESHOLD_HIGH -70 /* RSSI threshold for establishing TDLS link */ +#endif +#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_LOW +#define CUSTOM_TDLS_RSSI_THRESHOLD_LOW -80 /* RSSI threshold for tearing down TDLS link */ +#endif +#endif /* WLTDLS */ + +#define DEFAULT_BCN_TIMEOUT 8 +#ifndef CUSTOM_BCN_TIMEOUT +#define CUSTOM_BCN_TIMEOUT DEFAULT_BCN_TIMEOUT +#endif + +#define MAX_DTIM_SKIP_BEACON_INTERVAL 100 /* max allowed associated-AP beacon interval for DTIM skip */ +#ifndef MAX_DTIM_ALLOWED_INTERVAL +#define MAX_DTIM_ALLOWED_INTERVAL 600 /* max allowed total beacon interval for DTIM skip */ +#endif +#define NO_DTIM_SKIP 1 +#ifdef SDTEST +/* Echo packet generator (SDIO), pkts/s */ +extern uint dhd_pktgen; + +/* Echo packet len (0 => sawtooth, max 1800) */ +extern uint dhd_pktgen_len; +#define MAX_PKTGEN_LEN 1800 +#endif + + +/* optionally set by a module_param_string() */ +#define MOD_PARAM_PATHLEN 2048 +#define MOD_PARAM_INFOLEN 512 + +#ifdef SOFTAP +extern char fw_path2[MOD_PARAM_PATHLEN]; +#endif + +/* Flag to indicate if we should download firmware on driver load */ +extern uint dhd_download_fw_on_driverload; +extern int 
allow_delay_fwdl; + + +extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar); +extern void dhd_wait_event_wakeup(dhd_pub_t *dhd); + +#define IFLOCK_INIT(lock) *lock = 0 +#define IFLOCK(lock) while (InterlockedCompareExchange((lock), 1, 0)) \ + NdisStallExecution(1); +#define IFUNLOCK(lock) InterlockedExchange((lock), 0) +#define IFLOCK_FREE(lock) +#define FW_SUPPORTED(dhd, capa) ((strstr(dhd->fw_capabilities, " " #capa " ") != NULL)) +#ifdef ARP_OFFLOAD_SUPPORT +#define MAX_IPV4_ENTRIES 8 +void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode); +void dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable); + +/* dhd_common ARP offload wrappers */ +void dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx); +void dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx); +int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx); +void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx); +#endif /* ARP_OFFLOAD_SUPPORT */ +#ifdef WLTDLS +int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac); +int dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode); +#ifdef PCIE_FULL_DONGLE +void dhd_tdls_update_peer_info(struct net_device *dev, bool connect_disconnect, uint8 *addr); +#endif /* PCIE_FULL_DONGLE */ +#endif /* WLTDLS */ +/* Neighbor Discovery Offload Support */ +extern int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable); +int dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipaddr, int idx); +int dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx); +/* ioctl processing for nl80211 */ +int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, struct dhd_ioctl *ioc, void *data_buf); + +void dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path); +void dhd_set_bus_state(void *bus, uint32 state); + +/* Remove proper pkts (either one non-fragmented pkt or a whole set of fragmented pkts) */ +typedef int (*f_droppkt_t)(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ); +extern bool dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn); + +#ifdef PROP_TXSTATUS +int dhd_os_wlfc_block(dhd_pub_t *pub); +int dhd_os_wlfc_unblock(dhd_pub_t *pub); +extern const uint8 prio2fifo[]; +#endif /* PROP_TXSTATUS */ + +uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail); +void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size); + +int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost); + +#if defined(CONFIG_DHD_USE_STATIC_BUF) +#define DHD_OS_PREALLOC(dhdpub, section, size) dhd_os_prealloc(dhdpub, section, size, FALSE) +#define DHD_OS_PREFREE(dhdpub, addr, size) dhd_os_prefree(dhdpub, addr, size) +#else +#define DHD_OS_PREALLOC(dhdpub, section, size) MALLOC(dhdpub->osh, size) +#define DHD_OS_PREFREE(dhdpub, addr, size) MFREE(dhdpub->osh, addr, size) +#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */ + +#ifdef USE_WFA_CERT_CONF +enum { + SET_PARAM_BUS_TXGLOM_MODE, + SET_PARAM_ROAMOFF, +#ifdef USE_WL_FRAMEBURST + SET_PARAM_FRAMEBURST, +#endif /* USE_WL_FRAMEBURST */ +#ifdef USE_WL_TXBF + SET_PARAM_TXBF, +#endif /* USE_WL_TXBF */ +#ifdef PROP_TXSTATUS + SET_PARAM_PROPTX, + SET_PARAM_PROPTXMODE, +#endif /* PROP_TXSTATUS */ + PARAM_LAST_VALUE +}; +extern int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val); +#endif /* USE_WFA_CERT_CONF */ + +#define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid) do {} while (0) +#define dhd_del_flowid(pub, ifidx, flowid) do {} while (0) + +extern unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub); +extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags); + 
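+/* + * (Usage sketch, assuming a hypothetical caller; the wrappers defined below follow + * the usual save/restore-flags spinlock pattern: + * + * unsigned long flags; + * DHD_GENERAL_LOCK(dhdp, flags); + * ... touch shared dhd_pub_t state ... + * DHD_GENERAL_UNLOCK(dhdp, flags); + * ) + */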
+/** Miscellaneous DHD Spin Locks */ + +/* Disable router 3GMAC bypass path perimeter lock */ +#define DHD_PERIM_LOCK(dhdp) do {} while (0) +#define DHD_PERIM_UNLOCK(dhdp) do {} while (0) +#define DHD_PERIM_LOCK_ALL(processor_id) do {} while (0) +#define DHD_PERIM_UNLOCK_ALL(processor_id) do {} while (0) + +/* Enable DHD general spin lock/unlock */ +#define DHD_GENERAL_LOCK(dhdp, flags) \ + (flags) = dhd_os_general_spin_lock(dhdp) +#define DHD_GENERAL_UNLOCK(dhdp, flags) \ + dhd_os_general_spin_unlock((dhdp), (flags)) + +/* Enable DHD flowring spin lock/unlock */ +#define DHD_FLOWRING_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_FLOWRING_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + +/* Enable DHD common flowring info spin lock/unlock */ +#define DHD_FLOWID_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_FLOWID_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + +/* Enable DHD common flowring list spin lock/unlock */ +#define DHD_FLOWRING_LIST_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock) +#define DHD_FLOWRING_LIST_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags)) + +extern void dhd_dump_to_kernelog(dhd_pub_t *dhdp); + + +#ifdef DHD_L2_FILTER +extern int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val); +extern int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val); +extern int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val); +extern int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx); +extern int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val); +#endif /* DHD_L2_FILTER */ + +typedef struct wl_io_pport { + dhd_pub_t *dhd_pub; + uint ifidx; +} wl_io_pport_t; + +typedef struct wl_evt_pport { + dhd_pub_t *dhd_pub; + int *ifidx; + void *pktdata; + void **data_ptr; + void *raw_event; +} wl_evt_pport_t; + +extern void *dhd_pub_shim(dhd_pub_t *dhd_pub); +#ifdef DHD_FW_COREDUMP +void dhd_save_fwdump(dhd_pub_t *dhd_pub, void * buffer, uint32 length); +#endif /* DHD_FW_COREDUMP */ + +#if defined(SET_RPS_CPUS) +int dhd_rps_cpus_enable(struct net_device *net, int enable); +int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len); +void custom_rps_map_clear(struct netdev_rx_queue *queue); +#define PRIMARY_INF 0 +#define VIRTUAL_INF 1 +#if defined(CONFIG_MACH_UNIVERSAL5433) || defined(CONFIG_MACH_UNIVERSAL7420) || \ + defined(CONFIG_SOC_EXYNOS8890) +#define RPS_CPUS_MASK "10" +#define RPS_CPUS_MASK_P2P "10" +#define RPS_CPUS_MASK_IBSS "10" +#define RPS_CPUS_WLAN_CORE_ID 4 +#else +#define RPS_CPUS_MASK "6" +#define RPS_CPUS_MASK_P2P "6" +#define RPS_CPUS_MASK_IBSS "6" +#endif /* CONFIG_MACH_UNIVERSAL5433 || CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 */ +#endif + +int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component, + char ** buffer, int *length); + +void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length); + +#define dhd_is_device_removed(x) FALSE +#define dhd_os_ind_firmware_stall(x) + +#ifdef DHD_FW_COREDUMP +extern void dhd_get_memdump_info(dhd_pub_t *dhd); +#endif /* DHD_FW_COREDUMP */ +#ifdef BCMASSERT_LOG +extern void dhd_get_assert_info(dhd_pub_t *dhd); +#endif /* BCMASSERT_LOG */ + + +#if defined(DHD_LB_STATS) +#include +extern void dhd_lb_stats_init(dhd_pub_t *dhd); +extern void 
dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); +extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count); +extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count); +extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count); +extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp); +extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp); +#define DHD_LB_STATS_INIT(dhdp) dhd_lb_stats_init(dhdp) +/* Reset is called from common layer so it takes dhd_pub_t as argument */ +#define DHD_LB_STATS_RESET(dhdp) dhd_lb_stats_init(dhdp) +#define DHD_LB_STATS_CLR(x) (x) = 0U +#define DHD_LB_STATS_INCR(x) (x) = (x) + 1 +#define DHD_LB_STATS_ADD(x, c) (x) = (x) + (c) +#define DHD_LB_STATS_PERCPU_ARR_INCR(x) \ + { \ + int cpu = get_cpu(); put_cpu(); \ + DHD_LB_STATS_INCR(x[cpu]); \ + } +#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhdp, x) dhd_lb_stats_update_napi_histo(dhdp, x) +#define DHD_LB_STATS_UPDATE_TXC_HISTO(dhdp, x) dhd_lb_stats_update_txc_histo(dhdp, x) +#define DHD_LB_STATS_UPDATE_RXC_HISTO(dhdp, x) dhd_lb_stats_update_rxc_histo(dhdp, x) +#define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp) dhd_lb_stats_txc_percpu_cnt_incr(dhdp) +#define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) dhd_lb_stats_rxc_percpu_cnt_incr(dhdp) +#else /* !DHD_LB_STATS */ +#define DHD_LB_STATS_NOOP do { /* noop */ } while (0) +#define DHD_LB_STATS_INIT(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_RESET(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_CLR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_INCR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_ADD(x, c) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_PERCPU_ARR_INCR(x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhd, x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, x) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP +#define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP +#endif /* !DHD_LB_STATS */ + +#ifdef DHD_PCIE_RUNTIMEPM +extern bool dhd_runtimepm_state(dhd_pub_t *dhd); +extern bool dhd_runtime_bus_wake(struct dhd_bus *bus, bool wait, void *func_addr); +extern bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void *func_addr); +extern void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp); +extern bool dhdpcie_is_resume_done(dhd_pub_t *dhdp); +extern void dhd_runtime_pm_disable(dhd_pub_t *dhdp); +extern void dhd_runtime_pm_enable(dhd_pub_t *dhdp); +/* Disable the Runtime PM and wake up if the bus is already in suspend */ +#define DHD_DISABLE_RUNTIME_PM(dhdp) \ +do { \ + dhd_runtime_pm_disable(dhdp); \ +} while (0) + +/* Enable the Runtime PM */ +#define DHD_ENABLE_RUNTIME_PM(dhdp) \ +do { \ + dhd_runtime_pm_enable(dhdp); \ +} while (0) +#else +#define DHD_DISABLE_RUNTIME_PM(dhdp) +#define DHD_ENABLE_RUNTIME_PM(dhdp) +#endif /* DHD_PCIE_RUNTIMEPM */ + +extern void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs); + +/* + * Enable this macro if you want to track the calls to wake lock. + * These records can be printed using the following command: + * cat /sys/bcm-dhd/wklock_trace + * DHD_TRACE_WAKE_LOCK is supported on Linux 2.6.0 and later. + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#undef DHD_TRACE_WAKE_LOCK +#endif /* KERNEL_VER < KERNEL_VERSION(2, 6, 0) */ + +#if defined(DHD_TRACE_WAKE_LOCK) +void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp); +#endif + +extern int dhd_prot_debug_info_print(dhd_pub_t *dhd); + +#ifdef ENABLE_TEMP_THROTTLING +#define 
TEMP_THROTTLE_CONTROL_BIT 0xf /* Enable all features. */ +#endif /* ENABLE_TEMP_THROTTLING */ + +#ifdef DHD_PKTID_AUDIT_ENABLED +void dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp); +#endif /* DHD_PKTID_AUDIT_ENABLED */ + +#endif /* _dhd_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.c b/drivers/net/wireless/bcmdhd/dhd_bta.c new file mode 100644 index 000000000000..dc24edbb5c30 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_bta.c @@ -0,0 +1,340 @@ +/* + * BT-AMP support routines + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_bta.c 514727 2014-11-12 03:02:48Z $ + */ +#error "WLBTAMP is not defined" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + + +#ifdef SEND_HCI_CMD_VIA_IOCTL +#define BTA_HCI_CMD_MAX_LEN (HCI_CMD_PREAMBLE_SIZE + HCI_CMD_DATA_SIZE) + +/* Send HCI cmd via wl iovar HCI_cmd to the dongle. 
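+ * (As the body below shows, the command is packed with bcm_mkiovar() into an + * "HCI_cmd" iovar buffer and issued to the dongle as a WLC_SET_VAR ioctl.)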
*/ +int +dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len) +{ + amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf; + uint8 buf[BTA_HCI_CMD_MAX_LEN + 16]; + uint len = sizeof(buf); + wl_ioctl_t ioc; + + if (cmd_len < HCI_CMD_PREAMBLE_SIZE) + return BCME_BADLEN; + + if ((uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE > cmd_len) + return BCME_BADLEN; + + len = bcm_mkiovar("HCI_cmd", + (char *)cmd, (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE, (char *)buf, len); + + + memset(&ioc, 0, sizeof(ioc)); + + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = len; + ioc.set = TRUE; + + return dhd_wl_ioctl(pub, &ioc, ioc.buf, ioc.len); +} +#else /* !SEND_HCI_CMD_VIA_IOCTL */ + +static void +dhd_bta_flush_hcidata(dhd_pub_t *pub, uint16 llh) +{ + int prec; + struct pktq *q; + uint count = 0; + + q = dhd_bus_txq(pub->bus); + if (q == NULL) + return; + + DHD_BTA(("dhd: flushing HCI ACL data for logical link %u...\n", llh)); + + dhd_os_sdlock_txq(pub); + + /* Walk through the txq and toss all HCI ACL data packets */ + PKTQ_PREC_ITER(q, prec) { + void *head_pkt = NULL; + + while (pktq_ppeek(q, prec) != head_pkt) { + void *pkt = pktq_pdeq(q, prec); + int ifidx; + + dhd_prot_hdrpull(pub, &ifidx, pkt, NULL, NULL); + + if (PKTLEN(pub->osh, pkt) >= RFC1042_HDR_LEN) { + struct ether_header *eh = + (struct ether_header *)PKTDATA(pub->osh, pkt); + + if (ntoh16(eh->ether_type) < ETHER_TYPE_MIN) { + struct dot11_llc_snap_header *lsh = + (struct dot11_llc_snap_header *)&eh[1]; + + if (bcmp(lsh, BT_SIG_SNAP_MPROT, + DOT11_LLC_SNAP_HDR_LEN - 2) == 0 && + ntoh16(lsh->type) == BTA_PROT_L2CAP) { + amp_hci_ACL_data_t *ACL_data = + (amp_hci_ACL_data_t *)&lsh[1]; + uint16 handle = ltoh16(ACL_data->handle); + + if (HCI_ACL_DATA_HANDLE(handle) == llh) { + PKTFREE(pub->osh, pkt, TRUE); + count++; + continue; + } + } + } + } + + dhd_prot_hdrpush(pub, ifidx, pkt); + + if (head_pkt == NULL) + head_pkt = pkt; + pktq_penq(q, prec, pkt); + } + } + + dhd_os_sdunlock_txq(pub); + + DHD_BTA(("dhd: flushed %u packet(s) for logical link %u...\n", count, llh)); +} + +/* Handle HCI cmd locally. + * Return 0: continue to send the cmd across SDIO + * < 0: stop, fail + * > 0: stop, success + */ +static int +_dhd_bta_docmd(dhd_pub_t *pub, amp_hci_cmd_t *cmd) +{ + int status = 0; + + switch (ltoh16_ua((uint8 *)&cmd->opcode)) { + case HCI_Enhanced_Flush: { + eflush_cmd_parms_t *cmdparms = (eflush_cmd_parms_t *)cmd->parms; + dhd_bta_flush_hcidata(pub, ltoh16_ua(cmdparms->llh)); + break; + } + default: + break; + } + + return status; +} + +/* Send HCI cmd encapsulated in BT-SIG frame via data channel to the dongle. 
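+ * Frame layout, as constructed below: + * [ Ethernet header | BT-SIG LLC/SNAP header | HCI command ] + * with an all-zero, locally-administered destination address and the + * device MAC as source.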
*/ +int +dhd_bta_docmd(dhd_pub_t *pub, void *cmd_buf, uint cmd_len) +{ + amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)cmd_buf; + struct ether_header *eh; + struct dot11_llc_snap_header *lsh; + osl_t *osh = pub->osh; + uint len; + void *p; + int status; + + if (cmd_len < HCI_CMD_PREAMBLE_SIZE) { + DHD_ERROR(("dhd_bta_docmd: short command, cmd_len %u\n", cmd_len)); + return BCME_BADLEN; + } + + if ((len = (uint)cmd->plen + HCI_CMD_PREAMBLE_SIZE) > cmd_len) { + DHD_ERROR(("dhd_bta_docmd: malformed command, len %u cmd_len %u\n", + len, cmd_len)); + /* return BCME_BADLEN; */ + } + + p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE); + if (p == NULL) { + DHD_ERROR(("dhd_bta_docmd: out of memory\n")); + return BCME_NOMEM; + } + + + /* intercept and handle the HCI cmd locally */ + if ((status = _dhd_bta_docmd(pub, cmd)) > 0) { + PKTFREE(osh, p, TRUE); /* handled locally; the allocated pkt is not sent */ + return 0; + } else if (status < 0) { + PKTFREE(osh, p, TRUE); /* don't leak the pkt on the error path */ + return status; + } + + /* copy in HCI cmd */ + PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN); + bcopy(cmd, PKTDATA(osh, p), len); + + /* copy in partial Ethernet header with BT-SIG LLC/SNAP header */ + PKTPUSH(osh, p, RFC1042_HDR_LEN); + eh = (struct ether_header *)PKTDATA(osh, p); + bzero(eh->ether_dhost, ETHER_ADDR_LEN); + ETHER_SET_LOCALADDR(eh->ether_dhost); + bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN); + eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN); + lsh = (struct dot11_llc_snap_header *)&eh[1]; + bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2); + lsh->type = 0; + + return dhd_sendpkt(pub, 0, p); +} +#endif /* !SEND_HCI_CMD_VIA_IOCTL */ + +/* Send HCI ACL data to dongle via data channel */ +int +dhd_bta_tx_hcidata(dhd_pub_t *pub, void *data_buf, uint data_len) +{ + amp_hci_ACL_data_t *data = (amp_hci_ACL_data_t *)data_buf; + struct ether_header *eh; + struct dot11_llc_snap_header *lsh; + osl_t *osh = pub->osh; + uint len; + void *p; + + if (data_len < HCI_ACL_DATA_PREAMBLE_SIZE) { + DHD_ERROR(("dhd_bta_tx_hcidata: short data_buf, data_len %u\n", data_len)); + return BCME_BADLEN; + } + + if ((len = (uint)ltoh16(data->dlen) + HCI_ACL_DATA_PREAMBLE_SIZE) > data_len) { + DHD_ERROR(("dhd_bta_tx_hcidata: malformed hci data, len %u data_len %u\n", + len, data_len)); + /* return BCME_BADLEN; */ + } + + p = PKTGET(osh, pub->hdrlen + RFC1042_HDR_LEN + len, TRUE); + if (p == NULL) { + DHD_ERROR(("dhd_bta_tx_hcidata: out of memory\n")); + return BCME_NOMEM; + } + + + /* copy in HCI ACL data header and HCI ACL data */ + PKTPULL(osh, p, pub->hdrlen + RFC1042_HDR_LEN); + bcopy(data, PKTDATA(osh, p), len); + + /* copy in partial Ethernet header with BT-SIG LLC/SNAP header */ + PKTPUSH(osh, p, RFC1042_HDR_LEN); + eh = (struct ether_header *)PKTDATA(osh, p); + bzero(eh->ether_dhost, ETHER_ADDR_LEN); + bcopy(&pub->mac, eh->ether_shost, ETHER_ADDR_LEN); + eh->ether_type = hton16(len + DOT11_LLC_SNAP_HDR_LEN); + lsh = (struct dot11_llc_snap_header *)&eh[1]; + bcopy(BT_SIG_SNAP_MPROT, lsh, DOT11_LLC_SNAP_HDR_LEN - 2); + lsh->type = HTON16(BTA_PROT_L2CAP); + + return dhd_sendpkt(pub, 0, p); +} + +/* txcomplete callback */ +void +dhd_bta_tx_hcidata_complete(dhd_pub_t *dhdp, void *txp, bool success) +{ + uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, txp); + amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)(pktdata + RFC1042_HDR_LEN); + uint16 handle = ltoh16(ACL_data->handle); + uint16 llh = HCI_ACL_DATA_HANDLE(handle); + + wl_event_msg_t event; + uint8 data[HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t)]; + amp_hci_event_t *evt; + num_completed_data_blocks_evt_parms_t *parms; + + uint16 len = 
HCI_EVT_PREAMBLE_SIZE + sizeof(num_completed_data_blocks_evt_parms_t); + + /* update the event struct */ + memset(&event, 0, sizeof(event)); + event.version = hton16(BCM_EVENT_MSG_VERSION); + event.event_type = hton32(WLC_E_BTA_HCI_EVENT); + event.status = 0; + event.reason = 0; + event.auth_type = 0; + event.datalen = hton32(len); + event.flags = 0; + + /* generate Number of Completed Blocks event */ + evt = (amp_hci_event_t *)data; + evt->ecode = HCI_Number_of_Completed_Data_Blocks; + evt->plen = sizeof(num_completed_data_blocks_evt_parms_t); + + parms = (num_completed_data_blocks_evt_parms_t *)evt->parms; + htol16_ua_store(dhdp->maxdatablks, (uint8 *)&parms->num_blocks); + parms->num_handles = 1; + htol16_ua_store(llh, (uint8 *)&parms->completed[0].handle); + parms->completed[0].pkts = 1; + parms->completed[0].blocks = 1; + + dhd_sendup_event_common(dhdp, &event, data); +} + +/* event callback */ +void +dhd_bta_doevt(dhd_pub_t *dhdp, void *data_buf, uint data_len) +{ + amp_hci_event_t *evt = (amp_hci_event_t *)data_buf; + + ASSERT(dhdp); + ASSERT(evt); + + switch (evt->ecode) { + case HCI_Command_Complete: { + cmd_complete_parms_t *parms = (cmd_complete_parms_t *)evt->parms; + switch (ltoh16_ua((uint8 *)&parms->opcode)) { + case HCI_Read_Data_Block_Size: { + read_data_block_size_evt_parms_t *parms2 = + (read_data_block_size_evt_parms_t *)parms->parms; + dhdp->maxdatablks = ltoh16_ua((uint8 *)&parms2->data_block_num); + break; + } + } + break; + } + + case HCI_Flush_Occurred: { + flush_occurred_evt_parms_t *evt_parms = (flush_occurred_evt_parms_t *)evt->parms; + dhd_bta_flush_hcidata(dhdp, ltoh16_ua((uint8 *)&evt_parms->handle)); + break; + } + default: + break; + } +} diff --git a/drivers/net/wireless/bcmdhd/dhd_bta.h b/drivers/net/wireless/bcmdhd/dhd_bta.h new file mode 100644 index 000000000000..df9d1f91b9ce --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_bta.h @@ -0,0 +1,42 @@ +/* + * BT-AMP support routines + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_bta.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef __dhd_bta_h__ +#define __dhd_bta_h__ + +struct dhd_pub; + +extern int dhd_bta_docmd(struct dhd_pub *pub, void *cmd_buf, uint cmd_len); + +extern void dhd_bta_doevt(struct dhd_pub *pub, void *data_buf, uint data_len); + +extern int dhd_bta_tx_hcidata(struct dhd_pub *pub, void *data_buf, uint data_len); +extern void dhd_bta_tx_hcidata_complete(struct dhd_pub *dhdp, void *txp, bool success); + + +#endif /* __dhd_bta_h__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_bus.h b/drivers/net/wireless/bcmdhd/dhd_bus.h new file mode 100644 index 000000000000..05ac0345cea7 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_bus.h @@ -0,0 +1,213 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_bus.h 602721 2015-11-27 10:32:48Z $ + */ + +#ifndef _dhd_bus_h_ +#define _dhd_bus_h_ + +/* + * Exported from dhd bus module (dhd_usb, dhd_sdio) + */ + +/* Indicate (dis)interest in finding dongles. */ +extern int dhd_bus_register(void); +extern void dhd_bus_unregister(void); + +/* Download firmware image and nvram image */ +extern int dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, char *fw_path, char *nv_path); + +/* Stop bus module: clear pending frames, disable data flow */ +extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex); + +/* Initialize bus module: prepare for communication w/dongle */ +extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex); + +/* Get the Bus Idle Time */ +extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int *idletime); + +/* Set the Bus Idle Time */ +extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time); + +/* Send a data frame to the dongle. Callee disposes of txp. */ +#ifdef BCMPCIE +extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx); +#else +extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp); +#endif + + +/* Send/receive a control message to/from the dongle. + * Expects caller to enforce a single outstanding transaction. 
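+ * (The CDC protocol layer in dhd_cdc.c is one such caller: dhdcdc_msg() issues the + * request via dhd_bus_txctl() and dhdcdc_cmplt() collects the matching completion + * via dhd_bus_rxctl().)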
+ */ +extern int dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen); +extern int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen); + +/* Watchdog timer function */ +extern bool dhd_bus_watchdog(dhd_pub_t *dhd); + +extern int dhd_bus_oob_intr_register(dhd_pub_t *dhdp); +extern void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp); +extern void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable); +extern void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub); +extern void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub); +extern bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub); + +#if defined(DHD_DEBUG) +/* Device console input function */ +extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen); +#endif /* defined(DHD_DEBUG) */ + +/* Deferred processing for the bus; returning TRUE requests a reschedule */ +extern bool dhd_bus_dpc(struct dhd_bus *bus); +extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg); + + +/* Check for and handle local prot-specific iovar commands */ +extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Add bus dump output to a buffer */ +extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); + +/* Clear any bus counters */ +extern void dhd_bus_clearcounts(dhd_pub_t *dhdp); + +/* return the dongle chipid */ +extern uint dhd_bus_chip(struct dhd_bus *bus); + +/* return the dongle chiprev */ +extern uint dhd_bus_chiprev(struct dhd_bus *bus); + +/* Set user-specified nvram parameters. */ +extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params); + +extern void *dhd_bus_pub(struct dhd_bus *bus); +extern void *dhd_bus_txq(struct dhd_bus *bus); +extern void *dhd_bus_sih(struct dhd_bus *bus); +extern uint dhd_bus_hdrlen(struct dhd_bus *bus); +#ifdef BCMSDIO +extern void dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val); +/* return sdio io status */ +extern uint8 dhd_bus_is_ioready(struct dhd_bus *bus); +#else +#define dhd_bus_set_dotxinrx(a, b) do {} while (0) +#endif + +#define DHD_SET_BUS_STATE_DOWN(_bus) do { \ + (_bus)->dhd->busstate = DHD_BUS_DOWN; \ +} while (0) + +/* Register a dummy SDIO client driver in order to be notified of new SDIO device */ +extern int dhd_bus_reg_sdio_notify(void* semaphore); +extern void dhd_bus_unreg_sdio_notify(void); +extern void dhd_txglom_enable(dhd_pub_t *dhdp, bool enable); +extern int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, + uint32 *slot_num); + +#ifdef BCMPCIE +enum { + /* Scratch buffer configuration update */ + D2H_DMA_SCRATCH_BUF, + D2H_DMA_SCRATCH_BUF_LEN, + + /* DMA Indices array buffers for: H2D WR and RD, and D2H WR and RD */ + H2D_DMA_INDX_WR_BUF, /* update H2D WR dma indices buf base addr to dongle */ + H2D_DMA_INDX_RD_BUF, /* update H2D RD dma indices buf base addr to dongle */ + D2H_DMA_INDX_WR_BUF, /* update D2H WR dma indices buf base addr to dongle */ + D2H_DMA_INDX_RD_BUF, /* update D2H RD dma indices buf base addr to dongle */ + + /* DHD sets/gets WR or RD index, in host's H2D and D2H DMA indices buffer */ + H2D_DMA_INDX_WR_UPD, /* update H2D WR index in H2D WR dma indices buf */ + H2D_DMA_INDX_RD_UPD, /* update H2D RD index in H2D RD dma indices buf */ + D2H_DMA_INDX_WR_UPD, /* update D2H WR index in D2H WR dma indices buf */ + D2H_DMA_INDX_RD_UPD, /* update D2H RD index in D2H RD dma indices buf */ + + /* H2D and D2H Mailbox data update */ + H2D_MB_DATA, + D2H_MB_DATA, + + /* (Common) MsgBuf Ring configuration 
update */ + RING_BUF_ADDR, /* update ring base address to dongle */ + RING_ITEM_LEN, /* update ring item size to dongle */ + RING_MAX_ITEMS, /* update ring max items to dongle */ + + /* Update of WR or RD index, for a MsgBuf Ring */ + RING_RD_UPD, /* update ring read index from/to dongle */ + RING_WR_UPD, /* update ring write index from/to dongle */ + + TOTAL_LFRAG_PACKET_CNT, + MAX_HOST_RXBUFS +}; + +typedef void (*dhd_mb_ring_t) (struct dhd_bus *, uint32); +extern void dhd_bus_cmn_writeshared(struct dhd_bus *bus, void * data, uint32 len, uint8 type, + uint16 ringid); +extern void dhd_bus_ringbell(struct dhd_bus *bus, uint32 value); +extern void dhd_bus_cmn_readshared(struct dhd_bus *bus, void* data, uint8 type, uint16 ringid); +extern uint32 dhd_bus_get_sharedflags(struct dhd_bus *bus); +extern void dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count); +extern void dhd_bus_start_queue(struct dhd_bus *bus); +extern void dhd_bus_stop_queue(struct dhd_bus *bus); + +extern dhd_mb_ring_t dhd_bus_get_mbintr_fn(struct dhd_bus *bus); +extern void dhd_bus_write_flow_ring_states(struct dhd_bus *bus, + void * data, uint16 flowid); +extern void dhd_bus_read_flow_ring_states(struct dhd_bus *bus, + void * data, uint8 flowid); +extern int dhd_bus_flow_ring_create_request(struct dhd_bus *bus, void *flow_ring_node); +extern void dhd_bus_clean_flow_ring(struct dhd_bus *bus, void *flow_ring_node); +extern void dhd_bus_flow_ring_create_response(struct dhd_bus *bus, uint16 flow_id, int32 status); +extern int dhd_bus_flow_ring_delete_request(struct dhd_bus *bus, void *flow_ring_node); +extern void dhd_bus_flow_ring_delete_response(struct dhd_bus *bus, uint16 flowid, uint32 status); +extern int dhd_bus_flow_ring_flush_request(struct dhd_bus *bus, void *flow_ring_node); +extern void dhd_bus_flow_ring_flush_response(struct dhd_bus *bus, uint16 flowid, uint32 status); +extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus); +extern int dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs); +extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val); + + +extern int dhdpcie_bus_clock_start(struct dhd_bus *bus); +extern int dhdpcie_bus_clock_stop(struct dhd_bus *bus); +extern int dhdpcie_bus_enable_device(struct dhd_bus *bus); +extern int dhdpcie_bus_disable_device(struct dhd_bus *bus); +extern int dhdpcie_bus_alloc_resource(struct dhd_bus *bus); +extern void dhdpcie_bus_free_resource(struct dhd_bus *bus); +extern bool dhdpcie_bus_dongle_attach(struct dhd_bus *bus); +extern int dhd_bus_release_dongle(struct dhd_bus *bus); +extern int dhd_bus_request_irq(struct dhd_bus *bus); + + +#ifdef DHD_FW_COREDUMP +extern int dhd_bus_mem_dump(dhd_pub_t *dhd); +#endif /* DHD_FW_COREDUMP */ + +#endif /* BCMPCIE */ +#endif /* _dhd_bus_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_buzzz.h b/drivers/net/wireless/bcmdhd/dhd_buzzz.h new file mode 100644 index 000000000000..a5422d58d7ef --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_buzzz.h @@ -0,0 +1,37 @@ +#ifndef _DHD_BUZZZ_H_INCLUDED_ +#define _DHD_BUZZZ_H_INCLUDED_ + +/* + * Broadcom logging system - Empty implementation + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special 
exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_buzzz.h 591283 2015-10-07 11:52:00Z $ + */ + +#define dhd_buzzz_attach() do { /* noop */ } while (0) +#define dhd_buzzz_detach() do { /* noop */ } while (0) +#define dhd_buzzz_panic(x) do { /* noop */ } while (0) +#define BUZZZ_LOG(ID, N, ARG...) do { /* noop */ } while (0) + +#endif /* _DHD_BUZZZ_H_INCLUDED_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_cdc.c b/drivers/net/wireless/bcmdhd/dhd_cdc.c new file mode 100644 index 000000000000..66a311069d09 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_cdc.c @@ -0,0 +1,817 @@ +/* + * DHD Protocol Module for CDC and BDC. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_cdc.c 596022 2015-10-29 11:02:47Z $ + * + * BDC is like CDC, except it includes a header for data packets to convey + * packet priority over the bus, and flags (e.g. to indicate checksum status + * for dongle offload). + */ + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + + +#ifdef PROP_TXSTATUS +#include +#include +#endif + + +#define RETRIES 2 /* # of retries to retrieve matching ioctl response */ +#define BUS_HEADER_LEN (24+DHD_SDALIGN) /* Must be at least SDPCM_RESERVE + * defined in dhd_sdio.c (amount of header that might be added) + * plus any space that might be needed for alignment padding. 
+ */ +#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for + * round off at the end of buffer + */ + +typedef struct dhd_prot { + uint16 reqid; + uint8 pending; + uint32 lastcmd; + uint8 bus_header[BUS_HEADER_LEN]; + cdc_ioctl_t msg; + unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN]; +} dhd_prot_t; + + +static int +dhdcdc_msg(dhd_pub_t *dhd) +{ + int err = 0; + dhd_prot_t *prot = dhd->prot; + int len = ltoh32(prot->msg.len) + sizeof(cdc_ioctl_t); + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + DHD_OS_WAKE_LOCK(dhd); + + /* NOTE : cdc->msg.len holds the desired length of the buffer to be + * returned. Only up to CDC_MAX_MSG_SIZE of this buffer area + * is actually sent to the dongle + */ + if (len > CDC_MAX_MSG_SIZE) + len = CDC_MAX_MSG_SIZE; + + /* Send request */ + err = dhd_bus_txctl(dhd->bus, (uchar*)&prot->msg, len); + + DHD_OS_WAKE_UNLOCK(dhd); + return err; +} + +static int +dhdcdc_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len) +{ + int ret; + int cdc_len = len + sizeof(cdc_ioctl_t); + dhd_prot_t *prot = dhd->prot; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + do { + ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, cdc_len); + if (ret < 0) + break; + } while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id); + + return ret; +} + +static int +dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) +{ + dhd_prot_t *prot = dhd->prot; + cdc_ioctl_t *msg = &prot->msg; + int ret = 0, retries = 0; + uint32 id, flags = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len)); + + + /* Respond "bcmerror" and "bcmerrorstr" with local cache */ + if (cmd == WLC_GET_VAR && buf) + { + if (!strcmp((char *)buf, "bcmerrorstr")) + { + strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN); + goto done; + } + else if (!strcmp((char *)buf, "bcmerror")) + { + *(int *)buf = dhd->dongle_error; + goto done; + } + } + + memset(msg, 0, sizeof(cdc_ioctl_t)); + + msg->cmd = htol32(cmd); + msg->len = htol32(len); + msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT); + CDC_SET_IF_IDX(msg, ifidx); + /* add additional action bits */ + action &= WL_IOCTL_ACTION_MASK; + msg->flags |= (action << CDCF_IOC_ACTION_SHIFT); + msg->flags = htol32(msg->flags); + + if (buf) + memcpy(prot->buf, buf, len); + + if ((ret = dhdcdc_msg(dhd)) < 0) { + if (!dhd->hang_was_sent) + DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status %d\n", ret)); + goto done; + } + +retry: + /* wait for interrupt and get first fragment */ + if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0) + goto done; + + flags = ltoh32(msg->flags); + id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT; + + if ((id < prot->reqid) && (++retries < RETRIES)) + goto retry; + if (id != prot->reqid) { + DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n", + dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid)); + ret = -EINVAL; + goto done; + } + + /* Copy info buffer */ + if (buf) + { + if (ret < (int)len) + len = ret; + memcpy(buf, (void*) prot->buf, len); + } + + /* Check the ERROR flag */ + if (flags & CDCF_IOC_ERROR) + { + ret = ltoh32(msg->status); + /* Cache error from dongle */ + dhd->dongle_error = ret; + } + +done: + return ret; +} + + +static int +dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) +{ + dhd_prot_t *prot = dhd->prot; + cdc_ioctl_t *msg = &prot->msg; + int ret = 0; + uint32 flags, id; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_CTL(("%s: cmd %d len %d\n", 
__FUNCTION__, cmd, len)); + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return -EIO; + } + + /* don't talk to the dongle if fw is about to be reloaded */ + if (dhd->hang_was_sent) { + DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n", + __FUNCTION__)); + return -EIO; + } + + if (cmd == WLC_SET_PM) { + DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, *(char *)buf)); + } + + memset(msg, 0, sizeof(cdc_ioctl_t)); + + msg->cmd = htol32(cmd); + msg->len = htol32(len); + msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT); + CDC_SET_IF_IDX(msg, ifidx); + /* add additional action bits */ + action &= WL_IOCTL_ACTION_MASK; + msg->flags |= (action << CDCF_IOC_ACTION_SHIFT) | CDCF_IOC_SET; + msg->flags = htol32(msg->flags); + + if (buf) + memcpy(prot->buf, buf, len); + + if ((ret = dhdcdc_msg(dhd)) < 0) { + DHD_ERROR(("%s: dhdcdc_msg failed w/status %d\n", __FUNCTION__, ret)); + goto done; + } + + if ((ret = dhdcdc_cmplt(dhd, prot->reqid, len)) < 0) + goto done; + + flags = ltoh32(msg->flags); + id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT; + + if (id != prot->reqid) { + DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n", + dhd_ifname(dhd, ifidx), __FUNCTION__, id, prot->reqid)); + ret = -EINVAL; + goto done; + } + + /* Check the ERROR flag */ + if (flags & CDCF_IOC_ERROR) + { + ret = ltoh32(msg->status); + /* Cache error from dongle */ + dhd->dongle_error = ret; + } + +done: + return ret; +} + + +int +dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len) +{ + dhd_prot_t *prot = dhd->prot; + int ret = -1; + uint8 action; + + if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + goto done; + } + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(len <= WLC_IOCTL_MAXLEN); + + if (len > WLC_IOCTL_MAXLEN) + goto done; + + if (prot->pending == TRUE) { + DHD_ERROR(("CDC packet is pending!!!! 
cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n", + ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd, + (unsigned long)prot->lastcmd)); + if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) { + DHD_TRACE(("iovar cmd=%s\n", (char*)buf)); + } + goto done; + } + + prot->pending = TRUE; + prot->lastcmd = ioc->cmd; + action = ioc->set; + if (action & WL_IOCTL_ACTION_SET) + ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + else { + ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + if (ret > 0) + ioc->used = ret - sizeof(cdc_ioctl_t); + } + + /* Too many programs assume ioctl() returns 0 on success */ + if (ret >= 0) + ret = 0; + else { + cdc_ioctl_t *msg = &prot->msg; + ioc->needed = ltoh32(msg->len); /* len == needed when set/query fails from dongle */ + } + + /* Intercept the wme_dp ioctl here */ + if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) { + int slen, val = 0; + + slen = strlen("wme_dp") + 1; + if (len >= (int)(slen + sizeof(int))) + bcopy(((char *)buf + slen), &val, sizeof(int)); + dhd->wme_dp = (uint8) ltoh32(val); + } + + prot->pending = FALSE; + +done: + + return ret; +} + +int +dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + return BCME_UNSUPPORTED; +} + +void +dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + if (!dhdp || !dhdp->prot) + return; + bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid); +#ifdef PROP_TXSTATUS + dhd_wlfc_dump(dhdp, strbuf); +#endif +} + +/* The FreeBSD PKTPUSH could change the packet buf pointer + so we need to make it changeable +*/ +#define PKTBUF pktbuf +void +dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF) +{ +#ifdef BDC + struct bdc_header *h; +#endif /* BDC */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BDC + /* Push BDC header used to convey priority for buses that don't */ + + PKTPUSH(dhd->osh, PKTBUF, BDC_HEADER_LEN); + + h = (struct bdc_header *)PKTDATA(dhd->osh, PKTBUF); + + h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT); + if (PKTSUMNEEDED(PKTBUF)) + h->flags |= BDC_FLAG_SUM_NEEDED; + + + h->priority = (PKTPRIO(PKTBUF) & BDC_PRIORITY_MASK); + h->flags2 = 0; + h->dataOffset = 0; +#endif /* BDC */ + BDC_SET_IF_IDX(h, ifidx); +} +#undef PKTBUF /* Only defined in the above routine */ + +uint +dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF) +{ + uint hdrlen = 0; +#ifdef BDC + /* Length of BDC(+WLFC) headers pushed */ + hdrlen = BDC_HEADER_LEN + (((struct bdc_header *)PKTBUF)->dataOffset * 4); +#endif + return hdrlen; +} + +int +dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf, uchar *reorder_buf_info, + uint *reorder_info_len) +{ +#ifdef BDC + struct bdc_header *h; +#endif + uint8 data_offset = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BDC + if (reorder_info_len) + *reorder_info_len = 0; + /* Pop BDC header used to convey priority for buses that don't */ + + if (PKTLEN(dhd->osh, pktbuf) < BDC_HEADER_LEN) { + DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__, + PKTLEN(dhd->osh, pktbuf), BDC_HEADER_LEN)); + return BCME_ERROR; + } + + h = (struct bdc_header *)PKTDATA(dhd->osh, pktbuf); + + if (!ifidx) { + /* for tx packet, skip the analysis */ + data_offset = h->dataOffset; + PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN); + goto exit; + } + + *ifidx = BDC_GET_IF_IDX(h); + + if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) { + DHD_ERROR(("%s: non-BDC packet received, flags = 0x%x\n", + dhd_ifname(dhd, *ifidx), h->flags)); + if 
(((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) == BDC_PROTO_VER_1) + h->dataOffset = 0; + else + return BCME_ERROR; + } + + if (h->flags & BDC_FLAG_SUM_GOOD) { + DHD_INFO(("%s: BDC packet received with good rx-csum, flags 0x%x\n", + dhd_ifname(dhd, *ifidx), h->flags)); + PKTSETSUMGOOD(pktbuf, TRUE); + } + + PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK)); + data_offset = h->dataOffset; + PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN); +#endif /* BDC */ + + +#ifdef PROP_TXSTATUS + if (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf))) { + /* + - parse txstatus only for packets that came from the firmware + */ + dhd_wlfc_parse_header_info(dhd, pktbuf, (data_offset << 2), + reorder_buf_info, reorder_info_len); + + } +#endif /* PROP_TXSTATUS */ + +exit: + PKTPULL(dhd->osh, pktbuf, (data_offset << 2)); + return 0; +} + + +int +dhd_prot_attach(dhd_pub_t *dhd) +{ + dhd_prot_t *cdc; + + if (!(cdc = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT, sizeof(dhd_prot_t)))) { + DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + goto fail; + } + memset(cdc, 0, sizeof(dhd_prot_t)); + + /* ensure that the msg buf directly follows the cdc msg struct */ + if ((uintptr)(&cdc->msg + 1) != (uintptr)cdc->buf) { + DHD_ERROR(("dhd_prot_t is not correctly defined\n")); + goto fail; + } + + dhd->prot = cdc; +#ifdef BDC + dhd->hdrlen += BDC_HEADER_LEN; +#endif + dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN; + return 0; + +fail: + if (cdc != NULL) + DHD_OS_PREFREE(dhd, cdc, sizeof(dhd_prot_t)); + return BCME_NOMEM; +} + +/* ~NOTE~ What if another thread is waiting on the semaphore? Holding it? */ +void +dhd_prot_detach(dhd_pub_t *dhd) +{ +#ifdef PROP_TXSTATUS + dhd_wlfc_deinit(dhd); +#endif + DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t)); + dhd->prot = NULL; +} + +void +dhd_prot_dstats(dhd_pub_t *dhd) +{ + /* copy bus stats */ + + dhd->dstats.tx_packets = dhd->tx_packets; + dhd->dstats.tx_errors = dhd->tx_errors; + dhd->dstats.rx_packets = dhd->rx_packets; + dhd->dstats.rx_errors = dhd->rx_errors; + dhd->dstats.rx_dropped = dhd->rx_dropped; + dhd->dstats.multicast = dhd->rx_multicast; + return; +} + +int +dhd_sync_with_dongle(dhd_pub_t *dhd) +{ + int ret = 0; + wlc_rev_info_t revinfo; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BCMASSERT_LOG + dhd_get_assert_info(dhd); +#endif /* BCMASSERT_LOG */ + + /* Get the device rev info */ + memset(&revinfo, 0, sizeof(revinfo)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0); + if (ret < 0) + goto done; + + + dhd_process_cid_mac(dhd, TRUE); + + ret = dhd_preinit_ioctls(dhd); + + if (!ret) + dhd_process_cid_mac(dhd, FALSE); + + /* Always assumes wl for now */ + dhd->iswl = TRUE; + +done: + return ret; +} + +int dhd_prot_init(dhd_pub_t *dhd) +{ + return BCME_OK; +} + +void +dhd_prot_stop(dhd_pub_t *dhd) +{ +/* Nothing to do for CDC */ +} + + +static void +dhd_get_hostreorder_pkts(void *osh, struct reorder_info *ptr, void **pkt, + uint32 *pkt_count, void **pplast, uint8 start, uint8 end) +{ + void *plast = NULL, *p; + uint32 pkt_cnt = 0; + + if (ptr->pend_pkts == 0) { + DHD_REORDER(("%s: no packets in reorder queue \n", __FUNCTION__)); + *pplast = NULL; + *pkt_count = 0; + *pkt = NULL; + return; + } + do { + p = (void *)(ptr->p[start]); + ptr->p[start] = NULL; + + if (p != NULL) { + if (plast == NULL) + *pkt = p; + else + PKTSETNEXT(osh, plast, p); + + plast = p; + pkt_cnt++; + } + start++; + if (start > ptr->max_idx) + start = 0; + } while (start != end); + *pplast = plast; + *pkt_count = pkt_cnt; + 
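+ /* Editorial sketch, not part of the original change: the do/while above + * drains the circular slot array from 'start' up to 'end', chaining any + * queued packets with PKTSETNEXT and wrapping past max_idx, i.e. the + * index advance is equivalent to: + * + * idx = (idx == ptr->max_idx) ? 0 : idx + 1; + * + * Because the loop body runs before the (start != end) test, a call made + * with start == end (as in the DEL_FLOW and NEW_HOLE paths below, which + * pass exp_idx twice) visits every slot exactly once and so flushes the + * whole flow. + */ 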
ptr->pend_pkts -= (uint8)pkt_cnt; +} + +int +dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len, + void **pkt, uint32 *pkt_count) +{ + uint8 flow_id, max_idx, cur_idx, exp_idx; + struct reorder_info *ptr; + uint8 flags; + void *cur_pkt, *plast = NULL; + uint32 cnt = 0; + + if (pkt == NULL) { + if (pkt_count != NULL) + *pkt_count = 0; + return 0; + } + + flow_id = reorder_info_buf[WLHOST_REORDERDATA_FLOWID_OFFSET]; + flags = reorder_info_buf[WLHOST_REORDERDATA_FLAGS_OFFSET]; + + DHD_REORDER(("flow_id %d, flags 0x%02x, idx(%d, %d, %d)\n", flow_id, flags, + reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET], + reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET], + reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET])); + + /* validate flags and flow id */ + if (flags == 0xFF) { + DHD_ERROR(("%s: invalid flags...so ignore this packet\n", __FUNCTION__)); + *pkt_count = 1; + return 0; + } + + cur_pkt = *pkt; + *pkt = NULL; + + ptr = dhd->reorder_bufs[flow_id]; + if (flags & WLHOST_REORDERDATA_DEL_FLOW) { + uint32 buf_size = sizeof(struct reorder_info); + + DHD_REORDER(("%s: Flags indicating to delete a flow id %d\n", + __FUNCTION__, flow_id)); + + if (ptr == NULL) { + DHD_REORDER(("%s: received flags to cleanup, but no flow (%d) yet\n", + __FUNCTION__, flow_id)); + *pkt_count = 1; + *pkt = cur_pkt; + return 0; + } + + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, + ptr->exp_idx, ptr->exp_idx); + /* set it to the last packet */ + if (plast) { + PKTSETNEXT(dhd->osh, plast, cur_pkt); + cnt++; + } + else { + if (cnt != 0) { + DHD_ERROR(("%s: del flow: something fishy, pending packets %d\n", + __FUNCTION__, cnt)); + } + *pkt = cur_pkt; + cnt = 1; + } + buf_size += ((ptr->max_idx + 1) * sizeof(void *)); + MFREE(dhd->osh, ptr, buf_size); + dhd->reorder_bufs[flow_id] = NULL; + *pkt_count = cnt; + return 0; + } + /* all the other cases depend on the existence of the reorder struct for that flow id */ + if (ptr == NULL) { + uint32 buf_size_alloc = sizeof(reorder_info_t); + max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET]; + + buf_size_alloc += ((max_idx + 1) * sizeof(void*)); + /* allocate space to hold the buffers, index etc */ + + DHD_REORDER(("%s: alloc buffer of size %d, reorder info id %d, maxidx %d\n", + __FUNCTION__, buf_size_alloc, flow_id, max_idx)); + ptr = (struct reorder_info *)MALLOC(dhd->osh, buf_size_alloc); + if (ptr == NULL) { + DHD_ERROR(("%s: Malloc failed to alloc buffer\n", __FUNCTION__)); + *pkt_count = 1; + return 0; + } + bzero(ptr, buf_size_alloc); + dhd->reorder_bufs[flow_id] = ptr; + ptr->p = (void *)(ptr+1); + ptr->max_idx = max_idx; + } + if (flags & WLHOST_REORDERDATA_NEW_HOLE) { + DHD_REORDER(("%s: new hole, so cleanup pending buffers\n", __FUNCTION__)); + if (ptr->pend_pkts) { + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, + ptr->exp_idx, ptr->exp_idx); + ptr->pend_pkts = 0; + } + ptr->cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET]; + ptr->exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET]; + ptr->max_idx = reorder_info_buf[WLHOST_REORDERDATA_MAXIDX_OFFSET]; + ptr->p[ptr->cur_idx] = cur_pkt; + ptr->pend_pkts++; + *pkt_count = cnt; + } + else if (flags & WLHOST_REORDERDATA_CURIDX_VALID) { + cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET]; + exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET]; + + + if ((exp_idx == ptr->exp_idx) && (cur_idx != ptr->exp_idx)) { + /* still in the current hole */ + /* enqueue the current on the buffer chain */ + if 
(ptr->p[cur_idx] != NULL) { + DHD_REORDER(("%s: HOLE: ERROR buffer pending..free it\n", + __FUNCTION__)); + PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE); + ptr->p[cur_idx] = NULL; + } + ptr->p[cur_idx] = cur_pkt; + ptr->pend_pkts++; + ptr->cur_idx = cur_idx; + DHD_REORDER(("%s: fill up a hole..pending packets is %d\n", + __FUNCTION__, ptr->pend_pkts)); + *pkt_count = 0; + *pkt = NULL; + } + else if (ptr->exp_idx == cur_idx) { + /* got the right one ..flush from cur to exp and update exp */ + DHD_REORDER(("%s: got the right one now, cur_idx is %d\n", + __FUNCTION__, cur_idx)); + if (ptr->p[cur_idx] != NULL) { + DHD_REORDER(("%s: Error buffer pending..free it\n", + __FUNCTION__)); + PKTFREE(dhd->osh, ptr->p[cur_idx], TRUE); + ptr->p[cur_idx] = NULL; + } + ptr->p[cur_idx] = cur_pkt; + ptr->pend_pkts++; + + ptr->cur_idx = cur_idx; + ptr->exp_idx = exp_idx; + + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, + cur_idx, exp_idx); + *pkt_count = cnt; + DHD_REORDER(("%s: freeing up buffers %d, still pending %d\n", + __FUNCTION__, cnt, ptr->pend_pkts)); + } + else { + uint8 end_idx; + bool flush_current = FALSE; + /* both cur and exp are moved now .. */ + DHD_REORDER(("%s:, flow %d, both moved, cur %d(%d), exp %d(%d)\n", + __FUNCTION__, flow_id, ptr->cur_idx, cur_idx, + ptr->exp_idx, exp_idx)); + if (flags & WLHOST_REORDERDATA_FLUSH_ALL) + end_idx = ptr->exp_idx; + else + end_idx = exp_idx; + + /* flush pkts first */ + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, + ptr->exp_idx, end_idx); + + if (cur_idx == ptr->max_idx) { + if (exp_idx == 0) + flush_current = TRUE; + } else { + if (exp_idx == cur_idx + 1) + flush_current = TRUE; + } + if (flush_current) { + if (plast) + PKTSETNEXT(dhd->osh, plast, cur_pkt); + else + *pkt = cur_pkt; + cnt++; + } + else { + ptr->p[cur_idx] = cur_pkt; + ptr->pend_pkts++; + } + ptr->exp_idx = exp_idx; + ptr->cur_idx = cur_idx; + *pkt_count = cnt; + } + } + else { + uint8 end_idx; + /* no real packet but update to exp_seq...that means explicit window move */ + exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET]; + + DHD_REORDER(("%s: move the window, cur_idx is %d, exp is %d, new exp is %d\n", + __FUNCTION__, ptr->cur_idx, ptr->exp_idx, exp_idx)); + if (flags & WLHOST_REORDERDATA_FLUSH_ALL) + end_idx = ptr->exp_idx; + else + end_idx = exp_idx; + + dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, ptr->exp_idx, end_idx); + if (plast) + PKTSETNEXT(dhd->osh, plast, cur_pkt); + else + *pkt = cur_pkt; + cnt++; + *pkt_count = cnt; + /* set the new expected idx */ + ptr->exp_idx = exp_idx; + } + return 0; +} diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.c b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c new file mode 100644 index 000000000000..390747fe101d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c @@ -0,0 +1,268 @@ +/* + * Linux cfg80211 driver - Dongle Host Driver (DHD) related + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also 
meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_cfg80211.c 591285 2015-10-07 11:56:29Z $ + */ + +#include +#include + +#include +#include +#include +#include + +#ifdef PKT_FILTER_SUPPORT +#include +#include +#endif + +extern struct bcm_cfg80211 *g_bcm_cfg; + +#ifdef PKT_FILTER_SUPPORT +extern uint dhd_pkt_filter_enable; +extern uint dhd_master_mode; +extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode); +#endif + +static int dhd_dongle_up = FALSE; + +#include +#include +#include +#include +#include +#include + +static s32 wl_dongle_up(struct net_device *ndev); +static s32 wl_dongle_down(struct net_device *ndev); + +/** + * Function implementations + */ + +s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg) +{ + dhd_dongle_up = FALSE; + return 0; +} + +s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg) +{ + dhd_dongle_up = FALSE; + return 0; +} + +s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg) +{ + struct net_device *ndev; + s32 err = 0; + + WL_TRACE(("In\n")); + if (!dhd_dongle_up) { + WL_ERR(("Dongle is already down\n")); + return err; + } + + ndev = bcmcfg_to_prmry_ndev(cfg); + wl_dongle_down(ndev); + dhd_dongle_up = FALSE; + return 0; +} + +s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val) +{ + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + dhd->op_mode |= val; + WL_ERR(("Set : op_mode=0x%04x\n", dhd->op_mode)); +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd->arp_version == 1) { + /* IF P2P is enabled, disable arpoe */ + dhd_arp_offload_set(dhd, 0); + dhd_arp_offload_enable(dhd, false); + } +#endif /* ARP_OFFLOAD_SUPPORT */ + + return 0; +} + +s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg) +{ + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + dhd->op_mode &= ~(DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE); + WL_ERR(("Clean : op_mode=0x%04x\n", dhd->op_mode)); + +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd->arp_version == 1) { + /* IF P2P is disabled, enable arpoe back for STA mode. 
*/ + dhd_arp_offload_set(dhd, dhd_arp_mode); + dhd_arp_offload_enable(dhd, true); + } +#endif /* ARP_OFFLOAD_SUPPORT */ + + return 0; +} + +struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name, + uint8 *mac, uint8 bssidx, char *dngl_name) +{ + return dhd_allocate_if(cfg->pub, ifidx, name, mac, bssidx, FALSE, dngl_name); +} + +int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev) +{ + return dhd_register_if(cfg->pub, ifidx, FALSE); +} + +int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev) +{ + return dhd_remove_if(cfg->pub, ifidx, FALSE); +} + +struct net_device * dhd_cfg80211_netdev_free(struct net_device *ndev) +{ + if (ndev) { + if (ndev->ieee80211_ptr) { + kfree(ndev->ieee80211_ptr); + ndev->ieee80211_ptr = NULL; + } + free_netdev(ndev); + return NULL; + } + + return ndev; +} + +void dhd_netdev_free(struct net_device *ndev) +{ +#ifdef WL_CFG80211 + ndev = dhd_cfg80211_netdev_free(ndev); +#endif + if (ndev) + free_netdev(ndev); +} + +static s32 +wl_dongle_up(struct net_device *ndev) +{ + s32 err = 0; + u32 up = 0; + + err = wldev_ioctl(ndev, WLC_UP, &up, sizeof(up), true); + if (unlikely(err)) { + WL_ERR(("WLC_UP error (%d)\n", err)); + } + return err; +} + +static s32 +wl_dongle_down(struct net_device *ndev) +{ + s32 err = 0; + u32 down = 0; + + err = wldev_ioctl(ndev, WLC_DOWN, &down, sizeof(down), true); + if (unlikely(err)) { + WL_ERR(("WLC_DOWN error (%d)\n", err)); + } + return err; +} + + +s32 dhd_config_dongle(struct bcm_cfg80211 *cfg) +{ +#ifndef DHD_SDALIGN +#define DHD_SDALIGN 32 +#endif + struct net_device *ndev; + s32 err = 0; + + WL_TRACE(("In\n")); + if (dhd_dongle_up) { + WL_ERR(("Dongle is already up\n")); + return err; + } + + ndev = bcmcfg_to_prmry_ndev(cfg); + + err = wl_dongle_up(ndev); + if (unlikely(err)) { + WL_ERR(("wl_dongle_up failed\n")); + goto default_conf_out; + } + dhd_dongle_up = true; + +default_conf_out: + + return err; + +} + +int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev, + const struct bcm_nlmsg_hdr *nlioc, void *buf) +{ + struct net_device *ndev = NULL; + dhd_pub_t *dhd; + dhd_ioctl_t ioc = { 0 }; + int ret = 0; + int8 index; + + WL_TRACE(("entry: cmd = %d\n", nlioc->cmd)); + + dhd = cfg->pub; + DHD_OS_WAKE_LOCK(dhd); + + /* send to dongle only if we are not waiting for reload already */ + if (dhd->hang_was_sent) { + WL_ERR(("HANG was sent up earlier\n")); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS); + DHD_OS_WAKE_UNLOCK(dhd); + return OSL_ERROR(BCME_DONGLE_DOWN); + } + + ndev = wdev_to_wlc_ndev(wdev, cfg); + index = dhd_net2idx(dhd->info, ndev); + if (index == DHD_BAD_IF) { + WL_ERR(("Bad ifidx from wdev:%p\n", wdev)); + ret = BCME_ERROR; + goto done; + } + + ioc.cmd = nlioc->cmd; + ioc.len = nlioc->len; + ioc.set = nlioc->set; + ioc.driver = nlioc->magic; + ret = dhd_ioctl_process(dhd, index, &ioc, buf); + if (ret) { + WL_TRACE(("dhd_ioctl_process return err %d\n", ret)); + ret = OSL_ERROR(ret); + goto done; + } + +done: + DHD_OS_WAKE_UNLOCK(dhd); + return ret; +} diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.h b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h new file mode 100644 index 000000000000..cae7cc9b247b --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.h @@ -0,0 +1,54 @@ +/* + * Linux cfg80211 driver - Dongle Host Driver (DHD) related + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written 
software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_cfg80211.h 591285 2015-10-07 11:56:29Z $ + */ + + +#ifndef __DHD_CFG80211__ +#define __DHD_CFG80211__ + +#include +#include +#include + +#ifndef WL_ERR +#define WL_ERR CFG80211_ERR +#endif +#ifndef WL_TRACE +#define WL_TRACE CFG80211_TRACE +#endif + +s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg); +s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg); +s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg); +s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val); +s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg); +s32 dhd_config_dongle(struct bcm_cfg80211 *cfg); +int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg, + struct wireless_dev *wdev, const struct bcm_nlmsg_hdr *nlioc, void *data); + +#endif /* __DHD_CFG80211__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg_vendor.c b/drivers/net/wireless/bcmdhd/dhd_cfg_vendor.c new file mode 100644 index 000000000000..c72f8299aadb --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_cfg_vendor.c @@ -0,0 +1,174 @@ +/* + * Linux cfg80211 vendor command/event handlers of DHD + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_cfg_vendor.c 525516 2015-01-09 23:12:53Z $ + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef VENDOR_EXT_SUPPORT +static int dhd_cfgvendor_priv_string_handler(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + const struct bcm_nlmsg_hdr *nlioc = data; + struct net_device *ndev = NULL; + struct bcm_cfg80211 *cfg; + struct sk_buff *reply; + void *buf = NULL, *cur; + dhd_pub_t *dhd; + dhd_ioctl_t ioc = { 0 }; + int ret = 0, ret_len, payload, msglen; + int maxmsglen = PAGE_SIZE - 0x100; + int8 index; + + WL_TRACE(("entry: cmd = %d\n", nlioc->cmd)); + DHD_ERROR(("entry: cmd = %d\n", nlioc->cmd)); + + cfg = wiphy_priv(wiphy); + dhd = cfg->pub; + + DHD_OS_WAKE_LOCK(dhd); + + /* send to dongle only if we are not waiting for reload already */ + if (dhd->hang_was_sent) { + WL_ERR(("HANG was sent up earlier\n")); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS); + DHD_OS_WAKE_UNLOCK(dhd); + return OSL_ERROR(BCME_DONGLE_DOWN); + } + + len -= sizeof(struct bcm_nlmsg_hdr); + ret_len = nlioc->len; + if (ret_len > 0 || len > 0) { + if (len > DHD_IOCTL_MAXLEN) { + WL_ERR(("oversize input buffer %d\n", len)); + len = DHD_IOCTL_MAXLEN; + } + if (ret_len > DHD_IOCTL_MAXLEN) { + WL_ERR(("oversize return buffer %d\n", ret_len)); + ret_len = DHD_IOCTL_MAXLEN; + } + payload = max(ret_len, len) + 1; + buf = vzalloc(payload); + if (!buf) { + DHD_OS_WAKE_UNLOCK(dhd); + return -ENOMEM; + } + memcpy(buf, (void *)nlioc + nlioc->offset, len); + *(char *)(buf + len) = '\0'; + } + + ndev = wdev_to_wlc_ndev(wdev, cfg); + index = dhd_net2idx(dhd->info, ndev); + if (index == DHD_BAD_IF) { + WL_ERR(("Bad ifidx from wdev:%p\n", wdev)); + ret = BCME_ERROR; + goto done; + } + + ioc.cmd = nlioc->cmd; + ioc.len = nlioc->len; + ioc.set = nlioc->set; + ioc.driver = nlioc->magic; + ret = dhd_ioctl_process(dhd, index, &ioc, buf); + if (ret) { + WL_TRACE(("dhd_ioctl_process return err %d\n", ret)); + ret = OSL_ERROR(ret); + goto done; + } + + cur = buf; + while (ret_len > 0) { + msglen = nlioc->len > maxmsglen ? 
maxmsglen : ret_len; + ret_len -= msglen; + payload = msglen + sizeof(msglen); + reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload); + if (!reply) { + WL_ERR(("Failed to allocate reply msg\n")); + ret = -ENOMEM; + break; + } + + if (nla_put(reply, BCM_NLATTR_DATA, msglen, cur) || + nla_put_u16(reply, BCM_NLATTR_LEN, msglen)) { + kfree_skb(reply); + ret = -ENOBUFS; + break; + } + + ret = cfg80211_vendor_cmd_reply(reply); + if (ret) { + WL_ERR(("testmode reply failed:%d\n", ret)); + break; + } + cur += msglen; + } + +done: + vfree(buf); + DHD_OS_WAKE_UNLOCK(dhd); + return ret; +} + +const struct wiphy_vendor_command dhd_cfgvendor_cmds [] = { + { + { + .vendor_id = OUI_BRCM, + .subcmd = BRCM_VENDOR_SCMD_PRIV_STR + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = dhd_cfgvendor_priv_string_handler + }, +}; + +int cfgvendor_attach(struct wiphy *wiphy) +{ + wiphy->vendor_commands = dhd_cfgvendor_cmds; + wiphy->n_vendor_commands = ARRAY_SIZE(dhd_cfgvendor_cmds); + + return 0; +} + +int cfgvendor_detach(struct wiphy *wiphy) +{ + wiphy->vendor_commands = NULL; + wiphy->n_vendor_commands = 0; + + return 0; +} +#endif /* VENDOR_EXT_SUPPORT */ diff --git a/drivers/net/wireless/bcmdhd/dhd_common.c b/drivers/net/wireless/bcmdhd/dhd_common.c new file mode 100644 index 000000000000..55bc9ebc992a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_common.c @@ -0,0 +1,3557 @@ +/* + * Broadcom Dongle Host Driver (DHD), common DHD core. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_common.c 609263 2015-12-31 16:21:33Z $ + */ +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifdef SHOW_LOGTRACE +#include +#endif /* SHOW_LOGTRACE */ + +#ifdef BCMPCIE +#include +#endif + +#include +#include +#include +#include + +#ifdef WL_CFG80211 +#include +#endif +#ifdef PNO_SUPPORT +#include +#endif + +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) + +#ifdef PROP_TXSTATUS +#include +#include +#endif + +#ifdef DHD_WMF +#include +#include +#endif /* DHD_WMF */ + +#ifdef DHD_L2_FILTER +#include +#endif /* DHD_L2_FILTER */ + +#ifdef DHD_PSTA +#include +#endif /* DHD_PSTA */ + + +#ifdef WLMEDIA_HTSF +extern void htsf_update(struct dhd_info *dhd, void *data); +#endif + +#ifdef DHD_LOG_DUMP +int dhd_msg_level = DHD_ERROR_VAL | DHD_MSGTRACE_VAL | DHD_FWLOG_VAL | DHD_EVENT_VAL; +#else +int dhd_msg_level = DHD_ERROR_VAL | DHD_MSGTRACE_VAL | DHD_FWLOG_VAL; +#endif /* DHD_LOG_DUMP */ + + +#if defined(WL_WLC_SHIM) +#include +#else +#endif /* WL_WLC_SHIM */ + +#include + +#ifdef SOFTAP +char fw_path2[MOD_PARAM_PATHLEN]; +extern bool softap_enabled; +#endif + +/* Last connection success/failure status */ +uint32 dhd_conn_event; +uint32 dhd_conn_status; +uint32 dhd_conn_reason; + +#if defined(SHOW_EVENTS) && defined(SHOW_LOGTRACE) +static int check_event_log_sequence_number(uint32 seq_no); +#endif /* defined(SHOW_EVENTS) && defined(SHOW_LOGTRACE) */ +extern int dhd_iscan_request(void * dhdp, uint16 action); +extern void dhd_ind_scan_confirm(void *h, bool status); +extern int dhd_iscan_in_progress(void *h); +void dhd_iscan_lock(void); +void dhd_iscan_unlock(void); +extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx); +#if !defined(AP) && defined(WLP2P) +extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd); +#endif + +extern int dhd_socram_dump(struct dhd_bus *bus); + +bool ap_cfg_running = FALSE; +bool ap_fw_loaded = FALSE; + +/* Version string to report */ +#ifdef DHD_DEBUG +#ifndef SRCBASE +#define SRCBASE "drivers/net/wireless/bcmdhd" +#endif +#define DHD_COMPILED "\nCompiled in " SRCBASE +#endif /* DHD_DEBUG */ + +#if defined(DHD_DEBUG) +const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR; +#else +const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR "\nCompiled from "; +#endif +char fw_version[FW_VER_STR_LEN] = "\0"; + +void dhd_set_timer(void *bus, uint wdtick); + + + +/* IOVar table */ +enum { + IOV_VERSION = 1, + IOV_MSGLEVEL, + IOV_BCMERRORSTR, + IOV_BCMERROR, + IOV_WDTICK, + IOV_DUMP, + IOV_CLEARCOUNTS, + IOV_LOGDUMP, + IOV_LOGCAL, + IOV_LOGSTAMP, + IOV_GPIOOB, + IOV_IOCTLTIMEOUT, +#if defined(DHD_DEBUG) + IOV_CONS, + IOV_DCONSOLE_POLL, + IOV_DHD_JOIN_TIMEOUT_DBG, + IOV_SCAN_TIMEOUT, +#endif /* defined(DHD_DEBUG) */ +#ifdef PROP_TXSTATUS + IOV_PROPTXSTATUS_ENABLE, + IOV_PROPTXSTATUS_MODE, + IOV_PROPTXSTATUS_OPT, + IOV_PROPTXSTATUS_MODULE_IGNORE, + IOV_PROPTXSTATUS_CREDIT_IGNORE, + IOV_PROPTXSTATUS_TXSTATUS_IGNORE, + IOV_PROPTXSTATUS_RXPKT_CHK, +#endif /* PROP_TXSTATUS */ + IOV_BUS_TYPE, +#ifdef WLMEDIA_HTSF + IOV_WLPKTDLYSTAT_SZ, +#endif + IOV_CHANGEMTU, + IOV_HOSTREORDER_FLOWS, +#ifdef DHDTCPACK_SUPPRESS + IOV_TCPACK_SUPPRESS, +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef DHD_WMF + IOV_WMF_BSS_ENAB, + IOV_WMF_UCAST_IGMP, + IOV_WMF_MCAST_DATA_SENDUP, +#ifdef WL_IGMP_UCQUERY + IOV_WMF_UCAST_IGMP_QUERY, +#endif /* WL_IGMP_UCQUERY */ +#ifdef 
DHD_UCAST_UPNP + IOV_WMF_UCAST_UPNP, +#endif /* DHD_UCAST_UPNP */ +#endif /* DHD_WMF */ + IOV_AP_ISOLATE, +#ifdef DHD_L2_FILTER + IOV_DHCP_UNICAST, + IOV_BLOCK_PING, + IOV_PROXY_ARP, + IOV_GRAT_ARP, +#endif /* DHD_L2_FILTER */ +#ifdef DHD_PSTA + IOV_PSTA, +#endif /* DHD_PSTA */ + IOV_CFG80211_OPMODE, + IOV_ASSERT_TYPE, + IOV_LMTEST, + IOV_LAST +}; + +const bcm_iovar_t dhd_iovars[] = { + {"version", IOV_VERSION, 0, IOVT_BUFFER, sizeof(dhd_version) }, +#ifdef DHD_DEBUG + {"msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, +#endif /* DHD_DEBUG */ + {"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER, BCME_STRLEN }, + {"bcmerror", IOV_BCMERROR, 0, IOVT_INT8, 0 }, + {"wdtick", IOV_WDTICK, 0, IOVT_UINT32, 0 }, + {"dump", IOV_DUMP, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN }, +#ifdef DHD_DEBUG + {"cons", IOV_CONS, 0, IOVT_BUFFER, 0 }, + {"dconpoll", IOV_DCONSOLE_POLL, 0, IOVT_UINT32, 0 }, +#endif + {"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID, 0 }, + {"gpioob", IOV_GPIOOB, 0, IOVT_UINT32, 0 }, + {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, IOVT_UINT32, 0 }, +#ifdef PROP_TXSTATUS + {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, IOVT_BOOL, 0 }, + /* + set the proptxtstatus operation mode: + 0 - Do not do any proptxtstatus flow control + 1 - Use implied credit from a packet status + 2 - Use explicit credit + */ + {"ptxmode", IOV_PROPTXSTATUS_MODE, 0, IOVT_UINT32, 0 }, + {"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, IOVT_UINT32, 0 }, + {"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, IOVT_BOOL, 0 }, + {"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, IOVT_BOOL, 0 }, + {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, IOVT_BOOL, 0 }, + {"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, IOVT_BOOL, 0 }, +#endif /* PROP_TXSTATUS */ + {"bustype", IOV_BUS_TYPE, 0, IOVT_UINT32, 0}, +#ifdef WLMEDIA_HTSF + {"pktdlystatsz", IOV_WLPKTDLYSTAT_SZ, 0, IOVT_UINT8, 0 }, +#endif + {"changemtu", IOV_CHANGEMTU, 0, IOVT_UINT32, 0 }, + {"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, IOVT_BUFFER, + (WLHOST_REORDERDATA_MAXFLOWS + 1) }, +#ifdef DHDTCPACK_SUPPRESS + {"tcpack_suppress", IOV_TCPACK_SUPPRESS, 0, IOVT_UINT8, 0 }, +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef DHD_WMF + {"wmf_bss_enable", IOV_WMF_BSS_ENAB, 0, IOVT_BOOL, 0 }, + {"wmf_ucast_igmp", IOV_WMF_UCAST_IGMP, 0, IOVT_BOOL, 0 }, + {"wmf_mcast_data_sendup", IOV_WMF_MCAST_DATA_SENDUP, 0, IOVT_BOOL, 0 }, +#ifdef WL_IGMP_UCQUERY + {"wmf_ucast_igmp_query", IOV_WMF_UCAST_IGMP_QUERY, (0), IOVT_BOOL, 0 }, +#endif /* WL_IGMP_UCQUERY */ +#ifdef DHD_UCAST_UPNP + {"wmf_ucast_upnp", IOV_WMF_UCAST_UPNP, (0), IOVT_BOOL, 0 }, +#endif /* DHD_UCAST_UPNP */ +#endif /* DHD_WMF */ +#ifdef DHD_L2_FILTER + {"dhcp_unicast", IOV_DHCP_UNICAST, (0), IOVT_BOOL, 0 }, +#endif /* DHD_L2_FILTER */ + {"ap_isolate", IOV_AP_ISOLATE, (0), IOVT_BOOL, 0}, +#ifdef DHD_L2_FILTER + {"block_ping", IOV_BLOCK_PING, (0), IOVT_BOOL, 0}, + {"proxy_arp", IOV_PROXY_ARP, (0), IOVT_BOOL, 0}, + {"grat_arp", IOV_GRAT_ARP, (0), IOVT_BOOL, 0}, +#endif /* DHD_L2_FILTER */ +#ifdef DHD_PSTA + /* PSTA/PSR Mode configuration. 
0: DISABLED 1: PSTA 2: PSR */ + {"psta", IOV_PSTA, 0, IOVT_UINT32, 0}, +#endif /* DHD_PSTA */ + {"op_mode", IOV_CFG80211_OPMODE, 0, IOVT_UINT32, 0 }, + {"assert_type", IOV_ASSERT_TYPE, (0), IOVT_UINT32, 0}, + {"lmtest", IOV_LMTEST, 0, IOVT_UINT32, 0 }, + {NULL, 0, 0, 0, 0 } +}; + +#define DHD_IOVAR_BUF_SIZE 128 + +#ifdef DHD_FW_COREDUMP +void dhd_save_fwdump(dhd_pub_t *dhd_pub, void * buffer, uint32 length) +{ + if (dhd_pub->soc_ram) { +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhd_pub, dhd_pub->soc_ram, dhd_pub->soc_ram_length); +#else + MFREE(dhd_pub->osh, dhd_pub->soc_ram, dhd_pub->soc_ram_length); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + dhd_pub->soc_ram = NULL; + dhd_pub->soc_ram_length = 0; + } + +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub, + DHD_PREALLOC_MEMDUMP_RAM, length); + /* guard the memset: the NULL check is only done below */ + if (dhd_pub->soc_ram) + memset(dhd_pub->soc_ram, 0, length); +#else + dhd_pub->soc_ram = (uint8*) MALLOCZ(dhd_pub->osh, length); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + if (dhd_pub->soc_ram == NULL) { + DHD_ERROR(("%s: Failed to allocate memory for fw crash snapshot.\n", + __FUNCTION__)); + return; + } + + dhd_pub->soc_ram_length = length; + memcpy(dhd_pub->soc_ram, buffer, length); +} +#endif /* DHD_FW_COREDUMP */ + +/* to NDIS developer, the structure dhd_common is redundant, + * please do NOT merge it back from other branches !!! + */ + +static int +dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen) +{ + char eabuf[ETHER_ADDR_STR_LEN]; + + struct bcmstrbuf b; + struct bcmstrbuf *strbuf = &b; + if (!dhdp || !dhdp->prot || !buf) + return BCME_ERROR; + + bcm_binit(strbuf, buf, buflen); + + /* Base DHD info */ + bcm_bprintf(strbuf, "%s\n", dhd_version); + bcm_bprintf(strbuf, "\n"); + bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n", + dhdp->up, dhdp->txoff, dhdp->busstate); + bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n", + dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz); + bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %s\n", + dhdp->iswl, dhdp->drv_version, bcm_ether_ntoa(&dhdp->mac, eabuf)); + bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt); + + bcm_bprintf(strbuf, "dongle stats:\n"); + bcm_bprintf(strbuf, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n", + dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes, + dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped); + bcm_bprintf(strbuf, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n", + dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes, + dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped); + bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast); + + bcm_bprintf(strbuf, "bus stats:\n"); + bcm_bprintf(strbuf, "tx_packets %lu tx_dropped %lu tx_multicast %lu tx_errors %lu\n", + dhdp->tx_packets, dhdp->tx_dropped, dhdp->tx_multicast, dhdp->tx_errors); + bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n", + dhdp->tx_ctlpkts, dhdp->tx_ctlerrs); + bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n", + dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors); + bcm_bprintf(strbuf, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n", + dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped); + bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n", + dhdp->rx_readahead_cnt, dhdp->tx_realloc); + bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n", + dhdp->tx_pktgetfail, dhdp->rx_pktgetfail); 
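+ /* Editorial note (an assumption, not from the original change): this text + * buffer is what the "dump" IOVAR hands back to the dhd utility; dhd_iovars[] + * declares "dump" as an IOVT_BUFFER of DHD_IOCTL_MAXLEN, and each + * bcm_bprintf() presumably consumes strbuf->size, so the BCME_BUFTOOSHORT + * return at the end of this function signals that the dump was truncated. + */ 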
+ bcm_bprintf(strbuf, "\n"); + + /* Add any prot info */ + dhd_prot_dump(dhdp, strbuf); + bcm_bprintf(strbuf, "\n"); + + /* Add any bus info */ + dhd_bus_dump(dhdp, strbuf); + + +#if defined(DHD_LB_STATS) + dhd_lb_stats_dump(dhdp, strbuf); +#endif /* DHD_LB_STATS */ + + return (!strbuf->size ? BCME_BUFTOOSHORT : 0); +} + +void +dhd_dump_to_kernelog(dhd_pub_t *dhdp) +{ + char buf[512]; + + DHD_ERROR(("F/W version: %s\n", fw_version)); + bcm_bprintf_bypass = TRUE; + dhd_dump(dhdp, buf, sizeof(buf)); + bcm_bprintf_bypass = FALSE; +} + +int +dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx) +{ + wl_ioctl_t ioc; + + ioc.cmd = cmd; + ioc.buf = arg; + ioc.len = len; + ioc.set = set; + + return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len); +} + +int +dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval, + int cmd, uint8 set, int ifidx) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = -1; + + /* memset(iovbuf, 0, sizeof(iovbuf)); */ + if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) { + ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx); + if (!ret) { + *pval = ltoh32(*((uint*)iovbuf)); + } else { + DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n", + __FUNCTION__, name, ret)); + } + } else { + DHD_ERROR(("%s: mkiovar %s failed\n", + __FUNCTION__, name)); + } + + return ret; +} + +int +dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val, + int cmd, uint8 set, int ifidx) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = -1; + int lval = htol32(val); + + /* memset(iovbuf, 0, sizeof(iovbuf)); */ + if (bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf))) { + ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx); + if (ret) { + DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n", + __FUNCTION__, name, ret)); + } + } else { + DHD_ERROR(("%s: mkiovar %s failed\n", + __FUNCTION__, name)); + } + + return ret; +} + +int +dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len) +{ + int ret = BCME_ERROR; + unsigned long flags; + + if (dhd_os_proto_block(dhd_pub)) + { +#ifdef DHD_LOG_DUMP + int slen, i, val, rem; + long int lval; + char *pval, *pos, *msg; + char tmp[64]; +#endif /* DHD_LOG_DUMP */ + DHD_GENERAL_LOCK(dhd_pub, flags); + if (dhd_pub->busstate == DHD_BUS_DOWN || + dhd_pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + DHD_ERROR(("%s: returning as busstate=%d\n", + __FUNCTION__, dhd_pub->busstate)); + DHD_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_proto_unblock(dhd_pub); + return -ENODEV; + } + dhd_pub->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_IOVAR; + DHD_GENERAL_UNLOCK(dhd_pub, flags); + +#ifdef DHD_LOG_DUMP + /* WLC_GET_VAR */ + if (ioc->cmd == WLC_GET_VAR) { + memset(tmp, 0, sizeof(tmp)); + bcopy(ioc->buf, tmp, strlen(ioc->buf) + 1); + } +#endif /* DHD_LOG_DUMP */ +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_wl_ioctl); +#endif /* DHD_PCIE_RUNTIMEPM */ +#if defined(WL_WLC_SHIM) + { + struct wl_shim_node *shim = dhd_pub_shim(dhd_pub); + + wl_io_pport_t io_pport; + io_pport.dhd_pub = dhd_pub; + io_pport.ifidx = ifidx; + + ret = wl_shim_ioctl(shim, ioc, len, &io_pport); + if (ret != BCME_OK) { + DHD_TRACE(("%s: wl_shim_ioctl(%d) ERR %d\n", + __FUNCTION__, ioc->cmd, ret)); + } + } +#else + ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len); +#endif /* defined(WL_WLC_SHIM) */ + + if (ret && dhd_pub->up) { + /* Send hang event only if dhd_open() was success */ + dhd_os_check_hang(dhd_pub, ifidx, ret); + } + + if (ret == -ETIMEDOUT 
&& !dhd_pub->up) { + DHD_ERROR(("%s: 'resumed on timeout' error " + "occurred before the interface was" + " brought up\n", __FUNCTION__)); + dhd_pub->busstate = DHD_BUS_DOWN; + } + + DHD_GENERAL_LOCK(dhd_pub, flags); + dhd_pub->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_IOVAR; + dhd_os_busbusy_wake(dhd_pub); + DHD_GENERAL_UNLOCK(dhd_pub, flags); + + dhd_os_proto_unblock(dhd_pub); + +#ifdef DHD_LOG_DUMP + if (ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) { + lval = 0; + slen = strlen(ioc->buf) + 1; + msg = (char*)ioc->buf; + if (ioc->cmd == WLC_GET_VAR) { + bcopy(msg, &lval, sizeof(long int)); + msg = tmp; + } else { + bcopy((msg + slen), &lval, sizeof(long int)); + } + DHD_ERROR_EX(("%s: cmd: %d, msg: %s, val: 0x%lx, len: %d, set: %d\n", + ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR", + ioc->cmd, msg, lval, ioc->len, ioc->set)); + } else { + slen = ioc->len; + if (ioc->buf != NULL) { + val = *(int*)ioc->buf; + pval = (char*)ioc->buf; + pos = tmp; + rem = sizeof(tmp); + memset(tmp, 0, sizeof(tmp)); + for (i = 0; i < slen; i++) { + pos += snprintf(pos, rem, "%02x ", pval[i]); + rem = sizeof(tmp) - (int)(pos - tmp); + if (rem <= 0) { + break; + } + } + DHD_ERROR_EX(("WLC_IOCTL: cmd: %d, val: %d(%s), len: %d, set: %d\n", + ioc->cmd, val, tmp, ioc->len, ioc->set)); + } else { + DHD_ERROR_EX(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd)); + } + } +#endif /* DHD_LOG_DUMP */ + } + + return ret; +} + +uint wl_get_port_num(wl_io_pport_t *io_pport) +{ + return 0; +} + +/* Get bssidx from iovar params + * Input: dhd_pub - pointer to dhd_pub_t + * params - IOVAR params + * Output: idx - BSS index + * val - pointer to the IOVAR arguments + */ +static int +dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, char *params, int *idx, char **val) +{ + char *prefix = "bsscfg:"; + uint32 bssidx; + + if (!(strncmp(params, prefix, strlen(prefix)))) { + /* per bss setting should be prefixed with 'bsscfg:' */ + char *p = (char *)params + strlen(prefix); + + /* Skip Name */ + while (*p != '\0') + p++; + /* consider null */ + p = p + 1; + bcopy(p, &bssidx, sizeof(uint32)); + /* Get corresponding dhd index */ + bssidx = dhd_bssidx2idx(dhd_pub, htod32(bssidx)); + + if (bssidx >= DHD_MAX_IFS) { + DHD_ERROR(("%s: Wrong bssidx provided\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* skip bss idx */ + p += sizeof(uint32); + *val = p; + *idx = bssidx; + } else { + DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__)); + return BCME_ERROR; + } + + return BCME_OK; +} + +#if defined(DHD_DEBUG) && defined(BCMDHDUSB) +/* USB Device console input function */ +int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen) +{ + DHD_TRACE(("%s \n", __FUNCTION__)); + + return dhd_iovar(dhd, 0, "cons", msg, msglen, 1); + +} +#endif /* DHD_DEBUG && BCMDHDUSB */ + +static int +dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name, + void *params, int plen, void *arg, int len, int val_size) +{ + int bcmerror = 0; + int32 int_val = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name)); + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) + goto exit; + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + switch (actionid) { + case IOV_GVAL(IOV_VERSION): + /* Need to have checked buffer length */ + bcm_strncpy_s((char*)arg, len, dhd_version, len); + break; + + case IOV_GVAL(IOV_MSGLEVEL): + int_val = (int32)dhd_msg_level; + bcopy(&int_val, arg, 
val_size); + break; + + case IOV_SVAL(IOV_MSGLEVEL): +#ifdef WL_CFG80211 + /* Enable DHD and WL logs in oneshot */ + if (int_val & DHD_WL_VAL2) + wl_cfg80211_enable_trace(TRUE, int_val & (~DHD_WL_VAL2)); + else if (int_val & DHD_WL_VAL) + wl_cfg80211_enable_trace(FALSE, WL_DBG_DBG); + if (!(int_val & DHD_WL_VAL2)) +#endif /* WL_CFG80211 */ + dhd_msg_level = int_val; + break; + case IOV_GVAL(IOV_BCMERRORSTR): + bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN); + ((char *)arg)[BCME_STRLEN - 1] = 0x00; + break; + + case IOV_GVAL(IOV_BCMERROR): + int_val = (int32)dhd_pub->bcmerror; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_WDTICK): + int_val = (int32)dhd_watchdog_ms; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_WDTICK): + if (!dhd_pub->up) { + bcmerror = BCME_NOTUP; + break; + } + + if (CUSTOM_DHD_WATCHDOG_MS == 0 && int_val == 0) { + dhd_watchdog_ms = (uint)int_val; + } + + dhd_os_wd_timer(dhd_pub, (uint)int_val); + break; + + case IOV_GVAL(IOV_DUMP): + bcmerror = dhd_dump(dhd_pub, arg, len); + break; + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_DCONSOLE_POLL): + int_val = (int32)dhd_console_ms; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DCONSOLE_POLL): + dhd_console_ms = (uint)int_val; + break; + + case IOV_SVAL(IOV_CONS): + if (len > 0) + bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1); + break; +#endif /* DHD_DEBUG */ + + case IOV_SVAL(IOV_CLEARCOUNTS): + dhd_pub->tx_packets = dhd_pub->rx_packets = 0; + dhd_pub->tx_errors = dhd_pub->rx_errors = 0; + dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0; + dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0; + dhd_pub->tx_dropped = 0; + dhd_pub->rx_dropped = 0; + dhd_pub->tx_pktgetfail = 0; + dhd_pub->rx_pktgetfail = 0; + dhd_pub->rx_readahead_cnt = 0; + dhd_pub->tx_realloc = 0; + dhd_pub->wd_dpc_sched = 0; + memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats)); + dhd_bus_clearcounts(dhd_pub); +#ifdef PROP_TXSTATUS + /* clear proptxstatus related counters */ + dhd_wlfc_clear_counts(dhd_pub); +#endif /* PROP_TXSTATUS */ + DHD_LB_STATS_RESET(dhd_pub); + break; + + + case IOV_GVAL(IOV_IOCTLTIMEOUT): { + int_val = (int32)dhd_os_get_ioctl_resp_timeout(); + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + + case IOV_SVAL(IOV_IOCTLTIMEOUT): { + if (int_val <= 0) + bcmerror = BCME_BADARG; + else + dhd_os_set_ioctl_resp_timeout((unsigned int)int_val); + break; + } + + +#ifdef PROP_TXSTATUS + case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): { + bool wlfc_enab = FALSE; + bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab); + if (bcmerror != BCME_OK) + goto exit; + int_val = wlfc_enab ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): { + bool wlfc_enab = FALSE; + bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab); + if (bcmerror != BCME_OK) + goto exit; + + /* wlfc is already set as desired */ + if (wlfc_enab == (int_val == 0 ? 
FALSE : TRUE)) + goto exit; + + if (int_val == TRUE) + bcmerror = dhd_wlfc_init(dhd_pub); + else + bcmerror = dhd_wlfc_deinit(dhd_pub); + + break; + } + case IOV_GVAL(IOV_PROPTXSTATUS_MODE): + bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_MODE): + dhd_wlfc_set_mode(dhd_pub, int_val); + break; + + case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE): + bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE): + dhd_wlfc_set_module_ignore(dhd_pub, int_val); + break; + + case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE): + bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE): + dhd_wlfc_set_credit_ignore(dhd_pub, int_val); + break; + + case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE): + bcmerror = dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE): + dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val); + break; + + case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK): + bcmerror = dhd_wlfc_get_rxpkt_chk(dhd_pub, &int_val); + if (bcmerror != BCME_OK) + goto exit; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK): + dhd_wlfc_set_rxpkt_chk(dhd_pub, int_val); + break; + +#endif /* PROP_TXSTATUS */ + + case IOV_GVAL(IOV_BUS_TYPE): + /* The dhd application queries the driver to check if its usb or sdio. */ +#ifdef BCMDHDUSB + int_val = BUS_TYPE_USB; +#endif +#ifdef BCMSDIO + int_val = BUS_TYPE_SDIO; +#endif +#ifdef PCIE_FULL_DONGLE + int_val = BUS_TYPE_PCIE; +#endif + bcopy(&int_val, arg, val_size); + break; + + +#ifdef WLMEDIA_HTSF + case IOV_GVAL(IOV_WLPKTDLYSTAT_SZ): + int_val = dhd_pub->htsfdlystat_sz; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_WLPKTDLYSTAT_SZ): + dhd_pub->htsfdlystat_sz = int_val & 0xff; + printf("Setting tsfdlystat_sz:%d\n", dhd_pub->htsfdlystat_sz); + break; +#endif + case IOV_SVAL(IOV_CHANGEMTU): + int_val &= 0xffff; + bcmerror = dhd_change_mtu(dhd_pub, int_val, 0); + break; + + case IOV_GVAL(IOV_HOSTREORDER_FLOWS): + { + uint i = 0; + uint8 *ptr = (uint8 *)arg; + uint8 count = 0; + + ptr++; + for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) { + if (dhd_pub->reorder_bufs[i] != NULL) { + *ptr = dhd_pub->reorder_bufs[i]->flow_id; + ptr++; + count++; + } + } + ptr = (uint8 *)arg; + *ptr = count; + break; + } +#ifdef DHDTCPACK_SUPPRESS + case IOV_GVAL(IOV_TCPACK_SUPPRESS): { + int_val = (uint32)dhd_pub->tcpack_sup_mode; + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_TCPACK_SUPPRESS): { + bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val); + break; + } +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef DHD_WMF + case IOV_GVAL(IOV_WMF_BSS_ENAB): { + uint32 bssidx; + dhd_wmf_t *wmf; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + wmf = dhd_wmf_conf(dhd_pub, bssidx); + int_val = wmf->wmf_enable ? 
#ifdef DHDTCPACK_SUPPRESS + case IOV_GVAL(IOV_TCPACK_SUPPRESS): { + int_val = (uint32)dhd_pub->tcpack_sup_mode; + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_TCPACK_SUPPRESS): { + bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val); + break; + } +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef DHD_WMF + case IOV_GVAL(IOV_WMF_BSS_ENAB): { + uint32 bssidx; + dhd_wmf_t *wmf; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + wmf = dhd_wmf_conf(dhd_pub, bssidx); + int_val = wmf->wmf_enable ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_WMF_BSS_ENAB): { + /* Enable/Disable WMF */ + uint32 bssidx; + dhd_wmf_t *wmf; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + ASSERT(val); + bcopy(val, &int_val, sizeof(uint32)); + wmf = dhd_wmf_conf(dhd_pub, bssidx); + if (wmf->wmf_enable == int_val) + break; + if (int_val) { + /* Enable WMF */ + if (dhd_wmf_instance_add(dhd_pub, bssidx) != BCME_OK) { + DHD_ERROR(("%s: Error in creating WMF instance\n", + __FUNCTION__)); + break; + } + if (dhd_wmf_start(dhd_pub, bssidx) != BCME_OK) { + DHD_ERROR(("%s: Failed to start WMF\n", __FUNCTION__)); + break; + } + wmf->wmf_enable = TRUE; + } else { + /* Disable WMF */ + wmf->wmf_enable = FALSE; + dhd_wmf_stop(dhd_pub, bssidx); + dhd_wmf_instance_del(dhd_pub, bssidx); + } + break; + } + case IOV_GVAL(IOV_WMF_UCAST_IGMP): + int_val = dhd_pub->wmf_ucast_igmp ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_WMF_UCAST_IGMP): + if (dhd_pub->wmf_ucast_igmp == int_val) + break; + + if (int_val >= OFF && int_val <= ON) + dhd_pub->wmf_ucast_igmp = int_val; + else + bcmerror = BCME_RANGE; + break; + case IOV_GVAL(IOV_WMF_MCAST_DATA_SENDUP): + int_val = dhd_wmf_mcast_data_sendup(dhd_pub, 0, FALSE, FALSE); + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_WMF_MCAST_DATA_SENDUP): + dhd_wmf_mcast_data_sendup(dhd_pub, 0, TRUE, int_val); + break; + +#ifdef WL_IGMP_UCQUERY + case IOV_GVAL(IOV_WMF_UCAST_IGMP_QUERY): + int_val = dhd_pub->wmf_ucast_igmp_query ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_WMF_UCAST_IGMP_QUERY): + if (dhd_pub->wmf_ucast_igmp_query == int_val) + break; + + if (int_val >= OFF && int_val <= ON) + dhd_pub->wmf_ucast_igmp_query = int_val; + else + bcmerror = BCME_RANGE; + break; +#endif /* WL_IGMP_UCQUERY */ +#ifdef DHD_UCAST_UPNP + case IOV_GVAL(IOV_WMF_UCAST_UPNP): + int_val = dhd_pub->wmf_ucast_upnp ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_WMF_UCAST_UPNP): + if (dhd_pub->wmf_ucast_upnp == int_val) + break; + + if (int_val >= OFF && int_val <= ON) + dhd_pub->wmf_ucast_upnp = int_val; + else + bcmerror = BCME_RANGE; + break; +#endif /* DHD_UCAST_UPNP */ +#endif /* DHD_WMF */ + + +#ifdef DHD_L2_FILTER + case IOV_GVAL(IOV_DHCP_UNICAST): { + uint32 bssidx; + char *val; + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameter, name = %s\n", + __FUNCTION__, name)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_dhcp_unicast_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); + break; + } + case IOV_SVAL(IOV_DHCP_UNICAST): { + uint32 bssidx; + char *val; + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameter, name = %s\n", + __FUNCTION__, name)); + bcmerror = BCME_BADARG; + break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_dhcp_unicast_status(dhd_pub, bssidx, int_val ?
1 : 0); + break; + } + case IOV_GVAL(IOV_BLOCK_PING): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_block_ping_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); + break; + } + case IOV_SVAL(IOV_BLOCK_PING): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_block_ping_status(dhd_pub, bssidx, int_val ? 1 : 0); + break; + } + case IOV_GVAL(IOV_PROXY_ARP): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_parp_status(dhd_pub, bssidx); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_PROXY_ARP): { + uint32 bssidx; + char *val; + char iobuf[32]; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + bcopy(val, &int_val, sizeof(int_val)); + + /* Issue a iovar request to WL to update the proxy arp capability bit + * in the Extended Capability IE of beacons/probe responses. + */ + bcm_mkiovar("proxy_arp_advertise", val, sizeof(int_val), iobuf, + sizeof(iobuf)); + bcmerror = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, iobuf, + sizeof(iobuf), TRUE, bssidx); + + if (bcmerror == BCME_OK) { + dhd_set_parp_status(dhd_pub, bssidx, int_val ? 1 : 0); + } + break; + } + case IOV_GVAL(IOV_GRAT_ARP): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + int_val = dhd_get_grat_arp_status(dhd_pub, bssidx); + memcpy(arg, &int_val, val_size); + break; + } + case IOV_SVAL(IOV_GRAT_ARP): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + memcpy(&int_val, val, sizeof(int_val)); + bcmerror = dhd_set_grat_arp_status(dhd_pub, bssidx, int_val ? 
1 : 0); + break; + } +#endif /* DHD_L2_FILTER */ + case IOV_GVAL(IOV_AP_ISOLATE): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + int_val = dhd_get_ap_isolate(dhd_pub, bssidx); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_AP_ISOLATE): { + uint32 bssidx; + char *val; + + if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) { + DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__)); + bcmerror = BCME_BADARG; + break; + } + + ASSERT(val); + bcopy(val, &int_val, sizeof(uint32)); + dhd_set_ap_isolate(dhd_pub, bssidx, int_val); + break; + } +#ifdef DHD_PSTA + case IOV_GVAL(IOV_PSTA): { + int_val = dhd_get_psta_mode(dhd_pub); + bcopy(&int_val, arg, val_size); + break; + } + case IOV_SVAL(IOV_PSTA): { + if (int_val >= DHD_MODE_PSTA_DISABLED && int_val <= DHD_MODE_PSR) { + dhd_set_psta_mode(dhd_pub, int_val); + } else { + bcmerror = BCME_RANGE; + } + break; + } +#endif /* DHD_PSTA */ + case IOV_GVAL(IOV_CFG80211_OPMODE): { + int_val = (int32)dhd_pub->op_mode; + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + case IOV_SVAL(IOV_CFG80211_OPMODE): { + if (int_val <= 0) + bcmerror = BCME_BADARG; + else + dhd_pub->op_mode = int_val; + break; + } + + case IOV_GVAL(IOV_ASSERT_TYPE): + int_val = g_assert_type; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_ASSERT_TYPE): + g_assert_type = (uint32)int_val; + break; + + + case IOV_GVAL(IOV_LMTEST): { + *(uint32 *)arg = (uint32)lmtest; + break; + } + + case IOV_SVAL(IOV_LMTEST): { + uint32 val = *(uint32 *)arg; + if (val > 50) + bcmerror = BCME_BADARG; + else { + lmtest = (uint)val; + DHD_ERROR(("%s: lmtest %s\n", + __FUNCTION__, (lmtest == FALSE) ? "OFF" : "ON")); + } + break; + } + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } + +exit: + DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror)); + return bcmerror; +} + +/* Store the status of a connection attempt for later retrieval by an iovar */ +void +dhd_store_conn_status(uint32 event, uint32 status, uint32 reason) +{ + /* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID + * because an encryption/rsn mismatch results in both events, and + * the important information is in the WLC_E_PRUNE. + */ + if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL && + dhd_conn_event == WLC_E_PRUNE)) { + dhd_conn_event = event; + dhd_conn_status = status; + dhd_conn_reason = reason; + } +} + +bool +dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec) +{ + void *p; + int eprec = -1; /* precedence to evict from */ + bool discard_oldest; + + /* Fast case, precedence queue is not full and we are also not + * exceeding total queue length + */ + if (!pktq_pfull(q, prec) && !pktq_full(q)) { + pktq_penq(q, prec, pkt); + return TRUE; + } + + /* Determine precedence from which to evict packet, if any */ + if (pktq_pfull(q, prec)) + eprec = prec; + else if (pktq_full(q)) { + p = pktq_peek_tail(q, &eprec); + ASSERT(p); + if (eprec > prec || eprec < 0) + return FALSE; + } + + /* Evict if needed */ + if (eprec >= 0) { + /* Detect queueing to unconfigured precedence */ + ASSERT(!pktq_pempty(q, eprec)); + discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec); + if (eprec == prec && !discard_oldest) + return FALSE; /* refuse newer (incoming) packet */ + /* Evict packet according to discard policy */ + p = discard_oldest ?
pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec); + ASSERT(p); +#ifdef DHDTCPACK_SUPPRESS + if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + PKTFREE(dhdp->osh, p, TRUE); + } + + /* Enqueue */ + p = pktq_penq(q, prec, pkt); + ASSERT(p); + + return TRUE; +} + +/* + * Functions to drop proper pkts from queue: + * If one pkt in queue is non-fragmented, drop first non-fragmented pkt only + * If all pkts in queue are all fragmented, find and drop one whole set fragmented pkts + * If can't find pkts matching upper 2 cases, drop first pkt anyway + */ +bool +dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn) +{ + struct pktq_prec *q = NULL; + void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL; + pkt_frag_t frag_info; + + ASSERT(dhdp && pq); + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + p = q->head; + + if (p == NULL) + return FALSE; + + while (p) { + frag_info = pkt_frag_info(dhdp->osh, p); + if (frag_info == DHD_PKT_FRAG_NONE) { + break; + } else if (frag_info == DHD_PKT_FRAG_FIRST) { + if (first) { + /* No last frag pkt, use prev as last */ + last = prev; + break; + } else { + first = p; + prev_first = prev; + } + } else if (frag_info == DHD_PKT_FRAG_LAST) { + if (first) { + last = p; + break; + } + } + + prev = p; + p = PKTLINK(p); + } + + if ((p == NULL) || ((frag_info != DHD_PKT_FRAG_NONE) && !(first && last))) { + /* Not found matching pkts, use oldest */ + prev = NULL; + p = q->head; + frag_info = 0; + } + + if (frag_info == DHD_PKT_FRAG_NONE) { + first = last = p; + prev_first = prev; + } + + p = first; + while (p) { + next = PKTLINK(p); + q->len--; + pq->len--; + + PKTSETLINK(p, NULL); + + if (fn) + fn(dhdp, prec, p, TRUE); + + if (p == last) + break; + + p = next; + } + + if (prev_first == NULL) { + if ((q->head = next) == NULL) + q->tail = NULL; + } else { + PKTSETLINK(prev_first, next); + if (!next) + q->tail = prev_first; + } + + return TRUE; +} + +static int +dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + int bcmerror = 0; + int val_size; + const bcm_iovar_t *vi = NULL; + uint32 actionid; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + ASSERT(len >= 0); + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + + if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) { + bcmerror = BCME_UNSUPPORTED; + goto exit; + } + + DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__, + name, (set ? "set" : "get"), len, plen)); + + /* set up 'params' pointer in case this is a set command so that + * the convenience int and bool code can be common to set and get + */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + /* all other types are integer sized */ + val_size = sizeof(int); + + actionid = set ? 
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + + bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size); + +exit: + return bcmerror; +} + +int +dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen) +{ + int bcmerror = 0; + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!buf) { + return BCME_BADARG; + } + + dhd_os_dhdiovar_lock(dhd_pub); + switch (ioc->cmd) { + case DHD_GET_MAGIC: + if (buflen < sizeof(int)) + bcmerror = BCME_BUFTOOSHORT; + else + *(int*)buf = DHD_IOCTL_MAGIC; + break; + + case DHD_GET_VERSION: + if (buflen < sizeof(int)) + bcmerror = BCME_BUFTOOSHORT; + else + *(int*)buf = DHD_IOCTL_VERSION; + break; + + case DHD_GET_VAR: + case DHD_SET_VAR: + { + char *arg; + uint arglen; + + DHD_GENERAL_LOCK(dhd_pub, flags); + if (dhd_pub->busstate == DHD_BUS_DOWN || + dhd_pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + /* In platforms like FC19, the FW download is done via IOCTL + * and should not return error for IOCTLs fired before FW + * Download is done + */ + if (dhd_pub->is_fw_download_done) { + DHD_ERROR(("%s: returning as busstate=%d\n", + __FUNCTION__, dhd_pub->busstate)); + DHD_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_dhdiovar_unlock(dhd_pub); + return -ENODEV; + } + } + dhd_pub->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DHD_IOVAR; + DHD_GENERAL_UNLOCK(dhd_pub, flags); +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_ioctl); +#endif /* DHD_PCIE_RUNTIMEPM */ + + /* scan past the name to any arguments */ + for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--) + ; + + if (*arg) { + bcmerror = BCME_BUFTOOSHORT; + goto unlock_exit; + } + + /* account for the NUL terminator */ + arg++, arglen--; + + /* call with the appropriate arguments */ + if (ioc->cmd == DHD_GET_VAR) { + bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen, + buf, buflen, IOV_GET); + } else { + bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, + arg, arglen, IOV_SET); + } + if (bcmerror != BCME_UNSUPPORTED) { + goto unlock_exit; + } + + /* not in generic table, try protocol module */ + if (ioc->cmd == DHD_GET_VAR) { + bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg, + arglen, buf, buflen, IOV_GET); + } else { + bcmerror = dhd_prot_iovar_op(dhd_pub, buf, + NULL, 0, arg, arglen, IOV_SET); + } + if (bcmerror != BCME_UNSUPPORTED) { + goto unlock_exit; + } + + /* if still not found, try bus module */ + if (ioc->cmd == DHD_GET_VAR) { + bcmerror = dhd_bus_iovar_op(dhd_pub, buf, + arg, arglen, buf, buflen, IOV_GET); + } else { + bcmerror = dhd_bus_iovar_op(dhd_pub, buf, + NULL, 0, arg, arglen, IOV_SET); + } + } + goto unlock_exit; + + default: + bcmerror = BCME_UNSUPPORTED; + } + dhd_os_dhdiovar_unlock(dhd_pub); + return bcmerror; + +unlock_exit: + DHD_GENERAL_LOCK(dhd_pub, flags); + dhd_pub->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DHD_IOVAR; + dhd_os_busbusy_wake(dhd_pub); + DHD_GENERAL_UNLOCK(dhd_pub, flags); + dhd_os_dhdiovar_unlock(dhd_pub); + return bcmerror; +} + +#ifdef SHOW_EVENTS +#ifdef SHOW_LOGTRACE + +#define MAX_NO_OF_ARG 16 + +#define FMTSTR_SIZE 132 +#define SIZE_LOC_STR 50 +#define MIN_DLEN 4 +#define TAG_BYTES 12 +#define TAG_WORDS 3 +#define ROMSTR_SIZE 200 + + +static int +check_event_log_sequence_number(uint32 seq_no) +{ + int32 diff; + uint32 ret; + static uint32 logtrace_seqnum_prev = 0; + + diff = ntoh32(seq_no)-logtrace_seqnum_prev; + switch (diff) + { + case 0: + ret = -1; /* duplicate packet . 
drop */ + break; + + case 1: + ret =0; /* in order */ + break; + + default: + if ((ntoh32(seq_no) == 0) && + (logtrace_seqnum_prev == 0xFFFFFFFF) ) { /* in-order - Roll over */ + ret = 0; + } else { + + if (diff > 0) { + DHD_EVENT(("WLC_E_TRACE:" + "Event lost (log) seqnum %d nblost %d\n", + ntoh32(seq_no), (diff-1))); + } else { + DHD_EVENT(("WLC_E_TRACE:" + "Event Packets coming out of order!!\n")); + } + ret = 0; + } + } + + logtrace_seqnum_prev = ntoh32(seq_no); + + return ret; +} + +static void +dhd_eventmsg_print(dhd_pub_t *dhd_pub, void *event_data, void *raw_event_ptr, + uint datalen, const char *event_name) +{ + msgtrace_hdr_t hdr; + uint32 nblost; + uint8 count; + char *s, *p; + static uint32 seqnum_prev = 0; + uint32 *log_ptr = NULL; + uchar *buf; + event_log_hdr_t event_hdr; + uint32 i; + int32 j; + + dhd_event_log_t *raw_event = (dhd_event_log_t *) raw_event_ptr; + + char fmtstr_loc_buf[FMTSTR_SIZE] = {0}; + char (*str_buf)[SIZE_LOC_STR] = NULL; + char * str_tmpptr = NULL; + uint32 addr = 0; + uint32 **hdr_ptr = NULL; + uint32 h_i = 0; + uint32 hdr_ptr_len = 0; + + typedef union { + uint32 val; + char * addr; + } u_arg; + u_arg arg[MAX_NO_OF_ARG] = {{0}}; + char *c_ptr = NULL; + char rom_log_str[ROMSTR_SIZE] = {0}; + uint32 rom_str_len = 0; + + BCM_REFERENCE(arg); + + if (!DHD_FWLOG_ON()) + return; + + buf = (uchar *) event_data; + memcpy(&hdr, buf, MSGTRACE_HDRLEN); + + if (hdr.version != MSGTRACE_VERSION) { + DHD_EVENT(("\nMACEVENT: %s [unsupported version --> " + "dhd version:%d dongle version:%d]\n", + event_name, MSGTRACE_VERSION, hdr.version)); + /* Reset datalen to avoid display below */ + datalen = 0; + return; + } + + if (hdr.trace_type == MSGTRACE_HDR_TYPE_MSG) { + /* There are 2 bytes available at the end of data */ + buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0'; + + if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) { + DHD_FWLOG(("WLC_E_TRACE: [Discarded traces in dongle -->" + "discarded_bytes %d discarded_printf %d]\n", + ntoh32(hdr.discarded_bytes), + ntoh32(hdr.discarded_printf))); + } + + nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1; + if (nblost > 0) { + DHD_FWLOG(("WLC_E_TRACE:" + "[Event lost (msg) --> seqnum %d nblost %d\n", + ntoh32(hdr.seqnum), nblost)); + } + seqnum_prev = ntoh32(hdr.seqnum); + + /* Display the trace buffer. Advance from + * \n to \n to avoid display big + * printf (issue with Linux printk ) + */ + p = (char *)&buf[MSGTRACE_HDRLEN]; + while (*p != '\0' && (s = strstr(p, "\n")) != NULL) { + *s = '\0'; + DHD_FWLOG(("[FWLOG] %s\n", p)); + p = s+1; + } + if (*p) + DHD_FWLOG(("[FWLOG] %s", p)); + + /* Reset datalen to avoid display below */ + datalen = 0; + + } else if (hdr.trace_type == MSGTRACE_HDR_TYPE_LOG) { + /* Let the standard event printing work for now */ + uint32 timestamp, seq, pktlen; + + if (check_event_log_sequence_number(hdr.seqnum)) { + + DHD_EVENT(("%s: WLC_E_TRACE:" + "[Event duplicate (log) %d] dropping!!\n", + __FUNCTION__, hdr.seqnum)); + return; /* drop duplicate events */ + } + + p = (char *)&buf[MSGTRACE_HDRLEN]; + datalen -= MSGTRACE_HDRLEN; + pktlen = ltoh16(*((uint16 *)p)); + seq = ltoh16(*((uint16 *)(p + 2))); + p += MIN_DLEN; + datalen -= MIN_DLEN; + timestamp = ltoh32(*((uint32 *)p)); + BCM_REFERENCE(pktlen); + BCM_REFERENCE(seq); + BCM_REFERENCE(timestamp); + + /* + * Allocating max possible number of event TAGs in the received buffer + * considering that each event requires minimum of TAG_BYTES. 
+ */ + hdr_ptr_len = ((datalen/TAG_BYTES)+1) * sizeof(uint32*); + + if ((raw_event->fmts)) { + if (!(str_buf = MALLOCZ(dhd_pub->osh, (MAX_NO_OF_ARG * SIZE_LOC_STR)))) { + DHD_ERROR(("%s: malloc failed str_buf \n", __FUNCTION__)); + } + } + + if (!(hdr_ptr = MALLOCZ(dhd_pub->osh, hdr_ptr_len))) { + DHD_ERROR(("%s: malloc failed hdr_ptr \n", __FUNCTION__)); + } + + + DHD_MSGTRACE_LOG(("EVENT_LOG_HDR[No.%d]: timestamp 0x%08x length = %d\n", + seq, timestamp, pktlen)); + + /* (raw_event->fmts) has value */ + + log_ptr = (uint32 *) (p + datalen); + + /* Store all hdr pointers while parsing from the end of the log buffer. + * Sample format: + * 001d3c54 00000064 00000064 001d3c54 001dba08 035d6ce1 0c540639 + * 001d3c54 00000064 00000064 035d6d89 0c580439 + * In the above example 0c580439 -- 39 is the tag, 04 is the count, + * 580c is the format number. All these uint32 values arrive in + * reverse order, grouped as EL data, so decoding can only walk + * from last to first. + */ + + while (datalen > MIN_DLEN) { + log_ptr--; + datalen -= MIN_DLEN; + event_hdr.t = *log_ptr; + /* + * Check for partially overwritten entries + */ + if (log_ptr - (uint32 *) p < event_hdr.count) { + break; + } + /* + * Check argument count (only when format is valid) + */ + if ((event_hdr.count > MAX_NO_OF_ARG) && + (event_hdr.fmt_num != 0xffff)) { + break; + } + /* + * Check for end of the Frame. + */ + if (event_hdr.tag == EVENT_LOG_TAG_NULL) { + continue; + } + log_ptr[0] = event_hdr.t; + if (h_i < (hdr_ptr_len / sizeof(uint32*))) { + hdr_ptr[h_i++] = log_ptr; + } + + /* Now place the header at the front + * and copy back. + */ + log_ptr -= event_hdr.count; + + c_ptr = NULL; + datalen = datalen - (event_hdr.count * MIN_DLEN); + } + datalen = 0; + + /* Print all logs using the stored hdr pointers in reverse order of the + * EL data, i.e. print the oldest log first and then the rest in order + */ + + for (j = (h_i-1); j >= 0; j--) { + if (!(hdr_ptr[j])) { + break; + } + + event_hdr.t = *hdr_ptr[j]; + + log_ptr = hdr_ptr[j]; + + /* Now place the header at the front + * and copy back.
+ */ + log_ptr -= event_hdr.count; + + if (event_hdr.tag == EVENT_LOG_TAG_ROM_PRINTF) { + + rom_str_len = ((event_hdr.count)-1) * sizeof(uint32); + + if (rom_str_len >= (ROMSTR_SIZE -1)) { + rom_str_len = ROMSTR_SIZE - 1; + } + + /* copy all ascii data for ROM printf to local string */ + memcpy(rom_log_str, log_ptr, rom_str_len); + /* add end of line at last */ + rom_log_str[rom_str_len] = '\0'; + + DHD_MSGTRACE_LOG(("EVENT_LOG_ROM[0x%08x]: %s", + log_ptr[event_hdr.count - 1], rom_log_str)); + + /* Add newline if missing */ + if (rom_log_str[strlen(rom_log_str) - 1] != '\n') { + DHD_EVENT(("\n")); + } + + memset(rom_log_str, 0, ROMSTR_SIZE); + + continue; + } + + /* + * Check For Special Time Stamp Packet + */ + if (event_hdr.tag == EVENT_LOG_TAG_TS) { + DHD_MSGTRACE_LOG(("EVENT_LOG_TS[0x%08x]: SYS:%08x CPU:%08x\n", + log_ptr[event_hdr.count-1], log_ptr[0], log_ptr[1])); + continue; + } + + /* Simply print out event dump buffer (fmt_num = 0xffff) */ + if (!str_buf || event_hdr.fmt_num == 0xffff) { + /* + * Print out raw value if unable to interpret + */ +#ifdef DHD_LOG_DUMP + char buf[256]; + char *pos = buf; + memset(buf, 0, sizeof(buf)); + pos += snprintf(pos, 256, +#else + DHD_MSGTRACE_LOG(( +#endif /* DHD_LOG_DUMP */ + "EVENT_LOG_BUF[0x%08x]: tag=%d len=%d fmt=%04x", + log_ptr[event_hdr.count-1], event_hdr.tag, + event_hdr.count, event_hdr.fmt_num +#ifdef DHD_LOG_DUMP +); +#else +)); +#endif /* DHD_LOG_DUMP */ + + for (count = 0; count < (event_hdr.count-1); count++) { +#ifdef DHD_LOG_DUMP + if (strlen(buf) >= (256 - 1)) { + DHD_MSGTRACE_LOG(("%s\n", buf)); + memset(buf, 0, sizeof(buf)); + pos = buf; + } + pos += snprintf(pos, (256 - (int)(pos-buf)), + " %08x", log_ptr[count]); +#else + if (count % 8 == 0) + DHD_MSGTRACE_LOG(("\n\t%08x", log_ptr[count])); + else + DHD_MSGTRACE_LOG((" %08x", log_ptr[count])); +#endif /* DHD_LOG_DUMP */ + } +#ifdef DHD_LOG_DUMP + DHD_MSGTRACE_LOG(("%s\n", buf)); +#else + DHD_MSGTRACE_LOG(("\n")); +#endif /* DHD_LOG_DUMP */ + continue; + } + + /* Copy the format string to parse %s and add "EVENT_LOG: */ + if ((event_hdr.fmt_num >> 2) < raw_event->num_fmts) { + snprintf(fmtstr_loc_buf, FMTSTR_SIZE, + "EVENT_LOG[0x%08x]: %s", log_ptr[event_hdr.count-1], + raw_event->fmts[event_hdr.fmt_num >> 2]); + c_ptr = fmtstr_loc_buf; + } else { + DHD_ERROR(("%s: fmt number out of range \n", __FUNCTION__)); + continue; + } + + for (count = 0; count < (event_hdr.count-1); count++) { + if (c_ptr != NULL) { + if ((c_ptr = strstr(c_ptr, "%")) != NULL) { + c_ptr++; + } + } + + if ((c_ptr != NULL) && (*c_ptr == 's')) { + if ((raw_event->raw_sstr) && + ((log_ptr[count] > raw_event->rodata_start) && + (log_ptr[count] < raw_event->rodata_end))) { + /* ram static string */ + addr = log_ptr[count] - raw_event->rodata_start; + str_tmpptr = raw_event->raw_sstr + addr; + memcpy(str_buf[count], str_tmpptr, SIZE_LOC_STR); + str_buf[count][SIZE_LOC_STR-1] = '\0'; + arg[count].addr = str_buf[count]; + } else if ((raw_event->rom_raw_sstr) && + ((log_ptr[count] > + raw_event->rom_rodata_start) && + (log_ptr[count] < + raw_event->rom_rodata_end))) { + /* rom static string */ + addr = log_ptr[count] - raw_event->rom_rodata_start; + str_tmpptr = raw_event->rom_raw_sstr + addr; + memcpy(str_buf[count], str_tmpptr, SIZE_LOC_STR); + str_buf[count][SIZE_LOC_STR-1] = '\0'; + arg[count].addr = str_buf[count]; + } else { + /* + * Dynamic string OR + * No data for static string. + * So store all string's address as string. 
+ */ + snprintf(str_buf[count], SIZE_LOC_STR, "(s)0x%x", + log_ptr[count]); + arg[count].addr = str_buf[count]; + } + } else { + /* Other than string */ + arg[count].val = log_ptr[count]; + } + } + + DHD_MSGTRACE_LOG((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3], + arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10], + arg[11], arg[12], arg[13], arg[14], arg[15])); + + if (fmtstr_loc_buf[strlen(fmtstr_loc_buf) - 1] != '\n') { + /* Add newline if missing */ + DHD_MSGTRACE_LOG(("\n")); + } + + memset(fmtstr_loc_buf, 0, FMTSTR_SIZE); + + for (i = 0; i < MAX_NO_OF_ARG; i++) { + arg[i].addr = 0; + } + for (i = 0; i < MAX_NO_OF_ARG; i++) { + memset(str_buf[i], 0, SIZE_LOC_STR); + } + + } + DHD_MSGTRACE_LOG(("\n")); + + if (str_buf) { + MFREE(dhd_pub->osh, str_buf, (MAX_NO_OF_ARG * SIZE_LOC_STR)); + } + + if (hdr_ptr) { + MFREE(dhd_pub->osh, hdr_ptr, hdr_ptr_len); + } + } +} + +#endif /* SHOW_LOGTRACE */ + +static void +wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data, + void *raw_event_ptr, char *eventmask) +{ + uint i, status, reason; + bool group = FALSE, flush_txq = FALSE, link = FALSE; + bool host_data = FALSE; /* prints event data after the case when set */ + const char *auth_str; + const char *event_name; + uchar *buf; + char err_msg[256], eabuf[ETHER_ADDR_STR_LEN]; + uint event_type, flags, auth_type, datalen; + + event_type = ntoh32(event->event_type); + flags = ntoh16(event->flags); + status = ntoh32(event->status); + reason = ntoh32(event->reason); + BCM_REFERENCE(reason); + auth_type = ntoh32(event->auth_type); + datalen = ntoh32(event->datalen); + + /* debug dump of event messages */ + snprintf(eabuf, sizeof(eabuf), "%02x:%02x:%02x:%02x:%02x:%02x", + (uchar)event->addr.octet[0]&0xff, + (uchar)event->addr.octet[1]&0xff, + (uchar)event->addr.octet[2]&0xff, + (uchar)event->addr.octet[3]&0xff, + (uchar)event->addr.octet[4]&0xff, + (uchar)event->addr.octet[5]&0xff); + + event_name = bcmevent_get_name(event_type); + BCM_REFERENCE(event_name); + + if (flags & WLC_EVENT_MSG_LINK) + link = TRUE; + if (flags & WLC_EVENT_MSG_GROUP) + group = TRUE; + if (flags & WLC_EVENT_MSG_FLUSHTXQ) + flush_txq = TRUE; + + switch (event_type) { + case WLC_E_START: + case WLC_E_DEAUTH: + case WLC_E_DISASSOC: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + + case WLC_E_ASSOC_IND: + case WLC_E_REASSOC_IND: + + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + + case WLC_E_ASSOC: + case WLC_E_REASSOC: + if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf)); + } else if (status == WLC_E_STATUS_TIMEOUT) { + DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n", + event_name, eabuf, (int)reason)); + } else { + DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n", + event_name, eabuf, (int)status)); + } + break; + + case WLC_E_DEAUTH_IND: + case WLC_E_DISASSOC_IND: + DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason)); + break; + + case WLC_E_AUTH: + case WLC_E_AUTH_IND: + if (auth_type == DOT11_OPEN_SYSTEM) + auth_str = "Open System"; + else if (auth_type == DOT11_SHARED_KEY) + auth_str = "Shared Key"; + else { + snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type); + auth_str = err_msg; + } + if (event_type == WLC_E_AUTH_IND) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str)); + } else if (status == 
WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n", + event_name, eabuf, auth_str)); + } else if (status == WLC_E_STATUS_TIMEOUT) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n", + event_name, eabuf, auth_str)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n", + event_name, eabuf, auth_str, (int)reason)); + } + BCM_REFERENCE(auth_str); + + break; + + case WLC_E_JOIN: + case WLC_E_ROAM: + case WLC_E_SET_SSID: + if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, failed\n", event_name)); + } else if (status == WLC_E_STATUS_NO_NETWORKS) { + DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name)); + } else { + DHD_EVENT(("MACEVENT: %s, unexpected status %d\n", + event_name, (int)status)); + } + break; + + case WLC_E_BEACON_RX: + if (status == WLC_E_STATUS_SUCCESS) { + DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name)); + } else if (status == WLC_E_STATUS_FAIL) { + DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name)); + } else { + DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status)); + } + break; + + case WLC_E_LINK: + DHD_EVENT(("MACEVENT: %s %s\n", event_name, link?"UP":"DOWN")); + BCM_REFERENCE(link); + break; + + case WLC_E_MIC_ERROR: + DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n", + event_name, eabuf, group, flush_txq)); + BCM_REFERENCE(group); + BCM_REFERENCE(flush_txq); + break; + + case WLC_E_ICV_ERROR: + case WLC_E_UNICAST_DECODE_ERROR: + case WLC_E_MULTICAST_DECODE_ERROR: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", + event_name, eabuf)); + break; + + case WLC_E_TXFAIL: + DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name, eabuf, status)); + break; + + case WLC_E_ASSOC_REQ_IE: + case WLC_E_ASSOC_RESP_IE: + case WLC_E_PMKID_CACHE: + case WLC_E_SCAN_COMPLETE: + DHD_EVENT(("MACEVENT: %s\n", event_name)); + break; + + case WLC_E_PFN_NET_FOUND: + case WLC_E_PFN_NET_LOST: + case WLC_E_PFN_SCAN_NONE: + case WLC_E_PFN_SCAN_ALLGONE: + case WLC_E_PFN_GSCAN_FULL_RESULT: + case WLC_E_PFN_SWC: + DHD_EVENT(("PNOEVENT: %s\n", event_name)); + break; + + case WLC_E_PSK_SUP: + case WLC_E_PRUNE: + DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n", + event_name, (int)status, (int)reason)); + break; + +#ifdef WIFI_ACT_FRAME + case WLC_E_ACTION_FRAME: + DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf)); + break; +#endif /* WIFI_ACT_FRAME */ + +#ifdef SHOW_LOGTRACE + case WLC_E_TRACE: + { + dhd_eventmsg_print(dhd_pub, event_data, raw_event_ptr, datalen, event_name); + break; + } +#endif /* SHOW_LOGTRACE */ + + case WLC_E_RSSI: + DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data)))); + break; + + case WLC_E_SERVICE_FOUND: + case WLC_E_P2PO_ADD_DEVICE: + case WLC_E_P2PO_DEL_DEVICE: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; + +#ifdef BT_WIFI_HANDOBER + case WLC_E_BT_WIFI_HANDOVER_REQ: + DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf)); + break; +#endif + + case WLC_E_CCA_CHAN_QUAL: + if (datalen) { + buf = (uchar *) event_data; + DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d, " + "channel 0x%02x \n", event_name, event_type, eabuf, (int)status, + (int)reason, (int)auth_type, *(buf + 4))); + } + break; + case WLC_E_ESCAN_RESULT: + { +#ifndef DHD_IFDEBUG + DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d \n", + event_name, event_type, eabuf, (int)status)); +#endif + } + break; + default: + DHD_EVENT(("MACEVENT: 
%s %d, MAC %s, status %d, reason %d, auth %d\n", + event_name, event_type, eabuf, (int)status, (int)reason, + (int)auth_type)); + break; + } + + /* show any appended data if message level is set to bytes or host_data is set */ + if ((DHD_BYTES_ON() || (host_data == TRUE)) && DHD_EVENT_ON() && datalen) { + buf = (uchar *) event_data; + BCM_REFERENCE(buf); + DHD_EVENT((" data (%d) : ", datalen)); + for (i = 0; i < datalen; i++) + DHD_EVENT((" 0x%02x ", *buf++)); + DHD_EVENT(("\n")); + } +} +#endif /* SHOW_EVENTS */ + +/* Stub for now. Will become real function as soon as shim + * is being integrated to Android, Linux etc. + */ +int +wl_event_process_default(wl_event_msg_t *event, struct wl_evt_pport *evt_pport) +{ + return BCME_OK; +} + +int +wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, void **data_ptr, void *raw_event) +{ + wl_evt_pport_t evt_pport; + wl_event_msg_t event; + + /* make sure it is a BRCM event pkt and record event data */ + int ret = wl_host_event_get_data(pktdata, &event, data_ptr); + if (ret != BCME_OK) { + return ret; + } + + /* convert event from network order to host order */ + wl_event_to_host_order(&event); + + /* record event params to evt_pport */ + evt_pport.dhd_pub = dhd_pub; + evt_pport.ifidx = ifidx; + evt_pport.pktdata = pktdata; + evt_pport.data_ptr = data_ptr; + evt_pport.raw_event = raw_event; + +#if defined(WL_WLC_SHIM) && defined(WL_WLC_SHIM_EVENTS) + { + struct wl_shim_node *shim = dhd_pub_shim(dhd_pub); + ASSERT(shim); + ret = wl_shim_event_process(shim, &event, &evt_pport); + } +#else + ret = wl_event_process_default(&event, &evt_pport); +#endif + + return ret; +} + +/* Check whether packet is a BRCM event pkt. If it is, record event data. */ +int +wl_host_event_get_data(void *pktdata, wl_event_msg_t *event, void **data_ptr) +{ + bcm_event_t *pvt_data = (bcm_event_t *)pktdata; + + if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) { + DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */ + if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) { + DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__)); + return BCME_ERROR; + } + + *data_ptr = &pvt_data[1]; + + /* memcpy since BRCM event pkt may be unaligned. 
*/ + memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t)); + + return BCME_OK; +} + +int +wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, size_t pktlen, + wl_event_msg_t *event, void **data_ptr, void *raw_event) +{ + bcm_event_t *pvt_data; + uint8 *event_data; + uint32 type, status, datalen; + uint16 flags; + int evlen; + + /* make sure it is a BRCM event pkt and record event data */ + int ret = wl_host_event_get_data(pktdata, event, data_ptr); + if (ret != BCME_OK) { + return ret; + } + + if (pktlen < sizeof(bcm_event_t)) + return (BCME_ERROR); + + pvt_data = (bcm_event_t *)pktdata; + event_data = *data_ptr; + + type = ntoh32_ua((void *)&event->event_type); + flags = ntoh16_ua((void *)&event->flags); + status = ntoh32_ua((void *)&event->status); + + datalen = ntoh32_ua((void *)&event->datalen); + if (datalen > pktlen) + return (BCME_ERROR); + + evlen = datalen + sizeof(bcm_event_t); + if (evlen > pktlen) { + return (BCME_ERROR); + } + + switch (type) { +#ifdef PROP_TXSTATUS + case WLC_E_FIFO_CREDIT_MAP: + dhd_wlfc_enable(dhd_pub); + dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data); + WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): " + "(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1], + event_data[2], + event_data[3], event_data[4], event_data[5])); + break; + + case WLC_E_BCMC_CREDIT_SUPPORT: + dhd_wlfc_BCMCCredit_support_event(dhd_pub); + break; +#endif + + case WLC_E_IF: + { + struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data; + + /* Ignore the event if NOIF is set */ + if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) { + DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n")); + return (BCME_UNSUPPORTED); + } +#ifdef PCIE_FULL_DONGLE + dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx, + ifevent->opcode, ifevent->role); +#endif +#ifdef PROP_TXSTATUS + { + uint8* ea = pvt_data->eth.ether_dhost; + WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, " + "[%02x:%02x:%02x:%02x:%02x:%02x]\n", + ifevent->ifidx, + ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"), + ((ifevent->role == 0) ? "STA":"AP "), + ea[0], ea[1], ea[2], ea[3], ea[4], ea[5])); + (void)ea; + + if (ifevent->opcode == WLC_E_IF_CHANGE) + dhd_wlfc_interface_event(dhd_pub, + eWLFC_MAC_ENTRY_ACTION_UPDATE, + ifevent->ifidx, ifevent->role, ea); + else + dhd_wlfc_interface_event(dhd_pub, + ((ifevent->opcode == WLC_E_IF_ADD) ? 
+ eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL), + ifevent->ifidx, ifevent->role, ea); + + /* dhd already has created an interface by default, for 0 */ + if (ifevent->ifidx == 0) + break; + } +#endif /* PROP_TXSTATUS */ + + if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) { + if (ifevent->opcode == WLC_E_IF_ADD) { + if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname, + event->addr.octet)) { + + DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d %s\n", + __FUNCTION__, ifevent->ifidx, event->ifname)); + return (BCME_ERROR); + } + } else if (ifevent->opcode == WLC_E_IF_DEL) { + dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname, + event->addr.octet); + } else if (ifevent->opcode == WLC_E_IF_CHANGE) { +#ifdef WL_CFG80211 + wl_cfg80211_notify_ifchange(ifevent->ifidx, + event->ifname, event->addr.octet, ifevent->bssidx); +#endif /* WL_CFG80211 */ + } + } else { +#if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211) + DHD_ERROR(("%s: Invalid ifidx %d for %s\n", + __FUNCTION__, ifevent->ifidx, event->ifname)); +#endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */ + } + /* send up the if event: btamp user needs it */ + *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname); + /* push up to external supp/auth */ + dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx); + break; + } + +#ifdef WLMEDIA_HTSF + case WLC_E_HTSFSYNC: + htsf_update(dhd_pub->info, event_data); + break; +#endif /* WLMEDIA_HTSF */ + case WLC_E_NDIS_LINK: + break; + case WLC_E_PFN_NET_FOUND: + case WLC_E_PFN_SCAN_ALLGONE: /* share with WLC_E_PFN_BSSID_NET_LOST */ + case WLC_E_PFN_NET_LOST: + break; +#if defined(PNO_SUPPORT) + case WLC_E_PFN_BSSID_NET_FOUND: + case WLC_E_PFN_BEST_BATCHING: + dhd_pno_event_handler(dhd_pub, event, (void *)event_data); + break; +#endif + /* These are what external supplicant/authenticator wants */ + case WLC_E_ASSOC_IND: + case WLC_E_AUTH_IND: + case WLC_E_REASSOC_IND: + dhd_findadd_sta(dhd_pub, + dhd_ifname2idx(dhd_pub->info, event->ifname), + &event->addr.octet); + break; +#if defined(DHD_FW_COREDUMP) + case WLC_E_PSM_WATCHDOG: + DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received : \n", __FUNCTION__)); + if (dhd_socram_dump(dhd_pub->bus) != BCME_OK) { + DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__)); + } + break; +#endif + case WLC_E_LINK: +#ifdef PCIE_FULL_DONGLE + if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info, + event->ifname), (uint8)flags) != BCME_OK) + break; + if (!flags) { + dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info, + event->ifname)); + } + /* fall through */ +#endif + case WLC_E_DEAUTH: + case WLC_E_DEAUTH_IND: + case WLC_E_DISASSOC: + case WLC_E_DISASSOC_IND: + DHD_EVENT(("%s: Link event %d, flags %x, status %x\n", + __FUNCTION__, type, flags, status)); +#ifdef PCIE_FULL_DONGLE + if (type != WLC_E_LINK) { + uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname); + uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex); + uint8 del_sta = TRUE; +#ifdef WL_CFG80211 + if (role == WLC_E_IF_ROLE_STA && !wl_cfg80211_is_roam_offload() && + !wl_cfg80211_is_event_from_connected_bssid(event, *ifidx)) { + del_sta = FALSE; + } +#endif /* WL_CFG80211 */ + + if (del_sta) { + dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, + event->ifname), &event->addr.octet); + if (role == WLC_E_IF_ROLE_STA) { + dhd_flow_rings_delete(dhd_pub, ifindex); + } else { + dhd_flow_rings_delete_for_peer(dhd_pub, ifindex, + &event->addr.octet[0]); + } + } + } +#endif /* 
PCIE_FULL_DONGLE */ + /* fall through */ + default: + *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname); + /* push up to external supp/auth */ + dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx); + DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n", + __FUNCTION__, type, flags, status)); + BCM_REFERENCE(flags); + BCM_REFERENCE(status); + + break; + } + +#ifdef SHOW_EVENTS + if (DHD_FWLOG_ON() || DHD_EVENT_ON()) { + wl_show_host_event(dhd_pub, event, + (void *)event_data, raw_event, dhd_pub->enable_log); + } +#endif /* SHOW_EVENTS */ + + return (BCME_OK); +} + +void +dhd_print_buf(void *pbuf, int len, int bytes_per_line) +{ +#ifdef DHD_DEBUG + int i, j = 0; + unsigned char *buf = pbuf; + + if (bytes_per_line == 0) { + bytes_per_line = len; + } + + for (i = 0; i < len; i++) { + printf("%2.2x", *buf++); + j++; + if (j == bytes_per_line) { + printf("\n"); + j = 0; + } else { + printf(":"); + } + } + printf("\n"); +#endif /* DHD_DEBUG */ +} +#ifndef strtoul +#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base)) +#endif + +#ifdef PKT_FILTER_SUPPORT +/* Convert user's input in hex pattern to byte-size mask */ +static int +wl_pattern_atoh(char *src, char *dst) +{ + int i; + if (strncmp(src, "0x", 2) != 0 && + strncmp(src, "0X", 2) != 0) { + DHD_ERROR(("Mask invalid format. Needs to start with 0x\n")); + return -1; + } + src = src + 2; /* Skip past 0x */ + if (strlen(src) % 2 != 0) { + DHD_ERROR(("Mask invalid format. Needs to be of even length\n")); + return -1; + } + for (i = 0; *src != '\0'; i++) { + char num[3]; + bcm_strncpy_s(num, sizeof(num), src, 2); + num[2] = '\0'; + dst[i] = (uint8)strtoul(num, NULL, 16); + src += 2; + } + return i; +}
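+/* Usage example (illustrative): wl_pattern_atoh("0x00ff00ff", dst) fills
+ * dst with the four bytes 00 ff 00 ff and returns 4; input without the
+ * leading "0x"/"0X" or with an odd number of hex digits is rejected (-1).
+ */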
+ +void +dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode) +{ + char *argv[8]; + int i = 0; + const char *str; + int buf_len; + int str_len; + char *arg_save = 0, *arg_org = 0; + int rc; + char buf[32] = {0}; + wl_pkt_filter_enable_t enable_parm; + wl_pkt_filter_enable_t * pkt_filterp; + + if (!arg) + return; + + if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) { + DHD_ERROR(("%s: malloc failed\n", __FUNCTION__)); + goto fail; + } + arg_org = arg_save; + memcpy(arg_save, arg, strlen(arg) + 1); + + argv[i] = bcmstrtok(&arg_save, " ", 0); + + i = 0; + if (argv[i] == NULL) { + DHD_ERROR(("No args provided\n")); + goto fail; + } + + str = "pkt_filter_enable"; + str_len = strlen(str); + bcm_strncpy_s(buf, sizeof(buf) - 1, str, sizeof(buf) - 1); + buf[ sizeof(buf) - 1 ] = '\0'; + buf_len = str_len + 1; + + pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1); + + /* Parse packet filter id. */ + enable_parm.id = htod32(strtoul(argv[i], NULL, 0)); + + /* Parse enable/disable value. */ + enable_parm.enable = htod32(enable); + + buf_len += sizeof(enable_parm); + memcpy((char *)pkt_filterp, + &enable_parm, + sizeof(enable_parm)); + + /* Enable/disable the specified filter. */ + rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0); + rc = rc >= 0 ? 0 : rc; + if (rc) + DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n", + __FUNCTION__, arg, rc)); + else + DHD_TRACE(("%s: successfully added pktfilter %s\n", + __FUNCTION__, arg)); + + /* Control the master mode */ + rc = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_mode", + master_mode, WLC_SET_VAR, TRUE, 0); + rc = rc >= 0 ? 0 : rc; + if (rc) + DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n", + __FUNCTION__, arg, rc)); + +fail: + if (arg_org) + MFREE(dhd->osh, arg_org, strlen(arg) + 1); +} + +void +dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg) +{ + const char *str; + wl_pkt_filter_t pkt_filter; + wl_pkt_filter_t *pkt_filterp; + int buf_len; + int str_len; + int rc; + uint32 mask_size; + uint32 pattern_size; + char *argv[8], * buf = 0; + int i = 0; + char *arg_save = 0, *arg_org = 0; +#define BUF_SIZE 2048 + + if (!arg) + return; + + if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) { + DHD_ERROR(("%s: malloc failed\n", __FUNCTION__)); + goto fail; + } + + arg_org = arg_save; + + if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) { + DHD_ERROR(("%s: malloc failed\n", __FUNCTION__)); + goto fail; + } + + memcpy(arg_save, arg, strlen(arg) + 1); + + if (strlen(arg) > BUF_SIZE) { + DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), BUF_SIZE)); + goto fail; + } + + argv[i] = bcmstrtok(&arg_save, " ", 0); + while (argv[i++]) + argv[i] = bcmstrtok(&arg_save, " ", 0); + + i = 0; + if (argv[i] == NULL) { + DHD_ERROR(("No args provided\n")); + goto fail; + } + + str = "pkt_filter_add"; + str_len = strlen(str); + bcm_strncpy_s(buf, BUF_SIZE, str, str_len); + buf[ str_len ] = '\0'; + buf_len = str_len + 1; + + pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1); + + /* Parse packet filter id. */ + pkt_filter.id = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Polarity not provided\n")); + goto fail; + } + + /* Parse filter polarity. */ + pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Filter type not provided\n")); + goto fail; + } + + /* Parse filter type. */ + pkt_filter.type = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Offset not provided\n")); + goto fail; + } + + /* Parse pattern filter offset. */ + pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Bitmask not provided\n")); + goto fail; + } + + /* Parse pattern filter mask. */ + mask_size = + htod32(wl_pattern_atoh(argv[i], (char *) pkt_filterp->u.pattern.mask_and_pattern)); + + if (argv[++i] == NULL) { + DHD_ERROR(("Pattern not provided\n")); + goto fail; + } + + /* Parse pattern filter pattern. */ + pattern_size = + htod32(wl_pattern_atoh(argv[i], + (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size])); + + if (mask_size != pattern_size) { + DHD_ERROR(("Mask and pattern not the same size\n")); + goto fail; + } + + pkt_filter.u.pattern.size_bytes = mask_size; + buf_len += WL_PKT_FILTER_FIXED_LEN; + buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size); + + /* Filter attributes are set in a local variable (pkt_filter), and + ** then memcpy'ed into the buffer (pkt_filterp) since there is no + ** guarantee that the buffer is properly aligned. + */ + memcpy((char *)pkt_filterp, + &pkt_filter, + WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN); + + rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0); + rc = rc >= 0 ? 0 : rc; + + if (rc) + DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n", + __FUNCTION__, arg, rc)); + else + DHD_TRACE(("%s: successfully added pktfilter %s\n", + __FUNCTION__, arg)); + +fail: + if (arg_org) + MFREE(dhd->osh, arg_org, strlen(arg) + 1); + + if (buf) + MFREE(dhd->osh, buf, BUF_SIZE); +}
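+/* Argument format (illustrative sketch, matching the parsing order above):
+ * the string is "<id> <polarity> <type> <offset> <mask> <pattern>", e.g.
+ * "100 0 0 0 0xffffffffffff 0x01005e000000"
+ * adds filter 100 matching six bytes at offset 0 against an example
+ * pattern; mask and pattern must be the same length.
+ */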
+ +void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id) +{ + int ret; + + ret = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_delete", + id, WLC_SET_VAR, TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n", + __FUNCTION__, id, ret)); + } +} +#endif /* PKT_FILTER_SUPPORT */ + +/* ========================== */ +/* ==== ARP OFFLOAD SUPPORT = */ +/* ========================== */ +#ifdef ARP_OFFLOAD_SUPPORT +void +dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode) +{ + int retcode; + + retcode = dhd_wl_ioctl_set_intiovar(dhd, "arp_ol", + arp_mode, WLC_SET_VAR, TRUE, 0); + + retcode = retcode >= 0 ? 0 : retcode; + if (retcode) + DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n", + __FUNCTION__, arp_mode, retcode)); + else + DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n", + __FUNCTION__, arp_mode)); +} + +void +dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable) +{ + int retcode; + + retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe", + arp_enable, WLC_SET_VAR, TRUE, 0); + + retcode = retcode >= 0 ? 0 : retcode; + if (retcode) + DHD_TRACE(("%s: failed to enable ARP offload to %d, retcode = %d\n", + __FUNCTION__, arp_enable, retcode)); + else + DHD_TRACE(("%s: successfully enabled ARP offload to %d\n", + __FUNCTION__, arp_enable)); + if (arp_enable) { + uint32 version; + retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version", + &version, WLC_GET_VAR, FALSE, 0); + if (retcode) { + DHD_INFO(("%s: fail to get version (maybe version 1): retcode = %d\n", + __FUNCTION__, retcode)); + dhd->arp_version = 1; + } + else { + DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version)); + dhd->arp_version = version; + } + } +} + +void +dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx) +{ + int ret = 0; + int iov_len = 0; + char iovbuf[DHD_IOVAR_BUF_SIZE]; + + if (dhd == NULL) return; + if (dhd->arp_version == 1) + idx = 0; + + iov_len = bcm_mkiovar("arp_table_clear", 0, 0, iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return; + } + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx)) < 0) + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); +} + +void +dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx) +{ + int ret = 0; + int iov_len = 0; + char iovbuf[DHD_IOVAR_BUF_SIZE]; + + if (dhd == NULL) return; + if (dhd->arp_version == 1) + idx = 0; + + iov_len = bcm_mkiovar("arp_hostip_clear", 0, 0, iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return; + } + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx)) < 0) + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); +} + +void +dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx) +{ + int iov_len = 0; + char iovbuf[DHD_IOVAR_BUF_SIZE]; + int retcode; + + + if (dhd == NULL) return; + if (dhd->arp_version == 1) + idx = 0; + iov_len = bcm_mkiovar("arp_hostip", (char *)&ipaddr, + sizeof(ipaddr), iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return; + } + retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + + if (retcode) + DHD_TRACE(("%s: ARP ip addr add failed, retcode = %d\n", + __FUNCTION__, retcode)); + else + DHD_TRACE(("%s: ARP host ipaddr entry added \n", + __FUNCTION__)); +}
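+/* Buffer layout sketch (illustrative; bcm_mkiovar packs the NUL-terminated
+ * iovar name followed by the raw payload): for "arp_hostip" with the IPv4
+ * address 192.168.1.2 (stored in network order) the WLC_SET_VAR buffer is
+ * 'a' 'r' 'p' '_' 'h' 'o' 's' 't' 'i' 'p' '\0' c0 a8 01 02
+ */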
+ +int +dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx) +{ + int retcode, i; + int iov_len; + uint32 *ptr32 = buf; + bool clr_bottom = FALSE; + + if (!buf) + return -1; + if (dhd == NULL) return -1; + if (dhd->arp_version == 1) + idx = 0; + + iov_len = bcm_mkiovar("arp_hostip", 0, 0, buf, buflen); + BCM_REFERENCE(iov_len); + retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, buflen, FALSE, idx); + + if (retcode) { + DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n", + __FUNCTION__, retcode)); + + return -1; + } + + /* clean up the buf, ascii reminder */ + for (i = 0; i < MAX_IPV4_ENTRIES; i++) { + if (!clr_bottom) { + if (*ptr32 == 0) + clr_bottom = TRUE; + } else { + *ptr32 = 0; + } + ptr32++; + } + + return 0; +} +#endif /* ARP_OFFLOAD_SUPPORT */ + +/* + * Neighbor Discovery Offload: enable NDO feature + * Called by ipv6 event handler when interface comes up/goes down + */ +int +dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable) +{ + int retcode; + + if (dhd == NULL) + return -1; + + retcode = dhd_wl_ioctl_set_intiovar(dhd, "ndoe", + ndo_enable, WLC_SET_VAR, TRUE, 0); + if (retcode) + DHD_ERROR(("%s: failed to enable ndo to %d, retcode = %d\n", + __FUNCTION__, ndo_enable, retcode)); + else + DHD_TRACE(("%s: successfully enabled ndo offload to %d\n", + __FUNCTION__, ndo_enable)); + + return retcode; +} + +/* + * Neighbor Discovery Offload: add a host IPv6 address + * Called by ipv6 event handler when interface comes up + */ +int +dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx) +{ + int iov_len = 0; + char iovbuf[DHD_IOVAR_BUF_SIZE]; + int retcode; + + if (dhd == NULL) + return -1; + + iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr, + IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return -1; + } + retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + + if (retcode) + DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n", + __FUNCTION__, retcode)); + else + DHD_TRACE(("%s: ndo ipaddr entry added \n", + __FUNCTION__)); + + return retcode; +} +/* + * Neighbor Discovery Offload: clear host IPv6 addresses + * Called by ipv6 event handler when interface goes down + */ +int +dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx) +{ + int iov_len = 0; + char iovbuf[DHD_IOVAR_BUF_SIZE]; + int retcode; + + if (dhd == NULL) + return -1; + + iov_len = bcm_mkiovar("nd_hostip_clear", NULL, + 0, iovbuf, sizeof(iovbuf)); + if (!iov_len) { + DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n", + __FUNCTION__, sizeof(iovbuf))); + return -1; + } + retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx); + + if (retcode) + DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n", + __FUNCTION__, retcode)); + else + DHD_TRACE(("%s: ndo ipaddr entry removed \n", + __FUNCTION__)); + + return retcode; +} + + +
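+/* Typical call sequence (illustrative sketch, not driver code): from an
+ * IPv6 address notifier, where ipv6_addr is a 16-byte buffer holding the
+ * new address and ifidx the interface index:
+ *
+ * if (dhd_ndo_enable(dhd, TRUE) == BCME_OK)
+ * (void)dhd_ndo_add_ip(dhd, ipv6_addr, ifidx);
+ */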
/* + * Returns TRUE if associated, FALSE if not associated + */ +bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval) +{ + char bssid[6], zbuf[6]; + int ret = -1; + + bzero(bssid, 6); + bzero(zbuf, 6); + + ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid, + ETHER_ADDR_LEN, FALSE, ifidx); + DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret)); + + if (ret == BCME_NOTASSOCIATED) { + DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__, ret)); + } + + if (retval) + *retval = ret; + + if (ret < 0) + return FALSE; + + if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) == 0)) { + DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__)); + return FALSE; + } + return TRUE; +} + +/* Function to estimate possible DTIM_SKIP value */ +int +dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd) +{ + int bcn_li_dtim = 1; /* default: no dtim skip */ + int ret = -1; + int dtim_period = 0; + int ap_beacon = 0; +#ifndef ENABLE_MAX_DTIM_IN_SUSPEND + int allowed_skip_dtim_cnt = 0; +#endif /* !ENABLE_MAX_DTIM_IN_SUSPEND */ + /* Check if associated */ + if (dhd_is_associated(dhd, 0, NULL) == FALSE) { + DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret)); + goto exit; + } + + /* read associated AP beacon interval */ + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD, + &ap_beacon, sizeof(ap_beacon), FALSE, 0)) < 0) { + DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret)); + goto exit; + } + + /* read associated AP's dtim setup */ + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD, + &dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) { + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); + goto exit; + } + + /* if not associated just exit */ + if (dtim_period == 0) { + goto exit; + } + +#ifdef ENABLE_MAX_DTIM_IN_SUSPEND + bcn_li_dtim = (int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period)); + if (bcn_li_dtim == 0) { + bcn_li_dtim = 1; + } +#else /* ENABLE_MAX_DTIM_IN_SUSPEND */ + /* attempt to use the platform-defined dtim skip interval */ + bcn_li_dtim = dhd->suspend_bcn_li_dtim; + + /* check if sta listen interval fits into AP dtim */ + if (dtim_period > CUSTOM_LISTEN_INTERVAL) { + /* AP DTIM too big for our listen interval: no dtim skipping */ + bcn_li_dtim = NO_DTIM_SKIP; + DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n", + __FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL)); + goto exit; + } + + if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) { + allowed_skip_dtim_cnt = MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon); + bcn_li_dtim = (allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP; + } + + if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) { + /* Round up dtim_skip to fit into STA's listen interval */ + bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period); + DHD_TRACE(("%s adjust dtim_skip to %d\n", __FUNCTION__, bcn_li_dtim)); + } +#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */ + + DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n", + __FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL)); + +exit: + return bcn_li_dtim; +}
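+/* Worked example (illustrative): with ap_beacon = 100 TU (~102.4 ms),
+ * dtim_period = 2 and suspend_bcn_li_dtim = 3, the STA would wake every
+ * 2 * 3 = 6 beacons (~614 ms); the clamps above only kick in if that
+ * exceeded CUSTOM_LISTEN_INTERVAL or MAX_DTIM_ALLOWED_INTERVAL.
+ */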
+ +/* Check if the mode supports STA MODE */ +bool dhd_support_sta_mode(dhd_pub_t *dhd) +{ + +#ifdef WL_CFG80211 + if (!(dhd->op_mode & DHD_FLAG_STA_MODE)) + return FALSE; + else +#endif /* WL_CFG80211 */ + return TRUE; +} + +#if defined(KEEP_ALIVE) +int dhd_keep_alive_onoff(dhd_pub_t *dhd) +{ + char buf[32] = {0}; + const char *str; + wl_mkeep_alive_pkt_t mkeep_alive_pkt = {0}; + wl_mkeep_alive_pkt_t *mkeep_alive_pktp; + int buf_len; + int str_len; + int res = -1; + + if (!dhd_support_sta_mode(dhd)) + return res; + + DHD_TRACE(("%s execution\n", __FUNCTION__)); + + str = "mkeep_alive"; + str_len = strlen(str); + strncpy(buf, str, sizeof(buf) - 1); + buf[ sizeof(buf) - 1 ] = '\0'; + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1); + mkeep_alive_pkt.period_msec = CUSTOM_KEEP_ALIVE_SETTING; + buf_len = str_len + 1; + mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); + /* Set up keep-alive id zero for NULL packet generation */ + mkeep_alive_pkt.keep_alive_id = 0; + mkeep_alive_pkt.len_bytes = 0; + buf_len += WL_MKEEP_ALIVE_FIXED_LEN; + bzero(mkeep_alive_pkt.data, sizeof(mkeep_alive_pkt.data)); + /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and + * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no + * guarantee that the buffer is properly aligned. + */ + memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN); + + res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0); + + return res; +} +#endif /* defined(KEEP_ALIVE) */
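+/* Buffer layout sketch (illustrative): the "mkeep_alive" iovar is sent as
+ * the NUL-terminated name followed by the fixed header set above; id 0
+ * with len_bytes 0 requests periodic NULL keep-alive frames:
+ * "mkeep_alive\0" | version | length | period_msec | keep_alive_id=0 | len_bytes=0
+ */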
+/* Android ComboSCAN support */
+
+/*
+ * data parsing from the ComboScan TLV list
+ */
+int
+wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
+	int input_size, int *bytes_left)
+{
+	char* str;
+	uint16 short_temp;
+	uint32 int_temp;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s: invalid parameters\n", __FUNCTION__));
+		return -1;
+	}
+	str = *list_str;
+
+	/* Clear all destination bytes */
+	memset(dst, 0, dst_size);
+	while (*bytes_left > 0) {
+
+		if (str[0] != token) {
+			DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
+				__FUNCTION__, token, str[0], *bytes_left));
+			return -1;
+		}
+
+		*bytes_left -= 1;
+		str += 1;
+
+		if (input_size == 1) {
+			memcpy(dst, str, input_size);
+		}
+		else if (input_size == 2) {
+			/* copy into an aligned temporary, byte-swap, then store */
+			memcpy(&short_temp, str, input_size);
+			short_temp = htod16(short_temp);
+			memcpy(dst, &short_temp, input_size);
+		}
+		else if (input_size == 4) {
+			memcpy(&int_temp, str, input_size);
+			int_temp = htod32(int_temp);
+			memcpy(dst, &int_temp, input_size);
+		}
+
+		*bytes_left -= input_size;
+		str += input_size;
+		*list_str = str;
+		return 1;
+	}
+	return 1;
+}
+
+/*
+ * channel list parsing from the cscan TLV list
+ */
+int
+wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
+	int channel_num, int *bytes_left)
+{
+	char* str;
+	int idx = 0;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s: invalid parameters\n", __FUNCTION__));
+		return -1;
+	}
+	str = *list_str;
+
+	while (*bytes_left > 0) {
+
+		if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
+			*list_str = str;
+			DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+			return idx;
+		}
+		/* Consume the CSCAN_TLV_TYPE_CHANNEL_IE type byte */
+		*bytes_left -= 1;
+		str += 1;
+
+		if (str[0] == 0) {
+			/* All channels */
+			channel_list[idx] = 0x0;
+		}
+		else {
+			channel_list[idx] = (uint16)str[0];
+			DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx]));
+		}
+		*bytes_left -= 1;
+		str += 1;
+
+		if (idx++ > 255) {
+			DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
+			return -1;
+		}
+	}
+
+	*list_str = str;
+	return idx;
+}
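A concrete picture of the cscan TLV stream these parsers walk may help review. In this sketch the tag bytes 'S' and 'C' are assumptions standing in for CSCAN_TLV_TYPE_SSID_IE and CSCAN_TLV_TYPE_CHANNEL_IE from wl_iw.h; the layout (one type byte, then the value bytes) mirrors the loops above.

#include <stdio.h>

int main(void)
{
	/* Assumed tag values for illustration; real ones come from wl_iw.h. */
	const unsigned char TLV_SSID = 'S', TLV_CHANNEL = 'C';
	const unsigned char stream[] = {
		'S', 4, 'h', 'o', 'm', 'e', /* SSID IE: type, length, bytes */
		'C', 6,                     /* channel IE: type, channel 6  */
		'C', 11,                    /* channel IE: type, channel 11 */
	};
	size_t i = 0;

	while (i < sizeof(stream)) {
		if (stream[i] == TLV_SSID) {
			unsigned len = stream[i + 1];
			printf("SSID of %u bytes: %.*s\n", len, (int)len,
				(const char *)&stream[i + 2]);
			i += 2 + len;
		} else if (stream[i] == TLV_CHANNEL) {
			printf("channel %u\n", stream[i + 1]);
			i += 2;
		} else {
			break; /* an unknown tag ends the walk, as in the driver */
		}
	}
	return 0;
}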
+
+/*
+ * SSIDs list parsing from the cscan TLV list
+ */
+int
+wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left)
+{
+	char* str;
+	int idx = 0;
+
+	if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
+		DHD_ERROR(("%s: invalid parameters\n", __FUNCTION__));
+		return -1;
+	}
+	str = *list_str;
+	while (*bytes_left > 0) {
+
+		if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
+			*list_str = str;
+			DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
+			return idx;
+		}
+
+		/* Consume the CSCAN_TLV_TYPE_SSID_IE type byte */
+		*bytes_left -= 1;
+		str += 1;
+
+		if (str[0] == 0) {
+			/* Broadcast SSID */
+			ssid[idx].SSID_len = 0;
+			memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
+			*bytes_left -= 1;
+			str += 1;
+
+			DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left));
+		}
+		else if (str[0] <= DOT11_MAX_SSID_LEN) {
+			/* Get the SSID size */
+			ssid[idx].SSID_len = str[0];
+			*bytes_left -= 1;
+			str += 1;
+
+			/* Get the SSID */
+			if (ssid[idx].SSID_len > *bytes_left) {
+				DHD_ERROR(("%s out of buffer range: len=%d but left=%d\n",
+					__FUNCTION__, ssid[idx].SSID_len, *bytes_left));
+				return -1;
+			}
+
+			memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
+
+			*bytes_left -= ssid[idx].SSID_len;
+			str += ssid[idx].SSID_len;
+			ssid[idx].hidden = TRUE;
+
+			DHD_TRACE(("%s :size=%d left=%d\n",
+				(char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
+		}
+		else {
+			DHD_ERROR(("### SSID size more than %d\n", str[0]));
+			return -1;
+		}
+
+		if (idx++ > max) {
+			DHD_ERROR(("%s number of SSIDs more than %d\n", __FUNCTION__, idx));
+			return -1;
+		}
+	}
+
+	*list_str = str;
+	return idx;
+}
+
+/* Parse a comma-separated list from list_str into the ssid array, starting
+ * at index idx. max specifies the size of the ssid array. Returns the
+ * updated idx; if the returned idx >= max, not all SSIDs fit and the excess
+ * were not copied. Returns -1 on an empty string or an over-length SSID.
+ */
+int
+wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
+{
+	char* str, *ptr;
+
+	if ((list_str == NULL) || (*list_str == NULL))
+		return -1;
+
+	for (str = *list_str; str != NULL; str = ptr) {
+
+		/* check for the next TAG */
+		if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
+			*list_str = str + strlen(GET_CHANNEL);
+			return idx;
+		}
+
+		if ((ptr = strchr(str, ',')) != NULL) {
+			*ptr++ = '\0';
+		}
+
+		if (strlen(str) > DOT11_MAX_SSID_LEN) {
+			DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
+			return -1;
+		}
+
+		if (strlen(str) == 0)
+			ssid[idx].SSID_len = 0;
+
+		if (idx < max) {
+			bzero(ssid[idx].SSID, sizeof(ssid[idx].SSID));
+			strncpy((char*)ssid[idx].SSID, str, sizeof(ssid[idx].SSID) - 1);
+			ssid[idx].SSID_len = strlen(str);
+		}
+		idx++;
+	}
+	return idx;
+}
+
+/*
+ * Parse the channel list from an iwpriv CSCAN command
+ */
+int
+wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
+{
+	int num;
+	int val;
+	char* str;
+	char* endptr = NULL;
+
+	if ((list_str == NULL) || (*list_str == NULL))
+		return -1;
+
+	str = *list_str;
+	num = 0;
+	while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
+		val = (int)strtoul(str, &endptr, 0);
+		if (endptr == str) {
+			printf("could not parse channel number starting at"
+				" substring \"%s\" in list:\n%s\n",
+				str, *list_str);
+			return -1;
+		}
+		str = endptr + strspn(endptr, " ,");
+
+		if (num == channel_num) {
+			DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
+				channel_num, *list_str));
+			return -1;
+		}
+
+		channel_list[num++] = (uint16)val;
+	}
+	*list_str = str;
+	return num;
+}
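The comma-list contract of wl_iw_parse_ssid_list above (split on ',', stop at the next command tag, advance a caller-supplied index) can be exercised in isolation. This is a simplified stand-in, not the driver function; "CH" plays the role of the GET_CHANNEL tag.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char input[] = "homenet,guest,CH 1,6"; /* two SSIDs, then a tag */
	char *str = input, *ptr;
	int idx = 0;

	while (str && strncmp(str, "CH", 2) != 0) {
		/* split on the next comma, as the driver does with strchr */
		if ((ptr = strchr(str, ',')) != NULL)
			*ptr++ = '\0';
		printf("ssid[%d] = \"%s\"\n", idx++, str);
		str = ptr;
	}
	return 0; /* prints homenet and guest, stops at "CH" */
}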
+
+
+/* Given a filename and download type, returns a buffer pointer and length
+ * for download to the f/w. The type can be FW or NVRAM.
+ */
+int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component,
+	char ** buffer, int *length)
+{
+	int ret = BCME_ERROR;
+	int len = 0;
+	int file_len;
+	void *image = NULL;
+	uint8 *buf = NULL;
+
+	/* Point to the cache if available */
+#ifdef CACHE_FW_IMAGES
+	if (component == FW) {
+		if (dhd->cached_fw_length) {
+			len = dhd->cached_fw_length;
+			buf = dhd->cached_fw;
+		}
+	} else if (component == NVRAM) {
+		if (dhd->cached_nvram_length) {
+			len = dhd->cached_nvram_length;
+			buf = dhd->cached_nvram;
+		}
+	} else {
+		return ret;
+	}
+#endif
+	/* No valid cache found for this call */
+	if (!len) {
+		file_len = *length;
+		*length = 0;
+
+		if (file_path) {
+			image = dhd_os_open_image(file_path);
+			if (image == NULL) {
+				goto err;
+			}
+		}
+
+		buf = MALLOCZ(dhd->osh, file_len);
+		if (buf == NULL) {
+			DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+				__FUNCTION__, file_len));
+			goto err;
+		}
+
+		/* Download image */
+		len = dhd_os_get_image_block(buf, file_len, image);
+		if ((len <= 0 || len > file_len)) {
+			MFREE(dhd->osh, buf, file_len);
+			goto err;
+		}
+	}
+
+	ret = BCME_OK;
+	*length = len;
+	*buffer = buf;
+
+	/* Cache on the first call */
+#ifdef CACHE_FW_IMAGES
+	if (component == FW) {
+		if (!dhd->cached_fw_length) {
+			dhd->cached_fw = buf;
+			dhd->cached_fw_length = len;
+		}
+	} else if (component == NVRAM) {
+		if (!dhd->cached_nvram_length) {
+			dhd->cached_nvram = buf;
+			dhd->cached_nvram_length = len;
+		}
+	}
+#endif
+
+err:
+	if (image)
+		dhd_os_close_image(image);
+
+	return ret;
+}
+
+void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length)
+{
+	/* Cached images are kept for reuse, so only free when not caching */
+#ifndef CACHE_FW_IMAGES
+	MFREE(dhd->osh, buffer, length);
+#endif
+}
+/* Parse EAPOL 4-way handshake messages */
+void
+dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction)
+{
+	unsigned char type;
+	int pair, ack, mic, kerr, req, sec, install;
+	unsigned short us_tmp;
+	type = dump_data[18];
+	if (type == 2 || type == 254) {
+		us_tmp = (dump_data[19] << 8) | dump_data[20];
+		pair = 0 != (us_tmp & 0x08);
+		ack = 0 != (us_tmp & 0x80);
+		mic = 0 != (us_tmp & 0x100);
+		kerr = 0 != (us_tmp & 0x400);
+		req = 0 != (us_tmp & 0x800);
+		sec = 0 != (us_tmp & 0x200);
+		install = 0 != (us_tmp & 0x40);
+		if (!sec && !mic && ack && !install && pair && !kerr && !req) {
+			DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M1 of 4way\n",
+				ifname, direction ? "TX" : "RX"));
+		} else if (pair && !install && !ack && mic && !sec && !kerr && !req) {
+			DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M2 of 4way\n",
+				ifname, direction ? "TX" : "RX"));
+		} else if (pair && ack && mic && sec && !kerr && !req) {
+			DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M3 of 4way\n",
+				ifname, direction ? "TX" : "RX"));
+		} else if (pair && !install && !ack && mic && sec && !req && !kerr) {
+			DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M4 of 4way\n",
+				ifname, direction ? "TX" : "RX"));
+		} else {
+			DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
+				ifname, direction ? "TX" : "RX",
+				dump_data[14], dump_data[15], dump_data[30]));
+		}
+	} else {
+		DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
+			ifname, direction ?
"TX" : "RX", + dump_data[14], dump_data[15], dump_data[30])); + } +} diff --git a/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c new file mode 100644 index 000000000000..43a714041d2a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c @@ -0,0 +1,309 @@ +/* + * Customer code to add GPIO control during WLAN start/stop + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_custom_gpio.c 591129 2015-10-07 05:22:14Z $ + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#if defined(WL_WIRELESS_EXT) +#include +#endif + +#define WL_ERROR(x) printf x +#define WL_TRACE(x) + +#if defined(OOB_INTR_ONLY) + +#if defined(BCMLXSDMMC) +extern int sdioh_mmc_irq(int irq); +#endif /* (BCMLXSDMMC) */ + +/* Customer specific Host GPIO defintion */ +static int dhd_oob_gpio_num = -1; + +module_param(dhd_oob_gpio_num, int, 0644); +MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number"); + +/* This function will return: + * 1) return : Host gpio interrupt number per customer platform + * 2) irq_flags_ptr : Type of Host interrupt as Level or Edge + * + * NOTE : + * Customer should check his platform definitions + * and his Host Interrupt spec + * to figure out the proper setting for his platform. + * Broadcom provides just reference settings as example. 
+ * + */ +int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr) +{ + int host_oob_irq = 0; + +#if defined(CUSTOMER_HW2) + host_oob_irq = wifi_platform_get_irq_number(adapter, irq_flags_ptr); + +#else +#if defined(CUSTOM_OOB_GPIO_NUM) + if (dhd_oob_gpio_num < 0) { + dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM; + } +#endif /* CUSTOMER_OOB_GPIO_NUM */ + + if (dhd_oob_gpio_num < 0) { + WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n", + __FUNCTION__)); + return (dhd_oob_gpio_num); + } + + WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n", + __FUNCTION__, dhd_oob_gpio_num)); + +#endif + + return (host_oob_irq); +} +#endif + +/* Customer function to control hw specific wlan gpios */ +int +dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff) +{ + int err = 0; + + return err; +} + +#ifdef GET_CUSTOM_MAC_ENABLE +/* Function to get custom MAC address */ +int +dhd_custom_get_mac_address(void *adapter, unsigned char *buf) +{ + int ret = 0; + + WL_TRACE(("%s Enter\n", __FUNCTION__)); + if (!buf) + return -EINVAL; + + /* Customer access to MAC address stored outside of DHD driver */ +#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) + ret = wifi_platform_get_mac_addr(adapter, buf); +#endif + +#ifdef EXAMPLE_GET_MAC + /* EXAMPLE code */ + { + struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}}; + bcopy((char *)&ea_example, buf, sizeof(struct ether_addr)); + } +#endif /* EXAMPLE_GET_MAC */ + + return ret; +} +#endif /* GET_CUSTOM_MAC_ENABLE */ + +#if !defined(WL_WIRELESS_EXT) +struct cntry_locales_custom { + char iso_abbrev[WLC_CNTRY_BUF_SZ]; /* ISO 3166-1 country abbreviation */ + char custom_locale[WLC_CNTRY_BUF_SZ]; /* Custom firmware locale */ + int32 custom_locale_rev; /* Custom local revisin default -1 */ +}; +#endif /* WL_WIRELESS_EXT */ + +/* Customized Locale table : OPTIONAL feature */ +const struct cntry_locales_custom translate_custom_table[] = { +/* Table should be filled out based on custom platform regulatory requirement */ +#ifdef EXAMPLE_TABLE + {"", "XY", 4}, /* Universal if Country code is unknown or empty */ + {"US", "US", 69}, /* input ISO "US" to : US regrev 69 */ + {"CA", "US", 69}, /* input ISO "CA" to : US regrev 69 */ + {"EU", "EU", 5}, /* European union countries to : EU regrev 05 */ + {"AT", "EU", 5}, + {"BE", "EU", 5}, + {"BG", "EU", 5}, + {"CY", "EU", 5}, + {"CZ", "EU", 5}, + {"DK", "EU", 5}, + {"EE", "EU", 5}, + {"FI", "EU", 5}, + {"FR", "EU", 5}, + {"DE", "EU", 5}, + {"GR", "EU", 5}, + {"HU", "EU", 5}, + {"IE", "EU", 5}, + {"IT", "EU", 5}, + {"LV", "EU", 5}, + {"LI", "EU", 5}, + {"LT", "EU", 5}, + {"LU", "EU", 5}, + {"MT", "EU", 5}, + {"NL", "EU", 5}, + {"PL", "EU", 5}, + {"PT", "EU", 5}, + {"RO", "EU", 5}, + {"SK", "EU", 5}, + {"SI", "EU", 5}, + {"ES", "EU", 5}, + {"SE", "EU", 5}, + {"GB", "EU", 5}, + {"KR", "XY", 3}, + {"AU", "XY", 3}, + {"CN", "XY", 3}, /* input ISO "CN" to : XY regrev 03 */ + {"TW", "XY", 3}, + {"AR", "XY", 3}, + {"MX", "XY", 3}, + {"IL", "IL", 0}, + {"CH", "CH", 0}, + {"TR", "TR", 0}, + {"NO", "NO", 0}, +#endif /* EXMAPLE_TABLE */ +#if defined(CUSTOMER_HW2) +#if defined(BCM4335_CHIP) + {"", "XZ", 11}, /* Universal if Country code is unknown or empty */ +#endif + {"AE", "AE", 1}, + {"AR", "AR", 1}, + {"AT", "AT", 1}, + {"AU", "AU", 2}, + {"BE", "BE", 1}, + {"BG", "BG", 1}, + {"BN", "BN", 1}, + {"CA", "CA", 2}, + {"CH", "CH", 1}, + {"CY", "CY", 1}, + {"CZ", "CZ", 1}, + {"DE", "DE", 3}, + {"DK", "DK", 1}, + {"EE", "EE", 1}, + {"ES", "ES", 1}, + {"FI", "FI", 1}, + 
{"FR", "FR", 1}, + {"GB", "GB", 1}, + {"GR", "GR", 1}, + {"HR", "HR", 1}, + {"HU", "HU", 1}, + {"IE", "IE", 1}, + {"IS", "IS", 1}, + {"IT", "IT", 1}, + {"ID", "ID", 1}, + {"JP", "JP", 8}, + {"KR", "KR", 24}, + {"KW", "KW", 1}, + {"LI", "LI", 1}, + {"LT", "LT", 1}, + {"LU", "LU", 1}, + {"LV", "LV", 1}, + {"MA", "MA", 1}, + {"MT", "MT", 1}, + {"MX", "MX", 1}, + {"NL", "NL", 1}, + {"NO", "NO", 1}, + {"PL", "PL", 1}, + {"PT", "PT", 1}, + {"PY", "PY", 1}, + {"RO", "RO", 1}, + {"SE", "SE", 1}, + {"SI", "SI", 1}, + {"SK", "SK", 1}, + {"TR", "TR", 7}, + {"TW", "TW", 1}, + {"IR", "XZ", 11}, /* Universal if Country code is IRAN, (ISLAMIC REPUBLIC OF) */ + {"SD", "XZ", 11}, /* Universal if Country code is SUDAN */ + {"SY", "XZ", 11}, /* Universal if Country code is SYRIAN ARAB REPUBLIC */ + {"GL", "XZ", 11}, /* Universal if Country code is GREENLAND */ + {"PS", "XZ", 11}, /* Universal if Country code is PALESTINIAN TERRITORY, OCCUPIED */ + {"TL", "XZ", 11}, /* Universal if Country code is TIMOR-LESTE (EAST TIMOR) */ + {"MH", "XZ", 11}, /* Universal if Country code is MARSHALL ISLANDS */ +#ifdef BCM4330_CHIP + {"RU", "RU", 1}, + {"US", "US", 5} +#endif +#endif +}; + + +/* Customized Locale convertor +* input : ISO 3166-1 country abbreviation +* output: customized cspec +*/ +#ifdef CUSTOM_COUNTRY_CODE +void get_customized_country_code(void *adapter, char *country_iso_code, + wl_country_t *cspec, u32 flags) +#else +void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec) +#endif /* CUSTOM_COUNTRY_CODE */ +{ +#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + + struct cntry_locales_custom *cloc_ptr; + + if (!cspec) + return; +#ifdef CUSTOM_COUNTRY_CODE + cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code, + flags); +#else + cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code); +#endif /* CUSTOM_COUNTRY_CODE */ + if (cloc_ptr) { + strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ); + cspec->rev = cloc_ptr->custom_locale_rev; + } + return; +#else + int size, i; + + size = ARRAYSIZE(translate_custom_table); + + if (cspec == 0) + return; + + if (size == 0) + return; + + for (i = 0; i < size; i++) { + if (strcmp(country_iso_code, translate_custom_table[i].iso_abbrev) == 0) { + memcpy(cspec->ccode, + translate_custom_table[i].custom_locale, WLC_CNTRY_BUF_SZ); + cspec->rev = translate_custom_table[i].custom_locale_rev; + return; + } + } +#ifdef EXAMPLE_TABLE + /* if no country code matched return first universal code from translate_custom_table */ + memcpy(cspec->ccode, translate_custom_table[0].custom_locale, WLC_CNTRY_BUF_SZ); + cspec->rev = translate_custom_table[0].custom_locale_rev; +#endif /* EXMAPLE_TABLE */ + return; +#endif /* defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)) */ +} diff --git a/drivers/net/wireless/bcmdhd/dhd_dbg.h b/drivers/net/wireless/bcmdhd/dhd_dbg.h new file mode 100644 index 000000000000..44899819a918 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_dbg.h @@ -0,0 +1,193 @@ +/* + * Debug/trace/assert driver definitions for Dongle Host Driver. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_dbg.h 598059 2015-11-07 07:31:52Z $ + */ + +#ifndef _dhd_dbg_ +#define _dhd_dbg_ + +#if defined(DHD_DEBUG) +#ifdef DHD_LOG_DUMP +extern void dhd_log_dump_print(const char *fmt, ...); +extern char *dhd_log_dump_get_timestamp(void); +#define DHD_ERROR(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + printf args; \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#else +#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) printf args;} while (0) +#endif /* DHD_LOG_DUMP */ +#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0) +#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0) +#define DHD_DATA(args) do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0) +#define DHD_CTL(args) do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0) +#define DHD_TIMER(args) do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0) +#define DHD_HDRS(args) do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0) +#define DHD_BYTES(args) do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0) +#define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0) +#define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0) +#ifdef DHD_LOG_DUMP +#define DHD_EVENT(args) \ +do { \ + if (dhd_msg_level & DHD_EVENT_VAL) { \ + printf args; \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#else +#define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0) +#endif /* DHD_LOG_DUMP */ +#define DHD_BTA(args) do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0) +#define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0) +#define DHD_ARPOE(args) do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0) +#define DHD_REORDER(args) do {if (dhd_msg_level & DHD_REORDER_VAL) printf args;} while (0) +#define DHD_PNO(args) do {if (dhd_msg_level & DHD_PNO_VAL) printf args;} while (0) +#ifdef DHD_LOG_DUMP +#define DHD_MSGTRACE_LOG(args) \ +do { \ + if (dhd_msg_level & DHD_MSGTRACE_VAL) { \ + printf args; \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } 
\ +} while (0) +#else +#define DHD_MSGTRACE_LOG(args) do {if (dhd_msg_level & DHD_MSGTRACE_VAL) printf args;} while (0) +#endif /* DHD_LOG_DUMP */ +#define DHD_FWLOG(args) do {if (dhd_msg_level & DHD_FWLOG_VAL) printf args;} while (0) +#define DHD_RTT(args) do {if (dhd_msg_level & DHD_RTT_VAL) printf args;} while (0) +#define DHD_IOV_INFO(args) do {if (dhd_msg_level & DHD_IOV_INFO_VAL) printf args;} while (0) + +#ifdef DHD_LOG_DUMP +#define DHD_ERROR_EX(args) \ +do { \ + if (dhd_msg_level & DHD_ERROR_VAL) { \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#else +#define DHD_ERROR_EX(args) DHD_ERROR(args) +#endif /* DHD_LOG_DUMP */ + +#ifdef CUSTOMER_HW4_DEBUG +#define DHD_TRACE_HW4 DHD_ERROR +#define DHD_INFO_HW4 DHD_ERROR +#else +#define DHD_TRACE_HW4 DHD_TRACE +#define DHD_INFO_HW4 DHD_INFO +#endif /* CUSTOMER_HW4_DEBUG */ + +#define DHD_ERROR_ON() (dhd_msg_level & DHD_ERROR_VAL) +#define DHD_TRACE_ON() (dhd_msg_level & DHD_TRACE_VAL) +#define DHD_INFO_ON() (dhd_msg_level & DHD_INFO_VAL) +#define DHD_DATA_ON() (dhd_msg_level & DHD_DATA_VAL) +#define DHD_CTL_ON() (dhd_msg_level & DHD_CTL_VAL) +#define DHD_TIMER_ON() (dhd_msg_level & DHD_TIMER_VAL) +#define DHD_HDRS_ON() (dhd_msg_level & DHD_HDRS_VAL) +#define DHD_BYTES_ON() (dhd_msg_level & DHD_BYTES_VAL) +#define DHD_INTR_ON() (dhd_msg_level & DHD_INTR_VAL) +#define DHD_GLOM_ON() (dhd_msg_level & DHD_GLOM_VAL) +#define DHD_EVENT_ON() (dhd_msg_level & DHD_EVENT_VAL) +#define DHD_BTA_ON() (dhd_msg_level & DHD_BTA_VAL) +#define DHD_ISCAN_ON() (dhd_msg_level & DHD_ISCAN_VAL) +#define DHD_ARPOE_ON() (dhd_msg_level & DHD_ARPOE_VAL) +#define DHD_REORDER_ON() (dhd_msg_level & DHD_REORDER_VAL) +#define DHD_NOCHECKDIED_ON() (dhd_msg_level & DHD_NOCHECKDIED_VAL) +#define DHD_PNO_ON() (dhd_msg_level & DHD_PNO_VAL) +#define DHD_FWLOG_ON() (dhd_msg_level & DHD_FWLOG_VAL) +#define DHD_IOV_INFO_ON() (dhd_msg_level & DHD_IOV_INFO_VAL) + +#else /* defined(BCMDBG) || defined(DHD_DEBUG) */ + +#define DHD_ERROR(args) do {printf args;} while (0) +#define DHD_TRACE(args) +#define DHD_INFO(args) +#define DHD_DATA(args) +#define DHD_CTL(args) +#define DHD_TIMER(args) +#define DHD_HDRS(args) +#define DHD_BYTES(args) +#define DHD_INTR(args) +#define DHD_GLOM(args) +#define DHD_EVENT(args) +#define DHD_BTA(args) +#define DHD_ISCAN(args) +#define DHD_ARPOE(args) +#define DHD_REORDER(args) +#define DHD_PNO(args) +#define DHD_MSGTRACE_LOG(args) +#define DHD_FWLOG(args) +#define DHD_IOV_INFO(args) +#define DHD_ERROR_EX(args) DHD_ERROR(args) + +#ifdef CUSTOMER_HW4_DEBUG +#define DHD_TRACE_HW4 DHD_ERROR +#define DHD_INFO_HW4 DHD_ERROR +#else +#define DHD_TRACE_HW4 DHD_TRACE +#define DHD_INFO_HW4 DHD_INFO +#endif /* CUSTOMER_HW4_DEBUG */ + +#define DHD_ERROR_ON() 0 +#define DHD_TRACE_ON() 0 +#define DHD_INFO_ON() 0 +#define DHD_DATA_ON() 0 +#define DHD_CTL_ON() 0 +#define DHD_TIMER_ON() 0 +#define DHD_HDRS_ON() 0 +#define DHD_BYTES_ON() 0 +#define DHD_INTR_ON() 0 +#define DHD_GLOM_ON() 0 +#define DHD_EVENT_ON() 0 +#define DHD_BTA_ON() 0 +#define DHD_ISCAN_ON() 0 +#define DHD_ARPOE_ON() 0 +#define DHD_REORDER_ON() 0 +#define DHD_NOCHECKDIED_ON() 0 +#define DHD_PNO_ON() 0 +#define DHD_FWLOG_ON() 0 +#define DHD_IOV_INFO_ON() 0 + +#endif + +#define DHD_LOG(args) + +#define DHD_BLOG(cp, size) + +#define DHD_NONE(args) +extern int dhd_msg_level; + +/* Defines msg bits */ +#include + +#endif /* _dhd_dbg_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_flowring.c 
b/drivers/net/wireless/bcmdhd/dhd_flowring.c new file mode 100644 index 000000000000..759dd0ed8bcf --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_flowring.c @@ -0,0 +1,964 @@ +/* + * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level + * + * Flow rings are transmit traffic (=propagating towards antenna) related entities + * + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_flowring.c 591285 2015-10-07 11:56:29Z $ + */ + + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + + +static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue); + +static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, + uint8 prio, char *sa, char *da); + +static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, + uint8 prio, char *sa, char *da); + +static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex, + uint8 prio, char *sa, char *da, uint16 *flowid); +int BCMFASTPATH dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt); + +#define FLOW_QUEUE_PKT_NEXT(p) PKTLINK(p) +#define FLOW_QUEUE_PKT_SETNEXT(p, x) PKTSETLINK((p), (x)) + +#ifdef DHD_LOSSLESS_ROAMING +const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 }; +#else +const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 }; +#endif +const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; + +/** Queue overflow throttle. 
Return value: TRUE if throttle needs to be applied */ +static INLINE int +dhd_flow_queue_throttle(flow_queue_t *queue) +{ + return DHD_FLOW_QUEUE_FULL(queue); +} + +int BCMFASTPATH +dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt) +{ + return BCME_NORESOURCE; +} + +/** Returns flow ring given a flowid */ +flow_ring_node_t * +dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid) +{ + flow_ring_node_t * flow_ring_node; + + ASSERT(dhdp != (dhd_pub_t*)NULL); + ASSERT(flowid < dhdp->num_flow_rings); + + flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]); + + ASSERT(flow_ring_node->flowid == flowid); + return flow_ring_node; +} + +/** Returns 'backup' queue given a flowid */ +flow_queue_t * +dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid) +{ + flow_ring_node_t * flow_ring_node; + + flow_ring_node = dhd_flow_ring_node(dhdp, flowid); + return &flow_ring_node->queue; +} + +/* Flow ring's queue management functions */ + +/** Initialize a flow ring's queue, called on driver initialization. */ +void +dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max) +{ + ASSERT((queue != NULL) && (max > 0)); + + dll_init(&queue->list); + queue->head = queue->tail = NULL; + queue->len = 0; + + /* Set queue's threshold and queue's parent cummulative length counter */ + ASSERT(max > 1); + DHD_FLOW_QUEUE_SET_MAX(queue, max); + DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max); + DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr); + + queue->failures = 0U; + queue->cb = &dhd_flow_queue_overflow; +} + +/** Register an enqueue overflow callback handler */ +void +dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb) +{ + ASSERT(queue != NULL); + queue->cb = cb; +} + +/** + * Enqueue an 802.3 packet at the back of a flow ring's queue. From there, it will travel later on + * to the flow ring itself. 
+ */ +int BCMFASTPATH +dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt) +{ + int ret = BCME_OK; + + ASSERT(queue != NULL); + + if (dhd_flow_queue_throttle(queue)) { + queue->failures++; + ret = (*queue->cb)(queue, pkt); + goto done; + } + + if (queue->head) { + FLOW_QUEUE_PKT_SETNEXT(queue->tail, pkt); + } else { + queue->head = pkt; + } + + FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); + + queue->tail = pkt; /* at tail */ + + queue->len++; + /* increment parent's cummulative length */ + DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue)); + +done: + return ret; +} + +/** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */ +void * BCMFASTPATH +dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue) +{ + void * pkt; + + ASSERT(queue != NULL); + + pkt = queue->head; /* from head */ + + if (pkt == NULL) { + ASSERT((queue->len == 0) && (queue->tail == NULL)); + goto done; + } + + queue->head = FLOW_QUEUE_PKT_NEXT(pkt); + if (queue->head == NULL) + queue->tail = NULL; + + queue->len--; + /* decrement parent's cummulative length */ + DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue)); + + FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* dettach packet from queue */ + +done: + return pkt; +} + +/** Reinsert a dequeued 802.3 packet back at the head */ +void BCMFASTPATH +dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt) +{ + if (queue->head == NULL) { + queue->tail = pkt; + } + + FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head); + queue->head = pkt; + queue->len++; + /* increment parent's cummulative length */ + DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue)); +} + +/** Fetch the backup queue for a flowring, and assign flow control thresholds */ +void +dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid, + int queue_budget, int cumm_threshold, void *cumm_ctr) +{ + flow_queue_t * queue; + + ASSERT(dhdp != (dhd_pub_t*)NULL); + ASSERT(queue_budget > 1); + ASSERT(cumm_threshold > 1); + ASSERT(cumm_ctr != (void*)NULL); + + queue = dhd_flow_queue(dhdp, flowid); + + DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */ + + /* Set the queue's parent threshold and cummulative counter */ + DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold); + DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr); +} + +/** Initializes data structures of multiple flow rings */ +int +dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings) +{ + uint32 idx; + uint32 flow_ring_table_sz; + uint32 if_flow_lkup_sz = 0; + void * flowid_allocator; + flow_ring_table_t *flow_ring_table = NULL; + if_flow_lkup_t *if_flow_lkup = NULL; + void *lock = NULL; + void *list_lock = NULL; + unsigned long flags; + + DHD_INFO(("%s\n", __FUNCTION__)); + + /* Construct a 16bit flowid allocator */ + flowid_allocator = id16_map_init(dhdp->osh, + num_flow_rings - FLOW_RING_COMMON, FLOWID_RESERVED); + if (flowid_allocator == NULL) { + DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__)); + return BCME_NOMEM; + } + + /* Allocate a flow ring table, comprising of requested number of rings */ + flow_ring_table_sz = (num_flow_rings * sizeof(flow_ring_node_t)); + flow_ring_table = (flow_ring_table_t *)MALLOCZ(dhdp->osh, flow_ring_table_sz); + if (flow_ring_table == NULL) { + DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__)); + goto fail; + } + + /* Initialize flow ring table state */ + DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr); + bzero((uchar *)flow_ring_table, flow_ring_table_sz); + for (idx = 0; idx < num_flow_rings; idx++) { + flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED; + 
flow_ring_table[idx].flowid = (uint16)idx; + flow_ring_table[idx].lock = dhd_os_spin_lock_init(dhdp->osh); + if (flow_ring_table[idx].lock == NULL) { + DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__)); + goto fail; + } + + dll_init(&flow_ring_table[idx].list); + + /* Initialize the per flow ring backup queue */ + dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue, + FLOW_RING_QUEUE_THRESHOLD); + } + + /* Allocate per interface hash table (for fast lookup from interface to flow ring) */ + if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS; + if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp, + DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz); + if (if_flow_lkup == NULL) { + DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__)); + goto fail; + } + + /* Initialize per interface hash table */ + for (idx = 0; idx < DHD_MAX_IFS; idx++) { + int hash_ix; + if_flow_lkup[idx].status = 0; + if_flow_lkup[idx].role = 0; + for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++) + if_flow_lkup[idx].fl_hash[hash_ix] = NULL; + } + + lock = dhd_os_spin_lock_init(dhdp->osh); + if (lock == NULL) + goto fail; + + list_lock = dhd_os_spin_lock_init(dhdp->osh); + if (list_lock == NULL) + goto lock_fail; + + dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP; + bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); +#ifdef DHD_LOSSLESS_ROAMING + dhdp->dequeue_prec_map = ALLPRIO; +#endif + /* Now populate into dhd pub */ + DHD_FLOWID_LOCK(lock, flags); + dhdp->num_flow_rings = num_flow_rings; + dhdp->flowid_allocator = (void *)flowid_allocator; + dhdp->flow_ring_table = (void *)flow_ring_table; + dhdp->if_flow_lkup = (void *)if_flow_lkup; + dhdp->flowid_lock = lock; + dhdp->flow_rings_inited = TRUE; + dhdp->flowring_list_lock = list_lock; + DHD_FLOWID_UNLOCK(lock, flags); + + DHD_INFO(("%s done\n", __FUNCTION__)); + return BCME_OK; + +lock_fail: + /* deinit the spinlock */ + dhd_os_spin_lock_deinit(dhdp->osh, lock); + +fail: + /* Destruct the per interface flow lkup table */ + if (if_flow_lkup != NULL) { + DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz); + } + if (flow_ring_table != NULL) { + for (idx = 0; idx < num_flow_rings; idx++) { + if (flow_ring_table[idx].lock != NULL) + dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock); + } + MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz); + } + id16_map_fini(dhdp->osh, flowid_allocator); + + return BCME_NOMEM; +} + +/** Deinit Flow Ring specific data structures */ +void dhd_flow_rings_deinit(dhd_pub_t *dhdp) +{ + uint16 idx; + uint32 flow_ring_table_sz; + uint32 if_flow_lkup_sz; + flow_ring_table_t *flow_ring_table; + unsigned long flags; + void *lock; + + DHD_INFO(("dhd_flow_rings_deinit\n")); + + if (!(dhdp->flow_rings_inited)) { + DHD_ERROR(("dhd_flow_rings not initialized!\n")); + return; + } + + if (dhdp->flow_ring_table != NULL) { + + ASSERT(dhdp->num_flow_rings > 0); + + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table; + dhdp->flow_ring_table = NULL; + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + for (idx = 0; idx < dhdp->num_flow_rings; idx++) { + if (flow_ring_table[idx].active) { + dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]); + } + ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue)); + + /* Deinit flow ring queue locks before destroying flow ring table */ + dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock); + flow_ring_table[idx].lock = NULL; + + } + + /* Destruct the flow ring table */ + 
flow_ring_table_sz = dhdp->num_flow_rings * sizeof(flow_ring_table_t); + MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz); + } + + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + + /* Destruct the per interface flow lkup table */ + if (dhdp->if_flow_lkup != NULL) { + if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS; + bzero((uchar *)dhdp->if_flow_lkup, if_flow_lkup_sz); + DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz); + dhdp->if_flow_lkup = NULL; + } + + /* Destruct the flowid allocator */ + if (dhdp->flowid_allocator != NULL) + dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator); + + dhdp->num_flow_rings = 0U; + bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO); + + lock = dhdp->flowid_lock; + dhdp->flowid_lock = NULL; + + DHD_FLOWID_UNLOCK(lock, flags); + dhd_os_spin_lock_deinit(dhdp->osh, lock); + + dhd_os_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock); + dhdp->flowring_list_lock = NULL; + + ASSERT(dhdp->if_flow_lkup == NULL); + ASSERT(dhdp->flowid_allocator == NULL); + ASSERT(dhdp->flow_ring_table == NULL); + dhdp->flow_rings_inited = FALSE; +} + +/** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */ +uint8 +dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex) +{ + if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + ASSERT(if_flow_lkup); + return if_flow_lkup[ifindex].role; +} + +#ifdef WLTDLS +bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da) +{ + tdls_peer_node_t *cur = dhdp->peer_tbl.node; + while (cur != NULL) { + if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) { + return TRUE; + } + cur = cur->next; + } + return FALSE; +} +#endif /* WLTDLS */ + +/** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */ +static INLINE uint16 +dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da) +{ + int hash; + bool ismcast = FALSE; + flow_hash_info_t *cur; + if_flow_lkup_t *if_flow_lkup; + unsigned long flags; + + DHD_FLOWID_LOCK(dhdp->flowid_lock, flags); + if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup; + + ASSERT(if_flow_lkup); + + if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) { +#ifdef WLTDLS + if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) && + is_tdls_destination(dhdp, da)) { + hash = DHD_FLOWRING_HASHINDEX(da, prio); + cur = if_flow_lkup[ifindex].fl_hash[hash]; + while (cur != NULL) { + if (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN)) { + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + return cur->flowid; + } + cur = cur->next; + } + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + return FLOWID_INVALID; + } +#endif /* WLTDLS */ + cur = if_flow_lkup[ifindex].fl_hash[prio]; + if (cur) { + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + return cur->flowid; + } + } else { + + if (ETHER_ISMULTI(da)) { + ismcast = TRUE; + hash = 0; + } else { + hash = DHD_FLOWRING_HASHINDEX(da, prio); + } + + cur = if_flow_lkup[ifindex].fl_hash[hash]; + + while (cur) { + if ((ismcast && ETHER_ISMULTI(cur->flow_info.da)) || + (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN) && + (cur->flow_info.tid == prio))) { + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + return cur->flowid; + } + cur = cur->next; + } + } + DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags); + + DHD_INFO(("%s: cannot find flowid\n", __FUNCTION__)); + return FLOWID_INVALID; +} /* dhd_flowid_find */ + +/** Create unique Flow ID, called when a flow ring is created. 
*/
+static INLINE uint16
+dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
+{
+	flow_hash_info_t *fl_hash_node, *cur;
+	if_flow_lkup_t *if_flow_lkup;
+	int hash;
+	uint16 flowid;
+	unsigned long flags;
+
+	fl_hash_node = (flow_hash_info_t *) MALLOC(dhdp->osh, sizeof(flow_hash_info_t));
+	if (fl_hash_node == NULL) {
+		DHD_ERROR(("%s: flow_hash_info_t alloc failure\n", __FUNCTION__));
+		return FLOWID_INVALID;
+	}
+	memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da));
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	ASSERT(dhdp->flowid_allocator != NULL);
+	flowid = id16_map_alloc(dhdp->flowid_allocator);
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	if (flowid == FLOWID_INVALID) {
+		MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t));
+		DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__));
+		return FLOWID_INVALID;
+	}
+
+	fl_hash_node->flowid = flowid;
+	fl_hash_node->flow_info.tid = prio;
+	fl_hash_node->flow_info.ifindex = ifindex;
+	fl_hash_node->next = NULL;
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) {
+		/* For STA, non-TDLS destinations get an entry based on prio alone */
+#ifdef WLTDLS
+		if (dhdp->peer_tbl.tdls_peer_count &&
+			(is_tdls_destination(dhdp, da))) {
+			hash = DHD_FLOWRING_HASHINDEX(da, prio);
+			cur = if_flow_lkup[ifindex].fl_hash[hash];
+			if (cur) {
+				while (cur->next) {
+					cur = cur->next;
+				}
+				cur->next = fl_hash_node;
+			} else {
+				if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
+			}
+		} else
+#endif /* WLTDLS */
+			if_flow_lkup[ifindex].fl_hash[prio] = fl_hash_node;
+	} else {
+
+		/* For bcast/mcast, assign the first slot in the interface */
+		hash = ETHER_ISMULTI(da) ? 0 : DHD_FLOWRING_HASHINDEX(da, prio);
+		cur = if_flow_lkup[ifindex].fl_hash[hash];
+		if (cur) {
+			while (cur->next) {
+				cur = cur->next;
+			}
+			cur->next = fl_hash_node;
+		} else
+			if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
+	}
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));
+
+	return fl_hash_node->flowid;
+} /* dhd_flowid_alloc */
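The bucket selection used above and in dhd_flowid_find comes from DHD_FLOWRING_HASHINDEX (declared in dhd_flowring.h later in this patch): XOR the last three octets of the destination MAC with the priority, modulo the table size. A quick standalone check:

#include <stdio.h>

/* Mirrors DHD_FLOWRING_HASHINDEX / DHD_FLOWRING_HASH_SIZE from
 * dhd_flowring.h; standalone for illustration.
 */
#define HASH_SIZE 256

static unsigned int flowring_hash(const unsigned char *ea, unsigned char prio)
{
	return (ea[3] ^ ea[4] ^ ea[5] ^ prio) % HASH_SIZE;
}

int main(void)
{
	const unsigned char da[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* 0x33 ^ 0x44 ^ 0x55 ^ 0x02 = 0x20, so bucket 32 */
	printf("bucket = %u\n", flowring_hash(da, 2));
	return 0;
}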
+
+/** Get the flow ring ID; if not present, try to create one */
+static INLINE int
+dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
+	uint8 prio, char *sa, char *da, uint16 *flowid)
+{
+	uint16 id;
+	flow_ring_node_t *flow_ring_node;
+	flow_ring_table_t *flow_ring_table;
+	unsigned long flags;
+	int ret;
+
+	DHD_INFO(("%s\n", __FUNCTION__));
+
+	if (!dhdp->flow_ring_table) {
+		return BCME_ERROR;
+	}
+
+	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+
+	id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);
+
+	if (id == FLOWID_INVALID) {
+
+		if_flow_lkup_t *if_flow_lkup;
+		if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+		if (!if_flow_lkup[ifindex].status)
+			return BCME_ERROR;
+
+		id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
+		if (id == FLOWID_INVALID) {
+			DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n",
+				__FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
+			return BCME_ERROR;
+		}
+
+		/* register this flowid in dhd_pub */
+		dhd_add_flowid(dhdp, ifindex, prio, da, id);
+
+		ASSERT(id < dhdp->num_flow_rings);
+
+		flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
+
+		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+		/* Init flow info */
+		memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa));
+		memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da));
+		flow_ring_node->flow_info.tid = prio;
+		flow_ring_node->flow_info.ifindex = ifindex;
+		flow_ring_node->active = TRUE;
+		flow_ring_node->status = FLOW_RING_STATUS_PENDING;
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+		/* Create and inform the device about the new flow */
+		if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
+			!= BCME_OK) {
+			DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
+			return BCME_ERROR;
+		}
+
+		*flowid = id;
+		return BCME_OK;
+	} else {
+		/* the flow id was found in the hash */
+		ASSERT(id < dhdp->num_flow_rings);
+
+		flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
+		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+		/*
+		 * If the flow_ring_node is in the open or pending state, we can
+		 * return the flow id to the caller. If the flow_ring_node is in
+		 * FLOW_RING_STATUS_PENDING, the creation is still in progress and
+		 * hence the packets should be queued.
+		 *
+		 * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING or
+		 * FLOW_RING_STATUS_CLOSED, we should return an error.
+		 * Note that if the flow ring is being deleted we mark it as
+		 * FLOW_RING_STATUS_DELETE_PENDING. Before the dongle responds, and
+		 * before we mark it as FLOW_RING_STATUS_CLOSED, we could get tx
+		 * packets; we should drop the packets in that case.
+		 * The decision to return OK should NOT be based on the 'active'
+		 * variable, because 'active' is set TRUE when a flow_ring_node gets
+		 * allocated and FALSE when the flow ring is removed, so it does not
+		 * reflect the true state of the flow ring.
+		 */
+		if (flow_ring_node->status == FLOW_RING_STATUS_OPEN ||
+			flow_ring_node->status == FLOW_RING_STATUS_PENDING) {
+			*flowid = id;
+			ret = BCME_OK;
+		} else {
+			*flowid = FLOWID_INVALID;
+			ret = BCME_ERROR;
+		}
+
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+		return ret;
+
+	} /* flow id found in the hash */
+} /* dhd_flowid_lookup */
+
+/**
+ * Assign an existing or newly created flowid to an 802.3 packet. This flowid is later used to
+ * select the flow ring on which the packet is sent to the dongle.
+ */
+int BCMFASTPATH
+dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf)
+{
+	uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+	struct ether_header *eh = (struct ether_header *)pktdata;
+	uint16 flowid;
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+
+	if (ifindex >= DHD_MAX_IFS) {
+		return BCME_BADARG;
+	}
+
+	if (!dhdp->flowid_allocator) {
+		DHD_ERROR(("%s: flow rings not initialized yet\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (dhd_flowid_lookup(dhdp, ifindex, prio, eh->ether_shost, eh->ether_dhost,
+		&flowid) != BCME_OK) {
+		return BCME_ERROR;
+	}
+
+	DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));
+
+	/* Tag the packet with the flowid */
+	DHD_PKT_SET_FLOWID(pktbuf, flowid);
+	return BCME_OK;
+}
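As a reviewer's aid, the status checks above condense into the small state machine below. The numeric values mirror the FLOW_RING_STATUS_* defines in dhd_flowring.h later in this patch; the sketch is commentary, not driver code.

/* Condensed view of the flow ring lifecycle implied by dhd_flowid_lookup. */
enum flow_ring_status_sketch {
	RING_OPEN = 0,           /* FLOW_RING_STATUS_OPEN: dongle confirmed    */
	RING_PENDING = 1,        /* FLOW_RING_STATUS_PENDING: create requested */
	RING_CLOSED = 2,         /* FLOW_RING_STATUS_CLOSED: free slot         */
	RING_DELETE_PENDING = 3  /* delete requested, awaiting dongle response */
};

/* Lookup hands the id back (so callers may queue packets) only while the
 * ring is OPEN or PENDING; DELETE_PENDING and CLOSED yield BCME_ERROR, so
 * tx arriving during teardown is dropped rather than queued.
 */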
+
+void
+dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
+{
+	int hashix;
+	bool found = FALSE;
+	flow_hash_info_t *cur, *prev;
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {
+
+		cur = if_flow_lkup[ifindex].fl_hash[hashix];
+
+		if (cur) {
+			if (cur->flowid == flowid) {
+				found = TRUE;
+			}
+
+			prev = NULL;
+			while (!found && cur) {
+				if (cur->flowid == flowid) {
+					found = TRUE;
+					break;
+				}
+				prev = cur;
+				cur = cur->next;
+			}
+			if (found) {
+				if (!prev) {
+					if_flow_lkup[ifindex].fl_hash[hashix] = cur->next;
+				} else {
+					prev->next = cur->next;
+				}
+
+				/* deregister the flowid from dhd_pub */
+				dhd_del_flowid(dhdp, ifindex, flowid);
+
+				id16_map_free(dhdp->flowid_allocator, flowid);
+				DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+				MFREE(dhdp->osh, cur, sizeof(flow_hash_info_t));
+
+				return;
+			}
+		}
+	}
+
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+	DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
+		__FUNCTION__, flowid));
+} /* dhd_flowid_free */
+
+/**
+ * Delete all flow rings associated with the given interface. Called when e.g. the dongle
+ * indicates that a wireless link has gone down.
+ */
+void
+dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
+{
+	uint32 id;
+	flow_ring_table_t *flow_ring_table;
+
+	DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex));
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
+
+	if (!dhdp->flow_ring_table)
+		return;
+
+	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+	for (id = 0; id < dhdp->num_flow_rings; id++) {
+		if (flow_ring_table[id].active &&
+			(flow_ring_table[id].flow_info.ifindex == ifindex)) {
+			dhd_bus_flow_ring_delete_request(dhdp->bus,
+				(void *) &flow_ring_table[id]);
+		}
+	}
+}
+
+/** Delete flow ring(s) for a given peer address. Related to AP/AWDL/TDLS functionality. */
+void
+dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr)
+{
+	uint32 id;
+	flow_ring_table_t *flow_ring_table;
+
+	DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
+
+	if (!dhdp->flow_ring_table)
+		return;
+
+	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
+	for (id = 0; id < dhdp->num_flow_rings; id++) {
+		if (flow_ring_table[id].active &&
+			(flow_ring_table[id].flow_info.ifindex == ifindex) &&
+			(!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
+			(flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) {
+			DHD_INFO(("%s: deleting flowid %d\n",
+				__FUNCTION__, flow_ring_table[id].flowid));
+			dhd_bus_flow_ring_delete_request(dhdp->bus,
+				(void *) &flow_ring_table[id]);
+		}
+	}
+}
+
+/** Handles interface ADD, CHANGE, DEL indications from the dongle */
+void
+dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
+	uint8 op, uint8 role)
+{
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return;
+
+	DHD_INFO(("%s: ifindex %u op %u role is %u \n",
+		__FUNCTION__, ifindex, op, role));
+	if (!dhdp->flowid_allocator) {
+		DHD_ERROR(("%s: flow rings not initialized yet\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	if (op == WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) {
+
+		if_flow_lkup[ifindex].role = role;
+
+		if (role != WLC_E_IF_ROLE_STA) {
+			if_flow_lkup[ifindex].status = TRUE;
+			DHD_INFO(("%s: Mcast flow ring for ifindex %d role is %d \n",
+				__FUNCTION__, ifindex, role));
+			/* Create Mcast Flow */
+		}
+	} else if (op == WLC_E_IF_DEL) {
+		if_flow_lkup[ifindex].status = FALSE;
+		DHD_INFO(("%s: clean up all flow rings for ifindex %d role is %d \n",
+			__FUNCTION__, ifindex, role));
+	}
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+}
+
+/** Handles a STA 'link' indication from the dongle */
+int
+dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status)
+{
+	if_flow_lkup_t *if_flow_lkup;
+	unsigned long flags;
+
+	ASSERT(ifindex < DHD_MAX_IFS);
+	if (ifindex >= DHD_MAX_IFS)
+		return BCME_BADARG;
+
+	DHD_INFO(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status));
+
+	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+
+	if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) {
+		if (status)
+			if_flow_lkup[ifindex].status = TRUE;
+		else
+			if_flow_lkup[ifindex].status = FALSE;
+	}
+	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+
+	return BCME_OK;
+}
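dhd_update_flow_prio_map() below switches between the prio2ac and prio2tid tables declared at the top of this file. The AC fold is the usual 802.1D-priority-to-WMM-access-category mapping; a standalone check follows (the AC names assume the conventional AC_BE=0 .. AC_VO=3 ordering):

#include <stdio.h>

int main(void)
{
	/* prio2ac from dhd_flowring.c (non-lossless-roaming build): eight
	 * 802.1D priorities fold into the four WMM access categories.
	 */
	const unsigned char prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
	const char *ac_names[4] = { "AC_BE", "AC_BK", "AC_VI", "AC_VO" };
	int prio;

	for (prio = 0; prio < 8; prio++)
		printf("802.1D prio %d -> %s\n", prio, ac_names[prio2ac[prio]]);
	return 0;
}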
+
+/** Update the flow priority mapping, called on IOVAR */
+int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map)
+{
+	uint16 flowid;
+	flow_ring_node_t *flow_ring_node;
+
+	if (map > DHD_FLOW_PRIO_LLR_MAP)
+		return BCME_BADOPTION;
+
+	/* Check if we need to change the prio map */
+	if (map == dhdp->flow_prio_map_type)
+		return BCME_OK;
+
+	/* If any ring is active we cannot change the priority mapping for flow rings */
+	for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
+		flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
+		if (flow_ring_node->active)
+			return BCME_EPERM;
+	}
+
+	/* Inform the firmware about the new mapping type */
+	if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE))
+		return BCME_ERROR;
+
+	/* update internal structures */
+	dhdp->flow_prio_map_type = map;
+	if (dhdp->flow_prio_map_type == DHD_FLOW_PRIO_TID_MAP)
+		bcopy(prio2tid, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+	else
+		bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+
+	return BCME_OK;
+}
+
+/** Inform the firmware of an updated flow priority mapping, called on IOVAR */
+int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
+{
+	uint8 iovbuf[24];
+	if (!set) {
+		bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+		if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
+			DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+		*map = iovbuf[0];
+		return BCME_OK;
+	}
+	bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char*)iovbuf, sizeof(iovbuf));
+	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+		DHD_ERROR(("%s: failed to set fl_prio_map\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+	return BCME_OK;
+}
diff --git a/drivers/net/wireless/bcmdhd/dhd_flowring.h b/drivers/net/wireless/bcmdhd/dhd_flowring.h
new file mode 100644
index 000000000000..7c36de5459bf
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_flowring.h
@@ -0,0 +1,235 @@
+/*
+ * @file Header file describing the flow ring DHD interfaces.
+ *
+ * Flow rings are transmit traffic (=propagating towards antenna) related entities.
+ *
+ * Provides type definitions and function prototypes used to create, delete and manage flow
+ * rings at a high level.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_flowring.h 591285 2015-10-07 11:56:29Z $ + */ + + +/**************** + * Common types * + */ + +#ifndef _dhd_flowrings_h_ +#define _dhd_flowrings_h_ + +/* Max pkts held in a flow ring's backup queue */ +#define FLOW_RING_QUEUE_THRESHOLD (2048) + +/* Number of H2D common rings */ +#define FLOW_RING_COMMON BCMPCIE_H2D_COMMON_MSGRINGS + +#define FLOWID_INVALID (ID16_INVALID) +#define FLOWID_RESERVED (FLOW_RING_COMMON) + +#define FLOW_RING_STATUS_OPEN 0 +#define FLOW_RING_STATUS_PENDING 1 +#define FLOW_RING_STATUS_CLOSED 2 +#define FLOW_RING_STATUS_DELETE_PENDING 3 +#define FLOW_RING_STATUS_FLUSH_PENDING 4 +#define FLOW_RING_STATUS_STA_FREEING 5 + +#define DHD_FLOWRING_RX_BUFPOST_PKTSZ 2048 + +#define DHD_FLOW_PRIO_AC_MAP 0 +#define DHD_FLOW_PRIO_TID_MAP 1 +#define DHD_FLOW_PRIO_LLR_MAP 2 + +/* Pkttag not compatible with PROP_TXSTATUS or WLFC */ +typedef struct dhd_pkttag_fr { + uint16 flowid; + uint16 ifid; + int dataoff; + dmaaddr_t physaddr; + uint32 pa_len; + +} dhd_pkttag_fr_t; + +#define DHD_PKTTAG_SET_FLOWID(tag, flow) ((tag)->flowid = (uint16)(flow)) +#define DHD_PKTTAG_SET_IFID(tag, idx) ((tag)->ifid = (uint16)(idx)) +#define DHD_PKTTAG_SET_DATAOFF(tag, offset) ((tag)->dataoff = (int)(offset)) +#define DHD_PKTTAG_SET_PA(tag, pa) ((tag)->physaddr = (pa)) +#define DHD_PKTTAG_SET_PA_LEN(tag, palen) ((tag)->pa_len = (palen)) + +#define DHD_PKTTAG_FLOWID(tag) ((tag)->flowid) +#define DHD_PKTTAG_IFID(tag) ((tag)->ifid) +#define DHD_PKTTAG_DATAOFF(tag) ((tag)->dataoff) +#define DHD_PKTTAG_PA(tag) ((tag)->physaddr) +#define DHD_PKTTAG_PA_LEN(tag) ((tag)->pa_len) + +/* Hashing a MacAddress for lkup into a per interface flow hash table */ +#define DHD_FLOWRING_HASH_SIZE 256 +#define DHD_FLOWRING_HASHINDEX(ea, prio) \ + ((((uint8 *)(ea))[3] ^ ((uint8 *)(ea))[4] ^ ((uint8 *)(ea))[5] ^ ((uint8)(prio))) \ + % DHD_FLOWRING_HASH_SIZE) + +#define DHD_IF_ROLE(pub, idx) (((if_flow_lkup_t *)(pub)->if_flow_lkup)[idx].role) +#define DHD_IF_ROLE_AP(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP) +#define DHD_IF_ROLE_STA(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_STA) +#define DHD_IF_ROLE_P2PGO(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO) +#define DHD_FLOW_RING(dhdp, flowid) \ + (flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid]) + +struct flow_queue; + +/* Flow Ring Queue Enqueue overflow callback */ +typedef int (*flow_queue_cb_t)(struct flow_queue * queue, void * pkt); + +/** + * Each flow ring has an associated (tx flow controlled) queue. 802.3 packets are transferred + * between queue and ring. A packet from the host stack is first added to the queue, and in a later + * stage transferred to the flow ring. Packets in the queue are dhd owned, whereas packets in the + * flow ring are device owned. 
+ */ +typedef struct flow_queue { + dll_t list; /* manage a flowring queue in a doubly linked list */ + void * head; /* first packet in the queue */ + void * tail; /* last packet in the queue */ + uint16 len; /* number of packets in the queue */ + uint16 max; /* maximum or min budget (used in cumulative accounting) */ + uint32 threshold; /* parent's cumulative length threshold */ + void * clen_ptr; /* parent's cumulative length counter */ + uint32 failures; /* enqueue failures due to queue overflow */ + flow_queue_cb_t cb; /* callback invoked on threshold crossing */ +} flow_queue_t; + +#define DHD_FLOW_QUEUE_LEN(queue) ((int)(queue)->len) +#define DHD_FLOW_QUEUE_MAX(queue) ((int)(queue)->max) +#define DHD_FLOW_QUEUE_THRESHOLD(queue) ((int)(queue)->threshold) +#define DHD_FLOW_QUEUE_EMPTY(queue) ((queue)->len == 0) +#define DHD_FLOW_QUEUE_FAILURES(queue) ((queue)->failures) + +#define DHD_FLOW_QUEUE_AVAIL(queue) ((int)((queue)->max - (queue)->len)) +#define DHD_FLOW_QUEUE_FULL(queue) ((queue)->len >= (queue)->max) + +#define DHD_FLOW_QUEUE_OVFL(queue, budget) \ + (((queue)->len) > budget) + +#define DHD_FLOW_QUEUE_SET_MAX(queue, budget) \ + ((queue)->max) = ((budget) - 1) + +/* Queue's cumulative threshold. */ +#define DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold) \ + ((queue)->threshold) = ((cumm_threshold) - 1) + +/* Queue's cumulative length object accessor. */ +#define DHD_FLOW_QUEUE_CLEN_PTR(queue) ((queue)->clen_ptr) + +/* Set a queue's clen_ptr to point to a parent's cumm_ctr_t cumulative length */ +#define DHD_FLOW_QUEUE_SET_CLEN(queue, parent_clen_ptr) \ + ((queue)->clen_ptr) = (void *)(parent_clen_ptr) + +/* see wlfc_proto.h for tx status details */ +#define DHD_FLOWRING_MAXSTATUS_MSGS 5 +#define DHD_FLOWRING_TXSTATUS_CNT_UPDATE(bus, flowid, txstatus) +/** each flow ring is dedicated to a tid/sa/da combination */ +typedef struct flow_info { + uint8 tid; + uint8 ifindex; + char sa[ETHER_ADDR_LEN]; + char da[ETHER_ADDR_LEN]; +} flow_info_t; + +/** a flow ring is used for outbound (towards antenna) 802.3 packets */ +typedef struct flow_ring_node { + dll_t list; /* manage a constructed flowring in a dll, must be the first field */ + flow_queue_t queue; /* queues packets before they enter the flow ring, flow control */ + bool active; + uint8 status; + /* + * flowid: unique ID of a flow ring, which can either be unicast or broadcast/multicast. For + * unicast flow rings, the flow id accelerates ARM 802.3->802.11 header translation.
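+ * + * Worked lookup example (added for illustration, not from the original sources): a unicast flowid is found per interface by hashing the destination MAC and priority with DHD_FLOWRING_HASHINDEX(), which XORs the last three address octets with the priority; e.g. for da 00:90:4c:12:34:56 and prio 2, (0x12 ^ 0x34 ^ 0x56 ^ 0x02) % 256 = 0x72, so bucket 114 of the interface's fl_hash[] table is searched for the flow_hash_info_t holding the flowid.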
+ */ + uint16 flowid; + flow_info_t flow_info; + void *prot_info; + void *lock; /* lock for flowring access protection */ +} flow_ring_node_t; + +typedef flow_ring_node_t flow_ring_table_t; + +typedef struct flow_hash_info { + uint16 flowid; + flow_info_t flow_info; + struct flow_hash_info *next; +} flow_hash_info_t; + +typedef struct if_flow_lkup { + bool status; + uint8 role; /* Interface role: STA/AP */ + flow_hash_info_t *fl_hash[DHD_FLOWRING_HASH_SIZE]; /* Lkup Hash table */ +} if_flow_lkup_t; + +static INLINE flow_ring_node_t * +dhd_constlist_to_flowring(dll_t *item) +{ + return ((flow_ring_node_t *)item); +} + +/* Exported API */ + +/* Flow ring's queue management functions */ +extern flow_ring_node_t * dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid); +extern flow_queue_t * dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid); + +extern void dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max); +extern void dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb); +extern int dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt); +extern void * dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue); +extern void dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt); + +extern void dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid, + int queue_budget, int cumm_threshold, void *cumm_ctr); +extern int dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings); + +extern void dhd_flow_rings_deinit(dhd_pub_t *dhdp); + +extern int dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, + void *pktbuf); + +extern void dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid); + +extern void dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex); + +extern void dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, + char *addr); + +/* Handle Interface ADD, DEL operations */ +extern void dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex, + uint8 op, uint8 role); + +/* Handle a STA interface link status update */ +extern int dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, + uint8 status); +extern int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set); +extern int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map); + +extern uint8 dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex); +#endif /* _dhd_flowrings_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_ip.c b/drivers/net/wireless/bcmdhd/dhd_ip.c new file mode 100644 index 000000000000..96c5a23d3823 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_ip.c @@ -0,0 +1,1287 @@ +/* + * IP Packet Parser Module. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_ip.c 569132 2015-07-07 09:09:33Z $ + */ +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include + +#ifdef DHDTCPACK_SUPPRESS +#include +#include +#include +#endif /* DHDTCPACK_SUPPRESS */ + +/* special values */ +/* 802.3 llc/snap header */ +static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; + +pkt_frag_t pkt_frag_info(osl_t *osh, void *p) +{ + uint8 *frame; + int length; + uint8 *pt; /* Pointer to type field */ + uint16 ethertype; + struct ipv4_hdr *iph; /* IP frame pointer */ + int ipl; /* IP frame length */ + uint16 iph_frag; + + ASSERT(osh && p); + + frame = PKTDATA(osh, p); + length = PKTLEN(osh, p); + + /* Process Ethernet II or SNAP-encapsulated 802.3 frames */ + if (length < ETHER_HDR_LEN) { + DHD_INFO(("%s: short eth frame (%d)\n", __FUNCTION__, length)); + return DHD_PKT_FRAG_NONE; + } else if (ntoh16(*(uint16 *)(frame + ETHER_TYPE_OFFSET)) >= ETHER_TYPE_MIN) { + /* Frame is Ethernet II */ + pt = frame + ETHER_TYPE_OFFSET; + } else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN && + !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) { + pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN; + } else { + DHD_INFO(("%s: non-SNAP 802.3 frame\n", __FUNCTION__)); + return DHD_PKT_FRAG_NONE; + } + + ethertype = ntoh16(*(uint16 *)pt); + + /* Skip VLAN tag, if any */ + if (ethertype == ETHER_TYPE_8021Q) { + pt += VLAN_TAG_LEN; + + if (pt + ETHER_TYPE_LEN > frame + length) { + DHD_INFO(("%s: short VLAN frame (%d)\n", __FUNCTION__, length)); + return DHD_PKT_FRAG_NONE; + } + + ethertype = ntoh16(*(uint16 *)pt); + } + + if (ethertype != ETHER_TYPE_IP) { + DHD_INFO(("%s: non-IP frame (ethertype 0x%x, length %d)\n", + __FUNCTION__, ethertype, length)); + return DHD_PKT_FRAG_NONE; + } + + iph = (struct ipv4_hdr *)(pt + ETHER_TYPE_LEN); + ipl = (uint)(length - (pt + ETHER_TYPE_LEN - frame)); + + /* We support IPv4 only */ + if ((ipl < IPV4_OPTIONS_OFFSET) || (IP_VER(iph) != IP_VER_4)) { + DHD_INFO(("%s: short frame (%d) or non-IPv4\n", __FUNCTION__, ipl)); + return DHD_PKT_FRAG_NONE; + } + + iph_frag = ntoh16(iph->frag); + + if (iph_frag & IPV4_FRAG_DONT) { + return DHD_PKT_FRAG_NONE; + } else if ((iph_frag & IPV4_FRAG_MORE) == 0) { + return DHD_PKT_FRAG_LAST; + } else { + return (iph_frag & IPV4_FRAG_OFFSET_MASK)? 
DHD_PKT_FRAG_CONT : DHD_PKT_FRAG_FIRST; + } +} + +#ifdef DHDTCPACK_SUPPRESS + +typedef struct { + void *pkt_in_q; /* TCP ACK packet that is already in txq or DelayQ */ + void *pkt_ether_hdr; /* Ethernet header pointer of pkt_in_q */ + int ifidx; + uint8 supp_cnt; + dhd_pub_t *dhdp; + struct timer_list timer; +} tcpack_info_t; + +typedef struct _tdata_psh_info_t { + uint32 end_seq; /* end seq# of a received TCP PSH DATA pkt */ + struct _tdata_psh_info_t *next; /* next pointer of the link chain */ +} tdata_psh_info_t; + +typedef struct { + struct { + uint8 src[IPV4_ADDR_LEN]; /* SRC ip addrs of this TCP stream */ + uint8 dst[IPV4_ADDR_LEN]; /* DST ip addrs of this TCP stream */ + } ip_addr; + struct { + uint8 src[TCP_PORT_LEN]; /* SRC tcp ports of this TCP stream */ + uint8 dst[TCP_PORT_LEN]; /* DST tcp ports of this TCP stream */ + } tcp_port; + tdata_psh_info_t *tdata_psh_info_head; /* Head of received TCP PSH DATA chain */ + tdata_psh_info_t *tdata_psh_info_tail; /* Tail of received TCP PSH DATA chain */ + uint32 last_used_time; /* The last time this tcpdata_info was used(in ms) */ +} tcpdata_info_t; + +/* TCPACK SUPPRESS module */ +typedef struct { + int tcpack_info_cnt; + tcpack_info_t tcpack_info_tbl[TCPACK_INFO_MAXNUM]; /* Info of TCP ACK to send */ + int tcpdata_info_cnt; + tcpdata_info_t tcpdata_info_tbl[TCPDATA_INFO_MAXNUM]; /* Info of received TCP DATA */ + tdata_psh_info_t *tdata_psh_info_pool; /* Pointer to tdata_psh_info elements pool */ + tdata_psh_info_t *tdata_psh_info_free; /* free tdata_psh_info elements chain in pool */ +#ifdef DHDTCPACK_SUP_DBG + int psh_info_enq_num; /* Number of free TCP PSH DATA info elements in pool */ +#endif /* DHDTCPACK_SUP_DBG */ +} tcpack_sup_module_t; + +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) +counter_tbl_t tack_tbl = {"tcpACK", 0, 1000, 10, {0, }, 1}; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + +static void +_tdata_psh_info_pool_enq(tcpack_sup_module_t *tcpack_sup_mod, + tdata_psh_info_t *tdata_psh_info) +{ + if ((tcpack_sup_mod == NULL) || (tdata_psh_info == NULL)) { + DHD_ERROR(("%s %d: ERROR %p %p\n", __FUNCTION__, __LINE__, + tcpack_sup_mod, tdata_psh_info)); + return; + } + + ASSERT(tdata_psh_info->next == NULL); + tdata_psh_info->next = tcpack_sup_mod->tdata_psh_info_free; + tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info; +#ifdef DHDTCPACK_SUP_DBG + tcpack_sup_mod->psh_info_enq_num++; +#endif +} + +static tdata_psh_info_t* +_tdata_psh_info_pool_deq(tcpack_sup_module_t *tcpack_sup_mod) +{ + tdata_psh_info_t *tdata_psh_info = NULL; + + if (tcpack_sup_mod == NULL) { + DHD_ERROR(("%s %d: ERROR %p\n", __FUNCTION__, __LINE__, + tcpack_sup_mod)); + return NULL; + } + + tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free; + if (tdata_psh_info == NULL) + DHD_ERROR(("%s %d: Out of tdata_disc_grp\n", __FUNCTION__, __LINE__)); + else { + tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next; + tdata_psh_info->next = NULL; +#ifdef DHDTCPACK_SUP_DBG + tcpack_sup_mod->psh_info_enq_num--; +#endif /* DHDTCPACK_SUP_DBG */ + } + + return tdata_psh_info; +} + +#ifdef BCMSDIO +static int _tdata_psh_info_pool_init(dhd_pub_t *dhdp, + tcpack_sup_module_t *tcpack_sup_mod) +{ + tdata_psh_info_t *tdata_psh_info_pool = NULL; + uint i; + + DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__)); + + if (tcpack_sup_mod == NULL) + return BCME_ERROR; + + ASSERT(tcpack_sup_mod->tdata_psh_info_pool == NULL); + ASSERT(tcpack_sup_mod->tdata_psh_info_free == NULL); + + tdata_psh_info_pool = + MALLOC(dhdp->osh, sizeof(tdata_psh_info_t) * 
TCPDATA_PSH_INFO_MAXNUM); + + if (tdata_psh_info_pool == NULL) + return BCME_NOMEM; + bzero(tdata_psh_info_pool, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM); +#ifdef DHDTCPACK_SUP_DBG + tcpack_sup_mod->psh_info_enq_num = 0; +#endif /* DHDTCPACK_SUP_DBG */ + + /* Enqueue newly allocated tcpdata psh info elements to the pool */ + for (i = 0; i < TCPDATA_PSH_INFO_MAXNUM; i++) + _tdata_psh_info_pool_enq(tcpack_sup_mod, &tdata_psh_info_pool[i]); + + ASSERT(tcpack_sup_mod->tdata_psh_info_free != NULL); + tcpack_sup_mod->tdata_psh_info_pool = tdata_psh_info_pool; + + return BCME_OK; +} + +static void _tdata_psh_info_pool_deinit(dhd_pub_t *dhdp, + tcpack_sup_module_t *tcpack_sup_mod) +{ + uint i; + tdata_psh_info_t *tdata_psh_info; + + DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__)); + + if (tcpack_sup_mod == NULL) { + DHD_ERROR(("%s %d: ERROR tcpack_sup_mod NULL!\n", + __FUNCTION__, __LINE__)); + return; + } + + for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) { + tcpdata_info_t *tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i]; + /* Return tdata_psh_info elements allocated to each tcpdata_info to the pool */ + while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) { + tcpdata_info->tdata_psh_info_head = tdata_psh_info->next; + tdata_psh_info->next = NULL; + _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info); + } + tcpdata_info->tdata_psh_info_tail = NULL; + } +#ifdef DHDTCPACK_SUP_DBG + DHD_ERROR(("%s %d: PSH INFO ENQ %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num)); +#endif /* DHDTCPACK_SUP_DBG */ + + i = 0; + /* Be sure we recollected all tdata_psh_info elements */ + while ((tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free)) { + tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next; + tdata_psh_info->next = NULL; + i++; + } + ASSERT(i == TCPDATA_PSH_INFO_MAXNUM); + MFREE(dhdp->osh, tcpack_sup_mod->tdata_psh_info_pool, + sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM); + tcpack_sup_mod->tdata_psh_info_pool = NULL; + + return; +} +#endif /* BCMSDIO */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +static void dhd_tcpack_send(struct timer_list *t) +#else +static void dhd_tcpack_send(ulong data) +#endif +{ + tcpack_sup_module_t *tcpack_sup_mod; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + tcpack_info_t *cur_tbl = from_timer(cur_tbl, t, timer); +#else + tcpack_info_t *cur_tbl = (tcpack_info_t *)data; +#endif + dhd_pub_t *dhdp; + int ifidx; + void* pkt; + unsigned long flags; + + if (!cur_tbl) { + return; + } + + dhdp = cur_tbl->dhdp; + if (!dhdp) { + return; + } + + flags = dhd_os_tcpacklock(dhdp); + + tcpack_sup_mod = dhdp->tcpack_sup_module; + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", + __FUNCTION__, __LINE__)); + dhd_os_tcpackunlock(dhdp, flags); + return; + } + pkt = cur_tbl->pkt_in_q; + ifidx = cur_tbl->ifidx; + if (!pkt) { + dhd_os_tcpackunlock(dhdp, flags); + return; + } + cur_tbl->pkt_in_q = NULL; + cur_tbl->pkt_ether_hdr = NULL; + cur_tbl->ifidx = 0; + cur_tbl->supp_cnt = 0; + if (--tcpack_sup_mod->tcpack_info_cnt < 0) { + DHD_ERROR(("%s %d: ERROR!!!
tcp_ack_info_cnt %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt)); + } + + dhd_os_tcpackunlock(dhdp, flags); + + dhd_sendpkt(dhdp, ifidx, pkt); +} + +int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode) +{ + int ret = BCME_OK; + unsigned long flags; + + flags = dhd_os_tcpacklock(dhdp); + + if (dhdp->tcpack_sup_mode == mode) { + DHD_ERROR(("%s %d: already set to %d\n", __FUNCTION__, __LINE__, mode)); + goto exit; + } + + if (mode >= TCPACK_SUP_LAST_MODE || +#ifndef BCMSDIO + mode == TCPACK_SUP_DELAYTX || +#endif /* !BCMSDIO */ + FALSE) { + DHD_ERROR(("%s %d: Invalid mode %d\n", __FUNCTION__, __LINE__, mode)); + ret = BCME_BADARG; + goto exit; + } + + DHD_TRACE(("%s: %d -> %d\n", + __FUNCTION__, dhdp->tcpack_sup_mode, mode)); + +#ifdef BCMSDIO + /* Old tcpack_sup_mode is TCPACK_SUP_DELAYTX */ + if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX) { + tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module; + /* We won't need tdata_psh_info pool and tcpdata_info_tbl anymore */ + _tdata_psh_info_pool_deinit(dhdp, tcpack_sup_mod); + tcpack_sup_mod->tcpdata_info_cnt = 0; + bzero(tcpack_sup_mod->tcpdata_info_tbl, + sizeof(tcpdata_info_t) * TCPDATA_INFO_MAXNUM); + /* For half duplex bus interface, tx precedes rx by default */ + if (dhdp->bus) + dhd_bus_set_dotxinrx(dhdp->bus, TRUE); + } +#endif /* BCMSDIO */ + dhdp->tcpack_sup_mode = mode; + + if (mode == TCPACK_SUP_OFF) { + ASSERT(dhdp->tcpack_sup_module != NULL); + /* Clean up timer/data structure for any remaining/pending packet or timer. */ + dhd_tcpack_info_tbl_clean(dhdp); + MFREE(dhdp->osh, dhdp->tcpack_sup_module, sizeof(tcpack_sup_module_t)); + dhdp->tcpack_sup_module = NULL; + goto exit; + } + + if (dhdp->tcpack_sup_module == NULL) { + tcpack_sup_module_t *tcpack_sup_mod = + MALLOC(dhdp->osh, sizeof(tcpack_sup_module_t)); + if (tcpack_sup_mod == NULL) { + DHD_ERROR(("%s %d: No MEM\n", __FUNCTION__, __LINE__)); + dhdp->tcpack_sup_mode = TCPACK_SUP_OFF; + ret = BCME_NOMEM; + goto exit; + } + bzero(tcpack_sup_mod, sizeof(tcpack_sup_module_t)); + dhdp->tcpack_sup_module = tcpack_sup_mod; + } + +#ifdef BCMSDIO + if (mode == TCPACK_SUP_DELAYTX) { + ret = _tdata_psh_info_pool_init(dhdp, dhdp->tcpack_sup_module); + if (ret != BCME_OK) + DHD_ERROR(("%s %d: pool init fail with %d\n", __FUNCTION__, __LINE__, ret)); + else if (dhdp->bus) + dhd_bus_set_dotxinrx(dhdp->bus, FALSE); + } +#endif /* BCMSDIO */ + + if (mode == TCPACK_SUP_HOLD) { + int i; + tcpack_sup_module_t *tcpack_sup_mod = + (tcpack_sup_module_t *)dhdp->tcpack_sup_module; + dhdp->tcpack_sup_ratio = CUSTOM_TCPACK_SUPP_RATIO; + dhdp->tcpack_sup_delay = CUSTOM_TCPACK_DELAY_TIME; + for (i = 0; i < TCPACK_INFO_MAXNUM; i++) + { + tcpack_sup_mod->tcpack_info_tbl[i].dhdp = dhdp; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + timer_setup(&tcpack_sup_mod->tcpack_info_tbl[i].timer, dhd_tcpack_send, 0); +#else + init_timer(&tcpack_sup_mod->tcpack_info_tbl[i].timer); + tcpack_sup_mod->tcpack_info_tbl[i].timer.data = + (ulong)&tcpack_sup_mod->tcpack_info_tbl[i]; + tcpack_sup_mod->tcpack_info_tbl[i].timer.function = dhd_tcpack_send; +#endif + } + } + +exit: + dhd_os_tcpackunlock(dhdp, flags); + return ret; +} + +void +dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp) +{ + tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module; + int i; + unsigned long flags; + + if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF) + goto exit; + + flags = dhd_os_tcpacklock(dhdp); + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", + __FUNCTION__, __LINE__)); +
dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + + if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) { + for (i = 0; i < TCPACK_INFO_MAXNUM; i++) { + if (tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q) { + PKTFREE(dhdp->osh, tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q, + TRUE); + tcpack_sup_mod->tcpack_info_tbl[i].pkt_in_q = NULL; + tcpack_sup_mod->tcpack_info_tbl[i].pkt_ether_hdr = NULL; + tcpack_sup_mod->tcpack_info_tbl[i].ifidx = 0; + tcpack_sup_mod->tcpack_info_tbl[i].supp_cnt = 0; + } + } + } else { + tcpack_sup_mod->tcpack_info_cnt = 0; + bzero(tcpack_sup_mod->tcpack_info_tbl, sizeof(tcpack_info_t) * TCPACK_INFO_MAXNUM); + } + + dhd_os_tcpackunlock(dhdp, flags); + + if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) { + for (i = 0; i < TCPACK_INFO_MAXNUM; i++) { + del_timer_sync(&tcpack_sup_mod->tcpack_info_tbl[i].timer); + } + } + +exit: + return; +} + +inline int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt) +{ + uint8 i; + tcpack_sup_module_t *tcpack_sup_mod; + tcpack_info_t *tcpack_info_tbl; + int tbl_cnt; + int ret = BCME_OK; + void *pdata; + uint32 pktlen; + unsigned long flags; + + if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF) + goto exit; + + pdata = PKTDATA(dhdp->osh, pkt); + pktlen = PKTLEN(dhdp->osh, pkt) - dhd_prot_hdrlen(dhdp, pdata); + + if (pktlen < TCPACKSZMIN || pktlen > TCPACKSZMAX) { + DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n", + __FUNCTION__, __LINE__, pktlen)); + goto exit; + } + + flags = dhd_os_tcpacklock(dhdp); + tcpack_sup_mod = dhdp->tcpack_sup_module; + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); + ret = BCME_ERROR; + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + tbl_cnt = tcpack_sup_mod->tcpack_info_cnt; + tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl; + + ASSERT(tbl_cnt <= TCPACK_INFO_MAXNUM); + + for (i = 0; i < tbl_cnt; i++) { + if (tcpack_info_tbl[i].pkt_in_q == pkt) { + DHD_TRACE(("%s %d: pkt %p sent out. idx %d, tbl_cnt %d\n", + __FUNCTION__, __LINE__, pkt, i, tbl_cnt)); + /* This pkt is being transmitted so remove the tcp_ack_info of it. */ + if (i < tbl_cnt - 1) { + bcopy(&tcpack_info_tbl[tbl_cnt - 1], + &tcpack_info_tbl[i], sizeof(tcpack_info_t)); + } + bzero(&tcpack_info_tbl[tbl_cnt - 1], sizeof(tcpack_info_t)); + if (--tcpack_sup_mod->tcpack_info_cnt < 0) { + DHD_ERROR(("%s %d: ERROR!!! 
tcp_ack_info_cnt %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt)); + ret = BCME_ERROR; + } + break; + } + } + dhd_os_tcpackunlock(dhdp, flags); + +exit: + return ret; +} + +static INLINE bool dhd_tcpdata_psh_acked(dhd_pub_t *dhdp, uint8 *ip_hdr, + uint8 *tcp_hdr, uint32 tcp_ack_num) +{ + tcpack_sup_module_t *tcpack_sup_mod; + int i; + tcpdata_info_t *tcpdata_info = NULL; + tdata_psh_info_t *tdata_psh_info = NULL; + bool ret = FALSE; + + if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX) + goto exit; + + tcpack_sup_mod = dhdp->tcpack_sup_module; + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); + goto exit; + } + + DHD_TRACE(("%s %d: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d, ack %u\n", __FUNCTION__, __LINE__, + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]), + tcp_ack_num)); + + for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) { + tcpdata_info_t *tcpdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i]; + DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d\n", __FUNCTION__, __LINE__, i, + IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->ip_addr.src)), + IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->ip_addr.dst)), + ntoh16_ua(tcpdata_info_tmp->tcp_port.src), + ntoh16_ua(tcpdata_info_tmp->tcp_port.dst))); + + /* If either IP address or TCP port number does not match, skip. */ + if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET], + tcpdata_info_tmp->ip_addr.dst, IPV4_ADDR_LEN) == 0 && + memcmp(&ip_hdr[IPV4_DEST_IP_OFFSET], + tcpdata_info_tmp->ip_addr.src, IPV4_ADDR_LEN) == 0 && + memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET], + tcpdata_info_tmp->tcp_port.dst, TCP_PORT_LEN) == 0 && + memcmp(&tcp_hdr[TCP_DEST_PORT_OFFSET], + tcpdata_info_tmp->tcp_port.src, TCP_PORT_LEN) == 0) { + tcpdata_info = tcpdata_info_tmp; + break; + } + } + + if (tcpdata_info == NULL) { + DHD_TRACE(("%s %d: no tcpdata_info!\n", __FUNCTION__, __LINE__)); + goto exit; + } + + if (tcpdata_info->tdata_psh_info_head == NULL) { + DHD_TRACE(("%s %d: No PSH DATA to be acked!\n", __FUNCTION__, __LINE__)); + } + + while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) { + if (IS_TCPSEQ_GE(tcp_ack_num, tdata_psh_info->end_seq)) { + DHD_TRACE(("%s %d: PSH ACKED! 
%u >= %u\n", + __FUNCTION__, __LINE__, tcp_ack_num, tdata_psh_info->end_seq)); + tcpdata_info->tdata_psh_info_head = tdata_psh_info->next; + tdata_psh_info->next = NULL; + _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info); + ret = TRUE; + } else + break; + } + if (tdata_psh_info == NULL) + tcpdata_info->tdata_psh_info_tail = NULL; + +#ifdef DHDTCPACK_SUP_DBG + DHD_TRACE(("%s %d: PSH INFO ENQ %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num)); +#endif /* DHDTCPACK_SUP_DBG */ + +exit: + return ret; +} + +bool +dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt) +{ + uint8 *new_ether_hdr; /* Ethernet header of the new packet */ + uint16 new_ether_type; /* Ethernet type of the new packet */ + uint8 *new_ip_hdr; /* IP header of the new packet */ + uint8 *new_tcp_hdr; /* TCP header of the new packet */ + uint32 new_ip_hdr_len; /* IP header length of the new packet */ + uint32 cur_framelen; + uint32 new_tcp_ack_num; /* TCP acknowledge number of the new packet */ + uint16 new_ip_total_len; /* Total length of IP packet for the new packet */ + uint32 new_tcp_hdr_len; /* TCP header length of the new packet */ + tcpack_sup_module_t *tcpack_sup_mod; + tcpack_info_t *tcpack_info_tbl; + int i; + bool ret = FALSE; + bool set_dotxinrx = TRUE; + unsigned long flags; + + + if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF) + goto exit; + + new_ether_hdr = PKTDATA(dhdp->osh, pkt); + cur_framelen = PKTLEN(dhdp->osh, pkt); + + if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) { + DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n", + __FUNCTION__, __LINE__, cur_framelen)); + goto exit; + } + + new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13]; + + if (new_ether_type != ETHER_TYPE_IP) { + DHD_TRACE(("%s %d: Not a IP packet 0x%x\n", + __FUNCTION__, __LINE__, new_ether_type)); + goto exit; + } + + DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type)); + + new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN; + cur_framelen -= ETHER_HDR_LEN; + + ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN); + + new_ip_hdr_len = IPV4_HLEN(new_ip_hdr); + if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) { + DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n", + __FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr))); + goto exit; + } + + new_tcp_hdr = new_ip_hdr + new_ip_hdr_len; + cur_framelen -= new_ip_hdr_len; + + ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN); + + DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__)); + + /* is it an ack ? Allow only ACK flag, not to suppress others. 
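+ * (Illustrative note, not from the original sources: a pure ACK carries only the ACK bit in the flags byte, i.e. 0x10; a segment with ACK|PSH, flags 0x18, is left untouched even though it also acknowledges data.)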
 */ + if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) { + DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n", + __FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET])); + goto exit; + } + + new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]); + new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]); + + /* This packet has TCP data, so just send */ + if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) { + DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__)); + goto exit; + } + + ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len); + + new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]); + + DHD_TRACE(("%s %d: TCP ACK with zero DATA length" + " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n", + __FUNCTION__, __LINE__, + IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET]))); + + /* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */ + flags = dhd_os_tcpacklock(dhdp); +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + counter_printlog(&tack_tbl); + tack_tbl.cnt[0]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + + tcpack_sup_mod = dhdp->tcpack_sup_module; + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); + ret = BCME_ERROR; + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + /* Fetch the table only after the NULL check above */ + tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl; + + if (dhd_tcpdata_psh_acked(dhdp, new_ip_hdr, new_tcp_hdr, new_tcp_ack_num)) { + /* This TCPACK is ACK to TCPDATA PSH pkt, so keep set_dotxinrx TRUE */ +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[5]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + } else + set_dotxinrx = FALSE; + + for (i = 0; i < tcpack_sup_mod->tcpack_info_cnt; i++) { + void *oldpkt; /* TCPACK packet that is already in txq or DelayQ */ + uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr; + uint32 old_ip_hdr_len, old_tcp_hdr_len; + uint32 old_tcpack_num; /* TCP ACK number of old TCPACK packet in Q */ + + if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) { + DHD_ERROR(("%s %d: Unexpected error!! cur idx %d, ttl cnt %d\n", + __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt)); + break; + } + + if (PKTDATA(dhdp->osh, oldpkt) == NULL) { + DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d, ttl cnt %d\n", + __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt)); + break; + } + + old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr; + old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN; + old_ip_hdr_len = IPV4_HLEN(old_ip_hdr); + old_tcp_hdr = old_ip_hdr + old_ip_hdr_len; + old_tcp_hdr_len = 4 * TCP_HDRLEN(old_tcp_hdr[TCP_HLEN_OFFSET]); + + DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i, + IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET]))); + + /* If either of IP address or TCP port number does not match, skip. + * Note that src/dst addr fields in ip header are contiguous being 8 bytes in total. + * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total.
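+ * (Layout reference added for clarity: in the IPv4 header the source address is at byte offset 12 and the destination at 16, so offsets 12..19 form one 8-byte block covered by a single memcmp of IPV4_ADDR_LEN * 2; in the TCP header the source port is at offset 0 and the destination port at 2, one 4-byte block of TCP_PORT_LEN * 2.)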
+ */ + if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET], + &old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) || + memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET], + &old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2)) + continue; + + old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]); + + if (IS_TCPSEQ_GT(new_tcp_ack_num, old_tcpack_num)) { + /* New packet has higher TCP ACK number, so it replaces the old packet */ + if (new_ip_hdr_len == old_ip_hdr_len && + new_tcp_hdr_len == old_tcp_hdr_len) { + ASSERT(memcmp(new_ether_hdr, old_ether_hdr, ETHER_HDR_LEN) == 0); + bcopy(new_ip_hdr, old_ip_hdr, new_ip_total_len); + PKTFREE(dhdp->osh, pkt, FALSE); + DHD_TRACE(("%s %d: TCP ACK replace %u -> %u\n", + __FUNCTION__, __LINE__, old_tcpack_num, new_tcp_ack_num)); +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[2]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + ret = TRUE; + } else { +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[6]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + DHD_TRACE(("%s %d: length mismatch %d != %d || %d != %d" + " ACK %u -> %u\n", __FUNCTION__, __LINE__, + new_ip_hdr_len, old_ip_hdr_len, + new_tcp_hdr_len, old_tcp_hdr_len, + old_tcpack_num, new_tcp_ack_num)); + } + } else if (new_tcp_ack_num == old_tcpack_num) { + set_dotxinrx = TRUE; + /* TCPACK retransmission */ +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[3]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + } else { + DHD_TRACE(("%s %d: ACK number reverse old %u(0x%p) new %u(0x%p)\n", + __FUNCTION__, __LINE__, old_tcpack_num, oldpkt, + new_tcp_ack_num, pkt)); + } + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + + if (i == tcpack_sup_mod->tcpack_info_cnt && i < TCPACK_INFO_MAXNUM) { + /* No TCPACK packet with the same IP addr and TCP port is found + * in tcp_ack_info_tbl. So add this packet to the table.
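+ * (Note added for clarity: the table tracks at most TCPACK_INFO_MAXNUM concurrent streams; when no slot is free the ACK is simply sent out unmodified, as the else branch below logs.)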
+ */ + DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n", + __FUNCTION__, __LINE__, pkt, new_ether_hdr, + tcpack_sup_mod->tcpack_info_cnt)); + + tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_in_q = pkt; + tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_ether_hdr = new_ether_hdr; + tcpack_sup_mod->tcpack_info_cnt++; +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[1]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + } else { + ASSERT(i == tcpack_sup_mod->tcpack_info_cnt); + DHD_TRACE(("%s %d: No empty tcp ack info tbl\n", + __FUNCTION__, __LINE__)); + } + dhd_os_tcpackunlock(dhdp, flags); + +exit: + /* Unless TCPACK_SUP_DELAYTX, dotxinrx is always TRUE, so no need to set here */ + if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX && set_dotxinrx) + dhd_bus_set_dotxinrx(dhdp->bus, TRUE); + + return ret; +} + +bool +dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt) +{ + uint8 *ether_hdr; /* Ethernet header of the new packet */ + uint16 ether_type; /* Ethernet type of the new packet */ + uint8 *ip_hdr; /* IP header of the new packet */ + uint8 *tcp_hdr; /* TCP header of the new packet */ + uint32 ip_hdr_len; /* IP header length of the new packet */ + uint32 cur_framelen; + uint16 ip_total_len; /* Total length of IP packet for the new packet */ + uint32 tcp_hdr_len; /* TCP header length of the new packet */ + uint32 tcp_seq_num; /* TCP sequence number of the new packet */ + uint16 tcp_data_len; /* TCP DATA length that excludes IP and TCP headers */ + uint32 end_tcp_seq_num; /* TCP seq number of the last byte in the new packet */ + tcpack_sup_module_t *tcpack_sup_mod; + tcpdata_info_t *tcpdata_info = NULL; + tdata_psh_info_t *tdata_psh_info; + + int i; + bool ret = FALSE; + unsigned long flags; + + if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX) + goto exit; + + ether_hdr = PKTDATA(dhdp->osh, pkt); + cur_framelen = PKTLEN(dhdp->osh, pkt); + + ether_type = ether_hdr[12] << 8 | ether_hdr[13]; + + if (ether_type != ETHER_TYPE_IP) { + DHD_TRACE(("%s %d: Not a IP packet 0x%x\n", + __FUNCTION__, __LINE__, ether_type)); + goto exit; + } + + DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, ether_type)); + + ip_hdr = ether_hdr + ETHER_HDR_LEN; + cur_framelen -= ETHER_HDR_LEN; + + ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN); + + ip_hdr_len = IPV4_HLEN(ip_hdr); + if (IP_VER(ip_hdr) != IP_VER_4 || IPV4_PROT(ip_hdr) != IP_PROT_TCP) { + DHD_TRACE(("%s %d: Not IPv4 nor TCP!
ip ver %d, prot %d\n", + __FUNCTION__, __LINE__, IP_VER(ip_hdr), IPV4_PROT(ip_hdr))); + goto exit; + } + + tcp_hdr = ip_hdr + ip_hdr_len; + cur_framelen -= ip_hdr_len; + + ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN); + + DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__)); + + ip_total_len = ntoh16_ua(&ip_hdr[IPV4_PKTLEN_OFFSET]); + tcp_hdr_len = 4 * TCP_HDRLEN(tcp_hdr[TCP_HLEN_OFFSET]); + + /* This packet is mere TCP ACK, so do nothing */ + if (ip_total_len == ip_hdr_len + tcp_hdr_len) { + DHD_TRACE(("%s %d: Do nothing for no data TCP ACK\n", __FUNCTION__, __LINE__)); + goto exit; + } + + ASSERT(ip_total_len > ip_hdr_len + tcp_hdr_len); + + if ((tcp_hdr[TCP_FLAGS_OFFSET] & TCP_FLAG_PSH) == 0) { + DHD_TRACE(("%s %d: Not interested TCP DATA packet\n", __FUNCTION__, __LINE__)); + goto exit; + } + + DHD_TRACE(("%s %d: TCP DATA with nonzero DATA length" + " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d, flag 0x%x\n", + __FUNCTION__, __LINE__, + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]), + tcp_hdr[TCP_FLAGS_OFFSET])); + + flags = dhd_os_tcpacklock(dhdp); + tcpack_sup_mod = dhdp->tcpack_sup_module; + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); + ret = BCME_ERROR; + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + + /* Look for tcpdata_info that has the same ip src/dst addrs and tcp src/dst ports */ + i = 0; + while (i < tcpack_sup_mod->tcpdata_info_cnt) { + tcpdata_info_t *tdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i]; + uint32 now_in_ms = OSL_SYSUPTIME(); + DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d\n", __FUNCTION__, __LINE__, i, + IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->ip_addr.src)), + IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->ip_addr.dst)), + ntoh16_ua(tdata_info_tmp->tcp_port.src), + ntoh16_ua(tdata_info_tmp->tcp_port.dst))); + + /* If both IP address and TCP port number match, we found it so break. + * Note that src/dst addr fields in ip header are contiguous being 8 bytes in total. + * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total. 
+ */ + if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET], + (void *)&tdata_info_tmp->ip_addr, IPV4_ADDR_LEN * 2) == 0 && + memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET], + (void *)&tdata_info_tmp->tcp_port, TCP_PORT_LEN * 2) == 0) { + tcpdata_info = tdata_info_tmp; + tcpdata_info->last_used_time = now_in_ms; + break; + } + + if (now_in_ms - tdata_info_tmp->last_used_time > TCPDATA_INFO_TIMEOUT) { + tdata_psh_info_t *tdata_psh_info_tmp; + tcpdata_info_t *last_tdata_info; + + while ((tdata_psh_info_tmp = tdata_info_tmp->tdata_psh_info_head)) { + tdata_info_tmp->tdata_psh_info_head = tdata_psh_info_tmp->next; + tdata_psh_info_tmp->next = NULL; + DHD_TRACE(("%s %d: Clean tdata_psh_info(end_seq %u)!\n", + __FUNCTION__, __LINE__, tdata_psh_info_tmp->end_seq)); + _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info_tmp); + } +#ifdef DHDTCPACK_SUP_DBG + DHD_ERROR(("%s %d: PSH INFO ENQ %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num)); +#endif /* DHDTCPACK_SUP_DBG */ + tcpack_sup_mod->tcpdata_info_cnt--; + ASSERT(tcpack_sup_mod->tcpdata_info_cnt >= 0); + + last_tdata_info = + &tcpack_sup_mod->tcpdata_info_tbl[tcpack_sup_mod->tcpdata_info_cnt]; + if (i < tcpack_sup_mod->tcpdata_info_cnt) { + ASSERT(last_tdata_info != tdata_info_tmp); + bcopy(last_tdata_info, tdata_info_tmp, sizeof(tcpdata_info_t)); + } + bzero(last_tdata_info, sizeof(tcpdata_info_t)); + DHD_INFO(("%s %d: tcpdata_info(idx %d) is aged out. ttl cnt is now %d\n", + __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt)); + /* Don't increase "i" here, so that the prev last tcpdata_info is checked */ + } else + i++; + } + + tcp_seq_num = ntoh32_ua(&tcp_hdr[TCP_SEQ_NUM_OFFSET]); + tcp_data_len = ip_total_len - ip_hdr_len - tcp_hdr_len; + end_tcp_seq_num = tcp_seq_num + tcp_data_len; + + if (tcpdata_info == NULL) { + ASSERT(i == tcpack_sup_mod->tcpdata_info_cnt); + if (i >= TCPDATA_INFO_MAXNUM) { + DHD_TRACE(("%s %d: tcp_data_info_tbl FULL! %d %d" + " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n", + __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt, + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]))); + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i]; + + /* No TCP flow with the same IP addr and TCP port is found + * in tcp_data_info_tbl. So add this flow to the table. + */ + DHD_INFO(("%s %d: Add data info to tbl[%d]: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->tcpdata_info_cnt, + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]))); + /* Note that src/dst addr fields in ip header are contiguous being 8 bytes in total. + * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total. 
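+ * (Note added for clarity: the single 8-byte and 4-byte bcopy calls below also rely on the destination side being contiguous, which holds here because ip_addr.src/.dst and tcp_port.src/.dst are adjacent uint8 arrays with no padding in between.)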
+ */ + bcopy(&ip_hdr[IPV4_SRC_IP_OFFSET], (void *)&tcpdata_info->ip_addr, + IPV4_ADDR_LEN * 2); + bcopy(&tcp_hdr[TCP_SRC_PORT_OFFSET], (void *)&tcpdata_info->tcp_port, + TCP_PORT_LEN * 2); + + tcpdata_info->last_used_time = OSL_SYSUPTIME(); + tcpack_sup_mod->tcpdata_info_cnt++; + } + + ASSERT(tcpdata_info != NULL); + + tdata_psh_info = _tdata_psh_info_pool_deq(tcpack_sup_mod); +#ifdef DHDTCPACK_SUP_DBG + DHD_TRACE(("%s %d: PSH INFO ENQ %d\n", + __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num)); +#endif /* DHDTCPACK_SUP_DBG */ + + if (tdata_psh_info == NULL) { + DHD_ERROR(("%s %d: No more free tdata_psh_info!!\n", __FUNCTION__, __LINE__)); + ret = BCME_ERROR; + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + tdata_psh_info->end_seq = end_tcp_seq_num; + +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) + tack_tbl.cnt[4]++; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ + + DHD_TRACE(("%s %d: TCP PSH DATA recvd! end seq %u\n", + __FUNCTION__, __LINE__, tdata_psh_info->end_seq)); + + ASSERT(tdata_psh_info->next == NULL); + + if (tcpdata_info->tdata_psh_info_head == NULL) + tcpdata_info->tdata_psh_info_head = tdata_psh_info; + else { + ASSERT(tcpdata_info->tdata_psh_info_tail); + tcpdata_info->tdata_psh_info_tail->next = tdata_psh_info; + } + tcpdata_info->tdata_psh_info_tail = tdata_psh_info; + + dhd_os_tcpackunlock(dhdp, flags); + +exit: + return ret; +} + +bool +dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx) +{ + uint8 *new_ether_hdr; /* Ethernet header of the new packet */ + uint16 new_ether_type; /* Ethernet type of the new packet */ + uint8 *new_ip_hdr; /* IP header of the new packet */ + uint8 *new_tcp_hdr; /* TCP header of the new packet */ + uint32 new_ip_hdr_len; /* IP header length of the new packet */ + uint32 cur_framelen; + uint32 new_tcp_ack_num; /* TCP acknowledge number of the new packet */ + uint16 new_ip_total_len; /* Total length of IP packet for the new packet */ + uint32 new_tcp_hdr_len; /* TCP header length of the new packet */ + tcpack_sup_module_t *tcpack_sup_mod; + tcpack_info_t *tcpack_info_tbl; + int i, free_slot = TCPACK_INFO_MAXNUM; + bool hold = FALSE; + unsigned long flags; + + if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) { + goto exit; + } + + if (dhdp->tcpack_sup_ratio == 1) { + goto exit; + } + + new_ether_hdr = PKTDATA(dhdp->osh, pkt); + cur_framelen = PKTLEN(dhdp->osh, pkt); + + if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) { + DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n", + __FUNCTION__, __LINE__, cur_framelen)); + goto exit; + } + + new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13]; + + if (new_ether_type != ETHER_TYPE_IP) { + DHD_TRACE(("%s %d: Not a IP packet 0x%x\n", + __FUNCTION__, __LINE__, new_ether_type)); + goto exit; + } + + DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type)); + + new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN; + cur_framelen -= ETHER_HDR_LEN; + + ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN); + + new_ip_hdr_len = IPV4_HLEN(new_ip_hdr); + if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) { + DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n", + __FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr))); + goto exit; + } + + new_tcp_hdr = new_ip_hdr + new_ip_hdr_len; + cur_framelen -= new_ip_hdr_len; + + ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN); + + DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__)); + + /* is it an ack ? Allow only ACK flag, not to suppress others. 
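+ * (Note added for clarity: in HOLD mode a qualifying ACK is parked in tcpack_info_tbl instead of being sent at once; dhd_tcpack_send() flushes it after tcpack_sup_delay ms, unless tcpack_sup_ratio ACKs accumulate first, in which case the newest ACK is released immediately.)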
 */ + if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) { + DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n", + __FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET])); + goto exit; + } + + new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]); + new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]); + + /* This packet has TCP data, so just send */ + if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) { + DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__)); + goto exit; + } + + ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len); + + new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]); + + DHD_TRACE(("%s %d: TCP ACK with zero DATA length" + " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n", + __FUNCTION__, __LINE__, + IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET]))); + + /* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */ + flags = dhd_os_tcpacklock(dhdp); + + tcpack_sup_mod = dhdp->tcpack_sup_module; + + if (!tcpack_sup_mod) { + DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__)); + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + /* Fetch the table only after the NULL check above */ + tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl; + + hold = TRUE; + + for (i = 0; i < TCPACK_INFO_MAXNUM; i++) { + void *oldpkt; /* TCPACK packet that is already in txq or DelayQ */ + uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr; + uint32 old_ip_hdr_len, old_tcp_hdr_len; + uint32 old_tcpack_num; /* TCP ACK number of old TCPACK packet in Q */ + + if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) { + if (free_slot == TCPACK_INFO_MAXNUM) { + free_slot = i; + } + continue; + } + + if (PKTDATA(dhdp->osh, oldpkt) == NULL) { + DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d\n", + __FUNCTION__, __LINE__, i)); + hold = FALSE; + dhd_os_tcpackunlock(dhdp, flags); + goto exit; + } + + old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr; + old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN; + old_ip_hdr_len = IPV4_HLEN(old_ip_hdr); + old_tcp_hdr = old_ip_hdr + old_ip_hdr_len; + old_tcp_hdr_len = 4 * TCP_HDRLEN(old_tcp_hdr[TCP_HLEN_OFFSET]); + + DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR + " TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i, + IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])), + IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])), + ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]), + ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET]))); + + /* If either of IP address or TCP port number does not match, skip.
*/ + if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET], + &old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) || + memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET], + &old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2)) { + continue; + } + + old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]); + + if (IS_TCPSEQ_GE(new_tcp_ack_num, old_tcpack_num)) { + tcpack_info_tbl[i].supp_cnt++; + if (tcpack_info_tbl[i].supp_cnt >= dhdp->tcpack_sup_ratio) { + tcpack_info_tbl[i].pkt_in_q = NULL; + tcpack_info_tbl[i].pkt_ether_hdr = NULL; + tcpack_info_tbl[i].ifidx = 0; + tcpack_info_tbl[i].supp_cnt = 0; + hold = FALSE; + } else { + tcpack_info_tbl[i].pkt_in_q = pkt; + tcpack_info_tbl[i].pkt_ether_hdr = new_ether_hdr; + tcpack_info_tbl[i].ifidx = ifidx; + } + PKTFREE(dhdp->osh, oldpkt, TRUE); + } else { + PKTFREE(dhdp->osh, pkt, TRUE); + } + dhd_os_tcpackunlock(dhdp, flags); + + if (!hold) { + del_timer_sync(&tcpack_info_tbl[i].timer); + } + goto exit; + } + + if (free_slot < TCPACK_INFO_MAXNUM) { + /* No TCPACK packet with the same IP addr and TCP port is found + * in tcp_ack_info_tbl. So add this packet to the table. + */ + DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n", + __FUNCTION__, __LINE__, pkt, new_ether_hdr, + free_slot)); + + tcpack_info_tbl[free_slot].pkt_in_q = pkt; + tcpack_info_tbl[free_slot].pkt_ether_hdr = new_ether_hdr; + tcpack_info_tbl[free_slot].ifidx = ifidx; + tcpack_info_tbl[free_slot].supp_cnt = 1; + mod_timer(&tcpack_sup_mod->tcpack_info_tbl[free_slot].timer, + jiffies + msecs_to_jiffies(dhdp->tcpack_sup_delay)); + tcpack_sup_mod->tcpack_info_cnt++; + } else { + DHD_TRACE(("%s %d: No empty tcp ack info tbl\n", + __FUNCTION__, __LINE__)); + } + dhd_os_tcpackunlock(dhdp, flags); + +exit: + return hold; +} +#endif /* DHDTCPACK_SUPPRESS */ diff --git a/drivers/net/wireless/bcmdhd/dhd_ip.h b/drivers/net/wireless/bcmdhd/dhd_ip.h new file mode 100644 index 000000000000..a72976b07ccf --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_ip.h @@ -0,0 +1,85 @@ +/* + * Header file describing the common ip parser function. + * + * Provides type definitions and function prototypes used to parse ip packet. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_ip.h 537119 2015-02-25 04:24:14Z $ + */ + +#ifndef _dhd_ip_h_ +#define _dhd_ip_h_ + +#ifdef DHDTCPACK_SUPPRESS +#include +#include +#include +#endif /* DHDTCPACK_SUPPRESS */ + +typedef enum pkt_frag +{ + DHD_PKT_FRAG_NONE = 0, + DHD_PKT_FRAG_FIRST, + DHD_PKT_FRAG_CONT, + DHD_PKT_FRAG_LAST +} pkt_frag_t; + +extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p); + +#ifdef DHDTCPACK_SUPPRESS +#define TCPACKSZMIN (ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN) +/* Size of MAX possible TCP ACK packet. Extra bytes for IP/TCP option fields */ +#define TCPACKSZMAX (TCPACKSZMIN + 100) + +/* Max number of TCP streams that have own src/dst IP addrs and TCP ports */ +#define TCPACK_INFO_MAXNUM 4 +#define TCPDATA_INFO_MAXNUM 4 +#define TCPDATA_PSH_INFO_MAXNUM (8 * TCPDATA_INFO_MAXNUM) + +#define TCPDATA_INFO_TIMEOUT 5000 /* Remove tcpdata_info if inactive for this time (in ms) */ + +#define DEFAULT_TCPACK_SUPP_RATIO 3 +#ifndef CUSTOM_TCPACK_SUPP_RATIO +#define CUSTOM_TCPACK_SUPP_RATIO DEFAULT_TCPACK_SUPP_RATIO +#endif /* CUSTOM_TCPACK_SUPP_RATIO */ + +#define DEFAULT_TCPACK_DELAY_TIME 10 /* ms */ +#ifndef CUSTOM_TCPACK_DELAY_TIME +#define CUSTOM_TCPACK_DELAY_TIME DEFAULT_TCPACK_DELAY_TIME +#endif /* CUSTOM_TCPACK_DELAY_TIME */ + +extern int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 on); +extern void dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp); +extern int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt); +extern bool dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt); +extern bool dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt); +extern bool dhd_tcpack_hold(dhd_pub_t *dhdp, void *pkt, int ifidx); +/* #define DHDTCPACK_SUP_DBG */ +#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG) +extern counter_tbl_t tack_tbl; +#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */ +#endif /* DHDTCPACK_SUPPRESS */ + +#endif /* _dhd_ip_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.c b/drivers/net/wireless/bcmdhd/dhd_linux.c new file mode 100644 index 000000000000..2786a6640181 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_linux.c @@ -0,0 +1,13676 @@ +/* + * Broadcom Dongle Host Driver (DHD), Linux-specific network interface + * Basically selected code segments from usb-cdc.c and usb-rndis.c + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_linux.c 609723 2016-01-05 08:40:45Z $ + */ + +#include +#include +#include +#ifdef SHOW_LOGTRACE +#include +#include +#endif /* SHOW_LOGTRACE */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef ENABLE_ADAPTIVE_SCHED +#include +#endif /* ENABLE_ADAPTIVE_SCHED */ + +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#ifdef PCIE_FULL_DONGLE +#include +#endif +#include +#include +#include +#ifdef CONFIG_HAS_WAKELOCK +#include +#endif +#ifdef WL_CFG80211 +#include +#endif +#ifdef PNO_SUPPORT +#include +#endif +#ifdef RTT_SUPPORT +#include +#endif + +#ifdef CONFIG_COMPAT +#include +#endif + +#ifdef DHD_WMF +#include +#endif /* DHD_WMF */ + +#ifdef DHD_L2_FILTER +#include +#include +#include +#endif /* DHD_L2_FILTER */ + +#ifdef DHD_PSTA +#include +#endif /* DHD_PSTA */ + + +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ + +#ifdef DHD_DEBUG_PAGEALLOC +typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len); +void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len); +extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle); +#endif /* DHD_DEBUG_PAGEALLOC */ + + +#if defined(DHD_LB) +/* Dynamic CPU selection for load balancing */ +#include +#include +#include +#include +#include + +#if !defined(DHD_LB_PRIMARY_CPUS) +#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */ +#endif + +#if !defined(DHD_LB_SECONDARY_CPUS) +#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */ +#endif + +#define HIST_BIN_SIZE 8 + +#if defined(DHD_LB_RXP) +static void dhd_rx_napi_dispatcher_fn(struct work_struct * work); +#endif /* DHD_LB_RXP */ + +#endif /* DHD_LB */ + +#ifdef WLMEDIA_HTSF +#include +#include + +#define HTSF_MINLEN 200 /* min. packet length to timestamp */ +#define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */ +#define TSMAX 1000 /* max no. of timing record kept */ +#define NUMBIN 34 + +static uint32 tsidx = 0; +static uint32 htsf_seqnum = 0; +uint32 tsfsync; +struct timeval tsync; +static uint32 tsport = 5010; + +typedef struct histo_ { + uint32 bin[NUMBIN]; +} histo_t; + +#if !ISPOWEROF2(DHD_SDALIGN) +#error DHD_SDALIGN is not a power of 2! 
+#endif + +static histo_t vi_d1, vi_d2, vi_d3, vi_d4; +#endif /* WLMEDIA_HTSF */ + +#ifdef STBLINUX +#ifdef quote_str +#undef quote_str +#endif /* quote_str */ +#ifdef to_str +#undef to_str +#endif /* to_str */ +#define to_str(s) #s +#define quote_str(s) to_str(s) + +static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET); +#endif /* STBLINUX */ + + +#if defined(SOFTAP) +extern bool ap_cfg_running; +extern bool ap_fw_loaded; +#endif +extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction); + +#ifdef FIX_CPU_MIN_CLOCK +#include +#endif /* FIX_CPU_MIN_CLOCK */ +#ifdef SET_RANDOM_MAC_SOFTAP +#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL +#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11 +#endif +static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL; +#endif /* SET_RANDOM_MAC_SOFTAP */ +#ifdef ENABLE_ADAPTIVE_SCHED +#define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */ +#ifndef CUSTOM_CPUFREQ_THRESH +#define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH +#endif /* CUSTOM_CPUFREQ_THRESH */ +#endif /* ENABLE_ADAPTIVE_SCHED */ + +/* enable HOSTIP cache update from the host side when an eth0:N is up */ +#define AOE_IP_ALIAS_SUPPORT 1 + +#ifdef BCM_FD_AGGR +#include +#include +#endif +#ifdef PROP_TXSTATUS +#include +#include +#endif + +#include + +/* Maximum STA per radio */ +#define DHD_MAX_STA 32 + + + +const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 }; +const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; +#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]] + +#ifdef ARP_OFFLOAD_SUPPORT +void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx); +static int dhd_inetaddr_notifier_call(struct notifier_block *this, + unsigned long event, void *ptr); +static struct notifier_block dhd_inetaddr_notifier = { + .notifier_call = dhd_inetaddr_notifier_call +}; +/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be + * created in kernel notifier link list (with 'next' pointing to itself) + */ +static bool dhd_inetaddr_notifier_registered = FALSE; +#endif /* ARP_OFFLOAD_SUPPORT */ + +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) +static int dhd_inet6addr_notifier_call(struct notifier_block *this, + unsigned long event, void *ptr); +static struct notifier_block dhd_inet6addr_notifier = { + .notifier_call = dhd_inet6addr_notifier_call +}; +/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be + * created in kernel notifier link list (with 'next' pointing to itself) + */ +static bool dhd_inet6addr_notifier_registered = FALSE; +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) +#include +volatile bool dhd_mmc_suspend = FALSE; +DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ + +#if defined(OOB_INTR_ONLY) +extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable); +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +static void dhd_hang_process(void *dhd_info, void *event_data, u8 event); +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +MODULE_LICENSE("GPL and additional rights"); +#endif /* LinuxVer */ + +#include + +#ifdef BCM_FD_AGGR +#define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE) +#else +#ifndef PROP_TXSTATUS +#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen) +#else +#define
DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128) +#endif +#endif /* BCM_FD_AGGR */ + +#ifdef PROP_TXSTATUS +extern bool dhd_wlfc_skip_fc(void); +extern void dhd_wlfc_plat_init(void *dhd); +extern void dhd_wlfc_plat_deinit(void *dhd); +#endif /* PROP_TXSTATUS */ +#ifdef USE_DYNAMIC_F2_BLKSIZE +extern uint sd_f2_blocksize; +extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) +const char * +print_tainted() +{ + return ""; +} +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */ + +/* Linux wireless extension support */ +#if defined(WL_WIRELESS_EXT) +#include +extern wl_iw_extra_params_t g_wl_iw_params; +#endif /* defined(WL_WIRELESS_EXT) */ + +#ifdef CONFIG_PARTIALSUSPEND_SLP +#include +#define CONFIG_HAS_EARLYSUSPEND +#define DHD_USE_EARLYSUSPEND +#define register_early_suspend register_pre_suspend +#define unregister_early_suspend unregister_pre_suspend +#define early_suspend pre_suspend +#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50 +#else +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) +#include +#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */ +#endif /* CONFIG_PARTIALSUSPEND_SLP */ + +extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd); + +#ifdef PKT_FILTER_SUPPORT +extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg); +extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode); +extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id); +#endif + + +#ifdef READ_MACADDR +extern int dhd_read_macaddr(struct dhd_info *dhd); +#else +static inline int dhd_read_macaddr(struct dhd_info *dhd) { return 0; } +#endif +#ifdef WRITE_MACADDR +extern int dhd_write_macaddr(struct ether_addr *mac); +#else +static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; } +#endif + + + + + +#ifdef DHD_FW_COREDUMP +static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event); +#endif /* DHD_FW_COREDUMP */ +#ifdef DHD_LOG_DUMP +static void dhd_log_dump_init(dhd_pub_t *dhd); +static void dhd_log_dump_deinit(dhd_pub_t *dhd); +static void dhd_log_dump(void *handle, void *event_info, u8 event); +void dhd_schedule_log_dump(dhd_pub_t *dhdp); +static int do_dhd_log_dump(dhd_pub_t *dhdp); +#endif /* DHD_LOG_DUMP */ + +static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused); +static struct notifier_block dhd_reboot_notifier = { + .notifier_call = dhd_reboot_callback, + .priority = 1, +}; + +#ifdef BCMPCIE +static int is_reboot = 0; +#endif /* BCMPCIE */ + +typedef struct dhd_if_event { + struct list_head list; + wl_event_data_if_t event; + char name[IFNAMSIZ+1]; + uint8 mac[ETHER_ADDR_LEN]; +} dhd_if_event_t; + +/* Interface control information */ +typedef struct dhd_if { + struct dhd_info *info; /* back pointer to dhd_info */ + /* OS/stack specifics */ + struct net_device *net; + int idx; /* iface idx in dongle */ + uint subunit; /* subunit */ + uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */ + bool set_macaddress; + bool set_multicast; + uint8 bssidx; /* bsscfg index for the interface */ + bool attached; /* Delayed attachment when unset */ + bool txflowcontrol; /* Per interface flow control indicator */ + char name[IFNAMSIZ+1]; /* linux interface name */ + char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */ + struct net_device_stats stats; +#ifdef DHD_WMF 
+ dhd_wmf_t wmf; /* per bsscfg wmf setting */ +#endif /* DHD_WMF */ +#ifdef PCIE_FULL_DONGLE + struct list_head sta_list; /* sll of associated stations */ +#if !defined(BCM_GMAC3) + spinlock_t sta_list_lock; /* lock for manipulating sll */ +#endif /* ! BCM_GMAC3 */ +#endif /* PCIE_FULL_DONGLE */ + uint32 ap_isolate; /* ap-isolation settings */ +#ifdef DHD_L2_FILTER + bool parp_enable; + bool parp_discard; + bool parp_allnode; + arp_table_t *phnd_arp_table; +/* for Per BSS modification */ + bool dhcp_unicast; + bool block_ping; + bool grat_arp; +#endif /* DHD_L2_FILTER */ +} dhd_if_t; + +#ifdef WLMEDIA_HTSF +typedef struct { + uint32 low; + uint32 high; +} tsf_t; + +typedef struct { + uint32 last_cycle; + uint32 last_sec; + uint32 last_tsf; + uint32 coef; /* scaling factor */ + uint32 coefdec1; /* first decimal */ + uint32 coefdec2; /* second decimal */ +} htsf_t; + +typedef struct { + uint32 t1; + uint32 t2; + uint32 t3; + uint32 t4; +} tstamp_t; + +static tstamp_t ts[TSMAX]; +static tstamp_t maxdelayts; +static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0; + +#endif /* WLMEDIA_HTSF */ + +struct ipv6_work_info_t { + uint8 if_idx; + char ipv6_addr[16]; + unsigned long event; +}; + +#ifdef DHD_DEBUG +typedef struct dhd_dump { + uint8 *buf; + int bufsize; +} dhd_dump_t; +#endif /* DHD_DEBUG */ + +/* When Perimeter locks are deployed, any blocking calls must be preceeded + * with a PERIM UNLOCK and followed by a PERIM LOCK. + * Examples of blocking calls are: schedule_timeout(), down_interruptible(), + * wait_event_timeout(). + */ + +/* Local private structure (extension of pub) */ +typedef struct dhd_info { +#if defined(WL_WIRELESS_EXT) + wl_iw_t iw; /* wireless extensions state (must be first) */ +#endif /* defined(WL_WIRELESS_EXT) */ + dhd_pub_t pub; + dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */ + + void *adapter; /* adapter information, interrupt, fw path etc. 
*/ + char fw_path[PATH_MAX]; /* path to firmware image */ + char nv_path[PATH_MAX]; /* path to nvram vars file */ + + /* serialize dhd iovars */ + struct mutex dhd_iovar_mutex; + + struct semaphore proto_sem; +#ifdef PROP_TXSTATUS + spinlock_t wlfc_spinlock; + +#endif /* PROP_TXSTATUS */ +#ifdef WLMEDIA_HTSF + htsf_t htsf; +#endif + wait_queue_head_t ioctl_resp_wait; + wait_queue_head_t d3ack_wait; + wait_queue_head_t dhd_bus_busy_state_wait; + uint32 default_wd_interval; + + struct timer_list timer; + bool wd_timer_valid; +#ifdef DHD_PCIE_RUNTIMEPM + struct timer_list rpm_timer; + bool rpm_timer_valid; + tsk_ctl_t thr_rpm_ctl; +#endif /* DHD_PCIE_RUNTIMEPM */ + struct tasklet_struct tasklet; + spinlock_t sdlock; + spinlock_t txqlock; + spinlock_t dhd_lock; + + struct semaphore sdsem; + tsk_ctl_t thr_dpc_ctl; + tsk_ctl_t thr_wdt_ctl; + + tsk_ctl_t thr_rxf_ctl; + spinlock_t rxf_lock; + bool rxthread_enabled; + + /* Wakelocks */ +#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + struct wake_lock wl_wifi; /* Wifi wakelock */ + struct wake_lock wl_rxwake; /* Wifi rx wakelock */ + struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */ + struct wake_lock wl_wdwake; /* Wifi wd wakelock */ + struct wake_lock wl_evtwake; /* Wifi event wakelock */ +#ifdef BCMPCIE_OOB_HOST_WAKE + struct wake_lock wl_intrwake; /* Host wakeup wakelock */ +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + struct wake_lock wl_scanwake; /* Wifi scan wakelock */ +#endif /* DHD_USE_SCAN_WAKELOCK */ +#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + /* net_device interface lock, prevent race conditions among net_dev interface + * calls and wifi_on or wifi_off + */ + struct mutex dhd_net_if_mutex; + struct mutex dhd_suspend_mutex; +#endif + spinlock_t wakelock_spinlock; + spinlock_t wakelock_evt_spinlock; + uint32 wakelock_event_counter; + uint32 wakelock_counter; + int wakelock_wd_counter; + int wakelock_rx_timeout_enable; + int wakelock_ctrl_timeout_enable; + bool waive_wakelock; + uint32 wakelock_before_waive; + + /* Thread to issue ioctl for multicast */ + wait_queue_head_t ctrl_wait; + atomic_t pend_8021x_cnt; + dhd_attach_states_t dhd_state; +#ifdef SHOW_LOGTRACE + dhd_event_log_t event_data; +#endif /* SHOW_LOGTRACE */ + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + struct early_suspend early_suspend; +#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ + +#ifdef ARP_OFFLOAD_SUPPORT + u32 pend_ipaddr; +#endif /* ARP_OFFLOAD_SUPPORT */ +#ifdef BCM_FD_AGGR + void *rpc_th; + void *rpc_osh; + struct timer_list rpcth_timer; + bool rpcth_timer_active; + uint8 fdaggr; +#endif +#ifdef DHDTCPACK_SUPPRESS + spinlock_t tcpack_lock; +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef FIX_CPU_MIN_CLOCK + bool cpufreq_fix_status; + struct mutex cpufreq_fix; + struct pm_qos_request dhd_cpu_qos; +#ifdef FIX_BUS_MIN_CLOCK + struct pm_qos_request dhd_bus_qos; +#endif /* FIX_BUS_MIN_CLOCK */ +#endif /* FIX_CPU_MIN_CLOCK */ + void *dhd_deferred_wq; +#ifdef DEBUG_CPU_FREQ + struct notifier_block freq_trans; + int __percpu *new_freq; +#endif + unsigned int unit; + struct notifier_block pm_notifier; +#ifdef DHD_PSTA + uint32 psta_mode; /* PSTA or PSR */ +#endif /* DHD_PSTA */ +#ifdef DHD_DEBUG + dhd_dump_t *dump; + struct timer_list join_timer; + u32 join_timeout_val; + bool join_timer_active; + uint scan_time_count; + struct timer_list scan_timer; + bool scan_timer_active; 
+#endif
+#if defined(DHD_LB)
+	/* CPU Load Balance dynamic CPU selection */
+
+	/* Variable that tracks the current CPUs available for candidacy */
+	cpumask_var_t cpumask_curr_avail;
+
+	/* Primary and secondary CPU mask */
+	cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
+	cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */
+
+	struct notifier_block cpu_notifier;
+
+	/* Tasklet to handle Tx Completion packet freeing */
+	struct tasklet_struct tx_compl_tasklet;
+	atomic_t tx_compl_cpu;
+
+	/* Tasklet to handle RxBuf Post during Rx completion */
+	struct tasklet_struct rx_compl_tasklet;
+	atomic_t rx_compl_cpu;
+
+	/* Napi struct for handling rx packet sendup. Packets are removed from
+	 * the H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is
+	 * then appended to rx_napi_queue (w/ lock) and the rx_napi_struct is
+	 * scheduled to run on rx_napi_cpu.
+	 */
+	struct sk_buff_head rx_pend_queue ____cacheline_aligned;
+	struct sk_buff_head rx_napi_queue ____cacheline_aligned;
+	struct napi_struct rx_napi_struct ____cacheline_aligned;
+	atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */
+	struct net_device *rx_napi_netdev; /* netdev of primary interface */
+
+	struct work_struct rx_napi_dispatcher_work;
+	struct work_struct tx_compl_dispatcher_work;
+	struct work_struct rx_compl_dispatcher_work;
+	/* Number of times DPC Tasklet ran */
+	uint32 dhd_dpc_cnt;
+
+	/* Number of times NAPI processing got scheduled */
+	uint32 napi_sched_cnt;
+
+	/* Number of times NAPI processing ran on each available core */
+	uint32 napi_percpu_run_cnt[NR_CPUS];
+
+	/* Number of times RX Completions got scheduled */
+	uint32 rxc_sched_cnt;
+	/* Number of times RX Completion ran on each available core */
+	uint32 rxc_percpu_run_cnt[NR_CPUS];
+
+	/* Number of times TX Completions got scheduled */
+	uint32 txc_sched_cnt;
+	/* Number of times TX Completions ran on each available core */
+	uint32 txc_percpu_run_cnt[NR_CPUS];
+
+	/* CPU status */
+	/* Number of times each CPU came online */
+	uint32 cpu_online_cnt[NR_CPUS];
+
+	/* Number of times each CPU went offline */
+	uint32 cpu_offline_cnt[NR_CPUS];
+
+	/*
+	 * Consumer Histogram - NAPI RX Packet processing
+	 * -----------------------------------------------
+	 * On each CPU, when the NAPI RX packet processing callback is invoked,
+	 * the number of packets it processed is captured in this data structure.
+	 * It is difficult to capture the "exact" number of packets processed,
+	 * so, treating the packet counter as a 32 bit value, we keep a bucket
+	 * with 8 bins (2^1, 2^2 ... 2^8). The number of packets processed is
+	 * rounded up to the next power of 2 and the counter in the
+	 * appropriate bin is incremented.
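+	 *
+	 * (Editorial note, inferred from dhd_lb_stats_update_histo() below:
+	 * bin j, labelled 2^(j+1) in the stats dump, counts the runs whose
+	 * packet count n satisfied roughly 2^j < n <= 2^(j+1), with every
+	 * count above 2^7 collapsing into the last bin.)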
+ * For example, assume that in CPU 1 if NAPI Rx runs 3 times + * and the packet count processed is as follows (assume the bin counters are 0) + * iteration 1 - 10 (the bin counter 2^4 increments to 1) + * iteration 2 - 30 (the bin counter 2^5 increments to 1) + * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2) + */ + uint32 napi_rx_hist[NR_CPUS][HIST_BIN_SIZE]; + uint32 txc_hist[NR_CPUS][HIST_BIN_SIZE]; + uint32 rxc_hist[NR_CPUS][HIST_BIN_SIZE]; +#endif /* DHD_LB */ + +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + + struct kobject dhd_kobj; +#ifdef SUPPORT_SENSORHUB + uint32 shub_enable; +#endif /* SUPPORT_SENSORHUB */ + + struct delayed_work dhd_memdump_work; +} dhd_info_t; + +#define DHDIF_FWDER(dhdif) FALSE + +/* Flag to indicate if we should download firmware on driver load */ +uint dhd_download_fw_on_driverload = TRUE; + +/* Flag to indicate if driver is initialized */ +uint dhd_driver_init_done = FALSE; + +/* Definitions to provide path to the firmware and nvram + * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt" + */ +char firmware_path[MOD_PARAM_PATHLEN]; +char nvram_path[MOD_PARAM_PATHLEN]; + +/* backup buffer for firmware and nvram path */ +char fw_bak_path[MOD_PARAM_PATHLEN]; +char nv_bak_path[MOD_PARAM_PATHLEN]; + +/* information string to keep firmware, chio, cheip version info visiable from log */ +char info_string[MOD_PARAM_INFOLEN]; +module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444); +int op_mode = 0; +int disable_proptx = 0; +module_param(op_mode, int, 0644); + +#if defined(DHD_LB_RXP) +static int dhd_napi_weight = 32; +module_param(dhd_napi_weight, int, 0644); +#endif /* DHD_LB_RXP */ + +extern int wl_control_wl_start(struct net_device *dev); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC) +struct semaphore dhd_registration_sem; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ + +/* deferred handlers */ +static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event); +static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event); +static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event); +static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event); +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) +static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event); +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ +#ifdef WL_CFG80211 +extern void dhd_netdev_free(struct net_device *ndev); +#endif /* WL_CFG80211 */ + +/* Error bits */ +module_param(dhd_msg_level, int, 0); + +#ifdef ARP_OFFLOAD_SUPPORT +/* ARP offload enable */ +uint dhd_arp_enable = TRUE; +module_param(dhd_arp_enable, uint, 0); + +/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */ + +#ifdef ENABLE_ARP_SNOOP_MODE +uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP; +#else +uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY; +#endif /* ENABLE_ARP_SNOOP_MODE */ + +module_param(dhd_arp_mode, uint, 0); +#endif /* ARP_OFFLOAD_SUPPORT */ + +/* Disable Prop tx */ +module_param(disable_proptx, int, 0644); +/* load firmware and/or nvram values from the filesystem */ +module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660); +module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660); + +/* Watchdog interval */ + +/* extend watchdog expiration to 2 seconds when 
DPC is running */ +#define WATCHDOG_EXTEND_INTERVAL (2000) + +uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS; +module_param(dhd_watchdog_ms, uint, 0); + +#ifdef DHD_PCIE_RUNTIMEPM +uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS; +#endif /* DHD_PCIE_RUNTIMEPMT */ +#if defined(DHD_DEBUG) +/* Console poll interval */ +uint dhd_console_ms = 0; +module_param(dhd_console_ms, uint, 0644); +#endif /* defined(DHD_DEBUG) */ + + +uint dhd_slpauto = TRUE; +module_param(dhd_slpauto, uint, 0); + +#ifdef PKT_FILTER_SUPPORT +/* Global Pkt filter enable control */ +uint dhd_pkt_filter_enable = TRUE; +module_param(dhd_pkt_filter_enable, uint, 0); +#endif + +/* Pkt filter init setup */ +uint dhd_pkt_filter_init = 0; +module_param(dhd_pkt_filter_init, uint, 0); + +/* Pkt filter mode control */ +#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER +uint dhd_master_mode = FALSE; +#else +uint dhd_master_mode = TRUE; +#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */ +module_param(dhd_master_mode, uint, 0); + +int dhd_watchdog_prio = 0; +module_param(dhd_watchdog_prio, int, 0); + +/* DPC thread priority */ +int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING; +module_param(dhd_dpc_prio, int, 0); + +/* RX frame thread priority */ +int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING; +module_param(dhd_rxf_prio, int, 0); + +int passive_channel_skip = 0; +module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR)); + +#if !defined(BCMDHDUSB) +extern int dhd_dongle_ramsize; +module_param(dhd_dongle_ramsize, int, 0); +#endif /* BCMDHDUSB */ + +/* Keep track of number of instances */ +static int dhd_found = 0; +static int instance_base = 0; /* Starting instance number */ +module_param(instance_base, int, 0644); + +/* Functions to manage sysfs interface for dhd */ +static int dhd_sysfs_init(dhd_info_t *dhd); +static void dhd_sysfs_exit(dhd_info_t *dhd); + +#if defined(DHD_LB) + +static void +dhd_lb_set_default_cpus(dhd_info_t *dhd) +{ + /* Default CPU allocation for the jobs */ + atomic_set(&dhd->rx_napi_cpu, 1); + atomic_set(&dhd->rx_compl_cpu, 2); + atomic_set(&dhd->tx_compl_cpu, 2); +} + +static void +dhd_cpumasks_deinit(dhd_info_t *dhd) +{ + free_cpumask_var(dhd->cpumask_curr_avail); + free_cpumask_var(dhd->cpumask_primary); + free_cpumask_var(dhd->cpumask_primary_new); + free_cpumask_var(dhd->cpumask_secondary); + free_cpumask_var(dhd->cpumask_secondary_new); +} + +static int +dhd_cpumasks_init(dhd_info_t *dhd) +{ + int id; + uint32 cpus; + int ret = 0; + + if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) || + !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) { + DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__)); + ret = -ENOMEM; + goto fail; + } + + cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask); + cpumask_clear(dhd->cpumask_primary); + cpumask_clear(dhd->cpumask_secondary); + + cpus = DHD_LB_PRIMARY_CPUS; + for (id = 0; id < NR_CPUS; id++) { + if (isset(&cpus, id)) + cpumask_set_cpu(id, dhd->cpumask_primary); + } + + cpus = DHD_LB_SECONDARY_CPUS; + for (id = 0; id < NR_CPUS; id++) { + if (isset(&cpus, id)) + cpumask_set_cpu(id, dhd->cpumask_secondary); + } + + return ret; +fail: + dhd_cpumasks_deinit(dhd); + return ret; +} + +/* + * The CPU Candidacy Algorithm + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * The available CPUs for selection are divided into two groups + * Primary Set - A CPU mask that carries the First Choice CPUs + * Secondary Set - A 
CPU mask that carries the Second Choice CPUs.
+ *
+ * There are two types of jobs that need to be assigned to
+ * the CPUs, from one of the above mentioned CPU groups. The jobs are
+ * 1) Rx Packet Processing - napi_cpu
+ * 2) Completion Processing (Tx, Rx) - compl_cpu
+ *
+ * To begin with, both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
+ * on-line/off-line the CPU candidacy algorithm is triggered. The candidacy
+ * algorithm tries to pick up the first available non-boot CPU (i.e. not CPU0)
+ * for napi_cpu. If more processors are free, it assigns one to compl_cpu.
+ * It also tries to ensure that napi_cpu and compl_cpu are not on the same
+ * CPU, as far as possible.
+ *
+ * By design, both Tx and Rx completion jobs run on the same CPU core, as this
+ * allows Tx completion skbs to be released into a local free pool from
+ * which the rx buffer posts can be serviced. It is important to note
+ * that a Tx packet may not have a large enough buffer for rx posting.
+ */
+void dhd_select_cpu_candidacy(dhd_info_t *dhd)
+{
+	uint32 primary_available_cpus; /* count of primary available cpus */
+	uint32 secondary_available_cpus; /* count of secondary available cpus */
+	uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
+	uint32 compl_cpu = 0; /* cpu selected for completion jobs */
+
+	cpumask_clear(dhd->cpumask_primary_new);
+	cpumask_clear(dhd->cpumask_secondary_new);
+
+	/*
+	 * Now select from the primary mask. Even if a job is
+	 * already running on a CPU in the secondary group, we still move
+	 * it to a primary CPU. So no conditional checks.
+	 */
+	cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
+		dhd->cpumask_curr_avail);
+
+	cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
+		dhd->cpumask_curr_avail);
+
+	primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);
+
+	if (primary_available_cpus > 0) {
+		napi_cpu = cpumask_first(dhd->cpumask_primary_new);
+
+		/* If no further CPU is available,
+		 * cpumask_next returns >= nr_cpu_ids
+		 */
+		compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
+		if (compl_cpu >= nr_cpu_ids)
+			compl_cpu = 0;
+	}
+
+	DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d\n",
+		__FUNCTION__, napi_cpu, compl_cpu));
+
+	/* -- Now check for the CPUs from the secondary mask -- */
+	secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);
+
+	DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
+		__FUNCTION__, secondary_available_cpus, nr_cpu_ids));
+
+	if (secondary_available_cpus > 0) {
+		/* At this point if napi_cpu is unassigned it means no CPU
+		 * is online from the Primary Group
+		 */
+		if (napi_cpu == 0) {
+			napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
+			compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
+		} else if (compl_cpu == 0) {
+			compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
+		}
+
+		/* If no CPU was available for completion, choose CPU 0 */
+		if (compl_cpu >= nr_cpu_ids)
+			compl_cpu = 0;
+	}
+	if ((primary_available_cpus == 0) &&
+		(secondary_available_cpus == 0)) {
+		/* No CPUs available from primary or secondary mask */
+		napi_cpu = 0;
+		compl_cpu = 0;
+	}
+
+	DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d\n",
+		__FUNCTION__, napi_cpu, compl_cpu));
+	ASSERT(napi_cpu < nr_cpu_ids);
+	ASSERT(compl_cpu < nr_cpu_ids);
+
+	atomic_set(&dhd->rx_napi_cpu, napi_cpu);
+	atomic_set(&dhd->tx_compl_cpu, compl_cpu);
+	atomic_set(&dhd->rx_compl_cpu, compl_cpu);
+	return;
+}
+
+/*
+ * Function to handle CPU Hotplug
notifications. + * One of the task it does is to trigger the CPU Candidacy algorithm + * for load balancing. + */ +int +dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned int)(long)hcpu; + + dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier); + + switch (action) + { + case CPU_ONLINE: + DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]); + cpumask_set_cpu(cpu, dhd->cpumask_curr_avail); + dhd_select_cpu_candidacy(dhd); + break; + + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]); + cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail); + dhd_select_cpu_candidacy(dhd); + break; + default: + break; + } + + return NOTIFY_OK; +} + +#if defined(DHD_LB_STATS) +void dhd_lb_stats_init(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + int i, j; + + if (dhdp == NULL) { + DHD_ERROR(("%s(): Invalid argument dhdp is NULL \n", + __FUNCTION__)); + return; + } + + dhd = dhdp->info; + if (dhd == NULL) { + DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); + return; + } + + DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt); + DHD_LB_STATS_CLR(dhd->napi_sched_cnt); + DHD_LB_STATS_CLR(dhd->rxc_sched_cnt); + DHD_LB_STATS_CLR(dhd->txc_sched_cnt); + + for (i = 0; i < NR_CPUS; i++) { + DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]); + DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]); + DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]); + + DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]); + DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]); + } + + for (i = 0; i < NR_CPUS; i++) { + for (j = 0; j < HIST_BIN_SIZE; j++) { + DHD_LB_STATS_CLR(dhd->napi_rx_hist[i][j]); + DHD_LB_STATS_CLR(dhd->txc_hist[i][j]); + DHD_LB_STATS_CLR(dhd->rxc_hist[i][j]); + } + } + + return; +} + +static void dhd_lb_stats_dump_histo( + struct bcmstrbuf *strbuf, uint32 (*hist)[HIST_BIN_SIZE]) +{ + int i, j; + uint32 per_cpu_total[NR_CPUS] = {0}; + uint32 total = 0; + + bcm_bprintf(strbuf, "CPU: \t\t"); + for (i = 0; i < num_possible_cpus(); i++) + bcm_bprintf(strbuf, "%d\t", i); + bcm_bprintf(strbuf, "\nBin\n"); + + for (i = 0; i < HIST_BIN_SIZE; i++) { + bcm_bprintf(strbuf, "%d:\t\t", 1<<(i+1)); + for (j = 0; j < num_possible_cpus(); j++) { + bcm_bprintf(strbuf, "%d\t", hist[j][i]); + } + bcm_bprintf(strbuf, "\n"); + } + bcm_bprintf(strbuf, "Per CPU Total \t"); + total = 0; + for (i = 0; i < num_possible_cpus(); i++) { + for (j = 0; j < HIST_BIN_SIZE; j++) { + per_cpu_total[i] += (hist[i][j] * (1<<(j+1))); + } + bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]); + total += per_cpu_total[i]; + } + bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total); + + return; +} + +static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p) +{ + int i; + + bcm_bprintf(strbuf, "CPU: \t"); + for (i = 0; i < num_possible_cpus(); i++) + bcm_bprintf(strbuf, "%d\t", i); + bcm_bprintf(strbuf, "\n"); + + bcm_bprintf(strbuf, "Val: \t"); + for (i = 0; i < num_possible_cpus(); i++) + bcm_bprintf(strbuf, "%u\t", *(p+i)); + bcm_bprintf(strbuf, "\n"); + return; +} + +void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + dhd_info_t *dhd; + + if (dhdp == NULL || strbuf == NULL) { + DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n", + __FUNCTION__, dhdp, strbuf)); + return; + } + + dhd = dhdp->info; + if (dhd == NULL) { + DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__)); + return; + } + + bcm_bprintf(strbuf, "\ncpu_online_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt); + + bcm_bprintf(strbuf, "cpu_offline_cnt:\n"); + 
dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt); + + bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n", + dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt, + dhd->txc_sched_cnt); +#ifdef DHD_LB_RXP + bcm_bprintf(strbuf, "napi_percpu_run_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt); + bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n"); + dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist); +#endif /* DHD_LB_RXP */ + +#ifdef DHD_LB_RXC + bcm_bprintf(strbuf, "rxc_percpu_run_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt); + bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n"); + dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist); +#endif /* DHD_LB_RXC */ + + +#ifdef DHD_LB_TXC + bcm_bprintf(strbuf, "txc_percpu_run_cnt:\n"); + dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt); + bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n"); + dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist); +#endif /* DHD_LB_TXC */ +} + +static void dhd_lb_stats_update_histo(uint32 *bin, uint32 count) +{ + uint32 bin_power; + uint32 *p = NULL; + + bin_power = next_larger_power2(count); + + switch (bin_power) { + case 0: break; + case 1: /* Fall through intentionally */ + case 2: p = bin + 0; break; + case 4: p = bin + 1; break; + case 8: p = bin + 2; break; + case 16: p = bin + 3; break; + case 32: p = bin + 4; break; + case 64: p = bin + 5; break; + case 128: p = bin + 6; break; + default : p = bin + 7; break; + } + if (p) + *p = *p + 1; + return; +} + +extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count) +{ + int cpu; + dhd_info_t *dhd = dhdp->info; + + cpu = get_cpu(); + put_cpu(); + dhd_lb_stats_update_histo(&dhd->napi_rx_hist[cpu][0], count); + + return; +} + +extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count) +{ + int cpu; + dhd_info_t *dhd = dhdp->info; + + cpu = get_cpu(); + put_cpu(); + dhd_lb_stats_update_histo(&dhd->txc_hist[cpu][0], count); + + return; +} + +extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count) +{ + int cpu; + dhd_info_t *dhd = dhdp->info; + + cpu = get_cpu(); + put_cpu(); + dhd_lb_stats_update_histo(&dhd->rxc_hist[cpu][0], count); + + return; +} + +extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt); +} + +extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt); +} + +#endif /* DHD_LB_STATS */ +#endif /* DHD_LB */ + + +#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF) +int g_frameburst = 1; +#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */ + +static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd); + +/* DHD Perimiter lock only used in router with bypass forwarding. 
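+ * In this (non-router) build the DHD_PERIM_* macros below therefore
+ * compile to no-ops.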
*/ +#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0) +#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0) +#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0) + +#ifdef PCIE_FULL_DONGLE +#if defined(BCM_GMAC3) +#define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0) +#define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); }) +#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); }) + +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) +#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; }) +#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); }) +#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ + +#else /* ! BCM_GMAC3 */ +#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock) +#define DHD_IF_STA_LIST_LOCK(ifp, flags) \ + spin_lock_irqsave(&(ifp)->sta_list_lock, (flags)) +#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \ + spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags)) + +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) +static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, + struct list_head *snapshot_list); +static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list); +#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); }) +#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); }) +#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ + +#endif /* ! BCM_GMAC3 */ +#endif /* PCIE_FULL_DONGLE */ + +/* Control fw roaming */ +uint dhd_roam_disable = 0; + +#ifdef BCMDBGFS +extern int dhd_dbg_init(dhd_pub_t *dhdp); +extern void dhd_dbg_remove(void); +#endif + +/* Control radio state */ +uint dhd_radio_up = 1; + +/* Network inteface name */ +char iface_name[IFNAMSIZ] = {'\0'}; +module_param_string(iface_name, iface_name, IFNAMSIZ, 0); + +/* The following are specific to the SDIO dongle */ + +/* IOCTL response timeout */ +int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT; + +/* Idle timeout for backplane clock */ +int dhd_idletime = DHD_IDLETIME_TICKS; +module_param(dhd_idletime, int, 0); + +/* Use polling */ +uint dhd_poll = FALSE; +module_param(dhd_poll, uint, 0); + +/* Use interrupts */ +uint dhd_intr = TRUE; +module_param(dhd_intr, uint, 0); + +/* SDIO Drive Strength (in milliamps) */ +uint dhd_sdiod_drive_strength = 6; +module_param(dhd_sdiod_drive_strength, uint, 0); + +#ifdef BCMSDIO +/* Tx/Rx bounds */ +extern uint dhd_txbound; +extern uint dhd_rxbound; +module_param(dhd_txbound, uint, 0); +module_param(dhd_rxbound, uint, 0); + +/* Deferred transmits */ +extern uint dhd_deferred_tx; +module_param(dhd_deferred_tx, uint, 0); + +#endif /* BCMSDIO */ + + +#ifdef SDTEST +/* Echo packet generator (pkts/s) */ +uint dhd_pktgen = 0; +module_param(dhd_pktgen, uint, 0); + +/* Echo packet len (0 => sawtooth, max 2040) */ +uint dhd_pktgen_len = 0; +module_param(dhd_pktgen_len, uint, 0); +#endif /* SDTEST */ + + + +/* Allow delayed firmware download for debug purpose */ +int allow_delay_fwdl = FALSE; +module_param(allow_delay_fwdl, int, 0); + +extern char dhd_version[]; +extern char fw_version[]; + +int dhd_net_bus_devreset(struct net_device *dev, uint8 flag); +static void dhd_net_if_lock_local(dhd_info_t *dhd); +static void dhd_net_if_unlock_local(dhd_info_t *dhd); +static void dhd_suspend_lock(dhd_pub_t *dhdp); +static void dhd_suspend_unlock(dhd_pub_t *dhdp); + +#ifdef WLMEDIA_HTSF +void 
htsf_update(dhd_info_t *dhd, void *data); +tsf_t prev_tsf, cur_tsf; + +uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx); +static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx); +static void dhd_dump_latency(void); +static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf); +static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf); +static void dhd_dump_htsfhisto(histo_t *his, char *s); +#endif /* WLMEDIA_HTSF */ + +/* Monitor interface */ +int dhd_monitor_init(void *dhd_pub); +int dhd_monitor_uninit(void); + + +#if defined(WL_WIRELESS_EXT) +struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); +#endif /* defined(WL_WIRELESS_EXT) */ + +static void dhd_dpc(ulong data); +/* forward decl */ +extern int dhd_wait_pend8021x(struct net_device *dev); +void dhd_os_wd_timer_extend(void *bus, bool extend); + +#ifdef TOE +#ifndef BDC +#error TOE requires BDC +#endif /* !BDC */ +static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol); +static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol); +#endif /* TOE */ + +static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, size_t pktlen, + wl_event_msg_t *event_ptr, void **data_ptr); + +#if defined(CONFIG_PM_SLEEP) +static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored) +{ + int ret = NOTIFY_DONE; + bool suspend = FALSE; + dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier); + + BCM_REFERENCE(dhdinfo); + + switch (action) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + suspend = TRUE; + break; + + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + suspend = FALSE; + break; + } + +#if defined(SUPPORT_P2P_GO_PS) +#ifdef PROP_TXSTATUS + if (suspend) { + DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub); + dhd_wlfc_suspend(&dhdinfo->pub); + DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub); + } else + dhd_wlfc_resume(&dhdinfo->pub); +#endif /* PROP_TXSTATUS */ +#endif /* defined(SUPPORT_P2P_GO_PS) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \ + KERNEL_VERSION(2, 6, 39)) + dhd_mmc_suspend = suspend; + smp_mb(); +#endif + + return ret; +} + +/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be + * created in kernel notifier link list (with 'next' pointing to itself) + */ +static bool dhd_pm_notifier_registered = FALSE; + +extern int register_pm_notifier(struct notifier_block *nb); +extern int unregister_pm_notifier(struct notifier_block *nb); +#endif /* CONFIG_PM_SLEEP */ + +/* Request scheduling of the bus rx frame */ +static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb); +static void dhd_os_rxflock(dhd_pub_t *pub); +static void dhd_os_rxfunlock(dhd_pub_t *pub); + +/** priv_link is the link between netdev and the dhdif and dhd_info structs. */ +typedef struct dhd_dev_priv { + dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */ + dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */ + int ifidx; /* interface index */ +} dhd_dev_priv_t; + +#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t)) +#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev)) +#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd) +#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp) +#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx) + +/** Clear the dhd net_device's private structure. 
*/ +static inline void +dhd_dev_priv_clear(struct net_device * dev) +{ + dhd_dev_priv_t * dev_priv; + ASSERT(dev != (struct net_device *)NULL); + dev_priv = DHD_DEV_PRIV(dev); + dev_priv->dhd = (dhd_info_t *)NULL; + dev_priv->ifp = (dhd_if_t *)NULL; + dev_priv->ifidx = DHD_BAD_IF; +} + +/** Setup the dhd net_device's private structure. */ +static inline void +dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp, + int ifidx) +{ + dhd_dev_priv_t * dev_priv; + ASSERT(dev != (struct net_device *)NULL); + dev_priv = DHD_DEV_PRIV(dev); + dev_priv->dhd = dhd; + dev_priv->ifp = ifp; + dev_priv->ifidx = ifidx; +} + +#ifdef PCIE_FULL_DONGLE + +/** Dummy objects are defined with state representing bad|down. + * Performance gains from reducing branch conditionals, instruction parallelism, + * dual issue, reducing load shadows, avail of larger pipelines. + * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer + * is accessed via the dhd_sta_t. + */ + +/* Dummy dhd_info object */ +dhd_info_t dhd_info_null = { +#if defined(BCM_GMAC3) + .fwdh = FWDER_NULL, +#endif + .pub = { + .info = &dhd_info_null, +#ifdef DHDTCPACK_SUPPRESS + .tcpack_sup_mode = TCPACK_SUP_REPLACE, +#endif /* DHDTCPACK_SUPPRESS */ + .up = FALSE, + .busstate = DHD_BUS_DOWN + } +}; +#define DHD_INFO_NULL (&dhd_info_null) +#define DHD_PUB_NULL (&dhd_info_null.pub) + +/* Dummy netdevice object */ +struct net_device dhd_net_dev_null = { + .reg_state = NETREG_UNREGISTERED +}; +#define DHD_NET_DEV_NULL (&dhd_net_dev_null) + +/* Dummy dhd_if object */ +dhd_if_t dhd_if_null = { +#if defined(BCM_GMAC3) + .fwdh = FWDER_NULL, +#endif +#ifdef WMF + .wmf = { .wmf_enable = TRUE }, +#endif + .info = DHD_INFO_NULL, + .net = DHD_NET_DEV_NULL, + .idx = DHD_BAD_IF +}; +#define DHD_IF_NULL (&dhd_if_null) + +#define DHD_STA_NULL ((dhd_sta_t *)NULL) + +/** Interface STA list management. */ + +/** Fetch the dhd_if object, given the interface index in the dhd. */ +static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx); + +/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */ +static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta); +static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp); + +/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */ +static void dhd_if_del_sta_list(dhd_if_t * ifp); +static void dhd_if_flush_sta(dhd_if_t * ifp); + +/* Construct/Destruct a sta pool. */ +static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta); +static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta); +/* Clear the pool of dhd_sta_t objects for built-in type driver */ +static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta); + + +/* Return interface pointer */ +static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx) +{ + ASSERT(ifidx < DHD_MAX_IFS); + + if (ifidx >= DHD_MAX_IFS) + return NULL; + + return dhdp->info->iflist[ifidx]; +} + +/** Reset a dhd_sta object and free into the dhd pool. */ +static void +dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta) +{ + int prio; + + ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID)); + + ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); + + /* + * Flush and free all packets in all flowring's queues belonging to sta. + * Packets in flow ring will be flushed later. 
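+	 * (Each prio's queued packets are purged under the flowring lock and
+	 * the flowid is then reset to FLOWID_INVALID, as the loop below shows.)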
+ */ + for (prio = 0; prio < (int)NUMPRIO; prio++) { + uint16 flowid = sta->flowid[prio]; + + if (flowid != FLOWID_INVALID) { + unsigned long flags; + flow_queue_t * queue = dhd_flow_queue(dhdp, flowid); + flow_ring_node_t * flow_ring_node; + +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. + */ + dhd_tcpack_info_tbl_clean(dhdp); +#endif /* DHDTCPACK_SUPPRESS */ + + flow_ring_node = dhd_flow_ring_node(dhdp, flowid); + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING; + + if (!DHD_FLOW_QUEUE_EMPTY(queue)) { + void * pkt; + while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) { + PKTFREE(dhdp->osh, pkt, TRUE); + } + } + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + } + + sta->flowid[prio] = FLOWID_INVALID; + } + + id16_map_free(dhdp->staid_allocator, sta->idx); + DHD_CUMM_CTR_INIT(&sta->cumm_ctr); + sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */ + sta->ifidx = DHD_BAD_IF; + bzero(sta->ea.octet, ETHER_ADDR_LEN); + INIT_LIST_HEAD(&sta->list); + sta->idx = ID16_INVALID; /* implying free */ +} + +/** Allocate a dhd_sta object from the dhd pool. */ +static dhd_sta_t * +dhd_sta_alloc(dhd_pub_t * dhdp) +{ + uint16 idx; + dhd_sta_t * sta; + dhd_sta_pool_t * sta_pool; + + ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL)); + + idx = id16_map_alloc(dhdp->staid_allocator); + if (idx == ID16_INVALID) { + DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__)); + return DHD_STA_NULL; + } + + sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool); + sta = &sta_pool[idx]; + + ASSERT((sta->idx == ID16_INVALID) && + (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF)); + + DHD_CUMM_CTR_INIT(&sta->cumm_ctr); + + sta->idx = idx; /* implying allocated */ + + return sta; +} + +/** Delete all STAs in an interface's STA list. */ +static void +dhd_if_del_sta_list(dhd_if_t *ifp) +{ + dhd_sta_t *sta, *next; + unsigned long flags; + + DHD_IF_STA_LIST_LOCK(ifp, flags); + + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { +#if defined(BCM_GMAC3) + if (ifp->fwdh) { + /* Remove sta from WOFA forwarder. */ + fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta); + } +#endif /* BCM_GMAC3 */ + list_del(&sta->list); + dhd_sta_free(&ifp->info->pub, sta); + } + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return; +} + +/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */ +static void +dhd_if_flush_sta(dhd_if_t * ifp) +{ +#if defined(BCM_GMAC3) + + if (ifp && (ifp->fwdh != FWDER_NULL)) { + dhd_sta_t *sta, *next; + unsigned long flags; + + DHD_IF_STA_LIST_LOCK(ifp, flags); + + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { + /* Remove any sta entry from WOFA forwarder. */ + fwder_flush(ifp->fwdh, (wofa_t)sta); + } + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + } +#endif /* BCM_GMAC3 */ +} + +/** Construct a pool of dhd_sta_t objects to be used by interfaces. */ +static int +dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) +{ + int idx, prio, sta_pool_memsz; + dhd_sta_t * sta; + dhd_sta_pool_t * sta_pool; + void * staid_allocator; + + ASSERT(dhdp != (dhd_pub_t *)NULL); + ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL)); + + /* dhd_sta objects per radio are managed in a table. id#0 reserved. 
*/ + staid_allocator = id16_map_init(dhdp->osh, max_sta, 1); + if (staid_allocator == NULL) { + DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Pre allocate a pool of dhd_sta objects (one extra). */ + sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */ + sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz); + if (sta_pool == NULL) { + DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__)); + id16_map_fini(dhdp->osh, staid_allocator); + return BCME_ERROR; + } + + dhdp->sta_pool = sta_pool; + dhdp->staid_allocator = staid_allocator; + + /* Initialize all sta(s) for the pre-allocated free pool. */ + bzero((uchar *)sta_pool, sta_pool_memsz); + for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */ + sta = &sta_pool[idx]; + sta->idx = id16_map_alloc(staid_allocator); + ASSERT(sta->idx <= max_sta); + } + /* Now place them into the pre-allocated free pool. */ + for (idx = 1; idx <= max_sta; idx++) { + sta = &sta_pool[idx]; + for (prio = 0; prio < (int)NUMPRIO; prio++) { + sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ + } + dhd_sta_free(dhdp, sta); + } + + return BCME_OK; +} + +/** Destruct the pool of dhd_sta_t objects. + * Caller must ensure that no STA objects are currently associated with an if. + */ +static void +dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) +{ + dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool; + + if (sta_pool) { + int idx; + int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); + for (idx = 1; idx <= max_sta; idx++) { + ASSERT(sta_pool[idx].ifp == DHD_IF_NULL); + ASSERT(sta_pool[idx].idx == ID16_INVALID); + } + MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz); + dhdp->sta_pool = NULL; + } + + id16_map_fini(dhdp->osh, dhdp->staid_allocator); + dhdp->staid_allocator = NULL; +} + +/* Clear the pool of dhd_sta_t objects for built-in type driver */ +static void +dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) +{ + int idx, prio, sta_pool_memsz; + dhd_sta_t * sta; + dhd_sta_pool_t * sta_pool; + void *staid_allocator; + + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return; + } + + sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool; + staid_allocator = dhdp->staid_allocator; + + if (!sta_pool) { + DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__)); + return; + } + + if (!staid_allocator) { + DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__)); + return; + } + + /* clear free pool */ + sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); + bzero((uchar *)sta_pool, sta_pool_memsz); + + /* dhd_sta objects per radio are managed in a table. id#0 reserved. */ + id16_map_clear(staid_allocator, max_sta, 1); + + /* Initialize all sta(s) for the pre-allocated free pool. */ + for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */ + sta = &sta_pool[idx]; + sta->idx = id16_map_alloc(staid_allocator); + ASSERT(sta->idx <= max_sta); + } + /* Now place them into the pre-allocated free pool. */ + for (idx = 1; idx <= max_sta; idx++) { + sta = &sta_pool[idx]; + for (prio = 0; prio < (int)NUMPRIO; prio++) { + sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */ + } + dhd_sta_free(dhdp, sta); + } +} + +/** Find STA with MAC address ea in an interface's STA list. 
*/ +dhd_sta_t * +dhd_find_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta; + dhd_if_t *ifp; + unsigned long flags; + + ASSERT(ea != NULL); + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return DHD_STA_NULL; + + DHD_IF_STA_LIST_LOCK(ifp, flags); + + list_for_each_entry(sta, &ifp->sta_list, list) { + if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + return sta; + } + } + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return DHD_STA_NULL; +} + +/** Add STA into the interface's STA list. */ +dhd_sta_t * +dhd_add_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta; + dhd_if_t *ifp; + unsigned long flags; + + ASSERT(ea != NULL); + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return DHD_STA_NULL; + + sta = dhd_sta_alloc((dhd_pub_t *)pub); + if (sta == DHD_STA_NULL) { + DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__)); + return DHD_STA_NULL; + } + + memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN); + + /* link the sta and the dhd interface */ + sta->ifp = ifp; + sta->ifidx = ifidx; + INIT_LIST_HEAD(&sta->list); + + DHD_IF_STA_LIST_LOCK(ifp, flags); + + list_add_tail(&sta->list, &ifp->sta_list); + +#if defined(BCM_GMAC3) + if (ifp->fwdh) { + ASSERT(ISALIGNED(ea, 2)); + /* Add sta to WOFA forwarder. */ + fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta); + } +#endif /* BCM_GMAC3 */ + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return sta; +} + +/** Delete STA from the interface's STA list. */ +void +dhd_del_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta, *next; + dhd_if_t *ifp; + unsigned long flags; + + ASSERT(ea != NULL); + ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx); + if (ifp == NULL) + return; + + DHD_IF_STA_LIST_LOCK(ifp, flags); + + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { + if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) { +#if defined(BCM_GMAC3) + if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */ + ASSERT(ISALIGNED(ea, 2)); + fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta); + } +#endif /* BCM_GMAC3 */ + list_del(&sta->list); + dhd_sta_free(&ifp->info->pub, sta); + } + } + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); +#ifdef DHD_L2_FILTER + if (ifp->parp_enable) { + /* clear Proxy ARP cache of specific Ethernet Address */ + bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE, + ea, FALSE, ((dhd_pub_t*)pub)->tickcnt); + } +#endif /* DHD_L2_FILTER */ + return; +} + +/** Add STA if it doesn't exist. Not reentrant. 
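+ * (Not reentrant because the find and the add are two separate steps with
+ * the sta_list lock dropped in between; concurrent callers could insert
+ * duplicate entries, so callers must serialize externally.)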
*/ +dhd_sta_t* +dhd_findadd_sta(void *pub, int ifidx, void *ea) +{ + dhd_sta_t *sta; + + sta = dhd_find_sta(pub, ifidx, ea); + + if (!sta) { + /* Add entry */ + sta = dhd_add_sta(pub, ifidx, ea); + } + + return sta; +} + +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) +#if !defined(BCM_GMAC3) +static struct list_head * +dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list) +{ + unsigned long flags; + dhd_sta_t *sta, *snapshot; + + INIT_LIST_HEAD(snapshot_list); + + DHD_IF_STA_LIST_LOCK(ifp, flags); + + list_for_each_entry(sta, &ifp->sta_list, list) { + /* allocate one and add to snapshot */ + snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t)); + if (snapshot == NULL) { + DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__)); + continue; + } + + memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN); + + INIT_LIST_HEAD(&snapshot->list); + list_add_tail(&snapshot->list, snapshot_list); + } + + DHD_IF_STA_LIST_UNLOCK(ifp, flags); + + return snapshot_list; +} + +static void +dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list) +{ + dhd_sta_t *sta, *next; + + list_for_each_entry_safe(sta, next, snapshot_list, list) { + list_del(&sta->list); + MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t)); + } +} +#endif /* !BCM_GMAC3 */ +#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */ + +#else +static inline void dhd_if_flush_sta(dhd_if_t * ifp) { } +static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {} +static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; } +static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {} +static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {} +dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; } +void dhd_del_sta(void *pub, int ifidx, void *ea) {} +#endif /* PCIE_FULL_DONGLE */ + + +#if defined(DHD_LB) + +#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) +/** + * dhd_tasklet_schedule - Function that runs in IPI context of the destination + * CPU and schedules a tasklet. + * @tasklet: opaque pointer to the tasklet + */ +static INLINE void +dhd_tasklet_schedule(void *tasklet) +{ + tasklet_schedule((struct tasklet_struct *)tasklet); +} + +/** + * dhd_tasklet_schedule_on - Executes the passed takslet in a given CPU + * @tasklet: tasklet to be scheduled + * @on_cpu: cpu core id + * + * If the requested cpu is online, then an IPI is sent to this cpu via the + * smp_call_function_single with no wait and the tasklet_schedule function + * will be invoked to schedule the specified tasklet on the requested CPU. + */ +static void +dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu) +{ + const int wait = 0; + smp_call_function_single(on_cpu, + dhd_tasklet_schedule, (void *)tasklet, wait); +} +#endif /* DHD_LB_TXC || DHD_LB_RXC */ + + +#if defined(DHD_LB_TXC) +/** + * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet + * on another cpu. 
The tx_compl_tasklet will take care of DMA unmapping and
+ * freeing the packets placed in the tx_compl workq.
+ */
+void
+dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+	int curr_cpu, on_cpu;
+
+	if (dhd->rx_napi_netdev == NULL) {
+		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
+	/*
+	 * If the destination CPU is NOT online or is the same as the current
+	 * CPU, there is no need to schedule the work.
+	 */
+	curr_cpu = get_cpu();
+	put_cpu();
+
+	on_cpu = atomic_read(&dhd->tx_compl_cpu);
+
+	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+		dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
+	} else {
+		schedule_work(&dhd->tx_compl_dispatcher_work);
+	}
+}
+
+static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
+{
+	struct dhd_info *dhd =
+		container_of(work, struct dhd_info, tx_compl_dispatcher_work);
+	int cpu;
+
+	get_online_cpus();
+	cpu = atomic_read(&dhd->tx_compl_cpu);
+	if (!cpu_online(cpu))
+		dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
+	else
+		dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
+	put_online_cpus();
+}
+
+#endif /* DHD_LB_TXC */
+
+
+#if defined(DHD_LB_RXC)
+/**
+ * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
+ * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
+ * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
+ * placed in the rx_compl workq.
+ *
+ * @dhdp: pointer to dhd_pub object
+ */
+void
+dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd = dhdp->info;
+	int curr_cpu, on_cpu;
+
+	if (dhd->rx_napi_netdev == NULL) {
+		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
+	/*
+	 * If the destination CPU is NOT online or is the same as the current
+	 * CPU, there is no need to schedule the work.
+	 */
+	curr_cpu = get_cpu();
+	put_cpu();
+
+	on_cpu = atomic_read(&dhd->rx_compl_cpu);
+
+	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+		dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
+	} else {
+		schedule_work(&dhd->rx_compl_dispatcher_work);
+	}
+}
+
+static void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
+{
+	struct dhd_info *dhd =
+		container_of(work, struct dhd_info, rx_compl_dispatcher_work);
+	int cpu;
+
+	get_online_cpus();
+	/* dispatch to the CPU selected for rx completions */
+	cpu = atomic_read(&dhd->rx_compl_cpu);
+	if (!cpu_online(cpu))
+		dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
+	else
+		dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
+	put_online_cpus();
+}
+
+#endif /* DHD_LB_RXC */
+
+
+#if defined(DHD_LB_RXP)
+/**
+ * dhd_napi_poll - Load balance napi poll function to process received
+ * packets and send up the network stack using netif_receive_skb()
+ *
+ * @napi: napi object in which context this poll function is invoked
+ * @budget: number of packets to be processed.
+ *
+ * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
+ * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
+ * Dequeue each packet from the head of rx_process_queue, fetch the ifid from the
+ * packet tag and sendup.
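+ *
+ * (Editorial note: this poll function signals that it is done by calling
+ * napi_complete() and returning a value smaller than @budget; the splice
+ * of rx_napi_queue under its lock is what makes the handoff from the
+ * producer side safe.)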
+ */
+static int
+dhd_napi_poll(struct napi_struct *napi, int budget)
+{
+	int ifid;
+	const int pkt_count = 1;
+	const int chan = 0;
+	struct sk_buff * skb;
+	unsigned long flags;
+	struct dhd_info *dhd;
+	int processed = 0;
+	struct sk_buff_head rx_process_queue;
+
+	dhd = container_of(napi, struct dhd_info, rx_napi_struct);
+	DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
+		__FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
+
+	__skb_queue_head_init(&rx_process_queue);
+
+	/* extract the entire rx_napi_queue into local rx_process_queue */
+	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
+	skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
+	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
+
+	while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
+		OSL_PREFETCH(skb->data);
+
+		ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
+
+		DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
+			__FUNCTION__, skb, ifid));
+
+		dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
+		processed++;
+	}
+
+	DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
+
+	DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
+	napi_complete(napi);
+
+	return budget - 1;
+}
+
+/**
+ * dhd_napi_schedule - Place the napi struct into the current CPU's softnet napi
+ * poll list. This function may be invoked via smp_call_function_single
+ * from a remote CPU.
+ *
+ * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
+ * after the napi_struct is added to the softnet data's poll_list
+ *
+ * @info: pointer to a dhd_info struct
+ */
+static void
+dhd_napi_schedule(void *info)
+{
+	dhd_info_t *dhd = (dhd_info_t *)info;
+
+	DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
+		__FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
+
+	/* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
+	if (napi_schedule_prep(&dhd->rx_napi_struct)) {
+		__napi_schedule(&dhd->rx_napi_struct);
+		DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
+	}
+
+	/*
+	 * If the rx_napi_struct was already running, then we let it complete
+	 * processing all its packets. The rx_napi_struct may only run on one
+	 * core at a time, to avoid out-of-order handling.
+	 */
+}
+
+/**
+ * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
+ * action after placing the dhd's rx_process napi object in the remote CPU's
+ * softnet data's poll_list.
+ *
+ * @dhd: dhd_info which has the rx_process napi object
+ * @on_cpu: desired remote CPU id
+ */
+static INLINE int
+dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
+{
+	int wait = 0; /* asynchronous IPI */
+
+	DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
+		__FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
+
+	if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
+		DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
+			__FUNCTION__, on_cpu));
+	}
+
+	DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
+
+	return 0;
+}
+
+/*
+ * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on.
+ * Why should we do this?
+ * The candidacy algorithm is run from the callback function
+ * registered with the CPU hotplug notifier. That callback runs in worker
+ * context, and dhd_napi_schedule_on also runs in worker context.
+ * Note that both of these can run on two different CPUs at the same time,
+ * so we can have a window where a given CPUn is being brought
+ * down from CPUm while we try to run a function on CPUn.
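+ * (The tx/rx completion dispatcher work functions above bracket their
+ * dhd_tasklet_schedule_on calls with the same
+ * get_online_cpus/put_online_cpus pair, for the same reason.)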
+ * To prevent this race, it is better to execute the whole SMP function call
+ * under get_online_cpus, as sketched above.
+ * This call ensures that the hotplug mechanism does not kick in
+ * until we are done dealing with online CPUs.
+ * If the hotplug worker is already running, no worries because the
+ * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
+ *
+ * The below mentioned code structure is proposed in
+ * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
+ * for the question
+ * Q: I need to ensure that a particular cpu is not removed when there is some
+ *    work specific to this cpu in progress
+ *
+ * According to the documentation, calling get_online_cpus is NOT required if
+ * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
+ * run from Work Queue context we have to call these functions
+ */
+static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
+{
+	struct dhd_info *dhd =
+		container_of(work, struct dhd_info, rx_napi_dispatcher_work);
+	int cpu;
+
+	get_online_cpus();
+	cpu = atomic_read(&dhd->rx_napi_cpu);
+	if (!cpu_online(cpu))
+		dhd_napi_schedule(dhd);
+	else
+		dhd_napi_schedule_on(dhd, cpu);
+	put_online_cpus();
+}
+
+/**
+ * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
+ * to run on another CPU. The rx_napi_struct's poll function will retrieve all
+ * the packets enqueued into the rx_napi_queue and sendup.
+ * The producer's rx packet queue is appended to the rx_napi_queue before
+ * dispatching the rx_napi_struct.
+ */
+void
+dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
+{
+	unsigned long flags;
+	dhd_info_t *dhd = dhdp->info;
+	int curr_cpu;
+	int on_cpu;
+
+	if (dhd->rx_napi_netdev == NULL) {
+		DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
+		skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
+
+	/* append the producer's queue of packets to the napi's rx process queue */
+	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
+	skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
+	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
+
+	/*
+	 * If the destination CPU is offline or is the same as the current CPU,
+	 * there is no need to schedule the work item
+	 */
+	curr_cpu = get_cpu();
+	put_cpu();
+
+	on_cpu = atomic_read(&dhd->rx_napi_cpu);
+
+	if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+		dhd_napi_schedule(dhd);
+	} else {
+		schedule_work(&dhd->rx_napi_dispatcher_work);
+	}
+}
+
+/**
+ * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
+ */
+void
+dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
+{
+	dhd_info_t *dhd = dhdp->info;
+
+	DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
+		pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
+	DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
+	__skb_queue_tail(&dhd->rx_pend_queue, pkt);
+}
+#endif /* DHD_LB_RXP */
+
+#endif /* DHD_LB */
+
+static void dhd_memdump_work_handler(struct work_struct * work)
+{
+	struct dhd_info *dhd =
+		container_of(work, struct dhd_info, dhd_memdump_work.work);
+
+	BCM_REFERENCE(dhd);
+#ifdef BCMPCIE
+	dhd_prot_collect_memdump(&dhd->pub);
+#endif
+}
+
+
+/** Returns dhd iflist index corresponding to the bssidx provided by apps */
+int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
+{
+	dhd_if_t *ifp;
+	dhd_info_t *dhd = dhdp->info;
+	int i;
+
+	ASSERT(bssidx < DHD_MAX_IFS);
+	ASSERT(dhdp);
+
+	for (i = 0; i < DHD_MAX_IFS;
i++) { + ifp = dhd->iflist[i]; + if (ifp && (ifp->bssidx == bssidx)) { + DHD_TRACE(("Index manipulated for %s from %d to %d\n", + ifp->name, bssidx, i)); + break; + } + } + return i; +} + +static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb) +{ + uint32 store_idx; + uint32 sent_idx; + + if (!skb) { + DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n")); + return BCME_ERROR; + } + + dhd_os_rxflock(dhdp); + store_idx = dhdp->store_idx; + sent_idx = dhdp->sent_idx; + if (dhdp->skbbuf[store_idx] != NULL) { + /* Make sure the previous packets are processed */ + dhd_os_rxfunlock(dhdp); +#ifdef RXF_DEQUEUE_ON_BUSY + DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n", + skb, store_idx, sent_idx)); + return BCME_BUSY; +#else /* RXF_DEQUEUE_ON_BUSY */ + DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n", + skb, store_idx, sent_idx)); + /* removed msleep here, should use wait_event_timeout if we + * want to give rx frame thread a chance to run + */ +#if defined(WAIT_DEQUEUE) + OSL_SLEEP(1); +#endif + return BCME_ERROR; +#endif /* RXF_DEQUEUE_ON_BUSY */ + } + DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n", + skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1))); + dhdp->skbbuf[store_idx] = skb; + dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1); + dhd_os_rxfunlock(dhdp); + + return BCME_OK; +} + +static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp) +{ + uint32 store_idx; + uint32 sent_idx; + void *skb; + + dhd_os_rxflock(dhdp); + + store_idx = dhdp->store_idx; + sent_idx = dhdp->sent_idx; + skb = dhdp->skbbuf[sent_idx]; + + if (skb == NULL) { + dhd_os_rxfunlock(dhdp); + DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n", + store_idx, sent_idx)); + return NULL; + } + + dhdp->skbbuf[sent_idx] = NULL; + dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1); + + DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n", + skb, sent_idx)); + + dhd_os_rxfunlock(dhdp); + + return skb; +} + +int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + + if (prepost) { /* pre process */ + dhd_read_macaddr(dhd); + } else { /* post process */ + dhd_write_macaddr(&dhd->pub.mac); + } + + return 0; +} + +#if defined(PKT_FILTER_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER) +static bool +_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode) +{ + bool _apply = FALSE; + /* In case of IBSS mode, apply arp pkt filter */ + if (op_mode & DHD_FLAG_IBSS_MODE) { + _apply = TRUE; + goto exit; + } + /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */ + if ((dhd->arp_version == 1) && + (op_mode & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) { + _apply = TRUE; + goto exit; + } + +exit: + return _apply; +} +#endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */ + +void dhd_set_packet_filter(dhd_pub_t *dhd) +{ +#ifdef PKT_FILTER_SUPPORT + int i; + + DHD_TRACE(("%s: enter\n", __FUNCTION__)); + if (dhd_pkt_filter_enable) { + for (i = 0; i < dhd->pktfilter_count; i++) { + dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]); + } + } +#endif /* PKT_FILTER_SUPPORT */ +} + +void dhd_enable_packet_filter(int value, dhd_pub_t *dhd) +{ +#ifdef PKT_FILTER_SUPPORT + int i; + + DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value)); + + if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) { + DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__)); + return; + } + /* 1 - Enable packet filter, only allow unicast packet to send up */ + /* 0 - Disable packet 
filter */ + if (dhd_pkt_filter_enable && (!value || + (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress))) + { + for (i = 0; i < dhd->pktfilter_count; i++) { +#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER + if (value && (i == DHD_ARP_FILTER_NUM) && + !_turn_on_arp_filter(dhd, dhd->op_mode)) { + DHD_TRACE(("Do not turn on ARP white list pkt filter:" + "val %d, cnt %d, op_mode 0x%x\n", + value, i, dhd->op_mode)); + continue; + } +#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */ + dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i], + value, dhd_master_mode); + } + } +#endif /* PKT_FILTER_SUPPORT */ +} + +static int dhd_set_suspend(int value, dhd_pub_t *dhd) +{ +#ifndef SUPPORT_PM2_ONLY + int power_mode = PM_MAX; +#endif /* SUPPORT_PM2_ONLY */ +#ifdef SUPPORT_SENSORHUB + uint32 shub_msreq; +#endif /* SUPPORT_SENSORHUB */ + /* wl_pkt_filter_enable_t enable_parm; */ + char iovbuf[32]; + int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */ +#ifdef DHD_USE_EARLYSUSPEND +#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND + int bcn_timeout = 0; +#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */ +#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND + int roam_time_thresh = 0; /* (ms) */ +#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ +#ifndef ENABLE_FW_ROAM_SUSPEND + uint roamvar = 1; +#endif /* ENABLE_FW_ROAM_SUSPEND */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + int bcn_li_bcn; +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ + uint nd_ra_filter = 0; + int ret = 0; +#endif /* DHD_USE_EARLYSUSPEND */ +#ifdef PASS_ALL_MCAST_PKTS + struct dhd_info *dhdinfo; + uint32 allmulti; + uint i; +#endif /* PASS_ALL_MCAST_PKTS */ +#ifdef DYNAMIC_SWOOB_DURATION +#ifndef CUSTOM_INTR_WIDTH +#define CUSTOM_INTR_WIDTH 100 + int intr_width = 0; +#endif /* CUSTOM_INTR_WIDTH */ +#endif /* DYNAMIC_SWOOB_DURATION */ + if (!dhd) + return -ENODEV; + +#ifdef PASS_ALL_MCAST_PKTS + dhdinfo = dhd->info; +#endif /* PASS_ALL_MCAST_PKTS */ + + DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n", + __FUNCTION__, value, dhd->in_suspend)); + + dhd_suspend_lock(dhd); + +#ifdef CUSTOM_SET_CPUCORE + DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value)); + /* set specific cpucore */ + dhd_set_cpucore(dhd, TRUE); +#endif /* CUSTOM_SET_CPUCORE */ + if (dhd->up) { + if (value && dhd->in_suspend) { +#ifdef PKT_FILTER_SUPPORT + dhd->early_suspended = 1; +#endif + /* Kernel suspended */ + DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__)); + +#ifdef SUPPORT_SENSORHUB + shub_msreq = 1; + if (dhd->info->shub_enable == 1) { + bcm_mkiovar("shub_msreq", (char *)&shub_msreq, 4, + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Sensor Hub move/stop start: failed %d\n", + __FUNCTION__, ret)); + } + } +#endif /* SUPPORT_SENSORHUB */ + +#ifndef SUPPORT_PM2_ONLY + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, + sizeof(power_mode), TRUE, 0); +#endif /* SUPPORT_PM2_ONLY */ + +#ifdef PKT_FILTER_SUPPORT + /* Enable packet filter, + * only allow unicast packet to send up + */ + dhd_enable_packet_filter(1, dhd); +#endif /* PKT_FILTER_SUPPORT */ + +#ifdef PASS_ALL_MCAST_PKTS + allmulti = 0; + bcm_mkiovar("allmulti", (char *)&allmulti, 4, + iovbuf, sizeof(iovbuf)); + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, i); + } +#endif /* PASS_ALL_MCAST_PKTS */ + + /* If DTIM skip is set up as default, force it to wake + * each third DTIM for better power savings. 
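+			 * For example, assuming the common 100 TU (102.4 ms)
+			 * beacon interval and a DTIM period of 1, bcn_li_dtim = 3
+			 * wakes the STA roughly every 3 * 102.4 ms ~= 307 ms
+			 * instead of at every beacon (numbers are illustrative,
+			 * not read from the AP).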
Note that + * one side effect is a chance to miss BC/MC packet. + */ +#ifdef WLTDLS + /* Do not set bcn_li_ditm on WFD mode */ + if (dhd->tdls_mode) { + bcn_li_dtim = 0; + } else +#endif /* WLTDLS */ + bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd); + bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim, + 4, iovbuf, sizeof(iovbuf)); + if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), + TRUE, 0) < 0) + DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__)); + +#ifdef DHD_USE_EARLYSUSPEND +#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND + bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND; + bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */ +#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND + roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND; + bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ +#ifndef ENABLE_FW_ROAM_SUSPEND + /* Disable firmware roaming during suspend */ + bcm_mkiovar("roam_off", (char *)&roamvar, 4, + iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* ENABLE_FW_ROAM_SUSPEND */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + bcn_li_bcn = 0; + bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ + if (FW_SUPPORTED(dhd, ndoe)) { + /* enable IPv6 RA filter in firmware during suspend */ + nd_ra_filter = 1; + bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4, + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("failed to set nd_ra_filter (%d)\n", + ret)); + } +#ifdef DYNAMIC_SWOOB_DURATION + intr_width = CUSTOM_INTR_WIDTH; + bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4, + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("failed to set intr_width (%d)\n", ret)); + } +#endif /* DYNAMIC_SWOOB_DURATION */ +#endif /* DHD_USE_EARLYSUSPEND */ + } else { +#ifdef PKT_FILTER_SUPPORT + dhd->early_suspended = 0; +#endif + /* Kernel resumed */ + DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__)); + +#ifdef SUPPORT_SENSORHUB + shub_msreq = 0; + if (dhd->info->shub_enable == 1) { + bcm_mkiovar("shub_msreq", (char *)&shub_msreq, + 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Sensor Hub move/stop stop:" + "failed %d\n", __FUNCTION__, ret)); + } + } +#endif /* SUPPORT_SENSORHUB */ + + +#ifdef DYNAMIC_SWOOB_DURATION + intr_width = 0; + bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4, + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("failed to set intr_width (%d)\n", ret)); + } +#endif /* DYNAMIC_SWOOB_DURATION */ +#ifndef SUPPORT_PM2_ONLY + power_mode = PM_FAST; + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, + sizeof(power_mode), TRUE, 0); +#endif /* SUPPORT_PM2_ONLY */ +#ifdef PKT_FILTER_SUPPORT + /* disable pkt filter */ + dhd_enable_packet_filter(0, dhd); +#endif /* PKT_FILTER_SUPPORT */ +#ifdef PASS_ALL_MCAST_PKTS + allmulti = 1; + bcm_mkiovar("allmulti", (char 
*)&allmulti, 4, + iovbuf, sizeof(iovbuf)); + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, i); + } +#endif /* PASS_ALL_MCAST_PKTS */ + + /* restore pre-suspend setting for dtim_skip */ + bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim, + 4, iovbuf, sizeof(iovbuf)); + + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#ifdef DHD_USE_EARLYSUSPEND +#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND + bcn_timeout = CUSTOM_BCN_TIMEOUT; + bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */ +#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND + roam_time_thresh = 2000; + bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */ +#ifndef ENABLE_FW_ROAM_SUSPEND + roamvar = dhd_roam_disable; + bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, + sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* ENABLE_FW_ROAM_SUSPEND */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + bcn_li_bcn = 1; + bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, + 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ + if (FW_SUPPORTED(dhd, ndoe)) { + /* disable IPv6 RA filter in firmware during suspend */ + nd_ra_filter = 0; + bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4, + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("failed to set nd_ra_filter (%d)\n", + ret)); + } +#endif /* DHD_USE_EARLYSUSPEND */ + } + } + dhd_suspend_unlock(dhd); + + return 0; +} + +static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force) +{ + dhd_pub_t *dhdp = &dhd->pub; + int ret = 0; + + DHD_OS_WAKE_LOCK(dhdp); + DHD_PERIM_LOCK(dhdp); + + /* Set flag when early suspend was called */ + dhdp->in_suspend = val; + if ((force || !dhdp->suspend_disable_flag) && + dhd_support_sta_mode(dhdp)) + { + ret = dhd_set_suspend(val, dhdp); + } + + DHD_PERIM_UNLOCK(dhdp); + DHD_OS_WAKE_UNLOCK(dhdp); + return ret; +} + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) +static void dhd_early_suspend(struct early_suspend *h) +{ + struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend); + DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__)); + + if (dhd) + dhd_suspend_resume_helper(dhd, 1, 0); +} + +static void dhd_late_resume(struct early_suspend *h) +{ + struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend); + DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__)); + + if (dhd) + dhd_suspend_resume_helper(dhd, 0, 0); +} +#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ + +/* + * Generalized timeout mechanism. Uses spin sleep with exponential back-off until + * the sleep time reaches one jiffy, then switches over to task delay. 
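+ *
+ * For example, with HZ = 1000 (tick = jiffies_to_usecs(1) = 1000 us) the spin
+ * delays double as 1, 2, 4, ... 512 us, are then clamped to the 1000 us tick,
+ * and every later wait becomes a one-jiffy schedule_timeout() when sleeping
+ * is allowed. The numbers are illustrative; tick depends on the running
+ * kernel's HZ.
+ *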
Usage: + * + * dhd_timeout_start(&tmo, usec); + * while (!dhd_timeout_expired(&tmo)) + * if (poll_something()) + * break; + * if (dhd_timeout_expired(&tmo)) + * fatal(); + */ + +void +dhd_timeout_start(dhd_timeout_t *tmo, uint usec) +{ + tmo->limit = usec; + tmo->increment = 0; + tmo->elapsed = 0; + tmo->tick = jiffies_to_usecs(1); +} + +int +dhd_timeout_expired(dhd_timeout_t *tmo) +{ + /* Does nothing the first call */ + if (tmo->increment == 0) { + tmo->increment = 1; + return 0; + } + + if (tmo->elapsed >= tmo->limit) + return 1; + + /* Add the delay that's about to take place */ + tmo->elapsed += tmo->increment; + + if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) { + OSL_DELAY(tmo->increment); + tmo->increment *= 2; + if (tmo->increment > tmo->tick) + tmo->increment = tmo->tick; + } else { + wait_queue_head_t delay_wait; + DECLARE_WAITQUEUE(wait, current); + init_waitqueue_head(&delay_wait); + add_wait_queue(&delay_wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + (void)schedule_timeout(1); + remove_wait_queue(&delay_wait, &wait); + set_current_state(TASK_RUNNING); + } + + return 0; +} + +int +dhd_net2idx(dhd_info_t *dhd, struct net_device *net) +{ + int i = 0; + + if (!dhd) { + DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__)); + return DHD_BAD_IF; + } + + while (i < DHD_MAX_IFS) { + if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net)) + return i; + i++; + } + + return DHD_BAD_IF; +} + +struct net_device * dhd_idx2net(void *pub, int ifidx) +{ + struct dhd_pub *dhd_pub = (struct dhd_pub *)pub; + struct dhd_info *dhd_info; + + if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS) + return NULL; + dhd_info = dhd_pub->info; + if (dhd_info && dhd_info->iflist[ifidx]) + return dhd_info->iflist[ifidx]->net; + return NULL; +} + +int +dhd_ifname2idx(dhd_info_t *dhd, char *name) +{ + int i = DHD_MAX_IFS; + + ASSERT(dhd); + + if (name == NULL || *name == '\0') + return 0; + + while (--i > 0) + if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ)) + break; + + DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name)); + + return i; /* default - the primary interface */ +} + +char * +dhd_ifname(dhd_pub_t *dhdp, int ifidx) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + + ASSERT(dhd); + + if (ifidx < 0 || ifidx >= DHD_MAX_IFS) { + DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx)); + return ""; + } + + if (dhd->iflist[ifidx] == NULL) { + DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx)); + return ""; + } + + if (dhd->iflist[ifidx]->net) + return dhd->iflist[ifidx]->net->name; + + return ""; +} + +uint8 * +dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx) +{ + int i; + dhd_info_t *dhd = (dhd_info_t *)dhdp; + + ASSERT(dhd); + for (i = 0; i < DHD_MAX_IFS; i++) + if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx) + return dhd->iflist[i]->mac_addr; + + return NULL; +} + + +static void +_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) +{ + struct net_device *dev; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + struct netdev_hw_addr *ha; +#else + struct dev_mc_list *mclist; +#endif + uint32 allmulti, cnt; + + wl_ioctl_t ioc; + char *buf, *bufp; + uint buflen; + int ret; + + if (!dhd->iflist[ifidx]) { + DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx)); + return; + } + dev = dhd->iflist[ifidx]->net; + if (!dev) + return; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_lock_bh(dev); +#endif /* LINUX >= 2.6.27 */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + cnt = 
netdev_mc_count(dev); +#else + cnt = dev->mc_count; +#endif /* LINUX >= 2.6.35 */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_unlock_bh(dev); +#endif /* LINUX >= 2.6.27 */ + + /* Determine initial value of allmulti flag */ + allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE; + +#ifdef PASS_ALL_MCAST_PKTS +#ifdef PKT_FILTER_SUPPORT + if (!dhd->pub.early_suspended) +#endif /* PKT_FILTER_SUPPORT */ + allmulti = TRUE; +#endif /* PASS_ALL_MCAST_PKTS */ + + /* Send down the multicast list first. */ + + + buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN); + if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) { + DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n", + dhd_ifname(&dhd->pub, ifidx), cnt)); + return; + } + + strncpy(bufp, "mcast_list", buflen - 1); + bufp[buflen - 1] = '\0'; + bufp += strlen("mcast_list") + 1; + + cnt = htol32(cnt); + memcpy(bufp, &cnt, sizeof(cnt)); + bufp += sizeof(cnt); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_lock_bh(dev); +#endif /* LINUX >= 2.6.27 */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) + netdev_for_each_mc_addr(ha, dev) { + if (!cnt) + break; + memcpy(bufp, ha->addr, ETHER_ADDR_LEN); + bufp += ETHER_ADDR_LEN; + cnt--; + } +#else /* LINUX < 2.6.35 */ + for (mclist = dev->mc_list; (mclist && (cnt > 0)); + cnt--, mclist = mclist->next) { + memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN); + bufp += ETHER_ADDR_LEN; + } +#endif /* LINUX >= 2.6.35 */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + netif_addr_unlock_bh(dev); +#endif /* LINUX >= 2.6.27 */ + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = buflen; + ioc.set = TRUE; + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set mcast_list failed, cnt %d\n", + dhd_ifname(&dhd->pub, ifidx), cnt)); + allmulti = cnt ? TRUE : allmulti; + } + + MFREE(dhd->pub.osh, buf, buflen); + + /* Now send the allmulti setting. This is based on the setting in the + * net_device flags, but might be modified above to be turned on if we + * were trying to set some addresses and dongle rejected it... + */ + + buflen = sizeof("allmulti") + sizeof(allmulti); + if (!(buf = MALLOC(dhd->pub.osh, buflen))) { + DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx))); + return; + } + allmulti = htol32(allmulti); + + if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) { + DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n", + dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen)); + MFREE(dhd->pub.osh, buf, buflen); + return; + } + + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = buflen; + ioc.set = TRUE; + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set allmulti %d failed\n", + dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti))); + } + + MFREE(dhd->pub.osh, buf, buflen); + + /* Finally, pick up the PROMISC flag as well, like the NIC driver does */ + + allmulti = (dev->flags & IFF_PROMISC) ? 
TRUE : FALSE; + + allmulti = htol32(allmulti); + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_PROMISC; + ioc.buf = &allmulti; + ioc.len = sizeof(allmulti); + ioc.set = TRUE; + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set promisc %d failed\n", + dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti))); + } +} + +int +_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr) +{ + char buf[32]; + wl_ioctl_t ioc; + int ret; + + if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) { + DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx))); + return -1; + } + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = 32; + ioc.set = TRUE; + + ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); + if (ret < 0) { + DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx))); + } else { + memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN); + if (ifidx == 0) + memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN); + } + + return ret; +} + +#ifdef SOFTAP +extern struct net_device *ap_net_dev; +extern tsk_ctl_t ap_eth_ctl; /* ap netdev heper thread ctl */ +#endif + +#ifdef DHD_PSTA +/* Get psta/psr configuration configuration */ +int dhd_get_psta_mode(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + return (int)dhd->psta_mode; +} +/* Set psta/psr configuration configuration */ +int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val) +{ + dhd_info_t *dhd = dhdp->info; + dhd->psta_mode = val; + return 0; +} +#endif /* DHD_PSTA */ + +static void +dhd_ifadd_event_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_if_event_t *if_event = event_info; + struct net_device *ndev; + int ifidx, bssidx; + int ret; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) + struct wireless_dev *vwdev, *primary_wdev; + struct net_device *primary_ndev; +#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */ + + if (event != DHD_WQ_WORK_IF_ADD) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + if (!if_event) { + DHD_ERROR(("%s: event data is null \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + + ifidx = if_event->event.ifidx; + bssidx = if_event->event.bssidx; + DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx)); + + /* This path is for non-android case */ + /* The interface name in host and in event msg are same */ + /* if name in event msg is used to create dongle if list on host */ + ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name, + if_event->mac, bssidx, TRUE, if_event->name); + if (!ndev) { + DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__)); + goto done; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) + vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL); + if (unlikely(!vwdev)) { + DHD_ERROR(("Could not allocate wireless device\n")); + goto done; + } + primary_ndev = dhd->pub.info->iflist[0]->net; + primary_wdev = ndev_to_wdev(primary_ndev); + vwdev->wiphy = primary_wdev->wiphy; + vwdev->iftype = if_event->event.role; + vwdev->netdev = ndev; + ndev->ieee80211_ptr = vwdev; + SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy)); + DHD_ERROR(("virtual interface(%s) is created\n", if_event->name)); +#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 
11, 0)) */ + + DHD_PERIM_UNLOCK(&dhd->pub); + ret = dhd_register_if(&dhd->pub, ifidx, TRUE); + DHD_PERIM_LOCK(&dhd->pub); + if (ret != BCME_OK) { + DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__)); + dhd_remove_if(&dhd->pub, ifidx, TRUE); + goto done; + } +#ifdef PCIE_FULL_DONGLE + /* Turn on AP isolation in the firmware for interfaces operating in AP mode */ + if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) { + char iovbuf[WLC_IOCTL_SMLEN]; + uint32 var_int = 1; + + memset(iovbuf, 0, sizeof(iovbuf)); + bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf)); + ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx); + + if (ret != BCME_OK) { + DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__)); + dhd_remove_if(&dhd->pub, ifidx, TRUE); + } + } +#endif /* PCIE_FULL_DONGLE */ + +done: + MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t)); + + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +static void +dhd_ifdel_event_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + int ifidx; + dhd_if_event_t *if_event = event_info; + + + if (event != DHD_WQ_WORK_IF_DEL) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + if (!if_event) { + DHD_ERROR(("%s: event data is null \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + + ifidx = if_event->event.ifidx; + DHD_TRACE(("Removing interface with idx %d\n", ifidx)); + + DHD_PERIM_UNLOCK(&dhd->pub); + dhd_remove_if(&dhd->pub, ifidx, TRUE); + DHD_PERIM_LOCK(&dhd->pub); + + MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t)); + + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +static void +dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_if_t *ifp = event_info; + + if (event != DHD_WQ_WORK_SET_MAC) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + +#ifdef SOFTAP + { + unsigned long flags; + bool in_ap = FALSE; + DHD_GENERAL_LOCK(&dhd->pub, flags); + in_ap = (ap_net_dev != NULL); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + + if (in_ap) { + DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. 
\n", + ifp->net->name)); + goto done; + } + } +#endif /* SOFTAP */ + + if (ifp == NULL || !dhd->pub.up) { + DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__)); + goto done; + } + + DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__)); + ifp->set_macaddress = FALSE; + if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0) + DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__)); + else + DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__)); + +done: + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +static void +dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_if_t *ifp = event_info; + int ifidx; + + if (event != DHD_WQ_WORK_SET_MCAST_LIST) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!dhd) { + DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__)); + return; + } + + dhd_net_if_lock_local(dhd); + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + +#ifdef SOFTAP + { + bool in_ap = FALSE; + unsigned long flags; + DHD_GENERAL_LOCK(&dhd->pub, flags); + in_ap = (ap_net_dev != NULL); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + + if (in_ap) { + DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n", + ifp->net->name)); + ifp->set_multicast = FALSE; + goto done; + } + } +#endif /* SOFTAP */ + + if (ifp == NULL || !dhd->pub.up) { + DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__)); + goto done; + } + + ifidx = ifp->idx; + + + _dhd_set_multicast_list(dhd, ifidx); + DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx)); + +done: + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + dhd_net_if_unlock_local(dhd); +} + +static int +dhd_set_mac_address(struct net_device *dev, void *addr) +{ + int ret = 0; + + dhd_info_t *dhd = DHD_DEV_INFO(dev); + struct sockaddr *sa = (struct sockaddr *)addr; + int ifidx; + dhd_if_t *dhdif; + + ifidx = dhd_net2idx(dhd, dev); + if (ifidx == DHD_BAD_IF) + return -1; + + dhdif = dhd->iflist[ifidx]; + + dhd_net_if_lock_local(dhd); + memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN); + dhdif->set_macaddress = TRUE; + dhd_net_if_unlock_local(dhd); + dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC, + dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW); + return ret; +} + +static void +dhd_set_multicast_list(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ifidx; + + ifidx = dhd_net2idx(dhd, dev); + if (ifidx == DHD_BAD_IF) + return; + + dhd->iflist[ifidx]->set_multicast = TRUE; + dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx], + DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW); +} + +#ifdef PROP_TXSTATUS +int +dhd_os_wlfc_block(dhd_pub_t *pub) +{ + dhd_info_t *di = (dhd_info_t *)(pub->info); + ASSERT(di != NULL); + spin_lock_bh(&di->wlfc_spinlock); + return 1; +} + +int +dhd_os_wlfc_unblock(dhd_pub_t *pub) +{ + dhd_info_t *di = (dhd_info_t *)(pub->info); + + ASSERT(di != NULL); + spin_unlock_bh(&di->wlfc_spinlock); + return 1; +} + +#endif /* PROP_TXSTATUS */ + +#if defined(DHD_8021X_DUMP) +void +dhd_tx_dump(struct net_device *ndev, osl_t *osh, void *pkt) +{ + uint8 *dump_data; + uint16 protocol; + char *ifname; + + dump_data = PKTDATA(osh, pkt); + protocol = (dump_data[12] << 8) | dump_data[13]; + ifname = ndev ? 
ndev->name : "N/A"; + + if (protocol == ETHER_TYPE_802_1X) { + dhd_dump_eapol_4way_message(ifname, dump_data, TRUE); + } +} +#endif /* DHD_8021X_DUMP */ + +/* This routine do not support Packet chain feature, Currently tested for + * proxy arp feature + */ +int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p) +{ + struct sk_buff *skb; + void *skbhead = NULL; + void *skbprev = NULL; + dhd_if_t *ifp; + ASSERT(!PKTISCHAINED(p)); + skb = PKTTONATIVE(dhdp->osh, p); + + ifp = dhdp->info->iflist[ifidx]; + skb->dev = ifp->net; +#if defined(BCM_GMAC3) + /* Forwarder capable interfaces use WOFA based forwarding */ + if (ifp->fwdh) { + struct ether_header *eh = (struct ether_header *)PKTDATA(dhdp->osh, p); + uint16 * da = (uint16 *)(eh->ether_dhost); + wofa_t wofa; + ASSERT(ISALIGNED(da, 2)); + + wofa = fwder_lookup(ifp->fwdh->mate, da, ifp->idx); + if (wofa == FWDER_WOFA_INVALID) { /* Unknown MAC address */ + if (fwder_transmit(ifp->fwdh, skb, 1, skb->dev) == FWDER_SUCCESS) { + return BCME_OK; + } + } + PKTFRMNATIVE(dhdp->osh, p); + PKTFREE(dhdp->osh, p, FALSE); + return BCME_OK; + } +#endif /* BCM_GMAC3 */ + + skb->protocol = eth_type_trans(skb, skb->dev); + + if (in_interrupt()) { + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + netif_rx(skb); + } else { + if (dhdp->info->rxthread_enabled) { + if (!skbhead) { + skbhead = skb; + } else { + PKTSETNEXT(dhdp->osh, skbprev, skb); + } + skbprev = skb; + } else { + /* If the receive is not processed inside an ISR, + * the softirqd must be woken explicitly to service + * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled + * by netif_rx_ni(), but in earlier kernels, we need + * to do it manually. + */ + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + netif_rx_ni(skb); +#else + ulong flags; + netif_rx(skb); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ + } + } + + if (dhdp->info->rxthread_enabled && skbhead) + dhd_sched_rxf(dhdp, skbhead); + + return BCME_OK; +} + +int BCMFASTPATH +__dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) +{ + int ret = BCME_OK; + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + struct ether_header *eh = NULL; +#ifdef DHD_L2_FILTER + dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx); +#endif +#ifdef DHD_8021X_DUMP + struct net_device *ndev; +#endif /* DHD_8021X_DUMP */ + + /* Reject if down */ + if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) { + /* free the packet here since the caller won't */ + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return -ENODEV; + } + +#ifdef PCIE_FULL_DONGLE + if (dhdp->busstate == DHD_BUS_SUSPEND) { + DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__)); + PKTFREE(dhdp->osh, pktbuf, TRUE); + return -EBUSY; + } +#endif /* PCIE_FULL_DONGLE */ + +#ifdef DHD_L2_FILTER + /* if dhcp_unicast is enabled, we need to convert the */ + /* broadcast DHCP ACK/REPLY packets to Unicast. 
*/ + if (ifp->dhcp_unicast) { + uint8* mac_addr; + uint8* ehptr = NULL; + int ret; + ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr); + if (ret == BCME_OK) { + /* if given mac address having valid entry in sta list + * copy the given mac address, and return with BCME_OK + */ + if (dhd_find_sta(dhdp, ifidx, mac_addr)) { + ehptr = PKTDATA(dhdp->osh, pktbuf); + bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN); + } + } + } + + if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) { + if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; + } + } + + if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) { + ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE); + + /* Drop the packets if l2 filter has processed it already + * otherwise continue with the normal path + */ + if (ret == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; + } + } +#endif /* DHD_L2_FILTER */ + /* Update multicast statistic */ + if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) { + uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf); + eh = (struct ether_header *)pktdata; + + if (ETHER_ISMULTI(eh->ether_dhost)) + dhdp->tx_multicast++; + if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) + atomic_inc(&dhd->pend_8021x_cnt); +#ifdef DHD_DHCP_DUMP + if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) { + uint16 dump_hex; + uint16 source_port; + uint16 dest_port; + uint16 udp_port_pos; + uint8 *ptr8 = (uint8 *)&pktdata[ETHER_HDR_LEN]; + uint8 ip_header_len = (*ptr8 & 0x0f)<<2; + struct net_device *net; + char *ifname; + + net = dhd_idx2net(dhdp, ifidx); + ifname = net ? net->name : "N/A"; + udp_port_pos = ETHER_HDR_LEN + ip_header_len; + source_port = (pktdata[udp_port_pos] << 8) | pktdata[udp_port_pos+1]; + dest_port = (pktdata[udp_port_pos+2] << 8) | pktdata[udp_port_pos+3]; + if (source_port == 0x0044 || dest_port == 0x0044) { + dump_hex = (pktdata[udp_port_pos+249] << 8) | + pktdata[udp_port_pos+250]; + if (dump_hex == 0x0101) { + DHD_ERROR(("DHCP[%s] - DISCOVER [TX]", ifname)); + } else if (dump_hex == 0x0102) { + DHD_ERROR(("DHCP[%s] - OFFER [TX]", ifname)); + } else if (dump_hex == 0x0103) { + DHD_ERROR(("DHCP[%s] - REQUEST [TX]", ifname)); + } else if (dump_hex == 0x0105) { + DHD_ERROR(("DHCP[%s] - ACK [TX]", ifname)); + } else { + DHD_ERROR(("DHCP[%s] - 0x%X [TX]", ifname, dump_hex)); + } +#ifdef DHD_LOSSLESS_ROAMING + if (dhdp->dequeue_prec_map != (uint8)ALLPRIO) { + DHD_ERROR(("/%d", dhdp->dequeue_prec_map)); + } +#endif /* DHD_LOSSLESS_ROAMING */ + DHD_ERROR(("\n")); + } else if (source_port == 0x0043 || dest_port == 0x0043) { + DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname)); + } + } +#endif /* DHD_DHCP_DUMP */ + } else { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return BCME_ERROR; + } + + /* Look into the packet and update the packet priority */ +#ifndef PKTPRIO_OVERRIDE + if (PKTPRIO(pktbuf) == 0) +#endif /* !PKTPRIO_OVERRIDE */ + { +#ifdef QOS_MAP_SET + pktsetprio_qms(pktbuf, wl_get_up_table(), FALSE); +#else + pktsetprio(pktbuf, FALSE); +#endif /* QOS_MAP_SET */ + } + + +#ifdef PCIE_FULL_DONGLE + /* + * Lkup the per interface hash table, for a matching flowring. If one is not + * available, allocate a unique flowid and add a flowring entry. + * The found or newly created flowid is placed into the pktbuf's tag. 
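+	 *
+	 * Conceptually (a sketch, not the actual ring implementation), the
+	 * lookup key is the tuple (ifidx, priority, destination MAC):
+	 *
+	 *	flowid = flow_lookup(ifidx, prio, eh->ether_dhost);
+	 *	if (flowid == FLOWID_INVALID)
+	 *		flowid = flow_alloc_and_link(ifidx, prio, eh->ether_dhost);
+	 *	pkttag_set_flowid(pktbuf, flowid);
+	 *
+	 * where flow_lookup(), flow_alloc_and_link() and pkttag_set_flowid()
+	 * are hypothetical names standing in for the dhd_flowid_update()
+	 * internals.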
+ */ + ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf); + if (ret != BCME_OK) { + PKTCFREE(dhd->pub.osh, pktbuf, TRUE); + return ret; + } +#endif + +#ifdef PROP_TXSTATUS + if (dhd_wlfc_is_supported(dhdp)) { + /* store the interface ID */ + DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx); + + /* store destination MAC in the tag as well */ + DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost); + + /* decide which FIFO this packet belongs to */ + if (ETHER_ISMULTI(eh->ether_dhost)) + /* one additional queue index (highest AC + 1) is used for bc/mc queue */ + DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT); + else + DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf))); + } else +#endif /* PROP_TXSTATUS */ + { + /* If the protocol uses a data header, apply it */ + dhd_prot_hdrpush(dhdp, ifidx, pktbuf); + } + + /* Use bus module to send data frame */ +#ifdef WLMEDIA_HTSF + dhd_htsf_addtxts(dhdp, pktbuf); +#endif +#if defined(DHD_8021X_DUMP) + ndev = dhd_idx2net(dhdp, ifidx); + dhd_tx_dump(ndev, dhdp->osh, pktbuf); +#endif +#ifdef PROP_TXSTATUS + { + if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata, + dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) { + /* non-proptxstatus way */ +#ifdef BCMPCIE + ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx); +#else + ret = dhd_bus_txdata(dhdp->bus, pktbuf); +#endif /* BCMPCIE */ + } + } +#else +#ifdef BCMPCIE + ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx); +#else + ret = dhd_bus_txdata(dhdp->bus, pktbuf); +#endif /* BCMPCIE */ +#endif /* PROP_TXSTATUS */ + + return ret; +} + +int BCMFASTPATH +dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) +{ + int ret = 0; + unsigned long flags; + + DHD_GENERAL_LOCK(dhdp, flags); + if (dhdp->busstate == DHD_BUS_DOWN || + dhdp->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + DHD_ERROR(("%s: returning as busstate=%d\n", + __FUNCTION__, dhdp->busstate)); + DHD_GENERAL_UNLOCK(dhdp, flags); + PKTCFREE(dhdp->osh, pktbuf, TRUE); + return -ENODEV; + } + dhdp->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SEND_PKT; + DHD_GENERAL_UNLOCK(dhdp, flags); + +#ifdef DHD_PCIE_RUNTIMEPM + if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) { + DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__)); + PKTCFREE(dhdp->osh, pktbuf, TRUE); + ret = -EBUSY; + goto exit; + } +#endif /* DHD_PCIE_RUNTIMEPM */ + + ret = __dhd_sendpkt(dhdp, ifidx, pktbuf); + +#ifdef DHD_PCIE_RUNTIMEPM +exit: +#endif + DHD_GENERAL_LOCK(dhdp, flags); + dhdp->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SEND_PKT; + DHD_GENERAL_UNLOCK(dhdp, flags); + return ret; +} + +int BCMFASTPATH +dhd_start_xmit(struct sk_buff *skb, struct net_device *net) +{ + int ret; + uint datalen; + void *pktbuf; + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_if_t *ifp = NULL; + int ifidx; + unsigned long flags; +#ifdef WLMEDIA_HTSF + uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz; +#else + uint8 htsfdlystat_sz = 0; +#endif +#ifdef DHD_WMF + struct ether_header *eh; + uint8 *iph; +#endif /* DHD_WMF */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + +#ifdef PCIE_FULL_DONGLE + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->pub.dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX; + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* PCIE_FULL_DONGLE */ + +#ifdef DHD_PCIE_RUNTIMEPM + if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) { + /* In order to avoid pkt loss. Return NETDEV_TX_BUSY until run-time resumed. 
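+	 * NETDEV_TX_BUSY tells the core to requeue this skb and retry it
+	 * later, so the packet is not lost while the bus resumes; stopping
+	 * the network queue below keeps the stack from busy-retrying until
+	 * the queue is woken again once resume completes.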
*/ + /* stop the network queue temporarily until resume done */ + DHD_GENERAL_LOCK(&dhd->pub, flags); + if (!dhdpcie_is_resume_done(&dhd->pub)) { + dhd_bus_stop_queue(dhd->pub.bus); + } + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif + } +#endif /* DHD_PCIE_RUNTIMEPM */ + + DHD_GENERAL_LOCK(&dhd->pub, flags); +#ifdef PCIE_FULL_DONGLE + if (dhd->pub.busstate == DHD_BUS_SUSPEND) { + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif + } +#endif /* PCIE_FULL_DONGLE */ + + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + + /* Reject if down */ + if (dhd->pub.hang_was_sent || dhd->pub.busstate == DHD_BUS_DOWN || + dhd->pub.busstate == DHD_BUS_DOWN_IN_PROGRESS) { + DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n", + __FUNCTION__, dhd->pub.up, dhd->pub.busstate)); + netif_stop_queue(net); + /* Send Event when bus down detected during data session */ + if (dhd->pub.up && !dhd->pub.hang_was_sent) { + DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__)); + dhd->pub.hang_reason = HANG_REASON_BUS_DOWN; + net_os_send_hang_message(net); + } +#ifdef PCIE_FULL_DONGLE + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* PCIE_FULL_DONGLE */ + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + DHD_OS_WAKE_UNLOCK(&dhd->pub); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif + } + + ifp = DHD_DEV_IFP(net); + ifidx = DHD_DEV_IFIDX(net); + BUZZZ_LOG(START_XMIT_BGN, 2, (uint32)ifidx, (uintptr)skb); + + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx)); + netif_stop_queue(net); +#ifdef PCIE_FULL_DONGLE + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* PCIE_FULL_DONGLE */ + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + DHD_OS_WAKE_UNLOCK(&dhd->pub); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return -ENODEV; +#else + return NETDEV_TX_BUSY; +#endif + } + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + + ASSERT(ifidx == dhd_net2idx(dhd, net)); + ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx]))); + + bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__); + + /* re-align socket buffer if "skb->data" is odd address */ + if (((unsigned long)(skb->data)) & 0x1) { + unsigned char *data = skb->data; + uint32 length = skb->len; + PKTPUSH(dhd->pub.osh, skb, 1); + memmove(skb->data, data, length); + PKTSETLEN(dhd->pub.osh, skb, length); + } + + datalen = PKTLEN(dhd->pub.osh, skb); + + /* Make sure there's enough room for any header */ + if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) { + struct sk_buff *skb2; + + DHD_INFO(("%s: insufficient headroom\n", + dhd_ifname(&dhd->pub, ifidx))); + dhd->pub.tx_realloc++; + + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__); + skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz); + + dev_kfree_skb(skb); + if ((skb = skb2) == NULL) { + DHD_ERROR(("%s: skb_realloc_headroom failed\n", + 
dhd_ifname(&dhd->pub, ifidx))); + ret = -ENOMEM; + goto done; + } + bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__); + } + + /* Convert to packet */ + if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) { + DHD_ERROR(("%s: PKTFRMNATIVE failed\n", + dhd_ifname(&dhd->pub, ifidx))); + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__); + dev_kfree_skb_any(skb); + ret = -ENOMEM; + goto done; + } + +#if defined(WLMEDIA_HTSF) + if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) { + uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf); + struct ether_header *eh = (struct ether_header *)pktdata; + + if (!ETHER_ISMULTI(eh->ether_dhost) && + (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) { + eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS); + } + } +#endif + +#ifdef DHD_WMF + eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf); + iph = (uint8 *)eh + ETHER_HDR_LEN; + + /* WMF processing for multicast packets + * Only IPv4 packets are handled + */ + if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) && + (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) || + ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) { +#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) + void *sdu_clone; + bool ucast_convert = FALSE; +#ifdef DHD_UCAST_UPNP + uint32 dest_ip; + + dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET))); + ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip); +#endif /* DHD_UCAST_UPNP */ +#ifdef DHD_IGMP_UCQUERY + ucast_convert |= dhd->pub.wmf_ucast_igmp_query && + (IPV4_PROT(iph) == IP_PROT_IGMP) && + (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY); +#endif /* DHD_IGMP_UCQUERY */ + if (ucast_convert) { + dhd_sta_t *sta; +#ifdef PCIE_FULL_DONGLE + unsigned long flags; +#endif + struct list_head snapshot_list; + struct list_head *wmf_ucforward_list; + + ret = NETDEV_TX_OK; + + /* For non BCM_GMAC3 platform we need a snapshot sta_list to + * resolve double DHD_IF_STA_LIST_LOCK call deadlock issue. + */ + wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list); + + /* Convert upnp/igmp query to unicast for each assoc STA */ + list_for_each_entry(sta, wmf_ucforward_list, list) { + if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) { + ret = WMF_NOP; + break; + } + dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1); + } + DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list); + +#ifdef PCIE_FULL_DONGLE + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* PCIE_FULL_DONGLE */ + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + if (ret == NETDEV_TX_OK) + PKTFREE(dhd->pub.osh, pktbuf, TRUE); + + return ret; + } else +#endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */ + { + /* There will be no STA info if the packet is coming from LAN host + * Pass as NULL + */ + ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0); + switch (ret) { + case WMF_TAKEN: + case WMF_DROP: + /* Either taken by WMF or we should drop it. 
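+				 * WMF_TAKEN means WMF has assumed ownership of
+				 * the packet; WMF_DROP means it must not be
+				 * transmitted. Either way this path does not
+				 * touch the buffer again.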
+ * Exiting send path + */ +#ifdef PCIE_FULL_DONGLE + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* PCIE_FULL_DONGLE */ + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + return NETDEV_TX_OK; + default: + /* Continue the transmit path */ + break; + } + } + } +#endif /* DHD_WMF */ +#ifdef DHD_PSTA + /* PSR related packet proto manipulation should be done in DHD + * since dongle doesn't have complete payload + */ + if (PSR_ENABLED(&dhd->pub) && (dhd_psta_proc(&dhd->pub, + ifidx, &pktbuf, TRUE) < 0)) { + DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__, + dhd_ifname(&dhd->pub, ifidx))); + } +#endif /* DHD_PSTA */ + +#ifdef DHDTCPACK_SUPPRESS + if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) { + /* If this packet has been hold or got freed, just return */ + if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) { + ret = 0; + goto done; + } + } else { + /* If this packet has replaced another packet and got freed, just return */ + if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) { + ret = 0; + goto done; + } + } +#endif /* DHDTCPACK_SUPPRESS */ + + /* segmented SKB support (Kernel-3.18.y) */ + if ((PKTLINK(skb) != NULL) && (PKTLINK(skb) == skb)) { + PKTSETLINK(skb, NULL); + } + + ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf); + +done: + if (ret) { + ifp->stats.tx_dropped++; + dhd->pub.tx_dropped++; + } else { + +#ifdef PROP_TXSTATUS + /* tx_packets counter can counted only when wlfc is disabled */ + if (!dhd_wlfc_is_supported(&dhd->pub)) +#endif + { + dhd->pub.tx_packets++; + ifp->stats.tx_packets++; + ifp->stats.tx_bytes += datalen; + } + } + +#ifdef PCIE_FULL_DONGLE + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX; + dhd_os_busbusy_wake(&dhd->pub); + DHD_GENERAL_UNLOCK(&dhd->pub, flags); +#endif /* PCIE_FULL_DONGLE */ + + DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + BUZZZ_LOG(START_XMIT_END, 0); + + /* Return ok: we always eat the packet */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)) + return 0; +#else + return NETDEV_TX_OK; +#endif +} + + +void +dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state) +{ + struct net_device *net; + dhd_info_t *dhd = dhdp->info; + int i; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(dhd); + +#ifdef DHD_LOSSLESS_ROAMING + /* block flowcontrol during roaming */ + if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) { + return; + } +#endif + + if (ifidx == ALL_INTERFACES) { + /* Flow control on all active interfaces */ + dhdp->txoff = state; + for (i = 0; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) { + net = dhd->iflist[i]->net; + if (state == ON) + netif_stop_queue(net); + else + netif_wake_queue(net); + } + } + } else { + if (dhd->iflist[ifidx]) { + net = dhd->iflist[ifidx]->net; + if (state == ON) + netif_stop_queue(net); + else + netif_wake_queue(net); + } + } +} + +#ifdef DHD_RX_DUMP +typedef struct { + uint16 type; + const char *str; +} PKTTYPE_INFO; + +static const PKTTYPE_INFO packet_type_info[] = +{ + { ETHER_TYPE_IP, "IP" }, + { ETHER_TYPE_ARP, "ARP" }, + { ETHER_TYPE_BRCM, "BRCM" }, + { ETHER_TYPE_802_1X, "802.1X" }, + { 0, ""} +}; + +static const char *_get_packet_type_str(uint16 type) +{ + int i; + int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1; + + for (i = 0; i < n; i++) { + if (packet_type_info[i].type == type) + return 
packet_type_info[i].str; + } + + return packet_type_info[n].str; +} +#endif /* DHD_RX_DUMP */ + + +#ifdef DHD_WMF +bool +dhd_is_rxthread_enabled(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = dhdp->info; + + return dhd->rxthread_enabled; +} +#endif /* DHD_WMF */ + +/** Called when a frame is received by the dongle on interface 'ifidx' */ +void +dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + struct sk_buff *skb; + uchar *eth; + uint len; + void *data, *pnext = NULL; + int i; + dhd_if_t *ifp; + wl_event_msg_t event; + int tout_rx = 0; + int tout_ctrl = 0; + void *skbhead = NULL; + void *skbprev = NULL; +#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) + char *dump_data; + uint16 protocol; + char *ifname; +#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) { + struct ether_header *eh; + + pnext = PKTNEXT(dhdp->osh, pktbuf); + PKTSETNEXT(dhdp->osh, pktbuf, NULL); + + ifp = dhd->iflist[ifidx]; + if (ifp == NULL) { + DHD_ERROR(("%s: ifp is NULL. drop packet\n", + __FUNCTION__)); + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + } + + eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf); + + /* Dropping only data packets before registering net device to avoid kernel panic */ +#ifndef PROP_TXSTATUS_VSDB + if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) && + (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) { +#else + if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) && + (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM)) { +#endif /* PROP_TXSTATUS_VSDB */ + DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n", + __FUNCTION__)); + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + } + + +#ifdef PROP_TXSTATUS + if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) { + /* WLFC may send header only packet when + there is an urgent message but no packet to + piggy-back on + */ + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + } +#endif +#ifdef DHD_L2_FILTER + /* If block_ping is enabled drop the ping packet */ + if (ifp->block_ping) { + if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + } + } + if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) { + if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + } + } + if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) { + int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE); + + /* Drop the packets if l2 filter has processed it already + * otherwise continue with the normal path + */ + if (ret == BCME_OK) { + PKTCFREE(dhdp->osh, pktbuf, TRUE); + continue; + } + } +#endif /* DHD_L2_FILTER */ +#ifdef DHD_WMF + /* WMF processing for multicast packets */ + if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) { + dhd_sta_t *sta; + int ret; + + sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost); + ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1); + switch (ret) { + case WMF_TAKEN: + /* The packet is taken by WMF. Continue to next iteration */ + continue; + case WMF_DROP: + /* Packet DROP decision by WMF. 
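+				 * Unlike the WMF_TAKEN case above, ownership
+				 * stays with the caller here, hence the
+				 * PKTCFREE below.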
Toss it */ + DHD_ERROR(("%s: WMF decides to drop packet\n", + __FUNCTION__)); + PKTCFREE(dhdp->osh, pktbuf, FALSE); + continue; + default: + /* Continue the transmit path */ + break; + } + } +#endif /* DHD_WMF */ + +#ifdef DHDTCPACK_SUPPRESS + dhd_tcpdata_info_get(dhdp, pktbuf); +#endif + skb = PKTTONATIVE(dhdp->osh, pktbuf); + + ASSERT(ifp); + skb->dev = ifp->net; + +#ifdef DHD_PSTA + if (PSR_ENABLED(dhdp) && (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) { + DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__, + dhd_ifname(dhdp, ifidx))); + } +#endif /* DHD_PSTA */ + +#ifdef PCIE_FULL_DONGLE + if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) && + (!ifp->ap_isolate)) { + eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf); + if (ETHER_ISUCAST(eh->ether_dhost)) { + if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) { + dhd_sendpkt(dhdp, ifidx, pktbuf); + continue; + } + } else { + void *npktbuf = PKTDUP(dhdp->osh, pktbuf); + if (npktbuf) + dhd_sendpkt(dhdp, ifidx, npktbuf); + } + } +#endif /* PCIE_FULL_DONGLE */ + + /* Get the protocol, maintain skb around eth_type_trans() + * The main reason for this hack is for the limitation of + * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len' + * to perform skb_pull inside vs ETH_HLEN. Since to avoid + * coping of the packet coming from the network stack to add + * BDC, Hardware header etc, during network interface registration + * we set the 'net->hard_header_len' to ETH_HLEN + extra space required + * for BDC, Hardware header etc. and not just the ETH_HLEN + */ + eth = skb->data; + len = skb->len; + +#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP) + dump_data = skb->data; + protocol = (dump_data[12] << 8) | dump_data[13]; + ifname = skb->dev ? 
skb->dev->name : "N/A";
+#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */
+#ifdef DHD_8021X_DUMP
+		if (protocol == ETHER_TYPE_802_1X) {
+			dhd_dump_eapol_4way_message(ifname, dump_data, FALSE);
+		}
+#endif /* DHD_8021X_DUMP */
+#ifdef DHD_DHCP_DUMP
+		if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
+			uint16 dump_hex;
+			uint16 source_port;
+			uint16 dest_port;
+			uint16 udp_port_pos;
+			uint8 *ptr8 = (uint8 *)&dump_data[ETHER_HDR_LEN];
+			uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
+
+			udp_port_pos = ETHER_HDR_LEN + ip_header_len;
+			source_port = (dump_data[udp_port_pos] << 8) | dump_data[udp_port_pos+1];
+			dest_port = (dump_data[udp_port_pos+2] << 8) | dump_data[udp_port_pos+3];
+			if (source_port == 0x0044 || dest_port == 0x0044) {
+				dump_hex = (dump_data[udp_port_pos+249] << 8) |
+					dump_data[udp_port_pos+250];
+				if (dump_hex == 0x0101) {
+					DHD_ERROR(("DHCP[%s] - DISCOVER [RX]\n", ifname));
+				} else if (dump_hex == 0x0102) {
+					DHD_ERROR(("DHCP[%s] - OFFER [RX]\n", ifname));
+				} else if (dump_hex == 0x0103) {
+					DHD_ERROR(("DHCP[%s] - REQUEST [RX]\n", ifname));
+				} else if (dump_hex == 0x0105) {
+					DHD_ERROR(("DHCP[%s] - ACK [RX]\n", ifname));
+				} else {
+					DHD_ERROR(("DHCP[%s] - 0x%X [RX]\n", ifname, dump_hex));
+				}
+			} else if (source_port == 0x0043 || dest_port == 0x0043) {
+				DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname));
+			}
+		}
+#endif /* DHD_DHCP_DUMP */
+#if defined(DHD_RX_DUMP)
+		DHD_ERROR(("RX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol)));
+		if (protocol != ETHER_TYPE_BRCM) {
+			if (dump_data[0] == 0xFF) {
+				DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
+
+				if ((dump_data[12] == 8) &&
+					(dump_data[13] == 6)) {
+					DHD_ERROR(("%s: ARP %d\n",
+						__FUNCTION__, dump_data[0x15]));
+				}
+			} else if (dump_data[0] & 1) {
+				DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
+					__FUNCTION__, MAC2STRDBG(dump_data)));
+			}
+#ifdef DHD_RX_FULL_DUMP
+			{
+				int k;
+				for (k = 0; k < skb->len; k++) {
+					DHD_ERROR(("%02X ", dump_data[k]));
+					if ((k & 15) == 15)
+						DHD_ERROR(("\n"));
+				}
+				DHD_ERROR(("\n"));
+			}
+#endif /* DHD_RX_FULL_DUMP */
+		}
+#endif /* DHD_RX_DUMP */
+
+		skb->protocol = eth_type_trans(skb, skb->dev);
+
+		if (skb->pkt_type == PACKET_MULTICAST) {
+			dhd->pub.rx_multicast++;
+			ifp->stats.multicast++;
+		}
+
+		skb->data = eth;
+		skb->len = len;
+
+#ifdef WLMEDIA_HTSF
+		dhd_htsf_addrxts(dhdp, pktbuf);
+#endif
+		/* Strip header, count, deliver upward */
+		skb_pull(skb, ETH_HLEN);
+
+		/* Process special event packets and then discard them */
+		memset(&event, 0, sizeof(event));
+		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
+			dhd_wl_host_event(dhd, &ifidx,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+			skb_mac_header(skb),
+#else
+			skb->mac.raw,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
+			len - 2,
+			&event,
+			&data);
+
+			wl_event_to_host_order(&event);
+			if (!tout_ctrl)
+				tout_ctrl = DHD_PACKET_TIMEOUT_MS;
+
+#if defined(PNO_SUPPORT)
+			if (event.event_type == WLC_E_PFN_NET_FOUND) {
+				/* enforce custom wake lock to guarantee that the kernel is not suspended */
+				tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
+			}
+#endif /* PNO_SUPPORT */
+
+#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
+#ifdef DHD_USE_STATIC_CTRLBUF
+			PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+			PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+			continue;
+#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
+		} else {
+			tout_rx = DHD_PACKET_TIMEOUT_MS;
+
+#ifdef PROP_TXSTATUS
+			dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
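+			/*
+			 * Note: the tout_rx armed just above for data frames is
+			 * consumed at the bottom of dhd_rx_frame() by
+			 * DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(), keeping the host
+			 * awake for DHD_PACKET_TIMEOUT_MS so the network stack
+			 * can drain the frame before suspend is re-entered.
+			 */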
+#endif /* PROP_TXSTATUS */ + } + + ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]); + ifp = dhd->iflist[ifidx]; + + if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) { + dhdp->dstats.rx_bytes += skb->len; + dhdp->rx_packets++; /* Local count */ + ifp->stats.rx_bytes += skb->len; + ifp->stats.rx_packets++; + } + + if (in_interrupt()) { + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); +#if defined(DHD_LB) && defined(DHD_LB_RXP) + netif_receive_skb(skb); +#else + netif_rx(skb); +#endif /* !defined(DHD_LB) && !defined(DHD_LB_RXP) */ + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + } else { + if (dhd->rxthread_enabled) { + if (!skbhead) + skbhead = skb; + else + PKTSETNEXT(dhdp->osh, skbprev, skb); + skbprev = skb; + } else { + + /* If the receive is not processed inside an ISR, + * the softirqd must be woken explicitly to service + * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled + * by netif_rx_ni(), but in earlier kernels, we need + * to do it manually. + */ + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + +#if defined(DHD_LB) && defined(DHD_LB_RXP) + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + netif_receive_skb(skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); +#else +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + netif_rx_ni(skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); +#else + ulong flags; + DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + netif_rx(skb); + DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT)); + local_irq_save(flags); + RAISE_RX_SOFTIRQ(); + local_irq_restore(flags); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ +#endif /* !defined(DHD_LB) && !defined(DHD_LB_RXP) */ + } + } + } + + if (dhd->rxthread_enabled && skbhead) + dhd_sched_rxf(dhdp, skbhead); + + DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl); + DHD_OS_WAKE_LOCK_TIMEOUT(dhdp); +} + +void +dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx) +{ + /* Linux version has nothing to do */ + return; +} + +void +dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success) +{ + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + struct ether_header *eh; + uint16 type; + + dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL); + + eh = (struct ether_header *)PKTDATA(dhdp->osh, txp); + type = ntoh16(eh->ether_type); + + if ((type == ETHER_TYPE_802_1X) && (dhd_get_pend_8021x_cnt(dhd) > 0)) + atomic_dec(&dhd->pend_8021x_cnt); + +#ifdef PROP_TXSTATUS + if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) { + dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))]; + uint datalen = PKTLEN(dhd->pub.osh, txp); + if (ifp != NULL) { + if (success) { + dhd->pub.tx_packets++; + ifp->stats.tx_packets++; + ifp->stats.tx_bytes += datalen; + } else { + ifp->stats.tx_dropped++; + } + } + } +#endif +} + +static struct net_device_stats * +dhd_get_stats(struct net_device *net) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_if_t *ifp; + int ifidx; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ifidx = dhd_net2idx(dhd, net); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__)); + + memset(&net->stats, 0, sizeof(net->stats)); + return &net->stats; + } + + ifp = dhd->iflist[ifidx]; + ASSERT(dhd && ifp); + + if (dhd->pub.up) { + /* Use the protocol to get dongle stats */ + 
dhd_prot_dstats(&dhd->pub);
+	}
+	return &ifp->stats;
+}
+
+static int
+dhd_watchdog_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_watchdog_prio > 0) {
+		struct sched_param param;
+		param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
+			dhd_watchdog_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+	while (1) {
+		if (down_interruptible (&tsk->sema) == 0) {
+			unsigned long flags;
+			unsigned long jiffies_at_start = jiffies;
+			unsigned long time_lapse;
+
+			DHD_OS_WD_WAKE_LOCK(&dhd->pub);
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			if (dhd->pub.dongle_reset == FALSE) {
+				DHD_TIMER(("%s:\n", __FUNCTION__));
+				dhd_bus_watchdog(&dhd->pub);
+
+				DHD_GENERAL_LOCK(&dhd->pub, flags);
+				/* Count the tick for reference */
+				dhd->pub.tickcnt++;
+#ifdef DHD_L2_FILTER
+				dhd_l2_filter_watchdog(&dhd->pub);
+#endif /* DHD_L2_FILTER */
+				time_lapse = jiffies - jiffies_at_start;
+
+				/* Reschedule the watchdog */
+				if (dhd->wd_timer_valid) {
+					mod_timer(&dhd->timer,
+						jiffies +
+						msecs_to_jiffies(dhd_watchdog_ms) -
+						min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
+				}
+				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+			}
+			DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+		} else {
+			break;
+		}
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
+static void dhd_watchdog(struct timer_list *t)
+#else
+static void dhd_watchdog(ulong data)
+#endif
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
+	dhd_info_t *dhd = from_timer(dhd, t, timer);
+#else
+	dhd_info_t *dhd = (dhd_info_t *)data;
+#endif
+	unsigned long flags;
+
+	if (dhd->pub.dongle_reset) {
+		return;
+	}
+
+	if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
+		DHD_ERROR(("%s wd while suspend in progress \n", __FUNCTION__));
+		return;
+	}
+
+	if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+		up(&dhd->thr_wdt_ctl.sema);
+		return;
+	}
+
+	DHD_OS_WD_WAKE_LOCK(&dhd->pub);
+	/* Call the bus module watchdog */
+	dhd_bus_watchdog(&dhd->pub);
+	DHD_GENERAL_LOCK(&dhd->pub, flags);
+	/* Count the tick for reference */
+	dhd->pub.tickcnt++;
+
+#ifdef DHD_L2_FILTER
+	dhd_l2_filter_watchdog(&dhd->pub);
+#endif /* DHD_L2_FILTER */
+	/* Reschedule the watchdog */
+	if (dhd->wd_timer_valid)
+		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+	DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+}
+
+#ifdef DHD_PCIE_RUNTIMEPM
+static int
+dhd_rpm_state_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+	while (1) {
+		if (down_interruptible (&tsk->sema) == 0) {
+			unsigned long flags;
+			unsigned long jiffies_at_start = jiffies;
+			unsigned long time_lapse;
+
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			if (dhd->pub.dongle_reset == FALSE) {
+				DHD_TIMER(("%s:\n", __FUNCTION__));
+				if (dhd->pub.up) {
+					dhd_runtimepm_state(&dhd->pub);
+				}
+
+				DHD_GENERAL_LOCK(&dhd->pub, flags);
+				time_lapse = jiffies - jiffies_at_start;
+
+				/* Reschedule the watchdog */
+				if (dhd->rpm_timer_valid) {
+					mod_timer(&dhd->rpm_timer,
+						jiffies +
+						msecs_to_jiffies(dhd_runtimepm_ms) -
+						min(msecs_to_jiffies(dhd_runtimepm_ms),
+							time_lapse));
+				}
+				DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+			}
+		} else {
+			break;
+		}
+	}
+
+	complete_and_exit(&tsk->completed, 0);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
+static void dhd_runtimepm(struct timer_list *t)
+#else
+static void dhd_runtimepm(ulong data)
+#endif
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
+	dhd_info_t *dhd = from_timer(dhd, t, rpm_timer);
+#else
+	dhd_info_t *dhd = (dhd_info_t *)data;
+#endif
+
+	if (dhd->pub.dongle_reset) {
+		return;
+	}
+
+	if (dhd->thr_rpm_ctl.thr_pid >= 0) {
+		up(&dhd->thr_rpm_ctl.sema);
+		return;
+	}
+}
+
+void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
+{
+	dhd_os_runtimepm_timer(dhdp, 0);
+	dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
+	DHD_ERROR(("DHD Runtime PM Disabled \n"));
+}
+
+void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
+{
+	dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
+	DHD_ERROR(("DHD Runtime PM Enabled \n"));
+}
+
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+
+#ifdef ENABLE_ADAPTIVE_SCHED
+static void
+dhd_sched_policy(int prio)
+{
+	struct sched_param param;
+	if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
+		param.sched_priority = 0;
+		setScheduler(current, SCHED_NORMAL, &param);
+	} else {
+		if (get_scheduler_policy(current) != SCHED_FIFO) {
+			param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
+			setScheduler(current, SCHED_FIFO, &param);
+		}
+	}
+}
+#endif /* ENABLE_ADAPTIVE_SCHED */
+#ifdef DEBUG_CPU_FREQ
+static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
+{
+	dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
+	struct cpufreq_freqs *freq = data;
+	if (dhd) {
+		if (!dhd->new_freq)
+			goto exit;
+		if (val == CPUFREQ_POSTCHANGE) {
+			DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
+				freq->new, freq->cpu));
+			*per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
+		}
+	}
+exit:
+	return 0;
+}
+#endif /* DEBUG_CPU_FREQ */
+static int
+dhd_dpc_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_dpc_prio > 0)
+	{
+		struct sched_param param;
+		param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+#ifdef CUSTOM_DPC_CPUCORE
+	set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
+#endif
+#ifdef CUSTOM_SET_CPUCORE
+	dhd->pub.current_dpc = current;
+#endif /* CUSTOM_SET_CPUCORE */
+	/* Run until signal received */
+	while (1) {
+		if (!binary_sema_down(tsk)) {
+#ifdef ENABLE_ADAPTIVE_SCHED
+			dhd_sched_policy(dhd_dpc_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
+			SMP_RD_BARRIER_DEPENDS();
+			if (tsk->terminated) {
+				break;
+			}
+
+			/* Call bus dpc unless it indicated down (then clean stop) */
+			if (dhd->pub.busstate != DHD_BUS_DOWN) {
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+				int resched_cnt = 0;
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+				dhd_os_wd_timer_extend(&dhd->pub, TRUE);
+				while (dhd_bus_dpc(dhd->pub.bus)) {
+					/* process all data */
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+					resched_cnt++;
+					if (resched_cnt > MAX_RESCHED_CNT) {
+						DHD_INFO(("%s Calling msleep to "
+							"let other processes run.\n",
+							__FUNCTION__));
+						dhd->pub.dhd_bug_on = true;
+						resched_cnt = 0;
+						OSL_SLEEP(1);
+					}
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+				}
+				dhd_os_wd_timer_extend(&dhd->pub, FALSE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+			} else {
+				if (dhd->pub.up)
+					dhd_bus_stop(dhd->pub.bus, TRUE);
+				DHD_OS_WAKE_UNLOCK(&dhd->pub);
+			}
+		} else {
+			break;
+		}
+	}
+	complete_and_exit(&tsk->completed, 0);
+}
+
+static int
+dhd_rxf_thread(void *data)
+{
+	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+	dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+#if defined(WAIT_DEQUEUE)
+#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
+	ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
+#endif
+	dhd_pub_t *pub = &dhd->pub;
+
+	/* This thread doesn't need any user-level access,
+	 * so get rid of all our resources
+	 */
+	if (dhd_rxf_prio > 0)
+	{
+		struct sched_param param;
+		param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
+		setScheduler(current, SCHED_FIFO, &param);
+	}
+
+	DAEMONIZE("dhd_rxf");
+	/* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */
+
+	/* signal: thread has started */
+	complete(&tsk->completed);
+#ifdef CUSTOM_SET_CPUCORE
+	dhd->pub.current_rxf = current;
+#endif /* CUSTOM_SET_CPUCORE */
+	/* Run until signal received */
+	while (1) {
+		if (down_interruptible(&tsk->sema) == 0) {
+			void *skb;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+			ulong flags;
+#endif
+#ifdef ENABLE_ADAPTIVE_SCHED
+			dhd_sched_policy(dhd_rxf_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
+
+			SMP_RD_BARRIER_DEPENDS();
+
+			if (tsk->terminated) {
+				break;
+			}
+			skb = dhd_rxf_dequeue(pub);
+
+			if (skb == NULL) {
+				continue;
+			}
+			while (skb) {
+				void *skbnext = PKTNEXT(pub->osh, skb);
+				PKTSETNEXT(pub->osh, skb, NULL);
+				bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+					__FUNCTION__, __LINE__);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+				netif_rx_ni(skb);
+#else
+				netif_rx(skb);
+				local_irq_save(flags);
+				RAISE_RX_SOFTIRQ();
+				local_irq_restore(flags);
+
+#endif
+				skb = skbnext;
+			}
+#if defined(WAIT_DEQUEUE)
+			if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
+				OSL_SLEEP(1);
+				watchdogTime = OSL_SYSUPTIME();
+			}
+#endif
+
+			DHD_OS_WAKE_UNLOCK(pub);
+		} else {
+			break;
+		}
+	}
+	complete_and_exit(&tsk->completed, 0);
+}
+
+#ifdef BCMPCIE
+void dhd_dpc_enable(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+
+	if (!dhdp || !dhdp->info)
+		return;
+	dhd = dhdp->info;
+
+#ifdef DHD_LB
+#ifdef DHD_LB_RXP
+	__skb_queue_head_init(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+#ifdef DHD_LB_TXC
+	if (atomic_read(&dhd->tx_compl_tasklet.count) == 1)
+		tasklet_enable(&dhd->tx_compl_tasklet);
+#endif /* DHD_LB_TXC */
+#ifdef DHD_LB_RXC
+	if (atomic_read(&dhd->rx_compl_tasklet.count) == 1)
+		tasklet_enable(&dhd->rx_compl_tasklet);
+#endif /* DHD_LB_RXC */
+#endif /* DHD_LB */
+	if (atomic_read(&dhd->tasklet.count) == 1)
+		tasklet_enable(&dhd->tasklet);
+}
+#endif /* BCMPCIE */
+
+
+#ifdef BCMPCIE
+void
+dhd_dpc_kill(dhd_pub_t *dhdp)
+{
+	dhd_info_t *dhd;
+
+	if (!dhdp) {
+		return;
+	}
+
+	dhd = dhdp->info;
+
+	if (!dhd) {
+		return;
+	}
+
+	if (dhd->thr_dpc_ctl.thr_pid < 0) {
+		tasklet_disable(&dhd->tasklet);
+		tasklet_kill(&dhd->tasklet);
+		DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
+	}
+#if defined(DHD_LB)
+#ifdef DHD_LB_RXP
+	__skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+	/* Kill the Load Balancing Tasklets */
+#if defined(DHD_LB_TXC)
+	tasklet_disable(&dhd->tx_compl_tasklet);
+	tasklet_kill(&dhd->tx_compl_tasklet);
+#endif /* DHD_LB_TXC */
+#if defined(DHD_LB_RXC)
+	
tasklet_disable(&dhd->rx_compl_tasklet); + tasklet_kill(&dhd->rx_compl_tasklet); +#endif /* DHD_LB_RXC */ +#endif /* DHD_LB */ +} +#endif /* BCMPCIE */ + +static void +dhd_dpc(ulong data) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)data; + + /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c] + * down below , wake lock is set, + * the tasklet is initialized in dhd_attach() + */ + /* Call bus dpc unless it indicated down (then clean stop) */ + if (dhd->pub.busstate != DHD_BUS_DOWN) { + if (dhd_bus_dpc(dhd->pub.bus)) { + DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt); + tasklet_schedule(&dhd->tasklet); + } + } else { + dhd_bus_stop(dhd->pub.bus, TRUE); + } +} + +void +dhd_sched_dpc(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + + if (dhd->thr_dpc_ctl.thr_pid >= 0) { + DHD_OS_WAKE_LOCK(dhdp); + /* If the semaphore does not get up, + * wake unlock should be done here + */ + if (!binary_sema_up(&dhd->thr_dpc_ctl)) { + DHD_OS_WAKE_UNLOCK(dhdp); + } + return; + } else { + tasklet_schedule(&dhd->tasklet); + } +} + +static void +dhd_sched_rxf(dhd_pub_t *dhdp, void *skb) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; +#ifdef RXF_DEQUEUE_ON_BUSY + int ret = BCME_OK; + int retry = 2; +#endif /* RXF_DEQUEUE_ON_BUSY */ + + DHD_OS_WAKE_LOCK(dhdp); + + DHD_TRACE(("dhd_sched_rxf: Enter\n")); +#ifdef RXF_DEQUEUE_ON_BUSY + do { + ret = dhd_rxf_enqueue(dhdp, skb); + if (ret == BCME_OK || ret == BCME_ERROR) + break; + else + OSL_SLEEP(50); /* waiting for dequeueing */ + } while (retry-- > 0); + + if (retry <= 0 && ret == BCME_BUSY) { + void *skbp = skb; + + while (skbp) { + void *skbnext = PKTNEXT(dhdp->osh, skbp); + PKTSETNEXT(dhdp->osh, skbp, NULL); + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + netif_rx_ni(skbp); + skbp = skbnext; + } + DHD_ERROR(("send skb to kernel backlog without rxf_thread\n")); + } else { + if (dhd->thr_rxf_ctl.thr_pid >= 0) { + up(&dhd->thr_rxf_ctl.sema); + } + } +#else /* RXF_DEQUEUE_ON_BUSY */ + do { + if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK) + break; + } while (1); + if (dhd->thr_rxf_ctl.thr_pid >= 0) { + up(&dhd->thr_rxf_ctl.sema); + } + return; +#endif /* RXF_DEQUEUE_ON_BUSY */ +} + +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + +#ifdef TOE +/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */ +static int +dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol) +{ + wl_ioctl_t ioc; + char buf[32]; + int ret; + + memset(&ioc, 0, sizeof(ioc)); + + ioc.cmd = WLC_GET_VAR; + ioc.buf = buf; + ioc.len = (uint)sizeof(buf); + ioc.set = FALSE; + + strncpy(buf, "toe_ol", sizeof(buf) - 1); + buf[sizeof(buf) - 1] = '\0'; + if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + /* Check for older dongle image that doesn't support toe_ol */ + if (ret == -EIO) { + DHD_ERROR(("%s: toe not supported by device\n", + dhd_ifname(&dhd->pub, ifidx))); + return -EOPNOTSUPP; + } + + DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret)); + return ret; + } + + memcpy(toe_ol, buf, sizeof(uint32)); + return 0; +} + +/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */ +static int +dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol) +{ + wl_ioctl_t ioc; + char buf[32]; + int toe, ret; + + memset(&ioc, 0, sizeof(ioc)); + + ioc.cmd = WLC_SET_VAR; + ioc.buf = buf; + ioc.len = (uint)sizeof(buf); + ioc.set = TRUE; + + /* Set toe_ol as requested */ + 
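+	/*
+	 * Layout note (illustrative, inferred from the memcpy below): iovar
+	 * set requests are marshalled as the NUL-terminated iovar name
+	 * followed immediately by the raw value, so for toe_ol the buffer
+	 * built here looks like:
+	 *
+	 *   't' 'o' 'e' '_' 'o' 'l' '\0' <4-byte toe_ol value>
+	 *
+	 * which is why sizeof("toe_ol") (7, counting the NUL) is used as
+	 * the offset of the value.
+	 */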
+ strncpy(buf, "toe_ol", sizeof(buf) - 1); + buf[sizeof(buf) - 1] = '\0'; + memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32)); + + if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + DHD_ERROR(("%s: could not set toe_ol: ret=%d\n", + dhd_ifname(&dhd->pub, ifidx), ret)); + return ret; + } + + /* Enable toe globally only if any components are enabled. */ + + toe = (toe_ol != 0); + + strcpy(buf, "toe"); + memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32)); + + if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { + DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret)); + return ret; + } + + return 0; +} +#endif /* TOE */ + +#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE) +void dhd_set_scb_probe(dhd_pub_t *dhd) +{ + int ret = 0; + wl_scb_probe_t scb_probe; + char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)]; + + memset(&scb_probe, 0, sizeof(wl_scb_probe_t)); + + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + return; + } + + bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf)); + + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { + DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__)); + } + + memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t)); + + scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE; + + bcm_mkiovar("scb_probe", (char *)&scb_probe, + sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__)); + return; + } +} +#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) +static void +dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); + + snprintf(info->driver, sizeof(info->driver), "wl"); + snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version); +} + +struct ethtool_ops dhd_ethtool_ops = { + .get_drvinfo = dhd_ethtool_get_drvinfo +}; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ + + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) +static int +dhd_ethtool(dhd_info_t *dhd, void *uaddr) +{ + struct ethtool_drvinfo info; + char drvname[sizeof(info.driver)]; + uint32 cmd; +#ifdef TOE + struct ethtool_value edata; + uint32 toe_cmpnt, csum_dir; + int ret; +#endif + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* all ethtool calls start with a cmd word */ + if (copy_from_user(&cmd, uaddr, sizeof (uint32))) + return -EFAULT; + + switch (cmd) { + case ETHTOOL_GDRVINFO: + /* Copy out any request driver name */ + if (copy_from_user(&info, uaddr, sizeof(info))) + return -EFAULT; + strncpy(drvname, info.driver, sizeof(info.driver)); + drvname[sizeof(info.driver)-1] = '\0'; + + /* clear struct for return */ + memset(&info, 0, sizeof(info)); + info.cmd = cmd; + + /* if dhd requested, identify ourselves */ + if (strcmp(drvname, "?dhd") == 0) { + snprintf(info.driver, sizeof(info.driver), "dhd"); + strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1); + info.version[sizeof(info.version) - 1] = '\0'; + } + + /* otherwise, require dongle to be up */ + else if (!dhd->pub.up) { + DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__)); + return -ENODEV; + } + + /* finally, report dongle driver type */ + else if (dhd->pub.iswl) + snprintf(info.driver, sizeof(info.driver), "wl"); + else + snprintf(info.driver, sizeof(info.driver), "xx"); + + snprintf(info.version, 
sizeof(info.version), "%lu", dhd->pub.drv_version); + if (copy_to_user(uaddr, &info, sizeof(info))) + return -EFAULT; + DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__, + (int)sizeof(drvname), drvname, info.driver)); + break; + +#ifdef TOE + /* Get toe offload components from dongle */ + case ETHTOOL_GRXCSUM: + case ETHTOOL_GTXCSUM: + if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0) + return ret; + + csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL; + + edata.cmd = cmd; + edata.data = (toe_cmpnt & csum_dir) ? 1 : 0; + + if (copy_to_user(uaddr, &edata, sizeof(edata))) + return -EFAULT; + break; + + /* Set toe offload components in dongle */ + case ETHTOOL_SRXCSUM: + case ETHTOOL_STXCSUM: + if (copy_from_user(&edata, uaddr, sizeof(edata))) + return -EFAULT; + + /* Read the current settings, update and write back */ + if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0) + return ret; + + csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL; + + if (edata.data != 0) + toe_cmpnt |= csum_dir; + else + toe_cmpnt &= ~csum_dir; + + if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0) + return ret; + + /* If setting TX checksum mode, tell Linux the new mode */ + if (cmd == ETHTOOL_STXCSUM) { + if (edata.data) + dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM; + else + dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM; + } + + break; +#endif /* TOE */ + + default: + return -EOPNOTSUPP; + } + + return 0; +} +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */ + +static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error) +{ + dhd_info_t *dhd; + + if (!dhdp) { + DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__)); + return FALSE; + } + + if (!dhdp->up) + return FALSE; + + dhd = (dhd_info_t *)dhdp->info; +#if !defined(BCMPCIE) + if (dhd->thr_dpc_ctl.thr_pid < 0) { + DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__)); + return FALSE; + } +#endif + + if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) || + ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) { +#ifdef BCMPCIE + DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n", + __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout, + dhdp->d3ackcnt_timeout, error, dhdp->busstate)); +#else + DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__, + dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate)); +#endif /* BCMPCIE */ + if (dhdp->hang_reason == 0) { + if (dhdp->dongle_trap_occured) { + dhdp->hang_reason = HANG_REASON_DONGLE_TRAP; +#ifdef BCMPCIE + } else if (dhdp->d3ackcnt_timeout) { + dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT; +#endif /* BCMPCIE */ + } else { + dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT; + } + } + net_os_send_hang_message(net); + return TRUE; + } + return FALSE; +} + +int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf) +{ + int bcmerror = BCME_OK; + int buflen = 0; + struct net_device *net; + + net = dhd_idx2net(pub, ifidx); + if (!net) { + bcmerror = BCME_BADARG; + goto done; + } + + if (data_buf) + buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN); + + /* check for local dhd ioctl and handle it */ + if (ioc->driver == DHD_IOCTL_MAGIC) { + bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen); + if (bcmerror) + pub->bcmerror = bcmerror; + goto done; + } + + /* send to dongle (must be up, and wl). 
*/ + if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) { + if (allow_delay_fwdl) { + int ret = dhd_bus_start(pub); + if (ret != 0) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + } else { + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + } + + if (!pub->iswl) { + bcmerror = BCME_DONGLE_DOWN; + goto done; + } + + /* + * Flush the TX queue if required for proper message serialization: + * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to + * prevent M4 encryption and + * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to + * prevent disassoc frame being sent before WPS-DONE frame. + */ + if (ioc->cmd == WLC_SET_KEY || + (ioc->cmd == WLC_SET_VAR && data_buf != NULL && + strncmp("wsec_key", data_buf, 9) == 0) || + (ioc->cmd == WLC_SET_VAR && data_buf != NULL && + strncmp("bsscfg:wsec_key", data_buf, 15) == 0) || + ioc->cmd == WLC_DISASSOC) + dhd_wait_pend8021x(net); + +#ifdef WLMEDIA_HTSF + if (data_buf) { + /* short cut wl ioctl calls here */ + if (strcmp("htsf", data_buf) == 0) { + dhd_ioctl_htsf_get(dhd, 0); + return BCME_OK; + } + + if (strcmp("htsflate", data_buf) == 0) { + if (ioc->set) { + memset(ts, 0, sizeof(tstamp_t)*TSMAX); + memset(&maxdelayts, 0, sizeof(tstamp_t)); + maxdelay = 0; + tspktcnt = 0; + maxdelaypktno = 0; + memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN); + } else { + dhd_dump_latency(); + } + return BCME_OK; + } + if (strcmp("htsfclear", data_buf) == 0) { + memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN); + memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN); + htsf_seqnum = 0; + return BCME_OK; + } + if (strcmp("htsfhis", data_buf) == 0) { + dhd_dump_htsfhisto(&vi_d1, "H to D"); + dhd_dump_htsfhisto(&vi_d2, "D to D"); + dhd_dump_htsfhisto(&vi_d3, "D to H"); + dhd_dump_htsfhisto(&vi_d4, "H to H"); + return BCME_OK; + } + if (strcmp("tsport", data_buf) == 0) { + if (ioc->set) { + memcpy(&tsport, data_buf + 7, 4); + } else { + DHD_ERROR(("current timestamp port: %d \n", tsport)); + } + return BCME_OK; + } + } +#endif /* WLMEDIA_HTSF */ + + if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) && + data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) { +#ifdef BCM_FD_AGGR + bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen); +#else + bcmerror = BCME_UNSUPPORTED; +#endif + goto done; + } + +#ifdef DHD_DEBUG + if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) { + if (ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) { + /* Print IOVAR Information */ + DHD_IOV_INFO(("%s: IOVAR_INFO name = %s set = %d\n", + __FUNCTION__, (char *)data_buf, ioc->set)); + if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) { + prhex(NULL, data_buf + strlen(data_buf) + 1, + buflen - strlen(data_buf) - 1); + } + } else { + /* Print IOCTL Information */ + DHD_IOV_INFO(("%s: IOCTL_INFO cmd = %d set = %d\n", + __FUNCTION__, ioc->cmd, ioc->set)); + if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) { + prhex(NULL, data_buf, buflen); + } + } + } +#endif /* DHD_DEBUG */ + + bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen); + +done: + dhd_check_hang(net, pub, bcmerror); + + return bcmerror; +} + +static int +dhd_ioctl_entry(struct net_device *net, struct ifreq 
*ifr, int cmd) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_ioctl_t ioc; + int ifidx; + int ret; + void *local_buf = NULL; + u16 buflen = 0; + + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + + /* Interface up check for built-in type */ + if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) { + DHD_TRACE(("%s: Interface is down \n", __FUNCTION__)); + ret = BCME_NOTUP; + goto exit; + } + + /* send to dongle only if we are not waiting for reload already */ + if (dhd->pub.hang_was_sent) { + DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__)); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS); + ret = BCME_DONGLE_DOWN; + goto exit; + } + + ifidx = dhd_net2idx(dhd, net); + DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd)); + + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s: BAD IF\n", __FUNCTION__)); + ret = -1; + goto exit; + } + +#if defined(WL_WIRELESS_EXT) + /* linux wireless extensions */ + if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) { + /* may recurse, do NOT lock */ + ret = wl_iw_ioctl(net, ifr, cmd); + goto exit; + } +#endif /* defined(WL_WIRELESS_EXT) */ + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) + if (cmd == SIOCETHTOOL) { + ret = dhd_ethtool(dhd, (void*)ifr->ifr_data); + goto exit; + } +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */ + + if (cmd == SIOCDEVPRIVATE+1) { + ret = wl_android_priv_cmd(net, ifr, cmd); + dhd_check_hang(net, &dhd->pub, ret); + goto exit; + } + + if (cmd != SIOCDEVPRIVATE) { + ret = -EOPNOTSUPP; + goto exit; + } + + memset(&ioc, 0, sizeof(ioc)); + +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) { + compat_wl_ioctl_t compat_ioc; + if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) { + ret = BCME_BADADDR; + goto done; + } + ioc.cmd = compat_ioc.cmd; + ioc.buf = compat_ptr(compat_ioc.buf); + ioc.len = compat_ioc.len; + ioc.set = compat_ioc.set; + ioc.used = compat_ioc.used; + ioc.needed = compat_ioc.needed; + /* To differentiate between wl and dhd read 4 more byes */ + if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t), + sizeof(uint)) != 0)) { + ret = BCME_BADADDR; + goto done; + } + } else +#endif /* CONFIG_COMPAT */ + { + /* Copy the ioc control structure part of ioctl request */ + if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) { + ret = BCME_BADADDR; + goto done; + } + + /* To differentiate between wl and dhd read 4 more byes */ + if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t), + sizeof(uint)) != 0)) { + ret = BCME_BADADDR; + goto done; + } + } + + if (!capable(CAP_NET_ADMIN)) { + ret = BCME_EPERM; + goto done; + } + + if (ioc.len > 0) { + buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN); + if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) { + ret = BCME_NOMEM; + goto done; + } + + DHD_PERIM_UNLOCK(&dhd->pub); + if (copy_from_user(local_buf, ioc.buf, buflen)) { + DHD_PERIM_LOCK(&dhd->pub); + ret = BCME_BADADDR; + goto done; + } + DHD_PERIM_LOCK(&dhd->pub); + + *(char *)(local_buf + buflen) = '\0'; + } + + ret = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf); + + if (!ret && buflen && local_buf && ioc.buf) { + DHD_PERIM_UNLOCK(&dhd->pub); + if (copy_to_user(ioc.buf, local_buf, buflen)) + ret = -EFAULT; + DHD_PERIM_LOCK(&dhd->pub); + } + +done: + if (local_buf) + MFREE(dhd->pub.osh, local_buf, buflen+1); + +exit: + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + return OSL_ERROR(ret); +} + + +#ifdef FIX_CPU_MIN_CLOCK +static int 
dhd_init_cpufreq_fix(dhd_info_t *dhd)
+{
+	if (dhd) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+		mutex_init(&dhd->cpufreq_fix);
+#endif
+		dhd->cpufreq_fix_status = FALSE;
+	}
+	return 0;
+}
+
+static void dhd_fix_cpu_freq(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_lock(&dhd->cpufreq_fix);
+#endif
+	if (dhd && !dhd->cpufreq_fix_status) {
+		pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
+#ifdef FIX_BUS_MIN_CLOCK
+		pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
+#endif /* FIX_BUS_MIN_CLOCK */
+		DHD_ERROR(("pm_qos_add_requests called\n"));
+
+		dhd->cpufreq_fix_status = TRUE;
+	}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_unlock(&dhd->cpufreq_fix);
+#endif
+}
+
+static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_lock(&dhd->cpufreq_fix);
+#endif
+	if (dhd && dhd->cpufreq_fix_status != TRUE) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+		mutex_unlock(&dhd->cpufreq_fix);
+#endif
+		return;
+	}
+
+	pm_qos_remove_request(&dhd->dhd_cpu_qos);
+#ifdef FIX_BUS_MIN_CLOCK
+	pm_qos_remove_request(&dhd->dhd_bus_qos);
+#endif /* FIX_BUS_MIN_CLOCK */
+	DHD_ERROR(("pm_qos_remove_requests called\n"));
+
+	dhd->cpufreq_fix_status = FALSE;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_unlock(&dhd->cpufreq_fix);
+#endif
+}
+#endif /* FIX_CPU_MIN_CLOCK */
+
+static int
+dhd_stop(struct net_device *net)
+{
+	int ifidx = 0;
+	dhd_info_t *dhd = DHD_DEV_INFO(net);
+	DHD_OS_WAKE_LOCK(&dhd->pub);
+	DHD_PERIM_LOCK(&dhd->pub);
+	DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net));
+	dhd->pub.rxcnt_timeout = 0;
+	dhd->pub.txcnt_timeout = 0;
+
+#ifdef BCMPCIE
+	dhd->pub.d3ackcnt_timeout = 0;
+#endif /* BCMPCIE */
+
+	if (dhd->pub.up == 0) {
+		goto exit;
+	}
+
+	dhd_if_flush_sta(DHD_DEV_IFP(net));
+
+	/* Disable Runtime PM before interface down */
+	DHD_DISABLE_RUNTIME_PM(&dhd->pub);
+
+#ifdef FIX_CPU_MIN_CLOCK
+	if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
+		dhd_rollback_cpu_freq(dhd);
+#endif /* FIX_CPU_MIN_CLOCK */
+
+	ifidx = dhd_net2idx(dhd, net);
+	BCM_REFERENCE(ifidx);
+
+	/* Set state and stop OS transmissions */
+	netif_stop_queue(net);
+	dhd->pub.up = 0;
+
+#ifdef WL_CFG80211
+	if (ifidx == 0) {
+		dhd_if_t *ifp;
+		wl_cfg80211_down(NULL);
+
+		ifp = dhd->iflist[0];
+		ASSERT(ifp && ifp->net);
+		/*
+		 * For CFG80211: Clean up all the left over virtual interfaces
+		 * when the primary Interface is brought down. [ifconfig wlan0 down]
+		 */
+		if (!dhd_download_fw_on_driverload) {
+			if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
+				(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+				int i;
+
+#ifdef WL_CFG80211_P2P_DEV_IF
+				wl_cfg80211_del_p2p_wdev();
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+				dhd_net_if_lock_local(dhd);
+				for (i = 1; i < DHD_MAX_IFS; i++)
+					dhd_remove_if(&dhd->pub, i, FALSE);
+
+				if (ifp && ifp->net) {
+					dhd_if_del_sta_list(ifp);
+				}
+
+#ifdef ARP_OFFLOAD_SUPPORT
+				if (dhd_inetaddr_notifier_registered) {
+					dhd_inetaddr_notifier_registered = FALSE;
+					unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
+				}
+#endif /* ARP_OFFLOAD_SUPPORT */
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+				if (dhd_inet6addr_notifier_registered) {
+					dhd_inet6addr_notifier_registered = FALSE;
+					unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
+				}
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+				dhd_net_if_unlock_local(dhd);
+			}
+			cancel_work_sync(dhd->dhd_deferred_wq);
+#if defined(DHD_LB) && defined(DHD_LB_RXP)
+			__skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB && DHD_LB_RXP */
+		}
+
+#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
+		dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
+#if defined(DHD_LB) && defined(DHD_LB_RXP)
+		if (ifp->net == dhd->rx_napi_netdev) {
+			DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
+				__FUNCTION__, &dhd->rx_napi_struct, net, net->name));
+			skb_queue_purge(&dhd->rx_napi_queue);
+			napi_disable(&dhd->rx_napi_struct);
+			netif_napi_del(&dhd->rx_napi_struct);
+			dhd->rx_napi_netdev = NULL;
+		}
+#endif /* DHD_LB && DHD_LB_RXP */
+
+	}
+#endif /* WL_CFG80211 */
+
+#ifdef PROP_TXSTATUS
+	dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
+#endif
+	/* Stop the protocol module */
+	dhd_prot_stop(&dhd->pub);
+
+	OLD_MOD_DEC_USE_COUNT;
+exit:
+#if defined(WL_CFG80211)
+	if (ifidx == 0 && !dhd_download_fw_on_driverload)
+		wl_android_wifi_off(net, TRUE);
+#endif
+	dhd->pub.hang_was_sent = 0;
+
+	/* Clear country spec for built-in type driver */
+	if (!dhd_download_fw_on_driverload) {
+		dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
+		dhd->pub.dhd_cspec.rev = 0;
+		dhd->pub.dhd_cspec.ccode[0] = 0x00;
+	}
+
+#ifdef BCMDBGFS
+	dhd_dbg_remove();
+#endif
+
+	DHD_PERIM_UNLOCK(&dhd->pub);
+	DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+	/* Destroy wakelock */
+	if (!dhd_download_fw_on_driverload &&
+		(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+		DHD_OS_WAKE_LOCK_DESTROY(dhd);
+		dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
+	}
+
+	return 0;
+}
+
+#if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
+extern bool g_first_broadcast_scan;
+#endif
+
+#ifdef WL11U
+static int dhd_interworking_enable(dhd_pub_t *dhd)
+{
+	char iovbuf[WLC_IOCTL_SMLEN];
+	uint32 enable = true;
+	int ret = BCME_OK;
+
+	bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+	if (ret < 0) {
+		DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
+	}
+
+	if (ret == BCME_OK) {
+		/* basic capabilities for HS20 REL2 */
+		uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
+		bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
+		ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+		if (ret < 0) {
+			DHD_ERROR(("%s: set wnm returned (%d)\n", __FUNCTION__, ret));
+		}
+	}
+
+	return ret;
+}
+#endif /* WL11U */
+
+static int
+dhd_open(struct net_device *net)
+{
+	dhd_info_t *dhd = 
DHD_DEV_INFO(net); +#ifdef TOE + uint32 toe_ol; +#endif +#ifdef BCM_FD_AGGR + char iovbuf[WLC_IOCTL_SMLEN]; + dbus_config_t config; + uint32 agglimit = 0; + uint32 rpc_agg = BCM_RPC_TP_DNGL_AGG_DPC; /* host aggr not enabled yet */ +#endif /* BCM_FD_AGGR */ + int ifidx; + int32 ret = 0; + + if (!dhd_download_fw_on_driverload && !dhd_driver_init_done) { + DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__)); + return -1; + } + + /* Init wakelock */ + if (!dhd_download_fw_on_driverload && + !(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) { + DHD_OS_WAKE_LOCK_INIT(dhd); + dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT; + } + +#ifdef PREVENT_REOPEN_DURING_HANG + /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */ + if (dhd->pub.hang_was_sent == 1) { + DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__)); + /* Force to bring down WLAN interface in case dhd_stop() is not called + * from the upper layer when HANG event is triggered. + */ + if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) { + DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__)); + dhd_stop(net); + } else { + return -1; + } + } +#endif /* PREVENT_REOPEN_DURING_HANG */ + + + DHD_OS_WAKE_LOCK(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + dhd->pub.dongle_trap_occured = 0; + dhd->pub.hang_was_sent = 0; + dhd->pub.hang_reason = 0; +#ifdef DHD_LOSSLESS_ROAMING + dhd->pub.dequeue_prec_map = ALLPRIO; +#endif +#if !defined(WL_CFG80211) + /* + * Force start if ifconfig_up gets called before START command + * We keep WEXT's wl_control_wl_start to provide backward compatibility + * This should be removed in the future + */ + ret = wl_control_wl_start(net); + if (ret != 0) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + ret = -1; + goto exit; + } + +#endif + + ifidx = dhd_net2idx(dhd, net); + DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx)); + + if (ifidx < 0) { + DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__)); + ret = -1; + goto exit; + } + + if (!dhd->iflist[ifidx]) { + DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__)); + ret = -1; + goto exit; + } + + if (ifidx == 0) { + atomic_set(&dhd->pend_8021x_cnt, 0); +#if defined(WL_CFG80211) + if (!dhd_download_fw_on_driverload) { + DHD_ERROR(("\n%s\n", dhd_version)); +#if defined(USE_INITIAL_SHORT_DWELL_TIME) + g_first_broadcast_scan = TRUE; +#endif + ret = wl_android_wifi_on(net); + if (ret != 0) { + DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n", + __FUNCTION__, ret)); + ret = -1; + goto exit; + } + } +#ifdef FIX_CPU_MIN_CLOCK + if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) { + dhd_init_cpufreq_fix(dhd); + dhd_fix_cpu_freq(dhd); + } +#endif /* FIX_CPU_MIN_CLOCK */ +#endif + + if (dhd->pub.busstate != DHD_BUS_DATA) { + + /* try to bring up bus */ + DHD_PERIM_UNLOCK(&dhd->pub); + ret = dhd_bus_start(&dhd->pub); + DHD_PERIM_LOCK(&dhd->pub); + if (ret) { + DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); + ret = -1; + goto exit; + } + + } + +#ifdef BCM_FD_AGGR + config.config_id = DBUS_CONFIG_ID_AGGR_LIMIT; + + + memset(iovbuf, 0, sizeof(iovbuf)); + bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit, 4, + iovbuf, sizeof(iovbuf)); + + if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) { + agglimit = *(uint32 *)iovbuf; + config.aggr_param.maxrxsf = agglimit >> BCM_RPC_TP_AGG_SF_SHIFT; + config.aggr_param.maxrxsize = agglimit & BCM_RPC_TP_AGG_BYTES_MASK; + DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d 
bytes_limit %d\n", + agglimit, config.aggr_param.maxrxsf, config.aggr_param.maxrxsize)); + if (bcm_rpc_tp_set_config(dhd->pub.info->rpc_th, &config)) { + DHD_ERROR(("set tx/rx queue size and buffersize failed\n")); + } + } else { + DHD_ERROR(("get rpc_dngl_agglimit failed\n")); + rpc_agg &= ~BCM_RPC_TP_DNGL_AGG_DPC; + } + + /* Set aggregation for TX */ + bcm_rpc_tp_agg_set(dhd->pub.info->rpc_th, BCM_RPC_TP_HOST_AGG_MASK, + rpc_agg & BCM_RPC_TP_HOST_AGG_MASK); + + /* Set aggregation for RX */ + memset(iovbuf, 0, sizeof(iovbuf)); + bcm_mkiovar("rpc_agg", (char *)&rpc_agg, sizeof(rpc_agg), iovbuf, sizeof(iovbuf)); + if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) { + dhd->pub.info->fdaggr = 0; + if (rpc_agg & BCM_RPC_TP_HOST_AGG_MASK) + dhd->pub.info->fdaggr |= BCM_FDAGGR_H2D_ENABLED; + if (rpc_agg & BCM_RPC_TP_DNGL_AGG_MASK) + dhd->pub.info->fdaggr |= BCM_FDAGGR_D2H_ENABLED; + } else { + DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__, ret)); + } +#endif /* BCM_FD_AGGR */ + + /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */ + memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN); + +#ifdef TOE + /* Get current TOE mode from dongle */ + if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) { + dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM; + } else { + dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM; + } +#endif /* TOE */ + +#if defined(WL_CFG80211) + if (unlikely(wl_cfg80211_up(NULL))) { + DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__)); + ret = -1; + goto exit; + } + if (!dhd_download_fw_on_driverload) { +#ifdef ARP_OFFLOAD_SUPPORT + dhd->pend_ipaddr = 0; + if (!dhd_inetaddr_notifier_registered) { + dhd_inetaddr_notifier_registered = TRUE; + register_inetaddr_notifier(&dhd_inetaddr_notifier); + } +#endif /* ARP_OFFLOAD_SUPPORT */ +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) + if (!dhd_inet6addr_notifier_registered) { + dhd_inet6addr_notifier_registered = TRUE; + register_inet6addr_notifier(&dhd_inet6addr_notifier); + } +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ +#ifdef DHD_LB + DHD_LB_STATS_INIT(&dhd->pub); +#ifdef DHD_LB_RXP + __skb_queue_head_init(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ +#endif /* DHD_LB */ + } + +#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS) +#if defined(SET_RPS_CPUS) + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); +#else + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD); +#endif +#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */ +#if defined(DHD_LB) && defined(DHD_LB_RXP) + if (dhd->rx_napi_netdev == NULL) { + dhd->rx_napi_netdev = dhd->iflist[ifidx]->net; + memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct)); + netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct, + dhd_napi_poll, dhd_napi_weight); + DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n", + __FUNCTION__, &dhd->rx_napi_struct, net, net->name)); + napi_enable(&dhd->rx_napi_struct); + DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__)); + skb_queue_head_init(&dhd->rx_napi_queue); + } +#endif /* DHD_LB && DHD_LB_RXP */ +#if defined(NUM_SCB_MAX_PROBE) + dhd_set_scb_probe(&dhd->pub); +#endif /* NUM_SCB_MAX_PROBE */ +#endif /* WL_CFG80211 */ + } + + /* Allow transmit calls */ + netif_start_queue(net); + dhd->pub.up = 1; + + OLD_MOD_INC_USE_COUNT; + +#ifdef BCMDBGFS + dhd_dbg_init(&dhd->pub); +#endif + +exit: + if (ret) { + dhd_stop(net); + } + + DHD_PERIM_UNLOCK(&dhd->pub); + DHD_OS_WAKE_UNLOCK(&dhd->pub); + + + return 
ret;
+}
+
+int dhd_do_driver_init(struct net_device *net)
+{
+	dhd_info_t *dhd = NULL;
+
+	if (!net) {
+		DHD_ERROR(("Primary Interface not initialized \n"));
+		return -EINVAL;
+	}
+
+	dhd = DHD_DEV_INFO(net);
+
+	/* If driver is already initialized, do nothing */
+	if (dhd->pub.busstate == DHD_BUS_DATA) {
+		DHD_TRACE(("Driver already initialized. Nothing to do"));
+		return 0;
+	}
+
+	if (dhd_open(net) < 0) {
+		DHD_ERROR(("Driver Init Failed \n"));
+		return -1;
+	}
+
+	return 0;
+}
+
+int
+dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+{
+
+#ifdef WL_CFG80211
+	if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
+		return BCME_OK;
+#endif
+
+	/* handle IF event caused by wl commands, SoftAP, WEXT and
+	 * anything else. This has to be done asynchronously otherwise
+	 * DPC will be blocked (and iovars will timeout as DPC has no chance
+	 * to read the response back)
+	 */
+	if (ifevent->ifidx > 0) {
+		dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+		if (if_event == NULL) {
+			DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
+				MALLOCED(dhdinfo->pub.osh)));
+			return BCME_NOMEM;
+		}
+
+		memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+		memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+		strncpy(if_event->name, name, IFNAMSIZ);
+		if_event->name[IFNAMSIZ - 1] = '\0';
+		dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
+			DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
+	}
+
+	return BCME_OK;
+}
+
+int
+dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+{
+	dhd_if_event_t *if_event;
+
+#ifdef WL_CFG80211
+	if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
+		return BCME_OK;
+#endif /* WL_CFG80211 */
+
+	/* handle IF event caused by wl commands, SoftAP, WEXT and
+	 * anything else
+	 */
+	if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+	if (if_event == NULL) {
+		DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
+			MALLOCED(dhdinfo->pub.osh)));
+		return BCME_NOMEM;
+	}
+	memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+	memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+	strncpy(if_event->name, name, IFNAMSIZ);
+	if_event->name[IFNAMSIZ - 1] = '\0';
+	dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
+		dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
+
+	return BCME_OK;
+}
+
+/* unregister and free the existing net_device interface (if any) in iflist and
+ * allocate a new one. the slot is reused. this function does NOT register the
+ * new interface to linux kernel. 
dhd_register_if does the job + */ +struct net_device* +dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name, + uint8 *mac, uint8 bssidx, bool need_rtnl_lock, char *dngl_name) +{ + dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info; + dhd_if_t *ifp; + + ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS)); + ifp = dhdinfo->iflist[ifidx]; + + if (ifp != NULL) { + if (ifp->net != NULL) { + DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name)); + + dhd_dev_priv_clear(ifp->net); /* clear net_device private */ + + /* in unregister_netdev case, the interface gets freed by net->destructor + * (which is set to free_netdev) + */ + if (ifp->net->reg_state == NETREG_UNINITIALIZED) { + free_netdev(ifp->net); + } else { + netif_stop_queue(ifp->net); + if (need_rtnl_lock) + unregister_netdev(ifp->net); + else + unregister_netdevice(ifp->net); + } + ifp->net = NULL; + } + } else { + ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t)); + if (ifp == NULL) { + DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t))); + return NULL; + } + } + + memset(ifp, 0, sizeof(dhd_if_t)); + ifp->info = dhdinfo; + ifp->idx = ifidx; + ifp->bssidx = bssidx; + if (mac != NULL) + memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN); + + /* Allocate etherdev, including space for private structure */ + ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE); + if (ifp->net == NULL) { + DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo))); + goto fail; + } + + /* Setup the dhd interface's netdevice private structure. */ + dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx); + + if (name && name[0]) { + strncpy(ifp->net->name, name, IFNAMSIZ); + ifp->net->name[IFNAMSIZ - 1] = '\0'; + } + + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + ifp->net->needs_free_netdev = true; +#ifdef WL_CFG80211 + if (ifidx == 0) + ifp->net->priv_destructor = free_netdev; + else + ifp->net->priv_destructor = dhd_netdev_free; +#else + ifp->net->priv_destructor = free_netdev; +#endif /* WL_CFG80211 */ +#else +#ifdef WL_CFG80211 + if (ifidx == 0) + ifp->net->destructor = free_netdev; + else + ifp->net->destructor = dhd_netdev_free; +#else + ifp->net->destructor = free_netdev; +#endif /* WL_CFG80211 */ +#endif + strncpy(ifp->name, ifp->net->name, IFNAMSIZ); + ifp->name[IFNAMSIZ - 1] = '\0'; + dhdinfo->iflist[ifidx] = ifp; + +/* initialize the dongle provided if name */ + if (dngl_name) + strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ); + else + strncpy(ifp->dngl_name, name, IFNAMSIZ); + +#ifdef PCIE_FULL_DONGLE + /* Initialize STA info list */ + INIT_LIST_HEAD(&ifp->sta_list); + DHD_IF_STA_LIST_LOCK_INIT(ifp); +#endif /* PCIE_FULL_DONGLE */ + +#ifdef DHD_L2_FILTER + ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh); + ifp->parp_allnode = TRUE; +#endif + return ifp->net; + +fail: + + if (ifp != NULL) { + if (ifp->net != NULL) { + dhd_dev_priv_clear(ifp->net); + free_netdev(ifp->net); + ifp->net = NULL; + } + MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp)); + ifp = NULL; + } + + dhdinfo->iflist[ifidx] = NULL; + return NULL; +} + +/* unregister and free the the net_device interface associated with the indexed + * slot, also free the slot memory and set the slot pointer to NULL + */ +int +dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock) +{ + dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info; + dhd_if_t *ifp; + + ifp = dhdinfo->iflist[ifidx]; + + if (ifp != NULL) { + if (ifp->net != NULL) { + DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx)); + + /* in unregister_netdev case, the 
interface gets freed by net->destructor + * (which is set to free_netdev) + */ + if (ifp->net->reg_state == NETREG_UNINITIALIZED) { + free_netdev(ifp->net); + } else { + netif_tx_disable(ifp->net); + + + +#if defined(SET_RPS_CPUS) + custom_rps_map_clear(ifp->net->_rx); +#endif /* SET_RPS_CPUS */ +#if defined(SET_RPS_CPUS) +#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE)) + dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF); +#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */ +#endif + if (need_rtnl_lock) + unregister_netdev(ifp->net); + else + unregister_netdevice(ifp->net); + } + ifp->net = NULL; + dhdinfo->iflist[ifidx] = NULL; + } +#ifdef DHD_WMF + dhd_wmf_cleanup(dhdpub, ifidx); +#endif /* DHD_WMF */ +#ifdef DHD_L2_FILTER + bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE, + NULL, FALSE, dhdpub->tickcnt); + deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table); + ifp->phnd_arp_table = NULL; +#endif /* DHD_L2_FILTER */ + + dhd_if_del_sta_list(ifp); + + MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp)); + + } + + return BCME_OK; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) +static struct net_device_ops dhd_ops_pri = { + .ndo_open = dhd_open, + .ndo_stop = dhd_stop, + .ndo_get_stats = dhd_get_stats, + .ndo_do_ioctl = dhd_ioctl_entry, + .ndo_start_xmit = dhd_start_xmit, + .ndo_set_mac_address = dhd_set_mac_address, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + .ndo_set_rx_mode = dhd_set_multicast_list, +#else + .ndo_set_multicast_list = dhd_set_multicast_list, +#endif +}; + +static struct net_device_ops dhd_ops_virt = { + .ndo_get_stats = dhd_get_stats, + .ndo_do_ioctl = dhd_ioctl_entry, + .ndo_start_xmit = dhd_start_xmit, + .ndo_set_mac_address = dhd_set_mac_address, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + .ndo_set_rx_mode = dhd_set_multicast_list, +#else + .ndo_set_multicast_list = dhd_set_multicast_list, +#endif +}; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */ + +#ifdef DEBUGGER +extern void debugger_init(void *bus_handle); +#endif + + +#ifdef SHOW_LOGTRACE +static char *logstrs_path = "/root/logstrs.bin"; +static char *st_str_file_path = "/root/rtecdc.bin"; +static char *map_file_path = "/root/rtecdc.map"; +static char *rom_st_str_file_path = "/root/roml.bin"; +static char *rom_map_file_path = "/root/roml.map"; + +#define BYTES_AHEAD_NUM 11 /* address in map file is before these many bytes */ +#define READ_NUM_BYTES 1000 /* read map file each time this No. 
of bytes */ +#define GO_BACK_FILE_POS_NUM_BYTES 100 /* set file pos back to cur pos */ +static char *ramstart_str = "text_start"; /* string in mapfile has addr ramstart */ +static char *rodata_start_str = "rodata_start"; /* string in mapfile has addr rodata start */ +static char *rodata_end_str = "rodata_end"; /* string in mapfile has addr rodata end */ +static char *ram_file_str = "rtecdc"; +static char *rom_file_str = "roml"; +#define RAMSTART_BIT 0x01 +#define RDSTART_BIT 0x02 +#define RDEND_BIT 0x04 +#define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT) + +module_param(logstrs_path, charp, S_IRUGO); +module_param(st_str_file_path, charp, S_IRUGO); +module_param(map_file_path, charp, S_IRUGO); +module_param(rom_st_str_file_path, charp, S_IRUGO); +module_param(rom_map_file_path, charp, S_IRUGO); + +static void +dhd_init_logstrs_array(dhd_event_log_t *temp) +{ + struct file *filep = NULL; + struct kstat stat; + mm_segment_t fs; + char *raw_fmts = NULL; + int logstrs_size = 0; + + logstr_header_t *hdr = NULL; + uint32 *lognums = NULL; + char *logstrs = NULL; + int ram_index = 0; + char **fmts; + int num_fmts = 0; + uint32 i = 0; + int error = 0; + + fs = get_fs(); + set_fs(KERNEL_DS); + + filep = filp_open(logstrs_path, O_RDONLY, 0); + + if (IS_ERR(filep)) { + DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path)); + goto fail; + } + error = vfs_stat(logstrs_path, &stat); + if (error) { + DHD_ERROR(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path)); + goto fail; + } + logstrs_size = (int) stat.size; + + raw_fmts = kmalloc(logstrs_size, GFP_KERNEL); + if (raw_fmts == NULL) { + DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__)); + goto fail; + } + if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) { + DHD_ERROR(("%s: Failed to read file %s", __FUNCTION__, logstrs_path)); + goto fail; + } + + /* Remember header from the logstrs.bin file */ + hdr = (logstr_header_t *) (raw_fmts + logstrs_size - + sizeof(logstr_header_t)); + + if (hdr->log_magic == LOGSTRS_MAGIC) { + /* + * logstrs.bin start with header. + */ + num_fmts = hdr->rom_logstrs_offset / sizeof(uint32); + ram_index = (hdr->ram_lognums_offset - + hdr->rom_lognums_offset) / sizeof(uint32); + lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset]; + logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset]; + } else { + /* + * Legacy logstrs.bin format without header. + */ + num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32); + if (num_fmts == 0) { + /* Legacy ROM/RAM logstrs.bin format: + * - ROM 'lognums' section + * - RAM 'lognums' section + * - ROM 'logstrs' section. + * - RAM 'logstrs' section. + * + * 'lognums' is an array of indexes for the strings in the + * 'logstrs' section. The first uint32 is 0 (index of first + * string in ROM 'logstrs' section). + * + * The 4324b5 is the only ROM that uses this legacy format. Use the + * fixed number of ROM fmtnums to find the start of the RAM + * 'lognums' section. Use the fixed first ROM string ("Con\n") to + * find the ROM 'logstrs' section. + */ + #define NUM_4324B5_ROM_FMTS 186 + #define FIRST_4324B5_ROM_LOGSTR "Con\n" + ram_index = NUM_4324B5_ROM_FMTS; + lognums = (uint32 *) raw_fmts; + num_fmts = ram_index; + logstrs = (char *) &raw_fmts[num_fmts << 2]; + while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) { + num_fmts++; + logstrs = (char *) &raw_fmts[num_fmts << 2]; + } + } else { + /* Legacy RAM-only logstrs.bin format: + * - RAM 'lognums' section + * - RAM 'logstrs' section. 
+ *
+ * 'lognums' is an array of indexes for the strings in the
+ * 'logstrs' section. The first uint32 is an index to the
+ * start of 'logstrs'. Therefore, if this index is divided
+ * by 'sizeof(uint32)' it provides the number of logstr
+ * entries.
+ */
+ ram_index = 0;
+ lognums = (uint32 *) raw_fmts;
+ logstrs = (char *) &raw_fmts[num_fmts << 2];
+ }
+ }
+ fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL);
+ if (fmts == NULL) {
+ DHD_ERROR(("Failed to allocate fmts memory"));
+ goto fail;
+ }
+
+ for (i = 0; i < num_fmts; i++) {
+ /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
+ * (they are 0-indexed relative to 'rom_logstrs_offset').
+ *
+ * RAM lognums are already indexed to point to the correct RAM logstrs (they
+ * are 0-indexed relative to the start of the logstrs.bin file).
+ */
+ if (i == ram_index) {
+ logstrs = raw_fmts;
+ }
+ fmts[i] = &logstrs[lognums[i]];
+ }
+ temp->fmts = fmts;
+ temp->raw_fmts = raw_fmts;
+ temp->num_fmts = num_fmts;
+ filp_close(filep, NULL);
+ set_fs(fs);
+ return;
+fail:
+ if (raw_fmts) {
+ kfree(raw_fmts);
+ raw_fmts = NULL;
+ }
+ if (!IS_ERR(filep))
+ filp_close(filep, NULL);
+ set_fs(fs);
+ temp->fmts = NULL;
+ return;
+}
+
+static int
+dhd_read_map(char *fname, uint32 *ramstart, uint32 *rodata_start,
+ uint32 *rodata_end)
+{
+ struct file *filep = NULL;
+ mm_segment_t fs;
+ char *raw_fmts = NULL;
+ uint32 read_size = READ_NUM_BYTES;
+ int error = 0;
+ char *cptr = NULL;
+ char c;
+ uint8 count = 0;
+
+ *ramstart = 0;
+ *rodata_start = 0;
+ *rodata_end = 0;
+
+ if (fname == NULL) {
+ DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ filep = filp_open(fname, O_RDONLY, 0);
+ if (IS_ERR(filep)) {
+ DHD_ERROR(("%s: Failed to open %s \n", __FUNCTION__, fname));
+ goto fail;
+ }
+
+ /* Allocate 1 byte more than read_size to terminate it with NULL */
+ raw_fmts = kmalloc(read_size + 1, GFP_KERNEL);
+ if (raw_fmts == NULL) {
+ DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* read ram start, rodata_start and rodata_end values from map file */
+
+ while (count != ALL_MAP_VAL)
+ {
+ error = vfs_read(filep, raw_fmts, read_size, (&filep->f_pos));
+ if (error < 0) {
+ DHD_ERROR(("%s: read failed %s err:%d \n", __FUNCTION__,
+ map_file_path, error));
+ goto fail;
+ }
+
+ if (error < read_size) {
+ /*
+ * Since we rewind the file position by GO_BACK_FILE_POS_NUM_BYTES
+ * bytes after every full read, a full read never lands exactly on
+ * EOF. So a return value smaller than read_size means EOF was
+ * reached; don't read further.
+ */
+ break;
+ }
+ /* End raw_fmts with NULL as strstr expects NULL terminated strings */
+ raw_fmts[read_size] = '\0';
+
+ /* Get ramstart address */
+ if ((cptr = strstr(raw_fmts, ramstart_str))) {
+ cptr = cptr - BYTES_AHEAD_NUM;
+ sscanf(cptr, "%x %c text_start", ramstart, &c);
+ count |= RAMSTART_BIT;
+ }
+
+ /* Get ram rodata start address */
+ if ((cptr = strstr(raw_fmts, rodata_start_str))) {
+ cptr = cptr - BYTES_AHEAD_NUM;
+ sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
+ count |= RDSTART_BIT;
+ }
+
+ /* Get ram rodata end address */
+ if ((cptr = strstr(raw_fmts, rodata_end_str))) {
+ cptr = cptr - BYTES_AHEAD_NUM;
+ sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
+ count |= RDEND_BIT;
+ }
+ memset(raw_fmts, 0, read_size);
+ /*
+ * go back a predefined number of bytes so that we won't miss
+ * the string and address even if they come split across the next read.
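(The rewind-and-rescan loop above is a general technique: scan a file in fixed windows, but overlap consecutive windows so a token that straddles a window edge is still seen whole. A standalone userspace sketch of the same idea; names and sizes are illustrative, the overlap must be at least as long as the longest token, and, as in the driver, embedded NUL bytes would stop strstr early.)

    #include <stdio.h>
    #include <string.h>

    #define CHUNK   1000
    #define OVERLAP 100   /* must be < CHUNK and >= longest token */

    /* Return the file offset of the first occurrence of 'token', or -1. */
    static long find_token(FILE *fp, const char *token)
    {
        char buf[CHUNK + 1];
        size_t n;

        while ((n = fread(buf, 1, CHUNK, fp)) > 0) {
            buf[n] = '\0';                  /* strstr needs termination */
            char *hit = strstr(buf, token);
            if (hit)
                return ftell(fp) - (long)n + (hit - buf);
            if (n < CHUNK)                  /* short read: EOF reached */
                break;
            fseek(fp, -OVERLAP, SEEK_CUR);  /* re-scan the window boundary */
        }
        return -1;
    }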
+ */ + filep->f_pos = filep->f_pos - GO_BACK_FILE_POS_NUM_BYTES; + } + + DHD_ERROR(("---ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n", + *ramstart, *rodata_start, *rodata_end)); + + DHD_ERROR(("readmap over \n")); + +fail: + if (raw_fmts) { + kfree(raw_fmts); + raw_fmts = NULL; + } + if (!IS_ERR(filep)) + filp_close(filep, NULL); + + set_fs(fs); + if (count == ALL_MAP_VAL) { + return BCME_OK; + } + DHD_ERROR(("readmap error 0X%x \n", count)); + return BCME_ERROR; +} + +static void +dhd_init_static_strs_array(dhd_event_log_t *temp, char *str_file, char *map_file) +{ + struct file *filep = NULL; + mm_segment_t fs; + char *raw_fmts = NULL; + uint32 logstrs_size = 0; + + int error = 0; + uint32 ramstart = 0; + uint32 rodata_start = 0; + uint32 rodata_end = 0; + uint32 logfilebase = 0; + + error = dhd_read_map(map_file, &ramstart, &rodata_start, &rodata_end); + if (error == BCME_ERROR) { + DHD_ERROR(("readmap Error!! \n")); + /* don't do event log parsing in actual case */ + temp->raw_sstr = NULL; + return; + } + DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n", + ramstart, rodata_start, rodata_end)); + + fs = get_fs(); + set_fs(KERNEL_DS); + + filep = filp_open(str_file, O_RDONLY, 0); + if (IS_ERR(filep)) { + DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file)); + goto fail; + } + + /* Full file size is huge. Just read required part */ + logstrs_size = rodata_end - rodata_start; + + raw_fmts = kmalloc(logstrs_size, GFP_KERNEL); + if (raw_fmts == NULL) { + DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__)); + goto fail; + } + + logfilebase = rodata_start - ramstart; + + error = generic_file_llseek(filep, logfilebase, SEEK_SET); + if (error < 0) { + DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error)); + goto fail; + } + + error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos)); + if (error != logstrs_size) { + DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error)); + goto fail; + } + + if (strstr(str_file, ram_file_str) != NULL) { + temp->raw_sstr = raw_fmts; + temp->ramstart = ramstart; + temp->rodata_start = rodata_start; + temp->rodata_end = rodata_end; + } else if (strstr(str_file, rom_file_str) != NULL) { + temp->rom_raw_sstr = raw_fmts; + temp->rom_ramstart = ramstart; + temp->rom_rodata_start = rodata_start; + temp->rom_rodata_end = rodata_end; + } + + filp_close(filep, NULL); + set_fs(fs); + + return; +fail: + if (raw_fmts) { + kfree(raw_fmts); + raw_fmts = NULL; + } + if (!IS_ERR(filep)) + filp_close(filep, NULL); + set_fs(fs); + if (strstr(str_file, ram_file_str) != NULL) { + temp->raw_sstr = NULL; + } else if (strstr(str_file, rom_file_str) != NULL) { + temp->rom_raw_sstr = NULL; + } + return; +} + +#endif /* SHOW_LOGTRACE */ + + +dhd_pub_t * +dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) +{ + dhd_info_t *dhd = NULL; + struct net_device *net = NULL; + char if_name[IFNAMSIZ] = {'\0'}; + uint32 bus_type = -1; + uint32 bus_num = -1; + uint32 slot_num = -1; + wifi_adapter_info_t *adapter = NULL; + + dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef STBLINUX + DHD_ERROR(("%s\n", driver_target)); +#endif /* STBLINUX */ + /* will implement get_ids for DBUS later */ +#if defined(BCMSDIO) + dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num); +#endif + adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num); + + /* Allocate primary dhd_info */ + dhd = wifi_platform_prealloc(adapter, 
DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t)); + if (dhd == NULL) { + dhd = MALLOC(osh, sizeof(dhd_info_t)); + if (dhd == NULL) { + DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__)); + goto fail; + } + } + memset(dhd, 0, sizeof(dhd_info_t)); + dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC; + + dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */ + + dhd->pub.osh = osh; + dhd->adapter = adapter; + +#ifdef GET_CUSTOM_MAC_ENABLE + wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet); +#endif /* GET_CUSTOM_MAC_ENABLE */ +#ifdef CUSTOM_FORCE_NODFS_FLAG + dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG; + dhd->pub.force_country_change = TRUE; +#endif /* CUSTOM_FORCE_NODFS_FLAG */ +#ifdef CUSTOM_COUNTRY_CODE + get_customized_country_code(dhd->adapter, + dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec, + dhd->pub.dhd_cflags); +#endif /* CUSTOM_COUNTRY_CODE */ + dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID; + dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID; + + /* Initialize thread based operation and lock */ + sema_init(&dhd->sdsem, 1); + + /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name. + * This is indeed a hack but we have to make it work properly before we have a better + * solution + */ + dhd_update_fw_nv_path(dhd); + + /* Link to info module */ + dhd->pub.info = dhd; + + + /* Link to bus module */ + dhd->pub.bus = bus; + dhd->pub.hdrlen = bus_hdrlen; + + /* Set network interface name if it was provided as module parameter */ + if (iface_name[0]) { + int len; + char ch; + strncpy(if_name, iface_name, IFNAMSIZ); + if_name[IFNAMSIZ - 1] = 0; + len = strlen(if_name); + ch = if_name[len - 1]; + if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2)) + strcat(if_name, "%d"); + } + + /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */ + net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL); + if (net == NULL) { + goto fail; + } + + + dhd_state |= DHD_ATTACH_STATE_ADD_IF; +#ifdef DHD_L2_FILTER + /* initialize the l2_filter_cnt */ + dhd->pub.l2_filter_cnt = 0; +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + net->open = NULL; +#else + net->netdev_ops = NULL; +#endif + + mutex_init(&dhd->dhd_iovar_mutex); + sema_init(&dhd->proto_sem, 1); + +#ifdef PROP_TXSTATUS + spin_lock_init(&dhd->wlfc_spinlock); + + dhd->pub.skip_fc = dhd_wlfc_skip_fc; + dhd->pub.plat_init = dhd_wlfc_plat_init; + dhd->pub.plat_deinit = dhd_wlfc_plat_deinit; + +#ifdef DHD_WLFC_THREAD + init_waitqueue_head(&dhd->pub.wlfc_wqhead); + dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread"); + if (IS_ERR(dhd->pub.wlfc_thread)) { + DHD_ERROR(("create wlfc thread failed\n")); + goto fail; + } else { + wake_up_process(dhd->pub.wlfc_thread); + } +#endif /* DHD_WLFC_THREAD */ +#endif /* PROP_TXSTATUS */ + + /* Initialize other structure content */ + init_waitqueue_head(&dhd->ioctl_resp_wait); + init_waitqueue_head(&dhd->d3ack_wait); + init_waitqueue_head(&dhd->ctrl_wait); + init_waitqueue_head(&dhd->dhd_bus_busy_state_wait); + dhd->pub.dhd_bus_busy_state = 0; + + /* Initialize the spinlocks */ + spin_lock_init(&dhd->sdlock); + spin_lock_init(&dhd->txqlock); + spin_lock_init(&dhd->dhd_lock); + spin_lock_init(&dhd->rxf_lock); +#if defined(RXFRAME_THREAD) + dhd->rxthread_enabled = TRUE; +#endif /* defined(RXFRAME_THREAD) */ + +#ifdef DHDTCPACK_SUPPRESS + spin_lock_init(&dhd->tcpack_lock); +#endif /* DHDTCPACK_SUPPRESS */ + + /* Initialize Wakelock stuff */ + 
spin_lock_init(&dhd->wakelock_spinlock); + spin_lock_init(&dhd->wakelock_evt_spinlock); + DHD_OS_WAKE_LOCK_INIT(dhd); + dhd->wakelock_wd_counter = 0; +#ifdef CONFIG_HAS_WAKELOCK + wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake"); +#endif /* CONFIG_HAS_WAKELOCK */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + mutex_init(&dhd->dhd_net_if_mutex); + mutex_init(&dhd->dhd_suspend_mutex); +#endif + dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT; + + /* Attach and link in the protocol */ + if (dhd_prot_attach(&dhd->pub) != 0) { + DHD_ERROR(("dhd_prot_attach failed\n")); + goto fail; + } + dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH; + +#ifdef WL_CFG80211 + /* Attach and link in the cfg80211 */ + if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) { + DHD_ERROR(("wl_cfg80211_attach failed\n")); + goto fail; + } + + dhd_monitor_init(&dhd->pub); + dhd_state |= DHD_ATTACH_STATE_CFG80211; +#endif +#ifdef DHD_LOG_DUMP + dhd_log_dump_init(&dhd->pub); +#endif /* DHD_LOG_DUMP */ +#if defined(WL_WIRELESS_EXT) + /* Attach and link in the iw */ + if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) { + if (wl_iw_attach(net, (void *)&dhd->pub) != 0) { + DHD_ERROR(("wl_iw_attach failed\n")); + goto fail; + } + dhd_state |= DHD_ATTACH_STATE_WL_ATTACH; + } +#endif /* defined(WL_WIRELESS_EXT) */ + +#ifdef SHOW_LOGTRACE + dhd_init_logstrs_array(&dhd->event_data); + dhd_init_static_strs_array(&dhd->event_data, st_str_file_path, map_file_path); + dhd_init_static_strs_array(&dhd->event_data, rom_st_str_file_path, rom_map_file_path); +#endif /* SHOW_LOGTRACE */ + + if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) { + DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA)); + goto fail; + } + + + + /* Set up the watchdog timer */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + timer_setup(&dhd->timer, dhd_watchdog, dhd_watchdog_ms); +#else + init_timer(&dhd->timer); + dhd->timer.data = (ulong)dhd; + dhd->timer.function = dhd_watchdog; + dhd->default_wd_interval = dhd_watchdog_ms; +#endif + + if (dhd_watchdog_prio >= 0) { + /* Initialize watchdog thread */ + PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread"); + if (dhd->thr_wdt_ctl.thr_pid < 0) { + goto fail; + } + + } else { + dhd->thr_wdt_ctl.thr_pid = -1; + } + +#ifdef DHD_PCIE_RUNTIMEPM + /* Setup up the runtime PM Idlecount timer */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + timer_setup(&dhd->rpm_timer, dhd_runtimepm, 0); +#else + init_timer(&dhd->rpm_timer); + dhd->rpm_timer.data = (ulong)dhd; + dhd->rpm_timer.function = dhd_runtimepm; +#endif + + dhd->rpm_timer_valid = FALSE; + + dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID; + PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread"); + if (dhd->thr_rpm_ctl.thr_pid < 0) { + goto fail; + } +#endif /* DHD_PCIE_RUNTIMEPM */ + +#ifdef DEBUGGER + debugger_init((void *) bus); +#endif + + /* Set up the bottom half handler */ + if (dhd_dpc_prio >= 0) { + /* Initialize DPC thread */ + PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc"); + if (dhd->thr_dpc_ctl.thr_pid < 0) { + goto fail; + } + } else { + /* use tasklet for dpc */ + tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd); + dhd->thr_dpc_ctl.thr_pid = -1; + } + + if (dhd->rxthread_enabled) { + bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND); + /* Initialize RXF thread */ + PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf"); + if (dhd->thr_rxf_ctl.thr_pid < 0) { + goto fail; + } + } + + dhd_state |= 
DHD_ATTACH_STATE_THREADS_CREATED; + +#if defined(CONFIG_PM_SLEEP) + if (!dhd_pm_notifier_registered) { + dhd_pm_notifier_registered = TRUE; + dhd->pm_notifier.notifier_call = dhd_pm_callback; + dhd->pm_notifier.priority = 10; + register_pm_notifier(&dhd->pm_notifier); + } + +#endif /* CONFIG_PM_SLEEP */ + +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20; + dhd->early_suspend.suspend = dhd_early_suspend; + dhd->early_suspend.resume = dhd_late_resume; + register_early_suspend(&dhd->early_suspend); + dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE; +#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ + +#ifdef ARP_OFFLOAD_SUPPORT + dhd->pend_ipaddr = 0; + if (!dhd_inetaddr_notifier_registered) { + dhd_inetaddr_notifier_registered = TRUE; + register_inetaddr_notifier(&dhd_inetaddr_notifier); + } +#endif /* ARP_OFFLOAD_SUPPORT */ + +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) + if (!dhd_inet6addr_notifier_registered) { + dhd_inet6addr_notifier_registered = TRUE; + register_inet6addr_notifier(&dhd_inet6addr_notifier); + } +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ + dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd); +#ifdef DEBUG_CPU_FREQ + dhd->new_freq = alloc_percpu(int); + dhd->freq_trans.notifier_call = dhd_cpufreq_notifier; + cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER); +#endif +#ifdef DHDTCPACK_SUPPRESS +#ifdef BCMSDIO + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX); +#elif defined(BCMPCIE) + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD); +#else + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); +#endif /* BCMSDIO */ +#endif /* DHDTCPACK_SUPPRESS */ + +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + + dhd_state |= DHD_ATTACH_STATE_DONE; + dhd->dhd_state = dhd_state; + + dhd_found++; +#ifdef DHD_DEBUG_PAGEALLOC + register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub); +#endif /* DHD_DEBUG_PAGEALLOC */ + +#if defined(DHD_LB) + DHD_ERROR(("DHD LOAD BALANCING Enabled\n")); + + dhd_lb_set_default_cpus(dhd); + + /* Initialize the CPU Masks */ + if (dhd_cpumasks_init(dhd) == 0) { + + /* Now we have the current CPU maps, run through candidacy */ + dhd_select_cpu_candidacy(dhd); + + /* + * If we are able to initialize CPU masks, lets register to the + * CPU Hotplug framework to change the CPU for each job dynamically + * using candidacy algorithm. 
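(A plausible shape for the dhd_cpu_callback() hook registered below, sketched here only to make the candidacy re-run concrete; the driver's actual body is not part of this hunk. It follows the classic pre-4.10 register_cpu_notifier() convention this code uses.)

    #include <linux/cpu.h>
    #include <linux/notifier.h>

    static int dhd_cpu_callback(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
    {
        dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
        case CPU_DEAD:
            /* A CPU came or went: re-run candidacy so the RX/TX
             * completion and NAPI jobs land on CPUs that still exist. */
            dhd_select_cpu_candidacy(dhd);
            break;
        }
        return NOTIFY_OK;
    }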
+ */ + dhd->cpu_notifier.notifier_call = dhd_cpu_callback; + register_cpu_notifier(&dhd->cpu_notifier); /* Register a callback */ + } else { + /* + * We are unable to initialize CPU masks, so candidacy algorithm + * won't run, but still Load Balancing will be honoured based + * on the CPUs allocated for a given job statically during init + */ + dhd->cpu_notifier.notifier_call = NULL; + DHD_ERROR(("%s(): dhd_cpumasks_init failed CPUs for JOB would be static\n", + __FUNCTION__)); + } + + + DHD_LB_STATS_INIT(&dhd->pub); + + /* Initialize the Load Balancing Tasklets and Napi object */ +#if defined(DHD_LB_TXC) + tasklet_init(&dhd->tx_compl_tasklet, + dhd_lb_tx_compl_handler, (ulong)(&dhd->pub)); + INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn); + DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__)); +#endif /* DHD_LB_TXC */ + +#if defined(DHD_LB_RXC) + tasklet_init(&dhd->rx_compl_tasklet, + dhd_lb_rx_compl_handler, (ulong)(&dhd->pub)); + INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn); + DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__)); +#endif /* DHD_LB_RXC */ + +#if defined(DHD_LB_RXP) + __skb_queue_head_init(&dhd->rx_pend_queue); + skb_queue_head_init(&dhd->rx_napi_queue); + + /* Initialize the work that dispatches NAPI job to a given core */ + INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn); + DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__)); +#endif /* DHD_LB_RXP */ + +#endif /* DHD_LB */ + + INIT_DELAYED_WORK(&dhd->dhd_memdump_work, dhd_memdump_work_handler); + + (void)dhd_sysfs_init(dhd); + + return &dhd->pub; + +fail: + if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) { + DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n", + __FUNCTION__, dhd_state, &dhd->pub)); + dhd->dhd_state = dhd_state; + dhd_detach(&dhd->pub); + dhd_free(&dhd->pub); + } + + return NULL; +} + +#include + +void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs) +{ + dhd_info_t *dhd = (dhd_info_t*)dhdp->info; + + schedule_delayed_work(&dhd->dhd_memdump_work, msecs_to_jiffies(msecs)); +} + +int dhd_get_fw_mode(dhd_info_t *dhdinfo) +{ + if (strstr(dhdinfo->fw_path, "_apsta") != NULL) + return DHD_FLAG_HOSTAP_MODE; + if (strstr(dhdinfo->fw_path, "_p2p") != NULL) + return DHD_FLAG_P2P_MODE; + if (strstr(dhdinfo->fw_path, "_ibss") != NULL) + return DHD_FLAG_IBSS_MODE; + if (strstr(dhdinfo->fw_path, "_mfg") != NULL) + return DHD_FLAG_MFG_MODE; + + return DHD_FLAG_STA_MODE; +} + +bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo) +{ + int fw_len; + int nv_len; + const char *fw = NULL; + const char *nv = NULL; + wifi_adapter_info_t *adapter = dhdinfo->adapter; + + + /* Update firmware and nvram path. The path may be from adapter info or module parameter + * The path from adapter info is used for initialization only (as it won't change). + * + * The firmware_path/nvram_path module parameter may be changed by the system at run + * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private + * command may change dhdinfo->fw_path. As such we need to clear the path info in + * module parameter after it is copied. 
We won't update the path until the module parameter + * is changed again (first character is not '\0') + */ + + /* set default firmware and nvram path for built-in type driver */ + if (!dhd_download_fw_on_driverload) { +#ifdef CONFIG_BCMDHD_FW_PATH + fw = CONFIG_BCMDHD_FW_PATH; +#endif /* CONFIG_BCMDHD_FW_PATH */ +#ifdef CONFIG_BCMDHD_NVRAM_PATH + nv = CONFIG_BCMDHD_NVRAM_PATH; +#endif /* CONFIG_BCMDHD_NVRAM_PATH */ + } + + /* check if we need to initialize the path */ + if (dhdinfo->fw_path[0] == '\0') { + if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0') + fw = adapter->fw_path; + + } + if (dhdinfo->nv_path[0] == '\0') { + if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0') + nv = adapter->nv_path; + } + + /* Use module parameter if it is valid, EVEN IF the path has not been initialized + * + * TODO: need a solution for multi-chip, can't use the same firmware for all chips + */ + if (firmware_path[0] != '\0') + fw = firmware_path; + if (nvram_path[0] != '\0') + nv = nvram_path; + + if (fw && fw[0] != '\0') { + fw_len = strlen(fw); + if (fw_len >= sizeof(dhdinfo->fw_path)) { + DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n")); + return FALSE; + } + strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path)); + if (dhdinfo->fw_path[fw_len-1] == '\n') + dhdinfo->fw_path[fw_len-1] = '\0'; + } + if (nv && nv[0] != '\0') { + nv_len = strlen(nv); + if (nv_len >= sizeof(dhdinfo->nv_path)) { + DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n")); + return FALSE; + } + strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path)); + if (dhdinfo->nv_path[nv_len-1] == '\n') + dhdinfo->nv_path[nv_len-1] = '\0'; + } + + /* clear the path in module parameter */ + if (dhd_download_fw_on_driverload) { + firmware_path[0] = '\0'; + nvram_path[0] = '\0'; + } +#ifndef BCMEMBEDIMAGE + /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */ + if (dhdinfo->fw_path[0] == '\0') { + DHD_ERROR(("firmware path not found\n")); + return FALSE; + } + if (dhdinfo->nv_path[0] == '\0') { + DHD_ERROR(("nvram path not found\n")); + return FALSE; + } +#endif /* BCMEMBEDIMAGE */ + + return TRUE; +} + +#ifdef CUSTOMER_HW4_DEBUG +bool dhd_validate_chipid(dhd_pub_t *dhdp) +{ + uint chipid = dhd_bus_chip_id(dhdp); + uint config_chipid; + +#ifdef BCM4359_CHIP + config_chipid = BCM4359_CHIP_ID; +#elif defined(BCM4358_CHIP) + config_chipid = BCM4358_CHIP_ID; +#elif defined(BCM4354_CHIP) + config_chipid = BCM4354_CHIP_ID; +#elif defined(BCM4356_CHIP) + config_chipid = BCM4356_CHIP_ID; +#elif defined(BCM4339_CHIP) + config_chipid = BCM4339_CHIP_ID; +#elif defined(BCM43349_CHIP) + config_chipid = BCM43349_CHIP_ID; +#elif defined(BCM4335_CHIP) + config_chipid = BCM4335_CHIP_ID; +#elif defined(BCM43241_CHIP) + config_chipid = BCM4324_CHIP_ID; +#elif defined(BCM4330_CHIP) + config_chipid = BCM4330_CHIP_ID; +#elif defined(BCM43430_CHIP) + config_chipid = BCM43430_CHIP_ID; +#elif defined(BCM4334W_CHIP) + config_chipid = BCM43342_CHIP_ID; +#elif defined(BCM43455_CHIP) + config_chipid = BCM4345_CHIP_ID; +#else + DHD_ERROR(("%s: Unknown chip id, if you use new chipset," + " please add CONFIG_BCMXXXX into the Kernel and" + " BCMXXXX_CHIP definition into the DHD driver\n", + __FUNCTION__)); + config_chipid = 0; + + return FALSE; +#endif /* BCM4354_CHIP */ + +#if defined(BCM4359_CHIP) + if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) { + return TRUE; + } +#endif /* BCM4359_CHIP */ + + return config_chipid == chipid; +} +#endif /* CUSTOMER_HW4_DEBUG */ + +int 
+dhd_bus_start(dhd_pub_t *dhdp) +{ + int ret = -1; + dhd_info_t *dhd = (dhd_info_t*)dhdp->info; + unsigned long flags; + + ASSERT(dhd); + + DHD_TRACE(("Enter %s:\n", __FUNCTION__)); + + DHD_PERIM_LOCK(dhdp); + + /* try to download image and nvram to the dongle */ + if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) { + /* Indicate FW Download has not yet done */ + dhd->pub.is_fw_download_done = FALSE; + DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path)); + ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh, + dhd->fw_path, dhd->nv_path); + if (ret < 0) { + DHD_ERROR(("%s: failed to download firmware %s\n", + __FUNCTION__, dhd->fw_path)); + DHD_PERIM_UNLOCK(dhdp); + return ret; + } + /* Indicate FW Download has succeeded */ + dhd->pub.is_fw_download_done = TRUE; + } + if (dhd->pub.busstate != DHD_BUS_LOAD) { + DHD_PERIM_UNLOCK(dhdp); + return -ENETDOWN; + } + + dhd_os_sdlock(dhdp); + + /* Start the watchdog timer */ + dhd->pub.tickcnt = 0; + dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms); + DHD_ENABLE_RUNTIME_PM(&dhd->pub); + + /* Bring up the bus */ + if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) { + + DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret)); + dhd_os_sdunlock(dhdp); + DHD_PERIM_UNLOCK(dhdp); + return ret; + } +#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE) +#if defined(BCMPCIE_OOB_HOST_WAKE) + dhd_os_sdunlock(dhdp); +#endif /* BCMPCIE_OOB_HOST_WAKE */ + /* Host registration for OOB interrupt */ + if (dhd_bus_oob_intr_register(dhdp)) { + /* deactivate timer and wait for the handler to finish */ +#if !defined(BCMPCIE_OOB_HOST_WAKE) + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->wd_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + del_timer_sync(&dhd->timer); + + dhd_os_sdunlock(dhdp); +#endif /* !BCMPCIE_OOB_HOST_WAKE */ + DHD_DISABLE_RUNTIME_PM(&dhd->pub); + DHD_PERIM_UNLOCK(dhdp); + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); + DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__)); + return -ENODEV; + } + +#if defined(BCMPCIE_OOB_HOST_WAKE) + dhd_os_sdlock(dhdp); + dhd_bus_oob_intr_set(dhdp, TRUE); +#else + /* Enable oob at firmware */ + dhd_enable_oob_intr(dhd->pub.bus, TRUE); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#endif +#ifdef PCIE_FULL_DONGLE + { + /* max_h2d_rings includes H2D common rings */ + uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus); + + DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__, + max_h2d_rings)); + if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) { + dhd_os_sdunlock(dhdp); + DHD_PERIM_UNLOCK(dhdp); + return ret; + } + } +#endif /* PCIE_FULL_DONGLE */ + + /* Do protocol initialization necessary for IOCTL/IOVAR */ +#ifdef PCIE_FULL_DONGLE + dhd_os_sdunlock(dhdp); +#endif /* PCIE_FULL_DONGLE */ + ret = dhd_prot_init(&dhd->pub); + if (unlikely(ret) != BCME_OK) { + DHD_PERIM_UNLOCK(dhdp); + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); + return ret; + } +#ifdef PCIE_FULL_DONGLE + dhd_os_sdlock(dhdp); +#endif /* PCIE_FULL_DONGLE */ + + /* If bus is not ready, can't come up */ + if (dhd->pub.busstate != DHD_BUS_DATA) { + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->wd_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + del_timer_sync(&dhd->timer); + DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__)); + DHD_DISABLE_RUNTIME_PM(&dhd->pub); + dhd_os_sdunlock(dhdp); + DHD_PERIM_UNLOCK(dhdp); + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); + return -ENODEV; + } + + dhd_os_sdunlock(dhdp); + + /* Bus is ready, query any dongle information */ 
+ if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) { + DHD_GENERAL_LOCK(&dhd->pub, flags); + dhd->wd_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + del_timer_sync(&dhd->timer); + DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__)); + DHD_DISABLE_RUNTIME_PM(&dhd->pub); + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); + DHD_PERIM_UNLOCK(dhdp); + return ret; + } + +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd->pend_ipaddr) { +#ifdef AOE_IP_ALIAS_SUPPORT + aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0); +#endif /* AOE_IP_ALIAS_SUPPORT */ + dhd->pend_ipaddr = 0; + } +#endif /* ARP_OFFLOAD_SUPPORT */ + + DHD_PERIM_UNLOCK(dhdp); + return 0; +} +#ifdef WLTDLS +int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + uint32 tdls = tdls_on; + int ret = 0; + uint32 tdls_auto_op = 0; + uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING; + int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH; + int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW; + BCM_REFERENCE(mac); + if (!FW_SUPPORTED(dhd, tdls)) + return BCME_ERROR; + + if (dhd->tdls_enable == tdls_on) + goto auto_mode; + bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret)); + goto exit; + } + dhd->tdls_enable = tdls_on; +auto_mode: + + tdls_auto_op = auto_on; + bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret)); + goto exit; + } + + if (tdls_auto_op) { + bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time, + sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret)); + goto exit; + } + bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret)); + goto exit; + } + bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret)); + goto exit; + } + } + +exit: + return ret; +} +int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + if (dhd) + ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac); + else + ret = BCME_ERROR; + return ret; +} +int +dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode) +{ + char iovbuf[WLC_IOCTL_SMLEN]; + int ret = 0; + bool auto_on = false; + uint32 mode = wfd_mode; + +#ifdef ENABLE_TDLS_AUTO_MODE + if (wfd_mode) { + auto_on = false; + } else { + auto_on = true; + } +#else + auto_on = false; +#endif /* ENABLE_TDLS_AUTO_MODE */ + ret = _dhd_tdls_enable(dhd, false, auto_on, NULL); + if (ret < 0) { + DHD_ERROR(("Disable tdls_auto_op failed. 
%d\n", ret)); + return ret; + } + + + bcm_mkiovar("tdls_wfd_mode", (char *)&mode, sizeof(mode), + iovbuf, sizeof(iovbuf)); + if (((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) && + (ret != BCME_UNSUPPORTED)) { + DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__, ret)); + return ret; + } + + ret = _dhd_tdls_enable(dhd, true, auto_on, NULL); + if (ret < 0) { + DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret)); + return ret; + } + + dhd->tdls_mode = mode; + return ret; +} +#ifdef PCIE_FULL_DONGLE +void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub; + tdls_peer_node_t *cur = dhdp->peer_tbl.node; + tdls_peer_node_t *new = NULL, *prev = NULL; + dhd_if_t *dhdif; + uint8 sa[ETHER_ADDR_LEN]; + int ifidx = dhd_net2idx(dhd, dev); + + if (ifidx == DHD_BAD_IF) + return; + + dhdif = dhd->iflist[ifidx]; + memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN); + + if (connect) { + while (cur != NULL) { + if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) { + DHD_ERROR(("%s: TDLS Peer exist already %d\n", + __FUNCTION__, __LINE__)); + return; + } + cur = cur->next; + } + + new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t)); + if (new == NULL) { + DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__)); + return; + } + memcpy(new->addr, da, ETHER_ADDR_LEN); + new->next = dhdp->peer_tbl.node; + dhdp->peer_tbl.node = new; + dhdp->peer_tbl.tdls_peer_count++; + + } else { + while (cur != NULL) { + if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) { + dhd_flow_rings_delete_for_peer(dhdp, ifidx, da); + if (prev) + prev->next = cur->next; + else + dhdp->peer_tbl.node = cur->next; + MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t)); + dhdp->peer_tbl.tdls_peer_count--; + return; + } + prev = cur; + cur = cur->next; + } + DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__)); + } +} +#endif /* PCIE_FULL_DONGLE */ +#endif + +bool dhd_is_concurrent_mode(dhd_pub_t *dhd) +{ + if (!dhd) + return FALSE; + + if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE) + return TRUE; + else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) == + DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) + return TRUE; + else + return FALSE; +} +#if !defined(AP) && defined(WLP2P) +/* From Android JerryBean release, the concurrent mode is enabled by default and the firmware + * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA + * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware + * would still be named as fw_bcmdhd_apsta. 
+ */ +uint32 +dhd_get_concurrent_capabilites(dhd_pub_t *dhd) +{ + int32 ret = 0; + char buf[WLC_IOCTL_SMLEN]; + bool mchan_supported = FALSE; + /* if dhd->op_mode is already set for HOSTAP and Manufacturing + * test mode, that means we only will use the mode as it is + */ + if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE)) + return 0; + if (FW_SUPPORTED(dhd, vsdb)) { + mchan_supported = TRUE; + } + if (!FW_SUPPORTED(dhd, p2p)) { + DHD_TRACE(("Chip does not support p2p\n")); + return 0; + } else { + /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */ + memset(buf, 0, sizeof(buf)); + bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), + FALSE, 0)) < 0) { + DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret)); + return 0; + } else { + if (buf[0] == 1) { + /* By default, chip supports single chan concurrency, + * now lets check for mchan + */ + ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE; + if (mchan_supported) + ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE; + if (FW_SUPPORTED(dhd, rsdb)) { + ret |= DHD_FLAG_RSDB_MODE; + } + if (FW_SUPPORTED(dhd, mp2p)) { + ret |= DHD_FLAG_MP2P_MODE; + } +#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF) + return ret; +#else + return 0; +#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */ + } + } + } + return 0; +} +#endif + +#ifdef SUPPORT_AP_POWERSAVE +#define RXCHAIN_PWRSAVE_PPS 10 +#define RXCHAIN_PWRSAVE_QUIET_TIME 10 +#define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0 +int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable) +{ + char iovbuf[128]; + int32 pps = RXCHAIN_PWRSAVE_PPS; + int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME; + int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK; + + if (enable) { + bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf)); + if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) { + DHD_ERROR(("Failed to enable AP power save")); + } + bcm_mkiovar("rxchain_pwrsave_pps", (char *)&pps, 4, iovbuf, sizeof(iovbuf)); + if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) { + DHD_ERROR(("Failed to set pps")); + } + bcm_mkiovar("rxchain_pwrsave_quiet_time", (char *)&quiet_time, + 4, iovbuf, sizeof(iovbuf)); + if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) { + DHD_ERROR(("Failed to set quiet time")); + } + bcm_mkiovar("rxchain_pwrsave_stas_assoc_check", (char *)&stas_assoc_check, + 4, iovbuf, sizeof(iovbuf)); + if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) { + DHD_ERROR(("Failed to set stas assoc check")); + } + } else { + bcm_mkiovar("rxchain_pwrsave_enable", (char *)&enable, 4, iovbuf, sizeof(iovbuf)); + if (dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0) != BCME_OK) { + DHD_ERROR(("Failed to disable AP power save")); + } + } + + return 0; +} +#endif /* SUPPORT_AP_POWERSAVE */ + + +int +dhd_preinit_ioctls(dhd_pub_t *dhd) +{ + int ret = 0; + char eventmask[WL_EVENTING_MASK_LEN]; + char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ + uint32 buf_key_b4_m4 = 1; + uint8 msglen; + eventmsgs_ext_t *eventmask_msg = NULL; + char* iov_buf = NULL; + int ret2 = 0; +#if defined(CUSTOM_AMPDU_BA_WSIZE) + uint32 ampdu_ba_wsize = 0; +#endif +#if defined(CUSTOM_AMPDU_MPDU) + int32 ampdu_mpdu = 0; +#endif +#if defined(CUSTOM_AMPDU_RELEASE) + int32 ampdu_release = 0; +#endif +#if 
defined(CUSTOM_AMSDU_AGGSF) + int32 amsdu_aggsf = 0; +#endif +#ifdef SUPPORT_SENSORHUB + int32 shub_enable = 0; +#endif /* SUPPORT_SENSORHUB */ +#if defined(BCMSDIO) +#ifdef PROP_TXSTATUS + int wlfc_enable = TRUE; +#ifndef DISABLE_11N + uint32 hostreorder = 1; +#endif /* DISABLE_11N */ +#endif /* PROP_TXSTATUS */ +#endif +#ifdef PCIE_FULL_DONGLE + uint32 wl_ap_isolate; +#endif /* PCIE_FULL_DONGLE */ + +#if defined(BCMSDIO) + /* by default frame burst is enabled for PCIe and disabled for SDIO dongles */ + uint32 frameburst = 0; +#else + uint32 frameburst = 1; +#endif /* BCMSDIO */ + +#ifdef DHD_ENABLE_LPC + uint32 lpc = 1; +#endif /* DHD_ENABLE_LPC */ + uint power_mode = PM_FAST; +#if defined(BCMSDIO) + uint32 dongle_align = DHD_SDALIGN; + uint32 glom = CUSTOM_GLOM_SETTING; +#endif /* defined(BCMSDIO) */ +#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL) + uint32 credall = 1; +#endif +#if defined(VSDB) || defined(ROAM_ENABLE) + uint bcn_timeout = CUSTOM_BCN_TIMEOUT; +#else + uint bcn_timeout = 4; +#endif /* VSDB || ROAM_ENABLE */ +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + uint32 bcn_li_bcn = 1; +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ + uint retry_max = CUSTOM_ASSOC_RETRY_MAX; +#if defined(ARP_OFFLOAD_SUPPORT) + int arpoe = 1; +#endif + int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME; + int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME; + int scan_passive_time = DHD_SCAN_PASSIVE_TIME; + char buf[WLC_IOCTL_SMLEN]; + char *ptr; + uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */ +#ifdef ROAM_ENABLE + uint roamvar = 0; + int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL}; + int roam_scan_period[2] = {10, WLC_BAND_ALL}; + int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL}; +#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC + int roam_fullscan_period = 60; +#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ + int roam_fullscan_period = 120; +#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */ +#else +#ifdef DISABLE_BUILTIN_ROAM + uint roamvar = 1; +#endif /* DISABLE_BUILTIN_ROAM */ +#endif /* ROAM_ENABLE */ + +#if defined(SOFTAP) + uint dtim = 1; +#endif +#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211)) + uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */ + struct ether_addr p2p_ea; +#endif +#ifdef SOFTAP_UAPSD_OFF + uint32 wme_apsd = 0; +#endif /* SOFTAP_UAPSD_OFF */ +#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) + uint32 apsta = 1; /* Enable APSTA mode */ +#elif defined(SOFTAP_AND_GC) + uint32 apsta = 0; + int ap_mode = 1; +#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */ +#ifdef GET_CUSTOM_MAC_ENABLE + struct ether_addr ea_addr; +#endif /* GET_CUSTOM_MAC_ENABLE */ + +#ifdef DISABLE_11N + uint32 nmode = 0; +#endif /* DISABLE_11N */ + +#ifdef USE_WL_TXBF + uint32 txbf = 1; +#endif /* USE_WL_TXBF */ +#if defined(PROP_TXSTATUS) +#ifdef USE_WFA_CERT_CONF + uint32 proptx = 0; +#endif /* USE_WFA_CERT_CONF */ +#endif /* PROP_TXSTATUS */ +#ifdef CUSTOM_PSPRETEND_THR + uint32 pspretend_thr = CUSTOM_PSPRETEND_THR; +#endif + uint32 rsdb_mode = 0; +#ifdef ENABLE_TEMP_THROTTLING + wl_temp_control_t temp_control; +#endif /* ENABLE_TEMP_THROTTLING */ +#ifdef DISABLE_PRUNED_SCAN + uint32 scan_features = 0; +#endif /* DISABLE_PRUNED_SCAN */ +#ifdef CUSTOM_EVENT_PM_WAKE + uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE; +#endif /* CUSTOM_EVENT_PM_WAKE */ +#ifdef PKT_FILTER_SUPPORT + dhd_pkt_filter_enable = TRUE; +#endif /* PKT_FILTER_SUPPORT */ +#ifdef WLTDLS + dhd->tdls_enable = FALSE; + 
dhd_tdls_set_mode(dhd, false);
+#endif /* WLTDLS */
+ dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
+ DHD_TRACE(("Enter %s\n", __FUNCTION__));
+ dhd->op_mode = 0;
+#ifdef CUSTOMER_HW4_DEBUG
+ if (!dhd_validate_chipid(dhd)) {
+ DHD_ERROR(("%s: CONFIG_BCMXXXX and CHIP ID(%x) are mismatched\n",
+ __FUNCTION__, dhd_bus_chip_id(dhd)));
+#ifndef SUPPORT_MULTIPLE_CHIPS
+ ret = BCME_BADARG;
+ goto done;
+#endif /* !SUPPORT_MULTIPLE_CHIPS */
+ }
+#endif /* CUSTOMER_HW4_DEBUG */
+ if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+ (op_mode == DHD_FLAG_MFG_MODE)) {
+#ifdef DHD_PCIE_RUNTIMEPM
+ /* Disable RuntimePM in mfg mode */
+ DHD_DISABLE_RUNTIME_PM(dhd);
+ DHD_ERROR(("%s : Disable RuntimePM in Manufacturing Firmware\n", __FUNCTION__));
+#endif /* DHD_PCIE_RUNTIMEPM */
+ /* Check and adjust IOCTL response timeout for Manufacturing firmware */
+ dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
+ DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
+ __FUNCTION__));
+ } else {
+ dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
+ DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
+ }
+#ifdef GET_CUSTOM_MAC_ENABLE
+ ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
+ if (!ret) {
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: can't set MAC address, error=%d\n", __FUNCTION__, ret));
+ ret = BCME_NOTUP;
+ goto done;
+ }
+ memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
+ } else {
+#endif /* GET_CUSTOM_MAC_ENABLE */
+ /* Get the default device MAC address directly from firmware */
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
+ FALSE, 0)) < 0) {
+ DHD_ERROR(("%s: can't get MAC address, error=%d\n", __FUNCTION__, ret));
+ ret = BCME_NOTUP;
+ goto done;
+ }
+ /* Update public MAC address after reading from Firmware */
+ memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+ }
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+ /* get capabilities from firmware */
+ {
+ uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
+ memset(dhd->fw_capabilities, 0, cap_buf_size);
+ bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, cap_buf_size - 1);
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
+ (cap_buf_size - 1), FALSE, 0)) < 0)
+ {
+ DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
+ __FUNCTION__, ret));
+ return 0;
+ }
+
+ memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
+ dhd->fw_capabilities[0] = ' ';
+ dhd->fw_capabilities[cap_buf_size - 2] = ' ';
+ dhd->fw_capabilities[cap_buf_size - 1] = '\0';
+ }
+
+ if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
+ (op_mode == DHD_FLAG_HOSTAP_MODE)) {
+#ifdef SET_RANDOM_MAC_SOFTAP
+ uint rand_mac;
+#endif /* SET_RANDOM_MAC_SOFTAP */
+ dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
+#if defined(ARP_OFFLOAD_SUPPORT)
+ arpoe = 0;
+#endif
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = FALSE;
+#endif
+#ifdef SET_RANDOM_MAC_SOFTAP
+ SRANDOM32((uint)jiffies);
+ rand_mac = RANDOM32();
+ iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
+ iovbuf[1] = (unsigned char)(vendor_oui >> 8);
+ iovbuf[2] = (unsigned char)vendor_oui;
+ iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
+ iovbuf[4] = (unsigned 
char)(rand_mac >> 8); + iovbuf[5] = (unsigned char)(rand_mac >> 16); + + bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret)); + } else + memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN); +#endif /* SET_RANDOM_MAC_SOFTAP */ +#if !defined(AP) && defined(WL_CFG80211) + /* Turn off MPC in AP mode */ + bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret)); + } +#endif +#ifdef USE_DYNAMIC_F2_BLKSIZE + dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ +#ifdef SUPPORT_AP_POWERSAVE + dhd_set_ap_powersave(dhd, 0, TRUE); +#endif /* SUPPORT_AP_POWERSAVE */ +#ifdef SOFTAP_UAPSD_OFF + bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n", + __FUNCTION__, ret)); + } +#endif /* SOFTAP_UAPSD_OFF */ + } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) || + (op_mode == DHD_FLAG_MFG_MODE)) { +#if defined(ARP_OFFLOAD_SUPPORT) + arpoe = 0; +#endif /* ARP_OFFLOAD_SUPPORT */ +#ifdef PKT_FILTER_SUPPORT + dhd_pkt_filter_enable = FALSE; +#endif /* PKT_FILTER_SUPPORT */ + dhd->op_mode = DHD_FLAG_MFG_MODE; +#ifdef USE_DYNAMIC_F2_BLKSIZE + dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY); +#endif /* USE_DYNAMIC_F2_BLKSIZE */ + if (FW_SUPPORTED(dhd, rsdb)) { + rsdb_mode = 0; + bcm_mkiovar("rsdb_mode", (char *)&rsdb_mode, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n", + __FUNCTION__, ret)); + } + } + } else { + uint32 concurrent_mode = 0; + if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) || + (op_mode == DHD_FLAG_P2P_MODE)) { +#if defined(ARP_OFFLOAD_SUPPORT) + arpoe = 0; +#endif +#ifdef PKT_FILTER_SUPPORT + dhd_pkt_filter_enable = FALSE; +#endif + dhd->op_mode = DHD_FLAG_P2P_MODE; + } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) || + (op_mode == DHD_FLAG_IBSS_MODE)) { + dhd->op_mode = DHD_FLAG_IBSS_MODE; + } else + dhd->op_mode = DHD_FLAG_STA_MODE; +#if !defined(AP) && defined(WLP2P) + if (dhd->op_mode != DHD_FLAG_IBSS_MODE && + (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) { +#if defined(ARP_OFFLOAD_SUPPORT) + arpoe = 1; +#endif + dhd->op_mode |= concurrent_mode; + } + + /* Check if we are enabling p2p */ + if (dhd->op_mode & DHD_FLAG_P2P_MODE) { + bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret)); + } + +#if defined(SOFTAP_AND_GC) + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP, + (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) { + DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret)); + } +#endif + memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN); + ETHER_SET_LOCALADDR(&p2p_ea); + bcm_mkiovar("p2p_da_override", (char *)&p2p_ea, + ETHER_ADDR_LEN, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + 
DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret)); + } else { + DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n")); + } + } +#else + (void)concurrent_mode; +#endif + } + +#ifdef RSDB_MODE_FROM_FILE + (void)dhd_rsdb_mode_from_file(dhd); +#endif /* RSDB_MODE_FROM_FILE */ + +#ifdef DISABLE_PRUNED_SCAN + if (FW_SUPPORTED(dhd, rsdb)) { + memset(iovbuf, 0, sizeof(iovbuf)); + bcm_mkiovar("scan_features", (char *)&scan_features, + 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, + iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { + DHD_ERROR(("%s get scan_features is failed ret=%d\n", + __FUNCTION__, ret)); + } else { + memcpy(&scan_features, iovbuf, 4); + scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM; + memset(iovbuf, 0, sizeof(iovbuf)); + bcm_mkiovar("scan_features", (char *)&scan_features, + 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s set scan_features is failed ret=%d\n", + __FUNCTION__, ret)); + } + } + } +#endif /* DISABLE_PRUNED_SCAN */ + + DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n", + dhd->op_mode, MAC2STRDBG(dhd->mac.octet))); + #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA) + if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) + dhd->info->rxthread_enabled = FALSE; + else + dhd->info->rxthread_enabled = TRUE; + #endif + /* Set Country code */ + if (dhd->dhd_cspec.ccode[0] != 0) { + bcm_mkiovar("country", (char *)&dhd->dhd_cspec, + sizeof(wl_country_t), iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__)); + } + + + /* Set Listen Interval */ + bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret)); + +#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM) +#ifdef USE_WFA_CERT_CONF + if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) { + DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar)); + } +#endif /* USE_WFA_CERT_CONF */ + /* Disable built-in roaming to allowed ext supplicant to take care of roaming */ + bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */ +#if defined(ROAM_ENABLE) + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger, + sizeof(roam_trigger), TRUE, 0)) < 0) + DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period, + sizeof(roam_scan_period), TRUE, 0)) < 0) + DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret)); + if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta, + sizeof(roam_delta), TRUE, 0)) < 0) + DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret)); + bcm_mkiovar("fullroamperiod", (char *)&roam_fullscan_period, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret)); +#endif /* ROAM_ENABLE */ + +#ifdef CUSTOM_EVENT_PM_WAKE + bcm_mkiovar("const_awake_thresh", (char *)&pm_awake_thresh, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, 
iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret)); + } +#endif /* CUSTOM_EVENT_PM_WAKE */ +#ifdef WLTDLS +#ifdef ENABLE_TDLS_AUTO_MODE + /* by default TDLS on and auto mode on */ + _dhd_tdls_enable(dhd, true, true, NULL); +#else + /* by default TDLS on and auto mode off */ + _dhd_tdls_enable(dhd, true, false, NULL); +#endif /* ENABLE_TDLS_AUTO_MODE */ +#endif /* WLTDLS */ + +#ifdef DHD_ENABLE_LPC + /* Set lpc 1 */ + bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret)); + + if (ret == BCME_NOTDOWN) { + uint wl_down = 1; + ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, + (char *)&wl_down, sizeof(wl_down), TRUE, 0); + DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc)); + + bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret)); + } + } +#endif /* DHD_ENABLE_LPC */ + + /* Set PowerSave mode */ + dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0); + +#if defined(BCMSDIO) + /* Match Host and Dongle rx alignment */ + bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + +#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL) + /* enable credall to reduce the chance of no bus credit happened. */ + bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif + +#ifdef USE_WFA_CERT_CONF + if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) { + DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom)); + } +#endif /* USE_WFA_CERT_CONF */ + if (glom != DEFAULT_GLOM_VALUE) { + DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom)); + bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + } +#endif /* defined(BCMSDIO) */ + + /* Setup timeout if Beacons are lost and roam is off to report link down */ + bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + /* Setup assoc_retry_max count to reconnect target AP in dongle */ + bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#if defined(AP) && !defined(WLP2P) + /* Turn off MPC in AP mode */ + bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* defined(AP) && !defined(WLP2P) */ + +#ifdef MIMO_ANT_SETTING + dhd_sel_ant_from_file(dhd); +#endif /* MIMO_ANT_SETTING */ + +#if defined(SOFTAP) + if (ap_fw_loaded == TRUE) { + dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0); + } +#endif + +#if defined(KEEP_ALIVE) + { + /* Set Keep Alive : be sure to use FW with -keepalive */ + int res; + +#if defined(SOFTAP) + if (ap_fw_loaded == FALSE) +#endif + if (!(dhd->op_mode & + (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) 
{ + if ((res = dhd_keep_alive_onoff(dhd)) < 0) + DHD_ERROR(("%s set keepalive failed %d\n", + __FUNCTION__, res)); + } + } +#endif /* defined(KEEP_ALIVE) */ + +#ifdef USE_WL_TXBF + bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Set txbf returned (%d)\n", __FUNCTION__, ret)); + } +#endif /* USE_WL_TXBF */ + +#ifdef USE_WFA_CERT_CONF +#ifdef USE_WL_FRAMEBURST + if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) { + DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst)); + } +#endif /* USE_WL_FRAMEBURST */ +#ifdef DISABLE_FRAMEBURST_VSDB + g_frameburst = frameburst; +#endif /* DISABLE_FRAMEBURST_VSDB */ +#endif /* USE_WFA_CERT_CONF */ +#ifdef DISABLE_WL_FRAMEBURST_SOFTAP + /* Disable Framebursting for SoftAP */ + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + frameburst = 0; + } +#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */ + /* Set frameburst to value */ + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst, + sizeof(frameburst), TRUE, 0)) < 0) { + DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret)); + } +#if defined(CUSTOM_AMPDU_BA_WSIZE) + /* Set ampdu ba wsize to 64 or 16 */ +#ifdef CUSTOM_AMPDU_BA_WSIZE + ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE; +#endif + if (ampdu_ba_wsize != 0) { + bcm_mkiovar("ampdu_ba_wsize", (char *)&ampdu_ba_wsize, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n", + __FUNCTION__, ampdu_ba_wsize, ret)); + } + } +#endif + + iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL); + if (iov_buf == NULL) { + DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN)); + ret = BCME_NOMEM; + goto done; + } +#ifdef ENABLE_TEMP_THROTTLING + if (dhd->op_mode & DHD_FLAG_STA_MODE) { + memset(&temp_control, 0, sizeof(temp_control)); + temp_control.enable = 1; + temp_control.control_bit = TEMP_THROTTLE_CONTROL_BIT; + bcm_mkiovar("temp_throttle_control", (char *)&temp_control, + sizeof(wl_temp_control_t), iov_buf, WLC_IOCTL_SMLEN); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf, WLC_IOCTL_SMLEN, TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s Set temp_throttle_control failed %d\n", + __FUNCTION__, ret)); + } + } +#endif /* ENABLE_TEMP_THROTTLING */ +#if defined(CUSTOM_AMPDU_MPDU) + ampdu_mpdu = CUSTOM_AMPDU_MPDU; + if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) { + bcm_mkiovar("ampdu_mpdu", (char *)&ampdu_mpdu, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n", + __FUNCTION__, CUSTOM_AMPDU_MPDU, ret)); + } + } +#endif /* CUSTOM_AMPDU_MPDU */ + +#if defined(CUSTOM_AMPDU_RELEASE) + ampdu_release = CUSTOM_AMPDU_RELEASE; + if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) { + bcm_mkiovar("ampdu_release", (char *)&ampdu_release, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Set ampdu_release to %d failed %d\n", + __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret)); + } + } +#endif /* CUSTOM_AMPDU_RELEASE */ + +#if defined(CUSTOM_AMSDU_AGGSF) + amsdu_aggsf = CUSTOM_AMSDU_AGGSF; + if (amsdu_aggsf != 0) { + bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf)); + ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf),
TRUE, 0); + if (ret < 0) { + DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n", + __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret)); + } + } +#endif /* CUSTOM_AMSDU_AGGSF */ + +#ifdef CUSTOM_PSPRETEND_THR + /* Set pspretend_threshold for HostAPD */ + bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4, + iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n", + __FUNCTION__, ret)); + } +#endif + + bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, + sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret)); + } + + /* Read event_msgs mask */ + bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { + DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret)); + goto done; + } + bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN); + + /* Setup event_msgs */ + setbit(eventmask, WLC_E_SET_SSID); + setbit(eventmask, WLC_E_PRUNE); + setbit(eventmask, WLC_E_AUTH); + setbit(eventmask, WLC_E_AUTH_IND); + setbit(eventmask, WLC_E_ASSOC); + setbit(eventmask, WLC_E_REASSOC); + setbit(eventmask, WLC_E_REASSOC_IND); + if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE)) + setbit(eventmask, WLC_E_DEAUTH); + setbit(eventmask, WLC_E_DEAUTH_IND); + setbit(eventmask, WLC_E_DISASSOC_IND); + setbit(eventmask, WLC_E_DISASSOC); + setbit(eventmask, WLC_E_JOIN); + setbit(eventmask, WLC_E_START); + setbit(eventmask, WLC_E_ASSOC_IND); + setbit(eventmask, WLC_E_PSK_SUP); + setbit(eventmask, WLC_E_LINK); + setbit(eventmask, WLC_E_MIC_ERROR); + setbit(eventmask, WLC_E_ASSOC_REQ_IE); + setbit(eventmask, WLC_E_ASSOC_RESP_IE); +#ifndef WL_CFG80211 + setbit(eventmask, WLC_E_PMKID_CACHE); + setbit(eventmask, WLC_E_TXFAIL); +#endif + setbit(eventmask, WLC_E_JOIN_START); + setbit(eventmask, WLC_E_SCAN_COMPLETE); +#ifdef DHD_DEBUG + setbit(eventmask, WLC_E_SCAN_CONFIRM_IND); +#endif +#ifdef WLMEDIA_HTSF + setbit(eventmask, WLC_E_HTSFSYNC); +#endif /* WLMEDIA_HTSF */ +#ifdef PNO_SUPPORT + setbit(eventmask, WLC_E_PFN_NET_FOUND); + setbit(eventmask, WLC_E_PFN_BEST_BATCHING); + setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND); + setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST); +#endif /* PNO_SUPPORT */ + /* enable dongle roaming event */ + setbit(eventmask, WLC_E_ROAM); + setbit(eventmask, WLC_E_BSSID); +#ifdef WLTDLS + setbit(eventmask, WLC_E_TDLS_PEER_EVENT); +#endif /* WLTDLS */ +#ifdef WL_CFG80211 + setbit(eventmask, WLC_E_ESCAN_RESULT); + setbit(eventmask, WLC_E_AP_STARTED); + if (dhd->op_mode & DHD_FLAG_P2P_MODE) { + setbit(eventmask, WLC_E_ACTION_FRAME_RX); + setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE); + } +#endif /* WL_CFG80211 */ + +#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) + if (dhd_logtrace_from_file(dhd)) { + setbit(eventmask, WLC_E_TRACE); + } else { + clrbit(eventmask, WLC_E_TRACE); + } +#elif defined(SHOW_LOGTRACE) + setbit(eventmask, WLC_E_TRACE); +#else + clrbit(eventmask, WLC_E_TRACE); +#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */ + + setbit(eventmask, WLC_E_CSA_COMPLETE_IND); +#ifdef DHD_LOSSLESS_ROAMING + setbit(eventmask, WLC_E_ROAM_PREP); +#endif +#ifdef CUSTOM_EVENT_PM_WAKE + setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT); +#endif /* CUSTOM_EVENT_PM_WAKE */ +#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) +
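/* Editor's note: dhd_update_flow_prio_map() selects how 802.1D priorities are mapped onto PCIe flow rings; DHD_FLOW_PRIO_LLR_MAP is the lossless-roaming variant of that map. */ +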
dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP); +#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */ + + /* Write updated Event mask */ + bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret)); + goto done; + } + + /* make up event mask ext message iovar for events beyond the first 128 */ + msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE; + eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL); + if (eventmask_msg == NULL) { + DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen)); + ret = BCME_NOMEM; + goto done; + } + bzero(eventmask_msg, msglen); + eventmask_msg->ver = EVENTMSGS_VER; + eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY; + + /* Read event_msgs_ext mask */ + bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN); + ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0); + if (ret2 == 0) { /* event_msgs_ext is supported */ + bcopy(iov_buf, eventmask_msg, msglen); +#ifdef GSCAN_SUPPORT + setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT); + setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE); + setbit(eventmask_msg->mask, WLC_E_PFN_SWC); +#endif /* GSCAN_SUPPORT */ +#ifdef BT_WIFI_HANDOVER + setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ); +#endif /* BT_WIFI_HANDOVER */ + + /* Write updated Event mask */ + eventmask_msg->ver = EVENTMSGS_VER; + eventmask_msg->command = EVENTMSGS_SET_MASK; + eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY; + bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, + msglen, iov_buf, WLC_IOCTL_SMLEN); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, + iov_buf, WLC_IOCTL_SMLEN, TRUE, 0)) < 0) { + DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret)); + goto done; + } + } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) { + /* Skip for BCME_UNSUPPORTED or BCME_VERSION */ + DHD_ERROR(("%s event_msgs_ext not supported or version mismatch %d\n", + __FUNCTION__, ret2)); + } else { + DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2)); + ret = ret2; + goto done; + } + + dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time, + sizeof(scan_assoc_time), TRUE, 0); + dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time, + sizeof(scan_unassoc_time), TRUE, 0); + dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time, + sizeof(scan_passive_time), TRUE, 0); + +#ifdef ARP_OFFLOAD_SUPPORT + /* Set and enable ARP offload feature for STA only */ +#if defined(SOFTAP) + if (arpoe && !ap_fw_loaded) { +#else + if (arpoe) { +#endif + dhd_arp_offload_enable(dhd, TRUE); + dhd_arp_offload_set(dhd, dhd_arp_mode); + } else { + dhd_arp_offload_enable(dhd, FALSE); + dhd_arp_offload_set(dhd, 0); + } + dhd_arp_enable = arpoe; +#endif /* ARP_OFFLOAD_SUPPORT */ + +#ifdef PKT_FILTER_SUPPORT + /* Setup default definitions for pktfilter, enable in suspend */ + dhd->pktfilter_count = 6; + dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL; + dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL; + dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL; + /* apply APP pktfilter */ + dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806"; + + /* Setup filter to allow only unicast */ + dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00"; + + /* Add filter to
pass multicastDNS packet and NOT filter out as Broadcast */ + dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL; + +#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER + dhd->pktfilter_count = 4; + /* Setup filter to block broadcast and NAT Keepalive packets */ + /* discard all broadcast packets */ + dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff"; + /* discard NAT Keepalive packets */ + dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009"; + /* discard NAT Keepalive packets */ + dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009"; + dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL; +#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */ + +#if defined(SOFTAP) + if (ap_fw_loaded) { + dhd_enable_packet_filter(0, dhd); + } +#endif /* defined(SOFTAP) */ + dhd_set_packet_filter(dhd); +#endif /* PKT_FILTER_SUPPORT */ +#ifdef DISABLE_11N + bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret)); +#endif /* DISABLE_11N */ + +#ifdef ENABLE_BCN_LI_BCN_WAKEUP + bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, 4, iovbuf, sizeof(iovbuf)); + dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); +#endif /* ENABLE_BCN_LI_BCN_WAKEUP */ + /* query for 'ver' to get version info from firmware */ + memset(buf, 0, sizeof(buf)); + ptr = buf; + bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) + DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); + else { + bcmstrtok(&ptr, "\n", 0); + /* Print fw version info */ + DHD_ERROR(("Firmware version = %s\n", buf)); + strncpy(fw_version, buf, FW_VER_STR_LEN); +#if defined(BCMSDIO) + dhd_set_version_info(dhd, buf); +#endif /* defined(BCMSDIO) */ +#ifdef WRITE_WLANINFO + sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path); +#endif /* WRITE_WLANINFO */ + } + +#if defined(BCMSDIO) + dhd_txglom_enable(dhd, TRUE); +#endif /* defined(BCMSDIO) */ + +#if defined(BCMSDIO) +#ifdef PROP_TXSTATUS + if (disable_proptx || +#ifdef PROP_TXSTATUS_VSDB + /* enable WLFC only if the firmware is VSDB when it is in STA mode */ + (dhd->op_mode != DHD_FLAG_HOSTAP_MODE && + dhd->op_mode != DHD_FLAG_IBSS_MODE) || +#endif /* PROP_TXSTATUS_VSDB */ + FALSE) { + wlfc_enable = FALSE; + } + +#ifdef USE_WFA_CERT_CONF + if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) { + DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx)); + wlfc_enable = proptx; + } +#endif /* USE_WFA_CERT_CONF */ + +#ifndef DISABLE_11N + bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf)); + if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { + DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2)); + if (ret2 != BCME_UNSUPPORTED) + ret = ret2; + + if (ret == BCME_NOTDOWN) { + uint wl_down = 1; + ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, + sizeof(wl_down), TRUE, 0); + DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n", + __FUNCTION__, ret2, hostreorder)); + + bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, + iovbuf, sizeof(iovbuf)); + ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); + DHD_ERROR(("%s wl ampdu_hostreorder. 
ret --> %d\n", __FUNCTION__, ret2)); + if (ret2 != BCME_UNSUPPORTED) + ret = ret2; + } + if (ret2 != BCME_OK) + hostreorder = 0; + } +#endif /* DISABLE_11N */ + + + if (wlfc_enable) + dhd_wlfc_init(dhd); +#ifndef DISABLE_11N + else if (hostreorder) + dhd_wlfc_hostreorder_init(dhd); +#endif /* DISABLE_11N */ + +#endif /* PROP_TXSTATUS */ +#endif /* BCMSDIO */ +#ifdef PCIE_FULL_DONGLE + /* For full dongle we need all the packets at DHD to handle intra-BSS forwarding */ + if (FW_SUPPORTED(dhd, ap)) { + wl_ap_isolate = AP_ISOLATE_SENDUP_ALL; + bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) + DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); + } +#endif /* PCIE_FULL_DONGLE */ +#ifdef PNO_SUPPORT + if (!dhd->pno_state) { + dhd_pno_init(dhd); + } +#endif +#ifdef WL11U + dhd_interworking_enable(dhd); +#endif /* WL11U */ + +#ifdef SUPPORT_SENSORHUB + bcm_mkiovar("shub", (char *)&shub_enable, 4, iovbuf, sizeof(iovbuf)); + if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), + FALSE, 0)) < 0) { + DHD_ERROR(("%s failed to get shub enable information %d\n", + __FUNCTION__, ret2)); + dhd->info->shub_enable = 0; + } else { + memcpy(&shub_enable, iovbuf, sizeof(uint32)); + dhd->info->shub_enable = shub_enable; + DHD_ERROR(("%s: checking sensorhub enable %d\n", + __FUNCTION__, dhd->info->shub_enable)); + } +#endif /* SUPPORT_SENSORHUB */ +done: + + if (eventmask_msg) + kfree(eventmask_msg); + if (iov_buf) + kfree(iov_buf); + + return ret; +} + + +int +dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set) +{ + char buf[strlen(name) + 1 + cmd_len]; + int len = sizeof(buf); + wl_ioctl_t ioc; + int ret; + + len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len); + + memset(&ioc, 0, sizeof(ioc)); + + ioc.cmd = set ?
WLC_SET_VAR : WLC_GET_VAR; + ioc.buf = buf; + ioc.len = len; + ioc.set = set; + + ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len); + if (!set && ret >= 0) + memcpy(cmd_buf, buf, cmd_len); + + return ret; +} + +int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx) +{ + struct dhd_info *dhd = dhdp->info; + struct net_device *dev = NULL; + + ASSERT(dhd && dhd->iflist[ifidx]); + dev = dhd->iflist[ifidx]->net; + ASSERT(dev); + + if (netif_running(dev)) { + DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name)); + return BCME_NOTDOWN; + } + +#define DHD_MIN_MTU 1500 +#define DHD_MAX_MTU 1752 + + if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) { + DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu)); + return BCME_BADARG; + } + + dev->mtu = new_mtu; + return 0; +} + +#ifdef ARP_OFFLOAD_SUPPORT +/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */ +void +aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx) +{ + u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */ + int i; + int ret; + + bzero(ipv4_buf, sizeof(ipv4_buf)); + + /* display what we've got */ + ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx); + DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__)); +#ifdef AOE_DBG + dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */ +#endif + /* now we saved host_ip table, clr it in the dongle AOE */ + dhd_aoe_hostip_clr(dhd_pub, idx); + + if (ret) { + DHD_ERROR(("%s failed\n", __FUNCTION__)); + return; + } + + for (i = 0; i < MAX_IPV4_ENTRIES; i++) { + if (add && (ipv4_buf[i] == 0)) { + ipv4_buf[i] = ipa; + add = FALSE; /* added ipa to local table */ + DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n", + __FUNCTION__, i)); + } else if (ipv4_buf[i] == ipa) { + ipv4_buf[i] = 0; + DHD_ARPOE(("%s: removed IP:%x from temp table %d\n", + __FUNCTION__, ipa, i)); + } + + if (ipv4_buf[i] != 0) { + /* add back host_ip entries from our local cache */ + dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx); + DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n", + __FUNCTION__, ipv4_buf[i], i)); + } + } +#ifdef AOE_DBG + /* see the resulting hostip table */ + dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx); + DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__)); + dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */ +#endif +} + +/* + * Notification mechanism from kernel to our driver. This function is called by the Linux kernel + * whenever there is an event related to an IP address.
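+ * Editor's note: this handler is attached with register_inetaddr_notifier(); for NETDEV_UP/NETDEV_DOWN the notifier payload is a struct in_ifaddr, as cast below.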
+ * ptr : kernel provided pointer to IP address that has changed + */ +static int dhd_inetaddr_notifier_call(struct notifier_block *this, + unsigned long event, + void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + + dhd_info_t *dhd; + dhd_pub_t *dhd_pub; + int idx; + + if (!dhd_arp_enable) + return NOTIFY_DONE; + if (!ifa || !(ifa->ifa_dev->dev)) + return NOTIFY_DONE; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) + /* Filter notifications meant for non Broadcom devices */ + if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) && + (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) { +#if defined(WL_ENABLE_P2P_IF) + if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops)) +#endif /* WL_ENABLE_P2P_IF */ + return NOTIFY_DONE; + } +#endif /* LINUX_VERSION_CODE */ + + dhd = DHD_DEV_INFO(ifa->ifa_dev->dev); + if (!dhd) + return NOTIFY_DONE; + + dhd_pub = &dhd->pub; + + if (dhd_pub->arp_version == 1) { + idx = 0; + } else { + for (idx = 0; idx < DHD_MAX_IFS; idx++) { + if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev) + break; + } + if (idx < DHD_MAX_IFS) { + DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net, + dhd->iflist[idx]->name, dhd->iflist[idx]->idx)); + } else { + DHD_ERROR(("Cannot find ifidx for (%s), set to 0\n", ifa->ifa_label)); + idx = 0; + } + } + + switch (event) { + case NETDEV_UP: + DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n", + __FUNCTION__, ifa->ifa_label, ifa->ifa_address)); + + if (dhd->pub.busstate != DHD_BUS_DATA) { + DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__)); + if (dhd->pend_ipaddr) { + DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n", + __FUNCTION__, dhd->pend_ipaddr)); + } + dhd->pend_ipaddr = ifa->ifa_address; + break; + } + +#ifdef AOE_IP_ALIAS_SUPPORT + DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n", + __FUNCTION__)); + aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx); +#endif /* AOE_IP_ALIAS_SUPPORT */ + break; + + case NETDEV_DOWN: + DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n", + __FUNCTION__, ifa->ifa_label, ifa->ifa_address)); + dhd->pend_ipaddr = 0; +#ifdef AOE_IP_ALIAS_SUPPORT + DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n", + __FUNCTION__)); + aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx); +#else + dhd_aoe_hostip_clr(&dhd->pub, idx); + dhd_aoe_arp_clr(&dhd->pub, idx); +#endif /* AOE_IP_ALIAS_SUPPORT */ + break; + + default: + DHD_ARPOE(("%s: do nothing for [%s] Event: %lu\n", + __func__, ifa->ifa_label, event)); + break; + } + return NOTIFY_DONE; +} +#endif /* ARP_OFFLOAD_SUPPORT */ + +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) +/* Neighbor Discovery Offload: deferred handler */ +static void +dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event) +{ + struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data; + dhd_pub_t *pub = &((dhd_info_t *)dhd_info)->pub; + int ret; + + if (event != DHD_WQ_WORK_IPV6_NDO) { + DHD_ERROR(("%s: unexpected event \n", __FUNCTION__)); + return; + } + + if (!ndo_work) { + DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__)); + return; + } + + if (!pub) { + DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__)); + return; + } + + if (ndo_work->if_idx) { + DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx)); + return; + } + + switch (ndo_work->event) { + case NETDEV_UP: + DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__)); + ret = dhd_ndo_enable(pub, TRUE); + if (ret < 0) { + DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret)); + }
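+ /* Editor's note: dhd_ndo_add_ip() below programs the host IPv6 address into the dongle's neighbor-discovery offload table so the firmware can answer neighbor solicitations on the host's behalf. */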
+ + ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx); + if (ret < 0) { + DHD_ERROR(("%s: Adding host ip for NDO failed %d\n", + __FUNCTION__, ret)); + } + break; + case NETDEV_DOWN: + DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__)); + ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx); + if (ret < 0) { + DHD_ERROR(("%s: Removing host ip for NDO failed %d\n", + __FUNCTION__, ret)); + goto done; + } + + ret = dhd_ndo_enable(pub, FALSE); + if (ret < 0) { + DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret)); + goto done; + } + break; + default: + DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__)); + break; + } +done: + /* free ndo_work. alloced while scheduling the work */ + kfree(ndo_work); + + return; +} + +/* + * Neighbor Discovery Offload: Called when an interface + * is assigned with ipv6 address. + * Handles only primary interface + */ +static int dhd_inet6addr_notifier_call(struct notifier_block *this, + unsigned long event, + void *ptr) +{ + dhd_info_t *dhd; + dhd_pub_t *dhd_pub; + struct inet6_ifaddr *inet6_ifa = ptr; + struct in6_addr *ipv6_addr = &inet6_ifa->addr; + struct ipv6_work_info_t *ndo_info; + int idx = 0; /* REVISIT */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) + /* Filter notifications meant for non Broadcom devices */ + if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) { + return NOTIFY_DONE; + } +#endif /* LINUX_VERSION_CODE */ + + dhd = DHD_DEV_INFO(inet6_ifa->idev->dev); + if (!dhd) + return NOTIFY_DONE; + + if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev) + return NOTIFY_DONE; + dhd_pub = &dhd->pub; + + if (!FW_SUPPORTED(dhd_pub, ndoe)) + return NOTIFY_DONE; + + ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC); + if (!ndo_info) { + DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__)); + return NOTIFY_DONE; + } + + ndo_info->event = event; + ndo_info->if_idx = idx; + memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN); + + /* defer the work to thread as it may block kernel */ + dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO, + dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW); + return NOTIFY_DONE; +} +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ + +int +dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock) +{ + dhd_info_t *dhd = (dhd_info_t *)dhdp->info; + dhd_if_t *ifp; + struct net_device *net = NULL; + int err = 0; + uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 }; + + DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx)); + + ASSERT(dhd && dhd->iflist[ifidx]); + ifp = dhd->iflist[ifidx]; + net = ifp->net; + ASSERT(net && (ifp->idx == ifidx)); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + ASSERT(!net->open); + net->get_stats = dhd_get_stats; + net->do_ioctl = dhd_ioctl_entry; + net->hard_start_xmit = dhd_start_xmit; + net->set_mac_address = dhd_set_mac_address; + net->set_multicast_list = dhd_set_multicast_list; + net->open = net->stop = NULL; +#else + ASSERT(!net->netdev_ops); + net->netdev_ops = &dhd_ops_virt; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */ + + /* Ok, link into the network layer... 
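+ * Editor's note: netdev_ops was initialized to dhd_ops_virt above; ifidx 0 is switched to dhd_ops_pri just below so that open/stop on the primary interface control the bus.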
*/ + if (ifidx == 0) { + /* + * device functions for the primary interface only + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) + net->open = dhd_open; + net->stop = dhd_stop; +#else + net->netdev_ops = &dhd_ops_pri; +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */ + if (!ETHER_ISNULLADDR(dhd->pub.mac.octet)) + memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN); + } else { + /* + * We have to use the primary MAC for virtual interfaces + */ + memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN); + /* + * Android sets the locally administered bit to indicate that this is a + * portable hotspot. This will not work in simultaneous AP/STA mode, + * nor with P2P. Need to set the Dongle's MAC address, and then use that. + */ + if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr, + ETHER_ADDR_LEN)) { + DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n", + __func__, net->name)); + temp_addr[0] |= 0x02; + } + } + + net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) + net->ethtool_ops = &dhd_ethtool_ops; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ + +#if defined(WL_WIRELESS_EXT) +#if WIRELESS_EXT < 19 + net->get_wireless_stats = dhd_get_wireless_stats; +#endif /* WIRELESS_EXT < 19 */ +#if WIRELESS_EXT > 12 + net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def; +#endif /* WIRELESS_EXT > 12 */ +#endif /* defined(WL_WIRELESS_EXT) */ + + dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net); + + memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN); + + if (ifidx == 0) + printf("%s\n", dhd_version); + + if (need_rtnl_lock) + err = register_netdev(net); + else + err = register_netdevice(net); + + if (err != 0) { + DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err)); + goto fail; + } + + + + printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name, +#if defined(CUSTOMER_HW4_DEBUG) + MAC2STRDBG(dhd->pub.mac.octet)); +#else + MAC2STRDBG(net->dev_addr)); +#endif /* CUSTOMER_HW4_DEBUG */ + +#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211) + wl_iw_iscan_set_scan_broadcast_prep(net, 1); +#endif + +#if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \ + KERNEL_VERSION(2, 6, 27)))) + if (ifidx == 0) { +#ifdef BCMLXSDMMC + up(&dhd_registration_sem); +#endif /* BCMLXSDMMC */ + if (!dhd_download_fw_on_driverload) { +#ifdef WL_CFG80211 + wl_terminate_event_handler(); +#endif /* WL_CFG80211 */ +#if defined(DHD_LB) && defined(DHD_LB_RXP) + __skb_queue_purge(&dhd->rx_pend_queue); +#endif /* DHD_LB && DHD_LB_RXP */ +#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS) + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF); +#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */ + dhd_net_bus_devreset(net, TRUE); +#ifdef BCMLXSDMMC + dhd_net_bus_suspend(net); +#endif /* BCMLXSDMMC */ + wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY); + } + } +#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */ + return 0; + +fail: +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) + net->open = NULL; +#else + net->netdev_ops = NULL; +#endif + return err; +} + +void +dhd_bus_detach(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhdp) { + dhd = (dhd_info_t *)dhdp->info; + if (dhd) { + + /* + * In case of Android cfg80211 driver, the bus is down in dhd_stop, + * calling stop again will cause SD read/write errors.
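+ * Editor's note: hence the busstate check below; the protocol and bus modules are only stopped while the bus is still up.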
+ */ + if (dhd->pub.busstate != DHD_BUS_DOWN) { + /* Stop the protocol module */ + dhd_prot_stop(&dhd->pub); + + /* Stop the bus module */ + dhd_bus_stop(dhd->pub.bus, TRUE); + } + +#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE) + dhd_bus_oob_intr_unregister(dhdp); +#endif + } + } +} + + +void dhd_detach(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + unsigned long flags; + int timer_valid = FALSE; + struct net_device *dev; + + if (!dhdp) + return; + + dhd = (dhd_info_t *)dhdp->info; + if (!dhd) + return; + + dev = dhd->iflist[0]->net; + + if (dev) { + rtnl_lock(); + if (dev->flags & IFF_UP) { + /* If IFF_UP is still up, it indicates that + * "ifconfig wlan0 down" hasn't been called. + * So invoke dev_close explicitly here to + * bring down the interface. + */ + DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n")); + dev_close(dev); + } + rtnl_unlock(); + } + + DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state)); + + dhd->pub.up = 0; + if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) { + /* Give sufficient time for threads to start running in case + * dhd_attach() has failed + */ + OSL_SLEEP(100); + } + +#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) +#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */ + +#ifdef PROP_TXSTATUS +#ifdef DHD_WLFC_THREAD + if (dhd->pub.wlfc_thread) { + kthread_stop(dhd->pub.wlfc_thread); + dhdp->wlfc_thread_go = TRUE; + wake_up_interruptible(&dhdp->wlfc_wqhead); + } + dhd->pub.wlfc_thread = NULL; +#endif /* DHD_WLFC_THREAD */ +#endif /* PROP_TXSTATUS */ + + if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) { + + dhd_bus_detach(dhdp); +#ifdef BCMPCIE + if (is_reboot == SYS_RESTART) { + extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata; + if (dhd_wifi_platdata && !dhdp->dongle_reset) { + dhdpcie_bus_clock_stop(dhdp->bus); + wifi_platform_set_power(dhd_wifi_platdata->adapters, + FALSE, WIFI_TURNOFF_DELAY); + } + } +#endif /* BCMPCIE */ +#ifndef PCIE_FULL_DONGLE + if (dhdp->prot) + dhd_prot_detach(dhdp); +#endif + } + +#ifdef ARP_OFFLOAD_SUPPORT + if (dhd_inetaddr_notifier_registered) { + dhd_inetaddr_notifier_registered = FALSE; + unregister_inetaddr_notifier(&dhd_inetaddr_notifier); + } +#endif /* ARP_OFFLOAD_SUPPORT */ +#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT) + if (dhd_inet6addr_notifier_registered) { + dhd_inet6addr_notifier_registered = FALSE; + unregister_inet6addr_notifier(&dhd_inet6addr_notifier); + } +#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */ +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) { + if (dhd->early_suspend.suspend) + unregister_early_suspend(&dhd->early_suspend); + } +#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */ + +#if defined(WL_WIRELESS_EXT) + if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) { + /* Detach and unlink in the iw */ + wl_iw_detach(); + } +#endif /* defined(WL_WIRELESS_EXT) */ + + /* delete all interfaces, start with virtual */ + if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) { + int i = 1; + dhd_if_t *ifp; + + /* Cleanup virtual interfaces */ + dhd_net_if_lock_local(dhd); + for (i = 1; i < DHD_MAX_IFS; i++) { + if (dhd->iflist[i]) + dhd_remove_if(&dhd->pub, i, TRUE); + } + dhd_net_if_unlock_local(dhd); + + /* delete primary interface 0 */ + ifp = dhd->iflist[0]; + ASSERT(ifp); + ASSERT(ifp->net); + if (ifp && ifp->net) { + + + + /* in unregister_netdev case, the interface gets freed by net->destructor + * (which is set to free_netdev) + */ + if
(ifp->net->reg_state == NETREG_UNINITIALIZED) { + free_netdev(ifp->net); + } else { +#ifdef SET_RPS_CPUS + custom_rps_map_clear(ifp->net->_rx); +#endif /* SET_RPS_CPUS */ + netif_tx_disable(ifp->net); + unregister_netdev(ifp->net); + } + ifp->net = NULL; +#ifdef DHD_WMF + dhd_wmf_cleanup(dhdp, 0); +#endif /* DHD_WMF */ +#ifdef DHD_L2_FILTER + bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, + NULL, FALSE, dhdp->tickcnt); + deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table); + ifp->phnd_arp_table = NULL; +#endif /* DHD_L2_FILTER */ + + dhd_if_del_sta_list(ifp); + + MFREE(dhd->pub.osh, ifp, sizeof(*ifp)); + dhd->iflist[0] = NULL; + } + } + + /* Clear the watchdog timer */ + DHD_GENERAL_LOCK(&dhd->pub, flags); + timer_valid = dhd->wd_timer_valid; + dhd->wd_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(&dhd->pub, flags); + if (timer_valid) + del_timer_sync(&dhd->timer); + DHD_DISABLE_RUNTIME_PM(&dhd->pub); + + if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) { +#ifdef DHD_PCIE_RUNTIMEPM + if (dhd->thr_rpm_ctl.thr_pid >= 0) { + PROC_STOP(&dhd->thr_rpm_ctl); + } +#endif /* DHD_PCIE_RUNTIMEPM */ + if (dhd->thr_wdt_ctl.thr_pid >= 0) { + PROC_STOP(&dhd->thr_wdt_ctl); + } + + if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) { + PROC_STOP(&dhd->thr_rxf_ctl); + } + + if (dhd->thr_dpc_ctl.thr_pid >= 0) { + PROC_STOP(&dhd->thr_dpc_ctl); + } else { + tasklet_kill(&dhd->tasklet); +#ifdef DHD_LB_RXP + __skb_queue_purge(&dhd->rx_pend_queue); +#endif /* DHD_LB_RXP */ + } + } + +#if defined(DHD_LB) + /* Kill the Load Balancing Tasklets */ +#if defined(DHD_LB_TXC) + tasklet_disable(&dhd->tx_compl_tasklet); + tasklet_kill(&dhd->tx_compl_tasklet); +#endif /* DHD_LB_TXC */ +#if defined(DHD_LB_RXC) + tasklet_disable(&dhd->rx_compl_tasklet); + tasklet_kill(&dhd->rx_compl_tasklet); +#endif /* DHD_LB_RXC */ + if (dhd->cpu_notifier.notifier_call != NULL) + unregister_cpu_notifier(&dhd->cpu_notifier); + dhd_cpumasks_deinit(dhd); +#endif /* DHD_LB */ + +#ifdef DHD_LOG_DUMP + dhd_log_dump_deinit(&dhd->pub); +#endif /* DHD_LOG_DUMP */ +#ifdef WL_CFG80211 + if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) { + wl_cfg80211_detach(NULL); + dhd_monitor_uninit(); + } +#endif + /* free deferred work queue */ + dhd_deferred_work_deinit(dhd->dhd_deferred_wq); + dhd->dhd_deferred_wq = NULL; + +#ifdef SHOW_LOGTRACE + if (dhd->event_data.fmts) + kfree(dhd->event_data.fmts); + if (dhd->event_data.raw_fmts) + kfree(dhd->event_data.raw_fmts); + if (dhd->event_data.raw_sstr) + kfree(dhd->event_data.raw_sstr); +#endif /* SHOW_LOGTRACE */ + +#ifdef PNO_SUPPORT + if (dhdp->pno_state) + dhd_pno_deinit(dhdp); +#endif +#if defined(CONFIG_PM_SLEEP) + if (dhd_pm_notifier_registered) { + unregister_pm_notifier(&dhd->pm_notifier); + dhd_pm_notifier_registered = FALSE; + } +#endif /* CONFIG_PM_SLEEP */ + +#ifdef DEBUG_CPU_FREQ + if (dhd->new_freq) + free_percpu(dhd->new_freq); + dhd->new_freq = NULL; + cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER); +#endif + if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) { + DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter)); +#ifdef CONFIG_HAS_WAKELOCK + dhd->wakelock_wd_counter = 0; + wake_lock_destroy(&dhd->wl_wdwake); +#endif /* CONFIG_HAS_WAKELOCK */ + DHD_OS_WAKE_LOCK_DESTROY(dhd); + } + + + +#ifdef DHDTCPACK_SUPPRESS + /* This will free all MEM allocated for TCPACK SUPPRESS */ + dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF); +#endif /* DHDTCPACK_SUPPRESS */ + +#ifdef PCIE_FULL_DONGLE + dhd_flow_rings_deinit(dhdp); + 
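/* Editor's note: with PCIE_FULL_DONGLE the protocol layer is detached here, after the flow rings, rather than in the !PCIE_FULL_DONGLE path earlier in this function. */ +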
if (dhdp->prot) + dhd_prot_detach(dhdp); +#endif + + + dhd_sysfs_exit(dhd); + dhd->pub.is_fw_download_done = FALSE; +} + + +void +dhd_free(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhdp) { + int i; + for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) { + if (dhdp->reorder_bufs[i]) { + reorder_info_t *ptr; + uint32 buf_size = sizeof(struct reorder_info); + + ptr = dhdp->reorder_bufs[i]; + + buf_size += ((ptr->max_idx + 1) * sizeof(void*)); + DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n", + i, ptr->max_idx, buf_size)); + + MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size); + dhdp->reorder_bufs[i] = NULL; + } + } + + dhd_sta_pool_fini(dhdp, DHD_MAX_STA); + + dhd = (dhd_info_t *)dhdp->info; + if (dhdp->soc_ram) { +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length); +#else + MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + dhdp->soc_ram = NULL; + } +#ifdef CACHE_FW_IMAGES + if (dhdp->cached_fw) { + MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize); + dhdp->cached_fw = NULL; + } + + if (dhdp->cached_nvram) { + MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE); + dhdp->cached_nvram = NULL; + } +#endif + /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */ + if (dhd && + dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE)) + MFREE(dhd->pub.osh, dhd, sizeof(*dhd)); + dhd = NULL; + } +} + +void +dhd_clear(dhd_pub_t *dhdp) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (dhdp) { + int i; +#ifdef DHDTCPACK_SUPPRESS + /* Clean up timer/data structure for any remaining/pending packet or timer. 
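+ * Editor's note: TCPACK suppression keeps a small table of in-flight TCP ACKs; it is drained here so no stale timer or packet reference survives dhd_clear().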
*/ + dhd_tcpack_info_tbl_clean(dhdp); +#endif /* DHDTCPACK_SUPPRESS */ + for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) { + if (dhdp->reorder_bufs[i]) { + reorder_info_t *ptr; + uint32 buf_size = sizeof(struct reorder_info); + + ptr = dhdp->reorder_bufs[i]; + + buf_size += ((ptr->max_idx + 1) * sizeof(void*)); + DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n", + i, ptr->max_idx, buf_size)); + + MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size); + dhdp->reorder_bufs[i] = NULL; + } + } + + dhd_sta_pool_clear(dhdp, DHD_MAX_STA); + + if (dhdp->soc_ram) { +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length); +#else + MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + dhdp->soc_ram = NULL; + } + } +} + +static void +dhd_module_cleanup(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhd_bus_unregister(); + + wl_android_exit(); + + dhd_wifi_platform_unregister_drv(); +} + +static void __exit +dhd_module_exit(void) +{ + dhd_buzzz_detach(); + dhd_module_cleanup(); + unregister_reboot_notifier(&dhd_reboot_notifier); +} + +static int __init +dhd_module_init(void) +{ + int err; + int retry = POWERUP_MAX_RETRY; + + DHD_ERROR(("%s in\n", __FUNCTION__)); + + dhd_buzzz_attach(); + + DHD_PERIM_RADIO_INIT(); + + + if (firmware_path[0] != '\0') { + strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN); + fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0'; + } + + if (nvram_path[0] != '\0') { + strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN); + nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0'; + } + + do { + err = dhd_wifi_platform_register_drv(); + if (!err) { + register_reboot_notifier(&dhd_reboot_notifier); + break; + } + else { + DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n", + __FUNCTION__, retry)); + strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN); + firmware_path[MOD_PARAM_PATHLEN-1] = '\0'; + strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN); + nvram_path[MOD_PARAM_PATHLEN-1] = '\0'; + } + } while (retry--); + + if (err) { + DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__)); + } else { + if (!dhd_download_fw_on_driverload) { + dhd_driver_init_done = TRUE; + } + } + + DHD_ERROR(("%s out\n", __FUNCTION__)); + + return err; +} + +static int +dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused) +{ + DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code)); + if (code == SYS_RESTART) { +#ifdef BCMPCIE + is_reboot = code; +#endif /* BCMPCIE */ + } + return NOTIFY_DONE; +} + + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) +#if defined(CONFIG_DEFERRED_INITCALLS) +#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \ + defined(CONFIG_ARCH_MSM8996) +deferred_module_init_sync(dhd_module_init); +#else +deferred_module_init(dhd_module_init); +#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 || + * CONFIG_ARCH_MSM8996 + */ +#elif defined(USE_LATE_INITCALL_SYNC) +late_initcall_sync(dhd_module_init); +#else +late_initcall(dhd_module_init); +#endif /* USE_LATE_INITCALL_SYNC */ +#else +module_init(dhd_module_init); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ + +module_exit(dhd_module_exit); + +/* + * OS specific functions required to implement DHD driver in OS independent way + */ +int +dhd_os_proto_block(dhd_pub_t *pub) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + DHD_PERIM_UNLOCK(pub); + + 
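/* Editor's note: proto_sem serializes protocol-layer ioctl transactions with the dongle; the PERIM lock is released first so other contexts can run while this thread sleeps on the semaphore. */ +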
down(&dhd->proto_sem); + + DHD_PERIM_LOCK(pub); + return 1; + } + + return 0; +} + +int +dhd_os_proto_unblock(dhd_pub_t *pub) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + up(&dhd->proto_sem); + return 1; + } + + return 0; +} + +void +dhd_os_dhdiovar_lock(dhd_pub_t *pub) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + mutex_lock(&dhd->dhd_iovar_mutex); + } +} + +void +dhd_os_dhdiovar_unlock(dhd_pub_t *pub) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + mutex_unlock(&dhd->dhd_iovar_mutex); + } +} + +unsigned int +dhd_os_get_ioctl_resp_timeout(void) +{ + return ((unsigned int)dhd_ioctl_timeout_msec); +} + +void +dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec) +{ + dhd_ioctl_timeout_msec = (int)timeout_msec; +} + +int +dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout; + + /* Convert timeout in milliseconds to jiffies */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec); +#else + timeout = dhd_ioctl_timeout_msec * HZ / 1000; +#endif + + DHD_PERIM_UNLOCK(pub); + + timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout); + + DHD_PERIM_LOCK(pub); + + return timeout; +} + +int +dhd_os_ioctl_resp_wake(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + wake_up(&dhd->ioctl_resp_wait); + return 0; +} + +int +dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout; + + /* Convert timeout in milliseconds to jiffies */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec); +#else + timeout = dhd_ioctl_timeout_msec * HZ / 1000; +#endif + + DHD_PERIM_UNLOCK(pub); + + timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout); + + DHD_PERIM_LOCK(pub); + + return timeout; +} + +int +dhd_os_d3ack_wake(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + wake_up(&dhd->d3ack_wait); + return 0; +} + +int +dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition) +{ + dhd_info_t * dhd = (dhd_info_t *)(pub->info); + int timeout; + + /* Wait for bus usage contexts to gracefully exit within some timeout value + * Set the timeout a little higher than dhd_ioctl_timeout_msec, + * so that the IOCTL timeout is not affected.
+ */ + /* Convert timeout in milliseconds to jiffies */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT); +#else + timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000; +#endif + + timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout); + + return timeout; +} + +int INLINE +dhd_os_busbusy_wake(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + /* Call wmb() to make sure the event value update is visible before waking up the waiter */ + OSL_SMP_WMB(); + wake_up(&dhd->dhd_bus_busy_state_wait); + return 0; +} + +void +dhd_os_wd_timer_extend(void *bus, bool extend) +{ + dhd_pub_t *pub = bus; + dhd_info_t *dhd = (dhd_info_t *)pub->info; + + if (extend) + dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL); + else + dhd_os_wd_timer(bus, dhd->default_wd_interval); +} + + +void +dhd_os_wd_timer(void *bus, uint wdtick) +{ + dhd_pub_t *pub = bus; + dhd_info_t *dhd = (dhd_info_t *)pub->info; + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!dhd) { + DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__)); + return; + } + + DHD_OS_WD_WAKE_LOCK(pub); + DHD_GENERAL_LOCK(pub, flags); + + /* don't start the wd until fw is loaded */ + if (pub->busstate == DHD_BUS_DOWN) { + DHD_GENERAL_UNLOCK(pub, flags); + if (!wdtick) + DHD_OS_WD_WAKE_UNLOCK(pub); + return; + } + + /* Totally stop the timer */ + if (!wdtick && dhd->wd_timer_valid == TRUE) { + dhd->wd_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(pub, flags); + del_timer_sync(&dhd->timer); + DHD_OS_WD_WAKE_UNLOCK(pub); + return; + } + + if (wdtick) { + DHD_OS_WD_WAKE_LOCK(pub); + dhd_watchdog_ms = (uint)wdtick; + /* Re-arm the timer at the last watchdog period */ + mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms)); + dhd->wd_timer_valid = TRUE; + } + DHD_GENERAL_UNLOCK(pub, flags); + DHD_OS_WD_WAKE_UNLOCK(pub); +} + +#ifdef DHD_PCIE_RUNTIMEPM +void +dhd_os_runtimepm_timer(void *bus, uint tick) +{ + dhd_pub_t *pub = bus; + dhd_info_t *dhd = (dhd_info_t *)pub->info; + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + DHD_GENERAL_LOCK(pub, flags); + + /* don't start the RPM until fw is loaded */ + if (pub->busstate == DHD_BUS_DOWN || + pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + DHD_GENERAL_UNLOCK(pub, flags); + return; + } + + /* If tick is non-zero, the request is to start the timer */ + if (tick) { + /* Start the timer only if it's not already running */ + if (dhd->rpm_timer_valid == FALSE) { + mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms)); + dhd->rpm_timer_valid = TRUE; + } + } else { + /* tick is zero, we have to stop the timer */ + /* Stop the timer only if it's running, otherwise we don't have to do anything */ + if (dhd->rpm_timer_valid == TRUE) { + dhd->rpm_timer_valid = FALSE; + DHD_GENERAL_UNLOCK(pub, flags); + del_timer_sync(&dhd->rpm_timer); + /* we have already released the lock, so just go to exit */ + goto exit; + } + } + + DHD_GENERAL_UNLOCK(pub, flags); +exit: + return; + +} + +#endif /* DHD_PCIE_RUNTIMEPM */ + +void * +dhd_os_open_image(char *filename) +{ + struct file *fp; + int size; + + fp = filp_open(filename, O_RDONLY, 0); + /* + * 2.6.11 (FC4) supports filp_open() but later revs don't? + * Alternative: + * fp = open_namei(AT_FDCWD, filename, O_RD, 0); + * ???
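+ * Editor's note: filp_open() is still the supported interface on current kernels; it returns an ERR_PTR() on failure (never NULL), hence the IS_ERR() check below.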
+ */ + if (IS_ERR(fp)) { + fp = NULL; + goto err; + } + + if (!S_ISREG(file_inode(fp)->i_mode)) { + DHD_ERROR(("%s: %s is not a regular file\n", __FUNCTION__, filename)); + filp_close(fp, NULL); + fp = NULL; + goto err; + } + + size = i_size_read(file_inode(fp)); + if (size <= 0) { + DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size)); + filp_close(fp, NULL); + fp = NULL; + goto err; + } + + DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size)); + +err: + return fp; +} + +int +dhd_os_get_image_block(char *buf, int len, void *image) +{ + struct file *fp = (struct file *)image; + int rdlen; + int size; + + if (!image) + return 0; + + size = i_size_read(file_inode(fp)); + rdlen = kernel_read(fp, buf, MIN(len, size), &fp->f_pos); + + if (len >= size && size != rdlen) { + return -EIO; + } + + /* kernel_read() has already advanced fp->f_pos by rdlen */ + + return rdlen; +} + +void +dhd_os_close_image(void *image) +{ + if (image) + filp_close((struct file *)image, NULL); +} + +void +dhd_os_sdlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + + if (dhd_dpc_prio >= 0) + down(&dhd->sdsem); + else + spin_lock_bh(&dhd->sdlock); +} + +void +dhd_os_sdunlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + + if (dhd_dpc_prio >= 0) + up(&dhd->sdsem); + else + spin_unlock_bh(&dhd->sdlock); +} + +void +dhd_os_sdlock_txq(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_lock_bh(&dhd->txqlock); +} + +void +dhd_os_sdunlock_txq(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_unlock_bh(&dhd->txqlock); +} + +void +dhd_os_sdlock_rxq(dhd_pub_t *pub) +{ +} + +void +dhd_os_sdunlock_rxq(dhd_pub_t *pub) +{ +} + +static void +dhd_os_rxflock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_lock_bh(&dhd->rxf_lock); + +} + +static void +dhd_os_rxfunlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + + dhd = (dhd_info_t *)(pub->info); + spin_unlock_bh(&dhd->rxf_lock); +} + +#ifdef DHDTCPACK_SUPPRESS +unsigned long +dhd_os_tcpacklock(dhd_pub_t *pub) +{ + dhd_info_t *dhd; + unsigned long flags = 0; + + dhd = (dhd_info_t *)(pub->info); + + if (dhd) { +#ifdef BCMSDIO + spin_lock_bh(&dhd->tcpack_lock); +#else + spin_lock_irqsave(&dhd->tcpack_lock, flags); +#endif /* BCMSDIO */ + } + + return flags; +} + +void +dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags) +{ + dhd_info_t *dhd; + +#ifdef BCMSDIO + BCM_REFERENCE(flags); +#endif /* BCMSDIO */ + + dhd = (dhd_info_t *)(pub->info); + + if (dhd) { +#ifdef BCMSDIO + spin_unlock_bh(&dhd->tcpack_lock); +#else + spin_unlock_irqrestore(&dhd->tcpack_lock, flags); +#endif /* BCMSDIO */ + } +} +#endif /* DHDTCPACK_SUPPRESS */ + +uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail) +{ + uint8* buf; + gfp_t flags = CAN_SLEEP() ?
GFP_KERNEL : GFP_ATOMIC; + + buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size); + if (buf == NULL && kmalloc_if_fail) + buf = kmalloc(size, flags); + + return buf; +} + +void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size) +{ +} + +#if defined(WL_WIRELESS_EXT) +struct iw_statistics * +dhd_get_wireless_stats(struct net_device *dev) +{ + int res = 0; + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (!dhd->pub.up) { + return NULL; + } + + res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats); + + if (res == 0) + return &dhd->iw.wstats; + else + return NULL; +} +#endif /* defined(WL_WIRELESS_EXT) */ + +static int +dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, size_t pktlen, + wl_event_msg_t *event, void **data) +{ + int bcmerror = 0; + ASSERT(dhd != NULL); + +#ifdef SHOW_LOGTRACE + bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, pktlen, event, data, &dhd->event_data); +#else + bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, pktlen, event, data, NULL); +#endif /* SHOW_LOGTRACE */ + + if (bcmerror != BCME_OK) + return (bcmerror); + +#if defined(WL_WIRELESS_EXT) + if (event->bsscfgidx == 0) { + /* + * Wireless ext is on primary interface only + */ + + ASSERT(dhd->iflist[*ifidx] != NULL); + ASSERT(dhd->iflist[*ifidx]->net != NULL); + + if (dhd->iflist[*ifidx]->net) { + wl_iw_event(dhd->iflist[*ifidx]->net, event, *data); + } + } +#endif /* defined(WL_WIRELESS_EXT) */ + +#ifdef WL_CFG80211 + ASSERT(dhd->iflist[*ifidx] != NULL); + ASSERT(dhd->iflist[*ifidx]->net != NULL); + if (dhd->iflist[*ifidx]->net) + wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data); +#endif /* defined(WL_CFG80211) */ + + return (bcmerror); +} + +/* send up locally generated event */ +void +dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data) +{ + switch (ntoh32(event->event_type)) { + + default: + break; + } +} + +#ifdef LOG_INTO_TCPDUMP +void +dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len) +{ + struct sk_buff *p, *skb; + uint32 pktlen; + int len; + dhd_if_t *ifp; + dhd_info_t *dhd; + uchar *skb_data; + int ifidx = 0; + struct ether_header eth; + + pktlen = sizeof(eth) + data_len; + dhd = dhdp->info; + + if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) { + ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32))); + + bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN); + bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN); + ETHER_TOGGLE_LOCALADDR(&eth.ether_shost); + eth.ether_type = hton16(ETHER_TYPE_BRCM); + + bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth)); + bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len); + skb = PKTTONATIVE(dhdp->osh, p); + skb_data = skb->data; + len = skb->len; + + ifidx = dhd_ifname2idx(dhd, "wlan0"); + ifp = dhd->iflist[ifidx]; + if (ifp == NULL) + ifp = dhd->iflist[0]; + + ASSERT(ifp); + skb->dev = ifp->net; + skb->protocol = eth_type_trans(skb, skb->dev); + skb->data = skb_data; + skb->len = len; + + /* Strip header, count, deliver upward */ + skb_pull(skb, ETH_HLEN); + + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, + __FUNCTION__, __LINE__); + /* Send the packet */ + if (in_interrupt()) { + netif_rx(skb); + } else { + netif_rx_ni(skb); + } + } + else { + /* Could not allocate an sk_buff */ + DHD_ERROR(("%s: unable to alloc sk_buff", __FUNCTION__)); + } +} +#endif /* LOG_INTO_TCPDUMP */ + +void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar) +{ +#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + struct dhd_info *dhdinfo = dhd->info; + +#if (LINUX_VERSION_CODE >=
KERNEL_VERSION(2, 6, 27)) + int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT); +#else + int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ + + dhd_os_sdunlock(dhd); + wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout); + dhd_os_sdlock(dhd); +#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */ + return; +} + +void dhd_wait_event_wakeup(dhd_pub_t *dhd) +{ +#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + struct dhd_info *dhdinfo = dhd->info; + if (waitqueue_active(&dhdinfo->ctrl_wait)) + wake_up(&dhdinfo->ctrl_wait); +#endif + return; +} + +#if defined(BCMSDIO) || defined(BCMPCIE) +int +dhd_net_bus_devreset(struct net_device *dev, uint8 flag) +{ + int ret; + + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (flag == TRUE) { + /* Issue wl down command before resetting the chip */ + if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) { + DHD_TRACE(("%s: wl down failed\n", __FUNCTION__)); + } +#ifdef PROP_TXSTATUS + if (dhd->pub.wlfc_enabled) + dhd_wlfc_deinit(&dhd->pub); +#endif /* PROP_TXSTATUS */ +#ifdef PNO_SUPPORT + if (dhd->pub.pno_state) + dhd_pno_deinit(&dhd->pub); +#endif + } + +#ifdef BCMSDIO + if (!flag) { + dhd_update_fw_nv_path(dhd); + /* update firmware and nvram path to sdio bus */ + dhd_bus_update_fw_nv_path(dhd->pub.bus, + dhd->fw_path, dhd->nv_path); + } +#endif /* BCMSDIO */ + + ret = dhd_bus_devreset(&dhd->pub, flag); + if (ret) { + DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret)); + return ret; + } + + return ret; +} + +#ifdef BCMSDIO +int +dhd_net_bus_suspend(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return dhd_bus_suspend(&dhd->pub); +} + +int +dhd_net_bus_resume(struct net_device *dev, uint8 stage) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return dhd_bus_resume(&dhd->pub, stage); +} + +#endif /* BCMSDIO */ +#endif /* BCMSDIO || BCMPCIE */ + +int net_os_set_suspend_disable(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) { + ret = dhd->pub.suspend_disable_flag; + dhd->pub.suspend_disable_flag = val; + } + return ret; +} + +int net_os_set_suspend(struct net_device *dev, int val, int force) +{ + int ret = 0; + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd) { +#ifdef CONFIG_MACH_UNIVERSAL7420 +#endif /* CONFIG_MACH_UNIVERSAL7420 */ +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) + ret = dhd_set_suspend(val, &dhd->pub); +#else + ret = dhd_suspend_resume_helper(dhd, val, force); +#endif +#ifdef WL_CFG80211 + wl_cfg80211_update_power_mode(dev); +#endif + } + return ret; +} + +int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd) + dhd->pub.suspend_bcn_li_dtim = val; + + return 0; +} + +#ifdef PKT_FILTER_SUPPORT +int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num) +{ +#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER + return 0; +#else + dhd_info_t *dhd = DHD_DEV_INFO(dev); + char *filterp = NULL; + int filter_id = 0; + int ret = 0; + + DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num)); + if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) + return ret; + if (num >= dhd->pub.pktfilter_count) + return -EINVAL; + switch (num) { + case DHD_BROADCAST_FILTER_NUM: + filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF"; + filter_id = 101; + break; + case DHD_MULTICAST4_FILTER_NUM: + filterp = "102 0 0 0 0xFFFFFF 0x01005E"; + 
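/* Editor's note: assuming the usual dhd_pktfilter_offload_set() format, these strings read "<id> <polarity> <type> <offset> <bitmask> <pattern>"; id 102 matches the 01:00:5E IPv4-multicast OUI at offset 0 of the destination MAC. */ +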
filter_id = 102; + break; + case DHD_MULTICAST6_FILTER_NUM: + filterp = "103 0 0 0 0xFFFF 0x3333"; + filter_id = 103; + break; + case DHD_MDNS_FILTER_NUM: + filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB"; + filter_id = 104; + break; + default: + return -EINVAL; + } + + /* Add filter */ + if (add_remove) { + dhd->pub.pktfilter[num] = filterp; + dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]); + } else { /* Delete filter */ + if (dhd->pub.pktfilter[num] != NULL) { + dhd_pktfilter_offload_delete(&dhd->pub, filter_id); + dhd->pub.pktfilter[num] = NULL; + } + } + return ret; +#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */ +} + +int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val) + +{ + int ret = 0; + + /* Packet filtering is set only if we still in early-suspend and + * we need either to turn it ON or turn it OFF + * We can always turn it OFF in case of early-suspend, but we turn it + * back ON only if suspend_disable_flag was not set + */ + if (dhdp && dhdp->up) { + if (dhdp->in_suspend) { + if (!val || (val && !dhdp->suspend_disable_flag)) + dhd_enable_packet_filter(val, dhdp); + } + } + return ret; +} + +/* function to enable/disable packet for Network device */ +int net_os_enable_packet_filter(struct net_device *dev, int val) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val)); + return dhd_os_enable_packet_filter(&dhd->pub, val); +} +#endif /* PKT_FILTER_SUPPORT */ + +int +dhd_dev_init_ioctl(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret; + + if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) + goto done; + +done: + return ret; +} + +int +dhd_dev_get_feature_set(struct net_device *dev) +{ + dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev); + dhd_pub_t *dhd = (&ptr->pub); + int feature_set = 0; + +#ifdef DYNAMIC_SWOOB_DURATION +#ifndef CUSTOM_INTR_WIDTH +#define CUSTOM_INTR_WIDTH 100 + int intr_width = 0; +#endif /* CUSTOM_INTR_WIDTH */ +#endif /* DYNAMIC_SWOOB_DURATION */ + if (!dhd) + return feature_set; + + if (FW_SUPPORTED(dhd, sta)) + feature_set |= WIFI_FEATURE_INFRA; + if (FW_SUPPORTED(dhd, dualband)) + feature_set |= WIFI_FEATURE_INFRA_5G; + if (FW_SUPPORTED(dhd, p2p)) + feature_set |= WIFI_FEATURE_P2P; + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) + feature_set |= WIFI_FEATURE_SOFT_AP; + if (FW_SUPPORTED(dhd, tdls)) + feature_set |= WIFI_FEATURE_TDLS; + if (FW_SUPPORTED(dhd, vsdb)) + feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL; + if (FW_SUPPORTED(dhd, nan)) { + feature_set |= WIFI_FEATURE_NAN; + /* NAN is essentail for d2d rtt */ + if (FW_SUPPORTED(dhd, rttd2d)) + feature_set |= WIFI_FEATURE_D2D_RTT; + } +#ifdef RTT_SUPPORT + feature_set |= WIFI_FEATURE_D2AP_RTT; +#endif /* RTT_SUPPORT */ +#ifdef LINKSTAT_SUPPORT + feature_set |= WIFI_FEATURE_LINKSTAT; +#endif /* LINKSTAT_SUPPORT */ + /* Supports STA + STA always */ + feature_set |= WIFI_FEATURE_ADDITIONAL_STA; +#ifdef PNO_SUPPORT + if (dhd_is_pno_supported(dhd)) { + feature_set |= WIFI_FEATURE_PNO; + feature_set |= WIFI_FEATURE_BATCH_SCAN; +#ifdef GSCAN_SUPPORT + feature_set |= WIFI_FEATURE_GSCAN; +#endif /* GSCAN_SUPPORT */ + } +#endif /* PNO_SUPPORT */ +#ifdef WL11U + feature_set |= WIFI_FEATURE_HOTSPOT; +#endif /* WL11U */ + return feature_set; +} + + +int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num) +{ + int feature_set_full, mem_needed; + int *ret; + + *num = 0; + mem_needed = sizeof(int) * MAX_FEATURE_SET_CONCURRRENT_GROUPS; + ret = (int *) kmalloc(mem_needed, GFP_KERNEL); + if (!ret) { + DHD_ERROR(("%s: failed to 
allocate %d bytes\n", __FUNCTION__, + mem_needed)); + return ret; + } + + feature_set_full = dhd_dev_get_feature_set(dev); + + ret[0] = (feature_set_full & WIFI_FEATURE_INFRA) | + (feature_set_full & WIFI_FEATURE_INFRA_5G) | + (feature_set_full & WIFI_FEATURE_NAN) | + (feature_set_full & WIFI_FEATURE_D2D_RTT) | + (feature_set_full & WIFI_FEATURE_D2AP_RTT) | + (feature_set_full & WIFI_FEATURE_PNO) | + (feature_set_full & WIFI_FEATURE_BATCH_SCAN) | + (feature_set_full & WIFI_FEATURE_GSCAN) | + (feature_set_full & WIFI_FEATURE_HOTSPOT) | + (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA) | + (feature_set_full & WIFI_FEATURE_EPR); + + ret[1] = (feature_set_full & WIFI_FEATURE_INFRA) | + (feature_set_full & WIFI_FEATURE_INFRA_5G) | + /* Not yet verified NAN with P2P */ + /* (feature_set_full & WIFI_FEATURE_NAN) | */ + (feature_set_full & WIFI_FEATURE_P2P) | + (feature_set_full & WIFI_FEATURE_D2AP_RTT) | + (feature_set_full & WIFI_FEATURE_D2D_RTT) | + (feature_set_full & WIFI_FEATURE_EPR); + + ret[2] = (feature_set_full & WIFI_FEATURE_INFRA) | + (feature_set_full & WIFI_FEATURE_INFRA_5G) | + (feature_set_full & WIFI_FEATURE_NAN) | + (feature_set_full & WIFI_FEATURE_D2D_RTT) | + (feature_set_full & WIFI_FEATURE_D2AP_RTT) | + (feature_set_full & WIFI_FEATURE_TDLS) | + (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL) | + (feature_set_full & WIFI_FEATURE_EPR); + *num = MAX_FEATURE_SET_CONCURRRENT_GROUPS; + + return ret; +} +#ifdef CUSTOM_FORCE_NODFS_FLAG +int +dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (nodfs) + dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG; + else + dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG; + dhd->pub.force_country_change = TRUE; + return 0; +} +#endif /* CUSTOM_FORCE_NODFS_FLAG */ +#ifdef PNO_SUPPORT +/* Linux wrapper to call common dhd_pno_stop_for_ssid */ +int +dhd_dev_pno_stop_for_ssid(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + return (dhd_pno_stop_for_ssid(&dhd->pub)); +} +/* Linux wrapper to call common dhd_pno_set_for_ssid */ +int +dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid, + uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr, + pno_repeat, pno_freq_expo_max, channel_list, nchan)); +} + +/* Linux wrapper to call common dhd_pno_enable */ +int +dhd_dev_pno_enable(struct net_device *dev, int enable) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + return (dhd_pno_enable(&dhd->pub, enable)); +} + +/* Linux wrapper to call common dhd_pno_set_for_hotlist */ +int +dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params)); +} +/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */ +int +dhd_dev_pno_stop_for_batch(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_stop_for_batch(&dhd->pub)); +} +/* Linux wrapper to call common dhd_dev_pno_set_for_batch */ +int +dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_set_for_batch(&dhd->pub, batch_params)); +} +/* Linux wrapper to call common dhd_dev_pno_get_for_batch */ +int +dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, 
int bufsize) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL)); +} +/* Linux wrapper to call common dhd_pno_set_mac_oui */ +int +dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return (dhd_pno_set_mac_oui(&dhd->pub, oui)); +} +#endif /* PNO_SUPPORT */ + +#if defined(PNO_SUPPORT) +#ifdef GSCAN_SUPPORT +/* Linux wrapper to call common dhd_pno_set_cfg_gscan */ +int +dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *buf, uint8 flush) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush)); +} + +/* Linux wrapper to call common dhd_pno_get_gscan */ +void * +dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *info, uint32 *len) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_get_gscan(&dhd->pub, type, info, len)); +} + +/* Linux wrapper to call common dhd_wait_batch_results_complete */ +void +dhd_dev_wait_batch_results_complete(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_wait_batch_results_complete(&dhd->pub)); +} + +/* Linux wrapper to call common dhd_pno_lock_batch_results */ +void +dhd_dev_pno_lock_access_batch_results(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_lock_batch_results(&dhd->pub)); +} +/* Linux wrapper to call common dhd_pno_unlock_batch_results */ +void +dhd_dev_pno_unlock_access_batch_results(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_unlock_batch_results(&dhd->pub)); +} + +/* Linux wrapper to call common dhd_pno_initiate_gscan_request */ +int +dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush)); +} + +/* Linux wrapper to call common dhd_pno_enable_full_scan_result */ +int +dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag)); +} + +/* Linux wrapper to call common dhd_handle_swc_evt */ +void * +dhd_dev_swc_scan_event(struct net_device *dev, const void *data, int *send_evt_bytes) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_handle_swc_evt(&dhd->pub, data, send_evt_bytes)); +} + +/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */ +void * +dhd_dev_hotlist_scan_event(struct net_device *dev, + const void *data, int *send_evt_bytes, hotlist_type_t type) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type)); +} + +/* Linux wrapper to call common dhd_process_full_gscan_result */ +void * +dhd_dev_process_full_gscan_result(struct net_device *dev, +const void *data, int *send_evt_bytes) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_process_full_gscan_result(&dhd->pub, data, send_evt_bytes)); +} + +void +dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type); + + return; +} + +int +dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev) +{ + dhd_info_t *dhd = 
*(dhd_info_t **)netdev_priv(dev); + + return (dhd_gscan_batch_cache_cleanup(&dhd->pub)); +} + +/* Linux wrapper to call common dhd_retreive_batch_scan_results */ +int +dhd_dev_retrieve_batch_scan(struct net_device *dev) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_retreive_batch_scan_results(&dhd->pub)); +} +#endif /* GSCAN_SUPPORT */ +#endif +#ifdef RTT_SUPPORT +/* Linux wrapper to call common dhd_pno_set_cfg_gscan */ +int +dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_set_cfg(&dhd->pub, buf)); +} +int +dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt)); +} +int +dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn)); +} +int +dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn)); +} + +int +dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa) +{ + dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + + return (dhd_rtt_capability(&dhd->pub, capa)); +} + +#endif /* RTT_SUPPORT */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +static void dhd_hang_process(void *dhd_info, void *event_info, u8 event) +{ + dhd_info_t *dhd; + struct net_device *dev; + + dhd = (dhd_info_t *)dhd_info; + dev = dhd->iflist[0]->net; + + if (dev) { +#if defined(WL_WIRELESS_EXT) + wl_iw_send_priv_event(dev, "HANG"); +#endif +#if defined(WL_CFG80211) + wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED); +#endif + } +} + +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +extern dhd_pub_t *link_recovery; +void dhd_host_recover_link(void) +{ + DHD_ERROR(("****** %s ******\n", __FUNCTION__)); + link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN; + dhd_bus_set_linkdown(link_recovery, TRUE); + dhd_os_send_hang_message(link_recovery); +} +EXPORT_SYMBOL(dhd_host_recover_link); +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ + +int dhd_os_send_hang_message(dhd_pub_t *dhdp) +{ + int ret = 0; + if (dhdp) { + if (!dhdp->hang_was_sent) { + dhdp->hang_was_sent = 1; + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp, + DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH); + } + } + return ret; +} + +int net_os_send_hang_message(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) { + /* Report FW problem when enabled */ + if (dhd->pub.hang_report) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + ret = dhd_os_send_hang_message(&dhd->pub); +#else + ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED); +#endif + } else { + DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n", + __FUNCTION__)); + /* Enforce bus down to stop any future traffic */ + dhd->pub.busstate = DHD_BUS_DOWN; + } + } + return ret; +} + +int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num) +{ + dhd_info_t *dhd = NULL; + dhd_pub_t *dhdp = NULL; + int reason; + + dhd = DHD_DEV_INFO(dev); + if (dhd) { + dhdp = &dhd->pub; + } + + if (!dhd || !dhdp) { + return 0; + } + + reason = bcm_strtoul(string_num, NULL, 0); + DHD_INFO(("%s: Enter, 
reason=0x%x\n", __FUNCTION__, reason)); + + if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) { + reason = 0; + } + + dhdp->hang_reason = reason; + + return net_os_send_hang_message(dev); +} +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */ + + +int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + return wifi_platform_set_power(dhd->adapter, on, delay_msec); +} + +bool dhd_force_country_change(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (dhd && dhd->pub.up) + return dhd->pub.force_country_change; + return FALSE; +} +void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code, + wl_country_t *cspec) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); +#ifdef CUSTOM_COUNTRY_CODE + get_customized_country_code(dhd->adapter, country_iso_code, cspec, + dhd->pub.dhd_cflags); +#else + get_customized_country_code(dhd->adapter, country_iso_code, cspec); +#endif /* CUSTOM_COUNTRY_CODE */ +} +void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + if (dhd && dhd->pub.up) { + memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t)); +#ifdef WL_CFG80211 + wl_update_wiphybands(NULL, notify); +#endif + } +} + +void dhd_bus_band_set(struct net_device *dev, uint band) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + if (dhd && dhd->pub.up) { +#ifdef WL_CFG80211 + wl_update_wiphybands(NULL, true); +#endif + } +} + +int dhd_net_set_fw_path(struct net_device *dev, char *fw) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + + if (!fw || fw[0] == '\0') + return -EINVAL; + + strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1); + dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0'; + +#if defined(SOFTAP) + if (strstr(fw, "apsta") != NULL) { + DHD_INFO(("GOT APSTA FIRMWARE\n")); + ap_fw_loaded = TRUE; + } else { + DHD_INFO(("GOT STA FIRMWARE\n")); + ap_fw_loaded = FALSE; + } +#endif + return 0; +} + +void dhd_net_if_lock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + dhd_net_if_lock_local(dhd); +} + +void dhd_net_if_unlock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + dhd_net_if_unlock_local(dhd); +} + +static void dhd_net_if_lock_local(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + if (dhd) + mutex_lock(&dhd->dhd_net_if_mutex); +#endif +} + +static void dhd_net_if_unlock_local(dhd_info_t *dhd) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + if (dhd) + mutex_unlock(&dhd->dhd_net_if_mutex); +#endif +} + +static void dhd_suspend_lock(dhd_pub_t *pub) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + if (dhd) + mutex_lock(&dhd->dhd_suspend_mutex); +#endif +} + +static void dhd_suspend_unlock(dhd_pub_t *pub) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + if (dhd) + mutex_unlock(&dhd->dhd_suspend_mutex); +#endif +} + +unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags = 0; + + if (dhd) + spin_lock_irqsave(&dhd->dhd_lock, flags); + + return flags; +} + +void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) + spin_unlock_irqrestore(&dhd->dhd_lock, flags); +} + +/* Linux specific multipurpose spinlock API */ +void * +dhd_os_spin_lock_init(osl_t *osh) +{ + /* 
Adding 4 bytes since the sizeof(spinlock_t) could be 0 */ + /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */ + /* and this results in kernel asserts in internal builds */ + spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4); + if (lock) + spin_lock_init(lock); + return ((void *)lock); +} +void +dhd_os_spin_lock_deinit(osl_t *osh, void *lock) +{ + if (lock) + MFREE(osh, lock, sizeof(spinlock_t) + 4); +} +unsigned long +dhd_os_spin_lock(void *lock) +{ + unsigned long flags = 0; + + if (lock) + spin_lock_irqsave((spinlock_t *)lock, flags); + + return flags; +} +void +dhd_os_spin_unlock(void *lock, unsigned long flags) +{ + if (lock) + spin_unlock_irqrestore((spinlock_t *)lock, flags); +} + +static int +dhd_get_pend_8021x_cnt(dhd_info_t *dhd) +{ + return (atomic_read(&dhd->pend_8021x_cnt)); +} + +#define MAX_WAIT_FOR_8021X_TX 100 + +int +dhd_wait_pend8021x(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int timeout = msecs_to_jiffies(10); + int ntimes = MAX_WAIT_FOR_8021X_TX; + int pend = dhd_get_pend_8021x_cnt(dhd); + + while (ntimes && pend) { + if (pend) { + set_current_state(TASK_INTERRUPTIBLE); + DHD_PERIM_UNLOCK(&dhd->pub); + schedule_timeout(timeout); + DHD_PERIM_LOCK(&dhd->pub); + set_current_state(TASK_RUNNING); + ntimes--; + } + pend = dhd_get_pend_8021x_cnt(dhd); + } + if (ntimes == 0) + { + atomic_set(&dhd->pend_8021x_cnt, 0); + DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__)); + } + return pend; +} + +#ifdef DHD_DEBUG +static void +dhd_convert_memdump_type_to_str(uint32 type, char *buf) +{ + char *type_str = NULL; + + switch (type) { + case DUMP_TYPE_RESUMED_ON_TIMEOUT: + type_str = "resumed_on_timeout"; + break; + case DUMP_TYPE_D3_ACK_TIMEOUT: + type_str = "D3_ACK_timeout"; + break; + case DUMP_TYPE_DONGLE_TRAP: + type_str = "Dongle_Trap"; + break; + case DUMP_TYPE_MEMORY_CORRUPTION: + type_str = "Memory_Corruption"; + break; + case DUMP_TYPE_PKTID_AUDIT_FAILURE: + type_str = "PKTID_AUDIT_Fail"; + break; + case DUMP_TYPE_SCAN_TIMEOUT: + type_str = "SCAN_timeout"; + break; + case DUMP_TYPE_SCAN_BUSY: + type_str = "SCAN_Busy"; + break; + case DUMP_TYPE_BY_SYSDUMP: + type_str = "BY_SYSDUMP"; + break; + case DUMP_TYPE_BY_LIVELOCK: + type_str = "BY_LIVELOCK"; + break; + case DUMP_TYPE_AP_LINKUP_FAILURE: + type_str = "BY_AP_LINK_FAILURE"; + break; + default: + type_str = "Unknown_type"; + break; + } + + strncpy(buf, type_str, strlen(type_str)); + buf[strlen(type_str)] = 0; +} + +int +write_to_file(dhd_pub_t *dhd, uint8 *buf, int size) +{ + int ret = 0; + struct file *fp = NULL; + mm_segment_t old_fs; + loff_t pos = 0; + char memdump_path[128]; + char memdump_type[32]; + struct timeval curtime; + uint32 file_mode; + + /* change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + + /* Init file name */ + memset(memdump_path, 0, sizeof(memdump_path)); + memset(memdump_type, 0, sizeof(memdump_type)); + do_gettimeofday(&curtime); + dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type); +#ifdef CUSTOMER_HW4_DEBUG + snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld", + DHD_COMMON_DUMP_PATH "mem_dump", memdump_type, + (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); + file_mode = O_CREAT | O_WRONLY | O_SYNC; +#elif defined(CUSTOMER_HW2) + snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld", + "/data/misc/wifi/mem_dump", memdump_type, + (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); + file_mode = O_CREAT | O_WRONLY | O_SYNC; +#else + snprintf(memdump_path, sizeof(memdump_path), 
"%s_%s_%ld.%ld", + "/installmedia/mem_dump", memdump_type, + (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); + /* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are + * calling BUG_ON immediately after collecting the socram dump. + * So the file write operation should directly write the contents into the + * file instead of caching it. O_TRUNC flag ensures that file will be re-written + * instead of appending. + */ + file_mode = O_CREAT | O_WRONLY | O_DIRECT | O_SYNC | O_TRUNC; +#endif /* CUSTOMER_HW4_DEBUG */ + + /* print SOCRAM dump file path */ + DHD_ERROR(("%s: memdump_path = %s\n", __FUNCTION__, memdump_path)); + + /* open file to write */ + fp = filp_open(memdump_path, file_mode, 0644); + if (IS_ERR(fp)) { + ret = PTR_ERR(fp); + printf("%s: open file error, err = %d\n", __FUNCTION__, ret); + goto exit; + } + + /* Write buf to file */ + fp->f_op->write(fp, buf, size, &pos); + +exit: + /* close file before return */ + if (!ret) + filp_close(fp, current->files); + + /* restore previous address limit */ + set_fs(old_fs); + + /* free buf before return */ +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP) + DHD_OS_PREFREE(dhd, buf, size); +#else + MFREE(dhd->osh, buf, size); +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */ + + return ret; +} +#endif /* DHD_DEBUG */ + +int dhd_os_wake_lock_timeout(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ? + dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable; +#ifdef CONFIG_HAS_WAKELOCK + if (dhd->wakelock_rx_timeout_enable) + wake_lock_timeout(&dhd->wl_rxwake, + msecs_to_jiffies(dhd->wakelock_rx_timeout_enable)); + if (dhd->wakelock_ctrl_timeout_enable) + wake_lock_timeout(&dhd->wl_ctrlwake, + msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable)); +#endif + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int net_os_wake_lock_timeout(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock_timeout(&dhd->pub); + return ret; +} + +int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (val > dhd->wakelock_rx_timeout_enable) + dhd->wakelock_rx_timeout_enable = val; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return 0; +} + +int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (val > dhd->wakelock_ctrl_timeout_enable) + dhd->wakelock_ctrl_timeout_enable = val; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return 0; +} + +int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + dhd->wakelock_ctrl_timeout_enable = 0; +#ifdef CONFIG_HAS_WAKELOCK + if (wake_lock_active(&dhd->wl_ctrlwake)) + wake_unlock(&dhd->wl_ctrlwake); +#endif + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return 0; +} + +int 
net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
+{
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+    int ret = 0;
+
+    if (dhd)
+        ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
+    return ret;
+}
+
+int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
+{
+    dhd_info_t *dhd = DHD_DEV_INFO(dev);
+    int ret = 0;
+
+    if (dhd)
+        ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
+    return ret;
+}
+
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#include <linux/hashtable.h>
+#else
+#include <linux/hash.h>
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+/* Define 2^5 = 32 bucket size hash table */
+DEFINE_HASHTABLE(wklock_history, 5);
+#else
+/* Define 2^5 = 32 bucket size hash table */
+struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
+int trace_wklock_onoff = 1;
+
+typedef enum dhd_wklock_type {
+    DHD_WAKE_LOCK,
+    DHD_WAKE_UNLOCK,
+    DHD_WAIVE_LOCK,
+    DHD_RESTORE_LOCK
+} dhd_wklock_t;
+
+struct wk_trace_record {
+    unsigned long addr;            /* Address of the instruction */
+    dhd_wklock_t lock_type;        /* lock_type */
+    unsigned long long counter;    /* counter information */
+    struct hlist_node wklock_node; /* hash node */
+};
+
+
+static struct wk_trace_record *find_wklock_entry(unsigned long addr)
+{
+    struct wk_trace_record *wklock_info;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+    hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
+#else
+    struct hlist_node *entry;
+    int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
+    hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+    {
+        if (wklock_info->addr == addr) {
+            return wklock_info;
+        }
+    }
+    return NULL;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#define HASH_ADD(hashtable, node, key) \
+    do { \
+        hash_add(hashtable, node, key); \
+    } while (0);
+#else
+#define HASH_ADD(hashtable, node, key) \
+    do { \
+        int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
+        hlist_add_head(node, &hashtable[index]); \
+    } while (0);
+#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
+
+#define STORE_WKLOCK_RECORD(wklock_type) \
+    do { \
+        struct wk_trace_record *wklock_info = NULL; \
+        unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
+        wklock_info = find_wklock_entry(func_addr); \
+        if (wklock_info) { \
+            if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
+                wklock_info->counter = dhd->wakelock_counter; \
+            } else { \
+                wklock_info->counter++; \
+            } \
+        } else { \
+            wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
+            if (!wklock_info) { \
+                printk("Can't allocate wk_trace_record \n"); \
+            } else { \
+                wklock_info->addr = func_addr; \
+                wklock_info->lock_type = wklock_type; \
+                if (wklock_type == DHD_WAIVE_LOCK || \
+                        wklock_type == DHD_RESTORE_LOCK) { \
+                    wklock_info->counter = dhd->wakelock_counter; \
+                } else { \
+                    wklock_info->counter++; \
+                } \
+                HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
+            } \
+        } \
+    } while (0);
+
+static inline void dhd_wk_lock_rec_dump(void)
+{
+    int bkt;
+    struct wk_trace_record *wklock_info;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+    hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
+#else
+    struct hlist_node *entry = NULL;
+    int max_index = ARRAY_SIZE(wklock_history);
+    for (bkt = 0; bkt < max_index; bkt++)
+        
hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node) +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + { + switch (wklock_info->lock_type) { + case DHD_WAKE_LOCK: + DHD_ERROR(("wakelock lock : %pS lock_counter : %llu\n", + (void *)wklock_info->addr, wklock_info->counter)); + break; + case DHD_WAKE_UNLOCK: + DHD_ERROR(("wakelock unlock : %pS, unlock_counter : %llu\n", + (void *)wklock_info->addr, wklock_info->counter)); + break; + case DHD_WAIVE_LOCK: + DHD_ERROR(("wakelock waive : %pS before_waive : %llu\n", + (void *)wklock_info->addr, wklock_info->counter)); + break; + case DHD_RESTORE_LOCK: + DHD_ERROR(("wakelock restore : %pS, after_waive : %llu\n", + (void *)wklock_info->addr, wklock_info->counter)); + break; + } + } +} + +static void dhd_wk_lock_trace_init(struct dhd_info *dhd) +{ + unsigned long flags; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + int i; +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_init(wklock_history); +#else + for (i = 0; i < ARRAY_SIZE(wklock_history); i++) + INIT_HLIST_HEAD(&wklock_history[i]); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); +} + +static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd) +{ + int bkt; + struct wk_trace_record *wklock_info; + struct hlist_node *tmp; + unsigned long flags; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) + struct hlist_node *entry = NULL; + int max_index = ARRAY_SIZE(wklock_history); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */ + + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node) +#else + for (bkt = 0; bkt < max_index; bkt++) + hlist_for_each_entry_safe(wklock_info, entry, tmp, + &wklock_history[bkt], wklock_node) +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */ + { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + hash_del(&wklock_info->wklock_node); +#else + hlist_del_init(&wklock_info->wklock_node); +#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */ + kfree(wklock_info); + } + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); +} + +void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp) +{ + dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); + unsigned long flags; + + DHD_ERROR((KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n")); + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + dhd_wk_lock_rec_dump(); + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + DHD_ERROR((KERN_ERR"Event wakelock counter %u\n", dhd->wakelock_event_counter)); +} +#else +#define STORE_WKLOCK_RECORD(wklock_type) +#endif /* ! 
DHD_TRACE_WAKE_LOCK */ + +int dhd_os_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) { +#ifdef CONFIG_HAS_WAKELOCK + wake_lock(&dhd->wl_wifi); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_stay_awake(pub); +#endif + } +#ifdef DHD_TRACE_WAKE_LOCK + if (trace_wklock_onoff) { + STORE_WKLOCK_RECORD(DHD_WAKE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + dhd->wakelock_counter++; + ret = dhd->wakelock_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + + return ret; +} + +int dhd_event_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags); + if (dhd->wakelock_event_counter == 0) { +#ifdef CONFIG_HAS_WAKELOCK + wake_lock(&dhd->wl_evtwake); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_stay_awake(pub); +#endif + } + dhd->wakelock_event_counter++; + ret = dhd->wakelock_event_counter; + spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags); + } + + return ret; +} + +int net_os_wake_lock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_lock(&dhd->pub); + return ret; +} + +int dhd_os_wake_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + dhd_os_wake_lock_timeout(pub); + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + + if (dhd->wakelock_counter > 0) { + dhd->wakelock_counter--; +#ifdef DHD_TRACE_WAKE_LOCK + if (trace_wklock_onoff) { + STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) { +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_wifi); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_relax(pub); +#endif + } + ret = dhd->wakelock_counter; + } + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int dhd_event_wake_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags); + + if (dhd->wakelock_event_counter > 0) { + dhd->wakelock_event_counter--; + if (dhd->wakelock_event_counter == 0) { +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_evtwake); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_relax(pub); +#endif + } + ret = dhd->wakelock_event_counter; + } + spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags); + } + return ret; +} + +int dhd_os_check_wakelock(dhd_pub_t *pub) +{ +#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \ + KERNEL_VERSION(2, 6, 36))) + dhd_info_t *dhd; + + if (!pub) + return 0; + dhd = (dhd_info_t *)(pub->info); +#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */ + +#ifdef CONFIG_HAS_WAKELOCK + /* Indicate to the SD Host to avoid going to suspend if internal locks are up */ + if (dhd && (wake_lock_active(&dhd->wl_wifi) || + (wake_lock_active(&dhd->wl_wdwake)))) + return 1; +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) + return 1; +#endif + return 
0; +} + +int +dhd_os_check_wakelock_all(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + int l1, l2, l3, l4, l7; + int l5 = 0, l6 = 0; + int c, lock_active; +#endif /* CONFIG_HAS_WAKELOCK */ +#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \ + KERNEL_VERSION(2, 6, 36))) + dhd_info_t *dhd; + + if (!pub) { + return 0; + } + dhd = (dhd_info_t *)(pub->info); + if (!dhd) { + return 0; + } +#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */ + +#ifdef CONFIG_HAS_WAKELOCK + c = dhd->wakelock_counter; + l1 = wake_lock_active(&dhd->wl_wifi); + l2 = wake_lock_active(&dhd->wl_wdwake); + l3 = wake_lock_active(&dhd->wl_rxwake); + l4 = wake_lock_active(&dhd->wl_ctrlwake); +#ifdef BCMPCIE_OOB_HOST_WAKE + l5 = wake_lock_active(&dhd->wl_intrwake); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + l6 = wake_lock_active(&dhd->wl_scanwake); +#endif /* DHD_USE_SCAN_WAKELOCK */ + l7 = wake_lock_active(&dhd->wl_evtwake); + lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7); + + /* Indicate to the Host to avoid going to suspend if internal locks are up */ + if (dhd && lock_active) { + DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d " + "ctl-%d intr-%d scan-%d evt-%d\n", + __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7)); + return 1; + } +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) { + return 1; + } +#endif /* CONFIG_HAS_WAKELOCK */ + return 0; +} + +int net_os_wake_unlock(struct net_device *dev) +{ + dhd_info_t *dhd = DHD_DEV_INFO(dev); + int ret = 0; + + if (dhd) + ret = dhd_os_wake_unlock(&dhd->pub); + return ret; +} + +int dhd_os_wd_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); +#ifdef CONFIG_HAS_WAKELOCK + /* if wakelock_wd_counter was never used : lock it at once */ + if (!dhd->wakelock_wd_counter) + wake_lock(&dhd->wl_wdwake); +#endif + dhd->wakelock_wd_counter++; + ret = dhd->wakelock_wd_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int dhd_os_wd_wake_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (dhd->wakelock_wd_counter) { + dhd->wakelock_wd_counter = 0; +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_wdwake); +#endif + } + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +#ifdef BCMPCIE_OOB_HOST_WAKE +void +dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKELOCK */ +} + +void +dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_intrwake is active, unlock it */ + if (wake_lock_active(&dhd->wl_intrwake)) { + wake_unlock(&dhd->wl_intrwake); + } + } +#endif /* CONFIG_HAS_WAKELOCK */ +} +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +#ifdef DHD_USE_SCAN_WAKELOCK +void +dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val)); + } +#endif /* CONFIG_HAS_WAKELOCK */ +} + +void 
+dhd_os_scan_wake_unlock(dhd_pub_t *pub) +{ +#ifdef CONFIG_HAS_WAKELOCK + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + + if (dhd) { + /* if wl_scanwake is active, unlock it */ + if (wake_lock_active(&dhd->wl_scanwake)) { + wake_unlock(&dhd->wl_scanwake); + } + } +#endif /* CONFIG_HAS_WAKELOCK */ +} +#endif /* DHD_USE_SCAN_WAKELOCK */ + +/* waive wakelocks for operations such as IOVARs in suspend function, must be closed + * by a paired function call to dhd_wakelock_restore. returns current wakelock counter + */ +int dhd_os_wake_lock_waive(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + + /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */ + if (dhd->waive_wakelock == FALSE) { +#ifdef DHD_TRACE_WAKE_LOCK + if (trace_wklock_onoff) { + STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + /* record current lock status */ + dhd->wakelock_before_waive = dhd->wakelock_counter; + dhd->waive_wakelock = TRUE; + } + ret = dhd->wakelock_wd_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int dhd_os_wake_lock_restore(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (!dhd) + return 0; + + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + + /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */ + if (!dhd->waive_wakelock) + goto exit; + + dhd->waive_wakelock = FALSE; + /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore, + * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases + * the lock in between, do the same by calling wake_unlock or pm_relax + */ +#ifdef DHD_TRACE_WAKE_LOCK + if (trace_wklock_onoff) { + STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK); + } +#endif /* DHD_TRACE_WAKE_LOCK */ + + if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) { +#ifdef CONFIG_HAS_WAKELOCK + wake_lock(&dhd->wl_wifi); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_stay_awake(&dhd->pub); +#endif + } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) { +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_wifi); +#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) + dhd_bus_dev_pm_relax(&dhd->pub); +#endif + } + dhd->wakelock_before_waive = 0; +exit: + ret = dhd->wakelock_wd_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + return ret; +} + +void dhd_os_wake_lock_init(struct dhd_info *dhd) +{ + DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__)); + dhd->wakelock_event_counter = 0; + dhd->wakelock_counter = 0; + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; +#ifdef CONFIG_HAS_WAKELOCK + wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake"); + wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake"); + wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake"); + wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake"); +#ifdef BCMPCIE_OOB_HOST_WAKE + wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake"); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef DHD_USE_SCAN_WAKELOCK + wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake"); +#endif /* DHD_USE_SCAN_WAKELOCK */ +#endif /* CONFIG_HAS_WAKELOCK */ +#ifdef DHD_TRACE_WAKE_LOCK + dhd_wk_lock_trace_init(dhd); +#endif 
/* DHD_TRACE_WAKE_LOCK */
+}
+
+void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
+{
+    DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
+#ifdef CONFIG_HAS_WAKELOCK
+    dhd->wakelock_event_counter = 0;
+    dhd->wakelock_counter = 0;
+    dhd->wakelock_rx_timeout_enable = 0;
+    dhd->wakelock_ctrl_timeout_enable = 0;
+    wake_lock_destroy(&dhd->wl_wifi);
+    wake_lock_destroy(&dhd->wl_rxwake);
+    wake_lock_destroy(&dhd->wl_ctrlwake);
+    wake_lock_destroy(&dhd->wl_evtwake);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+    wake_lock_destroy(&dhd->wl_intrwake);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+    wake_lock_destroy(&dhd->wl_scanwake);
+#endif /* DHD_USE_SCAN_WAKELOCK */
+#ifdef DHD_TRACE_WAKE_LOCK
+    dhd_wk_lock_trace_deinit(dhd);
+#endif /* DHD_TRACE_WAKE_LOCK */
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+
+bool dhd_os_check_if_up(dhd_pub_t *pub)
+{
+    if (!pub)
+        return FALSE;
+    return pub->up;
+}
+
+#if defined(BCMSDIO)
+/* function to collect firmware, chip id and chip version info */
+void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
+{
+    int i;
+
+    i = snprintf(info_string, sizeof(info_string),
+        " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw);
+
+    if (!dhdp)
+        return;
+
+    i = snprintf(&info_string[i], sizeof(info_string) - i,
+        "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
+        dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
+}
+#endif /* defined(BCMSDIO) */
+int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
+{
+    int ifidx;
+    int ret = 0;
+    dhd_info_t *dhd = NULL;
+
+    if (!net || !DEV_PRIV(net)) {
+        DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
+        return -EINVAL;
+    }
+
+    dhd = DHD_DEV_INFO(net);
+    if (!dhd)
+        return -EINVAL;
+
+    ifidx = dhd_net2idx(dhd, net);
+    if (ifidx == DHD_BAD_IF) {
+        DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+        return -ENODEV;
+    }
+
+    DHD_OS_WAKE_LOCK(&dhd->pub);
+    DHD_PERIM_LOCK(&dhd->pub);
+
+    ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
+    dhd_check_hang(net, &dhd->pub, ret);
+
+    DHD_PERIM_UNLOCK(&dhd->pub);
+    DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+    return ret;
+}
+
+bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
+{
+    struct net_device *net;
+
+    net = dhd_idx2net(dhdp, ifidx);
+    if (!net) {
+        DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
+        return -EINVAL;
+    }
+
+    return dhd_check_hang(net, dhdp, ret);
+}
+
+/* Return instance */
+int dhd_get_instance(dhd_pub_t *dhdp)
+{
+    return dhdp->info->unit;
+}
+
+
+#ifdef PROP_TXSTATUS
+
+void dhd_wlfc_plat_init(void *dhd)
+{
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+    dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+    return;
+}
+
+void dhd_wlfc_plat_deinit(void *dhd)
+{
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+    dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+    return;
+}
+
+bool dhd_wlfc_skip_fc(void)
+{
+#ifdef SKIP_WLFC_ON_CONCURRENT
+#ifdef WL_CFG80211
+
+    /* enable flow control in vsdb mode */
+    return !(wl_cfg80211_is_concurrent_mode());
+#else
+    return TRUE; /* skip flow control */
+#endif /* WL_CFG80211 */
+
+#else
+    return FALSE;
+#endif /* SKIP_WLFC_ON_CONCURRENT */
+}
+#endif /* PROP_TXSTATUS */
+
+#ifdef BCMDBGFS
+#include <linux/debugfs.h>
+
+typedef struct dhd_dbgfs {
+    struct dentry *debugfs_dir;
+    struct dentry *debugfs_mem;
+    dhd_pub_t *dhdp;
+    uint32 size;
+} dhd_dbgfs_t;
+
+dhd_dbgfs_t g_dbgfs;
+
+extern uint32 dhd_readregl(void *bp, uint32 addr);
+extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
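+
+/*
+ * Overview of the BCMDBGFS hooks below: dhd_dbg_init() registers a "dhd"
+ * debugfs directory with a single "mem" node exposing a g_dbgfs.size byte
+ * window onto the dongle backplane. Every access is reduced to a 4-byte
+ * aligned register read/write through dhd_readregl()/dhd_writeregl(), and
+ * sub-word layout is left to the caller. A hedged user-space sketch (the
+ * debugfs mount point and offset are illustrative assumptions only):
+ *
+ *   int fd = open("/sys/kernel/debug/dhd/mem", O_RDONLY);
+ *   uint32_t reg;
+ *   lseek(fd, 0x1800, SEEK_SET);
+ *   read(fd, &reg, sizeof(reg));
+ */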
+static int
+dhd_dbg_state_open(struct inode *inode, struct file *file)
+{
+    file->private_data = inode->i_private;
+    return 0;
+}
+
+static ssize_t
+dhd_dbg_state_read(struct file *file, char __user *ubuf,
+    size_t count, loff_t *ppos)
+{
+    ssize_t rval;
+    uint32 tmp;
+    loff_t pos = *ppos;
+    size_t ret;
+
+    if (pos < 0)
+        return -EINVAL;
+    if (pos >= g_dbgfs.size || !count)
+        return 0;
+    if (count > g_dbgfs.size - pos)
+        count = g_dbgfs.size - pos;
+
+    /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
+    tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
+
+    ret = copy_to_user(ubuf, &tmp, 4);
+    if (ret == count)
+        return -EFAULT;
+
+    count -= ret;
+    *ppos = pos + count;
+    rval = count;
+
+    return rval;
+}
+
+
+static ssize_t
+dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
+{
+    loff_t pos = *ppos;
+    size_t ret;
+    uint32 buf;
+
+    if (pos < 0)
+        return -EINVAL;
+    if (pos >= g_dbgfs.size || !count)
+        return 0;
+    if (count > g_dbgfs.size - pos)
+        count = g_dbgfs.size - pos;
+
+    ret = copy_from_user(&buf, ubuf, sizeof(uint32));
+    if (ret == count)
+        return -EFAULT;
+
+    /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
+    dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
+
+    return count;
+}
+
+
+loff_t
+dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
+{
+    loff_t pos = -1;
+
+    switch (whence) {
+        case 0:
+            pos = off;
+            break;
+        case 1:
+            pos = file->f_pos + off;
+            break;
+        case 2:
+            pos = g_dbgfs.size - off;
+    }
+    return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
+}
+
+static const struct file_operations dhd_dbg_state_ops = {
+    .read = dhd_dbg_state_read,
+    .write = dhd_debugfs_write,
+    .open = dhd_dbg_state_open,
+    .llseek = dhd_debugfs_lseek
+};
+
+static void dhd_dbg_create(void)
+{
+    if (g_dbgfs.debugfs_dir) {
+        g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
+            NULL, &dhd_dbg_state_ops);
+    }
+}
+
+void dhd_dbg_init(dhd_pub_t *dhdp)
+{
+    g_dbgfs.dhdp = dhdp;
+    g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
+
+    g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
+    if (IS_ERR(g_dbgfs.debugfs_dir)) {
+        g_dbgfs.debugfs_dir = NULL;
+        return;
+    }
+
+    dhd_dbg_create();
+
+    return;
+}
+
+void dhd_dbg_remove(void)
+{
+    debugfs_remove(g_dbgfs.debugfs_mem);
+    debugfs_remove(g_dbgfs.debugfs_dir);
+
+    bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
+}
+#endif /* BCMDBGFS */
+
+#ifdef WLMEDIA_HTSF
+
+static
+void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+    dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+    struct sk_buff *skb;
+    uint32 htsf = 0;
+    uint16 dport = 0, oldmagic = 0xACAC;
+    char *p1;
+    htsfts_t ts;
+
+    /* timestamp packet */
+
+    p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
+
+    if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+/*        memcpy(&proto, p1+26, 4); */
+        memcpy(&dport, p1+40, 2);
+/*        proto = ((ntoh32(proto))>> 16) & 0xFF; */
+        dport = ntoh16(dport);
+    }
+
+    /* timestamp only if icmp or udp iperf with port 5555 */
+/*    if (proto == 17 && dport == tsport) { */
+    if (dport >= tsport && dport <= tsport + 20) {
+
+        skb = (struct sk_buff *) pktbuf;
+
+        htsf = dhd_get_htsf(dhd, 0);
+        memset(skb->data + 44, 0, 2); /* clear checksum */
+        memcpy(skb->data+82, &oldmagic, 2);
+        memcpy(skb->data+84, &htsf, 4);
+
+        memset(&ts, 0, sizeof(htsfts_t));
+        ts.magic = HTSFMAGIC;
+        ts.prio = PKTPRIO(pktbuf);
+        ts.seqnum = htsf_seqnum++;
+        ts.c10 = get_cycles();
+        ts.t10 = htsf;
+        ts.endmagic = HTSFENDMAGIC;
+
+        memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
+    }
+}
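+
+/*
+ * Layout notes for dhd_htsf_addtxts() above, as implemented: only frames
+ * whose destination port (read from byte offset 40) falls within
+ * [tsport, tsport + 20] are stamped. Two bytes at offset 44 (the checksum,
+ * per the comment) are cleared, the 0xACAC marker is written at offset 82,
+ * the 32-bit host TSF snapshot at offset 84, and the full htsfts_t record
+ * at HTSF_HOSTOFFSET. dhd_htsf_addrxts() below checks for the same 0xACAC
+ * marker on receive before it feeds the t1..t4 deltas into the histograms.
+ */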
+
+static void dhd_dump_htsfhisto(histo_t *his, char *s)
+{
+    int pktcnt = 0, curval = 0, i;
+    for (i = 0; i < (NUMBIN-2); i++) {
+        curval += 500;
+        printf("%d ", his->bin[i]);
+        pktcnt += his->bin[i];
+    }
+    printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
+        his->bin[NUMBIN-1], s);
+}
+
+static
+void sorttobin(int value, histo_t *histo)
+{
+    int i, binval = 0;
+
+    if (value < 0) {
+        histo->bin[NUMBIN-1]++;
+        return;
+    }
+    if (value > histo->bin[NUMBIN-2]) /* store the max value */
+        histo->bin[NUMBIN-2] = value;
+
+    for (i = 0; i < (NUMBIN-2); i++) {
+        binval += 500; /* 500 ms bins */
+        if (value <= binval) {
+            histo->bin[i]++;
+            return;
+        }
+    }
+    histo->bin[NUMBIN-3]++;
+}
+
+static
+void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+    dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+    struct sk_buff *skb;
+    char *p1;
+    uint16 old_magic;
+    int d1, d2, d3, end2end;
+    htsfts_t *htsf_ts;
+    uint32 htsf;
+
+    skb = PKTTONATIVE(dhdp->osh, pktbuf);
+    p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
+
+    if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+        memcpy(&old_magic, p1+78, 2);
+        htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
+    } else {
+        return;
+    }
+    if (htsf_ts->magic == HTSFMAGIC) {
+        htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
+        htsf_ts->cE0 = get_cycles();
+    }
+
+    if (old_magic == 0xACAC) {
+
+        tspktcnt++;
+        htsf = dhd_get_htsf(dhd, 0);
+        memcpy(skb->data+92, &htsf, sizeof(uint32));
+
+        memcpy(&ts[tsidx].t1, skb->data+80, 16);
+
+        d1 = ts[tsidx].t2 - ts[tsidx].t1;
+        d2 = ts[tsidx].t3 - ts[tsidx].t2;
+        d3 = ts[tsidx].t4 - ts[tsidx].t3;
+        end2end = ts[tsidx].t4 - ts[tsidx].t1;
+
+        sorttobin(d1, &vi_d1);
+        sorttobin(d2, &vi_d2);
+        sorttobin(d3, &vi_d3);
+        sorttobin(end2end, &vi_d4);
+
+        if (end2end > 0 && end2end > maxdelay) {
+            maxdelay = end2end;
+            maxdelaypktno = tspktcnt;
+            memcpy(&maxdelayts, &ts[tsidx], 16);
+        }
+        if (++tsidx >= TSMAX)
+            tsidx = 0;
+    }
+}
+
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
+{
+    uint32 htsf = 0, cur_cycle, delta, delta_us;
+    uint32 factor, baseval, baseval2;
+    cycles_t t;
+
+    t = get_cycles();
+    cur_cycle = t;
+
+    if (cur_cycle > dhd->htsf.last_cycle) {
+        delta = cur_cycle - dhd->htsf.last_cycle;
+    } else {
+        delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle);
+    }
+
+    delta = delta >> 4;
+
+    if (dhd->htsf.coef) {
+        /* times ten to get the first digit */
+        factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
+        baseval = (delta*10)/factor;
+        baseval2 = (delta*10)/(factor+1);
+        delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
+        htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
+    } else {
+        DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
+    }
+
+    return htsf;
+}
+
+static void dhd_dump_latency(void)
+{
+    int i, max = 0;
+    int d1, d2, d3, d4, d5;
+
+    printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
+    for (i = 0; i < TSMAX; i++) {
+        d1 = ts[i].t2 - ts[i].t1;
+        d2 = ts[i].t3 - ts[i].t2;
+        d3 = ts[i].t4 - ts[i].t3;
+        d4 = ts[i].t4 - ts[i].t1;
+        d5 = ts[max].t4-ts[max].t1;
+        if (d4 > d5 && d4 > 0) {
+            max = i;
+        }
+        printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
+            ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
+            d1, d2, d3, d4, i);
+    }
+
+    printf("current idx = %d \n", tsidx);
+
+    printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
+    printf("%08X %08X %08X %08X \t%d %d %d %d\n",
+        maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
+        maxdelayts.t2 - maxdelayts.t1,
+        maxdelayts.t3 - maxdelayts.t2,
+        maxdelayts.t4 - maxdelayts.t3,
+        maxdelayts.t4 - maxdelayts.t1);
+}
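+
+/*
+ * dhd_ioctl_htsf_get() below brackets a "tsf" iovar query with two host
+ * clock samples from dhd_get_htsf() (s1 before, s2 after) and prints the
+ * post-query sample next to the firmware TSF; the reported delta
+ * (s2 - tsf_buf.low) gives a quick sanity check of the cycles-per-tick
+ * coefficient kept in dhd->htsf.
+ */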
+
+
+static int
+dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
+{
+    wl_ioctl_t ioc;
+    char buf[32];
+    int ret;
+    uint32 s1, s2;
+
+    struct tsf {
+        uint32 low;
+        uint32 high;
+    } tsf_buf;
+
+    memset(&ioc, 0, sizeof(ioc));
+    memset(&tsf_buf, 0, sizeof(tsf_buf));
+
+    ioc.cmd = WLC_GET_VAR;
+    ioc.buf = buf;
+    ioc.len = (uint)sizeof(buf);
+    ioc.set = FALSE;
+
+    strncpy(buf, "tsf", sizeof(buf) - 1);
+    buf[sizeof(buf) - 1] = '\0';
+    s1 = dhd_get_htsf(dhd, 0);
+    if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
+        if (ret == -EIO) {
+            DHD_ERROR(("%s: tsf is not supported by device\n",
+                dhd_ifname(&dhd->pub, ifidx)));
+            return -EOPNOTSUPP;
+        }
+        return ret;
+    }
+    s2 = dhd_get_htsf(dhd, 0);
+
+    memcpy(&tsf_buf, buf, sizeof(tsf_buf));
+    printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
+        tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
+        dhd->htsf.coefdec2, s2-tsf_buf.low);
+    printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
+    return 0;
+}
+
+void htsf_update(dhd_info_t *dhd, void *data)
+{
+    static ulong cur_cycle = 0, prev_cycle = 0;
+    uint32 htsf, tsf_delta = 0;
+    uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
+    ulong b, a;
+    cycles_t t;
+
+    /* cycles_t in include/mips/timex.h */
+
+    t = get_cycles();
+
+    prev_cycle = cur_cycle;
+    cur_cycle = t;
+
+    if (cur_cycle > prev_cycle)
+        cyc_delta = cur_cycle - prev_cycle;
+    else {
+        b = cur_cycle;
+        a = prev_cycle;
+        cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
+    }
+
+    if (data == NULL)
+        printf(" tsf update data pointer is null \n");
+
+    memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
+    memcpy(&cur_tsf, data, sizeof(tsf_t));
+
+    if (cur_tsf.low == 0) {
+        DHD_INFO((" ---- 0 TSF, do not update, return\n"));
+        return;
+    }
+
+    if (cur_tsf.low > prev_tsf.low)
+        tsf_delta = (cur_tsf.low - prev_tsf.low);
+    else {
+        DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
+            cur_tsf.low, prev_tsf.low));
+        if (cur_tsf.high > prev_tsf.high) {
+            tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
+            DHD_INFO((" ---- Wrap around tsf counter adjusted TSF=%08X\n", tsf_delta));
+        } else {
+            return; /* do not update */
+        }
+    }
+
+    if (tsf_delta) {
+        hfactor = cyc_delta / tsf_delta;
+        tmp = (cyc_delta - (hfactor * tsf_delta))*10;
+        dec1 = tmp/tsf_delta;
+        dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta;
+        tmp = (tmp - (dec1*tsf_delta))*10;
+        dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta;
+
+        if (dec3 > 4) {
+            if (dec2 == 9) {
+                dec2 = 0;
+                if (dec1 == 9) {
+                    dec1 = 0;
+                    hfactor++;
+                } else {
+                    dec1++;
+                }
+            } else {
+                dec2++;
+            }
+        }
+    }
+
+    if (hfactor) {
+        htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low;
+        dhd->htsf.coef = hfactor;
+        dhd->htsf.last_cycle = cur_cycle;
+        dhd->htsf.last_tsf = cur_tsf.low;
+        dhd->htsf.coefdec1 = dec1;
+        dhd->htsf.coefdec2 = dec2;
+    } else {
+        htsf = prev_tsf.low;
+    }
+}
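+
+/*
+ * Worked example of the coefficient extraction above (illustrative values):
+ * with cyc_delta = 12345 host cycles against tsf_delta = 1000 TSF ticks,
+ *   hfactor = 12345 / 1000                    = 12
+ *   tmp     = (12345 - 12 * 1000) * 10        = 3450, so dec1 = 3
+ *   dec2    = ((3450 - 3 * 1000) * 10) / 1000 = 4
+ *   tmp     = (3450 - 3 * 1000) * 10          = 4500, so dec3 = 5
+ * dec3 > 4 rounds dec2 up, so the stored ratio is 12.35 cycles per tick
+ * (dhd->htsf.coef = 12, coefdec1 = 3, coefdec2 = 5).
+ */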
+
+#endif /* WLMEDIA_HTSF */
+
+#ifdef CUSTOM_SET_CPUCORE
+void dhd_set_cpucore(dhd_pub_t *dhd, int set)
+{
+    int e_dpc = 0, e_rxf = 0, retry_set = 0;
+
+    if (!(dhd->chan_isvht80)) {
+        DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
+        return;
+    }
+
+    if (DPC_CPUCORE) {
+        do {
+            if (set == TRUE) {
+                e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+                    cpumask_of(DPC_CPUCORE));
+            } else {
+                e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+                    cpumask_of(PRIMARY_CPUCORE));
+            }
+            if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+                DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
+                return;
+            }
+            if (e_dpc < 0)
+                OSL_SLEEP(1);
+        } while (e_dpc < 0);
+    }
+    if (RXF_CPUCORE) {
+        do {
+            if (set == TRUE) {
+                e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+                    cpumask_of(RXF_CPUCORE));
+            } else {
+                e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+                    cpumask_of(PRIMARY_CPUCORE));
+            }
+            if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+                DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
+                return;
+            }
+            if (e_rxf < 0)
+                OSL_SLEEP(1);
+        } while (e_rxf < 0);
+    }
+#ifdef DHD_OF_SUPPORT
+    interrupt_set_cpucore(set);
+#endif /* DHD_OF_SUPPORT */
+    DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
+
+    return;
+}
+#endif /* CUSTOM_SET_CPUCORE */
+
+/* Get interface specific ap_isolate configuration */
+int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
+{
+    dhd_info_t *dhd = dhdp->info;
+    dhd_if_t *ifp;
+
+    ASSERT(idx < DHD_MAX_IFS);
+
+    ifp = dhd->iflist[idx];
+
+    return ifp->ap_isolate;
+}
+
+/* Set interface specific ap_isolate configuration */
+int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+    dhd_info_t *dhd = dhdp->info;
+    dhd_if_t *ifp;
+
+    ASSERT(idx < DHD_MAX_IFS);
+
+    ifp = dhd->iflist[idx];
+
+    ifp->ap_isolate = val;
+
+    return 0;
+}
+
+#ifdef DHD_FW_COREDUMP
+
+
+#ifdef CUSTOMER_HW4_DEBUG
+#ifdef PLATFORM_SLP
+#define MEMDUMPINFO "/opt/etc/.memdump.info"
+#else
+#define MEMDUMPINFO "/data/.memdump.info"
+#endif /* PLATFORM_SLP */
+#elif defined(CUSTOMER_HW2)
+#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
+#else
+#define MEMDUMPINFO "/installmedia/.memdump.info"
+#endif /* CUSTOMER_HW4_DEBUG */
+
+void dhd_get_memdump_info(dhd_pub_t *dhd)
+{
+    struct file *fp = NULL;
+    uint32 mem_val = DUMP_MEMFILE_MAX;
+    int ret = 0;
+    char *filepath = MEMDUMPINFO;
+    loff_t pos = 0;
+
+    /* Read memdump info from the file */
+    fp = filp_open(filepath, O_RDONLY, 0);
+    if (IS_ERR(fp)) {
+        DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+        goto done;
+    } else {
+        ret = kernel_read(fp, (char *)&mem_val, 4, &pos);
+        if (ret < 0) {
+            DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
+            filp_close(fp, NULL);
+            goto done;
+        }
+
+        mem_val = bcm_atoi((char *)&mem_val);
+
+        DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, mem_val));
+        filp_close(fp, NULL);
+    }
+
+done:
+#ifdef CUSTOMER_HW4_DEBUG
+    dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_DISABLED;
+#else
+    dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? 
mem_val : DUMP_MEMFILE_BUGON; +#endif /* CUSTOMER_HW4_DEBUG */ +} + + +void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size) +{ + dhd_dump_t *dump = NULL; + dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t)); + if (dump == NULL) { + DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__)); + return; + } + dump->buf = buf; + dump->bufsize = size; + +#if defined(CONFIG_ARM64) + DHD_ERROR(("%s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", __FUNCTION__, + (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size)); +#elif defined(__ARM_ARCH_7A__) + DHD_ERROR(("%s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", __FUNCTION__, + (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size)); +#endif /* __ARM_ARCH_7A__ */ + if (dhdp->memdump_enabled == DUMP_MEMONLY) { + BUG_ON(1); + } + +#ifdef DHD_LOG_DUMP + if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) { + dhd_schedule_log_dump(dhdp); + } +#endif /* DHD_LOG_DUMP */ + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump, + DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WORK_PRIORITY_HIGH); +} +static void +dhd_mem_dump(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + dhd_dump_t *dump = event_info; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + if (!dump) { + DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__)); + return; + } + + if (write_to_file(&dhd->pub, dump->buf, dump->bufsize)) { + DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__)); + } + + if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON && +#ifdef DHD_LOG_DUMP + dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP && +#endif + TRUE) { + BUG_ON(1); + } + MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t)); +} +#endif /* DHD_FW_COREDUMP */ + +#ifdef DHD_LOG_DUMP +static void +dhd_log_dump(void *handle, void *event_info, u8 event) +{ + dhd_info_t *dhd = handle; + + if (!dhd) { + DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__)); + return; + } + + if (do_dhd_log_dump(&dhd->pub)) { + DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__)); + return; + } +} + +void dhd_schedule_log_dump(dhd_pub_t *dhdp) +{ + dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, + (void*)NULL, DHD_WQ_WORK_DHD_LOG_DUMP, + dhd_log_dump, DHD_WORK_PRIORITY_HIGH); +} + +static int +do_dhd_log_dump(dhd_pub_t *dhdp) +{ + int ret = 0; + struct file *fp = NULL; + mm_segment_t old_fs; + loff_t pos = 0; + char dump_path[128]; + char common_info[1024]; + struct timeval curtime; + uint32 file_mode; + unsigned long flags = 0; + + if (!dhdp) { + return -1; + } + + /* Building the additional information like DHD, F/W version */ + memset(common_info, 0, sizeof(common_info)); + snprintf(common_info, sizeof(common_info), + "---------- Common information ----------\n" + "DHD version: %s\n" + "F/W version: %s\n" + "----------------------------------------\n", + dhd_version, fw_version); + + /* change to KERNEL_DS address limit */ + old_fs = get_fs(); + set_fs(KERNEL_DS); + + /* Init file name */ + memset(dump_path, 0, sizeof(dump_path)); + do_gettimeofday(&curtime); + snprintf(dump_path, sizeof(dump_path), "%s_%ld.%ld", + DHD_COMMON_DUMP_PATH "debug_dump", + (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec); + file_mode = O_CREAT | O_WRONLY | O_SYNC; + + DHD_ERROR(("debug_dump_path = %s\n", dump_path)); + fp = filp_open(dump_path, file_mode, 0644); + if (IS_ERR(fp)) { + ret = PTR_ERR(fp); + DHD_ERROR(("open file error, err = %d\n", ret)); + ret = -1; + goto exit; + } + + fp->f_op->write(fp, common_info, 
strlen(common_info), &pos); + if (dhdp->dld_buf.wraparound) { + fp->f_op->write(fp, dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE, &pos); + } else { + fp->f_op->write(fp, dhdp->dld_buf.buffer, + (int)(dhdp->dld_buf.present - dhdp->dld_buf.front), &pos); + } + + /* re-init dhd_log_dump_buf structure */ + spin_lock_irqsave(&dhdp->dld_buf.lock, flags); + dhdp->dld_buf.wraparound = 0; + dhdp->dld_buf.present = dhdp->dld_buf.front; + dhdp->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE; + bzero(dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE); + spin_unlock_irqrestore(&dhdp->dld_buf.lock, flags); +exit: + if (!ret) { + filp_close(fp, NULL); + } + set_fs(old_fs); + + return ret; +} +#endif /* DHD_LOG_DUMP */ + +#ifdef BCMASSERT_LOG +#ifdef CUSTOMER_HW4_DEBUG +#ifdef PLATFORM_SLP +#define ASSERTINFO "/opt/etc/.assert.info" +#else +#define ASSERTINFO "/data/.assert.info" +#endif /* PLATFORM_SLP */ +#elif defined(CUSTOMER_HW2) +#define ASSERTINFO "/data/misc/wifi/.assert.info" +#else +#define ASSERTINFO "/installmedia/.assert.info" +#endif /* CUSTOMER_HW4_DEBUG */ +void dhd_get_assert_info(dhd_pub_t *dhd) +{ + struct file *fp = NULL; + char *filepath = ASSERTINFO; + loff_t pos=0; + + /* + * Read assert info from the file + * 0: Trigger Kernel crash by panic() + * 1: Print out the logs and don't trigger Kernel panic. (default) + * 2: Trigger Kernel crash by BUG() + * File doesn't exist: Keep default value (1). + */ + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + } else { + int mem_val = 0; + int ret = kernel_read(fp, (char *)&mem_val, 4, &pos); + if (ret < 0) { + DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret)); + } else { + mem_val = bcm_atoi((char *)&mem_val); + DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val)); + g_assert_type = mem_val; + } + filp_close(fp, NULL); + } +} +#endif /* BCMASSERT_LOG */ + + +#ifdef DHD_WMF +/* Returns interface specific WMF configuration */ +dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + return &ifp->wmf; +} +#endif /* DHD_WMF */ + + +#if defined(DHD_L2_FILTER) +bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac) +{ + return dhd_find_sta(dhdp, bssidx, mac) ? 
TRUE : FALSE; +} +#endif + +#ifdef DHD_L2_FILTER +arp_table_t* +dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(bssidx < DHD_MAX_IFS); + + ifp = dhd->iflist[bssidx]; + return ifp->phnd_arp_table; +} + +int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + if (ifp) + return ifp->parp_enable; + else + return FALSE; +} + +/* Set interface specific proxy arp configuration */ +int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + if (!ifp) + return BCME_ERROR; + + /* At present all 3 variables are being + * handled at once + */ + ifp->parp_enable = val; + ifp->parp_discard = val; + ifp->parp_allnode = !val; + + /* Flush ARP entries when disabled */ + if (val == FALSE) { + bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL, + FALSE, dhdp->tickcnt); + } + return BCME_OK; +} + +bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + return ifp->parp_discard; +} + +bool +dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->parp_allnode; +} + +int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->dhcp_unicast; +} + +int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + ifp->dhcp_unicast = val; + return BCME_OK; +} + +int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->block_ping; +} + +int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + ifp->block_ping = val; + + return BCME_OK; +} + +int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + + ASSERT(idx < DHD_MAX_IFS); + + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + return ifp->grat_arp; +} + +int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val) +{ + dhd_info_t *dhd = dhdp->info; + dhd_if_t *ifp; + ASSERT(idx < DHD_MAX_IFS); + ifp = dhd->iflist[idx]; + + ASSERT(ifp); + + ifp->grat_arp = val; + + return BCME_OK; +} +#endif /* DHD_L2_FILTER */ + + +#if defined(SET_RPS_CPUS) +int dhd_rps_cpus_enable(struct net_device *net, int enable) +{ + dhd_info_t *dhd = DHD_DEV_INFO(net); + dhd_if_t *ifp; + int ifidx; + char * RPS_CPU_SETBUF; + + ifidx = dhd_net2idx(dhd, net); + if (ifidx == DHD_BAD_IF) { + DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); + return -ENODEV; + } + + if (ifidx == PRIMARY_INF) { + if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) { + DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__)); + RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS; + } else { + DHD_INFO(("%s : set for BSS.\n", __FUNCTION__)); + 
RPS_CPU_SETBUF = RPS_CPUS_MASK; + } + } else if (ifidx == VIRTUAL_INF) { + DHD_INFO(("%s : set for P2P.\n", __FUNCTION__)); + RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P; + } else { + DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx)); + return -EINVAL; + } + + ifp = dhd->iflist[ifidx]; + if (ifp) { + if (enable) { + DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF)); + custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF)); + } else { + custom_rps_map_clear(ifp->net->_rx); + } + } else { + DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__)); + return -ENODEV; + } + return BCME_OK; +} + +int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len) +{ + struct rps_map *old_map, *map; + cpumask_var_t mask; + int err, cpu, i; + static DEFINE_SPINLOCK(rps_map_lock); + + DHD_INFO(("%s : Entered.\n", __FUNCTION__)); + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { + DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__)); + return -ENOMEM; + } + + err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); + if (err) { + free_cpumask_var(mask); + DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__)); + return err; + } + + map = kzalloc(max_t(unsigned int, + RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), + GFP_KERNEL); + if (!map) { + free_cpumask_var(mask); + DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__)); + return -ENOMEM; + } + + i = 0; + for_each_cpu(cpu, mask) { + map->cpus[i++] = cpu; + } + + if (i) { + map->len = i; + } else { + kfree(map); + map = NULL; + free_cpumask_var(mask); + DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__)); + return -1; + } + + spin_lock(&rps_map_lock); + old_map = rcu_dereference_protected(queue->rps_map, + lockdep_is_held(&rps_map_lock)); + rcu_assign_pointer(queue->rps_map, map); + spin_unlock(&rps_map_lock); + + if (map) { + static_key_slow_inc(&rps_needed); + } + if (old_map) { + kfree_rcu(old_map, rcu); + static_key_slow_dec(&rps_needed); + } + free_cpumask_var(mask); + + DHD_INFO(("%s : Done. 
mapping cpu number : %d\n", __FUNCTION__, map->len));
+	return map->len;
+}
+
+void custom_rps_map_clear(struct netdev_rx_queue *queue)
+{
+	struct rps_map *map;
+
+	DHD_INFO(("%s : Entered.\n", __FUNCTION__));
+
+	map = rcu_dereference_protected(queue->rps_map, 1);
+	if (map) {
+		RCU_INIT_POINTER(queue->rps_map, NULL);
+		kfree_rcu(map, rcu);
+		DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
+	}
+}
+#endif
+
+
+
+#ifdef DHD_DEBUG_PAGEALLOC
+
+void
+dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
+{
+	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+
+	DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
+		__FUNCTION__, addr_corrupt, (uint32)len));
+
+	DHD_OS_WAKE_LOCK(dhdp);
+	prhex("Page Corruption:", addr_corrupt, len);
+	dhd_dump_to_kernelog(dhdp);
+#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+	/* Load the dongle side dump to host memory and then BUG_ON() */
+	dhdp->memdump_enabled = DUMP_MEMONLY;
+	dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
+	dhd_bus_mem_dump(dhdp);
+#endif /* BCMPCIE && DHD_FW_COREDUMP */
+	DHD_OS_WAKE_UNLOCK(dhdp);
+}
+EXPORT_SYMBOL(dhd_page_corrupt_cb);
+#endif /* DHD_DEBUG_PAGEALLOC */
+
+#ifdef DHD_PKTID_AUDIT_ENABLED
+void
+dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp)
+{
+	DHD_ERROR(("%s: Got Pkt Id Audit failure\n", __FUNCTION__));
+	DHD_OS_WAKE_LOCK(dhdp);
+	dhd_dump_to_kernelog(dhdp);
+#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+	/* Load the dongle side dump to host memory and then BUG_ON() */
+	dhdp->memdump_enabled = DUMP_MEMFILE_BUGON;
+	dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
+	dhd_bus_mem_dump(dhdp);
+#endif /* BCMPCIE && DHD_FW_COREDUMP */
+	DHD_OS_WAKE_UNLOCK(dhdp);
+}
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+/* ----------------------------------------------------------------------------
+ * Infrastructure code for sysfs interface support for DHD
+ *
+ * What is sysfs interface?
+ * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
+ *
+ * Why sysfs interface?
+ * This is the Linux standard way of changing/configuring Run Time parameters
+ * for a driver. We can use this interface to control "linux" specific driver
+ * parameters.
+ *
+ * -----------------------------------------------------------------------------
+ */
+
+#include
+#include
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+
+/* Function to show the history buffer */
+static ssize_t
+show_wklock_trace(struct dhd_info *dev, char *buf)
+{
+	ssize_t ret = 0;
+	dhd_info_t *dhd = (dhd_info_t *)dev;
+
+	buf[ret] = '\n';
+	buf[ret+1] = 0;
+
+	dhd_wk_lock_stats_dump(&dhd->pub);
+	return ret+1;
+}
+
+/* Function to enable/disable wakelock trace */
+static ssize_t
+wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+	unsigned long onoff;
+	unsigned long flags;
+	dhd_info_t *dhd = (dhd_info_t *)dev;
+
+	onoff = bcm_strtoul(buf, NULL, 10);
+	if (onoff != 0 && onoff != 1) {
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+	trace_wklock_onoff = onoff;
+	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+	if (trace_wklock_onoff) {
+		printk("ENABLE WAKELOCK TRACE\n");
+	} else {
+		printk("DISABLE WAKELOCK TRACE\n");
+	}
+
+	return (ssize_t)(onoff+1);
+}
+#endif /* DHD_TRACE_WAKE_LOCK */
+
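Assuming the kobject registered below keeps its NULL parent and the name "bcm-dhd", the node above is published at /sys/bcm-dhd/wklock_trace (the path may differ per platform). A minimal userspace sketch to flip it:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/bcm-dhd/wklock_trace", O_WRONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, "1", 1) != 1)     /* "1" enables tracing, "0" disables it */
                    perror("write");
            close(fd);
            return 0;
    }
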
+/*
+ * Generic Attribute Structure for DHD.
+ * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have
+ * to instantiate an object of type dhd_attr, populate it with
+ * the required show/store functions (ex:- dhd_attr_cpumask_primary)
+ * and add the object to the default_attrs[] array, which gets registered
+ * to the kobject of dhd (named bcm-dhd).
+ */
+
+struct dhd_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct dhd_info *, char *);
+	ssize_t (*store)(struct dhd_info *, const char *, size_t count);
+};
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+static struct dhd_attr dhd_attr_wklock =
+	__ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
+#endif /* defined(DHD_TRACE_WAKE_LOCK) */
+
+/* Attribute object that gets registered with "bcm-dhd" kobject tree */
+static struct attribute *default_attrs[] = {
+#if defined(DHD_TRACE_WAKE_LOCK)
+	&dhd_attr_wklock.attr,
+#endif
+	NULL
+};
+
+#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
+#define to_attr(a) container_of(a, struct dhd_attr, attr)
+
+/*
+ * bcm-dhd kobject show function, the "attr" attribute specifies for which
+ * node under "bcm-dhd" the show function is called.
+ */
+static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+	dhd_info_t *dhd = to_dhd(kobj);
+	struct dhd_attr *d_attr = to_attr(attr);
+	int ret;
+
+	if (d_attr->show)
+		ret = d_attr->show(dhd, buf);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+
+/*
+ * bcm-dhd kobject store function, the "attr" attribute specifies for which
+ * node under "bcm-dhd" the store function is called.
+ */
+static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
+	const char *buf, size_t count)
+{
+	dhd_info_t *dhd = to_dhd(kobj);
+	struct dhd_attr *d_attr = to_attr(attr);
+	int ret;
+
+	if (d_attr->store)
+		ret = d_attr->store(dhd, buf, count);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+static struct sysfs_ops dhd_sysfs_ops = {
+	.show = dhd_show,
+	.store = dhd_store,
+};
+
+static struct kobj_type dhd_ktype = {
+	.sysfs_ops = &dhd_sysfs_ops,
+	.default_attrs = default_attrs,
+};
+
+/* Create a kobject and attach to sysfs interface */
+static int dhd_sysfs_init(dhd_info_t *dhd)
+{
+	int ret = -1;
+
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
+		return ret;
+	}
+
+	/* Initialize the kobject */
+	ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
+	if (ret) {
+		kobject_put(&dhd->dhd_kobj);
+		DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
+		return ret;
+	}
+
+	/*
+	 * We are always responsible for sending the uevent that the kobject
+	 * was added to the system.
+	 */
+	kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
+
+	return ret;
+}
+
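As a concrete illustration of the extension recipe described above, a hypothetical read-only /sys/bcm-dhd/op_mode node could be wired up as follows. The attribute name and show_op_mode() are invented for this sketch; only struct dhd_attr, __ATTR() and default_attrs[] come from the code above.

    static ssize_t
    show_op_mode(struct dhd_info *dhd, char *buf)
    {
            /* dhd_pub_t::op_mode is already read elsewhere in this patch */
            return scnprintf(buf, PAGE_SIZE, "%u\n", dhd->pub.op_mode);
    }

    static struct dhd_attr dhd_attr_op_mode =
            __ATTR(op_mode, 0440, show_op_mode, NULL);

    /* ...then list &dhd_attr_op_mode.attr in default_attrs[] before the NULL */
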
+/* Done with the kobject and detach the sysfs interface */
+static void dhd_sysfs_exit(dhd_info_t *dhd)
+{
+	if (dhd == NULL) {
+		DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
+		return;
+	}
+
+	/* Release the kobject */
+	kobject_put(&dhd->dhd_kobj);
+}
+
+#ifdef DHD_LOG_DUMP
+void
+dhd_log_dump_init(dhd_pub_t *dhd)
+{
+	spin_lock_init(&dhd->dld_buf.lock);
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+	dhd->dld_buf.buffer = DHD_OS_PREALLOC(dhd,
+		DHD_PREALLOC_DHD_LOG_DUMP_BUF, DHD_LOG_DUMP_BUFFER_SIZE);
+#else
+	dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+
+	if (!dhd->dld_buf.buffer) {
+		DHD_ERROR(("Try to allocate memory using kmalloc().\n"));
+		dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL);
+
+		if (!dhd->dld_buf.buffer) {
+			DHD_ERROR(("Failed to allocate memory for dld_buf.\n"));
+			return;
+		}
+	}
+
+	dhd->dld_buf.wraparound = 0;
+	dhd->dld_buf.max = (unsigned long)dhd->dld_buf.buffer + DHD_LOG_DUMP_BUFFER_SIZE;
+	dhd->dld_buf.present = dhd->dld_buf.buffer;
+	dhd->dld_buf.front = dhd->dld_buf.buffer;
+	dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
+	dhd->dld_enable = 1;
+}
+
+void
+dhd_log_dump_deinit(dhd_pub_t *dhd)
+{
+	dhd->dld_enable = 0;
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+	DHD_OS_PREFREE(dhd,
+		dhd->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE);
+#else
+	kfree(dhd->dld_buf.buffer);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+}
+
+void
+dhd_log_dump_print(const char *fmt, ...)
+{
+	int len = 0;
+	char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
+	va_list args;
+	dhd_pub_t *dhd = NULL;
+	unsigned long flags = 0;
+
+	if (wl_get_bcm_cfg80211_ptr()) {
+		dhd = (dhd_pub_t *)(wl_get_bcm_cfg80211_ptr()->pub);
+	}
+
+	if (!dhd || dhd->dld_enable != 1) {
+		return;
+	}
+
+	va_start(args, fmt);
+
+	len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
+	if (len < 0) {
+		va_end(args);
+		return;
+	}
+	/* vsnprintf returns the untruncated length; clamp it to what
+	 * actually fits in tmp_buf before copying out of it
+	 */
+	if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) {
+		len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
+	}
+
+	/* make a critical section to eliminate race conditions */
+	spin_lock_irqsave(&dhd->dld_buf.lock, flags);
+	if (dhd->dld_buf.remain < len) {
+		dhd->dld_buf.wraparound = 1;
+		dhd->dld_buf.present = dhd->dld_buf.front;
+		dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
+	}
+
+	strncpy(dhd->dld_buf.present, tmp_buf, len);
+	dhd->dld_buf.remain -= len;
+	dhd->dld_buf.present += len;
+	spin_unlock_irqrestore(&dhd->dld_buf.lock, flags);
+
+	/* double check invalid memory operation */
+	ASSERT((unsigned long)dhd->dld_buf.present <= dhd->dld_buf.max);
+	va_end(args);
+}
+
+char*
+dhd_log_dump_get_timestamp(void)
+{
+	static char buf[16];
+	u64 ts_nsec;
+	unsigned long rem_nsec;
+
+	ts_nsec = local_clock();
+	rem_nsec = do_div(ts_nsec, 1000000000);
+	snprintf(buf, sizeof(buf), "%5lu.%06lu",
+		(unsigned long)ts_nsec, rem_nsec / 1000);
+
+	return buf;
+}
+
+#endif /* DHD_LOG_DUMP */
+
+/* ---------------------------- End of sysfs implementation ------------------------------------- */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.h b/drivers/net/wireless/bcmdhd/dhd_linux.h
new file mode 100644
index 000000000000..dd3397723f73
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux.h
@@ -0,0 +1,127 @@
+/*
+ * DHD Linux header file (dhd_linux exports for cfg80211 and other components)
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ *
agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_linux.h 591285 2015-10-07 11:56:29Z $ + */ + +/* wifi platform functions for power, interrupt and pre-alloc, either + * from Android-like platform device data, or Broadcom wifi platform + * device data. + * + */ +#ifndef __DHD_LINUX_H__ +#define __DHD_LINUX_H__ + +#include +#include +#include +#include +#include +#ifdef DHD_WMF +#include +#endif +/* Linux wireless extension support */ +#if defined(WL_WIRELESS_EXT) +#include +#endif /* defined(WL_WIRELESS_EXT) */ +#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) +#include +#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */ + +#if defined(CONFIG_WIFI_CONTROL_FUNC) +#include +#endif + +#if !defined(CONFIG_WIFI_CONTROL_FUNC) +#define WLAN_PLAT_NODFS_FLAG 0x01 +struct wifi_platform_data { + int (*set_power)(int val); + int (*set_reset)(int val); + int (*set_carddetect)(int val); + void *(*mem_prealloc)(int section, unsigned long size); + int (*get_mac_addr)(unsigned char *buf); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 58)) || defined(CUSTOM_COUNTRY_CODE) + void *(*get_country_code)(char *ccode, u32 flags); +#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 58)) || defined (CUSTOM_COUNTRY_CODE) */ + void *(*get_country_code)(char *ccode); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 58)) */ + }; +#endif /* CONFIG_WIFI_CONTROL_FUNC */ +#define DHD_REGISTRATION_TIMEOUT 12000 /* msec : allowed time to finished dhd registration */ + +typedef struct wifi_adapter_info { + const char *name; + uint irq_num; + uint intr_flags; + const char *fw_path; + const char *nv_path; + void *wifi_plat_data; /* wifi ctrl func, for backward compatibility */ + uint bus_type; + uint bus_num; + uint slot_num; +} wifi_adapter_info_t; + +typedef struct bcmdhd_wifi_platdata { + uint num_adapters; + wifi_adapter_info_t *adapters; +} bcmdhd_wifi_platdata_t; + +/** Per STA params. 
A list of dhd_sta objects are managed in dhd_if */
+typedef struct dhd_sta {
+	cumm_ctr_t cumm_ctr;		/* cumulative queue length of child flowrings */
+	uint16 flowid[NUMPRIO];		/* allocated flow ring ids (by priority) */
+	void *ifp;			/* associated dhd_if */
+	struct ether_addr ea;		/* station's Ethernet MAC address */
+	struct list_head list;		/* link into dhd_if::sta_list */
+	int idx;			/* index of self in dhd_pub::sta_pool[] */
+	int ifidx;			/* index of interface in dhd */
+} dhd_sta_t;
+typedef dhd_sta_t dhd_sta_pool_t;
+
+int dhd_wifi_platform_register_drv(void);
+void dhd_wifi_platform_unregister_drv(void);
+wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num,
+	uint32 slot_num);
+int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec);
+int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present);
+int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr);
+int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf);
+#ifdef CUSTOM_COUNTRY_CODE
+void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode,
+	u32 flags);
+#else
+void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode);
+#endif /* CUSTOM_COUNTRY_CODE */
+void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size);
+void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter);
+
+int dhd_get_fw_mode(struct dhd_info *dhdinfo);
+bool dhd_update_fw_nv_path(struct dhd_info *dhdinfo);
+
+#ifdef DHD_WMF
+dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx);
+#endif /* DHD_WMF */
+#endif /* __DHD_LINUX_H__ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c b/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c
new file mode 100644
index 000000000000..f7696d80a09a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_linux_platdev.c
@@ -0,0 +1,823 @@
+/*
+ * Linux platform device for DHD WLAN adapter
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: dhd_linux_platdev.c 591285 2015-10-07 11:56:29Z $
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+#include
+#endif
+#ifdef CONFIG_DTS
+#include
+#include
+#endif /* CONFIG_DTS */
+
+
+#define WIFI_PLAT_NAME "bcmdhd_wlan"
+#define WIFI_PLAT_NAME2 "bcm4329_wlan"
+#define WIFI_PLAT_EXT "bcmdhd_wifi_platform"
+
+#ifdef CONFIG_DTS
+struct regulator *wifi_regulator = NULL;
+#endif /* CONFIG_DTS */
+
+bool cfg_multichip = FALSE;
+bcmdhd_wifi_platdata_t *dhd_wifi_platdata = NULL;
+static int wifi_plat_dev_probe_ret = 0;
+static bool is_power_on = FALSE;
+#if !defined(CONFIG_DTS)
+#if defined(DHD_OF_SUPPORT)
+static bool dts_enabled = TRUE;
+extern struct resource dhd_wlan_resources;
+extern struct wifi_platform_data dhd_wlan_control;
+#else
+static bool dts_enabled = FALSE;
+struct resource dhd_wlan_resources = {0};
+struct wifi_platform_data dhd_wlan_control = {0};
+#endif /* DHD_OF_SUPPORT */
+#endif /* !defined(CONFIG_DTS) */
+
+static int dhd_wifi_platform_load(void);
+
+extern void* wl_cfg80211_get_dhdp(void);
+
+#ifdef ENABLE_4335BT_WAR
+extern int bcm_bt_lock(int cookie);
+extern void bcm_bt_unlock(int cookie);
+static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24;	/* cookie is "WiFi" */
+#endif /* ENABLE_4335BT_WAR */
+
+wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num, uint32 slot_num)
+{
+	int i;
+
+	if (dhd_wifi_platdata == NULL)
+		return NULL;
+
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		wifi_adapter_info_t *adapter = &dhd_wifi_platdata->adapters[i];
+		if ((adapter->bus_type == -1 || adapter->bus_type == bus_type) &&
+			(adapter->bus_num == -1 || adapter->bus_num == bus_num) &&
+			(adapter->slot_num == -1 || adapter->slot_num == slot_num)) {
+			DHD_TRACE(("found adapter info '%s'\n", adapter->name));
+			return adapter;
+		}
+	}
+	return NULL;
+}
+
+void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size)
+{
+	void *alloc_ptr = NULL;
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return NULL;
+	plat_data = adapter->wifi_plat_data;
+	if (plat_data->mem_prealloc) {
+		alloc_ptr = plat_data->mem_prealloc(section, size);
+		if (alloc_ptr) {
+			DHD_INFO(("success alloc section %d\n", section));
+			if (size != 0L)
+				bzero(alloc_ptr, size);
+			return alloc_ptr;
+		}
+	}
+
+	DHD_ERROR(("%s: failed to alloc static mem section %d\n", __FUNCTION__, section));
+	return NULL;
+}
+
+void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter)
+{
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter || !adapter->wifi_plat_data)
+		return NULL;
+	plat_data = adapter->wifi_plat_data;
+	return plat_data->mem_prealloc;
+}
+
+int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr)
+{
+	if (adapter == NULL)
+		return -1;
+	if (irq_flags_ptr)
+		*irq_flags_ptr = adapter->intr_flags;
+	return adapter->irq_num;
+}
+
+int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec)
+{
+	int err = 0;
+#ifdef CONFIG_DTS
+	if (on) {
+		// err = regulator_enable(wifi_regulator);
+		is_power_on = TRUE;
+	}
+	else {
+		// err = regulator_disable(wifi_regulator);
+		is_power_on = FALSE;
+	}
+	if (err < 0)
+		DHD_ERROR(("%s: regulator enable/disable failed", __FUNCTION__));
+#else
+	struct wifi_platform_data *plat_data;
+
+	if (!adapter ||
!adapter->wifi_plat_data) + return -EINVAL; + plat_data = adapter->wifi_plat_data; + + DHD_ERROR(("%s = %d\n", __FUNCTION__, on)); + if (plat_data->set_power) { +#ifdef ENABLE_4335BT_WAR + if (on) { + printk("WiFi: trying to acquire BT lock\n"); + if (bcm_bt_lock(lock_cookie_wifi) != 0) + printk("** WiFi: timeout in acquiring bt lock**\n"); + printk("%s: btlock acquired\n", __FUNCTION__); + } + else { + /* For a exceptional case, release btlock */ + bcm_bt_unlock(lock_cookie_wifi); + } +#endif /* ENABLE_4335BT_WAR */ + + err = plat_data->set_power(on); + } + + if (msec && !err) + OSL_SLEEP(msec); + + if (on && !err) + is_power_on = TRUE; + else + is_power_on = FALSE; + +#endif /* CONFIG_DTS */ + + return err; +} + +int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present) +{ + int err = 0; + struct wifi_platform_data *plat_data; + + if (!adapter || !adapter->wifi_plat_data) + return -EINVAL; + plat_data = adapter->wifi_plat_data; + + DHD_ERROR(("%s device present %d\n", __FUNCTION__, device_present)); + if (plat_data->set_carddetect) { + err = plat_data->set_carddetect(device_present); + } + return err; + +} + +int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf) +{ + struct wifi_platform_data *plat_data; + + DHD_ERROR(("%s\n", __FUNCTION__)); + if (!buf || !adapter || !adapter->wifi_plat_data) + return -EINVAL; + plat_data = adapter->wifi_plat_data; + if (plat_data->get_mac_addr) { + return plat_data->get_mac_addr(buf); + } + return -EOPNOTSUPP; +} +#ifdef CUSTOM_COUNTRY_CODE +void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode, u32 flags) +#else +void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode) +#endif /* CUSTOM_COUNTRY_CODE */ +{ + /* get_country_code was added after 2.6.39 */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + struct wifi_platform_data *plat_data; + + if (!ccode || !adapter || !adapter->wifi_plat_data) + return NULL; + plat_data = adapter->wifi_plat_data; + + DHD_TRACE(("%s\n", __FUNCTION__)); + if (plat_data->get_country_code) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 58)) + return plat_data->get_country_code(ccode, WLAN_PLAT_NODFS_FLAG); +#else +#ifdef CUSTOM_COUNTRY_CODE + return plat_data->get_country_code(ccode, flags); +#else + return plat_data->get_country_code(ccode); +#endif /* CUSTOM_COUNTRY_CODE */ +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 58)) */ + } +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */ + + return NULL; +} + +static int wifi_plat_dev_drv_probe(struct platform_device *pdev) +{ + struct resource *resource; + wifi_adapter_info_t *adapter; +#ifdef CONFIG_DTS + int irq, gpio; +#endif /* CONFIG_DTS */ + + /* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan") + * is kept for backward compatibility and supports only 1 adapter + */ + ASSERT(dhd_wifi_platdata != NULL); + ASSERT(dhd_wifi_platdata->num_adapters == 1); + adapter = &dhd_wifi_platdata->adapters[0]; + adapter->wifi_plat_data = (struct wifi_platform_data *)(pdev->dev.platform_data); + + resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcmdhd_wlan_irq"); + if (resource == NULL) + resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcm4329_wlan_irq"); + if (resource) { + adapter->irq_num = resource->start; + adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK; + } + +#ifdef CONFIG_DTS + wifi_regulator = regulator_get(&pdev->dev, "wlreg_on"); + if (wifi_regulator == NULL) { + DHD_ERROR(("%s 
regulator is null\n", __FUNCTION__)); + return -1; + } + + /* This is to get the irq for the OOB */ + gpio = of_get_gpio(pdev->dev.of_node, 0); + + if (gpio < 0) { + DHD_ERROR(("%s gpio information is incorrect\n", __FUNCTION__)); + return -1; + } + irq = gpio_to_irq(gpio); + if (irq < 0) { + DHD_ERROR(("%s irq information is incorrect\n", __FUNCTION__)); + return -1; + } + adapter->irq_num = irq; + + /* need to change the flags according to our requirement */ + adapter->intr_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | + IORESOURCE_IRQ_SHAREABLE; +#endif /* CONFIG_DTS */ + + wifi_plat_dev_probe_ret = dhd_wifi_platform_load(); + return wifi_plat_dev_probe_ret; +} + +static int wifi_plat_dev_drv_remove(struct platform_device *pdev) +{ + wifi_adapter_info_t *adapter; + + /* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan") + * is kept for backward compatibility and supports only 1 adapter + */ + ASSERT(dhd_wifi_platdata != NULL); + ASSERT(dhd_wifi_platdata->num_adapters == 1); + adapter = &dhd_wifi_platdata->adapters[0]; + if (is_power_on) { +#ifdef BCMPCIE + wifi_platform_bus_enumerate(adapter, FALSE); + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); +#else + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + wifi_platform_bus_enumerate(adapter, FALSE); +#endif /* BCMPCIE */ + } + +#ifdef CONFIG_DTS + regulator_put(wifi_regulator); +#endif /* CONFIG_DTS */ + return 0; +} + +static int wifi_plat_dev_drv_suspend(struct platform_device *pdev, pm_message_t state) +{ + DHD_TRACE(("##> %s\n", __FUNCTION__)); +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \ + defined(BCMSDIO) + bcmsdh_oob_intr_set(0); +#endif /* (OOB_INTR_ONLY) */ + return 0; +} + +static int wifi_plat_dev_drv_resume(struct platform_device *pdev) +{ + DHD_TRACE(("##> %s\n", __FUNCTION__)); +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \ + defined(BCMSDIO) + if (dhd_os_check_if_up(wl_cfg80211_get_dhdp())) + bcmsdh_oob_intr_set(1); +#endif /* (OOB_INTR_ONLY) */ + return 0; +} + +#ifdef CONFIG_DTS +static const struct of_device_id wifi_device_dt_match[] = { + { .compatible = "android,bcmdhd_wlan", }, + {}, +}; +#endif /* CONFIG_DTS */ +static struct platform_driver wifi_platform_dev_driver = { + .probe = wifi_plat_dev_drv_probe, + .remove = wifi_plat_dev_drv_remove, + .suspend = wifi_plat_dev_drv_suspend, + .resume = wifi_plat_dev_drv_resume, + .driver = { + .name = WIFI_PLAT_NAME, +#ifdef CONFIG_DTS + .of_match_table = wifi_device_dt_match, +#endif /* CONFIG_DTS */ + } +}; + +static struct platform_driver wifi_platform_dev_driver_legacy = { + .probe = wifi_plat_dev_drv_probe, + .remove = wifi_plat_dev_drv_remove, + .suspend = wifi_plat_dev_drv_suspend, + .resume = wifi_plat_dev_drv_resume, + .driver = { + .name = WIFI_PLAT_NAME2, + } +}; + +static int wifi_platdev_match(struct device *dev, void *data) +{ + char *name = (char*)data; + struct platform_device *pdev = to_platform_device(dev); + + if (strcmp(pdev->name, name) == 0) { + DHD_ERROR(("found wifi platform device %s\n", name)); + return TRUE; + } + + return FALSE; +} + +static int wifi_ctrlfunc_register_drv(void) +{ + int err = 0; + struct device *dev1, *dev2; + wifi_adapter_info_t *adapter; + + dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match); + dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match); + +#if !defined(CONFIG_DTS) + if (!dts_enabled) { + if (dev1 == NULL && dev2 == 
NULL) {
+			DHD_ERROR(("no wifi platform data, skip\n"));
+			return -ENXIO;
+		}
+	}
+#endif /* !defined(CONFIG_DTS) */
+
+	/* multi-chip support not enabled, build one adapter information for
+	 * DHD (either SDIO, USB or PCIe)
+	 */
+	adapter = kzalloc(sizeof(wifi_adapter_info_t), GFP_KERNEL);
+	if (adapter == NULL) {
+		DHD_ERROR(("%s: adapter alloc failed", __FUNCTION__));
+		return -ENOMEM;
+	}
+	adapter->name = "DHD generic adapter";
+	adapter->bus_type = -1;
+	adapter->bus_num = -1;
+	adapter->slot_num = -1;
+	adapter->irq_num = -1;
+	is_power_on = FALSE;
+	wifi_plat_dev_probe_ret = 0;
+	dhd_wifi_platdata = kzalloc(sizeof(bcmdhd_wifi_platdata_t), GFP_KERNEL);
+	if (dhd_wifi_platdata == NULL) {
+		DHD_ERROR(("%s: platdata alloc failed", __FUNCTION__));
+		kfree(adapter);
+		return -ENOMEM;
+	}
+	dhd_wifi_platdata->num_adapters = 1;
+	dhd_wifi_platdata->adapters = adapter;
+
+	if (dev1) {
+		err = platform_driver_register(&wifi_platform_dev_driver);
+		if (err) {
+			DHD_ERROR(("%s: failed to register wifi ctrl func driver\n",
+				__FUNCTION__));
+			return err;
+		}
+	}
+	if (dev2) {
+		err = platform_driver_register(&wifi_platform_dev_driver_legacy);
+		if (err) {
+			DHD_ERROR(("%s: failed to register wifi ctrl func legacy driver\n",
+				__FUNCTION__));
+			return err;
+		}
+	}
+
+#if !defined(CONFIG_DTS)
+	if (dts_enabled) {
+		struct resource *resource;
+		adapter->wifi_plat_data = (void *)&dhd_wlan_control;
+		resource = &dhd_wlan_resources;
+		adapter->irq_num = resource->start;
+		adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK;
+		wifi_plat_dev_probe_ret = dhd_wifi_platform_load();
+	}
+#endif /* !defined(CONFIG_DTS) */
+
+
+#ifdef CONFIG_DTS
+	wifi_plat_dev_probe_ret = platform_driver_register(&wifi_platform_dev_driver);
+#endif /* CONFIG_DTS */
+
+	/* return probe function's return value if registration succeeded */
+	return wifi_plat_dev_probe_ret;
+}
+
+void wifi_ctrlfunc_unregister_drv(void)
+{
+
+#ifdef CONFIG_DTS
+	DHD_ERROR(("unregister wifi platform drivers\n"));
+	platform_driver_unregister(&wifi_platform_dev_driver);
+#else
+	struct device *dev1, *dev2;
+	dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match);
+	dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
+	if (!dts_enabled)
+		if (dev1 == NULL && dev2 == NULL)
+			return;
+
+	DHD_ERROR(("unregister wifi platform drivers\n"));
+	if (dev1)
+		platform_driver_unregister(&wifi_platform_dev_driver);
+	if (dev2)
+		platform_driver_unregister(&wifi_platform_dev_driver_legacy);
+	if (dts_enabled) {
+		wifi_adapter_info_t *adapter;
+		adapter = &dhd_wifi_platdata->adapters[0];
+		if (is_power_on) {
+			wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+			wifi_platform_bus_enumerate(adapter, FALSE);
+		}
+	}
+#endif /* !defined(CONFIG_DTS) */
+
+	/* nothing to free if registration never built the platform data */
+	if (dhd_wifi_platdata == NULL)
+		return;
+
+	kfree(dhd_wifi_platdata->adapters);
+	dhd_wifi_platdata->adapters = NULL;
+	dhd_wifi_platdata->num_adapters = 0;
+	kfree(dhd_wifi_platdata);
+	dhd_wifi_platdata = NULL;
+}
+
+static int bcmdhd_wifi_plat_dev_drv_probe(struct platform_device *pdev)
+{
+	dhd_wifi_platdata = (bcmdhd_wifi_platdata_t *)(pdev->dev.platform_data);
+
+	return dhd_wifi_platform_load();
+}
+
+static int bcmdhd_wifi_plat_dev_drv_remove(struct platform_device *pdev)
+{
+	int i;
+	wifi_adapter_info_t *adapter;
+	ASSERT(dhd_wifi_platdata != NULL);
+
+	/* power down all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		adapter = &dhd_wifi_platdata->adapters[i];
+		wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+		wifi_platform_bus_enumerate(adapter, FALSE);
+	}
+	return 0;
+}
+
+static struct platform_driver dhd_wifi_platform_dev_driver = {
+	.probe = bcmdhd_wifi_plat_dev_drv_probe,
+	.remove = bcmdhd_wifi_plat_dev_drv_remove,
+	.driver = {
+		.name = WIFI_PLAT_EXT,
+	}
+};
+
+int dhd_wifi_platform_register_drv(void)
+{
+	int err = 0;
+	struct device *dev;
+
+	/* register the Broadcom wifi platform data driver if multi-chip is enabled,
+	 * otherwise use Android style wifi platform data (aka wifi control function)
+	 * if it exists
+	 *
+	 * to support multi-chip DHD, the Broadcom wifi platform data device must
+	 * be added in kernel early boot (e.g. board config file).
+	 */
+	if (cfg_multichip) {
+		dev = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_EXT, wifi_platdev_match);
+		if (dev == NULL) {
+			DHD_ERROR(("bcmdhd wifi platform data device not found!!\n"));
+			return -ENXIO;
+		}
+		err = platform_driver_register(&dhd_wifi_platform_dev_driver);
+	} else {
+		err = wifi_ctrlfunc_register_drv();
+
+		/* no wifi ctrl func either, load bus directly and ignore this error */
+		if (err) {
+			if (err == -ENXIO) {
+				/* wifi ctrl function does not exist */
+				err = dhd_wifi_platform_load();
+			} else {
+				/* unregister driver due to initialization failure */
+				wifi_ctrlfunc_unregister_drv();
+			}
+		}
+	}
+
+	return err;
+}
+
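The comment above says the multi-chip path expects a platform device named WIFI_PLAT_EXT ("bcmdhd_wifi_platform") to be registered from board code during early boot. A hypothetical board-file sketch of that registration follows; the adapter values and function names are invented for illustration, while the types come from dhd_linux.h above:

    #include <linux/init.h>
    #include <linux/platform_device.h>

    static wifi_adapter_info_t board_adapters[] = {
            {
                    .name = "bcm4356-sdio", /* illustrative */
                    .irq_num = 123,         /* board-specific OOB wake IRQ */
                    .bus_type = -1,         /* -1 acts as a wildcard in */
                    .bus_num = -1,          /* dhd_wifi_platform_get_adapter() */
                    .slot_num = -1,
            },
    };

    static bcmdhd_wifi_platdata_t board_platdata = {
            .num_adapters = ARRAY_SIZE(board_adapters),
            .adapters = board_adapters,
    };

    static struct platform_device board_wifi_device = {
            .name = "bcmdhd_wifi_platform",  /* WIFI_PLAT_EXT */
            .id = -1,
            .dev = { .platform_data = &board_platdata, },
    };

    /* called from the board's init_machine hook, before DHD loads */
    static void __init board_register_wifi(void)
    {
            platform_device_register(&board_wifi_device);
    }
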
+#ifdef BCMPCIE
+static int dhd_wifi_platform_load_pcie(void)
+{
+	int err = 0;
+	int i;
+	wifi_adapter_info_t *adapter;
+
+	BCM_REFERENCE(i);
+	BCM_REFERENCE(adapter);
+
+	if (dhd_wifi_platdata == NULL) {
+		err = dhd_bus_register();
+	} else {
+		if (dhd_download_fw_on_driverload) {
+			/* power up all adapters */
+			for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+				int retry = POWERUP_MAX_RETRY;
+				adapter = &dhd_wifi_platdata->adapters[i];
+
+				DHD_ERROR(("Power-up adapter '%s'\n", adapter->name));
+				DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n",
+					adapter->irq_num, adapter->intr_flags, adapter->fw_path,
+					adapter->nv_path));
+				DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n",
+					adapter->bus_type, adapter->bus_num, adapter->slot_num));
+
+				do {
+					err = wifi_platform_set_power(adapter,
+						TRUE, WIFI_TURNON_DELAY);
+					if (err) {
+						DHD_ERROR(("failed to power up %s,"
+							" %d retry left\n",
+							adapter->name, retry));
+						/* WL_REG_ON state unknown, power off forcibly */
+						wifi_platform_set_power(adapter,
+							FALSE, WIFI_TURNOFF_DELAY);
+						continue;
+					} else {
+						err = wifi_platform_bus_enumerate(adapter, TRUE);
+						if (err) {
+							DHD_ERROR(("failed to enumerate bus %s, "
+								"%d retry left\n",
+								adapter->name, retry));
+							wifi_platform_set_power(adapter, FALSE,
+								WIFI_TURNOFF_DELAY);
+						} else {
+							break;
+						}
+					}
+				} while (retry--);
+
+				if (!retry) {
+					DHD_ERROR(("failed to power up %s, max retry reached**\n",
+						adapter->name));
+					return -ENODEV;
+				}
+			}
+		}
+
+		err = dhd_bus_register();
+
+		if (err) {
+			DHD_ERROR(("%s: pcie_register_driver failed\n", __FUNCTION__));
+			if (dhd_download_fw_on_driverload) {
+				/* power down all adapters */
+				for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+					adapter = &dhd_wifi_platdata->adapters[i];
+					wifi_platform_bus_enumerate(adapter, FALSE);
+					wifi_platform_set_power(adapter,
+						FALSE, WIFI_TURNOFF_DELAY);
+				}
+			}
+		}
+	}
+
+	return err;
+}
+#else
+static int dhd_wifi_platform_load_pcie(void)
+{
+	return 0;
+}
+#endif /* BCMPCIE */
+
+
+void dhd_wifi_platform_unregister_drv(void)
+{
+	if (cfg_multichip)
+		platform_driver_unregister(&dhd_wifi_platform_dev_driver);
+	else
+		wifi_ctrlfunc_unregister_drv();
+}
+
+extern int dhd_watchdog_prio;
+extern int dhd_dpc_prio;
+extern uint dhd_deferred_tx;
+#if defined(BCMLXSDMMC)
+extern struct semaphore dhd_registration_sem;
+#endif
+
+#ifdef BCMSDIO
+static int dhd_wifi_platform_load_sdio(void)
+{
+	int i;
+	int err = 0;
+	wifi_adapter_info_t *adapter;
+
+	BCM_REFERENCE(i);
+	BCM_REFERENCE(adapter);
+	/* Sanity check on the module parameters
+	 * - Both watchdog and DPC as tasklets are ok
+	 * - If both watchdog and DPC are threads, TX must be deferred
+	 */
+	if (!(dhd_watchdog_prio < 0 && dhd_dpc_prio < 0) &&
+		!(dhd_watchdog_prio >= 0 && dhd_dpc_prio >= 0 && dhd_deferred_tx))
+		return -EINVAL;
+
+#if defined(BCMLXSDMMC)
+	if (dhd_wifi_platdata == NULL) {
+		DHD_ERROR(("DHD wifi platform data is required for Android build\n"));
+		return -EINVAL;
+	}
+
+	sema_init(&dhd_registration_sem, 0);
+	/* power up all adapters */
+	for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+		bool chip_up = FALSE;
+		int retry = POWERUP_MAX_RETRY;
+		struct semaphore dhd_chipup_sem;
+
+		adapter = &dhd_wifi_platdata->adapters[i];
+
+		DHD_ERROR(("Power-up adapter '%s'\n", adapter->name));
+		DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n",
+			adapter->irq_num, adapter->intr_flags, adapter->fw_path, adapter->nv_path));
+		DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n",
+			adapter->bus_type, adapter->bus_num, adapter->slot_num));
+
+		do {
+			sema_init(&dhd_chipup_sem, 0);
+			err = dhd_bus_reg_sdio_notify(&dhd_chipup_sem);
+			if (err) {
+				DHD_ERROR(("%s dhd_bus_reg_sdio_notify fail(%d)\n\n",
+					__FUNCTION__, err));
+				return err;
+			}
+			err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY);
+			if (err) {
+				/* WL_REG_ON state unknown, power off forcibly */
+				wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+				continue;
+			} else {
+				wifi_platform_bus_enumerate(adapter, TRUE);
+				err = 0;
+			}
+
+			if (down_timeout(&dhd_chipup_sem, msecs_to_jiffies(POWERUP_WAIT_MS)) == 0) {
+				dhd_bus_unreg_sdio_notify();
+				chip_up = TRUE;
+				break;
+			}
+
+			DHD_ERROR(("failed to power up %s, %d retry left\n", adapter->name, retry));
+			dhd_bus_unreg_sdio_notify();
+			wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+			wifi_platform_bus_enumerate(adapter, FALSE);
+		} while (retry--);
+
+		if (!chip_up) {
+			DHD_ERROR(("failed to power up %s, max retry reached**\n", adapter->name));
+			return -ENODEV;
+		}
+
+	}
+
+	err = dhd_bus_register();
+
+	if (err) {
+		DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__));
+		goto fail;
+	}
+
+
+	/*
+	 * Wait till the MMC sdio_register_driver callback has been called and the driver attached.
+ * It's needed to make sync up exit from dhd insmod and + * Kernel MMC sdio device callback registration + */ + err = down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT)); + if (err) { + DHD_ERROR(("%s: sdio_register_driver timeout or error \n", __FUNCTION__)); + dhd_bus_unregister(); + goto fail; + } + + return err; + +fail: + /* power down all adapters */ + for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) { + adapter = &dhd_wifi_platdata->adapters[i]; + wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY); + wifi_platform_bus_enumerate(adapter, FALSE); + } +#else + + /* x86 bring-up PC needs no power-up operations */ + err = dhd_bus_register(); + +#endif + + return err; +} +#else /* BCMSDIO */ +static int dhd_wifi_platform_load_sdio(void) +{ + return 0; +} +#endif /* BCMSDIO */ + +static int dhd_wifi_platform_load_usb(void) +{ + return 0; +} + +static int dhd_wifi_platform_load() +{ + int err = 0; + + wl_android_init(); + + if ((err = dhd_wifi_platform_load_usb())) + goto end; + else if ((err = dhd_wifi_platform_load_sdio())) + goto end; + else + err = dhd_wifi_platform_load_pcie(); + +end: + if (err) + wl_android_exit(); + else + wl_android_post_init(); + + return err; +} diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_sched.c b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c new file mode 100644 index 000000000000..66eb8940ba3f --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_linux_sched.c @@ -0,0 +1,51 @@ +/* + * Expose some of the kernel scheduler routines + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_linux_sched.c 514727 2014-11-12 03:02:48Z $ + */ +#include +#include +#include +#include +#include + +int setScheduler(struct task_struct *p, int policy, struct sched_param *param) +{ + int rc = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + rc = sched_setscheduler(p, policy, param); +#endif /* LinuxVer */ + return rc; +} + +int get_scheduler_policy(struct task_struct *p) +{ + int rc = SCHED_NORMAL; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) + rc = p->policy; +#endif /* LinuxVer */ + return rc; +} diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_wq.c b/drivers/net/wireless/bcmdhd/dhd_linux_wq.c new file mode 100644 index 000000000000..d2513cc4ab0d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_linux_wq.c @@ -0,0 +1,320 @@ +/* + * Broadcom Dongle Host Driver (DHD), Generic work queue framework + * Generic interface to handle dhd deferred work events + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_linux_wq.c 514727 2014-11-12 03:02:48Z $ + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct dhd_deferred_event_t { + u8 event; /* holds the event */ + void *event_data; /* Holds event specific data */ + event_handler_t event_handler; +}; +#define DEFRD_EVT_SIZE sizeof(struct dhd_deferred_event_t) + +struct dhd_deferred_wq { + struct work_struct deferred_work; /* should be the first member */ + + /* + * work events may occur simultaneously. + * Can hold upto 64 low priority events and 4 high priority events + */ +#define DHD_PRIO_WORK_FIFO_SIZE (4 * sizeof(struct dhd_deferred_event_t)) +#define DHD_WORK_FIFO_SIZE (64 * sizeof(struct dhd_deferred_event_t)) + struct kfifo *prio_fifo; + struct kfifo *work_fifo; + u8 *prio_fifo_buf; + u8 *work_fifo_buf; + spinlock_t work_lock; + void *dhd_info; /* review: does it require */ +}; + +static inline struct kfifo* +dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock) +{ + struct kfifo *fifo; + gfp_t flags = CAN_SLEEP()? 
GFP_KERNEL : GFP_ATOMIC;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+	fifo = kfifo_init(buf, size, flags, lock);
+#else
+	fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
+	if (!fifo) {
+		return NULL;
+	}
+	kfifo_init(fifo, buf, size);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
+	return fifo;
+}
+
+static inline void
+dhd_kfifo_free(struct kfifo *fifo)
+{
+	kfifo_free(fifo);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
+	/* FC11 releases the fifo memory */
+	kfree(fifo);
+#endif
+}
+
+/* deferred work functions */
+static void dhd_deferred_work_handler(struct work_struct *data);
+
+void*
+dhd_deferred_work_init(void *dhd_info)
+{
+	struct dhd_deferred_wq *work = NULL;
+	u8 *buf;
+	unsigned long fifo_size = 0;
+	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
+
+	if (!dhd_info) {
+		DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
+		goto return_null;
+	}
+
+	work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
+		flags);
+
+	if (!work) {
+		DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__));
+		goto return_null;
+	}
+
+	INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);
+
+	/* initialize event fifo */
+	spin_lock_init(&work->work_lock);
+
+	/* allocate buffer to hold prio events */
+	fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
+	fifo_size = is_power_of_2(fifo_size) ? fifo_size : roundup_pow_of_two(fifo_size);
+	buf = (u8 *)kzalloc(fifo_size, flags);
+	if (!buf) {
+		DHD_ERROR(("%s: prio work fifo allocation failed\n", __FUNCTION__));
+		goto return_null;
+	}
+
+	/* Initialize prio event fifo */
+	work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
+	if (!work->prio_fifo) {
+		kfree(buf);
+		goto return_null;
+	}
+
+	/* allocate buffer to hold work events */
+	fifo_size = DHD_WORK_FIFO_SIZE;
+	fifo_size = is_power_of_2(fifo_size) ? fifo_size : roundup_pow_of_two(fifo_size);
+	buf = (u8 *)kzalloc(fifo_size, flags);
+	if (!buf) {
+		DHD_ERROR(("%s: work fifo allocation failed\n", __FUNCTION__));
+		goto return_null;
+	}
+
+	/* Initialize event fifo */
+	work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
+	if (!work->work_fifo) {
+		kfree(buf);
+		goto return_null;
+	}
+
+	work->dhd_info = dhd_info;
+	DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__));
+	return work;
+
+return_null:
+
+	if (work)
+		dhd_deferred_work_deinit(work);
+
+	return NULL;
+}
+
+void
+dhd_deferred_work_deinit(void *work)
+{
+	struct dhd_deferred_wq *deferred_work = work;
+
+
+	if (!deferred_work) {
+		DHD_ERROR(("%s: deferred work has already been freed\n", __FUNCTION__));
+		return;
+	}
+
+	/* cancel the deferred work handling */
+	cancel_work_sync((struct work_struct *)deferred_work);
+
+	/*
+	 * free work event fifo.
+ * kfifo_free frees the locally allocated fifo buffer
+ */
+	if (deferred_work->prio_fifo)
+		dhd_kfifo_free(deferred_work->prio_fifo);
+
+	if (deferred_work->work_fifo)
+		dhd_kfifo_free(deferred_work->work_fifo);
+
+	kfree(deferred_work);
+}
+
+/*
+ * Prepares event to be queued
+ * Schedules the event
+ */
+int
+dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
+	event_handler_t event_handler, u8 priority)
+{
+	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)workq;
+	struct dhd_deferred_event_t deferred_event;
+	int status;
+
+	if (!deferred_wq) {
+		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
+		ASSERT(0);
+		return DHD_WQ_STS_UNINITIALIZED;
+	}
+
+	if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
+		DHD_ERROR(("%s: Unknown event\n", __FUNCTION__));
+		return DHD_WQ_STS_UNKNOWN_EVENT;
+	}
+
+	/*
+	 * default element size is 1, which can be changed
+	 * using kfifo_esize(). Older kernels (FC11) don't support
+	 * changing the element size. For compatibility, changing the
+	 * element size is not preferred.
+	 */
+	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
+	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
+
+	deferred_event.event = event;
+	deferred_event.event_data = event_data;
+	deferred_event.event_handler = event_handler;
+
+	if (priority == DHD_WORK_PRIORITY_HIGH) {
+		status = kfifo_in_spinlocked(deferred_wq->prio_fifo, &deferred_event,
+			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	} else {
+		status = kfifo_in_spinlocked(deferred_wq->work_fifo, &deferred_event,
+			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	}
+
+	if (!status) {
+		return DHD_WQ_STS_SCHED_FAILED;
+	}
+	schedule_work((struct work_struct *)deferred_wq);
+	return DHD_WQ_STS_OK;
+}
+
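A usage sketch for the scheduling call above: the handler below and its payload are invented for illustration, and the DHD_WQ_WORK_HANG_MSG id is borrowed purely as an example event; dhd_schedule_memdump() earlier in this patch is a real caller of the same API.

    /* runs later in process context, dispatched by dhd_deferred_work_handler() */
    static void
    my_evt_handler(void *handle, void *event_data, u8 event)
    {
            dhd_info_t *dhd = handle;   /* the dhd_info passed at init time */

            BCM_REFERENCE(dhd);
            /* process event_data here; it must stay valid until this runs */
    }

    static void
    my_schedule_example(dhd_pub_t *dhdp, void *payload)
    {
            dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, payload,
                    DHD_WQ_WORK_HANG_MSG, my_evt_handler, DHD_WORK_PRIORITY_LOW);
    }
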
 + +static int +dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq, struct dhd_deferred_event_t *event) +{ + int status = 0; + + if (!deferred_wq) { + DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__)); + return DHD_WQ_STS_UNINITIALIZED; + } + + /* + * default element size is 1 byte, which can be changed + * using kfifo_esize(). Older kernels (FC11) don't support + * changing the element size. For compatibility, changing the + * element size is not preferred + */ + ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1); + ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1); + + /* first read the priority event fifo */ + status = kfifo_out_spinlocked(deferred_wq->prio_fifo, event, + DEFRD_EVT_SIZE, &deferred_wq->work_lock); + + if (!status) { + /* priority fifo is empty. Now read low prio work fifo */ + status = kfifo_out_spinlocked(deferred_wq->work_fifo, event, + DEFRD_EVT_SIZE, &deferred_wq->work_lock); + } + + return status; +} + +/* + * Called when work is scheduled + */ +static void +dhd_deferred_work_handler(struct work_struct *work) +{ + struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work; + struct dhd_deferred_event_t work_event; + int status; + + if (!deferred_work) { + DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__)); + return; + } + + do { + status = dhd_get_scheduled_work(deferred_work, &work_event); + DHD_TRACE(("%s: event to handle %d\n", __FUNCTION__, status)); + if (!status) { + DHD_TRACE(("%s: No event to handle %d\n", __FUNCTION__, status)); + break; + } + + if (work_event.event >= DHD_MAX_WQ_EVENTS) { + DHD_TRACE(("%s: Unknown event %d\n", __FUNCTION__, work_event.event)); + break; + } + + if (work_event.event_handler) { + work_event.event_handler(deferred_work->dhd_info, + work_event.event_data, work_event.event); + } else { + DHD_ERROR(("%s: event not defined %d\n", __FUNCTION__, work_event.event)); + } + } while (1); + return; +} diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_wq.h b/drivers/net/wireless/bcmdhd/dhd_linux_wq.h new file mode 100644 index 000000000000..e6197b26f211 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_linux_wq.h @@ -0,0 +1,69 @@ +/* + * Broadcom Dongle Host Driver (DHD), Generic work queue framework + * Generic interface to handle dhd deferred work events + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent.
+ * + * + * <> + * + * $Id: dhd_linux_wq.h 597512 2015-11-05 11:37:36Z $ + */ +#ifndef _dhd_linux_wq_h_ +#define _dhd_linux_wq_h_ +/* + * Work event definitions + */ +enum _wq_event { + DHD_WQ_WORK_IF_ADD = 1, + DHD_WQ_WORK_IF_DEL, + DHD_WQ_WORK_SET_MAC, + DHD_WQ_WORK_SET_MCAST_LIST, + DHD_WQ_WORK_IPV6_NDO, + DHD_WQ_WORK_HANG_MSG, + DHD_WQ_WORK_SOC_RAM_DUMP, + DHD_WQ_WORK_DHD_LOG_DUMP, + + DHD_MAX_WQ_EVENTS +}; + +/* + * Work event priority + */ +#define DHD_WORK_PRIORITY_LOW 0 +#define DHD_WORK_PRIORITY_HIGH 1 + +/* + * Error definitions + */ +#define DHD_WQ_STS_OK 0 +#define DHD_WQ_STS_FAILED -1 /* General failure */ +#define DHD_WQ_STS_UNINITIALIZED -2 +#define DHD_WQ_STS_SCHED_FAILED -3 +#define DHD_WQ_STS_UNKNOWN_EVENT -4 + +typedef void (*event_handler_t)(void *handle, void *event_data, u8 event); + +void *dhd_deferred_work_init(void *dhd); +void dhd_deferred_work_deinit(void *workq); +int dhd_deferred_schedule_work(void *workq, void *event_data, u8 event, + event_handler_t evt_handler, u8 priority); +#endif /* _dhd_linux_wq_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_msgbuf.c b/drivers/net/wireless/bcmdhd/dhd_msgbuf.c new file mode 100644 index 000000000000..3bd9a45d7906 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_msgbuf.c @@ -0,0 +1,6413 @@ +/** + * @file definition of host message ring functionality + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_msgbuf.c 605475 2015-12-10 12:49:49Z $ + */ + + +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include + + +#include + +#include +#include +#include + +#if defined(DHD_LB) +#include +#include +#define DHD_LB_WORKQ_SZ (8192) +#define DHD_LB_WORKQ_SYNC (16) +#define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2) +#endif /* DHD_LB */ + + +/** + * Host configures a soft doorbell for d2h rings, by specifying a 32bit host + * address where a value must be written. Host may also apply interrupt + * coalescing on this soft doorbell. + * Use Case: Hosts with network processors may register with the dongle the + * network processor's thread wakeup register and a value corresponding to the + * core/thread context. Dongle will issue a write transaction + * to the PCIE RC, which the host will need to route to the mapped register + * space.
+ */ +/* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */ + +/* Dependency Check */ +#if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF) +#error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF" +#endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */ + +#define RETRIES 2 /* # of retries to retrieve matching ioctl response */ + +#define DEFAULT_RX_BUFFERS_TO_POST 256 +#define RXBUFPOST_THRESHOLD 32 +#define RX_BUF_BURST 32 /* Rx buffers for MSDU Data */ + +#define DHD_STOP_QUEUE_THRESHOLD 200 +#define DHD_START_QUEUE_THRESHOLD 100 + +#define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 */ +#define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN) +#define FLOWRING_SIZE (H2DRING_TXPOST_MAX_ITEM * H2DRING_TXPOST_ITEMSIZE) + +/* flags for ioctl pending status */ +#define MSGBUF_IOCTL_ACK_PENDING (1<<0) +#define MSGBUF_IOCTL_RESP_PENDING (1<<1) + +#define DMA_ALIGN_LEN 4 + +#define DMA_D2H_SCRATCH_BUF_LEN 8 +#define DMA_XFER_LEN_LIMIT 0x400000 + +#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192 + +#define DHD_FLOWRING_MAX_EVENTBUF_POST 8 +#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8 + +#define DHD_PROT_FUNCS 37 + +/* Length of buffer in host for bus throughput measurement */ +#define DHD_BUS_TPUT_BUF_LEN 2048 + +#define TXP_FLUSH_NITEMS + +/* optimization to write "n" tx items at a time to ring */ +#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48 + +#define RING_NAME_MAX_LENGTH 24 + + +struct msgbuf_ring; /* ring context for common and flow rings */ + +/** + * PCIE D2H DMA Complete Sync Modes + * + * Firmware may interrupt the host before the D2H Mem2Mem DMA completes into + * host system memory. A WAR using one of 4 approaches is needed: + * 1. Dongle places a modulo-253 seqnum in the last word of each D2H message + * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum + * and writes it in the last word of each work item. Each work item also has a + * seqnum number = sequence num % 253. + * + * 3. Read Barrier: Dongle does a host memory read access prior to posting an + * interrupt, ensuring that the D2H data transfer indeed completed. + * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing + * ring contents before the indices. + * + * Host does not sync for DMA to complete with option #3 or #4, and a noop sync + * callback (see dhd_prot_d2h_sync_none) may be bound. + * + * Dongle advertizes host side sync mechanism requirements. + */ +#define PCIE_D2H_SYNC + +#if defined(PCIE_D2H_SYNC) +#define PCIE_D2H_SYNC_WAIT_TRIES (512UL) +#define PCIE_D2H_SYNC_NUM_OF_STEPS (3UL) +#define PCIE_D2H_SYNC_DELAY (50UL) /* in terms of usecs */ + +/** + * Custom callback attached based upon D2H DMA Sync mode advertized by dongle. + * + * On success: return cmn_msg_hdr_t::msg_type + * On failure: return 0 (invalid msg_type) + */ +typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +#endif /* PCIE_D2H_SYNC */ + + +/* + * +---------------------------------------------------------------------------- + * + * RingIds and FlowIds are not equivalent, as ringids include D2H rings whereas + * flowids do not. + * + * Dongle advertizes the max H2D rings as max_sub_queues = 'N', which includes + * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings + * + * Here is a sample mapping (based on PCIE Full Dongle Rev5), where + * BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings, + * BCMPCIE_COMMON_MSGRINGS = 5, i.e. including the 3 D2H common rings.
+ * + * H2D Control Submit RingId = 0 FlowId = 0 reserved never allocated + * H2D RxPost Submit RingId = 1 FlowId = 1 reserved never allocated + * + * D2H Control Complete RingId = 2 + * D2H Transmit Complete RingId = 3 + * D2H Receive Complete RingId = 4 + * + * H2D TxPost FLOWRING RingId = 5 FlowId = 2 (1st flowring) + * H2D TxPost FLOWRING RingId = 6 FlowId = 3 (2nd flowring) + * H2D TxPost FLOWRING RingId = 5 + (N-1) FlowId = (N+1) (Nth flowring) + * + * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are + * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS. + * + * Example: when a system supports 4 bc/mc and 128 uc flowrings, with + * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the + * FlowId values would be in the range [2..133] and the corresponding + * RingId values would be in the range [5..136]. + * + * The flowId allocator may choose to allocate FlowIds: + * bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS)) + * X# of uc flowids in consecutive ranges (per station Id), where X is the + * packet's access category (e.g. 4 uc flowids per station). + * + * CAUTION: + * When the DMA indices array feature is used, RingId=5, corresponding to the 1st + * FLOWRING, will actually use the FlowId as the index into the H2D DMA indices + * array, since the FlowId truly represents the index in the H2D DMA indices array. + * + * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS, + * will represent the index in the D2H DMA indices array. + * + * +---------------------------------------------------------------------------- + */ + +/* First TxPost Flowring Id */ +#define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS + +/* Determine whether a ringid belongs to a TxPost flowring */ +#define DHD_IS_FLOWRING(ringid) \ + ((ringid) >= BCMPCIE_COMMON_MSGRINGS) + +/* Convert a H2D TxPost FlowId to a MsgBuf RingId */ +#define DHD_FLOWID_TO_RINGID(flowid) \ + (BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)) + +/* Convert a MsgBuf RingId to a H2D TxPost FlowId */ +#define DHD_RINGID_TO_FLOWID(ringid) \ + (BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS)) + +/* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array + * This may be used for the H2D DMA WR index array or H2D DMA RD index array or + * any array of H2D rings. + */ +#define DHD_H2D_RING_OFFSET(ringid) \ + ((DHD_IS_FLOWRING(ringid)) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid)) + +/* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array + * This may be used for the D2H DMA WR index array or D2H DMA RD index array or + * any array of D2H rings. + */ +#define DHD_D2H_RING_OFFSET(ringid) \ + ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS) + +/* Convert a D2H DMA Indices Offset to a RingId */ +#define DHD_D2H_RINGID(offset) \ + ((offset) + BCMPCIE_H2D_COMMON_MSGRINGS) + + +#define DHD_DMAH_NULL ((void*)NULL)
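As a quick sanity check, a worked example of the conversion macros above, assuming the Rev5 constants quoted in the comment (BCMPCIE_H2D_COMMON_MSGRINGS == 2, BCMPCIE_COMMON_MSGRINGS == 5):

/*
 * DHD_IS_FLOWRING(4)       -> FALSE  (D2H Receive Complete common ring)
 * DHD_IS_FLOWRING(5)       -> TRUE   (1st TxPost flowring)
 * DHD_FLOWID_TO_RINGID(2)  -> 5      (1st flowring)
 * DHD_RINGID_TO_FLOWID(6)  -> 3      (2nd flowring)
 * DHD_H2D_RING_OFFSET(1)   -> 1      (common ring: ringid used as-is)
 * DHD_H2D_RING_OFFSET(5)   -> 2      (flowring: flowid indexes the array)
 * DHD_D2H_RING_OFFSET(4)   -> 2
 * DHD_D2H_RINGID(2)        -> 4
 */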
 + + +/* + * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able + * buffer does not occupy the entire cacheline, and another object is placed + * following the DMA-able buffer, data corruption may occur if the DMA-able + * buffer is used to DMA into (e.g. D2H direction), when HW cache coherency + * is not available. + */ +#if defined(L1_CACHE_BYTES) +#define DHD_DMA_PAD (L1_CACHE_BYTES) +#else +#define DHD_DMA_PAD (128) +#endif + +/* Used in loopback tests */ +typedef struct dhd_dmaxfer { + dhd_dma_buf_t srcmem; + dhd_dma_buf_t dstmem; + uint32 srcdelay; + uint32 destdelay; + uint32 len; + bool in_progress; +} dhd_dmaxfer_t; + +/** + * msgbuf_ring : This object manages the host side ring that includes a DMA-able + * buffer, the WR and RD indices, ring parameters such as the max number of items + * and the length of each item, and other miscellaneous runtime state. + * A msgbuf_ring may be used to represent a H2D or D2H common ring or a + * H2D TxPost ring as specified in the PCIE FullDongle Spec. + * Ring parameters are conveyed to the dongle, which maintains its own peer end + * ring state. Depending on whether the DMA Indices feature is supported, the + * host will update the WR/RD index in the DMA indices array in host memory or + * directly in dongle memory. + */ +typedef struct msgbuf_ring { + bool inited; + uint16 idx; /* ring id */ + uint16 rd; /* read index */ + uint16 curr_rd; /* read index for debug */ + uint16 wr; /* write index */ + uint16 max_items; /* maximum number of items in ring */ + uint16 item_len; /* length of each item in the ring */ + sh_addr_t base_addr; /* LITTLE ENDIAN formatted: base address */ + dhd_dma_buf_t dma_buf; /* DMA-able buffer: pa, va, len, dmah, secdma */ + uint32 seqnum; /* next expected item's sequence number */ +#ifdef TXP_FLUSH_NITEMS + void *start_addr; + /* # of messages on ring not yet announced to dongle */ + uint16 pend_items_count; +#endif /* TXP_FLUSH_NITEMS */ + uchar name[RING_NAME_MAX_LENGTH]; +} msgbuf_ring_t; + +#define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va) +#define DHD_RING_END_VA(ring) \ + ((uint8 *)(DHD_RING_BGN_VA((ring))) + \ + (((ring)->max_items - 1) * (ring)->item_len))
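A small sketch (not in the patch) of the item addressing these fields imply; bounds checking, wrap-around and cache maintenance are deliberately elided:

/* Sketch only: host virtual address of ring item 'idx',
 * for 0 <= idx < ring->max_items; cf. DHD_RING_BGN_VA/DHD_RING_END_VA.
 */
static void *
msgbuf_ring_item_va(msgbuf_ring_t *ring, uint16 idx)
{
	return (uint8 *)DHD_RING_BGN_VA(ring) + ((uint32)idx * ring->item_len);
}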
 + + +/** DHD protocol handle. It is an opaque type to other DHD software layers. + */ +typedef struct dhd_prot { + osl_t *osh; /* OSL handle */ + uint16 rxbufpost; + uint16 max_rxbufpost; + uint16 max_eventbufpost; + uint16 max_ioctlrespbufpost; + uint16 cur_event_bufs_posted; + uint16 cur_ioctlresp_bufs_posted; + + /* Flow control mechanism based on active transmits pending */ + uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */ + uint16 max_tx_count; + uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */ + + /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */ + msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */ + msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */ + msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */ + msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */ + msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */ + + msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */ + dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */ + uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */ + + uint32 rx_dataoffset; + + dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */ + + /* ioctl related resources */ + uint8 ioctl_state; + int16 ioctl_status; /* status returned from dongle */ + uint16 ioctl_resplen; + dhd_ioctl_recieved_status_t ioctl_received; + uint curr_ioctl_cmd; + dhd_dma_buf_t retbuf; /* For holding ioctl response */ + dhd_dma_buf_t ioctbuf; /* For holding ioctl request */ + + dhd_dma_buf_t d2h_dma_scratch_buf; /* For holding d2h scratch */ + + /* DMA-able arrays for holding WR and RD indices */ + uint32 rw_index_sz; /* Size of a RD or WR index in dongle */ + dhd_dma_buf_t h2d_dma_indx_wr_buf; /* Array of H2D WR indices */ + dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */ + dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */ + dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of D2H RD indices */ + + dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */ + + dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */ + uint32 flowring_num; + +#if defined(PCIE_D2H_SYNC) + d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */ + ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */ + ulong d2h_sync_wait_tot; /* total wait loops */ +#endif /* PCIE_D2H_SYNC */ + + dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */ + + uint16 ioctl_seq_no; + uint16 data_seq_no; + uint16 ioctl_trans_id; + void *pktid_map_handle; /* a pktid maps to a packet and its metadata */ + bool metadata_dbg; + void *pktid_map_handle_ioctl; + + /* Applications/utilities can read tx and rx metadata using IOVARs */ + uint16 rx_metadata_offset; + uint16 tx_metadata_offset; + + +#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT) + /* Host's soft doorbell configuration */ + bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS]; +#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */ +#if defined(DHD_LB) + /* Work Queues to be used by the producer and the consumer, and threshold + * when the WRITE index must be synced to consumer's workq + */ +#if defined(DHD_LB_TXC) + uint32 tx_compl_prod_sync ____cacheline_aligned; + bcm_workq_t tx_compl_prod, tx_compl_cons; +#endif /* DHD_LB_TXC */ +#if defined(DHD_LB_RXC) + uint32 rx_compl_prod_sync ____cacheline_aligned; + bcm_workq_t rx_compl_prod, rx_compl_cons; +#endif /* DHD_LB_RXC */ +#endif /* DHD_LB */ +} dhd_prot_t; + +/* Convert a dmaaddr_t to a
base_addr with htol operations */ +static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa); + +/* APIs for managing a DMA-able buffer */ +static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf); +static int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len); +static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf); +static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf); + +/* msgbuf ring management */ +static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, + const char *name, uint16 max_items, uint16 len_item, uint16 ringid); +static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring); +static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring); +static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring); + +/* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */ +static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd); +static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd); +static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd); + +/* Fetch and Release a flowring msgbuf_ring from flowring pool */ +static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, + uint16 flowid); +/* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */ + +/* Producer: Allocate space in a msgbuf ring */ +static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint16 nitems, uint16 *alloced, bool exactly_nitems); +static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, + uint16 *alloced, bool exactly_nitems); + +/* Consumer: Determine the location where the next message may be consumed */ +static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint32 *available_len); + +/* Producer (WR index update) or Consumer (RD index update) indication */ +static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring, + void *p, uint16 len); +static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring); + +/* Allocate DMA-able memory for saving H2D/D2H WR/RD indices */ +static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type, + dhd_dma_buf_t *dma_buf, uint32 bufsz); + +/* Set/Get a RD or WR index in the array of indices */ +/* See also: dhd_prot_dma_indx_init() */ +static void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, + uint16 ringid); +static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid); + +/* Locate a packet given a pktid */ +static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, + bool free_pktid); +/* Locate a packet given a PktId and free it. 
*/ +static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send); + +static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, + void *buf, uint len, uint8 action); +static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, + void *buf, uint len, uint8 action); +static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf); +static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, + void *buf, int ifidx); + +/* Post buffers for Rx, control ioctl response and events */ +static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post); +static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub); +static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub); +static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid); +static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid); + +static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt); + +/* D2H Message handling */ +static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len); + +/* D2H Message handlers */ +static void dhd_prot_noop(dhd_pub_t *dhd, void *msg); +static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg); + +/* Loopback test with dongle */ +static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma); +static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay, + uint destdelay, dhd_dmaxfer_t *dma); +static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg); + +/* Flowring management communication with dongle */ +static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg); +static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg); + +/* Configure a soft doorbell per D2H ring */ +static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd); +static void dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg); + +typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg); + +/** callback functions for messages generated by the dongle */ +#define MSG_TYPE_INVALID 0 + +static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = { + dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */ + dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */ + dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */ + NULL, + dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */ + NULL, + dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */ + NULL, + dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */ + NULL, + dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */ + NULL, + dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */ + NULL, + dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */ + NULL, + dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */ + NULL, + dhd_prot_rxcmplt_process, /* MSG_TYPE_RX_CMPLT */ + NULL, + dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */ + NULL, /* 
MSG_TYPE_FLOW_RING_RESUME */ + NULL, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */ + NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */ + NULL, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */ + NULL, /* MSG_TYPE_INFO_BUF_POST */ + NULL, /* MSG_TYPE_INFO_BUF_CMPLT */ + NULL, /* MSG_TYPE_H2D_RING_CREATE */ + NULL, /* MSG_TYPE_D2H_RING_CREATE */ + NULL, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */ + NULL, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */ + NULL, /* MSG_TYPE_H2D_RING_CONFIG */ + NULL, /* MSG_TYPE_D2H_RING_CONFIG */ + NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */ + dhd_prot_d2h_ring_config_cmplt_process, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */ + NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */ + NULL, /* MSG_TYPE_D2H_MAILBOX_DATA */ +}; + + +#ifdef DHD_RX_CHAINING + +#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \ + (!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \ + !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \ + !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \ + !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \ + ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \ + ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \ + (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))) && \ + dhd_l2_filter_chainable((dhd), (evh), (ifidx))) + +static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain); +static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx); +static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd); + +#define DHD_PKT_CTF_MAX_CHAIN_LEN 64 + +#endif /* DHD_RX_CHAINING */ + +static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd); + +#if defined(PCIE_D2H_SYNC) /* avoids problems related to host CPU cache */ + +/** + * D2H DMA to completion callback handlers. Based on the mode advertised by the + * dongle through the PCIE shared region, the appropriate callback will be + * registered in the proto layer to be invoked prior to processing any message + * from a D2H DMA ring. If the dongle uses a read barrier or another mode that + * does not require host participation, then a noop callback handler will be + * bound that simply returns the msg_type. + */ +static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint32 tries, uchar *msg, int msglen); +static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen); +static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd); + +void dhd_prot_collect_memdump(dhd_pub_t *dhd) +{ + DHD_ERROR(("%s(): Collecting mem dump now \r\n", __FUNCTION__)); +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK; + dhd_bus_mem_dump(dhd); + } +#endif /* DHD_FW_COREDUMP */ +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + dhd->bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ + dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK; + dhd_os_send_hang_message(dhd); +#endif /* SUPPORT_LINKDOWN_RECOVERY */ +} + +/** + * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has + * not completed, a livelock condition occurs. Host will avert this livelock by + * dropping this message and moving to the next.
This dropped message can lead + * to a packet leak, or even something disastrous if the dropped + * message happens to be a control response. + * Here we will log this condition. One may choose to reboot the dongle. + * + */ +static void +dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 tries, + uchar *msg, int msglen) +{ + uint32 seqnum = ring->seqnum; + + DHD_ERROR(("LIVELOCK DHD<%p> name<%s> seqnum<%u:%u> tries<%u> max<%lu> tot<%lu> " + "dma_buf va<%p> msg<%p> curr_rd<%d>\n", + dhd, ring->name, seqnum, seqnum % D2H_EPOCH_MODULO, tries, + dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot, + ring->dma_buf.va, msg, ring->curr_rd)); + prhex("D2H MsgBuf Failure", (uchar *)msg, msglen); + dhd_dump_to_kernelog(dhd); + +#ifdef DHD_FW_COREDUMP + if (dhd->memdump_enabled) { + /* collect core dump */ + dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK; + dhd_bus_mem_dump(dhd); + } +#endif /* DHD_FW_COREDUMP */ +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + dhd->bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ + dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK; + dhd_os_send_hang_message(dhd); +#endif /* SUPPORT_LINKDOWN_RECOVERY */ +} + +/** + * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM + * mode. The sequence number is always in the last word of a message. + */ +static uint8 BCMFASTPATH +dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen) +{ + uint32 tries; + uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; + int num_words = msglen / sizeof(uint32); /* num of 32bit words */ + volatile uint32 *marker = (uint32 *)msg + (num_words - 1); /* last word */ + dhd_prot_t *prot = dhd->prot; + uint32 step = 0; + uint32 delay = PCIE_D2H_SYNC_DELAY; + uint32 total_tries = 0; + + ASSERT(msglen == ring->item_len); + + BCM_REFERENCE(delay); + /* + * For retries we use a stepper algorithm. + * We see that every time the Dongle comes out of the D3 + * Cold state, the first D2H mem2mem DMA takes more time to + * complete, leading to livelock issues. + * + * Case 1 - Apart from the Host CPU some other bus master is + * accessing the DDR port, probably a page close to the ring, + * so PCIE does not get a chance to update the memory. + * Solution - Increase the number of tries. + * + * Case 2 - The 50usec breathing time given by the Host CPU is not + * sufficient for the PCIe RC to start its work. + * Solution: Increase the delay in a stepper fashion. + * This ensures that no unwanted extra delay is + * introduced in normal conditions.
+ */ + for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) { + for (tries = 1; tries <= PCIE_D2H_SYNC_WAIT_TRIES; tries++) { + uint32 msg_seqnum = *marker; + if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma up to last word done */ + ring->seqnum++; /* next expected sequence number */ + goto dma_completed; + } + + total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries; + + if (total_tries > prot->d2h_sync_wait_max) + prot->d2h_sync_wait_max = total_tries; + + OSL_CACHE_INV(msg, msglen); /* invalidate and try again */ + OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */ +#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) + /* For ARM there is no pause in cpu_relax, so add extra delay */ + OSL_DELAY(delay * step); +#endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */ + } /* for PCIE_D2H_SYNC_WAIT_TRIES */ + } /* for number of steps */ + + dhd_prot_d2h_sync_livelock(dhd, ring, total_tries, (uchar *)msg, msglen); + + ring->seqnum++; /* skip this message ... leak of a pktid */ + return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */ + +dma_completed: + + prot->d2h_sync_wait_tot += total_tries; + return msg->msg_type; +} + +/** + * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM + * mode. The xorcsum is placed in the last word of a message. Dongle will also + * place a seqnum in the epoch field of the cmn_msg_hdr. + */ +static uint8 BCMFASTPATH +dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen) +{ + uint32 tries; + uint32 prot_checksum = 0; /* computed checksum */ + int num_words = msglen / sizeof(uint32); /* num of 32bit words */ + uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; + dhd_prot_t *prot = dhd->prot; + uint32 step = 0; + uint32 delay = PCIE_D2H_SYNC_DELAY; + uint32 total_tries = 0; + + ASSERT(msglen == ring->item_len); + + BCM_REFERENCE(delay); + + /* + * For retries we use a stepper algorithm. + * We see that every time the Dongle comes out of the D3 + * Cold state, the first D2H mem2mem DMA takes more time to + * complete, leading to livelock issues. + * + * Case 1 - Apart from the Host CPU some other bus master is + * accessing the DDR port, probably a page close to the ring, + * so PCIE does not get a chance to update the memory. + * Solution - Increase the number of tries. + * + * Case 2 - The 50usec breathing time given by the Host CPU is not + * sufficient for the PCIe RC to start its work. + * Solution: Increase the delay in a stepper fashion. + * This ensures that no unwanted extra delay is + * introduced in normal conditions.
+ */ + for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) { + for (tries = 1; tries <= PCIE_D2H_SYNC_WAIT_TRIES; tries++) { + prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words); + if (prot_checksum == 0U) { /* checksum is OK */ + if (msg->epoch == ring_seqnum) { + ring->seqnum++; /* next expected sequence number */ + goto dma_completed; + } + } + + total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries; + + if (total_tries > prot->d2h_sync_wait_max) + prot->d2h_sync_wait_max = total_tries; + + OSL_CACHE_INV(msg, msglen); /* invalidate and try again */ + OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */ +#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) + /* For ARM there is no pause in cpu_relax, so add extra delay */ + OSL_DELAY(delay * step); +#endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */ + + } /* for PCIE_D2H_SYNC_WAIT_TRIES */ + } /* for number of steps */ + + dhd_prot_d2h_sync_livelock(dhd, ring, total_tries, (uchar *)msg, msglen); + + ring->seqnum++; /* skip this message ... leak of a pktid */ + return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */ + +dma_completed: + + prot->d2h_sync_wait_tot += total_tries; + return msg->msg_type; +} + +/** + * dhd_prot_d2h_sync_none - Dongle ensures that the DMA will complete, so the + * host does not need to sync. This noop sync handler will be bound when the + * dongle advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is + * required. + */ +static uint8 BCMFASTPATH +dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring, + volatile cmn_msg_hdr_t *msg, int msglen) +{ + return msg->msg_type; +} + +/** + * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what + * dongle advertizes. + */ +static void +dhd_prot_d2h_sync_init(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + prot->d2h_sync_wait_max = 0UL; + prot->d2h_sync_wait_tot = 0UL; + + prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL; + prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL; + + if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) { + prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum; + } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) { + prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum; + } else { + prot->d2h_sync_cb = dhd_prot_d2h_sync_none; + } +} + +#endif /* PCIE_D2H_SYNC */ + +int INLINE +dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason) +{ + /* To synchronize with the previous memory operations, call wmb() */ + OSL_SMP_WMB(); + dhd->prot->ioctl_received = reason; + /* Call another wmb() to make sure the event value update is visible + * before waking up the waiter + */ + OSL_SMP_WMB(); + dhd_os_ioctl_resp_wake(dhd); + return 0; +} + +/** + * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum + */ +static void +dhd_prot_h2d_sync_init(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL; + prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL; +} + +/* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */
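To make the SEQNUM variant above concrete, a stripped-down sketch of the completion test it performs (not part of the patch; cache invalidation and the retry stepper of the real handler are omitted):

/* Sketch only: in SEQNUM mode the dongle writes (seqnum % D2H_EPOCH_MODULO)
 * into the last 32-bit word of a D2H work item; the host may consume the
 * item only once that marker equals the ring's expected epoch.
 */
static bool
d2h_item_landed(volatile cmn_msg_hdr_t *msg, int msglen, uint32 ring_seqnum)
{
	volatile uint32 *marker =
		(volatile uint32 *)msg + (msglen / sizeof(uint32)) - 1;

	return (ltoh32(*marker) == (ring_seqnum % D2H_EPOCH_MODULO));
}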
 + + +/* + * +---------------------------------------------------------------------------+ + * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the + * virtual and physical address, the buffer length and the DMA handler. + * A secdma handler is also included in the dhd_dma_buf object. + * +---------------------------------------------------------------------------+ + */ + +static INLINE void +dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa) +{ + base_addr->low_addr = htol32(PHYSADDRLO(pa)); + base_addr->high_addr = htol32(PHYSADDRHI(pa)); +} + + +/** + * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer. + */ +static int +dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf) +{ + uint32 base, end; /* dongle uses 32bit ptr arithmetic */ + + ASSERT(dma_buf); + base = PHYSADDRLO(dma_buf->pa); + ASSERT(base); + ASSERT(ISALIGNED(base, DMA_ALIGN_LEN)); + ASSERT(dma_buf->len != 0); + + /* test 32bit offset arithmetic over dma buffer for loss of carry-over */ + end = (base + dma_buf->len); /* end address */ + + if ((end & 0xFFFFFFFF) < (base & 0xFFFFFFFF)) { /* exclude carryover */ + DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n", + __FUNCTION__, base, dma_buf->len)); + return BCME_ERROR; + } + + return BCME_OK; +} + +/** + * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer. + * returns BCME_OK=0 on success + * returns non-zero negative error value on failure. + */ +static int +dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len) +{ + uint32 dma_pad = 0; + osl_t *osh = dhd->osh; + + ASSERT(dma_buf != NULL); + ASSERT(dma_buf->va == NULL); + ASSERT(dma_buf->len == 0); + + /* Pad the buffer length by one extra cacheline size. + * Required for D2H direction. + */ + dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0; + dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad, + DMA_ALIGN_LEN, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah); + + if (dma_buf->va == NULL) { + DHD_ERROR(("%s: buf_len %d, no memory available\n", + __FUNCTION__, buf_len)); + return BCME_NOMEM; + } + + dma_buf->len = buf_len; /* not including padded len */ + + if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */ + dhd_dma_buf_free(dhd, dma_buf); + return BCME_ERROR; + } + + dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */ + + return BCME_OK; +} + +/** + * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer. + */ +static void +dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf) +{ + if ((dma_buf == NULL) || (dma_buf->va == NULL)) { + return; + } + + (void)dhd_dma_buf_audit(dhd, dma_buf); + + /* Zero out the entire buffer and cache flush */ + memset((void*)dma_buf->va, 0, dma_buf->len); + OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len); +} + +/** + * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using + * dhd_dma_buf_alloc(). + */ +static void +dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf) +{ + osl_t *osh = dhd->osh; + + ASSERT(dma_buf); + + if (dma_buf->va == NULL) { + return; /* Allow free invocation when alloc failed */ + } + + /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */ + (void)dhd_dma_buf_audit(dhd, dma_buf); + + /* dma buffer may have been padded at allocation */ + DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced, + dma_buf->pa, dma_buf->dmah); + + memset(dma_buf, 0, sizeof(dhd_dma_buf_t)); +} + +/** + * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values. + * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0. + */ +void +dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf, + void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma) +{ + dhd_dma_buf_t *dma_buf; + ASSERT(dhd_dma_buf); + dma_buf = (dhd_dma_buf_t *)dhd_dma_buf; + dma_buf->va = va; + dma_buf->len = len; + dma_buf->pa = pa; + dma_buf->dmah = dmah; + dma_buf->secdma = secdma; + + /* Audit user defined configuration */ + (void)dhd_dma_buf_audit(dhd, dma_buf); +} + +/* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */
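A minimal usage sketch of this DMA buffer ADT (file-local; the 2048-byte length is illustrative and error handling is reduced to the essentials):

/* Sketch only: the ADT expects a zeroed descriptor on entry (alloc asserts
 * va == NULL and len == 0) and re-zeroes the descriptor on free.
 */
static int
example_dma_buf_roundtrip(dhd_pub_t *dhd)
{
	dhd_dma_buf_t buf;

	memset(&buf, 0, sizeof(buf));
	if (dhd_dma_buf_alloc(dhd, &buf, 2048) != BCME_OK)
		return BCME_NOMEM;

	/* ... convey buf.pa to the dongle, access buf.va on the host ... */

	dhd_dma_buf_free(dhd, &buf); /* unmaps, frees and re-zeroes 'buf' */
	return BCME_OK;
}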
 + +/* + * +---------------------------------------------------------------------------+ + * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping. + * The main purpose is to save memory on the dongle; it has other purposes as well. + * The packet id map also includes storage for some packet parameters that + * may be saved. A native packet pointer along with the parameters may be saved + * and a unique 32bit pkt id will be returned. Later, the saved packet pointer + * and the metadata may be retrieved using the previously allocated packet id. + * +---------------------------------------------------------------------------+ + */ +#define DHD_PCIE_PKTID +#define MAX_PKTID_ITEMS (3072) /* Maximum number of pktids supported */ + +/* On Router, the pktptr serves as a pktid. */ + + +#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID) +#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC" +#endif + +/* Enum for marking the buffer color based on usage */ +typedef enum dhd_pkttype { + PKTTYPE_DATA_TX = 0, + PKTTYPE_DATA_RX, + PKTTYPE_IOCTL_RX, + PKTTYPE_EVENT_RX, + /* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */ + PKTTYPE_NO_CHECK +} dhd_pkttype_t; + +#define DHD_PKTID_INVALID (0U) +#define DHD_IOCTL_REQ_PKTID (0xFFFE) +#define DHD_FAKE_PKTID (0xFACE) + +#define DHD_PKTID_FREE_LOCKER (FALSE) +#define DHD_PKTID_RSV_LOCKER (TRUE) + +typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */ + +/* Construct a packet id mapping table, returning an opaque map handle */ +static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index); + +/* Destroy a packet id mapping table, freeing all packets active in the table */ +static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map); + +#define PKTID_MAP_HANDLE (0) +#define PKTID_MAP_HANDLE_IOCTL (1) + +#define DHD_NATIVE_TO_PKTID_INIT(dhd, items, index) dhd_pktid_map_init((dhd), (items), (index)) +#define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map)) + +#if defined(DHD_PCIE_PKTID) + + +/* Determine number of pktids that are available */ +static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle); + +/* Allocate a unique pktid against which a pkt and some metadata is saved */ +static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, + void *pkt); +static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, + void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma, + void *dmah, void *secdma, dhd_pkttype_t pkttype); +static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map, + void *pkt, dmaaddr_t pa, uint32 len, uint8 dma, + void *dmah, void *secdma, dhd_pkttype_t pkttype); + +/* Return an allocated pktid, retrieving previously saved pkt and metadata */ +static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map, + uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah, + void **secdma, dhd_pkttype_t pkttype, bool
rsv_locker); + +/* + * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees + * + * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator + * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation + * + * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined, + * either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected. + */ +#if defined(DHD_PKTID_AUDIT_ENABLED) +#define USE_DHD_PKTID_AUDIT_LOCK 1 +/* Audit the pktidmap allocator */ +/* #define DHD_PKTID_AUDIT_MAP */ + +/* Audit the pktid during production/consumption of workitems */ +#define DHD_PKTID_AUDIT_RING + +#if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING) +#error "Only one of MAP or RING audit may be enabled at a time." +#endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */ + +#define DHD_DUPLICATE_ALLOC 1 +#define DHD_DUPLICATE_FREE 2 +#define DHD_TEST_IS_ALLOC 3 +#define DHD_TEST_IS_FREE 4 + +#ifdef USE_DHD_PKTID_AUDIT_LOCK +#define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh) +#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock) +#define DHD_PKTID_AUDIT_LOCK(lock) dhd_os_spin_lock(lock) +#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags) +#else +#define DHD_PKTID_AUDIT_LOCK_INIT(osh) (void *)(1) +#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) do { /* noop */ } while (0) +#define DHD_PKTID_AUDIT_LOCK(lock) 0 +#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) do { /* noop */ } while (0) +#endif /* !USE_DHD_PKTID_AUDIT_LOCK */ + +#endif /* DHD_PKTID_AUDIT_ENABLED */ + +/* #define USE_DHD_PKTID_LOCK 1 */ + +#ifdef USE_DHD_PKTID_LOCK +#define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh) +#define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock) +#define DHD_PKTID_LOCK(lock) dhd_os_spin_lock(lock) +#define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags) +#else +#define DHD_PKTID_LOCK_INIT(osh) (void *)(1) +#define DHD_PKTID_LOCK_DEINIT(osh, lock) \ + do { \ + BCM_REFERENCE(osh); \ + BCM_REFERENCE(lock); \ + } while (0) +#define DHD_PKTID_LOCK(lock) 0 +#define DHD_PKTID_UNLOCK(lock, flags) \ + do { \ + BCM_REFERENCE(lock); \ + BCM_REFERENCE(flags); \ + } while (0) +#endif /* !USE_DHD_PKTID_LOCK */ + +/* Packet metadata saved in packet id mapper */ + +/* A Locker can be in 3 states: + * LOCKER_IS_FREE - Locker is free and can be allocated + * LOCKER_IS_BUSY - Locker is assigned and is being used; values in the + * locker (buffer address, len, phy addr etc) are populated + * with valid values + * LOCKER_IS_RSVD - The locker is reserved for future use, but the values + * in the locker are not valid. In particular, pkt should be + * NULL in this state. When the user wants to re-use the + * locker, dhd_pktid_map_free can be called with a flag + * to reserve the pktid for future use, which will clear + * the contents of the locker.
When the user calls + * dhd_pktid_map_save the locker would move to LOCKER_IS_BUSY + */ +typedef enum dhd_locker_state { + LOCKER_IS_FREE, + LOCKER_IS_BUSY, + LOCKER_IS_RSVD +} dhd_locker_state_t; + +typedef struct dhd_pktid_item { + dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */ + uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */ + dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */ + uint16 len; /* length of mapped packet's buffer */ + void *pkt; /* opaque native pointer to a packet */ + dmaaddr_t pa; /* physical address of mapped packet's buffer */ + void *dmah; /* handle to OS specific DMA map */ + void *secdma; +} dhd_pktid_item_t; + +typedef struct dhd_pktid_map { + uint32 items; /* total items in map */ + uint32 avail; /* total available items */ + int failures; /* lockers unavailable count */ + /* Spinlock to protect dhd_pktid_map in process/tasklet context */ + void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */ + +#if defined(DHD_PKTID_AUDIT_ENABLED) + void *pktid_audit_lock; + struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */ +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + uint32 keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */ + dhd_pktid_item_t lockers[0]; /* metadata storage */ +} dhd_pktid_map_t; + +/* + * PktId (Locker) #0 is never allocated and is considered invalid. + * + * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a + * depleted pktid pool and must not be used by the caller. + * + * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID. + */ + +#define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t)) +#define DHD_PKIDMAP_ITEMS(items) (items) +#define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \ + (DHD_PKTID_ITEM_SZ * ((items) + 1))) + +#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, map) dhd_pktid_map_fini_ioctl((dhd), (map)) + +/* Convert a packet to a pktid, and save pkt pointer in busy locker */ +#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) dhd_pktid_map_reserve((dhd), (map), (pkt)) + +/* Reuse a previously reserved locker to save packet params */ +#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \ + dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \ + (uint8)(dir), (void *)(dmah), (void *)(secdma), \ + (dhd_pkttype_t)(pkttype)) + +/* Convert a packet to a pktid, and save packet params in locker */ +#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \ + dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \ + (uint8)(dir), (void *)(dmah), (void *)(secdma), \ + (dhd_pkttype_t)(pkttype)) + +/* Convert pktid to a packet, and free the locker */ +#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ + dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \ + (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ + (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER) + +/* Convert the pktid to a packet, empty locker, but keep it reserved */ +#define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ + dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \ + (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ + (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER) + +#define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map) + +#if defined(DHD_PKTID_AUDIT_ENABLED) + +static int dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid, + 
const int test_for, const char *errmsg); + +/** +* dhd_pktid_audit - Use the mwbmap to audit validity of a pktid. +*/ +static int +dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid, + const int test_for, const char *errmsg) +{ +#define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: " + + const uint32 max_pktid_items = (MAX_PKTID_ITEMS); + struct bcm_mwbmap *handle; + uint32 flags; + bool ignore_audit; + + if (pktid_map == (dhd_pktid_map_t *)NULL) { + DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg)); + return BCME_OK; + } + + flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock); + + handle = pktid_map->pktid_audit; + if (handle == (struct bcm_mwbmap *)NULL) { + DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg)); + DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); + return BCME_OK; + } + + /* Exclude special pktids from audit */ + ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) || (pktid == DHD_FAKE_PKTID); + if (ignore_audit) { + DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); + return BCME_OK; + } + + if ((pktid == DHD_PKTID_INVALID) || (pktid > max_pktid_items)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid)); + /* lock is released in "error" */ + goto error; + } + + /* Perform audit */ + switch (test_for) { + case DHD_DUPLICATE_ALLOC: + if (!bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n", + errmsg, pktid)); + goto error; + } + bcm_mwbmap_force(handle, pktid); + break; + + case DHD_DUPLICATE_FREE: + if (bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n", + errmsg, pktid)); + goto error; + } + bcm_mwbmap_free(handle, pktid); + break; + + case DHD_TEST_IS_ALLOC: + if (bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n", + errmsg, pktid)); + goto error; + } + break; + + case DHD_TEST_IS_FREE: + if (!bcm_mwbmap_isfree(handle, pktid)) { + DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free\n", + errmsg, pktid)); + goto error; + } + break; + + default: + goto error; + } + + DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); + return BCME_OK; + +error: + + DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); + /* May insert any trap mechanism here ! */ + dhd_pktid_audit_fail_cb(dhd); + + return BCME_ERROR; +} + +#define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \ + dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__) + +#endif /* DHD_PKTID_AUDIT_ENABLED */ + +/* +------------------ End of PCIE DHD PKTID AUDIT ------------------------+ */ + + +/** + * +---------------------------------------------------------------------------+ + * Packet to Packet Id mapper using a <numbered_key, locker> paradigm. + * + * dhd_pktid_map manages a set of unique Packet Ids in the range + * [1..MAX_PKTID_ITEMS]. + * + * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique + * packet id is returned. This unique packet id may be used to retrieve the + * previously saved packet metadata, using dhd_pktid_map_free(). On invocation + * of dhd_pktid_map_free(), the unique packet id is essentially freed. A + * subsequent call to dhd_pktid_map_alloc() may reuse this packet id. + * + * Implementation Note: + * Convert this into an abstraction and place it into bcmutils! + * The locker abstraction should treat contents as opaque storage, and a + * callback should be registered to handle busy lockers on destructor. + * + * +---------------------------------------------------------------------------+ + */
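A sketch of a typical Tx-path round trip through the mapper via the macros above (not part of the patch; the DMA_TX direction constant is assumed to come from the OSL, 'pkt'/'pa'/'len'/'dmah'/'secdma' are placeholders, and error paths are elided):

static void
example_pktid_roundtrip(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
	void *pkt, dmaaddr_t pa, uint32 len, void *dmah, void *secdma)
{
	uint32 pktid;

	/* save the packet and its DMA metadata; id 0 means the pool is depleted */
	pktid = DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, DMA_TX,
		dmah, secdma, PKTTYPE_DATA_TX);
	if (pktid == DHD_PKTID_INVALID)
		return;

	/* ... the pktid travels to the dongle inside a TxPost work item ... */

	/* on completion, exchange the id back for the packet and its metadata */
	pkt = DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma,
		PKTTYPE_DATA_TX);
}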
+ * + * +---------------------------------------------------------------------------+ + */ + +/** Allocate and initialize a mapper of num_items */ + +static dhd_pktid_map_handle_t * +dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index) +{ + void *osh; + uint32 nkey; + dhd_pktid_map_t *map; + uint32 dhd_pktid_map_sz; + uint32 map_items; +#ifdef DHD_USE_STATIC_PKTIDMAP + uint32 section; +#endif /* DHD_USE_STATIC_PKTIDMAP */ + osh = dhd->osh; + + ASSERT((num_items >= 1) && (num_items <= MAX_PKTID_ITEMS)); + dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items); + +#ifdef DHD_USE_STATIC_PKTIDMAP + if (index == PKTID_MAP_HANDLE) { + section = DHD_PREALLOC_PKTID_MAP; + } else { + section = DHD_PREALLOC_PKTID_MAP_IOCTL; + } + + map = (dhd_pktid_map_t *)DHD_OS_PREALLOC(dhd, section, dhd_pktid_map_sz); +#else + map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz); +#endif /* DHD_USE_STATIC_PKTIDMAP */ + + if (map == NULL) { + DHD_ERROR(("%s:%d: MALLOC failed for size %d\n", + __FUNCTION__, __LINE__, dhd_pktid_map_sz)); + goto error; + } + + bzero(map, dhd_pktid_map_sz); + + /* Initialize the lock that protects this structure */ + map->pktid_lock = DHD_PKTID_LOCK_INIT(osh); + if (map->pktid_lock == NULL) { + DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__)); + goto error; + } + + map->items = num_items; + map->avail = num_items; + + map_items = DHD_PKIDMAP_ITEMS(map->items); + +#if defined(DHD_PKTID_AUDIT_ENABLED) + /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */ + map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1); + if (map->pktid_audit == (struct bcm_mwbmap *)NULL) { + DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__)); + goto error; + } else { + DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n", + __FUNCTION__, __LINE__, map_items + 1)); + } + + map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh); + +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */ + map->keys[nkey] = nkey; /* populate with unique keys */ + map->lockers[nkey].state = LOCKER_IS_FREE; + map->lockers[nkey].pkt = NULL; /* bzero: redundant */ + map->lockers[nkey].len = 0; + } + + /* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be busy */ + map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; + map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */ + map->lockers[DHD_PKTID_INVALID].len = 0; + +#if defined(DHD_PKTID_AUDIT_ENABLED) + /* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */ + bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID); +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + return (dhd_pktid_map_handle_t *)map; /* opaque handle */ + +error: + + if (map) { + +#if defined(DHD_PKTID_AUDIT_ENABLED) + if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { + bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */ + map->pktid_audit = (struct bcm_mwbmap *)NULL; + if (map->pktid_audit_lock) + DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock); + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + if (map->pktid_lock) + DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock); + + MFREE(osh, map, dhd_pktid_map_sz); + } + + return (dhd_pktid_map_handle_t *)NULL; +} + +/** + * Retrieve all allocated keys and free all . + * Freeing implies: unmapping the buffers and freeing the native packet + * This could have been a callback registered with the pktid mapper. 
+ */ + +static void +dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) +{ + void *osh; + uint32 nkey; + dhd_pktid_map_t *map; + uint32 dhd_pktid_map_sz; + dhd_pktid_item_t *locker; + uint32 map_items; + uint32 flags; + + if (handle == NULL) { + return; + } + + map = (dhd_pktid_map_t *)handle; + flags = DHD_PKTID_LOCK(map->pktid_lock); + osh = dhd->osh; + + dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); + + nkey = 1; /* skip reserved KEY #0, and start from 1 */ + locker = &map->lockers[nkey]; + + map_items = DHD_PKIDMAP_ITEMS(map->items); + + for (; nkey <= map_items; nkey++, locker++) { + + if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */ + + locker->state = LOCKER_IS_FREE; /* force open the locker */ + +#if defined(DHD_PKTID_AUDIT_ENABLED) + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */ +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + { /* This could be a callback registered with dhd_pktid_map */ + DMA_UNMAP(osh, locker->pa, locker->len, + locker->dir, 0, DHD_DMAH_NULL); + dhd_prot_packet_free(dhd, (ulong*)locker->pkt, + locker->pkttype, TRUE); + } + } +#if defined(DHD_PKTID_AUDIT_ENABLED) + else { + DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + locker->pkt = NULL; /* clear saved pkt */ + locker->len = 0; + } + +#if defined(DHD_PKTID_AUDIT_ENABLED) + if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { + bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */ + map->pktid_audit = (struct bcm_mwbmap *)NULL; + if (map->pktid_audit_lock) { + DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock); + } + } +#endif /* DHD_PKTID_AUDIT_ENABLED */ + + DHD_PKTID_UNLOCK(map->pktid_lock, flags); + DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock); + +#ifdef DHD_USE_STATIC_PKTIDMAP + DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz); +#else + MFREE(osh, handle, dhd_pktid_map_sz); +#endif /* DHD_USE_STATIC_PKTIDMAP */ +} + +#ifdef IOCTLRESP_USE_CONSTMEM +/** Called in detach scenario. Releasing IOCTL buffers. 
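+ * Note: the loop below drops pktid_lock around each free_ioctl_return_buffer()
+ * call and re-acquires it afterwards; the assumption is that the DMA buffer
+ * free path may block, so it must not run under the spinlock.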
 */
+static void
+dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
+{
+	uint32 nkey;
+	dhd_pktid_map_t *map;
+	uint32 dhd_pktid_map_sz;
+	dhd_pktid_item_t *locker;
+	uint32 map_items;
+	uint32 flags;
+	osl_t *osh = dhd->osh;
+
+	if (handle == NULL) {
+		return;
+	}
+
+	map = (dhd_pktid_map_t *)handle;
+	flags = DHD_PKTID_LOCK(map->pktid_lock);
+
+	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
+
+	nkey = 1; /* skip reserved KEY #0, and start from 1 */
+	locker = &map->lockers[nkey];
+
+	map_items = DHD_PKIDMAP_ITEMS(map->items);
+
+	for (; nkey <= map_items; nkey++, locker++) {
+
+		if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */
+
+			locker->state = LOCKER_IS_FREE; /* force open the locker */
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+			{
+				dhd_dma_buf_t retbuf;
+				retbuf.va = locker->pkt;
+				retbuf.len = locker->len;
+				retbuf.pa = locker->pa;
+				retbuf.dmah = locker->dmah;
+				retbuf.secdma = locker->secdma;
+
+				/* This could be a callback registered with dhd_pktid_map */
+				DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+				free_ioctl_return_buffer(dhd, &retbuf);
+				flags = DHD_PKTID_LOCK(map->pktid_lock);
+			}
+		}
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+		else {
+			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
+		}
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+		locker->pkt = NULL; /* clear saved pkt */
+		locker->len = 0;
+	}
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
+		bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
+		map->pktid_audit = (struct bcm_mwbmap *)NULL;
+		if (map->pktid_audit_lock) {
+			DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
+		}
+	}
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+	DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
+
+#ifdef DHD_USE_STATIC_PKTIDMAP
+	DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz);
+#else
+	MFREE(osh, handle, dhd_pktid_map_sz);
+#endif /* DHD_USE_STATIC_PKTIDMAP */
+}
+#endif /* IOCTLRESP_USE_CONSTMEM */
+
+/** Get the pktid free count */
+static INLINE uint32 BCMFASTPATH
+dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
+{
+	dhd_pktid_map_t *map;
+	uint32 flags;
+	uint32 avail;
+
+	ASSERT(handle != NULL);
+	map = (dhd_pktid_map_t *)handle;
+
+	flags = DHD_PKTID_LOCK(map->pktid_lock);
+	avail = map->avail;
+	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+
+	return avail;
+}
+
+/**
+ * Allocate a locker, save the pkt contents, and return the locker's numbered
+ * key. dhd_pktid_map_alloc() is not reentrant; serialization is the caller's
+ * responsibility. Callers must treat a returned value of DHD_PKTID_INVALID as
+ * a failure case, implying a depleted pool of pktids.
+ */
+
+static INLINE uint32
+__dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt)
+{
+	uint32 nkey;
+	dhd_pktid_map_t *map;
+	dhd_pktid_item_t *locker;
+
+	ASSERT(handle != NULL);
+	map = (dhd_pktid_map_t *)handle;
+
+	if (map->avail <= 0) { /* no more pktids to allocate */
+		map->failures++;
+		DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
+		return DHD_PKTID_INVALID; /* failed alloc request */
+	}
+
+	ASSERT(map->avail <= map->items);
+	nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
+	locker = &map->lockers[nkey]; /* save packet metadata in locker */
+	map->avail--;
+	locker->pkt = pkt; /* pkt is saved, other params not yet saved */
+	locker->len = 0;
+	locker->state = LOCKER_IS_BUSY; /* reserve this locker */
+
+#if defined(DHD_PKTID_AUDIT_MAP)
+	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_ALLOC); /* Audit duplicate alloc */
+#endif /* DHD_PKTID_AUDIT_MAP */
+
+	ASSERT(nkey != DHD_PKTID_INVALID);
+	return nkey; /* return locker's numbered key */
+}
+
+
+/**
+ * dhd_pktid_map_reserve - reserve a unique numbered key. The reserved locker
+ * is not yet populated. Invoke the pktid save api to populate the packet
+ * parameters into the locker.
+ * Wrapper that takes the required lock when called directly.
+ */
+static INLINE uint32
+dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt)
+{
+	dhd_pktid_map_t *map;
+	uint32 flags;
+	uint32 ret;
+
+	ASSERT(handle != NULL);
+	map = (dhd_pktid_map_t *)handle;
+	flags = DHD_PKTID_LOCK(map->pktid_lock);
+	ret = __dhd_pktid_map_reserve(dhd, handle, pkt);
+	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+
+	return ret;
+}
+
+static INLINE void
+__dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
+	uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
+	dhd_pkttype_t pkttype)
+{
+	dhd_pktid_map_t *map;
+	dhd_pktid_item_t *locker;
+
+	ASSERT(handle != NULL);
+	map = (dhd_pktid_map_t *)handle;
+
+	ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items)));
+
+	locker = &map->lockers[nkey];
+
+	ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
+		((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
+
+#if defined(DHD_PKTID_AUDIT_MAP)
+	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* verify prior reservation */
+#endif /* DHD_PKTID_AUDIT_MAP */
+
+	/* store contents in locker */
+	locker->dir = dir;
+	locker->pa = pa;
+	locker->len = (uint16)len; /* 16bit len */
+	locker->dmah = dmah; /* dma handle */
+	locker->secdma = secdma;
+	locker->pkttype = pkttype;
+	locker->pkt = pkt;
+	locker->state = LOCKER_IS_BUSY; /* make this locker busy */
+}
+
+/**
+ * dhd_pktid_map_save - Save a packet's parameters into a locker corresponding
+ * to a previously reserved unique numbered key.
+ * Wrapper that takes the required lock when called directly.
+ */
+static INLINE void
+dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
+	uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
+	dhd_pkttype_t pkttype)
+{
+	dhd_pktid_map_t *map;
+	uint32 flags;
+
+	ASSERT(handle != NULL);
+	map = (dhd_pktid_map_t *)handle;
+	flags = DHD_PKTID_LOCK(map->pktid_lock);
+	__dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, len,
+		dir, dmah, secdma, pkttype);
+	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+}
+
+/**
+ * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
+ * contents into the corresponding locker. Return the numbered key.
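+ *
+ * Equivalent two-step sequence (sketch only; the one-shot alloc below takes
+ * the pktid lock once instead of twice):
+ *
+ *   nkey = dhd_pktid_map_reserve(dhd, map, pkt);
+ *   if (nkey != DHD_PKTID_INVALID)
+ *           dhd_pktid_map_save(dhd, map, pkt, nkey, pa, len, dir, dmah,
+ *                   secdma, pkttype);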
+ */
+static uint32 BCMFASTPATH
+dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
+	dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
+	dhd_pkttype_t pkttype)
+{
+	uint32 nkey;
+	uint32 flags;
+	dhd_pktid_map_t *map;
+
+	ASSERT(handle != NULL);
+	map = (dhd_pktid_map_t *)handle;
+
+	flags = DHD_PKTID_LOCK(map->pktid_lock);
+
+	nkey = __dhd_pktid_map_reserve(dhd, handle, pkt);
+	if (nkey != DHD_PKTID_INVALID) {
+		__dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
+			len, dir, dmah, secdma, pkttype);
+#if defined(DHD_PKTID_AUDIT_MAP)
+		DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* verify reservation */
+#endif /* DHD_PKTID_AUDIT_MAP */
+	}
+
+	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+
+	return nkey;
+}
+
+/**
+ * dhd_pktid_map_free - Given a numbered key, return the locker contents.
+ * dhd_pktid_map_free() is not reentrant; serialization is the caller's
+ * responsibility.
+ * Callers may not free the pktid value DHD_PKTID_INVALID or an arbitrary
+ * pktid value. Only a previously allocated pktid may be freed.
+ */
+static void * BCMFASTPATH
+dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
+	dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma,
+	dhd_pkttype_t pkttype, bool rsv_locker)
+{
+	dhd_pktid_map_t *map;
+	dhd_pktid_item_t *locker;
+	void * pkt;
+	uint32 flags;
+
+	ASSERT(handle != NULL);
+
+	map = (dhd_pktid_map_t *)handle;
+
+	flags = DHD_PKTID_LOCK(map->pktid_lock);
+
+	ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items)));
+
+	locker = &map->lockers[nkey];
+
+#if defined(DHD_PKTID_AUDIT_MAP)
+	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
+#endif /* DHD_PKTID_AUDIT_MAP */
+
+	if (locker->state == LOCKER_IS_FREE) { /* Debug check for cloned numbered key */
+		DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n",
+			__FUNCTION__, __LINE__, nkey));
+		ASSERT(locker->state != LOCKER_IS_FREE);
+
+		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+		return NULL;
+	}
+
+	/* Check the colour of the buffer, i.e. a buffer posted for TX must be
+	 * freed on TX completion; similarly, a buffer posted for IOCTL must be
+	 * freed on IOCTL completion, etc.
+	 */
+	if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
+
+		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+
+		DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u>\n",
+			__FUNCTION__, __LINE__, nkey));
+		ASSERT(locker->pkttype == pkttype);
+
+		return NULL;
+	}
+
+	if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
+		map->avail++;
+		map->keys[map->avail] = nkey; /* make this numbered key available */
+		locker->state = LOCKER_IS_FREE; /* open and free Locker */
+	} else {
+		/* pktid will be reused, but the locker does not have a valid pkt */
+		locker->state = LOCKER_IS_RSVD;
+	}
+
+#if defined(DHD_PKTID_AUDIT_MAP)
+	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
+#endif /* DHD_PKTID_AUDIT_MAP */
+
+	*pa = locker->pa; /* return contents of locker */
+	*len = (uint32)locker->len;
+	*dmah = locker->dmah;
+	*secdma = locker->secdma;
+
+	pkt = locker->pkt;
+	locker->pkt = NULL; /* Clear pkt */
+	locker->len = 0;
+
+	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+	return pkt;
+}
+
+#else /* ! DHD_PCIE_PKTID */
+
+
+typedef struct pktlist {
+	PKT_LIST *tx_pkt_list;   /* list for tx packets */
+	PKT_LIST *rx_pkt_list;   /* list for rx packets */
+	PKT_LIST *ctrl_pkt_list; /* list for ioctl/event buf post */
+} pktlists_t;
+
+/*
+ * Given that each workitem only uses a 32bit pktid, only 32bit hosts may use
+ * a one-to-one mapping between a 32bit pktptr and a 32bit pktid.
+ *
+ * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
+ * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
+ *   a lock.
+ * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
+ */
+#define DHD_PKTID32(pktptr32) ((uint32)(pktptr32))
+#define DHD_PKTPTR32(pktid32) ((void *)(pktid32))
+
+
+static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
+	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
+	dhd_pkttype_t pkttype);
+static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
+	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
+	dhd_pkttype_t pkttype);
+
+static dhd_pktid_map_handle_t *
+dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index)
+{
+	osl_t *osh = dhd->osh;
+	pktlists_t *handle = NULL;
+
+	if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
+		DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
+			__FUNCTION__, __LINE__, (int)sizeof(pktlists_t)));
+		goto error_done;
+	}
+
+	if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
+		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
+			__FUNCTION__, __LINE__, (int)sizeof(PKT_LIST)));
+		goto error;
+	}
+
+	if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
+		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
+			__FUNCTION__, __LINE__, (int)sizeof(PKT_LIST)));
+		goto error;
+	}
+
+	if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
+		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
+			__FUNCTION__, __LINE__, (int)sizeof(PKT_LIST)));
+		goto error;
+	}
+
+	PKTLIST_INIT(handle->tx_pkt_list);
+	PKTLIST_INIT(handle->rx_pkt_list);
+	PKTLIST_INIT(handle->ctrl_pkt_list);
+
+	return (dhd_pktid_map_handle_t *) handle;
+
+error:
+	if (handle->ctrl_pkt_list) {
+		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
+	}
+
+	if (handle->rx_pkt_list) {
+		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
+	}
+
+	if (handle->tx_pkt_list) {
+		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
+	}
+
+	if (handle) {
+		MFREE(osh, handle, sizeof(pktlists_t));
+	}
+
+error_done:
+	return (dhd_pktid_map_handle_t *)NULL;
+}
+
+static void
+dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
+{
+	osl_t *osh = dhd->osh;
+	pktlists_t *handle = (pktlists_t *) map;
+
+	ASSERT(handle != NULL);
+	if (handle == (pktlists_t *)NULL) {
+		return;
+	}
+
+	if (handle->ctrl_pkt_list) {
+		PKTLIST_FINI(handle->ctrl_pkt_list);
+		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
+	}
+
+	if (handle->rx_pkt_list) {
+		PKTLIST_FINI(handle->rx_pkt_list);
+		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
+	}
+
+	if (handle->tx_pkt_list) {
+		PKTLIST_FINI(handle->tx_pkt_list);
+		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
+	}
+
+	if (handle) {
+		MFREE(osh, handle, sizeof(pktlists_t));
+	}
+}
+
+/** Save dma parameters into the packet's pkttag and convert a pktptr to a pktid */
+static INLINE uint32
+dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
+	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
+	dhd_pkttype_t pkttype)
+{
+	pktlists_t *handle = (pktlists_t *) map;
+	ASSERT(pktptr32 != NULL);
+	DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
+	DHD_PKT_SET_DMAH(pktptr32, dmah);
+	DHD_PKT_SET_PA(pktptr32, pa);
+	DHD_PKT_SET_SECDMA(pktptr32, secdma);
+
+	if (pkttype == PKTTYPE_DATA_TX) {
+		PKTLIST_ENQ(handle->tx_pkt_list, pktptr32);
+	} else if (pkttype == PKTTYPE_DATA_RX) {
+		PKTLIST_ENQ(handle->rx_pkt_list, pktptr32);
+	} else {
+		PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32);
+	}
+
+	return DHD_PKTID32(pktptr32);
+}
+
+/** Convert a pktid to a pktptr and retrieve the saved dma parameters from the packet */
+static INLINE void *
+dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
+	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
+	dhd_pkttype_t pkttype)
+{
+	pktlists_t *handle = (pktlists_t *) map;
+	void *pktptr32;
+
+	ASSERT(pktid32 != 0U);
+	pktptr32 = DHD_PKTPTR32(pktid32);
+	*dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
+	*dmah = DHD_PKT_GET_DMAH(pktptr32);
+	*pa = DHD_PKT_GET_PA(pktptr32);
+	*secdma = DHD_PKT_GET_SECDMA(pktptr32);
+
+	if (pkttype == PKTTYPE_DATA_TX) {
+		PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32);
+	} else if (pkttype == PKTTYPE_DATA_RX) {
+		PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32);
+	} else {
+		PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32);
+	}
+
+	return pktptr32;
+}
+
+#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) DHD_PKTID32(pkt)
+
+#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
+	({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
+	   dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
+			(dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
+	})
+
+#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
+	({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
+	   dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
+			(dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
+	})
+
+#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
+	({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype); \
+	   dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
+			(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
+			(void **)&secdma, (dhd_pkttype_t)(pkttype)); \
+	})
+
+#define DHD_PKTID_AVAIL(map) (~0)
+
+#endif /* ! DHD_PCIE_PKTID */
+
+/* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */
+
+
+/**
+ * The PCIE FD protocol layer is constructed in two phases:
+ * Phase 1. dhd_prot_attach()
+ * Phase 2. dhd_prot_init()
+ *
+ * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
+ * All common rings are also attached (msgbuf_ring_t objects are allocated
+ * with DMA-able buffers).
+ * All dhd_dma_buf_t objects are also allocated here.
+ *
+ * As dhd_prot_attach() is invoked before the pcie_shared object is read, any
+ * initialization of objects that requires information advertised by the dongle
+ * may not be performed here.
+ * E.g. the number of TxPost flowrings is not known at this point, nor do
+ * we know which form of D2H DMA sync mechanism is advertised by the dongle, or
+ * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
+ * rings (common + flow).
+ *
+ * dhd_prot_init() is invoked after the bus layer has fetched the information
+ * advertised by the dongle in the pcie_shared_t.
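+ *
+ * Call order sketch (the bus layer drives this; shown only to illustrate the
+ * two phases):
+ *
+ *   dhd_prot_attach(dhd);   -- rings and DMA-able buffers are allocated
+ *   ... bus layer fetches pcie_shared_t from the dongle ...
+ *   dhd_prot_init(dhd);     -- dongle-advertised features are applied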
+ */ +int +dhd_prot_attach(dhd_pub_t *dhd) +{ + osl_t *osh = dhd->osh; + dhd_prot_t *prot; + + /* Allocate prot structure */ + if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT, + sizeof(dhd_prot_t)))) { + DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); + goto fail; + } + memset(prot, 0, sizeof(*prot)); + + prot->osh = osh; + dhd->prot = prot; + + /* DMAing ring completes supported? FALSE by default */ + dhd->dma_d2h_ring_upd_support = FALSE; + dhd->dma_h2d_ring_upd_support = FALSE; + + /* Common Ring Allocations */ + + /* Ring 0: H2D Control Submission */ + if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl", + H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE, + BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n", + __FUNCTION__)); + goto fail; + } + + /* Ring 1: H2D Receive Buffer Post */ + if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp", + H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE, + BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n", + __FUNCTION__)); + goto fail; + } + + /* Ring 2: D2H Control Completion */ + if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl", + D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE, + BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n", + __FUNCTION__)); + goto fail; + } + + /* Ring 3: D2H Transmit Complete */ + if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl", + D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE, + BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n", + __FUNCTION__)); + goto fail; + + } + + /* Ring 4: D2H Receive Complete */ + if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl", + D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE, + BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) { + DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n", + __FUNCTION__)); + goto fail; + + } + + /* + * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able + * buffers for flowrings will be instantiated, in dhd_prot_init() . 
+ * See dhd_prot_flowrings_pool_attach()
+ */
+	/* ioctl response buffer */
+	if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
+		goto fail;
+	}
+
+	/* IOCTL request buffer */
+	if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
+		goto fail;
+	}
+
+	/* Scratch buffer for dma rx offset */
+	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) {
+		goto fail;
+	}
+
+	/* Scratch buffer for bus throughput measurement */
+	if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
+		goto fail;
+	}
+
+#ifdef DHD_RX_CHAINING
+	dhd_rxchain_reset(&prot->rxchain);
+#endif
+
+#if defined(DHD_LB)
+
+	/* Initialize the work queues to be used by the Load Balancing logic */
+#if defined(DHD_LB_TXC)
+	{
+		void *buffer;
+		buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
+		bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
+			buffer, DHD_LB_WORKQ_SZ);
+		prot->tx_compl_prod_sync = 0;
+		DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
+			__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
+	}
+#endif /* DHD_LB_TXC */
+
+#if defined(DHD_LB_RXC)
+	{
+		void *buffer;
+		buffer = MALLOC(dhd->osh, sizeof(uint32) * DHD_LB_WORKQ_SZ);
+		bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
+			buffer, DHD_LB_WORKQ_SZ);
+		prot->rx_compl_prod_sync = 0;
+		DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
+			__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
+	}
+#endif /* DHD_LB_RXC */
+
+#endif /* DHD_LB */
+
+	return BCME_OK;
+
+fail:
+
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+	if (prot != NULL) {
+		dhd_prot_detach(dhd);
+	}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+	return BCME_NOMEM;
+} /* dhd_prot_attach */
+
+
+/**
+ * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
+ * completed its initialization of the pcie_shared structure, we may now fetch
+ * the dongle-advertised features and adjust the protocol layer accordingly.
+ *
+ * dhd_prot_init() may be invoked again after a dhd_prot_reset().
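+ *
+ * Soft reboot sketch (hypothetical caller):
+ *
+ *   dhd_prot_reset(dhd);    -- drop protocol state, keep allocations
+ *   dhd_prot_init(dhd);     -- re-sync with the rebooted dongle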
+ */ +int +dhd_prot_init(dhd_pub_t *dhd) +{ + sh_addr_t base_addr; + dhd_prot_t *prot = dhd->prot; + + /* PKTID handle INIT */ + if (prot->pktid_map_handle != NULL) { + DHD_ERROR(("%s: pktid_map_handle already set!\n", __FUNCTION__)); + ASSERT(0); + return BCME_ERROR; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + if (prot->pktid_map_handle_ioctl != NULL) { + DHD_ERROR(("%s: pktid_map_handle_ioctl already set!\n", __FUNCTION__)); + ASSERT(0); + return BCME_ERROR; + } +#endif /* IOCTLRESP_USE_CONSTMEM */ + + prot->pktid_map_handle = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_ITEMS, PKTID_MAP_HANDLE); + if (prot->pktid_map_handle == NULL) { + DHD_ERROR(("%s: Unable to map packet id's\n", __FUNCTION__)); + ASSERT(0); + return BCME_NOMEM; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd, + DHD_FLOWRING_MAX_IOCTLRESPBUF_POST, PKTID_MAP_HANDLE_IOCTL); + if (prot->pktid_map_handle_ioctl == NULL) { + DHD_ERROR(("%s: Unable to map ioctl response buffers\n", __FUNCTION__)); + ASSERT(0); + return BCME_NOMEM; + } +#endif /* IOCTLRESP_USE_CONSTMEM */ + + /* Max pkts in ring */ + prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM; + + DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__, prot->max_tx_count)); + + /* Read max rx packets supported by dongle */ + dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0); + if (prot->max_rxbufpost == 0) { + /* This would happen if the dongle firmware is not */ + /* using the latest shared structure template */ + prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST; + } + DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost)); + + /* Initialize. bzero() would blow away the dma pointers. */ + prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST; + prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST; + + prot->cur_ioctlresp_bufs_posted = 0; + prot->active_tx_count = 0; + prot->data_seq_no = 0; + prot->ioctl_seq_no = 0; + prot->rxbufpost = 0; + prot->cur_event_bufs_posted = 0; + prot->ioctl_state = 0; + prot->curr_ioctl_cmd = 0; + prot->ioctl_received = IOCTL_WAIT; + + prot->dmaxfer.srcmem.va = NULL; + prot->dmaxfer.dstmem.va = NULL; + prot->dmaxfer.in_progress = FALSE; + + prot->metadata_dbg = FALSE; + prot->rx_metadata_offset = 0; + prot->tx_metadata_offset = 0; + prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT; + + prot->ioctl_trans_id = 0; + + /* Register the interrupt function upfront */ + /* remove corerev checks in data path */ + prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus); + + /* Initialize Common MsgBuf Rings */ + + dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn); + dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn); + dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln); + dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln); + dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln); + +#if defined(PCIE_D2H_SYNC) + dhd_prot_d2h_sync_init(dhd); +#endif /* PCIE_D2H_SYNC */ + + dhd_prot_h2d_sync_init(dhd); + + /* init the scratch buffer */ + dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa); + dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), + D2H_DMA_SCRATCH_BUF, 0); + dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len, + sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0); + + /* If supported by the host, indicate the memory block + * for completion writes / submission reads to shared space + */ + if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { + dhd_base_addr_htolpa(&base_addr, 
			prot->d2h_dma_indx_wr_buf.pa);
+		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+			D2H_DMA_INDX_WR_BUF, 0);
+		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
+		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+			H2D_DMA_INDX_RD_BUF, 0);
+	}
+
+	if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
+		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
+		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+			H2D_DMA_INDX_WR_BUF, 0);
+		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
+		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+			D2H_DMA_INDX_RD_BUF, 0);
+	}
+
+	/*
+	 * If the DMA-able buffers for flowrings need to come from a specific
+	 * contiguous memory region, then setup prot->flowrings_dma_buf here.
+	 * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
+	 * this contiguous memory region, for each of the flowrings.
+	 */
+
+	/* Pre-allocate pool of msgbuf_ring for flowrings */
+	if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
+		return BCME_ERROR;
+	}
+
+	/* Host should configure soft doorbells if needed ... here */
+
+	/* Post to dongle host configured soft doorbells */
+	dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
+
+	/* Post buffers for packet reception and ioctl/event responses */
+	dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
+	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+	dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+
+	return BCME_OK;
+} /* dhd_prot_init */
+
+
+/**
+ * dhd_prot_detach - PCIE FD protocol layer destructor.
+ * Unlinks and frees allocated protocol memory (including dhd_prot).
+ */
+void
+dhd_prot_detach(dhd_pub_t *dhd)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	/* Stop the protocol module */
+	if (prot) {
+
+		/* free up all DMA-able buffers allocated during prot attach/init */
+
+		dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
+		dhd_dma_buf_free(dhd, &prot->retbuf); /* ioctl return buffer */
+		dhd_dma_buf_free(dhd, &prot->ioctbuf);
+		dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
+
+		/* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
+		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
+		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
+		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
+		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
+
+		/* Common MsgBuf Rings */
+		dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
+		dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
+		dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
+		dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
+		dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
+
+		/* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
+		dhd_prot_flowrings_pool_detach(dhd);
+
+		DHD_NATIVE_TO_PKTID_FINI(dhd, dhd->prot->pktid_map_handle);
+
+#if defined(DHD_LB)
+#if defined(DHD_LB_TXC)
+		if (prot->tx_compl_prod.buffer) {
+			MFREE(dhd->osh, prot->tx_compl_prod.buffer,
+				sizeof(void*) * DHD_LB_WORKQ_SZ);
+		}
+#endif /* DHD_LB_TXC */
+#if defined(DHD_LB_RXC)
+		if (prot->rx_compl_prod.buffer) {
+			MFREE(dhd->osh, prot->rx_compl_prod.buffer,
+				sizeof(void*) * DHD_LB_WORKQ_SZ);
+		}
+#endif /* DHD_LB_RXC */
+#endif /* DHD_LB */
+
+		/* Free dhd_prot only after the LB work queue buffers above have
+		 * been released; freeing it earlier would leave prot pointing
+		 * at freed memory while it is still dereferenced.
+		 */
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+		MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+		dhd->prot = NULL;
+	}
+} /* dhd_prot_detach */
+
+
+/**
+ * dhd_prot_reset - Reset the protocol layer without freeing any objects.
This + * may be invoked to soft reboot the dongle, without having to detach and attach + * the entire protocol layer. + * + * After dhd_prot_reset(), dhd_prot_init() may be invoked without going through + * a dhd_prot_attach() phase. + */ +void +dhd_prot_reset(dhd_pub_t *dhd) +{ + struct dhd_prot *prot = dhd->prot; + + DHD_TRACE(("%s\n", __FUNCTION__)); + + if (prot == NULL) { + return; + } + + dhd_prot_flowrings_pool_reset(dhd); + + dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn); + dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn); + dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln); + dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln); + dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln); + + dhd_dma_buf_reset(dhd, &prot->retbuf); + dhd_dma_buf_reset(dhd, &prot->ioctbuf); + dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf); + dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf); + dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf); + dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf); + dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf); + + + prot->rx_metadata_offset = 0; + prot->tx_metadata_offset = 0; + + prot->rxbufpost = 0; + prot->cur_event_bufs_posted = 0; + prot->cur_ioctlresp_bufs_posted = 0; + + prot->active_tx_count = 0; + prot->data_seq_no = 0; + prot->ioctl_seq_no = 0; + prot->ioctl_state = 0; + prot->curr_ioctl_cmd = 0; + prot->ioctl_received = IOCTL_WAIT; + prot->ioctl_trans_id = 0; + + /* dhd_flow_rings_init is located at dhd_bus_start, + * so when stopping bus, flowrings shall be deleted + */ + if (dhd->flow_rings_inited) { + dhd_flow_rings_deinit(dhd); + } + + if (prot->pktid_map_handle) { + DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_map_handle); + prot->pktid_map_handle = NULL; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + if (prot->pktid_map_handle_ioctl) { + DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl); + prot->pktid_map_handle_ioctl = NULL; + } +#endif /* IOCTLRESP_USE_CONSTMEM */ +} /* dhd_prot_reset */ + + +void +dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset) +{ + dhd_prot_t *prot = dhd->prot; + prot->rx_dataoffset = rx_offset; +} + +/** + * Initialize protocol: sync w/dongle state. + * Sets dongle media info (iswl, drv_version, mac address). 
+ */
+int
+dhd_sync_with_dongle(dhd_pub_t *dhd)
+{
+	int ret = 0;
+	wlc_rev_info_t revinfo;
+
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
+
+
+
+#ifdef DHD_FW_COREDUMP
+	/* Check the memdump capability */
+	dhd_get_memdump_info(dhd);
+#endif /* DHD_FW_COREDUMP */
+#ifdef BCMASSERT_LOG
+	dhd_get_assert_info(dhd);
+#endif /* BCMASSERT_LOG */
+
+	/* Get the device rev info */
+	memset(&revinfo, 0, sizeof(revinfo));
+	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
+	if (ret < 0) {
+		DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
+		goto done;
+	}
+	DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
+		revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
+
+	dhd_process_cid_mac(dhd, TRUE);
+
+	ret = dhd_preinit_ioctls(dhd);
+
+	if (!ret) {
+		dhd_process_cid_mac(dhd, FALSE);
+	}
+
+	/* Always assumes wl for now */
+	dhd->iswl = TRUE;
+done:
+	return ret;
+} /* dhd_sync_with_dongle */
+
+#if defined(DHD_LB)
+
+/* DHD load balancing: deferral of work to another online CPU */
+
+/* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
+extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
+extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
+extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
+
+extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
+
+/**
+ * dhd_lb_dispatch - load balance by dispatching work to other CPU cores.
+ * Note: rx_compl_tasklet is dispatched explicitly.
+ */
+static INLINE void
+dhd_lb_dispatch(dhd_pub_t *dhdp, uint16 ring_idx)
+{
+	switch (ring_idx) {
+
+#if defined(DHD_LB_TXC)
+	case BCMPCIE_D2H_MSGRING_TX_COMPLETE:
+		bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
+		dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
+		break;
+#endif /* DHD_LB_TXC */
+
+	case BCMPCIE_D2H_MSGRING_RX_COMPLETE:
+	{
+#if defined(DHD_LB_RXC)
+		dhd_prot_t *prot = dhdp->prot;
+		/* Schedule the tasklet only if we have to */
+		if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
+			/* flush WR index */
+			bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
+			dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
+		}
+#endif /* DHD_LB_RXC */
+#if defined(DHD_LB_RXP)
+		dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
+#endif /* DHD_LB_RXP */
+		break;
+	}
+	default:
+		break;
+	}
+}
+
+
+#if defined(DHD_LB_TXC)
+/**
+ * DHD load balanced tx completion tasklet handler, that will perform the
+ * freeing of packets on the selected CPU. Packet pointers are delivered to
+ * this tasklet via the tx complete workq.
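+ *
+ * Consumer-side sketch (mirrors the loop below; the producer is the tx
+ * completion path, which fills tx_compl_prod and calls bcm_workq_prod_sync()):
+ *
+ *   while ((elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ))
+ *           != BCM_RING_EMPTY) {
+ *           pkt = *WORKQ_ELEMENT(void *, workq, elem_ix);
+ *           ... DMA_UNMAP() and PKTFREE() the packet ...
+ *   }
+ *   bcm_workq_cons_sync(workq);  -- publish the new read index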
+ */
+void
+dhd_lb_tx_compl_handler(unsigned long data)
+{
+	int elem_ix;
+	void *pkt, **elem;
+	dmaaddr_t pa;
+	uint32 pa_len;
+	dhd_pub_t *dhd = (dhd_pub_t *)data;
+	dhd_prot_t *prot = dhd->prot;
+	bcm_workq_t *workq = &prot->tx_compl_cons;
+	uint32 count = 0;
+
+	DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
+
+	while (1) {
+		elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
+
+		if (elem_ix == BCM_RING_EMPTY) {
+			break;
+		}
+
+		elem = WORKQ_ELEMENT(void *, workq, elem_ix);
+		pkt = *elem;
+
+		DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));
+
+		OSL_PREFETCH(PKTTAG(pkt));
+		OSL_PREFETCH(pkt);
+
+		pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
+		pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
+
+		DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
+
+#if defined(BCMPCIE)
+		dhd_txcomplete(dhd, pkt, true);
+#endif
+
+		PKTFREE(dhd->osh, pkt, TRUE);
+		count++;
+	}
+
+	/* smp_wmb(); */
+	bcm_workq_cons_sync(workq);
+	DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
+}
+#endif /* DHD_LB_TXC */
+
+#if defined(DHD_LB_RXC)
+void
+dhd_lb_rx_compl_handler(unsigned long data)
+{
+	dhd_pub_t *dhd = (dhd_pub_t *)data;
+	bcm_workq_t *workq = &dhd->prot->rx_compl_cons;
+
+	DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd);
+
+	dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */
+	bcm_workq_cons_sync(workq);
+}
+#endif /* DHD_LB_RXC */
+
+#endif /* DHD_LB */
+
+#define DHD_DBG_SHOW_METADATA	0
+
+#if DHD_DBG_SHOW_METADATA
+static void BCMFASTPATH
+dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
+{
+	uint8 tlv_t;
+	uint8 tlv_l;
+	uint8 *tlv_v = (uint8 *)ptr;
+
+	if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
+		return;
+
+	len -= BCMPCIE_D2H_METADATA_HDRLEN;
+	tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
+
+	while (len > TLV_HDR_LEN) {
+		tlv_t = tlv_v[TLV_TAG_OFF];
+		tlv_l = tlv_v[TLV_LEN_OFF];
+
+		len -= TLV_HDR_LEN;
+		tlv_v += TLV_HDR_LEN;
+		if (len < tlv_l)
+			break;
+		if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
+			break;
+
+		switch (tlv_t) {
+		case WLFC_CTL_TYPE_TXSTATUS: {
+			uint32 txs;
+			memcpy(&txs, tlv_v, sizeof(uint32));
+			if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
+				printf("METADATA TX_STATUS: %08x\n", txs);
+			} else {
+				wl_txstatus_additional_info_t tx_add_info;
+				memcpy(&tx_add_info, tlv_v + sizeof(uint32),
+					sizeof(wl_txstatus_additional_info_t));
+				printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
+					" rate = %08x tries = %d - %d\n", txs,
+					tx_add_info.seq, tx_add_info.entry_ts,
+					tx_add_info.enq_ts, tx_add_info.last_ts,
+					tx_add_info.rspec, tx_add_info.rts_cnt,
+					tx_add_info.tx_cnt);
+			}
+		} break;
+
+		case WLFC_CTL_TYPE_RSSI: {
+			if (tlv_l == 1)
+				printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
+			else
+				printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
+					(*(tlv_v + 3) << 8) | *(tlv_v + 2),
+					(int8)(*tlv_v), *(tlv_v + 1));
+		} break;
+
+		case WLFC_CTL_TYPE_FIFO_CREDITBACK:
+			bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
+			bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_RX_STAMP: {
+			struct {
+				uint32 rspec;
+				uint32 bus_time;
+				uint32 wlan_time;
+			} rx_tmstamp;
+			memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
+			printf("METADATA RX TIMESTAMP: WLFCTS[%08x - %08x] rate = %08x\n",
+				rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
+		} break;
+
+		case WLFC_CTL_TYPE_TRANS_ID:
+			bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
+			break;
+
+		case WLFC_CTL_TYPE_COMP_TXSTATUS:
+			bcm_print_bytes("METADATA COMP_TXSTATUS",
tlv_v, tlv_l); + break; + + default: + bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l); + break; + } + + len -= tlv_l; + tlv_v += tlv_l; + } +} +#endif /* DHD_DBG_SHOW_METADATA */ + +static INLINE void BCMFASTPATH +dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send) +{ + if (pkt) { + if (pkttype == PKTTYPE_IOCTL_RX || + pkttype == PKTTYPE_EVENT_RX) { +#ifdef DHD_USE_STATIC_CTRLBUF + PKTFREE_STATIC(dhd->osh, pkt, send); +#else + PKTFREE(dhd->osh, pkt, send); +#endif /* DHD_USE_STATIC_CTRLBUF */ + } else { + PKTFREE(dhd->osh, pkt, send); + } + } +} + +static INLINE void * BCMFASTPATH +dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid) +{ + void *PKTBUF; + dmaaddr_t pa; + uint32 len; + void *dmah; + void *secdma; + +#ifdef DHD_PCIE_PKTID + if (free_pktid) { + PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, + pktid, pa, len, dmah, secdma, pkttype); + } else { + PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_map_handle, + pktid, pa, len, dmah, secdma, pkttype); + } +#else + PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid, pa, + len, dmah, secdma, pkttype); +#endif /* DHD_PCIE_PKTID */ + + if (PKTBUF) { + { + if (SECURE_DMA_ENAB(dhd->osh)) { + SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah, + secdma, 0); + } else { + DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); + } + } + } + + return PKTBUF; +} + +#ifdef IOCTLRESP_USE_CONSTMEM +static INLINE void BCMFASTPATH +dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf) +{ + memset(retbuf, 0, sizeof(dhd_dma_buf_t)); + retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid, + retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX); + + return; +} +#endif /* IOCTLRESP_USE_CONSTMEM */ + +static void BCMFASTPATH +dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid) +{ + dhd_prot_t *prot = dhd->prot; + int16 fillbufs; + uint16 cnt = 256; + int retcount = 0; + + fillbufs = prot->max_rxbufpost - prot->rxbufpost; + while (fillbufs >= RX_BUF_BURST) { + cnt--; + if (cnt == 0) { + /* find a better way to reschedule rx buf post if space not available */ + DHD_ERROR(("h2d rx post ring not available to post host buffers \n")); + DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost)); + break; + } + + /* Post in a burst of 32 buffers at a time */ + fillbufs = MIN(fillbufs, RX_BUF_BURST); + + /* Post buffers */ + retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid); + + if (retcount >= 0) { + prot->rxbufpost += (uint16)retcount; +#ifdef DHD_LB_RXC + /* dhd_prot_rxbuf_post returns the number of buffers posted */ + DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount); +#endif /* DHD_LB_RXC */ + /* how many more to post */ + fillbufs = prot->max_rxbufpost - prot->rxbufpost; + } else { + /* Make sure we don't run loop any further */ + fillbufs = 0; + } + } +} + +/** Post 'count' no of rx buffers to dongle */ +static int BCMFASTPATH +dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid) +{ + void *p; + uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; + uint8 *rxbuf_post_tmp; + host_rxbuf_post_t *rxbuf_post; + void *msg_start; + dmaaddr_t pa; + uint32 pktlen; + uint8 i = 0; + uint16 alloced = 0; + unsigned long flags; + uint32 pktid; + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = &prot->h2dring_rxp_subn; + + DHD_GENERAL_LOCK(dhd, flags); + + /* Claim space for exactly 'count' no of messages, for mitigation purpose */ + msg_start = (void *) + 
		dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
+
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	if (msg_start == NULL) {
+		DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
+		return -1;
+	}
+	/* if msg_start != NULL, we should have allocated space for at least 1 item */
+	ASSERT(alloced > 0);
+
+	rxbuf_post_tmp = (uint8*)msg_start;
+
+	/* loop through each allocated message in the rxbuf post msgbuf_ring */
+	for (i = 0; i < alloced; i++) {
+		rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
+		/* Create a rx buffer */
+		if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
+			DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
+			dhd->rx_pktgetfail++;
+			break;
+		}
+
+		pktlen = PKTLEN(dhd->osh, p);
+		if (SECURE_DMA_ENAB(dhd->osh)) {
+			DHD_GENERAL_LOCK(dhd, flags);
+			pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
+				DMA_RX, p, 0, ring->dma_buf.secdma, 0);
+			DHD_GENERAL_UNLOCK(dhd, flags);
+		} else {
+			pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+		}
+
+		if (PHYSADDRISZERO(pa)) {
+			if (SECURE_DMA_ENAB(dhd->osh)) {
+				DHD_GENERAL_LOCK(dhd, flags);
+				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
+					ring->dma_buf.secdma, 0);
+				DHD_GENERAL_UNLOCK(dhd, flags);
+			} else {
+				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+			}
+
+			PKTFREE(dhd->osh, p, FALSE);
+			DHD_ERROR(("Invalid physaddr 0\n"));
+			ASSERT(0);
+			break;
+		}
+
+		PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
+		pktlen = PKTLEN(dhd->osh, p);
+
+		/* Common msg header */
+		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
+		rxbuf_post->cmn_hdr.if_id = 0;
+		rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+		ring->seqnum++;
+
+#if defined(DHD_LB_RXC)
+		if (use_rsv_pktid == TRUE) {
+			bcm_workq_t *workq = &prot->rx_compl_cons;
+			int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
+			if (elem_ix == BCM_RING_EMPTY) {
+				DHD_ERROR(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
+				pktid = DHD_PKTID_INVALID;
+				goto alloc_pkt_id;
+			} else {
+				uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
+				pktid = *elem;
+			}
+
+			/* Now populate the previous locker with valid information */
+			if (pktid != DHD_PKTID_INVALID) {
+				rxbuf_post->cmn_hdr.request_id = htol32(pktid);
+				DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, p, pktid,
+					pa, pktlen, DMA_RX, NULL, ring->dma_buf.secdma,
+					PKTTYPE_DATA_RX);
+			}
+		} else
+#endif /* DHD_LB_RXC */
+		{
+#if defined(DHD_LB_RXC)
+alloc_pkt_id:
+#endif
+#if defined(DHD_PCIE_PKTID)
+			/* get the lock before calling DHD_NATIVE_TO_PKTID */
+			DHD_GENERAL_LOCK(dhd, flags);
+#endif
+			pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_map_handle, p, pa,
+				pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
+
+#if defined(DHD_PCIE_PKTID)
+			/* free lock */
+			DHD_GENERAL_UNLOCK(dhd, flags);
+
+			if (pktid == DHD_PKTID_INVALID) {
+
+				if (SECURE_DMA_ENAB(dhd->osh)) {
+					DHD_GENERAL_LOCK(dhd, flags);
+					SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
+						ring->dma_buf.secdma, 0);
+					DHD_GENERAL_UNLOCK(dhd, flags);
+				} else {
+					DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+				}
+
+				PKTFREE(dhd->osh, p, FALSE);
+				DHD_ERROR(("Pktid pool depleted.\n"));
+				break;
+			}
+#endif /* DHD_PCIE_PKTID */
+		}
+
+		rxbuf_post->data_buf_len = htol16((uint16)pktlen);
+		rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
+		rxbuf_post->data_buf_addr.low_addr =
+			htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
+
+		if (prot->rx_metadata_offset) {
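+			/* Buffer layout sketch (derived from the offsets used
+			 * here): metadata occupies the first rx_metadata_offset
+			 * bytes of the same DMA buffer, the data area follows:
+			 *
+			 *   pa                       pa + rx_metadata_offset
+			 *   |<-- metadata_buf_len -->|<-- data_buf_len -->|
+			 */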
rxbuf_post->metadata_buf_len = prot->rx_metadata_offset; + rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); + } else { + rxbuf_post->metadata_buf_len = 0; + rxbuf_post->metadata_buf_addr.high_addr = 0; + rxbuf_post->metadata_buf_addr.low_addr = 0; + } + +#if defined(DHD_PKTID_AUDIT_RING) + DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid, DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + rxbuf_post->cmn_hdr.request_id = htol32(pktid); + + /* Move rxbuf_post_tmp to next item */ + rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len; + } + + if (i < alloced) { + if (ring->wr < (alloced - i)) { + ring->wr = ring->max_items - (alloced - i); + } else { + ring->wr -= (alloced - i); + } + + alloced = i; + } + + /* Update ring's WR index and ring doorbell to dongle */ + if (alloced > 0) { + dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced); + } + + return alloced; +} /* dhd_prot_rxbuf_post */ + +#ifdef IOCTLRESP_USE_CONSTMEM +static int +alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf) +{ + int err; + memset(retbuf, 0, sizeof(dhd_dma_buf_t)); + + if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) { + DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err)); + ASSERT(0); + return BCME_NOMEM; + } + + return BCME_OK; +} + +static void +free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf) +{ + /* retbuf (declared on stack) not fully populated ... */ + if (retbuf->va) { + uint32 dma_pad; + dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0; + retbuf->len = IOCT_RETBUF_SIZE; + retbuf->_alloced = retbuf->len + dma_pad; + /* JIRA:SWWLAN-70021 The pa value would be overwritten by the dongle. + * Need to reassign before free to pass the check in dhd_dma_buf_audit(). + */ + retbuf->pa = DMA_MAP(dhd->osh, retbuf->va, retbuf->len, DMA_RX, NULL, NULL); + } + + dhd_dma_buf_free(dhd, retbuf); + return; +} +#endif /* IOCTLRESP_USE_CONSTMEM */ + +static int +dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf) +{ + void *p; + uint16 pktsz; + ioctl_resp_evt_buf_post_msg_t *rxbuf_post; + dmaaddr_t pa; + uint32 pktlen; + dhd_prot_t *prot = dhd->prot; + uint16 alloced = 0; + unsigned long flags; + dhd_dma_buf_t retbuf; + void *dmah = NULL; + uint32 pktid; + void *map_handle; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); + return -1; + } + + memset(&retbuf, 0, sizeof(dhd_dma_buf_t)); + + if (event_buf) { + /* Allocate packet for event buffer post */ + pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; + } else { + /* Allocate packet for ctrl/ioctl buffer post */ + pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + if (!event_buf) { + if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) { + DHD_ERROR(("Could not allocate IOCTL response buffer\n")); + return -1; + } + ASSERT(retbuf.len == IOCT_RETBUF_SIZE); + p = retbuf.va; + pktlen = retbuf.len; + pa = retbuf.pa; + dmah = retbuf.dmah; + } else +#endif /* IOCTLRESP_USE_CONSTMEM */ + { +#ifdef DHD_USE_STATIC_CTRLBUF + p = PKTGET_STATIC(dhd->osh, pktsz, FALSE); +#else + p = PKTGET(dhd->osh, pktsz, FALSE); +#endif /* DHD_USE_STATIC_CTRLBUF */ + if (p == NULL) { + DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n", + __FUNCTION__, __LINE__, event_buf ? 
+ "EVENT" : "IOCTL RESP")); + dhd->rx_pktgetfail++; + return -1; + } + + pktlen = PKTLEN(dhd->osh, p); + + if (SECURE_DMA_ENAB(dhd->osh)) { + DHD_GENERAL_LOCK(dhd, flags); + pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, + DMA_RX, p, 0, ring->dma_buf.secdma, 0); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); + } + + if (PHYSADDRISZERO(pa)) { + DHD_ERROR(("Invalid physaddr 0\n")); + ASSERT(0); + goto free_pkt_return; + } + } + + DHD_GENERAL_LOCK(dhd, flags); + + rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (rxbuf_post == NULL) { + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n", + __FUNCTION__, __LINE__)); + +#ifdef IOCTLRESP_USE_CONSTMEM + if (event_buf) +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + if (SECURE_DMA_ENAB(dhd->osh)) { + DHD_GENERAL_LOCK(dhd, flags); + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, + ring->dma_buf.secdma, 0); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + } + } + goto free_pkt_return; + } + + /* CMN msg header */ + if (event_buf) { + rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_EVENT_BUF_POST; + } else { + rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_IOCTLRESP_BUF_POST; + } + +#ifdef IOCTLRESP_USE_CONSTMEM + if (!event_buf) { + map_handle = dhd->prot->pktid_map_handle_ioctl; + pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, + DMA_RX, dmah, ring->dma_buf.secdma, PKTTYPE_IOCTL_RX); + } else +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + map_handle = dhd->prot->pktid_map_handle; + pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, + p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma, + event_buf ? 
PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX); + } + + if (pktid == DHD_PKTID_INVALID) { + if (ring->wr == 0) { + ring->wr = ring->max_items - 1; + } else { + ring->wr--; + } + DHD_GENERAL_UNLOCK(dhd, flags); + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + goto free_pkt_return; + } + +#if defined(DHD_PKTID_AUDIT_RING) + DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC); +#endif /* DHD_PKTID_AUDIT_RING */ + + rxbuf_post->cmn_hdr.request_id = htol32(pktid); + rxbuf_post->cmn_hdr.if_id = 0; + rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + +#if defined(DHD_PCIE_PKTID) + if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) { + if (ring->wr == 0) { + ring->wr = ring->max_items - 1; + } else { + ring->wr--; + } + DHD_GENERAL_UNLOCK(dhd, flags); +#ifdef IOCTLRESP_USE_CONSTMEM + if (event_buf) +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + if (SECURE_DMA_ENAB(dhd->osh)) { + DHD_GENERAL_LOCK(dhd, flags); + SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, + ring->dma_buf.secdma, 0); + DHD_GENERAL_UNLOCK(dhd, flags); + } else { + DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); + } + } + goto free_pkt_return; + } +#endif /* DHD_PCIE_PKTID */ + + rxbuf_post->cmn_hdr.flags = 0; +#ifndef IOCTLRESP_USE_CONSTMEM + rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p)); +#else + rxbuf_post->host_buf_len = htol16((uint16)pktlen); +#endif /* IOCTLRESP_USE_CONSTMEM */ + rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); + rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return 1; + +free_pkt_return: +#ifdef IOCTLRESP_USE_CONSTMEM + if (!event_buf) { + free_ioctl_return_buffer(dhd, &retbuf); + } else +#endif /* IOCTLRESP_USE_CONSTMEM */ + { + dhd_prot_packet_free(dhd, p, + event_buf ? 
PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX, + FALSE); + } + + return -1; +} /* dhd_prot_rxbufpost_ctrl */ + +static uint16 +dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post) +{ + uint32 i = 0; + int32 ret_val; + + DHD_INFO(("max to post %d, event %d \n", max_to_post, event_buf)); + + if (dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); + return 0; + } + + while (i < max_to_post) { + ret_val = dhd_prot_rxbufpost_ctrl(dhd, event_buf); + if (ret_val < 0) { + break; + } + i++; + } + DHD_INFO(("posted %d buffers to event_pool/ioctl_resp_pool %d\n", i, event_buf)); + return (uint16)i; +} + +static void +dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int max_to_post; + + DHD_INFO(("ioctl resp buf post\n")); + max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted; + if (max_to_post <= 0) { + DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n", + __FUNCTION__)); + return; + } + prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, + FALSE, max_to_post); +} + +static void +dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + int max_to_post; + + max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted; + if (max_to_post <= 0) { + DHD_INFO(("%s: Cannot post more than max event buffers\n", + __FUNCTION__)); + return; + } + prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, + TRUE, max_to_post); +} + +/** called when DHD needs to check for 'receive complete' messages from the dongle */ +bool BCMFASTPATH +dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound) +{ + bool more = TRUE; + uint n = 0; + msgbuf_ring_t *ring = &dhd->prot->d2hring_rx_cpln; + + /* Process all the messages - DTOH direction */ + while (!dhd_is_device_removed(dhd)) { + uint8 *msg_addr; + uint32 msg_len; + + if (dhd->hang_was_sent) { + more = FALSE; + break; + } + + /* Get the address of the next message to be read from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + if (msg_addr == NULL) { + more = FALSE; + break; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + + if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { + DHD_ERROR(("%s: process %s msg addr %p len %d\n", + __FUNCTION__, ring->name, msg_addr, msg_len)); + } + + /* Update read pointer */ + dhd_prot_upd_read_idx(dhd, ring); + + /* After batch processing, check RX bound */ + n += msg_len / ring->item_len; + if (n >= bound) { + break; + } + } + + return more; +} + +/** + * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring) + */ +void +dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring) +{ + msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring; + + /* Update read pointer */ + if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { + ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); + } + + DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n", + ring->idx, flowid, ring->wr, ring->rd)); + + /* Need more logic here, but for now use it directly */ + dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */ +} + +/** called when DHD needs to check for 'transmit complete' messages from the dongle */ +bool BCMFASTPATH +dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound) +{ + bool more = TRUE; + uint n = 0; + msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln; + + /* Process all the messages - 
DTOH direction */ + while (!dhd_is_device_removed(dhd)) { + uint8 *msg_addr; + uint32 msg_len; + + if (dhd->hang_was_sent) { + more = FALSE; + break; + } + + /* Get the address of the next message to be read from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + if (msg_addr == NULL) { + more = FALSE; + break; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + + if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { + DHD_ERROR(("%s: process %s msg addr %p len %d\n", + __FUNCTION__, ring->name, msg_addr, msg_len)); + } + + /* Write to dngl rd ptr */ + dhd_prot_upd_read_idx(dhd, ring); + + /* After batch processing, check bound */ + n += msg_len / ring->item_len; + if (n >= bound) { + break; + } + } + + return more; +} + +/** called when DHD needs to check for 'ioctl complete' messages from the dongle */ +int BCMFASTPATH +dhd_prot_process_ctrlbuf(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln; + + /* Process all the messages - DTOH direction */ + while (!dhd_is_device_removed(dhd)) { + uint8 *msg_addr; + uint32 msg_len; + + if (dhd->hang_was_sent) { + break; + } + + /* Get the address of the next message to be read from ring */ + msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); + if (msg_addr == NULL) { + break; + } + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(msg_addr); + + if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { + DHD_ERROR(("%s: process %s msg addr %p len %d\n", + __FUNCTION__, ring->name, msg_addr, msg_len)); + } + + /* Write to dngl rd ptr */ + dhd_prot_upd_read_idx(dhd, ring); + } + + return 0; +} + +/** + * Consume messages out of the D2H ring. Ensure that the message's DMA to host + * memory has completed, before invoking the message handler via a table lookup + * of the cmn_msg_hdr::msg_type. + */ +static int BCMFASTPATH +dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len) +{ + int buf_len = len; + uint16 item_len; + uint8 msg_type; + cmn_msg_hdr_t *msg = NULL; + int ret = BCME_OK; + + ASSERT(ring); + item_len = ring->item_len; + if (item_len == 0) { + DHD_ERROR(("%s: ringidx %d item_len %d buf_len %d\n", + __FUNCTION__, ring->idx, item_len, buf_len)); + return BCME_ERROR; + } + + while (buf_len > 0) { + if (dhd->hang_was_sent) { + ret = BCME_ERROR; + goto done; + } + + msg = (cmn_msg_hdr_t *)buf; + + /* + * Update the curr_rd to the current index in the ring, from where + * the work item is fetched. This way if the fetched work item + * fails in LIVELOCK, we can print the exact read index in the ring + * that shows up the corrupted work item. 
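+ *
+ * e.g. with max_items == 256, curr_rd wraps 0, 1, ..., 255, 0, ...; on a
+ * LIVELOCK report the logged curr_rd identifies the ring slot that holds
+ * the corrupted work item.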
+ */ + if ((ring->curr_rd + 1) >= ring->max_items) { + ring->curr_rd = 0; + } else { + ring->curr_rd += 1; + } + +#if defined(PCIE_D2H_SYNC) + /* Wait until DMA completes, then fetch msg_type */ + msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len); +#else + msg_type = msg->msg_type; +#endif /* !PCIE_D2H_SYNC */ + + /* Prefetch data to populate the cache */ + OSL_PREFETCH(buf + item_len); + + DHD_INFO(("msg_type %d item_len %d buf_len %d\n", + msg_type, item_len, buf_len)); + + if (msg_type == MSG_TYPE_LOOPBACK) { + bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len); + DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len)); + } + + ASSERT(msg_type < DHD_PROT_FUNCS); + if (msg_type >= DHD_PROT_FUNCS) { + DHD_ERROR(("%s: msg_type %d item_len %d buf_len %d\n", + __FUNCTION__, msg_type, item_len, buf_len)); + ret = BCME_ERROR; + goto done; + } + + if (table_lookup[msg_type]) { + table_lookup[msg_type](dhd, buf); + } + + if (buf_len < item_len) { + ret = BCME_ERROR; + goto done; + } + buf_len = buf_len - item_len; + buf = buf + item_len; + } + +done: + +#ifdef DHD_RX_CHAINING + dhd_rxchain_commit(dhd); +#endif +#if defined(DHD_LB) + dhd_lb_dispatch(dhd, ring->idx); +#endif + return ret; +} /* dhd_prot_process_msgtype */ + +static void +dhd_prot_noop(dhd_pub_t *dhd, void *msg) +{ + return; +} + +/** called on MSG_TYPE_RING_STATUS message received from dongle */ +static void +dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg) +{ + pcie_ring_status_t *ring_status = (pcie_ring_status_t *)msg; + DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n", + ring_status->cmn_hdr.request_id, ring_status->compl_hdr.status, + ring_status->compl_hdr.flow_ring_id, ring_status->write_idx)); + /* How do we track this to pair it with ??? */ + return; +} + +/** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */ +static void +dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg) +{ + pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg; + DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n", + gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status, + gen_status->compl_hdr.flow_ring_id)); + + /* How do we track this to pair it with ??? */ + return; +} + +/** + * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the + * dongle received the ioctl message in dongle memory. 
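+ *
+ * As a sketch of the expected two-step handshake (both state bits are set
+ * in dhd_fillup_ioct_reqst when the request is posted):
+ *   ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
+ *   MSG_TYPE_IOCTLPTR_REQ_ACK  -> this handler clears ACK_PENDING
+ *   MSG_TYPE_IOCTL_CMPLT       -> the response handler delivers the result
+ * Any other ordering is logged below as an unexpected ioctl state.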
+ */ +static void +dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg) +{ + uint32 pktid; + ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg; + unsigned long flags; + + pktid = ltoh32(ioct_ack->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) + /* Skip DHD_IOCTL_REQ_PKTID = 0xFFFE */ + if (pktid != DHD_IOCTL_REQ_PKTID) { + if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid, + DHD_TEST_IS_ALLOC) == BCME_ERROR) { + prhex("dhd_prot_ioctack_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + } + } +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_GENERAL_LOCK(dhd, flags); + if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) && + (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) { + dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING; + } else { + DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n", + __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id)); + prhex("dhd_prot_ioctack_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + } + DHD_GENERAL_UNLOCK(dhd, flags); + + DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n", + ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status, + ioct_ack->compl_hdr.flow_ring_id)); + if (ioct_ack->compl_hdr.status != 0) { + DHD_ERROR(("got an error status for the ioctl request...need to handle that\n")); + } +} + +/** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */ +static void +dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg) +{ + dhd_prot_t *prot = dhd->prot; + uint32 pkt_id, xt_id; + ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg; + void *pkt; + unsigned long flags; + dhd_dma_buf_t retbuf; + + memset(&retbuf, 0, sizeof(dhd_dma_buf_t)); + + pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) + { + int ret; +#ifndef IOCTLRESP_USE_CONSTMEM + ret = DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pkt_id, + DHD_DUPLICATE_FREE); +#else + ret = DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle_ioctl, pkt_id, + DHD_DUPLICATE_FREE); +#endif /* !IOCTLRESP_USE_CONSTMEM */ + if (ret == BCME_ERROR) { + prhex("dhd_prot_ioctcmplt_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + } + } +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_GENERAL_LOCK(dhd, flags); + if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) || + !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) { + DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n", + __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id)); + prhex("dhd_prot_ioctcmplt_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + DHD_GENERAL_UNLOCK(dhd, flags); + return; + } +#ifndef IOCTLRESP_USE_CONSTMEM + pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE); +#else + dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf); + pkt = retbuf.va; +#endif /* !IOCTLRESP_USE_CONSTMEM */ + if (!pkt) { + prot->ioctl_state = 0; + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__)); + return; + } + DHD_GENERAL_UNLOCK(dhd, flags); + + prot->ioctl_resplen = ltoh16(ioct_resp->resp_len); + prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status); + xt_id = ltoh16(ioct_resp->trans_id); + if (xt_id != prot->ioctl_trans_id) { + ASSERT(0); + goto exit; + } + + DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n", + pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen)); + + if (prot->ioctl_resplen > 0) { +#ifndef IOCTLRESP_USE_CONSTMEM + bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, 
prot->ioctl_resplen); +#else + bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen); +#endif /* !IOCTLRESP_USE_CONSTMEM */ + } + + /* wake up any dhd_os_ioctl_resp_wait() */ + dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS); + +exit: +#ifndef IOCTLRESP_USE_CONSTMEM + dhd_prot_packet_free(dhd, pkt, + PKTTYPE_IOCTL_RX, FALSE); +#else + free_ioctl_return_buffer(dhd, &retbuf); +#endif /* !IOCTLRESP_USE_CONSTMEM */ +} + +/** called on MSG_TYPE_TX_STATUS message received from dongle */ +static void BCMFASTPATH +dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg) +{ + dhd_prot_t *prot = dhd->prot; + host_txbuf_cmpl_t * txstatus; + unsigned long flags; + uint32 pktid; + void *pkt = NULL; + dmaaddr_t pa; + uint32 len; + void *dmah; + void *secdma; + + /* locks required to protect circular buffer accesses */ + DHD_GENERAL_LOCK(dhd, flags); + + txstatus = (host_txbuf_cmpl_t *)msg; + pktid = ltoh32(txstatus->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) + if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid, + DHD_DUPLICATE_FREE) == BCME_ERROR) { + prhex("dhd_prot_txstatus_process:", + (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE); + } +#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_INFO(("txstatus for pktid 0x%04x\n", pktid)); + if (prot->active_tx_count) { + prot->active_tx_count--; + + /* Release the Lock when no more tx packets are pending */ + if (prot->active_tx_count == 0) + DHD_OS_WAKE_UNLOCK(dhd); + + } else { + DHD_ERROR(("Extra packets are freed\n")); + } + + ASSERT(pktid != 0); + +#if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA) + { + int elem_ix; + void **elem; + bcm_workq_t *workq; + + pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, + pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX); + + workq = &prot->tx_compl_prod; + /* + * Produce the packet into the tx_compl workq for the tx compl tasklet + * to consume. + */ + OSL_PREFETCH(PKTTAG(pkt)); + + /* fetch next available slot in workq */ + elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); + + DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa); + DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len); + + if (elem_ix == BCM_RING_FULL) { + DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n")); + goto workq_ring_full; + } + + elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix); + *elem = pkt; + + smp_wmb(); + + /* Sync WR index to consumer if the SYNC threshold has been reached */ + if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) { + bcm_workq_prod_sync(workq); + prot->tx_compl_prod_sync = 0; + } + + DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n", + __FUNCTION__, pkt, prot->tx_compl_prod_sync)); + + DHD_GENERAL_UNLOCK(dhd, flags); + return; + } + +workq_ring_full: + +#endif /* !DHD_LB_TXC */ + + /* + * We can come here if no DHD_LB_TXC is enabled and in case where DHD_LB_TXC is + * defined but the tx_compl queue is full. 
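+	 * In that fallback the completion is handled inline below: the pktid
+	 * is translated back to the native packet, the payload is
+	 * DMA-unmapped and the packet freed, which is what the tx_compl
+	 * tasklet would otherwise have done for us.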
+ */ + if (pkt == NULL) { + pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, + pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX); + } + + if (pkt) { + if (SECURE_DMA_ENAB(dhd->osh)) { + int offset = 0; + BCM_REFERENCE(offset); + + if (dhd->prot->tx_metadata_offset) + offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN; + SECURE_DMA_UNMAP(dhd->osh, (uint) pa, + (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah, + secdma, offset); + } else { + DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); + } +#if defined(BCMPCIE) + dhd_txcomplete(dhd, pkt, true); +#endif + +#if DHD_DBG_SHOW_METADATA + if (dhd->prot->metadata_dbg && + dhd->prot->tx_metadata_offset && txstatus->metadata_len) { + uchar *ptr; + /* The Ethernet header of TX frame was copied and removed. + * Here, move the data pointer forward by Ethernet header size. + */ + PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN); + ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset); + bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len); + dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len); + } +#endif /* DHD_DBG_SHOW_METADATA */ + PKTFREE(dhd->osh, pkt, TRUE); + DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id, + txstatus->tx_status); + } + + DHD_GENERAL_UNLOCK(dhd, flags); + + return; +} /* dhd_prot_txstatus_process */ + +/** called on MSG_TYPE_WL_EVENT message received from dongle */ +static void +dhd_prot_event_process(dhd_pub_t *dhd, void *msg) +{ + wlevent_req_msg_t *evnt; + uint32 bufid; + uint16 buflen; + int ifidx = 0; + void* pkt; + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + + /* Event complete header */ + evnt = (wlevent_req_msg_t *)msg; + bufid = ltoh32(evnt->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) + if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, bufid, + DHD_DUPLICATE_FREE) == BCME_ERROR) { + prhex("dhd_prot_event_process:", + (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); + } +#endif /* DHD_PKTID_AUDIT_RING */ + + buflen = ltoh16(evnt->event_data_len); + + ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr); + + /* Post another rxbuf to the device */ + if (prot->cur_event_bufs_posted) { + prot->cur_event_bufs_posted--; + } + dhd_msgbuf_rxbuf_post_event_bufs(dhd); + + /* locks required to protect pktid_map */ + DHD_GENERAL_LOCK(dhd, flags); + pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE); + DHD_GENERAL_UNLOCK(dhd, flags); + + if (!pkt) { + return; + } + + /* DMA RX offset updated through shared area */ + if (dhd->prot->rx_dataoffset) { + PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); + } + + PKTSETLEN(dhd->osh, pkt, buflen); + + dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); +} + +/** called on MSG_TYPE_RX_CMPLT message received from dongle */ +static void BCMFASTPATH +dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg) +{ + host_rxbuf_cmpl_t *rxcmplt_h; + uint16 data_offset; /* offset at which data starts */ + void *pkt; + unsigned long flags; + uint ifidx; + uint32 pktid; +#if defined(DHD_LB_RXC) + const bool free_pktid = FALSE; +#else + const bool free_pktid = TRUE; +#endif /* DHD_LB_RXC */ + + /* RXCMPLT HDR */ + rxcmplt_h = (host_rxbuf_cmpl_t *)msg; + + /* offset from which data starts is populated in rxstatus0 */ + data_offset = ltoh16(rxcmplt_h->data_offset); + + pktid = ltoh32(rxcmplt_h->cmn_hdr.request_id); + +#if defined(DHD_PKTID_AUDIT_RING) + if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid, + DHD_DUPLICATE_FREE) == BCME_ERROR) { + prhex("dhd_prot_rxcmplt_process:", + (uchar *)msg, D2HRING_RXCMPLT_ITEMSIZE); + } 
+#endif /* DHD_PKTID_AUDIT_RING */ + + DHD_GENERAL_LOCK(dhd, flags); + pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_DATA_RX, free_pktid); + DHD_GENERAL_UNLOCK(dhd, flags); + + if (!pkt) { + return; + } + + /* Post another set of rxbufs to the device */ + dhd_prot_return_rxbuf(dhd, pktid, 1); + + DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n", + ltoh32(rxcmplt_h->cmn_hdr.request_id), data_offset, ltoh16(rxcmplt_h->data_len), + rxcmplt_h->cmn_hdr.if_id, rxcmplt_h->cmn_hdr.flags, PKTDATA(dhd->osh, pkt), + ltoh16(rxcmplt_h->metadata_len))); +#if DHD_DBG_SHOW_METADATA + if (dhd->prot->metadata_dbg && + dhd->prot->rx_metadata_offset && rxcmplt_h->metadata_len) { + uchar *ptr; + ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->rx_metadata_offset); + /* header followed by data */ + bcm_print_bytes("rxmetadata", ptr, rxcmplt_h->metadata_len); + dhd_prot_print_metadata(dhd, ptr, rxcmplt_h->metadata_len); + } +#endif /* DHD_DBG_SHOW_METADATA */ + + if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) { + DHD_INFO(("D11 frame rxed \n")); + } + + /* data_offset from buf start */ + if (data_offset) { + /* data offset given from dongle after split rx */ + PKTPULL(dhd->osh, pkt, data_offset); /* data offset */ + } else { + /* DMA RX offset updated through shared area */ + if (dhd->prot->rx_dataoffset) { + PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); + } + } + /* Actual length of the packet */ + PKTSETLEN(dhd->osh, pkt, ltoh16(rxcmplt_h->data_len)); + + ifidx = rxcmplt_h->cmn_hdr.if_id; + +#if defined(DHD_LB_RXP) + dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx); +#else /* ! DHD_LB_RXP */ +#ifdef DHD_RX_CHAINING + /* Chain the packets */ + dhd_rxchain_frame(dhd, pkt, ifidx); +#else /* ! DHD_RX_CHAINING */ + /* offset from which data starts is populated in rxstatus0 */ + dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); +#endif /* ! DHD_RX_CHAINING */ +#endif /* ! DHD_LB_RXP */ +} /* dhd_prot_rxcmplt_process */ + +/** Stop protocol: sync w/dongle state. */ +void dhd_prot_stop(dhd_pub_t *dhd) +{ + ASSERT(dhd); + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +} + +/* Add any protocol-specific data header. + * Caller must reserve prot_hdrlen prepend space. + */ +void BCMFASTPATH +dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF) +{ + return; +} + +uint +dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF) +{ + return 0; +} + + +#define PKTBUF pktbuf + +/** + * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in + * the corresponding flow ring. 
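+ *
+ * Per packet, as a sketch of the steps below: look up the msgbuf_ring via
+ * flowid -> flow_ring_table -> prot_info; reserve a pktid and one
+ * host_txbuf_post_t work item; DMA-map the payload (the ethernet header
+ * stays cached in the host copy); then fill the descriptor and update the
+ * WR index / doorbell, possibly batched under TXP_FLUSH_NITEMS.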
+ */ +int BCMFASTPATH +dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) +{ + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + host_txbuf_post_t *txdesc = NULL; + dmaaddr_t pa, meta_pa; + uint8 *pktdata; + uint32 pktlen; + uint32 pktid; + uint8 prio; + uint16 flowid = 0; + uint16 alloced = 0; + uint16 headroom; + msgbuf_ring_t *ring; + flow_ring_table_t *flow_ring_table; + flow_ring_node_t *flow_ring_node; + + if (dhd->flow_ring_table == NULL) { + return BCME_NORESOURCE; + } + + flowid = DHD_PKT_GET_FLOWID(PKTBUF); + + flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; + flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; + + ring = (msgbuf_ring_t *)flow_ring_node->prot_info; + + + DHD_GENERAL_LOCK(dhd, flags); + + /* Create a unique 32-bit packet id */ + pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_map_handle, PKTBUF); +#if defined(DHD_PCIE_PKTID) + if (pktid == DHD_PKTID_INVALID) { + DHD_ERROR(("Pktid pool depleted.\n")); + /* + * If we return error here, the caller would queue the packet + * again. So we'll just free the skb allocated in DMA Zone. + * Since we have not freed the original SKB yet the caller would + * requeue the same. + */ + goto err_no_res_pktfree; + } +#endif /* DHD_PCIE_PKTID */ + + /* Reserve space in the circular buffer */ + txdesc = (host_txbuf_post_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + if (txdesc == NULL) { +#if defined(DHD_PCIE_PKTID) + void *dmah; + void *secdma; + /* Free up the PKTID. physaddr and pktlen will be garbage. */ + DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid, + pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK); +#endif /* DHD_PCIE_PKTID */ + DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n", + __FUNCTION__, __LINE__, prot->active_tx_count)); + goto err_no_res_pktfree; + } + + /* Extract the data pointer and length information */ + pktdata = PKTDATA(dhd->osh, PKTBUF); + pktlen = PKTLEN(dhd->osh, PKTBUF); + + /* Ethernet header: Copy before we cache flush packet using DMA_MAP */ + bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN); + + /* Extract the ethernet header and adjust the data pointer and length */ + pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN); + pktlen -= ETHER_HDR_LEN; + + /* Map the data pointer to a DMA-able address */ + if (SECURE_DMA_ENAB(dhd->osh)) { + int offset = 0; + BCM_REFERENCE(offset); + + if (prot->tx_metadata_offset) { + offset = prot->tx_metadata_offset + ETHER_HDR_LEN; + } + + pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, + DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset); + } else { + pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0); + } + + if ((PHYSADDRHI(pa) == 0) && (PHYSADDRLO(pa) == 0)) { + DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n")); + ASSERT(0); + } + + /* No need to lock. 
Save the rest of the packet's metadata */
+	DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, PKTBUF, pktid,
+		pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
+
+#ifdef TXP_FLUSH_NITEMS
+	if (ring->pend_items_count == 0) {
+		ring->start_addr = (void *)txdesc;
+	}
+	ring->pend_items_count++;
+#endif
+
+	/* Form the Tx descriptor message buffer */
+
+	/* Common message hdr */
+	txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
+	txdesc->cmn_hdr.if_id = ifidx;
+
+	txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
+	prio = (uint8)PKTPRIO(PKTBUF);
+
+
+	txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
+	txdesc->seg_cnt = 1;
+
+	txdesc->data_len = htol16((uint16) pktlen);
+	txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
+	txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
+
+	/* Move data pointer to keep ether header in local PKTBUF for later reference */
+	PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
+
+	/* Handle Tx metadata */
+	headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
+	if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) {
+		DHD_ERROR(("No headroom for Metadata tx %d %d\n",
+			prot->tx_metadata_offset, headroom));
+	}
+
+	if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
+		DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
+
+		/* Adjust the data pointer to account for meta data in DMA_MAP */
+		PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
+
+		if (SECURE_DMA_ENAB(dhd->osh)) {
+			meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
+				prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF,
+				0, ring->dma_buf.secdma);
+		} else {
+			meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
+				prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
+		}
+
+		if (PHYSADDRISZERO(meta_pa)) {
+			DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
+			ASSERT(0);
+		}
+
+		/* Adjust the data pointer back to original value */
+		PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
+
+		txdesc->metadata_buf_len = prot->tx_metadata_offset;
+		txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
+		txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
+	} else {
+		txdesc->metadata_buf_len = htol16(0);
+		txdesc->metadata_buf_addr.high_addr = 0;
+		txdesc->metadata_buf_addr.low_addr = 0;
+	}
+
+#if defined(DHD_PKTID_AUDIT_RING)
+	DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid,
+		DHD_DUPLICATE_ALLOC);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+	txdesc->cmn_hdr.request_id = htol32(pktid);
+
+	DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
+		txdesc->cmn_hdr.request_id));
+
+	/* Update the write pointer in TCM & ring bell */
+#ifdef TXP_FLUSH_NITEMS
+	/* Flush if we have either hit the txp_threshold or if this msg is */
+	/* occupying the last slot in the flow_ring - before wrap around. */
+	if ((ring->pend_items_count == prot->txp_threshold) ||
+		((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
+		dhd_prot_txdata_write_flush(dhd, flowid, TRUE);
+	}
+#else
+	/* update ring's WR index and ring doorbell to dongle */
+	dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
+#endif
+
+	prot->active_tx_count++;
+
+	/*
+	 * Take a wake lock, do not sleep if we have at least one packet
+	 * to finish.
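+	 * The matching DHD_OS_WAKE_UNLOCK is issued from
+	 * dhd_prot_txstatus_process once active_tx_count drops back to zero.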
+ */ + if (prot->active_tx_count == 1) + DHD_OS_WAKE_LOCK(dhd); + + DHD_GENERAL_UNLOCK(dhd, flags); + + return BCME_OK; + +err_no_res_pktfree: + + + + DHD_GENERAL_UNLOCK(dhd, flags); + return BCME_NORESOURCE; +} /* dhd_prot_txdata */ + +/* called with a lock */ +/** optimization to write "n" tx items at a time to ring */ +void BCMFASTPATH +dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock) +{ +#ifdef TXP_FLUSH_NITEMS + unsigned long flags = 0; + flow_ring_table_t *flow_ring_table; + flow_ring_node_t *flow_ring_node; + msgbuf_ring_t *ring; + + if (dhd->flow_ring_table == NULL) { + return; + } + + if (!in_lock) { + DHD_GENERAL_LOCK(dhd, flags); + } + + flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; + flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; + ring = (msgbuf_ring_t *)flow_ring_node->prot_info; + + if (ring->pend_items_count) { + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, ring->start_addr, + ring->pend_items_count); + ring->pend_items_count = 0; + ring->start_addr = NULL; + } + + if (!in_lock) { + DHD_GENERAL_UNLOCK(dhd, flags); + } +#endif /* TXP_FLUSH_NITEMS */ +} + +#undef PKTBUF /* Only defined in the above routine */ + +int BCMFASTPATH +dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len) +{ + return 0; +} + +/** post a set of receive buffers to the dongle */ +static void BCMFASTPATH +dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt) +{ + dhd_prot_t *prot = dhd->prot; +#if defined(DHD_LB_RXC) + int elem_ix; + uint32 *elem; + bcm_workq_t *workq; + + workq = &prot->rx_compl_prod; + + /* Produce the work item */ + elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); + if (elem_ix == BCM_RING_FULL) { + DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__)); + ASSERT(0); + return; + } + + elem = WORKQ_ELEMENT(uint32, workq, elem_ix); + *elem = pktid; + + smp_wmb(); + + /* Sync WR index to consumer if the SYNC threshold has been reached */ + if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) { + bcm_workq_prod_sync(workq); + prot->rx_compl_prod_sync = 0; + } + + DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n", + __FUNCTION__, pktid, prot->rx_compl_prod_sync)); + +#endif /* DHD_LB_RXC */ + + + if (prot->rxbufpost >= rxcnt) { + prot->rxbufpost -= rxcnt; + } else { + /* ASSERT(0); */ + prot->rxbufpost = 0; + } + +#if !defined(DHD_LB_RXC) + if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) { + dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */ + } +#endif /* !DHD_LB_RXC */ +} + +/* called before an ioctl is sent to the dongle */ +static void +dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf) +{ + dhd_prot_t *prot = dhd->prot; + + if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) { + int slen = 0; + pcie_bus_tput_params_t *tput_params; + + slen = strlen("pcie_bus_tput") + 1; + tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen); + bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr, + sizeof(tput_params->host_buf_addr)); + tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN; + } +} + + +/** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */ +int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len) +{ + int ret = -1; + uint8 action; + + if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) { + DHD_ERROR(("%s : bus is down. 
we have nothing to do\n", __FUNCTION__)); + goto done; + } + + if (dhd->busstate == DHD_BUS_SUSPEND) { + DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__)); + goto done; + } + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (ioc->cmd == WLC_SET_PM) { + DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, *(char *)buf)); + } + + ASSERT(len <= WLC_IOCTL_MAXLEN); + + if (len > WLC_IOCTL_MAXLEN) { + goto done; + } + + action = ioc->set; + + dhd_prot_wlioctl_intercept(dhd, ioc, buf); + + if (action & WL_IOCTL_ACTION_SET) { + ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + } else { + ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); + if (ret > 0) { + ioc->used = ret; + } + } + + /* Too many programs assume ioctl() returns 0 on success */ + if (ret >= 0) { + ret = 0; + } else { + DHD_ERROR(("%s: status ret value is %d \n", __FUNCTION__, ret)); + dhd->dongle_error = ret; + } + + if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) { + /* Intercept the wme_dp ioctl here */ + if (!strcmp(buf, "wme_dp")) { + int slen, val = 0; + + slen = strlen("wme_dp") + 1; + if (len >= (int)(slen + sizeof(int))) { + bcopy(((char *)buf + slen), &val, sizeof(int)); + } + dhd->wme_dp = (uint8) ltoh32(val); + } + + } + +done: + return ret; + +} /* dhd_prot_ioctl */ + +/** test / loopback */ + +int +dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len) +{ + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + uint16 alloced = 0; + + ioct_reqst_hdr_t *ioct_rqst; + + uint16 hdrlen = sizeof(ioct_reqst_hdr_t); + uint16 msglen = len + hdrlen; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN); + msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE); + + DHD_GENERAL_LOCK(dhd, flags); + + ioct_rqst = (ioct_reqst_hdr_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (ioct_rqst == NULL) { + DHD_GENERAL_UNLOCK(dhd, flags); + return 0; + } + + { + uint8 *ptr; + uint16 i; + + ptr = (uint8 *)ioct_rqst; + for (i = 0; i < msglen; i++) { + ptr[i] = i % 256; + } + } + + /* Common msg buf hdr */ + ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK; + ioct_rqst->msg.if_id = 0; + + bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return 0; +} + +/** test / loopback */ +void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer) +{ + if (dmaxfer == NULL) { + return; + } + + dhd_dma_buf_free(dhd, &dmaxfer->srcmem); + dhd_dma_buf_free(dhd, &dmaxfer->dstmem); +} + +/** test / loopback */ +int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, + uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer) +{ + uint i; + if (!dmaxfer) { + return BCME_ERROR; + } + + /* First free up existing buffers */ + dmaxfer_free_dmaaddr(dhd, dmaxfer); + + if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) { + return BCME_NOMEM; + } + + if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) { + dhd_dma_buf_free(dhd, &dmaxfer->srcmem); + return BCME_NOMEM; + } + + dmaxfer->len = len; + + /* Populate source with a pattern */ + for (i = 0; i < dmaxfer->len; i++) { + ((uint8*)dmaxfer->srcmem.va)[i] = i % 256; + } + OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len); + + dmaxfer->srcdelay = srcdelay; + dmaxfer->destdelay = destdelay; + + return BCME_OK; +} /* dmaxfer_prepare_dmaaddr */ + +static void 
+dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
+{
+	dhd_prot_t *prot = dhd->prot;
+
+	OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
+	if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
+		if (memcmp(prot->dmaxfer.srcmem.va,
+			prot->dmaxfer.dstmem.va, prot->dmaxfer.len)) {
+			bcm_print_bytes("XFER SRC: ",
+				prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
+			bcm_print_bytes("XFER DST: ",
+				prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
+		} else {
+			DHD_INFO(("DMA successful\n"));
+		}
+	}
+	dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
+	dhd->prot->dmaxfer.in_progress = FALSE;
+}
+
+/** Test functionality.
+ * Transfers bytes from host to dongle and to host again using DMA.
+ * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
+ * by a spinlock.
+ */
+int
+dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay)
+{
+	unsigned long flags;
+	int ret = BCME_OK;
+	dhd_prot_t *prot = dhd->prot;
+	pcie_dma_xfer_params_t *dmap;
+	uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
+	uint16 alloced = 0;
+	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+	if (prot->dmaxfer.in_progress) {
+		DHD_ERROR(("DMA is in progress...\n"));
+		return ret;
+	}
+
+	prot->dmaxfer.in_progress = TRUE;
+	if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
+		&prot->dmaxfer)) != BCME_OK) {
+		prot->dmaxfer.in_progress = FALSE;
+		return ret;
+	}
+
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	dmap = (pcie_dma_xfer_params_t *)
+		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
+	if (dmap == NULL) {
+		dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
+		prot->dmaxfer.in_progress = FALSE;
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		return BCME_NOMEM;
+	}
+
+	/* Common msg buf hdr */
+	dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
+	dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
+	dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+	ring->seqnum++;
+
+	dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
+	dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
+	dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
+	dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
+	dmap->xfer_len = htol32(prot->dmaxfer.len);
+	dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
+	dmap->destdelay = htol32(prot->dmaxfer.destdelay);
+
+	/* update ring's WR index and ring doorbell to dongle */
+	dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	DHD_ERROR(("DMA Started...\n"));
+
+	return BCME_OK;
+} /* dhdmsgbuf_dmaxfer_req */
+
+/** Called in the process of submitting an ioctl to the dongle */
+static int
+dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	int ret = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	/* Respond "bcmerror" and "bcmerrorstr" with local cache */
+	if (cmd == WLC_GET_VAR && buf)
+	{
+		if (!strcmp((char *)buf, "bcmerrorstr"))
+		{
+			strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
+			goto done;
+		}
+		else if (!strcmp((char *)buf, "bcmerror"))
+		{
+			*(int *)buf = dhd->dongle_error;
+			goto done;
+		}
+	}
+
+	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
+
+	DHD_CTL(("query_ioctl: ACTION %d ifidx %d cmd %d len %d \n",
+		action, ifidx, cmd, len));
+
+	/* wait for IOCTL completion message from dongle and get first fragment */
+	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
+
+done:
+	return ret;
+}
+
+/**
+ * Waits for IOCTL completion message from the dongle, copies this into caller
+ * provided parameter 'buf'.
+ */
+static int
+dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
+{
+	dhd_prot_t *prot = dhd->prot;
+	int timeleft;
+	unsigned long flags;
+	int ret = 0;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (dhd->dongle_reset) {
+		ret = -EIO;
+		goto out;
+	}
+
+	if (prot->cur_ioctlresp_bufs_posted) {
+		prot->cur_ioctlresp_bufs_posted--;
+	}
+
+	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+
+	timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
+	if (timeleft == 0) {
+		dhd->rxcnt_timeout++;
+		dhd->rx_ctlerrs++;
+		DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d "
+			"trans_id %d state %d busstate=%d ioctl_received=%d\n",
+			__FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd,
+			prot->ioctl_trans_id, prot->ioctl_state,
+			dhd->busstate, prot->ioctl_received));
+
+		dhd_prot_debug_info_print(dhd);
+
+#ifdef DHD_FW_COREDUMP
+		/* As soon as FW TRAP occurs, FW dump will be collected from dhdpcie_checkdied */
+		if (dhd->memdump_enabled && !dhd->dongle_trap_occured) {
+			/* collect core dump */
+			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
+			dhd_bus_mem_dump(dhd);
+		}
+#endif /* DHD_FW_COREDUMP */
+		if (dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) {
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+			dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+			DHD_ERROR(("%s: timeout > MAX_CNTL_RX_TIMEOUT\n", __FUNCTION__));
+		}
+		ret = -ETIMEDOUT;
+		goto out;
+	} else {
+		if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
+			DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
+				__FUNCTION__, prot->ioctl_received));
+			ret = -ECONNABORTED;
+			goto out;
+		}
+		dhd->rxcnt_timeout = 0;
+		dhd->rx_ctlpkts++;
+		DHD_CTL(("%s: ioctl resp resumed, got %d\n",
+			__FUNCTION__, prot->ioctl_resplen));
+	}
+
+	if (dhd->dongle_trap_occured) {
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+		dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+		DHD_ERROR(("%s: TRAP occurred!!\n", __FUNCTION__));
+		ret = -EREMOTEIO;
+		goto out;
+	}
+
+	if (dhd->prot->ioctl_resplen > len) {
+		dhd->prot->ioctl_resplen = (uint16)len;
+	}
+	if (buf) {
+		bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
+	}
+
+	ret = (int)(dhd->prot->ioctl_status);
+out:
+	DHD_GENERAL_LOCK(dhd, flags);
+	dhd->prot->ioctl_state = 0;
+	dhd->prot->ioctl_resplen = 0;
+	dhd->prot->ioctl_received = IOCTL_WAIT;
+	dhd->prot->curr_ioctl_cmd = 0;
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return ret;
+} /* dhd_msgbuf_wait_ioctl_cmplt */
+
+static int
+dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+	int ret = 0;
+
+	DHD_TRACE(("%s: Enter \n", __FUNCTION__));
+
+	if (dhd->busstate == DHD_BUS_DOWN) {
+		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+		return -EIO;
+	}
+
+	/* don't talk to the dongle if fw is about to be reloaded */
+	if (dhd->hang_was_sent) {
+		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+			__FUNCTION__));
+		return -EIO;
+	}
+
+	/* Fill up msgbuf for ioctl req */
+	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
+
+	DHD_CTL(("ACTION %d ifidx %d cmd %d len %d \n",
+		action, ifidx, cmd, len));
+
+	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
+
+	return ret;
+}
+
+/** Called by upper DHD layer. Handles a protocol control response asynchronously.
*/ +int dhd_prot_ctl_complete(dhd_pub_t *dhd) +{ + return 0; +} + +/** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */ +int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + return BCME_UNSUPPORTED; +} + +/** Add prot dump output to a buffer */ +void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) +{ + +#if defined(PCIE_D2H_SYNC) + if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) + bcm_bprintf(b, "\nd2h_sync: SEQNUM:"); + else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) + bcm_bprintf(b, "\nd2h_sync: XORCSUM:"); + else + bcm_bprintf(b, "\nd2h_sync: NONE:"); + bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n", + dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot); +#endif /* PCIE_D2H_SYNC */ + + bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n", + DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support), + DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support), + dhd->prot->rw_index_sz); +} + +/* Update local copy of dongle statistics */ +void dhd_prot_dstats(dhd_pub_t *dhd) +{ + return; +} + +/** Called by upper DHD layer */ +int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, + uint reorder_info_len, void **pkt, uint32 *free_buf_count) +{ + return 0; +} + +/** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */ +int +dhd_post_dummy_msg(dhd_pub_t *dhd) +{ + unsigned long flags; + hostevent_hdr_t *hevent = NULL; + uint16 alloced = 0; + + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + DHD_GENERAL_LOCK(dhd, flags); + + hevent = (hostevent_hdr_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (hevent == NULL) { + DHD_GENERAL_UNLOCK(dhd, flags); + return -1; + } + + /* CMN msg header */ + hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + hevent->msg.msg_type = MSG_TYPE_HOST_EVNT; + hevent->msg.if_id = 0; + + /* Event payload */ + hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD); + + /* Since, we are filling the data directly into the bufptr obtained + * from the msgbuf, we can directly call the write_complete + */ + dhd_prot_ring_write_complete(dhd, ring, hevent, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return 0; +} + +/** + * If exactly_nitems is true, this function will allocate space for nitems or fail + * If exactly_nitems is false, this function will allocate space for nitems or less + */ +static void * BCMFASTPATH +dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, + uint16 nitems, uint16 * alloced, bool exactly_nitems) +{ + void * ret_buf; + + /* Alloc space for nitems in the ring */ + ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems); + + if (ret_buf == NULL) { + /* if alloc failed , invalidate cached read ptr */ + if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { + ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); + } else { + dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx); + } + + /* Try allocating once more */ + ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems); + + if (ret_buf == NULL) { + DHD_INFO(("%s: Ring space not available \n", ring->name)); + return NULL; + } + } + + /* Return alloced space */ + return ret_buf; +} + +/** + * Non inline ioct request. 
+ * Form an ioctl request first as per ioctptr_reqst_hdr_t header in the circular buffer
+ * Form a separate request buffer where a 4 byte cmn header is added in the front
+ * buf contents from parent function are copied to remaining section of this buffer
+ */
+static int
+dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
+{
+	dhd_prot_t *prot = dhd->prot;
+	ioctl_req_msg_t *ioct_rqst;
+	void * ioct_buf;	/* For ioctl payload */
+	uint16 rqstlen, resplen;
+	unsigned long flags;
+	uint16 alloced = 0;
+	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
+
+	rqstlen = len;
+	resplen = len;
+
+	/* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
+	/* 8K allocation of dongle buffer fails */
+	/* dhd doesn't give separate input & output buf lens */
+	/* so making the assumption that input length can never be more than 1.5k */
+	rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE);
+
+	DHD_GENERAL_LOCK(dhd, flags);
+
+	if (prot->ioctl_state) {
+		DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		return BCME_BUSY;
+	} else {
+		prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
+	}
+
+	/* Request for cbuf space */
+	ioct_rqst = (ioctl_req_msg_t*)
+		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+	if (ioct_rqst == NULL) {
+		DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
+		prot->ioctl_state = 0;
+		prot->curr_ioctl_cmd = 0;
+		prot->ioctl_received = IOCTL_WAIT;
+		DHD_GENERAL_UNLOCK(dhd, flags);
+		return -1;
+	}
+
+	/* Common msg buf hdr */
+	ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
+	ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
+	ioct_rqst->cmn_hdr.flags = 0;
+	ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
+	ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+	ring->seqnum++;
+
+	ioct_rqst->cmd = htol32(cmd);
+	prot->curr_ioctl_cmd = cmd;
+	ioct_rqst->output_buf_len = htol16(resplen);
+	prot->ioctl_trans_id++;
+	ioct_rqst->trans_id = prot->ioctl_trans_id;
+
+	/* populate ioctl buffer info */
+	ioct_rqst->input_buf_len = htol16(rqstlen);
+	ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
+	ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
+	/* copy ioct payload */
+	ioct_buf = (void *) prot->ioctbuf.va;
+
+	if (buf) {
+		memcpy(ioct_buf, buf, len);
+	}
+
+	OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
+
+	if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN)) {
+		DHD_ERROR(("host ioct address unaligned !!!!! \n"));
+	}
+
+	DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
+		ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
+		ioct_rqst->trans_id));
+
+	/* update ring's WR index and ring doorbell to dongle */
+	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
+	DHD_GENERAL_UNLOCK(dhd, flags);
+
+	return 0;
+} /* dhd_fillup_ioct_reqst */
+
+
+/**
+ * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
+ * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
+ * information is posted to the dongle.
+ *
+ * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
+ * each flowring in pool of flowrings.
+ *
+ * returns BCME_OK=0 on success
+ * returns non-zero negative error value on failure.
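+ *
+ * Example (a sketch only; assumes the common-ring name, item count and
+ * ring id constants used for the control submission ring elsewhere in
+ * this driver):
+ *   dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
+ *       H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
+ *       BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT);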
+ */ +static int +dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name, + uint16 max_items, uint16 item_len, uint16 ringid) +{ + int dma_buf_alloced = BCME_NOMEM; + uint32 dma_buf_len = max_items * item_len; + dhd_prot_t *prot = dhd->prot; + + ASSERT(ring); + ASSERT(name); + ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF)); + + /* Init name */ + strncpy(ring->name, name, RING_NAME_MAX_LENGTH); + ring->name[RING_NAME_MAX_LENGTH - 1] = '\0'; + + ring->idx = ringid; + + ring->max_items = max_items; + ring->item_len = item_len; + + /* A contiguous space may be reserved for all flowrings */ + if (DHD_IS_FLOWRING(ringid) && (prot->flowrings_dma_buf.va)) { + /* Carve out from the contiguous DMA-able flowring buffer */ + uint16 flowid; + uint32 base_offset; + + dhd_dma_buf_t *dma_buf = &ring->dma_buf; + dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf; + + flowid = DHD_RINGID_TO_FLOWID(ringid); + base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len; + + ASSERT(base_offset + dma_buf_len <= rsv_buf->len); + + dma_buf->len = dma_buf_len; + dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset); + PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa)); + PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset); + + /* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */ + ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa)); + + dma_buf->dmah = rsv_buf->dmah; + dma_buf->secdma = rsv_buf->secdma; + + (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); + } else { + /* Allocate a dhd_dma_buf */ + dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len); + if (dma_buf_alloced != BCME_OK) { + return BCME_NOMEM; + } + } + + /* CAUTION: Save ring::base_addr in little endian format! */ + dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa); + +#ifdef BCM_SECURE_DMA + if (SECURE_DMA_ENAB(prot->osh)) { + ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t)); + if (ring->dma_buf.secdma == NULL) { + goto free_dma_buf; + } + } +#endif /* BCM_SECURE_DMA */ + + DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d " + "ring start %p buf phys addr %x:%x \n", + ring->name, ring->max_items, ring->item_len, + dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr))); + + return BCME_OK; + +#ifdef BCM_SECURE_DMA +free_dma_buf: + if (dma_buf_alloced == BCME_OK) { + dhd_dma_buf_free(dhd, &ring->dma_buf); + } +#endif /* BCM_SECURE_DMA */ + + return BCME_NOMEM; + +} /* dhd_prot_ring_attach */ + + +/** + * dhd_prot_ring_init - Post the common ring information to dongle. + * + * Used only for common rings. + * + * The flowrings information is passed via the create flowring control message + * (tx_flowring_create_request_t) sent over the H2D control submission common + * ring. 
+ */ +static void +dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + ring->wr = 0; + ring->rd = 0; + ring->curr_rd = 0; + + /* CAUTION: ring::base_addr already in Little Endian */ + dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr, + sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx); + dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items, + sizeof(uint16), RING_MAX_ITEMS, ring->idx); + dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len, + sizeof(uint16), RING_ITEM_LEN, ring->idx); + + dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), + sizeof(uint16), RING_WR_UPD, ring->idx); + dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), + sizeof(uint16), RING_RD_UPD, ring->idx); + + /* ring inited */ + ring->inited = TRUE; + +} /* dhd_prot_ring_init */ + + +/** + * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush + * Reset WR and RD indices to 0. + */ +static void +dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + DHD_TRACE(("%s\n", __FUNCTION__)); + + dhd_dma_buf_reset(dhd, &ring->dma_buf); + + ring->rd = ring->wr = 0; + ring->curr_rd = 0; +} + + +/** + * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects + * hanging off the msgbuf_ring. + */ +static void +dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring) +{ + dhd_prot_t *prot = dhd->prot; + ASSERT(ring); + + ring->inited = FALSE; + /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */ + +#ifdef BCM_SECURE_DMA + if (SECURE_DMA_ENAB(prot->osh)) { + SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma); + if (ring->dma_buf.secdma) { + MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t)); + } + ring->dma_buf.secdma = NULL; + } +#endif /* BCM_SECURE_DMA */ + + /* If the DMA-able buffer was carved out of a pre-reserved contiguous + * memory, then simply stop using it. + */ + if (DHD_IS_FLOWRING(ring->idx) && (prot->flowrings_dma_buf.va)) { + (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); + memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t)); + } else { + dhd_dma_buf_free(dhd, &ring->dma_buf); + } + +} /* dhd_prot_ring_detach */ + + +/* + * +---------------------------------------------------------------------------- + * Flowring Pool + * + * Unlike common rings, which are attached very early on (dhd_prot_attach), + * flowrings are dynamically instantiated. Moreover, flowrings may require a + * larger DMA-able buffer. To avoid issues with fragmented cache coherent + * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once. + * The DMA-able buffers are attached to these pre-allocated msgbuf_ring. + * + * Each DMA-able buffer may be allocated independently, or may be carved out + * of a single large contiguous region that is registered with the protocol + * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region + * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic). + * + * No flowring pool action is performed in dhd_prot_attach(), as the number + * of h2d rings is not yet known. + * + * In dhd_prot_init(), the dongle advertized number of h2d rings is used to + * determine the number of flowrings required, and a pool of msgbuf_rings are + * allocated and a DMA-able buffer (carved or allocated) is attached. + * See: dhd_prot_flowrings_pool_attach() + * + * A flowring msgbuf_ring object may be fetched from this pool during flowring + * creation, using the flowid. Likewise, flowrings may be freed back into the + * pool on flowring deletion. 
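+ * A typical lifecycle, as a sketch:
+ *   pool_attach -> pool_fetch(flowid) -> ...tx traffic... ->
+ *   pool_release(flowid) -> pool_detach
+ * with pool_reset() taking the place of detach across a dongle reboot.
+ *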
+ * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release() + * + * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers + * are detached (returned back to the carved region or freed), and the pool of + * msgbuf_ring and any objects allocated against it are freed. + * See: dhd_prot_flowrings_pool_detach() + * + * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a + * state as-if upon an attach. All DMA-able buffers are retained. + * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring + * pool attach will notice that the pool persists and continue to use it. This + * will avoid the case of a fragmented DMA-able region. + * + * +---------------------------------------------------------------------------- + */ + +/* Fetch number of H2D flowrings given the total number of h2d rings */ +#define DHD_FLOWRINGS_POOL_TOTAL(h2d_rings_total) \ + ((h2d_rings_total) - BCMPCIE_H2D_COMMON_MSGRINGS) + +/* Conversion of a flowid to a flowring pool index */ +#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \ + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS) + +/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */ +#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \ + (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + DHD_FLOWRINGS_POOL_OFFSET(flowid) + +/* Traverse each flowring in the flowring pool, assigning ring and flowid */ +#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) \ + for ((flowid) = DHD_FLOWRING_START_FLOWID, \ + (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \ + (flowid) < (prot)->h2d_rings_total; \ + (flowid)++, (ring)++) + +/** + * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t. + * + * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings. + * Dongle includes common rings when it advertizes the number of H2D rings. + * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to + * allocate the DMA-able buffer and initialize each msgbuf_ring_t object. + * + * dhd_prot_ring_attach is invoked to perform the actual initialization and + * attaching the DMA-able buffer. + * + * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and + * initialized msgbuf_ring_t object. + * + * returns BCME_OK=0 on success + * returns non-zero negative error value on failure. 
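+ *
+ * Usage (sketch): invoked from dhd_prot_init once the dongle has
+ * advertized its ring count, e.g.
+ *   if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK)
+ *           goto fail;
+ * after which individual rings are obtained with
+ * dhd_prot_flowrings_pool_fetch() at flowring-create time.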
+ */
+static int
+dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
+{
+	uint16 flowid;
+	msgbuf_ring_t *ring;
+	uint16 h2d_flowrings_total; /* exclude H2D common rings */
+	dhd_prot_t *prot = dhd->prot;
+	char ring_name[RING_NAME_MAX_LENGTH];
+
+	if (prot->h2d_flowrings_pool != NULL) {
+		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
+	}
+
+	ASSERT(prot->h2d_rings_total == 0);
+
+	/* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
+	prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
+
+	if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
+		DHD_ERROR(("%s: h2d_rings_total advertized as %u\n",
+			__FUNCTION__, prot->h2d_rings_total));
+		return BCME_ERROR;
+	}
+
+	/* Subtract number of H2D common rings, to determine number of flowrings */
+	h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total);
+
+	DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
+
+	/* Allocate pool of msgbuf_ring_t objects for all flowrings */
+	prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
+		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
+
+	if (prot->h2d_flowrings_pool == NULL) {
+		DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
+			__FUNCTION__, h2d_flowrings_total));
+		goto fail;
+	}
+
+	/* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
+	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
+		snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
+		ring_name[RING_NAME_MAX_LENGTH - 1] = '\0';
+		if (dhd_prot_ring_attach(dhd, ring, ring_name,
+			H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE,
+			DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
+			goto attach_fail;
+		}
+	}
+
+	return BCME_OK;
+
+attach_fail:
+	dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
+
+fail:
+	prot->h2d_rings_total = 0;
+	return BCME_NOMEM;
+
+} /* dhd_prot_flowrings_pool_attach */
+
+
+/**
+ * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
+ * Invokes dhd_prot_ring_reset to perform the actual reset.
+ *
+ * The DMA-able buffer is not freed during reset and neither is the flowring
+ * pool freed.
+ *
+ * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
+ * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
+ * from a previous flowring pool instantiation will be reused.
+ *
+ * This will avoid a fragmented DMA-able memory condition, if multiple
+ * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
+ * cycle.
+ */
+static void
+dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
+{
+	uint16 flowid;
+	msgbuf_ring_t *ring;
+	dhd_prot_t *prot = dhd->prot;
+
+	if (prot->h2d_flowrings_pool == NULL) {
+		ASSERT(prot->h2d_rings_total == 0);
+		return;
+	}
+
+	/* Reset each flowring in the flowring pool */
+	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
+		dhd_prot_ring_reset(dhd, ring);
+		ring->inited = FALSE;
+	}
+
+	/* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
+}
+
+
+/**
+ * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
+ * DMA-able buffers for flowrings.
+ * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
+ * de-initialization of each msgbuf_ring_t.
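+ *
+ * Calling this on a never-attached (or already-detached) pool is safe:
+ * the NULL h2d_flowrings_pool check below makes the detach idempotent.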
+ */ +static void +dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd) +{ + int flowid; + msgbuf_ring_t *ring; + int h2d_flowrings_total; /* exclude H2D common rings */ + dhd_prot_t *prot = dhd->prot; + + if (prot->h2d_flowrings_pool == NULL) { + ASSERT(prot->h2d_rings_total == 0); + return; + } + + /* Detach the DMA-able buffer for each flowring in the flowring pool */ + FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) { + dhd_prot_ring_detach(dhd, ring); + } + + h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total); + + MFREE(prot->osh, prot->h2d_flowrings_pool, + (h2d_flowrings_total * sizeof(msgbuf_ring_t))); + + prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL; + prot->h2d_rings_total = 0; + +} /* dhd_prot_flowrings_pool_detach */ + + +/** + * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized + * msgbuf_ring from the flowring pool, and assign it. + * + * Unlike common rings, which uses a dhd_prot_ring_init() to pass the common + * ring information to the dongle, a flowring's information is passed via a + * flowring create control message. + * + * Only the ring state (WR, RD) index are initialized. + */ +static msgbuf_ring_t * +dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid) +{ + msgbuf_ring_t *ring; + dhd_prot_t *prot = dhd->prot; + + ASSERT(flowid >= DHD_FLOWRING_START_FLOWID); + ASSERT(flowid < prot->h2d_rings_total); + ASSERT(prot->h2d_flowrings_pool != NULL); + + ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); + + /* ASSERT flow_ring->inited == FALSE */ + + ring->wr = 0; + ring->rd = 0; + ring->curr_rd = 0; + ring->inited = TRUE; + + return ring; +} + + +/** + * dhd_prot_flowrings_pool_release - release a previously fetched flowring's + * msgbuf_ring back to the flow_ring pool. + */ +void +dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring) +{ + msgbuf_ring_t *ring; + dhd_prot_t *prot = dhd->prot; + + ASSERT(flowid >= DHD_FLOWRING_START_FLOWID); + ASSERT(flowid < prot->h2d_rings_total); + ASSERT(prot->h2d_flowrings_pool != NULL); + + ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); + + ASSERT(ring == (msgbuf_ring_t*)flow_ring); + /* ASSERT flow_ring->inited == TRUE */ + + (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); + + ring->wr = 0; + ring->rd = 0; + ring->inited = FALSE; + + ring->curr_rd = 0; +} + + +/* Assumes only one index is updated at a time */ +/* If exactly_nitems is true, this function will allocate space for nitems or fail */ +/* Exception: when wrap around is encountered, to prevent hangup (last nitems of ring buffer) */ +/* If exactly_nitems is false, this function will allocate space for nitems or less */ +static void *BCMFASTPATH +dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced, + bool exactly_nitems) +{ + void *ret_ptr = NULL; + uint16 ring_avail_cnt; + + ASSERT(nitems <= ring->max_items); + + ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items); + + if ((ring_avail_cnt == 0) || + (exactly_nitems && (ring_avail_cnt < nitems) && + ((ring->max_items - ring->wr) >= nitems))) { + DHD_INFO(("Space not available: ring %s items %d write %d read %d\n", + ring->name, nitems, ring->wr, ring->rd)); + return NULL; + } + *alloced = MIN(nitems, ring_avail_cnt); + + /* Return next available space */ + ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len); + + /* Update write index */ + if ((ring->wr + *alloced) == ring->max_items) { + ring->wr = 0; + } else if ((ring->wr + *alloced) < ring->max_items) { + ring->wr += *alloced; + } else { + /* 
Should never hit this */ + ASSERT(0); + return NULL; + } + + return ret_ptr; +} /* dhd_prot_get_ring_space */ + + +/** + * dhd_prot_ring_write_complete - Host updates the new WR index on producing + * new messages in a H2D ring. The messages are flushed from cache prior to + * posting the new WR index. The new WR index will be updated in the DMA index + * array or directly in the dongle's ring state memory. + * A PCIE doorbell will be generated to wake up the dongle. + */ +static void BCMFASTPATH +dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, + uint16 nitems) +{ + dhd_prot_t *prot = dhd->prot; + + /* cache flush */ + OSL_CACHE_FLUSH(p, ring->item_len * nitems); + + if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) { + dhd_prot_dma_indx_set(dhd, ring->wr, + H2D_DMA_INDX_WR_UPD, ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), + sizeof(uint16), RING_WR_UPD, ring->idx); + } + + /* raise h2d interrupt */ + prot->mb_ring_fn(dhd->bus, ring->wr); +} + + +/** + * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages + * from a D2H ring. The new RD index will be updated in the DMA Index array or + * directly in dongle's ring state memory. + */ +static void +dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring) +{ + /* update read index */ + /* If dma'ing h2d indices supported + * update the r -indices in the + * host memory o/w in TCM + */ + if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) { + dhd_prot_dma_indx_set(dhd, ring->rd, + D2H_DMA_INDX_RD_UPD, ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), + sizeof(uint16), RING_RD_UPD, ring->idx); + } +} + + +/** + * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array. + * Dongle will DMA the entire array (if DMA_INDX feature is enabled). + * See dhd_prot_dma_indx_init() + */ +static void +dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid) +{ + uint8 *ptr; + uint16 offset; + dhd_prot_t *prot = dhd->prot; + + switch (type) { + case H2D_DMA_INDX_WR_UPD: + ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va); + offset = DHD_H2D_RING_OFFSET(ringid); + break; + + case D2H_DMA_INDX_RD_UPD: + ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va); + offset = DHD_D2H_RING_OFFSET(ringid); + break; + + default: + DHD_ERROR(("%s: Invalid option for DMAing read/write index\n", + __FUNCTION__)); + return; + } + + ASSERT(prot->rw_index_sz != 0); + ptr += offset * prot->rw_index_sz; + + *(uint16*)ptr = htol16(new_index); + + OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz); + + DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n", + __FUNCTION__, new_index, type, ringid, ptr, offset)); + +} /* dhd_prot_dma_indx_set */ + + +/** + * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index + * array. + * Dongle DMAes an entire array to host memory (if the feature is enabled). 
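
Stripped of driver plumbing, both dhd_prot_dma_indx_set() above and the _get()
that follows are plain array indexing over a DMA-coherent buffer: the slot's
byte offset is the ring offset times the index width the dongle advertised.
A minimal self-contained sketch of that layout; the ex_-prefixed names are
hypothetical, and the OSL cache primitives are reduced to comments:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct ex_indx_array {
	uint8_t *va;          /* DMA-coherent array, one slot per ring */
	uint16_t rw_index_sz; /* 2 or 4 bytes, as advertised by the dongle */
};

/* Store a 16-bit index, little-endian, at slot 'ring_offset'. Mirrors the
 * driver's *(uint16*)ptr = htol16(new_index): only 16 bits are written,
 * even when the slot is 4 bytes wide. */
static void ex_indx_set(struct ex_indx_array *a, uint16_t ring_offset,
                        uint16_t new_index)
{
	uint8_t *slot = a->va + (size_t)ring_offset * a->rw_index_sz;
	uint8_t le[2] = { (uint8_t)(new_index & 0xff), (uint8_t)(new_index >> 8) };

	memcpy(slot, le, sizeof(le));
	/* real code: OSL_CACHE_FLUSH(slot, rw_index_sz), so the dongle's
	 * next DMA read of the array sees the update */
}

/* Read an index back, converting from little-endian. */
static uint16_t ex_indx_get(const struct ex_indx_array *a, uint16_t ring_offset)
{
	const uint8_t *slot = a->va + (size_t)ring_offset * a->rw_index_sz;

	/* real code: OSL_CACHE_INV(slot, rw_index_sz) before reading, since
	 * the dongle DMA-writes the array behind the CPU's back */
	return (uint16_t)(slot[0] | (slot[1] << 8));
}
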
+ * See dhd_prot_dma_indx_init()
+ */
+static uint16
+dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
+{
+	uint8 *ptr;
+	uint16 data;
+	uint16 offset;
+	dhd_prot_t *prot = dhd->prot;
+
+	switch (type) {
+	case H2D_DMA_INDX_WR_UPD:
+		ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
+		offset = DHD_H2D_RING_OFFSET(ringid);
+		break;
+
+	case H2D_DMA_INDX_RD_UPD:
+		ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
+		offset = DHD_H2D_RING_OFFSET(ringid);
+		break;
+
+	case D2H_DMA_INDX_WR_UPD:
+		ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
+		offset = DHD_D2H_RING_OFFSET(ringid);
+		break;
+
+	case D2H_DMA_INDX_RD_UPD:
+		ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
+		offset = DHD_D2H_RING_OFFSET(ringid);
+		break;
+
+	default:
+		DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
+			__FUNCTION__));
+		return 0;
+	}
+
+	ASSERT(prot->rw_index_sz != 0);
+	ptr += offset * prot->rw_index_sz;
+
+	OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
+
+	data = LTOH16(*((uint16*)ptr));
+
+	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
+		__FUNCTION__, data, type, ringid, ptr, offset));
+
+	return (data);
+
+} /* dhd_prot_dma_indx_get */
+
+/**
+ * An array of DMA read/write indices, containing information about the host rings, can be
+ * maintained either in host memory or in device memory, depending on preprocessor options.
+ * Depending on those options, this function is called during driver initialization. It reserves
+ * and initializes blocks of DMA-able host memory containing an array of DMA read or DMA write
+ * indices. The physical addresses of these host memory blocks are communicated to the dongle
+ * later on. By reading this host memory, the dongle learns about the state of the host rings.
+ */
+
+static INLINE int
+dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
+	dhd_dma_buf_t *dma_buf, uint32 bufsz)
+{
+	int rc;
+
+	if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
+		return BCME_OK;
+
+	rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
+
+	return rc;
+}
+
+int
+dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
+{
+	uint32 bufsz;
+	dhd_prot_t *prot = dhd->prot;
+	dhd_dma_buf_t *dma_buf;
+
+	if (prot == NULL) {
+		DHD_ERROR(("prot is not inited\n"));
+		return BCME_ERROR;
+	}
+
+	/* The dongle advertises a 2B or 4B RW index size */
+	ASSERT(rw_index_sz != 0);
+	prot->rw_index_sz = rw_index_sz;
+
+	bufsz = rw_index_sz * length;
+
+	switch (type) {
+	case H2D_DMA_INDX_WR_BUF:
+		dma_buf = &prot->h2d_dma_indx_wr_buf;
+		if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
+			goto ret_no_mem;
+		}
+		DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
+			dma_buf->len, rw_index_sz, length));
+		break;
+
+	case H2D_DMA_INDX_RD_BUF:
+		dma_buf = &prot->h2d_dma_indx_rd_buf;
+		if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
+			goto ret_no_mem;
+		}
+		DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
+			dma_buf->len, rw_index_sz, length));
+		break;
+
+	case D2H_DMA_INDX_WR_BUF:
+		dma_buf = &prot->d2h_dma_indx_wr_buf;
+		if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
+			goto ret_no_mem;
+		}
+		DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
+			dma_buf->len, rw_index_sz, length));
+		break;
+
+	case D2H_DMA_INDX_RD_BUF:
+		dma_buf = &prot->d2h_dma_indx_rd_buf;
+		if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
+			goto ret_no_mem;
+		}
+		DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
+			dma_buf->len, rw_index_sz, length));
+		break;
+
+	default:
+		DHD_ERROR(("%s: Unexpected option\n",
+			__FUNCTION__));
+		return BCME_BADOPTION;
+	}
+
+	return BCME_OK;
+
+ret_no_mem:
+	DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
+		__FUNCTION__, type, bufsz));
+	return BCME_NOMEM;
+
+} /* dhd_prot_dma_indx_init */
+
+
+/**
+ * Called when checking for 'completion' messages from the dongle. Returns the next host buffer
+ * to read from, or NULL if there are no more messages to read.
+ */
+static uint8*
+dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
+{
+	uint16 wr;
+	uint16 rd;
+	uint16 depth;
+	uint16 items;
+	void *read_addr = NULL; /* address of next msg to be read in ring */
+	uint16 d2h_wr = 0;
+
+	DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
+		__FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
+		(uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
+
+	/* Remember the read index in a local variable, because ring->rd is
+	 * updated at the end of this function; without this copy it would not
+	 * be possible to report the exact read index the message was read from.
+	 */
+	ring->curr_rd = ring->rd;
+
+	/* update write pointer */
+	if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+		/* DMAing write/read indices supported */
+		d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+		ring->wr = d2h_wr;
+	} else {
+		dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
+	}
+
+	wr = ring->wr;
+	rd = ring->rd;
+	depth = ring->max_items;
+
+	/* check for avail space, in number of ring items */
+	items = READ_AVAIL_SPACE(wr, rd, depth);
+	if (items == 0) {
+		return NULL;
+	}
+
+	ASSERT(items < ring->max_items);
+
+	/*
+	 * Note that there are builds where ASSERT translates to just a printk,
+	 * so even if we had hit this condition we would never halt, and
+	 * dhd_prot_process_msgtype could then get into a big loop if this
+	 * happens.
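
The items computation follows single-producer/single-consumer ring arithmetic,
and, judging by how ring->rd is advanced below, only the contiguous run between
rd and either wr or the end of the buffer is reported per call; after a wrap,
rd resets to 0 and the remainder is picked up on the next call. A small
self-checking sketch of that reading (ex_read_avail is a hypothetical stand-in
for READ_AVAIL_SPACE):

#include <assert.h>
#include <stdint.h>

/* Number of items the consumer may read in one pass, given producer (wr)
 * and consumer (rd) positions in a ring of 'depth' items. Only the
 * contiguous run is returned; the wrapped remainder waits for the next
 * call, matching the rd-reset logic in the function above. */
static uint16_t ex_read_avail(uint16_t wr, uint16_t rd, uint16_t depth)
{
	return (wr >= rd) ? (uint16_t)(wr - rd) : (uint16_t)(depth - rd);
}

int main(void)
{
	assert(ex_read_avail(5, 2, 8) == 3); /* no wrap: items 2..4        */
	assert(ex_read_avail(1, 6, 8) == 2); /* wrap: items 6..7 this pass */
	assert(ex_read_avail(4, 4, 8) == 0); /* empty ring                 */
	return 0;
}
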
+ */ + if (items >= ring->max_items) { + DHD_ERROR(("\r\n======================= \r\n")); + DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n", + __FUNCTION__, ring, ring->name, ring->max_items, items)); + DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth)); + DHD_ERROR(("dhd->busstate %d bus->suspended %d bus->wait_for_d3_ack %d \r\n", + dhd->busstate, dhd->bus->suspended, dhd->bus->wait_for_d3_ack)); + DHD_ERROR(("\r\n======================= \r\n")); + + *available_len = 0; + return NULL; + } + + /* if space is available, calculate address to be read */ + read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len); + + /* update read pointer */ + if ((ring->rd + items) >= ring->max_items) { + ring->rd = 0; + } else { + ring->rd += items; + } + + ASSERT(ring->rd < ring->max_items); + + /* convert items to bytes : available_len must be 32bits */ + *available_len = (uint32)(items * ring->item_len); + + OSL_CACHE_INV(read_addr, *available_len); + + /* return read address */ + return read_addr; + +} /* dhd_prot_get_read_addr */ + +/** Creates a flow ring and informs dongle of this event */ +int +dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) +{ + tx_flowring_create_request_t *flow_create_rqst; + msgbuf_ring_t *flow_ring; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; + + /* Fetch a pre-initialized msgbuf_ring from the flowring pool */ + flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid); + if (flow_ring == NULL) { + DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n", + __FUNCTION__, flow_ring_node->flowid)); + return BCME_NOMEM; + } + + DHD_GENERAL_LOCK(dhd, flags); + + /* Request for ctrl_ring buffer space */ + flow_create_rqst = (tx_flowring_create_request_t *) + dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE); + + if (flow_create_rqst == NULL) { + dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring); + DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n", + __FUNCTION__, flow_ring_node->flowid)); + DHD_GENERAL_UNLOCK(dhd, flags); + return BCME_NOMEM; + } + + flow_ring_node->prot_info = (void *)flow_ring; + + /* Common msg buf hdr */ + flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE; + flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; + flow_create_rqst->msg.request_id = htol32(0); /* TBD */ + + flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + /* Update flow create message */ + flow_create_rqst->tid = flow_ring_node->flow_info.tid; + flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); + memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa)); + memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da)); + /* CAUTION: ring::base_addr already in Little Endian */ + flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr; + flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr; + flow_create_rqst->max_items = htol16(H2DRING_TXPOST_MAX_ITEM); + flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE); + DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG + " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid, + MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid, + flow_ring_node->flow_info.ifindex)); + + /* Update the 
flow_ring's WRITE index */ + if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) { + dhd_prot_dma_indx_set(dhd, flow_ring->wr, + H2D_DMA_INDX_WR_UPD, flow_ring->idx); + } else { + dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr), + sizeof(uint16), RING_WR_UPD, flow_ring->idx); + } + + /* update control subn ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1); + + DHD_GENERAL_UNLOCK(dhd, flags); + + return BCME_OK; +} /* dhd_prot_flow_ring_create */ + +/** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */ +static void +dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg) +{ + tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg; + + DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__, + ltoh16(flow_create_resp->cmplt.status), + ltoh16(flow_create_resp->cmplt.flow_ring_id))); + + dhd_bus_flow_ring_create_response(dhd->bus, + ltoh16(flow_create_resp->cmplt.flow_ring_id), + ltoh16(flow_create_resp->cmplt.status)); +} + +/** called on e.g. flow ring delete */ +void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info) +{ + msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info; + dhd_prot_ring_detach(dhd, flow_ring); + DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__)); +} + +void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, + struct bcmstrbuf *strbuf, const char * fmt) +{ + const char *default_fmt = "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d\n"; + msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info; + uint16 rd, wr; + uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len; + + if (fmt == NULL) { + fmt = default_fmt; + } + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx); + bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va, + ltoh32(flow_ring->base_addr.high_addr), + ltoh32(flow_ring->base_addr.low_addr), dma_buf_len); +} + +void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) +{ + dhd_prot_t *prot = dhd->prot; + bcm_bprintf(strbuf, "CtrlPost: "); + dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf, NULL); + bcm_bprintf(strbuf, "CtrlCpl: "); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf, NULL); + + bcm_bprintf(strbuf, "RxPost: "); + bcm_bprintf(strbuf, "RBP %d ", prot->rxbufpost); + dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf, NULL); + bcm_bprintf(strbuf, "RxCpl: "); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf, NULL); + + bcm_bprintf(strbuf, "TxCpl: "); + dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf, NULL); + bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail %d\n", + dhd->prot->active_tx_count, + DHD_PKTID_AVAIL(dhd->prot->pktid_map_handle)); +} + +int +dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) +{ + tx_flowring_delete_request_t *flow_delete_rqst; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + DHD_GENERAL_LOCK(dhd, flags); + + /* Request for ring buffer space */ + flow_delete_rqst = (tx_flowring_delete_request_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + + if (flow_delete_rqst == NULL) { + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__)); + return BCME_NOMEM; + } + + /* Common msg buf hdr */ 
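
Filling this common header is the same pattern every H2D control request in
this file uses (create, delete, flush, ring-config), differing only in
msg_type. Condensed into one hypothetical helper; the real layout is
cmn_msg_hdr_t from the message-buffer protocol headers, and the exact
H2D_EPOCH_MODULO value is an assumption here:

#include <stdint.h>

#define EX_H2D_EPOCH_MODULO 253  /* assumed value of H2D_EPOCH_MODULO */

/* Hypothetical mirror of the header every H2D control request starts with. */
struct ex_cmn_msg_hdr {
	uint8_t  msg_type;    /* MSG_TYPE_FLOW_RING_CREATE/DELETE/FLUSH/... */
	uint8_t  if_id;       /* interface the request applies to */
	uint8_t  flags;
	uint8_t  epoch;       /* per-ring sequence number, modulo wrapped */
	uint32_t request_id;  /* little-endian; htol32(0) in the requests here */
};

static void ex_fill_cmn_hdr(struct ex_cmn_msg_hdr *hdr, uint8_t msg_type,
	uint8_t if_id, uint16_t *ring_seqnum)
{
	hdr->msg_type = msg_type;
	hdr->if_id = if_id;
	hdr->flags = 0;
	hdr->epoch = (uint8_t)(*ring_seqnum % EX_H2D_EPOCH_MODULO);
	(*ring_seqnum)++;            /* the ring owns the sequence counter */
	hdr->request_id = 0;         /* marked TBD in the requests above */
}
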
+ flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE; + flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; + flow_delete_rqst->msg.request_id = htol32(0); /* TBD */ + + flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + /* Update Delete info */ + flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); + flow_delete_rqst->reason = htol16(BCME_OK); + + DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG + " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid, + MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid, + flow_ring_node->flow_info.ifindex)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return BCME_OK; +} + +static void +dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg) +{ + tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg; + + DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__, + flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id)); + + dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id, + flow_delete_resp->cmplt.status); +} + +int +dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) +{ + tx_flowring_flush_request_t *flow_flush_rqst; + dhd_prot_t *prot = dhd->prot; + unsigned long flags; + uint16 alloced = 0; + msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; + + DHD_GENERAL_LOCK(dhd, flags); + + /* Request for ring buffer space */ + flow_flush_rqst = (tx_flowring_flush_request_t *) + dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); + if (flow_flush_rqst == NULL) { + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__)); + return BCME_NOMEM; + } + + /* Common msg buf hdr */ + flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH; + flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; + flow_flush_rqst->msg.request_id = htol32(0); /* TBD */ + + flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; + ring->seqnum++; + + flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); + flow_flush_rqst->reason = htol16(BCME_OK); + + DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__)); + + /* update ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1); + DHD_GENERAL_UNLOCK(dhd, flags); + + return BCME_OK; +} /* dhd_prot_flow_ring_flush */ + +static void +dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg) +{ + tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg; + + DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__, + flow_flush_resp->cmplt.status)); + + dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id, + flow_flush_resp->cmplt.status); +} + +/** + * Request dongle to configure soft doorbells for D2H rings. Host populated soft + * doorbell information is transferred to dongle via the d2h ring config control + * message. 
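
In other words: instead of (or in addition to) a PCIe interrupt, the dongle
DMA-writes a host-chosen value to a host-chosen address whenever a D2H ring
needs service, so the host side only has to watch a single word. A sketch of
the consuming side under that reading, with hypothetical ex_ names; the
items/msecs fields below appear to be coalescing thresholds, and the
platform integration (barriers, IRQ substitution) is omitted:

#include <stdint.h>

/* One host-populated soft doorbell: the dongle writes 'value' to
 * 'host_addr' (the haddr high/low pair, mapped) when ringing. */
struct ex_soft_doorbell {
	volatile uint32_t *host_addr;
	uint32_t value;
};

/* Poll-side check for a ring. */
static int ex_doorbell_rang(struct ex_soft_doorbell *db)
{
	if (*db->host_addr == db->value) {
		*db->host_addr = 0;  /* re-arm for the next ring */
		return 1;
	}
	return 0;
}
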
+ */ +void +dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd) +{ +#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT) + uint16 ring_idx; + uint8 *msg_next; + void *msg_start; + uint16 alloced = 0; + unsigned long flags; + dhd_prot_t *prot = dhd->prot; + ring_config_req_t *ring_config_req; + bcmpcie_soft_doorbell_t *soft_doorbell; + msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; + const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS; + + /* Claim space for d2h_ring number of d2h_ring_config_req_t messages */ + DHD_GENERAL_LOCK(dhd, flags); + msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE); + + if (msg_start == NULL) { + DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n", + __FUNCTION__, d2h_rings)); + DHD_GENERAL_UNLOCK(dhd, flags); + return; + } + + msg_next = (uint8*)msg_start; + + for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) { + + /* position the ring_config_req into the ctrl subm ring */ + ring_config_req = (ring_config_req_t *)msg_next; + + /* Common msg header */ + ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG; + ring_config_req->msg.if_id = 0; + ring_config_req->msg.flags = 0; + + ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; + ctrl_ring->seqnum++; + + ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */ + + /* Ring Config subtype and d2h ring_id */ + ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL); + ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx)); + + /* Host soft doorbell configuration */ + soft_doorbell = &prot->soft_doorbell[ring_idx]; + + ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value); + ring_config_req->soft_doorbell.haddr.high = + htol32(soft_doorbell->haddr.high); + ring_config_req->soft_doorbell.haddr.low = + htol32(soft_doorbell->haddr.low); + ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items); + ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs); + + DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n", + __FUNCTION__, ring_config_req->soft_doorbell.haddr.high, + ring_config_req->soft_doorbell.haddr.low, + ring_config_req->soft_doorbell.value)); + + msg_next = msg_next + ctrl_ring->item_len; + } + + /* update control subn ring's WR index and ring doorbell to dongle */ + dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings); + DHD_GENERAL_UNLOCK(dhd, flags); +#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */ +} + +static void +dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg) +{ + DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n", + __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status), + ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id))); +} + +int +dhd_prot_debug_info_print(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + msgbuf_ring_t *ring; + uint16 rd, wr; + uint32 intstatus = 0; + uint32 intmask = 0; + uint32 mbintstatus = 0; + uint32 d2h_mb_data = 0; + uint32 dma_buf_len; + + DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n")); + + ring = &prot->h2dring_ctrl_subn; + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), dma_buf_len)); + DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, 
&wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + + ring = &prot->d2hring_ctrl_cpln; + dma_buf_len = ring->max_items * ring->item_len; + DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", + ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), + ltoh32(ring->base_addr.low_addr), dma_buf_len)); + DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); + dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); + dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); + DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); + DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum)); + + intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); + intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0); + mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0); + dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0); + + DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n")); + DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n,", + intstatus, intmask, mbintstatus)); + DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, dhd->bus->def_intmask)); + + return 0; +} + +int +dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) +{ + uint32 *ptr; + uint32 value; + uint32 i; + uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus); + + OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va, + dhd->prot->d2h_dma_indx_wr_buf.len); + + ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va); + + bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues); + + bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr); + value = ltoh32(*ptr); + bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value); + ptr++; + value = ltoh32(*ptr); + bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value); + + ptr++; + bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr); + for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) { + value = ltoh32(*ptr); + bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value); + ptr++; + } + + OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va, + dhd->prot->h2d_dma_indx_rd_buf.len); + + ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va); + + bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr); + value = ltoh32(*ptr); + bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value); + ptr++; + value = ltoh32(*ptr); + bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value); + ptr++; + value = ltoh32(*ptr); + bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value); + + return 0; +} + +uint32 +dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val) +{ + dhd_prot_t *prot = dhd->prot; +#if DHD_DBG_SHOW_METADATA + prot->metadata_dbg = val; +#endif + return (uint32)prot->metadata_dbg; +} + +uint32 +dhd_prot_metadata_dbg_get(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + return (uint32)prot->metadata_dbg; +} + +uint32 +dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx) +{ + dhd_prot_t *prot = dhd->prot; + if (rx) + prot->rx_metadata_offset = (uint16)val; + else + prot->tx_metadata_offset = (uint16)val; + return dhd_prot_metadatalen_get(dhd, rx); +} + +uint32 +dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx) +{ + dhd_prot_t *prot = dhd->prot; + if (rx) + return prot->rx_metadata_offset; + else + return prot->tx_metadata_offset; +} + +/** optimization to write "n" tx items at a time to ring */ +uint32 
+dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val) +{ + dhd_prot_t *prot = dhd->prot; + if (set) + prot->txp_threshold = (uint16)val; + val = prot->txp_threshold; + return val; +} + +#ifdef DHD_RX_CHAINING + +static INLINE void BCMFASTPATH +dhd_rxchain_reset(rxchain_info_t *rxchain) +{ + rxchain->pkt_count = 0; +} + +static void BCMFASTPATH +dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx) +{ + uint8 *eh; + uint8 prio; + dhd_prot_t *prot = dhd->prot; + rxchain_info_t *rxchain = &prot->rxchain; + + ASSERT(!PKTISCHAINED(pkt)); + ASSERT(PKTCLINK(pkt) == NULL); + ASSERT(PKTCGETATTR(pkt) == 0); + + eh = PKTDATA(dhd->osh, pkt); + prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT; + + if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa, + rxchain->h_da, rxchain->h_prio))) { + /* Different flow - First release the existing chain */ + dhd_rxchain_commit(dhd); + } + + /* For routers, with HNDCTF, link the packets using PKTSETCLINK, */ + /* so that the chain can be handed off to CTF bridge as is. */ + if (rxchain->pkt_count == 0) { + /* First packet in chain */ + rxchain->pkthead = rxchain->pkttail = pkt; + + /* Keep a copy of ptr to ether_da, ether_sa and prio */ + rxchain->h_da = ((struct ether_header *)eh)->ether_dhost; + rxchain->h_sa = ((struct ether_header *)eh)->ether_shost; + rxchain->h_prio = prio; + rxchain->ifidx = ifidx; + rxchain->pkt_count++; + } else { + /* Same flow - keep chaining */ + PKTSETCLINK(rxchain->pkttail, pkt); + rxchain->pkttail = pkt; + rxchain->pkt_count++; + } + + if ((!ETHER_ISMULTI(rxchain->h_da)) && + ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) || + (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) { + PKTSETCHAINED(dhd->osh, pkt); + PKTCINCRCNT(rxchain->pkthead); + PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt)); + } else { + dhd_rxchain_commit(dhd); + return; + } + + /* If we have hit the max chain length, dispatch the chain and reset */ + if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) { + dhd_rxchain_commit(dhd); + } +} + +static void BCMFASTPATH +dhd_rxchain_commit(dhd_pub_t *dhd) +{ + dhd_prot_t *prot = dhd->prot; + rxchain_info_t *rxchain = &prot->rxchain; + + if (rxchain->pkt_count == 0) + return; + + /* Release the packets to dhd_linux */ + dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count); + + /* Reset the chain */ + dhd_rxchain_reset(rxchain); +} + +#endif /* DHD_RX_CHAINING */ diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie.c b/drivers/net/wireless/bcmdhd/dhd_pcie.c new file mode 100644 index 000000000000..7438d581b4ac --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_pcie.c @@ -0,0 +1,5700 @@ +/* + * DHD Bus Module for PCIE + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_pcie.c 609007 2015-12-30 07:44:52Z $ + */ + + +/* include files */ +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(DHD_DEBUG) +#include +#endif /* defined(DHD_DEBUG) */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ + +#ifdef BCMEMBEDIMAGE +#include BCMEMBEDIMAGE +#endif /* BCMEMBEDIMAGE */ + +#ifdef PCIE_OOB +#include "ftdi_sio_external.h" +#endif /* PCIE_OOB */ + +#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */ +#define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */ + +#define ARMCR4REG_BANKIDX (0x40/sizeof(uint32)) +#define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32)) +/* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */ + +#if defined(SUPPORT_MULTIPLE_BOARD_REV) + extern unsigned int system_rev; +#endif /* SUPPORT_MULTIPLE_BOARD_REV */ + +int dhd_dongle_memsize; +int dhd_dongle_ramsize; +static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size); +#ifdef DHD_DEBUG +static int dhdpcie_bus_readconsole(dhd_bus_t *bus); +#endif /* DHD_DEBUG */ +#if defined(DHD_FW_COREDUMP) +static int dhdpcie_mem_dump(dhd_bus_t *bus); +#endif /* DHD_FW_COREDUMP */ + +static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size); +static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, + const char *name, void *params, + int plen, void *arg, int len, int val_size); +static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval); +static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, + uint32 len, uint32 srcdelay, uint32 destdelay); +static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter); +static int _dhdpcie_download_firmware(struct dhd_bus *bus); +static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh); +static int dhdpcie_bus_write_vars(dhd_bus_t *bus); +static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus); +static bool dhdpci_bus_read_frames(dhd_bus_t *bus); +static int dhdpcie_readshared(dhd_bus_t *bus); +static void dhdpcie_init_shared_addr(dhd_bus_t *bus); +static bool dhdpcie_dongle_attach(dhd_bus_t *bus); +static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size); +static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, + bool dongle_isolation, bool reset_flag); +static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh); +static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len); +static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset); +static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data); +static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data); +static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset); +static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data); +static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset); +static void 
dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data); +static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset); +static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data); +static void dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size); +static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b); +static void dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data); +static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info); +extern void dhd_dpc_kill(dhd_pub_t *dhdp); + +#ifdef BCMEMBEDIMAGE +static int dhdpcie_download_code_array(dhd_bus_t *bus); +#endif /* BCMEMBEDIMAGE */ + + +#ifdef EXYNOS_PCIE_DEBUG +extern void exynos_pcie_register_dump(int ch_num); +#endif /* EXYNOS_PCIE_DEBUG */ + +#define PCI_VENDOR_ID_BROADCOM 0x14e4 + +static void dhd_bus_set_device_wake(struct dhd_bus *bus, bool val); +extern void wl_nddbg_wpp_log(const char *format, ...); +#ifdef PCIE_OOB +static void dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus); + +#define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */ +static uint dhd_doorbell_timeout = DHD_DEFAULT_DOORBELL_TIMEOUT; + +#define HOST_WAKE 4 /* GPIO_0 (HOST_WAKE) - Output from WLAN */ +#define DEVICE_WAKE 5 /* GPIO_1 (DEVICE_WAKE) - Input to WLAN */ +#define BIT_WL_REG_ON 6 +#define BIT_BT_REG_ON 7 + +int gpio_handle_val = 0; +unsigned char gpio_port = 0; +unsigned char gpio_direction = 0; +#define OOB_PORT "ttyUSB0" +#endif /* PCIE_OOB */ +static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version); + +/* IOVar table */ +enum { + IOV_INTR = 1, + IOV_MEMBYTES, + IOV_MEMSIZE, + IOV_SET_DOWNLOAD_STATE, + IOV_DEVRESET, + IOV_VARS, + IOV_MSI_SIM, + IOV_PCIE_LPBK, + IOV_CC_NVMSHADOW, + IOV_RAMSIZE, + IOV_RAMSTART, + IOV_SLEEP_ALLOWED, + IOV_PCIE_DMAXFER, + IOV_PCIE_SUSPEND, + IOV_PCIEREG, + IOV_PCIECFGREG, + IOV_PCIECOREREG, + IOV_PCIESERDESREG, + IOV_BAR0_SECWIN_REG, + IOV_SBREG, + IOV_DONGLEISOLATION, + IOV_LTRSLEEPON_UNLOOAD, + IOV_METADATA_DBG, + IOV_RX_METADATALEN, + IOV_TX_METADATALEN, + IOV_TXP_THRESHOLD, + IOV_BUZZZ_DUMP, + IOV_DUMP_RINGUPD_BLOCK, + IOV_DMA_RINGINDICES, + IOV_DB1_FOR_MB, + IOV_FLOW_PRIO_MAP, +#ifdef DHD_PCIE_RUNTIMEPM + IOV_IDLETIME, +#endif /* DHD_PCIE_RUNTIMEPM */ + IOV_RXBOUND, + IOV_TXBOUND, + IOV_HANGREPORT, +#ifdef PCIE_OOB + IOV_OOB_BT_REG_ON, + IOV_OOB_ENABLE +#endif /* PCIE_OOB */ +}; + + +const bcm_iovar_t dhdpcie_iovars[] = { + {"intr", IOV_INTR, 0, IOVT_BOOL, 0 }, + {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) }, + {"memsize", IOV_MEMSIZE, 0, IOVT_UINT32, 0 }, + {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, IOVT_BOOL, 0 }, + {"vars", IOV_VARS, 0, IOVT_BUFFER, 0 }, + {"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0 }, + {"pcie_lpbk", IOV_PCIE_LPBK, 0, IOVT_UINT32, 0 }, + {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, IOVT_BUFFER, 0 }, + {"ramsize", IOV_RAMSIZE, 0, IOVT_UINT32, 0 }, + {"ramstart", IOV_RAMSTART, 0, IOVT_UINT32, 0 }, + {"pciereg", IOV_PCIEREG, 0, IOVT_BUFFER, 2 * sizeof(int32) }, + {"pciecfgreg", IOV_PCIECFGREG, 0, IOVT_BUFFER, 2 * sizeof(int32) }, + {"pciecorereg", IOV_PCIECOREREG, 0, IOVT_BUFFER, 2 * sizeof(int32) }, + {"pcieserdesreg", IOV_PCIESERDESREG, 0, IOVT_BUFFER, 3 * sizeof(int32) }, + {"bar0secwinreg", IOV_BAR0_SECWIN_REG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, IOVT_BUFFER, 3 * sizeof(int32) }, + {"pcie_suspend", IOV_PCIE_SUSPEND, 0, IOVT_UINT32, 0 }, +#ifdef PCIE_OOB + {"oob_bt_reg_on", IOV_OOB_BT_REG_ON, 0, 
IOVT_UINT32, 0 }, + {"oob_enable", IOV_OOB_ENABLE, 0, IOVT_UINT32, 0 }, +#endif /* PCIE_OOB */ + {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, IOVT_BOOL, 0 }, + {"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 }, + {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, IOVT_UINT32, 0 }, + {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, IOVT_BUFFER, 0 }, + {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, IOVT_UINT32, 0}, + {"metadata_dbg", IOV_METADATA_DBG, 0, IOVT_BOOL, 0 }, + {"rx_metadata_len", IOV_RX_METADATALEN, 0, IOVT_UINT32, 0 }, + {"tx_metadata_len", IOV_TX_METADATALEN, 0, IOVT_UINT32, 0 }, + {"db1_for_mb", IOV_DB1_FOR_MB, 0, IOVT_UINT32, 0 }, + {"txp_thresh", IOV_TXP_THRESHOLD, 0, IOVT_UINT32, 0 }, + {"buzzz_dump", IOV_BUZZZ_DUMP, 0, IOVT_UINT32, 0 }, + {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, IOVT_UINT32, 0 }, +#ifdef DHD_PCIE_RUNTIMEPM + {"idletime", IOV_IDLETIME, 0, IOVT_INT32, 0 }, +#endif /* DHD_PCIE_RUNTIMEPM */ + {"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 }, + {"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 }, + {"fw_hang_report", IOV_HANGREPORT, 0, IOVT_BOOL, 0 }, + {NULL, 0, 0, 0, 0 } +}; + + +#define MAX_READ_TIMEOUT 5 * 1000 * 1000 + +#ifndef DHD_RXBOUND +#define DHD_RXBOUND 64 +#endif +#ifndef DHD_TXBOUND +#define DHD_TXBOUND 64 +#endif +uint dhd_rxbound = DHD_RXBOUND; +uint dhd_txbound = DHD_TXBOUND; + +/* Register/Unregister functions are called by the main DHD entry + * point (e.g. module insertion) to link with the bus driver, in + * order to look for or await the device. + */ + +int +dhd_bus_register(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + return dhdpcie_bus_register(); +} + +void +dhd_bus_unregister(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhdpcie_bus_unregister(); + return; +} + + +/** returns a host virtual address */ +uint32 * +dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size) +{ + return (uint32 *)REG_MAP(addr, size); +} + +void +dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size) +{ + REG_UNMAP((void*)(uintptr)addr); + return; +} + +/** + * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096 + * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The + * precondition is that the PCIEBAR0Window register 'points' at the PCIe core. + * + * 'tcm' is the *host* virtual address at which tcm is mapped. 
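
The window mechanics described here reduce to a few lines: write a backplane
address into the PCIEBAR0Window configuration register to choose which 4 KB
backplane page BAR0 exposes, then index into the mapped region with the low
12 bits. A sketch under stated assumptions: the 0x80 config-space offset is
the conventional PCI_BAR0_WIN location on Broadcom parts, and the ex_ helper
stands in for OSL_PCI_WRITE_CONFIG():

#include <stdint.h>

#define EX_PCI_BAR0_WIN 0x80u   /* assumed PCIEBAR0Window config offset */
#define EX_WIN_SIZE     0x1000u /* 4 KB window, per the comment above */

extern void ex_pci_write_config32(uint32_t offset, uint32_t val);

/* Return a pointer through which 'backplane_addr' can be accessed: point
 * the window at the enclosing 4 KB backplane page, then offset into the
 * mapped BAR0 region with the low 12 bits. */
static volatile uint32_t *
ex_bar0_reg(volatile uint8_t *regs, uint32_t backplane_addr)
{
	ex_pci_write_config32(EX_PCI_BAR0_WIN,
		backplane_addr & ~(EX_WIN_SIZE - 1));
	return (volatile uint32_t *)(regs + (backplane_addr & (EX_WIN_SIZE - 1)));
}
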
+ */ +dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, + volatile char *regs, volatile char *tcm, void *pci_dev) +{ + dhd_bus_t *bus; + + DHD_TRACE(("%s: ENTER\n", __FUNCTION__)); + + do { + if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) { + DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__)); + break; + } + + bus->regs = regs; + bus->tcm = tcm; + bus->osh = osh; + /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */ + bus->dev = (struct pci_dev *)pci_dev; + + dll_init(&bus->const_flowring); + + /* Attach pcie shared structure */ + if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) { + DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__)); + break; + } + + /* dhd_common_init(osh); */ + + if (dhdpcie_dongle_attach(bus)) { + DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__)); + break; + } + + /* software resources */ + if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) { + DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__)); + + break; + } + bus->dhd->busstate = DHD_BUS_DOWN; + bus->db1_for_mb = TRUE; + bus->dhd->hang_report = TRUE; + bus->irq_registered = FALSE; + + bus->d3_ack_war_cnt = 0; + + DHD_TRACE(("%s: EXIT SUCCESS\n", + __FUNCTION__)); + + return bus; + } while (0); + + DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__)); + + if (bus && bus->pcie_sh) { + MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t)); + } + + if (bus) { + MFREE(osh, bus, sizeof(dhd_bus_t)); + } + return NULL; +} + +uint +dhd_bus_chip(struct dhd_bus *bus) +{ + ASSERT(bus->sih != NULL); + return bus->sih->chip; +} + +uint +dhd_bus_chiprev(struct dhd_bus *bus) +{ + ASSERT(bus); + ASSERT(bus->sih != NULL); + return bus->sih->chiprev; +} + +void * +dhd_bus_pub(struct dhd_bus *bus) +{ + return bus->dhd; +} + +void * +dhd_bus_sih(struct dhd_bus *bus) +{ + return (void *)bus->sih; +} + +void * +dhd_bus_txq(struct dhd_bus *bus) +{ + return &bus->txq; +} + +/** Get Chip ID version */ +uint dhd_bus_chip_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + return bus->sih->chip; +} + +/** Get Chip Rev ID version */ +uint dhd_bus_chiprev_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + return bus->sih->chiprev; +} + +/** Get Chip Pkg ID version */ +uint dhd_bus_chippkg_id(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = dhdp->bus; + return bus->sih->chippkg; +} + +/** Read and clear intstatus. This should be called with interupts disabled or inside isr */ +uint32 +dhdpcie_bus_intstatus(dhd_bus_t *bus) +{ + uint32 intstatus = 0; + uint32 intmask = 0; + + if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) || + (bus->sih->buscorerev == 2)) { + intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); + dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus); + intstatus &= I_MB; + } else { + /* this is a PCIE core register..not a config register... */ + intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); + + /* this is a PCIE core register..not a config register... */ + intmask = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, 0, 0); + + /* + * The fourth argument to si_corereg is the "mask" fields of the register to update + * and the fifth field is the "value" to update. Now if we are interested in only + * few fields of the "mask" bit map, we should not be writing back what we read + * By doing so, we might clear/ack interrupts that are not handled yet. + */ + si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, bus->def_intmask, + intstatus); + + intstatus &= intmask; + + /* Is device removed. 
intstatus & intmask read 0xffffffff */ + if (intstatus == (uint32)-1) { + DHD_ERROR(("%s: !!!!!!Device Removed or dead chip.\n", __FUNCTION__)); + intstatus = 0; +#ifdef CUSTOMER_HW4_DEBUG +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN; + dhd_os_send_hang_message(bus->dhd); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */ +#endif /* CUSTOMER_HW4_DEBUG */ + } + + intstatus &= bus->def_intmask; + } + + return intstatus; +} + +/** + * Name: dhdpcie_bus_isr + * Parameters: + * 1: IN int irq -- interrupt vector + * 2: IN void *arg -- handle to private data structure + * Return value: + * Status (TRUE or FALSE) + * + * Description: + * Interrupt Service routine checks for the status register, + * disable interrupt and queue DPC if mail box interrupts are raised. + */ +int32 +dhdpcie_bus_isr(dhd_bus_t *bus) +{ + uint32 intstatus = 0; + + do { + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + /* verify argument */ + if (!bus) { + DHD_ERROR(("%s : bus is null pointer, exit \n", __FUNCTION__)); + break; + } + + if (bus->dhd->dongle_reset) { + break; + } + + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: BUS is down, not processing the interrupt \r\n", + __FUNCTION__)); + break; + } + + intstatus = dhdpcie_bus_intstatus(bus); + + /* Check if the interrupt is ours or not */ + if (intstatus == 0) { + break; + } + + /* save the intstatus */ + bus->intstatus = intstatus; + + /* Overall operation: + * - Mask further interrupts + * - Read/ack intstatus + * - Take action based on bits and state + * - Reenable interrupts (as per state) + */ + + /* Count the interrupt call */ + bus->intrcount++; + + /* read interrupt status register!! Status bits will be cleared in DPC !! */ + bus->ipend = TRUE; + dhdpcie_bus_intr_disable(bus); /* Disable interrupt!! */ + bus->intdis = TRUE; + +#if defined(PCIE_ISR_THREAD) + + DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__)); + DHD_OS_WAKE_LOCK(bus->dhd); + while (dhd_bus_dpc(bus)); + DHD_OS_WAKE_UNLOCK(bus->dhd); +#else + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); /* queue DPC now!! 
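
The interrupt path above is a classic top-half/bottom-half split: the hard
ISR only claims the interrupt, saves intstatus, and masks further interrupts;
all ring processing happens later in the DPC, which re-enables interrupts
once the rings are drained. The same shape in miniature, with hypothetical
ex_ helpers standing in for dhdpcie_bus_intstatus(),
dhdpcie_bus_intr_disable() and dhd_sched_dpc():

#include <stdint.h>

struct ex_bus {
	uint32_t intstatus;  /* saved for the DPC to consume */
	int intdis;          /* interrupts currently masked? */
	int dpc_sched;
};

extern uint32_t ex_read_and_ack_intstatus(struct ex_bus *bus);
extern void ex_mask_interrupts(struct ex_bus *bus);
extern void ex_schedule_dpc(struct ex_bus *bus);

/* Returns nonzero if the interrupt was ours. */
static int ex_isr(struct ex_bus *bus)
{
	uint32_t st = ex_read_and_ack_intstatus(bus);

	if (st == 0)
		return 0;            /* not our interrupt */

	bus->intstatus = st;     /* handed to the DPC */
	ex_mask_interrupts(bus); /* re-enabled after the DPC drains the rings */
	bus->intdis = 1;
	bus->dpc_sched = 1;
	ex_schedule_dpc(bus);    /* all ring processing happens there */
	return 1;
}
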
*/ +#endif /* defined(SDIO_ISR_THREAD) */ + + DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__)); + return TRUE; + + } while (0); + + DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__)); + return FALSE; +} + +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +dhd_pub_t *link_recovery = NULL; +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ +static bool +dhdpcie_dongle_attach(dhd_bus_t *bus) +{ + + osl_t *osh = bus->osh; + void *regsva = (void*)bus->regs; + uint16 devid = bus->cl_devid; + uint32 val; + sbpcieregs_t *sbpcieregs; + + DHD_TRACE(("%s: ENTER\n", __FUNCTION__)); + +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY + link_recovery = bus->dhd; +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ + + bus->alp_only = TRUE; + bus->sih = NULL; + + /* Set bar0 window to si_enum_base */ + dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE); + + /* Checking PCIe bus status with reading configuration space */ + val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32)); + if ((val & 0xFFFF) != VENDOR_BROADCOM) { + DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__)); + goto fail; + } + + /* si_attach() will provide an SI handle and scan the backplane */ + if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus, + &bus->vars, &bus->varsz))) { + DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__)); + goto fail; + } + + + si_setcore(bus->sih, PCIE2_CORE_ID, 0); + sbpcieregs = (sbpcieregs_t*)(bus->regs); + + /* WAR where the BAR1 window may not be sized properly */ + W_REG(osh, &sbpcieregs->configaddr, 0x4e0); + val = R_REG(osh, &sbpcieregs->configdata); + W_REG(osh, &sbpcieregs->configdata, val); + + /* Get info on the ARM and SOCRAM cores... */ + /* Should really be qualified by device id */ + if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { + bus->armrev = si_corerev(bus->sih); + } else { + DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__)); + goto fail; + } + + if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) { + if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) { + DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__)); + goto fail; + } + /* also populate base address */ + bus->dongle_ram_base = CA7_4365_RAM_BASE; + } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (!(bus->orig_ramsize = si_socram_size(bus->sih))) { + DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__)); + goto fail; + } + } else { + /* cr4 has a different way to find the RAM size from TCM's */ + if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) { + DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__)); + goto fail; + } + /* also populate base address */ + switch ((uint16)bus->sih->chip) { + case BCM4339_CHIP_ID: + case BCM4335_CHIP_ID: + bus->dongle_ram_base = CR4_4335_RAM_BASE; + break; + case BCM4358_CHIP_ID: + case BCM4356_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM43567_CHIP_ID: + case BCM43569_CHIP_ID: + case BCM4350_CHIP_ID: + case BCM43570_CHIP_ID: + bus->dongle_ram_base = CR4_4350_RAM_BASE; + break; + case BCM4360_CHIP_ID: + bus->dongle_ram_base = CR4_4360_RAM_BASE; + break; + CASE_BCM4345_CHIP: + bus->dongle_ram_base = (bus->sih->chiprev < 6) /* changed at 4345C0 */ + ? 
CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE; + break; + CASE_BCM43602_CHIP: + bus->dongle_ram_base = CR4_43602_RAM_BASE; + break; + case BCM4349_CHIP_GRPID: + /* RAM base changed from 4349c0(revid=9) onwards */ + bus->dongle_ram_base = ((bus->sih->chiprev < 9) ? + CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9); + break; + default: + bus->dongle_ram_base = 0; + DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n", + __FUNCTION__, bus->dongle_ram_base)); + } + } + bus->ramsize = bus->orig_ramsize; + if (dhd_dongle_memsize) + dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize); + + DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n", + bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base)); + + bus->srmemsize = si_socram_srmem_size(bus->sih); + + + bus->def_intmask = PCIE_MB_D2H_MB_MASK | PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1; + + /* Set the poll and/or interrupt flags */ + bus->intr = (bool)dhd_intr; + + bus->wait_for_d3_ack = 1; + bus->suspended = FALSE; + +#ifdef PCIE_OOB + gpio_handle_val = get_handle(OOB_PORT); + if (gpio_handle_val < 0) + { + DHD_ERROR(("%s: Could not get GPIO handle.\n", __FUNCTION__)); + ASSERT(FALSE); + } + + gpio_direction = 0; + ftdi_set_bitmode(gpio_handle_val, 0, BITMODE_BITBANG); + + /* Note BT core is also enabled here */ + gpio_port = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE; + gpio_write_port(gpio_handle_val, gpio_port); + + gpio_direction = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE; + ftdi_set_bitmode(gpio_handle_val, gpio_direction, BITMODE_BITBANG); + + bus->oob_enabled = TRUE; + + /* drive the Device_Wake GPIO low on startup */ + bus->device_wake_state = TRUE; + dhd_bus_set_device_wake(bus, FALSE); + dhd_bus_doorbell_timeout_reset(bus); +#endif /* PCIE_OOB */ + + DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__)); + return 0; + +fail: + if (bus->sih != NULL) { + si_detach(bus->sih); + bus->sih = NULL; + } + DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__)); + return -1; +} + +int +dhpcie_bus_unmask_interrupt(dhd_bus_t *bus) +{ + dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB); + return 0; +} +int +dhpcie_bus_mask_interrupt(dhd_bus_t *bus) +{ + dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0); + return 0; +} + +void +dhdpcie_bus_intr_enable(dhd_bus_t *bus) +{ + DHD_TRACE(("enable interrupts\n")); + if (bus && bus->sih && !bus->is_linkdown) { + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + dhpcie_bus_unmask_interrupt(bus); + } else { + si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, + bus->def_intmask, bus->def_intmask); + } + } else { + DHD_ERROR(("****** %s: failed ******\n", __FUNCTION__)); + DHD_ERROR(("bus: %p sih: %p bus->is_linkdown %d\n", + bus, bus ? bus->sih : NULL, bus ? bus->is_linkdown: -1)); + } +} + +void +dhdpcie_bus_intr_disable(dhd_bus_t *bus) +{ + + DHD_TRACE(("%s Enter\n", __FUNCTION__)); + + if (bus && bus->sih && !bus->is_linkdown) { + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + dhpcie_bus_mask_interrupt(bus); + } else { + si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, + bus->def_intmask, 0); + } + } else { + DHD_ERROR(("****** %s: failed ******\n", __FUNCTION__)); + DHD_ERROR(("bus: %p sih: %p bus->is_linkdown %d\n", + bus, bus ? bus->sih : NULL, bus ? 
+			bus->is_linkdown: -1));
+	}
+
+	DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
+/*
+ * dhdpcie_advertise_bus_cleanup advertises to the other bus user contexts
+ * (Tx, Rx, IOVAR, WD, etc.) that cleanup is in progress, and waits for those
+ * contexts to exit gracefully. Before marking the bus as busy, every bus
+ * usage context checks whether busstate is DHD_BUS_DOWN or
+ * DHD_BUS_DOWN_IN_PROGRESS; if so, it exits right there without marking
+ * dhd_bus_busy_state as BUSY.
+ */
+static void
+dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp)
+{
+	unsigned long flags;
+	int timeleft;
+
+	DHD_GENERAL_LOCK(dhdp, flags);
+	dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
+	DHD_GENERAL_UNLOCK(dhdp, flags);
+
+	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
+	if (timeleft == 0) {
+		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
+			__FUNCTION__, dhdp->dhd_bus_busy_state));
+		BUG_ON(1);
+	}
+
+	return;
+}
+
+static void
+dhdpcie_bus_remove_prep(dhd_bus_t *bus)
+{
+	unsigned long flags;
+	DHD_TRACE(("%s Enter\n", __FUNCTION__));
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	bus->dhd->busstate = DHD_BUS_DOWN;
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	dhd_os_sdlock(bus->dhd);
+
+	dhdpcie_bus_intr_disable(bus);
+	if (!bus->dhd->dongle_isolation) {
+		pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
+	}
+
+	dhd_os_sdunlock(bus->dhd);
+
+	DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
+/** Detach and free everything */
+void
+dhdpcie_bus_release(dhd_bus_t *bus)
+{
+	bool dongle_isolation = FALSE;
+	osl_t *osh = NULL;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+	if (bus) {
+
+		osh = bus->osh;
+		ASSERT(osh);
+
+		if (bus->dhd) {
+			dhdpcie_advertise_bus_cleanup(bus->dhd);
+			dongle_isolation = bus->dhd->dongle_isolation;
+			dhdpcie_bus_remove_prep(bus);
+
+			if (bus->intr) {
+				dhdpcie_bus_intr_disable(bus);
+				dhdpcie_free_irq(bus);
+			}
+			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
+			dhd_detach(bus->dhd);
+			dhd_free(bus->dhd);
+			bus->dhd = NULL;
+		}
+
+		/* unmap the regs and tcm here!!
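
The teardown sequence above leans on the advertise-and-drain handshake
implemented by dhdpcie_advertise_bus_cleanup(). Both sides of that handshake,
reduced to a sketch with hypothetical ex_ names; the real code updates the
busy bits under DHD_GENERAL_LOCK and uses dhd_os_busbusy_wait_negation() for
the timed wait:

#include <stdint.h>

enum ex_busstate { EX_BUS_DATA, EX_BUS_DOWN_IN_PROGRESS, EX_BUS_DOWN };

struct ex_pub {
	enum ex_busstate busstate;
	uint32_t busy_bitmap;  /* one bit per user context: Tx, Rx, ioctl, WD... */
};

/* Timed wait until *bitmap reaches zero; returns 0 on timeout. */
extern int ex_wait_until_zero(uint32_t *bitmap);

/* User-context side: refuse new work once teardown is advertised. */
static int ex_try_mark_busy(struct ex_pub *pub, uint32_t my_bit)
{
	if (pub->busstate == EX_BUS_DOWN_IN_PROGRESS ||
	    pub->busstate == EX_BUS_DOWN)
		return 0;               /* bail out; busy bit never set */
	pub->busy_bitmap |= my_bit; /* under a spinlock in the real code */
	return 1;
}

/* Teardown side: advertise first, then drain. */
static int ex_advertise_cleanup(struct ex_pub *pub)
{
	pub->busstate = EX_BUS_DOWN_IN_PROGRESS;
	return ex_wait_until_zero(&pub->busy_bitmap);
}
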
*/ + if (bus->regs) { + dhdpcie_bus_reg_unmap(osh, (ulong)bus->regs, DONGLE_REG_MAP_SIZE); + bus->regs = NULL; + } + if (bus->tcm) { + dhdpcie_bus_reg_unmap(osh, (ulong)bus->tcm, DONGLE_TCM_MAP_SIZE); + bus->tcm = NULL; + } + + dhdpcie_bus_release_malloc(bus, osh); + /* Detach pcie shared structure */ + if (bus->pcie_sh) { + MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t)); + bus->pcie_sh = NULL; + } + +#ifdef DHD_DEBUG + + if (bus->console.buf != NULL) + MFREE(osh, bus->console.buf, bus->console.bufsize); +#endif + + + /* Finally free bus info */ + MFREE(osh, bus, sizeof(dhd_bus_t)); + + } + + DHD_TRACE(("%s: Exit\n", __FUNCTION__)); +} /* dhdpcie_bus_release */ + + +void +dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag) +{ + DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__, + bus->dhd, bus->dhd->dongle_reset)); + + if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) { + DHD_TRACE(("%s Exit\n", __FUNCTION__)); + return; + } + + if (bus->sih) { + + if (!dongle_isolation) + pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs)); + + if (bus->ltrsleep_on_unload) { + si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0); + } + + if (bus->sih->buscorerev == 13) + pcie_serdes_iddqdisable(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs)); + + if (bus->sih != NULL) { + si_detach(bus->sih); + bus->sih = NULL; + } + if (bus->vars && bus->varsz) + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + + DHD_TRACE(("%s Exit\n", __FUNCTION__)); +} + +uint32 +dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size) +{ + uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size); + return data; +} + +/** 32 bit config write */ +void +dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data) +{ + OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data); +} + +void +dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data) +{ + OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data); +} + +void +dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size) +{ + int32 min_size = DONGLE_MIN_MEMSIZE; + /* Restrict the memsize to user specified limit */ + DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n", + dhd_dongle_memsize, min_size)); + if ((dhd_dongle_memsize > min_size) && + (dhd_dongle_memsize < (int32)bus->orig_ramsize)) + bus->ramsize = dhd_dongle_memsize; +} + +void +dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd && bus->dhd->dongle_reset) + return; + + if (bus->vars && bus->varsz) { + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + + DHD_TRACE(("%s: Exit\n", __FUNCTION__)); + return; + +} + +/** Stop bus module: clear pending frames, disable data flow */ +void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) +{ + uint32 status; + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!bus->dhd) + return; + + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__)); + goto done; + } + + DHD_DISABLE_RUNTIME_PM(bus->dhd); + + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + dhdpcie_bus_intr_disable(bus); + status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4); + dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status); + + if (!dhd_download_fw_on_driverload) { + dhd_dpc_kill(bus->dhd); 
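+		/* With busstate already DHD_BUS_DOWN and interrupts disabled
+		 * above, stopping the DPC here keeps any still-scheduled
+		 * deferred work from touching the dead bus.
+		 */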
+ } + + /* Clear rx control and wake any waiters */ + dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT); + dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP); + +done: + return; +} + +/** Watchdog timer function */ +bool dhd_bus_watchdog(dhd_pub_t *dhd) +{ + unsigned long flags; +#ifdef DHD_DEBUG + dhd_bus_t *bus; + bus = dhd->bus; + + DHD_GENERAL_LOCK(dhd, flags); + if (dhd->busstate == DHD_BUS_DOWN || + dhd->busstate == DHD_BUS_DOWN_IN_PROGRESS) { + DHD_GENERAL_UNLOCK(dhd, flags); + return FALSE; + } + dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_WD; + DHD_GENERAL_UNLOCK(dhd, flags); + +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0)); +#endif /* DHD_PCIE_RUNTIMEPM */ + + + + /* Poll for console output periodically */ + if (dhd->busstate == DHD_BUS_DATA && dhd_console_ms != 0) { + bus->console.count += dhd_watchdog_ms; + if (bus->console.count >= dhd_console_ms) { + bus->console.count -= dhd_console_ms; + /* Make sure backplane clock is on */ + if (dhdpcie_bus_readconsole(bus) < 0) + dhd_console_ms = 0; /* On error, stop trying */ + } + } +#endif /* DHD_DEBUG */ + +#ifdef PCIE_OOB + /* If haven't communicated with device for a while, deassert the Device_Wake GPIO */ + if (dhd_doorbell_timeout != 0 && !(bus->dhd->busstate == DHD_BUS_SUSPEND) && + dhd_timeout_expired(&bus->doorbell_timer)) { + dhd_bus_set_device_wake(bus, FALSE); + } +#endif /* PCIE_OOB */ + + DHD_GENERAL_LOCK(dhd, flags); + dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_WD; + DHD_GENERAL_UNLOCK(dhd, flags); + + return TRUE; +} /* dhd_bus_watchdog */ + + +#define DEADBEEF_PATTERN 0xADDEADDE // "DeadDead" +#define MEMCHECKINFO "/data/.memcheck.info" + +static int +dhd_get_memcheck_info(void) +{ + struct file *fp = NULL; + uint32 mem_val = 0; + int ret = 0; + char *filepath = MEMCHECKINFO; + loff_t pos=0; + + fp = filp_open(filepath, O_RDONLY, 0); + if (IS_ERR(fp)) { + DHD_ERROR(("[WIFI_SEC] %s: File [%s] doesn't exist\n", __FUNCTION__, filepath)); + goto done; + } else { + ret = kernel_read(fp, (char *)&mem_val, 4, &pos); + if (ret < 0) { + DHD_ERROR(("[WIFI_SEC] %s: File read error, ret=%d\n", __FUNCTION__, ret)); + filp_close(fp, NULL); + goto done; + } + + mem_val = bcm_atoi((char *)&mem_val); + + DHD_ERROR(("[WIFI_SEC]%s: MEMCHECK ENABLED = %d\n", __FUNCTION__, mem_val)); + filp_close(fp, NULL); + } +done: + return mem_val; +} + +static int +dhdpcie_mem_check(struct dhd_bus *bus) +{ + int bcmerror = BCME_OK; + int offset = 0; + int len = 0; + uint8 *memblock = NULL, *memptr; + int size = bus->ramsize; + int i; + uint32 memcheck_enabled; + + /* Read memcheck info from the file */ + /* 0 : Disable */ + /* 1 : "Dead Beef" pattern write */ + /* 2 : "Dead Beef" pattern write and checking the pattern value */ + + memcheck_enabled = dhd_get_memcheck_info(); + + DHD_ERROR(("%s: memcheck_enabled: %d \n", __FUNCTION__, memcheck_enabled)); + + if (memcheck_enabled == 0) { + return bcmerror; + } + + memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + goto err; + } + + if ((ulong)memblock % DHD_SDALIGN) { + memptr += (DHD_SDALIGN - ((ulong)memblock % DHD_SDALIGN)); + } + + for (i = 0; i < MEMBLOCK; i = i + 4) { + *(ulong*)(memptr + i) = DEADBEEF_PATTERN; + } + + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || + si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { + if (offset == 0) { + /* Add start of RAM address to the address given by user */ + offset += 
bus->dongle_ram_base; + } + } + + /* Write "DeadBeef" pattern with MEMBLOCK size */ + while (size) { + len = MIN(MEMBLOCK, size); + + bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + if (memcheck_enabled == 2) { + bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, (uint8 *)memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on read %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } else { + for (i = 0; i < len; i = i+4) { + if ((*(uint32*)(memptr + i)) != DEADBEEF_PATTERN) { + DHD_ERROR(("%s: error on reading pattern at " + "0x%08x\n", __FUNCTION__, (offset + i))); + bcmerror = BCME_ERROR; + goto err; + } + } + } + } + offset += MEMBLOCK; + size -= MEMBLOCK; + } + + DHD_ERROR(("%s: Writing the Dead Beef pattern is Done \n", __FUNCTION__)); + +err: + if (memblock) { + MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); + } + + return bcmerror; +} + +/* Download firmware image and nvram image */ +int +dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, + char *pfw_path, char *pnv_path) +{ + int ret; + + bus->fw_path = pfw_path; + bus->nv_path = pnv_path; + + DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n", + __FUNCTION__, bus->fw_path, bus->nv_path)); + + dhdpcie_mem_check(bus); + + ret = dhdpcie_download_firmware(bus, osh); + + return ret; +} + +static int +dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh) +{ + int ret = 0; +#if defined(BCM_REQUEST_FW) + uint chipid = bus->sih->chip; + uint revid = bus->sih->chiprev; + char fw_path[64] = "/lib/firmware/brcm/bcm"; /* path to firmware image */ + char nv_path[64]; /* path to nvram vars file */ + bus->fw_path = fw_path; + bus->nv_path = nv_path; + switch (chipid) { + case BCM43570_CHIP_ID: + bcmstrncat(fw_path, "43570", 5); + switch (revid) { + case 0: + bcmstrncat(fw_path, "a0", 2); + break; + case 2: + bcmstrncat(fw_path, "a2", 2); + break; + default: + DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__, + revid)); + break; + } + break; + default: + DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__, + chipid)); + return 0; + } + /* load board specific nvram file */ + snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path); + /* load firmware */ + snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path); +#endif /* BCM_REQUEST_FW */ + + DHD_OS_WAKE_LOCK(bus->dhd); + + ret = _dhdpcie_download_firmware(bus); + + DHD_OS_WAKE_UNLOCK(bus->dhd); + return ret; +} + +static int +dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path) +{ + int bcmerror = BCME_ERROR; + int offset = 0; + int len = 0; + char *imgbuf = NULL; + uint8 *memblock = NULL, *memptr; + + int offset_end = bus->ramsize; + + DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path)); + + /* Should succeed in opening image if it is actually given through registry + * entry or in module param. 
+ */ + imgbuf = dhd_os_open_image(pfw_path); + if (imgbuf == NULL) + goto err; + + memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + goto err; + } + if ((uint32)(uintptr)memblock % DHD_SDALIGN) + memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); + + DHD_INFO_HW4(("%s: dongle_ram_base: 0x%x ramsize: 0x%x tcm: %p\n", + __FUNCTION__, bus->dongle_ram_base, bus->ramsize, bus->tcm)); + /* Download image with MEMBLOCK size */ + while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) { + if (len < 0) { + DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len)); + bcmerror = BCME_ERROR; + goto err; + } + /* check if CR4/CA7 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || + si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { + /* if address is 0, store the reset instruction to be written in 0 */ + if (offset == 0) { + bus->resetinstr = *(((uint32*)memptr)); + /* Add start of RAM address to the address given by user */ + offset += bus->dongle_ram_base; + offset_end += offset; + } + } + bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + offset += MEMBLOCK; + + if (offset >= offset_end) { + DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n", + __FUNCTION__, offset, offset_end)); + bcmerror = BCME_ERROR; + goto err; + } + } + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); + + if (imgbuf) + dhd_os_close_image(imgbuf); + + return bcmerror; +} /* dhdpcie_download_code_file */ + +#ifdef CUSTOMER_HW4_DEBUG +#define MIN_NVRAMVARS_SIZE 128 +#endif /* CUSTOMER_HW4_DEBUG */ + +static int +dhdpcie_download_nvram(struct dhd_bus *bus) +{ + int bcmerror = BCME_ERROR; + uint len; + char * memblock = NULL; + char *bufp; + char *pnv_path; + bool nvram_file_exists; + bool nvram_uefi_exists = FALSE; + bool local_alloc = FALSE; + pnv_path = bus->nv_path; + + nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0')); + + /* First try UEFI */ + len = MAX_NVRAMBUF_SIZE; + dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, &len); + + /* If UEFI empty, then read from file system */ + if ((len == 0) || (memblock[0] == '\0')) { + + if (nvram_file_exists) { + len = MAX_NVRAMBUF_SIZE; + dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, &len); + if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) { + goto err; + } + } + else { + /* For SROM OTP no external file or UEFI required */ + bcmerror = BCME_OK; + } + } else { + nvram_uefi_exists = TRUE; + } + + DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len)); + + if (len > 0 && len <= MAX_NVRAMBUF_SIZE) { + bufp = (char *) memblock; + +#ifdef CACHE_FW_IMAGES + if (bus->processed_nvram_params_len) { + len = bus->processed_nvram_params_len; + } + + if (!bus->processed_nvram_params_len) { + bufp[len] = 0; + if (nvram_uefi_exists || nvram_file_exists) { + len = process_nvram_vars(bufp, len); + bus->processed_nvram_params_len = len; + } + } else +#else + { + bufp[len] = 0; + if (nvram_uefi_exists || nvram_file_exists) { + len = process_nvram_vars(bufp, len); + } + } +#endif /* CACHE_FW_IMAGES */ + + DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len)); +#ifdef CUSTOMER_HW4_DEBUG + if (len < MIN_NVRAMVARS_SIZE) { + DHD_ERROR(("%s: invalid nvram size in 
process_nvram_vars \n",
+				__FUNCTION__));
+			bcmerror = BCME_ERROR;
+			goto err;
+		}
+#endif /* CUSTOMER_HW4_DEBUG */
+
+		if (len % 4) {
+			len += 4 - (len % 4);
+		}
+		bufp += len;
+		*bufp++ = 0;
+		if (len)
+			bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
+		if (bcmerror) {
+			DHD_ERROR(("%s: error downloading vars: %d\n",
+				__FUNCTION__, bcmerror));
+		}
+	}
+
+
+err:
+	if (memblock) {
+		if (local_alloc) {
+			MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
+		} else {
+			dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE);
+		}
+	}
+
+	return bcmerror;
+}
+
+
+#ifdef BCMEMBEDIMAGE
+int
+dhdpcie_download_code_array(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+	int offset = 0;
+	unsigned char *p_dlarray = NULL;
+	unsigned int dlarray_size = 0;
+	unsigned int downloaded_len, remaining_len, len;
+	char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
+	uint8 *memblock = NULL, *memptr;
+
+	downloaded_len = 0;
+	remaining_len = 0;
+	len = 0;
+
+	p_dlarray = dlarray;
+	dlarray_size = sizeof(dlarray);
+	p_dlimagename = dlimagename;
+	p_dlimagever = dlimagever;
+	p_dlimagedate = dlimagedate;
+
+	if ((p_dlarray == 0) || (dlarray_size == 0) || (dlarray_size > bus->ramsize) ||
+		(p_dlimagename == 0) || (p_dlimagever == 0) || (p_dlimagedate == 0))
+		goto err;
+
+	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+	if (memblock == NULL) {
+		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+		goto err;
+	}
+	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+	while (downloaded_len < dlarray_size) {
+		remaining_len = dlarray_size - downloaded_len;
+		if (remaining_len >= MEMBLOCK)
+			len = MEMBLOCK;
+		else
+			len = remaining_len;
+
+		memcpy(memptr, (p_dlarray + downloaded_len), len);
+		/* check if CR4/CA7 */
+		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
+			si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
+			/* if address is 0, store the reset instruction to be written in 0 */
+			if (offset == 0) {
+				bus->resetinstr = *(((uint32*)memptr));
+				/* Add start of RAM address to the address given by user */
+				offset += bus->dongle_ram_base;
+			}
+		}
+		bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
+		downloaded_len += len;
+		if (bcmerror) {
+			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+				__FUNCTION__, bcmerror, MEMBLOCK, offset));
+			goto err;
+		}
+		offset += MEMBLOCK;
+	}
+
+#ifdef DHD_DEBUG
+	/* Upload and compare the downloaded code */
+	{
+		unsigned char *ularray = NULL;
+		unsigned int uploaded_len;
+		uploaded_len = 0;
+		bcmerror = -1;
+		ularray = MALLOC(bus->dhd->osh, dlarray_size);
+		if (ularray == NULL)
+			goto upload_err;
+		/* Upload image to verify downloaded contents. */
+		offset = bus->dongle_ram_base;
+		memset(ularray, 0xaa, dlarray_size);
+		while (uploaded_len < dlarray_size) {
+			remaining_len = dlarray_size - uploaded_len;
+			if (remaining_len >= MEMBLOCK)
+				len = MEMBLOCK;
+			else
+				len = remaining_len;
+			bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
+				(uint8 *)(ularray + uploaded_len), len);
+			if (bcmerror) {
+				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+					__FUNCTION__, bcmerror, MEMBLOCK, offset));
+				goto upload_err;
+			}
+
+			uploaded_len += len;
+			offset += MEMBLOCK;
+		}
+
+		if (memcmp(p_dlarray, ularray, dlarray_size)) {
+			DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+				__FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
+			goto upload_err;
+
+		} else
+			DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+				__FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
+upload_err:
+		if (ularray)
+			MFREE(bus->dhd->osh, ularray, dlarray_size);
+	}
+#endif /* DHD_DEBUG */
+err:
+
+	if (memblock)
+		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+	return bcmerror;
+} /* dhdpcie_download_code_array */
+#endif /* BCMEMBEDIMAGE */
+
+
+static int
+_dhdpcie_download_firmware(struct dhd_bus *bus)
+{
+	int bcmerror = -1;
+
+	bool embed = FALSE;	/* download embedded firmware */
+	bool dlok = FALSE;	/* download firmware succeeded */
+
+	/* Out immediately if no image to download */
+	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+		embed = TRUE;
+#else
+		DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
+		return 0;
+#endif
+	}
+
+	/* Keep arm in reset */
+	if (dhdpcie_bus_download_state(bus, TRUE)) {
+		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* External image takes precedence if specified */
+	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+		if (dhdpcie_download_code_file(bus, bus->fw_path)) {
+			DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+			embed = TRUE;
+#else
+			goto err;
+#endif
+		} else {
+			embed = FALSE;
+			dlok = TRUE;
+		}
+	}
+
+#ifdef BCMEMBEDIMAGE
+	if (embed) {
+		if (dhdpcie_download_code_array(bus)) {
+			DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+			goto err;
+		} else {
+			dlok = TRUE;
+		}
+	}
+#else
+	BCM_REFERENCE(embed);
+#endif
+	if (!dlok) {
+		DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* EXAMPLE: nvram_array */
+	/* If a valid nvram_array is specified as above, it can be passed down to dongle */
+	/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
+
+
+	/* External nvram takes precedence if specified */
+	if (dhdpcie_download_nvram(bus)) {
+		DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+		goto err;
+	}
+
+	/* Take arm out of reset */
+	if (dhdpcie_bus_download_state(bus, FALSE)) {
+		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+		goto err;
+	}
+
+	bcmerror = 0;
+
+err:
+	return bcmerror;
+} /* _dhdpcie_download_firmware */
+
+#define CONSOLE_LINE_MAX 192
+
+#ifdef DHD_DEBUG
+static int
+dhdpcie_bus_readconsole(dhd_bus_t *bus)
+{
+	dhd_console_t *c = &bus->console;
+	uint8 line[CONSOLE_LINE_MAX], ch;
+	uint32 n, idx, addr;
+	int rv;
+
+	/* Don't do anything until FWREADY updates console address */
+	if (bus->console_addr == 0)
+		return -1;
+
+	/* Read console log struct */
+	addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
+
+	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log,
sizeof(c->log))) < 0) + return rv; + + /* Allocate console buffer (one time only) */ + if (c->buf == NULL) { + c->bufsize = ltoh32(c->log.buf_size); + if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL) + return BCME_NOMEM; + } + idx = ltoh32(c->log.idx); + + /* Protect against corrupt value */ + if (idx > c->bufsize) + return BCME_ERROR; + + /* Skip reading the console buffer if the index pointer has not moved */ + if (idx == c->last) + return BCME_OK; + + /* Read the console buffer */ + addr = ltoh32(c->log.buf); + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0) + return rv; + + while (c->last != idx) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + if (c->last == idx) { + /* This would output a partial line. Instead, back up + * the buffer pointer and output this line next time around. + */ + if (c->last >= n) + c->last -= n; + else + c->last = c->bufsize - n; + goto break2; + } + ch = c->buf[c->last]; + c->last = (c->last + 1) % c->bufsize; + if (ch == '\n') + break; + line[n] = ch; + } + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + printf("CONSOLE: %s\n", line); + + } + } +break2: + + return BCME_OK; +} /* dhdpcie_bus_readconsole */ +#endif /* DHD_DEBUG */ + +static int +dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size) +{ + int bcmerror = 0; + uint msize = 512; + char *mbuffer = NULL; + char *console_buffer = NULL; + uint maxstrlen = 256; + char *str = NULL; + trap_t tr; + pciedev_shared_t *pciedev_shared = bus->pcie_sh; + struct bcmstrbuf strbuf; + uint32 console_ptr, console_size, console_index; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, i, addr; + int rv; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (DHD_NOCHECKDIED_ON()) { + return 0; + } + + if (data == NULL) { + /* + * Called after a rx ctrl timeout. "data" is NULL. + * allocate memory to trace the trap or assert. + */ + size = msize; + mbuffer = data = MALLOC(bus->dhd->osh, msize); + + if (mbuffer == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize)); + bcmerror = BCME_NOMEM; + goto done; + } + } + + if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen)); + bcmerror = BCME_NOMEM; + goto done; + } + + if ((bcmerror = dhdpcie_readshared(bus)) < 0) { + goto done; + } + + bcm_binit(&strbuf, data, size); + + bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n", + pciedev_shared->msgtrace_addr, pciedev_shared->console_addr); + + if ((pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) + */ + bcm_bprintf(&strbuf, "Assrt not built in dongle\n"); + } + + if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) 
+ */ + bcm_bprintf(&strbuf, "No trap%s in dongle", + (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT) + ?"/assrt" :""); + } else { + if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) { + /* Download assert */ + bcm_bprintf(&strbuf, "Dongle assert"); + if (bus->pcie_sh->assert_exp_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, + bus->pcie_sh->assert_exp_addr, + (uint8 *)str, maxstrlen)) < 0) { + goto done; + } + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " expr \"%s\"", str); + } + + if (bus->pcie_sh->assert_file_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, + bus->pcie_sh->assert_file_addr, + (uint8 *)str, maxstrlen)) < 0) { + goto done; + } + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " file \"%s\"", str); + } + + bcm_bprintf(&strbuf, " line %d ", bus->pcie_sh->assert_line); + } + + if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) { + bus->dhd->dongle_trap_occured = TRUE; + if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE, + bus->pcie_sh->trap_addr, (uint8*)&tr, sizeof(trap_t))) < 0) { + goto done; + } + + bcm_bprintf(&strbuf, + "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," + " lp 0x%x, rpc 0x%x" + "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, " + "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n", + ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr), + ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc), + ltoh32(bus->pcie_sh->trap_addr), + ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3), + ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7)); + + addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log); + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, + (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) { + goto printbuf; + } + + addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size); + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, + (uint8 *)&console_size, sizeof(console_size))) < 0) { + goto printbuf; + } + + addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx); + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, + (uint8 *)&console_index, sizeof(console_index))) < 0) { + goto printbuf; + } + + console_ptr = ltoh32(console_ptr); + console_size = ltoh32(console_size); + console_index = ltoh32(console_index); + + if (console_size > CONSOLE_BUFFER_MAX || + !(console_buffer = MALLOC(bus->dhd->osh, console_size))) { + goto printbuf; + } + + if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr, + (uint8 *)console_buffer, console_size)) < 0) { + goto printbuf; + } + + for (i = 0, n = 0; i < console_size; i += n + 1) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + ch = console_buffer[(console_index + i + n) % console_size]; + if (ch == '\n') + break; + line[n] = ch; + } + + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + /* Don't use DHD_ERROR macro since we print + * a lot of information quickly. 
The macro
+					 * will truncate a lot of the printfs
+					 */
+
+					printf("CONSOLE: %s\n", line);
+				}
+			}
+		}
+	}
+
+printbuf:
+	if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
+		printf("%s: %s\n", __FUNCTION__, strbuf.origbuf);
+
+		/* wake up IOCTL wait event */
+		dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
+
+#if defined(DHD_FW_COREDUMP)
+		/* save core dump or write to a file */
+		if (bus->dhd->memdump_enabled) {
+			bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
+			dhdpcie_mem_dump(bus);
+		}
+#endif /* DHD_FW_COREDUMP */
+
+
+	}
+
+done:
+	if (mbuffer)
+		MFREE(bus->dhd->osh, mbuffer, msize);
+	if (str)
+		MFREE(bus->dhd->osh, str, maxstrlen);
+
+	if (console_buffer)
+		MFREE(bus->dhd->osh, console_buffer, console_size);
+
+	return bcmerror;
+} /* dhdpcie_checkdied */
+
+
+/* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
+void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
+{
+	int ret = 0;
+	int size; /* Full mem size */
+	int start; /* Start address */
+	int read_size = 0; /* Read size of each iteration */
+	uint8 *databuf = buf;
+
+	if (bus == NULL) {
+		return;
+	}
+
+	start = bus->dongle_ram_base;
+	/* Get full mem size */
+	size = bus->ramsize;
+	/* Read mem content */
+	while (size)
+	{
+		read_size = MIN(MEMBLOCK, size);
+		if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) {
+			return;
+		}
+
+		/* Decrement size and increment start address */
+		size -= read_size;
+		start += read_size;
+		databuf += read_size;
+	}
+	bus->dhd->soc_ram = buf;
+	bus->dhd->soc_ram_length = bus->ramsize;
+	return;
+}
+
+
+#if defined(DHD_FW_COREDUMP)
+static int
+dhdpcie_mem_dump(dhd_bus_t *bus)
+{
+	int ret = 0;
+	int size; /* Full mem size */
+	int start = bus->dongle_ram_base; /* Start address */
+	int read_size = 0; /* Read size of each iteration */
+	uint8 *buf = NULL, *databuf = NULL;
+
+#ifdef EXYNOS_PCIE_DEBUG
+	exynos_pcie_register_dump(1);
+#endif /* EXYNOS_PCIE_DEBUG */
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	if (bus->is_linkdown) {
+		DHD_ERROR(("%s: PCIe link was down, so skip the dump\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+	/* Get full mem size */
+	size = bus->ramsize;
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+	buf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_MEMDUMP_BUF, size);
+	if (buf)
+		bzero(buf, size); /* only zero a successfully obtained buffer */
+#else
+	buf = MALLOC(bus->dhd->osh, size);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+	if (!buf) {
+		DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
+		return BCME_ERROR;
+	}
+
+	/* Read mem content */
+	DHD_TRACE_HW4(("Dump dongle memory"));
+	databuf = buf;
+	while (size)
+	{
+		read_size = MIN(MEMBLOCK, size);
+		if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size)))
+		{
+			DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
+			if (buf) {
+				MFREE(bus->dhd->osh, buf, size);
+			}
+			return BCME_ERROR;
+		}
+		DHD_TRACE(("."));
+
+		/* Decrement size and increment start address */
+		size -= read_size;
+		start += read_size;
+		databuf += read_size;
+	}
+
+	DHD_TRACE_HW4(("%s: Copy dongle memory to the embedded buffer\n", __FUNCTION__));
+
+	dhd_save_fwdump(bus->dhd, buf, bus->ramsize);
+	dhd_schedule_memdump(bus->dhd, buf, bus->ramsize);
+
+	return ret;
+}
+
+int
+dhd_bus_mem_dump(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+
+	if (bus->suspended) {
+		DHD_ERROR(("%s: Bus is suspended, so skip the dump\n", __FUNCTION__));
+		return 0;
+	}
+
+	return dhdpcie_mem_dump(bus);
+}
+#endif /* DHD_FW_COREDUMP */
+
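+/*
+ * Usage sketch (illustrative only): a caller that has detected a dongle
+ * trap and wants a SOC RAM dump would typically do
+ *
+ *	if (dhdp->memdump_enabled) {
+ *		dhdp->memdump_type = DUMP_TYPE_DONGLE_TRAP;
+ *		dhd_bus_mem_dump(dhdp);
+ *	}
+ *
+ * dhd_bus_mem_dump() refuses to run while the bus is suspended and
+ * otherwise forwards to dhdpcie_mem_dump() above; dhd_socram_dump()
+ * below is the equivalent entry point for callers holding a dhd_bus_t.
+ */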
+int +dhd_socram_dump(dhd_bus_t *bus) +{ +#if defined(DHD_FW_COREDUMP) + return (dhdpcie_mem_dump(bus)); +#else + return -1; +#endif +} + +/** + * Transfers bytes from host to dongle using pio mode. + * Parameter 'address' is a backplane address. + */ +static int +dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size) +{ + uint dsize; + int detect_endian_flag = 0x01; + bool little_endian; + + if (write && bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return BCME_ERROR; + } + + /* Detect endianness. */ + little_endian = *(char *)&detect_endian_flag; + + /* In remap mode, adjust address beyond socram and redirect + * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize + * is not backplane accessible + */ + + /* Determine initial transfer parameters */ + dsize = sizeof(uint64); + + /* Do the transfer(s) */ + if (write) { + while (size) { + if (size >= sizeof(uint64) && little_endian && +#ifdef CONFIG_64BIT + !(address % 8) && +#endif /* CONFIG_64BIT */ + 1) { + dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data)); + } else { + dsize = sizeof(uint8); + dhdpcie_bus_wtcm8(bus, address, *data); + } + + /* Adjust for next transfer (if any) */ + if ((size -= dsize)) { + data += dsize; + address += dsize; + } + } + } else { + while (size) { + if (size >= sizeof(uint64) && little_endian && +#ifdef CONFIG_64BIT + !(address % 8) && +#endif /* CONFIG_64BIT */ + 1) { + *(uint64 *)data = dhdpcie_bus_rtcm64(bus, address); + } else { + dsize = sizeof(uint8); + *data = dhdpcie_bus_rtcm8(bus, address); + } + + /* Adjust for next transfer (if any) */ + if ((size -= dsize) > 0) { + data += dsize; + address += dsize; + } + } + } + return BCME_OK; +} /* dhdpcie_bus_membytes */ + +/** + * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue + * to the (non flow controlled) flow ring. + */ +int BCMFASTPATH +dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs) +{ + flow_ring_node_t *flow_ring_node; + int ret = BCME_OK; +#ifdef DHD_LOSSLESS_ROAMING + dhd_pub_t *dhdp = bus->dhd; +#endif + DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id)); + + /* ASSERT on flow_id */ + if (flow_id >= bus->max_sub_queues) { + DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__, + flow_id, bus->max_sub_queues)); + return 0; + } + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id); + +#ifdef DHD_LOSSLESS_ROAMING + if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) { + DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n", + __FUNCTION__, flow_ring_node->flow_info.tid)); + return BCME_OK; + } +#endif /* DHD_LOSSLESS_ROAMING */ + + { + unsigned long flags; + void *txp = NULL; + flow_queue_t *queue; +#ifdef DHD_LOSSLESS_ROAMING + struct ether_header *eh; + uint8 *pktdata; +#endif /* DHD_LOSSLESS_ROAMING */ + + queue = &flow_ring_node->queue; /* queue associated with flow ring */ + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + return BCME_NOTREADY; + } + + while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { + PKTORPHAN(txp); + + /* + * Modifying the packet length caused P2P cert failures. + * Specifically on test cases where a packet of size 52 bytes + * was injected, the sniffer capture showed 62 bytes because of + * which the cert tests failed. So making the below change + * only Router specific. 
+ */ + +#ifdef DHDTCPACK_SUPPRESS + if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) { + ret = dhd_tcpack_check_xmit(bus->dhd, txp); + if (ret != BCME_OK) { + DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n", + __FUNCTION__)); + } + } +#endif /* DHDTCPACK_SUPPRESS */ +#ifdef DHD_LOSSLESS_ROAMING + pktdata = (uint8 *)PKTDATA(OSH_NULL, txp); + eh = (struct ether_header *) pktdata; + if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) { + uint8 prio = (uint8)PKTPRIO(txp); + + /* Restore to original priority for 802.1X packet */ + if (prio == PRIO_8021D_NC) { + PKTSETPRIO(txp, PRIO_8021D_BE); + } + } +#endif /* DHD_LOSSLESS_ROAMING */ + + /* Attempt to transfer packet over flow ring */ + ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex); + if (ret != BCME_OK) { /* may not have resources in flow ring */ + DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__, ret)); + dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE); + /* reinsert at head */ + dhd_flow_queue_reinsert(bus->dhd, queue, txp); + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* If we are able to requeue back, return success */ + return BCME_OK; + } + } + + dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE); + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + } + + return ret; +} /* dhd_bus_schedule_queue */ + +/** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */ +int BCMFASTPATH +dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx) +{ + uint16 flowid; + flow_queue_t *queue; + flow_ring_node_t *flow_ring_node; + unsigned long flags; + int ret = BCME_OK; + void *txp_pend = NULL; + + if (!bus->dhd->flowid_allocator) { + DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__)); + goto toss; + } + + flowid = DHD_PKT_GET_FLOWID(txp); + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + + DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n", + __FUNCTION__, flowid, flow_ring_node->status, + flow_ring_node->active)); + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + if ((flowid >= bus->dhd->num_flow_rings) || + (!flow_ring_node->active) || + (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) || + (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING)) { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n", + __FUNCTION__, flowid, flow_ring_node->status, + flow_ring_node->active)); + ret = BCME_ERROR; + goto toss; + } + + queue = &flow_ring_node->queue; /* queue associated with flow ring */ + + if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) { + txp_pend = txp; + } + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + if (flow_ring_node->status) { + DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n", + __FUNCTION__, flowid, flow_ring_node->status, + flow_ring_node->active)); + if (txp_pend) { + txp = txp_pend; + goto toss; + } + return BCME_OK; + } + ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */ + + /* If we have anything pending, try to push into q */ + if (txp_pend) { + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) { + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + txp = txp_pend; + goto toss; + } + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + } + + return ret; + +toss: + DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret)); + PKTCFREE(bus->dhd->osh, txp, TRUE); + return ret; +} /* dhd_bus_txdata */ + + +void 
+dhd_bus_stop_queue(struct dhd_bus *bus) +{ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON); + bus->bus_flowctrl = TRUE; +} + +void +dhd_bus_start_queue(struct dhd_bus *bus) +{ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); + bus->bus_flowctrl = TRUE; +} + +#if defined(DHD_DEBUG) +/* Device console input function */ +int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen) +{ + dhd_bus_t *bus = dhd->bus; + uint32 addr, val; + int rv; + /* Address could be zero if CONSOLE := 0 in dongle Makefile */ + if (bus->console_addr == 0) + return BCME_UNSUPPORTED; + + /* Don't allow input if dongle is in reset */ + if (bus->dhd->dongle_reset) { + dhd_os_sdunlock(bus->dhd); + return BCME_NOTREADY; + } + + /* Zero cbuf_index */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx); + val = htol32(0); + if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* Write message into cbuf */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf); + if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0) + goto done; + + /* Write length into vcons_in */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in); + val = htol32(msglen); + if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* generate an interrupt to dongle to indicate that it needs to process cons command */ + dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT); +done: + return rv; +} /* dhd_bus_console_in */ +#endif /* defined(DHD_DEBUG) */ + +/** + * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is + * contained in 'pkt'. Processes rx frame, forwards up the layer to netif. + */ +void BCMFASTPATH +dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count) +{ + dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0); +} + +/** 'offset' is a backplane address */ +void +dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data) +{ + *(volatile uint8 *)(bus->tcm + offset) = (uint8)data; +} + +uint8 +dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset) +{ + volatile uint8 data; + + data = *(volatile uint8 *)(bus->tcm + offset); + + return data; +} + +void +dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data) +{ + *(volatile uint32 *)(bus->tcm + offset) = (uint32)data; +} +void +dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data) +{ + *(volatile uint16 *)(bus->tcm + offset) = (uint16)data; +} +void +dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) +{ + *(volatile uint64 *)(bus->tcm + offset) = (uint64)data; +} + +uint16 +dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset) +{ + volatile uint16 data; + + data = *(volatile uint16 *)(bus->tcm + offset); + + return data; +} + +uint32 +dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset) +{ + volatile uint32 data; + + data = *(volatile uint32 *)(bus->tcm + offset); + + return data; +} + +uint64 +dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) +{ + volatile uint64 data; + + data = *(volatile uint64 *)(bus->tcm + offset); + + return data; +} + +/** A snippet of dongle memory is shared between host and dongle */ +void +dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid) +{ + uint64 long_data; + ulong tcm_offset; + + DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len)); + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + switch (type) { + case D2H_DMA_SCRATCH_BUF: + { + pciedev_shared_t *sh = 
(pciedev_shared_t*)bus->shared_addr; + long_data = HTOL64(*(uint64 *)data); + tcm_offset = (ulong)&(sh->host_dma_scratch_buffer); + dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); + prhex(__FUNCTION__, data, len); + break; + } + + case D2H_DMA_SCRATCH_BUF_LEN: + { + pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr; + tcm_offset = (ulong)&(sh->host_dma_scratch_buffer_len); + dhdpcie_bus_wtcm32(bus, tcm_offset, (uint32) HTOL32(*(uint32 *)data)); + prhex(__FUNCTION__, data, len); + break; + } + + case H2D_DMA_INDX_WR_BUF: + { + pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; + + long_data = HTOL64(*(uint64 *)data); + tcm_offset = (ulong)shmem->rings_info_ptr; + tcm_offset += OFFSETOF(ring_info_t, h2d_w_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); + prhex(__FUNCTION__, data, len); + break; + } + + case H2D_DMA_INDX_RD_BUF: + { + pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; + long_data = HTOL64(*(uint64 *)data); + tcm_offset = (ulong)shmem->rings_info_ptr; + tcm_offset += OFFSETOF(ring_info_t, h2d_r_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); + prhex(__FUNCTION__, data, len); + break; + } + + case D2H_DMA_INDX_WR_BUF: + { + pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; + long_data = HTOL64(*(uint64 *)data); + tcm_offset = (ulong)shmem->rings_info_ptr; + tcm_offset += OFFSETOF(ring_info_t, d2h_w_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); + prhex(__FUNCTION__, data, len); + break; + } + + case D2H_DMA_INDX_RD_BUF: + { + pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh; + long_data = HTOL64(*(uint64 *)data); + tcm_offset = (ulong)shmem->rings_info_ptr; + tcm_offset += OFFSETOF(ring_info_t, d2h_r_idx_hostaddr); + dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len); + prhex(__FUNCTION__, data, len); + break; + } + + case RING_ITEM_LEN: + tcm_offset = bus->ring_sh[ringid].ring_mem_addr; + tcm_offset += OFFSETOF(ring_mem_t, len_items); + dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data)); + break; + + case RING_MAX_ITEMS: + tcm_offset = bus->ring_sh[ringid].ring_mem_addr; + tcm_offset += OFFSETOF(ring_mem_t, max_item); + dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data)); + break; + + case RING_BUF_ADDR: + long_data = HTOL64(*(uint64 *)data); + tcm_offset = bus->ring_sh[ringid].ring_mem_addr; + tcm_offset += OFFSETOF(ring_mem_t, base_addr); + dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8 *) &long_data, len); + prhex(__FUNCTION__, data, len); + break; + + case RING_WR_UPD: + tcm_offset = bus->ring_sh[ringid].ring_state_w; + dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data)); + break; + + case RING_RD_UPD: + tcm_offset = bus->ring_sh[ringid].ring_state_r; + dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data)); + break; + + case D2H_MB_DATA: + dhdpcie_bus_wtcm32(bus, bus->d2h_mb_data_ptr_addr, + (uint32) HTOL32(*(uint32 *)data)); + break; + + case H2D_MB_DATA: + dhdpcie_bus_wtcm32(bus, bus->h2d_mb_data_ptr_addr, + (uint32) HTOL32(*(uint32 *)data)); + break; + + default: + break; + } +} /* dhd_bus_cmn_writeshared */ + +/** A snippet of dongle memory is shared between host and dongle */ +void +dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid) +{ + ulong tcm_offset; + + switch (type) { + case RING_WR_UPD: + tcm_offset = bus->ring_sh[ringid].ring_state_w; + *(uint16*)data = 
LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset)); + break; + case RING_RD_UPD: + tcm_offset = bus->ring_sh[ringid].ring_state_r; + *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset)); + break; + case TOTAL_LFRAG_PACKET_CNT: + { + pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr; + *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, + (ulong) &sh->total_lfrag_pkt_cnt)); + break; + } + case H2D_MB_DATA: + *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->h2d_mb_data_ptr_addr)); + break; + case D2H_MB_DATA: + *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->d2h_mb_data_ptr_addr)); + break; + case MAX_HOST_RXBUFS: + { + pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr; + *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, + (ulong) &sh->max_host_rxbufs)); + break; + } + default : + break; + } +} + +uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus) +{ + return ((pciedev_shared_t*)bus->pcie_sh)->flags; +} + +void +dhd_bus_clearcounts(dhd_pub_t *dhdp) +{ +} + +int +dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + dhd_bus_t *bus = dhdp->bus; + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + uint32 actionid; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + ASSERT(len >= 0); + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + + DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__, + name, (set ? "set" : "get"), len, plen)); + + /* Look up var locally; if not found pass to host driver */ + if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) { + goto exit; + } + + + /* set up 'params' pointer in case this is a set command so that + * the convenience int and bool code can be common to set and get + */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + /* all other types are integer sized */ + val_size = sizeof(int); + + actionid = set ? 
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+	bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+	return bcmerror;
+} /* dhd_bus_iovar_op */
+
+#ifdef BCM_BUZZZ
+#include <bcm_buzzz.h>
+
+int
+dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log,
+	const int num_counters)
+{
+	int bytes = 0;
+	uint32 ctr;
+	uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX];
+	uint32 delta[BCM_BUZZZ_COUNTERS_MAX];
+
+	/* Compute elapsed counter values per counter event type */
+	for (ctr = 0U; ctr < num_counters; ctr++) {
+		prev[ctr] = core[ctr];
+		curr[ctr] = *log++;
+		core[ctr] = curr[ctr];  /* saved for next log */
+
+		if (curr[ctr] < prev[ctr])
+			delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
+		else
+			delta[ctr] = (curr[ctr] - prev[ctr]);
+
+		bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
+	}
+
+	return bytes;
+}
+
+typedef union cm3_cnts { /* export this in bcm_buzzz.h */
+	uint32 u32;
+	uint8  u8[4];
+	struct {
+		uint8 cpicnt;
+		uint8 exccnt;
+		uint8 sleepcnt;
+		uint8 lsucnt;
+	};
+} cm3_cnts_t;
+
+int
+dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log)
+{
+	int bytes = 0;
+
+	uint32 cyccnt, instrcnt;
+	cm3_cnts_t cm3_cnts;
+	uint8 foldcnt;
+
+	{   /* 32bit cyccnt */
+		uint32 curr, prev, delta;
+		prev = core[0]; curr = *log++; core[0] = curr;
+		if (curr < prev)
+			delta = curr + (~0U - prev);
+		else
+			delta = (curr - prev);
+
+		bytes += sprintf(p + bytes, "%12u ", delta);
+		cyccnt = delta;
+	}
+
+	{   /* Extract the 4 cnts: cpi, exc, sleep and lsu */
+		int i;
+		uint8 max8 = ~0;
+		cm3_cnts_t curr, prev, delta;
+		prev.u32 = core[1]; curr.u32 = *log++; core[1] = curr.u32;
+		for (i = 0; i < 4; i++) {
+			if (curr.u8[i] < prev.u8[i])
+				delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
+			else
+				delta.u8[i] = (curr.u8[i] - prev.u8[i]);
+			bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
+		}
+		cm3_cnts.u32 = delta.u32;
+	}
+
+	{   /* Extract the foldcnt from arg0 */
+		uint8 curr, prev, delta, max8 = ~0;
+		bcm_buzzz_arg0_t arg0; arg0.u32 = *log;
+		prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
+		if (curr < prev)
+			delta = curr + (max8 - prev);
+		else
+			delta = (curr - prev);
+		bytes += sprintf(p + bytes, "%4u ", delta);
+		foldcnt = delta;
+	}
+
+	instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2] +
+		cm3_cnts.u8[3]) + foldcnt;
+	if (instrcnt > 0xFFFFFF00)
+		bytes += sprintf(p + bytes, "[%10s] ", "~");
+	else
+		bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
+	return bytes;
+}
+
+int
+dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz)
+{
+	int bytes = 0;
+	bcm_buzzz_arg0_t arg0;
+	static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS;
+
+	if (buzzz->counters == 6) {
+		bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log);
+		log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
+	} else {
+		bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters);
+		log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */
+	}
+
+	/* Dump the logged arguments using the registered formats */
+	arg0.u32 = *log++;
+
+	switch (arg0.klog.args) {
+	case 0:
+		bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
+		break;
+	case 1:
+	{
+		uint32 arg1 = *log++;
+		bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
+		break;
+	}
+	case 2:
+	{
+		uint32 arg1, arg2;
+		arg1 = *log++; arg2 = *log++;
+		bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2);
+		break;
+	}
+	case 3:
+	{
+		uint32 arg1, arg2, arg3;
+		arg1 = *log++; arg2 = *log++; arg3 = *log++;
+		bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3);
+		break;
+	}
+	case 4:
+	{
+		uint32 arg1, arg2, arg3, arg4;
+		arg1 = *log++; arg2 = *log++;
+		arg3 = *log++; arg4 = *log++;
+		bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4);
+		break;
+	}
+	default:
+		printf("Maximum 4 arguments supported\n");
+		break;
+	}
+
+	bytes += sprintf(p + bytes, "\n");
+
+	return bytes;
+}
+
+void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p)
+{
+	int i;
+	uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX];
+	void * log;
+
+	for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) {
+		core[i] = 0;
+	}
+
+	log_sz = buzzz_p->log_sz;
+
+	part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
+
+	if (buzzz_p->wrap == TRUE) {
+		part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
+		total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz;
+	} else {
+		part2 = 0U;
+		total = buzzz_p->count;
+	}
+
+	if (total == 0U) {
+		printf("bcm_buzzz_dump total<%u> done\n", total);
+		return;
+	} else {
+		printf("bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n",
+			total, part2, part1);
+	}
+
+	if (part2) {   /* with wrap */
+		log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
+		while (part2--) {   /* from cur to end : part2 */
+			p[0] = '\0';
+			dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
+			printf("%s", p);
+			log = (void*)((size_t)log + buzzz_p->log_sz);
+		}
+	}
+
+	log = (void*)buffer_p;
+	while (part1--) {
+		p[0] = '\0';
+		dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
+		printf("%s", p);
+		log = (void*)((size_t)log + buzzz_p->log_sz);
+	}
+
+	printf("bcm_buzzz_dump done.\n");
+}
+
+int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
+{
+	bcm_buzzz_t * buzzz_p = NULL;
+	void * buffer_p = NULL;
+	char * page_p = NULL;
+	pciedev_shared_t *sh;
+	int ret = 0;
+
+	if (bus->dhd->busstate != DHD_BUS_DATA) {
+		return BCME_UNSUPPORTED;
+	}
+	if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
+		printf("Page memory allocation failure\n");
+		goto done;
+	}
+	if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) {
+		printf("BCM BUZZZ memory allocation failure\n");
+		goto done;
+	}
+
+	ret = dhdpcie_readshared(bus);
+	if (ret < 0) {
+		DHD_ERROR(("%s: Shared area read failed\n", __FUNCTION__));
+		goto done;
+	}
+
+	sh = bus->pcie_sh;
+
+	DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzzz));
+
+	if (sh->buzzz != 0U) {	/* Fetch and display dongle BUZZZ Trace */
+
+		dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzzz,
+			(uint8 *)buzzz_p, sizeof(bcm_buzzz_t));
+
+		printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
+			"count<%u> status<%u> wrap<%u>\n"
+			"cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
+			(int)sh->buzzz,
+			(int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end,
+			buzzz_p->count, buzzz_p->status, buzzz_p->wrap,
+			buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group,
+			buzzz_p->buffer_sz, buzzz_p->log_sz);
+
+		if (buzzz_p->count == 0) {
+			printf("Empty dongle BUZZZ trace\n\n");
+			goto done;
+		}
+
+		/* Allocate memory for trace buffer and format strings */
+		buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
+		if (buffer_p == NULL) {
+			printf("Buffer memory allocation failure\n");
+			goto done;
+		}
+
+		/* Fetch the trace. format strings are exported via bcm_buzzz.h */
+		dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log,   /* Trace */
+			(uint8 *)buffer_p, buzzz_p->buffer_sz);
+
+		/* Process and display the trace using formatted output */
+
+		{
+			int ctr;
+			for (ctr = 0; ctr < buzzz_p->counters; ctr++) {
+				printf("<%u> ", buzzz_p->eventid[ctr]);
+			}
+			printf("\n");
+		}
+
+		dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
+
+		printf("----- End of dongle BCM BUZZZ Trace -----\n\n");
+
+		MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
+	}
+
+done:
+
+	if (page_p)   MFREE(bus->dhd->osh, page_p, 4096);
+	if (buzzz_p)  MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t));
+	if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
+
+	return BCME_OK;
+}
+#endif /* BCM_BUZZZ */
+
+#define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) &&	\
+	((sih)->buscoretype == PCIE2_CORE_ID))
+
+static bool
+pcie2_mdiosetblock(dhd_bus_t *bus, uint blk)
+{
+	uint mdiodata, mdioctrl, i = 0;
+	uint pcie_serdes_spinwait = 200;
+
+	mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF);
+	mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE;
+
+	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdioctrl);
+	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, mdiodata);
+
+	OSL_DELAY(10);
+	/* retry till the transaction is complete */
+	while (i < pcie_serdes_spinwait) {
+		uint mdioctrl_read = si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA,
+			0, 0);
+		if (!(mdioctrl_read & MDIODATA2_DONE)) {
+			break;
+		}
+		OSL_DELAY(1000);
+		i++;
+	}
+
+	if (i >= pcie_serdes_spinwait) {
+		DHD_ERROR(("pcie2_mdiosetblock: timed out\n"));
+		return FALSE;
+	}
+
+	return TRUE;
+}
+
+
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	int bcmerror = 0;
+	unsigned long flags;
+#ifdef CONFIG_ARCH_MSM
+	int retry = POWERUP_MAX_RETRY;
+#endif /* CONFIG_ARCH_MSM */
+
+	if (dhd_download_fw_on_driverload) {
+		bcmerror = dhd_bus_start(dhdp);
+	} else {
+		if (flag == TRUE) { /* Turn off WLAN */
+			/* Removing Power */
+			DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
+
+			bus->dhd->up = FALSE;
+
+			if (bus->dhd->busstate != DHD_BUS_DOWN) {
+				dhdpcie_advertise_bus_cleanup(bus->dhd);
+				if (bus->intr) {
+					dhdpcie_bus_intr_disable(bus);
+					dhdpcie_free_irq(bus);
+				}
+#ifdef BCMPCIE_OOB_HOST_WAKE
+				/* Clean up any pending host wake IRQ */
+				dhd_bus_oob_intr_set(bus->dhd, FALSE);
+				dhd_bus_oob_intr_unregister(bus->dhd);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+				dhd_os_wd_timer(dhdp, 0);
+				dhd_bus_stop(bus, TRUE);
+				dhd_prot_reset(dhdp);
+				dhd_clear(dhdp);
+				dhd_bus_release_dongle(bus);
+				dhdpcie_bus_free_resource(bus);
+				bcmerror = dhdpcie_bus_disable_device(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+#ifdef CONFIG_ARCH_MSM
+				bcmerror = dhdpcie_bus_clock_stop(bus);
+				if (bcmerror) {
+					DHD_ERROR(("%s: host clock stop failed: %d\n",
+						__FUNCTION__, bcmerror));
+					goto done;
+				}
+#endif /* CONFIG_ARCH_MSM */
+				DHD_GENERAL_LOCK(bus->dhd, flags);
+				bus->dhd->busstate = DHD_BUS_DOWN;
+				DHD_GENERAL_UNLOCK(bus->dhd, flags);
+			} else {
+				if (bus->intr) {
+					dhdpcie_free_irq(bus);
+				}
+#ifdef BCMPCIE_OOB_HOST_WAKE
+				/* Clean up any pending host wake IRQ */
+				dhd_bus_oob_intr_set(bus->dhd, FALSE);
+				dhd_bus_oob_intr_unregister(bus->dhd);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+				dhd_prot_reset(dhdp);
+				dhd_clear(dhdp);
+				dhd_bus_release_dongle(bus);
+				dhdpcie_bus_free_resource(bus);
+				bcmerror =
dhdpcie_bus_disable_device(bus); + if (bcmerror) { + DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + +#ifdef CONFIG_ARCH_MSM + bcmerror = dhdpcie_bus_clock_stop(bus); + if (bcmerror) { + DHD_ERROR(("%s: host clock stop failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } +#endif /* CONFIG_ARCH_MSM */ + } + + bus->dhd->dongle_reset = TRUE; + DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__)); + + } else { /* Turn on WLAN */ + if (bus->dhd->busstate == DHD_BUS_DOWN) { + /* Powering On */ + DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__)); +#ifdef CONFIG_ARCH_MSM + while (--retry) { + bcmerror = dhdpcie_bus_clock_start(bus); + if (!bcmerror) { + DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n", + __FUNCTION__)); + break; + } else { + OSL_SLEEP(10); + } + } + + if (bcmerror && !retry) { + DHD_ERROR(("%s: host pcie clock enable failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } +#endif /* CONFIG_ARCH_MSM */ + bus->is_linkdown = 0; + bus->pci_d3hot_done = 0; + bcmerror = dhdpcie_bus_enable_device(bus); + if (bcmerror) { + DHD_ERROR(("%s: host configuration restore failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + + bcmerror = dhdpcie_bus_alloc_resource(bus); + if (bcmerror) { + DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + + bcmerror = dhdpcie_bus_dongle_attach(bus); + if (bcmerror) { + DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + + bcmerror = dhd_bus_request_irq(bus); + if (bcmerror) { + DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + + bus->dhd->dongle_reset = FALSE; + + bcmerror = dhd_bus_start(dhdp); + if (bcmerror) { + DHD_ERROR(("%s: dhd_bus_start: %d\n", + __FUNCTION__, bcmerror)); + goto done; + } + + bus->dhd->up = TRUE; + DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__)); + } else { + DHD_ERROR(("%s: what should we do here\n", __FUNCTION__)); + goto done; + } + } + } + +done: + if (bcmerror) { + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DOWN; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + } + + return bcmerror; +} + +static int +pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val, + bool slave_bypass) +{ + uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl; + uint32 reg32; + + pcie2_mdiosetblock(bus, physmedia); + + /* enable mdio access to SERDES */ + mdio_ctrl = MDIOCTL2_DIVISOR_VAL; + mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF); + + if (slave_bypass) + mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS; + + if (!write) + mdio_ctrl |= MDIOCTL2_READ; + + si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl); + + if (write) { + reg32 = PCIE2_MDIO_WR_DATA; + si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, + *val | MDIODATA2_DONE); + } else + reg32 = PCIE2_MDIO_RD_DATA; + + /* retry till the transaction is complete */ + while (i < pcie_serdes_spinwait) { + uint done_val = si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0); + if (!(done_val & MDIODATA2_DONE)) { + if (!write) { + *val = si_corereg(bus->sih, bus->sih->buscoreidx, + PCIE2_MDIO_RD_DATA, 0, 0); + *val = *val & MDIODATA2_MASK; + } + return 0; + } + OSL_DELAY(1000); + i++; + } + return -1; +} + +static int +dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name, + void *params, int plen, void *arg, int len, int val_size) +{ + int bcmerror = 0; + int32 int_val = 0; + 
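+	/* int_val/int_val2/int_val3 receive up to three 32-bit words copied
+	 * out of 'params' below; values the caller did not supply remain 0.
+	 */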
int32 int_val2 = 0; + int32 int_val3 = 0; + bool bool_val = 0; + + DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n", + __FUNCTION__, actionid, name, params, plen, arg, len, val_size)); + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) + goto exit; + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + if (plen >= (int)sizeof(int_val) * 2) + bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2)); + + if (plen >= (int)sizeof(int_val) * 3) + bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + /* Check if dongle is in reset. If so, only allow DEVRESET iovars */ + if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) || + actionid == IOV_GVAL(IOV_DEVRESET))) { + bcmerror = BCME_NOTREADY; + goto exit; + } + + switch (actionid) { + + + case IOV_SVAL(IOV_VARS): + bcmerror = dhdpcie_downloadvars(bus, arg, len); + break; + + case IOV_SVAL(IOV_PCIEREG): + si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, + int_val); + si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0, + int_val2); + break; + + case IOV_GVAL(IOV_PCIEREG): + si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, + int_val); + int_val = si_corereg(bus->sih, bus->sih->buscoreidx, + OFFSETOF(sbpcieregs_t, configdata), 0, 0); + bcopy(&int_val, arg, sizeof(int_val)); + break; + + case IOV_SVAL(IOV_PCIECOREREG): + si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2); + break; + case IOV_GVAL(IOV_BAR0_SECWIN_REG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = sdreg.offset; + size = sdreg.func; + + if (si_backplane_access(bus->sih, addr, size, &int_val, TRUE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + break; + } + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_BAR0_SECWIN_REG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = sdreg.offset; + size = sdreg.func; + if (si_backplane_access(bus->sih, addr, size, &sdreg.value, FALSE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + } + break; + } + + case IOV_GVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = sdreg.offset | SI_ENUM_BASE; + size = sdreg.func; + + if (si_backplane_access(bus->sih, addr, size, &int_val, TRUE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + break; + } + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = sdreg.offset | SI_ENUM_BASE; + size = sdreg.func; + if (si_backplane_access(bus->sih, addr, size, &sdreg.value, FALSE) != BCME_OK) { + DHD_ERROR(("Invalid size/addr combination \n")); + bcmerror = BCME_ERROR; + } + break; + } + + case IOV_GVAL(IOV_PCIESERDESREG): + { + uint val; + if (!PCIE_GEN2(bus->sih)) { + DHD_ERROR(("supported only in pcie gen2\n")); + bcmerror = BCME_ERROR; + break; + } + + if (!pcie2_mdioop(bus, int_val, int_val2, FALSE, &val, FALSE)) { + bcopy(&val, arg, sizeof(int32)); + } else { + DHD_ERROR(("pcie2_mdioop failed.\n")); + bcmerror = BCME_ERROR; + } + break; + } + + case 
+ case IOV_SVAL(IOV_PCIESERDESREG): + if (!PCIE_GEN2(bus->sih)) { + DHD_ERROR(("supported only in pcie gen2\n")); + bcmerror = BCME_ERROR; + break; + } + if (pcie2_mdioop(bus, int_val, int_val2, TRUE, &int_val3, FALSE)) { + DHD_ERROR(("pcie2_mdioop failed.\n")); + bcmerror = BCME_ERROR; + } + break; + case IOV_GVAL(IOV_PCIECOREREG): + int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0); + bcopy(&int_val, arg, sizeof(int_val)); + break; + + case IOV_SVAL(IOV_PCIECFGREG): + OSL_PCI_WRITE_CONFIG(bus->osh, int_val, 4, int_val2); + break; + + case IOV_GVAL(IOV_PCIECFGREG): + int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4); + bcopy(&int_val, arg, sizeof(int_val)); + break; + + case IOV_SVAL(IOV_PCIE_LPBK): + bcmerror = dhdpcie_bus_lpback_req(bus, int_val); + break; + + case IOV_SVAL(IOV_PCIE_DMAXFER): + bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3); + break; + + case IOV_GVAL(IOV_PCIE_SUSPEND): + int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_PCIE_SUSPEND): + dhdpcie_bus_suspend(bus, bool_val); + break; + + case IOV_GVAL(IOV_MEMSIZE): + int_val = (int32)bus->ramsize; + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_MEMBYTES): + case IOV_GVAL(IOV_MEMBYTES): + { + uint32 address; /* absolute backplane address */ + uint size, dsize; + uint8 *data; + + bool set = (actionid == IOV_SVAL(IOV_MEMBYTES)); + + ASSERT(plen >= 2*sizeof(int)); + + address = (uint32)int_val; + bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val)); + size = (uint)int_val; + + /* Do some validation */ + dsize = set ? plen - (2 * sizeof(int)) : len; + if (dsize < size) { + DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n", + __FUNCTION__, (set ? "set" : "get"), address, size, dsize)); + bcmerror = BCME_BADARG; + break; + } + + DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n dsize %d ", __FUNCTION__, + (set ? "write" : "read"), size, address, dsize)); + + /* check if CR4 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) || + si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) { + /* if address is 0, store the reset instruction to be written in 0 */ + if (set && address == bus->dongle_ram_base) { + bus->resetinstr = *(((uint32*)params) + 2); + } + } else { + /* If we know about SOCRAM, check for a fit */ + if ((bus->orig_ramsize) && + ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize))) + { + uint8 enable, protect, remap; + si_socdevram(bus->sih, FALSE, &enable, &protect, &remap); + if (!enable || protect) { + DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n", + __FUNCTION__, bus->orig_ramsize, size, address)); + DHD_ERROR(("%s: socram enable %d, protect %d\n", + __FUNCTION__, enable, protect)); + bcmerror = BCME_BADARG; + break; + } + + if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) { + uint32 devramsize = si_socdevram_size(bus->sih); + if ((address < SOCDEVRAM_ARM_ADDR) || + (address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) { + DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n", + __FUNCTION__, address, size)); + DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n", + __FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize)); + bcmerror = BCME_BADARG; + break; + } + /* move it such that address is real now */ + address -= SOCDEVRAM_ARM_ADDR; + address += SOCDEVRAM_BP_ADDR; + DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n", + __FUNCTION__, (set ? "write" : "read"), size, address)); + } else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) { + /* Cannot access the remap region while the devram remap bit is set; + * ROM content would be returned in this case. + */ + DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n", + __FUNCTION__, address)); + bcmerror = BCME_ERROR; + break; + } + } + } + + /* Generate the actual data pointer */ + data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg; + + /* Call to do the transfer */ + bcmerror = dhdpcie_bus_membytes(bus, set, address, data, size); + + break; + } + +#ifdef BCM_BUZZZ + /* Dump dongle side buzzz trace to console */ + case IOV_GVAL(IOV_BUZZZ_DUMP): + bcmerror = dhd_buzzz_dump_dngl(bus); + break; +#endif /* BCM_BUZZZ */ + + case IOV_SVAL(IOV_SET_DOWNLOAD_STATE): + bcmerror = dhdpcie_bus_download_state(bus, bool_val); + break; + + case IOV_GVAL(IOV_RAMSIZE): + int_val = (int32)bus->ramsize; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_RAMSTART): + int_val = (int32)bus->dongle_ram_base; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_CC_NVMSHADOW): + { + struct bcmstrbuf dump_b; + + bcm_binit(&dump_b, arg, len); + bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b); + break; + } + + case IOV_GVAL(IOV_SLEEP_ALLOWED): + bool_val = bus->sleep_allowed; + bcopy(&bool_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SLEEP_ALLOWED): + bus->sleep_allowed = bool_val; + break; + + case IOV_GVAL(IOV_DONGLEISOLATION): + int_val = bus->dhd->dongle_isolation; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DONGLEISOLATION): + bus->dhd->dongle_isolation = bool_val; + break; + + case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD): + int_val = bus->ltrsleep_on_unload; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD): + bus->ltrsleep_on_unload = bool_val; + break; + + case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK): + { + struct bcmstrbuf dump_b; + bcm_binit(&dump_b, arg, len); + bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b); + break; + } + case IOV_GVAL(IOV_DMA_RINGINDICES): + { + int h2d_support, d2h_support; + + d2h_support = DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0; + h2d_support = DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0; + int_val = d2h_support | (h2d_support << 1); + bcopy(&int_val, arg, sizeof(int_val)); + break; + } + case IOV_SVAL(IOV_DMA_RINGINDICES): + /* Can change it only during initialization/FW download */ + if (bus->dhd->busstate == DHD_BUS_DOWN) { + if ((int_val > 3) || (int_val < 0)) { + DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n")); + bcmerror = BCME_BADARG; + } else { + bus->dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE; + bus->dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE; + } + } else { + DHD_ERROR(("%s: Can change only when bus down (before FW download)\n", + __FUNCTION__)); + bcmerror = BCME_NOTDOWN; + } + break; +
+ case IOV_GVAL(IOV_METADATA_DBG): + int_val = dhd_prot_metadata_dbg_get(bus->dhd); + bcopy(&int_val, arg, val_size); + break; + case IOV_SVAL(IOV_METADATA_DBG): + dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0)); + break; + + case IOV_GVAL(IOV_RX_METADATALEN): + int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RX_METADATALEN): + if (int_val > 64) { + bcmerror = BCME_BUFTOOLONG; + break; + } + dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE); + break; + + case IOV_SVAL(IOV_TXP_THRESHOLD): + dhd_prot_txp_threshold(bus->dhd, TRUE, int_val); + break; + + case IOV_GVAL(IOV_TXP_THRESHOLD): + int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_DB1_FOR_MB): + if (int_val) + bus->db1_for_mb = TRUE; + else + bus->db1_for_mb = FALSE; + break; + + case IOV_GVAL(IOV_DB1_FOR_MB): + if (bus->db1_for_mb) + int_val = 1; + else + int_val = 0; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_TX_METADATALEN): + int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE); + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TX_METADATALEN): + if (int_val > 64) { + bcmerror = BCME_BUFTOOLONG; + break; + } + dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE); + break; + + case IOV_SVAL(IOV_DEVRESET): + dhd_bus_devreset(bus->dhd, (uint8)bool_val); + break; + + case IOV_GVAL(IOV_FLOW_PRIO_MAP): + int_val = bus->dhd->flow_prio_map_type; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_FLOW_PRIO_MAP): + int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val); + bcopy(&int_val, arg, val_size); + break; + +#ifdef DHD_PCIE_RUNTIMEPM + case IOV_GVAL(IOV_IDLETIME): + int_val = bus->idletime; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_IDLETIME): + if (int_val < 0) { + bcmerror = BCME_BADARG; + } else { + bus->idletime = int_val; + } + break; +#endif /* DHD_PCIE_RUNTIMEPM */ + + case IOV_GVAL(IOV_TXBOUND): + int_val = (int32)dhd_txbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXBOUND): + dhd_txbound = (uint)int_val; + break; + + case IOV_GVAL(IOV_RXBOUND): + int_val = (int32)dhd_rxbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RXBOUND): + dhd_rxbound = (uint)int_val; + break; + + case IOV_SVAL(IOV_HANGREPORT): + bus->dhd->hang_report = bool_val; + DHD_ERROR(("%s: Set hang_report as %d\n", + __FUNCTION__, bus->dhd->hang_report)); + break; + + case IOV_GVAL(IOV_HANGREPORT): + int_val = (int32)bus->dhd->hang_report; + bcopy(&int_val, arg, val_size); + break; + + default: + bcmerror = BCME_UNSUPPORTED; + break; + } + +exit: + return bcmerror; +} /* dhdpcie_bus_doiovar */ + +/** Sends a loopback request to the dongle */ +static int +dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len) +{ + if (bus->dhd == NULL) { + DHD_ERROR(("bus not inited\n")); + return 0; + } + if (bus->dhd->prot == NULL) { + DHD_ERROR(("prot is not inited\n")); + return 0; + } + if (bus->dhd->busstate != DHD_BUS_DATA) { + DHD_ERROR(("bus is not in a ready state for LPBK\n")); + return 0; + } + dhdmsgbuf_lpbk_req(bus->dhd, len); + return 0; +} +
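dhdpcie_bus_suspend() below drives a mailbox handshake with the dongle: post H2D_HOST_D3_INFORM, wait for D2H_DEV_D3_ACK, and back out (sending the D0 inform messages) if a wakelock is still held or the ACK never arrives. A minimal sketch of just that control flow; mb_send(), wait_d3_ack() and host_holds_wakelock() are hypothetical stand-ins for the driver's mailbox and wait primitives, and the enum values are illustrative:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the driver's mailbox and wait primitives. */
    enum { H2D_D3_INFORM = 0x1, H2D_D0_INFORM_IN_USE = 0x8, H2D_D0_INFORM = 0x10 };
    static void mb_send(unsigned v)        { printf("H2D mailbox <- 0x%x\n", v); }
    static bool wait_d3_ack(void)          { return true;  }  /* pretend ACK arrived */
    static bool host_holds_wakelock(void)  { return false; }

    /* Control flow only; the real function also manages bus state, queues and IRQs. */
    static int bus_suspend_sketch(void)
    {
            mb_send(H2D_D3_INFORM);             /* 1. ask the dongle to enter D3   */
            if (!wait_d3_ack())
                    return -ETIMEDOUT;          /* 2. no ACK: dump state, bail out */
            if (host_holds_wakelock()) {        /* 3. ACK came, but host is busy:  */
                    mb_send(H2D_D0_INFORM_IN_USE /*   both messages are needed to  */
                            | H2D_D0_INFORM);    /*   bring the dongle back to D0  */
                    return -EBUSY;
            }
            return 0;                           /* 4. safe to power down the link  */
    }

    int main(void) { return bus_suspend_sketch() ? 1 : 0; }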
+int +dhdpcie_bus_suspend(struct dhd_bus *bus, bool state) +{ + int timeleft; + unsigned long flags; + int rc = 0; + + if (bus->dhd == NULL) { + DHD_ERROR(("bus not inited\n")); + return BCME_ERROR; + } + if (bus->dhd->prot == NULL) { + DHD_ERROR(("prot is not inited\n")); + return BCME_ERROR; + } + DHD_GENERAL_LOCK(bus->dhd, flags); + if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) { + DHD_ERROR(("bus is not in a ready state\n")); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + return BCME_ERROR; + } + DHD_GENERAL_UNLOCK(bus->dhd, flags); + if (bus->dhd->dongle_reset) { + DHD_ERROR(("Dongle is in reset state.\n")); + return -EIO; + } + + if (bus->suspended == state) { /* Set to same state */ + DHD_ERROR(("Bus is already in SUSPEND state.\n")); + return BCME_OK; + } + + if (state) { + int idle_retry = 0; + int active; + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down, state=%d\n", + __FUNCTION__, state)); + return BCME_ERROR; + } + + /* Suspend */ + DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__)); + bus->wait_for_d3_ack = 0; + bus->suspended = TRUE; + + + DHD_GENERAL_LOCK(bus->dhd, flags); + /* stop all interface network queue. */ + dhd_bus_stop_queue(bus); + bus->dhd->busstate = DHD_BUS_SUSPEND; + if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX) { + DHD_ERROR(("Tx Request is not ended\n")); + bus->dhd->busstate = DHD_BUS_DATA; + /* resume all interface network queue. */ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + bus->suspended = FALSE; + return -EBUSY; + } + + bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SUSPEND; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + dhd_os_set_ioctl_resp_timeout(D3_ACK_RESP_TIMEOUT); + dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM); + timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack); + dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); + + { + uint32 d2h_mb_data = 0; + uint32 zero = 0; + + /* If wait_for_d3_ack was not updated because D2H MB was not received */ + if (bus->wait_for_d3_ack == 0) { + /* Read the Mb data to see if the Dongle has actually sent D3 ACK */ + dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0); + + if (d2h_mb_data & D2H_DEV_D3_ACK) { + DHD_ERROR(("*** D3 WAR for missing interrupt ***\r\n")); + /* Clear the MB Data */ + dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), + D2H_MB_DATA, 0); + + /* Consider that D3 ACK is received */ + bus->wait_for_d3_ack = 1; + bus->d3_ack_war_cnt++; + + } /* d2h_mb_data & D2H_DEV_D3_ACK */ + } /* bus->wait_for_d3_ack was 0 */ + } + + /* To allow threads that got pre-empted to complete. + */ + while ((active = dhd_os_check_wakelock_all(bus->dhd)) && + (idle_retry < MAX_WKLK_IDLE_CHECK)) { + msleep(1); + idle_retry++; + } + + if (bus->wait_for_d3_ack) { + DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__)); + /* Got D3 Ack. Suspend the bus */ + if (active) { + DHD_ERROR(("%s(): Suspend failed because of wakelock, restoring " + "Dongle to D0\n", __FUNCTION__)); + + /* + * Dongle still thinks that it has to be in D3 state + * until it gets a D0 Inform, but we are backing off from suspend. + * Ensure that Dongle is brought back to D0. + * + * Bringing back Dongle from D3 Ack state to D0 state + * is a 2 step process. Dongle would want to know that D0 Inform + * would be sent as a MB interrupt + * to bring it out of D3 Ack state to D0 state. + * So we have to send both these messages. 
+ */ + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + dhdpcie_send_mb_data(bus, + (H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM)); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); + + bus->suspended = FALSE; + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DATA; + /* resume all interface network queue. */ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + rc = BCME_ERROR; + } else { + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE)); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); + dhdpcie_bus_intr_disable(bus); + rc = dhdpcie_pci_suspend_resume(bus, state); + dhd_bus_set_device_wake(bus, FALSE); + } + bus->dhd->d3ackcnt_timeout = 0; +#if defined(BCMPCIE_OOB_HOST_WAKE) + dhdpcie_oob_intr_set(bus, TRUE); +#endif /* BCMPCIE_OOB_HOST_WAKE */ + } else if (timeleft == 0) { + bus->dhd->d3ackcnt_timeout++; + DHD_ERROR(("%s: resumed on timeout for D3 ACK d3_inform_cnt %d \n", + __FUNCTION__, bus->dhd->d3ackcnt_timeout)); + dhd_prot_debug_info_print(bus->dhd); +#ifdef DHD_FW_COREDUMP + if (bus->dhd->memdump_enabled) { + /* write core dump to file */ + bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT; + dhdpcie_mem_dump(bus); + } +#endif /* DHD_FW_COREDUMP */ + bus->suspended = FALSE; + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DATA; + /* resume all interface network queue. */ + dhd_bus_start_queue(bus); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + if (bus->dhd->d3ackcnt_timeout >= MAX_CNTL_D3ACK_TIMEOUT) { + DHD_ERROR(("%s: Event HANG send up " + "due to PCIe linkdown\n", __FUNCTION__)); +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM + bus->no_cfg_restore = 1; +#endif /* CONFIG_ARCH_MSM */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT); + } + rc = -ETIMEDOUT; + + } + + bus->wait_for_d3_ack = 1; + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SUSPEND; + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + } else { + /* Resume */ +#if defined(BCMPCIE_OOB_HOST_WAKE) + DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd); +#endif /* BCMPCIE_OOB_HOST_WAKE */ + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_RESUME; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + rc = dhdpcie_pci_suspend_resume(bus, state); + if (bus->dhd->busstate == DHD_BUS_SUSPEND) { + DHD_OS_WAKE_LOCK_WAIVE(bus->dhd); + dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM)); + DHD_OS_WAKE_LOCK_RESTORE(bus->dhd); + dhd_bus_set_device_wake(bus, TRUE); + } + bus->suspended = FALSE; + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->busstate = DHD_BUS_DATA; + bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_RESUME; +#ifdef DHD_PCIE_RUNTIMEPM + if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE) { + bus->bus_wake = 1; + OSL_SMP_WMB(); + wake_up_interruptible(&bus->rpm_queue); + } +#endif /* DHD_PCIE_RUNTIMEPM */ + /* resume all interface network queue. 
*/ + dhd_bus_start_queue(bus); + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + dhdpcie_bus_intr_enable(bus); + } + return rc; +} + +/** Transfers bytes from host to dongle and to host again using DMA */ +static int +dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay) +{ + if (bus->dhd == NULL) { + DHD_ERROR(("bus not inited\n")); + return BCME_ERROR; + } + if (bus->dhd->prot == NULL) { + DHD_ERROR(("prot is not inited\n")); + return BCME_ERROR; + } + if (bus->dhd->busstate != DHD_BUS_DATA) { + DHD_ERROR(("bus is not in a ready state for DMA transfer\n")); + return BCME_ERROR; + } + + if (len < 5 || len > 4194296) { + DHD_ERROR(("len is too small or too large\n")); + return BCME_ERROR; + } + return dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay); +} +
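dhdpcie_bus_download_state() below brackets firmware download: on enter it halts the ARM core and prepares RAM; on exit it writes the NVRAM vars, plants the reset instruction at address 0 (for RAM-boot CR4/CA7 cores), reads it back to verify, and releases the core. A toy sketch of just the exit-path verify step, using a local buffer in place of the TCM window (all names and values here are hypothetical, not driver API):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* A toy 'dongle RAM' standing in for the TCM window that
     * dhdpcie_bus_membytes() accesses; illustrative only. */
    static uint8_t dongle_ram[256];

    static int mem_write(uint32_t addr, const void *buf, uint32_t len)
    {
            if (addr + len > sizeof(dongle_ram)) return -1;
            memcpy(dongle_ram + addr, buf, len);
            return 0;
    }

    static int mem_read(uint32_t addr, void *buf, uint32_t len)
    {
            if (addr + len > sizeof(dongle_ram)) return -1;
            memcpy(buf, dongle_ram + addr, len);
            return 0;
    }

    /* Exit-download path for a CR4-style RAM-boot core: plant the reset
     * vector at address 0, read it back to verify, then release the ARM. */
    static int download_exit_sketch(uint32_t resetinstr)
    {
            uint32_t readback = 0;

            if (mem_write(0, &resetinstr, sizeof(resetinstr)) != 0)
                    return -1;
            if (mem_read(0, &readback, sizeof(readback)) != 0 || readback != resetinstr)
                    return -1;  /* the driver logs and fails here too */
            /* real driver: si_core_reset(bus->sih, 0, 0) removes halt; CPU boots from 0 */
            return 0;
    }

    int main(void)
    {
            /* 0xea00002e is just an arbitrary example word, not a real image */
            printf("verify %s\n", download_exit_sketch(0xea00002e) == 0 ? "ok" : "failed");
            return 0;
    }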
+static int +dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter) +{ + int bcmerror = 0; + uint32 *cr4_regs; + + if (!bus->sih) { + DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__)); + return BCME_ERROR; + } + /* To enter download state, disable ARM and reset SOCRAM. + * To exit download state, simply reset ARM (default is RAM boot). + */ + if (enter) { + bus->alp_only = TRUE; + + /* some chips (e.g. 43602) have two ARM cores; the CR4 receives the firmware. */ + cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0); + + if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { + /* Halt ARM & remove reset */ + si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT); + if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + si_core_reset(bus->sih, 0, 0); + /* reset last 4 bytes of RAM address, to be used for shared area */ + dhdpcie_init_shared_addr(bus); + } else if (cr4_regs == NULL) { /* no CR4 present on chip */ + si_core_disable(bus->sih, 0); + + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + si_core_reset(bus->sih, 0, 0); + + /* Clear the last word of memory */ + if (bus->ramsize) { + uint32 zeros = 0; + if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4, + (uint8*)&zeros, 4) < 0) { + bcmerror = BCME_ERROR; + goto fail; + } + } + } else { + /* For CR4, + * Halt ARM + * Remove ARM reset + * Read RAM base address [0x18_0000] + * [next] Download firmware + * [done at else] Populate the reset vector + * [done at else] Remove ARM halt + */ + /* Halt ARM & remove reset */ + si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT); + if (BCM43602_CHIP(bus->sih->chip)) { + W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5); + W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0); + W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7); + W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0); + } + /* reset last 4 bytes of RAM address, to be used for shared area */ + dhdpcie_init_shared_addr(bus); + } + } else { + if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) { + /* write vars */ + if ((bcmerror = dhdpcie_bus_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } + /* switch back to arm core again */ + if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + /* write address 0 with reset instruction */ + bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0, + (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); + /* now remove reset and halt and continue to run CA7 */ + } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if (!si_iscoreup(bus->sih)) { + DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + /* Enable remap before ARM reset but after vars. + * No backplane access in remap mode + */ + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) && + !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + } else { + if (BCM43602_CHIP(bus->sih->chip)) { + /* Firmware crashes on SOCSRAM access when core is in reset */ + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", + __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + si_core_reset(bus->sih, 0, 0); + si_setcore(bus->sih, ARMCR4_CORE_ID, 0); + } + + /* write vars */ + if ((bcmerror = dhdpcie_bus_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } + + /* switch back to arm core again */ + if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + /* write address 0 with reset instruction */ + bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0, + (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); + + if (bcmerror == BCME_OK) { + uint32 tmp; + + bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0, + (uint8 *)&tmp, sizeof(tmp)); + + if (bcmerror == BCME_OK && tmp != bus->resetinstr) { + DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n", + __FUNCTION__, bus->resetinstr)); + DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n", + __FUNCTION__, tmp)); + bcmerror = BCME_ERROR; + goto fail; + } + } + + /* now remove reset and halt and continue to run CR4 */ + } + + si_core_reset(bus->sih, 0, 0); + + /* Allow HT Clock now that the ARM is running. */ + bus->alp_only = FALSE; + + bus->dhd->busstate = DHD_BUS_LOAD; + } + +fail: + /* Always return to PCIE core */ + si_setcore(bus->sih, PCIE2_CORE_ID, 0); + + return bcmerror; +} /* dhdpcie_bus_download_state */ + +static int +dhdpcie_bus_write_vars(dhd_bus_t *bus) +{ + int bcmerror = 0; + uint32 varsize, phys_size; + uint32 varaddr; + uint8 *vbuffer; + uint32 varsizew; +#ifdef DHD_DEBUG + uint8 *nvram_ularray; +#endif /* DHD_DEBUG */ + + /* Even if there are no vars to be written, we still need to set the ramsize. */ + varsize = bus->varsz ? 
ROUNDUP(bus->varsz, 4) : 0; + varaddr = (bus->ramsize - 4) - varsize; + + varaddr += bus->dongle_ram_base; + + if (bus->vars) { + + vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize); + if (!vbuffer) + return BCME_NOMEM; + + bzero(vbuffer, varsize); + bcopy(bus->vars, vbuffer, bus->varsz); + /* Write the vars list */ + DHD_INFO_HW4(("%s: tcm: %p varaddr: 0x%x varsize: %d\n", + __FUNCTION__, bus->tcm, varaddr, varsize)); + bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize); + + /* Implement read back and verify later */ +#ifdef DHD_DEBUG + /* Verify NVRAM bytes */ + DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize)); + nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize); + if (!nvram_ularray) + return BCME_NOMEM; + + /* Upload image to verify downloaded contents. */ + memset(nvram_ularray, 0xaa, varsize); + + /* Read the vars list to temp buffer for comparison */ + bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n", + __FUNCTION__, bcmerror, varsize, varaddr)); + } + + /* Compare the original NVRAM with the one read from RAM */ + if (memcmp(vbuffer, nvram_ularray, varsize)) { + DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__)); + } else + DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n", + __FUNCTION__)); + + MFREE(bus->dhd->osh, nvram_ularray, varsize); +#endif /* DHD_DEBUG */ + + MFREE(bus->dhd->osh, vbuffer, varsize); + } + + phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize; + + phys_size += bus->dongle_ram_base; + + /* adjust to the user specified RAM */ + DHD_INFO(("Physical memory size: %d, usable memory size: %d\n", + phys_size, bus->ramsize)); + DHD_INFO(("Vars are at %d, orig varsize is %d\n", + varaddr, varsize)); + varsize = ((phys_size - 4) - varaddr); + + /* + * Determine the length token: + * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits. + */ + if (bcmerror) { + varsizew = 0; + bus->nvram_csm = varsizew; + } else { + varsizew = varsize / 4; + varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF); + bus->nvram_csm = varsizew; + varsizew = htol32(varsizew); + } + + DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew)); + + /* Write the length token to the last word */ + DHD_INFO_HW4(("%s: tcm: %p phys_size: 0x%x varsizew: %x\n", + __FUNCTION__, bus->tcm, phys_size, varsizew)); + bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4), + (uint8*)&varsizew, 4); + + return bcmerror; +} /* dhdpcie_bus_write_vars */ +
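dhdpcie_bus_write_vars() above finishes by writing a "length token" into the last word of dongle RAM: the vars size in 32-bit words in the lower 16 bits, and its bitwise complement in the upper 16 bits as a checksum. A standalone illustration of building and checking that token; the construction mirrors the driver code above, while the validity check is an assumption about how the complement relation would be verified:

    #include <stdint.h>
    #include <stdio.h>

    /* Build the token exactly as dhdpcie_bus_write_vars() does: size in
     * 32-bit words in the low half, bitwise complement in the high half. */
    static uint32_t make_len_token(uint32_t varsize_bytes)
    {
            uint32_t w = varsize_bytes / 4;
            return (~w << 16) | (w & 0x0000FFFF);
    }

    /* Validate: the two halves must be complements of each other. */
    static int token_ok(uint32_t tok)
    {
            return ((tok >> 16) ^ 0xFFFF) == (tok & 0xFFFF);
    }

    int main(void)
    {
            uint32_t tok = make_len_token(1024);   /* 1 KB of vars -> 256 words */
            printf("token=0x%08x valid=%d\n", tok, token_ok(tok));  /* 0xfeff0100 1 */
            return 0;
    }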
+int +dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len) +{ + int bcmerror = BCME_OK; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Basic sanity checks */ + if (bus->dhd->up) { + bcmerror = BCME_NOTDOWN; + goto err; + } + if (!len) { + bcmerror = BCME_BUFTOOSHORT; + goto err; + } + + /* Free the old ones and replace with passed variables */ + if (bus->vars) + MFREE(bus->dhd->osh, bus->vars, bus->varsz); + + bus->vars = MALLOC(bus->dhd->osh, len); + bus->varsz = bus->vars ? len : 0; + if (bus->vars == NULL) { + bcmerror = BCME_NOMEM; + goto err; + } + + /* Copy the passed variables, which should include the terminating double-null */ + bcopy(arg, bus->vars, bus->varsz); + + +err: + return bcmerror; +} + +#ifndef BCMPCIE_OOB_HOST_WAKE +/* loop through the capability list and see if the requested capability exists */ +uint8 +dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id) +{ + uint8 cap_id; + uint8 cap_ptr = 0; + uint8 byte_val; + + /* check for Header type 0 */ + byte_val = read_pci_cfg_byte(PCI_CFG_HDR); + if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) { + DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__)); + goto end; + } + + /* check if the capability pointer field exists */ + byte_val = read_pci_cfg_byte(PCI_CFG_STAT); + if (!(byte_val & PCI_CAPPTR_PRESENT)) { + DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__)); + goto end; + } + + cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR); + /* check if the capability pointer is 0x00 */ + if (cap_ptr == 0x00) { + DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__)); + goto end; + } + + /* loop through the capability list and see if the requested capability exists */ + + cap_id = read_pci_cfg_byte(cap_ptr); + + while (cap_id != req_cap_id) { + cap_ptr = read_pci_cfg_byte((cap_ptr + 1)); + if (cap_ptr == 0x00) break; + cap_id = read_pci_cfg_byte(cap_ptr); + } + +end: + return cap_ptr; +} + +void +dhdpcie_pme_active(osl_t *osh, bool enable) +{ + uint8 cap_ptr; + uint32 pme_csr; + + cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID); + + if (!cap_ptr) { + DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__)); + return; + } + + pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32)); + DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr)); + + pme_csr |= PME_CSR_PME_STAT; + if (enable) { + pme_csr |= PME_CSR_PME_EN; + } else { + pme_csr &= ~PME_CSR_PME_EN; + } + + OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr); +} + +bool +dhdpcie_pme_cap(osl_t *osh) +{ + uint8 cap_ptr; + uint32 pme_cap; + + cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID); + + if (!cap_ptr) { + DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__)); + return FALSE; + } + + pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32)); + + DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap)); + + return ((pme_cap & PME_CAP_PM_STATES) != 0); +} +#endif /* !BCMPCIE_OOB_HOST_WAKE */ + +void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) +{ + uint32 intstatus = 0; + uint32 intmask = 0; + uint32 mbintstatus = 0; + uint32 d2h_mb_data = 0; + + intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); + intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0); + mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0); + dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0); + + bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n", + intstatus, intmask, mbintstatus); + bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n", + d2h_mb_data, dhd->bus->def_intmask); +} + +/** Add bus dump output to a buffer */ +void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + uint16 flowid; + int ix = 0; + flow_ring_node_t *flow_ring_node; + flow_info_t *flow_info; + char eabuf[ETHER_ADDR_STR_LEN]; + + if (dhdp->busstate != DHD_BUS_DATA) + return; + + 
dhd_prot_print_info(dhdp, strbuf); + dhd_dump_intr_registers(dhdp, strbuf); + bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n", + dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr); + bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr)); + bcm_bprintf(strbuf, + "%s %4s %2s %4s %17s %4s %4s %10s %4s %4s ", + "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", + "Overflows", "RD", "WR"); + bcm_bprintf(strbuf, "%5s %6s %5s \n", "Acked", "tossed", "noack"); + + for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) { + flow_ring_node = DHD_FLOW_RING(dhdp, flowid); + if (flow_ring_node->active) { + flow_info = &flow_ring_node->flow_info; + bcm_bprintf(strbuf, + "%3d. %4d %2d %4d %17s %4d %4d %10u ", ix++, + flow_ring_node->flowid, flow_info->ifindex, flow_info->tid, + bcm_ether_ntoa((struct ether_addr *)&flow_info->da, eabuf), + DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue), + DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)), + DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue)); + dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf, + "%4d %4d "); + bcm_bprintf(strbuf, + "%5s %6s %5s\n", "NA", "NA", "NA"); + } + } + bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt); + bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt); + bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt); + bcm_bprintf(strbuf, "D3 Ack WAR cnt %d\n", dhdp->bus->d3_ack_war_cnt); +} + +/** + * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their + * flow queue to their flow ring. + */ +static void +dhd_update_txflowrings(dhd_pub_t *dhd) +{ + unsigned long flags; + dll_t *item, *next; + flow_ring_node_t *flow_ring_node; + struct dhd_bus *bus = dhd->bus; + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + for (item = dll_head_p(&bus->const_flowring); + (!dhd_is_device_removed(dhd) && !dll_end(&bus->const_flowring, item)); + item = next) { + if (dhd->hang_was_sent) { + break; + } + + next = dll_next_p(item); + flow_ring_node = dhd_constlist_to_flowring(item); + + /* Ensure that flow_ring_node in the list is Not Null */ + ASSERT(flow_ring_node != NULL); + + /* Ensure that the flowring node has valid contents */ + ASSERT(flow_ring_node->prot_info != NULL); + + dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info); + } + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); +} + +/** Mailbox ringbell Function */ +static void +dhd_bus_gen_devmb_intr(struct dhd_bus *bus) +{ + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + DHD_ERROR(("mailbox communication not supported\n")); + return; + } + if (bus->db1_for_mb) { + /* this is a pcie core register, not the config register */ + DHD_INFO(("writing a mail box interrupt to the device, through doorbell 1\n")); + si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678); + } else { + DHD_INFO(("writing a mail box interrupt to the device, through config space\n")); + dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0)); + dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0)); + } +} + +static void +dhd_bus_set_device_wake(struct dhd_bus *bus, bool val) +{ + if (bus->device_wake_state != val) + { + DHD_INFO(("Set Device_Wake to %d\n", val)); +#ifdef PCIE_OOB + if (bus->oob_enabled) + { + if (val) + { + gpio_port = gpio_port | (1 << DEVICE_WAKE); + 
gpio_write_port_non_block(gpio_handle_val, gpio_port); + } else { + gpio_port = gpio_port & (0xff ^ (1 << DEVICE_WAKE)); + gpio_write_port_non_block(gpio_handle_val, gpio_port); + } + } +#endif /* PCIE_OOB */ + bus->device_wake_state = val; + } +} + +#ifdef PCIE_OOB +void +dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val) +{ + DHD_INFO(("Set BT_REG_ON to %d\n", val)); + if (val) + { + gpio_port = gpio_port | (1 << BIT_BT_REG_ON); + gpio_write_port(gpio_handle_val, gpio_port); + } else { + gpio_port = gpio_port & (0xff ^ (1 << BIT_BT_REG_ON)); + gpio_write_port(gpio_handle_val, gpio_port); + } +} + +int +dhd_oob_get_bt_reg_on(struct dhd_bus *bus) +{ + int ret; + uint8 val; + ret = gpio_read_port(gpio_handle_val, &val); + + if (ret < 0) { + DHD_ERROR(("gpio_read_port returns %d\n", ret)); + return ret; + } + + if (val & (1 << BIT_BT_REG_ON)) + { + ret = 1; + } else { + ret = 0; + } + + return ret; +} + +static void +dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus) +{ + if (dhd_doorbell_timeout) + dhd_timeout_start(&bus->doorbell_timer, + (dhd_doorbell_timeout * 1000) / dhd_watchdog_ms); + else if (!(bus->dhd->busstate == DHD_BUS_SUSPEND)) + dhd_bus_set_device_wake(bus, FALSE); +} +#endif /* PCIE_OOB */ + +/** mailbox doorbell ring function */ +void +dhd_bus_ringbell(struct dhd_bus *bus, uint32 value) +{ + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, PCIE_INTB, PCIE_INTB); + } else { + /* this is a pcie core register, not the config register */ + DHD_INFO(("writing a door bell to the device\n")); + si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox, ~0, 0x12345678); + } +} + +void +dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value) +{ +#ifdef PCIE_OOB + dhd_bus_set_device_wake(bus, TRUE); + dhd_bus_doorbell_timeout_reset(bus); +#endif + W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value); +} + +static void +dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value) +{ + uint32 w; + w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB; + W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w); +} + +dhd_mb_ring_t +dhd_bus_get_mbintr_fn(struct dhd_bus *bus) +{ + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx, + PCIMailBoxInt); + if (bus->pcie_mb_intr_addr) { + bus->pcie_mb_intr_osh = si_osh(bus->sih); + return dhd_bus_ringbell_oldpcie; + } + } else { + bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx, + PCIH2D_MailBox); + if (bus->pcie_mb_intr_addr) { + bus->pcie_mb_intr_osh = si_osh(bus->sih); + return dhdpcie_bus_ringbell_fast; + } + } + return dhd_bus_ringbell; +} + +bool BCMFASTPATH +dhd_bus_dpc(struct dhd_bus *bus) +{ + bool resched = FALSE; /* Flag indicating resched wanted */ + unsigned long flags; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + DHD_GENERAL_LOCK(bus->dhd, flags); + /* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS + * to avoid IOCTL Resumed On timeout when ioctl is waiting for response + * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS + * and if we return from here, then IOCTL response will never be handled + */ + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__)); + bus->intstatus = 0; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + return 0; + } + 
bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DPC; + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus); + if (!resched) { + bus->intstatus = 0; + if (!bus->pci_d3hot_done) { + dhdpcie_bus_intr_enable(bus); + } else { + DHD_ERROR(("%s: dhdpcie_bus_intr_enable skip in pci D3hot state \n", + __FUNCTION__)); + } + } + + DHD_GENERAL_LOCK(bus->dhd, flags); + bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DPC; + dhd_os_busbusy_wake(bus->dhd); + DHD_GENERAL_UNLOCK(bus->dhd, flags); + + return resched; + +} + + +static void +dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data) +{ + uint32 cur_h2d_mb_data = 0; + + DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data)); + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__)); + return; + } + + dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0); + + if (cur_h2d_mb_data != 0) { + uint32 i = 0; + DHD_INFO(("GRRRRRRR: MB transaction is already pending 0x%04x\n", cur_h2d_mb_data)); + while ((i++ < 100) && cur_h2d_mb_data) { + OSL_DELAY(10); + dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0); + } + if (i >= 100) { + DHD_ERROR(("%s : waited 1ms for the dngl " + "to ack the previous mb transaction\n", __FUNCTION__)); + DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n", + __FUNCTION__, cur_h2d_mb_data)); + } + } + + dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0); + dhd_bus_gen_devmb_intr(bus); + + if (h2d_mb_data == H2D_HOST_D3_INFORM) { + DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__)); + bus->d3_inform_cnt++; + } + if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) { + DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__)); + bus->d0_inform_in_use_cnt++; + } + if (h2d_mb_data == H2D_HOST_D0_INFORM) { + DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__)); + bus->d0_inform_cnt++; + } +} + +static void +dhdpcie_handle_mb_data(dhd_bus_t *bus) +{ + uint32 d2h_mb_data = 0; + uint32 zero = 0; + dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0); + if (!d2h_mb_data) { + DHD_INFO_HW4(("%s: Invalid D2H_MB_DATA: 0x%08x\n", + __FUNCTION__, d2h_mb_data)); + return; + } + + dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0); + + DHD_INFO_HW4(("D2H_MB_DATA: 0x%08x\n", d2h_mb_data)); + if (d2h_mb_data & D2H_DEV_FWHALT) { + DHD_ERROR(("FW trap has happened\n")); + dhdpcie_checkdied(bus, NULL, 0); + /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */ + bus->dhd->busstate = DHD_BUS_DOWN; + return; + } + if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) { + /* what should we do */ + DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n")); + dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK); + DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n")); + } + if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) { + /* what should we do */ + DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n")); + } + if (d2h_mb_data & D2H_DEV_D3_ACK) { + /* what should we do */ + DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n")); + if (!bus->wait_for_d3_ack) { + bus->wait_for_d3_ack = 1; + dhd_os_d3ack_wake(bus->dhd); + } + } +} + +/* Inform Dongle to print HW Registers for Livelock Debug */ +void dhdpcie_bus_dongle_print_hwregs(struct dhd_bus *bus) +{ + dhdpcie_send_mb_data(bus, H2D_FW_TRAP); +} + +static bool +dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus) +{ + bool resched = FALSE; + + if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) || + (bus->sih->buscorerev == 4)) { + /* 
Msg stream interrupt */ + if (intstatus & I_BIT1) { + resched = dhdpci_bus_read_frames(bus); + } else if (intstatus & I_BIT0) { + /* do nothing for now */ + } + } else { + if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1)) + dhdpcie_handle_mb_data(bus); + + if (bus->dhd->busstate == DHD_BUS_SUSPEND) { + goto exit; + } + + if (intstatus & PCIE_MB_D2H_MB_MASK) { + resched = dhdpci_bus_read_frames(bus); + } + } + +exit: + return resched; +} + +static bool +dhdpci_bus_read_frames(dhd_bus_t *bus) +{ + bool more = FALSE; + + /* There may be frames in both ctrl buf and data buf; check ctrl buf first */ + DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); + dhd_prot_process_ctrlbuf(bus->dhd); + /* Unlock to give chance for resp to be handled */ + DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); + + DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); + /* update the flow ring cpls */ + dhd_update_txflowrings(bus->dhd); + + /* With heavy TX traffic, we could get a lot of TxStatus + * so add bound + */ + more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound); + + /* With heavy RX traffic, this routine potentially could spend some time + * processing RX frames without RX bound + */ + more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound); + + /* don't talk to the dongle if fw is about to be reloaded */ + if (bus->dhd->hang_was_sent) { + more = FALSE; + } + DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT)); + + return more; +} + +bool +dhdpcie_tcm_valid(dhd_bus_t *bus) +{ + uint32 addr = 0; + int rv; + uint32 shaddr = 0; + pciedev_shared_t sh; + + shaddr = bus->dongle_ram_base + bus->ramsize - 4; + + /* Read last word in memory to determine address of pciedev_shared structure */ + addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr)); + + if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) || + (addr > shaddr)) { + DHD_ERROR(("%s: address (0x%08x) of pciedev_shared is invalid\n", + __FUNCTION__, addr)); + return FALSE; + } + + /* Read hndrte_shared structure */ + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh, + sizeof(pciedev_shared_t))) < 0) { + DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv)); + return FALSE; + } + + /* Compare any field in pciedev_shared_t */ + if (sh.console_addr != bus->pcie_sh->console_addr) { + DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n")); + return FALSE; + } + + return TRUE; +} + +static bool +dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version) +{ + DHD_INFO(("firmware api revision %d, host api revision %d\n", + firmware_api_version, host_api_version)); + if (firmware_api_version <= host_api_version) + return TRUE; + if ((firmware_api_version == 6) && (host_api_version == 5)) + return TRUE; + if ((firmware_api_version == 5) && (host_api_version == 6)) + return TRUE; + return FALSE; +} +
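dhdpcie_readshared() below locates the pciedev_shared structure by polling the last 32-bit word of dongle RAM, which the firmware overwrites with the structure's address once it has booted; the value is rejected while it is still zero or the NVRAM length token, or if it falls outside dongle RAM. The validity test reduces to a few comparisons (a sketch; the base address and sizes in main() are hypothetical example values):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the validity test in dhdpcie_readshared()/dhdpcie_tcm_valid():
     * 'addr' was read from the last word of RAM (ram_base + ramsize - 4). */
    static int shared_addr_valid(uint32_t addr, uint32_t ram_base,
                                 uint32_t ramsize, uint32_t nvram_csm)
    {
            uint32_t shaddr = ram_base + ramsize - 4;

            if (addr == 0)          return 0;  /* firmware not booted yet      */
            if (addr == nvram_csm)  return 0;  /* still the NVRAM length token */
            if (addr < ram_base)    return 0;  /* below dongle RAM             */
            if (addr > shaddr)      return 0;  /* beyond the last word         */
            return 1;
    }

    int main(void)
    {
            /* illustrative values only */
            uint32_t ram_base = 0x180000, ramsize = 0xc0000, csm = 0xfeff0100;
            printf("%d %d\n",
                   shared_addr_valid(0x1a0000, ram_base, ramsize, csm),   /* 1 */
                   shared_addr_valid(0, ram_base, ramsize, csm));         /* 0 */
            return 0;
    }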
+static int +dhdpcie_readshared(dhd_bus_t *bus) +{ + uint32 addr = 0; + int rv, dma_indx_wr_buf, dma_indx_rd_buf; + uint32 shaddr = 0; + pciedev_shared_t *sh = bus->pcie_sh; + dhd_timeout_t tmo; + + shaddr = bus->dongle_ram_base + bus->ramsize - 4; + + DHD_INFO_HW4(("%s: ram_base: 0x%x ramsize 0x%x tcm: %p shaddr: 0x%x nvram_csm: 0x%x\n", + __FUNCTION__, bus->dongle_ram_base, bus->ramsize, + bus->tcm, shaddr, bus->nvram_csm)); + /* start a timer for 5 seconds */ + dhd_timeout_start(&tmo, MAX_READ_TIMEOUT); + + while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) { + /* Read last word in memory to determine address of pciedev_shared structure */ + addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr)); + } + + if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) || + (addr > shaddr)) { + DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n", + __FUNCTION__, addr)); + DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed)); + return BCME_ERROR; + } else { + bus->shared_addr = (ulong)addr; + DHD_ERROR(("PCIe shared addr (0x%08x) read took %u usec " + "before dongle is ready\n", addr, tmo.elapsed)); + } + + /* Read hndrte_shared structure */ + if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh, + sizeof(pciedev_shared_t))) < 0) { + DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv)); + return rv; + } + + /* Endianness */ + sh->flags = ltoh32(sh->flags); + sh->trap_addr = ltoh32(sh->trap_addr); + sh->assert_exp_addr = ltoh32(sh->assert_exp_addr); + sh->assert_file_addr = ltoh32(sh->assert_file_addr); + sh->assert_line = ltoh32(sh->assert_line); + sh->console_addr = ltoh32(sh->console_addr); + sh->msgtrace_addr = ltoh32(sh->msgtrace_addr); + sh->dma_rxoffset = ltoh32(sh->dma_rxoffset); + sh->rings_info_ptr = ltoh32(sh->rings_info_ptr); + +#ifdef DHD_DEBUG + /* load bus console address */ + bus->console_addr = sh->console_addr; +#endif + + /* Read the dma rx offset */ + bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset; + dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset); + + DHD_ERROR(("DMA RX offset from shared Area %d\n", bus->dma_rxoffset)); + + if (!(dhdpcie_check_firmware_compatible(sh->flags & PCIE_SHARED_VERSION_MASK, + PCIE_SHARED_VERSION))) + { + DHD_ERROR(("%s: pcie_shared version %d in dhd " + "is older than pciedev_shared version %d in dongle\n", + __FUNCTION__, PCIE_SHARED_VERSION, + sh->flags & PCIE_SHARED_VERSION_MASK)); + return BCME_ERROR; + } + + bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ? + sizeof(uint16) : sizeof(uint32); + DHD_ERROR(("%s: Dongle advertises %d size indices\n", + __FUNCTION__, bus->rw_index_sz)); + + /* Does the FW support DMA'ing r/w indices */ + if (sh->flags & PCIE_SHARED_DMA_INDEX) { + + + DHD_ERROR(("%s: Host supports DMAing indices: H2D:%d - D2H:%d. FW supports it\n", + __FUNCTION__, + (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0), + (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0))); + + } else if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) || + DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) { + +#ifdef BCM_INDX_DMA + DHD_ERROR(("%s: Incompatible FW. 
FW does not support DMAing indices\n", + __FUNCTION__)); + return BCME_ERROR; +#endif + DHD_ERROR(("%s: Host supports DMAing indices but FW does not\n", + __FUNCTION__)); + bus->dhd->dma_d2h_ring_upd_support = FALSE; + bus->dhd->dma_h2d_ring_upd_support = FALSE; + } + + + /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */ + { + ring_info_t ring_info; + + if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr, + (uint8 *)&ring_info, sizeof(ring_info_t))) < 0) + return rv; + + bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr); + bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr); + + + bus->max_sub_queues = ltoh16(ring_info.max_sub_queues); + + /* If both FW and Host support DMA'ing indices, allocate memory and notify FW + * The max_sub_queues is read from FW initialized ring_info + */ + if (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) { + dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + H2D_DMA_INDX_WR_BUF, bus->max_sub_queues); + dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + D2H_DMA_INDX_RD_BUF, BCMPCIE_D2H_COMMON_MSGRINGS); + + if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) { + DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices; " + "Host will use w/r indices in TCM\n", + __FUNCTION__)); + bus->dhd->dma_h2d_ring_upd_support = FALSE; + } + } + + if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support)) { + dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + D2H_DMA_INDX_WR_BUF, BCMPCIE_D2H_COMMON_MSGRINGS); + dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz, + H2D_DMA_INDX_RD_BUF, bus->max_sub_queues); + + if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) { + DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices; " + "Host will use w/r indices in TCM\n", + __FUNCTION__)); + bus->dhd->dma_d2h_ring_upd_support = FALSE; + } + } + + /* read ringmem and ringstate ptrs from shared area and store in host variables */ + dhd_fillup_ring_sharedptr_info(bus, &ring_info); + + bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t)); + DHD_INFO(("ring_info\n")); + + DHD_ERROR(("%s: max H2D queues %d\n", + __FUNCTION__, ltoh16(ring_info.max_sub_queues))); + + DHD_INFO(("mail box address\n")); + DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n", + __FUNCTION__, bus->h2d_mb_data_ptr_addr)); + DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n", + __FUNCTION__, bus->d2h_mb_data_ptr_addr)); + } + + bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK; + DHD_INFO(("%s: d2h_sync_mode 0x%08x\n", + __FUNCTION__, bus->dhd->d2h_sync_mode)); + + return BCME_OK; +} /* dhdpcie_readshared */ +
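dhd_fillup_ring_sharedptr_info() below walks flat arrays in dongle memory: one ring_mem record per common ring, spaced sizeof(ring_mem_t) apart, and write/read index slots spaced rw_index_sz apart (H2D common rings first, then D2H, then per-flow TX rings). The address arithmetic reduces to a pair of helpers; the sizes and base addresses below are illustrative stand-ins, not the real layout:

    #include <stdint.h>
    #include <stdio.h>

    #define H2D_COMMON_RINGS  2   /* control + rxpost submit         */
    #define RING_MEM_SZ      16   /* stand-in for sizeof(ring_mem_t) */

    /* i-th ring's ring_mem record: base + i * sizeof(ring_mem_t) */
    static uint32_t ring_mem_addr(uint32_t ringmem_base, unsigned i)
    {
            return ringmem_base + i * RING_MEM_SZ;
    }

    /* i-th H2D ring's write-index slot: base + i * rw_index_sz
     * (rw_index_sz is 2 or 4 bytes, as advertised by the dongle) */
    static uint32_t h2d_w_idx_addr(uint32_t h2d_w_base, unsigned i,
                                   unsigned rw_index_sz)
    {
            return h2d_w_base + i * rw_index_sz;
    }

    int main(void)
    {
            /* hypothetical base addresses, for illustration only */
            uint32_t ringmem_base = 0x1000, h2d_w_base = 0x2000;
            unsigned i;
            for (i = 0; i < H2D_COMMON_RINGS; i++)
                    printf("ring %u: mem 0x%04x  w-idx 0x%04x\n", i,
                           ring_mem_addr(ringmem_base, i),
                           h2d_w_idx_addr(h2d_w_base, i, 4));
            return 0;
    }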
+/** Read ring mem and ring state ptr info from shared memory area in device memory */ +static void +dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info) +{ + uint16 i = 0; + uint16 j = 0; + uint32 tcm_memloc; + uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr; + + /* Ring mem ptr info */ + /* Allocated in the order + H2D_MSGRING_CONTROL_SUBMIT 0 + H2D_MSGRING_RXPOST_SUBMIT 1 + D2H_MSGRING_CONTROL_COMPLETE 2 + D2H_MSGRING_TX_COMPLETE 3 + D2H_MSGRING_RX_COMPLETE 4 + */ + + { + /* ringmemptr holds start of the mem block address space */ + tcm_memloc = ltoh32(ring_info->ringmem_ptr); + + /* Find out the ringmem ptr for each common ring */ + for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) { + bus->ring_sh[i].ring_mem_addr = tcm_memloc; + /* Update mem block */ + tcm_memloc = tcm_memloc + sizeof(ring_mem_t); + DHD_INFO(("ring id %d ring mem addr 0x%04x \n", + i, bus->ring_sh[i].ring_mem_addr)); + } + } + + /* Ring state mem ptr info */ + { + d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr); + d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr); + h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr); + h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr); + + /* Store h2d common ring write/read pointers */ + for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) { + bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr; + bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr; + + /* update mem block */ + h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz; + h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz; + + DHD_INFO(("h2d w/r : idx %d write %x read %x \n", i, + bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); + } + + /* Store d2h common ring write/read pointers */ + for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) { + bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr; + bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr; + + /* update mem block */ + d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz; + d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz; + + DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i, + bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r)); + } + + /* Store txflow ring write/read pointers */ + for (j = 0; j < (bus->max_sub_queues - BCMPCIE_H2D_COMMON_MSGRINGS); + i++, j++) + { + bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr; + bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr; + + /* update mem block */ + h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz; + h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz; + + DHD_INFO(("FLOW Rings h2d w/r : idx %d write %x read %x \n", i, + bus->ring_sh[i].ring_state_w, + bus->ring_sh[i].ring_state_r)); + } + } +} /* dhd_fillup_ring_sharedptr_info */ + +/** + * Initialize bus module: prepare for communication with the dongle. Called after downloading + * firmware into the dongle. + */ +int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) +{ + dhd_bus_t *bus = dhdp->bus; + int ret = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(bus->dhd); + if (!bus->dhd) + return 0; + + /* Make sure we're talking to the core. */ + bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0); + ASSERT(bus->reg != NULL); + + /* before opening up bus for data transfer, check if the shared area is intact */ + ret = dhdpcie_readshared(bus); + if (ret < 0) { + DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__)); + return ret; + } + + /* Make sure we're talking to the core. 
*/ + bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0); + ASSERT(bus->reg != NULL); + + /* Set bus state according to enable result */ + dhdp->busstate = DHD_BUS_DATA; + + if (!dhd_download_fw_on_driverload) + dhd_dpc_enable(bus->dhd); + + /* Enable the interrupt after device is up */ + dhdpcie_bus_intr_enable(bus); + + /* bcmsdh_intr_unmask(bus->sdh); */ + +#ifdef DHD_PCIE_RUNTIMEPM + bus->idlecount = 0; + bus->idletime = (int32)MAX_IDLE_COUNT; + init_waitqueue_head(&bus->rpm_queue); + mutex_init(&bus->pm_lock); +#endif /* DHD_PCIE_RUNTIMEPM */ + + bus->d3_ack_war_cnt = 0; + + return ret; +} + +static void +dhdpcie_init_shared_addr(dhd_bus_t *bus) +{ + uint32 addr = 0; + uint32 val = 0; + addr = bus->dongle_ram_base + bus->ramsize - 4; +#ifdef DHD_PCIE_RUNTIMEPM + dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0)); +#endif /* DHD_PCIE_RUNTIMEPM */ + DHD_INFO_HW4(("%s: tcm: %p, addr: 0x%x val: 0x%x\n", __FUNCTION__, bus->tcm, addr, val)); + dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val)); +} + + +bool +dhdpcie_chipmatch(uint16 vendor, uint16 device) +{ + if (vendor != PCI_VENDOR_ID_BROADCOM) { + DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, + vendor, device)); + return (-ENODEV); + } + + if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) || + (device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) || + (device == BCM43569_CHIP_ID)) + return 0; + + if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) || + (device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID)) + return 0; + + if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) || + (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID)) + return 0; + + if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) || + (device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device)) + return 0; + + if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) || + (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID)) + return 0; + + if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) || + (device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID)) + return 0; + + if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) || + (device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID)) + return 0; + + if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) || + (device == BCM4358_D11AC5G_ID)) + return 0; + + if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) || + (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID)) + return 0; + + if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) || + (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID)) + return 0; + + if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) || + (device == BCM4359_D11AC5G_ID)) + return 0; + + if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) || + (device == BCM43596_D11AC5G_ID)) + return 0; + + + if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) || + (device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID)) + return 0; + + if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) || + (device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID)) + return 0; + + DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device)); + return (-ENODEV); +} /* dhdpcie_chipmatch */ + +/** + * Name: dhdpcie_cc_nvmshadow + * + * Description: + * A shadow of OTP/SPROM exists in 
ChipCommon Region + * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF). + * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size + * can also be read from ChipCommon Registers. + */ +static int +dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b) +{ + uint16 dump_offset = 0; + uint32 dump_size = 0, otp_size = 0, sprom_size = 0; + + /* Table for 65nm OTP Size (in bits) */ + int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024}; + + volatile uint16 *nvm_shadow; + + uint cur_coreid; + uint chipc_corerev; + chipcregs_t *chipcregs; + + /* Save the current core */ + cur_coreid = si_coreid(bus->sih); + /* Switch to ChipC */ + chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0); + ASSERT(chipcregs != NULL); + + chipc_corerev = si_corerev(bus->sih); + + /* Check ChipcommonCore Rev */ + if (chipc_corerev < 44) { + DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev)); + return BCME_UNSUPPORTED; + } + + /* Check ChipID */ + if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip)) { + DHD_ERROR(("%s: cc_nvmdump cmd. supported for 4350/4345 only\n", + __FUNCTION__)); + return BCME_UNSUPPORTED; + } + + /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */ + if (chipcregs->sromcontrol & SRC_PRESENT) { + /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */ + sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK) + >> SRC_SIZE_SHIFT))) * 1024; + bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size); + } + + if (chipcregs->sromcontrol & SRC_OTPPRESENT) { + bcm_bprintf(b, "\nOTP Present"); + + if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT) + == OTPL_WRAP_TYPE_40NM) { + /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */ + otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE) + >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024; + bcm_bprintf(b, "(Size %d bits)\n", otp_size); + } else { + /* This part is untested since newer chips have 40nm OTP */ + otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE) + >> CC_CAP_OTPSIZE_SHIFT]; + bcm_bprintf(b, "(Size %d bits)\n", otp_size); + DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n", + __FUNCTION__)); + } + } + + if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) && + ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) { + DHD_ERROR(("%s: SPROM and OTP could not be found \n", + __FUNCTION__)); + return BCME_NOTFOUND; + } + + /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */ + if ((chipcregs->sromcontrol & SRC_OTPSEL) && + (chipcregs->sromcontrol & SRC_OTPPRESENT)) { + + bcm_bprintf(b, "OTP Strap selected.\n" + "\nOTP Shadow in ChipCommon:\n"); + + dump_size = otp_size / 16 ; /* 16bit words */ + + } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) && + (chipcregs->sromcontrol & SRC_PRESENT)) { + + bcm_bprintf(b, "SPROM Strap selected\n" + "\nSPROM Shadow in ChipCommon:\n"); + + /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */ + /* dump_size in 16bit words */ + dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16; + } else { + DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n", + __FUNCTION__)); + return BCME_NOTFOUND; + } + + if (bus->regs == NULL) { + DHD_ERROR(("ChipCommon Regs. not initialized\n")); + return BCME_NOTREADY; + } else { + bcm_bprintf(b, "\n OffSet:"); + + /* Point to the SPROM/OTP shadow in ChipCommon */ + nvm_shadow = chipcregs->sromotp; + + /* + * Read 16 bits / iteration. 
+ * dump_size & dump_offset in 16-bit words + */ + while (dump_offset < dump_size) { + if (dump_offset % 2 == 0) + /* Print the offset in the shadow space in Bytes */ + bcm_bprintf(b, "\n 0x%04x", dump_offset * 2); + + bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset)); + dump_offset += 0x1; + } + } + + /* Switch back to the original core */ + si_setcore(bus->sih, cur_coreid, 0); + + return BCME_OK; +} /* dhdpcie_cc_nvmshadow */ + +/** Flow rings are dynamically created and destroyed */ +void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node) +{ + void *pkt; + flow_queue_t *queue; + flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node; + unsigned long flags; + + queue = &flow_ring_node->queue; + +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt, + * when there is a newly coming packet from network stack. + */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + + /* clean up BUS level info */ + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + + /* Flush all pending packets in the queue, if any */ + while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { + PKTFREE(bus->dhd->osh, pkt, TRUE); + } + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + + flow_ring_node->status = FLOW_RING_STATUS_CLOSED; + flow_ring_node->active = FALSE; + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags); + dll_delete(&flow_ring_node->list); + DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags); + + /* Release the flowring object back into the pool */ + dhd_prot_flowrings_pool_release(bus->dhd, + flow_ring_node->flowid, flow_ring_node->prot_info); + + /* Free the flowid back to the flowid allocator */ + dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex, + flow_ring_node->flowid); +} + +/** + * Allocate a Flow ring buffer, + * Init Ring buffer, send Msg to device about flow ring creation +*/ +int +dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg) +{ + flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg; + + DHD_INFO(("%s :Flow create\n", __FUNCTION__)); + + /* Send Msg to device about flow ring creation */ + if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK) + return BCME_NOMEM; + + return BCME_OK; +} + +/** Handle response from dongle on a 'flow ring create' request */ +void +dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status) +{ + flow_ring_node_t *flow_ring_node; + unsigned long flags; + + DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid)); + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + ASSERT(flow_ring_node->flowid == flowid); + + if (status != BCME_OK) { + DHD_ERROR(("%s Flow create Response failure error status = %d \n", + __FUNCTION__, status)); + /* Call Flow clean up */ + dhd_bus_clean_flow_ring(bus, flow_ring_node); + return; + } + + DHD_FLOWRING_LOCK(flow_ring_node->lock, flags); + flow_ring_node->status = FLOW_RING_STATUS_OPEN; + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* Now add the Flow ring node into the active list + * Note that this code to add the newly created node to the active + * list was living in dhd_flowid_lookup. But note that after + * adding the node to the active list the contents of node is being + * filled in dhd_prot_flow_ring_create. + * If there is a D2H interrupt after the node gets added to the + * active list and before the node gets populated with values + * from the Bottom half dhd_update_txflowrings would be called. 
+	 * It would then try to walk through the active flow ring list,
+	 * pick up the nodes and operate on them. Now note that since
+	 * the function dhd_prot_flow_ring_create is not finished yet
+	 * the contents of flow_ring_node can still be NULL leading to
+	 * crashes. Hence the flow_ring_node should be added to the
+	 * active list only after it's truly created, which is after
+	 * receiving the create response message from the dongle.
+	 */
+
+	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+	dll_prepend(&bus->const_flowring, &flow_ring_node->list);
+	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+
+	dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
+
+	return;
+}
+
+int
+dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
+{
+	void * pkt;
+	flow_queue_t *queue;
+	flow_ring_node_t *flow_ring_node;
+	unsigned long flags;
+
+	DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
+
+	flow_ring_node = (flow_ring_node_t *)arg;
+
+	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+	if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
+		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+		DHD_ERROR(("%s :Delete Pending Flow %d\n",
+			__FUNCTION__, flow_ring_node->flowid));
+		return BCME_ERROR;
+	}
+	flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
+
+	queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+	 * when there is a newly coming packet from network stack.
+	 */
+	dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+	/* Flush all pending packets in the queue, if any */
+	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+		PKTFREE(bus->dhd->osh, pkt, TRUE);
+	}
+	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
+
+	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+	/* Send Msg to device about flow ring deletion */
+	dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
+
+	return BCME_OK;
+}
+
+void
+dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
+{
+	flow_ring_node_t *flow_ring_node;
+
+	DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
+
+	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+	ASSERT(flow_ring_node->flowid == flowid);
+
+	if (status != BCME_OK) {
+		DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
+			__FUNCTION__, status));
+		return;
+	}
+	/* Call Flow clean up */
+	dhd_bus_clean_flow_ring(bus, flow_ring_node);
+
+	return;
+
+}
+
+/** This function is not called. Obsolete? */
+int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
+{
+	void *pkt;
+	flow_queue_t *queue;
+	flow_ring_node_t *flow_ring_node;
+	unsigned long flags;
+
+	DHD_INFO(("%s :Flow Flush\n", __FUNCTION__));
+
+	flow_ring_node = (flow_ring_node_t *)arg;
+	queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+#ifdef DHDTCPACK_SUPPRESS
+	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+	 * when there is a newly coming packet from network stack.
+ */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + + /* Flush all pending packets in the queue, if any */ + while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) { + PKTFREE(bus->dhd->osh, pkt, TRUE); + } + ASSERT(DHD_FLOW_QUEUE_EMPTY(queue)); + + DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags); + + /* Send Msg to device about flow ring flush */ + dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node); + + flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING; + return BCME_OK; +} + +void +dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status) +{ + flow_ring_node_t *flow_ring_node; + + if (status != BCME_OK) { + DHD_ERROR(("%s Flow flush Response failure error status = %d \n", + __FUNCTION__, status)); + return; + } + + flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid); + ASSERT(flow_ring_node->flowid == flowid); + + flow_ring_node->status = FLOW_RING_STATUS_OPEN; + return; +} + +uint32 +dhd_bus_max_h2d_queues(struct dhd_bus *bus) +{ + return bus->max_sub_queues; +} + +/* To be symmetric with SDIO */ +void +dhd_bus_pktq_flush(dhd_pub_t *dhdp) +{ + return; +} + +void +dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val) +{ + dhdp->bus->is_linkdown = val; +} + +int +dhdpcie_bus_clock_start(struct dhd_bus *bus) +{ + return dhdpcie_start_host_pcieclock(bus); +} + +int +dhdpcie_bus_clock_stop(struct dhd_bus *bus) +{ + return dhdpcie_stop_host_pcieclock(bus); +} + +int +dhdpcie_bus_disable_device(struct dhd_bus *bus) +{ + return dhdpcie_disable_device(bus); +} + +int +dhdpcie_bus_enable_device(struct dhd_bus *bus) +{ + return dhdpcie_enable_device(bus); +} + +int +dhdpcie_bus_alloc_resource(struct dhd_bus *bus) +{ + return dhdpcie_alloc_resource(bus); +} + +void +dhdpcie_bus_free_resource(struct dhd_bus *bus) +{ + dhdpcie_free_resource(bus); +} + +int +dhd_bus_request_irq(struct dhd_bus *bus) +{ + return dhdpcie_bus_request_irq(bus); +} + +bool +dhdpcie_bus_dongle_attach(struct dhd_bus *bus) +{ + return dhdpcie_dongle_attach(bus); +} + +int +dhd_bus_release_dongle(struct dhd_bus *bus) +{ + bool dongle_isolation; + osl_t *osh; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus) { + osh = bus->osh; + ASSERT(osh); + + if (bus->dhd) { + dongle_isolation = bus->dhd->dongle_isolation; + dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE); + } + } + + return 0; +} + +#ifdef BCMPCIE_OOB_HOST_WAKE +int +dhd_bus_oob_intr_register(dhd_pub_t *dhdp) +{ + return dhdpcie_oob_intr_register(dhdp->bus); +} + +void +dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp) +{ + dhdpcie_oob_intr_unregister(dhdp->bus); +} + +void +dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable) +{ + dhdpcie_oob_intr_set(dhdp->bus, enable); +} +#endif /* BCMPCIE_OOB_HOST_WAKE */ diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie.h b/drivers/net/wireless/bcmdhd/dhd_pcie.h new file mode 100644 index 000000000000..511d00e8ce2c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_pcie.h @@ -0,0 +1,315 @@ +/* + * Linux DHD Bus Module for PCIE + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy 
and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_pcie.h 607608 2015-12-21 13:14:19Z $ + */ + + +#ifndef dhd_pcie_h +#define dhd_pcie_h + +#include +#include +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM +#ifdef CONFIG_PCI_MSM +#include +#else +#include +#endif /* CONFIG_PCI_MSM */ +#endif /* CONFIG_ARCH_MSM */ +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +#ifdef CONFIG_SOC_EXYNOS8890 +#include +extern int exynos_pcie_register_event(struct exynos_pcie_register_event *reg); +extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg); +#endif /* CONFIG_SOC_EXYNOS8890 */ +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + +#ifdef DHD_PCIE_RUNTIMEPM +#include +#include + +#define DEFAULT_DHD_RUNTIME_MS 100 +#ifndef CUSTOM_DHD_RUNTIME_MS +#define CUSTOM_DHD_RUNTIME_MS DEFAULT_DHD_RUNTIME_MS +#endif /* CUSTOM_DHD_RUNTIME_MS */ + + +#ifndef MAX_IDLE_COUNT +#define MAX_IDLE_COUNT 16 +#endif /* MAX_IDLE_COUNT */ + +#ifndef MAX_RESUME_WAIT +#define MAX_RESUME_WAIT 100 +#endif /* MAX_RESUME_WAIT */ +#endif /* DHD_PCIE_RUNTIMEPM */ + +/* defines */ + +#define PCMSGBUF_HDRLEN 0 +#define DONGLE_REG_MAP_SIZE (32 * 1024) +#define DONGLE_TCM_MAP_SIZE (4096 * 1024) +#define DONGLE_MIN_MEMSIZE (128 *1024) +#ifdef DHD_DEBUG +#define DHD_PCIE_SUCCESS 0 +#define DHD_PCIE_FAILURE 1 +#endif /* DHD_DEBUG */ +#define REMAP_ENAB(bus) ((bus)->remap) +#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize))) + +#ifdef SUPPORT_LINKDOWN_RECOVERY +#ifdef CONFIG_ARCH_MSM +#define struct_pcie_notify struct msm_pcie_notify +#define struct_pcie_register_event struct msm_pcie_register_event +#endif /* CONFIG_ARCH_MSM */ +#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY +#ifdef CONFIG_SOC_EXYNOS8890 +#define struct_pcie_notify struct exynos_pcie_notify +#define struct_pcie_register_event struct exynos_pcie_register_event +#endif /* CONFIG_SOC_EXYNOS8890 */ +#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ + +/* + * Router with 4366 can have 128 stations and 16 BSS, + * hence (128 stations x 4 access categories for ucast) + 16 bc/mc flowrings + */ +#define MAX_DHD_TX_FLOWS 320 + +/* user defined data structures */ +/* Device console log buffer state */ +#define CONSOLE_LINE_MAX 192 +#define CONSOLE_BUFFER_MAX (8 * 1024) + +#ifndef MAX_CNTL_D3ACK_TIMEOUT +#define MAX_CNTL_D3ACK_TIMEOUT 2 +#endif /* MAX_CNTL_D3ACK_TIMEOUT */ + +#ifdef DHD_DEBUG + +typedef struct dhd_console { + uint count; /* Poll interval msec counter */ + uint log_addr; /* Log struct address (fixed) */ + hnd_log_t log; /* Log struct (host copy) */ + uint bufsize; /* Size of log buffer */ + uint8 *buf; /* Log buffer (host copy) */ + uint last; /* Last buffer read index */ +} dhd_console_t; +#endif /* DHD_DEBUG */ +typedef struct ring_sh_info { + uint32 ring_mem_addr; + uint32 ring_state_w; + uint32 ring_state_r; +} ring_sh_info_t; + +typedef struct dhd_bus { + dhd_pub_t *dhd; + struct 
pci_dev *dev; /* pci device handle */ + dll_t const_flowring; /* constructed list of tx flowring queues */ + + si_t *sih; /* Handle for SI calls */ + char *vars; /* Variables (from CIS and/or other) */ + uint varsz; /* Size of variables buffer */ + uint32 sbaddr; /* Current SB window pointer (-1, invalid) */ + sbpcieregs_t *reg; /* Registers for PCIE core */ + + uint armrev; /* CPU core revision */ + uint ramrev; /* SOCRAM core revision */ + uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 srmemsize; /* Size of SRMEM */ + + uint32 bus; /* gSPI or SDIO bus */ + uint32 intstatus; /* Intstatus bits (events) pending */ + bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */ + bool fcstate; /* State of dongle flow-control */ + + uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */ + char *fw_path; /* module_param: path to firmware image */ + char *nv_path; /* module_param: path to nvram vars file */ +#ifdef CACHE_FW_IMAGES + int processed_nvram_params_len; /* Modified len of NVRAM info */ +#endif + + + struct pktq txq; /* Queue length used for flow-control */ + + bool intr; /* Use interrupts */ + bool ipend; /* Device interrupt is pending */ + bool intdis; /* Interrupts disabled by isr */ + uint intrcount; /* Count of device interrupt callbacks */ + uint lastintrs; /* Count as of last watchdog timer */ + +#ifdef DHD_DEBUG + dhd_console_t console; /* Console output polling support */ + uint console_addr; /* Console address from shared struct */ +#endif /* DHD_DEBUG */ + + bool alp_only; /* Don't use HT clock (ALP only) */ + + bool remap; /* Contiguous 1MB RAM: 512K socram + 512K devram + * Available with socram rev 16 + * Remap region not DMA-able + */ + uint32 resetinstr; + uint32 dongle_ram_base; + + ulong shared_addr; + pciedev_shared_t *pcie_sh; + bool bus_flowctrl; + uint32 dma_rxoffset; + volatile char *regs; /* pci device memory va */ + volatile char *tcm; /* pci device memory va */ + osl_t *osh; + uint32 nvram_csm; /* Nvram checksum */ + uint16 pollrate; + uint16 polltick; + + uint32 *pcie_mb_intr_addr; + void *pcie_mb_intr_osh; + bool sleep_allowed; + + /* version 3 shared struct related info start */ + ring_sh_info_t ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS]; + uint8 h2d_ring_count; + uint8 d2h_ring_count; + uint32 ringmem_ptr; + uint32 ring_state_ptr; + + uint32 d2h_dma_scratch_buffer_mem_addr; + + uint32 h2d_mb_data_ptr_addr; + uint32 d2h_mb_data_ptr_addr; + /* version 3 shared struct related info end */ + + uint32 def_intmask; + bool ltrsleep_on_unload; + uint wait_for_d3_ack; + uint32 max_sub_queues; + uint32 rw_index_sz; + bool db1_for_mb; + bool suspended; + + dhd_timeout_t doorbell_timer; + bool device_wake_state; + bool irq_registered; +#ifdef PCIE_OOB + bool oob_enabled; +#endif /* PCIE_OOB */ +#ifdef SUPPORT_LINKDOWN_RECOVERY +#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \ + defined(CONFIG_SOC_EXYNOS8890)) +#ifdef CONFIG_ARCH_MSM + uint8 no_cfg_restore; +#endif /* CONFIG_ARCH_MSM */ + struct_pcie_register_event pcie_event; +#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY && CONFIG_SOC_EXYNOS8890) */ +#endif /* SUPPORT_LINKDOWN_RECOVERY */ +#ifdef DHD_PCIE_RUNTIMEPM + int32 idlecount; /* Activity timeout counter */ + int32 idletime; /* Control for activity timeout */ + int32 bus_wake; /* For wake up the bus */ + bool runtime_resume_done; /* For check runtime suspend end */ + struct mutex pm_lock; /* Synchronize for system PM & runtime PM */ 
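+	/* Threads that resume the bus sleep on rpm_queue below until
+	 * runtime_resume_done is set again by the runtime-PM state machine
+	 * (see dhd_runtimepm_state() in dhd_pcie_linux.c).
+	 */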
+ wait_queue_head_t rpm_queue; /* wait-queue for bus wake up */ +#endif /* DHD_PCIE_RUNTIMEPM */ + uint32 d3_inform_cnt; + uint32 d0_inform_cnt; + uint32 d0_inform_in_use_cnt; + uint8 force_suspend; + uint32 d3_ack_war_cnt; + uint8 is_linkdown; + uint32 pci_d3hot_done; +} dhd_bus_t; + +/* function declarations */ + +extern uint32* dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size); +extern int dhdpcie_bus_register(void); +extern void dhdpcie_bus_unregister(void); +extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device); + +extern struct dhd_bus* dhdpcie_bus_attach(osl_t *osh, + volatile char *regs, volatile char *tcm, void *pci_dev); +extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size); +extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data); +extern void dhdpcie_bus_intr_enable(struct dhd_bus *bus); +extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus); +extern void dhdpcie_bus_release(struct dhd_bus *bus); +extern int32 dhdpcie_bus_isr(struct dhd_bus *bus); +extern void dhdpcie_free_irq(dhd_bus_t *bus); +extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value); +extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state); +extern int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state); +extern bool dhdpcie_tcm_valid(dhd_bus_t *bus); +extern void dhdpcie_bus_dongle_print_hwregs(struct dhd_bus *bus); +#ifndef BCMPCIE_OOB_HOST_WAKE +extern void dhdpcie_pme_active(osl_t *osh, bool enable); +#endif /* !BCMPCIE_OOB_HOST_WAKE */ +extern bool dhdpcie_pme_cap(osl_t *osh); +extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus); +extern int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus); +extern int dhdpcie_disable_device(dhd_bus_t *bus); +extern int dhdpcie_enable_device(dhd_bus_t *bus); +extern int dhdpcie_alloc_resource(dhd_bus_t *bus); +extern void dhdpcie_free_resource(dhd_bus_t *bus); +extern int dhdpcie_bus_request_irq(struct dhd_bus *bus); +#ifdef BCMPCIE_OOB_HOST_WAKE +extern int dhdpcie_oob_intr_register(dhd_bus_t *bus); +extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus); +extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable); +#endif /* BCMPCIE_OOB_HOST_WAKE */ +#ifdef PCIE_OOB +extern void dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val); +extern int dhd_oob_get_bt_reg_on(struct dhd_bus *bus); +#endif /* PCIE_OOB */ + +#ifdef USE_EXYNOS_PCIE_RC_PMPATCH +#if defined(CONFIG_MACH_UNIVERSAL5433) +#define SAMSUNG_PCIE_DEVICE_ID 0xa5e3 +#define SAMSUNG_PCIE_CH_NUM +#elif defined(CONFIG_MACH_UNIVERSAL7420) +#define SAMSUNG_PCIE_DEVICE_ID 0xa575 +#define SAMSUNG_PCIE_CH_NUM 1 +#elif defined(CONFIG_SOC_EXYNOS8890) +#define SAMSUNG_PCIE_DEVICE_ID 0xa544 +#define SAMSUNG_PCIE_CH_NUM 0 +#else +#error "Not supported platform" +#endif +#ifdef CONFIG_MACH_UNIVERSAL5433 +extern int exynos_pcie_pm_suspend(void); +extern int exynos_pcie_pm_resume(void); +#else +extern int exynos_pcie_pm_suspend(int ch_num); +extern int exynos_pcie_pm_resume(int ch_num); +#endif /* CONFIG_MACH_UNIVERSAL5433 */ +#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ + +extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus); +#endif /* dhd_pcie_h */ diff --git a/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c b/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c new file mode 100644 index 000000000000..e9814d22e559 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_pcie_linux.c @@ -0,0 +1,1562 @@ +/* + * Linux DHD Bus Module for PCIE + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom 
execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_pcie_linux.c 610267 2016-01-06 16:03:53Z $ + */ + + +/* include files */ +#include +#include +#include +#include +#include +#include +#include +#if defined(DHD_DEBUG) +#include +#include +#endif /* defined(DHD_DEBUG) */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_ARCH_MSM +#ifdef CONFIG_PCI_MSM +#include +#else +#include +#endif /* CONFIG_PCI_MSM */ +#endif /* CONFIG_ARCH_MSM */ + +#define PCI_CFG_RETRY 10 +#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */ +#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */ + +#define OSL_PKTTAG_CLEAR(p) \ +do { \ + struct sk_buff *s = (struct sk_buff *)(p); \ + ASSERT(OSL_PKTTAG_SZ == 32); \ + *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \ + *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \ + *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \ + *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \ +} while (0) + + +/* user defined data structures */ + +typedef struct dhd_pc_res { + uint32 bar0_size; + void* bar0_addr; + uint32 bar1_size; + void* bar1_addr; +} pci_config_res, *pPci_config_res; + +typedef bool (*dhdpcie_cb_fn_t)(void *); + +typedef struct dhdpcie_info +{ + dhd_bus_t *bus; + osl_t *osh; + struct pci_dev *dev; /* pci device handle */ + volatile char *regs; /* pci device memory va */ + volatile char *tcm; /* pci device memory va */ + uint32 tcm_size; /* pci device memory size */ + struct pcos_info *pcos_info; + uint16 last_intrstatus; /* to cache intrstatus */ + int irq; + char pciname[32]; + struct pci_saved_state* default_state; + struct pci_saved_state* state; +#ifdef BCMPCIE_OOB_HOST_WAKE + void *os_cxt; /* Pointer to per-OS private data */ +#endif /* BCMPCIE_OOB_HOST_WAKE */ +} dhdpcie_info_t; + + +struct pcos_info { + dhdpcie_info_t *pc; + spinlock_t lock; + wait_queue_head_t intr_wait_queue; + struct timer_list tuning_timer; + int tuning_timer_exp; + atomic_t timer_enab; + struct tasklet_struct tuning_tasklet; +}; + +#ifdef BCMPCIE_OOB_HOST_WAKE +typedef struct dhdpcie_os_info { + int oob_irq_num; /* valid when hardware or software oob in use */ + unsigned long oob_irq_flags; /* valid when hardware or software oob in use */ + bool oob_irq_registered; + bool oob_irq_enabled; + bool oob_irq_wake_enabled; + spinlock_t oob_irq_spinlock; + void *dev; /* handle to the underlying device */ +} dhdpcie_os_info_t; +#endif /* 
BCMPCIE_OOB_HOST_WAKE */ + +/* function declarations */ +static int __devinit +dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void __devexit +dhdpcie_pci_remove(struct pci_dev *pdev); +static int dhdpcie_init(struct pci_dev *pdev); +static irqreturn_t dhdpcie_isr(int irq, void *arg); +/* OS Routine functions for PCI suspend/resume */ + +static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state); +static int dhdpcie_resume_host_dev(dhd_bus_t *bus); +static int dhdpcie_suspend_host_dev(dhd_bus_t *bus); +static int dhdpcie_resume_dev(struct pci_dev *dev); +static int dhdpcie_suspend_dev(struct pci_dev *dev); +#ifdef DHD_PCIE_RUNTIMEPM +static int dhdpcie_pm_suspend(struct device *dev); +static int dhdpcie_pm_prepare(struct device *dev); +static int dhdpcie_pm_resume(struct device *dev); +static void dhdpcie_pm_complete(struct device *dev); +#else +static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state); +static int dhdpcie_pci_resume(struct pci_dev *dev); +#endif /* DHD_PCIE_RUNTIMEPM */ +static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = { + { vendor: 0x14e4, + device: PCI_ANY_ID, + subvendor: PCI_ANY_ID, + subdevice: PCI_ANY_ID, + class: PCI_CLASS_NETWORK_OTHER << 8, + class_mask: 0xffff00, + driver_data: 0, + }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid); + +/* Power Management Hooks */ +#ifdef DHD_PCIE_RUNTIMEPM +static const struct dev_pm_ops dhd_pcie_pm_ops = { + .prepare = dhdpcie_pm_prepare, + .suspend = dhdpcie_pm_suspend, + .resume = dhdpcie_pm_resume, + .complete = dhdpcie_pm_complete, +}; +#endif /* DHD_PCIE_RUNTIMEPM */ + +static struct pci_driver dhdpcie_driver = { + node: {}, + name: "pcieh", + id_table: dhdpcie_pci_devid, + probe: dhdpcie_pci_probe, + remove: dhdpcie_pci_remove, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) + save_state: NULL, +#endif +#ifdef DHD_PCIE_RUNTIMEPM + .driver.pm = &dhd_pcie_pm_ops, +#else + suspend: dhdpcie_pci_suspend, + resume: dhdpcie_pci_resume, +#endif /* DHD_PCIE_RUNTIMEPM */ +}; + +int dhdpcie_init_succeeded = FALSE; + +#ifdef DHD_PCIE_RUNTIMEPM +static int dhdpcie_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + return dhdpcie_set_suspend_resume(pdev, TRUE); +} + +static int dhdpcie_pm_prepare(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + + if (pch) { + bus = pch->bus; + DHD_DISABLE_RUNTIME_PM(bus->dhd); + } + + return 0; +} + +static int dhdpcie_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + return dhdpcie_set_suspend_resume(pdev, FALSE); +} + +static void dhdpcie_pm_complete(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + + if (pch) { + bus = pch->bus; + DHD_ENABLE_RUNTIME_PM(bus->dhd); + } + + return; +} +#else +static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state) +{ + BCM_REFERENCE(state); + return dhdpcie_set_suspend_resume(pdev, TRUE); +} + +static int dhdpcie_pci_resume(struct pci_dev *pdev) +{ + return dhdpcie_set_suspend_resume(pdev, FALSE); +} + +#endif /* DHD_PCIE_RUNTIMEPM */ + +static int dhdpcie_set_suspend_resume(struct pci_dev *pdev, bool state) +{ + int ret = 0; + dhdpcie_info_t *pch = pci_get_drvdata(pdev); + dhd_bus_t *bus = NULL; + + if (pch) { + bus = pch->bus; + } + +#ifdef DHD_PCIE_RUNTIMEPM + if (bus && !bus->dhd->dongle_reset) { + /* if wakelock is held during 
suspend, return failed */ + if (state == TRUE && dhd_os_check_wakelock_all(bus->dhd)) { + return -EBUSY; + } + + mutex_lock(&bus->pm_lock); + } +#endif /* DHD_PCIE_RUNTIMEPM */ + + /* When firmware is not loaded do the PCI bus */ + /* suspend/resume only */ + if (bus && (bus->dhd->busstate == DHD_BUS_DOWN) && + !bus->dhd->dongle_reset) { + ret = dhdpcie_pci_suspend_resume(bus, state); +#ifdef DHD_PCIE_RUNTIMEPM + mutex_unlock(&bus->pm_lock); +#endif /* DHD_PCIE_RUNTIMEPM */ + return ret; + } + + if (bus && ((bus->dhd->busstate == DHD_BUS_SUSPEND)|| + (bus->dhd->busstate == DHD_BUS_DATA)) && + (bus->suspended != state)) { + ret = dhdpcie_bus_suspend(bus, state); + } + +#ifdef DHD_PCIE_RUNTIMEPM + if (bus && !bus->dhd->dongle_reset) { + mutex_unlock(&bus->pm_lock); + } +#endif /* DHD_PCIE_RUNTIMEPM */ + return ret; +} + +static int dhdpcie_suspend_dev(struct pci_dev *dev) +{ + int ret; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + dhdpcie_info_t *pch = pci_get_drvdata(dev); + dhd_bus_t *bus = pch->bus; + + if (bus->is_linkdown) { + DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__)); + return BCME_ERROR; + } +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + bus->pci_d3hot_done = 1; +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + pci_save_state(dev); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + pch->state = pci_store_saved_state(dev); +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + pci_enable_wake(dev, PCI_D0, TRUE); + if (pci_is_enabled(dev)) { + pci_disable_device(dev); + } + ret = pci_set_power_state(dev, PCI_D3hot); + if (ret) { + DHD_ERROR(("%s: pci_set_power_state error %d\n", + __FUNCTION__, ret)); + } + disable_irq(dev->irq); + return ret; +} + +static int dhdpcie_resume_dev(struct pci_dev *dev) +{ + int err = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + dhdpcie_info_t *pch = pci_get_drvdata(dev); + dhd_bus_t *bus = pch->bus; + pci_load_and_free_saved_state(dev, &pch->state); +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + bus->pci_d3hot_done = 0; +#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */ + pci_restore_state(dev); + err = pci_enable_device(dev); + if (err) { + printf("%s:pci_enable_device error %d \n", __FUNCTION__, err); + goto out; + } + pci_set_master(dev); + err = pci_set_power_state(dev, PCI_D0); + if (err) { + printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err); + goto out; + } + +out: + enable_irq(dev->irq); + return err; +} + +static int dhdpcie_resume_host_dev(dhd_bus_t *bus) +{ + int bcmerror = 0; +#ifdef USE_EXYNOS_PCIE_RC_PMPATCH + bcmerror = exynos_pcie_pm_resume(SAMSUNG_PCIE_CH_NUM); +#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */ +#ifdef CONFIG_ARCH_MSM + bcmerror = dhdpcie_start_host_pcieclock(bus); +#endif /* CONFIG_ARCH_MSM */ + if (bcmerror < 0) { + DHD_ERROR(("%s: PCIe RC resume failed!!! 
(%d)\n",
+			__FUNCTION__, bcmerror));
+		bus->is_linkdown = 1;
+#ifdef CONFIG_ARCH_MSM
+		bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+	}
+
+	return bcmerror;
+}
+
+static int dhdpcie_suspend_host_dev(dhd_bus_t *bus)
+{
+	int bcmerror = 0;
+#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
+	struct pci_dev *rc_pci_dev;
+	rc_pci_dev = pci_get_device(0x144d, SAMSUNG_PCIE_DEVICE_ID, NULL);
+	if (rc_pci_dev) {
+		pci_save_state(rc_pci_dev);
+	}
+	exynos_pcie_pm_suspend(SAMSUNG_PCIE_CH_NUM);
+#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
+#ifdef CONFIG_ARCH_MSM
+	bcmerror = dhdpcie_stop_host_pcieclock(bus);
+#endif /* CONFIG_ARCH_MSM */
+	return bcmerror;
+}
+
+int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state)
+{
+	int rc;
+
+	struct pci_dev *dev = bus->dev;
+
+	if (state) {
+		if (bus->is_linkdown) {
+			DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+#ifndef BCMPCIE_OOB_HOST_WAKE
+		dhdpcie_pme_active(bus->osh, state);
+#endif /* !BCMPCIE_OOB_HOST_WAKE */
+		rc = dhdpcie_suspend_dev(dev);
+		if (!rc) {
+			dhdpcie_suspend_host_dev(bus);
+		}
+	} else {
+		dhdpcie_resume_host_dev(bus);
+		rc = dhdpcie_resume_dev(dev);
+#ifndef BCMPCIE_OOB_HOST_WAKE
+		dhdpcie_pme_active(bus->osh, state);
+#endif /* !BCMPCIE_OOB_HOST_WAKE */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+		if (bus->is_linkdown) {
+			bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL;
+			dhd_os_send_hang_message(bus->dhd);
+		}
+#endif
+	}
+	return rc;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+static int dhdpcie_device_scan(struct device *dev, void *data)
+{
+	struct pci_dev *pcidev;
+	int *cnt = data;
+
+	pcidev = container_of(dev, struct pci_dev, dev);
+	if (pcidev->vendor != 0x14e4)
+		return 0;
+
+	DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device));
+	*cnt += 1;
+	if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name))
+		DHD_ERROR(("Broadcom PCI Device 0x%04x is already claimed by driver %s\n",
+			pcidev->device, pcidev->driver->name));
+
+	return 0;
+}
+#endif /* LINUX_VERSION >= 2.6.0 */
+
+int
+dhdpcie_bus_register(void)
+{
+	int error = 0;
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	if (!(error = pci_module_init(&dhdpcie_driver)))
+		return 0;
+
+	DHD_ERROR(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
+#else
+	if (!(error = pci_register_driver(&dhdpcie_driver))) {
+		bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan);
+		if (!error) {
+			DHD_ERROR(("No Broadcom PCI device enumerated!\n"));
+		} else if (!dhdpcie_init_succeeded) {
+			DHD_ERROR(("%s: dhdpcie initialize failed.\n", __FUNCTION__));
+		} else {
+			return 0;
+		}
+
+		pci_unregister_driver(&dhdpcie_driver);
+		error = BCME_ERROR;
+	}
+#endif /* LINUX_VERSION < 2.6.0 */
+
+	return error;
+}
+
+
+void
+dhdpcie_bus_unregister(void)
+{
+	pci_unregister_driver(&dhdpcie_driver);
+}
+
+int __devinit
+dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+
+	if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) {
+		DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__));
+		return -ENODEV;
+	}
+	printf("PCI_PROBE: bus %X, slot %X, vendor %X, device %X "
+		"(good PCI location)\n", pdev->bus->number,
+		PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device);
+
+	if (dhdpcie_init (pdev)) {
+		DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+#ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND
+	/* disable async suspend */
+	device_disable_async_suspend(&pdev->dev);
+#endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */
+
+	DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__));
+	return 0;
+}
+
+int
+dhdpcie_detach(dhdpcie_info_t *pch)
+{
+	if (pch) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+		if (!dhd_download_fw_on_driverload) {
+			pci_load_and_free_saved_state(pch->dev, &pch->default_state);
+		}
+#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+		MFREE(pch->osh, pch, sizeof(dhdpcie_info_t));
+	}
+	return 0;
+}
+
+
+void __devexit
+dhdpcie_pci_remove(struct pci_dev *pdev)
+{
+	osl_t *osh = NULL;
+	dhdpcie_info_t *pch = NULL;
+	dhd_bus_t *bus = NULL;
+
+	DHD_TRACE(("%s Enter\n", __FUNCTION__));
+	pch = pci_get_drvdata(pdev);
+	bus = pch->bus;
+	osh = pch->osh;
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	if (bus) {
+#ifdef CONFIG_ARCH_MSM
+		msm_pcie_deregister_event(&bus->pcie_event);
+#endif /* CONFIG_ARCH_MSM */
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+#ifdef CONFIG_SOC_EXYNOS8890
+		exynos_pcie_deregister_event(&bus->pcie_event);
+#endif /* CONFIG_SOC_EXYNOS8890 */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
+	}
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+	dhdpcie_bus_release(bus);
+	pci_disable_device(pdev);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	/* pcie os info detach */
+	MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t));
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+	/* pcie info detach */
+	dhdpcie_detach(pch);
+	/* osl detach */
+	osl_detach(osh);
+
+	dhdpcie_init_succeeded = FALSE;
+
+	DHD_TRACE(("%s Exit\n", __FUNCTION__));
+
+	return;
+}
+
+/* Request Linux irq */
+int
+dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info)
+{
+	dhd_bus_t *bus = dhdpcie_info->bus;
+	struct pci_dev *pdev = dhdpcie_info->bus->dev;
+
+	if (!bus->irq_registered) {
+		snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname),
+			"dhdpcie:%s", pci_name(pdev));
+		if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
+			dhdpcie_info->pciname, bus) < 0) {
+			DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
+			return -1;
+		} else {
+			bus->irq_registered = TRUE;
+		}
+	} else {
+		DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__));
+	}
+
+	DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname));
+
+
+	return 0; /* SUCCESS */
+}
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define PRINTF_RESOURCE	"0x%016llx"
+#else
+#define PRINTF_RESOURCE	"0x%08x"
+#endif
+
+/*
+
+Name: dhdpcie_get_resource
+
+Parameters:
+
+1: struct pci_dev *pdev -- pci device structure
+2: pci_res -- structure containing pci configuration space values
+
+
+Return value:
+
+int - Status (0 on success, -1 on failure)
+
+Description:
+Accesses PCI configuration space, retrieves the PCI-allocated resources and
+updates them in the resource structure.
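+
+BAR0 is ioremapped as the dongle register window (DONGLE_REG_MAP_SIZE) and
+BAR1 as the dongle TCM window (DONGLE_TCM_MAP_SIZE); both mappings are kept
+in the dhdpcie_info structure for later use by the bus layer.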
+
+ */
+int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info)
+{
+	phys_addr_t bar0_addr, bar1_addr;
+	ulong bar1_size;
+	struct pci_dev *pdev = NULL;
+	pdev = dhdpcie_info->dev;
+	do {
+		if (pci_enable_device(pdev)) {
+			printf("%s: Cannot enable PCI device\n", __FUNCTION__);
+			break;
+		}
+		pci_set_master(pdev);
+		bar0_addr = pci_resource_start(pdev, 0);	/* Bar-0 mapped address */
+		bar1_addr = pci_resource_start(pdev, 2);	/* Bar-1 mapped address */
+
+		/* read Bar-1 mapped memory range */
+		bar1_size = pci_resource_len(pdev, 2);
+
+		if ((bar1_size == 0) || (bar1_addr == 0)) {
+			printf("%s: BAR1 Not enabled for this device size(%ld),"
+				" addr(0x"PRINTF_RESOURCE")\n",
+				__FUNCTION__, bar1_size, bar1_addr);
+			goto err;
+		}
+
+		dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
+		dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE);
+		dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE;
+
+		if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
+			DHD_ERROR(("%s: ioremap() failed\n", __FUNCTION__));
+			break;
+		}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+		if (!dhd_download_fw_on_driverload) {
+			/* Back up the PCIe configuration so the Wi-Fi on/off
+			 * process can be used with a built-in driver
+			 */
+			pci_save_state(pdev);
+			dhdpcie_info->default_state = pci_store_saved_state(pdev);
+
+			if (dhdpcie_info->default_state == NULL) {
+				DHD_ERROR(("%s pci_store_saved_state returns NULL\n",
+					__FUNCTION__));
+				REG_UNMAP(dhdpcie_info->regs);
+				REG_UNMAP(dhdpcie_info->tcm);
+				pci_disable_device(pdev);
+				break;
+			}
+		}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+		DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
+			__FUNCTION__, dhdpcie_info->regs, bar0_addr));
+		DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
+			__FUNCTION__, dhdpcie_info->tcm, bar1_addr));
+
+		return 0; /* SUCCESS */
+	} while (0);
+err:
+	return -1; /* FAILURE */
+}
+
+int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info)
+{
+
+	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
+
+	do {
+		/* define it here only!! */
+		if (dhdpcie_get_resource (dhdpcie_info)) {
+			DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__));
+			break;
+		}
+		DHD_TRACE(("%s:Exit - SUCCESS \n",
+			__FUNCTION__));
+
+		return 0; /* SUCCESS */
+
+	} while (0);
+
+	DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
+
+	return -1; /* FAILURE */
+
+}
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \
+	defined(CONFIG_SOC_EXYNOS8890))
+void dhdpcie_linkdown_cb(struct_pcie_notify *noti)
+{
+	struct pci_dev *pdev = (struct pci_dev *)noti->user;
+	dhdpcie_info_t *pch = NULL;
+
+	if (pdev) {
+		pch = pci_get_drvdata(pdev);
+		if (pch) {
+			dhd_bus_t *bus = pch->bus;
+			if (bus) {
+				dhd_pub_t *dhd = bus->dhd;
+				if (dhd) {
+					DHD_ERROR(("%s: Event HANG send up "
+						"due to PCIe linkdown\n",
+						__FUNCTION__));
+#ifdef CONFIG_ARCH_MSM
+					bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+					bus->is_linkdown = 1;
+					DHD_OS_WAKE_LOCK(dhd);
+					dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+					dhd_os_send_hang_message(dhd);
+				}
+			}
+		}
+	}
+
+}
+#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY && CONFIG_SOC_EXYNOS8890) */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+int dhdpcie_init(struct pci_dev *pdev)
+{
+
+	osl_t *osh = NULL;
+	dhd_bus_t *bus = NULL;
+	dhdpcie_info_t *dhdpcie_info = NULL;
+	wifi_adapter_info_t *adapter = NULL;
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	dhdpcie_os_info_t *dhdpcie_osinfo = NULL;
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+	do {
+		/* osl attach */
+		if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+			DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__));
+			break;
+		}
+
+		/* initialize static buffer */
+		adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number,
+			PCI_SLOT(pdev->devfn));
+		if (adapter != NULL)
+			DHD_ERROR(("%s: found adapter info '%s'\n", __FUNCTION__, adapter->name));
+		else
+			DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__));
+		osl_static_mem_init(osh, adapter);
+
+		/* Set ACP coherence flag */
+		if (OSL_ACP_WAR_ENAB() || OSL_ARCH_IS_COHERENT())
+			osl_flag_set(osh, OSL_ACP_COHERENCE);
+
+		/* allocate the Linux-specific PCIe structure here */
+		if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) {
+			DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+			break;
+		}
+		bzero(dhdpcie_info, sizeof(dhdpcie_info_t));
+		dhdpcie_info->osh = osh;
+		dhdpcie_info->dev = pdev;
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+		/* allocate the OS-specific structure */
+		dhdpcie_osinfo = MALLOC(osh, sizeof(dhdpcie_os_info_t));
+		if (dhdpcie_osinfo == NULL) {
+			DHD_ERROR(("%s: MALLOC of dhdpcie_os_info_t failed\n",
+				__FUNCTION__));
+			break;
+		}
+		bzero(dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
+		dhdpcie_info->os_cxt = (void *)dhdpcie_osinfo;
+
+		/* Initialize host wake IRQ */
+		spin_lock_init(&dhdpcie_osinfo->oob_irq_spinlock);
+		/* Get customer-specific host wake IRQ parameters: IRQ number and IRQ flags */
+		dhdpcie_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter,
+			&dhdpcie_osinfo->oob_irq_flags);
+		if (dhdpcie_osinfo->oob_irq_num < 0) {
+			DHD_ERROR(("%s: Host OOB irq is not defined\n", __FUNCTION__));
+		}
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+		/* Find the PCI resources, verify the */
+		/* vendor and device ID, map BAR regions and irq, update in structures */
+		if (dhdpcie_scan_resource(dhdpcie_info)) {
+			DHD_ERROR(("%s: dhdpcie_scan_resource failed\n", __FUNCTION__));
+
+			break;
+		}
+
+		/* Bus initialization */
+		bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs, dhdpcie_info->tcm, pdev);
+		if (!bus) {
+			DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__));
+			break;
+		}
+
+		dhdpcie_info->bus = bus;
+		bus->is_linkdown = 0;
+		bus->pci_d3hot_done = 0;
+#ifdef DONGLE_ENABLE_ISOLATION
+		bus->dhd->dongle_isolation = TRUE;
+#endif /* DONGLE_ENABLE_ISOLATION */
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+		bus->pcie_event.events = MSM_PCIE_EVENT_LINKDOWN;
+		bus->pcie_event.user = pdev;
+		bus->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK;
+		bus->pcie_event.callback = dhdpcie_linkdown_cb;
+		bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY;
+		msm_pcie_register_event(&bus->pcie_event);
+		bus->no_cfg_restore = 0;
+#endif /* CONFIG_ARCH_MSM */
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+#ifdef CONFIG_SOC_EXYNOS8890
+		bus->pcie_event.events = EXYNOS_PCIE_EVENT_LINKDOWN;
+		bus->pcie_event.user = pdev;
+		bus->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK;
+		bus->pcie_event.callback = dhdpcie_linkdown_cb;
+		exynos_pcie_register_event(&bus->pcie_event);
+#endif /* CONFIG_SOC_EXYNOS8890 */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+		if (bus->intr) {
+			/* Register interrupt callback, but mask it (not operational yet). */
+			DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
+			dhdpcie_bus_intr_disable(bus);
+
+			if (dhdpcie_request_irq(dhdpcie_info)) {
+				DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
+				break;
+			}
+		} else {
+			bus->pollrate = 1;
+			DHD_INFO(("%s: PCIe interrupt function is NOT registered "
+				"due to polling mode\n", __FUNCTION__));
+		}
+
+#if defined(BCM_REQUEST_FW)
+		if (dhd_bus_download_firmware(bus, osh, NULL, NULL) < 0) {
+			DHD_ERROR(("%s: failed to download firmware\n", __FUNCTION__));
+		}
+		bus->nv_path = NULL;
+		bus->fw_path = NULL;
+#endif /* BCM_REQUEST_FW */
+
+		/* set private data for pci_dev */
+		pci_set_drvdata(pdev, dhdpcie_info);
+
+		if (dhd_download_fw_on_driverload) {
+			if (dhd_bus_start(bus->dhd)) {
+				DHD_ERROR(("%s: dhd_bus_start() failed\n", __FUNCTION__));
+				if (!allow_delay_fwdl)
+					break;
+			}
+		} else {
+			/* Set random MAC address during boot time */
+			get_random_bytes(&bus->dhd->mac.octet[3], 3);
+			/* Adding BRCM OUI */
+			bus->dhd->mac.octet[0] = 0;
+			bus->dhd->mac.octet[1] = 0x90;
+			bus->dhd->mac.octet[2] = 0x4C;
+		}
+
+		/* Attach to the OS network interface */
+		DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
+		if (dhd_register_if(bus->dhd, 0, TRUE)) {
+			DHD_ERROR(("%s(): ERROR.. dhd_register_if() failed\n", __FUNCTION__));
+			break;
+		}
+
+		dhdpcie_init_succeeded = TRUE;
+
+		DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__));
+		return 0;  /* return SUCCESS */
+
+	} while (0);
+	/* reverse the initialization order in case of error */
+
+	if (bus)
+		dhdpcie_bus_release(bus);
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+	if (dhdpcie_osinfo) {
+		MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
+	}
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+	if (dhdpcie_info)
+		dhdpcie_detach(dhdpcie_info);
+	pci_disable_device(pdev);
+	if (osh)
+		osl_detach(osh);
+
+	dhdpcie_init_succeeded = FALSE;
+
+	DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
+
+	return -1; /* return FAILURE */
+}
+
+/* Free Linux irq */
+void
+dhdpcie_free_irq(dhd_bus_t *bus)
+{
+	struct pci_dev *pdev = NULL;
+
+	DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__));
+	if (!bus) {
+		return;
+	}
+
+	if (bus->irq_registered) {
+		pdev = bus->dev;
+		free_irq(pdev->irq, bus);
+		bus->irq_registered = FALSE;
+	} else {
+		DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__));
+	}
+	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+	return;
+}
+
+/*
+
+Name: dhdpcie_isr
+
+Parameters:
+
+1: IN int irq -- interrupt vector
+2: IN void *arg -- handle to private data structure
+
+Return value:
+
+Status (TRUE or FALSE)
+
+Description:
+The interrupt service routine checks the status register and, if mailbox
+interrupts are raised, disables the interrupt and queues the DPC.
+*/
+
+
+irqreturn_t
+dhdpcie_isr(int irq, void *arg)
+{
+	dhd_bus_t *bus = (dhd_bus_t*)arg;
+	if (dhdpcie_bus_isr(bus))
+		return TRUE;
+	else
+		return FALSE;
+}
+
+int
+dhdpcie_start_host_pcieclock(dhd_bus_t *bus)
+{
+	int ret = 0;
+#ifdef CONFIG_ARCH_MSM
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	int options = 0;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+#endif /* CONFIG_ARCH_MSM */
+	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+	if (bus == NULL) {
+		return BCME_ERROR;
+	}
+
+	if (bus->dev == NULL) {
+		return BCME_ERROR;
+	}
+
+#ifdef CONFIG_ARCH_MSM
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	if (bus->no_cfg_restore) {
+		options = MSM_PCIE_CONFIG_NO_CFG_RESTORE;
+	}
+	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
+		bus->dev, NULL, options);
+	if (bus->no_cfg_restore && !ret) {
+		msm_pcie_recover_config(bus->dev);
+		bus->no_cfg_restore = 0;
+	}
+#else
+	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
+		bus->dev, NULL, 0);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+	if (ret) {
+		DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__));
+		goto done;
+	}
+
+done:
+#endif /* CONFIG_ARCH_MSM */
+	DHD_TRACE(("%s Exit:\n", __FUNCTION__));
+	return ret;
+}
+
+int
+dhdpcie_stop_host_pcieclock(dhd_bus_t *bus)
+{
+	int ret = 0;
+#ifdef CONFIG_ARCH_MSM
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	int options = 0;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+#endif /* CONFIG_ARCH_MSM */
+
+	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+	if (bus == NULL) {
+		return BCME_ERROR;
+	}
+
+	if (bus->dev == NULL) {
+		return BCME_ERROR;
+	}
+
+#ifdef CONFIG_ARCH_MSM
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	if (bus->no_cfg_restore) {
+		options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN;
+	}
+
+	ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
+		bus->dev, NULL, options);
+#else
+	ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
+		bus->dev, NULL, 0);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+	if (ret) {
+		DHD_ERROR(("Failed to stop PCIe link\n"));
+		goto done;
+	}
+done:
+#endif /* CONFIG_ARCH_MSM */
+	DHD_TRACE(("%s Exit:\n",
+		__FUNCTION__));
+	return ret;
+}
+
+int
+dhdpcie_disable_device(dhd_bus_t *bus)
+{
+	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+	if (bus == NULL) {
+		return BCME_ERROR;
+	}
+
+	if (bus->dev == NULL) {
+		return BCME_ERROR;
+	}
+
+	pci_disable_device(bus->dev);
+
+	return 0;
+}
+
+int
+dhdpcie_enable_device(dhd_bus_t *bus)
+{
+	int ret = BCME_ERROR;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	dhdpcie_info_t *pch;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+	DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+	if (bus == NULL) {
+		return BCME_ERROR;
+	}
+
+	if (bus->dev == NULL) {
+		return BCME_ERROR;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	pch = pci_get_drvdata(bus->dev);
+	if (pch == NULL) {
+		return BCME_ERROR;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && !defined(CONFIG_SOC_EXYNOS8890)
+	/* Updated with pci_load_and_free_saved_state to be compatible
+	 * with kernel 3.14 or higher
+	 */
+	pci_load_and_free_saved_state(bus->dev, &pch->default_state);
+	pch->default_state = pci_store_saved_state(bus->dev);
+#else
+	pci_load_saved_state(bus->dev, pch->default_state);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && !CONFIG_SOC_EXYNOS8890 */
+
+	pci_restore_state(bus->dev);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */
+
+	ret = pci_enable_device(bus->dev);
+	if (ret) {
+		pci_disable_device(bus->dev);
+	} else {
+		pci_set_master(bus->dev);
+	}
+
+	return ret;
+}
+
+int
+dhdpcie_alloc_resource(dhd_bus_t *bus)
+{
+	dhdpcie_info_t *dhdpcie_info;
+	phys_addr_t bar0_addr, bar1_addr;
+	ulong bar1_size;
+
+	do {
+		if (bus == NULL) {
+			DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+			break;
+		}
+
+		if (bus->dev == NULL) {
+			DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+			break;
+		}
+
+		dhdpcie_info = pci_get_drvdata(bus->dev);
+		if (dhdpcie_info == NULL) {
+			DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+			break;
+		}
+
+		bar0_addr = pci_resource_start(bus->dev, 0);	/* Bar-0 mapped address */
+		bar1_addr = pci_resource_start(bus->dev, 2);	/* Bar-1 mapped address */
+
+		/* read Bar-1 mapped memory range */
+		bar1_size = pci_resource_len(bus->dev, 2);
+
+		if ((bar1_size == 0) || (bar1_addr == 0)) {
+			printf("%s: BAR1 Not enabled for this device size(%ld),"
+				" addr(0x"PRINTF_RESOURCE")\n",
+				__FUNCTION__, bar1_size, bar1_addr);
+			break;
+		}
+
+		dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
+		if (!dhdpcie_info->regs) {
+			DHD_ERROR(("%s: ioremap() for regs failed\n", __FUNCTION__));
+			break;
+		}
+
+		bus->regs = dhdpcie_info->regs;
+		dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE);
+		dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE;
+		if (!dhdpcie_info->tcm) {
+			DHD_ERROR(("%s: ioremap() for tcm failed\n", __FUNCTION__));
+			REG_UNMAP(dhdpcie_info->regs);
+			bus->regs = NULL;
+			break;
+		}
+
+		bus->tcm = dhdpcie_info->tcm;
+
+		DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
+			__FUNCTION__, dhdpcie_info->regs, bar0_addr));
+		DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
+			__FUNCTION__, dhdpcie_info->tcm, bar1_addr));
+
+		return 0;
+	} while (0);
+
+	return BCME_ERROR;
+}
+
+void
+dhdpcie_free_resource(dhd_bus_t *bus)
+{
+	dhdpcie_info_t *dhdpcie_info;
+
+	if (bus == NULL) {
+		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (bus->dev == NULL) {
+		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	dhdpcie_info = pci_get_drvdata(bus->dev);
+	if (dhdpcie_info == NULL) {
+		DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (bus->regs) {
+		REG_UNMAP(dhdpcie_info->regs);
+		bus->regs = NULL;
+	}
+
+	if (bus->tcm) {
+		REG_UNMAP(dhdpcie_info->tcm);
+		bus->tcm = NULL;
+	}
+}
+
+int
+dhdpcie_bus_request_irq(struct dhd_bus *bus)
+{
+	dhdpcie_info_t *dhdpcie_info;
+	int ret = 0;
+
+	if (bus == NULL) {
+		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (bus->dev == NULL) {
+		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	dhdpcie_info = pci_get_drvdata(bus->dev);
+	if (dhdpcie_info == NULL) {
+		DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+		return BCME_ERROR;
+	}
+
+	if (bus->intr) {
+		/* Register interrupt callback, but mask it (not operational yet). */
+		DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
+		dhdpcie_bus_intr_disable(bus);
+		ret = dhdpcie_request_irq(dhdpcie_info);
+		if (ret) {
+			DHD_ERROR(("%s: request_irq() failed, ret=%d\n",
+				__FUNCTION__, ret));
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+#ifdef BCMPCIE_OOB_HOST_WAKE
+void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable)
+{
+	unsigned long flags;
+	dhdpcie_info_t *pch;
+	dhdpcie_os_info_t *dhdpcie_osinfo;
+
+	if (bus == NULL) {
+		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	if (bus->dev == NULL) {
+		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	pch = pci_get_drvdata(bus->dev);
+	if (pch == NULL) {
+		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+		return;
+	}
+
+	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
+	spin_lock_irqsave(&dhdpcie_osinfo->oob_irq_spinlock, flags);
+	if ((dhdpcie_osinfo->oob_irq_enabled != enable) &&
+		(dhdpcie_osinfo->oob_irq_num > 0)) {
+		if (enable) {
+			enable_irq(dhdpcie_osinfo->oob_irq_num);
+		} else {
+			disable_irq_nosync(dhdpcie_osinfo->oob_irq_num);
+		}
+		dhdpcie_osinfo->oob_irq_enabled = enable;
+	}
+	spin_unlock_irqrestore(&dhdpcie_osinfo->oob_irq_spinlock, flags);
+}
+
+static irqreturn_t wlan_oob_irq(int irq, void *data)
+{
+	dhd_bus_t *bus;
+	DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__));
+	bus = (dhd_bus_t *)data;
+	dhdpcie_oob_intr_set(bus, FALSE);
+#ifdef DHD_PCIE_RUNTIMEPM
+	dhdpcie_runtime_bus_wake(bus->dhd, FALSE, wlan_oob_irq);
+#endif /* DHD_PCIE_RUNTIMEPM */
+	if (bus->dhd->up && bus->suspended) {
+		DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT);
+	}
+	return IRQ_HANDLED;
+}
+
+int dhdpcie_oob_intr_register(dhd_bus_t *bus)
+{
+	int err = 0;
+	dhdpcie_info_t *pch;
+	dhdpcie_os_info_t *dhdpcie_osinfo;
+
+	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+	if (bus == NULL) {
+		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	if (bus->dev == NULL) {
+		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	pch = pci_get_drvdata(bus->dev);
+	if (pch == NULL) {
+		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
+		return -EINVAL;
+	}
+
+	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
+	if (dhdpcie_osinfo->oob_irq_registered) {
+		DHD_ERROR(("%s: irq is already registered\n", __FUNCTION__));
+		return -EBUSY;
+	}
+
+	if (dhdpcie_osinfo->oob_irq_num > 0) {
+		DHD_INFO_HW4(("%s OOB irq=%d flags=%X \n", __FUNCTION__,
+			(int)dhdpcie_osinfo->oob_irq_num,
+			(int)dhdpcie_osinfo->oob_irq_flags));
+		err = request_irq(dhdpcie_osinfo->oob_irq_num, wlan_oob_irq,
+			dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake",
+			bus);
+		if (err) {
+			DHD_ERROR(("%s: request_irq failed with %d\n",
+				__FUNCTION__, err));
+			
return err; + } + err = enable_irq_wake(dhdpcie_osinfo->oob_irq_num); + if (!err) { + dhdpcie_osinfo->oob_irq_wake_enabled = TRUE; + } + dhdpcie_osinfo->oob_irq_enabled = TRUE; + } + + dhdpcie_osinfo->oob_irq_registered = TRUE; + + return err; +} + +void dhdpcie_oob_intr_unregister(dhd_bus_t *bus) +{ + int err = 0; + dhdpcie_info_t *pch; + dhdpcie_os_info_t *dhdpcie_osinfo; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + if (bus == NULL) { + DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__)); + return; + } + + if (bus->dev == NULL) { + DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__)); + return; + } + + pch = pci_get_drvdata(bus->dev); + if (pch == NULL) { + DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__)); + return; + } + + dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt; + if (!dhdpcie_osinfo->oob_irq_registered) { + DHD_ERROR(("%s: irq is not registered\n", __FUNCTION__)); + return; + } + if (dhdpcie_osinfo->oob_irq_num > 0) { + if (dhdpcie_osinfo->oob_irq_wake_enabled) { + err = disable_irq_wake(dhdpcie_osinfo->oob_irq_num); + if (!err) { + dhdpcie_osinfo->oob_irq_wake_enabled = FALSE; + } + } + if (dhdpcie_osinfo->oob_irq_enabled) { + disable_irq(dhdpcie_osinfo->oob_irq_num); + dhdpcie_osinfo->oob_irq_enabled = FALSE; + } + free_irq(dhdpcie_osinfo->oob_irq_num, bus); + } + dhdpcie_osinfo->oob_irq_registered = FALSE; +} +#endif /* BCMPCIE_OOB_HOST_WAKE */ + +#ifdef DHD_PCIE_RUNTIMEPM +bool dhd_runtimepm_state(dhd_pub_t *dhd) +{ + dhd_bus_t *bus; + unsigned long flags; + bus = dhd->bus; + + DHD_GENERAL_LOCK(dhd, flags); + if (bus->suspended == TRUE) { + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_INFO(("Bus is already suspended system PM: %d\n", bus->suspended)); + return FALSE; + } + + bus->idlecount++; + + DHD_TRACE(("%s : Enter \n", __FUNCTION__)); + if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) { + bus->idlecount = 0; + if (dhd->dhd_bus_busy_state == 0 && dhd->busstate != DHD_BUS_DOWN && + dhd->busstate != DHD_BUS_DOWN_IN_PROGRESS) { + bus->bus_wake = 0; + dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS; + bus->runtime_resume_done = FALSE; + /* stop all interface network queue. */ + dhd_bus_stop_queue(bus); + DHD_GENERAL_UNLOCK(dhd, flags); + DHD_ERROR(("%s: DHD Idle state!! 
- idletime :%d, wdtick :%d \n",
+				__FUNCTION__, bus->idletime, dhd_runtimepm_ms));
+			/* If RPM suspend fails, return FALSE and retry later */
+			if (dhdpcie_set_suspend_resume(bus->dev, TRUE)) {
+				DHD_ERROR(("%s: exit with wakelock \n", __FUNCTION__));
+				DHD_GENERAL_LOCK(dhd, flags);
+				dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS;
+				bus->runtime_resume_done = TRUE;
+				/* Without this, the NET TX queue can get stuck */
+				dhd_bus_start_queue(bus);
+				DHD_GENERAL_UNLOCK(dhd, flags);
+				smp_wmb();
+				wake_up_interruptible(&bus->rpm_queue);
+				return FALSE;
+			}
+
+			DHD_GENERAL_LOCK(dhd, flags);
+			dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS;
+			dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_DONE;
+			/* Make sure the NET TX queue is active */
+			dhd_bus_start_queue(bus);
+			DHD_GENERAL_UNLOCK(dhd, flags);
+
+			wait_event_interruptible(bus->rpm_queue, bus->bus_wake);
+
+			DHD_GENERAL_LOCK(dhd, flags);
+			dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_DONE;
+			dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS;
+			DHD_GENERAL_UNLOCK(dhd, flags);
+
+			dhdpcie_set_suspend_resume(bus->dev, FALSE);
+
+			DHD_GENERAL_LOCK(dhd, flags);
+			dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS;
+			/* Inform the wake up context that Resume is over */
+			bus->runtime_resume_done = TRUE;
+			/* Make sure the NET TX queue is active */
+			dhd_bus_start_queue(bus);
+			DHD_GENERAL_UNLOCK(dhd, flags);
+
+			smp_wmb();
+			wake_up_interruptible(&bus->rpm_queue);
+			DHD_ERROR(("%s : runtime resume ended\n", __FUNCTION__));
+			return TRUE;
+		} else {
+			DHD_GENERAL_UNLOCK(dhd, flags);
+			/* Since one of the contexts is busy (TX, IOVAR or RX),
+			 * we should not suspend
+			 */
+			DHD_ERROR(("%s : bus is active with dhd_bus_busy_state = 0x%x\n",
+				__FUNCTION__, dhd->dhd_bus_busy_state));
+			return FALSE;
+		}
+	}
+
+	DHD_GENERAL_UNLOCK(dhd, flags);
+	return FALSE;
+} /* dhd_runtimepm_state */
+
+/*
+ * dhd_runtime_bus_wake
+ *	TRUE - related to the runtime PM context
+ *	FALSE - not involved in the runtime PM context
+ */
+bool dhd_runtime_bus_wake(dhd_bus_t *bus, bool wait, void *func_addr)
+{
+	unsigned long flags;
+	bus->idlecount = 0;
+	DHD_TRACE(("%s : enter\n", __FUNCTION__));
+	if (bus->dhd->up == FALSE) {
+		DHD_INFO(("%s : dhd is not up\n", __FUNCTION__));
+		return FALSE;
+	}
+
+	DHD_GENERAL_LOCK(bus->dhd, flags);
+	if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_ALL) {
+		/* Wake up RPM state thread if it is suspend in progress or suspended */
+		if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS ||
+			bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE) {
+			bus->bus_wake = 1;
+
+			DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+			DHD_ERROR(("Runtime Resume is called in %pf\n", func_addr));
+			smp_wmb();
+			wake_up_interruptible(&bus->rpm_queue);
+		/* No need to wake up the RPM state thread */
+		} else if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS) {
+			DHD_GENERAL_UNLOCK(bus->dhd, flags);
+		}
+
+		/* Callers with wait = TRUE will wait here until resume is done */
+		if (wait) {
+			wait_event_interruptible(bus->rpm_queue, bus->runtime_resume_done);
+		} else {
+			DHD_INFO(("%s: bus wakeup but no wait until resume done\n", __FUNCTION__));
+		}
+		/* If it is called from RPM context, it returns TRUE */
+		return TRUE;
+	}
+
+	DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+	return FALSE;
+}
+
+bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void* func_addr)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	return dhd_runtime_bus_wake(bus, wait, func_addr);
+}
+
+void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	bus->idletime = 0;
+}
+
+bool dhdpcie_is_resume_done(dhd_pub_t *dhdp)
+{
+	dhd_bus_t *bus = dhdp->bus;
+	return bus->runtime_resume_done;
+}
+#endif /* DHD_PCIE_RUNTIMEPM */
diff --git a/drivers/net/wireless/bcmdhd/dhd_pno.c b/drivers/net/wireless/bcmdhd/dhd_pno.c
new file mode 100644
index 000000000000..90e6877ec686
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_pno.c
@@ -0,0 +1,3889 @@
+/*
+ * Broadcom Dongle Host Driver (DHD)
+ * Preferred Network Offload and Wi-Fi Location Service (WLS) code.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: dhd_pno.c 606280 2015-12-15 05:28:25Z $
+ */
+
+#if defined(GSCAN_SUPPORT) && !defined(PNO_SUPPORT)
+#error "GSCAN needs PNO to be enabled!"
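+/*
+ * Build-time note (added for clarity): GSCAN is layered on top of the
+ * firmware PFN (preferred network offload) engine, so any build that
+ * defines GSCAN_SUPPORT is expected to define PNO_SUPPORT as well; the
+ * exact make/Kconfig flags that set these defines are vendor-specific.
+ */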
+#endif + +#ifdef PNO_SUPPORT +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#ifdef GSCAN_SUPPORT +#include +#endif /* GSCAN_SUPPORT */ + +#ifdef __BIG_ENDIAN +#include +#define htod32(i) (bcmswap32(i)) +#define htod16(i) (bcmswap16(i)) +#define dtoh32(i) (bcmswap32(i)) +#define dtoh16(i) (bcmswap16(i)) +#define htodchanspec(i) htod16(i) +#define dtohchanspec(i) dtoh16(i) +#else +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) +#endif /* IL_BIGENDINA */ + +#define NULL_CHECK(p, s, err) \ + do { \ + if (!(p)) { \ + printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \ + err = BCME_ERROR; \ + return err; \ + } \ + } while (0) +#define PNO_GET_PNOSTATE(dhd) ((dhd_pno_status_info_t *)dhd->pno_state) +#define PNO_BESTNET_LEN 1024 +#define PNO_ON 1 +#define PNO_OFF 0 +#define CHANNEL_2G_MAX 14 +#define CHANNEL_5G_MAX 165 +#define MAX_NODE_CNT 5 +#define WLS_SUPPORTED(pno_state) (pno_state->wls_supported == TRUE) +#define TIME_DIFF(timestamp1, timestamp2) (abs((uint32)(timestamp1/1000) \ + - (uint32)(timestamp2/1000))) +#define TIME_DIFF_MS(timestamp1, timestamp2) (abs((uint32)(timestamp1) \ + - (uint32)(timestamp2))) +#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \ + (ts).tv_nsec / NSEC_PER_USEC) + +#define ENTRY_OVERHEAD strlen("bssid=\nssid=\nfreq=\nlevel=\nage=\ndist=\ndistSd=\n====") +#define TIME_MIN_DIFF 5 +static wlc_ssid_ext_t * dhd_pno_get_legacy_pno_ssid(dhd_pub_t *dhd, + dhd_pno_status_info_t *pno_state); +#ifdef GSCAN_SUPPORT +static wl_pfn_gscan_channel_bucket_t * +dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state, +uint16 *chan_list, uint32 *num_buckets, uint32 *num_buckets_to_fw); +#endif /* GSCAN_SUPPORT */ + +static inline bool +is_dfs(uint16 channel) +{ + if (channel >= 52 && channel <= 64) /* class 2 */ + return TRUE; + else if (channel >= 100 && channel <= 140) /* class 4 */ + return TRUE; + else + return FALSE; +} +int +dhd_pno_clean(dhd_pub_t *dhd) +{ + int pfn = 0; + int err; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + /* Disable PNO */ + err = dhd_iovar(dhd, 0, "pfn", (char *)&pfn, sizeof(pfn), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn(error : %d)\n", + __FUNCTION__, err)); + goto exit; + } + _pno_state->pno_status = DHD_PNO_DISABLED; + err = dhd_iovar(dhd, 0, "pfnclear", NULL, 0, 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfnclear(error : %d)\n", + __FUNCTION__, err)); + } +exit: + return err; +} + +bool +dhd_is_pno_supported(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + + if (!dhd || !dhd->pno_state) { + DHD_ERROR(("NULL POINTER : %s\n", + __FUNCTION__)); + return FALSE; + } + _pno_state = PNO_GET_PNOSTATE(dhd); + return WLS_SUPPORTED(_pno_state); +} + +int +dhd_pno_set_mac_oui(dhd_pub_t *dhd, uint8 *oui) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + + if (!dhd || !dhd->pno_state) { + DHD_ERROR(("NULL POINTER : %s\n", __FUNCTION__)); + return BCME_ERROR; + } + _pno_state = PNO_GET_PNOSTATE(dhd); + if (ETHER_ISMULTI(oui)) { + DHD_ERROR(("Expected unicast OUI\n")); + err = BCME_ERROR; + } else { + memcpy(_pno_state->pno_oui, oui, DOT11_OUI_LEN); + DHD_PNO(("PNO mac oui 
to be used - %02x:%02x:%02x\n", _pno_state->pno_oui[0], + _pno_state->pno_oui[1], _pno_state->pno_oui[2])); + } + + return err; +} + +#ifdef GSCAN_SUPPORT +static uint64 +convert_fw_rel_time_to_systime(uint32 fw_ts_ms) +{ + struct timespec ts; + + get_monotonic_boottime(&ts); + return ((uint64)(TIMESPEC_TO_US(ts)) - (uint64)(fw_ts_ms * 1000)); +} + +static int +_dhd_pno_gscan_cfg(dhd_pub_t *dhd, wl_pfn_gscan_cfg_t *pfncfg_gscan_param, int size) +{ + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + err = dhd_iovar(dhd, 0, "pfn_gscan_cfg", (char *)pfncfg_gscan_param, size, 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfncfg_gscan_param\n", __FUNCTION__)); + goto exit; + } +exit: + return err; +} + +static bool +is_batch_retrieval_complete(struct dhd_pno_gscan_params *gscan_params) +{ + smp_rmb(); + return (gscan_params->get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE); +} +#endif /* GSCAN_SUPPORT */ + +static int +dhd_pno_set_mac_addr(dhd_pub_t *dhd, struct ether_addr *macaddr) +{ + int err; + wl_pfn_macaddr_cfg_t cfg; + + cfg.version = WL_PFN_MACADDR_CFG_VER; + if (ETHER_ISNULLADDR(macaddr)) { + cfg.flags = 0; + } else { + cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK); + } + memcpy(&cfg.macaddr, macaddr, ETHER_ADDR_LEN); + + err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&cfg, sizeof(cfg), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_macaddr\n", __FUNCTION__)); + } + + return err; +} + +static int +_dhd_pno_suspend(dhd_pub_t *dhd) +{ + int err; + int suspend = 1; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + err = dhd_iovar(dhd, 0, "pfn_suspend", (char *)&suspend, sizeof(suspend), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to suspend pfn(error :%d)\n", __FUNCTION__, err)); + goto exit; + + } + _pno_state->pno_status = DHD_PNO_SUSPEND; +exit: + return err; +} +static int +_dhd_pno_enable(dhd_pub_t *dhd, int enable) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (enable & 0xfffe) { + DHD_ERROR(("%s invalid value\n", __FUNCTION__)); + err = BCME_BADARG; + goto exit; + } + if (!dhd_support_sta_mode(dhd)) { + DHD_ERROR(("PNO is not allowed for non-STA mode")); + err = BCME_BADOPTION; + goto exit; + } + if (enable) { + if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) && + dhd_is_associated(dhd, 0, NULL)) { + DHD_ERROR(("%s Legacy PNO mode cannot be enabled " + "in assoc mode , ignore it\n", __FUNCTION__)); + err = BCME_BADOPTION; + goto exit; + } + } + /* Enable/Disable PNO */ + err = dhd_iovar(dhd, 0, "pfn", (char *)&enable, sizeof(enable), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_set - %d\n", __FUNCTION__, err)); + goto exit; + } + _pno_state->pno_status = (enable)? + DHD_PNO_ENABLED : DHD_PNO_DISABLED; + if (!enable) + _pno_state->pno_mode = DHD_PNO_NONE_MODE; + + DHD_PNO(("%s set pno as %s\n", + __FUNCTION__, enable ? 
"Enable" : "Disable")); +exit: + return err; +} + +static int +_dhd_pno_set(dhd_pub_t *dhd, const dhd_pno_params_t *pno_params, dhd_pno_mode_t mode) +{ + int err = BCME_OK; + wl_pfn_param_t pfn_param; + dhd_pno_params_t *_params; + dhd_pno_status_info_t *_pno_state; + bool combined_scan = FALSE; + struct ether_addr macaddr; + DHD_PNO(("%s enter\n", __FUNCTION__)); + + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + memset(&pfn_param, 0, sizeof(pfn_param)); + + /* set pfn parameters */ + pfn_param.version = htod32(PFN_VERSION); + pfn_param.flags = ((PFN_LIST_ORDER << SORT_CRITERIA_BIT) | + (ENABLE << IMMEDIATE_SCAN_BIT) | (ENABLE << REPORT_SEPERATELY_BIT)); + if (mode == DHD_PNO_LEGACY_MODE) { + /* check and set extra pno params */ + if ((pno_params->params_legacy.pno_repeat != 0) || + (pno_params->params_legacy.pno_freq_expo_max != 0)) { + pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT); + pfn_param.repeat = (uchar) (pno_params->params_legacy.pno_repeat); + pfn_param.exp = (uchar) (pno_params->params_legacy.pno_freq_expo_max); + } + /* set up pno scan fr */ + if (pno_params->params_legacy.scan_fr != 0) + pfn_param.scan_freq = htod32(pno_params->params_legacy.scan_fr); + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + DHD_PNO(("will enable combined scan with BATCHIG SCAN MODE\n")); + mode |= DHD_PNO_BATCH_MODE; + combined_scan = TRUE; + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + DHD_PNO(("will enable combined scan with HOTLIST SCAN MODE\n")); + mode |= DHD_PNO_HOTLIST_MODE; + combined_scan = TRUE; + } +#ifdef GSCAN_SUPPORT + else if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + DHD_PNO(("will enable combined scan with GSCAN SCAN MODE\n")); + mode |= DHD_PNO_GSCAN_MODE; + } +#endif /* GSCAN_SUPPORT */ + } + if (mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { + /* Scan frequency of 30 sec */ + pfn_param.scan_freq = htod32(30); + /* slow adapt scan is off by default */ + pfn_param.slow_freq = htod32(0); + /* RSSI margin of 30 dBm */ + pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM); + /* Network timeout 60 sec */ + pfn_param.lost_network_timeout = htod32(60); + /* best n = 2 by default */ + pfn_param.bestn = DEFAULT_BESTN; + /* mscan m=0 by default, so not record best networks by default */ + pfn_param.mscan = DEFAULT_MSCAN; + /* default repeat = 10 */ + pfn_param.repeat = DEFAULT_REPEAT; + /* by default, maximum scan interval = 2^2 + * scan_freq when adaptive scan is turned on + */ + pfn_param.exp = DEFAULT_EXP; + if (mode == DHD_PNO_BATCH_MODE) { + /* In case of BATCH SCAN */ + if (pno_params->params_batch.bestn) + pfn_param.bestn = pno_params->params_batch.bestn; + if (pno_params->params_batch.scan_fr) + pfn_param.scan_freq = htod32(pno_params->params_batch.scan_fr); + if (pno_params->params_batch.mscan) + pfn_param.mscan = pno_params->params_batch.mscan; + /* enable broadcast scan */ + pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); + } else if (mode == DHD_PNO_HOTLIST_MODE) { + /* In case of HOTLIST SCAN */ + if (pno_params->params_hotlist.scan_fr) + pfn_param.scan_freq = htod32(pno_params->params_hotlist.scan_fr); + pfn_param.bestn = 0; + pfn_param.repeat = 0; + /* enable broadcast scan */ + pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); + } + if (combined_scan) { + /* Disable Adaptive Scan */ + pfn_param.flags &= ~(htod16(ENABLE << ENABLE_ADAPTSCAN_BIT)); + pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); + pfn_param.repeat = 0; + pfn_param.exp = 0; + 
if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + /* In case of Legacy PNO + BATCH SCAN */ + _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + if (_params->params_batch.bestn) + pfn_param.bestn = _params->params_batch.bestn; + if (_params->params_batch.scan_fr) + pfn_param.scan_freq = htod32(_params->params_batch.scan_fr); + if (_params->params_batch.mscan) + pfn_param.mscan = _params->params_batch.mscan; + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + /* In case of Legacy PNO + HOTLIST SCAN */ + _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + if (_params->params_hotlist.scan_fr) + pfn_param.scan_freq = htod32(_params->params_hotlist.scan_fr); + pfn_param.bestn = 0; + pfn_param.repeat = 0; + } + } + } +#ifdef GSCAN_SUPPORT + if (mode & DHD_PNO_GSCAN_MODE) { + uint32 lost_network_timeout; + + pfn_param.scan_freq = htod32(pno_params->params_gscan.scan_fr); + if (pno_params->params_gscan.mscan) { + pfn_param.bestn = pno_params->params_gscan.bestn; + pfn_param.mscan = pno_params->params_gscan.mscan; + pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT); + } + /* RSSI margin of 30 dBm */ + pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM); + /* ADAPTIVE turned off */ + pfn_param.flags &= ~(htod16(ENABLE << ENABLE_ADAPTSCAN_BIT)); + pfn_param.repeat = 0; + pfn_param.exp = 0; + pfn_param.slow_freq = 0; + + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + dhd_pno_params_t *_params; + + _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + + pfn_param.scan_freq = htod32(MIN(pno_params->params_gscan.scan_fr, + _params->params_legacy.scan_fr)); + } + + lost_network_timeout = (pno_params->params_gscan.max_ch_bucket_freq * + pfn_param.scan_freq * + pno_params->params_gscan.lost_ap_window); + if (lost_network_timeout) { + pfn_param.lost_network_timeout = htod32(MIN(lost_network_timeout, + GSCAN_MIN_BSSID_TIMEOUT)); + } else { + pfn_param.lost_network_timeout = htod32(GSCAN_MIN_BSSID_TIMEOUT); + } + } else +#endif /* GSCAN_SUPPORT */ + { + if (pfn_param.scan_freq < htod32(PNO_SCAN_MIN_FW_SEC) || + pfn_param.scan_freq > htod32(PNO_SCAN_MAX_FW_SEC)) { + DHD_ERROR(("%s pno freq(%d sec) is not valid \n", + __FUNCTION__, PNO_SCAN_MIN_FW_SEC)); + err = BCME_BADARG; + goto exit; + } + } + + memset(&macaddr, 0, ETHER_ADDR_LEN); + memcpy(&macaddr, _pno_state->pno_oui, DOT11_OUI_LEN); + + DHD_PNO(("Setting mac oui to FW - %02x:%02x:%02x\n", _pno_state->pno_oui[0], + _pno_state->pno_oui[1], _pno_state->pno_oui[2])); + err = dhd_pno_set_mac_addr(dhd, &macaddr); + if (err < 0) { + DHD_ERROR(("%s : failed to set pno mac address, error - %d\n", __FUNCTION__, err)); + goto exit; + } + + +#ifdef GSCAN_SUPPORT + if (mode == DHD_PNO_BATCH_MODE || + ((mode & DHD_PNO_GSCAN_MODE) && pno_params->params_gscan.mscan)) { +#else + if (mode == DHD_PNO_BATCH_MODE) { +#endif /* GSCAN_SUPPORT */ + + int _tmp = pfn_param.bestn; + /* set bestn to calculate the max mscan which firmware supports */ + err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to set pfnmem\n", __FUNCTION__)); + goto exit; + } + /* get max mscan which the firmware supports */ + err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 0); + if (err < 0) { + DHD_ERROR(("%s : failed to get pfnmem\n", __FUNCTION__)); + goto exit; + } + DHD_PNO((" returned mscan : %d, set bestn : %d\n", _tmp, pfn_param.bestn)); + pfn_param.mscan = MIN(pfn_param.mscan, _tmp); + } + err = 
dhd_iovar(dhd, 0, "pfn_set", (char *)&pfn_param, sizeof(pfn_param), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_set %d\n", __FUNCTION__, err)); + goto exit; + } + /* need to return mscan if this is for batch scan instead of err */ + err = (mode == DHD_PNO_BATCH_MODE)? pfn_param.mscan : err; +exit: + return err; +} +static int +_dhd_pno_add_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssids_list, int nssid) +{ + int err = BCME_OK; + int i = 0; + wl_pfn_t pfn_element; + NULL_CHECK(dhd, "dhd is NULL", err); + if (nssid) { + NULL_CHECK(ssids_list, "ssid list is NULL", err); + } + memset(&pfn_element, 0, sizeof(pfn_element)); + { + int j; + for (j = 0; j < nssid; j++) { + DHD_PNO(("%d: scan for %s size = %d hidden = %d\n", j, + ssids_list[j].SSID, ssids_list[j].SSID_len, ssids_list[j].hidden)); + } + } + /* Check for broadcast ssid */ + for (i = 0; i < nssid; i++) { + if (!ssids_list[i].SSID_len) { + DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", i)); + err = BCME_ERROR; + goto exit; + } + } + /* set all pfn ssid */ + for (i = 0; i < nssid; i++) { + pfn_element.infra = htod32(DOT11_BSSTYPE_INFRASTRUCTURE); + pfn_element.auth = (DOT11_OPEN_SYSTEM); + pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY); + pfn_element.wsec = htod32(0); + pfn_element.infra = htod32(1); + if (ssids_list[i].hidden) { + pfn_element.flags = htod32(ENABLE << WL_PFN_HIDDEN_BIT); + } else { + pfn_element.flags = 0; + } + memcpy((char *)pfn_element.ssid.SSID, ssids_list[i].SSID, + ssids_list[i].SSID_len); + pfn_element.ssid.SSID_len = ssids_list[i].SSID_len; + err = dhd_iovar(dhd, 0, "pfn_add", (char *)&pfn_element, + sizeof(pfn_element), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__)); + goto exit; + } + } +exit: + return err; +} +/* qsort compare function */ +static int +_dhd_pno_cmpfunc(const void *a, const void *b) +{ + return (*(uint16*)a - *(uint16*)b); +} +static int +_dhd_pno_chan_merge(uint16 *d_chan_list, int *nchan, + uint16 *chan_list1, int nchan1, uint16 *chan_list2, int nchan2) +{ + int err = BCME_OK; + int i = 0, j = 0, k = 0; + uint16 tmp; + NULL_CHECK(d_chan_list, "d_chan_list is NULL", err); + NULL_CHECK(nchan, "nchan is NULL", err); + NULL_CHECK(chan_list1, "chan_list1 is NULL", err); + NULL_CHECK(chan_list2, "chan_list2 is NULL", err); + /* chan_list1 and chan_list2 should be sorted at first */ + while (i < nchan1 && j < nchan2) { + tmp = chan_list1[i] < chan_list2[j]? 
+			chan_list1[i++] : chan_list2[j++];
+		for (; i < nchan1 && chan_list1[i] == tmp; i++);
+		for (; j < nchan2 && chan_list2[j] == tmp; j++);
+		d_chan_list[k++] = tmp;
+	}
+
+	while (i < nchan1) {
+		tmp = chan_list1[i++];
+		for (; i < nchan1 && chan_list1[i] == tmp; i++);
+		d_chan_list[k++] = tmp;
+	}
+
+	while (j < nchan2) {
+		tmp = chan_list2[j++];
+		for (; j < nchan2 && chan_list2[j] == tmp; j++);
+		d_chan_list[k++] = tmp;
+
+	}
+	*nchan = k;
+	return err;
+}
+static int
+_dhd_pno_get_channels(dhd_pub_t *dhd, uint16 *d_chan_list,
+	int *nchan, uint8 band, bool skip_dfs)
+{
+	int err = BCME_OK;
+	int i, j;
+	uint32 chan_buf[WL_NUMCHANNELS + 1];
+	wl_uint32_list_t *list;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (*nchan) {
+		NULL_CHECK(d_chan_list, "d_chan_list is NULL", err);
+	}
+	list = (wl_uint32_list_t *) (void *)chan_buf;
+	list->count = htod32(WL_NUMCHANNELS);
+	err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, chan_buf, sizeof(chan_buf), FALSE, 0);
+	if (err < 0) {
+		DHD_ERROR(("failed to get channel list (err: %d)\n", err));
+		goto exit;
+	}
+	for (i = 0, j = 0; i < dtoh32(list->count) && i < *nchan; i++) {
+		if (band == WLC_BAND_2G) {
+			if (dtoh32(list->element[i]) > CHANNEL_2G_MAX)
+				continue;
+		} else if (band == WLC_BAND_5G) {
+			if (dtoh32(list->element[i]) <= CHANNEL_2G_MAX)
+				continue;
+			if (skip_dfs && is_dfs(dtoh32(list->element[i])))
+				continue;
+
+		} else if (band == WLC_BAND_AUTO) {
+			if (skip_dfs || !is_dfs(dtoh32(list->element[i])))
+				continue;
+		} else { /* All channels */
+			if (skip_dfs && is_dfs(dtoh32(list->element[i])))
+				continue;
+		}
+		if (dtoh32(list->element[i]) <= CHANNEL_5G_MAX) {
+			d_chan_list[j++] = (uint16) dtoh32(list->element[i]);
+		} else {
+			err = BCME_BADCHAN;
+			goto exit;
+		}
+	}
+	*nchan = j;
+exit:
+	return err;
+}
+static int
+_dhd_pno_convert_format(dhd_pub_t *dhd, struct dhd_pno_batch_params *params_batch,
+	char *buf, int nbufsize)
+{
+	int err = BCME_OK;
+	int bytes_written = 0, nreadsize = 0;
+	int t_delta = 0;
+	int nleftsize = nbufsize;
+	uint8 cnt = 0;
+	char *bp = buf;
+	char eabuf[ETHER_ADDR_STR_LEN];
+#ifdef PNO_DEBUG
+	char *_base_bp;
+	char msg[150];
+#endif
+	dhd_pno_bestnet_entry_t *iter, *next;
+	dhd_pno_scan_results_t *siter, *snext;
+	dhd_pno_best_header_t *phead, *pprev;
+	NULL_CHECK(params_batch, "params_batch is NULL", err);
+	if (nbufsize > 0)
+		NULL_CHECK(buf, "buf is NULL", err);
+	/* initialize the buffer */
+	memset(buf, 0, nbufsize);
+	DHD_PNO(("%s enter \n", __FUNCTION__));
+	/* # of scans */
+	if (!params_batch->get_batch.batch_started) {
+		bp += nreadsize = sprintf(bp, "scancount=%d\n",
+			params_batch->get_batch.expired_tot_scan_cnt);
+		nleftsize -= nreadsize;
+		params_batch->get_batch.batch_started = TRUE;
+	}
+	DHD_PNO(("%s scancount %d\n", __FUNCTION__, params_batch->get_batch.expired_tot_scan_cnt));
+	/* walk the scan results to estimate at which scan result this report will end */
+	list_for_each_entry_safe(siter, snext,
+		&params_batch->get_batch.expired_scan_results_list, list) {
+		phead = siter->bestnetheader;
+		while (phead != NULL) {
+			/* if the remaining size is less than this best-header's total size, stop here */
+			if (nleftsize <=
+				(phead->tot_size + phead->tot_cnt * ENTRY_OVERHEAD))
+				goto exit;
+			/* increase scan count */
+			cnt++;
+			/* # best of each scan */
+			DHD_PNO(("\n<loop : %d, apcount %d>\n", cnt - 1, phead->tot_cnt));
+			/* attribute of the scan */
+			if (phead->reason & PNO_STATUS_ABORT_MASK) {
+				bp += nreadsize = sprintf(bp, "trunc\n");
+				nleftsize -= nreadsize;
+			}
+			list_for_each_entry_safe(iter, next,
+				&phead->entry_list,
list) {
+				t_delta = jiffies_to_msecs(jiffies - iter->recorded_time);
+#ifdef PNO_DEBUG
+				_base_bp = bp;
+				memset(msg, 0, sizeof(msg));
+#endif
+				/* BSSID info */
+				bp += nreadsize = sprintf(bp, "bssid=%s\n",
+					bcm_ether_ntoa((const struct ether_addr *)&iter->BSSID, eabuf));
+				nleftsize -= nreadsize;
+				/* SSID */
+				bp += nreadsize = sprintf(bp, "ssid=%s\n", iter->SSID);
+				nleftsize -= nreadsize;
+				/* channel */
+				bp += nreadsize = sprintf(bp, "freq=%d\n",
+					wf_channel2mhz(iter->channel,
+					iter->channel <= CH_MAX_2G_CHANNEL?
+					WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+				nleftsize -= nreadsize;
+				/* RSSI */
+				bp += nreadsize = sprintf(bp, "level=%d\n", iter->RSSI);
+				nleftsize -= nreadsize;
+				/* add the time consumed in Driver to the timestamp of firmware */
+				iter->timestamp += t_delta;
+				bp += nreadsize = sprintf(bp, "age=%d\n", iter->timestamp);
+				nleftsize -= nreadsize;
+				/* RTT0 */
+				bp += nreadsize = sprintf(bp, "dist=%d\n",
+					(iter->rtt0 == 0)? -1 : iter->rtt0);
+				nleftsize -= nreadsize;
+				/* RTT1 */
+				bp += nreadsize = sprintf(bp, "distSd=%d\n",
+					(iter->rtt0 == 0)? -1 : iter->rtt1);
+				nleftsize -= nreadsize;
+				bp += nreadsize = sprintf(bp, "%s", AP_END_MARKER);
+				nleftsize -= nreadsize;
+				list_del(&iter->list);
+				MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE);
+#ifdef PNO_DEBUG
+				memcpy(msg, _base_bp, bp - _base_bp);
+				DHD_PNO(("Entry : \n%s", msg));
+#endif
+			}
+			bp += nreadsize = sprintf(bp, "%s", SCAN_END_MARKER);
+			DHD_PNO(("%s", SCAN_END_MARKER));
+			nleftsize -= nreadsize;
+			pprev = phead;
+			/* reset the header */
+			siter->bestnetheader = phead = phead->next;
+			MFREE(dhd->osh, pprev, BEST_HEADER_SIZE);
+
+			siter->cnt_header--;
+		}
+		if (phead == NULL) {
+			/* all entries of this scan were consumed, so it is ok to delete it */
+			list_del(&siter->list);
+			MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
+		}
+	}
+exit:
+	if (cnt < params_batch->get_batch.expired_tot_scan_cnt) {
+		DHD_ERROR(("Buffer is too small to save all batch entries,"
+			" cnt : %d (remained_scan_cnt): %d\n",
+			cnt, params_batch->get_batch.expired_tot_scan_cnt - cnt));
+	}
+	params_batch->get_batch.expired_tot_scan_cnt -= cnt;
+	/* set FALSE only if the linked list is empty after returning the data */
+	if (list_empty(&params_batch->get_batch.expired_scan_results_list)) {
+		params_batch->get_batch.batch_started = FALSE;
+		bp += sprintf(bp, "%s", RESULTS_END_MARKER);
+		DHD_PNO(("%s", RESULTS_END_MARKER));
+		DHD_PNO(("%s : Getting the batching data is complete\n", __FUNCTION__));
+	}
+	/* return used memory in buffer */
+	bytes_written = (int32)(bp - buf);
+	return bytes_written;
+}
+static int
+_dhd_pno_clear_all_batch_results(dhd_pub_t *dhd, struct list_head *head, bool only_last)
+{
+	int err = BCME_OK;
+	int removed_scan_cnt = 0;
+	dhd_pno_scan_results_t *siter, *snext;
+	dhd_pno_best_header_t *phead, *pprev;
+	dhd_pno_bestnet_entry_t *iter, *next;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(head, "head is NULL", err);
+	NULL_CHECK(head->next, "head->next is NULL", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	list_for_each_entry_safe(siter, snext,
+		head, list) {
+		if (only_last) {
+			/* in case we need to delete only the last one */
+			if (!list_is_last(&siter->list, head)) {
+				/* skip if the one is not last */
+				continue;
+			}
+		}
+		/* delete all data belonging to this node */
+		phead = siter->bestnetheader;
+		while (phead != NULL) {
+			removed_scan_cnt++;
+			list_for_each_entry_safe(iter, next,
+				&phead->entry_list, list) {
+				list_del(&iter->list);
+				MFREE(dhd->osh, iter, BESTNET_ENTRY_SIZE);
+			}
+			pprev = phead;
+			phead
= phead->next;
+			MFREE(dhd->osh, pprev, BEST_HEADER_SIZE);
+		}
+		if (phead == NULL) {
+			/* it is ok to delete top node */
+			list_del(&siter->list);
+			MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
+		}
+	}
+	return removed_scan_cnt;
+}
+
+static int
+_dhd_pno_cfg(dhd_pub_t *dhd, uint16 *channel_list, int nchan)
+{
+	int err = BCME_OK;
+	int i = 0;
+	wl_pfn_cfg_t pfncfg_param;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	if (nchan) {
+		NULL_CHECK(channel_list, "channel_list is NULL", err);
+	}
+	DHD_PNO(("%s enter : nchan : %d\n", __FUNCTION__, nchan));
+	memset(&pfncfg_param, 0, sizeof(wl_pfn_cfg_t));
+	/* Setup default values */
+	pfncfg_param.reporttype = htod32(WL_PFN_REPORT_ALLNET);
+	pfncfg_param.channel_num = htod32(0);
+
+	for (i = 0; i < nchan && nchan < WL_NUMCHANNELS; i++)
+		pfncfg_param.channel_list[i] = channel_list[i];
+
+	pfncfg_param.channel_num = htod32(nchan);
+	err = dhd_iovar(dhd, 0, "pfn_cfg", (char *)&pfncfg_param, sizeof(pfncfg_param), 1);
+	if (err < 0) {
+		DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__));
+		goto exit;
+	}
+exit:
+	return err;
+}
+static int
+_dhd_pno_reinitialize_prof(dhd_pub_t *dhd, dhd_pno_params_t *params, dhd_pno_mode_t mode)
+{
+	int err = BCME_OK;
+	dhd_pno_status_info_t *_pno_state;
+	NULL_CHECK(dhd, "dhd is NULL\n", err);
+	NULL_CHECK(dhd->pno_state, "pno_state is NULL\n", err);
+	DHD_PNO(("%s enter\n", __FUNCTION__));
+	_pno_state = PNO_GET_PNOSTATE(dhd);
+	mutex_lock(&_pno_state->pno_mutex);
+	switch (mode) {
+	case DHD_PNO_LEGACY_MODE: {
+		struct dhd_pno_ssid *iter, *next;
+		if (params->params_legacy.nssid > 0) {
+			list_for_each_entry_safe(iter, next,
+				&params->params_legacy.ssid_list, list) {
+				list_del(&iter->list);
+				kfree(iter);
+			}
+		}
+		params->params_legacy.nssid = 0;
+		params->params_legacy.scan_fr = 0;
+		params->params_legacy.pno_freq_expo_max = 0;
+		params->params_legacy.pno_repeat = 0;
+		params->params_legacy.nchan = 0;
+		memset(params->params_legacy.chan_list, 0,
+			sizeof(params->params_legacy.chan_list));
+		break;
+	}
+	case DHD_PNO_BATCH_MODE: {
+		params->params_batch.scan_fr = 0;
+		params->params_batch.mscan = 0;
+		params->params_batch.nchan = 0;
+		params->params_batch.rtt = 0;
+		params->params_batch.bestn = 0;
+		params->params_batch.band = WLC_BAND_AUTO;
+		memset(params->params_batch.chan_list, 0,
+			sizeof(params->params_batch.chan_list));
+		params->params_batch.get_batch.batch_started = FALSE;
+		params->params_batch.get_batch.buf = NULL;
+		params->params_batch.get_batch.bufsize = 0;
+		params->params_batch.get_batch.reason = 0;
+		_dhd_pno_clear_all_batch_results(dhd,
+			&params->params_batch.get_batch.scan_results_list, FALSE);
+		_dhd_pno_clear_all_batch_results(dhd,
+			&params->params_batch.get_batch.expired_scan_results_list, FALSE);
+		params->params_batch.get_batch.tot_scan_cnt = 0;
+		params->params_batch.get_batch.expired_tot_scan_cnt = 0;
+		params->params_batch.get_batch.top_node_cnt = 0;
+		INIT_LIST_HEAD(&params->params_batch.get_batch.scan_results_list);
+		INIT_LIST_HEAD(&params->params_batch.get_batch.expired_scan_results_list);
+		break;
+	}
+	case DHD_PNO_HOTLIST_MODE: {
+		struct dhd_pno_bssid *iter, *next;
+		if (params->params_hotlist.nbssid > 0) {
+			list_for_each_entry_safe(iter, next,
+				&params->params_hotlist.bssid_list, list) {
+				list_del(&iter->list);
+				kfree(iter);
+			}
+		}
+		params->params_hotlist.scan_fr = 0;
+		params->params_hotlist.nbssid = 0;
+		params->params_hotlist.nchan = 0;
+		params->params_batch.band = WLC_BAND_AUTO;
+		memset(params->params_hotlist.chan_list, 0,
+
sizeof(params->params_hotlist.chan_list)); + break; + } + default: + DHD_ERROR(("%s : unknown mode : %d\n", __FUNCTION__, mode)); + break; + } + mutex_unlock(&_pno_state->pno_mutex); + return err; +} +static int +_dhd_pno_add_bssid(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, int nbssid) +{ + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + if (nbssid) { + NULL_CHECK(p_pfn_bssid, "bssid list is NULL", err); + } + err = dhd_iovar(dhd, 0, "pfn_add_bssid", (char *)p_pfn_bssid, + sizeof(wl_pfn_bssid_t) * nbssid, 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__)); + goto exit; + } +exit: + return err; +} + +#ifdef GSCAN_SUPPORT +static int +_dhd_pno_add_significant_bssid(dhd_pub_t *dhd, + wl_pfn_significant_bssid_t *p_pfn_significant_bssid, int nbssid) +{ + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + + if (!nbssid) { + err = BCME_ERROR; + goto exit; + } + + NULL_CHECK(p_pfn_significant_bssid, "bssid list is NULL", err); + + err = dhd_iovar(dhd, 0, "pfn_add_swc_bssid", (char *)p_pfn_significant_bssid, + sizeof(wl_pfn_significant_bssid_t) * nbssid, 1); + if (err < 0) { + DHD_ERROR(("%s : failed to execute pfn_significant_bssid %d\n", __FUNCTION__, err)); + goto exit; + } +exit: + return err; +} +#endif /* GSCAN_SUPPORT */ + +int +dhd_pno_stop_for_ssid(dhd_pub_t *dhd) +{ + int err = BCME_OK; + uint32 mode = 0; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + wl_pfn_bssid_t *p_pfn_bssid = NULL; + NULL_CHECK(dhd, "dev is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + if (!(_pno_state->pno_mode & DHD_PNO_LEGACY_MODE)) { + DHD_ERROR(("%s : LEGACY PNO MODE is not enabled\n", __FUNCTION__)); + goto exit; + } + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + struct dhd_pno_gscan_params *gscan_params; + + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = &_params->params_gscan; + + if (gscan_params->mscan) + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* save current pno_mode before calling dhd_pno_clean */ + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + /* restore previous pno_mode */ + _pno_state->pno_mode = mode; + /* Restart gscan */ + err = dhd_pno_initiate_gscan_request(dhd, 1, 0); + goto exit; + } +#endif /* GSCAN_SUPPORT */ + /* restart Batch mode if the batch mode is on */ + if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { + /* retrieve the batching data from firmware into host */ + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* save current pno_mode before calling dhd_pno_clean */ + mode = _pno_state->pno_mode; + dhd_pno_clean(dhd); + /* restore previous pno_mode */ + _pno_state->pno_mode = mode; + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + /* restart BATCH SCAN */ + err = dhd_pno_set_for_batch(dhd, &_params->params_batch); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + /* restart HOTLIST SCAN */ + struct dhd_pno_bssid *iter, *next; + _params = 
&(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) * + _params->params_hotlist.nbssid, GFP_KERNEL); + if (p_pfn_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array" + " (count: %d)", + __FUNCTION__, _params->params_hotlist.nbssid)); + err = BCME_ERROR; + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + goto exit; + } + /* convert dhd_pno_bssid to wl_pfn_bssid */ + list_for_each_entry_safe(iter, next, + &_params->params_hotlist.bssid_list, list) { + memcpy(&p_pfn_bssid->macaddr, + &iter->macaddr, ETHER_ADDR_LEN); + p_pfn_bssid->flags = iter->flags; + p_pfn_bssid++; + } + err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } + } else { + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } +exit: + kfree(p_pfn_bssid); + return err; +} + +int +dhd_pno_enable(dhd_pub_t *dhd, int enable) +{ + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + DHD_PNO(("%s enter\n", __FUNCTION__)); + return (_dhd_pno_enable(dhd, enable)); +} + +static wlc_ssid_ext_t * +dhd_pno_get_legacy_pno_ssid(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state) +{ + int err = BCME_OK; + int i; + struct dhd_pno_ssid *iter, *next; + dhd_pno_params_t *_params1 = &pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + wlc_ssid_ext_t *p_ssid_list; + + p_ssid_list = kzalloc(sizeof(wlc_ssid_ext_t) * + _params1->params_legacy.nssid, GFP_KERNEL); + if (p_ssid_list == NULL) { + DHD_ERROR(("%s : failed to allocate wlc_ssid_ext_t array (count: %d)", + __FUNCTION__, _params1->params_legacy.nssid)); + err = BCME_ERROR; + pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + goto exit; + } + i = 0; + /* convert dhd_pno_ssid to wlc_ssid_ext_t */ + list_for_each_entry_safe(iter, next, &_params1->params_legacy.ssid_list, list) { + p_ssid_list[i].SSID_len = iter->SSID_len; + p_ssid_list[i].hidden = iter->hidden; + memcpy(p_ssid_list[i].SSID, iter->SSID, p_ssid_list[i].SSID_len); + i++; + } +exit: + return p_ssid_list; +} + +static int +dhd_pno_add_to_ssid_list(dhd_pno_params_t *params, wlc_ssid_ext_t *ssid_list, + int nssid) +{ + int ret = 0; + int i; + struct dhd_pno_ssid *_pno_ssid; + + for (i = 0; i < nssid; i++) { + if (ssid_list[i].SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s : Invalid SSID length %d\n", + __FUNCTION__, ssid_list[i].SSID_len)); + ret = BCME_ERROR; + goto exit; + } + _pno_ssid = kzalloc(sizeof(struct dhd_pno_ssid), GFP_KERNEL); + if (_pno_ssid == NULL) { + DHD_ERROR(("%s : failed to allocate struct dhd_pno_ssid\n", + __FUNCTION__)); + ret = BCME_ERROR; + goto exit; + } + _pno_ssid->SSID_len = ssid_list[i].SSID_len; + _pno_ssid->hidden = ssid_list[i].hidden; + memcpy(_pno_ssid->SSID, ssid_list[i].SSID, _pno_ssid->SSID_len); + list_add_tail(&_pno_ssid->list, ¶ms->params_legacy.ssid_list); + } + +exit: + return ret; +} + +int +dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, + uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan) +{ + dhd_pno_params_t *_params; + dhd_pno_params_t *_params2; + dhd_pno_status_info_t *_pno_state; + uint16 _chan_list[WL_NUMCHANNELS]; + int32 tot_nchan = 0; + int err = BCME_OK; + int i; + int mode = 0; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, 
"pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit_no_clear; + } + DHD_PNO(("%s enter : scan_fr :%d, pno_repeat :%d," + "pno_freq_expo_max: %d, nchan :%d\n", __FUNCTION__, + scan_fr, pno_repeat, pno_freq_expo_max, nchan)); + + _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + /* If GSCAN is also ON will handle this down below */ +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE && + !(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) { +#else + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { +#endif /* GSCAN_SUPPORT */ + DHD_ERROR(("%s : Legacy PNO mode was already started, " + "will disable previous one to start new one\n", __FUNCTION__)); + err = dhd_pno_stop_for_ssid(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to stop legacy PNO (err %d)\n", + __FUNCTION__, err)); + goto exit_no_clear; + } + } + _pno_state->pno_mode |= DHD_PNO_LEGACY_MODE; + err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + if (err < 0) { + DHD_ERROR(("%s : failed to reinitialize profile (err %d)\n", + __FUNCTION__, err)); + goto exit_no_clear; + } + memset(_chan_list, 0, sizeof(_chan_list)); + tot_nchan = MIN(nchan, WL_NUMCHANNELS); + if (tot_nchan > 0 && channel_list) { + for (i = 0; i < tot_nchan; i++) + _params->params_legacy.chan_list[i] = _chan_list[i] = channel_list[i]; + } +#ifdef GSCAN_SUPPORT + else { + tot_nchan = WL_NUMCHANNELS; + err = _dhd_pno_get_channels(dhd, _chan_list, &tot_nchan, + (WLC_BAND_2G | WLC_BAND_5G), TRUE); + if (err < 0) { + tot_nchan = 0; + DHD_PNO(("Could not get channel list for PNO SSID\n")); + } else { + for (i = 0; i < tot_nchan; i++) + _params->params_legacy.chan_list[i] = _chan_list[i]; + } + } +#endif /* GSCAN_SUPPORT */ + + if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) { + DHD_PNO(("BATCH SCAN is on progress in firmware\n")); + /* retrieve the batching data from firmware into host */ + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = _dhd_pno_enable(dhd, PNO_OFF); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + goto exit_no_clear; + } + /* restore the previous mode */ + _pno_state->pno_mode = mode; + /* use superset of channel list between two mode */ + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + if (_params2->params_batch.nchan > 0 && tot_nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_batch.chan_list[0], + _params2->params_batch.nchan, + &channel_list[0], tot_nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list" + " between legacy and batch\n", + __FUNCTION__)); + goto exit_no_clear; + } + } else { + DHD_PNO(("superset channel will use" + " all channels in firmware\n")); + } + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + if (_params2->params_hotlist.nchan > 0 && tot_nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_hotlist.chan_list[0], + _params2->params_hotlist.nchan, + &channel_list[0], tot_nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list" + " between legacy and hotlist\n", + __FUNCTION__)); + goto exit_no_clear; + } + } + } + } + _params->params_legacy.scan_fr = scan_fr; + _params->params_legacy.pno_repeat 
= pno_repeat; + _params->params_legacy.pno_freq_expo_max = pno_freq_expo_max; + _params->params_legacy.nchan = tot_nchan; + _params->params_legacy.nssid = nssid; + INIT_LIST_HEAD(&_params->params_legacy.ssid_list); +#ifdef GSCAN_SUPPORT + /* dhd_pno_initiate_gscan_request will handle simultaneous Legacy PNO and GSCAN */ + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + if (dhd_pno_add_to_ssid_list(_params, ssid_list, nssid) < 0) { + err = BCME_ERROR; + goto exit; + } + DHD_PNO(("GSCAN mode is ON! Will restart GSCAN+Legacy PNO\n")); + err = dhd_pno_initiate_gscan_request(dhd, 1, 0); + goto exit; + } +#endif /* GSCAN_SUPPORT */ + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_LEGACY_MODE)) < 0) { + DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err)); + goto exit; + } + if ((err = _dhd_pno_add_ssid(dhd, ssid_list, nssid)) < 0) { + DHD_ERROR(("failed to add ssid list(err %d), %d in firmware\n", err, nssid)); + goto exit; + } + if (dhd_pno_add_to_ssid_list(_params, ssid_list, nssid) < 0) { + err = BCME_ERROR; + goto exit; + } + if (tot_nchan > 0) { + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + } + if (_pno_state->pno_status == DHD_PNO_DISABLED) { + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) + DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__)); + } +exit: + if (err < 0) { + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + } +exit_no_clear: + /* clear mode in case of error */ + if (err < 0) { + int ret = dhd_pno_clean(dhd); + + if (ret < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, ret)); + } else { + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + } + } + return err; +} +int +dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params) +{ + int err = BCME_OK; + uint16 _chan_list[WL_NUMCHANNELS]; + int rem_nchan = 0, tot_nchan = 0; + int mode = 0, mscan = 0; + dhd_pno_params_t *_params; + dhd_pno_params_t *_params2; + dhd_pno_status_info_t *_pno_state; + wlc_ssid_ext_t *p_ssid_list = NULL; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + NULL_CHECK(batch_params, "batch_params is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { + _pno_state->pno_mode |= DHD_PNO_BATCH_MODE; + err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n", + __FUNCTION__)); + goto exit; + } + } else { + /* batch mode is already started */ + return -EBUSY; + } + _params->params_batch.scan_fr = batch_params->scan_fr; + _params->params_batch.bestn = batch_params->bestn; + _params->params_batch.mscan = (batch_params->mscan)? 
+		batch_params->mscan : DEFAULT_BATCH_MSCAN;
+	_params->params_batch.nchan = batch_params->nchan;
+	memcpy(_params->params_batch.chan_list, batch_params->chan_list,
+		sizeof(_params->params_batch.chan_list));
+
+	memset(_chan_list, 0, sizeof(_chan_list));
+
+	rem_nchan = ARRAYSIZE(batch_params->chan_list) - batch_params->nchan;
+	if (batch_params->band == WLC_BAND_2G || batch_params->band == WLC_BAND_5G) {
+		/* get a valid channel list based on band B or A */
+		err = _dhd_pno_get_channels(dhd,
+			&_params->params_batch.chan_list[batch_params->nchan],
+			&rem_nchan, batch_params->band, FALSE);
+		if (err < 0) {
+			DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+				__FUNCTION__, batch_params->band));
+			goto exit;
+		}
+		/* now we need to update nchan because rem_nchan has the valid channel count */
+		_params->params_batch.nchan += rem_nchan;
+		/* need to sort channel list */
+		sort(_params->params_batch.chan_list, _params->params_batch.nchan,
+			sizeof(_params->params_batch.chan_list[0]), _dhd_pno_cmpfunc, NULL);
+	}
+#ifdef PNO_DEBUG
+{
+	int i;
+	DHD_PNO(("Channel list : "));
+	for (i = 0; i < _params->params_batch.nchan; i++) {
+		DHD_PNO(("%d ", _params->params_batch.chan_list[i]));
+	}
+	DHD_PNO(("\n"));
+}
+#endif
+	if (_params->params_batch.nchan) {
+		/* copy the channel list into local array */
+		memcpy(_chan_list, _params->params_batch.chan_list, sizeof(_chan_list));
+		tot_nchan = _params->params_batch.nchan;
+	}
+	if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+		DHD_PNO(("PNO SSID is in progress in firmware\n"));
+		/* store current pno_mode before disabling pno */
+		mode = _pno_state->pno_mode;
+		err = _dhd_pno_enable(dhd, PNO_OFF);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+			goto exit;
+		}
+		/* restore the previous mode */
+		_pno_state->pno_mode = mode;
+		/* Use the superset of the two modes' channel lists */
+		_params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+		if (_params2->params_legacy.nchan > 0 && _params->params_batch.nchan > 0) {
+			err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
+				&_params2->params_legacy.chan_list[0],
+				_params2->params_legacy.nchan,
+				&_params->params_batch.chan_list[0], _params->params_batch.nchan);
+			if (err < 0) {
+				DHD_ERROR(("%s : failed to merge channel list"
+					" between legacy and batch\n",
+					__FUNCTION__));
+				goto exit;
+			}
+		} else {
+			DHD_PNO(("superset channel will use all channels in firmware\n"));
+		}
+		p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state);
+		if (!p_ssid_list) {
+			err = BCME_NOMEM;
+			DHD_ERROR(("failed to get Legacy PNO SSID list\n"));
+			goto exit;
+		}
+		if ((err = _dhd_pno_add_ssid(dhd, p_ssid_list,
+			_params2->params_legacy.nssid)) < 0) {
+			DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err));
+			goto exit;
+		}
+	}
+	if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_BATCH_MODE)) < 0) {
+		DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n",
+			__FUNCTION__, err));
+		goto exit;
+	} else {
+		/* we need to return mscan */
+		mscan = err;
+	}
+	if (tot_nchan > 0) {
+		if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+			DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+				__FUNCTION__, err));
+			goto exit;
+		}
+	}
+	if (_pno_state->pno_status == DHD_PNO_DISABLED) {
+		if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0)
+			DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
+	}
+exit:
+	/* clear mode in case of error */
+	if (err < 0)
+		_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
+	else {
+		/* return #max scan firmware can do */
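+		/*
+		 * Illustrative note (added): _dhd_pno_set() overloads its return
+		 * value in batch mode - a non-negative result is the mscan count
+		 * the firmware actually accepted, and that count is what callers
+		 * receive here so they can size their result buffers.
+		 */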
+ err = mscan; + } + if (p_ssid_list) + kfree(p_ssid_list); + return err; +} + + +#ifdef GSCAN_SUPPORT +static void +dhd_pno_reset_cfg_gscan(dhd_pno_params_t *_params, + dhd_pno_status_info_t *_pno_state, uint8 flags) +{ + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (flags & GSCAN_FLUSH_SCAN_CFG) { + _params->params_gscan.bestn = 0; + _params->params_gscan.mscan = 0; + _params->params_gscan.buffer_threshold = GSCAN_BATCH_NO_THR_SET; + _params->params_gscan.scan_fr = 0; + _params->params_gscan.send_all_results_flag = 0; + memset(_params->params_gscan.channel_bucket, 0, + _params->params_gscan.nchannel_buckets * + sizeof(struct dhd_pno_gscan_channel_bucket)); + _params->params_gscan.nchannel_buckets = 0; + DHD_PNO(("Flush Scan config\n")); + } + if (flags & GSCAN_FLUSH_HOTLIST_CFG) { + struct dhd_pno_bssid *iter, *next; + if (_params->params_gscan.nbssid_hotlist > 0) { + list_for_each_entry_safe(iter, next, + &_params->params_gscan.hotlist_bssid_list, list) { + list_del(&iter->list); + kfree(iter); + } + } + _params->params_gscan.nbssid_hotlist = 0; + DHD_PNO(("Flush Hotlist Config\n")); + } + if (flags & GSCAN_FLUSH_SIGNIFICANT_CFG) { + dhd_pno_significant_bssid_t *iter, *next; + + if (_params->params_gscan.nbssid_significant_change > 0) { + list_for_each_entry_safe(iter, next, + &_params->params_gscan.significant_bssid_list, list) { + list_del(&iter->list); + kfree(iter); + } + } + _params->params_gscan.nbssid_significant_change = 0; + DHD_PNO(("Flush Significant Change Config\n")); + } + + return; +} + +void +dhd_pno_lock_batch_results(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + _pno_state = PNO_GET_PNOSTATE(dhd); + mutex_lock(&_pno_state->pno_mutex); + return; +} + +void +dhd_pno_unlock_batch_results(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + _pno_state = PNO_GET_PNOSTATE(dhd); + mutex_unlock(&_pno_state->pno_mutex); + return; +} + +void +dhd_wait_batch_results_complete(dhd_pub_t *dhd) +{ + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + /* Has the workqueue finished its job already?? */ + if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_IN_PROGRESS) { + DHD_PNO(("%s: Waiting to complete retrieval..\n", __FUNCTION__)); + wait_event_interruptible_timeout(_pno_state->batch_get_wait, + is_batch_retrieval_complete(&_params->params_gscan), + msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT)); + } else { /* GSCAN_BATCH_RETRIEVAL_COMPLETE */ + gscan_results_cache_t *iter; + uint16 num_results = 0; + int err; + + mutex_lock(&_pno_state->pno_mutex); + iter = _params->params_gscan.gscan_batch_cache; + while (iter) { + num_results += iter->tot_count - iter->tot_consumed; + iter = iter->next; + } + mutex_unlock(&_pno_state->pno_mutex); + + /* All results consumed/No results cached?? 
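+		 * ("consumed" here means tot_consumed has caught up with
+		 * tot_count on every cached gscan_results_cache_t node - an
+		 * added clarification.)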
+ * Get fresh results from FW + */ + if (!num_results) { + DHD_PNO(("%s: No results cached, getting from FW..\n", __FUNCTION__)); + err = dhd_retreive_batch_scan_results(dhd); + if (err == BCME_OK) { + wait_event_interruptible_timeout(_pno_state->batch_get_wait, + is_batch_retrieval_complete(&_params->params_gscan), + msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT)); + } + } + } + DHD_PNO(("%s: Wait complete\n", __FUNCTION__)); + + return; +} + +static void * +dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len) +{ + gscan_results_cache_t *iter, *results; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + uint16 num_scan_ids = 0, num_results = 0; + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + iter = results = _params->params_gscan.gscan_batch_cache; + while (iter) { + num_results += iter->tot_count - iter->tot_consumed; + num_scan_ids++; + iter = iter->next; + } + + *len = ((num_results << 16) | (num_scan_ids)); + return results; +} + +void * +dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, + void *info, uint32 *len) +{ + void *ret = NULL; + dhd_pno_gscan_capabilities_t *ptr; + + if (!len) { + DHD_ERROR(("%s: len is NULL\n", __FUNCTION__)); + return ret; + } + + switch (type) { + case DHD_PNO_GET_CAPABILITIES: + ptr = (dhd_pno_gscan_capabilities_t *) + kmalloc(sizeof(dhd_pno_gscan_capabilities_t), GFP_KERNEL); + if (!ptr) + break; + /* Hardcoding these values for now, need to get + * these values from FW, will change in a later check-in + */ + ptr->max_scan_cache_size = 12; + ptr->max_scan_buckets = GSCAN_MAX_CH_BUCKETS; + ptr->max_ap_cache_per_scan = 16; + ptr->max_rssi_sample_size = PFN_SWC_RSSI_WINDOW_MAX; + ptr->max_scan_reporting_threshold = 100; + ptr->max_hotlist_aps = PFN_HOTLIST_MAX_NUM_APS; + ptr->max_significant_wifi_change_aps = PFN_SWC_MAX_NUM_APS; + ret = (void *)ptr; + *len = sizeof(dhd_pno_gscan_capabilities_t); + break; + + case DHD_PNO_GET_BATCH_RESULTS: + ret = dhd_get_gscan_batch_results(dhd, len); + break; + case DHD_PNO_GET_CHANNEL_LIST: + if (info) { + uint16 ch_list[WL_NUMCHANNELS]; + uint32 *ptr, mem_needed, i; + int32 err, nchan = WL_NUMCHANNELS; + uint32 *gscan_band = (uint32 *) info; + uint8 band = 0; + + /* No band specified?, nothing to do */ + if ((*gscan_band & GSCAN_BAND_MASK) == 0) { + DHD_PNO(("No band specified\n")); + *len = 0; + break; + } + + /* HAL and DHD use different bits for 2.4G and + * 5G in bitmap. Hence translating it here... + */ + if (*gscan_band & GSCAN_BG_BAND_MASK) { + band |= WLC_BAND_2G; + } + if (*gscan_band & GSCAN_A_BAND_MASK) { + band |= WLC_BAND_5G; + } + + err = _dhd_pno_get_channels(dhd, ch_list, &nchan, + (band & GSCAN_ABG_BAND_MASK), + !(*gscan_band & GSCAN_DFS_MASK)); + + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list\n", + __FUNCTION__)); + *len = 0; + } else { + mem_needed = sizeof(uint32) * nchan; + ptr = (uint32 *) kmalloc(mem_needed, GFP_KERNEL); + if (!ptr) { + DHD_ERROR(("%s: Unable to malloc %d bytes\n", + __FUNCTION__, mem_needed)); + break; + } + for (i = 0; i < nchan; i++) { + ptr[i] = wf_channel2mhz(ch_list[i], + (ch_list[i] <= CH_MAX_2G_CHANNEL? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + } + ret = ptr; + *len = mem_needed; + } + } else { + *len = 0; + DHD_ERROR(("%s: info buffer is NULL\n", __FUNCTION__)); + } + break; + + default: + DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type)); + break; + } + + return ret; + +} + +int +dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, + void *buf, uint8 flush) +{ + int err = BCME_OK; + dhd_pno_params_t *_params; + int i; + dhd_pno_status_info_t *_pno_state; + + NULL_CHECK(dhd, "dhd is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + mutex_lock(&_pno_state->pno_mutex); + + switch (type) { + case DHD_PNO_BATCH_SCAN_CFG_ID: + { + gscan_batch_params_t *ptr = (gscan_batch_params_t *)buf; + _params->params_gscan.bestn = ptr->bestn; + _params->params_gscan.mscan = ptr->mscan; + _params->params_gscan.buffer_threshold = ptr->buffer_threshold; + break; + } + case DHD_PNO_GEOFENCE_SCAN_CFG_ID: + { + gscan_hotlist_scan_params_t *ptr = (gscan_hotlist_scan_params_t *)buf; + struct dhd_pno_bssid *_pno_bssid; + struct bssid_t *bssid_ptr; + int8 flags; + + if (flush) { + dhd_pno_reset_cfg_gscan(_params, _pno_state, + GSCAN_FLUSH_HOTLIST_CFG); + } + + if (!ptr->nbssid) { + break; + } + if (!_params->params_gscan.nbssid_hotlist) { + INIT_LIST_HEAD(&_params->params_gscan.hotlist_bssid_list); + } + if ((_params->params_gscan.nbssid_hotlist + + ptr->nbssid) > PFN_SWC_MAX_NUM_APS) { + DHD_ERROR(("Excessive number of hotlist APs programmed %d\n", + (_params->params_gscan.nbssid_hotlist + + ptr->nbssid))); + err = BCME_RANGE; + goto exit; + } + + for (i = 0, bssid_ptr = ptr->bssid; i < ptr->nbssid; i++, bssid_ptr++) { + _pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL); + + if (!_pno_bssid) { + DHD_ERROR(("_pno_bssid is NULL, cannot kalloc %zd bytes", + sizeof(struct dhd_pno_bssid))); + err = BCME_NOMEM; + goto exit; + } + memcpy(&_pno_bssid->macaddr, &bssid_ptr->macaddr, ETHER_ADDR_LEN); + + flags = (int8) bssid_ptr->rssi_reporting_threshold; + _pno_bssid->flags = flags << WL_PFN_RSSI_SHIFT; + list_add_tail(&_pno_bssid->list, + &_params->params_gscan.hotlist_bssid_list); + } + + _params->params_gscan.nbssid_hotlist += ptr->nbssid; + _params->params_gscan.lost_ap_window = ptr->lost_ap_window; + break; + } + case DHD_PNO_SIGNIFICANT_SCAN_CFG_ID: + { + gscan_swc_params_t *ptr = (gscan_swc_params_t *)buf; + dhd_pno_significant_bssid_t *_pno_significant_change_bssid; + wl_pfn_significant_bssid_t *significant_bssid_ptr; + + if (flush) { + dhd_pno_reset_cfg_gscan(_params, _pno_state, + GSCAN_FLUSH_SIGNIFICANT_CFG); + } + + if (!ptr->nbssid) { + break; + } + if (!_params->params_gscan.nbssid_significant_change) { + INIT_LIST_HEAD(&_params->params_gscan.significant_bssid_list); + } + if ((_params->params_gscan.nbssid_significant_change + + ptr->nbssid) > PFN_SWC_MAX_NUM_APS) { + DHD_ERROR(("Excessive number of SWC APs programmed %d\n", + (_params->params_gscan.nbssid_significant_change + + ptr->nbssid))); + err = BCME_RANGE; + goto exit; + } + + for (i = 0, significant_bssid_ptr = ptr->bssid_elem_list; + i < ptr->nbssid; i++, significant_bssid_ptr++) { + _pno_significant_change_bssid = + kzalloc(sizeof(dhd_pno_significant_bssid_t), + GFP_KERNEL); + + if (!_pno_significant_change_bssid) { + DHD_ERROR(("SWC bssidptr is NULL, cannot kalloc %zd bytes", + sizeof(dhd_pno_significant_bssid_t))); + err = BCME_NOMEM; + goto exit; + } + 
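+			/*
+			 * Added note: each host-side tracking entry is converted
+			 * below into the wire format (wl_pfn_significant_bssid_t)
+			 * consumed by the "pfn_add_swc_bssid" iovar - the MAC
+			 * address plus the low/high RSSI hysteresis thresholds.
+			 */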
memcpy(&_pno_significant_change_bssid->BSSID, + &significant_bssid_ptr->macaddr, ETHER_ADDR_LEN); + _pno_significant_change_bssid->rssi_low_threshold = + significant_bssid_ptr->rssi_low_threshold; + _pno_significant_change_bssid->rssi_high_threshold = + significant_bssid_ptr->rssi_high_threshold; + list_add_tail(&_pno_significant_change_bssid->list, + &_params->params_gscan.significant_bssid_list); + } + + _params->params_gscan.swc_nbssid_threshold = ptr->swc_threshold; + _params->params_gscan.swc_rssi_window_size = ptr->rssi_window; + _params->params_gscan.lost_ap_window = ptr->lost_ap_window; + _params->params_gscan.nbssid_significant_change += ptr->nbssid; + break; + } + case DHD_PNO_SCAN_CFG_ID: + { + int i, k, valid = 0; + uint16 band, min; + gscan_scan_params_t *ptr = (gscan_scan_params_t *)buf; + struct dhd_pno_gscan_channel_bucket *ch_bucket; + + if (ptr->nchannel_buckets <= GSCAN_MAX_CH_BUCKETS) { + _params->params_gscan.nchannel_buckets = ptr->nchannel_buckets; + + memcpy(_params->params_gscan.channel_bucket, ptr->channel_bucket, + _params->params_gscan.nchannel_buckets * + sizeof(struct dhd_pno_gscan_channel_bucket)); + min = ptr->channel_bucket[0].bucket_freq_multiple; + ch_bucket = _params->params_gscan.channel_bucket; + + for (i = 0; i < ptr->nchannel_buckets; i++) { + band = ch_bucket[i].band; + for (k = 0; k < ptr->channel_bucket[i].num_channels; k++) { + ch_bucket[i].chan_list[k] = + wf_mhz2channel(ptr->channel_bucket[i].chan_list[k], + 0); + } + ch_bucket[i].band = 0; + /* HAL and DHD use different bits for 2.4G and + * 5G in bitmap. Hence translating it here... + */ + if (band & GSCAN_BG_BAND_MASK) + ch_bucket[i].band |= WLC_BAND_2G; + + if (band & GSCAN_A_BAND_MASK) + ch_bucket[i].band |= WLC_BAND_5G; + + if (band & GSCAN_DFS_MASK) + ch_bucket[i].band |= GSCAN_DFS_MASK; + if (ptr->scan_fr == + ptr->channel_bucket[i].bucket_freq_multiple) { + valid = 1; + } + if (ptr->channel_bucket[i].bucket_freq_multiple < min) + min = ptr->channel_bucket[i].bucket_freq_multiple; + + DHD_PNO(("band %d report_flag %d\n", ch_bucket[i].band, + ch_bucket[i].report_flag)); + } + if (!valid) + ptr->scan_fr = min; + + for (i = 0; i < ptr->nchannel_buckets; i++) { + ch_bucket[i].bucket_freq_multiple = + ch_bucket[i].bucket_freq_multiple/ptr->scan_fr; + } + _params->params_gscan.scan_fr = ptr->scan_fr; + + DHD_PNO(("num_buckets %d scan_fr %d\n", ptr->nchannel_buckets, + _params->params_gscan.scan_fr)); + } else { + err = BCME_BADARG; + } + break; + } + default: + err = BCME_BADARG; + DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type)); + break; + } +exit: + mutex_unlock(&_pno_state->pno_mutex); + return err; + +} + + +static bool +validate_gscan_params(struct dhd_pno_gscan_params *gscan_params) +{ + unsigned int i, k; + + if (!gscan_params->scan_fr || !gscan_params->nchannel_buckets) { + DHD_ERROR(("%s : Scan freq - %d or number of channel buckets - %d is empty\n", + __FUNCTION__, gscan_params->scan_fr, gscan_params->nchannel_buckets)); + return false; + } + + for (i = 0; i < gscan_params->nchannel_buckets; i++) { + if (!gscan_params->channel_bucket[i].band) { + for (k = 0; k < gscan_params->channel_bucket[i].num_channels; k++) { + if (gscan_params->channel_bucket[i].chan_list[k] > CHANNEL_5G_MAX) { + DHD_ERROR(("%s : Unknown channel %d\n", __FUNCTION__, + gscan_params->channel_bucket[i].chan_list[k])); + return false; + } + } + } + } + + return true; +} + +static int +dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params) +{ + int err = BCME_OK; + int 
mode, i = 0, k; + uint16 _chan_list[WL_NUMCHANNELS]; + int tot_nchan = 0; + int num_buckets_to_fw, tot_num_buckets, gscan_param_size; + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + wl_pfn_gscan_channel_bucket_t *ch_bucket = NULL; + wl_pfn_gscan_cfg_t *pfn_gscan_cfg_t = NULL; + wl_pfn_significant_bssid_t *p_pfn_significant_bssid = NULL; + wl_pfn_bssid_t *p_pfn_bssid = NULL; + wlc_ssid_ext_t *pssid_list = NULL; + dhd_pno_params_t *params_legacy; + dhd_pno_params_t *_params; + + params_legacy = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + NULL_CHECK(gscan_params, "gscan_params is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + if (!validate_gscan_params(gscan_params)) { + DHD_ERROR(("%s : Cannot start gscan - bad params\n", __FUNCTION__)); + err = BCME_BADARG; + goto exit; + } + /* Create channel list based on channel buckets */ + if (!(ch_bucket = dhd_pno_gscan_create_channel_list(dhd, _pno_state, + _chan_list, &tot_num_buckets, &num_buckets_to_fw))) { + goto exit; + } + + if (_pno_state->pno_mode & (DHD_PNO_GSCAN_MODE | DHD_PNO_LEGACY_MODE)) { + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + goto exit; + } + /* restore the previous mode */ + _pno_state->pno_mode = mode; + } + + _pno_state->pno_mode |= DHD_PNO_GSCAN_MODE; + + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + pssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); + + if (!pssid_list) { + err = BCME_NOMEM; + DHD_ERROR(("failed to get Legacy PNO SSID list\n")); + goto exit; + } + + if ((err = _dhd_pno_add_ssid(dhd, pssid_list, + params_legacy->params_legacy.nssid)) < 0) { + DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err)); + goto exit; + } + } + + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_GSCAN_MODE)) < 0) { + DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err)); + goto exit; + } + + gscan_param_size = sizeof(wl_pfn_gscan_cfg_t) + + (num_buckets_to_fw - 1) * sizeof(wl_pfn_gscan_channel_bucket_t); + pfn_gscan_cfg_t = (wl_pfn_gscan_cfg_t *) MALLOC(dhd->osh, gscan_param_size); + + if (!pfn_gscan_cfg_t) { + DHD_ERROR(("%s: failed to malloc memory of size %d\n", + __FUNCTION__, gscan_param_size)); + err = BCME_NOMEM; + goto exit; + } + + + if (gscan_params->mscan) { + pfn_gscan_cfg_t->buffer_threshold = gscan_params->buffer_threshold; + } else { + pfn_gscan_cfg_t->buffer_threshold = GSCAN_BATCH_NO_THR_SET; + } + if (gscan_params->nbssid_significant_change) { + pfn_gscan_cfg_t->swc_nbssid_threshold = gscan_params->swc_nbssid_threshold; + pfn_gscan_cfg_t->swc_rssi_window_size = gscan_params->swc_rssi_window_size; + pfn_gscan_cfg_t->lost_ap_window = gscan_params->lost_ap_window; + } else { + pfn_gscan_cfg_t->swc_nbssid_threshold = 0; + pfn_gscan_cfg_t->swc_rssi_window_size = 0; + pfn_gscan_cfg_t->lost_ap_window = 0; + } + + pfn_gscan_cfg_t->flags = + (gscan_params->send_all_results_flag & GSCAN_SEND_ALL_RESULTS_MASK); + pfn_gscan_cfg_t->count_of_channel_buckets = num_buckets_to_fw; + + + for (i = 0, k = 0; i < tot_num_buckets; i++) { + if 
(ch_bucket[i].bucket_end_index != CHANNEL_BUCKET_EMPTY_INDEX) { + pfn_gscan_cfg_t->channel_bucket[k].bucket_end_index = + ch_bucket[i].bucket_end_index; + pfn_gscan_cfg_t->channel_bucket[k].bucket_freq_multiple = + ch_bucket[i].bucket_freq_multiple; + pfn_gscan_cfg_t->channel_bucket[k].report_flag = + ch_bucket[i].report_flag; + k++; + } + } + + tot_nchan = pfn_gscan_cfg_t->channel_bucket[num_buckets_to_fw - 1].bucket_end_index + 1; + DHD_PNO(("Total channel num %d total ch_buckets %d ch_buckets_to_fw %d \n", tot_nchan, + tot_num_buckets, num_buckets_to_fw)); + + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + + if ((err = _dhd_pno_gscan_cfg(dhd, pfn_gscan_cfg_t, gscan_param_size)) < 0) { + DHD_ERROR(("%s : failed to set call pno_gscan_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + if (gscan_params->nbssid_significant_change) { + dhd_pno_significant_bssid_t *iter, *next; + + + p_pfn_significant_bssid = kzalloc(sizeof(wl_pfn_significant_bssid_t) * + gscan_params->nbssid_significant_change, GFP_KERNEL); + if (p_pfn_significant_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate memory %zd\n", + __FUNCTION__, + sizeof(wl_pfn_significant_bssid_t) * + gscan_params->nbssid_significant_change)); + err = BCME_NOMEM; + goto exit; + } + i = 0; + /* convert dhd_pno_significant_bssid_t to wl_pfn_significant_bssid_t */ + list_for_each_entry_safe(iter, next, &gscan_params->significant_bssid_list, list) { + p_pfn_significant_bssid[i].rssi_low_threshold = iter->rssi_low_threshold; + p_pfn_significant_bssid[i].rssi_high_threshold = iter->rssi_high_threshold; + memcpy(&p_pfn_significant_bssid[i].macaddr, &iter->BSSID, ETHER_ADDR_LEN); + i++; + } + DHD_PNO(("nbssid_significant_change %d \n", + gscan_params->nbssid_significant_change)); + err = _dhd_pno_add_significant_bssid(dhd, p_pfn_significant_bssid, + gscan_params->nbssid_significant_change); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_add_significant_bssid(err :%d)\n", + __FUNCTION__, err)); + goto exit; + } + } + + if (gscan_params->nbssid_hotlist) { + struct dhd_pno_bssid *iter, *next; + wl_pfn_bssid_t *ptr; + p_pfn_bssid = (wl_pfn_bssid_t *)kzalloc(sizeof(wl_pfn_bssid_t) * + gscan_params->nbssid_hotlist, GFP_KERNEL); + if (p_pfn_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array" + " (count: %d)", + __FUNCTION__, gscan_params->nbssid_hotlist)); + err = BCME_NOMEM; + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + goto exit; + } + ptr = p_pfn_bssid; + /* convert dhd_pno_bssid to wl_pfn_bssid */ + DHD_PNO(("nhotlist %d\n", gscan_params->nbssid_hotlist)); + list_for_each_entry_safe(iter, next, + &gscan_params->hotlist_bssid_list, list) { + memcpy(&ptr->macaddr, + &iter->macaddr, ETHER_ADDR_LEN); + ptr->flags = iter->flags; + ptr++; + } + + err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, gscan_params->nbssid_hotlist); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n", + __FUNCTION__, err)); + goto exit; + } + } + + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) { + DHD_ERROR(("%s : failed to enable PNO err %d\n", __FUNCTION__, err)); + } + +exit: + /* clear mode in case of error */ + if (err < 0) { + int ret = dhd_pno_clean(dhd); + + if (ret < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, ret)); + } else { + _pno_state->pno_mode &= ~DHD_PNO_GSCAN_MODE; + } + } + kfree(pssid_list); + 
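+ /* kfree(NULL) is a no-op, so the kzalloc'd buffers here are freed + * unconditionally; the OSL buffers below are NULL-checked because + * MFREE() must be passed the original allocation size. + */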
kfree(p_pfn_significant_bssid); + kfree(p_pfn_bssid); + if (pfn_gscan_cfg_t) { + MFREE(dhd->osh, pfn_gscan_cfg_t, gscan_param_size); + } + if (ch_bucket) { + MFREE(dhd->osh, ch_bucket, + (tot_num_buckets * sizeof(wl_pfn_gscan_channel_bucket_t))); + } + return err; + +} + + +static void +dhd_pno_merge_gscan_pno_channels(dhd_pno_status_info_t *pno_state, + uint16 *chan_list, + uint8 *ch_scratch_pad, + wl_pfn_gscan_channel_bucket_t *ch_bucket, + uint32 *num_buckets_to_fw, + int num_channels) +{ + uint16 chan_buf[WL_NUMCHANNELS]; + int i, j = 0, ch_bucket_idx = 0; + dhd_pno_params_t *_params = &pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + dhd_pno_params_t *_params1 = &pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + uint16 *legacy_chan_list = _params1->params_legacy.chan_list; + bool is_legacy_scan_freq_higher; + uint8 report_flag = CH_BUCKET_REPORT_REGULAR; + + if (!_params1->params_legacy.scan_fr) + _params1->params_legacy.scan_fr = PNO_SCAN_MIN_FW_SEC; + + is_legacy_scan_freq_higher = + _params->params_gscan.scan_fr < _params1->params_legacy.scan_fr; + + /* Calculate new Legacy scan multiple of base scan_freq + * The legacy PNO channel bucket is added at the end of the + * channel bucket list. + */ + if (is_legacy_scan_freq_higher) { + ch_bucket[_params->params_gscan.nchannel_buckets].bucket_freq_multiple = + _params1->params_legacy.scan_fr/_params->params_gscan.scan_fr; + + } else { + uint16 max = 0; + + /* Calculate new multiple of base scan_freq for gscan buckets */ + ch_bucket[_params->params_gscan.nchannel_buckets].bucket_freq_multiple = 1; + for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) { + ch_bucket[i].bucket_freq_multiple *= _params->params_gscan.scan_fr; + ch_bucket[i].bucket_freq_multiple /= _params1->params_legacy.scan_fr; + if (max < ch_bucket[i].bucket_freq_multiple) + max = ch_bucket[i].bucket_freq_multiple; + } + _params->params_gscan.max_ch_bucket_freq = max; + } + + /* Off to remove duplicates!! + * Find channels that are already being serviced by gscan before legacy bucket + * These have to be removed from legacy bucket. + * !!Assuming chan_list channels are validated list of channels!! + * ch_scratch_pad is 1 at gscan bucket locations see dhd_pno_gscan_create_channel_list() + */ + for (i = 0; i < _params1->params_legacy.nchan; i++) + ch_scratch_pad[legacy_chan_list[i]] += 2; + + ch_bucket_idx = 0; + memcpy(chan_buf, chan_list, num_channels * sizeof(uint16)); + + /* Finally create channel list and bucket + * At this point ch_scratch_pad can have 4 values: + * 0 - Channel not present in either Gscan or Legacy PNO bucket + * 1 - Channel present only in Gscan bucket + * 2 - Channel present only in Legacy PNO bucket + * 3 - Channel present in both Gscan and Legacy PNO buckets + * Thus Gscan buckets can have values 1 or 3 and Legacy 2 or 3 + * For channel buckets with scan_freq < legacy accept all + * channels i.e. 
ch_scratch_pad = 1 and 3 + * else accept only ch_scratch_pad = 1 and mark rejects as + * ch_scratch_pad = 4 so that they go in legacy + */ + for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) { + if (ch_bucket[i].bucket_freq_multiple <= + ch_bucket[_params->params_gscan.nchannel_buckets].bucket_freq_multiple) { + for (; ch_bucket_idx <= ch_bucket[i].bucket_end_index; ch_bucket_idx++, j++) + chan_list[j] = chan_buf[ch_bucket_idx]; + + ch_bucket[i].bucket_end_index = j - 1; + } else { + num_channels = 0; + for (; ch_bucket_idx <= ch_bucket[i].bucket_end_index; ch_bucket_idx++) { + if (ch_scratch_pad[chan_buf[ch_bucket_idx]] == 1) { + chan_list[j] = chan_buf[ch_bucket_idx]; + j++; + num_channels++; + } else { + ch_scratch_pad[chan_buf[ch_bucket_idx]] = 4; + /* If Gscan channel is merged off to legacy bucket and + * if the gscan channel bucket has a report flag > 0 + * use the same for legacy + */ + if (report_flag < ch_bucket[i].report_flag) + report_flag = ch_bucket[i].report_flag; + } + } + + if (num_channels) { + ch_bucket[i].bucket_end_index = j - 1; + } else { + ch_bucket[i].bucket_end_index = CHANNEL_BUCKET_EMPTY_INDEX; + *num_buckets_to_fw = *num_buckets_to_fw - 1; + } + } + + } + + num_channels = 0; + ch_bucket[_params->params_gscan.nchannel_buckets].report_flag = report_flag; + /* Now add channels to the legacy scan bucket + * ch_scratch_pad = 0 to 4 at this point, for legacy -> 2,3,4. 2 means exclusively + * Legacy so add to bucket. 4 means it was rejected from a gscan bucket and must + * be added to the Legacy bucket. 3 means it is already serviced by a gscan + * bucket, so skip it. + */ + for (i = 0; i < _params1->params_legacy.nchan; i++) { + if (ch_scratch_pad[legacy_chan_list[i]] != 3) { + chan_list[j] = legacy_chan_list[i]; + j++; + num_channels++; + } + } + if (num_channels) { + ch_bucket[_params->params_gscan.nchannel_buckets].bucket_end_index = j - 1; + } + else { + ch_bucket[_params->params_gscan.nchannel_buckets].bucket_end_index = + CHANNEL_BUCKET_EMPTY_INDEX; + *num_buckets_to_fw = *num_buckets_to_fw - 1; + } + + return; +} +static wl_pfn_gscan_channel_bucket_t * +dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, + dhd_pno_status_info_t *_pno_state, + uint16 *chan_list, + uint32 *num_buckets, + uint32 *num_buckets_to_fw) +{ + int i, num_channels, err, nchan = WL_NUMCHANNELS; + uint16 *ptr = chan_list, max; + uint8 *ch_scratch_pad; + wl_pfn_gscan_channel_bucket_t *ch_bucket; + dhd_pno_params_t *_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + bool is_pno_legacy_running = _pno_state->pno_mode & DHD_PNO_LEGACY_MODE; + dhd_pno_gscan_channel_bucket_t *gscan_buckets = _params->params_gscan.channel_bucket; + + if (is_pno_legacy_running) + *num_buckets = _params->params_gscan.nchannel_buckets + 1; + else + *num_buckets = _params->params_gscan.nchannel_buckets; + + + *num_buckets_to_fw = *num_buckets; + + + ch_bucket = (wl_pfn_gscan_channel_bucket_t *) MALLOC(dhd->osh, + ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t))); + + if (!ch_bucket) { + DHD_ERROR(("%s: failed to malloc memory of size %zd\n", + __FUNCTION__, (*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t))); + *num_buckets_to_fw = *num_buckets = 0; + return NULL; + } + + max = gscan_buckets[0].bucket_freq_multiple; + num_channels = 0; + for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) { + if (!gscan_buckets[i].band) { + num_channels += gscan_buckets[i].num_channels; + memcpy(ptr, gscan_buckets[i].chan_list, + gscan_buckets[i].num_channels * sizeof(uint16)); + ptr = ptr + gscan_buckets[i].num_channels; + } else { + /* get a valid channel 
list based on band B or A */ + err = _dhd_pno_get_channels(dhd, ptr, + &nchan, (gscan_buckets[i].band & GSCAN_ABG_BAND_MASK), + !(gscan_buckets[i].band & GSCAN_DFS_MASK)); + + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n", + __FUNCTION__, gscan_buckets[i].band)); + MFREE(dhd->osh, ch_bucket, + ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t))); + *num_buckets_to_fw = *num_buckets = 0; + return NULL; + } + + num_channels += nchan; + ptr = ptr + nchan; + } + + ch_bucket[i].bucket_end_index = num_channels - 1; + ch_bucket[i].bucket_freq_multiple = gscan_buckets[i].bucket_freq_multiple; + ch_bucket[i].report_flag = gscan_buckets[i].report_flag; + if (max < gscan_buckets[i].bucket_freq_multiple) + max = gscan_buckets[i].bucket_freq_multiple; + nchan = WL_NUMCHANNELS - num_channels; + DHD_PNO(("end_idx %d freq_mult - %d\n", + ch_bucket[i].bucket_end_index, ch_bucket[i].bucket_freq_multiple)); + } + + /* channel numbers index this array directly, so it needs CHANNEL_5G_MAX + 1 bytes */ + ch_scratch_pad = (uint8 *) kzalloc(CHANNEL_5G_MAX + 1, GFP_KERNEL); + if (!ch_scratch_pad) { + DHD_ERROR(("%s: failed to malloc memory of size %d\n", + __FUNCTION__, CHANNEL_5G_MAX + 1)); + MFREE(dhd->osh, ch_bucket, + ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t))); + *num_buckets_to_fw = *num_buckets = 0; + return NULL; + } + + /* Need to look for duplicates in gscan buckets if the framework programmed + * the gscan buckets badly; for now return error if there are duplicates. + * Plus as an added bonus, we get all channels in Gscan bucket + * set to 1 for dhd_pno_merge_gscan_pno_channels() + */ + for (i = 0; i < num_channels; i++) { + if (!ch_scratch_pad[chan_list[i]]) { + ch_scratch_pad[chan_list[i]] = 1; + } else { + DHD_ERROR(("%s: Duplicate channel - %d programmed in channel bucket\n", + __FUNCTION__, chan_list[i])); + MFREE(dhd->osh, ch_bucket, ((*num_buckets) * + sizeof(wl_pfn_gscan_channel_bucket_t))); + *num_buckets_to_fw = *num_buckets = 0; + kfree(ch_scratch_pad); + return NULL; + } + } + _params->params_gscan.max_ch_bucket_freq = max; + /* Legacy PNO may be running, which means we need to create a legacy PNO bucket + * Plus need to remove duplicates as the legacy PNO chan_list may have common channels + * If channel is to be scanned more frequently as per gscan requirements + * remove from legacy PNO ch_bucket. Similarly, if legacy wants a channel scanned + * more often, it is removed from the Gscan channel bucket. + * In the end both are satisfied. 
+ */ + if (is_pno_legacy_running) + dhd_pno_merge_gscan_pno_channels(_pno_state, chan_list, + ch_scratch_pad, ch_bucket, num_buckets_to_fw, num_channels); + + kfree(ch_scratch_pad); + return ch_bucket; +} + +static int +dhd_pno_stop_for_gscan(dhd_pub_t *dhd) +{ + int err = BCME_OK; + int mode; + wlc_ssid_ext_t *pssid_list = NULL; + dhd_pno_status_info_t *_pno_state; + + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", + __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) { + DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__)); + goto exit; + } + mutex_lock(&_pno_state->pno_mutex); + mode = _pno_state->pno_mode & ~DHD_PNO_GSCAN_MODE; + err = dhd_pno_clean(dhd); + if (err < 0) { + + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + mutex_unlock(&_pno_state->pno_mutex); + return err; + } + _pno_state->pno_mode = mode; + mutex_unlock(&_pno_state->pno_mutex); + + /* Reprogram Legacy PNO if it was running */ + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + struct dhd_pno_legacy_params *params_legacy; + uint16 chan_list[WL_NUMCHANNELS]; + + params_legacy = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + pssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); + if (!pssid_list) { + err = BCME_NOMEM; + DHD_ERROR(("failed to get Legacy PNO SSID list\n")); + goto exit; + } + + DHD_PNO(("Restarting Legacy PNO SSID scan...\n")); + memcpy(chan_list, params_legacy->chan_list, + (params_legacy->nchan * sizeof(uint16))); + err = dhd_pno_set_for_ssid(dhd, pssid_list, params_legacy->nssid, + params_legacy->scan_fr, params_legacy->pno_repeat, + params_legacy->pno_freq_expo_max, chan_list, + params_legacy->nchan); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + + } + +exit: + kfree(pssid_list); + return err; +} + +int +dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush) +{ + int err = BCME_OK; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + struct dhd_pno_gscan_params *gscan_params; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + DHD_PNO(("%s enter - run %d flush %d\n", __FUNCTION__, run, flush)); + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = &params->params_gscan; + + if (run) { + err = dhd_pno_set_for_gscan(dhd, gscan_params); + } else { + if (flush) { + mutex_lock(&_pno_state->pno_mutex); + dhd_pno_reset_cfg_gscan(params, _pno_state, GSCAN_FLUSH_ALL_CFG); + mutex_unlock(&_pno_state->pno_mutex); + } + /* Need to stop all gscan */ + err = dhd_pno_stop_for_gscan(dhd); + } + + return err; +} + +int +dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag) +{ + int err = BCME_OK; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + struct dhd_pno_gscan_params *gscan_params; + uint8 old_flag; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi 
location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = &params->params_gscan; + + mutex_lock(&_pno_state->pno_mutex); + + old_flag = gscan_params->send_all_results_flag; + gscan_params->send_all_results_flag = (uint8) real_time_flag; + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + if (old_flag != gscan_params->send_all_results_flag) { + wl_pfn_gscan_cfg_t gscan_cfg; + gscan_cfg.flags = (gscan_params->send_all_results_flag & + GSCAN_SEND_ALL_RESULTS_MASK); + gscan_cfg.flags |= GSCAN_CFG_FLAGS_ONLY_MASK; + + if ((err = _dhd_pno_gscan_cfg(dhd, &gscan_cfg, + sizeof(wl_pfn_gscan_cfg_t))) < 0) { + DHD_ERROR(("%s : pno_gscan_cfg failed (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit_mutex_unlock; + } + } else { + DHD_PNO(("No change in flag - %d\n", old_flag)); + } + } else { + DHD_PNO(("Gscan not started\n")); + } +exit_mutex_unlock: + mutex_unlock(&_pno_state->pno_mutex); +exit: + return err; +} + +int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd) +{ + int ret = 0; + dhd_pno_params_t *params; + struct dhd_pno_gscan_params *gscan_params; + dhd_pno_status_info_t *_pno_state; + gscan_results_cache_t *iter, *tmp; + + _pno_state = PNO_GET_PNOSTATE(dhd); + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + gscan_params = &params->params_gscan; + iter = gscan_params->gscan_batch_cache; + + while (iter) { + if (iter->tot_consumed == iter->tot_count) { + tmp = iter->next; + kfree(iter); + iter = tmp; + } else + break; +} + gscan_params->gscan_batch_cache = iter; + ret = (iter == NULL); + return ret; +} + +static int +_dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd) +{ + int err = BCME_OK; + uint32 timestamp = 0, ts = 0, i, j, timediff; + dhd_pno_params_t *params; + dhd_pno_status_info_t *_pno_state; + wl_pfn_lnet_info_t *plnetinfo; + struct dhd_pno_gscan_params *gscan_params; + wl_pfn_lscanresults_t *plbestnet = NULL; + gscan_results_cache_t *iter, *tail; + wifi_gscan_result_t *result; + uint8 *nAPs_per_scan = NULL; + uint8 num_scans_in_cur_iter; + uint16 count, scan_id = 0; + + NULL_CHECK(dhd, "dhd is NULL\n", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + + _pno_state = PNO_GET_PNOSTATE(dhd); + params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + gscan_params = &params->params_gscan; + nAPs_per_scan = (uint8 *) MALLOC(dhd->osh, gscan_params->mscan); + + if (!nAPs_per_scan) { + DHD_ERROR(("%s : Out of memory!! 
Can't malloc %d bytes\n", __FUNCTION__, + gscan_params->mscan)); + err = BCME_NOMEM; + goto exit; + } + + plbestnet = (wl_pfn_lscanresults_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN); + if (!plbestnet) { + DHD_ERROR(("%s: failed to malloc memory of size %d\n", + __FUNCTION__, PNO_BESTNET_LEN)); + err = BCME_NOMEM; + goto exit; + } + + mutex_lock(&_pno_state->pno_mutex); + iter = gscan_params->gscan_batch_cache; + /* Mark any cache that has not been consumed as consumed, then delete it */ + while (iter) { + iter->tot_consumed = iter->tot_count; + iter = iter->next; + } + dhd_gscan_batch_cache_cleanup(dhd); + + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) { + DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__)); + goto exit_mutex_unlock; + } + + timediff = gscan_params->scan_fr * 1000; + timediff = timediff >> 1; + + /* OK, now let's start getting results from the FW */ + plbestnet->status = PFN_INCOMPLETE; + tail = gscan_params->gscan_batch_cache; + while (plbestnet->status != PFN_COMPLETE) { + memset(plbestnet, 0, PNO_BESTNET_LEN); + err = dhd_iovar(dhd, 0, "pfnlbest", (char *)plbestnet, PNO_BESTNET_LEN, 0); + if (err < 0) { + DHD_ERROR(("%s : Cannot get all the batch results, err :%d\n", + __FUNCTION__, err)); + goto exit_mutex_unlock; + } + DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version, + plbestnet->status, plbestnet->count)); + if (plbestnet->version != PFN_SCANRESULT_VERSION) { + err = BCME_VERSION; + DHD_ERROR(("bestnet version(%d) mismatches Driver version(%d)\n", + plbestnet->version, PFN_SCANRESULT_VERSION)); + goto exit_mutex_unlock; + } + + num_scans_in_cur_iter = 0; + timestamp = plbestnet->netinfo[0].timestamp; + /* find out how many scans' results we got in this batch of FW results */ + for (i = 0, count = 0; i < plbestnet->count; i++, count++) { + plnetinfo = &plbestnet->netinfo[i]; + /* Unlikely to happen, but just in case the results from + * FW don't make sense... Assume it's part of one single scan + */ + if (num_scans_in_cur_iter > gscan_params->mscan) { + num_scans_in_cur_iter = 0; + count = plbestnet->count; + break; + } + if (TIME_DIFF_MS(timestamp, plnetinfo->timestamp) > timediff) { + nAPs_per_scan[num_scans_in_cur_iter] = count; + count = 0; + num_scans_in_cur_iter++; + } + timestamp = plnetinfo->timestamp; + } + nAPs_per_scan[num_scans_in_cur_iter] = count; + num_scans_in_cur_iter++; + + DHD_PNO(("num_scans_in_cur_iter %d\n", num_scans_in_cur_iter)); + plnetinfo = &plbestnet->netinfo[0]; + + for (i = 0; i < num_scans_in_cur_iter; i++) { + iter = (gscan_results_cache_t *) + kzalloc(((nAPs_per_scan[i] - 1) * sizeof(wifi_gscan_result_t)) + + sizeof(gscan_results_cache_t), GFP_KERNEL); + if (!iter) { + DHD_ERROR(("%s : Out of memory!! 
Can't malloc %d bytes\n", + __FUNCTION__, gscan_params->mscan)); + err = BCME_NOMEM; + goto exit_mutex_unlock; + } + /* Need this check because the new set of results from FW + * may be a continuation of the previous sets' scan results + */ + if (TIME_DIFF_MS(ts, plnetinfo->timestamp) > timediff) { + iter->scan_id = ++scan_id; + } else { + iter->scan_id = scan_id; + } + DHD_PNO(("scan_id %d tot_count %d\n", scan_id, nAPs_per_scan[i])); + iter->tot_count = nAPs_per_scan[i]; + iter->tot_consumed = 0; + + if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) { + DHD_PNO(("This scan is aborted\n")); + iter->flag = (ENABLE << PNO_STATUS_ABORT); + } else if (gscan_params->reason) { + iter->flag = (ENABLE << gscan_params->reason); + } + + if (!tail) { + gscan_params->gscan_batch_cache = iter; + } else { + tail->next = iter; + } + tail = iter; + iter->next = NULL; + for (j = 0; j < nAPs_per_scan[i]; j++, plnetinfo++) { + result = &iter->results[j]; + + result->channel = wf_channel2mhz(plnetinfo->pfnsubnet.channel, + (plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL? + WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + result->rssi = (int32) plnetinfo->RSSI; + /* Info not available & not expected */ + result->beacon_period = 0; + result->capability = 0; + result->ie_length = 0; + result->rtt = (uint64) plnetinfo->rtt0; + result->rtt_sd = (uint64) plnetinfo->rtt1; + result->ts = convert_fw_rel_time_to_systime(plnetinfo->timestamp); + ts = plnetinfo->timestamp; + if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s: Invalid SSID length %d\n", + __FUNCTION__, plnetinfo->pfnsubnet.SSID_len)); + plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + memcpy(result->ssid, plnetinfo->pfnsubnet.SSID, + plnetinfo->pfnsubnet.SSID_len); + result->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0'; + memcpy(&result->macaddr, &plnetinfo->pfnsubnet.BSSID, + ETHER_ADDR_LEN); + + DHD_PNO(("\tSSID : ")); + DHD_PNO(("\n")); + DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n", + result->macaddr.octet[0], + result->macaddr.octet[1], + result->macaddr.octet[2], + result->macaddr.octet[3], + result->macaddr.octet[4], + result->macaddr.octet[5])); + DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n", + plnetinfo->pfnsubnet.channel, + plnetinfo->RSSI, plnetinfo->timestamp)); + DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", + plnetinfo->rtt0, plnetinfo->rtt1)); + + } + } + } +exit_mutex_unlock: + mutex_unlock(&_pno_state->pno_mutex); +exit: + params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_COMPLETE; + smp_wmb(); + wake_up_interruptible(&_pno_state->batch_get_wait); + if (nAPs_per_scan) { + MFREE(dhd->osh, nAPs_per_scan, gscan_params->mscan); + } + if (plbestnet) { + MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN); + } + DHD_PNO(("Batch retrieval done!\n")); + return err; +} +#endif /* GSCAN_SUPPORT */ + +static int +_dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) +{ + int err = BCME_OK; + int i, j; + uint32 timestamp = 0; + dhd_pno_params_t *_params = NULL; + dhd_pno_status_info_t *_pno_state = NULL; + wl_pfn_lscanresults_t *plbestnet = NULL; + wl_pfn_lnet_info_t *plnetinfo; + dhd_pno_bestnet_entry_t *pbestnet_entry; + dhd_pno_best_header_t *pbestnetheader = NULL; + dhd_pno_scan_results_t *pscan_results = NULL, *siter, *snext; + bool allocate_header = FALSE; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit_no_unlock; + } + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = 
PNO_GET_PNOSTATE(dhd); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit_no_unlock; + } +#ifdef GSCAN_SUPPORT + if (!(_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_GSCAN_MODE))) { +#else + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { +#endif /* GSCAN_SUPPORT */ + DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__)); + goto exit_no_unlock; + } + mutex_lock(&_pno_state->pno_mutex); + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + if (buf && bufsize) { + if (!list_empty(&_params->params_batch.get_batch.expired_scan_results_list)) { + /* need to check whether we have cached data or not */ + DHD_PNO(("%s: have cached batching data in Driver\n", + __FUNCTION__)); + /* convert to results format */ + goto convert_format; + } else { + /* this is a first try to get batching results */ + if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) { + /* move the scan_results_list to expired_scan_results_lists */ + list_for_each_entry_safe(siter, snext, + &_params->params_batch.get_batch.scan_results_list, list) { + list_move_tail(&siter->list, + &_params->params_batch.get_batch.expired_scan_results_list); + } + _params->params_batch.get_batch.top_node_cnt = 0; + _params->params_batch.get_batch.expired_tot_scan_cnt = + _params->params_batch.get_batch.tot_scan_cnt; + _params->params_batch.get_batch.tot_scan_cnt = 0; + goto convert_format; + } + } + } + /* create dhd_pno_scan_results_t whenever we get the event WLC_E_PFN_BEST_BATCHING */ + pscan_results = (dhd_pno_scan_results_t *)MALLOC(dhd->osh, SCAN_RESULTS_SIZE); + if (pscan_results == NULL) { + err = BCME_NOMEM; + DHD_ERROR(("failed to allocate dhd_pno_scan_results_t\n")); + goto exit; + } + pscan_results->bestnetheader = NULL; + pscan_results->cnt_header = 0; + /* add the element into the list; once total node cnt reaches MAX_NODE_CNT, replace the oldest node */ + if (_params->params_batch.get_batch.top_node_cnt < MAX_NODE_CNT) { + list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list); + _params->params_batch.get_batch.top_node_cnt++; + } else { + int _removed_scan_cnt; + /* remove oldest one and add new one */ + DHD_PNO(("%s : Remove oldest node and add new one\n", __FUNCTION__)); + _removed_scan_cnt = _dhd_pno_clear_all_batch_results(dhd, + &_params->params_batch.get_batch.scan_results_list, TRUE); + _params->params_batch.get_batch.tot_scan_cnt -= _removed_scan_cnt; + list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list); + + } + plbestnet = (wl_pfn_lscanresults_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN); + NULL_CHECK(plbestnet, "failed to allocate buffer for bestnet", err); + DHD_PNO(("%s enter\n", __FUNCTION__)); + memset(plbestnet, 0, PNO_BESTNET_LEN); + while (plbestnet->status != PFN_COMPLETE) { + memset(plbestnet, 0, PNO_BESTNET_LEN); + err = dhd_iovar(dhd, 0, "pfnlbest", (char *)plbestnet, PNO_BESTNET_LEN, 0); + if (err < 0) { + if (err == BCME_EPERM) { + DHD_ERROR(("we cannot get the batching data " + "during scanning in firmware, try again\n")); + msleep(500); + continue; + } else { + DHD_ERROR(("%s : failed to execute pfnlbest (err :%d)\n", + __FUNCTION__, err)); + goto exit; + } + } + DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version, + plbestnet->status, plbestnet->count)); + if (plbestnet->version != PFN_SCANRESULT_VERSION) { + err = BCME_VERSION; + DHD_ERROR(("bestnet version(%d) mismatches Driver version(%d)\n", + 
plbestnet->version, PFN_SCANRESULT_VERSION)); + goto exit; + } + plnetinfo = plbestnet->netinfo; + for (i = 0; i < plbestnet->count; i++) { + pbestnet_entry = (dhd_pno_bestnet_entry_t *) + MALLOC(dhd->osh, BESTNET_ENTRY_SIZE); + if (pbestnet_entry == NULL) { + err = BCME_NOMEM; + DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n")); + goto exit; + } + memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE); + pbestnet_entry->recorded_time = jiffies; /* record the current time */ + /* create header for the first entry */ + allocate_header = (i == 0)? TRUE : FALSE; + /* check whether the new generation is started or not */ + if (timestamp && (TIME_DIFF(timestamp, plnetinfo->timestamp) + > TIME_MIN_DIFF)) + allocate_header = TRUE; + timestamp = plnetinfo->timestamp; + if (allocate_header) { + pbestnetheader = (dhd_pno_best_header_t *) + MALLOC(dhd->osh, BEST_HEADER_SIZE); + if (pbestnetheader == NULL) { + err = BCME_NOMEM; + if (pbestnet_entry) + MFREE(dhd->osh, pbestnet_entry, + BESTNET_ENTRY_SIZE); + DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n")); + goto exit; + } + memset(pbestnetheader, 0, BEST_HEADER_SIZE); + /* increase total cnt of bestnet header */ + pscan_results->cnt_header++; + /* record the reason dhd_pno_get_for_batch was called; this must be + * set after the memset above, which would otherwise wipe it + */ + if (reason) + pbestnetheader->reason = (ENABLE << reason); + /* initialize the head of linked list */ + INIT_LIST_HEAD(&(pbestnetheader->entry_list)); + /* link the pbestnet header into the existing list */ + if (pscan_results->bestnetheader == NULL) + /* this is the first header */ + pscan_results->bestnetheader = pbestnetheader; + else { + dhd_pno_best_header_t *head = pscan_results->bestnetheader; + pscan_results->bestnetheader = pbestnetheader; + pbestnetheader->next = head; + } + } + /* fill in the best network info */ + pbestnet_entry->channel = plnetinfo->pfnsubnet.channel; + pbestnet_entry->RSSI = plnetinfo->RSSI; + if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) { + /* the partial-scan flag means this scan was + * aborted by another scan + */ + DHD_PNO(("This scan is aborted\n")); + pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT); + } + pbestnet_entry->rtt0 = plnetinfo->rtt0; + pbestnet_entry->rtt1 = plnetinfo->rtt1; + pbestnet_entry->timestamp = plnetinfo->timestamp; + if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("%s: Invalid SSID length %d: trimming it to max\n", + __FUNCTION__, plnetinfo->pfnsubnet.SSID_len)); + plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + pbestnet_entry->SSID_len = plnetinfo->pfnsubnet.SSID_len; + memcpy(pbestnet_entry->SSID, plnetinfo->pfnsubnet.SSID, + pbestnet_entry->SSID_len); + memcpy(&pbestnet_entry->BSSID, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN); + /* add the element into list */ + list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list); + /* increase best entry count */ + pbestnetheader->tot_cnt++; + pbestnetheader->tot_size += BESTNET_ENTRY_SIZE; + DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1)); + DHD_PNO(("\tSSID : ")); + for (j = 0; j < plnetinfo->pfnsubnet.SSID_len; j++) + DHD_PNO(("%c", plnetinfo->pfnsubnet.SSID[j])); + DHD_PNO(("\n")); + DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n", + plnetinfo->pfnsubnet.BSSID.octet[0], + plnetinfo->pfnsubnet.BSSID.octet[1], + plnetinfo->pfnsubnet.BSSID.octet[2], + plnetinfo->pfnsubnet.BSSID.octet[3], + plnetinfo->pfnsubnet.BSSID.octet[4], + plnetinfo->pfnsubnet.BSSID.octet[5])); + DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n", + plnetinfo->pfnsubnet.channel, + plnetinfo->RSSI, 
plnetinfo->timestamp)); + DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo->rtt0, plnetinfo->rtt1)); + plnetinfo++; + } + } + if (pscan_results->cnt_header == 0) { + /* In case we didn't get any data from the firmware, + * remove the current scan_result list from get_batch.scan_results_list. + */ + DHD_PNO(("NO BATCH DATA from Firmware, Delete current SCAN RESULT LIST\n")); + list_del(&pscan_results->list); + MFREE(dhd->osh, pscan_results, SCAN_RESULTS_SIZE); + _params->params_batch.get_batch.top_node_cnt--; + } else { + /* increase total scan count using current scan count */ + _params->params_batch.get_batch.tot_scan_cnt += pscan_results->cnt_header; + } + + if (buf && bufsize) { + /* This is a first try to get batching results */ + if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) { + /* move the scan_results_list to expired_scan_results_lists */ + list_for_each_entry_safe(siter, snext, + &_params->params_batch.get_batch.scan_results_list, list) { + list_move_tail(&siter->list, + &_params->params_batch.get_batch.expired_scan_results_list); + } + /* reset global values after moving to expired list */ + _params->params_batch.get_batch.top_node_cnt = 0; + _params->params_batch.get_batch.expired_tot_scan_cnt = + _params->params_batch.get_batch.tot_scan_cnt; + _params->params_batch.get_batch.tot_scan_cnt = 0; + } +convert_format: + err = _dhd_pno_convert_format(dhd, &_params->params_batch, buf, bufsize); + if (err < 0) { + DHD_ERROR(("failed to convert the data into upper layer format\n")); + goto exit; + } + } +exit: + if (plbestnet) + MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN); + if (_params) { + _params->params_batch.get_batch.buf = NULL; + _params->params_batch.get_batch.bufsize = 0; + _params->params_batch.get_batch.bytes_written = err; + } + mutex_unlock(&_pno_state->pno_mutex); +exit_no_unlock: + if (waitqueue_active(&_pno_state->get_batch_done.wait)) + complete(&_pno_state->get_batch_done); + return err; +} +static void +_dhd_pno_get_batch_handler(struct work_struct *work) +{ + dhd_pno_status_info_t *_pno_state; + dhd_pub_t *dhd; + struct dhd_pno_batch_params *params_batch; + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = container_of(work, struct dhd_pno_status_info, work); + dhd = _pno_state->dhd; + if (dhd == NULL) { + DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__)); + return; + } + +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + _dhd_pno_get_gscan_batch_from_fw(dhd); + return; + } else +#endif /* GSCAN_SUPPORT */ + { + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; + + _dhd_pno_get_for_batch(dhd, params_batch->get_batch.buf, + params_batch->get_batch.bufsize, params_batch->get_batch.reason); + } + +} + +int +dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason) +{ + int err = BCME_OK; + char *pbuf = buf; + dhd_pno_status_info_t *_pno_state; + struct dhd_pno_batch_params *params_batch; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + struct dhd_pno_gscan_params *gscan_params; + 
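+ /* In gscan mode the firmware results are pulled by the work-queue + * handler (_dhd_pno_get_gscan_batch_from_fw); kick off that retrieval + * here and wait, with a timeout, until the handler marks the batch + * cache retrieval complete. + */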
gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan; + gscan_params->reason = reason; + err = dhd_retreive_batch_scan_results(dhd); + if (err == BCME_OK) { + wait_event_interruptible_timeout(_pno_state->batch_get_wait, + is_batch_retrieval_complete(gscan_params), + msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT)); + } + } else +#endif + { + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { + DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__)); + memset(pbuf, 0, bufsize); + pbuf += sprintf(pbuf, "scancount=%d\n", 0); + sprintf(pbuf, "%s", RESULTS_END_MARKER); + err = strlen(buf); + goto exit; + } + params_batch->get_batch.buf = buf; + params_batch->get_batch.bufsize = bufsize; + params_batch->get_batch.reason = reason; + params_batch->get_batch.bytes_written = 0; + schedule_work(&_pno_state->work); + wait_for_completion(&_pno_state->get_batch_done); + } + +#ifdef GSCAN_SUPPORT + if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) +#endif + err = params_batch->get_batch.bytes_written; +exit: + return err; +} + +int +dhd_pno_stop_for_batch(dhd_pub_t *dhd) +{ + int err = BCME_OK; + int mode = 0; + int i = 0; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + wl_pfn_bssid_t *p_pfn_bssid = NULL; + wlc_ssid_ext_t *p_ssid_list = NULL; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", + __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + DHD_PNO(("Gscan is ongoing, nothing to stop here\n")); + return err; + } +#endif + + if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) { + DHD_ERROR(("%s : PNO BATCH MODE is not enabled\n", __FUNCTION__)); + goto exit; + } + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_HOTLIST_MODE)) { + mode = _pno_state->pno_mode; + dhd_pno_clean(dhd); + _pno_state->pno_mode = mode; + /* restart Legacy PNO if the Legacy PNO is on */ + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + struct dhd_pno_legacy_params *_params_legacy; + _params_legacy = + &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); + p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); + if (!p_ssid_list) { + err = BCME_NOMEM; + DHD_ERROR(("failed to get Legacy PNO SSID list\n")); + goto exit; + } + err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid, + _params_legacy->scan_fr, _params_legacy->pno_repeat, + _params_legacy->pno_freq_expo_max, _params_legacy->chan_list, + _params_legacy->nchan); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) { + struct dhd_pno_bssid *iter, *next; + _params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]); + p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) * + _params->params_hotlist.nbssid, GFP_KERNEL); + if (p_pfn_bssid == NULL) { + DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array" + " (count: %d)", + __FUNCTION__, _params->params_hotlist.nbssid)); + err = BCME_ERROR; + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + goto exit; + } + i = 0; + /* convert 
dhd_pno_bssid to wl_pfn_bssid */ + list_for_each_entry_safe(iter, next, + &_params->params_hotlist.bssid_list, list) { + memcpy(&p_pfn_bssid[i].macaddr, &iter->macaddr, ETHER_ADDR_LEN); + p_pfn_bssid[i].flags = iter->flags; + i++; + } + err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + DHD_ERROR(("%s : failed to restart hotlist scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } + } else { + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } +exit: + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); + kfree(p_ssid_list); + kfree(p_pfn_bssid); + return err; +} + +int +dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params) +{ + int err = BCME_OK; + int i; + uint16 _chan_list[WL_NUMCHANNELS]; + int rem_nchan = 0; + int tot_nchan = 0; + int mode = 0; + dhd_pno_params_t *_params; + dhd_pno_params_t *_params2; + struct dhd_pno_bssid *_pno_bssid; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + NULL_CHECK(hotlist_params, "hotlist_params is NULL", err); + NULL_CHECK(p_pfn_bssid, "p_pfn_bssid is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + DHD_PNO(("%s enter\n", __FUNCTION__)); + + if (!dhd_support_sta_mode(dhd)) { + err = BCME_BADOPTION; + goto exit; + } + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + _params = &_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]; + if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) { + _pno_state->pno_mode |= DHD_PNO_HOTLIST_MODE; + err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_HOTLIST_MODE); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_reinitialize_prof\n", + __FUNCTION__)); + goto exit; + } + } + _params->params_hotlist.nchan = hotlist_params->nchan; + _params->params_hotlist.scan_fr = hotlist_params->scan_fr; + if (hotlist_params->nchan) + memcpy(_params->params_hotlist.chan_list, hotlist_params->chan_list, + sizeof(_params->params_hotlist.chan_list)); + memset(_chan_list, 0, sizeof(_chan_list)); + + rem_nchan = ARRAYSIZE(hotlist_params->chan_list) - hotlist_params->nchan; + if (hotlist_params->band == WLC_BAND_2G || hotlist_params->band == WLC_BAND_5G) { + /* get a valid channel list based on band B or A */ + err = _dhd_pno_get_channels(dhd, + &_params->params_hotlist.chan_list[hotlist_params->nchan], + &rem_nchan, hotlist_params->band, FALSE); + if (err < 0) { + DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n", + __FUNCTION__, hotlist_params->band)); + goto exit; + } + /* now we need to update nchan because rem_nchan holds the valid channel count */ + _params->params_hotlist.nchan += rem_nchan; + /* need to sort channel list */ + sort(_params->params_hotlist.chan_list, _params->params_hotlist.nchan, + sizeof(_params->params_hotlist.chan_list[0]), _dhd_pno_cmpfunc, NULL); + } +#ifdef PNO_DEBUG +{ + int i; + DHD_PNO(("Channel list : ")); + for (i = 0; i < _params->params_hotlist.nchan; i++) { + DHD_PNO(("%d ", _params->params_hotlist.chan_list[i])); + } + DHD_PNO(("\n")); +} +#endif + if (_params->params_hotlist.nchan) { + /* copy the channel list into local array */ + memcpy(_chan_list, 
_params->params_hotlist.chan_list, + sizeof(_chan_list)); + tot_nchan = _params->params_hotlist.nchan; + } + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + DHD_PNO(("PNO SSID scan is in progress in firmware\n")); + /* store current pno_mode before disabling pno */ + mode = _pno_state->pno_mode; + err = _dhd_pno_enable(dhd, PNO_OFF); + if (err < 0) { + DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__)); + goto exit; + } + /* restore the previous mode */ + _pno_state->pno_mode = mode; + /* Use the superset of the channel lists of the two modes */ + _params2 = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]); + if (_params2->params_legacy.nchan > 0 && + _params->params_hotlist.nchan > 0) { + err = _dhd_pno_chan_merge(_chan_list, &tot_nchan, + &_params2->params_legacy.chan_list[0], + _params2->params_legacy.nchan, + &_params->params_hotlist.chan_list[0], + _params->params_hotlist.nchan); + if (err < 0) { + DHD_ERROR(("%s : failed to merge channel list" + " between legacy and hotlist\n", + __FUNCTION__)); + goto exit; + } + } + + } + + INIT_LIST_HEAD(&(_params->params_hotlist.bssid_list)); + + err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, hotlist_params->nbssid); + if (err < 0) { + DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n", + __FUNCTION__, err)); + goto exit; + } + if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_HOTLIST_MODE)) < 0) { + DHD_ERROR(("%s : failed to set call pno_set (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + if (tot_nchan > 0) { + if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) { + DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n", + __FUNCTION__, err)); + goto exit; + } + } + for (i = 0; i < hotlist_params->nbssid; i++) { + _pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL); + NULL_CHECK(_pno_bssid, "_pno_bssid is NULL", err); + memcpy(&_pno_bssid->macaddr, &p_pfn_bssid[i].macaddr, ETHER_ADDR_LEN); + _pno_bssid->flags = p_pfn_bssid[i].flags; + list_add_tail(&_pno_bssid->list, &_params->params_hotlist.bssid_list); + } + _params->params_hotlist.nbssid = hotlist_params->nbssid; + if (_pno_state->pno_status == DHD_PNO_DISABLED) { + if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) + DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__)); + } +exit: + /* clear mode in case of error */ + if (err < 0) + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + return err; +} + +int +dhd_pno_stop_for_hotlist(dhd_pub_t *dhd) +{ + int err = BCME_OK; + uint32 mode = 0; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + wlc_ssid_ext_t *p_ssid_list = NULL; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", + __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + + if (!(_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE)) { + DHD_ERROR(("%s : Hotlist MODE is not enabled\n", + __FUNCTION__)); + goto exit; + } + _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE; + + if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_BATCH_MODE)) { + /* retrieve the batching data from firmware into host */ + dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE); + /* save current pno_mode before calling dhd_pno_clean */ + mode = _pno_state->pno_mode; + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + /* restore previous pno mode */ + 
_pno_state->pno_mode = mode; + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + /* restart Legacy PNO Scan */ + struct dhd_pno_legacy_params *_params_legacy; + _params_legacy = + &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy); + p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state); + if (!p_ssid_list) { + err = BCME_NOMEM; + DHD_ERROR(("failed to get Legacy PNO SSID list\n")); + goto exit; + } + err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid, + _params_legacy->scan_fr, _params_legacy->pno_repeat, + _params_legacy->pno_freq_expo_max, _params_legacy->chan_list, + _params_legacy->nchan); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE; + DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } else if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + /* restart Batching Scan */ + _params = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]); + err = dhd_pno_set_for_batch(dhd, &_params->params_batch); + if (err < 0) { + _pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE; + DHD_ERROR(("%s : failed to restart batch scan(err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } + } else { + err = dhd_pno_clean(dhd); + if (err < 0) { + DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n", + __FUNCTION__, err)); + goto exit; + } + } +exit: + kfree(p_ssid_list); + return err; +} + +#ifdef GSCAN_SUPPORT +int +dhd_retreive_batch_scan_results(dhd_pub_t *dhd) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + struct dhd_pno_batch_params *params_batch; + _pno_state = PNO_GET_PNOSTATE(dhd); + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; + if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE) { + DHD_PNO(("Retrieve batch results\n")); + params_batch->get_batch.buf = NULL; + params_batch->get_batch.bufsize = 0; + params_batch->get_batch.reason = PNO_STATUS_EVENT; + _params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_IN_PROGRESS; + schedule_work(&_pno_state->work); + } else { + DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING retrieval" + " already in progress, will skip\n", __FUNCTION__)); + err = BCME_ERROR; + } + + return err; +} + +/* Handle Significant WiFi Change (SWC) event from FW + * Send event to HAL when all results arrive from FW + */ +void * +dhd_handle_swc_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes) +{ + void *ptr = NULL; + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + struct dhd_pno_gscan_params *gscan_params; + struct dhd_pno_swc_evt_param *params; + wl_pfn_swc_results_t *results = (wl_pfn_swc_results_t *)event_data; + wl_pfn_significant_net_t *change_array; + int i; + + + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + params = &(gscan_params->param_significant); + + if (!results->total_count) { + *send_evt_bytes = 0; + return ptr; + } + + if (!params->results_rxed_so_far) { + if (!params->change_array) { + params->change_array = (wl_pfn_significant_net_t *) + kmalloc(sizeof(wl_pfn_significant_net_t) * results->total_count, + GFP_KERNEL); + + if (!params->change_array) { + DHD_ERROR(("%s Cannot Malloc %zd bytes!!\n", __FUNCTION__, + sizeof(wl_pfn_significant_net_t) * results->total_count)); + *send_evt_bytes = 0; + return ptr; + } + } else { + DHD_ERROR(("RX'ed WLC_E_PFN_SWC evt from FW, previous evt not complete!!")); + 
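+ /* A leftover change_array with no partial results recorded means the + * previous SWC burst was never consumed; drop this event rather than + * mix results from two bursts. + */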
*send_evt_bytes = 0; + return ptr; + } + + } + + DHD_PNO(("%s: pkt_count %d total_count %d\n", __FUNCTION__, + results->pkt_count, results->total_count)); + + for (i = 0; i < results->pkt_count; i++) { + DHD_PNO(("\t %02x:%02x:%02x:%02x:%02x:%02x\n", + results->list[i].BSSID.octet[0], + results->list[i].BSSID.octet[1], + results->list[i].BSSID.octet[2], + results->list[i].BSSID.octet[3], + results->list[i].BSSID.octet[4], + results->list[i].BSSID.octet[5])); + } + + change_array = &params->change_array[params->results_rxed_so_far]; + memcpy(change_array, results->list, sizeof(wl_pfn_significant_net_t) * results->pkt_count); + params->results_rxed_so_far += results->pkt_count; + + if (params->results_rxed_so_far == results->total_count) { + params->results_rxed_so_far = 0; + *send_evt_bytes = sizeof(wl_pfn_significant_net_t) * results->total_count; + /* Pack up change buffer to send up and reset + * results_rxed_so_far, after it's done. + */ + ptr = (void *) params->change_array; + /* the caller is expected to free this mem chunk */ + params->change_array = NULL; + } + else { + *send_evt_bytes = 0; + } + + return ptr; +} + +void +dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type) +{ + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + struct dhd_pno_gscan_params *gscan_params; + gscan_results_cache_t *iter, *tmp; + + if (!_pno_state) { + return; + } + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + + if (type == HOTLIST_FOUND) { + iter = gscan_params->gscan_hotlist_found; + gscan_params->gscan_hotlist_found = NULL; + } else { + iter = gscan_params->gscan_hotlist_lost; + gscan_params->gscan_hotlist_lost = NULL; + } + + while (iter) { + tmp = iter->next; + kfree(iter); + iter = tmp; + } + + return; +} + +void * +dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *data, int *size) +{ + wl_bss_info_t *bi = NULL; + wl_gscan_result_t *gscan_result; + wifi_gscan_result_t *result = NULL; + u32 bi_length = 0; + uint8 channel; + uint32 mem_needed; + + struct timespec ts; + + *size = 0; + + gscan_result = (wl_gscan_result_t *)data; + + if (!gscan_result) { + DHD_ERROR(("Invalid gscan result (NULL pointer)\n")); + goto exit; + } + if (!gscan_result->bss_info) { + DHD_ERROR(("Invalid gscan bss info (NULL pointer)\n")); + goto exit; + } + bi = &gscan_result->bss_info[0].info; + bi_length = dtoh32(bi->length); + if (bi_length != (dtoh32(gscan_result->buflen) - + WL_GSCAN_RESULTS_FIXED_SIZE - WL_GSCAN_INFO_FIXED_FIELD_SIZE)) { + DHD_ERROR(("Invalid bss_info length %d: ignoring\n", bi_length)); + goto exit; + } + if (bi->SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("Invalid SSID length %d: trimming it to max\n", bi->SSID_len)); + bi->SSID_len = DOT11_MAX_SSID_LEN; + } + + mem_needed = OFFSETOF(wifi_gscan_result_t, ie_data) + bi->ie_length; + result = kmalloc(mem_needed, GFP_KERNEL); + + if (!result) { + DHD_ERROR(("%s Cannot malloc scan result buffer %d bytes\n", + __FUNCTION__, mem_needed)); + goto exit; + } + + memcpy(result->ssid, bi->SSID, bi->SSID_len); + result->ssid[bi->SSID_len] = '\0'; + channel = wf_chspec_ctlchan(bi->chanspec); + result->channel = wf_channel2mhz(channel, + (channel <= CH_MAX_2G_CHANNEL? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + result->rssi = (int32) bi->RSSI; + result->rtt = 0; + result->rtt_sd = 0; + get_monotonic_boottime(&ts); + result->ts = (uint64) TIMESPEC_TO_US(ts); + result->beacon_period = dtoh16(bi->beacon_period); + result->capability = dtoh16(bi->capability); + result->ie_length = dtoh32(bi->ie_length); + memcpy(&result->macaddr, &bi->BSSID, ETHER_ADDR_LEN); + memcpy(result->ie_data, ((uint8 *)bi + bi->ie_offset), bi->ie_length); + *size = mem_needed; +exit: + return result; +} + +void * +dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, + int *send_evt_bytes, hotlist_type_t type) +{ + void *ptr = NULL; + dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd); + struct dhd_pno_gscan_params *gscan_params; + wl_pfn_scanresults_t *results = (wl_pfn_scanresults_t *)event_data; + wifi_gscan_result_t *hotlist_found_array; + wl_pfn_net_info_t *plnetinfo; + gscan_results_cache_t *gscan_hotlist_cache; + int malloc_size = 0, i, total = 0; + + gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan); + + if (!results->count) { + *send_evt_bytes = 0; + return ptr; + } + + malloc_size = sizeof(gscan_results_cache_t) + + ((results->count - 1) * sizeof(wifi_gscan_result_t)); + gscan_hotlist_cache = (gscan_results_cache_t *) kmalloc(malloc_size, GFP_KERNEL); + + if (!gscan_hotlist_cache) { + DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size)); + *send_evt_bytes = 0; + return ptr; + } + + if (type == HOTLIST_FOUND) { + gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found; + gscan_params->gscan_hotlist_found = gscan_hotlist_cache; + DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, results->count)); + } else { + gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost; + gscan_params->gscan_hotlist_lost = gscan_hotlist_cache; + DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, results->count)); + } + + gscan_hotlist_cache->tot_count = results->count; + gscan_hotlist_cache->tot_consumed = 0; + plnetinfo = results->netinfo; + + for (i = 0; i < results->count; i++, plnetinfo++) { + hotlist_found_array = &gscan_hotlist_cache->results[i]; + hotlist_found_array->channel = wf_channel2mhz(plnetinfo->pfnsubnet.channel, + (plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G)); + hotlist_found_array->rssi = (int32) plnetinfo->RSSI; + /* Info not available & not expected */ + hotlist_found_array->beacon_period = 0; + hotlist_found_array->capability = 0; + hotlist_found_array->ie_length = 0; + + hotlist_found_array->ts = convert_fw_rel_time_to_systime(plnetinfo->timestamp); + if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) { + DHD_ERROR(("Invalid SSID length %d: trimming it to max\n", + plnetinfo->pfnsubnet.SSID_len)); + plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN; + } + memcpy(hotlist_found_array->ssid, plnetinfo->pfnsubnet.SSID, + plnetinfo->pfnsubnet.SSID_len); + hotlist_found_array->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0'; + + memcpy(&hotlist_found_array->macaddr, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN); + DHD_PNO(("\t%s %02x:%02x:%02x:%02x:%02x:%02x rssi %d\n", hotlist_found_array->ssid, + hotlist_found_array->macaddr.octet[0], + hotlist_found_array->macaddr.octet[1], + hotlist_found_array->macaddr.octet[2], + hotlist_found_array->macaddr.octet[3], + hotlist_found_array->macaddr.octet[4], + hotlist_found_array->macaddr.octet[5], + hotlist_found_array->rssi)); + } + + + if (results->status == PFN_COMPLETE) { + ptr = (void *) gscan_hotlist_cache; + while (gscan_hotlist_cache) { + total += gscan_hotlist_cache->tot_count; + gscan_hotlist_cache = gscan_hotlist_cache->next; + } + *send_evt_bytes = total * sizeof(wifi_gscan_result_t); + } + + return ptr; +} +#endif /* GSCAN_SUPPORT */ +int +dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) +{ + int err = BCME_OK; + uint status, event_type, flags, datalen; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + NULL_CHECK(dhd->pno_state, "pno_state is NULL", err); + _pno_state = PNO_GET_PNOSTATE(dhd); + if (!WLS_SUPPORTED(_pno_state)) { + DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__)); + err = BCME_UNSUPPORTED; + goto exit; + } + event_type = ntoh32(event->event_type); + flags = ntoh16(event->flags); + status = ntoh32(event->status); + datalen = ntoh32(event->datalen); + DHD_PNO(("%s enter : event_type :%d\n", __FUNCTION__, event_type)); + switch (event_type) { + case WLC_E_PFN_BSSID_NET_FOUND: + case WLC_E_PFN_BSSID_NET_LOST: + /* TODO : need to implement event logic using generic netlink */ + break; + case WLC_E_PFN_BEST_BATCHING: +#ifndef GSCAN_SUPPORT + { + struct dhd_pno_batch_params *params_batch; + params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch; + if (!waitqueue_active(&_pno_state->get_batch_done.wait)) { + DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING\n", __FUNCTION__)); + params_batch->get_batch.buf = NULL; + params_batch->get_batch.bufsize = 0; + params_batch->get_batch.reason = PNO_STATUS_EVENT; + schedule_work(&_pno_state->work); + } else + DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING" + "will skip this event\n", __FUNCTION__)); + break; + } +#else + break; +#endif /* !GSCAN_SUPPORT */ + default: + DHD_ERROR(("unknown event : %d\n", event_type)); + } +exit: + return err; +} + +int dhd_pno_init(dhd_pub_t *dhd) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + NULL_CHECK(dhd, "dhd is NULL", err); + DHD_PNO(("%s enter\n", __FUNCTION__)); + UNUSED_PARAMETER(_dhd_pno_suspend); + if (dhd->pno_state) + goto exit; + dhd->pno_state = MALLOC(dhd->osh, sizeof(dhd_pno_status_info_t)); + NULL_CHECK(dhd->pno_state, "failed to create dhd_pno_state", err); + memset(dhd->pno_state, 0, sizeof(dhd_pno_status_info_t)); + /* need to check whether 
current firmware support batching and hotlist scan */ + _pno_state = PNO_GET_PNOSTATE(dhd); + _pno_state->wls_supported = TRUE; + _pno_state->dhd = dhd; + mutex_init(&_pno_state->pno_mutex); + INIT_WORK(&_pno_state->work, _dhd_pno_get_batch_handler); + init_completion(&_pno_state->get_batch_done); +#ifdef GSCAN_SUPPORT + init_waitqueue_head(&_pno_state->batch_get_wait); +#endif /* GSCAN_SUPPORT */ + err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, 0); + if (err == BCME_UNSUPPORTED) { + _pno_state->wls_supported = FALSE; + DHD_INFO(("Current firmware doesn't support" + " Android Location Service\n")); + } else { + DHD_ERROR(("%s: Support Android Location Service\n", + __FUNCTION__)); + } +exit: + return err; +} + +int dhd_pno_deinit(dhd_pub_t *dhd) +{ + int err = BCME_OK; + dhd_pno_status_info_t *_pno_state; + dhd_pno_params_t *_params; + NULL_CHECK(dhd, "dhd is NULL", err); + + DHD_PNO(("%s enter\n", __FUNCTION__)); + _pno_state = PNO_GET_PNOSTATE(dhd); + NULL_CHECK(_pno_state, "pno_state is NULL", err); + /* may need to free legacy ssid_list */ + if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) { + _params = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]; + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE); + } + +#ifdef GSCAN_SUPPORT + if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) { + _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS]; + mutex_lock(&_pno_state->pno_mutex); + dhd_pno_reset_cfg_gscan(_params, _pno_state, GSCAN_FLUSH_ALL_CFG); + mutex_unlock(&_pno_state->pno_mutex); + } +#endif /* GSCAN_SUPPORT */ + + if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) { + _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]; + /* clear resource if the BATCH MODE is on */ + _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE); + } + cancel_work_sync(&_pno_state->work); + MFREE(dhd->osh, _pno_state, sizeof(dhd_pno_status_info_t)); + dhd->pno_state = NULL; + return err; +} +#endif /* PNO_SUPPORT */ diff --git a/drivers/net/wireless/bcmdhd/dhd_pno.h b/drivers/net/wireless/bcmdhd/dhd_pno.h new file mode 100644 index 000000000000..990ec6ce4ad1 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_pno.h @@ -0,0 +1,498 @@ +/* + * Header file of Broadcom Dongle Host Driver (DHD) + * Prefered Network Offload code and Wi-Fi Location Service(WLS) code. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_pno.h 591285 2015-10-07 11:56:29Z $ + */ + +#ifndef __DHD_PNO_H__ +#define __DHD_PNO_H__ + +#if defined(PNO_SUPPORT) +#define PNO_TLV_PREFIX 'S' +#define PNO_TLV_VERSION '1' +#define PNO_TLV_SUBTYPE_LEGACY_PNO '2' +#define PNO_TLV_RESERVED '0' +#define PNO_BATCHING_SET "SET" +#define PNO_BATCHING_GET "GET" +#define PNO_BATCHING_STOP "STOP" +#define PNO_PARAMS_DELIMETER " " +#define PNO_PARAM_CHANNEL_DELIMETER "," +#define PNO_PARAM_VALUE_DELLIMETER '=' +#define PNO_PARAM_SCANFREQ "SCANFREQ" +#define PNO_PARAM_BESTN "BESTN" +#define PNO_PARAM_MSCAN "MSCAN" +#define PNO_PARAM_CHANNEL "CHANNEL" +#define PNO_PARAM_RTT "RTT" + +#define PNO_TLV_TYPE_SSID_IE 'S' +#define PNO_TLV_TYPE_TIME 'T' +#define PNO_TLV_FREQ_REPEAT 'R' +#define PNO_TLV_FREQ_EXPO_MAX 'M' + +#define MAXNUM_SSID_PER_ADD 16 +#define MAXNUM_PNO_PARAMS 2 +#define PNO_TLV_COMMON_LENGTH 1 +#define DEFAULT_BATCH_MSCAN 16 + +#define RESULTS_END_MARKER "----\n" +#define SCAN_END_MARKER "####\n" +#define AP_END_MARKER "====\n" +#define PNO_RSSI_MARGIN_DBM 30 + +#ifdef GSCAN_SUPPORT + +#define GSCAN_MAX_CH_BUCKETS 8 +#define GSCAN_BG_BAND_MASK (1 << 0) +#define GSCAN_A_BAND_MASK (1 << 1) +#define GSCAN_DFS_MASK (1 << 2) +#define GSCAN_ABG_BAND_MASK (GSCAN_A_BAND_MASK | GSCAN_BG_BAND_MASK) +#define GSCAN_BAND_MASK (GSCAN_ABG_BAND_MASK | GSCAN_DFS_MASK) + +#define GSCAN_FLUSH_HOTLIST_CFG (1 << 0) +#define GSCAN_FLUSH_SIGNIFICANT_CFG (1 << 1) +#define GSCAN_FLUSH_SCAN_CFG (1 << 2) +#define GSCAN_FLUSH_ALL_CFG (GSCAN_FLUSH_SCAN_CFG | \ + GSCAN_FLUSH_SIGNIFICANT_CFG | \ + GSCAN_FLUSH_HOTLIST_CFG) +/* Do not change GSCAN_BATCH_RETRIEVAL_COMPLETE */ +#define GSCAN_BATCH_RETRIEVAL_COMPLETE 0 +#define GSCAN_BATCH_RETRIEVAL_IN_PROGRESS 1 +#define GSCAN_BATCH_NO_THR_SET 101 +#define GSCAN_LOST_AP_WINDOW_DEFAULT 4 +#define GSCAN_MIN_BSSID_TIMEOUT 90 +#define GSCAN_BATCH_GET_MAX_WAIT 500 +#define CHANNEL_BUCKET_EMPTY_INDEX 0xFFFF +#define GSCAN_RETRY_THRESHOLD 3 +#endif /* GSCAN_SUPPORT */ + +enum scan_status { + /* SCAN ABORT by other scan */ + PNO_STATUS_ABORT, + /* RTT is presence or not */ + PNO_STATUS_RTT_PRESENCE, + /* Disable PNO by Driver */ + PNO_STATUS_DISABLE, + /* NORMAL BATCHING GET */ + PNO_STATUS_NORMAL, + /* WLC_E_PFN_BEST_BATCHING */ + PNO_STATUS_EVENT, + PNO_STATUS_MAX +}; +#define PNO_STATUS_ABORT_MASK 0x0001 +#define PNO_STATUS_RTT_MASK 0x0002 +#define PNO_STATUS_DISABLE_MASK 0x0004 +#define PNO_STATUS_OOM_MASK 0x0010 + +enum index_mode { + INDEX_OF_LEGACY_PARAMS, + INDEX_OF_BATCH_PARAMS, + INDEX_OF_HOTLIST_PARAMS, + /* GSCAN includes hotlist scan and they do not run + * independent of each other + */ +#ifdef GSCAN_SUPPORT + INDEX_OF_GSCAN_PARAMS = INDEX_OF_HOTLIST_PARAMS, +#endif /* GSCAN_SUPPORT */ + INDEX_MODE_MAX +}; +enum dhd_pno_status { + DHD_PNO_DISABLED, + DHD_PNO_ENABLED, + DHD_PNO_SUSPEND +}; +typedef struct cmd_tlv { + char prefix; + char version; + char subtype; + char reserved; +} cmd_tlv_t; +#ifdef GSCAN_SUPPORT +typedef enum { + WIFI_BAND_UNSPECIFIED, + WIFI_BAND_BG = 1, /* 2.4 GHz */ + WIFI_BAND_A = 2, /* 5 GHz without DFS */ + WIFI_BAND_A_DFS = 4, /* 5 GHz DFS only */ + WIFI_BAND_A_WITH_DFS = 6, /* 5 GHz with DFS */ + WIFI_BAND_ABG = 3, /* 2.4 GHz + 5 GHz; no DFS */ + WIFI_BAND_ABG_WITH_DFS = 7, /* 2.4 GHz + 5 GHz with DFS */ +} gscan_wifi_band_t; + +typedef enum { + HOTLIST_LOST, + HOTLIST_FOUND +} hotlist_type_t; + +typedef enum dhd_pno_gscan_cmd_cfg { + DHD_PNO_BATCH_SCAN_CFG_ID, + DHD_PNO_GEOFENCE_SCAN_CFG_ID, + DHD_PNO_SIGNIFICANT_SCAN_CFG_ID, + DHD_PNO_SCAN_CFG_ID, + 
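+	/* The IDs above select gscan configurations to set or flush; the IDs
+	 * below read state back out (capabilities, batch results, channel
+	 * list), presumably serviced via the dhd_pno_get_gscan() path declared
+	 * later in this header (an inference from the names, not stated here).
+	 */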
DHD_PNO_GET_CAPABILITIES, + DHD_PNO_GET_BATCH_RESULTS, + DHD_PNO_GET_CHANNEL_LIST +} dhd_pno_gscan_cmd_cfg_t; + +typedef enum dhd_pno_mode { + /* Wi-Fi Legacy PNO Mode */ + DHD_PNO_NONE_MODE = 0, + DHD_PNO_LEGACY_MODE = (1 << (0)), + /* Wi-Fi Android BATCH SCAN Mode */ + DHD_PNO_BATCH_MODE = (1 << (1)), + /* Wi-Fi Android Hotlist SCAN Mode */ + DHD_PNO_HOTLIST_MODE = (1 << (2)), + /* Wi-Fi Google Android SCAN Mode */ + DHD_PNO_GSCAN_MODE = (1 << (3)) +} dhd_pno_mode_t; +#else +typedef enum dhd_pno_mode { + /* Wi-Fi Legacy PNO Mode */ + DHD_PNO_NONE_MODE = 0, + DHD_PNO_LEGACY_MODE = (1 << (0)), + /* Wi-Fi Android BATCH SCAN Mode */ + DHD_PNO_BATCH_MODE = (1 << (1)), + /* Wi-Fi Android Hotlist SCAN Mode */ + DHD_PNO_HOTLIST_MODE = (1 << (2)) +} dhd_pno_mode_t; +#endif /* GSCAN_SUPPORT */ +struct dhd_pno_ssid { + bool hidden; + uint32 SSID_len; + uchar SSID[DOT11_MAX_SSID_LEN]; + struct list_head list; +}; +struct dhd_pno_bssid { + struct ether_addr macaddr; + /* Bit4: suppress_lost, Bit3: suppress_found */ + uint16 flags; + struct list_head list; +}; +typedef struct dhd_pno_bestnet_entry { + struct ether_addr BSSID; + uint8 SSID_len; + uint8 SSID[DOT11_MAX_SSID_LEN]; + int8 RSSI; + uint8 channel; + uint32 timestamp; + uint16 rtt0; /* distance_cm based on RTT */ + uint16 rtt1; /* distance_cm based on sample standard deviation */ + unsigned long recorded_time; + struct list_head list; +} dhd_pno_bestnet_entry_t; +#define BESTNET_ENTRY_SIZE (sizeof(dhd_pno_bestnet_entry_t)) + +typedef struct dhd_pno_bestnet_header { + struct dhd_pno_bestnet_header *next; + uint8 reason; + uint32 tot_cnt; + uint32 tot_size; + struct list_head entry_list; +} dhd_pno_best_header_t; +#define BEST_HEADER_SIZE (sizeof(dhd_pno_best_header_t)) + +typedef struct dhd_pno_scan_results { + dhd_pno_best_header_t *bestnetheader; + uint8 cnt_header; + struct list_head list; +} dhd_pno_scan_results_t; +#define SCAN_RESULTS_SIZE (sizeof(dhd_pno_scan_results_t)) + +struct dhd_pno_get_batch_info { + /* info related to get batch */ + char *buf; + bool batch_started; + uint32 tot_scan_cnt; + uint32 expired_tot_scan_cnt; + uint32 top_node_cnt; + uint32 bufsize; + uint32 bytes_written; + int reason; + struct list_head scan_results_list; + struct list_head expired_scan_results_list; +}; +struct dhd_pno_legacy_params { + uint16 scan_fr; + uint16 chan_list[WL_NUMCHANNELS]; + uint16 nchan; + int pno_repeat; + int pno_freq_expo_max; + int nssid; + struct list_head ssid_list; +}; +struct dhd_pno_batch_params { + int32 scan_fr; + uint8 bestn; + uint8 mscan; + uint8 band; + uint16 chan_list[WL_NUMCHANNELS]; + uint16 nchan; + uint16 rtt; + struct dhd_pno_get_batch_info get_batch; +}; +struct dhd_pno_hotlist_params { + uint8 band; + int32 scan_fr; + uint16 chan_list[WL_NUMCHANNELS]; + uint16 nchan; + uint16 nbssid; + struct list_head bssid_list; +}; +#ifdef GSCAN_SUPPORT +typedef struct dhd_pno_gscan_channel_bucket { + uint16 bucket_freq_multiple; + /* band = 1 All bg band channels, + * band = 2 All a band channels, + * band = 0 chan_list channels + */ + uint16 band; + uint8 report_flag; + uint8 num_channels; + uint16 chan_list[GSCAN_MAX_CH_BUCKETS]; +} dhd_pno_gscan_channel_bucket_t; + +typedef struct dhd_pno_swc_evt_param { + uint16 results_rxed_so_far; + wl_pfn_significant_net_t *change_array; +} dhd_pno_swc_evt_param_t; + +typedef struct wifi_gscan_result { + uint64 ts; /* Time of discovery */ + char ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated */ + struct ether_addr macaddr; /* BSSID */ + uint32 channel; /* channel frequency in MHz */ + 
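+	/* Note: this struct is allocated as a variable-length record; see
+	 * dhd_process_full_gscan_result(), which sizes the buffer as
+	 * OFFSETOF(wifi_gscan_result_t, ie_data) + ie_length so that the
+	 * trailing ie_data[] holds the copied information elements.
+	 */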
int32 rssi; /* in db */ + uint64 rtt; /* in nanoseconds */ + uint64 rtt_sd; /* standard deviation in rtt */ + uint16 beacon_period; /* units are Kusec */ + uint16 capability; /* Capability information */ + uint32 ie_length; /* byte length of Information Elements */ + char ie_data[1]; /* IE data to follow */ +} wifi_gscan_result_t; + +typedef struct gscan_results_cache { + struct gscan_results_cache *next; + uint8 scan_id; + uint8 flag; + uint8 tot_count; + uint8 tot_consumed; + wifi_gscan_result_t results[1]; +} gscan_results_cache_t; + +typedef struct dhd_pno_gscan_capabilities { + int max_scan_cache_size; + int max_scan_buckets; + int max_ap_cache_per_scan; + int max_rssi_sample_size; + int max_scan_reporting_threshold; + int max_hotlist_aps; + int max_significant_wifi_change_aps; +} dhd_pno_gscan_capabilities_t; + +struct dhd_pno_gscan_params { + int32 scan_fr; + uint8 bestn; + uint8 mscan; + uint8 buffer_threshold; + uint8 swc_nbssid_threshold; + uint8 swc_rssi_window_size; + uint8 lost_ap_window; + uint8 nchannel_buckets; + uint8 reason; + uint8 get_batch_flag; + uint8 send_all_results_flag; + uint16 max_ch_bucket_freq; + gscan_results_cache_t *gscan_batch_cache; + gscan_results_cache_t *gscan_hotlist_found; + gscan_results_cache_t *gscan_hotlist_lost; + uint16 nbssid_significant_change; + uint16 nbssid_hotlist; + struct dhd_pno_swc_evt_param param_significant; + struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS]; + struct list_head hotlist_bssid_list; + struct list_head significant_bssid_list; +}; + +typedef struct gscan_scan_params { + int32 scan_fr; + uint16 nchannel_buckets; + struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS]; +} gscan_scan_params_t; + +typedef struct gscan_batch_params { + uint8 bestn; + uint8 mscan; + uint8 buffer_threshold; +} gscan_batch_params_t; + +struct bssid_t { + struct ether_addr macaddr; + int16 rssi_reporting_threshold; /* 0 -> no reporting threshold */ +}; + +typedef struct gscan_hotlist_scan_params { + uint16 lost_ap_window; /* number of scans to declare LOST */ + uint16 nbssid; /* number of bssids */ + struct bssid_t bssid[1]; /* n bssids to follow */ +} gscan_hotlist_scan_params_t; + +/* SWC (Significant WiFi Change) params */ +typedef struct gscan_swc_params { + /* Rssi averaging window size */ + uint8 rssi_window; + /* Number of scans that the AP has to be absent before + * being declared LOST + */ + uint8 lost_ap_window; + /* if x Aps have a significant change generate an event. 
*/ + uint8 swc_threshold; + uint8 nbssid; + wl_pfn_significant_bssid_t bssid_elem_list[1]; +} gscan_swc_params_t; + +typedef struct dhd_pno_significant_bssid { + struct ether_addr BSSID; + int8 rssi_low_threshold; + int8 rssi_high_threshold; + struct list_head list; +} dhd_pno_significant_bssid_t; +#endif /* GSCAN_SUPPORT */ +typedef union dhd_pno_params { + struct dhd_pno_legacy_params params_legacy; + struct dhd_pno_batch_params params_batch; + struct dhd_pno_hotlist_params params_hotlist; +#ifdef GSCAN_SUPPORT + struct dhd_pno_gscan_params params_gscan; +#endif /* GSCAN_SUPPORT */ +} dhd_pno_params_t; +typedef struct dhd_pno_status_info { + uint8 pno_oui[DOT11_OUI_LEN]; + dhd_pub_t *dhd; + struct work_struct work; + struct mutex pno_mutex; +#ifdef GSCAN_SUPPORT + wait_queue_head_t batch_get_wait; +#endif /* GSCAN_SUPPORT */ + struct completion get_batch_done; + bool wls_supported; /* wifi location service supported or not */ + enum dhd_pno_status pno_status; + enum dhd_pno_mode pno_mode; + dhd_pno_params_t pno_params_arr[INDEX_MODE_MAX]; + struct list_head head_list; +} dhd_pno_status_info_t; + +/* wrapper functions */ +extern int +dhd_dev_pno_enable(struct net_device *dev, int enable); + +extern int +dhd_dev_pno_stop_for_ssid(struct net_device *dev); + +extern int +dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid, + uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan); + +extern int +dhd_dev_pno_set_for_batch(struct net_device *dev, + struct dhd_pno_batch_params *batch_params); + +extern int +dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize); + +extern int +dhd_dev_pno_stop_for_batch(struct net_device *dev); + +extern int +dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params); +extern int dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui); +#ifdef GSCAN_SUPPORT +extern int +dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, + void *buf, uint8 flush); +extern void * +dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, void *info, + uint32 *len); +void dhd_dev_pno_lock_access_batch_results(struct net_device *dev); +void dhd_dev_pno_unlock_access_batch_results(struct net_device *dev); +extern int dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush); +extern int dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time); +extern void * dhd_dev_swc_scan_event(struct net_device *dev, const void *data, + int *send_evt_bytes); +int dhd_retreive_batch_scan_results(dhd_pub_t *dhd); +extern void * dhd_dev_hotlist_scan_event(struct net_device *dev, + const void *data, int *send_evt_bytes, hotlist_type_t type); +void * dhd_dev_process_full_gscan_result(struct net_device *dev, + const void *data, int *send_evt_bytes); +extern int dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev); +extern void dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type); +extern void dhd_dev_wait_batch_results_complete(struct net_device *dev); +#endif /* GSCAN_SUPPORT */ +/* dhd pno fuctions */ +extern int dhd_pno_stop_for_ssid(dhd_pub_t *dhd); +extern int dhd_pno_enable(dhd_pub_t *dhd, int enable); +extern int dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid, + uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan); + +extern int dhd_pno_set_for_batch(dhd_pub_t *dhd, 
struct dhd_pno_batch_params *batch_params); + +extern int dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason); + + +extern int dhd_pno_stop_for_batch(dhd_pub_t *dhd); + +extern int dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid, + struct dhd_pno_hotlist_params *hotlist_params); + +extern int dhd_pno_stop_for_hotlist(dhd_pub_t *dhd); + +extern int dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data); +extern int dhd_pno_init(dhd_pub_t *dhd); +extern int dhd_pno_deinit(dhd_pub_t *dhd); +extern bool dhd_is_pno_supported(dhd_pub_t *dhd); +extern int dhd_pno_set_mac_oui(dhd_pub_t *dhd, uint8 *oui); +#ifdef GSCAN_SUPPORT +extern int dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, + void *buf, uint8 flush); +extern void * dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *info, + uint32 *len); +extern void dhd_pno_lock_batch_results(dhd_pub_t *dhd); +extern void dhd_pno_unlock_batch_results(dhd_pub_t *dhd); +extern int dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush); +extern int dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag); +extern int dhd_pno_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *buf); +extern int dhd_dev_retrieve_batch_scan(struct net_device *dev); +extern void *dhd_handle_swc_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes); +extern void *dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data, + int *send_evt_bytes, hotlist_type_t type); +extern void *dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *event_data, + int *send_evt_bytes); +extern int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd); +extern void dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type); +extern void dhd_wait_batch_results_complete(dhd_pub_t *dhd); +#endif /* GSCAN_SUPPORT */ +#endif + +#endif /* __DHD_PNO_H__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_proto.h b/drivers/net/wireless/bcmdhd/dhd_proto.h new file mode 100644 index 000000000000..6dcb56328140 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_proto.h @@ -0,0 +1,169 @@ +/* + * Header file describing the internal (inter-module) DHD interfaces. + * + * Provides type definitions and function prototypes used to link the + * DHD OS, bus, and protocol modules. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhd_proto.h 604483 2015-12-07 14:47:36Z $ + */ + +#ifndef _dhd_proto_h_ +#define _dhd_proto_h_ + +#include +#include +#ifdef BCMPCIE +#include +#endif + +#define DEFAULT_IOCTL_RESP_TIMEOUT 2000 +#ifndef IOCTL_RESP_TIMEOUT +/* In milli second default value for Production FW */ +#define IOCTL_RESP_TIMEOUT DEFAULT_IOCTL_RESP_TIMEOUT +#endif /* IOCTL_RESP_TIMEOUT */ + +#ifndef MFG_IOCTL_RESP_TIMEOUT +#define MFG_IOCTL_RESP_TIMEOUT 20000 /* In milli second default value for MFG FW */ +#endif /* MFG_IOCTL_RESP_TIMEOUT */ + +#define DEFAULT_D3_ACK_RESP_TIMEOUT 4000 +#ifndef D3_ACK_RESP_TIMEOUT +#define D3_ACK_RESP_TIMEOUT DEFAULT_D3_ACK_RESP_TIMEOUT +#endif /* D3_ACK_RESP_TIMEOUT */ + +#define DEFAULT_DHD_BUS_BUSY_TIMEOUT (IOCTL_RESP_TIMEOUT + 1000) +#ifndef DHD_BUS_BUSY_TIMEOUT +#define DHD_BUS_BUSY_TIMEOUT DEFAULT_DHD_BUS_BUSY_TIMEOUT +#endif /* DEFAULT_DHD_BUS_BUSY_TIMEOUT */ + +#define IOCTL_DISABLE_TIMEOUT 0 +/* + * Exported from the dhd protocol module (dhd_cdc, dhd_rndis) + */ + +/* Linkage, sets prot link and updates hdrlen in pub */ +extern int dhd_prot_attach(dhd_pub_t *dhdp); + +/* Initilizes the index block for dma'ing indices */ +extern int dhd_prot_dma_indx_init(dhd_pub_t *dhdp, uint32 rw_index_sz, + uint8 type, uint32 length); + +/* Unlink, frees allocated protocol memory (including dhd_prot) */ +extern void dhd_prot_detach(dhd_pub_t *dhdp); + +/* Initialize protocol: sync w/dongle state. + * Sets dongle media info (iswl, drv_version, mac address). + */ +extern int dhd_sync_with_dongle(dhd_pub_t *dhdp); + +/* Protocol initialization needed for IOCTL/IOVAR path */ +extern int dhd_prot_init(dhd_pub_t *dhd); + +/* Stop protocol: sync w/dongle state. */ +extern void dhd_prot_stop(dhd_pub_t *dhdp); + +/* Add any protocol-specific data header. + * Caller must reserve prot_hdrlen prepend space. + */ +extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp); +extern uint dhd_prot_hdrlen(dhd_pub_t *, void *txp); + +/* Remove any protocol-specific data header. 
*/ +extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp, uchar *buf, uint *len); + +/* Use protocol to issue ioctl to dongle */ +extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len); + +/* Handles a protocol control response asynchronously */ +extern int dhd_prot_ctl_complete(dhd_pub_t *dhd); + +/* Check for and handle local prot-specific iovar commands */ +extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Add prot dump output to a buffer */ +extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); + +/* Update local copy of dongle statistics */ +extern void dhd_prot_dstats(dhd_pub_t *dhdp); + +extern int dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen); + +extern int dhd_preinit_ioctls(dhd_pub_t *dhd); + +extern int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, + uint reorder_info_len, void **pkt, uint32 *free_buf_count); + +#ifdef BCMPCIE +extern bool dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound); +extern bool dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound); +extern int dhd_prot_process_ctrlbuf(dhd_pub_t * dhd); +extern bool dhd_prot_dtohsplit(dhd_pub_t * dhd); +extern int dhd_post_dummy_msg(dhd_pub_t *dhd); +extern int dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len); +extern void dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 offset); +extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx); +extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay); + +extern void dhd_dma_buf_init(dhd_pub_t *dhd, void *dma_buf, + void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma); +extern void dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, + uint16 flowid, void *msgbuf_ring); +extern int dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); +extern int dhd_post_tx_ring_item(dhd_pub_t *dhd, void *PKTBUF, uint8 ifindex); +extern int dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); +extern int dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node); +extern int dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b); +extern uint32 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val); +extern uint32 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd); +extern uint32 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx); +extern uint32 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx); +extern void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, + struct bcmstrbuf *strbuf, const char * fmt); +extern void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf); +extern void dhd_prot_update_txflowring(dhd_pub_t *dhdp, uint16 flow_id, void *msgring_info); +extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id, bool in_lock); +extern uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val); +extern void dhd_prot_reset(dhd_pub_t *dhd); +#ifdef DHD_LB +extern void dhd_lb_tx_compl_handler(unsigned long data); +extern void dhd_lb_rx_compl_handler(unsigned long data); +extern void dhd_lb_rx_process_handler(unsigned long data); +#endif /* DHD_LB */ +void dhd_prot_collect_memdump(dhd_pub_t *dhd); +#endif /* BCMPCIE */ +/******************************** + * For version-string expansion * + */ +#if defined(BDC) +#define DHD_PROTOCOL "bdc" +#elif defined(CDC) +#define DHD_PROTOCOL "cdc" +#else +#define DHD_PROTOCOL "unknown" +#endif /* proto */ + 
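+/*
+ * Typical life cycle of the protocol layer, pieced together from the
+ * prototype comments above (an illustrative sketch only, not a normative
+ * call sequence; error handling omitted):
+ *
+ *	dhd_prot_attach(dhdp);		// link prot state, update hdrlen
+ *	dhd_prot_init(dhdp);		// prepare the IOCTL/IOVAR path
+ *	dhd_sync_with_dongle(dhdp);	// pull mac address, version info
+ *	...
+ *	dhd_prot_ioctl(dhdp, ifidx, &ioc, buf, len);	// runtime control
+ *	...
+ *	dhd_prot_stop(dhdp);		// sync with dongle state on teardown
+ *	dhd_prot_detach(dhdp);		// free prot state
+ */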
+#endif /* _dhd_proto_h_ */
diff --git a/drivers/net/wireless/bcmdhd/dhd_rtt.c b/drivers/net/wireless/bcmdhd/dhd_rtt.c
new file mode 100644
index 000000000000..cc0ebb2ecd2d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/dhd_rtt.c
@@ -0,0 +1,731 @@
+/*
+ * Broadcom Dongle Host Driver (DHD), RTT
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_rtt.c 606280 2015-12-15 05:28:25Z $
+ */
+#ifdef RTT_SUPPORT
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#define GET_RTTSTATE(dhd) ((rtt_status_info_t *)dhd->rtt_state)
+static DEFINE_SPINLOCK(noti_list_lock);
+#define NULL_CHECK(p, s, err)  \
+	do { \
+		if (!(p)) { \
+			printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
+			err = BCME_ERROR; \
+			return err; \
+		} \
+	} while (0)
+
+#define RTT_TWO_SIDED(capability) \
+	do { \
+		if ((capability & RTT_CAP_ONE_WAY) == (uint8) (RTT_CAP_ONE_WAY)) \
+			return FALSE; \
+		else \
+			return TRUE; \
+	} while (0)
+#define TIMESPEC_TO_US(ts)  (((uint64)(ts).tv_sec * USEC_PER_SEC) + \
+	(ts).tv_nsec / NSEC_PER_USEC)
+struct rtt_noti_callback {
+	struct list_head list;
+	void *ctx;
+	dhd_rtt_compl_noti_fn noti_fn;
+};
+
+typedef struct rtt_status_info {
+	dhd_pub_t *dhd;
+	int8 status;   /* current status for the current entry */
+	int8 cur_idx;  /* current entry to do RTT */
+	int32 capability; /* rtt capability */
+	struct mutex rtt_mutex;
+	rtt_config_params_t rtt_config;
+	struct work_struct work;
+	struct list_head noti_fn_list;
+	struct list_head rtt_results_cache; /* store results for RTT */
+} rtt_status_info_t;
+
+static int dhd_rtt_start(dhd_pub_t *dhd);
+
+chanspec_t
+dhd_rtt_convert_to_chspec(wifi_channel_info_t channel)
+{
+	int bw;
+	/* force 20 MHz width for the 2.4 GHz band */
+	if (channel.center_freq >= 2400 && channel.center_freq <= 2500) {
+		channel.width = WIFI_CHAN_WIDTH_20;
+	}
+	switch (channel.width) {
+	case WIFI_CHAN_WIDTH_20:
+		bw = WL_CHANSPEC_BW_20;
+		break;
+	case WIFI_CHAN_WIDTH_40:
+		bw = WL_CHANSPEC_BW_40;
+		break;
+	case WIFI_CHAN_WIDTH_80:
+		bw = WL_CHANSPEC_BW_80;
+		break;
+	case WIFI_CHAN_WIDTH_160:
+		bw = WL_CHANSPEC_BW_160;
+		break;
+	default:
+		DHD_ERROR(("doesn't support this bandwidth : %d", channel.width));
+		bw = -1;
+		break;
+	}
+	return wf_channel2chspec(wf_mhz2channel(channel.center_freq, 0), bw);
+}
+
+int
+dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params)
+{
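+	/* Outline of the flow below: copy the caller's target list into the
+	 * driver state, mark the session RTT_STARTED, skip any targets
+	 * flagged "disable", and schedule the work item that invokes
+	 * dhd_rtt_start() for the first enabled target.
+	 */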
+	int err = BCME_OK;
+	int idx;
+	rtt_status_info_t *rtt_status;
+	NULL_CHECK(params, "params is NULL", err);
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	if (rtt_status->capability == RTT_CAP_NONE) {
+		DHD_ERROR(("doesn't support RTT \n"));
+		return BCME_ERROR;
+	}
+	if (rtt_status->status == RTT_STARTED) {
+		DHD_ERROR(("rtt is already started\n"));
+		return BCME_BUSY;
+	}
+	DHD_RTT(("%s enter\n", __FUNCTION__));
+	bcopy(params, &rtt_status->rtt_config, sizeof(rtt_config_params_t));
+	rtt_status->status = RTT_STARTED;
+	/* start measuring RTT from the first device */
+	/* find next target to trigger RTT */
+	for (idx = rtt_status->cur_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
+		/* skip the disabled device */
+		if (rtt_status->rtt_config.target_info[idx].disable) {
+			continue;
+		} else {
+			/* set the idx to cur_idx */
+			rtt_status->cur_idx = idx;
+			break;
+		}
+	}
+	if (idx < rtt_status->rtt_config.rtt_target_cnt) {
+		DHD_RTT(("rtt_status->cur_idx : %d\n", rtt_status->cur_idx));
+		schedule_work(&rtt_status->work);
+	}
+	return err;
+}
+
+int
+dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt)
+{
+	int err = BCME_OK;
+	int i = 0, j = 0;
+	rtt_status_info_t *rtt_status;
+
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	if (rtt_status->status == RTT_STOPPED) {
+		DHD_ERROR(("rtt is not started\n"));
+		return BCME_OK;
+	}
+	DHD_RTT(("%s enter\n", __FUNCTION__));
+	mutex_lock(&rtt_status->rtt_mutex);
+	for (i = 0; i < mac_cnt; i++) {
+		for (j = 0; j < rtt_status->rtt_config.rtt_target_cnt; j++) {
+			if (!bcmp(&mac_list[i], &rtt_status->rtt_config.target_info[j].addr,
+				ETHER_ADDR_LEN)) {
+				rtt_status->rtt_config.target_info[j].disable = TRUE;
+			}
+		}
+	}
+	mutex_unlock(&rtt_status->rtt_mutex);
+	return err;
+}
+
+static int
+dhd_rtt_start(dhd_pub_t *dhd)
+{
+	int err = BCME_OK;
+	int mpc = 0;
+	int nss, mcs, bw;
+	uint32 rspec = 0;
+	int8 eabuf[ETHER_ADDR_STR_LEN];
+	int8 chanbuf[CHANSPEC_STR_LEN];
+	bool set_mpc = FALSE;
+	wl_proxd_iovar_t proxd_iovar;
+	wl_proxd_params_iovar_t proxd_params;
+	wl_proxd_params_iovar_t proxd_tune;
+	wl_proxd_params_tof_method_t *tof_params = &proxd_params.u.tof_params;
+	rtt_status_info_t *rtt_status;
+	rtt_target_info_t *rtt_target;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	/* turn off mpc when not associated */
+	if (!dhd_is_associated(dhd, 0, NULL)) {
+		err = dhd_iovar(dhd, 0, "mpc", (char *)&mpc, sizeof(mpc), 1);
+		if (err < 0) {
+			DHD_ERROR(("%s : failed to set mpc\n", __FUNCTION__));
+			goto exit;
+		}
+		set_mpc = TRUE;
+	}
+
+	if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) {
+		err = BCME_RANGE;
+		goto exit;
+	}
+	DHD_RTT(("%s enter\n", __FUNCTION__));
+	bzero(&proxd_tune, sizeof(proxd_tune));
+	bzero(&proxd_params, sizeof(proxd_params));
+	mutex_lock(&rtt_status->rtt_mutex);
+	/* Get the target information */
+	rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+	mutex_unlock(&rtt_status->rtt_mutex);
+	/* set role */
+	proxd_iovar.method = PROXD_TOF_METHOD;
+	proxd_iovar.mode = WL_PROXD_MODE_INITIATOR;
+
+	/* make sure that proxd is stopped */
+	/* dhd_iovar(dhd, 0, "proxd_stop", (char *)NULL, 0, 1); */
+
+	err = dhd_iovar(dhd, 0, "proxd", (char *)&proxd_iovar, sizeof(proxd_iovar), 1);
+	if (err < 0 && err != BCME_BUSY) {
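+		/* reaching here means a real failure; BCME_BUSY likely
+		 * indicates a previous proxd session is still active (an
+		 * interpretation) and is tolerated and logged below.
+		 */
+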
DHD_ERROR(("%s : failed to set proxd %d\n", __FUNCTION__, err)); + goto exit; + } + if (err == BCME_BUSY) { + DHD_RTT(("BCME_BUSY occurred\n")); + } + /* mac address */ + bcopy(&rtt_target->addr, &tof_params->tgt_mac, ETHER_ADDR_LEN); + /* frame count */ + if (rtt_target->ftm_cnt > RTT_MAX_FRAME_CNT) { + rtt_target->ftm_cnt = RTT_MAX_FRAME_CNT; + } + + if (rtt_target->ftm_cnt) { + tof_params->ftm_cnt = htol16(rtt_target->ftm_cnt); + } else { + tof_params->ftm_cnt = htol16(DEFAULT_FTM_CNT); + } + + if (rtt_target->retry_cnt > RTT_MAX_RETRY_CNT) { + rtt_target->retry_cnt = RTT_MAX_RETRY_CNT; + } + + /* retry count */ + if (rtt_target->retry_cnt) { + tof_params->retry_cnt = htol16(rtt_target->retry_cnt); + } else { + tof_params->retry_cnt = htol16(DEFAULT_RETRY_CNT); + } + + /* chanspec */ + tof_params->chanspec = htol16(rtt_target->chanspec); + /* set parameter */ + DHD_RTT(("Target addr(Idx %d) %s, Channel : %s for RTT (ftm_cnt %d, rety_cnt : %d)\n", + rtt_status->cur_idx, + bcm_ether_ntoa((const struct ether_addr *)&rtt_target->addr, eabuf), + wf_chspec_ntoa(rtt_target->chanspec, chanbuf), rtt_target->ftm_cnt, + rtt_target->retry_cnt)); + + if (rtt_target->type == RTT_ONE_WAY) { + proxd_tune.u.tof_tune.flags = htol32(WL_PROXD_FLAG_ONEWAY); + /* report RTT results for initiator */ + proxd_tune.u.tof_tune.flags |= htol32(WL_PROXD_FLAG_INITIATOR_RPTRTT); + proxd_tune.u.tof_tune.vhtack = 0; + tof_params->tx_rate = htol16(WL_RATE_6M); + tof_params->vht_rate = htol16((WL_RATE_6M >> 16)); + } else { /* RTT TWO WAY */ + /* initiator will send the rtt result to the target */ + proxd_tune.u.tof_tune.flags = htol32(WL_PROXD_FLAG_INITIATOR_REPORT); + tof_params->timeout = 10; /* 10ms for timeout */ + rspec = WL_RSPEC_ENCODE_VHT; /* 11ac VHT */ + nss = 1; /* default Nss = 1 */ + mcs = 0; /* default MCS 0 */ + rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs; + bw = 0; + switch (CHSPEC_BW(rtt_target->chanspec)) { + case WL_CHANSPEC_BW_20: + bw = WL_RSPEC_BW_20MHZ; + break; + case WL_CHANSPEC_BW_40: + bw = WL_RSPEC_BW_40MHZ; + break; + case WL_CHANSPEC_BW_80: + bw = WL_RSPEC_BW_80MHZ; + break; + case WL_CHANSPEC_BW_160: + bw = WL_RSPEC_BW_160MHZ; + break; + default: + DHD_ERROR(("CHSPEC_BW not supported : %d", + CHSPEC_BW(rtt_target->chanspec))); + goto exit; + } + rspec |= bw; + tof_params->tx_rate = htol16(rspec & 0xffff); + tof_params->vht_rate = htol16(rspec >> 16); + } + + /* Set Method to TOF */ + proxd_tune.method = PROXD_TOF_METHOD; + err = dhd_iovar(dhd, 0, "proxd_tune", (char *)&proxd_tune, sizeof(proxd_tune), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to set proxd_tune %d\n", __FUNCTION__, err)); + goto exit; + } + + /* Set Method to TOF */ + proxd_params.method = PROXD_TOF_METHOD; + err = dhd_iovar(dhd, 0, "proxd_params", (char *)&proxd_params, sizeof(proxd_params), 1); + if (err < 0) { + DHD_ERROR(("%s : failed to set proxd_params %d\n", __FUNCTION__, err)); + goto exit; + } + err = dhd_iovar(dhd, 0, "proxd_find", (char *)NULL, 0, 1); + if (err < 0) { + DHD_ERROR(("%s : failed to set proxd_find %d\n", __FUNCTION__, err)); + goto exit; + } +exit: + if (err < 0) { + rtt_status->status = RTT_STOPPED; + if (set_mpc) { + /* enable mpc again in case of error */ + mpc = 1; + err = dhd_iovar(dhd, 0, "mpc", (char *)&mpc, sizeof(mpc), 1); + } + } + return err; +} + +int +dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn) +{ + int err = BCME_OK; + struct rtt_noti_callback *cb = NULL, *iter; + rtt_status_info_t *rtt_status; + NULL_CHECK(dhd, "dhd is NULL", err); + 
NULL_CHECK(noti_fn, "noti_fn is NULL", err);
+
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	spin_lock_bh(&noti_list_lock);
+	list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+		if (iter->noti_fn == noti_fn) {
+			goto exit;
+		}
+	}
+	cb = kmalloc(sizeof(struct rtt_noti_callback), GFP_ATOMIC);
+	if (!cb) {
+		err = -ENOMEM;
+		goto exit;
+	}
+	cb->noti_fn = noti_fn;
+	cb->ctx = ctx;
+	list_add(&cb->list, &rtt_status->noti_fn_list);
+exit:
+	spin_unlock_bh(&noti_list_lock);
+	return err;
+}
+
+int
+dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn)
+{
+	int err = BCME_OK;
+	struct rtt_noti_callback *cb = NULL, *iter;
+	rtt_status_info_t *rtt_status;
+	NULL_CHECK(dhd, "dhd is NULL", err);
+	NULL_CHECK(noti_fn, "noti_fn is NULL", err);
+	rtt_status = GET_RTTSTATE(dhd);
+	NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	spin_lock_bh(&noti_list_lock);
+	list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+		if (iter->noti_fn == noti_fn) {
+			cb = iter;
+			list_del(&cb->list);
+			break;
+		}
+	}
+	spin_unlock_bh(&noti_list_lock);
+	if (cb) {
+		kfree(cb);
+	}
+	return err;
+}
+
+static int
+dhd_rtt_convert_to_host(rtt_result_t *rtt_results, const wl_proxd_event_data_t* evp)
+{
+	int err = BCME_OK;
+	int i;
+	char eabuf[ETHER_ADDR_STR_LEN];
+	char diststr[40];
+	struct timespec ts;
+	NULL_CHECK(rtt_results, "rtt_results is NULL", err);
+	NULL_CHECK(evp, "evp is NULL", err);
+	DHD_RTT(("%s enter\n", __FUNCTION__));
+	rtt_results->distance = ntoh32(evp->distance);
+	rtt_results->sdrtt = ntoh32(evp->sdrtt);
+	rtt_results->ftm_cnt = ntoh16(evp->ftm_cnt);
+	rtt_results->avg_rssi = ntoh16(evp->avg_rssi);
+	rtt_results->validfrmcnt = ntoh16(evp->validfrmcnt);
+	rtt_results->meanrtt = ntoh32(evp->meanrtt);
+	rtt_results->modertt = ntoh32(evp->modertt);
+	rtt_results->medianrtt = ntoh32(evp->medianrtt);
+	rtt_results->err_code = evp->err_code;
+	rtt_results->tx_rate.preamble = (evp->OFDM_frame_type == TOF_FRAME_RATE_VHT)? 3 : 0;
+	rtt_results->tx_rate.nss = 0; /* 1 x 1 */
+	rtt_results->tx_rate.bw =
+		(evp->bandwidth == TOF_BW_80MHZ)? 2 : (evp->bandwidth == TOF_BW_40MHZ)?
1 : 0; + rtt_results->TOF_type = evp->TOF_type; + if (evp->TOF_type == TOF_TYPE_ONE_WAY) { + /* convert to 100kbps unit */ + rtt_results->tx_rate.bitrate = WL_RATE_6M * 5; + rtt_results->tx_rate.rateMcsIdx = WL_RATE_6M; + } else { + rtt_results->tx_rate.bitrate = WL_RATE_6M * 5; + rtt_results->tx_rate.rateMcsIdx = 0; /* MCS 0 */ + } + memset(diststr, 0, sizeof(diststr)); + if (rtt_results->distance == 0xffffffff || rtt_results->distance == 0) { + sprintf(diststr, "distance=-1m\n"); + } else { + sprintf(diststr, "distance=%d.%d m\n", + rtt_results->distance >> 4, ((rtt_results->distance & 0xf) * 125) >> 1); + } + + if (ntoh32(evp->mode) == WL_PROXD_MODE_INITIATOR) { + DHD_RTT(("Target:(%s) %s;\n", bcm_ether_ntoa((&evp->peer_mac), eabuf), diststr)); + DHD_RTT(("RTT : mean %d mode %d median %d\n", rtt_results->meanrtt, + rtt_results->modertt, rtt_results->medianrtt)); + } else { + DHD_RTT(("Initiator:(%s) %s; ", bcm_ether_ntoa((&evp->peer_mac), eabuf), diststr)); + } + if (rtt_results->sdrtt > 0) { + DHD_RTT(("sigma:%d.%d\n", rtt_results->sdrtt/10, rtt_results->sdrtt % 10)); + } else { + DHD_RTT(("sigma:0\n")); + } + + DHD_RTT(("rssi:%d validfrmcnt %d, err_code : %d\n", rtt_results->avg_rssi, + rtt_results->validfrmcnt, evp->err_code)); + + switch (evp->err_code) { + case TOF_REASON_OK: + rtt_results->err_code = RTT_REASON_SUCCESS; + break; + case TOF_REASON_TIMEOUT: + rtt_results->err_code = RTT_REASON_TIMEOUT; + break; + case TOF_REASON_NOACK: + rtt_results->err_code = RTT_REASON_NO_RSP; + break; + case TOF_REASON_ABORT: + rtt_results->err_code = RTT_REASON_ABORT; + break; + default: + rtt_results->err_code = RTT_REASON_FAILURE; + break; + } + rtt_results->peer_mac = evp->peer_mac; + /* get the time elapsed from boot time */ + get_monotonic_boottime(&ts); + rtt_results->ts = (uint64) TIMESPEC_TO_US(ts); + + for (i = 0; i < rtt_results->ftm_cnt; i++) { + rtt_results->ftm_buff[i].value = ltoh32(evp->ftm_buff[i].value); + rtt_results->ftm_buff[i].rssi = ltoh32(evp->ftm_buff[i].rssi); + } + return err; +} + +int +dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data) +{ + int err = BCME_OK; + int len = 0; + int idx; + uint status, event_type, flags, reason, ftm_cnt; + rtt_status_info_t *rtt_status; + wl_proxd_event_data_t* evp; + struct rtt_noti_callback *iter; + rtt_result_t *rtt_result, *entry, *next; + gfp_t kflags; + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + event_type = ntoh32_ua((void *)&event->event_type); + flags = ntoh16_ua((void *)&event->flags); + status = ntoh32_ua((void *)&event->status); + reason = ntoh32_ua((void *)&event->reason); + + if (event_type != WLC_E_PROXD) { + goto exit; + } + kflags = in_softirq()? GFP_ATOMIC : GFP_KERNEL; + evp = (wl_proxd_event_data_t*)event_data; + DHD_RTT(("%s enter : mode: %s, reason :%d \n", __FUNCTION__, + (ntoh16(evp->mode) == WL_PROXD_MODE_INITIATOR)? 
+ "initiator":"target", reason)); + switch (reason) { + case WLC_E_PROXD_STOP: + DHD_RTT(("WLC_E_PROXD_STOP\n")); + break; + case WLC_E_PROXD_ERROR: + case WLC_E_PROXD_COMPLETED: + if (reason == WLC_E_PROXD_ERROR) { + DHD_RTT(("WLC_E_PROXD_ERROR\n")); + } else { + DHD_RTT(("WLC_E_PROXD_COMPLETED\n")); + } + + if (!in_atomic()) { + mutex_lock(&rtt_status->rtt_mutex); + } + ftm_cnt = ntoh16(evp->ftm_cnt); + + if (ftm_cnt > 0) { + len = OFFSETOF(rtt_result_t, ftm_buff); + } else { + len = sizeof(rtt_result_t); + } + /* check whether the results is already reported or not */ + list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) { + if (!memcmp(&entry->peer_mac, &evp->peer_mac, ETHER_ADDR_LEN)) { + if (!in_atomic()) { + mutex_unlock(&rtt_status->rtt_mutex); + } + goto exit; + } + } + rtt_result = kzalloc(len + sizeof(ftm_sample_t) * ftm_cnt, kflags); + if (!rtt_result) { + if (!in_atomic()) { + mutex_unlock(&rtt_status->rtt_mutex); + } + err = -ENOMEM; + goto exit; + } + /* point to target_info in status struct and increase pointer */ + rtt_result->target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx]; + /* find next target to trigger RTT */ + for (idx = (rtt_status->cur_idx + 1); + idx < rtt_status->rtt_config.rtt_target_cnt; idx++) { + /* skip the disabled device */ + if (rtt_status->rtt_config.target_info[idx].disable) { + continue; + } else { + /* set the idx to cur_idx */ + rtt_status->cur_idx = idx; + break; + } + } + /* convert the event results to host format */ + dhd_rtt_convert_to_host(rtt_result, evp); + list_add_tail(&rtt_result->list, &rtt_status->rtt_results_cache); + if (idx < rtt_status->rtt_config.rtt_target_cnt) { + /* restart to measure RTT from next device */ + schedule_work(&rtt_status->work); + } else { + DHD_RTT(("RTT_STOPPED\n")); + rtt_status->status = RTT_STOPPED; + /* to turn on mpc mode */ + schedule_work(&rtt_status->work); + /* notify the completed information to others */ + list_for_each_entry(iter, &rtt_status->noti_fn_list, list) { + iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache); + } + /* remove the rtt results in cache */ + list_for_each_entry_safe(rtt_result, next, + &rtt_status->rtt_results_cache, list) { + list_del(&rtt_result->list); + kfree(rtt_result); + } + /* reinit the HEAD */ + INIT_LIST_HEAD(&rtt_status->rtt_results_cache); + /* clear information for rtt_config */ + bzero(&rtt_status->rtt_config, sizeof(rtt_status->rtt_config)); + rtt_status->cur_idx = 0; + } + if (!in_atomic()) { + mutex_unlock(&rtt_status->rtt_mutex); + } + + break; + case WLC_E_PROXD_GONE: + DHD_RTT(("WLC_E_PROXD_GONE\n")); + break; + case WLC_E_PROXD_START: + /* event for targets / accesspoints */ + DHD_RTT(("WLC_E_PROXD_START\n")); + break; + case WLC_E_PROXD_COLLECT_START: + DHD_RTT(("WLC_E_PROXD_COLLECT_START\n")); + break; + case WLC_E_PROXD_COLLECT_STOP: + DHD_RTT(("WLC_E_PROXD_COLLECT_STOP\n")); + break; + case WLC_E_PROXD_COLLECT_COMPLETED: + DHD_RTT(("WLC_E_PROXD_COLLECT_COMPLETED\n")); + break; + case WLC_E_PROXD_COLLECT_ERROR: + DHD_RTT(("WLC_E_PROXD_COLLECT_ERROR; ")); + break; + default: + DHD_ERROR(("WLC_E_PROXD: supported EVENT reason code:%d\n", reason)); + break; + } + +exit: + return err; +} + +static void +dhd_rtt_work(struct work_struct *work) +{ + rtt_status_info_t *rtt_status; + dhd_pub_t *dhd; + rtt_status = container_of(work, rtt_status_info_t, work); + if (rtt_status == NULL) { + DHD_ERROR(("%s : rtt_status is NULL\n", __FUNCTION__)); + return; + } + dhd = rtt_status->dhd; + if (dhd == NULL) { + DHD_ERROR(("%s : dhd 
is NULL\n", __FUNCTION__)); + return; + } + (void) dhd_rtt_start(dhd); +} + +int +dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa) +{ + rtt_status_info_t *rtt_status; + int err = BCME_OK; + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + NULL_CHECK(capa, "capa is NULL", err); + bzero(capa, sizeof(rtt_capabilities_t)); + + if (rtt_status->capability & RTT_CAP_ONE_WAY) { + capa->rtt_one_sided_supported = 1; + } + if (rtt_status->capability & RTT_CAP_11V_WAY) { + capa->rtt_11v_supported = 1; + } + if (rtt_status->capability & RTT_CAP_11MC_WAY) { + capa->rtt_ftm_supported = 1; + } + if (rtt_status->capability & RTT_CAP_VS_WAY) { + capa->rtt_vs_supported = 1; + } + + return err; +} + +int +dhd_rtt_init(dhd_pub_t *dhd) +{ + int err = BCME_OK; + rtt_status_info_t *rtt_status; + NULL_CHECK(dhd, "dhd is NULL", err); + if (dhd->rtt_state) { + goto exit; + } + dhd->rtt_state = MALLOC(dhd->osh, sizeof(rtt_status_info_t)); + if (dhd->rtt_state == NULL) { + DHD_ERROR(("failed to create rtt_state\n")); + goto exit; + } + bzero(dhd->rtt_state, sizeof(rtt_status_info_t)); + rtt_status = GET_RTTSTATE(dhd); + rtt_status->dhd = dhd; + err = dhd_iovar(dhd, 0, "proxd_params", NULL, 0, 1); + if (err != BCME_UNSUPPORTED) { + rtt_status->capability |= RTT_CAP_ONE_WAY; + rtt_status->capability |= RTT_CAP_VS_WAY; + DHD_ERROR(("%s: Support RTT Service\n", __FUNCTION__)); + } + mutex_init(&rtt_status->rtt_mutex); + INIT_LIST_HEAD(&rtt_status->noti_fn_list); + INIT_LIST_HEAD(&rtt_status->rtt_results_cache); + INIT_WORK(&rtt_status->work, dhd_rtt_work); +exit: + return err; +} + +int +dhd_rtt_deinit(dhd_pub_t *dhd) +{ + int err = BCME_OK; + rtt_status_info_t *rtt_status; + rtt_result_t *rtt_result, *next; + struct rtt_noti_callback *iter, *iter2; + NULL_CHECK(dhd, "dhd is NULL", err); + rtt_status = GET_RTTSTATE(dhd); + NULL_CHECK(rtt_status, "rtt_status is NULL", err); + rtt_status->status = RTT_STOPPED; + /* clear evt callback list */ + if (!list_empty(&rtt_status->noti_fn_list)) { + list_for_each_entry_safe(iter, iter2, &rtt_status->noti_fn_list, list) { + list_del(&iter->list); + kfree(iter); + } + } + /* remove the rtt results */ + if (!list_empty(&rtt_status->rtt_results_cache)) { + list_for_each_entry_safe(rtt_result, next, &rtt_status->rtt_results_cache, list) { + list_del(&rtt_result->list); + kfree(rtt_result); + } + } + MFREE(dhd->osh, dhd->rtt_state, sizeof(rtt_status_info_t)); + dhd->rtt_state = NULL; + return err; +} +#endif /* RTT_SUPPORT */ diff --git a/drivers/net/wireless/bcmdhd/dhd_rtt.h b/drivers/net/wireless/bcmdhd/dhd_rtt.h new file mode 100644 index 000000000000..2fbb9c973cd3 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_rtt.h @@ -0,0 +1,234 @@ +/* + * Broadcom Dongle Host Driver (DHD), RTT + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the 
license of that module.  An independent module is a module which is not
+ * derived from this software.  The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_rtt.h 558438 2015-05-22 06:05:11Z $
+ */
+#ifndef __DHD_RTT_H__
+#define __DHD_RTT_H__
+
+#include "dngl_stats.h"
+
+#define RTT_MAX_TARGET_CNT 10
+#define RTT_MAX_FRAME_CNT 25
+#define RTT_MAX_RETRY_CNT 10
+#define DEFAULT_FTM_CNT 6
+#define DEFAULT_RETRY_CNT 6
+
+
+/* DSSS, CCK and 802.11n rates in [500kbps] units */
+#define WL_MAXRATE	108	/* in 500kbps units */
+#define WL_RATE_1M	2	/* in 500kbps units */
+#define WL_RATE_2M	4	/* in 500kbps units */
+#define WL_RATE_5M5	11	/* in 500kbps units */
+#define WL_RATE_11M	22	/* in 500kbps units */
+#define WL_RATE_6M	12	/* in 500kbps units */
+#define WL_RATE_9M	18	/* in 500kbps units */
+#define WL_RATE_12M	24	/* in 500kbps units */
+#define WL_RATE_18M	36	/* in 500kbps units */
+#define WL_RATE_24M	48	/* in 500kbps units */
+#define WL_RATE_36M	72	/* in 500kbps units */
+#define WL_RATE_48M	96	/* in 500kbps units */
+#define WL_RATE_54M	108	/* in 500kbps units */
+
+
+enum rtt_role {
+	RTT_INITIATOR = 0,
+	RTT_TARGET = 1
+};
+enum rtt_status {
+	RTT_STOPPED = 0,
+	RTT_STARTED = 1
+};
+typedef int64_t wifi_timestamp; /* In microseconds (us) */
+typedef int64_t wifi_timespan;
+typedef int wifi_rssi;
+
+typedef enum {
+	RTT_INVALID,
+	RTT_ONE_WAY,
+	RTT_TWO_WAY,
+	RTT_AUTO
+} rtt_type_t;
+
+typedef enum {
+	RTT_PEER_STA,
+	RTT_PEER_AP,
+	RTT_PEER_P2P,
+	RTT_PEER_NAN,
+	RTT_PEER_INVALID
+} rtt_peer_type_t;
+
+typedef enum rtt_reason {
+	RTT_REASON_SUCCESS,
+	RTT_REASON_FAILURE,
+	RTT_REASON_NO_RSP,
+	RTT_REASON_REJECTED,
+	RTT_REASON_NOT_SCHEDULED_YET,
+	RTT_REASON_TIMEOUT,
+	RTT_REASON_AP_ON_DIFF_CH,
+	RTT_REASON_AP_NO_CAP,
+	RTT_REASON_ABORT
+} rtt_reason_t;
+
+typedef enum rtt_capability {
+	RTT_CAP_NONE = 0,
+	RTT_CAP_ONE_WAY = (1 << (0)),
+	RTT_CAP_11V_WAY = (1 << (1)),  /* IEEE802.11v */
+	RTT_CAP_11MC_WAY = (1 << (2)), /* IEEE802.11mc */
+	RTT_CAP_VS_WAY = (1 << (3))    /* BRCM vendor specific */
+} rtt_capability_t;
+
+typedef struct wifi_channel_info {
+	wifi_channel_width_t width;
+	wifi_channel center_freq;  /* primary 20 MHz channel */
+	wifi_channel center_freq0; /* center freq (MHz) first segment */
+	wifi_channel center_freq1; /* center freq (MHz) second segment valid for 80 + 80 */
+} wifi_channel_info_t;
+
+typedef struct wifi_rate {
+	uint32 preamble   :3; /* 0: OFDM, 1: CCK, 2: HT, 3: VHT, 4..7 reserved */
+	uint32 nss        :2; /* 0: 1x1, 1: 2x2, 2: 3x3, 3: 4x4 */
+	uint32 bw         :3; /* 0: 20MHz, 1: 40MHz, 2: 80MHz, 3: 160MHz */
+	/* OFDM/CCK rate code as per IEEE std, in units of 0.5 Mbps;
+	 * for HT/VHT it is the MCS index
+	 */
+	uint32 rateMcsIdx :8;
+	uint32 reserved  :16; /* reserved */
+	uint32 bitrate;       /* unit of 100 Kbps */
+} wifi_rate_t;
+
+typedef struct rtt_target_info {
+	struct ether_addr addr;
+	rtt_type_t type; /* rtt_type */
+	rtt_peer_type_t peer; /* peer type */
+	wifi_channel_info_t channel; /* channel information */
+	chanspec_t chanspec; /* chanspec for channel */
+	int8 continuous; /* 0 = single shot or 1 = continuous ranging */
+	bool disable; /* disable for RTT measurement */
+	uint32 interval; /* interval of RTT measurement (unit ms) when continuous = true */
+	uint32 measure_cnt; /* total number of RTT measurements when continuous */
+	uint32 ftm_cnt; /* num of packets in each RTT measurement */
+	uint32 retry_cnt; /* num of retries if sampling fails */
+} rtt_target_info_t;
+
+typedef struct rtt_result {
+	struct list_head list;
+	uint16 ver;			/* version */
+	rtt_target_info_t *target_info;	/* target info */
+	uint16 mode;			/* mode: target/initiator */
+	uint16 method;			/* method: rssi/TOF/AOA */
+	uint8 err_code;			/* error classification */
+	uint8 TOF_type;			/* one way or two way TOF */
+	wifi_rate_t tx_rate;		/* tx rate */
+	struct ether_addr peer_mac;	/* peer mac (e.g. for tgt: the initiator's) */
+	int32 distance;			/* dst to tgt, units (meter * 16) */
+	uint32 meanrtt;			/* mean delta */
+	uint32 modertt;			/* Mode delta */
+	uint32 medianrtt;		/* median RTT */
+	uint32 sdrtt;			/* Standard deviation of RTT */
+	int16 avg_rssi;			/* avg rssi across the ftm frames */
+	int16 validfrmcnt;		/* Firmware's valid frame counts */
+	wifi_timestamp ts;		/* the time elapsed from boot time when the driver got this result */
+	uint16 ftm_cnt;			/* num of rtt measurements/length in the ftm buffer */
+	ftm_sample_t ftm_buff[1];	/* 1 ... ftm_cnt */
+} rtt_result_t;
+
+typedef struct rtt_report {
+	struct ether_addr addr;
+	uint num_measurement;		/* measurement number in case of continuous ranging */
+	rtt_reason_t status;		/* ranging status */
+	rtt_type_t type;		/* rtt type */
+	rtt_peer_type_t peer;		/* peer type */
+	wifi_channel_info_t channel;	/* channel information */
+	wifi_rssi rssi;			/* avg rssi across the ftm frames */
+	wifi_rssi rssi_spread;		/* rssi spread in 0.5 db steps e.g. 5 implies 2.5 spread */
+	wifi_rate_t tx_rate;		/* tx rate */
+	wifi_timespan rtt;		/* round trip time in nanoseconds */
+	wifi_timespan rtt_sd;		/* rtt standard deviation in nanoseconds */
+	wifi_timespan rtt_spread;	/* difference between max and min rtt times recorded */
+	int32 distance;			/* distance in cm (optional) */
+	int32 distance_sd;		/* standard deviation in cm (optional) */
+	int32 distance_spread;		/* difference between max and min distance recorded (optional) */
+	wifi_timestamp ts;		/* time of the measurement (in microseconds since boot) */
+} rtt_report_t;
+
+/* RTT Capabilities */
+typedef struct rtt_capabilities {
+	uint8 rtt_one_sided_supported;	/* if 1-sided rtt data collection is supported */
+	uint8 rtt_11v_supported;	/* if 11v rtt data collection is supported */
+	uint8 rtt_ftm_supported;	/* if ftm rtt data collection is supported */
+	uint8 rtt_vs_supported;		/* if vendor specific data collection supported */
+} rtt_capabilities_t;
+
+typedef struct rtt_config_params {
+	int8 rtt_target_cnt;
+	rtt_target_info_t target_info[RTT_MAX_TARGET_CNT];
+} rtt_config_params_t;
+
+typedef void (*dhd_rtt_compl_noti_fn)(void *ctx, void *rtt_data);
+/* Linux wrapper to call common dhd_rtt_set_cfg */
+int
+dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf);
+
+int
+dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt);
+
+int
+dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx,
+	dhd_rtt_compl_noti_fn noti_fn);
+
+int
+dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn);
+
+int
+dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa);
+
+/* export to upper layer */
+chanspec_t
+dhd_rtt_convert_to_chspec(wifi_channel_info_t channel);
+
+int
+dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params);
+
+int
+dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt);
+
+
+int
+dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn); + +int +dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn); + +int +dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data); + +int +dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa); + +int +dhd_rtt_init(dhd_pub_t *dhd); + +int +dhd_rtt_deinit(dhd_pub_t *dhd); +#endif /* __DHD_RTT_H__ */ diff --git a/drivers/net/wireless/bcmdhd/dhd_sdio.c b/drivers/net/wireless/bcmdhd/dhd_sdio.c new file mode 100644 index 000000000000..2a9830e3dc03 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_sdio.c @@ -0,0 +1,8407 @@ +/* + * DHD Bus Module for SDIO + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_sdio.c 593728 2015-10-19 09:20:32Z $ + */ + +#include +#include +#include + +#ifdef BCMEMBEDIMAGE +#include BCMEMBEDIMAGE +#endif /* BCMEMBEDIMAGE */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#ifdef PROP_TXSTATUS +#include +#endif +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ + +bool dhd_mp_halting(dhd_pub_t *dhdp); +extern void bcmsdh_waitfor_iodrain(void *sdh); +extern void bcmsdh_reject_ioreqs(void *sdh, bool reject); +extern bool bcmsdh_fatal_error(void *sdh); + +#ifndef DHDSDIO_MEM_DUMP_FNAME +#define DHDSDIO_MEM_DUMP_FNAME "mem_dump" +#endif + +#define QLEN (1024) /* bulk rx and tx queue lengths */ +#define FCHI (QLEN - 10) +#define FCLOW (FCHI / 2) +#define PRIOMASK 7 + +#define TXRETRIES 2 /* # of retries for tx frames */ +#define READ_FRM_CNT_RETRIES 3 +#ifndef DHD_RXBOUND +#define DHD_RXBOUND 50 /* Default for max rx frames in one scheduling */ +#endif + +#ifndef DHD_TXBOUND +#define DHD_TXBOUND 20 /* Default for max tx frames in one scheduling */ +#endif + +#define DHD_TXMINMAX 1 /* Max tx frames if rx still pending */ + +#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */ +#define MAX_DATA_BUF (64 * 1024) /* Must be large enough to hold biggest possible glom */ + +#ifndef DHD_FIRSTREAD +#define DHD_FIRSTREAD 32 +#endif +#if !ISPOWEROF2(DHD_FIRSTREAD) +#error DHD_FIRSTREAD is not a power of 2! 
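+/* Editor's sketch, not part of the original patch: ISPOWEROF2() is assumed
+ * to be the usual bit trick, roughly
+ *
+ * #define ISPOWEROF2(x) ((((x) - 1) & (x)) == 0)
+ *
+ * so the default DHD_FIRSTREAD of 32 passes ((31 & 32) == 0), while a value
+ * such as 24 fails ((23 & 24) == 16) and would trip the #error above.
+ */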
+#endif + +/* Total length of frame header for dongle protocol */ +#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN) +#define SDPCM_HDRLEN_TXGLOM (SDPCM_HDRLEN + SDPCM_HWEXT_LEN) +#define MAX_TX_PKTCHAIN_CNT SDPCM_MAXGLOM_SIZE + +#ifdef SDTEST +#define SDPCM_RESERVE (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN) +#else +#define SDPCM_RESERVE (SDPCM_HDRLEN + DHD_SDALIGN) +#endif + +/* Space for header read, limit for data packets */ +#ifndef MAX_HDR_READ +#define MAX_HDR_READ 32 +#endif +#if !ISPOWEROF2(MAX_HDR_READ) +#error MAX_HDR_READ is not a power of 2! +#endif + +#define MAX_RX_DATASZ 2048 + +/* Maximum milliseconds to wait for F2 to come up */ +#define DHD_WAIT_F2RDY 3000 + +/* Bump up limit on waiting for HT to account for first startup; + * if the image is doing a CRC calculation before programming the PMU + * for HT availability, it could take a couple hundred ms more, so + * max out at a 1 second (1000000us). + */ +#if (PMU_MAX_TRANSITION_DLY <= 1000000) +#undef PMU_MAX_TRANSITION_DLY +#define PMU_MAX_TRANSITION_DLY 1000000 +#endif + +/* hooks for limiting threshold custom tx num in rx processing */ +#define DEFAULT_TXINRX_THRES 0 +#ifndef CUSTOM_TXINRX_THRES +#define CUSTOM_TXINRX_THRES DEFAULT_TXINRX_THRES +#endif + +/* Value for ChipClockCSR during initial setup */ +#define DHD_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ) +#define DHD_INIT_CLKCTL2 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP) + +/* Flags for SDH calls */ +#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED) + +/* Packet free applicable unconditionally for sdio and sdspi. Conditional if + * bufpool was present for gspi bus. + */ +#define PKTFREE2() if ((bus->bus != SPI_BUS) || bus->usebufpool) \ + PKTFREE(bus->dhd->osh, pkt, FALSE); +DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep); + + +/* Device console log buffer state */ +#define CONSOLE_LINE_MAX 192 +#define CONSOLE_BUFFER_MAX 2024 +typedef struct dhd_console { + uint count; /* Poll interval msec counter */ + uint log_addr; /* Log struct address (fixed) */ + hnd_log_t log; /* Log struct (host copy) */ + uint bufsize; /* Size of log buffer */ + uint8 *buf; /* Log buffer (host copy) */ + uint last; /* Last buffer read index */ +} dhd_console_t; + +#define REMAP_ENAB(bus) ((bus)->remap) +#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize))) +#define KSO_ENAB(bus) ((bus)->kso) +#define SR_ENAB(bus) ((bus)->_srenab) +#define SLPAUTO_ENAB(bus) ((SR_ENAB(bus)) && ((bus)->_slpauto)) +#define MIN_RSRC_ADDR (SI_ENUM_BASE + 0x618) +#define MIN_RSRC_SR 0x3 +#define CORE_CAPEXT_ADDR (SI_ENUM_BASE + 0x64c) +#define CORE_CAPEXT_SR_SUPPORTED_MASK (1 << 1) +#define RCTL_MACPHY_DISABLE_MASK (1 << 26) +#define RCTL_LOGIC_DISABLE_MASK (1 << 27) + +#define OOB_WAKEUP_ENAB(bus) ((bus)->_oobwakeup) +#define GPIO_DEV_SRSTATE 16 /* Host gpio17 mapped to device gpio0 SR state */ +#define GPIO_DEV_SRSTATE_TIMEOUT 320000 /* 320ms */ +#define GPIO_DEV_WAKEUP 17 /* Host gpio17 mapped to device gpio1 wakeup */ +#define CC_CHIPCTRL2_GPIO1_WAKEUP (1 << 0) +#define CC_CHIPCTRL3_SR_ENG_ENABLE (1 << 2) +#define OVERFLOW_BLKSZ512_WM 96 +#define OVERFLOW_BLKSZ512_MES 80 + +#define CC_PMUCC3 (0x3) +/* Private data for SDIO bus interaction */ +typedef struct dhd_bus { + dhd_pub_t *dhd; + + bcmsdh_info_t *sdh; /* Handle for BCMSDH calls */ + si_t *sih; /* Handle for SI calls */ + char *vars; /* Variables (from CIS and/or other) */ + uint varsz; /* Size of variables buffer */ + uint32 sbaddr; /* Current SB window pointer (-1, 
invalid) */ + + sdpcmd_regs_t *regs; /* Registers for SDIO core */ + uint sdpcmrev; /* SDIO core revision */ + uint armrev; /* CPU core revision */ + uint ramrev; /* SOCRAM core revision */ + uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */ + uint32 srmemsize; /* Size of SRMEM */ + + uint32 bus; /* gSPI or SDIO bus */ + uint32 bus_num; /* bus number */ + uint32 slot_num; /* slot ID */ + uint32 hostintmask; /* Copy of Host Interrupt Mask */ + uint32 intstatus; /* Intstatus bits (events) pending */ + bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */ + bool fcstate; /* State of dongle flow-control */ + + uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */ + char *fw_path; /* module_param: path to firmware image */ + char *nv_path; /* module_param: path to nvram vars file */ + + uint blocksize; /* Block size of SDIO transfers */ + uint roundup; /* Max roundup limit */ + + struct pktq txq; /* Queue length used for flow-control */ + uint8 flowcontrol; /* per prio flow control bitmask */ + uint8 tx_seq; /* Transmit sequence number (next) */ + uint8 tx_max; /* Maximum transmit sequence allowed */ + + uint8 hdrbuf[MAX_HDR_READ + DHD_SDALIGN]; + uint8 *rxhdr; /* Header of current rx frame (in hdrbuf) */ + uint16 nextlen; /* Next Read Len from last header */ + uint8 rx_seq; /* Receive sequence number (expected) */ + bool rxskip; /* Skip receive (awaiting NAK ACK) */ + + void *glomd; /* Packet containing glomming descriptor */ + void *glom; /* Packet chain for glommed superframe */ + uint glomerr; /* Glom packet read errors */ + + uint8 *rxbuf; /* Buffer for receiving control packets */ + uint rxblen; /* Allocated length of rxbuf */ + uint8 *rxctl; /* Aligned pointer into rxbuf */ + uint8 *databuf; /* Buffer for receiving big glom packet */ + uint8 *dataptr; /* Aligned pointer into databuf */ + uint rxlen; /* Length of valid data in buffer */ + + uint8 sdpcm_ver; /* Bus protocol reported by dongle */ + + bool intr; /* Use interrupts */ + bool poll; /* Use polling */ + bool ipend; /* Device interrupt is pending */ + bool intdis; /* Interrupts disabled by isr */ + uint intrcount; /* Count of device interrupt callbacks */ + uint lastintrs; /* Count as of last watchdog timer */ + uint spurious; /* Count of spurious interrupts */ + uint pollrate; /* Ticks between device polls */ + uint polltick; /* Tick counter */ + uint pollcnt; /* Count of active polls */ + +#ifdef DHD_DEBUG + dhd_console_t console; /* Console output polling support */ + uint console_addr; /* Console address from shared struct */ +#endif /* DHD_DEBUG */ + + uint regfails; /* Count of R_REG/W_REG failures */ + + uint clkstate; /* State of sd and backplane clock(s) */ + bool activity; /* Activity flag for clock down */ + int32 idletime; /* Control for activity timeout */ + int32 idlecount; /* Activity timeout counter */ + int32 idleclock; /* How to set bus driver when idle */ + int32 sd_divisor; /* Speed control to bus driver */ + int32 sd_mode; /* Mode control to bus driver */ + int32 sd_rxchain; /* If bcmsdh api accepts PKT chains */ + bool use_rxchain; /* If dhd should use PKT chains */ + bool sleeping; /* Is SDIO bus sleeping? 
*/ +#if defined(SUPPORT_P2P_GO_PS) + wait_queue_head_t bus_sleep; +#endif /* LINUX && SUPPORT_P2P_GO_PS */ + uint rxflow_mode; /* Rx flow control mode */ + bool rxflow; /* Is rx flow control on */ + uint prev_rxlim_hit; /* Is prev rx limit exceeded (per dpc schedule) */ + bool alp_only; /* Don't use HT clock (ALP only) */ + /* Field to decide if rx of control frames happen in rxbuf or lb-pool */ + bool usebufpool; + int32 txinrx_thres; /* num of in-queued pkts */ + int32 dotxinrx; /* tx first in dhdsdio_readframes */ +#ifdef SDTEST + /* external loopback */ + bool ext_loop; + uint8 loopid; + + /* pktgen configuration */ + uint pktgen_freq; /* Ticks between bursts */ + uint pktgen_count; /* Packets to send each burst */ + uint pktgen_print; /* Bursts between count displays */ + uint pktgen_total; /* Stop after this many */ + uint pktgen_minlen; /* Minimum packet data len */ + uint pktgen_maxlen; /* Maximum packet data len */ + uint pktgen_mode; /* Configured mode: tx, rx, or echo */ + uint pktgen_stop; /* Number of tx failures causing stop */ + + /* active pktgen fields */ + uint pktgen_tick; /* Tick counter for bursts */ + uint pktgen_ptick; /* Burst counter for printing */ + uint pktgen_sent; /* Number of test packets generated */ + uint pktgen_rcvd; /* Number of test packets received */ + uint pktgen_prev_time; /* Time at which previous stats where printed */ + uint pktgen_prev_sent; /* Number of test packets generated when + * previous stats were printed + */ + uint pktgen_prev_rcvd; /* Number of test packets received when + * previous stats were printed + */ + uint pktgen_fail; /* Number of failed send attempts */ + uint16 pktgen_len; /* Length of next packet to send */ +#define PKTGEN_RCV_IDLE (0) +#define PKTGEN_RCV_ONGOING (1) + uint16 pktgen_rcv_state; /* receive state */ + uint pktgen_rcvd_rcvsession; /* test pkts rcvd per rcv session. */ +#endif /* SDTEST */ + + /* Some additional counters */ + uint tx_sderrs; /* Count of tx attempts with sd errors */ + uint fcqueued; /* Tx packets that got queued */ + uint rxrtx; /* Count of rtx requests (NAK to dongle) */ + uint rx_toolong; /* Receive frames too long to receive */ + uint rxc_errors; /* SDIO errors when reading control frames */ + uint rx_hdrfail; /* SDIO errors on header reads */ + uint rx_badhdr; /* Bad received headers (roosync?) 
*/ + uint rx_badseq; /* Mismatched rx sequence number */ + uint fc_rcvd; /* Number of flow-control events received */ + uint fc_xoff; /* Number which turned on flow-control */ + uint fc_xon; /* Number which turned off flow-control */ + uint rxglomfail; /* Failed deglom attempts */ + uint rxglomframes; /* Number of glom frames (superframes) */ + uint rxglompkts; /* Number of packets from glom frames */ + uint f2rxhdrs; /* Number of header reads */ + uint f2rxdata; /* Number of frame data reads */ + uint f2txdata; /* Number of f2 frame writes */ + uint f1regdata; /* Number of f1 register accesses */ +#ifdef DHDENABLE_TAILPAD + uint tx_tailpad_chain; /* Number of tail padding by chaining pad_pkt */ + uint tx_tailpad_pktget; /* Number of tail padding by new PKTGET */ +#endif /* DHDENABLE_TAILPAD */ + uint8 *ctrl_frame_buf; + uint32 ctrl_frame_len; + bool ctrl_frame_stat; + uint32 rxint_mode; /* rx interrupt mode */ + bool remap; /* Contiguous 1MB RAM: 512K socram + 512K devram + * Available with socram rev 16 + * Remap region not DMA-able + */ + bool kso; + bool _slpauto; + bool _oobwakeup; + bool _srenab; + bool readframes; + bool reqbussleep; + uint32 resetinstr; + uint32 dongle_ram_base; + + void *glom_pkt_arr[SDPCM_MAXGLOM_SIZE]; /* Array of pkts for glomming */ + uint32 txglom_cnt; /* Number of pkts in the glom array */ + uint32 txglom_total_len; /* Total length of pkts in glom array */ + bool txglom_enable; /* Flag to indicate whether tx glom is enabled/disabled */ + uint32 txglomsize; /* Glom size limitation */ +#ifdef DHDENABLE_TAILPAD + void *pad_pkt; +#endif /* DHDENABLE_TAILPAD */ +} dhd_bus_t; + +/* clkstate */ +#define CLK_NONE 0 +#define CLK_SDONLY 1 +#define CLK_PENDING 2 /* Not used yet */ +#define CLK_AVAIL 3 + +#define DHD_NOPMU(dhd) (FALSE) + +#if defined(BCMSDIOH_STD) +#define BLK_64_MAXTXGLOM 20 +#endif /* BCMSDIOH_STD */ + +#ifdef DHD_DEBUG +static int qcount[NUMPRIO]; +static int tx_packets[NUMPRIO]; +#endif /* DHD_DEBUG */ + +/* Deferred transmit */ +const uint dhd_deferred_tx = 1; + +extern uint dhd_watchdog_ms; + +extern void dhd_os_wd_timer(void *bus, uint wdtick); +int dhd_enableOOB(dhd_pub_t *dhd, bool sleep); + +/* Tx/Rx bounds */ +uint dhd_txbound; +uint dhd_rxbound; +uint dhd_txminmax = DHD_TXMINMAX; + +/* override the RAM size if possible */ +#define DONGLE_MIN_RAMSIZE (128 *1024) +int dhd_dongle_ramsize; + +uint dhd_doflow = TRUE; +uint dhd_dpcpoll = FALSE; + +module_param(dhd_doflow, uint, 0644); +module_param(dhd_dpcpoll, uint, 0644); + +static bool dhd_alignctl; + +static bool sd1idle; + +static bool retrydata; +#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata) + +static uint watermark = 8; +static uint mesbusyctrl = 0; +static const uint firstread = DHD_FIRSTREAD; + +/* Retry count for register access failures */ +static const uint retry_limit = 2; + +/* Force even SD lengths (some host controllers mess up on odd bytes) */ +static bool forcealign; + +#define ALIGNMENT 4 + +#if defined(OOB_INTR_ONLY) && defined(HW_OOB) +extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable); +#endif + +#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) +#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD +#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */ +#define PKTALIGN(osh, p, len, align) \ + do { \ + uintptr datalign; \ + datalign = (uintptr)PKTDATA((osh), (p)); \ + datalign = ROUNDUP(datalign, (align)) - datalign; \ + ASSERT(datalign < (align)); \ + ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign)); \ + if (datalign) \ 
+ PKTPULL((osh), (p), (uint)datalign); \ + PKTSETLEN((osh), (p), (len)); \ + } while (0) + +/* Limit on rounding up frames */ +static const uint max_roundup = 512; + +/* Try doing readahead */ +static bool dhd_readahead; + +/* To check if there's window offered */ +#define DATAOK(bus) \ + (((uint8)(bus->tx_max - bus->tx_seq) > 1) && \ + (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0)) + +/* To check if there's window offered for ctrl frame */ +#define TXCTLOK(bus) \ + (((uint8)(bus->tx_max - bus->tx_seq) != 0) && \ + (((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0)) + +/* Number of pkts available in dongle for data RX */ +#define DATABUFCNT(bus) \ + ((uint8)(bus->tx_max - bus->tx_seq) - 1) + +/* Macros to get register read/write status */ +/* NOTE: these assume a local dhdsdio_bus_t *bus! */ +#define R_SDREG(regvar, regaddr, retryvar) \ +do { \ + retryvar = 0; \ + do { \ + regvar = R_REG(bus->dhd->osh, regaddr); \ + } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \ + if (retryvar) { \ + bus->regfails += (retryvar-1); \ + if (retryvar > retry_limit) { \ + DHD_ERROR(("%s: FAILED" #regvar "READ, LINE %d\n", \ + __FUNCTION__, __LINE__)); \ + regvar = 0; \ + } \ + } \ +} while (0) + +#define W_SDREG(regval, regaddr, retryvar) \ +do { \ + retryvar = 0; \ + do { \ + W_REG(bus->dhd->osh, regaddr, regval); \ + } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \ + if (retryvar) { \ + bus->regfails += (retryvar-1); \ + if (retryvar > retry_limit) \ + DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \ + __FUNCTION__, __LINE__)); \ + } \ +} while (0) + +#define BUS_WAKE(bus) \ + do { \ + bus->idlecount = 0; \ + if ((bus)->sleeping) \ + dhdsdio_bussleep((bus), FALSE); \ + } while (0); + +/* + * pktavail interrupts from dongle to host can be managed in 3 different ways + * whenever there is a packet available in dongle to transmit to host. + * + * Mode 0: Dongle writes the software host mailbox and host is interrupted. + * Mode 1: (sdiod core rev >= 4) + * Device sets a new bit in the intstatus whenever there is a packet + * available in fifo. Host can't clear this specific status bit until all the + * packets are read from the FIFO. No need to ack dongle intstatus. + * Mode 2: (sdiod core rev >= 4) + * Device sets a bit in the intstatus, and host acks this by writing + * one to this bit. Dongle won't generate anymore packet interrupts + * until host reads all the packets from the dongle and reads a zero to + * figure that there are no more packets. No need to disable host ints. + * Need to ack the intstatus. + */ + +#define SDIO_DEVICE_HMB_RXINT 0 /* default old way */ +#define SDIO_DEVICE_RXDATAINT_MODE_0 1 /* from sdiod rev 4 */ +#define SDIO_DEVICE_RXDATAINT_MODE_1 2 /* from sdiod rev 4 */ + + +#define FRAME_AVAIL_MASK(bus) \ + ((bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ? 
I_HMB_FRAME_IND : I_XMTDATA_AVAIL) + +#define DHD_BUS SDIO_BUS + +#define PKT_AVAILABLE(bus, intstatus) ((intstatus) & (FRAME_AVAIL_MASK(bus))) + +#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE) + +#define GSPI_PR55150_BAILOUT + +#ifdef SDTEST +static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq); +static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint count); +#endif + +static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size); +#ifdef DHD_DEBUG +static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror); +#endif /* DHD_DEBUG */ + +#if defined(DHD_FW_COREDUMP) +static int dhdsdio_mem_dump(dhd_bus_t *bus); +#endif /* DHD_FW_COREDUMP */ +static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap); +static int dhdsdio_download_state(dhd_bus_t *bus, bool enter); + +static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh); +static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh); +static void dhdsdio_disconnect(void *ptr); +static bool dhdsdio_chipmatch(uint16 chipid); +static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh, + void * regsva, uint16 devid); +static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh); +static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh); +static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, + bool reset_flag); + +static void dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size); +static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle); +static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry); +static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt); +static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq, + int prev_chain_total_len, bool last_chained_pkt, + int *pad_pkt_len, void **new_pkt); +static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt); + +static int dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh); +static int _dhdsdio_download_firmware(dhd_bus_t *bus); + +static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path); +static int dhdsdio_download_nvram(dhd_bus_t *bus); +#ifdef BCMEMBEDIMAGE +static int dhdsdio_download_code_array(dhd_bus_t *bus); +#endif +static int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep); +static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok); +static uint8 dhdsdio_sleepcsr_get(dhd_bus_t *bus); + +#ifdef WLMEDIA_HTSF +#include +extern uint32 dhd_get_htsf(void *dhd, int ifidx); +#endif /* WLMEDIA_HTSF */ + +static void +dhdsdio_tune_fifoparam(struct dhd_bus *bus) +{ + int err; + uint8 devctl, wm, mes; + + if (bus->sih->buscorerev >= 15) { + /* See .ppt in PR for these recommended values */ + if (bus->blocksize == 512) { + wm = OVERFLOW_BLKSZ512_WM; + mes = OVERFLOW_BLKSZ512_MES; + } else { + mes = bus->blocksize/4; + wm = bus->blocksize/4; + } + + watermark = wm; + mesbusyctrl = mes; + } else { + DHD_INFO(("skip fifotune: SdioRev(%d) is lower than minimal requested ver\n", + bus->sih->buscorerev)); + return; + } + + /* Update watermark */ + if (wm > 0) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, wm, &err); + + devctl = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + devctl |= SBSDIO_DEVCTL_F2WM_ENAB; + 
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + } + + /* Update MES */ + if (mes > 0) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL, + (mes | SBSDIO_MESBUSYCTRL_ENAB), &err); + } + + DHD_INFO(("Apply overflow WAR: 0x%02x 0x%02x 0x%02x\n", + bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err), + bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, &err), + bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL, &err))); +} + +static void +dhd_dongle_setramsize(struct dhd_bus *bus, int mem_size) +{ + int32 min_size = DONGLE_MIN_RAMSIZE; + /* Restrict the ramsize to user specified limit */ + DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n", + dhd_dongle_ramsize, min_size)); + if ((dhd_dongle_ramsize > min_size) && + (dhd_dongle_ramsize < (int32)bus->orig_ramsize)) + bus->ramsize = dhd_dongle_ramsize; +} + +static int +dhdsdio_set_siaddr_window(dhd_bus_t *bus, uint32 address) +{ + int err = 0; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, + (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err); + if (!err) + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, + (address >> 16) & SBSDIO_SBADDRMID_MASK, &err); + if (!err) + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, + (address >> 24) & SBSDIO_SBADDRHIGH_MASK, &err); + return err; +} + + +#ifdef USE_OOB_GPIO1 +static int +dhdsdio_oobwakeup_init(dhd_bus_t *bus) +{ + uint32 val, addr, data; + + bcmsdh_gpioouten(bus->sdh, GPIO_DEV_WAKEUP); + + addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr); + data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data); + + /* Set device for gpio1 wakeup */ + bcmsdh_reg_write(bus->sdh, addr, 4, 2); + val = bcmsdh_reg_read(bus->sdh, data, 4); + val |= CC_CHIPCTRL2_GPIO1_WAKEUP; + bcmsdh_reg_write(bus->sdh, data, 4, val); + + bus->_oobwakeup = TRUE; + + return 0; +} +#endif /* USE_OOB_GPIO1 */ + +/* + * Query if FW is in SR mode + */ +static bool +dhdsdio_sr_cap(dhd_bus_t *bus) +{ + bool cap = FALSE; + uint32 core_capext, addr, data; + + if (bus->sih->chip == BCM43430_CHIP_ID) { + /* check if fw initialized sr engine */ + addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, sr_control1); + if (bcmsdh_reg_read(bus->sdh, addr, 4) != 0) + cap = TRUE; + + return cap; + } + if (bus->sih->chip == BCM4324_CHIP_ID) { + addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr); + data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data); + bcmsdh_reg_write(bus->sdh, addr, 4, 3); + core_capext = bcmsdh_reg_read(bus->sdh, data, 4); + } else if (bus->sih->chip == BCM4330_CHIP_ID) { + core_capext = FALSE; + } else if ((bus->sih->chip == BCM4335_CHIP_ID) || + (bus->sih->chip == BCM4339_CHIP_ID) || + (bus->sih->chip == BCM43349_CHIP_ID) || + (bus->sih->chip == BCM4345_CHIP_ID) || + (bus->sih->chip == BCM43454_CHIP_ID) || + (bus->sih->chip == BCM4354_CHIP_ID) || + (bus->sih->chip == BCM4356_CHIP_ID) || + (bus->sih->chip == BCM4358_CHIP_ID) || + (BCM4349_CHIP(bus->sih->chip)) || + (bus->sih->chip == BCM4350_CHIP_ID)) { + core_capext = TRUE; + } else { + core_capext = bcmsdh_reg_read(bus->sdh, CORE_CAPEXT_ADDR, 4); + core_capext = (core_capext & CORE_CAPEXT_SR_SUPPORTED_MASK); + } + if (!(core_capext)) + return FALSE; + + if (bus->sih->chip == BCM4324_CHIP_ID) { + /* FIX: Should change to query SR control register instead */ + cap = TRUE; + } else if ((bus->sih->chip == BCM4335_CHIP_ID) || + (bus->sih->chip == BCM4339_CHIP_ID) || + (bus->sih->chip == BCM43349_CHIP_ID) || + 
(bus->sih->chip == BCM4345_CHIP_ID) || + (bus->sih->chip == BCM43454_CHIP_ID) || + (bus->sih->chip == BCM4354_CHIP_ID) || + (bus->sih->chip == BCM4356_CHIP_ID) || + (bus->sih->chip == BCM4358_CHIP_ID) || + (bus->sih->chip == BCM4350_CHIP_ID)) { + uint32 enabval = 0; + addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr); + data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data); + bcmsdh_reg_write(bus->sdh, addr, 4, CC_PMUCC3); + enabval = bcmsdh_reg_read(bus->sdh, data, 4); + + if ((bus->sih->chip == BCM4350_CHIP_ID) || + (bus->sih->chip == BCM4345_CHIP_ID) || + (bus->sih->chip == BCM43454_CHIP_ID) || + (bus->sih->chip == BCM4354_CHIP_ID) || + (bus->sih->chip == BCM4356_CHIP_ID) || + (bus->sih->chip == BCM4358_CHIP_ID)) + enabval &= CC_CHIPCTRL3_SR_ENG_ENABLE; + + if (enabval) + cap = TRUE; + } else { + data = bcmsdh_reg_read(bus->sdh, + SI_ENUM_BASE + OFFSETOF(chipcregs_t, retention_ctl), 4); + if ((data & (RCTL_MACPHY_DISABLE_MASK | RCTL_LOGIC_DISABLE_MASK)) == 0) + cap = TRUE; + } + + return cap; +} + +static int +dhdsdio_srwar_init(dhd_bus_t *bus) +{ + bcmsdh_gpio_init(bus->sdh); + +#ifdef USE_OOB_GPIO1 + dhdsdio_oobwakeup_init(bus); +#endif + + + return 0; +} + +static int +dhdsdio_sr_init(dhd_bus_t *bus) +{ + uint8 val; + int err = 0; + + if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) + dhdsdio_srwar_init(bus); + + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL); + val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, + 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT, &err); + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL); + +#ifdef USE_CMD14 + /* Add CMD14 Support */ + dhdsdio_devcap_set(bus, + (SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT)); +#endif /* USE_CMD14 */ + + dhdsdio_devcap_set(bus, SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_FORCE_HT, &err); + + bus->_slpauto = dhd_slpauto ? TRUE : FALSE; + + bus->_srenab = TRUE; + + return 0; +} + +/* + * FIX: Be sure KSO bit is enabled + * Currently, it's defaulting to 0 which should be 1. + */ +static int +dhdsdio_clk_kso_init(dhd_bus_t *bus) +{ + uint8 val; + int err = 0; + + /* set flag */ + bus->kso = TRUE; + + /* + * Enable KeepSdioOn (KSO) bit for normal operation + * Default is 0 (4334A0) so set it. Fixed in B0. + */ + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, NULL); + if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) { + val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, val, &err); + if (err) + DHD_ERROR(("%s: SBSDIO_FUNC1_SLEEPCSR err: 0x%x\n", __FUNCTION__, err)); + } + + return 0; +} + +#define KSO_DBG(x) +#define KSO_WAIT_US 50 +#define KSO_WAIT_MS 1 +#define KSO_SLEEP_RETRY_COUNT 20 +#define KSO_WAKE_RETRY_COUNT 100 +#define ERROR_BCME_NODEVICE_MAX 1 + +#define DEFAULT_MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US) +#ifndef CUSTOM_MAX_KSO_ATTEMPTS +#define CUSTOM_MAX_KSO_ATTEMPTS DEFAULT_MAX_KSO_ATTEMPTS +#endif + +static int +dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on) +{ + uint8 wr_val = 0, rd_val, cmp_val, bmask; + int err = 0; + int try_cnt = 0; + + KSO_DBG(("%s> op:%s\n", __FUNCTION__, (on ? 
"KSO_SET" : "KSO_CLR"))); + + wr_val |= (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err); + + if (on) { + cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK | SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK; + bmask = cmp_val; + + OSL_SLEEP(3); + + } else { + /* Put device to sleep, turn off KSO */ + cmp_val = 0; + bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK; + } + + do { + rd_val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err); + if (((rd_val & bmask) == cmp_val) && !err) + break; + + KSO_DBG(("%s> KSO wr/rd retry:%d, ERR:%x \n", __FUNCTION__, try_cnt, err)); + + if (((try_cnt + 1) % KSO_SLEEP_RETRY_COUNT) == 0) { + OSL_SLEEP(KSO_WAIT_MS); + } else + OSL_DELAY(KSO_WAIT_US); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err); + } while (try_cnt++ < CUSTOM_MAX_KSO_ATTEMPTS); + + + if (try_cnt > 2) + KSO_DBG(("%s> op:%s, try_cnt:%d, rd_val:%x, ERR:%x \n", + __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err)); + + if (try_cnt > CUSTOM_MAX_KSO_ATTEMPTS) { + DHD_ERROR(("%s> op:%s, ERROR: try_cnt:%d, rd_val:%x, ERR:%x \n", + __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err)); + } + + return err; +} + +static int +dhdsdio_clk_kso_iovar(dhd_bus_t *bus, bool on) +{ + int err = 0; + + if (on == FALSE) { + + BUS_WAKE(bus); + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + DHD_ERROR(("%s: KSO disable clk: 0x%x\n", __FUNCTION__, + bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err))); + dhdsdio_clk_kso_enab(bus, FALSE); + } else { + DHD_ERROR(("%s: KSO enable\n", __FUNCTION__)); + + /* Make sure we have SD bus access */ + if (bus->clkstate == CLK_NONE) { + DHD_ERROR(("%s: Request SD clk\n", __FUNCTION__)); + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + } + + dhdsdio_clk_kso_enab(bus, TRUE); + + DHD_ERROR(("%s: sleepcsr: 0x%x\n", __FUNCTION__, + dhdsdio_sleepcsr_get(bus))); + } + + bus->kso = on; + BCM_REFERENCE(err); + + return 0; +} + +static uint8 +dhdsdio_sleepcsr_get(dhd_bus_t *bus) +{ + int err = 0; + uint8 val = 0; + + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, &err); + if (err) + DHD_TRACE(("Failed to read SLEEPCSR: %d\n", err)); + + return val; +} + +uint8 +dhdsdio_devcap_get(dhd_bus_t *bus) +{ + return bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, NULL); +} + +static int +dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap) +{ + int err = 0; + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_BRCM_CARDCAP, cap, &err); + if (err) + DHD_ERROR(("%s: devcap set err: 0x%x\n", __FUNCTION__, err)); + + return 0; +} + +static int +dhdsdio_clk_devsleep_iovar(dhd_bus_t *bus, bool on) +{ + int err = 0, retry; + uint8 val; + + retry = 0; + if (on == TRUE) { + /* Enter Sleep */ + + /* Be sure we request clk before going to sleep + * so we can wake-up with clk request already set + * else device can go back to sleep immediately + */ + if (!SLPAUTO_ENAB(bus)) + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + else { + val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if ((val & SBSDIO_CSR_MASK) == 0) { + DHD_ERROR(("%s: No clock before enter sleep:0x%x\n", + __FUNCTION__, val)); + + /* Reset clock request */ + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + SBSDIO_ALP_AVAIL_REQ, &err); + DHD_ERROR(("%s: clock before sleep:0x%x\n", __FUNCTION__, + bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err))); + } + } + + DHD_TRACE(("%s: clk before sleep: 0x%x\n", __FUNCTION__, + 
bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err))); +#ifdef USE_CMD14 + err = bcmsdh_sleep(bus->sdh, TRUE); +#else + + + err = dhdsdio_clk_kso_enab(bus, FALSE); + if (OOB_WAKEUP_ENAB(bus)) + { + err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, FALSE); /* GPIO_1 is off */ + } +#endif /* USE_CMD14 */ + } else { + /* Exit Sleep */ + /* Make sure we have SD bus access */ + if (bus->clkstate == CLK_NONE) { + DHD_TRACE(("%s: Request SD clk\n", __FUNCTION__)); + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + } + + if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) { + SPINWAIT_SLEEP(sdioh_spinwait_sleep, + (bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) != TRUE), + GPIO_DEV_SRSTATE_TIMEOUT); + + if (bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) == FALSE) { + DHD_ERROR(("ERROR: GPIO_DEV_SRSTATE still low!\n")); + } + } +#ifdef USE_CMD14 + err = bcmsdh_sleep(bus->sdh, FALSE); + if (SLPAUTO_ENAB(bus) && (err != 0)) { + OSL_DELAY(10000); + DHD_TRACE(("%s: Resync device sleep\n", __FUNCTION__)); + + /* Toggle sleep to resync with host and device */ + err = bcmsdh_sleep(bus->sdh, TRUE); + OSL_DELAY(10000); + err = bcmsdh_sleep(bus->sdh, FALSE); + + if (err) { + OSL_DELAY(10000); + DHD_ERROR(("%s: CMD14 exit failed again!\n", __FUNCTION__)); + + /* Toggle sleep to resync with host and device */ + err = bcmsdh_sleep(bus->sdh, TRUE); + OSL_DELAY(10000); + err = bcmsdh_sleep(bus->sdh, FALSE); + if (err) { + DHD_ERROR(("%s: CMD14 exit failed twice!\n", __FUNCTION__)); + DHD_ERROR(("%s: FATAL: Device non-response!\n", + __FUNCTION__)); + err = 0; + } + } + } +#else + if (OOB_WAKEUP_ENAB(bus)) + { + err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, TRUE); /* GPIO_1 is on */ + } + do { + err = dhdsdio_clk_kso_enab(bus, TRUE); + if (err) + OSL_SLEEP(10); + } while ((err != 0) && (++retry < 3)); + + if (err != 0) { + DHD_ERROR(("ERROR: kso set failed retry: %d\n", retry)); + err = 0; /* continue anyway */ + } + + +#endif /* !USE_CMD14 */ + + if (err == 0) { + uint8 csr; + + /* Wait for device ready during transition to wake-up */ + SPINWAIT_SLEEP(sdioh_spinwait_sleep, + (((csr = dhdsdio_sleepcsr_get(bus)) & + SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK) != + (SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)), (20000)); + + DHD_TRACE(("%s: ExitSleep sleepcsr: 0x%x\n", __FUNCTION__, csr)); + + if (!(csr & SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK)) { + DHD_ERROR(("%s:ERROR: ExitSleep device NOT Ready! 0x%x\n", + __FUNCTION__, csr)); + err = BCME_NODEVICE; + } + + SPINWAIT_SLEEP(sdioh_spinwait_sleep, + (((csr = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err)) & SBSDIO_HT_AVAIL) != + (SBSDIO_HT_AVAIL)), (10000)); + + DHD_TRACE(("%s: SBSDIO_FUNC1_CHIPCLKCSR : 0x%x\n", __FUNCTION__, csr)); + if (!err && ((csr & SBSDIO_HT_AVAIL) != SBSDIO_HT_AVAIL)) { + DHD_ERROR(("%s:ERROR: device NOT Ready! 0x%x\n", + __FUNCTION__, csr)); + err = BCME_NODEVICE; + } + } + } + + /* Update if successful */ + if (err == 0) + bus->kso = on ? 
FALSE : TRUE; + else { + DHD_ERROR(("%s: Sleep request failed: kso:%d on:%d err:%d\n", + __FUNCTION__, bus->kso, on, err)); + if (!on && retry > 2) + bus->kso = FALSE; + } + + return err; +} + +/* Turn backplane clock on or off */ +static int +dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok) +{ +#define HT_AVAIL_ERROR_MAX 10 + static int ht_avail_error = 0; + int err; + uint8 clkctl, clkreq, devctl; + bcmsdh_info_t *sdh; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + clkctl = 0; + sdh = bus->sdh; + + + if (!KSO_ENAB(bus)) + return BCME_OK; + + if (SLPAUTO_ENAB(bus)) { + bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY); + return BCME_OK; + } + + if (on) { + /* Request HT Avail */ + clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ; + + + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err); + if (err) { + ht_avail_error++; + if (ht_avail_error < HT_AVAIL_ERROR_MAX) { + DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err)); + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) + else if (ht_avail_error == HT_AVAIL_ERROR_MAX) { + bus->dhd->hang_reason = HANG_REASON_HT_AVAIL_ERROR; + dhd_os_send_hang_message(bus->dhd); + } +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */ + return BCME_ERROR; + } else { + ht_avail_error = 0; + } + + + /* Check current status */ + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (err) { + DHD_ERROR(("%s: HT Avail read error: %d\n", __FUNCTION__, err)); + return BCME_ERROR; + } + +#if !defined(OOB_INTR_ONLY) + /* Go to pending and await interrupt if appropriate */ + if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) { + /* Allow only clock-available interrupt */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + if (err) { + DHD_ERROR(("%s: Devctl access error setting CA: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + + devctl |= SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + DHD_INFO(("CLKCTL: set PENDING\n")); + bus->clkstate = CLK_PENDING; + return BCME_OK; + } else +#endif /* !defined (OOB_INTR_ONLY) */ + { + if (bus->clkstate == CLK_PENDING) { + /* Cancel CA-only interrupt filter */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + } + } + + /* Otherwise, wait here (polling) for HT Avail */ + if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) { + SPINWAIT_SLEEP(sdioh_spinwait_sleep, + ((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, &err)), + !SBSDIO_CLKAV(clkctl, bus->alp_only)), PMU_MAX_TRANSITION_DLY); + } + if (err) { + DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err)); + return BCME_ERROR; + } + if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) { + DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n", + __FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl)); + return BCME_ERROR; + } + + /* Mark clock available */ + bus->clkstate = CLK_AVAIL; + DHD_INFO(("CLKCTL: turned ON\n")); + +#if defined(DHD_DEBUG) + if (bus->alp_only == TRUE) { +#if !defined(BCMLXSDMMC) + if (!SBSDIO_ALPONLY(clkctl)) { + DHD_ERROR(("%s: HT Clock, when ALP Only\n", __FUNCTION__)); + } +#endif /* !defined(BCMLXSDMMC) */ + } else { + if (SBSDIO_ALPONLY(clkctl)) { + DHD_ERROR(("%s: HT Clock should be on.\n", __FUNCTION__)); + } + } +#endif /* defined (DHD_DEBUG) */ + + bus->activity = TRUE; +#ifdef DHD_USE_IDLECOUNT + bus->idlecount = 0; +#endif /* 
DHD_USE_IDLECOUNT */ + } else { + clkreq = 0; + + if (bus->clkstate == CLK_PENDING) { + /* Cancel CA-only interrupt filter */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + } + + bus->clkstate = CLK_SDONLY; + if (!SR_ENAB(bus)) { + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err); + DHD_INFO(("CLKCTL: turned OFF\n")); + if (err) { + DHD_ERROR(("%s: Failed access turning clock off: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + } + return BCME_OK; +} + +/* Change idle/active SD state */ +static int +dhdsdio_sdclk(dhd_bus_t *bus, bool on) +{ + int err; + int32 iovalue; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (on) { + if (bus->idleclock == DHD_IDLE_STOP) { + /* Turn on clock and restore mode */ + iovalue = 1; + err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error enabling sd_clock: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + + iovalue = bus->sd_mode; + err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error changing sd_mode: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } else if (bus->idleclock != DHD_IDLE_ACTIVE) { + /* Restore clock speed */ + iovalue = bus->sd_divisor; + err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error restoring sd_divisor: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + bus->clkstate = CLK_SDONLY; + } else { + /* Stop or slow the SD clock itself */ + if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) { + DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n", + __FUNCTION__, bus->sd_divisor, bus->sd_mode)); + return BCME_ERROR; + } + if (bus->idleclock == DHD_IDLE_STOP) { + if (sd1idle) { + /* Change to SD1 mode and turn off clock */ + iovalue = 1; + err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error changing sd_clock: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + + iovalue = 0; + err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error disabling sd_clock: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } else if (bus->idleclock != DHD_IDLE_ACTIVE) { + /* Set divisor to idle value */ + iovalue = bus->idleclock; + err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0, + &iovalue, sizeof(iovalue), TRUE); + if (err) { + DHD_ERROR(("%s: error changing sd_divisor: %d\n", + __FUNCTION__, err)); + return BCME_ERROR; + } + } + bus->clkstate = CLK_NONE; + } + + return BCME_OK; +} + +/* Transition SD and backplane clock readiness */ +static int +dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok) +{ + int ret = BCME_OK; +#ifdef DHD_DEBUG + uint oldstate = bus->clkstate; +#endif /* DHD_DEBUG */ + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Early exit if we're already there */ + if (bus->clkstate == target) { + if (target == CLK_AVAIL) { + dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms); + bus->activity = TRUE; +#ifdef DHD_USE_IDLECOUNT + bus->idlecount = 0; +#endif /* DHD_USE_IDLECOUNT */ + } + return ret; + } + + switch (target) { + case CLK_AVAIL: + /* Make sure SD clock is available */ + if (bus->clkstate == CLK_NONE) + dhdsdio_sdclk(bus, TRUE); + /* Now request HT 
Avail on the backplane */
+ ret = dhdsdio_htclk(bus, TRUE, pendok);
+ if (ret == BCME_OK) {
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ bus->activity = TRUE;
+#ifdef DHD_USE_IDLECOUNT
+ bus->idlecount = 0;
+#endif /* DHD_USE_IDLECOUNT */
+ }
+ break;
+
+ case CLK_SDONLY:
+ /* Remove HT request, or bring up SD clock */
+ if (bus->clkstate == CLK_NONE)
+ ret = dhdsdio_sdclk(bus, TRUE);
+ else if (bus->clkstate == CLK_AVAIL)
+ ret = dhdsdio_htclk(bus, FALSE, FALSE);
+ else
+ DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n",
+ bus->clkstate, target));
+ if (ret == BCME_OK) {
+ dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ }
+ break;
+
+ case CLK_NONE:
+ /* Make sure to remove HT request */
+ if (bus->clkstate == CLK_AVAIL)
+ ret = dhdsdio_htclk(bus, FALSE, FALSE);
+ /* Now remove the SD clock */
+ ret = dhdsdio_sdclk(bus, FALSE);
+#ifdef DHD_DEBUG
+ if (dhd_console_ms == 0)
+#endif /* DHD_DEBUG */
+ if (bus->poll == 0)
+ dhd_os_wd_timer(bus->dhd, 0);
+ break;
+ }
+#ifdef DHD_DEBUG
+ DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate));
+#endif /* DHD_DEBUG */
+
+ return ret;
+}
+
+static int
+dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
+{
+ int err = 0;
+ bcmsdh_info_t *sdh = bus->sdh;
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+
+ DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
+ (sleep ? "SLEEP" : "WAKE"),
+ (bus->sleeping ? "SLEEP" : "WAKE")));
+
+ if (bus->dhd->hang_was_sent)
+ return BCME_ERROR;
+
+ /* Done if we're already in the requested state */
+ if (sleep == bus->sleeping)
+ return BCME_OK;
+
+ /* Going to sleep: set the alarm and turn off the lights... */
+ if (sleep) {
+ /* Don't sleep if something is pending */
+#ifdef DHD_USE_IDLECOUNT
+ if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq) || bus->readframes ||
+ bus->ctrl_frame_stat)
+#else
+ if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
+#endif /* DHD_USE_IDLECOUNT */
+ return BCME_BUSY;
+
+
+ if (!SLPAUTO_ENAB(bus)) {
+ /* Disable SDIO interrupts (no longer interested) */
+ bcmsdh_intr_disable(bus->sdh);
+
+ /* Make sure the controller has the bus up */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Tell device to start using OOB wakeup */
+ W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+ /* Turn off our contribution to the HT clock request */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+
+ /* Isolate the bus */
+ if (bus->sih->chip != BCM4329_CHIP_ID &&
+ bus->sih->chip != BCM4319_CHIP_ID) {
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
+ SBSDIO_DEVCTL_PADS_ISO, NULL);
+ }
+ } else {
+ /* Leave interrupts enabled since device can exit sleep and
+ * interrupt host
+ */
+ err = dhdsdio_clk_devsleep_iovar(bus, TRUE /* sleep */);
+ }
+
+ /* Change state */
+ bus->sleeping = TRUE;
+#if defined(SUPPORT_P2P_GO_PS)
+ wake_up(&bus->bus_sleep);
+#endif /* LINUX && SUPPORT_P2P_GO_PS */
+ } else {
+ /* Waking up: bus power up is ok, set local state */
+
+ if (!SLPAUTO_ENAB(bus)) {
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, &err);
+
+ /* Force pad isolation off if possible (in case power never toggled) */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL);
+
+
+ /* Make sure the controller has the bus up */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Send misc interrupt to indicate OOB not needed */
+ W_SDREG(0, &regs->tosbmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+ /* Make sure we have SD bus access */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ /* Enable interrupts again */
+ if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) {
+ bus->intdis = FALSE;
+ bcmsdh_intr_enable(bus->sdh);
+ }
+ } else {
+ err = dhdsdio_clk_devsleep_iovar(bus, FALSE /* wake */);
+ }
+
+ if (err == 0) {
+ /* Change state */
+ bus->sleeping = FALSE;
+ }
+ }
+
+ return err;
+}
+
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size)
+{
+ int func_blk_size = function_num;
+ int bcmerr = 0;
+ int result;
+
+ bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", &func_blk_size,
+ sizeof(int), &result, sizeof(int), IOV_GET);
+
+ if (bcmerr != BCME_OK) {
+ DHD_ERROR(("%s: Get F%d Block size error\n", __FUNCTION__, function_num));
+ return BCME_ERROR;
+ }
+
+ if (result != block_size) {
+ DHD_TRACE_HW4(("%s: F%d Block size set from %d to %d\n",
+ __FUNCTION__, function_num, result, block_size));
+ func_blk_size = function_num << 16 | block_size;
+ bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", NULL,
+ 0, &func_blk_size, sizeof(int32), IOV_SET);
+ if (bcmerr != BCME_OK) {
+ DHD_ERROR(("%s: Set F2 Block size error\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ }
+
+ return BCME_OK;
+}
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+
+#if defined(OOB_INTR_ONLY)
+void
+dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
+{
+#if defined(HW_OOB)
+ bcmsdh_enable_hw_oob_intr(bus->sdh, enable);
+#else
+ sdpcmd_regs_t *regs = bus->regs;
+ uint retries = 0;
+
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ if (enable == TRUE) {
+
+ /* Tell device to start using OOB wakeup */
+ W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+ if (retries > retry_limit)
+ DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+
+ } else {
+ /* Send misc interrupt to indicate OOB not needed */
+ W_SDREG(0, &regs->tosbmailboxdata, retries);
+ if (retries <= retry_limit)
+ W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+ }
+
+ /* Turn off our contribution to the HT clock request */
+ dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+#endif /* !defined(HW_OOB) */
+}
+#endif
+
+int
+dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
+{
+ int ret = BCME_ERROR;
+ osl_t *osh;
+ uint datalen, prec;
+#if defined(DHD_TX_DUMP)
+ uint8 *dump_data;
+ uint16 protocol;
+#endif /* DHD_TX_DUMP */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ osh = bus->dhd->osh;
+ datalen = PKTLEN(osh, pkt);
+
+#ifdef SDTEST
+ /* Push the test header if doing loopback */
+ if (bus->ext_loop) {
+ uint8* data;
+ PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN);
+ data = PKTDATA(osh, pkt);
+ *data++ = SDPCM_TEST_ECHOREQ;
+ *data++ = (uint8)bus->loopid++;
+ *data++ = (datalen >> 0);
+ *data++ = (datalen >> 8);
+ datalen += SDPCM_TEST_HDRLEN;
+ }
+#else /* SDTEST */
+ BCM_REFERENCE(datalen);
+#endif /* SDTEST */
+
+#if defined(DHD_TX_DUMP)
+ dump_data = PKTDATA(osh, pkt);
+ dump_data += 4; /* skip 4 bytes header */
+ protocol = (dump_data[12] << 8) | dump_data[13];
+
+ if (protocol == ETHER_TYPE_802_1X) {
+ DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
+ dump_data[14], dump_data[15], dump_data[30]));
+ }
+#endif /* DHD_TX_DUMP */
+
+#if defined(DHD_TX_DUMP) && defined(DHD_TX_FULL_DUMP)
+ {
+ int i;
+ DHD_ERROR(("TX DUMP\n"));
+
+ for (i = 0; i < (datalen - 4); i++) {
+ DHD_ERROR(("%02X ", dump_data[i]));
+ if ((i & 15) == 15)
+ printk("\n");
+ }
+ DHD_ERROR(("\n"));
+ }
+#endif /* DHD_TX_DUMP && DHD_TX_FULL_DUMP */
+
+ prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
+
+ /* Check for existing queue, current flow-control, pending event, or pending clock */
+ if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched ||
+ (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
+ (bus->clkstate != CLK_AVAIL)) {
+ bool deq_ret;
+ int pkq_len;
+
+ DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__, pktq_len(&bus->txq)));
+ bus->fcqueued++;
+
+ /* Priority based enq */
+ dhd_os_sdlock_txq(bus->dhd);
+ deq_ret = dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec);
+ dhd_os_sdunlock_txq(bus->dhd);
+
+ if (!deq_ret) {
+#ifdef PROP_TXSTATUS
+ if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt)) == 0)
+#endif /* PROP_TXSTATUS */
+ {
+#ifdef DHDTCPACK_SUPPRESS
+ if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) {
+ DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using\n",
+ __FUNCTION__, __LINE__));
+ dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
+ }
+#endif /* DHDTCPACK_SUPPRESS */
+ dhd_txcomplete(bus->dhd, pkt, FALSE);
+ PKTFREE(osh, pkt, TRUE);
+ }
+ ret = BCME_NORESOURCE;
+ } else
+ ret = BCME_OK;
+
+ dhd_os_sdlock_txq(bus->dhd);
+ pkq_len = pktq_len(&bus->txq);
+ dhd_os_sdunlock_txq(bus->dhd);
+ if (pkq_len >= FCHI) {
+ bool wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+ wlfc_enabled = (dhd_wlfc_flowcontrol(bus->dhd, ON, FALSE) !=
+ WLFC_UNSUPPORTED);
+#endif
+ if (!wlfc_enabled && dhd_doflow) {
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+ }
+ }
+
+#ifdef DHD_DEBUG
+ dhd_os_sdlock_txq(bus->dhd);
+ if (pktq_plen(&bus->txq, prec) > qcount[prec])
+ qcount[prec] = pktq_plen(&bus->txq, prec);
+ dhd_os_sdunlock_txq(bus->dhd);
+#endif
+
+ /* Schedule DPC if needed to send queued packet(s) */
+ if (dhd_deferred_tx && !bus->dpc_sched) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ } else {
+ int chan = SDPCM_DATA_CHANNEL;
+
+#ifdef SDTEST
+ chan = (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL);
+#endif
+ /* Lock: we're about to use shared data/code (and SDIO) */
+ dhd_os_sdlock(bus->dhd);
+
+ /* Otherwise, send it now */
+ BUS_WAKE(bus);
+ /* Make sure backplane HT clk is on, no pending allowed */
+ dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+
+ ret = dhdsdio_txpkt(bus, chan, &pkt, 1, TRUE);
+
+ if (ret != BCME_OK)
+ bus->dhd->tx_errors++;
+ else
+ bus->dhd->dstats.tx_bytes += datalen;
+
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+ }
+
+ return ret;
+}
+
+/* align packet data pointer and packet length to n-byte boundary, process packet headers,
+ * a new packet may be allocated if there is not enough head and/or tail room for padding.
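+ *
+ * editor's worked example (illustrative only, assuming DHD_SDALIGN == 32): a
+ * frame whose data pointer sits 12 bytes past a 32-byte boundary gets
+ * head_padding == 12, i.e. PKTPUSH moves the pointer back to the boundary;
+ * for pkt_len == 1514 the aligned length 12 + 1514 == 1526 leaves
+ * 1526 % 32 == 22, so tail_padding == 10 and the frame occupies
+ * 1536 == 48 * 32 bytes in total.
+ *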
+ * the caller is responsible for updating the glom size in the head packet (when glom is + * used) + * + * pad_pkt_len: returns the length of extra padding needed from the padding packet, this parameter + * is taken in tx glom mode only + * + * new_pkt: out, pointer of the new packet allocated due to insufficient head room for alignment + * padding, NULL if not needed, the caller is responsible for freeing the new packet + * + * return: positive value - length of the packet, including head and tail padding + * negative value - errors + */ +static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq, + int prev_chain_total_len, bool last_chained_pkt, + int *pad_pkt_len, void **new_pkt) +{ + osl_t *osh; + uint8 *frame; + int pkt_len; + int modulo; + int head_padding; + int tail_padding = 0; + uint32 swheader; + uint32 swhdr_offset; + bool alloc_new_pkt = FALSE; + uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN; + + *new_pkt = NULL; + osh = bus->dhd->osh; + +#ifdef DHDTCPACK_SUPPRESS + if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + + /* Add space for the SDPCM hardware/software headers */ + PKTPUSH(osh, pkt, sdpcm_hdrlen); + ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2)); + + frame = (uint8*)PKTDATA(osh, pkt); + pkt_len = (uint16)PKTLEN(osh, pkt); + +#ifdef WLMEDIA_HTSF + frame = (uint8*)PKTDATA(osh, pkt); + if (PKTLEN(osh, pkt) >= 100) { + htsf_ts = (htsfts_t*) (frame + HTSF_HOSTOFFSET + 12); + if (htsf_ts->magic == HTSFMAGIC) { + htsf_ts->c20 = get_cycles(); + htsf_ts->t20 = dhd_get_htsf(bus->dhd->info, 0); + } + } +#endif /* WLMEDIA_HTSF */ +#ifdef DHD_DEBUG + if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets)) + tx_packets[PKTPRIO(pkt)]++; +#endif /* DHD_DEBUG */ + + /* align the data pointer, allocate a new packet if there is not enough space (new + * packet data pointer will be aligned thus no padding will be needed) + */ + head_padding = (ulong)frame % DHD_SDALIGN; + if (PKTHEADROOM(osh, pkt) < head_padding) { + head_padding = 0; + alloc_new_pkt = TRUE; + } else { + uint cur_chain_total_len; + int chain_tail_padding = 0; + + /* All packets need to be aligned by DHD_SDALIGN */ + modulo = (pkt_len + head_padding) % DHD_SDALIGN; + tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0; + + /* Total pkt chain length needs to be aligned by block size, + * unless it is a single pkt chain with total length less than one block size, + * which we prefer sending by byte mode. + * + * Do the chain alignment here if + * 1. This is the last pkt of the chain of multiple pkts or a single pkt. + * 2-1. This chain is of multiple pkts, or + * 2-2. This is a single pkt whose size is longer than one block size. + */ + cur_chain_total_len = prev_chain_total_len + + (head_padding + pkt_len + tail_padding); + if (last_chained_pkt && bus->blocksize != 0 && + (cur_chain_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) { + modulo = cur_chain_total_len % bus->blocksize; + chain_tail_padding = modulo > 0 ? 
(bus->blocksize - modulo) : 0;
+ }
+
+#ifdef DHDENABLE_TAILPAD
+ if (PKTTAILROOM(osh, pkt) < tail_padding) {
+ /* We don't have tail room to align by DHD_SDALIGN */
+ alloc_new_pkt = TRUE;
+ bus->tx_tailpad_pktget++;
+ } else if (PKTTAILROOM(osh, pkt) < tail_padding + chain_tail_padding) {
+ /* We have tail room for tail_padding of this pkt itself, but not for
+ * total pkt chain alignment by block size.
+ * Use the padding packet to avoid memory copy if applicable,
+ * otherwise, just allocate a new pkt.
+ */
+ if (bus->pad_pkt) {
+ *pad_pkt_len = chain_tail_padding;
+ bus->tx_tailpad_chain++;
+ } else {
+ alloc_new_pkt = TRUE;
+ bus->tx_tailpad_pktget++;
+ }
+ } else
+ /* This last pkt's tailroom is sufficient to hold both tail_padding
+ * of the pkt itself and chain_tail_padding of total pkt chain
+ */
+#endif /* DHDENABLE_TAILPAD */
+ tail_padding += chain_tail_padding;
+ }
+
+ DHD_INFO(("%s sdhdr len + orig_pkt_len %d h_pad %d t_pad %d pad_pkt_len %d\n",
+ __FUNCTION__, pkt_len, head_padding, tail_padding, *pad_pkt_len));
+
+ if (alloc_new_pkt) {
+ void *tmp_pkt;
+ int newpkt_size;
+ int cur_total_len;
+
+ ASSERT(*pad_pkt_len == 0);
+
+ DHD_INFO(("%s allocating new packet for padding\n", __FUNCTION__));
+
+ /* head pointer is aligned now, no padding needed */
+ head_padding = 0;
+
+ /* update the tail padding as it depends on the head padding; since a new packet is
+ * allocated, the head padding is no longer needed and the packet length is changed
+ */
+
+ cur_total_len = prev_chain_total_len + pkt_len;
+ if (last_chained_pkt && bus->blocksize != 0 &&
+ (cur_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) {
+ modulo = cur_total_len % bus->blocksize;
+ tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0;
+ }
+ else {
+ modulo = pkt_len % DHD_SDALIGN;
+ tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0;
+ }
+
+ newpkt_size = PKTLEN(osh, pkt) + bus->blocksize + DHD_SDALIGN;
+ bus->dhd->tx_realloc++;
+ tmp_pkt = PKTGET(osh, newpkt_size, TRUE);
+ if (tmp_pkt == NULL) {
+ DHD_ERROR(("failed to alloc new %d byte packet\n", newpkt_size));
+ return BCME_NOMEM;
+ }
+ PKTALIGN(osh, tmp_pkt, PKTLEN(osh, pkt), DHD_SDALIGN);
+ bcopy(PKTDATA(osh, pkt), PKTDATA(osh, tmp_pkt), PKTLEN(osh, pkt));
+ *new_pkt = tmp_pkt;
+ pkt = tmp_pkt;
+ }
+
+ if (head_padding)
+ PKTPUSH(osh, pkt, head_padding);
+
+ frame = (uint8*)PKTDATA(osh, pkt);
+ bzero(frame, head_padding + sdpcm_hdrlen);
+ pkt_len = (uint16)PKTLEN(osh, pkt);
+
+ /* the header has the following format
+ * 4-byte HW frame tag: length, ~length (for glom this is the total length)
+ *
+ * 8-byte HW extension flags (glom mode only) as the following:
+ * 2-byte packet length, excluding HW tag and padding
+ * 2-byte frame channel and frame flags (e.g. next frame following)
+ * 2-byte header length
+ * 2-byte tail padding size
+ *
+ * 8-byte SW frame tags as the following
+ * 4-byte flags: host tx seq, channel, data offset
+ * 4-byte flags: TBD
+ */
+
+ swhdr_offset = SDPCM_FRAMETAG_LEN;
+
+ /* hardware frame tag:
+ *
+ * in tx-glom mode, dongle only checks the hardware frame tag in the first
+ * packet and sees it as the total length of the glom (including tail padding),
+ * for each packet in the glom, the packet length needs to be updated, (see
+ * below PKTSETLEN)
+ *
+ * in non tx-glom mode, PKTLEN still needs to include tail padding as it is
+ * referred to in sdioh_request_buffer(). The tail length will be excluded in
+ * dhdsdio_txpkt_postprocess().
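+ *
+ * editor's illustration (not in the original comment): for a glom whose
+ * total length is 0x0400 bytes, the first four bytes on the wire are the
+ * little-endian pair 00 04 ff fb, i.e. htol16(0x0400) followed by
+ * htol16((uint16)~0x0400) == htol16(0xfbff).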
+ */
+ *(uint16*)frame = (uint16)htol16(pkt_len);
+ *(((uint16*)frame) + 1) = (uint16)htol16(~pkt_len);
+ pkt_len += tail_padding;
+
+ /* hardware extension flags */
+ if (bus->txglom_enable) {
+ uint32 hwheader1;
+ uint32 hwheader2;
+
+ swhdr_offset += SDPCM_HWEXT_LEN;
+ hwheader1 = (pkt_len - SDPCM_FRAMETAG_LEN - tail_padding) |
+ (last_chained_pkt << 24);
+ hwheader2 = (tail_padding) << 16;
+ htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+ }
+ PKTSETLEN((osh), (pkt), (pkt_len));
+
+ /* software frame tags */
+ swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+ | (txseq % SDPCM_SEQUENCE_WRAP) |
+ (((head_padding + sdpcm_hdrlen) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + swhdr_offset);
+ htol32_ua_store(0, frame + swhdr_offset + sizeof(swheader));
+
+ return pkt_len;
+}
+
+static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt)
+{
+ osl_t *osh;
+ uint8 *frame;
+ int data_offset;
+ int tail_padding;
+ int swhdr_offset = SDPCM_FRAMETAG_LEN + (bus->txglom_enable ? SDPCM_HWEXT_LEN : 0);
+
+ (void)osh;
+ osh = bus->dhd->osh;
+
+ /* restore pkt buffer pointer, but keep the header pushed by dhd_prot_hdrpush */
+ frame = (uint8*)PKTDATA(osh, pkt);
+
+ DHD_INFO(("%s PKTLEN before postprocess %d",
+ __FUNCTION__, PKTLEN(osh, pkt)));
+
+ /* PKTLEN still includes tail_padding, so exclude it.
+ * We shall have head_padding + original pkt_len for PKTLEN afterwards.
+ */
+ if (bus->txglom_enable) {
+ /* txglom pkts have tail_padding length in HW ext header */
+ tail_padding = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16;
+ PKTSETLEN(osh, pkt, PKTLEN(osh, pkt) - tail_padding);
+ DHD_INFO((" txglom pkt: tail_padding %d PKTLEN %d\n",
+ tail_padding, PKTLEN(osh, pkt)));
+ } else {
+ /* non-txglom pkts have head_padding + original pkt length in HW frame tag.
+ * We cannot refer to this field for txglom pkts as the first pkt of the chain will
+ * have the field for the total length of the chain.
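+ *
+ * (editor's illustration, not from the original comment: the tag was written
+ * in preprocess as head_padding + sdpcm_hdrlen + payload, tail padding
+ * excluded, so PKTSETLEN below restores exactly that length, and the PKTPULL
+ * of data_offset (= head_padding + sdpcm_hdrlen) further below strips the
+ * rest, leaving only the payload)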
+ */
+ PKTSETLEN(osh, pkt, *(uint16*)frame);
+ DHD_INFO((" non-txglom pkt: HW frame tag len %d after PKTLEN %d\n",
+ *(uint16*)frame, PKTLEN(osh, pkt)));
+ }
+
+ data_offset = ltoh32_ua(frame + swhdr_offset);
+ data_offset = (data_offset & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT;
+ /* Get rid of sdpcm header + head_padding */
+ PKTPULL(osh, pkt, data_offset);
+
+ DHD_INFO(("%s data_offset %d, PKTLEN %d\n",
+ __FUNCTION__, data_offset, PKTLEN(osh, pkt)));
+
+ return BCME_OK;
+}
+
+static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt)
+{
+ int i;
+ int ret = 0;
+ osl_t *osh;
+ bcmsdh_info_t *sdh;
+ void *pkt = NULL;
+ void *pkt_chain;
+ int total_len = 0;
+ void *head_pkt = NULL;
+ void *prev_pkt = NULL;
+ int pad_pkt_len = 0;
+ int new_pkt_num = 0;
+ void *new_pkts[MAX_TX_PKTCHAIN_CNT];
+ bool wlfc_enabled = FALSE;
+
+ if (bus->dhd->dongle_reset)
+ return BCME_NOTREADY;
+
+ sdh = bus->sdh;
+ osh = bus->dhd->osh;
+ /* init new_pkts[0] to make some compiler happy, not necessary as we check new_pkt_num */
+ new_pkts[0] = NULL;
+
+ for (i = 0; i < num_pkt; i++) {
+ int pkt_len;
+ bool last_pkt;
+ void *new_pkt = NULL;
+
+ pkt = pkts[i];
+ ASSERT(pkt);
+ last_pkt = (i == num_pkt - 1);
+ pkt_len = dhdsdio_txpkt_preprocess(bus, pkt, chan, bus->tx_seq + i,
+ total_len, last_pkt, &pad_pkt_len, &new_pkt);
+ if (pkt_len <= 0)
+ goto done;
+ if (new_pkt) {
+ pkt = new_pkt;
+ new_pkts[new_pkt_num++] = new_pkt;
+ }
+ total_len += pkt_len;
+
+ PKTSETNEXT(osh, pkt, NULL);
+ /* insert the packet into the list */
+ head_pkt ? PKTSETNEXT(osh, prev_pkt, pkt) : (head_pkt = pkt);
+ prev_pkt = pkt;
+
+ }
+
+ /* Update the HW frame tag (total length) in the first pkt of the glom */
+ if (bus->txglom_enable) {
+ uint8 *frame;
+
+ total_len += pad_pkt_len;
+ frame = (uint8*)PKTDATA(osh, head_pkt);
+ *(uint16*)frame = (uint16)htol16(total_len);
+ *(((uint16*)frame) + 1) = (uint16)htol16(~total_len);
+
+ }
+
+#ifdef DHDENABLE_TAILPAD
+ /* if a padding packet is needed, insert it at the end of the linked list */
+ if (pad_pkt_len) {
+ PKTSETLEN(osh, bus->pad_pkt, pad_pkt_len);
+ PKTSETNEXT(osh, pkt, bus->pad_pkt);
+ }
+#endif /* DHDENABLE_TAILPAD */
+
+ /* dhd_bcmsdh_send_buf ignores the buffer pointer if the packet
+ * parameter is not NULL; for a single (non-chained) packet we pass a NULL pkt pointer
+ * so it will take the aligned length and buffer pointer.
+ */
+ pkt_chain = PKTNEXT(osh, head_pkt) ?
head_pkt : NULL;
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ PKTDATA(osh, head_pkt), total_len, pkt_chain, NULL, NULL, TXRETRIES);
+ if (ret == BCME_OK)
+ bus->tx_seq = (bus->tx_seq + num_pkt) % SDPCM_SEQUENCE_WRAP;
+
+ /* if a padding packet was needed, remove it from the linked list as it is not a data pkt */
+ if (pad_pkt_len && pkt)
+ PKTSETNEXT(osh, pkt, NULL);
+
+done:
+ pkt = head_pkt;
+ while (pkt) {
+ void *pkt_next = PKTNEXT(osh, pkt);
+ PKTSETNEXT(osh, pkt, NULL);
+ dhdsdio_txpkt_postprocess(bus, pkt);
+ pkt = pkt_next;
+ }
+
+ /* new packets might be allocated due to insufficient room for padding, but we
+ * still have to indicate the original packets to upper layer
+ */
+ for (i = 0; i < num_pkt; i++) {
+ pkt = pkts[i];
+ wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+ if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt))) {
+ wlfc_enabled = (dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0) !=
+ WLFC_UNSUPPORTED);
+ }
+#endif /* PROP_TXSTATUS */
+ if (!wlfc_enabled) {
+ PKTSETNEXT(osh, pkt, NULL);
+ dhd_txcomplete(bus->dhd, pkt, ret != 0);
+ if (free_pkt)
+ PKTFREE(osh, pkt, TRUE);
+ }
+ }
+
+ for (i = 0; i < new_pkt_num; i++)
+ PKTFREE(osh, new_pkts[i], TRUE);
+
+ return ret;
+}
+
+static uint
+dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
+{
+ uint cnt = 0;
+ uint8 tx_prec_map;
+ uint16 txpktqlen = 0;
+ uint32 intstatus = 0;
+ uint retries = 0;
+ osl_t *osh;
+ uint datalen = 0;
+ dhd_pub_t *dhd = bus->dhd;
+ sdpcmd_regs_t *regs = bus->regs;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+ return BCME_NODEVICE;
+ }
+
+ osh = dhd->osh;
+ tx_prec_map = ~bus->flowcontrol;
+#ifdef DHD_LOSSLESS_ROAMING
+ tx_prec_map &= dhd->dequeue_prec_map;
+#endif
+ for (cnt = 0; (cnt < maxframes) && DATAOK(bus);) {
+ int i;
+ int num_pkt = 1;
+ void *pkts[MAX_TX_PKTCHAIN_CNT];
+ int prec_out;
+
+ dhd_os_sdlock_txq(bus->dhd);
+ if (bus->txglom_enable) {
+ uint32 glomlimit = (uint32)bus->txglomsize;
+#if defined(BCMSDIOH_STD)
+ if (bus->blocksize == 64) {
+ glomlimit = MIN((uint32)bus->txglomsize, BLK_64_MAXTXGLOM);
+ }
+#endif /* BCMSDIOH_STD */
+ num_pkt = MIN((uint32)DATABUFCNT(bus), glomlimit);
+ num_pkt = MIN(num_pkt, ARRAYSIZE(pkts));
+ }
+ num_pkt = MIN(num_pkt, pktq_mlen(&bus->txq, tx_prec_map));
+ for (i = 0; i < num_pkt; i++) {
+ pkts[i] = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
+ if (!pkts[i]) {
+ DHD_ERROR(("%s: pktq_mlen non-zero when no pkt\n",
+ __FUNCTION__));
+ ASSERT(0);
+ break;
+ }
+ PKTORPHAN(pkts[i]);
+ datalen += PKTLEN(osh, pkts[i]);
+ }
+ dhd_os_sdunlock_txq(bus->dhd);
+
+ if (i == 0)
+ break;
+ if (dhdsdio_txpkt(bus, SDPCM_DATA_CHANNEL, pkts, i, TRUE) != BCME_OK)
+ dhd->tx_errors++;
+ else
+ dhd->dstats.tx_bytes += datalen;
+ cnt += i;
+
+ /* In poll mode, need to check for other events */
+ if (!bus->intr && cnt)
+ {
+ /* Check device status, signal pending interrupt */
+ R_SDREG(intstatus, &regs->intstatus, retries);
+ bus->f2txdata++;
+ if (bcmsdh_regfail(bus->sdh))
+ break;
+ if (intstatus & bus->hostintmask)
+ bus->ipend = TRUE;
+ }
+
+ }
+
+ dhd_os_sdlock_txq(bus->dhd);
+ txpktqlen = pktq_len(&bus->txq);
+ dhd_os_sdunlock_txq(bus->dhd);
+
+ /* Do flow-control if needed */
+ if (dhd->up && (dhd->busstate == DHD_BUS_DATA) && (txpktqlen < FCLOW)) {
+ bool wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+ wlfc_enabled = (dhd_wlfc_flowcontrol(dhd, OFF, TRUE) != WLFC_UNSUPPORTED);
+#endif
+ if (!wlfc_enabled && dhd_doflow && dhd->txoff) {
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+
}
+ }
+
+ return cnt;
+}
+
+static void
+dhdsdio_sendpendctl(dhd_bus_t *bus)
+{
+ bcmsdh_info_t *sdh = bus->sdh;
+ int ret;
+ uint8* frame_seq = bus->ctrl_frame_buf + SDPCM_FRAMETAG_LEN;
+
+ if (bus->txglom_enable)
+ frame_seq += SDPCM_HWEXT_LEN;
+
+ if (*frame_seq != bus->tx_seq) {
+ DHD_INFO(("%s IOCTL frame seq lag detected!"
+ " frm_seq:%d != bus->tx_seq:%d, corrected\n",
+ __FUNCTION__, *frame_seq, bus->tx_seq));
+ *frame_seq = bus->tx_seq;
+ }
+
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ (uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
+ NULL, NULL, NULL, 1);
+ if (ret == BCME_OK)
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+
+ bus->ctrl_frame_stat = FALSE;
+ dhd_wait_event_wakeup(bus->dhd);
+}
+
+int
+dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+ static int err_nodevice = 0;
+ uint8 *frame;
+ uint16 len;
+ uint32 swheader;
+ bcmsdh_info_t *sdh = bus->sdh;
+ uint8 doff = 0;
+ int ret = -1;
+ uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->dongle_reset)
+ return -EIO;
+
+ /* Back up the pointer to make room for the bus header */
+ frame = msg - sdpcm_hdrlen;
+ len = (msglen += sdpcm_hdrlen);
+
+ /* Add alignment padding (optional for ctl frames) */
+ if (dhd_alignctl) {
+ if ((doff = ((uintptr)frame % DHD_SDALIGN))) {
+ frame -= doff;
+ len += doff;
+ msglen += doff;
+ bzero(frame, doff + sdpcm_hdrlen);
+ }
+ ASSERT(doff < DHD_SDALIGN);
+ }
+ doff += sdpcm_hdrlen;
+
+ /* Round send length to next SDIO block */
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ uint16 pad = bus->blocksize - (len % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize))
+ len += pad;
+ } else if (len % DHD_SDALIGN) {
+ len += DHD_SDALIGN - (len % DHD_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (forcealign && (len & (ALIGNMENT - 1)))
+ len = ROUNDUP(len, ALIGNMENT);
+
+ ASSERT(ISALIGNED((uintptr)frame, 2));
+
+
+ /* Need to lock here to protect txseq and SDIO tx calls */
+ dhd_os_sdlock(bus->dhd);
+
+ BUS_WAKE(bus);
+
+ /* Make sure backplane clock is on */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+ *(uint16*)frame = htol16((uint16)msglen);
+ *(((uint16*)frame) + 1) = htol16(~msglen);
+
+ if (bus->txglom_enable) {
+ uint32 hwheader1, hwheader2;
+ /* Software tag: channel, sequence number, data offset */
+ swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+ | bus->tx_seq
+ | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN);
+ htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN +
+ SDPCM_HWEXT_LEN + sizeof(swheader));
+
+ hwheader1 = (msglen - SDPCM_FRAMETAG_LEN) | (1 << 24);
+ hwheader2 = (len - (msglen)) << 16;
+ htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+
+ *(uint16*)frame = htol16(len);
+ *(((uint16*)frame) + 1) = htol16(~(len));
+ } else {
+ /* Software tag: channel, sequence number, data offset */
+ swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+ | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+ }
+ if (!TXCTLOK(bus)) {
+ DHD_INFO(("%s: No bus credit
bus->tx_max %d, bus->tx_seq %d\n", + __FUNCTION__, bus->tx_max, bus->tx_seq)); + bus->ctrl_frame_stat = TRUE; + /* Send from dpc */ + bus->ctrl_frame_buf = frame; + bus->ctrl_frame_len = len; + + if (!bus->dpc_sched) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + if (bus->ctrl_frame_stat) { + dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat); + } + + if (bus->ctrl_frame_stat == FALSE) { + DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__)); + ret = 0; + } else { + bus->dhd->txcnt_timeout++; + if (!bus->dhd->hang_was_sent) { +#ifdef CUSTOMER_HW4_DEBUG + uint32 status, retry = 0; + R_SDREG(status, &bus->regs->intstatus, retry); + DHD_TRACE_HW4(("%s: txcnt_timeout, INT status=0x%08X\n", + __FUNCTION__, status)); + DHD_TRACE_HW4(("%s : tx_max : %d, tx_seq : %d, clkstate : %d \n", + __FUNCTION__, bus->tx_max, bus->tx_seq, bus->clkstate)); +#endif /* CUSTOMER_HW4_DEBUG */ + DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n", + __FUNCTION__, bus->dhd->txcnt_timeout)); + } + ret = -1; + bus->ctrl_frame_stat = FALSE; + goto done; + } + } + + bus->dhd->txcnt_timeout = 0; + bus->ctrl_frame_stat = TRUE; + + if (ret == -1) { +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_CTL_ON()) { + prhex("Tx Frame", frame, len); + } else if (DHD_HDRS_ON()) { + prhex("TxHdr", frame, MIN(len, 16)); + } +#endif + ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + frame, len, NULL, NULL, NULL, TXRETRIES); + if (ret == BCME_OK) + bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; + } + bus->ctrl_frame_stat = FALSE; + +done: + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + + if (ret) + bus->dhd->tx_ctlerrs++; + else + bus->dhd->tx_ctlpkts++; + + if (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT) + return -ETIMEDOUT; + + if (ret == BCME_NODEVICE) + err_nodevice++; + else + err_nodevice = 0; + + return ret ? err_nodevice >= ERROR_BCME_NODEVICE_MAX ? 
-ETIMEDOUT : -EIO : 0; +} + +int +dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen) +{ + int timeleft; + uint rxlen = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->dongle_reset) + return -EIO; + + /* Wait until control frame is available */ + timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen); + + dhd_os_sdlock(bus->dhd); + rxlen = bus->rxlen; + bcopy(bus->rxctl, msg, MIN(msglen, rxlen)); + bus->rxlen = 0; + dhd_os_sdunlock(bus->dhd); + + if (rxlen) { + DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n", + __FUNCTION__, rxlen, msglen)); + } else if (timeleft == 0) { +#ifdef DHD_DEBUG + uint32 status, retry = 0; + R_SDREG(status, &bus->regs->intstatus, retry); + DHD_ERROR(("%s: resumed on timeout, INT status=0x%08X\n", + __FUNCTION__, status)); +#else + DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__)); +#endif /* DHD_DEBUG */ + dhd_os_sdlock(bus->dhd); + dhdsdio_checkdied(bus, NULL, 0); + dhd_os_sdunlock(bus->dhd); + } else { + DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__)); + dhd_os_sdlock(bus->dhd); + dhdsdio_checkdied(bus, NULL, 0); + dhd_os_sdunlock(bus->dhd); + } + if (timeleft == 0) { + if (rxlen == 0) + bus->dhd->rxcnt_timeout++; + DHD_ERROR(("%s: rxcnt_timeout=%d, rxlen=%d\n", __FUNCTION__, + bus->dhd->rxcnt_timeout, rxlen)); + } + else + bus->dhd->rxcnt_timeout = 0; + + if (rxlen) + bus->dhd->rx_ctlpkts++; + else + bus->dhd->rx_ctlerrs++; + + if (bus->dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) + return -ETIMEDOUT; + + if (bus->dhd->dongle_trap_occured) + return -EREMOTEIO; + + return rxlen ? (int)rxlen : -EIO; +} + +/* IOVar table */ +enum { + IOV_INTR = 1, + IOV_POLLRATE, + IOV_SDREG, + IOV_SBREG, + IOV_SDCIS, + IOV_MEMBYTES, + IOV_RAMSIZE, + IOV_RAMSTART, +#ifdef DHD_DEBUG + IOV_CHECKDIED, + IOV_SERIALCONS, +#endif /* DHD_DEBUG */ + IOV_SET_DOWNLOAD_STATE, + IOV_SOCRAM_STATE, + IOV_FORCEEVEN, + IOV_SDIOD_DRIVE, + IOV_READAHEAD, + IOV_SDRXCHAIN, + IOV_ALIGNCTL, + IOV_SDALIGN, + IOV_DEVRESET, + IOV_CPU, +#if defined(USE_SDIOFIFO_IOVAR) + IOV_WATERMARK, + IOV_MESBUSYCTRL, +#endif /* USE_SDIOFIFO_IOVAR */ +#ifdef SDTEST + IOV_PKTGEN, + IOV_EXTLOOP, +#endif /* SDTEST */ + IOV_SPROM, + IOV_TXBOUND, + IOV_RXBOUND, + IOV_TXMINMAX, + IOV_IDLETIME, + IOV_IDLECLOCK, + IOV_SD1IDLE, + IOV_SLEEP, + IOV_DONGLEISOLATION, + IOV_KSO, + IOV_DEVSLEEP, + IOV_DEVCAP, + IOV_VARS, +#ifdef SOFTAP + IOV_FWPATH, +#endif + IOV_TXGLOMSIZE, + IOV_TXGLOMMODE, + IOV_HANGREPORT, + IOV_TXINRX_THRES +}; + +const bcm_iovar_t dhdsdio_iovars[] = { + {"intr", IOV_INTR, 0, IOVT_BOOL, 0 }, + {"sleep", IOV_SLEEP, 0, IOVT_BOOL, 0 }, + {"pollrate", IOV_POLLRATE, 0, IOVT_UINT32, 0 }, + {"idletime", IOV_IDLETIME, 0, IOVT_INT32, 0 }, + {"idleclock", IOV_IDLECLOCK, 0, IOVT_INT32, 0 }, + {"sd1idle", IOV_SD1IDLE, 0, IOVT_BOOL, 0 }, + {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) }, + {"ramsize", IOV_RAMSIZE, 0, IOVT_UINT32, 0 }, + {"ramstart", IOV_RAMSTART, 0, IOVT_UINT32, 0 }, + {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, IOVT_BOOL, 0 }, + {"socram_state", IOV_SOCRAM_STATE, 0, IOVT_BOOL, 0 }, + {"vars", IOV_VARS, 0, IOVT_BUFFER, 0 }, + {"sdiod_drive", IOV_SDIOD_DRIVE, 0, IOVT_UINT32, 0 }, + {"readahead", IOV_READAHEAD, 0, IOVT_BOOL, 0 }, + {"sdrxchain", IOV_SDRXCHAIN, 0, IOVT_BOOL, 0 }, + {"alignctl", IOV_ALIGNCTL, 0, IOVT_BOOL, 0 }, + {"sdalign", IOV_SDALIGN, 0, IOVT_BOOL, 0 }, + {"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0 }, +#ifdef DHD_DEBUG + {"sdreg", IOV_SDREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, + {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, 
sizeof(sdreg_t) }, + {"sd_cis", IOV_SDCIS, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN }, + {"forcealign", IOV_FORCEEVEN, 0, IOVT_BOOL, 0 }, + {"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 }, + {"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 }, + {"txminmax", IOV_TXMINMAX, 0, IOVT_UINT32, 0 }, + {"cpu", IOV_CPU, 0, IOVT_BOOL, 0 }, +#ifdef DHD_DEBUG + {"checkdied", IOV_CHECKDIED, 0, IOVT_BUFFER, 0 }, + {"serial", IOV_SERIALCONS, 0, IOVT_UINT32, 0 }, +#endif /* DHD_DEBUG */ +#endif /* DHD_DEBUG */ +#ifdef SDTEST + {"extloop", IOV_EXTLOOP, 0, IOVT_BOOL, 0 }, + {"pktgen", IOV_PKTGEN, 0, IOVT_BUFFER, sizeof(dhd_pktgen_t) }, +#endif /* SDTEST */ +#if defined(USE_SDIOFIFO_IOVAR) + {"watermark", IOV_WATERMARK, 0, IOVT_UINT32, 0 }, + {"mesbusyctrl", IOV_MESBUSYCTRL, 0, IOVT_UINT32, 0 }, +#endif /* USE_SDIOFIFO_IOVAR */ + {"devcap", IOV_DEVCAP, 0, IOVT_UINT32, 0 }, + {"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 }, + {"kso", IOV_KSO, 0, IOVT_UINT32, 0 }, + {"devsleep", IOV_DEVSLEEP, 0, IOVT_UINT32, 0 }, +#ifdef SOFTAP + {"fwpath", IOV_FWPATH, 0, IOVT_BUFFER, 0 }, +#endif + {"txglomsize", IOV_TXGLOMSIZE, 0, IOVT_UINT32, 0 }, + {"fw_hang_report", IOV_HANGREPORT, 0, IOVT_BOOL, 0 }, + {"txinrx_thres", IOV_TXINRX_THRES, 0, IOVT_INT32, 0 }, + {NULL, 0, 0, 0, 0 } +}; + +static void +dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div) +{ + uint q1, q2; + + if (!div) { + bcm_bprintf(strbuf, "%s N/A", desc); + } else { + q1 = num / div; + q2 = (100 * (num - (q1 * div))) / div; + bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2); + } +} + +void +dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + dhd_bus_t *bus = dhdp->bus; + + bcm_bprintf(strbuf, "Bus SDIO structure:\n"); + bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n", + bus->hostintmask, bus->intstatus, bus->sdpcm_ver); + bcm_bprintf(strbuf, "fcstate %d qlen %u tx_seq %d, max %d, rxskip %d rxlen %u rx_seq %d\n", + bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip, + bus->rxlen, bus->rx_seq); + bcm_bprintf(strbuf, "intr %d intrcount %u lastintrs %u spurious %u\n", + bus->intr, bus->intrcount, bus->lastintrs, bus->spurious); + bcm_bprintf(strbuf, "pollrate %u pollcnt %u regfails %u\n", + bus->pollrate, bus->pollcnt, bus->regfails); + + bcm_bprintf(strbuf, "\nAdditional counters:\n"); +#ifdef DHDENABLE_TAILPAD + bcm_bprintf(strbuf, "tx_tailpad_chain %u tx_tailpad_pktget %u\n", + bus->tx_tailpad_chain, bus->tx_tailpad_pktget); +#endif /* DHDENABLE_TAILPAD */ + bcm_bprintf(strbuf, "tx_sderrs %u fcqueued %u rxrtx %u rx_toolong %u rxc_errors %u\n", + bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong, + bus->rxc_errors); + bcm_bprintf(strbuf, "rx_hdrfail %u badhdr %u badseq %u\n", + bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq); + bcm_bprintf(strbuf, "fc_rcvd %u, fc_xoff %u, fc_xon %u\n", + bus->fc_rcvd, bus->fc_xoff, bus->fc_xon); + bcm_bprintf(strbuf, "rxglomfail %u, rxglomframes %u, rxglompkts %u\n", + bus->rxglomfail, bus->rxglomframes, bus->rxglompkts); + bcm_bprintf(strbuf, "f2rx (hdrs/data) %u (%u/%u), f2tx %u f1regs %u\n", + (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs, bus->f2rxdata, + bus->f2txdata, bus->f1regdata); + { + dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets, + (bus->f2rxhdrs + bus->f2rxdata)); + dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets, bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets, + (bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets, 
bus->intrcount); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts), + bus->dhd->rx_packets); + dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts, bus->rxglomframes); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets, bus->f2txdata); + dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets, bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets, + (bus->f2txdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets, bus->intrcount); + bcm_bprintf(strbuf, "\n"); + + dhd_dump_pct(strbuf, "Total: pkts/f2rw", + (bus->dhd->tx_packets + bus->dhd->rx_packets), + (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata)); + dhd_dump_pct(strbuf, ", pkts/f1sd", + (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->f1regdata); + dhd_dump_pct(strbuf, ", pkts/sd", + (bus->dhd->tx_packets + bus->dhd->rx_packets), + (bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata)); + dhd_dump_pct(strbuf, ", pkts/int", + (bus->dhd->tx_packets + bus->dhd->rx_packets), bus->intrcount); + bcm_bprintf(strbuf, "\n\n"); + } + +#ifdef SDTEST + if (bus->pktgen_count) { + bcm_bprintf(strbuf, "pktgen config and count:\n"); + bcm_bprintf(strbuf, "freq %u count %u print %u total %u min %u len %u\n", + bus->pktgen_freq, bus->pktgen_count, bus->pktgen_print, + bus->pktgen_total, bus->pktgen_minlen, bus->pktgen_maxlen); + bcm_bprintf(strbuf, "send attempts %u rcvd %u fail %u\n", + bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail); + } +#endif /* SDTEST */ +#ifdef DHD_DEBUG + bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n", + bus->dpc_sched, (bcmsdh_intr_pending(bus->sdh) ? " " : " not ")); + bcm_bprintf(strbuf, "blocksize %u roundup %u\n", bus->blocksize, bus->roundup); +#endif /* DHD_DEBUG */ + bcm_bprintf(strbuf, "clkstate %d activity %d idletime %d idlecount %d sleeping %d\n", + bus->clkstate, bus->activity, bus->idletime, bus->idlecount, bus->sleeping); +} + +void +dhd_bus_clearcounts(dhd_pub_t *dhdp) +{ + dhd_bus_t *bus = (dhd_bus_t *)dhdp->bus; + + bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0; + bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0; + bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0; +#ifdef DHDENABLE_TAILPAD + bus->tx_tailpad_chain = bus->tx_tailpad_pktget = 0; +#endif /* DHDENABLE_TAILPAD */ + bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0; + bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0; + bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0; +} + +#ifdef SDTEST +static int +dhdsdio_pktgen_get(dhd_bus_t *bus, uint8 *arg) +{ + dhd_pktgen_t pktgen; + + pktgen.version = DHD_PKTGEN_VERSION; + pktgen.freq = bus->pktgen_freq; + pktgen.count = bus->pktgen_count; + pktgen.print = bus->pktgen_print; + pktgen.total = bus->pktgen_total; + pktgen.minlen = bus->pktgen_minlen; + pktgen.maxlen = bus->pktgen_maxlen; + pktgen.numsent = bus->pktgen_sent; + pktgen.numrcvd = bus->pktgen_rcvd; + pktgen.numfail = bus->pktgen_fail; + pktgen.mode = bus->pktgen_mode; + pktgen.stop = bus->pktgen_stop; + + bcopy(&pktgen, arg, sizeof(pktgen)); + + return 0; +} + +static int +dhdsdio_pktgen_set(dhd_bus_t *bus, uint8 *arg) +{ + dhd_pktgen_t pktgen; + uint oldcnt, oldmode; + + bcopy(arg, &pktgen, sizeof(pktgen)); + if (pktgen.version != DHD_PKTGEN_VERSION) + return BCME_BADARG; + + oldcnt = bus->pktgen_count; + oldmode = bus->pktgen_mode; + + bus->pktgen_freq = pktgen.freq; + bus->pktgen_count = pktgen.count; + 
bus->pktgen_print = pktgen.print; + bus->pktgen_total = pktgen.total; + bus->pktgen_minlen = pktgen.minlen; + bus->pktgen_maxlen = pktgen.maxlen; + bus->pktgen_mode = pktgen.mode; + bus->pktgen_stop = pktgen.stop; + + bus->pktgen_tick = bus->pktgen_ptick = 0; + bus->pktgen_prev_time = jiffies; + bus->pktgen_len = MAX(bus->pktgen_len, bus->pktgen_minlen); + bus->pktgen_len = MIN(bus->pktgen_len, bus->pktgen_maxlen); + + /* Clear counts for a new pktgen (mode change, or was stopped) */ + if (bus->pktgen_count && (!oldcnt || oldmode != bus->pktgen_mode)) { + bus->pktgen_sent = bus->pktgen_prev_sent = bus->pktgen_rcvd = 0; + bus->pktgen_prev_rcvd = bus->pktgen_fail = 0; + } + + return 0; +} +#endif /* SDTEST */ + +static void +dhdsdio_devram_remap(dhd_bus_t *bus, bool val) +{ + uint8 enable, protect, remap; + + si_socdevram(bus->sih, FALSE, &enable, &protect, &remap); + remap = val ? TRUE : FALSE; + si_socdevram(bus->sih, TRUE, &enable, &protect, &remap); +} + +static int +dhdsdio_membytes(dhd_bus_t *bus, bool write, uint32 address, uint8 *data, uint size) +{ + int bcmerror = 0; + uint32 sdaddr; + uint dsize; + + /* In remap mode, adjust address beyond socram and redirect + * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize + * is not backplane accessible + */ + if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address)) { + address -= bus->orig_ramsize; + address += SOCDEVRAM_BP_ADDR; + } + + /* Determine initial transfer parameters */ + sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK; + if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK) + dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr); + else + dsize = size; + + /* Set the backplane window to include the start address */ + if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) { + DHD_ERROR(("%s: window change failed\n", __FUNCTION__)); + goto xfer_done; + } + + /* Do the transfer(s) */ + while (size) { + DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n", + __FUNCTION__, (write ? 
"write" : "read"), dsize, sdaddr, + (address & SBSDIO_SBWINDOW_MASK))); + if ((bcmerror = bcmsdh_rwdata(bus->sdh, write, sdaddr, data, dsize))) { + DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__)); + break; + } + + /* Adjust for next transfer (if any) */ + if ((size -= dsize)) { + data += dsize; + address += dsize; + if ((bcmerror = dhdsdio_set_siaddr_window(bus, address))) { + DHD_ERROR(("%s: window change failed\n", __FUNCTION__)); + break; + } + sdaddr = 0; + dsize = MIN(SBSDIO_SB_OFT_ADDR_LIMIT, size); + } + + } + +xfer_done: + /* Return the window to backplane enumeration space for core access */ + if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) { + DHD_ERROR(("%s: FAILED to set window back to 0x%x\n", __FUNCTION__, + bcmsdh_cur_sbwad(bus->sdh))); + } + + return bcmerror; +} + +static int +dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh) +{ + uint32 addr; + int rv, i; + uint32 shaddr = 0; + + if (bus->sih == NULL) { + if (bus->dhd && bus->dhd->dongle_reset) { + DHD_ERROR(("%s: Dongle is in reset state\n", __FUNCTION__)); + return BCME_NOTREADY; + } else { + ASSERT(bus->dhd); + ASSERT(bus->sih); + DHD_ERROR(("%s: The address of sih is invalid\n", __FUNCTION__)); + return BCME_ERROR; + } + } + if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID && !dhdsdio_sr_cap(bus)) + bus->srmemsize = 0; + + shaddr = bus->dongle_ram_base + bus->ramsize - 4; + i = 0; + do { + /* Read last word in memory to determine address of sdpcm_shared structure */ + if ((rv = dhdsdio_membytes(bus, FALSE, shaddr, (uint8 *)&addr, 4)) < 0) + return rv; + + addr = ltoh32(addr); + + DHD_INFO(("sdpcm_shared address 0x%08X\n", addr)); + + /* + * Check if addr is valid. + * NVRAM length at the end of memory should have been overwritten. + */ + if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) { + if ((bus->srmemsize > 0) && (i++ == 0)) { + shaddr -= bus->srmemsize; + } else { + DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n", + __FUNCTION__, addr)); + return BCME_ERROR; + } + } else + break; + } while (i < 2); + + /* Read hndrte_shared structure */ + if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)sh, sizeof(sdpcm_shared_t))) < 0) + return rv; + + /* Endianness */ + sh->flags = ltoh32(sh->flags); + sh->trap_addr = ltoh32(sh->trap_addr); + sh->assert_exp_addr = ltoh32(sh->assert_exp_addr); + sh->assert_file_addr = ltoh32(sh->assert_file_addr); + sh->assert_line = ltoh32(sh->assert_line); + sh->console_addr = ltoh32(sh->console_addr); + sh->msgtrace_addr = ltoh32(sh->msgtrace_addr); + + if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1) + return BCME_OK; + + if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) { + DHD_ERROR(("%s: sdpcm_shared version %d in dhd " + "is different than sdpcm_shared version %d in dongle\n", + __FUNCTION__, SDPCM_SHARED_VERSION, + sh->flags & SDPCM_SHARED_VERSION_MASK)); + return BCME_ERROR; + } + + return BCME_OK; +} + +#define CONSOLE_LINE_MAX 192 + +#ifdef DHD_DEBUG +static int +dhdsdio_readconsole(dhd_bus_t *bus) +{ + dhd_console_t *c = &bus->console; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, idx, addr; + int rv; + + /* Don't do anything until FWREADY updates console address */ + if (bus->console_addr == 0) + return 0; + + if (!KSO_ENAB(bus)) + return 0; + + /* Read console log struct */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, log); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0) + return rv; + + /* Allocate console buffer (one time 
only) */ + if (c->buf == NULL) { + c->bufsize = ltoh32(c->log.buf_size); + if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL) + return BCME_NOMEM; + } + + idx = ltoh32(c->log.idx); + + /* Protect against corrupt value */ + if (idx > c->bufsize) + return BCME_ERROR; + + /* Skip reading the console buffer if the index pointer has not moved */ + if (idx == c->last) + return BCME_OK; + + /* Read the console buffer */ + addr = ltoh32(c->log.buf); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0) + return rv; + + while (c->last != idx) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + if (c->last == idx) { + /* This would output a partial line. Instead, back up + * the buffer pointer and output this line next time around. + */ + if (c->last >= n) + c->last -= n; + else + c->last = c->bufsize - n; + goto break2; + } + ch = c->buf[c->last]; + c->last = (c->last + 1) % c->bufsize; + if (ch == '\n') + break; + line[n] = ch; + } + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + printf("CONSOLE: %s\n", line); +#ifdef LOG_INTO_TCPDUMP + dhd_sendup_log(bus->dhd, line, n); +#endif /* LOG_INTO_TCPDUMP */ + } + } +break2: + + return BCME_OK; +} +#endif /* DHD_DEBUG */ + +static int +dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size) +{ + int bcmerror = 0; + uint msize = 512; + char *mbuffer = NULL; + char *console_buffer = NULL; + uint maxstrlen = 256; + char *str = NULL; + trap_t tr; + sdpcm_shared_t sdpcm_shared; + struct bcmstrbuf strbuf; + uint32 console_ptr, console_size, console_index; + uint8 line[CONSOLE_LINE_MAX], ch; + uint32 n, i, addr; + int rv; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (DHD_NOCHECKDIED_ON()) + return 0; + + if (data == NULL) { + /* + * Called after a rx ctrl timeout. "data" is NULL. + * allocate memory to trace the trap or assert. + */ + size = msize; + mbuffer = data = MALLOC(bus->dhd->osh, msize); + if (mbuffer == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize)); + bcmerror = BCME_NOMEM; + goto done; + } + } + + if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) { + DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen)); + bcmerror = BCME_NOMEM; + goto done; + } + + if ((bcmerror = dhdsdio_readshared(bus, &sdpcm_shared)) < 0) + goto done; + + bcm_binit(&strbuf, data, size); + + bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n", + sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr); + + if ((sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) + */ + bcm_bprintf(&strbuf, "Assrt not built in dongle\n"); + } + + if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT|SDPCM_SHARED_TRAP)) == 0) { + /* NOTE: Misspelled assert is intentional - DO NOT FIX. + * (Avoids conflict with real asserts for programmatic parsing of output.) 
+ */ + bcm_bprintf(&strbuf, "No trap%s in dongle", + (sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT) + ?"/assrt" :""); + } else { + if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) { + /* Download assert */ + bcm_bprintf(&strbuf, "Dongle assert"); + if (sdpcm_shared.assert_exp_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + sdpcm_shared.assert_exp_addr, + (uint8 *)str, maxstrlen)) < 0) + goto done; + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " expr \"%s\"", str); + } + + if (sdpcm_shared.assert_file_addr != 0) { + str[0] = '\0'; + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + sdpcm_shared.assert_file_addr, + (uint8 *)str, maxstrlen)) < 0) + goto done; + + str[maxstrlen - 1] = '\0'; + bcm_bprintf(&strbuf, " file \"%s\"", str); + } + + bcm_bprintf(&strbuf, " line %d ", sdpcm_shared.assert_line); + } + + if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) { + bus->dhd->dongle_trap_occured = TRUE; + if ((bcmerror = dhdsdio_membytes(bus, FALSE, + sdpcm_shared.trap_addr, + (uint8*)&tr, sizeof(trap_t))) < 0) + goto done; + + bcm_bprintf(&strbuf, + "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x," + "lp 0x%x, rpc 0x%x Trap offset 0x%x, " + "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, " + "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n", + ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr), + ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc), + ltoh32(sdpcm_shared.trap_addr), + ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3), + ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7)); + + addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, + (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) + goto printbuf; + + addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.buf_size); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, + (uint8 *)&console_size, sizeof(console_size))) < 0) + goto printbuf; + + addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.idx); + if ((rv = dhdsdio_membytes(bus, FALSE, addr, + (uint8 *)&console_index, sizeof(console_index))) < 0) + goto printbuf; + + console_ptr = ltoh32(console_ptr); + console_size = ltoh32(console_size); + console_index = ltoh32(console_index); + + if (console_size > CONSOLE_BUFFER_MAX || + !(console_buffer = MALLOC(bus->dhd->osh, console_size))) + goto printbuf; + + if ((rv = dhdsdio_membytes(bus, FALSE, console_ptr, + (uint8 *)console_buffer, console_size)) < 0) + goto printbuf; + + for (i = 0, n = 0; i < console_size; i += n + 1) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + ch = console_buffer[(console_index + i + n) % console_size]; + if (ch == '\n') + break; + line[n] = ch; + } + + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + /* Don't use DHD_ERROR macro since we print + * a lot of information quickly. 
The macro + * will truncate a lot of the printfs + */ + + if (dhd_msg_level & DHD_ERROR_VAL) + printf("CONSOLE: %s\n", line); + } + } + } + } + +printbuf: + if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) { + DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf)); + } + +#if defined(DHD_FW_COREDUMP) + if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) { + /* Mem dump to a file on device */ + dhdsdio_mem_dump(bus); + } +#endif /* #if defined(DHD_FW_COREDUMP) */ + +done: + if (mbuffer) + MFREE(bus->dhd->osh, mbuffer, msize); + if (str) + MFREE(bus->dhd->osh, str, maxstrlen); + if (console_buffer) + MFREE(bus->dhd->osh, console_buffer, console_size); + + return bcmerror; +} + +#if defined(DHD_FW_COREDUMP) +static int +dhdsdio_mem_dump(dhd_bus_t *bus) +{ + int ret = 0; + int size; /* Full mem size */ + int start = bus->dongle_ram_base; /* Start address */ + int read_size = 0; /* Read size of each iteration */ + uint8 *buf = NULL, *databuf = NULL; + + /* Get full mem size */ + size = bus->ramsize; + buf = MALLOC(bus->dhd->osh, size); + if (!buf) { + printf("%s: Out of memory (%d bytes)\n", __FUNCTION__, size); + return -1; + } + + /* Read mem content */ + printf("Dump dongle memory"); + databuf = buf; + while (size) + { + read_size = MIN(MEMBLOCK, size); + if ((ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size))) + { + printf("%s: Error membytes %d\n", __FUNCTION__, ret); + if (buf) { + MFREE(bus->dhd->osh, buf, size); + } + return -1; + } + /* Decrement size and increment start address */ + size -= read_size; + start += read_size; + databuf += read_size; + } + printf("Done\n"); + + dhd_save_fwdump(bus->dhd, buf, bus->ramsize); + /* free buf before return !!! */ + if (write_to_file(bus->dhd, buf, bus->ramsize)) + { + printf("%s: Error writing to files\n", __FUNCTION__); + return -1; + } + + /* buf free handled in write_to_file, not here */ + return 0; +} +#endif /* DHD_FW_COREDUMP */ + +int +dhd_socram_dump(dhd_bus_t * bus) +{ +#if defined(DHD_FW_COREDUMP) + return (dhdsdio_mem_dump(bus)); +#else + return -1; +#endif +} + +int +dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len) +{ + int bcmerror = BCME_OK; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Basic sanity checks */ + if (bus->dhd->up) { + bcmerror = BCME_NOTDOWN; + goto err; + } + if (!len) { + bcmerror = BCME_BUFTOOSHORT; + goto err; + } + + /* Free the old ones and replace with passed variables */ + if (bus->vars) + MFREE(bus->dhd->osh, bus->vars, bus->varsz); + + bus->vars = MALLOC(bus->dhd->osh, len); + bus->varsz = bus->vars ? 
len : 0; + if (bus->vars == NULL) { + bcmerror = BCME_NOMEM; + goto err; + } + + /* Copy the passed variables, which should include the terminating double-null */ + bcopy(arg, bus->vars, bus->varsz); +err: + return bcmerror; +} + +#ifdef DHD_DEBUG + +#define CC_PLL_CHIPCTRL_SERIAL_ENAB (1 << 24) +#define CC_CHIPCTRL_JTAG_SEL (1 << 3) +#define CC_CHIPCTRL_GPIO_SEL (0x3) +#define CC_PLL_CHIPCTRL_SERIAL_ENAB_4334 (1 << 28) + +static int +dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror) +{ + int int_val; + uint32 addr, data, uart_enab = 0; + uint32 jtag_sel = CC_CHIPCTRL_JTAG_SEL; + uint32 gpio_sel = CC_CHIPCTRL_GPIO_SEL; + + addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr); + data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data); + *bcmerror = 0; + + bcmsdh_reg_write(bus->sdh, addr, 4, 1); + if (bcmsdh_regfail(bus->sdh)) { + *bcmerror = BCME_SDIO_ERROR; + return -1; + } + int_val = bcmsdh_reg_read(bus->sdh, data, 4); + if (bcmsdh_regfail(bus->sdh)) { + *bcmerror = BCME_SDIO_ERROR; + return -1; + } + if (bus->sih->chip == BCM4330_CHIP_ID) { + uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB; + } + else if (bus->sih->chip == BCM4334_CHIP_ID || + bus->sih->chip == BCM43340_CHIP_ID || + bus->sih->chip == BCM43341_CHIP_ID || + bus->sih->chip == BCM43342_CHIP_ID || + 0) { + if (enable) { + /* Moved to PMU chipcontrol 1 from 4330 */ + int_val &= ~gpio_sel; + int_val |= jtag_sel; + } else { + int_val |= gpio_sel; + int_val &= ~jtag_sel; + } + uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB_4334; + } + + if (!set) + return (int_val & uart_enab); + if (enable) + int_val |= uart_enab; + else + int_val &= ~uart_enab; + bcmsdh_reg_write(bus->sdh, data, 4, int_val); + if (bcmsdh_regfail(bus->sdh)) { + *bcmerror = BCME_SDIO_ERROR; + return -1; + } + if (bus->sih->chip == BCM4330_CHIP_ID) { + uint32 chipcontrol; + addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol); + chipcontrol = bcmsdh_reg_read(bus->sdh, addr, 4); + chipcontrol &= ~jtag_sel; + if (enable) { + chipcontrol |= jtag_sel; + chipcontrol &= ~gpio_sel; + } + bcmsdh_reg_write(bus->sdh, addr, 4, chipcontrol); + } + + return (int_val & uart_enab); +} +#endif + +static int +dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name, + void *params, int plen, void *arg, int len, int val_size) +{ + int bcmerror = 0; + int32 int_val = 0; + bool bool_val = 0; + + DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n", + __FUNCTION__, actionid, name, params, plen, arg, len, val_size)); + + if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0) + goto exit; + + if (plen >= (int)sizeof(int_val)) + bcopy(params, &int_val, sizeof(int_val)); + + bool_val = (int_val != 0) ? TRUE : FALSE; + + + /* Some ioctls use the bus */ + dhd_os_sdlock(bus->dhd); + + /* Check if dongle is in reset. 
If so, only allow DEVRESET iovars */ + if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) || + actionid == IOV_GVAL(IOV_DEVRESET))) { + bcmerror = BCME_NOTREADY; + goto exit; + } + + /* + * Special handling for keepSdioOn: New SDIO Wake-up Mechanism + */ + if ((vi->varid == IOV_KSO) && (IOV_ISSET(actionid))) { + dhdsdio_clk_kso_iovar(bus, bool_val); + goto exit; + } else if ((vi->varid == IOV_DEVSLEEP) && (IOV_ISSET(actionid))) { + { + dhdsdio_clk_devsleep_iovar(bus, bool_val); + if (!SLPAUTO_ENAB(bus) && (bool_val == FALSE) && (bus->ipend)) { + DHD_ERROR(("INT pending in devsleep 1, dpc_sched: %d\n", + bus->dpc_sched)); + if (!bus->dpc_sched) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + } + } + goto exit; + } + + /* Handle sleep stuff before any clock mucking */ + if (vi->varid == IOV_SLEEP) { + if (IOV_ISSET(actionid)) { + bcmerror = dhdsdio_bussleep(bus, bool_val); + } else { + int_val = (int32)bus->sleeping; + bcopy(&int_val, arg, val_size); + } + goto exit; + } + + /* Request clock to allow SDIO accesses */ + if (!bus->dhd->dongle_reset) { + BUS_WAKE(bus); + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + } + + switch (actionid) { + case IOV_GVAL(IOV_INTR): + int_val = (int32)bus->intr; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_INTR): + bus->intr = bool_val; + bus->intdis = FALSE; + if (bus->dhd->up) { + if (bus->intr) { + DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__)); + bcmsdh_intr_enable(bus->sdh); + } else { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); + } + } + break; + + case IOV_GVAL(IOV_POLLRATE): + int_val = (int32)bus->pollrate; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_POLLRATE): + bus->pollrate = (uint)int_val; + bus->poll = (bus->pollrate != 0); + break; + + case IOV_GVAL(IOV_IDLETIME): + int_val = bus->idletime; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_IDLETIME): + if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE)) { + bcmerror = BCME_BADARG; + } else { + bus->idletime = int_val; + } + break; + + case IOV_GVAL(IOV_IDLECLOCK): + int_val = (int32)bus->idleclock; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_IDLECLOCK): + bus->idleclock = int_val; + break; + + case IOV_GVAL(IOV_SD1IDLE): + int_val = (int32)sd1idle; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SD1IDLE): + sd1idle = bool_val; + break; + + + case IOV_SVAL(IOV_MEMBYTES): + case IOV_GVAL(IOV_MEMBYTES): + { + uint32 address; + uint size, dsize; + uint8 *data; + + bool set = (actionid == IOV_SVAL(IOV_MEMBYTES)); + + ASSERT(plen >= 2*sizeof(int)); + + address = (uint32)int_val; + bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val)); + size = (uint)int_val; + + /* Do some validation */ + dsize = set ? plen - (2 * sizeof(int)) : len; + if (dsize < size) { + DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n", + __FUNCTION__, (set ? "set" : "get"), address, size, dsize)); + bcmerror = BCME_BADARG; + break; + } + + DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n", __FUNCTION__, + (set ? "write" : "read"), size, address)); + + /* check if CR4 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + /* + * If address is start of RAM (i.e. 
a downloaded image), + * store the reset instruction to be written in 0 + */ + if (set && address == bus->dongle_ram_base) { + bus->resetinstr = *(((uint32*)params) + 2); + } + } else { + /* If we know about SOCRAM, check for a fit */ + if ((bus->orig_ramsize) && + ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize))) + { + uint8 enable, protect, remap; + si_socdevram(bus->sih, FALSE, &enable, &protect, &remap); + if (!enable || protect) { + DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n", + __FUNCTION__, bus->orig_ramsize, size, address)); + DHD_ERROR(("%s: socram enable %d, protect %d\n", + __FUNCTION__, enable, protect)); + bcmerror = BCME_BADARG; + break; + } + + if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) { + uint32 devramsize = si_socdevram_size(bus->sih); + if ((address < SOCDEVRAM_ARM_ADDR) || + (address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) { + DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n", + __FUNCTION__, address, size)); + DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n", + __FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize)); + bcmerror = BCME_BADARG; + break; + } + /* move it such that address is real now */ + address -= SOCDEVRAM_ARM_ADDR; + address += SOCDEVRAM_BP_ADDR; + DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n", + __FUNCTION__, (set ? "write" : "read"), size, address)); + } else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) { + /* Can not access remap region while devram remap bit is set + * ROM content would be returned in this case + */ + DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n", + __FUNCTION__, address)); + bcmerror = BCME_ERROR; + break; + } + } + } + + /* Generate the actual data pointer */ + data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg; + + /* Call to do the transfer */ + bcmerror = dhdsdio_membytes(bus, set, address, data, size); + + break; + } + + case IOV_GVAL(IOV_RAMSIZE): + int_val = (int32)bus->ramsize; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_RAMSTART): + int_val = (int32)bus->dongle_ram_base; + bcopy(&int_val, arg, val_size); + break; + + case IOV_GVAL(IOV_SDIOD_DRIVE): + int_val = (int32)dhd_sdiod_drive_strength; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDIOD_DRIVE): + dhd_sdiod_drive_strength = int_val; + si_sdiod_drive_strength_init(bus->sih, bus->dhd->osh, dhd_sdiod_drive_strength); + break; + + case IOV_SVAL(IOV_SET_DOWNLOAD_STATE): + bcmerror = dhdsdio_download_state(bus, bool_val); + break; + + case IOV_SVAL(IOV_SOCRAM_STATE): + bcmerror = dhdsdio_download_state(bus, bool_val); + break; + + case IOV_SVAL(IOV_VARS): + bcmerror = dhdsdio_downloadvars(bus, arg, len); + break; + + case IOV_GVAL(IOV_READAHEAD): + int_val = (int32)dhd_readahead; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_READAHEAD): + if (bool_val && !dhd_readahead) + bus->nextlen = 0; + dhd_readahead = bool_val; + break; + + case IOV_GVAL(IOV_SDRXCHAIN): + int_val = (int32)bus->use_rxchain; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SDRXCHAIN): + if (bool_val && !bus->sd_rxchain) + bcmerror = BCME_UNSUPPORTED; + else + bus->use_rxchain = bool_val; + break; + case IOV_GVAL(IOV_ALIGNCTL): + int_val = (int32)dhd_alignctl; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_ALIGNCTL): + dhd_alignctl = bool_val; + break; + + case IOV_GVAL(IOV_SDALIGN): + int_val = DHD_SDALIGN; + bcopy(&int_val, arg, val_size); + break; + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_VARS): + 
if (bus->varsz < (uint)len) + bcopy(bus->vars, arg, bus->varsz); + else + bcmerror = BCME_BUFTOOSHORT; + break; +#endif /* DHD_DEBUG */ + +#ifdef DHD_DEBUG + case IOV_GVAL(IOV_SDREG): + { + sdreg_t *sd_ptr; + uint32 addr, size; + + sd_ptr = (sdreg_t *)params; + + addr = (uint32)((ulong)bus->regs + sd_ptr->offset); + size = sd_ptr->func; + int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_SDREG): + { + sdreg_t *sd_ptr; + uint32 addr, size; + + sd_ptr = (sdreg_t *)params; + + addr = (uint32)((ulong)bus->regs + sd_ptr->offset); + size = sd_ptr->func; + bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + break; + } + + /* Same as above, but offset is not backplane (not SDIO core) */ + case IOV_GVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = SI_ENUM_BASE + sdreg.offset; + size = sdreg.func; + int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + bcopy(&int_val, arg, sizeof(int32)); + break; + } + + case IOV_SVAL(IOV_SBREG): + { + sdreg_t sdreg; + uint32 addr, size; + + bcopy(params, &sdreg, sizeof(sdreg)); + + addr = SI_ENUM_BASE + sdreg.offset; + size = sdreg.func; + bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value); + if (bcmsdh_regfail(bus->sdh)) + bcmerror = BCME_SDIO_ERROR; + break; + } + + case IOV_GVAL(IOV_SDCIS): + { + *(char *)arg = 0; + + bcmstrcat(arg, "\nFunc 0\n"); + bcmsdh_cis_read(bus->sdh, 0x10, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + bcmstrcat(arg, "\nFunc 1\n"); + bcmsdh_cis_read(bus->sdh, 0x11, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + bcmstrcat(arg, "\nFunc 2\n"); + bcmsdh_cis_read(bus->sdh, 0x12, (uint8 *)arg + strlen(arg), SBSDIO_CIS_SIZE_LIMIT); + break; + } + + case IOV_GVAL(IOV_FORCEEVEN): + int_val = (int32)forcealign; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_FORCEEVEN): + forcealign = bool_val; + break; + + case IOV_GVAL(IOV_TXBOUND): + int_val = (int32)dhd_txbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXBOUND): + dhd_txbound = (uint)int_val; + break; + + case IOV_GVAL(IOV_RXBOUND): + int_val = (int32)dhd_rxbound; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_RXBOUND): + dhd_rxbound = (uint)int_val; + break; + + case IOV_GVAL(IOV_TXMINMAX): + int_val = (int32)dhd_txminmax; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_TXMINMAX): + dhd_txminmax = (uint)int_val; + break; + + case IOV_GVAL(IOV_SERIALCONS): + int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror); + if (bcmerror != 0) + break; + + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_SERIALCONS): + dhd_serialconsole(bus, TRUE, bool_val, &bcmerror); + break; + + +#endif /* DHD_DEBUG */ + + +#ifdef SDTEST + case IOV_GVAL(IOV_EXTLOOP): + int_val = (int32)bus->ext_loop; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_EXTLOOP): + bus->ext_loop = bool_val; + break; + + case IOV_GVAL(IOV_PKTGEN): + bcmerror = dhdsdio_pktgen_get(bus, arg); + break; + + case IOV_SVAL(IOV_PKTGEN): + bcmerror = dhdsdio_pktgen_set(bus, arg); + break; +#endif /* SDTEST */ + +#if defined(USE_SDIOFIFO_IOVAR) + case IOV_GVAL(IOV_WATERMARK): + int_val = (int32)watermark; + bcopy(&int_val, arg, val_size); + break; + + case IOV_SVAL(IOV_WATERMARK): + watermark = 
(uint)int_val;
+ watermark = (watermark > SBSDIO_WATERMARK_MASK) ? SBSDIO_WATERMARK_MASK : watermark;
+ DHD_ERROR(("Setting watermark as 0x%x.\n", watermark));
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, (uint8)watermark, NULL);
+ break;
+
+ case IOV_GVAL(IOV_MESBUSYCTRL):
+ int_val = (int32)mesbusyctrl;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MESBUSYCTRL):
+ mesbusyctrl = (uint)int_val;
+ mesbusyctrl = (mesbusyctrl > SBSDIO_MESBUSYCTRL_MASK)
+ ? SBSDIO_MESBUSYCTRL_MASK : mesbusyctrl;
+ DHD_ERROR(("Setting mesbusyctrl as 0x%x.\n", mesbusyctrl));
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
+ ((uint8)mesbusyctrl | 0x80), NULL);
+ break;
+#endif
+
+
+ case IOV_GVAL(IOV_DONGLEISOLATION):
+ int_val = bus->dhd->dongle_isolation;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DONGLEISOLATION):
+ bus->dhd->dongle_isolation = bool_val;
+ break;
+
+ case IOV_SVAL(IOV_DEVRESET):
+ DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d busstate=%d\n",
+ __FUNCTION__, bool_val, bus->dhd->dongle_reset,
+ bus->dhd->busstate));
+
+ ASSERT(bus->dhd->osh);
+ /* ASSERT(bus->cl_devid); */
+
+ dhd_bus_devreset(bus->dhd, (uint8)bool_val);
+
+ break;
+ /*
+ * softap firmware is updated through module parameter or android private command
+ */
+
+ case IOV_GVAL(IOV_DEVRESET):
+ DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__));
+
+ /* Get its status */
+ int_val = (bool) bus->dhd->dongle_reset;
+ bcopy(&int_val, arg, val_size);
+
+ break;
+
+ case IOV_GVAL(IOV_KSO):
+ int_val = dhdsdio_sleepcsr_get(bus);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_DEVCAP):
+ int_val = dhdsdio_devcap_get(bus);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DEVCAP):
+ dhdsdio_devcap_set(bus, (uint8) int_val);
+ break;
+ case IOV_GVAL(IOV_TXGLOMSIZE):
+ int_val = (int32)bus->txglomsize;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TXGLOMSIZE):
+ if (int_val > SDPCM_MAXGLOM_SIZE) {
+ bcmerror = BCME_ERROR;
+ } else {
+ bus->txglomsize = (uint)int_val;
+ }
+ break;
+ case IOV_SVAL(IOV_HANGREPORT):
+ bus->dhd->hang_report = bool_val;
+ DHD_ERROR(("%s: Set hang_report as %d\n", __FUNCTION__, bus->dhd->hang_report));
+ break;
+
+ case IOV_GVAL(IOV_HANGREPORT):
+ int_val = (int32)bus->dhd->hang_report;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_TXINRX_THRES):
+ int_val = bus->txinrx_thres;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_TXINRX_THRES):
+ if (int_val < 0) {
+ bcmerror = BCME_BADARG;
+ } else {
+ bus->txinrx_thres = int_val;
+ }
+ break;
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+
+exit:
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
+
+ return bcmerror;
+}
+
+static int
+dhdsdio_write_vars(dhd_bus_t *bus)
+{
+ int bcmerror = 0;
+ uint32 varsize, phys_size;
+ uint32 varaddr;
+ uint8 *vbuffer;
+ uint32 varsizew;
+#ifdef DHD_DEBUG
+ uint8 *nvram_ularray;
+#endif /* DHD_DEBUG */
+
+ /* Even if there are no vars to be written, we still need to set the ramsize. */
+ varsize = bus->varsz ?
ROUNDUP(bus->varsz, 4) : 0; + varaddr = (bus->ramsize - 4) - varsize; + + varaddr += bus->dongle_ram_base; + + if (bus->vars) { + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 7)) { + if (((varaddr & 0x3C) == 0x3C) && (varsize > 4)) { + DHD_ERROR(("PR85623WAR in place\n")); + varsize += 4; + varaddr -= 4; + } + } + + vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize); + if (!vbuffer) + return BCME_NOMEM; + + bzero(vbuffer, varsize); + bcopy(bus->vars, vbuffer, bus->varsz); + + /* Write the vars list */ + bcmerror = dhdsdio_membytes(bus, TRUE, varaddr, vbuffer, varsize); +#ifdef DHD_DEBUG + /* Verify NVRAM bytes */ + DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize)); + nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize); + if (!nvram_ularray) + return BCME_NOMEM; + + /* Upload image to verify downloaded contents. */ + memset(nvram_ularray, 0xaa, varsize); + + /* Read the vars list to temp buffer for comparison */ + bcmerror = dhdsdio_membytes(bus, FALSE, varaddr, nvram_ularray, varsize); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n", + __FUNCTION__, bcmerror, varsize, varaddr)); + } + /* Compare the org NVRAM with the one read from RAM */ + if (memcmp(vbuffer, nvram_ularray, varsize)) { + DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__)); + } else + DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n", + __FUNCTION__)); + + MFREE(bus->dhd->osh, nvram_ularray, varsize); +#endif /* DHD_DEBUG */ + + MFREE(bus->dhd->osh, vbuffer, varsize); + } + + phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize; + + phys_size += bus->dongle_ram_base; + + /* adjust to the user specified RAM */ + DHD_INFO(("Physical memory size: %d, usable memory size: %d\n", + phys_size, bus->ramsize)); + DHD_INFO(("Vars are at %d, orig varsize is %d\n", + varaddr, varsize)); + varsize = ((phys_size - 4) - varaddr); + + /* + * Determine the length token: + * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits. + */ + if (bcmerror) { + varsizew = 0; + } else { + varsizew = varsize / 4; + varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF); + varsizew = htol32(varsizew); + } + + DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew)); + + /* Write the length token to the last word */ + bcmerror = dhdsdio_membytes(bus, TRUE, (phys_size - 4), + (uint8*)&varsizew, 4); + + return bcmerror; +} + +static int +dhdsdio_download_state(dhd_bus_t *bus, bool enter) +{ + uint retries; + int bcmerror = 0; + int foundcr4 = 0; + + if (!bus->sih) + return BCME_ERROR; + /* To enter download state, disable ARM and reset SOCRAM. + * To exit download state, simply reset ARM (default is RAM boot). 
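+ *
+ * (editor's note: on ARM CR4 based chips there is no SOCRAM core; as the
+ * code below shows, entering halts the CPU via SICF_CPUHALT instead, and
+ * exiting writes the reset vector at address 0 before releasing the halt)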
+ */ + if (enter) { + bus->alp_only = TRUE; + + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + foundcr4 = 1; + } else { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + } + + if (!foundcr4) { + si_core_disable(bus->sih, 0); + if (bcmsdh_regfail(bus->sdh)) { + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + si_core_reset(bus->sih, 0, 0); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s: Failure trying to reset SOCRAM core?\n", + __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + /* Disable remap for download */ + if (REMAP_ENAB(bus) && si_socdevram_remap_isenb(bus->sih)) + dhdsdio_devram_remap(bus, FALSE); + + if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID) { + /* Disabling Remap for SRAM_3 */ + si_socram_set_bankpda(bus->sih, 0x3, 0x0); + } + + /* Clear the top word of memory */ + if (bus->ramsize) { + uint32 zeros = 0; + if (dhdsdio_membytes(bus, TRUE, bus->ramsize - 4, + (uint8*)&zeros, 4) < 0) { + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + } + } else { + /* For CR4, + * Halt ARM + * Remove ARM reset + * Read RAM base address [0x18_0000] + * [next] Download firmware + * [done at else] Populate the reset vector + * [done at else] Remove ARM halt + */ + /* Halt ARM & remove reset */ + si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT); + } + } else { + if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if (!si_iscoreup(bus->sih)) { + DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + + if ((bcmerror = dhdsdio_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } + + /* Enable remap before ARM reset but after vars.
+ * No backplane access in remap mode + */ + if (REMAP_ENAB(bus) && !si_socdevram_remap_isenb(bus->sih)) + dhdsdio_devram_remap(bus, TRUE); + + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) && + !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries); + + + if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) && + !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + } else { + /* cr4 has no socram, but tcm's */ + /* write vars */ + if ((bcmerror = dhdsdio_write_vars(bus))) { + DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__)); + goto fail; + } + + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) && + !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) { + DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries); + + /* switch back to arm core again */ + if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) { + DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__)); + bcmerror = BCME_ERROR; + goto fail; + } + /* write address 0 with reset instruction */ + bcmerror = dhdsdio_membytes(bus, TRUE, 0, + (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr)); + + if (bcmerror == BCME_OK) { + uint32 tmp; + + /* verify write */ + bcmerror = dhdsdio_membytes(bus, FALSE, 0, + (uint8 *)&tmp, sizeof(tmp)); + + if (bcmerror == BCME_OK && tmp != bus->resetinstr) { + DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n", + __FUNCTION__, bus->resetinstr)); + DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n", + __FUNCTION__, tmp)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + } + + /* now remove reset and halt and continue to run CR4 */ + } + + si_core_reset(bus->sih, 0, 0); + if (bcmsdh_regfail(bus->sdh)) { + DHD_ERROR(("%s: Failure trying to reset ARM core?\n", __FUNCTION__)); + bcmerror = BCME_SDIO_ERROR; + goto fail; + } + + /* Allow HT Clock now that the ARM is running. 
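+ *
+ * While alp_only was TRUE the clock control path is assumed to have
+ * settled for the Active Low Power clock; clearing it below permits
+ * full HT requests again. A minimal sketch of the distinction,
+ * assuming a CSR value in clkctl as read elsewhere in this file
+ * (dhdsdio_htclk() itself is not part of this hunk):
+ *
+ *	if (bus->alp_only || !SBSDIO_HTAV(clkctl))
+ *		settle for ALP;		// slow, but backplane alive
+ *	else
+ *		request HT;		// full-speed backplane access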
*/ + bus->alp_only = FALSE; + + bus->dhd->busstate = DHD_BUS_LOAD; + } + +fail: + /* Always return to SDIOD core */ + if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) + si_setcore(bus->sih, SDIOD_CORE_ID, 0); + + return bcmerror; +} + +int +dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name, + void *params, int plen, void *arg, int len, bool set) +{ + dhd_bus_t *bus = dhdp->bus; + const bcm_iovar_t *vi = NULL; + int bcmerror = 0; + int val_size; + uint32 actionid; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(name); + ASSERT(len >= 0); + + /* Get MUST have return space */ + ASSERT(set || (arg && len)); + + /* Set does NOT take qualifiers */ + ASSERT(!set || (!params && !plen)); + + /* Look up var locally; if not found pass to host driver */ + if ((vi = bcm_iovar_lookup(dhdsdio_iovars, name)) == NULL) { + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + /* Turn on clock in case SD command needs backplane */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + bcmerror = bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len, set); + + /* Check for bus configuration changes of interest */ + + /* If it was divisor change, read the new one */ + if (set && strcmp(name, "sd_divisor") == 0) { + if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0, + &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_divisor = -1; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name)); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, name, bus->sd_divisor)); + } + } + /* If it was a mode change, read the new one */ + if (set && strcmp(name, "sd_mode") == 0) { + if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0, + &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_mode = -1; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, name)); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, name, bus->sd_mode)); + } + } + /* Similar check for blocksize change */ + if (set && strcmp(name, "sd_blocksize") == 0) { + int32 fnum = 2; + if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32), + &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) { + bus->blocksize = 0; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + } else { + DHD_INFO(("%s: noted %s update, value now %d\n", + __FUNCTION__, "sd_blocksize", bus->blocksize)); + + dhdsdio_tune_fifoparam(bus); + } + } + bus->roundup = MIN(max_roundup, bus->blocksize); + + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + goto exit; + } + + DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__, + name, (set ? "set" : "get"), len, plen)); + + /* set up 'params' pointer in case this is a set command so that + * the convenience int and bool code can be common to set and get + */ + if (params == NULL) { + params = arg; + plen = len; + } + + if (vi->type == IOVT_VOID) + val_size = 0; + else if (vi->type == IOVT_BUFFER) + val_size = len; + else + /* all other types are integer sized */ + val_size = sizeof(int); + + actionid = set ? 
IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); + bcmerror = dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size); + +exit: + return bcmerror; +} + +void +dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex) +{ + osl_t *osh; + uint32 local_hostintmask; + uint8 saveclk; + uint retries; + int err; + bool wlfc_enabled = FALSE; + + if (!bus->dhd) + return; + + osh = bus->dhd->osh; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bcmsdh_waitlockfree(bus->sdh); + + if (enforce_mutex) + dhd_os_sdlock(bus->dhd); + + if ((bus->dhd->busstate == DHD_BUS_DOWN) || bus->dhd->hang_was_sent) { + /* if the firmware already hung, disable any interrupts */ + bus->dhd->busstate = DHD_BUS_DOWN; + bus->hostintmask = 0; + bcmsdh_intr_disable(bus->sdh); + } else { + + BUS_WAKE(bus); + + if (KSO_ENAB(bus)) { + + /* Enable clock for device interrupts */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Disable and clear interrupts at the chip level also */ + W_SDREG(0, &bus->regs->hostintmask, retries); + local_hostintmask = bus->hostintmask; + bus->hostintmask = 0; + + /* Change our idea of bus state */ + bus->dhd->busstate = DHD_BUS_DOWN; + + /* Force clocks on backplane to be sure F2 interrupt propagates */ + saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (!err) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_FORCE_HT), &err); + } + if (err) { + DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", + __FUNCTION__, err)); + } + + /* Turn off the bus (F2), free any pending packets */ + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL); + + /* Clear any pending interrupts now that F2 is disabled */ + W_SDREG(local_hostintmask, &bus->regs->intstatus, retries); + } + + /* Turn off the backplane clock (only) */ + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + } + +#ifdef PROP_TXSTATUS + wlfc_enabled = (dhd_wlfc_cleanup_txq(bus->dhd, NULL, 0) != WLFC_UNSUPPORTED); +#endif + if (!wlfc_enabled) { +#ifdef DHDTCPACK_SUPPRESS + /* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt + * when a new packet comes in from the network stack. + */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + /* Clear the data packet queues */ + pktq_flush(osh, &bus->txq, TRUE, NULL, 0); + } + + /* Clear any held glomming stuff */ + if (bus->glomd) + PKTFREE(osh, bus->glomd, FALSE); + + if (bus->glom) + PKTFREE(osh, bus->glom, FALSE); + + bus->glom = bus->glomd = NULL; + + /* Clear rx control and wake any waiters */ + bus->rxlen = 0; + dhd_os_ioctl_resp_wake(bus->dhd); + + /* Reset some F2 state stuff */ + bus->rxskip = FALSE; + bus->tx_seq = bus->rx_seq = 0; + + bus->tx_max = 4; + + if (enforce_mutex) + dhd_os_sdunlock(bus->dhd); +} + +#if defined(BCMSDIOH_TXGLOM) && defined(BCMSDIOH_STD) +extern uint sd_txglom; +#endif +void +dhd_txglom_enable(dhd_pub_t *dhdp, bool enable) +{ + /* can't enable host txglom by default, some platforms have no + * (or crappy) ADMA support and txglom will cause kernel assertions (e.g.
+ * panda board) + */ + dhd_bus_t *bus = dhdp->bus; +#ifdef BCMSDIOH_TXGLOM + char buf[256]; + uint32 rxglom; + int32 ret; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + +#ifdef BCMSDIOH_STD + if (enable) + enable = sd_txglom; +#endif /* BCMSDIOH_STD */ + + if (enable) { + rxglom = 1; + memset(buf, 0, sizeof(buf)); + bcm_mkiovar("bus:rxglom", (void *)&rxglom, 4, buf, sizeof(buf)); + ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); + if (ret >= 0) + bus->txglom_enable = TRUE; + else { +#ifdef BCMSDIOH_STD + sd_txglom = 0; +#endif /* BCMSDIOH_STD */ + bus->txglom_enable = FALSE; + } + } else +#endif /* BCMSDIOH_TXGLOM */ + bus->txglom_enable = FALSE; +} + +int +dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex) +{ + dhd_bus_t *bus = dhdp->bus; + dhd_timeout_t tmo; + uint retries = 0; + uint8 ready, enable; + int err, ret = 0; + uint8 saveclk; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + ASSERT(bus->dhd); + if (!bus->dhd) + return 0; + + if (enforce_mutex) + dhd_os_sdlock(bus->dhd); + + /* Make sure backplane clock is on, needed to generate F2 interrupt */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if (bus->clkstate != CLK_AVAIL) { + DHD_ERROR(("%s: clock state is wrong. state = %d\n", __FUNCTION__, bus->clkstate)); + ret = -1; + goto exit; + } + + + /* Force clocks on backplane to be sure F2 interrupt propagates */ + saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (!err) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + (saveclk | SBSDIO_FORCE_HT), &err); + } + if (err) { + DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err)); + ret = -1; + goto exit; + } + + /* Enable function 2 (frame transfers) */ + W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT), + &bus->regs->tosbmailboxdata, retries); + enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2); + + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL); + + /* Give the dongle some time to do its thing and set IOR2 */ + dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000); + + ready = 0; + while (ready != enable && !dhd_timeout_expired(&tmo)) + ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL); + + DHD_ERROR(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n", + __FUNCTION__, enable, ready, tmo.elapsed)); + + + /* If F2 successfully enabled, set core and enable interrupts */ + if (ready == enable) { + /* Make sure we're talking to the core. */ + if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0))) + bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0); + ASSERT(bus->regs != NULL); + + /* Set up the interrupt mask and enable interrupts */ + bus->hostintmask = HOSTINTMASK; + /* corerev 4 could use the newer interrupt logic to detect the frames */ + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 4) && + (bus->rxint_mode != SDIO_DEVICE_HMB_RXINT)) { + bus->hostintmask &= ~I_HMB_FRAME_IND; + bus->hostintmask |= I_XMTDATA_AVAIL; + } + W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries); + + if (bus->sih->buscorerev < 15) { + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, + (uint8)watermark, &err); + } + + /* Set bus state according to enable result */ + dhdp->busstate = DHD_BUS_DATA; + + /* Need to set fn2 block size to match fn1 block size. + * Requests to fn2 go through fn1. + * faltwig has this code conditioned with #if !BCMSPI_ANDROID. + * It would be cleaner to use the ->sdh->block_sz[fno] instead of + * 64, but this layer has no access to sdh types.
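+ *
+ * A sketch of what such a write would look like through the iovar
+ * interface used elsewhere in this file (only the get direction is
+ * exercised in this hunk, so the set below is an assumption):
+ *
+ *	int32 fnum = 2, blksz = 64;
+ *	bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32),
+ *	                &blksz, sizeof(int32), TRUE);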
+ */ + + /* bcmsdh_intr_unmask(bus->sdh); */ + + bus->intdis = FALSE; + if (bus->intr) { + DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__)); + bcmsdh_intr_enable(bus->sdh); + } else { + DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__)); + bcmsdh_intr_disable(bus->sdh); + } + + } + + + else { + /* Disable F2 again */ + enable = SDIO_FUNC_ENABLE_1; + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL); + } + + if (dhdsdio_sr_cap(bus)) { + dhdsdio_sr_init(bus); + /* Masking the chip active interrupt permanently */ + bus->hostintmask &= ~I_CHIPACTIVE; + W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries); + DHD_INFO(("%s: disable I_CHIPACTIVE in hostintmask[0x%08x]\n", + __FUNCTION__, bus->hostintmask)); + } + else + bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err); + + /* If we didn't come up, turn off backplane clock */ + if (dhdp->busstate != DHD_BUS_DATA) + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + +exit: + if (enforce_mutex) + dhd_os_sdunlock(bus->dhd); + + return ret; +} + +static void +dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx) +{ + bcmsdh_info_t *sdh = bus->sdh; + sdpcmd_regs_t *regs = bus->regs; + uint retries = 0; + uint16 lastrbc; + uint8 hi, lo; + int err; + + DHD_ERROR(("%s: %sterminate frame%s\n", __FUNCTION__, + (abort ? "abort command, " : ""), (rtx ? ", send NAK" : ""))); + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + return; + } + + if (abort) { + bcmsdh_abort(sdh, SDIO_FUNC_2); + } + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err); + if (err) { + DHD_ERROR(("%s: SBSDIO_FUNC1_FRAMECTRL cmd err\n", __FUNCTION__)); + goto fail; + } + bus->f1regdata++; + + /* Wait until the packet has been flushed (device/FIFO stable) */ + for (lastrbc = retries = 0xffff; retries > 0; retries--) { + hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI, NULL); + lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO, &err); + if (err) { + DHD_ERROR(("%s: SBSDIO_FUNC1_RFRAMEBCLO cmd err\n", __FUNCTION__)); + goto fail; + } + + bus->f1regdata += 2; + + if ((hi == 0) && (lo == 0)) + break; + + if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) { + DHD_ERROR(("%s: count growing: last 0x%04x now 0x%04x\n", + __FUNCTION__, lastrbc, ((hi << 8) + lo))); + } + lastrbc = (hi << 8) + lo; + } + + if (!retries) { + DHD_ERROR(("%s: count never zeroed: last 0x%04x\n", __FUNCTION__, lastrbc)); + } else { + DHD_INFO(("%s: flush took %d iterations\n", __FUNCTION__, (0xffff - retries))); + } + + if (rtx) { + bus->rxrtx++; + W_SDREG(SMB_NAK, &regs->tosbmailbox, retries); + bus->f1regdata++; + if (retries <= retry_limit) { + bus->rxskip = TRUE; + } + } + + /* Clear partial in any case */ + bus->nextlen = 0; + +fail: + /* If we can't reach the device, signal failure */ + if (err || bcmsdh_regfail(sdh)) + bus->dhd->busstate = DHD_BUS_DOWN; +} + +static void +dhdsdio_read_control(dhd_bus_t *bus, uint8 *hdr, uint len, uint doff) +{ + bcmsdh_info_t *sdh = bus->sdh; + uint rdlen, pad; + + int sdret; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Control data already received in aligned rxctl */ + if ((bus->bus == SPI_BUS) && (!bus->usebufpool)) + goto gotpkt; + + ASSERT(bus->rxbuf); + /* Set rxctl for frame (w/optional alignment) */ + bus->rxctl = bus->rxbuf; + if (dhd_alignctl) { + bus->rxctl += firstread; + if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN))) + bus->rxctl += (DHD_SDALIGN - pad); + bus->rxctl -= firstread; + } + ASSERT(bus->rxctl
>= bus->rxbuf); + + /* Copy the already-read portion over */ + bcopy(hdr, bus->rxctl, firstread); + if (len <= firstread) + goto gotpkt; + + /* Copy the full data pkt in gSPI case and process ioctl. */ + if (bus->bus == SPI_BUS) { + bcopy(hdr, bus->rxctl, len); + goto gotpkt; + } + + /* Raise rdlen to next SDIO block to avoid tail command */ + rdlen = len - firstread; + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((len + pad) < bus->dhd->maxctl)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + + /* Satisfy length-alignment requirements */ + if (forcealign && (rdlen & (ALIGNMENT - 1))) + rdlen = ROUNDUP(rdlen, ALIGNMENT); + + /* Drop if the read is too big or it exceeds our maximum */ + if ((rdlen + firstread) > bus->dhd->maxctl) { + DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n", + __FUNCTION__, rdlen, bus->dhd->maxctl)); + bus->dhd->rx_errors++; + dhdsdio_rxfail(bus, FALSE, FALSE); + goto done; + } + + if ((len - doff) > bus->dhd->maxctl) { + DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", + __FUNCTION__, len, (len - doff), bus->dhd->maxctl)); + bus->dhd->rx_errors++; bus->rx_toolong++; + dhdsdio_rxfail(bus, FALSE, FALSE); + goto done; + } + + + /* Read remainder of frame body into the rxctl buffer */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + (bus->rxctl + firstread), rdlen, NULL, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + /* Control frame failures need retransmission */ + if (sdret < 0) { + DHD_ERROR(("%s: read %d control bytes failed: %d\n", __FUNCTION__, rdlen, sdret)); + bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */ + dhdsdio_rxfail(bus, TRUE, TRUE); + goto done; + } + +gotpkt: + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_CTL_ON()) { + prhex("RxCtrl", bus->rxctl, len); + } +#endif + + /* Point to valid data and indicate its length */ + bus->rxctl += doff; + bus->rxlen = len - doff; + +done: + /* Awake any waiters */ + dhd_os_ioctl_resp_wake(bus->dhd); +} +int +dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, uint reorder_info_len, + void **pkt, uint32 *pkt_count); + +static uint8 +dhdsdio_rxglom(dhd_bus_t *bus, uint8 rxseq) +{ + uint16 dlen, totlen; + uint8 *dptr, num = 0; + + uint16 sublen, check; + void *pfirst, *plast, *pnext; + void * list_tail[DHD_MAX_IFS] = { NULL }; + void * list_head[DHD_MAX_IFS] = { NULL }; + uint8 idx; + osl_t *osh = bus->dhd->osh; + + int errcode; + uint8 chan, seq, doff, sfdoff; + uint8 txmax; + uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN]; + uint reorder_info_len; + + int ifidx = 0; + bool usechain = bus->use_rxchain; + + /* If packets, issue read(s) and send up packet chain */ + /* Return sequence numbers consumed? 
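+ *
+ * As parsed below, the glom descriptor (bus->glomd) is simply an
+ * array of little-endian 16-bit subframe lengths, e.g. a descriptor
+ * announcing two subframes of 0x200 and 0x100 bytes is the four
+ * bytes:
+ *
+ *	00 02 00 01
+ *
+ * Each sublen must be at least SDPCM_HDRLEN, and the first at least
+ * 2 * SDPCM_HDRLEN (superframe plus subframe header).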
*/ + + DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd, bus->glom)); + + /* If there's a descriptor, generate the packet chain */ + if (bus->glomd) { + dhd_os_sdlock_rxq(bus->dhd); + + pfirst = plast = pnext = NULL; + dlen = (uint16)PKTLEN(osh, bus->glomd); + dptr = PKTDATA(osh, bus->glomd); + if (!dlen || (dlen & 1)) { + DHD_ERROR(("%s: bad glomd len (%d), ignore descriptor\n", + __FUNCTION__, dlen)); + dlen = 0; + } + + for (totlen = num = 0; dlen; num++) { + /* Get (and move past) next length */ + sublen = ltoh16_ua(dptr); + dlen -= sizeof(uint16); + dptr += sizeof(uint16); + if ((sublen < SDPCM_HDRLEN) || + ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) { + DHD_ERROR(("%s: descriptor len %d bad: %d\n", + __FUNCTION__, num, sublen)); + pnext = NULL; + break; + } + if (sublen % DHD_SDALIGN) { + DHD_ERROR(("%s: sublen %d not a multiple of %d\n", + __FUNCTION__, sublen, DHD_SDALIGN)); + usechain = FALSE; + } + totlen += sublen; + + /* For last frame, adjust read len so total is a block multiple */ + if (!dlen) { + sublen += (ROUNDUP(totlen, bus->blocksize) - totlen); + totlen = ROUNDUP(totlen, bus->blocksize); + } + + /* Allocate/chain packet for next subframe */ + if ((pnext = PKTGET(osh, sublen + DHD_SDALIGN, FALSE)) == NULL) { + DHD_ERROR(("%s: PKTGET failed, num %d len %d\n", + __FUNCTION__, num, sublen)); + break; + } + ASSERT(!PKTLINK(pnext)); + if (!pfirst) { + ASSERT(!plast); + pfirst = plast = pnext; + } else { + ASSERT(plast); + PKTSETNEXT(osh, plast, pnext); + plast = pnext; + } + + /* Adhere to start alignment requirements */ + PKTALIGN(osh, pnext, sublen, DHD_SDALIGN); + } + + /* If all allocations succeeded, save packet chain in bus structure */ + if (pnext) { + DHD_GLOM(("%s: allocated %d-byte packet chain for %d subframes\n", + __FUNCTION__, totlen, num)); + if (DHD_GLOM_ON() && bus->nextlen) { + if (totlen != bus->nextlen) { + DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d " + "rxseq %d\n", __FUNCTION__, bus->nextlen, + totlen, rxseq)); + } + } + bus->glom = pfirst; + pfirst = pnext = NULL; + } else { + if (pfirst) + PKTFREE(osh, pfirst, FALSE); + bus->glom = NULL; + num = 0; + } + + /* Done with descriptor packet */ + PKTFREE(osh, bus->glomd, FALSE); + bus->glomd = NULL; + bus->nextlen = 0; + + dhd_os_sdunlock_rxq(bus->dhd); + } + + /* Ok -- either we just generated a packet chain, or had one from before */ + if (bus->glom) { + if (DHD_GLOM_ON()) { + DHD_GLOM(("%s: attempt superframe read, packet chain:\n", __FUNCTION__)); + for (pnext = bus->glom; pnext; pnext = PKTNEXT(osh, pnext)) { + DHD_GLOM((" %p: %p len 0x%04x (%d)\n", + pnext, (uint8*)PKTDATA(osh, pnext), + PKTLEN(osh, pnext), PKTLEN(osh, pnext))); + } + } + + pfirst = bus->glom; + dlen = (uint16)pkttotlen(osh, pfirst); + + /* Do an SDIO read for the superframe. Configurable iovar to + * read directly into the chained packet, or allocate a large + * packet and copy into the chain.
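+ *
+ * The bounce-buffer variant below boils down to (a sketch, with error
+ * handling omitted; pfirst is the head of the chain):
+ *
+ *	dhd_bcmsdh_recv_buf(..., bus->dataptr, dlen, NULL, NULL, NULL);
+ *	pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr);	// scatter copy
+ *
+ * while the chained variant hands pfirst itself to the SDIO layer and
+ * relies on its scatter/gather support.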
+ */ + if (usechain) { + errcode = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2, + F2SYNC, (uint8*)PKTDATA(osh, pfirst), + dlen, pfirst, NULL, NULL); + } else if (bus->dataptr) { + errcode = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2, + F2SYNC, bus->dataptr, + dlen, NULL, NULL, NULL); + sublen = (uint16)pktfrombuf(osh, pfirst, 0, dlen, bus->dataptr); + if (sublen != dlen) { + DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n", + __FUNCTION__, dlen, sublen)); + errcode = -1; + } + pnext = NULL; + } else { + DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", dlen)); + errcode = -1; + } + bus->f2rxdata++; + ASSERT(errcode != BCME_PENDING); + + /* On failure, kill the superframe, allow a couple retries */ + if (errcode < 0) { + DHD_ERROR(("%s: glom read of %d bytes failed: %d\n", + __FUNCTION__, dlen, errcode)); + bus->dhd->rx_errors++; + + if (bus->glomerr++ < 3) { + dhdsdio_rxfail(bus, TRUE, TRUE); + } else { + bus->glomerr = 0; + dhdsdio_rxfail(bus, TRUE, FALSE); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(osh, bus->glom, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rxglomfail++; + bus->glom = NULL; + } + return 0; + } + +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("SUPERFRAME", PKTDATA(osh, pfirst), + MIN(PKTLEN(osh, pfirst), 48)); + } +#endif + + + /* Validate the superframe header */ + dptr = (uint8 *)PKTDATA(osh, pfirst); + sublen = ltoh16_ua(dptr); + check = ltoh16_ua(dptr + sizeof(uint16)); + + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]); + bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s: got frame w/nextlen too large (%d) seq %d\n", + __FUNCTION__, bus->nextlen, seq)); + bus->nextlen = 0; + } + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + + errcode = 0; + if ((uint16)~(sublen^check)) { + DHD_ERROR(("%s (superframe): HW hdr error: len/check 0x%04x/0x%04x\n", + __FUNCTION__, sublen, check)); + errcode = -1; + } else if (ROUNDUP(sublen, bus->blocksize) != dlen) { + DHD_ERROR(("%s (superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n", + __FUNCTION__, sublen, ROUNDUP(sublen, bus->blocksize), dlen)); + errcode = -1; + } else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) != SDPCM_GLOM_CHANNEL) { + DHD_ERROR(("%s (superframe): bad channel %d\n", __FUNCTION__, + SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]))); + errcode = -1; + } else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) { + DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__)); + errcode = -1; + } else if ((doff < SDPCM_HDRLEN) || + (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) { + DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n", + __FUNCTION__, doff, sublen, PKTLEN(osh, pfirst), + SDPCM_HDRLEN)); + errcode = -1; + } + + /* Check sequence number of superframe SW header */ + if (rxseq != seq) { + DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x70) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_max; + } + bus->tx_max = txmax; + + /* Remove superframe header, remember offset */ + PKTPULL(osh, pfirst, doff); + sfdoff = doff; + + /* Validate all the subframe headers */ + for (num = 0, pnext = 
pfirst; pnext && !errcode; + num++, pnext = PKTNEXT(osh, pnext)) { + dptr = (uint8 *)PKTDATA(osh, pnext); + dlen = (uint16)PKTLEN(osh, pnext); + sublen = ltoh16_ua(dptr); + check = ltoh16_ua(dptr + sizeof(uint16)); + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("subframe", dptr, 32); + } +#endif + + if ((uint16)~(sublen^check)) { + DHD_ERROR(("%s (subframe %d): HW hdr error: " + "len/check 0x%04x/0x%04x\n", + __FUNCTION__, num, sublen, check)); + errcode = -1; + } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) { + DHD_ERROR(("%s (subframe %d): length mismatch: " + "len 0x%04x, expect 0x%04x\n", + __FUNCTION__, num, sublen, dlen)); + errcode = -1; + } else if ((chan != SDPCM_DATA_CHANNEL) && + (chan != SDPCM_EVENT_CHANNEL)) { + DHD_ERROR(("%s (subframe %d): bad channel %d\n", + __FUNCTION__, num, chan)); + errcode = -1; + } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) { + DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n", + __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN)); + errcode = -1; + } + } + + if (errcode) { + /* Terminate frame on error, request a couple retries */ + if (bus->glomerr++ < 3) { + /* Restore superframe header space */ + PKTPUSH(osh, pfirst, sfdoff); + dhdsdio_rxfail(bus, TRUE, TRUE); + } else { + bus->glomerr = 0; + dhdsdio_rxfail(bus, TRUE, FALSE); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(osh, bus->glom, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rxglomfail++; + bus->glom = NULL; + } + bus->nextlen = 0; + return 0; + } + + /* Basic SD framing looks ok - process each packet (header) */ + bus->glom = NULL; + plast = NULL; + + dhd_os_sdlock_rxq(bus->dhd); + for (num = 0; pfirst; rxseq++, pfirst = pnext) { + pnext = PKTNEXT(osh, pfirst); + PKTSETNEXT(osh, pfirst, NULL); + + dptr = (uint8 *)PKTDATA(osh, pfirst); + sublen = ltoh16_ua(dptr); + chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); + + DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n", + __FUNCTION__, num, pfirst, PKTDATA(osh, pfirst), + PKTLEN(osh, pfirst), sublen, chan, seq)); + + ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL)); + + if (rxseq != seq) { + DHD_GLOM(("%s: rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Subframe Data", dptr, dlen); + } +#endif + + PKTSETLEN(osh, pfirst, sublen); + PKTPULL(osh, pfirst, doff); + + reorder_info_len = sizeof(reorder_info_buf); + + if (PKTLEN(osh, pfirst) == 0) { + PKTFREE(bus->dhd->osh, pfirst, FALSE); + continue; + } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst, reorder_info_buf, + &reorder_info_len) != 0) { + DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__)); + bus->dhd->rx_errors++; + PKTFREE(osh, pfirst, FALSE); + continue; + } + if (reorder_info_len) { + uint32 free_buf_count; + void *ppfirst; + + ppfirst = pfirst; + /* Reordering info from the firmware */ + dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, + reorder_info_len, &ppfirst, &free_buf_count); + + if (free_buf_count == 0) { + continue; + } + else { + void *temp; + + /* go to the end of the chain and attach the pnext there */ + temp = ppfirst; + while (PKTNEXT(osh, temp) != NULL) { + temp = PKTNEXT(osh, temp); + } + pfirst = temp; + if (list_tail[ifidx] == NULL) + 
list_head[ifidx] = ppfirst; + else + PKTSETNEXT(osh, list_tail[ifidx], ppfirst); + list_tail[ifidx] = pfirst; + } + + num += (uint8)free_buf_count; + } + else { + /* this packet will go up, link back into chain and count it */ + + if (list_tail[ifidx] == NULL) { + list_head[ifidx] = list_tail[ifidx] = pfirst; + } + else { + PKTSETNEXT(osh, list_tail[ifidx], pfirst); + list_tail[ifidx] = pfirst; + } + num++; + } +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) nxt/lnk %p/%p\n", + __FUNCTION__, num, pfirst, + PKTDATA(osh, pfirst), PKTLEN(osh, pfirst), + PKTNEXT(osh, pfirst), PKTLINK(pfirst))); + prhex("", (uint8 *)PKTDATA(osh, pfirst), + MIN(PKTLEN(osh, pfirst), 32)); + } +#endif /* DHD_DEBUG */ + } + dhd_os_sdunlock_rxq(bus->dhd); + + for (idx = 0; idx < DHD_MAX_IFS; idx++) { + if (list_head[idx]) { + void *temp; + uint8 cnt = 0; + temp = list_head[idx]; + do { + temp = PKTNEXT(osh, temp); + cnt++; + } while (temp); + if (cnt) { + dhd_os_sdunlock(bus->dhd); + dhd_rx_frame(bus->dhd, idx, list_head[idx], cnt, 0); + dhd_os_sdlock(bus->dhd); + } + } + } + bus->rxglomframes++; + bus->rxglompkts += num; + } + return num; +} + + +/* Return TRUE if there may be more frames to read */ +static uint +dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished) +{ + osl_t *osh = bus->dhd->osh; + bcmsdh_info_t *sdh = bus->sdh; + + uint16 len, check; /* Extracted hardware header fields */ + uint8 chan, seq, doff; /* Extracted software header fields */ + uint8 fcbits; /* Extracted fcbits from software header */ + uint8 delta; + + void *pkt; /* Packet for event or data frames */ + uint16 pad; /* Number of pad bytes to read */ + uint16 rdlen; /* Total number of bytes to read */ + uint8 rxseq; /* Next sequence number to expect */ + uint rxleft = 0; /* Remaining number of frames allowed */ + int sdret; /* Return code from bcmsdh calls */ + uint8 txmax; /* Maximum tx sequence offered */ + bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */ + uint8 *rxbuf; + int ifidx = 0; + uint rxcount = 0; /* Total frames read */ + uchar reorder_info_buf[WLHOST_REORDERDATA_TOTLEN]; + uint reorder_info_len; + uint pkt_count; + +#if defined(DHD_DEBUG) || defined(SDTEST) + bool sdtest = FALSE; /* To limit message spew from test mode */ +#endif + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bus->readframes = TRUE; + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: KSO off\n", __FUNCTION__)); + bus->readframes = FALSE; + return 0; + } + + ASSERT(maxframes); + +#ifdef SDTEST + /* Allow pktgen to override maxframes */ + if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) { + maxframes = bus->pktgen_count; + sdtest = TRUE; + } +#endif + + /* Not finished unless we encounter no more frames indication */ + *finished = FALSE; + + + for (rxseq = bus->rx_seq, rxleft = maxframes; + !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN; + rxseq++, rxleft--) { +#ifdef DHDTCPACK_SUP_DBG + if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_DELAYTX) { + if (bus->dotxinrx == FALSE) + DHD_ERROR(("%s %d: dotxinrx FALSE with tcpack_sub_mode %d\n", + __FUNCTION__, __LINE__, bus->dhd->tcpack_sup_mode)); + } +#ifdef DEBUG_COUNTER + else if (pktq_mlen(&bus->txq, ~bus->flowcontrol) > 0) { + tack_tbl.cnt[bus->dotxinrx ? 
6 : 7]++; + } +#endif /* DEBUG_COUNTER */ +#endif /* DHDTCPACK_SUP_DBG */ + /* tx more to improve rx performance */ + if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) { + dhdsdio_sendpendctl(bus); + } else if (bus->dotxinrx && (bus->clkstate == CLK_AVAIL) && + !bus->fcstate && DATAOK(bus) && + (pktq_mlen(&bus->txq, ~bus->flowcontrol) > bus->txinrx_thres)) { + dhdsdio_sendfromq(bus, dhd_txbound); +#ifdef DHDTCPACK_SUPPRESS + /* In TCPACK_SUP_DELAYTX mode, do txinrx only if + * 1. Any DATA packet to TX + * 2. TCPACK to TCPDATA PSH packets. + * in bus txq. + */ + bus->dotxinrx = (bus->dhd->tcpack_sup_mode == TCPACK_SUP_DELAYTX) ? + FALSE : TRUE; +#endif + } + + /* Handle glomming separately */ + if (bus->glom || bus->glomd) { + uint8 cnt; + DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n", + __FUNCTION__, bus->glomd, bus->glom)); + cnt = dhdsdio_rxglom(bus, rxseq); + DHD_GLOM(("%s: rxglom returned %d\n", __FUNCTION__, cnt)); + rxseq += cnt - 1; + rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1; + continue; + } + + /* Try doing single read if we can */ + if (dhd_readahead && bus->nextlen) { + uint16 nextlen = bus->nextlen; + bus->nextlen = 0; + + if (bus->bus == SPI_BUS) { + rdlen = len = nextlen; + } + else { + rdlen = len = nextlen << 4; + + /* Pad read to blocksize for efficiency */ + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((rdlen + pad + firstread) < MAX_RX_DATASZ)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + } + + /* We use bus->rxctl buffer in WinXP for initial control pkt receives. + * Later we use buffer-poll for data as well as control packets. + * This is required because dhd receives full frame in gSPI unlike SDIO. + * After the frame is received we have to distinguish whether it is data + * or non-data frame. + */ + /* Allocate a packet buffer */ + dhd_os_sdlock_rxq(bus->dhd); + if (!(pkt = PKTGET(osh, rdlen + DHD_SDALIGN, FALSE))) { + if (bus->bus == SPI_BUS) { + bus->usebufpool = FALSE; + bus->rxctl = bus->rxbuf; + if (dhd_alignctl) { + bus->rxctl += firstread; + if ((pad = ((uintptr)bus->rxctl % DHD_SDALIGN))) + bus->rxctl += (DHD_SDALIGN - pad); + bus->rxctl -= firstread; + } + ASSERT(bus->rxctl >= bus->rxbuf); + rxbuf = bus->rxctl; + /* Read the entire frame */ + sdret = dhd_bcmsdh_recv_buf(bus, + bcmsdh_cur_sbwad(sdh), + SDIO_FUNC_2, + F2SYNC, rxbuf, rdlen, + NULL, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + + /* Control frame failures need retransmission */ + if (sdret < 0) { + DHD_ERROR(("%s: read %d control bytes failed: %d\n", + __FUNCTION__, rdlen, sdret)); + /* dhd.rx_ctlerrs is higher level */ + bus->rxc_errors++; + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, TRUE, + (bus->bus == SPI_BUS) ? 
FALSE : TRUE); + continue; + } + } else { + /* Give up on data, request rtx of events */ + DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d " + "expected rxseq %d\n", + __FUNCTION__, len, rdlen, rxseq)); + /* Just go try again w/normal header read */ + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } + } else { + if (bus->bus == SPI_BUS) + bus->usebufpool = TRUE; + + ASSERT(!PKTLINK(pkt)); + PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN); + rxbuf = (uint8 *)PKTDATA(osh, pkt); + /* Read the entire frame */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), + SDIO_FUNC_2, + F2SYNC, rxbuf, rdlen, + pkt, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + if (sdret < 0) { + DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n", + __FUNCTION__, rdlen, sdret)); + PKTFREE(bus->dhd->osh, pkt, FALSE); + bus->dhd->rx_errors++; + dhd_os_sdunlock_rxq(bus->dhd); + /* Force retry w/normal header read. Don't attempt NAK for + * gSPI + */ + dhdsdio_rxfail(bus, TRUE, + (bus->bus == SPI_BUS) ? FALSE : TRUE); + continue; + } + } + dhd_os_sdunlock_rxq(bus->dhd); + + /* Now check the header */ + bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN); + + /* Extract hardware header fields */ + len = ltoh16_ua(bus->rxhdr); + check = ltoh16_ua(bus->rxhdr + sizeof(uint16)); + + /* All zeros means readahead info was bad */ + if (!(len|check)) { + DHD_INFO(("%s (nextlen): read zeros in HW header???\n", + __FUNCTION__)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Validate check bytes */ + if ((uint16)~(len^check)) { + DHD_ERROR(("%s (nextlen): HW hdr error: nextlen/len/check" + " 0x%04x/0x%04x/0x%04x\n", __FUNCTION__, nextlen, + len, check)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + bus->rx_badhdr++; + dhdsdio_rxfail(bus, FALSE, FALSE); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Validate frame length */ + if (len < SDPCM_HDRLEN) { + DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n", + __FUNCTION__, len)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + GSPI_PR55150_BAILOUT; + continue; + } + + /* Check for consistency with readahead info */ + len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4)); + if (len_consistent) { + /* Mismatch, force retry w/normal header (may be >4K) */ + DHD_ERROR(("%s (nextlen): mismatch, nextlen %d len %d rnd %d; " + "expected rxseq %d\n", + __FUNCTION__, nextlen, len, ROUNDUP(len, 16), rxseq)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, TRUE, (bus->bus == SPI_BUS) ? 
FALSE : TRUE); + GSPI_PR55150_BAILOUT; + continue; + } + + + /* Extract software header fields */ + chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + bus->nextlen = + bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s (nextlen): got frame w/nextlen too large" + " (%d), seq %d\n", __FUNCTION__, bus->nextlen, + seq)); + bus->nextlen = 0; + } + + bus->dhd->rx_readahead_cnt ++; + /* Handle Flow Control */ + fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + delta = 0; + if (~bus->flowcontrol & fcbits) { + bus->fc_xoff++; + delta = 1; + } + if (bus->flowcontrol & ~fcbits) { + bus->fc_xon++; + delta = 1; + } + + if (delta) { + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* Check and update sequence number */ + if (rxseq != seq) { + DHD_INFO(("%s (nextlen): rx_seq %d, expected %d\n", + __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x70) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_max; + } + bus->tx_max = txmax; + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Data", rxbuf, len); + } else if (DHD_HDRS_ON()) { + prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN); + } +#endif + + if (chan == SDPCM_CONTROL_CHANNEL) { + if (bus->bus == SPI_BUS) { + dhdsdio_read_control(bus, rxbuf, len, doff); + if (bus->usebufpool) { + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + } + continue; + } else { + DHD_ERROR(("%s (nextlen): readahead on control" + " packet %d?\n", __FUNCTION__, seq)); + /* Force retry w/normal header read */ + bus->nextlen = 0; + dhdsdio_rxfail(bus, FALSE, TRUE); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } + } + + if ((bus->bus == SPI_BUS) && !bus->usebufpool) { + DHD_ERROR(("Received %d bytes on %d channel. 
Running out of " + "rx pktbuf's or not yet malloced.\n", len, chan)); + continue; + } + + /* Validate data offset */ + if ((doff < SDPCM_HDRLEN) || (doff > len)) { + DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n", + __FUNCTION__, doff, len, SDPCM_HDRLEN)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE2(); + dhd_os_sdunlock_rxq(bus->dhd); + ASSERT(0); + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* All done with this one -- now deliver the packet */ + goto deliver; + } + /* gSPI frames should not be handled in fractions */ + if (bus->bus == SPI_BUS) { + break; + } + + /* Read frame header (hardware and software) */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + bus->rxhdr, firstread, NULL, NULL, NULL); + bus->f2rxhdrs++; + ASSERT(sdret != BCME_PENDING); + + if (sdret < 0) { + DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __FUNCTION__, sdret)); + bus->rx_hdrfail++; + dhdsdio_rxfail(bus, TRUE, TRUE); + continue; + } + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() || DHD_HDRS_ON()) { + prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN); + } +#endif + + /* Extract hardware header fields */ + len = ltoh16_ua(bus->rxhdr); + check = ltoh16_ua(bus->rxhdr + sizeof(uint16)); + + /* All zeros means no more frames */ + if (!(len|check)) { + *finished = TRUE; + break; + } + + /* Validate check bytes */ + if ((uint16)~(len^check)) { + DHD_ERROR(("%s: HW hdr error: len/check 0x%04x/0x%04x\n", + __FUNCTION__, len, check)); + bus->rx_badhdr++; + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* Validate frame length */ + if (len < SDPCM_HDRLEN) { + DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len)); + continue; + } + + /* Extract software header fields */ + chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + /* Validate data offset */ + if ((doff < SDPCM_HDRLEN) || (doff > len)) { + DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n", + __FUNCTION__, doff, len, SDPCM_HDRLEN, seq)); + bus->rx_badhdr++; + ASSERT(0); + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + /* Save the readahead length if there is one */ + bus->nextlen = bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; + if ((bus->nextlen << 4) > MAX_RX_DATASZ) { + DHD_INFO(("%s (nextlen): got frame w/nextlen too large (%d), seq %d\n", + __FUNCTION__, bus->nextlen, seq)); + bus->nextlen = 0; + } + + /* Handle Flow Control */ + fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]); + + delta = 0; + if (~bus->flowcontrol & fcbits) { + bus->fc_xoff++; + delta = 1; + } + if (bus->flowcontrol & ~fcbits) { + bus->fc_xon++; + delta = 1; + } + + if (delta) { + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* Check and update sequence number */ + if (rxseq != seq) { + DHD_INFO(("%s: rx_seq %d, expected %d\n", __FUNCTION__, seq, rxseq)); + bus->rx_badseq++; + rxseq = seq; + } + + /* Check window for sanity */ + if ((uint8)(txmax - bus->tx_seq) > 0x70) { + DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n", + __FUNCTION__, txmax, bus->tx_seq)); + txmax = bus->tx_max; + } + bus->tx_max = txmax; + + /* Call a separate function for control frames */ + if (chan == SDPCM_CONTROL_CHANNEL) { + dhdsdio_read_control(bus, bus->rxhdr, len, doff); + continue; + } + + ASSERT((chan == SDPCM_DATA_CHANNEL) || (chan == SDPCM_EVENT_CHANNEL) || + (chan == SDPCM_TEST_CHANNEL) || 
(chan == SDPCM_GLOM_CHANNEL)); + + /* Length to read */ + rdlen = (len > firstread) ? (len - firstread) : 0; + + /* May pad read to blocksize for efficiency */ + if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if ((pad <= bus->roundup) && (pad < bus->blocksize) && + ((rdlen + pad + firstread) < MAX_RX_DATASZ)) + rdlen += pad; + } else if (rdlen % DHD_SDALIGN) { + rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN); + } + + /* Satisfy length-alignment requirements */ + if (forcealign && (rdlen & (ALIGNMENT - 1))) + rdlen = ROUNDUP(rdlen, ALIGNMENT); + + if ((rdlen + firstread) > MAX_RX_DATASZ) { + /* Too long -- skip this frame */ + DHD_ERROR(("%s: too long: len %d rdlen %d\n", __FUNCTION__, len, rdlen)); + bus->dhd->rx_errors++; bus->rx_toolong++; + dhdsdio_rxfail(bus, FALSE, FALSE); + continue; + } + + dhd_os_sdlock_rxq(bus->dhd); + if (!(pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), FALSE))) { + /* Give up on data, request rtx of events */ + DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n", + __FUNCTION__, rdlen, chan)); + bus->dhd->rx_dropped++; + dhd_os_sdunlock_rxq(bus->dhd); + dhdsdio_rxfail(bus, FALSE, RETRYCHAN(chan)); + continue; + } + dhd_os_sdunlock_rxq(bus->dhd); + + ASSERT(!PKTLINK(pkt)); + + /* Leave room for what we already read, and align remainder */ + ASSERT(firstread < (PKTLEN(osh, pkt))); + PKTPULL(osh, pkt, firstread); + PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN); + + /* Read the remaining frame data */ + sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC, + ((uint8 *)PKTDATA(osh, pkt)), rdlen, pkt, NULL, NULL); + bus->f2rxdata++; + ASSERT(sdret != BCME_PENDING); + + if (sdret < 0) { + DHD_ERROR(("%s: read %d %s bytes failed: %d\n", __FUNCTION__, rdlen, + ((chan == SDPCM_EVENT_CHANNEL) ? "event" : + ((chan == SDPCM_DATA_CHANNEL) ? 
"data" : "test")), sdret)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->dhd->rx_errors++; + dhdsdio_rxfail(bus, TRUE, RETRYCHAN(chan)); + continue; + } + + /* Copy the already-read portion */ + PKTPUSH(osh, pkt, firstread); + bcopy(bus->rxhdr, PKTDATA(osh, pkt), firstread); + +#ifdef DHD_DEBUG + if (DHD_BYTES_ON() && DHD_DATA_ON()) { + prhex("Rx Data", PKTDATA(osh, pkt), len); + } +#endif + +deliver: + /* Save superframe descriptor and allocate packet frame */ + if (chan == SDPCM_GLOM_CHANNEL) { + if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) { + DHD_GLOM(("%s: got glom descriptor, %d bytes:\n", + __FUNCTION__, len)); +#ifdef DHD_DEBUG + if (DHD_GLOM_ON()) { + prhex("Glom Data", PKTDATA(osh, pkt), len); + } +#endif + PKTSETLEN(osh, pkt, len); + ASSERT(doff == SDPCM_HDRLEN); + PKTPULL(osh, pkt, SDPCM_HDRLEN); + bus->glomd = pkt; + } else { + DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__)); + dhdsdio_rxfail(bus, FALSE, FALSE); + } + continue; + } + + /* Fill in packet len and prio, deliver upward */ + PKTSETLEN(osh, pkt, len); + PKTPULL(osh, pkt, doff); + +#ifdef SDTEST + /* Test channel packets are processed separately */ + if (chan == SDPCM_TEST_CHANNEL) { + dhdsdio_testrcv(bus, pkt, seq); + continue; + } +#endif /* SDTEST */ + + if (PKTLEN(osh, pkt) == 0) { + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + continue; + } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt, reorder_info_buf, + &reorder_info_len) != 0) { + DHD_ERROR(("%s: rx protocol error\n", __FUNCTION__)); + dhd_os_sdlock_rxq(bus->dhd); + PKTFREE(bus->dhd->osh, pkt, FALSE); + dhd_os_sdunlock_rxq(bus->dhd); + bus->dhd->rx_errors++; + continue; + } + if (reorder_info_len) { + /* Reordering info from the firmware */ + dhd_process_pkt_reorder_info(bus->dhd, reorder_info_buf, reorder_info_len, + &pkt, &pkt_count); + if (pkt_count == 0) + continue; + } + else + pkt_count = 1; + + /* Unlock during rx call */ + dhd_os_sdunlock(bus->dhd); + dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, chan); + dhd_os_sdlock(bus->dhd); + } + rxcount = maxframes - rxleft; +#ifdef DHD_DEBUG + /* Message if we hit the limit */ + if (!rxleft && !sdtest) + DHD_DATA(("%s: hit rx limit of %d frames\n", __FUNCTION__, maxframes)); + else +#endif /* DHD_DEBUG */ + DHD_DATA(("%s: processed %d frames\n", __FUNCTION__, rxcount)); + /* Back off rxseq if awaiting rtx, update rx_seq */ + if (bus->rxskip) + rxseq--; + bus->rx_seq = rxseq; + + if (bus->reqbussleep) + { + dhdsdio_bussleep(bus, TRUE); + bus->reqbussleep = FALSE; + } + bus->readframes = FALSE; + + return rxcount; +} + +static uint32 +dhdsdio_hostmail(dhd_bus_t *bus) +{ + sdpcmd_regs_t *regs = bus->regs; + uint32 intstatus = 0; + uint32 hmb_data; + uint8 fcbits; + uint retries = 0; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Read mailbox data and ack that we did so */ + R_SDREG(hmb_data, ®s->tohostmailboxdata, retries); + if (retries <= retry_limit) + W_SDREG(SMB_INT_ACK, ®s->tosbmailbox, retries); + bus->f1regdata += 2; + + /* Dongle recomposed rx frames, accept them again */ + if (hmb_data & HMB_DATA_NAKHANDLED) { + DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n", bus->rx_seq)); + if (!bus->rxskip) { + DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __FUNCTION__)); + } + bus->rxskip = FALSE; + intstatus |= FRAME_AVAIL_MASK(bus); + } + + /* + * DEVREADY does not occur with gSPI. 
+ */ + if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) { + bus->sdpcm_ver = (hmb_data & HMB_DATA_VERSION_MASK) >> HMB_DATA_VERSION_SHIFT; + if (bus->sdpcm_ver != SDPCM_PROT_VERSION) + DHD_ERROR(("Version mismatch, dongle reports %d, expecting %d\n", + bus->sdpcm_ver, SDPCM_PROT_VERSION)); + else + DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver)); + /* make sure corecontrol is set properly for SDIO_DEVICE_RXDATAINT_MODE_1 */ + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) && + (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1)) { + uint32 val; + + val = R_REG(bus->dhd->osh, &bus->regs->corecontrol); + val &= ~CC_XMTDATAAVAIL_MODE; + val |= CC_XMTDATAAVAIL_CTRL; + W_REG(bus->dhd->osh, &bus->regs->corecontrol, val); + + val = R_REG(bus->dhd->osh, &bus->regs->corecontrol); + } + +#ifdef DHD_DEBUG + /* Retrieve console state address now that firmware should have updated it */ + { + sdpcm_shared_t shared; + if (dhdsdio_readshared(bus, &shared) == 0) + bus->console_addr = shared.console_addr; + } +#endif /* DHD_DEBUG */ + } + + /* + * Flow Control has been moved into the RX headers and this out-of-band + * method isn't used any more. Leave this here for possible backward + * compatibility with older dongles + */ + if (hmb_data & HMB_DATA_FC) { + fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> HMB_DATA_FCDATA_SHIFT; + + if (fcbits & ~bus->flowcontrol) + bus->fc_xoff++; + if (bus->flowcontrol & ~fcbits) + bus->fc_xon++; + + bus->fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* At least print a message if FW halted */ + if (hmb_data & HMB_DATA_FWHALT) { + DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED : set BUS DOWN\n")); + dhdsdio_checkdied(bus, NULL, 0); + bus->dhd->busstate = DHD_BUS_DOWN; + } + + /* Shouldn't be any others */ + if (hmb_data & ~(HMB_DATA_DEVREADY | + HMB_DATA_FWHALT | + HMB_DATA_NAKHANDLED | + HMB_DATA_FC | + HMB_DATA_FWREADY | + HMB_DATA_FCDATA_MASK | + HMB_DATA_VERSION_MASK)) { + DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data)); + } + + return intstatus; +} + +static bool +dhdsdio_dpc(dhd_bus_t *bus) +{ + bcmsdh_info_t *sdh = bus->sdh; + sdpcmd_regs_t *regs = bus->regs; + uint32 intstatus, newstatus = 0; + uint retries = 0; + uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */ + uint txlimit = dhd_txbound; /* Tx frames to send before resched */ + uint framecnt = 0; /* Temporary counter of tx/rx frames */ + bool rxdone = TRUE; /* Flag for no more read data */ + bool resched = FALSE; /* Flag indicating resched wanted */ +#ifdef DEBUG_DPC_THREAD_WATCHDOG + bool is_resched_by_readframe = FALSE; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + dhd_os_sdlock(bus->dhd); + + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__)); + bus->intstatus = 0; + dhd_os_sdunlock(bus->dhd); + return 0; + } + + /* Start with leftover status bits */ + intstatus = bus->intstatus; + + if (!SLPAUTO_ENAB(bus) && !KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + goto exit; + } + + /* If waiting for HTAVAIL, check status */ + if (!SLPAUTO_ENAB(bus) && (bus->clkstate == CLK_PENDING)) { + int err; + uint8 clkctl, devctl = 0; + +#ifdef DHD_DEBUG + /* Check for inconsistent device control */ + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + if (err) { + DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } else { + ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY); +
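+ /* The CLK_PENDING path is assumed to have switched the device to
+ * "clock-available-only" interrupts earlier, so DEVCTL is expected
+ * to still carry SBSDIO_DEVCTL_CA_INT_ONLY here; a clear bit would
+ * mean the device control register changed behind our back.
+ */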
} +#endif /* DHD_DEBUG */ + + /* Read CSR, if clock on switch to AVAIL, else ignore */ + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (err) { + DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } + + DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl, clkctl)); + + if (SBSDIO_HTAV(clkctl)) { + devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err); + if (err) { + DHD_ERROR(("%s: error reading DEVCTL: %d\n", + __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err); + if (err) { + DHD_ERROR(("%s: error writing DEVCTL: %d\n", + __FUNCTION__, err)); + bus->dhd->busstate = DHD_BUS_DOWN; + } + bus->clkstate = CLK_AVAIL; + } else { + goto clkwait; + } + } + + BUS_WAKE(bus); + + /* Make sure backplane clock is on */ + dhdsdio_clkctl(bus, CLK_AVAIL, TRUE); + if (bus->clkstate != CLK_AVAIL) + goto clkwait; + + /* Pending interrupt indicates new device status */ + if (bus->ipend) { + bus->ipend = FALSE; + R_SDREG(newstatus, &regs->intstatus, retries); + bus->f1regdata++; + if (bcmsdh_regfail(bus->sdh)) + newstatus = 0; + newstatus &= bus->hostintmask; + bus->fcstate = !!(newstatus & I_HMB_FC_STATE); + if (newstatus) { + bus->f1regdata++; + if ((bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0) && + (newstatus == I_XMTDATA_AVAIL)) { + } + else + W_SDREG(newstatus, &regs->intstatus, retries); + } + } + + /* Merge new bits with previous */ + intstatus |= newstatus; + bus->intstatus = 0; + + /* Handle flow-control change: read new state in case our ack + * crossed another change interrupt. If change still set, assume + * FC ON for safety, let next loop through do the debounce. + */ + if (intstatus & I_HMB_FC_CHANGE) { + intstatus &= ~I_HMB_FC_CHANGE; + W_SDREG(I_HMB_FC_CHANGE, &regs->intstatus, retries); + R_SDREG(newstatus, &regs->intstatus, retries); + bus->f1regdata += 2; + bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)); + intstatus |= (newstatus & bus->hostintmask); + } + + /* Just being here means nothing more to do for chipactive */ + if (intstatus & I_CHIPACTIVE) { + /* ASSERT(bus->clkstate == CLK_AVAIL); */ + intstatus &= ~I_CHIPACTIVE; + } + + /* Handle host mailbox indication */ + if (intstatus & I_HMB_HOST_INT) { + intstatus &= ~I_HMB_HOST_INT; + intstatus |= dhdsdio_hostmail(bus); + } + + /* Generally don't ask for these, can get CRC errors...
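+ *
+ * The device-side copy of these bits was normally already acked via
+ * the W_SDREG(newstatus, &regs->intstatus, retries) above, so the
+ * handlers below only log the event and clear the bit in the local
+ * intstatus word, e.g.:
+ *
+ *	intstatus &= ~I_WR_OOSYNC;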
*/ + if (intstatus & I_WR_OOSYNC) { + DHD_ERROR(("Dongle reports WR_OOSYNC\n")); + intstatus &= ~I_WR_OOSYNC; + } + + if (intstatus & I_RD_OOSYNC) { + DHD_ERROR(("Dongle reports RD_OOSYNC\n")); + intstatus &= ~I_RD_OOSYNC; + } + + if (intstatus & I_SBINT) { + DHD_ERROR(("Dongle reports SBINT\n")); + intstatus &= ~I_SBINT; + } + + /* Would be active due to wake-wlan in gSPI */ + if (intstatus & I_CHIPACTIVE) { + DHD_INFO(("Dongle reports CHIPACTIVE\n")); + intstatus &= ~I_CHIPACTIVE; + } + + if (intstatus & I_HMB_FC_STATE) { + DHD_INFO(("Dongle reports HMB_FC_STATE\n")); + intstatus &= ~I_HMB_FC_STATE; + } + + /* Ignore frame indications if rxskip is set */ + if (bus->rxskip) { + intstatus &= ~FRAME_AVAIL_MASK(bus); + } + + /* On frame indication, read available frames */ + if (PKT_AVAILABLE(bus, intstatus)) { + framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone); + if (rxdone || bus->rxskip) + intstatus &= ~FRAME_AVAIL_MASK(bus); + rxlimit -= MIN(framecnt, rxlimit); + } + + /* Keep still-pending events for next scheduling */ + bus->intstatus = intstatus; + +clkwait: + /* Re-enable interrupts to detect new device events (mailbox, rx frame) + * or clock availability. (Allows tx loop to check ipend if desired.) + * (Unless register access seems hosed, as we may not be able to ACK...) + */ + if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) { + DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n", + __FUNCTION__, rxdone, framecnt)); + bus->intdis = FALSE; +#if defined(OOB_INTR_ONLY) + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif /* defined(OOB_INTR_ONLY) */ + bcmsdh_intr_enable(sdh); + } + +#if defined(OOB_INTR_ONLY) && !defined(HW_OOB) + /* In case of SW-OOB (using edge trigger), + * check interrupt status in the dongle again after enabling the irq + * on the host, and reschedule the dpc if an interrupt is pending in + * the dongle. There is a chance of missing the OOB interrupt while + * the irq is disabled on the host. + * No need to do this with HW-OOB (level trigger). + */ + R_SDREG(newstatus, &regs->intstatus, retries); + if (bcmsdh_regfail(bus->sdh)) + newstatus = 0; + if (newstatus & bus->hostintmask) { + bus->ipend = TRUE; + resched = TRUE; + } +#endif /* defined(OOB_INTR_ONLY) && !defined(HW_OOB) */ + +#ifdef PROP_TXSTATUS + dhd_wlfc_commit_packets(bus->dhd, (f_commitpkt_t)dhd_bus_txdata, (void *)bus, NULL, FALSE); +#endif + + if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) + dhdsdio_sendpendctl(bus); + + /* Send queued frames (limit 1 if rx may still be pending) */ + else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate && + pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) { + framecnt = rxdone ?
txlimit : MIN(txlimit, dhd_txminmax); + framecnt = dhdsdio_sendfromq(bus, framecnt); + txlimit -= framecnt; + } + /* Resched the DPC if ctrl cmd is pending on bus credit */ + if (bus->ctrl_frame_stat) + resched = TRUE; + + /* Resched if events or tx frames are pending, else await next interrupt */ + /* On failed register access, all bets are off: no resched or interrupts */ + if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) { + if ((bus->sih && bus->sih->buscorerev >= 12) && !(dhdsdio_sleepcsr_get(bus) & + SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) { + /* Bus failed because of KSO */ + DHD_ERROR(("%s: Bus failed due to KSO\n", __FUNCTION__)); + bus->kso = FALSE; + } else { + DHD_ERROR(("%s: failed backplane access over SDIO, halting operation\n", + __FUNCTION__)); + bus->dhd->busstate = DHD_BUS_DOWN; + bus->intstatus = 0; + } + } else if (bus->clkstate == CLK_PENDING) { + /* Awaiting I_CHIPACTIVE; don't resched */ + } else if (bus->intstatus || bus->ipend || + (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) || + PKT_AVAILABLE(bus, bus->intstatus)) { /* Read multiple frames */ + resched = TRUE; + } + + bus->dpc_sched = resched; + + /* If we're done for now, turn off clock request. */ + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && (bus->clkstate != CLK_PENDING)) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + +exit: + + if (!resched && dhd_dpcpoll) { + if (dhdsdio_readframes(bus, dhd_rxbound, &rxdone) != 0) { + resched = TRUE; +#ifdef DEBUG_DPC_THREAD_WATCHDOG + is_resched_by_readframe = TRUE; +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + } + } + + dhd_os_sdunlock(bus->dhd); +#ifdef DEBUG_DPC_THREAD_WATCHDOG + if (bus->dhd->dhd_bug_on) { + DHD_INFO(("%s: resched = %d ctrl_frame_stat = %d intstatus 0x%08x" + " ipend = %d pktq_mlen = %d is_resched_by_readframe = %d \n", + __FUNCTION__, resched, bus->ctrl_frame_stat, + bus->intstatus, bus->ipend, + pktq_mlen(&bus->txq, ~bus->flowcontrol), is_resched_by_readframe)); + + bus->dhd->dhd_bug_on = FALSE; + } +#endif /* DEBUG_DPC_THREAD_WATCHDOG */ + return resched; +} + +bool +dhd_bus_dpc(struct dhd_bus *bus) +{ + bool resched; + + /* Call the DPC directly. */ + DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__)); + resched = dhdsdio_dpc(bus); + + return resched; +} + +void +dhdsdio_isr(void *arg) +{ + dhd_bus_t *bus = (dhd_bus_t*)arg; + bcmsdh_info_t *sdh; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (!bus) { + DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__)); + return; + } + sdh = bus->sdh; + + if (bus->dhd->busstate == DHD_BUS_DOWN) { + DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__)); + return; + } + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + /* Count the interrupt call */ + bus->intrcount++; + bus->ipend = TRUE; + + /* Shouldn't get this interrupt if we're sleeping? */ + if (!SLPAUTO_ENAB(bus)) { + if (bus->sleeping) { + DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n")); + return; + } else if (!KSO_ENAB(bus)) { + DHD_ERROR(("ISR in devsleep 1\n")); + } + } + + /* Disable additional interrupts (is this needed now)? 
*/
+    if (bus->intr) {
+        DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+    } else {
+        DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
+    }
+
+    bcmsdh_intr_disable(sdh);
+    bus->intdis = TRUE;
+
+#if defined(SDIO_ISR_THREAD)
+    DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __FUNCTION__));
+    DHD_OS_WAKE_LOCK(bus->dhd);
+    dhdsdio_dpc(bus);
+    DHD_OS_WAKE_UNLOCK(bus->dhd);
+#else
+    bus->dpc_sched = TRUE;
+    dhd_sched_dpc(bus->dhd);
+
+#endif /* defined(SDIO_ISR_THREAD) */
+
+}
+
+#ifdef SDTEST
+static void
+dhdsdio_pktgen_init(dhd_bus_t *bus)
+{
+    /* Default to specified length, or full range */
+    if (dhd_pktgen_len) {
+        bus->pktgen_maxlen = MIN(dhd_pktgen_len, MAX_PKTGEN_LEN);
+        bus->pktgen_minlen = bus->pktgen_maxlen;
+    } else {
+        bus->pktgen_maxlen = MAX_PKTGEN_LEN;
+        bus->pktgen_minlen = 0;
+    }
+    bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+    /* Default to per-watchdog burst with 10s print time */
+    bus->pktgen_freq = 1;
+    bus->pktgen_print = dhd_watchdog_ms ? (10000 / dhd_watchdog_ms) : 0;
+    bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000;
+
+    /* Default to echo mode */
+    bus->pktgen_mode = DHD_PKTGEN_ECHO;
+    bus->pktgen_stop = 1;
+}
+
+static void
+dhdsdio_pktgen(dhd_bus_t *bus)
+{
+    void *pkt;
+    uint8 *data;
+    uint pktcount;
+    uint fillbyte;
+    osl_t *osh = bus->dhd->osh;
+    uint16 len;
+    ulong time_lapse;
+    uint sent_pkts;
+    uint rcvd_pkts;
+
+    /* Display current count if appropriate */
+    if (bus->pktgen_print && (++bus->pktgen_ptick >= bus->pktgen_print)) {
+        bus->pktgen_ptick = 0;
+        printf("%s: send attempts %d, rcvd %d, errors %d\n",
+            __FUNCTION__, bus->pktgen_sent, bus->pktgen_rcvd, bus->pktgen_fail);
+
+        /* Print throughput stats only for constant length packet runs */
+        if (bus->pktgen_minlen == bus->pktgen_maxlen) {
+            time_lapse = jiffies - bus->pktgen_prev_time;
+            bus->pktgen_prev_time = jiffies;
+            sent_pkts = bus->pktgen_sent - bus->pktgen_prev_sent;
+            bus->pktgen_prev_sent = bus->pktgen_sent;
+            rcvd_pkts = bus->pktgen_rcvd - bus->pktgen_prev_rcvd;
+            bus->pktgen_prev_rcvd = bus->pktgen_rcvd;
+
+            printf("%s: Tx Throughput %d kbps, Rx Throughput %d kbps\n",
+                __FUNCTION__,
+                (sent_pkts * bus->pktgen_len / jiffies_to_msecs(time_lapse)) * 8,
+                (rcvd_pkts * bus->pktgen_len / jiffies_to_msecs(time_lapse)) * 8);
+        }
+    }
+
+    /* For recv mode, just make sure dongle has started sending */
+    if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+        if (bus->pktgen_rcv_state == PKTGEN_RCV_IDLE) {
+            bus->pktgen_rcv_state = PKTGEN_RCV_ONGOING;
+            dhdsdio_sdtest_set(bus, bus->pktgen_total);
+        }
+        return;
+    }
+
+    /* Otherwise, generate or request the specified number of packets */
+    for (pktcount = 0; pktcount < bus->pktgen_count; pktcount++) {
+        /* Stop if total has been reached */
+        if (bus->pktgen_total && (bus->pktgen_sent >= bus->pktgen_total)) {
+            bus->pktgen_count = 0;
+            break;
+        }
+
+        /* Allocate an appropriate-sized packet */
+        if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) {
+            len = SDPCM_TEST_PKT_CNT_FLD_LEN;
+        } else {
+            len = bus->pktgen_len;
+        }
+        if (!(pkt = PKTGET(osh, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
+            TRUE))) {
+            DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+            break;
+        }
+        PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+        data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+        /* Write test header cmd and extra based on mode */
+        switch (bus->pktgen_mode) {
+        case DHD_PKTGEN_ECHO:
+            *data++ = SDPCM_TEST_ECHOREQ;
+            *data++ = (uint8)bus->pktgen_sent;
+            break;
+
+        case DHD_PKTGEN_SEND:
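+/*
+ * Sketch of the SDPCM test header that each case of this switch assembles
+ * (layout inferred from the surrounding code, helper name hypothetical):
+ * one command byte, one "extra" byte used as an echo sequence number or
+ * fill seed, then a 16-bit little-endian length written LSB first.
+ */
+static uint8 *
+sdpcm_test_hdr_write(uint8 *p, uint8 cmd, uint8 extra, uint16 len)
+{
+    *p++ = cmd;               /* SDPCM_TEST_ECHOREQ/DISCARD/BURST */
+    *p++ = extra;             /* sequence number or fill seed */
+    *p++ = (uint8)(len >> 0); /* length, low byte first */
+    *p++ = (uint8)(len >> 8);
+    return p;
+}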
+            *data++ = SDPCM_TEST_DISCARD;
+            *data++ = (uint8)bus->pktgen_sent;
+            break;
+
+        case DHD_PKTGEN_RXBURST:
+            *data++ = SDPCM_TEST_BURST;
+            *data++ = (uint8)bus->pktgen_count; /* Just for backward compatibility */
+            break;
+
+        default:
+            DHD_ERROR(("Unrecognized pktgen mode %d\n", bus->pktgen_mode));
+            PKTFREE(osh, pkt, TRUE);
+            bus->pktgen_count = 0;
+            return;
+        }
+
+        /* Write test header length field */
+        *data++ = (bus->pktgen_len >> 0);
+        *data++ = (bus->pktgen_len >> 8);
+
+        /* Write frame count in a 4 byte field adjacent to SDPCM test header for
+         * burst mode
+         */
+        if (bus->pktgen_mode == DHD_PKTGEN_RXBURST) {
+            *data++ = (uint8)(bus->pktgen_count >> 0);
+            *data++ = (uint8)(bus->pktgen_count >> 8);
+            *data++ = (uint8)(bus->pktgen_count >> 16);
+            *data++ = (uint8)(bus->pktgen_count >> 24);
+        } else {
+
+            /* Then fill in the remainder -- N/A for burst */
+            for (fillbyte = 0; fillbyte < len; fillbyte++)
+                *data++ = SDPCM_TEST_FILL(fillbyte, (uint8)bus->pktgen_sent);
+        }
+
+#ifdef DHD_DEBUG
+        if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+            data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+            prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN);
+        }
+#endif
+
+        /* Send it */
+        if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK) {
+            bus->pktgen_fail++;
+            if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail)
+                bus->pktgen_count = 0;
+        }
+        bus->pktgen_sent++;
+
+        /* Bump length if not fixed, wrap at max */
+        if (++bus->pktgen_len > bus->pktgen_maxlen)
+            bus->pktgen_len = (uint16)bus->pktgen_minlen;
+
+        /* Special case for burst mode: just send one request! */
+        if (bus->pktgen_mode == DHD_PKTGEN_RXBURST)
+            break;
+    }
+}
+
+static void
+dhdsdio_sdtest_set(dhd_bus_t *bus, uint count)
+{
+    void *pkt;
+    uint8 *data;
+    osl_t *osh = bus->dhd->osh;
+
+    /* Allocate the packet */
+    if (!(pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
+        SDPCM_TEST_PKT_CNT_FLD_LEN + DHD_SDALIGN, TRUE))) {
+        DHD_ERROR(("%s: PKTGET failed!\n", __FUNCTION__));
+        return;
+    }
+    PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
+        SDPCM_TEST_PKT_CNT_FLD_LEN), DHD_SDALIGN);
+    data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
+
+    /* Fill in the test header */
+    *data++ = SDPCM_TEST_SEND;
+    *data++ = (count > 0) ? TRUE : FALSE;
+    *data++ = (bus->pktgen_maxlen >> 0);
+    *data++ = (bus->pktgen_maxlen >> 8);
+    *data++ = (uint8)(count >> 0);
+    *data++ = (uint8)(count >> 8);
+    *data++ = (uint8)(count >> 16);
+    *data++ = (uint8)(count >> 24);
+
+    /* Send it */
+    if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK)
+        bus->pktgen_fail++;
+}
+
+
+static void
+dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
+{
+    osl_t *osh = bus->dhd->osh;
+    uint8 *data;
+    uint pktlen;
+
+    uint8 cmd;
+    uint8 extra;
+    uint16 len;
+    uint16 offset;
+
+    /* Check for min length */
+    if ((pktlen = PKTLEN(osh, pkt)) < SDPCM_TEST_HDRLEN) {
+        DHD_ERROR(("dhdsdio_testrcv: toss runt frame, pktlen %d\n", pktlen));
+        PKTFREE(osh, pkt, FALSE);
+        return;
+    }
+
+    /* Extract header fields */
+    data = PKTDATA(osh, pkt);
+    cmd = *data++;
+    extra = *data++;
+    len = *data++; len += *data++ << 8;
+    DHD_TRACE(("%s: cmd:%d, xtra:%d, len:%d\n", __FUNCTION__, cmd, extra, len));
+    /* Check length for relevant commands */
+    if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ || cmd == SDPCM_TEST_ECHORSP) {
+        if (pktlen != len + SDPCM_TEST_HDRLEN) {
+            DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, pktlen %d seq %d"
+                " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+            PKTFREE(osh, pkt, FALSE);
+            return;
+        }
+    }
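+/*
+ * Condensed sketch of the sanity check just performed (hypothetical
+ * helper, not a driver API): echo and discard commands carry a payload
+ * length in the test header, and the frame is tossed unless that length
+ * plus the header accounts for the whole packet.
+ */
+static bool
+sdpcm_test_len_ok(uint pktlen, uint8 cmd, uint16 len)
+{
+    if (cmd != SDPCM_TEST_DISCARD && cmd != SDPCM_TEST_ECHOREQ &&
+        cmd != SDPCM_TEST_ECHORSP)
+        return TRUE;  /* other commands carry no length contract */
+    return pktlen == (uint)(len + SDPCM_TEST_HDRLEN);
+}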
+
+    /* Process as per command */
+    switch (cmd) {
+    case SDPCM_TEST_ECHOREQ:
+        /* Rx->Tx turnaround ok (even on NDIS w/current implementation) */
+        *(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP;
+        if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) == BCME_OK) {
+            bus->pktgen_sent++;
+        } else {
+            bus->pktgen_fail++;
+            PKTFREE(osh, pkt, FALSE);
+        }
+        bus->pktgen_rcvd++;
+        break;
+
+    case SDPCM_TEST_ECHORSP:
+        if (bus->ext_loop) {
+            PKTFREE(osh, pkt, FALSE);
+            bus->pktgen_rcvd++;
+            break;
+        }
+
+        for (offset = 0; offset < len; offset++, data++) {
+            if (*data != SDPCM_TEST_FILL(offset, extra)) {
+                DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: "
+                    "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n",
+                    offset, len, SDPCM_TEST_FILL(offset, extra), *data));
+                break;
+            }
+        }
+        PKTFREE(osh, pkt, FALSE);
+        bus->pktgen_rcvd++;
+        break;
+
+    case SDPCM_TEST_DISCARD:
+        {
+            int i = 0;
+            uint8 *prn = data;
+            uint8 testval = extra;
+            for (i = 0; i < len; i++) {
+                if (*prn != testval) {
+                    DHD_ERROR(("DIErr@Pkt#:%d,Ix:%d, expected:0x%x, got:0x%x\n",
+                        bus->pktgen_rcvd_rcvsession, i, testval, *prn));
+                }
+                prn++; testval++;
+            }
+        }
+        PKTFREE(osh, pkt, FALSE);
+        bus->pktgen_rcvd++;
+        break;
+
+    case SDPCM_TEST_BURST:
+    case SDPCM_TEST_SEND:
+    default:
+        DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, pktlen %d seq %d"
+            " cmd %d extra %d len %d\n", pktlen, seq, cmd, extra, len));
+        PKTFREE(osh, pkt, FALSE);
+        break;
+    }
+
+    /* For recv mode, stop at limit (and tell dongle to stop sending) */
+    if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+        if (bus->pktgen_rcv_state != PKTGEN_RCV_IDLE) {
+            bus->pktgen_rcvd_rcvsession++;
+
+            if (bus->pktgen_total &&
+                (bus->pktgen_rcvd_rcvsession >= bus->pktgen_total)) {
+                bus->pktgen_count = 0;
+                DHD_ERROR(("Pktgen:rcv test complete!\n"));
+                bus->pktgen_rcv_state = PKTGEN_RCV_IDLE;
+                dhdsdio_sdtest_set(bus, FALSE);
+                bus->pktgen_rcvd_rcvsession = 0;
+            }
+        }
+    }
+}
+#endif /* SDTEST */
+
+int dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
+{
+    int err = 0;
+
+#if defined(OOB_INTR_ONLY)
+    err = bcmsdh_oob_intr_register(dhdp->bus->sdh, dhdsdio_isr, dhdp->bus);
+#endif
+    return err;
+}
+
+void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
+{
+#if defined(OOB_INTR_ONLY)
+    bcmsdh_oob_intr_unregister(dhdp->bus->sdh);
+#endif
+}
+
+void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
+{
+#if defined(OOB_INTR_ONLY)
+    bcmsdh_oob_intr_set(dhdp->bus->sdh, enable);
+#endif
+}
+
+void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub)
+{
+    bcmsdh_dev_pm_stay_awake(dhdpub->bus->sdh);
+}
+
+void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub)
+{
+    bcmsdh_dev_relax(dhdpub->bus->sdh);
+}
+
+bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub)
+{
+    bool enabled = FALSE;
+
+    enabled = bcmsdh_dev_pm_enabled(dhdpub->bus->sdh);
+    return enabled;
+}
+
+extern bool
+dhd_bus_watchdog(dhd_pub_t *dhdp)
+{
+    dhd_bus_t *bus;
+
+    DHD_TIMER(("%s: Enter\n", __FUNCTION__));
+
+    bus = dhdp->bus;
+
+    if (bus->dhd->dongle_reset)
+        return FALSE;
+
+    if (bus->dhd->hang_was_sent) {
+        dhd_os_wd_timer(bus->dhd, 0);
+        return FALSE;
+    }
+
+    /* Ignore the timer if simulating bus down */
+    if (!SLPAUTO_ENAB(bus) && bus->sleeping)
+        return FALSE;
+
+    if (dhdp->busstate == DHD_BUS_DOWN)
+        return FALSE;
+
+    dhd_os_sdlock(bus->dhd);
+
+    /* Poll period: check device if appropriate.
*/ + if (!SLPAUTO_ENAB(bus) && (bus->poll && (++bus->polltick >= bus->pollrate))) { + uint32 intstatus = 0; + + /* Reset poll tick */ + bus->polltick = 0; + + /* Check device if no interrupts */ + if (!bus->intr || (bus->intrcount == bus->lastintrs)) { + + if (!bus->dpc_sched) { + uint8 devpend; + devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, + SDIOD_CCCR_INTPEND, NULL); + intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2); + } + + /* If there is something, make like the ISR and schedule the DPC */ + if (intstatus) { + bus->pollcnt++; + bus->ipend = TRUE; + if (bus->intr) { + bcmsdh_intr_disable(bus->sdh); + } + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + } + + /* Update interrupt tracking */ + bus->lastintrs = bus->intrcount; + } + +#ifdef DHD_DEBUG + /* Poll for console output periodically */ + if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) { + bus->console.count += dhd_watchdog_ms; + if (bus->console.count >= dhd_console_ms) { + bus->console.count -= dhd_console_ms; + /* Make sure backplane clock is on */ + if (SLPAUTO_ENAB(bus)) + dhdsdio_bussleep(bus, FALSE); + else + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + if (dhdsdio_readconsole(bus) < 0) + dhd_console_ms = 0; /* On error, stop trying */ + } + } +#endif /* DHD_DEBUG */ + +#ifdef SDTEST + /* Generate packets if configured */ + if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) { + /* Make sure backplane clock is on */ + if (SLPAUTO_ENAB(bus)) + dhdsdio_bussleep(bus, FALSE); + else + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + bus->pktgen_tick = 0; + dhdsdio_pktgen(bus); + } +#endif + + /* On idle timeout clear activity flag and/or turn off clock */ +#ifdef DHD_USE_IDLECOUNT + if (bus->activity) + bus->activity = FALSE; + else { + bus->idlecount++; + + if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) { + DHD_TIMER(("%s: DHD Idle state!!\n", __FUNCTION__)); + if (SLPAUTO_ENAB(bus)) { + if (dhdsdio_bussleep(bus, TRUE) != BCME_BUSY) + dhd_os_wd_timer(bus->dhd, 0); + } else + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + + bus->idlecount = 0; + } + } +#else + if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) { + if (++bus->idlecount >= bus->idletime) { + bus->idlecount = 0; + if (bus->activity) { + bus->activity = FALSE; + if (SLPAUTO_ENAB(bus)) { + if (!bus->readframes) + dhdsdio_bussleep(bus, TRUE); + else + bus->reqbussleep = TRUE; + } + else + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + } + } +#endif /* DHD_USE_IDLECOUNT */ + + dhd_os_sdunlock(bus->dhd); + + return bus->ipend; +} + +#ifdef DHD_DEBUG +extern int +dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen) +{ + dhd_bus_t *bus = dhdp->bus; + uint32 addr, val; + int rv; + void *pkt; + + /* Address could be zero if CONSOLE := 0 in dongle Makefile */ + if (bus->console_addr == 0) + return BCME_UNSUPPORTED; + + /* Exclusive bus access */ + dhd_os_sdlock(bus->dhd); + + /* Don't allow input if dongle is in reset */ + if (bus->dhd->dongle_reset) { + dhd_os_sdunlock(bus->dhd); + return BCME_NOTREADY; + } + + /* Request clock to allow SDIO accesses */ + BUS_WAKE(bus); + /* No pend allowed since txpkt is called later, ht clk has to be on */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + /* Zero cbuf_index */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx); + val = htol32(0); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* Write message into cbuf */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, 
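+/*
+ * Sketch of the virtual-console input handshake performed around this
+ * point, condensed into one hypothetical wrapper (order matters): clear
+ * the dongle's console buffer index, copy the command text into cbuf,
+ * then publish its length through vcons_in; the caller still nudges the
+ * dongle afterwards with an empty event-channel packet so it polls the
+ * console.
+ */
+static int
+dhd_console_push(dhd_bus_t *bus, uchar *msg, uint msglen)
+{
+    uint32 zero = htol32(0);
+    uint32 vlen = htol32(msglen);
+    uint32 base = bus->console_addr;
+    int rv;
+
+    rv = dhdsdio_membytes(bus, TRUE, base + OFFSETOF(hnd_cons_t, cbuf_idx),
+        (uint8 *)&zero, sizeof(zero));
+    if (rv < 0)
+        return rv;
+    rv = dhdsdio_membytes(bus, TRUE, base + OFFSETOF(hnd_cons_t, cbuf),
+        (uint8 *)msg, msglen);
+    if (rv < 0)
+        return rv;
+    return dhdsdio_membytes(bus, TRUE, base + OFFSETOF(hnd_cons_t, vcons_in),
+        (uint8 *)&vlen, sizeof(vlen));
+}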
(uint8 *)msg, msglen)) < 0) + goto done; + + /* Write length into vcons_in */ + addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in); + val = htol32(msglen); + if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0) + goto done; + + /* Bump dongle by sending an empty packet on the event channel. + * sdpcm_sendup (RX) checks for virtual console input. + */ + if ((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL) + rv = dhdsdio_txpkt(bus, SDPCM_EVENT_CHANNEL, &pkt, 1, TRUE); + +done: + if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) { + bus->activity = FALSE; + dhdsdio_clkctl(bus, CLK_NONE, TRUE); + } + + dhd_os_sdunlock(bus->dhd); + + return rv; +} +#endif /* DHD_DEBUG */ + +#ifdef DHD_DEBUG +static void +dhd_dump_cis(uint fn, uint8 *cis) +{ + uint byte, tag, tdata; + DHD_INFO(("Function %d CIS:\n", fn)); + + for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) { + if ((byte % 16) == 0) + DHD_INFO((" ")); + DHD_INFO(("%02x ", cis[byte])); + if ((byte % 16) == 15) + DHD_INFO(("\n")); + if (!tdata--) { + tag = cis[byte]; + if (tag == 0xff) + break; + else if (!tag) + tdata = 0; + else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT) + tdata = cis[byte + 1] + 1; + else + DHD_INFO(("]")); + } + } + if ((byte % 16) != 15) + DHD_INFO(("\n")); +} +#endif /* DHD_DEBUG */ + +static bool +dhdsdio_chipmatch(uint16 chipid) +{ + if (chipid == BCM4325_CHIP_ID) + return TRUE; + if (chipid == BCM4329_CHIP_ID) + return TRUE; + if (chipid == BCM4315_CHIP_ID) + return TRUE; + if (chipid == BCM4319_CHIP_ID) + return TRUE; + if (chipid == BCM4336_CHIP_ID) + return TRUE; + if (chipid == BCM4330_CHIP_ID) + return TRUE; + if (chipid == BCM43237_CHIP_ID) + return TRUE; + if (chipid == BCM43362_CHIP_ID) + return TRUE; + if (chipid == BCM4314_CHIP_ID) + return TRUE; + if (chipid == BCM43242_CHIP_ID) + return TRUE; + if (chipid == BCM43340_CHIP_ID) + return TRUE; + if (chipid == BCM43341_CHIP_ID) + return TRUE; + if (chipid == BCM43143_CHIP_ID) + return TRUE; + if (chipid == BCM43342_CHIP_ID) + return TRUE; + if (chipid == BCM4334_CHIP_ID) + return TRUE; + if (chipid == BCM43239_CHIP_ID) + return TRUE; + if (chipid == BCM4324_CHIP_ID) + return TRUE; + if (chipid == BCM4335_CHIP_ID) + return TRUE; + if (chipid == BCM4339_CHIP_ID) + return TRUE; + if (chipid == BCM43349_CHIP_ID) + return TRUE; + if (chipid == BCM4345_CHIP_ID || chipid == BCM43454_CHIP_ID) + return TRUE; + if (chipid == BCM4350_CHIP_ID) + return TRUE; + if (chipid == BCM4354_CHIP_ID) + return TRUE; + if (chipid == BCM4356_CHIP_ID) + return TRUE; + if (chipid == BCM4358_CHIP_ID) + return TRUE; + if (chipid == BCM43430_CHIP_ID) + return TRUE; + if (BCM4349_CHIP(chipid)) + return TRUE; + return FALSE; +} + +static void * +dhdsdio_probe(uint16 venid, uint16 devid, uint16 bus_no, uint16 slot, + uint16 func, uint bustype, void *regsva, osl_t * osh, void *sdh) +{ + int ret; + dhd_bus_t *bus; + + + /* Init global variables at run-time, not as part of the declaration. + * This is required to support init/de-init of the driver. Initialization + * of globals as part of the declaration results in non-deterministic + * behavior since the value of the globals may be different on the + * first time that the driver is initialized vs subsequent initializations. 
+ */ + dhd_txbound = DHD_TXBOUND; + dhd_rxbound = DHD_RXBOUND; + dhd_alignctl = TRUE; + sd1idle = TRUE; + dhd_readahead = TRUE; + retrydata = FALSE; + +#ifdef DISABLE_FLOW_CONTROL + dhd_doflow = FALSE; +#endif /* DISABLE_FLOW_CONTROL */ + dhd_dongle_ramsize = 0; + dhd_txminmax = DHD_TXMINMAX; + + forcealign = TRUE; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid)); + + /* We make assumptions about address window mappings */ + ASSERT((uintptr)regsva == SI_ENUM_BASE); + + /* BCMSDH passes venid and devid based on CIS parsing -- but low-power start + * means early parse could fail, so here we should get either an ID + * we recognize OR (-1) indicating we must request power first. + */ + /* Check the Vendor ID */ + switch (venid) { + case 0x0000: + case VENDOR_BROADCOM: + break; + default: + DHD_ERROR(("%s: unknown vendor: 0x%04x\n", + __FUNCTION__, venid)); + goto forcereturn; + } + + /* Check the Device ID and make sure it's one that we support */ + switch (devid) { + case BCM4325_D11DUAL_ID: /* 4325 802.11a/g id */ + case BCM4325_D11G_ID: /* 4325 802.11g 2.4Ghz band id */ + case BCM4325_D11A_ID: /* 4325 802.11a 5Ghz band id */ + DHD_INFO(("%s: found 4325 Dongle\n", __FUNCTION__)); + break; + case BCM4329_D11N_ID: /* 4329 802.11n dualband device */ + case BCM4329_D11N2G_ID: /* 4329 802.11n 2.4G device */ + case BCM4329_D11N5G_ID: /* 4329 802.11n 5G device */ + case 0x4329: + DHD_INFO(("%s: found 4329 Dongle\n", __FUNCTION__)); + break; + case BCM4315_D11DUAL_ID: /* 4315 802.11a/g id */ + case BCM4315_D11G_ID: /* 4315 802.11g id */ + case BCM4315_D11A_ID: /* 4315 802.11a id */ + DHD_INFO(("%s: found 4315 Dongle\n", __FUNCTION__)); + break; + case BCM4319_D11N_ID: /* 4319 802.11n id */ + case BCM4319_D11N2G_ID: /* 4319 802.11n2g id */ + case BCM4319_D11N5G_ID: /* 4319 802.11n5g id */ + DHD_INFO(("%s: found 4319 Dongle\n", __FUNCTION__)); + break; + case 0: + DHD_INFO(("%s: allow device id 0, will check chip internals\n", + __FUNCTION__)); + break; + + default: + DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n", + __FUNCTION__, venid, devid)); + goto forcereturn; + } + + if (osh == NULL) { + DHD_ERROR(("%s: osh is NULL!\n", __FUNCTION__)); + goto forcereturn; + } + + /* Allocate private bus interface state */ + if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) { + DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__)); + goto fail; + } + bzero(bus, sizeof(dhd_bus_t)); + bus->sdh = sdh; + bus->cl_devid = (uint16)devid; + bus->bus = DHD_BUS; + bus->bus_num = bus_no; + bus->slot_num = slot; + bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1; + bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */ + +#if defined(SUPPORT_P2P_GO_PS) + init_waitqueue_head(&bus->bus_sleep); +#endif /* LINUX && SUPPORT_P2P_GO_PS */ + + /* attempt to attach to the dongle */ + if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) { + DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__)); + goto fail; + } + + /* Attach to the dhd/OS/network interface */ + if (!(bus->dhd = dhd_attach(osh, bus, SDPCM_RESERVE))) { + DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__)); + goto fail; + } + + /* Allocate buffers */ + if (!(dhdsdio_probe_malloc(bus, osh, sdh))) { + DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __FUNCTION__)); + goto fail; + } + + if (!(dhdsdio_probe_init(bus, osh, sdh))) { + DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __FUNCTION__)); + goto fail; + } + + if (bus->intr) { + /* Register 
interrupt callback, but mask it (not operational yet). */ + DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n", __FUNCTION__)); + bcmsdh_intr_disable(sdh); + if ((ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus)) != 0) { + DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n", + __FUNCTION__, ret)); + goto fail; + } + DHD_INTR(("%s: registered SDIO interrupt function ok\n", __FUNCTION__)); + } else { + DHD_INFO(("%s: SDIO interrupt function is NOT registered due to polling mode\n", + __FUNCTION__)); + } + + DHD_INFO(("%s: completed!!\n", __FUNCTION__)); + + /* if firmware path present try to download and bring up bus */ + bus->dhd->hang_report = TRUE; + if (dhd_download_fw_on_driverload) { + if ((ret = dhd_bus_start(bus->dhd)) != 0) { + DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__)); + goto fail; + } + } + else { + /* Set random MAC address during boot time */ + get_random_bytes(&bus->dhd->mac.octet[3], 3); + /* Adding BRCM OUI */ + bus->dhd->mac.octet[0] = 0; + bus->dhd->mac.octet[1] = 0x90; + bus->dhd->mac.octet[2] = 0x4C; + } + /* Ok, have the per-port tell the stack we're open for business */ + if (dhd_register_if(bus->dhd, 0, TRUE) != 0) { + DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__)); + goto fail; + } + +#ifdef BCMHOST_XTAL_PU_TIME_MOD + bcmsdh_reg_write(bus->sdh, 0x18000620, 2, 11); +#ifdef BCM4330_CHIP + bcmsdh_reg_write(bus->sdh, 0x18000628, 4, 0x0000F801); +#else + bcmsdh_reg_write(bus->sdh, 0x18000628, 4, 0x00F80001); +#endif /* BCM4330_CHIP */ +#endif /* BCMHOST_XTAL_PU_TIME_MOD */ + + + return bus; + +fail: + dhdsdio_release(bus, osh); + +forcereturn: + + return NULL; +} + +static bool +dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, + uint16 devid) +{ + int err = 0; + uint8 clkctl = 0; + + bus->alp_only = TRUE; + bus->sih = NULL; + + /* Return the window to backplane enumeration space for core access */ + if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE)) { + DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__)); + } + +#if defined(DHD_DEBUG) && !defined(CUSTOMER_HW4_DEBUG) + DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n", + bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4))); +#endif /* DHD_DEBUG && !CUSTOMER_HW4_DEBUG */ + + + /* Force PLL off until si_attach() programs PLL control regs */ + + + + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err); + if (!err) + clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); + + if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) { + DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n", + err, DHD_INIT_CLKCTL1, clkctl)); + goto fail; + } + +#ifdef DHD_DEBUG + if (DHD_INFO_ON()) { + uint fn, numfn; + uint8 *cis[SDIOD_MAX_IOFUNCS]; + int err = 0; + + numfn = bcmsdh_query_iofnum(sdh); + ASSERT(numfn <= SDIOD_MAX_IOFUNCS); + + /* Make sure ALP is available before trying to read CIS */ + SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, + SBSDIO_FUNC1_CHIPCLKCSR, NULL)), + !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY); + + /* Now request ALP be put on the bus */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, + DHD_INIT_CLKCTL2, &err); + OSL_DELAY(65); + + for (fn = 0; fn <= numfn; fn++) { + if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) { + DHD_INFO(("dhdsdio_probe: fn %d cis malloc failed\n", fn)); + break; + } + bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT); + + if ((err = bcmsdh_cis_read(sdh, fn, cis[fn], SBSDIO_CIS_SIZE_LIMIT))) { + DHD_INFO(("dhdsdio_probe: 
fn %d cis read err %d\n", fn, err)); + MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT); + break; + } + dhd_dump_cis(fn, cis[fn]); + } + + while (fn-- > 0) { + ASSERT(cis[fn]); + MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT); + } + + if (err) { + DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n")); + goto fail; + } + } +#endif /* DHD_DEBUG */ + + /* si_attach() will provide an SI handle and scan the backplane */ + if (!(bus->sih = si_attach((uint)devid, osh, regsva, DHD_BUS, sdh, + &bus->vars, &bus->varsz))) { + DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__)); + goto fail; + } + +#ifdef DHD_DEBUG + DHD_ERROR(("F1 signature OK, socitype:0x%x chip:0x%4x rev:0x%x pkg:0x%x\n", + bus->sih->socitype, bus->sih->chip, bus->sih->chiprev, bus->sih->chippkg)); +#endif /* DHD_DEBUG */ + + + bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev); + + if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) { + DHD_ERROR(("%s: unsupported chip: 0x%04x\n", + __FUNCTION__, bus->sih->chip)); + goto fail; + } + + if (bus->sih->buscorerev >= 12) + dhdsdio_clk_kso_init(bus); + else + bus->kso = TRUE; + + if (CST4330_CHIPMODE_SDIOD(bus->sih->chipst)) { + } + + si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength); + + + /* Get info on the ARM and SOCRAM cores... */ + if (!DHD_NOPMU(bus)) { + if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) || + (si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) { + bus->armrev = si_corerev(bus->sih); + } else { + DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__)); + goto fail; + } + + if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + if (!(bus->orig_ramsize = si_socram_size(bus->sih))) { + DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__)); + goto fail; + } + } else { + /* cr4 has a different way to find the RAM size from TCM's */ + if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) { + DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__)); + goto fail; + } + /* also populate base address */ + switch ((uint16)bus->sih->chip) { + case BCM4335_CHIP_ID: + case BCM4339_CHIP_ID: + case BCM43349_CHIP_ID: + bus->dongle_ram_base = CR4_4335_RAM_BASE; + break; + case BCM4350_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM4356_CHIP_ID: + case BCM4358_CHIP_ID: + bus->dongle_ram_base = CR4_4350_RAM_BASE; + break; + case BCM4360_CHIP_ID: + bus->dongle_ram_base = CR4_4360_RAM_BASE; + break; + case BCM4345_CHIP_ID: + case BCM43454_CHIP_ID: + bus->dongle_ram_base = (bus->sih->chiprev < 6) /* from 4345C0 */ + ? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE; + break; + case BCM4349_CHIP_GRPID: + /* RAM base changed from 4349c0(revid=9) onwards */ + bus->dongle_ram_base = ((bus->sih->chiprev < 9) ? 
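+/*
+ * Sketch of the CR4 RAM-base selection happening in this switch
+ * (hypothetical helper; the CR4_*_RAM_BASE constants are the driver's
+ * own): chips with a CR4 ARM core expose RAM through TCM at a chip- and
+ * sometimes revision-specific base address, which later offsets every
+ * firmware image write.
+ */
+static uint32
+cr4_ram_base_for(uint16 chip, uint8 chiprev)
+{
+    switch (chip) {
+    case BCM4335_CHIP_ID:
+    case BCM4339_CHIP_ID:
+        return CR4_4335_RAM_BASE;
+    case BCM4345_CHIP_ID:
+        /* the base moved starting with 4345C0 (chiprev 6) */
+        return (chiprev < 6) ? CR4_4345_LT_C0_RAM_BASE :
+            CR4_4345_GE_C0_RAM_BASE;
+    default:
+        return 0;  /* caller warns and falls back to base 0 */
+    }
+}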
+ CR4_4349_RAM_BASE: CR4_4349_RAM_BASE_FROM_REV_9); + break; + default: + bus->dongle_ram_base = 0; + DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n", + __FUNCTION__, bus->dongle_ram_base)); + } + } + bus->ramsize = bus->orig_ramsize; + if (dhd_dongle_ramsize) + dhd_dongle_setramsize(bus, dhd_dongle_ramsize); + + DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n", + bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base)); + + bus->srmemsize = si_socram_srmem_size(bus->sih); + } + + /* ...but normally deal with the SDPCMDEV core */ + if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) && + !(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) { + DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__)); + goto fail; + } + bus->sdpcmrev = si_corerev(bus->sih); + + /* Set core control so an SDIO reset does a backplane reset */ + OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN); + bus->rxint_mode = SDIO_DEVICE_HMB_RXINT; + + if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) && + (bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1)) + { + uint32 val; + + val = R_REG(osh, &bus->regs->corecontrol); + val &= ~CC_XMTDATAAVAIL_MODE; + val |= CC_XMTDATAAVAIL_CTRL; + W_REG(osh, &bus->regs->corecontrol, val); + } + + + pktq_init(&bus->txq, (PRIOMASK + 1), QLEN); + + /* Locate an appropriately-aligned portion of hdrbuf */ + bus->rxhdr = (uint8 *)ROUNDUP((uintptr)&bus->hdrbuf[0], DHD_SDALIGN); + + /* Set the poll and/or interrupt flags */ + bus->intr = (bool)dhd_intr; + if ((bus->poll = (bool)dhd_poll)) + bus->pollrate = 1; + + /* Setting default Glom size */ + bus->txglomsize = SDPCM_DEFGLOM_SIZE; + + return TRUE; + +fail: + if (bus->sih != NULL) { + si_detach(bus->sih); + bus->sih = NULL; + } + return FALSE; +} + +static bool +dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd->maxctl) { + bus->rxblen = ROUNDUP((bus->dhd->maxctl+SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN; + if (!(bus->rxbuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_RXBUF, bus->rxblen))) { + DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n", + __FUNCTION__, bus->rxblen)); + goto fail; + } + } + /* Allocate buffer to receive glomed packet */ + if (!(bus->databuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) { + DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n", + __FUNCTION__, MAX_DATA_BUF)); + /* release rxbuf which was already located as above */ + if (!bus->rxblen) + DHD_OS_PREFREE(bus->dhd, bus->rxbuf, bus->rxblen); + goto fail; + } + + /* Align the buffer */ + if ((uintptr)bus->databuf % DHD_SDALIGN) + bus->dataptr = bus->databuf + (DHD_SDALIGN - ((uintptr)bus->databuf % DHD_SDALIGN)); + else + bus->dataptr = bus->databuf; + + return TRUE; + +fail: + return FALSE; +} + +static bool +dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh) +{ + int32 fnum; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bus->_srenab = FALSE; + +#ifdef SDTEST + dhdsdio_pktgen_init(bus); +#endif /* SDTEST */ + + /* Disable F2 to clear any intermediate frame state on the dongle */ + bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL); + + bus->dhd->busstate = DHD_BUS_DOWN; + bus->sleeping = FALSE; + bus->rxflow = FALSE; + bus->prev_rxlim_hit = 0; + + /* Done with backplane-dependent accesses, can drop clock... 
*/ + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL); + + /* ...and initialize clock/power states */ + bus->clkstate = CLK_SDONLY; + bus->idletime = (int32)dhd_idletime; + bus->idleclock = DHD_IDLE_ACTIVE; + + /* Query the SD clock speed */ + if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0, + &bus->sd_divisor, sizeof(int32), FALSE) != BCME_OK) { + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_divisor")); + bus->sd_divisor = -1; + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_divisor", bus->sd_divisor)); + } + + /* Query the SD bus mode */ + if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0, + &bus->sd_mode, sizeof(int32), FALSE) != BCME_OK) { + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_mode")); + bus->sd_mode = -1; + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_mode", bus->sd_mode)); + } + + /* Query the F2 block size, set roundup accordingly */ + fnum = 2; + if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32), + &bus->blocksize, sizeof(int32), FALSE) != BCME_OK) { + bus->blocksize = 0; + DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize")); + } else { + DHD_INFO(("%s: Initial value for %s is %d\n", + __FUNCTION__, "sd_blocksize", bus->blocksize)); + + dhdsdio_tune_fifoparam(bus); + } + bus->roundup = MIN(max_roundup, bus->blocksize); + +#ifdef DHDENABLE_TAILPAD + if (bus->pad_pkt) + PKTFREE(osh, bus->pad_pkt, FALSE); + bus->pad_pkt = PKTGET(osh, SDIO_MAX_BLOCK_SIZE, FALSE); + if (bus->pad_pkt == NULL) + DHD_ERROR(("failed to allocate padding packet\n")); + else { + int alignment_offset = 0; + uintptr pktprt = (uintptr)PKTDATA(osh, bus->pad_pkt); + if (!(pktprt&1) && (pktprt = (pktprt % DHD_SDALIGN))) + PKTPUSH(osh, bus->pad_pkt, alignment_offset); + PKTSETNEXT(osh, bus->pad_pkt, NULL); + } +#endif /* DHDENABLE_TAILPAD */ + + /* Query if bus module supports packet chaining, default to use if supported */ + if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0, + &bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) { + bus->sd_rxchain = FALSE; + } else { + DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n", + __FUNCTION__, (bus->sd_rxchain ? 
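+/*
+ * Sketch of the query-with-fallback pattern used for each bcmsdh iovar in
+ * this function (hypothetical helper): a failed GET leaves a sentinel
+ * such as -1 or 0 in the cached value so later code can tell "unknown"
+ * from a real host-controller setting.
+ */
+static int32
+sdh_iovar_get_int32(void *sdh, const char *name, int32 fallback)
+{
+    int32 val;
+
+    if (bcmsdh_iovar_op(sdh, name, NULL, 0,
+        &val, sizeof(val), FALSE) != BCME_OK)
+        return fallback;  /* e.g. -1 for sd_divisor/sd_mode */
+    return val;
+}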
"supports" : "does not support"))); + } + bus->use_rxchain = (bool)bus->sd_rxchain; + bus->txinrx_thres = CUSTOM_TXINRX_THRES; + /* TX first in dhdsdio_readframes() */ + bus->dotxinrx = TRUE; + + return TRUE; +} + +int +dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh, + char *pfw_path, char *pnv_path) +{ + int ret; + + bus->fw_path = pfw_path; + bus->nv_path = pnv_path; + + ret = dhdsdio_download_firmware(bus, osh, bus->sdh); + + + return ret; +} + +static int +dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh) +{ + int ret; + + + DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n", + __FUNCTION__, bus->fw_path, bus->nv_path)); + DHD_OS_WAKE_LOCK(bus->dhd); + + /* Download the firmware */ + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + ret = _dhdsdio_download_firmware(bus); + + dhdsdio_clkctl(bus, CLK_SDONLY, FALSE); + + DHD_OS_WAKE_UNLOCK(bus->dhd); + return ret; +} + +/* Detach and free everything */ +static void +dhdsdio_release(dhd_bus_t *bus, osl_t *osh) +{ + bool dongle_isolation = FALSE; + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus) { + ASSERT(osh); + + if (bus->dhd) { + dongle_isolation = bus->dhd->dongle_isolation; + dhd_detach(bus->dhd); + } + + /* De-register interrupt handler */ + bcmsdh_intr_disable(bus->sdh); + bcmsdh_intr_dereg(bus->sdh); + + if (bus->dhd) { + dhdsdio_release_dongle(bus, osh, dongle_isolation, TRUE); + dhd_free(bus->dhd); + bus->dhd = NULL; + } + + dhdsdio_release_malloc(bus, osh); + +#ifdef DHD_DEBUG + if (bus->console.buf != NULL) + MFREE(osh, bus->console.buf, bus->console.bufsize); +#endif + +#ifdef DHDENABLE_TAILPAD + if (bus->pad_pkt) + PKTFREE(osh, bus->pad_pkt, FALSE); +#endif /* DHDENABLE_TAILPAD */ + + MFREE(osh, bus, sizeof(dhd_bus_t)); + } + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + +static void +dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + if (bus->dhd && bus->dhd->dongle_reset) + return; + + if (bus->rxbuf) { +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(osh, bus->rxbuf, bus->rxblen); +#endif + bus->rxctl = bus->rxbuf = NULL; + bus->rxlen = 0; + } + + if (bus->databuf) { +#ifndef CONFIG_DHD_USE_STATIC_BUF + MFREE(osh, bus->databuf, MAX_DATA_BUF); +#endif + bus->databuf = NULL; + } + + if (bus->vars && bus->varsz) { + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + +} + + +static void +dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag) +{ + DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__, + bus->dhd, bus->dhd->dongle_reset)); + + if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) + return; + + if (bus->sih) { +#if !defined(BCMLXSDMMC) + if (bus->dhd) { + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + } + if (KSO_ENAB(bus) && (dongle_isolation == FALSE)) + si_watchdog(bus->sih, 4); +#endif /* !defined(BCMLXSDMMC) */ + if (bus->dhd) { + dhdsdio_clkctl(bus, CLK_NONE, FALSE); + } + si_detach(bus->sih); + bus->sih = NULL; + if (bus->vars && bus->varsz) + MFREE(osh, bus->vars, bus->varsz); + bus->vars = NULL; + } + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + +static void +dhdsdio_disconnect(void *ptr) +{ + dhd_bus_t *bus = (dhd_bus_t *)ptr; + + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + + + if (bus) { + ASSERT(bus->dhd); + dhdsdio_release(bus, bus->dhd->osh); + } + + + + DHD_TRACE(("%s: Disconnected\n", __FUNCTION__)); +} + +static int +dhdsdio_suspend(void *context) +{ + int ret = 0; + + dhd_bus_t *bus = (dhd_bus_t*)context; +#ifdef 
SUPPORT_P2P_GO_PS + int wait_time = 0; + + if (bus->idletime > 0) { + wait_time = msecs_to_jiffies(bus->idletime * dhd_watchdog_ms); + } +#endif /* SUPPORT_P2P_GO_PS */ + ret = dhd_os_check_wakelock(bus->dhd); +#ifdef SUPPORT_P2P_GO_PS + if ((!ret) && (bus->dhd->up) && (bus->dhd->op_mode != DHD_FLAG_HOSTAP_MODE)) { + if (wait_event_timeout(bus->bus_sleep, bus->sleeping, wait_time) == 0) { + if (!bus->sleeping) { + return 1; + } + } + } +#endif /* SUPPORT_P2P_GO_PS */ + return ret; +} + +static int +dhdsdio_resume(void *context) +{ +#if defined(OOB_INTR_ONLY) + dhd_bus_t *bus = (dhd_bus_t*)context; + + if (dhd_os_check_if_up(bus->dhd)) + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif + return 0; +} + + +/* Register/Unregister functions are called by the main DHD entry + * point (e.g. module insertion) to link with the bus driver, in + * order to look for or await the device. + */ + +static bcmsdh_driver_t dhd_sdio = { + dhdsdio_probe, + dhdsdio_disconnect, + dhdsdio_suspend, + dhdsdio_resume +}; + +int +dhd_bus_register(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + return bcmsdh_register(&dhd_sdio); +} + +void +dhd_bus_unregister(void) +{ + DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + + bcmsdh_unregister(); +} + +#if defined(BCMLXSDMMC) +/* Register a dummy SDIO client driver in order to be notified of new SDIO device */ +int dhd_bus_reg_sdio_notify(void* semaphore) +{ + return bcmsdh_reg_sdio_notify(semaphore); +} + +void dhd_bus_unreg_sdio_notify(void) +{ + bcmsdh_unreg_sdio_notify(); +} +#endif /* defined(BCMLXSDMMC) */ + +#ifdef BCMEMBEDIMAGE +static int +dhdsdio_download_code_array(struct dhd_bus *bus) +{ + int bcmerror = -1; + int offset = 0; + unsigned char *ularray = NULL; + + DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__)); + + /* Download image */ + while ((offset + MEMBLOCK) < sizeof(dlarray)) { + /* check if CR4 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + /* if address is 0, store the reset instruction to be written in 0 */ + + if (offset == 0) { + bus->resetinstr = *(((uint32*)dlarray)); + /* Add start of RAM address to the address given by user */ + offset += bus->dongle_ram_base; + } + } + + bcmerror = dhdsdio_membytes(bus, TRUE, offset, + (uint8 *) (dlarray + offset), MEMBLOCK); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + offset += MEMBLOCK; + } + + if (offset < sizeof(dlarray)) { + bcmerror = dhdsdio_membytes(bus, TRUE, offset, + (uint8 *) (dlarray + offset), sizeof(dlarray) - offset); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset)); + goto err; + } + } + +#ifdef DHD_DEBUG + /* Upload and compare the downloaded code */ + { + ularray = MALLOC(bus->dhd->osh, bus->ramsize); + /* Upload image to verify downloaded contents. 
*/ + offset = 0; + memset(ularray, 0xaa, bus->ramsize); + while ((offset + MEMBLOCK) < sizeof(dlarray)) { + bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + offset += MEMBLOCK; + } + + if (offset < sizeof(dlarray)) { + bcmerror = dhdsdio_membytes(bus, FALSE, offset, + ularray + offset, sizeof(dlarray) - offset); + if (bcmerror) { + DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, sizeof(dlarray) - offset, offset)); + goto err; + } + } + + if (memcmp(dlarray, ularray, sizeof(dlarray))) { + DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n", + __FUNCTION__, dlimagename, dlimagever, dlimagedate)); + goto err; + } else + DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n", + __FUNCTION__, dlimagename, dlimagever, dlimagedate)); + + } +#endif /* DHD_DEBUG */ + +err: + if (ularray) + MFREE(bus->dhd->osh, ularray, bus->ramsize); + return bcmerror; +} +#endif /* BCMEMBEDIMAGE */ + +static int +dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path) +{ + int bcmerror = -1; + int offset = 0; + int len; + void *image = NULL; + uint8 *memblock = NULL, *memptr; + + DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path)); + + image = dhd_os_open_image(pfw_path); + if (image == NULL) + goto err; + + memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK)); + goto err; + } + if ((uint32)(uintptr)memblock % DHD_SDALIGN) + memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN)); + + /* Download image */ + while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) { + if (len < 0) { + DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len)); + bcmerror = BCME_ERROR; + goto err; + } + /* check if CR4 */ + if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) { + /* if address is 0, store the reset instruction to be written in 0 */ + + if (offset == 0) { + bus->resetinstr = *(((uint32*)memptr)); + /* Add start of RAM address to the address given by user */ + offset += bus->dongle_ram_base; + } + } + + bcmerror = dhdsdio_membytes(bus, TRUE, offset, memptr, len); + if (bcmerror) { + DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n", + __FUNCTION__, bcmerror, MEMBLOCK, offset)); + goto err; + } + + offset += MEMBLOCK; + } + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN); + + if (image) + dhd_os_close_image(image); + + return bcmerror; +} + +static int +dhdsdio_download_nvram(struct dhd_bus *bus) +{ + int bcmerror = -1; + uint len; + void * image = NULL; + char * memblock = NULL; + char *bufp; + char *pnv_path; + bool nvram_file_exists; + + pnv_path = bus->nv_path; + + nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0')); + + /* For Get nvram from UEFI */ + if (nvram_file_exists) + image = dhd_os_open_image(pnv_path); + + memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE); + if (memblock == NULL) { + DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", + __FUNCTION__, MAX_NVRAMBUF_SIZE)); + goto err; + } + + /* For Get nvram from image or UEFI (when image == NULL ) */ + len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image); + + if (len > 0 && len < MAX_NVRAMBUF_SIZE) { + bufp = (char *)memblock; + bufp[len] = 0; + len = 
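+/*
+ * Sketch of the NVRAM sizing rule applied below (hypothetical helper):
+ * after process_nvram_vars() compacts the "name=value" text, the length
+ * is rounded up to a 4-byte boundary and one extra NUL byte is included
+ * in the image handed to dhdsdio_downloadvars().
+ */
+static uint
+nvram_download_len(uint varslen)
+{
+    if (varslen % 4)
+        varslen += 4 - (varslen % 4);  /* word-align the var block */
+    return varslen + 1;                /* plus the trailing NUL */
+}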
process_nvram_vars(bufp, len); + if (len % 4) { + len += 4 - (len % 4); + } + bufp += len; + *bufp++ = 0; + if (len) + bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1); + if (bcmerror) { + DHD_ERROR(("%s: error downloading vars: %d\n", + __FUNCTION__, bcmerror)); + } + } + else { + DHD_ERROR(("%s: error reading nvram file: %d\n", + __FUNCTION__, len)); + bcmerror = BCME_SDIO_ERROR; + } + +err: + if (memblock) + MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE); + + if (image) + dhd_os_close_image(image); + + return bcmerror; +} + +static int +_dhdsdio_download_firmware(struct dhd_bus *bus) +{ + int bcmerror = -1; + + bool embed = FALSE; /* download embedded firmware */ + bool dlok = FALSE; /* download firmware succeeded */ + + /* Out immediately if no image to download */ + if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) { +#ifdef BCMEMBEDIMAGE + embed = TRUE; +#else + return 0; +#endif + } + + /* Keep arm in reset */ + if (dhdsdio_download_state(bus, TRUE)) { + DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__)); + goto err; + } + + /* External image takes precedence if specified */ + if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) { + if (dhdsdio_download_code_file(bus, bus->fw_path)) { + DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__)); +#ifdef BCMEMBEDIMAGE + embed = TRUE; +#else + goto err; +#endif + } + else { + embed = FALSE; + dlok = TRUE; + } + } + +#ifdef BCMEMBEDIMAGE + if (embed) { + if (dhdsdio_download_code_array(bus)) { + DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__)); + goto err; + } + else { + dlok = TRUE; + } + } +#else + BCM_REFERENCE(embed); +#endif + if (!dlok) { + DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__)); + goto err; + } + + /* External nvram takes precedence if specified */ + if (dhdsdio_download_nvram(bus)) { + DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__)); + goto err; + } + + /* Take arm out of reset */ + if (dhdsdio_download_state(bus, FALSE)) { + DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__)); + goto err; + } + + bcmerror = 0; + +err: + return bcmerror; +} + +static int +dhd_bcmsdh_recv_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle) +{ + int status; + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + return BCME_NODEVICE; + } + + status = bcmsdh_recv_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle); + + return status; +} + +static int +dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes, + void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry) +{ + int ret; + int i = 0; + int retries = 0; + bcmsdh_info_t *sdh; + + if (!KSO_ENAB(bus)) { + DHD_ERROR(("%s: Device asleep\n", __FUNCTION__)); + return BCME_NODEVICE; + } + + sdh = bus->sdh; + do { + ret = bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes, + pkt, complete, handle); + + bus->f2txdata++; + ASSERT(ret != BCME_PENDING); + + if (ret == BCME_NODEVICE) { + DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__)); + } else if (ret < 0) { + /* On failure, abort the command and terminate the frame */ + DHD_ERROR(("%s: sdio error %d, abort command and terminate frame.\n", + __FUNCTION__, ret)); + bus->tx_sderrs++; + bus->f1regdata++; + bus->dhd->tx_errors++; + bcmsdh_abort(sdh, SDIO_FUNC_2); + bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, + SFC_WF_TERM, NULL); + 
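+/*
+ * Sketch of the retry policy wrapped around bcmsdh_send_buf() in this
+ * function (hypothetical predicate): a failed write is aborted and
+ * terminated above, the write-frame byte counter is drained below, and
+ * the send is reattempted only while the retrydata tunable is set and
+ * the retry budget lasts.
+ */
+static bool
+dhd_send_should_retry(int ret, bool retry_enabled, int retries, int max_retry)
+{
+    return (ret < 0) && retry_enabled && (retries < max_retry);
+}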
for (i = 0; i < READ_FRM_CNT_RETRIES; i++) { + uint8 hi, lo; + hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCHI, + NULL); + lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCLO, + NULL); + bus->f1regdata += 2; + if ((hi == 0) && (lo == 0)) + break; + } + } + } while ((ret < 0) && retrydata && ++retries < max_retry); + + return ret; +} + +uint8 +dhd_bus_is_ioready(struct dhd_bus *bus) +{ + uint8 enable; + bcmsdh_info_t *sdh; + ASSERT(bus); + ASSERT(bus->sih != NULL); + enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2); + sdh = bus->sdh; + return (enable == bcmsdh_cfg_read(sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL)); +} + +uint +dhd_bus_chip(struct dhd_bus *bus) +{ + ASSERT(bus->sih != NULL); + return bus->sih->chip; +} + +uint +dhd_bus_chiprev(struct dhd_bus *bus) +{ + ASSERT(bus); + ASSERT(bus->sih != NULL); + return bus->sih->chiprev; +} + +void * +dhd_bus_pub(struct dhd_bus *bus) +{ + return bus->dhd; +} + +void * +dhd_bus_sih(struct dhd_bus *bus) +{ + return (void *)bus->sih; +} + +void * +dhd_bus_txq(struct dhd_bus *bus) +{ + return &bus->txq; +} + +uint +dhd_bus_hdrlen(struct dhd_bus *bus) +{ + return (bus->txglom_enable) ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN; +} + +void +dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val) +{ + bus->dotxinrx = val; +} + +int +dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) +{ + int bcmerror = 0; + dhd_bus_t *bus; + + bus = dhdp->bus; + + if (flag == TRUE) { + if (!bus->dhd->dongle_reset) { + dhd_os_sdlock(dhdp); + dhd_os_wd_timer(dhdp, 0); +#if !defined(IGNORE_ETH0_DOWN) + /* Force flow control as protection when stop come before ifconfig_down */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON); +#endif /* !defined(IGNORE_ETH0_DOWN) */ + /* Expect app to have torn down any connection before calling */ + /* Stop the bus, disable F2 */ + dhd_bus_stop(bus, FALSE); + +#if defined(OOB_INTR_ONLY) + /* Clean up any pending IRQ */ + dhd_enable_oob_intr(bus, FALSE); + bcmsdh_oob_intr_set(bus->sdh, FALSE); + bcmsdh_oob_intr_unregister(bus->sdh); +#endif + + /* Clean tx/rx buffer pointers, detach from the dongle */ + dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE); + + bus->dhd->dongle_reset = TRUE; + bus->dhd->up = FALSE; + dhd_txglom_enable(dhdp, FALSE); + dhd_os_sdunlock(dhdp); + + DHD_TRACE(("%s: WLAN OFF DONE\n", __FUNCTION__)); + /* App can now remove power from device */ + } else + bcmerror = BCME_SDIO_ERROR; + } else { + /* App must have restored power to device before calling */ + + DHD_TRACE(("\n\n%s: == WLAN ON ==\n", __FUNCTION__)); + + if (bus->dhd->dongle_reset) { + /* Turn on WLAN */ + dhd_os_sdlock(dhdp); + /* Reset SD client */ + bcmsdh_reset(bus->sdh); + + /* Attempt to re-attach & download */ + if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh, + (uint32 *)SI_ENUM_BASE, + bus->cl_devid)) { + /* Attempt to download binary to the dongle */ + if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) && + dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh) >= 0) { + + /* Re-init bus, enable F2 transfer */ + bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE); + if (bcmerror == BCME_OK) { +#if defined(OOB_INTR_ONLY) + dhd_enable_oob_intr(bus, TRUE); + bcmsdh_oob_intr_register(bus->sdh, + dhdsdio_isr, bus); + bcmsdh_oob_intr_set(bus->sdh, TRUE); +#endif + + bus->dhd->dongle_reset = FALSE; + bus->dhd->up = TRUE; + +#if !defined(IGNORE_ETH0_DOWN) + /* Restore flow control */ + dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF); +#endif + dhd_os_wd_timer(dhdp, dhd_watchdog_ms); + + DHD_TRACE(("%s: WLAN ON DONE\n", 
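+/*
+ * Condensed sketch of the power-on ("WLAN ON") sequence this branch walks
+ * through; every step must succeed before the next runs, and a failure
+ * unwinds by stopping the bus and releasing the dongle. Hypothetical
+ * wrapper with the intermediate error handling elided.
+ */
+static int
+dhd_wlan_on_sketch(dhd_bus_t *bus)
+{
+    bcmsdh_reset(bus->sdh);                     /* 1. reset the SD client */
+    if (!dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh,
+        (uint32 *)SI_ENUM_BASE, bus->cl_devid))
+        return BCME_SDIO_ERROR;                 /* 2. re-attach to the cores */
+    if (!dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh))
+        return BCME_SDIO_ERROR;                 /* 3. re-init bus state */
+    if (dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh) < 0)
+        return BCME_SDIO_ERROR;                 /* 4. reload the firmware */
+    return dhd_bus_init((dhd_pub_t *)bus->dhd, FALSE);  /* 5. enable F2 */
+}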
__FUNCTION__));
+                    } else {
+                        dhd_bus_stop(bus, FALSE);
+                        dhdsdio_release_dongle(bus, bus->dhd->osh,
+                            TRUE, FALSE);
+                    }
+                } else {
+                    DHD_ERROR(("%s Failed to download binary to the dongle\n",
+                        __FUNCTION__));
+                    if (bus->sih != NULL) {
+                        si_detach(bus->sih);
+                        bus->sih = NULL;
+                    }
+                    bcmerror = BCME_SDIO_ERROR;
+                }
+            } else
+                bcmerror = BCME_SDIO_ERROR;
+
+            dhd_os_sdunlock(dhdp);
+        } else {
+            bcmerror = BCME_SDIO_ERROR;
+            DHD_INFO(("%s called when dongle is not in reset\n",
+                __FUNCTION__));
+            DHD_INFO(("Will call dhd_bus_start instead\n"));
+            dhd_bus_resume(dhdp, 1);
+            if ((bcmerror = dhd_bus_start(dhdp)) != 0)
+                DHD_ERROR(("%s: dhd_bus_start fail with %d\n",
+                    __FUNCTION__, bcmerror));
+        }
+    }
+    return bcmerror;
+}
+
+int dhd_bus_suspend(dhd_pub_t *dhdpub)
+{
+    return bcmsdh_stop(dhdpub->bus->sdh);
+}
+
+int dhd_bus_resume(dhd_pub_t *dhdpub, int stage)
+{
+    return bcmsdh_start(dhdpub->bus->sdh, stage);
+}
+
+/* Get Chip ID version */
+uint dhd_bus_chip_id(dhd_pub_t *dhdp)
+{
+    dhd_bus_t *bus = dhdp->bus;
+
+    return bus->sih->chip;
+}
+
+/* Get Chip Rev ID version */
+uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
+{
+    dhd_bus_t *bus = dhdp->bus;
+
+    return bus->sih->chiprev;
+}
+
+/* Get Chip Pkg ID version */
+uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
+{
+    dhd_bus_t *bus = dhdp->bus;
+
+    return bus->sih->chippkg;
+}
+
+int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, uint32 *slot_num)
+{
+    *bus_type = bus->bus;
+    *bus_num = bus->bus_num;
+    *slot_num = bus->slot_num;
+    return 0;
+}
+
+int
+dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size)
+{
+    dhd_bus_t *bus;
+
+    bus = dhdp->bus;
+    return dhdsdio_membytes(bus, set, address, data, size);
+}
+
+
+void
+dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path)
+{
+    bus->fw_path = pfw_path;
+    bus->nv_path = pnv_path;
+}
+
+int
+dhd_enableOOB(dhd_pub_t *dhd, bool sleep)
+{
+    dhd_bus_t *bus = dhd->bus;
+    sdpcmd_regs_t *regs = bus->regs;
+    uint retries = 0;
+
+    if (sleep) {
+        dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+        /* Tell device to start using OOB wakeup */
+        W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+        if (retries > retry_limit) {
+            DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+            return BCME_BUSY;
+        }
+        /* Turn off our contribution to the HT clock request */
+        dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+    } else {
+        /* Make sure the controller has the bus up */
+        dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+        /* Send misc interrupt to indicate OOB not needed */
+        W_SDREG(0, &regs->tosbmailboxdata, retries);
+        if (retries <= retry_limit)
+            W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+
+        if (retries > retry_limit)
+            DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+
+        /* Make sure we have SD bus access */
+        dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+    }
+    return BCME_OK;
+}
+
+void
+dhd_bus_pktq_flush(dhd_pub_t *dhdp)
+{
+    dhd_bus_t *bus = dhdp->bus;
+    bool wlfc_enabled = FALSE;
+
+#ifdef PROP_TXSTATUS
+    wlfc_enabled = (dhd_wlfc_cleanup_txq(dhdp, NULL, 0) != WLFC_UNSUPPORTED);
+#endif
+    if (!wlfc_enabled) {
+#ifdef DHDTCPACK_SUPPRESS
+        /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+         * when there is a newly coming packet from network stack.
+ */ + dhd_tcpack_info_tbl_clean(bus->dhd); +#endif /* DHDTCPACK_SUPPRESS */ + /* Clear the data packet queues */ + pktq_flush(dhdp->osh, &bus->txq, TRUE, NULL, 0); + } +} + +#ifdef BCMSDIO +int +dhd_sr_config(dhd_pub_t *dhd, bool on) +{ + dhd_bus_t *bus = dhd->bus; + + if (!bus->_srenab) + return -1; + + return dhdsdio_clk_devsleep_iovar(bus, on); +} + +uint16 +dhd_get_chipid(dhd_pub_t *dhd) +{ + dhd_bus_t *bus = dhd->bus; + + if (bus && bus->sih) + return (uint16)bus->sih->chip; + else + return 0; +} +#endif /* BCMSDIO */ + +#ifdef DEBUGGER +uint32 dhd_sdio_reg_read(void *h, uint32 addr) +{ + uint32 rval; + struct dhd_bus *bus = (struct dhd_bus *) h; + + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + rval = bcmsdh_reg_read(bus->sdh, addr, 4); + + dhd_os_sdunlock(bus->dhd); + + return rval; +} + +void dhd_sdio_reg_write(void *h, uint32 addr, uint32 val) +{ + struct dhd_bus *bus = (struct dhd_bus *) h; + + dhd_os_sdlock(bus->dhd); + + BUS_WAKE(bus); + + dhdsdio_clkctl(bus, CLK_AVAIL, FALSE); + + bcmsdh_reg_write(bus->sdh, addr, 4, val); + + dhd_os_sdunlock(bus->dhd); +} +#endif /* DEBUGGER */ diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.c b/drivers/net/wireless/bcmdhd/dhd_wlfc.c new file mode 100644 index 000000000000..0e74318d173a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_wlfc.c @@ -0,0 +1,4507 @@ +/* + * DHD PROP_TXSTATUS Module. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_wlfc.c 579277 2015-08-14 04:49:50Z $ + * + */ + + +#include +#include + +#include +#include + +#include +#include + +#include + +#include + +#ifdef PROP_TXSTATUS /* a form of flow control between host and dongle */ +#include +#include +#endif + +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ + + +/* + * wlfc naming and lock rules: + * + * 1. Private functions name like _dhd_wlfc_XXX, declared as static and avoid wlfc lock operation. + * 2. Public functions name like dhd_wlfc_XXX, use wlfc lock if needed. + * 3. Non-Proptxstatus module call public functions only and avoid wlfc lock operation. 
+ * + */ + +#if defined(DHD_WLFC_THREAD) +#define WLFC_THREAD_QUICK_RETRY_WAIT_MS 10 /* 10 msec */ +#define WLFC_THREAD_RETRY_WAIT_MS 10000 /* 10 sec */ +#endif /* defined (DHD_WLFC_THREAD) */ + + +#ifdef PROP_TXSTATUS + +#define DHD_WLFC_QMON_COMPLETE(entry) + +#define LIMIT_BORROW + + +/** reordering related */ + +#if defined(DHD_WLFC_THREAD) +static void +_dhd_wlfc_thread_wakeup(dhd_pub_t *dhdp) +{ + dhdp->wlfc_thread_go = TRUE; + wake_up_interruptible(&dhdp->wlfc_wqhead); +} +#endif /* DHD_WLFC_THREAD */ + +static uint16 +_dhd_wlfc_adjusted_seq(void* p, uint8 current_seq) +{ + uint16 seq; + + if (!p) { + return 0xffff; + } + + seq = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + if (seq < current_seq) { + /* wrap around */ + seq += 256; + } + + return seq; +} + +/** + * Enqueue a caller supplied packet on a caller supplied precedence queue, optionally reorder + * suppressed packets. + * @param[in] pq caller supplied packet queue to enqueue the packet on + * @param[in] prec precedence of the to-be-queued packet + * @param[in] p transmit packet to enqueue + * @param[in] qHead if TRUE, enqueue to head instead of tail. Used to maintain d11 seq order. + * @param[in] current_seq + * @param[in] reOrder reOrder on odd precedence (=suppress queue) + */ +static void +_dhd_wlfc_prec_enque(struct pktq *pq, int prec, void* p, bool qHead, + uint8 current_seq, bool reOrder) +{ + struct pktq_prec *q; + uint16 seq, seq2; + void *p2, *p2_prev; + + if (!p) + return; + + ASSERT(prec >= 0 && prec < pq->num_prec); + /* queueing chains not allowed and no segmented SKB (Kernel-3.18.y) */ + ASSERT(!((PKTLINK(p) != NULL) && (PKTLINK(p) != p))); + + ASSERT(!pktq_full(pq)); + ASSERT(!pktq_pfull(pq, prec)); + + q = &pq->q[prec]; + + PKTSETLINK(p, NULL); + if (q->head == NULL) { + /* empty queue */ + q->head = p; + q->tail = p; + } else { + if (reOrder && (prec & 1)) { + seq = _dhd_wlfc_adjusted_seq(p, current_seq); + p2 = qHead ? q->head : q->tail; + seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq); + + if ((qHead &&((seq+1) > seq2)) || (!qHead && ((seq2+1) > seq))) { + /* need reorder */ + p2 = q->head; + p2_prev = NULL; + seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq); + + while (seq > seq2) { + p2_prev = p2; + p2 = PKTLINK(p2); + if (!p2) { + break; + } + seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq); + } + + if (p2_prev == NULL) { + /* insert head */ + PKTSETLINK(p, q->head); + q->head = p; + } else if (p2 == NULL) { + /* insert tail */ + PKTSETLINK(p2_prev, p); + q->tail = p; + } else { + /* insert after p2_prev */ + PKTSETLINK(p, PKTLINK(p2_prev)); + PKTSETLINK(p2_prev, p); + } + goto exit; + } + } + + if (qHead) { + PKTSETLINK(p, q->head); + q->head = p; + } else { + PKTSETLINK(q->tail, p); + q->tail = p; + } + } + +exit: + + q->len++; + pq->len++; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; +} /* _dhd_wlfc_prec_enque */ + +/** + * Create a place to store all packet pointers submitted to the firmware until a status comes back, + * suppress or otherwise. + * + * hang-er: noun, a contrivance on which things are hung, as a hook. 
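+ * + * Illustration: each in-flight pkt occupies one of WLFC_HANGER_MAXITEMS slots; + * the slot index travels to the dongle inside the pkt's htod tag (via + * WL_TXSTATUS_SET_HSLOT) and comes back in the txstatus, keying the lookup.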
+ */ +/** @deprecated soon */ +static void* +_dhd_wlfc_hanger_create(dhd_pub_t *dhd, int max_items) +{ + int i; + wlfc_hanger_t* hanger; + + /* allow only up to a specific size for now */ + ASSERT(max_items == WLFC_HANGER_MAXITEMS); + + if ((hanger = (wlfc_hanger_t*)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_DHD_WLFC_HANGER, + WLFC_HANGER_SIZE(max_items))) == NULL) { + return NULL; + } + memset(hanger, 0, WLFC_HANGER_SIZE(max_items)); + hanger->max_items = max_items; + + for (i = 0; i < hanger->max_items; i++) { + hanger->items[i].state = WLFC_HANGER_ITEM_STATE_FREE; + } + return hanger; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_delete(dhd_pub_t *dhd, void* hanger) +{ + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + if (h) { + DHD_OS_PREFREE(dhd, h, WLFC_HANGER_SIZE(h->max_items)); + return BCME_OK; + } + return BCME_BADARG; +} + +/** @deprecated soon */ +static uint16 +_dhd_wlfc_hanger_get_free_slot(void* hanger) +{ + uint32 i; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + if (h) { + i = h->slot_pos + 1; + if (i == h->max_items) { + i = 0; + } + while (i != h->slot_pos) { + if (h->items[i].state == WLFC_HANGER_ITEM_STATE_FREE) { + h->slot_pos = i; + return (uint16)i; + } + i++; + if (i == h->max_items) + i = 0; + } + h->failed_slotfind++; + } + return WLFC_HANGER_MAXITEMS; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_get_genbit(void* hanger, void* pkt, uint32 slot_id, int* gen) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + *gen = 0xff; + + /* this packet was not pushed at the time it went to the firmware */ + if (slot_id == WLFC_HANGER_MAXITEMS) + return BCME_NOTFOUND; + + if (h) { + if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) { + *gen = h->items[slot_id].gen; + } + else { + DHD_ERROR(("Error: %s():%d item not used\n", + __FUNCTION__, __LINE__)); + rc = BCME_NOTFOUND; + } + + } else { + rc = BCME_BADARG; + } + + return rc; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + if (h && (slot_id < WLFC_HANGER_MAXITEMS)) { + if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_FREE) { + h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE; + h->items[slot_id].pkt = pkt; + h->items[slot_id].pkt_state = 0; + h->items[slot_id].pkt_txstatus = 0; + h->pushed++; + } else { + h->failed_to_push++; + rc = BCME_NOTFOUND; + } + } else { + rc = BCME_BADARG; + } + + return rc; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, bool remove_from_hanger) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + *pktout = NULL; + + /* this packet was not pushed at the time it went to the firmware */ + if (slot_id == WLFC_HANGER_MAXITEMS) + return BCME_NOTFOUND; + + if (h) { + if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) { + *pktout = h->items[slot_id].pkt; + if (remove_from_hanger) { + h->items[slot_id].state = + WLFC_HANGER_ITEM_STATE_FREE; + h->items[slot_id].pkt = NULL; + h->items[slot_id].gen = 0xff; + h->items[slot_id].identifier = 0; + h->popped++; + } + } else { + h->failed_to_pop++; + rc = BCME_NOTFOUND; + } + } else { + rc = BCME_BADARG; + } + + return rc; +} + +/** @deprecated soon */ +static int +_dhd_wlfc_hanger_mark_suppressed(void* hanger, uint32 slot_id, uint8 gen) +{ + int rc = BCME_OK; + wlfc_hanger_t* h = (wlfc_hanger_t*)hanger; + + /* this packet was not pushed at the time it went to the firmware */ + if 
(slot_id == WLFC_HANGER_MAXITEMS) + return BCME_NOTFOUND; + if (h) { + h->items[slot_id].gen = gen; + if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) { + h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED; + } else { + rc = BCME_BADARG; + } + } else { + rc = BCME_BADARG; + } + + return rc; +} + +/** remove reference of specific packet in hanger */ +/** @deprecated soon */ +static bool +_dhd_wlfc_hanger_remove_reference(wlfc_hanger_t* h, void* pkt) +{ + int i; + + if (!h || !pkt) { + return FALSE; + } + + i = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(pkt))); + + if ((i < h->max_items) && (pkt == h->items[i].pkt)) { + if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) { + h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE; + h->items[i].pkt = NULL; + h->items[i].gen = 0xff; + h->items[i].identifier = 0; + return TRUE; + } else { + DHD_ERROR(("Error: %s():%d item not suppressed\n", + __FUNCTION__, __LINE__)); + } + } + + return FALSE; +} + +/** afq = At Firmware Queue, queue containing packets pending in the dongle */ +static int +_dhd_wlfc_enque_afq(athost_wl_status_info_t* ctx, void *p) +{ + wlfc_mac_descriptor_t* entry; + uint16 entry_idx = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + uint8 prec = DHD_PKTTAG_FIFO(PKTTAG(p)); + + if (entry_idx < WLFC_MAC_DESC_TABLE_SIZE) + entry = &ctx->destination_entries.nodes[entry_idx]; + else if (entry_idx < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM)) + entry = &ctx->destination_entries.interfaces[entry_idx - WLFC_MAC_DESC_TABLE_SIZE]; + else + entry = &ctx->destination_entries.other; + + pktq_penq(&entry->afq, prec, p); + + return BCME_OK; +} + +/** afq = At Firmware Queue, queue containing packets pending in the dongle */ +static int +_dhd_wlfc_deque_afq(athost_wl_status_info_t* ctx, uint16 hslot, uint8 hcnt, uint8 prec, + void **pktout) +{ + wlfc_mac_descriptor_t *entry; + struct pktq *pq; + struct pktq_prec *q; + void *p, *b; + + if (!ctx) { + DHD_ERROR(("%s: ctx(%p), pktout(%p)\n", __FUNCTION__, ctx, pktout)); + return BCME_BADARG; + } + + if (pktout) { + *pktout = NULL; + } + + ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1)); + + if (hslot < WLFC_MAC_DESC_TABLE_SIZE) + entry = &ctx->destination_entries.nodes[hslot]; + else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM)) + entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE]; + else + entry = &ctx->destination_entries.other; + + pq = &entry->afq; + + ASSERT(prec < pq->num_prec); + + q = &pq->q[prec]; + + b = NULL; + p = q->head; + + while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))))) + { + b = p; + p = PKTLINK(p); + } + + if (p == NULL) { + /* none is matched */ + if (b) { + DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt)); + } else { + DHD_ERROR(("%s: queue is empty\n", __FUNCTION__)); + } + + return BCME_ERROR; + } + + bcm_pkt_validate_chk(p); + + if (!b) { + /* head packet is matched */ + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + /* middle packet is matched */ + DHD_INFO(("%s: out of order, seq(%d), head_seq(%d)\n", __FUNCTION__, hcnt, + WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(q->head))))); + ctx->stats.ooo_pkts[prec]++; + PKTSETLINK(b, PKTLINK(p)); + if (PKTLINK(p) == NULL) { + q->tail = b; + } + } + + q->len--; + pq->len--; + + PKTSETLINK(p, NULL); + + if (pktout) { + *pktout = p; + } + + return BCME_OK; +} /* _dhd_wlfc_deque_afq */ + +/** + * Flow control information piggy backs on packets, in the 
form of one or more TLVs. This function + * pushes one or more TLVs onto a packet that is going to be sent towards the dongle. + * + * @param[in] ctx + * @param[in/out] packet + * @param[in] tim_signal TRUE if parameter 'tim_bmp' is valid + * @param[in] tim_bmp + * @param[in] mac_handle + * @param[in] htodtag + * @param[in] htodseq d11 seqno for seqno reuse, only used if 'seq reuse' was agreed upon + * earlier between host and firmware. + * @param[in] skip_wlfc_hdr + */ +static int +_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void** packet, bool tim_signal, + uint8 tim_bmp, uint8 mac_handle, uint32 htodtag, uint16 htodseq, bool skip_wlfc_hdr) +{ + uint32 wl_pktinfo = 0; + uint8* wlh; + uint8 dataOffset = 0; + uint8 fillers; + uint8 tim_signal_len = 0; + dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; + + struct bdc_header *h; + void *p = *packet; + + if (skip_wlfc_hdr) + goto push_bdc_hdr; + + if (tim_signal) { + tim_signal_len = TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP; + } + + /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */ + dataOffset = WLFC_CTL_VALUE_LEN_PKTTAG + TLV_HDR_LEN + tim_signal_len; + if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) { + dataOffset += WLFC_CTL_VALUE_LEN_SEQ; + } + + fillers = ROUNDUP(dataOffset, 4) - dataOffset; + dataOffset += fillers; + + PKTPUSH(ctx->osh, p, dataOffset); + wlh = (uint8*) PKTDATA(ctx->osh, p); + + wl_pktinfo = htol32(htodtag); + + wlh[TLV_TAG_OFF] = WLFC_CTL_TYPE_PKTTAG; + wlh[TLV_LEN_OFF] = WLFC_CTL_VALUE_LEN_PKTTAG; + memcpy(&wlh[TLV_HDR_LEN] /* dst */, &wl_pktinfo, sizeof(uint32)); + + if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) { + uint16 wl_seqinfo = htol16(htodseq); + wlh[TLV_LEN_OFF] += WLFC_CTL_VALUE_LEN_SEQ; + memcpy(&wlh[TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PKTTAG], &wl_seqinfo, + WLFC_CTL_VALUE_LEN_SEQ); + } + + if (tim_signal_len) { + wlh[dataOffset - fillers - tim_signal_len ] = + WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP; + wlh[dataOffset - fillers - tim_signal_len + 1] = + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP; + wlh[dataOffset - fillers - tim_signal_len + 2] = mac_handle; + wlh[dataOffset - fillers - tim_signal_len + 3] = tim_bmp; + } + if (fillers) + memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers); + +push_bdc_hdr: + PKTPUSH(ctx->osh, p, BDC_HEADER_LEN); + h = (struct bdc_header *)PKTDATA(ctx->osh, p); + h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT); + if (PKTSUMNEEDED(p)) + h->flags |= BDC_FLAG_SUM_NEEDED; + + + h->priority = (PKTPRIO(p) & BDC_PRIORITY_MASK); + h->flags2 = 0; + h->dataOffset = dataOffset >> 2; + BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p))); + *packet = p; + return BCME_OK; +} /* _dhd_wlfc_pushheader */ + +/** + * Removes (PULLs) flow control related headers from the caller supplied packet, is invoked eg + * when a packet is about to be freed. 
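+ * + * For illustration, the layout being undone here (as built by + * _dhd_wlfc_pushheader()) is: + * + * [BDC_HEADER_LEN bytes: struct bdc_header][h->dataOffset << 2 bytes: wlfc TLVs] + * + * hence the two PKTPULL() calls below of exactly those sizes.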
+ */ +static int +_dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf) +{ + struct bdc_header *h; + + if (PKTLEN(ctx->osh, pktbuf) < BDC_HEADER_LEN) { + DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__, + PKTLEN(ctx->osh, pktbuf), BDC_HEADER_LEN)); + return BCME_ERROR; + } + h = (struct bdc_header *)PKTDATA(ctx->osh, pktbuf); + + /* pull BDC header */ + PKTPULL(ctx->osh, pktbuf, BDC_HEADER_LEN); + + if (PKTLEN(ctx->osh, pktbuf) < (uint)(h->dataOffset << 2)) { + DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__, + PKTLEN(ctx->osh, pktbuf), (h->dataOffset << 2))); + return BCME_ERROR; + } + + /* pull wl-header */ + PKTPULL(ctx->osh, pktbuf, (h->dataOffset << 2)); + return BCME_OK; +} + +/** + * @param[in/out] p packet + */ +static wlfc_mac_descriptor_t* +_dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p) +{ + int i; + wlfc_mac_descriptor_t* table = ctx->destination_entries.nodes; + uint8 ifid = DHD_PKTTAG_IF(PKTTAG(p)); + uint8* dstn = DHD_PKTTAG_DSTN(PKTTAG(p)); + wlfc_mac_descriptor_t* entry = DHD_PKTTAG_ENTRY(PKTTAG(p)); + int iftype = ctx->destination_entries.interfaces[ifid].iftype; + + /* saved one exists, return it */ + if (entry) + return entry; + + /* Multicast destination, STA and P2P clients get the interface entry. + * STA/GC gets the Mac Entry for TDLS destinations, TDLS destinations + * have their own entry. + */ + if ((iftype == WLC_E_IF_ROLE_STA || ETHER_ISMULTI(dstn) || + iftype == WLC_E_IF_ROLE_P2P_CLIENT) && + (ctx->destination_entries.interfaces[ifid].occupied)) { + entry = &ctx->destination_entries.interfaces[ifid]; + } + + if (entry && ETHER_ISMULTI(dstn)) { + DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry); + return entry; + } + + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (table[i].occupied) { + if (table[i].interface_id == ifid) { + if (!memcmp(table[i].ea, dstn, ETHER_ADDR_LEN)) { + entry = &table[i]; + break; + } + } + } + } + + if (entry == NULL) + entry = &ctx->destination_entries.other; + + DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry); + + return entry; +} /* _dhd_wlfc_find_table_entry */ + +/** + * In case a packet must be dropped (because eg the queues are full), various tallies have to + * be updated. Called from several other functions. + * @param[in] dhdp pointer to public DHD structure + * @param[in] prec precedence of the packet + * @param[in] p the packet to be dropped + * @param[in] bPktInQ TRUE if packet is part of a queue + */ +static int +_dhd_wlfc_prec_drop(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ) +{ + athost_wl_status_info_t* ctx; + void *pout = NULL; + + ASSERT(dhdp && p); + ASSERT(prec >= 0 && prec <= WLFC_PSQ_PREC_COUNT); + + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; + + if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) { + /* suppressed queue, need pop from hanger */ + _dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG + (PKTTAG(p))), &pout, TRUE); + ASSERT(p == pout); + } + + if (!(prec & 1)) { +#ifdef DHDTCPACK_SUPPRESS + /* pkt in delayed q, so fake push BDC header for + * dhd_tcpack_check_xmit() and dhd_txcomplete(). + */ + _dhd_wlfc_pushheader(ctx, &p, FALSE, 0, 0, 0, 0, TRUE); + + /* This packet is about to be freed, so remove it from tcp_ack_info_tbl + * This must be one of... + * 1. A pkt already in delayQ is evicted by another pkt with higher precedence + * in _dhd_wlfc_prec_enq_with_drop() + * 2. A pkt could not be enqueued to delayQ because it is full, + * in _dhd_wlfc_enque_delayq(). + * 3.
A pkt could not be enqueued to delayQ because it is full, + * in _dhd_wlfc_rollback_packet_toq(). + */ + if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!" + " Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + } + + if (bPktInQ) { + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--; + ctx->pkt_cnt_per_ac[prec>>1]--; + ctx->pkt_cnt_in_psq--; + } + + ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][DHD_PKTTAG_FIFO(PKTTAG(p))]--; + ctx->stats.pktout++; + ctx->stats.drop_pkts[prec]++; + + dhd_txcomplete(dhdp, p, FALSE); + PKTFREE(ctx->osh, p, TRUE); + + return 0; +} /* _dhd_wlfc_prec_drop */ + +/** + * Called when eg the host handed a new packet over to the driver, or when the dongle reported + * that a packet could currently not be transmitted (=suppressed). This function enqueues a transmit + * packet in the host driver to be (re)transmitted at a later opportunity. + * @param[in] dhdp pointer to public DHD structure + * @param[in] qHead When TRUE, queue packet at head instead of tail, to preserve d11 sequence + */ +static bool +_dhd_wlfc_prec_enq_with_drop(dhd_pub_t *dhdp, struct pktq *pq, void *pkt, int prec, bool qHead, + uint8 current_seq) +{ + void *p = NULL; + int eprec = -1; /* precedence to evict from */ + athost_wl_status_info_t* ctx; + + ASSERT(dhdp && pq && pkt); + ASSERT(prec >= 0 && prec < pq->num_prec); + + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; + + /* Fast case, precedence queue is not full and we are also not + * exceeding total queue length + */ + if (!pktq_pfull(pq, prec) && !pktq_full(pq)) { + goto exit; + } + + /* Determine precedence from which to evict packet, if any */ + if (pktq_pfull(pq, prec)) { + eprec = prec; + } else if (pktq_full(pq)) { + p = pktq_peek_tail(pq, &eprec); + if (!p) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return FALSE; + } + if ((eprec > prec) || (eprec < 0)) { + if (!pktq_pempty(pq, prec)) { + eprec = prec; + } else { + return FALSE; + } + } + } + + /* Evict if needed */ + if (eprec >= 0) { + /* Detect queueing to unconfigured precedence */ + ASSERT(!pktq_pempty(pq, eprec)); + /* Evict all fragmented frames */ + dhd_prec_drop_pkts(dhdp, pq, eprec, _dhd_wlfc_prec_drop); + } + +exit: + /* Enqueue */ + _dhd_wlfc_prec_enque(pq, prec, pkt, qHead, current_seq, + WLFC_GET_REORDERSUPP(dhdp->wlfc_mode)); + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(pkt))][prec>>1]++; + ctx->pkt_cnt_per_ac[prec>>1]++; + ctx->pkt_cnt_in_psq++; + + return TRUE; +} /* _dhd_wlfc_prec_enq_with_drop */ + +/** + * Called during eg the 'committing' of a transmit packet from the OS layer to a lower layer, in + * the event that this 'commit' failed. + */ +static int +_dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx, + void* p, ewlfc_packet_state_t pkt_type, uint32 hslot) +{ + /* + * put the packet back to the head of queue + * - suppressed packet goes back to suppress sub-queue + * - pull out the header, if new or delayed packet + * + * Note: hslot is used only when header removal is done. + */ + wlfc_mac_descriptor_t* entry; + int rc = BCME_OK; + int prec, fifo_id; + + entry = _dhd_wlfc_find_table_entry(ctx, p); + prec = DHD_PKTTAG_FIFO(PKTTAG(p)); + fifo_id = prec << 1; + if (pkt_type == eWLFC_PKTTYPE_SUPPRESSED) + fifo_id += 1; + if (entry != NULL) { + /* + if this packet did not count against FIFO credit, it must have + taken a requested_credit from the firmware (for pspoll etc.) 
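+ (hence the entry->requested_credit++ below: rolling the pkt back must also + return the destination-specific credit it consumed)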
+ */ + if ((prec != AC_COUNT) && !DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) + entry->requested_credit++; + + if (pkt_type == eWLFC_PKTTYPE_DELAYED) { + /* decrement sequence count */ + WLFC_DECR_SEQCOUNT(entry, prec); + /* remove header first */ + rc = _dhd_wlfc_pullheader(ctx, p); + if (rc != BCME_OK) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + goto exit; + } + } + + if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, fifo_id, TRUE, + WLFC_SEQCOUNT(entry, fifo_id>>1)) + == FALSE) { + /* enque failed */ + DHD_ERROR(("Error: %s():%d, fifo_id(%d)\n", + __FUNCTION__, __LINE__, fifo_id)); + rc = BCME_ERROR; + } + } else { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + rc = BCME_ERROR; + } + +exit: + if (rc != BCME_OK) { + ctx->stats.rollback_failed++; + _dhd_wlfc_prec_drop(ctx->dhdp, fifo_id, p, FALSE); + } else { + ctx->stats.rollback++; + } + + return rc; +} /* _dhd_wlfc_rollback_packet_toq */ + +/** Returns TRUE if host OS -> DHD flow control is allowed on the caller supplied interface */ +static bool +_dhd_wlfc_allow_fc(athost_wl_status_info_t* ctx, uint8 ifid) +{ + int prec, ac_traffic = WLFC_NO_TRAFFIC; + + for (prec = 0; prec < AC_COUNT; prec++) { + if (ctx->pkt_cnt_in_drv[ifid][prec] > 0) { + if (ac_traffic == WLFC_NO_TRAFFIC) + ac_traffic = prec + 1; + else if (ac_traffic != (prec + 1)) + ac_traffic = WLFC_MULTI_TRAFFIC; + } + } + + if (ac_traffic >= 1 && ac_traffic <= AC_COUNT) { + /* single AC (BE/BK/VI/VO) in queue */ + if (ctx->allow_fc) { + return TRUE; + } else { + uint32 delta; + uint32 curr_t = OSL_SYSUPTIME(); + + if (ctx->fc_defer_timestamp == 0) { + /* first single ac scenario */ + ctx->fc_defer_timestamp = curr_t; + return FALSE; + } + + /* single AC duration, this handles wrap around, e.g. 1 - ~0 = 2. */ + delta = curr_t - ctx->fc_defer_timestamp; + if (delta >= WLFC_FC_DEFER_PERIOD_MS) { + ctx->allow_fc = TRUE; + } + } + } else { + /* multiple ACs or BCMC in queue */ + ctx->allow_fc = FALSE; + ctx->fc_defer_timestamp = 0; + } + + return ctx->allow_fc; +} /* _dhd_wlfc_allow_fc */ + +/** + * Starts or stops the flow of transmit packets from the host OS towards the DHD, depending on + * low/high watermarks. 
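+ * + * A sketch of the hysteresis implemented below (watermarks from this file): + * + * pq->len >= WLFC_FLOWCONTROL_HIWATER -> dhd_txflowcontrol(dhdp, if_id, ON) + * pq->len <= WLFC_FLOWCONTROL_LOWATER -> dhd_txflowcontrol(dhdp, if_id, OFF)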
+ */ +static void +_dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id) +{ + dhd_pub_t *dhdp; + + ASSERT(ctx); + + dhdp = (dhd_pub_t *)ctx->dhdp; + ASSERT(dhdp); + + if (dhdp->skip_fc && dhdp->skip_fc()) + return; + + if ((ctx->hostif_flow_state[if_id] == OFF) && !_dhd_wlfc_allow_fc(ctx, if_id)) + return; + + if ((pq->len <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) { + /* start traffic */ + ctx->hostif_flow_state[if_id] = OFF; + /* + WLFC_DBGMESG(("qlen:%02d, if:%02d, ->OFF, start traffic %s()\n", + pq->len, if_id, __FUNCTION__)); + */ + WLFC_DBGMESG(("F")); + + dhd_txflowcontrol(dhdp, if_id, OFF); + + ctx->toggle_host_if = 0; + } + + if ((pq->len >= WLFC_FLOWCONTROL_HIWATER) && (ctx->hostif_flow_state[if_id] == OFF)) { + /* stop traffic */ + ctx->hostif_flow_state[if_id] = ON; + /* + WLFC_DBGMESG(("qlen:%02d, if:%02d, ->ON, stop traffic %s()\n", + pq->len, if_id, __FUNCTION__)); + */ + WLFC_DBGMESG(("N")); + + dhd_txflowcontrol(dhdp, if_id, ON); + + ctx->host_ifidx = if_id; + ctx->toggle_host_if = 1; + } + + return; +} /* _dhd_wlfc_flow_control_check */ + +static int +_dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, + uint8 ta_bmp) +{ + int rc = BCME_OK; + void* p = NULL; + int dummylen = ((dhd_pub_t *)ctx->dhdp)->hdrlen+ 16; + dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; + + if (dhdp->proptxstatus_txoff) { + rc = BCME_NORESOURCE; + return rc; + } + + /* allocate a dummy packet */ + p = PKTGET(ctx->osh, dummylen, TRUE); + if (p) { + PKTPULL(ctx->osh, p, dummylen); + DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), 0); + _dhd_wlfc_pushheader(ctx, &p, TRUE, ta_bmp, entry->mac_handle, 0, 0, FALSE); + DHD_PKTTAG_SETSIGNALONLY(PKTTAG(p), 1); + DHD_PKTTAG_WLFCPKT_SET(PKTTAG(p), 1); +#ifdef PROP_TXSTATUS_DEBUG + ctx->stats.signal_only_pkts_sent++; +#endif + +#if defined(BCMPCIE) + rc = dhd_bus_txdata(dhdp->bus, p, ctx->host_ifidx); +#else + rc = dhd_bus_txdata(dhdp->bus, p); +#endif + if (rc != BCME_OK) { + _dhd_wlfc_pullheader(ctx, p); + PKTFREE(ctx->osh, p, TRUE); + } + } else { + DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n", + __FUNCTION__, dummylen)); + rc = BCME_NOMEM; + dhdp->tx_pktgetfail++; + } + + return rc; +} /* _dhd_wlfc_send_signalonly_packet */ + +/** + * Called on eg receiving 'mac close' indication from dongle. Updates the per-MAC administration + * maintained in caller supplied parameter 'entry'. 
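+ * + * e.g. a remote MAC in WLFC_STATE_CLOSE that still has frames queued for + * prec 2 gets NBITVAL(2) set in entry->traffic_pending_bmp; a changed bitmap + * is then pushed to firmware via _dhd_wlfc_send_signalonly_packet().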
+ * + * @param[in/out] entry administration about a remote MAC entity + * @param[in] prec precedence queue for this remote MAC entity + * + * Return value: TRUE if traffic availability changed + */ +static bool +_dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, + int prec) +{ + bool rc = FALSE; + + if (entry->state == WLFC_STATE_CLOSE) { + if ((pktq_plen(&entry->psq, (prec << 1)) == 0) && + (pktq_plen(&entry->psq, ((prec << 1) + 1)) == 0)) { + /* no packets in both 'normal' and 'suspended' queues */ + if (entry->traffic_pending_bmp & NBITVAL(prec)) { + rc = TRUE; + entry->traffic_pending_bmp = + entry->traffic_pending_bmp & ~ NBITVAL(prec); + } + } else { + /* packets are queued in host for transmission to dongle */ + if (!(entry->traffic_pending_bmp & NBITVAL(prec))) { + rc = TRUE; + entry->traffic_pending_bmp = + entry->traffic_pending_bmp | NBITVAL(prec); + } + } + } + + if (rc) { + /* request a TIM update to firmware at the next piggyback opportunity */ + if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) { + entry->send_tim_signal = 1; + _dhd_wlfc_send_signalonly_packet(ctx, entry, entry->traffic_pending_bmp); + entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; + entry->send_tim_signal = 0; + } else { + rc = FALSE; + } + } + + return rc; +} /* _dhd_wlfc_traffic_pending_check */ + +/** + * Called on receiving a 'd11 suppressed' or 'wl suppressed' tx status from the firmware. Enqueues + * the packet to transmit to firmware again at a later opportunity. + */ +static int +_dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p) +{ + wlfc_mac_descriptor_t* entry; + + entry = _dhd_wlfc_find_table_entry(ctx, p); + if (entry == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_NOTFOUND; + } + /* + - suppressed packets go to sub_queue[2*prec + 1] AND + - delayed packets go to sub_queue[2*prec + 0] to ensure + order of delivery. + */ + if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, ((prec << 1) + 1), FALSE, + WLFC_SEQCOUNT(entry, prec)) + == FALSE) { + ctx->stats.delayq_full_error++; + /* WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); */ + WLFC_DBGMESG(("s")); + return BCME_ERROR; + } + + /* A packet has been pushed, update traffic availability bitmap, if applicable */ + _dhd_wlfc_traffic_pending_check(ctx, entry, prec); + _dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p))); + return BCME_OK; +} + +/** + * Called when a transmit packet is about to be 'committed' from the OS layer to a lower layer + * towards the dongle (eg the DBUS layer). Updates wlfc administration. May modify packet. + * + * @param[in/out] ctx driver specific flow control administration + * @param[in/out] entry The remote MAC entity for which the packet is destined. + * @param[in/out] packet Packet to send. This function optionally adds TLVs to the packet.
+ * @param[in] header_needed True if packet is 'new' to flow control + * @param[out] slot Handle to container in which the packet was 'parked' + */ +static int +_dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx, + wlfc_mac_descriptor_t* entry, void** packet, int header_needed, uint32* slot) +{ + int rc = BCME_OK; + int hslot = WLFC_HANGER_MAXITEMS; + bool send_tim_update = FALSE; + uint32 htod = 0; + uint16 htodseq = 0; + uint8 free_ctr; + int gen = 0xff; + dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; + void * p = *packet; + + *slot = hslot; + + if (entry == NULL) { + entry = _dhd_wlfc_find_table_entry(ctx, p); + } + + if (entry == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_ERROR; + } + + if (entry->send_tim_signal) { + /* sends a traffic indication bitmap to the dongle */ + send_tim_update = TRUE; + entry->send_tim_signal = 0; + entry->traffic_lastreported_bmp = entry->traffic_pending_bmp; + } + + if (header_needed) { + if (WLFC_GET_AFQ(dhdp->wlfc_mode)) { + hslot = (uint)(entry - &ctx->destination_entries.nodes[0]); + } else { + hslot = _dhd_wlfc_hanger_get_free_slot(ctx->hanger); + } + gen = entry->generation; + free_ctr = WLFC_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p))); + } else { + if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) { + htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(p)); + } + + hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + + if (WLFC_GET_REORDERSUPP(dhdp->wlfc_mode)) { + gen = entry->generation; + } else if (WLFC_GET_AFQ(dhdp->wlfc_mode)) { + gen = WL_TXSTATUS_GET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + } else { + _dhd_wlfc_hanger_get_genbit(ctx->hanger, p, hslot, &gen); + } + + free_ctr = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))); + /* remove old header */ + _dhd_wlfc_pullheader(ctx, p); + } + + if (hslot >= WLFC_HANGER_MAXITEMS) { + DHD_ERROR(("Error: %s():no hanger slot available\n", __FUNCTION__)); + return BCME_ERROR; + } + + WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr); + WL_TXSTATUS_SET_HSLOT(htod, hslot); + WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p))); + WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST); + WL_TXSTATUS_SET_GENERATION(htod, gen); + DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1); + + if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) { + /* + Indicate that this packet is being sent in response to an + explicit request from the firmware side. + */ + WLFC_PKTFLAG_SET_PKTREQUESTED(htod); + } else { + WLFC_PKTFLAG_CLR_PKTREQUESTED(htod); + } + + rc = _dhd_wlfc_pushheader(ctx, &p, send_tim_update, + entry->traffic_lastreported_bmp, entry->mac_handle, htod, htodseq, FALSE); + if (rc == BCME_OK) { + DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod); + + if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { + wlfc_hanger_t *h = (wlfc_hanger_t*)(ctx->hanger); + if (header_needed) { + /* + a new header was created for this packet. + push to hanger slot and scrub q. Since bus + send succeeded, increment seq number as well. 
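+ (e.g. the hslot packed into the htod tag above is what later lets the + txstatus path retrieve this exact pkt again via _dhd_wlfc_hanger_poppkt())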
+ */ + rc = _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot); + if (rc == BCME_OK) { +#ifdef PROP_TXSTATUS_DEBUG + h->items[hslot].push_time = + OSL_SYSUPTIME(); +#endif + } else { + DHD_ERROR(("%s() hanger_pushpkt() failed, rc: %d\n", + __FUNCTION__, rc)); + } + } else { + /* clear hanger state */ + if (((wlfc_hanger_t*)(ctx->hanger))->items[hslot].pkt != p) + DHD_ERROR(("%s() pkt not match: cur %p, hanger pkt %p\n", + __FUNCTION__, p, h->items[hslot].pkt)); + ASSERT(h->items[hslot].pkt == p); + bcm_object_feature_set(h->items[hslot].pkt, + BCM_OBJECT_FEATURE_PKT_STATE, 0); + h->items[hslot].pkt_state = 0; + h->items[hslot].pkt_txstatus = 0; + h->items[hslot].state = WLFC_HANGER_ITEM_STATE_INUSE; + } + } else if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { + /* clear hanger state */ + ((wlfc_hanger_t*)(ctx->hanger))->items[hslot].pkt_state = 0; + ((wlfc_hanger_t*)(ctx->hanger))->items[hslot].pkt_txstatus = 0; + } + + if ((rc == BCME_OK) && header_needed) { + /* increment free running sequence count */ + WLFC_INCR_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p))); + } + } + *slot = hslot; + *packet = p; + return rc; +} /* _dhd_wlfc_pretx_pktprocess */ + +/** + * A remote wireless mac may be temporarily 'closed' due to power management. Returns '1' if remote + * mac is in the 'open' state, otherwise '0'. + */ +static int +_dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx, + wlfc_mac_descriptor_t* entry, int prec) +{ + if (entry->interface_id >= WLFC_MAX_IFNUM) { + ASSERT(&ctx->destination_entries.other == entry); + return 1; + } + + if (ctx->destination_entries.interfaces[entry->interface_id].iftype == + WLC_E_IF_ROLE_P2P_GO) { + /* - destination interface is of type p2p GO. + For a p2pGO interface, if the destination is OPEN but the interface is + CLOSEd, do not send traffic. But if the dstn is CLOSEd while there is + destination-specific credit left, send packets. This is because the + firmware is storing the destination-specific requested packets in its queue. + */ + if ((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) && + (entry->requested_packet == 0)) { + return 0; + } + } + + /* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */ + if (((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) && + (entry->requested_packet == 0)) || + (!(entry->ac_bitmap & (1 << prec)))) { + return 0; + } + + return 1; +} /* _dhd_wlfc_is_destination_open */ + +/** + * Dequeues a suppressed or delayed packet from a queue + * @param[in/out] ctx Driver specific flow control administration + * @param[in] prec Precedence of queue to dequeue from + * @param[out] ac_credit_spent Boolean, returns 0 or 1 + * @param[out] needs_hdr Boolean, returns 0 or 1 + * @param[out] entry_out The remote MAC for which the packet is destined + * @param[in] only_no_credit If TRUE, searches only the entries with outstanding firmware + * requests (requested_entry[]) instead of the active ones + * + * Return value: the dequeued packet + */ +static void* +_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx, int prec, + uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out, + bool only_no_credit) +{ + wlfc_mac_descriptor_t* entry; + int total_entries; + void* p = NULL; + int i; + uint8 credit_spent = ((prec == AC_COUNT) && !ctx->bcmc_credit_supported) ?
0 : 1; + + *entry_out = NULL; + /* most cases a packet will count against FIFO credit */ + *ac_credit_spent = credit_spent; + + /* search all entries, include nodes as well as interfaces */ + if (only_no_credit) { + total_entries = ctx->requested_entry_count; + } else { + total_entries = ctx->active_entry_count; + } + + for (i = 0; i < total_entries; i++) { + if (only_no_credit) { + entry = ctx->requested_entry[i]; + } else { + entry = ctx->active_entry_head; + /* move head to ensure fair round-robin */ + ctx->active_entry_head = ctx->active_entry_head->next; + } + ASSERT(entry); + + if (entry->occupied && _dhd_wlfc_is_destination_open(ctx, entry, prec) && + (entry->transit_count < WL_TXSTATUS_FREERUNCTR_MASK) && + (!entry->suppressed)) { + *ac_credit_spent = credit_spent; + if (entry->state == WLFC_STATE_CLOSE) { + *ac_credit_spent = 0; + } + + /* higher precedence will be picked up first, + * i.e. suppressed packets before delayed ones + */ + p = pktq_pdeq(&entry->psq, PSQ_SUP_IDX(prec)); + *needs_hdr = 0; + if (p == NULL) { + /* De-Q from delay Q */ + p = pktq_pdeq(&entry->psq, PSQ_DLY_IDX(prec)); + *needs_hdr = 1; + } + + if (p != NULL) { + bcm_pkt_validate_chk(p); + /* did the packet come from suppress sub-queue? */ + if (entry->requested_credit > 0) { + entry->requested_credit--; +#ifdef PROP_TXSTATUS_DEBUG + entry->dstncredit_sent_packets++; +#endif + } else if (entry->requested_packet > 0) { + entry->requested_packet--; + DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p)); + } + + *entry_out = entry; + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--; + ctx->pkt_cnt_per_ac[prec]--; + ctx->pkt_cnt_in_psq--; + _dhd_wlfc_flow_control_check(ctx, &entry->psq, + DHD_PKTTAG_IF(PKTTAG(p))); + /* + * A packet has been picked up, update traffic availability bitmap, + * if applicable. + */ + _dhd_wlfc_traffic_pending_check(ctx, entry, prec); + return p; + } + } + } + return NULL; +} /* _dhd_wlfc_deque_delayedq */ + +/** Enqueues caller supplied packet on either a 'suppressed' or 'delayed' queue */ +static int +_dhd_wlfc_enque_delayq(athost_wl_status_info_t* ctx, void* pktbuf, int prec) +{ + wlfc_mac_descriptor_t* entry; + + if (pktbuf != NULL) { + entry = _dhd_wlfc_find_table_entry(ctx, pktbuf); + if (entry == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_ERROR; + } + + /* + - suppressed packets go to sub_queue[2*prec + 1] AND + - delayed packets go to sub_queue[2*prec + 0] to ensure + order of delivery. 
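+ e.g. for prec 2 this means delayed pkts sit in sub_queue[4] and suppressed + pkts in sub_queue[5]; the dequeue side (_dhd_wlfc_deque_delayedq) drains + PSQ_SUP_IDX(2) == 5 before PSQ_DLY_IDX(2) == 4, preserving d11 seq order.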
+ */ + if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, pktbuf, (prec << 1), + FALSE, WLFC_SEQCOUNT(entry, prec)) + == FALSE) { + WLFC_DBGMESG(("D")); + ctx->stats.delayq_full_error++; + return BCME_ERROR; + } + + + /* A packet has been pushed, update traffic availability bitmap, if applicable */ + _dhd_wlfc_traffic_pending_check(ctx, entry, prec); + } + + return BCME_OK; +} /* _dhd_wlfc_enque_delayq */ + +/** Returns TRUE if caller supplied packet is destined for caller supplied interface */ +static bool _dhd_wlfc_ifpkt_fn(void* p, void *p_ifid) +{ + if (!p || !p_ifid) + return FALSE; + + return (DHD_PKTTAG_WLFCPKT(PKTTAG(p))&& (*((uint8 *)p_ifid) == DHD_PKTTAG_IF(PKTTAG(p)))); +} + +/** Returns TRUE if caller supplied packet is destined for caller supplied remote MAC */ +static bool _dhd_wlfc_entrypkt_fn(void* p, void *entry) +{ + if (!p || !entry) + return FALSE; + + return (DHD_PKTTAG_WLFCPKT(PKTTAG(p))&& (entry == DHD_PKTTAG_ENTRY(PKTTAG(p)))); +} + +static void +_dhd_wlfc_return_implied_credit(athost_wl_status_info_t* wlfc, void* pkt) +{ + dhd_pub_t *dhdp; + bool credit_return = FALSE; + + if (!wlfc || !pkt) { + return; + } + + dhdp = (dhd_pub_t *)(wlfc->dhdp); + if (dhdp && (dhdp->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) && + DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) { + int lender, credit_returned = 0; + uint8 fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pkt)); + + credit_return = TRUE; + + /* Note that borrower is fifo_id */ + /* Return credits to highest priority lender first */ + for (lender = AC_COUNT; lender >= 0; lender--) { + if (wlfc->credits_borrowed[fifo_id][lender] > 0) { + wlfc->FIFO_credit[lender]++; + wlfc->credits_borrowed[fifo_id][lender]--; + credit_returned = 1; + break; + } + } + + if (!credit_returned) { + wlfc->FIFO_credit[fifo_id]++; + } + } + + BCM_REFERENCE(credit_return); +#if defined(DHD_WLFC_THREAD) + if (credit_return) { + _dhd_wlfc_thread_wakeup(dhdp); + } +#endif /* defined(DHD_WLFC_THREAD) */ +} + +/** Removes and frees a packet from the hanger. Called during eg tx complete. 
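+ * The slot itself is only recycled once item->pkt_state has accumulated all + * completion bits, i.e. equals WLFC_HANGER_PKT_STATE_COMPLETE below; a lone + * txstatus or bus-return indication merely ORs in its bit and waits.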
*/ +static void +_dhd_wlfc_hanger_free_pkt(athost_wl_status_info_t* wlfc, uint32 slot_id, uint8 pkt_state, + int pkt_txstatus) +{ + wlfc_hanger_t* hanger; + wlfc_hanger_item_t* item; + + if (!wlfc) + return; + + hanger = (wlfc_hanger_t*)wlfc->hanger; + if (!hanger) + return; + + if (slot_id == WLFC_HANGER_MAXITEMS) + return; + + item = &hanger->items[slot_id]; + + if (item->pkt) { + item->pkt_state |= pkt_state; + if (pkt_txstatus != -1) + item->pkt_txstatus = (uint8)pkt_txstatus; + bcm_object_feature_set(item->pkt, BCM_OBJECT_FEATURE_PKT_STATE, item->pkt_state); + if (item->pkt_state == WLFC_HANGER_PKT_STATE_COMPLETE) { + void *p = NULL; + void *pkt = item->pkt; + uint8 old_state = item->state; + int ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, slot_id, &p, TRUE); + BCM_REFERENCE(ret); + BCM_REFERENCE(pkt); + ASSERT((ret == BCME_OK) && p && (pkt == p)); + if (old_state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) { + printf("ERROR: free a suppressed pkt %p state %d pkt_state %d\n", + pkt, old_state, item->pkt_state); + } + ASSERT(old_state != WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED); + + /* free packet */ + wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))] + [DHD_PKTTAG_FIFO(PKTTAG(p))]--; + wlfc->stats.pktout++; + dhd_txcomplete((dhd_pub_t *)wlfc->dhdp, p, item->pkt_txstatus); + PKTFREE(wlfc->osh, p, TRUE); + } + } else { + /* free slot */ + if (item->state == WLFC_HANGER_ITEM_STATE_FREE) + DHD_ERROR(("Error: %s():%d Multiple TXSTATUS or BUSRETURNED: %d (%d)\n", + __FUNCTION__, __LINE__, item->pkt_state, pkt_state)); + item->state = WLFC_HANGER_ITEM_STATE_FREE; + } +} /* _dhd_wlfc_hanger_free_pkt */ + +/** Called during eg detach() */ +static void +_dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq, + bool dir, f_processpkt_t fn, void *arg, q_type_t q_type) +{ + int prec; + dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp; + + ASSERT(dhdp); + + /* Optimize flush, if pktq len = 0, just return. + * pktq len of 0 means pktq's prec q's are all empty. + */ + if (pq->len == 0) { + return; + } + + for (prec = 0; prec < pq->num_prec; prec++) { + struct pktq_prec *q; + void *p, *prev = NULL; + + q = &pq->q[prec]; + p = q->head; + while (p) { + bcm_pkt_validate_chk(p); + if (fn == NULL || (*fn)(p, arg)) { + bool head = (p == q->head); + if (head) + q->head = PKTLINK(p); + else + PKTSETLINK(prev, PKTLINK(p)); + if (q_type == Q_TYPE_PSQ) { + if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) { + _dhd_wlfc_hanger_remove_reference(ctx->hanger, p); + } + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--; + ctx->pkt_cnt_per_ac[prec>>1]--; + ctx->pkt_cnt_in_psq--; + ctx->stats.cleanup_psq_cnt++; + if (!(prec & 1)) { + /* pkt in delayed q, so fake push BDC header for + * dhd_tcpack_check_xmit() and dhd_txcomplete(). + */ + _dhd_wlfc_pushheader(ctx, &p, FALSE, 0, 0, + 0, 0, TRUE); +#ifdef DHDTCPACK_SUPPRESS + if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!" 
+ " Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(dhdp, + TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + } + } else if (q_type == Q_TYPE_AFQ) { + wlfc_mac_descriptor_t* entry = + _dhd_wlfc_find_table_entry(ctx, p); + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) { + entry->suppr_transit_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; + } + _dhd_wlfc_return_implied_credit(ctx, p); + ctx->stats.cleanup_fw_cnt++; + } + PKTSETLINK(p, NULL); + if (dir) { + ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--; + ctx->stats.pktout++; + dhd_txcomplete(dhdp, p, FALSE); + } + PKTFREE(ctx->osh, p, dir); + + q->len--; + pq->len--; + p = (head ? q->head : PKTLINK(prev)); + } else { + prev = p; + p = PKTLINK(p); + } + } + + if (q->head == NULL) { + ASSERT(q->len == 0); + q->tail = NULL; + } + + } + + if (fn == NULL) + ASSERT(pq->len == 0); +} /* _dhd_wlfc_pktq_flush */ + + +/** !BCMDBUS specific function. Dequeues a packet from the caller supplied queue. */ +static void* +_dhd_wlfc_pktq_pdeq_with_fn(struct pktq *pq, int prec, f_processpkt_t fn, void *arg) +{ + struct pktq_prec *q; + void *p, *prev = NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + p = q->head; + + while (p) { + if (fn == NULL || (*fn)(p, arg)) { + break; + } else { + prev = p; + p = PKTLINK(p); + } + } + if (p == NULL) + return NULL; + + bcm_pkt_validate_chk(p); + + if (prev == NULL) { + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + PKTSETLINK(prev, PKTLINK(p)); + if (q->tail == p) { + q->tail = prev; + } + } + + q->len--; + + pq->len--; + + PKTSETLINK(p, NULL); + + return p; +} + +/** !BCMDBUS specific function */ +static void +_dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + int prec; + void *pkt = NULL, *head = NULL, *tail = NULL; + struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus); + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + wlfc_mac_descriptor_t* entry; + + dhd_os_sdlock_txq(dhd); + for (prec = 0; prec < txq->num_prec; prec++) { + while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) { +#ifdef DHDTCPACK_SUPPRESS + if (dhd_tcpack_check_xmit(dhd, pkt) == BCME_ERROR) { + DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! 
Stop using it\n", + __FUNCTION__, __LINE__)); + dhd_tcpack_suppress_set(dhd, TCPACK_SUP_OFF); + } +#endif /* DHDTCPACK_SUPPRESS */ + if (!head) { + head = pkt; + } + if (tail) { + PKTSETLINK(tail, pkt); + } + tail = pkt; + } + } + dhd_os_sdunlock_txq(dhd); + + + while ((pkt = head)) { + head = PKTLINK(pkt); + PKTSETLINK(pkt, NULL); + entry = _dhd_wlfc_find_table_entry(wlfc, pkt); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode) && + !_dhd_wlfc_hanger_remove_reference(h, pkt)) { + DHD_ERROR(("%s: can't find pkt(%p) in hanger, free it anyway\n", + __FUNCTION__, pkt)); + } + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) { + entry->suppr_transit_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; + } + _dhd_wlfc_return_implied_credit(wlfc, pkt); + wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pkt))][DHD_PKTTAG_FIFO(PKTTAG(pkt))]--; + wlfc->stats.pktout++; + wlfc->stats.cleanup_txq_cnt++; + dhd_txcomplete(dhd, pkt, FALSE); + PKTFREE(wlfc->osh, pkt, TRUE); + } +} /* _dhd_wlfc_cleanup_txq */ + +/** called during eg detach */ +void +_dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + int i; + int total_entries; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + + wlfc->stats.cleanup_txq_cnt = 0; + wlfc->stats.cleanup_psq_cnt = 0; + wlfc->stats.cleanup_fw_cnt = 0; + + /* + * flush sequence should be txq -> psq -> hanger/afq, hanger has to be last one + */ + /* flush bus->txq */ + _dhd_wlfc_cleanup_txq(dhd, fn, arg); + + /* flush psq, search all entries, include nodes as well as interfaces */ + total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t); + table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries; + + for (i = 0; i < total_entries; i++) { + if (table[i].occupied) { + /* release packets held in PSQ (both delayed and suppressed) */ + if (table[i].psq.len) { + WLFC_DBGMESG(("%s(): PSQ[%d].len = %d\n", + __FUNCTION__, i, table[i].psq.len)); + _dhd_wlfc_pktq_flush(wlfc, &table[i].psq, TRUE, + fn, arg, Q_TYPE_PSQ); + } + + /* free packets held in AFQ */ + if (WLFC_GET_AFQ(dhd->wlfc_mode) && (table[i].afq.len)) { + _dhd_wlfc_pktq_flush(wlfc, &table[i].afq, TRUE, + fn, arg, Q_TYPE_AFQ); + } + + if ((fn == NULL) && (&table[i] != &wlfc->destination_entries.other)) { + table[i].occupied = 0; + if (table[i].transit_count || table[i].suppr_transit_count) { + DHD_ERROR(("%s: table[%d] transit(%d), suppr_transit(%d)\n", + __FUNCTION__, i, + table[i].transit_count, + table[i].suppr_transit_count)); + } + } + } + } + + /* + . flush remaining pkts in hanger queue, which are in neither bus->txq nor psq. + . these pkts were already successfully downloaded to the dongle. + . a hanger slot cannot be set to free until its txstatus update is received.
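+ . such pkts are therefore only marked WLFC_HANGER_ITEM_STATE_FLUSHED below; + their slots are reclaimed later, on the txstatus path, by + _dhd_wlfc_hanger_free_pkt().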
+ */ + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + for (i = 0; i < h->max_items; i++) { + if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) || + (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) { + if (fn == NULL || (*fn)(h->items[i].pkt, arg)) { + h->items[i].state = WLFC_HANGER_ITEM_STATE_FLUSHED; + } + } + } + } + + return; +} /* _dhd_wlfc_cleanup */ + +/** Called after eg the dongle signalled a new remote MAC that it connected with to the DHD */ +static int +_dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry, + uint8 action, uint8 ifid, uint8 iftype, uint8* ea, + f_processpkt_t fn, void *arg) +{ + int rc = BCME_OK; + + + if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) || (action == eWLFC_MAC_ENTRY_ACTION_UPDATE)) { + entry->occupied = 1; + entry->state = WLFC_STATE_OPEN; + entry->requested_credit = 0; + entry->interface_id = ifid; + entry->iftype = iftype; + entry->ac_bitmap = 0xff; /* update this when handling APSD */ + + /* for an interface entry we may not care about the MAC address */ + if (ea != NULL) + memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN); + + if (action == eWLFC_MAC_ENTRY_ACTION_ADD) { + entry->suppressed = FALSE; + entry->transit_count = 0; + entry->suppr_transit_count = 0; + entry->onbus_pkts_count = 0; + } + + if (action == eWLFC_MAC_ENTRY_ACTION_ADD) { + dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp); + + pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN); + + if (WLFC_GET_AFQ(dhdp->wlfc_mode)) { + pktq_init(&entry->afq, WLFC_AFQ_PREC_COUNT, WLFC_PSQ_LEN); + } + + if (entry->next == NULL) { + /* not linked to anywhere, add to tail */ + if (ctx->active_entry_head) { + entry->prev = ctx->active_entry_head->prev; + ctx->active_entry_head->prev->next = entry; + ctx->active_entry_head->prev = entry; + entry->next = ctx->active_entry_head; + } else { + ASSERT(ctx->active_entry_count == 0); + entry->prev = entry->next = entry; + ctx->active_entry_head = entry; + } + ctx->active_entry_count++; + } else { + DHD_ERROR(("%s():%d, entry(%d)\n", __FUNCTION__, __LINE__, + (int)(entry - &ctx->destination_entries.nodes[0]))); + } + } + } else if (action == eWLFC_MAC_ENTRY_ACTION_DEL) { + /* When the entry is deleted, the packets that are queued in the entry must be + cleaned up. The cleanup must be done before 'occupied' is set to 0.
+ */ + _dhd_wlfc_cleanup(ctx->dhdp, fn, arg); + _dhd_wlfc_flow_control_check(ctx, &entry->psq, ifid); + + entry->occupied = 0; + entry->state = WLFC_STATE_CLOSE; + memset(&entry->ea[0], 0, ETHER_ADDR_LEN); + + if (entry->next) { + /* not floating, remove from Q */ + if (ctx->active_entry_count <= 1) { + /* last item */ + ctx->active_entry_head = NULL; + ctx->active_entry_count = 0; + } else { + entry->prev->next = entry->next; + entry->next->prev = entry->prev; + if (entry == ctx->active_entry_head) { + ctx->active_entry_head = entry->next; + } + ctx->active_entry_count--; + } + entry->next = entry->prev = NULL; + } else { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + } + } + return rc; +} /* _dhd_wlfc_mac_entry_update */ + + +#ifdef LIMIT_BORROW + +/** LIMIT_BORROW specific function */ +static int +_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, int highest_lender_ac, int borrower_ac, + bool bBorrowAll) +{ + int lender_ac, borrow_limit = 0; + int rc = -1; + + if (ctx == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return -1; + } + + /* Borrow from lowest priority available AC (including BC/MC credits) */ + for (lender_ac = 0; lender_ac <= highest_lender_ac; lender_ac++) { + if (!bBorrowAll) { + borrow_limit = ctx->Init_FIFO_credit[lender_ac]/WLFC_BORROW_LIMIT_RATIO; + } else { + borrow_limit = 0; + } + + if (ctx->FIFO_credit[lender_ac] > borrow_limit) { + ctx->credits_borrowed[borrower_ac][lender_ac]++; + ctx->FIFO_credit[lender_ac]--; + rc = lender_ac; + break; + } + } + + return rc; +} + +/** LIMIT_BORROW specific function */ +static int _dhd_wlfc_return_credit(athost_wl_status_info_t* ctx, int lender_ac, int borrower_ac) +{ + if ((ctx == NULL) || (lender_ac < 0) || (lender_ac > AC_COUNT) || + (borrower_ac < 0) || (borrower_ac > AC_COUNT)) { + DHD_ERROR(("Error: %s():%d, ctx(%p), lender_ac(%d), borrower_ac(%d)\n", + __FUNCTION__, __LINE__, ctx, lender_ac, borrower_ac)); + + return BCME_BADARG; + } + + ctx->credits_borrowed[borrower_ac][lender_ac]--; + ctx->FIFO_credit[lender_ac]++; + + return BCME_OK; +} + +#endif /* LIMIT_BORROW */ + +/** + * Called on an interface event (WLC_E_IF) indicated by firmware. 
+ * @param action : eg eWLFC_MAC_ENTRY_ACTION_UPDATE or eWLFC_MAC_ENTRY_ACTION_ADD + */ +static int +_dhd_wlfc_interface_entry_update(void* state, + uint8 action, uint8 ifid, uint8 iftype, uint8* ea) +{ + athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; + wlfc_mac_descriptor_t* entry; + + if (ifid >= WLFC_MAX_IFNUM) + return BCME_BADARG; + + entry = &ctx->destination_entries.interfaces[ifid]; + + return _dhd_wlfc_mac_entry_update(ctx, entry, action, ifid, iftype, ea, + _dhd_wlfc_ifpkt_fn, &ifid); +} + +/** + * Called eg on receiving a WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast + * specific) + */ +static int +_dhd_wlfc_BCMCCredit_support_update(void* state) +{ + athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; + + ctx->bcmc_credit_supported = TRUE; + return BCME_OK; +} + +/** Called eg on receiving a WLC_E_FIFO_CREDIT_MAP event from the dongle */ +static int +_dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits) +{ + athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state; + int i; + + for (i = 0; i <= 4; i++) { + if (ctx->Init_FIFO_credit[i] != ctx->FIFO_credit[i]) { + DHD_ERROR(("%s: credit[i] is not returned, (%d %d)\n", + __FUNCTION__, ctx->Init_FIFO_credit[i], ctx->FIFO_credit[i])); + } + } + + /* update the AC FIFO credit map */ + ctx->FIFO_credit[0] += (credits[0] - ctx->Init_FIFO_credit[0]); + ctx->FIFO_credit[1] += (credits[1] - ctx->Init_FIFO_credit[1]); + ctx->FIFO_credit[2] += (credits[2] - ctx->Init_FIFO_credit[2]); + ctx->FIFO_credit[3] += (credits[3] - ctx->Init_FIFO_credit[3]); + ctx->FIFO_credit[4] += (credits[4] - ctx->Init_FIFO_credit[4]); + + ctx->Init_FIFO_credit[0] = credits[0]; + ctx->Init_FIFO_credit[1] = credits[1]; + ctx->Init_FIFO_credit[2] = credits[2]; + ctx->Init_FIFO_credit[3] = credits[3]; + ctx->Init_FIFO_credit[4] = credits[4]; + + /* credit for ATIM FIFO is not used yet. */ + ctx->Init_FIFO_credit[5] = ctx->FIFO_credit[5] = 0; + + return BCME_OK; +} + +/** + * Called during committing of a transmit packet from the OS DHD layer to the next layer towards + * the dongle (eg the DBUS layer). All transmit packets flow via this function to the next layer. + * + * @param[in/out] ctx Driver specific flow control administration + * @param[in] ac Access Category (QoS) of called supplied packet + * @param[in] commit_info Contains eg the packet to send + * @param[in] fcommit Function pointer to transmit function of next software layer + * @param[in] commit_ctx Opaque context used when calling next layer + */ +static int +_dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac, + dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx) +{ + uint32 hslot; + int rc; + dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp); + + /* + if ac_fifo_credit_spent = 0 + + This packet will not count against the FIFO credit. + To ensure the txstatus corresponding to this packet + does not provide an implied credit (default behavior) + mark the packet accordingly. + + if ac_fifo_credit_spent = 1 + + This is a normal packet and it counts against the FIFO + credit count. 
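+ + e.g. a pkt sent from entry->requested_credit (pspoll traffic) is committed + with ac_fifo_credit_spent = 0, so DHD_PKTTAG_SETCREDITCHECK() below records + 0 and the eventual txstatus will not return an implied FIFO credit.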
+ */ + DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), commit_info->ac_fifo_credit_spent); + rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, &commit_info->p, + commit_info->needs_hdr, &hslot); + + if (rc == BCME_OK) { + rc = fcommit(commit_ctx, commit_info->p); + if (rc == BCME_OK) { + uint8 gen = WL_TXSTATUS_GET_GENERATION( + DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p))); + ctx->stats.pkt2bus++; + if (commit_info->ac_fifo_credit_spent || (ac == AC_COUNT)) { + ctx->stats.send_pkts[ac]++; + WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac); + } + + if (gen != commit_info->mac_entry->generation) { + /* will be suppressed back by design */ + if (!commit_info->mac_entry->suppressed) { + commit_info->mac_entry->suppressed = TRUE; + } + commit_info->mac_entry->suppr_transit_count++; + } + commit_info->mac_entry->transit_count++; + commit_info->mac_entry->onbus_pkts_count++; + } else if (commit_info->needs_hdr) { + if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { + void *pout = NULL; + /* pop hanger for delayed packet */ + _dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT( + DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p))), &pout, TRUE); + ASSERT(commit_info->p == pout); + } + } + } else { + ctx->stats.generic_error++; + } + + if (rc != BCME_OK) { + /* + pretx pkt process or bus commit has failed, rollback. + - remove wl-header for a delayed packet + - save wl-header header for suppressed packets + - reset credit check flag + */ + _dhd_wlfc_rollback_packet_toq(ctx, commit_info->p, commit_info->pkt_type, hslot); + DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), 0); + } + + return rc; +} /* _dhd_wlfc_handle_packet_commit */ + +/** Returns remote MAC descriptor for caller supplied MAC address */ +static uint8 +_dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8 *ea) +{ + wlfc_mac_descriptor_t* table = + ((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes; + uint8 table_index; + + if (ea != NULL) { + for (table_index = 0; table_index < WLFC_MAC_DESC_TABLE_SIZE; table_index++) { + if ((memcmp(ea, &table[table_index].ea[0], ETHER_ADDR_LEN) == 0) && + table[table_index].occupied) + return table_index; + } + } + return WLFC_MAC_DESC_ID_INVALID; +} + +/** + * Called when the host receives a WLFC_CTL_TYPE_TXSTATUS event from the dongle, indicating the + * status of a frame that the dongle attempted to transmit over the wireless medium. 
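+ * + * A compressed txstatus can cover 'len' consecutive frames with one header: + * the processing loop below advances hcnt (and, in non-AFQ mode, hslot) by + * one per frame, e.g. len == 3 starting at hcnt 250 completes 250..252.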
+ */ +static int +dhd_wlfc_suppressed_acked_update(dhd_pub_t *dhd, uint16 hslot, uint8 prec, uint8 hcnt) +{ + athost_wl_status_info_t* ctx; + wlfc_mac_descriptor_t* entry = NULL; + struct pktq *pq; + struct pktq_prec *q; + void *p, *b; + + if (!dhd) { + DHD_ERROR(("%s: dhd(%p)\n", __FUNCTION__, dhd)); + return BCME_BADARG; + } + ctx = (athost_wl_status_info_t*)dhd->wlfc_state; + if (!ctx) { + DHD_ERROR(("%s: ctx(%p)\n", __FUNCTION__, ctx)); + return BCME_ERROR; + } + + ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1)); + + if (hslot < WLFC_MAC_DESC_TABLE_SIZE) + entry = &ctx->destination_entries.nodes[hslot]; + else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM)) + entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE]; + else + entry = &ctx->destination_entries.other; + + pq = &entry->psq; + + ASSERT(((prec << 1) + 1) < pq->num_prec); + + q = &pq->q[((prec << 1) + 1)]; + + b = NULL; + p = q->head; + + while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))))) { + b = p; + p = PKTLINK(p); + } + + if (p == NULL) { + /* none is matched */ + if (b) { + DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt)); + } else { + DHD_ERROR(("%s: queue is empty\n", __FUNCTION__)); + } + + return BCME_ERROR; + } + + if (!b) { + /* head packet is matched */ + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + /* middle packet is matched */ + PKTSETLINK(b, PKTLINK(p)); + if (PKTLINK(p) == NULL) { + q->tail = b; + } + } + + q->len--; + pq->len--; + ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--; + ctx->pkt_cnt_per_ac[prec]--; + + PKTSETLINK(p, NULL); + + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_enque_afq(ctx, p); + } else { + _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot); + } + + entry->transit_count++; + + return BCME_OK; +} + +static int +_dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, void** p_mac) +{ + uint8 status_flag_ori, status_flag; + uint32 status; + int ret = BCME_OK; + int remove_from_hanger_ori, remove_from_hanger = 1; + void* pktbuf = NULL; + uint8 fifo_id = 0, gen = 0, count = 0, hcnt; + uint16 hslot; + wlfc_mac_descriptor_t* entry = NULL; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + uint16 seq = 0, seq_fromfw = 0, seq_num = 0; + + memcpy(&status, pkt_info, sizeof(uint32)); + status = ltoh32(status); + status_flag = WL_TXSTATUS_GET_FLAGS(status); + hcnt = WL_TXSTATUS_GET_FREERUNCTR(status); + hslot = WL_TXSTATUS_GET_HSLOT(status); + fifo_id = WL_TXSTATUS_GET_FIFO(status); + gen = WL_TXSTATUS_GET_GENERATION(status); + + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + memcpy(&seq, pkt_info + WLFC_CTL_VALUE_LEN_TXSTATUS, WLFC_CTL_VALUE_LEN_SEQ); + seq = ltoh16(seq); + seq_fromfw = WL_SEQ_GET_FROMFW(seq); + seq_num = WL_SEQ_GET_NUM(seq); + } + + wlfc->stats.txstatus_in += len; + + if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) { + wlfc->stats.pkt_freed += len; + } else if (status_flag == WLFC_CTL_PKTFLAG_DISCARD_NOACK) { + wlfc->stats.pkt_freed += len; + } else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) { + wlfc->stats.d11_suppress += len; + remove_from_hanger = 0; + } else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) { + wlfc->stats.wl_suppress += len; + remove_from_hanger = 0; + } else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) { + wlfc->stats.wlc_tossed_pkts += len; + } + + else if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) { + wlfc->stats.pkt_freed += len; + } + + if 
(dhd->proptxstatus_txstatus_ignore) { + if (!remove_from_hanger) { + DHD_ERROR(("suppress txstatus: %d\n", status_flag)); + } + return BCME_OK; + } + + status_flag_ori = status_flag; + remove_from_hanger_ori = remove_from_hanger; + + while (count < len) { + if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) { + dhd_wlfc_suppressed_acked_update(dhd, hslot, fifo_id, hcnt); + } + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + ret = _dhd_wlfc_deque_afq(wlfc, hslot, hcnt, fifo_id, &pktbuf); + } else { + status_flag = status_flag_ori; + remove_from_hanger = remove_from_hanger_ori; + ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, hslot, &pktbuf, FALSE); + if (!pktbuf) { + _dhd_wlfc_hanger_free_pkt(wlfc, hslot, + WLFC_HANGER_PKT_STATE_TXSTATUS, -1); + goto cont; + } else { + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + if (h->items[hslot].state == WLFC_HANGER_ITEM_STATE_FLUSHED) { + status_flag = WLFC_CTL_PKTFLAG_DISCARD; + remove_from_hanger = 1; + } + } + } + + if ((ret != BCME_OK) || !pktbuf) { + goto cont; + } + + bcm_pkt_validate_chk(pktbuf); + + /* set fifo_id to correct value because not all FW does that */ + fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf)); + + entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf); + + if (!remove_from_hanger) { + /* this packet was suppressed */ + if (!entry->suppressed || (entry->generation != gen)) { + if (!entry->suppressed) { + entry->suppr_transit_count = entry->transit_count; + if (p_mac) { + *p_mac = entry; + } + } else { + DHD_ERROR(("gen(%d), entry->generation(%d)\n", + gen, entry->generation)); + } + entry->suppressed = TRUE; + + } + entry->generation = gen; + } + +#ifdef PROP_TXSTATUS_DEBUG + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) + { + uint32 new_t = OSL_SYSUPTIME(); + uint32 old_t; + uint32 delta; + old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[hslot].push_time; + + + wlfc->stats.latency_sample_count++; + if (new_t > old_t) + delta = new_t - old_t; + else + delta = 0xffffffff + new_t - old_t; + wlfc->stats.total_status_latency += delta; + wlfc->stats.latency_most_recent = delta; + + wlfc->stats.deltas[wlfc->stats.idx_delta++] = delta; + if (wlfc->stats.idx_delta == sizeof(wlfc->stats.deltas)/sizeof(uint32)) + wlfc->stats.idx_delta = 0; + } +#endif /* PROP_TXSTATUS_DEBUG */ + + /* pick up the implicit credit from this packet */ + if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) { + _dhd_wlfc_return_implied_credit(wlfc, pktbuf); + } else { + /* + if this packet did not count against FIFO credit, it must have + taken a requested_credit from the destination entry (for pspoll etc.) 
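+
+ A minimal sketch of the two credit-return paths, using the same
+ bookkeeping fields as the surrounding code (illustration only):
+
+   if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf)))
+       wlfc->FIFO_credit[fifo_id]++;   // implied credit back to the AC FIFO
+   else
+       entry->requested_credit++;      // per-destination PS-Poll style credit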
+ */ + if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf))) { + entry->requested_credit++; +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhd); +#endif /* DHD_WLFC_THREAD */ + } +#ifdef PROP_TXSTATUS_DEBUG + entry->dstncredit_acks++; +#endif + } + + if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) || + (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS)) { + /* save generation bit inside packet */ + WL_TXSTATUS_SET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(pktbuf)), gen); + + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + WL_SEQ_SET_FROMDRV(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_fromfw); + WL_SEQ_SET_NUM(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_num); + } + + ret = _dhd_wlfc_enque_suppressed(wlfc, fifo_id, pktbuf); + if (ret != BCME_OK) { + /* delay q is full, drop this packet */ + DHD_WLFC_QMON_COMPLETE(entry); + _dhd_wlfc_prec_drop(dhd, (fifo_id << 1) + 1, pktbuf, FALSE); + } else { + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + /* Mark suppressed to avoid a double free + during wlfc cleanup + */ + _dhd_wlfc_hanger_mark_suppressed(wlfc->hanger, hslot, gen); + } + } + } else { + + DHD_WLFC_QMON_COMPLETE(entry); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_hanger_free_pkt(wlfc, hslot, + WLFC_HANGER_PKT_STATE_TXSTATUS, TRUE); + } else { + dhd_txcomplete(dhd, pktbuf, TRUE); + wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))] + [DHD_PKTTAG_FIFO(PKTTAG(pktbuf))]--; + wlfc->stats.pktout++; + /* free the packet */ + PKTFREE(wlfc->osh, pktbuf, TRUE); + } + } + /* pkt back from firmware side */ + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) { + entry->suppr_transit_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; + } + +cont: + hcnt = (hcnt + 1) & WL_TXSTATUS_FREERUNCTR_MASK; + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + hslot = (hslot + 1) & WL_TXSTATUS_HSLOT_MASK; + } + + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode) && seq_fromfw) { + seq_num = (seq_num + 1) & WL_SEQ_NUM_MASK; + } + + count++; + } + + return BCME_OK; +} /* _dhd_wlfc_compressed_txstatus_update */ + +/** + * Called when eg host receives a 'WLFC_CTL_TYPE_FIFO_CREDITBACK' event from the dongle. + * @param[in] credits caller supplied credit that will be added to the host credit. 
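+ *
+ * Each byte of the credits array is a per-FIFO count; an illustrative
+ * payload, assuming WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK is 6 as the
+ * six-entry fifo_credits_* stats arrays elsewhere in this file suggest:
+ *
+ *   uint8 credits[6] = {2, 0, 1, 0, 0, 0};   // 2 credits back for FIFO 0, 1 for FIFO 2
+ *
+ * Borrowed credits are repaid first (highest-priority lender first); any
+ * remainder goes to the FIFO itself, capped at Init_FIFO_credit.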
+ */ +static int +_dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits) +{ + int i; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) { +#ifdef PROP_TXSTATUS_DEBUG + wlfc->stats.fifo_credits_back[i] += credits[i]; +#endif + + /* update FIFO credits */ + if (dhd->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT) + { + int lender; /* Note that borrower is i */ + + /* Return credits to highest priority lender first */ + for (lender = AC_COUNT; (lender >= 0) && (credits[i] > 0); lender--) { + if (wlfc->credits_borrowed[i][lender] > 0) { + if (credits[i] >= wlfc->credits_borrowed[i][lender]) { + credits[i] -= + (uint8)wlfc->credits_borrowed[i][lender]; + wlfc->FIFO_credit[lender] += + wlfc->credits_borrowed[i][lender]; + wlfc->credits_borrowed[i][lender] = 0; + } else { + wlfc->credits_borrowed[i][lender] -= credits[i]; + wlfc->FIFO_credit[lender] += credits[i]; + credits[i] = 0; + } + } + } + + /* If we have more credits left over, these must belong to the AC */ + if (credits[i] > 0) { + wlfc->FIFO_credit[i] += credits[i]; + } + + if (wlfc->FIFO_credit[i] > wlfc->Init_FIFO_credit[i]) { + wlfc->FIFO_credit[i] = wlfc->Init_FIFO_credit[i]; + } + } + } + +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhd); +#endif /* defined(DHD_WLFC_THREAD) */ + + return BCME_OK; +} /* _dhd_wlfc_fifocreditback_indicate */ + + +/** !BCMDBUS specific function */ +static void +_dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* entry; + int prec; + void *pkt = NULL, *head = NULL, *tail = NULL; + struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus); + uint8 results[WLFC_CTL_VALUE_LEN_TXSTATUS+WLFC_CTL_VALUE_LEN_SEQ]; + uint8 credits[WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK] = {0}; + uint32 htod = 0; + uint16 htodseq = 0; + bool bCreditUpdate = FALSE; + + dhd_os_sdlock_txq(dhd); + for (prec = 0; prec < txq->num_prec; prec++) { + while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) { + if (!head) { + head = pkt; + } + if (tail) { + PKTSETLINK(tail, pkt); + } + tail = pkt; + } + } + dhd_os_sdunlock_txq(dhd); + + while ((pkt = head)) { + head = PKTLINK(pkt); + PKTSETLINK(pkt, NULL); + + entry = _dhd_wlfc_find_table_entry(wlfc, pkt); + if (entry) { + if (entry->onbus_pkts_count > 0) + entry->onbus_pkts_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; + } + + /* fake a suppression txstatus */ + htod = DHD_PKTTAG_H2DTAG(PKTTAG(pkt)); + WL_TXSTATUS_SET_FLAGS(htod, WLFC_CTL_PKTFLAG_WLSUPPRESS); + WL_TXSTATUS_SET_GENERATION(htod, entry->generation); + htod = htol32(htod); + memcpy(results, &htod, WLFC_CTL_VALUE_LEN_TXSTATUS); + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(pkt)); + if (WL_SEQ_GET_FROMDRV(htodseq)) { + WL_SEQ_SET_FROMFW(htodseq, 1); + WL_SEQ_SET_FROMDRV(htodseq, 0); + } + htodseq = htol16(htodseq); + memcpy(results + WLFC_CTL_VALUE_LEN_TXSTATUS, &htodseq, + WLFC_CTL_VALUE_LEN_SEQ); + } + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_enque_afq(wlfc, pkt); + } + _dhd_wlfc_compressed_txstatus_update(dhd, results, 1, NULL); + + /* fake a fifo credit back */ + if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) { + credits[DHD_PKTTAG_FIFO(PKTTAG(pkt))]++; + bCreditUpdate = TRUE; + } + } + + if (bCreditUpdate) { + _dhd_wlfc_fifocreditback_indicate(dhd, credits); + } +} /* 
_dhd_wlfc_suppress_txq */ + +static int +_dhd_wlfc_dbg_senum_check(dhd_pub_t *dhd, uint8 *value) +{ + uint32 timestamp; + + (void)dhd; + + bcopy(&value[2], &timestamp, sizeof(uint32)); + timestamp = ltoh32(timestamp); + DHD_INFO(("RXPKT: SEQ: %d, timestamp %d\n", value[1], timestamp)); + return BCME_OK; +} + +static int +_dhd_wlfc_rssi_indicate(dhd_pub_t *dhd, uint8* rssi) +{ + (void)dhd; + (void)rssi; + return BCME_OK; +} + +static void +_dhd_wlfc_add_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry) +{ + int i; + + if (!wlfc || !entry) { + return; + } + + for (i = 0; i < wlfc->requested_entry_count; i++) { + if (entry == wlfc->requested_entry[i]) { + break; + } + } + + if (i == wlfc->requested_entry_count) { + /* no match entry found */ + ASSERT(wlfc->requested_entry_count <= (WLFC_MAC_DESC_TABLE_SIZE-1)); + wlfc->requested_entry[wlfc->requested_entry_count++] = entry; + } +} + +/** called on eg receiving 'mac open' event from the dongle. */ +static void +_dhd_wlfc_remove_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry) +{ + int i; + + if (!wlfc || !entry) { + return; + } + + for (i = 0; i < wlfc->requested_entry_count; i++) { + if (entry == wlfc->requested_entry[i]) { + break; + } + } + + if (i < wlfc->requested_entry_count) { + /* found */ + ASSERT(wlfc->requested_entry_count > 0); + wlfc->requested_entry_count--; + if (i != wlfc->requested_entry_count) { + wlfc->requested_entry[i] = + wlfc->requested_entry[wlfc->requested_entry_count]; + } + wlfc->requested_entry[wlfc->requested_entry_count] = NULL; + } +} + +/** called on eg receiving a WLFC_CTL_TYPE_MACDESC_ADD TLV from the dongle */ +static int +_dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type) +{ + int rc; + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + uint8 existing_index; + uint8 table_index; + uint8 ifid; + uint8* ea; + + WLFC_DBGMESG(("%s(), mac [%02x:%02x:%02x:%02x:%02x:%02x],%s,idx:%d,id:0x%02x\n", + __FUNCTION__, value[2], value[3], value[4], value[5], value[6], value[7], + ((type == WLFC_CTL_TYPE_MACDESC_ADD) ? "ADD":"DEL"), + WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]), value[0])); + + table = wlfc->destination_entries.nodes; + table_index = WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]); + ifid = value[1]; + ea = &value[2]; + + _dhd_wlfc_remove_requested_entry(wlfc, &table[table_index]); + if (type == WLFC_CTL_TYPE_MACDESC_ADD) { + existing_index = _dhd_wlfc_find_mac_desc_id_from_mac(dhd, &value[2]); + if ((existing_index != WLFC_MAC_DESC_ID_INVALID) && + (existing_index != table_index) && table[existing_index].occupied) { + /* + there is an existing different entry, free the old one + and move it to new index if necessary.
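+
+ Illustrative sequence with hypothetical values: if the dongle re-announces
+ 00:11:22:33:44:55 under lookup index 5 while the same address still
+ occupies index 3, the stale entry is deleted before the add, so one
+ address is never tracked twice:
+
+   existing_index = _dhd_wlfc_find_mac_desc_id_from_mac(dhd, ea);   // finds 3
+   ... ACTION_DEL on table[3] below, then ACTION_ADD on table[5] ...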
+ */ + rc = _dhd_wlfc_mac_entry_update(wlfc, &table[existing_index], + eWLFC_MAC_ENTRY_ACTION_DEL, table[existing_index].interface_id, + table[existing_index].iftype, NULL, _dhd_wlfc_entrypkt_fn, + &table[existing_index]); + } + + if (!table[table_index].occupied) { + /* this new MAC entry does not exist, create one */ + table[table_index].mac_handle = value[0]; + rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index], + eWLFC_MAC_ENTRY_ACTION_ADD, ifid, + wlfc->destination_entries.interfaces[ifid].iftype, + ea, NULL, NULL); + } else { + /* the space should have been empty, but it's not */ + wlfc->stats.mac_update_failed++; + } + } + + if (type == WLFC_CTL_TYPE_MACDESC_DEL) { + if (table[table_index].occupied) { + rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index], + eWLFC_MAC_ENTRY_ACTION_DEL, ifid, + wlfc->destination_entries.interfaces[ifid].iftype, + ea, _dhd_wlfc_entrypkt_fn, &table[table_index]); + } else { + /* the space should have been occupied, but it's not */ + wlfc->stats.mac_update_failed++; + } + } + BCM_REFERENCE(rc); + return BCME_OK; +} /* _dhd_wlfc_mac_table_update */ + +/** Called on a 'mac open' or 'mac close' event indicated by the dongle */ +static int +_dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type) +{ + /* Handle PS on/off indication */ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_mac_descriptor_t* desc; /* a table maps from mac handle to mac descriptor */ + uint8 mac_handle = value[0]; + int i; + + table = wlfc->destination_entries.nodes; + desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)]; + if (desc->occupied) { + if (type == WLFC_CTL_TYPE_MAC_OPEN) { + desc->state = WLFC_STATE_OPEN; + desc->ac_bitmap = 0xff; + DHD_WLFC_CTRINC_MAC_OPEN(desc); + desc->requested_credit = 0; + desc->requested_packet = 0; + _dhd_wlfc_remove_requested_entry(wlfc, desc); + } else { + desc->state = WLFC_STATE_CLOSE; + DHD_WLFC_CTRINC_MAC_CLOSE(desc); + /* Indicate to firmware if there is any traffic pending. 
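+
+ Conceptually the loop below does, for the MAC that just closed (the
+ actual signalling lives in _dhd_wlfc_traffic_pending_check; this is a
+ paraphrase, not its exact behaviour):
+
+   for (ac = 0; ac < AC_COUNT; ac++)
+       if (packets are queued for this entry on ac)
+           report the backlog so the dongle can poll for it later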
*/ + for (i = 0; i < AC_COUNT; i++) { + _dhd_wlfc_traffic_pending_check(wlfc, desc, i); + } + } + } else { + wlfc->stats.psmode_update_failed++; + } + + return BCME_OK; +} /* _dhd_wlfc_psmode_update */ + +/** called upon receiving 'interface open' or 'interface close' event from the dongle */ +static int +_dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type) +{ + /* Handle PS on/off indication */ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + uint8 if_id = value[0]; + + if (if_id < WLFC_MAX_IFNUM) { + table = wlfc->destination_entries.interfaces; + if (table[if_id].occupied) { + if (type == WLFC_CTL_TYPE_INTERFACE_OPEN) { + table[if_id].state = WLFC_STATE_OPEN; + /* WLFC_DBGMESG(("INTERFACE[%d] OPEN\n", if_id)); */ + } else { + table[if_id].state = WLFC_STATE_CLOSE; + /* WLFC_DBGMESG(("INTERFACE[%d] CLOSE\n", if_id)); */ + } + return BCME_OK; + } + } + wlfc->stats.interface_update_failed++; + + return BCME_OK; +} + +/** Called on receiving a WLFC_CTL_TYPE_MAC_REQUEST_CREDIT TLV from the dongle */ +static int +_dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value) +{ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_mac_descriptor_t* desc; + uint8 mac_handle; + uint8 credit; + + table = wlfc->destination_entries.nodes; + mac_handle = value[1]; + credit = value[0]; + + desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)]; + if (desc->occupied) { + desc->requested_credit = credit; + + desc->ac_bitmap = value[2] & (~(1<<AC_COUNT)); + } else { + wlfc->stats.credit_request_failed++; + } + + return BCME_OK; +} + +/** Called on receiving a WLFC_CTL_TYPE_MAC_REQUEST_PACKET TLV from the dongle */ +static int +_dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value) +{ + athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + wlfc_mac_descriptor_t* table; + wlfc_mac_descriptor_t* desc; + uint8 mac_handle; + uint8 packet_count; + + table = wlfc->destination_entries.nodes; + mac_handle = value[1]; + packet_count = value[0]; + + desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)]; + if (desc->occupied) { + desc->requested_packet = packet_count; + + desc->ac_bitmap = value[2] & (~(1<<AC_COUNT)); + } else { + wlfc->stats.packet_request_failed++; + } + + return BCME_OK; +} + +/** Called when host receives a WLFC_CTL_TYPE_HOST_REORDER_RXPKTS TLV from the dongle */ +static void +_dhd_wlfc_reorderinfo_indicate(uint8 *val, uint8 len, uchar *info_buf, uint *info_len) +{ + if (info_len) { + if (info_buf) { + bcopy(val, info_buf, len); + *info_len = len; + } else { + *info_len = 0; + } + } +} + +/* + * public functions + */ + +bool dhd_wlfc_is_supported(dhd_pub_t *dhd) +{ + bool rc = TRUE; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return FALSE; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + rc = FALSE; + } + + dhd_os_wlfc_unblock(dhd); + + return rc; +} + +int dhd_wlfc_enable(dhd_pub_t *dhd) +{ + int i, rc = BCME_OK; + athost_wl_status_info_t* wlfc; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_enabled || dhd->wlfc_state) { + rc = BCME_OK; + goto exit; + } + + /* allocate space to track txstatus propagated from firmware */ + dhd->wlfc_state = DHD_OS_PREALLOC(dhd, DHD_PREALLOC_DHD_WLFC_INFO, + sizeof(athost_wl_status_info_t)); + if (dhd->wlfc_state == NULL) { + rc = BCME_NOMEM; + goto exit; + }
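+
+ /*
+ * From this point dhd->wlfc_state carries the entire flow-control state of
+ * the instance. A rough footprint sketch (illustrative only; the real
+ * layout is in dhd_wlfc.h):
+ *
+ *   total = sizeof(athost_wl_status_info_t)
+ *         + WLFC_HANGER_SIZE(WLFC_HANGER_MAXITEMS)  // hanger exists in non-AFQ mode only
+ */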
+ + /* initialize state space */ + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + memset(wlfc, 0, sizeof(athost_wl_status_info_t)); + + /* remember osh & dhdp */ + wlfc->osh = dhd->osh; + wlfc->dhdp = dhd; + + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + wlfc->hanger = _dhd_wlfc_hanger_create(dhd, WLFC_HANGER_MAXITEMS); + if (wlfc->hanger == NULL) { + DHD_OS_PREFREE(dhd, dhd->wlfc_state, + sizeof(athost_wl_status_info_t)); + dhd->wlfc_state = NULL; + rc = BCME_NOMEM; + goto exit; + } + } + + dhd->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT; + /* default to check rx pkt */ + dhd->wlfc_rxpkt_chk = TRUE; + if (dhd->op_mode & DHD_FLAG_IBSS_MODE) { + dhd->wlfc_rxpkt_chk = FALSE; + } + + /* initialize all interfaces to accept traffic */ + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + wlfc->hostif_flow_state[i] = OFF; + } + + _dhd_wlfc_mac_entry_update(wlfc, &wlfc->destination_entries.other, + eWLFC_MAC_ENTRY_ACTION_ADD, 0xff, 0, NULL, NULL, NULL); + + wlfc->allow_credit_borrow = 0; + wlfc->single_ac = 0; + wlfc->single_ac_timestamp = 0; + + +exit: + dhd_os_wlfc_unblock(dhd); + + return rc; +} /* dhd_wlfc_enable */ + +#ifdef SUPPORT_P2P_GO_PS + +/** + * Called when the host platform enters a lower power mode, eg right before a system hibernate. + * SUPPORT_P2P_GO_PS specific function. + */ +int +dhd_wlfc_suspend(dhd_pub_t *dhd) +{ + uint32 tlv = 0; + + DHD_TRACE(("%s: masking wlfc events\n", __FUNCTION__)); + if (!dhd->wlfc_enabled) + return -1; + + if (!dhd_wl_ioctl_get_intiovar(dhd, "tlv", &tlv, WLC_GET_VAR, FALSE, 0)) + return -1; + if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == 0) + return 0; + tlv &= ~(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS); + if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) + return -1; + + return 0; +} + +/** + * Called when the host platform resumes from a power management operation, eg resume after a + * system hibernate. SUPPORT_P2P_GO_PS specific function. 
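+ *
+ * Suspend and resume are symmetric: both read back the "tlv" iovar and flip
+ * the same two signal bits, roughly:
+ *
+ *   tlv &= ~(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS);  // suspend
+ *   tlv |=  (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS);  // resume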
+ */ +int +dhd_wlfc_resume(dhd_pub_t *dhd) +{ + uint32 tlv = 0; + + DHD_TRACE(("%s: unmasking wlfc events\n", __FUNCTION__)); + if (!dhd->wlfc_enabled) + return -1; + + if (!dhd_wl_ioctl_get_intiovar(dhd, "tlv", &tlv, WLC_GET_VAR, FALSE, 0)) + return -1; + if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == + (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) + return 0; + tlv |= (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS); + if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) + return -1; + + return 0; +} + +#endif /* SUPPORT_P2P_GO_PS */ + +/** A flow control header was received from firmware, containing one or more TLVs */ +int +dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar *reorder_info_buf, + uint *reorder_info_len) +{ + uint8 type, len; + uint8* value; + uint8* tmpbuf; + uint16 remainder = (uint16)tlv_hdr_len; + uint16 processed = 0; + athost_wl_status_info_t* wlfc = NULL; + void* entry; + + if ((dhd == NULL) || (pktbuf == NULL)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (dhd->proptxstatus_mode != WLFC_ONLY_AMPDU_HOSTREORDER) { + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + } + + tmpbuf = (uint8*)PKTDATA(dhd->osh, pktbuf); + + if (remainder) { + while ((processed < (WLFC_MAX_PENDING_DATALEN * 2)) && (remainder > 0)) { + type = tmpbuf[processed]; + if (type == WLFC_CTL_TYPE_FILLER) { + remainder -= 1; + processed += 1; + continue; + } + + len = tmpbuf[processed + 1]; + value = &tmpbuf[processed + 2]; + + if (remainder < (2 + len)) + break; + + remainder -= 2 + len; + processed += 2 + len; + entry = NULL; + + DHD_INFO(("%s():%d type %d remainder %d processed %d\n", + __FUNCTION__, __LINE__, type, remainder, processed)); + + if (type == WLFC_CTL_TYPE_HOST_REORDER_RXPKTS) + _dhd_wlfc_reorderinfo_indicate(value, len, reorder_info_buf, + reorder_info_len); + + if (wlfc == NULL) { + ASSERT(dhd->proptxstatus_mode == WLFC_ONLY_AMPDU_HOSTREORDER); + + if (type != WLFC_CTL_TYPE_HOST_REORDER_RXPKTS && + type != WLFC_CTL_TYPE_TRANS_ID) + DHD_INFO(("%s():%d dhd->wlfc_state is NULL yet!" 
+ " type %d remainder %d processed %d\n", + __FUNCTION__, __LINE__, type, remainder, processed)); + continue; + } + + if (type == WLFC_CTL_TYPE_TXSTATUS) { + _dhd_wlfc_compressed_txstatus_update(dhd, value, 1, &entry); + } else if (type == WLFC_CTL_TYPE_COMP_TXSTATUS) { + uint8 compcnt_offset = WLFC_CTL_VALUE_LEN_TXSTATUS; + + if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) { + compcnt_offset += WLFC_CTL_VALUE_LEN_SEQ; + } + _dhd_wlfc_compressed_txstatus_update(dhd, value, + value[compcnt_offset], &entry); + } else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK) { + _dhd_wlfc_fifocreditback_indicate(dhd, value); + } else if (type == WLFC_CTL_TYPE_RSSI) { + _dhd_wlfc_rssi_indicate(dhd, value); + } else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT) { + _dhd_wlfc_credit_request(dhd, value); + } else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET) { + _dhd_wlfc_packet_request(dhd, value); + } else if ((type == WLFC_CTL_TYPE_MAC_OPEN) || + (type == WLFC_CTL_TYPE_MAC_CLOSE)) { + _dhd_wlfc_psmode_update(dhd, value, type); + } else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) || + (type == WLFC_CTL_TYPE_MACDESC_DEL)) { + _dhd_wlfc_mac_table_update(dhd, value, type); + } else if (type == WLFC_CTL_TYPE_TRANS_ID) { + _dhd_wlfc_dbg_senum_check(dhd, value); + } else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) || + (type == WLFC_CTL_TYPE_INTERFACE_CLOSE)) { + _dhd_wlfc_interface_update(dhd, value, type); + } + + if (entry && WLFC_GET_REORDERSUPP(dhd->wlfc_mode)) { + /* suppress all packets for this mac entry from bus->txq */ + _dhd_wlfc_suppress_txq(dhd, _dhd_wlfc_entrypkt_fn, entry); + } + } /* while */ + + if (remainder != 0 && wlfc) { + /* trouble..., something is not right */ + wlfc->stats.tlv_parse_failed++; + } + } /* if */ + + if (wlfc) + wlfc->stats.dhd_hdrpulls++; + + dhd_os_wlfc_unblock(dhd); + return BCME_OK; +} /* dhd_wlfc_parse_header_info */ + +KERNEL_THREAD_RETURN_TYPE +dhd_wlfc_transfer_packets(void *data) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)data; + int ac, single_ac = 0, rc = BCME_OK; + dhd_wlfc_commit_info_t commit_info; + athost_wl_status_info_t* ctx; + int bus_retry_count = 0; + int pkt_send = 0; + + uint8 tx_map = 0; /* packets (send + in queue), Bitmask for 4 ACs + BC/MC */ + uint8 rx_map = 0; /* received packets, Bitmask for 4 ACs + BC/MC */ + uint8 packets_map = 0; /* packets in queue, Bitmask for 4 ACs + BC/MC */ + bool no_credit = FALSE; + + int lender; + +#if defined(DHD_WLFC_THREAD) + /* wait till someone wakeup me up, will change it at running time */ + int wait_msec = msecs_to_jiffies(0xFFFFFFFF); +#endif /* defined(DHD_WLFC_THREAD) */ + +#if defined(DHD_WLFC_THREAD) + while (1) { + bus_retry_count = 0; + pkt_send = 0; + tx_map = 0; + rx_map = 0; + packets_map = 0; + wait_msec = wait_event_interruptible_timeout(dhdp->wlfc_wqhead, + dhdp->wlfc_thread_go, wait_msec); + if (kthread_should_stop()) { + break; + } + dhdp->wlfc_thread_go = FALSE; + + dhd_os_wlfc_block(dhdp); +#endif /* defined(DHD_WLFC_THREAD) */ + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; +#if defined(DHD_WLFC_THREAD) + if (!ctx) + goto exit; +#endif /* defined(DHD_WLFC_THREAD) */ + + memset(&commit_info, 0, sizeof(commit_info)); + + /* + Commit packets for regular AC traffic. Higher priority first. + First, use up FIFO credits available to each AC. Based on distribution + and credits left, borrow from other ACs as applicable + + -NOTE: + If the bus between the host and firmware is overwhelmed by the + traffic from host, it is possible that higher priority traffic + starves the lower priority queue. 
If that occurs often, we may + have to employ weighted round-robin or ucode scheme to avoid + low priority packet starvation. + */ + + for (ac = AC_COUNT; ac >= 0; ac--) { + if (dhdp->wlfc_rxpkt_chk) { + /* check rx packet */ + uint32 curr_t = OSL_SYSUPTIME(), delta; + + delta = curr_t - ctx->rx_timestamp[ac]; + if (delta < WLFC_RX_DETECTION_THRESHOLD_MS) { + rx_map |= (1 << ac); + } + } + + if (ctx->pkt_cnt_per_ac[ac] == 0) { + continue; + } + + tx_map |= (1 << ac); + single_ac = ac + 1; + while (FALSE == dhdp->proptxstatus_txoff) { + /* packets from delayQ with less priority are fresh and + * they'd need header and have no MAC entry + */ + no_credit = (ctx->FIFO_credit[ac] < 1); + if (dhdp->proptxstatus_credit_ignore || + ((ac == AC_COUNT) && !ctx->bcmc_credit_supported)) { + no_credit = FALSE; + } + + lender = -1; +#ifdef LIMIT_BORROW + if (no_credit && (ac < AC_COUNT) && (tx_map >= rx_map)) { + /* try borrow from lower priority */ + lender = _dhd_wlfc_borrow_credit(ctx, ac - 1, ac, FALSE); + if (lender != -1) { + no_credit = FALSE; + } + } +#endif + commit_info.needs_hdr = 1; + commit_info.mac_entry = NULL; + commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac, + &(commit_info.ac_fifo_credit_spent), + &(commit_info.needs_hdr), + &(commit_info.mac_entry), + no_credit); + commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED : + eWLFC_PKTTYPE_SUPPRESSED; + + if (commit_info.p == NULL) { +#ifdef LIMIT_BORROW + if (lender != -1) { + _dhd_wlfc_return_credit(ctx, lender, ac); + } +#endif + break; + } + + if (!dhdp->proptxstatus_credit_ignore && (lender == -1)) { + ASSERT(ctx->FIFO_credit[ac] >= commit_info.ac_fifo_credit_spent); + } + /* here we can ensure have credit or no credit needed */ + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + ctx->fcommit, ctx->commit_ctx); + + /* Bus commits may fail (e.g. 
flow control); abort after retries */ + if (rc == BCME_OK) { + pkt_send++; + if (commit_info.ac_fifo_credit_spent && (lender == -1)) { + ctx->FIFO_credit[ac]--; + } +#ifdef LIMIT_BORROW + else if (!commit_info.ac_fifo_credit_spent && (lender != -1)) { + _dhd_wlfc_return_credit(ctx, lender, ac); + } +#endif + } else { +#ifdef LIMIT_BORROW + if (lender != -1) { + _dhd_wlfc_return_credit(ctx, lender, ac); + } +#endif + bus_retry_count++; + if (bus_retry_count >= BUS_RETRIES) { + DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc)); + goto exit; + } + } + } + + if (ctx->pkt_cnt_per_ac[ac]) { + packets_map |= (1 << ac); + } + } + + if ((tx_map == 0) || dhdp->proptxstatus_credit_ignore) { + /* nothing send out or remain in queue */ + rc = BCME_OK; + goto exit; + } + + if (((tx_map & (tx_map - 1)) == 0) && (tx_map >= rx_map)) { + /* only one tx ac exist and no higher rx ac */ + if ((single_ac == ctx->single_ac) && ctx->allow_credit_borrow) { + ac = single_ac - 1; + } else { + uint32 delta; + uint32 curr_t = OSL_SYSUPTIME(); + + if (single_ac != ctx->single_ac) { + /* new single ac traffic (first single ac or different single ac) */ + ctx->allow_credit_borrow = 0; + ctx->single_ac_timestamp = curr_t; + ctx->single_ac = (uint8)single_ac; + rc = BCME_OK; + goto exit; + } + /* same ac traffic, check if it lasts enough time */ + delta = curr_t - ctx->single_ac_timestamp; + + if (delta >= WLFC_BORROW_DEFER_PERIOD_MS) { + /* wait enough time, can borrow now */ + ctx->allow_credit_borrow = 1; + ac = single_ac - 1; + } else { + rc = BCME_OK; + goto exit; + } + } + } else { + /* If we have multiple AC traffic, turn off borrowing, mark time and bail out */ + ctx->allow_credit_borrow = 0; + ctx->single_ac_timestamp = 0; + ctx->single_ac = 0; + rc = BCME_OK; + goto exit; + } + + if (packets_map == 0) { + /* nothing to send, skip borrow */ + rc = BCME_OK; + goto exit; + } + + /* At this point, borrow all credits only for ac */ + while (FALSE == dhdp->proptxstatus_txoff) { +#ifdef LIMIT_BORROW + if ((lender = _dhd_wlfc_borrow_credit(ctx, AC_COUNT, ac, TRUE)) == -1) { + break; + } +#endif + commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac, + &(commit_info.ac_fifo_credit_spent), + &(commit_info.needs_hdr), + &(commit_info.mac_entry), + FALSE); + if (commit_info.p == NULL) { + /* before borrow only one ac exists and now this only ac is empty */ +#ifdef LIMIT_BORROW + _dhd_wlfc_return_credit(ctx, lender, ac); +#endif + break; + } + + commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED : + eWLFC_PKTTYPE_SUPPRESSED; + + rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, + ctx->fcommit, ctx->commit_ctx); + + /* Bus commits may fail (e.g. 
flow control); abort after retries */ + if (rc == BCME_OK) { + pkt_send++; + if (commit_info.ac_fifo_credit_spent) { +#ifndef LIMIT_BORROW + ctx->FIFO_credit[ac]--; +#endif + } else { +#ifdef LIMIT_BORROW + _dhd_wlfc_return_credit(ctx, lender, ac); +#endif + } + } else { +#ifdef LIMIT_BORROW + _dhd_wlfc_return_credit(ctx, lender, ac); +#endif + bus_retry_count++; + if (bus_retry_count >= BUS_RETRIES) { + DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc)); + goto exit; + } + } + } + + BCM_REFERENCE(pkt_send); + +exit: +#if defined(DHD_WLFC_THREAD) + dhd_os_wlfc_unblock(dhdp); + if (ctx && ctx->pkt_cnt_in_psq && pkt_send) { + wait_msec = msecs_to_jiffies(WLFC_THREAD_QUICK_RETRY_WAIT_MS); + } else { + wait_msec = msecs_to_jiffies(WLFC_THREAD_RETRY_WAIT_MS); + } + } + return 0; +#else + return rc; +#endif /* defined(DHD_WLFC_THREAD) */ +} + +/** + * Enqueues a transmit packet in the next layer towards the dongle, eg the DBUS layer. Called by + * eg dhd_sendpkt(). + * @param[in] dhdp Pointer to public DHD structure + * @param[in] fcommit Pointer to transmit function of next layer + * @param[in] commit_ctx Opaque context used when calling next layer + * @param[in] pktbuf Packet to send + * @param[in] need_toggle_host_if If TRUE, resets flag ctx->toggle_host_if + */ +int +dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx, void *pktbuf, + bool need_toggle_host_if) +{ + int rc = BCME_OK; + athost_wl_status_info_t* ctx; + +#if defined(DHD_WLFC_THREAD) + if (!pktbuf) + return BCME_OK; +#endif /* defined(DHD_WLFC_THREAD) */ + + if ((dhdp == NULL) || (fcommit == NULL)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + if (pktbuf) { + DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 0); + } + rc = WLFC_UNSUPPORTED; + goto exit; + } + + ctx = (athost_wl_status_info_t*)dhdp->wlfc_state; + + + if (dhdp->proptxstatus_module_ignore) { + if (pktbuf) { + uint32 htod = 0; + WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST); + _dhd_wlfc_pushheader(ctx, &pktbuf, FALSE, 0, 0, htod, 0, FALSE); + if (fcommit(commit_ctx, pktbuf)) { + /* free it if failed, otherwise do it in tx complete cb */ + PKTFREE(ctx->osh, pktbuf, TRUE); + } + rc = BCME_OK; + } + goto exit; + } + + if (pktbuf) { + int ac = DHD_PKTTAG_FIFO(PKTTAG(pktbuf)); + ASSERT(ac <= AC_COUNT); + DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 1); + /* en-queue the packets to respective queue. 
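+
+ Delay and suppress traffic share one pktq per destination: precedence
+ (ac << 1) holds fresh (delayed) packets and (ac << 1) + 1 holds
+ dongle-suppressed ones, so eg AC 2 maps to precedences 4 and 5; that is
+ why the drop path below uses (ac << 1).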
*/ + rc = _dhd_wlfc_enque_delayq(ctx, pktbuf, ac); + if (rc) { + _dhd_wlfc_prec_drop(ctx->dhdp, (ac << 1), pktbuf, FALSE); + } else { + ctx->stats.pktin++; + ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))][ac]++; + } + } + + if (!ctx->fcommit) { + ctx->fcommit = fcommit; + } else { + ASSERT(ctx->fcommit == fcommit); + } + if (!ctx->commit_ctx) { + ctx->commit_ctx = commit_ctx; + } else { + ASSERT(ctx->commit_ctx == commit_ctx); + } + +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhdp); +#else + dhd_wlfc_transfer_packets(dhdp); +#endif /* defined(DHD_WLFC_THREAD) */ + +exit: + dhd_os_wlfc_unblock(dhdp); + return rc; +} /* dhd_wlfc_commit_packets */ + +/** + * Called when the (lower) DBUS layer indicates completion (succesfull or not) of a transmit packet + */ +int +dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success) +{ + athost_wl_status_info_t* wlfc; + wlfc_mac_descriptor_t *entry; + void* pout = NULL; + int rtn = BCME_OK; + if ((dhd == NULL) || (txp == NULL)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + bcm_pkt_validate_chk(txp); + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + rtn = WLFC_UNSUPPORTED; + goto EXIT; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) { +#ifdef PROP_TXSTATUS_DEBUG + wlfc->stats.signal_only_pkts_freed++; +#endif + /* is this a signal-only packet? */ + _dhd_wlfc_pullheader(wlfc, txp); + PKTFREE(wlfc->osh, txp, TRUE); + goto EXIT; + } + + entry = _dhd_wlfc_find_table_entry(wlfc, txp); + ASSERT(entry); + + if (!success || dhd->proptxstatus_txstatus_ignore) { + WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n", + __FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp)))); + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_hanger_poppkt(wlfc->hanger, WL_TXSTATUS_GET_HSLOT( + DHD_PKTTAG_H2DTAG(PKTTAG(txp))), &pout, TRUE); + ASSERT(txp == pout); + } + + /* indicate failure and free the packet */ + dhd_txcomplete(dhd, txp, success); + + /* return the credit, if necessary */ + _dhd_wlfc_return_implied_credit(wlfc, txp); + + if (entry->transit_count) + entry->transit_count--; + if (entry->suppr_transit_count) + entry->suppr_transit_count--; + wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(txp))][DHD_PKTTAG_FIFO(PKTTAG(txp))]--; + wlfc->stats.pktout++; + PKTFREE(wlfc->osh, txp, TRUE); + } else { + /* bus confirmed pkt went to firmware side */ + if (WLFC_GET_AFQ(dhd->wlfc_mode)) { + _dhd_wlfc_enque_afq(wlfc, txp); + } else { + int hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(txp))); + _dhd_wlfc_hanger_free_pkt(wlfc, hslot, + WLFC_HANGER_PKT_STATE_BUSRETURNED, -1); + } + } + + ASSERT(entry->onbus_pkts_count > 0); + if (entry->onbus_pkts_count > 0) + entry->onbus_pkts_count--; + if (entry->suppressed && + (!entry->onbus_pkts_count) && + (!entry->suppr_transit_count)) + entry->suppressed = FALSE; +EXIT: + dhd_os_wlfc_unblock(dhd); + return rtn; +} /* dhd_wlfc_txcomplete */ + +int +dhd_wlfc_init(dhd_pub_t *dhd) +{ + /* enable all signals & indicate host proptxstatus logic is active */ + uint32 tlv, mode, fw_caps; + int ret = 0; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + if (dhd->wlfc_enabled) { + DHD_INFO(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__)); + dhd_os_wlfc_unblock(dhd); + return BCME_OK; + } + dhd->wlfc_enabled = TRUE; + dhd_os_wlfc_unblock(dhd); + + tlv = 
WLFC_FLAGS_RSSI_SIGNALS | + WLFC_FLAGS_XONXOFF_SIGNALS | + WLFC_FLAGS_CREDIT_STATUS_SIGNALS | + WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE | + WLFC_FLAGS_HOST_RXRERODER_ACTIVE; + + + /* + try to enable/disable signaling by sending "tlv" iovar. if that fails, + fallback to no flow control? Print a message for now. + */ + + /* enable proptxtstatus signaling by default */ + if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) { + /* + Leaving the message for now, it should be removed after a while; once + the tlv situation is stable. + */ + DHD_INFO(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n", + dhd->wlfc_enabled?"enabled":"disabled", tlv)); + } + + mode = 0; + + /* query caps */ + ret = dhd_wl_ioctl_get_intiovar(dhd, "wlfc_mode", &fw_caps, WLC_GET_VAR, FALSE, 0); + + if (!ret) { + DHD_INFO(("%s: query wlfc_mode succeed, fw_caps=0x%x\n", __FUNCTION__, fw_caps)); + + if (WLFC_IS_OLD_DEF(fw_caps)) { + /* enable proptxtstatus v2 by default */ + mode = WLFC_MODE_AFQ; + } else { + WLFC_SET_AFQ(mode, WLFC_GET_AFQ(fw_caps)); + WLFC_SET_REUSESEQ(mode, WLFC_GET_REUSESEQ(fw_caps)); + WLFC_SET_REORDERSUPP(mode, WLFC_GET_REORDERSUPP(fw_caps)); + } + ret = dhd_wl_ioctl_set_intiovar(dhd, "wlfc_mode", mode, WLC_SET_VAR, TRUE, 0); + } + + dhd_os_wlfc_block(dhd); + + dhd->wlfc_mode = 0; + if (ret >= 0) { + if (WLFC_IS_OLD_DEF(mode)) { + WLFC_SET_AFQ(dhd->wlfc_mode, (mode == WLFC_MODE_AFQ)); + } else { + dhd->wlfc_mode = mode; + } + } + + DHD_INFO(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret)); + + dhd_os_wlfc_unblock(dhd); + + if (dhd->plat_init) + dhd->plat_init((void *)dhd); + + return BCME_OK; +} /* dhd_wlfc_init */ + +/** AMPDU host reorder specific function */ +int +dhd_wlfc_hostreorder_init(dhd_pub_t *dhd) +{ + /* enable only ampdu hostreorder here */ + uint32 tlv; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + DHD_TRACE(("%s():%d Enter\n", __FUNCTION__, __LINE__)); + + tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE; + + /* enable proptxtstatus signaling by default */ + if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) { + DHD_ERROR(("%s(): failed to enable/disable bdcv2 tlv signaling\n", + __FUNCTION__)); + } else { + /* + Leaving the message for now, it should be removed after a while; once + the tlv situation is stable. 
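+
+ In this mode only WLFC_FLAGS_HOST_RXRERODER_ACTIVE is set, so the dongle
+ emits rx-reorder TLVs but no txstatus or credit signals; the driver
+ mirrors that below (sketch of the two relevant assignments):
+
+   tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE;          // rx reordering only
+   dhd->proptxstatus_mode = WLFC_ONLY_AMPDU_HOSTREORDER;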
+ */ + DHD_ERROR(("%s(): successful bdcv2 tlv signaling, %d\n", + __FUNCTION__, tlv)); + } + + dhd_os_wlfc_block(dhd); + dhd->proptxstatus_mode = WLFC_ONLY_AMPDU_HOSTREORDER; + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +int +dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + _dhd_wlfc_cleanup_txq(dhd, fn, arg); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** release all packet resources */ +int +dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg) +{ + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + _dhd_wlfc_cleanup(dhd, fn, arg); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +int +dhd_wlfc_deinit(dhd_pub_t *dhd) +{ + /* cleanup all psq related resources */ + athost_wl_status_info_t* wlfc; + uint32 tlv = 0; + uint32 hostreorder = 0; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + if (!dhd->wlfc_enabled) { + DHD_ERROR(("%s():%d, Already disabled!\n", __FUNCTION__, __LINE__)); + dhd_os_wlfc_unblock(dhd); + return BCME_OK; + } + + dhd->wlfc_enabled = FALSE; + dhd_os_wlfc_unblock(dhd); + + /* query ampdu hostreorder */ + (void) dhd_wl_ioctl_get_intiovar(dhd, "ampdu_hostreorder", + &hostreorder, WLC_GET_VAR, FALSE, 0); + + if (hostreorder) { + tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE; + DHD_ERROR(("%s():%d, maintain HOST RXRERODER flag in tvl\n", + __FUNCTION__, __LINE__)); + } + + /* Disable proptxtstatus signaling for deinit */ + (void) dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0); + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + + _dhd_wlfc_cleanup(dhd, NULL, NULL); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + int i; + wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger; + for (i = 0; i < h->max_items; i++) { + if (h->items[i].state != WLFC_HANGER_ITEM_STATE_FREE) { + _dhd_wlfc_hanger_free_pkt(wlfc, i, + WLFC_HANGER_PKT_STATE_COMPLETE, TRUE); + } + } + + /* delete hanger */ + _dhd_wlfc_hanger_delete(dhd, h); + } + + + /* free top structure */ + DHD_OS_PREFREE(dhd, dhd->wlfc_state, + sizeof(athost_wl_status_info_t)); + dhd->wlfc_state = NULL; + dhd->proptxstatus_mode = hostreorder ? 
+ WLFC_ONLY_AMPDU_HOSTREORDER : WLFC_FCMODE_NONE; + + dhd_os_wlfc_unblock(dhd); + + if (dhd->plat_deinit) + dhd->plat_deinit((void *)dhd); + return BCME_OK; +} /* dhd_wlfc_deinit */ + +/** + * Called on an interface event (WLC_E_IF) indicated by firmware + * @param[in] dhdp Pointer to public DHD structure + * @param[in] action eg eWLFC_MAC_ENTRY_ACTION_UPDATE or eWLFC_MAC_ENTRY_ACTION_ADD + */ +int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea) +{ + int rc; + + if (dhdp == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhdp); + return WLFC_UNSUPPORTED; + } + + rc = _dhd_wlfc_interface_entry_update(dhdp->wlfc_state, action, ifid, iftype, ea); + + dhd_os_wlfc_unblock(dhdp); + return rc; +} + +/** Called eg on receiving a WLC_E_FIFO_CREDIT_MAP event from the dongle */ +int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data) +{ + int rc; + + if (dhdp == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhdp); + return WLFC_UNSUPPORTED; + } + + rc = _dhd_wlfc_FIFOcreditmap_update(dhdp->wlfc_state, event_data); + + dhd_os_wlfc_unblock(dhdp); + + return rc; +} + +/** + * Called eg on receiving a WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast + * specific) + */ +int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp) +{ + int rc; + + if (dhdp == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhdp); + return WLFC_UNSUPPORTED; + } + + rc = _dhd_wlfc_BCMCCredit_support_update(dhdp->wlfc_state); + + dhd_os_wlfc_unblock(dhdp); + return rc; +} + +/** debug specific function */ +int +dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf) +{ + int i; + uint8* ea; + athost_wl_status_info_t* wlfc; + wlfc_hanger_t* h; + wlfc_mac_descriptor_t* mac_table; + wlfc_mac_descriptor_t* interfaces; + char* iftypes[] = {"STA", "AP", "WDS", "p2pGO", "p2pCL"}; + + if (!dhdp || !strbuf) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhdp); + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhdp); + return WLFC_UNSUPPORTED; + } + + wlfc = (athost_wl_status_info_t*)dhdp->wlfc_state; + + h = (wlfc_hanger_t*)wlfc->hanger; + if (h == NULL) { + bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n"); + } + + mac_table = wlfc->destination_entries.nodes; + interfaces = wlfc->destination_entries.interfaces; + bcm_bprintf(strbuf, "---- wlfc stats ----\n"); + + if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) { + h = (wlfc_hanger_t*)wlfc->hanger; + if (h == NULL) { + bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n"); + } else { + bcm_bprintf(strbuf, "wlfc hanger (pushed,popped,f_push," + "f_pop,f_slot, pending) = (%d,%d,%d,%d,%d,%d)\n", + h->pushed, + h->popped, + h->failed_to_push, + h->failed_to_pop, + h->failed_slotfind, + (h->pushed - h->popped)); + } + } + + bcm_bprintf(strbuf, "wlfc fail(tlv,credit_rqst,mac_update,psmode_update), " + "(dq_full,rollback_fail) = (%d,%d,%d,%d), (%d,%d)\n", + wlfc->stats.tlv_parse_failed, +
wlfc->stats.credit_request_failed, + wlfc->stats.mac_update_failed, + wlfc->stats.psmode_update_failed, + wlfc->stats.delayq_full_error, + wlfc->stats.rollback_failed); + + bcm_bprintf(strbuf, "PKTS (init_credit,credit,sent,drop_d,drop_s,outoforder) " + "(AC0[%d,%d,%d,%d,%d,%d],AC1[%d,%d,%d,%d,%d,%d],AC2[%d,%d,%d,%d,%d,%d]," + "AC3[%d,%d,%d,%d,%d,%d],BC_MC[%d,%d,%d,%d,%d,%d])\n", + wlfc->Init_FIFO_credit[0], wlfc->FIFO_credit[0], wlfc->stats.send_pkts[0], + wlfc->stats.drop_pkts[0], wlfc->stats.drop_pkts[1], wlfc->stats.ooo_pkts[0], + wlfc->Init_FIFO_credit[1], wlfc->FIFO_credit[1], wlfc->stats.send_pkts[1], + wlfc->stats.drop_pkts[2], wlfc->stats.drop_pkts[3], wlfc->stats.ooo_pkts[1], + wlfc->Init_FIFO_credit[2], wlfc->FIFO_credit[2], wlfc->stats.send_pkts[2], + wlfc->stats.drop_pkts[4], wlfc->stats.drop_pkts[5], wlfc->stats.ooo_pkts[2], + wlfc->Init_FIFO_credit[3], wlfc->FIFO_credit[3], wlfc->stats.send_pkts[3], + wlfc->stats.drop_pkts[6], wlfc->stats.drop_pkts[7], wlfc->stats.ooo_pkts[3], + wlfc->Init_FIFO_credit[4], wlfc->FIFO_credit[4], wlfc->stats.send_pkts[4], + wlfc->stats.drop_pkts[8], wlfc->stats.drop_pkts[9], wlfc->stats.ooo_pkts[4]); + + bcm_bprintf(strbuf, "\n"); + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + if (interfaces[i].occupied) { + char* iftype_desc; + + if (interfaces[i].iftype > WLC_E_IF_ROLE_P2P_CLIENT) + iftype_desc = "<Unknown"; + else + iftype_desc = iftypes[interfaces[i].iftype]; + + ea = interfaces[i].ea; + bcm_bprintf(strbuf, "INTERFACE[%d].ea = " + "[%02x:%02x:%02x:%02x:%02x:%02x], if:%d, type: %s " + "netif_flow_control:%s\n", i, + ea[0], ea[1], ea[2], ea[3], ea[4], ea[5], + interfaces[i].interface_id, + iftype_desc, ((wlfc->hostif_flow_state[i] == OFF) + ? " OFF":" ON")); + + bcm_bprintf(strbuf, "INTERFACE[%d].PSQ(len,state,credit)," + "(trans,supp_trans,onbus)" + "= (%d,%s,%d),(%d,%d,%d)\n", + i, + interfaces[i].psq.len, + ((interfaces[i].state == + WLFC_STATE_OPEN) ? "OPEN":"CLOSE"), + interfaces[i].requested_credit, + interfaces[i].transit_count, + interfaces[i].suppr_transit_count, + interfaces[i].onbus_pkts_count); + + bcm_bprintf(strbuf, "INTERFACE[%d].PSQ" + "(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2)," + "(delay3,sup3,afq3),(delay4,sup4,afq4) = (%d,%d,%d)," + "(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n", + i, + interfaces[i].psq.q[0].len, + interfaces[i].psq.q[1].len, + interfaces[i].afq.q[0].len, + interfaces[i].psq.q[2].len, + interfaces[i].psq.q[3].len, + interfaces[i].afq.q[1].len, + interfaces[i].psq.q[4].len, + interfaces[i].psq.q[5].len, + interfaces[i].afq.q[2].len, + interfaces[i].psq.q[6].len, + interfaces[i].psq.q[7].len, + interfaces[i].afq.q[3].len, + interfaces[i].psq.q[8].len, + interfaces[i].psq.q[9].len, + interfaces[i].afq.q[4].len); + } + } + + bcm_bprintf(strbuf, "\n"); + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (mac_table[i].occupied) { + ea = mac_table[i].ea; + bcm_bprintf(strbuf, "MAC_table[%d].ea = " + "[%02x:%02x:%02x:%02x:%02x:%02x], if:%d \n", i, + ea[0], ea[1], ea[2], ea[3], ea[4], ea[5], + mac_table[i].interface_id); + + bcm_bprintf(strbuf, "MAC_table[%d].PSQ(len,state,credit)," + "(trans,supp_trans,onbus)" + "= (%d,%s,%d),(%d,%d,%d)\n", + i, + mac_table[i].psq.len, + ((mac_table[i].state == + WLFC_STATE_OPEN) ?
" OPEN":"CLOSE"), + mac_table[i].requested_credit, + mac_table[i].transit_count, + mac_table[i].suppr_transit_count, + mac_table[i].onbus_pkts_count); +#ifdef PROP_TXSTATUS_DEBUG + bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n", + i, mac_table[i].opened_ct, mac_table[i].closed_ct); +#endif + bcm_bprintf(strbuf, "MAC_table[%d].PSQ" + "(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2)," + "(delay3,sup3,afq3),(delay4,sup4,afq4) =(%d,%d,%d)," + "(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n", + i, + mac_table[i].psq.q[0].len, + mac_table[i].psq.q[1].len, + mac_table[i].afq.q[0].len, + mac_table[i].psq.q[2].len, + mac_table[i].psq.q[3].len, + mac_table[i].afq.q[1].len, + mac_table[i].psq.q[4].len, + mac_table[i].psq.q[5].len, + mac_table[i].afq.q[2].len, + mac_table[i].psq.q[6].len, + mac_table[i].psq.q[7].len, + mac_table[i].afq.q[3].len, + mac_table[i].psq.q[8].len, + mac_table[i].psq.q[9].len, + mac_table[i].afq.q[4].len); + + } + } + +#ifdef PROP_TXSTATUS_DEBUG + { + int avg; + int moving_avg = 0; + int moving_samples; + + if (wlfc->stats.latency_sample_count) { + moving_samples = sizeof(wlfc->stats.deltas)/sizeof(uint32); + + for (i = 0; i < moving_samples; i++) + moving_avg += wlfc->stats.deltas[i]; + moving_avg /= moving_samples; + + avg = (100 * wlfc->stats.total_status_latency) / + wlfc->stats.latency_sample_count; + bcm_bprintf(strbuf, "txstatus latency (average, last, moving[%d]) = " + "(%d.%d, %03d, %03d)\n", + moving_samples, avg/100, (avg - (avg/100)*100), + wlfc->stats.latency_most_recent, + moving_avg); + } + } + + bcm_bprintf(strbuf, "wlfc- fifo[0-5] credit stats: sent = (%d,%d,%d,%d,%d,%d), " + "back = (%d,%d,%d,%d,%d,%d)\n", + wlfc->stats.fifo_credits_sent[0], + wlfc->stats.fifo_credits_sent[1], + wlfc->stats.fifo_credits_sent[2], + wlfc->stats.fifo_credits_sent[3], + wlfc->stats.fifo_credits_sent[4], + wlfc->stats.fifo_credits_sent[5], + + wlfc->stats.fifo_credits_back[0], + wlfc->stats.fifo_credits_back[1], + wlfc->stats.fifo_credits_back[2], + wlfc->stats.fifo_credits_back[3], + wlfc->stats.fifo_credits_back[4], + wlfc->stats.fifo_credits_back[5]); + { + uint32 fifo_cr_sent = 0; + uint32 fifo_cr_acked = 0; + uint32 request_cr_sent = 0; + uint32 request_cr_ack = 0; + uint32 bc_mc_cr_ack = 0; + + for (i = 0; i < sizeof(wlfc->stats.fifo_credits_sent)/sizeof(uint32); i++) { + fifo_cr_sent += wlfc->stats.fifo_credits_sent[i]; + } + + for (i = 0; i < sizeof(wlfc->stats.fifo_credits_back)/sizeof(uint32); i++) { + fifo_cr_acked += wlfc->stats.fifo_credits_back[i]; + } + + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (wlfc->destination_entries.nodes[i].occupied) { + request_cr_sent += + wlfc->destination_entries.nodes[i].dstncredit_sent_packets; + } + } + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + if (wlfc->destination_entries.interfaces[i].occupied) { + request_cr_sent += + wlfc->destination_entries.interfaces[i].dstncredit_sent_packets; + } + } + for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) { + if (wlfc->destination_entries.nodes[i].occupied) { + request_cr_ack += + wlfc->destination_entries.nodes[i].dstncredit_acks; + } + } + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + if (wlfc->destination_entries.interfaces[i].occupied) { + request_cr_ack += + wlfc->destination_entries.interfaces[i].dstncredit_acks; + } + } + bcm_bprintf(strbuf, "wlfc- (sent, status) => pq(%d,%d), vq(%d,%d)," + "other:%d, bc_mc:%d, signal-only, (sent,freed): (%d,%d)", + fifo_cr_sent, fifo_cr_acked, + request_cr_sent, request_cr_ack, + 
wlfc->destination_entries.other.dstncredit_acks, + bc_mc_cr_ack, + wlfc->stats.signal_only_pkts_sent, wlfc->stats.signal_only_pkts_freed); + } +#endif /* PROP_TXSTATUS_DEBUG */ + bcm_bprintf(strbuf, "\n"); + bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull,out),(dropped,hdr_only,wlc_tossed)" + "(freed,free_err,rollback)) = " + "((%d,%d,%d,%d,%d),(%d,%d,%d),(%d,%d,%d))\n", + wlfc->stats.pktin, + wlfc->stats.pkt2bus, + wlfc->stats.txstatus_in, + wlfc->stats.dhd_hdrpulls, + wlfc->stats.pktout, + + wlfc->stats.pktdropped, + wlfc->stats.wlfc_header_only_pkt, + wlfc->stats.wlc_tossed_pkts, + + wlfc->stats.pkt_freed, + wlfc->stats.pkt_free_err, wlfc->stats.rollback); + + bcm_bprintf(strbuf, "wlfc- suppress((d11,wlc,err),enq(d11,wl,hq,mac?),retx(d11,wlc,hq)) = " + "((%d,%d,%d),(%d,%d,%d,%d),(%d,%d,%d))\n", + wlfc->stats.d11_suppress, + wlfc->stats.wl_suppress, + wlfc->stats.bad_suppress, + + wlfc->stats.psq_d11sup_enq, + wlfc->stats.psq_wlsup_enq, + wlfc->stats.psq_hostq_enq, + wlfc->stats.mac_handle_notfound, + + wlfc->stats.psq_d11sup_retx, + wlfc->stats.psq_wlsup_retx, + wlfc->stats.psq_hostq_retx); + + bcm_bprintf(strbuf, "wlfc- cleanup(txq,psq,fw) = (%d,%d,%d)\n", + wlfc->stats.cleanup_txq_cnt, + wlfc->stats.cleanup_psq_cnt, + wlfc->stats.cleanup_fw_cnt); + + bcm_bprintf(strbuf, "wlfc- generic error: %d\n", wlfc->stats.generic_error); + + for (i = 0; i < WLFC_MAX_IFNUM; i++) { + bcm_bprintf(strbuf, "wlfc- if[%d], pkt_cnt_in_q/AC[0-4] = (%d,%d,%d,%d,%d)\n", i, + wlfc->pkt_cnt_in_q[i][0], + wlfc->pkt_cnt_in_q[i][1], + wlfc->pkt_cnt_in_q[i][2], + wlfc->pkt_cnt_in_q[i][3], + wlfc->pkt_cnt_in_q[i][4]); + } + bcm_bprintf(strbuf, "\n"); + + dhd_os_wlfc_unblock(dhdp); + return BCME_OK; +} /* dhd_wlfc_dump */ + +int dhd_wlfc_clear_counts(dhd_pub_t *dhd) +{ + athost_wl_status_info_t* wlfc; + wlfc_hanger_t* hanger; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + + memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t)); + + if (!WLFC_GET_AFQ(dhd->wlfc_mode)) { + hanger = (wlfc_hanger_t*)wlfc->hanger; + + hanger->pushed = 0; + hanger->popped = 0; + hanger->failed_slotfind = 0; + hanger->failed_to_pop = 0; + hanger->failed_to_push = 0; + } + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** returns TRUE if flow control is enabled */ +int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->wlfc_enabled; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** Called via an IOVAR */ +int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->wlfc_state ? 
dhd->proptxstatus_mode : 0; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** Called via an IOVAR */ +int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val) +{ + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (dhd->wlfc_state) { + dhd->proptxstatus_mode = val & 0xff; + } + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** Called when rx frame is received from the dongle */ +bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf) +{ + athost_wl_status_info_t* wlfc; + bool rc = FALSE; + + if (dhd == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return FALSE; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return FALSE; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + + if (PKTLEN(wlfc->osh, pktbuf) == 0) { + wlfc->stats.wlfc_header_only_pkt++; + rc = TRUE; + } + + dhd_os_wlfc_unblock(dhd); + + return rc; +} + +int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock) +{ + if (dhdp == NULL) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + if (bAcquireLock) { + dhd_os_wlfc_block(dhdp); + } + + if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE) || + dhdp->proptxstatus_module_ignore) { + if (bAcquireLock) { + dhd_os_wlfc_unblock(dhdp); + } + return WLFC_UNSUPPORTED; + } + + if (state != dhdp->proptxstatus_txoff) { + dhdp->proptxstatus_txoff = state; + } + + if (bAcquireLock) { + dhd_os_wlfc_unblock(dhdp); + } + +#if defined(DHD_WLFC_THREAD) + _dhd_wlfc_thread_wakeup(dhdp); +#endif /* defined(DHD_WLFC_THREAD) */ + + return BCME_OK; +} + +/** Called when eg an rx frame is received from the dongle */ +int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio) +{ + athost_wl_status_info_t* wlfc; + int rx_path_ac = -1; + + if ((dhd == NULL) || (prio >= NUMPRIO)) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if (!dhd->wlfc_rxpkt_chk) { + dhd_os_wlfc_unblock(dhd); + return BCME_OK; + } + + if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) { + dhd_os_wlfc_unblock(dhd); + return WLFC_UNSUPPORTED; + } + + wlfc = (athost_wl_status_info_t*)dhd->wlfc_state; + + rx_path_ac = prio2fifo[prio]; + wlfc->rx_timestamp[rx_path_ac] = OSL_SYSUPTIME(); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->proptxstatus_module_ignore; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val) +{ + uint32 tlv = 0; + bool bChanged = FALSE; + + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + if ((bool)val != dhd->proptxstatus_module_ignore) { + dhd->proptxstatus_module_ignore = (val != 0); + /* force txstatus_ignore sync with proptxstatus_module_ignore */ + dhd->proptxstatus_txstatus_ignore = dhd->proptxstatus_module_ignore; + if (FALSE == dhd->proptxstatus_module_ignore) { + tlv = WLFC_FLAGS_RSSI_SIGNALS | + WLFC_FLAGS_XONXOFF_SIGNALS | + WLFC_FLAGS_CREDIT_STATUS_SIGNALS | + WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE; + } + /* always enable
host reorder */ + tlv |= WLFC_FLAGS_HOST_RXRERODER_ACTIVE; + bChanged = TRUE; + } + + dhd_os_wlfc_unblock(dhd); + + if (bChanged) { + /* apply the selected proptxstatus tlv signaling */ + if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) { + DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n", + __FUNCTION__, tlv)); + } else { + DHD_ERROR(("%s: successfully set bdcv2 tlv signaling to 0x%x\n", + __FUNCTION__, tlv)); + } + } + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->proptxstatus_credit_ignore; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val) +{ + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + dhd->proptxstatus_credit_ignore = (val != 0); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->proptxstatus_txstatus_ignore; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val) +{ + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + dhd->proptxstatus_txstatus_ignore = (val != 0); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val) +{ + if (!dhd || !val) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + *val = dhd->wlfc_rxpkt_chk; + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +/** called via an IOVAR */ +int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val) +{ + if (!dhd) { + DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__)); + return BCME_BADARG; + } + + dhd_os_wlfc_block(dhd); + + dhd->wlfc_rxpkt_chk = (val != 0); + + dhd_os_wlfc_unblock(dhd); + + return BCME_OK; +} + +#endif /* PROP_TXSTATUS */ diff --git a/drivers/net/wireless/bcmdhd/dhd_wlfc.h b/drivers/net/wireless/bcmdhd/dhd_wlfc.h new file mode 100644 index 000000000000..a6fd465e35fd --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dhd_wlfc.h @@ -0,0 +1,554 @@ +/* + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dhd_wlfc.h 557035 2015-05-15 18:48:57Z $ + * + */ +#ifndef __wlfc_host_driver_definitions_h__ +#define __wlfc_host_driver_definitions_h__ + + +/* #define OOO_DEBUG */ + +#define KERNEL_THREAD_RETURN_TYPE int + +typedef int (*f_commitpkt_t)(void* ctx, void* p); +typedef bool (*f_processpkt_t)(void* p, void* arg); + +#define WLFC_UNSUPPORTED -9999 + +#define WLFC_NO_TRAFFIC -1 +#define WLFC_MULTI_TRAFFIC 0 + +#define BUS_RETRIES 1 /* # of retries before aborting a bus tx operation */ + +/** 16 bits will provide an absolute max of 65536 slots */ +#define WLFC_HANGER_MAXITEMS 3072 + +#define WLFC_HANGER_ITEM_STATE_FREE 1 +#define WLFC_HANGER_ITEM_STATE_INUSE 2 +#define WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED 3 +#define WLFC_HANGER_ITEM_STATE_FLUSHED 4 + +#define WLFC_HANGER_PKT_STATE_TXSTATUS 1 +#define WLFC_HANGER_PKT_STATE_BUSRETURNED 2 +#define WLFC_HANGER_PKT_STATE_COMPLETE \ + (WLFC_HANGER_PKT_STATE_TXSTATUS | WLFC_HANGER_PKT_STATE_BUSRETURNED) + +typedef enum { + Q_TYPE_PSQ, /**< Power Save Queue, contains both delayed and suppressed packets */ + Q_TYPE_AFQ /**< At Firmware Queue */ +} q_type_t; + +typedef enum ewlfc_packet_state { + eWLFC_PKTTYPE_NEW, /**< unused in the code (Jan 2015) */ + eWLFC_PKTTYPE_DELAYED, /**< packet did not enter wlfc yet */ + eWLFC_PKTTYPE_SUPPRESSED, /**< packet entered wlfc and was suppressed by the dongle */ + eWLFC_PKTTYPE_MAX +} ewlfc_packet_state_t; + +typedef enum ewlfc_mac_entry_action { + eWLFC_MAC_ENTRY_ACTION_ADD, + eWLFC_MAC_ENTRY_ACTION_DEL, + eWLFC_MAC_ENTRY_ACTION_UPDATE, + eWLFC_MAC_ENTRY_ACTION_MAX +} ewlfc_mac_entry_action_t; + +typedef struct wlfc_hanger_item { + uint8 state; + uint8 gen; + uint8 pkt_state; /**< bitmask containing eg WLFC_HANGER_PKT_STATE_TXCOMPLETE */ + uint8 pkt_txstatus; + uint32 identifier; + void* pkt; +#ifdef PROP_TXSTATUS_DEBUG + uint32 push_time; +#endif + struct wlfc_hanger_item *next; +} wlfc_hanger_item_t; + +/** hanger contains packets that have been posted by the dhd to the dongle and are expected back */ +typedef struct wlfc_hanger { + int max_items; + uint32 pushed; + uint32 popped; + uint32 failed_to_push; + uint32 failed_to_pop; + uint32 failed_slotfind; + uint32 slot_pos; + wlfc_hanger_item_t items[1]; +} wlfc_hanger_t; + +#define WLFC_HANGER_SIZE(n) ((sizeof(wlfc_hanger_t) - \ + sizeof(wlfc_hanger_item_t)) + ((n)*sizeof(wlfc_hanger_item_t))) + +#define WLFC_STATE_OPEN 1 /**< remote MAC is able to receive packets */ +#define WLFC_STATE_CLOSE 2 /**< remote MAC is in power save mode */ + +#define WLFC_PSQ_PREC_COUNT ((AC_COUNT + 1) * 2) /**< 2 for each AC traffic and bc/mc */ +#define WLFC_AFQ_PREC_COUNT (AC_COUNT + 1) + +#define WLFC_PSQ_LEN 2048 + +#define WLFC_FLOWCONTROL_HIWATER (2048 - 256) +#define WLFC_FLOWCONTROL_LOWATER 256 + +#if (WLFC_FLOWCONTROL_HIWATER >= (WLFC_PSQ_LEN - 256)) +#undef WLFC_FLOWCONTROL_HIWATER +#define WLFC_FLOWCONTROL_HIWATER (WLFC_PSQ_LEN - 256) +#undef WLFC_FLOWCONTROL_LOWATER +#define WLFC_FLOWCONTROL_LOWATER (WLFC_FLOWCONTROL_HIWATER / 4) +#endif + +#define WLFC_LOG_BUF_SIZE (1024*1024) + +/** Properties related to a remote MAC entity */ +typedef struct wlfc_mac_descriptor { + uint8 occupied; /**< if 0, this descriptor is unused and thus can be (re)used */ + uint8 interface_id; + uint8 iftype; /**< eg 
WLC_E_IF_ROLE_STA */ + uint8 state; /**< eg WLFC_STATE_OPEN */ + uint8 ac_bitmap; /**< automatic power save delivery (APSD) */ + uint8 requested_credit; + uint8 requested_packet; /**< unit: [number of packets] */ + uint8 ea[ETHER_ADDR_LEN]; + + /** maintain (MAC,AC) based seq count for packets going to the device. As well as bc/mc. */ + uint8 seq[AC_COUNT + 1]; + uint8 generation; /**< toggles between 0 and 1 */ + struct pktq psq; /**< contains both 'delayed' and 'suppressed' packets */ + /** packets at firmware queue */ + struct pktq afq; + /** The AC pending bitmap that was reported to the fw at last change */ + uint8 traffic_lastreported_bmp; + /** The new AC pending bitmap */ + uint8 traffic_pending_bmp; + /** 1 = send on next opportunity */ + uint8 send_tim_signal; + uint8 mac_handle; /**< mac handles are assigned by the dongle */ + /** Number of packets at dongle for this entry. */ + int transit_count; + /** Number of suppressions to wait for before evicting from the delay queue */ + int suppr_transit_count; + /** pkt sent to bus but no bus TX complete yet */ + int onbus_pkts_count; + /** flag. TRUE when remote MAC is in suppressed state */ + uint8 suppressed; + + +#ifdef PROP_TXSTATUS_DEBUG + uint32 dstncredit_sent_packets; + uint32 dstncredit_acks; + uint32 opened_ct; + uint32 closed_ct; +#endif + struct wlfc_mac_descriptor* prev; + struct wlfc_mac_descriptor* next; +} wlfc_mac_descriptor_t; + +/** A 'commit' is the hand over of a packet from the host OS layer to the layer below (eg DBUS) */ +typedef struct dhd_wlfc_commit_info { + uint8 needs_hdr; + uint8 ac_fifo_credit_spent; + ewlfc_packet_state_t pkt_type; + wlfc_mac_descriptor_t* mac_entry; + void* p; +} dhd_wlfc_commit_info_t; + +#define WLFC_DECR_SEQCOUNT(entry, prec) do { if (entry->seq[(prec)] == 0) {\ + entry->seq[prec] = 0xff; } else entry->seq[prec]--;} while (0) + +#define WLFC_INCR_SEQCOUNT(entry, prec) entry->seq[(prec)]++ +#define WLFC_SEQCOUNT(entry, prec) entry->seq[(prec)] + +typedef struct athost_wl_stat_counters { + uint32 pktin; + uint32 pktout; + uint32 pkt2bus; + uint32 pktdropped; + uint32 tlv_parse_failed; + uint32 rollback; + uint32 rollback_failed; + uint32 delayq_full_error; + uint32 credit_request_failed; + uint32 packet_request_failed; + uint32 mac_update_failed; + uint32 psmode_update_failed; + uint32 interface_update_failed; + uint32 wlfc_header_only_pkt; + uint32 txstatus_in; + uint32 d11_suppress; + uint32 wl_suppress; + uint32 bad_suppress; + uint32 pkt_freed; + uint32 pkt_free_err; + uint32 psq_wlsup_retx; + uint32 psq_wlsup_enq; + uint32 psq_d11sup_retx; + uint32 psq_d11sup_enq; + uint32 psq_hostq_retx; + uint32 psq_hostq_enq; + uint32 mac_handle_notfound; + uint32 wlc_tossed_pkts; + uint32 dhd_hdrpulls; + uint32 generic_error; + /* an extra one for bc/mc traffic */ + uint32 send_pkts[AC_COUNT + 1]; + uint32 drop_pkts[WLFC_PSQ_PREC_COUNT]; + uint32 ooo_pkts[AC_COUNT + 1]; +#ifdef PROP_TXSTATUS_DEBUG + /** all pkt2bus -> txstatus latency accumulated */ + uint32 latency_sample_count; + uint32 total_status_latency; + uint32 latency_most_recent; + int idx_delta; + uint32 deltas[10]; + uint32 fifo_credits_sent[6]; + uint32 fifo_credits_back[6]; + uint32 dropped_qfull[6]; + uint32 signal_only_pkts_sent; + uint32 signal_only_pkts_freed; +#endif + uint32 cleanup_txq_cnt; + uint32 cleanup_psq_cnt; + uint32 cleanup_fw_cnt; +} athost_wl_stat_counters_t; + +#ifdef PROP_TXSTATUS_DEBUG +#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do { \ + (ctx)->stats.fifo_credits_sent[(ac)]++;} while (0) +#define 
WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do { \ + (ctx)->stats.fifo_credits_back[(ac)]++;} while (0) +#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do { \ + (ctx)->stats.dropped_qfull[(ac)]++;} while (0) +#else +#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do {} while (0) +#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do {} while (0) +#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do {} while (0) +#endif + +#define WLFC_FCMODE_NONE 0 +#define WLFC_FCMODE_IMPLIED_CREDIT 1 +#define WLFC_FCMODE_EXPLICIT_CREDIT 2 +#define WLFC_ONLY_AMPDU_HOSTREORDER 3 + +/** Reserved credits ratio when borrowed by higher priority */ +#define WLFC_BORROW_LIMIT_RATIO 4 + +/** How long to defer borrowing in milliseconds */ +#define WLFC_BORROW_DEFER_PERIOD_MS 100 + +/** How long to defer flow control in milliseconds */ +#define WLFC_FC_DEFER_PERIOD_MS 200 + +/** How long to detect occurrence per AC in milliseconds */ +#define WLFC_RX_DETECTION_THRESHOLD_MS 100 + +/** Mask to represent available ACs (note: BC/MC is ignored) */ +#define WLFC_AC_MASK 0xF + +/** flow control specific information, only 1 instance during driver lifetime */ +typedef struct athost_wl_status_info { + uint8 last_seqid_to_wlc; + + /** OSL handle */ + osl_t *osh; + /** dhd public struct pointer */ + void *dhdp; + + f_commitpkt_t fcommit; + void* commit_ctx; + + /** statistics */ + athost_wl_stat_counters_t stats; + + /** incremented on eg receiving a credit map event from the dongle */ + int Init_FIFO_credit[AC_COUNT + 2]; + /** the additional ones are for bc/mc and ATIM FIFO */ + int FIFO_credit[AC_COUNT + 2]; + /** Credit borrow counts for each FIFO from each of the other FIFOs */ + int credits_borrowed[AC_COUNT + 2][AC_COUNT + 2]; + + /** packet hanger and MAC->handle lookup table */ + void *hanger; + + struct { + /** table for individual nodes */ + wlfc_mac_descriptor_t nodes[WLFC_MAC_DESC_TABLE_SIZE]; + /** table for interfaces */ + wlfc_mac_descriptor_t interfaces[WLFC_MAX_IFNUM]; + /* OS may send packets to unknown (unassociated) destinations */ + /** A place holder for bc/mc and packets to unknown destinations */ + wlfc_mac_descriptor_t other; + } destination_entries; + + wlfc_mac_descriptor_t *active_entry_head; /**< a chain of MAC descriptors */ + int active_entry_count; + + wlfc_mac_descriptor_t *requested_entry[WLFC_MAC_DESC_TABLE_SIZE]; + int requested_entry_count; + + /* pkt counts for each interface and ac */ + int pkt_cnt_in_q[WLFC_MAX_IFNUM][AC_COUNT+1]; + int pkt_cnt_per_ac[AC_COUNT+1]; + int pkt_cnt_in_drv[WLFC_MAX_IFNUM][AC_COUNT+1]; + int pkt_cnt_in_psq; + uint8 allow_fc; /**< Boolean */ + uint32 fc_defer_timestamp; + uint32 rx_timestamp[AC_COUNT+1]; + + /** ON/OFF state for flow control to the host network interface */ + uint8 hostif_flow_state[WLFC_MAX_IFNUM]; + uint8 host_ifidx; + + /** to flow control an OS interface */ + uint8 toggle_host_if; + + /** To borrow credits */ + uint8 allow_credit_borrow; + + /** ac number for the first single ac traffic */ + uint8 single_ac; + + /** Timestamp for the first single ac traffic */ + uint32 single_ac_timestamp; + + bool bcmc_credit_supported; + +} athost_wl_status_info_t; + +/** Please be mindful that total pkttag space is 32 octets only */ +typedef struct dhd_pkttag { + +#ifdef BCM_OBJECT_TRACE + /* if use this field, keep it at the first 4 bytes */ + uint32 sn; +#endif /* BCM_OBJECT_TRACE */ + + /** + b[15] - 1 = wlfc packet + b[14:13] - encryption exemption + b[12 ] - 1 = event channel + b[11 ] - 1 = this packet was sent in response to one time packet 
request, + do not increment credit on status for this one. [WLFC_CTL_TYPE_MAC_REQUEST_PACKET]. + b[10 ] - 1 = signal-only-packet to firmware [i.e. nothing to piggyback on] + b[9 ] - 1 = packet is host->firmware (transmit direction) + - 0 = packet received from firmware (firmware->host) + b[8 ] - 1 = packet was sent due to credit_request (pspoll), + packet does not count against FIFO credit. + - 0 = normal transaction, packet counts against FIFO credit + b[7 ] - 1 = AP, 0 = STA + b[6:4] - AC FIFO number + b[3:0] - interface index + */ + uint16 if_flags; + + /** + * destination MAC address for this packet so that not every module needs to open the packet + * to find this + */ + uint8 dstn_ether[ETHER_ADDR_LEN]; + + /** This 32-bit tag goes from host to device with every packet. */ + uint32 htod_tag; + + /** This 16-bit field holds the original sequence number of every suppressed packet. */ + uint16 htod_seq; + + /** Pointer to the MAC entry associated with this packet. */ + void *entry; + + /** bus specific stuff */ + union { + struct { + void *stuff; + uint32 thing1; + uint32 thing2; + } sd; + + struct { + void *bus; + void *urb; + } usb; + } bus_specific; +} dhd_pkttag_t; + +#define DHD_PKTTAG_WLFCPKT_MASK 0x1 +#define DHD_PKTTAG_WLFCPKT_SHIFT 15 +#define DHD_PKTTAG_WLFCPKT_SET(tag, value) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_WLFCPKT_MASK << DHD_PKTTAG_WLFCPKT_SHIFT)) | \ + (((value) & DHD_PKTTAG_WLFCPKT_MASK) << DHD_PKTTAG_WLFCPKT_SHIFT) +#define DHD_PKTTAG_WLFCPKT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_WLFCPKT_SHIFT) & DHD_PKTTAG_WLFCPKT_MASK) + +#define DHD_PKTTAG_EXEMPT_MASK 0x3 +#define DHD_PKTTAG_EXEMPT_SHIFT 13 +#define DHD_PKTTAG_EXEMPT_SET(tag, value) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_EXEMPT_MASK << DHD_PKTTAG_EXEMPT_SHIFT)) | \ + (((value) & DHD_PKTTAG_EXEMPT_MASK) << DHD_PKTTAG_EXEMPT_SHIFT) +#define DHD_PKTTAG_EXEMPT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_EXEMPT_SHIFT) & DHD_PKTTAG_EXEMPT_MASK) + +#define DHD_PKTTAG_EVENT_MASK 0x1 +#define DHD_PKTTAG_EVENT_SHIFT 12 +#define DHD_PKTTAG_SETEVENT(tag, event) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_EVENT_MASK << DHD_PKTTAG_EVENT_SHIFT)) | \ + (((event) & DHD_PKTTAG_EVENT_MASK) << DHD_PKTTAG_EVENT_SHIFT) +#define DHD_PKTTAG_EVENT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_EVENT_SHIFT) & DHD_PKTTAG_EVENT_MASK) + +#define DHD_PKTTAG_ONETIMEPKTRQST_MASK 0x1 +#define DHD_PKTTAG_ONETIMEPKTRQST_SHIFT 11 +#define DHD_PKTTAG_SETONETIMEPKTRQST(tag) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_ONETIMEPKTRQST_MASK << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)) | \ + (1 << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) +#define DHD_PKTTAG_ONETIMEPKTRQST(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) & DHD_PKTTAG_ONETIMEPKTRQST_MASK) + +#define DHD_PKTTAG_SIGNALONLY_MASK 0x1 +#define DHD_PKTTAG_SIGNALONLY_SHIFT 10 +#define DHD_PKTTAG_SETSIGNALONLY(tag, signalonly) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_SIGNALONLY_MASK << DHD_PKTTAG_SIGNALONLY_SHIFT)) | \ + (((signalonly) & DHD_PKTTAG_SIGNALONLY_MASK) << DHD_PKTTAG_SIGNALONLY_SHIFT) +#define DHD_PKTTAG_SIGNALONLY(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_SIGNALONLY_SHIFT) & DHD_PKTTAG_SIGNALONLY_MASK) + +#define DHD_PKTTAG_PKTDIR_MASK 0x1 +#define DHD_PKTTAG_PKTDIR_SHIFT 9 +#define 
DHD_PKTTAG_SETPKTDIR(tag, dir) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_PKTDIR_MASK << DHD_PKTTAG_PKTDIR_SHIFT)) | \ + (((dir) & DHD_PKTTAG_PKTDIR_MASK) << DHD_PKTTAG_PKTDIR_SHIFT) +#define DHD_PKTTAG_PKTDIR(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_PKTDIR_SHIFT) & DHD_PKTTAG_PKTDIR_MASK) + +#define DHD_PKTTAG_CREDITCHECK_MASK 0x1 +#define DHD_PKTTAG_CREDITCHECK_SHIFT 8 +#define DHD_PKTTAG_SETCREDITCHECK(tag, check) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_CREDITCHECK_MASK << DHD_PKTTAG_CREDITCHECK_SHIFT)) | \ + (((check) & DHD_PKTTAG_CREDITCHECK_MASK) << DHD_PKTTAG_CREDITCHECK_SHIFT) +#define DHD_PKTTAG_CREDITCHECK(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_CREDITCHECK_SHIFT) & DHD_PKTTAG_CREDITCHECK_MASK) + +#define DHD_PKTTAG_IFTYPE_MASK 0x1 +#define DHD_PKTTAG_IFTYPE_SHIFT 7 +#define DHD_PKTTAG_SETIFTYPE(tag, isAP) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & \ + ~(DHD_PKTTAG_IFTYPE_MASK << DHD_PKTTAG_IFTYPE_SHIFT)) | \ + (((isAP) & DHD_PKTTAG_IFTYPE_MASK) << DHD_PKTTAG_IFTYPE_SHIFT) +#define DHD_PKTTAG_IFTYPE(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_IFTYPE_SHIFT) & DHD_PKTTAG_IFTYPE_MASK) + +#define DHD_PKTTAG_FIFO_MASK 0x7 +#define DHD_PKTTAG_FIFO_SHIFT 4 +#define DHD_PKTTAG_SETFIFO(tag, fifo) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_FIFO_MASK << DHD_PKTTAG_FIFO_SHIFT)) | \ + (((fifo) & DHD_PKTTAG_FIFO_MASK) << DHD_PKTTAG_FIFO_SHIFT) +#define DHD_PKTTAG_FIFO(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_FIFO_SHIFT) & DHD_PKTTAG_FIFO_MASK) + +#define DHD_PKTTAG_IF_MASK 0xf +#define DHD_PKTTAG_IF_SHIFT 0 +#define DHD_PKTTAG_SETIF(tag, if) ((dhd_pkttag_t*)(tag))->if_flags = \ + (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_IF_MASK << DHD_PKTTAG_IF_SHIFT)) | \ + (((if) & DHD_PKTTAG_IF_MASK) << DHD_PKTTAG_IF_SHIFT) +#define DHD_PKTTAG_IF(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \ + DHD_PKTTAG_IF_SHIFT) & DHD_PKTTAG_IF_MASK) + +#define DHD_PKTTAG_SETDSTN(tag, dstn_MAC_ea) memcpy(((dhd_pkttag_t*)((tag)))->dstn_ether, \ + (dstn_MAC_ea), ETHER_ADDR_LEN) +#define DHD_PKTTAG_DSTN(tag) ((dhd_pkttag_t*)(tag))->dstn_ether + +#define DHD_PKTTAG_SET_H2DTAG(tag, h2dvalue) ((dhd_pkttag_t*)(tag))->htod_tag = (h2dvalue) +#define DHD_PKTTAG_H2DTAG(tag) (((dhd_pkttag_t*)(tag))->htod_tag) + +#define DHD_PKTTAG_SET_H2DSEQ(tag, seq) ((dhd_pkttag_t*)(tag))->htod_seq = (seq) +#define DHD_PKTTAG_H2DSEQ(tag) (((dhd_pkttag_t*)(tag))->htod_seq) + +#define DHD_PKTTAG_SET_ENTRY(tag, entry) ((dhd_pkttag_t*)(tag))->entry = (entry) +#define DHD_PKTTAG_ENTRY(tag) (((dhd_pkttag_t*)(tag))->entry) + +#define PSQ_SUP_IDX(x) (x * 2 + 1) +#define PSQ_DLY_IDX(x) (x * 2) + +#ifdef PROP_TXSTATUS_DEBUG +#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do { (entry)->closed_ct++; } while (0) +#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do { (entry)->opened_ct++; } while (0) +#else +#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do {} while (0) +#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do {} while (0) +#endif + +#ifdef BCM_OBJECT_TRACE +#define DHD_PKTTAG_SET_SN(tag, val) ((dhd_pkttag_t*)(tag))->sn = (val) +#define DHD_PKTTAG_SN(tag) (((dhd_pkttag_t*)(tag))->sn) +#endif /* BCM_OBJECT_TRACE */ + +/* public functions */ +int dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, + uchar *reorder_info_buf, uint *reorder_info_len); +KERNEL_THREAD_RETURN_TYPE dhd_wlfc_transfer_packets(void *data); 
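+/* + * Usage sketch (illustrative only, not part of the driver): how a bus layer + * might wire its commit callback into wlfc through dhd_wlfc_commit_packets() + * declared below. f_commitpkt_t is the callback typedef defined earlier in + * this header; the bus_ctx_t type and the bus_dev_tx()/my_bus_commit() names + * are hypothetical placeholders. + * + * static int my_bus_commit(void *ctx, void *p) + * { + * bus_ctx_t *bus = (bus_ctx_t *)ctx; + * return bus_dev_tx(bus, p) ? BCME_OK : BCME_ERROR; + * } + * + * Returning BCME_OK tells wlfc the packet is in flight, which keeps the FIFO + * credit accounting consistent. On each transmit attempt the caller then lets + * wlfc schedule pktbuf plus any queued traffic: + * + * dhd_wlfc_commit_packets(dhdp, my_bus_commit, bus, pktbuf, FALSE); + */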
+int dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, + void* commit_ctx, void *pktbuf, bool need_toggle_host_if); +int dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success); +int dhd_wlfc_init(dhd_pub_t *dhd); +#ifdef SUPPORT_P2P_GO_PS +int dhd_wlfc_suspend(dhd_pub_t *dhd); +int dhd_wlfc_resume(dhd_pub_t *dhd); +#endif /* SUPPORT_P2P_GO_PS */ +int dhd_wlfc_hostreorder_init(dhd_pub_t *dhd); +int dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg); +int dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void* arg); +int dhd_wlfc_deinit(dhd_pub_t *dhd); +int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea); +int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data); +int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp); +int dhd_wlfc_enable(dhd_pub_t *dhdp); +int dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf); +int dhd_wlfc_clear_counts(dhd_pub_t *dhd); +int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val); +int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val); +bool dhd_wlfc_is_supported(dhd_pub_t *dhd); +bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf); +int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock); +int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio); + +int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val); +int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val); +int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val); + +int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val); +int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val); + +#endif /* __wlfc_host_driver_definitions_h__ */ diff --git a/drivers/net/wireless/bcmdhd/dngl_stats.h b/drivers/net/wireless/bcmdhd/dngl_stats.h new file mode 100644 index 000000000000..66e4f4528f7d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dngl_stats.h @@ -0,0 +1,283 @@ +/* + * Common stats definitions for clients of dongle + * ports + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dngl_stats.h 523030 2014-12-25 17:28:07Z $ + */ + +#ifndef _dngl_stats_h_ +#define _dngl_stats_h_ + +#include +#include + +typedef struct { + unsigned long rx_packets; /* total packets received */ + unsigned long tx_packets; /* total packets transmitted */ + unsigned long rx_bytes; /* total bytes received */ + unsigned long tx_bytes; /* total bytes transmitted */ + unsigned long rx_errors; /* bad packets received */ + unsigned long tx_errors; /* packet transmit problems */ + unsigned long rx_dropped; /* packets dropped by dongle */ + unsigned long tx_dropped; /* packets dropped by dongle */ + unsigned long multicast; /* multicast packets received */ +} dngl_stats_t; + +typedef int32 wifi_radio; +typedef int32 wifi_channel; +typedef int32 wifi_rssi; +typedef struct { uint16 version; uint16 length; } ver_len; + +typedef enum wifi_channel_width { + WIFI_CHAN_WIDTH_20 = 0, + WIFI_CHAN_WIDTH_40 = 1, + WIFI_CHAN_WIDTH_80 = 2, + WIFI_CHAN_WIDTH_160 = 3, + WIFI_CHAN_WIDTH_80P80 = 4, + WIFI_CHAN_WIDTH_5 = 5, + WIFI_CHAN_WIDTH_10 = 6, + WIFI_CHAN_WIDTH_INVALID = -1 +} wifi_channel_width_t; + +typedef enum { + WIFI_DISCONNECTED = 0, + WIFI_AUTHENTICATING = 1, + WIFI_ASSOCIATING = 2, + WIFI_ASSOCIATED = 3, + WIFI_EAPOL_STARTED = 4, /* if done by firmware/driver */ + WIFI_EAPOL_COMPLETED = 5, /* if done by firmware/driver */ +} wifi_connection_state; + +typedef enum { + WIFI_ROAMING_IDLE = 0, + WIFI_ROAMING_ACTIVE = 1 +} wifi_roam_state; + +typedef enum { + WIFI_INTERFACE_STA = 0, + WIFI_INTERFACE_SOFTAP = 1, + WIFI_INTERFACE_IBSS = 2, + WIFI_INTERFACE_P2P_CLIENT = 3, + WIFI_INTERFACE_P2P_GO = 4, + WIFI_INTERFACE_NAN = 5, + WIFI_INTERFACE_MESH = 6 +} wifi_interface_mode; + +#define WIFI_CAPABILITY_QOS 0x00000001 /* set for QOS association */ +#define WIFI_CAPABILITY_PROTECTED 0x00000002 /* set for protected association (802.11 + * beacon frame control protected bit set) + */ +#define WIFI_CAPABILITY_INTERWORKING 0x00000004 /* set if 802.11 Extended Capabilities + * element interworking bit is set + */ +#define WIFI_CAPABILITY_HS20 0x00000008 /* set for HS20 association */ +#define WIFI_CAPABILITY_SSID_UTF8 0x00000010 /* set is 802.11 Extended Capabilities + * element UTF-8 SSID bit is set + */ +#define WIFI_CAPABILITY_COUNTRY 0x00000020 /* set is 802.11 Country Element is present */ + +typedef struct { + wifi_interface_mode mode; /* interface mode */ + uint8 mac_addr[6]; /* interface mac address (self) */ + wifi_connection_state state; /* connection state (valid for STA, CLI only) */ + wifi_roam_state roaming; /* roaming state */ + uint32 capabilities; /* WIFI_CAPABILITY_XXX (self) */ + uint8 ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated SSID */ + uint8 bssid[ETHER_ADDR_LEN]; /* bssid */ + uint8 ap_country_str[3]; /* country string advertised by AP */ + uint8 country_str[3]; /* country string for this association */ +} wifi_interface_info; + +typedef wifi_interface_info *wifi_interface_handle; + +/* channel information */ +typedef struct { + wifi_channel_width_t width; /* channel width (20, 40, 80, 80+80, 160) */ + wifi_channel center_freq; /* primary 20 MHz channel */ + wifi_channel center_freq0; /* center frequency (MHz) first segment */ + wifi_channel center_freq1; /* center frequency (MHz) second segment */ +} wifi_channel_info; + +/* wifi rate */ +typedef struct { + uint32 preamble; /* 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved */ + uint32 nss; /* 0:1x1, 1:2x2, 3:3x3, 4:4x4 */ + uint32 bw; /* 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz */ + uint32 rateMcsIdx; /* OFDM/CCK rate 
code would be as per ieee std + * in the units of 0.5mbps + */ + /* HT/VHT it would be mcs index */ + uint32 reserved; /* reserved */ + uint32 bitrate; /* units of 100 Kbps */ +} wifi_rate; + +/* channel statistics */ +typedef struct { + wifi_channel_info channel; /* channel */ + uint32 on_time; /* msecs the radio is awake (32 bits number + * accruing over time) + */ + uint32 cca_busy_time; /* msecs the CCA register is busy (32 bits number + * accruing over time) + */ +} wifi_channel_stat; + +/* radio statistics */ +typedef struct { + struct { + uint16 version; + uint16 length; + }; + wifi_radio radio; /* wifi radio (if multiple radio supported) */ + uint32 on_time; /* msecs the radio is awake (32 bits number + * accruing over time) + */ + uint32 tx_time; /* msecs the radio is transmitting (32 bits + * number accruing over time) + */ + uint32 rx_time; /* msecs the radio is in active receive (32 bits + * number accruing over time) + */ + uint32 on_time_scan; /* msecs the radio is awake due to all scan (32 bits + * number accruing over time) + */ + uint32 on_time_nbd; /* msecs the radio is awake due to NAN (32 bits + * number accruing over time) + */ + uint32 on_time_gscan; /* msecs the radio is awake due to G-scan (32 bits + * number accruing over time) + */ + uint32 on_time_roam_scan; /* msecs the radio is awake due to roam scan (32 bits + * number accruing over time) + */ + uint32 on_time_pno_scan; /* msecs the radio is awake due to PNO scan (32 bits + * number accruing over time) + */ + uint32 on_time_hs20; /* msecs the radio is awake due to HS2.0 scans and + * GAS exchange (32 bits number accruing over time) + */ + uint32 num_channels; /* number of channels */ + wifi_channel_stat channels[1]; /* channel statistics */ +} wifi_radio_stat; + +/* per rate statistics */ +typedef struct { + struct { + uint16 version; + uint16 length; + }; + uint32 tx_mpdu; /* number of successfully transmitted data pkts (ACK rcvd) */ + uint32 rx_mpdu; /* number of received data pkts */ + uint32 mpdu_lost; /* number of data packet losses (no ACK) */ + uint32 retries; /* total number of data pkt retries */ + uint32 retries_short; /* number of short data pkt retries */ + uint32 retries_long; /* number of long data pkt retries */ + wifi_rate rate; /* rate information */ +} wifi_rate_stat; + +/* access categories */ +typedef enum { + WIFI_AC_VO = 0, + WIFI_AC_VI = 1, + WIFI_AC_BE = 2, + WIFI_AC_BK = 3, + WIFI_AC_MAX = 4 +} wifi_traffic_ac; + +/* wifi peer type */ +typedef enum +{ + WIFI_PEER_STA, + WIFI_PEER_AP, + WIFI_PEER_P2P_GO, + WIFI_PEER_P2P_CLIENT, + WIFI_PEER_NAN, + WIFI_PEER_TDLS, + WIFI_PEER_INVALID +} wifi_peer_type; + +/* per peer statistics */ +typedef struct { + wifi_peer_type type; /* peer type (AP, TDLS, GO etc.) 
*/ + uint8 peer_mac_address[6]; /* mac address */ + uint32 capabilities; /* peer WIFI_CAPABILITY_XXX */ + uint32 num_rate; /* number of rates */ + wifi_rate_stat rate_stats[1]; /* per rate statistics, number of entries = num_rate */ +} wifi_peer_info; + +/* per access category statistics */ +typedef struct { + wifi_traffic_ac ac; /* access category (VI, VO, BE, BK) */ + uint32 tx_mpdu; /* number of successfully transmitted unicast data pkts + * (ACK rcvd) + */ + uint32 rx_mpdu; /* number of received unicast mpdus */ + uint32 tx_mcast; /* number of successfully transmitted multicast + * data packets + */ + /* STA case: implies ACK received from AP for the + * unicast packet in which mcast pkt was sent + */ + uint32 rx_mcast; /* number of received multicast data packets */ + uint32 rx_ampdu; /* number of received unicast a-mpdus */ + uint32 tx_ampdu; /* number of transmitted unicast a-mpdus */ + uint32 mpdu_lost; /* number of data pkt losses (no ACK) */ + uint32 retries; /* total number of data pkt retries */ + uint32 retries_short; /* number of short data pkt retries */ + uint32 retries_long; /* number of long data pkt retries */ + uint32 contention_time_min; /* data pkt min contention time (usecs) */ + uint32 contention_time_max; /* data pkt max contention time (usecs) */ + uint32 contention_time_avg; /* data pkt avg contention time (usecs) */ + uint32 contention_num_samples; /* num of data pkts used for contention statistics */ +} wifi_wmm_ac_stat; + +/* interface statistics */ +typedef struct { + wifi_interface_handle iface; /* wifi interface */ + wifi_interface_info info; /* current state of the interface */ + uint32 beacon_rx; /* access point beacon received count from + * connected AP + */ + uint32 mgmt_rx; /* access point mgmt frames received count from + * connected AP (including Beacon) + */ + uint32 mgmt_action_rx; /* action frames received count */ + uint32 mgmt_action_tx; /* action frames transmit count */ + wifi_rssi rssi_mgmt; /* access Point Beacon and Management frames RSSI + * (averaged) + */ + wifi_rssi rssi_data; /* access Point Data Frames RSSI (averaged) from + * connected AP + */ + wifi_rssi rssi_ack; /* access Point ACK RSSI (averaged) from + * connected AP + */ + wifi_wmm_ac_stat ac[WIFI_AC_MAX]; /* per ac data packet statistics */ + uint32 num_peers; /* number of peers */ + wifi_peer_info peer_info[1]; /* per peer statistics */ +} wifi_iface_stat; + +#endif /* _dngl_stats_h_ */ diff --git a/drivers/net/wireless/bcmdhd/dngl_wlhdr.h b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h new file mode 100644 index 000000000000..93e0b5a5b69d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/dngl_wlhdr.h @@ -0,0 +1,43 @@ +/* + * Dongle WL Header definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: dngl_wlhdr.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _dngl_wlhdr_h_ +#define _dngl_wlhdr_h_ + +typedef struct wl_header { + uint8 type; /* Header type */ + uint8 version; /* Header version */ + int8 rssi; /* RSSI */ + uint8 pad; /* Unused */ +} wl_header_t; + +#define WL_HEADER_LEN sizeof(wl_header_t) +#define WL_HEADER_TYPE 0 +#define WL_HEADER_VER 1 +#endif /* _dngl_wlhdr_h_ */ diff --git a/drivers/net/wireless/bcmdhd/hnd_pktpool.c b/drivers/net/wireless/bcmdhd/hnd_pktpool.c new file mode 100644 index 000000000000..f3555e40ce91 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/hnd_pktpool.c @@ -0,0 +1,1131 @@ +/* + * HND generic packet pool operation primitives + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: hnd_pktpool.c 591285 2015-10-07 11:56:29Z $ + */ + +#include +#include +#include +#include +#include + +/* mutex macros for thread safe */ +#ifdef HND_PKTPOOL_THREAD_SAFE +#define HND_PKTPOOL_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex) +#define HND_PKTPOOL_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex) +#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec) +#define HND_PKTPOOL_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex) +#else +#define HND_PKTPOOL_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS +#define HND_PKTPOOL_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS +#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS +#define HND_PKTPOOL_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS +#endif + +/* Registry size is one larger than max pools, as slot #0 is reserved */ +#define PKTPOOLREG_RSVD_ID (0U) +#define PKTPOOLREG_RSVD_PTR (POOLPTR(0xdeaddead)) +#define PKTPOOLREG_FREE_PTR (POOLPTR(NULL)) + +#define PKTPOOL_REGISTRY_SET(id, pp) (pktpool_registry_set((id), (pp))) +#define PKTPOOL_REGISTRY_CMP(id, pp) (pktpool_registry_cmp((id), (pp))) + +/* Tag a registry entry as free for use */ +#define PKTPOOL_REGISTRY_CLR(id) \ + PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR) +#define PKTPOOL_REGISTRY_ISCLR(id) \ + (PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR)) + +/* Tag registry entry 0 as reserved */ +#define PKTPOOL_REGISTRY_RSV() \ + PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR) +#define PKTPOOL_REGISTRY_ISRSVD() \ + (PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)) + +/* Walk all un-reserved entries in registry */ +#define PKTPOOL_REGISTRY_FOREACH(id) \ + for ((id) = 1U; (id) <= pktpools_max; (id)++) + +enum pktpool_empty_cb_state { + EMPTYCB_ENABLED = 0, /* Enable callback when new packets are added to pool */ + EMPTYCB_DISABLED, /* Disable callback when new packets are added to pool */ + EMPTYCB_SKIPPED /* Packet was added to pool when callback was disabled */ +}; + +uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */ +pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */ + +/* Register/Deregister a pktpool with registry during pktpool_init/deinit */ +static int pktpool_register(pktpool_t * poolptr); +static int pktpool_deregister(pktpool_t * poolptr); + +/** add declaration */ +static int pktpool_avail_notify(pktpool_t *pktp); + +/** accessor functions required when ROMming this file, forced into RAM */ + + +pktpool_t * +BCMRAMFN(get_pktpools_registry)(int id) +{ + return pktpools_registry[id]; +} + +static void +BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp) +{ + pktpools_registry[id] = pp; +} + +static bool +BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp) +{ + return pktpools_registry[id] == pp; +} + +int /* Construct a pool registry to serve a maximum of total_pools */ +pktpool_attach(osl_t *osh, uint32 total_pools) +{ + uint32 poolid; + + if (pktpools_max != 0U) { + return BCME_ERROR; + } + + ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID); + + /* Initialize registry: reserve slot#0 and tag others as free */ + PKTPOOL_REGISTRY_RSV(); /* reserve slot#0 */ + + PKTPOOL_REGISTRY_FOREACH(poolid) { /* tag all unreserved entries as free */ + PKTPOOL_REGISTRY_CLR(poolid); + } + + pktpools_max = total_pools; + + return (int)pktpools_max; +} + +int /* Destruct the pool registry. 
Ascertain all pools were first de-inited */ +pktpool_dettach(osl_t *osh) +{ + uint32 poolid; + + if (pktpools_max == 0U) { + return BCME_OK; + } + + /* Ascertain that no pools are still registered */ + ASSERT(PKTPOOL_REGISTRY_ISRSVD()); /* assert reserved slot */ + + PKTPOOL_REGISTRY_FOREACH(poolid) { /* ascertain all others are free */ + ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid)); + } + + pktpools_max = 0U; /* restore boot state */ + + return BCME_OK; +} + +static int /* Register a pool in a free slot; return the registry slot index */ +pktpool_register(pktpool_t * poolptr) +{ + uint32 poolid; + + if (pktpools_max == 0U) { + return PKTPOOL_INVALID_ID; /* registry has not yet been constructed */ + } + + ASSERT(pktpools_max != 0U); + + /* find an empty slot in pktpools_registry */ + PKTPOOL_REGISTRY_FOREACH(poolid) { + if (PKTPOOL_REGISTRY_ISCLR(poolid)) { + PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */ + return (int)poolid; /* return pool ID */ + } + } /* FOREACH */ + + return PKTPOOL_INVALID_ID; /* error: registry is full */ +} + +static int /* Deregister a pktpool, given the pool pointer; tag slot as free */ +pktpool_deregister(pktpool_t * poolptr) +{ + uint32 poolid; + + ASSERT(POOLPTR(poolptr) != POOLPTR(NULL)); + + poolid = POOLID(poolptr); + ASSERT(poolid <= pktpools_max); + + /* Ascertain that a previously registered poolptr is being de-registered */ + if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) { + PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */ + } else { + ASSERT(0); + return BCME_ERROR; /* mismatch in registry */ + } + + return BCME_OK; +} + + +/* + * pktpool_init: + * User provides a pktpool_t structure and specifies the number of packets to + * be pre-filled into the pool (pplen). The size of all packets in a pool must + * be the same and is specified by plen. + * pktpool_init first attempts to register the pool and fetch a unique poolid. + * If registration fails, it is considered a BCME_ERROR, caused either by the + * registry not having been pre-created (pktpool_attach) or by the registry being full. + * If registration succeeds, then the requested number of packets will be filled + * into the pool as part of initialization. In the event that there is no + * available memory to service the request, then BCME_NOMEM will be returned + * along with the count of how many packets were successfully allocated. + * In dongle builds, prior to memory reclamation, one should limit the number + * of packets to be allocated during pktpool_init and fill the pool up after + * the reclaim stage. + */ +int +pktpool_init(osl_t *osh, pktpool_t *pktp, int *pplen, int plen, bool istx, uint8 type) +{ + int i, err = BCME_OK; + int pktplen; + uint8 pktp_id; + + ASSERT(pktp != NULL); + ASSERT(osh != NULL); + ASSERT(pplen != NULL); + + pktplen = *pplen; + + bzero(pktp, sizeof(pktpool_t)); + + /* assign a unique pktpool id */ + if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) { + return BCME_ERROR; + } + POOLSETID(pktp, pktp_id); + + pktp->inited = TRUE; + pktp->istx = istx ? 
TRUE : FALSE; + pktp->plen = (uint16)plen; + pktp->type = type; + + if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) { + return BCME_ERROR; + } + + pktp->maxlen = PKTPOOL_LEN_MAX; + pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen); + + for (i = 0; i < pktplen; i++) { + void *p; + p = PKTGET(osh, plen, TRUE); + + if (p == NULL) { + /* Not able to allocate all requested pkts + * so just return what was actually allocated + * We can add to the pool later + */ + if (pktp->freelist == NULL) /* pktpool free list is empty */ + err = BCME_NOMEM; + + goto exit; + } + + PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */ + + PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */ + pktp->freelist = p; + + pktp->avail++; + +#ifdef BCMDBG_POOL + pktp->dbg_q[pktp->dbg_qlen++].p = p; +#endif + } + +exit: + pktp->len = pktp->avail; + + *pplen = pktp->len; + return err; +} + +/* + * pktpool_deinit: + * Prior to freeing a pktpool, all packets must be first freed into the pktpool. + * Upon pktpool_deinit, all packets in the free pool will be freed to the heap. + * An assert is in place to ensure that there are no packets still lingering + * around. Packets freed to a pool after the deinit will cause a memory + * corruption as the pktpool_t structure no longer exists. + */ +int +pktpool_deinit(osl_t *osh, pktpool_t *pktp) +{ + uint16 freed = 0; + + ASSERT(osh != NULL); + ASSERT(pktp != NULL); + +#ifdef BCMDBG_POOL + { + int i; + for (i = 0; i <= pktp->len; i++) { + pktp->dbg_q[i].p = NULL; + } + } +#endif + + while (pktp->freelist != NULL) { + void * p = pktp->freelist; + + pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */ + PKTSETFREELIST(p, NULL); + + PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */ + + PKTFREE(osh, p, pktp->istx); /* free the packet */ + + freed++; + ASSERT(freed <= pktp->len); + } + + pktp->avail -= freed; + ASSERT(pktp->avail == 0); + + pktp->len -= freed; + + pktpool_deregister(pktp); /* release previously acquired unique pool id */ + POOLSETID(pktp, PKTPOOL_INVALID_ID); + + if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + pktp->inited = FALSE; + + /* Are there still pending pkts? */ + ASSERT(pktp->len == 0); + + return 0; +} + +int +pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal) +{ + void *p; + int err = 0; + int len, psize, maxlen; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(pktp->plen != 0); + + maxlen = pktp->maxlen; + psize = minimal ? 
(maxlen >> 2) : maxlen; + for (len = (int)pktp->len; len < psize; len++) { + + p = PKTGET(osh, pktp->plen, TRUE); + + if (p == NULL) { + err = BCME_NOMEM; + break; + } + + if (pktpool_add(pktp, p) != BCME_OK) { + PKTFREE(osh, p, FALSE); + err = BCME_ERROR; + break; + } + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + if (pktp->cbcnt) { + if (pktp->empty == FALSE) + pktpool_avail_notify(pktp); + } + + return err; +} + +static void * +pktpool_deq(pktpool_t *pktp) +{ + void *p = NULL; + + if (pktp->avail == 0) + return NULL; + + ASSERT(pktp->freelist != NULL); + + p = pktp->freelist; /* dequeue packet from head of pktpool free list */ + pktp->freelist = PKTFREELIST(p); /* free list points to next packet */ + PKTSETFREELIST(p, NULL); + + pktp->avail--; + + return p; +} + +static void +pktpool_enq(pktpool_t *pktp, void *p) +{ + ASSERT(p != NULL); + + PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */ + pktp->freelist = p; /* free list points to newly inserted packet */ + + pktp->avail++; + ASSERT(pktp->avail <= pktp->len); +} + +/* utility for registering host addr fill function called from pciedev */ +int +/* BCMATTACHFN */ +(pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg) +{ + + ASSERT(cb != NULL); + + ASSERT(pktp->cbext.cb == NULL); + pktp->cbext.cb = cb; + pktp->cbext.arg = arg; + return 0; +} + +int +pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg) +{ + + ASSERT(cb != NULL); + + if (pktp == NULL) + return BCME_ERROR; + ASSERT(pktp->rxcplidfn.cb == NULL); + pktp->rxcplidfn.cb = cb; + pktp->rxcplidfn.arg = arg; + return 0; +} +/* Callback functions for split rx modes */ +/* whenever the host posts an rx buffer, invoke dma_rxfill from the pciedev layer */ +void +pktpool_invoke_dmarxfill(pktpool_t *pktp) +{ + ASSERT(pktp->dmarxfill.cb); + ASSERT(pktp->dmarxfill.arg); + + if (pktp->dmarxfill.cb) + pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg); +} +int +pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + + ASSERT(cb != NULL); + + pktp->dmarxfill.cb = cb; + pktp->dmarxfill.arg = arg; + + return 0; +} +/* No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function */ +int +pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + int err = 0; + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(cb != NULL); + + i = pktp->cbcnt; + if (i == PKTPOOL_CB_MAX_AVL) { + err = BCME_ERROR; + goto done; + } + + ASSERT(pktp->cbs[i].cb == NULL); + pktp->cbs[i].cb = cb; + pktp->cbs[i].arg = arg; + pktp->cbcnt++; + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +int +pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + int err = 0; + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(cb != NULL); + + i = pktp->ecbcnt; + if (i == PKTPOOL_CB_MAX) { + err = BCME_ERROR; + goto done; + } + + ASSERT(pktp->ecbs[i].cb == NULL); + pktp->ecbs[i].cb = cb; + pktp->ecbs[i].arg = arg; + pktp->ecbcnt++; + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +static int 
+pktpool_empty_notify(pktpool_t *pktp) +{ + int i; + + pktp->empty = TRUE; + for (i = 0; i < pktp->ecbcnt; i++) { + ASSERT(pktp->ecbs[i].cb != NULL); + pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg); + } + pktp->empty = FALSE; + + return 0; +} + +#ifdef BCMDBG_POOL +int +pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) +{ + int err = 0; + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(cb); + + i = pktp->dbg_cbcnt; + if (i == PKTPOOL_CB_MAX) { + err = BCME_ERROR; + goto done; + } + + ASSERT(pktp->dbg_cbs[i].cb == NULL); + pktp->dbg_cbs[i].cb = cb; + pktp->dbg_cbs[i].arg = arg; + pktp->dbg_cbcnt++; + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +int pktpool_dbg_notify(pktpool_t *pktp); + +int +pktpool_dbg_notify(pktpool_t *pktp) +{ + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + for (i = 0; i < pktp->dbg_cbcnt; i++) { + ASSERT(pktp->dbg_cbs[i].cb); + pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg); + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int +pktpool_dbg_dump(pktpool_t *pktp) +{ + int i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen); + for (i = 0; i < pktp->dbg_qlen; i++) { + ASSERT(pktp->dbg_q[i].p); + printf("%d, p: 0x%x dur:%lu us state:%d\n", i, + pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p)); + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int +pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats) +{ + int i; + int state; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + bzero(stats, sizeof(pktpool_stats_t)); + for (i = 0; i < pktp->dbg_qlen; i++) { + ASSERT(pktp->dbg_q[i].p != NULL); + + state = PKTPOOLSTATE(pktp->dbg_q[i].p); + switch (state) { + case POOL_TXENQ: + stats->enq++; break; + case POOL_TXDH: + stats->txdh++; break; + case POOL_TXD11: + stats->txd11++; break; + case POOL_RXDH: + stats->rxdh++; break; + case POOL_RXD11: + stats->rxd11++; break; + case POOL_RXFILL: + stats->rxfill++; break; + case POOL_IDLE: + stats->idle++; break; + } + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int +pktpool_start_trigger(pktpool_t *pktp, void *p) +{ + uint32 cycles, i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + if (!PKTPOOL(OSH_NULL, p)) + goto done; + + OSL_GETCYCLES(cycles); + + for (i = 0; i < pktp->dbg_qlen; i++) { + ASSERT(pktp->dbg_q[i].p != NULL); + + if (pktp->dbg_q[i].p == p) { + pktp->dbg_q[i].cycles = cycles; + break; + } + } + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int pktpool_stop_trigger(pktpool_t *pktp, void *p); +int +pktpool_stop_trigger(pktpool_t 
*pktp, void *p) +{ + uint32 cycles, i; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + if (!PKTPOOL(OSH_NULL, p)) + goto done; + + OSL_GETCYCLES(cycles); + + for (i = 0; i < pktp->dbg_qlen; i++) { + ASSERT(pktp->dbg_q[i].p != NULL); + + if (pktp->dbg_q[i].p == p) { + if (pktp->dbg_q[i].cycles == 0) + break; + + if (cycles >= pktp->dbg_q[i].cycles) + pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles; + else + pktp->dbg_q[i].dur = + (((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1; + + pktp->dbg_q[i].cycles = 0; + break; + } + } + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} +#endif /* BCMDBG_POOL */ + +int +pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp) +{ + ASSERT(pktp); + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + pktp->availcb_excl = NULL; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return 0; +} + +int +pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb) +{ + int i; + int err; + + ASSERT(pktp); + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(pktp->availcb_excl == NULL); + for (i = 0; i < pktp->cbcnt; i++) { + if (cb == pktp->cbs[i].cb) { + pktp->availcb_excl = &pktp->cbs[i]; + break; + } + } + + if (pktp->availcb_excl == NULL) + err = BCME_ERROR; + else + err = 0; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +static int +pktpool_avail_notify(pktpool_t *pktp) +{ + int i, k, idx; + int avail; + + ASSERT(pktp); + if (pktp->availcb_excl != NULL) { + pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg); + return 0; + } + + k = pktp->cbcnt - 1; + for (i = 0; i < pktp->cbcnt; i++) { + avail = pktp->avail; + + if (avail) { + if (pktp->cbtoggle) + idx = i; + else + idx = k--; + + ASSERT(pktp->cbs[idx].cb != NULL); + pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg); + } + } + + /* Alternate between filling from head or tail + */ + pktp->cbtoggle ^= 1; + + return 0; +} + +void * +pktpool_get(pktpool_t *pktp) +{ + void *p; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + + p = pktpool_deq(pktp); + + if (p == NULL) { + /* Notify and try to reclaim tx pkts */ + if (pktp->ecbcnt) + pktpool_empty_notify(pktp); + + p = pktpool_deq(pktp); + if (p == NULL) + goto done; + } + + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void +pktpool_free(pktpool_t *pktp, void *p) +{ + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + ASSERT(p != NULL); +#ifdef BCMDBG_POOL + /* pktpool_stop_trigger(pktp, p); */ +#endif + + pktpool_enq(pktp, p); + + /** + * Feed critical DMA with freshly freed packets, to avoid DMA starvation. + * If any avail callback functions are registered, send a notification + * that a new packet is available in the pool. + */ + if (pktp->cbcnt) { + /* To more efficiently use the cpu cycles, callbacks can be temporarily disabled. 
+ * This allows feeding on a burst basis as opposed to an inefficient per-packet basis. + */ + if (pktp->emptycb_disable == EMPTYCB_ENABLED) { + /** + * If the call originated from pktpool_empty_notify, the just freed packet + * is needed in pktpool_get. + * Therefore don't call pktpool_avail_notify. + */ + if (pktp->empty == FALSE) + pktpool_avail_notify(pktp); + } else { + /** + * The callback is temporarily disabled, log that a packet has been freed. + */ + pktp->emptycb_disable = EMPTYCB_SKIPPED; + } + } + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return; +} + +int +pktpool_add(pktpool_t *pktp, void *p) +{ + int err = 0; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + ASSERT(p != NULL); + + if (pktp->len == pktp->maxlen) { + err = BCME_RANGE; + goto done; + } + + /* pkts in pool have same length */ + ASSERT(pktp->plen == PKTLEN(OSH_NULL, p)); + PKTSETPOOL(OSH_NULL, p, TRUE, pktp); + + pktp->len++; + pktpool_enq(pktp, p); + +#ifdef BCMDBG_POOL + pktp->dbg_q[pktp->dbg_qlen++].p = p; +#endif + +done: + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return err; +} + +/* Force pktpool_setmaxlen () into RAM as it uses a constant + * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips. + */ +int +BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen) +{ + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + if (maxlen > PKTPOOL_LEN_MAX) + maxlen = PKTPOOL_LEN_MAX; + + /* if pool is already beyond maxlen, then just cap it + * since we currently do not reduce the pool len + * already allocated + */ + pktp->maxlen = (pktp->len > maxlen) ? pktp->len : maxlen; + + /* protect shared resource */ + if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) + return BCME_ERROR; + + return pktp->maxlen; +} + +void +pktpool_emptycb_disable(pktpool_t *pktp, bool disable) +{ + ASSERT(pktp); + + /** + * To more efficiently use the cpu cycles, callbacks can be temporarily disabled. + * If callback is going to be re-enabled, check if any packet got + * freed and added back to the pool while callback was disabled. + * When this is the case do the callback now, provided that callback functions + * are registered and this call did not originate from pktpool_empty_notify. + */ + if ((!disable) && (pktp->cbcnt) && (pktp->empty == FALSE) && + (pktp->emptycb_disable == EMPTYCB_SKIPPED)) { + pktpool_avail_notify(pktp); + } + + /* Enable or temporarily disable callback when packet becomes available. */ + pktp->emptycb_disable = disable ? 
EMPTYCB_DISABLED : EMPTYCB_ENABLED; +} + +bool +pktpool_emptycb_disabled(pktpool_t *pktp) +{ + ASSERT(pktp); + return pktp->emptycb_disable != EMPTYCB_ENABLED; +} + +#ifdef BCMPKTPOOL +#include + +pktpool_t *pktpool_shared = NULL; + +#ifdef BCMFRAGPOOL +pktpool_t *pktpool_shared_lfrag = NULL; +#endif /* BCMFRAGPOOL */ + +pktpool_t *pktpool_shared_rxlfrag = NULL; + +static osl_t *pktpool_osh = NULL; + +void +hnd_pktpool_init(osl_t *osh) +{ + int n; + + /* Construct a packet pool registry before initializing packet pools */ + n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID); + if (n != PKTPOOL_MAXIMUM_ID) { + ASSERT(0); + return; + } + + pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t)); + if (pktpool_shared == NULL) { + ASSERT(0); + goto error1; + } + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t)); + if (pktpool_shared_lfrag == NULL) { + ASSERT(0); + goto error2; + } +#endif + +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) + pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t)); + if (pktpool_shared_rxlfrag == NULL) { + ASSERT(0); + goto error3; + } +#endif + + + /* + * At this early stage, there's not enough memory to allocate all + * requested pkts in the shared pool. Need to add to the pool + * after reclaim + * + * n = NRXBUFPOST + SDPCMD_RXBUFS; + * + * Initialization of packet pools may fail (BCME_ERROR), if the packet pool + * registry is not initialized or the registry is depleted. + * + * A BCME_NOMEM error only indicates that the requested number of packets + * were not filled into the pool. + */ + n = 1; + if (pktpool_init(osh, pktpool_shared, + &n, PKTBUFSZ, FALSE, lbuf_basic) == BCME_ERROR) { + ASSERT(0); + goto error4; + } + pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN); + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + n = 1; + if (pktpool_init(osh, pktpool_shared_lfrag, + &n, PKTFRAGSZ, TRUE, lbuf_frag) == BCME_ERROR) { + ASSERT(0); + goto error5; + } + pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN); +#endif +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) + n = 1; + if (pktpool_init(osh, pktpool_shared_rxlfrag, + &n, PKTRXFRAGSZ, TRUE, lbuf_rxfrag) == BCME_ERROR) { + ASSERT(0); + goto error6; + } + pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN); +#endif + + pktpool_osh = osh; + + return; + +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) +error6: +#endif + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + pktpool_deinit(osh, pktpool_shared_lfrag); +error5: +#endif + +#if (defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)) || \ + (defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)) + pktpool_deinit(osh, pktpool_shared); +#endif + +error4: +#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) + hnd_free(pktpool_shared_rxlfrag); + pktpool_shared_rxlfrag = (pktpool_t *)NULL; +error3: +#endif /* BCMRXFRAGPOOL */ + +#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) + hnd_free(pktpool_shared_lfrag); + pktpool_shared_lfrag = (pktpool_t *)NULL; +error2: +#endif /* BCMFRAGPOOL */ + + hnd_free(pktpool_shared); + pktpool_shared = (pktpool_t *)NULL; + +error1: + pktpool_dettach(osh); +} + +void +hnd_pktpool_fill(pktpool_t *pktpool, bool minimal) +{ + pktpool_fill(pktpool_osh, pktpool, minimal); +} + +/* refill pktpools after reclaim */ +void +hnd_pktpool_refill(bool minimal) +{ + if (POOL_ENAB(pktpool_shared)) { + pktpool_fill(pktpool_osh, pktpool_shared, minimal); + } +/* 
fragpool reclaim */ +#ifdef BCMFRAGPOOL + if (POOL_ENAB(pktpool_shared_lfrag)) { + pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal); + } +#endif /* BCMFRAGPOOL */ +/* rx fragpool reclaim */ +#ifdef BCMRXFRAGPOOL + if (POOL_ENAB(pktpool_shared_rxlfrag)) { + pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal); + } +#endif +} +#endif /* BCMPKTPOOL */ diff --git a/drivers/net/wireless/bcmdhd/hnd_pktq.c b/drivers/net/wireless/bcmdhd/hnd_pktq.c new file mode 100644 index 000000000000..4d1a7804d092 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/hnd_pktq.c @@ -0,0 +1,888 @@ +/* + * HND generic pktq operation primitives + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hnd_pktq.c 605726 2015-12-11 07:08:16Z $ + */ + +#include +#include +#include +#include +#include + +/* mutex macros for thread safe */ +#ifdef HND_PKTQ_THREAD_SAFE +#define HND_PKTQ_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex) +#define HND_PKTQ_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex) +#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec) +#define HND_PKTQ_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex) +#else +#define HND_PKTQ_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS +#define HND_PKTQ_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS +#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS +#define HND_PKTQ_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS +#endif + +/* + * osl multiple-precedence packet queue + * hi_prec is always >= the number of the highest non-empty precedence + */ +void * BCMFASTPATH +pktq_penq(struct pktq *pq, int prec, void *p) +{ + struct pktq_prec *q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + /* queueing chains not allowed and no segmented SKB (Kernel-3.18.y) */ + ASSERT(!((PKTLINK(p) != NULL) && (PKTLINK(p) != p))); + + ASSERT(!pktq_full(pq)); + ASSERT(!pktq_pfull(pq, prec)); + + q = &pq->q[prec]; + + if (q->head) + PKTSETLINK(q->tail, p); + else + q->head = p; + + q->tail = p; + q->len++; + + pq->len++; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +pktq_penq_head(struct pktq *pq, int prec, void *p) +{ + struct pktq_prec *q; + + /* protect shared resource */ + if 
(HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + /* queueing chains not allowed and no segmented SKB (Kernel-3.18.y) */ + ASSERT(!((PKTLINK(p) != NULL) && (PKTLINK(p) != p))); + + ASSERT(!pktq_full(pq)); + ASSERT(!pktq_pfull(pq, prec)); + + q = &pq->q[prec]; + + if (q->head == NULL) + q->tail = p; + + PKTSETLINK(p, q->head); + q->head = p; + q->len++; + + pq->len++; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +/* + * Append spktq 'list' to the tail of pktq 'pq' + */ +void BCMFASTPATH +pktq_append(struct pktq *pq, int prec, struct spktq *list) +{ + struct pktq_prec *q; + struct pktq_prec *list_q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + list_q = &list->q[0]; + + /* empty list check */ + if (list_q->head == NULL) + goto done; + + ASSERT(prec >= 0 && prec < pq->num_prec); + ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */ + + ASSERT(!pktq_full(pq)); + ASSERT(!pktq_pfull(pq, prec)); + + q = &pq->q[prec]; + + if (q->head) + PKTSETLINK(q->tail, list_q->head); + else + q->head = list_q->head; + + q->tail = list_q->tail; + q->len += list_q->len; + pq->len += list_q->len; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + + list_q->head = NULL; + list_q->tail = NULL; + list_q->len = 0; + list->len = 0; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; +} + +/* + * Prepend spktq 'list' to the head of pktq 'pq' + */ +void BCMFASTPATH +pktq_prepend(struct pktq *pq, int prec, struct spktq *list) +{ + struct pktq_prec *q; + struct pktq_prec *list_q; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + list_q = &list->q[0]; + + /* empty list check */ + if (list_q->head == NULL) + goto done; + + ASSERT(prec >= 0 && prec < pq->num_prec); + ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */ + + ASSERT(!pktq_full(pq)); + ASSERT(!pktq_pfull(pq, prec)); + + q = &pq->q[prec]; + + /* set the tail packet of list to point at the former pq head */ + PKTSETLINK(list_q->tail, q->head); + /* the new q head is the head of list */ + q->head = list_q->head; + + /* If the q tail was non-null, then it stays as is. 
+ * If the q tail was null, it is now the tail of list + */ + if (q->tail == NULL) { + q->tail = list_q->tail; + } + + q->len += list_q->len; + pq->len += list_q->len; + + if (pq->hi_prec < prec) + pq->hi_prec = (uint8)prec; + + list_q->head = NULL; + list_q->tail = NULL; + list_q->len = 0; + list->len = 0; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; +} + +void * BCMFASTPATH +pktq_pdeq(struct pktq *pq, int prec) +{ + struct pktq_prec *q; + void *p; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->len--; + + pq->len--; + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p) +{ + struct pktq_prec *q; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + + if (prev_p == NULL) + goto done; + + if ((p = PKTLINK(prev_p)) == NULL) + goto done; + + q->len--; + + pq->len--; + + PKTSETLINK(prev_p, PKTLINK(p)); + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg) +{ + struct pktq_prec *q; + void *p, *prev = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + p = q->head; + + while (p) { + if (fn == NULL || (*fn)(p, arg)) { + break; + } else { + prev = p; + p = PKTLINK(p); + } + } + if (p == NULL) + goto done; + + if (prev == NULL) { + if ((q->head = PKTLINK(p)) == NULL) { + q->tail = NULL; + } + } else { + PKTSETLINK(prev, PKTLINK(p)); + if (q->tail == p) { + q->tail = prev; + } + } + + q->len--; + + pq->len--; + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +pktq_pdeq_tail(struct pktq *pq, int prec) +{ + struct pktq_prec *q; + void *p, *prev; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + for (prev = NULL; p != q->tail; p = PKTLINK(p)) + prev = p; + + if (prev) + PKTSETLINK(prev, NULL); + else + q->head = NULL; + + q->tail = prev; + q->len--; + + pq->len--; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void +pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg) +{ + struct pktq_prec *q; + void *p, *next, *prev = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + q = &pq->q[prec]; + p = q->head; + while (p) { + next = PKTLINK(p); + if (fn == NULL || (*fn)(p, 
arg)) { + bool head = (p == q->head); + if (head) + q->head = next; + else + PKTSETLINK(prev, next); + PKTSETLINK(p, NULL); + PKTFREE(osh, p, dir); + q->len--; + pq->len--; + } else { + prev = p; + } + p = next; + } + + q->tail = prev; + + if (q->head == NULL) { + ASSERT(q->len == 0); + ASSERT(q->tail == NULL); + } + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; +} + +bool BCMFASTPATH +pktq_pdel(struct pktq *pq, void *pktbuf, int prec) +{ + bool ret = FALSE; + struct pktq_prec *q; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + /* Should this just assert pktbuf? */ + if (!pktbuf) + goto done; + + q = &pq->q[prec]; + + if (q->head == pktbuf) { + if ((q->head = PKTLINK(pktbuf)) == NULL) + q->tail = NULL; + } else { + for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p)) + ; + if (p == NULL) + goto done; + + PKTSETLINK(p, PKTLINK(pktbuf)); + if (q->tail == pktbuf) + q->tail = p; + } + + q->len--; + pq->len--; + PKTSETLINK(pktbuf, NULL); + ret = TRUE; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return ret; +} + +bool +pktq_init(struct pktq *pq, int num_prec, int max_len) +{ + int prec; + + if (HND_PKTQ_MUTEX_CREATE("pktq", &pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC); + + /* pq is variable size; only zero out what's requested */ + bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec)); + + pq->num_prec = (uint16)num_prec; + + pq->max = (uint16)max_len; + + for (prec = 0; prec < num_prec; prec++) + pq->q[prec].max = pq->max; + + return TRUE; +} + +bool +pktq_deinit(struct pktq *pq) +{ + if (HND_PKTQ_MUTEX_DELETE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return TRUE; +} + +void +pktq_set_max_plen(struct pktq *pq, int prec, int max_len) +{ + ASSERT(prec >= 0 && prec < pq->num_prec); + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + if (prec < pq->num_prec) + pq->q[prec].max = (uint16)max_len; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; +} + +void * BCMFASTPATH +pktq_deq(struct pktq *pq, int *prec_out) +{ + struct pktq_prec *q; + void *p = NULL; + int prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->len == 0) + goto done; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->len--; + + pq->len--; + + if (prec_out) + *prec_out = prec; + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * BCMFASTPATH +pktq_deq_tail(struct pktq *pq, int *prec_out) +{ + struct pktq_prec *q; + void *p = NULL, *prev; + int prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->len == 0) + goto done; + + for (prec = 0; prec < pq->hi_prec; prec++) + if (pq->q[prec].head) + break; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + 
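+	/* The precedence list is singly linked, so locating the element just + * before q->tail requires a walk from the head: dequeueing at the tail + * is O(n) in the queue length. + */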
+ for (prev = NULL; p != q->tail; p = PKTLINK(p)) + prev = p; + + if (prev) + PKTSETLINK(prev, NULL); + else + q->head = NULL; + + q->tail = prev; + q->len--; + + pq->len--; + + if (prec_out) + *prec_out = prec; + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +pktq_peek(struct pktq *pq, int *prec_out) +{ + int prec; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->len == 0) + goto done; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + if (prec_out) + *prec_out = prec; + + p = pq->q[prec].head; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void * +pktq_peek_tail(struct pktq *pq, int *prec_out) +{ + int prec; + void *p = NULL; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->len == 0) + goto done; + + for (prec = 0; prec < pq->hi_prec; prec++) + if (pq->q[prec].head) + break; + + if (prec_out) + *prec_out = prec; + + p = pq->q[prec].tail; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +void +pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg) +{ + int prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return; + + /* Optimize flush, if pktq len = 0, just return. + * pktq len of 0 means pktq's prec q's are all empty. + */ + if (pq->len == 0) + goto done; + + for (prec = 0; prec < pq->num_prec; prec++) + pktq_pflush(osh, pq, prec, dir, fn, arg); + if (fn == NULL) + ASSERT(pq->len == 0); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return; +} + +/* Return sum of lengths of a specific set of precedences */ +int +pktq_mlen(struct pktq *pq, uint prec_bmp) +{ + int prec, len; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + + len = 0; + + for (prec = 0; prec <= pq->hi_prec; prec++) + if (prec_bmp & (1 << prec)) + len += pq->q[prec].len; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return 0; + + return len; +} + +/* Priority peek from a specific set of precedences */ +void * BCMFASTPATH +pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out) +{ + struct pktq_prec *q; + void *p = NULL; + int prec; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->len == 0) + goto done; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL) + if (prec-- == 0) + goto done; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + if (prec_out) + *prec_out = prec; + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} +/* Priority dequeue from a specific set of precedences */ +void * BCMFASTPATH +pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out) +{ + struct pktq_prec *q; + void *p = NULL; + int prec; + + /* protect 
shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return NULL; + + if (pq->len == 0) + goto done; + + while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) + pq->hi_prec--; + + while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0)) + if (prec-- == 0) + goto done; + + q = &pq->q[prec]; + + if ((p = q->head) == NULL) + goto done; + + if ((q->head = PKTLINK(p)) == NULL) + q->tail = NULL; + + q->len--; + + if (prec_out) + *prec_out = prec; + + pq->len--; + + PKTSETLINK(p, NULL); + +done: + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return NULL; + + return p; +} + +#ifdef HND_PKTQ_THREAD_SAFE +int +pktq_pavail(struct pktq *pq, int prec) +{ + int ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + ret = pq->q[prec].max - pq->q[prec].len; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return 0; + + return ret; +} + +bool +pktq_pfull(struct pktq *pq, int prec) +{ + bool ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; + + ASSERT(prec >= 0 && prec < pq->num_prec); + + ret = pq->q[prec].len >= pq->q[prec].max; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return ret; +} + +int +pktq_avail(struct pktq *pq) +{ + int ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return 0; + + ret = pq->max - pq->len; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return 0; + + return ret; +} + +bool +pktq_full(struct pktq *pq) +{ + bool ret; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) + return FALSE; + + ret = pq->len >= pq->max; + + /* protect shared resource */ + if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS) + return FALSE; + + return ret; +} +#endif /* HND_PKTQ_THREAD_SAFE */ diff --git a/drivers/net/wireless/bcmdhd/hndpmu.c b/drivers/net/wireless/bcmdhd/hndpmu.c new file mode 100644 index 000000000000..c0c658203dda --- /dev/null +++ b/drivers/net/wireless/bcmdhd/hndpmu.c @@ -0,0 +1,292 @@ +/* + * Misc utility routines for accessing PMU corerev specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hndpmu.c 530092 2015-01-29 04:44:58Z $ + */ + + +/* + * Note: this file contains PLL/FLL related functions. A chip can contain multiple PLLs/FLLs. + * However, in the context of this file the baseband ('BB') PLL/FLL is referred to. + * + * Throughout this code, the prefixes 'pmu0_', 'pmu1_' and 'pmu2_' are used. + * They refer to different revisions of the PMU (which is at revision 18 @ Apr 25, 2012) + * pmu1_ marks the transition from PLL to ADFLL (Digital Frequency Locked Loop). It supports + * fractional frequency generation. pmu2_ does not support fractional frequency generation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PMU_ERROR(args) + +#define PMU_MSG(args) + +/* To check in verbose debugging messages not intended + * to be on except on private builds. + */ +#define PMU_NONE(args) + +/** contains resource bit positions for a specific chip */ +struct rsc_per_chip_s { + uint8 ht_avail; + uint8 macphy_clkavail; + uint8 ht_start; + uint8 otp_pu; +}; + +typedef struct rsc_per_chip_s rsc_per_chip_t; + + +/* SDIO Pad drive strength to select value mappings. + * The last strength value in each table must be 0 (the tri-state value). + */ +typedef struct { + uint8 strength; /* Pad Drive Strength in mA */ + uint8 sel; /* Chip-specific select value */ +} sdiod_drive_str_t; + +/* SDIO Drive Strength to sel value table for PMU Rev 1 */ +static const sdiod_drive_str_t sdiod_drive_strength_tab1[] = { + {4, 0x2}, + {2, 0x3}, + {1, 0x0}, + {0, 0x0} }; + +/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */ +static const sdiod_drive_str_t sdiod_drive_strength_tab2[] = { + {12, 0x7}, + {10, 0x6}, + {8, 0x5}, + {6, 0x4}, + {4, 0x2}, + {2, 0x1}, + {0, 0x0} }; + +/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */ +static const sdiod_drive_str_t sdiod_drive_strength_tab3[] = { + {32, 0x7}, + {26, 0x6}, + {22, 0x5}, + {16, 0x4}, + {12, 0x3}, + {8, 0x2}, + {4, 0x1}, + {0, 0x0} }; + +/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8v) */ +static const sdiod_drive_str_t sdiod_drive_strength_tab4_1v8[] = { + {32, 0x6}, + {26, 0x7}, + {22, 0x4}, + {16, 0x5}, + {12, 0x2}, + {8, 0x3}, + {4, 0x0}, + {0, 0x1} }; + +/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.2v) */ + +/* SDIO Drive Strength to sel value table for PMU Rev 11 (2.5v) */ + +/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */ +static const sdiod_drive_str_t sdiod_drive_strength_tab5_1v8[] = { + {6, 0x7}, + {5, 0x6}, + {4, 0x5}, + {3, 0x4}, + {2, 0x2}, + {1, 0x1}, + {0, 0x0} }; + +/* SDIO Drive Strength to sel value table for PMU Rev 13 (3.3v) */ + +/** SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */ +static const sdiod_drive_str_t sdiod_drive_strength_tab6_1v8[] = { + {3, 0x3}, + {2, 0x2}, + {1, 0x1}, + {0, 0x0} }; + + +/** + * SDIO Drive Strength to sel value table for 43143 PMU Rev 17, see Confluence 43143 Toplevel + * architecture page, section 'PMU Chip Control 1 Register definition', click link to picture + * BCM43143_sel_sdio_signals.jpg. Valid after PMU Chip Control 0 Register, bit31 (override) has + * been written '1'. 
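+ * The 3.3V table below is selected at compile time unless BCM_SDIO_VDDIO is + * defined to a value other than 33, in which case the 1.8V table is used.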
+ */ +#if !defined(BCM_SDIO_VDDIO) || BCM_SDIO_VDDIO == 33 + +static const sdiod_drive_str_t sdiod_drive_strength_tab7_3v3[] = { + /* note: for 14, 10, 6 and 2mA hw timing is not met according to rtl team */ + {16, 0x7}, + {12, 0x5}, + {8, 0x3}, + {4, 0x1} }; /* note: 43143 does not support tristate */ + +#else + +static const sdiod_drive_str_t sdiod_drive_strength_tab7_1v8[] = { + /* note: for 7, 5, 3 and 1mA hw timing is not met according to rtl team */ + {8, 0x7}, + {6, 0x5}, + {4, 0x3}, + {2, 0x1} }; /* note: 43143 does not support tristate */ + +#endif /* BCM_SDIO_VDDIO */ + +#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu)) + +/** + * Balance between stable SDIO operation and power consumption is achieved using this function. + * Note that each drive strength table is for a specific VDDIO of the SDIO pads, ideally this + * function should read the VDDIO itself to select the correct table. For now it has been solved + * with the 'BCM_SDIO_VDDIO' preprocessor constant. + * + * 'drivestrength': desired pad drive strength in mA. Drive strength of 0 requests tri-state (if + * hardware supports this), if no hw support drive strength is not programmed. + */ +void +si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength) +{ + sdiod_drive_str_t *str_tab = NULL; + uint32 str_mask = 0; /* only alter desired bits in PMU chipcontrol 1 register */ + uint32 str_shift = 0; + uint32 str_ovr_pmuctl = PMU_CHIPCTL0; /* PMU chipcontrol register containing override bit */ + uint32 str_ovr_pmuval = 0; /* position of bit within this register */ + pmuregs_t *pmu; + uint origidx; + + if (!(sih->cccaps & CC_CAP_PMU)) { + return; + } + + /* Remember original core before switch to chipc/pmu */ + origidx = si_coreidx(sih); + if (AOB_ENAB(sih)) { + pmu = si_setcore(sih, PMU_CORE_ID, 0); + } else { + pmu = si_setcoreidx(sih, SI_CC_IDX); + } + ASSERT(pmu != NULL); + + switch (SDIOD_DRVSTR_KEY(CHIPID(sih->chip), sih->pmurev)) { + case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1): + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab1; + str_mask = 0x30000000; + str_shift = 28; + break; + case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2): + case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3): + case SDIOD_DRVSTR_KEY(BCM4315_CHIP_ID, 4): + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab2; + str_mask = 0x00003800; + str_shift = 11; + break; + case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8): + case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 11): + if (sih->pmurev == 8) { + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab3; + } + else if (sih->pmurev == 11) { + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8; + } + str_mask = 0x00003800; + str_shift = 11; + break; + case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12): + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8; + str_mask = 0x00003800; + str_shift = 11; + break; + case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13): + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab5_1v8; + str_mask = 0x00003800; + str_shift = 11; + break; + case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17): + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab6_1v8; + str_mask = 0x00001800; + str_shift = 11; + break; + case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17): +#if !defined(BCM_SDIO_VDDIO) || BCM_SDIO_VDDIO == 33 + if (drivestrength >= ARRAYLAST(sdiod_drive_strength_tab7_3v3)->strength) { + str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab7_3v3; + } +#else + if (drivestrength >= ARRAYLAST(sdiod_drive_strength_tab7_1v8)->strength) { + str_tab = (sdiod_drive_str_t 
*)&sdiod_drive_strength_tab7_1v8; + } +#endif /* BCM_SDIO_VDDIO */ + str_mask = 0x00000007; + str_ovr_pmuval = PMU43143_CC0_SDIO_DRSTR_OVR; + break; + default: + PMU_MSG(("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n", + bcm_chipname( + CHIPID(sih->chip), chn, 8), CHIPREV(sih->chiprev), sih->pmurev)); + break; + } + + if (str_tab != NULL) { + uint32 cc_data_temp; + int i; + + /* Pick the lowest available drive strength equal or greater than the + * requested strength. Drive strength of 0 requests tri-state. + */ + for (i = 0; drivestrength < str_tab[i].strength; i++) + ; + + if (i > 0 && drivestrength > str_tab[i].strength) + i--; + + W_REG(osh, &pmu->chipcontrol_addr, PMU_CHIPCTL1); + cc_data_temp = R_REG(osh, &pmu->chipcontrol_data); + cc_data_temp &= ~str_mask; + cc_data_temp |= str_tab[i].sel << str_shift; + W_REG(osh, &pmu->chipcontrol_data, cc_data_temp); + if (str_ovr_pmuval) { /* enables the selected drive strength */ + W_REG(osh, &pmu->chipcontrol_addr, str_ovr_pmuctl); + OR_REG(osh, &pmu->chipcontrol_data, str_ovr_pmuval); + } + PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n", + drivestrength, str_tab[i].strength)); + } + + /* Return to original core */ + si_setcoreidx(sih, origidx); +} /* si_sdiod_drive_strength_init */ diff --git a/drivers/net/wireless/bcmdhd/include/aidmp.h b/drivers/net/wireless/bcmdhd/include/aidmp.h new file mode 100644 index 000000000000..6654364b9103 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/aidmp.h @@ -0,0 +1,402 @@ +/* + * Broadcom AMBA Interconnect definitions. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: aidmp.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _AIDMP_H +#define _AIDMP_H + +/* Manufacturer Ids */ +#define MFGID_ARM 0x43b +#define MFGID_BRCM 0x4bf +#define MFGID_MIPS 0x4a7 + +/* Component Classes */ +#define CC_SIM 0 +#define CC_EROM 1 +#define CC_CORESIGHT 9 +#define CC_VERIF 0xb +#define CC_OPTIMO 0xd +#define CC_GEN 0xe +#define CC_PRIMECELL 0xf + +/* Enumeration ROM registers */ +#define ER_EROMENTRY 0x000 +#define ER_REMAPCONTROL 0xe00 +#define ER_REMAPSELECT 0xe04 +#define ER_MASTERSELECT 0xe10 +#define ER_ITCR 0xf00 +#define ER_ITIP 0xf04 + +/* Erom entries */ +#define ER_TAG 0xe +#define ER_TAG1 0x6 +#define ER_VALID 1 +#define ER_CI 0 +#define ER_MP 2 +#define ER_ADD 4 +#define ER_END 0xe +#define ER_BAD 0xffffffff +#define ER_SZ_MAX 4096 /* 4KB */ + +/* EROM CompIdentA */ +#define CIA_MFG_MASK 0xfff00000 +#define CIA_MFG_SHIFT 20 +#define CIA_CID_MASK 0x000fff00 +#define CIA_CID_SHIFT 8 +#define CIA_CCL_MASK 0x000000f0 +#define CIA_CCL_SHIFT 4 + +/* EROM CompIdentB */ +#define CIB_REV_MASK 0xff000000 +#define CIB_REV_SHIFT 24 +#define CIB_NSW_MASK 0x00f80000 +#define CIB_NSW_SHIFT 19 +#define CIB_NMW_MASK 0x0007c000 +#define CIB_NMW_SHIFT 14 +#define CIB_NSP_MASK 0x00003e00 +#define CIB_NSP_SHIFT 9 +#define CIB_NMP_MASK 0x000001f0 +#define CIB_NMP_SHIFT 4 + +/* EROM MasterPortDesc */ +#define MPD_MUI_MASK 0x0000ff00 +#define MPD_MUI_SHIFT 8 +#define MPD_MP_MASK 0x000000f0 +#define MPD_MP_SHIFT 4 + +/* EROM AddrDesc */ +#define AD_ADDR_MASK 0xfffff000 +#define AD_SP_MASK 0x00000f00 +#define AD_SP_SHIFT 8 +#define AD_ST_MASK 0x000000c0 +#define AD_ST_SHIFT 6 +#define AD_ST_SLAVE 0x00000000 +#define AD_ST_BRIDGE 0x00000040 +#define AD_ST_SWRAP 0x00000080 +#define AD_ST_MWRAP 0x000000c0 +#define AD_SZ_MASK 0x00000030 +#define AD_SZ_SHIFT 4 +#define AD_SZ_4K 0x00000000 +#define AD_SZ_8K 0x00000010 +#define AD_SZ_16K 0x00000020 +#define AD_SZ_SZD 0x00000030 +#define AD_AG32 0x00000008 +#define AD_ADDR_ALIGN 0x00000fff +#define AD_SZ_BASE 0x00001000 /* 4KB */ + +/* EROM SizeDesc */ +#define SD_SZ_MASK 0xfffff000 +#define SD_SG32 0x00000008 +#define SD_SZ_ALIGN 0x00000fff + + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) + +typedef volatile struct _aidmp { + uint32 oobselina30; /* 0x000 */ + uint32 oobselina74; /* 0x004 */ + uint32 PAD[6]; + uint32 oobselinb30; /* 0x020 */ + uint32 oobselinb74; /* 0x024 */ + uint32 PAD[6]; + uint32 oobselinc30; /* 0x040 */ + uint32 oobselinc74; /* 0x044 */ + uint32 PAD[6]; + uint32 oobselind30; /* 0x060 */ + uint32 oobselind74; /* 0x064 */ + uint32 PAD[38]; + uint32 oobselouta30; /* 0x100 */ + uint32 oobselouta74; /* 0x104 */ + uint32 PAD[6]; + uint32 oobseloutb30; /* 0x120 */ + uint32 oobseloutb74; /* 0x124 */ + uint32 PAD[6]; + uint32 oobseloutc30; /* 0x140 */ + uint32 oobseloutc74; /* 0x144 */ + uint32 PAD[6]; + uint32 oobseloutd30; /* 0x160 */ + uint32 oobseloutd74; /* 0x164 */ + uint32 PAD[38]; + uint32 oobsynca; /* 0x200 */ + uint32 oobseloutaen; /* 0x204 */ + uint32 PAD[6]; + uint32 oobsyncb; /* 0x220 */ + uint32 oobseloutben; /* 0x224 */ + uint32 PAD[6]; + uint32 oobsyncc; /* 0x240 */ + uint32 oobseloutcen; /* 0x244 */ + uint32 PAD[6]; + uint32 oobsyncd; /* 0x260 */ + uint32 oobseloutden; /* 0x264 */ + uint32 PAD[38]; + uint32 oobaextwidth; /* 0x300 */ + uint32 oobainwidth; /* 0x304 */ + uint32 oobaoutwidth; /* 0x308 */ + uint32 PAD[5]; + uint32 oobbextwidth; /* 0x320 */ + uint32 oobbinwidth; /* 0x324 */ + uint32 oobboutwidth; /* 0x328 */ + uint32 PAD[5]; + uint32 oobcextwidth; /* 0x340 */ + 
uint32 oobcinwidth; /* 0x344 */ + uint32 oobcoutwidth; /* 0x348 */ + uint32 PAD[5]; + uint32 oobdextwidth; /* 0x360 */ + uint32 oobdinwidth; /* 0x364 */ + uint32 oobdoutwidth; /* 0x368 */ + uint32 PAD[37]; + uint32 ioctrlset; /* 0x400 */ + uint32 ioctrlclear; /* 0x404 */ + uint32 ioctrl; /* 0x408 */ + uint32 PAD[61]; + uint32 iostatus; /* 0x500 */ + uint32 PAD[127]; + uint32 ioctrlwidth; /* 0x700 */ + uint32 iostatuswidth; /* 0x704 */ + uint32 PAD[62]; + uint32 resetctrl; /* 0x800 */ + uint32 resetstatus; /* 0x804 */ + uint32 resetreadid; /* 0x808 */ + uint32 resetwriteid; /* 0x80c */ + uint32 PAD[60]; + uint32 errlogctrl; /* 0x900 */ + uint32 errlogdone; /* 0x904 */ + uint32 errlogstatus; /* 0x908 */ + uint32 errlogaddrlo; /* 0x90c */ + uint32 errlogaddrhi; /* 0x910 */ + uint32 errlogid; /* 0x914 */ + uint32 errloguser; /* 0x918 */ + uint32 errlogflags; /* 0x91c */ + uint32 PAD[56]; + uint32 intstatus; /* 0xa00 */ + uint32 PAD[255]; + uint32 config; /* 0xe00 */ + uint32 PAD[63]; + uint32 itcr; /* 0xf00 */ + uint32 PAD[3]; + uint32 itipooba; /* 0xf10 */ + uint32 itipoobb; /* 0xf14 */ + uint32 itipoobc; /* 0xf18 */ + uint32 itipoobd; /* 0xf1c */ + uint32 PAD[4]; + uint32 itipoobaout; /* 0xf30 */ + uint32 itipoobbout; /* 0xf34 */ + uint32 itipoobcout; /* 0xf38 */ + uint32 itipoobdout; /* 0xf3c */ + uint32 PAD[4]; + uint32 itopooba; /* 0xf50 */ + uint32 itopoobb; /* 0xf54 */ + uint32 itopoobc; /* 0xf58 */ + uint32 itopoobd; /* 0xf5c */ + uint32 PAD[4]; + uint32 itopoobain; /* 0xf70 */ + uint32 itopoobbin; /* 0xf74 */ + uint32 itopoobcin; /* 0xf78 */ + uint32 itopoobdin; /* 0xf7c */ + uint32 PAD[4]; + uint32 itopreset; /* 0xf90 */ + uint32 PAD[15]; + uint32 peripherialid4; /* 0xfd0 */ + uint32 peripherialid5; /* 0xfd4 */ + uint32 peripherialid6; /* 0xfd8 */ + uint32 peripherialid7; /* 0xfdc */ + uint32 peripherialid0; /* 0xfe0 */ + uint32 peripherialid1; /* 0xfe4 */ + uint32 peripherialid2; /* 0xfe8 */ + uint32 peripherialid3; /* 0xfec */ + uint32 componentid0; /* 0xff0 */ + uint32 componentid1; /* 0xff4 */ + uint32 componentid2; /* 0xff8 */ + uint32 componentid3; /* 0xffc */ +} aidmp_t; + +#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */ + +/* Out-of-band Router registers */ +#define OOB_BUSCONFIG 0x020 +#define OOB_STATUSA 0x100 +#define OOB_STATUSB 0x104 +#define OOB_STATUSC 0x108 +#define OOB_STATUSD 0x10c +#define OOB_ENABLEA0 0x200 +#define OOB_ENABLEA1 0x204 +#define OOB_ENABLEA2 0x208 +#define OOB_ENABLEA3 0x20c +#define OOB_ENABLEB0 0x280 +#define OOB_ENABLEB1 0x284 +#define OOB_ENABLEB2 0x288 +#define OOB_ENABLEB3 0x28c +#define OOB_ENABLEC0 0x300 +#define OOB_ENABLEC1 0x304 +#define OOB_ENABLEC2 0x308 +#define OOB_ENABLEC3 0x30c +#define OOB_ENABLED0 0x380 +#define OOB_ENABLED1 0x384 +#define OOB_ENABLED2 0x388 +#define OOB_ENABLED3 0x38c +#define OOB_ITCR 0xf00 +#define OOB_ITIPOOBA 0xf10 +#define OOB_ITIPOOBB 0xf14 +#define OOB_ITIPOOBC 0xf18 +#define OOB_ITIPOOBD 0xf1c +#define OOB_ITOPOOBA 0xf30 +#define OOB_ITOPOOBB 0xf34 +#define OOB_ITOPOOBC 0xf38 +#define OOB_ITOPOOBD 0xf3c + +/* DMP wrapper registers */ +#define AI_OOBSELINA30 0x000 +#define AI_OOBSELINA74 0x004 +#define AI_OOBSELINB30 0x020 +#define AI_OOBSELINB74 0x024 +#define AI_OOBSELINC30 0x040 +#define AI_OOBSELINC74 0x044 +#define AI_OOBSELIND30 0x060 +#define AI_OOBSELIND74 0x064 +#define AI_OOBSELOUTA30 0x100 +#define AI_OOBSELOUTA74 0x104 +#define AI_OOBSELOUTB30 0x120 +#define AI_OOBSELOUTB74 0x124 +#define AI_OOBSELOUTC30 0x140 +#define AI_OOBSELOUTC74 0x144 +#define AI_OOBSELOUTD30 0x160 +#define 
AI_OOBSELOUTD74 0x164 +#define AI_OOBSYNCA 0x200 +#define AI_OOBSELOUTAEN 0x204 +#define AI_OOBSYNCB 0x220 +#define AI_OOBSELOUTBEN 0x224 +#define AI_OOBSYNCC 0x240 +#define AI_OOBSELOUTCEN 0x244 +#define AI_OOBSYNCD 0x260 +#define AI_OOBSELOUTDEN 0x264 +#define AI_OOBAEXTWIDTH 0x300 +#define AI_OOBAINWIDTH 0x304 +#define AI_OOBAOUTWIDTH 0x308 +#define AI_OOBBEXTWIDTH 0x320 +#define AI_OOBBINWIDTH 0x324 +#define AI_OOBBOUTWIDTH 0x328 +#define AI_OOBCEXTWIDTH 0x340 +#define AI_OOBCINWIDTH 0x344 +#define AI_OOBCOUTWIDTH 0x348 +#define AI_OOBDEXTWIDTH 0x360 +#define AI_OOBDINWIDTH 0x364 +#define AI_OOBDOUTWIDTH 0x368 + + +#define AI_IOCTRLSET 0x400 +#define AI_IOCTRLCLEAR 0x404 +#define AI_IOCTRL 0x408 +#define AI_IOSTATUS 0x500 +#define AI_RESETCTRL 0x800 +#define AI_RESETSTATUS 0x804 + +#define AI_IOCTRLWIDTH 0x700 +#define AI_IOSTATUSWIDTH 0x704 + +#define AI_RESETREADID 0x808 +#define AI_RESETWRITEID 0x80c +#define AI_ERRLOGCTRL 0x900 +#define AI_ERRLOGDONE 0x904 +#define AI_ERRLOGSTATUS 0x908 +#define AI_ERRLOGADDRLO 0x90c +#define AI_ERRLOGADDRHI 0x910 +#define AI_ERRLOGID 0x914 +#define AI_ERRLOGUSER 0x918 +#define AI_ERRLOGFLAGS 0x91c +#define AI_INTSTATUS 0xa00 +#define AI_CONFIG 0xe00 +#define AI_ITCR 0xf00 +#define AI_ITIPOOBA 0xf10 +#define AI_ITIPOOBB 0xf14 +#define AI_ITIPOOBC 0xf18 +#define AI_ITIPOOBD 0xf1c +#define AI_ITIPOOBAOUT 0xf30 +#define AI_ITIPOOBBOUT 0xf34 +#define AI_ITIPOOBCOUT 0xf38 +#define AI_ITIPOOBDOUT 0xf3c +#define AI_ITOPOOBA 0xf50 +#define AI_ITOPOOBB 0xf54 +#define AI_ITOPOOBC 0xf58 +#define AI_ITOPOOBD 0xf5c +#define AI_ITOPOOBAIN 0xf70 +#define AI_ITOPOOBBIN 0xf74 +#define AI_ITOPOOBCIN 0xf78 +#define AI_ITOPOOBDIN 0xf7c +#define AI_ITOPRESET 0xf90 +#define AI_PERIPHERIALID4 0xfd0 +#define AI_PERIPHERIALID5 0xfd4 +#define AI_PERIPHERIALID6 0xfd8 +#define AI_PERIPHERIALID7 0xfdc +#define AI_PERIPHERIALID0 0xfe0 +#define AI_PERIPHERIALID1 0xfe4 +#define AI_PERIPHERIALID2 0xfe8 +#define AI_PERIPHERIALID3 0xfec +#define AI_COMPONENTID0 0xff0 +#define AI_COMPONENTID1 0xff4 +#define AI_COMPONENTID2 0xff8 +#define AI_COMPONENTID3 0xffc + +/* resetctrl */ +#define AIRC_RESET 1 + +/* errlogctrl */ +#define AIELC_TO_EXP_MASK 0x0000001f0 /* backplane timeout exponent */ +#define AIELC_TO_EXP_SHIFT 4 +#define AIELC_TO_ENAB_SHIFT 9 /* backplane timeout enable */ + +/* errlogdone */ +#define AIELD_ERRDONE_MASK 0x3 + +/* errlogstatus */ +#define AIELS_TIMEOUT_MASK 0x3 + +/* config */ +#define AICFG_OOB 0x00000020 +#define AICFG_IOS 0x00000010 +#define AICFG_IOC 0x00000008 +#define AICFG_TO 0x00000004 +#define AICFG_ERRL 0x00000002 +#define AICFG_RST 0x00000001 + +/* bit defines for AI_OOBSELOUTB74 reg */ +#define OOB_SEL_OUTEN_B_5 15 +#define OOB_SEL_OUTEN_B_6 23 + +/* AI_OOBSEL for A/B/C/D, 0-7 */ +#define AI_OOBSEL_MASK 0x1F +#define AI_OOBSEL_0_SHIFT 0 +#define AI_OOBSEL_1_SHIFT 8 +#define AI_OOBSEL_2_SHIFT 16 +#define AI_OOBSEL_3_SHIFT 24 +#define AI_OOBSEL_4_SHIFT 0 +#define AI_OOBSEL_5_SHIFT 8 +#define AI_OOBSEL_6_SHIFT 16 +#define AI_OOBSEL_7_SHIFT 24 +#define AI_IOCTRL_ENABLE_D11_PME (1 << 14) + +#endif /* _AIDMP_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcm_cfg.h b/drivers/net/wireless/bcmdhd/include/bcm_cfg.h new file mode 100644 index 000000000000..e71f5c82da6c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcm_cfg.h @@ -0,0 +1,32 @@ +/* + * BCM common config options + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this 
software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcm_cfg.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _bcm_cfg_h_ +#define _bcm_cfg_h_ +#endif /* _bcm_cfg_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h b/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h new file mode 100644 index 000000000000..79ae0f5d4a9c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcm_mpool_pub.h @@ -0,0 +1,364 @@ +/* + * Memory pools library, Public interface + * + * API Overview + * + * This package provides a memory allocation subsystem based on pools of + * homogenous objects. + * + * Instrumentation is available for reporting memory utilization both + * on a per-data-structure basis and system wide. + * + * There are two main types defined in this API. + * + * pool manager: A singleton object that acts as a factory for + * pool allocators. It also is used for global + * instrumentation, such as reporting all blocks + * in use across all data structures. The pool manager + * creates and provides individual memory pools + * upon request to application code. + * + * memory pool: An object for allocating homogenous memory blocks. + * + * Global identifiers in this module use the following prefixes: + * bcm_mpm_* Memory pool manager + * bcm_mp_* Memory pool + * + * There are two main types of memory pools: + * + * prealloc: The contiguous memory block of objects can either be supplied + * by the client or malloc'ed by the memory manager. The objects are + * allocated out of a block of memory and freed back to the block. + * + * heap: The memory pool allocator uses the heap (malloc/free) for memory. + * In this case, the pool allocator is just providing statistics + * and instrumentation on top of the heap, without modifying the heap + * allocation implementation. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcm_mpool_pub.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _BCM_MPOOL_PUB_H +#define _BCM_MPOOL_PUB_H 1 + +#include /* needed for uint16 */ + + +/* +************************************************************************** +* +* Type definitions, handles +* +************************************************************************** +*/ + +/* Forward declaration of OSL handle. */ +struct osl_info; + +/* Forward declaration of string buffer. */ +struct bcmstrbuf; + +/* + * Opaque type definition for the pool manager handle. This object is used for global + * memory pool operations such as obtaining a new pool, deleting a pool, iterating and + * instrumentation/debugging. + */ +struct bcm_mpm_mgr; +typedef struct bcm_mpm_mgr *bcm_mpm_mgr_h; + +/* + * Opaque type definition for an instance of a pool. This handle is used for allocating + * and freeing memory through the pool, as well as management/instrumentation on this + * specific pool. + */ +struct bcm_mp_pool; +typedef struct bcm_mp_pool *bcm_mp_pool_h; + + +/* + * To make instrumentation more readable, every memory + * pool must have a readable name. Pool names are up to + * 8 bytes including '\0' termination. (7 printable characters.) + */ +#define BCM_MP_NAMELEN 8 + + +/* + * Type definition for pool statistics. + */ +typedef struct bcm_mp_stats { + char name[BCM_MP_NAMELEN]; /* Name of this pool. */ + unsigned int objsz; /* Object size allocated in this pool */ + uint16 nobj; /* Total number of objects in this pool */ + uint16 num_alloc; /* Number of objects currently allocated */ + uint16 high_water; /* Max number of allocated objects. */ + uint16 failed_alloc; /* Failed allocations. */ +} bcm_mp_stats_t; + + +/* +************************************************************************** +* +* API Routines on the pool manager. +* +************************************************************************** +*/ + +/* + * bcm_mpm_init() - initialize the whole memory pool system. + * + * Parameters: + * osh: INPUT Operating system handle. Needed for heap memory allocation. + * max_pools: INPUT Maximum number of mempools supported. + * mgr: OUTPUT The handle is written with the new pools manager object/handle. + * + * Returns: + * BCME_OK Object initialized successfully. May be used. + * BCME_NOMEM Initialization failed due to no memory. Object must not be used. + */ +int bcm_mpm_init(struct osl_info *osh, int max_pools, bcm_mpm_mgr_h *mgrp); + + +/* + * bcm_mpm_deinit() - de-initialize the whole memory pool system. + * + * Parameters: + * mgr: INPUT Pointer to pool manager handle. + * + * Returns: + * BCME_OK Memory pool manager successfully de-initialized. + * other Indicated error occurred during de-initialization. + */ +int bcm_mpm_deinit(bcm_mpm_mgr_h *mgrp); + +/* + * bcm_mpm_create_prealloc_pool() - Create a new pool for fixed size objects. The + * pool uses a contiguous block of pre-alloced + * memory. The memory block may either be provided + * by the client or dynamically allocated by the + * pool manager.
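+ * + * A minimal usage sketch (illustrative only; 'my_obj_t' is a hypothetical + * element type; error handling elided): + * + * bcm_mp_pool_h pool; + * if (bcm_mpm_create_prealloc_pool(mgr, sizeof(my_obj_t), 16, NULL, 0, + * "myobjs", &pool) == BCME_OK) { + * void *obj = bcm_mp_alloc(pool); + * if (obj != NULL) + * bcm_mp_free(pool, obj); + * }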
+ * + * Parameters: + * mgr: INPUT The handle to the pool manager + * obj_sz: INPUT Size of objects that will be allocated by the new pool + * Must be >= sizeof(void *). + * nobj: INPUT Maximum number of concurrently existing objects to support + * memstart INPUT Pointer to the memory to use, or NULL to malloc() + * memsize INPUT Number of bytes referenced from memstart (for error checking). + * Must be 0 if 'memstart' is NULL. + * poolname INPUT For instrumentation, the name of the pool + * newp: OUTPUT The handle for the new pool, if creation is successful + * + * Returns: + * BCME_OK Pool created ok. + * other Pool not created due to indicated error. newpoolp set to NULL. + * + * + */ +int bcm_mpm_create_prealloc_pool(bcm_mpm_mgr_h mgr, + unsigned int obj_sz, + int nobj, + void *memstart, + unsigned int memsize, + const char poolname[BCM_MP_NAMELEN], + bcm_mp_pool_h *newp); + + +/* + * bcm_mpm_delete_prealloc_pool() - Delete a memory pool. This should only be called after + * all memory objects have been freed back to the pool. + * + * Parameters: + * mgr: INPUT The handle to the pools manager + * pool: INPUT The handle of the pool to delete + * + * Returns: + * BCME_OK Pool deleted ok. + * other Pool not deleted due to indicated error. + * + */ +int bcm_mpm_delete_prealloc_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp); + +/* + * bcm_mpm_create_heap_pool() - Create a new pool for fixed size objects. The memory + * pool allocator uses the heap (malloc/free) for memory. + * In this case, the pool allocator is just providing + * statistics and instrumentation on top of the heap, + * without modifying the heap allocation implementation. + * + * Parameters: + * mgr: INPUT The handle to the pool manager + * obj_sz: INPUT Size of objects that will be allocated by the new pool + * poolname INPUT For instrumentation, the name of the pool + * newp: OUTPUT The handle for the new pool, if creation is successful + * + * Returns: + * BCME_OK Pool created ok. + * other Pool not created due to indicated error. newpoolp set to NULL. + * + * + */ +int bcm_mpm_create_heap_pool(bcm_mpm_mgr_h mgr, unsigned int obj_sz, + const char poolname[BCM_MP_NAMELEN], + bcm_mp_pool_h *newp); + + +/* + * bcm_mpm_delete_heap_pool() - Delete a memory pool. This should only be called after + * all memory objects have been freed back to the pool. + * + * Parameters: + * mgr: INPUT The handle to the pools manager + * pool: INPUT The handle of the pool to delete + * + * Returns: + * BCME_OK Pool deleted ok. + * other Pool not deleted due to indicated error. + * + */ +int bcm_mpm_delete_heap_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp); + + +/* + * bcm_mpm_stats() - Return stats for all pools + * + * Parameters: + * mgr: INPUT The handle to the pools manager + * stats: OUTPUT Array of pool statistics. + * nentries: MOD Max elements in 'stats' array on INPUT. Actual number + * of array elements copied to 'stats' on OUTPUT. + * + * Returns: + * BCME_OK Ok + * other Error getting stats. + * + */ +int bcm_mpm_stats(bcm_mpm_mgr_h mgr, bcm_mp_stats_t *stats, int *nentries); + + +/* + * bcm_mpm_dump() - Display statistics on all pools + * + * Parameters: + * mgr: INPUT The handle to the pools manager + * b: OUTPUT Output buffer. + * + * Returns: + * BCME_OK Ok + * other Error during dump. + * + */ +int bcm_mpm_dump(bcm_mpm_mgr_h mgr, struct bcmstrbuf *b); + + +/* + * bcm_mpm_get_obj_size() - The size of memory objects may need to be padded to + * compensate for alignment requirements of the objects. 
+ * This function provides the padded object size. If clients + * pre-allocate a memory slab for a memory pool, the + * padded object size should be used by the client to allocate + * the memory slab (in order to provide sufficient space for + * the maximum number of objects). + * + * Parameters: + * mgr: INPUT The handle to the pools manager. + * obj_sz: INPUT Input object size. + * padded_obj_sz: OUTPUT Padded object size. + * + * Returns: + * BCME_OK Ok + * BCME_BADARG Bad arguments. + * + */ +int bcm_mpm_get_obj_size(bcm_mpm_mgr_h mgr, unsigned int obj_sz, unsigned int *padded_obj_sz); + + +/* +*************************************************************************** +* +* API Routines on a specific pool. +* +*************************************************************************** +*/ + + +/* + * bcm_mp_alloc() - Allocate a memory pool object. + * + * Parameters: + * pool: INPUT The handle to the pool. + * + * Returns: + * A pointer to the new object. NULL on error. + * + */ +void* bcm_mp_alloc(bcm_mp_pool_h pool); + +/* + * bcm_mp_free() - Free a memory pool object. + * + * Parameters: + * pool: INPUT The handle to the pool. + * objp: INPUT A pointer to the object to free. + * + * Returns: + * BCME_OK Ok + * other Error during free. + * + */ +int bcm_mp_free(bcm_mp_pool_h pool, void *objp); + +/* + * bcm_mp_stats() - Return stats for this pool + * + * Parameters: + * pool: INPUT The handle to the pool + * stats: OUTPUT Pool statistics + * + * Returns: + * BCME_OK Ok + * other Error getting statistics. + * + */ +int bcm_mp_stats(bcm_mp_pool_h pool, bcm_mp_stats_t *stats); + + +/* + * bcm_mp_dump() - Dump a pool + * + * Parameters: + * pool: INPUT The handle to the pool + * b OUTPUT Output buffer + * + * Returns: + * BCME_OK Ok + * other Error during dump. + * + */ +int bcm_mp_dump(bcm_mp_pool_h pool, struct bcmstrbuf *b); + + +#endif /* _BCM_MPOOL_PUB_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcm_ring.h b/drivers/net/wireless/bcmdhd/include/bcm_ring.h new file mode 100644 index 000000000000..5f1b38c65e3c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcm_ring.h @@ -0,0 +1,616 @@ +#ifndef __bcm_ring_included__ +#define __bcm_ring_included__ + +/* + * +---------------------------------------------------------------------------- + * + * bcm_ring.h : Ring context abstraction + * + * The ring context tracks the WRITE and READ indices where elements may be + * produced and consumed respectively. All elements in the ring need to be + * fixed size. + * + * NOTE: A ring of size N may only hold N-1 elements. + * + * +---------------------------------------------------------------------------- + * + * API Notes: + * + * Ring manipulation API allows for: + * Pending operations: Often before some work can be completed, it may be + * desired that several resources are available, e.g. space for production in + * a ring. Approaches such as #1) reserve resources one by one and return them + * if another required resource is not available, or #2) employ a two-pass + * algorithm of first testing whether all resources are available, have + * an impact on performance-critical code. The approach taken here is more akin + * to approach #2, where a test for resource availability essentially also + * provides the index for production in an un-committed state. + * The same approach is taken for the consumer side. + * + * - Pending production: Fetch the next index where a ring element may be + * produced. The caller may not commit the WRITE of the element.
+ * - Pending consumption: Fetch the next index where a ring element may be
+ *   consumed; the READ is not committed until the caller explicitly commits
+ *   it (see bcm_ring_cons_done).
+ *
+ * Producer side API:
+ * - bcm_ring_is_full   : Test whether ring is full
+ * - bcm_ring_prod      : Fetch index where an element may be produced (commit)
+ * - bcm_ring_prod_pend : Fetch index where an element may be produced (pending)
+ * - bcm_ring_prod_done : Commit a previous pending produce fetch
+ * - bcm_ring_prod_avail: Fetch total number of free slots eligible for production
+ *
+ * Consumer side API:
+ * - bcm_ring_is_empty  : Test whether ring is empty
+ * - bcm_ring_cons      : Fetch index where an element may be consumed (commit)
+ * - bcm_ring_cons_pend : Fetch index where an element may be consumed (pending)
+ * - bcm_ring_cons_done : Commit a previous pending consume fetch
+ * - bcm_ring_cons_avail: Fetch total number of elements eligible for consumption
+ *
+ * - bcm_ring_sync_read : Sync read offset in peer ring, from local ring
+ * - bcm_ring_sync_write: Sync write offset in peer ring, from local ring
+ *
+ * +----------------------------------------------------------------------------
+ *
+ * Design Notes:
+ * The following items are not tracked in a ring context (design decision)
+ * - width of a ring element.
+ * - depth of the ring.
+ * - base of the buffer, where the elements are stored.
+ * - count of number of free slots in the ring
+ *
+ * Implementation Notes:
+ * - When BCM_RING_DEBUG is enabled, an explicit bcm_ring_init() is needed.
+ * - BCM_RING_EMPTY and BCM_RING_FULL are (-1)
+ *
+ * +----------------------------------------------------------------------------
+ *
+ * Usage Notes:
+ * An application may instantiate a ring of some fixed sized elements, by defining
+ * - a ring data buffer to store the ring elements.
+ * - depth of the ring (max number of elements managed by ring context).
+ *   Preferably, depth may be represented as a constant.
+ * - width of a ring element: to be used in pointer arithmetic with the ring's
+ *   data buffer base and an index to fetch the ring element.
+ *
+ * Use bcm_workq_t to instantiate a pair of workq constructs, one for the
+ * producer and the other for the consumer, both pointing to the same circular
+ * buffer. The producer may operate on its own local workq and flush the write
+ * index to the consumer. Likewise the consumer may use its local workq and
+ * flush the read index to the producer. This way we do not repeatedly access
+ * the peer's context. The two peers may reside on different CPU cores with a
+ * private L1 data cache.
+ * +----------------------------------------------------------------------------
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * <> + * + * $Id: bcm_ring.h 591283 2015-10-07 11:52:00Z $ + * + * -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*- + * vim: set ts=4 noet sw=4 tw=80: + * + * +---------------------------------------------------------------------------- + */ + +#ifdef ____cacheline_aligned +#define __ring_aligned ____cacheline_aligned +#else +#define __ring_aligned +#endif + +/* Conditional compile for debug */ +/* #define BCM_RING_DEBUG */ + +#define BCM_RING_EMPTY (-1) +#define BCM_RING_FULL (-1) +#define BCM_RING_NULL ((bcm_ring_t *)NULL) + +#if defined(BCM_RING_DEBUG) +#define RING_ASSERT(exp) ASSERT(exp) +#define BCM_RING_IS_VALID(ring) (((ring) != BCM_RING_NULL) && \ + ((ring)->self == (ring))) +#else /* ! BCM_RING_DEBUG */ +#define RING_ASSERT(exp) do {} while (0) +#define BCM_RING_IS_VALID(ring) ((ring) != BCM_RING_NULL) +#endif /* ! BCM_RING_DEBUG */ + +#define BCM_RING_SIZE_IS_VALID(ring_size) ((ring_size) > 0) + +/* + * +---------------------------------------------------------------------------- + * Ring Context + * +---------------------------------------------------------------------------- + */ +typedef struct bcm_ring { /* Ring context */ +#if defined(BCM_RING_DEBUG) + struct bcm_ring *self; /* ptr to self for IS VALID test */ +#endif /* BCM_RING_DEBUG */ + int write __ring_aligned; /* WRITE index in a circular ring */ + int read __ring_aligned; /* READ index in a circular ring */ +} bcm_ring_t; + + +static INLINE void bcm_ring_init(bcm_ring_t *ring); +static INLINE void bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from); +static INLINE bool bcm_ring_is_empty(bcm_ring_t *ring); + +static INLINE int __bcm_ring_next_write(bcm_ring_t *ring, const int ring_size); + +static INLINE bool __bcm_ring_full(bcm_ring_t *ring, int next_write); +static INLINE bool bcm_ring_is_full(bcm_ring_t *ring, const int ring_size); + +static INLINE void bcm_ring_prod_done(bcm_ring_t *ring, int write); +static INLINE int bcm_ring_prod_pend(bcm_ring_t *ring, int *pend_write, + const int ring_size); +static INLINE int bcm_ring_prod(bcm_ring_t *ring, const int ring_size); + +static INLINE void bcm_ring_cons_done(bcm_ring_t *ring, int read); +static INLINE int bcm_ring_cons_pend(bcm_ring_t *ring, int *pend_read, + const int ring_size); +static INLINE int bcm_ring_cons(bcm_ring_t *ring, const int ring_size); + +static INLINE void bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self); +static INLINE void bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self); + +static INLINE int bcm_ring_prod_avail(const bcm_ring_t *ring, + const int ring_size); +static INLINE int bcm_ring_cons_avail(const bcm_ring_t *ring, + const int ring_size); +static INLINE void bcm_ring_cons_all(bcm_ring_t *ring); + + +/** + * bcm_ring_init - initialize a ring context. 
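+ * Resets the WRITE and READ indices to zero, i.e. an empty ring.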
+ * @ring: pointer to a ring context
+ */
+static INLINE void
+bcm_ring_init(bcm_ring_t *ring)
+{
+    ASSERT(ring != (bcm_ring_t *)NULL);
+#if defined(BCM_RING_DEBUG)
+    ring->self = ring;
+#endif /* BCM_RING_DEBUG */
+    ring->write = 0;
+    ring->read = 0;
+}
+
+/**
+ * bcm_ring_copy - copy construct a ring
+ * @to: pointer to the new ring context
+ * @from: pointer to the original ring context
+ */
+static INLINE void
+bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from)
+{
+    bcm_ring_init(to);
+
+    to->write = from->write;
+    to->read = from->read;
+}
+
+/**
+ * bcm_ring_is_empty - "Boolean" test whether ring is empty.
+ * @ring: pointer to a ring context
+ *
+ * PS. does not return BCM_RING_EMPTY value.
+ */
+static INLINE bool
+bcm_ring_is_empty(bcm_ring_t *ring)
+{
+    RING_ASSERT(BCM_RING_IS_VALID(ring));
+    return (ring->read == ring->write);
+}
+
+
+/**
+ * __bcm_ring_next_write - determine the index where the next write may occur
+ *                         (with wrap-around).
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ *
+ * PRIVATE INTERNAL USE ONLY.
+ */
+static INLINE int
+__bcm_ring_next_write(bcm_ring_t *ring, const int ring_size)
+{
+    RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+    return ((ring->write + 1) % ring_size);
+}
+
+
+/**
+ * __bcm_ring_full - support function for ring full test.
+ * @ring: pointer to a ring context
+ * @next_write: next location in ring where an element is to be produced
+ *
+ * PRIVATE INTERNAL USE ONLY.
+ */
+static INLINE bool
+__bcm_ring_full(bcm_ring_t *ring, int next_write)
+{
+    return (next_write == ring->read);
+}
+
+
+/**
+ * bcm_ring_is_full - "Boolean" test whether a ring is full.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ *
+ * PS. does not return BCM_RING_FULL value.
+ */
+static INLINE bool
+bcm_ring_is_full(bcm_ring_t *ring, const int ring_size)
+{
+    int next_write;
+    RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+    next_write = __bcm_ring_next_write(ring, ring_size);
+    return __bcm_ring_full(ring, next_write);
+}
+
+
+/**
+ * bcm_ring_prod_done - commit a previously pending index where production
+ *                      was requested.
+ * @ring: pointer to a ring context
+ * @write: index into ring up to where production was done.
+ */
+static INLINE void
+bcm_ring_prod_done(bcm_ring_t *ring, int write)
+{
+    RING_ASSERT(BCM_RING_IS_VALID(ring));
+    ring->write = write;
+}
+
+
+/**
+ * bcm_ring_prod_pend - Fetch in "pend" mode, the index where an element may be
+ *                      produced.
+ * @ring: pointer to a ring context
+ * @pend_write: next index, after the returned index
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_prod_pend(bcm_ring_t *ring, int *pend_write, const int ring_size)
+{
+    int rtn;
+    RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+    *pend_write = __bcm_ring_next_write(ring, ring_size);
+    if (__bcm_ring_full(ring, *pend_write)) {
+        *pend_write = BCM_RING_FULL;
+        rtn = BCM_RING_FULL;
+    } else {
+        /* production is not committed, caller needs to explicitly commit */
+        rtn = ring->write;
+    }
+    return rtn;
+}
+
+
+/**
+ * bcm_ring_prod - Fetch and "commit" the next index where a ring element may
+ *                 be produced.
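+ * Equivalent to bcm_ring_prod_pend() immediately followed by
+ * bcm_ring_prod_done() when the ring is not full.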
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_prod(bcm_ring_t *ring, const int ring_size)
+{
+    int next_write, prod_write;
+    RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+
+    next_write = __bcm_ring_next_write(ring, ring_size);
+    if (__bcm_ring_full(ring, next_write)) {
+        prod_write = BCM_RING_FULL;
+    } else {
+        prod_write = ring->write;
+        bcm_ring_prod_done(ring, next_write); /* "commit" production */
+    }
+    return prod_write;
+}
+
+
+/**
+ * bcm_ring_cons_done - commit a previously pending read
+ * @ring: pointer to a ring context
+ * @read: index up to which elements have been consumed.
+ */
+static INLINE void
+bcm_ring_cons_done(bcm_ring_t *ring, int read)
+{
+    RING_ASSERT(BCM_RING_IS_VALID(ring));
+    ring->read = read;
+}
+
+
+/**
+ * bcm_ring_cons_pend - fetch in "pend" mode, the next index where a ring
+ *                      element may be consumed.
+ * @ring: pointer to a ring context
+ * @pend_read: index into ring up to which elements may be consumed.
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_cons_pend(bcm_ring_t *ring, int *pend_read, const int ring_size)
+{
+    int rtn;
+    RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+    if (bcm_ring_is_empty(ring)) {
+        *pend_read = BCM_RING_EMPTY;
+        rtn = BCM_RING_EMPTY;
+    } else {
+        *pend_read = (ring->read + 1) % ring_size;
+        /* consumption is not committed, caller needs to explicitly commit */
+        rtn = ring->read;
+    }
+    return rtn;
+}
+
+
+/**
+ * bcm_ring_cons - fetch and "commit" the next index where a ring element may
+ *                 be consumed.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_cons(bcm_ring_t *ring, const int ring_size)
+{
+    int cons_read;
+    RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+    if (bcm_ring_is_empty(ring)) {
+        cons_read = BCM_RING_EMPTY;
+    } else {
+        cons_read = ring->read;
+        ring->read = (ring->read + 1) % ring_size; /* read is committed */
+    }
+    return cons_read;
+}
+
+
+/**
+ * bcm_ring_sync_read - on consumption, update peer's read index.
+ * @peer: pointer to peer's producer ring context
+ * @self: pointer to consumer's ring context
+ */
+static INLINE void
+bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self)
+{
+    RING_ASSERT(BCM_RING_IS_VALID(peer));
+    RING_ASSERT(BCM_RING_IS_VALID(self));
+    peer->read = self->read; /* flush read update to peer producer */
+}
+
+
+/**
+ * bcm_ring_sync_write - on production, update peer's write index.
+ * @peer: pointer to peer's consumer ring context
+ * @self: pointer to producer's ring context
+ */
+static INLINE void
+bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self)
+{
+    RING_ASSERT(BCM_RING_IS_VALID(peer));
+    RING_ASSERT(BCM_RING_IS_VALID(self));
+    peer->write = self->write; /* flush write update to peer consumer */
+}
+
+
+/**
+ * bcm_ring_prod_avail - fetch total number of available empty slots in the
+ *                       ring for production.
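+ * Since one slot is always left unused to distinguish a full ring from an
+ * empty one, bcm_ring_prod_avail() + bcm_ring_cons_avail() == ring_size - 1.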
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_prod_avail(const bcm_ring_t *ring, const int ring_size)
+{
+    int prod_avail;
+    RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+    if (ring->write >= ring->read) {
+        prod_avail = (ring_size - (ring->write - ring->read) - 1);
+    } else {
+        prod_avail = (ring->read - (ring->write + 1));
+    }
+    ASSERT(prod_avail < ring_size);
+    return prod_avail;
+}
+
+
+/**
+ * bcm_ring_cons_avail - fetch total number of available elements for consumption.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_cons_avail(const bcm_ring_t *ring, const int ring_size)
+{
+    int cons_avail;
+    RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+    if (ring->read == ring->write) {
+        cons_avail = 0;
+    } else if (ring->read > ring->write) {
+        cons_avail = ((ring_size - ring->read) + ring->write);
+    } else {
+        cons_avail = ring->write - ring->read;
+    }
+    ASSERT(cons_avail < ring_size);
+    return cons_avail;
+}
+
+
+/**
+ * bcm_ring_cons_all - set ring in state where all elements are consumed.
+ * @ring: pointer to a ring context
+ */
+static INLINE void
+bcm_ring_cons_all(bcm_ring_t *ring)
+{
+    ring->read = ring->write;
+}
+
+
+/**
+ * Work Queue
+ * A work queue is composed of a ring of work items, of a specified depth.
+ * It HAS-A bcm_ring object, comprising a RD and WR offset, to implement a
+ * producer/consumer circular ring.
+ */
+
+struct bcm_workq {
+    bcm_ring_t ring;        /* Ring context abstraction */
+    struct bcm_workq *peer; /* Peer workq context */
+    void *buffer;           /* Buffer storage for work items in workQ */
+    int ring_size;          /* Depth of workQ */
+} __ring_aligned;
+
+typedef struct bcm_workq bcm_workq_t;
+
+
+/* #define BCM_WORKQ_DEBUG */
+#if defined(BCM_WORKQ_DEBUG)
+#define WORKQ_ASSERT(exp) ASSERT(exp)
+#else /* ! BCM_WORKQ_DEBUG */
+#define WORKQ_ASSERT(exp) do {} while (0)
+#endif /* ! BCM_WORKQ_DEBUG */
+
+#define WORKQ_AUDIT(workq) \
+    WORKQ_ASSERT((workq) != BCM_WORKQ_NULL); \
+    WORKQ_ASSERT(WORKQ_PEER(workq) != BCM_WORKQ_NULL); \
+    WORKQ_ASSERT((workq)->buffer == WORKQ_PEER(workq)->buffer); \
+    WORKQ_ASSERT((workq)->ring_size == WORKQ_PEER(workq)->ring_size);
+
+#define BCM_WORKQ_NULL ((bcm_workq_t *)NULL)
+
+#define WORKQ_PEER(workq) ((workq)->peer)
+#define WORKQ_RING(workq) (&((workq)->ring))
+#define WORKQ_PEER_RING(workq) (&((workq)->peer->ring))
+
+#define WORKQ_ELEMENT(__elem_type, __workq, __index) ({ \
+    WORKQ_ASSERT((__workq) != BCM_WORKQ_NULL); \
+    WORKQ_ASSERT((__index) < ((__workq)->ring_size)); \
+    ((__elem_type *)((__workq)->buffer)) + (__index); \
+})
+
+
+static INLINE void bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer,
+                                  void *buffer, int ring_size);
+
+static INLINE bool bcm_workq_is_empty(bcm_workq_t *workq_prod);
+
+static INLINE void bcm_workq_prod_sync(bcm_workq_t *workq_prod);
+static INLINE void bcm_workq_cons_sync(bcm_workq_t *workq_cons);
+
+static INLINE void bcm_workq_prod_refresh(bcm_workq_t *workq_prod);
+static INLINE void bcm_workq_cons_refresh(bcm_workq_t *workq_cons);
+
+/**
+ * bcm_workq_init - initialize a workq
+ * @workq: pointer to a workq context
+ * @workq_peer: pointer to the peer workq context
+ * @buffer: pointer to a pre-allocated circular buffer to serve as a ring
+ * @ring_size: size of the ring in terms of max number of elements.
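+ *
+ * A minimal usage sketch (work_item_t, some_item and process() are
+ * illustrative placeholders, not part of this API):
+ *
+ *   work_item_t items[16];
+ *   bcm_workq_t prod, cons;
+ *   bcm_workq_init(&prod, &cons, items, 16);
+ *
+ *   // producer: reserve a slot, fill it, then publish the write index
+ *   int idx = bcm_ring_prod(WORKQ_RING(&prod), prod.ring_size);
+ *   if (idx != BCM_RING_FULL) {
+ *       *WORKQ_ELEMENT(work_item_t, &prod, idx) = some_item;
+ *       bcm_workq_prod_sync(&prod);     // cons::write <--- prod::write
+ *   }
+ *
+ *   // consumer: pick up the producer's write index, drain, publish read index
+ *   bcm_workq_cons_refresh(&cons);      // cons::write <--- prod::write
+ *   while (!bcm_ring_is_empty(WORKQ_RING(&cons))) {
+ *       idx = bcm_ring_cons(WORKQ_RING(&cons), cons.ring_size);
+ *       process(WORKQ_ELEMENT(work_item_t, &cons, idx));
+ *   }
+ *   bcm_workq_cons_sync(&cons);         // prod::read <--- cons::read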
+ */
+static INLINE void
+bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer,
+               void *buffer, int ring_size)
+{
+    ASSERT(workq != BCM_WORKQ_NULL);
+    ASSERT(workq_peer != BCM_WORKQ_NULL);
+    ASSERT(buffer != NULL);
+    ASSERT(ring_size > 0);
+
+    WORKQ_PEER(workq) = workq_peer;
+    WORKQ_PEER(workq_peer) = workq;
+
+    bcm_ring_init(WORKQ_RING(workq));
+    bcm_ring_init(WORKQ_RING(workq_peer));
+
+    workq->buffer = workq_peer->buffer = buffer;
+    workq->ring_size = workq_peer->ring_size = ring_size;
+}
+
+/**
+ * bcm_workq_is_empty - test whether there is work
+ * @workq_prod: producer's workq
+ */
+static INLINE bool
+bcm_workq_is_empty(bcm_workq_t *workq_prod)
+{
+    return bcm_ring_is_empty(WORKQ_RING(workq_prod));
+}
+
+/**
+ * bcm_workq_prod_sync - Commit the producer write index to peer workq's ring
+ * @workq_prod: producer's workq whose write index must be synced to peer
+ */
+static INLINE void
+bcm_workq_prod_sync(bcm_workq_t *workq_prod)
+{
+    WORKQ_AUDIT(workq_prod);
+
+    /* cons::write <--- prod::write */
+    bcm_ring_sync_write(WORKQ_PEER_RING(workq_prod), WORKQ_RING(workq_prod));
+}
+
+/**
+ * bcm_workq_cons_sync - Commit the consumer read index to the peer workq's ring
+ * @workq_cons: consumer's workq whose read index must be synced to peer
+ */
+static INLINE void
+bcm_workq_cons_sync(bcm_workq_t *workq_cons)
+{
+    WORKQ_AUDIT(workq_cons);
+
+    /* prod::read <--- cons::read */
+    bcm_ring_sync_read(WORKQ_PEER_RING(workq_cons), WORKQ_RING(workq_cons));
+}
+
+
+/**
+ * bcm_workq_prod_refresh - Fetch the updated consumer's read index
+ * @workq_prod: producer's workq whose read index must be refreshed from peer
+ */
+static INLINE void
+bcm_workq_prod_refresh(bcm_workq_t *workq_prod)
+{
+    WORKQ_AUDIT(workq_prod);
+
+    /* prod::read <--- cons::read */
+    bcm_ring_sync_read(WORKQ_RING(workq_prod), WORKQ_PEER_RING(workq_prod));
+}
+
+/**
+ * bcm_workq_cons_refresh - Fetch the updated producer's write index
+ * @workq_cons: consumer's workq whose write index must be refreshed from peer
+ */
+static INLINE void
+bcm_workq_cons_refresh(bcm_workq_t *workq_cons)
+{
+    WORKQ_AUDIT(workq_cons);
+
+    /* cons::write <--- prod::write */
+    bcm_ring_sync_write(WORKQ_RING(workq_cons), WORKQ_PEER_RING(workq_cons));
+}
+
+
+#endif /* __bcm_ring_included__ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmcdc.h b/drivers/net/wireless/bcmdhd/include/bcmcdc.h
new file mode 100644
index 000000000000..a95dc31c27bd
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmcdc.h
@@ -0,0 +1,135 @@
+/*
+ * CDC network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: bcmcdc.h 514727 2014-11-12 03:02:48Z $
+ */
+#ifndef _bcmcdc_h_
+#define _bcmcdc_h_
+#include <proto/ethernet.h>
+
+typedef struct cdc_ioctl {
+    uint32 cmd;    /* ioctl command value */
+    uint32 len;    /* lower 16: output buflen; upper 16: input buflen (excludes header) */
+    uint32 flags;  /* flag definitions given below */
+    uint32 status; /* status code returned from the device */
+} cdc_ioctl_t;
+
+/* Max valid buffer size that can be sent to the dongle */
+#define CDC_MAX_MSG_SIZE ETHER_MAX_LEN
+
+/* len field is divided into input and output buffer lengths */
+#define CDCL_IOC_OUTLEN_MASK 0x0000FFFF /* maximum or expected response length, */
+                                        /* excluding IOCTL header */
+#define CDCL_IOC_OUTLEN_SHIFT 0
+#define CDCL_IOC_INLEN_MASK 0xFFFF0000 /* input buffer length, excluding IOCTL header */
+#define CDCL_IOC_INLEN_SHIFT 16
+
+/* CDC flag definitions */
+#define CDCF_IOC_ERROR 0x01 /* 0=success, 1=ioctl cmd failed */
+#define CDCF_IOC_SET 0x02 /* 0=get, 1=set cmd */
+#define CDCF_IOC_OVL_IDX_MASK 0x3c /* overlay region index mask */
+#define CDCF_IOC_OVL_RSV 0x40 /* 1=reserve this overlay region */
+#define CDCF_IOC_OVL 0x80 /* 1=this ioctl corresponds to an overlay */
+#define CDCF_IOC_ACTION_MASK 0xfe /* SET/GET, OVL_IDX, OVL_RSV, OVL mask */
+#define CDCF_IOC_ACTION_SHIFT 1 /* SET/GET, OVL_IDX, OVL_RSV, OVL shift */
+#define CDCF_IOC_IF_MASK 0xF000 /* I/F index */
+#define CDCF_IOC_IF_SHIFT 12
+#define CDCF_IOC_ID_MASK 0xFFFF0000 /* used to uniquely id an ioctl req/resp pairing */
+#define CDCF_IOC_ID_SHIFT 16 /* # of bits of shift for ID Mask */
+
+#define CDC_IOC_IF_IDX(flags) (((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)
+#define CDC_IOC_ID(flags) (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)
+
+#define CDC_GET_IF_IDX(hdr) \
+    ((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT))
+#define CDC_SET_IF_IDX(hdr, idx) \
+    ((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT)))
+
+/*
+ * BDC header
+ *
+ * The BDC header is used on data packets to convey priority across USB.
+ */
+
+struct bdc_header {
+    uint8 flags;      /* Flags */
+    uint8 priority;   /* 802.1d Priority 0:2 bits, 4:7 USB flow control info */
+    uint8 flags2;
+    uint8 dataOffset; /* Offset from end of BDC header to packet data, in
+                       * 4-byte words. Leaves room for optional headers.
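+                       * e.g. a dataOffset of 1 means the packet data begins
+                       * 4 bytes past the end of this fixed 4-byte header.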
+ */ +}; + +#define BDC_HEADER_LEN 4 + +/* flags field bitmap */ +#define BDC_FLAG_80211_PKT 0x01 /* Packet is in 802.11 format (dongle -> host) */ +#define BDC_FLAG_SUM_GOOD 0x04 /* Dongle has verified good RX checksums */ +#define BDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums: host->device */ +#define BDC_FLAG_EVENT_MSG 0x08 /* Payload contains an event msg: device->host */ +#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */ +#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */ + +/* priority field bitmap */ +#define BDC_PRIORITY_MASK 0x07 +#define BDC_PRIORITY_FC_MASK 0xf0 /* flow control info mask */ +#define BDC_PRIORITY_FC_SHIFT 4 /* flow control info shift */ + +/* flags2 field bitmap */ +#define BDC_FLAG2_IF_MASK 0x0f /* interface index (host <-> dongle) */ +#define BDC_FLAG2_IF_SHIFT 0 +#define BDC_FLAG2_FC_FLAG 0x10 /* flag to indicate if pkt contains */ + /* FLOW CONTROL info only */ + +/* version numbers */ +#define BDC_PROTO_VER_1 1 /* Old Protocol version */ +#define BDC_PROTO_VER 2 /* Protocol version */ + +/* flags2.if field access macros */ +#define BDC_GET_IF_IDX(hdr) \ + ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT)) +#define BDC_SET_IF_IDX(hdr, idx) \ + ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT))) + +#define BDC_FLAG2_PAD_MASK 0xf0 +#define BDC_FLAG_PAD_MASK 0x03 +#define BDC_FLAG2_PAD_SHIFT 2 +#define BDC_FLAG_PAD_SHIFT 0 +#define BDC_FLAG2_PAD_IDX 0x3c +#define BDC_FLAG_PAD_IDX 0x03 +#define BDC_GET_PAD_LEN(hdr) \ + ((int)(((((hdr)->flags2) & BDC_FLAG2_PAD_MASK) >> BDC_FLAG2_PAD_SHIFT) | \ + ((((hdr)->flags) & BDC_FLAG_PAD_MASK) >> BDC_FLAG_PAD_SHIFT))) +#define BDC_SET_PAD_LEN(hdr, idx) \ + ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_PAD_MASK) | \ + (((idx) & BDC_FLAG2_PAD_IDX) << BDC_FLAG2_PAD_SHIFT))); \ + ((hdr)->flags = (((hdr)->flags & ~BDC_FLAG_PAD_MASK) | \ + (((idx) & BDC_FLAG_PAD_IDX) << BDC_FLAG_PAD_SHIFT))) + +#endif /* _bcmcdc_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmdefs.h b/drivers/net/wireless/bcmdhd/include/bcmdefs.h new file mode 100644 index 000000000000..a02499996f61 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmdefs.h @@ -0,0 +1,382 @@ +/* + * Misc system wide definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ *
+ *
+ * <>
+ *
+ * $Id: bcmdefs.h 601026 2015-11-20 06:53:19Z $
+ */
+
+#ifndef _bcmdefs_h_
+#define _bcmdefs_h_
+
+/*
+ * This file does not need to be included explicitly; it is included
+ * automatically whenever typedefs.h is included.
+ */
+
+/* Use BCM_REFERENCE to suppress warnings about intentionally-unused function
+ * arguments or local variables.
+ */
+#define BCM_REFERENCE(data) ((void)(data))
+
+/* Allow for suppressing unused variable warnings. */
+#ifdef __GNUC__
+#define UNUSED_VAR __attribute__ ((unused))
+#else
+#define UNUSED_VAR
+#endif
+
+/* Compile-time assert can be used in place of ASSERT if the expression evaluates
+ * to a constant at compile time.
+ */
+#define STATIC_ASSERT(expr) { \
+    /* Make sure the expression is constant. */ \
+    typedef enum { _STATIC_ASSERT_NOT_CONSTANT = (expr) } _static_assert_e UNUSED_VAR; \
+    /* Make sure the expression is true. */ \
+    typedef char STATIC_ASSERT_FAIL[(expr) ? 1 : -1] UNUSED_VAR; \
+}
+
+/* Reclaiming text and data :
+ * The following macros specify special linker sections that can be reclaimed
+ * after a system is considered 'up'.
+ * BCMATTACHFN is also used for detach functions (it's not worth having a BCMDETACHFN,
+ * as in most cases, the attach function calls the detach function to clean up on error).
+ */
+
+#define bcmreclaimed 0
+#define BCMATTACHDATA(_data) _data
+#define BCMATTACHFN(_fn) _fn
+#define BCMPREATTACHDATA(_data) _data
+#define BCMPREATTACHFN(_fn) _fn
+#define BCMINITDATA(_data) _data
+#define BCMINITFN(_fn) _fn
+#define BCMUNINITFN(_fn) _fn
+#define BCMNMIATTACHFN(_fn) _fn
+#define BCMNMIATTACHDATA(_data) _data
+#define CONST const
+
+#undef BCM47XX_CA9
+
+#ifndef BCMFASTPATH
+#define BCMFASTPATH
+#define BCMFASTPATH_HOST
+#endif /* BCMFASTPATH */
+
+
+/* Use the BCMRAMFN() macro to tag functions in source that must be included in RAM (excluded from
+ * ROM). This should eliminate the need to manually specify these functions in the ROM config file.
+ * It should only be used in special cases where the function must be in RAM for *all* ROM-based
+ * chips.
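+ * In this (non-ROM) build BCMRAMFN(_fn) simply expands to _fn, so e.g.
+ * "int BCMRAMFN(foo)(void);" declares a plain "int foo(void);".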
+ */
+#define BCMRAMFN(_fn) _fn
+
+#define STATIC static
+
+/* Bus types */
+#define SI_BUS 0 /* SOC Interconnect */
+#define PCI_BUS 1 /* PCI target */
+#define PCMCIA_BUS 2 /* PCMCIA target */
+#define SDIO_BUS 3 /* SDIO target */
+#define JTAG_BUS 4 /* JTAG */
+#define USB_BUS 5 /* USB (does not support R/W REG) */
+#define SPI_BUS 6 /* gSPI target */
+#define RPC_BUS 7 /* RPC target */
+
+/* Allows size optimization for single-bus image */
+#ifdef BCMBUSTYPE
+#define BUSTYPE(bus) (BCMBUSTYPE)
+#else
+#define BUSTYPE(bus) (bus)
+#endif
+
+/* Allows size optimization for single-backplane image */
+#ifdef BCMCHIPTYPE
+#define CHIPTYPE(bus) (BCMCHIPTYPE)
+#else
+#define CHIPTYPE(bus) (bus)
+#endif
+
+
+/* Allows size optimization for SPROM support */
+#if defined(BCMSPROMBUS)
+#define SPROMBUS (BCMSPROMBUS)
+#elif defined(SI_PCMCIA_SROM)
+#define SPROMBUS (PCMCIA_BUS)
+#else
+#define SPROMBUS (PCI_BUS)
+#endif
+
+/* Allows size optimization for single-chip image */
+#ifdef BCMCHIPID
+#define CHIPID(chip) (BCMCHIPID)
+#else
+#define CHIPID(chip) (chip)
+#endif
+
+#ifdef BCMCHIPREV
+#define CHIPREV(rev) (BCMCHIPREV)
+#else
+#define CHIPREV(rev) (rev)
+#endif
+
+#ifdef BCMPCIEREV
+#define PCIECOREREV(rev) (BCMPCIEREV)
+#else
+#define PCIECOREREV(rev) (rev)
+#endif
+
+/* Defines for DMA Address Width - Shared between OSL and HNDDMA */
+#define DMADDR_MASK_32 0x0 /* Address mask for 32-bits */
+#define DMADDR_MASK_30 0xc0000000 /* Address mask for 30-bits */
+#define DMADDR_MASK_26 0xFC000000 /* Address mask for 26-bits */
+#define DMADDR_MASK_0 0xffffffff /* Address mask for 0-bits (hi-part) */
+
+#define DMADDRWIDTH_26 26 /* 26-bit addressing capability */
+#define DMADDRWIDTH_30 30 /* 30-bit addressing capability */
+#define DMADDRWIDTH_32 32 /* 32-bit addressing capability */
+#define DMADDRWIDTH_63 63 /* 64-bit addressing capability */
+#define DMADDRWIDTH_64 64 /* 64-bit addressing capability */
+
+typedef struct {
+    uint32 loaddr;
+    uint32 hiaddr;
+} dma64addr_t;
+
+#define PHYSADDR64HI(_pa) ((_pa).hiaddr)
+#define PHYSADDR64HISET(_pa, _val) \
+    do { \
+        (_pa).hiaddr = (_val); \
+    } while (0)
+#define PHYSADDR64LO(_pa) ((_pa).loaddr)
+#define PHYSADDR64LOSET(_pa, _val) \
+    do { \
+        (_pa).loaddr = (_val); \
+    } while (0)
+
+#ifdef BCMDMA64OSL
+typedef dma64addr_t dmaaddr_t;
+#define PHYSADDRHI(_pa) PHYSADDR64HI(_pa)
+#define PHYSADDRHISET(_pa, _val) PHYSADDR64HISET(_pa, _val)
+#define PHYSADDRLO(_pa) PHYSADDR64LO(_pa)
+#define PHYSADDRLOSET(_pa, _val) PHYSADDR64LOSET(_pa, _val)
+#define PHYSADDRTOULONG(_pa, _ulong) \
+    do { \
+        _ulong = ((unsigned long)(_pa).hiaddr << 32) | ((_pa).loaddr); \
+    } while (0)
+
+#else
+typedef unsigned long dmaaddr_t;
+#define PHYSADDRHI(_pa) (0)
+#define PHYSADDRHISET(_pa, _val)
+#define PHYSADDRLO(_pa) ((_pa))
+#define PHYSADDRLOSET(_pa, _val) \
+    do { \
+        (_pa) = (_val); \
+    } while (0)
+#endif /* BCMDMA64OSL */
+#define PHYSADDRISZERO(_pa) (PHYSADDRLO(_pa) == 0 && PHYSADDRHI(_pa) == 0)
+
+/* One physical DMA segment */
+typedef struct {
+    dmaaddr_t addr;
+    uint32 length;
+} hnddma_seg_t;
+
+#define MAX_DMA_SEGS 8
+
+
+typedef struct {
+    void *oshdmah; /* Opaque handle for OSL to store its information */
+    uint origsize; /* Size of the virtual packet */
+    uint nsegs;
+    hnddma_seg_t segs[MAX_DMA_SEGS];
+} hnddma_seg_map_t;
+
+
+/* Packet headroom necessary to accommodate the largest header in the system (i.e. TXOFF).
+ * By doing so, we avoid the need to allocate an extra buffer for the header when bridging to WL.
+ * There is a compile time check in wlc.c which ensures that this value is at least as big
+ * as TXOFF. This value is used in dma_rxfill (hnddma.c).
+ */
+
+#if defined(BCM_RPC_NOCOPY) || defined(BCM_RCP_TXNOCOPY)
+/* add 40 bytes to allow for extra RPC header and info */
+#define BCMEXTRAHDROOM 260
+#else /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
+#define BCMEXTRAHDROOM 204
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef SDALIGN
+#define SDALIGN 32
+#endif
+
+/* Headroom required for dongle-to-host communication. Packets allocated
+ * locally in the dongle (e.g. for CDC ioctls or RNDIS messages) should
+ * leave this much room in front for low-level message headers which may
+ * be needed to get across the dongle bus to the host. (These messages
+ * don't go over the network, so room for the full WL header above would
+ * be a waste.)
+ */
+#define BCMDONGLEHDRSZ 12
+#define BCMDONGLEPADSZ 16
+
+#define BCMDONGLEOVERHEAD (BCMDONGLEHDRSZ + BCMDONGLEPADSZ)
+
+
+#if defined(NO_BCMDBG_ASSERT)
+# undef BCMDBG_ASSERT
+# undef BCMASSERT_LOG
+#endif
+
+#if defined(BCMASSERT_LOG)
+#define BCMASSERT_SUPPORT
+#endif
+
+/* Macros for doing definition and get/set of bitfields
+ * Usage example, e.g. a three-bit field (bits 4-6):
+ *    #define <NAME>_M BITFIELD_MASK(3)
+ *    #define <NAME>_S 4
+ * ...
+ *    regval = R_REG(osh, &regs->regfoo);
+ *    field = GFIELD(regval, <NAME>);
+ *    regval = SFIELD(regval, <NAME>, 1);
+ *    W_REG(osh, &regs->regfoo, regval);
+ */
+#define BITFIELD_MASK(width) \
+    (((unsigned)1 << (width)) - 1)
+#define GFIELD(val, field) \
+    (((val) >> field ## _S) & field ## _M)
+#define SFIELD(val, field, bits) \
+    (((val) & (~(field ## _M << field ## _S))) | \
+     ((unsigned)(bits) << field ## _S))
+
+/* define BCMSMALL to remove misc features for memory-constrained environments */
+#ifdef BCMSMALL
+#undef BCMSPACE
+#define bcmspace FALSE /* if (bcmspace) code is discarded */
+#else
+#define BCMSPACE
+#define bcmspace TRUE /* if (bcmspace) code is retained */
+#endif
+
+/* Max. nvram variable table size */
+#ifndef MAXSZ_NVRAM_VARS
+#ifdef LARGE_NVRAM_MAXSZ
+#define MAXSZ_NVRAM_VARS LARGE_NVRAM_MAXSZ
+#else
+/* SROM12 changes */
+#define MAXSZ_NVRAM_VARS 6144
+#endif /* LARGE_NVRAM_MAXSZ */
+#endif /* !MAXSZ_NVRAM_VARS */
+
+
+
+/* WL_ENAB_RUNTIME_CHECK may be set based upon the #define below (for ROM builds). It may also
+ * be defined via makefiles (e.g. ROM auto abandon unoptimized compiles).
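+ * Each feature below then resolves its *_ENAB() macro in one of three ways:
+ * a runtime bool (e.g. _bcmlfrag) when WL_ENAB_RUNTIME_CHECK is defined or
+ * this is not a dongle build, a constant 0 when the feature is compiled out
+ * or its *_DISABLED knob is set, and a constant 1 when it is unconditionally
+ * compiled in.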
+ */
+
+
+#ifdef BCMLFRAG /* BCMLFRAG support enab macros */
+    extern bool _bcmlfrag;
+    #if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+        #define BCMLFRAG_ENAB() (_bcmlfrag)
+    #elif defined(BCMLFRAG_DISABLED)
+        #define BCMLFRAG_ENAB() (0)
+    #else
+        #define BCMLFRAG_ENAB() (1)
+    #endif
+#else
+    #define BCMLFRAG_ENAB() (0)
+#endif /* BCMLFRAG */
+#define RXMODE1 1 /* descriptor split */
+#define RXMODE2 2 /* descriptor split + classification */
+#define RXMODE3 3 /* fifo split + classification */
+#define RXMODE4 4 /* fifo split + classification + hdr conversion */
+
+#ifdef BCMSPLITRX /* BCMSPLITRX support enab macros */
+    extern bool _bcmsplitrx;
+    extern uint8 _bcmsplitrx_mode;
+    #if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+        #define BCMSPLITRX_ENAB() (_bcmsplitrx)
+        #define BCMSPLITRX_MODE() (_bcmsplitrx_mode)
+    #elif defined(BCMSPLITRX_DISABLED)
+        #define BCMSPLITRX_ENAB() (0)
+        #define BCMSPLITRX_MODE() (0)
+    #else
+        #define BCMSPLITRX_ENAB() (1)
+        #define BCMSPLITRX_MODE() (_bcmsplitrx_mode)
+    #endif
+#else
+    #define BCMSPLITRX_ENAB() (0)
+    #define BCMSPLITRX_MODE() (0)
+#endif /* BCMSPLITRX */
+
+#ifdef BCMPCIEDEV /* BCMPCIEDEV support enab macros */
+extern bool _pciedevenab;
+    #if defined(WL_ENAB_RUNTIME_CHECK)
+        #define BCMPCIEDEV_ENAB() (_pciedevenab)
+    #elif defined(BCMPCIEDEV_ENABLED)
+        #define BCMPCIEDEV_ENAB() 1
+    #else
+        #define BCMPCIEDEV_ENAB() 0
+    #endif
+#else
+    #define BCMPCIEDEV_ENAB() 0
+#endif /* BCMPCIEDEV */
+
+#define SPLIT_RXMODE1() ((BCMSPLITRX_MODE() == RXMODE1))
+#define SPLIT_RXMODE2() ((BCMSPLITRX_MODE() == RXMODE2))
+#define SPLIT_RXMODE3() ((BCMSPLITRX_MODE() == RXMODE3))
+#define SPLIT_RXMODE4() ((BCMSPLITRX_MODE() == RXMODE4))
+
+#define PKT_CLASSIFY() (SPLIT_RXMODE2() || SPLIT_RXMODE3() || SPLIT_RXMODE4())
+#define RXFIFO_SPLIT() (SPLIT_RXMODE3() || SPLIT_RXMODE4())
+#define HDR_CONV() (SPLIT_RXMODE4())
+
+#define PKT_CLASSIFY_EN(x) ((PKT_CLASSIFY()) && (PKT_CLASSIFY_FIFO == (x)))
+#ifdef BCM_SPLITBUF
+    extern bool _bcmsplitbuf;
+    #if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+        #define BCM_SPLITBUF_ENAB() (_bcmsplitbuf)
+    #elif defined(BCM_SPLITBUF_DISABLED)
+        #define BCM_SPLITBUF_ENAB() (0)
+    #else
+        #define BCM_SPLITBUF_ENAB() (1)
+    #endif
+#else
+    #define BCM_SPLITBUF_ENAB() (0)
+#endif /* BCM_SPLITBUF */
+
+/* Max size for reclaimable NVRAM array */
+#ifdef DL_NVRAM
+#define NVRAM_ARRAY_MAXSIZE DL_NVRAM
+#else
+#define NVRAM_ARRAY_MAXSIZE MAXSZ_NVRAM_VARS
+#endif /* DL_NVRAM */
+
+extern uint32 gFWID;
+
+
+#endif /* _bcmdefs_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/bcmdevs.h b/drivers/net/wireless/bcmdhd/include/bcmdevs.h
new file mode 100644
index 000000000000..2b0ec490584b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/bcmdevs.h
@@ -0,0 +1,803 @@
+/*
+ * Broadcom device-specific manifest constants.
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmdevs.h 582052 2015-08-26 09:30:53Z $ + */ + +#ifndef _BCMDEVS_H +#define _BCMDEVS_H + +/* PCI vendor IDs */ +#define VENDOR_EPIGRAM 0xfeda +#define VENDOR_BROADCOM 0x14e4 +#define VENDOR_3COM 0x10b7 +#define VENDOR_NETGEAR 0x1385 +#define VENDOR_DIAMOND 0x1092 +#define VENDOR_INTEL 0x8086 +#define VENDOR_DELL 0x1028 +#define VENDOR_HP 0x103c +#define VENDOR_HP_COMPAQ 0x0e11 +#define VENDOR_APPLE 0x106b +#define VENDOR_SI_IMAGE 0x1095 /* Silicon Image, used by Arasan SDIO Host */ +#define VENDOR_BUFFALO 0x1154 /* Buffalo vendor id */ +#define VENDOR_TI 0x104c /* Texas Instruments */ +#define VENDOR_RICOH 0x1180 /* Ricoh */ +#define VENDOR_JMICRON 0x197b + + +/* PCMCIA vendor IDs */ +#define VENDOR_BROADCOM_PCMCIA 0x02d0 + +/* SDIO vendor IDs */ +#define VENDOR_BROADCOM_SDIO 0x00BF + +/* DONGLE VID/PIDs */ +#define BCM_DNGL_VID 0x0a5c +#define BCM_DNGL_BL_PID_4328 0xbd12 +#define BCM_DNGL_BL_PID_4322 0xbd13 +#define BCM_DNGL_BL_PID_4319 0xbd16 +#define BCM_DNGL_BL_PID_43236 0xbd17 +#define BCM_DNGL_BL_PID_4332 0xbd18 +#define BCM_DNGL_BL_PID_4330 0xbd19 +#define BCM_DNGL_BL_PID_4334 0xbd1a +#define BCM_DNGL_BL_PID_43239 0xbd1b +#define BCM_DNGL_BL_PID_4324 0xbd1c +#define BCM_DNGL_BL_PID_4360 0xbd1d +#define BCM_DNGL_BL_PID_43143 0xbd1e +#define BCM_DNGL_BL_PID_43242 0xbd1f +#define BCM_DNGL_BL_PID_43342 0xbd21 +#define BCM_DNGL_BL_PID_4335 0xbd20 +#define BCM_DNGL_BL_PID_43341 0xbd22 +#define BCM_DNGL_BL_PID_4350 0xbd23 +#define BCM_DNGL_BL_PID_4345 0xbd24 +#define BCM_DNGL_BL_PID_4349 0xbd25 +#define BCM_DNGL_BL_PID_4354 0xbd26 +#define BCM_DNGL_BL_PID_43569 0xbd27 +#define BCM_DNGL_BL_PID_43909 0xbd28 + +#define BCM_DNGL_BDC_PID 0x0bdc +#define BCM_DNGL_JTAG_PID 0x4a44 + +/* HW USB BLOCK [CPULESS USB] PIDs */ +#define BCM_HWUSB_PID_43239 43239 + +/* PCI Device IDs */ +#define BCM4210_DEVICE_ID 0x1072 /* never used */ +#define BCM4230_DEVICE_ID 0x1086 /* never used */ +#define BCM4401_ENET_ID 0x170c /* 4401b0 production enet cards */ +#define BCM3352_DEVICE_ID 0x3352 /* bcm3352 device id */ +#define BCM3360_DEVICE_ID 0x3360 /* bcm3360 device id */ +#define BCM4211_DEVICE_ID 0x4211 +#define BCM4231_DEVICE_ID 0x4231 +#define BCM4303_D11B_ID 0x4303 /* 4303 802.11b */ +#define BCM4311_D11G_ID 0x4311 /* 4311 802.11b/g id */ +#define BCM4311_D11DUAL_ID 0x4312 /* 4311 802.11a/b/g id */ +#define BCM4311_D11A_ID 0x4313 /* 4311 802.11a id */ 
+#define BCM4328_D11DUAL_ID 0x4314 /* 4328/4312 802.11a/g id */ +#define BCM4328_D11G_ID 0x4315 /* 4328/4312 802.11g id */ +#define BCM4328_D11A_ID 0x4316 /* 4328/4312 802.11a id */ +#define BCM4318_D11G_ID 0x4318 /* 4318 802.11b/g id */ +#define BCM4318_D11DUAL_ID 0x4319 /* 4318 802.11a/b/g id */ +#define BCM4318_D11A_ID 0x431a /* 4318 802.11a id */ +#define BCM4325_D11DUAL_ID 0x431b /* 4325 802.11a/g id */ +#define BCM4325_D11G_ID 0x431c /* 4325 802.11g id */ +#define BCM4325_D11A_ID 0x431d /* 4325 802.11a id */ +#define BCM4306_D11G_ID 0x4320 /* 4306 802.11g */ +#define BCM4306_D11A_ID 0x4321 /* 4306 802.11a */ +#define BCM4306_UART_ID 0x4322 /* 4306 uart */ +#define BCM4306_V90_ID 0x4323 /* 4306 v90 codec */ +#define BCM4306_D11DUAL_ID 0x4324 /* 4306 dual A+B */ +#define BCM4306_D11G_ID2 0x4325 /* BCM4306_D11G_ID; INF w/loose binding war */ +#define BCM4321_D11N_ID 0x4328 /* 4321 802.11n dualband id */ +#define BCM4321_D11N2G_ID 0x4329 /* 4321 802.11n 2.4Ghz band id */ +#define BCM4321_D11N5G_ID 0x432a /* 4321 802.11n 5Ghz band id */ +#define BCM4322_D11N_ID 0x432b /* 4322 802.11n dualband device */ +#define BCM4322_D11N2G_ID 0x432c /* 4322 802.11n 2.4GHz device */ +#define BCM4322_D11N5G_ID 0x432d /* 4322 802.11n 5GHz device */ +#define BCM4329_D11N_ID 0x432e /* 4329 802.11n dualband device */ +#define BCM4329_D11N2G_ID 0x432f /* 4329 802.11n 2.4G device */ +#define BCM4329_D11N5G_ID 0x4330 /* 4329 802.11n 5G device */ +#define BCM4315_D11DUAL_ID 0x4334 /* 4315 802.11a/g id */ +#define BCM4315_D11G_ID 0x4335 /* 4315 802.11g id */ +#define BCM4315_D11A_ID 0x4336 /* 4315 802.11a id */ +#define BCM4319_D11N_ID 0x4337 /* 4319 802.11n dualband device */ +#define BCM4319_D11N2G_ID 0x4338 /* 4319 802.11n 2.4G device */ +#define BCM4319_D11N5G_ID 0x4339 /* 4319 802.11n 5G device */ +#define BCM43231_D11N2G_ID 0x4340 /* 43231 802.11n 2.4GHz device */ +#define BCM43221_D11N2G_ID 0x4341 /* 43221 802.11n 2.4GHz device */ +#define BCM43222_D11N_ID 0x4350 /* 43222 802.11n dualband device */ +#define BCM43222_D11N2G_ID 0x4351 /* 43222 802.11n 2.4GHz device */ +#define BCM43222_D11N5G_ID 0x4352 /* 43222 802.11n 5GHz device */ +#define BCM43224_D11N_ID 0x4353 /* 43224 802.11n dualband device */ +#define BCM43224_D11N_ID_VEN1 0x0576 /* Vendor specific 43224 802.11n db device */ +#define BCM43226_D11N_ID 0x4354 /* 43226 802.11n dualband device */ +#define BCM43236_D11N_ID 0x4346 /* 43236 802.11n dualband device */ +#define BCM43236_D11N2G_ID 0x4347 /* 43236 802.11n 2.4GHz device */ +#define BCM43236_D11N5G_ID 0x4348 /* 43236 802.11n 5GHz device */ +#define BCM43225_D11N2G_ID 0x4357 /* 43225 802.11n 2.4GHz device */ +#define BCM43421_D11N_ID 0xA99D /* 43421 802.11n dualband device */ +#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */ +#define BCM4330_D11N_ID 0x4360 /* 4330 802.11n dualband device */ +#define BCM4330_D11N2G_ID 0x4361 /* 4330 802.11n 2.4G device */ +#define BCM4330_D11N5G_ID 0x4362 /* 4330 802.11n 5G device */ +#define BCM4336_D11N_ID 0x4343 /* 4336 802.11n 2.4GHz device */ +#define BCM6362_D11N_ID 0x435f /* 6362 802.11n dualband device */ +#define BCM6362_D11N2G_ID 0x433f /* 6362 802.11n 2.4Ghz band id */ +#define BCM6362_D11N5G_ID 0x434f /* 6362 802.11n 5Ghz band id */ +#define BCM4331_D11N_ID 0x4331 /* 4331 802.11n dualband id */ +#define BCM4331_D11N2G_ID 0x4332 /* 4331 802.11n 2.4Ghz band id */ +#define BCM4331_D11N5G_ID 0x4333 /* 4331 802.11n 5Ghz band id */ +#define BCM43237_D11N_ID 0x4355 /* 43237 802.11n dualband device */ +#define BCM43237_D11N5G_ID 0x4356 /* 43237 
802.11n 5GHz device */ +#define BCM43227_D11N2G_ID 0x4358 /* 43228 802.11n 2.4GHz device */ +#define BCM43228_D11N_ID 0x4359 /* 43228 802.11n DualBand device */ +#define BCM43228_D11N5G_ID 0x435a /* 43228 802.11n 5GHz device */ +#define BCM43362_D11N_ID 0x4363 /* 43362 802.11n 2.4GHz device */ +#define BCM43239_D11N_ID 0x4370 /* 43239 802.11n dualband device */ +#define BCM4324_D11N_ID 0x4374 /* 4324 802.11n dualband device */ +#define BCM43217_D11N2G_ID 0x43a9 /* 43217 802.11n 2.4GHz device */ +#define BCM43131_D11N2G_ID 0x43aa /* 43131 802.11n 2.4GHz device */ +#define BCM4314_D11N2G_ID 0x4364 /* 4314 802.11n 2.4G device */ +#define BCM43142_D11N2G_ID 0x4365 /* 43142 802.11n 2.4G device */ +#define BCM43143_D11N2G_ID 0x4366 /* 43143 802.11n 2.4G device */ +#define BCM4334_D11N_ID 0x4380 /* 4334 802.11n dualband device */ +#define BCM4334_D11N2G_ID 0x4381 /* 4334 802.11n 2.4G device */ +#define BCM4334_D11N5G_ID 0x4382 /* 4334 802.11n 5G device */ +#define BCM43342_D11N_ID 0x4383 /* 43342 802.11n dualband device */ +#define BCM43342_D11N2G_ID 0x4384 /* 43342 802.11n 2.4G device */ +#define BCM43342_D11N5G_ID 0x4385 /* 43342 802.11n 5G device */ +#define BCM43341_D11N_ID 0x4386 /* 43341 802.11n dualband device */ +#define BCM43341_D11N2G_ID 0x4387 /* 43341 802.11n 2.4G device */ +#define BCM43341_D11N5G_ID 0x4388 /* 43341 802.11n 5G device */ +#define BCM4360_D11AC_ID 0x43a0 +#define BCM4360_D11AC2G_ID 0x43a1 +#define BCM4360_D11AC5G_ID 0x43a2 +#define BCM4345_D11AC_ID 0x43ab /* 4345 802.11ac dualband device */ +#define BCM4345_D11AC2G_ID 0x43ac /* 4345 802.11ac 2.4G device */ +#define BCM4345_D11AC5G_ID 0x43ad /* 4345 802.11ac 5G device */ +#define BCM4335_D11AC_ID 0x43ae +#define BCM4335_D11AC2G_ID 0x43af +#define BCM4335_D11AC5G_ID 0x43b0 +#define BCM4352_D11AC_ID 0x43b1 /* 4352 802.11ac dualband device */ +#define BCM4352_D11AC2G_ID 0x43b2 /* 4352 802.11ac 2.4G device */ +#define BCM4352_D11AC5G_ID 0x43b3 /* 4352 802.11ac 5G device */ +#define BCM43602_D11AC_ID 0x43ba /* ac dualband PCI devid SPROM programmed */ +#define BCM43602_D11AC2G_ID 0x43bb /* 43602 802.11ac 2.4G device */ +#define BCM43602_D11AC5G_ID 0x43bc /* 43602 802.11ac 5G device */ +#define BCM4349_D11AC_ID 0x4349 /* 4349 802.11ac dualband device */ +#define BCM4349_D11AC2G_ID 0x43dd /* 4349 802.11ac 2.4G device */ +#define BCM4349_D11AC5G_ID 0x43de /* 4349 802.11ac 5G device */ +#define BCM53573_D11AC_ID 0x43b4 /* 53573 802.11ac dualband device */ +#define BCM53573_D11AC2G_ID 0x43b5 /* 53573 802.11ac 2.4G device */ +#define BCM53573_D11AC5G_ID 0x43b6 /* 53573 802.11ac 5G device */ +#define BCM47189_D11AC_ID 0x43c6 /* 47189 802.11ac dualband device */ +#define BCM47189_D11AC2G_ID 0x43c7 /* 47189 802.11ac 2.4G device */ +#define BCM47189_D11AC5G_ID 0x43c8 /* 47189 802.11ac 5G device */ +#define BCM4355_D11AC_ID 0x43dc /* 4355 802.11ac dualband device */ +#define BCM4355_D11AC2G_ID 0x43fc /* 4355 802.11ac 2.4G device */ +#define BCM4355_D11AC5G_ID 0x43fd /* 4355 802.11ac 5G device */ +#define BCM4359_D11AC_ID 0x43ef /* 4359 802.11ac dualband device */ +#define BCM4359_D11AC2G_ID 0x43fe /* 4359 802.11ac 2.4G device */ +#define BCM4359_D11AC5G_ID 0x43ff /* 4359 802.11ac 5G device */ +#define BCM43596_D11AC_ID 0x4415 /* 43596 802.11ac dualband device */ +#define BCM43596_D11AC2G_ID 0x4416 /* 43596 802.11ac 2.4G device */ +#define BCM43596_D11AC5G_ID 0x4417 /* 43596 802.11ac 5G device */ +#define BCM43909_D11AC_ID 0x43d0 /* 43909 802.11ac dualband device */ +#define BCM43909_D11AC2G_ID 0x43d1 /* 43909 802.11ac 2.4G device */ 
+#define BCM43909_D11AC5G_ID 0x43d2 /* 43909 802.11ac 5G device */ + +/* PCI Subsystem ID */ +#define BCM943228HMB_SSID_VEN1 0x0607 +#define BCM94313HMGBL_SSID_VEN1 0x0608 +#define BCM94313HMG_SSID_VEN1 0x0609 +#define BCM943142HM_SSID_VEN1 0x0611 + +#define BCM43143_D11N2G_ID 0x4366 /* 43143 802.11n 2.4G device */ + +#define BCM43242_D11N_ID 0x4367 /* 43242 802.11n dualband device */ +#define BCM43242_D11N2G_ID 0x4368 /* 43242 802.11n 2.4G device */ +#define BCM43242_D11N5G_ID 0x4369 /* 43242 802.11n 5G device */ + +#define BCM4350_D11AC_ID 0x43a3 +#define BCM4350_D11AC2G_ID 0x43a4 +#define BCM4350_D11AC5G_ID 0x43a5 + +#define BCM43556_D11AC_ID 0x43b7 +#define BCM43556_D11AC2G_ID 0x43b8 +#define BCM43556_D11AC5G_ID 0x43b9 + +#define BCM43558_D11AC_ID 0x43c0 +#define BCM43558_D11AC2G_ID 0x43c1 +#define BCM43558_D11AC5G_ID 0x43c2 + +#define BCM43566_D11AC_ID 0x43d3 +#define BCM43566_D11AC2G_ID 0x43d4 +#define BCM43566_D11AC5G_ID 0x43d5 + +#define BCM43568_D11AC_ID 0x43d6 +#define BCM43568_D11AC2G_ID 0x43d7 +#define BCM43568_D11AC5G_ID 0x43d8 + +#define BCM43569_D11AC_ID 0x43d9 +#define BCM43569_D11AC2G_ID 0x43da +#define BCM43569_D11AC5G_ID 0x43db + +#define BCM43570_D11AC_ID 0x43d9 +#define BCM43570_D11AC2G_ID 0x43da +#define BCM43570_D11AC5G_ID 0x43db + +#define BCM4354_D11AC_ID 0x43df /* 4354 802.11ac dualband device */ +#define BCM4354_D11AC2G_ID 0x43e0 /* 4354 802.11ac 2.4G device */ +#define BCM4354_D11AC5G_ID 0x43e1 /* 4354 802.11ac 5G device */ +#define BCM43430_D11N2G_ID 0x43e2 /* 43430 802.11n 2.4G device */ + + +#define BCM4365_D11AC_ID 0x43ca +#define BCM4365_D11AC2G_ID 0x43cb +#define BCM4365_D11AC5G_ID 0x43cc + +#define BCM4366_D11AC_ID 0x43c3 +#define BCM4366_D11AC2G_ID 0x43c4 +#define BCM4366_D11AC5G_ID 0x43c5 + +#define BCM43349_D11N_ID 0x43e6 /* 43349 802.11n dualband id */ +#define BCM43349_D11N2G_ID 0x43e7 /* 43349 802.11n 2.4Ghz band id */ +#define BCM43349_D11N5G_ID 0x43e8 /* 43349 802.11n 5Ghz band id */ + +#define BCM4358_D11AC_ID 0x43e9 /* 4358 802.11ac dualband device */ +#define BCM4358_D11AC2G_ID 0x43ea /* 4358 802.11ac 2.4G device */ +#define BCM4358_D11AC5G_ID 0x43eb /* 4358 802.11ac 5G device */ + +#define BCM4356_D11AC_ID 0x43ec /* 4356 802.11ac dualband device */ +#define BCM4356_D11AC2G_ID 0x43ed /* 4356 802.11ac 2.4G device */ +#define BCM4356_D11AC5G_ID 0x43ee /* 4356 802.11ac 5G device */ + +#define BCMGPRS_UART_ID 0x4333 /* Uart id used by 4306/gprs card */ +#define BCMGPRS2_UART_ID 0x4344 /* Uart id used by 4306/gprs card */ +#define FPGA_JTAGM_ID 0x43f0 /* FPGA jtagm device id */ +#define BCM_JTAGM_ID 0x43f1 /* BCM jtagm device id */ +#define SDIOH_FPGA_ID 0x43f2 /* sdio host fpga */ +#define BCM_SDIOH_ID 0x43f3 /* BCM sdio host id */ +#define SDIOD_FPGA_ID 0x43f4 /* sdio device fpga */ +#define SPIH_FPGA_ID 0x43f5 /* PCI SPI Host Controller FPGA */ +#define BCM_SPIH_ID 0x43f6 /* Synopsis SPI Host Controller */ +#define MIMO_FPGA_ID 0x43f8 /* FPGA mimo minimacphy device id */ +#define BCM_JTAGM2_ID 0x43f9 /* BCM alternate jtagm device id */ +#define SDHCI_FPGA_ID 0x43fa /* Standard SDIO Host Controller FPGA */ +#define BCM4402_ENET_ID 0x4402 /* 4402 enet */ +#define BCM4402_V90_ID 0x4403 /* 4402 v90 codec */ +#define BCM4410_DEVICE_ID 0x4410 /* bcm44xx family pci iline */ +#define BCM4412_DEVICE_ID 0x4412 /* bcm44xx family pci enet */ +#define BCM4430_DEVICE_ID 0x4430 /* bcm44xx family cardbus iline */ +#define BCM4432_DEVICE_ID 0x4432 /* bcm44xx family cardbus enet */ +#define BCM4704_ENET_ID 0x4706 /* 4704 enet (Use 47XX_ENET_ID instead!) 
*/ +#define BCM4710_DEVICE_ID 0x4710 /* 4710 primary function 0 */ +#define BCM47XX_AUDIO_ID 0x4711 /* 47xx audio codec */ +#define BCM47XX_V90_ID 0x4712 /* 47xx v90 codec */ +#define BCM47XX_ENET_ID 0x4713 /* 47xx enet */ +#define BCM47XX_EXT_ID 0x4714 /* 47xx external i/f */ +#define BCM47XX_GMAC_ID 0x4715 /* 47xx Unimac based GbE */ +#define BCM47XX_USBH_ID 0x4716 /* 47xx usb host */ +#define BCM47XX_USBD_ID 0x4717 /* 47xx usb device */ +#define BCM47XX_IPSEC_ID 0x4718 /* 47xx ipsec */ +#define BCM47XX_ROBO_ID 0x4719 /* 47xx/53xx roboswitch core */ +#define BCM47XX_USB20H_ID 0x471a /* 47xx usb 2.0 host */ +#define BCM47XX_USB20D_ID 0x471b /* 47xx usb 2.0 device */ +#define BCM47XX_ATA100_ID 0x471d /* 47xx parallel ATA */ +#define BCM47XX_SATAXOR_ID 0x471e /* 47xx serial ATA & XOR DMA */ +#define BCM47XX_GIGETH_ID 0x471f /* 47xx GbE (5700) */ +#define BCM4712_MIPS_ID 0x4720 /* 4712 base devid */ +#define BCM4716_DEVICE_ID 0x4722 /* 4716 base devid */ +#define BCM47XX_USB30H_ID 0x472a /* 47xx usb 3.0 host */ +#define BCM47XX_USB30D_ID 0x472b /* 47xx usb 3.0 device */ +#define BCM47XX_SMBUS_EMU_ID 0x47fe /* 47xx emulated SMBus device */ +#define BCM47XX_XOR_EMU_ID 0x47ff /* 47xx emulated XOR engine */ +#define EPI41210_DEVICE_ID 0xa0fa /* bcm4210 */ +#define EPI41230_DEVICE_ID 0xa10e /* bcm4230 */ +#define JINVANI_SDIOH_ID 0x4743 /* Jinvani SDIO Gold Host */ +#define BCM27XX_SDIOH_ID 0x2702 /* BCM27xx Standard SDIO Host */ +#define PCIXX21_FLASHMEDIA_ID 0x803b /* TI PCI xx21 Standard Host Controller */ +#define PCIXX21_SDIOH_ID 0x803c /* TI PCI xx21 Standard Host Controller */ +#define R5C822_SDIOH_ID 0x0822 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host */ +#define JMICRON_SDIOH_ID 0x2381 /* JMicron Standard SDIO Host Controller */ + +/* Chip IDs */ +#define BCM4306_CHIP_ID 0x4306 /* 4306 chipcommon chipid */ +#define BCM4311_CHIP_ID 0x4311 /* 4311 PCIe 802.11a/b/g */ +#define BCM43111_CHIP_ID 43111 /* 43111 chipcommon chipid (OTP chipid) */ +#define BCM43112_CHIP_ID 43112 /* 43112 chipcommon chipid (OTP chipid) */ +#define BCM4312_CHIP_ID 0x4312 /* 4312 chipcommon chipid */ +#define BCM4313_CHIP_ID 0x4313 /* 4313 chip id */ +#define BCM43131_CHIP_ID 43131 /* 43131 chip id (OTP chipid) */ +#define BCM4315_CHIP_ID 0x4315 /* 4315 chip id */ +#define BCM4318_CHIP_ID 0x4318 /* 4318 chipcommon chipid */ +#define BCM4319_CHIP_ID 0x4319 /* 4319 chip id */ +#define BCM4320_CHIP_ID 0x4320 /* 4320 chipcommon chipid */ +#define BCM4321_CHIP_ID 0x4321 /* 4321 chipcommon chipid */ +#define BCM43217_CHIP_ID 43217 /* 43217 chip id (OTP chipid) */ +#define BCM4322_CHIP_ID 0x4322 /* 4322 chipcommon chipid */ +#define BCM43221_CHIP_ID 43221 /* 43221 chipcommon chipid (OTP chipid) */ +#define BCM43222_CHIP_ID 43222 /* 43222 chipcommon chipid */ +#define BCM43224_CHIP_ID 43224 /* 43224 chipcommon chipid */ +#define BCM43225_CHIP_ID 43225 /* 43225 chipcommon chipid */ +#define BCM43227_CHIP_ID 43227 /* 43227 chipcommon chipid */ +#define BCM43228_CHIP_ID 43228 /* 43228 chipcommon chipid */ +#define BCM43226_CHIP_ID 43226 /* 43226 chipcommon chipid */ +#define BCM43231_CHIP_ID 43231 /* 43231 chipcommon chipid (OTP chipid) */ +#define BCM43234_CHIP_ID 43234 /* 43234 chipcommon chipid */ +#define BCM43235_CHIP_ID 43235 /* 43235 chipcommon chipid */ +#define BCM43236_CHIP_ID 43236 /* 43236 chipcommon chipid */ +#define BCM43237_CHIP_ID 43237 /* 43237 chipcommon chipid */ +#define BCM43238_CHIP_ID 43238 /* 43238 chipcommon chipid */ +#define BCM43239_CHIP_ID 43239 /* 43239 chipcommon chipid */ +#define 
BCM43420_CHIP_ID 43420 /* 43222 chipcommon chipid (OTP, RBBU) */ +#define BCM43421_CHIP_ID 43421 /* 43224 chipcommon chipid (OTP, RBBU) */ +#define BCM43428_CHIP_ID 43428 /* 43228 chipcommon chipid (OTP, RBBU) */ +#define BCM43431_CHIP_ID 43431 /* 4331 chipcommon chipid (OTP, RBBU) */ +#define BCM43460_CHIP_ID 43460 /* 4360 chipcommon chipid (OTP, RBBU) */ +#define BCM4325_CHIP_ID 0x4325 /* 4325 chip id */ +#define BCM4328_CHIP_ID 0x4328 /* 4328 chip id */ +#define BCM4329_CHIP_ID 0x4329 /* 4329 chipcommon chipid */ +#define BCM4331_CHIP_ID 0x4331 /* 4331 chipcommon chipid */ +#define BCM4336_CHIP_ID 0x4336 /* 4336 chipcommon chipid */ +#define BCM43362_CHIP_ID 43362 /* 43362 chipcommon chipid */ +#define BCM4330_CHIP_ID 0x4330 /* 4330 chipcommon chipid */ +#define BCM6362_CHIP_ID 0x6362 /* 6362 chipcommon chipid */ +#define BCM4314_CHIP_ID 0x4314 /* 4314 chipcommon chipid */ +#define BCM43142_CHIP_ID 43142 /* 43142 chipcommon chipid */ +#define BCM43143_CHIP_ID 43143 /* 43143 chipcommon chipid */ +#define BCM4324_CHIP_ID 0x4324 /* 4324 chipcommon chipid */ +#define BCM43242_CHIP_ID 43242 /* 43242 chipcommon chipid */ +#define BCM43243_CHIP_ID 43243 /* 43243 chipcommon chipid */ +#define BCM4334_CHIP_ID 0x4334 /* 4334 chipcommon chipid */ +#define BCM4335_CHIP_ID 0x4335 /* 4335 chipcommon chipid */ +#define BCM4339_CHIP_ID 0x4339 /* 4339 chipcommon chipid */ +#define BCM43349_CHIP_ID 43349 /* 43349(0xA955) chipcommon chipid */ +#define BCM4360_CHIP_ID 0x4360 /* 4360 chipcommon chipid */ +#define BCM4364_CHIP_ID 0x4364 /* 4364 chipcommon chipid */ +#define BCM4352_CHIP_ID 0x4352 /* 4352 chipcommon chipid */ +#define BCM43526_CHIP_ID 0xAA06 +#define BCM43340_CHIP_ID 43340 /* 43340 chipcommon chipid */ +#define BCM43341_CHIP_ID 43341 /* 43341 chipcommon chipid */ +#define BCM43342_CHIP_ID 43342 /* 43342 chipcommon chipid */ +#define BCM4350_CHIP_ID 0x4350 /* 4350 chipcommon chipid */ +#define BCM4354_CHIP_ID 0x4354 /* 4354 chipcommon chipid */ +#define BCM4356_CHIP_ID 0x4356 /* 4356 chipcommon chipid */ +#define BCM43556_CHIP_ID 0xAA24 /* 43556 chipcommon chipid */ +#define BCM43558_CHIP_ID 0xAA26 /* 43558 chipcommon chipid */ +#define BCM43566_CHIP_ID 0xAA2E /* 43566 chipcommon chipid */ +#define BCM43567_CHIP_ID 0xAA2F /* 43567 chipcommon chipid */ +#define BCM43568_CHIP_ID 0xAA30 /* 43568 chipcommon chipid */ +#define BCM43569_CHIP_ID 0xAA31 /* 43569 chipcommon chipid */ +#define BCM43570_CHIP_ID 0xAA32 /* 43570 chipcommon chipid */ +#define BCM4358_CHIP_ID 0x4358 /* 4358 chipcommon chipid */ +#define BCM43012_CHIP_ID 0xA804 /* 43012 chipcommon chipid */ +#define BCM4350_CHIP(chipid) ((CHIPID(chipid) == BCM4350_CHIP_ID) || \ + (CHIPID(chipid) == BCM4354_CHIP_ID) || \ + (CHIPID(chipid) == BCM4356_CHIP_ID) || \ + (CHIPID(chipid) == BCM43556_CHIP_ID) || \ + (CHIPID(chipid) == BCM43558_CHIP_ID) || \ + (CHIPID(chipid) == BCM43566_CHIP_ID) || \ + (CHIPID(chipid) == BCM43567_CHIP_ID) || \ + (CHIPID(chipid) == BCM43568_CHIP_ID) || \ + (CHIPID(chipid) == BCM43569_CHIP_ID) || \ + (CHIPID(chipid) == BCM43570_CHIP_ID) || \ + (CHIPID(chipid) == BCM4358_CHIP_ID)) /* 4350 variations */ +#define BCM4345_CHIP_ID 0x4345 /* 4345 chipcommon chipid */ +#define BCM43454_CHIP_ID 43454 /* 43454 chipcommon chipid */ +#define BCM43455_CHIP_ID 43455 /* 43455 chipcommon chipid */ +#define BCM43457_CHIP_ID 43457 /* 43457 chipcommon chipid */ +#define BCM43458_CHIP_ID 43458 /* 43458 chipcommon chipid */ +#define BCM43430_CHIP_ID 43430 /* 43430 chipcommon chipid */ +#define BCM4349_CHIP_ID 0x4349 /* 4349 chipcommon 
chipid */ +#define BCM4355_CHIP_ID 0x4355 /* 4355 chipcommon chipid */ +#define BCM4359_CHIP_ID 0x4359 /* 4359 chipcommon chipid */ +#define BCM4349_CHIP(chipid) ((CHIPID(chipid) == BCM4349_CHIP_ID) || \ + (CHIPID(chipid) == BCM4355_CHIP_ID) || \ + (CHIPID(chipid) == BCM4359_CHIP_ID)) + +#define BCM4345_CHIP(chipid) (CHIPID(chipid) == BCM4345_CHIP_ID || \ + CHIPID(chipid) == BCM43454_CHIP_ID || \ + CHIPID(chipid) == BCM43455_CHIP_ID || \ + CHIPID(chipid) == BCM43457_CHIP_ID || \ + CHIPID(chipid) == BCM43458_CHIP_ID) + +#define CASE_BCM4345_CHIP case BCM4345_CHIP_ID: /* fallthrough */ \ + case BCM43454_CHIP_ID: /* fallthrough */ \ + case BCM43455_CHIP_ID: /* fallthrough */ \ + case BCM43457_CHIP_ID: /* fallthrough */ \ + case BCM43458_CHIP_ID + +#define BCM4349_CHIP_GRPID BCM4349_CHIP_ID: \ + case BCM4355_CHIP_ID: \ + case BCM4359_CHIP_ID + +#define BCM4365_CHIP_ID 0x4365 /* 4365 chipcommon chipid */ +#define BCM4366_CHIP_ID 0x4366 /* 4366 chipcommon chipid */ + +#define BCM43909_CHIP_ID 0xab85 /* 43909 chipcommon chipid */ + +#define BCM43602_CHIP_ID 0xaa52 /* 43602 chipcommon chipid */ +#define BCM43462_CHIP_ID 0xa9c6 /* 43462 chipcommon chipid */ +#define BCM43522_CHIP_ID 0xaa02 /* 43522 chipcommon chipid */ +#define BCM43602_CHIP(chipid) ((CHIPID(chipid) == BCM43602_CHIP_ID) || \ + (CHIPID(chipid) == BCM43462_CHIP_ID) || \ + (CHIPID(chipid) == BCM43522_CHIP_ID)) /* 43602 variations */ +#define CASE_BCM43602_CHIP case BCM43602_CHIP_ID: /* fallthrough */ \ + case BCM43462_CHIP_ID: /* fallthrough */ \ + case BCM43522_CHIP_ID + +#define BCM4342_CHIP_ID 4342 /* 4342 chipcommon chipid (OTP, RBBU) */ +#define BCM4402_CHIP_ID 0x4402 /* 4402 chipid */ +#define BCM4704_CHIP_ID 0x4704 /* 4704 chipcommon chipid */ +#define BCM4706_CHIP_ID 0x5300 /* 4706 chipcommon chipid */ +#define BCM4707_CHIP_ID 53010 /* 4707 chipcommon chipid */ +#define BCM47094_CHIP_ID 53030 /* 47094 chipcommon chipid */ +#define BCM53018_CHIP_ID 53018 /* 53018 chipcommon chipid */ +#define BCM4707_CHIP(chipid) (((chipid) == BCM4707_CHIP_ID) || \ + ((chipid) == BCM53018_CHIP_ID) || \ + ((chipid) == BCM47094_CHIP_ID)) +#define BCM4710_CHIP_ID 0x4710 /* 4710 chipid */ +#define BCM4712_CHIP_ID 0x4712 /* 4712 chipcommon chipid */ +#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */ +#define BCM47162_CHIP_ID 47162 /* 47162 chipcommon chipid */ +#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */ +#define BCM4749_CHIP_ID 0x4749 /* 5357 chipcommon chipid (OTP, RBBU) */ +#define BCM4785_CHIP_ID 0x4785 /* 4785 chipcommon chipid */ +#define BCM5350_CHIP_ID 0x5350 /* 5350 chipcommon chipid */ +#define BCM5352_CHIP_ID 0x5352 /* 5352 chipcommon chipid */ +#define BCM5354_CHIP_ID 0x5354 /* 5354 chipcommon chipid */ +#define BCM5365_CHIP_ID 0x5365 /* 5365 chipcommon chipid */ +#define BCM5356_CHIP_ID 0x5356 /* 5356 chipcommon chipid */ +#define BCM5357_CHIP_ID 0x5357 /* 5357 chipcommon chipid */ +#define BCM53572_CHIP_ID 53572 /* 53572 chipcommon chipid */ +#define BCM53573_CHIP_ID 53573 /* 53573 chipcommon chipid */ +#define BCM53573_CHIP(chipid) (CHIPID(chipid) == BCM53573_CHIP_ID) +#define BCM53573_CHIP_GRPID BCM53573_CHIP_ID + +/* Package IDs */ +#define BCM4303_PKG_ID 2 /* 4303 package id */ +#define BCM4309_PKG_ID 1 /* 4309 package id */ +#define BCM4712LARGE_PKG_ID 0 /* 340pin 4712 package id */ +#define BCM4712SMALL_PKG_ID 1 /* 200pin 4712 package id */ +#define BCM4712MID_PKG_ID 2 /* 225pin 4712 package id */ +#define BCM4328USBD11G_PKG_ID 2 /* 4328 802.11g USB package id */ +#define 
BCM4328USBDUAL_PKG_ID 3 /* 4328 802.11a/g USB package id */ +#define BCM4328SDIOD11G_PKG_ID 4 /* 4328 802.11g SDIO package id */ +#define BCM4328SDIODUAL_PKG_ID 5 /* 4328 802.11a/g SDIO package id */ +#define BCM4329_289PIN_PKG_ID 0 /* 4329 289-pin package id */ +#define BCM4329_182PIN_PKG_ID 1 /* 4329N 182-pin package id */ +#define BCM5354E_PKG_ID 1 /* 5354E package id */ +#define BCM4716_PKG_ID 8 /* 4716 package id */ +#define BCM4717_PKG_ID 9 /* 4717 package id */ +#define BCM4718_PKG_ID 10 /* 4718 package id */ +#define BCM5356_PKG_NONMODE 1 /* 5356 package without nmode support */ +#define BCM5358U_PKG_ID 8 /* 5358U package id */ +#define BCM5358_PKG_ID 9 /* 5358 package id */ +#define BCM47186_PKG_ID 10 /* 47186 package id */ +#define BCM5357_PKG_ID 11 /* 5357 package id */ +#define BCM5356U_PKG_ID 12 /* 5356U package id */ +#define BCM53572_PKG_ID 8 /* 53572 package id */ +#define BCM5357C0_PKG_ID 8 /* 5357c0 package id (the same as 53572) */ +#define BCM47188_PKG_ID 9 /* 47188 package id */ +#define BCM5358C0_PKG_ID 0xa /* 5358c0 package id */ +#define BCM5356C0_PKG_ID 0xb /* 5356c0 package id */ +#define BCM4331TT_PKG_ID 8 /* 4331 12x12 package id */ +#define BCM4331TN_PKG_ID 9 /* 4331 12x9 package id */ +#define BCM4331TNA0_PKG_ID 0xb /* 4331 12x9 package id */ +#define BCM47189_PKG_ID 1 /* 47189 package id */ +#define BCM53573_PKG_ID 0 /* 53573 package id */ +#define BCM4706L_PKG_ID 1 /* 4706L package id */ + +#define HDLSIM5350_PKG_ID 1 /* HDL simulator package id for a 5350 */ +#define HDLSIM_PKG_ID 14 /* HDL simulator package id */ +#define HWSIM_PKG_ID 15 /* Hardware simulator package id */ +#define BCM43224_FAB_CSM 0x8 /* the chip is manufactured by CSM */ +#define BCM43224_FAB_SMIC 0xa /* the chip is manufactured by SMIC */ +#define BCM4336_WLBGA_PKG_ID 0x8 +#define BCM4330_WLBGA_PKG_ID 0x0 +#define BCM4314PCIE_ARM_PKG_ID (8 | 0) /* 4314 QFN PCI package id, bit 3 tie high */ +#define BCM4314SDIO_PKG_ID (8 | 1) /* 4314 QFN SDIO package id */ +#define BCM4314PCIE_PKG_ID (8 | 2) /* 4314 QFN PCI (ARM-less) package id */ +#define BCM4314SDIO_ARM_PKG_ID (8 | 3) /* 4314 QFN SDIO (ARM-less) package id */ +#define BCM4314SDIO_FPBGA_PKG_ID (8 | 4) /* 4314 FpBGA SDIO package id */ +#define BCM4314DEV_PKG_ID (8 | 6) /* 4314 Development package id */ + +#define BCM4707_PKG_ID 1 /* 4707 package id */ +#define BCM4708_PKG_ID 2 /* 4708 package id */ +#define BCM4709_PKG_ID 0 /* 4709 package id */ + +#define PCIXX21_FLASHMEDIA0_ID 0x8033 /* TI PCI xx21 Standard Host Controller */ +#define PCIXX21_SDIOH0_ID 0x8034 /* TI PCI xx21 Standard Host Controller */ + +#define BCM4335_WLCSP_PKG_ID (0x0) /* WLCSP Module/Mobile SDIO/HSIC. */ +#define BCM4335_FCBGA_PKG_ID (0x1) /* FCBGA PC/Embedded/Media PCIE/SDIO */ +#define BCM4335_WLBGA_PKG_ID (0x2) /* WLBGA COB/Mobile SDIO/HSIC. */ +#define BCM4335_FCBGAD_PKG_ID (0x3) /* FCBGA Debug/Dev All if's. */ +#define BCM4335_PKG_MASK (0x3) +#define BCM43602_12x12_PKG_ID (0x1) /* 12x12 pins package, used for e.g. 
router designs */ + +/* boardflags */ +#define BFL_BTC2WIRE 0x00000001 /* old 2wire Bluetooth coexistence, OBSOLETE */ +#define BFL_BTCOEX 0x00000001 /* Board supports BTCOEX */ +#define BFL_PACTRL 0x00000002 /* Board has gpio 9 controlling the PA */ +#define BFL_AIRLINEMODE 0x00000004 /* Board implements gpio 13 radio disable indication, UNUSED */ +#define BFL_ADCDIV 0x00000008 /* Board has the rssi ADC divider */ +#define BFL_DIS_256QAM 0x00000008 +#define BFL_ENETROBO 0x00000010 /* Board has robo switch or core */ +#define BFL_TSSIAVG 0x00000010 /* TSSI averaging for ACPHY chips */ +#define BFL_NOPLLDOWN 0x00000020 /* Not ok to power down the chip pll and oscillator */ +#define BFL_CCKHIPWR 0x00000040 /* Can do high-power CCK transmission */ +#define BFL_ENETADM 0x00000080 /* Board has ADMtek switch */ +#define BFL_ENETVLAN 0x00000100 /* Board has VLAN capability */ +#define BFL_LTECOEX 0x00000200 /* LTE Coex enabled */ +#define BFL_NOPCI 0x00000400 /* Board leaves PCI floating */ +#define BFL_FEM 0x00000800 /* Board supports the Front End Module */ +#define BFL_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */ +#define BFL_HGPA 0x00002000 /* Board has a high gain PA */ +#define BFL_BTC2WIRE_ALTGPIO 0x00004000 /* Board's BTC 2wire is in the alternate gpios */ +#define BFL_ALTIQ 0x00008000 /* Alternate I/Q settings */ +#define BFL_NOPA 0x00010000 /* Board has no PA */ +#define BFL_RSSIINV 0x00020000 /* Board's RSSI uses positive slope(not TSSI) */ +#define BFL_PAREF 0x00040000 /* Board uses the PARef LDO */ +#define BFL_3TSWITCH 0x00080000 /* Board uses a triple throw switch shared with BT */ +#define BFL_PHASESHIFT 0x00100000 /* Board can support phase shifter */ +#define BFL_BUCKBOOST 0x00200000 /* Power topology uses BUCKBOOST */ +#define BFL_FEM_BT 0x00400000 /* Board has FEM and switch to share antenna w/ BT */ +#define BFL_NOCBUCK 0x00800000 /* Power topology doesn't use CBUCK */ +#define BFL_CCKFAVOREVM 0x01000000 /* Favor CCK EVM over spectral mask */ +#define BFL_PALDO 0x02000000 /* Power topology uses PALDO */ +#define BFL_LNLDO2_2P5 0x04000000 /* Select 2.5V as LNLDO2 output voltage */ +#define BFL_FASTPWR 0x08000000 +#define BFL_UCPWRCTL_MININDX 0x08000000 /* Enforce min power index to avoid FEM damage */ +#define BFL_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */ +#define BFL_TRSW_1by2 0x20000000 /* Board has 2 TRSW's in 1by2 designs */ +#define BFL_GAINBOOSTA01 0x20000000 /* 5g Gainboost for core0 and core1 */ +#define BFL_LO_TRSW_R_5GHz 0x40000000 /* In 5G do not throw TRSW to T for clipLO gain */ +#define BFL_ELNA_GAINDEF 0x80000000 /* Backoff InitGain based on elna_2g/5g field + * when this flag is set + */ +#define BFL_EXTLNA_TX 0x20000000 /* Temp boardflag to indicate to */ + +/* boardflags2 */ +#define BFL2_RXBB_INT_REG_DIS 0x00000001 /* Board has an external rxbb regulator */ +#define BFL2_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */ +#define BFL2_TXPWRCTRL_EN 0x00000004 /* Board permits enabling TX Power Control */ +#define BFL2_2X4_DIV 0x00000008 /* Board supports the 2X4 diversity switch */ +#define BFL2_5G_PWRGAIN 0x00000010 /* Board supports 5G band power gain */ +#define BFL2_PCIEWAR_OVR 0x00000020 /* Board overrides ASPM and Clkreq settings */ +#define BFL2_CAESERS_BRD 0x00000040 /* Board is Caesers brd (unused by sw) */ +#define BFL2_BTC3WIRE 0x00000080 /* Board support legacy 3 wire or 4 wire */ +#define BFL2_BTCLEGACY 0x00000080 /* Board support legacy 3/4 wire, to replace + * BFL2_BTC3WIRE 
+ */ +#define BFL2_SKWRKFEM_BRD 0x00000100 /* 4321mcm93 board uses Skyworks FEM */ +#define BFL2_SPUR_WAR 0x00000200 /* Board has a WAR for clock-harmonic spurs */ +#define BFL2_GPLL_WAR 0x00000400 /* Flag to narrow G-band PLL loop b/w */ +#define BFL2_TRISTATE_LED 0x00000800 /* Tri-state the LED */ +#define BFL2_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */ +#define BFL2_2G_SPUR_WAR 0x00002000 /* WAR to reduce and avoid clock-harmonic spurs in 2G */ +#define BFL2_BPHY_ALL_TXCORES 0x00004000 /* Transmit bphy frames using all tx cores */ +#define BFL2_FCC_BANDEDGE_WAR 0x00008000 /* Activates WAR to improve FCC bandedge performance */ +#define BFL2_DAC_SPUR_IMPROVEMENT 0x00008000 /* Reducing DAC Spurs */ +#define BFL2_GPLL_WAR2 0x00010000 /* Flag to widen G-band PLL loop b/w */ +#define BFL2_REDUCED_PA_TURNONTIME 0x00010000 /* Flag to reduce PA turn on Time */ +#define BFL2_IPALVLSHIFT_3P3 0x00020000 +#define BFL2_INTERNDET_TXIQCAL 0x00040000 /* Use internal envelope detector for TX IQCAL */ +#define BFL2_XTALBUFOUTEN 0x00080000 /* Keep the buffered Xtal output from radio on */ + /* Most drivers will turn it off without this flag */ + /* to save power. */ + +#define BFL2_ANAPACTRL_2G 0x00100000 /* 2G ext PAs are controlled by analog PA ctrl lines */ +#define BFL2_ANAPACTRL_5G 0x00200000 /* 5G ext PAs are controlled by analog PA ctrl lines */ +#define BFL2_ELNACTRL_TRSW_2G 0x00400000 /* AZW4329: 2G gmode_elna_gain controls TR Switch */ +#define BFL2_BT_SHARE_ANT0 0x00800000 /* share core0 antenna with BT */ +#define BFL2_TEMPSENSE_HIGHER 0x01000000 /* The tempsense threshold can sustain higher value + * than programmed. The exact delta is decided by + * driver per chip/boardtype. This can be used + * when tempsense qualification happens after shipment + */ +#define BFL2_BTC3WIREONLY 0x02000000 /* standard 3 wire btc only. 
4 wire not supported */ +#define BFL2_PWR_NOMINAL 0x04000000 /* 0: power reduction on, 1: no power reduction */ +#define BFL2_EXTLNA_PWRSAVE 0x08000000 /* boardflag to enable ucode to apply power save */ + /* ucode control of eLNA during Tx */ +#define BFL2_4313_RADIOREG 0x10000000 + /* board rework */ +#define BFL2_DYNAMIC_VMID 0x10000000 /* enable dynamic Vmid in idle TSSI CAL for 4331 */ + +#define BFL2_SDR_EN 0x20000000 /* SDR enabled or disabled */ +#define BFL2_DYNAMIC_VMID 0x10000000 /* boardflag to enable dynamic Vmid idle TSSI CAL */ +#define BFL2_LNA1BYPFORTR2G 0x40000000 /* acphy, enable lna1 bypass for clip gain, 2g */ +#define BFL2_LNA1BYPFORTR5G 0x80000000 /* acphy, enable lna1 bypass for clip gain, 5g */ + +/* SROM 11 - 11ac boardflag definitions */ +#define BFL_SROM11_BTCOEX 0x00000001 /* Board supports BTCOEX */ +#define BFL_SROM11_WLAN_BT_SH_XTL 0x00000002 /* bluetooth and wlan share same crystal */ +#define BFL_SROM11_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */ +#define BFL_SROM11_EPA_TURNON_TIME 0x00018000 /* 2 bits for different PA turn on times */ +#define BFL_SROM11_EPA_TURNON_TIME_SHIFT 15 +#define BFL_SROM11_PRECAL_TX_IDX 0x00040000 /* Dedicated TX IQLOCAL IDX values */ + /* per subband, as derived from 43602A1 MCH5 */ +#define BFL_SROM11_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */ +#define BFL_SROM11_GAINBOOSTA01 0x20000000 /* 5g Gainboost for core0 and core1 */ +#define BFL2_SROM11_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */ +#define BFL2_SROM11_ANAPACTRL_2G 0x00100000 /* 2G ext PAs are ctrl-ed by analog PA ctrl lines */ +#define BFL2_SROM11_ANAPACTRL_5G 0x00200000 /* 5G ext PAs are ctrl-ed by analog PA ctrl lines */ +#define BFL2_SROM11_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */ +#define BFL2_SROM11_EPA_ON_DURING_TXIQLOCAL 0x00020000 /* Keep ext. 
PA's on in TX IQLO CAL */ + +/* boardflags3 */ +#define BFL3_FEMCTRL_SUB 0x00000007 /* acphy, subrevs of femctrl on top of srom_femctrl */ +#define BFL3_RCAL_WAR 0x00000008 /* acphy, rcal war active on this board (4335a0) */ +#define BFL3_TXGAINTBLID 0x00000070 /* acphy, txgain table id */ +#define BFL3_TXGAINTBLID_SHIFT 0x4 /* acphy, txgain table id shift bit */ +#define BFL3_TSSI_DIV_WAR 0x00000080 /* acphy, Separate paparam for 20/40/80 */ +#define BFL3_TSSI_DIV_WAR_SHIFT 0x7 /* acphy, Separate paparam for 20/40/80 shift bit */ +#define BFL3_FEMTBL_FROM_NVRAM 0x00000100 /* acphy, femctrl table is read from nvram */ +#define BFL3_FEMTBL_FROM_NVRAM_SHIFT 0x8 /* acphy, femctrl table is read from nvram */ +#define BFL3_AGC_CFG_2G 0x00000200 /* acphy, gain control configuration for 2G */ +#define BFL3_AGC_CFG_5G 0x00000400 /* acphy, gain control configuration for 5G */ +#define BFL3_PPR_BIT_EXT 0x00000800 /* acphy, bit position for 1bit extension for ppr */ +#define BFL3_PPR_BIT_EXT_SHIFT 11 /* acphy, bit shift for 1bit extension for ppr */ +#define BFL3_BBPLL_SPR_MODE_DIS 0x00001000 /* acphy, disables bbpll spur modes */ +#define BFL3_RCAL_OTP_VAL_EN 0x00002000 /* acphy, to read rcal_trim value from otp */ +#define BFL3_2GTXGAINTBL_BLANK 0x00004000 /* acphy, blank the first X ticks of 2g gaintbl */ +#define BFL3_2GTXGAINTBL_BLANK_SHIFT 14 /* acphy, blank the first X ticks of 2g gaintbl */ +#define BFL3_5GTXGAINTBL_BLANK 0x00008000 /* acphy, blank the first X ticks of 5g gaintbl */ +#define BFL3_5GTXGAINTBL_BLANK_SHIFT 15 /* acphy, blank the first X ticks of 5g gaintbl */ +#define BFL3_PHASETRACK_MAX_ALPHABETA 0x00010000 /* acphy, to max out alpha,beta to 511 */ +#define BFL3_PHASETRACK_MAX_ALPHABETA_SHIFT 16 /* acphy, to max out alpha,beta to 511 */ +/* acphy, to use backed off gaintbl for lte-coex */ +#define BFL3_LTECOEX_GAINTBL_EN 0x00060000 +/* acphy, to use backed off gaintbl for lte-coex */ +#define BFL3_LTECOEX_GAINTBL_EN_SHIFT 17 +#define BFL3_5G_SPUR_WAR 0x00080000 /* acphy, enable spur WAR in 5G band */ +#define BFL3_1X1_RSDB_ANT 0x01000000 /* to find if 2-ant RSDB board or 1-ant RSDB board */ +#define BFL3_1X1_RSDB_ANT_SHIFT 24 + +/* acphy: lpmode2g and lpmode_5g related boardflags */ +#define BFL3_ACPHY_LPMODE_2G 0x00300000 /* bits 20:21 for lpmode_2g choice */ +#define BFL3_ACPHY_LPMODE_2G_SHIFT 20 + +#define BFL3_ACPHY_LPMODE_5G 0x00C00000 /* bits 22:23 for lpmode_5g choice */ +#define BFL3_ACPHY_LPMODE_5G_SHIFT 22 + +#define BFL3_EXT_LPO_ISCLOCK 0x02000000 /* External LPO is clock, not x-tal */ +#define BFL3_FORCE_INT_LPO_SEL 0x04000000 /* Force internal lpo */ +#define BFL3_FORCE_EXT_LPO_SEL 0x08000000 /* Force external lpo */ + +#define BFL3_EN_BRCM_IMPBF 0x10000000 /* acphy, Allow BRCM Implicit TxBF */ +#define BFL3_AVVMID_FROM_NVRAM 0x40000000 /* Read Av Vmid from NVRAM */ +#define BFL3_VLIN_EN_FROM_NVRAM 0x80000000 /* Read Vlin En from NVRAM */ + +#define BFL3_AVVMID_FROM_NVRAM_SHIFT 30 /* Read Av Vmid from NVRAM */ +#define BFL3_VLIN_EN_FROM_NVRAM_SHIFT 31 /* Enable Vlin from NVRAM */ + +/* boardflags4 for SROM12 */ +#define BFL4_SROM12_4dBPAD (1 << 0) /* To distinguish between normal and 4dB pad board */ + + +/* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */ +#define BOARD_GPIO_BTC3W_IN 0x850 /* bit 4 is RF_ACTIVE, bit 6 is STATUS, bit 11 is PRI */ +#define BOARD_GPIO_BTC3W_OUT 0x020 /* bit 5 is TX_CONF */ +#define BOARD_GPIO_BTCMOD_IN 0x010 /* bit 4 is the alternate BT Coexistence Input */ +#define BOARD_GPIO_BTCMOD_OUT 0x020 /* bit 5 
is the alternate BT Coexistence Out */ +#define BOARD_GPIO_BTC_IN 0x080 /* bit 7 is BT Coexistence Input */ +#define BOARD_GPIO_BTC_OUT 0x100 /* bit 8 is BT Coexistence Out */ +#define BOARD_GPIO_PACTRL 0x200 /* bit 9 controls the PA on new 4306 boards */ +#define BOARD_GPIO_12 0x1000 /* gpio 12 */ +#define BOARD_GPIO_13 0x2000 /* gpio 13 */ +#define BOARD_GPIO_BTC4_IN 0x0800 /* gpio 11, coex4, in */ +#define BOARD_GPIO_BTC4_BT 0x2000 /* gpio 12, coex4, bt active */ +#define BOARD_GPIO_BTC4_STAT 0x4000 /* gpio 14, coex4, status */ +#define BOARD_GPIO_BTC4_WLAN 0x8000 /* gpio 15, coex4, wlan active */ +#define BOARD_GPIO_1_WLAN_PWR 0x02 /* throttle WLAN power on X21 board */ +#define BOARD_GPIO_2_WLAN_PWR 0x04 /* throttle WLAN power on X29C board */ +#define BOARD_GPIO_3_WLAN_PWR 0x08 /* throttle WLAN power on X28 board */ +#define BOARD_GPIO_4_WLAN_PWR 0x10 /* throttle WLAN power on X19 board */ +#define BOARD_GPIO_13_WLAN_PWR 0x2000 /* throttle WLAN power on X14 board */ + +#define GPIO_BTC4W_OUT_4312 0x010 /* bit 4 is BT_IODISABLE */ +#define GPIO_BTC4W_OUT_43224 0x020 /* bit 5 is BT_IODISABLE */ +#define GPIO_BTC4W_OUT_43224_SHARED 0x0e0 /* bit 5 is BT_IODISABLE */ +#define GPIO_BTC4W_OUT_43225 0x0e0 /* bit 5 BT_IODISABLE, bit 6 SW_BT, bit 7 SW_WL */ +#define GPIO_BTC4W_OUT_43421 0x020 /* bit 5 is BT_IODISABLE */ +#define GPIO_BTC4W_OUT_4313 0x060 /* bit 5 SW_BT, bit 6 SW_WL */ +#define GPIO_BTC4W_OUT_4331_SHARED 0x010 /* GPIO 4 */ + +#define PCI_CFG_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */ +#define PCI_CFG_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */ +#define PCI_CFG_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal power-up */ +#define PCI_CFG_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL power-down */ + +/* power control defines */ +#define PLL_DELAY 150 /* us pll on delay */ +#define FREF_DELAY 200 /* us fref change delay */ +#define MIN_SLOW_CLK 32 /* us Slow clock period */ +#define XTAL_ON_DELAY 1000 /* us crystal power-on delay */ + + +/* 43341 Boards */ +#define BCM943341WLABGS_SSID 0x062d + +/* 43342 Boards */ +#define BCM943342FCAGBI_SSID 0x0641 + +/* 43602 Boards, unclear yet what boards will be created. */ +#define BCM943602RSVD1_SSID 0x06a5 +#define BCM943602RSVD2_SSID 0x06a6 +#define BCM943602X87 0X0133 +#define BCM943602X87P2 0X0143 +#define BCM943602X238 0X0132 +#define BCM943602X238D 0X014A + +/* # of GPIO pins */ +#define GPIO_NUMPINS 32 + +/* These values are used by dhd host driver. */ +#define RDL_RAM_BASE_4319 0x60000000 +#define RDL_RAM_BASE_4329 0x60000000 +#define RDL_RAM_SIZE_4319 0x48000 +#define RDL_RAM_SIZE_4329 0x48000 +#define RDL_RAM_SIZE_43236 0x70000 +#define RDL_RAM_BASE_43236 0x60000000 +#define RDL_RAM_SIZE_4328 0x60000 +#define RDL_RAM_BASE_4328 0x80000000 +#define RDL_RAM_SIZE_4322 0x60000 +#define RDL_RAM_BASE_4322 0x60000000 +#define RDL_RAM_SIZE_4360 0xA0000 +#define RDL_RAM_BASE_4360 0x60000000 +#define RDL_RAM_SIZE_43242 0x90000 +#define RDL_RAM_BASE_43242 0x60000000 +#define RDL_RAM_SIZE_43143 0x70000 +#define RDL_RAM_BASE_43143 0x60000000 +#define RDL_RAM_SIZE_4350 0xC0000 +#define RDL_RAM_BASE_4350 0x180800 + +/* generic defs for nvram "muxenab" bits +* Note: these differ for 4335a0. refer bcmchipc.h for specific mux options. 
+*/ +#define MUXENAB_UART 0x00000001 +#define MUXENAB_GPIO 0x00000002 +#define MUXENAB_ERCX 0x00000004 /* External Radio BT coex */ +#define MUXENAB_JTAG 0x00000008 +#define MUXENAB_HOST_WAKE 0x00000010 /* configure GPIO for SDIO host_wake */ +#define MUXENAB_I2S_EN 0x00000020 +#define MUXENAB_I2S_MASTER 0x00000040 +#define MUXENAB_I2S_FULL 0x00000080 +#define MUXENAB_SFLASH 0x00000100 +#define MUXENAB_RFSWCTRL0 0x00000200 +#define MUXENAB_RFSWCTRL1 0x00000400 +#define MUXENAB_RFSWCTRL2 0x00000800 +#define MUXENAB_SECI 0x00001000 +#define MUXENAB_BT_LEGACY 0x00002000 +#define MUXENAB_HOST_WAKE1 0x00004000 /* configure alternative GPIO for SDIO host_wake */ + +/* Boot flags */ +#define FLASH_KERNEL_NFLASH 0x00000001 +#define FLASH_BOOT_NFLASH 0x00000002 + +#endif /* _BCMDEVS_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmendian.h b/drivers/net/wireless/bcmdhd/include/bcmendian.h new file mode 100644 index 000000000000..27f237947324 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmendian.h @@ -0,0 +1,332 @@ +/* + * Byte order utilities + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmendian.h 514727 2014-11-12 03:02:48Z $ + * + * This file by default provides proper behavior on little-endian architectures. + * On big-endian architectures, IL_BIGENDIAN should be defined. 
+ */ + +#ifndef _BCMENDIAN_H_ +#define _BCMENDIAN_H_ + +#include <typedefs.h> + +/* Reverse the bytes in a 16-bit value */ +#define BCMSWAP16(val) \ + ((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \ + (((uint16)(val) & (uint16)0xff00U) >> 8))) + +/* Reverse the bytes in a 32-bit value */ +#define BCMSWAP32(val) \ + ((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \ + (((uint32)(val) & (uint32)0x0000ff00U) << 8) | \ + (((uint32)(val) & (uint32)0x00ff0000U) >> 8) | \ + (((uint32)(val) & (uint32)0xff000000U) >> 24))) + +/* Reverse the two 16-bit halves of a 32-bit value */ +#define BCMSWAP32BY16(val) \ + ((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \ + (((uint32)(val) & (uint32)0xffff0000U) >> 16))) + +/* Reverse the bytes in a 64-bit value */ +#define BCMSWAP64(val) \ + ((uint64)((((uint64)(val) & 0x00000000000000ffULL) << 56) | \ + (((uint64)(val) & 0x000000000000ff00ULL) << 40) | \ + (((uint64)(val) & 0x0000000000ff0000ULL) << 24) | \ + (((uint64)(val) & 0x00000000ff000000ULL) << 8) | \ + (((uint64)(val) & 0x000000ff00000000ULL) >> 8) | \ + (((uint64)(val) & 0x0000ff0000000000ULL) >> 24) | \ + (((uint64)(val) & 0x00ff000000000000ULL) >> 40) | \ + (((uint64)(val) & 0xff00000000000000ULL) >> 56))) + +/* Reverse the two 32-bit halves of a 64-bit value */ +#define BCMSWAP64BY32(val) \ + ((uint64)((((uint64)(val) & 0x00000000ffffffffULL) << 32) | \ + (((uint64)(val) & 0xffffffff00000000ULL) >> 32))) + + +/* Byte swapping macros + * Host <=> Network (Big Endian) for 16- and 32-bit values + * Host <=> Little-Endian for 16- and 32-bit values + */ +#ifndef hton16 +#define HTON16(i) BCMSWAP16(i) +#define hton16(i) bcmswap16(i) +#define HTON32(i) BCMSWAP32(i) +#define hton32(i) bcmswap32(i) +#define NTOH16(i) BCMSWAP16(i) +#define ntoh16(i) bcmswap16(i) +#define NTOH32(i) BCMSWAP32(i) +#define ntoh32(i) bcmswap32(i) +#define LTOH16(i) (i) +#define ltoh16(i) (i) +#define LTOH32(i) (i) +#define ltoh32(i) (i) +#define HTOL16(i) (i) +#define htol16(i) (i) +#define HTOL32(i) (i) +#define htol32(i) (i) +#define HTOL64(i) (i) +#define htol64(i) (i) +#endif /* hton16 */ + +#define ltoh16_buf(buf, i) +#define htol16_buf(buf, i) + +/* Unaligned loads and stores in host byte order */ +#define load32_ua(a) ltoh32_ua(a) +#define store32_ua(a, v) htol32_ua_store(v, a) +#define load16_ua(a) ltoh16_ua(a) +#define store16_ua(a, v) htol16_ua_store(v, a) + +#define _LTOH16_UA(cp) ((cp)[0] | ((cp)[1] << 8)) +#define _LTOH32_UA(cp) ((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24)) +#define _NTOH16_UA(cp) (((cp)[0] << 8) | (cp)[1]) +#define _NTOH32_UA(cp) (((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3]) + +#define ltoh_ua(ptr) \ + (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \ + sizeof(*(ptr)) == sizeof(uint16) ? _LTOH16_UA((const uint8 *)(ptr)) : \ + sizeof(*(ptr)) == sizeof(uint32) ? _LTOH32_UA((const uint8 *)(ptr)) : \ + *(uint8 *)0) + +#define ntoh_ua(ptr) \ + (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \ + sizeof(*(ptr)) == sizeof(uint16) ? _NTOH16_UA((const uint8 *)(ptr)) : \ + sizeof(*(ptr)) == sizeof(uint32) ? _NTOH32_UA((const uint8 *)(ptr)) : \ + *(uint8 *)0)
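+
+/*
+ * Illustrative sketch (not from the original header): parsing a packed
+ * little-endian two-field header with the unaligned helpers above,
+ * independent of host byte order and alignment. The layout and helper
+ * name are hypothetical.
+ */
+static INLINE uint32
+example_parse_le_hdr(const uint8 *buf, uint16 *type_out)
+{
+ *type_out = (uint16)_LTOH16_UA(buf); /* 16-bit LE type at offset 0 */
+ return (uint32)_LTOH32_UA(buf + 2); /* 32-bit LE value at offset 2 */
+}
+
+#ifdef __GNUC__
+
+/* GNU macro versions avoid referencing the argument multiple times, while also
+ * avoiding the -fno-inline used in ROM builds. 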
+ */ + +#define bcmswap16(val) ({ \ + uint16 _val = (val); \ + BCMSWAP16(_val); \ +}) + +#define bcmswap32(val) ({ \ + uint32 _val = (val); \ + BCMSWAP32(_val); \ +}) + +#define bcmswap64(val) ({ \ + uint64 _val = (val); \ + BCMSWAP64(_val); \ +}) + +#define bcmswap32by16(val) ({ \ + uint32 _val = (val); \ + BCMSWAP32BY16(_val); \ +}) + +#define bcmswap16_buf(buf, len) ({ \ + uint16 *_buf = (uint16 *)(buf); \ + uint _wds = (len) / 2; \ + while (_wds--) { \ + *_buf = bcmswap16(*_buf); \ + _buf++; \ + } \ +}) + +#define htol16_ua_store(val, bytes) ({ \ + uint16 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val & 0xff; \ + _bytes[1] = _val >> 8; \ +}) + +#define htol32_ua_store(val, bytes) ({ \ + uint32 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val & 0xff; \ + _bytes[1] = (_val >> 8) & 0xff; \ + _bytes[2] = (_val >> 16) & 0xff; \ + _bytes[3] = _val >> 24; \ +}) + +#define hton16_ua_store(val, bytes) ({ \ + uint16 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val >> 8; \ + _bytes[1] = _val & 0xff; \ +}) + +#define hton32_ua_store(val, bytes) ({ \ + uint32 _val = (val); \ + uint8 *_bytes = (uint8 *)(bytes); \ + _bytes[0] = _val >> 24; \ + _bytes[1] = (_val >> 16) & 0xff; \ + _bytes[2] = (_val >> 8) & 0xff; \ + _bytes[3] = _val & 0xff; \ +}) + +#define ltoh16_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _LTOH16_UA(_bytes); \ +}) + +#define ltoh32_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _LTOH32_UA(_bytes); \ +}) + +#define ntoh16_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _NTOH16_UA(_bytes); \ +}) + +#define ntoh32_ua(bytes) ({ \ + const uint8 *_bytes = (const uint8 *)(bytes); \ + _NTOH32_UA(_bytes); \ +}) + +#else /* !__GNUC__ */ + +/* Inline versions avoid referencing the argument multiple times */ +static INLINE uint16 +bcmswap16(uint16 val) +{ + return BCMSWAP16(val); +} + +static INLINE uint32 +bcmswap32(uint32 val) +{ + return BCMSWAP32(val); +} + +static INLINE uint64 +bcmswap64(uint64 val) +{ + return BCMSWAP64(val); +} + +static INLINE uint32 +bcmswap32by16(uint32 val) +{ + return BCMSWAP32BY16(val); +} + +/* Reverse pairs of bytes in a buffer (not for high-performance use) */ +/* buf - start of buffer of shorts to swap */ +/* len - byte length of buffer */ +static INLINE void +bcmswap16_buf(uint16 *buf, uint len) +{ + len = len / 2; + + while (len--) { + *buf = bcmswap16(*buf); + buf++; + } +} + +/* + * Store 16-bit value to unaligned little-endian byte array. + */ +static INLINE void +htol16_ua_store(uint16 val, uint8 *bytes) +{ + bytes[0] = val & 0xff; + bytes[1] = val >> 8; +} + +/* + * Store 32-bit value to unaligned little-endian byte array. + */ +static INLINE void +htol32_ua_store(uint32 val, uint8 *bytes) +{ + bytes[0] = val & 0xff; + bytes[1] = (val >> 8) & 0xff; + bytes[2] = (val >> 16) & 0xff; + bytes[3] = val >> 24; +} + +/* + * Store 16-bit value to unaligned network-(big-)endian byte array. + */ +static INLINE void +hton16_ua_store(uint16 val, uint8 *bytes) +{ + bytes[0] = val >> 8; + bytes[1] = val & 0xff; +} + +/* + * Store 32-bit value to unaligned network-(big-)endian byte array. + */ +static INLINE void +hton32_ua_store(uint32 val, uint8 *bytes) +{ + bytes[0] = val >> 24; + bytes[1] = (val >> 16) & 0xff; + bytes[2] = (val >> 8) & 0xff; + bytes[3] = val & 0xff; +} + +/* + * Load 16-bit value from unaligned little-endian byte array. 
+ */ +static INLINE uint16 +ltoh16_ua(const void *bytes) +{ + return _LTOH16_UA((const uint8 *)bytes); +} + +/* + * Load 32-bit value from unaligned little-endian byte array. + */ +static INLINE uint32 +ltoh32_ua(const void *bytes) +{ + return _LTOH32_UA((const uint8 *)bytes); +} + +/* + * Load 16-bit value from unaligned big-(network-)endian byte array. + */ +static INLINE uint16 +ntoh16_ua(const void *bytes) +{ + return _NTOH16_UA((const uint8 *)bytes); +} + +/* + * Load 32-bit value from unaligned big-(network-)endian byte array. + */ +static INLINE uint32 +ntoh32_ua(const void *bytes) +{ + return _NTOH32_UA((const uint8 *)bytes); +} + +#endif /* !__GNUC__ */ +#endif /* !_BCMENDIAN_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h b/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h new file mode 100644 index 000000000000..ab1375ea854d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmmsgbuf.h @@ -0,0 +1,863 @@ +/* + * MSGBUF network driver ioctl/indication encoding + * Broadcom 802.11abg Networking Device Driver + * + * Definitions subject to change without notice. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmmsgbuf.h 541060 2015-03-13 23:28:01Z $ + */ +#ifndef _bcmmsgbuf_h_ +#define _bcmmsgbuf_h_ + +#include +#include +#include + +#define MSGBUF_MAX_MSG_SIZE ETHER_MAX_LEN + +#define D2H_EPOCH_MODULO 253 /* sequence number wrap */ +#define D2H_EPOCH_INIT_VAL (D2H_EPOCH_MODULO + 1) + +#define H2D_EPOCH_MODULO 253 /* sequence number wrap */ +#define H2D_EPOCH_INIT_VAL (H2D_EPOCH_MODULO + 1) + +#define H2DRING_TXPOST_ITEMSIZE 48 +#define H2DRING_RXPOST_ITEMSIZE 32 +#define H2DRING_CTRL_SUB_ITEMSIZE 40 +#define D2HRING_TXCMPLT_ITEMSIZE 16 +#define D2HRING_RXCMPLT_ITEMSIZE 32 +#define D2HRING_CTRL_CMPLT_ITEMSIZE 24 + +#define H2DRING_TXPOST_MAX_ITEM 512 +#define H2DRING_RXPOST_MAX_ITEM 512 +#define H2DRING_CTRL_SUB_MAX_ITEM 64 +#define D2HRING_TXCMPLT_MAX_ITEM 1024 +#define D2HRING_RXCMPLT_MAX_ITEM 512 + +#define D2HRING_CTRL_CMPLT_MAX_ITEM 64 + +enum { + DNGL_TO_HOST_MSGBUF, + HOST_TO_DNGL_MSGBUF +}; + +enum { + HOST_TO_DNGL_TXP_DATA, + HOST_TO_DNGL_RXP_DATA, + HOST_TO_DNGL_CTRL, + DNGL_TO_HOST_DATA, + DNGL_TO_HOST_CTRL +};
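+
+/*
+ * Illustrative sketch (not from the original header): the one-byte
+ * epoch in each work item is typically derived from a running sequence
+ * number, wrapped at the modulo defined above, letting the consumer
+ * detect stale ring entries. The helper name is hypothetical.
+ */
+static INLINE uint8
+example_epoch_of(uint32 seqnum)
+{
+ return (uint8)(seqnum % H2D_EPOCH_MODULO);
+}
+
+#define MESSAGE_PAYLOAD(a) (a & MSG_TYPE_INTERNAL_USE_START) ? 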
TRUE : FALSE + +#ifdef PCIE_API_REV1 + +#define BCMMSGBUF_DUMMY_REF(a, b) do {BCM_REFERENCE((a));BCM_REFERENCE((b));} while (0) + +#define BCMMSGBUF_API_IFIDX(a) 0 +#define BCMMSGBUF_API_SEQNUM(a) 0 +#define BCMMSGBUF_IOCTL_XTID(a) 0 +#define BCMMSGBUF_IOCTL_PKTID(a) ((a)->cmd_id) + +#define BCMMSGBUF_SET_API_IFIDX(a, b) BCMMSGBUF_DUMMY_REF(a, b) +#define BCMMSGBUF_SET_API_SEQNUM(a, b) BCMMSGBUF_DUMMY_REF(a, b) +#define BCMMSGBUF_IOCTL_SET_PKTID(a, b) (BCMMSGBUF_IOCTL_PKTID(a) = (b)) +#define BCMMSGBUF_IOCTL_SET_XTID(a, b) BCMMSGBUF_DUMMY_REF(a, b) + +#else /* PCIE_API_REV1 */ + +#define BCMMSGBUF_API_IFIDX(a) ((a)->if_id) +#define BCMMSGBUF_IOCTL_PKTID(a) ((a)->pkt_id) +#define BCMMSGBUF_API_SEQNUM(a) ((a)->u.seq.seq_no) +#define BCMMSGBUF_IOCTL_XTID(a) ((a)->xt_id) + +#define BCMMSGBUF_SET_API_IFIDX(a, b) (BCMMSGBUF_API_IFIDX((a)) = (b)) +#define BCMMSGBUF_SET_API_SEQNUM(a, b) (BCMMSGBUF_API_SEQNUM((a)) = (b)) +#define BCMMSGBUF_IOCTL_SET_PKTID(a, b) (BCMMSGBUF_IOCTL_PKTID((a)) = (b)) +#define BCMMSGBUF_IOCTL_SET_XTID(a, b) (BCMMSGBUF_IOCTL_XTID((a)) = (b)) + +#endif /* PCIE_API_REV1 */ + +/* utility data structures */ + +union addr64 { + struct { + uint32 low; + uint32 high; + }; + struct { + uint32 low_addr; + uint32 high_addr; + }; + uint64 u64; +} DECLSPEC_ALIGN(8); + +typedef union addr64 bcm_addr64_t; + +/* IOCTL req Hdr */ +/* cmn Msg Hdr */ +typedef struct cmn_msg_hdr { + /** message type */ + uint8 msg_type; + /** interface index this is valid for */ + uint8 if_id; + /* flags */ + uint8 flags; + /** sequence number */ + uint8 epoch; + /** packet Identifier for the associated host buffer */ + uint32 request_id; +} cmn_msg_hdr_t; + +/** message type */ +typedef enum bcmpcie_msgtype { + MSG_TYPE_GEN_STATUS = 0x1, + MSG_TYPE_RING_STATUS = 0x2, + MSG_TYPE_FLOW_RING_CREATE = 0x3, + MSG_TYPE_FLOW_RING_CREATE_CMPLT = 0x4, + MSG_TYPE_FLOW_RING_DELETE = 0x5, + MSG_TYPE_FLOW_RING_DELETE_CMPLT = 0x6, + MSG_TYPE_FLOW_RING_FLUSH = 0x7, + MSG_TYPE_FLOW_RING_FLUSH_CMPLT = 0x8, + MSG_TYPE_IOCTLPTR_REQ = 0x9, + MSG_TYPE_IOCTLPTR_REQ_ACK = 0xA, + MSG_TYPE_IOCTLRESP_BUF_POST = 0xB, + MSG_TYPE_IOCTL_CMPLT = 0xC, + MSG_TYPE_EVENT_BUF_POST = 0xD, + MSG_TYPE_WL_EVENT = 0xE, + MSG_TYPE_TX_POST = 0xF, + MSG_TYPE_TX_STATUS = 0x10, + MSG_TYPE_RXBUF_POST = 0x11, + MSG_TYPE_RX_CMPLT = 0x12, + MSG_TYPE_LPBK_DMAXFER = 0x13, + MSG_TYPE_LPBK_DMAXFER_CMPLT = 0x14, + MSG_TYPE_FLOW_RING_RESUME = 0x15, + MSG_TYPE_FLOW_RING_RESUME_CMPLT = 0x16, + MSG_TYPE_FLOW_RING_SUSPEND = 0x17, + MSG_TYPE_FLOW_RING_SUSPEND_CMPLT = 0x18, + MSG_TYPE_INFO_BUF_POST = 0x19, + MSG_TYPE_INFO_BUF_CMPLT = 0x1A, + MSG_TYPE_H2D_RING_CREATE = 0x1B, + MSG_TYPE_D2H_RING_CREATE = 0x1C, + MSG_TYPE_H2D_RING_CREATE_CMPLT = 0x1D, + MSG_TYPE_D2H_RING_CREATE_CMPLT = 0x1E, + MSG_TYPE_H2D_RING_CONFIG = 0x1F, + MSG_TYPE_D2H_RING_CONFIG = 0x20, + MSG_TYPE_H2D_RING_CONFIG_CMPLT = 0x21, + MSG_TYPE_D2H_RING_CONFIG_CMPLT = 0x22, + MSG_TYPE_H2D_MAILBOX_DATA = 0x23, + MSG_TYPE_D2H_MAILBOX_DATA = 0x24, + + MSG_TYPE_API_MAX_RSVD = 0x3F +} bcmpcie_msg_type_t; + +typedef enum bcmpcie_msgtype_int { + MSG_TYPE_INTERNAL_USE_START = 0x40, + MSG_TYPE_EVENT_PYLD = 0x41, + MSG_TYPE_IOCT_PYLD = 0x42, + MSG_TYPE_RX_PYLD = 0x43, + MSG_TYPE_HOST_FETCH = 0x44, + MSG_TYPE_LPBK_DMAXFER_PYLD = 0x45, + MSG_TYPE_TXMETADATA_PYLD = 0x46, + MSG_TYPE_INDX_UPDATE = 0x47 +} bcmpcie_msgtype_int_t;
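+
+/*
+ * Illustrative sketch (not from the original header): a minimal
+ * host-side dispatch over cmn_msg_hdr_t.msg_type, as a D2H completion
+ * ring consumer might do; handler bodies are elided and the function
+ * name is hypothetical.
+ */
+static INLINE void
+example_dispatch(const cmn_msg_hdr_t *hdr)
+{
+ switch (hdr->msg_type) {
+ case MSG_TYPE_IOCTL_CMPLT:
+  /* pair with the outstanding request via hdr->request_id */
+  break;
+ case MSG_TYPE_WL_EVENT:
+ case MSG_TYPE_TX_STATUS:
+ case MSG_TYPE_RX_CMPLT:
+ default:
+  break;
+ }
+}
+
+typedef enum bcmpcie_msgtype_u {
+ MSG_TYPE_TX_BATCH_POST = 0x80,
+ MSG_TYPE_IOCTL_REQ = 0x81,
+ MSG_TYPE_HOST_EVNT = 0x82, /* console related */
+ MSG_TYPE_LOOPBACK = 0x83
+} 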
bcmpcie_msgtype_u_t; + +/** + * D2H ring host wakeup soft doorbell, override the PCIE doorbell. + * Host configures an <32bit address,value> tuple, and dongle uses SBTOPCIE + * Transl0 to write specified value to host address. + * + * Use case: 32bit Address mapped to HW Accelerator Core/Thread Wakeup Register + * and value is Core/Thread context. Host will ensure routing the 32bit address + * offered to PCIE to the mapped register. + * + * D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL + */ +typedef struct bcmpcie_soft_doorbell { + uint32 value; /* host defined value to be written, eg HW threadid */ + bcm_addr64_t haddr; /* host address, eg thread wakeup register address */ + uint16 items; /* interrupt coalescing: item count before wakeup */ + uint16 msecs; /* interrupt coalescing: timeout in millisecs */ +} bcmpcie_soft_doorbell_t; + + +/* if_id */ +#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT 5 +#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX 0x7 +#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MASK \ + (BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX << BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT) +#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_SHFT 0 +#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MAX 0x1F +#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MASK \ + (BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MAX << BCMPCIE_CMNHDR_IFIDX_VIRTINTF_SHFT) + +/* flags */ +#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX 0x1 +#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX_INTR 0x2 +#define BCMPCIE_CMNHDR_FLAGS_PHASE_BIT 0x80 + + +/* IOCTL request message */ +typedef struct ioctl_req_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** ioctl command type */ + uint32 cmd; + /** ioctl transaction ID, to pair with a ioctl response */ + uint16 trans_id; + /** input arguments buffer len */ + uint16 input_buf_len; + /** expected output len */ + uint16 output_buf_len; + /** to align the host address on 8 byte boundary */ + uint16 rsvd[3]; + /** always align on 8 byte boundary */ + bcm_addr64_t host_input_buf_addr; + /* rsvd */ + uint32 rsvd1[2]; +} ioctl_req_msg_t; + +/** buffer post messages for device to use to return IOCTL responses, Events */ +typedef struct ioctl_resp_evt_buf_post_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** length of the host buffer supplied */ + uint16 host_buf_len; + /** to align the host address on 8 byte boundary */ + uint16 reserved[3]; + /** always align on 8 byte boundary */ + bcm_addr64_t host_buf_addr; + uint32 rsvd[4]; +} ioctl_resp_evt_buf_post_msg_t; + + +typedef struct pcie_dma_xfer_params { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + + /** always align on 8 byte boundary */ + bcm_addr64_t host_input_buf_addr; + + /** always align on 8 byte boundary */ + bcm_addr64_t host_ouput_buf_addr; + + /** length of transfer */ + uint32 xfer_len; + /** delay before doing the src txfer */ + uint32 srcdelay; + /** delay before doing the dest txfer */ + uint32 destdelay; + uint32 rsvd; +} pcie_dma_xfer_params_t; + +/** Complete msgbuf hdr for flow ring update from host to dongle */ +typedef struct tx_flowring_create_request { + cmn_msg_hdr_t msg; + uint8 da[ETHER_ADDR_LEN]; + uint8 sa[ETHER_ADDR_LEN]; + uint8 tid; + uint8 if_flags; + uint16 flow_ring_id; + uint8 tc; + uint8 priority; + uint16 int_vector; + uint16 max_items; + uint16 len_item; + bcm_addr64_t flow_ring_ptr; +} tx_flowring_create_request_t; + +typedef struct tx_flowring_delete_request { + cmn_msg_hdr_t msg; + uint16 flow_ring_id; + uint16 reason; + uint32 rsvd[7]; +} tx_flowring_delete_request_t;
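+
+/*
+ * Illustrative sketch (not from the original header): filling the soft
+ * doorbell tuple defined above. The register address, context value and
+ * coalescing settings are hypothetical; the host must route the 32-bit
+ * address to the mapped wakeup register.
+ */
+static INLINE void
+example_fill_doorbell(bcmpcie_soft_doorbell_t *db, uint32 thread_ctx)
+{
+ db->value = thread_ctx; /* e.g. HW thread context */
+ db->haddr.low_addr = 0x0; /* hypothetical wakeup register address */
+ db->haddr.high_addr = 0x0;
+ db->items = 8; /* coalesce: wake after 8 items... */
+ db->msecs = 2; /* ...or after 2 ms, whichever comes first */
+}
+
+typedef struct tx_flowring_flush_request {
+ cmn_msg_hdr_t msg;
+ uint16 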
flow_ring_id; + uint16 reason; + uint32 rsvd[7]; +} tx_flowring_flush_request_t; + +/** Subtypes for ring_config_req control message */ +typedef enum ring_config_subtype { + /** Default D2H PCIE doorbell override using ring_config_req msg */ + D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL = 1, /* Software doorbell */ + D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL = 2 /* MSI configuration */ +} ring_config_subtype_t; + +typedef struct ring_config_req { + cmn_msg_hdr_t msg; + uint16 subtype; + uint16 ring_id; + uint32 rsvd; + union { + uint32 data[6]; + /** D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL */ + bcmpcie_soft_doorbell_t soft_doorbell; + }; +} ring_config_req_t; + +typedef union ctrl_submit_item { + ioctl_req_msg_t ioctl_req; + ioctl_resp_evt_buf_post_msg_t resp_buf_post; + pcie_dma_xfer_params_t dma_xfer; + tx_flowring_create_request_t flow_create; + tx_flowring_delete_request_t flow_delete; + tx_flowring_flush_request_t flow_flush; + ring_config_req_t ring_config_req; + unsigned char check[H2DRING_CTRL_SUB_ITEMSIZE]; +} ctrl_submit_item_t; + +/** Control Completion messages (20 bytes) */ +typedef struct compl_msg_hdr { + /** status for the completion */ + int16 status; + /** submission flow ring id which generated this status */ + uint16 flow_ring_id; +} compl_msg_hdr_t; + +/** XOR checksum or a magic number to audit DMA done */ +typedef uint32 dma_done_t; + +/* completion header status codes */ +#define BCMPCIE_SUCCESS 0 +#define BCMPCIE_NOTFOUND 1 +#define BCMPCIE_NOMEM 2 +#define BCMPCIE_BADOPTION 3 +#define BCMPCIE_RING_IN_USE 4 +#define BCMPCIE_RING_ID_INVALID 5 +#define BCMPCIE_PKT_FLUSH 6 +#define BCMPCIE_NO_EVENT_BUF 7 +#define BCMPCIE_NO_RX_BUF 8 +#define BCMPCIE_NO_IOCTLRESP_BUF 9 +#define BCMPCIE_MAX_IOCTLRESP_BUF 10 +#define BCMPCIE_MAX_EVENT_BUF 11 + +/** IOCTL completion response */ +typedef struct ioctl_compl_resp_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** response buffer len where a host buffer is involved */ + uint16 resp_len; + /** transaction id to pair with a request */ + uint16 trans_id; + /** cmd id */ + uint32 cmd; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ioctl_comp_resp_msg_t; + +/** IOCTL request acknowledgement */ +typedef struct ioctl_req_ack_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** cmd id */ + uint32 cmd; + uint32 rsvd; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ioctl_req_ack_msg_t; + +/** WL event message: send from device to host */ +typedef struct wlevent_req_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** event data len valid with the event buffer */ + uint16 event_data_len; + /** sequence number */ + uint16 seqnum; + /** rsvd */ + uint32 rsvd; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} wlevent_req_msg_t; + +/** dma xfer complete message */ +typedef struct pcie_dmaxfer_cmplt { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} pcie_dmaxfer_cmplt_t;
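+
+/*
+ * Illustrative sketch (not from the original header): decoding the
+ * completion status codes above for logging; the helper name is
+ * hypothetical and the list is not exhaustive.
+ */
+static INLINE const char *
+example_status_str(int16 status)
+{
+ switch (status) {
+ case BCMPCIE_SUCCESS: return "success";
+ case BCMPCIE_NOTFOUND: return "not found";
+ case BCMPCIE_NOMEM: return "out of memory";
+ case BCMPCIE_RING_ID_INVALID: return "invalid ring id";
+ default: return "unknown";
+ }
+}
+
+/** general status message */
+typedef struct pcie_gen_status {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** completion message header */
+ compl_msg_hdr_t 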
compl_hdr; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} pcie_gen_status_t; + +/** ring status message */ +typedef struct pcie_ring_status { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** message which firmware couldn't decode */ + uint16 write_idx; + uint16 rsvd[3]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} pcie_ring_status_t; + +typedef struct tx_flowring_create_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} tx_flowring_create_response_t; + +typedef struct tx_flowring_delete_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} tx_flowring_delete_response_t; + +typedef struct tx_flowring_flush_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} tx_flowring_flush_response_t; + +/** Common layout of all d2h control messages */ +typedef struct ctrl_compl_msg { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ctrl_compl_msg_t; + +typedef struct ring_config_resp { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + uint32 rsvd[2]; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} ring_config_resp_t; + +typedef union ctrl_completion_item { + ioctl_comp_resp_msg_t ioctl_resp; + wlevent_req_msg_t event; + ioctl_req_ack_msg_t ioct_ack; + pcie_dmaxfer_cmplt_t pcie_xfer_cmplt; + pcie_gen_status_t pcie_gen_status; + pcie_ring_status_t pcie_ring_status; + tx_flowring_create_response_t txfl_create_resp; + tx_flowring_delete_response_t txfl_delete_resp; + tx_flowring_flush_response_t txfl_flush_resp; + ctrl_compl_msg_t ctrl_compl; + ring_config_resp_t ring_config_resp; + unsigned char check[D2HRING_CTRL_CMPLT_ITEMSIZE]; +} ctrl_completion_item_t; + +/** H2D Rxpost ring work items */ +typedef struct host_rxbuf_post { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** provided meta data buffer len */ + uint16 metadata_buf_len; + /** provided data buffer len to receive data */ + uint16 data_buf_len; + /** alignment to make the host buffers start on 8 byte boundary */ + uint32 rsvd; + /** provided meta data buffer */ + bcm_addr64_t metadata_buf_addr; + /** provided data buffer to receive data */ + bcm_addr64_t data_buf_addr; +} host_rxbuf_post_t; + +typedef union rxbuf_submit_item { + host_rxbuf_post_t rxpost; + unsigned char check[H2DRING_RXPOST_ITEMSIZE]; +} rxbuf_submit_item_t; + + +/** D2H Rxcompletion ring work items */ +typedef struct host_rxbuf_cmpl { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + /** filled up meta data len */ + uint16 metadata_len; + /** filled up buffer len to receive data */ + uint16 data_len; + /** offset in the host rx buffer where the data starts */ + uint16 data_offset; + /** rx flags */ + uint16 flags; + /** rx status */ + uint32 rx_status_0; + uint32 rx_status_1; + /** XOR checksum or a magic 
number to audit DMA done */ + dma_done_t marker; +} host_rxbuf_cmpl_t; + +typedef union rxbuf_complete_item { + host_rxbuf_cmpl_t rxcmpl; + unsigned char check[D2HRING_RXCMPLT_ITEMSIZE]; +} rxbuf_complete_item_t; + + +typedef struct host_txbuf_post { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** eth header */ + uint8 txhdr[ETHER_HDR_LEN]; + /** flags */ + uint8 flags; + /** number of segments */ + uint8 seg_cnt; + + /** provided meta data buffer for txstatus */ + bcm_addr64_t metadata_buf_addr; + /** provided data buffer containing the frame to send */ + bcm_addr64_t data_buf_addr; + /** provided meta data buffer len */ + uint16 metadata_buf_len; + /** provided data buffer len of the frame to send */ + uint16 data_len; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; +} host_txbuf_post_t; + +#define BCMPCIE_PKT_FLAGS_FRAME_802_3 0x01 +#define BCMPCIE_PKT_FLAGS_FRAME_802_11 0x02 + +#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK 0x03 /* Exempt uses 2 bits */ +#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT 0x02 /* needs to be shifted past other bits */ + + +#define BCMPCIE_PKT_FLAGS_PRIO_SHIFT 5 +#define BCMPCIE_PKT_FLAGS_PRIO_MASK (7 << BCMPCIE_PKT_FLAGS_PRIO_SHIFT) + +/* These are added to fix up compile issues */ +#define BCMPCIE_TXPOST_FLAGS_FRAME_802_3 BCMPCIE_PKT_FLAGS_FRAME_802_3 +#define BCMPCIE_TXPOST_FLAGS_FRAME_802_11 BCMPCIE_PKT_FLAGS_FRAME_802_11 +#define BCMPCIE_TXPOST_FLAGS_PRIO_SHIFT BCMPCIE_PKT_FLAGS_PRIO_SHIFT +#define BCMPCIE_TXPOST_FLAGS_PRIO_MASK BCMPCIE_PKT_FLAGS_PRIO_MASK + +/** H2D Txpost ring work items */ +typedef union txbuf_submit_item { + host_txbuf_post_t txpost; + unsigned char check[H2DRING_TXPOST_ITEMSIZE]; +} txbuf_submit_item_t; + +/** D2H Txcompletion ring work items */ +typedef struct host_txbuf_cmpl { + /** common message header */ + cmn_msg_hdr_t cmn_hdr; + /** completion message header */ + compl_msg_hdr_t compl_hdr; + union { + struct { + /** provided meta data len */ + uint16 metadata_len; + /** WLAN side txstatus */ + uint16 tx_status; + }; + /** XOR checksum or a magic number to audit DMA done */ + dma_done_t marker; + }; +} host_txbuf_cmpl_t; + +typedef union txbuf_complete_item { + host_txbuf_cmpl_t txcmpl; + unsigned char check[D2HRING_TXCMPLT_ITEMSIZE]; +} txbuf_complete_item_t; + +#define BCMPCIE_D2H_METADATA_HDRLEN 4 +#define BCMPCIE_D2H_METADATA_MINLEN (BCMPCIE_D2H_METADATA_HDRLEN + 4) + +/** ret buf struct */ +typedef struct ret_buf_ptr { + uint32 low_addr; + uint32 high_addr; +} ret_buf_t; + + +#ifdef PCIE_API_REV1 + +/* ioctl specific hdr */ +typedef struct ioctl_hdr { + uint16 cmd; + uint16 retbuf_len; + uint32 cmd_id; +} ioctl_hdr_t; + +typedef struct ioctlptr_hdr { + uint16 cmd; + uint16 retbuf_len; + uint16 buflen; + uint16 rsvd; + uint32 cmd_id; +} ioctlptr_hdr_t; + +#else /* PCIE_API_REV1 */ + +typedef struct ioctl_req_hdr { + uint32 pkt_id; /**< Packet ID */ + uint32 cmd; /**< IOCTL ID */ + uint16 retbuf_len; + uint16 buflen; + uint16 xt_id; /**< transaction ID */ + uint16 rsvd[1]; +} ioctl_req_hdr_t; + +#endif /* PCIE_API_REV1 */ + + +/** Complete msgbuf hdr for ioctl from host to dongle */ +typedef struct ioct_reqst_hdr { + cmn_msg_hdr_t msg; +#ifdef PCIE_API_REV1 + ioctl_hdr_t ioct_hdr; +#else + ioctl_req_hdr_t ioct_hdr; +#endif + ret_buf_t ret_buf; +} ioct_reqst_hdr_t; + +typedef struct ioctptr_reqst_hdr { + cmn_msg_hdr_t msg; +#ifdef PCIE_API_REV1 + ioctlptr_hdr_t ioct_hdr; +#else + ioctl_req_hdr_t ioct_hdr; +#endif + ret_buf_t ret_buf; + ret_buf_t ioct_buf; +} ioctptr_reqst_hdr_t;
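+
+/*
+ * Illustrative sketch (not from the original header): packing an
+ * 802.1d priority into the txpost flags field with the mask/shift pair
+ * defined earlier in this file. The helper name is hypothetical.
+ */
+static INLINE uint8
+example_encode_prio(uint8 flags, uint8 prio)
+{
+ flags &= (uint8)~BCMPCIE_PKT_FLAGS_PRIO_MASK;
+ flags |= (uint8)((prio << BCMPCIE_PKT_FLAGS_PRIO_SHIFT) &
+  BCMPCIE_PKT_FLAGS_PRIO_MASK);
+ return flags;
+}
+
+/** ioctl response 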
header */ +typedef struct ioct_resp_hdr { + cmn_msg_hdr_t msg; +#ifdef PCIE_API_REV1 + uint32 cmd_id; +#else + uint32 pkt_id; +#endif + uint32 status; + uint32 ret_len; + uint32 inline_data; +#ifdef PCIE_API_REV1 +#else + uint16 xt_id; /**< transaction ID */ + uint16 rsvd[1]; +#endif +} ioct_resp_hdr_t; + +/* ioct resp header used in dongle */ +/* ret buf hdr will be stripped off inside dongle itself */ +typedef struct msgbuf_ioctl_resp { + ioct_resp_hdr_t ioct_hdr; + ret_buf_t ret_buf; /**< ret buf pointers */ +} msgbuf_ioct_resp_t; + +/** WL event hdr info */ +typedef struct wl_event_hdr { + cmn_msg_hdr_t msg; + uint16 event; + uint8 flags; + uint8 rsvd; + uint16 retbuf_len; + uint16 rsvd1; + uint32 rxbufid; +} wl_event_hdr_t; + +#define TXDESCR_FLOWID_PCIELPBK_1 0xFF +#define TXDESCR_FLOWID_PCIELPBK_2 0xFE + +typedef struct txbatch_lenptr_tup { + uint32 pktid; + uint16 pktlen; + uint16 rsvd; + ret_buf_t ret_buf; /**< ret buf pointers */ +} txbatch_lenptr_tup_t; + +typedef struct txbatch_cmn_msghdr { + cmn_msg_hdr_t msg; + uint8 priority; + uint8 hdrlen; + uint8 pktcnt; + uint8 flowid; + uint8 txhdr[ETHER_HDR_LEN]; + uint16 rsvd; +} txbatch_cmn_msghdr_t; + +typedef struct txbatch_msghdr { + txbatch_cmn_msghdr_t txcmn; + txbatch_lenptr_tup_t tx_tup[0]; /**< Based on packet count */ +} txbatch_msghdr_t; + +/* TX desc posting header */ +typedef struct tx_lenptr_tup { + uint16 pktlen; + uint16 rsvd; + ret_buf_t ret_buf; /**< ret buf pointers */ +} tx_lenptr_tup_t; + +typedef struct txdescr_cmn_msghdr { + cmn_msg_hdr_t msg; + uint8 priority; + uint8 hdrlen; + uint8 descrcnt; + uint8 flowid; + uint32 pktid; +} txdescr_cmn_msghdr_t; + +typedef struct txdescr_msghdr { + txdescr_cmn_msghdr_t txcmn; + uint8 txhdr[ETHER_HDR_LEN]; + uint16 rsvd; + tx_lenptr_tup_t tx_tup[0]; /**< Based on descriptor count */ +} txdescr_msghdr_t; + +/** Tx status header info */ +typedef struct txstatus_hdr { + cmn_msg_hdr_t msg; + uint32 pktid; +} txstatus_hdr_t; + +/** RX bufid-len-ptr tuple */ +typedef struct rx_lenptr_tup { + uint32 rxbufid; + uint16 len; + uint16 rsvd2; + ret_buf_t ret_buf; /**< ret buf pointers */ +} rx_lenptr_tup_t; + +/** Rx descr Post hdr info */ +typedef struct rxdesc_msghdr { + cmn_msg_hdr_t msg; + uint16 rsvd0; + uint8 rsvd1; + uint8 descnt; + rx_lenptr_tup_t rx_tup[0]; +} rxdesc_msghdr_t; + +/** RX complete tuples */ +typedef struct rxcmplt_tup { + uint16 retbuf_len; + uint16 data_offset; + uint32 rxstatus0; + uint32 rxstatus1; + uint32 rxbufid; +} rxcmplt_tup_t; + +/** RX complete message hdr */ +typedef struct rxcmplt_hdr { + cmn_msg_hdr_t msg; + uint16 rsvd0; + uint16 rxcmpltcnt; + rxcmplt_tup_t rx_tup[0]; +} rxcmplt_hdr_t; + +typedef struct hostevent_hdr { + cmn_msg_hdr_t msg; + uint32 evnt_pyld; +} hostevent_hdr_t; + +typedef struct dma_xfer_params { + uint32 src_physaddr_hi; + uint32 src_physaddr_lo; + uint32 dest_physaddr_hi; + uint32 dest_physaddr_lo; + uint32 len; + uint32 srcdelay; + uint32 destdelay; +} dma_xfer_params_t; + +enum { + HOST_EVENT_CONS_CMD = 1 +}; + +/* defines for flags */ +#define MSGBUF_IOC_ACTION_MASK 0x1 + +#define MAX_SUSPEND_REQ 15 + +typedef struct tx_idle_flowring_suspend_request { + cmn_msg_hdr_t msg; + uint16 ring_id[MAX_SUSPEND_REQ]; /**< ring Id's */ + uint16 num; /**< number of flowid's to suspend */ +} tx_idle_flowring_suspend_request_t; + +typedef struct tx_idle_flowring_suspend_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + dma_done_t marker; +} tx_idle_flowring_suspend_response_t;
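+
+/*
+ * Illustrative sketch (not from the original header): batching up to
+ * MAX_SUSPEND_REQ idle flow ring ids into the suspend request above.
+ * The helper name is hypothetical.
+ */
+static INLINE uint16
+example_fill_suspend(tx_idle_flowring_suspend_request_t *req,
+ const uint16 *ids, uint16 count)
+{
+ uint16 i;
+
+ if (count > MAX_SUSPEND_REQ)
+  count = MAX_SUSPEND_REQ;
+ for (i = 0; i < count; i++)
+  req->ring_id[i] = ids[i];
+ req->num = count;
+ return count;
+}
+
+typedef struct 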
tx_idle_flowring_resume_request { + cmn_msg_hdr_t msg; + uint16 flow_ring_id; + uint16 reason; + uint32 rsvd[7]; +} tx_idle_flowring_resume_request_t; + +typedef struct tx_idle_flowring_resume_response { + cmn_msg_hdr_t msg; + compl_msg_hdr_t cmplt; + uint32 rsvd[2]; + dma_done_t marker; +} tx_idle_flowring_resume_response_t; + +#endif /* _bcmmsgbuf_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmnvram.h b/drivers/net/wireless/bcmdhd/include/bcmnvram.h new file mode 100644 index 000000000000..e3ba9b4166fb --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmnvram.h @@ -0,0 +1,310 @@ +/* + * NVRAM variable manipulation + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmnvram.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _bcmnvram_h_ +#define _bcmnvram_h_ + +#ifndef _LANGUAGE_ASSEMBLY + +#include +#include + +struct nvram_header { + uint32 magic; + uint32 len; + uint32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */ + uint32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */ + uint32 config_ncdl; /* ncdl values for memc */ +}; + +struct nvram_tuple { + char *name; + char *value; + struct nvram_tuple *next; +}; + +/* + * Get default value for an NVRAM variable + */ +extern char *nvram_default_get(const char *name); +/* + * validate/restore all per-interface related variables + */ +extern void nvram_validate_all(char *prefix, bool restore); + +/* + * restore specific per-interface variable + */ +extern void nvram_restore_var(char *prefix, char *name); + +/* + * Initialize NVRAM access. May be unnecessary or undefined on certain + * platforms. + */ +extern int nvram_init(void *sih); +extern int nvram_deinit(void *sih); + + +/* + * Append a chunk of nvram variables to the global list + */ +extern int nvram_append(void *si, char *vars, uint varsz); + +extern void nvram_get_global_vars(char **varlst, uint *varsz); + + +/* + * Check for reset button press for restoring factory defaults. + */ +extern int nvram_reset(void *sih); + +/* + * Disable NVRAM access. May be unnecessary or undefined on certain + * platforms. + */ +extern void nvram_exit(void *sih); + +/* + * Get the value of an NVRAM variable. The pointer returned may be + * invalid after a set. + * @param name name of variable to get + * @return value of variable or NULL if undefined + */ +extern char * nvram_get(const char *name); + +/* + * Get the value of an NVRAM variable. 
The pointer returned may be + * invalid after a set. + * @param name name of variable to get + * @param bit bit value to get + * @return value of variable or NULL if undefined + */ +extern char * nvram_get_bitflag(const char *name, const int bit); + +/* + * Read the reset GPIO value from the nvram and set the GPIO + * as input + */ +extern int nvram_resetgpio_init(void *sih); + +/* + * Get the value of an NVRAM variable. + * @param name name of variable to get + * @return value of variable, or the empty string ("") if undefined + */ +static INLINE char * +nvram_safe_get(const char *name) +{ + char *p = nvram_get(name); + return p ? p : ""; +} + +/* + * Match an NVRAM variable. + * @param name name of variable to match + * @param match value to compare against value of variable + * @return TRUE if variable is defined and its value is string equal + * to match or FALSE otherwise + */ +static INLINE int +nvram_match(const char *name, const char *match) +{ + const char *value = nvram_get(name); + return (value && !strcmp(value, match)); +} + +/* + * Match an NVRAM variable. + * @param name name of variable to match + * @param bit bit value to get + * @param match value to compare against value of variable + * @return TRUE if variable is defined and its value is string equal + * to match or FALSE otherwise + */ +static INLINE int +nvram_match_bitflag(const char *name, const int bit, const char *match) +{ + const char *value = nvram_get_bitflag(name, bit); + return (value && !strcmp(value, match)); +} + +/* + * Inversely match an NVRAM variable. + * @param name name of variable to match + * @param invmatch value to compare against value of variable + * @return TRUE if variable is defined and its value is not string + * equal to invmatch or FALSE otherwise + */ +static INLINE int +nvram_invmatch(const char *name, const char *invmatch) +{ + const char *value = nvram_get(name); + return (value && strcmp(value, invmatch)); +} + +/* + * Set the value of an NVRAM variable. The name and value strings are + * copied into private storage. Pointers to previously set values + * may become invalid. The new value may be immediately + * retrieved but will not be permanently stored until a commit. + * @param name name of variable to set + * @param value value of variable + * @return 0 on success and errno on failure + */ +extern int nvram_set(const char *name, const char *value); + +/* + * Set the value of an NVRAM variable. The name and value strings are + * copied into private storage. Pointers to previously set values + * may become invalid. The new value may be immediately + * retrieved but will not be permanently stored until a commit. + * @param name name of variable to set + * @param bit bit value to set + * @param value value of variable + * @return 0 on success and errno on failure + */ +extern int nvram_set_bitflag(const char *name, const int bit, const int value); +/* + * Unset an NVRAM variable. Pointers to previously set values + * remain valid until a set. + * @param name name of variable to unset + * @return 0 on success and errno on failure + * NOTE: use nvram_commit to commit this change to flash. + */ +extern int nvram_unset(const char *name); + +/* + * Commit NVRAM variables to permanent storage. All pointers to values + * may be invalid after a commit. + * NVRAM values are undefined after a commit. + * @param nvram_corrupt true to corrupt nvram, false otherwise. 
+ * @return 0 on success and errno on failure + */ +extern int nvram_commit_internal(bool nvram_corrupt); + +/* + * Commit NVRAM variables to permanent storage. All pointers to values + * may be invalid after a commit. + * NVRAM values are undefined after a commit. + * @return 0 on success and errno on failure + */ +extern int nvram_commit(void); + +/* + * Get all NVRAM variables (format name=value\0 ... \0\0). + * @param buf buffer to store variables + * @param count size of buffer in bytes + * @return 0 on success and errno on failure + */ +extern int nvram_getall(char *nvram_buf, int count); + +/* + * returns the crc value of the nvram + * @param nvh nvram header pointer + */ +uint8 nvram_calc_crc(struct nvram_header * nvh); + +extern int nvram_space; +#endif /* _LANGUAGE_ASSEMBLY */ + +/* The NVRAM version number stored as an NVRAM variable */ +#define NVRAM_SOFTWARE_VERSION "1" + +#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */ +#define NVRAM_CLEAR_MAGIC 0x0 +#define NVRAM_INVALID_MAGIC 0xFFFFFFFF +#define NVRAM_VERSION 1 +#define NVRAM_HEADER_SIZE 20 +/* This definition is for precommit staging, and will be removed */ +#define NVRAM_SPACE 0x8000 +/* For CFE builds this gets passed in thru the makefile */ +#ifndef MAX_NVRAM_SPACE +#define MAX_NVRAM_SPACE 0x10000 +#endif +#define DEF_NVRAM_SPACE 0x8000 +#define ROM_ENVRAM_SPACE 0x1000 +#define NVRAM_LZMA_MAGIC 0x4c5a4d41 /* 'LZMA' */ + +#define NVRAM_MAX_VALUE_LEN 255 +#define NVRAM_MAX_PARAM_LEN 64 + +#define NVRAM_CRC_START_POSITION 9 /* magic, len, crc8 to be skipped */ +#define NVRAM_CRC_VER_MASK 0xffffff00 /* for crc_ver_init */ + +/* Offsets to embedded nvram area */ +#define NVRAM_START_COMPRESSED 0x400 +#define NVRAM_START 0x1000 + +#define BCM_JUMBO_NVRAM_DELIMIT '\n' +#define BCM_JUMBO_START "Broadcom Jumbo Nvram file" + + +#if (defined(FAILSAFE_UPGRADE) || defined(CONFIG_FAILSAFE_UPGRADE) || \ + defined(__CONFIG_FAILSAFE_UPGRADE_SUPPORT__)) +#define IMAGE_SIZE "image_size" +#define BOOTPARTITION "bootpartition" +#define IMAGE_BOOT BOOTPARTITION +#define PARTIALBOOTS "partialboots" +#define MAXPARTIALBOOTS "maxpartialboots" +#define IMAGE_1ST_FLASH_TRX "flash0.trx" +#define IMAGE_1ST_FLASH_OS "flash0.os" +#define IMAGE_2ND_FLASH_TRX "flash0.trx2" +#define IMAGE_2ND_FLASH_OS "flash0.os2" +#define IMAGE_FIRST_OFFSET "image_first_offset" +#define IMAGE_SECOND_OFFSET "image_second_offset" +#define LINUX_FIRST "linux" +#define LINUX_SECOND "linux2" +#endif + +#if (defined(DUAL_IMAGE) || defined(CONFIG_DUAL_IMAGE) || \ + defined(__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__)) +/* Shared by all: CFE, Linux Kernel, and Ap */ +#define IMAGE_BOOT "image_boot" +#define BOOTPARTITION IMAGE_BOOT +/* CFE variables */ +#define IMAGE_1ST_FLASH_TRX "flash0.trx" +#define IMAGE_1ST_FLASH_OS "flash0.os" +#define IMAGE_2ND_FLASH_TRX "flash0.trx2" +#define IMAGE_2ND_FLASH_OS "flash0.os2" +#define IMAGE_SIZE "image_size" + +/* CFE and Linux Kernel shared variables */ +#define IMAGE_FIRST_OFFSET "image_first_offset" +#define IMAGE_SECOND_OFFSET "image_second_offset" + +/* Linux application variables */ +#define LINUX_FIRST "linux" +#define LINUX_SECOND "linux2" +#define POLICY_TOGGLE "toggle" +#define LINUX_PART_TO_FLASH "linux_to_flash" +#define LINUX_FLASH_POLICY "linux_flash_policy" + +#endif /* defined(DUAL_IMAGE||CONFIG_DUAL_IMAGE)||__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__ */ + +#endif /* _bcmnvram_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmpcie.h b/drivers/net/wireless/bcmdhd/include/bcmpcie.h new file mode 100644 index 000000000000..0c15055a0353 
--- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmpcie.h @@ -0,0 +1,318 @@ +/* + * Broadcom PCIE + * Software-specific definitions shared between device and host side + * Explains the shared area between host and dongle + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmpcie.h 604490 2015-12-07 15:48:45Z $ + */ + + +#ifndef _bcmpcie_h_ +#define _bcmpcie_h_ + +#include + +#define ADDR_64(x) (x.addr) +#define HIGH_ADDR_32(x) ((uint32) (((sh_addr_t) x).high_addr)) +#define LOW_ADDR_32(x) ((uint32) (((sh_addr_t) x).low_addr)) + +typedef struct { + uint32 low_addr; + uint32 high_addr; +} sh_addr_t; + + +/* May be overridden by 43xxxxx-roml.mk */ +#if !defined(BCMPCIE_MAX_TX_FLOWS) +#define BCMPCIE_MAX_TX_FLOWS 40 +#endif /* ! BCMPCIE_MAX_TX_FLOWS */ + +/** + * Feature flags enabled in dongle. Advertised by dongle to DHD via the PCIe Shared structure that + * is located in device memory. + */ +#define PCIE_SHARED_VERSION 0x00005 +#define PCIE_SHARED_VERSION_MASK 0x000FF +#define PCIE_SHARED_ASSERT_BUILT 0x00100 +#define PCIE_SHARED_ASSERT 0x00200 +#define PCIE_SHARED_TRAP 0x00400 +#define PCIE_SHARED_IN_BRPT 0x00800 +#define PCIE_SHARED_SET_BRPT 0x01000 +#define PCIE_SHARED_PENDING_BRPT 0x02000 +#define PCIE_SHARED_TXPUSH_SPRT 0x04000 +#define PCIE_SHARED_EVT_SEQNUM 0x08000 +#define PCIE_SHARED_DMA_INDEX 0x10000 + +/** + * There are host types where a device interrupt can 'race ahead' of data written by the device into + * host memory. The dongle can avoid this condition using a variety of techniques (read barrier, + * using PCIe Message Signalled Interrupts, or by using the PCIE_DMA_INDEX feature). Unfortunately + * these techniques have drawbacks on router platforms. For these platforms, it was decided to not + * avoid the condition, but to detect the condition instead and act on it. + * D2H M2M DMA Complete Sync mechanism: Modulo-253-SeqNum or XORCSUM + */ +#define PCIE_SHARED_D2H_SYNC_SEQNUM 0x20000 +#define PCIE_SHARED_D2H_SYNC_XORCSUM 0x40000 +#define PCIE_SHARED_D2H_SYNC_MODE_MASK \ + (PCIE_SHARED_D2H_SYNC_SEQNUM | PCIE_SHARED_D2H_SYNC_XORCSUM) +#define PCIE_SHARED_IDLE_FLOW_RING 0x80000 +#define PCIE_SHARED_2BYTE_INDICES 0x100000 + + +#define PCIE_SHARED_D2H_MAGIC 0xFEDCBA09 +#define PCIE_SHARED_H2D_MAGIC 0x12345678 + +/** + * Message rings convey messages between host and device. They are unidirectional, and are located + * in host memory. 
+ * + * This is the minimal set of message rings, known as 'common message rings': + */ +#define BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT 0 +#define BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT 1 +#define BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE 2 +#define BCMPCIE_D2H_MSGRING_TX_COMPLETE 3 +#define BCMPCIE_D2H_MSGRING_RX_COMPLETE 4 +#define BCMPCIE_COMMON_MSGRING_MAX_ID 4 + +#define BCMPCIE_H2D_COMMON_MSGRINGS 2 +#define BCMPCIE_D2H_COMMON_MSGRINGS 3 +#define BCMPCIE_COMMON_MSGRINGS 5 + +#define BCMPCIE_H2D_MSGRINGS(max_tx_flows) \ + (BCMPCIE_H2D_COMMON_MSGRINGS + (max_tx_flows)) + +/** + * H2D and D2H, WR and RD index, are maintained in the following arrays: + * - Array of all H2D WR Indices + * - Array of all H2D RD Indices + * - Array of all D2H WR Indices + * - Array of all D2H RD Indices + * + * The offset of the WR or RD indexes (for common rings) in these arrays are + * listed below. Arrays ARE NOT indexed by a ring's id. + * + * D2H common rings WR and RD index start from 0, even though their ringids + * start from BCMPCIE_H2D_COMMON_MSGRINGS + */ + +#define BCMPCIE_H2D_RING_IDX(h2d_ring_id) (h2d_ring_id) + +enum h2dring_idx { + /* H2D common rings */ + BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT_IDX = + BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT), + BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT_IDX = + BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT), + + /* First TxPost's WR or RD index starts after all H2D common rings */ + BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START = + BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_COMMON_MSGRINGS) +}; + +#define BCMPCIE_D2H_RING_IDX(d2h_ring_id) \ + ((d2h_ring_id) - BCMPCIE_H2D_COMMON_MSGRINGS) + +enum d2hring_idx { + /* D2H Common Rings */ + BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE_IDX = + BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE), + BCMPCIE_D2H_MSGRING_TX_COMPLETE_IDX = + BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_TX_COMPLETE), + BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX = + BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_RX_COMPLETE) +}; + +/** + * Macros for managing arrays of RD WR indices: + * rw_index_sz: + * - in dongle, rw_index_sz is known at compile time + * - in host/DHD, rw_index_sz is derived from advertised pci_shared flags + * + * ring_idx: See h2dring_idx and d2hring_idx + */ + +/** Offset of a RD or WR index in H2D or D2H indices array */ +#define BCMPCIE_RW_INDEX_OFFSET(rw_index_sz, ring_idx) \ + ((rw_index_sz) * (ring_idx)) + +/** Fetch the address of RD or WR index in H2D or D2H indices array */ +#define BCMPCIE_RW_INDEX_ADDR(indices_array_base, rw_index_sz, ring_idx) \ + (void *)((uint32)(indices_array_base) + \ + BCMPCIE_RW_INDEX_OFFSET((rw_index_sz), (ring_idx))) + +/** H2D DMA Indices array size: given max flow rings */ +#define BCMPCIE_H2D_RW_INDEX_ARRAY_SZ(rw_index_sz, max_tx_flows) \ + ((rw_index_sz) * BCMPCIE_H2D_MSGRINGS(max_tx_flows)) + +/** D2H DMA Indices array size */ +#define BCMPCIE_D2H_RW_INDEX_ARRAY_SZ(rw_index_sz) \ + ((rw_index_sz) * BCMPCIE_D2H_COMMON_MSGRINGS) + +/** + * This type is used by a 'message buffer' (which is a FIFO for messages). Message buffers are used + * for host<->device communication and are instantiated on both sides. ring_mem_t is instantiated + * both in host as well as device memory. + */ +typedef struct ring_mem { + uint16 idx; /* ring id */ + uint8 type; + uint8 rsvd; + uint16 max_item; /* Max number of items in flow ring */ + uint16 len_items; /* Items are fixed size. 
Length in bytes of one item */ + sh_addr_t base_addr; /* 64 bits address, either in host or device memory */ +} ring_mem_t; + + +/** + * Per flow ring, information is maintained in device memory, e.g. at what address the ringmem and + * ringstate are located. The flow ring itself can be instantiated in either host or device memory. + * + * Perhaps this type should be renamed to make clear that it resides in device memory only. + */ +typedef struct ring_info { + uint32 ringmem_ptr; /* ring mem location in dongle memory */ + + /* Following arrays are indexed using h2dring_idx and d2hring_idx, and not + * by a ringid. + */ + + /* 32bit ptr to arrays of WR or RD indices for all rings in dongle memory */ + uint32 h2d_w_idx_ptr; /* Array of all H2D ring's WR indices */ + uint32 h2d_r_idx_ptr; /* Array of all H2D ring's RD indices */ + uint32 d2h_w_idx_ptr; /* Array of all D2H ring's WR indices */ + uint32 d2h_r_idx_ptr; /* Array of all D2H ring's RD indices */ + + /* PCIE_DMA_INDEX feature: Dongle uses mem2mem DMA to sync arrays in host. + * Host may directly fetch WR and RD indices from these host-side arrays. + * + * 64bit ptr to arrays of WR or RD indices for all rings in host memory. + */ + sh_addr_t h2d_w_idx_hostaddr; /* Array of all H2D ring's WR indices */ + sh_addr_t h2d_r_idx_hostaddr; /* Array of all H2D ring's RD indices */ + sh_addr_t d2h_w_idx_hostaddr; /* Array of all D2H ring's WR indices */ + sh_addr_t d2h_r_idx_hostaddr; /* Array of all D2H ring's RD indices */ + + uint16 max_sub_queues; /* maximum number of H2D rings: common + flow */ + uint16 rsvd; +} ring_info_t; + +/** + * A structure located in TCM that is shared between host and device, primarily used during + * initialization. + */ +typedef struct { + /** shared area version captured at flags 7:0 */ + uint32 flags; + + uint32 trap_addr; + uint32 assert_exp_addr; + uint32 assert_file_addr; + uint32 assert_line; + uint32 console_addr; /**< Address of hnd_cons_t */ + + uint32 msgtrace_addr; + + uint32 fwid; + + /* Used for debug/flow control */ + uint16 total_lfrag_pkt_cnt; + uint16 max_host_rxbufs; /* rsvd in spec */ + + uint32 dma_rxoffset; /* rsvd in spec */ + + /** these will be used for sleep request/ack, d3 req/ack */ + uint32 h2d_mb_data_ptr; + uint32 d2h_mb_data_ptr; + + /* information pertinent to host IPC/msgbuf channels */ + /** location in the TCM memory which has the ring_info */ + uint32 rings_info_ptr; + + /** block of host memory for the scratch buffer */ + uint32 host_dma_scratch_buffer_len; + sh_addr_t host_dma_scratch_buffer; + + /** block of host memory for the dongle to push the status into */ + uint32 device_rings_stsblk_len; + sh_addr_t device_rings_stsblk; + + uint32 buzzz; /* BUZZZ state format strings and trace buffer */ + +} pciedev_shared_t; + +extern pciedev_shared_t pciedev_shared; + +/** + * Mailboxes notify a remote party that an event took place, using interrupts. They use hardware + * support. 
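 */

/*
 * A hypothetical host-side sketch of the D3 entry handshake, using the
 * mailbox bits defined just below (in a real .c file this whole header is
 * included first, so the forward reference is for exposition only). The two
 * accessors are assumed stand-ins for the platform's mailbox register I/O;
 * the actual DHD code paths differ.
 */
extern void sketch_write_h2d_mb(uint32 data);	/* assumed accessor */
extern uint32 sketch_read_d2h_mb(void);		/* assumed accessor */

static int
sketch_d3_handshake(void)
{
	int spin = 1000;	/* arbitrary poll bound for the sketch */

	sketch_write_h2d_mb(H2D_HOST_D3_INFORM);	/* request D3 entry */

	while (spin-- > 0) {
		uint32 d2h = sketch_read_d2h_mb();

		if (D2H_DEV_MB_INVALIDATED(d2h))
			continue;	/* empty or garbage mailbox content */
		if (d2h & D2H_DEV_D3_ACK)
			return 0;	/* dongle acknowledged D3 */
	}
	return -1;		/* timed out */
}
/* (end of sketch)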
+ */ + +/* H2D mail box Data */ +#define H2D_HOST_D3_INFORM 0x00000001 +#define H2D_HOST_DS_ACK 0x00000002 +#define H2D_HOST_DS_NAK 0x00000004 +#define H2D_HOST_CONS_INT 0x80000000 /**< h2d int for console cmds */ +#define H2D_FW_TRAP 0x20000000 /**< dump HW reg info for Livelock issue */ +#define H2D_HOST_D0_INFORM_IN_USE 0x00000008 +#define H2D_HOST_D0_INFORM 0x00000010 + +/* D2H mail box Data */ +#define D2H_DEV_D3_ACK 0x00000001 +#define D2H_DEV_DS_ENTER_REQ 0x00000002 +#define D2H_DEV_DS_EXIT_NOTE 0x00000004 +#define D2H_DEV_FWHALT 0x10000000 +#define D2H_DEV_MB_MASK (D2H_DEV_D3_ACK | D2H_DEV_DS_ENTER_REQ | \ + D2H_DEV_DS_EXIT_NOTE | D2H_DEV_FWHALT) +#define D2H_DEV_MB_INVALIDATED(x) ((!x) || (x & ~D2H_DEV_MB_MASK)) + +/** These macros operate on type 'inuse_lclbuf_pool_t' and are used by firmware only */ +#define NEXTTXP(i, d) ((((i)+1) >= (d)) ? 0 : ((i)+1)) +#define NTXPACTIVE(r, w, d) (((r) <= (w)) ? ((w)-(r)) : ((d)-(r)+(w))) +#define NTXPAVAIL(r, w, d) (((d) - NTXPACTIVE((r), (w), (d))) > 1) + +/* Function can be used to notify host of FW halt */ +#define READ_AVAIL_SPACE(w, r, d) \ + ((w >= r) ? (w - r) : (d - r)) + +#define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d) ((w >= r) ? (d - w) : (r - w)) +#define WRITE_SPACE_AVAIL(r, w, d) (d - (NTXPACTIVE(r, w, d)) - 1) +#define CHECK_WRITE_SPACE(r, w, d) \ + MIN(WRITE_SPACE_AVAIL(r, w, d), WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d)) + + +#define WRT_PEND(x) ((x)->wr_pending) +#define DNGL_RING_WPTR(msgbuf) (*((msgbuf)->tcm_rs_w_ptr)) +#define BCMMSGBUF_RING_SET_W_PTR(msgbuf, a) (DNGL_RING_WPTR(msgbuf) = (a)) + +#define DNGL_RING_RPTR(msgbuf) (*((msgbuf)->tcm_rs_r_ptr)) +#define BCMMSGBUF_RING_SET_R_PTR(msgbuf, a) (DNGL_RING_RPTR(msgbuf) = (a)) + +#define RING_START_PTR(x) ((x)->ringmem->base_addr.low_addr) +#define RING_MAX_ITEM(x) ((x)->ringmem->max_item) +#define RING_LEN_ITEMS(x) ((x)->ringmem->len_items) + +#endif /* _bcmpcie_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmpcispi.h b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h new file mode 100644 index 000000000000..66c783c4aeff --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmpcispi.h @@ -0,0 +1,184 @@ +/* + * Broadcom PCI-SPI Host Controller Register Definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmpcispi.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _BCM_PCI_SPI_H +#define _BCM_PCI_SPI_H + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + + +typedef volatile struct { + uint32 spih_ctrl; /* 0x00 SPI Control Register */ + uint32 spih_stat; /* 0x04 SPI Status Register */ + uint32 spih_data; /* 0x08 SPI Data Register, 32-bits wide */ + uint32 spih_ext; /* 0x0C SPI Extension Register */ + uint32 PAD[4]; /* 0x10-0x1F PADDING */ + + uint32 spih_gpio_ctrl; /* 0x20 SPI GPIO Control Register */ + uint32 spih_gpio_data; /* 0x24 SPI GPIO Data Register */ + uint32 PAD[6]; /* 0x28-0x3F PADDING */ + + uint32 spih_int_edge; /* 0x40 SPI Interrupt Edge Register (0=Level, 1=Edge) */ + uint32 spih_int_pol; /* 0x44 SPI Interrupt Polarity Register (0=Active Low, */ + /* 1=Active High) */ + uint32 spih_int_mask; /* 0x48 SPI Interrupt Mask */ + uint32 spih_int_status; /* 0x4C SPI Interrupt Status */ + uint32 PAD[4]; /* 0x50-0x5F PADDING */ + + uint32 spih_hex_disp; /* 0x60 SPI 4-digit hex display value */ + uint32 spih_current_ma; /* 0x64 SPI SD card current consumption in mA */ + uint32 PAD[1]; /* 0x68 PADDING */ + uint32 spih_disp_sel; /* 0x6c SPI 4-digit hex display mode select (1=current) */ + uint32 PAD[4]; /* 0x70-0x7F PADDING */ + uint32 PAD[8]; /* 0x80-0x9F PADDING */ + uint32 PAD[8]; /* 0xA0-0xBF PADDING */ + uint32 spih_pll_ctrl; /* 0xC0 PLL Control Register */ + uint32 spih_pll_status; /* 0xC4 PLL Status Register */ + uint32 spih_xtal_freq; /* 0xC8 External Clock Frequency in units of 10000Hz */ + uint32 spih_clk_count; /* 0xCC External Clock Count Register */ + +} spih_regs_t; + +typedef volatile struct { + uint32 cfg_space[0x40]; /* 0x000-0x0FF PCI Configuration Space (Read Only) */ + uint32 P_IMG_CTRL0; /* 0x100 PCI Image0 Control Register */ + + uint32 P_BA0; /* 0x104 32 R/W PCI Image0 Base Address register */ + uint32 P_AM0; /* 0x108 32 R/W PCI Image0 Address Mask register */ + uint32 P_TA0; /* 0x10C 32 R/W PCI Image0 Translation Address register */ + uint32 P_IMG_CTRL1; /* 0x110 32 R/W PCI Image1 Control register */ + uint32 P_BA1; /* 0x114 32 R/W PCI Image1 Base Address register */ + uint32 P_AM1; /* 0x118 32 R/W PCI Image1 Address Mask register */ + uint32 P_TA1; /* 0x11C 32 R/W PCI Image1 Translation Address register */ + uint32 P_IMG_CTRL2; /* 0x120 32 R/W PCI Image2 Control register */ + uint32 P_BA2; /* 0x124 32 R/W PCI Image2 Base Address register */ + uint32 P_AM2; /* 0x128 32 R/W PCI Image2 Address Mask register */ + uint32 P_TA2; /* 0x12C 32 R/W PCI Image2 Translation Address register */ + uint32 P_IMG_CTRL3; /* 0x130 32 R/W PCI Image3 Control register */ + uint32 P_BA3; /* 0x134 32 R/W PCI Image3 Base Address register */ + uint32 P_AM3; /* 0x138 32 R/W PCI Image3 Address Mask register */ + uint32 P_TA3; /* 0x13C 32 R/W PCI Image3 Translation Address register */ + uint32 P_IMG_CTRL4; /* 0x140 32 R/W PCI Image4 Control register */ + uint32 P_BA4; /* 0x144 32 R/W PCI Image4 Base Address register */ + uint32 P_AM4; /* 0x148 32 R/W PCI Image4 Address Mask register */ + uint32 P_TA4; /* 0x14C 32 R/W PCI Image4 Translation Address register */ + uint32 P_IMG_CTRL5; /* 0x150 32 R/W PCI Image5 Control register */ + uint32 P_BA5; /* 0x154 32 R/W PCI Image5 Base Address register */ + uint32 P_AM5; /* 0x158 32 R/W PCI Image5 Address Mask register */ + uint32 P_TA5; /* 0x15C 32 R/W PCI Image5 Translation Address register */ + 
uint32 P_ERR_CS; /* 0x160 32 R/W PCI Error Control and Status register */ + uint32 P_ERR_ADDR; /* 0x164 32 R PCI Erroneous Address register */ + uint32 P_ERR_DATA; /* 0x168 32 R PCI Erroneous Data register */ + + uint32 PAD[5]; /* 0x16C-0x17F PADDING */ + + uint32 WB_CONF_SPC_BAR; /* 0x180 32 R WISHBONE Configuration Space Base Address */ + uint32 W_IMG_CTRL1; /* 0x184 32 R/W WISHBONE Image1 Control register */ + uint32 W_BA1; /* 0x188 32 R/W WISHBONE Image1 Base Address register */ + uint32 W_AM1; /* 0x18C 32 R/W WISHBONE Image1 Address Mask register */ + uint32 W_TA1; /* 0x190 32 R/W WISHBONE Image1 Translation Address reg */ + uint32 W_IMG_CTRL2; /* 0x194 32 R/W WISHBONE Image2 Control register */ + uint32 W_BA2; /* 0x198 32 R/W WISHBONE Image2 Base Address register */ + uint32 W_AM2; /* 0x19C 32 R/W WISHBONE Image2 Address Mask register */ + uint32 W_TA2; /* 0x1A0 32 R/W WISHBONE Image2 Translation Address reg */ + uint32 W_IMG_CTRL3; /* 0x1A4 32 R/W WISHBONE Image3 Control register */ + uint32 W_BA3; /* 0x1A8 32 R/W WISHBONE Image3 Base Address register */ + uint32 W_AM3; /* 0x1AC 32 R/W WISHBONE Image3 Address Mask register */ + uint32 W_TA3; /* 0x1B0 32 R/W WISHBONE Image3 Translation Address reg */ + uint32 W_IMG_CTRL4; /* 0x1B4 32 R/W WISHBONE Image4 Control register */ + uint32 W_BA4; /* 0x1B8 32 R/W WISHBONE Image4 Base Address register */ + uint32 W_AM4; /* 0x1BC 32 R/W WISHBONE Image4 Address Mask register */ + uint32 W_TA4; /* 0x1C0 32 R/W WISHBONE Image4 Translation Address reg */ + uint32 W_IMG_CTRL5; /* 0x1C4 32 R/W WISHBONE Image5 Control register */ + uint32 W_BA5; /* 0x1C8 32 R/W WISHBONE Image5 Base Address register */ + uint32 W_AM5; /* 0x1CC 32 R/W WISHBONE Image5 Address Mask register */ + uint32 W_TA5; /* 0x1D0 32 R/W WISHBONE Image5 Translation Address reg */ + uint32 W_ERR_CS; /* 0x1D4 32 R/W WISHBONE Error Control and Status reg */ + uint32 W_ERR_ADDR; /* 0x1D8 32 R WISHBONE Erroneous Address register */ + uint32 W_ERR_DATA; /* 0x1DC 32 R WISHBONE Erroneous Data register */ + uint32 CNF_ADDR; /* 0x1E0 32 R/W Configuration Cycle register */ + uint32 CNF_DATA; /* 0x1E4 32 R/W Configuration Cycle Generation Data reg */ + + uint32 INT_ACK; /* 0x1E8 32 R Interrupt Acknowledge register */ + uint32 ICR; /* 0x1EC 32 R/W Interrupt Control register */ + uint32 ISR; /* 0x1F0 32 R/W Interrupt Status register */ +} spih_pciregs_t; + +/* + * PCI Core interrupt enable and status bit definitions. + */ + +/* PCI Core ICR Register bit definitions */ +#define PCI_INT_PROP_EN (1 << 0) /* Interrupt Propagation Enable */ +#define PCI_WB_ERR_INT_EN (1 << 1) /* Wishbone Error Interrupt Enable */ +#define PCI_PCI_ERR_INT_EN (1 << 2) /* PCI Error Interrupt Enable */ +#define PCI_PAR_ERR_INT_EN (1 << 3) /* Parity Error Interrupt Enable */ +#define PCI_SYS_ERR_INT_EN (1 << 4) /* System Error Interrupt Enable */ +#define PCI_SOFTWARE_RESET (1U << 31) /* Software reset of the PCI Core. 
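 */

/*
 * A hypothetical sketch of programming the ICR bits above, as a host .c
 * file might do once it has mapped the core's register window; the mapping
 * itself is outside this header.
 */
static void
sketch_spih_enable_core_ints(spih_pciregs_t *pciregs)
{
	/* Propagate device interrupts and report PCI/parity/system errors. */
	pciregs->ICR = PCI_INT_PROP_EN | PCI_PCI_ERR_INT_EN |
	        PCI_PAR_ERR_INT_EN | PCI_SYS_ERR_INT_EN;
}
/* (end of sketch)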
*/ + + +/* PCI Core ISR Register bit definitions */ +#define PCI_INT_PROP_ST (1 << 0) /* Interrupt Propagation Status */ +#define PCI_WB_ERR_INT_ST (1 << 1) /* Wishbone Error Interrupt Status */ +#define PCI_PCI_ERR_INT_ST (1 << 2) /* PCI Error Interrupt Status */ +#define PCI_PAR_ERR_INT_ST (1 << 3) /* Parity Error Interrupt Status */ +#define PCI_SYS_ERR_INT_ST (1 << 4) /* System Error Interrupt Status */ + + +/* Registers on the Wishbone bus */ +#define SPIH_CTLR_INTR (1 << 0) /* SPI Host Controller Core Interrupt */ +#define SPIH_DEV_INTR (1 << 1) /* SPI Device Interrupt */ +#define SPIH_WFIFO_INTR (1 << 2) /* SPI Tx FIFO Empty Intr (FPGA Rev >= 8) */ + +/* GPIO Bit definitions */ +#define SPIH_CS (1 << 0) /* SPI Chip Select (active low) */ +#define SPIH_SLOT_POWER (1 << 1) /* SD Card Slot Power Enable */ +#define SPIH_CARD_DETECT (1 << 2) /* SD Card Detect */ + +/* SPI Status Register Bit definitions */ +#define SPIH_STATE_MASK 0x30 /* SPI Transfer State Machine state mask */ +#define SPIH_STATE_SHIFT 4 /* SPI Transfer State Machine state shift */ +#define SPIH_WFFULL (1 << 3) /* SPI Write FIFO Full */ +#define SPIH_WFEMPTY (1 << 2) /* SPI Write FIFO Empty */ +#define SPIH_RFFULL (1 << 1) /* SPI Read FIFO Full */ +#define SPIH_RFEMPTY (1 << 0) /* SPI Read FIFO Empty */ + +#define SPIH_EXT_CLK (1U << 31) /* Use External Clock as PLL Clock source. */ + +#define SPIH_PLL_NO_CLK (1 << 1) /* Set to 1 if the PLL's input clock is lost. */ +#define SPIH_PLL_LOCKED (1 << 3) /* Set to 1 when the PLL is locked. */ + +/* Spin bit loop bound check */ +#define SPI_SPIN_BOUND 0xf4240 /* 1 million */ + +#endif /* _BCM_PCI_SPI_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmperf.h b/drivers/net/wireless/bcmdhd/include/bcmperf.h new file mode 100644 index 000000000000..823c3b62f09a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmperf.h @@ -0,0 +1,39 @@ +/* + * Performance counters software interface. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * <> + * + * $Id: bcmperf.h 514727 2014-11-12 03:02:48Z $ + */ +/* test */ +#ifndef _BCMPERF_H_ +#define _BCMPERF_H_ +/* get cache hits and misses */ +#define BCMPERF_ENABLE_INSTRCOUNT() +#define BCMPERF_ENABLE_ICACHE_MISS() +#define BCMPERF_ENABLE_ICACHE_HIT() +#define BCMPERF_GETICACHE_MISS(x) ((x) = 0) +#define BCMPERF_GETICACHE_HIT(x) ((x) = 0) +#define BCMPERF_GETINSTRCOUNT(x) ((x) = 0) +#endif /* _BCMPERF_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdbus.h b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h new file mode 100644 index 000000000000..ce75ffa367c1 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdbus.h @@ -0,0 +1,146 @@ +/* + * Definitions for API from sdio common code (bcmsdh) to individual + * host controller drivers. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdbus.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _sdio_api_h_ +#define _sdio_api_h_ + + +#define SDIOH_API_RC_SUCCESS (0x00) +#define SDIOH_API_RC_FAIL (0x01) +#define SDIOH_API_SUCCESS(status) (status == 0) + +#define SDIOH_READ 0 /* Read request */ +#define SDIOH_WRITE 1 /* Write request */ + +#define SDIOH_DATA_FIX 0 /* Fixed addressing */ +#define SDIOH_DATA_INC 1 /* Incremental addressing */ + +#define SDIOH_CMD_TYPE_NORMAL 0 /* Normal command */ +#define SDIOH_CMD_TYPE_APPEND 1 /* Append command */ +#define SDIOH_CMD_TYPE_CUTTHRU 2 /* Cut-through command */ +#define SDIOH_DATA_PIO 0 /* PIO mode */ +#define SDIOH_DATA_DMA 1 /* DMA mode */ + +/* Max number of glommed pkts */ +#ifdef CUSTOM_MAX_TXGLOM_SIZE +#define SDPCM_MAXGLOM_SIZE CUSTOM_MAX_TXGLOM_SIZE +#else +#define SDPCM_MAXGLOM_SIZE 40 +#endif /* CUSTOM_MAX_TXGLOM_SIZE */ + +#define SDPCM_TXGLOM_CPY 0 /* SDIO 2.0 should use copy mode */ +#define SDPCM_TXGLOM_MDESC 1 /* SDIO 3.0 should use multi-desc mode */ + +#ifdef CUSTOM_DEF_TXGLOM_SIZE +#define SDPCM_DEFGLOM_SIZE CUSTOM_DEF_TXGLOM_SIZE +#else +#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE +#endif /* CUSTOM_DEF_TXGLOM_SIZE */ + +#if SDPCM_DEFGLOM_SIZE > SDPCM_MAXGLOM_SIZE +#warning "SDPCM_DEFGLOM_SIZE cannot be higher than SDPCM_MAXGLOM_SIZE!!" 
+#undef SDPCM_DEFGLOM_SIZE +#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE +#endif + +typedef int SDIOH_API_RC; + +/* SDIO Host structure */ +typedef struct sdioh_info sdioh_info_t; + +/* callback function, taking one arg */ +typedef void (*sdioh_cb_fn_t)(void *); + +extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh); +extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si); + +/* query whether SD interrupt is enabled or not */ +extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff); + +/* enable or disable SD interrupt */ +extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable); + +#if defined(DHD_DEBUG) +extern bool sdioh_interrupt_pending(sdioh_info_t *si); +#endif + +/* read or write one byte using cmd52 */ +extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte); + +/* read or write 2/4 bytes using cmd53 */ +extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type, uint rw, uint fnc, + uint addr, uint32 *word, uint nbyte); + +/* read or write any buffer using cmd53 */ +extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc, + uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer, + void *pkt); + +/* get cis data */ +extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, uint8 *cis, uint32 length); + +extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data); +extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint fuc, uint32 addr, uint8 *data); + +/* query number of io functions */ +extern uint sdioh_query_iofnum(sdioh_info_t *si); + +/* handle iovars */ +extern int sdioh_iovar_op(sdioh_info_t *si, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Issue abort to the specified function and clear controller as needed */ +extern int sdioh_abort(sdioh_info_t *si, uint fnc); + +/* Start and Stop SDIO without re-enumerating the SD card. 
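 */

/*
 * A hypothetical usage sketch of the cmd52 byte-request API declared above,
 * as a .c file including this header might write it: read the CCCR revision
 * register (function 0, address 0x00). Error handling is reduced to a
 * pass/fail return.
 */
static int
sketch_sdioh_read_cccr_rev(sdioh_info_t *si, uint8 *rev)
{
	SDIOH_API_RC rc = sdioh_request_byte(si, SDIOH_READ, 0, 0x00, rev);

	return SDIOH_API_SUCCESS(rc) ? 0 : -1;
}
/* (end of sketch)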
*/ +extern int sdioh_start(sdioh_info_t *si, int stage); +extern int sdioh_stop(sdioh_info_t *si); + +/* Wait system lock free */ +extern int sdioh_waitlockfree(sdioh_info_t *si); + +/* Reset and re-initialize the device */ +extern int sdioh_sdio_reset(sdioh_info_t *si); + + + +#if defined(BCMSDIOH_STD) + #define SDIOH_SLEEP_ENABLED +#endif +extern SDIOH_API_RC sdioh_sleep(sdioh_info_t *si, bool enab); + +/* GPIO support */ +extern SDIOH_API_RC sdioh_gpio_init(sdioh_info_t *sd); +extern bool sdioh_gpioin(sdioh_info_t *sd, uint32 gpio); +extern SDIOH_API_RC sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio); +extern SDIOH_API_RC sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab); + +#endif /* _sdio_api_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh.h b/drivers/net/wireless/bcmdhd/include/bcmsdh.h new file mode 100644 index 000000000000..3b3e6b6ab45d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdh.h @@ -0,0 +1,255 @@ +/* + * SDIO host client driver interface of Broadcom HNBU + * export functions to client drivers + * abstract OS and BUS specific details of SDIO + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsdh.h 514727 2014-11-12 03:02:48Z $ + */ + +/** + * @file bcmsdh.h + */ + +#ifndef _bcmsdh_h_ +#define _bcmsdh_h_ + +#define BCMSDH_ERROR_VAL 0x0001 /* Error */ +#define BCMSDH_INFO_VAL 0x0002 /* Info */ +extern const uint bcmsdh_msglevel; + +#define BCMSDH_ERROR(x) +#define BCMSDH_INFO(x) + +#if defined(BCMSDIO) && (defined(BCMSDIOH_STD) || defined(BCMSDIOH_BCM) || \ + defined(BCMSDIOH_SPI)) +#define BCMSDH_ADAPTER +#endif /* BCMSDIO && (BCMSDIOH_STD || BCMSDIOH_BCM || BCMSDIOH_SPI) */ + +/* forward declarations */ +typedef struct bcmsdh_info bcmsdh_info_t; +typedef void (*bcmsdh_cb_fn_t)(void *); + +extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva); +/** + * BCMSDH API context + */ +struct bcmsdh_info +{ + bool init_success; /* underlying driver successfully attached */ + void *sdioh; /* handler for sdioh */ + uint32 vendevid; /* Target Vendor and Device ID on SD bus */ + osl_t *osh; + bool regfail; /* Save status of last reg_read/reg_write call */ + uint32 sbwad; /* Save backplane window address */ + void *os_cxt; /* Pointer to per-OS private data */ +}; + +/* Detach - free up resources allocated in attach */ +extern int bcmsdh_detach(osl_t *osh, void *sdh); + +/* Query if SD device interrupts are enabled */ +extern bool bcmsdh_intr_query(void *sdh); + +/* Enable/disable SD interrupt */ +extern int bcmsdh_intr_enable(void *sdh); +extern int bcmsdh_intr_disable(void *sdh); + +/* Register/deregister device interrupt handler. */ +extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh); +extern int bcmsdh_intr_dereg(void *sdh); +/* Enable/disable SD card interrupt forward */ +extern void bcmsdh_intr_forward(void *sdh, bool pass); + +#if defined(DHD_DEBUG) +/* Query pending interrupt status from the host controller */ +extern bool bcmsdh_intr_pending(void *sdh); +#endif + +/* Register a callback to be called if and when bcmsdh detects + * device removal. No-op in the case of non-removable/hardwired devices. + */ +extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh); + +/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface). + * fn: function number + * addr: unmodified SDIO-space address + * data: data byte to write + * err: pointer to error code (or NULL) + */ +extern uint8 bcmsdh_cfg_read(void *sdh, uint func, uint32 addr, int *err); +extern void bcmsdh_cfg_write(void *sdh, uint func, uint32 addr, uint8 data, int *err); + +/* Read/Write 4bytes from/to cfg space */ +extern uint32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, uint32 addr, int *err); +extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, uint32 addr, uint32 data, int *err); + +/* Read CIS content for specified function. + * fn: function whose CIS is being requested (0 is common CIS) + * cis: pointer to memory location to place results + * length: number of bytes to read + * Internally, this routine uses the values from the cis base regs (0x9-0xB) + * to form an SDIO-space address to read the data from. + */ +extern int bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length); + +/* Synchronous access to device (client) core registers via CMD53 to F1. + * addr: backplane address (i.e. 
>= regsva from attach) + * size: register width in bytes (2 or 4) + * data: data for register write + */ +extern uint32 bcmsdh_reg_read(void *sdh, uint32 addr, uint size); +extern uint32 bcmsdh_reg_write(void *sdh, uint32 addr, uint size, uint32 data); + +/* set sb address window */ +extern int bcmsdhsdio_set_sbaddr_window(void *sdh, uint32 address, bool force_set); + +/* Indicate if last reg read/write failed */ +extern bool bcmsdh_regfail(void *sdh); + +/* Buffer transfer to/from device (client) core via cmd53. + * fn: function number + * addr: backplane address (i.e. >= regsva from attach) + * flags: backplane width, address increment, sync/async + * buf: pointer to memory data buffer + * nbytes: number of bytes to transfer to/from buf + * pkt: pointer to packet associated with buf (if any) + * complete: callback function for command completion (async only) + * handle: handle for completion callback (first arg in callback) + * Returns 0 or error code. + * NOTE: Async operation is not currently supported. + */ +typedef void (*bcmsdh_cmplt_fn_t)(void *handle, int status, bool sync_waiting); +extern int bcmsdh_send_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle); +extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags, + uint8 *buf, uint nbytes, void *pkt, + bcmsdh_cmplt_fn_t complete_fn, void *handle); + +extern void bcmsdh_glom_post(void *sdh, uint8 *frame, void *pkt, uint len); +extern void bcmsdh_glom_clear(void *sdh); +extern uint bcmsdh_set_mode(void *sdh, uint mode); +extern bool bcmsdh_glom_enabled(void); +/* Flags bits */ +#define SDIO_REQ_4BYTE 0x1 /* Four-byte target (backplane) width (vs. two-byte) */ +#define SDIO_REQ_FIXED 0x2 /* Fixed address (FIFO) (vs. incrementing address) */ +#define SDIO_REQ_ASYNC 0x4 /* Async request (vs. sync request) */ +#define SDIO_BYTE_MODE 0x8 /* Byte mode request(non-block mode) */ + +/* Pending (non-error) return code */ +#define BCME_PENDING 1 + +/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only). + * rw: read or write (0/1) + * addr: direct SDIO address + * buf: pointer to memory data buffer + * nbytes: number of bytes to transfer to/from buf + * Returns 0 or error code. + */ +extern int bcmsdh_rwdata(void *sdh, uint rw, uint32 addr, uint8 *buf, uint nbytes); + +/* Issue an abort to the specified function */ +extern int bcmsdh_abort(void *sdh, uint fn); + +/* Start SDIO Host Controller communication */ +extern int bcmsdh_start(void *sdh, int stage); + +/* Stop SDIO Host Controller communication */ +extern int bcmsdh_stop(void *sdh); + +/* Wait system lock free */ +extern int bcmsdh_waitlockfree(void *sdh); + +/* Returns the "Device ID" of target device on the SDIO bus. */ +extern int bcmsdh_query_device(void *sdh); + +/* Returns the number of IO functions reported by the device */ +extern uint bcmsdh_query_iofnum(void *sdh); + +/* Miscellaneous knob tweaker. 
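 */

/*
 * A hypothetical synchronous receive sketch built on bcmsdh_recv_buf()
 * above, as a .c file including this header might write it: function number
 * 2 is chosen for illustration, 4-byte backplane width, incrementing
 * address, no packet chain, and no async completion (per the note above,
 * async operation is not currently supported).
 */
static int
sketch_bcmsdh_recv(void *sdh, uint32 addr, uint8 *buf, uint nbytes)
{
	return bcmsdh_recv_buf(sdh, addr, 2 /* function */, SDIO_REQ_4BYTE,
	        buf, nbytes, NULL, NULL, NULL);
}
/* (end of sketch)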
*/ +extern int bcmsdh_iovar_op(void *sdh, const char *name, + void *params, int plen, void *arg, int len, bool set); + +/* Reset and reinitialize the device */ +extern int bcmsdh_reset(bcmsdh_info_t *sdh); + +/* helper functions */ + +/* callback functions */ +typedef struct { + /* probe the device */ + void *(*probe)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot, + uint16 func, uint bustype, void * regsva, osl_t * osh, + void * param); + /* remove the device */ + void (*remove)(void *context); + /* can we suspend now */ + int (*suspend)(void *context); + /* resume from suspend */ + int (*resume)(void *context); +} bcmsdh_driver_t; + +/* platform specific/high level functions */ +extern int bcmsdh_register(bcmsdh_driver_t *driver); +extern void bcmsdh_unregister(void); +extern bool bcmsdh_chipmatch(uint16 vendor, uint16 device); +extern void bcmsdh_device_remove(void * sdh); + +extern int bcmsdh_reg_sdio_notify(void* semaphore); +extern void bcmsdh_unreg_sdio_notify(void); + +#if defined(OOB_INTR_ONLY) +extern int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler, + void* oob_irq_handler_context); +extern void bcmsdh_oob_intr_unregister(bcmsdh_info_t *sdh); +extern void bcmsdh_oob_intr_set(bcmsdh_info_t *sdh, bool enable); +#endif +extern void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *sdh); +extern void bcmsdh_dev_relax(bcmsdh_info_t *sdh); +extern bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *sdh); + +int bcmsdh_suspend(bcmsdh_info_t *bcmsdh); +int bcmsdh_resume(bcmsdh_info_t *bcmsdh); + +/* Function to pass device-status bits to DHD. */ +extern uint32 bcmsdh_get_dstatus(void *sdh); + +/* Function to return current window addr */ +extern uint32 bcmsdh_cur_sbwad(void *sdh); + +/* Function to pass chipid and rev to lower layers for controlling pr's */ +extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev); + + +extern int bcmsdh_sleep(void *sdh, bool enab); + +/* GPIO support */ +extern int bcmsdh_gpio_init(void *sd); +extern bool bcmsdh_gpioin(void *sd, uint32 gpio); +extern int bcmsdh_gpioouten(void *sd, uint32 gpio); +extern int bcmsdh_gpioout(void *sd, uint32 gpio, bool enab); + +#endif /* _bcmsdh_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h new file mode 100644 index 000000000000..07903f99fce3 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdh_sdmmc.h @@ -0,0 +1,119 @@ +/* + * BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdh_sdmmc.h 591160 2015-10-07 06:01:58Z $ + */ + +#ifndef __BCMSDH_SDMMC_H__ +#define __BCMSDH_SDMMC_H__ + +#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) + +#define sd_sync_dma(sd, read, nbytes) +#define sd_init_dma(sd) +#define sd_ack_intr(sd) +#define sd_wakeup(sd); + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SD4 2 +#define CLIENT_INTR 0x100 /* Get rid of this! */ +#define SDIOH_SDMMC_MAX_SG_ENTRIES 32 + +struct sdioh_info { + osl_t *osh; /* osh handler */ + void *bcmsdh; /* upper layer handle */ + bool client_intr_enabled; /* interrupt connected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + uint16 intmask; /* Current active interrupts */ + + int intrcount; /* Client interrupts */ + bool sd_use_dma; /* DMA on CMD53 */ + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + bool use_rxchain; + struct scatterlist sg_list[SDIOH_SDMMC_MAX_SG_ENTRIES]; + struct sdio_func fake_func0; + struct sdio_func *func[SDIOD_MAX_IOFUNCS]; + +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmsdh_sdmmc.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/* OS-independent interrupt handler */ +extern bool check_client_intr(sdioh_info_t *sd); + +/* Core interrupt enable/disable of device interrupts */ +extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd); +extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd); + + +/************************************************************** + * Internal interfaces: bcmsdh_sdmmc.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *sdioh_sdmmc_reg_map(osl_t *osh, int32 addr, int size); +extern void sdioh_sdmmc_reg_unmap(osl_t *osh, int32 addr, int size); + +/* Interrupt (de)registration routines */ +extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq); +extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd); + +extern sdioh_info_t *sdioh_attach(osl_t *osh, struct sdio_func *func); +extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd); +#endif /* __BCMSDH_SDMMC_H__ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h new file mode 100644 index 000000000000..5c0adff8e8ad --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdpcm.h @@ -0,0 +1,281 @@ +/* + * Broadcom SDIO/PCMCIA + * 
Software-specific definitions shared between device and host side + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsdpcm.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _bcmsdpcm_h_ +#define _bcmsdpcm_h_ + +/* + * Software allocation of To SB Mailbox resources + */ + +/* intstatus bits */ +#define I_SMB_NAK I_SMB_SW0 /* To SB Mailbox Frame NAK */ +#define I_SMB_INT_ACK I_SMB_SW1 /* To SB Mailbox Host Interrupt ACK */ +#define I_SMB_USE_OOB I_SMB_SW2 /* To SB Mailbox Use OOB Wakeup */ +#define I_SMB_DEV_INT I_SMB_SW3 /* To SB Mailbox Miscellaneous Interrupt */ + +#define I_TOSBMAIL (I_SMB_NAK | I_SMB_INT_ACK | I_SMB_USE_OOB | I_SMB_DEV_INT) + +/* tosbmailbox bits corresponding to intstatus bits */ +#define SMB_NAK (1 << 0) /* To SB Mailbox Frame NAK */ +#define SMB_INT_ACK (1 << 1) /* To SB Mailbox Host Interrupt ACK */ +#define SMB_USE_OOB (1 << 2) /* To SB Mailbox Use OOB Wakeup */ +#define SMB_DEV_INT (1 << 3) /* To SB Mailbox Miscellaneous Interrupt */ +#define SMB_MASK 0x0000000f /* To SB Mailbox Mask */ + +/* tosbmailboxdata */ +#define SMB_DATA_VERSION_MASK 0x00ff0000 /* host protocol version (sent with F2 enable) */ +#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version (sent with F2 enable) */ + +/* + * Software allocation of To Host Mailbox resources + */ + +/* intstatus bits */ +#define I_HMB_FC_STATE I_HMB_SW0 /* To Host Mailbox Flow Control State */ +#define I_HMB_FC_CHANGE I_HMB_SW1 /* To Host Mailbox Flow Control State Changed */ +#define I_HMB_FRAME_IND I_HMB_SW2 /* To Host Mailbox Frame Indication */ +#define I_HMB_HOST_INT I_HMB_SW3 /* To Host Mailbox Miscellaneous Interrupt */ + +#define I_TOHOSTMAIL (I_HMB_FC_CHANGE | I_HMB_FRAME_IND | I_HMB_HOST_INT) + +/* tohostmailbox bits corresponding to intstatus bits */ +#define HMB_FC_ON (1 << 0) /* To Host Mailbox Flow Control State */ +#define HMB_FC_CHANGE (1 << 1) /* To Host Mailbox Flow Control State Changed */ +#define HMB_FRAME_IND (1 << 2) /* To Host Mailbox Frame Indication */ +#define HMB_HOST_INT (1 << 3) /* To Host Mailbox Miscellaneous Interrupt */ +#define HMB_MASK 0x0000000f /* To Host Mailbox Mask */ + +/* tohostmailboxdata */ +#define HMB_DATA_NAKHANDLED 0x01 /* we're ready to retransmit NAK'd frame to host */ +#define HMB_DATA_DEVREADY 0x02 /* we're ready to talk to host after enable */ +#define HMB_DATA_FC 0x04 /* per prio flowcontrol update flag to host */ +#define HMB_DATA_FWREADY 0x08 /* firmware 
is ready for protocol activity */ +#define HMB_DATA_FWHALT 0x10 /* firmware has halted operation */ + +#define HMB_DATA_FCDATA_MASK 0xff000000 /* per prio flowcontrol data */ +#define HMB_DATA_FCDATA_SHIFT 24 /* per prio flowcontrol data */ + +#define HMB_DATA_VERSION_MASK 0x00ff0000 /* device protocol version (with devready) */ +#define HMB_DATA_VERSION_SHIFT 16 /* device protocol version (with devready) */ + +/* + * Software-defined protocol header + */ + +/* Current protocol version */ +#define SDPCM_PROT_VERSION 4 + +/* SW frame header */ +#define SDPCM_SEQUENCE_MASK 0x000000ff /* Sequence Number Mask */ +#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */ + +#define SDPCM_CHANNEL_MASK 0x00000f00 /* Channel Number Mask */ +#define SDPCM_CHANNEL_SHIFT 8 /* Channel Number Shift */ +#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) /* p starts w/SW Header */ + +#define SDPCM_FLAGS_MASK 0x0000f000 /* Mask of flag bits */ +#define SDPCM_FLAGS_SHIFT 12 /* Flag bits shift */ +#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */ + +/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */ +#define SDPCM_NEXTLEN_MASK 0x00ff0000 /* Next Read Len Mask */ +#define SDPCM_NEXTLEN_SHIFT 16 /* Next Read Len Shift */ +#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */ +#define SDPCM_NEXTLEN_OFFSET 2 + +/* Data Offset from SOF (HW Tag, SW Tag, Pad) */ +#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */ +#define SDPCM_DOFFSET_VALUE(p) (((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff) +#define SDPCM_DOFFSET_MASK 0xff000000 +#define SDPCM_DOFFSET_SHIFT 24 + +#define SDPCM_FCMASK_OFFSET 4 /* Flow control */ +#define SDPCM_FCMASK_VALUE(p) (((uint8 *)p)[SDPCM_FCMASK_OFFSET ] & 0xff) +#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */ +#define SDPCM_WINDOW_VALUE(p) (((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff) +#define SDPCM_VERSION_OFFSET 6 /* Version # */ +#define SDPCM_VERSION_VALUE(p) (((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff) +#define SDPCM_UNUSED_OFFSET 7 /* Spare */ +#define SDPCM_UNUSED_VALUE(p) (((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff) + +#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */ + +/* logical channel numbers */ +#define SDPCM_CONTROL_CHANNEL 0 /* Control Request/Response Channel Id */ +#define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication Channel Id */ +#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */ +#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets (superframes) */ +#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */ +#define SDPCM_MAX_CHANNEL 15 + +#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for eight-bit frame seq number */ + +#define SDPCM_FLAG_RESVD0 0x01 +#define SDPCM_FLAG_RESVD1 0x02 +#define SDPCM_FLAG_GSPI_TXENAB 0x04 +#define SDPCM_FLAG_GLOMDESC 0x08 /* Superframe descriptor mask */ + +/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */ +#define SDPCM_GLOMDESC_FLAG (SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT) + +#define SDPCM_GLOMDESC(p) (((uint8 *)p)[1] & 0x80) + +/* For TEST_CHANNEL packets, define another 4-byte header */ +#define SDPCM_TEST_HDRLEN 4 /* Generally: Cmd(1), Ext(1), Len(2); + * Semantics of Ext byte depend on command. + * Len is current or requested frame length, not + * including test header; sent little-endian. + */ +#define SDPCM_TEST_PKT_CNT_FLD_LEN 4 /* Packet count field length */ +#define SDPCM_TEST_DISCARD 0x01 /* Receiver discards. Ext is a pattern id. 
*/ +#define SDPCM_TEST_ECHOREQ 0x02 /* Echo request. Ext is a pattern id. */ +#define SDPCM_TEST_ECHORSP 0x03 /* Echo response. Ext is a pattern id. */ +#define SDPCM_TEST_BURST 0x04 /* Receiver to send a burst. Ext is a frame count + * (Backward compatibility) Set frame count in a + * 4-byte field adjacent to the HDR + */ +#define SDPCM_TEST_SEND 0x05 /* Receiver sets send mode. Ext is boolean on/off + * Set frame count in a 4-byte field adjacent to + * the HDR + */ + +/* Handy macro for filling in datagen packets with a pattern */ +#define SDPCM_TEST_FILL(byteno, id) ((uint8)(id + byteno)) + +/* + * Software counters (first part matches hardware counters) + */ + +typedef volatile struct { + uint32 cmd52rd; /* Cmd52RdCount, SDIO: cmd52 reads */ + uint32 cmd52wr; /* Cmd52WrCount, SDIO: cmd52 writes */ + uint32 cmd53rd; /* Cmd53RdCount, SDIO: cmd53 reads */ + uint32 cmd53wr; /* Cmd53WrCount, SDIO: cmd53 writes */ + uint32 abort; /* AbortCount, SDIO: aborts */ + uint32 datacrcerror; /* DataCrcErrorCount, SDIO: frames w/CRC error */ + uint32 rdoutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */ + uint32 wroutofsync; /* WrOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */ + uint32 writebusy; /* WriteBusyCount, SDIO: device asserted "busy" */ + uint32 readwait; /* ReadWaitCount, SDIO: no data ready for a read cmd */ + uint32 readterm; /* ReadTermCount, SDIO: read frame termination cmds */ + uint32 writeterm; /* WriteTermCount, SDIO: write frames termination cmds */ + uint32 rxdescuflo; /* receive descriptor underflows */ + uint32 rxfifooflo; /* receive fifo overflows */ + uint32 txfifouflo; /* transmit fifo underflows */ + uint32 runt; /* runt (too short) frames recv'd from bus */ + uint32 badlen; /* frame's rxh len does not match its hw tag len */ + uint32 badcksum; /* frame's hw tag chksum doesn't agree with len value */ + uint32 seqbreak; /* break in sequence # space from one rx frame to the next */ + uint32 rxfcrc; /* frame rx header indicates crc error */ + uint32 rxfwoos; /* frame rx header indicates write out of sync */ + uint32 rxfwft; /* frame rx header indicates write frame termination */ + uint32 rxfabort; /* frame rx header indicates frame aborted */ + uint32 woosint; /* write out of sync interrupt */ + uint32 roosint; /* read out of sync interrupt */ + uint32 rftermint; /* read frame terminate interrupt */ + uint32 wftermint; /* write frame terminate interrupt */ +} sdpcmd_cnt_t; + +/* + * Register Access Macros + */ + +#define SDIODREV_IS(var, val) ((var) == (val)) +#define SDIODREV_GE(var, val) ((var) >= (val)) +#define SDIODREV_GT(var, val) ((var) > (val)) +#define SDIODREV_LT(var, val) ((var) < (val)) +#define SDIODREV_LE(var, val) ((var) <= (val)) + +#define SDIODDMAREG32(h, dir, chnl) \ + ((dir) == DMA_TX ? \ + (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \ + (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv)) + +#define SDIODDMAREG64(h, dir, chnl) \ + ((dir) == DMA_TX ? \ + (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \ + (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv)) + +#define SDIODDMAREG(h, dir, chnl) \ + (SDIODREV_LT((h)->corerev, 1) ? \ + SDIODDMAREG32((h), (dir), (chnl)) : \ + SDIODDMAREG64((h), (dir), (chnl))) + +#define PCMDDMAREG(h, dir, chnl) \ + ((dir) == DMA_TX ? \ + (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \ + (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv)) + +#define SDPCMDMAREG(h, dir, chnl, coreid) \ + ((coreid) == SDIOD_CORE_ID ? 
\ + SDIODDMAREG(h, dir, chnl) : \ + PCMDDMAREG(h, dir, chnl)) + +#define SDIODFIFOREG(h, corerev) \ + (SDIODREV_LT((corerev), 1) ? \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo))) + +#define PCMDFIFOREG(h) \ + ((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo)) + +#define SDPCMFIFOREG(h, coreid, corerev) \ + ((coreid) == SDIOD_CORE_ID ? \ + SDIODFIFOREG(h, corerev) : \ + PCMDFIFOREG(h)) + +/* + * Shared structure between dongle and the host. + * The structure contains pointers to trap or assert information. + */ +#define SDPCM_SHARED_VERSION 0x0001 +#define SDPCM_SHARED_VERSION_MASK 0x00FF +#define SDPCM_SHARED_ASSERT_BUILT 0x0100 +#define SDPCM_SHARED_ASSERT 0x0200 +#define SDPCM_SHARED_TRAP 0x0400 +#define SDPCM_SHARED_IN_BRPT 0x0800 +#define SDPCM_SHARED_SET_BRPT 0x1000 +#define SDPCM_SHARED_PENDING_BRPT 0x2000 + +typedef struct { + uint32 flags; + uint32 trap_addr; + uint32 assert_exp_addr; + uint32 assert_file_addr; + uint32 assert_line; + uint32 console_addr; /* Address of hnd_cons_t */ + uint32 msgtrace_addr; + uint32 fwid; +} sdpcm_shared_t; + +extern sdpcm_shared_t sdpcm_shared; + +#endif /* _bcmsdpcm_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdspi.h b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h new file mode 100644 index 000000000000..b1831db8b19b --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdspi.h @@ -0,0 +1,138 @@ +/* + * SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsdspi.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _BCM_SD_SPI_H +#define _BCM_SD_SPI_H + +/* global msglevel for debug messages - bitvals come from sdiovar.h */ + +#define sd_err(x) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#undef ERROR +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SPI 0 + +#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */ +#define USE_MULTIBLOCK 0x4 + +struct sdioh_info { + uint cfg_bar; /* pci cfg address for bar */ + uint32 caps; /* cached value of capabilities reg */ + uint bar0; /* BAR0 for PCI Device */ + osl_t *osh; /* osh handler */ + void *controller; /* Pointer to SPI Controller's private data struct */ + + uint lockcount; /* nest count of sdspi_lock() calls */ + bool client_intr_enabled; /* interrupt connected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + bool initialized; /* card initialized */ + uint32 target_dev; /* Target device ID */ + uint32 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + + uint32 controller_type; /* Host controller type */ + uint8 version; /* Host Controller Spec Compliance Version */ + uint irq; /* Client irq */ + uint32 intrcount; /* Client interrupts */ + uint32 local_intrcount; /* Controller interrupts */ + bool host_init_done; /* Controller initted */ + bool card_init_done; /* Client SDIO interface initted */ + bool polled_mode; /* polling for command completion */ + + bool sd_use_dma; /* DMA on CMD53 */ + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + bool got_hcint; /* Host Controller interrupt. 
*/ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current register transfer size */ + uint32 cmd53_wr_data; /* Used to pass CMD53 write data */ + uint32 card_response; /* Used to pass back response status byte */ + uint32 card_rsp_data; /* Used to pass back response data word */ + uint16 card_rca; /* Current Address */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + void *dma_buf; + ulong dma_phys; + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmsdspi.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/************************************************************** + * Internal interfaces: bcmsdspi.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *spi_reg_map(osl_t *osh, uintptr addr, int size); +extern void spi_reg_unmap(osl_t *osh, uintptr addr, int size); + +/* Interrupt (de)registration routines */ +extern int spi_register_irq(sdioh_info_t *sd, uint irq); +extern void spi_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void spi_lock(sdioh_info_t *sd); +extern void spi_unlock(sdioh_info_t *sd); + +/* Allocate/init/free per-OS private data */ +extern int spi_osinit(sdioh_info_t *sd); +extern void spi_osfree(sdioh_info_t *sd); + +#endif /* _BCM_SD_SPI_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsdstd.h b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h new file mode 100644 index 000000000000..24df8de685d7 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsdstd.h @@ -0,0 +1,285 @@ +/* + * 'Standard' SDIO HOST CONTROLLER driver + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmsdstd.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _BCM_SD_STD_H +#define _BCM_SD_STD_H + +/* global msglevel for debug messages - bitvals come from sdiovar.h */ +#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) +#define sd_dma(x) + +#define sd_sync_dma(sd, read, nbytes) +#define sd_init_dma(sd) +#define sd_ack_intr(sd) +#define sd_wakeup(sd) +/* Allocate/init/free per-OS private data */ +extern int sdstd_osinit(sdioh_info_t *sd); +extern void sdstd_osfree(sdioh_info_t *sd); + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_4318 64 +#define BLOCK_SIZE_4328 512 + +/* internal return code */ +#define SUCCESS 0 +#define ERROR 1 + +/* private bus modes */ +#define SDIOH_MODE_SPI 0 +#define SDIOH_MODE_SD1 1 +#define SDIOH_MODE_SD4 2 + +#define MAX_SLOTS 6 /* For PCI: Only 6 BAR entries => 6 slots */ +#define SDIOH_REG_WINSZ 0x100 /* Number of registers in Standard Host Controller */ + +#define SDIOH_TYPE_ARASAN_HDK 1 +#define SDIOH_TYPE_BCM27XX 2 +#define SDIOH_TYPE_TI_PCIXX21 4 /* TI PCIxx21 Standard Host Controller */ +#define SDIOH_TYPE_RICOH_R5C822 5 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter */ +#define SDIOH_TYPE_JMICRON 6 /* JMicron Standard SDIO Host Controller */ + +/* For linux, allow yielding for dongle */ +#define BCMSDYIELD + +/* Expected card status value for CMD7 */ +#define SDIOH_CMD7_EXP_STATUS 0x00001E00 + +#define RETRIES_LARGE 100000 +#define sdstd_os_yield(sd) do {} while (0) +#define RETRIES_SMALL 100 + + +#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */ +#define USE_MULTIBLOCK 0x4 + +#define USE_FIFO 0x8 /* Fifo vs non-fifo */ + +#define CLIENT_INTR 0x100 /* Get rid of this! 
*/ + +#define HC_INTR_RETUNING 0x1000 + + +#ifdef BCMSDIOH_TXGLOM +/* Total glom pkt cannot exceed 64K + * need one more slot for glom padding packet + */ +#define SDIOH_MAXGLOM_SIZE (40+1) + +typedef struct glom_buf { + uint32 count; /* Total number of pkts queued */ + void *dma_buf_arr[SDIOH_MAXGLOM_SIZE]; /* Frame address */ + ulong dma_phys_arr[SDIOH_MAXGLOM_SIZE]; /* DMA_MAPed address of frames */ + uint16 nbytes[SDIOH_MAXGLOM_SIZE]; /* Size of each frame */ +} glom_buf_t; +#endif + +struct sdioh_info { + uint cfg_bar; /* pci cfg address for bar */ + uint32 caps; /* cached value of capabilities reg */ + uint32 curr_caps; /* max current capabilities reg */ + + osl_t *osh; /* osh handler */ + volatile char *mem_space; /* pci device memory va */ + uint lockcount; /* nest count of sdstd_lock() calls */ + bool client_intr_enabled; /* interrupt connected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + bool initialized; /* card initialized */ + uint target_dev; /* Target device ID */ + uint16 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + void *bcmsdh; /* handler to upper layer stack (bcmsdh) */ + + uint32 controller_type; /* Host controller type */ + uint8 version; /* Host Controller Spec Compliance Version */ + uint irq; /* Client irq */ + int intrcount; /* Client interrupts */ + int local_intrcount; /* Controller interrupts */ + bool host_init_done; /* Controller initted */ + bool card_init_done; /* Client SDIO interface initted */ + bool polled_mode; /* polling for command completion */ + + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current transfer */ + uint16 card_rca; /* Current Address */ + int8 sd_dma_mode; /* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS]; + void *dma_buf; /* DMA Buffer virtual address */ + ulong dma_phys; /* DMA Buffer physical address */ + void *adma2_dscr_buf; /* ADMA2 Descriptor Buffer virtual address */ + ulong adma2_dscr_phys; /* ADMA2 Descriptor Buffer physical address */ + + /* adjustments needed to make the dma align properly */ + void *dma_start_buf; + ulong dma_start_phys; + uint alloced_dma_size; + void *adma2_dscr_start_buf; + ulong adma2_dscr_start_phys; + uint alloced_adma2_dscr_size; + + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ + bool got_hcint; /* local interrupt flag */ + uint16 last_intrstatus; /* to cache intrstatus */ + int host_UHSISupported; /* whether UHSI is supported for HC. */ + int card_UHSI_voltage_Supported; /* whether UHSI is supported for + * Card in terms of Voltage [1.8 or 3.3]. + */ + int global_UHSI_Supp; /* type of UHSI support in both host and card. 
+ * HOST_SDR_UNSUPP: capabilities not supported/matched + * HOST_SDR_12_25: SDR12 and SDR25 supported + * HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supported + */ + volatile int sd3_dat_state; /* data transfer state used for retuning check */ + volatile int sd3_tun_state; /* tuning state used for retuning check */ + bool sd3_tuning_reqd; /* tuning requirement parameter */ + uint32 caps3; /* cached value of 32 MSbits capabilities reg (SDIO 3.0) */ +#ifdef BCMSDIOH_TXGLOM + glom_buf_t glom_info; /* pkt information used for glomming */ + uint txglom_mode; /* Txglom mode: 0 - copy, 1 - multi-descriptor */ +#endif +}; + +#define DMA_MODE_NONE 0 +#define DMA_MODE_SDMA 1 +#define DMA_MODE_ADMA1 2 +#define DMA_MODE_ADMA2 3 +#define DMA_MODE_ADMA2_64 4 +#define DMA_MODE_AUTO -1 + +#define USE_DMA(sd) ((bool)((sd->sd_dma_mode > 0) ? TRUE : FALSE)) + +/* States for tuning and corresponding data transfers */ +#define TUNING_IDLE 0 +#define TUNING_START 1 +#define TUNING_START_AFTER_DAT 2 +#define TUNING_ONGOING 3 + +#define DATA_TRANSFER_IDLE 0 +#define DATA_TRANSFER_ONGOING 1 + +#define CHECK_TUNING_PRE_DATA 1 +#define CHECK_TUNING_POST_DATA 2 + + +#ifdef DHD_DEBUG +#define SD_DHD_DISABLE_PERIODIC_TUNING 0x01 +#define SD_DHD_ENABLE_PERIODIC_TUNING 0x00 +#endif + + +/************************************************************ + * Internal interfaces: per-port references into bcmsdstd.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/* OS-independent interrupt handler */ +extern bool check_client_intr(sdioh_info_t *sd); + +/* Core interrupt enable/disable of device interrupts */ +extern void sdstd_devintr_on(sdioh_info_t *sd); +extern void sdstd_devintr_off(sdioh_info_t *sd); + +/* Enable/disable interrupts for local controller events */ +extern void sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err); +extern void sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err); + +/* Wait for specified interrupt and error bits to be set */ +extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err); + + +/************************************************************** + * Internal interfaces: bcmsdstd.c references to per-port code + */ + +/* Register mapping routines */ +extern uint32 *sdstd_reg_map(osl_t *osh, ulong addr, int size); +extern void sdstd_reg_unmap(osl_t *osh, ulong addr, int size); + +/* Interrupt (de)registration routines */ +extern int sdstd_register_irq(sdioh_info_t *sd, uint irq); +extern void sdstd_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void sdstd_lock(sdioh_info_t *sd); +extern void sdstd_unlock(sdioh_info_t *sd); +extern void sdstd_waitlockfree(sdioh_info_t *sd); + +/* OS-specific wrappers for safe concurrent register access */ +extern void sdstd_os_lock_irqsave(sdioh_info_t *sd, ulong* flags); +extern void sdstd_os_unlock_irqrestore(sdioh_info_t *sd, ulong* flags); + +/* OS-specific wait-for-interrupt-or-status */ +extern int sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield, uint16 *bits); + +/* used by bcmsdstd_linux [implemented in sdstd] */ +extern void sdstd_3_enable_retuning_int(sdioh_info_t *sd); +extern void sdstd_3_disable_retuning_int(sdioh_info_t *sd); +extern bool sdstd_3_is_retuning_int_set(sdioh_info_t *sd); +extern void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param); +extern bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd); +extern int sdstd_3_get_tune_state(sdioh_info_t *sd); +extern int sdstd_3_get_data_state(sdioh_info_t *sd);
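+ +/* + * Editor's note, illustrative only (not part of the original Broadcom sources): + * a minimal sketch of how the tuning/data state accessors above are typically + * paired around a CMD53 data transfer so that a retuning check runs only while + * the bus is idle; the transfer step itself is elided. + * + * if (sdstd_3_get_tune_state(sd) == TUNING_IDLE && + *     sdstd_3_get_data_state(sd) == DATA_TRANSFER_IDLE) { + *         sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING); + *         ... perform the CMD53 data transfer ... + *         sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE); + *         sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA); + * } + */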
+extern void sdstd_3_set_tune_state(sdioh_info_t *sd, int state); +extern void sdstd_3_set_data_state(sdioh_info_t *sd, int state); +extern uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd); +extern uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd); +extern int sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode); + +/* used by sdstd [implemented in bcmsdstd_linux/ndis] */ +extern void sdstd_3_start_tuning(sdioh_info_t *sd); +extern void sdstd_3_osinit_tuning(sdioh_info_t *sd); +extern void sdstd_3_osclean_tuning(sdioh_info_t *sd); + +extern void sdstd_enable_disable_periodic_timer(sdioh_info_t *sd, uint val); + +extern sdioh_info_t *sdioh_attach(osl_t *osh, void *bar0, uint irq); +extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd); +#endif /* _BCM_SD_STD_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmspi.h b/drivers/net/wireless/bcmdhd/include/bcmspi.h new file mode 100644 index 000000000000..e9a906e79734 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmspi.h @@ -0,0 +1,43 @@ +/* + * Broadcom SPI Low-Level Hardware Driver API + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmspi.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _BCM_SPI_H +#define _BCM_SPI_H + +extern void spi_devintr_off(sdioh_info_t *sd); +extern void spi_devintr_on(sdioh_info_t *sd); +extern bool spi_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor); +extern bool spi_controller_highspeed_mode(sdioh_info_t *sd, bool hsmode); +extern bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr); +extern bool spi_hw_attach(sdioh_info_t *sd); +extern bool spi_hw_detach(sdioh_info_t *sd); +extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen); +extern void spi_spinbits(sdioh_info_t *sd); +extern void spi_waitbits(sdioh_info_t *sd, bool yield); + +#endif /* _BCM_SPI_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h b/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h new file mode 100644 index 000000000000..7c2bfc4653c1 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmspibrcm.h @@ -0,0 +1,165 @@ +/* + * SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmspibrcm.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _BCM_SPI_BRCM_H +#define _BCM_SPI_BRCM_H + +#ifndef SPI_MAX_IOFUNCS +/* Maximum number of I/O funcs */ +#define SPI_MAX_IOFUNCS 4 +#endif +/* global msglevel for debug messages - bitvals come from sdiovar.h */ + +#if defined(DHD_DEBUG) +#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0) +#define sd_trace(x) do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0) +#define sd_info(x) do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0) +#define sd_debug(x) do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0) +#define sd_data(x) do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0) +#define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0) +#else +#define sd_err(x) +#define sd_trace(x) +#define sd_info(x) +#define sd_debug(x) +#define sd_data(x) +#define sd_ctrl(x) +#endif + +#define sd_log(x) + +#define SDIOH_ASSERT(exp) \ + do { if (!(exp)) \ + printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \ + } while (0) + +#define BLOCK_SIZE_F1 64 +#define BLOCK_SIZE_F2 2048 +#define BLOCK_SIZE_F3 2048 + +/* internal return code */ +#define SUCCESS 0 +#undef ERROR +#define ERROR 1 +#define ERROR_UF 2 +#define ERROR_OF 3 + +/* private bus modes */ +#define SDIOH_MODE_SPI 0 + +#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */ +#define USE_MULTIBLOCK 0x4 + +struct sdioh_info { + uint cfg_bar; /* pci cfg address for bar */ + uint32 caps; /* cached value of capabilities reg */ + void *bar0; /* BAR0 for PCI Device */ + osl_t *osh; /* osh handler */ + void *controller; /* Pointer to SPI Controller's private data struct */ + uint lockcount; /* nest count of spi_lock() calls */ + bool client_intr_enabled; /* interrupt connected flag */ + bool intr_handler_valid; /* client driver interrupt handler valid */ + sdioh_cb_fn_t intr_handler; /* registered interrupt handler */ + void *intr_handler_arg; /* argument to call interrupt handler */ + bool initialized; /* card initialized */ + uint32 target_dev; /* Target device ID */ + uint32 intmask; /* Current active interrupts */ + void *sdos_info; /* Pointer to per-OS private data */ + uint32 controller_type; /* Host controller type */ + uint8 version; /* Host Controller Spec Compliance Version */ + uint irq; /* Client irq */ + uint32 intrcount; /* Client interrupts */ + uint32 local_intrcount; /* Controller interrupts */ + bool host_init_done; /* Controller initted */ + bool card_init_done; /* Client SDIO interface initted */ + bool polled_mode; /* polling for command completion */ + + bool sd_use_dma; /* DMA on CMD53 */ + bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. 
*/ + /* Must be on for sd_multiblock to be effective */ + bool use_client_ints; /* If this is false, make sure to restore */ + /* polling hack in wl_linux.c:wl_timer() */ + int adapter_slot; /* Maybe dealing with multiple slots/controllers */ + int sd_mode; /* SD1/SD4/SPI */ + int client_block_size[SPI_MAX_IOFUNCS]; /* Blocksize */ + uint32 data_xfer_count; /* Current transfer */ + uint16 card_rca; /* Current Address */ + uint8 num_funcs; /* Supported funcs on client */ + uint32 card_dstatus; /* 32bit device status */ + uint32 com_cis_ptr; + uint32 func_cis_ptr[SPI_MAX_IOFUNCS]; + void *dma_buf; + ulong dma_phys; + int r_cnt; /* rx count */ + int t_cnt; /* tx_count */ + uint32 wordlen; /* host processor 16/32bits */ + uint32 prev_fun; + uint32 chip; + uint32 chiprev; + bool resp_delay_all; + bool dwordmode; + bool resp_delay_new; + + struct spierrstats_t spierrstats; +}; + +/************************************************************ + * Internal interfaces: per-port references into bcmspibrcm.c + */ + +/* Global message bits */ +extern uint sd_msglevel; + +/************************************************************** + * Internal interfaces: bcmspibrcm.c references to per-port code + */ + +/* Interrupt (de)registration routines */ +extern int spi_register_irq(sdioh_info_t *sd, uint irq); +extern void spi_free_irq(uint irq, sdioh_info_t *sd); + +/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */ +extern void spi_lock(sdioh_info_t *sd); +extern void spi_unlock(sdioh_info_t *sd); + +/* Allocate/init/free per-OS private data */ +extern int spi_osinit(sdioh_info_t *sd); +extern void spi_osfree(sdioh_info_t *sd); + +#define SPI_RW_FLAG_M BITFIELD_MASK(1) /* Bit [31] - R/W Command Bit */ +#define SPI_RW_FLAG_S 31 +#define SPI_ACCESS_M BITFIELD_MASK(1) /* Bit [30] - Fixed/Incr Access */ +#define SPI_ACCESS_S 30 +#define SPI_FUNCTION_M BITFIELD_MASK(2) /* Bit [29:28] - Function Number */ +#define SPI_FUNCTION_S 28 +#define SPI_REG_ADDR_M BITFIELD_MASK(17) /* Bit [27:11] - Address */ +#define SPI_REG_ADDR_S 11 +#define SPI_LEN_M BITFIELD_MASK(11) /* Bit [10:0] - Packet length */ +#define SPI_LEN_S 0 + +#endif /* _BCM_SPI_BRCM_H */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h b/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h new file mode 100644 index 000000000000..a40bd569da34 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsrom_fmt.h @@ -0,0 +1,965 @@ +/* + * SROM format definition. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmsrom_fmt.h 553280 2015-04-29 07:55:29Z $ + */ + +#ifndef _bcmsrom_fmt_h_ +#define _bcmsrom_fmt_h_ + +#define SROM_MAXREV 13 /* max revision supported by driver */ + +/* Maximum srom: 12 Kilobits == 1536 bytes */ + +#define SROM_MAX 1536 +#define SROM_MAXW 594 + +#ifdef LARGE_NVRAM_MAXSZ +#define VARS_MAX LARGE_NVRAM_MAXSZ +#else +#define VARS_MAX 4096 +#endif /* LARGE_NVRAM_MAXSZ */ + +/* PCI fields */ +#define PCI_F0DEVID 48 + + +#define SROM_WORDS 64 + +#define SROM3_SWRGN_OFF 28 /* s/w region offset in words */ + +#define SROM_SSID 2 +#define SROM_SVID 3 + +#define SROM_WL1LHMAXP 29 + +#define SROM_WL1LPAB0 30 +#define SROM_WL1LPAB1 31 +#define SROM_WL1LPAB2 32 + +#define SROM_WL1HPAB0 33 +#define SROM_WL1HPAB1 34 +#define SROM_WL1HPAB2 35 + +#define SROM_MACHI_IL0 36 +#define SROM_MACMID_IL0 37 +#define SROM_MACLO_IL0 38 +#define SROM_MACHI_ET0 39 +#define SROM_MACMID_ET0 40 +#define SROM_MACLO_ET0 41 +#define SROM_MACHI_ET1 42 +#define SROM_MACMID_ET1 43 +#define SROM_MACLO_ET1 44 +#define SROM3_MACHI 37 +#define SROM3_MACMID 38 +#define SROM3_MACLO 39 + +#define SROM_BXARSSI2G 40 +#define SROM_BXARSSI5G 41 + +#define SROM_TRI52G 42 +#define SROM_TRI5GHL 43 + +#define SROM_RXPO52G 45 + +#define SROM2_ENETPHY 45 + +#define SROM_AABREV 46 +/* Fields in AABREV */ +#define SROM_BR_MASK 0x00ff +#define SROM_CC_MASK 0x0f00 +#define SROM_CC_SHIFT 8 +#define SROM_AA0_MASK 0x3000 +#define SROM_AA0_SHIFT 12 +#define SROM_AA1_MASK 0xc000 +#define SROM_AA1_SHIFT 14 + +#define SROM_WL0PAB0 47 +#define SROM_WL0PAB1 48 +#define SROM_WL0PAB2 49 + +#define SROM_LEDBH10 50 +#define SROM_LEDBH32 51 + +#define SROM_WL10MAXP 52 + +#define SROM_WL1PAB0 53 +#define SROM_WL1PAB1 54 +#define SROM_WL1PAB2 55 + +#define SROM_ITT 56 + +#define SROM_BFL 57 +#define SROM_BFL2 28 +#define SROM3_BFL2 61 + +#define SROM_AG10 58 + +#define SROM_CCODE 59 + +#define SROM_OPO 60 + +#define SROM3_LEDDC 62 + +#define SROM_CRCREV 63 + +/* SROM Rev 4: Reallocate the software part of the srom to accommodate + * MIMO features. It assumes up to two PCIE functions and 440 bytes + * of usable srom, i.e. the usable storage in chips with OTP that + * implements hardware redundancy. 
+ */ + +#define SROM4_WORDS 220 + +#define SROM4_SIGN 32 +#define SROM4_SIGNATURE 0x5372 + +#define SROM4_BREV 33 + +#define SROM4_BFL0 34 +#define SROM4_BFL1 35 +#define SROM4_BFL2 36 +#define SROM4_BFL3 37 +#define SROM5_BFL0 37 +#define SROM5_BFL1 38 +#define SROM5_BFL2 39 +#define SROM5_BFL3 40 + +#define SROM4_MACHI 38 +#define SROM4_MACMID 39 +#define SROM4_MACLO 40 +#define SROM5_MACHI 41 +#define SROM5_MACMID 42 +#define SROM5_MACLO 43 + +#define SROM4_CCODE 41 +#define SROM4_REGREV 42 +#define SROM5_CCODE 34 +#define SROM5_REGREV 35 + +#define SROM4_LEDBH10 43 +#define SROM4_LEDBH32 44 +#define SROM5_LEDBH10 59 +#define SROM5_LEDBH32 60 + +#define SROM4_LEDDC 45 +#define SROM5_LEDDC 45 + +#define SROM4_AA 46 +#define SROM4_AA2G_MASK 0x00ff +#define SROM4_AA2G_SHIFT 0 +#define SROM4_AA5G_MASK 0xff00 +#define SROM4_AA5G_SHIFT 8 + +#define SROM4_AG10 47 +#define SROM4_AG32 48 + +#define SROM4_TXPID2G 49 +#define SROM4_TXPID5G 51 +#define SROM4_TXPID5GL 53 +#define SROM4_TXPID5GH 55 + +#define SROM4_TXRXC 61 +#define SROM4_TXCHAIN_MASK 0x000f +#define SROM4_TXCHAIN_SHIFT 0 +#define SROM4_RXCHAIN_MASK 0x00f0 +#define SROM4_RXCHAIN_SHIFT 4 +#define SROM4_SWITCH_MASK 0xff00 +#define SROM4_SWITCH_SHIFT 8 + + +/* Per-path fields */ +#define MAX_PATH_SROM 4 +#define SROM4_PATH0 64 +#define SROM4_PATH1 87 +#define SROM4_PATH2 110 +#define SROM4_PATH3 133 + +#define SROM4_2G_ITT_MAXP 0 +#define SROM4_2G_PA 1 +#define SROM4_5G_ITT_MAXP 5 +#define SROM4_5GLH_MAXP 6 +#define SROM4_5G_PA 7 +#define SROM4_5GL_PA 11 +#define SROM4_5GH_PA 15 + +/* Fields in the ITT_MAXP and 5GLH_MAXP words */ +#define B2G_MAXP_MASK 0xff +#define B2G_ITT_SHIFT 8 +#define B5G_MAXP_MASK 0xff +#define B5G_ITT_SHIFT 8 +#define B5GH_MAXP_MASK 0xff +#define B5GL_MAXP_SHIFT 8 + +/* All the myriad power offsets */ +#define SROM4_2G_CCKPO 156 +#define SROM4_2G_OFDMPO 157 +#define SROM4_5G_OFDMPO 159 +#define SROM4_5GL_OFDMPO 161 +#define SROM4_5GH_OFDMPO 163 +#define SROM4_2G_MCSPO 165 +#define SROM4_5G_MCSPO 173 +#define SROM4_5GL_MCSPO 181 +#define SROM4_5GH_MCSPO 189 +#define SROM4_CDDPO 197 +#define SROM4_STBCPO 198 +#define SROM4_BW40PO 199 +#define SROM4_BWDUPPO 200 + +#define SROM4_CRCREV 219 + + +/* SROM Rev 8: Make space for a 48-word hardware header for PCIe rev >= 6. + * This is a combined srom for both MIMO and SISO boards, usable in + * the .130 4Kilobit OTP with hardware redundancy. 
+ */ + +#define SROM8_SIGN 64 + +#define SROM8_BREV 65 + +#define SROM8_BFL0 66 +#define SROM8_BFL1 67 +#define SROM8_BFL2 68 +#define SROM8_BFL3 69 + +#define SROM8_MACHI 70 +#define SROM8_MACMID 71 +#define SROM8_MACLO 72 + +#define SROM8_CCODE 73 +#define SROM8_REGREV 74 + +#define SROM8_LEDBH10 75 +#define SROM8_LEDBH32 76 + +#define SROM8_LEDDC 77 + +#define SROM8_AA 78 + +#define SROM8_AG10 79 +#define SROM8_AG32 80 + +#define SROM8_TXRXC 81 + +#define SROM8_BXARSSI2G 82 +#define SROM8_BXARSSI5G 83 +#define SROM8_TRI52G 84 +#define SROM8_TRI5GHL 85 +#define SROM8_RXPO52G 86 + +#define SROM8_FEM2G 87 +#define SROM8_FEM5G 88 +#define SROM8_FEM_ANTSWLUT_MASK 0xf800 +#define SROM8_FEM_ANTSWLUT_SHIFT 11 +#define SROM8_FEM_TR_ISO_MASK 0x0700 +#define SROM8_FEM_TR_ISO_SHIFT 8 +#define SROM8_FEM_PDET_RANGE_MASK 0x00f8 +#define SROM8_FEM_PDET_RANGE_SHIFT 3 +#define SROM8_FEM_EXTPA_GAIN_MASK 0x0006 +#define SROM8_FEM_EXTPA_GAIN_SHIFT 1 +#define SROM8_FEM_TSSIPOS_MASK 0x0001 +#define SROM8_FEM_TSSIPOS_SHIFT 0 + +#define SROM8_THERMAL 89 + +/* Temp sense related entries */ +#define SROM8_MPWR_RAWTS 90 +#define SROM8_TS_SLP_OPT_CORRX 91 +/* FOC: frequency offset correction, HWIQ: H/W IOCAL enable, IQSWP: IQ CAL swap disable */ +#define SROM8_FOC_HWIQ_IQSWP 92 + +#define SROM8_EXTLNAGAIN 93 + +/* Temperature delta for PHY calibration */ +#define SROM8_PHYCAL_TEMPDELTA 94 + +/* Measured power 1 & 2, bits 0-13 at offset 95, MSB 2 bits are unused for now. */ +#define SROM8_MPWR_1_AND_2 95 + + +/* Per-path offsets & fields */ +#define SROM8_PATH0 96 +#define SROM8_PATH1 112 +#define SROM8_PATH2 128 +#define SROM8_PATH3 144 + +#define SROM8_2G_ITT_MAXP 0 +#define SROM8_2G_PA 1 +#define SROM8_5G_ITT_MAXP 4 +#define SROM8_5GLH_MAXP 5 +#define SROM8_5G_PA 6 +#define SROM8_5GL_PA 9 +#define SROM8_5GH_PA 12 + +/* All the myriad power offsets */ +#define SROM8_2G_CCKPO 160 + +#define SROM8_2G_OFDMPO 161 +#define SROM8_5G_OFDMPO 163 +#define SROM8_5GL_OFDMPO 165 +#define SROM8_5GH_OFDMPO 167 + +#define SROM8_2G_MCSPO 169 +#define SROM8_5G_MCSPO 177 +#define SROM8_5GL_MCSPO 185 +#define SROM8_5GH_MCSPO 193 + +#define SROM8_CDDPO 201 +#define SROM8_STBCPO 202 +#define SROM8_BW40PO 203 +#define SROM8_BWDUPPO 204 + +/* SISO PA parameters are in the path0 spaces */ +#define SROM8_SISO 96 + +/* Legacy names for SISO PA parameters */ +#define SROM8_W0_ITTMAXP (SROM8_SISO + SROM8_2G_ITT_MAXP) +#define SROM8_W0_PAB0 (SROM8_SISO + SROM8_2G_PA) +#define SROM8_W0_PAB1 (SROM8_SISO + SROM8_2G_PA + 1) +#define SROM8_W0_PAB2 (SROM8_SISO + SROM8_2G_PA + 2) +#define SROM8_W1_ITTMAXP (SROM8_SISO + SROM8_5G_ITT_MAXP) +#define SROM8_W1_MAXP_LCHC (SROM8_SISO + SROM8_5GLH_MAXP) +#define SROM8_W1_PAB0 (SROM8_SISO + SROM8_5G_PA) +#define SROM8_W1_PAB1 (SROM8_SISO + SROM8_5G_PA + 1) +#define SROM8_W1_PAB2 (SROM8_SISO + SROM8_5G_PA + 2) +#define SROM8_W1_PAB0_LC (SROM8_SISO + SROM8_5GL_PA) +#define SROM8_W1_PAB1_LC (SROM8_SISO + SROM8_5GL_PA + 1) +#define SROM8_W1_PAB2_LC (SROM8_SISO + SROM8_5GL_PA + 2) +#define SROM8_W1_PAB0_HC (SROM8_SISO + SROM8_5GH_PA) +#define SROM8_W1_PAB1_HC (SROM8_SISO + SROM8_5GH_PA + 1) +#define SROM8_W1_PAB2_HC (SROM8_SISO + SROM8_5GH_PA + 2) + +#define SROM8_CRCREV 219 + +/* SROM REV 9 */ +#define SROM9_2GPO_CCKBW20 160 +#define SROM9_2GPO_CCKBW20UL 161 +#define SROM9_2GPO_LOFDMBW20 162 +#define SROM9_2GPO_LOFDMBW20UL 164 + +#define SROM9_5GLPO_LOFDMBW20 166 +#define SROM9_5GLPO_LOFDMBW20UL 168 +#define SROM9_5GMPO_LOFDMBW20 170 +#define SROM9_5GMPO_LOFDMBW20UL 172 +#define SROM9_5GHPO_LOFDMBW20 174
+#define SROM9_5GHPO_LOFDMBW20UL 176 + +#define SROM9_2GPO_MCSBW20 178 +#define SROM9_2GPO_MCSBW20UL 180 +#define SROM9_2GPO_MCSBW40 182 + +#define SROM9_5GLPO_MCSBW20 184 +#define SROM9_5GLPO_MCSBW20UL 186 +#define SROM9_5GLPO_MCSBW40 188 +#define SROM9_5GMPO_MCSBW20 190 +#define SROM9_5GMPO_MCSBW20UL 192 +#define SROM9_5GMPO_MCSBW40 194 +#define SROM9_5GHPO_MCSBW20 196 +#define SROM9_5GHPO_MCSBW20UL 198 +#define SROM9_5GHPO_MCSBW40 200 + +#define SROM9_PO_MCS32 202 +#define SROM9_PO_LOFDM40DUP 203 +#define SROM9_EU_EDCRSTH 204 +#define SROM10_EU_EDCRSTH 204 +#define SROM8_RXGAINERR_2G 205 +#define SROM8_RXGAINERR_5GL 206 +#define SROM8_RXGAINERR_5GM 207 +#define SROM8_RXGAINERR_5GH 208 +#define SROM8_RXGAINERR_5GU 209 +#define SROM8_SUBBAND_PPR 210 +#define SROM8_PCIEINGRESS_WAR 211 +#define SROM8_EU_EDCRSTH 212 +#define SROM9_SAR 212 + +#define SROM8_NOISELVL_2G 213 +#define SROM8_NOISELVL_5GL 214 +#define SROM8_NOISELVL_5GM 215 +#define SROM8_NOISELVL_5GH 216 +#define SROM8_NOISELVL_5GU 217 +#define SROM8_NOISECALOFFSET 218 + +#define SROM9_REV_CRC 219 + +#define SROM10_CCKPWROFFSET 218 +#define SROM10_SIGN 219 +#define SROM10_SWCTRLMAP_2G 220 +#define SROM10_CRCREV 229 + +#define SROM10_WORDS 230 +#define SROM10_SIGNATURE SROM4_SIGNATURE + + +/* SROM REV 11 */ +#define SROM11_BREV 65 + +#define SROM11_BFL0 66 +#define SROM11_BFL1 67 +#define SROM11_BFL2 68 +#define SROM11_BFL3 69 +#define SROM11_BFL4 70 +#define SROM11_BFL5 71 + +#define SROM11_MACHI 72 +#define SROM11_MACMID 73 +#define SROM11_MACLO 74 + +#define SROM11_CCODE 75 +#define SROM11_REGREV 76 + +#define SROM11_LEDBH10 77 +#define SROM11_LEDBH32 78 + +#define SROM11_LEDDC 79 + +#define SROM11_AA 80 + +#define SROM11_AGBG10 81 +#define SROM11_AGBG2A0 82 +#define SROM11_AGA21 83 + +#define SROM11_TXRXC 84 + +#define SROM11_FEM_CFG1 85 +#define SROM11_FEM_CFG2 86 + +/* Masks and offsets for FEM_CFG */ +#define SROM11_FEMCTRL_MASK 0xf800 +#define SROM11_FEMCTRL_SHIFT 11 +#define SROM11_PAPDCAP_MASK 0x0400 +#define SROM11_PAPDCAP_SHIFT 10 +#define SROM11_TWORANGETSSI_MASK 0x0200 +#define SROM11_TWORANGETSSI_SHIFT 9 +#define SROM11_PDGAIN_MASK 0x01f0 +#define SROM11_PDGAIN_SHIFT 4 +#define SROM11_EPAGAIN_MASK 0x000e +#define SROM11_EPAGAIN_SHIFT 1 +#define SROM11_TSSIPOSSLOPE_MASK 0x0001 +#define SROM11_TSSIPOSSLOPE_SHIFT 0 +#define SROM11_GAINCTRLSPH_MASK 0xf800 +#define SROM11_GAINCTRLSPH_SHIFT 11 + +#define SROM11_THERMAL 87 +#define SROM11_MPWR_RAWTS 88 +#define SROM11_TS_SLP_OPT_CORRX 89 +#define SROM11_XTAL_FREQ 90 +#define SROM11_5GB0_4080_W0_A1 91 +#define SROM11_PHYCAL_TEMPDELTA 92 +#define SROM11_MPWR_1_AND_2 93 +#define SROM11_5GB0_4080_W1_A1 94 +#define SROM11_TSSIFLOOR_2G 95 +#define SROM11_TSSIFLOOR_5GL 96 +#define SROM11_TSSIFLOOR_5GM 97 +#define SROM11_TSSIFLOOR_5GH 98 +#define SROM11_TSSIFLOOR_5GU 99 + +/* Masks and offsets for Thermal parameters */ +#define SROM11_TEMPS_PERIOD_MASK 0xf0 +#define SROM11_TEMPS_PERIOD_SHIFT 4 +#define SROM11_TEMPS_HYSTERESIS_MASK 0x0f +#define SROM11_TEMPS_HYSTERESIS_SHIFT 0 +#define SROM11_TEMPCORRX_MASK 0xfc +#define SROM11_TEMPCORRX_SHIFT 2 +#define SROM11_TEMPSENSE_OPTION_MASK 0x3 +#define SROM11_TEMPSENSE_OPTION_SHIFT 0 + +#define SROM11_PDOFF_2G_40M_A0_MASK 0x000f +#define SROM11_PDOFF_2G_40M_A0_SHIFT 0 +#define SROM11_PDOFF_2G_40M_A1_MASK 0x00f0 +#define SROM11_PDOFF_2G_40M_A1_SHIFT 4 +#define SROM11_PDOFF_2G_40M_A2_MASK 0x0f00 +#define SROM11_PDOFF_2G_40M_A2_SHIFT 8 +#define SROM11_PDOFF_2G_40M_VALID_MASK 0x8000 +#define SROM11_PDOFF_2G_40M_VALID_SHIFT 15 + +#define 
SROM11_PDOFF_2G_40M 100 +#define SROM11_PDOFF_40M_A0 101 +#define SROM11_PDOFF_40M_A1 102 +#define SROM11_PDOFF_40M_A2 103 +#define SROM11_5GB0_4080_W2_A1 103 +#define SROM11_PDOFF_80M_A0 104 +#define SROM11_PDOFF_80M_A1 105 +#define SROM11_PDOFF_80M_A2 106 +#define SROM11_5GB1_4080_W0_A1 106 + +#define SROM11_SUBBAND5GVER 107 + +/* Per-path fields and offset */ +#define MAX_PATH_SROM_11 3 +#define SROM11_PATH0 108 +#define SROM11_PATH1 128 +#define SROM11_PATH2 148 + +#define SROM11_2G_MAXP 0 +#define SROM11_5GB1_4080_PA 0 +#define SROM11_2G_PA 1 +#define SROM11_5GB2_4080_PA 2 +#define SROM11_RXGAINS1 4 +#define SROM11_RXGAINS 5 +#define SROM11_5GB3_4080_PA 5 +#define SROM11_5GB1B0_MAXP 6 +#define SROM11_5GB3B2_MAXP 7 +#define SROM11_5GB0_PA 8 +#define SROM11_5GB1_PA 11 +#define SROM11_5GB2_PA 14 +#define SROM11_5GB3_PA 17 + +/* Masks and offsets for rxgains */ +#define SROM11_RXGAINS5GTRELNABYPA_MASK 0x8000 +#define SROM11_RXGAINS5GTRELNABYPA_SHIFT 15 +#define SROM11_RXGAINS5GTRISOA_MASK 0x7800 +#define SROM11_RXGAINS5GTRISOA_SHIFT 11 +#define SROM11_RXGAINS5GELNAGAINA_MASK 0x0700 +#define SROM11_RXGAINS5GELNAGAINA_SHIFT 8 +#define SROM11_RXGAINS2GTRELNABYPA_MASK 0x0080 +#define SROM11_RXGAINS2GTRELNABYPA_SHIFT 7 +#define SROM11_RXGAINS2GTRISOA_MASK 0x0078 +#define SROM11_RXGAINS2GTRISOA_SHIFT 3 +#define SROM11_RXGAINS2GELNAGAINA_MASK 0x0007 +#define SROM11_RXGAINS2GELNAGAINA_SHIFT 0 +#define SROM11_RXGAINS5GHTRELNABYPA_MASK 0x8000 +#define SROM11_RXGAINS5GHTRELNABYPA_SHIFT 15 +#define SROM11_RXGAINS5GHTRISOA_MASK 0x7800 +#define SROM11_RXGAINS5GHTRISOA_SHIFT 11 +#define SROM11_RXGAINS5GHELNAGAINA_MASK 0x0700 +#define SROM11_RXGAINS5GHELNAGAINA_SHIFT 8 +#define SROM11_RXGAINS5GMTRELNABYPA_MASK 0x0080 +#define SROM11_RXGAINS5GMTRELNABYPA_SHIFT 7 +#define SROM11_RXGAINS5GMTRISOA_MASK 0x0078 +#define SROM11_RXGAINS5GMTRISOA_SHIFT 3 +#define SROM11_RXGAINS5GMELNAGAINA_MASK 0x0007 +#define SROM11_RXGAINS5GMELNAGAINA_SHIFT 0 + +/* Power per rate */ +#define SROM11_CCKBW202GPO 168 +#define SROM11_CCKBW20UL2GPO 169 +#define SROM11_MCSBW202GPO 170 +#define SROM11_MCSBW202GPO_1 171 +#define SROM11_MCSBW402GPO 172 +#define SROM11_MCSBW402GPO_1 173 +#define SROM11_DOT11AGOFDMHRBW202GPO 174 +#define SROM11_OFDMLRBW202GPO 175 + +#define SROM11_MCSBW205GLPO 176 +#define SROM11_MCSBW205GLPO_1 177 +#define SROM11_MCSBW405GLPO 178 +#define SROM11_MCSBW405GLPO_1 179 +#define SROM11_MCSBW805GLPO 180 +#define SROM11_MCSBW805GLPO_1 181 +#define SROM11_RPCAL_2G 182 +#define SROM11_RPCAL_5GL 183 +#define SROM11_MCSBW205GMPO 184 +#define SROM11_MCSBW205GMPO_1 185 +#define SROM11_MCSBW405GMPO 186 +#define SROM11_MCSBW405GMPO_1 187 +#define SROM11_MCSBW805GMPO 188 +#define SROM11_MCSBW805GMPO_1 189 +#define SROM11_RPCAL_5GM 190 +#define SROM11_RPCAL_5GH 191 +#define SROM11_MCSBW205GHPO 192 +#define SROM11_MCSBW205GHPO_1 193 +#define SROM11_MCSBW405GHPO 194 +#define SROM11_MCSBW405GHPO_1 195 +#define SROM11_MCSBW805GHPO 196 +#define SROM11_MCSBW805GHPO_1 197 +#define SROM11_RPCAL_5GU 198 +#define SROM11_PDOFF_2G_CCK 199 +#define SROM11_MCSLR5GLPO 200 +#define SROM11_MCSLR5GMPO 201 +#define SROM11_MCSLR5GHPO 202 + +#define SROM11_SB20IN40HRPO 203 +#define SROM11_SB20IN80AND160HR5GLPO 204 +#define SROM11_SB40AND80HR5GLPO 205 +#define SROM11_SB20IN80AND160HR5GMPO 206 +#define SROM11_SB40AND80HR5GMPO 207 +#define SROM11_SB20IN80AND160HR5GHPO 208 +#define SROM11_SB40AND80HR5GHPO 209 +#define SROM11_SB20IN40LRPO 210 +#define SROM11_SB20IN80AND160LR5GLPO 211 +#define SROM11_SB40AND80LR5GLPO 212 +#define SROM11_TXIDXCAP2G 
212 +#define SROM11_SB20IN80AND160LR5GMPO 213 +#define SROM11_SB40AND80LR5GMPO 214 +#define SROM11_TXIDXCAP5G 214 +#define SROM11_SB20IN80AND160LR5GHPO 215 +#define SROM11_SB40AND80LR5GHPO 216 + +#define SROM11_DOT11AGDUPHRPO 217 +#define SROM11_DOT11AGDUPLRPO 218 + +/* MISC */ +#define SROM11_PCIEINGRESS_WAR 220 +#define SROM11_SAR 221 + +#define SROM11_NOISELVL_2G 222 +#define SROM11_NOISELVL_5GL 223 +#define SROM11_NOISELVL_5GM 224 +#define SROM11_NOISELVL_5GH 225 +#define SROM11_NOISELVL_5GU 226 + +#define SROM11_RXGAINERR_2G 227 +#define SROM11_RXGAINERR_5GL 228 +#define SROM11_RXGAINERR_5GM 229 +#define SROM11_RXGAINERR_5GH 230 +#define SROM11_RXGAINERR_5GU 231 + +#define SROM11_EU_EDCRSTH 232 +#define SROM12_EU_EDCRSTH 232 + +#define SROM11_SIGN 64 +#define SROM11_CRCREV 233 + +#define SROM11_WORDS 234 +#define SROM11_SIGNATURE 0x0634 + + +/* SROM REV 12 */ +#define SROM12_SIGN 64 +#define SROM12_WORDS 512 +#define SROM12_SIGNATURE 0x8888 +#define SROM12_CRCREV 511 + +#define SROM12_BFL6 486 +#define SROM12_BFL7 487 + +#define SROM12_MCSBW205GX1PO 234 +#define SROM12_MCSBW205GX1PO_1 235 +#define SROM12_MCSBW405GX1PO 236 +#define SROM12_MCSBW405GX1PO_1 237 +#define SROM12_MCSBW805GX1PO 238 +#define SROM12_MCSBW805GX1PO_1 239 +#define SROM12_MCSLR5GX1PO 240 +#define SROM12_SB40AND80LR5GX1PO 241 +#define SROM12_SB20IN80AND160LR5GX1PO 242 +#define SROM12_SB20IN80AND160HR5GX1PO 243 +#define SROM12_SB40AND80HR5GX1PO 244 + +#define SROM12_MCSBW205GX2PO 245 +#define SROM12_MCSBW205GX2PO_1 246 +#define SROM12_MCSBW405GX2PO 247 +#define SROM12_MCSBW405GX2PO_1 248 +#define SROM12_MCSBW805GX2PO 249 +#define SROM12_MCSBW805GX2PO_1 250 +#define SROM12_MCSLR5GX2PO 251 +#define SROM12_SB40AND80LR5GX2PO 252 +#define SROM12_SB20IN80AND160LR5GX2PO 253 +#define SROM12_SB20IN80AND160HR5GX2PO 254 +#define SROM12_SB40AND80HR5GX2PO 255 + +/* MISC */ +#define SROM12_RXGAINS10 483 +#define SROM12_RXGAINS11 484 +#define SROM12_RXGAINS12 485 + +/* Per-path fields and offset */ +#define MAX_PATH_SROM_12 3 +#define SROM12_PATH0 256 +#define SROM12_PATH1 328 +#define SROM12_PATH2 400 + +#define SROM12_5GB42G_MAXP 0 +#define SROM12_2GB0_PA 1 +#define SROM12_2GB0_PA_W0 1 +#define SROM12_2GB0_PA_W1 2 +#define SROM12_2GB0_PA_W2 3 +#define SROM12_2GB0_PA_W3 4 + +#define SROM12_RXGAINS 5 +#define SROM12_5GB1B0_MAXP 6 +#define SROM12_5GB3B2_MAXP 7 + +#define SROM12_5GB0_PA 8 +#define SROM12_5GB0_PA_W0 8 +#define SROM12_5GB0_PA_W1 9 +#define SROM12_5GB0_PA_W2 10 +#define SROM12_5GB0_PA_W3 11 + +#define SROM12_5GB1_PA 12 +#define SROM12_5GB1_PA_W0 12 +#define SROM12_5GB1_PA_W1 13 +#define SROM12_5GB1_PA_W2 14 +#define SROM12_5GB1_PA_W3 15 + +#define SROM12_5GB2_PA 16 +#define SROM12_5GB2_PA_W0 16 +#define SROM12_5GB2_PA_W1 17 +#define SROM12_5GB2_PA_W2 18 +#define SROM12_5GB2_PA_W3 19 + +#define SROM12_5GB3_PA 20 +#define SROM12_5GB3_PA_W0 20 +#define SROM12_5GB3_PA_W1 21 +#define SROM12_5GB3_PA_W2 22 +#define SROM12_5GB3_PA_W3 23 + +#define SROM12_5GB4_PA 24 +#define SROM12_5GB4_PA_W0 24 +#define SROM12_5GB4_PA_W1 25 +#define SROM12_5GB4_PA_W2 26 +#define SROM12_5GB4_PA_W3 27 + +#define SROM12_2G40B0_PA 28 +#define SROM12_2G40B0_PA_W0 28 +#define SROM12_2G40B0_PA_W1 29 +#define SROM12_2G40B0_PA_W2 30 +#define SROM12_2G40B0_PA_W3 31 + +#define SROM12_5G40B0_PA 32 +#define SROM12_5G40B0_PA_W0 32 +#define SROM12_5G40B0_PA_W1 33 +#define SROM12_5G40B0_PA_W2 34 +#define SROM12_5G40B0_PA_W3 35 + +#define SROM12_5G40B1_PA 36 +#define SROM12_5G40B1_PA_W0 36 +#define SROM12_5G40B1_PA_W1 37 +#define SROM12_5G40B1_PA_W2 38 +#define 
SROM12_5G40B1_PA_W3 39 + +#define SROM12_5G40B2_PA 40 +#define SROM12_5G40B2_PA_W0 40 +#define SROM12_5G40B2_PA_W1 41 +#define SROM12_5G40B2_PA_W2 42 +#define SROM12_5G40B2_PA_W3 43 + +#define SROM12_5G40B3_PA 44 +#define SROM12_5G40B3_PA_W0 44 +#define SROM12_5G40B3_PA_W1 45 +#define SROM12_5G40B3_PA_W2 46 +#define SROM12_5G40B3_PA_W3 47 + +#define SROM12_5G40B4_PA 48 +#define SROM12_5G40B4_PA_W0 48 +#define SROM12_5G40B4_PA_W1 49 +#define SROM12_5G40B4_PA_W2 50 +#define SROM12_5G40B4_PA_W3 51 + +#define SROM12_5G80B0_PA 52 +#define SROM12_5G80B0_PA_W0 52 +#define SROM12_5G80B0_PA_W1 53 +#define SROM12_5G80B0_PA_W2 54 +#define SROM12_5G80B0_PA_W3 55 + +#define SROM12_5G80B1_PA 56 +#define SROM12_5G80B1_PA_W0 56 +#define SROM12_5G80B1_PA_W1 57 +#define SROM12_5G80B1_PA_W2 58 +#define SROM12_5G80B1_PA_W3 59 + +#define SROM12_5G80B2_PA 60 +#define SROM12_5G80B2_PA_W0 60 +#define SROM12_5G80B2_PA_W1 61 +#define SROM12_5G80B2_PA_W2 62 +#define SROM12_5G80B2_PA_W3 63 + +#define SROM12_5G80B3_PA 64 +#define SROM12_5G80B3_PA_W0 64 +#define SROM12_5G80B3_PA_W1 65 +#define SROM12_5G80B3_PA_W2 66 +#define SROM12_5G80B3_PA_W3 67 + +#define SROM12_5G80B4_PA 68 +#define SROM12_5G80B4_PA_W0 68 +#define SROM12_5G80B4_PA_W1 69 +#define SROM12_5G80B4_PA_W2 70 +#define SROM12_5G80B4_PA_W3 71 + +/* PD offset */ +#define SROM12_PDOFF_2G_CCK 472 + +#define SROM12_PDOFF_20in40M_5G_B0 473 +#define SROM12_PDOFF_20in40M_5G_B1 474 +#define SROM12_PDOFF_20in40M_5G_B2 475 +#define SROM12_PDOFF_20in40M_5G_B3 476 +#define SROM12_PDOFF_20in40M_5G_B4 477 + +#define SROM12_PDOFF_40in80M_5G_B0 478 +#define SROM12_PDOFF_40in80M_5G_B1 479 +#define SROM12_PDOFF_40in80M_5G_B2 480 +#define SROM12_PDOFF_40in80M_5G_B3 481 +#define SROM12_PDOFF_40in80M_5G_B4 482 + +#define SROM12_PDOFF_20in80M_5G_B0 488 +#define SROM12_PDOFF_20in80M_5G_B1 489 +#define SROM12_PDOFF_20in80M_5G_B2 490 +#define SROM12_PDOFF_20in80M_5G_B3 491 +#define SROM12_PDOFF_20in80M_5G_B4 492 + +#define SROM13_PDOFFSET20IN40M5GCORE3 98 +#define SROM13_PDOFFSET20IN40M5GCORE3_1 99 +#define SROM13_PDOFFSET20IN80M5GCORE3 510 +#define SROM13_PDOFFSET20IN80M5GCORE3_1 511 +#define SROM13_PDOFFSET40IN80M5GCORE3 105 +#define SROM13_PDOFFSET40IN80M5GCORE3_1 106 + +#define SROM13_PDOFFSET20IN40M2G 94 +#define SROM13_PDOFFSET20IN40M2GCORE3 95 + +#define SROM12_GPDN_L 91 /* GPIO pull down bits [15:0] */ +#define SROM12_GPDN_H 233 /* GPIO pull down bits [31:16] */ + +#define SROM13_SIGN 64 +#define SROM13_WORDS 590 +#define SROM13_SIGNATURE 0x4d55 +#define SROM13_CRCREV 589 + + +/* Per-path fields and offset */ +#define MAX_PATH_SROM_13 4 +#define SROM13_PATH0 256 +#define SROM13_PATH1 328 +#define SROM13_PATH2 400 +#define SROM13_PATH3 512 +#define SROM13_RXGAINS 5 + +#define SROM13_XTALFREQ 90 + +#define SROM13_PDOFFSET20IN40M2G 94 +#define SROM13_PDOFFSET20IN40M2GCORE3 95 +#define SROM13_SB20IN40HRLRPOX 96 + +#define SROM13_RXGAINS1CORE3 97 + +#define SROM13_PDOFFSET20IN40M5GCORE3 98 +#define SROM13_PDOFFSET20IN40M5GCORE3_1 99 + +#define SROM13_ANTGAIN_BANDBGA 100 + +#define SROM13_RXGAINS2CORE0 101 +#define SROM13_RXGAINS2CORE1 102 +#define SROM13_RXGAINS2CORE2 103 +#define SROM13_RXGAINS2CORE3 104 + +#define SROM13_PDOFFSET40IN80M5GCORE3 105 +#define SROM13_PDOFFSET40IN80M5GCORE3_1 106 + +/* power per rate */ +#define SROM13_MCS1024QAM2GPO 108 +#define SROM13_MCS1024QAM5GLPO 109 +#define SROM13_MCS1024QAM5GLPO_1 110 +#define SROM13_MCS1024QAM5GMPO 111 +#define SROM13_MCS1024QAM5GMPO_1 112 +#define SROM13_MCS1024QAM5GHPO 113 +#define SROM13_MCS1024QAM5GHPO_1 114 +#define 
SROM13_MCS1024QAM5GX1PO 115 +#define SROM13_MCS1024QAM5GX1PO_1 116 +#define SROM13_MCS1024QAM5GX2PO 117 +#define SROM13_MCS1024QAM5GX2PO_1 118 + +#define SROM13_MCSBW1605GLPO 119 +#define SROM13_MCSBW1605GLPO_1 120 +#define SROM13_MCSBW1605GMPO 121 +#define SROM13_MCSBW1605GMPO_1 122 +#define SROM13_MCSBW1605GHPO 123 +#define SROM13_MCSBW1605GHPO_1 124 + +#define SROM13_MCSBW1605GX1PO 125 +#define SROM13_MCSBW1605GX1PO_1 126 +#define SROM13_MCSBW1605GX2PO 127 +#define SROM13_MCSBW1605GX2PO_1 128 + +#define SROM13_ULBPPROFFS5GB0 129 +#define SROM13_ULBPPROFFS5GB1 130 +#define SROM13_ULBPPROFFS5GB2 131 +#define SROM13_ULBPPROFFS5GB3 132 +#define SROM13_ULBPPROFFS5GB4 133 +#define SROM13_ULBPPROFFS2G 134 + +#define SROM13_MCS8POEXP 135 +#define SROM13_MCS8POEXP_1 136 +#define SROM13_MCS9POEXP 137 +#define SROM13_MCS9POEXP_1 138 +#define SROM13_MCS10POEXP 139 +#define SROM13_MCS10POEXP_1 140 +#define SROM13_MCS11POEXP 141 +#define SROM13_MCS11POEXP_1 142 +#define SROM13_ULBPDOFFS5GB0A0 143 +#define SROM13_ULBPDOFFS5GB0A1 144 +#define SROM13_ULBPDOFFS5GB0A2 145 +#define SROM13_ULBPDOFFS5GB0A3 146 +#define SROM13_ULBPDOFFS5GB1A0 147 +#define SROM13_ULBPDOFFS5GB1A1 148 +#define SROM13_ULBPDOFFS5GB1A2 149 +#define SROM13_ULBPDOFFS5GB1A3 150 +#define SROM13_ULBPDOFFS5GB2A0 151 +#define SROM13_ULBPDOFFS5GB2A1 152 +#define SROM13_ULBPDOFFS5GB2A2 153 +#define SROM13_ULBPDOFFS5GB2A3 154 +#define SROM13_ULBPDOFFS5GB3A0 155 +#define SROM13_ULBPDOFFS5GB3A1 156 +#define SROM13_ULBPDOFFS5GB3A2 157 +#define SROM13_ULBPDOFFS5GB3A3 158 +#define SROM13_ULBPDOFFS5GB4A0 159 +#define SROM13_ULBPDOFFS5GB4A1 160 +#define SROM13_ULBPDOFFS5GB4A2 161 +#define SROM13_ULBPDOFFS5GB4A3 162 +#define SROM13_ULBPDOFFS2GA0 163 +#define SROM13_ULBPDOFFS2GA1 164 +#define SROM13_ULBPDOFFS2GA2 165 +#define SROM13_ULBPDOFFS2GA3 166 + +#define SROM13_RPCAL5GB4 199 + +#define SROM13_EU_EDCRSTH 232 + +#define SROM13_SWCTRLMAP4_CFG 493 +#define SROM13_SWCTRLMAP4_TX2G_FEM3TO0 494 +#define SROM13_SWCTRLMAP4_RX2G_FEM3TO0 495 +#define SROM13_SWCTRLMAP4_RXBYP2G_FEM3TO0 496 +#define SROM13_SWCTRLMAP4_MISC2G_FEM3TO0 497 +#define SROM13_SWCTRLMAP4_TX5G_FEM3TO0 498 +#define SROM13_SWCTRLMAP4_RX5G_FEM3TO0 499 +#define SROM13_SWCTRLMAP4_RXBYP5G_FEM3TO0 500 +#define SROM13_SWCTRLMAP4_MISC5G_FEM3TO0 501 +#define SROM13_SWCTRLMAP4_TX2G_FEM7TO4 502 +#define SROM13_SWCTRLMAP4_RX2G_FEM7TO4 503 +#define SROM13_SWCTRLMAP4_RXBYP2G_FEM7TO4 504 +#define SROM13_SWCTRLMAP4_MISC2G_FEM7TO4 505 +#define SROM13_SWCTRLMAP4_TX5G_FEM7TO4 506 +#define SROM13_SWCTRLMAP4_RX5G_FEM7TO4 507 +#define SROM13_SWCTRLMAP4_RXBYP5G_FEM7TO4 508 +#define SROM13_SWCTRLMAP4_MISC5G_FEM7TO4 509 + +#define SROM13_PDOFFSET20IN80M5GCORE3 510 +#define SROM13_PDOFFSET20IN80M5GCORE3_1 511 + +#define SROM13_NOISELVLCORE3 584 +#define SROM13_NOISELVLCORE3_1 585 +#define SROM13_RXGAINERRCORE3 586 +#define SROM13_RXGAINERRCORE3_1 587 + + +typedef struct { + uint8 tssipos; /* TSSI positive slope, 1: positive, 0: negative */ + uint8 extpagain; /* Ext PA gain-type: full-gain: 0, pa-lite: 1, no_pa: 2 */ + uint8 pdetrange; /* support 32 combinations of different Pdet dynamic ranges */ + uint8 triso; /* TR switch isolation */ + uint8 antswctrllut; /* antswctrl lookup table configuration: 32 possible choices */ +} srom_fem_t; + +#endif /* _bcmsrom_fmt_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h b/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h new file mode 100644 index 000000000000..f2775fbba1c5 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmsrom_tbl.h @@ -0,0 
+1,1400 @@
+/*
+ * Table that encodes the srom formats for PCI/PCIe NICs.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: bcmsrom_tbl.h 553564 2015-04-30 06:19:30Z $
+ */
+
+#ifndef _bcmsrom_tbl_h_
+#define _bcmsrom_tbl_h_
+
+#include "sbpcmcia.h"
+#include "wlioctl.h"
+#include 
+
+typedef struct {
+	const char *name;
+	uint32 revmask;
+	uint32 flags;
+	uint16 off;
+	uint16 mask;
+} sromvar_t;
+
+#define SRFL_MORE	1	/* value continues as described by the next entry */
+#define SRFL_NOFFS	2	/* value bits can't be all one's */
+#define SRFL_PRHEX	4	/* value is in hexadecimal format */
+#define SRFL_PRSIGN	8	/* value is in signed decimal format */
+#define SRFL_CCODE	0x10	/* value is in country code format */
+#define SRFL_ETHADDR	0x20	/* value is an Ethernet address */
+#define SRFL_LEDDC	0x40	/* value is an LED duty cycle */
+#define SRFL_NOVAR	0x80	/* do not generate an nvram param, entry is for mfgc */
+#define SRFL_ARRAY	0x100	/* value is in an array. All elements EXCEPT FOR THE LAST
+				 * ONE in the array should have this flag set.
+				 */
+
+
+#define SROM_DEVID_PCIE	48
+
+/**
+ * Assumptions:
+ * - The Ethernet address spans three consecutive words.
+ *
+ * Table rules:
+ * - Add multiple entries next to each other if a value spans multiple words
+ *   (or even multiple fields in the same word), with each entry except the
+ *   last having its SRFL_MORE bit set.
+ * - The Ethernet address entry does not follow the above rule and must not
+ *   have the SRFL_MORE bit set; its SRFL_ETHADDR bit implies that it takes
+ *   multiple words.
+ * - The last entry's name field must be NULL to indicate the end of the
+ *   table. All other entries must have a non-NULL name.
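+ *
+ * Worked example (both rows are quoted verbatim from the table below; no
+ * new entries are introduced): the 32-bit sromrev-2 "boardflags" value
+ * spans two srom words, so it is encoded as a SRFL_MORE pair
+ *
+ *   {"boardflags", 0x00000004, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff},
+ *   {"", 0, 0, SROM_BFL2, 0xffff},
+ *
+ * and a table walker takes the low 16 bits from SROM_BFL and the high 16
+ * bits from SROM_BFL2. A minimal reader loop is sketched below; it assumes
+ * a single continuation word, srom[], mask_shift() and emit() are
+ * hypothetical helpers (not part of this header), and the revmask check
+ * against the srom revision is omitted for brevity:
+ *
+ *   for (v = pci_sromvars; v->name != NULL; v++) {
+ *           const char *name = v->name;
+ *           uint32 val = (srom[v->off] & v->mask) >> mask_shift(v->mask);
+ *           if (v->flags & SRFL_MORE) {
+ *                   v++;    <-- consume the continuation entry
+ *                   val |= (uint32)(srom[v->off] & v->mask) << 16;
+ *           }
+ *           emit(name, val);
+ *   }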
+ */ +static const sromvar_t pci_sromvars[] = { +/* name revmask flags off mask */ +#if defined(CABLECPE) + {"devid", 0xffffff00, SRFL_PRHEX, PCI_F0DEVID, 0xffff}, +#elif defined(BCMPCIEDEV) && defined(BCMPCIEDEV_ENABLED) + {"devid", 0xffffff00, SRFL_PRHEX, SROM_DEVID_PCIE, 0xffff}, +#else + {"devid", 0xffffff00, SRFL_PRHEX|SRFL_NOVAR, PCI_F0DEVID, 0xffff}, +#endif + {"boardrev", 0x0000000e, SRFL_PRHEX, SROM_AABREV, SROM_BR_MASK}, + {"boardrev", 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff}, + {"boardrev", 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff}, + {"boardflags", 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff}, + {"boardflags", 0x00000004, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff}, + {"", 0, 0, SROM_BFL2, 0xffff}, + {"boardflags", 0x00000008, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff}, + {"", 0, 0, SROM3_BFL2, 0xffff}, + {"boardflags", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL0, 0xffff}, + {"", 0, 0, SROM4_BFL1, 0xffff}, + {"boardflags", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL0, 0xffff}, + {"", 0, 0, SROM5_BFL1, 0xffff}, + {"boardflags", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL0, 0xffff}, + {"", 0, 0, SROM8_BFL1, 0xffff}, + {"boardflags2", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL2, 0xffff}, + {"", 0, 0, SROM4_BFL3, 0xffff}, + {"boardflags2", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL2, 0xffff}, + {"", 0, 0, SROM5_BFL3, 0xffff}, + {"boardflags2", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL2, 0xffff}, + {"", 0, 0, SROM8_BFL3, 0xffff}, + {"boardtype", 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff}, + {"subvid", 0xfffffffc, SRFL_PRHEX, SROM_SVID, 0xffff}, + {"boardnum", 0x00000006, 0, SROM_MACLO_IL0, 0xffff}, + {"boardnum", 0x00000008, 0, SROM3_MACLO, 0xffff}, + {"boardnum", 0x00000010, 0, SROM4_MACLO, 0xffff}, + {"boardnum", 0x000000e0, 0, SROM5_MACLO, 0xffff}, + {"boardnum", 0x00000700, 0, SROM8_MACLO, 0xffff}, + {"cc", 0x00000002, 0, SROM_AABREV, SROM_CC_MASK}, + {"regrev", 0x00000008, 0, SROM_OPO, 0xff00}, + {"regrev", 0x00000010, 0, SROM4_REGREV, 0x00ff}, + {"regrev", 0x000000e0, 0, SROM5_REGREV, 0x00ff}, + {"regrev", 0x00000700, 0, SROM8_REGREV, 0x00ff}, + {"ledbh0", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff}, + {"ledbh1", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00}, + {"ledbh2", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff}, + {"ledbh3", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0xff00}, + {"ledbh0", 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0x00ff}, + {"ledbh1", 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0xff00}, + {"ledbh2", 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0x00ff}, + {"ledbh3", 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0xff00}, + {"ledbh0", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0x00ff}, + {"ledbh1", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0xff00}, + {"ledbh2", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0x00ff}, + {"ledbh3", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0xff00}, + {"ledbh0", 0x00000700, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff}, + {"ledbh1", 0x00000700, SRFL_NOFFS, SROM8_LEDBH10, 0xff00}, + {"ledbh2", 0x00000700, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff}, + {"ledbh3", 0x00000700, SRFL_NOFFS, SROM8_LEDBH32, 0xff00}, + {"pa0b0", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff}, + {"pa0b1", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff}, + {"pa0b2", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff}, + {"pa0itssit", 0x0000000e, 0, SROM_ITT, 0x00ff}, + {"pa0maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0x00ff}, + {"pa0b0", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff}, + {"pa0b1", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff}, + {"pa0b2", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff}, + {"pa0itssit", 0x00000700, 
0, SROM8_W0_ITTMAXP, 0xff00}, + {"pa0maxpwr", 0x00000700, 0, SROM8_W0_ITTMAXP, 0x00ff}, + {"opo", 0x0000000c, 0, SROM_OPO, 0x00ff}, + {"opo", 0x00000700, 0, SROM8_2G_OFDMPO, 0x00ff}, + {"aa2g", 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK}, + {"aa2g", 0x000000f0, 0, SROM4_AA, 0x00ff}, + {"aa2g", 0x00000700, 0, SROM8_AA, 0x00ff}, + {"aa5g", 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK}, + {"aa5g", 0x000000f0, 0, SROM4_AA, 0xff00}, + {"aa5g", 0x00000700, 0, SROM8_AA, 0xff00}, + {"ag0", 0x0000000e, 0, SROM_AG10, 0x00ff}, + {"ag1", 0x0000000e, 0, SROM_AG10, 0xff00}, + {"ag0", 0x000000f0, 0, SROM4_AG10, 0x00ff}, + {"ag1", 0x000000f0, 0, SROM4_AG10, 0xff00}, + {"ag2", 0x000000f0, 0, SROM4_AG32, 0x00ff}, + {"ag3", 0x000000f0, 0, SROM4_AG32, 0xff00}, + {"ag0", 0x00000700, 0, SROM8_AG10, 0x00ff}, + {"ag1", 0x00000700, 0, SROM8_AG10, 0xff00}, + {"ag2", 0x00000700, 0, SROM8_AG32, 0x00ff}, + {"ag3", 0x00000700, 0, SROM8_AG32, 0xff00}, + {"pa1b0", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff}, + {"pa1b1", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff}, + {"pa1b2", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff}, + {"pa1lob0", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff}, + {"pa1lob1", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff}, + {"pa1lob2", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff}, + {"pa1hib0", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff}, + {"pa1hib1", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff}, + {"pa1hib2", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff}, + {"pa1itssit", 0x0000000e, 0, SROM_ITT, 0xff00}, + {"pa1maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0xff00}, + {"pa1lomaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00}, + {"pa1himaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff}, + {"pa1b0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff}, + {"pa1b1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff}, + {"pa1b2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff}, + {"pa1lob0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_LC, 0xffff}, + {"pa1lob1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_LC, 0xffff}, + {"pa1lob2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_LC, 0xffff}, + {"pa1hib0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_HC, 0xffff}, + {"pa1hib1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_HC, 0xffff}, + {"pa1hib2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_HC, 0xffff}, + {"pa1itssit", 0x00000700, 0, SROM8_W1_ITTMAXP, 0xff00}, + {"pa1maxpwr", 0x00000700, 0, SROM8_W1_ITTMAXP, 0x00ff}, + {"pa1lomaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0xff00}, + {"pa1himaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0x00ff}, + {"bxa2g", 0x00000008, 0, SROM_BXARSSI2G, 0x1800}, + {"rssisav2g", 0x00000008, 0, SROM_BXARSSI2G, 0x0700}, + {"rssismc2g", 0x00000008, 0, SROM_BXARSSI2G, 0x00f0}, + {"rssismf2g", 0x00000008, 0, SROM_BXARSSI2G, 0x000f}, + {"bxa2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x1800}, + {"rssisav2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x0700}, + {"rssismc2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x00f0}, + {"rssismf2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x000f}, + {"bxa5g", 0x00000008, 0, SROM_BXARSSI5G, 0x1800}, + {"rssisav5g", 0x00000008, 0, SROM_BXARSSI5G, 0x0700}, + {"rssismc5g", 0x00000008, 0, SROM_BXARSSI5G, 0x00f0}, + {"rssismf5g", 0x00000008, 0, SROM_BXARSSI5G, 0x000f}, + {"bxa5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x1800}, + {"rssisav5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x0700}, + {"rssismc5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x00f0}, + {"rssismf5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x000f}, + {"tri2g", 0x00000008, 0, SROM_TRI52G, 0x00ff}, + {"tri5g", 0x00000008, 0, SROM_TRI52G, 0xff00}, + {"tri5gl", 0x00000008, 0, SROM_TRI5GHL, 0x00ff}, + 
{"tri5gh", 0x00000008, 0, SROM_TRI5GHL, 0xff00}, + {"tri2g", 0x00000700, 0, SROM8_TRI52G, 0x00ff}, + {"tri5g", 0x00000700, 0, SROM8_TRI52G, 0xff00}, + {"tri5gl", 0x00000700, 0, SROM8_TRI5GHL, 0x00ff}, + {"tri5gh", 0x00000700, 0, SROM8_TRI5GHL, 0xff00}, + {"rxpo2g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff}, + {"rxpo5g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00}, + {"rxpo2g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff}, + {"rxpo5g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00}, + {"txchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_TXCHAIN_MASK}, + {"rxchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_RXCHAIN_MASK}, + {"antswitch", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_SWITCH_MASK}, + {"txchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_TXCHAIN_MASK}, + {"rxchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_RXCHAIN_MASK}, + {"antswitch", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_SWITCH_MASK}, + {"tssipos2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TSSIPOS_MASK}, + {"extpagain2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_EXTPA_GAIN_MASK}, + {"pdetrange2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_PDET_RANGE_MASK}, + {"triso2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TR_ISO_MASK}, + {"antswctl2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_ANTSWLUT_MASK}, + {"tssipos5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TSSIPOS_MASK}, + {"extpagain5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_EXTPA_GAIN_MASK}, + {"pdetrange5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_PDET_RANGE_MASK}, + {"triso5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TR_ISO_MASK}, + {"antswctl5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_ANTSWLUT_MASK}, + {"txpid2ga0", 0x000000f0, 0, SROM4_TXPID2G, 0x00ff}, + {"txpid2ga1", 0x000000f0, 0, SROM4_TXPID2G, 0xff00}, + {"txpid2ga2", 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff}, + {"txpid2ga3", 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00}, + {"txpid5ga0", 0x000000f0, 0, SROM4_TXPID5G, 0x00ff}, + {"txpid5ga1", 0x000000f0, 0, SROM4_TXPID5G, 0xff00}, + {"txpid5ga2", 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff}, + {"txpid5ga3", 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00}, + {"txpid5gla0", 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff}, + {"txpid5gla1", 0x000000f0, 0, SROM4_TXPID5GL, 0xff00}, + {"txpid5gla2", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff}, + {"txpid5gla3", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00}, + {"txpid5gha0", 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff}, + {"txpid5gha1", 0x000000f0, 0, SROM4_TXPID5GH, 0xff00}, + {"txpid5gha2", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff}, + {"txpid5gha3", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00}, + + {"ccode", 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff}, + {"ccode", 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff}, + {"ccode", 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff}, + {"ccode", 0x00000700, SRFL_CCODE, SROM8_CCODE, 0xffff}, + {"macaddr", 0x00000700, SRFL_ETHADDR, SROM8_MACHI, 0xffff}, + {"macaddr", 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff}, + {"macaddr", 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff}, + {"macaddr", 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff}, + {"il0macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0, 0xffff}, + {"et1macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1, 0xffff}, + {"leddc", 0x00000700, SRFL_NOFFS|SRFL_LEDDC, SROM8_LEDDC, 0xffff}, + {"leddc", 0x000000e0, SRFL_NOFFS|SRFL_LEDDC, SROM5_LEDDC, 0xffff}, + {"leddc", 0x00000010, SRFL_NOFFS|SRFL_LEDDC, SROM4_LEDDC, 0xffff}, + {"leddc", 0x00000008, SRFL_NOFFS|SRFL_LEDDC, SROM3_LEDDC, 0xffff}, + + {"tempthresh", 0x00000700, 0, SROM8_THERMAL, 0xff00}, + {"tempoffset", 0x00000700, 0, 
SROM8_THERMAL, 0x00ff}, + {"rawtempsense", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0x01ff}, + {"measpower", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0xfe00}, + {"tempsense_slope", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x00ff}, + {"tempcorrx", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0xfc00}, + {"tempsense_option", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x0300}, + {"freqoffset_corr", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x000f}, + {"iqcal_swp_dis", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0010}, + {"hw_iqcal_en", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0020}, + {"elna2g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0x00ff}, + {"elna5g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0xff00}, + {"phycal_tempdelta", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x00ff}, + {"temps_period", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x0f00}, + {"temps_hysteresis", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0xf000}, + {"measpower1", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x007f}, + {"measpower2", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x3f80}, + + {"cck2gpo", 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff}, + {"cck2gpo", 0x00000100, 0, SROM8_2G_CCKPO, 0xffff}, + {"ofdm2gpo", 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_2G_OFDMPO + 1, 0xffff}, + {"ofdm5gpo", 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_5G_OFDMPO + 1, 0xffff}, + {"ofdm5glpo", 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff}, + {"ofdm5ghpo", 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff}, + {"", 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff}, + {"ofdm2gpo", 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_2G_OFDMPO + 1, 0xffff}, + {"ofdm5gpo", 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_5G_OFDMPO + 1, 0xffff}, + {"ofdm5glpo", 0x00000100, SRFL_MORE, SROM8_5GL_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff}, + {"ofdm5ghpo", 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff}, + {"", 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff}, + {"mcs2gpo0", 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff}, + {"mcs2gpo1", 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff}, + {"mcs2gpo2", 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff}, + {"mcs2gpo3", 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff}, + {"mcs2gpo4", 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff}, + {"mcs2gpo5", 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff}, + {"mcs2gpo6", 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff}, + {"mcs2gpo7", 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff}, + {"mcs5gpo0", 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff}, + {"mcs5gpo1", 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff}, + {"mcs5gpo2", 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff}, + {"mcs5gpo3", 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff}, + {"mcs5gpo4", 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff}, + {"mcs5gpo5", 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff}, + {"mcs5gpo6", 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff}, + {"mcs5gpo7", 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff}, + {"mcs5glpo0", 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff}, + {"mcs5glpo1", 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff}, + {"mcs5glpo2", 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff}, + {"mcs5glpo3", 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff}, + {"mcs5glpo4", 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff}, + {"mcs5glpo5", 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff}, + {"mcs5glpo6", 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff}, + {"mcs5glpo7", 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff}, + {"mcs5ghpo0", 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff}, + {"mcs5ghpo1", 
0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff}, + {"mcs5ghpo2", 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff}, + {"mcs5ghpo3", 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff}, + {"mcs5ghpo4", 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff}, + {"mcs5ghpo5", 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff}, + {"mcs5ghpo6", 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff}, + {"mcs5ghpo7", 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff}, + {"mcs2gpo0", 0x00000100, 0, SROM8_2G_MCSPO, 0xffff}, + {"mcs2gpo1", 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff}, + {"mcs2gpo2", 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff}, + {"mcs2gpo3", 0x00000100, 0, SROM8_2G_MCSPO + 3, 0xffff}, + {"mcs2gpo4", 0x00000100, 0, SROM8_2G_MCSPO + 4, 0xffff}, + {"mcs2gpo5", 0x00000100, 0, SROM8_2G_MCSPO + 5, 0xffff}, + {"mcs2gpo6", 0x00000100, 0, SROM8_2G_MCSPO + 6, 0xffff}, + {"mcs2gpo7", 0x00000100, 0, SROM8_2G_MCSPO + 7, 0xffff}, + {"mcs5gpo0", 0x00000100, 0, SROM8_5G_MCSPO, 0xffff}, + {"mcs5gpo1", 0x00000100, 0, SROM8_5G_MCSPO + 1, 0xffff}, + {"mcs5gpo2", 0x00000100, 0, SROM8_5G_MCSPO + 2, 0xffff}, + {"mcs5gpo3", 0x00000100, 0, SROM8_5G_MCSPO + 3, 0xffff}, + {"mcs5gpo4", 0x00000100, 0, SROM8_5G_MCSPO + 4, 0xffff}, + {"mcs5gpo5", 0x00000100, 0, SROM8_5G_MCSPO + 5, 0xffff}, + {"mcs5gpo6", 0x00000100, 0, SROM8_5G_MCSPO + 6, 0xffff}, + {"mcs5gpo7", 0x00000100, 0, SROM8_5G_MCSPO + 7, 0xffff}, + {"mcs5glpo0", 0x00000100, 0, SROM8_5GL_MCSPO, 0xffff}, + {"mcs5glpo1", 0x00000100, 0, SROM8_5GL_MCSPO + 1, 0xffff}, + {"mcs5glpo2", 0x00000100, 0, SROM8_5GL_MCSPO + 2, 0xffff}, + {"mcs5glpo3", 0x00000100, 0, SROM8_5GL_MCSPO + 3, 0xffff}, + {"mcs5glpo4", 0x00000100, 0, SROM8_5GL_MCSPO + 4, 0xffff}, + {"mcs5glpo5", 0x00000100, 0, SROM8_5GL_MCSPO + 5, 0xffff}, + {"mcs5glpo6", 0x00000100, 0, SROM8_5GL_MCSPO + 6, 0xffff}, + {"mcs5glpo7", 0x00000100, 0, SROM8_5GL_MCSPO + 7, 0xffff}, + {"mcs5ghpo0", 0x00000100, 0, SROM8_5GH_MCSPO, 0xffff}, + {"mcs5ghpo1", 0x00000100, 0, SROM8_5GH_MCSPO + 1, 0xffff}, + {"mcs5ghpo2", 0x00000100, 0, SROM8_5GH_MCSPO + 2, 0xffff}, + {"mcs5ghpo3", 0x00000100, 0, SROM8_5GH_MCSPO + 3, 0xffff}, + {"mcs5ghpo4", 0x00000100, 0, SROM8_5GH_MCSPO + 4, 0xffff}, + {"mcs5ghpo5", 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff}, + {"mcs5ghpo6", 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff}, + {"mcs5ghpo7", 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff}, + {"cddpo", 0x000000f0, 0, SROM4_CDDPO, 0xffff}, + {"stbcpo", 0x000000f0, 0, SROM4_STBCPO, 0xffff}, + {"bw40po", 0x000000f0, 0, SROM4_BW40PO, 0xffff}, + {"bwduppo", 0x000000f0, 0, SROM4_BWDUPPO, 0xffff}, + {"cddpo", 0x00000100, 0, SROM8_CDDPO, 0xffff}, + {"stbcpo", 0x00000100, 0, SROM8_STBCPO, 0xffff}, + {"bw40po", 0x00000100, 0, SROM8_BW40PO, 0xffff}, + {"bwduppo", 0x00000100, 0, SROM8_BWDUPPO, 0xffff}, + + /* power per rate from sromrev 9 */ + {"cckbw202gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20, 0xffff}, + {"cckbw20ul2gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20UL, 0xffff}, + {"legofdmbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20, 0xffff}, + {"", 0, 0, SROM9_2GPO_LOFDMBW20 + 1, 0xffff}, + {"legofdmbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_2GPO_LOFDMBW20UL + 1, 0xffff}, + {"legofdmbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20, 0xffff}, + {"", 0, 0, SROM9_5GLPO_LOFDMBW20 + 1, 0xffff}, + {"legofdmbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GLPO_LOFDMBW20UL + 1, 0xffff}, + {"legofdmbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20, 0xffff}, + {"", 0, 0, SROM9_5GMPO_LOFDMBW20 + 1, 0xffff}, + 
{"legofdmbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GMPO_LOFDMBW20UL + 1, 0xffff}, + {"legofdmbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20, 0xffff}, + {"", 0, 0, SROM9_5GHPO_LOFDMBW20 + 1, 0xffff}, + {"legofdmbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GHPO_LOFDMBW20UL + 1, 0xffff}, + {"mcsbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20, 0xffff}, + {"", 0, 0, SROM9_2GPO_MCSBW20 + 1, 0xffff}, + {"mcsbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20UL, 0xffff}, + {"", 0, 0, SROM9_2GPO_MCSBW20UL + 1, 0xffff}, + {"mcsbw402gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW40, 0xffff}, + {"", 0, 0, SROM9_2GPO_MCSBW40 + 1, 0xffff}, + {"mcsbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20, 0xffff}, + {"", 0, 0, SROM9_5GLPO_MCSBW20 + 1, 0xffff}, + {"mcsbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GLPO_MCSBW20UL + 1, 0xffff}, + {"mcsbw405glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW40, 0xffff}, + {"", 0, 0, SROM9_5GLPO_MCSBW40 + 1, 0xffff}, + {"mcsbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20, 0xffff}, + {"", 0, 0, SROM9_5GMPO_MCSBW20 + 1, 0xffff}, + {"mcsbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GMPO_MCSBW20UL + 1, 0xffff}, + {"mcsbw405gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW40, 0xffff}, + {"", 0, 0, SROM9_5GMPO_MCSBW40 + 1, 0xffff}, + {"mcsbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20, 0xffff}, + {"", 0, 0, SROM9_5GHPO_MCSBW20 + 1, 0xffff}, + {"mcsbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20UL, 0xffff}, + {"", 0, 0, SROM9_5GHPO_MCSBW20UL + 1, 0xffff}, + {"mcsbw405ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW40, 0xffff}, + {"", 0, 0, SROM9_5GHPO_MCSBW40 + 1, 0xffff}, + {"mcs32po", 0x00000600, 0, SROM9_PO_MCS32, 0xffff}, + {"legofdm40duppo", 0x00000600, 0, SROM9_PO_LOFDM40DUP, 0xffff}, + {"pcieingress_war", 0x00000700, 0, SROM8_PCIEINGRESS_WAR, 0xf}, + {"eu_edthresh2g", 0x00000100, 0, SROM8_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000100, 0, SROM8_EU_EDCRSTH, 0xff00}, + {"eu_edthresh2g", 0x00000200, 0, SROM9_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000200, 0, SROM9_EU_EDCRSTH, 0xff00}, + {"rxgainerr2ga0", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x003f}, + {"rxgainerr2ga0", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x003f}, + {"rxgainerr2ga1", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x07c0}, + {"rxgainerr2ga2", 0x00000700, 0, SROM8_RXGAINERR_2G, 0xf800}, + {"rxgainerr5gla0", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x003f}, + {"rxgainerr5gla1", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x07c0}, + {"rxgainerr5gla2", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0xf800}, + {"rxgainerr5gma0", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x003f}, + {"rxgainerr5gma1", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x07c0}, + {"rxgainerr5gma2", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0xf800}, + {"rxgainerr5gha0", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x003f}, + {"rxgainerr5gha1", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x07c0}, + {"rxgainerr5gha2", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0xf800}, + {"rxgainerr5gua0", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x003f}, + {"rxgainerr5gua1", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x07c0}, + {"rxgainerr5gua2", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0xf800}, + {"sar2g", 0x00000600, 0, SROM9_SAR, 0x00ff}, + {"sar5g", 0x00000600, 0, SROM9_SAR, 0xff00}, + {"noiselvl2ga0", 0x00000700, 0, SROM8_NOISELVL_2G, 0x001f}, + {"noiselvl2ga1", 0x00000700, 0, SROM8_NOISELVL_2G, 0x03e0}, + {"noiselvl2ga2", 
0x00000700, 0, SROM8_NOISELVL_2G, 0x7c00}, + {"noiselvl5gla0", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x001f}, + {"noiselvl5gla1", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x03e0}, + {"noiselvl5gla2", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x7c00}, + {"noiselvl5gma0", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x001f}, + {"noiselvl5gma1", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x03e0}, + {"noiselvl5gma2", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x7c00}, + {"noiselvl5gha0", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x001f}, + {"noiselvl5gha1", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x03e0}, + {"noiselvl5gha2", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x7c00}, + {"noiselvl5gua0", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x001f}, + {"noiselvl5gua1", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x03e0}, + {"noiselvl5gua2", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x7c00}, + {"noisecaloffset", 0x00000300, 0, SROM8_NOISECALOFFSET, 0x00ff}, + {"noisecaloffset5g", 0x00000300, 0, SROM8_NOISECALOFFSET, 0xff00}, + {"subband5gver", 0x00000700, 0, SROM8_SUBBAND_PPR, 0x7}, + + {"cckPwrOffset", 0x00000400, 0, SROM10_CCKPWROFFSET, 0xffff}, + {"eu_edthresh2g", 0x00000400, 0, SROM10_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000400, 0, SROM10_EU_EDCRSTH, 0xff00}, + /* swctrlmap_2g array, note that the last element doesn't have SRFL_ARRAY flag set */ + {"swctrlmap_2g", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 1, 0xffff}, + {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 2, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 3, 0xffff}, + {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 4, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 5, 0xffff}, + {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 6, 0xffff}, + {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 7, 0xffff}, + {"", 0x00000400, SRFL_PRHEX, SROM10_SWCTRLMAP_2G + 8, 0xffff}, + + /* sromrev 11 */ + {"boardflags3", 0xfffff800, SRFL_PRHEX|SRFL_MORE, SROM11_BFL4, 0xffff}, + {"", 0, 0, SROM11_BFL5, 0xffff}, + {"boardnum", 0xfffff800, 0, SROM11_MACLO, 0xffff}, + {"macaddr", 0xfffff800, SRFL_ETHADDR, SROM11_MACHI, 0xffff}, + {"ccode", 0xfffff800, SRFL_CCODE, SROM11_CCODE, 0xffff}, + {"regrev", 0xfffff800, 0, SROM11_REGREV, 0x00ff}, + {"ledbh0", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH10, 0x00ff}, + {"ledbh1", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH10, 0xff00}, + {"ledbh2", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH32, 0x00ff}, + {"ledbh3", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH32, 0xff00}, + {"leddc", 0xfffff800, SRFL_NOFFS|SRFL_LEDDC, SROM11_LEDDC, 0xffff}, + {"aa2g", 0xfffff800, 0, SROM11_AA, 0x00ff}, + {"aa5g", 0xfffff800, 0, SROM11_AA, 0xff00}, + {"agbg0", 0xfffff800, 0, SROM11_AGBG10, 0xff00}, + {"agbg1", 0xfffff800, 0, SROM11_AGBG10, 0x00ff}, + {"agbg2", 0xfffff800, 0, SROM11_AGBG2A0, 0xff00}, + {"aga0", 0xfffff800, 0, SROM11_AGBG2A0, 0x00ff}, + {"aga1", 0xfffff800, 0, SROM11_AGA21, 0xff00}, + {"aga2", 0xfffff800, 0, SROM11_AGA21, 0x00ff}, + {"txchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_TXCHAIN_MASK}, + {"rxchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_RXCHAIN_MASK}, + {"antswitch", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_SWITCH_MASK}, + + {"tssiposslope2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0001}, + {"epagain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x000e}, + {"pdgain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x01f0}, + {"tworangetssi2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0200}, + {"papdcap2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0400}, + 
{"femctrl", 0xfffff800, 0, SROM11_FEM_CFG1, 0xf800}, + + {"tssiposslope5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0001}, + {"epagain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x000e}, + {"pdgain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x01f0}, + {"tworangetssi5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0200}, + {"papdcap5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0400}, + {"gainctrlsph", 0xfffff800, 0, SROM11_FEM_CFG2, 0xf800}, + + {"tempthresh", 0xfffff800, 0, SROM11_THERMAL, 0xff00}, + {"tempoffset", 0xfffff800, 0, SROM11_THERMAL, 0x00ff}, + {"rawtempsense", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0x01ff}, + {"measpower", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0xfe00}, + {"tempsense_slope", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x00ff}, + {"tempcorrx", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0xfc00}, + {"tempsense_option", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x0300}, + {"xtalfreq", 0xfffff800, 0, SROM11_XTAL_FREQ, 0xffff}, + /* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #1 */ + {"pa5gbw4080a1", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W0_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W1_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W2_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_4080_W0_A1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 2, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_4080_PA + 2, 0xffff}, + {"phycal_tempdelta", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x00ff}, + {"temps_period", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x0f00}, + {"temps_hysteresis", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0xf000}, + {"measpower1", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x007f}, + {"measpower2", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x3f80}, + {"tssifloor2g", 0xfffff800, SRFL_PRHEX, SROM11_TSSIFLOOR_2G, 0x03ff}, + {"tssifloor5g", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GL, 0x03ff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GM, 0x03ff}, + {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GH, 0x03ff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_TSSIFLOOR_5GU, 0x03ff}, + {"pdoffset2g40ma0", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x000f}, + {"pdoffset2g40ma1", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x00f0}, + {"pdoffset2g40ma2", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x0f00}, + {"pdoffset2g40mvalid", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x8000}, + {"pdoffset40ma0", 0xfffff800, 0, SROM11_PDOFF_40M_A0, 0xffff}, + {"pdoffset40ma1", 0xfffff800, 0, SROM11_PDOFF_40M_A1, 0xffff}, + {"pdoffset40ma2", 0xfffff800, 0, SROM11_PDOFF_40M_A2, 0xffff}, + {"pdoffset80ma0", 0xfffff800, 0, SROM11_PDOFF_80M_A0, 0xffff}, + {"pdoffset80ma1", 0xfffff800, 0, SROM11_PDOFF_80M_A1, 0xffff}, + {"pdoffset80ma2", 0xfffff800, 0, SROM11_PDOFF_80M_A2, 0xffff}, + + {"subband5gver", 0xfffff800, SRFL_PRHEX, SROM11_SUBBAND5GVER, 0xffff}, + {"paparambwver", 0xfffff800, 0, 
SROM11_MCSLR5GLPO, 0xf000},
+ {"rx5ggainwar", 0xfffff800, 0, SROM11_MCSLR5GMPO, 0x2000},
+ /* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #0 */
+ {"pa5gbw4080a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff},
+ /* Special PA Params for 4335 5G Band, 40 MHz BW */
+ {"pa5gbw40a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH1 + SROM11_5GB3_PA + 2, 0xffff},
+ /* Special PA Params for 4335 5G Band, 80 MHz BW */
+ {"pa5gbw80a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff},
+ /* Special PA Params for 4335 2G Band, CCK */
+ {"pa2gccka0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | 
SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA + 1, 0xffff}, + {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH1 + SROM11_2G_PA + 2, 0xffff}, + + /* power per rate */ + {"cckbw202gpo", 0xfffff800, 0, SROM11_CCKBW202GPO, 0xffff}, + {"cckbw20ul2gpo", 0xfffff800, 0, SROM11_CCKBW20UL2GPO, 0xffff}, + {"mcsbw202gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW202GPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW202GPO_1, 0xffff}, + {"mcsbw402gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW402GPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW402GPO_1, 0xffff}, + {"dot11agofdmhrbw202gpo", 0xfffff800, 0, SROM11_DOT11AGOFDMHRBW202GPO, 0xffff}, + {"ofdmlrbw202gpo", 0xfffff800, 0, SROM11_OFDMLRBW202GPO, 0xffff}, + {"mcsbw205glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GLPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW205GLPO_1, 0xffff}, + {"mcsbw405glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GLPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW405GLPO_1, 0xffff}, + {"mcsbw805glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GLPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW805GLPO_1, 0xffff}, + {"mcsbw205gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GMPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW205GMPO_1, 0xffff}, + {"mcsbw405gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GMPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW405GMPO_1, 0xffff}, + {"mcsbw805gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GMPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW805GMPO_1, 0xffff}, + {"mcsbw205ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GHPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW205GHPO_1, 0xffff}, + {"mcsbw405ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GHPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW405GHPO_1, 0xffff}, + {"mcsbw805ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GHPO, 0xffff}, + {"", 0xfffff800, 0, SROM11_MCSBW805GHPO_1, 0xffff}, + {"mcslr5glpo", 0xfffff800, 0, SROM11_MCSLR5GLPO, 0x0fff}, + {"mcslr5gmpo", 0xfffff800, 0, SROM11_MCSLR5GMPO, 0xffff}, + {"mcslr5ghpo", 0xfffff800, 0, SROM11_MCSLR5GHPO, 0xffff}, + {"sb20in40hrpo", 0xfffff800, 0, SROM11_SB20IN40HRPO, 0xffff}, + {"sb20in80and160hr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GLPO, 0xffff}, + {"sb40and80hr5glpo", 0xfffff800, 0, SROM11_SB40AND80HR5GLPO, 0xffff}, + {"sb20in80and160hr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GMPO, 0xffff}, + {"sb40and80hr5gmpo", 0xfffff800, 0, SROM11_SB40AND80HR5GMPO, 0xffff}, + {"sb20in80and160hr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GHPO, 0xffff}, + {"sb40and80hr5ghpo", 0xfffff800, 0, SROM11_SB40AND80HR5GHPO, 0xffff}, + {"sb20in40lrpo", 0xfffff800, 0, SROM11_SB20IN40LRPO, 0xffff}, + {"sb20in80and160lr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GLPO, 0xffff}, + {"sb40and80lr5glpo", 0xfffff800, 0, SROM11_SB40AND80LR5GLPO, 0xffff}, + {"sb20in80and160lr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GMPO, 0xffff}, + {"sb40and80lr5gmpo", 0xfffff800, 0, SROM11_SB40AND80LR5GMPO, 0xffff}, + {"sb20in80and160lr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GHPO, 0xffff}, + {"sb40and80lr5ghpo", 0xfffff800, 0, SROM11_SB40AND80LR5GHPO, 0xffff}, + {"dot11agduphrpo", 0xfffff800, 0, SROM11_DOT11AGDUPHRPO, 0xffff}, + {"dot11agduplrpo", 0xfffff800, 0, SROM11_DOT11AGDUPLRPO, 0xffff}, + + /* Misc */ + {"sar2g", 0xfffff800, 0, SROM11_SAR, 0x00ff}, + {"sar5g", 0xfffff800, 0, SROM11_SAR, 0xff00}, + + {"noiselvl2ga0", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x001f}, + {"noiselvl2ga1", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x03e0}, + {"noiselvl2ga2", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x7c00}, + {"noiselvl5ga0", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x001f}, + 
{"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x001f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x001f}, + {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x001f}, + {"noiselvl5ga1", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x03e0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x03e0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x03e0}, + {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x03e0}, + {"noiselvl5ga2", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x7c00}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x7c00}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x7c00}, + {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x7c00}, + {"eu_edthresh2g", 0x00000800, 0, SROM11_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00000800, 0, SROM11_EU_EDCRSTH, 0xff00}, + + {"rxgainerr2ga0", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0x003f}, + {"rxgainerr2ga1", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0x07c0}, + {"rxgainerr2ga2", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0xf800}, + {"rxgainerr5ga0", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0x003f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0x003f}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0x003f}, + {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0x003f}, + {"rxgainerr5ga1", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0x07c0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0x07c0}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0x07c0}, + {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0x07c0}, + {"rxgainerr5ga2", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0xf800}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0xf800}, + {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0xf800}, + {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0xf800}, + {"rpcal2g", 0xfffff800, 0, SROM11_RPCAL_2G, 0xffff}, + {"rpcal5gb0", 0xfffff800, 0, SROM11_RPCAL_5GL, 0xffff}, + {"rpcal5gb1", 0xfffff800, 0, SROM11_RPCAL_5GM, 0xffff}, + {"rpcal5gb2", 0xfffff800, 0, SROM11_RPCAL_5GH, 0xffff}, + {"rpcal5gb3", 0xfffff800, 0, SROM11_RPCAL_5GU, 0xffff}, + {"txidxcap2g", 0xfffff800, 0, SROM11_TXIDXCAP2G, 0x0ff0}, + {"txidxcap5g", 0xfffff800, 0, SROM11_TXIDXCAP5G, 0x0ff0}, + {"pdoffsetcckma0", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x000f}, + {"pdoffsetcckma1", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x00f0}, + {"pdoffsetcckma2", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x0f00}, + + /* sromrev 12 */ + {"boardflags4", 0xfffff000, SRFL_PRHEX|SRFL_MORE, SROM12_BFL6, 0xffff}, + {"", 0, 0, SROM12_BFL7, 0xffff}, + {"pdoffsetcck", 0xfffff000, 0, SROM12_PDOFF_2G_CCK, 0xffff}, + {"pdoffset20in40m5gb0", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B0, 0xffff}, + {"pdoffset20in40m5gb1", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B1, 0xffff}, + {"pdoffset20in40m5gb2", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B2, 0xffff}, + {"pdoffset20in40m5gb3", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B3, 0xffff}, + {"pdoffset20in40m5gb4", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B4, 0xffff}, + {"pdoffset40in80m5gb0", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B0, 0xffff}, + {"pdoffset40in80m5gb1", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B1, 0xffff}, + {"pdoffset40in80m5gb2", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B2, 0xffff}, + {"pdoffset40in80m5gb3", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B3, 0xffff}, + {"pdoffset40in80m5gb4", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B4, 0xffff}, + {"pdoffset20in80m5gb0", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B0, 0xffff}, + {"pdoffset20in80m5gb1", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B1, 0xffff}, + {"pdoffset20in80m5gb2", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B2, 
0xffff}, + {"pdoffset20in80m5gb3", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B3, 0xffff}, + {"pdoffset20in80m5gb4", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B4, 0xffff}, + + {"pdoffset20in40m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3, 0xffff}, + {"pdoffset20in40m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3_1, 0xffff}, + {"pdoffset20in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3, 0xffff}, + {"pdoffset20in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3_1, 0xffff}, + {"pdoffset40in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3, 0xffff}, + {"pdoffset40in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3_1, 0xffff}, + + {"pdoffset20in40m2g", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2G, 0xffff}, + {"pdoffset20in40m2gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2GCORE3, 0xffff}, + + /* power per rate */ + {"mcsbw205gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW205GX1PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW205GX1PO_1, 0xffff}, + {"mcsbw405gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW405GX1PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW405GX1PO_1, 0xffff}, + {"mcsbw805gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW805GX1PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW805GX1PO_1, 0xffff}, + {"mcsbw205gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW205GX2PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW205GX2PO_1, 0xffff}, + {"mcsbw405gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW405GX2PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW405GX2PO_1, 0xffff}, + {"mcsbw805gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW805GX2PO, 0xffff}, + {"", 0xfffff000, 0, SROM12_MCSBW805GX2PO_1, 0xffff}, + + {"sb20in80and160hr5gx1po", 0xfffff000, 0, SROM12_SB20IN80AND160HR5GX1PO, 0xffff}, + {"sb40and80hr5gx1po", 0xfffff000, 0, SROM12_SB40AND80HR5GX1PO, 0xffff}, + {"sb20in80and160lr5gx1po", 0xfffff000, 0, SROM12_SB20IN80AND160LR5GX1PO, 0xffff}, + {"sb40and80hr5gx1po", 0xfffff000, 0, SROM12_SB40AND80HR5GX1PO, 0xffff}, + {"sb20in80and160hr5gx2po", 0xfffff000, 0, SROM12_SB20IN80AND160HR5GX2PO, 0xffff}, + {"sb40and80hr5gx2po", 0xfffff000, 0, SROM12_SB40AND80HR5GX2PO, 0xffff}, + {"sb20in80and160lr5gx2po", 0xfffff000, 0, SROM12_SB20IN80AND160LR5GX2PO, 0xffff}, + {"sb40and80hr5gx2po", 0xfffff000, 0, SROM12_SB40AND80HR5GX2PO, 0xffff}, + + {"rxgains5gmelnagaina0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0007}, + {"rxgains5gmelnagaina1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0007}, + {"rxgains5gmelnagaina2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0007}, + {"rxgains5gmtrisoa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0078}, + {"rxgains5gmtrisoa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0078}, + {"rxgains5gmtrisoa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0078}, + {"rxgains5gmtrelnabypa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0080}, + {"rxgains5gmtrelnabypa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0080}, + {"rxgains5gmtrelnabypa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0080}, + {"rxgains5ghelnagaina0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0700}, + {"rxgains5ghelnagaina1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0700}, + {"rxgains5ghelnagaina2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0700}, + {"rxgains5ghtrisoa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x7800}, + {"rxgains5ghtrisoa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x7800}, + {"rxgains5ghtrisoa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x7800}, + {"rxgains5ghtrelnabypa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x8000}, + {"rxgains5ghtrelnabypa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x8000}, + {"rxgains5ghtrelnabypa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x8000}, + {"eu_edthresh2g", 0x00001000, 0, 
SROM12_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00001000, 0, SROM12_EU_EDCRSTH, 0xff00}, + + {"gpdn", 0xfffff000, SRFL_PRHEX|SRFL_MORE, SROM12_GPDN_L, 0xffff}, + {"", 0, 0, SROM12_GPDN_H, 0xffff}, + + {"eu_edthresh2g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0x00ff}, + {"eu_edthresh5g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0xff00}, + + {"agbg3", 0xffffe000, 0, SROM13_ANTGAIN_BANDBGA, 0xff00}, + {"aga3", 0xffffe000, 0, SROM13_ANTGAIN_BANDBGA, 0x00ff}, + {"noiselvl2ga3", 0xffffe000, 0, SROM13_NOISELVLCORE3, 0x001f}, + {"noiselvl5ga3", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3, 0x03e0}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3, 0x7c00}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3_1, 0x001f}, + {"", 0xffffe000, 0, SROM13_NOISELVLCORE3_1, 0x03e0}, + {"rxgainerr2ga3", 0xffffe000, 0, SROM13_RXGAINERRCORE3, 0x001f}, + {"rxgainerr5ga3", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3, 0x03e0}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3, 0x7c00}, + {"", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3_1, 0x001f}, + {"", 0xffffe000, 0, SROM13_RXGAINERRCORE3_1, 0x03e0}, + {"rxgains5gmelnagaina3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0007}, + {"rxgains5gmtrisoa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0078}, + {"rxgains5gmtrelnabypa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0080}, + {"rxgains5ghelnagaina3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0700}, + {"rxgains5ghtrisoa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x7800}, + {"rxgains5ghtrelnabypa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x8000}, + + /* power per rate */ + {"mcs1024qam2gpo", 0xffffe000, 0, SROM13_MCS1024QAM2GPO, 0xffff}, + {"mcs1024qam5glpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GLPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GLPO_1, 0xffff}, + {"mcs1024qam5gmpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GMPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GMPO_1, 0xffff}, + {"mcs1024qam5ghpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GHPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GHPO_1, 0xffff}, + {"mcs1024qam5gx1po", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GX1PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GX1PO_1, 0xffff}, + {"mcs1024qam5gx2po", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GX2PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS1024QAM5GX2PO_1, 0xffff}, + + {"mcsbw1605glpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GLPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GLPO_1, 0xffff}, + {"mcsbw1605gmpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GMPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GMPO_1, 0xffff}, + {"mcsbw1605ghpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GHPO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GHPO_1, 0xffff}, + {"mcsbw1605gx1po", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GX1PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GX1PO_1, 0xffff}, + {"mcsbw1605gx2po", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GX2PO, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCSBW1605GX2PO_1, 0xffff}, + + {"ulbpproffs2g", 0xffffe000, 0, SROM13_ULBPPROFFS2G, 0xffff}, + + {"mcs8poexp", 0xffffe000, SRFL_MORE, SROM13_MCS8POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS8POEXP_1, 0xffff}, + {"mcs9poexp", 0xffffe000, SRFL_MORE, SROM13_MCS9POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS9POEXP_1, 0xffff}, + {"mcs10poexp", 0xffffe000, SRFL_MORE, SROM13_MCS10POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS10POEXP_1, 0xffff}, + {"mcs11poexp", 0xffffe000, SRFL_MORE, SROM13_MCS11POEXP, 0xffff}, + {"", 0xffffe000, 0, SROM13_MCS11POEXP_1, 0xffff}, + + {"ulbpdoffs5gb0a0", 0xffffe000, 0, 
SROM13_ULBPDOFFS5GB0A0, 0xffff}, + {"ulbpdoffs5gb0a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A1, 0xffff}, + {"ulbpdoffs5gb0a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A2, 0xffff}, + {"ulbpdoffs5gb0a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A3, 0xffff}, + {"ulbpdoffs5gb1a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A0, 0xffff}, + {"ulbpdoffs5gb1a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A1, 0xffff}, + {"ulbpdoffs5gb1a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A2, 0xffff}, + {"ulbpdoffs5gb1a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A3, 0xffff}, + {"ulbpdoffs5gb2a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A0, 0xffff}, + {"ulbpdoffs5gb2a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A1, 0xffff}, + {"ulbpdoffs5gb2a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A2, 0xffff}, + {"ulbpdoffs5gb2a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A3, 0xffff}, + {"ulbpdoffs5gb3a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A0, 0xffff}, + {"ulbpdoffs5gb3a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A1, 0xffff}, + {"ulbpdoffs5gb3a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A2, 0xffff}, + {"ulbpdoffs5gb3a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A3, 0xffff}, + {"ulbpdoffs5gb4a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A0, 0xffff}, + {"ulbpdoffs5gb4a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A1, 0xffff}, + {"ulbpdoffs5gb4a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A2, 0xffff}, + {"ulbpdoffs5gb4a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A3, 0xffff}, + {"ulbpdoffs2ga0", 0xffffe000, 0, SROM13_ULBPDOFFS2GA0, 0xffff}, + {"ulbpdoffs2ga1", 0xffffe000, 0, SROM13_ULBPDOFFS2GA1, 0xffff}, + {"ulbpdoffs2ga2", 0xffffe000, 0, SROM13_ULBPDOFFS2GA2, 0xffff}, + {"ulbpdoffs2ga3", 0xffffe000, 0, SROM13_ULBPDOFFS2GA3, 0xffff}, + + {"rpcal5gb4", 0xffffe000, 0, SROM13_RPCAL5GB4, 0xffff}, + + {"sb20in40hrlrpox", 0xffffe000, 0, SROM13_SB20IN40HRLRPOX, 0xffff}, + + {"pdoffset20in40m2g", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2G, 0xffff}, + {"pdoffset20in40m2gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2GCORE3, 0xffff}, + + {"pdoffset20in40m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET20IN40M5GCORE3, 0xffff}, + {"", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3_1, 0xffff}, + {"pdoffset40in80m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET40IN80M5GCORE3, 0xffff}, + {"", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3_1, 0xffff}, + {"pdoffset20in80m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET20IN80M5GCORE3, 0xffff}, + {"", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3_1, 0xffff}, + + {"swctrlmap4_cfg", 0xffffe000, 0, SROM13_SWCTRLMAP4_CFG, 0xffff}, + {"swctrlmap4_TX2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX2G_FEM3TO0, 0xffff}, + {"swctrlmap4_RX2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX2G_FEM3TO0, 0xffff}, + {"swctrlmap4_RXByp2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP2G_FEM3TO0, 0xffff}, + {"swctrlmap4_misc2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC2G_FEM3TO0, 0xffff}, + {"swctrlmap4_TX5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX5G_FEM3TO0, 0xffff}, + {"swctrlmap4_RX5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX5G_FEM3TO0, 0xffff}, + {"swctrlmap4_RXByp5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP5G_FEM3TO0, 0xffff}, + {"swctrlmap4_misc5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC5G_FEM3TO0, 0xffff}, + {"swctrlmap4_TX2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX2G_FEM7TO4, 0xffff}, + {"swctrlmap4_RX2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX2G_FEM7TO4, 0xffff}, + {"swctrlmap4_RXByp2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP2G_FEM7TO4, 0xffff}, + {"swctrlmap4_misc2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC2G_FEM7TO4, 0xffff}, + {"swctrlmap4_TX5g_fem7to4", 
0xffffe000, 0, SROM13_SWCTRLMAP4_TX5G_FEM7TO4, 0xffff}, + {"swctrlmap4_RX5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX5G_FEM7TO4, 0xffff}, + {"swctrlmap4_RXByp5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP5G_FEM7TO4, 0xffff}, + {"swctrlmap4_misc5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC5G_FEM7TO4, 0xffff}, + {NULL, 0, 0, 0, 0} +}; + +static const sromvar_t perpath_pci_sromvars[] = { + {"maxp2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff}, + {"itt2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00}, + {"itt5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00}, + {"pa2gw0a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff}, + {"pa2gw1a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff}, + {"pa2gw2a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff}, + {"pa2gw3a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff}, + {"maxp5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff}, + {"maxp5gha", 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff}, + {"maxp5gla", 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00}, + {"pa5gw0a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff}, + {"pa5gw1a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff}, + {"pa5gw2a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff}, + {"pa5gw3a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff}, + {"pa5glw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff}, + {"pa5glw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1, 0xffff}, + {"pa5glw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2, 0xffff}, + {"pa5glw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3, 0xffff}, + {"pa5ghw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff}, + {"pa5ghw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1, 0xffff}, + {"pa5ghw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2, 0xffff}, + {"pa5ghw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3, 0xffff}, + {"maxp2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0x00ff}, + {"itt2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0xff00}, + {"itt5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0xff00}, + {"pa2gw0a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA, 0xffff}, + {"pa2gw1a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 1, 0xffff}, + {"pa2gw2a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 2, 0xffff}, + {"maxp5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0x00ff}, + {"maxp5gha", 0x00000700, 0, SROM8_5GLH_MAXP, 0x00ff}, + {"maxp5gla", 0x00000700, 0, SROM8_5GLH_MAXP, 0xff00}, + {"pa5gw0a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA, 0xffff}, + {"pa5gw1a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 1, 0xffff}, + {"pa5gw2a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 2, 0xffff}, + {"pa5glw0a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA, 0xffff}, + {"pa5glw1a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 1, 0xffff}, + {"pa5glw2a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 2, 0xffff}, + {"pa5ghw0a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA, 0xffff}, + {"pa5ghw1a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 1, 0xffff}, + {"pa5ghw2a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 2, 0xffff}, + + /* sromrev 11 */ + {"maxp2ga", 0xfffff800, 0, SROM11_2G_MAXP, 0x00ff}, + {"pa2ga", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX, SROM11_2G_PA + 2, 0xffff}, + {"rxgains5gmelnagaina", 0x00000800, 0, SROM11_RXGAINS1, 0x0007}, + {"rxgains5gmtrisoa", 0x00000800, 0, SROM11_RXGAINS1, 0x0078}, + {"rxgains5gmtrelnabypa", 0x00000800, 0, SROM11_RXGAINS1, 0x0080}, + {"rxgains5ghelnagaina", 0x00000800, 0, SROM11_RXGAINS1, 0x0700}, + {"rxgains5ghtrisoa", 0x00000800, 0, SROM11_RXGAINS1, 0x7800}, + {"rxgains5ghtrelnabypa", 0x00000800, 0, SROM11_RXGAINS1, 0x8000}, + 
{"rxgains2gelnagaina", 0x00000800, 0, SROM11_RXGAINS, 0x0007}, + {"rxgains2gtrisoa", 0x00000800, 0, SROM11_RXGAINS, 0x0078}, + {"rxgains2gtrelnabypa", 0x00000800, 0, SROM11_RXGAINS, 0x0080}, + {"rxgains5gelnagaina", 0x00000800, 0, SROM11_RXGAINS, 0x0700}, + {"rxgains5gtrisoa", 0x00000800, 0, SROM11_RXGAINS, 0x7800}, + {"rxgains5gtrelnabypa", 0x00000800, 0, SROM11_RXGAINS, 0x8000}, + {"maxp5ga", 0x00000800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0x00ff}, + {"", 0x00000800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0xff00}, + {"", 0x00000800, SRFL_ARRAY, SROM11_5GB3B2_MAXP, 0x00ff}, + {"", 0x00000800, 0, SROM11_5GB3B2_MAXP, 0xff00}, + {"pa5ga", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 2, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 2, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 2, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA, 0xffff}, + {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA + 1, 0xffff}, + {"", 0x00000800, SRFL_PRHEX, SROM11_5GB3_PA + 2, 0xffff}, + + /* sromrev 12 */ + {"maxp5gb4a", 0xfffff000, 0, SROM12_5GB42G_MAXP, 0x00ff00}, + {"pa2ga", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_2GB0_PA_W3, 0x00ffff}, + + {"pa2g40a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_2G40B0_PA_W3, 0x00ffff}, + {"maxp5gb0a", 0xfffff000, 0, SROM12_5GB1B0_MAXP, 0x00ff}, + {"maxp5gb1a", 0xfffff000, 0, SROM12_5GB1B0_MAXP, 0x00ff00}, + {"maxp5gb2a", 0xfffff000, 0, SROM12_5GB3B2_MAXP, 0x00ff}, + {"maxp5gb3a", 0xfffff000, 0, SROM12_5GB3B2_MAXP, 0x00ff00}, + + {"pa5ga", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | 
SRFL_ARRAY, SROM12_5GB3_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_5GB4_PA_W3, 0x00ffff}, + + {"pa5g40a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_5G40B4_PA_W3, 0x00ffff}, + + {"pa5g80a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W3, 0x00ffff}, + + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W0, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W1, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W2, 0x00ffff}, + {"", 0xfffff000, SRFL_PRHEX, SROM12_5G80B4_PA_W3, 0x00ffff}, + /* sromrev 13 */ + {"rxgains2gelnagaina", 0xffffe000, 0, SROM13_RXGAINS, 0x0007}, + {"rxgains2gtrisoa", 0xffffe000, 0, SROM13_RXGAINS, 0x0078}, + {"rxgains2gtrelnabypa", 
0xffffe000, 0, SROM13_RXGAINS, 0x0080}, + {"rxgains5gelnagaina", 0xffffe000, 0, SROM13_RXGAINS, 0x0700}, + {"rxgains5gtrisoa", 0xffffe000, 0, SROM13_RXGAINS, 0x7800}, + {"rxgains5gtrelnabypa", 0xffffe000, 0, SROM13_RXGAINS, 0x8000}, + {NULL, 0, 0, 0, 0} +}; + +#if !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N)) +#define PHY_TYPE_HT 7 /* HT-Phy value */ +#define PHY_TYPE_N 4 /* N-Phy value */ +#endif /* !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N)) */ +#if !defined(PHY_TYPE_AC) +#define PHY_TYPE_AC 11 /* AC-Phy value */ +#endif /* !defined(PHY_TYPE_AC) */ +#if !defined(PHY_TYPE_LCN20) +#define PHY_TYPE_LCN20 12 /* LCN20-Phy value */ +#endif /* !defined(PHY_TYPE_LCN20) */ +#if !defined(PHY_TYPE_NULL) +#define PHY_TYPE_NULL 0xf /* Invalid Phy value */ +#endif /* !defined(PHY_TYPE_NULL) */ + +typedef struct { + uint16 phy_type; + uint16 bandrange; + uint16 chain; + const char *vars; +} pavars_t; + +static const pavars_t pavars[] = { + /* HTPHY */ + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 2, "pa2gw0a2 pa2gw1a2 pa2gw2a2"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 2, "pa5glw0a2 pa5glw1a2 pa5glw2a2"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 2, "pa5gw0a2 pa5gw1a2 pa5gw2a2"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 2, "pa5ghw0a2 pa5ghw1a2 pa5ghw2a2"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 0, "pa5gw0a3 pa5gw1a3 pa5gw2a3"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 1, "pa5glw0a3 pa5glw1a3 pa5glw2a3"}, + {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 2, "pa5ghw0a3 pa5ghw1a3 pa5ghw2a3"}, + /* NPHY */ + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"}, + {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"}, + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5ga2"}, + /* LCN20PHY */ + {PHY_TYPE_LCN20, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + + +static const pavars_t pavars_SROM12[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 0, "pa2g40a0"}, + {PHY_TYPE_AC, 
WL_CHAN_FREQ_RANGE_2G_40, 1, "pa2g40a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 2, "pa2g40a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 2, "pa5ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 0, "pa5g40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 1, "pa5g40a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 2, "pa5g40a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 0, "pa5g80a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 1, "pa5g80a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 2, "pa5g80a2"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +static const pavars_t pavars_SROM13[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 3, "pa2ga3"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 0, "pa2g40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 1, "pa2g40a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 2, "pa2g40a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 3, "pa2g40a3"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 2, "pa5ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 3, "pa5ga3"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 0, "pa5g40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 1, "pa5g40a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 2, "pa5g40a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 3, "pa5g40a3"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 0, "pa5g80a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 1, "pa5g80a1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 2, "pa5g80a2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 3, "pa5g80a3"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +/* pavars table when paparambwver is 1 */ +static const pavars_t pavars_bwver_1[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gccka0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga2"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5gbw40a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5gbw80a0"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +/* pavars table when paparambwver is 2 */ +static const pavars_t pavars_bwver_2[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5gbw4080a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 3, "pa5gbw4080a1"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + +/* pavars table when paparambwver is 3 */ +static const pavars_t pavars_bwver_3[] = { + /* ACPHY */ + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2gccka0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 3, "pa2gccka1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5gbw4080a0"}, + {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 3, "pa5gbw4080a1"}, + {PHY_TYPE_NULL, 0, 0, ""} +}; + 
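+/*
+ * Illustrative sketch (not from the Broadcom sources): how a sromvar_t
+ * table such as perpath_pci_sromvars above is typically consumed.  Each
+ * entry gates a named variable on a supported-sromrev bitmask, then
+ * extracts its value from the 16-bit SROM word at 'off' using 'mask'.
+ * The example_* names, the fake SROM image, and the exact read logic
+ * are assumptions made for this standalone example only.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+typedef struct {
+	const char *name;
+	uint32_t revmask;	/* bit n set => defined for sromrev n */
+	uint32_t flags;		/* SRFL_* flags, unused in this sketch */
+	uint16_t off;		/* word offset into the SROM image */
+	uint32_t mask;		/* field mask within that 16-bit word */
+} example_sromvar_t;
+
+/* a one-word fake SROM image standing in for the real EEPROM contents */
+static const uint16_t ex_srom[] = { 0x3c0a };
+
+static uint32_t
+example_srom_get(const example_sromvar_t *v, uint32_t sromrev)
+{
+	uint32_t val, m;
+
+	if (!(v->revmask & (1u << sromrev)))
+		return 0;	/* variable not present in this sromrev */
+
+	val = ex_srom[v->off] & v->mask;
+	/* right-justify the field by shifting out the mask's trailing zeros */
+	for (m = v->mask; m != 0 && !(m & 1); m >>= 1)
+		val >>= 1;
+	return val;
+}
+
+int main(void)
+{
+	/* a made-up entry in the {name, revmask, flags, off, mask} shape above */
+	example_sromvar_t v = { "itt2ga", 0x000000f0, 0, 0, 0xff00 };
+
+	/* prints "itt2ga = 0x3c": bits 15:8 of word 0, per mask 0xff00 */
+	printf("%s = 0x%x\n", v.name, (unsigned)example_srom_get(&v, 4));
+	return 0;
+}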
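+/*
+ * Illustrative sketch (not from the Broadcom sources): walking a
+ * pavars-style table to find the PA parameter variable names for one
+ * PHY type / band range / chain.  It shows why every table above ends
+ * with a {PHY_TYPE_NULL, 0, 0, ""} sentinel.  PHY_TYPE_AC (11) and
+ * PHY_TYPE_NULL (0xf) match the defines earlier in this header; the
+ * EX_RANGE_2G value is a stand-in, since WL_CHAN_FREQ_RANGE_2G is
+ * defined elsewhere.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+typedef struct {
+	uint16_t phy_type;
+	uint16_t bandrange;
+	uint16_t chain;
+	const char *vars;
+} example_pavars_t;
+
+#define EX_PHY_AC	11	/* PHY_TYPE_AC, per the defines above */
+#define EX_PHY_NULL	0xf	/* PHY_TYPE_NULL terminator */
+#define EX_RANGE_2G	0	/* stand-in for WL_CHAN_FREQ_RANGE_2G */
+
+static const example_pavars_t ex_pavars[] = {
+	{EX_PHY_AC, EX_RANGE_2G, 0, "pa2ga0"},
+	{EX_PHY_AC, EX_RANGE_2G, 1, "pa2ga1"},
+	{EX_PHY_NULL, 0, 0, ""}		/* sentinel ends the walk */
+};
+
+/* return the space-separated variable names for a phy/band/chain, or NULL */
+static const char *
+example_pavars_lookup(uint16_t phy, uint16_t band, uint16_t chain)
+{
+	const example_pavars_t *p;
+
+	for (p = ex_pavars; p->phy_type != EX_PHY_NULL; p++)
+		if (p->phy_type == phy && p->bandrange == band && p->chain == chain)
+			return p->vars;
+	return NULL;
+}
+
+int main(void)
+{
+	printf("%s\n", example_pavars_lookup(EX_PHY_AC, EX_RANGE_2G, 1));
+	return 0;	/* prints "pa2ga1" */
+}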
+typedef struct {
+	uint16 phy_type;
+	uint16 bandrange;
+	const char *vars;
+} povars_t;
+
+static const povars_t povars[] = {
+	/* NPHY */
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, "mcs2gpo0 mcs2gpo1 mcs2gpo2 mcs2gpo3 "
+	"mcs2gpo4 mcs2gpo5 mcs2gpo6 mcs2gpo7"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GL, "mcs5glpo0 mcs5glpo1 mcs5glpo2 mcs5glpo3 "
+	"mcs5glpo4 mcs5glpo5 mcs5glpo6 mcs5glpo7"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GM, "mcs5gpo0 mcs5gpo1 mcs5gpo2 mcs5gpo3 "
+	"mcs5gpo4 mcs5gpo5 mcs5gpo6 mcs5gpo7"},
+	{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GH, "mcs5ghpo0 mcs5ghpo1 mcs5ghpo2 mcs5ghpo3 "
+	"mcs5ghpo4 mcs5ghpo5 mcs5ghpo6 mcs5ghpo7"},
+	{PHY_TYPE_NULL, 0, ""}
+};
+
+typedef struct {
+	uint8 tag;	/* Broadcom subtag name */
+	uint32 revmask;	/* Supported cis_sromrev bitmask. Some of the parameters in
+			 * different tuples have the same name. Therefore, the MFGc tool
+			 * needs to know which tuple to generate when seeing these
+			 * parameters (given that we know sromrev from user input, like the
+			 * nvram file).
+			 */
+	uint8 len;	/* Length field of the tuple, note that it includes the
+			 * subtag name (1 byte): 1 + tuple content length
+			 */
+	const char *params;
+} cis_tuple_t;
+
+#define OTP_RAW		(0xff - 1)	/* Reserved tuple number for wrvar Raw input */
+#define OTP_VERS_1	(0xff - 2)	/* CISTPL_VERS_1 */
+#define OTP_MANFID	(0xff - 3)	/* CISTPL_MANFID */
+#define OTP_RAW1	(0xff - 4)	/* Like RAW, but comes first */
+
+/** this array is used by CIS creating/writing applications */
+static const cis_tuple_t cis_hnbuvars[] = {
+/*	tag		revmask		len	params */
+	{OTP_RAW1,	0xffffffff, 0, ""},	/* special case */
+	{OTP_VERS_1,	0xffffffff, 0, "smanf sproductname"},	/* special case (non BRCM tuple) */
+	{OTP_MANFID,	0xffffffff, 4, "2manfid 2prodid"},	/* special case (non BRCM tuple) */
+	/* Unified OTP: tuple to embed USB manfid inside SDIO CIS */
+	{HNBU_UMANFID,	0xffffffff, 8, "8usbmanfid"},
+	{HNBU_SROMREV,	0xffffffff, 2, "1sromrev"},
+	/* NOTE: subdevid is also written to boardtype.
+	 * Need to write HNBU_BOARDTYPE to change it if it is different.
+ */ + {HNBU_CHIPID, 0xffffffff, 11, "2vendid 2devid 2chiprev 2subvendid 2subdevid"}, + {HNBU_BOARDREV, 0xffffffff, 3, "2boardrev"}, + {HNBU_PAPARMS, 0xffffffff, 10, "2pa0b0 2pa0b1 2pa0b2 1pa0itssit 1pa0maxpwr 1opo"}, + {HNBU_AA, 0xffffffff, 3, "1aa2g 1aa5g"}, + {HNBU_AA, 0xffffffff, 3, "1aa0 1aa1"}, /* backward compatibility */ + {HNBU_AG, 0xffffffff, 5, "1ag0 1ag1 1ag2 1ag3"}, + {HNBU_BOARDFLAGS, 0xffffffff, 21, "4boardflags 4boardflags2 4boardflags3 " + "4boardflags4 4boardflags5 "}, + {HNBU_LEDS, 0xffffffff, 17, "1ledbh0 1ledbh1 1ledbh2 1ledbh3 1ledbh4 1ledbh5 " + "1ledbh6 1ledbh7 1ledbh8 1ledbh9 1ledbh10 1ledbh11 1ledbh12 1ledbh13 1ledbh14 1ledbh15"}, + {HNBU_CCODE, 0xffffffff, 4, "2ccode 1cctl"}, + {HNBU_CCKPO, 0xffffffff, 3, "2cckpo"}, + {HNBU_OFDMPO, 0xffffffff, 5, "4ofdmpo"}, + {HNBU_PAPARMS5G, 0xffffffff, 23, "2pa1b0 2pa1b1 2pa1b2 2pa1lob0 2pa1lob1 2pa1lob2 " + "2pa1hib0 2pa1hib1 2pa1hib2 1pa1itssit " + "1pa1maxpwr 1pa1lomaxpwr 1pa1himaxpwr"}, + {HNBU_RDLID, 0xffffffff, 3, "2rdlid"}, + {HNBU_RSSISMBXA2G, 0xffffffff, 3, "0rssismf2g 0rssismc2g " + "0rssisav2g 0bxa2g"}, /* special case */ + {HNBU_RSSISMBXA5G, 0xffffffff, 3, "0rssismf5g 0rssismc5g " + "0rssisav5g 0bxa5g"}, /* special case */ + {HNBU_XTALFREQ, 0xffffffff, 5, "4xtalfreq"}, + {HNBU_TRI2G, 0xffffffff, 2, "1tri2g"}, + {HNBU_TRI5G, 0xffffffff, 4, "1tri5gl 1tri5g 1tri5gh"}, + {HNBU_RXPO2G, 0xffffffff, 2, "1rxpo2g"}, + {HNBU_RXPO5G, 0xffffffff, 2, "1rxpo5g"}, + {HNBU_BOARDNUM, 0xffffffff, 3, "2boardnum"}, + {HNBU_MACADDR, 0xffffffff, 7, "6macaddr"}, /* special case */ + {HNBU_RDLSN, 0xffffffff, 3, "2rdlsn"}, + {HNBU_BOARDTYPE, 0xffffffff, 3, "2boardtype"}, + {HNBU_LEDDC, 0xffffffff, 3, "2leddc"}, + {HNBU_RDLRNDIS, 0xffffffff, 2, "1rdlndis"}, + {HNBU_CHAINSWITCH, 0xffffffff, 5, "1txchain 1rxchain 2antswitch"}, + {HNBU_REGREV, 0xffffffff, 2, "1regrev"}, + {HNBU_FEM, 0x000007fe, 5, "0antswctl2g 0triso2g 0pdetrange2g 0extpagain2g " + "0tssipos2g 0antswctl5g 0triso5g 0pdetrange5g 0extpagain5g 0tssipos5g"}, /* special case */ + {HNBU_PAPARMS_C0, 0x000007fe, 31, "1maxp2ga0 1itt2ga0 2pa2gw0a0 2pa2gw1a0 " + "2pa2gw2a0 1maxp5ga0 1itt5ga0 1maxp5gha0 1maxp5gla0 2pa5gw0a0 2pa5gw1a0 2pa5gw2a0 " + "2pa5glw0a0 2pa5glw1a0 2pa5glw2a0 2pa5ghw0a0 2pa5ghw1a0 2pa5ghw2a0"}, + {HNBU_PAPARMS_C1, 0x000007fe, 31, "1maxp2ga1 1itt2ga1 2pa2gw0a1 2pa2gw1a1 " + "2pa2gw2a1 1maxp5ga1 1itt5ga1 1maxp5gha1 1maxp5gla1 2pa5gw0a1 2pa5gw1a1 2pa5gw2a1 " + "2pa5glw0a1 2pa5glw1a1 2pa5glw2a1 2pa5ghw0a1 2pa5ghw1a1 2pa5ghw2a1"}, + {HNBU_PO_CCKOFDM, 0xffffffff, 19, "2cck2gpo 4ofdm2gpo 4ofdm5gpo 4ofdm5glpo " + "4ofdm5ghpo"}, + {HNBU_PO_MCS2G, 0xffffffff, 17, "2mcs2gpo0 2mcs2gpo1 2mcs2gpo2 2mcs2gpo3 " + "2mcs2gpo4 2mcs2gpo5 2mcs2gpo6 2mcs2gpo7"}, + {HNBU_PO_MCS5GM, 0xffffffff, 17, "2mcs5gpo0 2mcs5gpo1 2mcs5gpo2 2mcs5gpo3 " + "2mcs5gpo4 2mcs5gpo5 2mcs5gpo6 2mcs5gpo7"}, + {HNBU_PO_MCS5GLH, 0xffffffff, 33, "2mcs5glpo0 2mcs5glpo1 2mcs5glpo2 2mcs5glpo3 " + "2mcs5glpo4 2mcs5glpo5 2mcs5glpo6 2mcs5glpo7 " + "2mcs5ghpo0 2mcs5ghpo1 2mcs5ghpo2 2mcs5ghpo3 " + "2mcs5ghpo4 2mcs5ghpo5 2mcs5ghpo6 2mcs5ghpo7"}, + {HNBU_CCKFILTTYPE, 0xffffffff, 2, "1cckdigfilttype"}, + {HNBU_PO_CDD, 0xffffffff, 3, "2cddpo"}, + {HNBU_PO_STBC, 0xffffffff, 3, "2stbcpo"}, + {HNBU_PO_40M, 0xffffffff, 3, "2bw40po"}, + {HNBU_PO_40MDUP, 0xffffffff, 3, "2bwduppo"}, + {HNBU_RDLRWU, 0xffffffff, 2, "1rdlrwu"}, + {HNBU_WPS, 0xffffffff, 3, "1wpsgpio 1wpsled"}, + {HNBU_USBFS, 0xffffffff, 2, "1usbfs"}, + {HNBU_ELNA2G, 0xffffffff, 2, "1elna2g"}, + {HNBU_ELNA5G, 0xffffffff, 2, "1elna5g"}, + {HNBU_CUSTOM1, 0xffffffff, 5, 
"4customvar1"}, + {OTP_RAW, 0xffffffff, 0, ""}, /* special case */ + {HNBU_OFDMPO5G, 0xffffffff, 13, "4ofdm5gpo 4ofdm5glpo 4ofdm5ghpo"}, + {HNBU_USBEPNUM, 0xffffffff, 3, "2usbepnum"}, + {HNBU_CCKBW202GPO, 0xffffffff, 7, "2cckbw202gpo 2cckbw20ul2gpo 2cckbw20in802gpo"}, + {HNBU_LEGOFDMBW202GPO, 0xffffffff, 9, "4legofdmbw202gpo 4legofdmbw20ul2gpo"}, + {HNBU_LEGOFDMBW205GPO, 0xffffffff, 25, "4legofdmbw205glpo 4legofdmbw20ul5glpo " + "4legofdmbw205gmpo 4legofdmbw20ul5gmpo 4legofdmbw205ghpo 4legofdmbw20ul5ghpo"}, + {HNBU_MCS2GPO, 0xffffffff, 17, "4mcsbw202gpo 4mcsbw20ul2gpo 4mcsbw402gpo 4mcsbw802gpo"}, + {HNBU_MCS5GLPO, 0xffffffff, 13, "4mcsbw205glpo 4mcsbw20ul5glpo 4mcsbw405glpo"}, + {HNBU_MCS5GMPO, 0xffffffff, 13, "4mcsbw205gmpo 4mcsbw20ul5gmpo 4mcsbw405gmpo"}, + {HNBU_MCS5GHPO, 0xffffffff, 13, "4mcsbw205ghpo 4mcsbw20ul5ghpo 4mcsbw405ghpo"}, + {HNBU_MCS32PO, 0xffffffff, 3, "2mcs32po"}, + {HNBU_LEG40DUPPO, 0xffffffff, 3, "2legofdm40duppo"}, + {HNBU_TEMPTHRESH, 0xffffffff, 7, "1tempthresh 0temps_period 0temps_hysteresis " + "1tempoffset 1tempsense_slope 0tempcorrx 0tempsense_option " + "1phycal_tempdelta"}, /* special case */ + {HNBU_MUXENAB, 0xffffffff, 2, "1muxenab"}, + {HNBU_FEM_CFG, 0xfffff800, 5, "0femctrl 0papdcap2g 0tworangetssi2g 0pdgain2g " + "0epagain2g 0tssiposslope2g 0gainctrlsph 0papdcap5g 0tworangetssi5g 0pdgain5g 0epagain5g " + "0tssiposslope5g"}, /* special case */ + {HNBU_ACPA_C0, 0xfffff800, 39, "2subband5gver 2maxp2ga0 2*3pa2ga0 " + "1*4maxp5ga0 2*12pa5ga0"}, + {HNBU_ACPA_C1, 0xfffff800, 37, "2maxp2ga1 2*3pa2ga1 1*4maxp5ga1 2*12pa5ga1"}, + {HNBU_ACPA_C2, 0xfffff800, 37, "2maxp2ga2 2*3pa2ga2 1*4maxp5ga2 2*12pa5ga2"}, + {HNBU_MEAS_PWR, 0xfffff800, 5, "1measpower 1measpower1 1measpower2 2rawtempsense"}, + {HNBU_PDOFF, 0xfffff800, 13, "2pdoffset40ma0 2pdoffset40ma1 2pdoffset40ma2 " + "2pdoffset80ma0 2pdoffset80ma1 2pdoffset80ma2"}, + {HNBU_ACPPR_2GPO, 0xfffff800, 13, "2dot11agofdmhrbw202gpo 2ofdmlrbw202gpo " + "2sb20in40dot11agofdm2gpo 2sb20in80dot11agofdm2gpo 2sb20in40ofdmlrbw202gpo " + "2sb20in80ofdmlrbw202gpo"}, + {HNBU_ACPPR_5GPO, 0xfffff800, 59, "4mcsbw805glpo 4mcsbw1605glpo 4mcsbw805gmpo " + "4mcsbw1605gmpo 4mcsbw805ghpo 4mcsbw1605ghpo 2mcslr5glpo 2mcslr5gmpo 2mcslr5ghpo " + "4mcsbw80p805glpo 4mcsbw80p805gmpo 4mcsbw80p805ghpo 4mcsbw80p805gx1po 2mcslr5gx1po " + "2mcslr5g80p80po 4mcsbw805gx1po 4mcsbw1605gx1po"}, + {HNBU_MCS5Gx1PO, 0xfffff800, 9, "4mcsbw205gx1po 4mcsbw405gx1po"}, + {HNBU_ACPPR_SBPO, 0xfffff800, 49, "2sb20in40hrpo 2sb20in80and160hr5glpo " + "2sb40and80hr5glpo 2sb20in80and160hr5gmpo 2sb40and80hr5gmpo 2sb20in80and160hr5ghpo " + "2sb40and80hr5ghpo 2sb20in40lrpo 2sb20in80and160lr5glpo 2sb40and80lr5glpo " + "2sb20in80and160lr5gmpo 2sb40and80lr5gmpo 2sb20in80and160lr5ghpo 2sb40and80lr5ghpo " + "4dot11agduphrpo 4dot11agduplrpo 2sb20in40and80hrpo 2sb20in40and80lrpo " + "2sb20in80and160hr5gx1po 2sb20in80and160lr5gx1po 2sb40and80hr5gx1po 2sb40and80lr5gx1po " + }, + {HNBU_ACPPR_SB8080_PO, 0xfffff800, 23, "2sb2040and80in80p80hr5glpo " + "2sb2040and80in80p80lr5glpo 2sb2040and80in80p80hr5gmpo " + "2sb2040and80in80p80lr5gmpo 2sb2040and80in80p80hr5ghpo 2sb2040and80in80p80lr5ghpo " + "2sb2040and80in80p80hr5gx1po 2sb2040and80in80p80lr5gx1po 2sb20in80p80hr5gpo " + "2sb20in80p80lr5gpo 2dot11agduppo"}, + {HNBU_NOISELVL, 0xfffff800, 16, "1noiselvl2ga0 1noiselvl2ga1 1noiselvl2ga2 " + "1*4noiselvl5ga0 1*4noiselvl5ga1 1*4noiselvl5ga2"}, + {HNBU_RXGAIN_ERR, 0xfffff800, 16, "1rxgainerr2ga0 1rxgainerr2ga1 1rxgainerr2ga2 " + "1*4rxgainerr5ga0 1*4rxgainerr5ga1 1*4rxgainerr5ga2"}, + 
{HNBU_AGBGA, 0xfffff800, 7, "1agbg0 1agbg1 1agbg2 1aga0 1aga1 1aga2"}, + {HNBU_USBDESC_COMPOSITE, 0xffffffff, 3, "2usbdesc_composite"}, + {HNBU_UUID, 0xffffffff, 17, "16uuid"}, + {HNBU_WOWLGPIO, 0xffffffff, 2, "1wowl_gpio"}, + {HNBU_ACRXGAINS_C0, 0xfffff800, 5, "0rxgains5gtrelnabypa0 0rxgains5gtrisoa0 " + "0rxgains5gelnagaina0 0rxgains2gtrelnabypa0 0rxgains2gtrisoa0 0rxgains2gelnagaina0 " + "0rxgains5ghtrelnabypa0 0rxgains5ghtrisoa0 0rxgains5ghelnagaina0 0rxgains5gmtrelnabypa0 " + "0rxgains5gmtrisoa0 0rxgains5gmelnagaina0"}, /* special case */ + {HNBU_ACRXGAINS_C1, 0xfffff800, 5, "0rxgains5gtrelnabypa1 0rxgains5gtrisoa1 " + "0rxgains5gelnagaina1 0rxgains2gtrelnabypa1 0rxgains2gtrisoa1 0rxgains2gelnagaina1 " + "0rxgains5ghtrelnabypa1 0rxgains5ghtrisoa1 0rxgains5ghelnagaina1 0rxgains5gmtrelnabypa1 " + "0rxgains5gmtrisoa1 0rxgains5gmelnagaina1"}, /* special case */ + {HNBU_ACRXGAINS_C2, 0xfffff800, 5, "0rxgains5gtrelnabypa2 0rxgains5gtrisoa2 " + "0rxgains5gelnagaina2 0rxgains2gtrelnabypa2 0rxgains2gtrisoa2 0rxgains2gelnagaina2 " + "0rxgains5ghtrelnabypa2 0rxgains5ghtrisoa2 0rxgains5ghelnagaina2 0rxgains5gmtrelnabypa2 " + "0rxgains5gmtrisoa2 0rxgains5gmelnagaina2"}, /* special case */ + {HNBU_TXDUTY, 0xfffff800, 9, "2tx_duty_cycle_ofdm_40_5g " + "2tx_duty_cycle_thresh_40_5g 2tx_duty_cycle_ofdm_80_5g 2tx_duty_cycle_thresh_80_5g"}, + {HNBU_PDOFF_2G, 0xfffff800, 3, "0pdoffset2g40ma0 0pdoffset2g40ma1 " + "0pdoffset2g40ma2 0pdoffset2g40mvalid"}, + {HNBU_ACPA_CCK, 0xfffff800, 7, "2*3pa2gccka0"}, + {HNBU_ACPA_40, 0xfffff800, 25, "2*12pa5gbw40a0"}, + {HNBU_ACPA_80, 0xfffff800, 25, "2*12pa5gbw80a0"}, + {HNBU_ACPA_4080, 0xfffff800, 49, "2*12pa5gbw4080a0 2*12pa5gbw4080a1"}, + {HNBU_SUBBAND5GVER, 0xfffff800, 3, "2subband5gver"}, + {HNBU_PAPARAMBWVER, 0xfffff800, 2, "1paparambwver"}, + {HNBU_TXBFRPCALS, 0xfffff800, 11, + "2rpcal2g 2rpcal5gb0 2rpcal5gb1 2rpcal5gb2 2rpcal5gb3"}, /* txbf rpcalvars */ + {HNBU_GPIO_PULL_DOWN, 0xffffffff, 5, "4gpdn"}, + {0xFF, 0xffffffff, 0, ""} +}; + +#endif /* _bcmsrom_tbl_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/bcmutils.h b/drivers/net/wireless/bcmdhd/include/bcmutils.h new file mode 100644 index 000000000000..6cc925c599d9 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/bcmutils.h @@ -0,0 +1,1302 @@ +/* + * Misc useful os-independent macros and functions. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmutils.h 563776 2015-06-15 15:51:15Z $ + */ + +#ifndef _bcmutils_h_ +#define _bcmutils_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + + +#define bcm_strncpy_s(dst, noOfElements, src, count) strncpy((dst), (src), (count)) +#define bcm_strncat_s(dst, noOfElements, src, count) strncat((dst), (src), (count)) +#define bcm_snprintf_s snprintf +#define bcm_sprintf_s snprintf + +/* + * #define bcm_strcpy_s(dst, count, src) strncpy((dst), (src), (count)) + * Use bcm_strcpy_s instead as it is a safer option + * bcm_strcat_s: Use bcm_strncat_s as a safer option + * + */ + +/* ctype replacement */ +#define _BCM_U 0x01 /* upper */ +#define _BCM_L 0x02 /* lower */ +#define _BCM_D 0x04 /* digit */ +#define _BCM_C 0x08 /* cntrl */ +#define _BCM_P 0x10 /* punct */ +#define _BCM_S 0x20 /* white space (space/lf/tab) */ +#define _BCM_X 0x40 /* hex digit */ +#define _BCM_SP 0x80 /* hard space (0x20) */ + +extern const unsigned char bcm_ctype[]; +#define bcm_ismask(x) (bcm_ctype[(int)(unsigned char)(x)]) + +#define bcm_isalnum(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0) +#define bcm_isalpha(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0) +#define bcm_iscntrl(c) ((bcm_ismask(c)&(_BCM_C)) != 0) +#define bcm_isdigit(c) ((bcm_ismask(c)&(_BCM_D)) != 0) +#define bcm_isgraph(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0) +#define bcm_islower(c) ((bcm_ismask(c)&(_BCM_L)) != 0) +#define bcm_isprint(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0) +#define bcm_ispunct(c) ((bcm_ismask(c)&(_BCM_P)) != 0) +#define bcm_isspace(c) ((bcm_ismask(c)&(_BCM_S)) != 0) +#define bcm_isupper(c) ((bcm_ismask(c)&(_BCM_U)) != 0) +#define bcm_isxdigit(c) ((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0) +#define bcm_tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c)) +#define bcm_toupper(c) (bcm_islower((c)) ? ((c) + 'A' - 'a') : (c)) + +#define CIRCULAR_ARRAY_FULL(rd_idx, wr_idx, max) ((wr_idx + 1)%max == rd_idx) + +#define KB(bytes) (((bytes) + 1023) / 1024) + +/* Buffer structure for collecting string-formatted data +* using bcm_bprintf() API. +* Use bcm_binit() to initialize before use +*/ + +struct bcmstrbuf { + char *buf; /* pointer to current position in origbuf */ + unsigned int size; /* current (residual) size in bytes */ + char *origbuf; /* unmodified pointer to orignal buffer */ + unsigned int origsize; /* unmodified orignal buffer size in bytes */ +}; + +#define BCMSTRBUF_LEN(b) (b->size) +#define BCMSTRBUF_BUF(b) (b->buf) + +/* ** driver-only section ** */ +#ifdef BCMDRIVER +#include +#include +#include + +#define GPIO_PIN_NOTDEFINED 0x20 /* Pin not defined */ + +/* + * Spin at most 'us' microseconds while 'exp' is true. + * Caller should explicitly test 'exp' when this completes + * and take appropriate error action if 'exp' is still true. 
+ */ +#ifndef SPINWAIT_POLL_PERIOD +#define SPINWAIT_POLL_PERIOD 10 +#endif + +#define SPINWAIT(exp, us) { \ + uint countdown = (us) + (SPINWAIT_POLL_PERIOD - 1); \ + while ((exp) && (countdown >= SPINWAIT_POLL_PERIOD)) { \ + OSL_DELAY(SPINWAIT_POLL_PERIOD); \ + countdown -= SPINWAIT_POLL_PERIOD; \ + } \ +} + +/* forward definition of ether_addr structure used by some function prototypes */ + +struct ether_addr; + +extern int ether_isbcast(const void *ea); +extern int ether_isnulladdr(const void *ea); + +#define BCM_MAC_RXCPL_IDX_BITS 12 +#define BCM_MAX_RXCPL_IDX_INVALID 0 +#define BCM_MAC_RXCPL_IFIDX_BITS 3 +#define BCM_MAC_RXCPL_DOT11_BITS 1 +#define BCM_MAX_RXCPL_IFIDX ((1 << BCM_MAC_RXCPL_IFIDX_BITS) - 1) +#define BCM_MAC_RXCPL_FLAG_BITS 4 +#define BCM_RXCPL_FLAGS_IN_TRANSIT 0x1 +#define BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST 0x2 +#define BCM_RXCPL_FLAGS_RXCPLVALID 0x4 +#define BCM_RXCPL_FLAGS_RSVD 0x8 + +#define BCM_RXCPL_SET_IN_TRANSIT(a) ((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_IN_TRANSIT) +#define BCM_RXCPL_CLR_IN_TRANSIT(a) ((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_IN_TRANSIT) +#define BCM_RXCPL_IN_TRANSIT(a) ((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_IN_TRANSIT) + +#define BCM_RXCPL_SET_FRST_IN_FLUSH(a) ((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST) +#define BCM_RXCPL_CLR_FRST_IN_FLUSH(a) ((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST) +#define BCM_RXCPL_FRST_IN_FLUSH(a) ((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST) + +#define BCM_RXCPL_SET_VALID_INFO(a) ((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_RXCPLVALID) +#define BCM_RXCPL_CLR_VALID_INFO(a) ((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_RXCPLVALID) +#define BCM_RXCPL_VALID_INFO(a) (((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_RXCPLVALID) ? TRUE : FALSE) + +#define UP_TABLE_MAX ((IPV4_TOS_DSCP_MASK >> IPV4_TOS_DSCP_SHIFT) + 1) /* 64 max */ + +struct reorder_rxcpl_id_list { + uint16 head; + uint16 tail; + uint32 cnt; +}; + +typedef struct rxcpl_id { + uint32 idx : BCM_MAC_RXCPL_IDX_BITS; + uint32 next_idx : BCM_MAC_RXCPL_IDX_BITS; + uint32 ifidx : BCM_MAC_RXCPL_IFIDX_BITS; + uint32 dot11 : BCM_MAC_RXCPL_DOT11_BITS; + uint32 flags : BCM_MAC_RXCPL_FLAG_BITS; +} rxcpl_idx_id_t; + +typedef struct rxcpl_data_len { + uint32 metadata_len_w : 6; + uint32 dataoffset: 10; + uint32 datalen : 16; +} rxcpl_data_len_t; + +typedef struct rxcpl_info { + rxcpl_idx_id_t rxcpl_id; + uint32 host_pktref; + union { + rxcpl_data_len_t rxcpl_len; + struct rxcpl_info *free_next; + }; +} rxcpl_info_t; + +/* rx completion list */ +typedef struct bcm_rxcplid_list { + uint32 max; + uint32 avail; + rxcpl_info_t *rxcpl_ptr; + rxcpl_info_t *free_list; +} bcm_rxcplid_list_t; + +extern bool bcm_alloc_rxcplid_list(osl_t *osh, uint32 max); +extern rxcpl_info_t * bcm_alloc_rxcplinfo(void); +extern void bcm_free_rxcplinfo(rxcpl_info_t *ptr); +extern void bcm_chain_rxcplid(uint16 first, uint16 next); +extern rxcpl_info_t *bcm_id2rxcplinfo(uint16 id); +extern uint16 bcm_rxcplinfo2id(rxcpl_info_t *ptr); +extern rxcpl_info_t *bcm_rxcpllist_end(rxcpl_info_t *ptr, uint32 *count); + +/* externs */ +/* packet */ +extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf); +extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf); +extern uint pkttotlen(osl_t *osh, void *p); +extern void *pktlast(osl_t *osh, void *p); +extern uint pktsegcnt(osl_t *osh, void *p); +extern uint pktsegcnt_war(osl_t *osh, void *p); +extern uint8 *pktdataoffset(osl_t *osh, void *p, uint offset); +extern void *pktoffset(osl_t *osh, void *p, uint 
offset); + +/* Get priority from a packet and pass it back in scb (or equiv) */ +#define PKTPRIO_VDSCP 0x100 /* DSCP prio found after VLAN tag */ +#define PKTPRIO_VLAN 0x200 /* VLAN prio found */ +#define PKTPRIO_UPD 0x400 /* DSCP used to update VLAN prio */ +#define PKTPRIO_DSCP 0x800 /* DSCP prio found */ + +/* DSCP type definitions (RFC4594) */ +/* AF1x: High-Throughput Data (RFC2597) */ +#define DSCP_AF11 0x0A +#define DSCP_AF12 0x0C +#define DSCP_AF13 0x0E +/* AF2x: Low-Latency Data (RFC2597) */ +#define DSCP_AF21 0x12 +#define DSCP_AF22 0x14 +#define DSCP_AF23 0x16 +/* AF3x: Multimedia Streaming (RFC2597) */ +#define DSCP_AF31 0x1A +#define DSCP_AF32 0x1C +#define DSCP_AF33 0x1E +/* EF: Telephony (RFC3246) */ +#define DSCP_EF 0x2E + +extern uint pktsetprio(void *pkt, bool update_vtag); +extern uint pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag); +extern bool pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp); + +/* string */ +extern int bcm_atoi(const char *s); +extern ulong bcm_strtoul(const char *cp, char **endp, uint base); +extern char *bcmstrstr(const char *haystack, const char *needle); +extern char *bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len); +extern char *bcmstrcat(char *dest, const char *src); +extern char *bcmstrncat(char *dest, const char *src, uint size); +extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen); +char* bcmstrtok(char **string, const char *delimiters, char *tokdelim); +int bcmstricmp(const char *s1, const char *s2); +int bcmstrnicmp(const char* s1, const char* s2, int cnt); + + +/* ethernet address */ +extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf); +extern int bcm_ether_atoe(const char *p, struct ether_addr *ea); + +/* ip address */ +struct ipv4_addr; +extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf); +extern char *bcm_ipv6_ntoa(void *ipv6, char *buf); +extern int bcm_atoipv4(const char *p, struct ipv4_addr *ip); + +/* delay */ +extern void bcm_mdelay(uint ms); +/* variable access */ +#define NVRAM_RECLAIM_CHECK(name) + +extern char *getvar(char *vars, const char *name); +extern int getintvar(char *vars, const char *name); +extern int getintvararray(char *vars, const char *name, int index); +extern int getintvararraysize(char *vars, const char *name); +extern uint getgpiopin(char *vars, char *pin_name, uint def_pin); +#define bcm_perf_enable() +#define bcmstats(fmt) +#define bcmlog(fmt, a1, a2) +#define bcmdumplog(buf, size) *buf = '\0' +#define bcmdumplogent(buf, idx) -1 + +#define TSF_TICKS_PER_MS 1000 +#define TS_ENTER 0xdeadbeef /* Timestamp profiling enter */ +#define TS_EXIT 0xbeefcafe /* Timestamp profiling exit */ + +#define bcmtslog(tstamp, fmt, a1, a2) +#define bcmprinttslogs() +#define bcmprinttstamp(us) +#define bcmdumptslog(b) + +extern char *bcm_nvram_vars(uint *length); +extern int bcm_nvram_cache(void *sih); + +/* Support for sharing code across in-driver iovar implementations. + * The intent is that a driver use this structure to map iovar names + * to its (private) iovar identifiers, and the lookup function to + * find the entry. Macros are provided to map ids and get/set actions + * into a single number space for a switch statement. 
+ */ + +/* iovar structure */ +typedef struct bcm_iovar { + const char *name; /* name for lookup and display */ + uint16 varid; /* id for switch */ + uint16 flags; /* driver-specific flag bits */ + uint16 type; /* base type of argument */ + uint16 minlen; /* min length for buffer vars */ +} bcm_iovar_t; + +/* varid definitions are per-driver, may use these get/set bits */ + +/* IOVar action bits for id mapping */ +#define IOV_GET 0 /* Get an iovar */ +#define IOV_SET 1 /* Set an iovar */ + +/* Varid to actionid mapping */ +#define IOV_GVAL(id) ((id) * 2) +#define IOV_SVAL(id) ((id) * 2 + IOV_SET) +#define IOV_ISSET(actionid) ((actionid & IOV_SET) == IOV_SET) +#define IOV_ID(actionid) (actionid >> 1) + +/* flags are per-driver based on driver attributes */ + +extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name); +extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set); +#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \ + defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) +extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len); +#endif +#endif /* BCMDRIVER */ + +/* Base type definitions */ +#define IOVT_VOID 0 /* no value (implictly set only) */ +#define IOVT_BOOL 1 /* any value ok (zero/nonzero) */ +#define IOVT_INT8 2 /* integer values are range-checked */ +#define IOVT_UINT8 3 /* unsigned int 8 bits */ +#define IOVT_INT16 4 /* int 16 bits */ +#define IOVT_UINT16 5 /* unsigned int 16 bits */ +#define IOVT_INT32 6 /* int 32 bits */ +#define IOVT_UINT32 7 /* unsigned int 32 bits */ +#define IOVT_BUFFER 8 /* buffer is size-checked as per minlen */ +#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER) + +/* Initializer for IOV type strings */ +#define BCM_IOV_TYPE_INIT { \ + "void", \ + "bool", \ + "int8", \ + "uint8", \ + "int16", \ + "uint16", \ + "int32", \ + "uint32", \ + "buffer", \ + "" } + +#define BCM_IOVT_IS_INT(type) (\ + (type == IOVT_BOOL) || \ + (type == IOVT_INT8) || \ + (type == IOVT_UINT8) || \ + (type == IOVT_INT16) || \ + (type == IOVT_UINT16) || \ + (type == IOVT_INT32) || \ + (type == IOVT_UINT32)) + +/* ** driver/apps-shared section ** */ + +#define BCME_STRLEN 64 /* Max string length for BCM errors */ +#define VALID_BCMERROR(e) ((e <= 0) && (e >= BCME_LAST)) + + +/* + * error codes could be added but the defined ones shouldn't be changed/deleted + * these error codes are exposed to the user code + * when ever a new error code is added to this list + * please update errorstring table with the related error string and + * update osl files with os specific errorcode map +*/ + +#define BCME_OK 0 /* Success */ +#define BCME_ERROR -1 /* Error generic */ +#define BCME_BADARG -2 /* Bad Argument */ +#define BCME_BADOPTION -3 /* Bad option */ +#define BCME_NOTUP -4 /* Not up */ +#define BCME_NOTDOWN -5 /* Not down */ +#define BCME_NOTAP -6 /* Not AP */ +#define BCME_NOTSTA -7 /* Not STA */ +#define BCME_BADKEYIDX -8 /* BAD Key Index */ +#define BCME_RADIOOFF -9 /* Radio Off */ +#define BCME_NOTBANDLOCKED -10 /* Not band locked */ +#define BCME_NOCLK -11 /* No Clock */ +#define BCME_BADRATESET -12 /* BAD Rate valueset */ +#define BCME_BADBAND -13 /* BAD Band */ +#define BCME_BUFTOOSHORT -14 /* Buffer too short */ +#define BCME_BUFTOOLONG -15 /* Buffer too long */ +#define BCME_BUSY -16 /* Busy */ +#define BCME_NOTASSOCIATED -17 /* Not Associated */ +#define BCME_BADSSIDLEN -18 /* Bad SSID len */ +#define BCME_OUTOFRANGECHAN -19 /* Out of Range Channel */ +#define 
BCME_BADCHAN -20 /* Bad Channel */ +#define BCME_BADADDR -21 /* Bad Address */ +#define BCME_NORESOURCE -22 /* Not Enough Resources */ +#define BCME_UNSUPPORTED -23 /* Unsupported */ +#define BCME_BADLEN -24 /* Bad length */ +#define BCME_NOTREADY -25 /* Not Ready */ +#define BCME_EPERM -26 /* Not Permitted */ +#define BCME_NOMEM -27 /* No Memory */ +#define BCME_ASSOCIATED -28 /* Associated */ +#define BCME_RANGE -29 /* Not In Range */ +#define BCME_NOTFOUND -30 /* Not Found */ +#define BCME_WME_NOT_ENABLED -31 /* WME Not Enabled */ +#define BCME_TSPEC_NOTFOUND -32 /* TSPEC Not Found */ +#define BCME_ACM_NOTSUPPORTED -33 /* ACM Not Supported */ +#define BCME_NOT_WME_ASSOCIATION -34 /* Not WME Association */ +#define BCME_SDIO_ERROR -35 /* SDIO Bus Error */ +#define BCME_DONGLE_DOWN -36 /* Dongle Not Accessible */ +#define BCME_VERSION -37 /* Incorrect version */ +#define BCME_TXFAIL -38 /* TX failure */ +#define BCME_RXFAIL -39 /* RX failure */ +#define BCME_NODEVICE -40 /* Device not present */ +#define BCME_NMODE_DISABLED -41 /* NMODE disabled */ +#define BCME_NONRESIDENT -42 /* access to nonresident overlay */ +#define BCME_SCANREJECT -43 /* reject scan request */ +#define BCME_USAGE_ERROR -44 /* WLCMD usage error */ +#define BCME_IOCTL_ERROR -45 /* WLCMD ioctl error */ +#define BCME_SERIAL_PORT_ERR -46 /* RWL serial port error */ +#define BCME_DISABLED -47 /* Disabled in this build */ +#define BCME_DECERR -48 /* Decrypt error */ +#define BCME_ENCERR -49 /* Encrypt error */ +#define BCME_MICERR -50 /* Integrity/MIC error */ +#define BCME_REPLAY -51 /* Replay */ +#define BCME_IE_NOTFOUND -52 /* IE not found */ +#define BCME_DATA_NOTFOUND -53 /* Complete data not found in buffer */ +#define BCME_LAST BCME_DATA_NOTFOUND + +#define BCME_NOTENABLED BCME_DISABLED + +/* These are collection of BCME Error strings */ +#define BCMERRSTRINGTABLE { \ + "OK", \ + "Undefined error", \ + "Bad Argument", \ + "Bad Option", \ + "Not up", \ + "Not down", \ + "Not AP", \ + "Not STA", \ + "Bad Key Index", \ + "Radio Off", \ + "Not band locked", \ + "No clock", \ + "Bad Rate valueset", \ + "Bad Band", \ + "Buffer too short", \ + "Buffer too long", \ + "Busy", \ + "Not Associated", \ + "Bad SSID len", \ + "Out of Range Channel", \ + "Bad Channel", \ + "Bad Address", \ + "Not Enough Resources", \ + "Unsupported", \ + "Bad length", \ + "Not Ready", \ + "Not Permitted", \ + "No Memory", \ + "Associated", \ + "Not In Range", \ + "Not Found", \ + "WME Not Enabled", \ + "TSPEC Not Found", \ + "ACM Not Supported", \ + "Not WME Association", \ + "SDIO Bus Error", \ + "Dongle Not Accessible", \ + "Incorrect version", \ + "TX Failure", \ + "RX Failure", \ + "Device Not Present", \ + "NMODE Disabled", \ + "Nonresident overlay access", \ + "Scan Rejected", \ + "WLCMD usage error", \ + "WLCMD ioctl error", \ + "RWL serial port error", \ + "Disabled", \ + "Decrypt error", \ + "Encrypt error", \ + "MIC error", \ + "Replay", \ + "IE not found", \ + "Data not found", \ +} + +#ifndef ABS +#define ABS(a) (((a) < 0) ? -(a) : (a)) +#endif /* ABS */ + +#ifndef MIN +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif /* MIN */ + +#ifndef MAX +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#endif /* MAX */ + +/* limit to [min, max] */ +#ifndef LIMIT_TO_RANGE +#define LIMIT_TO_RANGE(x, min, max) \ + ((x) < (min) ? (min) : ((x) > (max) ? (max) : (x))) +#endif /* LIMIT_TO_RANGE */ + +/* limit to max */ +#ifndef LIMIT_TO_MAX +#define LIMIT_TO_MAX(x, max) \ + (((x) > (max) ? 
(max) : (x)))
+#endif /* LIMIT_TO_MAX */
+
+/* limit to min */
+#ifndef LIMIT_TO_MIN
+#define LIMIT_TO_MIN(x, min) \
+	(((x) < (min) ? (min) : (x)))
+#endif /* LIMIT_TO_MIN */
+
+#define DELTA(curr, prev) ((curr) > (prev) ? ((curr) - (prev)) : \
+	(0xffffffff - (prev) + (curr) + 1))
+#define CEIL(x, y)		(((x) + ((y) - 1)) / (y))
+#define ROUNDUP(x, y)		((((x) + ((y) - 1)) / (y)) * (y))
+#define ROUNDDN(p, align)	((p) & ~((align) - 1))
+#define ISALIGNED(a, x)		(((uintptr)(a) & ((x) - 1)) == 0)
+#define ALIGN_ADDR(addr, boundary)	(void *)(((uintptr)(addr) + (boundary) - 1) \
+					& ~((boundary) - 1))
+#define ALIGN_SIZE(size, boundary)	(((size) + (boundary) - 1) \
+					& ~((boundary) - 1))
+#define ISPOWEROF2(x)		((((x) - 1) & (x)) == 0)
+#define VALID_MASK(mask)	!((mask) & ((mask) + 1))
+
+#ifndef OFFSETOF
+#ifdef __ARMCC_VERSION
+/*
+ * The ARM RVCT compiler complains when using OFFSETOF where a constant
+ * expression is expected, such as an initializer for a static object.
+ * offsetof from the runtime library doesn't have that problem.
+ */
+#include <stddef.h>
+#define OFFSETOF(type, member)	offsetof(type, member)
+#else
+#  if ((__GNUC__ >= 4) && (__GNUC_MINOR__ >= 8))
+/* GCC 4.8+ complains when using our OFFSETOF macro in array length declarations. */
+#    define OFFSETOF(type, member)	__builtin_offsetof(type, member)
+#  else
+#    define OFFSETOF(type, member)	((uint)(uintptr)&((type *)0)->member)
+#  endif /* GCC 4.8 or newer */
+#endif /* __ARMCC_VERSION */
+#endif /* OFFSETOF */
+
+#ifndef ARRAYSIZE
+#define ARRAYSIZE(a)		(sizeof(a) / sizeof(a[0]))
+#endif
+
+#ifndef ARRAYLAST /* returns pointer to last array element */
+#define ARRAYLAST(a)		(&a[ARRAYSIZE(a)-1])
+#endif
+
+/* Reference a function; used to prevent a static function from being optimized out */
+extern void *_bcmutils_dummy_fn;
+#define REFERENCE_FUNCTION(f)	(_bcmutils_dummy_fn = (void *)(f))
+
+/* bit map related macros */
+#ifndef setbit
+#ifndef NBBY		/* the BSD family defines NBBY */
+#define	NBBY	8	/* 8 bits per byte */
+#endif /* #ifndef NBBY */
+#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
+extern void setbit(void *array, uint bit);
+extern void clrbit(void *array, uint bit);
+extern bool isset(const void *array, uint bit);
+extern bool isclr(const void *array, uint bit);
+#else
+#define	setbit(a, i)	(((uint8 *)a)[(i) / NBBY] |= 1 << ((i) % NBBY))
+#define	clrbit(a, i)	(((uint8 *)a)[(i) / NBBY] &= ~(1 << ((i) % NBBY)))
+#define	isset(a, i)	(((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY)))
+#define	isclr(a, i)	((((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) == 0)
+#endif
+#endif /* setbit */
+extern void set_bitrange(void *array, uint start, uint end, uint maxbit);
+
+#define	isbitset(a, i)	(((a) & (1 << (i))) != 0)
+
+#define	NBITS(type)	(sizeof(type) * 8)
+#define NBITVAL(nbits)	(1 << (nbits))
+#define MAXBITVAL(nbits)	((1 << (nbits)) - 1)
+#define	NBITMASK(nbits)	MAXBITVAL(nbits)
+#define MAXNBVAL(nbyte)	MAXBITVAL((nbyte) * 8)
+
+extern void bcm_bitprint32(const uint32 u32);
+
+/*
+ * ----------------------------------------------------------------------------
+ * Multiword map of 2bits, nibbles
+ * setbit2 setbit4 (void *ptr, uint32 ix, uint32 val)
+ * getbit2 getbit4 (void *ptr, uint32 ix)
+ * ----------------------------------------------------------------------------
+ */
+
+#define DECLARE_MAP_API(NB, RSH, LSH, OFF, MSK)				\
+static INLINE void setbit##NB(void *ptr, uint32 ix, uint32 val)		\
+{									\
+	uint32 *addr = (uint32 *)ptr;					\
+	uint32 *a = addr + (ix >> RSH);	/* (ix / 2^RSH) */		\
+	uint32 pos = (ix & OFF) << LSH;	/* (ix % 2^RSH) * 2^LSH */	\
+	uint32 mask = (MSK << pos);					\
+	uint32 tmp = *a & ~mask;					\
+	*a = tmp | (val << pos);					\
+}									\
+static INLINE uint32 getbit##NB(void *ptr, uint32 ix)			\
+{									\
+	uint32 *addr = (uint32 *)ptr;					\
+	uint32 *a = addr + (ix >> RSH);					\
+	uint32 pos = (ix & OFF) << LSH;					\
+	return ((*a >> pos) & MSK);					\
+}
+
+DECLARE_MAP_API(2, 4, 1, 15U, 0x0003)	/* setbit2() and getbit2() */
+DECLARE_MAP_API(4, 3, 2, 7U, 0x000F)	/* setbit4() and getbit4() */
+DECLARE_MAP_API(8, 2, 3, 3U, 0x00FF)	/* setbit8() and getbit8() */
+
+/* basic mux operation - can be optimized on several architectures */
+#define MUX(pred, true, false) ((pred) ? (true) : (false))
+
+/* modulo inc/dec - assumes x E [0, bound - 1] */
+#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1)
+#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
+
+/* modulo inc/dec, bound = 2^k */
+#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
+#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
+
+/* modulo add/sub - assumes x, y E [0, bound - 1] */
+#define MODADD(x, y, bound) \
+	MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y))
+#define MODSUB(x, y, bound) \
+	MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y))
+
+/* modulo add/sub, bound = 2^k */
+#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
+#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
+
+/* crc defines */
+#define CRC8_INIT_VALUE		0xff		/* Initial CRC8 checksum value */
+#define CRC8_GOOD_VALUE		0x9f		/* Good final CRC8 checksum value */
+#define CRC16_INIT_VALUE	0xffff		/* Initial CRC16 checksum value */
+#define CRC16_GOOD_VALUE	0xf0b8		/* Good final CRC16 checksum value */
+#define CRC32_INIT_VALUE	0xffffffff	/* Initial CRC32 checksum value */
+#define CRC32_GOOD_VALUE	0xdebb20e3	/* Good final CRC32 checksum value */
+
+/* use for direct output of MAC address in printf etc */
+#define MACF	"%02x:%02x:%02x:%02x:%02x:%02x"
+#define ETHERP_TO_MACF(ea)	((struct ether_addr *) (ea))->octet[0], \
+				((struct ether_addr *) (ea))->octet[1], \
+				((struct ether_addr *) (ea))->octet[2], \
+				((struct ether_addr *) (ea))->octet[3], \
+				((struct ether_addr *) (ea))->octet[4], \
+				((struct ether_addr *) (ea))->octet[5]
+
+#define ETHER_TO_MACF(ea)	(ea).octet[0], \
+				(ea).octet[1], \
+				(ea).octet[2], \
+				(ea).octet[3], \
+				(ea).octet[4], \
+				(ea).octet[5]
+#if !defined(SIMPLE_MAC_PRINT)
+#define MACDBG "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC2STRDBG(ea) (ea)[0], (ea)[1], (ea)[2], (ea)[3], (ea)[4], (ea)[5]
+#else
+#define MACDBG "%02x:%02x:%02x"
+#define MAC2STRDBG(ea) (ea)[0], (ea)[4], (ea)[5]
+#endif /* SIMPLE_MAC_PRINT */
+
+/* bcm_format_flags() bit description structure */
+typedef struct bcm_bit_desc {
+	uint32 bit;
+	const char* name;
+} bcm_bit_desc_t;
+
+/* bcm_format_field */
+typedef struct bcm_bit_desc_ex {
+	uint32 mask;
+	const bcm_bit_desc_t *bitfield;
+} bcm_bit_desc_ex_t;
+
+/* buffer length for ethernet address from bcm_ether_ntoa() */
+#define ETHER_ADDR_STR_LEN	18	/* 18-byte buffer for an Ethernet address string */
+
+static INLINE uint32 /* 32bit word aligned xor-32 */
+bcm_compute_xor32(volatile uint32 *u32_val, int num_u32)
+{
+	int idx;
+	uint32 xor32 = 0;
+	for (idx = 0; idx < num_u32; idx++)
+		xor32 ^= *(u32_val + idx);
+	return xor32;
+}
+
+/* crypto utility function */
+/* 128-bit xor: *dst = *src1 xor *src2. dst, src1 and src2 may have any alignment */
+static INLINE void
+xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst)
+{
+	if (
+#ifdef __i386__
+	    1 ||
+#endif
+	    (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) {
+		/* ARM CM3 rel time: 1229 (727 if alignment check could be omitted) */
+		/* x86 supports unaligned. This version runs 6x-9x faster on x86. */
+		((uint32 *)dst)[0] = ((const uint32 *)src1)[0] ^ ((const uint32 *)src2)[0];
+		((uint32 *)dst)[1] = ((const uint32 *)src1)[1] ^ ((const uint32 *)src2)[1];
+		((uint32 *)dst)[2] = ((const uint32 *)src1)[2] ^ ((const uint32 *)src2)[2];
+		((uint32 *)dst)[3] = ((const uint32 *)src1)[3] ^ ((const uint32 *)src2)[3];
+	} else {
+		/* ARM CM3 rel time: 4668 (4191 if alignment check could be omitted) */
+		int k;
+		for (k = 0; k < 16; k++)
+			dst[k] = src1[k] ^ src2[k];
+	}
+}
+
+/* externs */
+/* crc */
+extern uint8 hndcrc8(uint8 *p, uint nbytes, uint8 crc);
+extern uint16 hndcrc16(uint8 *p, uint nbytes, uint16 crc);
+extern uint32 hndcrc32(uint8 *p, uint nbytes, uint32 crc);
+
+/* format/print */
+#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
+	defined(WLMSG_ASSOC)
+/* print out the value a field has: fields may have 1-32 bits and may hold any value */
+extern int bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 field, char* buf, int len);
+/* print out which bits in flags are set */
+extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len);
+#endif
+
+extern int bcm_format_hex(char *str, const void *bytes, int len);
+
+extern const char *bcm_crypto_algo_name(uint algo);
+extern char *bcm_chipname(uint chipid, char *buf, uint len);
+extern char *bcm_brev_str(uint32 brev, char *buf);
+extern void printbig(char *buf);
+extern void prhex(const char *msg, uchar *buf, uint len);
+
+/* IE parsing */
+
+/* packing is required if struct is passed across the bus */
+#include <packed_section_start.h>
+/* tag_ID/length/value_buffer tuple */
+typedef struct bcm_tlv {
+	uint8	id;
+	uint8	len;
+	uint8	data[1];
+} bcm_tlv_t;
+
+/* bcm tlv w/ 16 bit id/len */
+typedef BWL_PRE_PACKED_STRUCT struct bcm_xtlv {
+	uint16	id;
+	uint16	len;
+	uint8	data[1];
+} BWL_POST_PACKED_STRUCT bcm_xtlv_t;
+#include <packed_section_end.h>
+
+
+/* descriptor of xtlv data src or dst */
+typedef struct {
+	uint16	type;
+	uint16	len;
+	void	*ptr;	/* ptr to memory location */
+} xtlv_desc_t;
+
+/* xtlv options */
+#define BCM_XTLV_OPTION_NONE	0x0000
+#define BCM_XTLV_OPTION_ALIGN32	0x0001
+
+typedef uint16 bcm_xtlv_opts_t;
+struct bcm_xtlvbuf {
+	bcm_xtlv_opts_t opts;
+	uint16 size;
+	uint8 *head;	/* point to head of buffer */
+	uint8 *buf;	/* current position of buffer */
+	/* allocated buffer may follow, but not necessarily */
+};
+typedef struct bcm_xtlvbuf bcm_xtlvbuf_t;
+
+#define BCM_TLV_MAX_DATA_SIZE	(255)
+#define BCM_XTLV_MAX_DATA_SIZE	(65535)
+#define BCM_TLV_HDR_SIZE	(OFFSETOF(bcm_tlv_t, data))
+
+#define BCM_XTLV_HDR_SIZE	(OFFSETOF(bcm_xtlv_t, data))
+/* LEN only stores the value's length without padding */
+#define BCM_XTLV_LEN(elt)	ltoh16_ua(&(elt->len))
+#define BCM_XTLV_ID(elt)	ltoh16_ua(&(elt->id))
+/* entire size of the XTLV including header, data, and optional padding */
+#define BCM_XTLV_SIZE(elt, opts)	bcm_xtlv_size(elt, opts)
+#define bcm_valid_xtlv(elt, buflen, opts)	(elt && ((int)(buflen) >= (int)BCM_XTLV_SIZE(elt, opts)))
+
+/* Check that bcm_tlv_t fits into the given buflen */
+#define bcm_valid_tlv(elt, buflen) (\
+	((int)(buflen) >= (int)BCM_TLV_HDR_SIZE) && \
+	((int)(buflen) >= (int)(BCM_TLV_HDR_SIZE +
(elt)->len))) + + +extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen); +extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key); +extern bcm_tlv_t *bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen); + +extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key); + +extern bcm_tlv_t *bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type, + int type_len); + +extern uint8 *bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst); +extern uint8 *bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst, + int dst_maxlen); + +extern uint8 *bcm_copy_tlv(const void *src, uint8 *dst); +extern uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen); + +/* xtlv */ + +/* return the next xtlv element, and update buffer len (remaining). Buffer length + * updated includes padding as specified by options + */ +extern bcm_xtlv_t *bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen, bcm_xtlv_opts_t opts); + +/* initialize an xtlv buffer. Use options specified for packing/unpacking using + * the buffer. Caller is responsible for allocating both buffers. + */ +extern int bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len, + bcm_xtlv_opts_t opts); + +extern uint16 bcm_xtlv_buf_len(struct bcm_xtlvbuf *tbuf); +extern uint16 bcm_xtlv_buf_rlen(struct bcm_xtlvbuf *tbuf); +extern uint8 *bcm_xtlv_buf(struct bcm_xtlvbuf *tbuf); +extern uint8 *bcm_xtlv_head(struct bcm_xtlvbuf *tbuf); +extern int bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const void *data, uint16 dlen); +extern int bcm_xtlv_put_8(bcm_xtlvbuf_t *tbuf, uint16 type, const int8 data); +extern int bcm_xtlv_put_16(bcm_xtlvbuf_t *tbuf, uint16 type, const int16 data); +extern int bcm_xtlv_put_32(bcm_xtlvbuf_t *tbuf, uint16 type, const int32 data); +extern int bcm_unpack_xtlv_entry(uint8 **buf, uint16 xpct_type, uint16 xpct_len, + void *dst, bcm_xtlv_opts_t opts); +extern int bcm_pack_xtlv_entry(uint8 **buf, uint16 *buflen, uint16 type, uint16 len, + void *src, bcm_xtlv_opts_t opts); +extern int bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts); + +/* callback for unpacking xtlv from a buffer into context. */ +typedef int (bcm_xtlv_unpack_cbfn_t)(void *ctx, uint8 *buf, uint16 type, uint16 len); + +/* unpack a tlv buffer using buffer, options, and callback */ +extern int bcm_unpack_xtlv_buf(void *ctx, uint8 *buf, uint16 buflen, + bcm_xtlv_opts_t opts, bcm_xtlv_unpack_cbfn_t *cbfn); + +/* unpack a set of tlvs from the buffer using provided xtlv desc */ +extern int bcm_unpack_xtlv_buf_to_mem(void *buf, int *buflen, xtlv_desc_t *items, + bcm_xtlv_opts_t opts); + +/* pack a set of tlvs into buffer using provided xtlv desc */ +extern int bcm_pack_xtlv_buf_from_mem(void **buf, uint16 *buflen, xtlv_desc_t *items, + bcm_xtlv_opts_t opts); + +/* return data pointer of a given ID from xtlv buffer + * xtlv data length is given to *datalen_out, if the pointer is valid + */ +extern void *bcm_get_data_from_xtlv_buf(uint8 *tlv_buf, uint16 buflen, uint16 id, + uint16 *datalen_out, bcm_xtlv_opts_t opts); + +/* callback to return next tlv id and len to pack, if there is more tlvs to come and + * options e.g. 
alignment
+ */
+typedef bool (*bcm_pack_xtlv_next_info_cbfn_t)(void *ctx, uint16 *tlv_id, uint16 *tlv_len);
+
+/* callback to pack the tlv into length validated buffer */
+typedef void (*bcm_pack_xtlv_pack_next_cbfn_t)(void *ctx,
+	uint16 tlv_id, uint16 tlv_len, uint8* buf);
+
+/* pack a set of tlvs into buffer using get_next to iterate */
+int bcm_pack_xtlv_buf(void *ctx, void *tlv_buf, uint16 buflen,
+	bcm_xtlv_opts_t opts, bcm_pack_xtlv_next_info_cbfn_t get_next,
+	bcm_pack_xtlv_pack_next_cbfn_t pack_next, int *outlen);
+
+/* bcmerror */
+extern const char *bcmerrorstr(int bcmerror);
+
+/* multi-bool data type: set of bools, mbool is true if any is set */
+typedef uint32 mbool;
+#define mboolset(mb, bit)		((mb) |= (bit))		/* set one bool */
+#define mboolclr(mb, bit)		((mb) &= ~(bit))	/* clear one bool */
+#define mboolisset(mb, bit)		(((mb) & (bit)) != 0)	/* TRUE if one bool is set */
+#define	mboolmaskset(mb, mask, val)	((mb) = (((mb) & ~(mask)) | (val)))
+
+/* generic datastruct to help dump routines */
+struct fielddesc {
+	const char *nameandfmt;
+	uint32 offset;
+	uint32 len;
+};
+
+extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size);
+extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline,
+	const uint8 *buf, int len);
+
+extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount);
+extern int bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes);
+extern void bcm_print_bytes(const char *name, const uchar *cdata, int len);
+
+typedef uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset);
+extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str,
+	char *buf, uint32 bufsize);
+extern uint bcm_bitcount(uint8 *bitmap, uint bytelength);
+
+extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
+
+/* power conversion */
+extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
+extern uint8 bcm_mw_to_qdbm(uint16 mw);
+extern uint bcm_mkiovar(const char *name, char *data, uint datalen, char *buf, uint len);
+
+unsigned int process_nvram_vars(char *varbuf, unsigned int len);
+
+/* trace any object allocation / free, with / without features (flags) set to the object */
+
+#define BCM_OBJDBG_ADD			1
+#define BCM_OBJDBG_REMOVE		2
+#define BCM_OBJDBG_ADD_PKT		3
+
+/* object feature: set or clear flags */
+#define BCM_OBJECT_FEATURE_FLAG		1
+#define BCM_OBJECT_FEATURE_PKT_STATE	2
+/* object feature: flag bits */
+#define BCM_OBJECT_FEATURE_0		(1 << 0)
+#define BCM_OBJECT_FEATURE_1		(1 << 1)
+#define BCM_OBJECT_FEATURE_2		(1 << 2)
+/* object feature: clear flag bits field set with this flag */
+#define BCM_OBJECT_FEATURE_CLEAR	(1 << 31)
+#ifdef BCM_OBJECT_TRACE
+#define bcm_pkt_validate_chk(obj) do { \
+	void * pkttag; \
+	bcm_object_trace_chk(obj, 0, 0, \
+		__FUNCTION__, __LINE__); \
+	if ((pkttag = PKTTAG(obj))) { \
+		bcm_object_trace_chk(obj, 1, DHD_PKTTAG_SN(pkttag), \
+			__FUNCTION__, __LINE__); \
+	} \
+} while (0)
+extern void bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line);
+extern void bcm_object_trace_upd(void *obj, void *obj_new);
+extern void bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn,
+	const char *caller, int line);
+extern void bcm_object_feature_set(void *obj, uint32 type, uint32 value);
+extern int bcm_object_feature_get(void *obj, uint32 type, uint32 value);
+extern void bcm_object_trace_init(void);
+extern void bcm_object_trace_deinit(void);
+#else
+#define bcm_pkt_validate_chk(obj)
+#define bcm_object_trace_opr(a, b, c, d)
+#define
+/* generic datastruct to help dump routines */
+struct fielddesc {
+	const char *nameandfmt;
+	uint32 offset;
+	uint32 len;
+};
+
+extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size);
+extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline,
+	const uint8 *buf, int len);
+
+extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount);
+extern int bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes);
+extern void bcm_print_bytes(const char *name, const uchar *cdata, int len);
+
+typedef uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset);
+extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str,
+	char *buf, uint32 bufsize);
+extern uint bcm_bitcount(uint8 *bitmap, uint bytelength);
+
+extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
+
+/* power conversion */
+extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
+extern uint8 bcm_mw_to_qdbm(uint16 mw);
+extern uint bcm_mkiovar(const char *name, char *data, uint datalen, char *buf, uint len);
+
+unsigned int process_nvram_vars(char *varbuf, unsigned int len);
+
+/* trace any object allocation / free, with / without features (flags) set to the object */
+
+#define BCM_OBJDBG_ADD		1
+#define BCM_OBJDBG_REMOVE	2
+#define BCM_OBJDBG_ADD_PKT	3
+
+/* object feature: set or clear flags */
+#define BCM_OBJECT_FEATURE_FLAG		1
+#define BCM_OBJECT_FEATURE_PKT_STATE	2
+/* object feature: flag bits */
+#define BCM_OBJECT_FEATURE_0	(1 << 0)
+#define BCM_OBJECT_FEATURE_1	(1 << 1)
+#define BCM_OBJECT_FEATURE_2	(1 << 2)
+/* object feature: clear flag bits field set with this flag */
+#define BCM_OBJECT_FEATURE_CLEAR	(1 << 31)
+#ifdef BCM_OBJECT_TRACE
+#define bcm_pkt_validate_chk(obj) do { \
+	void * pkttag; \
+	bcm_object_trace_chk(obj, 0, 0, \
+		__FUNCTION__, __LINE__); \
+	if ((pkttag = PKTTAG(obj))) { \
+		bcm_object_trace_chk(obj, 1, DHD_PKTTAG_SN(pkttag), \
+			__FUNCTION__, __LINE__); \
+	} \
+} while (0)
+extern void bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line);
+extern void bcm_object_trace_upd(void *obj, void *obj_new);
+extern void bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn,
+	const char *caller, int line);
+extern void bcm_object_feature_set(void *obj, uint32 type, uint32 value);
+extern int bcm_object_feature_get(void *obj, uint32 type, uint32 value);
+extern void bcm_object_trace_init(void);
+extern void bcm_object_trace_deinit(void);
+#else
+#define bcm_pkt_validate_chk(obj)
+#define bcm_object_trace_opr(a, b, c, d)
+#define bcm_object_trace_upd(a, b)
+#define bcm_object_trace_chk(a, b, c, d, e)
+#define bcm_object_feature_set(a, b, c)
+#define bcm_object_feature_get(a, b, c)
+#define bcm_object_trace_init()
+#define bcm_object_trace_deinit()
+#endif /* BCM_OBJECT_TRACE */
+
+/* calculate a * b + c */
+extern void bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c);
+/* calculate a / b */
+extern void bcm_uint64_divide(uint32* r, uint32 a_high, uint32 a_low, uint32 b);
+
+
+/* Public domain bit twiddling hacks/utilities: Sean Eron Anderson */
+
+/* Table driven count set bits. */
+static const uint8 /* Table only for use by bcm_cntsetbits */
+_CSBTBL[256] =
+{
+#	define B2(n)	n,     n + 1,     n + 1,     n + 2
+#	define B4(n)	B2(n), B2(n + 1), B2(n + 1), B2(n + 2)
+#	define B6(n)	B4(n), B4(n + 1), B4(n + 1), B4(n + 2)
+	B6(0), B6(0 + 1), B6(0 + 1), B6(0 + 2)
+};
+
+static INLINE uint32 /* Uses table _CSBTBL for fast counting of 1's in a u32 */
+bcm_cntsetbits(const uint32 u32arg)
+{
+	/* function local scope declaration of const _CSBTBL[] */
+	const uint8 * p = (const uint8 *)&u32arg;
+	return (_CSBTBL[p[0]] + _CSBTBL[p[1]] + _CSBTBL[p[2]] + _CSBTBL[p[3]]);
+}
+
+
+static INLINE int /* C equivalent count of leading 0's in a u32 */
+C_bcm_count_leading_zeros(uint32 u32arg)
+{
+	int shifts = 0;
+	while (u32arg) {
+		shifts++; u32arg >>= 1;
+	}
+	return (32U - shifts);
+}
+
+#ifdef BCMDRIVER
+/*
+ * Assembly instructions: Count Leading Zeros
+ * "clz"	: MIPS, ARM
+ * "cntlzw"	: PowerPC
+ * "BSF"	: x86
+ * "lzcnt"	: AMD, SPARC
+ */
+
+#if defined(__arm__)
+#if defined(__ARM_ARCH_7M__) /* Cortex M3 */
+#define __USE_ASM_CLZ__
+#endif /* __ARM_ARCH_7M__ */
+#if defined(__ARM_ARCH_7R__) /* Cortex R4 */
+#define __USE_ASM_CLZ__
+#endif /* __ARM_ARCH_7R__ */
+#endif /* __arm__ */
+
+static INLINE int
+bcm_count_leading_zeros(uint32 u32arg)
+{
+#if defined(__USE_ASM_CLZ__)
+	int zeros;
+	__asm__ volatile("clz %0, %1 \n" : "=r" (zeros) : "r" (u32arg));
+	return zeros;
+#else	/* C equivalent */
+	return C_bcm_count_leading_zeros(u32arg);
+#endif	/* C equivalent */
+}
+
+/*
+ * Macro to count leading zeroes
+ *
+ */
+#if defined(__GNUC__)
+#define CLZ(x) __builtin_clzl(x)
+#elif defined(__arm__)
+#define CLZ(x) __clz(x)
+#else
+#define CLZ(x) bcm_count_leading_zeros(x)
+#endif /* __GNUC__ */
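
The two bit helpers above in action, e.g. for finding the highest set bit (illustrative only, not part of this patch; the values in the comments follow directly from the definitions):

static void my_bit_helpers_demo(void)
{
	uint32 v = 0x00F00001;

	/* 0x00F00001 has five bits set (bits 0 and 20-23) */
	uint32 ones = bcm_cntsetbits(v);	/* -> 5 */

	/* highest set bit is bit 23, so 8 leading zeros */
	int lz = C_bcm_count_leading_zeros(v);	/* -> 8 */
	int msb = 31 - lz;			/* -> 23 */

	(void)ones; (void)msb;
}
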
+/* INTERFACE: Multiword bitmap based small id allocator. */
+struct bcm_mwbmap;	/* forward declaration for use as an opaque mwbmap handle */
+
+#define BCM_MWBMAP_INVALID_HDL	((struct bcm_mwbmap *)NULL)
+#define BCM_MWBMAP_INVALID_IDX	((uint32)(~0U))
+
+/* Incarnate a multiword bitmap based small index allocator */
+extern struct bcm_mwbmap * bcm_mwbmap_init(osl_t * osh, uint32 items_max);
+
+/* Free up the multiword bitmap index allocator */
+extern void bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl);
+
+/* Allocate a unique small index using a multiword bitmap index allocator */
+extern uint32 bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl);
+
+/* Force an index at a specified position to be in use */
+extern void bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Free a previously allocated index back into the multiword bitmap allocator */
+extern void bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Fetch the total number of free indices in the multiword bitmap allocator */
+extern uint32 bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl);
+
+/* Determine whether an index is in use or free */
+extern bool bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Debug dump a multiword bitmap allocator */
+extern void bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl);
+
+extern void bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl);
+/* End - Multiword bitmap based small Id allocator. */
+
+
+/* INTERFACE: Simple unique 16bit Id Allocator using a stack implementation. */
+
+#define ID16_INVALID	((uint16)(~0))
+#define ID16_UNDEFINED	(ID16_INVALID)
+
+/*
+ * Construct a 16bit id allocator, managing 16bit ids in the range:
+ * [start_val16 .. start_val16+total_ids)
+ * Note: start_val16 is inclusive.
+ * Returns an opaque handle to the 16bit id allocator.
+ */
+extern void * id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16);
+extern void * id16_map_fini(osl_t *osh, void * id16_map_hndl);
+extern void id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16);
+
+/* Allocate a unique 16bit id */
+extern uint16 id16_map_alloc(void * id16_map_hndl);
+
+/* Free a 16bit id value into the id16 allocator */
+extern void id16_map_free(void * id16_map_hndl, uint16 val16);
+
+/* Get the number of failures encountered during id allocation. */
+extern uint32 id16_map_failures(void * id16_map_hndl);
+
+/* Audit the 16bit id allocator state. */
+extern bool id16_map_audit(void * id16_map_hndl);
+/* End - Simple 16bit Id Allocator. */
+#endif /* BCMDRIVER */
+
+extern void bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b);
+
+void bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset);
+void bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset);
+
+/* calculate checksum for ip header, tcp / udp header / data */
+uint16 bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum);
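
A typical lifecycle of the 16bit id allocator declared above (illustrative only, not part of this patch; osh stands in for a valid osl_t handle from osl.h):

static void my_id16_demo(osl_t *osh)
{
	/* manage 32 ids in the range [0x100 .. 0x120) */
	void *map = id16_map_init(osh, 32, 0x100);
	uint16 id;

	if (map == NULL)
		return;

	id = id16_map_alloc(map);	/* ID16_INVALID once exhausted */
	if (id != ID16_INVALID)
		id16_map_free(map, id);	/* return it to the pool */

	map = id16_map_fini(osh, map);	/* returns NULL once freed */
}
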
+#ifndef _dll_t_
+#define _dll_t_
+/*
+ * -----------------------------------------------------------------------------
+ * Double Linked List Macros
+ * -----------------------------------------------------------------------------
+ *
+ * All dll operations must be performed on a pre-initialized node.
+ * Inserting an uninitialized node into a list effectively initializes it.
+ *
+ * When a node is deleted from a list, you may initialize it to avoid corruption
+ * incurred by double deletion. You may skip initialization if the node is
+ * immediately inserted into another list.
+ *
+ * By placing a dll_t element at the start of a struct, you may cast a dll_t *
+ * to the struct or vice versa.
+ *
+ * Example of declaring and initializing someList and inserting nodeA, nodeB
+ *
+ *     typedef struct item {
+ *         dll_t node;
+ *         int someData;
+ *     } Item_t;
+ *     Item_t nodeA, nodeB, nodeC;
+ *     nodeA.someData = 11111, nodeB.someData = 22222, nodeC.someData = 33333;
+ *
+ *     dll_t someList;
+ *     dll_init(&someList);
+ *
+ *     dll_append(&someList, (dll_t *) &nodeA);
+ *     dll_prepend(&someList, &nodeB.node);
+ *     dll_insert((dll_t *)&nodeC, &nodeA.node);
+ *
+ *     dll_delete((dll_t *) &nodeB);
+ *
+ * Example of a for loop to walk someList of node_p
+ *
+ *     extern void mydisplay(Item_t * item_p);
+ *
+ *     dll_t * item_p, * next_p;
+ *     for (item_p = dll_head_p(&someList); ! dll_end(&someList, item_p);
+ *          item_p = next_p)
+ *     {
+ *         next_p = dll_next_p(item_p);
+ *         ... use item_p at will, including removing it from list ...
+ *         mydisplay((Item_t *)item_p);
+ *     }
+ *
+ * -----------------------------------------------------------------------------
+ */
+typedef struct dll {
+	struct dll * next_p;
+	struct dll * prev_p;
+} dll_t;
+
+static INLINE void
+dll_init(dll_t *node_p)
+{
+	node_p->next_p = node_p;
+	node_p->prev_p = node_p;
+}
+/* dll helpers returning a pointer to dll_t */
+
+static INLINE dll_t *
+dll_head_p(dll_t *list_p)
+{
+	return list_p->next_p;
+}
+
+
+static INLINE dll_t *
+dll_tail_p(dll_t *list_p)
+{
+	return (list_p)->prev_p;
+}
+
+
+static INLINE dll_t *
+dll_next_p(dll_t *node_p)
+{
+	return (node_p)->next_p;
+}
+
+
+static INLINE dll_t *
+dll_prev_p(dll_t *node_p)
+{
+	return (node_p)->prev_p;
+}
+
+
+static INLINE bool
+dll_empty(dll_t *list_p)
+{
+	return ((list_p)->next_p == (list_p));
+}
+
+
+static INLINE bool
+dll_end(dll_t *list_p, dll_t * node_p)
+{
+	return (list_p == node_p);
+}
+
+
+/* inserts the node new_p "after" the node at_p */
+static INLINE void
+dll_insert(dll_t *new_p, dll_t * at_p)
+{
+	new_p->next_p = at_p->next_p;
+	new_p->prev_p = at_p;
+	at_p->next_p = new_p;
+	(new_p->next_p)->prev_p = new_p;
+}
+
+static INLINE void
+dll_append(dll_t *list_p, dll_t *node_p)
+{
+	dll_insert(node_p, dll_tail_p(list_p));
+}
+
+static INLINE void
+dll_prepend(dll_t *list_p, dll_t *node_p)
+{
+	dll_insert(node_p, list_p);
+}
+
+
+/* deletes a node from any list that it "may" be in, if at all. */
+static INLINE void
+dll_delete(dll_t *node_p)
+{
+	node_p->prev_p->next_p = node_p->next_p;
+	node_p->next_p->prev_p = node_p->prev_p;
+}
+#endif /* ! defined(_dll_t_) */
+
+/* Elements managed in a double linked list */
+
+typedef struct dll_pool {
+	dll_t		free_list;
+	uint16		free_count;
+	uint16		elems_max;
+	uint16		elem_size;
+	dll_t		elements[1];
+} dll_pool_t;
+
+dll_pool_t * dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size);
+void * dll_pool_alloc(dll_pool_t * dll_pool_p);
+void dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p);
+void dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p);
+typedef void (* dll_elem_dump)(void * elem_p);
+void dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size);
+
+#ifdef __cplusplus
+	}
+#endif
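
A sketch of the dll_pool API above, which hands out fixed-size elements from a preallocated pool (illustrative only, not part of this patch; MyElem_t and the pool size are hypothetical):

typedef struct my_elem {
	dll_t node;	/* first member, so the pool can link free elements */
	int payload;
} MyElem_t;

static void my_dll_pool_demo(void *osh)
{
	/* a pool of 16 elements, each sizeof(MyElem_t) bytes */
	dll_pool_t *pool = dll_pool_init(osh, 16, sizeof(MyElem_t));
	MyElem_t *e;

	if (pool == NULL)
		return;

	e = (MyElem_t *)dll_pool_alloc(pool);
	if (e != NULL) {
		e->payload = 42;
		dll_pool_free(pool, e);	/* back onto the free list */
	}
	dll_pool_detach(osh, pool, 16, sizeof(MyElem_t));
}
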
+/* #define DEBUG_COUNTER */
+#ifdef DEBUG_COUNTER
+#define CNTR_TBL_MAX 10
+typedef struct _counter_tbl_t {
+	char name[16];			/* name of this counter table */
+	uint32 prev_log_print;		/* Internal use. Timestamp of the previous log print */
+	uint log_print_interval;	/* Desired interval to print logs in ms */
+	uint needed_cnt;		/* How many counters need to be used */
+	uint32 cnt[CNTR_TBL_MAX];	/* Counting entries to increase at desired places */
+	bool enabled;			/* Whether to enable printing log */
+} counter_tbl_t;
+
+
+void counter_printlog(counter_tbl_t *ctr_tbl);
+#endif /* DEBUG_COUNTER */
+
+/* Given a number 'n', returns the smallest power of 2 that is >= n */
+static INLINE uint32 next_larger_power2(uint32 num)
+{
+	num--;
+	num |= (num >> 1);
+	num |= (num >> 2);
+	num |= (num >> 4);
+	num |= (num >> 8);
+	num |= (num >> 16);
+	return (num + 1);
+}
+
+#endif	/* _bcmutils_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h b/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h
new file mode 100644
index 000000000000..888863117105
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/brcm_nl80211.h
@@ -0,0 +1,68 @@
+/*
+ * Definitions for nl80211 vendor command/event access to host driver
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: brcm_nl80211.h 556083 2015-05-12 14:03:00Z $
+ *
+ */
+
+#ifndef _brcm_nl80211_h_
+#define _brcm_nl80211_h_
+
+#define OUI_BRCM  0x001018
+
+enum wl_vendor_subcmd {
+	BRCM_VENDOR_SCMD_UNSPEC,
+	BRCM_VENDOR_SCMD_PRIV_STR,
+	BRCM_VENDOR_SCMD_BCM_STR
+};
+
+
+struct bcm_nlmsg_hdr {
+	uint cmd;	/* common ioctl definition */
+	int len;	/* expected return buffer length */
+	uint offset;	/* user buffer offset */
+	uint set;	/* get or set request optional */
+	uint magic;	/* magic number for verification */
+};
+
+enum bcmnl_attrs {
+	BCM_NLATTR_UNSPEC,
+
+	BCM_NLATTR_LEN,
+	BCM_NLATTR_DATA,
+
+	__BCM_NLATTR_AFTER_LAST,
+	BCM_NLATTR_MAX = __BCM_NLATTR_AFTER_LAST - 1
+};
+
+struct nl_prv_data {
+	int err;			/* return result */
+	void *data;			/* ioctl return buffer pointer */
+	uint len;			/* ioctl return buffer length */
+	struct bcm_nlmsg_hdr *nlioc;	/* bcm_nlmsg_hdr header pointer */
+};
+
+#endif /* _brcm_nl80211_h_ */
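
A minimal sketch (illustrative only, not part of this patch) of how a userspace tool might prepend bcm_nlmsg_hdr to an ioctl payload before sending it as an nl80211 vendor command. MY_NLIOC_MAGIC and the command id are placeholders, not values defined by this header; uint/uint8 come from typedefs.h:

#include <string.h>

#define MY_NLIOC_MAGIC 0x12345678	/* placeholder; the driver defines the real magic */

static size_t my_fill_nlioc(uint8 *out, uint cmd, const void *param, uint plen)
{
	struct bcm_nlmsg_hdr *hdr = (struct bcm_nlmsg_hdr *)out;

	hdr->cmd = cmd;			/* ioctl number, e.g. WLC_GET_VAR */
	hdr->len = (int)plen;		/* expected return buffer length */
	hdr->offset = sizeof(*hdr);	/* payload starts right after the header */
	hdr->set = 0;			/* 0 = get, 1 = set */
	hdr->magic = MY_NLIOC_MAGIC;

	memcpy(out + sizeof(*hdr), param, plen);
	return sizeof(*hdr) + plen;
}
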
diff --git a/drivers/net/wireless/bcmdhd/include/dbus.h b/drivers/net/wireless/bcmdhd/include/dbus.h
new file mode 100644
index 000000000000..b066c67a5dad
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/dbus.h
@@ -0,0 +1,591 @@
+/*
+ * Dongle BUS interface Abstraction layer
+ * for target serial buses like USB, SDIO, SPI, etc.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: dbus.h 553311 2015-04-29 10:23:08Z $
+ */
+
+#ifndef __DBUS_H__
+#define __DBUS_H__
+
+#include "typedefs.h"
+
+#define DBUSTRACE(args)
+#define DBUSERR(args)
+#define DBUSINFO(args)
+#define DBUSDBGLOCK(args)
+
+enum {
+	DBUS_OK = 0,
+	DBUS_ERR = -200,
+	DBUS_ERR_TIMEOUT,
+	DBUS_ERR_DISCONNECT,
+	DBUS_ERR_NODEVICE,
+	DBUS_ERR_UNSUPPORTED,
+	DBUS_ERR_PENDING,
+	DBUS_ERR_NOMEM,
+	DBUS_ERR_TXFAIL,
+	DBUS_ERR_TXTIMEOUT,
+	DBUS_ERR_TXDROP,
+	DBUS_ERR_RXFAIL,
+	DBUS_ERR_RXDROP,
+	DBUS_ERR_TXCTLFAIL,
+	DBUS_ERR_RXCTLFAIL,
+	DBUS_ERR_REG_PARAM,
+	DBUS_STATUS_CANCELLED,
+	DBUS_ERR_NVRAM,
+	DBUS_JUMBO_NOMATCH,
+	DBUS_JUMBO_BAD_FORMAT,
+	DBUS_NVRAM_NONTXT,
+	DBUS_ERR_RXZLP
+};
+
+#define BCM_OTP_SIZE_43236	84		/* number of 16 bit values */
+#define BCM_OTP_SW_RGN_43236	24		/* start offset of SW config region */
+#define BCM_OTP_ADDR_43236	0x18000800	/* address of otp base */
+
+#define ERR_CBMASK_TXFAIL	0x00000001
+#define ERR_CBMASK_RXFAIL	0x00000002
+#define ERR_CBMASK_ALL		0xFFFFFFFF
+
+#define DBUS_CBCTL_WRITE	0
+#define DBUS_CBCTL_READ		1
+#if defined(INTR_EP_ENABLE)
+#define DBUS_CBINTR_POLL	2
+#endif /* defined(INTR_EP_ENABLE) */
+
+#define DBUS_TX_RETRY_LIMIT	3	/* retries for failed txirb */
+#define DBUS_TX_TIMEOUT_INTERVAL	250	/* timeout for txirb complete, in ms */
+
+#define DBUS_BUFFER_SIZE_TX	32000
+#define DBUS_BUFFER_SIZE_RX	24000
+
+#define DBUS_BUFFER_SIZE_TX_NOAGG	2048
+#define DBUS_BUFFER_SIZE_RX_NOAGG	2048
+
+/** DBUS types */
+enum {
+	DBUS_USB,
+	DBUS_SDIO,
+	DBUS_SPI,
+	DBUS_UNKNOWN
+};
+
+enum dbus_state {
+	DBUS_STATE_DL_PENDING,
+	DBUS_STATE_DL_DONE,
+	DBUS_STATE_UP,
+	DBUS_STATE_DOWN,
+	DBUS_STATE_PNP_FWDL,
+	DBUS_STATE_DISCONNECT,
+	DBUS_STATE_SLEEP,
+	DBUS_STATE_DL_NEEDED
+};
+
+enum dbus_pnp_state {
+	DBUS_PNP_DISCONNECT,
+	DBUS_PNP_SLEEP,
+	DBUS_PNP_RESUME
+};
+
+enum dbus_file {
+	DBUS_FIRMWARE,
+	DBUS_NVFILE
+};
+
+typedef enum _DEVICE_SPEED {
+	INVALID_SPEED = -1,
+	LOW_SPEED = 1,	/**< USB 1.1: 1.5 Mbps */
+	FULL_SPEED,	/**< USB 1.1: 12 Mbps */
+	HIGH_SPEED,	/**< USB 2.0: 480 Mbps */
+	SUPER_SPEED,	/**< USB 3.0: 4.8 Gbps */
+} DEVICE_SPEED;
+
+typedef struct {
+	int bustype;
+	int vid;
+	int pid;
+	int devid;
+	int chiprev;	/**< chip revision number */
+	int mtu;
+	int nchan;	/**< Data Channels */
+	int has_2nd_bulk_in_ep;
+} dbus_attrib_t;
+
+/* FIX: Account for errors related to DBUS;
+ * Let upper layer account for
packets/bytes + */ +typedef struct { + uint32 rx_errors; + uint32 tx_errors; + uint32 rx_dropped; + uint32 tx_dropped; +} dbus_stats_t; + +/** + * Configurable BUS parameters + */ +enum { + DBUS_CONFIG_ID_RXCTL_DEFERRES = 1, + DBUS_CONFIG_ID_AGGR_LIMIT +}; + +typedef struct { + uint32 config_id; + union { + bool rxctl_deferrespok; + struct { + int maxrxsf; + int maxrxsize; + int maxtxsf; + int maxtxsize; + } aggr_param; + }; +} dbus_config_t; + +/** + * External Download Info + */ +typedef struct dbus_extdl { + uint8 *fw; + int fwlen; + uint8 *vars; + int varslen; +} dbus_extdl_t; + +struct dbus_callbacks; +struct exec_parms; + +typedef void *(*probe_cb_t)(void *arg, const char *desc, uint32 bustype, uint32 hdrlen); +typedef void (*disconnect_cb_t)(void *arg); +typedef void *(*exec_cb_t)(struct exec_parms *args); + +/** Client callbacks registered during dbus_attach() */ +typedef struct dbus_callbacks { + void (*send_complete)(void *cbarg, void *info, int status); + void (*recv_buf)(void *cbarg, uint8 *buf, int len); + void (*recv_pkt)(void *cbarg, void *pkt); + void (*txflowcontrol)(void *cbarg, bool onoff); + void (*errhandler)(void *cbarg, int err); + void (*ctl_complete)(void *cbarg, int type, int status); + void (*state_change)(void *cbarg, int state); + void *(*pktget)(void *cbarg, uint len, bool send); + void (*pktfree)(void *cbarg, void *p, bool send); +} dbus_callbacks_t; + +struct dbus_pub; +struct bcmstrbuf; +struct dbus_irb; +struct dbus_irb_rx; +struct dbus_irb_tx; +struct dbus_intf_callbacks; + +typedef struct { + void* (*attach)(struct dbus_pub *pub, void *cbarg, struct dbus_intf_callbacks *cbs); + void (*detach)(struct dbus_pub *pub, void *bus); + + int (*up)(void *bus); + int (*down)(void *bus); + int (*send_irb)(void *bus, struct dbus_irb_tx *txirb); + int (*recv_irb)(void *bus, struct dbus_irb_rx *rxirb); + int (*cancel_irb)(void *bus, struct dbus_irb_tx *txirb); + int (*send_ctl)(void *bus, uint8 *buf, int len); + int (*recv_ctl)(void *bus, uint8 *buf, int len); + int (*get_stats)(void *bus, dbus_stats_t *stats); + int (*get_attrib)(void *bus, dbus_attrib_t *attrib); + + int (*pnp)(void *bus, int evnt); + int (*remove)(void *bus); + int (*resume)(void *bus); + int (*suspend)(void *bus); + int (*stop)(void *bus); + int (*reset)(void *bus); + + /* Access to bus buffers directly */ + void *(*pktget)(void *bus, int len); + void (*pktfree)(void *bus, void *pkt); + + int (*iovar_op)(void *bus, const char *name, void *params, int plen, void *arg, int len, + bool set); + void (*dump)(void *bus, struct bcmstrbuf *strbuf); + int (*set_config)(void *bus, dbus_config_t *config); + int (*get_config)(void *bus, dbus_config_t *config); + + bool (*device_exists)(void *bus); + bool (*dlneeded)(void *bus); + int (*dlstart)(void *bus, uint8 *fw, int len); + int (*dlrun)(void *bus); + bool (*recv_needed)(void *bus); + + void *(*exec_rxlock)(void *bus, exec_cb_t func, struct exec_parms *args); + void *(*exec_txlock)(void *bus, exec_cb_t func, struct exec_parms *args); + + int (*tx_timer_init)(void *bus); + int (*tx_timer_start)(void *bus, uint timeout); + int (*tx_timer_stop)(void *bus); + + int (*sched_dpc)(void *bus); + int (*lock)(void *bus); + int (*unlock)(void *bus); + int (*sched_probe_cb)(void *bus); + + int (*shutdown)(void *bus); + + int (*recv_stop)(void *bus); + int (*recv_resume)(void *bus); + + int (*recv_irb_from_ep)(void *bus, struct dbus_irb_rx *rxirb, uint ep_idx); + + int (*readreg)(void *bus, uint32 regaddr, int datalen, uint32 *value); + + /* Add from the bottom */ +} 
dbus_intf_t;
+
+typedef struct dbus_pub {
+	struct osl_info *osh;
+	dbus_stats_t stats;
+	dbus_attrib_t attrib;
+	enum dbus_state busstate;
+	DEVICE_SPEED device_speed;
+	int ntxq, nrxq, rxsize;
+	void *bus;
+	struct shared_info *sh;
+	void *dev_info;
+} dbus_pub_t;
+
+#define BUS_INFO(bus, type) (((type *) bus)->pub->bus)
+
+#define	ALIGNED_LOCAL_VARIABLE(var, align)		\
+	uint8	buffer[SDALIGN+64];			\
+	uint8	*var = (uint8 *)(((uintptr)&buffer[0]) & ~(align-1)) + align;
+
+/*
+ * Public Bus Function Interface
+ */
+
+/*
+ * FIX: Is there a better way to pass OS/Host handles to DBUS but still
+ * maintain a common interface for all OSes?
+ * Under NDIS, param1 needs to be MiniportHandle
+ * For NDIS60, param2 is WdfDevice
+ * Under Linux, param1 and param2 are NULL;
+ */
+extern int dbus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg,
+	void *param1, void *param2);
+extern int dbus_deregister(void);
+
+extern dbus_pub_t *dbus_attach(struct osl_info *osh, int rxsize, int nrxq, int ntxq,
+	void *cbarg, dbus_callbacks_t *cbs, dbus_extdl_t *extdl, struct shared_info *sh);
+extern void dbus_detach(dbus_pub_t *pub);
+
+extern int dbus_download_firmware(dbus_pub_t *pub);
+extern int dbus_up(dbus_pub_t *pub);
+extern int dbus_down(dbus_pub_t *pub);
+extern int dbus_stop(dbus_pub_t *pub);
+extern int dbus_shutdown(dbus_pub_t *pub);
+extern void dbus_flowctrl_rx(dbus_pub_t *pub, bool on);
+
+extern int dbus_send_txdata(dbus_pub_t *dbus, void *pktbuf);
+extern int dbus_send_buf(dbus_pub_t *pub, uint8 *buf, int len, void *info);
+extern int dbus_send_pkt(dbus_pub_t *pub, void *pkt, void *info);
+extern int dbus_send_ctl(dbus_pub_t *pub, uint8 *buf, int len);
+extern int dbus_recv_ctl(dbus_pub_t *pub, uint8 *buf, int len);
+extern int dbus_recv_bulk(dbus_pub_t *pub, uint32 ep_idx);
+extern int dbus_poll_intr(dbus_pub_t *pub);
+extern int dbus_get_stats(dbus_pub_t *pub, dbus_stats_t *stats);
+extern int dbus_get_attrib(dbus_pub_t *pub, dbus_attrib_t *attrib);
+extern int dbus_get_device_speed(dbus_pub_t *pub);
+extern int dbus_set_config(dbus_pub_t *pub, dbus_config_t *config);
+extern int dbus_get_config(dbus_pub_t *pub, dbus_config_t *config);
+extern void * dbus_get_devinfo(dbus_pub_t *pub);
+
+extern void *dbus_pktget(dbus_pub_t *pub, int len);
+extern void dbus_pktfree(dbus_pub_t *pub, void* pkt);
+
+extern int dbus_set_errmask(dbus_pub_t *pub, uint32 mask);
+extern int dbus_pnp_sleep(dbus_pub_t *pub);
+extern int dbus_pnp_resume(dbus_pub_t *pub, int *fw_reload);
+extern int dbus_pnp_disconnect(dbus_pub_t *pub);
+
+extern int dbus_iovar_op(dbus_pub_t *pub, const char *name,
+	void *params, int plen, void *arg, int len, bool set);
+
+extern void *dhd_dbus_txq(const dbus_pub_t *pub);
+extern uint dhd_dbus_hdrlen(const dbus_pub_t *pub);
+
+/*
+ * Private Common Bus Interface
+ */
+
+/** IO Request Block (IRB) */
+typedef struct dbus_irb {
+	struct dbus_irb *next;	/**< it's cast from a dbus_irb_tx or dbus_irb_rx struct */
+} dbus_irb_t;
+
+typedef struct dbus_irb_rx {
+	struct dbus_irb irb;	/* Must be first */
+	uint8 *buf;
+	int buf_len;
+	int actual_len;
+	void *pkt;
+	void *info;
+	void *arg;
+} dbus_irb_rx_t;
+
+typedef struct dbus_irb_tx {
+	struct dbus_irb irb;	/** Must be first */
+	uint8 *buf;		/** mutually exclusive with struct member 'pkt' */
+	int len;		/** length of field 'buf' */
+	void *pkt;		/** mutually exclusive with struct member 'buf' */
+	int retry_count;
+	void *info;
+	void *arg;
+	void *send_buf;		/**< linear buffer for LINUX when aggregation is enabled */
+} dbus_irb_tx_t;
+
+/**
+ * DBUS interface callbacks are different from user callbacks
+ * so, internally, different info can be passed to the upper layer
+ */
+typedef struct dbus_intf_callbacks {
+	void (*send_irb_timeout)(void *cbarg, dbus_irb_tx_t *txirb);
+	void (*send_irb_complete)(void *cbarg, dbus_irb_tx_t *txirb, int status);
+	void (*recv_irb_complete)(void *cbarg, dbus_irb_rx_t *rxirb, int status);
+	void (*errhandler)(void *cbarg, int err);
+	void (*ctl_complete)(void *cbarg, int type, int status);
+	void (*state_change)(void *cbarg, int state);
+	bool (*isr)(void *cbarg, bool *wantdpc);
+	bool (*dpc)(void *cbarg, bool bounded);
+	void (*watchdog)(void *cbarg);
+	void *(*pktget)(void *cbarg, uint len, bool send);
+	void (*pktfree)(void *cbarg, void *p, bool send);
+	struct dbus_irb* (*getirb)(void *cbarg, bool send);
+	void (*rxerr_indicate)(void *cbarg, bool on);
+} dbus_intf_callbacks_t;
+
+/*
+ * Porting: To support a new bus, port the functions below
+ */
+
+/*
+ * Bus specific Interface
+ * Implemented by dbus_usb.c/dbus_sdio.c
+ */
+extern int dbus_bus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg,
+	dbus_intf_t **intf, void *param1, void *param2);
+extern int dbus_bus_deregister(void);
+extern void dbus_bus_fw_get(void *bus, uint8 **fw, int *fwlen, int *decomp);
+
+/*
+ * Bus-specific and OS-specific Interface
+ * Implemented by dbus_usb_[linux/ndis].c/dbus_sdio_[linux/ndis].c
+ */
+extern int dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb,
+	void *prarg, dbus_intf_t **intf, void *param1, void *param2);
+extern int dbus_bus_osl_deregister(void);
+
+/*
+ * Bus-specific, OS-specific, HW-specific Interface
+ * Mainly for SDIO Host HW controller
+ */
+extern int dbus_bus_osl_hw_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb,
+	void *prarg, dbus_intf_t **intf);
+extern int dbus_bus_osl_hw_deregister(void);
+
+extern uint usbdev_bulkin_eps(void);
+#if defined(BCM_REQUEST_FW)
+extern void *dbus_get_fw_nvfile(int devid, int chiprev, uint8 **fw, int *fwlen, int type,
+	uint16 boardtype, uint16 boardrev);
+extern void dbus_release_fw_nvfile(void *firmware);
+#endif /* #if defined(BCM_REQUEST_FW) */
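
A minimal bring-up sketch (illustrative only, not part of this patch) showing the intended call order of the public interface above: register a probe/disconnect pair, attach with client callbacks, download firmware, then bring the bus up. The callback bodies, queue depths, and my_* names are placeholders:

static void my_state_change(void *cbarg, int state)
{
	/* e.g. state == DBUS_STATE_UP once dbus_up() completes */
	(void)cbarg; (void)state;
}

static void *my_probe(void *arg, const char *desc, uint32 bustype, uint32 hdrlen)
{
	(void)desc; (void)bustype; (void)hdrlen;
	return arg;	/* handed back to my_disconnect() later */
}

static void my_disconnect(void *arg) { (void)arg; }

static int my_bring_up(struct osl_info *osh, void *drv_ctx, int vid, int pid)
{
	static dbus_callbacks_t cbs = { .state_change = my_state_change };
	dbus_pub_t *pub;
	int err;

	err = dbus_register(vid, pid, my_probe, my_disconnect, drv_ctx, NULL, NULL);
	if (err != DBUS_OK)
		return err;

	pub = dbus_attach(osh, DBUS_BUFFER_SIZE_RX, 8 /* nrxq */, 8 /* ntxq */,
		drv_ctx, &cbs, NULL /* no external download */, NULL);
	if (pub == NULL)
		return DBUS_ERR;

	err = dbus_download_firmware(pub);
	return (err == DBUS_OK) ? dbus_up(pub) : err;
}
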
+
+
+#if defined(EHCI_FASTPATH_TX) || defined(EHCI_FASTPATH_RX)
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+	/* Backward compatibility */
+	typedef unsigned int gfp_t;
+
+	#define dma_pool pci_pool
+	#define dma_pool_create(name, dev, size, align, alloc) \
+		pci_pool_create(name, dev, size, align, alloc, GFP_DMA | GFP_ATOMIC)
+	#define dma_pool_destroy(pool) pci_pool_destroy(pool)
+	#define dma_pool_alloc(pool, flags, handle) pci_pool_alloc(pool, flags, handle)
+	#define dma_pool_free(pool, vaddr, addr) pci_pool_free(pool, vaddr, addr)
+
+	#define dma_map_single(dev, addr, size, dir)	pci_map_single(dev, addr, size, dir)
+	#define dma_unmap_single(dev, hnd, size, dir)	pci_unmap_single(dev, hnd, size, dir)
+	#define DMA_FROM_DEVICE PCI_DMA_FROMDEVICE
+	#define DMA_TO_DEVICE PCI_DMA_TODEVICE
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+/* Availability of these functions varies (when present, they have two arguments) */
+#ifndef hc32_to_cpu
+	#define hc32_to_cpu(x)	le32_to_cpu(x)
+	#define cpu_to_hc32(x)	cpu_to_le32(x)
+	typedef unsigned int __hc32;
+#else
+	#error Two-argument functions needed
+#endif
+
+/* Private USB opcode base */
+#define EHCI_FASTPATH		0x31
+#define EHCI_SET_EP_BYPASS	EHCI_FASTPATH
+#define EHCI_SET_BYPASS_CB	(EHCI_FASTPATH + 1)
+#define EHCI_SET_BYPASS_DEV	(EHCI_FASTPATH + 2)
+#define EHCI_DUMP_STATE		(EHCI_FASTPATH + 3)
+#define EHCI_SET_BYPASS_POOL	(EHCI_FASTPATH + 4)
+#define EHCI_CLR_EP_BYPASS	(EHCI_FASTPATH + 5)
+
+/*
+ * EHCI QTD structure (hardware and extension)
+ * NOTE that it does not need to (and does not) match its kernel counterpart
+ */
+#define EHCI_QTD_NBUFFERS	5
+#define EHCI_QTD_ALIGN		32
+#define EHCI_BULK_PACKET_SIZE	512
+#define EHCI_QTD_XACTERR_MAX	32
+
+struct ehci_qtd {
+	/* Hardware map */
+	volatile uint32_t	qtd_next;
+	volatile uint32_t	qtd_altnext;
+	volatile uint32_t	qtd_status;
+#define	EHCI_QTD_GET_BYTES(x)	(((x)>>16) & 0x7fff)
+#define	EHCI_QTD_IOC		0x00008000
+#define	EHCI_QTD_GET_CERR(x)	(((x)>>10) & 0x3)
+#define	EHCI_QTD_SET_CERR(x)	((x) << 10)
+#define	EHCI_QTD_GET_PID(x)	(((x)>>8) & 0x3)
+#define	EHCI_QTD_SET_PID(x)	((x) << 8)
+#define	EHCI_QTD_ACTIVE		0x80
+#define	EHCI_QTD_HALTED		0x40
+#define	EHCI_QTD_BUFERR		0x20
+#define	EHCI_QTD_BABBLE		0x10
+#define	EHCI_QTD_XACTERR	0x08
+#define	EHCI_QTD_MISSEDMICRO	0x04
+	volatile uint32_t	qtd_buffer[EHCI_QTD_NBUFFERS];
+	volatile uint32_t	qtd_buffer_hi[EHCI_QTD_NBUFFERS];
+
+	/* Implementation extension */
+	dma_addr_t		qtd_self;	/**< own hardware address */
+	struct ehci_qtd		*obj_next;	/**< software link to the next QTD */
+	void			*rpc;		/**< pointer to the rpc buffer */
+	size_t			length;		/**< length of the data in the buffer */
+	void			*buff;		/**< pointer to the reassembly buffer */
+	int			xacterrs;	/**< retry counter for qtd xact error */
+} __attribute__ ((aligned(EHCI_QTD_ALIGN)));
+
+#define	EHCI_NULL	__constant_cpu_to_le32(1) /* HW null pointer shall be odd */
+
+#define SHORT_READ_Q(token) (EHCI_QTD_GET_BYTES(token) != 0 && EHCI_QTD_GET_PID(token) == 1)
+
+/**
+ * Queue Head
+ * NOTE This structure is slightly different from the one in the kernel; but needs to stay
+ * compatible.
+ */
+struct ehci_qh {
+	/* Hardware map */
+	volatile uint32_t	qh_link;
+	volatile uint32_t	qh_endp;
+	volatile uint32_t	qh_endphub;
+	volatile uint32_t	qh_curqtd;
+
+	/* QTD overlay */
+	volatile uint32_t	ow_next;
+	volatile uint32_t	ow_altnext;
+	volatile uint32_t	ow_status;
+	volatile uint32_t	ow_buffer [EHCI_QTD_NBUFFERS];
+	volatile uint32_t	ow_buffer_hi [EHCI_QTD_NBUFFERS];
+
+	/* Extension (should match the kernel layout) */
+	dma_addr_t		unused0;
+	void			*unused1;
+	struct list_head	unused2;
+	struct ehci_qtd		*dummy;
+	struct ehci_qh		*unused3;
+
+	struct ehci_hcd		*unused4;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+	struct kref		unused5;
+	unsigned		unused6;
+
+	uint8_t			unused7;
+
+	/* periodic schedule info */
+	uint8_t			unused8;
+	uint8_t			unused9;
+	uint8_t			unused10;
+	uint16_t		unused11;
+	uint16_t		unused12;
+	uint16_t		unused13;
+	struct usb_device	*unused14;
+#else
+	unsigned		unused5;
+
+	u8			unused6;
+
+	/* periodic schedule info */
+	u8			unused7;
+	u8			unused8;
+	u8			unused9;
+	unsigned short		unused10;
+	unsigned short		unused11;
+#define NO_FRAME ((unsigned short)~0)
+#ifdef EHCI_QUIRK_FIX
+	struct usb_device	*unused12;
+#endif /* EHCI_QUIRK_FIX */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+	struct ehci_qtd		*first_qtd;
+		/* Link to the first QTD; this is an optimized equivalent of the qtd_list field */
+		/* NOTE that ehci_qh in ehci.h shall reserve this word */
+} __attribute__ ((aligned(EHCI_QTD_ALIGN)));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/** The corresponding structure in the kernel is used to get the QH */
+struct hcd_dev {	/* usb_device.hcpriv points to this */
+	struct list_head	unused0;
+	struct list_head	unused1;
+
+	/* array of QH pointers */
+	void			*ep[32];
+};
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+int optimize_qtd_fill_with_rpc(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *rpc,
+	int token, int len);
+int optimize_qtd_fill_with_data(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *data,
+	int token, int len);
+int optimize_submit_async(struct ehci_qtd *qtd, int epn);
+inline void optimize_ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma);
+struct ehci_qtd *optimize_ehci_qtd_alloc(gfp_t flags);
+void optimize_ehci_qtd_free(struct ehci_qtd *qtd);
+void optimize_submit_rx_request(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd_in, void *buf);
+#endif /* EHCI_FASTPATH_TX || EHCI_FASTPATH_RX */
+
+void dbus_flowctrl_tx(void *dbi, bool on);
+#endif /* __DBUS_H__ */
diff --git a/drivers/net/wireless/bcmdhd/include/devctrl_if/wlioctl_defs.h b/drivers/net/wireless/bcmdhd/include/devctrl_if/wlioctl_defs.h
new file mode 100644
index 000000000000..2fbe8e0a6b50
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/devctrl_if/wlioctl_defs.h
@@ -0,0 +1,2114 @@
+/*
+ * Custom OID/ioctl definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: wlioctl_defs.h 403826 2013-05-22 16:40:55Z $
+ */
+
+
+#ifndef wlioctl_defs_h
+#define wlioctl_defs_h
+
+
+
+
+
+/* All builds use the new 11ac ratespec/chanspec */
+#undef D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+/* WL_RSPEC defines for rate information */
+#define WL_RSPEC_RATE_MASK	0x000000FF	/* rate or HT MCS value */
+#define WL_RSPEC_VHT_MCS_MASK	0x0000000F	/* VHT MCS value */
+#define WL_RSPEC_VHT_NSS_MASK	0x000000F0	/* VHT Nss value */
+#define WL_RSPEC_VHT_NSS_SHIFT	4		/* VHT Nss value shift */
+#define WL_RSPEC_TXEXP_MASK	0x00000300
+#define WL_RSPEC_TXEXP_SHIFT	8
+#define WL_RSPEC_BW_MASK	0x00070000	/* bandwidth mask */
+#define WL_RSPEC_BW_SHIFT	16		/* bandwidth shift */
+#define WL_RSPEC_STBC		0x00100000	/* STBC encoding, Nsts = 2 x Nss */
+#define WL_RSPEC_TXBF		0x00200000	/* bit indicates TXBF mode */
+#define WL_RSPEC_LDPC		0x00400000	/* bit indicates adv coding in use */
+#define WL_RSPEC_SGI		0x00800000	/* Short GI mode */
+#define WL_RSPEC_ENCODING_MASK	0x03000000	/* Encoding of Rate/MCS field */
+#define WL_RSPEC_OVERRIDE_RATE	0x40000000	/* bit indicates override of mcs only */
+#define WL_RSPEC_OVERRIDE_MODE	0x80000000	/* bit indicates override of both rate & mode */
+
+/* WL_RSPEC_ENCODING field defs */
+#define WL_RSPEC_ENCODE_RATE	0x00000000	/* Legacy rate is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_HT	0x01000000	/* HT MCS is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_VHT	0x02000000	/* VHT MCS and Nss is stored in RSPEC_RATE_MASK */
+
+/* WL_RSPEC_BW field defs */
+#define WL_RSPEC_BW_UNSPECIFIED	0
+#define WL_RSPEC_BW_20MHZ	0x00010000
+#define WL_RSPEC_BW_40MHZ	0x00020000
+#define WL_RSPEC_BW_80MHZ	0x00030000
+#define WL_RSPEC_BW_160MHZ	0x00040000
+#define WL_RSPEC_BW_10MHZ	0x00050000
+#define WL_RSPEC_BW_5MHZ	0x00060000
+#define WL_RSPEC_BW_2P5MHZ	0x00070000
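
Composing and decomposing a VHT ratespec with the masks above (illustrative only, not part of this patch; the values are worked examples, not driver defaults):

static uint32 my_make_vht_rspec(uint32 mcs, uint32 nss, uint32 bw_field)
{
	/* e.g. mcs = 7, nss = 2, bw_field = WL_RSPEC_BW_80MHZ */
	return WL_RSPEC_ENCODE_VHT |
	       bw_field |
	       ((nss << WL_RSPEC_VHT_NSS_SHIFT) & WL_RSPEC_VHT_NSS_MASK) |
	       (mcs & WL_RSPEC_VHT_MCS_MASK);
}

static void my_parse_rspec(uint32 rspec)
{
	if ((rspec & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT) {
		uint32 mcs = rspec & WL_RSPEC_VHT_MCS_MASK;
		uint32 nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT;
		bool sgi = (rspec & WL_RSPEC_SGI) != 0;
		(void)mcs; (void)nss; (void)sgi;
	}
}
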
+
+/* Legacy defines for the nrate iovar */
+#define OLD_NRATE_MCS_INUSE		0x00000080	/* MCS in use, indicates b0-6 holds an mcs */
+#define OLD_NRATE_RATE_MASK		0x0000007f	/* rate/mcs value */
+#define OLD_NRATE_STF_MASK		0x0000ff00	/* stf mode mask: siso, cdd, stbc, sdm */
+#define OLD_NRATE_STF_SHIFT		8		/* stf mode shift */
+#define OLD_NRATE_OVERRIDE		0x80000000	/* bit indicates override of both rate & mode */
+#define OLD_NRATE_OVERRIDE_MCS_ONLY	0x40000000	/* bit indicates override of mcs only */
+#define OLD_NRATE_SGI			0x00800000	/* sgi mode */
+#define OLD_NRATE_LDPC_CODING		0x00400000	/* bit indicates adv coding in use */
+
+#define OLD_NRATE_STF_SISO	0	/* stf mode SISO */
+#define OLD_NRATE_STF_CDD	1	/* stf mode CDD */
+#define OLD_NRATE_STF_STBC	2	/* stf mode STBC */
+#define OLD_NRATE_STF_SDM	3	/* stf mode SDM */
+
+#define HIGHEST_SINGLE_STREAM_MCS	7	/* MCS values greater than this enable multiple streams */
+
+#define WLC_11N_N_PROP_MCS	6
+#define WLC_11N_FIRST_PROP_MCS	87
+#define WLC_11N_LAST_PROP_MCS	102
+
+
+#define MAX_CCA_CHANNELS	38	/* Max number of 20 MHz wide channels */
+#define MAX_CCA_SECS		60	/* CCA keeps this many seconds history */
+
+#define IBSS_MED	15	/* Medium in-bss congestion percentage */
+#define IBSS_HI		25	/* Hi in-bss congestion percentage */
+#define OBSS_MED	12
+#define OBSS_HI		25
+#define INTERFER_MED	5
+#define INTERFER_HI	10
+
+#define CCA_FLAG_2G_ONLY		0x01	/* Return a channel from the 2.4 GHz band */
+#define CCA_FLAG_5G_ONLY		0x02	/* Return a channel from the 5 GHz band */
+#define CCA_FLAG_IGNORE_DURATION	0x04	/* Ignore dwell time for each channel */
+#define CCA_FLAGS_PREFER_1_6_11		0x10
+#define CCA_FLAG_IGNORE_INTERFER	0x20	/* do not exclude channel based on interfer level */
+
+#define CCA_ERRNO_BAND		1	/* After filtering for band pref, no choices left */
+#define CCA_ERRNO_DURATION	2	/* After filtering for duration, no choices left */
+#define CCA_ERRNO_PREF_CHAN	3	/* After filtering for chan pref, no choices left */
+#define CCA_ERRNO_INTERFER	4	/* After filtering for interference, no choices left */
+#define CCA_ERRNO_TOO_FEW	5	/* Only 1 channel was input */
+
+#define WL_STA_AID(a)		((a) &~ 0xc000)
+
+/* Flags for sta_info_t indicating properties of STA */
+#define WL_STA_BRCM		0x00000001	/* Running a Broadcom driver */
+#define WL_STA_WME		0x00000002	/* WMM association */
+#define WL_STA_NONERP		0x00000004	/* No ERP */
+#define WL_STA_AUTHE		0x00000008	/* Authenticated */
+#define WL_STA_ASSOC		0x00000010	/* Associated */
+#define WL_STA_AUTHO		0x00000020	/* Authorized */
+#define WL_STA_WDS		0x00000040	/* Wireless Distribution System */
+#define WL_STA_WDS_LINKUP	0x00000080	/* WDS traffic/probes flowing properly */
+#define WL_STA_PS		0x00000100	/* STA is in power save mode from AP's viewpoint */
+#define WL_STA_APSD_BE		0x00000200	/* APSD delv/trigger for AC_BE is default enabled */
+#define WL_STA_APSD_BK		0x00000400	/* APSD delv/trigger for AC_BK is default enabled */
+#define WL_STA_APSD_VI		0x00000800	/* APSD delv/trigger for AC_VI is default enabled */
+#define WL_STA_APSD_VO		0x00001000	/* APSD delv/trigger for AC_VO is default enabled */
+#define WL_STA_N_CAP		0x00002000	/* STA 802.11n capable */
+#define WL_STA_SCBSTATS		0x00004000	/* Per STA debug stats */
+#define WL_STA_AMPDU_CAP	0x00008000	/* STA AMPDU capable */
+#define WL_STA_AMSDU_CAP	0x00010000	/* STA AMSDU capable */
+#define WL_STA_MIMO_PS		0x00020000	/* mimo ps mode is enabled */
+#define WL_STA_MIMO_RTS		0x00040000	/* send rts in mimo ps mode */
+#define WL_STA_RIFS_CAP		0x00080000	/* rifs enabled */
+#define WL_STA_VHT_CAP		0x00100000	/* STA VHT(11ac) capable */
+#define WL_STA_WPS		0x00200000	/* WPS state */
+
+#define WL_WDS_LINKUP		WL_STA_WDS_LINKUP	/* deprecated */
+
+/* STA HT cap fields */
+#define WL_STA_CAP_LDPC_CODING		0x0001	/* Support for rx of LDPC coded pkts */
+#define WL_STA_CAP_40MHZ		0x0002	/* FALSE:20Mhz, TRUE:20/40MHZ supported */
+#define WL_STA_CAP_MIMO_PS_MASK		0x000C	/* Mimo PS mask */
+#define WL_STA_CAP_MIMO_PS_SHIFT	0x0002	/* Mimo PS shift */
+#define WL_STA_CAP_MIMO_PS_OFF		0x0003	/* Mimo PS, no restriction */
+#define WL_STA_CAP_MIMO_PS_RTS		0x0001	/* Mimo PS, send RTS/CTS around MIMO frames */
+#define WL_STA_CAP_MIMO_PS_ON		0x0000	/* Mimo PS, MIMO disallowed */
+#define WL_STA_CAP_GF			0x0010	/* Greenfield preamble support */
+#define WL_STA_CAP_SHORT_GI_20		0x0020	/* 20MHZ short guard interval support */
+#define WL_STA_CAP_SHORT_GI_40		0x0040	/* 40Mhz short guard interval support */
+#define WL_STA_CAP_TX_STBC		0x0080	/* Tx STBC support */
+#define WL_STA_CAP_RX_STBC_MASK		0x0300	/* Rx STBC mask */
+#define WL_STA_CAP_RX_STBC_SHIFT	8	/* Rx STBC shift */
+#define WL_STA_CAP_DELAYED_BA		0x0400	/* delayed BA support */
+#define WL_STA_CAP_MAX_AMSDU		0x0800	/* Max AMSDU size in bytes, 0=3839, 1=7935 */
+#define WL_STA_CAP_DSSS_CCK		0x1000	/* DSSS/CCK supported by the BSS */
+#define WL_STA_CAP_PSMP			0x2000	/* Power Save Multi Poll support */
+#define WL_STA_CAP_40MHZ_INTOLERANT	0x4000	/* 40MHz Intolerant */
+#define WL_STA_CAP_LSIG_TXOP		0x8000	/* L-SIG TXOP protection support */
+
+#define WL_STA_CAP_RX_STBC_NO		0x0	/* no rx STBC support */
+#define WL_STA_CAP_RX_STBC_ONE_STREAM	0x1	/* rx STBC support of 1 spatial stream */
+#define WL_STA_CAP_RX_STBC_TWO_STREAM	0x2	/* rx STBC support of 1-2 spatial streams */
+#define WL_STA_CAP_RX_STBC_THREE_STREAM	0x3	/* rx STBC support of 1-3 spatial streams */
+
+/* scb vht flags */
+#define WL_STA_VHT_LDPCCAP	0x0001
+#define WL_STA_SGI80		0x0002
+#define WL_STA_SGI160		0x0004
+#define WL_STA_VHT_TX_STBCCAP	0x0008
+#define WL_STA_VHT_RX_STBCCAP	0x0010
+#define WL_STA_SU_BEAMFORMER	0x0020
+#define WL_STA_SU_BEAMFORMEE	0x0040
+#define WL_STA_MU_BEAMFORMER	0x0080
+#define WL_STA_MU_BEAMFORMEE	0x0100
+#define WL_STA_VHT_TXOP_PS	0x0200
+#define WL_STA_HTC_VHT_CAP	0x0400
+
+/* Values for TX Filter override mode */
+#define WLC_TXFILTER_OVERRIDE_DISABLED	0
+#define WLC_TXFILTER_OVERRIDE_ENABLED	1
+
+#define WL_IOCTL_ACTION_GET		0x0
+#define WL_IOCTL_ACTION_SET		0x1
+#define WL_IOCTL_ACTION_OVL_IDX_MASK	0x1e
+#define WL_IOCTL_ACTION_OVL_RSV		0x20
+#define WL_IOCTL_ACTION_OVL		0x40
+#define WL_IOCTL_ACTION_MASK		0x7e
+#define WL_IOCTL_ACTION_OVL_SHIFT	1
+
+/* For WLC_SET_INFRA ioctl & infra_configuration iovar SET/GET operations */
+#define WL_BSSTYPE_INDEP	0
+#define WL_BSSTYPE_INFRA	1
+#define WL_BSSTYPE_ANY		2	/* deprecated */
+#define WL_BSSTYPE_MESH		3
+
+/* Bitmask for scan_type */
+#define WL_SCANFLAGS_PASSIVE		0x01	/* force passive scan */
+#define WL_SCANFLAGS_RESERVED		0x02	/* Reserved */
+#define WL_SCANFLAGS_PROHIBITED		0x04	/* allow scanning prohibited channels */
+#define WL_SCANFLAGS_OFFCHAN		0x08	/* allow scanning/reporting off-channel APs */
+#define WL_SCANFLAGS_HOTSPOT		0x10	/* automatic ANQP to hotspot APs */
+#define WL_SCANFLAGS_SWTCHAN		0x20	/* Force channel switch for different bandwidth */
+#define WL_SCANFLAGS_FORCE_PARALLEL	0x40	/* Force parallel scan even when actcb_fn_t is on.
+						 * By default parallel scan will be disabled if actcb_fn_t
+						 * is provided.
+						 */
+
+/* wl_iscan_results status values */
+#define WL_SCAN_RESULTS_SUCCESS	0
+#define WL_SCAN_RESULTS_PARTIAL	1
+#define WL_SCAN_RESULTS_PENDING	2
+#define WL_SCAN_RESULTS_ABORTED	3
+#define WL_SCAN_RESULTS_NO_MEM	4
+
+#define SCANOL_ENABLED			(1 << 0)
+#define SCANOL_BCAST_SSID		(1 << 1)
+#define SCANOL_NOTIFY_BCAST_SSID	(1 << 2)
+#define SCANOL_RESULTS_PER_CYCLE	(1 << 3)
+
+/* scan times in milliseconds */
+#define SCANOL_HOME_TIME		45	/* for home channel processing */
+#define SCANOL_ASSOC_TIME		20	/* dwell on a channel while associated */
+#define SCANOL_UNASSOC_TIME		40	/* dwell on a channel while unassociated */
+#define SCANOL_PASSIVE_TIME		110	/* listen on a channel for passive scan */
+#define SCANOL_AWAY_LIMIT		100	/* max time to be away from home channel */
+#define SCANOL_IDLE_REST_TIME		40
+#define SCANOL_IDLE_REST_MULTIPLIER	0
+#define SCANOL_ACTIVE_REST_TIME		20
+#define SCANOL_ACTIVE_REST_MULTIPLIER	0
+#define SCANOL_CYCLE_IDLE_REST_TIME	300000	/* Idle Rest Time between Scan Cycle (msec) */
+#define SCANOL_CYCLE_IDLE_REST_MULTIPLIER	0	/* Idle Rest Time Multiplier */
+#define SCANOL_CYCLE_ACTIVE_REST_TIME	200
+#define SCANOL_CYCLE_ACTIVE_REST_MULTIPLIER	0
+#define SCANOL_MAX_REST_TIME		3600000	/* max rest time between scan cycle (msec) */
+#define SCANOL_CYCLE_DEFAULT		0	/* default for Max Scan Cycle, 0 = forever */
+#define SCANOL_CYCLE_MAX		864000	/* Max Scan Cycle */
+						/* 10 sec/scan cycle => 100 days */
+#define SCANOL_NPROBES			2	/* for Active scan; send n probes on each channel */
+#define SCANOL_NPROBES_MAX		5	/* for Active scan; send n probes on each channel */
+#define SCANOL_SCAN_START_DLY		10	/* delay start of offload scan (sec) */
+#define SCANOL_SCAN_START_DLY_MAX	240	/* delay start of offload scan (sec) */
+#define SCANOL_MULTIPLIER_MAX		10	/* Max Multiplier */
+#define SCANOL_UNASSOC_TIME_MAX		100	/* max dwell on a channel while unassociated */
+#define SCANOL_PASSIVE_TIME_MAX		500	/* max listen on a channel for passive scan */
+#define SCANOL_SSID_MAX			16	/* max supported preferred SSID */
+
+/* masks for channel and ssid count */
+#define WL_SCAN_PARAMS_COUNT_MASK	0x0000ffff
+#define WL_SCAN_PARAMS_NSSID_SHIFT	16
+
+#define WL_SCAN_ACTION_START		1
+#define WL_SCAN_ACTION_CONTINUE		2
+#define WL_SCAN_ACTION_ABORT		3
+
+
+#define ANTENNA_NUM_1	1	/* total number of antennas to be used */
+#define ANTENNA_NUM_2	2
+#define ANTENNA_NUM_3	3
+#define ANTENNA_NUM_4	4
+
+#define ANT_SELCFG_AUTO		0x80	/* bit indicates antenna sel AUTO */
+#define ANT_SELCFG_MASK		0x33	/* antenna configuration mask */
+#define ANT_SELCFG_TX_UNICAST	0	/* unicast tx antenna configuration */
+#define ANT_SELCFG_RX_UNICAST	1	/* unicast rx antenna configuration */
+#define ANT_SELCFG_TX_DEF	2	/* default tx antenna configuration */
+#define ANT_SELCFG_RX_DEF	3	/* default rx antenna configuration */
+
+/* interference source detection and identification mode */
+#define ITFR_MODE_DISABLE	0	/* disable feature */
+#define ITFR_MODE_MANUAL_ENABLE	1	/* enable manual detection */
+#define ITFR_MODE_AUTO_ENABLE	2	/* enable auto detection */
+
+/* bit definitions for flags in interference source report */
+#define ITFR_INTERFERENCED	1	/* interference detected */
+#define ITFR_HOME_CHANNEL	2	/* home channel has interference */
+#define ITFR_NOISY_ENVIRONMENT	4	/* noisy environment so feature stopped */
+
+#define WL_NUM_RPI_BINS		8
+#define WL_RM_TYPE_BASIC	1
+#define WL_RM_TYPE_CCA		2
+#define WL_RM_TYPE_RPI		3
+#define WL_RM_TYPE_ABORT	-1	/* ABORT any in-progress RM request */
+
+#define WL_RM_FLAG_PARALLEL	(1<<0)
+
+#define
WL_RM_FLAG_LATE (1<<1) +#define WL_RM_FLAG_INCAPABLE (1<<2) +#define WL_RM_FLAG_REFUSED (1<<3) + +/* flags */ +#define WLC_ASSOC_REQ_IS_REASSOC 0x01 /* assoc req was actually a reassoc */ + +#define WLC_CIS_DEFAULT 0 /* built-in default */ +#define WLC_CIS_SROM 1 /* source is sprom */ +#define WLC_CIS_OTP 2 /* source is otp */ + +/* PCL - Power Control Loop */ +/* current gain setting is replaced by user input */ +#define WL_ATTEN_APP_INPUT_PCL_OFF 0 /* turn off PCL, apply supplied input */ +#define WL_ATTEN_PCL_ON 1 /* turn on PCL */ +/* current gain setting is maintained */ +#define WL_ATTEN_PCL_OFF 2 /* turn off PCL. */ + +/* defines used by poweridx iovar - it controls power in a-band */ +/* current gain setting is maintained */ +#define WL_PWRIDX_PCL_OFF -2 /* turn off PCL. */ +#define WL_PWRIDX_PCL_ON -1 /* turn on PCL */ +#define WL_PWRIDX_LOWER_LIMIT -2 /* lower limit */ +#define WL_PWRIDX_UPPER_LIMIT 63 /* upper limit */ +/* value >= 0 causes + * - input to be set to that value + * - PCL to be off + */ + +#define BCM_MAC_STATUS_INDICATION (0x40010200L) + +/* Values for TX Filter override mode */ +#define WLC_TXFILTER_OVERRIDE_DISABLED 0 +#define WLC_TXFILTER_OVERRIDE_ENABLED 1 + +/* magic pattern used for mismatch driver and wl */ +#define WL_TXFIFO_SZ_MAGIC 0xa5a5 + +/* check this magic number */ +#define WLC_IOCTL_MAGIC 0x14e46c77 + +/* bss_info_cap_t flags */ +#define WL_BSS_FLAGS_FROM_BEACON 0x01 /* bss_info derived from beacon */ +#define WL_BSS_FLAGS_FROM_CACHE 0x02 /* bss_info collected from cache */ +#define WL_BSS_FLAGS_RSSI_ONCHANNEL 0x04 /* rssi info received on channel (vs offchannel) */ +#define WL_BSS_FLAGS_HS20 0x08 /* hotspot 2.0 capable */ +#define WL_BSS_FLAGS_RSSI_INVALID 0x10 /* BSS contains invalid RSSI */ +#define WL_BSS_FLAGS_RSSI_INACCURATE 0x20 /* BSS contains inaccurate RSSI */ +#define WL_BSS_FLAGS_SNR_INVALID 0x40 /* BSS contains invalid SNR */ +#define WL_BSS_FLAGS_NF_INVALID 0x80 /* BSS contains invalid noise floor */ + +/* bssinfo flag for nbss_cap */ +#define VHT_BI_SGI_80MHZ 0x00000100 +#define VHT_BI_80MHZ 0x00000200 +#define VHT_BI_160MHZ 0x00000400 +#define VHT_BI_8080MHZ 0x00000800 + +/* reference to wl_ioctl_t struct used by usermode driver */ +#define ioctl_subtype set /* subtype param */ +#define ioctl_pid used /* pid param */ +#define ioctl_status needed /* status param */ + + +/* Enumerate crypto algorithms */ +#define CRYPTO_ALGO_OFF 0 +#define CRYPTO_ALGO_WEP1 1 +#define CRYPTO_ALGO_TKIP 2 +#define CRYPTO_ALGO_WEP128 3 +#define CRYPTO_ALGO_AES_CCM 4 +#define CRYPTO_ALGO_AES_OCB_MSDU 5 +#define CRYPTO_ALGO_AES_OCB_MPDU 6 +#if !defined(BCMEXTCCX) +#define CRYPTO_ALGO_NALG 7 +#else +#define CRYPTO_ALGO_CKIP 7 +#define CRYPTO_ALGO_CKIP_MMH 8 +#define CRYPTO_ALGO_WEP_MMH 9 +#define CRYPTO_ALGO_NALG 10 +#endif + +#define CRYPTO_ALGO_SMS4 11 +#define CRYPTO_ALGO_PMK 12 /* for 802.1x supp to set PMK before 4-way */ +#define CRYPTO_ALGO_BIP 13 /* 802.11w BIP (aes cmac) */ + +#define CRYPTO_ALGO_AES_GCM 14 /* 128 bit GCM */ +#define CRYPTO_ALGO_AES_CCM256 15 /* 256 bit CCM */ +#define CRYPTO_ALGO_AES_GCM256 16 /* 256 bit GCM */ +#define CRYPTO_ALGO_BIP_CMAC256 17 /* 256 bit BIP CMAC */ +#define CRYPTO_ALGO_BIP_GMAC 18 /* 128 bit BIP GMAC */ +#define CRYPTO_ALGO_BIP_GMAC256 19 /* 256 bit BIP GMAC */ + +#define CRYPTO_ALGO_NONE CRYPTO_ALGO_OFF + +#define WSEC_GEN_MIC_ERROR 0x0001 +#define WSEC_GEN_REPLAY 0x0002 +#define WSEC_GEN_ICV_ERROR 0x0004 +#define WSEC_GEN_MFP_ACT_ERROR 0x0008 +#define WSEC_GEN_MFP_DISASSOC_ERROR 0x0010 +#define 
WSEC_GEN_MFP_DEAUTH_ERROR 0x0020 + +#define WL_SOFT_KEY (1 << 0) /* Indicates this key is using soft encrypt */ +#define WL_PRIMARY_KEY (1 << 1) /* Indicates this key is the primary (ie tx) key */ +#if defined(BCMEXTCCX) +#define WL_CKIP_KP (1 << 4) /* CMIC */ +#define WL_CKIP_MMH (1 << 5) /* CKIP */ +#else +#define WL_KF_RES_4 (1 << 4) /* Reserved for backward compat */ +#define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */ +#endif +#define WL_IBSS_PEER_GROUP_KEY (1 << 6) /* Indicates a group key for a IBSS PEER */ + +/* wireless security bitvec */ +#define WEP_ENABLED 0x0001 +#define TKIP_ENABLED 0x0002 +#define AES_ENABLED 0x0004 +#define WSEC_SWFLAG 0x0008 +#define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */ + +/* wsec macros for operating on the above definitions */ +#define WSEC_WEP_ENABLED(wsec) ((wsec) & WEP_ENABLED) +#define WSEC_TKIP_ENABLED(wsec) ((wsec) & TKIP_ENABLED) +#define WSEC_AES_ENABLED(wsec) ((wsec) & AES_ENABLED) + +#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED)) +#define WSEC_SES_OW_ENABLED(wsec) ((wsec) & SES_OW_ENABLED) + +/* Following macros are not used any more. Just kept here to + * avoid build issue in BISON/CARIBOU branch + */ +#define MFP_CAPABLE 0x0200 +#define MFP_REQUIRED 0x0400 +#define MFP_SHA256 0x0800 /* a special configuration for STA for WIFI test tool */ + +/* WPA authentication mode bitvec */ +#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */ +#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */ +#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */ +#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */ +#if defined(BCMEXTCCX) +#define WPA_AUTH_CCKM 0x0008 /* CCKM */ +#define WPA2_AUTH_CCKM 0x0010 /* CCKM2 */ +#endif +/* #define WPA_AUTH_8021X 0x0020 */ /* 802.1x, reserved */ +#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */ +#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */ +#define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */ +#define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */ +#define WPA2_AUTH_1X_SHA256 0x1000 /* 1X with SHA256 key derivation */ +#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */ +#define WPA2_AUTH_FT 0x4000 /* Fast Transition. */ +#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */ +/* WPA2_AUTH_SHA256 not used anymore. 
Just kept here to avoid build issue in DINGO */ +#define WPA2_AUTH_SHA256 0x8000 +#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */ + +/* pmkid */ +#define MAXPMKID 16 + +/* SROM12 changes */ +#define WLC_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */ + + +#define WLC_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */ +#define WLC_IOCTL_MEDLEN 1536 /* "med" length ioctl buffer required */ +#if defined(LCNCONF) || defined(LCN40CONF) || defined(LCN20CONF) +#define WLC_SAMPLECOLLECT_MAXLEN 8192 /* Max Sample Collect buffer */ +#else +#define WLC_SAMPLECOLLECT_MAXLEN 10240 /* Max Sample Collect buffer for two cores */ +#endif +#define WLC_SAMPLECOLLECT_MAXLEN_LCN40 8192 + +/* common ioctl definitions */ +#define WLC_GET_MAGIC 0 +#define WLC_GET_VERSION 1 +#define WLC_UP 2 +#define WLC_DOWN 3 +#define WLC_GET_LOOP 4 +#define WLC_SET_LOOP 5 +#define WLC_DUMP 6 +#define WLC_GET_MSGLEVEL 7 +#define WLC_SET_MSGLEVEL 8 +#define WLC_GET_PROMISC 9 +#define WLC_SET_PROMISC 10 +/* #define WLC_OVERLAY_IOCTL 11 */ /* not supported */ +#define WLC_GET_RATE 12 +#define WLC_GET_MAX_RATE 13 +#define WLC_GET_INSTANCE 14 +/* #define WLC_GET_FRAG 15 */ /* no longer supported */ +/* #define WLC_SET_FRAG 16 */ /* no longer supported */ +/* #define WLC_GET_RTS 17 */ /* no longer supported */ +/* #define WLC_SET_RTS 18 */ /* no longer supported */ +#define WLC_GET_INFRA 19 +#define WLC_SET_INFRA 20 +#define WLC_GET_AUTH 21 +#define WLC_SET_AUTH 22 +#define WLC_GET_BSSID 23 +#define WLC_SET_BSSID 24 +#define WLC_GET_SSID 25 +#define WLC_SET_SSID 26 +#define WLC_RESTART 27 +#define WLC_TERMINATED 28 +/* #define WLC_DUMP_SCB 28 */ /* no longer supported */ +#define WLC_GET_CHANNEL 29 +#define WLC_SET_CHANNEL 30 +#define WLC_GET_SRL 31 +#define WLC_SET_SRL 32 +#define WLC_GET_LRL 33 +#define WLC_SET_LRL 34 +#define WLC_GET_PLCPHDR 35 +#define WLC_SET_PLCPHDR 36 +#define WLC_GET_RADIO 37 +#define WLC_SET_RADIO 38 +#define WLC_GET_PHYTYPE 39 +#define WLC_DUMP_RATE 40 +#define WLC_SET_RATE_PARAMS 41 +#define WLC_GET_FIXRATE 42 +#define WLC_SET_FIXRATE 43 +/* #define WLC_GET_WEP 42 */ /* no longer supported */ +/* #define WLC_SET_WEP 43 */ /* no longer supported */ +#define WLC_GET_KEY 44 +#define WLC_SET_KEY 45 +#define WLC_GET_REGULATORY 46 +#define WLC_SET_REGULATORY 47 +#define WLC_GET_PASSIVE_SCAN 48 +#define WLC_SET_PASSIVE_SCAN 49 +#define WLC_SCAN 50 +#define WLC_SCAN_RESULTS 51 +#define WLC_DISASSOC 52 +#define WLC_REASSOC 53 +#define WLC_GET_ROAM_TRIGGER 54 +#define WLC_SET_ROAM_TRIGGER 55 +#define WLC_GET_ROAM_DELTA 56 +#define WLC_SET_ROAM_DELTA 57 +#define WLC_GET_ROAM_SCAN_PERIOD 58 +#define WLC_SET_ROAM_SCAN_PERIOD 59 +#define WLC_EVM 60 /* diag */ +#define WLC_GET_TXANT 61 +#define WLC_SET_TXANT 62 +#define WLC_GET_ANTDIV 63 +#define WLC_SET_ANTDIV 64 +/* #define WLC_GET_TXPWR 65 */ /* no longer supported */ +/* #define WLC_SET_TXPWR 66 */ /* no longer supported */ +#define WLC_GET_CLOSED 67 +#define WLC_SET_CLOSED 68 +#define WLC_GET_MACLIST 69 +#define WLC_SET_MACLIST 70 +#define WLC_GET_RATESET 71 +#define WLC_SET_RATESET 72 +/* #define WLC_GET_LOCALE 73 */ /* no longer supported */ +#define WLC_LONGTRAIN 74 +#define WLC_GET_BCNPRD 75 +#define WLC_SET_BCNPRD 76 +#define WLC_GET_DTIMPRD 77 +#define WLC_SET_DTIMPRD 78 +#define WLC_GET_SROM 79 +#define WLC_SET_SROM 80 +#define WLC_GET_WEP_RESTRICT 81 +#define WLC_SET_WEP_RESTRICT 82 +#define WLC_GET_COUNTRY 83 +#define WLC_SET_COUNTRY 84 +#define WLC_GET_PM 85 +#define WLC_SET_PM 86 +#define WLC_GET_WAKE 87 +#define 
WLC_SET_WAKE 88 +/* #define WLC_GET_D11CNTS 89 */ /* -> "counters" iovar */ +#define WLC_GET_FORCELINK 90 /* ndis only */ +#define WLC_SET_FORCELINK 91 /* ndis only */ +#define WLC_FREQ_ACCURACY 92 /* diag */ +#define WLC_CARRIER_SUPPRESS 93 /* diag */ +#define WLC_GET_PHYREG 94 +#define WLC_SET_PHYREG 95 +#define WLC_GET_RADIOREG 96 +#define WLC_SET_RADIOREG 97 +#define WLC_GET_REVINFO 98 +#define WLC_GET_UCANTDIV 99 +#define WLC_SET_UCANTDIV 100 +#define WLC_R_REG 101 +#define WLC_W_REG 102 +/* #define WLC_DIAG_LOOPBACK 103 old tray diag */ +/* #define WLC_RESET_D11CNTS 104 */ /* -> "reset_d11cnts" iovar */ +#define WLC_GET_MACMODE 105 +#define WLC_SET_MACMODE 106 +#define WLC_GET_MONITOR 107 +#define WLC_SET_MONITOR 108 +#define WLC_GET_GMODE 109 +#define WLC_SET_GMODE 110 +#define WLC_GET_LEGACY_ERP 111 +#define WLC_SET_LEGACY_ERP 112 +#define WLC_GET_RX_ANT 113 +#define WLC_GET_CURR_RATESET 114 /* current rateset */ +#define WLC_GET_SCANSUPPRESS 115 +#define WLC_SET_SCANSUPPRESS 116 +#define WLC_GET_AP 117 +#define WLC_SET_AP 118 +#define WLC_GET_EAP_RESTRICT 119 +#define WLC_SET_EAP_RESTRICT 120 +#define WLC_SCB_AUTHORIZE 121 +#define WLC_SCB_DEAUTHORIZE 122 +#define WLC_GET_WDSLIST 123 +#define WLC_SET_WDSLIST 124 +#define WLC_GET_ATIM 125 +#define WLC_SET_ATIM 126 +#define WLC_GET_RSSI 127 +#define WLC_GET_PHYANTDIV 128 +#define WLC_SET_PHYANTDIV 129 +#define WLC_AP_RX_ONLY 130 +#define WLC_GET_TX_PATH_PWR 131 +#define WLC_SET_TX_PATH_PWR 132 +#define WLC_GET_WSEC 133 +#define WLC_SET_WSEC 134 +#define WLC_GET_PHY_NOISE 135 +#define WLC_GET_BSS_INFO 136 +#define WLC_GET_PKTCNTS 137 +#define WLC_GET_LAZYWDS 138 +#define WLC_SET_LAZYWDS 139 +#define WLC_GET_BANDLIST 140 + +#define WLC_GET_BAND 141 +#define WLC_SET_BAND 142 +#define WLC_SCB_DEAUTHENTICATE 143 +#define WLC_GET_SHORTSLOT 144 +#define WLC_GET_SHORTSLOT_OVERRIDE 145 +#define WLC_SET_SHORTSLOT_OVERRIDE 146 +#define WLC_GET_SHORTSLOT_RESTRICT 147 +#define WLC_SET_SHORTSLOT_RESTRICT 148 +#define WLC_GET_GMODE_PROTECTION 149 +#define WLC_GET_GMODE_PROTECTION_OVERRIDE 150 +#define WLC_SET_GMODE_PROTECTION_OVERRIDE 151 +#define WLC_UPGRADE 152 +/* #define WLC_GET_MRATE 153 */ /* no longer supported */ +/* #define WLC_SET_MRATE 154 */ /* no longer supported */ +#define WLC_GET_IGNORE_BCNS 155 +#define WLC_SET_IGNORE_BCNS 156 +#define WLC_GET_SCB_TIMEOUT 157 +#define WLC_SET_SCB_TIMEOUT 158 +#define WLC_GET_ASSOCLIST 159 +#define WLC_GET_CLK 160 +#define WLC_SET_CLK 161 +#define WLC_GET_UP 162 +#define WLC_OUT 163 +#define WLC_GET_WPA_AUTH 164 +#define WLC_SET_WPA_AUTH 165 +#define WLC_GET_UCFLAGS 166 +#define WLC_SET_UCFLAGS 167 +#define WLC_GET_PWRIDX 168 +#define WLC_SET_PWRIDX 169 +#define WLC_GET_TSSI 170 +#define WLC_GET_SUP_RATESET_OVERRIDE 171 +#define WLC_SET_SUP_RATESET_OVERRIDE 172 +/* #define WLC_SET_FAST_TIMER 173 */ /* no longer supported */ +/* #define WLC_GET_FAST_TIMER 174 */ /* no longer supported */ +/* #define WLC_SET_SLOW_TIMER 175 */ /* no longer supported */ +/* #define WLC_GET_SLOW_TIMER 176 */ /* no longer supported */ +/* #define WLC_DUMP_PHYREGS 177 */ /* no longer supported */ +#define WLC_GET_PROTECTION_CONTROL 178 +#define WLC_SET_PROTECTION_CONTROL 179 +#define WLC_GET_PHYLIST 180 +#define WLC_ENCRYPT_STRENGTH 181 /* ndis only */ +#define WLC_DECRYPT_STATUS 182 /* ndis only */ +#define WLC_GET_KEY_SEQ 183 +#define WLC_GET_SCAN_CHANNEL_TIME 184 +#define WLC_SET_SCAN_CHANNEL_TIME 185 +#define WLC_GET_SCAN_UNASSOC_TIME 186 +#define WLC_SET_SCAN_UNASSOC_TIME 187 +#define WLC_GET_SCAN_HOME_TIME 188 
+#define WLC_SET_SCAN_HOME_TIME 189 +#define WLC_GET_SCAN_NPROBES 190 +#define WLC_SET_SCAN_NPROBES 191 +#define WLC_GET_PRB_RESP_TIMEOUT 192 +#define WLC_SET_PRB_RESP_TIMEOUT 193 +#define WLC_GET_ATTEN 194 +#define WLC_SET_ATTEN 195 +#define WLC_GET_SHMEM 196 /* diag */ +#define WLC_SET_SHMEM 197 /* diag */ +/* #define WLC_GET_GMODE_PROTECTION_CTS 198 */ /* no longer supported */ +/* #define WLC_SET_GMODE_PROTECTION_CTS 199 */ /* no longer supported */ +#define WLC_SET_WSEC_TEST 200 +#define WLC_SCB_DEAUTHENTICATE_FOR_REASON 201 +#define WLC_TKIP_COUNTERMEASURES 202 +#define WLC_GET_PIOMODE 203 +#define WLC_SET_PIOMODE 204 +#define WLC_SET_ASSOC_PREFER 205 +#define WLC_GET_ASSOC_PREFER 206 +#define WLC_SET_ROAM_PREFER 207 +#define WLC_GET_ROAM_PREFER 208 +#define WLC_SET_LED 209 +#define WLC_GET_LED 210 +#define WLC_GET_INTERFERENCE_MODE 211 +#define WLC_SET_INTERFERENCE_MODE 212 +#define WLC_GET_CHANNEL_QA 213 +#define WLC_START_CHANNEL_QA 214 +#define WLC_GET_CHANNEL_SEL 215 +#define WLC_START_CHANNEL_SEL 216 +#define WLC_GET_VALID_CHANNELS 217 +#define WLC_GET_FAKEFRAG 218 +#define WLC_SET_FAKEFRAG 219 +#define WLC_GET_PWROUT_PERCENTAGE 220 +#define WLC_SET_PWROUT_PERCENTAGE 221 +#define WLC_SET_BAD_FRAME_PREEMPT 222 +#define WLC_GET_BAD_FRAME_PREEMPT 223 +#define WLC_SET_LEAP_LIST 224 +#define WLC_GET_LEAP_LIST 225 +#define WLC_GET_CWMIN 226 +#define WLC_SET_CWMIN 227 +#define WLC_GET_CWMAX 228 +#define WLC_SET_CWMAX 229 +#define WLC_GET_WET 230 +#define WLC_SET_WET 231 +#define WLC_GET_PUB 232 +/* #define WLC_SET_GLACIAL_TIMER 233 */ /* no longer supported */ +/* #define WLC_GET_GLACIAL_TIMER 234 */ /* no longer supported */ +#define WLC_GET_KEY_PRIMARY 235 +#define WLC_SET_KEY_PRIMARY 236 + + +/* #define WLC_DUMP_RADIOREGS 237 */ /* no longer supported */ +#define WLC_GET_ACI_ARGS 238 +#define WLC_SET_ACI_ARGS 239 +#define WLC_UNSET_CALLBACK 240 +#define WLC_SET_CALLBACK 241 +#define WLC_GET_RADAR 242 +#define WLC_SET_RADAR 243 +#define WLC_SET_SPECT_MANAGMENT 244 +#define WLC_GET_SPECT_MANAGMENT 245 +#define WLC_WDS_GET_REMOTE_HWADDR 246 /* handled in wl_linux.c/wl_vx.c */ +#define WLC_WDS_GET_WPA_SUP 247 +#define WLC_SET_CS_SCAN_TIMER 248 +#define WLC_GET_CS_SCAN_TIMER 249 +#define WLC_MEASURE_REQUEST 250 +#define WLC_INIT 251 +#define WLC_SEND_QUIET 252 +#define WLC_KEEPALIVE 253 +#define WLC_SEND_PWR_CONSTRAINT 254 +#define WLC_UPGRADE_STATUS 255 +#define WLC_CURRENT_PWR 256 +#define WLC_GET_SCAN_PASSIVE_TIME 257 +#define WLC_SET_SCAN_PASSIVE_TIME 258 +#define WLC_LEGACY_LINK_BEHAVIOR 259 +#define WLC_GET_CHANNELS_IN_COUNTRY 260 +#define WLC_GET_COUNTRY_LIST 261 +#define WLC_GET_VAR 262 /* get value of named variable */ +#define WLC_SET_VAR 263 /* set named variable to value */ +#define WLC_NVRAM_GET 264 /* deprecated */ +#define WLC_NVRAM_SET 265 +#define WLC_NVRAM_DUMP 266 +#define WLC_REBOOT 267 +#define WLC_SET_WSEC_PMK 268 +#define WLC_GET_AUTH_MODE 269 +#define WLC_SET_AUTH_MODE 270 +#define WLC_GET_WAKEENTRY 271 +#define WLC_SET_WAKEENTRY 272 +#define WLC_NDCONFIG_ITEM 273 /* currently handled in wl_oid.c */ +#define WLC_NVOTPW 274 +#define WLC_OTPW 275 +#define WLC_IOV_BLOCK_GET 276 +#define WLC_IOV_MODULES_GET 277 +#define WLC_SOFT_RESET 278 +#define WLC_GET_ALLOW_MODE 279 +#define WLC_SET_ALLOW_MODE 280 +#define WLC_GET_DESIRED_BSSID 281 +#define WLC_SET_DESIRED_BSSID 282 +#define WLC_DISASSOC_MYAP 283 +#define WLC_GET_NBANDS 284 /* for Dongle EXT_STA support */ +#define WLC_GET_BANDSTATES 285 /* for Dongle EXT_STA support */ +#define WLC_GET_WLC_BSS_INFO 286 /* for 
Dongle EXT_STA support */ +#define WLC_GET_ASSOC_INFO 287 /* for Dongle EXT_STA support */ +#define WLC_GET_OID_PHY 288 /* for Dongle EXT_STA support */ +#define WLC_SET_OID_PHY 289 /* for Dongle EXT_STA support */ +#define WLC_SET_ASSOC_TIME 290 /* for Dongle EXT_STA support */ +#define WLC_GET_DESIRED_SSID 291 /* for Dongle EXT_STA support */ +#define WLC_GET_CHANSPEC 292 /* for Dongle EXT_STA support */ +#define WLC_GET_ASSOC_STATE 293 /* for Dongle EXT_STA support */ +#define WLC_SET_PHY_STATE 294 /* for Dongle EXT_STA support */ +#define WLC_GET_SCAN_PENDING 295 /* for Dongle EXT_STA support */ +#define WLC_GET_SCANREQ_PENDING 296 /* for Dongle EXT_STA support */ +#define WLC_GET_PREV_ROAM_REASON 297 /* for Dongle EXT_STA support */ +#define WLC_SET_PREV_ROAM_REASON 298 /* for Dongle EXT_STA support */ +#define WLC_GET_BANDSTATES_PI 299 /* for Dongle EXT_STA support */ +#define WLC_GET_PHY_STATE 300 /* for Dongle EXT_STA support */ +#define WLC_GET_BSS_WPA_RSN 301 /* for Dongle EXT_STA support */ +#define WLC_GET_BSS_WPA2_RSN 302 /* for Dongle EXT_STA support */ +#define WLC_GET_BSS_BCN_TS 303 /* for Dongle EXT_STA support */ +#define WLC_GET_INT_DISASSOC 304 /* for Dongle EXT_STA support */ +#define WLC_SET_NUM_PEERS 305 /* for Dongle EXT_STA support */ +#define WLC_GET_NUM_BSS 306 /* for Dongle EXT_STA support */ +#define WLC_PHY_SAMPLE_COLLECT 307 /* phy sample collect mode */ +/* #define WLC_UM_PRIV 308 */ /* Deprecated: usermode driver */ +#define WLC_GET_CMD 309 +/* #define WLC_LAST 310 */ /* Never used - can be reused */ +#define WLC_SET_INTERFERENCE_OVERRIDE_MODE 311 /* set inter mode override */ +#define WLC_GET_INTERFERENCE_OVERRIDE_MODE 312 /* get inter mode override */ +/* #define WLC_GET_WAI_RESTRICT 313 */ /* for WAPI, deprecated use iovar instead */ +/* #define WLC_SET_WAI_RESTRICT 314 */ /* for WAPI, deprecated use iovar instead */ +/* #define WLC_SET_WAI_REKEY 315 */ /* for WAPI, deprecated use iovar instead */ +#define WLC_SET_NAT_CONFIG 316 /* for configuring NAT filter driver */ +#define WLC_GET_NAT_STATE 317 +#define WLC_GET_TXBF_RATESET 318 +#define WLC_SET_TXBF_RATESET 319 +#define WLC_SCAN_CQ 320 +#define WLC_GET_RSSI_QDB 321 /* qdB portion of the RSSI */ +#define WLC_DUMP_RATESET 322 +#define WLC_ECHO 323 +#define WLC_LAST 324 +#ifndef EPICTRL_COOKIE +#define EPICTRL_COOKIE 0xABADCEDE +#endif + +/* vx wlc ioctl's offset */ +#define CMN_IOCTL_OFF 0x180 + +/* + * custom OID support + * + * 0xFF - implementation specific OID + * 0xE4 - first byte of Broadcom PCI vendor ID + * 0x14 - second byte of Broadcom PCI vendor ID + * 0xXX - the custom OID number + */ + +/* begin 0x1f values beyond the start of the ET driver range. 
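*/

For illustration only: as the comment above spells out, each of these vendor OIDs is just WL_OID_BASE plus the corresponding WLC ioctl number. A minimal sketch of the composition (wl_oid is a hypothetical helper, not part of this header):

    /* Hypothetical helper: compose a vendor OID from a WLC ioctl number. */
    static inline unsigned int wl_oid(unsigned int wlc_cmd)
    {
        return 0xFFE41420u + wlc_cmd;   /* WL_OID_BASE + cmd */
    }
    /* wl_oid(14) == 0xFFE4142E, i.e. OID_WL_GETINSTANCE below,
     * since WLC_GET_INSTANCE is 14.
     */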
+#define WL_OID_BASE 0xFFE41420 + +/* NDIS overrides */ +#define OID_WL_GETINSTANCE (WL_OID_BASE + WLC_GET_INSTANCE) +#define OID_WL_GET_FORCELINK (WL_OID_BASE + WLC_GET_FORCELINK) +#define OID_WL_SET_FORCELINK (WL_OID_BASE + WLC_SET_FORCELINK) +#define OID_WL_ENCRYPT_STRENGTH (WL_OID_BASE + WLC_ENCRYPT_STRENGTH) +#define OID_WL_DECRYPT_STATUS (WL_OID_BASE + WLC_DECRYPT_STATUS) +#define OID_LEGACY_LINK_BEHAVIOR (WL_OID_BASE + WLC_LEGACY_LINK_BEHAVIOR) +#define OID_WL_NDCONFIG_ITEM (WL_OID_BASE + WLC_NDCONFIG_ITEM) + +/* EXT_STA Dongle support */ +#define OID_STA_CHANSPEC (WL_OID_BASE + WLC_GET_CHANSPEC) +#define OID_STA_NBANDS (WL_OID_BASE + WLC_GET_NBANDS) +#define OID_STA_GET_PHY (WL_OID_BASE + WLC_GET_OID_PHY) +#define OID_STA_SET_PHY (WL_OID_BASE + WLC_SET_OID_PHY) +#define OID_STA_ASSOC_TIME (WL_OID_BASE + WLC_SET_ASSOC_TIME) +#define OID_STA_DESIRED_SSID (WL_OID_BASE + WLC_GET_DESIRED_SSID) +#define OID_STA_SET_PHY_STATE (WL_OID_BASE + WLC_SET_PHY_STATE) +#define OID_STA_SCAN_PENDING (WL_OID_BASE + WLC_GET_SCAN_PENDING) +#define OID_STA_SCANREQ_PENDING (WL_OID_BASE + WLC_GET_SCANREQ_PENDING) +#define OID_STA_GET_ROAM_REASON (WL_OID_BASE + WLC_GET_PREV_ROAM_REASON) +#define OID_STA_SET_ROAM_REASON (WL_OID_BASE + WLC_SET_PREV_ROAM_REASON) +#define OID_STA_GET_PHY_STATE (WL_OID_BASE + WLC_GET_PHY_STATE) +#define OID_STA_INT_DISASSOC (WL_OID_BASE + WLC_GET_INT_DISASSOC) +#define OID_STA_SET_NUM_PEERS (WL_OID_BASE + WLC_SET_NUM_PEERS) +#define OID_STA_GET_NUM_BSS (WL_OID_BASE + WLC_GET_NUM_BSS) + +/* NAT filter driver support */ +#define OID_NAT_SET_CONFIG (WL_OID_BASE + WLC_SET_NAT_CONFIG) +#define OID_NAT_GET_STATE (WL_OID_BASE + WLC_GET_NAT_STATE) + +#define WL_DECRYPT_STATUS_SUCCESS 1 +#define WL_DECRYPT_STATUS_FAILURE 2 +#define WL_DECRYPT_STATUS_UNKNOWN 3 + +/* allows user-mode app to poll the status of USB image upgrade */ +#define WLC_UPGRADE_SUCCESS 0 +#define WLC_UPGRADE_PENDING 1 + +/* WLC_GET_AUTH, WLC_SET_AUTH values */ +#define WL_AUTH_OPEN_SYSTEM 0 /* d11 open authentication */ +#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */ +#define WL_AUTH_OPEN_SHARED 2 /* try open, then shared if open failed w/rc 13 */ + +/* a large TX Power as an init value to factor out of MIN() calculations, + * keep low enough to fit in an int8, units are .25 dBm + */ +#define WLC_TXPWR_MAX (127) /* ~32 dBm = 1,500 mW */ + +/* "diag" iovar argument and error code */ +#define WL_DIAG_INTERRUPT 1 /* d11 loopback interrupt test */ +#define WL_DIAG_LOOPBACK 2 /* d11 loopback data test */ +#define WL_DIAG_MEMORY 3 /* d11 memory test */ +#define WL_DIAG_LED 4 /* LED test */ +#define WL_DIAG_REG 5 /* d11/phy register test */ +#define WL_DIAG_SROM 6 /* srom read/crc test */ +#define WL_DIAG_DMA 7 /* DMA test */ +#define WL_DIAG_LOOPBACK_EXT 8 /* enhanced d11 loopback data test */ + +#define WL_DIAGERR_SUCCESS 0 +#define WL_DIAGERR_FAIL_TO_RUN 1 /* unable to run requested diag */ +#define WL_DIAGERR_NOT_SUPPORTED 2 /* diag requested is not supported */ +#define WL_DIAGERR_INTERRUPT_FAIL 3 /* loopback interrupt test failed */ +#define WL_DIAGERR_LOOPBACK_FAIL 4 /* loopback data test failed */ +#define WL_DIAGERR_SROM_FAIL 5 /* srom read failed */ +#define WL_DIAGERR_SROM_BADCRC 6 /* srom crc failed */ +#define WL_DIAGERR_REG_FAIL 7 /* d11/phy register test failed */ +#define WL_DIAGERR_MEMORY_FAIL 8 /* d11 memory test failed */ +#define WL_DIAGERR_NOMEM 9 /* diag test failed due to no memory */ +#define WL_DIAGERR_DMA_FAIL 10 /* DMA test failed */ + +#define WL_DIAGERR_MEMORY_TIMEOUT 11 /* d11 
memory test didn't finish in time */ +#define WL_DIAGERR_MEMORY_BADPATTERN 12 /* d11 memory test result in bad pattern */ + +/* band types */ +#define WLC_BAND_AUTO 0 /* auto-select */ +#define WLC_BAND_5G 1 /* 5 GHz */ +#define WLC_BAND_2G 2 /* 2.4 GHz */ +#define WLC_BAND_ALL 3 /* all bands */ + +/* band range returned by band_range iovar */ +#define WL_CHAN_FREQ_RANGE_2G 0 +#define WL_CHAN_FREQ_RANGE_5GL 1 +#define WL_CHAN_FREQ_RANGE_5GM 2 +#define WL_CHAN_FREQ_RANGE_5GH 3 + +#define WL_CHAN_FREQ_RANGE_5GLL_5BAND 4 +#define WL_CHAN_FREQ_RANGE_5GLH_5BAND 5 +#define WL_CHAN_FREQ_RANGE_5GML_5BAND 6 +#define WL_CHAN_FREQ_RANGE_5GMH_5BAND 7 +#define WL_CHAN_FREQ_RANGE_5GH_5BAND 8 + +#define WL_CHAN_FREQ_RANGE_5G_BAND0 1 +#define WL_CHAN_FREQ_RANGE_5G_BAND1 2 +#define WL_CHAN_FREQ_RANGE_5G_BAND2 3 +#define WL_CHAN_FREQ_RANGE_5G_BAND3 4 +#define WL_CHAN_FREQ_RANGE_5G_4BAND 5 + + +/* SROM12 */ +#define WL_CHAN_FREQ_RANGE_5G_BAND4 5 +#define WL_CHAN_FREQ_RANGE_2G_40 6 +#define WL_CHAN_FREQ_RANGE_5G_BAND0_40 7 +#define WL_CHAN_FREQ_RANGE_5G_BAND1_40 8 +#define WL_CHAN_FREQ_RANGE_5G_BAND2_40 9 +#define WL_CHAN_FREQ_RANGE_5G_BAND3_40 10 +#define WL_CHAN_FREQ_RANGE_5G_BAND4_40 11 +#define WL_CHAN_FREQ_RANGE_5G_BAND0_80 12 +#define WL_CHAN_FREQ_RANGE_5G_BAND1_80 13 +#define WL_CHAN_FREQ_RANGE_5G_BAND2_80 14 +#define WL_CHAN_FREQ_RANGE_5G_BAND3_80 15 +#define WL_CHAN_FREQ_RANGE_5G_BAND4_80 16 + +#define WL_CHAN_FREQ_RANGE_5G_5BAND 18 +#define WL_CHAN_FREQ_RANGE_5G_5BAND_40 19 +#define WL_CHAN_FREQ_RANGE_5G_5BAND_80 20 + +#define WLC_MACMODE_DISABLED 0 /* MAC list disabled */ +#define WLC_MACMODE_DENY 1 /* Deny specified (i.e. allow unspecified) */ +#define WLC_MACMODE_ALLOW 2 /* Allow specified (i.e. deny unspecified) */ + +/* + * 54g modes (basic bits may still be overridden) + * + * GMODE_LEGACY_B Rateset: 1b, 2b, 5.5, 11 + * Preamble: Long + * Shortslot: Off + * GMODE_AUTO Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54 + * Extended Rateset: 6, 9, 12, 48 + * Preamble: Long + * Shortslot: Auto + * GMODE_ONLY Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54 + * Extended Rateset: 6b, 9, 12b, 48 + * Preamble: Short required + * Shortslot: Auto + * GMODE_B_DEFERRED Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54 + * Extended Rateset: 6, 9, 12, 48 + * Preamble: Long + * Shortslot: On + * GMODE_PERFORMANCE Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54 + * Preamble: Short required + * Shortslot: On and required + * GMODE_LRS Rateset: 1b, 2b, 5.5b, 11b + * Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54 + * Preamble: Long + * Shortslot: Auto + */ +#define GMODE_LEGACY_B 0 +#define GMODE_AUTO 1 +#define GMODE_ONLY 2 +#define GMODE_B_DEFERRED 3 +#define GMODE_PERFORMANCE 4 +#define GMODE_LRS 5 +#define GMODE_MAX 6 + +/* values for PLCPHdr_override */ +#define WLC_PLCP_AUTO -1 +#define WLC_PLCP_SHORT 0 +#define WLC_PLCP_LONG 1 + +/* values for g_protection_override and n_protection_override */ +#define WLC_PROTECTION_AUTO -1 +#define WLC_PROTECTION_OFF 0 +#define WLC_PROTECTION_ON 1 +#define WLC_PROTECTION_MMHDR_ONLY 2 +#define WLC_PROTECTION_CTS_ONLY 3 + +/* values for g_protection_control and n_protection_control */ +#define WLC_PROTECTION_CTL_OFF 0 +#define WLC_PROTECTION_CTL_LOCAL 1 +#define WLC_PROTECTION_CTL_OVERLAP 2 + +/* values for n_protection */ +#define WLC_N_PROTECTION_OFF 0 +#define WLC_N_PROTECTION_OPTIONAL 1 +#define WLC_N_PROTECTION_20IN40 2 +#define WLC_N_PROTECTION_MIXEDMODE 3 +
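A note on the bandwidth-capability encoding defined a little further down: each WLC_BW_CAP_* value sets its own width bit plus those of all narrower widths, so the WL_BW_CAP_xxMHZ() test macros answer "is this width usable" for any capability at least that wide. A minimal sketch (supports_40mhz is a hypothetical helper; uint8, TRUE and FALSE come from Broadcom's typedefs.h):

    /* Hypothetical helper built on the macros defined below. */
    static int supports_40mhz(uint8 bw_cap)
    {
        return WL_BW_CAP_40MHZ(bw_cap);
    }
    /* supports_40mhz(WLC_BW_CAP_80MHZ) -> TRUE: 0x07 has WLC_BW_40MHZ_BIT (0x02) set.
     * supports_40mhz(WLC_BW_CAP_20MHZ) -> FALSE: 0x01 does not.
     */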
WLC_N_PREAMBLE_GF_BRCM 2 + +/* values for band specific 40MHz capabilities (deprecated) */ +#define WLC_N_BW_20ALL 0 +#define WLC_N_BW_40ALL 1 +#define WLC_N_BW_20IN2G_40IN5G 2 + +#define WLC_BW_20MHZ_BIT (1<<0) +#define WLC_BW_40MHZ_BIT (1<<1) +#define WLC_BW_80MHZ_BIT (1<<2) +#define WLC_BW_160MHZ_BIT (1<<3) +#define WLC_BW_10MHZ_BIT (1<<4) +#define WLC_BW_5MHZ_BIT (1<<5) +#define WLC_BW_2P5MHZ_BIT (1<<6) + +/* Bandwidth capabilities */ +#define WLC_BW_CAP_20MHZ (WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_40MHZ (WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_80MHZ (WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_160MHZ (WLC_BW_160MHZ_BIT|WLC_BW_80MHZ_BIT| \ + WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_2P5MHZ (WLC_BW_2P5MHZ_BIT) +#define WLC_BW_CAP_5MHZ (WLC_BW_5MHZ_BIT) +#define WLC_BW_CAP_10MHZ (WLC_BW_10MHZ_BIT) +#define WLC_BW_CAP_UNRESTRICTED 0xFF + +#define WL_BW_CAP_20MHZ(bw_cap) (((bw_cap) & WLC_BW_20MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_40MHZ(bw_cap) (((bw_cap) & WLC_BW_40MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_80MHZ(bw_cap) (((bw_cap) & WLC_BW_80MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_160MHZ(bw_cap)(((bw_cap) & WLC_BW_160MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_2P5MHZ(bw_cap)(((bw_cap) & WLC_BW_2P5MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_5MHZ(bw_cap) (((bw_cap) & WLC_BW_5MHZ_BIT) ? TRUE : FALSE) +#define WL_BW_CAP_10MHZ(bw_cap) (((bw_cap) & WLC_BW_10MHZ_BIT) ? TRUE : FALSE) + +/* values to force tx/rx chain */ +#define WLC_N_TXRX_CHAIN0 0 +#define WLC_N_TXRX_CHAIN1 1 + +/* bitflags for SGI support (sgi_rx iovar) */ +#define WLC_N_SGI_20 0x01 +#define WLC_N_SGI_40 0x02 +#define WLC_VHT_SGI_80 0x04 + +/* when sgi_tx==WLC_SGI_ALL, bypass rate selection, enable sgi for all mcs */ +#define WLC_SGI_ALL 0x02 + +#define LISTEN_INTERVAL 10 +/* interference mitigation options */ +#define INTERFERE_OVRRIDE_OFF -1 /* interference override off */ +#define INTERFERE_NONE 0 /* off */ +#define NON_WLAN 1 /* foreign/non 802.11 interference, no auto detect */ +#define WLAN_MANUAL 2 /* ACI: no auto detection */ +#define WLAN_AUTO 3 /* ACI: auto detect */ +#define WLAN_AUTO_W_NOISE 4 /* ACI: auto - detect and non 802.11 interference */ +#define AUTO_ACTIVE (1 << 7) /* Auto is currently active */ + +/* interfernece mode bit-masks (ACPHY) */ +#define ACPHY_ACI_GLITCHBASED_DESENSE 1 /* bit 0 */ +#define ACPHY_ACI_HWACI_PKTGAINLMT 2 /* bit 1 */ +#define ACPHY_ACI_W2NB_PKTGAINLMT 4 /* bit 2 */ +#define ACPHY_ACI_PREEMPTION 8 /* bit 3 */ +#define ACPHY_HWACI_MITIGATION 16 /* bit 4 */ +#define ACPHY_ACI_MAX_MODE 31 + +/* AP environment */ +#define AP_ENV_DETECT_NOT_USED 0 /* We aren't using AP environment detection */ +#define AP_ENV_DENSE 1 /* "Corporate" or other AP dense environment */ +#define AP_ENV_SPARSE 2 /* "Home" or other sparse environment */ +#define AP_ENV_INDETERMINATE 3 /* AP environment hasn't been identified */ + +#define TRIGGER_NOW 0 +#define TRIGGER_CRS 0x01 +#define TRIGGER_CRSDEASSERT 0x02 +#define TRIGGER_GOODFCS 0x04 +#define TRIGGER_BADFCS 0x08 +#define TRIGGER_BADPLCP 0x10 +#define TRIGGER_CRSGLITCH 0x20 + +#define WL_SAMPLEDATA_HEADER_TYPE 1 +#define WL_SAMPLEDATA_HEADER_SIZE 80 /* sample collect header size (bytes) */ +#define WL_SAMPLEDATA_TYPE 2 +#define WL_SAMPLEDATA_SEQ 0xff /* sequence # */ +#define WL_SAMPLEDATA_MORE_DATA 0x100 /* more data mask */ + +/* WL_OTA START */ +#define WL_OTA_ARG_PARSE_BLK_SIZE 1200 +#define WL_OTA_TEST_MAX_NUM_RATE 30 +#define WL_OTA_TEST_MAX_NUM_SEQ 100 +#define 
WL_OTA_TEST_MAX_NUM_RSSI 85 + +#define WL_THRESHOLD_LO_BAND 70 /* range from 5250MHz - 5350MHz */ + +/* radar iovar SET defines */ +#define WL_RADAR_DETECTOR_OFF 0 /* radar detector off */ +#define WL_RADAR_DETECTOR_ON 1 /* radar detector on */ +#define WL_RADAR_SIMULATED 2 /* force radar detector to declare + * detection once + */ +#define WL_RADAR_SIMULATED_SC 3 /* force radar detector to declare + * detection once on scan core + * if available and active + */ +#define WL_RSSI_ANT_VERSION 1 /* current version of wl_rssi_ant_t */ +#define WL_ANT_RX_MAX 2 /* max 2 receive antennas */ +#define WL_ANT_HT_RX_MAX 4 /* max 4 receive antennas/cores */ +#define WL_ANT_IDX_1 0 /* antenna index 1 */ +#define WL_ANT_IDX_2 1 /* antenna index 2 */ + +#ifndef WL_RSSI_ANT_MAX +#define WL_RSSI_ANT_MAX 4 /* max possible rx antennas */ +#elif WL_RSSI_ANT_MAX != 4 +#error "WL_RSSI_ANT_MAX does not match" +#endif + +/* dfs_status iovar-related defines */ + +/* cac - channel availability check, + * ism - in-service monitoring + * csa - channel switching announcement + */ + +/* cac state values */ +#define WL_DFS_CACSTATE_IDLE 0 /* state for operating in non-radar channel */ +#define WL_DFS_CACSTATE_PREISM_CAC 1 /* CAC in progress */ +#define WL_DFS_CACSTATE_ISM 2 /* ISM in progress */ +#define WL_DFS_CACSTATE_CSA 3 /* csa */ +#define WL_DFS_CACSTATE_POSTISM_CAC 4 /* ISM CAC */ +#define WL_DFS_CACSTATE_PREISM_OOC 5 /* PREISM OOC */ +#define WL_DFS_CACSTATE_POSTISM_OOC 6 /* POSTISM OOC */ +#define WL_DFS_CACSTATES 7 /* this many states exist */ + +/* Defines used with channel_bandwidth for curpower */ +#define WL_BW_20MHZ 0 +#define WL_BW_40MHZ 1 +#define WL_BW_80MHZ 2 +#define WL_BW_160MHZ 3 +#define WL_BW_8080MHZ 4 +#define WL_BW_2P5MHZ 5 +#define WL_BW_5MHZ 6 +#define WL_BW_10MHZ 7 + +/* tx_power_t.flags bits */ +#define WL_TX_POWER_F_ENABLED 1 +#define WL_TX_POWER_F_HW 2 +#define WL_TX_POWER_F_MIMO 4 +#define WL_TX_POWER_F_SISO 8 +#define WL_TX_POWER_F_HT 0x10 +#define WL_TX_POWER_F_VHT 0x20 +#define WL_TX_POWER_F_OPENLOOP 0x40 +#define WL_TX_POWER_F_PROP11NRATES 0x80 + +/* Message levels */ +#define WL_ERROR_VAL 0x00000001 +#define WL_TRACE_VAL 0x00000002 +#define WL_PRHDRS_VAL 0x00000004 +#define WL_PRPKT_VAL 0x00000008 +#define WL_INFORM_VAL 0x00000010 +#define WL_TMP_VAL 0x00000020 +#define WL_OID_VAL 0x00000040 +#define WL_RATE_VAL 0x00000080 +#define WL_ASSOC_VAL 0x00000100 +#define WL_PRUSR_VAL 0x00000200 +#define WL_PS_VAL 0x00000400 +#define WL_TXPWR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ +#define WL_MODE_SWITCH_VAL 0x00000800 /* Using retired TXPWR val */ +#define WL_PORT_VAL 0x00001000 +#define WL_DUAL_VAL 0x00002000 +#define WL_WSEC_VAL 0x00004000 +#define WL_WSEC_DUMP_VAL 0x00008000 +#define WL_LOG_VAL 0x00010000 +#define WL_NRSSI_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ +#define WL_BCNTRIM_VAL 0x00020000 /* Using retired NRSSI VAL */ +#define WL_LOFT_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ +#define WL_PFN_VAL 0x00040000 /* Using retired LOFT_VAL */ +#define WL_REGULATORY_VAL 0x00080000 +#define WL_TAF_VAL 0x00100000 +#define WL_RADAR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */ +#define WL_WDI_VAL 0x00200000 /* Using retired WL_RADAR_VAL VAL */ +#define WL_MPC_VAL 0x00400000 +#define WL_APSTA_VAL 0x00800000 +#define WL_DFS_VAL 0x01000000 +#define WL_BA_VAL 0x00000000 /* retired in TOT on 6/14/2010 */ +#define WL_MUMIMO_VAL 0x02000000 /* Using retired WL_BA_VAL */ +#define WL_ACI_VAL 0x04000000 +#define WL_PRMAC_VAL 0x04000000 +#define WL_MBSS_VAL 0x04000000 
+#define WL_CAC_VAL 0x08000000 +#define WL_AMSDU_VAL 0x10000000 +#define WL_AMPDU_VAL 0x20000000 +#define WL_FFPLD_VAL 0x40000000 + +/* wl_msg_level is full. For new bits take the next one and AND with + * wl_msg_level2 in wl_dbg.h + */ +#define WL_DPT_VAL 0x00000001 +/* re-using WL_DPT_VAL */ +#define WL_MESH_VAL 0x00000001 +#define WL_SCAN_VAL 0x00000002 +#define WL_WOWL_VAL 0x00000004 +#define WL_COEX_VAL 0x00000008 +#define WL_RTDC_VAL 0x00000010 +#define WL_PROTO_VAL 0x00000020 +#define WL_BTA_VAL 0x00000040 +#define WL_CHANINT_VAL 0x00000080 +#define WL_WMF_VAL 0x00000100 +#define WL_P2P_VAL 0x00000200 +#define WL_ITFR_VAL 0x00000400 +#define WL_MCHAN_VAL 0x00000800 +#define WL_TDLS_VAL 0x00001000 +#define WL_MCNX_VAL 0x00002000 +#define WL_PROT_VAL 0x00004000 +#define WL_PSTA_VAL 0x00008000 +#define WL_TSO_VAL 0x00010000 +#define WL_TRF_MGMT_VAL 0x00020000 +#define WL_LPC_VAL 0x00040000 +#define WL_L2FILTER_VAL 0x00080000 +#define WL_TXBF_VAL 0x00100000 +#define WL_P2PO_VAL 0x00200000 +#define WL_TBTT_VAL 0x00400000 +#define WL_FBT_VAL 0x00800000 +#define WL_MQ_VAL 0x01000000 + +/* This level is currently used in Phoenix2 only */ +#define WL_SRSCAN_VAL 0x02000000 + +#define WL_WNM_VAL 0x04000000 +#define WL_PWRSEL_VAL 0x10000000 +#define WL_NET_DETECT_VAL 0x20000000 +#define WL_PCIE_VAL 0x40000000 +#define WL_PMDUR_VAL 0x80000000 + + +/* use top-bit for WL_TIME_STAMP_VAL because this is a modifier + * rather than a message-type of its own + */ +#define WL_TIMESTAMP_VAL 0x80000000 + +/* max # of leds supported by GPIO (gpio pin# == led index#) */ +#define WL_LED_NUMGPIO 32 /* gpio 0-31 */ + +/* led per-pin behaviors */ +#define WL_LED_OFF 0 /* always off */ +#define WL_LED_ON 1 /* always on */ +#define WL_LED_ACTIVITY 2 /* activity */ +#define WL_LED_RADIO 3 /* radio enabled */ +#define WL_LED_ARADIO 4 /* 5 GHz radio enabled */ +#define WL_LED_BRADIO 5 /* 2.4 GHz radio enabled */ +#define WL_LED_BGMODE 6 /* on if gmode, off if bmode */ +#define WL_LED_WI1 7 +#define WL_LED_WI2 8 +#define WL_LED_WI3 9 +#define WL_LED_ASSOC 10 /* associated state indicator */ +#define WL_LED_INACTIVE 11 /* null behavior (clears default behavior) */ +#define WL_LED_ASSOCACT 12 /* on when associated; blink fast for activity */ +#define WL_LED_WI4 13 +#define WL_LED_WI5 14 +#define WL_LED_BLINKSLOW 15 /* blink slow */ +#define WL_LED_BLINKMED 16 /* blink med */ +#define WL_LED_BLINKFAST 17 /* blink fast */ +#define WL_LED_BLINKCUSTOM 18 /* blink custom */ +#define WL_LED_BLINKPERIODIC 19 /* blink periodic (custom 1000ms / off 400ms) */ +#define WL_LED_ASSOC_WITH_SEC 20 /* when connected with security */ + /* keep on for 300 sec */ +#define WL_LED_START_OFF 21 /* off upon boot, could be turned on later */ +#define WL_LED_WI6 22 +#define WL_LED_WI7 23 +#define WL_LED_WI8 24 +#define WL_LED_NUMBEHAVIOR 25 + +/* led behavior numeric value format */ +#define WL_LED_BEH_MASK 0x3f /* behavior mask */ +#define WL_LED_PMU_OVERRIDE 0x40 /* need to set PMU Override bit for the GPIO */ +#define WL_LED_AL_MASK 0x80 /* activelow (polarity) bit */ + +/* number of bytes needed to define a proper bit mask for MAC event reporting */ +#define BCMIO_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) +#define BCMIO_NBBY 8 +#define WL_EVENTING_MASK_LEN 16 + +
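The three defines just above size the MAC-event bitmask: BCMIO_ROUNDUP(x, y) rounds x up to a multiple of y, and one byte holds BCMIO_NBBY (8) event bits. A worked sketch (MASK_BYTES is a hypothetical convenience macro, not part of the header):

    /* Bytes needed for a bitmask covering 'nevents' event codes. */
    #define MASK_BYTES(nevents) (BCMIO_ROUNDUP(nevents, BCMIO_NBBY) / BCMIO_NBBY)

    /* MASK_BYTES(121) == ((121 + 7) / 8) * 8 / 8 == 16, so the 16-byte
     * WL_EVENTING_MASK_LEN accommodates up to 128 distinct event codes.
     */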
+/* join preference types */ +#define WL_JOIN_PREF_RSSI 1 /* by RSSI */ +#define WL_JOIN_PREF_WPA 2 /* by akm and ciphers */ +#define WL_JOIN_PREF_BAND 3 /* by 802.11 band */ +#define WL_JOIN_PREF_RSSI_DELTA 4 /* by 802.11 band only if RSSI delta condition matches */ +#define WL_JOIN_PREF_TRANS_PREF 5 /* defined by requesting AP */ + +/* band preference */ +#define WLJP_BAND_ASSOC_PREF 255 /* use what WLC_SET_ASSOC_PREFER ioctl specifies */ + +/* any multicast cipher suite */ +#define WL_WPA_ACP_MCS_ANY "\x00\x00\x00\x00" + +/* 802.11h measurement types */ +#define WLC_MEASURE_TPC 1 +#define WLC_MEASURE_CHANNEL_BASIC 2 +#define WLC_MEASURE_CHANNEL_CCA 3 +#define WLC_MEASURE_CHANNEL_RPI 4 + +/* regulatory enforcement levels */ +#define SPECT_MNGMT_OFF 0 /* both 11h and 11d disabled */ +#define SPECT_MNGMT_LOOSE_11H 1 /* allow non-11h APs in scan lists */ +#define SPECT_MNGMT_STRICT_11H 2 /* prune out non-11h APs from scan list */ +#define SPECT_MNGMT_STRICT_11D 3 /* switch to 802.11D mode */ +/* SPECT_MNGMT_LOOSE_11H_D - same as SPECT_MNGMT_LOOSE with the exception that Country IE + * adoption is done regardless of capability spectrum_management + */ +#define SPECT_MNGMT_LOOSE_11H_D 4 /* operation defined above */ + +#define WL_CHAN_VALID_HW (1 << 0) /* valid with current HW */ +#define WL_CHAN_VALID_SW (1 << 1) /* valid with current country setting */ +#define WL_CHAN_BAND_5G (1 << 2) /* 5GHz-band channel */ +#define WL_CHAN_RADAR (1 << 3) /* radar sensitive channel */ +#define WL_CHAN_INACTIVE (1 << 4) /* temporarily inactive due to radar */ +#define WL_CHAN_PASSIVE (1 << 5) /* channel is in passive mode */ +#define WL_CHAN_RESTRICTED (1 << 6) /* restricted use channel */ + +/* BTC mode used by "btc_mode" iovar */ +#define WL_BTC_DISABLE 0 /* disable BT coexistence */ +#define WL_BTC_FULLTDM 1 /* full TDM COEX */ +#define WL_BTC_ENABLE 1 /* full TDM COEX to maintain backward compatibility */ +#define WL_BTC_PREMPT 2 /* full TDM COEX with preemption */ +#define WL_BTC_LITE 3 /* lightweight coex for large isolation platform */ +#define WL_BTC_PARALLEL 4 /* BT and WLAN run in parallel with separate antenna */ +#define WL_BTC_HYBRID 5 /* hybrid coex, only ack is allowed to transmit in BT slot */ +#define WL_BTC_DEFAULT 8 /* set the default mode for the device */ +#define WL_INF_BTC_DISABLE 0 +#define WL_INF_BTC_ENABLE 1 +#define WL_INF_BTC_AUTO 3 + +/* BTC wire used by "btc_wire" iovar */ +#define WL_BTC_DEFWIRE 0 /* use default wire setting */ +#define WL_BTC_2WIRE 2 /* use 2-wire BTC */ +#define WL_BTC_3WIRE 3 /* use 3-wire BTC */ +#define WL_BTC_4WIRE 4 /* use 4-wire BTC */ + +/* BTC flags: BTC configuration that can be set by host */ +#define WL_BTC_FLAG_PREMPT (1 << 0) +#define WL_BTC_FLAG_BT_DEF (1 << 1) +#define WL_BTC_FLAG_ACTIVE_PROT (1 << 2) +#define WL_BTC_FLAG_SIM_RSP (1 << 3) +#define WL_BTC_FLAG_PS_PROTECT (1 << 4) +#define WL_BTC_FLAG_SIM_TX_LP (1 << 5) +#define WL_BTC_FLAG_ECI (1 << 6) +#define WL_BTC_FLAG_LIGHT (1 << 7) +#define WL_BTC_FLAG_PARALLEL (1 << 8) + +/* maximum channels returned by the get valid channels iovar */ +#define WL_NUMCHANNELS 64 + +/* max number of chanspecs (used by the iovar to calc. 
buf space) */ +#ifdef WL11AC_80P80 +#define WL_NUMCHANSPECS 206 +#else +#define WL_NUMCHANSPECS 110 +#endif + +/* WDS link local endpoint WPA role */ +#define WL_WDS_WPA_ROLE_AUTH 0 /* authenticator */ +#define WL_WDS_WPA_ROLE_SUP 1 /* supplicant */ +#define WL_WDS_WPA_ROLE_AUTO 255 /* auto, based on mac addr value */ + +/* Base offset values */ +#define WL_PKT_FILTER_BASE_PKT 0 +#define WL_PKT_FILTER_BASE_END 1 +#define WL_PKT_FILTER_BASE_D11_H 2 /* May be removed */ +#define WL_PKT_FILTER_BASE_D11_D 3 /* May be removed */ +#define WL_PKT_FILTER_BASE_ETH_H 4 +#define WL_PKT_FILTER_BASE_ETH_D 5 +#define WL_PKT_FILTER_BASE_ARP_H 6 +#define WL_PKT_FILTER_BASE_ARP_D 7 /* May be removed */ +#define WL_PKT_FILTER_BASE_IP4_H 8 +#define WL_PKT_FILTER_BASE_IP4_D 9 +#define WL_PKT_FILTER_BASE_IP6_H 10 +#define WL_PKT_FILTER_BASE_IP6_D 11 +#define WL_PKT_FILTER_BASE_TCP_H 12 +#define WL_PKT_FILTER_BASE_TCP_D 13 /* May be removed */ +#define WL_PKT_FILTER_BASE_UDP_H 14 +#define WL_PKT_FILTER_BASE_UDP_D 15 +#define WL_PKT_FILTER_BASE_IP6_P 16 +#define WL_PKT_FILTER_BASE_COUNT 17 /* May be removed */ + +/* String mapping for bases that may be used by applications or debug */ +#define WL_PKT_FILTER_BASE_NAMES \ + { "START", WL_PKT_FILTER_BASE_PKT }, \ + { "END", WL_PKT_FILTER_BASE_END }, \ + { "ETH_H", WL_PKT_FILTER_BASE_ETH_H }, \ + { "ETH_D", WL_PKT_FILTER_BASE_ETH_D }, \ + { "D11_H", WL_PKT_FILTER_BASE_D11_H }, \ + { "D11_D", WL_PKT_FILTER_BASE_D11_D }, \ + { "ARP_H", WL_PKT_FILTER_BASE_ARP_H }, \ + { "IP4_H", WL_PKT_FILTER_BASE_IP4_H }, \ + { "IP4_D", WL_PKT_FILTER_BASE_IP4_D }, \ + { "IP6_H", WL_PKT_FILTER_BASE_IP6_H }, \ + { "IP6_D", WL_PKT_FILTER_BASE_IP6_D }, \ + { "IP6_P", WL_PKT_FILTER_BASE_IP6_P }, \ + { "TCP_H", WL_PKT_FILTER_BASE_TCP_H }, \ + { "TCP_D", WL_PKT_FILTER_BASE_TCP_D }, \ + { "UDP_H", WL_PKT_FILTER_BASE_UDP_H }, \ + { "UDP_D", WL_PKT_FILTER_BASE_UDP_D } + +/* Flags for a pattern list element */ +#define WL_PKT_FILTER_MFLAG_NEG 0x0001 + +/* + * Packet engine interface + */ + +#define WL_PKTENG_PER_TX_START 0x01 +#define WL_PKTENG_PER_TX_STOP 0x02 +#define WL_PKTENG_PER_RX_START 0x04 +#define WL_PKTENG_PER_RX_WITH_ACK_START 0x05 +#define WL_PKTENG_PER_TX_WITH_ACK_START 0x06 +#define WL_PKTENG_PER_RX_STOP 0x08 +#define WL_PKTENG_PER_MASK 0xff + +#define WL_PKTENG_SYNCHRONOUS 0x100 /* synchronous flag */ +#define WL_PKTENG_SYNCHRONOUS_UNBLK 0x200 /* synchronous unblock flag */ + +#define WL_PKTENG_MAXPKTSZ 16384 /* max pktsz limit for pkteng */ + +#define NUM_80211b_RATES 4 +#define NUM_80211ag_RATES 8 +#define NUM_80211n_RATES 32 +#define NUM_80211_RATES (NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES) + +/* + * WOWL capability/override settings + */ +#define WL_WOWL_MAGIC (1 << 0) /* Wakeup on Magic packet */ +#define WL_WOWL_NET (1 << 1) /* Wakeup on Netpattern */ +#define WL_WOWL_DIS (1 << 2) /* Wakeup on loss-of-link due to Disassoc/Deauth */ +#define WL_WOWL_RETR (1 << 3) /* Wakeup on retrograde TSF */ +#define WL_WOWL_BCN (1 << 4) /* Wakeup on loss of beacon */ +#define WL_WOWL_TST (1 << 5) /* Wakeup after test */ +#define WL_WOWL_M1 (1 << 6) /* Wakeup after PTK refresh */ +#define WL_WOWL_EAPID (1 << 7) /* Wakeup after receipt of EAP-Identity Req */ +#define WL_WOWL_PME_GPIO (1 << 8) /* Wakeind via PME(0) or GPIO(1) */ +#define WL_WOWL_ULP_BAILOUT (1 << 8) /* wakeind via unknown pkt by basic ULP-offloads - + * WL_WOWL_ULP_BAILOUT - same as WL_WOWL_PME_GPIO used only for DONGLE BUILDS and + * not WLC_HIGH_ONLY case + */ +#define WL_WOWL_NEEDTKIP1 (1 << 9) /* need tkip 
phase 1 key to be updated by the driver */ +#define WL_WOWL_GTK_FAILURE (1 << 10) /* enable wakeup if GTK fails */ +#define WL_WOWL_EXTMAGPAT (1 << 11) /* support extended magic packets */ +#define WL_WOWL_ARPOFFLOAD (1 << 12) /* support ARP/NS/keepalive offloading */ +#define WL_WOWL_WPA2 (1 << 13) /* read protocol version for EAPOL frames */ +#define WL_WOWL_KEYROT (1 << 14) /* If the bit is set, use key rotation */ +#define WL_WOWL_BCAST (1 << 15) /* If the bit is set, frame received was bcast frame */ +#define WL_WOWL_SCANOL (1 << 16) /* If the bit is set, scan offload is enabled */ +#define WL_WOWL_TCPKEEP_TIME (1 << 17) /* Wakeup on tcpkeep alive timeout */ +#define WL_WOWL_MDNS_CONFLICT (1 << 18) /* Wakeup on mDNS Conflict Resolution */ +#define WL_WOWL_MDNS_SERVICE (1 << 19) /* Wakeup on mDNS Service Connect */ +#define WL_WOWL_TCPKEEP_DATA (1 << 20) /* tcp keepalive got data */ +#define WL_WOWL_FW_HALT (1 << 21) /* Firmware died in wowl mode */ +#define WL_WOWL_ENAB_HWRADIO (1 << 22) /* Enable detection of radio button changes */ +#define WL_WOWL_MIC_FAIL (1 << 23) /* Offloads detected MIC failure(s) */ +#define WL_WOWL_UNASSOC (1 << 24) /* Wakeup in Unassociated state (Net/Magic Pattern) */ +#define WL_WOWL_SECURE (1 << 25) /* Wakeup if received matched secured pattern */ +#define WL_WOWL_LINKDOWN (1 << 31) /* Link Down indication in WoWL mode */ + +#define WL_WOWL_TCPKEEP (1 << 20) /* temp copy to satisfy automerger */ + +#define WOWL_PATTEN_TYPE_ARP (1 << 0) /* ARP offload Pattern */ +#define WOWL_PATTEN_TYPE_NA (1 << 1) /* NA offload Pattern */ + +#define MAGIC_PKT_MINLEN 102 /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */ +#define MAGIC_PKT_NUM_MAC_ADDRS 16 + + +/* Overlap BSS Scan parameters default, minimum, maximum */ +#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT 20 /* unit TU */ +#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN 5 /* unit TU */ +#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX 1000 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT 10 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN 10 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX 1000 /* unit TU */ +#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT 300 /* unit Sec */ +#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN 10 /* unit Sec */ +#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX 900 /* unit Sec */ +#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT 5 +#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN 5 +#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX 100 +#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT 200 /* unit TU */ +#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN 200 /* unit TU */ +#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT 20 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN 20 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */ +#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT 25 /* unit percent */ +#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN 0 /* unit percent */ +#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX 100 /* unit percent */ + +#define WL_MIN_NUM_OBSS_SCAN_ARG 7 /* minimum number of arguments required for OBSS Scan */ + +#define WL_COEX_INFO_MASK 0x07 +#define WL_COEX_INFO_REQ 0x01 +#define WL_COEX_40MHZ_INTOLERANT 0x02 +#define WL_COEX_WIDTH20 0x04 + +#define WLC_RSSI_INVALID 0 /* invalid RSSI value */ + +#define MAX_RSSI_LEVELS 8 + +
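The WL_WOWL_* capability bits above are OR-ed together by the host when arming wake-on-wireless. One plausible composition, purely as a sketch (uint32 comes from Broadcom's typedefs.h; the particular selection is illustrative, not a recommendation):

    /* Wake on magic packet, net pattern, disassoc/deauth, beacon loss
     * and retrograde TSF.
     */
    uint32 wowl_caps = WL_WOWL_MAGIC | WL_WOWL_NET | WL_WOWL_DIS |
                       WL_WOWL_BCN | WL_WOWL_RETR;  /* == 0x0000001f */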
/* **** EXTLOG **** */ +#define EXTLOG_CUR_VER 0x0100 + +#define MAX_ARGSTR_LEN 18 /* At least big enough for storing ETHER_ADDR_STR_LEN */ + +/* log modules (bitmap) */ +#define LOG_MODULE_COMMON 0x0001 +#define LOG_MODULE_ASSOC 0x0002 +#define LOG_MODULE_EVENT 0x0004 +#define LOG_MODULE_MAX 3 /* Update when adding module */ + +/* log levels */ +#define WL_LOG_LEVEL_DISABLE 0 +#define WL_LOG_LEVEL_ERR 1 +#define WL_LOG_LEVEL_WARN 2 +#define WL_LOG_LEVEL_INFO 3 +#define WL_LOG_LEVEL_MAX WL_LOG_LEVEL_INFO /* Update when adding level */ + +/* flag */ +#define LOG_FLAG_EVENT 1 + +/* log arg_type */ +#define LOG_ARGTYPE_NULL 0 +#define LOG_ARGTYPE_STR 1 /* %s */ +#define LOG_ARGTYPE_INT 2 /* %d */ +#define LOG_ARGTYPE_INT_STR 3 /* %d...%s */ +#define LOG_ARGTYPE_STR_INT 4 /* %s...%d */ + +/* 802.11 Mgmt Packet flags */ +#define VNDR_IE_BEACON_FLAG 0x1 +#define VNDR_IE_PRBRSP_FLAG 0x2 +#define VNDR_IE_ASSOCRSP_FLAG 0x4 +#define VNDR_IE_AUTHRSP_FLAG 0x8 +#define VNDR_IE_PRBREQ_FLAG 0x10 +#define VNDR_IE_ASSOCREQ_FLAG 0x20 +#define VNDR_IE_IWAPID_FLAG 0x40 /* vendor IE in IW advertisement protocol ID field */ +#define VNDR_IE_CUSTOM_FLAG 0x100 /* allow custom IE id */ + +#if defined(WLP2P) +/* P2P Action Frames flags (spec ordered) */ +#define VNDR_IE_GONREQ_FLAG 0x001000 +#define VNDR_IE_GONRSP_FLAG 0x002000 +#define VNDR_IE_GONCFM_FLAG 0x004000 +#define VNDR_IE_INVREQ_FLAG 0x008000 +#define VNDR_IE_INVRSP_FLAG 0x010000 +#define VNDR_IE_DISREQ_FLAG 0x020000 +#define VNDR_IE_DISRSP_FLAG 0x040000 +#define VNDR_IE_PRDREQ_FLAG 0x080000 +#define VNDR_IE_PRDRSP_FLAG 0x100000 + +#define VNDR_IE_P2PAF_SHIFT 12 +#endif /* WLP2P */ + +/* channel interference measurement (chanim) related defines */ + +/* chanim mode */ +#define CHANIM_DISABLE 0 /* disabled */ +#define CHANIM_DETECT 1 /* detection only */ +#define CHANIM_EXT 2 /* external state machine */ +#define CHANIM_ACT 3 /* full internal state machine, detect + act */ +#define CHANIM_MODE_MAX 4 + +/* define for apcs reason code */ +#define APCS_INIT 0 +#define APCS_IOCTL 1 +#define APCS_CHANIM 2 +#define APCS_CSTIMER 3 +#define APCS_BTA 4 +#define APCS_TXDLY 5 +#define APCS_NONACSD 6 +#define APCS_DFS_REENTRY 7 +#define APCS_TXFAIL 8 +#define APCS_MAX 9 + +/* number of ACS record entries */ +#define CHANIM_ACS_RECORD 10 + +/* CHANIM */ +#define CCASTATS_TXDUR 0 +#define CCASTATS_INBSS 1 +#define CCASTATS_OBSS 2 +#define CCASTATS_NOCTG 3 +#define CCASTATS_NOPKT 4 +#define CCASTATS_DOZE 5 +#define CCASTATS_TXOP 6 +#define CCASTATS_GDTXDUR 7 +#define CCASTATS_BDTXDUR 8 +#define CCASTATS_MAX 9 + +#define WL_CHANIM_COUNT_ALL 0xff +#define WL_CHANIM_COUNT_ONE 0x1 + +/* ap tpc modes */ +#define AP_TPC_OFF 0 +#define AP_TPC_BSS_PWR 1 /* BSS power control */ +#define AP_TPC_AP_PWR 2 /* AP power control */ +#define AP_TPC_AP_BSS_PWR 3 /* Both AP and BSS power control */ +#define AP_TPC_MAX_LINK_MARGIN 127 + +/* state */ +#define WL_P2P_DISC_ST_SCAN 0 +#define WL_P2P_DISC_ST_LISTEN 1 +#define WL_P2P_DISC_ST_SEARCH 2 + +/* i/f type */ +#define WL_P2P_IF_CLIENT 0 +#define WL_P2P_IF_GO 1 +#define WL_P2P_IF_DYNBCN_GO 2 +#define WL_P2P_IF_DEV 3 + +/* count */ +#define WL_P2P_SCHED_RSVD 0 +#define WL_P2P_SCHED_REPEAT 255 /* anything > 255 will be treated as 255 */ + +#define WL_P2P_SCHED_FIXED_LEN 3 + +/* schedule type */ +#define 
WL_P2P_SCHED_TYPE_ABS 0 /* Scheduled Absence */ +#define WL_P2P_SCHED_TYPE_REQ_ABS 1 /* Requested Absence */ + +/* schedule action during absence periods (for WL_P2P_SCHED_ABS type) */ +#define WL_P2P_SCHED_ACTION_NONE 0 /* no action */ +#define WL_P2P_SCHED_ACTION_DOZE 1 /* doze */ +/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */ +#define WL_P2P_SCHED_ACTION_GOOFF 2 /* turn off GO beacon/prbrsp functions */ +/* schedule option - WL_P2P_SCHED_TYPE_XXX */ +#define WL_P2P_SCHED_ACTION_RESET 255 /* reset */ + +/* schedule option - WL_P2P_SCHED_TYPE_ABS */ +#define WL_P2P_SCHED_OPTION_NORMAL 0 /* normal start/interval/duration/count */ +#define WL_P2P_SCHED_OPTION_BCNPCT 1 /* percentage of beacon interval */ +/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */ +#define WL_P2P_SCHED_OPTION_TSFOFS 2 /* normal start/interval/duration/count with + * start being an offset of the 'current' TSF + */ + +/* feature flags */ +#define WL_P2P_FEAT_GO_CSA (1 << 0) /* GO moves with the STA using CSA method */ +#define WL_P2P_FEAT_GO_NOLEGACY (1 << 1) /* GO does not send probe responses to non-p2p probe + * requests + */ +#define WL_P2P_FEAT_RESTRICT_DEV_RESP (1 << 2) /* Restrict p2p dev interface from responding */ + +/* n-mode support capability */ +/* 2x2 includes both 1x1 & 2x2 devices + * reserved #define 2 for future when we want to separate 1x1 & 2x2 and + * control it independently + */ +#define WL_11N_2x2 1 +#define WL_11N_3x3 3 +#define WL_11N_4x4 4 + +/* define 11n feature disable flags */ +#define WLFEATURE_DISABLE_11N 0x00000001 +#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002 +#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004 +#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008 +#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010 +#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020 +#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040 +#define WLFEATURE_DISABLE_11N_GF 0x00000080 + +/* Proxy STA modes */ +#define PSTA_MODE_DISABLED 0 +#define PSTA_MODE_PROXY 1 +#define PSTA_MODE_REPEATER 2 + +/* op code in nat_cfg */ +#define NAT_OP_ENABLE 1 /* enable NAT on given interface */ +#define NAT_OP_DISABLE 2 /* disable NAT on given interface */ +#define NAT_OP_DISABLE_ALL 3 /* disable NAT on all interfaces */ + +/* NAT state */ +#define NAT_STATE_ENABLED 1 /* NAT is enabled */ +#define NAT_STATE_DISABLED 2 /* NAT is disabled */ + +#define CHANNEL_5G_LOW_START 36 /* 5G low (36..48) CDD enable/disable bit mask */ +#define CHANNEL_5G_MID_START 52 /* 5G mid (52..64) CDD enable/disable bit mask */ +#define CHANNEL_5G_HIGH_START 100 /* 5G high (100..140) CDD enable/disable bit mask */ +#define CHANNEL_5G_UPPER_START 149 /* 5G upper (149..161) CDD enable/disable bit mask */ + +/* D0 Coalescing */ +#define IPV4_ARP_FILTER 0x0001 +#define IPV4_NETBT_FILTER 0x0002 +#define IPV4_LLMNR_FILTER 0x0004 +#define IPV4_SSDP_FILTER 0x0008 +#define IPV4_WSD_FILTER 0x0010 +#define IPV6_NETBT_FILTER 0x0200 +#define IPV6_LLMNR_FILTER 0x0400 +#define IPV6_SSDP_FILTER 0x0800 +#define IPV6_WSD_FILTER 0x1000 + +/* Network Offload Engine */ +#define NWOE_OL_ENABLE 0x00000001 + +/* + * Traffic management structures/defines. 
+ */ + +/* Traffic management bandwidth parameters */ +#define TRF_MGMT_MAX_PRIORITIES 3 + +#define TRF_MGMT_FLAG_ADD_DSCP 0x0001 /* Add DSCP to IP TOS field */ +#define TRF_MGMT_FLAG_DISABLE_SHAPING 0x0002 /* Don't shape traffic */ +#define TRF_MGMT_FLAG_MANAGE_LOCAL_TRAFFIC 0x0008 /* Manage traffic over our local subnet */ +#define TRF_MGMT_FLAG_FILTER_ON_MACADDR 0x0010 /* filter on MAC address */ +#define TRF_MGMT_FLAG_NO_RX 0x0020 /* do not apply filters to rx packets */ + +#define TRF_FILTER_MAC_ADDR 0x0001 /* L2 filter use dst mac address for filtering */ +#define TRF_FILTER_IP_ADDR 0x0002 /* L3 filter use IP address for filtering */ +#define TRF_FILTER_L4 0x0004 /* L4 filter use tcp/udp for filtering */ +#define TRF_FILTER_DWM 0x0008 /* L3 filter use DSCP for filtering */ +#define TRF_FILTER_FAVORED 0x0010 /* Tag the packet FAVORED */ + +/* WNM/NPS subfeatures mask */ +#define WL_WNM_BSSTRANS 0x00000001 +#define WL_WNM_PROXYARP 0x00000002 +#define WL_WNM_MAXIDLE 0x00000004 +#define WL_WNM_TIMBC 0x00000008 +#define WL_WNM_TFS 0x00000010 +#define WL_WNM_SLEEP 0x00000020 +#define WL_WNM_DMS 0x00000040 +#define WL_WNM_FMS 0x00000080 +#define WL_WNM_NOTIF 0x00000100 +#define WL_WNM_MAX 0x00000200 + +#ifdef WLWNM_BRCM +#define BRCM_WNM_FEATURE_SET\ + (WL_WNM_PROXYARP | \ + WL_WNM_SLEEP | \ + WL_WNM_FMS | \ + WL_WNM_TFS | \ + WL_WNM_TIMBC | \ + WL_WNM_BSSTRANS | \ + WL_WNM_DMS | \ + WL_WNM_NOTIF | \ + 0) +#endif /* WLWNM_BRCM */ + +#ifndef ETHER_MAX_DATA +#define ETHER_MAX_DATA 1500 +#endif /* ETHER_MAX_DATA */ + +/* Different discovery modes for dpt */ +#define DPT_DISCOVERY_MANUAL 0x01 /* manual discovery mode */ +#define DPT_DISCOVERY_AUTO 0x02 /* auto discovery mode */ +#define DPT_DISCOVERY_SCAN 0x04 /* scan-based discovery mode */ + +/* different path selection values */ +#define DPT_PATHSEL_AUTO 0 /* auto mode for path selection */ +#define DPT_PATHSEL_DIRECT 1 /* always use direct DPT path */ +#define DPT_PATHSEL_APPATH 2 /* always use AP path */ + +/* different ops for deny list */ +#define DPT_DENY_LIST_ADD 1 /* add to dpt deny list */ +#define DPT_DENY_LIST_REMOVE 2 /* remove from dpt deny list */ + +/* different ops for manual end point */ +#define DPT_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */ +#define DPT_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */ +#define DPT_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */ + +/* flags to indicate DPT status */ +#define DPT_STATUS_ACTIVE 0x01 /* link active (though may be suspended) */ +#define DPT_STATUS_AES 0x02 /* link secured through AES encryption */ +#define DPT_STATUS_FAILED 0x04 /* DPT link failed */ + +#ifdef WLTDLS +/* different ops for manual end point */ +#define TDLS_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */ +#define TDLS_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */ +#define TDLS_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */ +#define TDLS_MANUAL_EP_PM 4 /* put dpt endpoint in PM mode */ +#define TDLS_MANUAL_EP_WAKE 5 /* wake up dpt endpoint from PM */ +#define TDLS_MANUAL_EP_DISCOVERY 6 /* discover if endpoint is TDLS capable */ +#define TDLS_MANUAL_EP_CHSW 7 /* channel switch */ +#define TDLS_MANUAL_EP_WFD_TPQ 8 /* WiFi-Display Tunneled Probe reQuest */ + +/* modes */ +#define TDLS_WFD_IE_TX 0 +#define TDLS_WFD_IE_RX 1 +#define TDLS_WFD_PROBE_IE_TX 2 +#define TDLS_WFD_PROBE_IE_RX 3 +#endif /* WLTDLS */ + +/* define for flag */ +#define TSPEC_PENDING 0 /* TSPEC pending */ +#define TSPEC_ACCEPTED 1 /* TSPEC accepted */ +#define TSPEC_REJECTED 2 /* TSPEC rejected */ +#define TSPEC_UNKNOWN 
3 /* TSPEC unknown */ +#define TSPEC_STATUS_MASK 7 /* TSPEC status mask */ + + +/* Software feature flag defines used by wlfeatureflag */ +#ifdef WLAFTERBURNER +#define WL_SWFL_ABBFL 0x0001 /* Allow Afterburner on systems w/o hardware BFL */ +#define WL_SWFL_ABENCORE 0x0002 /* Allow AB on non-4318E chips */ +#endif /* WLAFTERBURNER */ +#define WL_SWFL_NOHWRADIO 0x0004 +#define WL_SWFL_FLOWCONTROL 0x0008 /* Enable backpressure to OS stack */ +#define WL_SWFL_WLBSSSORT 0x0010 /* Per-port supports sorting of BSS */ + +#define WL_LIFETIME_MAX 0xFFFF /* Max value in ms */ + +#define CSA_BROADCAST_ACTION_FRAME 0 /* csa broadcast action frame */ +#define CSA_UNICAST_ACTION_FRAME 1 /* csa unicast action frame */ + +/* Roaming trigger definitions for WLC_SET_ROAM_TRIGGER. + * + * (-100 < value < 0) value is used directly as a roaming trigger in dBm + * (0 <= value) value specifies a logical roaming trigger level from + * the list below + * + * WLC_GET_ROAM_TRIGGER always returns roaming trigger value in dBm, never + * the logical roam trigger value. + */ +#define WLC_ROAM_TRIGGER_DEFAULT 0 /* default roaming trigger */ +#define WLC_ROAM_TRIGGER_BANDWIDTH 1 /* optimize for bandwidth roaming trigger */ +#define WLC_ROAM_TRIGGER_DISTANCE 2 /* optimize for distance roaming trigger */ +#define WLC_ROAM_TRIGGER_AUTO 3 /* auto-detect environment */ +#define WLC_ROAM_TRIGGER_MAX_VALUE 3 /* max. valid value */ + +#define WLC_ROAM_NEVER_ROAM_TRIGGER (-100) /* Avoid Roaming by setting a large value */ + +/* Preferred Network Offload (PNO, formerly PFN) defines */ +#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */ + +#define SORT_CRITERIA_BIT 0 +#define AUTO_NET_SWITCH_BIT 1 +#define ENABLE_BKGRD_SCAN_BIT 2 +#define IMMEDIATE_SCAN_BIT 3 +#define AUTO_CONNECT_BIT 4 +#define ENABLE_BD_SCAN_BIT 5 +#define ENABLE_ADAPTSCAN_BIT 6 +#define IMMEDIATE_EVENT_BIT 8 +#define SUPPRESS_SSID_BIT 9 +#define ENABLE_NET_OFFLOAD_BIT 10 +/* report found/lost events for SSID and BSSID networks separately */ +#define REPORT_SEPERATELY_BIT 11 +#define BESTN_BSSID_ONLY_BIT 12 + +#define SORT_CRITERIA_MASK 0x0001 +#define AUTO_NET_SWITCH_MASK 0x0002 +#define ENABLE_BKGRD_SCAN_MASK 0x0004 +#define IMMEDIATE_SCAN_MASK 0x0008 +#define AUTO_CONNECT_MASK 0x0010 + +#define ENABLE_BD_SCAN_MASK 0x0020 +#define ENABLE_ADAPTSCAN_MASK 0x00c0 +#define IMMEDIATE_EVENT_MASK 0x0100 +#define SUPPRESS_SSID_MASK 0x0200 +#define ENABLE_NET_OFFLOAD_MASK 0x0400 +/* report found/lost events for SSID and BSSID networks separately */ +#define REPORT_SEPERATELY_MASK 0x0800 +#define BESTN_BSSID_ONLY_MASK 0x1000 + +#define PFN_VERSION 2 +#define PFN_SCANRESULT_VERSION 1 +#define MAX_PFN_LIST_COUNT 16 + +#define PFN_COMPLETE 1 +#define PFN_INCOMPLETE 0 + +#define DEFAULT_BESTN 2 +#define DEFAULT_MSCAN 0 +#define DEFAULT_REPEAT 10 +#define DEFAULT_EXP 2 + +#define PFN_PARTIAL_SCAN_BIT 0 +#define PFN_PARTIAL_SCAN_MASK 1 + +#define WL_PFN_SUPPRESSFOUND_MASK 0x08 +#define WL_PFN_SUPPRESSLOST_MASK 0x10 +#define WL_PFN_RSSI_MASK 0xff00 +#define WL_PFN_RSSI_SHIFT 8 + +#define WL_PFN_REPORT_ALLNET 0 +#define WL_PFN_REPORT_SSIDNET 1 +#define WL_PFN_REPORT_BSSIDNET 2 + +#define WL_PFN_CFG_FLAGS_PROHIBITED 0x00000001 /* Accept and use prohibited channels */ +#define WL_PFN_CFG_FLAGS_HISTORY_OFF 0x00000002 /* Scan history suppressed */ + +#define WL_PFN_HIDDEN_BIT 2 +#define PNO_SCAN_MAX_FW 508*1000 /* max scan time in msec */ +#define PNO_SCAN_MAX_FW_SEC PNO_SCAN_MAX_FW/1000 /* max scan time in SEC */ +#define PNO_SCAN_MIN_FW_SEC 10 /* min 
scan time in SEC */ +#define WL_PFN_HIDDEN_MASK 0x4 + +#ifndef BESTN_MAX +#define BESTN_MAX 8 +#endif + +#ifndef MSCAN_MAX +#define MSCAN_MAX 32 +#endif + +/* TCP Checksum Offload error injection for testing */ +#define TOE_ERRTEST_TX_CSUM 0x00000001 +#define TOE_ERRTEST_RX_CSUM 0x00000002 +#define TOE_ERRTEST_RX_CSUM2 0x00000004 + +/* ARP Offload feature flags for arp_ol iovar */ +#define ARP_OL_AGENT 0x00000001 +#define ARP_OL_SNOOP 0x00000002 +#define ARP_OL_HOST_AUTO_REPLY 0x00000004 +#define ARP_OL_PEER_AUTO_REPLY 0x00000008 + +/* ARP Offload error injection */ +#define ARP_ERRTEST_REPLY_PEER 0x1 +#define ARP_ERRTEST_REPLY_HOST 0x2 + +#define ARP_MULTIHOMING_MAX 8 /* Maximum local host IP addresses */ +#define ND_MULTIHOMING_MAX 10 /* Maximum local host IP addresses */ +#define ND_REQUEST_MAX 5 /* Max set of offload params */ + + +/* AOAC wake event flag */ +#define WAKE_EVENT_NLO_DISCOVERY_BIT 1 +#define WAKE_EVENT_AP_ASSOCIATION_LOST_BIT 2 +#define WAKE_EVENT_GTK_HANDSHAKE_ERROR_BIT 4 +#define WAKE_EVENT_4WAY_HANDSHAKE_REQUEST_BIT 8 +#define WAKE_EVENT_NET_PACKET_BIT 0x10 + + +#define MAX_NUM_WOL_PATTERN 22 /* LOGO requirements min 22 */ + + +/* Packet filter operation mode */ +/* True: 1; False: 0 */ +#define PKT_FILTER_MODE_FORWARD_ON_MATCH 1 +/* Enable and disable pkt_filter as a whole */ +#define PKT_FILTER_MODE_DISABLE 2 +/* Cache first matched rx pkt(be queried by host later) */ +#define PKT_FILTER_MODE_PKT_CACHE_ON_MATCH 4 +/* If pkt_filter is enabled and no filter is set, don't forward anything */ +#define PKT_FILTER_MODE_PKT_FORWARD_OFF_DEFAULT 8 + +#ifdef DONGLEOVERLAYS +#define OVERLAY_IDX_MASK 0x000000ff +#define OVERLAY_IDX_SHIFT 0 +#define OVERLAY_FLAGS_MASK 0xffffff00 +#define OVERLAY_FLAGS_SHIFT 8 +/* overlay written to device memory immediately after loading the base image */ +#define OVERLAY_FLAG_POSTLOAD 0x100 +/* defer overlay download until the device responds w/WLC_E_OVL_DOWNLOAD event */ +#define OVERLAY_FLAG_DEFER_DL 0x200 +/* overlay downloaded prior to the host going to sleep */ +#define OVERLAY_FLAG_PRESLEEP 0x400 +#define OVERLAY_DOWNLOAD_CHUNKSIZE 1024 +#endif /* DONGLEOVERLAYS */ + +/* reuse two numbers in the sc/rc space */ +#define SMFS_CODE_MALFORMED 0xFFFE +#define SMFS_CODE_IGNORED 0xFFFD + +/* RFAWARE def */ +#define BCM_ACTION_RFAWARE 0x77 +#define BCM_ACTION_RFAWARE_DCS 0x01 + +/* DCS reason code define */ +#define BCM_DCS_IOVAR 0x1 +#define BCM_DCS_UNKNOWN 0xFF + + +#ifdef PROP_TXSTATUS +/* Bit definitions for tlv iovar */ +/* + * enable RSSI signals: + * WLFC_CTL_TYPE_RSSI + */ +#define WLFC_FLAGS_RSSI_SIGNALS 0x0001 + +/* enable (if/mac_open, if/mac_close, mac_add, mac_del) signals: + * + * WLFC_CTL_TYPE_MAC_OPEN + * WLFC_CTL_TYPE_MAC_CLOSE + * + * WLFC_CTL_TYPE_INTERFACE_OPEN + * WLFC_CTL_TYPE_INTERFACE_CLOSE + * + * WLFC_CTL_TYPE_MACDESC_ADD + * WLFC_CTL_TYPE_MACDESC_DEL + * + */ +#define WLFC_FLAGS_XONXOFF_SIGNALS 0x0002 + +/* enable (status, fifo_credit, mac_credit) signals + * WLFC_CTL_TYPE_MAC_REQUEST_CREDIT + * WLFC_CTL_TYPE_TXSTATUS + * WLFC_CTL_TYPE_FIFO_CREDITBACK + */ +#define WLFC_FLAGS_CREDIT_STATUS_SIGNALS 0x0004 + +#define WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE 0x0008 +#define WLFC_FLAGS_PSQ_GENERATIONFSM_ENABLE 0x0010 +#define WLFC_FLAGS_PSQ_ZERO_BUFFER_ENABLE 0x0020 +#define WLFC_FLAGS_HOST_RXRERODER_ACTIVE 0x0040 +#define WLFC_FLAGS_PKT_STAMP_SIGNALS 0x0080 + +#endif /* PROP_TXSTATUS */ + +#define WL_TIMBC_STATUS_AP_UNKNOWN 255 /* AP status for internal use only */ + +
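The PROP_TXSTATUS comment block above maps each WLFC_FLAGS_* bit to the signal types it enables on the "tlv" iovar, so composing a host-side value is plain bit arithmetic. A sketch only (uint32 comes from Broadcom's typedefs.h; the selection is illustrative):

    /* Enable RSSI, xon/xoff and credit/status signalling. */
    uint32 tlv = WLFC_FLAGS_RSSI_SIGNALS |
                 WLFC_FLAGS_XONXOFF_SIGNALS |
                 WLFC_FLAGS_CREDIT_STATUS_SIGNALS;  /* == 0x0007 */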
+#define WL_DFRTS_LOGIC_OFF 0 /* Feature is disabled */ +#define WL_DFRTS_LOGIC_OR 1 /* OR all non-zero threshold conditions */ +#define WL_DFRTS_LOGIC_AND 2 /* AND all non-zero threshold conditions */ + +/* Definitions for Reliable Multicast */ +#define WL_RELMCAST_MAX_CLIENT 32 +#define WL_RELMCAST_FLAG_INBLACKLIST 1 +#define WL_RELMCAST_FLAG_ACTIVEACKER 2 +#define WL_RELMCAST_FLAG_RELMCAST 4 + +/* structures for proximity detection device role */ +#define WL_PROXD_MODE_DISABLE 0 +#define WL_PROXD_MODE_NEUTRAL 1 +#define WL_PROXD_MODE_INITIATOR 2 +#define WL_PROXD_MODE_TARGET 3 +#define WL_PROXD_RANDOM_WAKEUP 0x8000 + + +#ifdef NET_DETECT +#define NET_DETECT_MAX_WAKE_DATA_SIZE 2048 +#define NET_DETECT_MAX_PROFILES 16 +#define NET_DETECT_MAX_CHANNELS 50 +#endif /* NET_DETECT */ + +/* Bit masks for radio disabled status - returned by WL_GET_RADIO */ +#define WL_RADIO_SW_DISABLE (1<<0) +#define WL_RADIO_HW_DISABLE (1<<1) +#define WL_RADIO_MPC_DISABLE (1<<2) +#define WL_RADIO_COUNTRY_DISABLE (1<<3) /* some countries don't support any channel */ +#define WL_RADIO_PERCORE_DISABLE (1<<4) /* Radio disable per core for DVT */ + +#define WL_SPURAVOID_OFF 0 +#define WL_SPURAVOID_ON1 1 +#define WL_SPURAVOID_ON2 2 + + +#define WL_4335_SPURAVOID_ON1 1 +#define WL_4335_SPURAVOID_ON2 2 +#define WL_4335_SPURAVOID_ON3 3 +#define WL_4335_SPURAVOID_ON4 4 +#define WL_4335_SPURAVOID_ON5 5 +#define WL_4335_SPURAVOID_ON6 6 +#define WL_4335_SPURAVOID_ON7 7 +#define WL_4335_SPURAVOID_ON8 8 +#define WL_4335_SPURAVOID_ON9 9 + +/* Override bit for WLC_SET_TXPWR. if set, ignore other level limits */ +#define WL_TXPWR_OVERRIDE (1U<<31) +#define WL_TXPWR_NEG (1U<<30) + + +/* phy types (returned by WLC_GET_PHYTYPE) */ +#define WLC_PHY_TYPE_A 0 +#define WLC_PHY_TYPE_B 1 +#define WLC_PHY_TYPE_G 2 +#define WLC_PHY_TYPE_N 4 +#define WLC_PHY_TYPE_LP 5 +#define WLC_PHY_TYPE_SSN 6 +#define WLC_PHY_TYPE_HT 7 +#define WLC_PHY_TYPE_LCN 8 +#define WLC_PHY_TYPE_LCN40 10 +#define WLC_PHY_TYPE_AC 11 +#define WLC_PHY_TYPE_LCN20 12 +#define WLC_PHY_TYPE_NULL 0xf + +/* Values for PM */ +#define PM_OFF 0 +#define PM_MAX 1 +#define PM_FAST 2 +#define PM_FORCE_OFF 3 /* use this bit to force PM off even if BT is active */ + +#define WL_WME_CNT_VERSION 1 /* current version of wl_wme_cnt_t */ + +/* fbt_cap: FBT assoc / reassoc modes. */ +#define WLC_FBT_CAP_DRV_4WAY_AND_REASSOC 1 /* Driver 4-way handshake & reassoc (WLFBT). 
*/ + +/* monitor_promisc_level bits */ +#define WL_MONPROMISC_PROMISC 0x0001 +#define WL_MONPROMISC_CTRL 0x0002 +#define WL_MONPROMISC_FCS 0x0004 + +/* TCP Checksum Offload defines */ +#define TOE_TX_CSUM_OL 0x00000001 +#define TOE_RX_CSUM_OL 0x00000002 + +/* Wi-Fi Display Services (WFDS) */ +#define WL_P2P_SOCIAL_CHANNELS_MAX WL_NUMCHANNELS +#define MAX_WFDS_SEEK_SVC 4 /* Max # of wfds services to seek */ +#define MAX_WFDS_ADVERT_SVC 4 /* Max # of wfds services to advertise */ +#define MAX_WFDS_SVC_NAME_LEN 200 /* maximum service_name length */ +#define MAX_WFDS_ADV_SVC_INFO_LEN 65000 /* maximum adv service_info length */ +#define P2P_WFDS_HASH_LEN 6 /* Length of a WFDS service hash */ +#define MAX_WFDS_SEEK_SVC_INFO_LEN 255 /* maximum seek service_info req length */ +#define MAX_WFDS_SEEK_SVC_NAME_LEN 200 /* maximum service_name length */ + +/* ap_isolate bitmaps */ +#define AP_ISOLATE_DISABLED 0x0 +#define AP_ISOLATE_SENDUP_ALL 0x01 +#define AP_ISOLATE_SENDUP_MCAST 0x02 + +#endif /* wlioctl_defs_h */ diff --git a/drivers/net/wireless/bcmdhd/include/dhdioctl.h b/drivers/net/wireless/bcmdhd/include/dhdioctl.h new file mode 100644 index 000000000000..9e79ddb5525e --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/dhdioctl.h @@ -0,0 +1,142 @@ +/* + * Definitions for ioctls to access DHD iovars. + * Based on wlioctl.h (for Broadcom 802.11abg driver). + * (Moves towards generic ioctls for BCM drivers/iovars.) + * + * Definitions subject to change without notice. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: dhdioctl.h 585723 2015-09-11 06:26:37Z $ + */ + +#ifndef _dhdioctl_h_ +#define _dhdioctl_h_ + +#include <typedefs.h> + + +/* require default structure packing */ +#define BWL_DEFAULT_PACKING +#include <packed_section_start.h> + + +/* Linux network driver ioctl encoding */ +typedef struct dhd_ioctl { + uint cmd; /* common ioctl definition */ + void *buf; /* pointer to user buffer */ + uint len; /* length of user buffer */ + bool set; /* get or set request (optional) */ + uint used; /* bytes read or written (optional) */ + uint needed; /* bytes needed (optional) */ + uint driver; /* to identify target driver */ +} dhd_ioctl_t; + +/* Underlying BUS definition */ +enum { + BUS_TYPE_USB = 0, /* for USB dongles */ + BUS_TYPE_SDIO, /* for SDIO dongles */ + BUS_TYPE_PCIE /* for PCIE dongles */ +}; + +/* per-driver magic numbers */ +#define DHD_IOCTL_MAGIC 0x00444944 + +/* bump this number if you change the ioctl interface */ +#define DHD_IOCTL_VERSION 1 + +#define DHD_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */ +#define DHD_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */ + +/* common ioctl definitions */ +#define DHD_GET_MAGIC 0 +#define DHD_GET_VERSION 1 +#define DHD_GET_VAR 2 +#define DHD_SET_VAR 3 + +/* message levels */ +#define DHD_ERROR_VAL 0x0001 +#define DHD_TRACE_VAL 0x0002 +#define DHD_INFO_VAL 0x0004 +#define DHD_DATA_VAL 0x0008 +#define DHD_CTL_VAL 0x0010 +#define DHD_TIMER_VAL 0x0020 +#define DHD_HDRS_VAL 0x0040 +#define DHD_BYTES_VAL 0x0080 +#define DHD_INTR_VAL 0x0100 +#define DHD_LOG_VAL 0x0200 +#define DHD_GLOM_VAL 0x0400 +#define DHD_EVENT_VAL 0x0800 +#define DHD_BTA_VAL 0x1000 +#define DHD_ISCAN_VAL 0x2000 +#define DHD_ARPOE_VAL 0x4000 +#define DHD_REORDER_VAL 0x8000 +#define DHD_WL_VAL 0x10000 +#define DHD_NOCHECKDIED_VAL 0x20000 /* UTF WAR */ +#define DHD_WL_VAL2 0x40000 +#define DHD_PNO_VAL 0x80000 +#define DHD_MSGTRACE_VAL 0x100000 +#define DHD_FWLOG_VAL 0x400000 +#define DHD_RTT_VAL 0x200000 +#define DHD_IOV_INFO_VAL 0x800000 + +#ifdef SDTEST +/* For pktgen iovar */ +typedef struct dhd_pktgen { + uint version; /* To allow structure change tracking */ + uint freq; /* Max ticks between tx/rx attempts */ + uint count; /* Test packets to send/rcv each attempt */ + uint print; /* Print counts every attempts */ + uint total; /* Total packets (or bursts) */ + uint minlen; /* Minimum length of packets to send */ + uint maxlen; /* Maximum length of packets to send */ + uint numsent; /* Count of test packets sent */ + uint numrcvd; /* Count of test packets received */ + uint numfail; /* Count of test send failures */ + uint mode; /* Test mode (type of test packets) */ + uint stop; /* Stop after this many tx failures */ +} dhd_pktgen_t; + +/* Version in case structure changes */ +#define DHD_PKTGEN_VERSION 2 + +/* Type of test packets to use */ +#define DHD_PKTGEN_ECHO 1 /* Send echo requests */ +#define DHD_PKTGEN_SEND 2 /* Send discard packets */ +#define DHD_PKTGEN_RXBURST 3 /* Request dongle send N packets */ +#define DHD_PKTGEN_RECV 4 /* Continuous rx from continuous tx dongle */ +#endif /* SDTEST */ + +/* Enter idle immediately (no timeout) */ +#define DHD_IDLE_IMMEDIATE (-1) + +/* Values for idleclock iovar: other values are the sd_divisor to use when idle */ +#define DHD_IDLE_ACTIVE 0 /* Do not request any SD clock change when idle */ +#define DHD_IDLE_STOP (-1) /* Request SD clock be stopped (and use SD1 mode) */ + + +/* require default structure packing */ +#include <packed_section_end.h> + +#endif /* _dhdioctl_h_ */ 
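For orientation, a user-space sketch of how a DHD iovar is typically queried with the structure above: the stock dhd utility hands a dhd_ioctl_t to the driver through the interface's private ioctl. Everything here is illustrative (the SIOCDEVPRIVATE routing and the "version" iovar name are assumptions about the surrounding driver; error handling is omitted):

    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/sockios.h>   /* SIOCDEVPRIVATE */
    #include <string.h>

    /* Query a DHD iovar by name: buf carries the NUL-terminated name in,
     * and the driver overwrites it with the value on the way out.
     */
    static int dhd_get_iovar(int sock, const char *ifname, char *buf, unsigned int len)
    {
        dhd_ioctl_t ioc;
        struct ifreq ifr;

        memset(&ioc, 0, sizeof(ioc));
        ioc.cmd = DHD_GET_VAR;          /* common ioctl definition above */
        ioc.buf = buf;
        ioc.len = len;
        ioc.set = 0;                    /* get, not set */
        ioc.driver = DHD_IOCTL_MAGIC;   /* route to the DHD (not WL) handler */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
        ifr.ifr_data = (void *)&ioc;

        return ioctl(sock, SIOCDEVPRIVATE, &ifr);
    }
    /* e.g. strcpy(buf, "version"); dhd_get_iovar(s, "wlan0", buf, DHD_IOCTL_SMLEN); */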
b/drivers/net/wireless/bcmdhd/include/epivers.h new file mode 100644 index 000000000000..76012564435d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/epivers.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: epivers.h.in,v 13.33 2010-09-08 22:08:53 $ + * +*/ + +#ifndef _epivers_h_ +#define _epivers_h_ + +#define EPI_MAJOR_VERSION 1 + +#define EPI_MINOR_VERSION 363 + +#define EPI_RC_NUMBER 59 + +#define EPI_INCREMENTAL_NUMBER 144 + +#define EPI_BUILD_NUMBER 0 + +#define EPI_VERSION 1, 363, 59, 144 + +#define EPI_VERSION_NUM 0x0116b3b9 + +#define EPI_VERSION_DEV 1.363.59 + +/* Driver Version String, ASCII, 32 chars max */ +#define EPI_VERSION_STR "1.363.59.144 (r)" + +#endif /* _epivers_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/event_log.h b/drivers/net/wireless/bcmdhd/include/event_log.h new file mode 100644 index 000000000000..d06d811cb925 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/event_log.h @@ -0,0 +1,349 @@ +/* + * EVENT_LOG system definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: event_log.h 591285 2015-10-07 11:56:29Z $ + */ + +#ifndef _EVENT_LOG_H_ +#define _EVENT_LOG_H_ + +#include +#include +#include + +/* logstrs header */ +#define LOGSTRS_MAGIC 0x4C4F4753 +#define LOGSTRS_VERSION 0x1 + +/* We make sure that the block size will fit in a single packet + * (allowing for a bit of overhead on each packet) + */ +#define EVENT_LOG_MAX_BLOCK_SIZE 1400 +#define EVENT_LOG_WL_BLOCK_SIZE 0x200 +#define EVENT_LOG_PSM_BLOCK_SIZE 0x200 +#define EVENT_LOG_BUS_BLOCK_SIZE 0x200 +#define EVENT_LOG_ERROR_BLOCK_SIZE 0x200 + +/* + * There are multiple levels of objects defined here: + * event_log_set - a set of buffers + * event log groups - every event log call is part of just one. All + * event log calls in a group are handled the + * same way. Each event log group is associated + * with an event log set or is off. + */ + +#ifndef __ASSEMBLER__ + +/* On the external system where the dumper is we need to make sure + * that these types are the same size as they are on the ARM that + * produced them + */ +#ifdef EVENT_LOG_DUMPER +#define _EL_BLOCK_PTR uint32 +#define _EL_TYPE_PTR uint32 +#define _EL_SET_PTR uint32 +#define _EL_TOP_PTR uint32 +#else +#define _EL_BLOCK_PTR struct event_log_block * +#define _EL_TYPE_PTR uint32 * +#define _EL_SET_PTR struct event_log_set ** +#define _EL_TOP_PTR struct event_log_top * +#endif /* EVENT_LOG_DUMPER */ + +/* Event log sets (a logical circular buffer) consist of one or more + * event_log_blocks. The blocks themselves form a logical circular + * list. The log entries are placed in each event_log_block until it + * is full. Logging continues with the next event_log_block in the + * event_set until the last event_log_block is reached and then + * logging starts over with the first event_log_block in the + * event_set. + */ +typedef struct event_log_block { + _EL_BLOCK_PTR next_block; + _EL_BLOCK_PTR prev_block; + _EL_TYPE_PTR end_ptr; + + /* Start of packet sent for log tracing */ + uint16 pktlen; /* Size of rest of block */ + uint16 count; /* Logtrace counter */ + uint32 timestamp; /* Timestamp at start of use */ + uint32 event_logs; +} event_log_block_t; + +/* There can be multiple event_sets with each logging a set of + * associated events (i.e., "fast" and "slow" events).
+ */ +typedef struct event_log_set { + _EL_BLOCK_PTR first_block; /* Pointer to first event_log block */ + _EL_BLOCK_PTR last_block; /* Pointer to last event_log block */ + _EL_BLOCK_PTR logtrace_block; /* next block traced */ + _EL_BLOCK_PTR cur_block; /* Pointer to current event_log block */ + _EL_TYPE_PTR cur_ptr; /* Current event_log pointer */ + uint32 blockcount; /* Number of blocks */ + uint16 logtrace_count; /* Last count for logtrace */ + uint16 blockfill_count; /* Fill count for logtrace */ + uint32 timestamp; /* Last timestamp event */ + uint32 cyclecount; /* Cycles at last timestamp event */ +} event_log_set_t; + +/* Top data structure for access to everything else */ +typedef struct event_log_top { + uint32 magic; +#define EVENT_LOG_TOP_MAGIC 0x474C8669 /* 'EVLG' */ + uint32 version; +#define EVENT_LOG_VERSION 1 + uint32 num_sets; + uint32 logstrs_size; /* Size of lognums + logstrs area */ + uint32 timestamp; /* Last timestamp event */ + uint32 cyclecount; /* Cycles at last timestamp event */ + _EL_SET_PTR sets; /* Ptr to array of set ptrs */ +} event_log_top_t; + +/* Data structure of Keeping the Header from logstrs.bin */ +typedef struct { + uint32 logstrs_size; /* Size of the file */ + uint32 rom_lognums_offset; /* Offset to the ROM lognum */ + uint32 ram_lognums_offset; /* Offset to the RAM lognum */ + uint32 rom_logstrs_offset; /* Offset to the ROM logstr */ + uint32 ram_logstrs_offset; /* Offset to the RAM logstr */ + /* Keep version and magic last since "header" is appended to the end of logstrs file. */ + uint32 version; /* Header version */ + uint32 log_magic; /* MAGIC number for verification 'LOGS' */ +} logstr_header_t; + +/* + * Use the following macros for generating log events. + * + * The FAST versions check the enable of the tag before evaluating the arguments and calling the + * event_log function. This adds 5 instructions. The COMPACT versions evaluate the arguments + * and call the event_log function unconditionally. The event_log function will then skip logging + * if this tag is disabled. + * + * To support easy usage of existing debugging (e.g. msglevel) via macro re-definition there are + * two variants of these macros to help. + * + * First there are the CAST versions. The event_log function normally logs uint32 values or else + * they have to be cast to uint32. The CAST versions blindly cast for you so you don't have to edit + * any existing code. + * + * Second there are the PAREN_ARGS versions. These expect the logging format string and arguments + * to be enclosed in parentheses. This allows us to make the following mapping of an existing + * msglevel macro: + * #define WL_ERROR(args) EVENT_LOG_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ERROR, args) + * + * The versions of the macros without FAST or COMPACT in their name are just synonyms for the + * COMPACT versions. + * + * You should use the COMPACT macro (or its synonym) in cases where there is some preceding logic + * that prevents the execution of the macro, e.g. WL_ERROR by definition rarely gets executed. + * Use the FAST macro in performance sensitive paths. The key concept here is that you should be + * assuming that your macro usage is compiled into ROM and can't be changed ... so choose wisely. + * + */ + +#ifndef EVENT_LOG_DUMPER + +#ifndef EVENT_LOG_COMPILE + +/* Null define if no tracing */ +#define EVENT_LOG(format, ...) +#define EVENT_LOG_FAST(tag, fmt, ...) +#define EVENT_LOG_COMPACT(tag, fmt, ...) + +#define EVENT_LOG_CAST(tag, fmt, ...) +#define EVENT_LOG_FAST_CAST(tag, fmt, ...) 
+#define EVENT_LOG_COMPACT_CAST(tag, fmt, ...) + +#define EVENT_LOG_CAST_PAREN_ARGS(tag, pargs) +#define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs) +#define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs) + +#define EVENT_LOG_IS_LOG_ON(tag) 0 + +#else /* EVENT_LOG_COMPILE */ + +/* The first few are special because they can be done more efficiently + * this way and they are the common case. Once there are too many + * parameters the code size starts to be an issue and a loop is better + */ +#define _EVENT_LOG0(tag, fmt_num) \ + event_log0(tag, fmt_num) +#define _EVENT_LOG1(tag, fmt_num, t1) \ + event_log1(tag, fmt_num, t1) +#define _EVENT_LOG2(tag, fmt_num, t1, t2) \ + event_log2(tag, fmt_num, t1, t2) +#define _EVENT_LOG3(tag, fmt_num, t1, t2, t3) \ + event_log3(tag, fmt_num, t1, t2, t3) +#define _EVENT_LOG4(tag, fmt_num, t1, t2, t3, t4) \ + event_log4(tag, fmt_num, t1, t2, t3, t4) + +/* The rest call the generic routine that takes a count */ +#define _EVENT_LOG5(tag, fmt_num, ...) event_logn(5, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG6(tag, fmt_num, ...) event_logn(6, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG7(tag, fmt_num, ...) event_logn(7, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG8(tag, fmt_num, ...) event_logn(8, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG9(tag, fmt_num, ...) event_logn(9, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGA(tag, fmt_num, ...) event_logn(10, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGB(tag, fmt_num, ...) event_logn(11, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGC(tag, fmt_num, ...) event_logn(12, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGD(tag, fmt_num, ...) event_logn(13, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGE(tag, fmt_num, ...) event_logn(14, tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOGF(tag, fmt_num, ...) event_logn(15, tag, fmt_num, __VA_ARGS__) + + +/* Casting low level macros */ +#define _EVENT_LOG_CAST0(tag, fmt_num) \ + event_log0(tag, fmt_num) +#define _EVENT_LOG_CAST1(tag, fmt_num, t1) \ + event_log1(tag, fmt_num, (uint32)(t1)) +#define _EVENT_LOG_CAST2(tag, fmt_num, t1, t2) \ + event_log2(tag, fmt_num, (uint32)(t1), (uint32)(t2)) +#define _EVENT_LOG_CAST3(tag, fmt_num, t1, t2, t3) \ + event_log3(tag, fmt_num, (uint32)(t1), (uint32)(t2), (uint32)(t3)) +#define _EVENT_LOG_CAST4(tag, fmt_num, t1, t2, t3, t4) \ + event_log4(tag, fmt_num, (uint32)(t1), (uint32)(t2), (uint32)(t3), (uint32)(t4)) + +/* The rest call the generic routine that takes a count */ +#define _EVENT_LOG_CAST5(tag, fmt_num, ...) _EVENT_LOG5(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CAST6(tag, fmt_num, ...) _EVENT_LOG6(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CAST7(tag, fmt_num, ...) _EVENT_LOG7(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CAST8(tag, fmt_num, ...) _EVENT_LOG8(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CAST9(tag, fmt_num, ...) _EVENT_LOG9(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTA(tag, fmt_num, ...) _EVENT_LOGA(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTB(tag, fmt_num, ...) _EVENT_LOGB(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTC(tag, fmt_num, ...) _EVENT_LOGC(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTD(tag, fmt_num, ...) _EVENT_LOGD(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTE(tag, fmt_num, ...) _EVENT_LOGE(tag, fmt_num, __VA_ARGS__) +#define _EVENT_LOG_CASTF(tag, fmt_num, ...) _EVENT_LOGF(tag, fmt_num, __VA_ARGS__) + +/* Hack to make the proper routine call when variadic macros get + * passed. Note the max of 15 arguments. 
More than that can't be + * handled by the event_log entries anyway, so it is best to catch it at compile + * time + */ + +#define _EVENT_LOG_VA_NUM_ARGS(F, _1, _2, _3, _4, _5, _6, _7, _8, _9, \ + _A, _B, _C, _D, _E, _F, N, ...) F ## N + +/* cast = _EVENT_LOG for no casting + * cast = _EVENT_LOG_CAST for casting of fmt arguments to uint32. + * Only the first 4 arguments are cast to uint32. event_logn() is called + * if more than 4 arguments are present. This function internally assumes + * all arguments are uint32 + */ +#define _EVENT_LOG(cast, tag, fmt, ...) \ + static char logstr[] __attribute__ ((section(".logstrs"))) = fmt; \ + static uint32 fmtnum __attribute__ ((section(".lognums"))) = (uint32) &logstr; \ + _EVENT_LOG_VA_NUM_ARGS(cast, ##__VA_ARGS__, \ + F, E, D, C, B, A, 9, 8, \ + 7, 6, 5, 4, 3, 2, 1, 0) \ + (tag, (int) &fmtnum , ## __VA_ARGS__) + + +#define EVENT_LOG_FAST(tag, fmt, ...) \ + do { \ + if (event_log_tag_sets != NULL) { \ + uint8 tag_flag = *(event_log_tag_sets + tag); \ + if (tag_flag != 0) { \ + _EVENT_LOG(_EVENT_LOG, tag, fmt , ## __VA_ARGS__); \ + } \ + } \ + } while (0) + +#define EVENT_LOG_COMPACT(tag, fmt, ...) \ + do { \ + _EVENT_LOG(_EVENT_LOG, tag, fmt , ## __VA_ARGS__); \ + } while (0) + +/* Event log macro with casting to uint32 of arguments */ +#define EVENT_LOG_FAST_CAST(tag, fmt, ...) \ + do { \ + if (event_log_tag_sets != NULL) { \ + uint8 tag_flag = *(event_log_tag_sets + tag); \ + if (tag_flag != 0) { \ + _EVENT_LOG(_EVENT_LOG_CAST, tag, fmt , ## __VA_ARGS__); \ + } \ + } \ + } while (0) + +#define EVENT_LOG_COMPACT_CAST(tag, fmt, ...) \ + do { \ + _EVENT_LOG(_EVENT_LOG_CAST, tag, fmt , ## __VA_ARGS__); \ + } while (0) + + +#define EVENT_LOG(tag, fmt, ...) EVENT_LOG_COMPACT(tag, fmt , ## __VA_ARGS__) + +#define EVENT_LOG_CAST(tag, fmt, ...) EVENT_LOG_COMPACT_CAST(tag, fmt , ## __VA_ARGS__) + +#define _EVENT_LOG_REMOVE_PAREN(...)
__VA_ARGS__ +#define EVENT_LOG_REMOVE_PAREN(args) _EVENT_LOG_REMOVE_PAREN args + +#define EVENT_LOG_CAST_PAREN_ARGS(tag, pargs) \ + EVENT_LOG_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs)) + +#define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs) \ + EVENT_LOG_FAST_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs)) + +#define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs) \ + EVENT_LOG_COMPACT_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs)) + + +#define EVENT_LOG_IS_LOG_ON(tag) (*(event_log_tag_sets + (tag)) & EVENT_LOG_TAG_FLAG_LOG) + +#define EVENT_DUMP event_log_buffer + +extern uint8 *event_log_tag_sets; + +#include + +extern int event_log_init(si_t *sih); +extern int event_log_set_init(si_t *sih, int set_num, int size); +extern int event_log_set_expand(si_t *sih, int set_num, int size); +extern int event_log_set_shrink(si_t *sih, int set_num, int size); +extern int event_log_tag_start(int tag, int set_num, int flags); +extern int event_log_tag_stop(int tag); +extern int event_log_get(int set_num, int buflen, void *buf); +extern uint8 * event_log_next_logtrace(int set_num); + +extern void event_log0(int tag, int fmtNum); +extern void event_log1(int tag, int fmtNum, uint32 t1); +extern void event_log2(int tag, int fmtNum, uint32 t1, uint32 t2); +extern void event_log3(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3); +extern void event_log4(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3, uint32 t4); +extern void event_logn(int num_args, int tag, int fmtNum, ...); + +extern void event_log_time_sync(uint32 ms); +extern void event_log_buffer(int tag, uint8 *buf, int size); + +#endif /* EVENT_LOG_COMPILE */ + +#endif /* EVENT_LOG_DUMPER */ + +#endif /* __ASSEMBLER__ */ + +#endif /* _EVENT_LOG_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h b/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h new file mode 100644 index 000000000000..baf55724c595 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/hnd_armtrap.h @@ -0,0 +1,92 @@ +/* + * HND arm trap handling. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent.
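/* Editor's note: the msglevel re-mapping described in the macro comment block
 * above, made concrete. Under EVENT_LOG_COMPILE an existing WL_ERROR() call
 * site is redirected into the event log through the PAREN_ARGS variant; the
 * printf fallback branch and the sample call site are assumptions added for
 * illustration, not part of this header. */
#ifdef EVENT_LOG_COMPILE
#define WL_ERROR(args)	EVENT_LOG_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ERROR, args)
#else
#define WL_ERROR(args)	printf args
#endif

/* Call sites keep their double-parenthesized form and need no edits:
 *	WL_ERROR(("wl%d: %s: out of memory\n", unit, __FUNCTION__));
 */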
+ * + * + * <> + * + * $Id: hnd_armtrap.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _hnd_armtrap_h_ +#define _hnd_armtrap_h_ + + +/* ARM trap handling */ + +/* Trap types defined by ARM (see arminc.h) */ + +/* Trap locations in lo memory */ +#define TRAP_STRIDE 4 +#define FIRST_TRAP TR_RST +#define LAST_TRAP (TR_FIQ * TRAP_STRIDE) + +#if defined(__ARM_ARCH_4T__) +#define MAX_TRAP_TYPE (TR_FIQ + 1) +#elif defined(__ARM_ARCH_7M__) +#define MAX_TRAP_TYPE (TR_ISR + ARMCM3_NUMINTS) +#endif /* __ARM_ARCH_7M__ */ + +/* The trap structure is defined here as offsets for assembly */ +#define TR_TYPE 0x00 +#define TR_EPC 0x04 +#define TR_CPSR 0x08 +#define TR_SPSR 0x0c +#define TR_REGS 0x10 +#define TR_REG(n) (TR_REGS + (n) * 4) +#define TR_SP TR_REG(13) +#define TR_LR TR_REG(14) +#define TR_PC TR_REG(15) + +#define TRAP_T_SIZE 80 +#define ASSERT_TRAP_SVC_NUMBER 255 + +#ifndef _LANGUAGE_ASSEMBLY + +#include + +typedef struct _trap_struct { + uint32 type; + uint32 epc; + uint32 cpsr; + uint32 spsr; + uint32 r0; /* a1 */ + uint32 r1; /* a2 */ + uint32 r2; /* a3 */ + uint32 r3; /* a4 */ + uint32 r4; /* v1 */ + uint32 r5; /* v2 */ + uint32 r6; /* v3 */ + uint32 r7; /* v4 */ + uint32 r8; /* v5 */ + uint32 r9; /* sb/v6 */ + uint32 r10; /* sl/v7 */ + uint32 r11; /* fp/v8 */ + uint32 r12; /* ip */ + uint32 r13; /* sp */ + uint32 r14; /* lr */ + uint32 pc; /* r15 */ +} trap_t; + +#endif /* !_LANGUAGE_ASSEMBLY */ + +#endif /* _hnd_armtrap_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/hnd_cons.h b/drivers/net/wireless/bcmdhd/include/hnd_cons.h new file mode 100644 index 000000000000..2dee71abefeb --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/hnd_cons.h @@ -0,0 +1,80 @@ +/* + * Console support for RTE - for host use only. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hnd_cons.h 514727 2014-11-12 03:02:48Z $ + */ +#ifndef _hnd_cons_h_ +#define _hnd_cons_h_ + +#include +#include + +#define CBUF_LEN (128) + +#define LOG_BUF_LEN 1024 + +#ifdef BOOTLOADER_CONSOLE_OUTPUT +#undef RWL_MAX_DATA_LEN +#undef CBUF_LEN +#undef LOG_BUF_LEN +#define RWL_MAX_DATA_LEN (4 * 1024 + 8) +#define CBUF_LEN (RWL_MAX_DATA_LEN + 64) +#define LOG_BUF_LEN (16 * 1024) +#endif + +typedef struct { + uint32 buf; /* Can't be pointer on (64-bit) hosts */ + uint buf_size; + uint idx; + uint out_idx; /* output index */ +} hnd_log_t; + +typedef struct { + /* Virtual UART + * When there is no UART (e.g. 
Quickturn), the host should write a complete + * input line directly into cbuf and then write the length into vcons_in. + * This may also be used when there is a real UART (at risk of conflicting with + * the real UART). vcons_out is currently unused. + */ + volatile uint vcons_in; + volatile uint vcons_out; + + /* Output (logging) buffer + * Console output is written to a ring buffer log_buf at index log_idx. + * The host may read the output when it sees log_idx advance. + * Output will be lost if the output wraps around faster than the host polls. + */ + hnd_log_t log; + + /* Console input line buffer + * Characters are read one at a time into cbuf until <CR> is received, then + * the buffer is processed as a command line. Also used for virtual UART. + */ + uint cbuf_idx; + char cbuf[CBUF_LEN]; +} hnd_cons_t; + +#endif /* _hnd_cons_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h b/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h new file mode 100644 index 000000000000..3cf46727b044 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/hnd_pktpool.h @@ -0,0 +1,225 @@ +/* + * HND generic packet pool operation primitives + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent.
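/* Editor's note: a minimal sketch of how a host could drain the console ring
 * described in hnd_cons.h above, by polling hnd_log_t::idx and consuming
 * bytes up to it. 'ring' is assumed to be the host's own mapping of log->buf
 * (which is kept as a uint32, not a pointer, so 64-bit hosts can share the
 * layout), and using out_idx as the host-side read cursor is an assumption
 * for the example. */
#include <stdio.h>

static void cons_poll(hnd_log_t *log, const char *ring)
{
	uint w = log->idx;                    /* dongle write index */

	while (log->out_idx != w) {           /* new output has appeared */
		putchar(ring[log->out_idx]);
		log->out_idx = (log->out_idx + 1) % log->buf_size;
	}
	/* If the ring wraps faster than this poll runs, output is lost,
	 * exactly as the comment in hnd_cons_t warns. */
}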
+ * + * + * <> + * + * $Id: hnd_pktpool.h 591285 2015-10-07 11:56:29Z $ + */ + +#ifndef _hnd_pktpool_h_ +#define _hnd_pktpool_h_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* mutex macros for thread safe */ +#ifdef HND_PKTPOOL_THREAD_SAFE +#define HND_PKTPOOL_MUTEX_DECL(mutex) OSL_EXT_MUTEX_DECL(mutex) +#else +#define HND_PKTPOOL_MUTEX_DECL(mutex) +#endif + +#ifdef BCMPKTPOOL +#define POOL_ENAB(pool) ((pool) && (pool)->inited) +#else /* BCMPKTPOOL */ +#define POOL_ENAB(bus) 0 +#endif /* BCMPKTPOOL */ + +#ifndef PKTPOOL_LEN_MAX +#define PKTPOOL_LEN_MAX 40 +#endif /* PKTPOOL_LEN_MAX */ +#define PKTPOOL_CB_MAX 3 +#define PKTPOOL_CB_MAX_AVL 4 + + +/* forward declaration */ +struct pktpool; + +typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg); +typedef struct { + pktpool_cb_t cb; + void *arg; +} pktpool_cbinfo_t; + +/** PCIe SPLITRX related: call back fn extension to populate host address in pool pkt */ +typedef int (*pktpool_cb_extn_t)(struct pktpool *pool, void *arg1, void* pkt, bool arg2); +typedef struct { + pktpool_cb_extn_t cb; + void *arg; +} pktpool_cbextn_info_t; + + +#ifdef BCMDBG_POOL +/* pkt pool debug states */ +#define POOL_IDLE 0 +#define POOL_RXFILL 1 +#define POOL_RXDH 2 +#define POOL_RXD11 3 +#define POOL_TXDH 4 +#define POOL_TXD11 5 +#define POOL_AMPDU 6 +#define POOL_TXENQ 7 + +typedef struct { + void *p; + uint32 cycles; + uint32 dur; +} pktpool_dbg_t; + +typedef struct { + uint8 txdh; /* tx to host */ + uint8 txd11; /* tx to d11 */ + uint8 enq; /* waiting in q */ + uint8 rxdh; /* rx from host */ + uint8 rxd11; /* rx from d11 */ + uint8 rxfill; /* dma_rxfill */ + uint8 idle; /* avail in pool */ +} pktpool_stats_t; +#endif /* BCMDBG_POOL */ + +typedef struct pktpool { + bool inited; /**< pktpool_init was successful */ + uint8 type; /**< type of lbuf: basic, frag, etc */ + uint8 id; /**< pktpool ID: index in registry */ + bool istx; /**< direction: transmit or receive data path */ + HND_PKTPOOL_MUTEX_DECL(mutex) /**< thread-safe mutex */ + + void * freelist; /**< free list: see PKTNEXTFREE(), PKTSETNEXTFREE() */ + uint16 avail; /**< number of packets in pool's free list */ + uint16 len; /**< number of packets managed by pool */ + uint16 maxlen; /**< maximum size of pool <= PKTPOOL_LEN_MAX */ + uint16 plen; /**< size of pkt buffer, excluding lbuf|lbuf_frag */ + + bool empty; + uint8 cbtoggle; + uint8 cbcnt; + uint8 ecbcnt; + uint8 emptycb_disable; /**< Value of type enum pktpool_empty_cb_state */ + pktpool_cbinfo_t *availcb_excl; + pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX_AVL]; + pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX]; + pktpool_cbextn_info_t cbext; /**< PCIe SPLITRX related */ + pktpool_cbextn_info_t rxcplidfn; +#ifdef BCMDBG_POOL + uint8 dbg_cbcnt; + pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX]; + uint16 dbg_qlen; + pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1]; +#endif + pktpool_cbinfo_t dmarxfill; +} pktpool_t; + + +pktpool_t *get_pktpools_registry(int id); + +/* Incarnate a pktpool registry. On success returns total_pools. 
*/ +extern int pktpool_attach(osl_t *osh, uint32 total_pools); +extern int pktpool_dettach(osl_t *osh); /* Relinquish registry */ + +extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx, uint8 type); +extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp); +extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal); +extern void* pktpool_get(pktpool_t *pktp); +extern void pktpool_free(pktpool_t *pktp, void *p); +extern int pktpool_add(pktpool_t *pktp, void *p); +extern int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp); +extern int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb); +extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 maxlen); +extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 maxlen); +extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable); +extern bool pktpool_emptycb_disabled(pktpool_t *pktp); +extern int pktpool_hostaddr_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg1); +extern int pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg); +extern void pktpool_invoke_dmarxfill(pktpool_t *pktp); +extern int pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg); + +#define POOLPTR(pp) ((pktpool_t *)(pp)) +#define POOLID(pp) (POOLPTR(pp)->id) + +#define POOLSETID(pp, ppid) (POOLPTR(pp)->id = (ppid)) + +#define pktpool_len(pp) (POOLPTR(pp)->len) +#define pktpool_avail(pp) (POOLPTR(pp)->avail) +#define pktpool_plen(pp) (POOLPTR(pp)->plen) +#define pktpool_maxlen(pp) (POOLPTR(pp)->maxlen) + + +/* + * ---------------------------------------------------------------------------- + * A pool ID is assigned with a pkt pool during pool initialization. This is + * done by maintaining a registry of all initialized pools, and the registry + * index at which the pool is registered is used as the pool's unique ID. + * ID 0 is reserved and is used to signify an invalid pool ID. + * All packets henceforth allocated from a pool will be tagged with the pool's + * unique ID. Packets allocated from the heap will use the reserved ID = 0. + * Packets with non-zero pool id signify that they were allocated from a pool. + * A maximum of 15 pools are supported, allowing a 4bit pool ID to be used + * in place of a 32bit pool pointer in each packet. 
+ * ---------------------------------------------------------------------------- + */ +#define PKTPOOL_INVALID_ID (0) +#define PKTPOOL_MAXIMUM_ID (15) + +/* Registry of pktpool(s) */ +/* Pool ID to/from Pool Pointer converters */ +#define PKTPOOL_ID2PTR(id) (get_pktpools_registry(id)) +#define PKTPOOL_PTR2ID(pp) (POOLID(pp)) + +#ifdef BCMDBG_POOL +extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); +extern int pktpool_start_trigger(pktpool_t *pktp, void *p); +extern int pktpool_dbg_dump(pktpool_t *pktp); +extern int pktpool_dbg_notify(pktpool_t *pktp); +extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats); +#endif /* BCMDBG_POOL */ + +#ifdef BCMPKTPOOL +#define SHARED_POOL (pktpool_shared) +extern pktpool_t *pktpool_shared; +#ifdef BCMFRAGPOOL +#define SHARED_FRAG_POOL (pktpool_shared_lfrag) +extern pktpool_t *pktpool_shared_lfrag; +#endif + +/** PCIe SPLITRX related */ +#define SHARED_RXFRAG_POOL (pktpool_shared_rxlfrag) +extern pktpool_t *pktpool_shared_rxlfrag; + +void hnd_pktpool_init(osl_t *osh); +void hnd_pktpool_fill(pktpool_t *pktpool, bool minimal); +void hnd_pktpool_refill(bool minimal); +#else /* BCMPKTPOOL */ +#define SHARED_POOL ((struct pktpool *)NULL) +#endif /* BCMPKTPOOL */ + +#ifdef __cplusplus + } +#endif + +#endif /* _hnd_pktpool_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/hnd_pktq.h b/drivers/net/wireless/bcmdhd/include/hnd_pktq.h new file mode 100644 index 000000000000..1586de3ca5b4 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/hnd_pktq.h @@ -0,0 +1,214 @@ +/* + * HND generic pktq operation primitives + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
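/* Editor's note: a small sketch of the ID/pointer round trip implied by the
 * registry comment above. A packet carries only the 4-bit pool ID; the pool
 * pointer is recovered through the registry when the packet is returned.
 * pkt_poolid() in the parameter comment is a hypothetical accessor for the
 * tag stored in the packet, and the heap fallback is assumed. */
static void pkt_return_to_pool(void *p, uint8 poolid /* from pkt_poolid(p) */)
{
	if (poolid != PKTPOOL_INVALID_ID) {
		pktpool_t *pp = PKTPOOL_ID2PTR(poolid);   /* registry lookup */
		ASSERT(PKTPOOL_PTR2ID(pp) == poolid);     /* round trip holds */
		pktpool_free(pp, p);                      /* back to the pool */
	}
	/* poolid == 0: heap packet, freed through the normal PKTFREE path. */
}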
+ * + * + * <> + * + * $Id: hnd_pktq.h 591283 2015-10-07 11:52:00Z $ + */ + +#ifndef _hnd_pktq_h_ +#define _hnd_pktq_h_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* mutex macros for thread safe */ +#ifdef HND_PKTQ_THREAD_SAFE +#define HND_PKTQ_MUTEX_DECL(mutex) OSL_EXT_MUTEX_DECL(mutex) +#else +#define HND_PKTQ_MUTEX_DECL(mutex) +#endif + +/* osl multi-precedence packet queue */ +#define PKTQ_LEN_MAX 0xFFFF /* Max uint16 65535 packets */ +#ifndef PKTQ_LEN_DEFAULT +#define PKTQ_LEN_DEFAULT 128 /* Max 128 packets */ +#endif +#ifndef PKTQ_MAX_PREC +#define PKTQ_MAX_PREC 16 /* Maximum precedence levels */ +#endif + +typedef struct pktq_prec { + void *head; /**< first packet to dequeue */ + void *tail; /**< last packet to dequeue */ + uint16 len; /**< number of queued packets */ + uint16 max; /**< maximum number of queued packets */ +} pktq_prec_t; + +#ifdef PKTQ_LOG +typedef struct { + uint32 requested; /**< packets requested to be stored */ + uint32 stored; /**< packets stored */ + uint32 saved; /**< packets saved, + because a lowest priority queue has given away one packet + */ + uint32 selfsaved; /**< packets saved, + because an older packet from the same queue has been dropped + */ + uint32 full_dropped; /**< packets dropped, + because pktq is full with higher precedence packets + */ + uint32 dropped; /**< packets dropped because pktq per that precedence is full */ + uint32 sacrificed; /**< packets dropped, + in order to save one from a queue of a highest priority + */ + uint32 busy; /**< packets dropped because of hardware/transmission error */ + uint32 retry; /**< packets re-sent because they were not received */ + uint32 ps_retry; /**< packets retried again prior to moving to power save mode */ + uint32 suppress; /**< packets which were suppressed and not transmitted */ + uint32 retry_drop; /**< packets finally dropped after retry limit */ + uint32 max_avail; /**< the high-water mark of the queue capacity for packets - + goes to zero as queue fills + */ + uint32 max_used; /**< the high-water mark of the queue utilisation for packets - + increases with use ('inverse' of max_avail) + */ + uint32 queue_capacity; /**< the maximum capacity of the queue */ + uint32 rtsfail; /**< count of rts attempts that failed to receive cts */ + uint32 acked; /**< count of packets sent (acked) successfully */ + uint32 txrate_succ; /**< running total of phy rate of packets sent successfully */ + uint32 txrate_main; /**< running total of primary phy rate of all packets */ + uint32 throughput; /**< actual data transferred successfully */ + uint32 airtime; /**< cumulative total medium access delay in useconds */ + uint32 _logtime; /**< timestamp of last counter clear */ +} pktq_counters_t; + +typedef struct { + uint32 _prec_log; + pktq_counters_t* _prec_cnt[PKTQ_MAX_PREC]; /**< Counters per queue */ +} pktq_log_t; +#endif /* PKTQ_LOG */ + + +#define PKTQ_COMMON \ + uint16 num_prec; /**< number of precedences in use */ \ + uint16 hi_prec; /**< rapid dequeue hint (>= highest non-empty prec) */ \ + uint16 max; /**< total max packets */ \ + uint16 len; /**< total number of packets */ + +/* multi-priority pkt queue */ +struct pktq { + PKTQ_COMMON + /* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */ + struct pktq_prec q[PKTQ_MAX_PREC]; + HND_PKTQ_MUTEX_DECL(mutex) +#ifdef PKTQ_LOG + pktq_log_t* pktqlog; +#endif +}; + +/* simple, non-priority pkt queue */ +struct spktq { + PKTQ_COMMON + /* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */ + struct
pktq_prec q[1]; + HND_PKTQ_MUTEX_DECL(mutex) +}; + +#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--) + +/* fn(pkt, arg). return true if pkt belongs to if */ +typedef bool (*ifpkt_cb_t)(void*, int); + +/* operations on a specific precedence in packet queue */ + +#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max)) +#define pktq_pmax(pq, prec) ((pq)->q[prec].max) +#define pktq_plen(pq, prec) ((pq)->q[prec].len) +#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0) +#define pktq_ppeek(pq, prec) ((pq)->q[prec].head) +#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail) +#ifdef HND_PKTQ_THREAD_SAFE +extern int pktq_pavail(struct pktq *pq, int prec); +extern bool pktq_pfull(struct pktq *pq, int prec); +#else +#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len) +#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max) +#endif /* HND_PKTQ_THREAD_SAFE */ + +extern void pktq_append(struct pktq *pq, int prec, struct spktq *list); +extern void pktq_prepend(struct pktq *pq, int prec, struct spktq *list); + +extern void *pktq_penq(struct pktq *pq, int prec, void *p); +extern void *pktq_penq_head(struct pktq *pq, int prec, void *p); +extern void *pktq_pdeq(struct pktq *pq, int prec); +extern void *pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p); +extern void *pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg); +extern void *pktq_pdeq_tail(struct pktq *pq, int prec); +/* Empty the queue at particular precedence level */ +extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, + ifpkt_cb_t fn, int arg); +/* Remove a specified packet from its queue */ +extern bool pktq_pdel(struct pktq *pq, void *p, int prec); + +/* operations on a set of precedences in packet queue */ + +extern int pktq_mlen(struct pktq *pq, uint prec_bmp); +extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out); +extern void *pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out); + +/* operations on packet queue as a whole */ + +#define pktq_len(pq) ((int)(pq)->len) +#define pktq_max(pq) ((int)(pq)->max) +#define pktq_empty(pq) ((pq)->len == 0) +#ifdef HND_PKTQ_THREAD_SAFE +extern int pktq_avail(struct pktq *pq); +extern bool pktq_full(struct pktq *pq); +#else +#define pktq_avail(pq) ((int)((pq)->max - (pq)->len)) +#define pktq_full(pq) ((pq)->len >= (pq)->max) +#endif /* HND_PKTQ_THREAD_SAFE */ + +/* operations for single precedence queues */ +#define pktenq(pq, p) pktq_penq(((struct pktq *)(void *)pq), 0, (p)) +#define pktenq_head(pq, p) pktq_penq_head(((struct pktq *)(void *)pq), 0, (p)) +#define pktdeq(pq) pktq_pdeq(((struct pktq *)(void *)pq), 0) +#define pktdeq_tail(pq) pktq_pdeq_tail(((struct pktq *)(void *)pq), 0) +#define pktqflush(osh, pq) pktq_flush(osh, ((struct pktq *)(void *)pq), TRUE, NULL, 0) +#define pktqinit(pq, len) pktq_init(((struct pktq *)(void *)pq), 1, len) +#define pktqdeinit(pq) pktq_deinit((struct pktq *)(void *)pq) +#define pktqavail(pq) pktq_avail((struct pktq *)(void *)pq) +#define pktqfull(pq) pktq_full((struct pktq *)(void *)pq) + +extern bool pktq_init(struct pktq *pq, int num_prec, int max_len); +extern bool pktq_deinit(struct pktq *pq); + +extern void pktq_set_max_plen(struct pktq *pq, int prec, int max_len); + +/* prec_out may be NULL if caller is not interested in return value */ +extern void *pktq_deq(struct pktq *pq, int *prec_out); +extern void *pktq_deq_tail(struct pktq *pq, int *prec_out); +extern void *pktq_peek(struct pktq *pq, int *prec_out); +extern 
void *pktq_peek_tail(struct pktq *pq, int *prec_out); +extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg); + +#ifdef __cplusplus + } +#endif + +#endif /* _hnd_pktq_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/hndpmu.h b/drivers/net/wireless/bcmdhd/include/hndpmu.h new file mode 100644 index 000000000000..dfc83d3d7fd1 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/hndpmu.h @@ -0,0 +1,45 @@ +/* + * HND SiliconBackplane PMU support. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hndpmu.h 530150 2015-01-29 08:43:40Z $ + */ + +#ifndef _hndpmu_h_ +#define _hndpmu_h_ + +#include +#include +#include + + +extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on, uint32* min_res_mask); +extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength); + +extern void si_pmu_minresmask_htavail_set(si_t *sih, osl_t *osh, bool set_clear); +extern void si_pmu_slow_clk_reinit(si_t *sih, osl_t *osh); +extern void si_pmu_avbtimer_enable(si_t *sih, osl_t *osh, bool set_flag); + +#endif /* _hndpmu_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/hndsoc.h b/drivers/net/wireless/bcmdhd/include/hndsoc.h new file mode 100644 index 000000000000..36884a088b6f --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/hndsoc.h @@ -0,0 +1,315 @@ +/* + * Broadcom HND chip & on-chip-interconnect-related definitions. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
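/* Editor's note: basic multi-precedence queue usage with the hnd_pktq
 * primitives declared above - init, enqueue at one precedence, then dequeue
 * the highest non-empty precedence via a bitmap. The precedence value 3, the
 * all-ones bitmap, and the omitted error handling are assumptions made to
 * keep the sketch short. */
static void *pktq_example(struct pktq *pq, void *pkt)
{
	int prec_out;

	if (!pktq_init(pq, PKTQ_MAX_PREC, PKTQ_LEN_DEFAULT))
		return NULL;                    /* init failed */

	if (!pktq_pfull(pq, 3))
		pktq_penq(pq, 3, pkt);          /* enqueue at precedence 3 */

	/* dequeue from any precedence in the bitmap, highest first */
	return pktq_mdeq(pq, 0xffff, &prec_out);
}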
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: hndsoc.h 517544 2014-11-26 00:40:42Z $ + */ + +#ifndef _HNDSOC_H +#define _HNDSOC_H + +/* Include the soci specific files */ +#include +#include + +/* + * SOC Interconnect Address Map. + * All regions may not exist on all chips. + */ +#define SI_SDRAM_BASE 0x00000000 /* Physical SDRAM */ +#define SI_PCI_MEM 0x08000000 /* Host Mode sb2pcitranslation0 (64 MB) */ +#define SI_PCI_MEM_SZ (64 * 1024 * 1024) +#define SI_PCI_CFG 0x0c000000 /* Host Mode sb2pcitranslation1 (64 MB) */ +#define SI_SDRAM_SWAPPED 0x10000000 /* Byteswapped Physical SDRAM */ +#define SI_SDRAM_R2 0x80000000 /* Region 2 for sdram (512 MB) */ + +#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */ + +#define SI_WRAP_BASE 0x18100000 /* Wrapper space base */ +#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */ + +#ifndef SI_MAXCORES +#define SI_MAXCORES 32 /* NorthStar has more cores */ +#endif /* SI_MAXCORES */ + +#define SI_MAXBR 4 /* Max bridges (this is arbitrary, for software + * convenience and could be changed if we + * make any larger chips + */ + +#define SI_FASTRAM 0x19000000 /* On-chip RAM on chips that also have DDR */ +#define SI_FASTRAM_SWAPPED 0x19800000 + +#define SI_FLASH2 0x1c000000 /* Flash Region 2 (region 1 shadowed here) */ +#define SI_FLASH2_SZ 0x02000000 /* Size of Flash Region 2 */ +#define SI_ARMCM3_ROM 0x1e000000 /* ARM Cortex-M3 ROM */ +#define SI_FLASH1 0x1fc00000 /* MIPS Flash Region 1 */ +#define SI_FLASH1_SZ 0x00400000 /* MIPS Size of Flash Region 1 */ +#define SI_FLASH_WINDOW 0x01000000 /* Flash XIP Window */ + +#define SI_NS_NANDFLASH 0x1c000000 /* NorthStar NAND flash base */ +#define SI_NS_NORFLASH 0x1e000000 /* NorthStar NOR flash base */ +#define SI_NS_ROM 0xfffd0000 /* NorthStar ROM */ +#define SI_NS_FLASH_WINDOW 0x02000000 /* Flash XIP Window */ + +#define SI_ARM7S_ROM 0x20000000 /* ARM7TDMI-S ROM */ +#define SI_ARMCR4_ROM 0x000f0000 /* ARM Cortex-R4 ROM */ +#define SI_ARMCM3_SRAM2 0x60000000 /* ARM Cortex-M3 SRAM Region 2 */ +#define SI_ARM7S_SRAM2 0x80000000 /* ARM7TDMI-S SRAM Region 2 */ +#define SI_ARMCA7_ROM 0x00000000 /* ARM Cortex-A7 ROM */ +#define SI_ARMCA7_RAM 0x00200000 /* ARM Cortex-A7 RAM */ +#define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */ +#define SI_ARM_FLASH1_SZ 0x00010000 /* ARM Size of Flash Region 1 */ + +#define SI_SFLASH 0x14000000 +#define SI_PCI_DMA 0x40000000 /* Client Mode sb2pcitranslation2 (1 GB) */ +#define SI_PCI_DMA2 0x80000000 /* Client Mode sb2pcitranslation2 (1 GB) */ +#define SI_PCI_DMA_SZ 0x40000000 /* Client Mode sb2pcitranslation2 size in bytes */ +#define SI_PCIE_DMA_L32 0x00000000 /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), low 32 bits + */ +#define SI_PCIE_DMA_H32 0x80000000 /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), high 32 bits + */ + +#define SI_BCM53573_NANDFLASH 0x30000000 /* 53573 NAND flash base */ +#define SI_BCM53573_NORFLASH 0x1c000000 /* 53573 NOR flash base */ + +#define SI_BCM53573_NORFLASH_WINDOW 0x01000000 /* only support 16M direct access for + * 3-byte address modes in spi flash + */ +#define SI_BCM53573_BOOTDEV_MASK 0x3 +#define SI_BCM53573_BOOTDEV_NOR 0x0 + +#define SI_BCM53573_DDRTYPE_MASK 0x10 +#define SI_BCM53573_DDRTYPE_DDR3 0x10 + +/* APB bridge code */ +#define APB_BRIDGE_ID 0x135 /* APB Bridge 0, 
1, etc. */ + +/* core codes */ +#define NODEV_CORE_ID 0x700 /* Invalid coreid */ +#define CC_CORE_ID 0x800 /* chipcommon core */ +#define ILINE20_CORE_ID 0x801 /* iline20 core */ +#define SRAM_CORE_ID 0x802 /* sram core */ +#define SDRAM_CORE_ID 0x803 /* sdram core */ +#define PCI_CORE_ID 0x804 /* pci core */ +#define MIPS_CORE_ID 0x805 /* mips core */ +#define ENET_CORE_ID 0x806 /* enet mac core */ +#define CODEC_CORE_ID 0x807 /* v90 codec core */ +#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */ +#define ADSL_CORE_ID 0x809 /* ADSL core */ +#define ILINE100_CORE_ID 0x80a /* iline100 core */ +#define IPSEC_CORE_ID 0x80b /* ipsec core */ +#define UTOPIA_CORE_ID 0x80c /* utopia core */ +#define PCMCIA_CORE_ID 0x80d /* pcmcia core */ +#define SOCRAM_CORE_ID 0x80e /* internal memory core */ +#define MEMC_CORE_ID 0x80f /* memc sdram core */ +#define OFDM_CORE_ID 0x810 /* OFDM phy core */ +#define EXTIF_CORE_ID 0x811 /* external interface core */ +#define D11_CORE_ID 0x812 /* 802.11 MAC core */ +#define APHY_CORE_ID 0x813 /* 802.11a phy core */ +#define BPHY_CORE_ID 0x814 /* 802.11b phy core */ +#define GPHY_CORE_ID 0x815 /* 802.11g phy core */ +#define MIPS33_CORE_ID 0x816 /* mips3302 core */ +#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */ +#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */ +#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */ +#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */ +#define SDIOH_CORE_ID 0x81b /* sdio host core */ +#define ROBO_CORE_ID 0x81c /* roboswitch core */ +#define ATA100_CORE_ID 0x81d /* parallel ATA core */ +#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */ +#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */ +#define PCIE_CORE_ID 0x820 /* pci express core */ +#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */ +#define SRAMC_CORE_ID 0x822 /* SRAM controller core */ +#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */ +#define ARM11_CORE_ID 0x824 /* ARM 1176 core */ +#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */ +#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */ +#define PMU_CORE_ID 0x827 /* PMU core */ +#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */ +#define SDIOD_CORE_ID 0x829 /* SDIO device core */ +#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */ +#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */ +#define MIPS74K_CORE_ID 0x82c /* mips 74k core */ +#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ +#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */ +#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */ +#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */ +#define SC_CORE_ID 0x831 /* shared common core */ +#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */ +#define SPIH_CORE_ID 0x833 /* SPI host core */ +#define I2S_CORE_ID 0x834 /* I2S core */ +#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */ +#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */ + +#define ACPHY_CORE_ID 0x83b /* Dot11 ACPHY */ +#define PCIE2_CORE_ID 0x83c /* pci express Gen2 core */ +#define USB30D_CORE_ID 0x83d /* usb 3.0 device core */ +#define ARMCR4_CORE_ID 0x83e /* ARM CR4 CPU */ +#define GCI_CORE_ID 0x840 /* GCI Core */ +#define M2MDMA_CORE_ID 0x844 /* memory to memory dma */ +#define CMEM_CORE_ID 0x846 /* CNDS DDR2/3 memory controller */ +#define ARMCA7_CORE_ID 0x847 /* ARM CA7 CPU */ +#define SYSMEM_CORE_ID 0x849 /* System memory core */ +#define APB_BRIDGE_CORE_ID 0x135 /* APB bridge core ID */ +#define AXI_CORE_ID 0x301 /* AXI/GPV core ID */ +#define 
EROM_CORE_ID 0x366 /* EROM core ID */ +#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */ +#define DEF_AI_COMP 0xfff /* Default component, in ai chips it maps all + * unused address ranges + */ + +#define CC_4706_CORE_ID 0x500 /* chipcommon core */ +#define NS_PCIEG2_CORE_ID 0x501 /* PCIE Gen 2 core */ +#define NS_DMA_CORE_ID 0x502 /* DMA core */ +#define NS_SDIO3_CORE_ID 0x503 /* SDIO3 core */ +#define NS_USB20_CORE_ID 0x504 /* USB2.0 core */ +#define NS_USB30_CORE_ID 0x505 /* USB3.0 core */ +#define NS_A9JTAG_CORE_ID 0x506 /* ARM Cortex A9 JTAG core */ +#define NS_DDR23_CORE_ID 0x507 /* Denali DDR2/DDR3 memory controller */ +#define NS_ROM_CORE_ID 0x508 /* ROM core */ +#define NS_NAND_CORE_ID 0x509 /* NAND flash controller core */ +#define NS_QSPI_CORE_ID 0x50a /* SPI flash controller core */ +#define NS_CCB_CORE_ID 0x50b /* ChipcommonB core */ +#define SOCRAM_4706_CORE_ID 0x50e /* internal memory core */ +#define NS_SOCRAM_CORE_ID SOCRAM_4706_CORE_ID +#define ARMCA9_CORE_ID 0x510 /* ARM Cortex A9 core (ihost) */ +#define NS_IHOST_CORE_ID ARMCA9_CORE_ID /* ARM Cortex A9 core (ihost) */ +#define GMAC_COMMON_4706_CORE_ID 0x5dc /* Gigabit MAC core */ +#define GMAC_4706_CORE_ID 0x52d /* Gigabit MAC core */ +#define AMEMC_CORE_ID 0x52e /* DDR1/2 memory controller core */ +#define ALTA_CORE_ID 0x534 /* I2S core */ +#define DDR23_PHY_CORE_ID 0x5dd + +#define SI_PCI1_MEM 0x40000000 /* Host Mode sb2pcitranslation0 (64 MB) */ +#define SI_PCI1_CFG 0x44000000 /* Host Mode sb2pcitranslation1 (64 MB) */ +#define SI_PCIE1_DMA_H32 0xc0000000 /* PCIE Client Mode sb2pcitranslation2 + * (2 ZettaBytes), high 32 bits + */ +#define CC_4706B0_CORE_REV 0x8000001f /* chipcommon core */ +#define SOCRAM_4706B0_CORE_REV 0x80000005 /* internal memory core */ +#define GMAC_4706B0_CORE_REV 0x80000000 /* Gigabit MAC core */ +#define NS_PCIEG2_CORE_REV_B0 0x7 /* NS-B0 PCIE Gen 2 core rev */ + +/* There are TWO constants on all HND chips: SI_ENUM_BASE above, + * and chipcommon being the first core: + */ +#define SI_CC_IDX 0 +/* SOC Interconnect types (aka chip types) */ +#define SOCI_SB 0 +#define SOCI_AI 1 +#define SOCI_UBUS 2 +#define SOCI_NAI 3 + +/* Common core control flags */ +#define SICF_BIST_EN 0x8000 +#define SICF_PME_EN 0x4000 +#define SICF_CORE_BITS 0x3ffc +#define SICF_FGC 0x0002 +#define SICF_CLOCK_EN 0x0001 + +/* Common core status flags */ +#define SISF_BIST_DONE 0x8000 +#define SISF_BIST_ERROR 0x4000 +#define SISF_GATED_CLK 0x2000 +#define SISF_DMA64 0x1000 +#define SISF_CORE_BITS 0x0fff + +/* NorthStar core status flags */ +#define SISF_NS_BOOTDEV_MASK 0x0003 /* ROM core */ +#define SISF_NS_BOOTDEV_NOR 0x0000 /* ROM core */ +#define SISF_NS_BOOTDEV_NAND 0x0001 /* ROM core */ +#define SISF_NS_BOOTDEV_ROM 0x0002 /* ROM core */ +#define SISF_NS_BOOTDEV_OFFLOAD 0x0003 /* ROM core */ +#define SISF_NS_SKUVEC_MASK 0x000c /* ROM core */ + +/* A register that is common to all cores to + * communicate w/PMU regarding clock control.
+ */ +#define SI_CLK_CTL_ST 0x1e0 /* clock control and status */ +#define SI_PWR_CTL_ST 0x1e8 /* For memory clock gating */ + +/* clk_ctl_st register */ +#define CCS_FORCEALP 0x00000001 /* force ALP request */ +#define CCS_FORCEHT 0x00000002 /* force HT request */ +#define CCS_FORCEILP 0x00000004 /* force ILP request */ +#define CCS_ALPAREQ 0x00000008 /* ALP Avail Request */ +#define CCS_HTAREQ 0x00000010 /* HT Avail Request */ +#define CCS_FORCEHWREQOFF 0x00000020 /* Force HW Clock Request Off */ +#define CCS_HQCLKREQ 0x00000040 /* HQ Clock Required */ +#define CCS_USBCLKREQ 0x00000100 /* USB Clock Req */ +#define CCS_SECICLKREQ 0x00000100 /* SECI Clock Req */ +#define CCS_ARMFASTCLOCKREQ 0x00000100 /* ARM CR4/CA7 fast clock request */ +#define CCS_AVBCLKREQ 0x00000400 /* AVB Clock enable request */ +#define CCS_ERSRC_REQ_MASK 0x00000700 /* external resource requests */ +#define CCS_ERSRC_REQ_SHIFT 8 +#define CCS_ALPAVAIL 0x00010000 /* ALP is available */ +#define CCS_HTAVAIL 0x00020000 /* HT is available */ +#define CCS_BP_ON_APL 0x00040000 /* RO: Backplane is running on ALP clock */ +#define CCS_BP_ON_HT 0x00080000 /* RO: Backplane is running on HT clock */ +#define CCS_ARMFASTCLOCKSTATUS 0x01000000 /* Fast CPU clock is running */ +#define CCS_ERSRC_STS_MASK 0x07000000 /* external resource status */ +#define CCS_ERSRC_STS_SHIFT 24 + +#define CCS0_HTAVAIL 0x00010000 /* HT avail in chipc and pcmcia on 4328a0 */ +#define CCS0_ALPAVAIL 0x00020000 /* ALP avail in chipc and pcmcia on 4328a0 */ + +/* Not really related to SOC Interconnect, but a couple of software + * conventions for the use of the flash space: + */ + +/* Minimum amount of flash we support */ +#define FLASH_MIN 0x00020000 /* Minimum flash size */ + +/* A boot/binary may have an embedded block that describes its size */ +#define BISZ_OFFSET 0x3e0 /* At this offset into the binary */ +#define BISZ_MAGIC 0x4249535a /* Marked with this value: 'BISZ' */ +#define BISZ_MAGIC_IDX 0 /* Word 0: magic */ +#define BISZ_TXTST_IDX 1 /* 1: text start */ +#define BISZ_TXTEND_IDX 2 /* 2: text end */ +#define BISZ_DATAST_IDX 3 /* 3: data start */ +#define BISZ_DATAEND_IDX 4 /* 4: data end */ +#define BISZ_BSSST_IDX 5 /* 5: bss start */ +#define BISZ_BSSEND_IDX 6 /* 6: bss end */ +#define BISZ_SIZE 7 /* descriptor size in 32-bit integers */ + +/* Boot/Kernel related definitions and functions */ +#define SOC_BOOTDEV_ROM 0x00000001 +#define SOC_BOOTDEV_PFLASH 0x00000002 +#define SOC_BOOTDEV_SFLASH 0x00000004 +#define SOC_BOOTDEV_NANDFLASH 0x00000008 + +#define SOC_KNLDEV_NORFLASH 0x00000002 +#define SOC_KNLDEV_NANDFLASH 0x00000004 + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) +int soc_boot_dev(void *sih); +int soc_knl_dev(void *sih); +#endif /* !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) */ + +#endif /* _HNDSOC_H */ diff --git a/drivers/net/wireless/bcmdhd/include/linux_osl.h b/drivers/net/wireless/bcmdhd/include/linux_osl.h new file mode 100644 index 000000000000..539a2fa771d7 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/linux_osl.h @@ -0,0 +1,1088 @@ +/* + * Linux OS Independent Layer + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the
copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: linux_osl.h 601764 2015-11-24 03:47:41Z $ + */ + +#ifndef _linux_osl_h_ +#define _linux_osl_h_ + +#include +#define DECLSPEC_ALIGN(x) __attribute__ ((aligned(x))) + +/* Linux Kernel: File Operations: start */ +extern void * osl_os_open_image(char * filename); +extern int osl_os_get_image_block(char * buf, int len, void * image); +extern void osl_os_close_image(void * image); +extern int osl_os_image_size(void *image); +/* Linux Kernel: File Operations: end */ + +#ifdef BCMDRIVER + +/* OSL initialization */ +#ifdef SHARED_OSL_CMN +extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag, void **osh_cmn); +#else +extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag); +#endif /* SHARED_OSL_CMN */ + +extern void osl_detach(osl_t *osh); +extern int osl_static_mem_init(osl_t *osh, void *adapter); +extern int osl_static_mem_deinit(osl_t *osh, void *adapter); +extern void osl_set_bus_handle(osl_t *osh, void *bus_handle); +extern void* osl_get_bus_handle(osl_t *osh); + +/* Global ASSERT type */ +extern uint32 g_assert_type; + +/* ASSERT */ +#if defined(BCMASSERT_LOG) + #define ASSERT(exp) \ + do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0) +extern void osl_assert(const char *exp, const char *file, int line); +#else + #ifdef __GNUC__ + #define GCC_VERSION \ + (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) + #if GCC_VERSION > 30100 + #define ASSERT(exp) do {} while (0) + #else + /* ASSERT could cause segmentation fault on GCC3.1, use empty instead */ + #define ASSERT(exp) + #endif /* GCC_VERSION > 30100 */ + #endif /* __GNUC__ */ +#endif + +/* bcm_prefetch_32B */ +static inline void bcm_prefetch_32B(const uint8 *addr, const int cachelines_32B) +{ +} + +/* microsecond delay */ +#define OSL_DELAY(usec) osl_delay(usec) +extern void osl_delay(uint usec); + +#define OSL_SLEEP(ms) osl_sleep(ms) +extern void osl_sleep(uint ms); + +#define OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \ + osl_pcmcia_read_attr((osh), (offset), (buf), (size)) +#define OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \ + osl_pcmcia_write_attr((osh), (offset), (buf), (size)) +extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size); +extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size); + +/* PCI configuration space access macros */ +#define OSL_PCI_READ_CONFIG(osh, offset, size) \ + osl_pci_read_config((osh), (offset), (size)) +#define OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \ + osl_pci_write_config((osh), (offset), (size), (val)) +extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size); +extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val); + +/* PCI device bus # and slot # */ +#define OSL_PCI_BUS(osh) osl_pci_bus(osh) +#define OSL_PCI_SLOT(osh) 
osl_pci_slot(osh) +#define OSL_PCIE_DOMAIN(osh) osl_pcie_domain(osh) +#define OSL_PCIE_BUS(osh) osl_pcie_bus(osh) +extern uint osl_pci_bus(osl_t *osh); +extern uint osl_pci_slot(osl_t *osh); +extern uint osl_pcie_domain(osl_t *osh); +extern uint osl_pcie_bus(osl_t *osh); +extern struct pci_dev *osl_pci_device(osl_t *osh); + +#define OSL_ACP_COHERENCE (1<<1L) + +/* Pkttag flag should be part of public information */ +typedef struct { + bool pkttag; + bool mmbus; /**< Bus supports memory-mapped register accesses */ + pktfree_cb_fn_t tx_fn; /**< Callback function for PKTFREE */ + void *tx_ctx; /**< Context to the callback function */ + void *unused[3]; +} osl_pubinfo_t; + +extern void osl_flag_set(osl_t *osh, uint32 mask); +extern bool osl_is_flag_set(osl_t *osh, uint32 mask); + +#define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \ + do { \ + ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn; \ + ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx; \ + } while (0) + + +/* host/bus architecture-specific byte swap */ +#define BUS_SWAP32(v) (v) + #define MALLOC(osh, size) osl_malloc((osh), (size)) + #define MALLOCZ(osh, size) osl_mallocz((osh), (size)) + #define MFREE(osh, addr, size) osl_mfree((osh), (addr), (size)) + #define MALLOCED(osh) osl_malloced((osh)) + #define MEMORY_LEFTOVER(osh) osl_check_memleak(osh) + extern void *osl_malloc(osl_t *osh, uint size); + extern void *osl_mallocz(osl_t *osh, uint size); + extern void osl_mfree(osl_t *osh, void *addr, uint size); + extern uint osl_malloced(osl_t *osh); + extern uint osl_check_memleak(osl_t *osh); + + +#define MALLOC_FAILED(osh) osl_malloc_failed((osh)) +extern uint osl_malloc_failed(osl_t *osh); + +/* allocate/free shared (dma-able) consistent memory */ +#define DMA_CONSISTENT_ALIGN osl_dma_consistent_align() +#define DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \ + osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap)) +#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \ + osl_dma_free_consistent((osh), (void*)(va), (size), (pa)) + +#define DMA_ALLOC_CONSISTENT_FORCE32(osh, size, align, tot, pap, dmah) \ + osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap)) +#define DMA_FREE_CONSISTENT_FORCE32(osh, va, size, pa, dmah) \ + osl_dma_free_consistent((osh), (void*)(va), (size), (pa)) + +extern uint osl_dma_consistent_align(void); +extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align, + uint *tot, dmaaddr_t *pap); +extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa); + +/* map/unmap direction */ +#define DMA_TX 1 /* TX direction for DMA */ +#define DMA_RX 2 /* RX direction for DMA */ + +/* map/unmap shared (dma-able) memory */ +#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \ + osl_dma_unmap((osh), (pa), (size), (direction)) +extern dmaaddr_t osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *txp_dmah); +extern void osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction); + +/* API for DMA addressing capability */ +#define OSL_DMADDRWIDTH(osh, addrwidth) ({BCM_REFERENCE(osh); BCM_REFERENCE(addrwidth);}) + +#define OSL_SMP_WMB() smp_wmb() + +/* API for CPU relax */ +extern void osl_cpu_relax(void); +#define OSL_CPU_RELAX() osl_cpu_relax() + +#if (!defined(DHD_USE_COHERENT_MEM_FOR_RING) && defined(__ARM_ARCH_7A__)) || \ + (defined(STBLINUX) && defined(__ARM_ARCH_7A__)) || (defined(CONFIG_ARCH_MSM8996) || \ + defined(CONFIG_SOC_EXYNOS8890)) + extern void osl_cache_flush(void *va, uint size); + extern void osl_cache_inv(void *va, uint size); 
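+ /* Editorial sketch, not part of the original patch: the usual pairing of
+  * these cache maintenance hooks around a buffer that a device accesses by
+  * DMA on a non-coherent ARM core (dma_buf and len are hypothetical
+  * placeholders):
+  *
+  *     OSL_CACHE_FLUSH(dma_buf, len);  push dirty CPU lines before the device reads
+  *     ... device DMA runs ...
+  *     OSL_CACHE_INV(dma_buf, len);    drop stale CPU lines after the device writes
+  *
+  * OSL_PREFETCH() is only a read hint and needs no pairing.
+  */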
+ extern void osl_prefetch(const void *ptr); + #define OSL_CACHE_FLUSH(va, len) osl_cache_flush((void *) va, len) + #define OSL_CACHE_INV(va, len) osl_cache_inv((void *) va, len) + #define OSL_PREFETCH(ptr) osl_prefetch(ptr) +#ifdef __ARM_ARCH_7A__ + extern int osl_arch_is_coherent(void); + #define OSL_ARCH_IS_COHERENT() osl_arch_is_coherent() + extern int osl_acp_war_enab(void); + #define OSL_ACP_WAR_ENAB() osl_acp_war_enab() +#else + #define OSL_ARCH_IS_COHERENT() NULL + #define OSL_ACP_WAR_ENAB() NULL +#endif /* __ARM_ARCH_7A__ */ +#else + #define OSL_CACHE_FLUSH(va, len) BCM_REFERENCE(va) + #define OSL_CACHE_INV(va, len) BCM_REFERENCE(va) + #define OSL_PREFETCH(ptr) BCM_REFERENCE(ptr) + + #define OSL_ARCH_IS_COHERENT() NULL + #define OSL_ACP_WAR_ENAB() NULL +#endif + +/* register access macros */ +#if defined(BCMSDIO) + #include + #define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(osl_get_bus_handle(osh), \ + (uintptr)(r), sizeof(*(r)), (v))) + #define OSL_READ_REG(osh, r) (bcmsdh_reg_read(osl_get_bus_handle(osh), \ + (uintptr)(r), sizeof(*(r)))) +#endif + +#if defined(BCMSDIO) + #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \ + mmap_op else bus_op + #define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \ + mmap_op : bus_op +#else + #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;}) + #define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;}) +#endif + +#define OSL_ERROR(bcmerror) osl_error(bcmerror) +extern int osl_error(int bcmerror); + +/* the largest reasonable packet buffer driver uses for ethernet MTU in bytes */ +#define PKTBUFSZ 2048 /* largest reasonable packet buffer, driver uses for ethernet MTU */ + +#define OSH_NULL NULL + +/* + * BINOSL selects the slightly slower function-call-based binary compatible osl. + * Macros expand to calls to functions defined in linux_osl.c . + */ +#include /* use current 2.4.x calling conventions */ +#include /* for vsn/printf's */ +#include /* for mem*, str* */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) +#define OSL_SYSUPTIME() ((uint32)jiffies_to_msecs(jiffies)) +#else +#define OSL_SYSUPTIME() ((uint32)jiffies * (1000 / HZ)) +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) */ +#define printf(fmt, args...) 
printk(fmt , ## args) +#include /* for vsn/printf's */ +#include /* for mem*, str* */ +/* bcopy's: Linux kernel doesn't provide these (anymore) */ +#define bcopy(src, dst, len) memcpy((dst), (src), (len)) +#define bcmp(b1, b2, len) memcmp((b1), (b2), (len)) +#define bzero(b, len) memset((b), '\0', (len)) + +/* register access macros */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && \ + defined(CONFIG_X86) +#define R_REG(osh, r) (\ + SELECT_BUS_READ(osh, \ + ({ \ + __typeof(*(r)) __osl_v; \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): __osl_v = \ + readb((volatile uint8*)(r)); break; \ + case sizeof(uint16): __osl_v = \ + readw((volatile uint16*)(r)); break; \ + case sizeof(uint32): __osl_v = \ + readl((volatile uint32*)(r)); break; \ + case sizeof(uint64): __osl_v = \ + readq((volatile uint64*)(r)); break; \ + } \ + __osl_v; \ + }), \ + OSL_READ_REG(osh, r)) \ +) +#else +#define R_REG(osh, r) (\ + SELECT_BUS_READ(osh, \ + ({ \ + __typeof(*(r)) __osl_v; \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): __osl_v = \ + readb((volatile uint8*)(r)); break; \ + case sizeof(uint16): __osl_v = \ + readw((volatile uint16*)(r)); break; \ + case sizeof(uint32): __osl_v = \ + readl((volatile uint32*)(r)); break; \ + } \ + __osl_v; \ + }), \ + OSL_READ_REG(osh, r)) \ +) +#endif /* KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && defined(CONFIG_X86) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && \ + defined(CONFIG_X86) +#define W_REG(osh, r, v) do { \ + SELECT_BUS_WRITE(osh, \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \ + case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \ + case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \ + case sizeof(uint64): writeq((uint64)(v), (volatile uint64*)(r)); break; \ + }, \ + (OSL_WRITE_REG(osh, r, v))); \ + } while (0) +#else +#define W_REG(osh, r, v) do { \ + SELECT_BUS_WRITE(osh, \ + switch (sizeof(*(r))) { \ + case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \ + case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \ + case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \ + }, \ + (OSL_WRITE_REG(osh, r, v))); \ + } while (0) +#endif /* KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && defined(CONFIG_X86) */ + +#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v)) +#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v)) + +/* bcopy, bcmp, and bzero functions */ +#define bcopy(src, dst, len) memcpy((dst), (src), (len)) +#define bcmp(b1, b2, len) memcmp((b1), (b2), (len)) +#define bzero(b, len) memset((b), '\0', (len)) + +/* uncached/cached virtual address */ +#define OSL_UNCACHED(va) ((void *)va) +#define OSL_CACHED(va) ((void *)va) + +#define OSL_PREF_RANGE_LD(va, sz) BCM_REFERENCE(va) +#define OSL_PREF_RANGE_ST(va, sz) BCM_REFERENCE(va) + +/* get processor cycle count */ +#if defined(__i386__) +#define OSL_GETCYCLES(x) rdtscl((x)) +#else +#define OSL_GETCYCLES(x) ((x) = 0) +#endif + +/* dereference an address that may cause a bus exception */ +#define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; }) + +/* map/unmap physical to virtual I/O */ +#if !defined(CONFIG_MMC_MSM7X00A) +#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), (unsigned long)(size)) +#else +#define REG_MAP(pa, size) (void *)(0) +#endif /* !defined(CONFIG_MMC_MSM7X00A */ +#define REG_UNMAP(va) iounmap((va)) + +/* 
shared (dma-able) memory access macros */ +#define R_SM(r) *(r) +#define W_SM(r, v) (*(r) = (v)) +#define BZERO_SM(r, len) memset((r), '\0', (len)) + +/* Because the non BINOSL implemenation of the PKT OSL routines are macros (for + * performance reasons), we need the Linux headers. + */ +#include /* use current 2.4.x calling conventions */ + +/* packet primitives */ +#ifdef BCMDBG_CTRACE +#define PKTGET(osh, len, send) osl_pktget((osh), (len), __LINE__, __FILE__) +#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__) +#else +#ifdef BCM_OBJECT_TRACE +#define PKTGET(osh, len, send) osl_pktget((osh), (len), __LINE__, __FUNCTION__) +#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FUNCTION__) +#else +#define PKTGET(osh, len, send) osl_pktget((osh), (len)) +#define PKTDUP(osh, skb) osl_pktdup((osh), (skb)) +#endif /* BCM_OBJECT_TRACE */ +#endif /* BCMDBG_CTRACE */ +#define PKTLIST_DUMP(osh, buf) BCM_REFERENCE(osh) +#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh) +#if defined(BCM_OBJECT_TRACE) +#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send), __LINE__, __FUNCTION__) +#else +#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send)) +#endif /* BCM_OBJECT_TRACE */ +#ifdef CONFIG_DHD_USE_STATIC_BUF +#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len)) +#define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send)) +#else +#define PKTGET_STATIC PKTGET +#define PKTFREE_STATIC PKTFREE +#endif /* CONFIG_DHD_USE_STATIC_BUF */ +#define PKTDATA(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->data);}) +#define PKTLEN(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->len);}) +#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head)) +#define PKTEXPHEADROOM(osh, skb, b) \ + ({ \ + BCM_REFERENCE(osh); \ + skb_realloc_headroom((struct sk_buff*)(skb), (b)); \ + }) +#define PKTTAILROOM(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + skb_tailroom((struct sk_buff*)(skb)); \ + }) +#define PKTPADTAILROOM(osh, skb, padlen) \ + ({ \ + BCM_REFERENCE(osh); \ + skb_pad((struct sk_buff*)(skb), (padlen)); \ + }) +#define PKTNEXT(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->next);}) +#define PKTSETNEXT(osh, skb, x) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)); \ + }) +#define PKTSETLEN(osh, skb, len) \ + ({ \ + BCM_REFERENCE(osh); \ + __skb_trim((struct sk_buff*)(skb), (len)); \ + }) +#define PKTPUSH(osh, skb, bytes) \ + ({ \ + BCM_REFERENCE(osh); \ + skb_push((struct sk_buff*)(skb), (bytes)); \ + }) +#define PKTPULL(osh, skb, bytes) \ + ({ \ + BCM_REFERENCE(osh); \ + skb_pull((struct sk_buff*)(skb), (bytes)); \ + }) +#define PKTTAG(skb) ((void*)(((struct sk_buff*)(skb))->cb)) +#define PKTSETPOOL(osh, skb, x, y) BCM_REFERENCE(osh) +#define PKTPOOL(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;}) +#define PKTFREELIST(skb) PKTLINK(skb) +#define PKTSETFREELIST(skb, x) PKTSETLINK((skb), (x)) +#define PKTPTR(skb) (skb) +#define PKTID(skb) ({BCM_REFERENCE(skb); 0;}) +#define PKTSETID(skb, id) ({BCM_REFERENCE(skb); BCM_REFERENCE(id);}) +#define PKTSHRINK(osh, m) ({BCM_REFERENCE(osh); m;}) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && defined(TSQ_MULTIPLIER) +#define PKTORPHAN(skb) osl_pkt_orphan_partial(skb) +extern void osl_pkt_orphan_partial(struct sk_buff *skb); +#else +#define PKTORPHAN(skb) ({BCM_REFERENCE(skb); 0;}) +#endif /* LINUX VERSION >= 3.6 */ + + +#ifdef BCMDBG_CTRACE +#define DEL_CTRACE(zosh, zskb) { 
\ + unsigned long zflags; \ + spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \ + list_del(&(zskb)->ctrace_list); \ + (zosh)->ctrace_num--; \ + (zskb)->ctrace_start = 0; \ + (zskb)->ctrace_count = 0; \ + spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \ +} + +#define UPDATE_CTRACE(zskb, zfile, zline) { \ + struct sk_buff *_zskb = (struct sk_buff *)(zskb); \ + if (_zskb->ctrace_count < CTRACE_NUM) { \ + _zskb->func[_zskb->ctrace_count] = zfile; \ + _zskb->line[_zskb->ctrace_count] = zline; \ + _zskb->ctrace_count++; \ + } \ + else { \ + _zskb->func[_zskb->ctrace_start] = zfile; \ + _zskb->line[_zskb->ctrace_start] = zline; \ + _zskb->ctrace_start++; \ + if (_zskb->ctrace_start >= CTRACE_NUM) \ + _zskb->ctrace_start = 0; \ + } \ +} + +#define ADD_CTRACE(zosh, zskb, zfile, zline) { \ + unsigned long zflags; \ + spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \ + list_add(&(zskb)->ctrace_list, &(zosh)->ctrace_list); \ + (zosh)->ctrace_num++; \ + UPDATE_CTRACE(zskb, zfile, zline); \ + spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \ +} + +#define PKTCALLER(zskb) UPDATE_CTRACE((struct sk_buff *)zskb, (char *)__FUNCTION__, __LINE__) +#endif /* BCMDBG_CTRACE */ + +#ifdef CTFPOOL +#define CTFPOOL_REFILL_THRESH 3 +typedef struct ctfpool { + void *head; + spinlock_t lock; + uint max_obj; + uint curr_obj; + uint obj_size; + uint refills; + uint fast_allocs; + uint fast_frees; + uint slow_allocs; +} ctfpool_t; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) +#define FASTBUF (1 << 0) +#define PKTSETFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->pktc_flags) |= FASTBUF); \ + }) +#define PKTCLRFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->pktc_flags) &= (~FASTBUF)); \ + }) +#define PKTISFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->pktc_flags) & FASTBUF); \ + }) +#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->pktc_flags) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) +#define FASTBUF (1 << 16) +#define PKTSETFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->mac_len) |= FASTBUF); \ + }) +#define PKTCLRFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF)); \ + }) +#define PKTISFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->mac_len) & FASTBUF); \ + }) +#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->mac_len) +#else +#define FASTBUF (1 << 0) +#define PKTSETFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->__unused) |= FASTBUF); \ + }) +#define PKTCLRFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->__unused) &= (~FASTBUF)); \ + }) +#define PKTISFAST(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + ((((struct sk_buff*)(skb))->__unused) & FASTBUF); \ + }) +#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->__unused) +#endif /* 2.6.22 */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) +#define CTFPOOLPTR(osh, skb) (((struct sk_buff*)(skb))->ctfpool) +#define CTFPOOLHEAD(osh, skb) (((ctfpool_t *)((struct sk_buff*)(skb))->ctfpool)->head) +#else +#define CTFPOOLPTR(osh, skb) (((struct sk_buff*)(skb))->sk) +#define CTFPOOLHEAD(osh, skb) (((ctfpool_t *)((struct sk_buff*)(skb))->sk)->head) +#endif + +extern void *osl_ctfpool_add(osl_t *osh); +extern void osl_ctfpool_replenish(osl_t *osh, uint thresh); +extern int32 osl_ctfpool_init(osl_t *osh, uint numobj, uint size); +extern void osl_ctfpool_cleanup(osl_t *osh); +extern 
void osl_ctfpool_stats(osl_t *osh, void *b); +#else /* CTFPOOL */ +#define PKTSETFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTCLRFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTISFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;}) +#endif /* CTFPOOL */ + +#define PKTSETCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTCLRCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTISCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;}) + +#ifdef HNDCTF + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) +#define SKIPCT (1 << 2) +#define CHAINED (1 << 3) +#define PKTSETSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags |= SKIPCT); \ + }) +#define PKTCLRSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags &= (~SKIPCT)); \ + }) +#define PKTSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags & SKIPCT); \ + }) +#define PKTSETCHAINED(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags |= CHAINED); \ + }) +#define PKTCLRCHAINED(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags &= (~CHAINED)); \ + }) +#define PKTISCHAINED(skb) (((struct sk_buff*)(skb))->pktc_flags & CHAINED) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) +#define SKIPCT (1 << 18) +#define CHAINED (1 << 19) +#define PKTSETSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->mac_len |= SKIPCT); \ + }) +#define PKTCLRSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->mac_len &= (~SKIPCT)); \ + }) +#define PKTSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->mac_len & SKIPCT); \ + }) +#define PKTSETCHAINED(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->mac_len |= CHAINED); \ + }) +#define PKTCLRCHAINED(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->mac_len &= (~CHAINED)); \ + }) +#define PKTISCHAINED(skb) (((struct sk_buff*)(skb))->mac_len & CHAINED) +#else /* 2.6.22 */ +#define SKIPCT (1 << 2) +#define CHAINED (1 << 3) +#define PKTSETSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->__unused |= SKIPCT); \ + }) +#define PKTCLRSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->__unused &= (~SKIPCT)); \ + }) +#define PKTSKIPCT(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->__unused & SKIPCT); \ + }) +#define PKTSETCHAINED(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->__unused |= CHAINED); \ + }) +#define PKTCLRCHAINED(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->__unused &= (~CHAINED)); \ + }) +#define PKTISCHAINED(skb) (((struct sk_buff*)(skb))->__unused & CHAINED) +#endif /* 2.6.22 */ +typedef struct ctf_mark { + uint32 value; +} ctf_mark_t; +#define CTF_MARK(m) (m.value) +#else /* HNDCTF */ +#define PKTSETSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTCLRSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define CTF_MARK(m) ({BCM_REFERENCE(m); 0;}) +#endif /* HNDCTF */ + +#if defined(BCM_GMAC3) + +/** pktalloced accounting in devices using GMAC Bulk Forwarding to DHD */ + +/* Account for packets delivered to downstream forwarder by GMAC interface. 
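+ * Illustrative sketch only, not in the original patch: when skb_cnt packets
+ * are handed off to the downstream GMAC forwarder, the caller adjusts the
+ * pktalloced accounting with
+ *     PKTTOFWDER(osh, skb_list, skb_cnt);
+ * while PKTFRMFWDER() balances the count for packets received back from the
+ * forwarder.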
*/ +extern void osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt); +#define PKTTOFWDER(osh, skbs, skb_cnt) \ + osl_pkt_tofwder(((osl_t *)osh), (void *)(skbs), (skb_cnt)) + +/* Account for packets received from downstream forwarder. */ +#if defined(BCMDBG_CTRACE) /* pkt logging */ +extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt, + int line, char *file); +#define PKTFRMFWDER(osh, skbs, skb_cnt) \ + osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt), \ + __LINE__, __FILE__) +#else /* ! (BCMDBG_PKT || BCMDBG_CTRACE) */ +extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt); +#define PKTFRMFWDER(osh, skbs, skb_cnt) \ + osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt)) +#endif + + +/** GMAC Forwarded packet tagging for reduced cache flush/invalidate. + * In FWDERBUF tagged packet, only FWDER_PKTMAPSZ amount of data would have + * been accessed in the GMAC forwarder. This may be used to limit the number of + * cachelines that need to be flushed or invalidated. + * Packets sent to the DHD from a GMAC forwarder will be tagged w/ FWDERBUF. + * DHD may clear the FWDERBUF tag, if more than FWDER_PKTMAPSZ was accessed. + * Likewise, a debug print of a packet payload in say the ethernet driver needs + * to be accompanied with a clear of the FWDERBUF tag. + */ + +/** Forwarded packets, have a HWRXOFF sized rx header (etc.h) */ +#define FWDER_HWRXOFF (30) + +/** Maximum amount of a pktadat that a downstream forwarder (GMAC) may have + * read into the L1 cache (not dirty). This may be used in reduced cache ops. + * + * Max 56: ET HWRXOFF[30] + BRCMHdr[4] + EtherHdr[14] + VlanHdr[4] + IP[4] + */ +#define FWDER_PKTMAPSZ (FWDER_HWRXOFF + 4 + 14 + 4 + 4) + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) + +#define FWDERBUF (1 << 4) +#define PKTSETFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags |= FWDERBUF); \ + }) +#define PKTCLRFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags &= (~FWDERBUF)); \ + }) +#define PKTISFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags & FWDERBUF); \ + }) + +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) + +#define FWDERBUF (1 << 20) +#define PKTSETFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->mac_len |= FWDERBUF); \ + }) +#define PKTCLRFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->mac_len &= (~FWDERBUF)); \ + }) +#define PKTISFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->mac_len & FWDERBUF); \ + }) + +#else /* 2.6.22 */ + +#define FWDERBUF (1 << 4) +#define PKTSETFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->__unused |= FWDERBUF); \ + }) +#define PKTCLRFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->__unused &= (~FWDERBUF)); \ + }) +#define PKTISFWDERBUF(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->__unused & FWDERBUF); \ + }) + +#endif /* 2.6.22 */ + +#else /* ! BCM_GMAC3 */ + +#define PKTSETFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); }) +#define PKTCLRFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); }) +#define PKTISFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;}) + +#endif /* ! 
BCM_GMAC3 */ + + +#ifdef HNDCTF +/* For broadstream iqos */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) +#define TOBR (1 << 5) +#define PKTSETTOBR(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags |= TOBR); \ + }) +#define PKTCLRTOBR(osh, skb) \ + ({ \ + BCM_REFERENCE(osh); \ + (((struct sk_buff*)(skb))->pktc_flags &= (~TOBR)); \ + }) +#define PKTISTOBR(skb) (((struct sk_buff*)(skb))->pktc_flags & TOBR) +#define PKTSETCTFIPCTXIF(skb, ifp) (((struct sk_buff*)(skb))->ctf_ipc_txif = ifp) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) +#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;}) +#define PKTSETCTFIPCTXIF(skb, ifp) ({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);}) +#else /* 2.6.22 */ +#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;}) +#define PKTSETCTFIPCTXIF(skb, ifp) ({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);}) +#endif /* 2.6.22 */ +#else /* HNDCTF */ +#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);}) +#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;}) +#endif /* HNDCTF */ + + +#ifdef BCMFA +#ifdef BCMFA_HW_HASH +#define PKTSETFAHIDX(skb, idx) (((struct sk_buff*)(skb))->napt_idx = idx) +#else +#define PKTSETFAHIDX(skb, idx) ({BCM_REFERENCE(skb); BCM_REFERENCE(idx);}) +#endif /* BCMFA_SW_HASH */ +#define PKTGETFAHIDX(skb) (((struct sk_buff*)(skb))->napt_idx) +#define PKTSETFADEV(skb, imp) (((struct sk_buff*)(skb))->dev = imp) +#define PKTSETRXDEV(skb) (((struct sk_buff*)(skb))->rxdev = ((struct sk_buff*)(skb))->dev) + +#define AUX_TCP_FIN_RST (1 << 0) +#define AUX_FREED (1 << 1) +#define PKTSETFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_TCP_FIN_RST) +#define PKTCLRFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_TCP_FIN_RST)) +#define PKTISFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_TCP_FIN_RST) +#define PKTSETFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_FREED) +#define PKTCLRFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_FREED)) +#define PKTISFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_FREED) +#define PKTISFABRIDGED(skb) PKTISFAAUX(skb) +#else +#define PKTISFAAUX(skb) ({BCM_REFERENCE(skb); FALSE;}) +#define PKTISFABRIDGED(skb) ({BCM_REFERENCE(skb); FALSE;}) +#define PKTISFAFREED(skb) ({BCM_REFERENCE(skb); FALSE;}) + +#define PKTCLRFAAUX(skb) BCM_REFERENCE(skb) +#define PKTSETFAFREED(skb) BCM_REFERENCE(skb) +#define PKTCLRFAFREED(skb) BCM_REFERENCE(skb) +#endif /* BCMFA */ + +#if defined(BCM_OBJECT_TRACE) +extern void osl_pktfree(osl_t *osh, void *skb, bool send, int line, const char *caller); +#else +extern void osl_pktfree(osl_t *osh, void *skb, bool send); +#endif /* BCM_OBJECT_TRACE */ +extern void *osl_pktget_static(osl_t *osh, uint len); +extern void osl_pktfree_static(osl_t *osh, void *skb, bool send); +extern void osl_pktclone(osl_t *osh, void **pkt); + +#ifdef BCMDBG_CTRACE +#define PKT_CTRACE_DUMP(osh, b) osl_ctrace_dump((osh), (b)) +extern void *osl_pktget(osl_t *osh, uint len, int line, char *file); +extern void *osl_pkt_frmnative(osl_t *osh, void *skb, int line, char *file); +extern int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt); +extern void 
*osl_pktdup(osl_t *osh, void *skb, int line, char *file); +struct bcmstrbuf; +extern void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b); +#else +#ifdef BCM_OBJECT_TRACE +extern void *osl_pktget(osl_t *osh, uint len, int line, const char *caller); +extern void *osl_pktdup(osl_t *osh, void *skb, int line, const char *caller); +#else +extern void *osl_pktget(osl_t *osh, uint len); +extern void *osl_pktdup(osl_t *osh, void *skb); +#endif /* BCM_OBJECT_TRACE */ +extern void *osl_pkt_frmnative(osl_t *osh, void *skb); +#endif /* BCMDBG_CTRACE */ +extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt); +#ifdef BCMDBG_CTRACE +#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), \ + (struct sk_buff*)(skb), __LINE__, __FILE__) +#define PKTISFRMNATIVE(osh, skb) osl_pkt_is_frmnative((osl_t *)(osh), (struct sk_buff *)(skb)) +#else +#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb)) +#endif /* BCMDBG_CTRACE */ +#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_t *)(osh), (pkt)) + +#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev) +#define PKTSETLINK(skb, x) (((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x)) +#define PKTPRIO(skb) (((struct sk_buff*)(skb))->priority) +#define PKTSETPRIO(skb, x) (((struct sk_buff*)(skb))->priority = (x)) +#define PKTSUMNEEDED(skb) (((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW) +#define PKTSETSUMGOOD(skb, x) (((struct sk_buff*)(skb))->ip_summed = \ + ((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE)) +/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */ +#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned) + +#ifdef CONFIG_NF_CONNTRACK_MARK +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define PKTMARK(p) (((struct sk_buff *)(p))->mark) +#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->mark = (m) +#else /* !2.6.0 */ +#define PKTMARK(p) (((struct sk_buff *)(p))->nfmark) +#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->nfmark = (m) +#endif /* 2.6.0 */ +#else /* CONFIG_NF_CONNTRACK_MARK */ +#define PKTMARK(p) 0 +#define PKTSETMARK(p, m) +#endif /* CONFIG_NF_CONNTRACK_MARK */ + +#define PKTALLOCED(osh) osl_pktalloced(osh) +extern uint osl_pktalloced(osl_t *osh); + +#define OSL_RAND() osl_rand() +extern uint32 osl_rand(void); + +#define DMA_MAP(osh, va, size, direction, p, dmah) \ + osl_dma_map((osh), (va), (size), (direction), (p), (dmah)) + +#ifdef PKTC +/* Use 8 bytes of skb tstamp field to store below info */ +struct chain_node { + struct sk_buff *link; + unsigned int flags:3, pkts:9, bytes:20; +}; + +#define CHAIN_NODE(skb) ((struct chain_node*)(((struct sk_buff*)skb)->pktc_cb)) + +#define PKTCSETATTR(s, f, p, b) ({CHAIN_NODE(s)->flags = (f); CHAIN_NODE(s)->pkts = (p); \ + CHAIN_NODE(s)->bytes = (b);}) +#define PKTCCLRATTR(s) ({CHAIN_NODE(s)->flags = CHAIN_NODE(s)->pkts = \ + CHAIN_NODE(s)->bytes = 0;}) +#define PKTCGETATTR(s) (CHAIN_NODE(s)->flags << 29 | CHAIN_NODE(s)->pkts << 20 | \ + CHAIN_NODE(s)->bytes) +#define PKTCCNT(skb) (CHAIN_NODE(skb)->pkts) +#define PKTCLEN(skb) (CHAIN_NODE(skb)->bytes) +#define PKTCGETFLAGS(skb) (CHAIN_NODE(skb)->flags) +#define PKTCSETFLAGS(skb, f) (CHAIN_NODE(skb)->flags = (f)) +#define PKTCCLRFLAGS(skb) (CHAIN_NODE(skb)->flags = 0) +#define PKTCFLAGS(skb) (CHAIN_NODE(skb)->flags) +#define PKTCSETCNT(skb, c) (CHAIN_NODE(skb)->pkts = (c)) +#define PKTCINCRCNT(skb) (CHAIN_NODE(skb)->pkts++) +#define PKTCADDCNT(skb, c) (CHAIN_NODE(skb)->pkts += (c)) +#define PKTCSETLEN(skb, l) (CHAIN_NODE(skb)->bytes = (l)) +#define 
PKTCADDLEN(skb, l) (CHAIN_NODE(skb)->bytes += (l))
+#define PKTCSETFLAG(skb, fb) (CHAIN_NODE(skb)->flags |= (fb))
+#define PKTCCLRFLAG(skb, fb) (CHAIN_NODE(skb)->flags &= ~(fb))
+#define PKTCLINK(skb) (CHAIN_NODE(skb)->link)
+#define PKTSETCLINK(skb, x) (CHAIN_NODE(skb)->link = (struct sk_buff*)(x))
+#define FOREACH_CHAINED_PKT(skb, nskb) \
+ for (; (skb) != NULL; (skb) = (nskb)) \
+ if ((nskb) = (PKTISCHAINED(skb) ? PKTCLINK(skb) : NULL), \
+ PKTSETCLINK((skb), NULL), 1)
+#define PKTCFREE(osh, skb, send) \
+do { \
+ void *nskb; \
+ ASSERT((skb) != NULL); \
+ FOREACH_CHAINED_PKT((skb), nskb) { \
+ PKTCLRCHAINED((osh), (skb)); \
+ PKTCCLRFLAGS((skb)); \
+ PKTFREE((osh), (skb), (send)); \
+ } \
+} while (0)
+#define PKTCENQTAIL(h, t, p) \
+do { \
+ if ((t) == NULL) { \
+ (h) = (t) = (p); \
+ } else { \
+ PKTSETCLINK((t), (p)); \
+ (t) = (p); \
+ } \
+} while (0)
+#endif /* PKTC */
+
+#else /* ! BCMDRIVER */
+
+
+/* ASSERT */
+ #define ASSERT(exp) do {} while (0)
+
+/* MALLOC and MFREE */
+#define MALLOC(o, l) malloc(l)
+#define MFREE(o, p, l) free(p)
+#include
+
+/* str* and mem* functions */
+#include
+
+/* *printf functions */
+#include
+
+/* bcopy, bcmp, and bzero */
+extern void bcopy(const void *src, void *dst, size_t len);
+extern int bcmp(const void *b1, const void *b2, size_t len);
+extern void bzero(void *b, size_t len);
+#endif /* ! BCMDRIVER */
+
+typedef struct sec_cma_info {
+ struct sec_mem_elem *sec_alloc_list;
+ struct sec_mem_elem *sec_alloc_list_tail;
+} sec_cma_info_t;
+
+/* The current STB 7445D1 doesn't use ACP and is non-coherent.
+ * These dummy values are only here to keep the build going and
+ * need to change when this is revisited.
+ */
+#if defined(STBLINUX)
+
+#if defined(__ARM_ARCH_7A__)
+#define ACP_WAR_ENAB() 0
+#define ACP_WIN_LIMIT 1
+#define arch_is_coherent() 0
+#endif /* __ARM_ARCH_7A__ */
+
+#endif /* STBLINUX */
+
+#ifdef BCM_SECURE_DMA
+
+#define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) \
+ osl_sec_dma_map((osh), (va), (size), (direction), (p), (dmah), (pcma), (offset))
+#define SECURE_DMA_DD_MAP(osh, va, size, direction, p, dmah) \
+ osl_sec_dma_dd_map((osh), (va), (size), (direction), (p), (dmah))
+#define SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) \
+ osl_sec_dma_map_txmeta((osh), (va), (size), (direction), (p), (dmah), (pcma))
+#define SECURE_DMA_UNMAP(osh, pa, size, direction, p, dmah, pcma, offset) \
+ osl_sec_dma_unmap((osh), (pa), (size), (direction), (p), (dmah), (pcma), (offset))
+#define SECURE_DMA_UNMAP_ALL(osh, pcma) \
+ osl_sec_dma_unmap_all((osh), (pcma))
+#if defined(__ARM_ARCH_7A__)
+#define CMA_BUFSIZE_4K 4096
+#define CMA_BUFSIZE_2K 2048
+#define CMA_BUFSIZE_512 512
+
+#define CMA_BUFNUM 2048
+#define SEC_CMA_COHERENT_BLK 0x8000 /* 32768 */
+#define SEC_CMA_COHERENT_MAX 32
+#define CMA_DMA_DESC_MEMBLOCK (SEC_CMA_COHERENT_BLK * SEC_CMA_COHERENT_MAX) /* 32 KiB * 32 = 1 MiB */
+#define CMA_DMA_DATA_MEMBLOCK (CMA_BUFSIZE_4K*CMA_BUFNUM) /* 4 KiB * 2048 = 8 MiB */
+#define CMA_MEMBLOCK (CMA_DMA_DESC_MEMBLOCK + CMA_DMA_DATA_MEMBLOCK)
+#define CONT_ARMREGION 0x02 /* Region CMA */
+#else
+#define CONT_MIPREGION 0x00 /* To access the MIPs mem, Not yet...
*/ +#endif /* !defined __ARM_ARCH_7A__ */ + +#define SEC_DMA_ALIGN (1<<16) +typedef struct sec_mem_elem { + size_t size; + int direction; + phys_addr_t pa_cma; /**< physical address */ + void *va; /**< virtual address of driver pkt */ + dma_addr_t dma_handle; /**< bus address assign by linux */ + void *vac; /**< virtual address of cma buffer */ + struct sec_mem_elem *next; +} sec_mem_elem_t; + +extern dma_addr_t osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset); +extern dma_addr_t osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *dmah); +extern dma_addr_t osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, + int direction, void *p, hnddma_seg_map_t *dmah, void *ptr_cma_info); +extern void osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction, + void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset); +extern void osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info); + +#endif /* BCM_SECURE_DMA */ + +typedef struct sk_buff_head PKT_LIST; +#define PKTLIST_INIT(x) skb_queue_head_init((x)) +#define PKTLIST_ENQ(x, y) skb_queue_head((struct sk_buff_head *)(x), (struct sk_buff *)(y)) +#define PKTLIST_DEQ(x) skb_dequeue((struct sk_buff_head *)(x)) +#define PKTLIST_UNLINK(x, y) skb_unlink((struct sk_buff *)(y), (struct sk_buff_head *)(x)) +#define PKTLIST_FINI(x) skb_queue_purge((struct sk_buff_head *)(x)) + +#endif /* _linux_osl_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/linuxver.h b/drivers/net/wireless/bcmdhd/include/linuxver.h new file mode 100644 index 000000000000..4fe3030b7625 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/linuxver.h @@ -0,0 +1,779 @@ +/* + * Linux-specific abstractions to gain some independence from linux kernel versions. + * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: linuxver.h 604758 2015-12-08 12:01:08Z $ + */ + +#ifndef _linuxver_h_ +#define _linuxver_h_ + +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-but-set-variable" +#pragma GCC diagnostic ignored "-Wunused-but-set-parameter" +#endif + +#include +#include +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#include +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33)) +#include +#else +#include +#endif +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)) +#include +#endif +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)) +/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */ +#ifdef __UNDEF_NO_VERSION__ +#undef __NO_VERSION__ +#else +#define __NO_VERSION__ +#endif +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) +#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i") +#define module_param_string(_name_, _string_, _size_, _perm_) \ + MODULE_PARM(_string_, "c" __MODULE_STRING(_size_)) +#endif + +/* linux/malloc.h is deprecated, use linux/slab.h instead. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9)) +#include +#else +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +#include +#else +#include +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) +#undef IP_TOS +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */ +#include + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41)) +#include +#else +#include +#ifndef work_struct +#define work_struct tq_struct +#endif +#ifndef INIT_WORK +#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data)) +#endif +#ifndef schedule_work +#define schedule_work(_work) schedule_task((_work)) +#endif +#ifndef flush_scheduled_work +#define flush_scheduled_work() flush_scheduled_tasks() +#endif +#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) +#define DAEMONIZE(a) do { \ + allow_signal(SIGKILL); \ + allow_signal(SIGTERM); \ + } while (0) +#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))) +#define DAEMONIZE(a) daemonize(a); \ + allow_signal(SIGKILL); \ + allow_signal(SIGTERM); +#else /* Linux 2.4 (w/o preemption patch) */ +#define RAISE_RX_SOFTIRQ() \ + cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ) +#define DAEMONIZE(a) daemonize(); \ + do { if (a) \ + strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \ + } while (0); +#endif /* LINUX_VERSION_CODE */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) +#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func) +#else +#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work) +#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \ + (RHEL_MAJOR == 5)) +/* Exclude RHEL 5 */ +typedef void (*work_func_t)(void *work); +#endif +#endif /* >= 2.6.20 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +/* Some distributions have their own 2.6.x compatibility layers */ +#ifndef IRQ_NONE +typedef void irqreturn_t; +#define IRQ_NONE +#define IRQ_HANDLED +#define IRQ_RETVAL(x) +#endif +#else +typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, 
struct pt_regs *ptregs); +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) +#define IRQF_SHARED SA_SHIRQ +#endif /* < 2.6.18 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17) +#ifdef CONFIG_NET_RADIO +#define CONFIG_WIRELESS_EXT +#endif +#endif /* < 2.6.17 */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) +#define MOD_INC_USE_COUNT +#define MOD_DEC_USE_COUNT +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) +#include +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#include +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) +#include +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) +#include +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) +#include +#else +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14) +#include +#endif +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) */ + + +#ifndef __exit +#define __exit +#endif +#ifndef __devexit +#define __devexit +#endif +#ifndef __devinit +# if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) +# define __devinit __init +# else +/* All devices are hotpluggable since linux 3.8.0 */ +# define __devinit +# endif +#endif /* !__devinit */ +#ifndef __devinitdata +#define __devinitdata +#endif +#ifndef __devexit_p +#define __devexit_p(x) x +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)) + +#define pci_get_drvdata(dev) (dev)->sysdata +#define pci_set_drvdata(dev, value) (dev)->sysdata = (value) + +/* + * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration + */ + +struct pci_device_id { + unsigned int vendor, device; /* Vendor and device ID or PCI_ANY_ID */ + unsigned int subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ + unsigned int class, class_mask; /* (class,subclass,prog-if) triplet */ + unsigned long driver_data; /* Data private to the driver */ +}; + +struct pci_driver { + struct list_head node; + char *name; + const struct pci_device_id *id_table; /* NULL if wants all devices */ + int (*probe)(struct pci_dev *dev, + const struct pci_device_id *id); /* New device inserted */ + void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug + * capable driver) + */ + void (*suspend)(struct pci_dev *dev); /* Device suspended */ + void (*resume)(struct pci_dev *dev); /* Device woken up */ +}; + +#define MODULE_DEVICE_TABLE(type, name) +#define PCI_ANY_ID (~0) + +/* compatpci.c */ +#define pci_module_init pci_register_driver +extern int pci_register_driver(struct pci_driver *drv); +extern void pci_unregister_driver(struct pci_driver *drv); + +#endif /* PCI registration */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)) +#define pci_module_init pci_register_driver +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18)) +#ifdef MODULE +#define module_init(x) int init_module(void) { return x(); } +#define module_exit(x) void cleanup_module(void) { x(); } +#else +#define module_init(x) __initcall(x); +#define module_exit(x) __exitcall(x); +#endif +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) +#define WL_USE_NETDEV_OPS +#else +#undef WL_USE_NETDEV_OPS +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL) +#define WL_CONFIG_RFKILL +#else +#undef 
WL_CONFIG_RFKILL +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48)) +#define list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); pos = pos->next) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13)) +#define pci_resource_start(dev, bar) ((dev)->base_address[(bar)]) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44)) +#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23)) +#define pci_enable_device(dev) do { } while (0) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14)) +#define net_device device +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42)) + +/* + * DMA mapping + * + * See linux/Documentation/DMA-mapping.txt + */ + +#ifndef PCI_DMA_TODEVICE +#define PCI_DMA_TODEVICE 1 +#define PCI_DMA_FROMDEVICE 2 +#endif + +typedef u32 dma_addr_t; + +/* Pure 2^n version of get_order */ +static inline int get_order(unsigned long size) +{ + int order; + + size = (size-1) >> (PAGE_SHIFT-1); + order = -1; + do { + size >>= 1; + order++; + } while (size); + return order; +} + +static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, + dma_addr_t *dma_handle) +{ + void *ret; + int gfp = GFP_ATOMIC | GFP_DMA; + + ret = (void *)__get_free_pages(gfp, get_order(size)); + + if (ret != NULL) { + memset(ret, 0, size); + *dma_handle = virt_to_bus(ret); + } + return ret; +} +static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + free_pages((unsigned long)vaddr, get_order(size)); +} +#define pci_map_single(cookie, address, size, dir) virt_to_bus(address) +#define pci_unmap_single(cookie, address, size, dir) + +#endif /* DMA mapping */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43)) + +#define dev_kfree_skb_any(a) dev_kfree_skb(a) +#define netif_down(dev) do { (dev)->start = 0; } while (0) + +/* pcmcia-cs provides its own netdevice compatibility layer */ +#ifndef _COMPAT_NETDEVICE_H + +/* + * SoftNet + * + * For pre-softnet kernels we need to tell the upper layer not to + * re-enter start_xmit() while we are in there. However softnet + * guarantees not to enter while we are in there so there is no need + * to do the netif_stop_queue() dance unless the transmit queue really + * gets stuck. This should also improve performance according to tests + * done by Aman Singla. 
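+ *
+ * Editorial sketch, not in the original patch: with these shims a driver's
+ * start_xmit can keep the usual flow-control pattern (ring_full() is a
+ * hypothetical placeholder):
+ *
+ *     if (ring_full(dev)) { netif_stop_queue(dev); return 1; }
+ *     ... queue the frame to the hardware ...
+ *
+ * with the TX-completion interrupt calling netif_wake_queue(dev) once
+ * descriptors are free again.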
+ */ + +#define dev_kfree_skb_irq(a) dev_kfree_skb(a) +#define netif_wake_queue(dev) \ + do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0) +#define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy) + +static inline void netif_start_queue(struct net_device *dev) +{ + dev->tbusy = 0; + dev->interrupt = 0; + dev->start = 1; +} + +#define netif_queue_stopped(dev) (dev)->tbusy +#define netif_running(dev) (dev)->start + +#endif /* _COMPAT_NETDEVICE_H */ + +#define netif_device_attach(dev) netif_start_queue(dev) +#define netif_device_detach(dev) netif_stop_queue(dev) + +/* 2.4.x renamed bottom halves to tasklets */ +#define tasklet_struct tq_struct +static inline void tasklet_schedule(struct tasklet_struct *tasklet) +{ + queue_task(tasklet, &tq_immediate); + mark_bh(IMMEDIATE_BH); +} + +static inline void tasklet_init(struct tasklet_struct *tasklet, + void (*func)(unsigned long), + unsigned long data) +{ + tasklet->next = NULL; + tasklet->sync = 0; + tasklet->routine = (void (*)(void *))func; + tasklet->data = (void *)data; +} +#define tasklet_kill(tasklet) { do {} while (0); } + +/* 2.4.x introduced del_timer_sync() */ +#define del_timer_sync(timer) del_timer(timer) + +#else + +#define netif_down(dev) + +#endif /* SoftNet */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3)) + +/* + * Emit code to initialise a tq_struct's routine and data pointers + */ +#define PREPARE_TQUEUE(_tq, _routine, _data) \ + do { \ + (_tq)->routine = _routine; \ + (_tq)->data = _data; \ + } while (0) + +/* + * Emit code to initialise all of a tq_struct + */ +#define INIT_TQUEUE(_tq, _routine, _data) \ + do { \ + INIT_LIST_HEAD(&(_tq)->list); \ + (_tq)->sync = 0; \ + PREPARE_TQUEUE((_tq), (_routine), (_data)); \ + } while (0) + +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */ + +/* Power management related macro & routines */ +#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9) +#define PCI_SAVE_STATE(a, b) pci_save_state(a) +#define PCI_RESTORE_STATE(a, b) pci_restore_state(a) +#else +#define PCI_SAVE_STATE(a, b) pci_save_state(a, b) +#define PCI_RESTORE_STATE(a, b) pci_restore_state(a, b) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)) +static inline int +pci_save_state(struct pci_dev *dev, u32 *buffer) +{ + int i; + if (buffer) { + for (i = 0; i < 16; i++) + pci_read_config_dword(dev, i * 4, &buffer[i]); + } + return 0; +} + +static inline int +pci_restore_state(struct pci_dev *dev, u32 *buffer) +{ + int i; + + if (buffer) { + for (i = 0; i < 16; i++) + pci_write_config_dword(dev, i * 4, buffer[i]); + } + /* + * otherwise, write the context information we know from bootup. + * This works around a problem where warm-booting from Windows + * combined with a D3(hot)->D0 transition causes PCI config + * header data to be forgotten. 
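+ * (In that fallback case the code below rewrites the six base address
+ * registers from the kernel's resource bookkeeping and restores the
+ * interrupt line.)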
+ */
+ else {
+ for (i = 0; i < 6; i ++)
+ pci_write_config_dword(dev,
+ PCI_BASE_ADDRESS_0 + (i * 4),
+ pci_resource_start(dev, i));
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+ }
+ return 0;
+}
+#endif /* PCI power management */
+
+/* Old cp0 access macros deprecated in 2.4.19 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
+#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
+#endif
+
+/* Module refcount handled internally in 2.6.x */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do {} while (0)
+#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
+#else
+#define OLD_MOD_INC_USE_COUNT do {} while (0)
+#define OLD_MOD_DEC_USE_COUNT do {} while (0)
+#endif
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
+#ifndef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do {} while (0)
+#endif
+#ifndef MOD_INC_USE_COUNT
+#define MOD_INC_USE_COUNT do {} while (0)
+#endif
+#ifndef MOD_DEC_USE_COUNT
+#define MOD_DEC_USE_COUNT do {} while (0)
+#endif
+#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
+#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev) do {} while (0)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(dev) kfree(dev)
+#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/* struct packet_type redefined in 2.6.x */
+#define af_packet_priv data
+#endif
+
+/* suspend args */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+#define DRV_SUSPEND_STATE_TYPE pm_message_t
+#else
+#define DRV_SUSPEND_STATE_TYPE uint32
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+#define CHECKSUM_HW CHECKSUM_PARTIAL
+#endif
+
+typedef struct {
+ void *parent; /* some external entity that the thread is supposed to work for */
+ char *proc_name;
+ struct task_struct *p_task;
+ long thr_pid;
+ int prio; /* priority */
+ struct semaphore sema;
+ int terminated;
+ struct completion completed;
+ spinlock_t spinlock;
+ int up_cnt;
+} tsk_ctl_t;
+
+
+/* requires a tsk_ctl_t tsk argument; the caller's priv data is passed in the owner ptr */
+/* note this macro assumes there may be only one context waiting on thread's completion */
+#ifdef DHD_DEBUG
+#define DBG_THR(x) printk x
+#else
+#define DBG_THR(x)
+#endif
+
+static inline bool binary_sema_down(tsk_ctl_t *tsk)
+{
+ if (down_interruptible(&tsk->sema) == 0) {
+ unsigned long flags = 0;
+ spin_lock_irqsave(&tsk->spinlock, flags);
+ if (tsk->up_cnt == 1)
+ tsk->up_cnt--;
+ else {
+ DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt));
+ }
+ spin_unlock_irqrestore(&tsk->spinlock, flags);
+ return false;
+ } else
+ return true;
+}
+
+static inline bool binary_sema_up(tsk_ctl_t *tsk)
+{
+ bool sem_up = false;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&tsk->spinlock, flags);
+ if (tsk->up_cnt == 0) {
+ tsk->up_cnt++;
+ sem_up = true;
+ } else if (tsk->up_cnt == 1) {
+ /* dhd_sched_dpc: dpc is already up!
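+ * (expected: the binary semaphore clamps up_cnt at one, so back-to-back
+ * scheduling requests coalesce into a single DPC wakeup)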
*/ + } else + DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt)); + + spin_unlock_irqrestore(&tsk->spinlock, flags); + + if (sem_up) + up(&tsk->sema); + + return sem_up; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x) +#else +#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x) +#endif + +#define PROC_START(thread_func, owner, tsk_ctl, flags, name) \ +{ \ + sema_init(&((tsk_ctl)->sema), 0); \ + init_completion(&((tsk_ctl)->completed)); \ + (tsk_ctl)->parent = owner; \ + (tsk_ctl)->proc_name = name; \ + (tsk_ctl)->terminated = FALSE; \ + (tsk_ctl)->p_task = kthread_run(thread_func, tsk_ctl, (char*)name); \ + if (IS_ERR((tsk_ctl)->p_task)) { \ + (tsk_ctl)->thr_pid = DHD_PID_KT_INVALID; \ + DBG_THR(("%s(): thread:%s:%lx failed\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + } else { \ + (tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \ + spin_lock_init(&((tsk_ctl)->spinlock)); \ + DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + } \ +} + +#define PROC_STOP(tsk_ctl) \ +{ \ + (tsk_ctl)->terminated = TRUE; \ + smp_wmb(); \ + up(&((tsk_ctl)->sema)); \ + wait_for_completion(&((tsk_ctl)->completed)); \ + DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \ + (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \ + (tsk_ctl)->thr_pid = -1; \ +} + +/* ----------------------- */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) +#define KILL_PROC(nr, sig) \ +{ \ +struct task_struct *tsk; \ +struct pid *pid; \ +pid = find_get_pid((pid_t)nr); \ +tsk = pid_task(pid, PIDTYPE_PID); \ +if (tsk) send_sig(sig, tsk, 1); \ +} +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \ + KERNEL_VERSION(2, 6, 30)) +#define KILL_PROC(pid, sig) \ +{ \ + struct task_struct *tsk; \ + tsk = find_task_by_vpid(pid); \ + if (tsk) send_sig(sig, tsk, 1); \ +} +#else +#define KILL_PROC(pid, sig) \ +{ \ + kill_proc(pid, sig, 1); \ +} +#endif +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) +#include +#include +#else +#include + +#define __wait_event_interruptible_timeout(wq, condition, ret) \ +do { \ + wait_queue_t __wait; \ + init_waitqueue_entry(&__wait, current); \ + \ + add_wait_queue(&wq, &__wait); \ + for (;;) { \ + set_current_state(TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if (!signal_pending(current)) { \ + ret = schedule_timeout(ret); \ + if (!ret) \ + break; \ + continue; \ + } \ + ret = -ERESTARTSYS; \ + break; \ + } \ + current->state = TASK_RUNNING; \ + remove_wait_queue(&wq, &__wait); \ +} while (0) + +#define wait_event_interruptible_timeout(wq, condition, timeout) \ +({ \ + long __ret = timeout; \ + if (!(condition)) \ + __wait_event_interruptible_timeout(wq, condition, __ret); \ + __ret; \ +}) + +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */ + +/* +For < 2.6.24, wl creates its own netdev but doesn't +align the priv area like the genuine alloc_netdev(). 
+Since netdev_priv() always gives us the aligned address, it will +not match our unaligned address for < 2.6.24 +*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +#define DEV_PRIV(dev) (dev->priv) +#else +#define DEV_PRIV(dev) netdev_priv(dev) +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) +#define WL_ISR(i, d, p) wl_isr((i), (d)) +#else +#define WL_ISR(i, d, p) wl_isr((i), (d), (p)) +#endif /* < 2.6.20 */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) +#define netdev_priv(dev) dev->priv +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) +#define CAN_SLEEP() ((!in_atomic() && !irqs_disabled())) +#else +#define CAN_SLEEP() (FALSE) +#endif + +#define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC) + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) +#define RANDOM32 prandom_u32 +#else +#define RANDOM32 random32 +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) +#define SRANDOM32(entropy) prandom_seed(entropy) +#else +#define SRANDOM32(entropy) srandom32(entropy) +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */ + +/* + * Overide latest kfifo functions with + * older version to work on older kernels + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) +#define kfifo_in_spinlocked(a, b, c, d) kfifo_put(a, (u8 *)b, c) +#define kfifo_out_spinlocked(a, b, c, d) kfifo_get(a, (u8 *)b, c) +#define kfifo_esize(a) 1 +#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) && !defined(WL_COMPAT_WIRELESS) +#define kfifo_in_spinlocked(a, b, c, d) kfifo_in_locked(a, b, c, d) +#define kfifo_out_spinlocked(a, b, c, d) kfifo_out_locked(a, b, c, d) +#define kfifo_esize(a) 1 +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */ + +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic pop +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) +static inline struct inode *file_inode(const struct file *f) +{ + return f->f_dentry->d_inode; +} +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */ + +#endif /* _linuxver_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/miniopt.h b/drivers/net/wireless/bcmdhd/include/miniopt.h new file mode 100644 index 000000000000..2eb6d18ea7ca --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/miniopt.h @@ -0,0 +1,83 @@ +/* + * Command line options parser. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: miniopt.h 514727 2014-11-12 03:02:48Z $
+ */
+
+
+#ifndef MINI_OPT_H
+#define MINI_OPT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Include Files ---------------------------------------------------- */
+
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+#define MINIOPT_MAXKEY	128	/* Max size of an option key */
+typedef struct miniopt {
+
+	/* These are persistent after miniopt_init() */
+	const char* name;	/* name for prompt in error strings */
+	const char* flags;	/* option chars that take no args */
+	bool longflags;		/* long options may be flags */
+	bool opt_end;		/* at end of options (passed a "--") */
+
+	/* These are per-call to miniopt() */
+
+	int consumed;		/* number of argv entries consumed in
+				 * the most recent call to miniopt()
+				 */
+	bool positional;
+	bool good_int;		/* 'val' member is the result of a successful
+				 * strtol conversion of the option value
+				 */
+	char opt;
+	char key[MINIOPT_MAXKEY];
+	char* valstr;		/* positional param, or value for the option,
+				 * or null if the option had
+				 * no accompanying value
+				 */
+	uint uval;		/* strtol translation of valstr */
+	int  val;		/* strtol translation of valstr */
+} miniopt_t;
+
+void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags);
+int miniopt(miniopt_t *t, char **argv);
+
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+
+#ifdef __cplusplus
+	}
+#endif
+
+#endif  /* MINI_OPT_H */
diff --git a/drivers/net/wireless/bcmdhd/include/msgtrace.h b/drivers/net/wireless/bcmdhd/include/msgtrace.h
new file mode 100644
index 000000000000..0d67000c9df3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/msgtrace.h
@@ -0,0 +1,81 @@
+/*
+ * Trace messages sent over HBUS
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: msgtrace.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef	_MSGTRACE_H
+#define	_MSGTRACE_H
+
+#ifndef _TYPEDEFS_H_
+#include
+#endif
+
+
+/* This marks the start of a packed structure section. */
+#include
+/* for osl_t */
+#include
+#define MSGTRACE_VERSION 1
+
+/* Message trace header */
+typedef BWL_PRE_PACKED_STRUCT struct msgtrace_hdr {
+	uint8	version;
+	uint8	trace_type;
+#define MSGTRACE_HDR_TYPE_MSG 0
+#define MSGTRACE_HDR_TYPE_LOG 1
+	uint16	len;	/* Len of the trace */
+	uint32	seqnum;	/* Sequence number of message. Useful if the message has been lost
+			 * because of DMA error or a bus reset (ex: SDIO Func2)
+			 */
+	/* Msgtrace type only */
+	uint32	discarded_bytes;  /* Number of discarded bytes because of trace overflow  */
+	uint32	discarded_printf; /* Number of discarded printf because of trace overflow */
+} BWL_POST_PACKED_STRUCT msgtrace_hdr_t;
+
+#define MSGTRACE_HDRLEN		sizeof(msgtrace_hdr_t)
+
+/* The hbus driver generates traces when sending a trace message, which would itself cause
+ * endless traces. This flag must be set to TRUE in any hbus trace and is reset in the function
+ * msgtrace_put. This prevents endless traces, at the cost of a potential loss of traces, but
+ * only in bus device code. It is recommended to set this flag in the SD_TRACE macro but not in
+ * SD_ERROR, to avoid missing hbus error traces; an hbus error trace should not generate
+ * endless traces.
+ */
+extern bool msgtrace_hbus_trace;
+
+typedef void (*msgtrace_func_send_t)(void *hdl1, void *hdl2, uint8 *hdr,
+	uint16 hdrlen, uint8 *buf, uint16 buflen);
+extern void msgtrace_start(void);
+extern void msgtrace_stop(void);
+extern int msgtrace_sent(void);
+extern void msgtrace_put(char *buf, int count);
+extern void msgtrace_init(void *hdl1, void *hdl2, msgtrace_func_send_t func_send);
+extern bool msgtrace_event_enabled(void);
+
+/* This marks the end of a packed structure section. */
+#include
+
+#endif	/* _MSGTRACE_H */
diff --git a/drivers/net/wireless/bcmdhd/include/osl.h b/drivers/net/wireless/bcmdhd/include/osl.h
new file mode 100644
index 000000000000..8a00f9d55a83
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/osl.h
@@ -0,0 +1,178 @@
+/*
+ * OS Abstraction Layer
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: osl.h 526460 2015-01-14 08:25:24Z $
+ */
+
+#ifndef _osl_h_
+#define _osl_h_
+
+#include
+
+#define OSL_PKTTAG_SZ	32 /* Size of PktTag */
+
+/* Drivers use PKTFREESETCB to register a callback function when a packet is freed by OSL */
+typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status);
+
+/* Drivers use REGOPSSET() to register register read/write functions */
+typedef unsigned int (*osl_rreg_fn_t)(void *ctx, volatile void *reg, unsigned int size);
+typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, unsigned int size);
+
+
+
+#if defined(WL_UNITTEST)
+#include
+#else
+#include
+#endif
+
+#ifndef PKTDBG_TRACE
+#define PKTDBG_TRACE(osh, pkt, bit)	BCM_REFERENCE(osh)
+#endif
+
+#define PKTCTFMAP(osh, p)		BCM_REFERENCE(osh)
+
+/* --------------------------------------------------------------------------
+** Register manipulation macros.
+*/
+
+#define	SET_REG(osh, r, mask, val)	W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
+
+#ifndef AND_REG
+#define AND_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) & (v))
+#endif /* !AND_REG */
+
+#ifndef OR_REG
+#define OR_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) | (v))
+#endif /* !OR_REG */
+
+#if !defined(OSL_SYSUPTIME)
+#define OSL_SYSUPTIME() (0)
+#define OSL_SYSUPTIME_SUPPORT FALSE
+#else
+#define OSL_SYSUPTIME_SUPPORT TRUE
+#endif /* OSL_SYSUPTIME */
+
+#ifndef OSL_SYS_HALT
+#define OSL_SYS_HALT()	do {} while (0)
+#endif
+
+#ifndef OSL_MEM_AVAIL
+#define OSL_MEM_AVAIL()	(0xffffffff)
+#endif
+
+#if !defined(PKTC) && !defined(PKTC_DONGLE)
+#define	PKTCGETATTR(skb)	(0)
+#define	PKTCSETATTR(skb, f, p, b) BCM_REFERENCE(skb)
+#define	PKTCCLRATTR(skb)	BCM_REFERENCE(skb)
+#define	PKTCCNT(skb)		(1)
+#define	PKTCLEN(skb)		PKTLEN(NULL, skb)
+#define	PKTCGETFLAGS(skb)	(0)
+#define	PKTCSETFLAGS(skb, f)	BCM_REFERENCE(skb)
+#define	PKTCCLRFLAGS(skb)	BCM_REFERENCE(skb)
+#define	PKTCFLAGS(skb)		(0)
+#define	PKTCSETCNT(skb, c)	BCM_REFERENCE(skb)
+#define	PKTCINCRCNT(skb)	BCM_REFERENCE(skb)
+#define	PKTCADDCNT(skb, c)	BCM_REFERENCE(skb)
+#define	PKTCSETLEN(skb, l)	BCM_REFERENCE(skb)
+#define	PKTCADDLEN(skb, l)	BCM_REFERENCE(skb)
+#define	PKTCSETFLAG(skb, fb)	BCM_REFERENCE(skb)
+#define	PKTCCLRFLAG(skb, fb)	BCM_REFERENCE(skb)
+#define	PKTCLINK(skb)		NULL
+#define	PKTSETCLINK(skb, x)	BCM_REFERENCE(skb)
+#define FOREACH_CHAINED_PKT(skb, nskb) \
+	for ((nskb) = NULL; (skb) != NULL; (skb) = (nskb))
+#define	PKTCFREE		PKTFREE
+#define PKTCENQTAIL(h, t, p) \
+do { \
+	if ((t) == NULL) { \
+		(h) = (t) = (p); \
+	} \
+} while (0)
+#endif /* !PKTC && !PKTC_DONGLE */
+
+#if !defined(HNDCTF) && !defined(PKTC_TX_DONGLE)
+#define PKTSETCHAINED(osh, skb)		BCM_REFERENCE(osh)
+#define PKTCLRCHAINED(osh, skb)		BCM_REFERENCE(osh)
+#define PKTISCHAINED(skb)		FALSE
+#endif
+
+/* Lbuf with fraglist */
+#define PKTFRAGPKTID(osh, lb)		(0)
+#define PKTSETFRAGPKTID(osh, lb, id)	BCM_REFERENCE(osh)
+#define PKTFRAGTOTNUM(osh, lb)		(0)
+#define PKTSETFRAGTOTNUM(osh, lb, tot)	BCM_REFERENCE(osh)
+#define PKTFRAGTOTLEN(osh, lb)		(0)
+#define PKTSETFRAGTOTLEN(osh, lb, len)	BCM_REFERENCE(osh)
+#define PKTIFINDEX(osh, lb)		(0)
+#define PKTSETIFINDEX(osh, lb, idx)	BCM_REFERENCE(osh)
+#define	PKTGETLF(osh, len, send, lbuf_type)	(0)
+
+/* in rx path, reuse totlen as used len */
+#define PKTFRAGUSEDLEN(osh, lb)		(0)
+#define PKTSETFRAGUSEDLEN(osh, lb, len)	BCM_REFERENCE(osh)
+
+#define PKTFRAGLEN(osh, lb, ix)		(0)
+#define PKTSETFRAGLEN(osh, lb, ix, len)	BCM_REFERENCE(osh)
+#define PKTFRAGDATA_LO(osh, lb, ix)	(0)
+#define
PKTSETFRAGDATA_LO(osh, lb, ix, addr) BCM_REFERENCE(osh) +#define PKTFRAGDATA_HI(osh, lb, ix) (0) +#define PKTSETFRAGDATA_HI(osh, lb, ix, addr) BCM_REFERENCE(osh) + +/* RX FRAG */ +#define PKTISRXFRAG(osh, lb) (0) +#define PKTSETRXFRAG(osh, lb) BCM_REFERENCE(osh) +#define PKTRESETRXFRAG(osh, lb) BCM_REFERENCE(osh) + +/* TX FRAG */ +#define PKTISTXFRAG(osh, lb) (0) +#define PKTSETTXFRAG(osh, lb) BCM_REFERENCE(osh) + +/* Need Rx completion used for AMPDU reordering */ +#define PKTNEEDRXCPL(osh, lb) (TRUE) +#define PKTSETNORXCPL(osh, lb) BCM_REFERENCE(osh) +#define PKTRESETNORXCPL(osh, lb) BCM_REFERENCE(osh) + +#define PKTISFRAG(osh, lb) (0) +#define PKTFRAGISCHAINED(osh, i) (0) +/* TRIM Tail bytes from lfrag */ +#define PKTFRAG_TRIM_TAILBYTES(osh, p, len, type) PKTSETLEN(osh, p, PKTLEN(osh, p) - len) + +#ifdef BCM_SECURE_DMA +#define SECURE_DMA_ENAB(osh) (1) +#else + +#define SECURE_DMA_ENAB(osh) (0) +#define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) ((dmaaddr_t) {(0)}) +#define SECURE_DMA_DD_MAP(osh, va, size, direction, p, dmah) 0 +#define SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) ((dmaaddr_t) {(0)}) +#define SECURE_DMA_UNMAP(osh, pa, size, direction, p, dmah, pcma, offset) +#define SECURE_DMA_UNMAP_ALL(osh, pcma) + +#endif + + +#endif /* _osl_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/osl_decl.h b/drivers/net/wireless/bcmdhd/include/osl_decl.h new file mode 100644 index 000000000000..6c8d86eeabf1 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/osl_decl.h @@ -0,0 +1,37 @@ +/* + * osl forward declarations + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: osl_decl.h 591283 2015-10-07 11:52:00Z $ + */ + +#ifndef _osl_decl_h_ +#define _osl_decl_h_ + +/* osl handle type forward declaration */ +typedef struct osl_info osl_t; +typedef struct osl_dmainfo osldma_t; +extern unsigned int lmtest; /* low memory test */ +#endif diff --git a/drivers/net/wireless/bcmdhd/include/osl_ext.h b/drivers/net/wireless/bcmdhd/include/osl_ext.h new file mode 100644 index 000000000000..61984e68c4d0 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/osl_ext.h @@ -0,0 +1,697 @@ +/* + * OS Abstraction Layer Extension - the APIs defined by the "extension" API + * are only supported by a subset of all operating systems. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: osl_ext.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _osl_ext_h_ +#define _osl_ext_h_ + + +/* ---- Include Files ---------------------------------------------------- */ + +#if defined(TARGETOS_symbian) + #include + #include +#elif defined(THREADX) + #include +#else + #define OSL_EXT_DISABLED +#endif + +/* Include base operating system abstraction. */ +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* ---- Constants and Types ---------------------------------------------- */ + +/* ----------------------------------------------------------------------- + * Generic OS types. + */ +typedef enum osl_ext_status_t +{ + OSL_EXT_SUCCESS, + OSL_EXT_ERROR, + OSL_EXT_TIMEOUT + +} osl_ext_status_t; +#define OSL_EXT_STATUS_DECL(status) osl_ext_status_t status; + +#define OSL_EXT_TIME_FOREVER ((osl_ext_time_ms_t)(-1)) +typedef unsigned int osl_ext_time_ms_t; + +typedef unsigned int osl_ext_event_bits_t; + +typedef unsigned int osl_ext_interrupt_state_t; + +/* ----------------------------------------------------------------------- + * Timers. + */ +typedef enum +{ + /* One-shot timer. */ + OSL_EXT_TIMER_MODE_ONCE, + + /* Periodic timer. */ + OSL_EXT_TIMER_MODE_REPEAT + +} osl_ext_timer_mode_t; + +/* User registered callback and parameter to invoke when timer expires. */ +typedef void* osl_ext_timer_arg_t; +typedef void (*osl_ext_timer_callback)(osl_ext_timer_arg_t arg); + + +/* ----------------------------------------------------------------------- + * Tasks. + */ + +/* Task entry argument. */ +typedef void* osl_ext_task_arg_t; + +/* Task entry function. */ +typedef void (*osl_ext_task_entry)(osl_ext_task_arg_t arg); + +/* Abstract task priority levels. */ +typedef enum +{ + OSL_EXT_TASK_IDLE_PRIORITY, + OSL_EXT_TASK_LOW_PRIORITY, + OSL_EXT_TASK_LOW_NORMAL_PRIORITY, + OSL_EXT_TASK_NORMAL_PRIORITY, + OSL_EXT_TASK_HIGH_NORMAL_PRIORITY, + OSL_EXT_TASK_HIGHEST_PRIORITY, + OSL_EXT_TASK_TIME_CRITICAL_PRIORITY, + + /* This must be last. 
 */
+	OSL_EXT_TASK_NUM_PRIORITES
+} osl_ext_task_priority_t;
+
+
+#ifndef OSL_EXT_DISABLED
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+
+/* --------------------------------------------------------------------------
+** Semaphore
+*/
+
+/****************************************************************************
+* Function:   osl_ext_sem_create
+*
+* Purpose:    Creates a counting semaphore object, which can subsequently be
+*             used for thread notification.
+*
+* Parameters: name     (in)  Name to assign to the semaphore (must be unique).
+*             init_cnt (in)  Initial count that the semaphore should have.
+*             sem      (out) Newly created semaphore.
+*
+* Returns:    OSL_EXT_SUCCESS if the semaphore was created successfully, or an
+*             error code if the semaphore could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_sem_create(char *name, int init_cnt, osl_ext_sem_t *sem);
+
+/****************************************************************************
+* Function:   osl_ext_sem_delete
+*
+* Purpose:    Destroys a previously created semaphore object.
+*
+* Parameters: sem (mod) Semaphore object to destroy.
+*
+* Returns:    OSL_EXT_SUCCESS if the semaphore was deleted successfully, or an
+*             error code if the semaphore could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_sem_delete(osl_ext_sem_t *sem);
+
+/****************************************************************************
+* Function:   osl_ext_sem_give
+*
+* Purpose:    Increments the count associated with the semaphore. This will
+*             cause one thread blocked on a take to wake up.
+*
+* Parameters: sem (mod) Semaphore object to give.
+*
+* Returns:    OSL_EXT_SUCCESS if the semaphore was given successfully, or an
+*             error code if the semaphore could not be given.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_sem_give(osl_ext_sem_t *sem);
+
+/****************************************************************************
+* Function:   osl_ext_sem_take
+*
+* Purpose:    Decrements the count associated with the semaphore. If the count
+*             is less than zero, then the calling task will become blocked until
+*             another thread does a give on the semaphore. This function will only
+*             block the calling thread for timeout_msec milliseconds, before
+*             returning with OSL_EXT_TIMEOUT.
+*
+* Parameters: sem          (mod) Semaphore object to take.
+*             timeout_msec (in)  Number of milliseconds to wait for the
+*                                semaphore to enter a state where it can be
+*                                taken.
+*
+* Returns:    OSL_EXT_SUCCESS if the semaphore was taken successfully, or an
+*             error code if the semaphore could not be taken.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_sem_take(osl_ext_sem_t *sem, osl_ext_time_ms_t timeout_msec);
+
+
+/* --------------------------------------------------------------------------
+** Mutex
+*/
+
+/****************************************************************************
+* Function:   osl_ext_mutex_create
+*
+* Purpose:    Creates a mutex object, which can subsequently be used to control
+*             mutual exclusion of resources.
+*
+* Parameters: name  (in)  Name to assign to the mutex (must be unique).
+*             mutex (out) Mutex object to initialize.
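+*
+* Example:    (illustrative sketch only, not part of the original API
+*             documentation; the name is hypothetical and this assumes an
+*             RTOS build where osl_ext_mutex_t is a concrete type)
+*
+*                 osl_ext_mutex_t mutex;
+*                 if (osl_ext_mutex_create("wl_cmd_mutex", &mutex) == OSL_EXT_SUCCESS) {
+*                     if (osl_ext_mutex_acquire(&mutex, 100) == OSL_EXT_SUCCESS) {
+*                         ... critical section ...
+*                         osl_ext_mutex_release(&mutex);
+*                     }
+*                     osl_ext_mutex_delete(&mutex);
+*                 }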
+*
+* Returns:    OSL_EXT_SUCCESS if the mutex was created successfully, or an
+*             error code if the mutex could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_mutex_create(char *name, osl_ext_mutex_t *mutex);
+
+/****************************************************************************
+* Function:   osl_ext_mutex_delete
+*
+* Purpose:    Destroys a previously created mutex object.
+*
+* Parameters: mutex (mod) Mutex object to destroy.
+*
+* Returns:    OSL_EXT_SUCCESS if the mutex was deleted successfully, or an
+*             error code if the mutex could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_mutex_delete(osl_ext_mutex_t *mutex);
+
+/****************************************************************************
+* Function:   osl_ext_mutex_acquire
+*
+* Purpose:    Acquires the indicated mutual exclusion object. If the object is
+*             currently acquired by another task, then this function will wait
+*             for timeout_msec milliseconds before returning with OSL_EXT_TIMEOUT.
+*
+* Parameters: mutex        (mod) Mutex object to acquire.
+*             timeout_msec (in)  Number of milliseconds to wait for the mutex.
+*
+* Returns:    OSL_EXT_SUCCESS if the mutex was acquired successfully, or an
+*             error code if the mutex could not be acquired.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_mutex_acquire(osl_ext_mutex_t *mutex, osl_ext_time_ms_t timeout_msec);
+
+/****************************************************************************
+* Function:   osl_ext_mutex_release
+*
+* Purpose:    Releases the indicated mutual exclusion object. This makes it
+*             available for another task to acquire.
+*
+* Parameters: mutex (mod) Mutex object to release.
+*
+* Returns:    OSL_EXT_SUCCESS if the mutex was released successfully, or an
+*             error code if the mutex could not be released.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_mutex_release(osl_ext_mutex_t *mutex);
+
+
+/* --------------------------------------------------------------------------
+** Timers
+*/
+
+/****************************************************************************
+* Function:   osl_ext_timer_create
+*
+* Purpose:    Creates a timer object.
+*
+* Parameters: name         (in)  Name of timer.
+*             timeout_msec (in)  Invoke callback after this number of milliseconds.
+*             mode         (in)  One-shot or periodic timer.
+*             func         (in)  Callback function to invoke on timer expiry.
+*             arg          (in)  Argument to callback function.
+*             timer        (out) Timer object to create.
+*
+* Note: The function callback occurs in interrupt context. The application is
+* required to provide context switch for the callback if required.
+*
+* Returns:    OSL_EXT_SUCCESS if the timer was created successfully, or an
+*             error code if the timer could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t
+osl_ext_timer_create(char *name, osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode,
+	osl_ext_timer_callback func, osl_ext_timer_arg_t arg, osl_ext_timer_t *timer);
+
+/****************************************************************************
+* Function:   osl_ext_timer_delete
+*
+* Purpose:    Destroys a previously created timer object.
+*
+* Parameters: timer (mod) Timer object to destroy.
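+*
+* Example:    (illustrative sketch only, not part of the original API
+*             documentation; tick_cb and the 100 ms period are hypothetical)
+*
+*                 static void tick_cb(osl_ext_timer_arg_t arg) { ... }
+*
+*                 osl_ext_timer_t timer;
+*                 osl_ext_timer_create("wl_tick", 100, OSL_EXT_TIMER_MODE_REPEAT,
+*                     tick_cb, NULL, &timer);
+*                 ... since the callback runs in interrupt context, tick_cb
+*                 should only signal a task, e.g. via an event or semaphore ...
+*                 osl_ext_timer_stop(&timer);
+*                 osl_ext_timer_delete(&timer);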
+*
+* Returns:    OSL_EXT_SUCCESS if the timer was deleted successfully, or an
+*             error code if the timer could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_timer_delete(osl_ext_timer_t *timer);
+
+/****************************************************************************
+* Function:   osl_ext_timer_start
+*
+* Purpose:    Start a previously created timer object.
+*
+* Parameters: timer        (in) Timer object.
+*             timeout_msec (in) Invoke callback after this number of milliseconds.
+*             mode         (in) One-shot or periodic timer.
+*
+* Returns:    OSL_EXT_SUCCESS if the timer was started successfully, or an
+*             error code if the timer could not be started.
+*****************************************************************************
+*/
+osl_ext_status_t
+osl_ext_timer_start(osl_ext_timer_t *timer,
+	osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode);
+
+/****************************************************************************
+* Function:   osl_ext_timer_stop
+*
+* Purpose:    Stop a previously created timer object.
+*
+* Parameters: timer (in) Timer object.
+*
+* Returns:    OSL_EXT_SUCCESS if the timer was stopped successfully, or an
+*             error code if the timer could not be stopped.
+*****************************************************************************
+*/
+osl_ext_status_t
+osl_ext_timer_stop(osl_ext_timer_t *timer);
+
+/****************************************************************************
+* Function:   osl_ext_time_get
+*
+* Purpose:    Returns incrementing time counter.
+*
+* Parameters: None.
+*
+* Returns:    Returns incrementing time counter in msec.
+*****************************************************************************
+*/
+osl_ext_time_ms_t osl_ext_time_get(void);
+
+/* --------------------------------------------------------------------------
+** Tasks
+*/
+
+/****************************************************************************
+* Function:   osl_ext_task_create
+*
+* Purpose:    Create a task.
+*
+* Parameters: name       (in)  Pointer to task string descriptor.
+*             stack      (in)  Pointer to stack. NULL to allocate.
+*             stack_size (in)  Stack size - in bytes.
+*             priority   (in)  Abstract task priority.
+*             func       (in)  A pointer to the task entry point function.
+*             arg        (in)  Value passed into task entry point function.
+*             task       (out) Task to create.
+*
+* Returns:    OSL_EXT_SUCCESS if the task was created successfully, or an
+*             error code if the task could not be created.
+*****************************************************************************
+*/
+
+#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \
+	osl_ext_task_create_ex((name), (stack), (stack_size), (priority), 0, (func), \
+	(arg), (task))
+
+osl_ext_status_t osl_ext_task_create_ex(char* name,
+	void *stack, unsigned int stack_size, osl_ext_task_priority_t priority,
+	osl_ext_time_ms_t timslice_msec, osl_ext_task_entry func, osl_ext_task_arg_t arg,
+	osl_ext_task_t *task);
+
+/****************************************************************************
+* Function:   osl_ext_task_delete
+*
+* Purpose:    Destroy a task.
+*
+* Parameters: task (mod) Task to destroy.
+*
+* Returns:    OSL_EXT_SUCCESS if the task was deleted successfully, or an
+*             error code if the task could not be deleted.
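+*
+* Example:    (illustrative sketch only, not part of the original API
+*             documentation; worker_entry and the stack size are hypothetical)
+*
+*                 static void worker_entry(osl_ext_task_arg_t arg) { ... }
+*
+*                 osl_ext_task_t task;
+*                 osl_ext_task_create("wl_worker", NULL, 4096,
+*                     OSL_EXT_TASK_NORMAL_PRIORITY, worker_entry, NULL, &task);
+*                 ... later, during shutdown ...
+*                 osl_ext_task_delete(&task);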
+***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_delete(osl_ext_task_t *task); + + +/**************************************************************************** +* Function: osl_ext_task_is_running +* +* Purpose: Returns current running task. +* +* Parameters: None. +* +* Returns: osl_ext_task_t of current running task. +***************************************************************************** +*/ +osl_ext_task_t *osl_ext_task_current(void); + + +/**************************************************************************** +* Function: osl_ext_task_yield +* +* Purpose: Yield the CPU to other tasks of the same priority that are +* ready-to-run. +* +* Parameters: None. +* +* Returns: OSL_EXT_SUCCESS if successful, else error code. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_yield(void); + + +/**************************************************************************** +* Function: osl_ext_task_enable_stack_check +* +* Purpose: Enable task stack checking. +* +* Parameters: None. +* +* Returns: OSL_EXT_SUCCESS if successful, else error code. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_task_enable_stack_check(void); + + +/* -------------------------------------------------------------------------- +** Queue +*/ + +/**************************************************************************** +* Function: osl_ext_queue_create +* +* Purpose: Create a queue. +* +* Parameters: name (in) Name to assign to the queue (must be unique). +* buffer (in) Queue buffer. NULL to allocate. +* size (in) Size of the queue. +* queue (out) Newly created queue. +* +* Returns: OSL_EXT_SUCCESS if the queue was created successfully, or an +* error code if the queue could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_create(char *name, + void *queue_buffer, unsigned int queue_size, + osl_ext_queue_t *queue); + +/**************************************************************************** +* Function: osl_ext_queue_delete +* +* Purpose: Destroys a previously created queue object. +* +* Parameters: queue (mod) Queue object to destroy. +* +* Returns: OSL_EXT_SUCCESS if the queue was deleted successfully, or an +* error code if the queue could not be deleteed. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_delete(osl_ext_queue_t *queue); + +/**************************************************************************** +* Function: osl_ext_queue_send +* +* Purpose: Send/add data to the queue. This function will not block the +* calling thread if the queue is full. +* +* Parameters: queue (mod) Queue object. +* data (in) Data pointer to be queued. +* +* Returns: OSL_EXT_SUCCESS if the data was queued successfully, or an +* error code if the data could not be queued. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_queue_send(osl_ext_queue_t *queue, void *data); + +/**************************************************************************** +* Function: osl_ext_queue_send_synchronous +* +* Purpose: Send/add data to the queue. This function will block the +* calling thread until the data is dequeued. +* +* Parameters: queue (mod) Queue object. +* data (in) Data pointer to be queued. 
+*
+* Returns:    OSL_EXT_SUCCESS if the data was queued successfully, or an
+*             error code if the data could not be queued.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_send_synchronous(osl_ext_queue_t *queue, void *data);
+
+/****************************************************************************
+* Function:   osl_ext_queue_receive
+*
+* Purpose:    Receive/remove data from the queue. This function will only
+*             block the calling thread for timeout_msec milliseconds, before
+*             returning with OSL_EXT_TIMEOUT.
+*
+* Parameters: queue        (mod) Queue object.
+*             timeout_msec (in)  Number of milliseconds to wait for the
+*                                data from the queue.
+*             data         (out) Data pointer received/removed from the queue.
+*
+* Returns:    OSL_EXT_SUCCESS if the data was dequeued successfully, or an
+*             error code if the data could not be dequeued.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_receive(osl_ext_queue_t *queue,
+	osl_ext_time_ms_t timeout_msec, void **data);
+
+/****************************************************************************
+* Function:   osl_ext_queue_count
+*
+* Purpose:    Returns the number of items in the queue.
+*
+* Parameters: queue (mod) Queue object.
+*             count (out) Number of items currently in the queue.
+*
+* Returns:    OSL_EXT_SUCCESS if the count was returned successfully, or an
+*             error code if the count is invalid.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_count(osl_ext_queue_t *queue, int *count);
+
+
+/* --------------------------------------------------------------------------
+** Event
+*/
+
+/****************************************************************************
+* Function:   osl_ext_event_create
+*
+* Purpose:    Creates an event object, which can subsequently be used to
+*             notify and trigger tasks.
+*
+* Parameters: name  (in)  Name to assign to the event (must be unique).
+*             event (out) Event object to initialize.
+*
+* Returns:    OSL_EXT_SUCCESS if the event was created successfully, or an
+*             error code if the event could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_event_create(char *name, osl_ext_event_t *event);
+
+/****************************************************************************
+* Function:   osl_ext_event_delete
+*
+* Purpose:    Destroys a previously created event object.
+*
+* Parameters: event (mod) Event object to destroy.
+*
+* Returns:    OSL_EXT_SUCCESS if the event was deleted successfully, or an
+*             error code if the event could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_event_delete(osl_ext_event_t *event);
+
+/****************************************************************************
+* Function:   osl_ext_event_get
+*
+* Purpose:    Get event from specified event object.
+*
+* Parameters: event        (mod) Event object to get.
+*             requested    (in)  Requested event to get.
+*             timeout_msec (in)  Number of milliseconds to wait for the event.
+*             event_bits   (out) Event bits retrieved.
+*
+* Returns:    OSL_EXT_SUCCESS if the event was retrieved successfully, or an
+*             error code if the event could not be retrieved.
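+*
+* Example:    (illustrative sketch only, not part of the original API
+*             documentation; the 0x1 event bit is hypothetical)
+*
+*                 waiter:    osl_ext_event_bits_t bits;
+*                            osl_ext_event_get(&event, 0x1, OSL_EXT_TIME_FOREVER, &bits);
+*
+*                 signaller: osl_ext_event_set(&event, 0x1);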
+***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_get(osl_ext_event_t *event, + osl_ext_event_bits_t requested, osl_ext_time_ms_t timeout_msec, + osl_ext_event_bits_t *event_bits); + +/**************************************************************************** +* Function: osl_ext_event_set +* +* Purpose: Set event of specified event object. +* +* Parameters: event (mod) Event object to set. +* event_bits (in) Event bits to set. +* +* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an +* error code if the event could not be created. +***************************************************************************** +*/ +osl_ext_status_t osl_ext_event_set(osl_ext_event_t *event, + osl_ext_event_bits_t event_bits); + + +/* -------------------------------------------------------------------------- +** Interrupt +*/ + +/**************************************************************************** +* Function: osl_ext_interrupt_disable +* +* Purpose: Disable CPU interrupt. +* +* Parameters: None. +* +* Returns: The interrupt state before disable for restoring interrupt. +***************************************************************************** +*/ +osl_ext_interrupt_state_t osl_ext_interrupt_disable(void); + + +/**************************************************************************** +* Function: osl_ext_interrupt_restore +* +* Purpose: Restore CPU interrupt state. +* +* Parameters: state (in) Interrupt state to restore returned from +* osl_ext_interrupt_disable(). +* +* Returns: None. +***************************************************************************** +*/ +void osl_ext_interrupt_restore(osl_ext_interrupt_state_t state); + +#else + +/* ---- Constants and Types ---------------------------------------------- */ + +/* Semaphore. */ +#define osl_ext_sem_t +#define OSL_EXT_SEM_DECL(sem) + +/* Mutex. */ +#define osl_ext_mutex_t +#define OSL_EXT_MUTEX_DECL(mutex) + +/* Timer. */ +#define osl_ext_timer_t +#define OSL_EXT_TIMER_DECL(timer) + +/* Task. */ +#define osl_ext_task_t void +#define OSL_EXT_TASK_DECL(task) + +/* Queue. */ +#define osl_ext_queue_t +#define OSL_EXT_QUEUE_DECL(queue) + +/* Event. 
*/ +#define osl_ext_event_t +#define OSL_EXT_EVENT_DECL(event) + +/* ---- Variable Externs ------------------------------------------------- */ +/* ---- Function Prototypes ---------------------------------------------- */ + +#define osl_ext_sem_create(name, init_cnt, sem) (OSL_EXT_SUCCESS) +#define osl_ext_sem_delete(sem) (OSL_EXT_SUCCESS) +#define osl_ext_sem_give(sem) (OSL_EXT_SUCCESS) +#define osl_ext_sem_take(sem, timeout_msec) (OSL_EXT_SUCCESS) + +#define osl_ext_mutex_create(name, mutex) (OSL_EXT_SUCCESS) +#define osl_ext_mutex_delete(mutex) (OSL_EXT_SUCCESS) +#define osl_ext_mutex_acquire(mutex, timeout_msec) (OSL_EXT_SUCCESS) +#define osl_ext_mutex_release(mutex) (OSL_EXT_SUCCESS) + +#define osl_ext_timer_create(name, timeout_msec, mode, func, arg, timer) \ + (OSL_EXT_SUCCESS) +#define osl_ext_timer_delete(timer) (OSL_EXT_SUCCESS) +#define osl_ext_timer_start(timer, timeout_msec, mode) (OSL_EXT_SUCCESS) +#define osl_ext_timer_stop(timer) (OSL_EXT_SUCCESS) +#define osl_ext_time_get() (0) + +#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \ + (OSL_EXT_SUCCESS) +#define osl_ext_task_delete(task) (OSL_EXT_SUCCESS) +#define osl_ext_task_current() (NULL) +#define osl_ext_task_yield() (OSL_EXT_SUCCESS) +#define osl_ext_task_enable_stack_check() (OSL_EXT_SUCCESS) + +#define osl_ext_queue_create(name, queue_buffer, queue_size, queue) \ + (OSL_EXT_SUCCESS) +#define osl_ext_queue_delete(queue) (OSL_EXT_SUCCESS) +#define osl_ext_queue_send(queue, data) (OSL_EXT_SUCCESS) +#define osl_ext_queue_send_synchronous(queue, data) (OSL_EXT_SUCCESS) +#define osl_ext_queue_receive(queue, timeout_msec, data) \ + (OSL_EXT_SUCCESS) +#define osl_ext_queue_count(queue, count) (OSL_EXT_SUCCESS) + +#define osl_ext_event_create(name, event) (OSL_EXT_SUCCESS) +#define osl_ext_event_delete(event) (OSL_EXT_SUCCESS) +#define osl_ext_event_get(event, requested, timeout_msec, event_bits) \ + (OSL_EXT_SUCCESS) +#define osl_ext_event_set(event, event_bits) (OSL_EXT_SUCCESS) + +#define osl_ext_interrupt_disable(void) +#define osl_ext_interrupt_restore(state) + +#endif /* OSL_EXT_DISABLED */ + +#ifdef __cplusplus +} +#endif + +#endif /* _osl_ext_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_end.h b/drivers/net/wireless/bcmdhd/include/packed_section_end.h new file mode 100644 index 000000000000..e3a35c7e9270 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/packed_section_end.h @@ -0,0 +1,63 @@ +/* + * Declare directives for structure packing. No padding will be provided + * between the members of packed structures, and therefore, there is no + * guarantee that structure members will be aligned. + * + * Declaring packed structures is compiler specific. 
In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ *    some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include
+ *
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: packed_section_end.h 514727 2014-11-12 03:02:48Z $
+ */
+
+
+/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
+ * and undefined in packed_section_end.h. If it is NOT defined at this
+ * point, then there is a missing include of packed_section_start.h.
+ */
+#ifdef BWL_PACKED_SECTION
+	#undef BWL_PACKED_SECTION
+#else
+	#error "BWL_PACKED_SECTION is NOT defined!"
+#endif
+
+
+
+
+/* Compiler-specific directives for structure packing are declared in
+ * packed_section_start.h. This marks the end of the structure packing section,
+ * so, undef them here.
+ */
+#undef	BWL_PRE_PACKED_STRUCT
+#undef	BWL_POST_PACKED_STRUCT
diff --git a/drivers/net/wireless/bcmdhd/include/packed_section_start.h b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
new file mode 100644
index 000000000000..617176461f75
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/packed_section_start.h
@@ -0,0 +1,67 @@
+/*
+ * Declare directives for structure packing. No padding will be provided
+ * between the members of packed structures, and therefore, there is no
+ * guarantee that structure members will be aligned.
+ *
+ * Declaring packed structures is compiler specific. In order to handle all
+ * cases, packed structures should be declared as:
+ *
+ * #include
+ *
+ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t {
+ *    some_struct_members;
+ * } BWL_POST_PACKED_STRUCT foobar_t;
+ *
+ * #include
+ *
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: packed_section_start.h 514727 2014-11-12 03:02:48Z $
+ */
+
+
+/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
+ * and undefined in packed_section_end.h. If it is already defined at this
+ * point, then there is a missing include of packed_section_end.h.
+ */
+#ifdef BWL_PACKED_SECTION
+	#error "BWL_PACKED_SECTION is already defined!"
+#else
+	#define BWL_PACKED_SECTION
+#endif
+
+
+
+
+/* Declare compiler-specific directives for structure packing. */
+#if defined(__GNUC__) || defined(__lint)
+	#define	BWL_PRE_PACKED_STRUCT
+	#define	BWL_POST_PACKED_STRUCT	__attribute__ ((packed))
+#elif defined(__CC_ARM)
+	#define	BWL_PRE_PACKED_STRUCT	__packed
+	#define	BWL_POST_PACKED_STRUCT
+#else
+	#error "Unknown compiler!"
+#endif
diff --git a/drivers/net/wireless/bcmdhd/include/pcicfg.h b/drivers/net/wireless/bcmdhd/include/pcicfg.h
new file mode 100644
index 000000000000..be0a92a17847
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/pcicfg.h
@@ -0,0 +1,260 @@
+/*
+ * pcicfg.h: PCI configuration constants and structures.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: pcicfg.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef	_h_pcicfg_
+#define	_h_pcicfg_
+
+
+/* pci config status reg has a bit to indicate that capability ptr is present */
+
+#define PCI_CAPPTR_PRESENT	0x0010
+
+/* A structure for the config registers is nice, but in most
+ * systems the config space is not memory mapped, so we need
+ * field offsets. :-(
+ */
+#define	PCI_CFG_VID		0
+#define	PCI_CFG_DID		2
+#define	PCI_CFG_CMD		4
+#define	PCI_CFG_STAT		6
+#define	PCI_CFG_REV		8
+#define	PCI_CFG_PROGIF		9
+#define	PCI_CFG_SUBCL		0xa
+#define	PCI_CFG_BASECL		0xb
+#define	PCI_CFG_CLSZ		0xc
+#define	PCI_CFG_LATTIM		0xd
+#define	PCI_CFG_HDR		0xe
+#define	PCI_CFG_BIST		0xf
+#define	PCI_CFG_BAR0		0x10
+#define	PCI_CFG_BAR1		0x14
+#define	PCI_CFG_BAR2		0x18
+#define	PCI_CFG_BAR3		0x1c
+#define	PCI_CFG_BAR4		0x20
+#define	PCI_CFG_BAR5		0x24
+#define	PCI_CFG_CIS		0x28
+#define	PCI_CFG_SVID		0x2c
+#define	PCI_CFG_SSID		0x2e
+#define	PCI_CFG_ROMBAR		0x30
+#define	PCI_CFG_CAPPTR		0x34
+#define	PCI_CFG_INT		0x3c
+#define	PCI_CFG_PIN		0x3d
+#define	PCI_CFG_MINGNT		0x3e
+#define	PCI_CFG_MAXLAT		0x3f
+#define	PCI_CFG_DEVCTRL		0xd8
+
+
+/* PCI CAPABILITY DEFINES */
+#define PCI_CAP_POWERMGMTCAP_ID		0x01
+#define PCI_CAP_MSICAP_ID		0x05
+#define PCI_CAP_VENDSPEC_ID		0x09
+#define PCI_CAP_PCIECAP_ID		0x10
+
+/* Data structure to define the Message Signalled Interrupt facility
+ * Valid for PCI and PCIE configurations
+ */
+typedef struct _pciconfig_cap_msi {
+	uint8 capID;
+	uint8 nextptr;
+	uint16 msgctrl;
+	uint32 msgaddr;
+} pciconfig_cap_msi;
+#define MSI_ENABLE	0x1	/* bit 0 of msgctrl */
+
+/* Data structure to define the Power management facility
+ * Valid for PCI and PCIE configurations
+ */
+typedef struct _pciconfig_cap_pwrmgmt {
+	uint8 capID;
+	uint8 nextptr;
+	uint16 pme_cap;
+	uint16 pme_sts_ctrl;
+	uint8 pme_bridge_ext;
+	uint8 data;
+} pciconfig_cap_pwrmgmt;
+
+#define PME_CAP_PM_STATES (0x1f << 27)	/* Bits 31:27 states that can generate PME */
+#define PME_CSR_OFFSET 0x4	/* 4-bytes offset */
+#define PME_CSR_PME_EN (1 << 8)	/* Bit 8 Enable generating of PME */
+#define PME_CSR_PME_STAT (1 << 15)	/* Bit 15 PME got asserted */
+
+/* Data structure to define the PCIE capability */
+typedef struct _pciconfig_cap_pcie {
+	uint8 capID;
+	uint8 nextptr;
+	uint16 pcie_cap;
+	uint32 dev_cap;
+	uint16 dev_ctrl;
+	uint16 dev_status;
+	uint32 link_cap;
+	uint16 link_ctrl;
+	uint16 link_status;
+	uint32 slot_cap;
+	uint16 slot_ctrl;
+	uint16 slot_status;
+	uint16 root_ctrl;
+	uint16 root_cap;
+	uint32 root_status;
+} pciconfig_cap_pcie;
+
+/* PCIE Enhanced CAPABILITY DEFINES */
+#define PCIE_EXTCFG_OFFSET	0x100
+#define PCIE_ADVERRREP_CAPID	0x0001
+#define PCIE_VC_CAPID		0x0002
+#define PCIE_DEVSNUM_CAPID	0x0003
+#define PCIE_PWRBUDGET_CAPID	0x0004
+
+/* PCIE Extended configuration */
+#define PCIE_ADV_CORR_ERR_MASK	0x114
+#define CORR_ERR_RE	(1 << 0) /* Receiver */
+#define CORR_ERR_BT	(1 << 6) /* Bad TLP */
+#define CORR_ERR_BD	(1 << 7) /* Bad DLLP */
+#define CORR_ERR_RR	(1 << 8) /* REPLAY_NUM rollover */
+#define CORR_ERR_RT	(1 << 12) /* Reply timer timeout */
+#define ALL_CORR_ERRORS	(CORR_ERR_RE | CORR_ERR_BT | CORR_ERR_BD | \
+			 CORR_ERR_RR | CORR_ERR_RT)
+
+/* PCIE Root Control Register bits (Host mode only) */
+#define PCIE_RC_CORR_SERR_EN		0x0001
+#define PCIE_RC_NONFATAL_SERR_EN	0x0002
+#define PCIE_RC_FATAL_SERR_EN		0x0004
+#define PCIE_RC_PME_INT_EN		0x0008
+#define PCIE_RC_CRS_EN			0x0010
+
+/* PCIE Root Capability Register bits (Host mode only) */
+#define PCIE_RC_CRS_VISIBILITY		0x0001
+
+/* Header to define the PCIE specific capabilities in the extended config space */
+typedef struct _pcie_enhanced_caphdr {
+	uint16 capID;
+	uint16 cap_ver : 4;
+	uint16 next_ptr : 12;
+} pcie_enhanced_caphdr;
+
+
+#define	PCI_BAR0_WIN		0x80	/* backplane address space accessed by BAR0 */
+#define	PCI_BAR1_WIN		0x84	/* backplane address space accessed by BAR1 */
+#define	PCI_SPROM_CONTROL	0x88	/* sprom property control */
+#define	PCI_BAR1_CONTROL	0x8c	/* BAR1 region burst control */
+#define	PCI_INT_STATUS		0x90	/* PCI and other cores interrupts */
+#define	PCI_INT_MASK		0x94	/* mask of PCI and other cores interrupts */
+#define PCI_TO_SB_MB		0x98	/* signal backplane interrupts */
+#define PCI_BACKPLANE_ADDR	0xa0	/* address an arbitrary location on the system backplane */
+#define PCI_BACKPLANE_DATA	0xa4	/* data at the location specified by above address */
+#define	PCI_CLK_CTL_ST		0xa8	/* pci config space clock control/status (>=rev14) */
+#define	PCI_BAR0_WIN2		0xac	/* backplane address space accessed by second 4KB of BAR0 */
+#define	PCI_GPIO_IN		0xb0	/* pci config space gpio input (>=rev3) */
+#define	PCI_GPIO_OUT		0xb4	/* pci config space gpio output (>=rev3) */
+#define	PCI_GPIO_OUTEN		0xb8	/* pci config space gpio output enable (>=rev3) */
+#define	PCI_L1SS_CTRL2		0x24c	/* The L1 PM Substates Control register */
+
+/* Private Registers */
+#define	PCI_STAT_CTRL		0xa80
+#define	PCI_L0_EVENTCNT		0xa84
+#define	PCI_L0_STATETMR		0xa88
+#define	PCI_L1_EVENTCNT		0xa8c
+#define	PCI_L1_STATETMR		0xa90
+#define	PCI_L1_1_EVENTCNT	0xa94
+#define	PCI_L1_1_STATETMR	0xa98
+#define	PCI_L1_2_EVENTCNT	0xa9c
+#define	PCI_L1_2_STATETMR	0xaa0
+#define	PCI_L2_EVENTCNT		0xaa4
+#define	PCI_L2_STATETMR		0xaa8
+
+#define PCI_PMCR_REFUP		0x1814	/* Trefup time */
+#define PCI_PMCR_REFUP_EXT	0x1818	/* Trefup extend Max */
+#define PCI_TPOWER_SCALE_MASK	0x3
+#define PCI_TPOWER_SCALE_SHIFT	3	/* 0:1 is scale and 2 is rsvd */
+
+
+#define	PCI_BAR0_SHADOW_OFFSET	(2 * 1024)	/* bar0 + 2K accesses sprom shadow (in pci core) */
+#define	PCI_BAR0_SPROM_OFFSET	(4 * 1024)	/* bar0 + 4K accesses external sprom */
+#define	PCI_BAR0_PCIREGS_OFFSET	(6 * 1024)	/* bar0 + 6K accesses pci core registers */
+#define	PCI_BAR0_PCISBR_OFFSET	(4 * 1024)	/* pci core SB registers are at the end of the
+						 * 8KB window, so their address is the "regular"
+						 * address plus 4K
+						 */
+/*
+ * PCIE GEN2 changed some of the above locations for
+ * Bar0WrapperBase, SecondaryBAR0Window and SecondaryBAR0WrapperBase
+ * BAR0 maps 32K of register space
+*/
+#define PCIE2_BAR0_WIN2		0x70	/* backplane address space accessed by second 4KB of BAR0 */
+#define PCIE2_BAR0_CORE2_WIN	0x74	/* backplane address space accessed by second 4KB of BAR0 */
+#define PCIE2_BAR0_CORE2_WIN2	0x78	/* backplane address space accessed by second 4KB of BAR0 */
+
+#define PCI_BAR0_WINSZ		(16 * 1024)	/* bar0 window size Match with corerev 13 */
+/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */
+#define	PCI_16KB0_PCIREGS_OFFSET (8 * 1024)	/* bar0 + 8K accesses pci/pcie core registers */
+#define	PCI_16KB0_CCREGS_OFFSET	(12 * 1024)	/* bar0 + 12K accesses chipc core registers */
+#define PCI_16KBB0_WINSZ	(16 * 1024)	/* bar0 window size */
+#define PCI_SECOND_BAR0_OFFSET	(16 * 1024)	/* secondary bar 0 window */
+
+
+/* Header types */
+#define	PCI_HEADER_MULTI	0x80
+#define	PCI_HEADER_MASK		0x7f
+typedef enum {
+	PCI_HEADER_NORMAL,
+	PCI_HEADER_BRIDGE,
+	PCI_HEADER_CARDBUS
+} pci_header_types;
+
+#define PCI_CONFIG_SPACE_SIZE	256
+
+#define DWORD_ALIGN(x)	((x) & ~(0x03))
+#define BYTE_POS(x)	((x) & 0x3)
+#define WORD_POS(x)	((x) & 0x1)
+
+#define BYTE_SHIFT(x)	(8 * BYTE_POS(x))
+#define WORD_SHIFT(x)	(16 * WORD_POS(x))
+
+#define BYTE_VAL(a, x)	(((a) >> BYTE_SHIFT(x)) & 0xFF)
+#define WORD_VAL(a, x)	(((a) >> WORD_SHIFT(x)) & 0xFFFF)
+
+#define read_pci_cfg_byte(a) \
+	(BYTE_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xff)
+
+#define read_pci_cfg_word(a) \
+	(WORD_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xffff)
+
+/* Note: the read-modify-write below must clear the target byte/word using the
+ * bit shift (BYTE_SHIFT/WORD_SHIFT), not the byte/word position. The previous
+ * form, ~0xFF << BYTE_POS(a), parsed as (~0xFF) << BYTE_POS(a) and corrupted
+ * neighbouring bytes for any offset other than 0.
+ */
+#define write_pci_cfg_byte(a, val) do { \
+	uint32 tmpval; \
+	tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~(0xFFU << BYTE_SHIFT(a))) | \
+	        ((uint32)(val) << BYTE_SHIFT(a)); \
+	OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
+	} while (0)
+
+#define write_pci_cfg_word(a, val) do { \
+	uint32 tmpval; \
+	tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~(0xFFFFU << WORD_SHIFT(a))) | \
+	        ((uint32)(val) << WORD_SHIFT(a)); \
+	OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
+	} while (0)
+
+#endif	/* _h_pcicfg_ */
diff --git a/drivers/net/wireless/bcmdhd/include/pcie_core.h b/drivers/net/wireless/bcmdhd/include/pcie_core.h
new file mode 100644
index 000000000000..25a156adcb4f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/pcie_core.h
@@ -0,0 +1,652 @@
+/*
+ * BCM43XX PCIE core hardware definitions.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: pcie_core.h 514727 2014-11-12 03:02:48Z $
+ */
+#ifndef	_PCIE_CORE_H
+#define	_PCIE_CORE_H
+
+#include
+#include
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define	_PADLINE(line)	pad ## line
+#define	_XSTR(line)	_PADLINE(line)
+#define	PAD		_XSTR(__LINE__)
+#endif
+
+/* PCIE Enumeration space offsets */
+#define PCIE_CORE_CONFIG_OFFSET		0x0
+#define PCIE_FUNC0_CONFIG_OFFSET	0x400
+#define PCIE_FUNC1_CONFIG_OFFSET	0x500
+#define PCIE_FUNC2_CONFIG_OFFSET	0x600
+#define PCIE_FUNC3_CONFIG_OFFSET	0x700
+#define PCIE_SPROM_SHADOW_OFFSET	0x800
+#define PCIE_SBCONFIG_OFFSET		0xE00
+
+
+#define PCIEDEV_MAX_DMAS		4
+
+/* PCIE Bar0 Address Mapping. Each function maps 16KB config space */
+#define PCIE_DEV_BAR0_SIZE		0x4000
+#define PCIE_BAR0_WINMAPCORE_OFFSET	0x0
+#define PCIE_BAR0_EXTSPROM_OFFSET	0x1000
+#define PCIE_BAR0_PCIECORE_OFFSET	0x2000
+#define PCIE_BAR0_CCCOREREG_OFFSET	0x3000
+
+/* different register spaces to access through pcie indirect access */
+#define PCIE_CONFIGREGS		1	/* Access to config space */
+#define PCIE_PCIEREGS		2	/* Access to pcie registers */
+
+/* dma regs to control the flow between host2dev and dev2host */
+typedef struct pcie_devdmaregs {
+	dma64regs_t	tx;
+	uint32		PAD[2];
+	dma64regs_t	rx;
+	uint32		PAD[2];
+} pcie_devdmaregs_t;
+
+#define PCIE_DB_HOST2DEV_0	0x1
+#define PCIE_DB_HOST2DEV_1	0x2
+#define PCIE_DB_DEV2HOST_0	0x3
+#define PCIE_DB_DEV2HOST_1	0x4
+
+/* door bell register sets */
+typedef struct pcie_doorbell {
+	uint32	host2dev_0;
+	uint32	host2dev_1;
+	uint32	dev2host_0;
+	uint32	dev2host_1;
+} pcie_doorbell_t;
+
+/* SB side: PCIE core and host control registers */
+typedef struct sbpcieregs {
+	uint32 control;		/* host mode only */
+	uint32 iocstatus;	/* PCIE2: iostatus */
+	uint32 PAD[1];
+	uint32 biststatus;	/* bist Status: 0x00C */
+	uint32 gpiosel;		/* PCIE gpio sel: 0x010 */
+	uint32 gpioouten;	/* PCIE gpio outen: 0x14 */
+	uint32 PAD[2];
+	uint32 intstatus;	/* Interrupt status: 0x20 */
+	uint32 intmask;		/* Interrupt mask: 0x24 */
+	uint32 sbtopcimailbox;	/* sb to pcie mailbox: 0x028 */
+	uint32 obffcontrol;	/* PCIE2: 0x2C */
+	uint32 obffintstatus;	/* PCIE2: 0x30 */
+	uint32 obffdatastatus;	/* PCIE2: 0x34 */
+	uint32 PAD[2];
+	uint32 errlog;		/* PCIE2: 0x40 */
+	uint32 errlogaddr;	/* PCIE2: 0x44 */
+	uint32 mailboxint;	/* PCIE2: 0x48 */
+	uint32 mailboxintmsk;	/* PCIE2: 0x4c */
+	uint32 ltrspacing;	/* PCIE2: 0x50 */
+	uint32 ltrhysteresiscnt; /* PCIE2: 0x54 */
+	uint32 PAD[42];
+
+	uint32 sbtopcie0;	/* sb to pcie translation 0: 0x100 */
+	uint32 sbtopcie1;	/* sb to pcie translation 1: 0x104 */
+	uint32 sbtopcie2;	/* sb to pcie translation 2: 0x108 */
+	uint32 PAD[5];
+
+	/* pcie core supports in direct access to config space */
+	uint32 configaddr;	/* pcie config space access: Address field: 0x120 */
+	uint32 configdata;	/* pcie config space access: Data field: 0x124 */
+	union {
+		struct {
+			/* mdio access to serdes */
+			uint32 mdiocontrol;	/* controls the mdio access: 0x128 */
+			uint32 mdiodata;	/* Data to the mdio access: 0x12c */
+			/* pcie protocol phy/dllp/tlp register indirect access mechanism */
+			uint32 pcieindaddr;	/* indirect access to the internal register: 0x130 */
+			uint32 pcieinddata;	/* Data to/from the internal register: 0x134 */
+			uint32 clkreqenctrl;	/* >= rev 6, Clkreq rdma control : 0x138 */
+			uint32 PAD[177];
+		} pcie1;
+		struct {
+			/* mdio access to serdes */
+			uint32 mdiocontrol;	/* controls the mdio access: 0x128 */
+			uint32 mdiowrdata;	/* write data to mdio 0x12C */
+			uint32 mdiorddata;	/* read data to mdio 0x130 */
+			uint32 PAD[3];		/* 0x134-0x138-0x13c */
+			/* door bell registers available from gen2 rev5 onwards */
+			pcie_doorbell_t	dbls[PCIEDEV_MAX_DMAS]; /* 0x140 - 0x17F */
+			uint32	dataintf;	/* 0x180 */
+			uint32	PAD[1];		/* 0x184 */
+			uint32	d2h_intrlazy_0;	/* 0x188 */
+			uint32	h2d_intrlazy_0;	/* 0x18c */
+			uint32	h2d_intstat_0;	/* 0x190 */
+			uint32	h2d_intmask_0;	/* 0x194 */
+			uint32	d2h_intstat_0;	/* 0x198 */
+			uint32	d2h_intmask_0;	/* 0x19c */
+			uint32	ltr_state;	/* 0x1A0 */
+			uint32	pwr_int_status;	/* 0x1A4 */
+			uint32	pwr_int_mask;	/* 0x1A8 */
+			uint32	PAD[13];	/* 0x1AC - 0x1DF */
+			uint32	clk_ctl_st;	/* 0x1E0 */
+			uint32	PAD[7];		/* 0x1E4 - 0x1FF */
pcie_devdmaregs_t h2d0_dmaregs; /* 0x200 - 0x23c */ + pcie_devdmaregs_t d2h0_dmaregs; /* 0x240 - 0x27c */ + pcie_devdmaregs_t h2d1_dmaregs; /* 0x280 - 0x2bc */ + pcie_devdmaregs_t d2h1_dmaregs; /* 0x2c0 - 0x2fc */ + pcie_devdmaregs_t h2d2_dmaregs; /* 0x300 - 0x33c */ + pcie_devdmaregs_t d2h2_dmaregs; /* 0x340 - 0x37c */ + pcie_devdmaregs_t h2d3_dmaregs; /* 0x380 - 0x3bc */ + pcie_devdmaregs_t d2h3_dmaregs; /* 0x3c0 - 0x3fc */ + } pcie2; + } u; + uint32 pciecfg[4][64]; /* 0x400 - 0x7FF, PCIE Cfg Space */ + uint16 sprom[64]; /* SPROM shadow Area */ +} sbpcieregs_t; + +/* PCI control */ +#define PCIE_RST_OE 0x01 /* When set, drives PCI_RESET out to pin */ +#define PCIE_RST 0x02 /* Value driven out to pin */ +#define PCIE_SPERST 0x04 /* SurvivePeRst */ +#define PCIE_DISABLE_L1CLK_GATING 0x10 +#define PCIE_DLYPERST 0x100 /* Delay PeRst to CoE Core */ +#define PCIE_DISSPROMLD 0x200 /* DisableSpromLoadOnPerst */ +#define PCIE_WakeModeL2 0x1000 /* Wake on L2 */ +#define PCIE_PipeIddqDisable0 0x8000 /* Disable assertion of pcie_pipe_iddq during L1.2 and L2 */ +#define PCIE_PipeIddqDisable1 0x10000 /* Disable assertion of pcie_pipe_iddq during L2 */ + +#define PCIE_CFGADDR 0x120 /* offsetof(configaddr) */ +#define PCIE_CFGDATA 0x124 /* offsetof(configdata) */ + +/* Interrupt status/mask */ +#define PCIE_INTA 0x01 /* PCIE INTA message is received */ +#define PCIE_INTB 0x02 /* PCIE INTB message is received */ +#define PCIE_INTFATAL 0x04 /* PCIE INTFATAL message is received */ +#define PCIE_INTNFATAL 0x08 /* PCIE INTNONFATAL message is received */ +#define PCIE_INTCORR 0x10 /* PCIE INTCORR message is received */ +#define PCIE_INTPME 0x20 /* PCIE INTPME message is received */ +#define PCIE_PERST 0x40 /* PCIE Reset Interrupt */ + +#define PCIE_INT_MB_FN0_0 0x0100 /* PCIE to SB Mailbox int Fn0.0 is received */ +#define PCIE_INT_MB_FN0_1 0x0200 /* PCIE to SB Mailbox int Fn0.1 is received */ +#define PCIE_INT_MB_FN1_0 0x0400 /* PCIE to SB Mailbox int Fn1.0 is received */ +#define PCIE_INT_MB_FN1_1 0x0800 /* PCIE to SB Mailbox int Fn1.1 is received */ +#define PCIE_INT_MB_FN2_0 0x1000 /* PCIE to SB Mailbox int Fn2.0 is received */ +#define PCIE_INT_MB_FN2_1 0x2000 /* PCIE to SB Mailbox int Fn2.1 is received */ +#define PCIE_INT_MB_FN3_0 0x4000 /* PCIE to SB Mailbox int Fn3.0 is received */ +#define PCIE_INT_MB_FN3_1 0x8000 /* PCIE to SB Mailbox int Fn3.1 is received */ + +/* PCIE MailboxInt/MailboxIntMask register */ +#define PCIE_MB_TOSB_FN0_0 0x0001 /* write to assert PCIEtoSB Mailbox interrupt */ +#define PCIE_MB_TOSB_FN0_1 0x0002 +#define PCIE_MB_TOSB_FN1_0 0x0004 +#define PCIE_MB_TOSB_FN1_1 0x0008 +#define PCIE_MB_TOSB_FN2_0 0x0010 +#define PCIE_MB_TOSB_FN2_1 0x0020 +#define PCIE_MB_TOSB_FN3_0 0x0040 +#define PCIE_MB_TOSB_FN3_1 0x0080 +#define PCIE_MB_TOPCIE_FN0_0 0x0100 /* int status/mask for SBtoPCIE Mailbox interrupts */ +#define PCIE_MB_TOPCIE_FN0_1 0x0200 +#define PCIE_MB_TOPCIE_FN1_0 0x0400 +#define PCIE_MB_TOPCIE_FN1_1 0x0800 +#define PCIE_MB_TOPCIE_FN2_0 0x1000 +#define PCIE_MB_TOPCIE_FN2_1 0x2000 +#define PCIE_MB_TOPCIE_FN3_0 0x4000 +#define PCIE_MB_TOPCIE_FN3_1 0x8000 +#define PCIE_MB_TOPCIE_D2H0_DB0 0x10000 +#define PCIE_MB_TOPCIE_D2H0_DB1 0x20000 +#define PCIE_MB_TOPCIE_D2H1_DB0 0x40000 +#define PCIE_MB_TOPCIE_D2H1_DB1 0x80000 +#define PCIE_MB_TOPCIE_D2H2_DB0 0x100000 +#define PCIE_MB_TOPCIE_D2H2_DB1 0x200000 +#define PCIE_MB_TOPCIE_D2H3_DB0 0x400000 +#define PCIE_MB_TOPCIE_D2H3_DB1 0x800000 + +#define PCIE_MB_D2H_MB_MASK \ + (PCIE_MB_TOPCIE_D2H0_DB0 | PCIE_MB_TOPCIE_D2H0_DB1 | \ + 
PCIE_MB_TOPCIE_D2H1_DB0 | PCIE_MB_TOPCIE_D2H1_DB1 | \ + PCIE_MB_TOPCIE_D2H2_DB0 | PCIE_MB_TOPCIE_D2H2_DB1 | \ + PCIE_MB_TOPCIE_D2H3_DB0 | PCIE_MB_TOPCIE_D2H3_DB1) + +/* SB to PCIE translation masks */ +#define SBTOPCIE0_MASK 0xfc000000 +#define SBTOPCIE1_MASK 0xfc000000 +#define SBTOPCIE2_MASK 0xc0000000 + +/* Access type bits (0:1) */ +#define SBTOPCIE_MEM 0 +#define SBTOPCIE_IO 1 +#define SBTOPCIE_CFG0 2 +#define SBTOPCIE_CFG1 3 + +/* Prefetch enable bit 2 */ +#define SBTOPCIE_PF 4 + +/* Write Burst enable for memory write bit 3 */ +#define SBTOPCIE_WR_BURST 8 + +/* config access */ +#define CONFIGADDR_FUNC_MASK 0x7000 +#define CONFIGADDR_FUNC_SHF 12 +#define CONFIGADDR_REG_MASK 0x0FFF +#define CONFIGADDR_REG_SHF 0 + +#define PCIE_CONFIG_INDADDR(f, r) ((((f) & CONFIGADDR_FUNC_MASK) << CONFIGADDR_FUNC_SHF) | \ + (((r) & CONFIGADDR_REG_MASK) << CONFIGADDR_REG_SHF)) + +/* PCIE protocol regs Indirect Address */ +#define PCIEADDR_PROT_MASK 0x300 +#define PCIEADDR_PROT_SHF 8 +#define PCIEADDR_PL_TLP 0 +#define PCIEADDR_PL_DLLP 1 +#define PCIEADDR_PL_PLP 2 + +/* PCIE protocol PHY diagnostic registers */ +#define PCIE_PLP_MODEREG 0x200 /* Mode */ +#define PCIE_PLP_STATUSREG 0x204 /* Status */ +#define PCIE_PLP_LTSSMCTRLREG 0x208 /* LTSSM control */ +#define PCIE_PLP_LTLINKNUMREG 0x20c /* Link Training Link number */ +#define PCIE_PLP_LTLANENUMREG 0x210 /* Link Training Lane number */ +#define PCIE_PLP_LTNFTSREG 0x214 /* Link Training N_FTS */ +#define PCIE_PLP_ATTNREG 0x218 /* Attention */ +#define PCIE_PLP_ATTNMASKREG 0x21C /* Attention Mask */ +#define PCIE_PLP_RXERRCTR 0x220 /* Rx Error */ +#define PCIE_PLP_RXFRMERRCTR 0x224 /* Rx Framing Error */ +#define PCIE_PLP_RXERRTHRESHREG 0x228 /* Rx Error threshold */ +#define PCIE_PLP_TESTCTRLREG 0x22C /* Test Control reg */ +#define PCIE_PLP_SERDESCTRLOVRDREG 0x230 /* SERDES Control Override */ +#define PCIE_PLP_TIMINGOVRDREG 0x234 /* Timing param override */ +#define PCIE_PLP_RXTXSMDIAGREG 0x238 /* RXTX State Machine Diag */ +#define PCIE_PLP_LTSSMDIAGREG 0x23C /* LTSSM State Machine Diag */ + +/* PCIE protocol DLLP diagnostic registers */ +#define PCIE_DLLP_LCREG 0x100 /* Link Control */ +#define PCIE_DLLP_LSREG 0x104 /* Link Status */ +#define PCIE_DLLP_LAREG 0x108 /* Link Attention */ +#define PCIE_DLLP_LAMASKREG 0x10C /* Link Attention Mask */ +#define PCIE_DLLP_NEXTTXSEQNUMREG 0x110 /* Next Tx Seq Num */ +#define PCIE_DLLP_ACKEDTXSEQNUMREG 0x114 /* Acked Tx Seq Num */ +#define PCIE_DLLP_PURGEDTXSEQNUMREG 0x118 /* Purged Tx Seq Num */ +#define PCIE_DLLP_RXSEQNUMREG 0x11C /* Rx Sequence Number */ +#define PCIE_DLLP_LRREG 0x120 /* Link Replay */ +#define PCIE_DLLP_LACKTOREG 0x124 /* Link Ack Timeout */ +#define PCIE_DLLP_PMTHRESHREG 0x128 /* Power Management Threshold */ +#define PCIE_DLLP_RTRYWPREG 0x12C /* Retry buffer write ptr */ +#define PCIE_DLLP_RTRYRPREG 0x130 /* Retry buffer Read ptr */ +#define PCIE_DLLP_RTRYPPREG 0x134 /* Retry buffer Purged ptr */ +#define PCIE_DLLP_RTRRWREG 0x138 /* Retry buffer Read/Write */ +#define PCIE_DLLP_ECTHRESHREG 0x13C /* Error Count Threshold */ +#define PCIE_DLLP_TLPERRCTRREG 0x140 /* TLP Error Counter */ +#define PCIE_DLLP_ERRCTRREG 0x144 /* Error Counter */ +#define PCIE_DLLP_NAKRXCTRREG 0x148 /* NAK Received Counter */ +#define PCIE_DLLP_TESTREG 0x14C /* Test */ +#define PCIE_DLLP_PKTBIST 0x150 /* Packet BIST */ +#define PCIE_DLLP_PCIE11 0x154 /* DLLP PCIE 1.1 reg */ + +#define PCIE_DLLP_LSREG_LINKUP (1 << 16) + +/* PCIE protocol TLP diagnostic registers */ +#define PCIE_TLP_CONFIGREG 0x000 /* 
Configuration */ +#define PCIE_TLP_WORKAROUNDSREG 0x004 /* TLP Workarounds */ +#define PCIE_TLP_WRDMAUPPER 0x010 /* Write DMA Upper Address */ +#define PCIE_TLP_WRDMALOWER 0x014 /* Write DMA Lower Address */ +#define PCIE_TLP_WRDMAREQ_LBEREG 0x018 /* Write DMA Len/ByteEn Req */ +#define PCIE_TLP_RDDMAUPPER 0x01C /* Read DMA Upper Address */ +#define PCIE_TLP_RDDMALOWER 0x020 /* Read DMA Lower Address */ +#define PCIE_TLP_RDDMALENREG 0x024 /* Read DMA Len Req */ +#define PCIE_TLP_MSIDMAUPPER 0x028 /* MSI DMA Upper Address */ +#define PCIE_TLP_MSIDMALOWER 0x02C /* MSI DMA Lower Address */ +#define PCIE_TLP_MSIDMALENREG 0x030 /* MSI DMA Len Req */ +#define PCIE_TLP_SLVREQLENREG 0x034 /* Slave Request Len */ +#define PCIE_TLP_FCINPUTSREQ 0x038 /* Flow Control Inputs */ +#define PCIE_TLP_TXSMGRSREQ 0x03C /* Tx StateMachine and Gated Req */ +#define PCIE_TLP_ADRACKCNTARBLEN 0x040 /* Address Ack XferCnt and ARB Len */ +#define PCIE_TLP_DMACPLHDR0 0x044 /* DMA Completion Hdr 0 */ +#define PCIE_TLP_DMACPLHDR1 0x048 /* DMA Completion Hdr 1 */ +#define PCIE_TLP_DMACPLHDR2 0x04C /* DMA Completion Hdr 2 */ +#define PCIE_TLP_DMACPLMISC0 0x050 /* DMA Completion Misc0 */ +#define PCIE_TLP_DMACPLMISC1 0x054 /* DMA Completion Misc1 */ +#define PCIE_TLP_DMACPLMISC2 0x058 /* DMA Completion Misc2 */ +#define PCIE_TLP_SPTCTRLLEN 0x05C /* Split Controller Req len */ +#define PCIE_TLP_SPTCTRLMSIC0 0x060 /* Split Controller Misc 0 */ +#define PCIE_TLP_SPTCTRLMSIC1 0x064 /* Split Controller Misc 1 */ +#define PCIE_TLP_BUSDEVFUNC 0x068 /* Bus/Device/Func */ +#define PCIE_TLP_RESETCTR 0x06C /* Reset Counter */ +#define PCIE_TLP_RTRYBUF 0x070 /* Retry Buffer value */ +#define PCIE_TLP_TGTDEBUG1 0x074 /* Target Debug Reg1 */ +#define PCIE_TLP_TGTDEBUG2 0x078 /* Target Debug Reg2 */ +#define PCIE_TLP_TGTDEBUG3 0x07C /* Target Debug Reg3 */ +#define PCIE_TLP_TGTDEBUG4 0x080 /* Target Debug Reg4 */ + +/* PCIE2 MDIO register offsets */ +#define PCIE2_MDIO_CONTROL 0x128 +#define PCIE2_MDIO_WR_DATA 0x12C +#define PCIE2_MDIO_RD_DATA 0x130 + + +/* MDIO control */ +#define MDIOCTL_DIVISOR_MASK 0x7f /* clock to be used on MDIO */ +#define MDIOCTL_DIVISOR_VAL 0x2 +#define MDIOCTL_PREAM_EN 0x80 /* Enable preamble sequence */ +#define MDIOCTL_ACCESS_DONE 0x100 /* Transaction complete */ + +/* MDIO Data */ +#define MDIODATA_MASK 0x0000ffff /* data 2 bytes */ +#define MDIODATA_TA 0x00020000 /* Turnaround */ +#define MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift (rev < 10) */ +#define MDIODATA_REGADDR_MASK_OLD 0x003c0000 /* Regaddr Mask (rev < 10) */ +#define MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift (rev < 10) */ +#define MDIODATA_DEVADDR_MASK_OLD 0x0fc00000 /* Physmedia devaddr Mask (rev < 10) */ +#define MDIODATA_REGADDR_SHF 18 /* Regaddr shift */ +#define MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */ +#define MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */ +#define MDIODATA_DEVADDR_MASK 0x0f800000 /* Physmedia devaddr Mask */ +#define MDIODATA_WRITE 0x10000000 /* write Transaction */ +#define MDIODATA_READ 0x20000000 /* Read Transaction */ +#define MDIODATA_START 0x40000000 /* start of Transaction */ + +#define MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */ +#define MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */ +
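As a worked illustration of how the MDIO data fields above compose (editorial sketch, not part of the patch; the helper name is hypothetical), a serdes register write word for the rev >= 10 field layout would be built as below. The caller would poll MDIOCTL_ACCESS_DONE in the mdiocontrol register around the access.

/* Compose the mdiodata word for a serdes register write (rev >= 10 layout). */
static uint32 mdiodata_write_cmd(uint32 devaddr, uint32 regaddr, uint16 val)
{
	return MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
	       ((devaddr << MDIODATA_DEVADDR_SHF) & MDIODATA_DEVADDR_MASK) |
	       ((regaddr << MDIODATA_REGADDR_SHF) & MDIODATA_REGADDR_MASK) |
	       ((uint32)val & MDIODATA_MASK);
}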
+/* MDIO control/wrData/rdData register defines for PCIE Gen 2 */ +#define MDIOCTL2_DIVISOR_MASK 0x7f /* clock to be used on MDIO */ +#define MDIOCTL2_DIVISOR_VAL 0x2 +#define MDIOCTL2_REGADDR_SHF 8 /* Regaddr shift */ +#define MDIOCTL2_REGADDR_MASK 0x00FFFF00 /* Regaddr Mask */ +#define MDIOCTL2_DEVADDR_SHF 24 /* Physmedia devaddr shift */ +#define MDIOCTL2_DEVADDR_MASK 0x0f000000 /* Physmedia devaddr Mask */ +#define MDIOCTL2_SLAVE_BYPASS 0x10000000 /* IP slave bypass */ +#define MDIOCTL2_READ 0x20000000 /* Read transaction */ + +#define MDIODATA2_DONE 0x80000000 /* rd/wr transaction done */ +#define MDIODATA2_MASK 0x7FFFFFFF /* rd/wr transaction data */ +#define MDIODATA2_DEVADDR_SHF 4 /* Physmedia devaddr shift */ + + +/* MDIO devices (SERDES modules) + * unlike old pcie cores (rev < 10), the rev 10 pcie serdes organizes registers into a few blocks, + * so a two-level mapping (block idx, register offset) is required + */ +#define MDIO_DEV_IEEE0 0x000 +#define MDIO_DEV_IEEE1 0x001 +#define MDIO_DEV_BLK0 0x800 +#define MDIO_DEV_BLK1 0x801 +#define MDIO_DEV_BLK2 0x802 +#define MDIO_DEV_BLK3 0x803 +#define MDIO_DEV_BLK4 0x804 +#define MDIO_DEV_TXPLL 0x808 /* TXPLL register block idx */ +#define MDIO_DEV_TXCTRL0 0x820 +#define MDIO_DEV_SERDESID 0x831 +#define MDIO_DEV_RXCTRL0 0x840 + + +/* XgxsBlk1_A Register Offsets */ +#define BLK1_PWR_MGMT0 0x16 +#define BLK1_PWR_MGMT1 0x17 +#define BLK1_PWR_MGMT2 0x18 +#define BLK1_PWR_MGMT3 0x19 +#define BLK1_PWR_MGMT4 0x1A + +/* serdes regs (rev < 10) */ +#define MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */ +#define MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */ +#define MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */ + /* SERDES RX registers */ +#define SERDES_RX_CTRL 1 /* Rx cntrl */ +#define SERDES_RX_TIMER1 2 /* Rx Timer1 */ +#define SERDES_RX_CDR 6 /* CDR */ +#define SERDES_RX_CDRBW 7 /* CDR BW */ + + /* SERDES RX control register */ +#define SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */ +#define SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */ + + /* SERDES PLL registers */ +#define SERDES_PLL_CTRL 1 /* PLL control reg */ +#define PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */ + +/* Power management threshold */ +#define PCIE_L0THRESHOLDTIME_MASK 0x00FF /* bits 0 - 7 */ +#define PCIE_L1THRESHOLDTIME_MASK 0xFF00 /* bits 8 - 15 */ +#define PCIE_L1THRESHOLDTIME_SHIFT 8 /* PCIE_L1THRESHOLDTIME_SHIFT */ +#define PCIE_L1THRESHOLD_WARVAL 0x72 /* WAR value */ +#define PCIE_ASPMTIMER_EXTEND 0x01000000 /* > rev7: enable extend ASPM timer */ + +/* SPROM offsets */ +#define SRSH_ASPM_OFFSET 4 /* word 4 */ +#define SRSH_ASPM_ENB 0x18 /* bit 3, 4 */ +#define SRSH_ASPM_L1_ENB 0x10 /* bit 4 */ +#define SRSH_ASPM_L0s_ENB 0x8 /* bit 3 */ +#define SRSH_PCIE_MISC_CONFIG 5 /* word 5 */ +#define SRSH_L23READY_EXIT_NOPERST 0x8000 /* bit 15 */ +#define SRSH_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */ +#define SRSH_CLKREQ_OFFSET_REV8 52 /* word 52 for srom rev 8 */ +#define SRSH_CLKREQ_ENB 0x0800 /* bit 11 */ +#define SRSH_BD_OFFSET 6 /* word 6 */ +#define SRSH_AUTOINIT_OFFSET 18 /* auto initialization enable */ + +/* Linkcontrol reg offset in PCIE Cap */ +#define PCIE_CAP_LINKCTRL_OFFSET 16 /* linkctrl offset in pcie cap */ +#define PCIE_CAP_LCREG_ASPML0s 0x01 /* ASPM L0s in linkctrl */ +#define PCIE_CAP_LCREG_ASPML1 0x02 /* ASPM L1 in linkctrl */ +#define PCIE_CLKREQ_ENAB 0x100 /* CLKREQ Enab in linkctrl */ +#define PCIE_LINKSPEED_MASK 0xF0000 /* bits 0 - 3 of high word */ +#define PCIE_LINKSPEED_SHIFT 16 /* PCIE_LINKSPEED_SHIFT */ + +/* Devcontrol reg offset in PCIE Cap */ +#define PCIE_CAP_DEVCTRL_OFFSET 8 /* devctrl offset in pcie cap */ +#define PCIE_CAP_DEVCTRL_MRRS_MASK 0x7000 /* Max read request size mask */ +#define PCIE_CAP_DEVCTRL_MRRS_SHIFT 12 /* Max read request size shift */ +#define PCIE_CAP_DEVCTRL_MRRS_128B 0 /* 128 Byte */ +#define 
PCIE_CAP_DEVCTRL_MRRS_256B 1 /* 256 Byte */ +#define PCIE_CAP_DEVCTRL_MRRS_512B 2 /* 512 Byte */ +#define PCIE_CAP_DEVCTRL_MRRS_1024B 3 /* 1024 Byte */ +#define PCIE_CAP_DEVCTRL_MPS_MASK 0x00e0 /* Max payload size mask */ +#define PCIE_CAP_DEVCTRL_MPS_SHIFT 5 /* Max payload size shift */ +#define PCIE_CAP_DEVCTRL_MPS_128B 0 /* 128 Byte */ +#define PCIE_CAP_DEVCTRL_MPS_256B 1 /* 256 Byte */ +#define PCIE_CAP_DEVCTRL_MPS_512B 2 /* 512 Byte */ +#define PCIE_CAP_DEVCTRL_MPS_1024B 3 /* 1024 Byte */ + +#define PCIE_ASPM_ENAB 3 /* ASPM L0s & L1 in linkctrl */ +#define PCIE_ASPM_L1_ENAB 2 /* ASPM L0s & L1 in linkctrl */ +#define PCIE_ASPM_L0s_ENAB 1 /* ASPM L0s & L1 in linkctrl */ +#define PCIE_ASPM_DISAB 0 /* ASPM L0s & L1 in linkctrl */ + +#define PCIE_ASPM_L11_ENAB 8 /* ASPM L1.1 in PML1_sub_control2 */ +#define PCIE_ASPM_L12_ENAB 4 /* ASPM L1.2 in PML1_sub_control2 */ + +/* Devcontrol2 reg offset in PCIE Cap */ +#define PCIE_CAP_DEVCTRL2_OFFSET 0x28 /* devctrl2 offset in pcie cap */ +#define PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK 0x400 /* Latency Tolerance Reporting Enable */ +#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_SHIFT 13 /* Enable OBFF mechanism, select signaling method */ +#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_MASK 0x6000 /* Enable OBFF mechanism, select signaling method */ + +/* LTR registers in PCIE Cap */ +#define PCIE_LTR0_REG_OFFSET 0x844 /* ltr0_reg offset in pcie cap */ +#define PCIE_LTR1_REG_OFFSET 0x848 /* ltr1_reg offset in pcie cap */ +#define PCIE_LTR2_REG_OFFSET 0x84c /* ltr2_reg offset in pcie cap */ +#define PCIE_LTR0_REG_DEFAULT_60 0x883c883c /* active latency default to 60usec */ +#define PCIE_LTR0_REG_DEFAULT_150 0x88968896 /* active latency default to 150usec */ +#define PCIE_LTR1_REG_DEFAULT 0x88648864 /* idle latency default to 100usec */ +#define PCIE_LTR2_REG_DEFAULT 0x90039003 /* sleep latency default to 3msec */ + +/* Status reg PCIE_PLP_STATUSREG */ +#define PCIE_PLP_POLARITYINV_STAT 0x10 + + +/* PCIE BRCM Vendor CAP REVID reg bits */ +#define BRCMCAP_PCIEREV_CT_MASK 0xF00 +#define BRCMCAP_PCIEREV_CT_SHIFT 8 +#define BRCMCAP_PCIEREV_REVID_MASK 0xFF +#define BRCMCAP_PCIEREV_REVID_SHIFT 0 + +#define PCIE_REVREG_CT_PCIE1 0 +#define PCIE_REVREG_CT_PCIE2 1 + +/* PCIE GEN2 specific defines */ +/* PCIE BRCM Vendor Cap offsets w.r.t to vendor cap ptr */ +#define PCIE2R0_BRCMCAP_REVID_OFFSET 4 +#define PCIE2R0_BRCMCAP_BAR0_WIN0_WRAP_OFFSET 8 +#define PCIE2R0_BRCMCAP_BAR0_WIN2_OFFSET 12 +#define PCIE2R0_BRCMCAP_BAR0_WIN2_WRAP_OFFSET 16 +#define PCIE2R0_BRCMCAP_BAR0_WIN_OFFSET 20 +#define PCIE2R0_BRCMCAP_BAR1_WIN_OFFSET 24 +#define PCIE2R0_BRCMCAP_SPROM_CTRL_OFFSET 28 +#define PCIE2R0_BRCMCAP_BAR2_WIN_OFFSET 32 +#define PCIE2R0_BRCMCAP_INTSTATUS_OFFSET 36 +#define PCIE2R0_BRCMCAP_INTMASK_OFFSET 40 +#define PCIE2R0_BRCMCAP_PCIE2SB_MB_OFFSET 44 +#define PCIE2R0_BRCMCAP_BPADDR_OFFSET 48 +#define PCIE2R0_BRCMCAP_BPDATA_OFFSET 52 +#define PCIE2R0_BRCMCAP_CLKCTLSTS_OFFSET 56 + +/* definition of configuration space registers of PCIe gen2 + * http://hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/CurrentPcieGen2ProgramGuide/pcie_ep.htm + */ +#define PCIECFGREG_STATUS_CMD 0x4 +#define PCIECFGREG_PM_CSR 0x4C +#define PCIECFGREG_MSI_CAP 0x58 +#define PCIECFGREG_MSI_ADDR_L 0x5C +#define PCIECFGREG_MSI_ADDR_H 0x60 +#define PCIECFGREG_MSI_DATA 0x64 +#define PCIECFGREG_LINK_STATUS_CTRL 0xBC +#define PCIECFGREG_LINK_STATUS_CTRL2 0xDC +#define PCIECFGREG_RBAR_CTRL 0x228 +#define PCIECFGREG_PML1_SUB_CTRL1 0x248 +#define PCIECFGREG_REG_BAR2_CONFIG 0x4E0 +#define PCIECFGREG_REG_BAR3_CONFIG 0x4F4 +#define 
PCIECFGREG_PDL_CTRL1 0x1004 +#define PCIECFGREG_PDL_IDDQ 0x1814 +#define PCIECFGREG_REG_PHY_CTL7 0x181c + +/* PCIECFGREG_PML1_SUB_CTRL1 Bit Definition */ +#define PCI_PM_L1_2_ENA_MASK 0x00000001 /* PCI-PM L1.2 Enabled */ +#define PCI_PM_L1_1_ENA_MASK 0x00000002 /* PCI-PM L1.1 Enabled */ +#define ASPM_L1_2_ENA_MASK 0x00000004 /* ASPM L1.2 Enabled */ +#define ASPM_L1_1_ENA_MASK 0x00000008 /* ASPM L1.1 Enabled */ + +/* PCIe gen2 mailbox interrupt masks */ +#define I_MB 0x3 +#define I_BIT0 0x1 +#define I_BIT1 0x2 + +/* PCIE gen2 config regs */ +#define PCIIntstatus 0x090 +#define PCIIntmask 0x094 +#define PCISBMbx 0x98 + +/* enumeration Core regs */ +#define PCIH2D_MailBox 0x140 +#define PCIH2D_DB1 0x144 +#define PCID2H_MailBox 0x148 +#define PCIMailBoxInt 0x48 +#define PCIMailBoxMask 0x4C + +#define I_F0_B0 (0x1 << 8) /* Mail box interrupt Function 0 interrupt, bit 0 */ +#define I_F0_B1 (0x1 << 9) /* Mail box interrupt Function 0 interrupt, bit 1 */ + +#define PCIECFGREG_DEVCONTROL 0xB4 +#define PCIECFGREG_DEVCONTROL_MRRS_SHFT 12 +#define PCIECFGREG_DEVCONTROL_MRRS_MASK (0x7 << PCIECFGREG_DEVCONTROL_MRRS_SHFT) + +/* SROM hardware region */ +#define SROM_OFFSET_BAR1_CTRL 52 + +#define BAR1_ENC_SIZE_MASK 0x000e +#define BAR1_ENC_SIZE_SHIFT 1 + +#define BAR1_ENC_SIZE_1M 0 +#define BAR1_ENC_SIZE_2M 1 +#define BAR1_ENC_SIZE_4M 2 + +#define PCIEGEN2_CAP_DEVSTSCTRL2_OFFSET 0xD4 +#define PCIEGEN2_CAP_DEVSTSCTRL2_LTRENAB 0x400 + +/* + * Latency Tolerance Reporting (LTR) states + * Active has the least tolerant latency requirement + * Sleep is most tolerant + */ +#define LTR_ACTIVE 2 +#define LTR_ACTIVE_IDLE 1 +#define LTR_SLEEP 0 +#define LTR_FINAL_MASK 0x300 +#define LTR_FINAL_SHIFT 8 + +/* pwrinstatus, pwrintmask regs */ +#define PCIEGEN2_PWRINT_D0_STATE_SHIFT 0 +#define PCIEGEN2_PWRINT_D1_STATE_SHIFT 1 +#define PCIEGEN2_PWRINT_D2_STATE_SHIFT 2 +#define PCIEGEN2_PWRINT_D3_STATE_SHIFT 3 +#define PCIEGEN2_PWRINT_L0_LINK_SHIFT 4 +#define PCIEGEN2_PWRINT_L0s_LINK_SHIFT 5 +#define PCIEGEN2_PWRINT_L1_LINK_SHIFT 6 +#define PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT 7 +#define PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT 8 + +#define PCIEGEN2_PWRINT_D0_STATE_MASK (1 << PCIEGEN2_PWRINT_D0_STATE_SHIFT) +#define PCIEGEN2_PWRINT_D1_STATE_MASK (1 << PCIEGEN2_PWRINT_D1_STATE_SHIFT) +#define PCIEGEN2_PWRINT_D2_STATE_MASK (1 << PCIEGEN2_PWRINT_D2_STATE_SHIFT) +#define PCIEGEN2_PWRINT_D3_STATE_MASK (1 << PCIEGEN2_PWRINT_D3_STATE_SHIFT) +#define PCIEGEN2_PWRINT_L0_LINK_MASK (1 << PCIEGEN2_PWRINT_L0_LINK_SHIFT) +#define PCIEGEN2_PWRINT_L0s_LINK_MASK (1 << PCIEGEN2_PWRINT_L0s_LINK_SHIFT) +#define PCIEGEN2_PWRINT_L1_LINK_MASK (1 << PCIEGEN2_PWRINT_L1_LINK_SHIFT) +#define PCIEGEN2_PWRINT_L2_L3_LINK_MASK (1 << PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT) +#define PCIEGEN2_PWRINT_OBFF_CHANGE_MASK (1 << PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT) + +/* sbtopcie mail box */ +#define SBTOPCIE_MB_FUNC0_SHIFT 8 +#define SBTOPCIE_MB_FUNC1_SHIFT 10 +#define SBTOPCIE_MB_FUNC2_SHIFT 12 +#define SBTOPCIE_MB_FUNC3_SHIFT 14 + +/* pcieiocstatus */ +#define PCIEGEN2_IOC_D0_STATE_SHIFT 8 +#define PCIEGEN2_IOC_D1_STATE_SHIFT 9 +#define PCIEGEN2_IOC_D2_STATE_SHIFT 10 +#define PCIEGEN2_IOC_D3_STATE_SHIFT 11 +#define PCIEGEN2_IOC_L0_LINK_SHIFT 12 +#define PCIEGEN2_IOC_L1_LINK_SHIFT 13 +#define PCIEGEN2_IOC_L1L2_LINK_SHIFT 14 +#define PCIEGEN2_IOC_L2_L3_LINK_SHIFT 15 + +#define PCIEGEN2_IOC_D0_STATE_MASK (1 << PCIEGEN2_IOC_D0_STATE_SHIFT) +#define PCIEGEN2_IOC_D1_STATE_MASK (1 << PCIEGEN2_IOC_D1_STATE_SHIFT) +#define PCIEGEN2_IOC_D2_STATE_MASK (1 << PCIEGEN2_IOC_D2_STATE_SHIFT) 
+#define PCIEGEN2_IOC_D3_STATE_MASK (1 << PCIEGEN2_IOC_D3_STATE_SHIFT) +#define PCIEGEN2_IOC_L0_LINK_MASK (1 << PCIEGEN2_IOC_L0_LINK_SHIFT) +#define PCIEGEN2_IOC_L1_LINK_MASK (1 << PCIEGEN2_IOC_L1_LINK_SHIFT) +#define PCIEGEN2_IOC_L1L2_LINK_MASK (1 << PCIEGEN2_IOC_L1L2_LINK_SHIFT) +#define PCIEGEN2_IOC_L2_L3_LINK_MASK (1 << PCIEGEN2_IOC_L2_L3_LINK_SHIFT) + +/* stat_ctrl */ +#define PCIE_STAT_CTRL_RESET 0x1 +#define PCIE_STAT_CTRL_ENABLE 0x2 +#define PCIE_STAT_CTRL_INTENABLE 0x4 +#define PCIE_STAT_CTRL_INTSTATUS 0x8 + +#ifdef BCMDRIVER +void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs); +void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs); +#endif /* BCMDRIVER */ + +#endif /* _PCIE_CORE_H */
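An editorial sketch (not part of the patch) of how the PCIEGEN2_PWRINT_* masks defined above would be used to decode the gen2 core's pwr_int_status register; the function name is hypothetical.

/* Report whether a power interrupt indicates the link dropped out of L0. */
static bool pcie_pwrint_link_left_l0(uint32 pwr_int_status)
{
	return (pwr_int_status & (PCIEGEN2_PWRINT_L1_LINK_MASK |
	        PCIEGEN2_PWRINT_L2_L3_LINK_MASK)) != 0;
}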
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11.h b/drivers/net/wireless/bcmdhd/include/proto/802.11.h new file mode 100644 index 000000000000..7aaea5d0596d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/802.11.h @@ -0,0 +1,4445 @@ +/* + * Fundamental types and constants relating to 802.11 + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: 802.11.h 556559 2015-05-14 01:48:17Z $ + */ + +#ifndef _802_11_H_ +#define _802_11_H_ + +#ifndef _TYPEDEFS_H_ +#include <typedefs.h> +#endif + +#ifndef _NET_ETHERNET_H_ +#include <proto/ethernet.h> +#endif + +#include + +/* This marks the start of a packed structure section. */ +#include <packed_section_start.h> + + +#define DOT11_TU_TO_US 1024 /* 802.11 Time Unit is 1024 microseconds */ + +/* Generic 802.11 frame constants */ +#define DOT11_A3_HDR_LEN 24 /* d11 header length with A3 */ +#define DOT11_A4_HDR_LEN 30 /* d11 header length with A4 */ +#define DOT11_MAC_HDR_LEN DOT11_A3_HDR_LEN /* MAC header length */ +#define DOT11_FCS_LEN 4 /* d11 FCS length */ +#define DOT11_ICV_LEN 4 /* d11 ICV length */ +#define DOT11_ICV_AES_LEN 8 /* d11 ICV/AES length */ +#define DOT11_QOS_LEN 2 /* d11 QoS length */ +#define DOT11_HTC_LEN 4 /* d11 HT Control field length */ + +#define DOT11_KEY_INDEX_SHIFT 6 /* d11 key index shift */ +#define DOT11_IV_LEN 4 /* d11 IV length */ +#define DOT11_IV_TKIP_LEN 8 /* d11 IV TKIP length */ +#define DOT11_IV_AES_OCB_LEN 4 /* d11 IV/AES/OCB length */ +#define DOT11_IV_AES_CCM_LEN 8 /* d11 IV/AES/CCM length */ +#define DOT11_IV_MAX_LEN 8 /* maximum iv len for any encryption */ + +/* Includes MIC */ +#define DOT11_MAX_MPDU_BODY_LEN 2304 /* max MPDU body length */ +/* A4 header + QoS + CCMP + PDU + ICV + FCS = 2352 */ +#define DOT11_MAX_MPDU_LEN (DOT11_A4_HDR_LEN + \ + DOT11_QOS_LEN + \ + DOT11_IV_AES_CCM_LEN + \ + DOT11_MAX_MPDU_BODY_LEN + \ + DOT11_ICV_LEN + \ + DOT11_FCS_LEN) /* d11 max MPDU length */ + +#define DOT11_MAX_SSID_LEN 32 /* d11 max ssid length */ + +/* dot11RTSThreshold */ +#define DOT11_DEFAULT_RTS_LEN 2347 /* d11 default RTS length */ +#define DOT11_MAX_RTS_LEN 2347 /* d11 max RTS length */ + +/* dot11FragmentationThreshold */ +#define DOT11_MIN_FRAG_LEN 256 /* d11 min fragmentation length */ +#define DOT11_MAX_FRAG_LEN 2346 /* Max frag is also limited by aMPDUMaxLength + * of the attached PHY + */ +#define DOT11_DEFAULT_FRAG_LEN 2346 /* d11 default fragmentation length */ + +/* dot11BeaconPeriod */ +#define DOT11_MIN_BEACON_PERIOD 1 /* d11 min beacon period */ +#define DOT11_MAX_BEACON_PERIOD 0xFFFF /* d11 max beacon period */ + +/* dot11DTIMPeriod */ +#define DOT11_MIN_DTIM_PERIOD 1 /* d11 min DTIM period */ +#define DOT11_MAX_DTIM_PERIOD 0xFF /* d11 max DTIM period */ + +/** 802.2 LLC/SNAP header used by 802.11 per 802.1H */ +#define DOT11_LLC_SNAP_HDR_LEN 8 /* d11 LLC/SNAP header length */ +#define DOT11_OUI_LEN 3 /* d11 OUI length */ +BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header { + uint8 dsap; /* always 0xAA */ + uint8 ssap; /* always 0xAA */ + uint8 ctl; /* always 0x03 */ + uint8 oui[DOT11_OUI_LEN]; /* RFC1042: 0x00 0x00 0x00 + * Bridge-Tunnel: 0x00 0x00 0xF8 + */ + uint16 type; /* ethertype */ +} BWL_POST_PACKED_STRUCT; + +/* RFC1042 header used by 802.11 per 802.1H */ +#define RFC1042_HDR_LEN (ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN) /* RFC1042 header length */ + +/* Generic 802.11 MAC header */ +/** + * N.B.: This struct reflects the full 4 address 802.11 MAC header. + * The fields are defined such that the shorter 1, 2, and 3 + * address headers just use the first k fields. 
+ */ +BWL_PRE_PACKED_STRUCT struct dot11_header { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr a1; /* address 1 */ + struct ether_addr a2; /* address 2 */ + struct ether_addr a3; /* address 3 */ + uint16 seq; /* sequence control */ + struct ether_addr a4; /* address 4 */ +} BWL_POST_PACKED_STRUCT; + +/* Control frames */ + +BWL_PRE_PACKED_STRUCT struct dot11_rts_frame { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ + struct ether_addr ta; /* transmitter address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_RTS_LEN 16 /* d11 RTS frame length */ + +BWL_PRE_PACKED_STRUCT struct dot11_cts_frame { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_CTS_LEN 10 /* d11 CTS frame length */ + +BWL_PRE_PACKED_STRUCT struct dot11_ack_frame { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_ACK_LEN 10 /* d11 ACK frame length */ + +BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame { + uint16 fc; /* frame control */ + uint16 durid; /* AID */ + struct ether_addr bssid; /* receiver address, STA in AP */ + struct ether_addr ta; /* transmitter address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_PS_POLL_LEN 16 /* d11 PS poll frame length */ + +BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ + struct ether_addr bssid; /* transmitter address, STA in AP */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_CS_END_LEN 16 /* d11 CF-END frame length */ + +/** + * RWL wifi protocol: The Vendor Specific Action frame is defined for vendor-specific signaling + * category+OUI+vendor specific content ( this can be variable) + */ +BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific { + uint8 category; + uint8 OUI[3]; + uint8 type; + uint8 subtype; + uint8 data[1040]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t; + +/** generic vendor specific action frame with variable length */ +BWL_PRE_PACKED_STRUCT struct dot11_action_vs_frmhdr { + uint8 category; + uint8 OUI[3]; + uint8 type; + uint8 subtype; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_action_vs_frmhdr dot11_action_vs_frmhdr_t; + +#define DOT11_ACTION_VS_HDR_LEN 6 + +#define BCM_ACTION_OUI_BYTE0 0x00 +#define BCM_ACTION_OUI_BYTE1 0x90 +#define BCM_ACTION_OUI_BYTE2 0x4c + +/* BA/BAR Control parameters */ +#define DOT11_BA_CTL_POLICY_NORMAL 0x0000 /* normal ack */ +#define DOT11_BA_CTL_POLICY_NOACK 0x0001 /* no ack */ +#define DOT11_BA_CTL_POLICY_MASK 0x0001 /* ack policy mask */ + +#define DOT11_BA_CTL_MTID 0x0002 /* multi tid BA */ +#define DOT11_BA_CTL_COMPRESSED 0x0004 /* compressed bitmap */ + +#define DOT11_BA_CTL_NUMMSDU_MASK 0x0FC0 /* num msdu in bitmap mask */ +#define DOT11_BA_CTL_NUMMSDU_SHIFT 6 /* num msdu in bitmap shift */ + +#define DOT11_BA_CTL_TID_MASK 0xF000 /* tid mask */ +#define DOT11_BA_CTL_TID_SHIFT 12 /* tid shift */ + +/** control frame header (BA/BAR) */ +BWL_PRE_PACKED_STRUCT struct dot11_ctl_header { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr ra; /* receiver address */ + struct ether_addr ta; /* transmitter address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_CTL_HDR_LEN 16 /* 
control frame hdr len */ + +/** BAR frame payload */ +BWL_PRE_PACKED_STRUCT struct dot11_bar { + uint16 bar_control; /* BAR Control */ + uint16 seqnum; /* Starting Sequence control */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_BAR_LEN 4 /* BAR frame payload length */ + +#define DOT11_BA_BITMAP_LEN 128 /* bitmap length */ +#define DOT11_BA_CMP_BITMAP_LEN 8 /* compressed bitmap length */ +/** BA frame payload */ +BWL_PRE_PACKED_STRUCT struct dot11_ba { + uint16 ba_control; /* BA Control */ + uint16 seqnum; /* Starting Sequence control */ + uint8 bitmap[DOT11_BA_BITMAP_LEN]; /* Block Ack Bitmap */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_BA_LEN 4 /* BA frame payload len (wo bitmap) */ + +/** Management frame header */ +BWL_PRE_PACKED_STRUCT struct dot11_management_header { + uint16 fc; /* frame control */ + uint16 durid; /* duration/ID */ + struct ether_addr da; /* receiver address */ + struct ether_addr sa; /* transmitter address */ + struct ether_addr bssid; /* BSS ID */ + uint16 seq; /* sequence control */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_management_header dot11_management_header_t; +#define DOT11_MGMT_HDR_LEN 24 /* d11 management header length */ + +/* Management frame payloads */ + +BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb { + uint32 timestamp[2]; + uint16 beacon_interval; + uint16 capability; +} BWL_POST_PACKED_STRUCT; +#define DOT11_BCN_PRB_LEN 12 /* 802.11 beacon/probe frame fixed length */ +#define DOT11_BCN_PRB_FIXED_LEN 12 /* 802.11 beacon/probe frame fixed length */ + +BWL_PRE_PACKED_STRUCT struct dot11_auth { + uint16 alg; /* algorithm */ + uint16 seq; /* sequence control */ + uint16 status; /* status code */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_AUTH_FIXED_LEN 6 /* length of auth frame without challenge IE */ + +BWL_PRE_PACKED_STRUCT struct dot11_assoc_req { + uint16 capability; /* capability information */ + uint16 listen; /* listen interval */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_ASSOC_REQ_FIXED_LEN 4 /* length of assoc frame without info elts */ + +BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req { + uint16 capability; /* capability information */ + uint16 listen; /* listen interval */ + struct ether_addr ap; /* Current AP address */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_REASSOC_REQ_FIXED_LEN 10 /* length of assoc frame without info elts */ + +BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp { + uint16 capability; /* capability information */ + uint16 status; /* status code */ + uint16 aid; /* association ID */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_ASSOC_RESP_FIXED_LEN 6 /* length of assoc resp frame without info elts */ + +BWL_PRE_PACKED_STRUCT struct dot11_action_measure { + uint8 category; + uint8 action; + uint8 token; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +#define DOT11_ACTION_MEASURE_LEN 3 /* d11 action measurement header length */ + +BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width { + uint8 category; + uint8 action; + uint8 ch_width; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops { + uint8 category; + uint8 action; + uint8 control; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_action_sa_query { + uint8 category; + uint8 action; + uint16 id; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_action_vht_oper_mode { + uint8 category; + uint8 action; + uint8 mode; +} BWL_POST_PACKED_STRUCT; + +/* These lengths assume 64 MU groups, as specified in 802.11ac-2013 */ +#define DOT11_ACTION_GID_MEMBERSHIP_LEN 8 /* bytes */ +#define DOT11_ACTION_GID_USER_POS_LEN 16 /* 
bytes */ +BWL_PRE_PACKED_STRUCT struct dot11_action_group_id { + uint8 category; + uint8 action; + uint8 membership_status[DOT11_ACTION_GID_MEMBERSHIP_LEN]; + uint8 user_position[DOT11_ACTION_GID_USER_POS_LEN]; +} BWL_POST_PACKED_STRUCT; + +#define SM_PWRSAVE_ENABLE 1 +#define SM_PWRSAVE_MODE 2 + +/* ************* 802.11h related definitions. ************* */ +BWL_PRE_PACKED_STRUCT struct dot11_power_cnst { + uint8 id; + uint8 len; + uint8 power; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_power_cnst dot11_power_cnst_t; + +BWL_PRE_PACKED_STRUCT struct dot11_power_cap { + int8 min; + int8 max; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_power_cap dot11_power_cap_t; + +BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep { + uint8 id; + uint8 len; + uint8 tx_pwr; + uint8 margin; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_tpc_rep dot11_tpc_rep_t; +#define DOT11_MNG_IE_TPC_REPORT_LEN 2 /* length of IE data, not including 2 byte header */ + +BWL_PRE_PACKED_STRUCT struct dot11_supp_channels { + uint8 id; + uint8 len; + uint8 first_channel; + uint8 num_channels; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_supp_channels dot11_supp_channels_t; + +/** + * Extension Channel Offset IE: 802.11n-D1.0 spec. added sideband + * offset for 40MHz operation. The possible 3 values are: + * 1 = above control channel + * 3 = below control channel + * 0 = no extension channel + */ +BWL_PRE_PACKED_STRUCT struct dot11_extch { + uint8 id; /* IE ID, 62, DOT11_MNG_EXT_CHANNEL_OFFSET */ + uint8 len; /* IE length */ + uint8 extch; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_extch dot11_extch_ie_t; + +BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[3]; + uint8 type; /* type indicates what follows */ + uint8 extch; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t; + +#define BRCM_EXTCH_IE_LEN 5 +#define BRCM_EXTCH_IE_TYPE 53 /* 802.11n ID not yet assigned */ +#define DOT11_EXTCH_IE_LEN 1 +#define DOT11_EXT_CH_MASK 0x03 /* extension channel mask */ +#define DOT11_EXT_CH_UPPER 0x01 /* ext. ch. on upper sb */ +#define DOT11_EXT_CH_LOWER 0x03 /* ext. ch. on lower sb */ +#define DOT11_EXT_CH_NONE 0x00 /* no extension ch. */ + +BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr { + uint8 category; + uint8 action; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_action_frmhdr dot11_action_frmhdr_t; +#define DOT11_ACTION_FRMHDR_LEN 2 + +/** CSA IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_channel_switch { + uint8 id; /* id DOT11_MNG_CHANNEL_SWITCH_ID */ + uint8 len; /* length of IE */ + uint8 mode; /* mode 0 or 1 */ + uint8 channel; /* channel switch to */ + uint8 count; /* number of beacons before switching */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_channel_switch dot11_chan_switch_ie_t; + +#define DOT11_SWITCH_IE_LEN 3 /* length of IE data, not including 2 byte header */ +/* CSA mode - 802.11h-2003 $7.3.2.20 */ +#define DOT11_CSA_MODE_ADVISORY 0 /* no DOT11_CSA_MODE_NO_TX restriction imposed */ +#define DOT11_CSA_MODE_NO_TX 1 /* no transmission upon receiving CSA frame. 
*/ + +BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel { + uint8 category; + uint8 action; + dot11_chan_switch_ie_t chan_switch_ie; /* for switch IE */ + dot11_brcm_extch_ie_t extch_ie; /* extension channel offset */ +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11_csa_body { + uint8 mode; /* mode 0 or 1 */ + uint8 reg; /* regulatory class */ + uint8 channel; /* channel switch to */ + uint8 count; /* number of beacons before switching */ +} BWL_POST_PACKED_STRUCT; + +/** 11n Extended Channel Switch IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_ext_csa { + uint8 id; /* id DOT11_MNG_EXT_CHANNEL_SWITCH_ID */ + uint8 len; /* length of IE */ + struct dot11_csa_body b; /* body of the ie */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ext_csa dot11_ext_csa_ie_t; +#define DOT11_EXT_CSA_IE_LEN 4 /* length of extended channel switch IE body */ + +BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa { + uint8 category; + uint8 action; + dot11_ext_csa_ie_t chan_switch_ie; /* for switch IE */ +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa { + uint8 category; + uint8 action; + struct dot11_csa_body b; /* body of the ie */ +} BWL_POST_PACKED_STRUCT; + +/** Wide Bandwidth Channel Switch IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_wide_bw_channel_switch { + uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */ + uint8 len; /* length of IE */ + uint8 channel_width; /* new channel width */ + uint8 center_frequency_segment_0; /* center frequency segment 0 */ + uint8 center_frequency_segment_1; /* center frequency segment 1 */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_wide_bw_channel_switch dot11_wide_bw_chan_switch_ie_t; + +#define DOT11_WIDE_BW_SWITCH_IE_LEN 3 /* length of IE data, not including 2 byte header */ + +/** Channel Switch Wrapper IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_channel_switch_wrapper { + uint8 id; /* id, Channel Switch Wrapper element */ + uint8 len; /* length of IE */ + dot11_wide_bw_chan_switch_ie_t wb_chan_switch_ie; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_channel_switch_wrapper dot11_chan_switch_wrapper_ie_t; + +/** VHT Transmit Power Envelope IE data structure */ +BWL_PRE_PACKED_STRUCT struct dot11_vht_transmit_power_envelope { + uint8 id; /* id, VHT Transmit Power Envelope element */ + uint8 len; /* length of IE */ + uint8 transmit_power_info; + uint8 local_max_transmit_power_20; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_vht_transmit_power_envelope dot11_vht_transmit_power_envelope_ie_t; + +/* vht transmit power envelope IE length depends on channel width */ +#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_40MHZ 1 +#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_80MHZ 2 +#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_160MHZ 3 + +BWL_PRE_PACKED_STRUCT struct dot11_obss_coex { + uint8 id; + uint8 len; + uint8 info; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_obss_coex dot11_obss_coex_t; +#define DOT11_OBSS_COEXINFO_LEN 1 /* length of OBSS Coexistence INFO IE */ + +#define DOT11_OBSS_COEX_INFO_REQ 0x01 +#define DOT11_OBSS_COEX_40MHZ_INTOLERANT 0x02 +#define DOT11_OBSS_COEX_20MHZ_WIDTH_REQ 0x04 + +BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist { + uint8 id; + uint8 len; + uint8 regclass; + uint8 chanlist[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_obss_chanlist dot11_obss_chanlist_t; +#define DOT11_OBSS_CHANLIST_FIXED_LEN 1 /* fixed length of regclass */ + +BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie { + uint8 id; + uint8 len; + uint8 cap[1]; +} 
BWL_POST_PACKED_STRUCT; +typedef struct dot11_extcap_ie dot11_extcap_ie_t; + +#define DOT11_EXTCAP_LEN_COEX 1 +#define DOT11_EXTCAP_LEN_BT 3 +#define DOT11_EXTCAP_LEN_IW 4 +#define DOT11_EXTCAP_LEN_SI 6 + +#define DOT11_EXTCAP_LEN_TDLS 5 +#define DOT11_11AC_EXTCAP_LEN_TDLS 8 + +#define DOT11_EXTCAP_LEN_FMS 2 +#define DOT11_EXTCAP_LEN_PROXY_ARP 2 +#define DOT11_EXTCAP_LEN_TFS 3 +#define DOT11_EXTCAP_LEN_WNM_SLEEP 3 +#define DOT11_EXTCAP_LEN_TIMBC 3 +#define DOT11_EXTCAP_LEN_BSSTRANS 3 +#define DOT11_EXTCAP_LEN_DMS 4 +#define DOT11_EXTCAP_LEN_WNM_NOTIFICATION 6 +#define DOT11_EXTCAP_LEN_TDLS_WBW 8 +#define DOT11_EXTCAP_LEN_OPMODE_NOTIFICATION 8 + +/* TDLS Capabilities */ +#define DOT11_TDLS_CAP_TDLS 37 /* TDLS support */ +#define DOT11_TDLS_CAP_PU_BUFFER_STA 28 /* TDLS Peer U-APSD buffer STA support */ +#define DOT11_TDLS_CAP_PEER_PSM 20 /* TDLS Peer PSM support */ +#define DOT11_TDLS_CAP_CH_SW 30 /* TDLS Channel switch */ +#define DOT11_TDLS_CAP_PROH 38 /* TDLS prohibited */ +#define DOT11_TDLS_CAP_CH_SW_PROH 39 /* TDLS Channel switch prohibited */ +#define DOT11_TDLS_CAP_TDLS_WIDER_BW 61 /* TDLS Wider Band-Width */ + +#define TDLS_CAP_MAX_BIT 39 /* TDLS max bit defined in ext cap */ + +/* 802.11h/802.11k Measurement Request/Report IEs */ +/* Measurement Type field */ +#define DOT11_MEASURE_TYPE_BASIC 0 /* d11 measurement basic type */ +#define DOT11_MEASURE_TYPE_CCA 1 /* d11 measurement CCA type */ +#define DOT11_MEASURE_TYPE_RPI 2 /* d11 measurement RPI type */ +#define DOT11_MEASURE_TYPE_CHLOAD 3 /* d11 measurement Channel Load type */ +#define DOT11_MEASURE_TYPE_NOISE 4 /* d11 measurement Noise Histogram type */ +#define DOT11_MEASURE_TYPE_BEACON 5 /* d11 measurement Beacon type */ +#define DOT11_MEASURE_TYPE_FRAME 6 /* d11 measurement Frame type */ +#define DOT11_MEASURE_TYPE_STAT 7 /* d11 measurement STA Statistics type */ +#define DOT11_MEASURE_TYPE_LCI 8 /* d11 measurement LCI type */ +#define DOT11_MEASURE_TYPE_TXSTREAM 9 /* d11 measurement TX Stream type */ +#define DOT11_MEASURE_TYPE_MCDIAGS 10 /* d11 measurement multicast diagnostics */ +#define DOT11_MEASURE_TYPE_CIVICLOC 11 /* d11 measurement location civic */ +#define DOT11_MEASURE_TYPE_LOC_ID 12 /* d11 measurement location identifier */ +#define DOT11_MEASURE_TYPE_DIRCHANQ 13 /* d11 measurement dir channel quality */ +#define DOT11_MEASURE_TYPE_DIRMEAS 14 /* d11 measurement directional */ +#define DOT11_MEASURE_TYPE_DIRSTATS 15 /* d11 measurement directional stats */ +#define DOT11_MEASURE_TYPE_FTMRANGE 16 /* d11 measurement Fine Timing */ +#define DOT11_MEASURE_TYPE_PAUSE 255 /* d11 measurement pause type */ + +/* Measurement Request Modes */ +#define DOT11_MEASURE_MODE_PARALLEL (1<<0) /* d11 measurement parallel */ +#define DOT11_MEASURE_MODE_ENABLE (1<<1) /* d11 measurement enable */ +#define DOT11_MEASURE_MODE_REQUEST (1<<2) /* d11 measurement request */ +#define DOT11_MEASURE_MODE_REPORT (1<<3) /* d11 measurement report */ +#define DOT11_MEASURE_MODE_DUR (1<<4) /* d11 measurement dur mandatory */ +/* Measurement Report Modes */ +#define DOT11_MEASURE_MODE_LATE (1<<0) /* d11 measurement late */ +#define DOT11_MEASURE_MODE_INCAPABLE (1<<1) /* d11 measurement incapable */ +#define DOT11_MEASURE_MODE_REFUSED (1<<2) /* d11 measurement refuse */ +/* Basic Measurement Map bits */ +#define DOT11_MEASURE_BASIC_MAP_BSS ((uint8)(1<<0)) /* d11 measurement basic map BSS */ +#define DOT11_MEASURE_BASIC_MAP_OFDM ((uint8)(1<<1)) /* d11 measurement map OFDM */ +#define DOT11_MEASURE_BASIC_MAP_UKNOWN ((uint8)(1<<2)) /* d11 measurement 
map unknown */ +#define DOT11_MEASURE_BASIC_MAP_RADAR ((uint8)(1<<3)) /* d11 measurement map radar */ +#define DOT11_MEASURE_BASIC_MAP_UNMEAS ((uint8)(1<<4)) /* d11 measurement map unmeasured */ + +BWL_PRE_PACKED_STRUCT struct dot11_meas_req { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 channel; + uint8 start_time[8]; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_req dot11_meas_req_t; +#define DOT11_MNG_IE_MREQ_LEN 14 /* d11 measurement request IE length */ +/* length of Measure Request IE data not including variable len */ +#define DOT11_MNG_IE_MREQ_FIXED_LEN 3 /* d11 measurement request IE fixed length */ + +BWL_PRE_PACKED_STRUCT struct dot11_meas_req_loc { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + BWL_PRE_PACKED_STRUCT union + { + BWL_PRE_PACKED_STRUCT struct { + uint8 subject; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT lci; + BWL_PRE_PACKED_STRUCT struct { + uint8 subject; + uint8 type; /* type of civic location */ + uint8 siu; /* service interval units */ + uint16 si; /* service interval */ + uint8 data[1]; + } BWL_POST_PACKED_STRUCT civic; + BWL_PRE_PACKED_STRUCT struct { + uint16 max_init_delay; /* maximum random initial delay */ + uint8 min_ap_count; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT ftm_range; + } BWL_POST_PACKED_STRUCT req; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_req_loc dot11_meas_req_loc_t; +#define DOT11_MNG_IE_MREQ_MIN_LEN 4 /* d11 measurement request IE minimum length */ +#define DOT11_MNG_IE_MREQ_LCI_FIXED_LEN 4 /* d11 measurement request IE fixed length, LCI */ +#define DOT11_MNG_IE_MREQ_CIVIC_FIXED_LEN 8 /* d11 measurement request IE fixed length, civic */ +#define DOT11_MNG_IE_MREQ_FRNG_FIXED_LEN 6 /* d11 measurement request IE fixed length, FTM range */ + +BWL_PRE_PACKED_STRUCT struct dot11_lci_subelement { + uint8 subelement; + uint8 length; + uint8 lci_data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_lci_subelement dot11_lci_subelement_t; + +BWL_PRE_PACKED_STRUCT struct dot11_civic_subelement { + uint8 type; /* type of civic location */ + uint8 subelement; + uint8 length; + uint8 civic_data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_civic_subelement dot11_civic_subelement_t; + +BWL_PRE_PACKED_STRUCT struct dot11_meas_rep { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + BWL_PRE_PACKED_STRUCT union + { + BWL_PRE_PACKED_STRUCT struct { + uint8 channel; + uint8 start_time[8]; + uint16 duration; + uint8 map; + } BWL_POST_PACKED_STRUCT basic; + BWL_PRE_PACKED_STRUCT struct { + uint8 subelement; + uint8 length; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT lci; + BWL_PRE_PACKED_STRUCT struct { + uint8 type; /* type of civic location */ + uint8 subelement; + uint8 length; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT civic; + BWL_PRE_PACKED_STRUCT struct { + uint8 entry_count; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT ftm_range; + uint8 data[1]; + } BWL_POST_PACKED_STRUCT rep; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_rep dot11_meas_rep_t; +#define DOT11_MNG_IE_MREP_MIN_LEN 5 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_LCI_FIXED_LEN 5 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_CIVIC_FIXED_LEN 6 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_BASIC_FIXED_LEN 15 /* d11 measurement report IE length */ +#define DOT11_MNG_IE_MREP_FRNG_FIXED_LEN 4 + +/* length of Measure Report IE data not including variable len */ +#define DOT11_MNG_IE_MREP_FIXED_LEN 3 /* d11 measurement response IE fixed length */ + 
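An editorial sketch (not part of the patch) of how a receiver would dispatch on the dot11_meas_rep_t union above using the DOT11_MEASURE_TYPE_* values; the function name is hypothetical and element-length validation is assumed to have been done by the caller.

/* Dispatch a received Measurement Report element by measurement type. */
static void meas_rep_dispatch(const dot11_meas_rep_t *rep)
{
	switch (rep->type) {
	case DOT11_MEASURE_TYPE_BASIC:
		/* rep->rep.basic: channel, start_time, duration, map */
		break;
	case DOT11_MEASURE_TYPE_LCI:
		/* rep->rep.lci: subelement, length, then LCI data */
		break;
	case DOT11_MEASURE_TYPE_FTMRANGE:
		/* rep->rep.ftm_range: entry_count followed by range entries */
		break;
	default:
		break;
	}
}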
+BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic { + uint8 channel; + uint8 start_time[8]; + uint16 duration; + uint8 map; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t; +#define DOT11_MEASURE_BASIC_REP_LEN 12 /* d11 measurement basic report length */ + +BWL_PRE_PACKED_STRUCT struct dot11_quiet { + uint8 id; + uint8 len; + uint8 count; /* TBTTs until beacon interval in quiet starts */ + uint8 period; /* Beacon intervals between periodic quiet periods ? */ + uint16 duration; /* Length of quiet period, in TU's */ + uint16 offset; /* TU's offset from TBTT in Count field */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_quiet dot11_quiet_t; + +BWL_PRE_PACKED_STRUCT struct chan_map_tuple { + uint8 channel; + uint8 map; +} BWL_POST_PACKED_STRUCT; +typedef struct chan_map_tuple chan_map_tuple_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs { + uint8 id; + uint8 len; + uint8 eaddr[ETHER_ADDR_LEN]; + uint8 interval; + chan_map_tuple_t map[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ibss_dfs dot11_ibss_dfs_t; + +/* WME Elements */ +#define WME_OUI "\x00\x50\xf2" /* WME OUI */ +#define WME_OUI_LEN 3 +#define WME_OUI_TYPE 2 /* WME type */ +#define WME_TYPE 2 /* WME type, deprecated */ +#define WME_SUBTYPE_IE 0 /* Information Element */ +#define WME_SUBTYPE_PARAM_IE 1 /* Parameter Element */ +#define WME_SUBTYPE_TSPEC 2 /* Traffic Specification */ +#define WME_VER 1 /* WME version */ + +/* WME Access Category Indices (ACIs) */ +#define AC_BE 0 /* Best Effort */ +#define AC_BK 1 /* Background */ +#define AC_VI 2 /* Video */ +#define AC_VO 3 /* Voice */ +#define AC_COUNT 4 /* number of ACs */ + +typedef uint8 ac_bitmap_t; /* AC bitmap of (1 << AC_xx) */ + +#define AC_BITMAP_NONE 0x0 /* No ACs */ +#define AC_BITMAP_ALL 0xf /* All ACs */ +#define AC_BITMAP_TST(ab, ac) (((ab) & (1 << (ac))) != 0) +#define AC_BITMAP_SET(ab, ac) (((ab) |= (1 << (ac)))) +#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac)))) + + +/** WME Information Element (IE) */ +BWL_PRE_PACKED_STRUCT struct wme_ie { + uint8 oui[3]; + uint8 type; + uint8 subtype; + uint8 version; + uint8 qosinfo; +} BWL_POST_PACKED_STRUCT; +typedef struct wme_ie wme_ie_t; +#define WME_IE_LEN 7 /* WME IE length */ + +BWL_PRE_PACKED_STRUCT struct edcf_acparam { + uint8 ACI; + uint8 ECW; + uint16 TXOP; /* stored in network order (ls octet first) */ +} BWL_POST_PACKED_STRUCT; +typedef struct edcf_acparam edcf_acparam_t; + +/** WME Parameter Element (PE) */ +BWL_PRE_PACKED_STRUCT struct wme_param_ie { + uint8 oui[3]; + uint8 type; + uint8 subtype; + uint8 version; + uint8 qosinfo; + uint8 rsvd; + edcf_acparam_t acparam[AC_COUNT]; +} BWL_POST_PACKED_STRUCT; +typedef struct wme_param_ie wme_param_ie_t; +#define WME_PARAM_IE_LEN 24 /* WME Parameter IE length */ + +/* QoS Info field for IE as sent from AP */ +#define WME_QI_AP_APSD_MASK 0x80 /* U-APSD Supported mask */ +#define WME_QI_AP_APSD_SHIFT 7 /* U-APSD Supported shift */ +#define WME_QI_AP_COUNT_MASK 0x0f /* Parameter set count mask */ +#define WME_QI_AP_COUNT_SHIFT 0 /* Parameter set count shift */ + +/* QoS Info field for IE as sent from STA */ +#define WME_QI_STA_MAXSPLEN_MASK 0x60 /* Max Service Period Length mask */ +#define WME_QI_STA_MAXSPLEN_SHIFT 5 /* Max Service Period Length shift */ +#define WME_QI_STA_APSD_ALL_MASK 0xf /* APSD all AC bits mask */ +#define WME_QI_STA_APSD_ALL_SHIFT 0 /* APSD all AC bits shift */ +#define WME_QI_STA_APSD_BE_MASK 0x8 /* APSD AC_BE mask */ +#define WME_QI_STA_APSD_BE_SHIFT 3 /* APSD AC_BE shift */ +#define 
WME_QI_STA_APSD_BK_MASK 0x4 /* APSD AC_BK mask */ +#define WME_QI_STA_APSD_BK_SHIFT 2 /* APSD AC_BK shift */ +#define WME_QI_STA_APSD_VI_MASK 0x2 /* APSD AC_VI mask */ +#define WME_QI_STA_APSD_VI_SHIFT 1 /* APSD AC_VI shift */ +#define WME_QI_STA_APSD_VO_MASK 0x1 /* APSD AC_VO mask */ +#define WME_QI_STA_APSD_VO_SHIFT 0 /* APSD AC_VO shift */ + +/* ACI */ +#define EDCF_AIFSN_MIN 1 /* AIFSN minimum value */ +#define EDCF_AIFSN_MAX 15 /* AIFSN maximum value */ +#define EDCF_AIFSN_MASK 0x0f /* AIFSN mask */ +#define EDCF_ACM_MASK 0x10 /* ACM mask */ +#define EDCF_ACI_MASK 0x60 /* ACI mask */ +#define EDCF_ACI_SHIFT 5 /* ACI shift */ +#define EDCF_AIFSN_SHIFT 12 /* 4 MSB(0xFFF) in ifs_ctl for AC idx */ + +/* ECW */ +#define EDCF_ECW_MIN 0 /* cwmin/cwmax exponent minimum value */ +#define EDCF_ECW_MAX 15 /* cwmin/cwmax exponent maximum value */ +#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1) +#define EDCF_ECWMIN_MASK 0x0f /* cwmin exponent form mask */ +#define EDCF_ECWMAX_MASK 0xf0 /* cwmax exponent form mask */ +#define EDCF_ECWMAX_SHIFT 4 /* cwmax exponent form shift */ + +/* TXOP */ +#define EDCF_TXOP_MIN 0 /* TXOP minimum value */ +#define EDCF_TXOP_MAX 65535 /* TXOP maximum value */ +#define EDCF_TXOP2USEC(txop) ((txop) << 5) + +/* Default BE ACI value for non-WME connection STA */ +#define NON_EDCF_AC_BE_ACI_STA 0x02 + +/* Default EDCF parameters that AP advertises for STA to use; WMM draft Table 12 */ +#define EDCF_AC_BE_ACI_STA 0x03 /* STA ACI value for best effort AC */ +#define EDCF_AC_BE_ECW_STA 0xA4 /* STA ECW value for best effort AC */ +#define EDCF_AC_BE_TXOP_STA 0x0000 /* STA TXOP value for best effort AC */ +#define EDCF_AC_BK_ACI_STA 0x27 /* STA ACI value for background AC */ +#define EDCF_AC_BK_ECW_STA 0xA4 /* STA ECW value for background AC */ +#define EDCF_AC_BK_TXOP_STA 0x0000 /* STA TXOP value for background AC */ +#define EDCF_AC_VI_ACI_STA 0x42 /* STA ACI value for video AC */ +#define EDCF_AC_VI_ECW_STA 0x43 /* STA ECW value for video AC */ +#define EDCF_AC_VI_TXOP_STA 0x005e /* STA TXOP value for video AC */ +#define EDCF_AC_VO_ACI_STA 0x62 /* STA ACI value for audio AC */ +#define EDCF_AC_VO_ECW_STA 0x32 /* STA ECW value for audio AC */ +#define EDCF_AC_VO_TXOP_STA 0x002f /* STA TXOP value for audio AC */ + +/* Default EDCF parameters that AP uses; WMM draft Table 14 */ +#define EDCF_AC_BE_ACI_AP 0x03 /* AP ACI value for best effort AC */ +#define EDCF_AC_BE_ECW_AP 0x64 /* AP ECW value for best effort AC */ +#define EDCF_AC_BE_TXOP_AP 0x0000 /* AP TXOP value for best effort AC */ +#define EDCF_AC_BK_ACI_AP 0x27 /* AP ACI value for background AC */ +#define EDCF_AC_BK_ECW_AP 0xA4 /* AP ECW value for background AC */ +#define EDCF_AC_BK_TXOP_AP 0x0000 /* AP TXOP value for background AC */ +#define EDCF_AC_VI_ACI_AP 0x41 /* AP ACI value for video AC */ +#define EDCF_AC_VI_ECW_AP 0x43 /* AP ECW value for video AC */ +#define EDCF_AC_VI_TXOP_AP 0x005e /* AP TXOP value for video AC */ +#define EDCF_AC_VO_ACI_AP 0x61 /* AP ACI value for audio AC */ +#define EDCF_AC_VO_ECW_AP 0x32 /* AP ECW value for audio AC */ +#define EDCF_AC_VO_TXOP_AP 0x002f /* AP TXOP value for audio AC */ + +/** EDCA Parameter IE */ +BWL_PRE_PACKED_STRUCT struct edca_param_ie { + uint8 qosinfo; + uint8 rsvd; + edcf_acparam_t acparam[AC_COUNT]; +} BWL_POST_PACKED_STRUCT; +typedef struct edca_param_ie edca_param_ie_t; +#define EDCA_PARAM_IE_LEN 18 /* EDCA Parameter IE length */ + +/** QoS Capability IE */ +BWL_PRE_PACKED_STRUCT struct qos_cap_ie { + uint8 qosinfo; +} BWL_POST_PACKED_STRUCT; +typedef 
struct qos_cap_ie qos_cap_ie_t; + +BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie { + uint8 id; /* 11, DOT11_MNG_QBSS_LOAD_ID */ + uint8 length; + uint16 station_count; /* total number of STAs associated */ + uint8 channel_utilization; /* % of time, normalized to 255, QAP sensed medium busy */ + uint16 aac; /* available admission capacity */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t; +#define BSS_LOAD_IE_SIZE 7 /* BSS load IE size */ + +#define WLC_QBSS_LOAD_CHAN_FREE_MAX 0xff /* max for channel free score */ + +/* nom_msdu_size */ +#define FIXED_MSDU_SIZE 0x8000 /* MSDU size is fixed */ +#define MSDU_SIZE_MASK 0x7fff /* (Nominal or fixed) MSDU size */ + +/* surplus_bandwidth */ +/* Represented as 3 bits of integer, binary point, 13 bits fraction */ +#define INTEGER_SHIFT 13 /* integer shift */ +#define FRACTION_MASK 0x1FFF /* fraction mask */ + +/** Management Notification Frame */ +BWL_PRE_PACKED_STRUCT struct dot11_management_notification { + uint8 category; /* DOT11_ACTION_NOTIFICATION */ + uint8 action; + uint8 token; + uint8 status; + uint8 data[1]; /* Elements */ +} BWL_POST_PACKED_STRUCT; +#define DOT11_MGMT_NOTIFICATION_LEN 4 /* Fixed length */ + +/** Timeout Interval IE */ +BWL_PRE_PACKED_STRUCT struct ti_ie { + uint8 ti_type; + uint32 ti_val; +} BWL_POST_PACKED_STRUCT; +typedef struct ti_ie ti_ie_t; +#define TI_TYPE_REASSOC_DEADLINE 1 +#define TI_TYPE_KEY_LIFETIME 2 + +/* WME Action Codes */ +#define WME_ADDTS_REQUEST 0 /* WME ADDTS request */ +#define WME_ADDTS_RESPONSE 1 /* WME ADDTS response */ +#define WME_DELTS_REQUEST 2 /* WME DELTS request */ + +/* WME Setup Response Status Codes */ +#define WME_ADMISSION_ACCEPTED 0 /* WME admission accepted */ +#define WME_INVALID_PARAMETERS 1 /* WME invalid parameters */ +#define WME_ADMISSION_REFUSED 3 /* WME admission refused */ + +/* Macro to take a pointer to a beacon or probe response + * body and return the char* pointer to the SSID info element + */ +#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN) + +/* Authentication frame payload constants */ +#define DOT11_OPEN_SYSTEM 0 /* d11 open authentication */ +#define DOT11_SHARED_KEY 1 /* d11 shared authentication */ +#define DOT11_FAST_BSS 2 /* d11 fast bss authentication */ +#define DOT11_CHALLENGE_LEN 128 /* d11 challenge text length */ + +/* Frame control macros */ +#define FC_PVER_MASK 0x3 /* PVER mask */ +#define FC_PVER_SHIFT 0 /* PVER shift */ +#define FC_TYPE_MASK 0xC /* type mask */ +#define FC_TYPE_SHIFT 2 /* type shift */ +#define FC_SUBTYPE_MASK 0xF0 /* subtype mask */ +#define FC_SUBTYPE_SHIFT 4 /* subtype shift */ +#define FC_TODS 0x100 /* to DS */ +#define FC_TODS_SHIFT 8 /* to DS shift */ +#define FC_FROMDS 0x200 /* from DS */ +#define FC_FROMDS_SHIFT 9 /* from DS shift */ +#define FC_MOREFRAG 0x400 /* more frag. */ +#define FC_MOREFRAG_SHIFT 10 /* more frag. shift */ +#define FC_RETRY 0x800 /* retry */ +#define FC_RETRY_SHIFT 11 /* retry shift */ +#define FC_PM 0x1000 /* PM */ +#define FC_PM_SHIFT 12 /* PM shift */ +#define FC_MOREDATA 0x2000 /* more data */ +#define FC_MOREDATA_SHIFT 13 /* more data shift */ +#define FC_WEP 0x4000 /* WEP */ +#define FC_WEP_SHIFT 14 /* WEP shift */ +#define FC_ORDER 0x8000 /* order */ +#define FC_ORDER_SHIFT 15 /* order shift */ + +/* sequence control macros */ +#define SEQNUM_SHIFT 4 /* seq. number shift */ +#define SEQNUM_MAX 0x1000 /* max seqnum + 1 */ +#define FRAGNUM_MASK 0xF /* frag. 
number mask */
+
+/* Frame Control type/subtype defs */
+
+/* FC Types */
+#define FC_TYPE_MNG 0 /* management type */
+#define FC_TYPE_CTL 1 /* control type */
+#define FC_TYPE_DATA 2 /* data type */
+
+/* Management Subtypes */
+#define FC_SUBTYPE_ASSOC_REQ 0 /* assoc. request */
+#define FC_SUBTYPE_ASSOC_RESP 1 /* assoc. response */
+#define FC_SUBTYPE_REASSOC_REQ 2 /* reassoc. request */
+#define FC_SUBTYPE_REASSOC_RESP 3 /* reassoc. response */
+#define FC_SUBTYPE_PROBE_REQ 4 /* probe request */
+#define FC_SUBTYPE_PROBE_RESP 5 /* probe response */
+#define FC_SUBTYPE_BEACON 8 /* beacon */
+#define FC_SUBTYPE_ATIM 9 /* ATIM */
+#define FC_SUBTYPE_DISASSOC 10 /* disassoc. */
+#define FC_SUBTYPE_AUTH 11 /* authentication */
+#define FC_SUBTYPE_DEAUTH 12 /* de-authentication */
+#define FC_SUBTYPE_ACTION 13 /* action */
+#define FC_SUBTYPE_ACTION_NOACK 14 /* action no-ack */
+
+/* Control Subtypes */
+#define FC_SUBTYPE_CTL_WRAPPER 7 /* Control Wrapper */
+#define FC_SUBTYPE_BLOCKACK_REQ 8 /* Block Ack Req */
+#define FC_SUBTYPE_BLOCKACK 9 /* Block Ack */
+#define FC_SUBTYPE_PS_POLL 10 /* PS poll */
+#define FC_SUBTYPE_RTS 11 /* RTS */
+#define FC_SUBTYPE_CTS 12 /* CTS */
+#define FC_SUBTYPE_ACK 13 /* ACK */
+#define FC_SUBTYPE_CF_END 14 /* CF-END */
+#define FC_SUBTYPE_CF_END_ACK 15 /* CF-END ACK */
+
+/* Data Subtypes */
+#define FC_SUBTYPE_DATA 0 /* Data */
+#define FC_SUBTYPE_DATA_CF_ACK 1 /* Data + CF-ACK */
+#define FC_SUBTYPE_DATA_CF_POLL 2 /* Data + CF-Poll */
+#define FC_SUBTYPE_DATA_CF_ACK_POLL 3 /* Data + CF-Ack + CF-Poll */
+#define FC_SUBTYPE_NULL 4 /* Null */
+#define FC_SUBTYPE_CF_ACK 5 /* CF-Ack */
+#define FC_SUBTYPE_CF_POLL 6 /* CF-Poll */
+#define FC_SUBTYPE_CF_ACK_POLL 7 /* CF-Ack + CF-Poll */
+#define FC_SUBTYPE_QOS_DATA 8 /* QoS Data */
+#define FC_SUBTYPE_QOS_DATA_CF_ACK 9 /* QoS Data + CF-Ack */
+#define FC_SUBTYPE_QOS_DATA_CF_POLL 10 /* QoS Data + CF-Poll */
+#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL 11 /* QoS Data + CF-Ack + CF-Poll */
+#define FC_SUBTYPE_QOS_NULL 12 /* QoS Null */
+#define FC_SUBTYPE_QOS_CF_POLL 14 /* QoS CF-Poll */
+#define FC_SUBTYPE_QOS_CF_ACK_POLL 15 /* QoS CF-Ack + CF-Poll */
+
+/* Data Subtype Groups */
+#define FC_SUBTYPE_ANY_QOS(s) (((s) & 8) != 0)
+#define FC_SUBTYPE_ANY_NULL(s) (((s) & 4) != 0)
+#define FC_SUBTYPE_ANY_CF_POLL(s) (((s) & 2) != 0)
+#define FC_SUBTYPE_ANY_CF_ACK(s) (((s) & 1) != 0)
+#define FC_SUBTYPE_ANY_PSPOLL(s) (((s) & 10) != 0)
+
+/* Type/Subtype Combos */
+#define FC_KIND_MASK (FC_TYPE_MASK | FC_SUBTYPE_MASK) /* FC kind mask */
+
+#define FC_KIND(t, s) (((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT)) /* FC kind */
+
+#define FC_SUBTYPE(fc) (((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT) /* Subtype from FC */
+#define FC_TYPE(fc) (((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT) /* Type from FC */
+
+#define FC_ASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ) /* assoc. request */
+#define FC_ASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP) /* assoc. response */
+#define FC_REASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ) /* reassoc. request */
+#define FC_REASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP) /* reassoc.
response */
+#define FC_PROBE_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ) /* probe request */
+#define FC_PROBE_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP) /* probe response */
+#define FC_BEACON FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON) /* beacon */
+#define FC_ATIM FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ATIM) /* ATIM */
+#define FC_DISASSOC FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC) /* disassoc */
+#define FC_AUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH) /* authentication */
+#define FC_DEAUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH) /* deauthentication */
+#define FC_ACTION FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION) /* action */
+#define FC_ACTION_NOACK FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK) /* action no-ack */
+
+#define FC_CTL_WRAPPER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER) /* Control Wrapper */
+#define FC_BLOCKACK_REQ FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ) /* Block Ack Req */
+#define FC_BLOCKACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK) /* Block Ack */
+#define FC_PS_POLL FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL) /* PS poll */
+#define FC_RTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS) /* RTS */
+#define FC_CTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS) /* CTS */
+#define FC_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK) /* ACK */
+#define FC_CF_END FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END) /* CF-END */
+#define FC_CF_END_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK) /* CF-END ACK */
+
+#define FC_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA) /* data */
+#define FC_NULL_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL) /* null data */
+#define FC_DATA_CF_ACK FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK) /* data CF ACK */
+#define FC_QOS_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA) /* QoS data */
+#define FC_QOS_NULL FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL) /* QoS null */
+
+/* QoS Control Field */
+
+/* 802.1D Priority */
+#define QOS_PRIO_SHIFT 0 /* QoS priority shift */
+#define QOS_PRIO_MASK 0x0007 /* QoS priority mask */
+#define QOS_PRIO(qos) (((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT) /* QoS priority */
+
+/* Traffic Identifier */
+#define QOS_TID_SHIFT 0 /* QoS TID shift */
+#define QOS_TID_MASK 0x000f /* QoS TID mask */
+#define QOS_TID(qos) (((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT) /* QoS TID */
+
+/* End of Service Period (U-APSD) */
+#define QOS_EOSP_SHIFT 4 /* QoS End of Service Period shift */
+#define QOS_EOSP_MASK 0x0010 /* QoS End of Service Period mask */
+#define QOS_EOSP(qos) (((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT) /* QoS EOSP */
+
+/* Ack Policy */
+#define QOS_ACK_NORMAL_ACK 0 /* Normal Ack */
+#define QOS_ACK_NO_ACK 1 /* No Ack (e.g. mcast) */
+#define QOS_ACK_NO_EXP_ACK 2 /* No Explicit Ack */
+#define QOS_ACK_BLOCK_ACK 3 /* Block Ack */
+#define QOS_ACK_SHIFT 5 /* QoS ACK shift */
+#define QOS_ACK_MASK 0x0060 /* QoS ACK mask */
+#define QOS_ACK(qos) (((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT) /* QoS ACK */
+
+/* A-MSDU flag */
+#define QOS_AMSDU_SHIFT 7 /* AMSDU shift */
+#define QOS_AMSDU_MASK 0x0080 /* AMSDU mask */
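+
+/*
+ * Usage sketch (illustrative only, not part of the original header): 'fc' and
+ * 'qos' are assumed to be host-order uint16 values read from a received MAC
+ * header.
+ *
+ *	if ((fc & FC_KIND_MASK) == FC_QOS_DATA) {
+ *		uint8 tid = QOS_TID(qos);
+ *		uint8 ack_policy = QOS_ACK(qos);          // e.g. QOS_ACK_BLOCK_ACK
+ *		bool eosp = QOS_EOSP(qos) != 0;
+ *		bool amsdu = (qos & QOS_AMSDU_MASK) != 0;
+ *	}
+ */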
+
+/* Management Frames */
+
+/* Management Frame Constants */
+
+/* Fixed fields */
+#define DOT11_MNG_AUTH_ALGO_LEN 2 /* d11 management auth. algo. length */
+#define DOT11_MNG_AUTH_SEQ_LEN 2 /* d11 management auth. seq. length */
+#define DOT11_MNG_BEACON_INT_LEN 2 /* d11 management beacon interval length */
+#define DOT11_MNG_CAP_LEN 2 /* d11 management cap. length */
+#define DOT11_MNG_AP_ADDR_LEN 6 /* d11 management AP address length */
+#define DOT11_MNG_LISTEN_INT_LEN 2 /* d11 management listen interval length */
+#define DOT11_MNG_REASON_LEN 2 /* d11 management reason length */
+#define DOT11_MNG_AID_LEN 2 /* d11 management AID length */
+#define DOT11_MNG_STATUS_LEN 2 /* d11 management status length */
+#define DOT11_MNG_TIMESTAMP_LEN 8 /* d11 management timestamp length */
+
+/* DUR/ID field in assoc resp is 0xc000 | AID */
+#define DOT11_AID_MASK 0x3fff /* d11 AID mask */
+
+/* Reason Codes */
+#define DOT11_RC_RESERVED 0 /* d11 RC reserved */
+#define DOT11_RC_UNSPECIFIED 1 /* Unspecified reason */
+#define DOT11_RC_AUTH_INVAL 2 /* Previous authentication no longer valid */
+#define DOT11_RC_DEAUTH_LEAVING 3 /* Deauthenticated because sending station
+ * is leaving (or has left) IBSS or ESS
+ */
+#define DOT11_RC_INACTIVITY 4 /* Disassociated due to inactivity */
+#define DOT11_RC_BUSY 5 /* Disassociated because AP is unable to handle
+ * all currently associated stations
+ */
+#define DOT11_RC_INVAL_CLASS_2 6 /* Class 2 frame received from
+ * nonauthenticated station
+ */
+#define DOT11_RC_INVAL_CLASS_3 7 /* Class 3 frame received from
+ * nonassociated station
+ */
+#define DOT11_RC_DISASSOC_LEAVING 8 /* Disassociated because sending station is
+ * leaving (or has left) BSS
+ */
+#define DOT11_RC_NOT_AUTH 9 /* Station requesting (re)association is not
+ * authenticated with responding station
+ */
+#define DOT11_RC_BAD_PC 10 /* Unacceptable power capability element */
+#define DOT11_RC_BAD_CHANNELS 11 /* Unacceptable supported channels element */
+
+/* 12 is unused by STA but could be used by AP/GO */
+#define DOT11_RC_DISASSOC_BTM 12 /* Disassociated due to BSS Transition Mgmt */
+
+/* 32-39 are QSTA specific reasons added in 11e */
+#define DOT11_RC_UNSPECIFIED_QOS 32 /* unspecified QoS-related reason */
+#define DOT11_RC_INSUFFCIENT_BW 33 /* QAP lacks sufficient bandwidth */
+#define DOT11_RC_EXCESSIVE_FRAMES 34 /* excessive number of frames need ack */
+#define DOT11_RC_TX_OUTSIDE_TXOP 35 /* transmitting outside the limits of txop */
+#define DOT11_RC_LEAVING_QBSS 36 /* QSTA is leaving the QBSS (or resetting) */
+#define DOT11_RC_BAD_MECHANISM 37 /* does not want to use the mechanism */
+#define DOT11_RC_SETUP_NEEDED 38 /* mechanism needs a setup */
+#define DOT11_RC_TIMEOUT 39 /* timeout */
+
+#define DOT11_RC_MESH_PEERING_CANCELLED 52
+#define DOT11_RC_MESH_MAX_PEERS 53
+#define DOT11_RC_MESH_CONFIG_POLICY_VIOLN 54
+#define DOT11_RC_MESH_CLOSE_RECVD 55
+#define DOT11_RC_MESH_MAX_RETRIES 56
+#define DOT11_RC_MESH_CONFIRM_TIMEOUT 57
+#define DOT11_RC_MESH_INVALID_GTK 58
+#define DOT11_RC_MESH_INCONSISTENT_PARAMS 59
+
+#define DOT11_RC_MESH_INVALID_SEC_CAP 60
+#define DOT11_RC_MESH_PATHERR_NOPROXYINFO 61
+#define DOT11_RC_MESH_PATHERR_NOFWINFO 62
+#define DOT11_RC_MESH_PATHERR_DSTUNREACH 63
+#define DOT11_RC_MESH_MBSSMAC_EXISTS 64
+#define DOT11_RC_MESH_CHANSWITCH_REGREQ 65
+#define DOT11_RC_MESH_CHANSWITCH_UNSPEC 66
+
+#define DOT11_RC_MAX 66 /* Reason codes > 66 are reserved */
+
+#define DOT11_RC_TDLS_PEER_UNREACH 25
+#define DOT11_RC_TDLS_DOWN_UNSPECIFIED 26
+
+/* Status Codes */
+#define DOT11_SC_SUCCESS 0 /* Successful */
+#define DOT11_SC_FAILURE 1 /* Unspecified failure */
+#define DOT11_SC_TDLS_WAKEUP_SCH_ALT 2 /* TDLS wakeup schedule rejected but alternative */
+ /* schedule provided */
+#define DOT11_SC_TDLS_WAKEUP_SCH_REJ 3 /* TDLS wakeup schedule rejected */
+#define DOT11_SC_TDLS_SEC_DISABLED 5 /* TDLS Security
disabled */
+#define DOT11_SC_LIFETIME_REJ 6 /* Unacceptable lifetime */
+#define DOT11_SC_NOT_SAME_BSS 7 /* Not in same BSS */
+#define DOT11_SC_CAP_MISMATCH 10 /* Cannot support all requested
+ * capabilities in the Capability
+ * Information field
+ */
+#define DOT11_SC_REASSOC_FAIL 11 /* Reassociation denied due to inability
+ * to confirm that association exists
+ */
+#define DOT11_SC_ASSOC_FAIL 12 /* Association denied due to reason
+ * outside the scope of this standard
+ */
+#define DOT11_SC_AUTH_MISMATCH 13 /* Responding station does not support
+ * the specified authentication
+ * algorithm
+ */
+#define DOT11_SC_AUTH_SEQ 14 /* Received an Authentication frame
+ * with authentication transaction
+ * sequence number out of expected
+ * sequence
+ */
+#define DOT11_SC_AUTH_CHALLENGE_FAIL 15 /* Authentication rejected because of
+ * challenge failure
+ */
+#define DOT11_SC_AUTH_TIMEOUT 16 /* Authentication rejected due to timeout
+ * waiting for next frame in sequence
+ */
+#define DOT11_SC_ASSOC_BUSY_FAIL 17 /* Association denied because AP is
+ * unable to handle additional
+ * associated stations
+ */
+#define DOT11_SC_ASSOC_RATE_MISMATCH 18 /* Association denied due to requesting
+ * station not supporting all of the
+ * data rates in the BSSBasicRateSet
+ * parameter
+ */
+#define DOT11_SC_ASSOC_SHORT_REQUIRED 19 /* Association denied due to requesting
+ * station not supporting the Short
+ * Preamble option
+ */
+#define DOT11_SC_ASSOC_PBCC_REQUIRED 20 /* Association denied due to requesting
+ * station not supporting the PBCC
+ * Modulation option
+ */
+#define DOT11_SC_ASSOC_AGILITY_REQUIRED 21 /* Association denied due to requesting
+ * station not supporting the Channel
+ * Agility option
+ */
+#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED 22 /* Association denied because Spectrum
+ * Management capability is required.
+ */
+#define DOT11_SC_ASSOC_BAD_POWER_CAP 23 /* Association denied because the info
+ * in the Power Cap element is
+ * unacceptable.
+ */
+#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS 24 /* Association denied because the info
+ * in the Supported Channel element is
+ * unacceptable
+ */
+#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED 25 /* Association denied due to requesting
+ * station not supporting the Short Slot
+ * Time option
+ */
+#define DOT11_SC_ASSOC_DSSSOFDM_REQUIRED 26 /* Association denied because requesting station
+ * does not support the DSSS-OFDM option
+ */
+#define DOT11_SC_ASSOC_HT_REQUIRED 27 /* Association denied because the requesting
+ * station does not support HT features
+ */
+#define DOT11_SC_ASSOC_R0KH_UNREACHABLE 28 /* Association denied due to AP
+ * being unable to reach the R0 Key Holder
+ */
+#define DOT11_SC_ASSOC_TRY_LATER 30 /* Association denied temporarily, try again later
+ */
+#define DOT11_SC_ASSOC_MFP_VIOLATION 31 /* Association denied due to Robust Management
+ * frame policy violation
+ */
+
+#define DOT11_SC_DECLINED 37 /* request declined */
+#define DOT11_SC_INVALID_PARAMS 38 /* One or more params have invalid values */
+#define DOT11_SC_INVALID_PAIRWISE_CIPHER 42 /* invalid pairwise cipher */
+#define DOT11_SC_INVALID_AKMP 43 /* Association denied due to invalid AKMP */
+#define DOT11_SC_INVALID_RSNIE_CAP 45 /* invalid RSN IE capabilities */
+#define DOT11_SC_DLS_NOT_ALLOWED 48 /* DLS is not allowed in the BSS by policy */
+#define DOT11_SC_INVALID_PMKID 53 /* Association denied due to invalid PMKID */
+#define DOT11_SC_INVALID_MDID 54 /* Association denied due to invalid MDID */
+#define DOT11_SC_INVALID_FTIE 55 /* Association denied due to invalid FTIE */
+
+#define DOT11_SC_ADV_PROTO_NOT_SUPPORTED 59 /* ad proto not supported */
+#define DOT11_SC_NO_OUTSTAND_REQ 60 /* no outstanding req */
+#define DOT11_SC_RSP_NOT_RX_FROM_SERVER 61 /* no response from server */
+#define DOT11_SC_TIMEOUT 62 /* timeout */
+#define DOT11_SC_QUERY_RSP_TOO_LARGE 63 /* query rsp too large */
+#define DOT11_SC_SERVER_UNREACHABLE 65 /* server unreachable */
+
+#define DOT11_SC_UNEXP_MSG 70 /* Unexpected message */
+#define DOT11_SC_INVALID_SNONCE 71 /* Invalid SNonce */
+#define DOT11_SC_INVALID_RSNIE 72 /* Invalid contents of RSNIE */
+
+#define DOT11_SC_ANTICLOG_TOCKEN_REQUIRED 76 /* Anti-clogging token required */
+#define DOT11_SC_INVALID_FINITE_CYCLIC_GRP 77 /* Invalid finite cyclic group */
+
+#define DOT11_SC_ASSOC_VHT_REQUIRED 104 /* Association denied because the requesting
+ * station does not support VHT features.
+ */
+
+#define DOT11_SC_TRANSMIT_FAILURE 79 /* transmission failure */
+
+/* Info Elts, length of INFORMATION portion of Info Elts */
+#define DOT11_MNG_DS_PARAM_LEN 1 /* d11 management DS parameter length */
+#define DOT11_MNG_IBSS_PARAM_LEN 2 /* d11 management IBSS parameter length */
+
+/* TIM Info element has 3 bytes fixed info in INFORMATION field,
+ * followed by 1 to 251 bytes of Partial Virtual Bitmap
+ */
+#define DOT11_MNG_TIM_FIXED_LEN 3 /* d11 management TIM fixed length */
+#define DOT11_MNG_TIM_DTIM_COUNT 0 /* d11 management DTIM count */
+#define DOT11_MNG_TIM_DTIM_PERIOD 1 /* d11 management DTIM period */
+#define DOT11_MNG_TIM_BITMAP_CTL 2 /* d11 management TIM BITMAP control */
+#define DOT11_MNG_TIM_PVB 3 /* d11 management TIM PVB */
+
+/* TLV defines */
+#define TLV_TAG_OFF 0 /* tag offset */
+#define TLV_LEN_OFF 1 /* length offset */
+#define TLV_HDR_LEN 2 /* header length */
+#define TLV_BODY_OFF 2 /* body offset */
+#define TLV_BODY_LEN_MAX 255 /* max body length */
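+
+/*
+ * Usage sketch (illustrative only, not part of the original header): walking
+ * the tagged elements of a beacon or probe response body with the TLV offsets
+ * above. 'body' and 'body_len' are assumed inputs; DOT11_BCN_PRB_LEN is the
+ * fixed-field length defined earlier in this header.
+ *
+ *	uint8 *ie = (uint8 *)body + DOT11_BCN_PRB_LEN;
+ *	int remaining = body_len - DOT11_BCN_PRB_LEN;
+ *	while (remaining >= TLV_HDR_LEN) {
+ *		uint8 ie_len = ie[TLV_LEN_OFF];
+ *		if (remaining < TLV_HDR_LEN + ie_len)
+ *			break;                            // malformed element
+ *		if (ie[TLV_TAG_OFF] == DOT11_MNG_SSID_ID)
+ *			break;                            // ie + TLV_BODY_OFF -> SSID octets
+ *		remaining -= TLV_HDR_LEN + ie_len;
+ *		ie += TLV_HDR_LEN + ie_len;
+ *	}
+ */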
+
+/* Management Frame Information Element IDs */
+#define DOT11_MNG_SSID_ID 0 /* d11 management SSID id */
+#define DOT11_MNG_RATES_ID 1 /* d11 management rates id */
+#define DOT11_MNG_FH_PARMS_ID 2 /* d11 management FH parameter id */
+#define DOT11_MNG_DS_PARMS_ID 3 /* d11 management DS parameter id */
+#define DOT11_MNG_CF_PARMS_ID 4 /* d11 management CF parameter id */
+#define DOT11_MNG_TIM_ID 5 /* d11 management TIM id */
+#define DOT11_MNG_IBSS_PARMS_ID 6 /* d11 management IBSS parameter id */
+#define DOT11_MNG_COUNTRY_ID 7 /* d11 management country id */
+#define DOT11_MNG_HOPPING_PARMS_ID 8 /* d11 management hopping parameter id */
+#define DOT11_MNG_HOPPING_TABLE_ID 9 /* d11 management hopping table id */
+#define DOT11_MNG_REQUEST_ID 10 /* d11 management request id */
+#define DOT11_MNG_QBSS_LOAD_ID 11 /* d11 management QBSS Load id */
+#define DOT11_MNG_EDCA_PARAM_ID 12 /* 11E EDCA Parameter id */
+#define DOT11_MNG_TSPEC_ID 13 /* d11 management TSPEC id */
+#define DOT11_MNG_TCLAS_ID 14 /* d11 management TCLAS id */
+#define DOT11_MNG_CHALLENGE_ID 16 /* d11 management challenge id */
+#define DOT11_MNG_PWR_CONSTRAINT_ID 32 /* 11H PowerConstraint */
+#define DOT11_MNG_PWR_CAP_ID 33 /* 11H PowerCapability */
+#define DOT11_MNG_TPC_REQUEST_ID 34 /* 11H TPC Request */
+#define DOT11_MNG_TPC_REPORT_ID 35 /* 11H TPC Report */
+#define DOT11_MNG_SUPP_CHANNELS_ID 36 /* 11H Supported Channels */
+#define DOT11_MNG_CHANNEL_SWITCH_ID 37 /* 11H ChannelSwitch Announcement */
+#define DOT11_MNG_MEASURE_REQUEST_ID 38 /* 11H MeasurementRequest */
+#define DOT11_MNG_MEASURE_REPORT_ID 39 /* 11H MeasurementReport */
+#define DOT11_MNG_QUIET_ID 40 /* 11H Quiet */
+#define DOT11_MNG_IBSS_DFS_ID 41 /* 11H IBSS_DFS */
+#define DOT11_MNG_ERP_ID 42 /* d11 management ERP id */
+#define DOT11_MNG_TS_DELAY_ID 43 /* d11 management TS Delay id */
+#define DOT11_MNG_TCLAS_PROC_ID 44 /* d11 management TCLAS processing id */
+#define DOT11_MNG_HT_CAP 45 /* d11 mgmt HT cap id */
+#define DOT11_MNG_QOS_CAP_ID 46 /* 11E QoS Capability id */
+#define DOT11_MNG_NONERP_ID 47 /* d11 management NON-ERP id */
+#define DOT11_MNG_RSN_ID 48 /* d11 management RSN id */
+#define DOT11_MNG_EXT_RATES_ID 50 /* d11 management ext. rates id */
+#define DOT11_MNG_AP_CHREP_ID 51 /* 11k AP Channel report id */
+#define DOT11_MNG_NEIGHBOR_REP_ID 52 /* 11k & 11v Neighbor report id */
+#define DOT11_MNG_RCPI_ID 53 /* 11k RCPI */
+#define DOT11_MNG_MDIE_ID 54 /* 11r Mobility domain id */
+#define DOT11_MNG_FTIE_ID 55 /* 11r Fast Bss Transition id */
+#define DOT11_MNG_FT_TI_ID 56 /* 11r Timeout Interval id */
+#define DOT11_MNG_RDE_ID 57 /* 11r RIC Data Element id */
+#define DOT11_MNG_REGCLASS_ID 59 /* d11 management regulatory class id */
+#define DOT11_MNG_EXT_CSA_ID 60 /* d11 Extended CSA */
+#define DOT11_MNG_HT_ADD 61 /* d11 mgmt additional HT info */
+#define DOT11_MNG_EXT_CHANNEL_OFFSET 62 /* d11 mgmt ext channel offset */
+#define DOT11_MNG_BSS_AVR_ACCESS_DELAY_ID 63 /* 11k bss average access delay */
+#define DOT11_MNG_ANTENNA_ID 64 /* 11k antenna id */
+#define DOT11_MNG_RSNI_ID 65 /* 11k RSNI id */
+#define DOT11_MNG_MEASUREMENT_PILOT_TX_ID 66 /* 11k measurement pilot tx info id */
+#define DOT11_MNG_BSS_AVAL_ADMISSION_CAP_ID 67 /* 11k bss aval admission cap id */
+#define DOT11_MNG_BSS_AC_ACCESS_DELAY_ID 68 /* 11k bss AC access delay id */
+#define DOT11_MNG_WAPI_ID 68 /* d11 management WAPI id */
+#define DOT11_MNG_TIME_ADVERTISE_ID 69 /* 11p time advertisement */
+#define DOT11_MNG_RRM_CAP_ID 70 /* 11k radio measurement capability */
+#define DOT11_MNG_MULTIPLE_BSSID_ID 71 /* 11k multiple BSSID id */
+#define DOT11_MNG_HT_BSS_COEXINFO_ID 72 /* d11 mgmt OBSS Coexistence INFO */
+#define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID 73 /* d11 mgmt OBSS Intolerant Channel list */
+#define DOT11_MNG_HT_OBSS_ID 74 /* d11 mgmt OBSS HT info */
+#define DOT11_MNG_MMIE_ID 76 /* d11 mgmt MIC IE */
+#define DOT11_MNG_FMS_DESCR_ID 86 /* 11v FMS descriptor */
+#define DOT11_MNG_FMS_REQ_ID 87 /* 11v FMS request id */
+#define DOT11_MNG_FMS_RESP_ID 88 /* 11v FMS response id */
+#define DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID 90 /* 11v bss max idle id */
+#define DOT11_MNG_TFS_REQUEST_ID 91 /* 11v tfs request id */
+#define DOT11_MNG_TFS_RESPONSE_ID 92 /* 11v tfs response id */
+#define DOT11_MNG_WNM_SLEEP_MODE_ID 93 /* 11v wnm-sleep mode id */
+#define DOT11_MNG_TIMBC_REQ_ID 94 /* 11v TIM broadcast request id */
+#define DOT11_MNG_TIMBC_RESP_ID 95 /* 11v TIM broadcast response id */
+#define DOT11_MNG_CHANNEL_USAGE 97 /* 11v channel usage */
+#define DOT11_MNG_TIME_ZONE_ID 98 /* 11v time zone */
+#define DOT11_MNG_DMS_REQUEST_ID 99 /* 11v dms request id */
+#define DOT11_MNG_DMS_RESPONSE_ID 100 /* 11v dms response id */
+#define DOT11_MNG_LINK_IDENTIFIER_ID 101 /* 11z TDLS Link Identifier IE */
+#define DOT11_MNG_WAKEUP_SCHEDULE_ID 102 /* 11z TDLS Wakeup Schedule IE */
+#define DOT11_MNG_CHANNEL_SWITCH_TIMING_ID 104 /* 11z TDLS Channel Switch Timing IE */
+#define DOT11_MNG_PTI_CONTROL_ID 105 /* 11z TDLS PTI Control IE */
+#define DOT11_MNG_PU_BUFFER_STATUS_ID 106 /* 11z TDLS PU Buffer Status IE */
+#define DOT11_MNG_INTERWORKING_ID 107 /* 11u interworking */
+#define DOT11_MNG_ADVERTISEMENT_ID 108 /* 11u advertisement protocol */
+#define DOT11_MNG_EXP_BW_REQ_ID 109 /* 11u expedited bandwidth request */
+#define DOT11_MNG_QOS_MAP_ID 110 /* 11u QoS map set */
+#define DOT11_MNG_ROAM_CONSORT_ID 111 /* 11u roaming consortium */
+#define DOT11_MNG_EMERGCY_ALERT_ID 112 /* 11u emergency alert identifier */
+#define DOT11_MNG_MESH_CONFIG 113 /* Mesh Configuration */
+#define DOT11_MNG_MESH_ID 114 /* Mesh ID */
+#define DOT11_MNG_MESH_PEER_MGMT_ID 117 /* Mesh PEER MGMT IE */
+
+#define DOT11_MNG_EXT_CAP_ID 127 /* d11 mgmt ext capability */
+#define DOT11_MNG_EXT_PREQ_ID 130 /* Mesh PREQ IE */
+#define DOT11_MNG_EXT_PREP_ID 131 /* Mesh PREP IE */
+#define DOT11_MNG_EXT_PERR_ID 132 /* Mesh PERR IE */
+#define DOT11_MNG_VHT_CAP_ID 191 /* d11 mgmt VHT cap id */
+#define DOT11_MNG_VHT_OPERATION_ID 192 /* d11 mgmt VHT op id */
+#define DOT11_MNG_EXT_BSSLOAD_ID 193 /* d11 mgmt VHT extended bss load id */
+#define DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID 194 /* Wide BW Channel Switch IE */
+#define DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID 195 /* VHT transmit Power Envelope IE */
+#define DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID 196 /* Channel Switch Wrapper IE */
+#define DOT11_MNG_AID_ID 197 /* Association ID IE */
+#define DOT11_MNG_OPER_MODE_NOTIF_ID 199 /* d11 mgmt VHT oper mode notif */
+#define DOT11_MNG_FTM_PARAMS_ID 206
+
+#define DOT11_MNG_WPA_ID 221 /* d11 management WPA id */
+#define DOT11_MNG_PROPR_ID 221
+/* should start using this one instead of above two */
+#define DOT11_MNG_VS_ID 221 /* d11 management Vendor Specific IE */
+
+/* Rate Defines */
+
+/* Valid rates for the Supported Rates and Extended Supported Rates IEs.
+ * Encoding is the rate in 500kbps units, rounding up for fractional values.
+ * 802.11-2012, section 6.5.5.2, DATA_RATE parameter enumerates all the values.
+ * The rate values cover DSSS, HR/DSSS, ERP, and OFDM phy rates.
+ * The defines below do not cover the rates specific to 10MHz, {3, 4.5, 27},
+ * and 5MHz, {1.5, 2.25, 3, 4.5, 13.5}, which are not supported by Broadcom devices.
+ */
+
+#define DOT11_RATE_1M 2 /* 1 Mbps in 500kbps units */
+#define DOT11_RATE_2M 4 /* 2 Mbps in 500kbps units */
+#define DOT11_RATE_5M5 11 /* 5.5 Mbps in 500kbps units */
+#define DOT11_RATE_11M 22 /* 11 Mbps in 500kbps units */
+#define DOT11_RATE_6M 12 /* 6 Mbps in 500kbps units */
+#define DOT11_RATE_9M 18 /* 9 Mbps in 500kbps units */
+#define DOT11_RATE_12M 24 /* 12 Mbps in 500kbps units */
+#define DOT11_RATE_18M 36 /* 18 Mbps in 500kbps units */
+#define DOT11_RATE_24M 48 /* 24 Mbps in 500kbps units */
+#define DOT11_RATE_36M 72 /* 36 Mbps in 500kbps units */
+#define DOT11_RATE_48M 96 /* 48 Mbps in 500kbps units */
+#define DOT11_RATE_54M 108 /* 54 Mbps in 500kbps units */
+#define DOT11_RATE_MAX 108 /* highest rate (54 Mbps) in 500kbps units */
+
+/* Supported Rates and Extended Supported Rates IEs
+ * The supported rates octets are defined with the MSB indicating a Basic Rate
+ * and bits 0-6 as the rate value
+ */
+#define DOT11_RATE_BASIC 0x80 /* flag for a Basic Rate */
+#define DOT11_RATE_MASK 0x7F /* mask for numeric part of rate */
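+
+/*
+ * Usage sketch (illustrative only, not part of the original header): decoding
+ * one octet 'r' taken from a Supported Rates element.
+ *
+ *	bool basic = (r & DOT11_RATE_BASIC) != 0;
+ *	uint32 rate_kbps = (r & DOT11_RATE_MASK) * 500;   // e.g. DOT11_RATE_5M5 -> 5500
+ */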
+
+/* BSS Membership Selector parameters
+ * 802.11-2012 and 802.11ac_D4.0 sec 8.4.2.3
+ * These selector values are advertised in Supported Rates and Extended Supported Rates IEs
+ * in the supported rates list with the Basic rate bit set.
+ * Constants below include the basic bit.
+ */
+#define DOT11_BSS_MEMBERSHIP_HT 0xFF /* Basic 0x80 + 127, HT Required to join */
+#define DOT11_BSS_MEMBERSHIP_VHT 0xFE /* Basic 0x80 + 126, VHT Required to join */
+
+/* ERP info element bit values */
+#define DOT11_MNG_ERP_LEN 1 /* ERP is currently 1 byte long */
+#define DOT11_MNG_NONERP_PRESENT 0x01 /* NonERP (802.11b) STAs are present
+ * in the BSS
+ */
+#define DOT11_MNG_USE_PROTECTION 0x02 /* Use protection mechanisms for
+ * ERP-OFDM frames
+ */
+#define DOT11_MNG_BARKER_PREAMBLE 0x04 /* Short Preambles: 0 == allowed,
+ * 1 == not allowed
+ */
+
+/* TS Delay element offset & size */
+#define DOT11_MGN_TS_DELAY_LEN 4 /* length of TS DELAY IE */
+#define TS_DELAY_FIELD_SIZE 4 /* TS DELAY field size */
+
+/* Capability Information Field */
+#define DOT11_CAP_ESS 0x0001 /* d11 cap. ESS */
+#define DOT11_CAP_IBSS 0x0002 /* d11 cap. IBSS */
+#define DOT11_CAP_POLLABLE 0x0004 /* d11 cap. pollable */
+#define DOT11_CAP_POLL_RQ 0x0008 /* d11 cap. poll request */
+#define DOT11_CAP_PRIVACY 0x0010 /* d11 cap. privacy */
+#define DOT11_CAP_SHORT 0x0020 /* d11 cap. short */
+#define DOT11_CAP_PBCC 0x0040 /* d11 cap. PBCC */
+#define DOT11_CAP_AGILITY 0x0080 /* d11 cap. agility */
+#define DOT11_CAP_SPECTRUM 0x0100 /* d11 cap. spectrum */
+#define DOT11_CAP_QOS 0x0200 /* d11 cap. qos */
+#define DOT11_CAP_SHORTSLOT 0x0400 /* d11 cap. shortslot */
+#define DOT11_CAP_APSD 0x0800 /* d11 cap. apsd */
+#define DOT11_CAP_RRM 0x1000 /* d11 cap. 11k radio measurement */
+#define DOT11_CAP_CCK_OFDM 0x2000 /* d11 cap. CCK/OFDM */
+#define DOT11_CAP_DELAY_BA 0x4000 /* d11 cap. delayed block ack */
+#define DOT11_CAP_IMMEDIATE_BA 0x8000 /* d11 cap. immediate block ack */
+
+/* Extended capabilities IE bitfields */
+/* 20/40 BSS Coexistence Management support bit position */
+#define DOT11_EXT_CAP_OBSS_COEX_MGMT 0
+/* Extended Channel Switching support bit position */
+#define DOT11_EXT_CAP_EXT_CHAN_SWITCHING 2
+/* scheduled PSMP support bit position */
+#define DOT11_EXT_CAP_SPSMP 6
+/* Flexible Multicast Service */
+#define DOT11_EXT_CAP_FMS 11
+/* proxy ARP service support bit position */
+#define DOT11_EXT_CAP_PROXY_ARP 12
+/* Civic Location */
+#define DOT11_EXT_CAP_CIVIC_LOC 14
+/* Geospatial Location */
+#define DOT11_EXT_CAP_LCI 15
+/* Traffic Filter Service */
+#define DOT11_EXT_CAP_TFS 16
+/* WNM-Sleep Mode */
+#define DOT11_EXT_CAP_WNM_SLEEP 17
+/* TIM Broadcast service */
+#define DOT11_EXT_CAP_TIMBC 18
+/* BSS Transition Management support bit position */
+#define DOT11_EXT_CAP_BSSTRANS_MGMT 19
+/* Direct Multicast Service */
+#define DOT11_EXT_CAP_DMS 26
+/* Interworking support bit position */
+#define DOT11_EXT_CAP_IW 31
+/* QoS map support bit position */
+#define DOT11_EXT_CAP_QOS_MAP 32
+/* service Interval granularity bit position and mask */
+#define DOT11_EXT_CAP_SI 41
+#define DOT11_EXT_CAP_SI_MASK 0x0E
+/* WNM notification */
+#define DOT11_EXT_CAP_WNM_NOTIF 46
+/* Operating mode notification - VHT (11ac D3.0 - 8.4.2.29) */
+#define DOT11_EXT_CAP_OPER_MODE_NOTIF 62
+/* Fine timing measurement - D3.0 */
+#define DOT11_EXT_CAP_FTM_RESPONDER 70
+#define DOT11_EXT_CAP_FTM_INITIATOR 71 /* tentative 11mcd3.0 */
+#ifdef WL_FTM
+#define DOT11_EXT_CAP_MAX_BIT_IDX 95 /* !!!update this please!!! */
+#else
+#define DOT11_EXT_CAP_MAX_BIT_IDX 62 /* !!!update this please!!! */
+#endif
+
+/* extended capability */
+#ifndef DOT11_EXTCAP_LEN_MAX
+#define DOT11_EXTCAP_LEN_MAX ((DOT11_EXT_CAP_MAX_BIT_IDX + 8) >> 3)
+#endif
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap {
+	uint8 extcap[DOT11_EXTCAP_LEN_MAX];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap dot11_extcap_t;
+
+/* VHT Operating mode bit fields - (11ac D3.0 - 8.4.1.50) */
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT 0
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_MASK 0x3
+#define DOT11_OPER_MODE_RXNSS_SHIFT 4
+#define DOT11_OPER_MODE_RXNSS_MASK 0x70
+#define DOT11_OPER_MODE_RXNSS_TYPE_SHIFT 7
+#define DOT11_OPER_MODE_RXNSS_TYPE_MASK 0x80
+
+#define DOT11_OPER_MODE(type, nss, chanw) (\
+	((type) << DOT11_OPER_MODE_RXNSS_TYPE_SHIFT &\
+	 DOT11_OPER_MODE_RXNSS_TYPE_MASK) |\
+	(((nss) - 1) << DOT11_OPER_MODE_RXNSS_SHIFT & DOT11_OPER_MODE_RXNSS_MASK) |\
+	((chanw) << DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT &\
+	 DOT11_OPER_MODE_CHANNEL_WIDTH_MASK))
+
+#define DOT11_OPER_MODE_CHANNEL_WIDTH(mode) \
+	(((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK)\
+	>> DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT)
+#define DOT11_OPER_MODE_RXNSS(mode) \
+	((((mode) & DOT11_OPER_MODE_RXNSS_MASK) \
+	>> DOT11_OPER_MODE_RXNSS_SHIFT) + 1)
+#define DOT11_OPER_MODE_RXNSS_TYPE(mode) \
+	(((mode) & DOT11_OPER_MODE_RXNSS_TYPE_MASK)\
+	>> DOT11_OPER_MODE_RXNSS_TYPE_SHIFT)
+
+#define DOT11_OPER_MODE_20MHZ 0
+#define DOT11_OPER_MODE_40MHZ 1
+#define DOT11_OPER_MODE_80MHZ 2
+#define DOT11_OPER_MODE_160MHZ 3
+#define DOT11_OPER_MODE_8080MHZ 3
+
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_20MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_20MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_40MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_40MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_80MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_80MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_160MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_160MHZ)
+#define DOT11_OPER_MODE_CHANNEL_WIDTH_8080MHZ(mode) (\
+	((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_8080MHZ)
+
+/* Operating mode information element 802.11ac D3.0 - 8.4.2.168 */
+BWL_PRE_PACKED_STRUCT struct dot11_oper_mode_notif_ie {
+	uint8 mode;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_oper_mode_notif_ie dot11_oper_mode_notif_ie_t;
+
+#define DOT11_OPER_MODE_NOTIF_IE_LEN 1
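+
+/*
+ * Usage sketch (illustrative only, not part of the original header): composing
+ * and decoding an operating-mode octet for 2 RX spatial streams at 80 MHz.
+ *
+ *	uint8 mode = DOT11_OPER_MODE(0, 2, DOT11_OPER_MODE_80MHZ);   // 0x12
+ *	uint8 rxnss = DOT11_OPER_MODE_RXNSS(mode);                   // 2
+ *	bool is80 = DOT11_OPER_MODE_CHANNEL_WIDTH_80MHZ(mode);       // true
+ */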
+
+/* Extended Capability Information Field */
+#define DOT11_OBSS_COEX_MNG_SUPPORT 0x01 /* 20/40 BSS Coexistence Management support */
+
+/*
+ * Action Frame Constants
+ */
+#define DOT11_ACTION_HDR_LEN 2 /* action frame category + action field */
+#define DOT11_ACTION_CAT_OFF 0 /* category offset */
+#define DOT11_ACTION_ACT_OFF 1 /* action offset */
+
+/* Action Category field (sec 8.4.1.11) */
+#define DOT11_ACTION_CAT_ERR_MASK 0x80 /* category error mask */
+#define DOT11_ACTION_CAT_MASK 0x7F /* category mask */
+#define DOT11_ACTION_CAT_SPECT_MNG 0 /* category spectrum management */
+#define DOT11_ACTION_CAT_QOS 1 /* category QoS */
+#define DOT11_ACTION_CAT_DLS 2 /* category DLS */
+#define DOT11_ACTION_CAT_BLOCKACK 3 /* category block ack */
+#define DOT11_ACTION_CAT_PUBLIC 4 /* category public */
+#define DOT11_ACTION_CAT_RRM 5 /* category radio measurements */
+#define DOT11_ACTION_CAT_FBT 6 /* category fast bss transition */
+#define DOT11_ACTION_CAT_HT 7 /* category for HT */
+#define DOT11_ACTION_CAT_SA_QUERY 8 /* security association query */
+#define DOT11_ACTION_CAT_PDPA 9 /* protected dual of public action */
+#define DOT11_ACTION_CAT_WNM 10 /* category for WNM */
+#define DOT11_ACTION_CAT_UWNM 11 /* category for Unprotected WNM */
+#define DOT11_ACTION_CAT_MESH 13 /* category for Mesh */
+#define DOT11_ACTION_CAT_SELFPROT 15 /* category for Mesh, self protected */
+#define DOT11_ACTION_NOTIFICATION 17
+#define DOT11_ACTION_CAT_VHT 21 /* VHT action */
+#define DOT11_ACTION_CAT_VSP 126 /* protected vendor specific */
+#define DOT11_ACTION_CAT_VS 127 /* category Vendor Specific */
+
+/* Spectrum Management Action IDs (sec 7.4.1) */
+#define DOT11_SM_ACTION_M_REQ 0 /* d11 action measurement request */
+#define DOT11_SM_ACTION_M_REP 1 /* d11 action measurement response */
+#define DOT11_SM_ACTION_TPC_REQ 2 /* d11 action TPC request */
+#define DOT11_SM_ACTION_TPC_REP 3 /* d11 action TPC response */
+#define DOT11_SM_ACTION_CHANNEL_SWITCH 4 /* d11 action channel switch */
+#define DOT11_SM_ACTION_EXT_CSA 5 /* d11 extended CSA for 11n */
+
+/* QoS action ids */
+#define DOT11_QOS_ACTION_ADDTS_REQ 0 /* d11 action ADDTS request */
+#define DOT11_QOS_ACTION_ADDTS_RESP 1 /* d11 action ADDTS response */
+#define DOT11_QOS_ACTION_DELTS 2 /* d11 action DELTS */
+#define DOT11_QOS_ACTION_SCHEDULE 3 /* d11 action schedule */
+#define DOT11_QOS_ACTION_QOS_MAP 4 /* d11 action QOS map */
+
+/* HT action ids */
+#define DOT11_ACTION_ID_HT_CH_WIDTH 0 /* notify channel width action id */
+#define DOT11_ACTION_ID_HT_MIMO_PS 1 /* mimo ps action id */
+
+/* Public action ids */
+#define DOT11_PUB_ACTION_BSS_COEX_MNG 0 /* 20/40 Coexistence Management action id */
+#define DOT11_PUB_ACTION_CHANNEL_SWITCH 4 /* d11 action channel switch */
+#define DOT11_PUB_ACTION_GAS_CB_REQ 12 /* GAS Comeback Request */
+#define DOT11_PUB_ACTION_FTM_REQ 32 /* FTM request */
+#define DOT11_PUB_ACTION_FTM 33 /* FTM measurement */
+
+/* Block Ack action types */
+#define DOT11_BA_ACTION_ADDBA_REQ 0 /* ADDBA Req action frame type */
+#define DOT11_BA_ACTION_ADDBA_RESP 1 /* ADDBA Resp action frame type */
+#define DOT11_BA_ACTION_DELBA 2 /* DELBA action frame type */
+
+/* ADDBA action parameters */
+#define DOT11_ADDBA_PARAM_AMSDU_SUP 0x0001 /* AMSDU supported under BA */
+#define DOT11_ADDBA_PARAM_POLICY_MASK 0x0002 /* policy mask(ack vs delayed) */
+#define DOT11_ADDBA_PARAM_POLICY_SHIFT 1 /* policy shift */
+#define DOT11_ADDBA_PARAM_TID_MASK 0x003c /* tid mask */
+#define DOT11_ADDBA_PARAM_TID_SHIFT 2 /* tid shift */
+#define DOT11_ADDBA_PARAM_BSIZE_MASK 0xffc0 /* buffer size mask */
+#define DOT11_ADDBA_PARAM_BSIZE_SHIFT 6 /* buffer size shift */
+
+#define DOT11_ADDBA_POLICY_DELAYED 0 /* delayed BA policy */
+#define DOT11_ADDBA_POLICY_IMMEDIATE 1 /* immediate BA policy */
+
+/* Fast Transition action types */
+#define DOT11_FT_ACTION_FT_RESERVED 0
+#define DOT11_FT_ACTION_FT_REQ 1 /* FBT request - for over-the-DS FBT */
+#define DOT11_FT_ACTION_FT_RES 2 /* FBT response - for over-the-DS FBT */
+#define DOT11_FT_ACTION_FT_CON 3 /* FBT confirm - for OTDS with RRP */
+#define DOT11_FT_ACTION_FT_ACK 4 /* FBT ack */
+
+/* DLS action types */
+#define DOT11_DLS_ACTION_REQ 0 /* DLS Request */
+#define DOT11_DLS_ACTION_RESP 1 /* DLS Response */
+#define DOT11_DLS_ACTION_TD 2 /* DLS Teardown */
+
+/* Wireless Network Management (WNM) action types */
+#define DOT11_WNM_ACTION_EVENT_REQ 0
+#define DOT11_WNM_ACTION_EVENT_REP 1
+#define DOT11_WNM_ACTION_DIAG_REQ 2
+#define DOT11_WNM_ACTION_DIAG_REP 3
+#define DOT11_WNM_ACTION_LOC_CFG_REQ 4
+#define DOT11_WNM_ACTION_LOC_RFG_RESP 5
+#define DOT11_WNM_ACTION_BSSTRANS_QUERY 6
+#define DOT11_WNM_ACTION_BSSTRANS_REQ 7
+#define DOT11_WNM_ACTION_BSSTRANS_RESP 8
+#define DOT11_WNM_ACTION_FMS_REQ 9
+#define DOT11_WNM_ACTION_FMS_RESP 10
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REQ 11
+#define DOT11_WNM_ACTION_COL_INTRFRNCE_REP 12
+#define DOT11_WNM_ACTION_TFS_REQ 13
+#define DOT11_WNM_ACTION_TFS_RESP 14
+#define DOT11_WNM_ACTION_TFS_NOTIFY_REQ 15
+#define DOT11_WNM_ACTION_WNM_SLEEP_REQ 16
+#define DOT11_WNM_ACTION_WNM_SLEEP_RESP 17
+#define DOT11_WNM_ACTION_TIMBC_REQ 18
+#define DOT11_WNM_ACTION_TIMBC_RESP 19
+#define DOT11_WNM_ACTION_QOS_TRFC_CAP_UPD 20
+#define DOT11_WNM_ACTION_CHAN_USAGE_REQ 21
+#define DOT11_WNM_ACTION_CHAN_USAGE_RESP 22
+#define DOT11_WNM_ACTION_DMS_REQ 23
+#define DOT11_WNM_ACTION_DMS_RESP 24
+#define DOT11_WNM_ACTION_TMNG_MEASUR_REQ 25
+#define DOT11_WNM_ACTION_NOTFCTN_REQ 26
+#define DOT11_WNM_ACTION_NOTFCTN_RESP 27
+#define DOT11_WNM_ACTION_TFS_NOTIFY_RESP 28
+
+/* Unprotected Wireless Network Management (WNM) action types */
+#define DOT11_UWNM_ACTION_TIM 0
+#define DOT11_UWNM_ACTION_TIMING_MEASUREMENT 1
+
+#define DOT11_MNG_COUNTRY_ID_LEN 3
+
+/* VHT category action types - 802.11ac D3.0 - 8.5.23.1 */
+#define DOT11_VHT_ACTION_CBF 0 /* Compressed Beamforming */
+#define DOT11_VHT_ACTION_GID_MGMT 1 /* Group ID Management */
+#define DOT11_VHT_ACTION_OPER_MODE_NOTIF 2 /* Operating mode notif'n */
+
+/** DLS Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dls_req {
+	uint8 category; /* category of action frame (2) */
+	uint8 action; /* DLS action: req (0) */
+	struct ether_addr da; /* destination address */
+	struct ether_addr sa; /* source address */
+	uint16 cap; /* capability */
+	uint16 timeout; /* timeout value */
+	uint8 data[1]; /* IE:support rate, extend support rate, HT cap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_req dot11_dls_req_t;
+#define DOT11_DLS_REQ_LEN 18 /* Fixed length */
+
+/** DLS response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dls_resp {
+	uint8 category; /* category of action frame (2) */
+	uint8 action; /* DLS action: resp (1) */
+	uint16 status; /* status code field */
+	struct ether_addr da; /* destination address */
+	struct ether_addr sa; /* source address */
+	uint8 data[1]; /* optional: capability, rate ... */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dls_resp dot11_dls_resp_t;
+#define DOT11_DLS_RESP_LEN 16 /* Fixed length */
+
+/* ************* 802.11v related definitions.
************* */
+
+/** BSS Management Transition Query frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_query {
+	uint8 category; /* category of action frame (10) */
+	uint8 action; /* WNM action: trans_query (6) */
+	uint8 token; /* dialog token */
+	uint8 reason; /* transition query reason */
+	uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_query dot11_bsstrans_query_t;
+#define DOT11_BSSTRANS_QUERY_LEN 4 /* Fixed length */
+
+/* BTM transition reason */
+#define DOT11_BSSTRANS_REASON_UNSPECIFIED 0
+#define DOT11_BSSTRANS_REASON_EXC_FRAME_LOSS 1
+#define DOT11_BSSTRANS_REASON_EXC_TRAFFIC_DELAY 2
+#define DOT11_BSSTRANS_REASON_INSUFF_QOS_CAPACITY 3
+#define DOT11_BSSTRANS_REASON_FIRST_ASSOC 4
+#define DOT11_BSSTRANS_REASON_LOAD_BALANCING 5
+#define DOT11_BSSTRANS_REASON_BETTER_AP_FOUND 6
+#define DOT11_BSSTRANS_REASON_DEAUTH_RX 7
+#define DOT11_BSSTRANS_REASON_8021X_EAP_AUTH_FAIL 8
+#define DOT11_BSSTRANS_REASON_4WAY_HANDSHK_FAIL 9
+#define DOT11_BSSTRANS_REASON_MANY_REPLAYCNT_FAIL 10
+#define DOT11_BSSTRANS_REASON_MANY_DATAMIC_FAIL 11
+#define DOT11_BSSTRANS_REASON_EXCEED_MAX_RETRANS 12
+#define DOT11_BSSTRANS_REASON_MANY_BCAST_DISASSOC_RX 13
+#define DOT11_BSSTRANS_REASON_MANY_BCAST_DEAUTH_RX 14
+#define DOT11_BSSTRANS_REASON_PREV_TRANSITION_FAIL 15
+#define DOT11_BSSTRANS_REASON_LOW_RSSI 16
+#define DOT11_BSSTRANS_REASON_ROAM_FROM_NON_80211 17
+#define DOT11_BSSTRANS_REASON_RX_BTM_REQ 18
+#define DOT11_BSSTRANS_REASON_PREF_LIST_INCLUDED 19
+#define DOT11_BSSTRANS_REASON_LEAVING_ESS 20
+
+/** BSS Management Transition Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_req {
+	uint8 category; /* category of action frame (10) */
+	uint8 action; /* WNM action: trans_req (7) */
+	uint8 token; /* dialog token */
+	uint8 reqmode; /* transition request mode */
+	uint16 disassoc_tmr; /* disassociation timer */
+	uint8 validity_intrvl; /* validity interval */
+	uint8 data[1]; /* optional: BSS term duration, ...
*/
+	/* ...session info URL, candidate list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_req dot11_bsstrans_req_t;
+#define DOT11_BSSTRANS_REQ_LEN 7 /* Fixed length */
+
+/* BSS Mgmt Transition Request Mode Field - 802.11v */
+#define DOT11_BSSTRANS_REQMODE_PREF_LIST_INCL 0x01
+#define DOT11_BSSTRANS_REQMODE_ABRIDGED 0x02
+#define DOT11_BSSTRANS_REQMODE_DISASSOC_IMMINENT 0x04
+#define DOT11_BSSTRANS_REQMODE_BSS_TERM_INCL 0x08
+#define DOT11_BSSTRANS_REQMODE_ESS_DISASSOC_IMNT 0x10
+
+/** BSS Management transition response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_resp {
+	uint8 category; /* category of action frame (10) */
+	uint8 action; /* WNM action: trans_resp (8) */
+	uint8 token; /* dialog token */
+	uint8 status; /* transition status */
+	uint8 term_delay; /* BSS termination delay */
+	uint8 data[1]; /* optional: BSSID target, candidate list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bsstrans_resp dot11_bsstrans_resp_t;
+#define DOT11_BSSTRANS_RESP_LEN 5 /* Fixed length */
+
+/* BSS Mgmt Transition Response Status Field */
+#define DOT11_BSSTRANS_RESP_STATUS_ACCEPT 0
+#define DOT11_BSSTRANS_RESP_STATUS_REJECT 1
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_BCN 2
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_CAP 3
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_UNDESIRED 4
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_TERM_DELAY_REQ 5
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_BSS_LIST_PROVIDED 6
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_NO_SUITABLE_BSS 7
+#define DOT11_BSSTRANS_RESP_STATUS_REJ_LEAVING_ESS 8
+
+/** BSS Max Idle Period element */
+BWL_PRE_PACKED_STRUCT struct dot11_bss_max_idle_period_ie {
+	uint8 id; /* 90, DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID */
+	uint8 len;
+	uint16 max_idle_period; /* in unit of 1000 TUs */
+	uint8 idle_opt;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_bss_max_idle_period_ie dot11_bss_max_idle_period_ie_t;
+#define DOT11_BSS_MAX_IDLE_PERIOD_IE_LEN 3 /* bss max idle period IE size */
+#define DOT11_BSS_MAX_IDLE_PERIOD_OPT_PROTECTED 1 /* BSS max idle option */
+
+/** TIM Broadcast request element */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_req_ie {
+	uint8 id; /* 94, DOT11_MNG_TIMBC_REQ_ID */
+	uint8 len;
+	uint8 interval; /* in unit of beacon interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_req_ie dot11_timbc_req_ie_t;
+#define DOT11_TIMBC_REQ_IE_LEN 1 /* Fixed length */
+
+/** TIM Broadcast request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_req {
+	uint8 category; /* category of action frame (10) */
+	uint8 action; /* WNM action: DOT11_WNM_ACTION_TIMBC_REQ(18) */
+	uint8 token; /* dialog token */
+	uint8 data[1]; /* TIM broadcast request element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_req dot11_timbc_req_t;
+#define DOT11_TIMBC_REQ_LEN 3 /* Fixed length */
+
+/** TIM Broadcast response element */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp_ie {
+	uint8 id; /* 95, DOT11_MNG_TIM_BROADCAST_RESP_ID */
+	uint8 len;
+	uint8 status; /* status of add request */
+	uint8 interval; /* in unit of beacon interval */
+	int32 offset; /* in unit of ms */
+	uint16 high_rate; /* in unit of 0.5 Mb/s */
+	uint16 low_rate; /* in unit of 0.5 Mb/s */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_resp_ie dot11_timbc_resp_ie_t;
+#define DOT11_TIMBC_DENY_RESP_IE_LEN 1 /* Deny. Fixed length */
+#define DOT11_TIMBC_ACCEPT_RESP_IE_LEN 10 /* Accept. Fixed length */
+
+#define DOT11_TIMBC_STATUS_ACCEPT 0
+#define DOT11_TIMBC_STATUS_ACCEPT_TSTAMP 1
+#define DOT11_TIMBC_STATUS_DENY 2
+#define DOT11_TIMBC_STATUS_OVERRIDDEN 3
+#define DOT11_TIMBC_STATUS_RESERVED 4
+
+/** TIM Broadcast response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp {
+	uint8 category; /* category of action frame (10) */
+	uint8 action; /* action: DOT11_WNM_ACTION_TIMBC_RESP(19) */
+	uint8 token; /* dialog token */
+	uint8 data[1]; /* TIM broadcast response element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc_resp dot11_timbc_resp_t;
+#define DOT11_TIMBC_RESP_LEN 3 /* Fixed length */
+
+/** TIM element */
+BWL_PRE_PACKED_STRUCT struct dot11_tim_ie {
+	uint8 id; /* 5, DOT11_MNG_TIM_ID */
+	uint8 len; /* 4 - 255 */
+	uint8 dtim_count; /* DTIM decrementing counter */
+	uint8 dtim_period; /* DTIM period */
+	uint8 bitmap_control; /* AID 0 + bitmap offset */
+	uint8 pvb[1]; /* Partial Virtual Bitmap, variable length */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tim_ie dot11_tim_ie_t;
+#define DOT11_TIM_IE_FIXED_LEN 3 /* Fixed length, without id and len */
+#define DOT11_TIM_IE_FIXED_TOTAL_LEN 5 /* Fixed length, with id and len */
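+
+/*
+ * Usage sketch (illustrative only, not part of the original header): testing
+ * whether an association ID 'aid' (assumed input) is set in a validated TIM
+ * element's partial virtual bitmap.
+ *
+ *	uint16 n1 = tim->bitmap_control & 0xFE;           // bitmap offset, in octets
+ *	uint16 octet = aid / 8;
+ *	uint16 pvb_len = tim->len - DOT11_TIM_IE_FIXED_LEN;
+ *	bool buffered = FALSE;
+ *	if (octet >= n1 && octet < n1 + pvb_len)
+ *		buffered = (tim->pvb[octet - n1] >> (aid % 8)) & 1;
+ */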
+
+/** TIM Broadcast frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_timbc {
+	uint8 category; /* category of action frame (11) */
+	uint8 action; /* action: TIM (0) */
+	uint8 check_beacon; /* need to check-beacon */
+	uint8 tsf[8]; /* Time Synchronization Function */
+	dot11_tim_ie_t tim_ie; /* TIM element */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timbc dot11_timbc_t;
+#define DOT11_TIMBC_HDR_LEN (sizeof(dot11_timbc_t) - sizeof(dot11_tim_ie_t))
+#define DOT11_TIMBC_FIXED_LEN (sizeof(dot11_timbc_t) - 1) /* Fixed length */
+#define DOT11_TIMBC_LEN 11 /* Fixed length */
+
+/** TCLAS frame classifier type */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_hdr {
+	uint8 type;
+	uint8 mask;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_hdr dot11_tclas_fc_hdr_t;
+#define DOT11_TCLAS_FC_HDR_LEN 2 /* Fixed length */
+
+#define DOT11_TCLAS_MASK_0 0x1
+#define DOT11_TCLAS_MASK_1 0x2
+#define DOT11_TCLAS_MASK_2 0x4
+#define DOT11_TCLAS_MASK_3 0x8
+#define DOT11_TCLAS_MASK_4 0x10
+#define DOT11_TCLAS_MASK_5 0x20
+#define DOT11_TCLAS_MASK_6 0x40
+#define DOT11_TCLAS_MASK_7 0x80
+
+#define DOT11_TCLAS_FC_0_ETH 0
+#define DOT11_TCLAS_FC_1_IP 1
+#define DOT11_TCLAS_FC_2_8021Q 2
+#define DOT11_TCLAS_FC_3_OFFSET 3
+#define DOT11_TCLAS_FC_4_IP_HIGHER 4
+#define DOT11_TCLAS_FC_5_8021D 5
+
+/** TCLAS frame classifier type 0 parameters for Ethernet */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_0_eth {
+	uint8 type;
+	uint8 mask;
+	uint8 sa[ETHER_ADDR_LEN];
+	uint8 da[ETHER_ADDR_LEN];
+	uint16 eth_type;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_0_eth dot11_tclas_fc_0_eth_t;
+#define DOT11_TCLAS_FC_0_ETH_LEN 16
+
+/** TCLAS frame classifier type 1 parameters for IPV4 */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_1_ipv4 {
+	uint8 type;
+	uint8 mask;
+	uint8 version;
+	uint32 src_ip;
+	uint32 dst_ip;
+	uint16 src_port;
+	uint16 dst_port;
+	uint8 dscp;
+	uint8 protocol;
+	uint8 reserved;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_1_ipv4_t;
+#define DOT11_TCLAS_FC_1_IPV4_LEN 18
+
+/** TCLAS frame classifier type 2 parameters for 802.1Q */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_2_8021q {
+	uint8 type;
+	uint8 mask;
+	uint16 tci;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_2_8021q dot11_tclas_fc_2_8021q_t;
+#define DOT11_TCLAS_FC_2_8021Q_LEN 4
+
+/** TCLAS frame classifier type 3 parameters for filter offset */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_3_filter {
+	uint8 type;
+	uint8 mask;
+	uint16 offset;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_3_filter dot11_tclas_fc_3_filter_t;
+#define DOT11_TCLAS_FC_3_FILTER_LEN 4
+
+/** TCLAS frame classifier type 4 parameters for IPV4 is the same as TCLAS type 1 */
+typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_4_ipv4_t;
+#define DOT11_TCLAS_FC_4_IPV4_LEN DOT11_TCLAS_FC_1_IPV4_LEN
+
+/** TCLAS frame classifier type 4 parameters for IPV6 */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_4_ipv6 {
+	uint8 type;
+	uint8 mask;
+	uint8 version;
+	uint8 saddr[16];
+	uint8 daddr[16];
+	uint16 src_port;
+	uint16 dst_port;
+	uint8 dscp;
+	uint8 nexthdr;
+	uint8 flow_lbl[3];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_4_ipv6 dot11_tclas_fc_4_ipv6_t;
+#define DOT11_TCLAS_FC_4_IPV6_LEN 44
+
+/** TCLAS frame classifier type 5 parameters for 802.1D */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_5_8021d {
+	uint8 type;
+	uint8 mask;
+	uint8 pcp;
+	uint8 cfi;
+	uint16 vid;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_fc_5_8021d dot11_tclas_fc_5_8021d_t;
+#define DOT11_TCLAS_FC_5_8021D_LEN 6
+
+/** TCLAS frame classifier type parameters */
+BWL_PRE_PACKED_STRUCT union dot11_tclas_fc {
+	uint8 data[1];
+	dot11_tclas_fc_hdr_t hdr;
+	dot11_tclas_fc_0_eth_t t0_eth;
+	dot11_tclas_fc_1_ipv4_t t1_ipv4;
+	dot11_tclas_fc_2_8021q_t t2_8021q;
+	dot11_tclas_fc_3_filter_t t3_filter;
+	dot11_tclas_fc_4_ipv4_t t4_ipv4;
+	dot11_tclas_fc_4_ipv6_t t4_ipv6;
+	dot11_tclas_fc_5_8021d_t t5_8021d;
+} BWL_POST_PACKED_STRUCT;
+typedef union dot11_tclas_fc dot11_tclas_fc_t;
+
+#define DOT11_TCLAS_FC_MIN_LEN 4 /* Classifier Type 2 has the min size */
+#define DOT11_TCLAS_FC_MAX_LEN 254
+
+/** TCLAS element */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_ie {
+	uint8 id; /* 14, DOT11_MNG_TCLAS_ID */
+	uint8 len;
+	uint8 user_priority;
+	dot11_tclas_fc_t fc;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_ie dot11_tclas_ie_t;
+#define DOT11_TCLAS_IE_LEN 3 /* Fixed length, include id and len */
+
+/** TCLAS processing element */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_proc_ie {
+	uint8 id; /* 44, DOT11_MNG_TCLAS_PROC_ID */
+	uint8 len;
+	uint8 process;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_proc_ie dot11_tclas_proc_ie_t;
+#define DOT11_TCLAS_PROC_IE_LEN 3 /* Fixed length, include id and len */
+
+#define DOT11_TCLAS_PROC_MATCHALL 0 /* all higher-layer elements need to match */
+#define DOT11_TCLAS_PROC_MATCHONE 1 /* at least one higher-layer element needs to match */
+#define DOT11_TCLAS_PROC_NONMATCH 2 /* no match to any higher-layer element */
+
+/* TSPEC element defined in 802.11 std section 8.4.2.32 - Not supported */
+#define DOT11_TSPEC_IE_LEN 57 /* Fixed length */
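+
+/*
+ * Usage sketch (illustrative only, not part of the original header): filling a
+ * type-0 Ethernet frame classifier that matches on ethertype only. The mask
+ * bit numbering is assumed to follow the classifier's field order (sa, da,
+ * eth_type); hton16() is a byte-order helper assumed to be available.
+ *
+ *	dot11_tclas_fc_0_eth_t fc;
+ *	memset(&fc, 0, sizeof(fc));
+ *	fc.type = DOT11_TCLAS_FC_0_ETH;
+ *	fc.mask = DOT11_TCLAS_MASK_2;                     // third field: eth_type
+ *	fc.eth_type = hton16(0x0800);                     // IPv4
+ */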
+
+/** TFS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_req_ie {
+	uint8 id; /* 91, DOT11_MNG_TFS_REQUEST_ID */
+	uint8 len;
+	uint8 tfs_id;
+	uint8 actcode;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_req_ie dot11_tfs_req_ie_t;
+#define DOT11_TFS_REQ_IE_LEN 2 /* Fixed length, without id and len */
+
+/** TFS request action codes (bitfield) */
+#define DOT11_TFS_ACTCODE_DELETE 1
+#define DOT11_TFS_ACTCODE_NOTIFY 2
+
+/** TFS request subelement IDs */
+#define DOT11_TFS_REQ_TFS_SE_ID 1
+#define DOT11_TFS_REQ_VENDOR_SE_ID 221
+
+/** TFS subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 data[1]; /* TCLAS element(s) + optional TCLAS proc */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_se dot11_tfs_se_t;
+
+/** TFS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp_ie {
+	uint8 id; /* 92, DOT11_MNG_TFS_RESPONSE_ID */
+	uint8 len;
+	uint8 tfs_id;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_resp_ie dot11_tfs_resp_ie_t;
+#define DOT11_TFS_RESP_IE_LEN 1 /* Fixed length, without id and len */
+
+/** TFS response subelement IDs (same subelements, but different IDs, than in TFS request) */
+#define DOT11_TFS_RESP_TFS_STATUS_SE_ID 1
+#define DOT11_TFS_RESP_TFS_SE_ID 2
+#define DOT11_TFS_RESP_VENDOR_SE_ID 221
+
+/** TFS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_status_se {
+	uint8 sub_id; /* 92, DOT11_MNG_TFS_RESPONSE_ID */
+	uint8 len;
+	uint8 resp_st;
+	uint8 data[1]; /* Potential dot11_tfs_se_t included */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_status_se dot11_tfs_status_se_t;
+#define DOT11_TFS_STATUS_SE_LEN 1 /* Fixed length, without id and len */
+
+/* Following definitions should be merged into the FMS_TFS macros below */
+/* TFS Response status codes. Identical to FMS Element status, without N/A */
+#define DOT11_TFS_STATUS_ACCEPT 0
+#define DOT11_TFS_STATUS_DENY_FORMAT 1
+#define DOT11_TFS_STATUS_DENY_RESOURCE 2
+#define DOT11_TFS_STATUS_DENY_POLICY 4
+#define DOT11_TFS_STATUS_DENY_UNSPECIFIED 5
+#define DOT11_TFS_STATUS_ALTPREF_POLICY 7
+#define DOT11_TFS_STATUS_ALTPREF_TCLAS_UNSUPP 14
+
+/* FMS Element Status and TFS Response Status Definition */
+#define DOT11_FMS_TFS_STATUS_ACCEPT 0
+#define DOT11_FMS_TFS_STATUS_DENY_FORMAT 1
+#define DOT11_FMS_TFS_STATUS_DENY_RESOURCE 2
+#define DOT11_FMS_TFS_STATUS_DENY_MULTIPLE_DI 3
+#define DOT11_FMS_TFS_STATUS_DENY_POLICY 4
+#define DOT11_FMS_TFS_STATUS_DENY_UNSPECIFIED 5
+#define DOT11_FMS_TFS_STATUS_ALT_DIFF_DI 6
+#define DOT11_FMS_TFS_STATUS_ALT_POLICY 7
+#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_DI 8
+#define DOT11_FMS_TFS_STATUS_ALT_MCRATE 9
+#define DOT11_FMS_TFS_STATUS_TERM_POLICY 10
+#define DOT11_FMS_TFS_STATUS_TERM_RESOURCE 11
+#define DOT11_FMS_TFS_STATUS_TERM_HIGHER_PRIO 12
+#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_MDI 13
+#define DOT11_FMS_TFS_STATUS_ALT_TCLAS_UNSUPP 14
+
+/** TFS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_req {
+	uint8 category; /* category of action frame (10) */
+	uint8 action; /* WNM action: TFS request (13) */
+	uint8 token; /* dialog token */
+	uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_req dot11_tfs_req_t;
+#define DOT11_TFS_REQ_LEN 3 /* Fixed length */
+
+/** TFS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp {
+	uint8 category; /* category of action frame (10) */
+	uint8 action; /* WNM action: TFS response (14) */
+	uint8 token; /* dialog token */
+	uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_resp dot11_tfs_resp_t;
+#define DOT11_TFS_RESP_LEN 3 /* Fixed length */
+
+/** TFS Management Notify frame request header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_req {
+	uint8 category; /* category of action frame (10) */
+	uint8 action; /* WNM action: TFS notify request (15) */
+	uint8 tfs_id_cnt; /* TFS IDs count */
+	uint8 tfs_id[1]; /* Array of TFS IDs */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_notify_req dot11_tfs_notify_req_t;
+#define DOT11_TFS_NOTIFY_REQ_LEN 3 /* Fixed length */
+
+/** TFS Management Notify frame response header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_resp {
+	uint8 category; /* category of action frame (10) */
+	uint8 action; /* WNM action: TFS notify response (28) */
+	uint8 tfs_id_cnt; /* TFS IDs count */
+	uint8 tfs_id[1]; /* Array of TFS IDs */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_notify_resp dot11_tfs_notify_resp_t;
+#define DOT11_TFS_NOTIFY_RESP_LEN 3 /* Fixed length */
+
+/** WNM-Sleep Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_req {
+	uint8 category; /* category of action frame (10) */
+	uint8 action; /* WNM action: wnm-sleep request (16) */
+	uint8 token; /* dialog token */
+	uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_req dot11_wnm_sleep_req_t;
+#define DOT11_WNM_SLEEP_REQ_LEN 3 /* Fixed length */
+
+/** WNM-Sleep Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_resp {
+	uint8 category; /* category of action frame (10) */
+	uint8 action; /* WNM action: wnm-sleep response (17) */
+	uint8 token; /* dialog token */
+	uint16 key_len; /* key data length */
+	uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_resp dot11_wnm_sleep_resp_t;
+#define DOT11_WNM_SLEEP_RESP_LEN 5 /* Fixed length */
+
+#define DOT11_WNM_SLEEP_SUBELEM_ID_GTK 0
+#define DOT11_WNM_SLEEP_SUBELEM_ID_IGTK 1
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_gtk {
+	uint8 sub_id;
+	uint8 len;
+	uint16 key_info;
+	uint8 key_length;
+	uint8 rsc[8];
+	uint8 key[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_subelem_gtk dot11_wnm_sleep_subelem_gtk_t;
+#define DOT11_WNM_SLEEP_SUBELEM_GTK_FIXED_LEN 11 /* without sub_id, len, and key */
+#define DOT11_WNM_SLEEP_SUBELEM_GTK_MAX_LEN 43 /* without sub_id and len */
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_igtk {
+	uint8 sub_id;
+	uint8 len;
+	uint16 key_id;
+	uint8 pn[6];
+	uint8 key[16];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_subelem_igtk dot11_wnm_sleep_subelem_igtk_t;
+#define DOT11_WNM_SLEEP_SUBELEM_IGTK_LEN 24 /* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_ie {
+	uint8 id; /* 93, DOT11_MNG_WNM_SLEEP_MODE_ID */
+	uint8 len;
+	uint8 act_type;
+	uint8 resp_status;
+	uint16 interval;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_wnm_sleep_ie dot11_wnm_sleep_ie_t;
+#define DOT11_WNM_SLEEP_IE_LEN 4 /* Fixed length */
+
+#define DOT11_WNM_SLEEP_ACT_TYPE_ENTER 0
+#define DOT11_WNM_SLEEP_ACT_TYPE_EXIT 1
+
+#define DOT11_WNM_SLEEP_RESP_ACCEPT 0
+#define DOT11_WNM_SLEEP_RESP_UPDATE 1
+#define DOT11_WNM_SLEEP_RESP_DENY 2
+#define DOT11_WNM_SLEEP_RESP_DENY_TEMP 3
+#define DOT11_WNM_SLEEP_RESP_DENY_KEY 4
+#define DOT11_WNM_SLEEP_RESP_DENY_INUSE 5
+#define DOT11_WNM_SLEEP_RESP_LAST 6
+
+/** DMS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req {
+	uint8 category; /* category of action frame (10) */
+	uint8 action; /* WNM action: dms request (23) */
+	uint8 token; /* dialog token */
+	uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req dot11_dms_req_t;
+#define DOT11_DMS_REQ_LEN 3 /* Fixed length */
+
+/** DMS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp {
+	uint8 category; /* category of action frame (10) */
+	uint8 action; /* WNM action: dms response (24) */
+	uint8 token; /* dialog token */
+	uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp dot11_dms_resp_t;
+#define DOT11_DMS_RESP_LEN 3 /* Fixed length */
+
+/** DMS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req_ie {
+	uint8 id; /* 99, DOT11_MNG_DMS_REQUEST_ID */
+	uint8 len;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req_ie dot11_dms_req_ie_t;
+#define DOT11_DMS_REQ_IE_LEN 2 /* Fixed length */
+
+/** DMS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_ie {
+	uint8 id; /* 100, DOT11_MNG_DMS_RESPONSE_ID */
+	uint8 len;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp_ie dot11_dms_resp_ie_t;
+#define DOT11_DMS_RESP_IE_LEN 2 /* Fixed length */
+
+/** DMS request descriptor */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_req_desc {
+	uint8 dms_id;
+	uint8 len;
+	uint8 type;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_req_desc dot11_dms_req_desc_t;
+#define DOT11_DMS_REQ_DESC_LEN 3 /* Fixed length */
+
+#define DOT11_DMS_REQ_TYPE_ADD 0
+#define DOT11_DMS_REQ_TYPE_REMOVE 1
+#define DOT11_DMS_REQ_TYPE_CHANGE 2
+
+/** DMS response status */
+BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_st {
+	uint8 dms_id;
+	uint8 len;
+	uint8 type;
+	uint16 lsc;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_dms_resp_st dot11_dms_resp_st_t;
+#define DOT11_DMS_RESP_STATUS_LEN 5 /* Fixed length */
+
+#define DOT11_DMS_RESP_TYPE_ACCEPT 0
+#define DOT11_DMS_RESP_TYPE_DENY 1
+#define DOT11_DMS_RESP_TYPE_TERM 2
+
+#define DOT11_DMS_RESP_LSC_UNSUPPORTED 0xFFFF
+
+/** FMS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_req {
+	uint8 category; /* category of action frame (10) */
+	uint8 action; /* WNM action: fms request (9) */
+	uint8 token; /* dialog token */
+	uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_req dot11_fms_req_t;
+#define DOT11_FMS_REQ_LEN 3 /* Fixed length */
+
+/** FMS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp {
+	uint8 category; /* category of action frame (10) */
+	uint8 action; /* WNM action: fms response (10) */
+	uint8 token; /* dialog token */
+	uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp dot11_fms_resp_t;
+#define DOT11_FMS_RESP_LEN 3 /* Fixed length */
+
+/** FMS Descriptor element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_desc {
+	uint8 id;
+	uint8 len;
+	uint8 num_fms_cnt;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_desc dot11_fms_desc_t;
+#define DOT11_FMS_DESC_LEN 1 /* Fixed length */
+
+#define DOT11_FMS_CNTR_MAX 0x8
+#define DOT11_FMS_CNTR_ID_MASK 0x7
+#define DOT11_FMS_CNTR_ID_SHIFT 0x0
+#define DOT11_FMS_CNTR_COUNT_MASK 0xf1
+#define DOT11_FMS_CNTR_SHIFT 0x3
+
+/** FMS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_req_ie {
+	uint8 id;
+	uint8 len;
+	uint8 fms_token; /* token used to identify fms stream set */
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_req_ie dot11_fms_req_ie_t;
+#define DOT11_FMS_REQ_IE_FIX_LEN 1 /* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_rate_id_field {
+	uint8 mask;
+	uint8 mcs_idx;
+	uint16 rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rate_id_field dot11_rate_id_field_t;
+#define DOT11_RATE_ID_FIELD_MCS_SEL_MASK 0x7
+#define DOT11_RATE_ID_FIELD_MCS_SEL_OFFSET 0
+#define DOT11_RATE_ID_FIELD_RATETYPE_MASK 0x18
+#define DOT11_RATE_ID_FIELD_RATETYPE_OFFSET 3
+#define DOT11_RATE_ID_FIELD_LEN sizeof(dot11_rate_id_field_t)
+
+/** FMS request subelements */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 interval;
+	uint8 max_interval;
+	dot11_rate_id_field_t rate;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_se dot11_fms_se_t;
+#define DOT11_FMS_REQ_SE_LEN 6 /* Fixed length */
+
+#define DOT11_FMS_REQ_SE_ID_FMS 1 /* FMS subelement */
+#define DOT11_FMS_REQ_SE_ID_VS 221 /* Vendor Specific subelement */
+
+/** FMS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp_ie {
+	uint8 id;
+	uint8 len;
+	uint8 fms_token;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp_ie dot11_fms_resp_ie_t;
+#define DOT11_FMS_RESP_IE_FIX_LEN 1 /* Fixed length */
+
+/* FMS status subelements */
+#define DOT11_FMS_STATUS_SE_ID_FMS 1 /* FMS Status */
+#define DOT11_FMS_STATUS_SE_ID_TCLAS 2 /* TCLAS Status */
+#define DOT11_FMS_STATUS_SE_ID_VS 221 /* Vendor Specific subelement */
+
+/** FMS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_status_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 status;
+	uint8 interval;
+	uint8 max_interval;
+	uint8 fmsid;
+	uint8 counter;
+	dot11_rate_id_field_t rate;
+	uint8 mcast_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_status_se dot11_fms_status_se_t;
+#define DOT11_FMS_STATUS_SE_LEN 15 /* Fixed length */
+
+/** TCLAS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_status_se {
+	uint8 sub_id;
+	uint8 len;
+	uint8 fmsid;
+	uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_status_se dot11_tclas_status_se_t;
+#define DOT11_TCLAS_STATUS_SE_LEN 1 /* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_req {
+	uint8 category; /* category of action frame (3) */
+	uint8 action; /* action: addba req */
+	uint8 token; /* identifier */
+	uint16 addba_param_set; /* parameter set */
+	uint16 timeout; /* timeout in seconds */
+	uint16 start_seqnum; /* starting sequence number */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_req dot11_addba_req_t;
+#define DOT11_ADDBA_REQ_LEN 9 /* length of addba req frame */
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_resp {
+	uint8 category; /* category of action frame (3) */
+	uint8 action; /* action: addba resp */
+	uint8 token; /* identifier */
+	uint16 status; /* status of add request */
+	uint16 addba_param_set; /* negotiated parameter set */
+	uint16 timeout; /* negotiated timeout in seconds */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_resp dot11_addba_resp_t;
+#define DOT11_ADDBA_RESP_LEN 9 /* length of addba resp frame */
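+
+/*
+ * Usage sketch (illustrative only, not part of the original header): building
+ * the ADDBA request parameter set (TID 0, immediate BA, 64-frame window, no
+ * A-MSDU) with the masks defined in the Block Ack section above.
+ *
+ *	uint16 param = 0;
+ *	param |= (DOT11_ADDBA_POLICY_IMMEDIATE << DOT11_ADDBA_PARAM_POLICY_SHIFT) &
+ *	         DOT11_ADDBA_PARAM_POLICY_MASK;
+ *	param |= (0 << DOT11_ADDBA_PARAM_TID_SHIFT) & DOT11_ADDBA_PARAM_TID_MASK;
+ *	param |= (64 << DOT11_ADDBA_PARAM_BSIZE_SHIFT) & DOT11_ADDBA_PARAM_BSIZE_MASK;
+ */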
+
+/** FMS request subelements */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_se {
+ uint8 sub_id;
+ uint8 len;
+ uint8 interval;
+ uint8 max_interval;
+ dot11_rate_id_field_t rate;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_se dot11_fms_se_t;
+#define DOT11_FMS_REQ_SE_LEN 6 /* Fixed length */
+
+#define DOT11_FMS_REQ_SE_ID_FMS 1 /* FMS subelement */
+#define DOT11_FMS_REQ_SE_ID_VS 221 /* Vendor Specific subelement */
+
+/** FMS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp_ie {
+ uint8 id;
+ uint8 len;
+ uint8 fms_token;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp_ie dot11_fms_resp_ie_t;
+#define DOT11_FMS_RESP_IE_FIX_LEN 1 /* Fixed length */
+
+/* FMS status subelements */
+#define DOT11_FMS_STATUS_SE_ID_FMS 1 /* FMS Status */
+#define DOT11_FMS_STATUS_SE_ID_TCLAS 2 /* TCLAS Status */
+#define DOT11_FMS_STATUS_SE_ID_VS 221 /* Vendor Specific subelement */
+
+/** FMS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_status_se {
+ uint8 sub_id;
+ uint8 len;
+ uint8 status;
+ uint8 interval;
+ uint8 max_interval;
+ uint8 fmsid;
+ uint8 counter;
+ dot11_rate_id_field_t rate;
+ uint8 mcast_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_status_se dot11_fms_status_se_t;
+#define DOT11_FMS_STATUS_SE_LEN 15 /* Fixed length */
+
+/** TCLAS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_status_se {
+ uint8 sub_id;
+ uint8 len;
+ uint8 fmsid;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_status_se dot11_tclas_status_se_t;
+#define DOT11_TCLAS_STATUS_SE_LEN 1 /* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_req {
+ uint8 category; /* category of action frame (3) */
+ uint8 action; /* action: addba req */
+ uint8 token; /* identifier */
+ uint16 addba_param_set; /* parameter set */
+ uint16 timeout; /* timeout in seconds */
+ uint16 start_seqnum; /* starting sequence number */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_req dot11_addba_req_t;
+#define DOT11_ADDBA_REQ_LEN 9 /* length of addba req frame */
+
+BWL_PRE_PACKED_STRUCT struct dot11_addba_resp {
+ uint8 category; /* category of action frame (3) */
+ uint8 action; /* action: addba resp */
+ uint8 token; /* identifier */
+ uint16 status; /* status of add request */
+ uint16 addba_param_set; /* negotiated parameter set */
+ uint16 timeout; /* negotiated timeout in seconds */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_addba_resp dot11_addba_resp_t;
+#define DOT11_ADDBA_RESP_LEN 9 /* length of addba resp frame */
+
+/* DELBA action parameters */
+#define DOT11_DELBA_PARAM_INIT_MASK 0x0800 /* initiator mask */
+#define DOT11_DELBA_PARAM_INIT_SHIFT 11 /* initiator shift */
+#define DOT11_DELBA_PARAM_TID_MASK 0xf000 /* tid mask */
+#define DOT11_DELBA_PARAM_TID_SHIFT 12 /* tid shift */
+
+BWL_PRE_PACKED_STRUCT struct dot11_delba {
+ uint8 category; /* category of action frame (3) */
+ uint8 action; /* action: delba */
+ uint16 delba_param_set; /* parameter set */
+ uint16 reason; /* reason for delba */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_delba dot11_delba_t;
+#define DOT11_DELBA_LEN 6 /* length of delba frame */
+
+/* SA Query action field value */
+#define SA_QUERY_REQUEST 0
+#define SA_QUERY_RESPONSE 1
+
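+/* Illustrative sketch (example only, not part of the original header): how the
+ * DELBA parameter-set masks above are consumed on receive. ltoh16_ua() is the
+ * unaligned little-endian load already used by this file's opmode macros; the
+ * function name and the BCM_DOC_EXAMPLES guard are hypothetical.
+ */
+#ifdef BCM_DOC_EXAMPLES
+static uint8
+dot11_delba_tid(const dot11_delba_t *delba)
+{
+ uint16 params = ltoh16_ua(&delba->delba_param_set);
+
+ /* TID occupies bits 12..15 of the parameter set */
+ return (uint8)((params & DOT11_DELBA_PARAM_TID_MASK) >> DOT11_DELBA_PARAM_TID_SHIFT);
+}
+#endif /* BCM_DOC_EXAMPLES */
+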
+/* ************* 802.11r related definitions. ************* */
+
+/** Over-the-DS Fast Transition Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_req {
+ uint8 category; /* category of action frame (6) */
+ uint8 action; /* action: ft req */
+ uint8 sta_addr[ETHER_ADDR_LEN];
+ uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_req dot11_ft_req_t;
+#define DOT11_FT_REQ_FIXED_LEN 14
+
+/** Over-the-DS Fast Transition Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_res {
+ uint8 category; /* category of action frame (6) */
+ uint8 action; /* action: ft resp */
+ uint8 sta_addr[ETHER_ADDR_LEN];
+ uint8 tgt_ap_addr[ETHER_ADDR_LEN];
+ uint16 status; /* status code */
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_res dot11_ft_res_t;
+#define DOT11_FT_RES_FIXED_LEN 16
+
+/** RDE RIC Data Element. */
+BWL_PRE_PACKED_STRUCT struct dot11_rde_ie {
+ uint8 id; /* 11r, DOT11_MNG_RDE_ID */
+ uint8 length;
+ uint8 rde_id; /* RDE identifier. */
+ uint8 rd_count; /* Resource Descriptor Count. */
+ uint16 status; /* Status Code. */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rde_ie dot11_rde_ie_t;
+
+/* 11r - Size of the RDE (RIC Data Element) IE, including TLV header. */
+#define DOT11_MNG_RDE_IE_LEN sizeof(dot11_rde_ie_t)
+
+
+/* ************* 802.11k related definitions. ************* */
+
+/* Radio measurements enabled capability ie */
+#define DOT11_RRM_CAP_LEN 5 /* length of rrm cap bitmap */
+#define RCPI_IE_LEN 1
+#define RSNI_IE_LEN 1
+BWL_PRE_PACKED_STRUCT struct dot11_rrm_cap_ie {
+ uint8 cap[DOT11_RRM_CAP_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t;
+
+/* Bitmap definitions for cap ie */
+#define DOT11_RRM_CAP_LINK 0
+#define DOT11_RRM_CAP_NEIGHBOR_REPORT 1
+#define DOT11_RRM_CAP_PARALLEL 2
+#define DOT11_RRM_CAP_REPEATED 3
+#define DOT11_RRM_CAP_BCN_PASSIVE 4
+#define DOT11_RRM_CAP_BCN_ACTIVE 5
+#define DOT11_RRM_CAP_BCN_TABLE 6
+#define DOT11_RRM_CAP_BCN_REP_COND 7
+#define DOT11_RRM_CAP_FM 8
+#define DOT11_RRM_CAP_CLM 9
+#define DOT11_RRM_CAP_NHM 10
+#define DOT11_RRM_CAP_SM 11
+#define DOT11_RRM_CAP_LCIM 12
+#define DOT11_RRM_CAP_LCIA 13
+#define DOT11_RRM_CAP_TSCM 14
+#define DOT11_RRM_CAP_TTSCM 15
+#define DOT11_RRM_CAP_AP_CHANREP 16
+#define DOT11_RRM_CAP_RMMIB 17
+/* bit18-bit26, not used for RRM_IOVAR */
+#define DOT11_RRM_CAP_MPTI 27
+#define DOT11_RRM_CAP_NBRTSFO 28
+#define DOT11_RRM_CAP_RCPI 29
+#define DOT11_RRM_CAP_RSNI 30
+#define DOT11_RRM_CAP_BSSAAD 31
+#define DOT11_RRM_CAP_BSSAAC 32
+#define DOT11_RRM_CAP_AI 33
+#define DOT11_RRM_CAP_FTM_RANGE 34
+#define DOT11_RRM_CAP_CIVIC_LOC 35
+#define DOT11_RRM_CAP_LAST 35
+
+/* Operating Class (formerly "Regulatory Class") definitions */
+#define DOT11_OP_CLASS_NONE 255
+
+BWL_PRE_PACKED_STRUCT struct do11_ap_chrep {
+ uint8 id;
+ uint8 len;
+ uint8 reg;
+ uint8 chanlist[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct do11_ap_chrep dot11_ap_chrep_t;
+
+/* Radio Measurements action ids */
+#define DOT11_RM_ACTION_RM_REQ 0 /* Radio measurement request */
+#define DOT11_RM_ACTION_RM_REP 1 /* Radio measurement report */
+#define DOT11_RM_ACTION_LM_REQ 2 /* Link measurement request */
+#define DOT11_RM_ACTION_LM_REP 3 /* Link measurement report */
+#define DOT11_RM_ACTION_NR_REQ 4 /* Neighbor report request */
+#define DOT11_RM_ACTION_NR_REP 5 /* Neighbor report response */
+
+/** Generic radio measurement action frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_rm_action {
+ uint8 category; /* category of
action frame (5) */ + uint8 action; /* radio measurement action */ + uint8 token; /* dialog token */ + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rm_action dot11_rm_action_t; +#define DOT11_RM_ACTION_LEN 3 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq { + uint8 category; /* category of action frame (5) */ + uint8 action; /* radio measurement action */ + uint8 token; /* dialog token */ + uint16 reps; /* no. of repetitions */ + uint8 data[1]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq dot11_rmreq_t; +#define DOT11_RMREQ_LEN 5 + +BWL_PRE_PACKED_STRUCT struct dot11_rm_ie { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rm_ie dot11_rm_ie_t; +#define DOT11_RM_IE_LEN 5 + +/* Definitions for "mode" bits in rm req */ +#define DOT11_RMREQ_MODE_PARALLEL 1 +#define DOT11_RMREQ_MODE_ENABLE 2 +#define DOT11_RMREQ_MODE_REQUEST 4 +#define DOT11_RMREQ_MODE_REPORT 8 +#define DOT11_RMREQ_MODE_DURMAND 0x10 /* Duration Mandatory */ + +/* Definitions for "mode" bits in rm rep */ +#define DOT11_RMREP_MODE_LATE 1 +#define DOT11_RMREP_MODE_INCAPABLE 2 +#define DOT11_RMREP_MODE_REFUSED 4 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_bcn { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 reg; + uint8 channel; + uint16 interval; + uint16 duration; + uint8 bcn_mode; + struct ether_addr bssid; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t; +#define DOT11_RMREQ_BCN_LEN 18 + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn { + uint8 reg; + uint8 channel; + uint32 starttime[2]; + uint16 duration; + uint8 frame_info; + uint8 rcpi; + uint8 rsni; + struct ether_addr bssid; + uint8 antenna_id; + uint32 parent_tsf; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t; +#define DOT11_RMREP_BCN_LEN 26 + +/* Beacon request measurement mode */ +#define DOT11_RMREQ_BCN_PASSIVE 0 +#define DOT11_RMREQ_BCN_ACTIVE 1 +#define DOT11_RMREQ_BCN_TABLE 2 + +/* Sub-element IDs for Beacon Request */ +#define DOT11_RMREQ_BCN_SSID_ID 0 +#define DOT11_RMREQ_BCN_REPINFO_ID 1 +#define DOT11_RMREQ_BCN_REPDET_ID 2 +#define DOT11_RMREQ_BCN_REQUEST_ID 10 +#define DOT11_RMREQ_BCN_APCHREP_ID DOT11_MNG_AP_CHREP_ID + +/* Reporting Detail element definition */ +#define DOT11_RMREQ_BCN_REPDET_FIXED 0 /* Fixed length fields only */ +#define DOT11_RMREQ_BCN_REPDET_REQUEST 1 /* + requested information elems */ +#define DOT11_RMREQ_BCN_REPDET_ALL 2 /* All fields */ + +/* Reporting Information (reporting condition) element definition */ +#define DOT11_RMREQ_BCN_REPINFO_LEN 2 /* Beacon Reporting Information length */ +#define DOT11_RMREQ_BCN_REPCOND_DEFAULT 0 /* Report to be issued after each measurement */ + +/* Sub-element IDs for Beacon Report */ +#define DOT11_RMREP_BCN_FRM_BODY 1 +#define DOT11_RMREP_BCN_FRM_BODY_LEN_MAX 224 /* 802.11k-2008 7.3.2.22.6 */ + +/* Sub-element IDs for Frame Report */ +#define DOT11_RMREP_FRAME_COUNT_REPORT 1 + +/* Statistics Group Report: Group IDs */ +#define DOT11_RRM_STATS_GRP_ID_0 0 + +/* Statistics Group Report: Group Data length */ +#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_0 28 + +/* Channel load request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_chanload { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 reg; + uint8 channel; + uint16 interval; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_chanload dot11_rmreq_chanload_t; +#define DOT11_RMREQ_CHANLOAD_LEN 11 + +/** Channel load report */ 
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_chanload { + uint8 reg; + uint8 channel; + uint32 starttime[2]; + uint16 duration; + uint8 channel_load; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_chanload dot11_rmrep_chanload_t; +#define DOT11_RMREP_CHANLOAD_LEN 13 + +/** Noise histogram request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_noise { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 reg; + uint8 channel; + uint16 interval; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_noise dot11_rmreq_noise_t; +#define DOT11_RMREQ_NOISE_LEN 11 + +/** Noise histogram report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_noise { + uint8 reg; + uint8 channel; + uint32 starttime[2]; + uint16 duration; + uint8 antid; + uint8 anpi; + uint8 ipi0_dens; + uint8 ipi1_dens; + uint8 ipi2_dens; + uint8 ipi3_dens; + uint8 ipi4_dens; + uint8 ipi5_dens; + uint8 ipi6_dens; + uint8 ipi7_dens; + uint8 ipi8_dens; + uint8 ipi9_dens; + uint8 ipi10_dens; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_noise dot11_rmrep_noise_t; +#define DOT11_RMREP_NOISE_LEN 25 + +/** Frame request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_frame { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 reg; + uint8 channel; + uint16 interval; + uint16 duration; + uint8 req_type; + struct ether_addr ta; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_frame dot11_rmreq_frame_t; +#define DOT11_RMREQ_FRAME_LEN 18 + +/** Frame report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frame { + uint8 reg; + uint8 channel; + uint32 starttime[2]; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_frame dot11_rmrep_frame_t; +#define DOT11_RMREP_FRAME_LEN 12 + +/** Frame report entry */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frmentry { + struct ether_addr ta; + struct ether_addr bssid; + uint8 phy_type; + uint8 avg_rcpi; + uint8 last_rsni; + uint8 last_rcpi; + uint8 ant_id; + uint16 frame_cnt; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_frmentry dot11_rmrep_frmentry_t; +#define DOT11_RMREP_FRMENTRY_LEN 19 + +/** STA statistics request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_stat { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + struct ether_addr peer; + uint16 interval; + uint16 duration; + uint8 group_id; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_stat dot11_rmreq_stat_t; +#define DOT11_RMREQ_STAT_LEN 16 + +/** STA statistics report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_stat { + uint16 duration; + uint8 group_id; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_stat dot11_rmrep_stat_t; + +/** Transmit stream/category measurement request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_tx_stream { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint16 interval; + uint16 duration; + struct ether_addr peer; + uint8 traffic_id; + uint8 bin0_range; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_tx_stream dot11_rmreq_tx_stream_t; + +/** Transmit stream/category measurement report */ +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_tx_stream { + uint32 starttime[2]; + uint16 duration; + struct ether_addr peer; + uint8 traffic_id; + uint8 reason; + uint32 txmsdu_cnt; + uint32 msdu_discarded_cnt; + uint32 msdufailed_cnt; + uint32 msduretry_cnt; + uint32 cfpolls_lost_cnt; + uint32 avrqueue_delay; + uint32 avrtx_delay; + uint8 bin0_range; + uint32 bin0; + uint32 bin1; + uint32 bin2; + uint32 bin3; + uint32 bin4; + uint32 bin5; +} BWL_POST_PACKED_STRUCT; 
+typedef struct dot11_rmrep_tx_stream dot11_rmrep_tx_stream_t; + +enum { + DOT11_FTM_LOCATION_SUBJ_LOCAL = 0, /* Where am I? */ + DOT11_FTM_LOCATION_SUBJ_REMOTE = 1, /* Where are you? */ + DOT11_FTM_LOCATION_SUBJ_THIRDPARTY = 2 /* Where is he/she? */ +}; + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_lci { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 subj; + + /* Following 3 fields are unused. Keep for ROM compatibility. */ + uint8 lat_res; + uint8 lon_res; + uint8 alt_res; + + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_ftm_lci dot11_rmreq_ftm_lci_t; + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_lci { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 lci_sub_id; + uint8 lci_sub_len; + /* optional LCI field */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_ftm_lci dot11_rmrep_ftm_lci_t; + +#define DOT11_FTM_LCI_SUBELEM_ID 0 +#define DOT11_FTM_LCI_SUBELEM_LEN 2 +#define DOT11_FTM_LCI_FIELD_LEN 16 +#define DOT11_FTM_LCI_UNKNOWN_LEN 2 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_civic { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 subj; + uint8 civloc_type; + uint8 siu; /* service interval units */ + uint16 si; /* service interval */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_ftm_civic dot11_rmreq_ftm_civic_t; + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_civic { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint8 civloc_type; + uint8 civloc_sub_id; + uint8 civloc_sub_len; + /* optional location civic field */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_ftm_civic dot11_rmrep_ftm_civic_t; + +#define DOT11_FTM_CIVIC_LOC_TYPE_RFC4776 0 +#define DOT11_FTM_CIVIC_SUBELEM_ID 0 +#define DOT11_FTM_CIVIC_SUBELEM_LEN 2 +#define DOT11_FTM_CIVIC_LOC_SI_NONE 0 +#define DOT11_FTM_CIVIC_TYPE_LEN 1 +#define DOT11_FTM_CIVIC_UNKNOWN_LEN 3 + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_subel { + uint8 id; + uint8 len; + uint16 max_age; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_range_subel dot11_ftm_range_subel_t; +#define DOT11_FTM_RANGE_SUBELEM_ID 4 +#define DOT11_FTM_RANGE_SUBELEM_LEN 2 + +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_range { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint16 max_init_delay; /* maximum random initial delay */ + uint8 min_ap_count; + uint8 data[1]; + /* neighbor report sub-elements */ + /* optional sub-elements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_ftm_range dot11_rmreq_ftm_range_t; +#define DOT11_RMREQ_FTM_RANGE_LEN 8 + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_entry { + uint32 start_tsf; /* 4 lsb of tsf */ + struct ether_addr bssid; + uint16 range; + uint16 max_err; + uint8 rsvd; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_range_entry dot11_ftm_range_entry_t; +#define DOT11_FTM_RANGE_ENTRY_MAX_COUNT 15 + +enum { + DOT11_FTM_RANGE_ERROR_AP_INCAPABLE = 3, + DOT11_FTM_RANGE_ERROR_AP_FAILED = 4, + DOT11_FTM_RANGE_ERROR_TX_FAILED = 8, + DOT11_FTM_RANGE_ERROR_MAX +}; + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_error_entry { + uint32 start_tsf; /* 4 lsb of tsf */ + struct ether_addr bssid; + uint8 code; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_range_error_entry dot11_ftm_range_error_entry_t; +#define DOT11_FTM_RANGE_ERROR_ENTRY_MAX_COUNT 11 + +BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_range { + uint8 id; + uint8 len; + 
uint8 token; + uint8 mode; + uint8 type; + uint8 entry_count; + uint8 data[2]; /* includes pad */ + /* + dot11_ftm_range_entry_t entries[entry_count]; + uint8 error_count; + dot11_ftm_error_entry_t errors[error_count]; + */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmrep_ftm_range dot11_rmrep_ftm_range_t; + +#define DOT11_FTM_RANGE_REP_MIN_LEN 6 /* No extra byte for error_count */ +#define DOT11_FTM_RANGE_ENTRY_CNT_MAX 15 +#define DOT11_FTM_RANGE_ERROR_CNT_MAX 11 +#define DOT11_FTM_RANGE_REP_FIXED_LEN 1 /* No extra byte for error_count */ +/** Measurement pause request */ +BWL_PRE_PACKED_STRUCT struct dot11_rmreq_pause_time { + uint8 id; + uint8 len; + uint8 token; + uint8 mode; + uint8 type; + uint16 pause_time; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_rmreq_pause_time dot11_rmreq_pause_time_t; + + +/* Neighbor Report subelements ID (11k & 11v) */ +#define DOT11_NGBR_TSF_INFO_SE_ID 1 +#define DOT11_NGBR_CCS_SE_ID 2 +#define DOT11_NGBR_BSSTRANS_PREF_SE_ID 3 +#define DOT11_NGBR_BSS_TERM_DUR_SE_ID 4 +#define DOT11_NGBR_BEARING_SE_ID 5 + +/** Neighbor Report, BSS Transition Candidate Preference subelement */ +BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bsstrans_pref_se { + uint8 sub_id; + uint8 len; + uint8 preference; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ngbr_bsstrans_pref_se dot11_ngbr_bsstrans_pref_se_t; +#define DOT11_NGBR_BSSTRANS_PREF_SE_LEN 1 + +/** Neighbor Report, BSS Termination Duration subelement */ +BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bss_term_dur_se { + uint8 sub_id; + uint8 len; + uint8 tsf[8]; + uint16 duration; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ngbr_bss_term_dur_se dot11_ngbr_bss_term_dur_se_t; +#define DOT11_NGBR_BSS_TERM_DUR_SE_LEN 10 + +/* Neighbor Report BSSID Information Field */ +#define DOT11_NGBR_BI_REACHABILTY_UNKN 0x0002 +#define DOT11_NGBR_BI_REACHABILTY 0x0003 +#define DOT11_NGBR_BI_SEC 0x0004 +#define DOT11_NGBR_BI_KEY_SCOPE 0x0008 +#define DOT11_NGBR_BI_CAP 0x03f0 +#define DOT11_NGBR_BI_CAP_SPEC_MGMT 0x0010 +#define DOT11_NGBR_BI_CAP_QOS 0x0020 +#define DOT11_NGBR_BI_CAP_APSD 0x0040 +#define DOT11_NGBR_BI_CAP_RDIO_MSMT 0x0080 +#define DOT11_NGBR_BI_CAP_DEL_BA 0x0100 +#define DOT11_NGBR_BI_CAP_IMM_BA 0x0200 +#define DOT11_NGBR_BI_MOBILITY 0x0400 +#define DOT11_NGBR_BI_HT 0x0800 +#define DOT11_NGBR_BI_VHT 0x1000 +#define DOT11_NGBR_BI_FTM 0x2000 + +/** Neighbor Report element (11k & 11v) */ +BWL_PRE_PACKED_STRUCT struct dot11_neighbor_rep_ie { + uint8 id; + uint8 len; + struct ether_addr bssid; + uint32 bssid_info; + uint8 reg; /* Operating class */ + uint8 channel; + uint8 phytype; + uint8 data[1]; /* Variable size subelements */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_neighbor_rep_ie dot11_neighbor_rep_ie_t; +#define DOT11_NEIGHBOR_REP_IE_FIXED_LEN 13 + + +/* MLME Enumerations */ +#define DOT11_BSSTYPE_INFRASTRUCTURE 0 /* d11 infrastructure */ +#define DOT11_BSSTYPE_INDEPENDENT 1 /* d11 independent */ +#define DOT11_BSSTYPE_ANY 2 /* d11 any BSS type */ +#define DOT11_BSSTYPE_MESH 3 /* d11 Mesh */ +#define DOT11_SCANTYPE_ACTIVE 0 /* d11 scan active */ +#define DOT11_SCANTYPE_PASSIVE 1 /* d11 scan passive */ + +/** Link Measurement */ +BWL_PRE_PACKED_STRUCT struct dot11_lmreq { + uint8 category; /* category of action frame (5) */ + uint8 action; /* radio measurement action */ + uint8 token; /* dialog token */ + uint8 txpwr; /* Transmit Power Used */ + uint8 maxtxpwr; /* Max Transmit Power */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_lmreq dot11_lmreq_t; +#define DOT11_LMREQ_LEN 5 + +BWL_PRE_PACKED_STRUCT 
struct dot11_lmrep { + uint8 category; /* category of action frame (5) */ + uint8 action; /* radio measurement action */ + uint8 token; /* dialog token */ + dot11_tpc_rep_t tpc; /* TPC element */ + uint8 rxant; /* Receive Antenna ID */ + uint8 txant; /* Transmit Antenna ID */ + uint8 rcpi; /* RCPI */ + uint8 rsni; /* RSNI */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_lmrep dot11_lmrep_t; +#define DOT11_LMREP_LEN 11 + +/* 802.11 BRCM "Compromise" Pre N constants */ +#define PREN_PREAMBLE 24 /* green field preamble time */ +#define PREN_MM_EXT 12 /* extra mixed mode preamble time */ +#define PREN_PREAMBLE_EXT 4 /* extra preamble (multiply by unique_streams-1) */ + +/* 802.11N PHY constants */ +#define RIFS_11N_TIME 2 /* NPHY RIFS time */ + +/* 802.11 HT PLCP format 802.11n-2009, sec 20.3.9.4.3 + * HT-SIG is composed of two 24 bit parts, HT-SIG1 and HT-SIG2 + */ +/* HT-SIG1 */ +#define HT_SIG1_MCS_MASK 0x00007F +#define HT_SIG1_CBW 0x000080 +#define HT_SIG1_HT_LENGTH 0xFFFF00 + +/* HT-SIG2 */ +#define HT_SIG2_SMOOTHING 0x000001 +#define HT_SIG2_NOT_SOUNDING 0x000002 +#define HT_SIG2_RESERVED 0x000004 +#define HT_SIG2_AGGREGATION 0x000008 +#define HT_SIG2_STBC_MASK 0x000030 +#define HT_SIG2_STBC_SHIFT 4 +#define HT_SIG2_FEC_CODING 0x000040 +#define HT_SIG2_SHORT_GI 0x000080 +#define HT_SIG2_ESS_MASK 0x000300 +#define HT_SIG2_ESS_SHIFT 8 +#define HT_SIG2_CRC 0x03FC00 +#define HT_SIG2_TAIL 0x1C0000 + +/* HT Timing-related parameters (802.11-2012, sec 20.3.6) */ +#define HT_T_LEG_PREAMBLE 16 +#define HT_T_L_SIG 4 +#define HT_T_SIG 8 +#define HT_T_LTF1 4 +#define HT_T_GF_LTF1 8 +#define HT_T_LTFs 4 +#define HT_T_STF 4 +#define HT_T_GF_STF 8 +#define HT_T_SYML 4 + +#define HT_N_SERVICE 16 /* bits in SERVICE field */ +#define HT_N_TAIL 6 /* tail bits per BCC encoder */ + +/* 802.11 A PHY constants */ +#define APHY_SLOT_TIME 9 /* APHY slot time */ +#define APHY_SIFS_TIME 16 /* APHY SIFS time */ +#define APHY_DIFS_TIME (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME)) /* APHY DIFS time */ +#define APHY_PREAMBLE_TIME 16 /* APHY preamble time */ +#define APHY_SIGNAL_TIME 4 /* APHY signal time */ +#define APHY_SYMBOL_TIME 4 /* APHY symbol time */ +#define APHY_SERVICE_NBITS 16 /* APHY service nbits */ +#define APHY_TAIL_NBITS 6 /* APHY tail nbits */ +#define APHY_CWMIN 15 /* APHY cwmin */ +#define APHY_PHYHDR_DUR 20 /* APHY PHY Header Duration */ + +/* 802.11 B PHY constants */ +#define BPHY_SLOT_TIME 20 /* BPHY slot time */ +#define BPHY_SIFS_TIME 10 /* BPHY SIFS time */ +#define BPHY_DIFS_TIME 50 /* BPHY DIFS time */ +#define BPHY_PLCP_TIME 192 /* BPHY PLCP time */ +#define BPHY_PLCP_SHORT_TIME 96 /* BPHY PLCP short time */ +#define BPHY_CWMIN 31 /* BPHY cwmin */ +#define BPHY_SHORT_PHYHDR_DUR 96 /* BPHY Short PHY Header Duration */ +#define BPHY_LONG_PHYHDR_DUR 192 /* BPHY Long PHY Header Duration */ + +/* 802.11 G constants */ +#define DOT11_OFDM_SIGNAL_EXTENSION 6 /* d11 OFDM signal extension */ + +#define PHY_CWMAX 1023 /* PHY cwmax */ + +#define DOT11_MAXNUMFRAGS 16 /* max # fragments per MSDU */ + +/* 802.11 VHT constants */ + +typedef int vht_group_id_t; + +/* for VHT-A1 */ +/* SIG-A1 reserved bits */ +#define VHT_SIGA1_CONST_MASK 0x800004 + +#define VHT_SIGA1_BW_MASK 0x000003 +#define VHT_SIGA1_20MHZ_VAL 0x000000 +#define VHT_SIGA1_40MHZ_VAL 0x000001 +#define VHT_SIGA1_80MHZ_VAL 0x000002 +#define VHT_SIGA1_160MHZ_VAL 0x000003 + +#define VHT_SIGA1_STBC 0x000008 + +#define VHT_SIGA1_GID_MASK 0x0003f0 +#define VHT_SIGA1_GID_SHIFT 4 +#define VHT_SIGA1_GID_TO_AP 0x00 +#define VHT_SIGA1_GID_NOT_TO_AP 
0x3f +#define VHT_SIGA1_GID_MAX_GID 0x3f + +#define VHT_SIGA1_NSTS_SHIFT_MASK_USER0 0x001C00 +#define VHT_SIGA1_NSTS_SHIFT 10 +#define VHT_SIGA1_MAX_USERPOS 3 + +#define VHT_SIGA1_PARTIAL_AID_MASK 0x3fe000 +#define VHT_SIGA1_PARTIAL_AID_SHIFT 13 + +#define VHT_SIGA1_TXOP_PS_NOT_ALLOWED 0x400000 + +/* for VHT-A2 */ +#define VHT_SIGA2_GI_NONE 0x000000 +#define VHT_SIGA2_GI_SHORT 0x000001 +#define VHT_SIGA2_GI_W_MOD10 0x000002 +#define VHT_SIGA2_CODING_LDPC 0x000004 +#define VHT_SIGA2_LDPC_EXTRA_OFDM_SYM 0x000008 +#define VHT_SIGA2_BEAMFORM_ENABLE 0x000100 +#define VHT_SIGA2_MCS_SHIFT 4 + +#define VHT_SIGA2_B9_RESERVED 0x000200 +#define VHT_SIGA2_TAIL_MASK 0xfc0000 +#define VHT_SIGA2_TAIL_VALUE 0x000000 + +/* VHT Timing-related parameters (802.11ac D4.0, sec 22.3.6) */ +#define VHT_T_LEG_PREAMBLE 16 +#define VHT_T_L_SIG 4 +#define VHT_T_SIG_A 8 +#define VHT_T_LTF 4 +#define VHT_T_STF 4 +#define VHT_T_SIG_B 4 +#define VHT_T_SYML 4 + +#define VHT_N_SERVICE 16 /* bits in SERVICE field */ +#define VHT_N_TAIL 6 /* tail bits per BCC encoder */ + + +/** dot11Counters Table - 802.11 spec., Annex D */ +typedef struct d11cnt { + uint32 txfrag; /* dot11TransmittedFragmentCount */ + uint32 txmulti; /* dot11MulticastTransmittedFrameCount */ + uint32 txfail; /* dot11FailedCount */ + uint32 txretry; /* dot11RetryCount */ + uint32 txretrie; /* dot11MultipleRetryCount */ + uint32 rxdup; /* dot11FrameduplicateCount */ + uint32 txrts; /* dot11RTSSuccessCount */ + uint32 txnocts; /* dot11RTSFailureCount */ + uint32 txnoack; /* dot11ACKFailureCount */ + uint32 rxfrag; /* dot11ReceivedFragmentCount */ + uint32 rxmulti; /* dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /* dot11FCSErrorCount */ + uint32 txfrmsnt; /* dot11TransmittedFrameCount */ + uint32 rxundec; /* dot11WEPUndecryptableCount */ +} d11cnt_t; + +#define BRCM_PROP_OUI "\x00\x90\x4C" + + +/* Action frame type for FTM Initiator Report */ +#define BRCM_FTM_VS_AF_TYPE 14 +enum { + BRCM_FTM_VS_INITIATOR_RPT_SUBTYPE = 1, /* FTM Initiator Report */ + BRCM_FTM_VS_COLLECT_SUBTYPE = 2, /* FTM Collect debug protocol */ +}; + +/* Action frame type for RWL */ +#define RWL_WIFI_DEFAULT 0 +#define RWL_WIFI_FIND_MY_PEER 9 /* Used while finding server */ +#define RWL_WIFI_FOUND_PEER 10 /* Server response to the client */ +#define RWL_ACTION_WIFI_FRAG_TYPE 85 /* Fragment indicator for receiver */ + +#define PROXD_AF_TYPE 11 /* Wifi proximity action frame type */ +#define BRCM_RELMACST_AF_TYPE 12 /* RMC action frame type */ + + + +/* brcm syscap_ie cap */ +#define BRCM_SYSCAP_WET_TUNNEL 0x0100 /* Device with WET_TUNNEL support */ + +#define BRCM_OUI "\x00\x10\x18" /* Broadcom OUI */ + +/** BRCM info element */ +BWL_PRE_PACKED_STRUCT struct brcm_ie { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[3]; + uint8 ver; /* type/ver of this IE */ + uint8 assoc; /* # of assoc STAs */ + uint8 flags; /* misc flags */ + uint8 flags1; /* misc flags */ + uint16 amsdu_mtu_pref; /* preferred A-MSDU MTU */ +} BWL_POST_PACKED_STRUCT; +typedef struct brcm_ie brcm_ie_t; +#define BRCM_IE_LEN 11 /* BRCM IE length */ +#define BRCM_IE_VER 2 /* BRCM IE version */ +#define BRCM_IE_LEGACY_AES_VER 1 /* BRCM IE legacy AES version */ + +/* brcm_ie flags */ +#define BRF_ABCAP 0x1 /* afterburner is obsolete, defined for backward compat */ +#define BRF_ABRQRD 0x2 /* afterburner is obsolete, defined for backward compat */ +#define BRF_LZWDS 0x4 /* lazy wds enabled */ +#define BRF_BLOCKACK 0x8 /* BlockACK capable */ +#define BRF_ABCOUNTER_MASK 0xf0 /* 
afterburner is obsolete, defined for backward compat */
+#define BRF_PROP_11N_MCS 0x10 /* re-use afterburner bit */
+#define BRF_MEDIA_CLIENT 0x20 /* re-use afterburner bit to indicate media client device */
+
+#define GET_BRF_PROP_11N_MCS(brcm_ie) \
+ (!((brcm_ie)->flags & BRF_ABCAP) && ((brcm_ie)->flags & BRF_PROP_11N_MCS))
+
+/* brcm_ie flags1 */
+#define BRF1_AMSDU 0x1 /* A-MSDU capable */
+#define BRF1_WNM 0x2 /* WNM capable */
+#define BRF1_WMEPS 0x4 /* AP is capable of handling WME + PS w/o APSD */
+#define BRF1_PSOFIX 0x8 /* AP has fixed PS mode out-of-order packets */
+#define BRF1_RX_LARGE_AGG 0x10 /* device can rx large aggregates */
+#define BRF1_RFAWARE_DCS 0x20 /* RFAWARE dynamic channel selection (DCS) */
+#define BRF1_SOFTAP 0x40 /* Configure as Broadcom SOFTAP */
+#define BRF1_DWDS 0x80 /* DWDS capable */
+
+/** Vendor IE structure */
+BWL_PRE_PACKED_STRUCT struct vndr_ie {
+ uchar id;
+ uchar len;
+ uchar oui [3];
+ uchar data [1]; /* Variable size data */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vndr_ie vndr_ie_t;
+
+#define VNDR_IE_HDR_LEN 2 /* id + len field */
+#define VNDR_IE_MIN_LEN 3 /* size of the oui field */
+#define VNDR_IE_FIXED_LEN (VNDR_IE_HDR_LEN + VNDR_IE_MIN_LEN)
+
+#define VNDR_IE_MAX_LEN 255 /* vendor IE max length, without ID and len */
+
+/** BRCM PROP DEVICE PRIMARY MAC ADDRESS IE */
+BWL_PRE_PACKED_STRUCT struct member_of_brcm_prop_ie {
+ uchar id;
+ uchar len;
+ uchar oui[3];
+ uint8 type; /* type indicates what follows */
+ struct ether_addr ea; /* Device Primary MAC Address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct member_of_brcm_prop_ie member_of_brcm_prop_ie_t;
+
+#define MEMBER_OF_BRCM_PROP_IE_LEN 10 /* IE max length */
+#define MEMBER_OF_BRCM_PROP_IE_HDRLEN (sizeof(member_of_brcm_prop_ie_t))
+#define MEMBER_OF_BRCM_PROP_IE_TYPE 54
+
+/** BRCM Reliable Multicast IE */
+BWL_PRE_PACKED_STRUCT struct relmcast_brcm_prop_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type; /* type indicates what follows */
+ struct ether_addr ea; /* The ack sender's MAC Address */
+ struct ether_addr mcast_ea; /* The multicast MAC address */
+ uint8 updtmo; /* time interval(second) for client to send null packet to report its rssi */
+} BWL_POST_PACKED_STRUCT;
+typedef struct relmcast_brcm_prop_ie relmcast_brcm_prop_ie_t;
+
+/* IE length */
+/* BRCM_PROP_IE_LEN = sizeof(relmcast_brcm_prop_ie_t)-((sizeof (id) + sizeof (len)))? */
+#define RELMCAST_BRCM_PROP_IE_LEN (sizeof(relmcast_brcm_prop_ie_t)-(2*sizeof(uint8)))
+
+#define RELMCAST_BRCM_PROP_IE_TYPE 55
+
+/* BRCM BTC IE */
+BWL_PRE_PACKED_STRUCT struct btc_brcm_prop_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type; /* type indicates what follows */
+ uint32 info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct btc_brcm_prop_ie btc_brcm_prop_ie_t;
+
+#define BTC_INFO_BRCM_PROP_IE_TYPE 90
+#define BRCM_BTC_INFO_TYPE_LEN (sizeof(btc_brcm_prop_ie_t) - (2 * sizeof(uint8)))
+
+/* ************* HT definitions. ************* */
+#define MCSSET_LEN 16 /* 16-bits per 8-bit set to give 128-bits bitmap of MCS Index */
+#define MAX_MCS_NUM (128) /* max mcs number = 128 */
+
+BWL_PRE_PACKED_STRUCT struct ht_cap_ie {
+ uint16 cap;
+ uint8 params;
+ uint8 supp_mcs[MCSSET_LEN];
+ uint16 ext_htcap;
+ uint32 txbf_cap;
+ uint8 as_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_cap_ie ht_cap_ie_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ht_cap_ie {
+ uint8 id;
+ uint8 len;
+ ht_cap_ie_t ht_cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ht_cap_ie dot11_ht_cap_ie_t;
+
+/* CAP IE: HT 1.0 spec.
simply stole a 802.11 IE, we use our prop. IE until this is resolved */ +/* the capability IE is primarily used to convey this nodes abilities */ +BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie { + uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ + uint8 len; /* IE length */ + uint8 oui[3]; + uint8 type; /* type indicates what follows */ + ht_cap_ie_t cap_ie; +} BWL_POST_PACKED_STRUCT; +typedef struct ht_prop_cap_ie ht_prop_cap_ie_t; + +#define HT_PROP_IE_OVERHEAD 4 /* overhead bytes for prop oui ie */ +#define HT_CAP_IE_LEN 26 /* HT capability len (based on .11n d2.0) */ +#define HT_CAP_IE_TYPE 51 + +#define HT_CAP_LDPC_CODING 0x0001 /* Support for rx of LDPC coded pkts */ +#define HT_CAP_40MHZ 0x0002 /* FALSE:20Mhz, TRUE:20/40MHZ supported */ +#define HT_CAP_MIMO_PS_MASK 0x000C /* Mimo PS mask */ +#define HT_CAP_MIMO_PS_SHIFT 0x0002 /* Mimo PS shift */ +#define HT_CAP_MIMO_PS_OFF 0x0003 /* Mimo PS, no restriction */ +#define HT_CAP_MIMO_PS_RTS 0x0001 /* Mimo PS, send RTS/CTS around MIMO frames */ +#define HT_CAP_MIMO_PS_ON 0x0000 /* Mimo PS, MIMO disallowed */ +#define HT_CAP_GF 0x0010 /* Greenfield preamble support */ +#define HT_CAP_SHORT_GI_20 0x0020 /* 20MHZ short guard interval support */ +#define HT_CAP_SHORT_GI_40 0x0040 /* 40Mhz short guard interval support */ +#define HT_CAP_TX_STBC 0x0080 /* Tx STBC support */ +#define HT_CAP_RX_STBC_MASK 0x0300 /* Rx STBC mask */ +#define HT_CAP_RX_STBC_SHIFT 8 /* Rx STBC shift */ +#define HT_CAP_DELAYED_BA 0x0400 /* delayed BA support */ +#define HT_CAP_MAX_AMSDU 0x0800 /* Max AMSDU size in bytes , 0=3839, 1=7935 */ + +#define HT_CAP_DSSS_CCK 0x1000 /* DSSS/CCK supported by the BSS */ +#define HT_CAP_PSMP 0x2000 /* Power Save Multi Poll support */ +#define HT_CAP_40MHZ_INTOLERANT 0x4000 /* 40MHz Intolerant */ +#define HT_CAP_LSIG_TXOP 0x8000 /* L-SIG TXOP protection support */ + +#define HT_CAP_RX_STBC_NO 0x0 /* no rx STBC support */ +#define HT_CAP_RX_STBC_ONE_STREAM 0x1 /* rx STBC support of 1 spatial stream */ +#define HT_CAP_RX_STBC_TWO_STREAM 0x2 /* rx STBC support of 1-2 spatial streams */ +#define HT_CAP_RX_STBC_THREE_STREAM 0x3 /* rx STBC support of 1-3 spatial streams */ + + +#define HT_CAP_TXBF_CAP_IMPLICIT_TXBF_RX 0x1 +#define HT_CAP_TXBF_CAP_NDP_RX 0x8 +#define HT_CAP_TXBF_CAP_NDP_TX 0x10 +#define HT_CAP_TXBF_CAP_EXPLICIT_CSI 0x100 +#define HT_CAP_TXBF_CAP_EXPLICIT_NC_STEERING 0x200 +#define HT_CAP_TXBF_CAP_EXPLICIT_C_STEERING 0x400 +#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_MASK 0x1800 +#define HT_CAP_TXBF_CAP_EXPLICIT_CSI_FB_SHIFT 11 +#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_MASK 0x6000 +#define HT_CAP_TXBF_CAP_EXPLICIT_NC_FB_SHIFT 13 +#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_MASK 0x18000 +#define HT_CAP_TXBF_CAP_EXPLICIT_C_FB_SHIFT 15 +#define HT_CAP_TXBF_CAP_CSI_BFR_ANT_SHIFT 19 +#define HT_CAP_TXBF_CAP_NC_BFR_ANT_SHIFT 21 +#define HT_CAP_TXBF_CAP_C_BFR_ANT_SHIFT 23 +#define HT_CAP_TXBF_CAP_C_BFR_ANT_MASK 0x1800000 + +#define HT_CAP_TXBF_CAP_CHAN_ESTIM_SHIFT 27 +#define HT_CAP_TXBF_CAP_CHAN_ESTIM_MASK 0x18000000 + +#define HT_CAP_TXBF_FB_TYPE_NONE 0 +#define HT_CAP_TXBF_FB_TYPE_DELAYED 1 +#define HT_CAP_TXBF_FB_TYPE_IMMEDIATE 2 +#define HT_CAP_TXBF_FB_TYPE_BOTH 3 + +#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_MASK 0x400 +#define HT_CAP_TX_BF_CAP_EXPLICIT_CSI_FB_SHIFT 10 +#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_MASK 0x18000 +#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_SHIFT 15 + +#define VHT_MAX_MPDU 11454 /* max mpdu size for now (bytes) */ +#define VHT_MPDU_MSDU_DELTA 56 /* Difference in spec - vht mpdu, amsdu len */ +/* Max 
AMSDU len - per spec */ +#define VHT_MAX_AMSDU (VHT_MAX_MPDU - VHT_MPDU_MSDU_DELTA) + +#define HT_MAX_AMSDU 7935 /* max amsdu size (bytes) per the HT spec */ +#define HT_MIN_AMSDU 3835 /* min amsdu size (bytes) per the HT spec */ + +#define HT_PARAMS_RX_FACTOR_MASK 0x03 /* ampdu rcv factor mask */ +#define HT_PARAMS_DENSITY_MASK 0x1C /* ampdu density mask */ +#define HT_PARAMS_DENSITY_SHIFT 2 /* ampdu density shift */ + +/* HT/AMPDU specific define */ +#define AMPDU_MAX_MPDU_DENSITY 7 /* max mpdu density; in 1/4 usec units */ +#define AMPDU_DENSITY_NONE 0 /* No density requirement */ +#define AMPDU_DENSITY_1over4_US 1 /* 1/4 us density */ +#define AMPDU_DENSITY_1over2_US 2 /* 1/2 us density */ +#define AMPDU_DENSITY_1_US 3 /* 1 us density */ +#define AMPDU_DENSITY_2_US 4 /* 2 us density */ +#define AMPDU_DENSITY_4_US 5 /* 4 us density */ +#define AMPDU_DENSITY_8_US 6 /* 8 us density */ +#define AMPDU_DENSITY_16_US 7 /* 16 us density */ +#define AMPDU_RX_FACTOR_8K 0 /* max rcv ampdu len (8kb) */ +#define AMPDU_RX_FACTOR_16K 1 /* max rcv ampdu len (16kb) */ +#define AMPDU_RX_FACTOR_32K 2 /* max rcv ampdu len (32kb) */ +#define AMPDU_RX_FACTOR_64K 3 /* max rcv ampdu len (64kb) */ + +/* AMPDU RX factors for VHT rates */ +#define AMPDU_RX_FACTOR_128K 4 /* max rcv ampdu len (128kb) */ +#define AMPDU_RX_FACTOR_256K 5 /* max rcv ampdu len (256kb) */ +#define AMPDU_RX_FACTOR_512K 6 /* max rcv ampdu len (512kb) */ +#define AMPDU_RX_FACTOR_1024K 7 /* max rcv ampdu len (1024kb) */ + +#define AMPDU_RX_FACTOR_BASE 8*1024 /* ampdu factor base for rx len */ +#define AMPDU_RX_FACTOR_BASE_PWR 13 /* ampdu factor base for rx len in power of 2 */ + +#define AMPDU_DELIMITER_LEN 4 /* length of ampdu delimiter */ +#define AMPDU_DELIMITER_LEN_MAX 63 /* max length of ampdu delimiter(enforced in HW) */ + +#define HT_CAP_EXT_PCO 0x0001 +#define HT_CAP_EXT_PCO_TTIME_MASK 0x0006 +#define HT_CAP_EXT_PCO_TTIME_SHIFT 1 +#define HT_CAP_EXT_MCS_FEEDBACK_MASK 0x0300 +#define HT_CAP_EXT_MCS_FEEDBACK_SHIFT 8 +#define HT_CAP_EXT_HTC 0x0400 +#define HT_CAP_EXT_RD_RESP 0x0800 + +/** 'ht_add' is called 'HT Operation' information element in the 802.11 standard */ +BWL_PRE_PACKED_STRUCT struct ht_add_ie { + uint8 ctl_ch; /* control channel number */ + uint8 byte1; /* ext ch,rec. ch. width, RIFS support */ + uint16 opmode; /* operation mode */ + uint16 misc_bits; /* misc bits */ + uint8 basic_mcs[MCSSET_LEN]; /* required MCS set */ +} BWL_POST_PACKED_STRUCT; +typedef struct ht_add_ie ht_add_ie_t; + +/* ADD IE: HT 1.0 spec. simply stole a 802.11 IE, we use our prop. 
IE until this is resolved */
+/* the additional IE is primarily used to convey the current BSS configuration */
+BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie {
+ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */
+ uint8 len; /* IE length */
+ uint8 oui[3];
+ uint8 type; /* indicates what follows */
+ ht_add_ie_t add_ie;
+} BWL_POST_PACKED_STRUCT;
+typedef struct ht_prop_add_ie ht_prop_add_ie_t;
+
+#define HT_ADD_IE_LEN 22
+#define HT_ADD_IE_TYPE 52
+
+/* byte1 defn's */
+#define HT_BW_ANY 0x04 /* set, STA can use 20 or 40MHz */
+#define HT_RIFS_PERMITTED 0x08 /* RIFS allowed */
+
+/* opmode defn's */
+#define HT_OPMODE_MASK 0x0003 /* protection mode mask */
+#define HT_OPMODE_SHIFT 0 /* protection mode shift */
+#define HT_OPMODE_PURE 0x0000 /* protection mode PURE */
+#define HT_OPMODE_OPTIONAL 0x0001 /* protection mode optional */
+#define HT_OPMODE_HT20IN40 0x0002 /* protection mode 20MHz HT in 40MHz BSS */
+#define HT_OPMODE_MIXED 0x0003 /* protection mode Mixed Mode */
+#define HT_OPMODE_NONGF 0x0004 /* protection mode non-GF */
+#define DOT11N_TXBURST 0x0008 /* Tx burst limit */
+#define DOT11N_OBSS_NONHT 0x0010 /* OBSS Non-HT STA present */
+
+/* misc_bits defn's */
+#define HT_BASIC_STBC_MCS 0x007f /* basic STBC MCS */
+#define HT_DUAL_STBC_PROT 0x0080 /* Dual STBC Protection */
+#define HT_SECOND_BCN 0x0100 /* Secondary beacon support */
+#define HT_LSIG_TXOP 0x0200 /* L-SIG TXOP Protection full support */
+#define HT_PCO_ACTIVE 0x0400 /* PCO active */
+#define HT_PCO_PHASE 0x0800 /* PCO phase */
+#define HT_DUALCTS_PROTECTION 0x0080 /* DUAL CTS protection needed */
+
+/* Tx Burst Limits */
+#define DOT11N_2G_TXBURST_LIMIT 6160 /* 2G band Tx burst limit per 802.11n Draft 1.10 (usec) */
+#define DOT11N_5G_TXBURST_LIMIT 3080 /* 5G band Tx burst limit per 802.11n Draft 1.10 (usec) */
+
+/* Macros for opmode */
+#define GET_HT_OPMODE(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ >> HT_OPMODE_SHIFT)
+#define HT_MIXEDMODE_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_MIXED) /* mixed mode present */
+#define HT_HT20_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_HT20IN40) /* 20MHz HT present */
+#define HT_OPTIONAL_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
+ == HT_OPMODE_OPTIONAL) /* Optional protection present */
+#define HT_USE_PROTECTION(add_ie) (HT_HT20_PRESENT((add_ie)) || \
+ HT_MIXEDMODE_PRESENT((add_ie))) /* use protection */
+#define HT_NONGF_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \
+ == HT_OPMODE_NONGF) /* non-GF present */
+#define DOT11N_TXBURST_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \
+ == DOT11N_TXBURST) /* Tx Burst present */
+#define DOT11N_OBSS_NONHT_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \
+ == DOT11N_OBSS_NONHT) /* OBSS Non-HT present */
+
+BWL_PRE_PACKED_STRUCT struct obss_params {
+ uint16 passive_dwell;
+ uint16 active_dwell;
+ uint16 bss_widthscan_interval;
+ uint16 passive_total;
+ uint16 active_total;
+ uint16 chanwidth_transition_dly;
+ uint16 activity_threshold;
+} BWL_POST_PACKED_STRUCT;
+typedef struct obss_params obss_params_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_obss_ie {
+ uint8 id;
+ uint8 len;
+ obss_params_t obss_params;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_obss_ie dot11_obss_ie_t;
+#define DOT11_OBSS_SCAN_IE_LEN sizeof(obss_params_t) /* HT OBSS len (based on 802.11n d3.0) */
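+
+/* Illustrative sketch (example only, not part of the original header): typical
+ * use of the opmode accessor macros above when deciding whether RTS/CTS
+ * protection is needed for a BSS. The function name and the BCM_DOC_EXAMPLES
+ * guard are hypothetical.
+ */
+#ifdef BCM_DOC_EXAMPLES
+static int
+ht_bss_needs_protection(ht_add_ie_t *add_ie)
+{
+ /* protection applies in mixed mode, or when 20MHz-only HT STAs
+ * are present in a 40MHz BSS
+ */
+ return HT_USE_PROTECTION(add_ie) ? 1 : 0;
+}
+#endif /* BCM_DOC_EXAMPLES */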
+
+/* HT control field */
+#define HT_CTRL_LA_TRQ 0x00000002 /* sounding request */
+#define HT_CTRL_LA_MAI 0x0000003C /* MCS request or antenna selection indication */
+#define HT_CTRL_LA_MAI_SHIFT 2
+#define HT_CTRL_LA_MAI_MRQ 0x00000004 /* MCS request */
+#define HT_CTRL_LA_MAI_MSI 0x00000038 /* MCS request sequence identifier */
+#define HT_CTRL_LA_MFSI 0x000001C0 /* MFB sequence identifier */
+#define HT_CTRL_LA_MFSI_SHIFT 6
+#define HT_CTRL_LA_MFB_ASELC 0x0000FE00 /* MCS feedback, antenna selection command/data */
+#define HT_CTRL_LA_MFB_ASELC_SH 9
+#define HT_CTRL_LA_ASELC_CMD 0x00000C00 /* ASEL command */
+#define HT_CTRL_LA_ASELC_DATA 0x0000F000 /* ASEL data */
+#define HT_CTRL_CAL_POS 0x00030000 /* Calibration position */
+#define HT_CTRL_CAL_SEQ 0x000C0000 /* Calibration sequence */
+#define HT_CTRL_CSI_STEERING 0x00C00000 /* CSI/Steering */
+#define HT_CTRL_CSI_STEER_SHIFT 22
+#define HT_CTRL_CSI_STEER_NFB 0 /* no feedback required */
+#define HT_CTRL_CSI_STEER_CSI 1 /* CSI, H matrix */
+#define HT_CTRL_CSI_STEER_NCOM 2 /* non-compressed beamforming */
+#define HT_CTRL_CSI_STEER_COM 3 /* compressed beamforming */
+#define HT_CTRL_NDP_ANNOUNCE 0x01000000 /* NDP announcement */
+#define HT_CTRL_AC_CONSTRAINT 0x40000000 /* AC Constraint */
+#define HT_CTRL_RDG_MOREPPDU 0x80000000 /* RDG/More PPDU */
+
+/* ************* VHT definitions. ************* */
+
+/**
+ * VHT Capabilities IE (sec 8.4.2.160)
+ */
+
+BWL_PRE_PACKED_STRUCT struct vht_cap_ie {
+ uint32 vht_cap_info;
+ /* supported MCS set - 64 bit field */
+ uint16 rx_mcs_map;
+ uint16 rx_max_rate;
+ uint16 tx_mcs_map;
+ uint16 tx_max_rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_cap_ie vht_cap_ie_t;
+
+/* 4B cap_info + 8B supp_mcs */
+#define VHT_CAP_IE_LEN 12
+
+/* VHT Capabilities Info field - 32bit - in VHT Cap IE */
+#define VHT_CAP_INFO_MAX_MPDU_LEN_MASK 0x00000003
+#define VHT_CAP_INFO_SUPP_CHAN_WIDTH_MASK 0x0000000c
+#define VHT_CAP_INFO_LDPC 0x00000010
+#define VHT_CAP_INFO_SGI_80MHZ 0x00000020
+#define VHT_CAP_INFO_SGI_160MHZ 0x00000040
+#define VHT_CAP_INFO_TX_STBC 0x00000080
+#define VHT_CAP_INFO_RX_STBC_MASK 0x00000700
+#define VHT_CAP_INFO_RX_STBC_SHIFT 8
+#define VHT_CAP_INFO_SU_BEAMFMR 0x00000800
+#define VHT_CAP_INFO_SU_BEAMFMEE 0x00001000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_MASK 0x0000e000
+#define VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT 13
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_MASK 0x00070000
+#define VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT 16
+#define VHT_CAP_INFO_MU_BEAMFMR 0x00080000
+#define VHT_CAP_INFO_MU_BEAMFMEE 0x00100000
+#define VHT_CAP_INFO_TXOPPS 0x00200000
+#define VHT_CAP_INFO_HTCVHT 0x00400000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK 0x03800000
+#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT 23
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_MASK 0x0c000000
+#define VHT_CAP_INFO_LINK_ADAPT_CAP_SHIFT 26
+
+/* VHT Supported MCS Set - 64-bit - in VHT Cap IE */
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_MASK 0x1fff
+#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_SHIFT 0
+
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_MASK 0x1fff
+#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_SHIFT 0
+
+#define VHT_CAP_MCS_MAP_0_7 0
+#define VHT_CAP_MCS_MAP_0_8 1
+#define VHT_CAP_MCS_MAP_0_9 2
+#define VHT_CAP_MCS_MAP_NONE 3
+#define VHT_CAP_MCS_MAP_S 2 /* num bits for 1-stream */
+#define VHT_CAP_MCS_MAP_M 0x3 /* mask for 1-stream */
+/* assumes VHT_CAP_MCS_MAP_NONE is 3 and 2 bits are used for encoding */
+#define VHT_CAP_MCS_MAP_NONE_ALL 0xffff
+
+/* VHT rates bitmap */
+#define VHT_CAP_MCS_0_7_RATEMAP 0x00ff
+#define VHT_CAP_MCS_0_8_RATEMAP 0x01ff
+#define VHT_CAP_MCS_0_9_RATEMAP 0x03ff
+#define VHT_CAP_MCS_FULL_RATEMAP VHT_CAP_MCS_0_9_RATEMAP
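+
+/* Illustrative sketch (example only, not part of the original header): the
+ * VHT_CAP_INFO_* masks above pair with their _SHIFT counterparts, e.g. to
+ * recover the advertised A-MPDU length exponent from a received capabilities
+ * IE. The function name and the BCM_DOC_EXAMPLES guard are hypothetical.
+ */
+#ifdef BCM_DOC_EXAMPLES
+static uint8
+vht_ampdu_len_exp(uint32 vht_cap_info)
+{
+ /* 3-bit exponent field, bits 23..25 of the capabilities info */
+ return (uint8)((vht_cap_info & VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK) >>
+ VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT);
+}
+#endif /* BCM_DOC_EXAMPLES */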
+
+#define VHT_PROP_MCS_MAP_10_11 0
+#define VHT_PROP_MCS_MAP_UNUSED1 1
+#define VHT_PROP_MCS_MAP_UNUSED2 2
+#define VHT_PROP_MCS_MAP_NONE 3
+#define VHT_PROP_MCS_MAP_NONE_ALL 0xffff
+
+/* VHT prop rates bitmap */
+#define VHT_PROP_MCS_10_11_RATEMAP 0x0c00
+#define VHT_PROP_MCS_FULL_RATEMAP VHT_PROP_MCS_10_11_RATEMAP
+
+#if !defined(VHT_CAP_MCS_MAP_0_9_NSS3)
+/* mcsmap with MCS0-9 for Nss = 3 */
+#define VHT_CAP_MCS_MAP_0_9_NSS3 \
+ ((VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(1)) | \
+ (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(2)) | \
+ (VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(3)))
+#endif /* !VHT_CAP_MCS_MAP_0_9_NSS3 */
+
+#define VHT_CAP_MCS_MAP_NSS_MAX 8
+
+/* get mcsmap with given mcs for given nss streams */
+#define VHT_CAP_MCS_MAP_CREATE(mcsmap, nss, mcs) \
+ do { \
+ int i; \
+ for (i = 1; i <= nss; i++) { \
+ VHT_MCS_MAP_SET_MCS_PER_SS(i, mcs, mcsmap); \
+ } \
+ } while (0)
+
+/* Map the mcs code to mcs bit map */
+#define VHT_MCS_CODE_TO_MCS_MAP(mcs_code) \
+ ((mcs_code == VHT_CAP_MCS_MAP_0_7) ? VHT_CAP_MCS_0_7_RATEMAP : \
+ (mcs_code == VHT_CAP_MCS_MAP_0_8) ? VHT_CAP_MCS_0_8_RATEMAP : \
+ (mcs_code == VHT_CAP_MCS_MAP_0_9) ? VHT_CAP_MCS_0_9_RATEMAP : 0)
+
+#define VHT_PROP_MCS_CODE_TO_PROP_MCS_MAP(mcs_code) \
+ ((mcs_code == VHT_PROP_MCS_MAP_10_11) ? VHT_PROP_MCS_10_11_RATEMAP : 0)
+
+/* Map the mcs bit map to mcs code */
+#define VHT_MCS_MAP_TO_MCS_CODE(mcs_map) \
+ ((mcs_map == VHT_CAP_MCS_0_7_RATEMAP) ? VHT_CAP_MCS_MAP_0_7 : \
+ (mcs_map == VHT_CAP_MCS_0_8_RATEMAP) ? VHT_CAP_MCS_MAP_0_8 : \
+ (mcs_map == VHT_CAP_MCS_0_9_RATEMAP) ? VHT_CAP_MCS_MAP_0_9 : VHT_CAP_MCS_MAP_NONE)
+
+#define VHT_PROP_MCS_MAP_TO_PROP_MCS_CODE(mcs_map) \
+ (((mcs_map & 0xc00) == 0xc00) ? VHT_PROP_MCS_MAP_10_11 : VHT_PROP_MCS_MAP_NONE)
+
+/** VHT Capabilities Supported Channel Width */
+typedef enum vht_cap_chan_width {
+ VHT_CAP_CHAN_WIDTH_SUPPORT_MANDATORY = 0x00,
+ VHT_CAP_CHAN_WIDTH_SUPPORT_160 = 0x04,
+ VHT_CAP_CHAN_WIDTH_SUPPORT_160_8080 = 0x08
+} vht_cap_chan_width_t;
+
+/** VHT Capabilities Supported max MPDU LEN (sec 8.4.2.160.2) */
+typedef enum vht_cap_max_mpdu_len {
+ VHT_CAP_MPDU_MAX_4K = 0x00,
+ VHT_CAP_MPDU_MAX_8K = 0x01,
+ VHT_CAP_MPDU_MAX_11K = 0x02
+} vht_cap_max_mpdu_len_t;
+
+/* Maximum MPDU Length byte counts for the VHT Capabilities advertised limits */
+#define VHT_MPDU_LIMIT_4K 3895
+#define VHT_MPDU_LIMIT_8K 7991
+#define VHT_MPDU_LIMIT_11K 11454
+
+
+/**
+ * VHT Operation IE (sec 8.4.2.161)
+ */
+
+BWL_PRE_PACKED_STRUCT struct vht_op_ie {
+ uint8 chan_width;
+ uint8 chan1;
+ uint8 chan2;
+ uint16 supp_mcs; /* same def as above in vht cap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_op_ie vht_op_ie_t;
+
+/* 3B VHT Op info + 2B Basic MCS */
+#define VHT_OP_IE_LEN 5
+
+typedef enum vht_op_chan_width {
+ VHT_OP_CHAN_WIDTH_20_40 = 0,
+ VHT_OP_CHAN_WIDTH_80 = 1,
+ VHT_OP_CHAN_WIDTH_160 = 2,
+ VHT_OP_CHAN_WIDTH_80_80 = 3
+} vht_op_chan_width_t;
+
+/* AID length */
+#define AID_IE_LEN 2
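+
+/* Illustrative sketch (example only, not part of the original header): mapping
+ * the VHT Operation IE channel width field to a nominal bandwidth. Note that
+ * VHT_OP_CHAN_WIDTH_20_40 must be disambiguated via the HT Operation IE; the
+ * function name and the BCM_DOC_EXAMPLES guard are hypothetical.
+ */
+#ifdef BCM_DOC_EXAMPLES
+static uint16
+vht_op_bw_mhz(const vht_op_ie_t *op)
+{
+ switch (op->chan_width) {
+ case VHT_OP_CHAN_WIDTH_80: return 80;
+ case VHT_OP_CHAN_WIDTH_160: return 160;
+ case VHT_OP_CHAN_WIDTH_80_80: return 160; /* two 80MHz segments */
+ case VHT_OP_CHAN_WIDTH_20_40:
+ default: return 40; /* or 20; see HT Operation IE */
+ }
+}
+#endif /* BCM_DOC_EXAMPLES */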
+/**
+ * BRCM vht features IE header
+ * The header is the fixed part of the IE;
+ * on the 5GHz band this is the entire IE,
+ * on 2.4GHz the VHT IEs as defined in the 802.11ac
+ * specification follow
+ *
+ * VHT features rates bitmap.
+ * Bit0: 5G MCS 0-9 BW 160MHz
+ * Bit1: 5G MCS 0-9 support BW 80MHz
+ * Bit2: 5G MCS 0-9 support BW 20MHz
+ * Bit3: 2.4G MCS 0-9 support BW 20MHz
+ * Bits 4-7: Reserved for future use
+ *
+ */
+#define VHT_FEATURES_IE_TYPE 0x4
+BWL_PRE_PACKED_STRUCT struct vht_features_ie_hdr {
+ uint8 oui[3];
+ uint8 type; /* type of this IE = 4 */
+ uint8 rate_mask; /* VHT rate mask */
+} BWL_POST_PACKED_STRUCT;
+typedef struct vht_features_ie_hdr vht_features_ie_hdr_t;
+
+/* Def for rx & tx basic mcs maps - ea ss num has 2 bits of info */
+#define VHT_MCS_MAP_GET_SS_IDX(nss) (((nss)-1) * VHT_CAP_MCS_MAP_S)
+#define VHT_MCS_MAP_GET_MCS_PER_SS(nss, mcsMap) \
+ (((mcsMap) >> VHT_MCS_MAP_GET_SS_IDX(nss)) & VHT_CAP_MCS_MAP_M)
+#define VHT_MCS_MAP_SET_MCS_PER_SS(nss, numMcs, mcsMap) \
+ do { \
+ (mcsMap) &= (~(VHT_CAP_MCS_MAP_M << VHT_MCS_MAP_GET_SS_IDX(nss))); \
+ (mcsMap) |= (((numMcs) & VHT_CAP_MCS_MAP_M) << VHT_MCS_MAP_GET_SS_IDX(nss)); \
+ } while (0)
+#define VHT_MCS_SS_SUPPORTED(nss, mcsMap) \
+ (VHT_MCS_MAP_GET_MCS_PER_SS((nss), (mcsMap)) != VHT_CAP_MCS_MAP_NONE)
+
+
+/* ************* WPA definitions. ************* */
+#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */
+#define WPA_OUI_LEN 3 /* WPA OUI length */
+#define WPA_OUI_TYPE 1
+#define WPA_VERSION 1 /* WPA version */
+#define WPA2_OUI "\x00\x0F\xAC" /* WPA2 OUI */
+#define WPA2_OUI_LEN 3 /* WPA2 OUI length */
+#define WPA2_VERSION 1 /* WPA2 version */
+#define WPA2_VERSION_LEN 2 /* WPA2 version length */
+
+/* ************* WPS definitions. ************* */
+#define WPS_OUI "\x00\x50\xF2" /* WPS OUI */
+#define WPS_OUI_LEN 3 /* WPS OUI length */
+#define WPS_OUI_TYPE 4
+
+/* ************* WFA definitions. ************* */
+
+#ifdef P2P_IE_OVRD
+#define WFA_OUI MAC_OUI
+#else
+#define WFA_OUI "\x50\x6F\x9A" /* WFA OUI */
+#endif /* P2P_IE_OVRD */
+#define WFA_OUI_LEN 3 /* WFA OUI length */
+#ifdef P2P_IE_OVRD
+#define WFA_OUI_TYPE_P2P MAC_OUI_TYPE_P2P
+#else
+#define WFA_OUI_TYPE_TPC 8
+#define WFA_OUI_TYPE_P2P 9
+#endif
+
+#define WFA_OUI_TYPE_TPC 8
+#ifdef WLTDLS
+#define WFA_OUI_TYPE_TPQ 4 /* WFD Tunneled Probe ReQuest */
+#define WFA_OUI_TYPE_TPS 5 /* WFD Tunneled Probe ReSponse */
+#define WFA_OUI_TYPE_WFD 10
+#endif /* WLTDLS */
+#define WFA_OUI_TYPE_HS20 0x10
+#define WFA_OUI_TYPE_OSEN 0x12
+#define WFA_OUI_TYPE_NAN 0x13
+
+/* RSN authenticated key management suite */
+#define RSN_AKM_NONE 0 /* None (IBSS) */
+#define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */
+#define RSN_AKM_PSK 2 /* Pre-shared Key */
+#define RSN_AKM_FBT_1X 3 /* Fast Bss transition using 802.1X */
+#define RSN_AKM_FBT_PSK 4 /* Fast Bss transition using Pre-shared Key */
+/* RSN_AKM_MFP_1X and RSN_AKM_MFP_PSK are not used any more
+ * Just kept here to avoid build issue in BISON/CARIBOU branch
+ */
+#define RSN_AKM_MFP_1X 5 /* SHA256 key derivation, using 802.1X */
+#define RSN_AKM_MFP_PSK 6 /* SHA256 key derivation, using Pre-shared Key */
+#define RSN_AKM_SHA256_1X 5 /* SHA256 key derivation, using 802.1X */
+#define RSN_AKM_SHA256_PSK 6 /* SHA256 key derivation, using Pre-shared Key */
+#define RSN_AKM_TPK 7 /* TPK(TDLS Peer Key) handshake */
+
+/* OSEN authenticated key management suite */
+#define OSEN_AKM_UNSPECIFIED RSN_AKM_UNSPECIFIED /* Over 802.1x */
+
+/* Key related defines */
+#define DOT11_MAX_DEFAULT_KEYS 4 /* number of default keys */
+#define DOT11_MAX_IGTK_KEYS 2
+#define DOT11_MAX_KEY_SIZE 32 /* max size of any key */
+#define DOT11_MAX_IV_SIZE 16 /* max size of any IV */
+#define DOT11_EXT_IV_FLAG (1<<5) /* flag to indicate IV is > 4 bytes */
+#define DOT11_WPA_KEY_RSC_LEN 8 /* WPA RSC key len */
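+
+/* Illustrative sketch (example only, not part of the original header): the OUI
+ * strings above are matched against received vendor IEs like this; memcmp()
+ * and the vndr_ie_t layout defined earlier in this file are assumed, and the
+ * function name and BCM_DOC_EXAMPLES guard are hypothetical.
+ */
+#ifdef BCM_DOC_EXAMPLES
+static int
+ie_is_wpa(const vndr_ie_t *ie)
+{
+ /* IE body = OUI (3 bytes) + OUI type (1 byte) + payload */
+ return (ie->len > VNDR_IE_MIN_LEN) &&
+ !memcmp(ie->oui, WPA_OUI, WPA_OUI_LEN) &&
+ (ie->data[0] == WPA_OUI_TYPE);
+}
+#endif /* BCM_DOC_EXAMPLES */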
+
+#define WEP1_KEY_SIZE 5 /* max size of any WEP key */
+#define WEP1_KEY_HEX_SIZE 10 /* size of WEP key in hex. */
+#define WEP128_KEY_SIZE 13 /* max size of any WEP key */
+#define WEP128_KEY_HEX_SIZE 26 /* size of WEP key in hex. */
+#define TKIP_MIC_SIZE 8 /* size of TKIP MIC */
+#define TKIP_EOM_SIZE 7 /* max size of TKIP EOM */
+#define TKIP_EOM_FLAG 0x5a /* TKIP EOM flag byte */
+#define TKIP_KEY_SIZE 32 /* size of any TKIP key, includes MIC keys */
+#define TKIP_TK_SIZE 16
+#define TKIP_MIC_KEY_SIZE 8
+#define TKIP_MIC_AUTH_TX 16 /* offset to Authenticator MIC TX key */
+#define TKIP_MIC_AUTH_RX 24 /* offset to Authenticator MIC RX key */
+#define TKIP_MIC_SUP_RX TKIP_MIC_AUTH_TX /* offset to Supplicant MIC RX key */
+#define TKIP_MIC_SUP_TX TKIP_MIC_AUTH_RX /* offset to Supplicant MIC TX key */
+#define AES_KEY_SIZE 16 /* size of AES key */
+#define AES_MIC_SIZE 8 /* size of AES MIC */
+#define BIP_KEY_SIZE 16 /* size of BIP key */
+#define BIP_MIC_SIZE 8 /* size of BIP MIC */
+
+#define AES_GCM_MIC_SIZE 16 /* size of MIC for 128-bit GCM - .11adD9 */
+
+#define AES256_KEY_SIZE 32 /* size of AES 256 key - .11acD5 */
+#define AES256_MIC_SIZE 16 /* size of MIC for 256 bit keys, incl BIP */
+
+/* WCN */
+#define WCN_OUI "\x00\x50\xf2" /* WCN OUI */
+#define WCN_TYPE 4 /* WCN type */
+
+
+/* 802.11r protocol definitions */
+
+/** Mobility Domain IE */
+BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie {
+ uint8 id;
+ uint8 len;
+ uint16 mdid; /* Mobility Domain Id */
+ uint8 cap;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_mdid_ie dot11_mdid_ie_t;
+
+#define FBT_MDID_CAP_OVERDS 0x01 /* Fast Bss transition over the DS support */
+#define FBT_MDID_CAP_RRP 0x02 /* Resource request protocol support */
+
+/** Fast Bss Transition IE */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_ie {
+ uint8 id;
+ uint8 len;
+ uint16 mic_control; /* Mic Control */
+ uint8 mic[16];
+ uint8 anonce[32];
+ uint8 snonce[32];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ft_ie dot11_ft_ie_t;
+
+#define TIE_TYPE_RESERVED 0
+#define TIE_TYPE_REASSOC_DEADLINE 1
+#define TIE_TYPE_KEY_LIEFTIME 2
+#define TIE_TYPE_ASSOC_COMEBACK 3
+BWL_PRE_PACKED_STRUCT struct dot11_timeout_ie {
+ uint8 id;
+ uint8 len;
+ uint8 type; /* timeout interval type */
+ uint32 value; /* timeout interval value */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_timeout_ie dot11_timeout_ie_t;
+
+/** GTK ie */
+BWL_PRE_PACKED_STRUCT struct dot11_gtk_ie {
+ uint8 id;
+ uint8 len;
+ uint16 key_info;
+ uint8 key_len;
+ uint8 rsc[8];
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_gtk_ie dot11_gtk_ie_t;
+
+/** Management MIC ie */
+BWL_PRE_PACKED_STRUCT struct mmic_ie {
+ uint8 id; /* IE ID: DOT11_MNG_MMIE_ID */
+ uint8 len; /* IE length */
+ uint16 key_id; /* key id */
+ uint8 ipn[6]; /* ipn */
+ uint8 mic[16]; /* mic */
+} BWL_POST_PACKED_STRUCT;
+typedef struct mmic_ie mmic_ie_t;
+
+/* 802.11r-2008, 11A.10.3 - RRB frame format */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_rrb_frame {
+ uint8 frame_type; /* 1 for RRB */
+ uint8 packet_type; /* 0 for Request 1 for Response */
+ uint16 len;
+ uint8 cur_ap_addr[ETHER_ADDR_LEN];
+ uint8 data[1]; /* IEs Received/Sent in FT Action Req/Resp Frame */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct dot11_ft_rrb_frame dot11_ft_rrb_frame_t;
+
+#define DOT11_FT_RRB_FIXED_LEN 10
+#define DOT11_FT_REMOTE_FRAME_TYPE 1
+#define DOT11_FT_PACKET_REQ 0
+#define DOT11_FT_PACKET_RESP 1
+
+#define BSSID_INVALID "\x00\x00\x00\x00\x00\x00"
+#define BSSID_BROADCAST "\xFF\xFF\xFF\xFF\xFF\xFF"
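+
+/* Illustrative sketch (example only, not part of the original header): minimal
+ * sanity check of an 802.11r RRB frame header using the constants above;
+ * buf/buf_len would come from the receive path, and the function name and
+ * BCM_DOC_EXAMPLES guard are hypothetical.
+ */
+#ifdef BCM_DOC_EXAMPLES
+static int
+ft_rrb_frame_ok(const uint8 *buf, uint32 buf_len)
+{
+ const dot11_ft_rrb_frame_t *rrb = (const dot11_ft_rrb_frame_t *)buf;
+
+ if (buf_len < DOT11_FT_RRB_FIXED_LEN)
+ return 0;
+ return (rrb->frame_type == DOT11_FT_REMOTE_FRAME_TYPE) &&
+ (rrb->packet_type == DOT11_FT_PACKET_REQ ||
+ rrb->packet_type == DOT11_FT_PACKET_RESP);
+}
+#endif /* BCM_DOC_EXAMPLES */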
"\xFF\xFF\xFF\xFF\xFF\xFF" + + +/* ************* WMM Parameter definitions. ************* */ +#define WMM_OUI "\x00\x50\xF2" /* WNN OUI */ +#define WMM_OUI_LEN 3 /* WMM OUI length */ +#define WMM_OUI_TYPE 2 /* WMM OUT type */ +#define WMM_VERSION 1 +#define WMM_VERSION_LEN 1 + +/* WMM OUI subtype */ +#define WMM_OUI_SUBTYPE_PARAMETER 1 +#define WMM_PARAMETER_IE_LEN 24 + +/** Link Identifier Element */ +BWL_PRE_PACKED_STRUCT struct link_id_ie { + uint8 id; + uint8 len; + struct ether_addr bssid; + struct ether_addr tdls_init_mac; + struct ether_addr tdls_resp_mac; +} BWL_POST_PACKED_STRUCT; +typedef struct link_id_ie link_id_ie_t; +#define TDLS_LINK_ID_IE_LEN 18 + +/** Link Wakeup Schedule Element */ +BWL_PRE_PACKED_STRUCT struct wakeup_sch_ie { + uint8 id; + uint8 len; + uint32 offset; /* in ms between TSF0 and start of 1st Awake Window */ + uint32 interval; /* in ms bwtween the start of 2 Awake Windows */ + uint32 awake_win_slots; /* in backof slots, duration of Awake Window */ + uint32 max_wake_win; /* in ms, max duration of Awake Window */ + uint16 idle_cnt; /* number of consecutive Awake Windows */ +} BWL_POST_PACKED_STRUCT; +typedef struct wakeup_sch_ie wakeup_sch_ie_t; +#define TDLS_WAKEUP_SCH_IE_LEN 18 + +/** Channel Switch Timing Element */ +BWL_PRE_PACKED_STRUCT struct channel_switch_timing_ie { + uint8 id; + uint8 len; + uint16 switch_time; /* in ms, time to switch channels */ + uint16 switch_timeout; /* in ms */ +} BWL_POST_PACKED_STRUCT; +typedef struct channel_switch_timing_ie channel_switch_timing_ie_t; +#define TDLS_CHANNEL_SWITCH_TIMING_IE_LEN 4 + +/** PTI Control Element */ +BWL_PRE_PACKED_STRUCT struct pti_control_ie { + uint8 id; + uint8 len; + uint8 tid; + uint16 seq_control; +} BWL_POST_PACKED_STRUCT; +typedef struct pti_control_ie pti_control_ie_t; +#define TDLS_PTI_CONTROL_IE_LEN 3 + +/** PU Buffer Status Element */ +BWL_PRE_PACKED_STRUCT struct pu_buffer_status_ie { + uint8 id; + uint8 len; + uint8 status; +} BWL_POST_PACKED_STRUCT; +typedef struct pu_buffer_status_ie pu_buffer_status_ie_t; +#define TDLS_PU_BUFFER_STATUS_IE_LEN 1 +#define TDLS_PU_BUFFER_STATUS_AC_BK 1 +#define TDLS_PU_BUFFER_STATUS_AC_BE 2 +#define TDLS_PU_BUFFER_STATUS_AC_VI 4 +#define TDLS_PU_BUFFER_STATUS_AC_VO 8 + +/* TDLS Action Field Values */ +#define TDLS_SETUP_REQ 0 +#define TDLS_SETUP_RESP 1 +#define TDLS_SETUP_CONFIRM 2 +#define TDLS_TEARDOWN 3 +#define TDLS_PEER_TRAFFIC_IND 4 +#define TDLS_CHANNEL_SWITCH_REQ 5 +#define TDLS_CHANNEL_SWITCH_RESP 6 +#define TDLS_PEER_PSM_REQ 7 +#define TDLS_PEER_PSM_RESP 8 +#define TDLS_PEER_TRAFFIC_RESP 9 +#define TDLS_DISCOVERY_REQ 10 + +/* 802.11z TDLS Public Action Frame action field */ +#define TDLS_DISCOVERY_RESP 14 + +/* 802.11u GAS action frames */ +#define GAS_REQUEST_ACTION_FRAME 10 +#define GAS_RESPONSE_ACTION_FRAME 11 +#define GAS_COMEBACK_REQUEST_ACTION_FRAME 12 +#define GAS_COMEBACK_RESPONSE_ACTION_FRAME 13 + +/* FTM - fine timing measurement public action frames */ +BWL_PRE_PACKED_STRUCT struct dot11_ftm_req { + uint8 category; /* category of action frame (4) */ + uint8 action; /* public action (32) */ + uint8 trigger; /* trigger/continue? 
*/ + /* optional lci, civic loc, ftm params */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_req dot11_ftm_req_t; + +BWL_PRE_PACKED_STRUCT struct dot11_ftm { + uint8 category; /* category of action frame (4) */ + uint8 action; /* public action (33) */ + uint8 dialog; /* dialog token */ + uint8 follow_up; /* follow up dialog token */ + uint8 tod[6]; /* t1 - last depart timestamp */ + uint8 toa[6]; /* t4 - last ack arrival timestamp */ + uint8 tod_err[2]; /* t1 error */ + uint8 toa_err[2]; /* t4 error */ + /* optional lci report, civic loc report, ftm params */ +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm dot11_ftm_t; + +#define DOT11_FTM_ERR_NOT_CONT_OFFSET 0 +#define DOT11_FTM_ERR_NOT_CONT_MASK 0x0001 +#define DOT11_FTM_ERR_NOT_CONT_SHIFT 0 +#define DOT11_FTM_ERR_NOT_CONT(_err) (((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & \ + DOT11_FTM_ERR_NOT_CONT_MASK) >> DOT11_FTM_ERR_NOT_CONT_SHIFT) +#define DOT11_FTM_ERR_SET_NOT_CONT(_err, _val) do {\ + uint8 _err2 = (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET]; \ + _err2 &= ~DOT11_FTM_ERR_NOT_CONT_MASK; \ + _err2 |= ((_val) << DOT11_FTM_ERR_NOT_CONT_SHIFT) & DOT11_FTM_ERR_NOT_CONT_MASK; \ + (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] = _err2; \ +} while (0) + +#define DOT11_FTM_ERR_MAX_ERR_OFFSET 0 +#define DOT11_FTM_ERR_MAX_ERR_MASK 0xfff7 +#define DOT11_FTM_ERR_MAX_ERR_SHIFT 1 +#define DOT11_FTM_ERR_MAX_ERR(_err) ((((_err)[1] << 7) | (_err)[0]) >> 1) +#define DOT11_FTM_ERR_SET_MAX_ERR(_err, _val) do {\ + uint16 _val2; \ + _val2 = (((_val) << DOT11_FTM_ERR_MAX_ERR_SHIFT) |\ + ((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & DOT11_FTM_ERR_NOT_CONT_MASK)); \ + (_err)[0] = _val2 & 0xff; \ + (_err)[1] = _val2 >> 8 & 0xff; \ +} while (0) + +BWL_PRE_PACKED_STRUCT struct dot11_ftm_params { + uint8 id; /* DOT11_MNG_FTM_PARAM_ID 8.4.2.166 11mcd2.6/2014 - revisit */ + uint8 len; + uint8 info[9]; +} BWL_POST_PACKED_STRUCT; +typedef struct dot11_ftm_params dot11_ftm_params_t; +#define DOT11_FTM_PARAMS_IE_LEN (sizeof(dot11_ftm_params_t) - 2) + +#define FTM_PARAMS_FIELD(_p, _off, _mask, _shift) (((_p)->info[(_off)] & (_mask)) >> (_shift)) +#define FTM_PARAMS_SET_FIELD(_p, _off, _mask, _shift, _val) do {\ + uint8 _ptmp = (_p)->info[_off] & ~(_mask); \ + (_p)->info[(_off)] = _ptmp | (((_val) << (_shift)) & (_mask)); \ +} while (0) + +#define FTM_PARAMS_STATUS_OFFSET 0 +#define FTM_PARAMS_STATUS_MASK 0x03 +#define FTM_PARAMS_STATUS_SHIFT 0 +#define FTM_PARAMS_STATUS(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_STATUS_OFFSET, \ + FTM_PARAMS_STATUS_MASK, FTM_PARAMS_STATUS_SHIFT) +#define FTM_PARAMS_SET_STATUS(_p, _status) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_STATUS_OFFSET, FTM_PARAMS_STATUS_MASK, FTM_PARAMS_STATUS_SHIFT, _status) + +#define FTM_PARAMS_VALUE_OFFSET 0 +#define FTM_PARAMS_VALUE_MASK 0x7c +#define FTM_PARAMS_VALUE_SHIFT 2 +#define FTM_PARAMS_VALUE(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_VALUE_OFFSET, \ + FTM_PARAMS_VALUE_MASK, FTM_PARAMS_VALUE_SHIFT) +#define FTM_PARAMS_SET_VALUE(_p, _value) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_VALUE_OFFSET, FTM_PARAMS_VALUE_MASK, FTM_PARAMS_VALUE_SHIFT, _value) +#define FTM_PARAMS_MAX_VALUE 32 + +#define FTM_PARAMS_NBURSTEXP_OFFSET 1 +#define FTM_PARAMS_NBURSTEXP_MASK 0x0f +#define FTM_PARAMS_NBURSTEXP_SHIFT 0 +#define FTM_PARAMS_NBURSTEXP(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_NBURSTEXP_OFFSET, \ + FTM_PARAMS_NBURSTEXP_MASK, FTM_PARAMS_NBURSTEXP_SHIFT) +#define FTM_PARAMS_SET_NBURSTEXP(_p, _bexp) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_NBURSTEXP_OFFSET, FTM_PARAMS_NBURSTEXP_MASK, FTM_PARAMS_NBURSTEXP_SHIFT, \ + _bexp) + +#define 
FTM_PARAMS_NBURST(_p) (1 << FTM_PARAMS_NBURSTEXP(_p)) + +enum { + FTM_PARAMS_BURSTTMO_NOPREF = 15 +}; + +#define FTM_PARAMS_BURSTTMO_OFFSET 1 +#define FTM_PARAMS_BURSTTMO_MASK 0xf0 +#define FTM_PARAMS_BURSTTMO_SHIFT 4 +#define FTM_PARAMS_BURSTTMO(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_BURSTTMO_OFFSET, \ + FTM_PARAMS_BURSTTMO_MASK, FTM_PARAMS_BURSTTMO_SHIFT) +/* set timeout in params using _tmo where timeout = 2^(_tmo) * 250us */ +#define FTM_PARAMS_SET_BURSTTMO(_p, _tmo) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_BURSTTMO_OFFSET, FTM_PARAMS_BURSTTMO_MASK, FTM_PARAMS_BURSTTMO_SHIFT, (_tmo)+2) + +#define FTM_PARAMS_BURSTTMO_USEC(_val) ((1 << ((_val)-2)) * 250) +#define FTM_PARAMS_BURSTTMO_VALID(_val) ((((_val) < 12 && (_val) > 1)) || \ + (_val) == FTM_PARAMS_BURSTTMO_NOPREF) +#define FTM_PARAMS_BURSTTMO_MAX_MSEC 128 /* 2^9 * 250us */ +#define FTM_PARAMS_BURSTTMO_MAX_USEC 128000 /* 2^9 * 250us */ + +#define FTM_PARAMS_MINDELTA_OFFSET 2 +#define FTM_PARAMS_MINDELTA_USEC(_p) ((_p)->info[FTM_PARAMS_MINDELTA_OFFSET] * 100) +#define FTM_PARAMS_SET_MINDELTA_USEC(_p, _delta) do { \ + (_p)->info[FTM_PARAMS_MINDELTA_OFFSET] = (_delta) / 100; \ +} while (0) + +#define FTM_PARAMS_PARTIAL_TSF(_p) ((_p)->info[4] << 8 | (_p)->info[3]) +#define FTM_PARAMS_SET_PARTIAL_TSF(_p, _partial_tsf) do { \ + (_p)->info[3] = (_partial_tsf) & 0xff; \ + (_p)->info[4] = ((_partial_tsf) >> 8) & 0xff; \ +} while (0) + +#define FTM_PARAMS_PARTIAL_TSF_MASK 0x0000000003fffc00ULL +#define FTM_PARAMS_PARTIAL_TSF_SHIFT 10 +#define FTM_PARAMS_PARTIAL_TSF_BIT_LEN 16 +#define FTM_PARAMS_PARTIAL_TSF_MAX 0xffff + +#define FTM_PARAMS_ASAP_OFFSET 5 +#define FTM_PARAMS_ASAP_MASK 0x4 +#define FTM_PARAMS_ASAP_SHIFT 2 +#define FTM_PARAMS_ASAP(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_ASAP_OFFSET, \ + FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT) +#define FTM_PARAMS_SET_ASAP(_p, _asap) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_ASAP_OFFSET, FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT, _asap) + +#define FTM_PARAMS_FTM1_OFFSET 5 +#define FTM_PARAMS_FTM1_MASK 0x02 +#define FTM_PARAMS_FTM1_SHIFT 1 +#define FTM_PARAMS_FTM1(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_FTM1_OFFSET, \ + FTM_PARAMS_FTM1_MASK, FTM_PARAMS_FTM1_SHIFT) +#define FTM_PARAMS_SET_FTM1(_p, _ftm1) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_FTM1_OFFSET, FTM_PARAMS_FTM1_MASK, FTM_PARAMS_FTM1_SHIFT, _ftm1) + +#define FTM_PARAMS_FTMS_PER_BURST_OFFSET 5 +#define FTM_PARAMS_FTMS_PER_BURST_MASK 0xf8 +#define FTM_PARAMS_FTMS_PER_BURST_SHIFT 3 +#define FTM_PARAMS_FTMS_PER_BURST(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_FTMS_PER_BURST_OFFSET, \ + FTM_PARAMS_FTMS_PER_BURST_MASK, FTM_PARAMS_FTMS_PER_BURST_SHIFT) +#define FTM_PARAMS_SET_FTMS_PER_BURST(_p, _nftms) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_FTMS_PER_BURST_OFFSET, FTM_PARAMS_FTMS_PER_BURST_MASK, \ + FTM_PARAMS_FTMS_PER_BURST_SHIFT, _nftms) + +#define FTM_PARAMS_CHAN_INFO_OFFSET 6 +#define FTM_PARAMS_CHAN_INFO_MASK 0xfc +#define FTM_PARAMS_CHAN_INFO_SHIFT 2 +#define FTM_PARAMS_CHAN_INFO(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_CHAN_INFO_OFFSET, \ + FTM_PARAMS_CHAN_INFO_MASK, FTM_PARAMS_CHAN_INFO_SHIFT) +#define FTM_PARAMS_SET_CHAN_INFO(_p, _ci) FTM_PARAMS_SET_FIELD(_p, \ + FTM_PARAMS_CHAN_INFO_OFFSET, FTM_PARAMS_CHAN_INFO_MASK, FTM_PARAMS_CHAN_INFO_SHIFT, _ci) + +/* burst period - units of 100ms */ +#define FTM_PARAMS_BURST_PERIOD(_p) (((_p)->info[8] << 8) | (_p)->info[7]) +#define FTM_PARAMS_SET_BURST_PERIOD(_p, _bp) do {\ + (_p)->info[7] = (_bp) & 0xff; \ + (_p)->info[8] = ((_bp) >> 8) & 0xff; \ +} while (0) + +#define FTM_PARAMS_BURST_PERIOD_MS(_p) 
(FTM_PARAMS_BURST_PERIOD(_p) * 100)
+
+/* FTM status values - last updated from 11mcD4.0 */
+enum {
+	FTM_PARAMS_STATUS_RESERVED	= 0,
+	FTM_PARAMS_STATUS_SUCCESSFUL	= 1,
+	FTM_PARAMS_STATUS_INCAPABLE	= 2,
+	FTM_PARAMS_STATUS_FAILED	= 3,
+	/* Below are obsolete */
+	FTM_PARAMS_STATUS_OVERRIDDEN	= 4,
+	FTM_PARAMS_STATUS_ASAP_INCAPABLE = 5,
+	FTM_PARAMS_STATUS_ASAP_FAILED	= 6,
+	/* rest are reserved */
+};
+
+enum {
+	FTM_PARAMS_CHAN_INFO_NO_PREF		= 0,
+	FTM_PARAMS_CHAN_INFO_RESERVE1		= 1,
+	FTM_PARAMS_CHAN_INFO_RESERVE2		= 2,
+	FTM_PARAMS_CHAN_INFO_RESERVE3		= 3,
+	FTM_PARAMS_CHAN_INFO_NON_HT_5		= 4,
+	FTM_PARAMS_CHAN_INFO_RESERVE5		= 5,
+	FTM_PARAMS_CHAN_INFO_NON_HT_10		= 6,
+	FTM_PARAMS_CHAN_INFO_RESERVE7		= 7,
+	FTM_PARAMS_CHAN_INFO_NON_HT_20		= 8,	/* excludes 2.4G, and High rate DSSS */
+	FTM_PARAMS_CHAN_INFO_HT_MF_20		= 9,
+	FTM_PARAMS_CHAN_INFO_VHT_20		= 10,
+	FTM_PARAMS_CHAN_INFO_HT_MF_40		= 11,
+	FTM_PARAMS_CHAN_INFO_VHT_40		= 12,
+	FTM_PARAMS_CHAN_INFO_VHT_80		= 13,
+	FTM_PARAMS_CHAN_INFO_VHT_80_80		= 14,
+	FTM_PARAMS_CHAN_INFO_VHT_160_2_RFLOS	= 15,
+	FTM_PARAMS_CHAN_INFO_VHT_160		= 16,
+	/* Reserved from 17 - 30 */
+	FTM_PARAMS_CHAN_INFO_DMG_2160		= 31,
+	/* Reserved from 32 - 63 */
+	FTM_PARAMS_CHAN_INFO_MAX		= 63
+};
+
+/* 802.11u interworking access network options */
+#define IW_ANT_MASK		0x0f
+#define IW_INTERNET_MASK	0x10
+#define IW_ASRA_MASK		0x20
+#define IW_ESR_MASK		0x40
+#define IW_UESA_MASK		0x80
+
+/* 802.11u interworking access network type */
+#define IW_ANT_PRIVATE_NETWORK			0
+#define IW_ANT_PRIVATE_NETWORK_WITH_GUEST	1
+#define IW_ANT_CHARGEABLE_PUBLIC_NETWORK	2
+#define IW_ANT_FREE_PUBLIC_NETWORK		3
+#define IW_ANT_PERSONAL_DEVICE_NETWORK		4
+#define IW_ANT_EMERGENCY_SERVICES_NETWORK	5
+#define IW_ANT_TEST_NETWORK			14
+#define IW_ANT_WILDCARD_NETWORK			15
+
+/* 802.11u advertisement protocol */
+#define ADVP_ANQP_PROTOCOL_ID	0
+#define ADVP_MIH_PROTOCOL_ID	1
+
+/* 802.11u advertisement protocol masks */
+#define ADVP_QRL_MASK		0x7f
+#define ADVP_PAME_BI_MASK	0x80
+
+/* 802.11u advertisement protocol values */
+#define ADVP_QRL_REQUEST	0x00
+#define ADVP_QRL_RESPONSE	0x7f
+#define ADVP_PAME_BI_DEPENDENT	0x00
+#define ADVP_PAME_BI_INDEPENDENT	ADVP_PAME_BI_MASK
+
+/* 802.11u ANQP information ID */
+#define ANQP_ID_QUERY_LIST				256
+#define ANQP_ID_CAPABILITY_LIST				257
+#define ANQP_ID_VENUE_NAME_INFO				258
+#define ANQP_ID_EMERGENCY_CALL_NUMBER_INFO		259
+#define ANQP_ID_NETWORK_AUTHENTICATION_TYPE_INFO	260
+#define ANQP_ID_ROAMING_CONSORTIUM_LIST			261
+#define ANQP_ID_IP_ADDRESS_TYPE_AVAILABILITY_INFO	262
+#define ANQP_ID_NAI_REALM_LIST				263
+#define ANQP_ID_G3PP_CELLULAR_NETWORK_INFO		264
+#define ANQP_ID_AP_GEOSPATIAL_LOCATION			265
+#define ANQP_ID_AP_CIVIC_LOCATION			266
+#define ANQP_ID_AP_LOCATION_PUBLIC_ID_URI		267
+#define ANQP_ID_DOMAIN_NAME_LIST			268
+#define ANQP_ID_EMERGENCY_ALERT_ID_URI			269
+#define ANQP_ID_EMERGENCY_NAI				271
+#define ANQP_ID_VENDOR_SPECIFIC_LIST			56797
+
+/* 802.11u ANQP OUI */
+#define ANQP_OUI_SUBTYPE	9
+
+/* 802.11u venue name */
+#define VENUE_LANGUAGE_CODE_SIZE	3
+#define VENUE_NAME_SIZE			255
+
+/* 802.11u venue groups */
+#define VENUE_UNSPECIFIED	0
+#define VENUE_ASSEMBLY		1
+#define VENUE_BUSINESS		2
+#define VENUE_EDUCATIONAL	3
+#define VENUE_FACTORY		4
+#define VENUE_INSTITUTIONAL	5
+#define VENUE_MERCANTILE	6
+#define VENUE_RESIDENTIAL	7
+#define VENUE_STORAGE		8
+#define VENUE_UTILITY		9
+#define VENUE_VEHICULAR		10
+#define VENUE_OUTDOOR		11
+
+/* 802.11u network authentication type indicator */
+#define NATI_UNSPECIFIED			-1
+#define NATI_ACCEPTANCE_OF_TERMS_CONDITIONS	0
+#define NATI_ONLINE_ENROLLMENT_SUPPORTED 1 +#define NATI_HTTP_HTTPS_REDIRECTION 2 +#define NATI_DNS_REDIRECTION 3 + +/* 802.11u IP address type availability - IPv6 */ +#define IPA_IPV6_SHIFT 0 +#define IPA_IPV6_MASK (0x03 << IPA_IPV6_SHIFT) +#define IPA_IPV6_NOT_AVAILABLE 0x00 +#define IPA_IPV6_AVAILABLE 0x01 +#define IPA_IPV6_UNKNOWN_AVAILABILITY 0x02 + +/* 802.11u IP address type availability - IPv4 */ +#define IPA_IPV4_SHIFT 2 +#define IPA_IPV4_MASK (0x3f << IPA_IPV4_SHIFT) +#define IPA_IPV4_NOT_AVAILABLE 0x00 +#define IPA_IPV4_PUBLIC 0x01 +#define IPA_IPV4_PORT_RESTRICT 0x02 +#define IPA_IPV4_SINGLE_NAT 0x03 +#define IPA_IPV4_DOUBLE_NAT 0x04 +#define IPA_IPV4_PORT_RESTRICT_SINGLE_NAT 0x05 +#define IPA_IPV4_PORT_RESTRICT_DOUBLE_NAT 0x06 +#define IPA_IPV4_UNKNOWN_AVAILABILITY 0x07 + +/* 802.11u NAI realm encoding */ +#define REALM_ENCODING_RFC4282 0 +#define REALM_ENCODING_UTF8 1 + +/* 802.11u IANA EAP method type numbers */ +#define REALM_EAP_TLS 13 +#define REALM_EAP_LEAP 17 +#define REALM_EAP_SIM 18 +#define REALM_EAP_TTLS 21 +#define REALM_EAP_AKA 23 +#define REALM_EAP_PEAP 25 +#define REALM_EAP_FAST 43 +#define REALM_EAP_PSK 47 +#define REALM_EAP_AKAP 50 +#define REALM_EAP_EXPANDED 254 + +/* 802.11u authentication ID */ +#define REALM_EXPANDED_EAP 1 +#define REALM_NON_EAP_INNER_AUTHENTICATION 2 +#define REALM_INNER_AUTHENTICATION_EAP 3 +#define REALM_EXPANDED_INNER_EAP 4 +#define REALM_CREDENTIAL 5 +#define REALM_TUNNELED_EAP_CREDENTIAL 6 +#define REALM_VENDOR_SPECIFIC_EAP 221 + +/* 802.11u non-EAP inner authentication type */ +#define REALM_RESERVED_AUTH 0 +#define REALM_PAP 1 +#define REALM_CHAP 2 +#define REALM_MSCHAP 3 +#define REALM_MSCHAPV2 4 + +/* 802.11u credential type */ +#define REALM_SIM 1 +#define REALM_USIM 2 +#define REALM_NFC 3 +#define REALM_HARDWARE_TOKEN 4 +#define REALM_SOFTOKEN 5 +#define REALM_CERTIFICATE 6 +#define REALM_USERNAME_PASSWORD 7 +#define REALM_SERVER_SIDE 8 +#define REALM_RESERVED_CRED 9 +#define REALM_VENDOR_SPECIFIC_CRED 10 + +/* 802.11u 3GPP PLMN */ +#define G3PP_GUD_VERSION 0 +#define G3PP_PLMN_LIST_IE 0 + +/** hotspot2.0 indication element (vendor specific) */ +BWL_PRE_PACKED_STRUCT struct hs20_ie { + uint8 oui[3]; + uint8 type; + uint8 config; +} BWL_POST_PACKED_STRUCT; +typedef struct hs20_ie hs20_ie_t; +#define HS20_IE_LEN 5 /* HS20 IE length */ + +/** IEEE 802.11 Annex E */ +typedef enum { + DOT11_2GHZ_20MHZ_CLASS_12 = 81, /* Ch 1-11 */ + DOT11_5GHZ_20MHZ_CLASS_1 = 115, /* Ch 36-48 */ + DOT11_5GHZ_20MHZ_CLASS_2_DFS = 118, /* Ch 52-64 */ + DOT11_5GHZ_20MHZ_CLASS_3 = 124, /* Ch 149-161 */ + DOT11_5GHZ_20MHZ_CLASS_4_DFS = 121, /* Ch 100-140 */ + DOT11_5GHZ_20MHZ_CLASS_5 = 125, /* Ch 149-165 */ + DOT11_5GHZ_40MHZ_CLASS_22 = 116, /* Ch 36-44, lower */ + DOT11_5GHZ_40MHZ_CLASS_23_DFS = 119, /* Ch 52-60, lower */ + DOT11_5GHZ_40MHZ_CLASS_24_DFS = 122, /* Ch 100-132, lower */ + DOT11_5GHZ_40MHZ_CLASS_25 = 126, /* Ch 149-157, lower */ + DOT11_5GHZ_40MHZ_CLASS_27 = 117, /* Ch 40-48, upper */ + DOT11_5GHZ_40MHZ_CLASS_28_DFS = 120, /* Ch 56-64, upper */ + DOT11_5GHZ_40MHZ_CLASS_29_DFS = 123, /* Ch 104-136, upper */ + DOT11_5GHZ_40MHZ_CLASS_30 = 127, /* Ch 153-161, upper */ + DOT11_2GHZ_40MHZ_CLASS_32 = 83, /* Ch 1-7, lower */ + DOT11_2GHZ_40MHZ_CLASS_33 = 84, /* Ch 5-11, upper */ +} dot11_op_class_t; + +/* QoS map */ +#define QOS_MAP_FIXED_LENGTH (8 * 2) /* DSCP ranges fixed with 8 entries */ + +#define BCM_AIBSS_IE_TYPE 56 + +/* This marks the end of a packed structure section. 
*/
+#include <packed_section_end.h>
+
+#endif /* _802_11_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h
new file mode 100644
index 000000000000..f1da1c174491
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11_bta.h
@@ -0,0 +1,48 @@
+/*
+ * BT-AMP (BlueTooth Alternate Mac and Phy) 802.11 PAL (Protocol Adaptation Layer)
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: 802.11_bta.h 518342 2014-12-01 23:21:41Z $
+*/
+
+#ifndef _802_11_BTA_H_
+#define _802_11_BTA_H_
+
+#define BT_SIG_SNAP_MPROT	"\xAA\xAA\x03\x00\x19\x58"
+
+/* BT-AMP 802.11 PAL Protocols */
+#define BTA_PROT_L2CAP				1
+#define BTA_PROT_ACTIVITY_REPORT		2
+#define BTA_PROT_SECURITY			3
+#define BTA_PROT_LINK_SUPERVISION_REQUEST	4
+#define BTA_PROT_LINK_SUPERVISION_REPLY		5
+
+/* BT-AMP 802.11 PAL AMP_ASSOC Type IDs */
+#define BTA_TYPE_ID_MAC_ADDRESS		1
+#define BTA_TYPE_ID_PREFERRED_CHANNELS	2
+#define BTA_TYPE_ID_CONNECTED_CHANNELS	3
+#define BTA_TYPE_ID_CAPABILITIES	4
+#define BTA_TYPE_ID_VERSION		5
+#endif /* _802_11_BTA_H_ */
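Since BT_SIG_SNAP_MPROT is the complete 6-byte LLC/SNAP prefix (DSAP, SSAP, control, plus the Bluetooth SIG OUI), a receiver can classify BT-AMP PAL frames with a single comparison. A minimal sketch, assuming llc points at the first LLC byte of a received frame (standard C, not part of the header):

/* Illustrative sketch: match the 6-byte BT-AMP LLC/SNAP prefix above. */
#include <string.h>

static int is_bt_amp_pal(const uint8 *llc, int len)
{
	return len >= 6 && memcmp(llc, BT_SIG_SNAP_MPROT, 6) == 0;
}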
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.11e.h b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h
new file mode 100644
index 000000000000..ccfa9656b83b
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.11e.h
@@ -0,0 +1,135 @@
+/*
+ * 802.11e protocol header file
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: 802.11e.h 518342 2014-12-01 23:21:41Z $
+ */
+
+#ifndef _802_11e_H_
+#define _802_11e_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* WME Traffic Specification (TSPEC) element */
+#define WME_TSPEC_HDR_LEN	2	/* WME TSPEC header length */
+#define WME_TSPEC_BODY_OFF	2	/* WME TSPEC body offset */
+
+#define WME_CATEGORY_CODE_OFFSET	0	/* WME Category code offset */
+#define WME_ACTION_CODE_OFFSET		1	/* WME Action code offset */
+#define WME_TOKEN_CODE_OFFSET		2	/* WME Token code offset */
+#define WME_STATUS_CODE_OFFSET		3	/* WME Status code offset */
+
+BWL_PRE_PACKED_STRUCT struct tsinfo {
+	uint8 octets[3];
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct tsinfo tsinfo_t;
+
+/* 802.11e TSPEC IE */
+typedef BWL_PRE_PACKED_STRUCT struct tspec {
+	uint8 oui[DOT11_OUI_LEN];	/* WME_OUI */
+	uint8 type;			/* WME_TYPE */
+	uint8 subtype;			/* WME_SUBTYPE_TSPEC */
+	uint8 version;			/* WME_VERSION */
+	tsinfo_t tsinfo;		/* TS Info bit field */
+	uint16 nom_msdu_size;		/* (Nominal or fixed) MSDU Size (bytes) */
+	uint16 max_msdu_size;		/* Maximum MSDU Size (bytes) */
+	uint32 min_srv_interval;	/* Minimum Service Interval (us) */
+	uint32 max_srv_interval;	/* Maximum Service Interval (us) */
+	uint32 inactivity_interval;	/* Inactivity Interval (us) */
+	uint32 suspension_interval;	/* Suspension Interval (us) */
+	uint32 srv_start_time;		/* Service Start Time (us) */
+	uint32 min_data_rate;		/* Minimum Data Rate (bps) */
+	uint32 mean_data_rate;		/* Mean Data Rate (bps) */
+	uint32 peak_data_rate;		/* Peak Data Rate (bps) */
+	uint32 max_burst_size;		/* Maximum Burst Size (bytes) */
+	uint32 delay_bound;		/* Delay Bound (us) */
+	uint32 min_phy_rate;		/* Minimum PHY Rate (bps) */
+	uint16 surplus_bw;		/* Surplus Bandwidth Allowance (range 1.0-8.0) */
+	uint16 medium_time;		/* Medium Time (32 us/s periods) */
+} BWL_POST_PACKED_STRUCT tspec_t;
+
+#define WME_TSPEC_LEN	(sizeof(tspec_t))	/* not including 2-bytes of header */
+
+/* ts_info */
+/* 802.1D priority is duplicated - bits 13-11 AND bits 3-1 */
+#define TS_INFO_TID_SHIFT		1	/* TS info. TID shift */
+#define TS_INFO_TID_MASK		(0xf << TS_INFO_TID_SHIFT)	/* TS info. TID mask */
+#define TS_INFO_CONTENTION_SHIFT	7	/* TS info. contention shift */
+#define TS_INFO_CONTENTION_MASK	(0x1 << TS_INFO_CONTENTION_SHIFT) /* TS info. contention mask */
+#define TS_INFO_DIRECTION_SHIFT	5	/* TS info. direction shift */
+#define TS_INFO_DIRECTION_MASK	(0x3 << TS_INFO_DIRECTION_SHIFT) /* TS info. direction mask */
+#define TS_INFO_PSB_SHIFT		2	/* TS info. PSB bit Shift */
+#define TS_INFO_PSB_MASK		(1 << TS_INFO_PSB_SHIFT)	/* TS info. PSB mask */
+#define TS_INFO_UPLINK			(0 << TS_INFO_DIRECTION_SHIFT)	/* TS info. uplink */
+#define TS_INFO_DOWNLINK		(1 << TS_INFO_DIRECTION_SHIFT)	/* TS info. downlink */
+#define TS_INFO_BIDIRECTIONAL	(3 << TS_INFO_DIRECTION_SHIFT)	/* TS info. bidirectional */
+#define TS_INFO_USER_PRIO_SHIFT	3	/* TS info. user priority shift */
+/* TS info. user priority mask */
+#define TS_INFO_USER_PRIO_MASK	(0x7 << TS_INFO_USER_PRIO_SHIFT)
+
+/* Macro to get/set bit(s) field in TSINFO */
+#define WLC_CAC_GET_TID(pt)	((((pt).octets[0]) & TS_INFO_TID_MASK) >> TS_INFO_TID_SHIFT)
+#define WLC_CAC_GET_DIR(pt)	((((pt).octets[0]) & \
+	TS_INFO_DIRECTION_MASK) >> TS_INFO_DIRECTION_SHIFT)
+#define WLC_CAC_GET_PSB(pt)	((((pt).octets[1]) & TS_INFO_PSB_MASK) >> TS_INFO_PSB_SHIFT)
+#define WLC_CAC_GET_USER_PRIO(pt)	((((pt).octets[1]) & \
+	TS_INFO_USER_PRIO_MASK) >> TS_INFO_USER_PRIO_SHIFT)
+
+#define WLC_CAC_SET_TID(pt, id)	((((pt).octets[0]) & (~TS_INFO_TID_MASK)) | \
+	((id) << TS_INFO_TID_SHIFT))
+#define WLC_CAC_SET_USER_PRIO(pt, prio)	((((pt).octets[1]) & (~TS_INFO_USER_PRIO_MASK)) | \
+	((prio) << TS_INFO_USER_PRIO_SHIFT))
+
+/* 802.11e QBSS Load IE */
+#define QBSS_LOAD_IE_LEN		5	/* QBSS Load IE length */
+#define QBSS_LOAD_AAC_OFF		3	/* AAC offset in IE */
+
+#define CAC_ADDTS_RESP_TIMEOUT		1000	/* default ADDTS response timeout in ms */
+						/* DEFVAL dot11ADDTSResponseTimeout = 1s */
+
+/* 802.11e ADDTS status code */
+#define DOT11E_STATUS_ADMISSION_ACCEPTED	0	/* TSPEC Admission accepted status */
+#define DOT11E_STATUS_ADDTS_INVALID_PARAM	1	/* TSPEC invalid parameter status */
+#define DOT11E_STATUS_ADDTS_REFUSED_NSBW	3	/* ADDTS refused (non-sufficient BW) */
+#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE	47	/* ADDTS refused but could retry later */
+
+/* 802.11e DELTS status code */
+#define DOT11E_STATUS_QSTA_LEAVE_QBSS		36	/* STA leave QBSS */
+#define DOT11E_STATUS_END_TS			37	/* END TS */
+#define DOT11E_STATUS_UNKNOWN_TS		38	/* UNKNOWN TS */
+#define DOT11E_STATUS_QSTA_REQ_TIMEOUT		39	/* STA ADDTS request timeout */
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _802_11e_H_ */
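Note that the WLC_CAC_SET_* macros above evaluate to the updated octet value rather than assigning it, so the caller stores the result back. A minimal sketch of filling and reading a TS Info field with these macros (values chosen arbitrarily; not part of the header):

/* Illustrative sketch: build the TS Info field of a voice TSPEC. */
static void tsinfo_example(tsinfo_t *ti)
{
	ti->octets[0] = 0;
	ti->octets[1] = 0;
	ti->octets[2] = 0;
	ti->octets[0] = (uint8)WLC_CAC_SET_TID(*ti, 6);		/* TID 6 */
	ti->octets[0] |= TS_INFO_BIDIRECTIONAL;			/* direction bits */
	ti->octets[1] = (uint8)WLC_CAC_SET_USER_PRIO(*ti, 6);	/* UP 6 */
	/* WLC_CAC_GET_TID(*ti) and WLC_CAC_GET_USER_PRIO(*ti) now both read 6 */
}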
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.1d.h b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h
new file mode 100644
index 000000000000..9610b550467a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.1d.h
@@ -0,0 +1,53 @@
+/*
+ * Fundamental types and constants relating to 802.1D
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: 802.1d.h 518342 2014-12-01 23:21:41Z $
+ */
+
+#ifndef _802_1_D_
+#define _802_1_D_
+
+/* 802.1D priority defines */
+#define PRIO_8021D_NONE		2	/* None = - */
+#define PRIO_8021D_BK		1	/* BK - Background */
+#define PRIO_8021D_BE		0	/* BE - Best-effort */
+#define PRIO_8021D_EE		3	/* EE - Excellent-effort */
+#define PRIO_8021D_CL		4	/* CL - Controlled Load */
+#define PRIO_8021D_VI		5	/* Vi - Video */
+#define PRIO_8021D_VO		6	/* Vo - Voice */
+#define PRIO_8021D_NC		7	/* NC - Network Control */
+#define MAXPRIO			7	/* 0-7 */
+#define NUMPRIO			(MAXPRIO + 1)
+
+#define ALLPRIO			-1	/* All priority */
+
+/* Converts prio to precedence since the numerical values of
+ * PRIO_8021D_BE and PRIO_8021D_NONE are swapped.
+ */
+#define PRIO2PREC(prio) \
+	(((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? ((prio^2)) : (prio))
+
+#endif /* _802_1_D_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/802.3.h b/drivers/net/wireless/bcmdhd/include/proto/802.3.h
new file mode 100644
index 000000000000..9f108c888a2e
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/802.3.h
@@ -0,0 +1,55 @@
+/*
+ * Fundamental constants relating to 802.3
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: 802.3.h 518342 2014-12-01 23:21:41Z $
+ */
+
+#ifndef _802_3_h_
+#define _802_3_h_
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define SNAP_HDR_LEN	6	/* 802.3 SNAP header length */
+#define DOT3_OUI_LEN	3	/* 802.3 OUI length */
+
+BWL_PRE_PACKED_STRUCT struct dot3_mac_llc_snap_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];	/* dest mac */
+	uint8	ether_shost[ETHER_ADDR_LEN];	/* src mac */
+	uint16	length;				/* payload length (excludes MAC header) */
+	uint8	dsap;				/* always 0xAA */
+	uint8	ssap;				/* always 0xAA */
+	uint8	ctl;				/* always 0x03 */
+	uint8	oui[DOT3_OUI_LEN];		/* RFC1042: 0x00 0x00 0x00
+						 * Bridge-Tunnel: 0x00 0x00 0xF8
+						 */
+	uint16	type;				/* ethertype */
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* #ifndef _802_3_h_ */
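A receiver distinguishing RFC1042 from Bridge-Tunnel encapsulation compares the SNAP OUI of the header above. A minimal sketch (hypothetical helper, not part of the header; uses memcmp from string.h):

/* Illustrative sketch: check an 802.3 LLC/SNAP header for RFC1042 encapsulation. */
#include <string.h>

static int snap_is_rfc1042(const struct dot3_mac_llc_snap_header *h)
{
	static const uint8 rfc1042_oui[DOT3_OUI_LEN] = { 0x00, 0x00, 0x00 };

	return h->dsap == 0xAA && h->ssap == 0xAA && h->ctl == 0x03 &&
	       memcmp(h->oui, rfc1042_oui, DOT3_OUI_LEN) == 0;
}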
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmdhcp.h b/drivers/net/wireless/bcmdhd/include/proto/bcmdhcp.h
new file mode 100644
index 000000000000..5e51979ce393
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmdhcp.h
@@ -0,0 +1,80 @@
+/*
+ * Fundamental constants relating to DHCP Protocol
+ *
+ * Copyright (C) 2016, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ *
+ * <>
+ *
+ * $Id: bcmdhcp.h 518342 2014-12-01 23:21:41Z $
+ */
+
+#ifndef _bcmdhcp_h_
+#define _bcmdhcp_h_
+
+/* DHCP params */
+#define DHCP_TYPE_OFFSET	0	/* DHCP type (request|reply) offset */
+#define DHCP_TID_OFFSET		4	/* DHCP transaction id offset */
+#define DHCP_FLAGS_OFFSET	10	/* DHCP flags offset */
+#define DHCP_CIADDR_OFFSET	12	/* DHCP client IP address offset */
+#define DHCP_YIADDR_OFFSET	16	/* DHCP your IP address offset */
+#define DHCP_GIADDR_OFFSET	24	/* DHCP relay agent IP address offset */
+#define DHCP_CHADDR_OFFSET	28	/* DHCP client h/w address offset */
+#define DHCP_OPT_OFFSET		236	/* DHCP options offset */
+
+#define DHCP_OPT_MSGTYPE	53	/* DHCP message type */
+#define DHCP_OPT_MSGTYPE_REQ	3
+#define DHCP_OPT_MSGTYPE_ACK	5	/* DHCP message type - ACK */
+
+#define DHCP_OPT_CODE_OFFSET	0	/* Option identifier */
+#define DHCP_OPT_LEN_OFFSET	1	/* Option data length */
+#define DHCP_OPT_DATA_OFFSET	2	/* Option data */
+
+#define DHCP_OPT_CODE_CLIENTID	61	/* Option identifier */
+
+#define DHCP_TYPE_REQUEST	1	/* DHCP request (discover|request) */
+#define DHCP_TYPE_REPLY		2	/* DHCP reply (offer|ack) */
+
+#define DHCP_PORT_SERVER	67	/* DHCP server UDP port */
+#define DHCP_PORT_CLIENT	68	/* DHCP client UDP port */
+
+#define DHCP_FLAG_BCAST		0x8000	/* DHCP broadcast flag */
+
+#define DHCP_FLAGS_LEN		2	/* DHCP flags field length */
+
+#define DHCP6_TYPE_SOLICIT	1	/* DHCP6 solicit */
+#define DHCP6_TYPE_ADVERTISE	2	/* DHCP6 advertise */
+#define DHCP6_TYPE_REQUEST	3	/* DHCP6 request */
+#define DHCP6_TYPE_CONFIRM	4	/* DHCP6 confirm */
+#define DHCP6_TYPE_RENEW	5	/* DHCP6 renew */
+#define DHCP6_TYPE_REBIND	6	/* DHCP6 rebind */
+#define DHCP6_TYPE_REPLY	7	/* DHCP6 reply */
+#define DHCP6_TYPE_RELEASE	8	/* DHCP6 release */
+#define DHCP6_TYPE_DECLINE	9	/* DHCP6 decline */
+#define DHCP6_TYPE_RECONFIGURE	10	/* DHCP6 reconfigure */
+#define DHCP6_TYPE_INFOREQ	11	/* DHCP6 information request */
+#define DHCP6_TYPE_RELAYFWD	12	/* DHCP6 relay forward */
+#define DHCP6_TYPE_RELAYREPLY	13	/* DHCP6 relay reply */
+
+#define DHCP6_TYPE_OFFSET	0	/* DHCP6 type offset */
+
+#define DHCP6_MSG_OPT_OFFSET	4	/* Offset of options in client server messages */
+#define DHCP6_RELAY_OPT_OFFSET	34	/* Offset of options in relay messages */
+
+#define DHCP6_OPT_CODE_OFFSET	0	/* Option identifier */
+#define DHCP6_OPT_LEN_OFFSET	2	/* Option data length */
+#define DHCP6_OPT_DATA_OFFSET	4	/* Option data */
+
+#define DHCP6_OPT_CODE_CLIENTID	1	/* DHCP6 CLIENTID option */
+#define DHCP6_OPT_CODE_SERVERID	2	/* DHCP6 SERVERID option */
+
+#define DHCP6_PORT_SERVER	547	/* DHCP6 server UDP port */
+#define DHCP6_PORT_CLIENT	546	/* DHCP6 client UDP port */
+
+#endif /* #ifndef _bcmdhcp_h_ */
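Given the fixed offsets above, scanning the options region for the message type is a short loop. A minimal sketch, assuming pkt/len describe a complete BOOTP/DHCP payload and that the standard 4-byte magic cookie at DHCP_OPT_OFFSET has already been verified (not part of the header):

/* Illustrative sketch: return the DHCP message type option value, or -1. */
static int dhcp_msg_type(const uint8 *pkt, uint16 len)
{
	uint16 off = DHCP_OPT_OFFSET + 4;	/* skip the magic cookie */

	while (off + DHCP_OPT_DATA_OFFSET <= len) {
		uint8 code = pkt[off + DHCP_OPT_CODE_OFFSET];
		uint8 olen = pkt[off + DHCP_OPT_LEN_OFFSET];

		if (code == 0xff)		/* end option */
			break;
		if (code == 0) {		/* pad option: single byte, no length */
			off++;
			continue;
		}
		if (code == DHCP_OPT_MSGTYPE && olen >= 1 &&
		    off + DHCP_OPT_DATA_OFFSET < len)
			return pkt[off + DHCP_OPT_DATA_OFFSET];
		off += DHCP_OPT_DATA_OFFSET + olen;
	}
	return -1;	/* not found */
}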
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h
new file mode 100644
index 000000000000..7ad453dbad0d
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmeth.h
@@ -0,0 +1,115 @@
+/*
+ * Broadcom Ethernet-type protocol definitions
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: bcmeth.h 518342 2014-12-01 23:21:41Z $
+ */
+
+/*
+ * Broadcom Ethernet protocol defines
+ */
+
+#ifndef _BCMETH_H_
+#define _BCMETH_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* ETHER_TYPE_BRCM is defined in ethernet.h */
+
+/*
+ * Following the 2-byte BRCM ether_type is a 16-bit BRCM subtype field
+ * in one of two formats: (only subtypes 32768-65535 are in use now)
+ *
+ * subtypes 0-32767:
+ *	8-bit subtype (0-127)
+ *	8-bit length in bytes (0-255)
+ *
+ * subtypes 32768-65535:
+ *	16-bit big-endian subtype
+ *	16-bit big-endian length in bytes (0-65535)
+ *
+ * length is the number of additional bytes beyond the 4 or 6 byte header
+ *
+ * Reserved values:
+ * 0 reserved
+ * 5-15 reserved for iLine protocol assignments
+ * 17-126 reserved, assignable
+ * 127 reserved
+ * 32768 reserved
+ * 32769-65534 reserved, assignable
+ * 65535 reserved
+ */
+
+/*
+ * While adding the subtypes and their specific processing code make sure
+ * bcmeth_bcm_hdr_t is the first data structure in the user specific data structure definition
+ */
+
+#define	BCMILCP_SUBTYPE_RATE		1
+#define	BCMILCP_SUBTYPE_LINK		2
+#define	BCMILCP_SUBTYPE_CSA		3
+#define	BCMILCP_SUBTYPE_LARQ		4
+#define BCMILCP_SUBTYPE_VENDOR		5
+#define	BCMILCP_SUBTYPE_FLH		17
+
+#define BCMILCP_SUBTYPE_VENDOR_LONG	32769
+#define BCMILCP_SUBTYPE_CERT		32770
+#define BCMILCP_SUBTYPE_SES		32771
+
+
+#define BCMILCP_BCM_SUBTYPE_RESERVED	0
+#define BCMILCP_BCM_SUBTYPE_EVENT	1
+#define BCMILCP_BCM_SUBTYPE_SES		2
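Per the format comment above, only the long (32768-65535) subtype form is in use, which is what bcmeth_hdr_t below models, and its 16-bit fields are stored in network order. A minimal sketch of checking for an event header without relying on platform byte-swap helpers (the Broadcom OUI bytes 00:10:18 are an assumption here, taken from BRCM_OUI as defined elsewhere in these headers):

/* Illustrative sketch: test whether a bcmeth_hdr_t carries a Broadcom event. */
static uint16 be16_get(const uint8 *p)
{
	return (uint16)((p[0] << 8) | p[1]);	/* read 16-bit big-endian */
}

static int bcmeth_is_event(const bcmeth_hdr_t *h)
{
	return be16_get((const uint8 *)&h->subtype) == BCMILCP_SUBTYPE_VENDOR_LONG &&
	       h->oui[0] == 0x00 && h->oui[1] == 0x10 && h->oui[2] == 0x18 &&
	       be16_get((const uint8 *)&h->usr_subtype) == BCMILCP_BCM_SUBTYPE_EVENT;
}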
+/*
+ * The EAPOL type is not used anymore. Instead, EAPOL messages are now embedded
+ * within BCMILCP_BCM_SUBTYPE_EVENT type messages
+ */
+/* #define BCMILCP_BCM_SUBTYPE_EAPOL	3 */
+#define BCMILCP_BCM_SUBTYPE_DPT		4
+
+#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH	8
+#define BCMILCP_BCM_SUBTYPEHDR_VERSION		0
+
+/* These fields are stored in network order */
+typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr
+{
+	uint16	subtype;	/* Vendor specific..32769 */
+	uint16	length;
+	uint8	version;	/* Version is 0 */
+	uint8	oui[3];		/* Broadcom OUI */
+	/* user specific Data */
+	uint16	usr_subtype;
+} BWL_POST_PACKED_STRUCT bcmeth_hdr_t;
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _BCMETH_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
new file mode 100644
index 000000000000..6c30d57bfbbc
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/bcmevent.h
@@ -0,0 +1,791 @@
+/*
+ * Broadcom Event protocol definitions
+ *
+ * Dependencies: proto/bcmeth.h
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: bcmevent.h 555154 2015-05-07 20:46:07Z $
+ *
+ */
+
+/*
+ * Broadcom Ethernet Events protocol defines
+ *
+ */
+
+#ifndef _BCMEVENT_H_
+#define _BCMEVENT_H_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+/* #include <ethernet.h> -- TODO: required, excluded due to overwhelming coupling (break up ethernet.h) */
+#include <proto/bcmeth.h>
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#define BCM_EVENT_MSG_VERSION		2	/* wl_event_msg_t struct version */
+#define BCM_MSG_IFNAME_MAX		16	/* max length of interface name */
+
+/* flags */
+#define WLC_EVENT_MSG_LINK		0x01	/* link is up */
+#define WLC_EVENT_MSG_FLUSHTXQ		0x02	/* flush tx queue on MIC error */
+#define WLC_EVENT_MSG_GROUP		0x04	/* group MIC error */
+#define WLC_EVENT_MSG_UNKBSS		0x08	/* unknown source bsscfg */
+#define WLC_EVENT_MSG_UNKIF		0x10	/* unknown source OS i/f */
+
+/* these fields are stored in network order */
+
+/* version 1 */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint16	version;
+	uint16	flags;			/* see flags below */
+	uint32	event_type;		/* Message (see below) */
+	uint32	status;			/* Status code (see below) */
+	uint32	reason;			/* Reason code (if applicable) */
+	uint32	auth_type;		/* WLC_E_AUTH */
+	uint32	datalen;		/* data buf */
+	struct ether_addr	addr;	/* Station address (if applicable) */
+	char	ifname[BCM_MSG_IFNAME_MAX];	/* name of the packet incoming interface */
+} BWL_POST_PACKED_STRUCT wl_event_msg_v1_t;
+
+/* the current version */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint16	version;
+	uint16	flags;			/* see flags below */
+	uint32	event_type;		/* Message (see below) */
+	uint32	status;			/* Status code (see below) */
+	uint32	reason;			/* Reason code (if applicable) */
+	uint32	auth_type;		/* WLC_E_AUTH */
+	uint32	datalen;		/* data buf */
+	struct ether_addr	addr;	/* Station address (if applicable) */
+	char	ifname[BCM_MSG_IFNAME_MAX];	/* name of the packet incoming interface */
+	uint8	ifidx;			/* destination OS i/f index */
+	uint8	bsscfgidx;		/* source bsscfg index */
+} BWL_POST_PACKED_STRUCT wl_event_msg_t;
+
+/* used by driver msgs */
+typedef BWL_PRE_PACKED_STRUCT struct bcm_event {
+	struct ether_header eth;
+	bcmeth_hdr_t		bcm_hdr;
+	wl_event_msg_t		event;
+	/* data portion follows */
+} BWL_POST_PACKED_STRUCT bcm_event_t;
+
+#define BCM_MSG_LEN	(sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header))
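As the comment in bcm_event_t notes, the event payload follows the full structure in memory, and the event fields arrive in network order. A minimal sketch of locating the payload (an explicit big-endian read is used instead of a platform ntohl; not part of the header):

/* Illustrative sketch: return the payload that follows a validated bcm_event_t. */
static uint32 be32_get(const uint8 *p)
{
	return ((uint32)p[0] << 24) | ((uint32)p[1] << 16) |
	       ((uint32)p[2] << 8) | (uint32)p[3];
}

static const uint8 *bcm_event_payload(const bcm_event_t *be, uint32 *datalen)
{
	*datalen = be32_get((const uint8 *)&be->event.datalen);	/* network order */
	return (const uint8 *)(be + 1);	/* data portion follows the struct */
}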
+
+/* Event messages */
+#define WLC_E_SET_SSID		0	/* indicates status of set SSID */
+#define WLC_E_JOIN		1	/* differentiates join IBSS from found (WLC_E_START) IBSS */
+#define WLC_E_START		2	/* STA started an IBSS or AP started a BSS */
+#define WLC_E_AUTH		3	/* 802.11 AUTH request */
+#define WLC_E_AUTH_IND		4	/* 802.11 AUTH indication */
+#define WLC_E_DEAUTH		5	/* 802.11 DEAUTH request */
+#define WLC_E_DEAUTH_IND	6	/* 802.11 DEAUTH indication */
+#define WLC_E_ASSOC		7	/* 802.11 ASSOC request */
+#define WLC_E_ASSOC_IND		8	/* 802.11 ASSOC indication */
+#define WLC_E_REASSOC		9	/* 802.11 REASSOC request */
+#define WLC_E_REASSOC_IND	10	/* 802.11 REASSOC indication */
+#define WLC_E_DISASSOC		11	/* 802.11 DISASSOC request */
+#define WLC_E_DISASSOC_IND	12	/* 802.11 DISASSOC indication */
+#define WLC_E_QUIET_START	13	/* 802.11h Quiet period started */
+#define WLC_E_QUIET_END		14	/* 802.11h Quiet period ended */
+#define WLC_E_BEACON_RX		15	/* BEACONS received/lost indication */
+#define WLC_E_LINK		16	/* generic link indication */
+#define WLC_E_MIC_ERROR		17	/* TKIP MIC error occurred */
+#define WLC_E_NDIS_LINK		18	/* NDIS style link indication */
+#define WLC_E_ROAM		19	/* roam attempt occurred: indicate status & reason */
+#define WLC_E_TXFAIL		20	/* change in dot11FailedCount (txfail) */
+#define WLC_E_PMKID_CACHE	21	/* WPA2 pmkid cache indication */
+#define WLC_E_RETROGRADE_TSF	22	/* current AP's TSF value went backward */
+#define WLC_E_PRUNE		23	/* AP was pruned from join list for reason */
+#define WLC_E_AUTOAUTH		24	/* report AutoAuth table entry match for join attempt */
+#define WLC_E_EAPOL_MSG		25	/* Event encapsulating an EAPOL message */
+#define WLC_E_SCAN_COMPLETE	26	/* Scan results are ready or scan was aborted */
+#define WLC_E_ADDTS_IND		27	/* indicate to host addts fail/success */
+#define WLC_E_DELTS_IND		28	/* indicate to host delts fail/success */
+#define WLC_E_BCNSENT_IND	29	/* indicate to host of beacon transmit */
+#define WLC_E_BCNRX_MSG		30	/* Send the received beacon up to the host */
+#define WLC_E_BCNLOST_MSG	31	/* indicate to host loss of beacon */
+#define WLC_E_ROAM_PREP		32	/* before attempting to roam */
+#define WLC_E_PFN_NET_FOUND	33	/* PFN network found event */
+#define WLC_E_PFN_NET_LOST	34	/* PFN network lost event */
+#define WLC_E_RESET_COMPLETE	35
+#define WLC_E_JOIN_START	36
+#define WLC_E_ROAM_START	37
+#define WLC_E_ASSOC_START	38
+#define WLC_E_IBSS_ASSOC	39
+#define WLC_E_RADIO		40
+#define WLC_E_PSM_WATCHDOG	41	/* PSM microcode watchdog fired */
+#define WLC_E_PROBREQ_MSG	44	/* probe request received */
+#define WLC_E_SCAN_CONFIRM_IND	45
+#define WLC_E_PSK_SUP		46	/* WPA Handshake fail */
+#define WLC_E_COUNTRY_CODE_CHANGED	47
+#define	WLC_E_EXCEEDED_MEDIUM_TIME	48	/* WMMAC exceeded medium time */
+#define WLC_E_ICV_ERROR		49	/* WEP ICV error occurred */
+#define WLC_E_UNICAST_DECODE_ERROR	50	/* Unsupported unicast encrypted frame */
+#define WLC_E_MULTICAST_DECODE_ERROR	51	/* Unsupported multicast encrypted frame */
+#define WLC_E_TRACE		52
+#define WLC_E_IF		54	/* I/F change (for dongle host notification) */
+#define WLC_E_P2P_DISC_LISTEN_COMPLETE	55	/* listen state expires */
+#define WLC_E_RSSI		56	/* indicate RSSI change based on configured levels */
+#define WLC_E_PFN_BEST_BATCHING	57	/* PFN best network batching event */
+#define WLC_E_EXTLOG_MSG	58
+#define WLC_E_ACTION_FRAME	59	/* Action frame Rx */
+#define WLC_E_ACTION_FRAME_COMPLETE	60	/* Action frame Tx complete */
+#define WLC_E_PRE_ASSOC_IND	61	/* assoc request received */
+#define WLC_E_PRE_REASSOC_IND	62	/* re-assoc request received */
+#define WLC_E_CHANNEL_ADOPTED	63
+#define WLC_E_AP_STARTED	64	/* AP started */
+#define WLC_E_DFS_AP_STOP	65	/* AP stopped due to DFS */
+#define WLC_E_DFS_AP_RESUME	66	/* AP resumed due to DFS */
+#define WLC_E_WAI_STA_EVENT	67	/* WAI stations event */
+#define WLC_E_WAI_MSG		68	/* event encapsulating a WAI message */
+#define WLC_E_ESCAN_RESULT	69	/* escan result event */
+#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE	70	/* action frame off channel complete */
+#define WLC_E_PROBRESP_MSG	71	/* probe response received */
+#define WLC_E_P2P_PROBREQ_MSG	72	/* P2P Probe request received */
+#define WLC_E_DCS_REQUEST	73
+#define WLC_E_FIFO_CREDIT_MAP	74	/* credits for D11 FIFOs. [AC0,AC1,AC2,AC3,BC_MC,ATIM] */
+#define WLC_E_ACTION_FRAME_RX	75	/* Received action frame event WITH
+					 * wl_event_rx_frame_data_t header
+					 */
+#define WLC_E_WAKE_EVENT	76	/* Wake Event timer fired, used for wake WLAN test mode */
+#define WLC_E_RM_COMPLETE	77	/* Radio measurement complete */
+#define WLC_E_HTSFSYNC		78	/* Synchronize TSF with the host */
+#define WLC_E_OVERLAY_REQ	79	/* request an overlay IOCTL/iovar from the host */
+#define WLC_E_CSA_COMPLETE_IND		80	/* 802.11 CHANNEL SWITCH ACTION completed */
+#define WLC_E_EXCESS_PM_WAKE_EVENT	81	/* excess PM Wake Event to inform host  */
+#define WLC_E_PFN_SCAN_NONE		82	/* no PFN networks around */
+/* PFN BSSID network found event, conflict/share with WLC_E_PFN_SCAN_NONE */
+#define WLC_E_PFN_BSSID_NET_FOUND	82
+#define WLC_E_PFN_SCAN_ALLGONE		83	/* last found PFN network gets lost */
+/* PFN BSSID network lost event, conflict/share with WLC_E_PFN_SCAN_ALLGONE */
+#define WLC_E_PFN_BSSID_NET_LOST	83
+#define WLC_E_GTK_PLUMBED		84
+#define WLC_E_ASSOC_IND_NDIS		85	/* 802.11 ASSOC indication for NDIS only */
+#define WLC_E_REASSOC_IND_NDIS		86	/* 802.11 REASSOC indication for NDIS only */
+#define WLC_E_ASSOC_REQ_IE		87
+#define WLC_E_ASSOC_RESP_IE		88
+#define WLC_E_ASSOC_RECREATED		89	/* association recreated on resume */
+#define WLC_E_ACTION_FRAME_RX_NDIS	90	/* rx action frame event for NDIS only */
+#define WLC_E_AUTH_REQ			91	/* authentication request received */
+#define WLC_E_TDLS_PEER_EVENT		92	/* discovered peer, connected/disconnected peer */
+#define WLC_E_SPEEDY_RECREATE_FAIL	93	/* fast assoc recreation failed */
+#define WLC_E_NATIVE			94	/* port-specific event and payload (e.g. NDIS) */
+#define WLC_E_PKTDELAY_IND		95	/* event for a sudden jump in tx pkt delay */
+#define WLC_E_PSTA_PRIMARY_INTF_IND	99	/* psta primary interface indication */
+#define WLC_E_NAN			100	/* NAN event */
+#define WLC_E_BEACON_FRAME_RX		101
+#define WLC_E_SERVICE_FOUND		102	/* desired service found */
+#define WLC_E_GAS_FRAGMENT_RX		103	/* GAS fragment received */
+#define WLC_E_GAS_COMPLETE		104	/* GAS sessions all complete */
+#define WLC_E_P2PO_ADD_DEVICE		105	/* New device found by p2p offload */
+#define WLC_E_P2PO_DEL_DEVICE		106	/* device has been removed by p2p offload */
+#define WLC_E_WNM_STA_SLEEP		107	/* WNM event to notify STA enter sleep mode */
+#define WLC_E_TXFAIL_THRESH		108	/* Indication of MAC tx failures (exhaustion of
+						 * 802.11 retries) exceeding threshold(s)
+						 */
+#define WLC_E_PROXD			109	/* Proximity Detection event */
+#define WLC_E_IBSS_COALESCE		110	/* IBSS Coalescing */
+#define WLC_E_AIBSS_TXFAIL		110	/* TXFAIL event for AIBSS, reusing event 110 */
+#define WLC_E_BSS_LOAD			114	/* Inform host of beacon bss load */
+#define WLC_E_MSCH			120	/* Multiple channel scheduler event */
+#define WLC_E_CSA_START_IND		121
+#define WLC_E_CSA_DONE_IND		122
+#define WLC_E_CSA_FAILURE_IND		123
+#define WLC_E_CCA_CHAN_QUAL		124	/* CCA based channel quality report */
+#define WLC_E_BSSID			125	/* to report change in BSSID while roaming */
+#define WLC_E_TX_STAT_ERROR		126	/* tx error indication */
+#define WLC_E_BCMC_CREDIT_SUPPORT	127	/* credit check for BCMC supported */
+#define WLC_E_BT_WIFI_HANDOVER_REQ	130	/* Handover Request Initiated */
+#define WLC_E_SPW_TXINHIBIT		131	/* Southpaw TxInhibit notification */
+#define WLC_E_FBT_AUTH_REQ_IND		132	/* FBT Authentication Request Indication */
+#define WLC_E_RSSI_LQM			133	/* Enhancement addition for WLC_E_RSSI */
+#define WLC_E_PFN_GSCAN_FULL_RESULT	134	/* Full probe/beacon (IEs etc) results */
+#define WLC_E_PFN_SWC			135	/* Significant change in rssi of bssids being tracked */
+#define WLC_E_AUTHORIZED		136	/* a STA has been authorized for traffic */
+#define WLC_E_PROBREQ_MSG_RX		137	/* probe req with wl_event_rx_frame_data_t header */
+#define WLC_E_PFN_SCAN_COMPLETE		138	/* PFN completed scan of network list */
+#define WLC_E_RMC_EVENT			139	/* RMC Event */
+#define WLC_E_DPSTA_INTF_IND		140	/* DPSTA interface indication */
+#define WLC_E_RRM			141	/* RRM Event */
+#define WLC_E_PFN_SSID_EXT		142	/* SSID EXT event */
+#define WLC_E_ROAM_EXP_EVENT		143	/* Expanded roam event */
+#define WLC_E_LAST			144	/* highest val + 1 for range checking */
+#if (WLC_E_LAST > 144)
+#error "WLC_E_LAST: Invalid value for last event; must be <= 144."
+#endif /* WLC_E_LAST */
+
+/* define an API for getting the string name of an event */
+extern const char *bcmevent_get_name(uint event_type);
+
+/* conversion between host and network order for events */
+extern void wl_event_to_host_order(wl_event_msg_t * evt);
+extern void wl_event_to_network_order(wl_event_msg_t * evt);
+
+
+/* Event status codes */
+#define WLC_E_STATUS_SUCCESS		0	/* operation was successful */
+#define WLC_E_STATUS_FAIL		1	/* operation failed */
+#define WLC_E_STATUS_TIMEOUT		2	/* operation timed out */
+#define WLC_E_STATUS_NO_NETWORKS	3	/* failed due to no matching network found */
+#define WLC_E_STATUS_ABORT		4	/* operation was aborted */
+#define WLC_E_STATUS_NO_ACK		5	/* protocol failure: packet not ack'd */
+#define WLC_E_STATUS_UNSOLICITED	6	/* AUTH or ASSOC packet was unsolicited */
+#define WLC_E_STATUS_ATTEMPT		7	/* attempt to assoc to an auto auth configuration */
+#define WLC_E_STATUS_PARTIAL		8	/* scan results are incomplete */
+#define WLC_E_STATUS_NEWSCAN		9	/* scan aborted by another scan */
+#define WLC_E_STATUS_NEWASSOC		10	/* scan aborted due to assoc in progress */
+#define WLC_E_STATUS_11HQUIET		11	/* 802.11h quiet period started */
+#define WLC_E_STATUS_SUPPRESS		12	/* user disabled scanning (WLC_SET_SCANSUPPRESS) */
+#define WLC_E_STATUS_NOCHANS		13	/* no allowable channels to scan */
+#define WLC_E_STATUS_CS_ABORT		15	/* abort channel select */
+#define WLC_E_STATUS_ERROR		16	/* request failed due to error */
+#define WLC_E_STATUS_INVALID		0xff	/* Invalid status code to init variables.
*/ + +/* roam reason codes */ +#define WLC_E_REASON_INITIAL_ASSOC 0 /* initial assoc */ +#define WLC_E_REASON_LOW_RSSI 1 /* roamed due to low RSSI */ +#define WLC_E_REASON_DEAUTH 2 /* roamed due to DEAUTH indication */ +#define WLC_E_REASON_DISASSOC 3 /* roamed due to DISASSOC indication */ +#define WLC_E_REASON_BCNS_LOST 4 /* roamed due to lost beacons */ + +/* Roam codes used primarily by CCX */ +#define WLC_E_REASON_FAST_ROAM_FAILED 5 /* roamed due to fast roam failure */ +#define WLC_E_REASON_DIRECTED_ROAM 6 /* roamed due to request by AP */ +#define WLC_E_REASON_TSPEC_REJECTED 7 /* roamed due to TSPEC rejection */ +#define WLC_E_REASON_BETTER_AP 8 /* roamed due to finding better AP */ +#define WLC_E_REASON_MINTXRATE 9 /* roamed because at mintxrate for too long */ +#define WLC_E_REASON_TXFAIL 10 /* We can hear AP, but AP can't hear us */ +/* retained for precommit auto-merging errors; remove once all branches are synced */ +#define WLC_E_REASON_REQUESTED_ROAM 11 +#define WLC_E_REASON_BSSTRANS_REQ 11 /* roamed due to BSS Transition request by AP */ + +/* prune reason codes */ +#define WLC_E_PRUNE_ENCR_MISMATCH 1 /* encryption mismatch */ +#define WLC_E_PRUNE_BCAST_BSSID 2 /* AP uses a broadcast BSSID */ +#define WLC_E_PRUNE_MAC_DENY 3 /* STA's MAC addr is in AP's MAC deny list */ +#define WLC_E_PRUNE_MAC_NA 4 /* STA's MAC addr is not in AP's MAC allow list */ +#define WLC_E_PRUNE_REG_PASSV 5 /* AP not allowed due to regulatory restriction */ +#define WLC_E_PRUNE_SPCT_MGMT 6 /* AP does not support STA locale spectrum mgmt */ +#define WLC_E_PRUNE_RADAR 7 /* AP is on a radar channel of STA locale */ +#define WLC_E_RSN_MISMATCH 8 /* STA does not support AP's RSN */ +#define WLC_E_PRUNE_NO_COMMON_RATES 9 /* No rates in common with AP */ +#define WLC_E_PRUNE_BASIC_RATES 10 /* STA does not support all basic rates of BSS */ +#define WLC_E_PRUNE_CIPHER_NA 12 /* BSS's cipher not supported */ +#define WLC_E_PRUNE_KNOWN_STA 13 /* AP is already known to us as a STA */ +#define WLC_E_PRUNE_WDS_PEER 15 /* AP is already known to us as a WDS peer */ +#define WLC_E_PRUNE_QBSS_LOAD 16 /* QBSS LOAD - AAC is too low */ +#define WLC_E_PRUNE_HOME_AP 17 /* prune home AP */ +#define WLC_E_PRUNE_AUTH_RESP_MAC 20 /* suppress auth resp by MAC filter */ + +/* WPA failure reason codes carried in the WLC_E_PSK_SUP event */ +#define WLC_E_SUP_OTHER 0 /* Other reason */ +#define WLC_E_SUP_DECRYPT_KEY_DATA 1 /* Decryption of key data failed */ +#define WLC_E_SUP_BAD_UCAST_WEP128 2 /* Illegal use of ucast WEP128 */ +#define WLC_E_SUP_BAD_UCAST_WEP40 3 /* Illegal use of ucast WEP40 */ +#define WLC_E_SUP_UNSUP_KEY_LEN 4 /* Unsupported key length */ +#define WLC_E_SUP_PW_KEY_CIPHER 5 /* Unicast cipher mismatch in pairwise key */ +#define WLC_E_SUP_MSG3_TOO_MANY_IE 6 /* WPA IE contains > 1 RSN IE in key msg 3 */ +#define WLC_E_SUP_MSG3_IE_MISMATCH 7 /* WPA IE mismatch in key message 3 */ +#define WLC_E_SUP_NO_INSTALL_FLAG 8 /* INSTALL flag unset in 4-way msg */ +#define WLC_E_SUP_MSG3_NO_GTK 9 /* encapsulated GTK missing from msg 3 */ +#define WLC_E_SUP_GRP_KEY_CIPHER 10 /* Multicast cipher mismatch in group key */ +#define WLC_E_SUP_GRP_MSG1_NO_GTK 11 /* encapsulated GTK missing from group msg 1 */ +#define WLC_E_SUP_GTK_DECRYPT_FAIL 12 /* GTK decrypt failure */ +#define WLC_E_SUP_SEND_FAIL 13 /* message send failure */ +#define WLC_E_SUP_DEAUTH 14 /* received FC_DEAUTH */ +#define WLC_E_SUP_WPA_PSK_TMO 15 /* WPA PSK 4-way handshake timeout */ + +/* Event data for events that include frames received over the air */ +/* 
WLC_E_PROBRESP_MSG + * WLC_E_P2P_PROBREQ_MSG + * WLC_E_ACTION_FRAME_RX + */ +typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data { + uint16 version; + uint16 channel; /* Matches chanspec_t format from bcmwifi_channels.h */ + int32 rssi; + uint32 mactime; + uint32 rate; +} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_t; + +#define BCM_RX_FRAME_DATA_VERSION 1 + +/* WLC_E_IF event data */ +typedef struct wl_event_data_if { + uint8 ifidx; /* RTE virtual device index (for dongle) */ + uint8 opcode; /* see I/F opcode */ + uint8 reserved; /* bit mask (WLC_E_IF_FLAGS_XXX ) */ + uint8 bssidx; /* bsscfg index */ + uint8 role; /* see I/F role */ +} wl_event_data_if_t; + +/* opcode in WLC_E_IF event */ +#define WLC_E_IF_ADD 1 /* bsscfg add */ +#define WLC_E_IF_DEL 2 /* bsscfg delete */ +#define WLC_E_IF_CHANGE 3 /* bsscfg role change */ + +/* I/F role code in WLC_E_IF event */ +#define WLC_E_IF_ROLE_STA 0 /* Infra STA */ +#define WLC_E_IF_ROLE_AP 1 /* Access Point */ +#define WLC_E_IF_ROLE_WDS 2 /* WDS link */ +#define WLC_E_IF_ROLE_P2P_GO 3 /* P2P Group Owner */ +#define WLC_E_IF_ROLE_P2P_CLIENT 4 /* P2P Client */ +#define WLC_E_IF_ROLE_IBSS 8 /* IBSS */ + +/* WLC_E_RSSI event data */ +typedef struct wl_event_data_rssi { + int32 rssi; + int32 snr; + int32 noise; +} wl_event_data_rssi_t; + +/* WLC_E_IF flag */ +#define WLC_E_IF_FLAGS_BSSCFG_NOIF 0x1 /* no host I/F creation needed */ + +/* Reason codes for LINK */ +#define WLC_E_LINK_BCN_LOSS 1 /* Link down because of beacon loss */ +#define WLC_E_LINK_DISASSOC 2 /* Link down because of disassoc */ +#define WLC_E_LINK_ASSOC_REC 3 /* Link down because assoc recreate failed */ +#define WLC_E_LINK_BSSCFG_DIS 4 /* Link down due to bsscfg down */ + + +/* WLC_E_NDIS_LINK event data */ +typedef BWL_PRE_PACKED_STRUCT struct ndis_link_parms { + struct ether_addr peer_mac; /* 6 bytes */ + uint16 chanspec; /* 2 bytes */ + uint32 link_speed; /* current datarate in units of 500 Kbit/s */ + uint32 max_link_speed; /* max possible datarate for link in units of 500 Kbit/s */ + int32 rssi; /* average rssi */ +} BWL_POST_PACKED_STRUCT ndis_link_parms_t; + +/* reason codes for WLC_E_OVERLAY_REQ event */ +#define WLC_E_OVL_DOWNLOAD 0 /* overlay download request */ +#define WLC_E_OVL_UPDATE_IND 1 /* device indication of host overlay update */ + +/* reason codes for WLC_E_TDLS_PEER_EVENT event */ +#define WLC_E_TDLS_PEER_DISCOVERED 0 /* peer is ready to establish TDLS */ +#define WLC_E_TDLS_PEER_CONNECTED 1 +#define WLC_E_TDLS_PEER_DISCONNECTED 2 + +/* reason codes for WLC_E_RMC_EVENT event */ +#define WLC_E_REASON_RMC_NONE 0 +#define WLC_E_REASON_RMC_AR_LOST 1 +#define WLC_E_REASON_RMC_AR_NO_ACK 2 + +#ifdef WLTDLS +/* TDLS Action Category code */ +#define TDLS_AF_CATEGORY 12 +/* Wi-Fi Display (WFD) Vendor Specific Category */ +/* used for WFD Tunneled Probe Request and Response */ +#define TDLS_VENDOR_SPECIFIC 127 +/* TDLS Action Field Values */ +#define TDLS_ACTION_SETUP_REQ 0 +#define TDLS_ACTION_SETUP_RESP 1 +#define TDLS_ACTION_SETUP_CONFIRM 2 +#define TDLS_ACTION_TEARDOWN 3 +#define WLAN_TDLS_SET_PROBE_WFD_IE 11 +#define WLAN_TDLS_SET_SETUP_WFD_IE 12 +#define WLAN_TDLS_SET_WFD_ENABLED 13 +#define WLAN_TDLS_SET_WFD_DISABLED 14 +#endif + + +/* GAS event data */ +typedef BWL_PRE_PACKED_STRUCT struct wl_event_gas { + uint16 channel; /* channel of GAS protocol */ + uint8 dialog_token; /* GAS dialog token */ + uint8 fragment_id; /* fragment id */ + uint16 status_code; /* status code on GAS completion */ + uint16 data_len; /* length of data to follow */ + uint8 
data[1];	/* variable length specified by data_len */
+} BWL_POST_PACKED_STRUCT wl_event_gas_t;
+
+/* service discovery TLV */
+typedef BWL_PRE_PACKED_STRUCT struct wl_sd_tlv {
+	uint16	length;			/* length of response_data */
+	uint8	protocol;		/* service protocol type */
+	uint8	transaction_id;		/* service transaction id */
+	uint8	status_code;		/* status code */
+	uint8	data[1];		/* response data */
+} BWL_POST_PACKED_STRUCT wl_sd_tlv_t;
+
+/* service discovery event data */
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_sd {
+	uint16	channel;		/* channel */
+	uint8	count;			/* number of tlvs */
+	wl_sd_tlv_t	tlv[1];		/* service discovery TLV */
+} BWL_POST_PACKED_STRUCT wl_event_sd_t;
+
+/* Note: proxd has a new API (ver 3.0) which deprecates the following */
+
+/* Reason codes for WLC_E_PROXD */
+#define WLC_E_PROXD_FOUND		1	/* Found a proximity device */
+#define WLC_E_PROXD_GONE		2	/* Lost a proximity device */
+#define WLC_E_PROXD_START		3	/* used by: target */
+#define WLC_E_PROXD_STOP		4	/* used by: target */
+#define WLC_E_PROXD_COMPLETED		5	/* used by: initiator completed */
+#define WLC_E_PROXD_ERROR		6	/* used by both initiator and target */
+#define WLC_E_PROXD_COLLECT_START	7	/* used by: target & initiator */
+#define WLC_E_PROXD_COLLECT_STOP	8	/* used by: target */
+#define WLC_E_PROXD_COLLECT_COMPLETED	9	/* used by: initiator completed */
+#define WLC_E_PROXD_COLLECT_ERROR	10	/* used by both initiator and target */
+#define WLC_E_PROXD_NAN_EVENT		11	/* used by both initiator and target */
+#define WLC_E_PROXD_TS_RESULTS		12	/* used by: initiator completed */
+
+/* proxd_event data */
+typedef struct ftm_sample {
+	uint32	value;	/* RTT in ns */
+	int8	rssi;	/* RSSI */
+} ftm_sample_t;
+
+typedef struct ts_sample {
+	uint32	t1;
+	uint32	t2;
+	uint32	t3;
+	uint32	t4;
+} ts_sample_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct proxd_event_data {
+	uint16 ver;			/* version */
+	uint16 mode;			/* mode: target/initiator */
+	uint16 method;			/* method: rssi/TOF/AOA */
+	uint8  err_code;		/* error classification */
+	uint8  TOF_type;		/* one way or two way TOF */
+	uint8  OFDM_frame_type;		/* legacy or VHT */
+	uint8  bandwidth;		/* bandwidth: 20, 40, or 80 MHz */
+	struct ether_addr peer_mac;	/* peer mac address (e.g. for target: the initiator's) */
+	uint32 distance;		/* distance to target, in meters */
+	uint32 meanrtt;			/* mean delta */
+	uint32 modertt;			/* mode delta */
+	uint32 medianrtt;		/* median RTT */
+	uint32 sdrtt;			/* standard deviation of RTT */
+	int32  gdcalcresult;		/* software or hardware (somewhat redundant: if
+					 * frame type is VHT, it should be done in hardware)
+					 */
+	int16  avg_rssi;		/* avg rssi across the ftm frames */
+	int16  validfrmcnt;		/* firmware's valid frame counts */
+	int32  peer_router_info;	/* peer router information if available in TLV; */
+					/* we will add this field later */
+	int32 var1;			/* average of group delay */
+	int32 var2;			/* average of threshold crossing */
+	int32 var3;			/* difference between group delay and threshold crossing */
+					/* raw Fine Time Measurements (ftm) data */
+	uint16 ftm_unit;		/* ftm count resolution in picoseconds, 6250 ps by default */
+	uint16 ftm_cnt;			/* number of rtd measurements/length of the ftm buffer */
+	ftm_sample_t ftm_buff[1];	/* 1 ... ftm_cnt */
+} BWL_POST_PACKED_STRUCT wl_proxd_event_data_t;
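The ftm_buff[1] member is the usual pre-C99 flexible-array idiom in these headers: the event carries ftm_cnt trailing samples. A minimal sketch of walking them, assuming the event has already been converted to host byte order (not part of the header):

/* Illustrative sketch: average the raw RTT samples trailing a proxd event. */
static uint32 proxd_mean_rtt_ns(const wl_proxd_event_data_t *ev)
{
	uint32 sum = 0;
	uint16 i;

	if (ev->ftm_cnt == 0)
		return 0;
	for (i = 0; i < ev->ftm_cnt; i++)
		sum += ev->ftm_buff[i].value;	/* RTT sample in ns */
	return sum / ev->ftm_cnt;
}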
ftm_cnt */ +} BWL_POST_PACKED_STRUCT wl_proxd_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct proxd_event_ts_results { + uint16 ver; /* version */ + uint16 mode; /* mode: target/initiator */ + uint16 method; /* method: rssi/TOF/AOA */ + uint8 err_code; /* error classification */ + uint8 TOF_type; /* one way or two way TOF */ + uint16 ts_cnt; /* number of timestamp measurements */ + ts_sample_t ts_buff[1]; /* Timestamps */ +} BWL_POST_PACKED_STRUCT wl_proxd_event_ts_results_t; + + +/* Video Traffic Interference Monitor Event */ +#define INTFER_EVENT_VERSION 1 +#define INTFER_STREAM_TYPE_NONTCP 1 +#define INTFER_STREAM_TYPE_TCP 2 +#define WLINTFER_STATS_NSMPLS 4 +typedef struct wl_intfer_event { + uint16 version; /* version */ + uint16 status; /* status */ + uint8 txfail_histo[WLINTFER_STATS_NSMPLS]; /* txfail histo */ +} wl_intfer_event_t; + +/* WLC_E_PSTA_PRIMARY_INTF_IND event data */ +typedef struct wl_psta_primary_intf_event { + struct ether_addr prim_ea; /* primary intf ether addr */ +} wl_psta_primary_intf_event_t; + +/* WLC_E_DPSTA_INTF_IND event data */ +typedef enum { + WL_INTF_PSTA = 1, + WL_INTF_DWDS = 2 +} wl_dpsta_intf_type; + +typedef struct wl_dpsta_intf_event { + wl_dpsta_intf_type intf_type; /* dwds/psta intf register */ +} wl_dpsta_intf_event_t; + +/* ********** NAN protocol events/subevents ********** */ +#define NAN_EVENT_BUFFER_SIZE 512 /* max size */ +/* nan application events to the host driver */ +typedef enum nan_app_events { + WL_NAN_EVENT_START = 1, /* NAN cluster started */ + WL_NAN_EVENT_JOIN = 2, /* Joined to a NAN cluster */ + WL_NAN_EVENT_ROLE = 3, /* Role or State changed */ + WL_NAN_EVENT_SCAN_COMPLETE = 4, + WL_NAN_EVENT_DISCOVERY_RESULT = 5, + WL_NAN_EVENT_REPLIED = 6, + WL_NAN_EVENT_TERMINATED = 7, /* the instance ID will be present in the ev data */ + WL_NAN_EVENT_RECEIVE = 8, + WL_NAN_EVENT_STATUS_CHG = 9, /* generated on any change in nan_mac status */ + WL_NAN_EVENT_MERGE = 10, /* Merged to a NAN cluster */ + WL_NAN_EVENT_STOP = 11, /* NAN stopped */ + WL_NAN_EVENT_P2P = 12, /* NAN P2P EVENT */ + WL_NAN_EVENT_WINDOW_BEGIN_P2P = 13, /* Event for begin of P2P further availability window */ + WL_NAN_EVENT_WINDOW_BEGIN_MESH = 14, + WL_NAN_EVENT_WINDOW_BEGIN_IBSS = 15, + WL_NAN_EVENT_WINDOW_BEGIN_RANGING = 16, + WL_NAN_EVENT_POST_DISC = 17, /* Event for post discovery data */ + WL_NAN_EVENT_INVALID /* delimiter for max value */ +} nan_app_events_e; + +#define IS_NAN_EVT_ON(var, evt) ((var & (1 << (evt-1))) != 0) +/* ******************* end of NAN section *************** */ + +#define MSCH_EVENTS_BUFFER_SIZE 2048 + +/* Reason codes for WLC_E_MSCH */ +#define WLC_E_MSCH_START 0 /* start event check */ +#define WLC_E_MSCH_EXIT 1 /* exit event check */ +#define WLC_E_MSCH_REQ 2 /* request event */ +#define WLC_E_MSCH_CALLBACK 3 /* call back event */ +#define WLC_E_MSCH_MESSAGE 4 /* message event */ +#define WLC_E_MSCH_PROFILE_START 5 +#define WLC_E_MSCH_PROFILE_END 6 +#define WLC_E_MSCH_REQ_HANDLE 7 +#define WLC_E_MSCH_REQ_ENTITY 8 +#define WLC_E_MSCH_CHAN_CTXT 9 +#define WLC_E_MSCH_TIMESLOT 10 +#define WLC_E_MSCH_REQ_TIMING 11 + +typedef BWL_PRE_PACKED_STRUCT struct msch_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; +} BWL_POST_PACKED_STRUCT msch_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_start_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint32 status; +} BWL_POST_PACKED_STRUCT msch_start_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_message_event_data { + uint32 time_lo; /* 
Request time */ + uint32 time_hi; + char message[1]; /* message */ +} BWL_POST_PACKED_STRUCT msch_message_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_req_param_event_data { + uint16 flags; /* Describe various request properties */ + uint8 req_type; /* Describe start and end time flexiblilty */ + uint8 priority; /* Define the request priority */ + uint32 start_time_l; /* Requested start time offset in us unit */ + uint32 start_time_h; + uint32 duration; /* Requested duration in us unit */ + uint32 interval; /* Requested periodic interval in us unit, + * 0 means non-periodic + */ + union { + uint32 dur_flex; /* MSCH_REG_DUR_FLEX, min_dur = duration - dur_flex */ + struct { + uint32 min_dur; /* min duration for traffic, maps to home_time */ + uint32 max_away_dur; /* max acceptable away dur, maps to home_away_time*/ + uint32 lo_prio_time_l; + uint32 lo_prio_time_h; + uint32 lo_prio_interval; /* repeated low priority interval */ + uint32 hi_prio_time_l; + uint32 hi_prio_time_h; + uint32 hi_prio_interval; /* repeated high priority interval */ + } bf; + } flex; +} BWL_POST_PACKED_STRUCT msch_req_param_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_timeslot_event_data { + uint32 p_timeslot; + uint32 p_prev; + uint32 p_next; + uint32 timeslot_id; + uint32 pre_start_time_l; + uint32 pre_start_time_h; + uint32 end_time_l; + uint32 end_time_h; + uint32 sch_dur_l; + uint32 sch_dur_h; + uint32 p_chan_ctxt; + uint32 fire_time_l; + uint32 fire_time_h; + uint32 state; +} BWL_POST_PACKED_STRUCT msch_timeslot_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_req_timing_event_data { + uint32 p_req_timing; + uint32 p_prev; + uint32 p_next; + uint16 flags; + uint16 timeslot_ptr; + uint32 fire_time_l; + uint32 fire_time_h; + uint32 pre_start_time_l; + uint32 pre_start_time_h; + uint32 start_time_l; + uint32 start_time_h; + uint32 end_time_l; + uint32 end_time_h; + uint32 p_timeslot; +} BWL_POST_PACKED_STRUCT msch_req_timing_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_chan_ctxt_event_data { + uint32 p_chan_ctxt; + uint32 p_prev; + uint32 p_next; + uint16 chanspec; + uint16 bf_sch_pending; + uint32 bf_link_prev; + uint32 bf_link_next; + uint32 onchan_time_l; + uint32 onchan_time_h; + uint32 actual_onchan_dur_l; + uint32 actual_onchan_dur_h; + uint32 pend_onchan_dur_l; + uint32 pend_onchan_dur_h; + uint16 req_entity_list_cnt; + uint16 req_entity_list_ptr; + uint16 bf_entity_list_cnt; + uint16 bf_entity_list_ptr; +} BWL_POST_PACKED_STRUCT msch_chan_ctxt_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_prio_event_data { + uint32 is_lo; + uint32 time_l; + uint32 time_h; + uint32 p_entity; +} BWL_POST_PACKED_STRUCT msch_prio_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_req_entity_event_data { + uint32 p_req_entity; + uint32 req_hdl_link_prev; + uint32 req_hdl_link_next; + uint32 chan_ctxt_link_prev; + uint32 chan_ctxt_link_next; + uint32 rt_specific_link_prev; + uint32 rt_specific_link_next; + uint16 chanspec; + uint16 req_param_ptr; + uint16 cur_slot_ptr; + uint16 pend_slot_ptr; + msch_prio_event_data_t lo_event; + msch_prio_event_data_t hi_event; + uint32 ts_change_dur_flex; + uint16 ts_change_flags; + uint16 chan_ctxt_ptr; + uint32 p_chan_ctxt; + uint32 p_req_hdl; + uint32 hi_cnt_l; + uint32 hi_cnt_h; + uint32 bf_last_serv_time_l; + uint32 bf_last_serv_time_h; +} BWL_POST_PACKED_STRUCT msch_req_entity_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_req_handle_event_data { + uint32 p_req_handle; + uint32 p_prev; + uint32 p_next; + 
uint32 cb_func; + uint32 cb_ctxt; + uint16 req_param_ptr; + uint16 req_entity_list_cnt; + uint16 req_entity_list_ptr; + uint16 chan_cnt; + uint16 schd_chan_cnt; + uint16 chanspec_list_cnt; + uint16 chanspec_list_ptr; + uint16 pad; +} BWL_POST_PACKED_STRUCT msch_req_handle_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_profile_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint32 free_req_hdl_list; + uint32 free_req_entity_list; + uint32 free_chan_ctxt_list; + uint32 free_timeslot_list; + uint32 free_chanspec_list; + uint16 cur_msch_timeslot_ptr; + uint16 pad; + uint32 p_cur_msch_timeslot; + uint32 cur_armed_timeslot; + uint32 cur_armed_req_timing; + uint32 ts_id; + uint32 service_interval; + uint32 max_lo_prio_interval; + uint16 flex_list_cnt; + uint16 msch_chanspec_alloc_cnt; + uint16 msch_req_entity_alloc_cnt; + uint16 msch_req_hdl_alloc_cnt; + uint16 msch_chan_ctxt_alloc_cnt; + uint16 msch_timeslot_alloc_cnt; + uint16 msch_req_hdl_list_cnt; + uint16 msch_req_hdl_list_ptr; + uint16 msch_chan_ctxt_list_cnt; + uint16 msch_chan_ctxt_list_ptr; + uint16 msch_timeslot_list_cnt; + uint16 msch_timeslot_list_ptr; + uint16 msch_req_timing_list_cnt; + uint16 msch_req_timing_list_ptr; + uint16 msch_start_flex_list_cnt; + uint16 msch_start_flex_list_ptr; + uint16 msch_both_flex_list_cnt; + uint16 msch_both_flex_list_ptr; +} BWL_POST_PACKED_STRUCT msch_profile_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_req_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint16 chanspec_cnt; + uint16 chanspec_ptr; + uint16 req_param_ptr; + uint16 pad; +} BWL_POST_PACKED_STRUCT msch_req_event_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct msch_callback_event_data { + uint32 time_lo; /* Request time */ + uint32 time_hi; + uint16 type; /* callback type */ + uint16 chanspec; /* actual chanspec, may different with requested one */ + uint32 pre_start_time_l; /* time slot prestart time low 32bit */ + uint32 pre_start_time_h; /* time slot prestart time high 32bit */ + uint32 end_time_l; /* time slot end time low 32 bit */ + uint32 end_time_h; /* time slot end time high 32 bit */ + uint32 timeslot_id; /* unique time slot id */ +} BWL_POST_PACKED_STRUCT msch_callback_event_data_t; + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _BCMEVENT_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmip.h b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h new file mode 100644 index 000000000000..eaa679c38948 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bcmip.h @@ -0,0 +1,248 @@ +/* + * Fundamental constants relating to IP Protocol + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. 
The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmip.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _bcmip_h_ +#define _bcmip_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +/* This marks the start of a packed structure section. */ +#include + + +/* IPV4 and IPV6 common */ +#define IP_VER_OFFSET 0x0 /* offset to version field */ +#define IP_VER_MASK 0xf0 /* version mask */ +#define IP_VER_SHIFT 4 /* version shift */ +#define IP_VER_4 4 /* version number for IPV4 */ +#define IP_VER_6 6 /* version number for IPV6 */ + +#define IP_VER(ip_body) \ + ((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT) + +#define IP_PROT_ICMP 0x1 /* ICMP protocol */ +#define IP_PROT_IGMP 0x2 /* IGMP protocol */ +#define IP_PROT_TCP 0x6 /* TCP protocol */ +#define IP_PROT_UDP 0x11 /* UDP protocol type */ +#define IP_PROT_ICMP6 0x3a /* ICMPv6 protocol type */ + +/* IPV4 field offsets */ +#define IPV4_VER_HL_OFFSET 0 /* version and ihl byte offset */ +#define IPV4_TOS_OFFSET 1 /* type of service offset */ +#define IPV4_PKTLEN_OFFSET 2 /* packet length offset */ +#define IPV4_PKTFLAG_OFFSET 6 /* more-frag,dont-frag flag offset */ +#define IPV4_PROT_OFFSET 9 /* protocol type offset */ +#define IPV4_CHKSUM_OFFSET 10 /* IP header checksum offset */ +#define IPV4_SRC_IP_OFFSET 12 /* src IP addr offset */ +#define IPV4_DEST_IP_OFFSET 16 /* dest IP addr offset */ +#define IPV4_OPTIONS_OFFSET 20 /* IP options offset */ +#define IPV4_MIN_HEADER_LEN 20 /* Minimum size for an IP header (no options) */ + +/* IPV4 field decodes */ +#define IPV4_VER_MASK 0xf0 /* IPV4 version mask */ +#define IPV4_VER_SHIFT 4 /* IPV4 version shift */ + +#define IPV4_HLEN_MASK 0x0f /* IPV4 header length mask */ +#define IPV4_HLEN(ipv4_body) (4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK)) + +#define IPV4_ADDR_LEN 4 /* IPV4 address length */ + +#define IPV4_ADDR_NULL(a) ((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \ + ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0) + +#define IPV4_ADDR_BCAST(a) ((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \ + ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff) + +#define IPV4_TOS_DSCP_MASK 0xfc /* DiffServ codepoint mask */ +#define IPV4_TOS_DSCP_SHIFT 2 /* DiffServ codepoint shift */ + +#define IPV4_TOS(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET]) + +#define IPV4_TOS_PREC_MASK 0xe0 /* Historical precedence mask */ +#define IPV4_TOS_PREC_SHIFT 5 /* Historical precedence shift */ + +#define IPV4_TOS_LOWDELAY 0x10 /* Lowest delay requested */ +#define IPV4_TOS_THROUGHPUT 0x8 /* Best throughput requested */ +#define IPV4_TOS_RELIABILITY 0x4 /* Most reliable delivery requested */ + +#define IPV4_TOS_ROUTINE 0 +#define IPV4_TOS_PRIORITY 1 +#define IPV4_TOS_IMMEDIATE 2 +#define IPV4_TOS_FLASH 3 +#define IPV4_TOS_FLASHOVERRIDE 4 +#define IPV4_TOS_CRITICAL 5 +#define IPV4_TOS_INETWORK_CTRL 6 +#define IPV4_TOS_NETWORK_CTRL 7 + +#define IPV4_PROT(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET]) + +#define IPV4_FRAG_RESV 0x8000 /* Reserved */ +#define IPV4_FRAG_DONT 0x4000 /* Don't fragment */ +#define IPV4_FRAG_MORE 0x2000 /* More fragments */ +#define IPV4_FRAG_OFFSET_MASK 0x1fff /* Fragment offset */ + +#define IPV4_ADDR_STR_LEN 16 /* Max IP address length in string format */ + +/* IPV4 packet 
formats */ +BWL_PRE_PACKED_STRUCT struct ipv4_addr { + uint8 addr[IPV4_ADDR_LEN]; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct ipv4_hdr { + uint8 version_ihl; /* Version and Internet Header Length */ + uint8 tos; /* Type Of Service */ + uint16 tot_len; /* Number of bytes in packet (max 65535) */ + uint16 id; /* Identification */ + uint16 frag; /* 3 flag bits and fragment offset */ + uint8 ttl; /* Time To Live */ + uint8 prot; /* Protocol */ + uint16 hdr_chksum; /* IP header checksum */ + uint8 src_ip[IPV4_ADDR_LEN]; /* Source IP Address */ + uint8 dst_ip[IPV4_ADDR_LEN]; /* Destination IP Address */ +} BWL_POST_PACKED_STRUCT; + +/* IPV6 field offsets */ +#define IPV6_PAYLOAD_LEN_OFFSET 4 /* payload length offset */ +#define IPV6_NEXT_HDR_OFFSET 6 /* next header/protocol offset */ +#define IPV6_HOP_LIMIT_OFFSET 7 /* hop limit offset */ +#define IPV6_SRC_IP_OFFSET 8 /* src IP addr offset */ +#define IPV6_DEST_IP_OFFSET 24 /* dst IP addr offset */ + +/* IPV6 field decodes */ +#define IPV6_TRAFFIC_CLASS(ipv6_body) \ + (((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \ + ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4)) + +#define IPV6_FLOW_LABEL(ipv6_body) \ + (((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \ + (((uint8 *)(ipv6_body))[2] << 8) | \ + (((uint8 *)(ipv6_body))[3])) + +#define IPV6_PAYLOAD_LEN(ipv6_body) \ + ((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \ + ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1]) + +#define IPV6_NEXT_HDR(ipv6_body) \ + (((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET]) + +#define IPV6_PROT(ipv6_body) IPV6_NEXT_HDR(ipv6_body) + +#define IPV6_ADDR_LEN 16 /* IPV6 address length */ + +/* IPV4 TOS or IPV6 Traffic Classifier or 0 */ +#define IP_TOS46(ip_body) \ + (IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \ + IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0) + +#define IP_DSCP46(ip_body) (IP_TOS46(ip_body) >> IPV4_TOS_DSCP_SHIFT) + +/* IPV4 or IPV6 Protocol Classifier or 0 */ +#define IP_PROT46(ip_body) \ + (IP_VER(ip_body) == IP_VER_4 ? IPV4_PROT(ip_body) : \ + IP_VER(ip_body) == IP_VER_6 ? 
IPV6_PROT(ip_body) : 0) + +/* IPV6 extension headers (options) */ +#define IPV6_EXTHDR_HOP 0 +#define IPV6_EXTHDR_ROUTING 43 +#define IPV6_EXTHDR_FRAGMENT 44 +#define IPV6_EXTHDR_AUTH 51 +#define IPV6_EXTHDR_NONE 59 +#define IPV6_EXTHDR_DEST 60 + +#define IPV6_EXTHDR(prot) (((prot) == IPV6_EXTHDR_HOP) || \ + ((prot) == IPV6_EXTHDR_ROUTING) || \ + ((prot) == IPV6_EXTHDR_FRAGMENT) || \ + ((prot) == IPV6_EXTHDR_AUTH) || \ + ((prot) == IPV6_EXTHDR_NONE) || \ + ((prot) == IPV6_EXTHDR_DEST)) + +#define IPV6_MIN_HLEN 40 + +#define IPV6_EXTHDR_LEN(eh) ((((struct ipv6_exthdr *)(eh))->hdrlen + 1) << 3) + +BWL_PRE_PACKED_STRUCT struct ipv6_exthdr { + uint8 nexthdr; + uint8 hdrlen; +} BWL_POST_PACKED_STRUCT; + +BWL_PRE_PACKED_STRUCT struct ipv6_exthdr_frag { + uint8 nexthdr; + uint8 rsvd; + uint16 frag_off; + uint32 ident; +} BWL_POST_PACKED_STRUCT; + +static INLINE int32 +ipv6_exthdr_len(uint8 *h, uint8 *proto) +{ + uint16 len = 0, hlen; + struct ipv6_exthdr *eh = (struct ipv6_exthdr *)h; + + while (IPV6_EXTHDR(eh->nexthdr)) { + if (eh->nexthdr == IPV6_EXTHDR_NONE) + return -1; + else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT) + hlen = 8; + else if (eh->nexthdr == IPV6_EXTHDR_AUTH) + hlen = (eh->hdrlen + 2) << 2; + else + hlen = IPV6_EXTHDR_LEN(eh); + + len += hlen; + eh = (struct ipv6_exthdr *)(h + len); + } + + *proto = eh->nexthdr; + return len; +} + +#define IPV4_ISMULTI(a) (((a) & 0xf0000000) == 0xe0000000) + +#define IPV4_MCAST_TO_ETHER_MCAST(ipv4, ether) \ +{ \ + ether[0] = 0x01; \ + ether[1] = 0x00; \ + ether[2] = 0x5E; \ + ether[3] = (ipv4 & 0x7f0000) >> 16; \ + ether[4] = (ipv4 & 0xff00) >> 8; \ + ether[5] = (ipv4 & 0xff); \ +} + +/* This marks the end of a packed structure section. */ +#include + +#define IPV4_ADDR_STR "%d.%d.%d.%d" +#define IPV4_ADDR_TO_STR(addr) ((uint32)addr & 0xff000000) >> 24, \ + ((uint32)addr & 0x00ff0000) >> 16, \ + ((uint32)addr & 0x0000ff00) >> 8, \ + ((uint32)addr & 0x000000ff) + +#endif /* _bcmip_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h b/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h new file mode 100644 index 000000000000..fbab037b2f32 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bcmipv6.h @@ -0,0 +1,163 @@ +/* + * Fundamental constants relating to Neighbor Discovery Protocol + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: bcmipv6.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _bcmipv6_h_ +#define _bcmipv6_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +/* This marks the start of a packed structure section. */ +#include + +/* Extension headers */ +#define IPV6_EXT_HOP 0 +#define IPV6_EXT_ROUTE 43 +#define IPV6_EXT_FRAG 44 +#define IPV6_EXT_DEST 60 +#define IPV6_EXT_ESEC 50 +#define IPV6_EXT_AUTH 51 + +/* Minimum size (extension header "word" length) */ +#define IPV6_EXT_WORD 8 + +/* Offsets for most extension headers */ +#define IPV6_EXT_NEXTHDR 0 +#define IPV6_EXT_HDRLEN 1 + +/* Constants specific to fragmentation header */ +#define IPV6_FRAG_MORE_MASK 0x0001 +#define IPV6_FRAG_MORE_SHIFT 0 +#define IPV6_FRAG_OFFS_MASK 0xfff8 +#define IPV6_FRAG_OFFS_SHIFT 3 + +/* For icmpv6 */ +#define ICMPV6_HEADER_TYPE 0x3A +#define ICMPV6_PKT_TYPE_RA 134 +#define ICMPV6_PKT_TYPE_NS 135 +#define ICMPV6_PKT_TYPE_NA 136 + +#define ICMPV6_ND_OPT_TYPE_TARGET_MAC 2 +#define ICMPV6_ND_OPT_TYPE_SRC_MAC 1 + +#define ICMPV6_ND_OPT_LEN_LINKADDR 1 + +#define IPV6_VERSION 6 +#define IPV6_HOP_LIMIT 255 + +#define IPV6_ADDR_NULL(a) ((a[0] | a[1] | a[2] | a[3] | a[4] | \ + a[5] | a[6] | a[7] | a[8] | a[9] | \ + a[10] | a[11] | a[12] | a[13] | \ + a[14] | a[15]) == 0) + +#define IPV6_ADDR_LOCAL(a) (((a[0] == 0xfe) && (a[1] & 0x80))? TRUE: FALSE) + +/* IPV6 address */ +BWL_PRE_PACKED_STRUCT struct ipv6_addr { + uint8 addr[16]; +} BWL_POST_PACKED_STRUCT; + + +/* ICMPV6 Header */ +BWL_PRE_PACKED_STRUCT struct icmp6_hdr { + uint8 icmp6_type; + uint8 icmp6_code; + uint16 icmp6_cksum; + BWL_PRE_PACKED_STRUCT union { + uint32 reserved; + BWL_PRE_PACKED_STRUCT struct nd_advt { + uint32 reserved1:5, + override:1, + solicited:1, + router:1, + reserved2:24; + } BWL_POST_PACKED_STRUCT nd_advt; + } BWL_POST_PACKED_STRUCT opt; +} BWL_POST_PACKED_STRUCT; + +/* IPv6 Header Format */ +BWL_PRE_PACKED_STRUCT struct ipv6_hdr { + uint8 priority:4, + version:4; + uint8 flow_lbl[3]; + uint16 payload_len; + uint8 nexthdr; + uint8 hop_limit; + struct ipv6_addr saddr; + struct ipv6_addr daddr; +} BWL_POST_PACKED_STRUCT; + +/* Neighbor Advertisement/Solicitation Packet Structure */ +BWL_PRE_PACKED_STRUCT struct bcm_nd_msg { + struct icmp6_hdr icmph; + struct ipv6_addr target; +} BWL_POST_PACKED_STRUCT; + + +/* Neighbor Solicitation/Advertisement Optional Structure */ +BWL_PRE_PACKED_STRUCT struct nd_msg_opt { + uint8 type; + uint8 len; + uint8 mac_addr[ETHER_ADDR_LEN]; +} BWL_POST_PACKED_STRUCT; + +/* IPv6 Fragmentation Header */ +BWL_PRE_PACKED_STRUCT struct ipv6_frag { + uint8 nexthdr; + uint8 reserved; + uint16 frag_offset; + uint32 ident; +} BWL_POST_PACKED_STRUCT; + +/* This marks the end of a packed structure section. 
*/ +#include + +static const struct ipv6_addr all_node_ipv6_maddr = { + { 0xff, 0x2, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 1 + }}; + +#define IPV6_ISMULTI(a) (a[0] == 0xff) + +#define IPV6_MCAST_TO_ETHER_MCAST(ipv6, ether) \ +{ \ + ether[0] = 0x33; \ + ether[1] = 0x33; \ + ether[2] = ipv6[12]; \ + ether[3] = ipv6[13]; \ + ether[4] = ipv6[14]; \ + ether[5] = ipv6[15]; \ +} + +#endif /* !defined(_bcmipv6_h_) */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmtcp.h b/drivers/net/wireless/bcmdhd/include/proto/bcmtcp.h new file mode 100644 index 000000000000..661e1f84d2ae --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bcmtcp.h @@ -0,0 +1,93 @@ +/* + * Fundamental constants relating to TCP Protocol + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bcmtcp.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _bcmtcp_h_ +#define _bcmtcp_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +/* This marks the start of a packed structure section. 
*/ +#include + + +#define TCP_SRC_PORT_OFFSET 0 /* TCP source port offset */ +#define TCP_DEST_PORT_OFFSET 2 /* TCP dest port offset */ +#define TCP_SEQ_NUM_OFFSET 4 /* TCP sequence number offset */ +#define TCP_ACK_NUM_OFFSET 8 /* TCP acknowledgement number offset */ +#define TCP_HLEN_OFFSET 12 /* HLEN and reserved bits offset */ +#define TCP_FLAGS_OFFSET 13 /* FLAGS and reserved bits offset */ +#define TCP_CHKSUM_OFFSET 16 /* TCP body checksum offset */ + +#define TCP_PORT_LEN 2 /* TCP port field length */ + +/* 8bit TCP flag field */ +#define TCP_FLAG_URG 0x20 +#define TCP_FLAG_ACK 0x10 +#define TCP_FLAG_PSH 0x08 +#define TCP_FLAG_RST 0x04 +#define TCP_FLAG_SYN 0x02 +#define TCP_FLAG_FIN 0x01 + +#define TCP_HLEN_MASK 0xf000 +#define TCP_HLEN_SHIFT 12 + +/* These fields are stored in network order */ +BWL_PRE_PACKED_STRUCT struct bcmtcp_hdr +{ + uint16 src_port; /* Source Port Address */ + uint16 dst_port; /* Destination Port Address */ + uint32 seq_num; /* TCP Sequence Number */ + uint32 ack_num; /* TCP Acknowledgement Number */ + uint16 hdrlen_rsvd_flags; /* Header length, reserved bits and flags */ + uint16 tcpwin; /* TCP window */ + uint16 chksum; /* Segment checksum with pseudoheader */ + uint16 urg_ptr; /* Points to seq-num of byte following urg data */ +} BWL_POST_PACKED_STRUCT; + +#define TCP_MIN_HEADER_LEN 20 + +#define TCP_HDRLEN_MASK 0xf0 +#define TCP_HDRLEN_SHIFT 4 +#define TCP_HDRLEN(hdrlen) (((hdrlen) & TCP_HDRLEN_MASK) >> TCP_HDRLEN_SHIFT) + +#define TCP_FLAGS_MASK 0x1f +#define TCP_FLAGS(hdrlen) ((hdrlen) & TCP_FLAGS_MASK) + +/* This marks the end of a packed structure section. */ +#include + +/* Sequence-number comparisons that tolerate 32-bit wraparound. */ +#define IS_TCPSEQ_GE(a, b) ((a - b) < NBITVAL(31)) /* a >= b */ +#define IS_TCPSEQ_LE(a, b) ((b - a) < NBITVAL(31)) /* a <= b */ +#define IS_TCPSEQ_GT(a, b) !IS_TCPSEQ_LE(a, b) /* a > b */ +#define IS_TCPSEQ_LT(a, b) !IS_TCPSEQ_GE(a, b) /* a < b */ + +#endif /* #ifndef _bcmtcp_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bcmudp.h b/drivers/net/wireless/bcmdhd/include/proto/bcmudp.h new file mode 100644 index 000000000000..97cf815db76c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bcmudp.h @@ -0,0 +1,49 @@ +/* + * Fundamental constants relating to UDP Protocol + * + * Copyright (C) 2016, Broadcom Corporation + * All Rights Reserved. + * + * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation; + * the contents of this file may not be disclosed to third parties, copied + * or duplicated in any form, in whole or in part, without the prior + * written permission of Broadcom Corporation. + * + * + * <> + * + * $Id: bcmudp.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _bcmudp_h_ +#define _bcmudp_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +/* This marks the start of a packed structure section. */ +#include + + +/* UDP header */ +#define UDP_DEST_PORT_OFFSET 2 /* UDP dest port offset */ +#define UDP_LEN_OFFSET 4 /* UDP length offset */ +#define UDP_CHKSUM_OFFSET 6 /* UDP body checksum offset */ + +#define UDP_HDR_LEN 8 /* UDP header length */ +#define UDP_PORT_LEN 2 /* UDP port length */ + +/* These fields are stored in network order */ +BWL_PRE_PACKED_STRUCT struct bcmudp_hdr +{ + uint16 src_port; /* Source Port Address */ + uint16 dst_port; /* Destination Port Address */ + uint16 len; /* Number of bytes in datagram including header */ + uint16 chksum; /* entire datagram checksum with pseudoheader */ +} BWL_POST_PACKED_STRUCT; + +/* This marks the end of a packed structure section. 
*/ +#include + +#endif /* #ifndef _bcmudp_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h new file mode 100644 index 000000000000..4e948d24dfc1 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/bt_amp_hci.h @@ -0,0 +1,444 @@ +/* + * BT-AMP (BlueTooth Alternate Mac and Phy) HCI (Host/Controller Interface) + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: bt_amp_hci.h 518342 2014-12-01 23:21:41Z $ +*/ + +#ifndef _bt_amp_hci_h +#define _bt_amp_hci_h + +/* This marks the start of a packed structure section. */ +#include + + +/* AMP HCI CMD packet format */ +typedef BWL_PRE_PACKED_STRUCT struct amp_hci_cmd { + uint16 opcode; + uint8 plen; + uint8 parms[1]; +} BWL_POST_PACKED_STRUCT amp_hci_cmd_t; + +#define HCI_CMD_PREAMBLE_SIZE OFFSETOF(amp_hci_cmd_t, parms) +#define HCI_CMD_DATA_SIZE 255 + +/* AMP HCI CMD opcode layout */ +#define HCI_CMD_OPCODE(ogf, ocf) ((((ogf) & 0x3F) << 10) | ((ocf) & 0x03FF)) +#define HCI_CMD_OGF(opcode) ((uint8)(((opcode) >> 10) & 0x3F)) +#define HCI_CMD_OCF(opcode) ((opcode) & 0x03FF) + +/* AMP HCI command opcodes */ +#define HCI_Read_Failed_Contact_Counter HCI_CMD_OPCODE(0x05, 0x0001) +#define HCI_Reset_Failed_Contact_Counter HCI_CMD_OPCODE(0x05, 0x0002) +#define HCI_Read_Link_Quality HCI_CMD_OPCODE(0x05, 0x0003) +#define HCI_Read_Local_AMP_Info HCI_CMD_OPCODE(0x05, 0x0009) +#define HCI_Read_Local_AMP_ASSOC HCI_CMD_OPCODE(0x05, 0x000A) +#define HCI_Write_Remote_AMP_ASSOC HCI_CMD_OPCODE(0x05, 0x000B) +#define HCI_Create_Physical_Link HCI_CMD_OPCODE(0x01, 0x0035) +#define HCI_Accept_Physical_Link_Request HCI_CMD_OPCODE(0x01, 0x0036) +#define HCI_Disconnect_Physical_Link HCI_CMD_OPCODE(0x01, 0x0037) +#define HCI_Create_Logical_Link HCI_CMD_OPCODE(0x01, 0x0038) +#define HCI_Accept_Logical_Link HCI_CMD_OPCODE(0x01, 0x0039) +#define HCI_Disconnect_Logical_Link HCI_CMD_OPCODE(0x01, 0x003A) +#define HCI_Logical_Link_Cancel HCI_CMD_OPCODE(0x01, 0x003B) +#define HCI_Flow_Spec_Modify HCI_CMD_OPCODE(0x01, 0x003C) +#define HCI_Write_Flow_Control_Mode HCI_CMD_OPCODE(0x01, 0x0067) +#define HCI_Read_Best_Effort_Flush_Timeout HCI_CMD_OPCODE(0x01, 0x0069) +#define HCI_Write_Best_Effort_Flush_Timeout HCI_CMD_OPCODE(0x01, 0x006A) +#define HCI_Short_Range_Mode HCI_CMD_OPCODE(0x01, 0x006B) +#define HCI_Reset HCI_CMD_OPCODE(0x03, 0x0003) +#define HCI_Read_Connection_Accept_Timeout 
HCI_CMD_OPCODE(0x03, 0x0015) +#define HCI_Write_Connection_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0016) +#define HCI_Read_Link_Supervision_Timeout HCI_CMD_OPCODE(0x03, 0x0036) +#define HCI_Write_Link_Supervision_Timeout HCI_CMD_OPCODE(0x03, 0x0037) +#define HCI_Enhanced_Flush HCI_CMD_OPCODE(0x03, 0x005F) +#define HCI_Read_Logical_Link_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0061) +#define HCI_Write_Logical_Link_Accept_Timeout HCI_CMD_OPCODE(0x03, 0x0062) +#define HCI_Set_Event_Mask_Page_2 HCI_CMD_OPCODE(0x03, 0x0063) +#define HCI_Read_Location_Data_Command HCI_CMD_OPCODE(0x03, 0x0064) +#define HCI_Write_Location_Data_Command HCI_CMD_OPCODE(0x03, 0x0065) +#define HCI_Read_Local_Version_Info HCI_CMD_OPCODE(0x04, 0x0001) +#define HCI_Read_Local_Supported_Commands HCI_CMD_OPCODE(0x04, 0x0002) +#define HCI_Read_Buffer_Size HCI_CMD_OPCODE(0x04, 0x0005) +#define HCI_Read_Data_Block_Size HCI_CMD_OPCODE(0x04, 0x000A) + +/* AMP HCI command parameters */ +typedef BWL_PRE_PACKED_STRUCT struct read_local_cmd_parms { + uint8 plh; + uint8 offset[2]; /* length so far */ + uint8 max_remote[2]; +} BWL_POST_PACKED_STRUCT read_local_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct write_remote_cmd_parms { + uint8 plh; + uint8 offset[2]; + uint8 len[2]; + uint8 frag[1]; +} BWL_POST_PACKED_STRUCT write_remote_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct phy_link_cmd_parms { + uint8 plh; + uint8 key_length; + uint8 key_type; + uint8 key[1]; +} BWL_POST_PACKED_STRUCT phy_link_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_cmd_parms { + uint8 plh; + uint8 reason; +} BWL_POST_PACKED_STRUCT dis_phy_link_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct log_link_cmd_parms { + uint8 plh; + uint8 txflow[16]; + uint8 rxflow[16]; +} BWL_POST_PACKED_STRUCT log_link_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct ext_flow_spec { + uint8 id; + uint8 service_type; + uint8 max_sdu[2]; + uint8 sdu_ia_time[4]; + uint8 access_latency[4]; + uint8 flush_timeout[4]; +} BWL_POST_PACKED_STRUCT ext_flow_spec_t; + +typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_cmd_parms { + uint8 plh; + uint8 tx_fs_ID; +} BWL_POST_PACKED_STRUCT log_link_cancel_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_cmd_parms { + uint8 llh[2]; + uint8 txflow[16]; + uint8 rxflow[16]; +} BWL_POST_PACKED_STRUCT flow_spec_mod_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct plh_pad { + uint8 plh; + uint8 pad; +} BWL_POST_PACKED_STRUCT plh_pad_t; + +typedef BWL_PRE_PACKED_STRUCT union hci_handle { + uint16 bredr; + plh_pad_t amp; +} BWL_POST_PACKED_STRUCT hci_handle_t; + +typedef BWL_PRE_PACKED_STRUCT struct ls_to_cmd_parms { + hci_handle_t handle; + uint8 timeout[2]; +} BWL_POST_PACKED_STRUCT ls_to_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct befto_cmd_parms { + uint8 llh[2]; + uint8 befto[4]; +} BWL_POST_PACKED_STRUCT befto_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct srm_cmd_parms { + uint8 plh; + uint8 srm; +} BWL_POST_PACKED_STRUCT srm_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct ld_cmd_parms { + uint8 ld_aware; + uint8 ld[2]; + uint8 ld_opts; + uint8 l_opts; +} BWL_POST_PACKED_STRUCT ld_cmd_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct eflush_cmd_parms { + uint8 llh[2]; + uint8 packet_type; +} BWL_POST_PACKED_STRUCT eflush_cmd_parms_t; + +/* Generic AMP extended flow spec service types */ +#define EFS_SVCTYPE_NO_TRAFFIC 0 +#define EFS_SVCTYPE_BEST_EFFORT 1 +#define EFS_SVCTYPE_GUARANTEED 2 + +/* AMP HCI event packet format */ +typedef BWL_PRE_PACKED_STRUCT struct 
amp_hci_event { + uint8 ecode; + uint8 plen; + uint8 parms[1]; +} BWL_POST_PACKED_STRUCT amp_hci_event_t; + +#define HCI_EVT_PREAMBLE_SIZE OFFSETOF(amp_hci_event_t, parms) + +/* AMP HCI event codes */ +#define HCI_Command_Complete 0x0E +#define HCI_Command_Status 0x0F +#define HCI_Flush_Occurred 0x11 +#define HCI_Enhanced_Flush_Complete 0x39 +#define HCI_Physical_Link_Complete 0x40 +#define HCI_Channel_Select 0x41 +#define HCI_Disconnect_Physical_Link_Complete 0x42 +#define HCI_Logical_Link_Complete 0x45 +#define HCI_Disconnect_Logical_Link_Complete 0x46 +#define HCI_Flow_Spec_Modify_Complete 0x47 +#define HCI_Number_of_Completed_Data_Blocks 0x48 +#define HCI_Short_Range_Mode_Change_Complete 0x4C +#define HCI_Status_Change_Event 0x4D +#define HCI_Vendor_Specific 0xFF + +/* AMP HCI event mask bit positions */ +#define HCI_Physical_Link_Complete_Event_Mask 0x0001 +#define HCI_Channel_Select_Event_Mask 0x0002 +#define HCI_Disconnect_Physical_Link_Complete_Event_Mask 0x0004 +#define HCI_Logical_Link_Complete_Event_Mask 0x0020 +#define HCI_Disconnect_Logical_Link_Complete_Event_Mask 0x0040 +#define HCI_Flow_Spec_Modify_Complete_Event_Mask 0x0080 +#define HCI_Number_of_Completed_Data_Blocks_Event_Mask 0x0100 +#define HCI_Short_Range_Mode_Change_Complete_Event_Mask 0x1000 +#define HCI_Status_Change_Event_Mask 0x2000 +#define HCI_All_Event_Mask 0x31e7 +/* AMP HCI event parameters */ +typedef BWL_PRE_PACKED_STRUCT struct cmd_status_parms { + uint8 status; + uint8 cmdpkts; + uint16 opcode; +} BWL_POST_PACKED_STRUCT cmd_status_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct cmd_complete_parms { + uint8 cmdpkts; + uint16 opcode; + uint8 parms[1]; +} BWL_POST_PACKED_STRUCT cmd_complete_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct flush_occurred_evt_parms { + uint16 handle; +} BWL_POST_PACKED_STRUCT flush_occurred_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct write_remote_evt_parms { + uint8 status; + uint8 plh; +} BWL_POST_PACKED_STRUCT write_remote_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_local_evt_parms { + uint8 status; + uint8 plh; + uint16 len; + uint8 frag[1]; +} BWL_POST_PACKED_STRUCT read_local_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_local_info_evt_parms { + uint8 status; + uint8 AMP_status; + uint32 bandwidth; + uint32 gbandwidth; + uint32 latency; + uint32 PDU_size; + uint8 ctrl_type; + uint16 PAL_cap; + uint16 AMP_ASSOC_len; + uint32 max_flush_timeout; + uint32 be_flush_timeout; +} BWL_POST_PACKED_STRUCT read_local_info_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct log_link_evt_parms { + uint8 status; + uint16 llh; + uint8 plh; + uint8 tx_fs_ID; +} BWL_POST_PACKED_STRUCT log_link_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct disc_log_link_evt_parms { + uint8 status; + uint16 llh; + uint8 reason; +} BWL_POST_PACKED_STRUCT disc_log_link_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct log_link_cancel_evt_parms { + uint8 status; + uint8 plh; + uint8 tx_fs_ID; +} BWL_POST_PACKED_STRUCT log_link_cancel_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct flow_spec_mod_evt_parms { + uint8 status; + uint16 llh; +} BWL_POST_PACKED_STRUCT flow_spec_mod_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct phy_link_evt_parms { + uint8 status; + uint8 plh; +} BWL_POST_PACKED_STRUCT phy_link_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct dis_phy_link_evt_parms { + uint8 status; + uint8 plh; + uint8 reason; +} BWL_POST_PACKED_STRUCT dis_phy_link_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_ls_to_evt_parms { + uint8 status; 
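+ /* handle: either the 16-bit BR/EDR connection handle or the AMP {plh, pad} pair; see the hci_handle_t union above */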
+ hci_handle_t handle; + uint16 timeout; +} BWL_POST_PACKED_STRUCT read_ls_to_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_lla_ca_to_evt_parms { + uint8 status; + uint16 timeout; +} BWL_POST_PACKED_STRUCT read_lla_ca_to_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_data_block_size_evt_parms { + uint8 status; + uint16 ACL_pkt_len; + uint16 data_block_len; + uint16 data_block_num; +} BWL_POST_PACKED_STRUCT read_data_block_size_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct data_blocks { + uint16 handle; + uint16 pkts; + uint16 blocks; +} BWL_POST_PACKED_STRUCT data_blocks_t; + +typedef BWL_PRE_PACKED_STRUCT struct num_completed_data_blocks_evt_parms { + uint16 num_blocks; + uint8 num_handles; + data_blocks_t completed[1]; +} BWL_POST_PACKED_STRUCT num_completed_data_blocks_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct befto_evt_parms { + uint8 status; + uint32 befto; +} BWL_POST_PACKED_STRUCT befto_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct srm_evt_parms { + uint8 status; + uint8 plh; + uint8 srm; +} BWL_POST_PACKED_STRUCT srm_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct contact_counter_evt_parms { + uint8 status; + uint8 llh[2]; + uint16 counter; +} BWL_POST_PACKED_STRUCT contact_counter_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct contact_counter_reset_evt_parms { + uint8 status; + uint8 llh[2]; +} BWL_POST_PACKED_STRUCT contact_counter_reset_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct read_linkq_evt_parms { + uint8 status; + hci_handle_t handle; + uint8 link_quality; +} BWL_POST_PACKED_STRUCT read_linkq_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct ld_evt_parms { + uint8 status; + uint8 ld_aware; + uint8 ld[2]; + uint8 ld_opts; + uint8 l_opts; +} BWL_POST_PACKED_STRUCT ld_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct eflush_complete_evt_parms { + uint16 handle; +} BWL_POST_PACKED_STRUCT eflush_complete_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct vendor_specific_evt_parms { + uint8 len; + uint8 parms[1]; +} BWL_POST_PACKED_STRUCT vendor_specific_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct local_version_info_evt_parms { + uint8 status; + uint8 hci_version; + uint16 hci_revision; + uint8 pal_version; + uint16 mfg_name; + uint16 pal_subversion; +} BWL_POST_PACKED_STRUCT local_version_info_evt_parms_t; + +#define MAX_SUPPORTED_CMD_BYTE 64 +typedef BWL_PRE_PACKED_STRUCT struct local_supported_cmd_evt_parms { + uint8 status; + uint8 cmd[MAX_SUPPORTED_CMD_BYTE]; +} BWL_POST_PACKED_STRUCT local_supported_cmd_evt_parms_t; + +typedef BWL_PRE_PACKED_STRUCT struct status_change_evt_parms { + uint8 status; + uint8 amp_status; +} BWL_POST_PACKED_STRUCT status_change_evt_parms_t; + +/* AMP HCI error codes */ +#define HCI_SUCCESS 0x00 +#define HCI_ERR_ILLEGAL_COMMAND 0x01 +#define HCI_ERR_NO_CONNECTION 0x02 +#define HCI_ERR_MEMORY_FULL 0x07 +#define HCI_ERR_CONNECTION_TIMEOUT 0x08 +#define HCI_ERR_MAX_NUM_OF_CONNECTIONS 0x09 +#define HCI_ERR_CONNECTION_EXISTS 0x0B +#define HCI_ERR_CONNECTION_DISALLOWED 0x0C +#define HCI_ERR_CONNECTION_ACCEPT_TIMEOUT 0x10 +#define HCI_ERR_UNSUPPORTED_VALUE 0x11 +#define HCI_ERR_ILLEGAL_PARAMETER_FMT 0x12 +#define HCI_ERR_CONN_TERM_BY_LOCAL_HOST 0x16 +#define HCI_ERR_UNSPECIFIED 0x1F +#define HCI_ERR_UNIT_KEY_USED 0x26 +#define HCI_ERR_QOS_REJECTED 0x2D +#define HCI_ERR_PARAM_OUT_OF_RANGE 0x30 +#define HCI_ERR_NO_SUITABLE_CHANNEL 0x39 +#define HCI_ERR_CHANNEL_MOVE 0xFF + +/* AMP HCI ACL Data packet format */ +typedef BWL_PRE_PACKED_STRUCT struct amp_hci_ACL_data { + 
uint16 handle; /* 12-bit connection handle + 2-bit PB and 2-bit BC flags */ + uint16 dlen; /* data total length */ + uint8 data[1]; +} BWL_POST_PACKED_STRUCT amp_hci_ACL_data_t; + +#define HCI_ACL_DATA_PREAMBLE_SIZE OFFSETOF(amp_hci_ACL_data_t, data) + +#define HCI_ACL_DATA_BC_FLAGS (0x0 << 14) +#define HCI_ACL_DATA_PB_FLAGS (0x3 << 12) + +#define HCI_ACL_DATA_HANDLE(handle) ((handle) & 0x0fff) +#define HCI_ACL_DATA_FLAGS(handle) ((handle) >> 12) + +/* AMP Activity Report packet formats */ +typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report { + uint8 ScheduleKnown; + uint8 NumReports; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT amp_hci_activity_report_t; + +typedef BWL_PRE_PACKED_STRUCT struct amp_hci_activity_report_triple { + uint32 StartTime; + uint32 Duration; + uint32 Periodicity; +} BWL_POST_PACKED_STRUCT amp_hci_activity_report_triple_t; + +#define HCI_AR_SCHEDULE_KNOWN 0x01 + + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _bt_amp_hci_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/eapol.h b/drivers/net/wireless/bcmdhd/include/proto/eapol.h new file mode 100644 index 000000000000..be4ef5358fa5 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/eapol.h @@ -0,0 +1,215 @@ +/* + * 802.1x EAPOL definitions + * + * See + * IEEE Std 802.1X-2001 + * IEEE 802.1X RADIUS Usage Guidelines + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: eapol.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _eapol_h_ +#define _eapol_h_ + +#ifndef _TYPEDEFS_H_ +#include +#endif + +/* This marks the start of a packed structure section. 
*/ +#include + +#include + +/* EAPOL for 802.3/Ethernet */ +typedef BWL_PRE_PACKED_STRUCT struct { + struct ether_header eth; /* 802.3/Ethernet header */ + unsigned char version; /* EAPOL protocol version */ + unsigned char type; /* EAPOL type */ + unsigned short length; /* Length of body */ + unsigned char body[1]; /* Body (optional) */ +} BWL_POST_PACKED_STRUCT eapol_header_t; + +#define EAPOL_HEADER_LEN 18 + +typedef struct { + unsigned char version; /* EAPOL protocol version */ + unsigned char type; /* EAPOL type */ + unsigned short length; /* Length of body */ +} eapol_hdr_t; + +#define EAPOL_HDR_LEN 4 + +/* EAPOL version */ +#define WPA2_EAPOL_VERSION 2 +#define WPA_EAPOL_VERSION 1 +#define LEAP_EAPOL_VERSION 1 +#define SES_EAPOL_VERSION 1 + +/* EAPOL types */ +#define EAP_PACKET 0 +#define EAPOL_START 1 +#define EAPOL_LOGOFF 2 +#define EAPOL_KEY 3 +#define EAPOL_ASF 4 + +/* EAPOL-Key types */ +#define EAPOL_RC4_KEY 1 +#define EAPOL_WPA2_KEY 2 /* 802.11i/WPA2 */ +#define EAPOL_WPA_KEY 254 /* WPA */ + +/* RC4 EAPOL-Key header field sizes */ +#define EAPOL_KEY_REPLAY_LEN 8 +#define EAPOL_KEY_IV_LEN 16 +#define EAPOL_KEY_SIG_LEN 16 + +/* RC4 EAPOL-Key */ +typedef BWL_PRE_PACKED_STRUCT struct { + unsigned char type; /* Key Descriptor Type */ + unsigned short length; /* Key Length (unaligned) */ + unsigned char replay[EAPOL_KEY_REPLAY_LEN]; /* Replay Counter */ + unsigned char iv[EAPOL_KEY_IV_LEN]; /* Key IV */ + unsigned char index; /* Key Flags & Index */ + unsigned char signature[EAPOL_KEY_SIG_LEN]; /* Key Signature */ + unsigned char key[1]; /* Key (optional) */ +} BWL_POST_PACKED_STRUCT eapol_key_header_t; + +#define EAPOL_KEY_HEADER_LEN 44 + +/* RC4 EAPOL-Key flags */ +#define EAPOL_KEY_FLAGS_MASK 0x80 +#define EAPOL_KEY_BROADCAST 0 +#define EAPOL_KEY_UNICAST 0x80 + +/* RC4 EAPOL-Key index */ +#define EAPOL_KEY_INDEX_MASK 0x7f + +/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */ +#define EAPOL_WPA_KEY_REPLAY_LEN 8 +#define EAPOL_WPA_KEY_NONCE_LEN 32 +#define EAPOL_WPA_KEY_IV_LEN 16 +#define EAPOL_WPA_KEY_RSC_LEN 8 +#define EAPOL_WPA_KEY_ID_LEN 8 +#define EAPOL_WPA_KEY_MIC_LEN 16 +#define EAPOL_WPA_KEY_DATA_LEN (EAPOL_WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN) +#define EAPOL_WPA_MAX_KEY_SIZE 32 + +/* WPA EAPOL-Key */ +typedef BWL_PRE_PACKED_STRUCT struct { + unsigned char type; /* Key Descriptor Type */ + unsigned short key_info; /* Key Information (unaligned) */ + unsigned short key_len; /* Key Length (unaligned) */ + unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN]; /* Replay Counter */ + unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN]; /* Nonce */ + unsigned char iv[EAPOL_WPA_KEY_IV_LEN]; /* Key IV */ + unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN]; /* Key RSC */ + unsigned char id[EAPOL_WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */ + unsigned char mic[EAPOL_WPA_KEY_MIC_LEN]; /* Key MIC */ + unsigned short data_len; /* Key Data Length */ + unsigned char data[EAPOL_WPA_KEY_DATA_LEN]; /* Key data */ +} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t; + +#define EAPOL_WPA_KEY_LEN 95 + +/* WPA/802.11i/WPA2 KEY KEY_INFO bits */ +#define WPA_KEY_DESC_OSEN 0x0 +#define WPA_KEY_DESC_V1 0x01 +#define WPA_KEY_DESC_V2 0x02 +#define WPA_KEY_DESC_V3 0x03 +#define WPA_KEY_PAIRWISE 0x08 +#define WPA_KEY_INSTALL 0x40 +#define WPA_KEY_ACK 0x80 +#define WPA_KEY_MIC 0x100 +#define WPA_KEY_SECURE 0x200 +#define WPA_KEY_ERROR 0x400 +#define WPA_KEY_REQ 0x800 + +#define WPA_KEY_DESC_V2_OR_V3 WPA_KEY_DESC_V2 + +/* WPA-only KEY KEY_INFO bits */ +#define WPA_KEY_INDEX_0 0x00 +#define WPA_KEY_INDEX_1 0x10 
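+/* Illustrative decode (a sketch; assumes key_info has been converted to host order, e.g. with bcmendian.h's ntoh16()): the group key index of an eapol_wpa_key_header_t is ((key_info & WPA_KEY_INDEX_MASK) >> WPA_KEY_INDEX_SHIFT), i.e. the masked value equals one of the WPA_KEY_INDEX_0..WPA_KEY_INDEX_3 values defined here; (key_info & WPA_KEY_PAIRWISE) distinguishes pairwise from group keys. */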
+#define WPA_KEY_INDEX_2 0x20 +#define WPA_KEY_INDEX_3 0x30 +#define WPA_KEY_INDEX_MASK 0x30 +#define WPA_KEY_INDEX_SHIFT 0x04 + +/* 802.11i/WPA2-only KEY KEY_INFO bits */ +#define WPA_KEY_ENCRYPTED_DATA 0x1000 + +/* Key Data encapsulation */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 type; + uint8 length; + uint8 oui[3]; + uint8 subtype; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_encap_data_t; + +#define EAPOL_WPA2_ENCAP_DATA_HDR_LEN 6 + +#define WPA2_KEY_DATA_SUBTYPE_GTK 1 +#define WPA2_KEY_DATA_SUBTYPE_STAKEY 2 +#define WPA2_KEY_DATA_SUBTYPE_MAC 3 +#define WPA2_KEY_DATA_SUBTYPE_PMKID 4 +#define WPA2_KEY_DATA_SUBTYPE_IGTK 9 + +/* GTK encapsulation */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 flags; + uint8 reserved; + uint8 gtk[EAPOL_WPA_MAX_KEY_SIZE]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_key_gtk_encap_t; + +#define EAPOL_WPA2_KEY_GTK_ENCAP_HDR_LEN 2 + +#define WPA2_GTK_INDEX_MASK 0x03 +#define WPA2_GTK_INDEX_SHIFT 0x00 + +#define WPA2_GTK_TRANSMIT 0x04 + +/* IGTK encapsulation */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 key_id; + uint8 ipn[6]; + uint8 key[EAPOL_WPA_MAX_KEY_SIZE]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_key_igtk_encap_t; + +#define EAPOL_WPA2_KEY_IGTK_ENCAP_HDR_LEN 8 + +/* STAKey encapsulation */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 reserved[2]; + uint8 mac[ETHER_ADDR_LEN]; + uint8 stakey[EAPOL_WPA_MAX_KEY_SIZE]; +} BWL_POST_PACKED_STRUCT eapol_wpa2_key_stakey_encap_t; + +#define WPA2_KEY_DATA_PAD 0xdd + + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _eapol_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/ethernet.h b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h new file mode 100644 index 000000000000..022fee41a196 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/ethernet.h @@ -0,0 +1,227 @@ +/* + * From FreeBSD 2.2.7: Fundamental constants relating to ethernet. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: ethernet.h 518342 2014-12-01 23:21:41Z $ + */ + +#ifndef _NET_ETHERNET_H_ /* use native BSD ethernet.h when available */ +#define _NET_ETHERNET_H_ + +#ifndef _TYPEDEFS_H_ +#include "typedefs.h" +#endif + +/* This marks the start of a packed structure section. */ +#include + + +/* + * The number of bytes in an ethernet (MAC) address. + */ +#define ETHER_ADDR_LEN 6 + +/* + * The number of bytes in the type field. 
+ */ +#define ETHER_TYPE_LEN 2 + +/* + * The number of bytes in the trailing CRC field. + */ +#define ETHER_CRC_LEN 4 + +/* + * The length of the combined header. + */ +#define ETHER_HDR_LEN (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN) + +/* + * The minimum packet length. + */ +#define ETHER_MIN_LEN 64 + +/* + * The minimum packet user data length. + */ +#define ETHER_MIN_DATA 46 + +/* + * The maximum packet length. + */ +#define ETHER_MAX_LEN 1518 + +/* + * The maximum packet user data length. + */ +#define ETHER_MAX_DATA 1500 + +/* ether types */ +#define ETHER_TYPE_MIN 0x0600 /* Anything less than MIN is a length */ +#define ETHER_TYPE_IP 0x0800 /* IP */ +#define ETHER_TYPE_ARP 0x0806 /* ARP */ +#define ETHER_TYPE_8021Q 0x8100 /* 802.1Q */ +#define ETHER_TYPE_IPV6 0x86dd /* IPv6 */ +#define ETHER_TYPE_BRCM 0x886c /* Broadcom Corp. */ +#define ETHER_TYPE_802_1X 0x888e /* 802.1x */ +#define ETHER_TYPE_802_1X_PREAUTH 0x88c7 /* 802.1x preauthentication */ +#define ETHER_TYPE_WAI 0x88b4 /* WAI */ +#define ETHER_TYPE_89_0D 0x890d /* 89-0d frame for TDLS */ +#define ETHER_TYPE_RRB ETHER_TYPE_89_0D /* RRB 802.11r 2008 */ + +#define ETHER_TYPE_PPP_SES 0x8864 /* PPPoE Session */ + +#define ETHER_TYPE_IAPP_L2_UPDATE 0x6 /* IAPP L2 update frame */ + +/* Broadcom subtype follows ethertype; First 2 bytes are reserved; Next 2 are subtype; */ +#define ETHER_BRCM_SUBTYPE_LEN 4 /* Broadcom 4 byte subtype */ + +/* ether header */ +#define ETHER_DEST_OFFSET (0 * ETHER_ADDR_LEN) /* dest address offset */ +#define ETHER_SRC_OFFSET (1 * ETHER_ADDR_LEN) /* src address offset */ +#define ETHER_TYPE_OFFSET (2 * ETHER_ADDR_LEN) /* ether type offset */ + +/* + * A macro to validate a length with + */ +#define ETHER_IS_VALID_LEN(foo) \ + ((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN) + +#define ETHER_FILL_MCAST_ADDR_FROM_IP(ea, mgrp_ip) { \ + ((uint8 *)ea)[0] = 0x01; \ + ((uint8 *)ea)[1] = 0x00; \ + ((uint8 *)ea)[2] = 0x5e; \ + ((uint8 *)ea)[3] = ((mgrp_ip) >> 16) & 0x7f; \ + ((uint8 *)ea)[4] = ((mgrp_ip) >> 8) & 0xff; \ + ((uint8 *)ea)[5] = ((mgrp_ip) >> 0) & 0xff; \ +} + +#ifndef __INCif_etherh /* Quick and ugly hack for VxWorks */ +/* + * Structure of a 10Mb/s Ethernet header. + */ +BWL_PRE_PACKED_STRUCT struct ether_header { + uint8 ether_dhost[ETHER_ADDR_LEN]; + uint8 ether_shost[ETHER_ADDR_LEN]; + uint16 ether_type; +} BWL_POST_PACKED_STRUCT; + +/* + * Structure of a 48-bit Ethernet address. + */ +BWL_PRE_PACKED_STRUCT struct ether_addr { + uint8 octet[ETHER_ADDR_LEN]; +} BWL_POST_PACKED_STRUCT; +#endif /* !__INCif_etherh Quick and ugly hack for VxWorks */ + +/* + * Takes a pointer, set, test, clear, toggle locally admininistered + * address bit in the 48-bit Ethernet address. 
+ */ +#define ETHER_SET_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2)) +#define ETHER_IS_LOCALADDR(ea) (((uint8 *)(ea))[0] & 2) +#define ETHER_CLR_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd)) +#define ETHER_TOGGLE_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2)) + +/* Takes a pointer, marks unicast address bit in the MAC address */ +#define ETHER_SET_UNICAST(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1)) + +/* + * Takes a pointer, returns true if a 48-bit multicast address + * (including broadcast, since it is all ones) + */ +#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1) + + +/* compare two ethernet addresses - assumes the pointers can be referenced as shorts */ +#define eacmp(a, b) ((((const uint16 *)(a))[0] ^ ((const uint16 *)(b))[0]) | \ + (((const uint16 *)(a))[1] ^ ((const uint16 *)(b))[1]) | \ + (((const uint16 *)(a))[2] ^ ((const uint16 *)(b))[2])) + +#define ether_cmp(a, b) eacmp(a, b) + +/* copy an ethernet address - assumes the pointers can be referenced as shorts */ +#define eacopy(s, d) \ +do { \ + ((uint16 *)(d))[0] = ((const uint16 *)(s))[0]; \ + ((uint16 *)(d))[1] = ((const uint16 *)(s))[1]; \ + ((uint16 *)(d))[2] = ((const uint16 *)(s))[2]; \ +} while (0) + +#define ether_copy(s, d) eacopy(s, d) + +/* Copy an ethernet address in reverse order */ +#define ether_rcopy(s, d) \ +do { \ + ((uint16 *)(d))[2] = ((uint16 *)(s))[2]; \ + ((uint16 *)(d))[1] = ((uint16 *)(s))[1]; \ + ((uint16 *)(d))[0] = ((uint16 *)(s))[0]; \ +} while (0) + +/* Copy 14B ethernet header: 32bit aligned source and destination. */ +#define ehcopy32(s, d) \ +do { \ + ((uint32 *)(d))[0] = ((const uint32 *)(s))[0]; \ + ((uint32 *)(d))[1] = ((const uint32 *)(s))[1]; \ + ((uint32 *)(d))[2] = ((const uint32 *)(s))[2]; \ + ((uint16 *)(d))[6] = ((const uint16 *)(s))[6]; \ +} while (0) + + +static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}}; +static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}}; +static const struct ether_addr ether_ipv6_mcast = {{0x33, 0x33, 0x00, 0x00, 0x00, 0x01}}; + +#define ETHER_ISBCAST(ea) ((((const uint8 *)(ea))[0] & \ + ((const uint8 *)(ea))[1] & \ + ((const uint8 *)(ea))[2] & \ + ((const uint8 *)(ea))[3] & \ + ((const uint8 *)(ea))[4] & \ + ((const uint8 *)(ea))[5]) == 0xff) +#define ETHER_ISNULLADDR(ea) ((((const uint8 *)(ea))[0] | \ + ((const uint8 *)(ea))[1] | \ + ((const uint8 *)(ea))[2] | \ + ((const uint8 *)(ea))[3] | \ + ((const uint8 *)(ea))[4] | \ + ((const uint8 *)(ea))[5]) == 0) + +#define ETHER_ISNULLDEST(da) ((((const uint16 *)(da))[0] | \ + ((const uint16 *)(da))[1] | \ + ((const uint16 *)(da))[2]) == 0) +#define ETHER_ISNULLSRC(sa) ETHER_ISNULLDEST(sa) + +#define ETHER_MOVE_HDR(d, s) \ +do { \ + struct ether_header t; \ + t = *(struct ether_header *)(s); \ + *(struct ether_header *)(d) = t; \ +} while (0) + +#define ETHER_ISUCAST(ea) ((((uint8 *)(ea))[0] & 0x01) == 0) + +/* This marks the end of a packed structure section. 
*/ +#include + +#endif /* _NET_ETHERNET_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/event_log_set.h b/drivers/net/wireless/bcmdhd/include/proto/event_log_set.h new file mode 100644 index 000000000000..910cbcf169af --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/event_log_set.h @@ -0,0 +1,45 @@ +/* + * EVENT_LOG system definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: event_log.h 241182 2011-02-17 21:50:03Z $ + */ + +#ifndef _EVENT_LOG_SET_H_ +#define _EVENT_LOG_SET_H_ + +/* Set a maximum number of sets here. It is not dynamic for + * efficiency of the EVENT_LOG calls. + */ +#define NUM_EVENT_LOG_SETS 8 + +/* Define new event log sets here */ +#define EVENT_LOG_SET_BUS 0 +#define EVENT_LOG_SET_WL 1 +#define EVENT_LOG_SET_PSM 2 +#define EVENT_LOG_SET_ERROR 3 +#define EVENT_LOG_SET_MEM_API 4 + +#endif /* _EVENT_LOG_SET_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/event_log_tag.h b/drivers/net/wireless/bcmdhd/include/proto/event_log_tag.h new file mode 100644 index 000000000000..25acbc7420e1 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/event_log_tag.h @@ -0,0 +1,157 @@ +/* + * EVENT_LOG system definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ *
+ *
+ * <>
+ *
+ * $Id: event_log.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _EVENT_LOG_TAG_H_
+#define _EVENT_LOG_TAG_H_
+
+#include <typedefs.h>
+
+/* Define new event log tags here */
+#define EVENT_LOG_TAG_NULL	0	/* Special null tag */
+#define EVENT_LOG_TAG_TS	1	/* Special timestamp tag */
+#define EVENT_LOG_TAG_BUS_OOB	2
+#define EVENT_LOG_TAG_BUS_STATE	3
+#define EVENT_LOG_TAG_BUS_PROTO	4
+#define EVENT_LOG_TAG_BUS_CTL	5
+#define EVENT_LOG_TAG_BUS_EVENT	6
+#define EVENT_LOG_TAG_BUS_PKT	7
+#define EVENT_LOG_TAG_BUS_FRAME	8
+#define EVENT_LOG_TAG_BUS_DESC	9
+#define EVENT_LOG_TAG_BUS_SETUP	10
+#define EVENT_LOG_TAG_BUS_MISC	11
+#define EVENT_LOG_TAG_SRSCAN	22
+#define EVENT_LOG_TAG_PWRSTATS_INFO	23
+#define EVENT_LOG_TAG_UCODE_WATCHDOG	26
+#define EVENT_LOG_TAG_UCODE_FIFO	27
+#define EVENT_LOG_TAG_SCAN_TRACE_LOW	28
+#define EVENT_LOG_TAG_SCAN_TRACE_HIGH	29
+#define EVENT_LOG_TAG_SCAN_ERROR	30
+#define EVENT_LOG_TAG_SCAN_WARN	31
+#define EVENT_LOG_TAG_MPF_ERR	32
+#define EVENT_LOG_TAG_MPF_WARN	33
+#define EVENT_LOG_TAG_MPF_INFO	34
+#define EVENT_LOG_TAG_MPF_DEBUG	35
+#define EVENT_LOG_TAG_EVENT_INFO	36
+#define EVENT_LOG_TAG_EVENT_ERR	37
+#define EVENT_LOG_TAG_PWRSTATS_ERROR	38
+#define EVENT_LOG_TAG_EXCESS_PM_ERROR	39
+#define EVENT_LOG_TAG_IOCTL_LOG	40
+#define EVENT_LOG_TAG_PFN_ERR	41
+#define EVENT_LOG_TAG_PFN_WARN	42
+#define EVENT_LOG_TAG_PFN_INFO	43
+#define EVENT_LOG_TAG_PFN_DEBUG	44
+#define EVENT_LOG_TAG_BEACON_LOG	45
+#define EVENT_LOG_TAG_WNM_BSSTRANS_INFO	46
+#define EVENT_LOG_TAG_TRACE_CHANSW	47
+#define EVENT_LOG_TAG_PCI_ERROR	48
+#define EVENT_LOG_TAG_PCI_TRACE	49
+#define EVENT_LOG_TAG_PCI_WARN	50
+#define EVENT_LOG_TAG_PCI_INFO	51
+#define EVENT_LOG_TAG_PCI_DBG	52
+#define EVENT_LOG_TAG_PCI_DATA	53
+#define EVENT_LOG_TAG_PCI_RING	54
+#define EVENT_LOG_TAG_AWDL_TRACE_RANGING	55
+#define EVENT_LOG_TAG_WL_ERROR	56
+#define EVENT_LOG_TAG_PHY_ERROR	57
+#define EVENT_LOG_TAG_OTP_ERROR	58
+#define EVENT_LOG_TAG_NOTIF_ERROR	59
+#define EVENT_LOG_TAG_MPOOL_ERROR	60
+#define EVENT_LOG_TAG_OBJR_ERROR	61
+#define EVENT_LOG_TAG_DMA_ERROR	62
+#define EVENT_LOG_TAG_PMU_ERROR	63
+#define EVENT_LOG_TAG_BSROM_ERROR	64
+#define EVENT_LOG_TAG_SI_ERROR	65
+#define EVENT_LOG_TAG_ROM_PRINTF	66
+#define EVENT_LOG_TAG_RATE_CNT	67
+#define EVENT_LOG_TAG_CTL_MGT_CNT	68
+#define EVENT_LOG_TAG_AMPDU_DUMP	69
+#define EVENT_LOG_TAG_MEM_ALLOC_SUCC	70
+#define EVENT_LOG_TAG_MEM_ALLOC_FAIL	71
+#define EVENT_LOG_TAG_MEM_FREE	72
+#define EVENT_LOG_TAG_WL_ASSOC_LOG	73
+#define EVENT_LOG_TAG_WL_PS_LOG	74
+#define EVENT_LOG_TAG_WL_ROAM_LOG	75
+#define EVENT_LOG_TAG_WL_MPC_LOG	76
+#define EVENT_LOG_TAG_WL_WSEC_LOG	77
+#define EVENT_LOG_TAG_WL_WSEC_DUMP	78
+#define EVENT_LOG_TAG_WL_MCNX_LOG	79
+#define EVENT_LOG_TAG_HEALTH_CHECK_ERROR	80
+#define EVENT_LOG_TAG_HNDRTE_EVENT_ERROR	81
+#define EVENT_LOG_TAG_ECOUNTERS_ERROR	82
+#define EVENT_LOG_TAG_WL_COUNTERS	83
+#define EVENT_LOG_TAG_ECOUNTERS_IPCSTATS	84
+#define EVENT_LOG_TAG_WL_P2P_LOG	85
+#define EVENT_LOG_TAG_SDIO_ERROR	86
+#define EVENT_LOG_TAG_SDIO_TRACE	87
+#define EVENT_LOG_TAG_SDIO_DBG	88
+#define EVENT_LOG_TAG_SDIO_PRHDRS	89
+#define EVENT_LOG_TAG_SDIO_PRPKT	90
+#define EVENT_LOG_TAG_SDIO_INFORM	91
+#define EVENT_LOG_TAG_MIMO_PS_ERROR	92
+#define EVENT_LOG_TAG_MIMO_PS_TRACE	93
+#define EVENT_LOG_TAG_MIMO_PS_INFO	94
+#define EVENT_LOG_TAG_BTCX_STATS	95
+#define EVENT_LOG_TAG_LEAKY_AP_STATS	96
+#define EVENT_LOG_TAG_AWDL_TRACE_ELECTION	97
+#define EVENT_LOG_TAG_MIMO_PS_STATS	98
+#define EVENT_LOG_TAG_PWRSTATS_PHY	99
+#define EVENT_LOG_TAG_PWRSTATS_SCAN	100
+#define EVENT_LOG_TAG_PWRSTATS_AWDL	101
+#define EVENT_LOG_TAG_PWRSTATS_WAKE_V2	102
+#define EVENT_LOG_TAG_LQM	103
+#define EVENT_LOG_TAG_TRACE_WL_INFO	104
+#define EVENT_LOG_TAG_TRACE_BTCOEX_INFO	105
+#define EVENT_LOG_TAG_MAX	105	/* Set to the same value as the last tag, not last tag + 1 */
+/* Note: New events should be added/reserved in trunk before being added to branches */
+
+
+#define SD_PRHDRS(i, s, h, p, n, l)
+#define SD_PRPKT(m, b, n)
+#define SD_INFORM(args)
+
+/* Flags for tag control */
+#define EVENT_LOG_TAG_FLAG_NONE		0
+#define EVENT_LOG_TAG_FLAG_LOG		0x80
+#define EVENT_LOG_TAG_FLAG_PRINT	0x40
+#define EVENT_LOG_TAG_FLAG_SET_MASK	0x3f
+
+/* Each event log entry has a type. The type is the LAST word of the
+ * event log entry. The printing code walks the event entries in reverse
+ * order to find the first entry.
+ */
+typedef union event_log_hdr {
+	struct {
+		uint8 tag;	/* Event_log entry tag */
+		uint8 count;	/* Count of 4-byte entries */
+		uint16 fmt_num;	/* Format number */
+	};
+	uint32 t;		/* Type cheat */
+} event_log_hdr_t;
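+/*
+ * Editorial usage sketch (not part of the original file): decoding one
+ * entry of the log described by event_log_hdr_t above. The header is the
+ * LAST 32-bit word of an entry, so a reader walks backwards: read the
+ * word, then step over hdr.count payload words.
+ */
+#if 0	/* illustration only */
+static const uint32 *
+event_log_entry_payload(const uint32 *last_word, event_log_hdr_t *hdr)
+{
+	hdr->t = *last_word;		/* tag, count and fmt_num in one read */
+	return last_word - hdr->count;	/* first of the count payload words */
+}
+#endif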
*/ +#include + + +/* WiFi P2P OUI values */ +#define P2P_OUI WFA_OUI /* WiFi P2P OUI */ +#define P2P_VER WFA_OUI_TYPE_P2P /* P2P version: 9=WiFi P2P v1.0 */ + +#define P2P_IE_ID 0xdd /* P2P IE element ID */ + +/* WiFi P2P IE */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_ie { + uint8 id; /* IE ID: 0xDD */ + uint8 len; /* IE length */ + uint8 OUI[3]; /* WiFi P2P specific OUI: P2P_OUI */ + uint8 oui_type; /* Identifies P2P version: P2P_VER */ + uint8 subelts[1]; /* variable length subelements */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_ie wifi_p2p_ie_t; + +#define P2P_IE_FIXED_LEN 6 + +#define P2P_ATTR_ID_OFF 0 +#define P2P_ATTR_LEN_OFF 1 +#define P2P_ATTR_DATA_OFF 3 + +#define P2P_ATTR_ID_LEN 1 /* ID filed length */ +#define P2P_ATTR_LEN_LEN 2 /* length field length */ +#define P2P_ATTR_HDR_LEN 3 /* ID + 2-byte length field spec 1.02 */ + +#define P2P_WFDS_HASH_LEN 6 +#define P2P_WFDS_MAX_SVC_NAME_LEN 32 + +/* P2P IE Subelement IDs from WiFi P2P Technical Spec 1.00 */ +#define P2P_SEID_STATUS 0 /* Status */ +#define P2P_SEID_MINOR_RC 1 /* Minor Reason Code */ +#define P2P_SEID_P2P_INFO 2 /* P2P Capability (capabilities info) */ +#define P2P_SEID_DEV_ID 3 /* P2P Device ID */ +#define P2P_SEID_INTENT 4 /* Group Owner Intent */ +#define P2P_SEID_CFG_TIMEOUT 5 /* Configuration Timeout */ +#define P2P_SEID_CHANNEL 6 /* Listen channel */ +#define P2P_SEID_GRP_BSSID 7 /* P2P Group BSSID */ +#define P2P_SEID_XT_TIMING 8 /* Extended Listen Timing */ +#define P2P_SEID_INTINTADDR 9 /* Intended P2P Interface Address */ +#define P2P_SEID_P2P_MGBTY 10 /* P2P Manageability */ +#define P2P_SEID_CHAN_LIST 11 /* Channel List */ +#define P2P_SEID_ABSENCE 12 /* Notice of Absence */ +#define P2P_SEID_DEV_INFO 13 /* Device Info */ +#define P2P_SEID_GROUP_INFO 14 /* Group Info */ +#define P2P_SEID_GROUP_ID 15 /* Group ID */ +#define P2P_SEID_P2P_IF 16 /* P2P Interface */ +#define P2P_SEID_OP_CHANNEL 17 /* Operating Channel */ +#define P2P_SEID_INVITE_FLAGS 18 /* Invitation Flags */ +#define P2P_SEID_SERVICE_HASH 21 /* Service hash */ +#define P2P_SEID_SESSION 22 /* Session information */ +#define P2P_SEID_CONNECT_CAP 23 /* Connection capability */ +#define P2P_SEID_ADVERTISE_ID 24 /* Advertisement ID */ +#define P2P_SEID_ADVERTISE_SERVICE 25 /* Advertised service */ +#define P2P_SEID_SESSION_ID 26 /* Session ID */ +#define P2P_SEID_FEATURE_CAP 27 /* Feature capability */ +#define P2P_SEID_PERSISTENT_GROUP 28 /* Persistent group */ +#define P2P_SEID_SESSION_INFO_RESP 29 /* Session Information Response */ +#define P2P_SEID_VNDR 221 /* Vendor-specific subelement */ + +#define P2P_SE_VS_ID_SERVICES 0x1b + + +/* WiFi P2P IE subelement: P2P Capability (capabilities info) */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_info_se_s { + uint8 eltId; /* SE ID: P2P_SEID_P2P_INFO */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 dev; /* Device Capability Bitmap */ + uint8 group; /* Group Capability Bitmap */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_info_se_s wifi_p2p_info_se_t; + +/* P2P Capability subelement's Device Capability Bitmap bit values */ +#define P2P_CAPSE_DEV_SERVICE_DIS 0x1 /* Service Discovery */ +#define P2P_CAPSE_DEV_CLIENT_DIS 0x2 /* Client Discoverability */ +#define P2P_CAPSE_DEV_CONCURRENT 0x4 /* Concurrent Operation */ +#define P2P_CAPSE_DEV_INFRA_MAN 0x8 /* P2P Infrastructure Managed */ +#define P2P_CAPSE_DEV_LIMIT 0x10 /* P2P Device Limit */ +#define P2P_CAPSE_INVITE_PROC 0x20 /* P2P Invitation Procedure */ + +/* P2P Capability subelement's Group Capability Bitmap bit 
+/* P2P Capability subelement's Group Capability Bitmap bit values */
+#define P2P_CAPSE_GRP_OWNER		0x1	/* P2P Group Owner */
+#define P2P_CAPSE_PERSIST_GRP		0x2	/* Persistent P2P Group */
+#define P2P_CAPSE_GRP_LIMIT		0x4	/* P2P Group Limit */
+#define P2P_CAPSE_GRP_INTRA_BSS		0x8	/* Intra-BSS Distribution */
+#define P2P_CAPSE_GRP_X_CONNECT		0x10	/* Cross Connection */
+#define P2P_CAPSE_GRP_PERSISTENT	0x20	/* Persistent Reconnect */
+#define P2P_CAPSE_GRP_FORMATION		0x40	/* Group Formation */
+
+
+/* WiFi P2P IE subelement: Group Owner Intent */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intent_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_INTENT */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	intent;		/* Intent Value 0...15 (0=legacy 15=master only) */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intent_se_s wifi_p2p_intent_se_t;
+
+/* WiFi P2P IE subelement: Configuration Timeout */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_cfg_tmo_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CFG_TIMEOUT */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	go_tmo;		/* GO config timeout in units of 10 ms */
+	uint8	client_tmo;	/* Client config timeout in units of 10 ms */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_cfg_tmo_se_s wifi_p2p_cfg_tmo_se_t;
+
+/* WiFi P2P IE subelement: Listen Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_listen_channel_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CHANNEL */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	country[3];	/* Country String */
+	uint8	op_class;	/* Operating Class */
+	uint8	channel;	/* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_listen_channel_se_s wifi_p2p_listen_channel_se_t;
+
+/* WiFi P2P IE subelement: P2P Group BSSID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_bssid_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_GRP_BSSID */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* P2P group bssid */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grp_bssid_se_s wifi_p2p_grp_bssid_se_t;
+
+/* WiFi P2P IE subelement: P2P Group ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_id_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_GROUP_ID */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* P2P device address */
+	uint8	ssid[1];	/* SSID (variable length); also serves as the device ID */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grp_id_se_s wifi_p2p_grp_id_se_t;
+
+/* WiFi P2P IE subelement: P2P Interface */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intf_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_P2P_IF */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* P2P device address */
+	uint8	ifaddrs;	/* P2P Interface Address count */
+	uint8	ifaddr[1][6];	/* P2P Interface Address list */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intf_se_s wifi_p2p_intf_se_t;
+
+/* WiFi P2P IE subelement: Status */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_status_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_STATUS */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	status;		/* Status Code: P2P_STATSE_* */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_status_se_s wifi_p2p_status_se_t;
+
+/* Status subelement Status Code definitions */
+#define P2P_STATSE_SUCCESS			0
+			/* Success */
+#define P2P_STATSE_FAIL_INFO_CURR_UNAVAIL	1
+			/* Failed, information currently unavailable */
+#define P2P_STATSE_PASSED_UP			P2P_STATSE_FAIL_INFO_CURR_UNAVAIL
+			/* Old name for above in P2P spec 1.08 and older */
+#define P2P_STATSE_FAIL_INCOMPAT_PARAMS		2
+			/* Failed, incompatible parameters */
+#define P2P_STATSE_FAIL_LIMIT_REACHED		3
+			/* Failed, limit reached */
+#define P2P_STATSE_FAIL_INVALID_PARAMS		4
+			/* Failed, invalid parameters */
+#define P2P_STATSE_FAIL_UNABLE_TO_ACCOM		5
+			/* Failed, unable to accommodate request */
+#define P2P_STATSE_FAIL_PROTO_ERROR		6
+			/* Failed, previous protocol error or disruptive behaviour */
+#define P2P_STATSE_FAIL_NO_COMMON_CHAN		7
+			/* Failed, no common channels */
+#define P2P_STATSE_FAIL_UNKNOWN_GROUP		8
+			/* Failed, unknown P2P Group */
+#define P2P_STATSE_FAIL_INTENT			9
+			/* Failed, both peers indicated Intent 15 in GO Negotiation */
+#define P2P_STATSE_FAIL_INCOMPAT_PROVIS		10
+			/* Failed, incompatible provisioning method */
+#define P2P_STATSE_FAIL_USER_REJECT		11
+			/* Failed, rejected by user */
+#define P2P_STATSE_SUCCESS_USER_ACCEPT		12
+			/* Success, accepted by user */
+
+/* WiFi P2P IE attribute: Extended Listen Timing */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ext_se_s {
+	uint8	eltId;		/* ID: P2P_SEID_XT_TIMING */
+	uint8	len[2];		/* length not including eltId, len fields */
+	uint8	avail[2];	/* availability period */
+	uint8	interval[2];	/* availability interval */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ext_se_s wifi_p2p_ext_se_t;
+
+#define P2P_EXT_MIN	10	/* minimum 10ms */
+
+/* WiFi P2P IE subelement: Intended P2P Interface Address */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_intintad_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_INTINTADDR */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mac[6];		/* intended P2P interface MAC address */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_intintad_se_s wifi_p2p_intintad_se_t;
+
+/* WiFi P2P IE subelement: Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_channel_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CHANNEL */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	band;		/* Regulatory Class (band) */
+	uint8	channel;	/* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_channel_se_s wifi_p2p_channel_se_t;
+
+
+/* Channel Entry structure within the Channel List SE */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_entry_s {
+	uint8	band;				/* Regulatory Class (band) */
+	uint8	num_channels;			/* # of channels in the channel list */
+	uint8	channels[WL_NUMCHANNELS];	/* Channel List */
+} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_chanlist_entry_s wifi_p2p_chanlist_entry_t; +#define WIFI_P2P_CHANLIST_SE_MAX_ENTRIES 2 + +/* WiFi P2P IE subelement: Channel List */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_se_s { + uint8 eltId; /* SE ID: P2P_SEID_CHAN_LIST */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 country[3]; /* Country String */ + uint8 num_entries; /* # of channel entries */ + wifi_p2p_chanlist_entry_t entries[WIFI_P2P_CHANLIST_SE_MAX_ENTRIES]; + /* Channel Entry List */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_chanlist_se_s wifi_p2p_chanlist_se_t; + +/* WiFi Primary Device Type structure */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_pri_devtype_s { + uint16 cat_id; /* Category ID */ + uint8 OUI[3]; /* WFA OUI: 0x0050F2 */ + uint8 oui_type; /* WPS_OUI_TYPE */ + uint16 sub_cat_id; /* Sub Category ID */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_pri_devtype_s wifi_p2p_pri_devtype_t; + +/* WiFi P2P Device Info Sub Element Primary Device Type Sub Category + * maximum values for each category + */ +#define P2P_DISE_SUBCATEGORY_MINVAL 1 +#define P2P_DISE_CATEGORY_COMPUTER 1 +#define P2P_DISE_SUBCATEGORY_COMPUTER_MAXVAL 8 +#define P2P_DISE_CATEGORY_INPUT_DEVICE 2 +#define P2P_DISE_SUBCATEGORY_INPUT_DEVICE_MAXVAL 9 +#define P2P_DISE_CATEGORY_PRINTER 3 +#define P2P_DISE_SUBCATEGORY_PRINTER_MAXVAL 5 +#define P2P_DISE_CATEGORY_CAMERA 4 +#define P2P_DISE_SUBCATEGORY_CAMERA_MAXVAL 4 +#define P2P_DISE_CATEGORY_STORAGE 5 +#define P2P_DISE_SUBCATEGORY_STORAGE_MAXVAL 1 +#define P2P_DISE_CATEGORY_NETWORK_INFRA 6 +#define P2P_DISE_SUBCATEGORY_NETWORK_INFRA_MAXVAL 4 +#define P2P_DISE_CATEGORY_DISPLAY 7 +#define P2P_DISE_SUBCATEGORY_DISPLAY_MAXVAL 4 +#define P2P_DISE_CATEGORY_MULTIMEDIA 8 +#define P2P_DISE_SUBCATEGORY_MULTIMEDIA_MAXVAL 6 +#define P2P_DISE_CATEGORY_GAMING 9 +#define P2P_DISE_SUBCATEGORY_GAMING_MAXVAL 5 +#define P2P_DISE_CATEGORY_TELEPHONE 10 +#define P2P_DISE_SUBCATEGORY_TELEPHONE_MAXVAL 5 +#define P2P_DISE_CATEGORY_AUDIO 11 +#define P2P_DISE_SUBCATEGORY_AUDIO_MAXVAL 6 + +/* WiFi P2P IE's Device Info subelement */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_devinfo_se_s { + uint8 eltId; /* SE ID: P2P_SEID_DEVINFO */ + uint8 len[2]; /* SE length not including eltId, len fields */ + uint8 mac[6]; /* P2P Device MAC address */ + uint16 wps_cfg_meths; /* Config Methods: reg_prototlv.h WPS_CONFMET_* */ + uint8 pri_devtype[8]; /* Primary Device Type */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_devinfo_se_s wifi_p2p_devinfo_se_t; + +#define P2P_DEV_TYPE_LEN 8 + +/* WiFi P2P IE's Group Info subelement Client Info Descriptor */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_cid_fixed_s { + uint8 len; + uint8 devaddr[ETHER_ADDR_LEN]; /* P2P Device Address */ + uint8 ifaddr[ETHER_ADDR_LEN]; /* P2P Interface Address */ + uint8 devcap; /* Device Capability */ + uint8 cfg_meths[2]; /* Config Methods: reg_prototlv.h WPS_CONFMET_* */ + uint8 pridt[P2P_DEV_TYPE_LEN]; /* Primary Device Type */ + uint8 secdts; /* Number of Secondary Device Types */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_cid_fixed_s wifi_p2p_cid_fixed_t; + +/* WiFi P2P IE's Device ID subelement */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_devid_se_s { + uint8 eltId; + uint8 len[2]; + struct ether_addr addr; /* P2P Device MAC address */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_devid_se_s wifi_p2p_devid_se_t; + +/* WiFi P2P IE subelement: P2P Manageability */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_mgbt_se_s { + uint8 eltId; /* SE ID: P2P_SEID_P2P_MGBTY */ 
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	mg_bitmap;	/* manageability bitmap */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_mgbt_se_s wifi_p2p_mgbt_se_t;
+/* mg_bitmap field bit values */
+#define P2P_MGBTSE_P2PDEVMGMT_FLAG	0x1	/* AP supports Managed P2P Device */
+
+/* WiFi P2P IE subelement: Group Info */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_grpinfo_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_GROUP_INFO */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_grpinfo_se_s wifi_p2p_grpinfo_se_t;
+
+/* WiFi IE subelement: Operating Channel */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_op_channel_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_OP_CHANNEL */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	country[3];	/* Country String */
+	uint8	op_class;	/* Operating Class */
+	uint8	channel;	/* Channel */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_op_channel_se_s wifi_p2p_op_channel_se_t;
+
+/* WiFi IE subelement: INVITATION FLAGS */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_invite_flags_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_INVITE_FLAGS */
+	uint8	len[2];		/* SE length not including eltId, len fields */
+	uint8	flags;		/* Flags */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_invite_flags_se_s wifi_p2p_invite_flags_se_t;
+
+/* WiFi P2P IE subelement: Service Hash */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_hash_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_SERVICE_HASH */
+	uint8	len[2];		/* SE length not including eltId, len fields;
+				 * a multiple of 6 bytes
+				 */
+	uint8	hash[1];	/* Variable length - SHA256 hashes of the
+				 * service names (there can be more than one hash)
+				 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_serv_hash_se_s wifi_p2p_serv_hash_se_t;
+
+/* WiFi P2P IE subelement: Service Instance Data */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_inst_data_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_SESSION */
+	uint8	len[2];		/* SE length not including eltId, len */
+	uint8	ssn_info[1];	/* Variable length - Session information as specified by
+				 * the service layer; its type matches the service name
+				 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_serv_inst_data_se_s wifi_p2p_serv_inst_data_se_t;
+
+
+/* WiFi P2P IE subelement: Connection capability */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_conn_cap_data_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_CONNECT_CAP */
+	uint8	len[2];		/* SE length not including eltId, len */
+	uint8	conn_cap;	/* 1-byte capability as specified by the
+				 * service layer, valid bitmask/values
+				 */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_conn_cap_data_se_s wifi_p2p_conn_cap_data_se_t;
+
+
+/* WiFi P2P IE subelement: Advertisement ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_id_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_ADVERTISE_ID */
+	uint8	len[2];		/* SE length not including eltId, len; fixed 4 bytes */
+	uint8	advt_id[4];	/* 4-byte Advertisement ID of the peer device sent in
+				 * PROV Disc, in network byte order
+				 */
+	uint8	advt_mac[6];	/* P2P device address of the service advertiser */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_advt_id_se_s wifi_p2p_advt_id_se_t;
+
+
+/* Per-service info carried in the Advertised Service subelement */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_adv_serv_info_s {
+	uint8	advt_id[4];	/* SE Advertise ID for the service */
+	uint16	nw_cfg_method;	/* SE Network Config method for the service */
+	uint8	serv_name_len;	/* SE length of the service name */
+	uint8	serv_name[1];	/* Variable length service name field */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_adv_serv_info_s wifi_p2p_adv_serv_info_t;
+
+
+/* WiFi P2P IE subelement: Advertised Service */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_serv_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_ADVERTISE_SERVICE */
+	uint8	len[2];		/* SE length not including eltId, len fields; a
+				 * multiple of the wifi_p2p_adv_serv_info_t entry length
+				 */
+	wifi_p2p_adv_serv_info_t	p_advt_serv_info[1];	/* Variable length:
+					multiple instances
+					of the advertised service info
+					*/
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_advt_serv_se_s wifi_p2p_advt_serv_se_t;
+
+
+/* WiFi P2P IE subelement: Session ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ssn_id_se_s {
+	uint8	eltId;		/* SE ID: P2P_SEID_SESSION_ID */
+	uint8	len[2];		/* SE length not including eltId, len; fixed 4 bytes */
+	uint8	ssn_id[4];	/* 4-byte Session ID of the peer device sent in
+				 * PROV Disc, in network byte order
+				 */
+	uint8	ssn_mac[6];	/* P2P device address of the seeker - session mac */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ssn_id_se_s wifi_p2p_ssn_id_se_t;
+
+
+#define P2P_ADVT_SERV_SE_FIXED_LEN	3	/* Includes only the element ID and len */
+#define P2P_ADVT_SERV_INFO_FIXED_LEN	7	/* Per ADV Service Instance: advt_id +
+						 * nw_config_method + serv_name_len
+						 */
+
+/* WiFi P2P Action Frame */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_action_frame {
+	uint8	category;	/* P2P_AF_CATEGORY */
+	uint8	OUI[3];		/* OUI - P2P_OUI */
+	uint8	type;		/* OUI Type - P2P_VER */
+	uint8	subtype;	/* OUI Subtype - P2P_AF_* */
+	uint8	dialog_token;	/* nonzero, identifies req/resp transaction */
+	uint8	elts[1];	/* Variable length information elements.
Max size = + * ACTION_FRAME_SIZE - sizeof(this structure) - 1 + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_action_frame wifi_p2p_action_frame_t; +#define P2P_AF_CATEGORY 0x7f + +#define P2P_AF_FIXED_LEN 7 + +/* WiFi P2P Action Frame OUI Subtypes */ +#define P2P_AF_NOTICE_OF_ABSENCE 0 /* Notice of Absence */ +#define P2P_AF_PRESENCE_REQ 1 /* P2P Presence Request */ +#define P2P_AF_PRESENCE_RSP 2 /* P2P Presence Response */ +#define P2P_AF_GO_DISC_REQ 3 /* GO Discoverability Request */ + + +/* WiFi P2P Public Action Frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_pub_act_frame { + uint8 category; /* P2P_PUB_AF_CATEGORY */ + uint8 action; /* P2P_PUB_AF_ACTION */ + uint8 oui[3]; /* P2P_OUI */ + uint8 oui_type; /* OUI type - P2P_VER */ + uint8 subtype; /* OUI subtype - P2P_TYPE_* */ + uint8 dialog_token; /* nonzero, identifies req/rsp transaction */ + uint8 elts[1]; /* Variable length information elements. Max size = + * ACTION_FRAME_SIZE - sizeof(this structure) - 1 + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_pub_act_frame wifi_p2p_pub_act_frame_t; +#define P2P_PUB_AF_FIXED_LEN 8 +#define P2P_PUB_AF_CATEGORY 0x04 +#define P2P_PUB_AF_ACTION 0x09 + +/* WiFi P2P Public Action Frame OUI Subtypes */ +#define P2P_PAF_GON_REQ 0 /* Group Owner Negotiation Req */ +#define P2P_PAF_GON_RSP 1 /* Group Owner Negotiation Rsp */ +#define P2P_PAF_GON_CONF 2 /* Group Owner Negotiation Confirm */ +#define P2P_PAF_INVITE_REQ 3 /* P2P Invitation Request */ +#define P2P_PAF_INVITE_RSP 4 /* P2P Invitation Response */ +#define P2P_PAF_DEVDIS_REQ 5 /* Device Discoverability Request */ +#define P2P_PAF_DEVDIS_RSP 6 /* Device Discoverability Response */ +#define P2P_PAF_PROVDIS_REQ 7 /* Provision Discovery Request */ +#define P2P_PAF_PROVDIS_RSP 8 /* Provision Discovery Response */ +#define P2P_PAF_SUBTYPE_INVALID 255 /* Invalid Subtype */ + +/* TODO: Stop using these obsolete aliases for P2P_PAF_GON_* */ +#define P2P_TYPE_MNREQ P2P_PAF_GON_REQ +#define P2P_TYPE_MNRSP P2P_PAF_GON_RSP +#define P2P_TYPE_MNCONF P2P_PAF_GON_CONF + +/* WiFi P2P IE subelement: Notice of Absence */ +BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_desc { + uint8 cnt_type; /* Count/Type */ + uint32 duration; /* Duration */ + uint32 interval; /* Interval */ + uint32 start; /* Start Time */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_noa_desc wifi_p2p_noa_desc_t; + +BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_se { + uint8 eltId; /* Subelement ID */ + uint8 len[2]; /* Length */ + uint8 index; /* Index */ + uint8 ops_ctw_parms; /* CTWindow and OppPS Parameters */ + wifi_p2p_noa_desc_t desc[1]; /* Notice of Absence Descriptor(s) */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2p_noa_se wifi_p2p_noa_se_t; + +#define P2P_NOA_SE_FIXED_LEN 5 + +#define P2P_NOA_SE_MAX_DESC 2 /* max NoA descriptors in presence request */ + +/* cnt_type field values */ +#define P2P_NOA_DESC_CNT_RESERVED 0 /* reserved and should not be used */ +#define P2P_NOA_DESC_CNT_REPEAT 255 /* continuous schedule */ +#define P2P_NOA_DESC_TYPE_PREFERRED 1 /* preferred values */ +#define P2P_NOA_DESC_TYPE_ACCEPTABLE 2 /* acceptable limits */ + +/* ctw_ops_parms field values */ +#define P2P_NOA_CTW_MASK 0x7f +#define P2P_NOA_OPS_MASK 0x80 +#define P2P_NOA_OPS_SHIFT 7 + +#define P2P_CTW_MIN 10 /* minimum 10TU */ + +/* + * P2P Service Discovery related + */ +#define P2PSD_ACTION_CATEGORY 0x04 + /* Public action frame */ +#define P2PSD_ACTION_ID_GAS_IREQ 0x0a + /* Action value for GAS Initial Request AF */ +#define P2PSD_ACTION_ID_GAS_IRESP 0x0b + /* Action 
value for GAS Initial Response AF */ +#define P2PSD_ACTION_ID_GAS_CREQ 0x0c + /* Action value for GAS Comeback Request AF */ +#define P2PSD_ACTION_ID_GAS_CRESP 0x0d + /* Action value for GAS Comeback Response AF */ +#define P2PSD_AD_EID 0x6c + /* Advertisement Protocol IE ID */ +#define P2PSD_ADP_TUPLE_QLMT_PAMEBI 0x00 + /* Query Response Length Limit 7 bits plus PAME-BI 1 bit */ +#define P2PSD_ADP_PROTO_ID 0x00 + /* Advertisement Protocol ID. Always 0 for P2P SD */ +#define P2PSD_GAS_OUI P2P_OUI + /* WFA OUI */ +#define P2PSD_GAS_OUI_SUBTYPE P2P_VER + /* OUI Subtype for GAS IE */ +#define P2PSD_GAS_NQP_INFOID 0xDDDD + /* NQP Query Info ID: 56797 */ +#define P2PSD_GAS_COMEBACKDEALY 0x00 + /* Not used in the Native GAS protocol */ + +/* Service Protocol Type */ +typedef enum p2psd_svc_protype { + SVC_RPOTYPE_ALL = 0, + SVC_RPOTYPE_BONJOUR = 1, + SVC_RPOTYPE_UPNP = 2, + SVC_RPOTYPE_WSD = 3, + SVC_RPOTYPE_WFDS = 11, + SVC_RPOTYPE_VENDOR = 255 +} p2psd_svc_protype_t; + +/* Service Discovery response status code */ +typedef enum { + P2PSD_RESP_STATUS_SUCCESS = 0, + P2PSD_RESP_STATUS_PROTYPE_NA = 1, + P2PSD_RESP_STATUS_DATA_NA = 2, + P2PSD_RESP_STATUS_BAD_REQUEST = 3 +} p2psd_resp_status_t; + +/* Advertisement Protocol IE tuple field */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_tpl { + uint8 llm_pamebi; /* Query Response Length Limit bit 0-6, set to 0 plus + * Pre-Associated Message Exchange BSSID Independent bit 7, set to 0 + */ + uint8 adp_id; /* Advertisement Protocol ID: 0 for NQP Native Query Protocol */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_adp_tpl wifi_p2psd_adp_tpl_t; + +/* Advertisement Protocol IE */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_ie { + uint8 id; /* IE ID: 0x6c - 108 */ + uint8 len; /* IE length */ + wifi_p2psd_adp_tpl_t adp_tpl; /* Advertisement Protocol Tuple field. 
Only one + * tuple is defined for P2P Service Discovery + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_adp_ie wifi_p2psd_adp_ie_t; + +/* NQP Vendor-specific Content */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_nqp_query_vsc { + uint8 oui_subtype; /* OUI Subtype: 0x09 */ + uint16 svc_updi; /* Service Update Indicator */ + uint8 svc_tlvs[1]; /* wifi_p2psd_qreq_tlv_t type for service request, + * wifi_p2psd_qresp_tlv_t type for service response + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_nqp_query_vsc wifi_p2psd_nqp_query_vsc_t; + +/* Service Request TLV */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_tlv { + uint16 len; /* Length: 5 plus size of Query Data */ + uint8 svc_prot; /* Service Protocol Type */ + uint8 svc_tscid; /* Service Transaction ID */ + uint8 query_data[1]; /* Query Data, passed in from above Layer 2 */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qreq_tlv wifi_p2psd_qreq_tlv_t; + +/* Query Request Frame, defined in generic format, instead of NQP specific */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_frame { + uint16 info_id; /* Info ID: 0xDDDD */ + uint16 len; /* Length of service request TLV, 5 plus the size of request data */ + uint8 oui[3]; /* WFA OUI: 0x0050F2 */ + uint8 qreq_vsc[1]; /* Vendor-specific Content: wifi_p2psd_nqp_query_vsc_t type for NQP */ + +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qreq_frame wifi_p2psd_qreq_frame_t; + +/* GAS Initial Request AF body, "elts" in wifi_p2p_pub_act_frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_ireq_frame { + wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */ + uint16 qreq_len; /* Query Request Length */ + uint8 qreq_frm[1]; /* Query Request Frame wifi_p2psd_qreq_frame_t */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_ireq_frame wifi_p2psd_gas_ireq_frame_t; + +/* Service Response TLV */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_tlv { + uint16 len; /* Length: 5 plus size of Query Data */ + uint8 svc_prot; /* Service Protocol Type */ + uint8 svc_tscid; /* Service Transaction ID */ + uint8 status; /* Value defined in Table 57 of P2P spec. 
*/ + uint8 query_data[1]; /* Response Data, passed in from above Layer 2 */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qresp_tlv wifi_p2psd_qresp_tlv_t; + +/* Query Response Frame, defined in generic format, instead of NQP specific */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_frame { + uint16 info_id; /* Info ID: 0xDDDD */ + uint16 len; /* Lenth of service response TLV, 6 plus the size of resp data */ + uint8 oui[3]; /* WFA OUI: 0x0050F2 */ + uint8 qresp_vsc[1]; /* Vendor-specific Content: wifi_p2psd_qresp_tlv_t type for NQP */ + +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_qresp_frame wifi_p2psd_qresp_frame_t; + +/* GAS Initial Response AF body, "elts" in wifi_p2p_pub_act_frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_iresp_frame { + uint16 status; /* Value defined in Table 7-23 of IEEE P802.11u */ + uint16 cb_delay; /* GAS Comeback Delay */ + wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */ + uint16 qresp_len; /* Query Response Length */ + uint8 qresp_frm[1]; /* Query Response Frame wifi_p2psd_qresp_frame_t */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_iresp_frame wifi_p2psd_gas_iresp_frame_t; + +/* GAS Comeback Response AF body, "elts" in wifi_p2p_pub_act_frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_cresp_frame { + uint16 status; /* Value defined in Table 7-23 of IEEE P802.11u */ + uint8 fragment_id; /* Fragmentation ID */ + uint16 cb_delay; /* GAS Comeback Delay */ + wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */ + uint16 qresp_len; /* Query Response Length */ + uint8 qresp_frm[1]; /* Query Response Frame wifi_p2psd_qresp_frame_t */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_cresp_frame wifi_p2psd_gas_cresp_frame_t; + +/* Wi-Fi GAS Public Action Frame */ +BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_pub_act_frame { + uint8 category; /* 0x04 Public Action Frame */ + uint8 action; /* 0x6c Advertisement Protocol */ + uint8 dialog_token; /* nonzero, identifies req/rsp transaction */ + uint8 query_data[1]; /* Query Data. wifi_p2psd_gas_ireq_frame_t + * or wifi_p2psd_gas_iresp_frame_t format + */ +} BWL_POST_PACKED_STRUCT; +typedef struct wifi_p2psd_gas_pub_act_frame wifi_p2psd_gas_pub_act_frame_t; + +/* This marks the end of a packed structure section. */ +#include + +#endif /* _P2P_H_ */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/sdspi.h b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h new file mode 100644 index 000000000000..a1d7ac937cf3 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/sdspi.h @@ -0,0 +1,78 @@ +/* + * SD-SPI Protocol Standard + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: sdspi.h 518342 2014-12-01 23:21:41Z $ + */ +#ifndef _SD_SPI_H +#define _SD_SPI_H + +#define SPI_START_M BITFIELD_MASK(1) /* Bit [31] - Start Bit */ +#define SPI_START_S 31 +#define SPI_DIR_M BITFIELD_MASK(1) /* Bit [30] - Direction */ +#define SPI_DIR_S 30 +#define SPI_CMD_INDEX_M BITFIELD_MASK(6) /* Bits [29:24] - Command number */ +#define SPI_CMD_INDEX_S 24 +#define SPI_RW_M BITFIELD_MASK(1) /* Bit [23] - Read=0, Write=1 */ +#define SPI_RW_S 23 +#define SPI_FUNC_M BITFIELD_MASK(3) /* Bits [22:20] - Function Number */ +#define SPI_FUNC_S 20 +#define SPI_RAW_M BITFIELD_MASK(1) /* Bit [19] - Read After Wr */ +#define SPI_RAW_S 19 +#define SPI_STUFF_M BITFIELD_MASK(1) /* Bit [18] - Stuff bit */ +#define SPI_STUFF_S 18 +#define SPI_BLKMODE_M BITFIELD_MASK(1) /* Bit [19] - Blockmode 1=blk */ +#define SPI_BLKMODE_S 19 +#define SPI_OPCODE_M BITFIELD_MASK(1) /* Bit [18] - OP Code */ +#define SPI_OPCODE_S 18 +#define SPI_ADDR_M BITFIELD_MASK(17) /* Bits [17:1] - Address */ +#define SPI_ADDR_S 1 +#define SPI_STUFF0_M BITFIELD_MASK(1) /* Bit [0] - Stuff bit */ +#define SPI_STUFF0_S 0 + +#define SPI_RSP_START_M BITFIELD_MASK(1) /* Bit [7] - Start Bit (always 0) */ +#define SPI_RSP_START_S 7 +#define SPI_RSP_PARAM_ERR_M BITFIELD_MASK(1) /* Bit [6] - Parameter Error */ +#define SPI_RSP_PARAM_ERR_S 6 +#define SPI_RSP_RFU5_M BITFIELD_MASK(1) /* Bit [5] - RFU (Always 0) */ +#define SPI_RSP_RFU5_S 5 +#define SPI_RSP_FUNC_ERR_M BITFIELD_MASK(1) /* Bit [4] - Function number error */ +#define SPI_RSP_FUNC_ERR_S 4 +#define SPI_RSP_CRC_ERR_M BITFIELD_MASK(1) /* Bit [3] - COM CRC Error */ +#define SPI_RSP_CRC_ERR_S 3 +#define SPI_RSP_ILL_CMD_M BITFIELD_MASK(1) /* Bit [2] - Illegal Command error */ +#define SPI_RSP_ILL_CMD_S 2 +#define SPI_RSP_RFU1_M BITFIELD_MASK(1) /* Bit [1] - RFU (Always 0) */ +#define SPI_RSP_RFU1_S 1 +#define SPI_RSP_IDLE_M BITFIELD_MASK(1) /* Bit [0] - In idle state */ +#define SPI_RSP_IDLE_S 0 + +/* SD-SPI Protocol Definitions */ +#define SDSPI_COMMAND_LEN 6 /* Number of bytes in an SD command */ +#define SDSPI_START_BLOCK 0xFE /* SD Start Block Token */ +#define SDSPI_IDLE_PAD 0xFF /* SD-SPI idle value for MOSI */ +#define SDSPI_START_BIT_MASK 0x80 + +#endif /* _SD_SPI_H */ diff --git a/drivers/net/wireless/bcmdhd/include/proto/vlan.h b/drivers/net/wireless/bcmdhd/include/proto/vlan.h new file mode 100644 index 000000000000..77b1458b3683 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/proto/vlan.h @@ -0,0 +1,98 @@ +/* + * 802.1Q VLAN protocol definitions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: vlan.h 518342 2014-12-01 23:21:41Z $
+ */
+
+#ifndef _vlan_h_
+#define _vlan_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+#ifndef VLAN_VID_MASK
+#define VLAN_VID_MASK		0xfff	/* low 12 bits are vlan id */
+#endif
+
+#define VLAN_CFI_SHIFT		12	/* canonical format indicator bit */
+#define VLAN_PRI_SHIFT		13	/* user priority */
+
+#define VLAN_PRI_MASK		7	/* 3 bits of priority */
+
+#define VLAN_TPID_OFFSET	12	/* offset of tag protocol id field */
+#define VLAN_TCI_OFFSET		14	/* offset of tag ctrl info field */
+
+#define VLAN_TAG_LEN		4
+#define VLAN_TAG_OFFSET		(2 * ETHER_ADDR_LEN)	/* offset in Ethernet II packet only */
+
+#define VLAN_TPID		0x8100	/* VLAN ethertype/Tag Protocol ID */
+
+struct vlan_header {
+	uint16	vlan_type;	/* 0x8100 */
+	uint16	vlan_tag;	/* priority, cfi and vid */
+};
+
+struct ethervlan_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];
+	uint8	ether_shost[ETHER_ADDR_LEN];
+	uint16	vlan_type;	/* 0x8100 */
+	uint16	vlan_tag;	/* priority, cfi and vid */
+	uint16	ether_type;
+};
+
+struct dot3_mac_llc_snapvlan_header {
+	uint8	ether_dhost[ETHER_ADDR_LEN];	/* dest mac */
+	uint8	ether_shost[ETHER_ADDR_LEN];	/* src mac */
+	uint16	length;				/* frame length incl header */
+	uint8	dsap;				/* always 0xAA */
+	uint8	ssap;				/* always 0xAA */
+	uint8	ctl;				/* always 0x03 */
+	uint8	oui[3];				/* RFC1042: 0x00 0x00 0x00
+						 * Bridge-Tunnel: 0x00 0x00 0xF8
+						 */
+	uint16	vlan_type;	/* 0x8100 */
+	uint16	vlan_tag;	/* priority, cfi and vid */
+	uint16	ether_type;	/* ethertype */
+};
+
+#define ETHERVLAN_HDR_LEN	(ETHER_HDR_LEN + VLAN_TAG_LEN)
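+/*
+ * Editorial usage sketch (not part of the original header): composing the
+ * 16-bit tag control information (vlan_tag) from the masks and shifts
+ * above. The result is in host byte order; convert to network order
+ * before writing it to the wire.
+ */
+#if 0	/* illustration only */
+static uint16
+vlan_make_tci(uint16 prio, uint16 cfi, uint16 vid)
+{
+	return (uint16)(((prio & VLAN_PRI_MASK) << VLAN_PRI_SHIFT) |
+	                ((cfi & 1) << VLAN_CFI_SHIFT) |
+	                (vid & VLAN_VID_MASK));
+}
+#endif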
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#define ETHERVLAN_MOVE_HDR(d, s) \
+do { \
+	struct ethervlan_header t; \
+	t = *(struct ethervlan_header *)(s); \
+	*(struct ethervlan_header *)(d) = t; \
+} while (0)
+
+#endif /* _vlan_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/wpa.h b/drivers/net/wireless/bcmdhd/include/proto/wpa.h
new file mode 100644
index 000000000000..ef5d664dabee
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/wpa.h
@@ -0,0 +1,182 @@
+/*
+ * Fundamental types and constants relating to WPA
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: wpa.h 518342 2014-12-01 23:21:41Z $
+ */
+
+#ifndef _proto_wpa_h_
+#define _proto_wpa_h_
+
+#include <typedefs.h>
+#include <proto/ethernet.h>
+
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* Reason Codes */
+
+/* 13 through 23 taken from IEEE Std 802.11i-2004 */
+#define DOT11_RC_INVALID_WPA_IE		13	/* Invalid info. element */
+#define DOT11_RC_MIC_FAILURE		14	/* Michael failure */
+#define DOT11_RC_4WH_TIMEOUT		15	/* 4-way handshake timeout */
+#define DOT11_RC_GTK_UPDATE_TIMEOUT	16	/* Group key update timeout */
+#define DOT11_RC_WPA_IE_MISMATCH	17	/* WPA IE in 4-way handshake differs from
+						 * (re-)assoc. request/probe response
+						 */
+#define DOT11_RC_INVALID_MC_CIPHER	18	/* Invalid multicast cipher */
+#define DOT11_RC_INVALID_UC_CIPHER	19	/* Invalid unicast cipher */
+#define DOT11_RC_INVALID_AKMP		20	/* Invalid authenticated key management protocol */
+#define DOT11_RC_BAD_WPA_VERSION	21	/* Unsupported WPA version */
+#define DOT11_RC_INVALID_WPA_CAP	22	/* Invalid WPA IE capabilities */
+#define DOT11_RC_8021X_AUTH_FAIL	23	/* 802.1X authentication failure */
+
+#define WPA2_PMKID_LEN	16
+
+/* WPA IE fixed portion */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint8 tag;	/* TAG */
+	uint8 length;	/* TAG length */
+	uint8 oui[3];	/* IE OUI */
+	uint8 oui_type;	/* OUI type */
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT version;	/* IE version */
+} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t;
+#define WPA_IE_OUITYPE_LEN	4
+#define WPA_IE_FIXED_LEN	8
+#define WPA_IE_TAG_FIXED_LEN	6
+
+#define BIP_OUI_TYPE	WPA2_OUI "\x06"
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 tag;	/* TAG */
+	uint8 length;	/* TAG length */
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT version;	/* IE version */
+} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t;
+#define WPA_RSN_IE_FIXED_LEN	4
+#define WPA_RSN_IE_TAG_FIXED_LEN	2
+typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN];
+
+#define WFA_OSEN_IE_FIXED_LEN	6
+
+/* WPA suite/multicast suite */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	uint8 oui[3];
+	uint8 type;
+} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t;
+#define WPA_SUITE_LEN	4
+
+/* WPA unicast suite list/key management suite list */
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT count;
+	wpa_suite_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t;
+#define WPA_IE_SUITE_COUNT_LEN	2
+typedef BWL_PRE_PACKED_STRUCT struct
+{
+	BWL_PRE_PACKED_STRUCT struct {
+		uint8 low;
+		uint8 high;
+	} BWL_POST_PACKED_STRUCT count;
+	wpa_pmkid_t list[1];
+} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t;
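+/*
+ * Editorial usage sketch (not part of the original header): the count
+ * fields above are deliberately split into low/high bytes so the IE can
+ * be read without alignment or endianness assumptions. A hypothetical
+ * accessor:
+ */
+#if 0	/* illustration only */
+static uint16
+wpa_suite_count(const wpa_suite_ucast_t *ucast)
+{
+	return (uint16)(ucast->count.low | (ucast->count.high << 8));
+}
+#endif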
+
+/* WPA cipher suites */
+#define WPA_CIPHER_NONE		0	/* None */
+#define WPA_CIPHER_WEP_40	1	/* WEP (40-bit) */
+#define WPA_CIPHER_TKIP		2	/* TKIP: default for WPA */
+#define WPA_CIPHER_AES_OCB	3	/* AES (OCB) */
+#define WPA_CIPHER_AES_CCM	4	/* AES (CCM) */
+#define WPA_CIPHER_WEP_104	5	/* WEP (104-bit) */
+#define WPA_CIPHER_BIP		6	/* BIP (management frame integrity) */
+#define WPA_CIPHER_TPK		7	/* Group addressed traffic not allowed */
+
+
+#define IS_WPA_CIPHER(cipher)	((cipher) == WPA_CIPHER_NONE || \
+				 (cipher) == WPA_CIPHER_WEP_40 || \
+				 (cipher) == WPA_CIPHER_WEP_104 || \
+				 (cipher) == WPA_CIPHER_TKIP || \
+				 (cipher) == WPA_CIPHER_AES_OCB || \
+				 (cipher) == WPA_CIPHER_AES_CCM || \
+				 (cipher) == WPA_CIPHER_TPK)
+
+
+/* WPA TKIP countermeasures parameters */
+#define WPA_TKIP_CM_DETECT	60	/* multiple MIC failure window (seconds) */
+#define WPA_TKIP_CM_BLOCK	60	/* countermeasures active window (seconds) */
+
+/* RSN IE defines */
+#define RSN_CAP_LEN	2	/* Length of RSN capabilities field (2 octets) */
+
+/* RSN Capabilities defined in 802.11i */
+#define RSN_CAP_PREAUTH			0x0001
+#define RSN_CAP_NOPAIRWISE		0x0002
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK	0x000C
+#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT	2
+#define RSN_CAP_GTK_REPLAY_CNTR_MASK	0x0030
+#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT	4
+#define RSN_CAP_1_REPLAY_CNTR		0
+#define RSN_CAP_2_REPLAY_CNTRS		1
+#define RSN_CAP_4_REPLAY_CNTRS		2
+#define RSN_CAP_16_REPLAY_CNTRS		3
+#define RSN_CAP_MFPR			0x0040
+#define RSN_CAP_MFPC			0x0080
+#define RSN_CAP_SPPC			0x0400
+#define RSN_CAP_SPPR			0x0800
+
+/* WPA capabilities defined in 802.11i */
+#define WPA_CAP_4_REPLAY_CNTRS		RSN_CAP_4_REPLAY_CNTRS
+#define WPA_CAP_16_REPLAY_CNTRS		RSN_CAP_16_REPLAY_CNTRS
+#define WPA_CAP_REPLAY_CNTR_SHIFT	RSN_CAP_PTK_REPLAY_CNTR_SHIFT
+#define WPA_CAP_REPLAY_CNTR_MASK	RSN_CAP_PTK_REPLAY_CNTR_MASK
+
+/* WPA capabilities defined in 802.11zD9.0 */
+#define WPA_CAP_PEER_KEY_ENABLE	(0x1 << 1)	/* bit 9 */
+
+/* WPA Specific defines */
+#define WPA_CAP_LEN	RSN_CAP_LEN	/* Length of RSN capabilities in RSN IE (2 octets) */
+#define WPA_PMKID_CNT_LEN	2	/* Length of RSN PMKID count (2 octets) */
+
+#define WPA_CAP_WPA2_PREAUTH	RSN_CAP_PREAUTH
+
+#define WPA2_PMKID_COUNT_LEN	2
+
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* _proto_wpa_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/proto/wps.h b/drivers/net/wireless/bcmdhd/include/proto/wps.h
new file mode 100644
index 000000000000..495d7f181fd3
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/proto/wps.h
@@ -0,0 +1,389 @@
+/*
+ * WPS IE definitions
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * + * + * <> + * + * $Id$ + */ + +#ifndef _WPS_ +#define _WPS_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Data Element Definitions */ +#define WPS_ID_AP_CHANNEL 0x1001 +#define WPS_ID_ASSOC_STATE 0x1002 +#define WPS_ID_AUTH_TYPE 0x1003 +#define WPS_ID_AUTH_TYPE_FLAGS 0x1004 +#define WPS_ID_AUTHENTICATOR 0x1005 +#define WPS_ID_CONFIG_METHODS 0x1008 +#define WPS_ID_CONFIG_ERROR 0x1009 +#define WPS_ID_CONF_URL4 0x100A +#define WPS_ID_CONF_URL6 0x100B +#define WPS_ID_CONN_TYPE 0x100C +#define WPS_ID_CONN_TYPE_FLAGS 0x100D +#define WPS_ID_CREDENTIAL 0x100E +#define WPS_ID_DEVICE_NAME 0x1011 +#define WPS_ID_DEVICE_PWD_ID 0x1012 +#define WPS_ID_E_HASH1 0x1014 +#define WPS_ID_E_HASH2 0x1015 +#define WPS_ID_E_SNONCE1 0x1016 +#define WPS_ID_E_SNONCE2 0x1017 +#define WPS_ID_ENCR_SETTINGS 0x1018 +#define WPS_ID_ENCR_TYPE 0x100F +#define WPS_ID_ENCR_TYPE_FLAGS 0x1010 +#define WPS_ID_ENROLLEE_NONCE 0x101A +#define WPS_ID_FEATURE_ID 0x101B +#define WPS_ID_IDENTITY 0x101C +#define WPS_ID_IDENTITY_PROOF 0x101D +#define WPS_ID_KEY_WRAP_AUTH 0x101E +#define WPS_ID_KEY_IDENTIFIER 0x101F +#define WPS_ID_MAC_ADDR 0x1020 +#define WPS_ID_MANUFACTURER 0x1021 +#define WPS_ID_MSG_TYPE 0x1022 +#define WPS_ID_MODEL_NAME 0x1023 +#define WPS_ID_MODEL_NUMBER 0x1024 +#define WPS_ID_NW_INDEX 0x1026 +#define WPS_ID_NW_KEY 0x1027 +#define WPS_ID_NW_KEY_INDEX 0x1028 +#define WPS_ID_NEW_DEVICE_NAME 0x1029 +#define WPS_ID_NEW_PWD 0x102A +#define WPS_ID_OOB_DEV_PWD 0x102C +#define WPS_ID_OS_VERSION 0x102D +#define WPS_ID_POWER_LEVEL 0x102F +#define WPS_ID_PSK_CURRENT 0x1030 +#define WPS_ID_PSK_MAX 0x1031 +#define WPS_ID_PUBLIC_KEY 0x1032 +#define WPS_ID_RADIO_ENABLED 0x1033 +#define WPS_ID_REBOOT 0x1034 +#define WPS_ID_REGISTRAR_CURRENT 0x1035 +#define WPS_ID_REGISTRAR_ESTBLSHD 0x1036 +#define WPS_ID_REGISTRAR_LIST 0x1037 +#define WPS_ID_REGISTRAR_MAX 0x1038 +#define WPS_ID_REGISTRAR_NONCE 0x1039 +#define WPS_ID_REQ_TYPE 0x103A +#define WPS_ID_RESP_TYPE 0x103B +#define WPS_ID_RF_BAND 0x103C +#define WPS_ID_R_HASH1 0x103D +#define WPS_ID_R_HASH2 0x103E +#define WPS_ID_R_SNONCE1 0x103F +#define WPS_ID_R_SNONCE2 0x1040 +#define WPS_ID_SEL_REGISTRAR 0x1041 +#define WPS_ID_SERIAL_NUM 0x1042 +#define WPS_ID_SC_STATE 0x1044 +#define WPS_ID_SSID 0x1045 +#define WPS_ID_TOT_NETWORKS 0x1046 +#define WPS_ID_UUID_E 0x1047 +#define WPS_ID_UUID_R 0x1048 +#define WPS_ID_VENDOR_EXT 0x1049 +#define WPS_ID_VERSION 0x104A +#define WPS_ID_X509_CERT_REQ 0x104B +#define WPS_ID_X509_CERT 0x104C +#define WPS_ID_EAP_IDENTITY 0x104D +#define WPS_ID_MSG_COUNTER 0x104E +#define WPS_ID_PUBKEY_HASH 0x104F +#define WPS_ID_REKEY_KEY 0x1050 +#define WPS_ID_KEY_LIFETIME 0x1051 +#define WPS_ID_PERM_CFG_METHODS 0x1052 +#define WPS_ID_SEL_REG_CFG_METHODS 0x1053 +#define WPS_ID_PRIM_DEV_TYPE 0x1054 +#define WPS_ID_SEC_DEV_TYPE_LIST 0x1055 +#define WPS_ID_PORTABLE_DEVICE 0x1056 +#define WPS_ID_AP_SETUP_LOCKED 0x1057 +#define WPS_ID_APP_LIST 0x1058 +#define WPS_ID_EAP_TYPE 0x1059 +#define WPS_ID_INIT_VECTOR 0x1060 +#define WPS_ID_KEY_PROVIDED_AUTO 0x1061 +#define WPS_ID_8021X_ENABLED 0x1062 +#define WPS_ID_WEP_TRANSMIT_KEY 0x1064 +#define WPS_ID_REQ_DEV_TYPE 0x106A + +/* WSC 2.0, WFA Vendor Extension Subelements */ +#define WFA_VENDOR_EXT_ID "\x00\x37\x2A" +#define WPS_WFA_SUBID_VERSION2 0x00 +#define WPS_WFA_SUBID_AUTHORIZED_MACS 0x01 +#define WPS_WFA_SUBID_NW_KEY_SHAREABLE 0x02 +#define WPS_WFA_SUBID_REQ_TO_ENROLL 0x03 +#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME 0x04 +#define WPS_WFA_SUBID_REG_CFG_METHODS 0x05 + + +/* WCN-NET Windows Rally Vertical Pairing 
Vendor Extensions */ +#define MS_VENDOR_EXT_ID "\x00\x01\x37" +#define WPS_MS_ID_VPI 0x1001 /* Vertical Pairing Identifier TLV */ +#define WPS_MS_ID_TRANSPORT_UUID 0x1002 /* Transport UUID TLV */ + +/* Vertical Pairing Identifier TLV Definitions */ +#define WPS_MS_VPI_TRANSPORT_NONE 0x00 /* None */ +#define WPS_MS_VPI_TRANSPORT_DPWS 0x01 /* Devices Profile for Web Services */ +#define WPS_MS_VPI_TRANSPORT_UPNP 0x02 /* uPnP */ +#define WPS_MS_VPI_TRANSPORT_SDNWS 0x03 /* Secure Devices Profile for Web Services */ +#define WPS_MS_VPI_NO_PROFILE_REQ 0x00 /* Wi-Fi profile not requested. + * Not supported in Windows 7 + */ +#define WPS_MS_VPI_PROFILE_REQ 0x01 /* Wi-Fi profile requested. */ + +/* sizes of the fixed size elements */ +#define WPS_ID_AP_CHANNEL_S 2 +#define WPS_ID_ASSOC_STATE_S 2 +#define WPS_ID_AUTH_TYPE_S 2 +#define WPS_ID_AUTH_TYPE_FLAGS_S 2 +#define WPS_ID_AUTHENTICATOR_S 8 +#define WPS_ID_CONFIG_METHODS_S 2 +#define WPS_ID_CONFIG_ERROR_S 2 +#define WPS_ID_CONN_TYPE_S 1 +#define WPS_ID_CONN_TYPE_FLAGS_S 1 +#define WPS_ID_DEVICE_PWD_ID_S 2 +#define WPS_ID_ENCR_TYPE_S 2 +#define WPS_ID_ENCR_TYPE_FLAGS_S 2 +#define WPS_ID_FEATURE_ID_S 4 +#define WPS_ID_MAC_ADDR_S 6 +#define WPS_ID_MSG_TYPE_S 1 +#define WPS_ID_SC_STATE_S 1 +#define WPS_ID_RF_BAND_S 1 +#define WPS_ID_OS_VERSION_S 4 +#define WPS_ID_VERSION_S 1 +#define WPS_ID_SEL_REGISTRAR_S 1 +#define WPS_ID_SEL_REG_CFG_METHODS_S 2 +#define WPS_ID_REQ_TYPE_S 1 +#define WPS_ID_RESP_TYPE_S 1 +#define WPS_ID_AP_SETUP_LOCKED_S 1 + +/* WSC 2.0, WFA Vendor Extension Subelements */ +#define WPS_WFA_SUBID_VERSION2_S 1 +#define WPS_WFA_SUBID_NW_KEY_SHAREABLE_S 1 +#define WPS_WFA_SUBID_REQ_TO_ENROLL_S 1 +#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME_S 1 +#define WPS_WFA_SUBID_REG_CFG_METHODS_S 2 + +/* Association states */ +#define WPS_ASSOC_NOT_ASSOCIATED 0 +#define WPS_ASSOC_CONN_SUCCESS 1 +#define WPS_ASSOC_CONFIG_FAIL 2 +#define WPS_ASSOC_ASSOC_FAIL 3 +#define WPS_ASSOC_IP_FAIL 4 + +/* Authentication types */ +#define WPS_AUTHTYPE_OPEN 0x0001 +#define WPS_AUTHTYPE_WPAPSK 0x0002 /* Deprecated in WSC 2.0 */ +#define WPS_AUTHTYPE_SHARED 0x0004 /* Deprecated in WSC 2.0 */ +#define WPS_AUTHTYPE_WPA 0x0008 /* Deprecated in WSC 2.0 */ +#define WPS_AUTHTYPE_WPA2 0x0010 +#define WPS_AUTHTYPE_WPA2PSK 0x0020 + +/* Config methods */ +#define WPS_CONFMET_USBA 0x0001 /* Deprecated in WSC 2.0 */ +#define WPS_CONFMET_ETHERNET 0x0002 /* Deprecated in WSC 2.0 */ +#define WPS_CONFMET_LABEL 0x0004 +#define WPS_CONFMET_DISPLAY 0x0008 +#define WPS_CONFMET_EXT_NFC_TOK 0x0010 +#define WPS_CONFMET_INT_NFC_TOK 0x0020 +#define WPS_CONFMET_NFC_INTF 0x0040 +#define WPS_CONFMET_PBC 0x0080 +#define WPS_CONFMET_KEYPAD 0x0100 +/* WSC 2.0 */ +#define WPS_CONFMET_VIRT_PBC 0x0280 +#define WPS_CONFMET_PHY_PBC 0x0480 +#define WPS_CONFMET_VIRT_DISPLAY 0x2008 +#define WPS_CONFMET_PHY_DISPLAY 0x4008 + +/* WPS error messages */ +#define WPS_ERROR_NO_ERROR 0 +#define WPS_ERROR_OOB_INT_READ_ERR 1 +#define WPS_ERROR_DECRYPT_CRC_FAIL 2 +#define WPS_ERROR_CHAN24_NOT_SUPP 3 +#define WPS_ERROR_CHAN50_NOT_SUPP 4 +#define WPS_ERROR_SIGNAL_WEAK 5 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_NW_AUTH_FAIL 6 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_NW_ASSOC_FAIL 7 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_NO_DHCP_RESP 8 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_FAILED_DHCP_CONF 9 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_IP_ADDR_CONFLICT 10 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_FAIL_CONN_REGISTRAR 11 +#define WPS_ERROR_MULTI_PBC_DETECTED 12 +#define 
WPS_ERROR_ROGUE_SUSPECTED 13 +#define WPS_ERROR_DEVICE_BUSY 14 +#define WPS_ERROR_SETUP_LOCKED 15 +#define WPS_ERROR_MSG_TIMEOUT 16 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_REG_SESSION_TIMEOUT 17 /* Deprecated in WSC 2.0 */ +#define WPS_ERROR_DEV_PWD_AUTH_FAIL 18 +#define WPS_ERROR_60GHZ_NOT_SUPPORT 19 +#define WPS_ERROR_PKH_MISMATCH 20 /* Public Key Hash Mismatch */ + +/* Connection types */ +#define WPS_CONNTYPE_ESS 0x01 +#define WPS_CONNTYPE_IBSS 0x02 + +/* Device password ID */ +#define WPS_DEVICEPWDID_DEFAULT 0x0000 +#define WPS_DEVICEPWDID_USER_SPEC 0x0001 +#define WPS_DEVICEPWDID_MACHINE_SPEC 0x0002 +#define WPS_DEVICEPWDID_REKEY 0x0003 +#define WPS_DEVICEPWDID_PUSH_BTN 0x0004 +#define WPS_DEVICEPWDID_REG_SPEC 0x0005 +#define WPS_DEVICEPWDID_IBSS 0x0006 +#define WPS_DEVICEPWDID_NFC_CHO 0x0007 /* NFC-Connection-Handover */ +#define WPS_DEVICEPWDID_WFDS 0x0008 /* Wi-Fi Direct Services Specification */ + +/* Encryption type */ +#define WPS_ENCRTYPE_NONE 0x0001 +#define WPS_ENCRTYPE_WEP 0x0002 /* Deprecated in WSC 2.0 */ +#define WPS_ENCRTYPE_TKIP 0x0004 /* Deprecated in version 2.0. TKIP can only + * be advertised on the AP when Mixed Mode + * is enabled (Encryption Type is 0x000c). + */ +#define WPS_ENCRTYPE_AES 0x0008 + + +/* WPS Message Types */ +#define WPS_ID_BEACON 0x01 +#define WPS_ID_PROBE_REQ 0x02 +#define WPS_ID_PROBE_RESP 0x03 +#define WPS_ID_MESSAGE_M1 0x04 +#define WPS_ID_MESSAGE_M2 0x05 +#define WPS_ID_MESSAGE_M2D 0x06 +#define WPS_ID_MESSAGE_M3 0x07 +#define WPS_ID_MESSAGE_M4 0x08 +#define WPS_ID_MESSAGE_M5 0x09 +#define WPS_ID_MESSAGE_M6 0x0A +#define WPS_ID_MESSAGE_M7 0x0B +#define WPS_ID_MESSAGE_M8 0x0C +#define WPS_ID_MESSAGE_ACK 0x0D +#define WPS_ID_MESSAGE_NACK 0x0E +#define WPS_ID_MESSAGE_DONE 0x0F + +/* WSP private ID for local use */ +#define WPS_PRIVATE_ID_IDENTITY (WPS_ID_MESSAGE_DONE + 1) +#define WPS_PRIVATE_ID_WPS_START (WPS_ID_MESSAGE_DONE + 2) +#define WPS_PRIVATE_ID_FAILURE (WPS_ID_MESSAGE_DONE + 3) +#define WPS_PRIVATE_ID_FRAG (WPS_ID_MESSAGE_DONE + 4) +#define WPS_PRIVATE_ID_FRAG_ACK (WPS_ID_MESSAGE_DONE + 5) +#define WPS_PRIVATE_ID_EAPOL_START (WPS_ID_MESSAGE_DONE + 6) + + +/* Device Type categories for primary and secondary device types */ +#define WPS_DEVICE_TYPE_CAT_COMPUTER 1 +#define WPS_DEVICE_TYPE_CAT_INPUT_DEVICE 2 +#define WPS_DEVICE_TYPE_CAT_PRINTER 3 +#define WPS_DEVICE_TYPE_CAT_CAMERA 4 +#define WPS_DEVICE_TYPE_CAT_STORAGE 5 +#define WPS_DEVICE_TYPE_CAT_NW_INFRA 6 +#define WPS_DEVICE_TYPE_CAT_DISPLAYS 7 +#define WPS_DEVICE_TYPE_CAT_MM_DEVICES 8 +#define WPS_DEVICE_TYPE_CAT_GAME_DEVICES 9 +#define WPS_DEVICE_TYPE_CAT_TELEPHONE 10 +#define WPS_DEVICE_TYPE_CAT_AUDIO_DEVICES 11 /* WSC 2.0 */ + +/* Device Type sub categories for primary and secondary device types */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_PC 1 +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_SERVER 2 +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MEDIA_CTR 3 +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_UM_PC 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NOTEBOOK 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_DESKTOP 6 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MID 7 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NETBOOK 8 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_Keyboard 1 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_MOUSE 2 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_JOYSTICK 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_TRACKBALL 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_GAM_CTRL 5 /* WSC 2.0 */ +#define 
WPS_DEVICE_TYPE_SUB_CAT_INP_REMOTE 6 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_TOUCHSCREEN 7 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_BIO_READER 8 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_INP_BAR_READER 9 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_PRINTER 1 +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_SCANNER 2 +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_FAX 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_COPIER 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_ALLINONE 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_CAM_DGTL_STILL 1 +#define WPS_DEVICE_TYPE_SUB_CAT_CAM_VIDEO_CAM 2 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_CAM_WEB_CAM 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_CAM_SECU_CAM 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_STOR_NAS 1 +#define WPS_DEVICE_TYPE_SUB_CAT_NW_AP 1 +#define WPS_DEVICE_TYPE_SUB_CAT_NW_ROUTER 2 +#define WPS_DEVICE_TYPE_SUB_CAT_NW_SWITCH 3 +#define WPS_DEVICE_TYPE_SUB_CAT_NW_GATEWAY 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_NW_BRIDGE 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_DISP_TV 1 +#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PIC_FRAME 2 +#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PROJECTOR 3 +#define WPS_DEVICE_TYPE_SUB_CAT_DISP_MONITOR 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_MM_DAR 1 +#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVR 2 +#define WPS_DEVICE_TYPE_SUB_CAT_MM_MCX 3 +#define WPS_DEVICE_TYPE_SUB_CAT_MM_STB 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_MM_MS_ME 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVP 6 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX 1 +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX_360 2 +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PS 3 +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_GC 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PGD 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_WM 1 +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PSM 2 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PDM 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SSM 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SDM 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_TUNER 1 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_SPEAKERS 2 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_PMP 3 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HEADSET 4 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HPHONE 5 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_MPHONE 6 /* WSC 2.0 */ +#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HTS 7 /* WSC 2.0 */ + + +/* Device request/response type */ +#define WPS_MSGTYPE_ENROLLEE_INFO_ONLY 0x00 +#define WPS_MSGTYPE_ENROLLEE_OPEN_8021X 0x01 +#define WPS_MSGTYPE_REGISTRAR 0x02 +#define WPS_MSGTYPE_AP_WLAN_MGR 0x03 + +/* RF Band */ +#define WPS_RFBAND_24GHZ 0x01 +#define WPS_RFBAND_50GHZ 0x02 + +/* Simple Config state */ +#define WPS_SCSTATE_UNCONFIGURED 0x01 +#define WPS_SCSTATE_CONFIGURED 0x02 +#define WPS_SCSTATE_OFF 11 + +/* WPS Vendor extension key */ +#define WPS_OUI_HEADER_LEN 2 +#define WPS_OUI_HEADER_SIZE 4 +#define WPS_OUI_FIXED_HEADER_OFF 16 +#define WPS_WFA_SUBID_V2_OFF 3 +#define WPS_WFA_V2_OFF 5 + +#ifdef __cplusplus +} +#endif + +#endif /* _WPS_ */ diff --git a/drivers/net/wireless/bcmdhd/include/rte_ioctl.h b/drivers/net/wireless/bcmdhd/include/rte_ioctl.h new file mode 100644 index 000000000000..9c214ae704ac --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/rte_ioctl.h @@ -0,0 +1,85 @@ +/* + * HND Run Time Environment ioctl. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: rte_ioctl.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _rte_ioctl_h_ +#define _rte_ioctl_h_ + +/* RTE IOCTL definitions for generic ether devices */ +#define RTEGHWADDR 0x8901 +#define RTESHWADDR 0x8902 +#define RTEGMTU 0x8903 +#define RTEGSTATS 0x8904 +#define RTEGALLMULTI 0x8905 +#define RTESALLMULTI 0x8906 +#define RTEGPROMISC 0x8907 +#define RTESPROMISC 0x8908 +#define RTESMULTILIST 0x8909 +#define RTEGUP 0x890A +#define RTEGPERMADDR 0x890B +#define RTEDEVPWRSTCHG 0x890C /* Device pwr state change for PCIedev */ +#define RTEDEVPMETOGGLE 0x890D /* Toggle PME# to wake up the host */ + +#define RTE_IOCTL_QUERY 0x00 +#define RTE_IOCTL_SET 0x01 +#define RTE_IOCTL_OVL_IDX_MASK 0x1e +#define RTE_IOCTL_OVL_RSV 0x20 +#define RTE_IOCTL_OVL 0x40 +#define RTE_IOCTL_OVL_IDX_SHIFT 1 + +enum hnd_ioctl_cmd { + HND_RTE_DNGL_IS_SS = 1, /* true if device connected at super speed */ + + /* PCIEDEV specific wl <--> bus ioctls */ + BUS_GET_VAR = 2, + BUS_SET_VAR = 3, + BUS_FLUSH_RXREORDER_Q = 4, + BUS_SET_LTR_STATE = 5, + BUS_FLUSH_CHAINED_PKTS = 6, + BUS_SET_COPY_COUNT = 7 +}; + +#define SDPCMDEV_SET_MAXTXPKTGLOM 1 + +typedef struct memuse_info { + uint16 ver; /* version of this struct */ + uint16 len; /* length in bytes of this structure */ + uint32 tot; /* Total memory */ + uint32 text_len; /* Size of Text segment memory */ + uint32 data_len; /* Size of Data segment memory */ + uint32 bss_len; /* Size of BSS segment memory */ + + uint32 arena_size; /* Total Heap size */ + uint32 arena_free; /* Heap memory available or free */ + uint32 inuse_size; /* Heap memory currently in use */ + uint32 inuse_hwm; /* High watermark of memory - reclaimed memory */ + uint32 inuse_overhead; /* tally of allocated mem_t blocks */ + uint32 inuse_total; /* Heap in-use + Heap overhead memory */ +} memuse_info_t; + +#endif /* _rte_ioctl_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/sbchipc.h b/drivers/net/wireless/bcmdhd/include/sbchipc.h new file mode 100644 index 000000000000..0f8c52e1c220 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbchipc.h @@ -0,0 +1,3761 @@ +/* + * SiliconBackplane Chipcommon core hardware definitions. + * + * The chipcommon core provides chip identification, SB control, + * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer, + * GPIO interface, extbus, and support for serial and parallel flashes. 
+ * + * $Id: sbchipc.h 574579 2015-07-27 15:36:37Z $ + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + */ + +#ifndef _SBCHIPC_H +#define _SBCHIPC_H + +#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif /* PAD */ + +/** + * In chipcommon rev 49 the pmu registers have been moved from chipc to the pmu core if the + * 'AOBPresent' bit of 'CoreCapabilitiesExt' is set. If this field is set, the traditional chipc to + * [pmu|gci|sreng] register interface is deprecated and removed. These register blocks would instead + * be assigned their respective chipc-specific address space and connected to the Always On + * Backplane via the APB interface. 
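+ *
+ * A minimal, illustrative check for this case (CC_CAP_EXT_AOB_PRESENT and
+ * the capabilities_ext register are both defined later in this file; the
+ * R_REG() accessor and the 'cc'/'osh' handles are assumptions borrowed
+ * from the driver's OSL, sketch only):
+ *
+ *   bool aob = (R_REG(osh, &cc->capabilities_ext) &
+ *               CC_CAP_EXT_AOB_PRESENT) != 0;
+ *   use the pmuregs_t block in the PMU core when 'aob' is true, instead
+ *   of the chipc pmu register aliases below.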
+ */ +typedef volatile struct { + uint32 PAD[384]; + uint32 pmucontrol; /* 0x600 */ + uint32 pmucapabilities; + uint32 pmustatus; + uint32 res_state; + uint32 res_pending; + uint32 pmutimer; + uint32 min_res_mask; + uint32 max_res_mask; + uint32 res_table_sel; + uint32 res_dep_mask; + uint32 res_updn_timer; + uint32 res_timer; + uint32 clkstretch; + uint32 pmuwatchdog; + uint32 gpiosel; /* 0x638, rev >= 1 */ + uint32 gpioenable; /* 0x63c, rev >= 1 */ + uint32 res_req_timer_sel; + uint32 res_req_timer; + uint32 res_req_mask; + uint32 PAD; + uint32 chipcontrol_addr; /* 0x650 */ + uint32 chipcontrol_data; /* 0x654 */ + uint32 regcontrol_addr; + uint32 regcontrol_data; + uint32 pllcontrol_addr; + uint32 pllcontrol_data; + uint32 pmustrapopt; /* 0x668, corerev >= 28 */ + uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */ + uint32 retention_ctl; /* 0x670 */ + uint32 PAD[3]; + uint32 retention_grpidx; /* 0x680 */ + uint32 retention_grpctl; /* 0x684 */ + uint32 PAD[20]; + uint32 pmucontrol_ext; /* 0x6d8 */ + uint32 slowclkperiod; /* 0x6dc */ + uint32 PAD[8]; + uint32 pmuintmask0; /* 0x700 */ + uint32 pmuintmask1; /* 0x704 */ + uint32 PAD[14]; + uint32 pmuintstatus; /* 0x740 */ + uint32 PAD[15]; + uint32 pmuintctrl0; /* 0x780 */ +} pmuregs_t; + +typedef struct eci_prerev35 { + uint32 eci_output; + uint32 eci_control; + uint32 eci_inputlo; + uint32 eci_inputmi; + uint32 eci_inputhi; + uint32 eci_inputintpolaritylo; + uint32 eci_inputintpolaritymi; + uint32 eci_inputintpolarityhi; + uint32 eci_intmasklo; + uint32 eci_intmaskmi; + uint32 eci_intmaskhi; + uint32 eci_eventlo; + uint32 eci_eventmi; + uint32 eci_eventhi; + uint32 eci_eventmasklo; + uint32 eci_eventmaskmi; + uint32 eci_eventmaskhi; + uint32 PAD[3]; +} eci_prerev35_t; + +typedef struct eci_rev35 { + uint32 eci_outputlo; + uint32 eci_outputhi; + uint32 eci_controllo; + uint32 eci_controlhi; + uint32 eci_inputlo; + uint32 eci_inputhi; + uint32 eci_inputintpolaritylo; + uint32 eci_inputintpolarityhi; + uint32 eci_intmasklo; + uint32 eci_intmaskhi; + uint32 eci_eventlo; + uint32 eci_eventhi; + uint32 eci_eventmasklo; + uint32 eci_eventmaskhi; + uint32 eci_auxtx; + uint32 eci_auxrx; + uint32 eci_datatag; + uint32 eci_uartescvalue; + uint32 eci_autobaudctr; + uint32 eci_uartfifolevel; +} eci_rev35_t; + +typedef struct flash_config { + uint32 PAD[19]; + /* Flash struct configuration registers (0x18c) for BCM4706 (corerev = 31) */ + uint32 flashstrconfig; +} flash_config_t; + +typedef volatile struct { + uint32 chipid; /* 0x0 */ + uint32 capabilities; + uint32 corecontrol; /* corerev >= 1 */ + uint32 bist; + + /* OTP */ + uint32 otpstatus; /* 0x10, corerev >= 10 */ + uint32 otpcontrol; + uint32 otpprog; + uint32 otplayout; /* corerev >= 23 */ + + /* Interrupt control */ + uint32 intstatus; /* 0x20 */ + uint32 intmask; + + /* Chip specific regs */ + uint32 chipcontrol; /* 0x28, rev >= 11 */ + uint32 chipstatus; /* 0x2c, rev >= 11 */ + + /* Jtag Master */ + uint32 jtagcmd; /* 0x30, rev >= 10 */ + uint32 jtagir; + uint32 jtagdr; + uint32 jtagctrl; + + /* serial flash interface registers */ + uint32 flashcontrol; /* 0x40 */ + uint32 flashaddress; + uint32 flashdata; + uint32 otplayoutextension; /* rev >= 35 */ + + /* Silicon backplane configuration broadcast control */ + uint32 broadcastaddress; /* 0x50 */ + uint32 broadcastdata; + + /* gpio - cleared only by power-on-reset */ + uint32 gpiopullup; /* 0x58, corerev >= 20 */ + uint32 gpiopulldown; /* 0x5c, corerev >= 20 */ + uint32 gpioin; /* 0x60 */ + uint32 gpioout; /* 0x64 */ + uint32 gpioouten; /* 0x68 
*/ + uint32 gpiocontrol; /* 0x6C */ + uint32 gpiointpolarity; /* 0x70 */ + uint32 gpiointmask; /* 0x74 */ + + /* GPIO events corerev >= 11 */ + uint32 gpioevent; + uint32 gpioeventintmask; + + /* Watchdog timer */ + uint32 watchdog; /* 0x80 */ + + /* GPIO events corerev >= 11 */ + uint32 gpioeventintpolarity; + + /* GPIO based LED powersave registers corerev >= 16 */ + uint32 gpiotimerval; /* 0x88 */ + uint32 gpiotimeroutmask; + + /* clock control */ + uint32 clockcontrol_n; /* 0x90 */ + uint32 clockcontrol_sb; /* aka m0 */ + uint32 clockcontrol_pci; /* aka m1 */ + uint32 clockcontrol_m2; /* mii/uart/mipsref */ + uint32 clockcontrol_m3; /* cpu */ + uint32 clkdiv; /* corerev >= 3 */ + uint32 gpiodebugsel; /* corerev >= 28 */ + uint32 capabilities_ext; /* 0xac */ + + /* pll delay registers (corerev >= 4) */ + uint32 pll_on_delay; /* 0xb0 */ + uint32 fref_sel_delay; + uint32 slow_clk_ctl; /* 5 < corerev < 10 */ + uint32 PAD; + + /* Instaclock registers (corerev >= 10) */ + uint32 system_clk_ctl; /* 0xc0 */ + uint32 clkstatestretch; + uint32 PAD[2]; + + /* Indirect backplane access (corerev >= 22) */ + uint32 bp_addrlow; /* 0xd0 */ + uint32 bp_addrhigh; + uint32 bp_data; + uint32 PAD; + uint32 bp_indaccess; + /* SPI registers, corerev >= 37 */ + uint32 gsioctrl; + uint32 gsioaddress; + uint32 gsiodata; + + /* More clock dividers (corerev >= 32) */ + uint32 clkdiv2; + /* FAB ID (corerev >= 40) */ + uint32 otpcontrol1; + uint32 fabid; /* 0xf8 */ + + /* In AI chips, pointer to erom */ + uint32 eromptr; /* 0xfc */ + + /* ExtBus control registers (corerev >= 3) */ + uint32 pcmcia_config; /* 0x100 */ + uint32 pcmcia_memwait; + uint32 pcmcia_attrwait; + uint32 pcmcia_iowait; + uint32 ide_config; + uint32 ide_memwait; + uint32 ide_attrwait; + uint32 ide_iowait; + uint32 prog_config; + uint32 prog_waitcount; + uint32 flash_config; + uint32 flash_waitcount; + uint32 SECI_config; /* 0x130 SECI configuration */ + uint32 SECI_status; + uint32 SECI_statusmask; + uint32 SECI_rxnibchanged; + + uint32 PAD[20]; + + /* SROM interface (corerev >= 32) */ + uint32 sromcontrol; /* 0x190 */ + uint32 sromaddress; + uint32 sromdata; + uint32 PAD[1]; /* 0x19C */ + /* NAND flash registers for BCM4706 (corerev = 31) */ + uint32 nflashctrl; /* 0x1a0 */ + uint32 nflashconf; + uint32 nflashcoladdr; + uint32 nflashrowaddr; + uint32 nflashdata; + uint32 nflashwaitcnt0; /* 0x1b4 */ + uint32 PAD[2]; + + uint32 seci_uart_data; /* 0x1C0 */ + uint32 seci_uart_bauddiv; + uint32 seci_uart_fcr; + uint32 seci_uart_lcr; + uint32 seci_uart_mcr; + uint32 seci_uart_lsr; + uint32 seci_uart_msr; + uint32 seci_uart_baudadj; + /* Clock control and hardware workarounds (corerev >= 20) */ + uint32 clk_ctl_st; /* 0x1e0 */ + uint32 hw_war; + uint32 PAD[70]; + + /* UARTs */ + uint8 uart0data; /* 0x300 */ + uint8 uart0imr; + uint8 uart0fcr; + uint8 uart0lcr; + uint8 uart0mcr; + uint8 uart0lsr; + uint8 uart0msr; + uint8 uart0scratch; + uint8 PAD[248]; /* corerev >= 1 */ + + uint8 uart1data; /* 0x400 */ + uint8 uart1imr; + uint8 uart1fcr; + uint8 uart1lcr; + uint8 uart1mcr; + uint8 uart1lsr; + uint8 uart1msr; + uint8 uart1scratch; /* 0x407 */ + uint32 PAD[62]; + + /* save/restore, corerev >= 48 */ + uint32 sr_capability; /* 0x500 */ + uint32 sr_control0; /* 0x504 */ + uint32 sr_control1; /* 0x508 */ + uint32 gpio_control; /* 0x50C */ + uint32 PAD[60]; + + /* PMU registers (corerev >= 20) */ + /* Note: all timers driven by ILP clock are updated asynchronously to HT/ALP. + * The CPU must read them twice, compare, and retry if different. 
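+ * An illustrative read-twice loop for pmutimer, assuming the driver's
+ * OSL R_REG() accessor and a chipcregs_t pointer 'cc' (sketch only):
+ *   uint32 t0, t1;
+ *   do {
+ *           t0 = R_REG(osh, &cc->pmutimer);
+ *           t1 = R_REG(osh, &cc->pmutimer);
+ *   } while (t0 != t1);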
+ */ + uint32 pmucontrol; /* 0x600 */ + uint32 pmucapabilities; + uint32 pmustatus; + uint32 res_state; + uint32 res_pending; + uint32 pmutimer; + uint32 min_res_mask; + uint32 max_res_mask; + uint32 res_table_sel; + uint32 res_dep_mask; + uint32 res_updn_timer; + uint32 res_timer; + uint32 clkstretch; + uint32 pmuwatchdog; + uint32 gpiosel; /* 0x638, rev >= 1 */ + uint32 gpioenable; /* 0x63c, rev >= 1 */ + uint32 res_req_timer_sel; + uint32 res_req_timer; + uint32 res_req_mask; + uint32 PAD; + uint32 chipcontrol_addr; /* 0x650 */ + uint32 chipcontrol_data; /* 0x654 */ + uint32 regcontrol_addr; + uint32 regcontrol_data; + uint32 pllcontrol_addr; + uint32 pllcontrol_data; + uint32 pmustrapopt; /* 0x668, corerev >= 28 */ + uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */ + uint32 retention_ctl; /* 0x670 */ + uint32 PAD[3]; + uint32 retention_grpidx; /* 0x680 */ + uint32 retention_grpctl; /* 0x684 */ + uint32 PAD[20]; + uint32 pmucontrol_ext; /* 0x6d8 */ + uint32 slowclkperiod; /* 0x6dc */ + uint32 PAD[8]; + uint32 pmuintmask0; /* 0x700 */ + uint32 pmuintmask1; /* 0x704 */ + uint32 PAD[14]; + uint32 pmuintstatus; /* 0x740 */ + uint32 PAD[15]; + uint32 pmuintctrl0; /* 0x780 */ + uint32 PAD[31]; + uint16 sromotp[512]; /* 0x800 */ +#ifdef CCNFLASH_SUPPORT + /* Nand flash MLC controller registers (corerev >= 38) */ + uint32 nand_revision; /* 0xC00 */ + uint32 nand_cmd_start; + uint32 nand_cmd_addr_x; + uint32 nand_cmd_addr; + uint32 nand_cmd_end_addr; + uint32 nand_cs_nand_select; + uint32 nand_cs_nand_xor; + uint32 PAD; + uint32 nand_spare_rd0; + uint32 nand_spare_rd4; + uint32 nand_spare_rd8; + uint32 nand_spare_rd12; + uint32 nand_spare_wr0; + uint32 nand_spare_wr4; + uint32 nand_spare_wr8; + uint32 nand_spare_wr12; + uint32 nand_acc_control; + uint32 PAD; + uint32 nand_config; + uint32 PAD; + uint32 nand_timing_1; + uint32 nand_timing_2; + uint32 nand_semaphore; + uint32 PAD; + uint32 nand_devid; + uint32 nand_devid_x; + uint32 nand_block_lock_status; + uint32 nand_intfc_status; + uint32 nand_ecc_corr_addr_x; + uint32 nand_ecc_corr_addr; + uint32 nand_ecc_unc_addr_x; + uint32 nand_ecc_unc_addr; + uint32 nand_read_error_count; + uint32 nand_corr_stat_threshold; + uint32 PAD[2]; + uint32 nand_read_addr_x; + uint32 nand_read_addr; + uint32 nand_page_program_addr_x; + uint32 nand_page_program_addr; + uint32 nand_copy_back_addr_x; + uint32 nand_copy_back_addr; + uint32 nand_block_erase_addr_x; + uint32 nand_block_erase_addr; + uint32 nand_inv_read_addr_x; + uint32 nand_inv_read_addr; + uint32 PAD[2]; + uint32 nand_blk_wr_protect; + uint32 PAD[3]; + uint32 nand_acc_control_cs1; + uint32 nand_config_cs1; + uint32 nand_timing_1_cs1; + uint32 nand_timing_2_cs1; + uint32 PAD[20]; + uint32 nand_spare_rd16; + uint32 nand_spare_rd20; + uint32 nand_spare_rd24; + uint32 nand_spare_rd28; + uint32 nand_cache_addr; + uint32 nand_cache_data; + uint32 nand_ctrl_config; + uint32 nand_ctrl_status; +#endif /* CCNFLASH_SUPPORT */ + uint32 gci_corecaps0; /* GCI starting at 0xC00 */ + uint32 gci_corecaps1; + uint32 gci_corecaps2; + uint32 gci_corectrl; + uint32 gci_corestat; /* 0xC10 */ + uint32 gci_intstat; /* 0xC14 */ + uint32 gci_intmask; /* 0xC18 */ + uint32 gci_wakemask; /* 0xC1C */ + uint32 gci_levelintstat; /* 0xC20 */ + uint32 gci_eventintstat; /* 0xC24 */ + uint32 PAD[6]; + uint32 gci_indirect_addr; /* 0xC40 */ + uint32 gci_gpioctl; /* 0xC44 */ + uint32 gci_gpiostatus; + uint32 gci_gpiomask; /* 0xC4C */ + uint32 PAD; + uint32 gci_miscctl; /* 0xC54 */ + uint32 gci_gpiointmask; + uint32 gci_gpiowakemask; + uint32 
gci_input[32]; /* C60 */ + uint32 gci_event[32]; /* CE0 */ + uint32 gci_output[4]; /* D60 */ + uint32 gci_control_0; /* 0xD70 */ + uint32 gci_control_1; /* 0xD74 */ + uint32 gci_intpolreg; /* 0xD78 */ + uint32 gci_levelintmask; /* 0xD7C */ + uint32 gci_eventintmask; /* 0xD80 */ + uint32 PAD[3]; + uint32 gci_inbandlevelintmask; /* 0xD90 */ + uint32 gci_inbandeventintmask; /* 0xD94 */ + uint32 PAD[2]; + uint32 gci_seciauxtx; /* 0xDA0 */ + uint32 gci_seciauxrx; /* 0xDA4 */ + uint32 gci_secitx_datatag; /* 0xDA8 */ + uint32 gci_secirx_datatag; /* 0xDAC */ + uint32 gci_secitx_datamask; /* 0xDB0 */ + uint32 gci_seciusef0tx_reg; /* 0xDB4 */ + uint32 gci_secif0tx_offset; /* 0xDB8 */ + uint32 gci_secif0rx_offset; /* 0xDBC */ + uint32 gci_secif1tx_offset; /* 0xDC0 */ + uint32 gci_rxfifo_common_ctrl; /* 0xDC4 */ + uint32 gci_rxfifoctrl; /* 0xDC8 */ + uint32 gci_uartreadid; /* DCC */ + uint32 gci_uartescval; /* DD0 */ + uint32 PAD; + uint32 gci_secififolevel; /* DD8 */ + uint32 gci_seciuartdata; /* DDC */ + uint32 gci_secibauddiv; /* DE0 */ + uint32 gci_secifcr; /* DE4 */ + uint32 gci_secilcr; /* DE8 */ + uint32 gci_secimcr; /* DEC */ + uint32 gci_secilsr; /* DF0 */ + uint32 gci_secimsr; /* DF4 */ + uint32 gci_baudadj; /* DF8 */ + uint32 PAD; + uint32 gci_chipctrl; /* 0xE00 */ + uint32 gci_chipsts; /* 0xE04 */ + uint32 gci_gpioout; /* 0xE08 */ + uint32 gci_gpioout_read; /* 0xE0C */ + uint32 gci_mpwaketx; /* 0xE10 */ + uint32 gci_mpwakedetect; /* 0xE14 */ + uint32 gci_seciin_ctrl; /* 0xE18 */ + uint32 gci_seciout_ctrl; /* 0xE1C */ + uint32 gci_seciin_auxfifo_en; /* 0xE20 */ + uint32 gci_seciout_txen_txbr; /* 0xE24 */ + uint32 gci_seciin_rxbrstatus; /* 0xE28 */ + uint32 gci_seciin_rxerrstatus; /* 0xE2C */ + uint32 gci_seciin_fcstatus; /* 0xE30 */ + uint32 gci_seciout_txstatus; /* 0xE34 */ + uint32 gci_seciout_txbrstatus; /* 0xE38 */ +} chipcregs_t; + +#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */ + + +#define CC_CHIPID 0 +#define CC_CAPABILITIES 4 +#define CC_CHIPST 0x2c +#define CC_EROMPTR 0xfc + +#define CC_OTPST 0x10 +#define CC_INTSTATUS 0x20 +#define CC_INTMASK 0x24 +#define CC_JTAGCMD 0x30 +#define CC_JTAGIR 0x34 +#define CC_JTAGDR 0x38 +#define CC_JTAGCTRL 0x3c +#define CC_GPIOPU 0x58 +#define CC_GPIOPD 0x5c +#define CC_GPIOIN 0x60 +#define CC_GPIOOUT 0x64 +#define CC_GPIOOUTEN 0x68 +#define CC_GPIOCTRL 0x6c +#define CC_GPIOPOL 0x70 +#define CC_GPIOINTM 0x74 +#define CC_GPIOEVENT 0x78 +#define CC_GPIOEVENTMASK 0x7c +#define CC_WATCHDOG 0x80 +#define CC_GPIOEVENTPOL 0x84 +#define CC_CLKC_N 0x90 +#define CC_CLKC_M0 0x94 +#define CC_CLKC_M1 0x98 +#define CC_CLKC_M2 0x9c +#define CC_CLKC_M3 0xa0 +#define CC_CLKDIV 0xa4 +#define CC_CAP_EXT 0xac +#define CC_SYS_CLK_CTL 0xc0 +#define CC_CLKDIV2 0xf0 +#define CC_CLK_CTL_ST SI_CLK_CTL_ST +#define PMU_CTL 0x600 +#define PMU_CAP 0x604 +#define PMU_ST 0x608 +#define PMU_RES_STATE 0x60c +#define PMU_RES_PENDING 0x610 +#define PMU_TIMER 0x614 +#define PMU_MIN_RES_MASK 0x618 +#define PMU_MAX_RES_MASK 0x61c +#define CC_CHIPCTL_ADDR 0x650 +#define CC_CHIPCTL_DATA 0x654 +#define PMU_REG_CONTROL_ADDR 0x658 +#define PMU_REG_CONTROL_DATA 0x65C +#define PMU_PLL_CONTROL_ADDR 0x660 +#define PMU_PLL_CONTROL_DATA 0x664 +#define CC_SROM_CTRL 0x190 +#define CC_SROM_OTP 0x800 /* SROM/OTP address space */ +#define CC_GCI_INDIRECT_ADDR_REG 0xC40 +#define CC_GCI_CHIP_CTRL_REG 0xE00 +#define CC_GCI_CC_OFFSET_2 2 +#define CC_GCI_CC_OFFSET_5 5 +#define CC_SWD_CTRL 0x380 +#define CC_SWD_REQACK 0x384 +#define CC_SWD_DATA 0x388 + + +#define CHIPCTRLREG0 0x0 +#define CHIPCTRLREG1 
0x1 +#define CHIPCTRLREG2 0x2 +#define CHIPCTRLREG3 0x3 +#define CHIPCTRLREG4 0x4 +#define CHIPCTRLREG5 0x5 +#define CHIPCTRLREG6 0x6 +#define REGCTRLREG4 0x4 +#define REGCTRLREG5 0x5 +#define REGCTRLREG6 0x6 +#define MINRESMASKREG 0x618 +#define MAXRESMASKREG 0x61c +#define CHIPCTRLADDR 0x650 +#define CHIPCTRLDATA 0x654 +#define RSRCTABLEADDR 0x620 +#define PMU_RES_DEP_MASK 0x624 +#define RSRCUPDWNTIME 0x628 +#define PMUREG_RESREQ_MASK 0x68c +#define PMUREG_RESREQ_TIMER 0x688 +#define PMUREG_RESREQ_MASK1 0x6f4 +#define PMUREG_RESREQ_TIMER1 0x6f0 +#define EXT_LPO_AVAIL 0x100 +#define LPO_SEL (1 << 0) +#define CC_EXT_LPO_PU 0x200000 +#define GC_EXT_LPO_PU 0x2 +#define CC_INT_LPO_PU 0x100000 +#define GC_INT_LPO_PU 0x1 +#define EXT_LPO_SEL 0x8 +#define INT_LPO_SEL 0x4 +#define ENABLE_FINE_CBUCK_CTRL (1 << 30) +#define REGCTRL5_PWM_AUTO_CTRL_MASK 0x007e0000 +#define REGCTRL5_PWM_AUTO_CTRL_SHIFT 17 +#define REGCTRL6_PWM_AUTO_CTRL_MASK 0x3fff0000 +#define REGCTRL6_PWM_AUTO_CTRL_SHIFT 16 +#define CC_BP_IND_ACCESS_START_SHIFT 9 +#define CC_BP_IND_ACCESS_START_MASK (1 << CC_BP_IND_ACCESS_START_SHIFT) +#define CC_BP_IND_ACCESS_RDWR_SHIFT 8 +#define CC_BP_IND_ACCESS_RDWR_MASK (1 << CC_BP_IND_ACCESS_RDWR_SHIFT) +#define CC_BP_IND_ACCESS_ERROR_SHIFT 10 +#define CC_BP_IND_ACCESS_ERROR_MASK (1 << CC_BP_IND_ACCESS_ERROR_SHIFT) + +#ifdef SR_DEBUG +#define SUBCORE_POWER_ON 0x0001 +#define PHY_POWER_ON 0x0010 +#define VDDM_POWER_ON 0x0100 +#define MEMLPLDO_POWER_ON 0x1000 +#define SUBCORE_POWER_ON_CHK 0x00040000 +#define PHY_POWER_ON_CHK 0x00080000 +#define VDDM_POWER_ON_CHK 0x00100000 +#define MEMLPLDO_POWER_ON_CHK 0x00200000 +#endif /* SR_DEBUG */ + +#ifdef CCNFLASH_SUPPORT +/* NAND flash support */ +#define CC_NAND_REVISION 0xC00 +#define CC_NAND_CMD_START 0xC04 +#define CC_NAND_CMD_ADDR 0xC0C +#define CC_NAND_SPARE_RD_0 0xC20 +#define CC_NAND_SPARE_RD_4 0xC24 +#define CC_NAND_SPARE_RD_8 0xC28 +#define CC_NAND_SPARE_RD_C 0xC2C +#define CC_NAND_CONFIG 0xC48 +#define CC_NAND_DEVID 0xC60 +#define CC_NAND_DEVID_EXT 0xC64 +#define CC_NAND_INTFC_STATUS 0xC6C +#endif /* CCNFLASH_SUPPORT */ + +/* chipid */ +#define CID_ID_MASK 0x0000ffff /**< Chip Id mask */ +#define CID_REV_MASK 0x000f0000 /**< Chip Revision mask */ +#define CID_REV_SHIFT 16 /**< Chip Revision shift */ +#define CID_PKG_MASK 0x00f00000 /**< Package Option mask */ +#define CID_PKG_SHIFT 20 /**< Package Option shift */ +#define CID_CC_MASK 0x0f000000 /**< CoreCount (corerev >= 4) */ +#define CID_CC_SHIFT 24 +#define CID_TYPE_MASK 0xf0000000 /**< Chip Type */ +#define CID_TYPE_SHIFT 28 + +/* capabilities */ +#define CC_CAP_UARTS_MASK 0x00000003 /**< Number of UARTs */ +#define CC_CAP_MIPSEB 0x00000004 /**< MIPS is in big-endian mode */ +#define CC_CAP_UCLKSEL 0x00000018 /**< UARTs clock select */ +#define CC_CAP_UINTCLK 0x00000008 /**< UARTs are driven by internal divided clock */ +#define CC_CAP_UARTGPIO 0x00000020 /**< UARTs own GPIOs 15:12 */ +#define CC_CAP_EXTBUS_MASK 0x000000c0 /**< External bus mask */ +#define CC_CAP_EXTBUS_NONE 0x00000000 /**< No ExtBus present */ +#define CC_CAP_EXTBUS_FULL 0x00000040 /**< ExtBus: PCMCIA, IDE & Prog */ +#define CC_CAP_EXTBUS_PROG 0x00000080 /**< ExtBus: ProgIf only */ +#define CC_CAP_FLASH_MASK 0x00000700 /**< Type of flash */ +#define CC_CAP_PLL_MASK 0x00038000 /**< Type of PLL */ +#define CC_CAP_PWR_CTL 0x00040000 /**< Power control */ +#define CC_CAP_OTPSIZE 0x00380000 /**< OTP Size (0 = none) */ +#define CC_CAP_OTPSIZE_SHIFT 19 /**< OTP Size shift */ +#define CC_CAP_OTPSIZE_BASE 5 /**< OTP Size base */ 
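+
+/* Illustrative decode of the chipid register with the CID_* masks above
+ * (R_REG() and the chipcregs_t pointer 'cc' are assumptions borrowed from
+ * the driver code, not part of this header):
+ *	uint32 w = R_REG(osh, &cc->chipid);
+ *	uint chip = w & CID_ID_MASK;
+ *	uint chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
+ *	uint pkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
+ */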
+#define CC_CAP_JTAGP 0x00400000 /**< JTAG Master Present */ +#define CC_CAP_ROM 0x00800000 /**< Internal boot rom active */ +#define CC_CAP_BKPLN64 0x08000000 /**< 64-bit backplane */ +#define CC_CAP_PMU 0x10000000 /**< PMU Present, rev >= 20 */ +#define CC_CAP_ECI 0x20000000 /**< ECI Present, rev >= 21 */ +#define CC_CAP_SROM 0x40000000 /**< Srom Present, rev >= 32 */ +#define CC_CAP_NFLASH 0x80000000 /**< Nand flash present, rev >= 35 */ + +#define CC_CAP2_SECI 0x00000001 /**< SECI Present, rev >= 36 */ +#define CC_CAP2_GSIO 0x00000002 /**< GSIO (spi/i2c) present, rev >= 37 */ + +/* capabilities extension */ +#define CC_CAP_EXT_SECI_PRESENT 0x00000001 /**< SECI present */ +#define CC_CAP_EXT_GSIO_PRESENT 0x00000002 /**< GSIO present */ +#define CC_CAP_EXT_GCI_PRESENT 0x00000004 /**< GCI present */ +#define CC_CAP_EXT_AOB_PRESENT 0x00000040 /**< AOB present */ +#define CC_CAP_EXT_SWD_PRESENT 0x00000400 /**< SWD present */ + +/* WL Channel Info to BT via GCI - bits 40 - 47 */ +#define GCI_WL_CHN_INFO_MASK (0xFF00) +/* bits [51:48] - reserved for wlan TX pwr index */ +/* bits [55:52] btc mode indication */ +#define GCI_WL_BTC_MODE_SHIFT (20) +#define GCI_WL_BTC_MODE_MASK (0xF << GCI_WL_BTC_MODE_SHIFT) +#define GCI_WL_ANT_BIT_MASK (0x00c0) +#define GCI_WL_ANT_SHIFT_BITS (6) +/* PLL type */ +#define PLL_NONE 0x00000000 +#define PLL_TYPE1 0x00010000 /**< 48MHz base, 3 dividers */ +#define PLL_TYPE2 0x00020000 /**< 48MHz, 4 dividers */ +#define PLL_TYPE3 0x00030000 /**< 25MHz, 2 dividers */ +#define PLL_TYPE4 0x00008000 /**< 48MHz, 4 dividers */ +#define PLL_TYPE5 0x00018000 /**< 25MHz, 4 dividers */ +#define PLL_TYPE6 0x00028000 /**< 100/200 or 120/240 only */ +#define PLL_TYPE7 0x00038000 /**< 25MHz, 4 dividers */ + +/* ILP clock */ +#define ILP_CLOCK 32000 + +/* ALP clock on pre-PMU chips */ +#define ALP_CLOCK 20000000 + +#ifdef CFG_SIM +#define NS_ALP_CLOCK 84922 +#define NS_SLOW_ALP_CLOCK 84922 +#define NS_CPU_CLOCK 534500 +#define NS_SLOW_CPU_CLOCK 534500 +#define NS_SI_CLOCK 271750 +#define NS_SLOW_SI_CLOCK 271750 +#define NS_FAST_MEM_CLOCK 271750 +#define NS_MEM_CLOCK 271750 +#define NS_SLOW_MEM_CLOCK 271750 +#else +#define NS_ALP_CLOCK 125000000 +#define NS_SLOW_ALP_CLOCK 100000000 +#define NS_CPU_CLOCK 1000000000 +#define NS_SLOW_CPU_CLOCK 800000000 +#define NS_SI_CLOCK 250000000 +#define NS_SLOW_SI_CLOCK 200000000 +#define NS_FAST_MEM_CLOCK 800000000 +#define NS_MEM_CLOCK 533000000 +#define NS_SLOW_MEM_CLOCK 400000000 +#endif /* CFG_SIM */ + +/* HT clock */ +#define HT_CLOCK 80000000 + +/* corecontrol */ +#define CC_UARTCLKO 0x00000001 /**< Drive UART with internal clock */ +#define CC_SE 0x00000002 /**< sync clk out enable (corerev >= 3) */ +#define CC_ASYNCGPIO 0x00000004 /**< 1=generate GPIO interrupt without backplane clock */ +#define CC_UARTCLKEN 0x00000008 /**< enable UART Clock (corerev > = 21 */ + +/* retention_ctl */ +#define RCTL_MEM_RET_SLEEP_LOG_SHIFT 29 +#define RCTL_MEM_RET_SLEEP_LOG_MASK (1 << RCTL_MEM_RET_SLEEP_LOG_SHIFT) + +/* 4321 chipcontrol */ +#define CHIPCTRL_4321A0_DEFAULT 0x3a4 +#define CHIPCTRL_4321A1_DEFAULT 0x0a4 +#define CHIPCTRL_4321_PLL_DOWN 0x800000 /**< serdes PLL down override */ + +/* Fields in the otpstatus register in rev >= 21 */ +#define OTPS_OL_MASK 0x000000ff +#define OTPS_OL_MFG 0x00000001 /**< manuf row is locked */ +#define OTPS_OL_OR1 0x00000002 /**< otp redundancy row 1 is locked */ +#define OTPS_OL_OR2 0x00000004 /**< otp redundancy row 2 is locked */ +#define OTPS_OL_GU 0x00000008 /**< general use region is locked */ +#define 
OTPS_GUP_MASK 0x00000f00 +#define OTPS_GUP_SHIFT 8 +#define OTPS_GUP_HW 0x00000100 /**< h/w subregion is programmed */ +#define OTPS_GUP_SW 0x00000200 /**< s/w subregion is programmed */ +#define OTPS_GUP_CI 0x00000400 /**< chipid/pkgopt subregion is programmed */ +#define OTPS_GUP_FUSE 0x00000800 /**< fuse subregion is programmed */ +#define OTPS_READY 0x00001000 +#define OTPS_RV(x) (1 << (16 + (x))) /**< redundancy entry valid */ +#define OTPS_RV_MASK 0x0fff0000 +#define OTPS_PROGOK 0x40000000 + +/* Fields in the otpcontrol register in rev >= 21 */ +#define OTPC_PROGSEL 0x00000001 +#define OTPC_PCOUNT_MASK 0x0000000e +#define OTPC_PCOUNT_SHIFT 1 +#define OTPC_VSEL_MASK 0x000000f0 +#define OTPC_VSEL_SHIFT 4 +#define OTPC_TMM_MASK 0x00000700 +#define OTPC_TMM_SHIFT 8 +#define OTPC_ODM 0x00000800 +#define OTPC_PROGEN 0x80000000 + +/* Fields in the 40nm otpcontrol register in rev >= 40 */ +#define OTPC_40NM_PROGSEL_SHIFT 0 +#define OTPC_40NM_PCOUNT_SHIFT 1 +#define OTPC_40NM_PCOUNT_WR 0xA +#define OTPC_40NM_PCOUNT_V1X 0xB +#define OTPC_40NM_REGCSEL_SHIFT 5 +#define OTPC_40NM_REGCSEL_DEF 0x4 +#define OTPC_40NM_PROGIN_SHIFT 8 +#define OTPC_40NM_R2X_SHIFT 10 +#define OTPC_40NM_ODM_SHIFT 11 +#define OTPC_40NM_DF_SHIFT 15 +#define OTPC_40NM_VSEL_SHIFT 16 +#define OTPC_40NM_VSEL_WR 0xA +#define OTPC_40NM_VSEL_V1X 0xA +#define OTPC_40NM_VSEL_R1X 0x5 +#define OTPC_40NM_COFAIL_SHIFT 30 + +#define OTPC1_CPCSEL_SHIFT 0 +#define OTPC1_CPCSEL_DEF 6 +#define OTPC1_TM_SHIFT 8 +#define OTPC1_TM_WR 0x84 +#define OTPC1_TM_V1X 0x84 +#define OTPC1_TM_R1X 0x4 +#define OTPC1_CLK_EN_MASK 0x00020000 +#define OTPC1_CLK_DIV_MASK 0x00FC0000 + +/* Fields in otpprog in rev >= 21 and HND OTP */ +#define OTPP_COL_MASK 0x000000ff +#define OTPP_COL_SHIFT 0 +#define OTPP_ROW_MASK 0x0000ff00 +#define OTPP_ROW_MASK9 0x0001ff00 /* for ccrev >= 49 */ +#define OTPP_ROW_SHIFT 8 +#define OTPP_OC_MASK 0x0f000000 +#define OTPP_OC_SHIFT 24 +#define OTPP_READERR 0x10000000 +#define OTPP_VALUE_MASK 0x20000000 +#define OTPP_VALUE_SHIFT 29 +#define OTPP_START_BUSY 0x80000000 +#define OTPP_READ 0x40000000 /* HND OTP */ + +/* Fields in otplayout register */ +#define OTPL_HWRGN_OFF_MASK 0x00000FFF +#define OTPL_HWRGN_OFF_SHIFT 0 +#define OTPL_WRAP_REVID_MASK 0x00F80000 +#define OTPL_WRAP_REVID_SHIFT 19 +#define OTPL_WRAP_TYPE_MASK 0x00070000 +#define OTPL_WRAP_TYPE_SHIFT 16 +#define OTPL_WRAP_TYPE_65NM 0 +#define OTPL_WRAP_TYPE_40NM 1 +#define OTPL_ROW_SIZE_MASK 0x0000F000 +#define OTPL_ROW_SIZE_SHIFT 12 + +/* otplayout reg corerev >= 36 */ +#define OTP_CISFORMAT_NEW 0x80000000 + +/* Opcodes for OTPP_OC field */ +#define OTPPOC_READ 0 +#define OTPPOC_BIT_PROG 1 +#define OTPPOC_VERIFY 3 +#define OTPPOC_INIT 4 +#define OTPPOC_SET 5 +#define OTPPOC_RESET 6 +#define OTPPOC_OCST 7 +#define OTPPOC_ROW_LOCK 8 +#define OTPPOC_PRESCN_TEST 9 + +/* Opcodes for OTPP_OC field (40NM) */ +#define OTPPOC_READ_40NM 0 +#define OTPPOC_PROG_ENABLE_40NM 1 +#define OTPPOC_PROG_DISABLE_40NM 2 +#define OTPPOC_VERIFY_40NM 3 +#define OTPPOC_WORD_VERIFY_1_40NM 4 +#define OTPPOC_ROW_LOCK_40NM 5 +#define OTPPOC_STBY_40NM 6 +#define OTPPOC_WAKEUP_40NM 7 +#define OTPPOC_WORD_VERIFY_0_40NM 8 +#define OTPPOC_PRESCN_TEST_40NM 9 +#define OTPPOC_BIT_PROG_40NM 10 +#define OTPPOC_WORDPROG_40NM 11 +#define OTPPOC_BURNIN_40NM 12 +#define OTPPOC_AUTORELOAD_40NM 13 +#define OTPPOC_OVST_READ_40NM 14 +#define OTPPOC_OVST_PROG_40NM 15 + +/* Fields in otplayoutextension */ +#define OTPLAYOUTEXT_FUSE_MASK 0x3FF + + +/* Jtagm characteristics that appeared at a given corerev */ +#define 
JTAGM_CREV_OLD 10 /**< Old command set, 16bit max IR */ +#define JTAGM_CREV_IRP 22 /**< Able to do pause-ir */ +#define JTAGM_CREV_RTI 28 /**< Able to do return-to-idle */ + +/* jtagcmd */ +#define JCMD_START 0x80000000 +#define JCMD_BUSY 0x80000000 +#define JCMD_STATE_MASK 0x60000000 +#define JCMD_STATE_TLR 0x00000000 /**< Test-logic-reset */ +#define JCMD_STATE_PIR 0x20000000 /**< Pause IR */ +#define JCMD_STATE_PDR 0x40000000 /**< Pause DR */ +#define JCMD_STATE_RTI 0x60000000 /**< Run-test-idle */ +#define JCMD0_ACC_MASK 0x0000f000 +#define JCMD0_ACC_IRDR 0x00000000 +#define JCMD0_ACC_DR 0x00001000 +#define JCMD0_ACC_IR 0x00002000 +#define JCMD0_ACC_RESET 0x00003000 +#define JCMD0_ACC_IRPDR 0x00004000 +#define JCMD0_ACC_PDR 0x00005000 +#define JCMD0_IRW_MASK 0x00000f00 +#define JCMD_ACC_MASK 0x000f0000 /**< Changes for corerev 11 */ +#define JCMD_ACC_IRDR 0x00000000 +#define JCMD_ACC_DR 0x00010000 +#define JCMD_ACC_IR 0x00020000 +#define JCMD_ACC_RESET 0x00030000 +#define JCMD_ACC_IRPDR 0x00040000 +#define JCMD_ACC_PDR 0x00050000 +#define JCMD_ACC_PIR 0x00060000 +#define JCMD_ACC_IRDR_I 0x00070000 /**< rev 28: return to run-test-idle */ +#define JCMD_ACC_DR_I 0x00080000 /**< rev 28: return to run-test-idle */ +#define JCMD_IRW_MASK 0x00001f00 +#define JCMD_IRW_SHIFT 8 +#define JCMD_DRW_MASK 0x0000003f + +/* jtagctrl */ +#define JCTRL_FORCE_CLK 4 /**< Force clock */ +#define JCTRL_EXT_EN 2 /**< Enable external targets */ +#define JCTRL_EN 1 /**< Enable Jtag master */ +#define JCTRL_TAPSEL_BIT 0x00000008 /**< JtagMasterCtrl tap_sel bit */ + +/* swdmasterctrl */ +#define SWDCTRL_INT_EN 8 /**< Enable internal targets */ +#define SWDCTRL_FORCE_CLK 4 /**< Force clock */ +#define SWDCTRL_OVJTAG 2 /**< Enable shared SWD/JTAG pins */ +#define SWDCTRL_EN 1 /**< Enable Jtag master */ + +/* Fields in clkdiv */ +#define CLKD_SFLASH 0x1f000000 +#define CLKD_SFLASH_SHIFT 24 +#define CLKD_OTP 0x000f0000 +#define CLKD_OTP_SHIFT 16 +#define CLKD_JTAG 0x00000f00 +#define CLKD_JTAG_SHIFT 8 +#define CLKD_UART 0x000000ff + +#define CLKD2_SROM 0x00000003 +#define CLKD2_SWD 0xf8000000 +#define CLKD2_SWD_SHIFT 27 + +/* intstatus/intmask */ +#define CI_GPIO 0x00000001 /**< gpio intr */ +#define CI_EI 0x00000002 /**< extif intr (corerev >= 3) */ +#define CI_TEMP 0x00000004 /**< temp. 
ctrl intr (corerev >= 15) */ +#define CI_SIRQ 0x00000008 /**< serial IRQ intr (corerev >= 15) */ +#define CI_ECI 0x00000010 /**< eci intr (corerev >= 21) */ +#define CI_PMU 0x00000020 /**< pmu intr (corerev >= 21) */ +#define CI_UART 0x00000040 /**< uart intr (corerev >= 21) */ +#define CI_WDRESET 0x80000000 /**< watchdog reset occurred */ + +/* slow_clk_ctl */ +#define SCC_SS_MASK 0x00000007 /**< slow clock source mask */ +#define SCC_SS_LPO 0x00000000 /**< source of slow clock is LPO */ +#define SCC_SS_XTAL 0x00000001 /**< source of slow clock is crystal */ +#define SCC_SS_PCI 0x00000002 /**< source of slow clock is PCI */ +#define SCC_LF 0x00000200 /**< LPOFreqSel, 1: 160Khz, 0: 32KHz */ +#define SCC_LP 0x00000400 /**< LPOPowerDown, 1: LPO is disabled, + * 0: LPO is enabled + */ +#define SCC_FS 0x00000800 /**< ForceSlowClk, 1: sb/cores running on slow clock, + * 0: power logic control + */ +#define SCC_IP 0x00001000 /**< IgnorePllOffReq, 1/0: power logic ignores/honors + * PLL clock disable requests from core + */ +#define SCC_XC 0x00002000 /**< XtalControlEn, 1/0: power logic does/doesn't + * disable crystal when appropriate + */ +#define SCC_XP 0x00004000 /**< XtalPU (RO), 1/0: crystal running/disabled */ +#define SCC_CD_MASK 0xffff0000 /**< ClockDivider (SlowClk = 1/(4+divisor)) */ +#define SCC_CD_SHIFT 16 + +/* system_clk_ctl */ +#define SYCC_IE 0x00000001 /**< ILPen: Enable Idle Low Power */ +#define SYCC_AE 0x00000002 /**< ALPen: Enable Active Low Power */ +#define SYCC_FP 0x00000004 /**< ForcePLLOn */ +#define SYCC_AR 0x00000008 /**< Force ALP (or HT if ALPen is not set */ +#define SYCC_HR 0x00000010 /**< Force HT */ +#define SYCC_CD_MASK 0xffff0000 /**< ClkDiv (ILP = 1/(4 * (divisor + 1)) */ +#define SYCC_CD_SHIFT 16 + +/* Indirect backplane access */ +#define BPIA_BYTEEN 0x0000000f +#define BPIA_SZ1 0x00000001 +#define BPIA_SZ2 0x00000003 +#define BPIA_SZ4 0x00000007 +#define BPIA_SZ8 0x0000000f +#define BPIA_WRITE 0x00000100 +#define BPIA_START 0x00000200 +#define BPIA_BUSY 0x00000200 +#define BPIA_ERROR 0x00000400 + +/* pcmcia/prog/flash_config */ +#define CF_EN 0x00000001 /**< enable */ +#define CF_EM_MASK 0x0000000e /**< mode */ +#define CF_EM_SHIFT 1 +#define CF_EM_FLASH 0 /**< flash/asynchronous mode */ +#define CF_EM_SYNC 2 /**< synchronous mode */ +#define CF_EM_PCMCIA 4 /**< pcmcia mode */ +#define CF_DS 0x00000010 /**< destsize: 0=8bit, 1=16bit */ +#define CF_BS 0x00000020 /**< byteswap */ +#define CF_CD_MASK 0x000000c0 /**< clock divider */ +#define CF_CD_SHIFT 6 +#define CF_CD_DIV2 0x00000000 /**< backplane/2 */ +#define CF_CD_DIV3 0x00000040 /**< backplane/3 */ +#define CF_CD_DIV4 0x00000080 /**< backplane/4 */ +#define CF_CE 0x00000100 /**< clock enable */ +#define CF_SB 0x00000200 /**< size/bytestrobe (synch only) */ + +/* pcmcia_memwait */ +#define PM_W0_MASK 0x0000003f /**< waitcount0 */ +#define PM_W1_MASK 0x00001f00 /**< waitcount1 */ +#define PM_W1_SHIFT 8 +#define PM_W2_MASK 0x001f0000 /**< waitcount2 */ +#define PM_W2_SHIFT 16 +#define PM_W3_MASK 0x1f000000 /**< waitcount3 */ +#define PM_W3_SHIFT 24 + +/* pcmcia_attrwait */ +#define PA_W0_MASK 0x0000003f /**< waitcount0 */ +#define PA_W1_MASK 0x00001f00 /**< waitcount1 */ +#define PA_W1_SHIFT 8 +#define PA_W2_MASK 0x001f0000 /**< waitcount2 */ +#define PA_W2_SHIFT 16 +#define PA_W3_MASK 0x1f000000 /**< waitcount3 */ +#define PA_W3_SHIFT 24 + +/* pcmcia_iowait */ +#define PI_W0_MASK 0x0000003f /**< waitcount0 */ +#define PI_W1_MASK 0x00001f00 /**< waitcount1 */ +#define PI_W1_SHIFT 8 +#define PI_W2_MASK 
0x001f0000 /**< waitcount2 */ +#define PI_W2_SHIFT 16 +#define PI_W3_MASK 0x1f000000 /**< waitcount3 */ +#define PI_W3_SHIFT 24 + +/* prog_waitcount */ +#define PW_W0_MASK 0x0000001f /**< waitcount0 */ +#define PW_W1_MASK 0x00001f00 /**< waitcount1 */ +#define PW_W1_SHIFT 8 +#define PW_W2_MASK 0x001f0000 /**< waitcount2 */ +#define PW_W2_SHIFT 16 +#define PW_W3_MASK 0x1f000000 /**< waitcount3 */ +#define PW_W3_SHIFT 24 + +#define PW_W0 0x0000000c +#define PW_W1 0x00000a00 +#define PW_W2 0x00020000 +#define PW_W3 0x01000000 + +/* flash_waitcount */ +#define FW_W0_MASK 0x0000003f /**< waitcount0 */ +#define FW_W1_MASK 0x00001f00 /**< waitcount1 */ +#define FW_W1_SHIFT 8 +#define FW_W2_MASK 0x001f0000 /**< waitcount2 */ +#define FW_W2_SHIFT 16 +#define FW_W3_MASK 0x1f000000 /**< waitcount3 */ +#define FW_W3_SHIFT 24 + +/* When Srom support present, fields in sromcontrol */ +#define SRC_START 0x80000000 +#define SRC_BUSY 0x80000000 +#define SRC_OPCODE 0x60000000 +#define SRC_OP_READ 0x00000000 +#define SRC_OP_WRITE 0x20000000 +#define SRC_OP_WRDIS 0x40000000 +#define SRC_OP_WREN 0x60000000 +#define SRC_OTPSEL 0x00000010 +#define SRC_OTPPRESENT 0x00000020 +#define SRC_LOCK 0x00000008 +#define SRC_SIZE_MASK 0x00000006 +#define SRC_SIZE_1K 0x00000000 +#define SRC_SIZE_4K 0x00000002 +#define SRC_SIZE_16K 0x00000004 +#define SRC_SIZE_SHIFT 1 +#define SRC_PRESENT 0x00000001 + +/* Fields in pmucontrol */ +#define PCTL_ILP_DIV_MASK 0xffff0000 +#define PCTL_ILP_DIV_SHIFT 16 +#define PCTL_LQ_REQ_EN 0x00008000 +#define PCTL_PLL_PLLCTL_UPD 0x00000400 /**< rev 2 */ +#define PCTL_NOILP_ON_WAIT 0x00000200 /**< rev 1 */ +#define PCTL_HT_REQ_EN 0x00000100 +#define PCTL_ALP_REQ_EN 0x00000080 +#define PCTL_XTALFREQ_MASK 0x0000007c +#define PCTL_XTALFREQ_SHIFT 2 +#define PCTL_ILP_DIV_EN 0x00000002 +#define PCTL_LPO_SEL 0x00000001 + +/* Retention Control */ +#define PMU_RCTL_CLK_DIV_SHIFT 0 +#define PMU_RCTL_CHAIN_LEN_SHIFT 12 +#define PMU_RCTL_MACPHY_DISABLE_SHIFT 26 +#define PMU_RCTL_MACPHY_DISABLE_MASK (1 << 26) +#define PMU_RCTL_LOGIC_DISABLE_SHIFT 27 +#define PMU_RCTL_LOGIC_DISABLE_MASK (1 << 27) +#define PMU_RCTL_MEMSLP_LOG_SHIFT 28 +#define PMU_RCTL_MEMSLP_LOG_MASK (1 << 28) +#define PMU_RCTL_MEMRETSLP_LOG_SHIFT 29 +#define PMU_RCTL_MEMRETSLP_LOG_MASK (1 << 29) + +/* Retention Group Control */ +#define PMU_RCTLGRP_CHAIN_LEN_SHIFT 0 +#define PMU_RCTLGRP_RMODE_ENABLE_SHIFT 14 +#define PMU_RCTLGRP_RMODE_ENABLE_MASK (1 << 14) +#define PMU_RCTLGRP_DFT_ENABLE_SHIFT 15 +#define PMU_RCTLGRP_DFT_ENABLE_MASK (1 << 15) +#define PMU_RCTLGRP_NSRST_DISABLE_SHIFT 16 +#define PMU_RCTLGRP_NSRST_DISABLE_MASK (1 << 16) +/* Retention Group Control special for 4334 */ +#define PMU4334_RCTLGRP_CHAIN_LEN_GRP0 338 +#define PMU4334_RCTLGRP_CHAIN_LEN_GRP1 315 +/* Retention Group Control special for 43341 */ +#define PMU43341_RCTLGRP_CHAIN_LEN_GRP0 366 +#define PMU43341_RCTLGRP_CHAIN_LEN_GRP1 330 + +/* Fields in clkstretch */ +#define CSTRETCH_HT 0xffff0000 +#define CSTRETCH_ALP 0x0000ffff + +/* gpiotimerval */ +#define GPIO_ONTIME_SHIFT 16 + +/* clockcontrol_n */ +#define CN_N1_MASK 0x3f /**< n1 control */ +#define CN_N2_MASK 0x3f00 /**< n2 control */ +#define CN_N2_SHIFT 8 +#define CN_PLLC_MASK 0xf0000 /**< pll control */ +#define CN_PLLC_SHIFT 16 + +/* clockcontrol_sb/pci/uart */ +#define CC_M1_MASK 0x3f /**< m1 control */ +#define CC_M2_MASK 0x3f00 /**< m2 control */ +#define CC_M2_SHIFT 8 +#define CC_M3_MASK 0x3f0000 /**< m3 control */ +#define CC_M3_SHIFT 16 +#define CC_MC_MASK 0x1f000000 /**< mux control */ +#define 
CC_MC_SHIFT 24 + +/* N3M Clock control magic field values */ +#define CC_F6_2 0x02 /**< A factor of 2 in */ +#define CC_F6_3 0x03 /**< 6-bit fields like */ +#define CC_F6_4 0x05 /**< N1, M1 or M3 */ +#define CC_F6_5 0x09 +#define CC_F6_6 0x11 +#define CC_F6_7 0x21 + +#define CC_F5_BIAS 5 /**< 5-bit fields get this added */ + +#define CC_MC_BYPASS 0x08 +#define CC_MC_M1 0x04 +#define CC_MC_M1M2 0x02 +#define CC_MC_M1M2M3 0x01 +#define CC_MC_M1M3 0x11 + +/* Type 2 Clock control magic field values */ +#define CC_T2_BIAS 2 /**< n1, n2, m1 & m3 bias */ +#define CC_T2M2_BIAS 3 /**< m2 bias */ + +#define CC_T2MC_M1BYP 1 +#define CC_T2MC_M2BYP 2 +#define CC_T2MC_M3BYP 4 + +/* Type 6 Clock control magic field values */ +#define CC_T6_MMASK 1 /**< bits of interest in m */ +#define CC_T6_M0 120000000 /**< sb clock for m = 0 */ +#define CC_T6_M1 100000000 /**< sb clock for m = 1 */ +#define SB2MIPS_T6(sb) (2 * (sb)) + +/* Common clock base */ +#define CC_CLOCK_BASE1 24000000 /**< Half the clock freq */ +#define CC_CLOCK_BASE2 12500000 /**< Alternate crystal on some PLLs */ + +/* Clock control values for 200MHz in 5350 */ +#define CLKC_5350_N 0x0311 +#define CLKC_5350_M 0x04020009 + +/* Flash types in the chipcommon capabilities register */ +#define FLASH_NONE 0x000 /**< No flash */ +#define SFLASH_ST 0x100 /**< ST serial flash */ +#define SFLASH_AT 0x200 /**< Atmel serial flash */ +#define NFLASH 0x300 +#define PFLASH 0x700 /**< Parallel flash */ +#define QSPIFLASH_ST 0x800 +#define QSPIFLASH_AT 0x900 + +/* Bits in the ExtBus config registers */ +#define CC_CFG_EN 0x0001 /**< Enable */ +#define CC_CFG_EM_MASK 0x000e /**< Extif Mode */ +#define CC_CFG_EM_ASYNC 0x0000 /**< Async/Parallel flash */ +#define CC_CFG_EM_SYNC 0x0002 /**< Synchronous */ +#define CC_CFG_EM_PCMCIA 0x0004 /**< PCMCIA */ +#define CC_CFG_EM_IDE 0x0006 /**< IDE */ +#define CC_CFG_DS 0x0010 /**< Data size, 0=8bit, 1=16bit */ +#define CC_CFG_CD_MASK 0x00e0 /**< Sync: Clock divisor, rev >= 20 */ +#define CC_CFG_CE 0x0100 /**< Sync: Clock enable, rev >= 20 */ +#define CC_CFG_SB 0x0200 /**< Sync: Size/Bytestrobe, rev >= 20 */ +#define CC_CFG_IS 0x0400 /**< Extif Sync Clk Select, rev >= 20 */ + +/* ExtBus address space */ +#define CC_EB_BASE 0x1a000000 /**< Chipc ExtBus base address */ +#define CC_EB_PCMCIA_MEM 0x1a000000 /**< PCMCIA 0 memory base address */ +#define CC_EB_PCMCIA_IO 0x1a200000 /**< PCMCIA 0 I/O base address */ +#define CC_EB_PCMCIA_CFG 0x1a400000 /**< PCMCIA 0 config base address */ +#define CC_EB_IDE 0x1a800000 /**< IDE memory base */ +#define CC_EB_PCMCIA1_MEM 0x1a800000 /**< PCMCIA 1 memory base address */ +#define CC_EB_PCMCIA1_IO 0x1aa00000 /**< PCMCIA 1 I/O base address */ +#define CC_EB_PCMCIA1_CFG 0x1ac00000 /**< PCMCIA 1 config base address */ +#define CC_EB_PROGIF 0x1b000000 /**< ProgIF Async/Sync base address */ + + +/* Start/busy bit in flashcontrol */ +#define SFLASH_OPCODE 0x000000ff +#define SFLASH_ACTION 0x00000700 +#define SFLASH_CS_ACTIVE 0x00001000 /**< Chip Select Active, rev >= 20 */ +#define SFLASH_START 0x80000000 +#define SFLASH_BUSY SFLASH_START + +/* flashcontrol action codes */ +#define SFLASH_ACT_OPONLY 0x0000 /**< Issue opcode only */ +#define SFLASH_ACT_OP1D 0x0100 /**< opcode + 1 data byte */ +#define SFLASH_ACT_OP3A 0x0200 /**< opcode + 3 addr bytes */ +#define SFLASH_ACT_OP3A1D 0x0300 /**< opcode + 3 addr & 1 data bytes */ +#define SFLASH_ACT_OP3A4D 0x0400 /**< opcode + 3 addr & 4 data bytes */ +#define SFLASH_ACT_OP3A4X4D 0x0500 /**< opcode + 3 addr, 4 don't care & 4 data bytes */ 
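+
+/* Illustrative polled serial-flash read built from SFLASH_START/SFLASH_BUSY
+ * above and the SFLASH_ST_READ action+opcode below (the W_REG()/R_REG()
+ * OSL accessors and the 'cc'/'osh' handles are assumptions, sketch only):
+ *	W_REG(osh, &cc->flashaddress, offset);
+ *	W_REG(osh, &cc->flashcontrol, SFLASH_START | SFLASH_ST_READ);
+ *	while (R_REG(osh, &cc->flashcontrol) & SFLASH_BUSY)
+ *		;
+ *	data = R_REG(osh, &cc->flashdata);
+ */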
+#define SFLASH_ACT_OP3A1X4D 0x0700 /**< opcode + 3 addr, 1 don't care & 4 data bytes */ + +/* flashcontrol action+opcodes for ST flashes */ +#define SFLASH_ST_WREN 0x0006 /**< Write Enable */ +#define SFLASH_ST_WRDIS 0x0004 /**< Write Disable */ +#define SFLASH_ST_RDSR 0x0105 /**< Read Status Register */ +#define SFLASH_ST_WRSR 0x0101 /**< Write Status Register */ +#define SFLASH_ST_READ 0x0303 /**< Read Data Bytes */ +#define SFLASH_ST_PP 0x0302 /**< Page Program */ +#define SFLASH_ST_SE 0x02d8 /**< Sector Erase */ +#define SFLASH_ST_BE 0x00c7 /**< Bulk Erase */ +#define SFLASH_ST_DP 0x00b9 /**< Deep Power-down */ +#define SFLASH_ST_RES 0x03ab /**< Read Electronic Signature */ +#define SFLASH_ST_CSA 0x1000 /**< Keep chip select asserted */ +#define SFLASH_ST_SSE 0x0220 /**< Sub-sector Erase */ + +#define SFLASH_MXIC_RDID 0x0390 /**< Read Manufacture ID */ +#define SFLASH_MXIC_MFID 0xc2 /**< MXIC Manufacture ID */ + +/* Status register bits for ST flashes */ +#define SFLASH_ST_WIP 0x01 /**< Write In Progress */ +#define SFLASH_ST_WEL 0x02 /**< Write Enable Latch */ +#define SFLASH_ST_BP_MASK 0x1c /**< Block Protect */ +#define SFLASH_ST_BP_SHIFT 2 +#define SFLASH_ST_SRWD 0x80 /**< Status Register Write Disable */ + +/* flashcontrol action+opcodes for Atmel flashes */ +#define SFLASH_AT_READ 0x07e8 +#define SFLASH_AT_PAGE_READ 0x07d2 +#define SFLASH_AT_BUF1_READ +#define SFLASH_AT_BUF2_READ +#define SFLASH_AT_STATUS 0x01d7 +#define SFLASH_AT_BUF1_WRITE 0x0384 +#define SFLASH_AT_BUF2_WRITE 0x0387 +#define SFLASH_AT_BUF1_ERASE_PROGRAM 0x0283 +#define SFLASH_AT_BUF2_ERASE_PROGRAM 0x0286 +#define SFLASH_AT_BUF1_PROGRAM 0x0288 +#define SFLASH_AT_BUF2_PROGRAM 0x0289 +#define SFLASH_AT_PAGE_ERASE 0x0281 +#define SFLASH_AT_BLOCK_ERASE 0x0250 +#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382 +#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385 +#define SFLASH_AT_BUF1_LOAD 0x0253 +#define SFLASH_AT_BUF2_LOAD 0x0255 +#define SFLASH_AT_BUF1_COMPARE 0x0260 +#define SFLASH_AT_BUF2_COMPARE 0x0261 +#define SFLASH_AT_BUF1_REPROGRAM 0x0258 +#define SFLASH_AT_BUF2_REPROGRAM 0x0259 + +/* Status register bits for Atmel flashes */ +#define SFLASH_AT_READY 0x80 +#define SFLASH_AT_MISMATCH 0x40 +#define SFLASH_AT_ID_MASK 0x38 +#define SFLASH_AT_ID_SHIFT 3 + +/* SPI register bits, corerev >= 37 */ +#define GSIO_START 0x80000000 +#define GSIO_BUSY GSIO_START + +/* + * These are the UART port assignments, expressed as offsets from the base + * register. These assignments should hold for any serial port based on + * a 8250, 16450, or 16550(A). 
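+ *
+ * A minimal polled transmit using the offsets defined below (illustrative;
+ * 'uart_base' and the byte-wide readb()/writeb() accessors are assumptions,
+ * not part of this header):
+ *   while (!(readb(uart_base + UART_LSR) & UART_LSR_THRE))
+ *           ;
+ *   writeb(c, uart_base + UART_TX);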
+ */ + +#define UART_RX 0 /**< In: Receive buffer (DLAB=0) */ +#define UART_TX 0 /**< Out: Transmit buffer (DLAB=0) */ +#define UART_DLL 0 /**< Out: Divisor Latch Low (DLAB=1) */ +#define UART_IER 1 /**< In/Out: Interrupt Enable Register (DLAB=0) */ +#define UART_DLM 1 /**< Out: Divisor Latch High (DLAB=1) */ +#define UART_IIR 2 /**< In: Interrupt Identity Register */ +#define UART_FCR 2 /**< Out: FIFO Control Register */ +#define UART_LCR 3 /**< Out: Line Control Register */ +#define UART_MCR 4 /**< Out: Modem Control Register */ +#define UART_LSR 5 /**< In: Line Status Register */ +#define UART_MSR 6 /**< In: Modem Status Register */ +#define UART_SCR 7 /**< I/O: Scratch Register */ +#define UART_LCR_DLAB 0x80 /**< Divisor latch access bit */ +#define UART_LCR_WLEN8 0x03 /**< Word length: 8 bits */ +#define UART_MCR_OUT2 0x08 /**< MCR GPIO out 2 */ +#define UART_MCR_LOOP 0x10 /**< Enable loopback test mode */ +#define UART_LSR_RX_FIFO 0x80 /**< Receive FIFO error */ +#define UART_LSR_TDHR 0x40 /**< Data-hold-register empty */ +#define UART_LSR_THRE 0x20 /**< Transmit-hold-register empty */ +#define UART_LSR_BREAK 0x10 /**< Break interrupt */ +#define UART_LSR_FRAMING 0x08 /**< Framing error */ +#define UART_LSR_PARITY 0x04 /**< Parity error */ +#define UART_LSR_OVERRUN 0x02 /**< Overrun error */ +#define UART_LSR_RXRDY 0x01 /**< Receiver ready */ +#define UART_FCR_FIFO_ENABLE 1 /**< FIFO control register bit controlling FIFO enable/disable */ + +/* Interrupt Identity Register (IIR) bits */ +#define UART_IIR_FIFO_MASK 0xc0 /**< IIR FIFO disable/enabled mask */ +#define UART_IIR_INT_MASK 0xf /**< IIR interrupt ID source */ +#define UART_IIR_MDM_CHG 0x0 /**< Modem status changed */ +#define UART_IIR_NOINT 0x1 /**< No interrupt pending */ +#define UART_IIR_THRE 0x2 /**< THR empty */ +#define UART_IIR_RCVD_DATA 0x4 /**< Received data available */ +#define UART_IIR_RCVR_STATUS 0x6 /**< Receiver status */ +#define UART_IIR_CHAR_TIME 0xc /**< Character time */ + +/* Interrupt Enable Register (IER) bits */ +#define UART_IER_PTIME 128 /**< Programmable THRE Interrupt Mode Enable */ +#define UART_IER_EDSSI 8 /**< enable modem status interrupt */ +#define UART_IER_ELSI 4 /**< enable receiver line status interrupt */ +#define UART_IER_ETBEI 2 /**< enable transmitter holding register empty interrupt */ +#define UART_IER_ERBFI 1 /**< enable data available interrupt */ + +/* pmustatus */ +#define PST_SLOW_WR_PENDING 0x0400 +#define PST_EXTLPOAVAIL 0x0100 +#define PST_WDRESET 0x0080 +#define PST_INTPEND 0x0040 +#define PST_SBCLKST 0x0030 +#define PST_SBCLKST_ILP 0x0010 +#define PST_SBCLKST_ALP 0x0020 +#define PST_SBCLKST_HT 0x0030 +#define PST_ALPAVAIL 0x0008 +#define PST_HTAVAIL 0x0004 +#define PST_RESINIT 0x0003 + +/* pmucapabilities */ +#define PCAP_REV_MASK 0x000000ff +#define PCAP_RC_MASK 0x00001f00 +#define PCAP_RC_SHIFT 8 +#define PCAP_TC_MASK 0x0001e000 +#define PCAP_TC_SHIFT 13 +#define PCAP_PC_MASK 0x001e0000 +#define PCAP_PC_SHIFT 17 +#define PCAP_VC_MASK 0x01e00000 +#define PCAP_VC_SHIFT 21 +#define PCAP_CC_MASK 0x1e000000 +#define PCAP_CC_SHIFT 25 +#define PCAP5_PC_MASK 0x003e0000 /**< PMU corerev >= 5 */ +#define PCAP5_PC_SHIFT 17 +#define PCAP5_VC_MASK 0x07c00000 +#define PCAP5_VC_SHIFT 22 +#define PCAP5_CC_MASK 0xf8000000 +#define PCAP5_CC_SHIFT 27 + +/* PMU Resource Request Timer registers */ +/* This is based on PmuRev0 */ +#define PRRT_TIME_MASK 0x03ff +#define PRRT_INTEN 0x0400 +#define PRRT_REQ_ACTIVE 0x0800 +#define PRRT_ALP_REQ 0x1000 +#define PRRT_HT_REQ 0x2000 +#define 
PRRT_HQ_REQ 0x4000 + +/* PMU Int Control register bits */ +#define PMU_INTC_ALP_REQ 0x1 +#define PMU_INTC_HT_REQ 0x2 +#define PMU_INTC_HQ_REQ 0x4 + +/* bit 0 of the PMU interrupt vector is asserted if this mask is enabled */ +#define RSRC_INTR_MASK_TIMER_INT_0 1 + +/* PMU resource bit position */ +#define PMURES_BIT(bit) (1 << (bit)) + +/* PMU resource number limit */ +#define PMURES_MAX_RESNUM 30 + +/* PMU chip control0 register */ +#define PMU_CHIPCTL0 0 +#define PMU43143_CC0_SDIO_DRSTR_OVR (1 << 31) /* sdio drive strength override enable */ + +/* clock req types */ +#define PMU_CC1_CLKREQ_TYPE_SHIFT 19 +#define PMU_CC1_CLKREQ_TYPE_MASK (1 << PMU_CC1_CLKREQ_TYPE_SHIFT) + +#define CLKREQ_TYPE_CONFIG_OPENDRAIN 0 +#define CLKREQ_TYPE_CONFIG_PUSHPULL 1 + +/* PMU chip control1 register */ +#define PMU_CHIPCTL1 1 +#define PMU_CC1_RXC_DLL_BYPASS 0x00010000 +#define PMU_CC1_ENABLE_BBPLL_PWR_DOWN 0x00000010 + +#define PMU_CC1_IF_TYPE_MASK 0x00000030 +#define PMU_CC1_IF_TYPE_RMII 0x00000000 +#define PMU_CC1_IF_TYPE_MII 0x00000010 +#define PMU_CC1_IF_TYPE_RGMII 0x00000020 + +#define PMU_CC1_SW_TYPE_MASK 0x000000c0 +#define PMU_CC1_SW_TYPE_EPHY 0x00000000 +#define PMU_CC1_SW_TYPE_EPHYMII 0x00000040 +#define PMU_CC1_SW_TYPE_EPHYRMII 0x00000080 +#define PMU_CC1_SW_TYPE_RGMII 0x000000c0 + +#define PMU_CC1_ENABLE_CLOSED_LOOP_MASK 0x00000080 +#define PMU_CC1_ENABLE_CLOSED_LOOP 0x00000000 + +/* PMU chip control2 register */ +#define PMU_CHIPCTL2 2 +#define PMU_CC2_FORCE_SUBCORE_PWR_SWITCH_ON (1 << 18) +#define PMU_CC2_FORCE_PHY_PWR_SWITCH_ON (1 << 19) +#define PMU_CC2_FORCE_VDDM_PWR_SWITCH_ON (1 << 20) +#define PMU_CC2_FORCE_MEMLPLDO_PWR_SWITCH_ON (1 << 21) +#define PMU_CC2_MASK_WL_DEV_WAKE (1 << 22) +#define PMU_CC2_INV_GPIO_POLARITY_PMU_WAKE (1 << 25) + + +/* PMU chip control3 register */ +#define PMU_CHIPCTL3 3 +#define PMU_CC3_ENABLE_SDIO_WAKEUP_SHIFT 19 +#define PMU_CC3_ENABLE_RF_SHIFT 22 +#define PMU_CC3_RF_DISABLE_IVALUE_SHIFT 23 + +/* PMU chip control4 register */ +#define PMU_CHIPCTL4 4 + +/* 53537 series moved switch_type and gmac_if_type to CC4 [15:14] and [13:12] */ +#define PMU_CC4_IF_TYPE_MASK 0x00003000 +#define PMU_CC4_IF_TYPE_RMII 0x00000000 +#define PMU_CC4_IF_TYPE_MII 0x00001000 +#define PMU_CC4_IF_TYPE_RGMII 0x00002000 + +#define PMU_CC4_SW_TYPE_MASK 0x0000c000 +#define PMU_CC4_SW_TYPE_EPHY 0x00000000 +#define PMU_CC4_SW_TYPE_EPHYMII 0x00004000 +#define PMU_CC4_SW_TYPE_EPHYRMII 0x00008000 +#define PMU_CC4_SW_TYPE_RGMII 0x0000c000 + +/* PMU chip control5 register */ +#define PMU_CHIPCTL5 5 + +/* PMU chip control6 register */ +#define PMU_CHIPCTL6 6 +#define PMU_CC6_ENABLE_CLKREQ_WAKEUP (1 << 4) +#define PMU_CC6_ENABLE_PMU_WAKEUP_ALP (1 << 6) + +/* PMU chip control7 register */ +#define PMU_CHIPCTL7 7 +#define PMU_CC7_ENABLE_L2REFCLKPAD_PWRDWN (1 << 25) +#define PMU_CC7_ENABLE_MDIO_RESET_WAR (1 << 27) +/* 53537 series have gmca1 gmac_if_type in cc7 [7:6](defalut 0b01) */ +#define PMU_CC7_IF_TYPE_MASK 0x000000c0 +#define PMU_CC7_IF_TYPE_RMII 0x00000000 +#define PMU_CC7_IF_TYPE_MII 0x00000040 +#define PMU_CC7_IF_TYPE_RGMII 0x00000080 + + +/* PMU corerev and chip specific PLL controls. + * PMU_PLL_XX where is PMU corerev and is an arbitrary number + * to differentiate different PLLs controlled by the same PMU rev. 
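+ *
+ * The PLL control words are reached indirectly through the
+ * pllcontrol_addr/pllcontrol_data register pair; an illustrative read of
+ * PLLCTL1, assuming the OSL W_REG()/R_REG() accessors and a chipcregs_t
+ * pointer 'cc' (sketch only):
+ *   W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
+ *   mdiv = R_REG(osh, &cc->pllcontrol_data);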
+ */ +/* pllcontrol registers */ +/* PDIV, div_phy, div_arm, div_adc, dith_sel, ioff, kpd_scale, lsb_sel, mash_sel, lf_c & lf_r */ +#define PMU0_PLL0_PLLCTL0 0 +#define PMU0_PLL0_PC0_PDIV_MASK 1 +#define PMU0_PLL0_PC0_PDIV_FREQ 25000 +#define PMU0_PLL0_PC0_DIV_ARM_MASK 0x00000038 +#define PMU0_PLL0_PC0_DIV_ARM_SHIFT 3 +#define PMU0_PLL0_PC0_DIV_ARM_BASE 8 + +/* PC0_DIV_ARM for PLLOUT_ARM */ +#define PMU0_PLL0_PC0_DIV_ARM_110MHZ 0 +#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ 1 +#define PMU0_PLL0_PC0_DIV_ARM_88MHZ 2 +#define PMU0_PLL0_PC0_DIV_ARM_80MHZ 3 /* Default */ +#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ 4 +#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ 5 +#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ 6 +#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ 7 + +/* Wildcard base, stop_mod, en_lf_tp, en_cal & lf_r2 */ +#define PMU0_PLL0_PLLCTL1 1 +#define PMU0_PLL0_PC1_WILD_INT_MASK 0xf0000000 +#define PMU0_PLL0_PC1_WILD_INT_SHIFT 28 +#define PMU0_PLL0_PC1_WILD_FRAC_MASK 0x0fffff00 +#define PMU0_PLL0_PC1_WILD_FRAC_SHIFT 8 +#define PMU0_PLL0_PC1_STOP_MOD 0x00000040 + +/* Wildcard base, vco_calvar, vco_swc, vco_var_selref, vso_ical & vco_sel_avdd */ +#define PMU0_PLL0_PLLCTL2 2 +#define PMU0_PLL0_PC2_WILD_INT_MASK 0xf +#define PMU0_PLL0_PC2_WILD_INT_SHIFT 4 + +/* pllcontrol registers */ +/* ndiv_pwrdn, pwrdn_ch, refcomp_pwrdn, dly_ch, p1div, p2div, _bypass_sdmod */ +#define PMU1_PLL0_PLLCTL0 0 +#define PMU1_PLL0_PC0_P1DIV_MASK 0x00f00000 +#define PMU1_PLL0_PC0_P1DIV_SHIFT 20 +#define PMU1_PLL0_PC0_P2DIV_MASK 0x0f000000 +#define PMU1_PLL0_PC0_P2DIV_SHIFT 24 + +/* mdiv */ +#define PMU1_PLL0_PLLCTL1 1 +#define PMU1_PLL0_PC1_M1DIV_MASK 0x000000ff +#define PMU1_PLL0_PC1_M1DIV_SHIFT 0 +#define PMU1_PLL0_PC1_M2DIV_MASK 0x0000ff00 +#define PMU1_PLL0_PC1_M2DIV_SHIFT 8 +#define PMU1_PLL0_PC1_M3DIV_MASK 0x00ff0000 +#define PMU1_PLL0_PC1_M3DIV_SHIFT 16 +#define PMU1_PLL0_PC1_M4DIV_MASK 0xff000000 +#define PMU1_PLL0_PC1_M4DIV_SHIFT 24 +#define PMU1_PLL0_PC1_M4DIV_BY_9 9 +#define PMU1_PLL0_PC1_M4DIV_BY_18 0x12 +#define PMU1_PLL0_PC1_M4DIV_BY_36 0x24 +#define PMU1_PLL0_PC1_M4DIV_BY_60 0x3C + +#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8 +#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT) +#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT) + +/* mdiv, ndiv_dither_mfb, ndiv_mode, ndiv_int */ +#define PMU1_PLL0_PLLCTL2 2 +#define PMU1_PLL0_PC2_M5DIV_MASK 0x000000ff +#define PMU1_PLL0_PC2_M5DIV_SHIFT 0 +#define PMU1_PLL0_PC2_M5DIV_BY_12 0xc +#define PMU1_PLL0_PC2_M5DIV_BY_18 0x12 +#define PMU1_PLL0_PC2_M5DIV_BY_36 0x24 +#define PMU1_PLL0_PC2_M6DIV_MASK 0x0000ff00 +#define PMU1_PLL0_PC2_M6DIV_SHIFT 8 +#define PMU1_PLL0_PC2_M6DIV_BY_18 0x12 +#define PMU1_PLL0_PC2_M6DIV_BY_36 0x24 +#define PMU1_PLL0_PC2_NDIV_MODE_MASK 0x000e0000 +#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT 17 +#define PMU1_PLL0_PC2_NDIV_MODE_MASH 1 +#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2 /**< recommended for 4319 */ +#define PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000 +#define PMU1_PLL0_PC2_NDIV_INT_SHIFT 20 + +/* ndiv_frac */ +#define PMU1_PLL0_PLLCTL3 3 +#define PMU1_PLL0_PC3_NDIV_FRAC_MASK 0x00ffffff +#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT 0 + +/* pll_ctrl */ +#define PMU1_PLL0_PLLCTL4 4 + +/* pll_ctrl, vco_rng, clkdrive_ch */ +#define PMU1_PLL0_PLLCTL5 5 +#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00 +#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8 + +#define PMU1_PLL0_PLLCTL6 6 +#define PMU1_PLL0_PLLCTL7 7 +#define PMU1_PLL0_PLLCTL8 8 + +#define PMU1_PLLCTL8_OPENLOOP_MASK (1 << 1) +#define PMU_PLL4350_OPENLOOP_MASK (1 << 7) + +/* PMU rev 2 
control words */ +#define PMU2_PHY_PLL_PLLCTL 4 +#define PMU2_SI_PLL_PLLCTL 10 + +/* PMU rev 2 */ +/* pllcontrol registers */ +/* ndiv_pwrdn, pwrdn_ch, refcomp_pwrdn, dly_ch, p1div, p2div, _bypass_sdmod */ +#define PMU2_PLL_PLLCTL0 0 +#define PMU2_PLL_PC0_P1DIV_MASK 0x00f00000 +#define PMU2_PLL_PC0_P1DIV_SHIFT 20 +#define PMU2_PLL_PC0_P2DIV_MASK 0x0f000000 +#define PMU2_PLL_PC0_P2DIV_SHIFT 24 + +/* mdiv */ +#define PMU2_PLL_PLLCTL1 1 +#define PMU2_PLL_PC1_M1DIV_MASK 0x000000ff +#define PMU2_PLL_PC1_M1DIV_SHIFT 0 +#define PMU2_PLL_PC1_M2DIV_MASK 0x0000ff00 +#define PMU2_PLL_PC1_M2DIV_SHIFT 8 +#define PMU2_PLL_PC1_M3DIV_MASK 0x00ff0000 +#define PMU2_PLL_PC1_M3DIV_SHIFT 16 +#define PMU2_PLL_PC1_M4DIV_MASK 0xff000000 +#define PMU2_PLL_PC1_M4DIV_SHIFT 24 + +/* mdiv, ndiv_dither_mfb, ndiv_mode, ndiv_int */ +#define PMU2_PLL_PLLCTL2 2 +#define PMU2_PLL_PC2_M5DIV_MASK 0x000000ff +#define PMU2_PLL_PC2_M5DIV_SHIFT 0 +#define PMU2_PLL_PC2_M6DIV_MASK 0x0000ff00 +#define PMU2_PLL_PC2_M6DIV_SHIFT 8 +#define PMU2_PLL_PC2_NDIV_MODE_MASK 0x000e0000 +#define PMU2_PLL_PC2_NDIV_MODE_SHIFT 17 +#define PMU2_PLL_PC2_NDIV_INT_MASK 0x1ff00000 +#define PMU2_PLL_PC2_NDIV_INT_SHIFT 20 + +/* ndiv_frac */ +#define PMU2_PLL_PLLCTL3 3 +#define PMU2_PLL_PC3_NDIV_FRAC_MASK 0x00ffffff +#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT 0 + +/* pll_ctrl */ +#define PMU2_PLL_PLLCTL4 4 + +/* pll_ctrl, vco_rng, clkdrive_ch */ +#define PMU2_PLL_PLLCTL5 5 +#define PMU2_PLL_PC5_CLKDRIVE_CH1_MASK 0x00000f00 +#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT 8 +#define PMU2_PLL_PC5_CLKDRIVE_CH2_MASK 0x0000f000 +#define PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT 12 +#define PMU2_PLL_PC5_CLKDRIVE_CH3_MASK 0x000f0000 +#define PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT 16 +#define PMU2_PLL_PC5_CLKDRIVE_CH4_MASK 0x00f00000 +#define PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT 20 +#define PMU2_PLL_PC5_CLKDRIVE_CH5_MASK 0x0f000000 +#define PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT 24 +#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK 0xf0000000 +#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT 28 + +/* PMU rev 5 (& 6) */ +#define PMU5_PLL_P1P2_OFF 0 +#define PMU5_PLL_P1_MASK 0x0f000000 +#define PMU5_PLL_P1_SHIFT 24 +#define PMU5_PLL_P2_MASK 0x00f00000 +#define PMU5_PLL_P2_SHIFT 20 +#define PMU5_PLL_M14_OFF 1 +#define PMU5_PLL_MDIV_MASK 0x000000ff +#define PMU5_PLL_MDIV_WIDTH 8 +#define PMU5_PLL_NM5_OFF 2 +#define PMU5_PLL_NDIV_MASK 0xfff00000 +#define PMU5_PLL_NDIV_SHIFT 20 +#define PMU5_PLL_NDIV_MODE_MASK 0x000e0000 +#define PMU5_PLL_NDIV_MODE_SHIFT 17 +#define PMU5_PLL_FMAB_OFF 3 +#define PMU5_PLL_MRAT_MASK 0xf0000000 +#define PMU5_PLL_MRAT_SHIFT 28 +#define PMU5_PLL_ABRAT_MASK 0x08000000 +#define PMU5_PLL_ABRAT_SHIFT 27 +#define PMU5_PLL_FDIV_MASK 0x07ffffff +#define PMU5_PLL_PLLCTL_OFF 4 +#define PMU5_PLL_PCHI_OFF 5 +#define PMU5_PLL_PCHI_MASK 0x0000003f + +/* pmu XtalFreqRatio */ +#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF +#define PMU_XTALFREQ_REG_MEASURE_MASK 0x80000000 +#define PMU_XTALFREQ_REG_MEASURE_SHIFT 31 + +/* Divider allocation in 4716/47162/5356/5357 */ +#define PMU5_MAINPLL_CPU 1 +#define PMU5_MAINPLL_MEM 2 +#define PMU5_MAINPLL_SI 3 + +/* 4706 PMU */ +#define PMU4706_MAINPLL_PLL0 0 +#define PMU6_4706_PROCPLL_OFF 4 /**< The CPU PLL */ +#define PMU6_4706_PROC_P2DIV_MASK 0x000f0000 +#define PMU6_4706_PROC_P2DIV_SHIFT 16 +#define PMU6_4706_PROC_P1DIV_MASK 0x0000f000 +#define PMU6_4706_PROC_P1DIV_SHIFT 12 +#define PMU6_4706_PROC_NDIV_INT_MASK 0x00000ff8 +#define PMU6_4706_PROC_NDIV_INT_SHIFT 3 +#define PMU6_4706_PROC_NDIV_MODE_MASK 0x00000007 +#define PMU6_4706_PROC_NDIV_MODE_SHIFT 0 + +#define 
PMU7_PLL_PLLCTL7 7 +#define PMU7_PLL_CTL7_M4DIV_MASK 0xff000000 +#define PMU7_PLL_CTL7_M4DIV_SHIFT 24 +#define PMU7_PLL_CTL7_M4DIV_BY_6 6 +#define PMU7_PLL_CTL7_M4DIV_BY_12 0xc +#define PMU7_PLL_CTL7_M4DIV_BY_24 0x18 +#define PMU7_PLL_PLLCTL8 8 +#define PMU7_PLL_CTL8_M5DIV_MASK 0x000000ff +#define PMU7_PLL_CTL8_M5DIV_SHIFT 0 +#define PMU7_PLL_CTL8_M5DIV_BY_8 8 +#define PMU7_PLL_CTL8_M5DIV_BY_12 0xc +#define PMU7_PLL_CTL8_M5DIV_BY_24 0x18 +#define PMU7_PLL_CTL8_M6DIV_MASK 0x0000ff00 +#define PMU7_PLL_CTL8_M6DIV_SHIFT 8 +#define PMU7_PLL_CTL8_M6DIV_BY_12 0xc +#define PMU7_PLL_CTL8_M6DIV_BY_24 0x18 +#define PMU7_PLL_PLLCTL11 11 +#define PMU7_PLL_PLLCTL11_MASK 0xffffff00 +#define PMU7_PLL_PLLCTL11_VAL 0x22222200 + +/* PMU rev 15 */ +#define PMU15_PLL_PLLCTL0 0 +#define PMU15_PLL_PC0_CLKSEL_MASK 0x00000003 +#define PMU15_PLL_PC0_CLKSEL_SHIFT 0 +#define PMU15_PLL_PC0_FREQTGT_MASK 0x003FFFFC +#define PMU15_PLL_PC0_FREQTGT_SHIFT 2 +#define PMU15_PLL_PC0_PRESCALE_MASK 0x00C00000 +#define PMU15_PLL_PC0_PRESCALE_SHIFT 22 +#define PMU15_PLL_PC0_KPCTRL_MASK 0x07000000 +#define PMU15_PLL_PC0_KPCTRL_SHIFT 24 +#define PMU15_PLL_PC0_FCNTCTRL_MASK 0x38000000 +#define PMU15_PLL_PC0_FCNTCTRL_SHIFT 27 +#define PMU15_PLL_PC0_FDCMODE_MASK 0x40000000 +#define PMU15_PLL_PC0_FDCMODE_SHIFT 30 +#define PMU15_PLL_PC0_CTRLBIAS_MASK 0x80000000 +#define PMU15_PLL_PC0_CTRLBIAS_SHIFT 31 + +#define PMU15_PLL_PLLCTL1 1 +#define PMU15_PLL_PC1_BIAS_CTLM_MASK 0x00000060 +#define PMU15_PLL_PC1_BIAS_CTLM_SHIFT 5 +#define PMU15_PLL_PC1_BIAS_CTLM_RST_MASK 0x00000040 +#define PMU15_PLL_PC1_BIAS_CTLM_RST_SHIFT 6 +#define PMU15_PLL_PC1_BIAS_SS_DIVR_MASK 0x0001FF80 +#define PMU15_PLL_PC1_BIAS_SS_DIVR_SHIFT 7 +#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_MASK 0x03FE0000 +#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_SHIFT 17 +#define PMU15_PLL_PC1_BIAS_INTG_BW_MASK 0x0C000000 +#define PMU15_PLL_PC1_BIAS_INTG_BW_SHIFT 26 +#define PMU15_PLL_PC1_BIAS_INTG_BYP_MASK 0x10000000 +#define PMU15_PLL_PC1_BIAS_INTG_BYP_SHIFT 28 +#define PMU15_PLL_PC1_OPENLP_EN_MASK 0x40000000 +#define PMU15_PLL_PC1_OPENLP_EN_SHIFT 30 + +#define PMU15_PLL_PLLCTL2 2 +#define PMU15_PLL_PC2_CTEN_MASK 0x00000001 +#define PMU15_PLL_PC2_CTEN_SHIFT 0 + +#define PMU15_PLL_PLLCTL3 3 +#define PMU15_PLL_PC3_DITHER_EN_MASK 0x00000001 +#define PMU15_PLL_PC3_DITHER_EN_SHIFT 0 +#define PMU15_PLL_PC3_DCOCTLSP_MASK 0xFE000000 +#define PMU15_PLL_PC3_DCOCTLSP_SHIFT 25 +#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_MASK 0x01 +#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_SHIFT 0 +#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_MASK 0x02 +#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_SHIFT 1 +#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_MASK 0x04 +#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_SHIFT 2 +#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_MASK 0x18 +#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_SHIFT 3 +#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_MASK 0x60 +#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_SHIFT 5 +#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV1 0 +#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV2 1 +#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV3 2 +#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV5 3 + +#define PMU15_PLL_PLLCTL4 4 +#define PMU15_PLL_PC4_FLLCLK1_DIV_MASK 0x00000007 +#define PMU15_PLL_PC4_FLLCLK1_DIV_SHIFT 0 +#define PMU15_PLL_PC4_FLLCLK2_DIV_MASK 0x00000038 +#define PMU15_PLL_PC4_FLLCLK2_DIV_SHIFT 3 +#define PMU15_PLL_PC4_FLLCLK3_DIV_MASK 0x000001C0 +#define PMU15_PLL_PC4_FLLCLK3_DIV_SHIFT 6 +#define PMU15_PLL_PC4_DBGMODE_MASK 0x00000E00 +#define PMU15_PLL_PC4_DBGMODE_SHIFT 9 +#define PMU15_PLL_PC4_FLL480_CTLSP_LK_MASK 0x00001000 +#define 
PMU15_PLL_PC4_FLL480_CTLSP_LK_SHIFT 12 +#define PMU15_PLL_PC4_FLL480_CTLSP_MASK 0x000FE000 +#define PMU15_PLL_PC4_FLL480_CTLSP_SHIFT 13 +#define PMU15_PLL_PC4_DINPOL_MASK 0x00100000 +#define PMU15_PLL_PC4_DINPOL_SHIFT 20 +#define PMU15_PLL_PC4_CLKOUT_PD_MASK 0x00200000 +#define PMU15_PLL_PC4_CLKOUT_PD_SHIFT 21 +#define PMU15_PLL_PC4_CLKDIV2_PD_MASK 0x00400000 +#define PMU15_PLL_PC4_CLKDIV2_PD_SHIFT 22 +#define PMU15_PLL_PC4_CLKDIV4_PD_MASK 0x00800000 +#define PMU15_PLL_PC4_CLKDIV4_PD_SHIFT 23 +#define PMU15_PLL_PC4_CLKDIV8_PD_MASK 0x01000000 +#define PMU15_PLL_PC4_CLKDIV8_PD_SHIFT 24 +#define PMU15_PLL_PC4_CLKDIV16_PD_MASK 0x02000000 +#define PMU15_PLL_PC4_CLKDIV16_PD_SHIFT 25 +#define PMU15_PLL_PC4_TEST_EN_MASK 0x04000000 +#define PMU15_PLL_PC4_TEST_EN_SHIFT 26 + +#define PMU15_PLL_PLLCTL5 5 +#define PMU15_PLL_PC5_FREQTGT_MASK 0x000FFFFF +#define PMU15_PLL_PC5_FREQTGT_SHIFT 0 +#define PMU15_PLL_PC5_DCOCTLSP_MASK 0x07F00000 +#define PMU15_PLL_PC5_DCOCTLSP_SHIFT 20 +#define PMU15_PLL_PC5_PRESCALE_MASK 0x18000000 +#define PMU15_PLL_PC5_PRESCALE_SHIFT 27 + +#define PMU15_PLL_PLLCTL6 6 +#define PMU15_PLL_PC6_FREQTGT_MASK 0x000FFFFF +#define PMU15_PLL_PC6_FREQTGT_SHIFT 0 +#define PMU15_PLL_PC6_DCOCTLSP_MASK 0x07F00000 +#define PMU15_PLL_PC6_DCOCTLSP_SHIFT 20 +#define PMU15_PLL_PC6_PRESCALE_MASK 0x18000000 +#define PMU15_PLL_PC6_PRESCALE_SHIFT 27 + +#define PMU15_FREQTGT_480_DEFAULT 0x19AB1 +#define PMU15_FREQTGT_492_DEFAULT 0x1A4F5 +#define PMU15_ARM_96MHZ 96000000 /**< 96 Mhz */ +#define PMU15_ARM_98MHZ 98400000 /**< 98.4 Mhz */ +#define PMU15_ARM_97MHZ 97000000 /**< 97 Mhz */ + + +#define PMU17_PLLCTL2_NDIVTYPE_MASK 0x00000070 +#define PMU17_PLLCTL2_NDIVTYPE_SHIFT 4 + +#define PMU17_PLLCTL2_NDIV_MODE_INT 0 +#define PMU17_PLLCTL2_NDIV_MODE_INT1B8 1 +#define PMU17_PLLCTL2_NDIV_MODE_MASH111 2 +#define PMU17_PLLCTL2_NDIV_MODE_MASH111B8 3 + +#define PMU17_PLLCTL0_BBPLL_PWRDWN 0 +#define PMU17_PLLCTL0_BBPLL_DRST 3 +#define PMU17_PLLCTL0_BBPLL_DISBL_CLK 8 + +/* PLL usage in 4716/47162 */ +#define PMU4716_MAINPLL_PLL0 12 + +/* PLL usage in 4335 */ +#define PMU4335_PLL0_PC2_P1DIV_MASK 0x000f0000 +#define PMU4335_PLL0_PC2_P1DIV_SHIFT 16 +#define PMU4335_PLL0_PC2_NDIV_INT_MASK 0xff800000 +#define PMU4335_PLL0_PC2_NDIV_INT_SHIFT 23 +#define PMU4335_PLL0_PC1_MDIV2_MASK 0x0000ff00 +#define PMU4335_PLL0_PC1_MDIV2_SHIFT 8 + + +/* PLL usage in 5356/5357 */ +#define PMU5356_MAINPLL_PLL0 0 +#define PMU5357_MAINPLL_PLL0 0 + +/* 4716/47162 resources */ +#define RES4716_PROC_PLL_ON 0x00000040 +#define RES4716_PROC_HT_AVAIL 0x00000080 + +/* 4716/4717/4718 Chip specific ChipControl register bits */ +#define CCTRL_471X_I2S_PINS_ENABLE 0x0080 /* I2S pins off by default, shared w/ pflash */ + +/* 5357 Chip specific ChipControl register bits */ +/* 2nd - 32-bit reg */ +#define CCTRL_5357_I2S_PINS_ENABLE 0x00040000 /* I2S pins enable */ +#define CCTRL_5357_I2CSPI_PINS_ENABLE 0x00080000 /* I2C/SPI pins enable */ + +/* 5354 resources */ +#define RES5354_EXT_SWITCHER_PWM 0 /**< 0x00001 */ +#define RES5354_BB_SWITCHER_PWM 1 /**< 0x00002 */ +#define RES5354_BB_SWITCHER_BURST 2 /**< 0x00004 */ +#define RES5354_BB_EXT_SWITCHER_BURST 3 /**< 0x00008 */ +#define RES5354_ILP_REQUEST 4 /**< 0x00010 */ +#define RES5354_RADIO_SWITCHER_PWM 5 /**< 0x00020 */ +#define RES5354_RADIO_SWITCHER_BURST 6 /**< 0x00040 */ +#define RES5354_ROM_SWITCH 7 /**< 0x00080 */ +#define RES5354_PA_REF_LDO 8 /**< 0x00100 */ +#define RES5354_RADIO_LDO 9 /**< 0x00200 */ +#define RES5354_AFE_LDO 10 /**< 0x00400 */ +#define RES5354_PLL_LDO 11 /**< 0x00800 */ 
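+/* Usage sketch (illustrative; `deps` is an assumed local, not a vendor API):
+ * resource numbers such as the RES5354_* values in this list are bit
+ * positions, and PMURES_BIT() above turns them into bits for the PMU
+ * resource dependency and min/max resource masks, e.g.:
+ *
+ *   uint32_t deps = PMURES_BIT(RES5354_PLL_LDO) |
+ *                   PMURES_BIT(RES5354_ILP_REQUEST);
+ */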
+#define RES5354_BG_FILTBYP 12 /**< 0x01000 */
+#define RES5354_TX_FILTBYP 13 /**< 0x02000 */
+#define RES5354_RX_FILTBYP 14 /**< 0x04000 */
+#define RES5354_XTAL_PU 15 /**< 0x08000 */
+#define RES5354_XTAL_EN 16 /**< 0x10000 */
+#define RES5354_BB_PLL_FILTBYP 17 /**< 0x20000 */
+#define RES5354_RF_PLL_FILTBYP 18 /**< 0x40000 */
+#define RES5354_BB_PLL_PU 19 /**< 0x80000 */
+
+/* 5357 Chip specific ChipControl register bits */
+#define CCTRL5357_EXTPA (1<<14) /* extPA in ChipControl 1, bit 14 */
+#define CCTRL5357_ANT_MUX_2o3 (1<<15) /* 2o3 in ChipControl 1, bit 15 */
+#define CCTRL5357_NFLASH (1<<16) /* Nandflash in ChipControl 1, bit 16 */
+
+/* 43217 Chip specific ChipControl register bits */
+#define CCTRL43217_EXTPA_C0 (1<<13) /* core0 extPA in ChipControl 1, bit 13 */
+#define CCTRL43217_EXTPA_C1 (1<<8) /* core1 extPA in ChipControl 1, bit 8 */
+
+/* 43228 Chip specific ChipControl register bits */
+#define CCTRL43228_EXTPA_C0 (1<<14) /* core1 extPA in ChipControl 1, bit 14 */
+#define CCTRL43228_EXTPA_C1 (1<<9) /* core0 extPA in ChipControl 1, bit 9 */
+
+/* 4328 resources */
+#define RES4328_EXT_SWITCHER_PWM 0 /**< 0x00001 */
+#define RES4328_BB_SWITCHER_PWM 1 /**< 0x00002 */
+#define RES4328_BB_SWITCHER_BURST 2 /**< 0x00004 */
+#define RES4328_BB_EXT_SWITCHER_BURST 3 /**< 0x00008 */
+#define RES4328_ILP_REQUEST 4 /**< 0x00010 */
+#define RES4328_RADIO_SWITCHER_PWM 5 /**< 0x00020 */
+#define RES4328_RADIO_SWITCHER_BURST 6 /**< 0x00040 */
+#define RES4328_ROM_SWITCH 7 /**< 0x00080 */
+#define RES4328_PA_REF_LDO 8 /**< 0x00100 */
+#define RES4328_RADIO_LDO 9 /**< 0x00200 */
+#define RES4328_AFE_LDO 10 /**< 0x00400 */
+#define RES4328_PLL_LDO 11 /**< 0x00800 */
+#define RES4328_BG_FILTBYP 12 /**< 0x01000 */
+#define RES4328_TX_FILTBYP 13 /**< 0x02000 */
+#define RES4328_RX_FILTBYP 14 /**< 0x04000 */
+#define RES4328_XTAL_PU 15 /**< 0x08000 */
+#define RES4328_XTAL_EN 16 /**< 0x10000 */
+#define RES4328_BB_PLL_FILTBYP 17 /**< 0x20000 */
+#define RES4328_RF_PLL_FILTBYP 18 /**< 0x40000 */
+#define RES4328_BB_PLL_PU 19 /**< 0x80000 */
+
+/* 4325 A0/A1 resources */
+#define RES4325_BUCK_BOOST_BURST 0 /**< 0x00000001 */
+#define RES4325_CBUCK_BURST 1 /**< 0x00000002 */
+#define RES4325_CBUCK_PWM 2 /**< 0x00000004 */
+#define RES4325_CLDO_CBUCK_BURST 3 /**< 0x00000008 */
+#define RES4325_CLDO_CBUCK_PWM 4 /**< 0x00000010 */
+#define RES4325_BUCK_BOOST_PWM 5 /**< 0x00000020 */
+#define RES4325_ILP_REQUEST 6 /**< 0x00000040 */
+#define RES4325_ABUCK_BURST 7 /**< 0x00000080 */
+#define RES4325_ABUCK_PWM 8 /**< 0x00000100 */
+#define RES4325_LNLDO1_PU 9 /**< 0x00000200 */
+#define RES4325_OTP_PU 10 /**< 0x00000400 */
+#define RES4325_LNLDO3_PU 11 /**< 0x00000800 */
+#define RES4325_LNLDO4_PU 12 /**< 0x00001000 */
+#define RES4325_XTAL_PU 13 /**< 0x00002000 */
+#define RES4325_ALP_AVAIL 14 /**< 0x00004000 */
+#define RES4325_RX_PWRSW_PU 15 /**< 0x00008000 */
+#define RES4325_TX_PWRSW_PU 16 /**< 0x00010000 */
+#define RES4325_RFPLL_PWRSW_PU 17 /**< 0x00020000 */
+#define RES4325_LOGEN_PWRSW_PU 18 /**< 0x00040000 */
+#define RES4325_AFE_PWRSW_PU 19 /**< 0x00080000 */
+#define RES4325_BBPLL_PWRSW_PU 20 /**< 0x00100000 */
+#define RES4325_HT_AVAIL 21 /**< 0x00200000 */
+
+/* 4325 B0/C0 resources */
+#define RES4325B0_CBUCK_LPOM 1 /**< 0x00000002 */
+#define RES4325B0_CBUCK_BURST 2 /**< 0x00000004 */
+#define RES4325B0_CBUCK_PWM 3 /**< 0x00000008 */
+#define RES4325B0_CLDO_PU 4 /**< 0x00000010 */
+
+/* 4325 C1 resources */
+#define RES4325C1_LNLDO2_PU 12 /**< 0x00001000 */
+
+/* 4325 chip-specific ChipStatus register bits */
+#define CST4325_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4325_DEFCIS_SEL 0 /**< OTP is powered up, use def. CIS, no SPROM */
+#define CST4325_SPROM_SEL 1 /**< OTP is powered up, SPROM is present */
+#define CST4325_OTP_SEL 2 /**< OTP is powered up, no SPROM */
+#define CST4325_OTP_PWRDN 3 /**< OTP is powered down, SPROM is present */
+#define CST4325_SDIO_USB_MODE_MASK 0x00000004
+#define CST4325_SDIO_USB_MODE_SHIFT 2
+#define CST4325_RCAL_VALID_MASK 0x00000008
+#define CST4325_RCAL_VALID_SHIFT 3
+#define CST4325_RCAL_VALUE_MASK 0x000001f0
+#define CST4325_RCAL_VALUE_SHIFT 4
+#define CST4325_PMUTOP_2B_MASK 0x00000200 /**< 1 for 2b, 0 for 2a */
+#define CST4325_PMUTOP_2B_SHIFT 9
+
+#define RES4329_RESERVED0 0 /**< 0x00000001 */
+#define RES4329_CBUCK_LPOM 1 /**< 0x00000002 */
+#define RES4329_CBUCK_BURST 2 /**< 0x00000004 */
+#define RES4329_CBUCK_PWM 3 /**< 0x00000008 */
+#define RES4329_CLDO_PU 4 /**< 0x00000010 */
+#define RES4329_PALDO_PU 5 /**< 0x00000020 */
+#define RES4329_ILP_REQUEST 6 /**< 0x00000040 */
+#define RES4329_RESERVED7 7 /**< 0x00000080 */
+#define RES4329_RESERVED8 8 /**< 0x00000100 */
+#define RES4329_LNLDO1_PU 9 /**< 0x00000200 */
+#define RES4329_OTP_PU 10 /**< 0x00000400 */
+#define RES4329_RESERVED11 11 /**< 0x00000800 */
+#define RES4329_LNLDO2_PU 12 /**< 0x00001000 */
+#define RES4329_XTAL_PU 13 /**< 0x00002000 */
+#define RES4329_ALP_AVAIL 14 /**< 0x00004000 */
+#define RES4329_RX_PWRSW_PU 15 /**< 0x00008000 */
+#define RES4329_TX_PWRSW_PU 16 /**< 0x00010000 */
+#define RES4329_RFPLL_PWRSW_PU 17 /**< 0x00020000 */
+#define RES4329_LOGEN_PWRSW_PU 18 /**< 0x00040000 */
+#define RES4329_AFE_PWRSW_PU 19 /**< 0x00080000 */
+#define RES4329_BBPLL_PWRSW_PU 20 /**< 0x00100000 */
+#define RES4329_HT_AVAIL 21 /**< 0x00200000 */
+
+#define CST4329_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4329_DEFCIS_SEL 0 /**< OTP is powered up, use def. CIS, no SPROM */
+#define CST4329_SPROM_SEL 1 /**< OTP is powered up, SPROM is present */
+#define CST4329_OTP_SEL 2 /**< OTP is powered up, no SPROM */
+#define CST4329_OTP_PWRDN 3 /**< OTP is powered down, SPROM is present */
+#define CST4329_SPI_SDIO_MODE_MASK 0x00000004
+#define CST4329_SPI_SDIO_MODE_SHIFT 2
+
+/* 4312 chip-specific ChipStatus register bits */
+#define CST4312_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4312_DEFCIS_SEL 0 /**< OTP is powered up, use def. CIS, no SPROM */
+#define CST4312_SPROM_SEL 1 /**< OTP is powered up, SPROM is present */
+#define CST4312_OTP_SEL 2 /**< OTP is powered up, no SPROM */
+#define CST4312_OTP_BAD 3 /**< OTP is broken, SPROM is present */
+
+/* 4312 resources (all PMU chips with little memory constraint) */
+#define RES4312_SWITCHER_BURST 0 /**< 0x00000001 */
+#define RES4312_SWITCHER_PWM 1 /**< 0x00000002 */
+#define RES4312_PA_REF_LDO 2 /**< 0x00000004 */
+#define RES4312_CORE_LDO_BURST 3 /**< 0x00000008 */
+#define RES4312_CORE_LDO_PWM 4 /**< 0x00000010 */
+#define RES4312_RADIO_LDO 5 /**< 0x00000020 */
+#define RES4312_ILP_REQUEST 6 /**< 0x00000040 */
+#define RES4312_BG_FILTBYP 7 /**< 0x00000080 */
+#define RES4312_TX_FILTBYP 8 /**< 0x00000100 */
+#define RES4312_RX_FILTBYP 9 /**< 0x00000200 */
+#define RES4312_XTAL_PU 10 /**< 0x00000400 */
+#define RES4312_ALP_AVAIL 11 /**< 0x00000800 */
+#define RES4312_BB_PLL_FILTBYP 12 /**< 0x00001000 */
+#define RES4312_RF_PLL_FILTBYP 13 /**< 0x00002000 */
+#define RES4312_HT_AVAIL 14 /**< 0x00004000 */
+
+/* 4322 resources */
+#define RES4322_RF_LDO 0
+#define RES4322_ILP_REQUEST 1
+#define RES4322_XTAL_PU 2
+#define RES4322_ALP_AVAIL 3
+#define RES4322_SI_PLL_ON 4
+#define RES4322_HT_SI_AVAIL 5
+#define RES4322_PHY_PLL_ON 6
+#define RES4322_HT_PHY_AVAIL 7
+#define RES4322_OTP_PU 8
+
+/* 4322 chip-specific ChipStatus register bits */
+#define CST4322_XTAL_FREQ_20_40MHZ 0x00000020
+#define CST4322_SPROM_OTP_SEL_MASK 0x000000c0
+#define CST4322_SPROM_OTP_SEL_SHIFT 6
+#define CST4322_NO_SPROM_OTP 0 /**< no OTP, no SPROM */
+#define CST4322_SPROM_PRESENT 1 /**< SPROM is present */
+#define CST4322_OTP_PRESENT 2 /**< OTP is present */
+#define CST4322_PCI_OR_USB 0x00000100
+#define CST4322_BOOT_MASK 0x00000600
+#define CST4322_BOOT_SHIFT 9
+#define CST4322_BOOT_FROM_SRAM 0 /**< boot from SRAM, ARM in reset */
+#define CST4322_BOOT_FROM_ROM 1 /**< boot from ROM */
+#define CST4322_BOOT_FROM_FLASH 2 /**< boot from FLASH */
+#define CST4322_BOOT_FROM_INVALID 3
+#define CST4322_ILP_DIV_EN 0x00000800
+#define CST4322_FLASH_TYPE_MASK 0x00001000
+#define CST4322_FLASH_TYPE_SHIFT 12
+#define CST4322_FLASH_TYPE_SHIFT_ST 0 /**< ST serial FLASH */
+#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1 /**< ATMEL flash */
+#define CST4322_ARM_TAP_SEL 0x00002000
+#define CST4322_RES_INIT_MODE_MASK 0x0000c000
+#define CST4322_RES_INIT_MODE_SHIFT 14
+#define CST4322_RES_INIT_MODE_ILPAVAIL 0 /**< resinitmode: ILP available */
+#define CST4322_RES_INIT_MODE_ILPREQ 1 /**< resinitmode: ILP request */
+#define CST4322_RES_INIT_MODE_ALPAVAIL 2 /**< resinitmode: ALP available */
+#define CST4322_RES_INIT_MODE_HTAVAIL 3 /**< resinitmode: HT available */
+#define CST4322_PCIPLLCLK_GATING 0x00010000
+#define CST4322_CLK_SWITCH_PCI_TO_ALP 0x00020000
+#define CST4322_PCI_CARDBUS_MODE 0x00040000
+
+/* 43224 chip-specific ChipControl register bits */
+#define CCTRL43224_GPIO_TOGGLE 0x8000 /* gpio[3:0] pins as btcoex or s/w gpio */
+#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0 /* 12 mA drive strength */
+#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0 /* 12 mA drive strength for later 43224s */
+
+/* 43236 resources */
+#define RES43236_REGULATOR 0
+#define RES43236_ILP_REQUEST 1
+#define RES43236_XTAL_PU 2
+#define RES43236_ALP_AVAIL 3
+#define RES43236_SI_PLL_ON 4
+#define RES43236_HT_SI_AVAIL 5
+
+/* 43236 chip-specific ChipControl register bits */
+#define CCTRL43236_BT_COEXIST (1<<0) /**< 0 disable */
+#define CCTRL43236_SECI (1<<1) /**< 0 SECI is disabled (JTAG functional) */
+#define CCTRL43236_EXT_LNA (1<<2) /**< 0 disable */
+#define CCTRL43236_ANT_MUX_2o3 (1<<3) /**< 2o3 mux, chipcontrol bit 3 */
+#define CCTRL43236_GSIO (1<<4) /**< 0 disable */
+
+/* 43236 Chip specific ChipStatus register bits */
+#define CST43236_SFLASH_MASK 0x00000040
+#define CST43236_OTP_SEL_MASK 0x00000080
+#define CST43236_OTP_SEL_SHIFT 7
+#define CST43236_HSIC_MASK 0x00000100 /**< USB/HSIC */
+#define CST43236_BP_CLK 0x00000200 /**< 120/96Mbps */
+#define CST43236_BOOT_MASK 0x00001800
+#define CST43236_BOOT_SHIFT 11
+#define CST43236_BOOT_FROM_SRAM 0 /**< boot from SRAM, ARM in reset */
+#define CST43236_BOOT_FROM_ROM 1 /**< boot from ROM */
+#define CST43236_BOOT_FROM_FLASH 2 /**< boot from FLASH */
+#define CST43236_BOOT_FROM_INVALID 3
+
+/* 43237 resources */
+#define RES43237_REGULATOR 0
+#define RES43237_ILP_REQUEST 1
+#define RES43237_XTAL_PU 2
+#define RES43237_ALP_AVAIL 3
+#define RES43237_SI_PLL_ON 4
+#define RES43237_HT_SI_AVAIL 5
+
+/* 43237 chip-specific ChipControl register bits */
+#define CCTRL43237_BT_COEXIST (1<<0) /**< 0 disable */
+#define CCTRL43237_SECI (1<<1) /**< 0 SECI is disabled (JTAG functional) */
+#define CCTRL43237_EXT_LNA (1<<2) /**< 0 disable */
+#define CCTRL43237_ANT_MUX_2o3 (1<<3) /**< 2o3 mux, chipcontrol bit 3 */
+#define CCTRL43237_GSIO (1<<4) /**< 0 disable */
+
+/* 43237 Chip specific ChipStatus register bits */
+#define CST43237_SFLASH_MASK 0x00000040
+#define CST43237_OTP_SEL_MASK 0x00000080
+#define CST43237_OTP_SEL_SHIFT 7
+#define CST43237_HSIC_MASK 0x00000100 /**< USB/HSIC */
+#define CST43237_BP_CLK 0x00000200 /**< 120/96Mbps */
+#define CST43237_BOOT_MASK 0x00001800
+#define CST43237_BOOT_SHIFT 11
+#define CST43237_BOOT_FROM_SRAM 0 /**< boot from SRAM, ARM in reset */
+#define CST43237_BOOT_FROM_ROM 1 /**< boot from ROM */
+#define CST43237_BOOT_FROM_FLASH 2 /**< boot from FLASH */
+#define CST43237_BOOT_FROM_INVALID 3
+
+/* 43239 resources */
+#define RES43239_OTP_PU 9
+#define RES43239_MACPHY_CLKAVAIL 23
+#define RES43239_HT_AVAIL 24
+
+/* 43239 Chip specific ChipStatus register bits */
+#define CST43239_SPROM_MASK 0x00000002
+#define CST43239_SFLASH_MASK 0x00000004
+#define CST43239_RES_INIT_MODE_SHIFT 7
+#define CST43239_RES_INIT_MODE_MASK 0x000001f0
+#define CST43239_CHIPMODE_SDIOD(cs) ((cs) & (1 << 15)) /**< SDIO || gSPI */
+#define CST43239_CHIPMODE_USB20D(cs) (~(cs) & (1 << 15)) /**< USB || USBDA */
+#define CST43239_CHIPMODE_SDIO(cs) (((cs) & (1 << 0)) == 0) /**< SDIO */
+#define CST43239_CHIPMODE_GSPI(cs) (((cs) & (1 << 0)) == (1 << 0)) /**< gSPI */
+
+/* 4324 resources */
+/* 43242 uses the same PMU as 4324 */
+#define RES4324_LPLDO_PU 0
+#define RES4324_RESET_PULLDN_DIS 1
+#define RES4324_PMU_BG_PU 2
+#define RES4324_HSIC_LDO_PU 3
+#define RES4324_CBUCK_LPOM_PU 4
+#define RES4324_CBUCK_PFM_PU 5
+#define RES4324_CLDO_PU 6
+#define RES4324_LPLDO2_LVM 7
+#define RES4324_LNLDO1_PU 8
+#define RES4324_LNLDO2_PU 9
+#define RES4324_LDO3P3_PU 10
+#define RES4324_OTP_PU 11
+#define RES4324_XTAL_PU 12
+#define RES4324_BBPLL_PU 13
+#define RES4324_LQ_AVAIL 14
+#define RES4324_WL_CORE_READY 17
+#define RES4324_ILP_REQ 18
+#define RES4324_ALP_AVAIL 19
+#define RES4324_PALDO_PU 20
+#define RES4324_RADIO_PU 21
+#define RES4324_SR_CLK_STABLE 22
+#define RES4324_SR_SAVE_RESTORE 23
+#define RES4324_SR_PHY_PWRSW 24
+#define RES4324_SR_PHY_PIC 25
+#define RES4324_SR_SUBCORE_PWRSW 26
+#define RES4324_SR_SUBCORE_PIC 27
+#define RES4324_SR_MEM_PM0 28
+#define RES4324_HT_AVAIL 29
+#define RES4324_MACPHY_CLKAVAIL 30
+
+/* 4324 Chip specific ChipStatus register bits */
+#define CST4324_SPROM_MASK 0x00000080
+#define CST4324_SFLASH_MASK 0x00400000
+#define CST4324_RES_INIT_MODE_SHIFT 10
+#define CST4324_RES_INIT_MODE_MASK 0x00000c00
+#define CST4324_CHIPMODE_MASK 0x7
+#define CST4324_CHIPMODE_SDIOD(cs) ((~(cs)) & (1 << 2)) /**< SDIO || gSPI */
+#define CST4324_CHIPMODE_USB20D(cs) (((cs) & CST4324_CHIPMODE_MASK) == 0x6) /**< USB || USBDA */
+
+/* 43242 Chip specific ChipStatus register bits */
+#define CST43242_SFLASH_MASK 0x00000008
+#define CST43242_SR_HALT (1<<25)
+#define CST43242_SR_CHIP_STATUS_2 27 /* bit 27 */
+
+/* 4331 resources */
+#define RES4331_REGULATOR 0
+#define RES4331_ILP_REQUEST 1
+#define RES4331_XTAL_PU 2
+#define RES4331_ALP_AVAIL 3
+#define RES4331_SI_PLL_ON 4
+#define RES4331_HT_SI_AVAIL 5
+
+/* 4331 chip-specific ChipControl register bits */
+#define CCTRL4331_BT_COEXIST (1<<0) /**< 0 disable */
+#define CCTRL4331_SECI (1<<1) /**< 0 SECI is disabled (JTAG functional) */
+#define CCTRL4331_EXT_LNA_G (1<<2) /**< 0 disable */
+#define CCTRL4331_SPROM_GPIO13_15 (1<<3) /**< sprom/gpio13-15 mux */
+#define CCTRL4331_EXTPA_EN (1<<4) /**< 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5) /**< set drive out GPIO_CLK on sprom_cs pin */
+#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6) /**< use sprom_cs pin as PCIE mdio interface */
+#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7) /* aband extpa will be at gpio2/5 and sprom_dout */
+#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8) /**< override core control on pipe_AuxClkEnable */
+#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9) /**< override core control on pipe_AuxPowerDown */
+#define CCTRL4331_PCIE_AUXCLKEN (1<<10) /**< pcie_auxclkenable */
+#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11) /**< pcie_pipe_pllpowerdown */
+#define CCTRL4331_EXTPA_EN2 (1<<12) /**< 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_EXT_LNA_A (1<<13) /**< 0 disable */
+#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16) /**< enable bt_shd0 at gpio4 */
+#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17) /**< enable bt_shd1 at gpio5 */
+#define CCTRL4331_EXTPA_ANA_EN (1<<24) /**< 0 ext pa disable, 1 ext pa enabled */
+
+/* 4331 Chip specific ChipStatus register bits */
+#define CST4331_XTAL_FREQ 0x00000001 /**< crystal frequency 20/40 MHz */
+#define CST4331_SPROM_OTP_SEL_MASK 0x00000006
+#define CST4331_SPROM_OTP_SEL_SHIFT 1
+#define CST4331_SPROM_PRESENT 0x00000002
+#define CST4331_OTP_PRESENT 0x00000004
+#define CST4331_LDO_RF 0x00000008
+#define CST4331_LDO_PAR 0x00000010
+
+/* 4315 resources */
+#define RES4315_CBUCK_LPOM 1 /**< 0x00000002 */
+#define RES4315_CBUCK_BURST 2 /**< 0x00000004 */
+#define RES4315_CBUCK_PWM 3 /**< 0x00000008 */
+#define RES4315_CLDO_PU 4 /**< 0x00000010 */
+#define RES4315_PALDO_PU 5 /**< 0x00000020 */
+#define RES4315_ILP_REQUEST 6 /**< 0x00000040 */
+#define RES4315_LNLDO1_PU 9 /**< 0x00000200 */
+#define RES4315_OTP_PU 10 /**< 0x00000400 */
+#define RES4315_LNLDO2_PU 12 /**< 0x00001000 */
+#define RES4315_XTAL_PU 13 /**< 0x00002000 */
+#define RES4315_ALP_AVAIL 14 /**< 0x00004000 */
+#define RES4315_RX_PWRSW_PU 15 /**< 0x00008000 */
+#define RES4315_TX_PWRSW_PU 16 /**< 0x00010000 */
+#define RES4315_RFPLL_PWRSW_PU 17 /**< 0x00020000 */
+#define RES4315_LOGEN_PWRSW_PU 18 /**< 0x00040000 */
+#define RES4315_AFE_PWRSW_PU 19 /**< 0x00080000 */
+#define RES4315_BBPLL_PWRSW_PU 20 /**< 0x00100000 */
+#define RES4315_HT_AVAIL 21 /**< 0x00200000 */
+
+/* 4315 chip-specific ChipStatus register bits */
+#define CST4315_SPROM_OTP_SEL_MASK 0x00000003 /**< gpio [7:6], SDIO CIS selection */
+#define
CST4315_DEFCIS_SEL 0x00000000 /**< use default CIS, OTP is powered up */ +#define CST4315_SPROM_SEL 0x00000001 /**< use SPROM, OTP is powered up */ +#define CST4315_OTP_SEL 0x00000002 /**< use OTP, OTP is powered up */ +#define CST4315_OTP_PWRDN 0x00000003 /**< use SPROM, OTP is powered down */ +#define CST4315_SDIO_MODE 0x00000004 /**< gpio [8], sdio/usb mode */ +#define CST4315_RCAL_VALID 0x00000008 +#define CST4315_RCAL_VALUE_MASK 0x000001f0 +#define CST4315_RCAL_VALUE_SHIFT 4 +#define CST4315_PALDO_EXTPNP 0x00000200 /**< PALDO is configured with external PNP */ +#define CST4315_CBUCK_MODE_MASK 0x00000c00 +#define CST4315_CBUCK_MODE_BURST 0x00000400 +#define CST4315_CBUCK_MODE_LPBURST 0x00000c00 + +/* 4319 resources */ +#define RES4319_CBUCK_LPOM 1 /**< 0x00000002 */ +#define RES4319_CBUCK_BURST 2 /**< 0x00000004 */ +#define RES4319_CBUCK_PWM 3 /**< 0x00000008 */ +#define RES4319_CLDO_PU 4 /**< 0x00000010 */ +#define RES4319_PALDO_PU 5 /**< 0x00000020 */ +#define RES4319_ILP_REQUEST 6 /**< 0x00000040 */ +#define RES4319_LNLDO1_PU 9 /**< 0x00000200 */ +#define RES4319_OTP_PU 10 /**< 0x00000400 */ +#define RES4319_LNLDO2_PU 12 /**< 0x00001000 */ +#define RES4319_XTAL_PU 13 /**< 0x00002000 */ +#define RES4319_ALP_AVAIL 14 /**< 0x00004000 */ +#define RES4319_RX_PWRSW_PU 15 /**< 0x00008000 */ +#define RES4319_TX_PWRSW_PU 16 /**< 0x00010000 */ +#define RES4319_RFPLL_PWRSW_PU 17 /**< 0x00020000 */ +#define RES4319_LOGEN_PWRSW_PU 18 /**< 0x00040000 */ +#define RES4319_AFE_PWRSW_PU 19 /**< 0x00080000 */ +#define RES4319_BBPLL_PWRSW_PU 20 /**< 0x00100000 */ +#define RES4319_HT_AVAIL 21 /**< 0x00200000 */ + +/* 4319 chip-specific ChipStatus register bits */ +#define CST4319_SPI_CPULESSUSB 0x00000001 +#define CST4319_SPI_CLK_POL 0x00000002 +#define CST4319_SPI_CLK_PH 0x00000008 +#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0 /**< gpio [7:6], SDIO CIS selection */ +#define CST4319_SPROM_OTP_SEL_SHIFT 6 +#define CST4319_DEFCIS_SEL 0x00000000 /**< use default CIS, OTP is powered up */ +#define CST4319_SPROM_SEL 0x00000040 /**< use SPROM, OTP is powered up */ +#define CST4319_OTP_SEL 0x00000080 /* use OTP, OTP is powered up */ +#define CST4319_OTP_PWRDN 0x000000c0 /* use SPROM, OTP is powered down */ +#define CST4319_SDIO_USB_MODE 0x00000100 /**< gpio [8], sdio/usb mode */ +#define CST4319_REMAP_SEL_MASK 0x00000600 +#define CST4319_ILPDIV_EN 0x00000800 +#define CST4319_XTAL_PD_POL 0x00001000 +#define CST4319_LPO_SEL 0x00002000 +#define CST4319_RES_INIT_MODE 0x0000c000 +#define CST4319_PALDO_EXTPNP 0x00010000 /**< PALDO is configured with external PNP */ +#define CST4319_CBUCK_MODE_MASK 0x00060000 +#define CST4319_CBUCK_MODE_BURST 0x00020000 +#define CST4319_CBUCK_MODE_LPBURST 0x00060000 +#define CST4319_RCAL_VALID 0x01000000 +#define CST4319_RCAL_VALUE_MASK 0x3e000000 +#define CST4319_RCAL_VALUE_SHIFT 25 + +#define PMU1_PLL0_CHIPCTL0 0 +#define PMU1_PLL0_CHIPCTL1 1 +#define PMU1_PLL0_CHIPCTL2 2 +#define CCTL_4319USB_XTAL_SEL_MASK 0x00180000 +#define CCTL_4319USB_XTAL_SEL_SHIFT 19 +#define CCTL_4319USB_48MHZ_PLL_SEL 1 +#define CCTL_4319USB_24MHZ_PLL_SEL 2 + +/* PMU resources for 4336 */ +#define RES4336_CBUCK_LPOM 0 +#define RES4336_CBUCK_BURST 1 +#define RES4336_CBUCK_LP_PWM 2 +#define RES4336_CBUCK_PWM 3 +#define RES4336_CLDO_PU 4 +#define RES4336_DIS_INT_RESET_PD 5 +#define RES4336_ILP_REQUEST 6 +#define RES4336_LNLDO_PU 7 +#define RES4336_LDO3P3_PU 8 +#define RES4336_OTP_PU 9 +#define RES4336_XTAL_PU 10 +#define RES4336_ALP_AVAIL 11 +#define RES4336_RADIO_PU 12 +#define RES4336_BG_PU 13 +#define 
RES4336_VREG1p4_PU_PU 14 +#define RES4336_AFE_PWRSW_PU 15 +#define RES4336_RX_PWRSW_PU 16 +#define RES4336_TX_PWRSW_PU 17 +#define RES4336_BB_PWRSW_PU 18 +#define RES4336_SYNTH_PWRSW_PU 19 +#define RES4336_MISC_PWRSW_PU 20 +#define RES4336_LOGEN_PWRSW_PU 21 +#define RES4336_BBPLL_PWRSW_PU 22 +#define RES4336_MACPHY_CLKAVAIL 23 +#define RES4336_HT_AVAIL 24 +#define RES4336_RSVD 25 + +/* 4336 chip-specific ChipStatus register bits */ +#define CST4336_SPI_MODE_MASK 0x00000001 +#define CST4336_SPROM_PRESENT 0x00000002 +#define CST4336_OTP_PRESENT 0x00000004 +#define CST4336_ARMREMAP_0 0x00000008 +#define CST4336_ILPDIV_EN_MASK 0x00000010 +#define CST4336_ILPDIV_EN_SHIFT 4 +#define CST4336_XTAL_PD_POL_MASK 0x00000020 +#define CST4336_XTAL_PD_POL_SHIFT 5 +#define CST4336_LPO_SEL_MASK 0x00000040 +#define CST4336_LPO_SEL_SHIFT 6 +#define CST4336_RES_INIT_MODE_MASK 0x00000180 +#define CST4336_RES_INIT_MODE_SHIFT 7 +#define CST4336_CBUCK_MODE_MASK 0x00000600 +#define CST4336_CBUCK_MODE_SHIFT 9 + +/* 4336 Chip specific PMU ChipControl register bits */ +#define PCTL_4336_SERIAL_ENAB (1 << 24) + +/* 4330 resources */ +#define RES4330_CBUCK_LPOM 0 +#define RES4330_CBUCK_BURST 1 +#define RES4330_CBUCK_LP_PWM 2 +#define RES4330_CBUCK_PWM 3 +#define RES4330_CLDO_PU 4 +#define RES4330_DIS_INT_RESET_PD 5 +#define RES4330_ILP_REQUEST 6 +#define RES4330_LNLDO_PU 7 +#define RES4330_LDO3P3_PU 8 +#define RES4330_OTP_PU 9 +#define RES4330_XTAL_PU 10 +#define RES4330_ALP_AVAIL 11 +#define RES4330_RADIO_PU 12 +#define RES4330_BG_PU 13 +#define RES4330_VREG1p4_PU_PU 14 +#define RES4330_AFE_PWRSW_PU 15 +#define RES4330_RX_PWRSW_PU 16 +#define RES4330_TX_PWRSW_PU 17 +#define RES4330_BB_PWRSW_PU 18 +#define RES4330_SYNTH_PWRSW_PU 19 +#define RES4330_MISC_PWRSW_PU 20 +#define RES4330_LOGEN_PWRSW_PU 21 +#define RES4330_BBPLL_PWRSW_PU 22 +#define RES4330_MACPHY_CLKAVAIL 23 +#define RES4330_HT_AVAIL 24 +#define RES4330_5gRX_PWRSW_PU 25 +#define RES4330_5gTX_PWRSW_PU 26 +#define RES4330_5g_LOGEN_PWRSW_PU 27 + +/* 4330 chip-specific ChipStatus register bits */ +#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6) /**< SDIO || gSPI */ +#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6) /**< USB || USBDA */ +#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0) /**< SDIO */ +#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4) /**< gSPI */ +#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6) /**< USB packet-oriented */ +#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7) /**< USB Direct Access */ +#define CST4330_OTP_PRESENT 0x00000010 +#define CST4330_LPO_AUTODET_EN 0x00000020 +#define CST4330_ARMREMAP_0 0x00000040 +#define CST4330_SPROM_PRESENT 0x00000080 /**< takes priority over OTP if both set */ +#define CST4330_ILPDIV_EN 0x00000100 +#define CST4330_LPO_SEL 0x00000200 +#define CST4330_RES_INIT_MODE_SHIFT 10 +#define CST4330_RES_INIT_MODE_MASK 0x00000c00 +#define CST4330_CBUCK_MODE_SHIFT 12 +#define CST4330_CBUCK_MODE_MASK 0x00003000 +#define CST4330_CBUCK_POWER_OK 0x00004000 +#define CST4330_BB_PLL_LOCKED 0x00008000 +#define SOCDEVRAM_BP_ADDR 0x1E000000 +#define SOCDEVRAM_ARM_ADDR 0x00800000 + +/* 4330 Chip specific PMU ChipControl register bits */ +#define PCTL_4330_SERIAL_ENAB (1 << 24) + +/* 4330 Chip specific ChipControl register bits */ +#define CCTRL_4330_GPIO_SEL 0x00000001 /* 1=select GPIOs to be muxed out */ +#define CCTRL_4330_ERCX_SEL 0x00000002 /* 1=select ERCX BT coex to be muxed out */ +#define CCTRL_4330_SDIO_HOST_WAKE 0x00000004 /* SDIO: 1=configure GPIO0 for host wake */ +#define 
CCTRL_4330_JTAG_DISABLE 0x00000008 /* 1=disable JTAG interface on mux'd pins */ + +#define PMU_VREG0_ADDR 0 +#define PMU_VREG0_DISABLE_PULLD_BT_SHIFT 2 +#define PMU_VREG0_DISABLE_PULLD_WL_SHIFT 3 + +#define PMU_VREG4_ADDR 4 + +#define PMU_VREG4_CLDO_PWM_SHIFT 4 +#define PMU_VREG4_CLDO_PWM_MASK 0x7 + +#define PMU_VREG4_LPLDO1_SHIFT 15 +#define PMU_VREG4_LPLDO1_MASK 0x7 +#define PMU_VREG4_LPLDO1_1p20V 0 +#define PMU_VREG4_LPLDO1_1p15V 1 +#define PMU_VREG4_LPLDO1_1p10V 2 +#define PMU_VREG4_LPLDO1_1p25V 3 +#define PMU_VREG4_LPLDO1_1p05V 4 +#define PMU_VREG4_LPLDO1_1p00V 5 +#define PMU_VREG4_LPLDO1_0p95V 6 +#define PMU_VREG4_LPLDO1_0p90V 7 + +/* 4350/4345 VREG4 settings */ +#define PMU4350_VREG4_LPLDO1_1p10V 0 +#define PMU4350_VREG4_LPLDO1_1p15V 1 +#define PMU4350_VREG4_LPLDO1_1p21V 2 +#define PMU4350_VREG4_LPLDO1_1p24V 3 +#define PMU4350_VREG4_LPLDO1_0p90V 4 +#define PMU4350_VREG4_LPLDO1_0p96V 5 +#define PMU4350_VREG4_LPLDO1_1p01V 6 +#define PMU4350_VREG4_LPLDO1_1p04V 7 + +#define PMU_VREG4_LPLDO2_LVM_SHIFT 18 +#define PMU_VREG4_LPLDO2_LVM_MASK 0x7 +#define PMU_VREG4_LPLDO2_HVM_SHIFT 21 +#define PMU_VREG4_LPLDO2_HVM_MASK 0x7 +#define PMU_VREG4_LPLDO2_LVM_HVM_MASK 0x3f +#define PMU_VREG4_LPLDO2_1p00V 0 +#define PMU_VREG4_LPLDO2_1p15V 1 +#define PMU_VREG4_LPLDO2_1p20V 2 +#define PMU_VREG4_LPLDO2_1p10V 3 +#define PMU_VREG4_LPLDO2_0p90V 4 /**< 4 - 7 is 0.90V */ + +#define PMU_VREG4_HSICLDO_BYPASS_SHIFT 27 +#define PMU_VREG4_HSICLDO_BYPASS_MASK 0x1 + +#define PMU_VREG5_ADDR 5 +#define PMU_VREG5_HSICAVDD_PD_SHIFT 6 +#define PMU_VREG5_HSICAVDD_PD_MASK 0x1 +#define PMU_VREG5_HSICDVDD_PD_SHIFT 11 +#define PMU_VREG5_HSICDVDD_PD_MASK 0x1 + +/* 4334 resources */ +#define RES4334_LPLDO_PU 0 +#define RES4334_RESET_PULLDN_DIS 1 +#define RES4334_PMU_BG_PU 2 +#define RES4334_HSIC_LDO_PU 3 +#define RES4334_CBUCK_LPOM_PU 4 +#define RES4334_CBUCK_PFM_PU 5 +#define RES4334_CLDO_PU 6 +#define RES4334_LPLDO2_LVM 7 +#define RES4334_LNLDO_PU 8 +#define RES4334_LDO3P3_PU 9 +#define RES4334_OTP_PU 10 +#define RES4334_XTAL_PU 11 +#define RES4334_WL_PWRSW_PU 12 +#define RES4334_LQ_AVAIL 13 +#define RES4334_LOGIC_RET 14 +#define RES4334_MEM_SLEEP 15 +#define RES4334_MACPHY_RET 16 +#define RES4334_WL_CORE_READY 17 +#define RES4334_ILP_REQ 18 +#define RES4334_ALP_AVAIL 19 +#define RES4334_MISC_PWRSW_PU 20 +#define RES4334_SYNTH_PWRSW_PU 21 +#define RES4334_RX_PWRSW_PU 22 +#define RES4334_RADIO_PU 23 +#define RES4334_WL_PMU_PU 24 +#define RES4334_VCO_LDO_PU 25 +#define RES4334_AFE_LDO_PU 26 +#define RES4334_RX_LDO_PU 27 +#define RES4334_TX_LDO_PU 28 +#define RES4334_HT_AVAIL 29 +#define RES4334_MACPHY_CLK_AVAIL 30 + +/* 4334 chip-specific ChipStatus register bits */ +#define CST4334_CHIPMODE_MASK 7 +#define CST4334_SDIO_MODE 0x00000000 +#define CST4334_SPI_MODE 0x00000004 +#define CST4334_HSIC_MODE 0x00000006 +#define CST4334_BLUSB_MODE 0x00000007 +#define CST4334_CHIPMODE_HSIC(cs) (((cs) & CST4334_CHIPMODE_MASK) == CST4334_HSIC_MODE) +#define CST4334_OTP_PRESENT 0x00000010 +#define CST4334_LPO_AUTODET_EN 0x00000020 +#define CST4334_ARMREMAP_0 0x00000040 +#define CST4334_SPROM_PRESENT 0x00000080 +#define CST4334_ILPDIV_EN_MASK 0x00000100 +#define CST4334_ILPDIV_EN_SHIFT 8 +#define CST4334_LPO_SEL_MASK 0x00000200 +#define CST4334_LPO_SEL_SHIFT 9 +#define CST4334_RES_INIT_MODE_MASK 0x00000C00 +#define CST4334_RES_INIT_MODE_SHIFT 10 + +/* 4334 Chip specific PMU ChipControl register bits */ +#define PCTL_4334_GPIO3_ENAB (1 << 3) + +/* 4334 Chip control */ +#define CCTRL4334_PMU_WAKEUP_GPIO1 (1 << 0) +#define 
CCTRL4334_PMU_WAKEUP_HSIC (1 << 1) +#define CCTRL4334_PMU_WAKEUP_AOS (1 << 2) +#define CCTRL4334_HSIC_WAKE_MODE (1 << 3) +#define CCTRL4334_HSIC_INBAND_GPIO1 (1 << 4) +#define CCTRL4334_HSIC_LDO_PU (1 << 23) + +/* 4334 Chip control 3 */ +#define CCTRL4334_BLOCK_EXTRNL_WAKE (1 << 4) +#define CCTRL4334_SAVERESTORE_FIX (1 << 5) + +/* 43341 Chip control 3 */ +#define CCTRL43341_BLOCK_EXTRNL_WAKE (1 << 13) +#define CCTRL43341_SAVERESTORE_FIX (1 << 14) +#define CCTRL43341_BT_ISO_SEL (1 << 16) + +/* 4334 Chip specific ChipControl1 register bits */ +#define CCTRL1_4334_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */ +#define CCTRL1_4334_ERCX_SEL (1 << 1) /* 1=select ERCX BT coex to be muxed out */ +#define CCTRL1_4334_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */ +#define CCTRL1_4334_JTAG_DISABLE (1 << 3) /* 1=disable JTAG interface on mux'd pins */ +#define CCTRL1_4334_UART_ON_4_5 (1 << 28) /**< 1=UART_TX/UART_RX muxed on GPIO_4/5 (4334B0/1) */ + +/* 4324 Chip specific ChipControl1 register bits */ +#define CCTRL1_4324_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */ +#define CCTRL1_4324_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */ + +/* 43143 chip-specific ChipStatus register bits based on Confluence documentation */ +/* register contains strap values sampled during POR */ +#define CST43143_REMAP_TO_ROM (3 << 0) /* 00=Boot SRAM, 01=Boot ROM, 10=Boot SFLASH */ +#define CST43143_SDIO_EN (1 << 2) /* 0 = USB Enab, SDIO pins are GPIO or I2S */ +#define CST43143_SDIO_ISO (1 << 3) /* 1 = SDIO isolated */ +#define CST43143_USB_CPU_LESS (1 << 4) /* 1 = CPULess mode Enabled */ +#define CST43143_CBUCK_MODE (3 << 6) /* Indicates what controller mode CBUCK is in */ +#define CST43143_POK_CBUCK (1 << 8) /* 1 = 1.2V CBUCK voltage ready */ +#define CST43143_PMU_OVRSPIKE (1 << 9) +#define CST43143_PMU_OVRTEMP (0xF << 10) +#define CST43143_SR_FLL_CAL_DONE (1 << 14) +#define CST43143_USB_PLL_LOCKDET (1 << 15) +#define CST43143_PMU_PLL_LOCKDET (1 << 16) +#define CST43143_CHIPMODE_SDIOD(cs) (((cs) & CST43143_SDIO_EN) != 0) /* SDIO */ + +/* 43143 Chip specific ChipControl register bits */ +/* 00: SECI is disabled (JATG functional), 01: 2 wire, 10: 4 wire */ +#define CCTRL_43143_SECI (1<<0) +#define CCTRL_43143_BT_LEGACY (1<<1) +#define CCTRL_43143_I2S_MODE (1<<2) /**< 0: SDIO enabled */ +#define CCTRL_43143_I2S_MASTER (1<<3) /**< 0: I2S MCLK input disabled */ +#define CCTRL_43143_I2S_FULL (1<<4) /**< 0: I2S SDIN and SPDIF_TX inputs disabled */ +#define CCTRL_43143_GSIO (1<<5) /**< 0: sFlash enabled */ +#define CCTRL_43143_RF_SWCTRL_MASK (7<<6) /**< 0: disabled */ +#define CCTRL_43143_RF_SWCTRL_0 (1<<6) +#define CCTRL_43143_RF_SWCTRL_1 (2<<6) +#define CCTRL_43143_RF_SWCTRL_2 (4<<6) +#define CCTRL_43143_RF_XSWCTRL (1<<9) /**< 0: UART enabled */ +#define CCTRL_43143_HOST_WAKE0 (1<<11) /**< 1: SDIO separate interrupt output from GPIO4 */ +#define CCTRL_43143_HOST_WAKE1 (1<<12) /* 1: SDIO separate interrupt output from GPIO16 */ + +/* 43143 resources, based on pmu_params.xls V1.19 */ +#define RES43143_EXT_SWITCHER_PWM 0 /**< 0x00001 */ +#define RES43143_XTAL_PU 1 /**< 0x00002 */ +#define RES43143_ILP_REQUEST 2 /**< 0x00004 */ +#define RES43143_ALP_AVAIL 3 /**< 0x00008 */ +#define RES43143_WL_CORE_READY 4 /**< 0x00010 */ +#define RES43143_BBPLL_PWRSW_PU 5 /**< 0x00020 */ +#define RES43143_HT_AVAIL 6 /**< 0x00040 */ +#define RES43143_RADIO_PU 7 /**< 0x00080 */ +#define RES43143_MACPHY_CLK_AVAIL 8 /**< 0x00100 */ +#define RES43143_OTP_PU 9 /**< 0x00200 */ +#define 
RES43143_LQ_AVAIL 10 /**< 0x00400 */
+
+#define PMU43143_XTAL_CORE_SIZE_MASK 0x3F
+
+/* 4313 resources */
+#define RES4313_BB_PU_RSRC 0
+#define RES4313_ILP_REQ_RSRC 1
+#define RES4313_XTAL_PU_RSRC 2
+#define RES4313_ALP_AVAIL_RSRC 3
+#define RES4313_RADIO_PU_RSRC 4
+#define RES4313_BG_PU_RSRC 5
+#define RES4313_VREG1P4_PU_RSRC 6
+#define RES4313_AFE_PWRSW_RSRC 7
+#define RES4313_RX_PWRSW_RSRC 8
+#define RES4313_TX_PWRSW_RSRC 9
+#define RES4313_BB_PWRSW_RSRC 10
+#define RES4313_SYNTH_PWRSW_RSRC 11
+#define RES4313_MISC_PWRSW_RSRC 12
+#define RES4313_BB_PLL_PWRSW_RSRC 13
+#define RES4313_HT_AVAIL_RSRC 14
+#define RES4313_MACPHY_CLK_AVAIL_RSRC 15
+
+/* 4313 chip-specific ChipStatus register bits */
+#define CST4313_SPROM_PRESENT 1
+#define CST4313_OTP_PRESENT 2
+#define CST4313_SPROM_OTP_SEL_MASK 0x00000002
+#define CST4313_SPROM_OTP_SEL_SHIFT 0
+
+/* 4313 Chip specific ChipControl register bits */
+#define CCTRL_4313_12MA_LED_DRIVE 0x00000007 /* 12 mA drive strength for later 4313 */
+
+/* PMU resources for 4314 */
+#define RES4314_LPLDO_PU 0
+#define RES4314_PMU_SLEEP_DIS 1
+#define RES4314_PMU_BG_PU 2
+#define RES4314_CBUCK_LPOM_PU 3
+#define RES4314_CBUCK_PFM_PU 4
+#define RES4314_CLDO_PU 5
+#define RES4314_LPLDO2_LVM 6
+#define RES4314_WL_PMU_PU 7
+#define RES4314_LNLDO_PU 8
+#define RES4314_LDO3P3_PU 9
+#define RES4314_OTP_PU 10
+#define RES4314_XTAL_PU 11
+#define RES4314_WL_PWRSW_PU 12
+#define RES4314_LQ_AVAIL 13
+#define RES4314_LOGIC_RET 14
+#define RES4314_MEM_SLEEP 15
+#define RES4314_MACPHY_RET 16
+#define RES4314_WL_CORE_READY 17
+#define RES4314_ILP_REQ 18
+#define RES4314_ALP_AVAIL 19
+#define RES4314_MISC_PWRSW_PU 20
+#define RES4314_SYNTH_PWRSW_PU 21
+#define RES4314_RX_PWRSW_PU 22
+#define RES4314_RADIO_PU 23
+#define RES4314_VCO_LDO_PU 24
+#define RES4314_AFE_LDO_PU 25
+#define RES4314_RX_LDO_PU 26
+#define RES4314_TX_LDO_PU 27
+#define RES4314_HT_AVAIL 28
+#define RES4314_MACPHY_CLK_AVAIL 29
+
+/* 4314 chip-specific ChipStatus register bits */
+#define CST4314_OTP_ENABLED 0x00200000
+
+/* 43228 resources */
+#define RES43228_NOT_USED 0
+#define RES43228_ILP_REQUEST 1
+#define RES43228_XTAL_PU 2
+#define RES43228_ALP_AVAIL 3
+#define RES43228_PLL_EN 4
+#define RES43228_HT_PHY_AVAIL 5
+
+/* 43228 chipstatus reg bits */
+#define CST43228_ILP_DIV_EN 0x1
+#define CST43228_OTP_PRESENT 0x2
+#define CST43228_SERDES_REFCLK_PADSEL 0x4
+#define CST43228_SDIO_MODE 0x8
+#define CST43228_SDIO_OTP_PRESENT 0x10
+#define CST43228_SDIO_RESET 0x20
+
+/* 4706 chipstatus reg bits */
+#define CST4706_PKG_OPTION (1<<0) /* 0: full-featured package, 1: low-cost package */
+#define CST4706_SFLASH_PRESENT (1<<1) /* 0: parallel, 1: serial flash is present */
+#define CST4706_SFLASH_TYPE (1<<2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmel-s flash */
+#define CST4706_MIPS_BENDIAN (1<<3) /* 0: little, 1: big endian */
+#define CST4706_PCIE1_DISABLE (1<<5) /* PCIE1 enable strap pin */
+
+/* 4706 flashstrconfig reg bits */
+#define FLSTRCF4706_MASK 0x000000ff
+#define FLSTRCF4706_SF1 0x00000001 /**< 2nd serial flash present */
+#define FLSTRCF4706_PF1 0x00000002 /**< 2nd parallel flash present */
+#define FLSTRCF4706_SF1_TYPE 0x00000004 /**< 2nd serial flash type : 0 : ST, 1 : Atmel */
+#define FLSTRCF4706_NF1 0x00000008 /**< 2nd NAND flash present */
+#define FLSTRCF4706_1ST_MADDR_SEG_MASK 0x000000f0 /**< Valid value mask */
+#define FLSTRCF4706_1ST_MADDR_SEG_4MB 0x00000010 /**< 4MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_8MB 0x00000020 /**< 8MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_16MB 0x00000030 /**< 16MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_32MB 0x00000040 /**< 32MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_64MB 0x00000050 /**< 64MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_128MB 0x00000060 /**< 128MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_256MB 0x00000070 /**< 256MB */
+
+/* 4360 Chip specific ChipControl register bits */
+#define CCTRL4360_I2C_MODE (1 << 0)
+#define CCTRL4360_UART_MODE (1 << 1)
+#define CCTRL4360_SECI_MODE (1 << 2)
+#define CCTRL4360_BTSWCTRL_MODE (1 << 3)
+#define CCTRL4360_DISCRETE_FEMCTRL_MODE (1 << 4)
+#define CCTRL4360_DIGITAL_PACTRL_MODE (1 << 5)
+#define CCTRL4360_BTSWCTRL_AND_DIGPA_PRESENT (1 << 6)
+#define CCTRL4360_EXTRA_GPIO_MODE (1 << 7)
+#define CCTRL4360_EXTRA_FEMCTRL_MODE (1 << 8)
+#define CCTRL4360_BT_LGCY_MODE (1 << 9)
+#define CCTRL4360_CORE2FEMCTRL4_ON (1 << 21)
+#define CCTRL4360_SECI_ON_GPIO01 (1 << 24)
+
+/* 4360 Chip specific Regulator Control register bits */
+#define RCTRL4360_RFLDO_PWR_DOWN (1 << 1)
+
+/* 4360 PMU resources and chip status bits */
+#define RES4360_REGULATOR 0
+#define RES4360_ILP_AVAIL 1
+#define RES4360_ILP_REQ 2
+#define RES4360_XTAL_LDO_PU 3
+#define RES4360_XTAL_PU 4
+#define RES4360_ALP_AVAIL 5
+#define RES4360_BBPLLPWRSW_PU 6
+#define RES4360_HT_AVAIL 7
+#define RES4360_OTP_PU 8
+#define RES4360_AVB_PLL_PWRSW_PU 9
+#define RES4360_PCIE_TL_CLK_AVAIL 10
+
+#define CST4360_XTAL_40MZ 0x00000001
+#define CST4360_SFLASH 0x00000002
+#define CST4360_SPROM_PRESENT 0x00000004
+#define CST4360_SFLASH_TYPE 0x00000004
+#define CST4360_OTP_ENABLED 0x00000008
+#define CST4360_REMAP_ROM 0x00000010
+#define CST4360_RSRC_INIT_MODE_MASK 0x00000060
+#define CST4360_RSRC_INIT_MODE_SHIFT 5
+#define CST4360_ILP_DIVEN 0x00000080
+#define CST4360_MODE_USB 0x00000100
+#define CST4360_SPROM_SIZE_MASK 0x00000600
+#define CST4360_SPROM_SIZE_SHIFT 9
+#define CST4360_BBPLL_LOCK 0x00000800
+#define CST4360_AVBBPLL_LOCK 0x00001000
+#define CST4360_USBBBPLL_LOCK 0x00002000
+#define CST4360_RSRC_INIT_MODE(cs) ((cs & CST4360_RSRC_INIT_MODE_MASK) >> \
+ CST4360_RSRC_INIT_MODE_SHIFT)
+
+#define CCTRL_4360_UART_SEL 0x2
+
+
+/* 43602 PMU resources based on pmu_params.xls version v0.95 */
+#define RES43602_LPLDO_PU 0
+#define RES43602_REGULATOR 1
+#define RES43602_PMU_SLEEP 2
+#define RES43602_RSVD_3 3
+#define RES43602_XTALLDO_PU 4
+#define RES43602_SERDES_PU 5
+#define RES43602_BBPLL_PWRSW_PU 6
+#define RES43602_SR_CLK_START 7
+#define RES43602_SR_PHY_PWRSW 8
+#define RES43602_SR_SUBCORE_PWRSW 9
+#define RES43602_XTAL_PU 10
+#define RES43602_PERST_OVR 11
+#define RES43602_SR_CLK_STABLE 12
+#define RES43602_SR_SAVE_RESTORE 13
+#define RES43602_SR_SLEEP 14
+#define RES43602_LQ_START 15
+#define RES43602_LQ_AVAIL 16
+#define RES43602_WL_CORE_RDY 17
+#define RES43602_ILP_REQ 18
+#define RES43602_ALP_AVAIL 19
+#define RES43602_RADIO_PU 20
+#define RES43602_RFLDO_PU 21
+#define RES43602_HT_START 22
+#define RES43602_HT_AVAIL 23
+#define RES43602_MACPHY_CLKAVAIL 24
+#define RES43602_PARLDO_PU 25
+#define RES43602_RSVD_26 26
+
+/* 43602 chip status bits */
+#define CST43602_SPROM_PRESENT (1<<1)
+#define CST43602_SPROM_SIZE (1<<10) /* 0 = 16K, 1 = 4K */
+#define CST43602_BBPLL_LOCK (1<<11)
+#define CST43602_RF_LDO_OUT_OK (1<<15) /* RF LDO output OK */
+
+#define PMU43602_CC1_GPIO12_OVRD (1<<28) /* GPIO12 override */
+
+#define PMU43602_CC2_PCIE_CLKREQ_L_WAKE_EN (1<<1) /* creates gated_pcie_wake, pmu_wakeup logic */
+#define PMU43602_CC2_PCIE_PERST_L_WAKE_EN (1<<2) /*
creates gated_pcie_wake, pmu_wakeup logic */ +#define PMU43602_CC2_ENABLE_L2REFCLKPAD_PWRDWN (1<<3) +#define PMU43602_CC2_PMU_WAKE_ALP_AVAIL_EN (1<<5) /* enable pmu_wakeup to request for ALP_AVAIL */ +#define PMU43602_CC2_PERST_L_EXTEND_EN (1<<9) /* extend perst_l until rsc PERST_OVR comes up */ +#define PMU43602_CC2_FORCE_EXT_LPO (1<<19) /* 1=ext LPO clock is the final LPO clock */ +#define PMU43602_CC2_XTAL32_SEL (1<<30) /* 0=ext_clock, 1=xtal */ + +#define CC_SR1_43602_SR_ASM_ADDR (0x0) + +/* PLL CTL register values for open loop, used during S/R operation */ +#define PMU43602_PLL_CTL6_VAL 0x68000528 +#define PMU43602_PLL_CTL7_VAL 0x6 + +#define PMU43602_CC3_ARMCR4_DBG_CLK (1 << 29) + +/* 4365 PMU resources */ +#define RES4365_REGULATOR_PU 0 +#define RES4365_XTALLDO_PU 1 +#define RES4365_XTAL_PU 2 +#define RES4365_CPU_PLLLDO_PU 3 +#define RES4365_CPU_PLL_PU 4 +#define RES4365_WL_CORE_RDY 5 +#define RES4365_ILP_REQ 6 +#define RES4365_ALP_AVAIL 7 +#define RES4365_HT_AVAIL 8 +#define RES4365_BB_PLLLDO_PU 9 +#define RES4365_BB_PLL_PU 10 +#define RES4365_MINIMU_PU 11 +#define RES4365_RADIO_PU 12 +#define RES4365_MACPHY_CLK_AVAIL 13 + +/* 4349 related */ +#define RES4349_LPLDO_PU 0 +#define RES4349_BG_PU 1 +#define RES4349_PMU_SLEEP 2 +#define RES4349_PALDO3P3_PU 3 +#define RES4349_CBUCK_LPOM_PU 4 +#define RES4349_CBUCK_PFM_PU 5 +#define RES4349_COLD_START_WAIT 6 +#define RES4349_RSVD_7 7 +#define RES4349_LNLDO_PU 8 +#define RES4349_XTALLDO_PU 9 +#define RES4349_LDO3P3_PU 10 +#define RES4349_OTP_PU 11 +#define RES4349_XTAL_PU 12 +#define RES4349_SR_CLK_START 13 +#define RES4349_LQ_AVAIL 14 +#define RES4349_LQ_START 15 +#define RES4349_PERST_OVR 16 +#define RES4349_WL_CORE_RDY 17 +#define RES4349_ILP_REQ 18 +#define RES4349_ALP_AVAIL 19 +#define RES4349_MINI_PMU 20 +#define RES4349_RADIO_PU 21 +#define RES4349_SR_CLK_STABLE 22 +#define RES4349_SR_SAVE_RESTORE 23 +#define RES4349_SR_PHY_PWRSW 24 +#define RES4349_SR_VDDM_PWRSW 25 +#define RES4349_SR_SUBCORE_PWRSW 26 +#define RES4349_SR_SLEEP 27 +#define RES4349_HT_START 28 +#define RES4349_HT_AVAIL 29 +#define RES4349_MACPHY_CLKAVAIL 30 + +#define CR4_4349_RAM_BASE (0x180000) +#define CR4_4349_RAM_BASE_FROM_REV_9 (0x160000) + +/* SR binary offset is at 8K */ +#define CC_SR1_4349_SR_ASM_ADDR (0x10) + +#define CST4349_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */ +#define CST4349_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */ + +#define CST4349_SPROM_PRESENT 0x00000010 + +#define CC2_4349_VDDM_PWRSW_EN_MASK (1 << 20) +#define CC2_4349_VDDM_PWRSW_EN_SHIFT (20) +#define CC2_4349_SDIO_AOS_WAKEUP_MASK (1 << 24) +#define CC2_4349_SDIO_AOS_WAKEUP_SHIFT (24) + + +#define CC6_4349_PCIE_CLKREQ_WAKEUP_MASK (1 << 4) +#define CC6_4349_PCIE_CLKREQ_WAKEUP_SHIFT (4) +#define CC6_4349_PMU_WAKEUP_ALPAVAIL_MASK (1 << 6) +#define CC6_4349_PMU_WAKEUP_ALPAVAIL_SHIFT (6) +#define CC6_4349_PMU_EN_EXT_PERST_MASK (1 << 13) +#define CC6_4349_PMU_ENABLE_L2REFCLKPAD_PWRDWN (1 << 15) +#define CC6_4349_PMU_EN_MDIO_MASK (1 << 16) +#define CC6_4349_PMU_EN_ASSERT_L2_MASK (1 << 25) + + + +/* 43430 PMU resources based on pmu_params.xls */ +#define RES43430_LPLDO_PU 0 +#define RES43430_BG_PU 1 +#define RES43430_PMU_SLEEP 2 +#define RES43430_RSVD_3 3 +#define RES43430_CBUCK_LPOM_PU 4 +#define RES43430_CBUCK_PFM_PU 5 +#define RES43430_COLD_START_WAIT 6 +#define RES43430_RSVD_7 7 +#define RES43430_LNLDO_PU 8 +#define RES43430_RSVD_9 9 +#define RES43430_LDO3P3_PU 10 +#define RES43430_OTP_PU 11 +#define RES43430_XTAL_PU 12 +#define RES43430_SR_CLK_START 13 +#define 
RES43430_LQ_AVAIL 14 +#define RES43430_LQ_START 15 +#define RES43430_RSVD_16 16 +#define RES43430_WL_CORE_RDY 17 +#define RES43430_ILP_REQ 18 +#define RES43430_ALP_AVAIL 19 +#define RES43430_MINI_PMU 20 +#define RES43430_RADIO_PU 21 +#define RES43430_SR_CLK_STABLE 22 +#define RES43430_SR_SAVE_RESTORE 23 +#define RES43430_SR_PHY_PWRSW 24 +#define RES43430_SR_VDDM_PWRSW 25 +#define RES43430_SR_SUBCORE_PWRSW 26 +#define RES43430_SR_SLEEP 27 +#define RES43430_HT_START 28 +#define RES43430_HT_AVAIL 29 +#define RES43430_MACPHY_CLK_AVAIL 30 + +/* 43430 chip status bits */ +#define CST43430_SDIO_MODE 0x00000001 +#define CST43430_GSPI_MODE 0x00000002 +#define CST43430_RSRC_INIT_MODE_0 0x00000080 +#define CST43430_RSRC_INIT_MODE_1 0x00000100 +#define CST43430_SEL0_SDIO 0x00000200 +#define CST43430_SEL1_SDIO 0x00000400 +#define CST43430_SEL2_SDIO 0x00000800 +#define CST43430_BBPLL_LOCKED 0x00001000 +#define CST43430_DBG_INST_DETECT 0x00004000 +#define CST43430_CLB2WL_BT_READY 0x00020000 +#define CST43430_JTAG_MODE 0x00100000 +#define CST43430_HOST_IFACE 0x00400000 +#define CST43430_TRIM_EN 0x00800000 +#define CST43430_DIN_PACKAGE_OPTION 0x10000000 + +#define PMU_MACCORE_0_RES_REQ_TIMER 0x19000000 +#define PMU_MACCORE_0_RES_REQ_MASK 0x5FF2364F + +#define PMU_MACCORE_1_RES_REQ_TIMER 0x19000000 +#define PMU_MACCORE_1_RES_REQ_MASK 0x5FF2364F + +/* defines to detect active host interface in use */ +#define CHIP_HOSTIF_PCIEMODE 0x1 +#define CHIP_HOSTIF_USBMODE 0x2 +#define CHIP_HOSTIF_SDIOMODE 0x4 +#define CHIP_HOSTIF_PCIE(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_PCIEMODE) +#define CHIP_HOSTIF_USB(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_USBMODE) +#define CHIP_HOSTIF_SDIO(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_SDIOMODE) + +/* 4335 resources */ +#define RES4335_LPLDO_PO 0 +#define RES4335_PMU_BG_PU 1 +#define RES4335_PMU_SLEEP 2 +#define RES4335_RSVD_3 3 +#define RES4335_CBUCK_LPOM_PU 4 +#define RES4335_CBUCK_PFM_PU 5 +#define RES4335_RSVD_6 6 +#define RES4335_RSVD_7 7 +#define RES4335_LNLDO_PU 8 +#define RES4335_XTALLDO_PU 9 +#define RES4335_LDO3P3_PU 10 +#define RES4335_OTP_PU 11 +#define RES4335_XTAL_PU 12 +#define RES4335_SR_CLK_START 13 +#define RES4335_LQ_AVAIL 14 +#define RES4335_LQ_START 15 +#define RES4335_RSVD_16 16 +#define RES4335_WL_CORE_RDY 17 +#define RES4335_ILP_REQ 18 +#define RES4335_ALP_AVAIL 19 +#define RES4335_MINI_PMU 20 +#define RES4335_RADIO_PU 21 +#define RES4335_SR_CLK_STABLE 22 +#define RES4335_SR_SAVE_RESTORE 23 +#define RES4335_SR_PHY_PWRSW 24 +#define RES4335_SR_VDDM_PWRSW 25 +#define RES4335_SR_SUBCORE_PWRSW 26 +#define RES4335_SR_SLEEP 27 +#define RES4335_HT_START 28 +#define RES4335_HT_AVAIL 29 +#define RES4335_MACPHY_CLKAVAIL 30 + +/* 4335 Chip specific ChipStatus register bits */ +#define CST4335_SPROM_MASK 0x00000020 +#define CST4335_SFLASH_MASK 0x00000040 +#define CST4335_RES_INIT_MODE_SHIFT 7 +#define CST4335_RES_INIT_MODE_MASK 0x00000180 +#define CST4335_CHIPMODE_MASK 0xF +#define CST4335_CHIPMODE_SDIOD(cs) (((cs) & (1 << 0)) != 0) /* SDIO */ +#define CST4335_CHIPMODE_GSPI(cs) (((cs) & (1 << 1)) != 0) /* gSPI */ +#define CST4335_CHIPMODE_USB20D(cs) (((cs) & (1 << 2)) != 0) /**< HSIC || USBDA */ +#define CST4335_CHIPMODE_PCIE(cs) (((cs) & (1 << 3)) != 0) /* PCIE */ + +/* 4335 Chip specific ChipControl1 register bits */ +#define CCTRL1_4335_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */ +#define CCTRL1_4335_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */ + +/* 4335 Chip specific ChipControl2 register bits */ +#define CCTRL2_4335_AOSBLOCK (1 
<< 30)
+#define CCTRL2_4335_PMUWAKE (1 << 31)
+#define PATCHTBL_SIZE (0x800)
+#define CR4_4335_RAM_BASE (0x180000)
+#define CR4_4345_LT_C0_RAM_BASE (0x1b0000)
+#define CR4_4345_GE_C0_RAM_BASE (0x198000)
+#define CR4_4349_RAM_BASE (0x180000)
+#define CR4_4350_RAM_BASE (0x180000)
+#define CR4_4360_RAM_BASE (0x0)
+#define CR4_43602_RAM_BASE (0x180000)
+#define CA7_4365_RAM_BASE (0x200000)
+
+/* 4335 chip OTP present & OTP select bits. */
+#define SPROM4335_OTP_SELECT 0x00000010
+#define SPROM4335_OTP_PRESENT 0x00000020
+
+/* 4335 GCI specific bits. */
+#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_PRESENT (1 << 24)
+#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_TYPE 25
+#define CC4335_GCI_FUNC_SEL_PAD_SDIO 0x00707770
+
+/* SFLASH clkdiv specific bits. */
+#define CC4335_SFLASH_CLKDIV_MASK 0x1F000000
+#define CC4335_SFLASH_CLKDIV_SHIFT 25
+
+/* 4335 OTP bits for SFLASH. */
+#define CC4335_SROM_OTP_SFLASH 40
+#define CC4335_SROM_OTP_SFLASH_PRESENT 0x1
+#define CC4335_SROM_OTP_SFLASH_TYPE 0x2
+#define CC4335_SROM_OTP_SFLASH_CLKDIV_MASK 0x003C
+#define CC4335_SROM_OTP_SFLASH_CLKDIV_SHIFT 2
+
+/* 4335 resources--END */
+
+/* 4345 Chip specific ChipStatus register bits */
+#define CST4345_SPROM_MASK 0x00000020
+#define CST4345_SFLASH_MASK 0x00000040
+#define CST4345_RES_INIT_MODE_SHIFT 7
+#define CST4345_RES_INIT_MODE_MASK 0x00000180
+#define CST4345_CHIPMODE_MASK 0x4000F
+#define CST4345_CHIPMODE_SDIOD(cs) (((cs) & (1 << 0)) != 0) /* SDIO */
+#define CST4345_CHIPMODE_GSPI(cs) (((cs) & (1 << 1)) != 0) /* gSPI */
+#define CST4345_CHIPMODE_HSIC(cs) (((cs) & (1 << 2)) != 0) /* HSIC */
+#define CST4345_CHIPMODE_PCIE(cs) (((cs) & (1 << 3)) != 0) /* PCIE */
+#define CST4345_CHIPMODE_USB20D(cs) (((cs) & (1 << 18)) != 0) /* USBDA */
+
+/* 4350 Chipcommon ChipStatus bits */
+#define CST4350_SDIO_MODE 0x00000001
+#define CST4350_HSIC20D_MODE 0x00000002
+#define CST4350_BP_ON_HSIC_CLK 0x00000004
+#define CST4350_PCIE_MODE 0x00000008
+#define CST4350_USB20D_MODE 0x00000010
+#define CST4350_USB30D_MODE 0x00000020
+#define CST4350_SPROM_PRESENT 0x00000040
+#define CST4350_RSRC_INIT_MODE_0 0x00000080
+#define CST4350_RSRC_INIT_MODE_1 0x00000100
+#define CST4350_SEL0_SDIO 0x00000200
+#define CST4350_SEL1_SDIO 0x00000400
+#define CST4350_SDIO_PAD_MODE 0x00000800
+#define CST4350_BBPLL_LOCKED 0x00001000
+#define CST4350_USBPLL_LOCKED 0x00002000
+#define CST4350_LINE_STATE 0x0000C000
+#define CST4350_SERDES_PIPE_PLLLOCK 0x00010000
+#define CST4350_BT_READY 0x00020000
+#define CST4350_SFLASH_PRESENT 0x00040000
+#define CST4350_CPULESS_ENABLE 0x00080000
+#define CST4350_STRAP_HOST_IFC_1 0x00100000
+#define CST4350_STRAP_HOST_IFC_2 0x00200000
+#define CST4350_STRAP_HOST_IFC_3 0x00400000
+#define CST4350_RAW_SPROM_PRESENT 0x00800000
+#define CST4350_APP_CLK_SWITCH_SEL_RDBACK 0x01000000
+#define CST4350_RAW_RSRC_INIT_MODE_0 0x02000000
+#define CST4350_SDIO_PAD_VDDIO 0x04000000
+#define CST4350_GSPI_MODE 0x08000000
+#define CST4350_PACKAGE_OPTION 0xF0000000
+#define CST4350_PACKAGE_SHIFT 28
+
+/* package option for 4350 */
+#define CST4350_PACKAGE_WLCSP 0x0
+#define CST4350_PACKAGE_PCIE 0x1
+#define CST4350_PACKAGE_WLBGA 0x2
+#define CST4350_PACKAGE_DBG 0x3
+#define CST4350_PACKAGE_USB 0x4
+#define CST4350_PACKAGE_USB_HSIC 0x4
+
+#define CST4350_PKG_MODE(cs) ((cs & CST4350_PACKAGE_OPTION) >> CST4350_PACKAGE_SHIFT)
+
+#define CST4350_PKG_WLCSP(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_WLCSP))
+#define CST4350_PKG_PCIE(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_PCIE))
+#define CST4350_PKG_WLBGA(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_WLBGA))
+#define CST4350_PKG_USB(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_USB))
+#define CST4350_PKG_USB_HSIC(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_USB_HSIC))
+
+/* 4350C0 USB PACKAGE using raw_sprom_present to indicate 40MHz xtal */
+#define CST4350_PKG_USB_40M(cs) (cs & CST4350_RAW_SPROM_PRESENT)
+
+#define CST4350_CHIPMODE_SDIOD(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_SDIOD))
+#define CST4350_CHIPMODE_USB20D(cs) ((CST4350_IFC_MODE(cs)) == (CST4350_IFC_MODE_USB20D))
+#define CST4350_CHIPMODE_HSIC20D(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC20D))
+#define CST4350_CHIPMODE_HSIC30D(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC30D))
+#define CST4350_CHIPMODE_USB30D(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D))
+#define CST4350_CHIPMODE_USB30D_WL(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D_WL))
+#define CST4350_CHIPMODE_PCIE(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_PCIE))
+
+/* strap_host_ifc strap value */
+#define CST4350_HOST_IFC_MASK 0x00700000
+#define CST4350_HOST_IFC_SHIFT 20
+
+/* host_ifc raw mode */
+#define CST4350_IFC_MODE_SDIOD 0x0
+#define CST4350_IFC_MODE_HSIC20D 0x1
+#define CST4350_IFC_MODE_HSIC30D 0x2
+#define CST4350_IFC_MODE_PCIE 0x3
+#define CST4350_IFC_MODE_USB20D 0x4
+#define CST4350_IFC_MODE_USB30D 0x5
+#define CST4350_IFC_MODE_USB30D_WL 0x6
+#define CST4350_IFC_MODE_USB30D_BT 0x7
+
+#define CST4350_IFC_MODE(cs) ((cs & CST4350_HOST_IFC_MASK) >> CST4350_HOST_IFC_SHIFT)
+
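+/*
+ * Illustrative use (not part of the original header): given a chipstatus
+ * word cs, CST4350_IFC_MODE(cs) extracts the raw 3-bit strap value from
+ * bits 22:20, so for example:
+ *
+ *	if (CST4350_CHIPMODE_PCIE(cs))
+ *		... the 4350 is strapped for PCIe ...
+ *	else if (CST4350_CHIPMODE_SDIOD(cs))
+ *		... strapped for SDIO ...
+ */
+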
+/* 4350 PMU resources */
+#define RES4350_LPLDO_PU 0
+#define RES4350_PMU_BG_PU 1
+#define RES4350_PMU_SLEEP 2
+#define RES4350_RSVD_3 3
+#define RES4350_CBUCK_LPOM_PU 4
+#define RES4350_CBUCK_PFM_PU 5
+#define RES4350_COLD_START_WAIT 6
+#define RES4350_RSVD_7 7
+#define RES4350_LNLDO_PU 8
+#define RES4350_XTALLDO_PU 9
+#define RES4350_LDO3P3_PU 10
+#define RES4350_OTP_PU 11
+#define RES4350_XTAL_PU 12
+#define RES4350_SR_CLK_START 13
+#define RES4350_LQ_AVAIL 14
+#define RES4350_LQ_START 15
+#define RES4350_PERST_OVR 16
+#define RES4350_WL_CORE_RDY 17
+#define RES4350_ILP_REQ 18
+#define RES4350_ALP_AVAIL 19
+#define RES4350_MINI_PMU 20
+#define RES4350_RADIO_PU 21
+#define RES4350_SR_CLK_STABLE 22
+#define RES4350_SR_SAVE_RESTORE 23
+#define RES4350_SR_PHY_PWRSW 24
+#define RES4350_SR_VDDM_PWRSW 25
+#define RES4350_SR_SUBCORE_PWRSW 26
+#define RES4350_SR_SLEEP 27
+#define RES4350_HT_START 28
+#define RES4350_HT_AVAIL 29
+#define RES4350_MACPHY_CLKAVAIL 30
+
+#define MUXENAB4350_UART_MASK (0x0000000f)
+#define MUXENAB4350_UART_SHIFT 0
+#define MUXENAB4350_HOSTWAKE_MASK (0x000000f0) /**< configure GPIO for SDIO host_wake */
+#define MUXENAB4350_HOSTWAKE_SHIFT 4
+
+/* 4350 GCI function sel values */
+#define CC4350_FNSEL_HWDEF (0)
+#define CC4350_FNSEL_SAMEASPIN (1)
+#define CC4350_FNSEL_UART (2)
+#define CC4350_FNSEL_SFLASH (3)
+#define CC4350_FNSEL_SPROM (4) +#define CC4350_FNSEL_I2C (5) +#define CC4350_FNSEL_MISC0 (6) +#define CC4350_FNSEL_GCI (7) +#define CC4350_FNSEL_MISC1 (8) +#define CC4350_FNSEL_MISC2 (9) +#define CC4350_FNSEL_PWDOG (10) +#define CC4350_FNSEL_IND (12) +#define CC4350_FNSEL_PDN (13) +#define CC4350_FNSEL_PUP (14) +#define CC4350_FNSEL_TRISTATE (15) +#define CC4350C_FNSEL_UART (3) + + +/* 4350 GPIO */ +#define CC4350_PIN_GPIO_00 (0) +#define CC4350_PIN_GPIO_01 (1) +#define CC4350_PIN_GPIO_02 (2) +#define CC4350_PIN_GPIO_03 (3) +#define CC4350_PIN_GPIO_04 (4) +#define CC4350_PIN_GPIO_05 (5) +#define CC4350_PIN_GPIO_06 (6) +#define CC4350_PIN_GPIO_07 (7) +#define CC4350_PIN_GPIO_08 (8) +#define CC4350_PIN_GPIO_09 (9) +#define CC4350_PIN_GPIO_10 (10) +#define CC4350_PIN_GPIO_11 (11) +#define CC4350_PIN_GPIO_12 (12) +#define CC4350_PIN_GPIO_13 (13) +#define CC4350_PIN_GPIO_14 (14) +#define CC4350_PIN_GPIO_15 (15) + +#define CC4350_RSVD_16_SHIFT 16 + +#define CC2_4350_PHY_PWRSW_UPTIME_MASK (0xf << 0) +#define CC2_4350_PHY_PWRSW_UPTIME_SHIFT (0) +#define CC2_4350_VDDM_PWRSW_UPDELAY_MASK (0xf << 4) +#define CC2_4350_VDDM_PWRSW_UPDELAY_SHIFT (4) +#define CC2_4350_VDDM_PWRSW_UPTIME_MASK (0xf << 8) +#define CC2_4350_VDDM_PWRSW_UPTIME_SHIFT (8) +#define CC2_4350_SBC_PWRSW_DNDELAY_MASK (0x3 << 12) +#define CC2_4350_SBC_PWRSW_DNDELAY_SHIFT (12) +#define CC2_4350_PHY_PWRSW_DNDELAY_MASK (0x3 << 14) +#define CC2_4350_PHY_PWRSW_DNDELAY_SHIFT (14) +#define CC2_4350_VDDM_PWRSW_DNDELAY_MASK (0x3 << 16) +#define CC2_4350_VDDM_PWRSW_DNDELAY_SHIFT (16) +#define CC2_4350_VDDM_PWRSW_EN_MASK (1 << 20) +#define CC2_4350_VDDM_PWRSW_EN_SHIFT (20) +#define CC2_4350_MEMLPLDO_PWRSW_EN_MASK (1 << 21) +#define CC2_4350_MEMLPLDO_PWRSW_EN_SHIFT (21) +#define CC2_4350_SDIO_AOS_WAKEUP_MASK (1 << 24) +#define CC2_4350_SDIO_AOS_WAKEUP_SHIFT (24) + +/* Applies to 4335/4350/4345 */ +#define CC3_SR_CLK_SR_MEM_MASK (1 << 0) +#define CC3_SR_CLK_SR_MEM_SHIFT (0) +#define CC3_SR_BIT1_TBD_MASK (1 << 1) +#define CC3_SR_BIT1_TBD_SHIFT (1) +#define CC3_SR_ENGINE_ENABLE_MASK (1 << 2) +#define CC3_SR_ENGINE_ENABLE_SHIFT (2) +#define CC3_SR_BIT3_TBD_MASK (1 << 3) +#define CC3_SR_BIT3_TBD_SHIFT (3) +#define CC3_SR_MINDIV_FAST_CLK_MASK (0xF << 4) +#define CC3_SR_MINDIV_FAST_CLK_SHIFT (4) +#define CC3_SR_R23_SR2_RISE_EDGE_TRIG_MASK (1 << 8) +#define CC3_SR_R23_SR2_RISE_EDGE_TRIG_SHIFT (8) +#define CC3_SR_R23_SR2_FALL_EDGE_TRIG_MASK (1 << 9) +#define CC3_SR_R23_SR2_FALL_EDGE_TRIG_SHIFT (9) +#define CC3_SR_R23_SR_RISE_EDGE_TRIG_MASK (1 << 10) +#define CC3_SR_R23_SR_RISE_EDGE_TRIG_SHIFT (10) +#define CC3_SR_R23_SR_FALL_EDGE_TRIG_MASK (1 << 11) +#define CC3_SR_R23_SR_FALL_EDGE_TRIG_SHIFT (11) +#define CC3_SR_NUM_CLK_HIGH_MASK (0x7 << 12) +#define CC3_SR_NUM_CLK_HIGH_SHIFT (12) +#define CC3_SR_BIT15_TBD_MASK (1 << 15) +#define CC3_SR_BIT15_TBD_SHIFT (15) +#define CC3_SR_PHY_FUNC_PIC_MASK (1 << 16) +#define CC3_SR_PHY_FUNC_PIC_SHIFT (16) +#define CC3_SR_BIT17_19_TBD_MASK (0x7 << 17) +#define CC3_SR_BIT17_19_TBD_SHIFT (17) +#define CC3_SR_CHIP_TRIGGER_1_MASK (1 << 20) +#define CC3_SR_CHIP_TRIGGER_1_SHIFT (20) +#define CC3_SR_CHIP_TRIGGER_2_MASK (1 << 21) +#define CC3_SR_CHIP_TRIGGER_2_SHIFT (21) +#define CC3_SR_CHIP_TRIGGER_3_MASK (1 << 22) +#define CC3_SR_CHIP_TRIGGER_3_SHIFT (22) +#define CC3_SR_CHIP_TRIGGER_4_MASK (1 << 23) +#define CC3_SR_CHIP_TRIGGER_4_SHIFT (23) +#define CC3_SR_ALLOW_SBC_FUNC_PIC_MASK (1 << 24) +#define CC3_SR_ALLOW_SBC_FUNC_PIC_SHIFT (24) +#define CC3_SR_BIT25_26_TBD_MASK (0x3 << 25) +#define CC3_SR_BIT25_26_TBD_SHIFT (25) 
+#define CC3_SR_ALLOW_SBC_STBY_MASK (1 << 27) +#define CC3_SR_ALLOW_SBC_STBY_SHIFT (27) +#define CC3_SR_GPIO_MUX_MASK (0xF << 28) +#define CC3_SR_GPIO_MUX_SHIFT (28) + +/* Applies to 4335/4350/4345 */ +#define CC4_SR_INIT_ADDR_MASK (0x3FF0000) +#define CC4_4350_SR_ASM_ADDR (0x30) +#define CC4_4350_C0_SR_ASM_ADDR (0x0) +#define CC4_4335_SR_ASM_ADDR (0x48) +#define CC4_4345_SR_ASM_ADDR (0x48) +#define CC4_SR_INIT_ADDR_SHIFT (16) + +#define CC4_4350_EN_SR_CLK_ALP_MASK (1 << 30) +#define CC4_4350_EN_SR_CLK_ALP_SHIFT (30) +#define CC4_4350_EN_SR_CLK_HT_MASK (1 << 31) +#define CC4_4350_EN_SR_CLK_HT_SHIFT (31) + +#define VREG4_4350_MEMLPDO_PU_MASK (1 << 31) +#define VREG4_4350_MEMLPDO_PU_SHIFT 31 + +#define VREG6_4350_SR_EXT_CLKDIR_MASK (1 << 20) +#define VREG6_4350_SR_EXT_CLKDIR_SHIFT 20 +#define VREG6_4350_SR_EXT_CLKDIV_MASK (0x3 << 21) +#define VREG6_4350_SR_EXT_CLKDIV_SHIFT 21 +#define VREG6_4350_SR_EXT_CLKEN_MASK (1 << 23) +#define VREG6_4350_SR_EXT_CLKEN_SHIFT 23 + +#define CC5_4350_PMU_EN_ASSERT_MASK (1 << 13) +#define CC5_4350_PMU_EN_ASSERT_SHIFT (13) + +#define CC6_4350_PCIE_CLKREQ_WAKEUP_MASK (1 << 4) +#define CC6_4350_PCIE_CLKREQ_WAKEUP_SHIFT (4) +#define CC6_4350_PMU_WAKEUP_ALPAVAIL_MASK (1 << 6) +#define CC6_4350_PMU_WAKEUP_ALPAVAIL_SHIFT (6) +#define CC6_4350_PMU_EN_EXT_PERST_MASK (1 << 17) +#define CC6_4350_PMU_EN_EXT_PERST_SHIFT (17) +#define CC6_4350_PMU_EN_WAKEUP_MASK (1 << 18) +#define CC6_4350_PMU_EN_WAKEUP_SHIFT (18) + +#define CC7_4350_PMU_EN_ASSERT_L2_MASK (1 << 26) +#define CC7_4350_PMU_EN_ASSERT_L2_SHIFT (26) +#define CC7_4350_PMU_EN_MDIO_MASK (1 << 27) +#define CC7_4350_PMU_EN_MDIO_SHIFT (27) + +#define CC6_4345_PMU_EN_PERST_DEASSERT_MASK (1 << 13) +#define CC6_4345_PMU_EN_PERST_DEASSERT_SHIF (13) +#define CC6_4345_PMU_EN_L2_DEASSERT_MASK (1 << 14) +#define CC6_4345_PMU_EN_L2_DEASSERT_SHIF (14) +#define CC6_4345_PMU_EN_ASSERT_L2_MASK (1 << 15) +#define CC6_4345_PMU_EN_ASSERT_L2_SHIFT (15) +#define CC6_4345_PMU_EN_MDIO_MASK (1 << 24) +#define CC6_4345_PMU_EN_MDIO_SHIFT (24) + +/* GCI chipcontrol register indices */ +#define CC_GCI_CHIPCTRL_00 (0) +#define CC_GCI_CHIPCTRL_01 (1) +#define CC_GCI_CHIPCTRL_02 (2) +#define CC_GCI_CHIPCTRL_03 (3) +#define CC_GCI_CHIPCTRL_04 (4) +#define CC_GCI_CHIPCTRL_05 (5) +#define CC_GCI_CHIPCTRL_06 (6) +#define CC_GCI_CHIPCTRL_07 (7) +#define CC_GCI_CHIPCTRL_08 (8) +#define CC_GCI_CHIPCTRL_11 (11) +#define CC_GCI_XTAL_BUFSTRG_NFC (0xff << 12) + +#define CC_GCI_06_JTAG_SEL_SHIFT 4 +#define CC_GCI_06_JTAG_SEL_MASK (1 << 4) + +#define CC_GCI_NUMCHIPCTRLREGS(cap1) ((cap1 & 0xF00) >> 8) + +/* 4345 PMU resources */ +#define RES4345_LPLDO_PU 0 +#define RES4345_PMU_BG_PU 1 +#define RES4345_PMU_SLEEP 2 +#define RES4345_HSICLDO_PU 3 +#define RES4345_CBUCK_LPOM_PU 4 +#define RES4345_CBUCK_PFM_PU 5 +#define RES4345_COLD_START_WAIT 6 +#define RES4345_RSVD_7 7 +#define RES4345_LNLDO_PU 8 +#define RES4345_XTALLDO_PU 9 +#define RES4345_LDO3P3_PU 10 +#define RES4345_OTP_PU 11 +#define RES4345_XTAL_PU 12 +#define RES4345_SR_CLK_START 13 +#define RES4345_LQ_AVAIL 14 +#define RES4345_LQ_START 15 +#define RES4345_PERST_OVR 16 +#define RES4345_WL_CORE_RDY 17 +#define RES4345_ILP_REQ 18 +#define RES4345_ALP_AVAIL 19 +#define RES4345_MINI_PMU 20 +#define RES4345_RADIO_PU 21 +#define RES4345_SR_CLK_STABLE 22 +#define RES4345_SR_SAVE_RESTORE 23 +#define RES4345_SR_PHY_PWRSW 24 +#define RES4345_SR_VDDM_PWRSW 25 +#define RES4345_SR_SUBCORE_PWRSW 26 +#define RES4345_SR_SLEEP 27 +#define RES4345_HT_START 28 +#define RES4345_HT_AVAIL 29 +#define RES4345_MACPHY_CLK_AVAIL 
30 + +/* 4335 pins +* note: only the values set as default/used are added here. +*/ +#define CC4335_PIN_GPIO_00 (0) +#define CC4335_PIN_GPIO_01 (1) +#define CC4335_PIN_GPIO_02 (2) +#define CC4335_PIN_GPIO_03 (3) +#define CC4335_PIN_GPIO_04 (4) +#define CC4335_PIN_GPIO_05 (5) +#define CC4335_PIN_GPIO_06 (6) +#define CC4335_PIN_GPIO_07 (7) +#define CC4335_PIN_GPIO_08 (8) +#define CC4335_PIN_GPIO_09 (9) +#define CC4335_PIN_GPIO_10 (10) +#define CC4335_PIN_GPIO_11 (11) +#define CC4335_PIN_GPIO_12 (12) +#define CC4335_PIN_GPIO_13 (13) +#define CC4335_PIN_GPIO_14 (14) +#define CC4335_PIN_GPIO_15 (15) +#define CC4335_PIN_SDIO_CLK (16) +#define CC4335_PIN_SDIO_CMD (17) +#define CC4335_PIN_SDIO_DATA0 (18) +#define CC4335_PIN_SDIO_DATA1 (19) +#define CC4335_PIN_SDIO_DATA2 (20) +#define CC4335_PIN_SDIO_DATA3 (21) +#define CC4335_PIN_RF_SW_CTRL_6 (22) +#define CC4335_PIN_RF_SW_CTRL_7 (23) +#define CC4335_PIN_RF_SW_CTRL_8 (24) +#define CC4335_PIN_RF_SW_CTRL_9 (25) +/* Last GPIO Pad */ +#define CC4335_PIN_GPIO_LAST (31) + +/* 4335 GCI function sel values +*/ +#define CC4335_FNSEL_HWDEF (0) +#define CC4335_FNSEL_SAMEASPIN (1) +#define CC4335_FNSEL_GPIO0 (2) +#define CC4335_FNSEL_GPIO1 (3) +#define CC4335_FNSEL_GCI0 (4) +#define CC4335_FNSEL_GCI1 (5) +#define CC4335_FNSEL_UART (6) +#define CC4335_FNSEL_SFLASH (7) +#define CC4335_FNSEL_SPROM (8) +#define CC4335_FNSEL_MISC0 (9) +#define CC4335_FNSEL_MISC1 (10) +#define CC4335_FNSEL_MISC2 (11) +#define CC4335_FNSEL_IND (12) +#define CC4335_FNSEL_PDN (13) +#define CC4335_FNSEL_PUP (14) +#define CC4335_FNSEL_TRI (15) + +/* GCI Core Control Reg */ +#define GCI_CORECTRL_SR_MASK (1 << 0) /**< SECI block Reset */ +#define GCI_CORECTRL_RSL_MASK (1 << 1) /**< ResetSECILogic */ +#define GCI_CORECTRL_ES_MASK (1 << 2) /**< EnableSECI */ +#define GCI_CORECTRL_FSL_MASK (1 << 3) /**< Force SECI Out Low */ +#define GCI_CORECTRL_SOM_MASK (7 << 4) /**< SECI Op Mode */ +#define GCI_CORECTRL_US_MASK (1 << 7) /**< Update SECI */ +#define GCI_CORECTRL_BOS_MASK (1 << 8) /**< Break On Sleep */ + +/* 4345 pins +* note: only the values set as default/used are added here. 
+*/ +#define CC4345_PIN_GPIO_00 (0) +#define CC4345_PIN_GPIO_01 (1) +#define CC4345_PIN_GPIO_02 (2) +#define CC4345_PIN_GPIO_03 (3) +#define CC4345_PIN_GPIO_04 (4) +#define CC4345_PIN_GPIO_05 (5) +#define CC4345_PIN_GPIO_06 (6) +#define CC4345_PIN_GPIO_07 (7) +#define CC4345_PIN_GPIO_08 (8) +#define CC4345_PIN_GPIO_09 (9) +#define CC4345_PIN_GPIO_10 (10) +#define CC4345_PIN_GPIO_11 (11) +#define CC4345_PIN_GPIO_12 (12) +#define CC4345_PIN_GPIO_13 (13) +#define CC4345_PIN_GPIO_14 (14) +#define CC4345_PIN_GPIO_15 (15) +#define CC4345_PIN_GPIO_16 (16) +#define CC4345_PIN_SDIO_CLK (17) +#define CC4345_PIN_SDIO_CMD (18) +#define CC4345_PIN_SDIO_DATA0 (19) +#define CC4345_PIN_SDIO_DATA1 (20) +#define CC4345_PIN_SDIO_DATA2 (21) +#define CC4345_PIN_SDIO_DATA3 (22) +#define CC4345_PIN_RF_SW_CTRL_0 (23) +#define CC4345_PIN_RF_SW_CTRL_1 (24) +#define CC4345_PIN_RF_SW_CTRL_2 (25) +#define CC4345_PIN_RF_SW_CTRL_3 (26) +#define CC4345_PIN_RF_SW_CTRL_4 (27) +#define CC4345_PIN_RF_SW_CTRL_5 (28) +#define CC4345_PIN_RF_SW_CTRL_6 (29) +#define CC4345_PIN_RF_SW_CTRL_7 (30) +#define CC4345_PIN_RF_SW_CTRL_8 (31) +#define CC4345_PIN_RF_SW_CTRL_9 (32) + +/* 4345 GCI function sel values +*/ +#define CC4345_FNSEL_HWDEF (0) +#define CC4345_FNSEL_SAMEASPIN (1) +#define CC4345_FNSEL_GPIO0 (2) +#define CC4345_FNSEL_GPIO1 (3) +#define CC4345_FNSEL_GCI0 (4) +#define CC4345_FNSEL_GCI1 (5) +#define CC4345_FNSEL_UART (6) +#define CC4345_FNSEL_SFLASH (7) +#define CC4345_FNSEL_SPROM (8) +#define CC4345_FNSEL_MISC0 (9) +#define CC4345_FNSEL_MISC1 (10) +#define CC4345_FNSEL_MISC2 (11) +#define CC4345_FNSEL_IND (12) +#define CC4345_FNSEL_PDN (13) +#define CC4345_FNSEL_PUP (14) +#define CC4345_FNSEL_TRI (15) + +#define MUXENAB4345_UART_MASK (0x0000000f) +#define MUXENAB4345_UART_SHIFT 0 +#define MUXENAB4345_HOSTWAKE_MASK (0x000000f0) +#define MUXENAB4345_HOSTWAKE_SHIFT 4 + +/* 4349 Group (4349, 4355, 4359) GCI AVS function sel values */ +#define CC4349_GRP_GCI_AVS_CTRL_MASK (0xffe00000) +#define CC4349_GRP_GCI_AVS_CTRL_SHIFT (21) +#define CC4349_GRP_GCI_AVS_CTRL_ENAB (1 << 5) + +/* 4345 GCI AVS function sel values */ +#define CC4345_GCI_AVS_CTRL_MASK (0xfc) +#define CC4345_GCI_AVS_CTRL_SHIFT (2) +#define CC4345_GCI_AVS_CTRL_ENAB (1 << 5) + +/* GCI GPIO for function sel GCI-0/GCI-1 */ +#define CC_GCI_GPIO_0 (0) +#define CC_GCI_GPIO_1 (1) +#define CC_GCI_GPIO_2 (2) +#define CC_GCI_GPIO_3 (3) +#define CC_GCI_GPIO_4 (4) +#define CC_GCI_GPIO_5 (5) +#define CC_GCI_GPIO_6 (6) +#define CC_GCI_GPIO_7 (7) +#define CC_GCI_GPIO_8 (8) +#define CC_GCI_GPIO_9 (9) +#define CC_GCI_GPIO_10 (10) +#define CC_GCI_GPIO_11 (11) +#define CC_GCI_GPIO_12 (12) +#define CC_GCI_GPIO_13 (13) +#define CC_GCI_GPIO_14 (14) +#define CC_GCI_GPIO_15 (15) + + +/* indicates Invalid GPIO, e.g. 
when PAD GPIO doesn't map to GCI GPIO */
+#define CC_GCI_GPIO_INVALID 0xFF
+
+/* find the 4 bit mask given the bit position */
+#define GCIMASK(pos) (((uint32)0xF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL(val, pos) ((((uint32)val) << pos) & GCIMASK(pos))
+/* Extract nibble from a given position */
+#define GCIGETNBL(val, pos) ((val >> pos) & 0xF)
+
+/* find the 8 bit mask given the bit position */
+#define GCIMASK_8B(pos) (((uint32)0xFF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL_8B(val, pos) ((((uint32)val) << pos) & GCIMASK_8B(pos))
+/* Extract byte from a given position */
+#define GCIGETNBL_8B(val, pos) ((val >> pos) & 0xFF)
+
+/* find the 4 bit mask given the bit position */
+#define GCIMASK_4B(pos) (((uint32)0xF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL_4B(val, pos) ((((uint32)val) << pos) & GCIMASK_4B(pos))
+/* Extract nibble from a given position */
+#define GCIGETNBL_4B(val, pos) ((val >> pos) & 0xF)
+
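+/*
+ * Illustrative use (not part of the original header): for bit position 8,
+ * GCIMASK(8) is 0x00000F00 and GCIPOSVAL(0x5, 8) is 0x00000500, a value
+ * that can be OR'd directly into a GCI chipcontrol register;
+ * GCIGETNBL(reg, 8) reads the same nibble back. The _8B and _4B variants
+ * work identically on byte- and nibble-sized fields.
+ */
+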
+/* 4335 GCI Intstatus(Mask)/WakeMask Register bits. */
+#define GCI_INTSTATUS_RBI (1 << 0) /**< Rx Break Interrupt */
+#define GCI_INTSTATUS_UB (1 << 1) /**< UART Break Interrupt */
+#define GCI_INTSTATUS_SPE (1 << 2) /**< SECI Parity Error Interrupt */
+#define GCI_INTSTATUS_SFE (1 << 3) /**< SECI Framing Error Interrupt */
+#define GCI_INTSTATUS_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */
+#define GCI_INTSTATUS_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */
+#define GCI_INTSTATUS_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */
+#define GCI_INTSTATUS_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */
+#define GCI_INTSTATUS_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */
+#define GCI_INTSTATUS_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */
+#define GCI_INTSTATUS_GPIOINT (1 << 25) /**< GCIGpioInt */
+#define GCI_INTSTATUS_GPIOWAKE (1 << 26) /**< GCIGpioWake */
+
+/* 4335 GCI IntMask Register bits. */
+#define GCI_INTMASK_RBI (1 << 0) /**< Rx Break Interrupt */
+#define GCI_INTMASK_UB (1 << 1) /**< UART Break Interrupt */
+#define GCI_INTMASK_SPE (1 << 2) /**< SECI Parity Error Interrupt */
+#define GCI_INTMASK_SFE (1 << 3) /**< SECI Framing Error Interrupt */
+#define GCI_INTMASK_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */
+#define GCI_INTMASK_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */
+#define GCI_INTMASK_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */
+#define GCI_INTMASK_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */
+#define GCI_INTMASK_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */
+#define GCI_INTMASK_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */
+#define GCI_INTMASK_GPIOINT (1 << 25) /**< GCIGpioInt */
+#define GCI_INTMASK_GPIOWAKE (1 << 26) /**< GCIGpioWake */
+
+/* 4335 GCI WakeMask Register bits. */
+#define GCI_WAKEMASK_RBI (1 << 0) /**< Rx Break Interrupt */
+#define GCI_WAKEMASK_UB (1 << 1) /**< UART Break Interrupt */
+#define GCI_WAKEMASK_SPE (1 << 2) /**< SECI Parity Error Interrupt */
+#define GCI_WAKEMASK_SFE (1 << 3) /**< SECI Framing Error Interrupt */
+#define GCI_WAKE_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */
+#define GCI_WAKEMASK_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */
+#define GCI_WAKEMASK_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */
+#define GCI_WAKEMASK_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */
+#define GCI_WAKEMASK_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */
+#define GCI_WAKEMASK_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */
+#define GCI_WAKEMASK_GPIOINT (1 << 25) /**< GCIGpioInt */
+#define GCI_WAKEMASK_GPIOWAKE (1 << 26) /**< GCIGpioWake */
+
+#define GCI_WAKE_ON_GCI_GPIO1 1
+#define GCI_WAKE_ON_GCI_GPIO2 2
+#define GCI_WAKE_ON_GCI_GPIO3 3
+#define GCI_WAKE_ON_GCI_GPIO4 4
+#define GCI_WAKE_ON_GCI_GPIO5 5
+#define GCI_WAKE_ON_GCI_GPIO6 6
+#define GCI_WAKE_ON_GCI_GPIO7 7
+#define GCI_WAKE_ON_GCI_GPIO8 8
+#define GCI_WAKE_ON_GCI_SECI_IN 9
+
+/* 4335 MUX options. Each nibble belongs to a setting. A non-zero value specifies
+ * a logic function; for now only UART, for the bootloader.
+ */
+#define MUXENAB4335_UART_MASK (0x0000000f)
+#define MUXENAB4335_UART_SHIFT 0
+#define MUXENAB4335_HOSTWAKE_MASK (0x000000f0) /**< configure GPIO for SDIO host_wake */
+#define MUXENAB4335_HOSTWAKE_SHIFT 4
+#define MUXENAB4335_GETIX(val, name) \
+	((((val) & MUXENAB4335_ ## name ## _MASK) >> MUXENAB4335_ ## name ## _SHIFT) - 1)
+
+/*
+ * Maximum delay for the PMU state transition in us.
+ * This is an upper bound intended for spinwaits etc.
+ */
+#define PMU_MAX_TRANSITION_DLY 15000
+
+/* PMU resource up transition time in ILP cycles */
+#define PMURES_UP_TRANSITION 2
+
+/* SECI configuration */
+#define SECI_MODE_UART 0x0
+#define SECI_MODE_SECI 0x1
+#define SECI_MODE_LEGACY_3WIRE_BT 0x2
+#define SECI_MODE_LEGACY_3WIRE_WLAN 0x3
+#define SECI_MODE_HALF_SECI 0x4
+
+#define SECI_RESET (1 << 0)
+#define SECI_RESET_BAR_UART (1 << 1)
+#define SECI_ENAB_SECI_ECI (1 << 2)
+#define SECI_ENAB_SECIOUT_DIS (1 << 3)
+#define SECI_MODE_MASK 0x7
+#define SECI_MODE_SHIFT 4 /* (bits 5, 6, 7) */
+#define SECI_UPD_SECI (1 << 7)
+
+#define SECI_SLIP_ESC_CHAR 0xDB
+#define SECI_SIGNOFF_0 SECI_SLIP_ESC_CHAR
+#define SECI_SIGNOFF_1 0
+#define SECI_REFRESH_REQ 0xDA
+
+/* seci clk_ctl_st bits */
+#define CLKCTL_STS_SECI_CLK_REQ (1 << 8)
+#define CLKCTL_STS_SECI_CLK_AVAIL (1 << 24)
+
+#define SECI_UART_MSR_CTS_STATE (1 << 0)
+#define SECI_UART_MSR_RTS_STATE (1 << 1)
+#define SECI_UART_SECI_IN_STATE (1 << 2)
+#define SECI_UART_SECI_IN2_STATE (1 << 3)
+
+/* GCI RX FIFO Control Register */
+#define GCI_RXF_LVL_MASK (0xFF << 0)
+#define GCI_RXF_TIMEOUT_MASK (0xFF << 8)
+
+/* GCI UART Registers' Bit definitions */
+/* Seci Fifo Level Register */
+#define SECI_TXF_LVL_MASK (0x3F << 8)
+#define TXF_AE_LVL_DEFAULT 0x4
+#define SECI_RXF_LVL_FC_MASK (0x3F << 16)
+
+/* SeciUARTFCR Bit definitions */
+#define SECI_UART_FCR_RFR (1 << 0)
+#define SECI_UART_FCR_TFR (1 << 1)
+#define SECI_UART_FCR_SR (1 << 2)
+#define SECI_UART_FCR_THP (1 << 3)
+#define SECI_UART_FCR_AB (1 << 4)
+#define SECI_UART_FCR_ATOE (1 << 5)
+#define SECI_UART_FCR_ARTSOE (1 << 6)
+#define SECI_UART_FCR_ABV (1 << 7)
+#define SECI_UART_FCR_ALM (1 << 8)
+
+/* SECI UART LCR register bits */
+#define SECI_UART_LCR_STOP_BITS (1 << 0) /* 0 - 1bit, 1 - 2bits */
+#define SECI_UART_LCR_PARITY_EN (1 << 1)
+#define
SECI_UART_LCR_PARITY (1 << 2) /* 0 - odd, 1 - even */ +#define SECI_UART_LCR_RX_EN (1 << 3) +#define SECI_UART_LCR_LBRK_CTRL (1 << 4) /* 1 => SECI_OUT held low */ +#define SECI_UART_LCR_TXO_EN (1 << 5) +#define SECI_UART_LCR_RTSO_EN (1 << 6) +#define SECI_UART_LCR_SLIPMODE_EN (1 << 7) +#define SECI_UART_LCR_RXCRC_CHK (1 << 8) +#define SECI_UART_LCR_TXCRC_INV (1 << 9) +#define SECI_UART_LCR_TXCRC_LSBF (1 << 10) +#define SECI_UART_LCR_TXCRC_EN (1 << 11) +#define SECI_UART_LCR_RXSYNC_EN (1 << 12) + +#define SECI_UART_MCR_TX_EN (1 << 0) +#define SECI_UART_MCR_PRTS (1 << 1) +#define SECI_UART_MCR_SWFLCTRL_EN (1 << 2) +#define SECI_UART_MCR_HIGHRATE_EN (1 << 3) +#define SECI_UART_MCR_LOOPBK_EN (1 << 4) +#define SECI_UART_MCR_AUTO_RTS (1 << 5) +#define SECI_UART_MCR_AUTO_TX_DIS (1 << 6) +#define SECI_UART_MCR_BAUD_ADJ_EN (1 << 7) +#define SECI_UART_MCR_XONOFF_RPT (1 << 9) + +/* SeciUARTLSR Bit Mask */ +#define SECI_UART_LSR_RXOVR_MASK (1 << 0) +#define SECI_UART_LSR_RFF_MASK (1 << 1) +#define SECI_UART_LSR_TFNE_MASK (1 << 2) +#define SECI_UART_LSR_TI_MASK (1 << 3) +#define SECI_UART_LSR_TPR_MASK (1 << 4) +#define SECI_UART_LSR_TXHALT_MASK (1 << 5) + +/* SeciUARTMSR Bit Mask */ +#define SECI_UART_MSR_CTSS_MASK (1 << 0) +#define SECI_UART_MSR_RTSS_MASK (1 << 1) +#define SECI_UART_MSR_SIS_MASK (1 << 2) +#define SECI_UART_MSR_SIS2_MASK (1 << 3) + +/* SeciUARTData Bits */ +#define SECI_UART_DATA_RF_NOT_EMPTY_BIT (1 << 12) +#define SECI_UART_DATA_RF_FULL_BIT (1 << 13) +#define SECI_UART_DATA_RF_OVRFLOW_BIT (1 << 14) +#define SECI_UART_DATA_FIFO_PTR_MASK 0xFF +#define SECI_UART_DATA_RF_RD_PTR_SHIFT 16 +#define SECI_UART_DATA_RF_WR_PTR_SHIFT 24 + +/* LTECX: ltecxmux */ +#define LTECX_EXTRACT_MUX(val, idx) (getbit4(&(val), (idx))) + +/* LTECX: ltecxmux MODE */ +#define LTECX_MUX_MODE_IDX 0 +#define LTECX_MUX_MODE_WCI2 0x0 +#define LTECX_MUX_MODE_GPIO 0x1 + + +/* LTECX GPIO Information Index */ +#define LTECX_NVRAM_FSYNC_IDX 0 +#define LTECX_NVRAM_LTERX_IDX 1 +#define LTECX_NVRAM_LTETX_IDX 2 +#define LTECX_NVRAM_WLPRIO_IDX 3 + +/* LTECX WCI2 Information Index */ +#define LTECX_NVRAM_WCI2IN_IDX 0 +#define LTECX_NVRAM_WCI2OUT_IDX 1 + +/* LTECX: Macros to get GPIO/FNSEL/GCIGPIO */ +#define LTECX_EXTRACT_PADNUM(val, idx) (getbit8(&(val), (idx))) +#define LTECX_EXTRACT_FNSEL(val, idx) (getbit4(&(val), (idx))) +#define LTECX_EXTRACT_GCIGPIO(val, idx) (getbit4(&(val), (idx))) + +/* WLAN channel numbers - used from wifi.h */ + +/* WLAN BW */ +#define ECI_BW_20 0x0 +#define ECI_BW_25 0x1 +#define ECI_BW_30 0x2 +#define ECI_BW_35 0x3 +#define ECI_BW_40 0x4 +#define ECI_BW_45 0x5 +#define ECI_BW_50 0x6 +#define ECI_BW_ALL 0x7 + +/* WLAN - number of antenna */ +#define WLAN_NUM_ANT1 TXANT_0 +#define WLAN_NUM_ANT2 TXANT_1 + +/* otpctrl1 0xF4 */ +#define OTPC_FORCE_PWR_OFF 0x02000000 +/* chipcommon s/r registers introduced with cc rev >= 48 */ +#define CC_SR_CTL0_ENABLE_MASK 0x1 +#define CC_SR_CTL0_ENABLE_SHIFT 0 +#define CC_SR_CTL0_EN_SR_ENG_CLK_SHIFT 1 /* sr_clk to sr_memory enable */ +#define CC_SR_CTL0_RSRC_TRIGGER_SHIFT 2 /* Rising edge resource trigger 0 to sr_engine */ +#define CC_SR_CTL0_MIN_DIV_SHIFT 6 /* Min division value for fast clk in sr_engine */ +#define CC_SR_CTL0_EN_SBC_STBY_SHIFT 16 /* Allow Subcore mem StandBy? 
*/
+#define CC_SR_CTL0_EN_SR_ALP_CLK_MASK_SHIFT 18
+#define CC_SR_CTL0_EN_SR_HT_CLK_SHIFT 19
+#define CC_SR_CTL0_ALLOW_PIC_SHIFT 20 /* Allow pic to separate power domains */
+#define CC_SR_CTL0_MAX_SR_LQ_CLK_CNT_SHIFT 25
+#define CC_SR_CTL0_EN_MEM_DISABLE_FOR_SLEEP 30
+
+#define CC_SR_CTL1_SR_INIT_MASK 0x3FF
+#define CC_SR_CTL1_SR_INIT_SHIFT 0
+
+#define ECI_INLO_PKTDUR_MASK 0x000000f0 /* [7:4] - 4 bits */
+#define ECI_INLO_PKTDUR_SHIFT 4
+
+/* gci chip control bits */
+#define GCI_GPIO_CHIPCTRL_ENAB_IN_BIT 0
+#define GCI_GPIO_CHIPCTRL_ENAB_OP_BIT 1
+#define GCI_GPIO_CHIPCTRL_INVERT_BIT 2
+#define GCI_GPIO_CHIPCTRL_PULLUP_BIT 3
+#define GCI_GPIO_CHIPCTRL_PULLDN_BIT 4
+#define GCI_GPIO_CHIPCTRL_ENAB_BTSIG_BIT 5
+#define GCI_GPIO_CHIPCTRL_ENAB_OD_OP_BIT 6
+#define GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT 7
+
+/* gci GPIO input status bits */
+#define GCI_GPIO_STS_VALUE_BIT 0
+#define GCI_GPIO_STS_POS_EDGE_BIT 1
+#define GCI_GPIO_STS_NEG_EDGE_BIT 2
+#define GCI_GPIO_STS_FAST_EDGE_BIT 3
+#define GCI_GPIO_STS_CLEAR 0xF
+
+#define GCI_GPIO_STS_VALUE (1 << GCI_GPIO_STS_VALUE_BIT)
+
+#endif /* _SBCHIPC_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbconfig.h b/drivers/net/wireless/bcmdhd/include/sbconfig.h
new file mode 100644
index 000000000000..53e26ae4e320
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbconfig.h
@@ -0,0 +1,285 @@
+/*
+ * Broadcom SiliconBackplane hardware register definitions.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: sbconfig.h 530150 2015-01-29 08:43:40Z $
+ */
+
+#ifndef _SBCONFIG_H
+#define _SBCONFIG_H
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+/* enumeration in SB is based on the premise that cores are contiguous in the
+ * enumeration space.
+ */
+#define SB_BUS_SIZE 0x10000 /**< Each bus gets 64Kbytes for cores */
+#define SB_BUS_BASE(b) (SI_ENUM_BASE + (b) * SB_BUS_SIZE)
+#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE) /**< Max cores per bus */
+
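+/*
+ * Illustrative example (not part of the original header; SI_ENUM_BASE and
+ * SI_CORE_SIZE are defined in other headers of this driver): SB_BUS_BASE(1)
+ * evaluates to SI_ENUM_BASE + 0x10000, the enumeration base of the second
+ * backplane bus.
+ */
+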
+/*
+ * Sonics Configuration Space Registers.
+ */
+#define SBCONFIGOFF 0xf00 /**< core sbconfig regs are top 256bytes of regs */
+#define SBCONFIGSIZE 256 /**< sizeof (sbconfig_t) */
+
+#define SBIPSFLAG 0x08
+#define SBTPSFLAG 0x18
+#define SBTMERRLOGA 0x48 /**< sonics >= 2.3 */
+#define SBTMERRLOG 0x50 /**< sonics >= 2.3 */
+#define SBADMATCH3 0x60
+#define SBADMATCH2 0x68
+#define SBADMATCH1 0x70
+#define SBIMSTATE 0x90
+#define SBINTVEC 0x94
+#define SBTMSTATELOW 0x98
+#define SBTMSTATEHIGH 0x9c
+#define SBBWA0 0xa0
+#define SBIMCONFIGLOW 0xa8
+#define SBIMCONFIGHIGH 0xac
+#define SBADMATCH0 0xb0
+#define SBTMCONFIGLOW 0xb8
+#define SBTMCONFIGHIGH 0xbc
+#define SBBCONFIG 0xc0
+#define SBBSTATE 0xc8
+#define SBACTCNFG 0xd8
+#define SBFLAGST 0xe8
+#define SBIDLOW 0xf8
+#define SBIDHIGH 0xfc
+
+/* All the previous registers are above SBCONFIGOFF, but with Sonics 2.3, we have
+ * a few registers *below* that line. I think it would be very confusing to try
+ * and change the value of SBCONFIGOFF, so I'm defining them as absolute offsets here.
+ */
+
+#define SBIMERRLOGA 0xea8
+#define SBIMERRLOG 0xeb0
+#define SBTMPORTCONNID0 0xed8
+#define SBTMPORTLOCK0 0xef8
+
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+
+typedef volatile struct _sbconfig {
+	uint32 PAD[2];
+	uint32 sbipsflag; /**< initiator port ocp slave flag */
+	uint32 PAD[3];
+	uint32 sbtpsflag; /**< target port ocp slave flag */
+	uint32 PAD[11];
+	uint32 sbtmerrloga; /**< (sonics >= 2.3) */
+	uint32 PAD;
+	uint32 sbtmerrlog; /**< (sonics >= 2.3) */
+	uint32 PAD[3];
+	uint32 sbadmatch3; /**< address match3 */
+	uint32 PAD;
+	uint32 sbadmatch2; /**< address match2 */
+	uint32 PAD;
+	uint32 sbadmatch1; /**< address match1 */
+	uint32 PAD[7];
+	uint32 sbimstate; /**< initiator agent state */
+	uint32 sbintvec; /**< interrupt mask */
+	uint32 sbtmstatelow; /**< target state */
+	uint32 sbtmstatehigh; /**< target state */
+	uint32 sbbwa0; /**< bandwidth allocation table0 */
+	uint32 PAD;
+	uint32 sbimconfiglow; /**< initiator configuration */
+	uint32 sbimconfighigh; /**< initiator configuration */
+	uint32 sbadmatch0; /**< address match0 */
+	uint32 PAD;
+	uint32 sbtmconfiglow; /**< target configuration */
+	uint32 sbtmconfighigh; /**< target configuration */
+	uint32 sbbconfig; /**< broadcast configuration */
+	uint32 PAD;
+	uint32 sbbstate; /**< broadcast state */
+	uint32 PAD[3];
+	uint32 sbactcnfg; /**< activate configuration */
+	uint32 PAD[3];
+	uint32 sbflagst; /**< current sbflags */
+	uint32 PAD[3];
+	uint32 sbidlow; /**< identification */
+	uint32 sbidhigh; /**< identification */
+} sbconfig_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
+/* sbipsflag */
+#define SBIPS_INT1_MASK 0x3f /**< which sbflags get routed to mips interrupt 1 */
+#define SBIPS_INT1_SHIFT 0
+#define SBIPS_INT2_MASK 0x3f00 /**< which sbflags get routed to mips interrupt 2 */
+#define SBIPS_INT2_SHIFT 8
+#define SBIPS_INT3_MASK 0x3f0000 /**< which sbflags get routed to mips interrupt 3 */
+#define SBIPS_INT3_SHIFT 16
+#define SBIPS_INT4_MASK 0x3f000000 /**< which sbflags get routed to mips interrupt 4 */
+#define SBIPS_INT4_SHIFT 24
+
+/* sbtpsflag */
+#define SBTPS_NUM0_MASK 0x3f /**< interrupt sbFlag # generated by this core */
+#define SBTPS_F0EN0 0x40 /**< interrupt is always sent on the backplane */
+
+/* sbtmerrlog */
+#define SBTMEL_CM 0x00000007 /**< command */
+#define SBTMEL_CI 0x0000ff00 /**< connection id */
+#define SBTMEL_EC 0x0f000000 /**< error code */
+#define SBTMEL_ME 0x80000000 /**< multiple error */
+
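+/*
+ * Illustrative decode (not part of the original header): given a value
+ * errlog read from the sbtmerrlog register, the fields can be recovered
+ * with the masks above, e.g.
+ *
+ *	uint32 ec = (errlog & SBTMEL_EC) >> 24;	(error code, bits 27:24)
+ *
+ * and (errlog & SBTMEL_ME) != 0 indicates that multiple errors were logged.
+ */
+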
+/* sbimstate */
+#define SBIM_PC 0xf /**< pipecount */
+#define SBIM_AP_MASK 0x30 /**< arbitration policy */
+#define SBIM_AP_BOTH 0x00 /**< use both timeslices and token */
+#define SBIM_AP_TS 0x10 /**< use timeslices only */
+#define SBIM_AP_TK 0x20 /**< use token only */
+#define SBIM_AP_RSV 0x30 /**< reserved */
+#define SBIM_IBE 0x20000 /**< inband error */
+#define SBIM_TO 0x40000 /**< timeout */
+#define SBIM_BY 0x01800000 /**< busy (sonics >= 2.3) */
+#define SBIM_RJ 0x02000000 /**< reject (sonics >= 2.3) */
+
+/* sbtmstatelow */
+#define SBTML_RESET 0x0001 /**< reset */
+#define SBTML_REJ_MASK 0x0006 /**< reject field */
+#define SBTML_REJ 0x0002 /**< reject */
+#define SBTML_TMPREJ 0x0004 /**< temporary reject, for error recovery */
+
+#define SBTML_SICF_SHIFT 16 /**< Shift to locate the SI control flags in sbtml */
+
+/* sbtmstatehigh */
+#define SBTMH_SERR 0x0001 /**< serror */
+#define SBTMH_INT 0x0002 /**< interrupt */
+#define SBTMH_BUSY 0x0004 /**< busy */
+#define SBTMH_TO 0x0020 /**< timeout (sonics >= 2.3) */
+
+#define SBTMH_SISF_SHIFT 16 /**< Shift to locate the SI status flags in sbtmh */
+
+/* sbbwa0 */
+#define SBBWA_TAB0_MASK 0xffff /**< lookup table 0 */
+#define SBBWA_TAB1_MASK 0xffff /**< lookup table 1 */
+#define SBBWA_TAB1_SHIFT 16
+
+/* sbimconfiglow */
+#define SBIMCL_STO_MASK 0x7 /**< service timeout */
+#define SBIMCL_RTO_MASK 0x70 /**< request timeout */
+#define SBIMCL_RTO_SHIFT 4
+#define SBIMCL_CID_MASK 0xff0000 /**< connection id */
+#define SBIMCL_CID_SHIFT 16
+
+/* sbimconfighigh */
+#define SBIMCH_IEM_MASK 0xc /**< inband error mode */
+#define SBIMCH_TEM_MASK 0x30 /**< timeout error mode */
+#define SBIMCH_TEM_SHIFT 4
+#define SBIMCH_BEM_MASK 0xc0 /**< bus error mode */
+#define SBIMCH_BEM_SHIFT 6
+
+/* sbadmatch0 */
+#define SBAM_TYPE_MASK 0x3 /**< address type */
+#define SBAM_AD64 0x4 /**< reserved */
+#define SBAM_ADINT0_MASK 0xf8 /**< type0 size */
+#define SBAM_ADINT0_SHIFT 3
+#define SBAM_ADINT1_MASK 0x1f8 /**< type1 size */
+#define SBAM_ADINT1_SHIFT 3
+#define SBAM_ADINT2_MASK 0x1f8 /**< type2 size */
+#define SBAM_ADINT2_SHIFT 3
+#define SBAM_ADEN 0x400 /**< enable */
+#define SBAM_ADNEG 0x800 /**< negative decode */
+#define SBAM_BASE0_MASK 0xffffff00 /**< type0 base address */
+#define SBAM_BASE0_SHIFT 8
+#define SBAM_BASE1_MASK 0xfffff000 /**< type1 base address for the core */
+#define SBAM_BASE1_SHIFT 12
+#define SBAM_BASE2_MASK 0xffff0000 /**< type2 base address for the core */
+#define SBAM_BASE2_SHIFT 16
+
+/* sbtmconfiglow */
+#define SBTMCL_CD_MASK 0xff /**< clock divide */
+#define SBTMCL_CO_MASK 0xf800 /**< clock offset */
+#define SBTMCL_CO_SHIFT 11
+#define SBTMCL_IF_MASK 0xfc0000 /**< interrupt flags */
+#define SBTMCL_IF_SHIFT 18
+#define SBTMCL_IM_MASK 0x3000000 /**< interrupt mode */
+#define SBTMCL_IM_SHIFT 24
+
+/* sbtmconfighigh */
+#define SBTMCH_BM_MASK 0x3 /**< busy mode */
+#define SBTMCH_RM_MASK 0x3 /**< retry mode */
+#define SBTMCH_RM_SHIFT 2
+#define SBTMCH_SM_MASK 0x30 /**< stop mode */
+#define SBTMCH_SM_SHIFT 4
+#define SBTMCH_EM_MASK 0x300 /**< sb error mode */
+#define SBTMCH_EM_SHIFT 8
+#define SBTMCH_IM_MASK 0xc00 /**< int mode */
+#define SBTMCH_IM_SHIFT 10
+
+/* sbbconfig */
+#define SBBC_LAT_MASK 0x3 /**< sb latency */
+#define SBBC_MAX0_MASK 0xf0000 /**< maxccntr0 */
+#define SBBC_MAX0_SHIFT 16
+#define SBBC_MAX1_MASK 0xf00000 /**< maxccntr1 */
+#define SBBC_MAX1_SHIFT 20
+
+/* sbbstate */
+#define SBBS_SRD 0x1 /**< st reg disable */
+#define SBBS_HRD 0x2 /**< hold reg disable */
+
+/* sbidlow */
+#define SBIDL_CS_MASK 0x3 /**< config space */
+#define SBIDL_AR_MASK 0x38 /**< # address ranges supported */ +#define SBIDL_AR_SHIFT 3 +#define SBIDL_SYNCH 0x40 /**< sync */ +#define SBIDL_INIT 0x80 /**< initiator */ +#define SBIDL_MINLAT_MASK 0xf00 /**< minimum backplane latency */ +#define SBIDL_MINLAT_SHIFT 8 +#define SBIDL_MAXLAT 0xf000 /**< maximum backplane latency */ +#define SBIDL_MAXLAT_SHIFT 12 +#define SBIDL_FIRST 0x10000 /**< this initiator is first */ +#define SBIDL_CW_MASK 0xc0000 /**< cycle counter width */ +#define SBIDL_CW_SHIFT 18 +#define SBIDL_TP_MASK 0xf00000 /**< target ports */ +#define SBIDL_TP_SHIFT 20 +#define SBIDL_IP_MASK 0xf000000 /**< initiator ports */ +#define SBIDL_IP_SHIFT 24 +#define SBIDL_RV_MASK 0xf0000000 /**< sonics backplane revision code */ +#define SBIDL_RV_SHIFT 28 +#define SBIDL_RV_2_2 0x00000000 /**< version 2.2 or earlier */ +#define SBIDL_RV_2_3 0x10000000 /**< version 2.3 */ + +/* sbidhigh */ +#define SBIDH_RC_MASK 0x000f /**< revision code */ +#define SBIDH_RCE_MASK 0x7000 /**< revision code extension field */ +#define SBIDH_RCE_SHIFT 8 +#define SBCOREREV(sbidh) \ + ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK)) +#define SBIDH_CC_MASK 0x8ff0 /**< core code */ +#define SBIDH_CC_SHIFT 4 +#define SBIDH_VC_MASK 0xffff0000 /**< vendor code */ +#define SBIDH_VC_SHIFT 16 + +#define SB_COMMIT 0xfd8 /**< update buffered registers value */ + +/* vendor codes */ +#define SB_VEND_BCM 0x4243 /**< Broadcom's SB vendor code */ + +#endif /* _SBCONFIG_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sbhnddma.h b/drivers/net/wireless/bcmdhd/include/sbhnddma.h new file mode 100644 index 000000000000..5692ea954b35 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sbhnddma.h @@ -0,0 +1,420 @@ +/* + * Generic Broadcom Home Networking Division (HND) DMA engine HW interface + * This supports the following chips: BCM42xx, 44xx, 47xx . + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
+ * + * + * <> + * + * $Id: sbhnddma.h 530150 2015-01-29 08:43:40Z $ + */ + +#ifndef _sbhnddma_h_ +#define _sbhnddma_h_ + +/* DMA structure: + * support two DMA engines: 32 bits address or 64 bit addressing + * basic DMA register set is per channel(transmit or receive) + * a pair of channels is defined for convenience + */ + + +/* 32 bits addressing */ + +/** dma registers per channel(xmt or rcv) */ +typedef volatile struct { + uint32 control; /**< enable, et al */ + uint32 addr; /**< descriptor ring base address (4K aligned) */ + uint32 ptr; /**< last descriptor posted to chip */ + uint32 status; /**< current active descriptor, et al */ +} dma32regs_t; + +typedef volatile struct { + dma32regs_t xmt; /**< dma tx channel */ + dma32regs_t rcv; /**< dma rx channel */ +} dma32regp_t; + +typedef volatile struct { /* diag access */ + uint32 fifoaddr; /**< diag address */ + uint32 fifodatalow; /**< low 32bits of data */ + uint32 fifodatahigh; /**< high 32bits of data */ + uint32 pad; /**< reserved */ +} dma32diag_t; + +/** + * DMA Descriptor + * Descriptors are only read by the hardware, never written back. + */ +typedef volatile struct { + uint32 ctrl; /**< misc control bits & bufcount */ + uint32 addr; /**< data buffer address */ +} dma32dd_t; + +/** Each descriptor ring must be 4096byte aligned, and fit within a single 4096byte page. */ +#define D32RINGALIGN_BITS 12 +#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS) +#define D32RINGALIGN (1 << D32RINGALIGN_BITS) + +#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t)) + +/* transmit channel control */ +#define XC_XE ((uint32)1 << 0) /**< transmit enable */ +#define XC_SE ((uint32)1 << 1) /**< transmit suspend request */ +#define XC_LE ((uint32)1 << 2) /**< loopback enable */ +#define XC_FL ((uint32)1 << 4) /**< flush request */ +#define XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */ +#define XC_MR_SHIFT 6 +#define XC_PD ((uint32)1 << 11) /**< parity check disable */ +#define XC_AE ((uint32)3 << 16) /**< address extension bits */ +#define XC_AE_SHIFT 16 +#define XC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define XC_BL_SHIFT 18 +#define XC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define XC_PC_SHIFT 21 +#define XC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define XC_PT_SHIFT 24 + +/** Multiple outstanding reads */ +#define DMA_MR_1 0 +#define DMA_MR_2 1 +#define DMA_MR_4 2 +#define DMA_MR_8 3 +#define DMA_MR_12 4 +#define DMA_MR_16 5 +#define DMA_MR_20 6 +#define DMA_MR_32 7 + +/** DMA Burst Length in bytes */ +#define DMA_BL_16 0 +#define DMA_BL_32 1 +#define DMA_BL_64 2 +#define DMA_BL_128 3 +#define DMA_BL_256 4 +#define DMA_BL_512 5 +#define DMA_BL_1024 6 + +/** Prefetch control */ +#define DMA_PC_0 0 +#define DMA_PC_4 1 +#define DMA_PC_8 2 +#define DMA_PC_16 3 +/* others: reserved */ + +/** Prefetch threshold */ +#define DMA_PT_1 0 +#define DMA_PT_2 1 +#define DMA_PT_4 2 +#define DMA_PT_8 3 + +/* transmit descriptor table pointer */ +#define XP_LD_MASK 0xfff /**< last valid descriptor */ + +/* transmit channel status */ +#define XS_CD_MASK 0x0fff /**< current descriptor pointer */ +#define XS_XS_MASK 0xf000 /**< transmit state */ +#define XS_XS_SHIFT 12 +#define XS_XS_DISABLED 0x0000 /**< disabled */ +#define XS_XS_ACTIVE 0x1000 /**< active */ +#define XS_XS_IDLE 0x2000 /**< idle wait */ +#define XS_XS_STOPPED 0x3000 /**< stopped */ +#define XS_XS_SUSP 0x4000 /**< suspend pending */ +#define XS_XE_MASK 0xf0000 /**< transmit errors */ +#define XS_XE_SHIFT 16 +#define XS_XE_NOERR 0x00000 /**< no error */ +#define XS_XE_DPE 
0x10000 /**< descriptor protocol error */ +#define XS_XE_DFU 0x20000 /**< data fifo underrun */ +#define XS_XE_BEBR 0x30000 /**< bus error on buffer read */ +#define XS_XE_BEDA 0x40000 /**< bus error on descriptor access */ +#define XS_AD_MASK 0xfff00000 /**< active descriptor */ +#define XS_AD_SHIFT 20 + +/* receive channel control */ +#define RC_RE ((uint32)1 << 0) /**< receive enable */ +#define RC_RO_MASK 0xfe /**< receive frame offset */ +#define RC_RO_SHIFT 1 +#define RC_FM ((uint32)1 << 8) /**< direct fifo receive (pio) mode */ +#define RC_SH ((uint32)1 << 9) /**< separate rx header descriptor enable */ +#define RC_OC ((uint32)1 << 10) /**< overflow continue */ +#define RC_PD ((uint32)1 << 11) /**< parity check disable */ +#define RC_AE ((uint32)3 << 16) /**< address extension bits */ +#define RC_AE_SHIFT 16 +#define RC_BL_MASK 0x001C0000 /**< BurstLen bits */ +#define RC_BL_SHIFT 18 +#define RC_PC_MASK 0x00E00000 /**< Prefetch control */ +#define RC_PC_SHIFT 21 +#define RC_PT_MASK 0x03000000 /**< Prefetch threshold */ +#define RC_PT_SHIFT 24 + +/* receive descriptor table pointer */ +#define RP_LD_MASK 0xfff /**< last valid descriptor */ + +/* receive channel status */ +#define RS_CD_MASK 0x0fff /**< current descriptor pointer */ +#define RS_RS_MASK 0xf000 /**< receive state */ +#define RS_RS_SHIFT 12 +#define RS_RS_DISABLED 0x0000 /**< disabled */ +#define RS_RS_ACTIVE 0x1000 /**< active */ +#define RS_RS_IDLE 0x2000 /**< idle wait */ +#define RS_RS_STOPPED 0x3000 /**< reserved */ +#define RS_RE_MASK 0xf0000 /**< receive errors */ +#define RS_RE_SHIFT 16 +#define RS_RE_NOERR 0x00000 /**< no error */ +#define RS_RE_DPE 0x10000 /**< descriptor protocol error */ +#define RS_RE_DFO 0x20000 /**< data fifo overflow */ +#define RS_RE_BEBW 0x30000 /**< bus error on buffer write */ +#define RS_RE_BEDA 0x40000 /**< bus error on descriptor access */ +#define RS_AD_MASK 0xfff00000 /**< active descriptor */ +#define RS_AD_SHIFT 20 + +/* fifoaddr */ +#define FA_OFF_MASK 0xffff /**< offset */ +#define FA_SEL_MASK 0xf0000 /**< select */ +#define FA_SEL_SHIFT 16 +#define FA_SEL_XDD 0x00000 /**< transmit dma data */ +#define FA_SEL_XDP 0x10000 /**< transmit dma pointers */ +#define FA_SEL_RDD 0x40000 /**< receive dma data */ +#define FA_SEL_RDP 0x50000 /**< receive dma pointers */ +#define FA_SEL_XFD 0x80000 /**< transmit fifo data */ +#define FA_SEL_XFP 0x90000 /**< transmit fifo pointers */ +#define FA_SEL_RFD 0xc0000 /**< receive fifo data */ +#define FA_SEL_RFP 0xd0000 /**< receive fifo pointers */ +#define FA_SEL_RSD 0xe0000 /**< receive frame status data */ +#define FA_SEL_RSP 0xf0000 /**< receive frame status pointers */ + +/* descriptor control flags */ +#define CTRL_BC_MASK 0x00001fff /**< buffer byte count, real data len must <= 4KB */ +#define CTRL_AE ((uint32)3 << 16) /**< address extension bits */ +#define CTRL_AE_SHIFT 16 +#define CTRL_PARITY ((uint32)3 << 18) /**< parity bit */ +#define CTRL_EOT ((uint32)1 << 28) /**< end of descriptor table */ +#define CTRL_IOC ((uint32)1 << 29) /**< interrupt on completion */ +#define CTRL_EOF ((uint32)1 << 30) /**< end of frame */ +#define CTRL_SOF ((uint32)1 << 31) /**< start of frame */ + +/** control flags in the range [27:20] are core-specific and not defined here */ +#define CTRL_CORE_MASK 0x0ff00000 + +/* 64 bits addressing */ + +/** dma registers per channel(xmt or rcv) */ +typedef volatile struct { + uint32 control; /**< enable, et al */ + uint32 ptr; /**< last descriptor posted to chip */ + uint32 addrlow; /**< descriptor ring base 
address low 32-bits (8K aligned) */
+	uint32 addrhigh; /**< descriptor ring base address bits 63:32 (8K aligned) */
+	uint32 status0; /**< current descriptor, xmt state */
+	uint32 status1; /**< active descriptor, xmt error */
+} dma64regs_t;
+
+typedef volatile struct {
+	dma64regs_t tx; /**< dma64 tx channel */
+	dma64regs_t rx; /**< dma64 rx channel */
+} dma64regp_t;
+
+typedef volatile struct { /**< diag access */
+	uint32 fifoaddr; /**< diag address */
+	uint32 fifodatalow; /**< low 32bits of data */
+	uint32 fifodatahigh; /**< high 32bits of data */
+	uint32 pad; /**< reserved */
+} dma64diag_t;
+
+/**
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
+typedef volatile struct {
+	uint32 ctrl1; /**< misc control bits */
+	uint32 ctrl2; /**< buffer count and address extension */
+	uint32 addrlow; /**< memory address of the data buffer, bits 31:0 */
+	uint32 addrhigh; /**< memory address of the data buffer, bits 63:32 */
+} dma64dd_t;
+
+/**
+ * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical address range.
+ */
+#define D64RINGALIGN_BITS 13
+#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
+#define D64RINGBOUNDARY (1 << D64RINGALIGN_BITS)
+
+#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t))
+
+/** for cores with large descriptor ring support, descriptor ring size can be up to 4096 */
+#define D64MAXDD_LARGE ((1 << 16) / sizeof (dma64dd_t))
+
+/**
+ * for cores with large descriptor ring support (4k descriptors), descriptor ring cannot cross
+ * 64K boundary
+ */
+#define D64RINGBOUNDARY_LARGE (1 << 16)
+
+/*
+ * Default DMA Burstlen values for USBRev >= 12 and SDIORev >= 11.
+ * When this field contains the value N, the burst length is 2**(N + 4) bytes.
+ */
+#define D64_DEF_USBBURSTLEN 2
+#define D64_DEF_SDIOBURSTLEN 1
+
+#ifndef D64_USBBURSTLEN
+#define D64_USBBURSTLEN DMA_BL_64
+#endif
+#ifndef D64_SDIOBURSTLEN
+#define D64_SDIOBURSTLEN DMA_BL_32
+#endif
+
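+/*
+ * Worked example (not part of the original header): with the 2**(N + 4)
+ * encoding above, D64_DEF_USBBURSTLEN = 2 corresponds to 2**(2 + 4) = 64
+ * bytes and D64_DEF_SDIOBURSTLEN = 1 to 2**(1 + 4) = 32 bytes, consistent
+ * with the DMA_BL_64 / DMA_BL_32 fallbacks just above.
+ */
+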
+/* transmit channel control */
+#define D64_XC_XE 0x00000001 /**< transmit enable */
+#define D64_XC_SE 0x00000002 /**< transmit suspend request */
+#define D64_XC_LE 0x00000004 /**< loopback enable */
+#define D64_XC_FL 0x00000010 /**< flush request */
+#define D64_XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */
+#define D64_XC_MR_SHIFT 6
+#define D64_XC_PD 0x00000800 /**< parity check disable */
+#define D64_XC_AE 0x00030000 /**< address extension bits */
+#define D64_XC_AE_SHIFT 16
+#define D64_XC_BL_MASK 0x001C0000 /**< BurstLen bits */
+#define D64_XC_BL_SHIFT 18
+#define D64_XC_PC_MASK 0x00E00000 /**< Prefetch control */
+#define D64_XC_PC_SHIFT 21
+#define D64_XC_PT_MASK 0x03000000 /**< Prefetch threshold */
+#define D64_XC_PT_SHIFT 24
+
+/* transmit descriptor table pointer */
+#define D64_XP_LD_MASK 0x00001fff /**< last valid descriptor */
+
+/* transmit channel status */
+#define D64_XS0_CD_MASK (di->d64_xs0_cd_mask) /**< current descriptor pointer */
+#define D64_XS0_XS_MASK 0xf0000000 /**< transmit state */
+#define D64_XS0_XS_SHIFT 28
+#define D64_XS0_XS_DISABLED 0x00000000 /**< disabled */
+#define D64_XS0_XS_ACTIVE 0x10000000 /**< active */
+#define D64_XS0_XS_IDLE 0x20000000 /**< idle wait */
+#define D64_XS0_XS_STOPPED 0x30000000 /**< stopped */
+#define D64_XS0_XS_SUSP 0x40000000 /**< suspend pending */
+
+#define D64_XS1_AD_MASK (di->d64_xs1_ad_mask) /**< active descriptor */
+#define D64_XS1_XE_MASK 0xf0000000 /**< transmit errors */
+#define D64_XS1_XE_SHIFT 28
+#define D64_XS1_XE_NOERR 0x00000000 /**< no error */
+#define D64_XS1_XE_DPE 0x10000000 /**< descriptor protocol error */
+#define D64_XS1_XE_DFU 0x20000000 /**< data fifo underrun */
+#define D64_XS1_XE_DTE 0x30000000 /**< data transfer error */
+#define D64_XS1_XE_DESRE 0x40000000 /**< descriptor read error */
+#define D64_XS1_XE_COREE 0x50000000 /**< core error */
+
+/* receive channel control */
+#define D64_RC_RE 0x00000001 /**< receive enable */
+#define D64_RC_RO_MASK 0x000000fe /**< receive frame offset */
+#define D64_RC_RO_SHIFT 1
+#define D64_RC_FM 0x00000100 /**< direct fifo receive (pio) mode */
+#define D64_RC_SH 0x00000200 /**< separate rx header descriptor enable */
+#define D64_RC_SHIFT 9 /**< separate rx header descriptor enable */
+#define D64_RC_OC 0x00000400 /**< overflow continue */
+#define D64_RC_PD 0x00000800 /**< parity check disable */
+#define D64_RC_SA 0x00002000 /**< select active */
+#define D64_RC_GE 0x00004000 /**< Glom enable */
+#define D64_RC_AE 0x00030000 /**< address extension bits */
+#define D64_RC_AE_SHIFT 16
+#define D64_RC_BL_MASK 0x001C0000 /**< BurstLen bits */
+#define D64_RC_BL_SHIFT 18
+#define D64_RC_PC_MASK 0x00E00000 /**< Prefetch control */
+#define D64_RC_PC_SHIFT 21
+#define D64_RC_PT_MASK 0x03000000 /**< Prefetch threshold */
+#define D64_RC_PT_SHIFT 24
+
+/* flags for dma controller */
+#define DMA_CTRL_PEN (1 << 0) /**< parity enable */
+#define DMA_CTRL_ROC (1 << 1) /**< rx overflow continue */
+#define DMA_CTRL_RXMULTI (1 << 2) /**< allow rx scatter to multiple descriptors */
+#define DMA_CTRL_UNFRAMED (1 << 3) /**< Unframed Rx/Tx data */
+#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4)
+#define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5) /**< DMA avoidance WAR for 4331 */
+#define DMA_CTRL_RXSINGLE (1 << 6) /**< always single buffer */
+#define DMA_CTRL_SDIO_RXGLOM (1 << 7) /**< DMA Rx glom is enabled */
+
+/* receive descriptor table pointer */
+#define D64_RP_LD_MASK 0x00001fff /**< last valid descriptor */
+
+/* receive channel status */
+#define D64_RS0_CD_MASK (di->d64_rs0_cd_mask) /**< current descriptor pointer */
+#define D64_RS0_RS_MASK 0xf0000000 /**< receive state */
+#define D64_RS0_RS_SHIFT 28
+#define D64_RS0_RS_DISABLED 0x00000000 /**< disabled */
+#define D64_RS0_RS_ACTIVE 0x10000000 /**< active */
+#define D64_RS0_RS_IDLE 0x20000000 /**< idle wait */
+#define D64_RS0_RS_STOPPED 0x30000000 /**< stopped */
+#define D64_RS0_RS_SUSP 0x40000000 /**< suspend pending */
+
+#define D64_RS1_AD_MASK 0x0001ffff /**< active descriptor */
+#define D64_RS1_RE_MASK 0xf0000000 /**< receive errors */
+#define D64_RS1_RE_SHIFT 28
+#define D64_RS1_RE_NOERR 0x00000000 /**< no error */
+#define D64_RS1_RE_DPO 0x10000000 /**< descriptor protocol error */
+#define D64_RS1_RE_DFU 0x20000000 /**< data fifo overflow */
+#define D64_RS1_RE_DTE 0x30000000 /**< data transfer error */
+#define D64_RS1_RE_DESRE 0x40000000 /**< descriptor read error */
+#define D64_RS1_RE_COREE 0x50000000 /**< core error */
+
+/* fifoaddr */
+#define D64_FA_OFF_MASK 0xffff /**< offset */
+#define D64_FA_SEL_MASK 0xf0000 /**< select */
+#define D64_FA_SEL_SHIFT 16
+#define D64_FA_SEL_XDD 0x00000 /**< transmit dma data */
+#define D64_FA_SEL_XDP 0x10000 /**< transmit dma pointers */
+#define D64_FA_SEL_RDD 0x40000 /**< receive dma data */
+#define D64_FA_SEL_RDP 0x50000 /**< receive dma pointers */
+#define D64_FA_SEL_XFD 0x80000 /**< transmit fifo data */
+#define D64_FA_SEL_XFP 0x90000 /**< transmit fifo pointers */
+#define D64_FA_SEL_RFD 0xc0000 /**< receive fifo data */
+#define D64_FA_SEL_RFP 0xd0000 /**< receive fifo pointers */
+#define D64_FA_SEL_RSD 0xe0000 /**< receive frame status data */
+#define D64_FA_SEL_RSP 0xf0000 /**< receive frame status pointers */
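+
+/*
+ * Illustrative sketch (an assumption, not documented in this header): the
+ * select values above are typically combined with an offset and written to
+ * the dma64diag_t fifoaddr register, after which fifodatalow/fifodatahigh
+ * return the selected FIFO word, e.g.
+ *
+ *	diag->fifoaddr = D64_FA_SEL_XDD | (offset & D64_FA_OFF_MASK);
+ *	low = diag->fifodatalow;
+ */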
+
+/* descriptor control flags 1 */
+#define D64_CTRL_COREFLAGS 0x0ff00000 /**< core specific flags */
+#define D64_CTRL1_NOTPCIE ((uint32)1 << 18) /**< burst size control */
+#define D64_CTRL1_EOT ((uint32)1 << 28) /**< end of descriptor table */
+#define D64_CTRL1_IOC ((uint32)1 << 29) /**< interrupt on completion */
+#define D64_CTRL1_EOF ((uint32)1 << 30) /**< end of frame */
+#define D64_CTRL1_SOF ((uint32)1 << 31) /**< start of frame */
+
+/* descriptor control flags 2 */
+#define D64_CTRL2_BC_MASK 0x00007fff /**< buffer byte count. real data len must <= 16KB */
+#define D64_CTRL2_AE 0x00030000 /**< address extension bits */
+#define D64_CTRL2_AE_SHIFT 16
+#define D64_CTRL2_PARITY 0x00040000 /* parity bit */
+
+/** control flags in the range [27:20] are core-specific and not defined here */
+#define D64_CTRL_CORE_MASK 0x0ff00000
+
+#define D64_RX_FRM_STS_LEN 0x0000ffff /**< frame length mask */
+#define D64_RX_FRM_STS_OVFL 0x00800000 /**< RxOverFlow */
+#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /**< no. of descriptors used - 1, d11corerev >= 22 */
+#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /**< core-dependent data type */
+
+/** receive frame status */
+typedef volatile struct {
+	uint16 len;
+	uint16 flags;
+} dma_rxh_t;
+
+#endif /* _sbhnddma_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sbpcmcia.h b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
new file mode 100644
index 000000000000..d2e42ffffdbe
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbpcmcia.h
@@ -0,0 +1,116 @@
+/*
+ * BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: sbpcmcia.h 521344 2014-12-17 10:03:55Z $
+ */
+
+#ifndef _SBPCMCIA_H
+#define _SBPCMCIA_H
+
+/* All the addresses that are offsets in attribute space are divided
+ * by two to account for the fact that odd bytes are invalid in
+ * attribute space and our read/write routines make the space appear
+ * as if they didn't exist. Still we want to show the original numbers
+ * as documented in the hnd_pcmcia core manual.
+ */
+
+/* PCMCIA Function Configuration Registers */
+#define PCMCIA_FCR (0x700 / 2)
+
+#define FCR0_OFF 0
+#define FCR1_OFF (0x40 / 2)
+#define FCR2_OFF (0x80 / 2)
+#define FCR3_OFF (0xc0 / 2)
+
+#define PCMCIA_FCR0 (0x700 / 2)
+#define PCMCIA_FCR1 (0x740 / 2)
+#define PCMCIA_FCR2 (0x780 / 2)
+#define PCMCIA_FCR3 (0x7c0 / 2)
+
+/* Standard PCMCIA FCR registers */
+
+#define PCMCIA_COR 0
+
+#define COR_RST 0x80
+#define COR_LEV 0x40
+#define COR_IRQEN 0x04
+#define COR_BLREN 0x01
+#define COR_FUNEN 0x01
+
+
+#define PCICIA_FCSR (2 / 2)
+#define PCICIA_PRR (4 / 2)
+#define PCICIA_SCR (6 / 2)
+#define PCICIA_ESR (8 / 2)
+
+
+#define PCM_MEMOFF 0x0000
+#define F0_MEMOFF 0x1000
+#define F1_MEMOFF 0x2000
+#define F2_MEMOFF 0x3000
+#define F3_MEMOFF 0x4000
+
+/* Memory base in the function fcr's */
+#define MEM_ADDR0 (0x728 / 2)
+#define MEM_ADDR1 (0x72a / 2)
+#define MEM_ADDR2 (0x72c / 2)
+
+/* PCMCIA base plus Srom access in fcr0: */
+#define PCMCIA_ADDR0 (0x072e / 2)
+#define PCMCIA_ADDR1 (0x0730 / 2)
+#define PCMCIA_ADDR2 (0x0732 / 2)
+
+#define MEM_SEG (0x0734 / 2)
+#define SROM_CS (0x0736 / 2)
+#define SROM_DATAL (0x0738 / 2)
+#define SROM_DATAH (0x073a / 2)
+#define SROM_ADDRL (0x073c / 2)
+#define SROM_ADDRH (0x073e / 2)
+#define SROM_INFO2 (0x0772 / 2) /* Corerev >= 2 && <= 5 */
+#define SROM_INFO (0x07be / 2) /* Corerev >= 6 */
+
+/* Values for srom_cs: */
+#define SROM_IDLE 0
+#define SROM_WRITE 1
+#define SROM_READ 2
+#define SROM_WEN 4
+#define SROM_WDS 7
+#define SROM_DONE 8
+
+/* Fields in srom_info: */
+#define SRI_SZ_MASK 0x03
+#define SRI_BLANK 0x04
+#define SRI_OTP 0x80
+
+
+/* sbtmstatelow */
+#define SBTML_INT_ACK 0x40000 /* ack the sb interrupt */
+#define SBTML_INT_EN 0x20000 /* enable sb interrupt */
+
+/* sbtmstatehigh */
+#define SBTMH_INT_STATUS 0x40000 /* sb interrupt status */
+
+#endif /* _SBPCMCIA_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsdio.h b/drivers/net/wireless/bcmdhd/include/sbsdio.h
new file mode 100644
index 000000000000..f4760a22c077
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsdio.h
@@ -0,0 +1,189 @@
+/*
+ * SDIO device core hardware definitions.
+ * sdio is a portion of the pcmcia core in core rev 3 - rev 8
+ *
+ * The SDIO core supports 1-bit and 4-bit SDIO modes as well as SPI mode.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: sbsdio.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _SBSDIO_H
+#define _SBSDIO_H
+
+#define SBSDIO_NUM_FUNCTION 3 /* as of sdiod rev 0, supports 3 functions */
+
+/* function 1 miscellaneous registers */
+#define SBSDIO_SPROM_CS 0x10000 /* sprom command and status */
+#define SBSDIO_SPROM_INFO 0x10001 /* sprom info register */
+#define SBSDIO_SPROM_DATA_LOW 0x10002 /* sprom indirect access data byte 0 */
+#define SBSDIO_SPROM_DATA_HIGH 0x10003 /* sprom indirect access data byte 1 */
+#define SBSDIO_SPROM_ADDR_LOW 0x10004 /* sprom indirect access addr byte 0 */
+#define SBSDIO_SPROM_ADDR_HIGH 0x10005 /* sprom indirect access addr byte 1 */
+#define SBSDIO_CHIP_CTRL_DATA 0x10006 /* xtal_pu (gpio) output */
+#define SBSDIO_CHIP_CTRL_EN 0x10007 /* xtal_pu (gpio) enable */
+#define SBSDIO_WATERMARK 0x10008 /* rev < 7, watermark for sdio device */
+#define SBSDIO_DEVICE_CTL 0x10009 /* control busy signal generation */
+
+/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */
+#define SBSDIO_FUNC1_SBADDRLOW 0x1000A /* SB Address Window Low (b15) */
+#define SBSDIO_FUNC1_SBADDRMID 0x1000B /* SB Address Window Mid (b23:b16) */
+#define SBSDIO_FUNC1_SBADDRHIGH 0x1000C /* SB Address Window High (b31:b24) */
+#define SBSDIO_FUNC1_FRAMECTRL 0x1000D /* Frame Control (frame term/abort) */
+#define SBSDIO_FUNC1_CHIPCLKCSR 0x1000E /* ChipClockCSR (ALP/HT ctl/status) */
+#define SBSDIO_FUNC1_SDIOPULLUP 0x1000F /* SdioPullUp (on cmd, d0-d2) */
+#define SBSDIO_FUNC1_WFRAMEBCLO 0x10019 /* Write Frame Byte Count Low */
+#define SBSDIO_FUNC1_WFRAMEBCHI 0x1001A /* Write Frame Byte Count High */
+#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B /* Read Frame Byte Count Low */
+#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C /* Read Frame Byte Count High */
+#define SBSDIO_FUNC1_MESBUSYCTRL 0x1001D /* MesBusyCtl at 0x1001D (rev 11) */
+
+#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */
+#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001C /* f1 misc register end */
+
+/* Sdio Core Rev 12 */
+#define SBSDIO_FUNC1_WAKEUPCTRL 0x1001E
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_MASK 0x1
+#define SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT 0
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_MASK 0x2
+#define SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT 1
+#define SBSDIO_FUNC1_SLEEPCSR 0x1001F
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_MASK 0x1
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT 0
+#define SBSDIO_FUNC1_SLEEPCSR_KSO_EN 1
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK 0x2
+#define SBSDIO_FUNC1_SLEEPCSR_DEVON_SHIFT 1
+
+/* SBSDIO_SPROM_CS */
+#define SBSDIO_SPROM_IDLE 0
+#define SBSDIO_SPROM_WRITE 1
+#define SBSDIO_SPROM_READ 2
+#define SBSDIO_SPROM_WEN 4
+#define SBSDIO_SPROM_WDS 7
+#define SBSDIO_SPROM_DONE 8
+
+/* SBSDIO_SPROM_INFO */
+#define SROM_SZ_MASK 0x03 /* SROM size, 1: 4k, 2: 16k */
+#define SROM_BLANK 0x04 /* deprecated in corerev 6 */
+#define SROM_OTP 0x80 /* OTP present */
+
+/* SBSDIO_CHIP_CTRL */
+#define SBSDIO_CHIP_CTRL_XTAL 0x01 /* or'd with onchip xtal_pu,
+ * 1: power on oscillator
+ * (for 4318 only)
+ */
+/* SBSDIO_WATERMARK */
+#define SBSDIO_WATERMARK_MASK 0x7f /* number of words - 1 for sd device
+ * to wait before sending data to host
+ */
+
+/* SBSDIO_MESBUSYCTRL */
+/* When RX FIFO has fewer entries than this & MBE is set
+ * => busy signal is asserted between data blocks.
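+ * As an illustration (the watermark value 8 here is hypothetical; the
+ * mask and enable bits below are the real ones): writing
+ * (SBSDIO_MESBUSYCTRL_ENAB | 8) would enable busy signalling whenever
+ * fewer than 8 entries remain in the RX FIFO.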
+*/
+#define SBSDIO_MESBUSYCTRL_MASK 0x7f
+#define SBSDIO_MESBUSYCTRL_ENAB 0x80 /* Enable busy capability for MES access */
+
+/* SBSDIO_DEVICE_CTL */
+#define SBSDIO_DEVCTL_SETBUSY 0x01 /* 1: device will assert busy signal when
+ * receiving CMD53
+ */
+#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02 /* 1: assertion of sdio interrupt is
+ * synchronous to the sdio clock
+ */
+#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04 /* 1: mask all interrupts to host
+ * except the chipActive (rev 8)
+ */
+#define SBSDIO_DEVCTL_PADS_ISO 0x08 /* 1: isolate internal sdio signals, put
+ * external pads in tri-state; requires
+ * sdio bus power cycle to clear (rev 9)
+ */
+#define SBSDIO_DEVCTL_EN_F2_BLK_WATERMARK 0x10 /* Enable function 2 tx for each block */
+#define SBSDIO_DEVCTL_F2WM_ENAB 0x10 /* Enable F2 Watermark */
+#define SBSDIO_DEVCTL_NONDAT_PADS_ISO 0x20 /* Isolate sdio clk and cmd (non-data) */
+
+/* SBSDIO_FUNC1_CHIPCLKCSR */
+#define SBSDIO_FORCE_ALP 0x01 /* Force ALP request to backplane */
+#define SBSDIO_FORCE_HT 0x02 /* Force HT request to backplane */
+#define SBSDIO_FORCE_ILP 0x04 /* Force ILP request to backplane */
+#define SBSDIO_ALP_AVAIL_REQ 0x08 /* Make ALP ready (power up xtal) */
+#define SBSDIO_HT_AVAIL_REQ 0x10 /* Make HT ready (power up PLL) */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 /* Squelch clock requests from HW */
+#define SBSDIO_ALP_AVAIL 0x40 /* Status: ALP is ready */
+#define SBSDIO_HT_AVAIL 0x80 /* Status: HT is ready */
+/* In rev8, actual avail bits followed original docs */
+#define SBSDIO_Rev8_HT_AVAIL 0x40
+#define SBSDIO_Rev8_ALP_AVAIL 0x80
+#define SBSDIO_CSR_MASK 0x1F
+
+#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) (SBSDIO_ALPAV(regval) && \
+ (alponly ? 1 : SBSDIO_HTAV(regval)))
+
+/* SBSDIO_FUNC1_SDIOPULLUP */
+#define SBSDIO_PULLUP_D0 0x01 /* Enable D0/MISO pullup */
+#define SBSDIO_PULLUP_D1 0x02 /* Enable D1/INT# pullup */
+#define SBSDIO_PULLUP_D2 0x04 /* Enable D2 pullup */
+#define SBSDIO_PULLUP_CMD 0x08 /* Enable CMD/MOSI pullup */
+#define SBSDIO_PULLUP_ALL 0x0f /* All valid bits */
+
+/* function 1 OCP space */
+#define SBSDIO_SB_OFT_ADDR_MASK 0x07FFF /* sb offset addr is <= 15 bits, 32k */
+#define SBSDIO_SB_OFT_ADDR_LIMIT 0x08000
+#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000 /* with b15, maps to 32-bit SB access */
+
+/* some duplication with sbsdpcmdev.h here */
+/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
+#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */
+#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */
+#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */
+#define SBSDIO_SBWINDOW_MASK 0xffff8000 /* Address bits from SBADDR regs */
+
+/* direct(mapped) cis space */
+#define SBSDIO_CIS_BASE_COMMON 0x1000 /* MAPPED common CIS address */
+#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */
+#define SBSDIO_OTP_CIS_SIZE_LIMIT 0x078 /* maximum bytes OTP CIS */
+
+#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF /* cis offset addr is < 17 bits */
+
+#define SBSDIO_CIS_MANFID_TUPLE_LEN 6 /* manfid tuple length, including tuple,
+ * link bytes
+ */
+
+/* indirect cis access (in sprom) */
+#define SBSDIO_SPROM_CIS_OFFSET 0x8 /* 8 control bytes first, CIS starts from
+ * 8th byte
+ */
+
+#define SBSDIO_BYTEMODE_DATALEN_MAX 64 /* sdio byte mode: maximum length of one
+ * data command
+ */
+
+#define SBSDIO_CORE_ADDR_MASK 0x1FFFF /* sdio core function one address mask */
+
+#endif /* _SBSDIO_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
new file mode 100644
index 000000000000..c0c889e5316f
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsdpcmdev.h
@@ -0,0 +1,298 @@
+/*
+ * Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific
+ * device core support
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: sbsdpcmdev.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _sbsdpcmdev_h_
+#define _sbsdpcmdev_h_
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+
+typedef volatile struct {
+ dma64regs_t xmt; /* dma tx */
+ uint32 PAD[2];
+ dma64regs_t rcv; /* dma rx */
+ uint32 PAD[2];
+} dma64p_t;
+
+/* dma64 sdiod corerev >= 1 */
+typedef volatile struct {
+ dma64p_t dma64regs[2];
+ dma64diag_t dmafifo; /* DMA Diagnostic Regs, 0x280-0x28c */
+ uint32 PAD[92];
+} sdiodma64_t;
+
+/* dma32 sdiod corerev == 0 */
+typedef volatile struct {
+ dma32regp_t dma32regs[2]; /* dma tx & rx, 0x200-0x23c */
+ dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x240-0x24c */
+ uint32 PAD[108];
+} sdiodma32_t;
+
+/* dma32 regs for pcmcia core */
+typedef volatile struct {
+ dma32regp_t dmaregs; /* DMA Regs, 0x200-0x21c, rev8 */
+ dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x220-0x22c */
+ uint32 PAD[116];
+} pcmdma32_t;
+
+/* core registers */
+typedef volatile struct {
+ uint32 corecontrol; /* CoreControl, 0x000, rev8 */
+ uint32 corestatus; /* CoreStatus, 0x004, rev8 */
+ uint32 PAD[1];
+ uint32 biststatus; /* BistStatus, 0x00c, rev8 */
+
+ /* PCMCIA access */
+ uint16 pcmciamesportaladdr; /* PcmciaMesPortalAddr, 0x010, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciamesportalmask; /* PcmciaMesPortalMask, 0x014, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciawrframebc; /* PcmciaWrFrameBC, 0x018, rev8 */
+ uint16 PAD[1];
+ uint16 pcmciaunderflowtimer; /* PcmciaUnderflowTimer, 0x01c, rev8 */
+ uint16 PAD[1];
+
+ /* interrupt */
+ uint32 intstatus; /* IntStatus, 0x020, rev8 */
+ uint32 hostintmask; /* IntHostMask, 0x024, rev8 */
+ uint32 intmask; /* IntSbMask, 0x028, rev8 */
+ uint32 sbintstatus; /* SBIntStatus, 0x02c, rev8 */
+ uint32 sbintmask; /* SBIntMask, 0x030, rev8 */
+ uint32 funcintmask; /* SDIO Function Interrupt Mask, SDIO rev4 */
+ uint32 PAD[2];
+ uint32 tosbmailbox; /* ToSBMailbox, 0x040, rev8 */
+ uint32 tohostmailbox; /* ToHostMailbox, 0x044, rev8 */
+ uint32 tosbmailboxdata; /* ToSbMailboxData, 0x048, rev8 */
+ uint32 tohostmailboxdata; /* ToHostMailboxData, 0x04c, rev8 */
+
+ /* synchronized access to registers in SDIO clock domain */
+ uint32 sdioaccess; /* SdioAccess, 0x050, rev8 */
+ uint32 PAD[3];
+
+ /* PCMCIA frame control */
+ uint8 pcmciaframectrl; /* pcmciaFrameCtrl, 0x060, rev8 */
+ uint8 PAD[3];
+ uint8 pcmciawatermark; /* pcmciaWaterMark, 0x064, rev8 */
+ uint8 PAD[155];
+
+ /* interrupt batching control */
+ uint32 intrcvlazy; /* IntRcvLazy, 0x100, rev8 */
+ uint32 PAD[3];
+
+ /* counters */
+ uint32 cmd52rd; /* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */
+ uint32 cmd52wr; /* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */
+ uint32 cmd53rd; /* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */
+ uint32 cmd53wr; /* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */
+ uint32 abort; /* AbortCount, 0x120, rev8, SDIO: aborts */
+ uint32 datacrcerror; /* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */
+ uint32 rdoutofsync; /* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */
+ uint32 wroutofsync; /* WrOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */
+ uint32 writebusy; /* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */
+ uint32 readwait; /* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */
+ uint32 readterm; /* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */
+ uint32 writeterm; /* WriteTermCount, 0x13c, rev8, SDIO: wr frm terminates */
+ uint32 PAD[40];
+ uint32 clockctlstatus; /* ClockCtlStatus, 0x1e0, rev8 */
+ uint32 PAD[7];
+
+ /* DMA engines */
+ volatile union {
+ pcmdma32_t pcm32;
+ sdiodma32_t sdiod32;
+ sdiodma64_t sdiod64;
+ } dma;
+
+ /* SDIO/PCMCIA CIS region */
+ char cis[512]; /* 512 byte CIS, 0x400-0x5ff, rev6 */
+
+ /* PCMCIA function control registers */
+ char pcmciafcr[256]; /* PCMCIA FCR, 0x600-6ff, rev6 */
+ uint16 PAD[55];
+
+ /* PCMCIA backplane access */
+ uint16 backplanecsr; /* BackplaneCSR, 0x76E, rev6 */
+ uint16 backplaneaddr0; /* BackplaneAddr0, 0x770, rev6 */
+ uint16 backplaneaddr1; /* BackplaneAddr1, 0x772, rev6 */
+ uint16 backplaneaddr2; /* BackplaneAddr2, 0x774, rev6 */
+ uint16 backplaneaddr3; /* BackplaneAddr3, 0x776, rev6 */
+ uint16 backplanedata0; /* BackplaneData0, 0x778, rev6 */
+ uint16 backplanedata1; /* BackplaneData1, 0x77a, rev6 */
+ uint16 backplanedata2; /* BackplaneData2, 0x77c, rev6 */
+ uint16 backplanedata3; /* BackplaneData3, 0x77e, rev6 */
+ uint16 PAD[31];
+
+ /* sprom "size" & "blank" info */
+ uint16 spromstatus; /* SPROMStatus, 0x7BE, rev2 */
+ uint32 PAD[464];
+
+ /* Sonics SiliconBackplane registers */
+ sbconfig_t sbconfig; /* SbConfig Regs, 0xf00-0xfff, rev8 */
+} sdpcmd_regs_t;
+
+/* corecontrol */
+#define CC_CISRDY (1 << 0) /* CIS Ready */
+#define CC_BPRESEN (1 << 1) /* CCCR RES signal causes backplane reset */
+#define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
+#define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation bit (rev 11) */
+#define CC_XMTDATAAVAIL_MODE (1 << 4) /* data avail generates an interrupt */
+#define CC_XMTDATAAVAIL_CTRL (1 << 5) /* data avail interrupt ctrl */
+
+/* corestatus */
+#define CS_PCMCIAMODE (1 << 0) /* Device Mode; 0=SDIO, 1=PCMCIA */
+#define CS_SMARTDEV (1 << 1) /* 1=smartDev enabled */
+#define CS_F2ENABLED (1 << 2) /* 1=host has enabled the device */
+
+#define PCMCIA_MES_PA_MASK 0x7fff /* PCMCIA Message Portal Address Mask */
+#define PCMCIA_MES_PM_MASK 0x7fff /* PCMCIA Message Portal Mask Mask */
+#define PCMCIA_WFBC_MASK 0xffff /* PCMCIA Write Frame Byte Count Mask */
+#define PCMCIA_UT_MASK 0x07ff /* PCMCIA Underflow Timer Mask */
+
+/* intstatus */
+#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
+#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
+#define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
+#define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
+#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
+#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
+#define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
+#define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
+#define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
+#define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
+#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
+#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
+#define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
+#define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
+#define I_PC (1 << 10) /* descriptor error */
+#define I_PD (1 << 11) /* data error */
+#define I_DE (1 << 12) /* Descriptor protocol Error */
+#define I_RU (1 << 13) /* Receive descriptor Underflow */
+#define I_RO (1 << 14) /* Receive fifo Overflow */
+#define I_XU (1 << 15) /* Transmit fifo Underflow */
+#define I_RI (1 << 16) /* Receive Interrupt */
+#define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
+#define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */
+#define I_XI (1 << 24) /* Transmit Interrupt */
+#define I_RF_TERM (1 << 25) /* Read Frame Terminate */
+#define I_WF_TERM (1 << 26) /* Write Frame Terminate */
+#define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
+#define I_SBINT (1 << 28) /* sbintstatus Interrupt */
+#define I_CHIPACTIVE (1 << 29) /* chip transitioned from doze to active state */
+#define I_SRESET (1 << 30) /* CCCR RES interrupt */
+#define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
+#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU) /* DMA Errors */
+#define I_DMA (I_RI | I_XI | I_ERRORS)
+
+/* sbintstatus */
+#define I_SB_SERR (1 << 8) /* Backplane SError (write) */
+#define I_SB_RESPERR (1 << 9) /* Backplane Response Error (read) */
+#define I_SB_SPROMERR (1 << 10) /* Error accessing the sprom */
+
+/* sdioaccess */
+#define SDA_DATA_MASK 0x000000ff /* Read/Write Data Mask */
+#define SDA_ADDR_MASK 0x000fff00 /* Read/Write Address Mask */
+#define SDA_ADDR_SHIFT 8 /* Read/Write Address Shift */
+#define SDA_WRITE 0x01000000 /* Write bit */
+#define SDA_READ 0x00000000 /* Write bit cleared for Read */
+#define SDA_BUSY 0x80000000 /* Busy bit */
+
+/* sdioaccess-accessible register address spaces */
+#define SDA_CCCR_SPACE 0x000 /* sdioAccess CCCR register space */
+#define SDA_F1_FBR_SPACE 0x100 /* sdioAccess F1 FBR register space */
+#define SDA_F2_FBR_SPACE 0x200 /* sdioAccess F2 FBR register space */
+#define SDA_F1_REG_SPACE 0x300 /* sdioAccess F1 core-specific register space */
+
+/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */
+#define SDA_CHIPCONTROLDATA 0x006 /* ChipControlData */
+#define SDA_CHIPCONTROLENAB 0x007 /* ChipControlEnable */
+#define SDA_F2WATERMARK 0x008 /* Function 2 Watermark */
+#define SDA_DEVICECONTROL 0x009 /* DeviceControl */
+#define SDA_SBADDRLOW 0x00a /* SbAddrLow */
+#define SDA_SBADDRMID 0x00b /* SbAddrMid */
+#define SDA_SBADDRHIGH 0x00c /* SbAddrHigh */
+#define SDA_FRAMECTRL 0x00d /* FrameCtrl */
+#define SDA_CHIPCLOCKCSR 0x00e /* ChipClockCSR */
+#define SDA_SDIOPULLUP 0x00f /* SdioPullUp */
+#define SDA_SDIOWRFRAMEBCLOW 0x019 /* SdioWrFrameBCLow */
+#define SDA_SDIOWRFRAMEBCHIGH 0x01a /* SdioWrFrameBCHigh */
+#define SDA_SDIORDFRAMEBCLOW 0x01b /* SdioRdFrameBCLow */
+#define SDA_SDIORDFRAMEBCHIGH 0x01c /* SdioRdFrameBCHigh */
+
+/* SDA_F2WATERMARK */
+#define SDA_F2WATERMARK_MASK 0x7f /* F2Watermark Mask */
+
+/* SDA_SBADDRLOW */
+#define SDA_SBADDRLOW_MASK 0x80 /* SbAddrLow Mask */
+
+/* SDA_SBADDRMID */
+#define SDA_SBADDRMID_MASK 0xff /* SbAddrMid Mask */
+
+/* SDA_SBADDRHIGH */
+#define SDA_SBADDRHIGH_MASK 0xff /* SbAddrHigh Mask */
+
+/* SDA_FRAMECTRL */
+#define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
+#define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
+#define SFC_CRC4WOOS (1 << 2) /* HW reports CRC error for write out of sync */
+#define SFC_ABORTALL (1 << 3) /* Abort cancels all in-progress frames */
+
+/* pcmciaframectrl */
+#define PFC_RF_TERM (1 << 0) /* Read Frame Terminate */
+#define PFC_WF_TERM (1 << 1) /* Write Frame Terminate */
+
+/* intrcvlazy */
+#define IRL_TO_MASK 0x00ffffff /* timeout */
+#define IRL_FC_MASK 0xff000000 /* frame count */
+#define IRL_FC_SHIFT 24 /* frame count */
+
+/* rx header */
+typedef volatile struct {
+ uint16 len;
+ uint16 flags;
+} sdpcmd_rxh_t;
+
+/* rx header flags */
+#define RXF_CRC 0x0001 /* CRC error detected */
+#define RXF_WOOS 0x0002 /* write frame out of sync */
+#define RXF_WF_TERM 0x0004 /* write frame terminated */
+#define RXF_ABORT 0x0008 /* write frame aborted */
+#define RXF_DISCARD (RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT) /* bad frame */
+
+/* HW frame tag */
+#define SDPCM_FRAMETAG_LEN 4 /* HW frametag: 2 bytes len, 2 bytes check val */
+
+#define SDPCM_HWEXT_LEN 8
+
+#endif /* _sbsdpcmdev_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsocram.h b/drivers/net/wireless/bcmdhd/include/sbsocram.h
new file mode 100644
index 000000000000..cfe12e1bb09a
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsocram.h
@@ -0,0 +1,203 @@
+/*
+ * BCM47XX Sonics SiliconBackplane embedded ram core
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: sbsocram.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _SBSOCRAM_H
+#define _SBSOCRAM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+/* Memcsocram core registers */
+typedef volatile struct sbsocramregs {
+ uint32 coreinfo;
+ uint32 bwalloc;
+ uint32 extracoreinfo;
+ uint32 biststat;
+ uint32 bankidx;
+ uint32 standbyctrl;
+
+ uint32 errlogstatus; /* rev 6 */
+ uint32 errlogaddr; /* rev 6 */
+ /* used for patching rev 3 & 5 */
+ uint32 cambankidx;
+ uint32 cambankstandbyctrl;
+ uint32 cambankpatchctrl;
+ uint32 cambankpatchtblbaseaddr;
+ uint32 cambankcmdreg;
+ uint32 cambankdatareg;
+ uint32 cambankmaskreg;
+ uint32 PAD[1];
+ uint32 bankinfo; /* corerev 8 */
+ uint32 bankpda;
+ uint32 PAD[14];
+ uint32 extmemconfig;
+ uint32 extmemparitycsr;
+ uint32 extmemparityerrdata;
+ uint32 extmemparityerrcnt;
+ uint32 extmemwrctrlandsize;
+ uint32 PAD[84];
+ uint32 workaround;
+ uint32 pwrctl; /* corerev >= 2 */
+ uint32 PAD[133];
+ uint32 sr_control; /* corerev >= 15 */
+ uint32 sr_status; /* corerev >= 15 */
+ uint32 sr_address; /* corerev >= 15 */
+ uint32 sr_data; /* corerev >= 15 */
+} sbsocramregs_t;
+
+#endif /* _LANGUAGE_ASSEMBLY */
+
+/* Register offsets */
+#define SR_COREINFO 0x00
+#define SR_BWALLOC 0x04
+#define SR_BISTSTAT 0x0c
+#define SR_BANKINDEX 0x10
+#define SR_BANKSTBYCTL 0x14
+#define SR_PWRCTL 0x1e8
+
+/* Coreinfo register */
+#define SRCI_PT_MASK 0x00070000 /* corerev >= 6; port type[18:16] */
+#define SRCI_PT_SHIFT 16
+/* port types : SRCI_PT__ */
+#define SRCI_PT_OCP_OCP 0
+#define SRCI_PT_AXI_OCP 1
+#define SRCI_PT_ARM7AHB_OCP 2
+#define SRCI_PT_CM3AHB_OCP 3
+#define SRCI_PT_AXI_AXI 4
+#define SRCI_PT_AHB_AXI 5
+/* corerev >= 3 */
+#define SRCI_LSS_MASK 0x00f00000
+#define SRCI_LSS_SHIFT 20
+#define SRCI_LRS_MASK 0x0f000000
+#define SRCI_LRS_SHIFT 24
+
+/* In corerev 0, the memory size is 2 to the power of (the base, 16,
+ * plus the contents of the memsize field, plus 1).
+ */
+#define SRCI_MS0_MASK 0xf
+#define SR_MS0_BASE 16
+
+/*
+ * In corerev 1 the bank size is 2 ^ (the bank size field plus 14);
+ * the memory size is the number of banks times the bank size.
+ * The same applies to rom size.
+ */
+#define SRCI_ROMNB_MASK 0xf000
+#define SRCI_ROMNB_SHIFT 12
+#define SRCI_ROMBSZ_MASK 0xf00
+#define SRCI_ROMBSZ_SHIFT 8
+#define SRCI_SRNB_MASK 0xf0
+#define SRCI_SRNB_SHIFT 4
+#define SRCI_SRBSZ_MASK 0xf
+#define SRCI_SRBSZ_SHIFT 0
+
+#define SR_BSZ_BASE 14
+
+/* Standby control register */
+#define SRSC_SBYOVR_MASK 0x80000000
+#define SRSC_SBYOVR_SHIFT 31
+#define SRSC_SBYOVRVAL_MASK 0x60000000
+#define SRSC_SBYOVRVAL_SHIFT 29
+#define SRSC_SBYEN_MASK 0x01000000 /* rev >= 3 */
+#define SRSC_SBYEN_SHIFT 24
+
+/* Power control register */
+#define SRPC_PMU_STBYDIS_MASK 0x00000010 /* rev >= 3 */
+#define SRPC_PMU_STBYDIS_SHIFT 4
+#define SRPC_STBYOVRVAL_MASK 0x00000008
+#define SRPC_STBYOVRVAL_SHIFT 3
+#define SRPC_STBYOVR_MASK 0x00000007
+#define SRPC_STBYOVR_SHIFT 0
+
+/* Extra core capability register */
+#define SRECC_NUM_BANKS_MASK 0x000000F0
+#define SRECC_NUM_BANKS_SHIFT 4
+#define SRECC_BANKSIZE_MASK 0x0000000F
+#define SRECC_BANKSIZE_SHIFT 0
+
+#define SRECC_BANKSIZE(value) (1 << (value))
+
+/* CAM bank patch control */
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS 0x0001FFFC
+#define SRP_VALID 0x8000
+
+/* CAM bank command reg */
+#define SRCMD_WRITE 0x00020000
+#define SRCMD_READ 0x00010000
+#define SRCMD_DONE 0x80000000
+
+#define SRCMD_DONE_DLY 1000
+
+/* bankidx and bankinfo reg defines corerev >= 8 */
+#define SOCRAM_BANKINFO_SZMASK 0x7f
+#define SOCRAM_BANKIDX_ROM_MASK 0x100
+
+#define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8
+/* socram bankinfo memtype */
+#define SOCRAM_MEMTYPE_RAM 0
+#define SOCRAM_MEMTYPE_R0M 1
+#define SOCRAM_MEMTYPE_DEVRAM 2
+
+#define SOCRAM_BANKINFO_REG 0x40
+#define SOCRAM_BANKIDX_REG 0x10
+#define SOCRAM_BANKINFO_STDBY_MASK 0x400
+#define SOCRAM_BANKINFO_STDBY_TIMER 0x800
+
+/* bankinfo rev >= 10 */
+#define SOCRAM_BANKINFO_DEVRAMSEL_SHIFT 13
+#define SOCRAM_BANKINFO_DEVRAMSEL_MASK 0x2000
+#define SOCRAM_BANKINFO_DEVRAMPRO_SHIFT 14
+#define SOCRAM_BANKINFO_DEVRAMPRO_MASK 0x4000
+#define SOCRAM_BANKINFO_SLPSUPP_SHIFT 15
+#define SOCRAM_BANKINFO_SLPSUPP_MASK 0x8000
+#define SOCRAM_BANKINFO_RETNTRAM_SHIFT 16
+#define SOCRAM_BANKINFO_RETNTRAM_MASK 0x00010000
+#define SOCRAM_BANKINFO_PDASZ_SHIFT 17
+#define SOCRAM_BANKINFO_PDASZ_MASK 0x003E0000
+#define SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT 24
+#define SOCRAM_BANKINFO_DEVRAMREMAP_MASK 0x01000000
+
+/* extracoreinfo register */
+#define SOCRAM_DEVRAMBANK_MASK 0xF000
+#define SOCRAM_DEVRAMBANK_SHIFT 12
+
+/* bank info to calculate bank size */
+#define SOCRAM_BANKINFO_SZBASE 8192
+#define SOCRAM_BANKSIZE_SHIFT 13 /* SOCRAM_BANKINFO_SZBASE */
+
+
+#endif /* _SBSOCRAM_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sbsysmem.h b/drivers/net/wireless/bcmdhd/include/sbsysmem.h
new file mode 100644
index 000000000000..99a810c434e8
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sbsysmem.h
@@ -0,0 +1,200 @@
+/*
+ * SiliconBackplane System Memory core
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: sbsysmem.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _SBSYSMEM_H
+#define _SBSYSMEM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+/* sysmem core registers */
+typedef volatile struct sysmemregs {
+ uint32 coreinfo;
+ uint32 bwalloc;
+ uint32 extracoreinfo;
+ uint32 biststat;
+ uint32 bankidx;
+ uint32 standbyctrl;
+
+ uint32 errlogstatus;
+ uint32 errlogaddr;
+
+ uint32 cambankidx;
+ uint32 cambankstandbyctrl;
+ uint32 cambankpatchctrl;
+ uint32 cambankpatchtblbaseaddr;
+ uint32 cambankcmdreg;
+ uint32 cambankdatareg;
+ uint32 cambankmaskreg;
+ uint32 PAD[1];
+ uint32 bankinfo;
+ uint32 PAD[15];
+ uint32 extmemconfig;
+ uint32 extmemparitycsr;
+ uint32 extmemparityerrdata;
+ uint32 extmemparityerrcnt;
+ uint32 extmemwrctrlandsize;
+ uint32 PAD[84];
+ uint32 workaround;
+ uint32 pwrctl;
+ uint32 PAD[133];
+ uint32 sr_control;
+ uint32 sr_status;
+ uint32 sr_address;
+ uint32 sr_data;
+} sysmemregs_t;
+
+#endif /* _LANGUAGE_ASSEMBLY */
+
+/* Register offsets */
+#define SR_COREINFO 0x00
+#define SR_BWALLOC 0x04
+#define SR_BISTSTAT 0x0c
+#define SR_BANKINDEX 0x10
+#define SR_BANKSTBYCTL 0x14
+#define SR_PWRCTL 0x1e8
+
+/* Coreinfo register */
+#define SRCI_PT_MASK 0x00070000 /* port type[18:16] */
+#define SRCI_PT_SHIFT 16
+/* port types : SRCI_PT__ */
+#define SRCI_PT_OCP_OCP 0
+#define SRCI_PT_AXI_OCP 1
+#define SRCI_PT_ARM7AHB_OCP 2
+#define SRCI_PT_CM3AHB_OCP 3
+#define SRCI_PT_AXI_AXI 4
+#define SRCI_PT_AHB_AXI 5
+
+#define SRCI_LSS_MASK 0x00f00000
+#define SRCI_LSS_SHIFT 20
+#define SRCI_LRS_MASK 0x0f000000
+#define SRCI_LRS_SHIFT 24
+
+/* In corerev 0, the memory size is 2 to the power of (the base, 16,
+ * plus the contents of the memsize field, plus 1).
+ */
+#define SRCI_MS0_MASK 0xf
+#define SR_MS0_BASE 16
+
+/*
+ * In corerev 1 the bank size is 2 ^ (the bank size field plus 14);
+ * the memory size is the number of banks times the bank size.
+ * The same applies to rom size.
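+ *
+ * Sketch of the implied arithmetic (an illustration derived from this
+ * comment, not driver code):
+ *
+ *   banksize = 1 << (srbsz + SR_BSZ_BASE);  srbsz taken from SRCI_SRBSZ_MASK
+ *   memsize  = srnb * banksize;             srnb  taken from SRCI_SRNB_MASK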
+ */
+#define SRCI_ROMNB_MASK 0xf000
+#define SRCI_ROMNB_SHIFT 12
+#define SRCI_ROMBSZ_MASK 0xf00
+#define SRCI_ROMBSZ_SHIFT 8
+#define SRCI_SRNB_MASK 0xf0
+#define SRCI_SRNB_SHIFT 4
+#define SRCI_SRBSZ_MASK 0xf
+#define SRCI_SRBSZ_SHIFT 0
+
+#define SR_BSZ_BASE 14
+
+/* Standby control register */
+#define SRSC_SBYOVR_MASK 0x80000000
+#define SRSC_SBYOVR_SHIFT 31
+#define SRSC_SBYOVRVAL_MASK 0x60000000
+#define SRSC_SBYOVRVAL_SHIFT 29
+#define SRSC_SBYEN_MASK 0x01000000
+#define SRSC_SBYEN_SHIFT 24
+
+/* Power control register */
+#define SRPC_PMU_STBYDIS_MASK 0x00000010
+#define SRPC_PMU_STBYDIS_SHIFT 4
+#define SRPC_STBYOVRVAL_MASK 0x00000008
+#define SRPC_STBYOVRVAL_SHIFT 3
+#define SRPC_STBYOVR_MASK 0x00000007
+#define SRPC_STBYOVR_SHIFT 0
+
+/* Extra core capability register */
+#define SRECC_NUM_BANKS_MASK 0x000000F0
+#define SRECC_NUM_BANKS_SHIFT 4
+#define SRECC_BANKSIZE_MASK 0x0000000F
+#define SRECC_BANKSIZE_SHIFT 0
+
+#define SRECC_BANKSIZE(value) (1 << (value))
+
+/* CAM bank patch control */
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS 0x0001FFFC
+#define SRP_VALID 0x8000
+
+/* CAM bank command reg */
+#define SRCMD_WRITE 0x00020000
+#define SRCMD_READ 0x00010000
+#define SRCMD_DONE 0x80000000
+
+#define SRCMD_DONE_DLY 1000
+
+/* bankidx and bankinfo reg defines */
+#define SYSMEM_BANKINFO_SZMASK 0x7f
+#define SYSMEM_BANKIDX_ROM_MASK 0x100
+
+#define SYSMEM_BANKIDX_MEMTYPE_SHIFT 8
+/* sysmem bankinfo memtype */
+#define SYSMEM_MEMTYPE_RAM 0
+#define SYSMEM_MEMTYPE_R0M 1
+#define SYSMEM_MEMTYPE_DEVRAM 2
+
+#define SYSMEM_BANKINFO_REG 0x40
+#define SYSMEM_BANKIDX_REG 0x10
+#define SYSMEM_BANKINFO_STDBY_MASK 0x400
+#define SYSMEM_BANKINFO_STDBY_TIMER 0x800
+
+#define SYSMEM_BANKINFO_DEVRAMSEL_SHIFT 13
+#define SYSMEM_BANKINFO_DEVRAMSEL_MASK 0x2000
+#define SYSMEM_BANKINFO_DEVRAMPRO_SHIFT 14
+#define SYSMEM_BANKINFO_DEVRAMPRO_MASK 0x4000
+#define SYSMEM_BANKINFO_SLPSUPP_SHIFT 15
+#define SYSMEM_BANKINFO_SLPSUPP_MASK 0x8000
+#define SYSMEM_BANKINFO_RETNTRAM_SHIFT 16
+#define SYSMEM_BANKINFO_RETNTRAM_MASK 0x00010000
+#define SYSMEM_BANKINFO_PDASZ_SHIFT 17
+#define SYSMEM_BANKINFO_PDASZ_MASK 0x003E0000
+#define SYSMEM_BANKINFO_DEVRAMREMAP_SHIFT 24
+#define SYSMEM_BANKINFO_DEVRAMREMAP_MASK 0x01000000
+
+/* extracoreinfo register */
+#define SYSMEM_DEVRAMBANK_MASK 0xF000
+#define SYSMEM_DEVRAMBANK_SHIFT 12
+
+/* bank info to calculate bank size */
+#define SYSMEM_BANKINFO_SZBASE 8192
+#define SYSMEM_BANKSIZE_SHIFT 13 /* SYSMEM_BANKINFO_SZBASE */
+
+#endif /* _SBSYSMEM_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdio.h b/drivers/net/wireless/bcmdhd/include/sdio.h
new file mode 100644
index 000000000000..ca53afbcf3e9
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdio.h
@@ -0,0 +1,625 @@
+/*
+ * SDIO spec header file
+ * Protocol and standard (common) device definitions
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: sdio.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _SDIO_H
+#define _SDIO_H
+
+#ifdef BCMSDIO
+
+/* CCCR structure for function 0 */
+typedef volatile struct {
+ uint8 cccr_sdio_rev; /* RO, cccr and sdio revision */
+ uint8 sd_rev; /* RO, sd spec revision */
+ uint8 io_en; /* I/O enable */
+ uint8 io_rdy; /* I/O ready reg */
+ uint8 intr_ctl; /* Master and per function interrupt enable control */
+ uint8 intr_status; /* RO, interrupt pending status */
+ uint8 io_abort; /* read/write abort or reset all functions */
+ uint8 bus_inter; /* bus interface control */
+ uint8 capability; /* RO, card capability */
+
+ uint8 cis_base_low; /* 0x9 RO, common CIS base address, LSB */
+ uint8 cis_base_mid;
+ uint8 cis_base_high; /* 0xB RO, common CIS base address, MSB */
+
+ /* suspend/resume registers */
+ uint8 bus_suspend; /* 0xC */
+ uint8 func_select; /* 0xD */
+ uint8 exec_flag; /* 0xE */
+ uint8 ready_flag; /* 0xF */
+
+ uint8 fn0_blk_size[2]; /* 0x10(LSB), 0x11(MSB) */
+
+ uint8 power_control; /* 0x12 (SDIO version 1.10) */
+
+ uint8 speed_control; /* 0x13 */
+} sdio_regs_t;
+
+/* SDIO Device CCCR offsets */
+#define SDIOD_CCCR_REV 0x00
+#define SDIOD_CCCR_SDREV 0x01
+#define SDIOD_CCCR_IOEN 0x02
+#define SDIOD_CCCR_IORDY 0x03
+#define SDIOD_CCCR_INTEN 0x04
+#define SDIOD_CCCR_INTPEND 0x05
+#define SDIOD_CCCR_IOABORT 0x06
+#define SDIOD_CCCR_BICTRL 0x07
+#define SDIOD_CCCR_CAPABLITIES 0x08
+#define SDIOD_CCCR_CISPTR_0 0x09
+#define SDIOD_CCCR_CISPTR_1 0x0A
+#define SDIOD_CCCR_CISPTR_2 0x0B
+#define SDIOD_CCCR_BUSSUSP 0x0C
+#define SDIOD_CCCR_FUNCSEL 0x0D
+#define SDIOD_CCCR_EXECFLAGS 0x0E
+#define SDIOD_CCCR_RDYFLAGS 0x0F
+#define SDIOD_CCCR_BLKSIZE_0 0x10
+#define SDIOD_CCCR_BLKSIZE_1 0x11
+#define SDIOD_CCCR_POWER_CONTROL 0x12
+#define SDIOD_CCCR_SPEED_CONTROL 0x13
+#define SDIOD_CCCR_UHSI_SUPPORT 0x14
+#define SDIOD_CCCR_DRIVER_STRENGTH 0x15
+#define SDIOD_CCCR_INTR_EXTN 0x16
+
+/* Broadcom extensions (corerev >= 1) */
+#define SDIOD_CCCR_BRCM_CARDCAP 0xf0
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT 0x02
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT 0x04
+#define SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC 0x08
+#define SDIOD_CCCR_BRCM_CARDCTL 0xf1
+#define SDIOD_CCCR_BRCM_SEPINT 0xf2
+
+/* cccr_sdio_rev */
+#define SDIO_REV_SDIOID_MASK 0xf0 /* SDIO spec revision number */
+#define SDIO_REV_CCCRID_MASK 0x0f /* CCCR format version number */
+#define SDIO_SPEC_VERSION_3_0 0x40 /* SDIO spec version 3.0 */
+
+/* sd_rev */
+#define SD_REV_PHY_MASK 0x0f /* SD format version number */
+
+/* io_en */
+#define SDIO_FUNC_ENABLE_1 0x02 /* function 1 I/O enable */
+#define SDIO_FUNC_ENABLE_2 0x04 /* function 2 I/O enable */
+
+/* io_rdy */
+#define SDIO_FUNC_READY_1 0x02 /* function 1 I/O ready */
+#define SDIO_FUNC_READY_2 0x04 /* function 2 I/O ready */
+
+/* intr_ctl */
+#define INTR_CTL_MASTER_EN 0x1 /* interrupt enable master */
+#define INTR_CTL_FUNC1_EN 0x2 /* interrupt enable for function 1 */
+#define INTR_CTL_FUNC2_EN 0x4 /* interrupt enable for function 2 */
+
+/* intr_status */
+#define INTR_STATUS_FUNC1 0x2 /* interrupt pending for function 1 */
+#define INTR_STATUS_FUNC2 0x4 /* interrupt pending for function 2 */
+
+/* io_abort */
+#define IO_ABORT_RESET_ALL 0x08 /* I/O card reset */
+#define IO_ABORT_FUNC_MASK 0x07 /* abort selection: function x */
+
+/* bus_inter */
+#define BUS_CARD_DETECT_DIS 0x80 /* Card Detect disable */
+#define BUS_SPI_CONT_INTR_CAP 0x40 /* support continuous SPI interrupt */
+#define BUS_SPI_CONT_INTR_EN 0x20 /* continuous SPI interrupt enable */
+#define BUS_SD_DATA_WIDTH_MASK 0x03 /* bus width mask */
+#define BUS_SD_DATA_WIDTH_4BIT 0x02 /* bus width 4-bit mode */
+#define BUS_SD_DATA_WIDTH_1BIT 0x00 /* bus width 1-bit mode */
+
+/* capability */
+#define SDIO_CAP_4BLS 0x80 /* 4-bit support for low speed card */
+#define SDIO_CAP_LSC 0x40 /* low speed card */
+#define SDIO_CAP_E4MI 0x20 /* enable interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_S4MI 0x10 /* support interrupt between block of data in 4-bit mode */
+#define SDIO_CAP_SBS 0x08 /* support suspend/resume */
+#define SDIO_CAP_SRW 0x04 /* support read wait */
+#define SDIO_CAP_SMB 0x02 /* support multi-block transfer */
+#define SDIO_CAP_SDC 0x01 /* Support Direct commands during multi-byte transfer */
+
+/* power_control */
+#define SDIO_POWER_SMPC 0x01 /* supports master power control (RO) */
+#define SDIO_POWER_EMPC 0x02 /* enable master power control (allow > 200mA) (RW) */
+
+/* speed_control (control device entry into high-speed clocking mode) */
+#define SDIO_SPEED_SHS 0x01 /* supports high-speed [clocking] mode (RO) */
+#define SDIO_SPEED_EHS 0x02 /* enable high-speed [clocking] mode (RW) */
+#define SDIO_SPEED_UHSI_DDR50 0x08
+
+/* for setting bus speed in card: 0x13h */
+#define SDIO_BUS_SPEED_UHSISEL_M BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSISEL_S 1
+
+/* for getting bus speed cap in card: 0x14h */
+#define SDIO_BUS_SPEED_UHSICAP_M BITFIELD_MASK(3)
+#define SDIO_BUS_SPEED_UHSICAP_S 0
+
+/* for getting driver type CAP in card: 0x15h */
+#define SDIO_BUS_DRVR_TYPE_CAP_M BITFIELD_MASK(3)
+#define SDIO_BUS_DRVR_TYPE_CAP_S 0
+
+/* for setting driver type selection in card: 0x15h */
+#define SDIO_BUS_DRVR_TYPE_SEL_M BITFIELD_MASK(2)
+#define SDIO_BUS_DRVR_TYPE_SEL_S 4
+
+/* for getting async int support in card: 0x16h */
+#define SDIO_BUS_ASYNCINT_CAP_M BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_CAP_S 0
+
+/* for setting async int selection in card: 0x16h */
+#define SDIO_BUS_ASYNCINT_SEL_M BITFIELD_MASK(1)
+#define SDIO_BUS_ASYNCINT_SEL_S 1
+
+/* brcm sepint */
+#define SDIO_SEPINT_MASK 0x01 /* route sdpcmdev intr onto separate pad (chip-specific) */
+#define SDIO_SEPINT_OE 0x02 /* 1 asserts output enable for above pad */
+#define SDIO_SEPINT_ACT_HI 0x04 /* use active high interrupt level instead of active low */
+
+/* FBR structure for function 1-7, FBR addresses and register offsets */
+typedef volatile struct {
+ uint8 devctr; /* device interface, CSA control */
+ uint8 ext_dev; /* extended standard I/O device type code */
+ uint8 pwr_sel; /* power selection support */
+ uint8 PAD[6]; /* reserved */
+
+ uint8 cis_low; /* CIS LSB */
+ uint8 cis_mid;
+ uint8 cis_high; /* CIS MSB */
+ uint8 csa_low; /* code storage area, LSB */
+ uint8 csa_mid;
+ uint8 csa_high; /* code storage area, MSB */
+ uint8 csa_dat_win; /* data access window to function */
+
+ uint8 fnx_blk_size[2]; /* block size, little endian */
+} sdio_fbr_t;
+
+/* Maximum number of I/O funcs */
+#define SDIOD_MAX_FUNCS 8
+#define SDIOD_MAX_IOFUNCS 7
+
+/* SDIO Device FBR Start Address */
+#define SDIOD_FBR_STARTADDR 0x100
+
+/* SDIO Device FBR Size */
+#define SDIOD_FBR_SIZE 0x100
+
+/* Macro to calculate FBR register base */
+#define SDIOD_FBR_BASE(n) ((n) * 0x100)
+
+/* Function register offsets */
+#define SDIOD_FBR_DEVCTR 0x00 /* basic info for function */
+#define SDIOD_FBR_EXT_DEV 0x01 /* extended I/O device code */
+#define SDIOD_FBR_PWR_SEL 0x02 /* power selection bits */
+
+/* SDIO Function CIS ptr offset */
+#define SDIOD_FBR_CISPTR_0 0x09
+#define SDIOD_FBR_CISPTR_1 0x0A
+#define SDIOD_FBR_CISPTR_2 0x0B
+
+/* Code Storage Area pointer */
+#define SDIOD_FBR_CSA_ADDR_0 0x0C
+#define SDIOD_FBR_CSA_ADDR_1 0x0D
+#define SDIOD_FBR_CSA_ADDR_2 0x0E
+#define SDIOD_FBR_CSA_DATA 0x0F
+
+/* SDIO Function I/O Block Size */
+#define SDIOD_FBR_BLKSIZE_0 0x10
+#define SDIOD_FBR_BLKSIZE_1 0x11
+
+/* devctr */
+#define SDIOD_FBR_DEVCTR_DIC 0x0f /* device interface code */
+#define SDIOD_FBR_DECVTR_CSA 0x40 /* CSA support flag */
+#define SDIOD_FBR_DEVCTR_CSA_EN 0x80 /* CSA enabled */
+/* interface codes */
+#define SDIOD_DIC_NONE 0 /* SDIO standard interface is not supported */
+#define SDIOD_DIC_UART 1
+#define SDIOD_DIC_BLUETOOTH_A 2
+#define SDIOD_DIC_BLUETOOTH_B 3
+#define SDIOD_DIC_GPS 4
+#define SDIOD_DIC_CAMERA 5
+#define SDIOD_DIC_PHS 6
+#define SDIOD_DIC_WLAN 7
+#define SDIOD_DIC_EXT 0xf /* extended device interface, read ext_dev register */
+
+/* pwr_sel */
+#define SDIOD_PWR_SEL_SPS 0x01 /* supports power selection */
+#define SDIOD_PWR_SEL_EPS 0x02 /* enable power selection (low-current mode) */
+
+/* misc defines */
+#define SDIO_FUNC_0 0
+#define SDIO_FUNC_1 1
+#define SDIO_FUNC_2 2
+#define SDIO_FUNC_3 3
+#define SDIO_FUNC_4 4
+#define SDIO_FUNC_5 5
+#define SDIO_FUNC_6 6
+#define SDIO_FUNC_7 7
+
+#define SD_CARD_TYPE_UNKNOWN 0 /* bad type or unrecognized */
+#define SD_CARD_TYPE_IO 1 /* IO only card */
+#define SD_CARD_TYPE_MEMORY 2 /* memory only card */
+#define SD_CARD_TYPE_COMBO 3 /* IO and memory combo card */
+
+#define SDIO_MAX_BLOCK_SIZE 2048 /* maximum block size for block mode operation */
+#define SDIO_MIN_BLOCK_SIZE 1 /* minimum block size for block mode operation */
+
+/* Card registers: status bit position */
+#define CARDREG_STATUS_BIT_OUTOFRANGE 31
+#define CARDREG_STATUS_BIT_COMCRCERROR 23
+#define CARDREG_STATUS_BIT_ILLEGALCOMMAND 22
+#define CARDREG_STATUS_BIT_ERROR 19
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE3 12
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE2 11
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE1 10
+#define CARDREG_STATUS_BIT_IOCURRENTSTATE0 9
+#define CARDREG_STATUS_BIT_FUN_NUM_ERROR 4
+
+
+
+#define SD_CMD_GO_IDLE_STATE 0 /* mandatory for SDIO */
+#define SD_CMD_SEND_OPCOND 1
+#define SD_CMD_MMC_SET_RCA 3
+#define SD_CMD_IO_SEND_OP_COND 5 /* mandatory for SDIO */
+#define SD_CMD_SELECT_DESELECT_CARD 7
+#define SD_CMD_SEND_CSD 9
+#define SD_CMD_SEND_CID 10
+#define SD_CMD_STOP_TRANSMISSION 12
+#define SD_CMD_SEND_STATUS 13
+#define SD_CMD_GO_INACTIVE_STATE 15
+#define SD_CMD_SET_BLOCKLEN 16
+#define SD_CMD_READ_SINGLE_BLOCK 17
+#define SD_CMD_READ_MULTIPLE_BLOCK 18
+#define SD_CMD_WRITE_BLOCK 24
+#define SD_CMD_WRITE_MULTIPLE_BLOCK 25
+#define SD_CMD_PROGRAM_CSD 27
+#define SD_CMD_SET_WRITE_PROT 28
+#define SD_CMD_CLR_WRITE_PROT 29
+#define SD_CMD_SEND_WRITE_PROT 30
+#define SD_CMD_ERASE_WR_BLK_START 32
+#define SD_CMD_ERASE_WR_BLK_END 33
+#define SD_CMD_ERASE 38
+#define SD_CMD_LOCK_UNLOCK 42
+#define SD_CMD_IO_RW_DIRECT 52 /* mandatory for SDIO */
+#define SD_CMD_IO_RW_EXTENDED 53 /* mandatory for SDIO */
+#define SD_CMD_APP_CMD 55
+#define SD_CMD_GEN_CMD 56
+#define SD_CMD_READ_OCR 58
+#define SD_CMD_CRC_ON_OFF 59 /* mandatory for SDIO */
+#define SD_ACMD_SD_STATUS 13
+#define SD_ACMD_SEND_NUM_WR_BLOCKS 22
+#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT 23
+#define SD_ACMD_SD_SEND_OP_COND 41
+#define SD_ACMD_SET_CLR_CARD_DETECT 42
+#define SD_ACMD_SEND_SCR 51
+
+/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */
+#define SD_IO_OP_READ 0 /* Read_Write: Read */
+#define SD_IO_OP_WRITE 1 /* Read_Write: Write */
+#define SD_IO_RW_NORMAL 0 /* no RAW */
+#define SD_IO_RW_RAW 1 /* RAW */
+#define SD_IO_BYTE_MODE 0 /* Byte Mode */
+#define SD_IO_BLOCK_MODE 1 /* BlockMode */
+#define SD_IO_FIXED_ADDRESS 0 /* fixed address */
+#define SD_IO_INCREMENT_ADDRESS 1 /* IncrementAddress */
+
+/* build SD_CMD_IO_RW_DIRECT Argument */
+#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \
+ ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \
+ (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF))
+
+/* build SD_CMD_IO_RW_EXTENDED Argument */
+#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \
+ ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \
+ (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF))
+
+/* SDIO response parameters */
+#define SD_RSP_NO_NONE 0
+#define SD_RSP_NO_1 1
+#define SD_RSP_NO_2 2
+#define SD_RSP_NO_3 3
+#define SD_RSP_NO_4 4
+#define SD_RSP_NO_5 5
+#define SD_RSP_NO_6 6
+
+ /* Modified R6 response (to CMD3) */
+#define SD_RSP_MR6_COM_CRC_ERROR 0x8000
+#define SD_RSP_MR6_ILLEGAL_COMMAND 0x4000
+#define SD_RSP_MR6_ERROR 0x2000
+
+ /* Modified R1 in R4 Response (to CMD5) */
+#define SD_RSP_MR1_SBIT 0x80
+#define SD_RSP_MR1_PARAMETER_ERROR 0x40
+#define SD_RSP_MR1_RFU5 0x20
+#define SD_RSP_MR1_FUNC_NUM_ERROR 0x10
+#define SD_RSP_MR1_COM_CRC_ERROR 0x08
+#define SD_RSP_MR1_ILLEGAL_COMMAND 0x04
+#define SD_RSP_MR1_RFU1 0x02
+#define SD_RSP_MR1_IDLE_STATE 0x01
+
+ /* R5 response (to CMD52 and CMD53) */
+#define SD_RSP_R5_COM_CRC_ERROR 0x80
+#define SD_RSP_R5_ILLEGAL_COMMAND 0x40
+#define SD_RSP_R5_IO_CURRENTSTATE1 0x20
+#define SD_RSP_R5_IO_CURRENTSTATE0 0x10
+#define SD_RSP_R5_ERROR 0x08
+#define SD_RSP_R5_RFU 0x04
+#define SD_RSP_R5_FUNC_NUM_ERROR 0x02
+#define SD_RSP_R5_OUT_OF_RANGE 0x01
+
+#define SD_RSP_R5_ERRBITS 0xCB
+
+
+/* ------------------------------------------------
+ * SDIO Commands and responses
+ *
+ * I/O only commands are:
+ * CMD0, CMD3, CMD5, CMD7, CMD14, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+/* SDIO Commands */
+#define SDIOH_CMD_0 0
+#define SDIOH_CMD_3 3
+#define SDIOH_CMD_5 5
+#define SDIOH_CMD_7 7
+#define SDIOH_CMD_11 11
+#define SDIOH_CMD_14 14
+#define SDIOH_CMD_15 15
+#define SDIOH_CMD_19 19
+#define SDIOH_CMD_52 52
+#define SDIOH_CMD_53 53
+#define SDIOH_CMD_59 59
+
+/* SDIO Command Responses */
+#define SDIOH_RSP_NONE 0
+#define SDIOH_RSP_R1 1
+#define SDIOH_RSP_R2 2
+#define SDIOH_RSP_R3 3
+#define SDIOH_RSP_R4 4
+#define SDIOH_RSP_R5 5
+#define SDIOH_RSP_R6 6
+
+/*
+ * SDIO Response Error flags
+ */
+#define SDIOH_RSP5_ERROR_FLAGS 0xCB
+
+/* ------------------------------------------------
+ * SDIO Command structures. I/O only commands are:
+ *
+ * CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
+ * ------------------------------------------------
+ */
+
+#define CMD5_OCR_M BITFIELD_MASK(24)
+#define CMD5_OCR_S 0
+
+#define CMD5_S18R_M BITFIELD_MASK(1)
+#define CMD5_S18R_S 24
+
+#define CMD7_RCA_M BITFIELD_MASK(16)
+#define CMD7_RCA_S 16
+
+#define CMD14_RCA_M BITFIELD_MASK(16)
+#define CMD14_RCA_S 16
+#define CMD14_SLEEP_M BITFIELD_MASK(1)
+#define CMD14_SLEEP_S 15
+
+#define CMD_15_RCA_M BITFIELD_MASK(16)
+#define CMD_15_RCA_S 16
+
+#define CMD52_DATA_M BITFIELD_MASK(8) /* Bits [7:0] - Write Data/Stuff bits of CMD52
+ */
+#define CMD52_DATA_S 0
+#define CMD52_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */
+#define CMD52_REG_ADDR_S 9
+#define CMD52_RAW_M BITFIELD_MASK(1) /* Bit 27 - Read after Write flag */
+#define CMD52_RAW_S 27
+#define CMD52_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */
+#define CMD52_FUNCTION_S 28
+#define CMD52_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */
+#define CMD52_RW_FLAG_S 31
+
+
+#define CMD53_BYTE_BLK_CNT_M BITFIELD_MASK(9) /* Bits [8:0] - Byte/Block Count of CMD53 */
+#define CMD53_BYTE_BLK_CNT_S 0
+#define CMD53_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */
+#define CMD53_REG_ADDR_S 9
+#define CMD53_OP_CODE_M BITFIELD_MASK(1) /* Bit 26 - R/W Operation Code */
+#define CMD53_OP_CODE_S 26
+#define CMD53_BLK_MODE_M BITFIELD_MASK(1) /* Bit 27 - Block Mode */
+#define CMD53_BLK_MODE_S 27
+#define CMD53_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */
+#define CMD53_FUNCTION_S 28
+#define CMD53_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */
+#define CMD53_RW_FLAG_S 31
+
+/* ------------------------------------------------------
+ * SDIO Command Response structures for SD1 and SD4 modes
+ * -----------------------------------------------------
+ */
+#define RSP4_IO_OCR_M BITFIELD_MASK(24) /* Bits [23:0] - Card's OCR Bits [23:0] */
+#define RSP4_IO_OCR_S 0
+
+#define RSP4_S18A_M BITFIELD_MASK(1) /* Bit 24 - Switching to 1.8V accepted */
+#define RSP4_S18A_S 24
+
+#define RSP4_STUFF_M BITFIELD_MASK(3) /* Bits [26:24] - Stuff bits */
+#define RSP4_STUFF_S 24
+#define RSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 27 - Memory present */
+#define RSP4_MEM_PRESENT_S 27
+#define RSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [30:28] - Number of I/O funcs */
+#define RSP4_NUM_FUNCS_S 28
+#define RSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 31 - SDIO card ready */
+#define RSP4_CARD_READY_S 31
+
+#define RSP6_STATUS_M BITFIELD_MASK(16) /* Bits [15:0] - Card status bits [19,22,23,12:0]
+ */
+#define RSP6_STATUS_S 0
+#define RSP6_IO_RCA_M BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */
+#define RSP6_IO_RCA_S 16
+
+#define RSP1_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error */
+#define RSP1_AKE_SEQ_ERROR_S 3
+#define RSP1_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */
+#define RSP1_APP_CMD_S 5
+#define RSP1_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data (buff empty) */
+#define RSP1_READY_FOR_DATA_S 8
+#define RSP1_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - State of card
+ * when Cmd was received
+ */
+#define RSP1_CURR_STATE_S 9
+#define RSP1_EARSE_RESET_M BITFIELD_MASK(1) /* Bit 13 - Erase seq cleared */
+#define RSP1_EARSE_RESET_S 13
+#define RSP1_CARD_ECC_DISABLE_M BITFIELD_MASK(1) /* Bit 14 - Card ECC disabled */
+#define RSP1_CARD_ECC_DISABLE_S 14
+#define RSP1_WP_ERASE_SKIP_M BITFIELD_MASK(1) /* Bit 15 - Partial blocks erased due to W/P */
+#define RSP1_WP_ERASE_SKIP_S 15
+#define RSP1_CID_CSD_OVERW_M BITFIELD_MASK(1) /* Bit 16 - Illegal write to CID or R/O bits
+ * of CSD
+ */
+#define RSP1_CID_CSD_OVERW_S 16
+#define RSP1_ERROR_M BITFIELD_MASK(1) /* Bit 19 - General/Unknown error */
+#define RSP1_ERROR_S 19
+#define RSP1_CC_ERROR_M BITFIELD_MASK(1) /* Bit 20 - Internal Card Control error */
+#define RSP1_CC_ERROR_S 20
+#define RSP1_CARD_ECC_FAILED_M BITFIELD_MASK(1) /* Bit 21 - Card internal ECC failed
+ * to correct data
+ */
+#define RSP1_CARD_ECC_FAILED_S 21
+#define RSP1_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 22 - Cmd not legal for the card state */
+#define RSP1_ILLEGAL_CMD_S 22
+#define RSP1_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 23 - CRC check of previous command failed
+ */
+#define RSP1_COM_CRC_ERROR_S 23
+#define RSP1_LOCK_UNLOCK_FAIL_M BITFIELD_MASK(1) /* Bit 24 - Card lock-unlock Cmd Seq error */
+#define RSP1_LOCK_UNLOCK_FAIL_S 24
+#define RSP1_CARD_LOCKED_M BITFIELD_MASK(1) /* Bit 25 - Card locked by the host */
+#define RSP1_CARD_LOCKED_S 25
+#define RSP1_WP_VIOLATION_M BITFIELD_MASK(1) /* Bit 26 - Attempt to program
+ * write-protected blocks
+ */
+#define RSP1_WP_VIOLATION_S 26
+#define RSP1_ERASE_PARAM_M BITFIELD_MASK(1) /* Bit 27 - Invalid erase blocks */
+#define RSP1_ERASE_PARAM_S 27
+#define RSP1_ERASE_SEQ_ERR_M BITFIELD_MASK(1) /* Bit 28 - Erase Cmd seq error */
+#define RSP1_ERASE_SEQ_ERR_S 28
+#define RSP1_BLK_LEN_ERR_M BITFIELD_MASK(1) /* Bit 29 - Block length error */
+#define RSP1_BLK_LEN_ERR_S 29
+#define RSP1_ADDR_ERR_M BITFIELD_MASK(1) /* Bit 30 - Misaligned address */
+#define RSP1_ADDR_ERR_S 30
+#define RSP1_OUT_OF_RANGE_M BITFIELD_MASK(1) /* Bit 31 - Cmd arg was out of range */
+#define RSP1_OUT_OF_RANGE_S 31
+
+
+#define RSP5_DATA_M BITFIELD_MASK(8) /* Bits [7:0] - data */
+#define RSP5_DATA_S 0
+#define RSP5_FLAGS_M BITFIELD_MASK(8) /* Bit [15:8] - Rsp flags */
+#define RSP5_FLAGS_S 8
+#define RSP5_STUFF_M BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */
+#define RSP5_STUFF_S 16
+
+/* ----------------------------------------------
+ * SDIO Command Response structures for SPI mode
+ * ----------------------------------------------
+ */
+#define SPIRSP4_IO_OCR_M BITFIELD_MASK(16) /* Bits [15:0] - Card's OCR Bits [23:8] */
+#define SPIRSP4_IO_OCR_S 0
+#define SPIRSP4_STUFF_M BITFIELD_MASK(3) /* Bits [18:16] - Stuff bits */
+#define SPIRSP4_STUFF_S 16
+#define SPIRSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 19 - Memory present */
+#define SPIRSP4_MEM_PRESENT_S 19
+#define SPIRSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [22:20] - Number of I/O funcs */
+#define SPIRSP4_NUM_FUNCS_S 20
+#define SPIRSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 23 - SDIO card ready */
+#define SPIRSP4_CARD_READY_S 23
+#define SPIRSP4_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - idle state */
+#define SPIRSP4_IDLE_STATE_S 24
+#define SPIRSP4_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */
+#define SPIRSP4_ILLEGAL_CMD_S 26
+#define SPIRSP4_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */
+#define SPIRSP4_COM_CRC_ERROR_S 27
+#define SPIRSP4_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error
+ */
+#define SPIRSP4_FUNC_NUM_ERROR_S 28
+#define SPIRSP4_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */
+#define SPIRSP4_PARAM_ERROR_S 30
+#define SPIRSP4_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */
+#define SPIRSP4_START_BIT_S 31
+
+#define SPIRSP5_DATA_M BITFIELD_MASK(8) /* Bits [23:16] - R/W Data */
+#define SPIRSP5_DATA_S 16
+#define SPIRSP5_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - Idle state */
BITFIELD_MASK(1) /* Bit 24 - Idle state */ +#define SPIRSP5_IDLE_STATE_S 24 +#define SPIRSP5_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */ +#define SPIRSP5_ILLEGAL_CMD_S 26 +#define SPIRSP5_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */ +#define SPIRSP5_COM_CRC_ERROR_S 27 +#define SPIRSP5_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error + */ +#define SPIRSP5_FUNC_NUM_ERROR_S 28 +#define SPIRSP5_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */ +#define SPIRSP5_PARAM_ERROR_S 30 +#define SPIRSP5_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */ +#define SPIRSP5_START_BIT_S 31 + +/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */ +#define RSP6STAT_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error + */ +#define RSP6STAT_AKE_SEQ_ERROR_S 3 +#define RSP6STAT_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */ +#define RSP6STAT_APP_CMD_S 5 +#define RSP6STAT_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data + * (buff empty) + */ +#define RSP6STAT_READY_FOR_DATA_S 8 +#define RSP6STAT_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - Card state at + * Cmd reception + */ +#define RSP6STAT_CURR_STATE_S 9 +#define RSP6STAT_ERROR_M BITFIELD_MASK(1) /* Bit 13 - General/Unknown error Bit 19 + */ +#define RSP6STAT_ERROR_S 13 +#define RSP6STAT_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 14 - Illegal cmd for + * card state Bit 22 + */ +#define RSP6STAT_ILLEGAL_CMD_S 14 +#define RSP6STAT_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 15 - CRC previous command + * failed Bit 23 + */ +#define RSP6STAT_COM_CRC_ERROR_S 15 + +#define SDIOH_XFER_TYPE_READ SD_IO_OP_READ +#define SDIOH_XFER_TYPE_WRITE SD_IO_OP_WRITE + +/* command issue options */ +#define CMD_OPTION_DEFAULT 0 +#define CMD_OPTION_TUNING 1 + +#endif /* def BCMSDIO */ +#endif /* _SDIO_H */ diff --git a/drivers/net/wireless/bcmdhd/include/sdioh.h b/drivers/net/wireless/bcmdhd/include/sdioh.h new file mode 100644 index 000000000000..bc1fcbc0a04b --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/sdioh.h @@ -0,0 +1,448 @@ +/* + * SDIO Host Controller Spec header file + * Register map and definitions for the Standard Host Controller + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
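The command and response tables above all follow one mask (_M) and shift (_S) convention. Below is a minimal sketch of how they are consumed; BITFIELD_MASK, GFIELD and SFIELD are written out here as assumptions (the Broadcom tree defines equivalents alongside these tables):

/* Assumed helpers: field##_M is the width mask, field##_S the bit offset. */
#define BITFIELD_MASK(width)     (((unsigned int)1 << (width)) - 1)
#define GFIELD(val, field)       (((val) >> field##_S) & field##_M)
#define SFIELD(val, field, bits) (((val) & ~(field##_M << field##_S)) | \
                                  ((unsigned int)(bits) << field##_S))

/* Pack a CMD52 argument: single-byte write of 0x5A to F1 register 0x1000A. */
static unsigned int cmd52_write_arg(void)
{
    unsigned int arg = 0;
    arg = SFIELD(arg, CMD52_RW_FLAG, 1);        /* write access */
    arg = SFIELD(arg, CMD52_FUNCTION, 1);       /* I/O function 1 */
    arg = SFIELD(arg, CMD52_REG_ADDR, 0x1000A); /* 17-bit register address */
    arg = SFIELD(arg, CMD52_DATA, 0x5A);        /* data byte */
    return arg;
}

/* Decode the data byte from the matching R5 response. */
static unsigned char rsp5_data(unsigned int rsp)
{
    return (unsigned char)GFIELD(rsp, RSP5_DATA);
}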
+ *
+ *
+ * <>
+ *
+ * $Id: sdioh.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _SDIOH_H
+#define _SDIOH_H
+
+#define SD_SysAddr 0x000
+#define SD_BlockSize 0x004
+#define SD_BlockCount 0x006
+#define SD_Arg0 0x008
+#define SD_Arg1 0x00A
+#define SD_TransferMode 0x00C
+#define SD_Command 0x00E
+#define SD_Response0 0x010
+#define SD_Response1 0x012
+#define SD_Response2 0x014
+#define SD_Response3 0x016
+#define SD_Response4 0x018
+#define SD_Response5 0x01A
+#define SD_Response6 0x01C
+#define SD_Response7 0x01E
+#define SD_BufferDataPort0 0x020
+#define SD_BufferDataPort1 0x022
+#define SD_PresentState 0x024
+#define SD_HostCntrl 0x028
+#define SD_PwrCntrl 0x029
+#define SD_BlockGapCntrl 0x02A
+#define SD_WakeupCntrl 0x02B
+#define SD_ClockCntrl 0x02C
+#define SD_TimeoutCntrl 0x02E
+#define SD_SoftwareReset 0x02F
+#define SD_IntrStatus 0x030
+#define SD_ErrorIntrStatus 0x032
+#define SD_IntrStatusEnable 0x034
+#define SD_ErrorIntrStatusEnable 0x036
+#define SD_IntrSignalEnable 0x038
+#define SD_ErrorIntrSignalEnable 0x03A
+#define SD_CMD12ErrorStatus 0x03C
+#define SD_Capabilities 0x040
+#define SD_Capabilities3 0x044
+#define SD_MaxCurCap 0x048
+#define SD_MaxCurCap_Reserved 0x04C
+#define SD_ADMA_ErrStatus 0x054
+#define SD_ADMA_SysAddr 0x58
+#define SD_SlotInterruptStatus 0x0FC
+#define SD_HostControllerVersion 0x0FE
+#define SD_GPIO_Reg 0x100
+#define SD_GPIO_OE 0x104
+#define SD_GPIO_Enable 0x108
+
+/* SD specific registers in PCI config space */
+#define SD_SlotInfo 0x40
+
+/* HC 3.0 specific registers and offsets */
+#define SD3_HostCntrl2 0x03E
+/* preset regs: start offset and count */
+#define SD3_PresetValStart 0x060
+#define SD3_PresetValCount 8
+/* individual preset registers */
+#define SD3_PresetVal_init 0x060
+#define SD3_PresetVal_default 0x062
+#define SD3_PresetVal_HS 0x064
+#define SD3_PresetVal_SDR12 0x066
+#define SD3_PresetVal_SDR25 0x068
+#define SD3_PresetVal_SDR50 0x06a
+#define SD3_PresetVal_SDR104 0x06c
+#define SD3_PresetVal_DDR50 0x06e
+/* SDIO3.0 Revx specific Registers */
+#define SD3_Tuning_Info_Register 0x0EC
+#define SD3_WL_BT_reset_register 0x0F0
+
+
+/* preset value indices */
+#define SD3_PRESETVAL_INITIAL_IX 0
+#define SD3_PRESETVAL_DESPEED_IX 1
+#define SD3_PRESETVAL_HISPEED_IX 2
+#define SD3_PRESETVAL_SDR12_IX 3
+#define SD3_PRESETVAL_SDR25_IX 4
+#define SD3_PRESETVAL_SDR50_IX 5
+#define SD3_PRESETVAL_SDR104_IX 6
+#define SD3_PRESETVAL_DDR50_IX 7
+
+/* SD_Capabilities reg (0x040) */
+#define CAP_TO_CLKFREQ_M BITFIELD_MASK(6)
+#define CAP_TO_CLKFREQ_S 0
+#define CAP_TO_CLKUNIT_M BITFIELD_MASK(1)
+#define CAP_TO_CLKUNIT_S 7
+/* Note: for sdio-2.0 case, this mask has to be 6 bits, but msb 2
+ bits are reserved.
going ahead with 8 bits, as it is req for 3.0 +*/ +#define CAP_BASECLK_M BITFIELD_MASK(8) +#define CAP_BASECLK_S 8 +#define CAP_MAXBLOCK_M BITFIELD_MASK(2) +#define CAP_MAXBLOCK_S 16 +#define CAP_ADMA2_M BITFIELD_MASK(1) +#define CAP_ADMA2_S 19 +#define CAP_ADMA1_M BITFIELD_MASK(1) +#define CAP_ADMA1_S 20 +#define CAP_HIGHSPEED_M BITFIELD_MASK(1) +#define CAP_HIGHSPEED_S 21 +#define CAP_DMA_M BITFIELD_MASK(1) +#define CAP_DMA_S 22 +#define CAP_SUSPEND_M BITFIELD_MASK(1) +#define CAP_SUSPEND_S 23 +#define CAP_VOLT_3_3_M BITFIELD_MASK(1) +#define CAP_VOLT_3_3_S 24 +#define CAP_VOLT_3_0_M BITFIELD_MASK(1) +#define CAP_VOLT_3_0_S 25 +#define CAP_VOLT_1_8_M BITFIELD_MASK(1) +#define CAP_VOLT_1_8_S 26 +#define CAP_64BIT_HOST_M BITFIELD_MASK(1) +#define CAP_64BIT_HOST_S 28 + +#define SDIO_OCR_READ_FAIL (2) + + +#define CAP_ASYNCINT_SUP_M BITFIELD_MASK(1) +#define CAP_ASYNCINT_SUP_S 29 + +#define CAP_SLOTTYPE_M BITFIELD_MASK(2) +#define CAP_SLOTTYPE_S 30 + +#define CAP3_MSBits_OFFSET (32) +/* note: following are caps MSB32 bits. + So the bits start from 0, instead of 32. that is why + CAP3_MSBits_OFFSET is subtracted. +*/ +#define CAP3_SDR50_SUP_M BITFIELD_MASK(1) +#define CAP3_SDR50_SUP_S (32 - CAP3_MSBits_OFFSET) + +#define CAP3_SDR104_SUP_M BITFIELD_MASK(1) +#define CAP3_SDR104_SUP_S (33 - CAP3_MSBits_OFFSET) + +#define CAP3_DDR50_SUP_M BITFIELD_MASK(1) +#define CAP3_DDR50_SUP_S (34 - CAP3_MSBits_OFFSET) + +/* for knowing the clk caps in a single read */ +#define CAP3_30CLKCAP_M BITFIELD_MASK(3) +#define CAP3_30CLKCAP_S (32 - CAP3_MSBits_OFFSET) + +#define CAP3_DRIVTYPE_A_M BITFIELD_MASK(1) +#define CAP3_DRIVTYPE_A_S (36 - CAP3_MSBits_OFFSET) + +#define CAP3_DRIVTYPE_C_M BITFIELD_MASK(1) +#define CAP3_DRIVTYPE_C_S (37 - CAP3_MSBits_OFFSET) + +#define CAP3_DRIVTYPE_D_M BITFIELD_MASK(1) +#define CAP3_DRIVTYPE_D_S (38 - CAP3_MSBits_OFFSET) + +#define CAP3_RETUNING_TC_M BITFIELD_MASK(4) +#define CAP3_RETUNING_TC_S (40 - CAP3_MSBits_OFFSET) + +#define CAP3_TUNING_SDR50_M BITFIELD_MASK(1) +#define CAP3_TUNING_SDR50_S (45 - CAP3_MSBits_OFFSET) + +#define CAP3_RETUNING_MODES_M BITFIELD_MASK(2) +#define CAP3_RETUNING_MODES_S (46 - CAP3_MSBits_OFFSET) + +#define CAP3_CLK_MULT_M BITFIELD_MASK(8) +#define CAP3_CLK_MULT_S (48 - CAP3_MSBits_OFFSET) + +#define PRESET_DRIVR_SELECT_M BITFIELD_MASK(2) +#define PRESET_DRIVR_SELECT_S 14 + +#define PRESET_CLK_DIV_M BITFIELD_MASK(10) +#define PRESET_CLK_DIV_S 0 + +/* SD_MaxCurCap reg (0x048) */ +#define CAP_CURR_3_3_M BITFIELD_MASK(8) +#define CAP_CURR_3_3_S 0 +#define CAP_CURR_3_0_M BITFIELD_MASK(8) +#define CAP_CURR_3_0_S 8 +#define CAP_CURR_1_8_M BITFIELD_MASK(8) +#define CAP_CURR_1_8_S 16 + +/* SD_SysAddr: Offset 0x0000, Size 4 bytes */ + +/* SD_BlockSize: Offset 0x004, Size 2 bytes */ +#define BLKSZ_BLKSZ_M BITFIELD_MASK(12) +#define BLKSZ_BLKSZ_S 0 +#define BLKSZ_BNDRY_M BITFIELD_MASK(3) +#define BLKSZ_BNDRY_S 12 + +/* SD_BlockCount: Offset 0x006, size 2 bytes */ + +/* SD_Arg0: Offset 0x008, size = 4 bytes */ +/* SD_TransferMode Offset 0x00C, size = 2 bytes */ +#define XFER_DMA_ENABLE_M BITFIELD_MASK(1) +#define XFER_DMA_ENABLE_S 0 +#define XFER_BLK_COUNT_EN_M BITFIELD_MASK(1) +#define XFER_BLK_COUNT_EN_S 1 +#define XFER_CMD_12_EN_M BITFIELD_MASK(1) +#define XFER_CMD_12_EN_S 2 +#define XFER_DATA_DIRECTION_M BITFIELD_MASK(1) +#define XFER_DATA_DIRECTION_S 4 +#define XFER_MULTI_BLOCK_M BITFIELD_MASK(1) +#define XFER_MULTI_BLOCK_S 5 + +/* SD_Command: Offset 0x00E, size = 2 bytes */ +/* resp_type field */ +#define RESP_TYPE_NONE 0 +#define RESP_TYPE_136 1 +#define 
RESP_TYPE_48 2 +#define RESP_TYPE_48_BUSY 3 +/* type field */ +#define CMD_TYPE_NORMAL 0 +#define CMD_TYPE_SUSPEND 1 +#define CMD_TYPE_RESUME 2 +#define CMD_TYPE_ABORT 3 + +#define CMD_RESP_TYPE_M BITFIELD_MASK(2) /* Bits [0-1] - Response type */ +#define CMD_RESP_TYPE_S 0 +#define CMD_CRC_EN_M BITFIELD_MASK(1) /* Bit 3 - CRC enable */ +#define CMD_CRC_EN_S 3 +#define CMD_INDEX_EN_M BITFIELD_MASK(1) /* Bit 4 - Enable index checking */ +#define CMD_INDEX_EN_S 4 +#define CMD_DATA_EN_M BITFIELD_MASK(1) /* Bit 5 - Using DAT line */ +#define CMD_DATA_EN_S 5 +#define CMD_TYPE_M BITFIELD_MASK(2) /* Bit [6-7] - Normal, abort, resume, etc + */ +#define CMD_TYPE_S 6 +#define CMD_INDEX_M BITFIELD_MASK(6) /* Bits [8-13] - Command number */ +#define CMD_INDEX_S 8 + +/* SD_BufferDataPort0 : Offset 0x020, size = 2 or 4 bytes */ +/* SD_BufferDataPort1 : Offset 0x022, size = 2 bytes */ +/* SD_PresentState : Offset 0x024, size = 4 bytes */ +#define PRES_CMD_INHIBIT_M BITFIELD_MASK(1) /* Bit 0 May use CMD */ +#define PRES_CMD_INHIBIT_S 0 +#define PRES_DAT_INHIBIT_M BITFIELD_MASK(1) /* Bit 1 May use DAT */ +#define PRES_DAT_INHIBIT_S 1 +#define PRES_DAT_BUSY_M BITFIELD_MASK(1) /* Bit 2 DAT is busy */ +#define PRES_DAT_BUSY_S 2 +#define PRES_PRESENT_RSVD_M BITFIELD_MASK(5) /* Bit [3-7] rsvd */ +#define PRES_PRESENT_RSVD_S 3 +#define PRES_WRITE_ACTIVE_M BITFIELD_MASK(1) /* Bit 8 Write is active */ +#define PRES_WRITE_ACTIVE_S 8 +#define PRES_READ_ACTIVE_M BITFIELD_MASK(1) /* Bit 9 Read is active */ +#define PRES_READ_ACTIVE_S 9 +#define PRES_WRITE_DATA_RDY_M BITFIELD_MASK(1) /* Bit 10 Write buf is avail */ +#define PRES_WRITE_DATA_RDY_S 10 +#define PRES_READ_DATA_RDY_M BITFIELD_MASK(1) /* Bit 11 Read buf data avail */ +#define PRES_READ_DATA_RDY_S 11 +#define PRES_CARD_PRESENT_M BITFIELD_MASK(1) /* Bit 16 Card present - debounced */ +#define PRES_CARD_PRESENT_S 16 +#define PRES_CARD_STABLE_M BITFIELD_MASK(1) /* Bit 17 Debugging */ +#define PRES_CARD_STABLE_S 17 +#define PRES_CARD_PRESENT_RAW_M BITFIELD_MASK(1) /* Bit 18 Not debounced */ +#define PRES_CARD_PRESENT_RAW_S 18 +#define PRES_WRITE_ENABLED_M BITFIELD_MASK(1) /* Bit 19 Write protected? 
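Host drivers typically gate command submission on the two inhibit bits just defined. A hedged sketch follows; sd_read32() is a hypothetical MMIO accessor and GFIELD the usual mask/shift helper, neither of which is defined by this header:

extern unsigned int sd_read32(unsigned int offset); /* hypothetical accessor */

/* Spin until neither the CMD nor the DAT line is inhibited. */
static int sd_wait_not_inhibited(void)
{
    int spins = 100000; /* arbitrary bound for the sketch */

    while (spins-- > 0) {
        unsigned int ps = sd_read32(SD_PresentState);
        if (!GFIELD(ps, PRES_CMD_INHIBIT) && !GFIELD(ps, PRES_DAT_INHIBIT))
            return 0;  /* controller can accept a command using DAT */
    }
    return -1;         /* still busy */
}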
*/ +#define PRES_WRITE_ENABLED_S 19 +#define PRES_DAT_SIGNAL_M BITFIELD_MASK(4) /* Bit [20-23] Debugging */ +#define PRES_DAT_SIGNAL_S 20 +#define PRES_CMD_SIGNAL_M BITFIELD_MASK(1) /* Bit 24 Debugging */ +#define PRES_CMD_SIGNAL_S 24 + +/* SD_HostCntrl: Offset 0x028, size = 1 bytes */ +#define HOST_LED_M BITFIELD_MASK(1) /* Bit 0 LED On/Off */ +#define HOST_LED_S 0 +#define HOST_DATA_WIDTH_M BITFIELD_MASK(1) /* Bit 1 4 bit enable */ +#define HOST_DATA_WIDTH_S 1 +#define HOST_HI_SPEED_EN_M BITFIELD_MASK(1) /* Bit 2 High speed vs low speed */ +#define HOST_DMA_SEL_S 3 +#define HOST_DMA_SEL_M BITFIELD_MASK(2) /* Bit 4:3 DMA Select */ +#define HOST_HI_SPEED_EN_S 2 + +/* Host Control2: */ +#define HOSTCtrl2_PRESVAL_EN_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_PRESVAL_EN_S 15 /* bit# */ + +#define HOSTCtrl2_ASYINT_EN_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_ASYINT_EN_S 14 /* bit# */ + +#define HOSTCtrl2_SAMPCLK_SEL_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_SAMPCLK_SEL_S 7 /* bit# */ + +#define HOSTCtrl2_EXEC_TUNING_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_EXEC_TUNING_S 6 /* bit# */ + +#define HOSTCtrl2_DRIVSTRENGTH_SEL_M BITFIELD_MASK(2) /* 2 bit */ +#define HOSTCtrl2_DRIVSTRENGTH_SEL_S 4 /* bit# */ + +#define HOSTCtrl2_1_8SIG_EN_M BITFIELD_MASK(1) /* 1 bit */ +#define HOSTCtrl2_1_8SIG_EN_S 3 /* bit# */ + +#define HOSTCtrl2_UHSMODE_SEL_M BITFIELD_MASK(3) /* 3 bit */ +#define HOSTCtrl2_UHSMODE_SEL_S 0 /* bit# */ + +#define HOST_CONTR_VER_2 (1) +#define HOST_CONTR_VER_3 (2) + +/* misc defines */ +#define SD1_MODE 0x1 /* SD Host Cntrlr Spec */ +#define SD4_MODE 0x2 /* SD Host Cntrlr Spec */ + +/* SD_PwrCntrl: Offset 0x029, size = 1 bytes */ +#define PWR_BUS_EN_M BITFIELD_MASK(1) /* Bit 0 Power the bus */ +#define PWR_BUS_EN_S 0 +#define PWR_VOLTS_M BITFIELD_MASK(3) /* Bit [1-3] Voltage Select */ +#define PWR_VOLTS_S 1 + +/* SD_SoftwareReset: Offset 0x02F, size = 1 byte */ +#define SW_RESET_ALL_M BITFIELD_MASK(1) /* Bit 0 Reset All */ +#define SW_RESET_ALL_S 0 +#define SW_RESET_CMD_M BITFIELD_MASK(1) /* Bit 1 CMD Line Reset */ +#define SW_RESET_CMD_S 1 +#define SW_RESET_DAT_M BITFIELD_MASK(1) /* Bit 2 DAT Line Reset */ +#define SW_RESET_DAT_S 2 + +/* SD_IntrStatus: Offset 0x030, size = 2 bytes */ +/* Defs also serve SD_IntrStatusEnable and SD_IntrSignalEnable */ +#define INTSTAT_CMD_COMPLETE_M BITFIELD_MASK(1) /* Bit 0 */ +#define INTSTAT_CMD_COMPLETE_S 0 +#define INTSTAT_XFER_COMPLETE_M BITFIELD_MASK(1) +#define INTSTAT_XFER_COMPLETE_S 1 +#define INTSTAT_BLOCK_GAP_EVENT_M BITFIELD_MASK(1) +#define INTSTAT_BLOCK_GAP_EVENT_S 2 +#define INTSTAT_DMA_INT_M BITFIELD_MASK(1) +#define INTSTAT_DMA_INT_S 3 +#define INTSTAT_BUF_WRITE_READY_M BITFIELD_MASK(1) +#define INTSTAT_BUF_WRITE_READY_S 4 +#define INTSTAT_BUF_READ_READY_M BITFIELD_MASK(1) +#define INTSTAT_BUF_READ_READY_S 5 +#define INTSTAT_CARD_INSERTION_M BITFIELD_MASK(1) +#define INTSTAT_CARD_INSERTION_S 6 +#define INTSTAT_CARD_REMOVAL_M BITFIELD_MASK(1) +#define INTSTAT_CARD_REMOVAL_S 7 +#define INTSTAT_CARD_INT_M BITFIELD_MASK(1) +#define INTSTAT_CARD_INT_S 8 +#define INTSTAT_RETUNING_INT_M BITFIELD_MASK(1) /* Bit 12 */ +#define INTSTAT_RETUNING_INT_S 12 +#define INTSTAT_ERROR_INT_M BITFIELD_MASK(1) /* Bit 15 */ +#define INTSTAT_ERROR_INT_S 15 + +/* SD_ErrorIntrStatus: Offset 0x032, size = 2 bytes */ +/* Defs also serve SD_ErrorIntrStatusEnable and SD_ErrorIntrSignalEnable */ +#define ERRINT_CMD_TIMEOUT_M BITFIELD_MASK(1) +#define ERRINT_CMD_TIMEOUT_S 0 +#define ERRINT_CMD_CRC_M BITFIELD_MASK(1) +#define 
ERRINT_CMD_CRC_S 1 +#define ERRINT_CMD_ENDBIT_M BITFIELD_MASK(1) +#define ERRINT_CMD_ENDBIT_S 2 +#define ERRINT_CMD_INDEX_M BITFIELD_MASK(1) +#define ERRINT_CMD_INDEX_S 3 +#define ERRINT_DATA_TIMEOUT_M BITFIELD_MASK(1) +#define ERRINT_DATA_TIMEOUT_S 4 +#define ERRINT_DATA_CRC_M BITFIELD_MASK(1) +#define ERRINT_DATA_CRC_S 5 +#define ERRINT_DATA_ENDBIT_M BITFIELD_MASK(1) +#define ERRINT_DATA_ENDBIT_S 6 +#define ERRINT_CURRENT_LIMIT_M BITFIELD_MASK(1) +#define ERRINT_CURRENT_LIMIT_S 7 +#define ERRINT_AUTO_CMD12_M BITFIELD_MASK(1) +#define ERRINT_AUTO_CMD12_S 8 +#define ERRINT_VENDOR_M BITFIELD_MASK(4) +#define ERRINT_VENDOR_S 12 +#define ERRINT_ADMA_M BITFIELD_MASK(1) +#define ERRINT_ADMA_S 9 + +/* Also provide definitions in "normal" form to allow combined masks */ +#define ERRINT_CMD_TIMEOUT_BIT 0x0001 +#define ERRINT_CMD_CRC_BIT 0x0002 +#define ERRINT_CMD_ENDBIT_BIT 0x0004 +#define ERRINT_CMD_INDEX_BIT 0x0008 +#define ERRINT_DATA_TIMEOUT_BIT 0x0010 +#define ERRINT_DATA_CRC_BIT 0x0020 +#define ERRINT_DATA_ENDBIT_BIT 0x0040 +#define ERRINT_CURRENT_LIMIT_BIT 0x0080 +#define ERRINT_AUTO_CMD12_BIT 0x0100 +#define ERRINT_ADMA_BIT 0x0200 + +/* Masks to select CMD vs. DATA errors */ +#define ERRINT_CMD_ERRS (ERRINT_CMD_TIMEOUT_BIT | ERRINT_CMD_CRC_BIT |\ + ERRINT_CMD_ENDBIT_BIT | ERRINT_CMD_INDEX_BIT) +#define ERRINT_DATA_ERRS (ERRINT_DATA_TIMEOUT_BIT | ERRINT_DATA_CRC_BIT |\ + ERRINT_DATA_ENDBIT_BIT | ERRINT_ADMA_BIT) +#define ERRINT_TRANSFER_ERRS (ERRINT_CMD_ERRS | ERRINT_DATA_ERRS) + +/* SD_WakeupCntr_BlockGapCntrl : Offset 0x02A , size = bytes */ +/* SD_ClockCntrl : Offset 0x02C , size = bytes */ +/* SD_SoftwareReset_TimeoutCntrl : Offset 0x02E , size = bytes */ +/* SD_IntrStatus : Offset 0x030 , size = bytes */ +/* SD_ErrorIntrStatus : Offset 0x032 , size = bytes */ +/* SD_IntrStatusEnable : Offset 0x034 , size = bytes */ +/* SD_ErrorIntrStatusEnable : Offset 0x036 , size = bytes */ +/* SD_IntrSignalEnable : Offset 0x038 , size = bytes */ +/* SD_ErrorIntrSignalEnable : Offset 0x03A , size = bytes */ +/* SD_CMD12ErrorStatus : Offset 0x03C , size = bytes */ +/* SD_Capabilities : Offset 0x040 , size = bytes */ +/* SD_MaxCurCap : Offset 0x048 , size = bytes */ +/* SD_MaxCurCap_Reserved: Offset 0x04C , size = bytes */ +/* SD_SlotInterruptStatus: Offset 0x0FC , size = bytes */ +/* SD_HostControllerVersion : Offset 0x0FE , size = bytes */ + +/* SDIO Host Control Register DMA Mode Definitions */ +#define SDIOH_SDMA_MODE 0 +#define SDIOH_ADMA1_MODE 1 +#define SDIOH_ADMA2_MODE 2 +#define SDIOH_ADMA2_64_MODE 3 + +#define ADMA2_ATTRIBUTE_VALID (1 << 0) /* ADMA Descriptor line valid */ +#define ADMA2_ATTRIBUTE_END (1 << 1) /* End of Descriptor */ +#define ADMA2_ATTRIBUTE_INT (1 << 2) /* Interrupt when line is done */ +#define ADMA2_ATTRIBUTE_ACT_NOP (0 << 4) /* Skip current line, go to next. */ +#define ADMA2_ATTRIBUTE_ACT_RSV (1 << 4) /* Same as NOP */ +#define ADMA1_ATTRIBUTE_ACT_SET (1 << 4) /* ADMA1 Only - set transfer length */ +#define ADMA2_ATTRIBUTE_ACT_TRAN (2 << 4) /* Transfer Data of one descriptor line. 
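The combined ERRINT_*_ERRS masks above let an interrupt handler split faults by path with one test each. A hedged sketch; the recovery policy itself is host specific and elided:

/* Classify an error-interrupt status word using the combined masks above. */
static void sd_classify_errintr(unsigned short errint)
{
    if (errint & ERRINT_CMD_ERRS) {
        /* CMD-line fault: typically recovered via SW_RESET_CMD */
    }
    if (errint & ERRINT_DATA_ERRS) {
        /* DAT-line or ADMA fault: typically recovered via SW_RESET_DAT */
    }
}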
*/
+#define ADMA2_ATTRIBUTE_ACT_LINK (3 << 4) /* Link Descriptor */
+
+/* ADMA2 Descriptor Table Entry for 32-bit Address */
+typedef struct adma2_dscr_32b {
+ uint32 len_attr;
+ uint32 phys_addr;
+} adma2_dscr_32b_t;
+
+/* ADMA1 Descriptor Table Entry */
+typedef struct adma1_dscr {
+ uint32 phys_addr_attr;
+} adma1_dscr_t;
+
+#endif /* _SDIOH_H */
diff --git a/drivers/net/wireless/bcmdhd/include/sdiovar.h b/drivers/net/wireless/bcmdhd/include/sdiovar.h
new file mode 100644
index 000000000000..15b74abec9ea
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/sdiovar.h
@@ -0,0 +1,61 @@
+/*
+ * Structure used by apps whose drivers access SDIO drivers.
+ * Pulled out separately so dhdu and wlu can both use it.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: sdiovar.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _sdiovar_h_
+#define _sdiovar_h_
+
+#include <typedefs.h>
+
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
+typedef struct sdreg {
+ int func;
+ int offset;
+ int value;
+} sdreg_t;
+
+/* Common msglevel constants */
+#define SDH_ERROR_VAL 0x0001 /* Error */
+#define SDH_TRACE_VAL 0x0002 /* Trace */
+#define SDH_INFO_VAL 0x0004 /* Info */
+#define SDH_DEBUG_VAL 0x0008 /* Debug */
+#define SDH_DATA_VAL 0x0010 /* Data */
+#define SDH_CTRL_VAL 0x0020 /* Control Regs */
+#define SDH_LOG_VAL 0x0040 /* Enable bcmlog */
+#define SDH_DMA_VAL 0x0080 /* DMA */
+
+#define NUM_PREV_TRANSACTIONS 16
+
+
+#include <packed_section_end.h>
+
+#endif /* _sdiovar_h_ */
diff --git a/drivers/net/wireless/bcmdhd/include/siutils.h b/drivers/net/wireless/bcmdhd/include/siutils.h
new file mode 100644
index 000000000000..27ad7c484455
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/siutils.h
@@ -0,0 +1,601 @@
+/*
+ * Misc utility routines for accessing the SOC Interconnects
+ * of Broadcom HNBU chips.
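Looking back at the adma2_dscr_32b_t entry that closes sdioh.h above: a descriptor carries its transfer length in the top half of len_attr and its control bits in the low half. A hedged sketch; the 16-bit length at len_attr[31:16] follows the standard SDHC ADMA2 layout, which this header does not itself spell out:

/* Fill a single ADMA2 descriptor for one contiguous buffer. */
static void adma2_fill_one(adma2_dscr_32b_t *d, uint32 pa, uint16 len)
{
    d->len_attr = ((uint32)len << 16) |
                  ADMA2_ATTRIBUTE_ACT_TRAN |  /* transfer data */
                  ADMA2_ATTRIBUTE_END |       /* last entry of the chain */
                  ADMA2_ATTRIBUTE_VALID;
    d->phys_addr = pa;
}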
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: siutils.h 530150 2015-01-29 08:43:40Z $ + */ + +#ifndef _siutils_h_ +#define _siutils_h_ + +#ifdef SR_DEBUG +#include "wlioctl.h" +#endif /* SR_DEBUG */ + + +/** + * Data structure to export all chip specific common variables + * public (read-only) portion of siutils handle returned by si_attach()/si_kattach() + */ +struct si_pub { + uint socitype; /**< SOCI_SB, SOCI_AI */ + + uint bustype; /**< SI_BUS, PCI_BUS */ + uint buscoretype; /**< PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */ + uint buscorerev; /**< buscore rev */ + uint buscoreidx; /**< buscore index */ + int ccrev; /**< chip common core rev */ + uint32 cccaps; /**< chip common capabilities */ + uint32 cccaps_ext; /**< chip common capabilities extension */ + int pmurev; /**< pmu core rev */ + uint32 pmucaps; /**< pmu capabilities */ + uint boardtype; /**< board type */ + uint boardrev; /* board rev */ + uint boardvendor; /**< board vendor */ + uint boardflags; /**< board flags */ + uint boardflags2; /**< board flags2 */ + uint chip; /**< chip number */ + uint chiprev; /**< chip revision */ + uint chippkg; /**< chip package option */ + uint32 chipst; /**< chip status */ + bool issim; /**< chip is in simulation or emulation */ + uint socirev; /**< SOC interconnect rev */ + bool pci_pr32414; + +}; + +/* for HIGH_ONLY driver, the si_t must be writable to allow states sync from BMAC to HIGH driver + * for monolithic driver, it is readonly to prevent accident change + */ +typedef const struct si_pub si_t; + +/* + * Many of the routines below take an 'sih' handle as their first arg. + * Allocate this by calling si_attach(). Free it by calling si_detach(). + * At any one time, the sih is logically focused on one particular si core + * (the "current core"). + * Use si_setcore() or si_setcoreidx() to change the association to another core. + */ +#define SI_OSH NULL /**< Use for si_kattach when no osh is available */ + +#define BADIDX (SI_MAXCORES + 1) + +/* clkctl xtal what flags */ +#define XTAL 0x1 /**< primary crystal oscillator (2050) */ +#define PLL 0x2 /**< main chip pll */ + +/* clkctl clk mode */ +#define CLK_FAST 0 /**< force fast (pll) clock */ +#define CLK_DYNAMIC 2 /**< enable dynamic clock control */ + +/* GPIO usage priorities */ +#define GPIO_DRV_PRIORITY 0 /**< Driver */ +#define GPIO_APP_PRIORITY 1 /**< Application */ +#define GPIO_HI_PRIORITY 2 /**< Highest priority. 
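The handle discipline described in the block comment above (attach, focus on a core, operate, restore) looks like the hedged sketch below in practice. The attach arguments are placeholders, and CC_CORE_ID and SI_BUS come from the companion SoC headers, not from this file:

/* Attach, focus on chipcommon, then restore the caller's core. */
static void example_si_usage(uint devid, osl_t *osh, void *regs)
{
    char *vars = NULL;
    uint varsz = 0;
    si_t *sih = si_attach(devid, osh, regs, SI_BUS, NULL, &vars, &varsz);

    if (sih != NULL) {
        uint origidx = si_coreidx(sih);  /* remember the current core */
        si_setcore(sih, CC_CORE_ID, 0);  /* switch focus to chipcommon */
        /* ... operate on the current core, e.g. via si_corereg() ... */
        si_setcoreidx(sih, origidx);     /* restore the previous focus */
        si_detach(sih);
    }
}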
Ignore GPIO reservation */ + +/* GPIO pull up/down */ +#define GPIO_PULLUP 0 +#define GPIO_PULLDN 1 + +/* GPIO event regtype */ +#define GPIO_REGEVT 0 /**< GPIO register event */ +#define GPIO_REGEVT_INTMSK 1 /**< GPIO register event int mask */ +#define GPIO_REGEVT_INTPOL 2 /**< GPIO register event int polarity */ + +/* device path */ +#define SI_DEVPATH_BUFSZ 16 /**< min buffer size in bytes */ + +/* SI routine enumeration: to be used by update function with multiple hooks */ +#define SI_DOATTACH 1 +#define SI_PCIDOWN 2 /**< wireless interface is down */ +#define SI_PCIUP 3 /**< wireless interface is up */ + +#ifdef SR_DEBUG +#define PMU_RES 31 +#endif /* SR_DEBUG */ + +#define ISSIM_ENAB(sih) FALSE + +/* PMU clock/power control */ +#if defined(BCMPMUCTL) +#define PMUCTL_ENAB(sih) (BCMPMUCTL) +#else +#define PMUCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PMU) +#endif + +#define AOB_ENAB(sih) ((sih)->ccrev >= 35 ? \ + ((sih)->cccaps_ext & CC_CAP_EXT_AOB_PRESENT) : 0) + +/* chipcommon clock/power control (exclusive with PMU's) */ +#if defined(BCMPMUCTL) && BCMPMUCTL +#define CCCTL_ENAB(sih) (0) +#define CCPLL_ENAB(sih) (0) +#else +#define CCCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PWR_CTL) +#define CCPLL_ENAB(sih) ((sih)->cccaps & CC_CAP_PLL_MASK) +#endif + +typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg); + +/* External BT Coex enable mask */ +#define CC_BTCOEX_EN_MASK 0x01 +/* External PA enable mask */ +#define GPIO_CTRL_EPA_EN_MASK 0x40 +/* WL/BT control enable mask */ +#define GPIO_CTRL_5_6_EN_MASK 0x60 +#define GPIO_CTRL_7_6_EN_MASK 0xC0 +#define GPIO_OUT_7_EN_MASK 0x80 + + + +/* CR4 specific defines used by the host driver */ +#define SI_CR4_CAP (0x04) +#define SI_CR4_BANKIDX (0x40) +#define SI_CR4_BANKINFO (0x44) +#define SI_CR4_BANKPDA (0x4C) + +#define ARMCR4_TCBBNB_MASK 0xf0 +#define ARMCR4_TCBBNB_SHIFT 4 +#define ARMCR4_TCBANB_MASK 0xf +#define ARMCR4_TCBANB_SHIFT 0 + +#define SICF_CPUHALT (0x0020) +#define ARMCR4_BSZ_MASK 0x3f +#define ARMCR4_BSZ_MULT 8192 +#define SI_BPIND_1BYTE 0x1 +#define SI_BPIND_2BYTE 0x3 +#define SI_BPIND_4BYTE 0xF +#include +/* === exported functions === */ +extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype, + void *sdh, char **vars, uint *varsz); +extern si_t *si_kattach(osl_t *osh); +extern void si_detach(si_t *sih); +extern bool si_pci_war16165(si_t *sih); +extern void * +si_d11_switch_addrbase(si_t *sih, uint coreunit); +extern uint si_corelist(si_t *sih, uint coreid[]); +extern uint si_coreid(si_t *sih); +extern uint si_flag(si_t *sih); +extern uint si_flag_alt(si_t *sih); +extern uint si_intflag(si_t *sih); +extern uint si_coreidx(si_t *sih); +extern uint si_coreunit(si_t *sih); +extern uint si_corevendor(si_t *sih); +extern uint si_corerev(si_t *sih); +extern void *si_osh(si_t *sih); +extern void si_setosh(si_t *sih, osl_t *osh); +extern uint si_backplane_access(si_t *sih, uint addr, uint size, + uint *val, bool read); +extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +extern uint si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val); +extern uint32 *si_corereg_addr(si_t *sih, uint coreidx, uint regoff); +extern void *si_coreregs(si_t *sih); +extern uint si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val); +extern uint si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val); +extern void *si_wrapperregs(si_t *sih); +extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val); +extern void si_core_cflags_wo(si_t *sih, 
uint32 mask, uint32 val); +extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val); +extern void si_d11rsdb_core1_alt_reg_clk_dis(si_t *sih); +extern void si_d11rsdb_core1_alt_reg_clk_en(si_t *sih); +extern bool si_iscoreup(si_t *sih); +extern uint si_numcoreunits(si_t *sih, uint coreid); +extern uint si_numd11coreunits(si_t *sih); +extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit); +extern void *si_setcoreidx(si_t *sih, uint coreidx); +extern void *si_setcore(si_t *sih, uint coreid, uint coreunit); +extern void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val); +extern void si_restore_core(si_t *sih, uint coreid, uint intr_val); +extern int si_numaddrspaces(si_t *sih); +extern uint32 si_addrspace(si_t *sih, uint asidx); +extern uint32 si_addrspacesize(si_t *sih, uint asidx); +extern void si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size); +extern int si_corebist(si_t *sih); +extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void si_core_disable(si_t *sih, uint32 bits); +extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m); +extern uint si_chip_hostif(si_t *sih); +extern bool si_read_pmu_autopll(si_t *sih); +extern uint32 si_clock(si_t *sih); +extern uint32 si_alp_clock(si_t *sih); /* returns [Hz] units */ +extern uint32 si_ilp_clock(si_t *sih); /* returns [Hz] units */ +extern void si_pci_setup(si_t *sih, uint coremask); +extern void si_pcmcia_init(si_t *sih); +extern void si_setint(si_t *sih, int siflag); +extern bool si_backplane64(si_t *sih); +extern void si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn, + void *intrsenabled_fn, void *intr_arg); +extern void si_deregister_intr_callback(si_t *sih); +extern void si_clkctl_init(si_t *sih); +extern uint16 si_clkctl_fast_pwrup_delay(si_t *sih); +extern bool si_clkctl_cc(si_t *sih, uint mode); +extern int si_clkctl_xtal(si_t *sih, uint what, bool on); +extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val); +extern void si_btcgpiowar(si_t *sih); +extern bool si_deviceremoved(si_t *sih); +extern uint32 si_sysmem_size(si_t *sih); +extern uint32 si_socram_size(si_t *sih); +extern uint32 si_socdevram_size(si_t *sih); +extern uint32 si_socram_srmem_size(si_t *sih); +extern void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 bankpda); +extern void si_socdevram(si_t *sih, bool set, uint8 *ennable, uint8 *protect, uint8 *remap); +extern bool si_socdevram_pkg(si_t *sih); +extern bool si_socdevram_remap_isenb(si_t *sih); +extern uint32 si_socdevram_remap_size(si_t *sih); + +extern void si_watchdog(si_t *sih, uint ticks); +extern void si_watchdog_ms(si_t *sih, uint32 ms); +extern uint32 si_watchdog_msticks(void); +extern void *si_gpiosetcore(si_t *sih); +extern uint32 si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioin(si_t *sih); +extern uint32 si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority); +extern uint32 si_gpioled(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_gpioreserve(si_t *sih, uint32 gpio_num, uint8 priority); +extern uint32 si_gpiorelease(si_t *sih, uint32 gpio_num, uint8 priority); +extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val); +extern uint32 
si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val); +extern uint32 si_gpio_int_enable(si_t *sih, bool enable); +extern void si_gci_uart_init(si_t *sih, osl_t *osh, uint8 seci_mode); +extern void si_gci_enable_gpio(si_t *sih, uint8 gpio, uint32 mask, uint32 value); +extern uint8 si_gci_host_wake_gpio_init(si_t *sih); +extern void si_gci_host_wake_gpio_enable(si_t *sih, uint8 gpio, bool state); + +/* GCI interrupt handlers */ +extern void si_gci_handler_process(si_t *sih); + +/* GCI GPIO event handlers */ +extern void *si_gci_gpioint_handler_register(si_t *sih, uint8 gpio, uint8 sts, + gci_gpio_handler_t cb, void *arg); +extern void si_gci_gpioint_handler_unregister(si_t *sih, void* gci_i); +extern uint8 si_gci_gpio_status(si_t *sih, uint8 gci_gpio, uint8 mask, uint8 value); + +/* Wake-on-wireless-LAN (WOWL) */ +extern bool si_pci_pmecap(si_t *sih); +extern bool si_pci_fastpmecap(struct osl_info *osh); +extern bool si_pci_pmestat(si_t *sih); +extern void si_pci_pmeclr(si_t *sih); +extern void si_pci_pmeen(si_t *sih); +extern void si_pci_pmestatclr(si_t *sih); +extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset); +extern uint si_pcie_writereg(void *sih, uint addrtype, uint offset, uint val); +extern void si_deepsleep_count(si_t *sih, bool arm_wakeup); + + +#ifdef BCMSDIO +extern void si_sdio_init(si_t *sih); +#endif + +extern uint16 si_d11_devid(si_t *sih); +extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice, + uint8 *pciclass, uint8 *pcisubclass, uint8 *pciprogif, uint8 *pciheader); + +#define si_eci(sih) 0 +static INLINE void * si_eci_init(si_t *sih) {return NULL;} +#define si_eci_notify_bt(sih, type, val) (0) +#define si_seci(sih) 0 +#define si_seci_upd(sih, a) do {} while (0) +static INLINE void * si_seci_init(si_t *sih, uint8 use_seci) {return NULL;} +static INLINE void * si_gci_init(si_t *sih) {return NULL;} +#define si_seci_down(sih) do {} while (0) +#define si_gci(sih) 0 + +/* OTP status */ +extern bool si_is_otp_disabled(si_t *sih); +extern bool si_is_otp_powered(si_t *sih); +extern void si_otp_power(si_t *sih, bool on, uint32* min_res_mask); + +/* SPROM availability */ +extern bool si_is_sprom_available(si_t *sih); +extern bool si_is_sprom_enabled(si_t *sih); +extern void si_sprom_enable(si_t *sih, bool enable); + +/* OTP/SROM CIS stuff */ +extern int si_cis_source(si_t *sih); +#define CIS_DEFAULT 0 +#define CIS_SROM 1 +#define CIS_OTP 2 + +/* Fab-id information */ +#define DEFAULT_FAB 0x0 /**< Original/first fab used for this chip */ +#define CSM_FAB7 0x1 /**< CSM Fab7 chip */ +#define TSMC_FAB12 0x2 /**< TSMC Fab12/Fab14 chip */ +#define SMIC_FAB4 0x3 /**< SMIC Fab4 chip */ + +extern int si_otp_fabid(si_t *sih, uint16 *fabid, bool rw); +extern uint16 si_fabid(si_t *sih); +extern uint16 si_chipid(si_t *sih); + +/* + * Build device path. Path size must be >= SI_DEVPATH_BUFSZ. + * The returned path is NULL terminated and has trailing '/'. + * Return 0 on success, nonzero otherwise. 
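si_devpath(), declared just below, follows the contract spelled out in the comment above. A hedged sketch of composing a devpath-qualified nvram variable name (snprintf from the C library and a path shape such as "sb/1/" are assumptions):

/* Build "<devpath>boardtype", e.g. "sb/1/boardtype". */
static void example_devpath_var(si_t *sih)
{
    char path[SI_DEVPATH_BUFSZ];
    char name[SI_DEVPATH_BUFSZ + 16];

    if (si_devpath(sih, path, sizeof(path)) == 0) {
        /* path is NUL terminated and carries a trailing '/' */
        snprintf(name, sizeof(name), "%sboardtype", path);
        /* ... look `name` up via the nvram layer ... */
    }
}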
+ */ +extern int si_devpath(si_t *sih, char *path, int size); +extern int si_devpath_pcie(si_t *sih, char *path, int size); +/* Read variable with prepending the devpath to the name */ +extern char *si_getdevpathvar(si_t *sih, const char *name); +extern int si_getdevpathintvar(si_t *sih, const char *name); +extern char *si_coded_devpathvar(si_t *sih, char *varname, int var_len, const char *name); + + +extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_pcielcreg(si_t *sih, uint32 mask, uint32 val); +extern uint8 si_pcieltrenable(si_t *sih, uint32 mask, uint32 val); +extern uint8 si_pcieobffenable(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_pcieltr_reg(si_t *sih, uint32 reg, uint32 mask, uint32 val); +extern uint32 si_pcieltrspacing_reg(si_t *sih, uint32 mask, uint32 val); +extern uint32 si_pcieltrhysteresiscnt_reg(si_t *sih, uint32 mask, uint32 val); +extern void si_pcie_set_error_injection(si_t *sih, uint32 mode); +extern void si_pcie_set_L1substate(si_t *sih, uint32 substate); +extern uint32 si_pcie_get_L1substate(si_t *sih); +extern void si_war42780_clkreq(si_t *sih, bool clkreq); +extern void si_pci_down(si_t *sih); +extern void si_pci_up(si_t *sih); +extern void si_pci_sleep(si_t *sih); +extern void si_pcie_war_ovr_update(si_t *sih, uint8 aspm); +extern void si_pcie_power_save_enable(si_t *sih, bool enable); +extern void si_pcie_extendL1timer(si_t *sih, bool extend); +extern int si_pci_fixcfg(si_t *sih); +extern void si_chippkg_set(si_t *sih, uint); + +extern void si_chipcontrl_btshd0_4331(si_t *sih, bool on); +extern void si_chipcontrl_restore(si_t *sih, uint32 val); +extern uint32 si_chipcontrl_read(si_t *sih); +extern void si_chipcontrl_epa4331(si_t *sih, bool on); +extern void si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl); +extern void si_chipcontrl_srom4360(si_t *sih, bool on); +extern void si_clk_srom4365(si_t *sih); +/* Enable BT-COEX & Ex-PA for 4313 */ +extern void si_epa_4313war(si_t *sih); +extern void si_btc_enable_chipcontrol(si_t *sih); +/* BT/WL selection for 4313 bt combo >= P250 boards */ +extern void si_btcombo_p250_4313_war(si_t *sih); +extern void si_btcombo_43228_war(si_t *sih); +extern void si_clk_pmu_htavail_set(si_t *sih, bool set_clear); +extern void si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag); +extern void si_pmu_synth_pwrsw_4313_war(si_t *sih); +extern uint si_pll_reset(si_t *sih); +/* === debug routines === */ + +extern bool si_taclear(si_t *sih, bool details); + +#if defined(BCMDBG_PHYDUMP) +struct bcmstrbuf; +extern int si_dump_pcieinfo(si_t *sih, struct bcmstrbuf *b); +#endif + +#if defined(BCMDBG_PHYDUMP) +extern void si_dumpregs(si_t *sih, struct bcmstrbuf *b); +#endif + +extern uint32 si_ccreg(si_t *sih, uint32 offset, uint32 mask, uint32 val); +extern uint32 si_pciereg(si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type); +extern int si_bpind_access(si_t *sih, uint32 addr_high, uint32 addr_low, + int32* data, bool read); +#ifdef SR_DEBUG +extern void si_dump_pmu(si_t *sih, void *pmu_var); +extern void si_pmu_keep_on(si_t *sih, int32 int_val); +extern uint32 si_pmu_keep_on_get(si_t *sih); +extern uint32 si_power_island_set(si_t *sih, uint32 int_val); +extern uint32 si_power_island_get(si_t *sih); +#endif /* SR_DEBUG */ +extern uint32 si_pcieserdesreg(si_t *sih, uint32 mdioslave, uint32 offset, uint32 mask, uint32 val); +extern void si_pcie_set_request_size(si_t *sih, uint16 size); +extern uint16 si_pcie_get_request_size(si_t *sih); +extern void si_pcie_set_maxpayload_size(si_t 
*sih, uint16 size); +extern uint16 si_pcie_get_maxpayload_size(si_t *sih); +extern uint16 si_pcie_get_ssid(si_t *sih); +extern uint32 si_pcie_get_bar0(si_t *sih); +extern int si_pcie_configspace_cache(si_t *sih); +extern int si_pcie_configspace_restore(si_t *sih); +extern int si_pcie_configspace_get(si_t *sih, uint8 *buf, uint size); + +char *si_getnvramflvar(si_t *sih, const char *name); + + +extern uint32 si_tcm_size(si_t *sih); +extern bool si_has_flops(si_t *sih); + +extern int si_set_sromctl(si_t *sih, uint32 value); +extern uint32 si_get_sromctl(si_t *sih); + +extern uint32 si_gci_direct(si_t *sih, uint offset, uint32 mask, uint32 val); +extern uint32 si_gci_indirect(si_t *sih, uint regidx, uint offset, uint32 mask, uint32 val); +extern uint32 si_gci_output(si_t *sih, uint reg, uint32 mask, uint32 val); +extern uint32 si_gci_input(si_t *sih, uint reg); +extern uint32 si_gci_int_enable(si_t *sih, bool enable); +extern void si_gci_reset(si_t *sih); +#ifdef BCMLTECOEX +extern void si_gci_seci_init(si_t *sih); +extern void si_ercx_init(si_t *sih, uint32 ltecx_mux, uint32 ltecx_padnum, + uint32 ltecx_fnsel, uint32 ltecx_gcigpio); +extern void si_wci2_init(si_t *sih, uint8 baudrate, uint32 ltecx_mux, uint32 ltecx_padnum, + uint32 ltecx_fnsel, uint32 ltecx_gcigpio); +#endif /* BCMLTECOEX */ +extern void si_gci_set_functionsel(si_t *sih, uint32 pin, uint8 fnsel); +extern uint32 si_gci_get_functionsel(si_t *sih, uint32 pin); +extern void si_gci_clear_functionsel(si_t *sih, uint8 fnsel); +extern uint8 si_gci_get_chipctrlreg_idx(uint32 pin, uint32 *regidx, uint32 *pos); +extern uint32 si_gci_chipcontrol(si_t *sih, uint reg, uint32 mask, uint32 val); +extern uint32 si_gci_chipstatus(si_t *sih, uint reg); +extern uint16 si_cc_get_reg16(uint32 reg_offs); +extern uint32 si_cc_get_reg32(uint32 reg_offs); +extern uint32 si_cc_set_reg32(uint32 reg_offs, uint32 val); +extern uint32 si_gci_preinit_upd_indirect(uint32 regidx, uint32 setval, uint32 mask); +extern uint8 si_enable_device_wake(si_t *sih, uint8 *wake_status, uint8 *cur_status); +extern void si_swdenable(si_t *sih, uint32 swdflag); + +#define CHIPCTRLREG1 0x1 +#define CHIPCTRLREG2 0x2 +#define CHIPCTRLREG3 0x3 +#define CHIPCTRLREG4 0x4 +#define CHIPCTRLREG5 0x5 +#define MINRESMASKREG 0x618 +#define MAXRESMASKREG 0x61c +#define CHIPCTRLADDR 0x650 +#define CHIPCTRLDATA 0x654 +#define RSRCTABLEADDR 0x620 +#define RSRCUPDWNTIME 0x628 +#define PMUREG_RESREQ_MASK 0x68c + +void si_update_masks(si_t *sih); +void si_force_islanding(si_t *sih, bool enable); +extern uint32 si_pmu_res_req_timer_clr(si_t *sih); +extern void si_pmu_rfldo(si_t *sih, bool on); +extern void si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 spert_val); +extern uint32 si_pcie_set_ctrlreg(si_t *sih, uint32 sperst_mask, uint32 spert_val); +extern void si_pcie_ltr_war(si_t *sih); +extern void si_pcie_hw_LTR_war(si_t *sih); +extern void si_pcie_hw_L1SS_war(si_t *sih); +extern void si_pciedev_crwlpciegen2(si_t *sih); +extern void si_pcie_prep_D3(si_t *sih, bool enter_D3); +extern void si_pciedev_reg_pm_clk_period(si_t *sih); + +#ifdef WLRSDB +extern void si_d11rsdb_core_disable(si_t *sih, uint32 bits); +extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +#endif + + +/* Macro to enable clock gating changes in different cores */ +#define MEM_CLK_GATE_BIT 5 +#define GCI_CLK_GATE_BIT 18 + +#define USBAPP_CLK_BIT 0 +#define PCIE_CLK_BIT 3 +#define ARMCR4_DBG_CLK_BIT 4 +#define SAMPLE_SYNC_CLK_BIT 17 +#define PCIE_TL_CLK_BIT 18 +#define 
HQ_REQ_BIT 24 +#define PLL_DIV2_BIT_START 9 +#define PLL_DIV2_MASK (0x37 << PLL_DIV2_BIT_START) +#define PLL_DIV2_DIS_OP (0x37 << PLL_DIV2_BIT_START) + +#define pmu_corereg(si, cc_idx, member, mask, val) \ + (AOB_ENAB(si) ? \ + si_pmu_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \ + OFFSETOF(pmuregs_t, member), mask, val): \ + si_pmu_corereg(si, cc_idx, OFFSETOF(chipcregs_t, member), mask, val)) + +/* GCI Macros */ +#define ALLONES_32 0xFFFFFFFF +#define GCI_CCTL_SECIRST_OFFSET 0 /**< SeciReset */ +#define GCI_CCTL_RSTSL_OFFSET 1 /**< ResetSeciLogic */ +#define GCI_CCTL_SECIEN_OFFSET 2 /**< EnableSeci */ +#define GCI_CCTL_FSL_OFFSET 3 /**< ForceSeciOutLow */ +#define GCI_CCTL_SMODE_OFFSET 4 /**< SeciOpMode, 6:4 */ +#define GCI_CCTL_US_OFFSET 7 /**< UpdateSeci */ +#define GCI_CCTL_BRKONSLP_OFFSET 8 /**< BreakOnSleep */ +#define GCI_CCTL_SILOWTOUT_OFFSET 9 /**< SeciInLowTimeout, 10:9 */ +#define GCI_CCTL_RSTOCC_OFFSET 11 /**< ResetOffChipCoex */ +#define GCI_CCTL_ARESEND_OFFSET 12 /**< AutoBTSigResend */ +#define GCI_CCTL_FGCR_OFFSET 16 /**< ForceGciClkReq */ +#define GCI_CCTL_FHCRO_OFFSET 17 /**< ForceHWClockReqOff */ +#define GCI_CCTL_FREGCLK_OFFSET 18 /**< ForceRegClk */ +#define GCI_CCTL_FSECICLK_OFFSET 19 /**< ForceSeciClk */ +#define GCI_CCTL_FGCA_OFFSET 20 /**< ForceGciClkAvail */ +#define GCI_CCTL_FGCAV_OFFSET 21 /**< ForceGciClkAvailValue */ +#define GCI_CCTL_SCS_OFFSET 24 /**< SeciClkStretch, 31:24 */ + +#define GCI_MODE_UART 0x0 +#define GCI_MODE_SECI 0x1 +#define GCI_MODE_BTSIG 0x2 +#define GCI_MODE_GPIO 0x3 +#define GCI_MODE_MASK 0x7 + +#define GCI_CCTL_LOWTOUT_DIS 0x0 +#define GCI_CCTL_LOWTOUT_10BIT 0x1 +#define GCI_CCTL_LOWTOUT_20BIT 0x2 +#define GCI_CCTL_LOWTOUT_30BIT 0x3 +#define GCI_CCTL_LOWTOUT_MASK 0x3 + +#define GCI_CCTL_SCS_DEF 0x19 +#define GCI_CCTL_SCS_MASK 0xFF + +#define GCI_SECIIN_MODE_OFFSET 0 +#define GCI_SECIIN_GCIGPIO_OFFSET 4 +#define GCI_SECIIN_RXID2IP_OFFSET 8 + +#define GCI_SECIOUT_MODE_OFFSET 0 +#define GCI_SECIOUT_GCIGPIO_OFFSET 4 +#define GCI_SECIOUT_SECIINRELATED_OFFSET 16 + +#define GCI_SECIAUX_RXENABLE_OFFSET 0 +#define GCI_SECIFIFO_RXENABLE_OFFSET 16 + +#define GCI_SECITX_ENABLE_OFFSET 0 + +#define GCI_GPIOCTL_INEN_OFFSET 0 +#define GCI_GPIOCTL_OUTEN_OFFSET 1 +#define GCI_GPIOCTL_PDN_OFFSET 4 + +#define GCI_GPIOIDX_OFFSET 16 + +#define GCI_LTECX_SECI_ID 0 /**< SECI port for LTECX */ + +/* To access per GCI bit registers */ +#define GCI_REG_WIDTH 32 + +/* GCI bit positions */ +/* GCI [127:000] = WLAN [127:0] */ +#define GCI_WLAN_IP_ID 0 +#define GCI_WLAN_BEGIN 0 +#define GCI_WLAN_PRIO_POS (GCI_WLAN_BEGIN + 4) + +/* GCI [639:512] = LTE [127:0] */ +#define GCI_LTE_IP_ID 4 +#define GCI_LTE_BEGIN 512 +#define GCI_LTE_FRAMESYNC_POS (GCI_LTE_BEGIN + 0) +#define GCI_LTE_RX_POS (GCI_LTE_BEGIN + 1) +#define GCI_LTE_TX_POS (GCI_LTE_BEGIN + 2) +#define GCI_LTE_AUXRXDVALID_POS (GCI_LTE_BEGIN + 56) + +/* Reg Index corresponding to ECI bit no x of ECI space */ +#define GCI_REGIDX(x) ((x)/GCI_REG_WIDTH) +/* Bit offset of ECI bit no x in 32-bit words */ +#define GCI_BITOFFSET(x) ((x)%GCI_REG_WIDTH) + +/* End - GCI Macros */ + +#ifdef REROUTE_OOBINT +#define CC_OOB 0x0 +#define M2MDMA_OOB 0x1 +#define PMU_OOB 0x2 +#define D11_OOB 0x3 +#define SDIOD_OOB 0x4 +#define WLAN_OOB 0x5 +#define PMU_OOB_BIT 0x12 +#endif /* REROUTE_OOBINT */ + +extern void si_pll_sr_reinit(si_t *sih); +extern void si_pll_closeloop(si_t *sih); + +#endif /* _siutils_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/spid.h b/drivers/net/wireless/bcmdhd/include/spid.h new file mode 100644 
index 000000000000..9a39aaf0dd3f --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/spid.h @@ -0,0 +1,168 @@ +/* + * SPI device spec header file + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: spid.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _SPI_H +#define _SPI_H + +/* + * Brcm SPI Device Register Map. + * + */ + +typedef volatile struct { + uint8 config; /* 0x00, len, endian, clock, speed, polarity, wakeup */ + uint8 response_delay; /* 0x01, read response delay in bytes (corerev < 3) */ + uint8 status_enable; /* 0x02, status-enable, intr with status, response_delay + * function selection, command/data error check + */ + uint8 reset_bp; /* 0x03, reset on wlan/bt backplane reset (corerev >= 1) */ + uint16 intr_reg; /* 0x04, Intr status register */ + uint16 intr_en_reg; /* 0x06, Intr mask register */ + uint32 status_reg; /* 0x08, RO, Status bits of last spi transfer */ + uint16 f1_info_reg; /* 0x0c, RO, enabled, ready for data transfer, blocksize */ + uint16 f2_info_reg; /* 0x0e, RO, enabled, ready for data transfer, blocksize */ + uint16 f3_info_reg; /* 0x10, RO, enabled, ready for data transfer, blocksize */ + uint32 test_read; /* 0x14, RO 0xfeedbead signature */ + uint32 test_rw; /* 0x18, RW */ + uint8 resp_delay_f0; /* 0x1c, read resp delay bytes for F0 (corerev >= 3) */ + uint8 resp_delay_f1; /* 0x1d, read resp delay bytes for F1 (corerev >= 3) */ + uint8 resp_delay_f2; /* 0x1e, read resp delay bytes for F2 (corerev >= 3) */ + uint8 resp_delay_f3; /* 0x1f, read resp delay bytes for F3 (corerev >= 3) */ +} spi_regs_t; + +/* SPI device register offsets */ +#define SPID_CONFIG 0x00 +#define SPID_RESPONSE_DELAY 0x01 +#define SPID_STATUS_ENABLE 0x02 +#define SPID_RESET_BP 0x03 /* (corerev >= 1) */ +#define SPID_INTR_REG 0x04 /* 16 bits - Interrupt status */ +#define SPID_INTR_EN_REG 0x06 /* 16 bits - Interrupt mask */ +#define SPID_STATUS_REG 0x08 /* 32 bits */ +#define SPID_F1_INFO_REG 0x0C /* 16 bits */ +#define SPID_F2_INFO_REG 0x0E /* 16 bits */ +#define SPID_F3_INFO_REG 0x10 /* 16 bits */ +#define SPID_TEST_READ 0x14 /* 32 bits */ +#define SPID_TEST_RW 0x18 /* 32 bits */ +#define SPID_RESP_DELAY_F0 0x1c /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F1 0x1d /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F2 0x1e /* 8 bits (corerev >= 3) */ +#define SPID_RESP_DELAY_F3 0x1f /* 8 bits (corerev >= 3) */ + +/* Bit masks for SPID_CONFIG device register */ +#define 
WORD_LENGTH_32 0x1 /* 0/1 16/32 bit word length */ +#define ENDIAN_BIG 0x2 /* 0/1 Little/Big Endian */ +#define CLOCK_PHASE 0x4 /* 0/1 clock phase delay */ +#define CLOCK_POLARITY 0x8 /* 0/1 Idle state clock polarity is low/high */ +#define HIGH_SPEED_MODE 0x10 /* 1/0 High Speed mode / Normal mode */ +#define INTR_POLARITY 0x20 /* 1/0 Interrupt active polarity is high/low */ +#define WAKE_UP 0x80 /* 0/1 Wake-up command from Host to WLAN */ + +/* Bit mask for SPID_RESPONSE_DELAY device register */ +#define RESPONSE_DELAY_MASK 0xFF /* Configurable rd response delay in multiples of 8 bits */ + +/* Bit mask for SPID_STATUS_ENABLE device register */ +#define STATUS_ENABLE 0x1 /* 1/0 Status sent/not sent to host after read/write */ +#define INTR_WITH_STATUS 0x2 /* 0/1 Do-not / do-interrupt if status is sent */ +#define RESP_DELAY_ALL 0x4 /* Applicability of resp delay to F1 or all func's read */ +#define DWORD_PKT_LEN_EN 0x8 /* Packet len denoted in dwords instead of bytes */ +#define CMD_ERR_CHK_EN 0x20 /* Command error check enable */ +#define DATA_ERR_CHK_EN 0x40 /* Data error check enable */ + +/* Bit mask for SPID_RESET_BP device register */ +#define RESET_ON_WLAN_BP_RESET 0x4 /* enable reset for WLAN backplane */ +#define RESET_ON_BT_BP_RESET 0x8 /* enable reset for BT backplane */ +#define RESET_SPI 0x80 /* reset the above enabled logic */ + +/* Bit mask for SPID_INTR_REG device register */ +#define DATA_UNAVAILABLE 0x0001 /* Requested data not available; Clear by writing a "1" */ +#define F2_F3_FIFO_RD_UNDERFLOW 0x0002 +#define F2_F3_FIFO_WR_OVERFLOW 0x0004 +#define COMMAND_ERROR 0x0008 /* Cleared by writing 1 */ +#define DATA_ERROR 0x0010 /* Cleared by writing 1 */ +#define F2_PACKET_AVAILABLE 0x0020 +#define F3_PACKET_AVAILABLE 0x0040 +#define F1_OVERFLOW 0x0080 /* Due to last write. 
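A gSPI link is brought up by writing one byte at SPID_CONFIG composed from the bits above. A hedged sketch; gspi_write8() is a hypothetical bus accessor, not part of this header:

extern void gspi_write8(unsigned int offset, unsigned char val); /* hypothetical */

/* 32-bit words, little endian, high-speed mode, active-high interrupt. */
static void gspi_example_config(void)
{
    unsigned char cfg = WORD_LENGTH_32 | HIGH_SPEED_MODE | INTR_POLARITY;

    gspi_write8(SPID_CONFIG, cfg);
}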
Bkplane has pending write requests */ +#define MISC_INTR0 0x0100 +#define MISC_INTR1 0x0200 +#define MISC_INTR2 0x0400 +#define MISC_INTR3 0x0800 +#define MISC_INTR4 0x1000 +#define F1_INTR 0x2000 +#define F2_INTR 0x4000 +#define F3_INTR 0x8000 + +/* Bit mask for 32bit SPID_STATUS_REG device register */ +#define STATUS_DATA_NOT_AVAILABLE 0x00000001 +#define STATUS_UNDERFLOW 0x00000002 +#define STATUS_OVERFLOW 0x00000004 +#define STATUS_F2_INTR 0x00000008 +#define STATUS_F3_INTR 0x00000010 +#define STATUS_F2_RX_READY 0x00000020 +#define STATUS_F3_RX_READY 0x00000040 +#define STATUS_HOST_CMD_DATA_ERR 0x00000080 +#define STATUS_F2_PKT_AVAILABLE 0x00000100 +#define STATUS_F2_PKT_LEN_MASK 0x000FFE00 +#define STATUS_F2_PKT_LEN_SHIFT 9 +#define STATUS_F3_PKT_AVAILABLE 0x00100000 +#define STATUS_F3_PKT_LEN_MASK 0xFFE00000 +#define STATUS_F3_PKT_LEN_SHIFT 21 + +/* Bit mask for 16 bits SPID_F1_INFO_REG device register */ +#define F1_ENABLED 0x0001 +#define F1_RDY_FOR_DATA_TRANSFER 0x0002 +#define F1_MAX_PKT_SIZE 0x01FC + +/* Bit mask for 16 bits SPID_F2_INFO_REG device register */ +#define F2_ENABLED 0x0001 +#define F2_RDY_FOR_DATA_TRANSFER 0x0002 +#define F2_MAX_PKT_SIZE 0x3FFC + +/* Bit mask for 16 bits SPID_F3_INFO_REG device register */ +#define F3_ENABLED 0x0001 +#define F3_RDY_FOR_DATA_TRANSFER 0x0002 +#define F3_MAX_PKT_SIZE 0x3FFC + +/* Bit mask for 32 bits SPID_TEST_READ device register read in 16bit LE mode */ +#define TEST_RO_DATA_32BIT_LE 0xFEEDBEAD + +/* Maximum number of I/O funcs */ +#define SPI_MAX_IOFUNCS 4 + +#define SPI_MAX_PKT_LEN (2048*4) + +/* Misc defines */ +#define SPI_FUNC_0 0 +#define SPI_FUNC_1 1 +#define SPI_FUNC_2 2 +#define SPI_FUNC_3 3 + +#define WAIT_F2RXFIFORDY 100 +#define WAIT_F2RXFIFORDY_DELAY 20 + +#endif /* _SPI_H */ diff --git a/drivers/net/wireless/bcmdhd/include/trxhdr.h b/drivers/net/wireless/bcmdhd/include/trxhdr.h new file mode 100644 index 000000000000..f7404be99b0e --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/trxhdr.h @@ -0,0 +1,95 @@ +/* + * TRX image file header format. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. 
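The 32-bit status word parsed with the STATUS_* masks above carries the pending F2/F3 packet lengths inline. A hedged sketch of extracting the F2 length:

/* Return the pending F2 packet length, or 0 if none is available. */
static unsigned int gspi_f2_pkt_len(unsigned int status)
{
    if (!(status & STATUS_F2_PKT_AVAILABLE))
        return 0;
    return (status & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT;
}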
+ *
+ *
+ * <>
+ *
+ * $Id: trxhdr.h 520026 2014-12-10 01:29:40Z $
+ */
+
+#ifndef _TRX_HDR_H
+#define _TRX_HDR_H
+
+#include <typedefs.h>
+
+#define TRX_MAGIC 0x30524448 /* "HDR0" */
+#define TRX_MAX_LEN 0x3B0000 /* Max length */
+#define TRX_NO_HEADER 1 /* Do not write TRX header */
+#define TRX_GZ_FILES 0x2 /* Contains up to TRX_MAX_OFFSET individual gzip files */
+#define TRX_EMBED_UCODE 0x8 /* Trx contains embedded ucode image */
+#define TRX_ROMSIM_IMAGE 0x10 /* Trx contains ROM simulation image */
+#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed rtecdc.bin image */
+#define TRX_BOOTLOADER 0x40 /* the image is a bootloader */
+
+#define TRX_V1 1
+#define TRX_V1_MAX_OFFSETS 3 /* V1: Max number of individual files */
+
+#ifndef BCMTRXV2
+#define TRX_VERSION TRX_V1 /* Version 1 */
+#define TRX_MAX_OFFSET TRX_V1_MAX_OFFSETS
+#endif
+
+/* BMAC host drivers/applications like bcmdl need to support both Ver 1 and
+ * Ver 2 of the trx header. To make it generic, the trx_header structure is
+ * modified as below, where the size of the "offsets" field varies with the
+ * TRX version. Currently, the BMAC host driver and bcmdl are modified to
+ * support TRXV2 as well. To make sure other applications like "dhdl", which
+ * are yet to be enhanced to support TRXV2, are not broken, the new macro and
+ * structure definition take effect only when BCMTRXV2 is defined.
+ */
+struct trx_header {
+ uint32 magic; /* "HDR0" */
+ uint32 len; /* Length of file including header */
+ uint32 crc32; /* 32-bit CRC from flag_version to end of file */
+ uint32 flag_version; /* 0:15 flags, 16:31 version */
+#ifndef BCMTRXV2
+ uint32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of header */
+#else
+ uint32 offsets[1]; /* Offsets of partitions from start of header */
+#endif
+};
+
+#ifdef BCMTRXV2
+#define TRX_VERSION TRX_V2 /* Version 2 */
+#define TRX_MAX_OFFSET TRX_V2_MAX_OFFSETS
+
+#define TRX_V2 2
+/* V2: Max number of individual files
+ * To support SDR signature + Config data region
+ */
+#define TRX_V2_MAX_OFFSETS 5
+#define SIZEOF_TRXHDR_V1 (sizeof(struct trx_header)+(TRX_V1_MAX_OFFSETS-1)*sizeof(uint32))
+#define SIZEOF_TRXHDR_V2 (sizeof(struct trx_header)+(TRX_V2_MAX_OFFSETS-1)*sizeof(uint32))
+#define TRX_VER(trx) ((trx)->flag_version>>16)
+#define ISTRX_V1(trx) (TRX_VER(trx) == TRX_V1)
+#define ISTRX_V2(trx) (TRX_VER(trx) == TRX_V2)
+/* For V2 return the V2 header size; otherwise return the V1 size */
+#define SIZEOF_TRX(trx) (ISTRX_V2(trx) ?
SIZEOF_TRXHDR_V2: SIZEOF_TRXHDR_V1) +#else +#define SIZEOF_TRX(trx) (sizeof(struct trx_header)) +#endif /* BCMTRXV2 */ + +/* Compatibility */ +typedef struct trx_header TRXHDR, *PTRXHDR; + +#endif /* _TRX_HDR_H */ diff --git a/drivers/net/wireless/bcmdhd/include/typedefs.h b/drivers/net/wireless/bcmdhd/include/typedefs.h new file mode 100644 index 000000000000..0e110a1908ed --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/typedefs.h @@ -0,0 +1,339 @@ +/* + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: typedefs.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _TYPEDEFS_H_ +#define _TYPEDEFS_H_ + +#ifdef SITE_TYPEDEFS + +/* + * Define SITE_TYPEDEFS in the compile to include a site-specific + * typedef file "site_typedefs.h". + * + * If SITE_TYPEDEFS is not defined, then the code section below makes + * inferences about the compile environment based on defined symbols and + * possibly compiler pragmas. + * + * Following these two sections is the Default Typedefs section. + * This section is only processed if USE_TYPEDEF_DEFAULTS is + * defined. This section has a default set of typedefs and a few + * preprocessor symbols (TRUE, FALSE, NULL, ...). + */ + +#include "site_typedefs.h" + +#else + +/* + * Infer the compile environment based on preprocessor symbols and pragmas. + * Override type definitions as needed, and include configuration-dependent + * header files to define types. + */ + +#ifdef __cplusplus + +#define TYPEDEF_BOOL +#ifndef FALSE +#define FALSE false +#endif +#ifndef TRUE +#define TRUE true +#endif + +#else /* ! __cplusplus */ + + +#endif /* ! __cplusplus */ + +#if defined(__LP64__) +#define TYPEDEF_UINTPTR +typedef unsigned long long int uintptr; +#endif + + + + + +#if defined(_NEED_SIZE_T_) +typedef long unsigned int size_t; +#endif + + + + + +#if defined(__sparc__) +#define TYPEDEF_ULONG +#endif + +/* + * If this is either a Linux hybrid build or the per-port code of a hybrid build + * then use the Linux header files to get some of the typedefs. Otherwise, define + * them entirely in this file. We can't always define the types because we get + * a duplicate typedef error; there is no way to "undefine" a typedef. + * We know when it's per-port code because each file defines LINUX_PORT at the top. 
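The TRX macros just above are enough for a first-pass image check before any CRC work. A hedged sketch; CRC verification, which covers flag_version through the end of the file, is left to whatever crc32 routine the caller has:

/* Minimal TRX header sanity check; returns nonzero if plausible. */
static int trx_basic_check(const struct trx_header *trx, uint32 avail)
{
    if (trx->magic != TRX_MAGIC)
        return 0;                        /* not "HDR0" */
    if (trx->len > TRX_MAX_LEN || trx->len > avail)
        return 0;                        /* image length out of bounds */
    return avail >= SIZEOF_TRX(trx);     /* room for a V1 or V2 header */
}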
+ */
+#define TYPEDEF_UINT
+#ifndef TARGETENV_android
+#define TYPEDEF_USHORT
+#define TYPEDEF_ULONG
+#endif /* TARGETENV_android */
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
+#define TYPEDEF_BOOL
+#endif	/* >= 2.6.19 */
+/* special detection for 2.6.18-128.7.1.0.1.el5 */
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
+#include <linux/compiler.h>
+#ifdef noinline_for_stack
+#define TYPEDEF_BOOL
+#endif
+#endif	/* == 2.6.18 */
+#endif	/* __KERNEL__ */
+
+
+/* Do not support the (u)int64 types with strict ansi for GNU C */
+#if defined(__GNUC__) && defined(__STRICT_ANSI__)
+#define TYPEDEF_INT64
+#define TYPEDEF_UINT64
+#endif /* defined(__GNUC__) && defined(__STRICT_ANSI__) */
+
+/* ICL accepts unsigned 64 bit type only, and complains in ANSI mode
+ * for signed or unsigned
+ */
+#if defined(__ICL)
+
+#define TYPEDEF_INT64
+
+#if defined(__STDC__)
+#define TYPEDEF_UINT64
+#endif
+
+#endif /* __ICL */
+
+#if !defined(__DJGPP__)
+
+/* pick up ushort & uint from standard types.h */
+#if defined(__KERNEL__)
+
+/* See note above */
+#include <linux/types.h>	/* sys/types.h and linux/types.h are oil and water */
+
+#else
+
+#include <sys/types.h>
+
+#endif /* linux && __KERNEL__ */
+
+#endif
+
+
+/* use the default typedefs in the next section of this file */
+#define USE_TYPEDEF_DEFAULTS
+
+#endif /* SITE_TYPEDEFS */
+
+
+/*
+ * Default Typedefs
+ */
+
+#ifdef USE_TYPEDEF_DEFAULTS
+#undef USE_TYPEDEF_DEFAULTS
+
+#ifndef TYPEDEF_BOOL
+typedef	/* @abstract@ */ unsigned char	bool;
+#endif /* endif TYPEDEF_BOOL */
+
+/* define uchar, ushort, uint, ulong */
+
+#ifndef TYPEDEF_UCHAR
+typedef unsigned char	uchar;
+#endif
+
+#ifndef TYPEDEF_USHORT
+typedef unsigned short	ushort;
+#endif
+
+#ifndef TYPEDEF_UINT
+typedef unsigned int	uint;
+#endif
+
+#ifndef TYPEDEF_ULONG
+typedef unsigned long	ulong;
+#endif
+
+/* define [u]int8/16/32/64, uintptr */
+
+#ifndef TYPEDEF_UINT8
+typedef unsigned char	uint8;
+#endif
+
+#ifndef TYPEDEF_UINT16
+typedef unsigned short	uint16;
+#endif
+
+#ifndef TYPEDEF_UINT32
+typedef unsigned int	uint32;
+#endif
+
+#ifndef TYPEDEF_UINT64
+typedef unsigned long long uint64;
+#endif
+
+#ifndef TYPEDEF_UINTPTR
+typedef unsigned int	uintptr;
+#endif
+
+#ifndef TYPEDEF_INT8
+typedef signed char	int8;
+#endif
+
+#ifndef TYPEDEF_INT16
+typedef signed short	int16;
+#endif
+
+#ifndef TYPEDEF_INT32
+typedef signed int	int32;
+#endif
+
+#ifndef TYPEDEF_INT64
+typedef signed long long int64;
+#endif
+
+/* define float32/64, float_t */
+
+#ifndef TYPEDEF_FLOAT32
+typedef float	float32;
+#endif
+
+#ifndef TYPEDEF_FLOAT64
+typedef double	float64;
+#endif
+
+/*
+ * abstracted floating point type allows for compile time selection of
+ * single or double precision arithmetic. Compiling with -DFLOAT32
+ * selects single precision; the default is double precision.
+ */
+
+#ifndef TYPEDEF_FLOAT_T
+
+#if defined(FLOAT32)
+typedef float32 float_t;
+#else /* default to double precision floating point */
+typedef float64 float_t;
+#endif
+
+#endif /* TYPEDEF_FLOAT_T */
+
+/* define macro values */
+
+#ifndef FALSE
+#define FALSE	0
+#endif
+
+#ifndef TRUE
+#define TRUE	1  /* TRUE */
+#endif
+
+#ifndef NULL
+#define NULL	0
+#endif
+
+#ifndef OFF
+#define OFF	0
+#endif
+
+#ifndef ON
+#define ON	1  /* ON = 1 */
+#endif
+
+#define	AUTO	(-1)	/* Auto = -1 */
+
+/* define PTRSZ, INLINE */
+
+#ifndef PTRSZ
+#define	PTRSZ	sizeof(char*)
+#endif
+
+
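+/*
+ * Illustrative note (not from the original header): the float_t selection
+ * above is driven entirely by the compile line, e.g.
+ *
+ *	cc -DFLOAT32 -c foo.c	// float_t is single precision (float32)
+ *	cc -c foo.c		// default: float_t is double precision
+ */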
+/* Detect compiler type. */
+#if defined(__GNUC__) || defined(__lint)
+	#define BWL_COMPILER_GNU
+#elif defined(__CC_ARM) && __CC_ARM
+	#define BWL_COMPILER_ARMCC
+#else
+	#error "Unknown compiler!"
+#endif
+
+
+#ifndef INLINE
+	#if defined(BWL_COMPILER_MICROSOFT)
+		#define INLINE __inline
+	#elif defined(BWL_COMPILER_GNU)
+		#define INLINE __inline__
+	#elif defined(BWL_COMPILER_ARMCC)
+		#define INLINE	__inline
+	#else
+		#define INLINE
+	#endif
+#endif /* INLINE */
+
+#undef TYPEDEF_BOOL
+#undef TYPEDEF_UCHAR
+#undef TYPEDEF_USHORT
+#undef TYPEDEF_UINT
+#undef TYPEDEF_ULONG
+#undef TYPEDEF_UINT8
+#undef TYPEDEF_UINT16
+#undef TYPEDEF_UINT32
+#undef TYPEDEF_UINT64
+#undef TYPEDEF_UINTPTR
+#undef TYPEDEF_INT8
+#undef TYPEDEF_INT16
+#undef TYPEDEF_INT32
+#undef TYPEDEF_INT64
+#undef TYPEDEF_FLOAT32
+#undef TYPEDEF_FLOAT64
+#undef TYPEDEF_FLOAT_T
+
+#endif /* USE_TYPEDEF_DEFAULTS */
+
+/* Suppress unused parameter warning */
+#define UNUSED_PARAMETER(x) (void)(x)
+
+/* Avoid warning for discarded const or volatile qualifier in special cases (-Wcast-qual) */
+#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr))
+
+/*
+ * Including bcmdefs.h here, to make sure every file including typedefs.h
+ * gets it automatically
+ */
+#include <bcmdefs.h>
+#endif /* _TYPEDEFS_H_ */
diff --git a/drivers/net/wireless/bcmdhd/include/wlfc_proto.h b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
new file mode 100644
index 000000000000..0d5b434198ee
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/include/wlfc_proto.h
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: wlfc_proto.h 542895 2015-03-22 14:13:12Z $
+ *
+ */
+
+/** WL flow control for PROP_TXSTATUS. Related to host AMPDU reordering. */
+
+
+#ifndef __wlfc_proto_definitions_h__
+#define __wlfc_proto_definitions_h__
+
+	/* Use TLV to convey WLFC information.
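+
+	 Each signal is a {type, len, value} triple, tabulated below; FILLER
+	 (255) is a single bare type byte with no len/value. An illustrative
+	 receive-side walk (a sketch, not from the original header):
+
+	     uint8 *p = buf;
+	     while (p < buf + buflen) {
+	             if (p[0] == WLFC_CTL_TYPE_FILLER) { p++; continue; }
+	             handle_signal(p[0], p[1], &p[2]);   (handle_signal is hypothetical)
+	             p += 2 + p[1];
+	     }
+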
+	---------------------------------------------------------------------------
+	| Type | Len | value                    | Description
+	---------------------------------------------------------------------------
+	|  1   |  1  | (handle)                 | MAC OPEN
+	---------------------------------------------------------------------------
+	|  2   |  1  | (handle)                 | MAC CLOSE
+	---------------------------------------------------------------------------
+	|  3   |  2  | (count, handle, prec_bmp)| Set the credit depth for a MAC dstn
+	---------------------------------------------------------------------------
+	|  4   |  4+ | see pkttag comments      | TXSTATUS
+	|      |  12 | TX status & timestamps   | Present only when pkt timestamp is enabled
+	---------------------------------------------------------------------------
+	|  5   |  4  | see pkttag comments      | PKTTAG [host->firmware]
+	---------------------------------------------------------------------------
+	|  6   |  8  | (handle, ifid, MAC)      | MAC ADD
+	---------------------------------------------------------------------------
+	|  7   |  8  | (handle, ifid, MAC)      | MAC DEL
+	---------------------------------------------------------------------------
+	|  8   |  1  | (rssi)                   | RSSI - RSSI value for the packet.
+	---------------------------------------------------------------------------
+	|  9   |  1  | (interface ID)           | Interface OPEN
+	---------------------------------------------------------------------------
+	|  10  |  1  | (interface ID)           | Interface CLOSE
+	---------------------------------------------------------------------------
+	|  11  |  8  | fifo credit returns map  | FIFO credits back to the host
+	|      |     |                          |
+	|      |     |                          | --------------------------------------
+	|      |     |                          | | ac0 | ac1 | ac2 | ac3 | bcmc | atim |
+	|      |     |                          | --------------------------------------
+	|      |     |                          |
+	---------------------------------------------------------------------------
+	|  12  |  2  | MAC handle,              | Host provides a bitmap of pending
+	|      |     | AC[0-3] traffic bitmap   | unicast traffic for MAC-handle dstn.
+	|      |     |                          | [host->firmware]
+	---------------------------------------------------------------------------
+	|  13  |  3  | (count, handle, prec_bmp)| One-time request for packet to a specific
+	|      |     |                          | MAC destination.
+	---------------------------------------------------------------------------
+	|  15  |  12 | (pkttag, timestamps)     | Send TX timestamp at reception from host
+	---------------------------------------------------------------------------
+	|  16  |  12 | (pkttag, timestamps)     | Send WLAN RX timestamp along with RX frame
+	---------------------------------------------------------------------------
+	| 255  | N/A | N/A                      | FILLER - This is a special type
+	|      |     |                          | that has no length or value.
+	|      |     |                          | Typically used for padding.
+ --------------------------------------------------------------------------- + */ + +#define WLFC_CTL_TYPE_MAC_OPEN 1 +#define WLFC_CTL_TYPE_MAC_CLOSE 2 +#define WLFC_CTL_TYPE_MAC_REQUEST_CREDIT 3 +#define WLFC_CTL_TYPE_TXSTATUS 4 +#define WLFC_CTL_TYPE_PKTTAG 5 /** host<->dongle */ + +#define WLFC_CTL_TYPE_MACDESC_ADD 6 +#define WLFC_CTL_TYPE_MACDESC_DEL 7 +#define WLFC_CTL_TYPE_RSSI 8 + +#define WLFC_CTL_TYPE_INTERFACE_OPEN 9 +#define WLFC_CTL_TYPE_INTERFACE_CLOSE 10 + +#define WLFC_CTL_TYPE_FIFO_CREDITBACK 11 + +#define WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP 12 /** host->dongle */ +#define WLFC_CTL_TYPE_MAC_REQUEST_PACKET 13 +#define WLFC_CTL_TYPE_HOST_REORDER_RXPKTS 14 + +#define WLFC_CTL_TYPE_TX_ENTRY_STAMP 15 +#define WLFC_CTL_TYPE_RX_STAMP 16 +#define WLFC_CTL_TYPE_TX_STATUS_STAMP 17 /** obsolete */ + +#define WLFC_CTL_TYPE_TRANS_ID 18 +#define WLFC_CTL_TYPE_COMP_TXSTATUS 19 + +#define WLFC_CTL_TYPE_TID_OPEN 20 +#define WLFC_CTL_TYPE_TID_CLOSE 21 + + +#define WLFC_CTL_TYPE_FILLER 255 + +#define WLFC_CTL_VALUE_LEN_MACDESC 8 /** handle, interface, MAC */ + +#define WLFC_CTL_VALUE_LEN_MAC 1 /** MAC-handle */ +#define WLFC_CTL_VALUE_LEN_RSSI 1 + +#define WLFC_CTL_VALUE_LEN_INTERFACE 1 +#define WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP 2 + +#define WLFC_CTL_VALUE_LEN_TXSTATUS 4 +#define WLFC_CTL_VALUE_LEN_PKTTAG 4 +#define WLFC_CTL_VALUE_LEN_TIMESTAMP 12 /** 4-byte rate info + 2 TSF */ + +#define WLFC_CTL_VALUE_LEN_SEQ 2 + +/* The high bits of ratespec report in timestamp are used for various status */ +#define WLFC_TSFLAGS_RX_RETRY (1 << 31) +#define WLFC_TSFLAGS_PM_ENABLED (1 << 30) +#define WLFC_TSFLAGS_MASK (WLFC_TSFLAGS_RX_RETRY | WLFC_TSFLAGS_PM_ENABLED) + +/* enough space to host all 4 ACs, bc/mc and atim fifo credit */ +#define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK 6 + +#define WLFC_CTL_VALUE_LEN_REQUEST_CREDIT 3 /* credit, MAC-handle, prec_bitmap */ +#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET 3 /* credit, MAC-handle, prec_bitmap */ + + +#define WLFC_PKTFLAG_PKTFROMHOST 0x01 +#define WLFC_PKTFLAG_PKT_REQUESTED 0x02 + +#define WL_TXSTATUS_STATUS_MASK 0xff /* allow 8 bits */ +#define WL_TXSTATUS_STATUS_SHIFT 24 + +#define WL_TXSTATUS_SET_STATUS(x, status) ((x) = \ + ((x) & ~(WL_TXSTATUS_STATUS_MASK << WL_TXSTATUS_STATUS_SHIFT)) | \ + (((status) & WL_TXSTATUS_STATUS_MASK) << WL_TXSTATUS_STATUS_SHIFT)) +#define WL_TXSTATUS_GET_STATUS(x) (((x) >> WL_TXSTATUS_STATUS_SHIFT) & \ + WL_TXSTATUS_STATUS_MASK) + +/** + * Bit 31 of the 32-bit packet tag is defined as 'generation ID'. It is set by the host to the + * "current" generation, and by the firmware to the "expected" generation, toggling on suppress. The + * firmware accepts a packet when the generation matches; on reset (startup) both "current" and + * "expected" are set to 0. 
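+ *
+ * A minimal usage sketch (illustrative, not from the original header),
+ * using the WL_TXSTATUS_*_GENERATION accessors defined just below:
+ *
+ *	uint32 pkttag = 0;
+ *	WL_TXSTATUS_SET_GENERATION(pkttag, cur_gen);	// host stamps the pkt
+ *	...
+ *	if (WL_TXSTATUS_GET_GENERATION(pkttag) != expected_gen)
+ *		;	// firmware would treat the packet as suppressed/stale
+ *
+ * 'cur_gen' and 'expected_gen' are hypothetical per-side state variables.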
+ */ +#define WL_TXSTATUS_GENERATION_MASK 1 /* allow 1 bit */ +#define WL_TXSTATUS_GENERATION_SHIFT 31 + +#define WL_TXSTATUS_SET_GENERATION(x, gen) ((x) = \ + ((x) & ~(WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT)) | \ + (((gen) & WL_TXSTATUS_GENERATION_MASK) << WL_TXSTATUS_GENERATION_SHIFT)) + +#define WL_TXSTATUS_GET_GENERATION(x) (((x) >> WL_TXSTATUS_GENERATION_SHIFT) & \ + WL_TXSTATUS_GENERATION_MASK) + +#define WL_TXSTATUS_FLAGS_MASK 0xf /* allow 4 bits only */ +#define WL_TXSTATUS_FLAGS_SHIFT 27 + +#define WL_TXSTATUS_SET_FLAGS(x, flags) ((x) = \ + ((x) & ~(WL_TXSTATUS_FLAGS_MASK << WL_TXSTATUS_FLAGS_SHIFT)) | \ + (((flags) & WL_TXSTATUS_FLAGS_MASK) << WL_TXSTATUS_FLAGS_SHIFT)) +#define WL_TXSTATUS_GET_FLAGS(x) (((x) >> WL_TXSTATUS_FLAGS_SHIFT) & \ + WL_TXSTATUS_FLAGS_MASK) + +#define WL_TXSTATUS_FIFO_MASK 0x7 /* allow 3 bits for FIFO ID */ +#define WL_TXSTATUS_FIFO_SHIFT 24 + +#define WL_TXSTATUS_SET_FIFO(x, flags) ((x) = \ + ((x) & ~(WL_TXSTATUS_FIFO_MASK << WL_TXSTATUS_FIFO_SHIFT)) | \ + (((flags) & WL_TXSTATUS_FIFO_MASK) << WL_TXSTATUS_FIFO_SHIFT)) +#define WL_TXSTATUS_GET_FIFO(x) (((x) >> WL_TXSTATUS_FIFO_SHIFT) & WL_TXSTATUS_FIFO_MASK) + +#define WL_TXSTATUS_PKTID_MASK 0xffffff /* allow 24 bits */ +#define WL_TXSTATUS_SET_PKTID(x, num) ((x) = \ + ((x) & ~WL_TXSTATUS_PKTID_MASK) | (num)) +#define WL_TXSTATUS_GET_PKTID(x) ((x) & WL_TXSTATUS_PKTID_MASK) + +#define WL_TXSTATUS_HSLOT_MASK 0xffff /* allow 16 bits */ +#define WL_TXSTATUS_HSLOT_SHIFT 8 + +#define WL_TXSTATUS_SET_HSLOT(x, hslot) ((x) = \ + ((x) & ~(WL_TXSTATUS_HSLOT_MASK << WL_TXSTATUS_HSLOT_SHIFT)) | \ + (((hslot) & WL_TXSTATUS_HSLOT_MASK) << WL_TXSTATUS_HSLOT_SHIFT)) +#define WL_TXSTATUS_GET_HSLOT(x) (((x) >> WL_TXSTATUS_HSLOT_SHIFT)& \ + WL_TXSTATUS_HSLOT_MASK) + +#define WL_TXSTATUS_FREERUNCTR_MASK 0xff /* allow 8 bits */ + +#define WL_TXSTATUS_SET_FREERUNCTR(x, ctr) ((x) = \ + ((x) & ~(WL_TXSTATUS_FREERUNCTR_MASK)) | \ + ((ctr) & WL_TXSTATUS_FREERUNCTR_MASK)) +#define WL_TXSTATUS_GET_FREERUNCTR(x) ((x)& WL_TXSTATUS_FREERUNCTR_MASK) + +/* Seq number part of AMSDU */ +#define WL_SEQ_AMSDU_MASK 0x1 /* allow 1 bit */ +#define WL_SEQ_AMSDU_SHIFT 14 +#define WL_SEQ_SET_AMSDU(x, val) ((x) = \ + ((x) & ~(WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT)) | \ + (((val) & WL_SEQ_AMSDU_MASK) << WL_SEQ_AMSDU_SHIFT)) +#define WL_SEQ_GET_AMSDU(x) (((x) >> WL_SEQ_AMSDU_SHIFT) & \ + WL_SEQ_AMSDU_MASK) + +/* Seq number is valid coming from FW */ +#define WL_SEQ_FROMFW_MASK 0x1 /* allow 1 bit */ +#define WL_SEQ_FROMFW_SHIFT 13 +#define WL_SEQ_SET_FROMFW(x, val) ((x) = \ + ((x) & ~(WL_SEQ_FROMFW_MASK << WL_SEQ_FROMFW_SHIFT)) | \ + (((val) & WL_SEQ_FROMFW_MASK) << WL_SEQ_FROMFW_SHIFT)) +#define WL_SEQ_GET_FROMFW(x) (((x) >> WL_SEQ_FROMFW_SHIFT) & \ + WL_SEQ_FROMFW_MASK) + +/** + * Proptxstatus related. 
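+ *
+ * Illustrative sketch (not from the original header): when the host
+ * re-queues a previously suppressed packet, it keeps the old sequence
+ * number and marks its origin so the firmware does not allocate a new one:
+ *
+ *	WL_SEQ_SET_NUM(seq, saved_num);	// number saved at suppress time
+ *	WL_SEQ_SET_FROMDRV(seq, 1);
+ *
+ * 'seq' and 'saved_num' are hypothetical uint16 variables.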
+ * + * Pkt from bus layer (DHD for SDIO and pciedev for PCIE) + * is re-using seq number previously suppressed + * so FW should not assign new one + */ +#define WL_SEQ_FROMDRV_MASK 0x1 /* allow 1 bit */ +#define WL_SEQ_FROMDRV_SHIFT 12 +#define WL_SEQ_SET_FROMDRV(x, val) ((x) = \ + ((x) & ~(WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT)) | \ + (((val) & WL_SEQ_FROMDRV_MASK) << WL_SEQ_FROMDRV_SHIFT)) +#define WL_SEQ_GET_FROMDRV(x) (((x) >> WL_SEQ_FROMDRV_SHIFT) & \ + WL_SEQ_FROMDRV_MASK) + +#define WL_SEQ_NUM_MASK 0xfff /* allow 12 bit */ +#define WL_SEQ_NUM_SHIFT 0 +#define WL_SEQ_SET_NUM(x, val) ((x) = \ + ((x) & ~(WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT)) | \ + (((val) & WL_SEQ_NUM_MASK) << WL_SEQ_NUM_SHIFT)) +#define WL_SEQ_GET_NUM(x) (((x) >> WL_SEQ_NUM_SHIFT) & \ + WL_SEQ_NUM_MASK) + +#define WL_SEQ_AMSDU_SUPPR_MASK ((WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT) | \ + (WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT) | \ + (WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT)) + +/* 32 STA should be enough??, 6 bits; Must be power of 2 */ +#define WLFC_MAC_DESC_TABLE_SIZE 32 +#define WLFC_MAX_IFNUM 16 +#define WLFC_MAC_DESC_ID_INVALID 0xff + +/* b[7:5] -reuse guard, b[4:0] -value */ +#define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f) + +#define WLFC_PKTFLAG_SET_PKTREQUESTED(x) (x) |= \ + (WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT) + +#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x) (x) &= \ + ~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT) + + +#define WLFC_MAX_PENDING_DATALEN 120 + +/* host is free to discard the packet */ +#define WLFC_CTL_PKTFLAG_DISCARD 0 +/* D11 suppressed a packet */ +#define WLFC_CTL_PKTFLAG_D11SUPPRESS 1 +/* WL firmware suppressed a packet because MAC is + already in PSMode (short time window) +*/ +#define WLFC_CTL_PKTFLAG_WLSUPPRESS 2 +/* Firmware tossed this packet */ +#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC 3 +/* Firmware tossed after retries */ +#define WLFC_CTL_PKTFLAG_DISCARD_NOACK 4 +/* Firmware wrongly reported suppressed previously,now fixing to acked */ +#define WLFC_CTL_PKTFLAG_SUPPRESS_ACKED 5 + +#define WLFC_D11_STATUS_INTERPRET(txs) \ + ((txs)->status.was_acked ? WLFC_CTL_PKTFLAG_DISCARD : \ + (TXS_SUPR_MAGG_DONE((txs)->status.suppr_ind) ? 
\ + WLFC_CTL_PKTFLAG_DISCARD_NOACK : WLFC_CTL_PKTFLAG_D11SUPPRESS)) + + +#ifdef PROP_TXSTATUS_DEBUG +#define WLFC_DBGMESG(x) printf x +/* wlfc-breadcrumb */ +#define WLFC_BREADCRUMB(x) do {if ((x) == NULL) \ + {printf("WLFC: %s():%d:caller:%p\n", \ + __FUNCTION__, __LINE__, __builtin_return_address(0));}} while (0) +#define WLFC_PRINTMAC(banner, ea) do {printf("%s MAC: [%02x:%02x:%02x:%02x:%02x:%02x]\n", \ + banner, ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]); } while (0) +#define WLFC_WHEREIS(s) printf("WLFC: at %s():%d, %s\n", __FUNCTION__, __LINE__, (s)) +#else +#define WLFC_DBGMESG(x) +#define WLFC_BREADCRUMB(x) +#define WLFC_PRINTMAC(banner, ea) +#define WLFC_WHEREIS(s) +#endif + +/* AMPDU host reorder packet flags */ +#define WLHOST_REORDERDATA_MAXFLOWS 256 +#define WLHOST_REORDERDATA_LEN 10 +#define WLHOST_REORDERDATA_TOTLEN (WLHOST_REORDERDATA_LEN + 1 + 1) /* +tag +len */ + +#define WLHOST_REORDERDATA_FLOWID_OFFSET 0 +#define WLHOST_REORDERDATA_MAXIDX_OFFSET 2 +#define WLHOST_REORDERDATA_FLAGS_OFFSET 4 +#define WLHOST_REORDERDATA_CURIDX_OFFSET 6 +#define WLHOST_REORDERDATA_EXPIDX_OFFSET 8 + +#define WLHOST_REORDERDATA_DEL_FLOW 0x01 +#define WLHOST_REORDERDATA_FLUSH_ALL 0x02 +#define WLHOST_REORDERDATA_CURIDX_VALID 0x04 +#define WLHOST_REORDERDATA_EXPIDX_VALID 0x08 +#define WLHOST_REORDERDATA_NEW_HOLE 0x10 + +/* transaction id data len byte 0: rsvd, byte 1: seqnumber, byte 2-5 will be used for timestampe */ +#define WLFC_CTL_TRANS_ID_LEN 6 +#define WLFC_TYPE_TRANS_ID_LEN 6 + +#define WLFC_MODE_HANGER 1 /* use hanger */ +#define WLFC_MODE_AFQ 2 /* use afq (At Firmware Queue) */ +#define WLFC_IS_OLD_DEF(x) ((x & 1) || (x & 2)) + +#define WLFC_MODE_AFQ_SHIFT 2 /* afq bit */ +#define WLFC_SET_AFQ(x, val) ((x) = \ + ((x) & ~(1 << WLFC_MODE_AFQ_SHIFT)) | \ + (((val) & 1) << WLFC_MODE_AFQ_SHIFT)) +#define WLFC_GET_AFQ(x) (((x) >> WLFC_MODE_AFQ_SHIFT) & 1) + +#define WLFC_MODE_REUSESEQ_SHIFT 3 /* seq reuse bit */ +#define WLFC_SET_REUSESEQ(x, val) ((x) = \ + ((x) & ~(1 << WLFC_MODE_REUSESEQ_SHIFT)) | \ + (((val) & 1) << WLFC_MODE_REUSESEQ_SHIFT)) +#define WLFC_GET_REUSESEQ(x) (((x) >> WLFC_MODE_REUSESEQ_SHIFT) & 1) + +#define WLFC_MODE_REORDERSUPP_SHIFT 4 /* host reorder suppress pkt bit */ +#define WLFC_SET_REORDERSUPP(x, val) ((x) = \ + ((x) & ~(1 << WLFC_MODE_REORDERSUPP_SHIFT)) | \ + (((val) & 1) << WLFC_MODE_REORDERSUPP_SHIFT)) +#define WLFC_GET_REORDERSUPP(x) (((x) >> WLFC_MODE_REORDERSUPP_SHIFT) & 1) + +#endif /* __wlfc_proto_definitions_h__ */ diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl.h b/drivers/net/wireless/bcmdhd/include/wlioctl.h new file mode 100644 index 000000000000..4447ca0107b5 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/wlioctl.h @@ -0,0 +1,8065 @@ +/* + * Custom OID/ioctl definitions for + * + * + * Broadcom 802.11abg Networking Device Driver + * + * Definitions subject to change without notice. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * <> + * + * $Id: wlioctl.h 609280 2016-01-01 06:31:38Z $ + */ + +#ifndef _wlioctl_h_ +#define _wlioctl_h_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + + + + +typedef struct { + uint32 num; + chanspec_t list[1]; +} chanspec_list_t; + +#define RSN_KCK_LENGTH 16 +#define RSN_KEK_LENGTH 16 + + +#ifndef INTF_NAME_SIZ +#define INTF_NAME_SIZ 16 +#endif + +/* Used to send ioctls over the transport pipe */ +typedef struct remote_ioctl { + cdc_ioctl_t msg; + uint32 data_len; + char intf_name[INTF_NAME_SIZ]; +} rem_ioctl_t; +#define REMOTE_SIZE sizeof(rem_ioctl_t) + + +/* DFS Forced param */ +typedef struct wl_dfs_forced_params { + chanspec_t chspec; + uint16 version; + chanspec_list_t chspec_list; +} wl_dfs_forced_t; + +#define DFS_PREFCHANLIST_VER 0x01 +#define WL_CHSPEC_LIST_FIXED_SIZE OFFSETOF(chanspec_list_t, list) +#define WL_DFS_FORCED_PARAMS_FIXED_SIZE \ + (WL_CHSPEC_LIST_FIXED_SIZE + OFFSETOF(wl_dfs_forced_t, chspec_list)) +#define WL_DFS_FORCED_PARAMS_MAX_SIZE \ + WL_DFS_FORCED_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(chanspec_t)) + +/* association decision information */ +typedef struct { + bool assoc_approved; /**< (re)association approved */ + uint16 reject_reason; /**< reason code for rejecting association */ + struct ether_addr da; + int64 sys_time; /**< current system time */ +} assoc_decision_t; + +#define DFS_SCAN_S_IDLE -1 +#define DFS_SCAN_S_RADAR_FREE 0 +#define DFS_SCAN_S_RADAR_FOUND 1 +#define DFS_SCAN_S_INPROGESS 2 +#define DFS_SCAN_S_SCAN_ABORTED 3 +#define DFS_SCAN_S_SCAN_MODESW_INPROGRESS 4 +#define DFS_SCAN_S_MAX 5 + + +#define ACTION_FRAME_SIZE 1800 + +typedef struct wl_action_frame { + struct ether_addr da; + uint16 len; + uint32 packetId; + uint8 data[ACTION_FRAME_SIZE]; +} wl_action_frame_t; + +#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame) + +typedef struct ssid_info +{ + uint8 ssid_len; /**< the length of SSID */ + uint8 ssid[32]; /**< SSID string */ +} ssid_info_t; + +typedef struct wl_af_params { + uint32 channel; + int32 dwell_time; + struct ether_addr BSSID; + wl_action_frame_t action_frame; +} wl_af_params_t; + +#define WL_WIFI_AF_PARAMS_SIZE sizeof(struct wl_af_params) + +#define MFP_TEST_FLAG_NORMAL 0 +#define MFP_TEST_FLAG_ANY_KEY 1 +typedef struct wl_sa_query { + uint32 flag; + uint8 action; + uint16 id; + struct 
ether_addr da; +} wl_sa_query_t; + +/* require default structure packing */ +#define BWL_DEFAULT_PACKING +#include + + +/* Flags for OBSS IOVAR Parameters */ +#define WL_OBSS_DYN_BWSW_FLAG_ACTIVITY_PERIOD (0x01) +#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_PERIOD (0x02) +#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_INCR_PERIOD (0x04) +#define WL_OBSS_DYN_BWSW_FLAG_PSEUDO_SENSE_PERIOD (0x08) +#define WL_OBSS_DYN_BWSW_FLAG_RX_CRS_PERIOD (0x10) +#define WL_OBSS_DYN_BWSW_FLAG_DUR_THRESHOLD (0x20) +#define WL_OBSS_DYN_BWSW_FLAG_TXOP_PERIOD (0x40) + +/* OBSS IOVAR Version information */ +#define WL_PROT_OBSS_CONFIG_PARAMS_VERSION 1 +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 obss_bwsw_activity_cfm_count_cfg; /* configurable count in + * seconds before we confirm that OBSS is present and + * dynamically activate dynamic bwswitch. + */ + uint8 obss_bwsw_no_activity_cfm_count_cfg; /* configurable count in + * seconds before we confirm that OBSS is GONE and + * dynamically start pseudo upgrade. If in pseudo sense time, we + * will see OBSS, [means that, we false detected that OBSS-is-gone + * in watchdog] this count will be incremented in steps of + * obss_bwsw_no_activity_cfm_count_incr_cfg for confirming OBSS + * detection again. Note that, at present, max 30seconds is + * allowed like this. [OBSS_BWSW_NO_ACTIVITY_MAX_INCR_DEFAULT] + */ + uint8 obss_bwsw_no_activity_cfm_count_incr_cfg; /* see above + */ + uint16 obss_bwsw_pseudo_sense_count_cfg; /* number of msecs/cnt to be in + * pseudo state. This is used to sense/measure the stats from lq. + */ + uint8 obss_bwsw_rx_crs_threshold_cfg; /* RX CRS default threshold */ + uint8 obss_bwsw_dur_thres; /* OBSS dyn bwsw trigger/RX CRS Sec */ + uint8 obss_bwsw_txop_threshold_cfg; /* TXOP default threshold */ +} BWL_POST_PACKED_STRUCT wlc_prot_dynbwsw_config_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 version; /**< version field */ + uint32 config_mask; + uint32 reset_mask; + wlc_prot_dynbwsw_config_t config_params; +} BWL_POST_PACKED_STRUCT obss_config_params_t; + + +/* bsscfg type */ +typedef enum bsscfg_type_t { + BSSCFG_TYPE_GENERIC = 0, /**< default */ + BSSCFG_TYPE_P2P = 1, /**< The BSS is for p2p link */ + BSSCFG_TYPE_BTA = 2, + BSSCFG_TYPE_TDLS = 4, + BSSCFG_TYPE_AWDL = 5, + BSSCFG_TYPE_PROXD = 6, + BSSCFG_TYPE_NAN = 7, + BSSCFG_TYPE_MAX +} bsscfg_type_t; + +/* bsscfg subtype */ +enum { + BSSCFG_GENERIC_STA = 1, /* GENERIC */ + BSSCFG_GENERIC_AP = 2, /* GENERIC */ + BSSCFG_P2P_GC = 3, /* P2P */ + BSSCFG_P2P_GO = 4, /* P2P */ + BSSCFG_P2P_DISC = 5, /* P2P */ +}; + +typedef struct wlc_bsscfg_info { + uint32 type; + uint32 subtype; +} wlc_bsscfg_info_t; + + + +/* Legacy structure to help keep backward compatible wl tool and tray app */ + +#define LEGACY_WL_BSS_INFO_VERSION 107 /**< older version of wl_bss_info struct */ + +typedef struct wl_bss_info_107 { + uint32 version; /**< version field */ + uint32 length; /**< byte length of data in this record, + * starting at version and including IEs + */ + struct ether_addr BSSID; + uint16 beacon_period; /**< units are Kusec */ + uint16 capability; /**< Capability information */ + uint8 SSID_len; + uint8 SSID[32]; + struct { + uint count; /**< # rates in this set */ + uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ + } rateset; /**< supported rates */ + uint8 channel; /**< Channel no. 
*/ + uint16 atim_window; /**< units are Kusec */ + uint8 dtim_period; /**< DTIM period */ + int16 RSSI; /**< receive signal strength (in dBm) */ + int8 phy_noise; /**< noise (in dBm) */ + uint32 ie_length; /**< byte length of Information Elements */ + /* variable length Information Elements */ +} wl_bss_info_107_t; + +/* + * Per-BSS information structure. + */ + +#define LEGACY2_WL_BSS_INFO_VERSION 108 /**< old version of wl_bss_info struct */ + +/* BSS info structure + * Applications MUST CHECK ie_offset field and length field to access IEs and + * next bss_info structure in a vector (in wl_scan_results_t) + */ +typedef struct wl_bss_info_108 { + uint32 version; /**< version field */ + uint32 length; /**< byte length of data in this record, + * starting at version and including IEs + */ + struct ether_addr BSSID; + uint16 beacon_period; /**< units are Kusec */ + uint16 capability; /**< Capability information */ + uint8 SSID_len; + uint8 SSID[32]; + struct { + uint count; /**< # rates in this set */ + uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ + } rateset; /**< supported rates */ + chanspec_t chanspec; /**< chanspec for bss */ + uint16 atim_window; /**< units are Kusec */ + uint8 dtim_period; /**< DTIM period */ + int16 RSSI; /**< receive signal strength (in dBm) */ + int8 phy_noise; /**< noise (in dBm) */ + + uint8 n_cap; /**< BSS is 802.11N Capable */ + uint32 nbss_cap; /**< 802.11N BSS Capabilities (based on HT_CAP_*) */ + uint8 ctl_ch; /**< 802.11N BSS control channel number */ + uint32 reserved32[1]; /**< Reserved for expansion of BSS properties */ + uint8 flags; /**< flags */ + uint8 reserved[3]; /**< Reserved for expansion of BSS properties */ + uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */ + + uint16 ie_offset; /**< offset at which IEs start, from beginning */ + uint32 ie_length; /**< byte length of Information Elements */ + /* Add new fields here */ + /* variable length Information Elements */ +} wl_bss_info_108_t; + +#define WL_BSS_INFO_VERSION 109 /**< current version of wl_bss_info struct */ + +/* BSS info structure + * Applications MUST CHECK ie_offset field and length field to access IEs and + * next bss_info structure in a vector (in wl_scan_results_t) + */ +typedef struct wl_bss_info { + uint32 version; /**< version field */ + uint32 length; /**< byte length of data in this record, + * starting at version and including IEs + */ + struct ether_addr BSSID; + uint16 beacon_period; /**< units are Kusec */ + uint16 capability; /**< Capability information */ + uint8 SSID_len; + uint8 SSID[32]; + struct { + uint count; /**< # rates in this set */ + uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */ + } rateset; /**< supported rates */ + chanspec_t chanspec; /**< chanspec for bss */ + uint16 atim_window; /**< units are Kusec */ + uint8 dtim_period; /**< DTIM period */ + int16 RSSI; /**< receive signal strength (in dBm) */ + int8 phy_noise; /**< noise (in dBm) */ + + uint8 n_cap; /**< BSS is 802.11N Capable */ + uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */ + uint8 ctl_ch; /**< 802.11N BSS control channel number */ + uint8 padding1[3]; /**< explicit struct alignment padding */ + uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ + uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */ + uint8 flags; /**< flags */ + uint8 vht_cap; /**< BSS is vht capable */ + uint8 reserved[2]; /**< Reserved for expansion of BSS properties */ + uint8 basic_mcs[MCSSET_LEN]; /**< 
802.11N BSS required MCS set */ + + uint16 ie_offset; /**< offset at which IEs start, from beginning */ + uint32 ie_length; /**< byte length of Information Elements */ + int16 SNR; /**< average SNR of during frame reception */ + uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */ + uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */ + uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */ + /* Add new fields here */ + /* variable length Information Elements */ +} wl_bss_info_t; + +#define WL_GSCAN_BSS_INFO_VERSION 1 /* current version of wl_gscan_bss_info struct */ +#define WL_GSCAN_INFO_FIXED_FIELD_SIZE (sizeof(wl_gscan_bss_info_t) - sizeof(wl_bss_info_t)) + +typedef struct wl_gscan_bss_info { + uint32 timestamp[2]; + wl_bss_info_t info; + /* Do not add any more members below, fixed */ + /* and variable length Information Elements to follow */ +} wl_gscan_bss_info_t; + + +typedef struct wl_bsscfg { + uint32 bsscfg_idx; + uint32 wsec; + uint32 WPA_auth; + uint32 wsec_index; + uint32 associated; + uint32 BSS; + uint32 phytest_on; + struct ether_addr prev_BSSID; + struct ether_addr BSSID; + uint32 targetbss_wpa2_flags; + uint32 assoc_type; + uint32 assoc_state; +} wl_bsscfg_t; + +typedef struct wl_if_add { + uint32 bsscfg_flags; + uint32 if_flags; + uint32 ap; + struct ether_addr mac_addr; + uint32 wlc_index; +} wl_if_add_t; + +typedef struct wl_bss_config { + uint32 atim_window; + uint32 beacon_period; + uint32 chanspec; +} wl_bss_config_t; + +#define WL_BSS_USER_RADAR_CHAN_SELECT 0x1 /**< User application will randomly select + * radar channel. + */ + +#define DLOAD_HANDLER_VER 1 /**< Downloader version */ +#define DLOAD_FLAG_VER_MASK 0xf000 /**< Downloader version mask */ +#define DLOAD_FLAG_VER_SHIFT 12 /**< Downloader version shift */ + +#define DL_CRC_NOT_INUSE 0x0001 +#define DL_BEGIN 0x0002 +#define DL_END 0x0004 + +/* generic download types & flags */ +enum { + DL_TYPE_UCODE = 1, + DL_TYPE_CLM = 2 +}; + +/* ucode type values */ +enum { + UCODE_FW, + INIT_VALS, + BS_INIT_VALS +}; + +struct wl_dload_data { + uint16 flag; + uint16 dload_type; + uint32 len; + uint32 crc; + uint8 data[1]; +}; +typedef struct wl_dload_data wl_dload_data_t; + +struct wl_ucode_info { + uint32 ucode_type; + uint32 num_chunks; + uint32 chunk_len; + uint32 chunk_num; + uint8 data_chunk[1]; +}; +typedef struct wl_ucode_info wl_ucode_info_t; + +struct wl_clm_dload_info { + uint32 ds_id; + uint32 clm_total_len; + uint32 num_chunks; + uint32 chunk_len; + uint32 chunk_offset; + uint8 data_chunk[1]; +}; +typedef struct wl_clm_dload_info wl_clm_dload_info_t; + +typedef struct wlc_ssid { + uint32 SSID_len; + uchar SSID[DOT11_MAX_SSID_LEN]; +} wlc_ssid_t; + +typedef struct wlc_ssid_ext { + bool hidden; + uint32 SSID_len; + uchar SSID[DOT11_MAX_SSID_LEN]; +} wlc_ssid_ext_t; + + +#define MAX_PREFERRED_AP_NUM 5 +typedef struct wlc_fastssidinfo { + uint32 SSID_channel[MAX_PREFERRED_AP_NUM]; + wlc_ssid_t SSID_info[MAX_PREFERRED_AP_NUM]; +} wlc_fastssidinfo_t; + +#ifdef CUSTOMER_HW_31_1 + +#define AP_NORM 0 +#define AP_STEALTH 1 +#define STREET_PASS_AP 2 + +#define NSC_MAX_TGT_SSID 20 +typedef struct nsc_ssid_entry_list { + wlc_ssid_t ssid_info; + int ssid_type; +} nsc_ssid_entry_list_t; + +typedef struct nsc_ssid_list { + uint32 num_entries; /* N wants 150 */ + nsc_ssid_entry_list_t ssid_entry[1]; +} nsc_ssid_list_t; + +#define NSC_TGT_SSID_BUFSZ (sizeof(nsc_ssid_entry_list_t) * \ + (NSC_MAX_TGT_SSID - 1) + sizeof(nsc_ssid_list_t)) + +/* Default values from N */ +#define NSC_SCPATT_ARRSZ 32 + +/* scan types */ 
+#define UNI_SCAN 0 +#define SP_SCAN_ACTIVE 1 +#define SP_SCAN_PASSIVE 2 +#define DOZE 3 + +/* what we found */ +typedef struct nsc_scan_results { + wlc_ssid_t ssid; + struct ether_addr mac; + int scantype; + uint16 channel; +} nsc_scan_results_t; + +typedef BWL_PRE_PACKED_STRUCT struct nsc_af_body { + uint8 type; /* should be 0x7f */ + uint8 oui[DOT11_OUI_LEN]; /* just like it says */ + uint8 subtype; + uint8 ielen; /* */ + uint8 data[1]; /* variable */ +} BWL_POST_PACKED_STRUCT nsc_af_body_t; + +typedef BWL_PRE_PACKED_STRUCT struct nsc_sdlist { + uint8 scantype; + uint16 duration; + uint16 channel; /* SP only */ + uint8 ssid_index; /* SP only */ + uint16 rate; /* SP only */ +} BWL_POST_PACKED_STRUCT nsc_sdlist_t; + +typedef struct nsc_scandes { + uint32 num_entries; /* number of list entries */ + nsc_sdlist_t sdlist[1]; /* variable */ +} nsc_scandes_t; + +#define NSC_MAX_SDLIST_ENTRIES 8 +#define NSC_SDDESC_BUFSZ (sizeof(nsc_sdlist_t) * \ + (NSC_MAX_SDLIST_ENTRIES - 1) + sizeof(nsc_scandes_t)) + +#define SCAN_ARR_END (NSC_MAX_SDLIST_ENTRIES) +#endif /* CUSTOMER_HW_31_1 */ + +typedef BWL_PRE_PACKED_STRUCT struct wnm_url { + uint8 len; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT wnm_url_t; + +#define WNM_BSS_SELECT_TYPE_RSSI 0 +#define WNM_BSS_SELECT_TYPE_CU 1 + +#define WNM_BSSLOAD_MONITOR_VERSION 1 +typedef struct wnm_bssload_monitor_cfg { + uint8 version; + uint8 band; + uint8 duration; /* duration between 1 to 20sec */ +} wnm_bssload_monitor_cfg_t; + +#define BSS_MAXTABLE_SIZE 10 +#define WNM_BSS_SELECT_FACTOR_VERSION 1 +typedef struct wnm_bss_select_factor_params { + uint8 low; + uint8 high; + uint8 factor; + uint8 pad; +} wnm_bss_select_factor_params_t; + +typedef struct wnm_bss_select_factor_cfg { + uint8 version; + uint8 band; + uint16 type; + uint16 pad; + uint16 count; + wnm_bss_select_factor_params_t params[1]; +} wnm_bss_select_factor_cfg_t; + +#define WNM_BSS_SELECT_WEIGHT_VERSION 1 +typedef struct wnm_bss_select_weight_cfg { + uint8 version; + uint8 band; + uint16 type; + uint16 weight; /* weightage for each type between 0 to 100 */ +} wnm_bss_select_weight_cfg_t; + +#define WNM_ROAM_TRIGGER_VERSION 1 +typedef struct wnm_roam_trigger_cfg { + uint8 version; + uint8 band; + uint16 type; + int16 trigger; /* trigger for each type in new roam algorithm */ +} wnm_roam_trigger_cfg_t; + +typedef struct chan_scandata { + uint8 txpower; + uint8 pad; + chanspec_t channel; /**< Channel num, bw, ctrl_sb and band */ + uint32 channel_mintime; + uint32 channel_maxtime; +} chan_scandata_t; + +typedef enum wl_scan_type { + EXTDSCAN_FOREGROUND_SCAN, + EXTDSCAN_BACKGROUND_SCAN, + EXTDSCAN_FORCEDBACKGROUND_SCAN +} wl_scan_type_t; + +#define WLC_EXTDSCAN_MAX_SSID 5 + +typedef struct wl_extdscan_params { + int8 nprobes; /**< 0, passive, otherwise active */ + int8 split_scan; /**< split scan */ + int8 band; /**< band */ + int8 pad; + wlc_ssid_t ssid[WLC_EXTDSCAN_MAX_SSID]; /* ssid list */ + uint32 tx_rate; /**< in 500ksec units */ + wl_scan_type_t scan_type; /**< enum */ + int32 channel_num; + chan_scandata_t channel_list[1]; /**< list of chandata structs */ +} wl_extdscan_params_t; + +#define WL_EXTDSCAN_PARAMS_FIXED_SIZE (sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t)) + +#define WL_SCAN_PARAMS_SSID_MAX 10 + +typedef struct wl_scan_params { + wlc_ssid_t ssid; /**< default: {0, ""} */ + struct ether_addr bssid; /**< default: bcast */ + int8 bss_type; /**< default: any, + * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT + */ + uint8 scan_type; /**< flags, 0 use default */ + int32 nprobes; /**< 
-1 use default, number of probes per channel */ + int32 active_time; /**< -1 use default, dwell time per channel for + * active scanning + */ + int32 passive_time; /**< -1 use default, dwell time per channel + * for passive scanning + */ + int32 home_time; /**< -1 use default, dwell time for the home channel + * between channel scans + */ + int32 channel_num; /**< count of channels and ssids that follow + * + * low half is count of channels in channel_list, 0 + * means default (use all available channels) + * + * high half is entries in wlc_ssid_t array that + * follows channel_list, aligned for int32 (4 bytes) + * meaning an odd channel count implies a 2-byte pad + * between end of channel_list and first ssid + * + * if ssid count is zero, single ssid in the fixed + * parameter portion is assumed, otherwise ssid in + * the fixed portion is ignored + */ + uint16 channel_list[1]; /**< list of chanspecs */ +} wl_scan_params_t; + +/* size of wl_scan_params not including variable length array */ +#define WL_SCAN_PARAMS_FIXED_SIZE 64 +#define WL_MAX_ROAMSCAN_DATSZ (WL_SCAN_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16))) + +#define ISCAN_REQ_VERSION 1 + +/* incremental scan struct */ +typedef struct wl_iscan_params { + uint32 version; + uint16 action; + uint16 scan_duration; + wl_scan_params_t params; +} wl_iscan_params_t; + +/* 3 fields + size of wl_scan_params, not including variable length array */ +#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t)) + +typedef struct wl_scan_results { + uint32 buflen; + uint32 version; + uint32 count; + wl_bss_info_t bss_info[1]; +} wl_scan_results_t; + +/* size of wl_scan_results not including variable length array */ +#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t)) + + +#define ESCAN_REQ_VERSION 1 + +/** event scan reduces amount of SOC memory needed to store scan results */ +typedef struct wl_escan_params { + uint32 version; + uint16 action; + uint16 sync_id; + wl_scan_params_t params; +} wl_escan_params_t; + +#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t)) + +/** event scan reduces amount of SOC memory needed to store scan results */ +typedef struct wl_escan_result { + uint32 buflen; + uint32 version; + uint16 sync_id; + uint16 bss_count; + wl_bss_info_t bss_info[1]; +} wl_escan_result_t; + +#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t)) + +typedef struct wl_gscan_result { + uint32 buflen; + uint32 version; + wl_gscan_bss_info_t bss_info[1]; +} wl_gscan_result_t; + +#define WL_GSCAN_RESULTS_FIXED_SIZE (sizeof(wl_gscan_result_t) - sizeof(wl_gscan_bss_info_t)) + +/* incremental scan results struct */ +typedef struct wl_iscan_results { + uint32 status; + wl_scan_results_t results; +} wl_iscan_results_t; + +/* size of wl_iscan_results not including variable length array */ +#define WL_ISCAN_RESULTS_FIXED_SIZE \ + (WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results)) + +#define SCANOL_PARAMS_VERSION 1 + +typedef struct scanol_params { + uint32 version; + uint32 flags; /**< offload scanning flags */ + int32 active_time; /**< -1 use default, dwell time per channel for active scanning */ + int32 passive_time; /**< -1 use default, dwell time per channel for passive scanning */ + int32 idle_rest_time; /**< -1 use default, time idle between scan cycle */ + int32 idle_rest_time_multiplier; + int32 active_rest_time; + int32 active_rest_time_multiplier; + int32 
scan_cycle_idle_rest_time; + int32 scan_cycle_idle_rest_multiplier; + int32 scan_cycle_active_rest_time; + int32 scan_cycle_active_rest_multiplier; + int32 max_rest_time; + int32 max_scan_cycles; + int32 nprobes; /**< -1 use default, number of probes per channel */ + int32 scan_start_delay; + uint32 nchannels; + uint32 ssid_count; + wlc_ssid_t ssidlist[1]; +} scanol_params_t; + +typedef struct wl_probe_params { + wlc_ssid_t ssid; + struct ether_addr bssid; + struct ether_addr mac; +} wl_probe_params_t; + +#define WL_MAXRATES_IN_SET 16 /**< max # of rates in a rateset */ +typedef struct wl_rateset { + uint32 count; /**< # rates in this set */ + uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */ +} wl_rateset_t; + +typedef struct wl_rateset_args { + uint32 count; /**< # rates in this set */ + uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */ + uint8 mcs[MCSSET_LEN]; /* supported mcs index bit map */ + uint16 vht_mcs[VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */ +} wl_rateset_args_t; + +#define TXBF_RATE_MCS_ALL 4 +#define TXBF_RATE_VHT_ALL 4 +#define TXBF_RATE_OFDM_ALL 8 + +typedef struct wl_txbf_rateset { + uint8 txbf_rate_mcs[TXBF_RATE_MCS_ALL]; /**< one for each stream */ + uint8 txbf_rate_mcs_bcm[TXBF_RATE_MCS_ALL]; /**< one for each stream */ + uint16 txbf_rate_vht[TXBF_RATE_VHT_ALL]; /**< one for each stream */ + uint16 txbf_rate_vht_bcm[TXBF_RATE_VHT_ALL]; /**< one for each stream */ + uint8 txbf_rate_ofdm[TXBF_RATE_OFDM_ALL]; /**< bitmap of ofdm rates that enables txbf */ + uint8 txbf_rate_ofdm_bcm[TXBF_RATE_OFDM_ALL]; /* bitmap of ofdm rates that enables txbf */ + uint8 txbf_rate_ofdm_cnt; + uint8 txbf_rate_ofdm_cnt_bcm; +} wl_txbf_rateset_t; + +#define OFDM_RATE_MASK 0x0000007f +typedef uint8 ofdm_rates_t; + +typedef struct wl_rates_info { + wl_rateset_t rs_tgt; + uint32 phy_type; + int32 bandtype; + uint8 cck_only; + uint8 rate_mask; + uint8 mcsallow; + uint8 bw; + uint8 txstreams; +} wl_rates_info_t; + +/* uint32 list */ +typedef struct wl_uint32_list { + /* in - # of elements, out - # of entries */ + uint32 count; + /* variable length uint32 list */ + uint32 element[1]; +} wl_uint32_list_t; + +/* used for association with a specific BSSID and chanspec list */ +typedef struct wl_assoc_params { + struct ether_addr bssid; /**< 00:00:00:00:00:00: broadcast scan */ + uint16 bssid_cnt; /**< 0: use chanspec_num, and the single bssid, + * otherwise count of chanspecs in chanspec_list + * AND paired bssids following chanspec_list + * also, chanspec_num has to be set to zero + * for bssid list to be used + */ + int32 chanspec_num; /**< 0: all available channels, + * otherwise count of chanspecs in chanspec_list + */ + chanspec_t chanspec_list[1]; /**< list of chanspecs */ +} wl_assoc_params_t; + +#define WL_ASSOC_PARAMS_FIXED_SIZE OFFSETOF(wl_assoc_params_t, chanspec_list) + +/* used for reassociation/roam to a specific BSSID and channel */ +typedef wl_assoc_params_t wl_reassoc_params_t; +#define WL_REASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE + +/* used for association to a specific BSSID and channel */ +typedef wl_assoc_params_t wl_join_assoc_params_t; +#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE + +/* used for join with or without a specific bssid and channel list */ +typedef struct wl_join_params { + wlc_ssid_t ssid; + wl_assoc_params_t params; /**< optional field, but it must include the fixed portion + * of the wl_assoc_params_t struct when it does present. 
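+ *
+ * Illustrative sizing sketch (not from the original header): a join
+ * targeting a single known chanspec would allocate
+ *	WL_JOIN_PARAMS_FIXED_SIZE + 1 * sizeof(chanspec_t)
+ * bytes and set chanspec_num = 1 in this embedded wl_assoc_params_t.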
+ */ +} wl_join_params_t; + +#define WL_JOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_join_params_t, params) + \ + WL_ASSOC_PARAMS_FIXED_SIZE) +/* scan params for extended join */ +typedef struct wl_join_scan_params { + uint8 scan_type; /**< 0 use default, active or passive scan */ + int32 nprobes; /**< -1 use default, number of probes per channel */ + int32 active_time; /**< -1 use default, dwell time per channel for + * active scanning + */ + int32 passive_time; /**< -1 use default, dwell time per channel + * for passive scanning + */ + int32 home_time; /**< -1 use default, dwell time for the home channel + * between channel scans + */ +} wl_join_scan_params_t; + +/* extended join params */ +typedef struct wl_extjoin_params { + wlc_ssid_t ssid; /**< {0, ""}: wildcard scan */ + wl_join_scan_params_t scan; + wl_join_assoc_params_t assoc; /**< optional field, but it must include the fixed portion + * of the wl_join_assoc_params_t struct when it does + * present. + */ +} wl_extjoin_params_t; +#define WL_EXTJOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_extjoin_params_t, assoc) + \ + WL_JOIN_ASSOC_PARAMS_FIXED_SIZE) + +#define ANT_SELCFG_MAX 4 /**< max number of antenna configurations */ +#define MAX_STREAMS_SUPPORTED 4 /**< max number of streams supported */ +typedef struct { + uint8 ant_config[ANT_SELCFG_MAX]; /**< antenna configuration */ + uint8 num_antcfg; /**< number of available antenna configurations */ +} wlc_antselcfg_t; + +typedef struct { + uint32 duration; /**< millisecs spent sampling this channel */ + uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */ + /**< move if cur bss moves channels) */ + uint32 congest_obss; /**< traffic not in our bss */ + uint32 interference; /**< millisecs detecting a non 802.11 interferer. */ + uint32 timestamp; /**< second timestamp */ +} cca_congest_t; + +typedef struct { + chanspec_t chanspec; /**< Which channel? */ + uint16 num_secs; /**< How many secs worth of data */ + cca_congest_t secs[1]; /**< Data */ +} cca_congest_channel_req_t; + +typedef struct { + uint32 duration; /**< millisecs spent sampling this channel */ + uint32 congest; /**< millisecs detecting busy CCA */ + uint32 timestamp; /**< second timestamp */ +} cca_congest_simple_t; + +typedef struct { + uint16 status; + uint16 id; + chanspec_t chanspec; /**< Which channel? */ + uint16 len; + union { + cca_congest_simple_t cca_busy; /**< CCA busy */ + int noise; /**< noise floor */ + }; +} cca_chan_qual_event_t; + + +/* interference sources */ +enum interference_source { + ITFR_NONE = 0, /**< interference */ + ITFR_PHONE, /**< wireless phone */ + ITFR_VIDEO_CAMERA, /**< wireless video camera */ + ITFR_MICROWAVE_OVEN, /**< microwave oven */ + ITFR_BABY_MONITOR, /**< wireless baby monitor */ + ITFR_BLUETOOTH, /**< bluetooth */ + ITFR_VIDEO_CAMERA_OR_BABY_MONITOR, /**< wireless camera or baby monitor */ + ITFR_BLUETOOTH_OR_BABY_MONITOR, /**< bluetooth or baby monitor */ + ITFR_VIDEO_CAMERA_OR_PHONE, /**< video camera or phone */ + ITFR_UNIDENTIFIED /**< interference from unidentified source */ +}; + +/* structure for interference source report */ +typedef struct { + uint32 flags; /**< flags. 
bit definitions below */ + uint32 source; /**< last detected interference source */ + uint32 timestamp; /**< second timestamp on interferenced flag change */ +} interference_source_rep_t; + +#define WLC_CNTRY_BUF_SZ 4 /**< Country string is 3 bytes + NUL */ + + +typedef struct wl_country { + char country_abbrev[WLC_CNTRY_BUF_SZ]; /**< nul-terminated country code used in + * the Country IE + */ + int32 rev; /**< revision specifier for ccode + * on set, -1 indicates unspecified. + * on get, rev >= 0 + */ + char ccode[WLC_CNTRY_BUF_SZ]; /**< nul-terminated built-in country code. + * variable length, but fixed size in + * struct allows simple allocation for + * expected country strings <= 3 chars. + */ +} wl_country_t; + +#define CCODE_INFO_VERSION 1 + +typedef enum wl_ccode_role { + WLC_CCODE_ROLE_ACTIVE = 0, + WLC_CCODE_ROLE_HOST, + WLC_CCODE_ROLE_80211D_ASSOC, + WLC_CCODE_ROLE_80211D_SCAN, + WLC_CCODE_ROLE_DEFAULT, + WLC_CCODE_LAST +} wl_ccode_role_t; +#define WLC_NUM_CCODE_INFO WLC_CCODE_LAST + +typedef struct wl_ccode_entry { + uint16 reserved; + uint8 band; + uint8 role; + char ccode[WLC_CNTRY_BUF_SZ]; +} wl_ccode_entry_t; + +typedef struct wl_ccode_info { + uint16 version; + uint16 count; /* Number of ccodes entries in the set */ + wl_ccode_entry_t ccodelist[1]; +} wl_ccode_info_t; +#define WL_CCODE_INFO_FIXED_LEN OFFSETOF(wl_ccode_info_t, ccodelist) + +typedef struct wl_channels_in_country { + uint32 buflen; + uint32 band; + char country_abbrev[WLC_CNTRY_BUF_SZ]; + uint32 count; + uint32 channel[1]; +} wl_channels_in_country_t; + +typedef struct wl_country_list { + uint32 buflen; + uint32 band_set; + uint32 band; + uint32 count; + char country_abbrev[1]; +} wl_country_list_t; + +typedef struct wl_rm_req_elt { + int8 type; + int8 flags; + chanspec_t chanspec; + uint32 token; /**< token for this measurement */ + uint32 tsf_h; /**< TSF high 32-bits of Measurement start time */ + uint32 tsf_l; /**< TSF low 32-bits */ + uint32 dur; /**< TUs */ +} wl_rm_req_elt_t; + +typedef struct wl_rm_req { + uint32 token; /**< overall measurement set token */ + uint32 count; /**< number of measurement requests */ + void *cb; /**< completion callback function: may be NULL */ + void *cb_arg; /**< arg to completion callback function */ + wl_rm_req_elt_t req[1]; /**< variable length block of requests */ +} wl_rm_req_t; +#define WL_RM_REQ_FIXED_LEN OFFSETOF(wl_rm_req_t, req) + +typedef struct wl_rm_rep_elt { + int8 type; + int8 flags; + chanspec_t chanspec; + uint32 token; /**< token for this measurement */ + uint32 tsf_h; /**< TSF high 32-bits of Measurement start time */ + uint32 tsf_l; /**< TSF low 32-bits */ + uint32 dur; /**< TUs */ + uint32 len; /**< byte length of data block */ + uint8 data[1]; /**< variable length data block */ +} wl_rm_rep_elt_t; +#define WL_RM_REP_ELT_FIXED_LEN 24 /**< length excluding data block */ + +#define WL_RPI_REP_BIN_NUM 8 +typedef struct wl_rm_rpi_rep { + uint8 rpi[WL_RPI_REP_BIN_NUM]; + int8 rpi_max[WL_RPI_REP_BIN_NUM]; +} wl_rm_rpi_rep_t; + +typedef struct wl_rm_rep { + uint32 token; /**< overall measurement set token */ + uint32 len; /**< length of measurement report block */ + wl_rm_rep_elt_t rep[1]; /**< variable length block of reports */ +} wl_rm_rep_t; +#define WL_RM_REP_FIXED_LEN 8 + + +typedef enum sup_auth_status { + /* Basic supplicant authentication states */ + WLC_SUP_DISCONNECTED = 0, + WLC_SUP_CONNECTING, + WLC_SUP_IDREQUIRED, + WLC_SUP_AUTHENTICATING, + WLC_SUP_AUTHENTICATED, + WLC_SUP_KEYXCHANGE, + WLC_SUP_KEYED, + WLC_SUP_TIMEOUT, + 
WLC_SUP_LAST_BASIC_STATE, + + /* Extended supplicant authentication states */ + /* Waiting to receive handshake msg M1 */ + WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED, + /* Preparing to send handshake msg M2 */ + WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE, + /* Waiting to receive handshake msg M3 */ + WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE, + WLC_SUP_KEYXCHANGE_PREP_M4, /**< Preparing to send handshake msg M4 */ + WLC_SUP_KEYXCHANGE_WAIT_G1, /**< Waiting to receive handshake msg G1 */ + WLC_SUP_KEYXCHANGE_PREP_G2 /**< Preparing to send handshake msg G2 */ +} sup_auth_status_t; + +typedef struct wl_wsec_key { + uint32 index; /**< key index */ + uint32 len; /**< key length */ + uint8 data[DOT11_MAX_KEY_SIZE]; /**< key data */ + uint32 pad_1[18]; + uint32 algo; /**< CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */ + uint32 flags; /**< misc flags */ + uint32 pad_2[2]; + int pad_3; + int iv_initialized; /**< has IV been initialized already? */ + int pad_4; + /* Rx IV */ + struct { + uint32 hi; /**< upper 32 bits of IV */ + uint16 lo; /**< lower 16 bits of IV */ + } rxiv; + uint32 pad_5[2]; + struct ether_addr ea; /**< per station */ +} wl_wsec_key_t; + +#define WSEC_MIN_PSK_LEN 8 +#define WSEC_MAX_PSK_LEN 64 + +/* Flag for key material needing passhash'ing */ +#define WSEC_PASSPHRASE (1<<0) + +/* receptacle for WLC_SET_WSEC_PMK parameter */ +typedef struct { + ushort key_len; /**< octets in key material */ + ushort flags; /**< key handling qualification */ + uint8 key[WSEC_MAX_PSK_LEN]; /**< PMK material */ +} wsec_pmk_t; + +typedef struct _pmkid { + struct ether_addr BSSID; + uint8 PMKID[WPA2_PMKID_LEN]; +} pmkid_t; + +typedef struct _pmkid_list { + uint32 npmkid; + pmkid_t pmkid[1]; +} pmkid_list_t; + +typedef struct _pmkid_cand { + struct ether_addr BSSID; + uint8 preauth; +} pmkid_cand_t; + +typedef struct _pmkid_cand_list { + uint32 npmkid_cand; + pmkid_cand_t pmkid_cand[1]; +} pmkid_cand_list_t; + +#define WL_STA_ANT_MAX 4 /**< max possible rx antennas */ + +typedef struct wl_assoc_info { + uint32 req_len; + uint32 resp_len; + uint32 flags; + struct dot11_assoc_req req; + struct ether_addr reassoc_bssid; /* used in reassoc's */ + struct dot11_assoc_resp resp; +} wl_assoc_info_t; + +typedef struct wl_led_info { + uint32 index; /* led index */ + uint32 behavior; + uint8 activehi; +} wl_led_info_t; + + +/* srom read/write struct passed through ioctl */ +typedef struct { + uint byteoff; /**< byte offset */ + uint nbytes; /**< number of bytes */ + uint16 buf[1]; +} srom_rw_t; + +#define CISH_FLAG_PCIECIS (1 << 15) /* write CIS format bit for PCIe CIS */ +/* similar cis (srom or otp) struct [iovar: may not be aligned] */ +typedef struct { + uint16 source; /**< cis source */ + uint16 flags; /**< flags */ + uint32 byteoff; /**< byte offset */ + uint32 nbytes; /**< number of bytes */ + /* data follows here */ +} cis_rw_t; + +/* R_REG and W_REG struct passed through ioctl */ +typedef struct { + uint32 byteoff; /**< byte offset of the field in d11regs_t */ + uint32 val; /**< read/write value of the field */ + uint32 size; /**< sizeof the field */ + uint band; /**< band (optional) */ +} rw_reg_t; + +/* Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band */ +/* PCL - Power Control Loop */ +typedef struct { + uint16 auto_ctrl; /**< WL_ATTEN_XX */ + uint16 bb; /**< Baseband attenuation */ + uint16 radio; /**< Radio attenuation */ + uint16 txctl1; /**< Radio TX_CTL1 value */ +} atten_t; + +/* Per-AC retry parameters */ +struct wme_tx_params_s { + uint8 
short_retry; + uint8 short_fallback; + uint8 long_retry; + uint8 long_fallback; + uint16 max_rate; /* In units of 512 Kbps */ +}; + +typedef struct wme_tx_params_s wme_tx_params_t; + +#define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT) + +/* Used to get specific link/ac parameters */ +typedef struct { + int32 ac; + uint8 val; + struct ether_addr ea; +} link_val_t; + + +#define WL_PM_MUTE_TX_VER 1 + +typedef struct wl_pm_mute_tx { + uint16 version; /**< version */ + uint16 len; /**< length */ + uint16 deadline; /**< deadline timer (in milliseconds) */ + uint8 enable; /**< set to 1 to enable mode; set to 0 to disable it */ +} wl_pm_mute_tx_t; + + +typedef struct { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint16 cap; /**< sta's advertised capabilities */ + uint32 flags; /**< flags defined below */ + uint32 idle; /**< time since data pkt rx'd from sta */ + struct ether_addr ea; /**< Station address */ + wl_rateset_t rateset; /**< rateset in use */ + uint32 in; /**< seconds elapsed since associated */ + uint32 listen_interval_inms; /* Min Listen interval in ms for this STA */ + uint32 tx_pkts; /**< # of user packets transmitted (unicast) */ + uint32 tx_failures; /**< # of user packets failed */ + uint32 rx_ucast_pkts; /**< # of unicast packets received */ + uint32 rx_mcast_pkts; /**< # of multicast packets received */ + uint32 tx_rate; /**< Rate used by last tx frame */ + uint32 rx_rate; /**< Rate of last successful rx frame */ + uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */ + uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */ + uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */ + uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */ + uint32 tx_mcast_pkts; /**< # of mcast pkts txed */ + uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */ + uint64 rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */ + uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */ + uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */ + uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */ + uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */ + int8 rssi[WL_STA_ANT_MAX]; /* average rssi per antenna + * of data frames + */ + int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */ + uint16 aid; /**< association ID */ + uint16 ht_capabilities; /**< advertised ht caps */ + uint16 vht_flags; /**< converted vht flags */ + uint32 tx_pkts_retried; /**< # of frames where a retry was + * necessary + */ + uint32 tx_pkts_retry_exhausted; /* # of user frames where a retry + * was exhausted + */ + int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /* Per antenna RSSI of last + * received data frame. + */ + /* TX WLAN retry/failure statistics: + * Separated for host requested frames and WLAN locally generated frames. + * Include unicast frame only where the retries/failures can be counted. 
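+	 *
+	 * Illustrative sketch (not from the original header): a host-side
+	 * retry-rate estimate from these counters could be
+	 *	pct = (100 * si->tx_pkts_retries) /
+	 *	      (si->tx_pkts_total ? si->tx_pkts_total : 1);
+	 * where 'si' is a hypothetical (sta_info_t *).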
+ */ + uint32 tx_pkts_total; /**< # user frames sent successfully */ + uint32 tx_pkts_retries; /**< # user frames retries */ + uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */ + uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */ + uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry + * was exhausted + */ + uint32 rx_pkts_retried; /**< # rx with retry bit set */ + uint32 tx_rate_fallback; /**< lowest fallback TX rate */ +} sta_info_t; + +#define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_tot_pkts) + +#define WL_STA_VER 4 + +typedef struct { + uint32 auto_en; + uint32 active_ant; + uint32 rxcount; + int32 avg_snr_per_ant0; + int32 avg_snr_per_ant1; + int32 avg_snr_per_ant2; + uint32 swap_ge_rxcount0; + uint32 swap_ge_rxcount1; + uint32 swap_ge_snrthresh0; + uint32 swap_ge_snrthresh1; + uint32 swap_txfail0; + uint32 swap_txfail1; + uint32 swap_timer0; + uint32 swap_timer1; + uint32 swap_alivecheck0; + uint32 swap_alivecheck1; + uint32 rxcount_per_ant0; + uint32 rxcount_per_ant1; + uint32 acc_rxcount; + uint32 acc_rxcount_per_ant0; + uint32 acc_rxcount_per_ant1; + uint32 tx_auto_en; + uint32 tx_active_ant; + uint32 rx_policy; + uint32 tx_policy; + uint32 cell_policy; +} wlc_swdiv_stats_t; + +#define WLC_NUMRATES 16 /**< max # of rates in a rateset */ + +typedef struct wlc_rateset { + uint32 count; /**< number of rates in rates[] */ + uint8 rates[WLC_NUMRATES]; /**< rates in 500kbps units w/hi bit set if basic */ + uint8 htphy_membership; /**< HT PHY Membership */ + uint8 mcs[MCSSET_LEN]; /**< supported mcs index bit map */ + uint16 vht_mcsmap; /**< supported vht mcs nss bit map */ + uint16 vht_mcsmap_prop; /**< supported prop vht mcs nss bit map */ +} wlc_rateset_t; + +/* Used to get specific STA parameters */ +typedef struct { + uint32 val; + struct ether_addr ea; +} scb_val_t; + +/* Used by iovar versions of some ioctls, i.e. 
WLC_SCB_AUTHORIZE et al */ +typedef struct { + uint32 code; + scb_val_t ioctl_args; +} authops_t; + +/* channel encoding */ +typedef struct channel_info { + int hw_channel; + int target_channel; + int scan_channel; +} channel_info_t; + +/* For ioctls that take a list of MAC addresses */ +typedef struct maclist { + uint count; /**< number of MAC addresses */ + struct ether_addr ea[1]; /**< variable length array of MAC addresses */ +} maclist_t; + +/* get pkt count struct passed through ioctl */ +typedef struct get_pktcnt { + uint rx_good_pkt; + uint rx_bad_pkt; + uint tx_good_pkt; + uint tx_bad_pkt; + uint rx_ocast_good_pkt; /* unicast packets destined for others */ +} get_pktcnt_t; + +/* NINTENDO2 */ +#define LQ_IDX_MIN 0 +#define LQ_IDX_MAX 1 +#define LQ_IDX_AVG 2 +#define LQ_IDX_SUM 2 +#define LQ_IDX_LAST 3 +#define LQ_STOP_MONITOR 0 +#define LQ_START_MONITOR 1 + +/* Get averages RSSI, Rx PHY rate and SNR values */ +typedef struct { + int rssi[LQ_IDX_LAST]; /* Array to keep min, max, avg rssi */ + int snr[LQ_IDX_LAST]; /* Array to keep min, max, avg snr */ + int isvalid; /* Flag indicating whether above data is valid */ +} wl_lq_t; /* Link Quality */ + +typedef enum wl_wakeup_reason_type { + LCD_ON = 1, + LCD_OFF, + DRC1_WAKE, + DRC2_WAKE, + REASON_LAST +} wl_wr_type_t; + +typedef struct { +/* Unique filter id */ + uint32 id; + +/* stores the reason for the last wake up */ + uint8 reason; +} wl_wr_t; + +/* Get MAC specific rate histogram command */ +typedef struct { + struct ether_addr ea; /**< MAC Address */ + uint8 ac_cat; /**< Access Category */ + uint8 num_pkts; /**< Number of packet entries to be averaged */ +} wl_mac_ratehisto_cmd_t; /**< MAC Specific Rate Histogram command */ + +/* Get MAC rate histogram response */ +typedef struct { + uint32 rate[DOT11_RATE_MAX + 1]; /**< Rates */ + uint32 mcs[WL_RATESET_SZ_HT_IOCTL * WL_TX_CHAINS_MAX]; /**< MCS counts */ + uint32 vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX]; /**< VHT counts */ + uint32 tsf_timer[2][2]; /**< Start and End time for 8bytes value */ + uint32 prop11n_mcs[WLC_11N_LAST_PROP_MCS - WLC_11N_FIRST_PROP_MCS + 1]; /* MCS counts */ +} wl_mac_ratehisto_res_t; /**< MAC Specific Rate Histogram Response */ + +/* Linux network driver ioctl encoding */ +typedef struct wl_ioctl { + uint cmd; /**< common ioctl definition */ + void *buf; /**< pointer to user buffer */ + uint len; /**< length of user buffer */ + uint8 set; /**< 1=set IOCTL; 0=query IOCTL */ + uint used; /**< bytes read or written (optional) */ + uint needed; /**< bytes needed (optional) */ +} wl_ioctl_t; + +#ifdef CONFIG_COMPAT +typedef struct compat_wl_ioctl { + uint cmd; /**< common ioctl definition */ + uint32 buf; /**< pointer to user buffer */ + uint len; /**< length of user buffer */ + uint8 set; /**< 1=set IOCTL; 0=query IOCTL */ + uint used; /**< bytes read or written (optional) */ + uint needed; /**< bytes needed (optional) */ +} compat_wl_ioctl_t; +#endif /* CONFIG_COMPAT */ + +#define WL_NUM_RATES_CCK 4 /* 1, 2, 5.5, 11 Mbps */ +#define WL_NUM_RATES_OFDM 8 /* 6, 9, 12, 18, 24, 36, 48, 54 Mbps SISO/CDD */ +#define WL_NUM_RATES_MCS_1STREAM 8 /* MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */ +#define WL_NUM_RATES_EXTRA_VHT 2 /* Additional VHT 11AC rates */ +#define WL_NUM_RATES_VHT 10 +#define WL_NUM_RATES_MCS32 1 + + +/* + * Structure for passing hardware and software + * revision info up from the driver. 
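+ *
+ * A minimal query sketch (illustrative; assumes the WLC_GET_REVINFO ioctl
+ * and the wl_ioctl_t wrapper defined above, delivered through the OS
+ * port's private-ioctl path):
+ *
+ *	wlc_rev_info_t rev;
+ *	wl_ioctl_t ioc;
+ *	memset(&rev, 0, sizeof(rev));
+ *	ioc.cmd = WLC_GET_REVINFO;
+ *	ioc.buf = &rev;
+ *	ioc.len = sizeof(rev);
+ *	ioc.set = 0;			(query, not set)
+ *	(submit 'ioc' to the driver, then read rev.chipnum, rev.corerev, ...)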
+ */ +typedef struct wlc_rev_info { + uint vendorid; /**< PCI vendor id */ + uint deviceid; /**< device id of chip */ + uint radiorev; /**< radio revision */ + uint chiprev; /**< chip revision */ + uint corerev; /**< core revision */ + uint boardid; /**< board identifier (usu. PCI sub-device id) */ + uint boardvendor; /**< board vendor (usu. PCI sub-vendor id) */ + uint boardrev; /**< board revision */ + uint driverrev; /**< driver version */ + uint ucoderev; /**< microcode version */ + uint bus; /**< bus type */ + uint chipnum; /**< chip number */ + uint phytype; /**< phy type */ + uint phyrev; /**< phy revision */ + uint anarev; /**< anacore rev */ + uint chippkg; /**< chip package info */ + uint nvramrev; /**< nvram revision number */ +} wlc_rev_info_t; + +#define WL_REV_INFO_LEGACY_LENGTH 48 + +#define WL_BRAND_MAX 10 +typedef struct wl_instance_info { + uint instance; + char brand[WL_BRAND_MAX]; +} wl_instance_info_t; + +/* structure to change size of tx fifo */ +typedef struct wl_txfifo_sz { + uint16 magic; + uint16 fifo; + uint16 size; +} wl_txfifo_sz_t; + +/* Transfer info about an IOVar from the driver */ +/* Max supported IOV name size in bytes, + 1 for nul termination */ +#define WLC_IOV_NAME_LEN 30 +typedef struct wlc_iov_trx_s { + uint8 module; + uint8 type; + char name[WLC_IOV_NAME_LEN]; +} wlc_iov_trx_t; + +/* bump this number if you change the ioctl interface */ +#define WLC_IOCTL_VERSION 2 +#define WLC_IOCTL_VERSION_LEGACY_IOTYPES 1 + +#ifdef CONFIG_USBRNDIS_RETAIL +/* struct passed in for WLC_NDCONFIG_ITEM */ +typedef struct { + char *name; + void *param; +} ndconfig_item_t; +#endif + + + +#define WL_PHY_PAVARS_LEN 32 /**< Phytype, Bandrange, chain, a[0], b[0], c[0], d[0] .. */ + + +#define WL_PHY_PAVAR_VER 1 /**< pavars version */ +#define WL_PHY_PAVARS2_NUM 3 /**< a1, b0, b1 */ +typedef struct wl_pavars2 { + uint16 ver; /**< version of this struct */ + uint16 len; /**< len of this structure */ + uint16 inuse; /**< driver return 1 for a1,b0,b1 in current band range */ + uint16 phy_type; /**< phy type */ + uint16 bandrange; + uint16 chain; + uint16 inpa[WL_PHY_PAVARS2_NUM]; /**< phy pavars for one band range */ +} wl_pavars2_t; + +typedef struct wl_po { + uint16 phy_type; /**< Phy type */ + uint16 band; + uint16 cckpo; + uint32 ofdmpo; + uint16 mcspo[8]; +} wl_po_t; + +#define WL_NUM_RPCALVARS 5 /**< number of rpcal vars */ + +typedef struct wl_rpcal { + uint16 value; + uint16 update; +} wl_rpcal_t; + +typedef struct wl_aci_args { + int enter_aci_thresh; /* Trigger level to start detecting ACI */ + int exit_aci_thresh; /* Trigger level to exit ACI mode */ + int usec_spin; /* microsecs to delay between rssi samples */ + int glitch_delay; /* interval between ACI scans when glitch count is consistently high */ + uint16 nphy_adcpwr_enter_thresh; /**< ADC power to enter ACI mitigation mode */ + uint16 nphy_adcpwr_exit_thresh; /**< ADC power to exit ACI mitigation mode */ + uint16 nphy_repeat_ctr; /**< Number of tries per channel to compute power */ + uint16 nphy_num_samples; /**< Number of samples to compute power on one channel */ + uint16 nphy_undetect_window_sz; /**< num of undetects to exit ACI Mitigation mode */ + uint16 nphy_b_energy_lo_aci; /**< low ACI power energy threshold for bphy */ + uint16 nphy_b_energy_md_aci; /**< mid ACI power energy threshold for bphy */ + uint16 nphy_b_energy_hi_aci; /**< high ACI power energy threshold for bphy */ + uint16 nphy_noise_noassoc_glitch_th_up; /* wl interference 4 */ + uint16 nphy_noise_noassoc_glitch_th_dn; + uint16 
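+/* Illustrative note (an assumption about the customary iovar convention in
+ * this driver family, not something this header itself defines): named
+ * iovars travel through wl_ioctl_t as "name\0value". For a hypothetical
+ * uint32 iovar named "some_iovar":
+ *
+ *	char buf[WLC_IOV_NAME_LEN + sizeof(uint32)];
+ *	uint32 val = 1;
+ *	int n = strlen("some_iovar") + 1;	(name plus its NUL)
+ *	memcpy(buf, "some_iovar", n);
+ *	memcpy(buf + n, &val, sizeof(val));
+ *	(submit with cmd WLC_SET_VAR and len n + sizeof(val))
+ */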
nphy_noise_assoc_glitch_th_up;
+	uint16 nphy_noise_assoc_glitch_th_dn;
+	uint16 nphy_noise_assoc_aci_glitch_th_up;
+	uint16 nphy_noise_assoc_aci_glitch_th_dn;
+	uint16 nphy_noise_assoc_enter_th;
+	uint16 nphy_noise_noassoc_enter_th;
+	uint16 nphy_noise_assoc_rx_glitch_badplcp_enter_th;
+	uint16 nphy_noise_noassoc_crsidx_incr;
+	uint16 nphy_noise_assoc_crsidx_incr;
+	uint16 nphy_noise_crsidx_decr;
+} wl_aci_args_t;
+
+#define WL_ACI_ARGS_LEGACY_LENGTH	16	/**< bytes of pre NPHY aci args */
+#define WL_SAMPLECOLLECT_T_VERSION	2	/**< version of wl_samplecollect_args_t struct */
+typedef struct wl_samplecollect_args {
+	/* version 0 fields */
+	uint8 coll_us;
+	int cores;
+	/* add'l version 1 fields */
+	uint16 version;		/* see definition of WL_SAMPLECOLLECT_T_VERSION */
+	uint16 length;		/* length of entire structure */
+	int8 trigger;
+	uint16 timeout;
+	uint16 mode;
+	uint32 pre_dur;
+	uint32 post_dur;
+	uint8 gpio_sel;
+	uint8 downsamp;
+	uint8 be_deaf;
+	uint8 agc;		/**< loop from init gain and going down */
+	uint8 filter;		/**< override high pass corners to lowest */
+	/* add'l version 2 fields */
+	uint8 trigger_state;
+	uint8 module_sel1;
+	uint8 module_sel2;
+	uint16 nsamps;
+	int bitStart;
+	uint32 gpioCapMask;
+} wl_samplecollect_args_t;
+
+#define WL_SAMPLEDATA_T_VERSION		1	/**< version of wl_sampledata_t struct */
+/* version for unpacked sample data, int16 {(I,Q),Core(0..N)} */
+#define WL_SAMPLEDATA_T_VERSION_SPEC_AN	2
+
+typedef struct wl_sampledata {
+	uint16 version;	/**< structure version */
+	uint16 size;	/**< size of structure */
+	uint16 tag;	/**< Header/Data */
+	uint16 length;	/**< data length */
+	uint32 flag;	/**< bit def */
+} wl_sampledata_t;
+
+
+/* WL_OTA START */
+/* OTA Test Status */
+enum {
+	WL_OTA_TEST_IDLE = 0,		/**< Default Idle state */
+	WL_OTA_TEST_ACTIVE = 1,		/**< Test Running */
+	WL_OTA_TEST_SUCCESS = 2,	/**< Successfully Finished Test */
+	WL_OTA_TEST_FAIL = 3		/**< Test Failed in the Middle */
+};
+/* OTA SYNC Status */
+enum {
+	WL_OTA_SYNC_IDLE = 0,	/**< Idle state */
+	WL_OTA_SYNC_ACTIVE = 1,	/**< Waiting for Sync */
+	WL_OTA_SYNC_FAIL = 2	/**< Sync pkt not received */
+};
+
+/* Various error states the DUT can get stuck in during a test */
+enum {
+	WL_OTA_SKIP_TEST_CAL_FAIL = 1,		/**< Phy calibration failed */
+	WL_OTA_SKIP_TEST_SYNCH_FAIL = 2,	/**< Sync Packet not received */
+	WL_OTA_SKIP_TEST_FILE_DWNLD_FAIL = 3,	/**< Cmd flow file download failed */
+	WL_OTA_SKIP_TEST_NO_TEST_FOUND = 4,	/**< No test found in Flow file */
+	WL_OTA_SKIP_TEST_WL_NOT_UP = 5,		/**< WL UP failed */
+	WL_OTA_SKIP_TEST_UNKNOWN_CALL		/**< Unintentional scheduling of an ota test */
+};
+
+/* Differentiator for ota_tx and ota_rx */
+enum {
+	WL_OTA_TEST_TX = 0,	/**< ota_tx */
+	WL_OTA_TEST_RX = 1,	/**< ota_rx */
+};
+
+/* Bandwidth modes of operation: 20Mhz, 40Mhz, 20 in 40Mhz, and 80Mhz */
+enum {
+	WL_OTA_TEST_BW_20_IN_40MHZ = 0,	/**< 20 in 40 operation */
+	WL_OTA_TEST_BW_20MHZ = 1,	/**< 20 Mhz operation */
+	WL_OTA_TEST_BW_40MHZ = 2,	/**< full 40Mhz operation */
+	WL_OTA_TEST_BW_80MHZ = 3	/* full 80Mhz operation */
+};
+
+#define HT_MCS_INUSE	0x00000080	/* HT MCS in use, indicates b0-6 holds an mcs */
+#define VHT_MCS_INUSE	0x00000100	/* VHT MCS in use, indicates b0-6 holds an mcs */
+#define OTA_RATE_MASK	0x0000007f	/* rate/mcs value */
+#define OTA_STF_SISO	0
+#define OTA_STF_CDD	1
+#define OTA_STF_STBC	2
+#define OTA_STF_SDM	3
+
+typedef struct ota_rate_info {
+	uint8 rate_cnt;					/**< Total number of rates */
+	uint16 rate_val_mbps[WL_OTA_TEST_MAX_NUM_RATE];	/**< array of rates from 1mbps to 130mbps */
+							/**< for legacy rates : rate in mbps * 2 */
+							/**< for HT rates : mcs index */
+} ota_rate_info_t;
+
+typedef struct ota_power_info {
+	int8 pwr_ctrl_on;	/**< power control on/off */
+	int8 start_pwr;		/**< starting power/index */
+	int8 delta_pwr;		/**< delta power/index */
+	int8 end_pwr;		/**< end power/index */
+} ota_power_info_t;
+
+typedef struct ota_packetengine {
+	uint16 delay;	/* Inter-packet delay */
+			/**< for ota_tx, delay is tx ifs in micro seconds */
+			/* for ota_rx, delay is wait time in milliseconds */
+	uint16 nframes;	/* Number of frames */
+	uint16 length;	/* Packet length */
+} ota_packetengine_t;
+
+/* Test info vector */
+typedef struct wl_ota_test_args {
+	uint8 cur_test;			/**< test phase */
+	uint8 chan;			/**< channel */
+	uint8 bw;			/**< bandwidth */
+	uint8 control_band;		/**< control band */
+	uint8 stf_mode;			/**< stf mode */
+	ota_rate_info_t rt_info;	/**< Rate info */
+	ota_packetengine_t pkteng;	/**< packeteng info */
+	uint8 txant;			/**< tx antenna */
+	uint8 rxant;			/**< rx antenna */
+	ota_power_info_t pwr_info;	/**< power sweep info */
+	uint8 wait_for_sync;		/**< wait for sync or not */
+	uint8 ldpc;
+	uint8 sgi;
+	/* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */
+} wl_ota_test_args_t;
+
+#define WL_OTA_TESTVEC_T_VERSION	1	/* version of wl_ota_test_vector_t struct */
+typedef struct wl_ota_test_vector {
+	uint16 version;
+	wl_ota_test_args_t test_arg[WL_OTA_TEST_MAX_NUM_SEQ];	/**< Test argument struct */
+	uint16 test_cnt;		/**< Total number of tests */
+	uint8 file_dwnld_valid;		/**< File successfully downloaded */
+	uint8 sync_timeout;		/**< sync packet timeout */
+	int8 sync_fail_action;		/**< sync fail action */
+	struct ether_addr sync_mac;	/**< macaddress for sync pkt */
+	struct ether_addr tx_mac;	/**< macaddress for tx */
+	struct ether_addr rx_mac;	/**< macaddress for rx */
+	int8 loop_test;			/**< dbg feature to loop the test */
+	uint16 test_rxcnt;
+	/* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */
+} wl_ota_test_vector_t;
+
+
+/* struct copied back from dongle to host to query the status */
+typedef struct wl_ota_test_status {
+	int16 cur_test_cnt;		/**< test phase */
+	int8 skip_test_reason;		/**< skip test reason */
+	wl_ota_test_args_t test_arg;	/**< cur test arg details */
+	uint16 test_cnt;		/**< total number of tests downloaded */
+	uint8 file_dwnld_valid;		/**< file successfully downloaded? */
+	uint8 sync_timeout;		/**< sync timeout */
+	int8 sync_fail_action;		/**< sync fail action */
+	struct ether_addr sync_mac;	/**< macaddress for sync pkt */
+	struct ether_addr tx_mac;	/**< tx mac address */
+	struct ether_addr rx_mac;	/**< rx mac address */
+	uint8 test_stage;		/**< check the test status */
+	int8 loop_test;			/**< Debug feature to put the test engine in a loop */
+	uint8 sync_status;		/**< sync status */
+} wl_ota_test_status_t;
+typedef struct wl_ota_rx_rssi {
+	uint16 pktcnt;		/* Pkt count used for this rx test */
+	chanspec_t chanspec;	/* Channel info on which the packets are received */
+	int16 rssi;		/* Average RSSI of the first 50% packets received */
+} wl_ota_rx_rssi_t;
+
+#define	WL_OTARSSI_T_VERSION		1	/* version of wl_ota_test_rssi_t struct */
+#define WL_OTA_TEST_RSSI_FIXED_SIZE	OFFSETOF(wl_ota_test_rssi_t, rx_rssi)
+
+typedef struct wl_ota_test_rssi {
+	uint8 version;
+	uint8 testcnt;			/* total measured RSSI values, valid on output only */
+	wl_ota_rx_rssi_t rx_rssi[1];	/* Variable length array of wl_ota_rx_rssi_t */
+} wl_ota_test_rssi_t;
+
+/* WL_OTA END */
+
+/* wl_radar_args_t */
+typedef struct {
+	int npulses;	/**< required number of pulses at n * t_int */
+	int ncontig;	/**< required number of pulses at t_int */
+	int min_pw;	/**< minimum pulse width (20 MHz clocks) */
+	int max_pw;	/**< maximum pulse width (20 MHz clocks) */
+	uint16 thresh0;	/**< Radar detection, thresh 0 */
+	uint16 thresh1;	/**< Radar detection, thresh 1 */
+	uint16 blank;	/**< Radar detection, blank control */
+	uint16 fmdemodcfg;	/**< Radar detection, fmdemod config */
+	int npulses_lp;	/* Radar detection, minimum long pulses */
+	int min_pw_lp;	/* Minimum pulsewidth for long pulses */
+	int max_pw_lp;	/* Maximum pulsewidth for long pulses */
+	int min_fm_lp;	/* Minimum fm for long pulses */
+	int max_span_lp;	/* Maximum deltat for long pulses */
+	int min_deltat;	/* Minimum spacing between pulses */
+	int max_deltat;	/* Maximum spacing between pulses */
+	uint16 autocorr;	/**< Radar detection, autocorr on or off */
+	uint16 st_level_time;	/**< Radar detection, start_timing level */
+	uint16 t2_min;	/* minimum clocks needed to remain in state 2 */
+	uint32 version;	/* version */
+	uint32 fra_pulse_err;	/**< sample error margin for detecting French radar pulses */
+	int npulses_fra;	/* Radar detection, minimum French pulses set */
+	int npulses_stg2;	/* Radar detection, minimum staggered-2 pulses set */
+	int npulses_stg3;	/* Radar detection, minimum staggered-3 pulses set */
+	uint16 percal_mask;	/**< defines which period cal is masked from radar detection */
+	int quant;	/**< quantization resolution to pulse positions */
+	uint32 min_burst_intv_lp;	/**< minimum burst to burst interval for bin3 radar */
+	uint32 max_burst_intv_lp;	/**< maximum burst to burst interval for bin3 radar */
+	int nskip_rst_lp;	/**< number of skipped pulses before resetting lp buffer */
+	int max_pw_tol;	/**< maximum tolerance allowed in detected pulse width for radar detection */
+	uint16 feature_mask;	/* 16-bit mask to specify enabled features */
+} wl_radar_args_t;
+
+#define WL_RADAR_ARGS_VERSION 2
+
+typedef struct {
+	uint32 version;	/* version */
+	uint16 thresh0_20_lo;	/* Radar detection, thresh 0 (range 5250-5350MHz) for BW 20MHz */
+	uint16 thresh1_20_lo;	/* Radar detection, thresh 1 (range 5250-5350MHz) for BW 20MHz */
+	uint16 thresh0_40_lo;	/* Radar detection, thresh 0 (range 5250-5350MHz) for BW 40MHz */
+	uint16 thresh1_40_lo;	/* Radar detection, thresh 1 (range 5250-5350MHz) for BW 40MHz */
+
uint16 thresh0_80_lo;	/* Radar detection, thresh 0 (range 5250-5350MHz) for BW 80MHz */
+	uint16 thresh1_80_lo;	/* Radar detection, thresh 1 (range 5250-5350MHz) for BW 80MHz */
+	uint16 thresh0_20_hi;	/* Radar detection, thresh 0 (range 5470-5725MHz) for BW 20MHz */
+	uint16 thresh1_20_hi;	/* Radar detection, thresh 1 (range 5470-5725MHz) for BW 20MHz */
+	uint16 thresh0_40_hi;	/* Radar detection, thresh 0 (range 5470-5725MHz) for BW 40MHz */
+	uint16 thresh1_40_hi;	/* Radar detection, thresh 1 (range 5470-5725MHz) for BW 40MHz */
+	uint16 thresh0_80_hi;	/* Radar detection, thresh 0 (range 5470-5725MHz) for BW 80MHz */
+	uint16 thresh1_80_hi;	/* Radar detection, thresh 1 (range 5470-5725MHz) for BW 80MHz */
+#ifdef WL11AC160
+	uint16 thresh0_160_lo;	/* Radar detection, thresh 0 (range 5250-5350MHz) for BW 160MHz */
+	uint16 thresh1_160_lo;	/* Radar detection, thresh 1 (range 5250-5350MHz) for BW 160MHz */
+	uint16 thresh0_160_hi;	/* Radar detection, thresh 0 (range 5470-5725MHz) for BW 160MHz */
+	uint16 thresh1_160_hi;	/* Radar detection, thresh 1 (range 5470-5725MHz) for BW 160MHz */
+#endif /* WL11AC160 */
+} wl_radar_thr_t;
+
+#define WL_RADAR_THR_VERSION	2
+
+/* RSSI per antenna */
+typedef struct {
+	uint32 version;			/**< version field */
+	uint32 count;			/**< number of valid antenna rssi */
+	int8 rssi_ant[WL_RSSI_ANT_MAX];	/**< rssi per antenna */
+} wl_rssi_ant_t;
+
+/* data structure used in 'dfs_status' wl interface, which is used to query dfs status */
+typedef struct {
+	uint state;	/**< noted by WL_DFS_CACSTATE_XX. */
+	uint duration;	/**< time spent in ms in state. */
+	/* as dfs enters ISM state, it removes the operational channel from quiet channel
+	 * list and notes the channel in channel_cleared. set to 0 if no channel is cleared
+	 */
+	chanspec_t chanspec_cleared;
+	/* chanspec_cleared used to be a uint; a uint16 pad is added to maintain the size */
+	uint16 pad;
+} wl_dfs_status_t;
+
+typedef struct {
+	uint state;	/* noted by WL_DFS_CACSTATE_XX */
+	uint duration;	/* time spent in ms in state */
+	chanspec_t chanspec;	/* chanspec of this core */
+	chanspec_t chanspec_last_cleared;	/* chanspec last cleared for operation by scanning */
+	uint16 sub_type;	/* currently just the index of the core or the respective PLL */
+	uint16 pad;
+} wl_dfs_sub_status_t;
+
+#define WL_DFS_STATUS_ALL_VERSION	(1)
+typedef struct {
+	uint16 version;	/* version field; current max version 1 */
+	uint16 num_sub_status;
+	wl_dfs_sub_status_t dfs_sub_status[1];	/* struct array of length num_sub_status */
+} wl_dfs_status_all_t;
+
+#define WL_DFS_AP_MOVE_VERSION	(1)
+typedef struct wl_dfs_ap_move_status {
+	int8 version;	/* version field; current max version 1 */
+	int8 move_status;	/* DFS move status */
+	chanspec_t chanspec;	/* New AP Chanspec */
+	wl_dfs_status_all_t scan_status;	/* status; see dfs_status_all for wl_dfs_status_all_t */
+} wl_dfs_ap_move_status_t;
+
+
+/* data structure used in 'radar_status' wl interface, which is used to query radar det status */
+typedef struct {
+	bool detected;
+	int count;
+	bool pretended;
+	uint32 radartype;
+	uint32 timenow;
+	uint32 timefromL;
+	int lp_csect_single;
+	int detected_pulse_index;
+	int nconsecq_pulses;
+	chanspec_t ch;
+	int pw[10];
+	int intv[10];
+	int fm[10];
+} wl_radar_status_t;
+
+#define NUM_PWRCTRL_RATES 12
+
+typedef struct {
+	uint8 txpwr_band_max[NUM_PWRCTRL_RATES];	/**< User set target */
+	uint8 txpwr_limit[NUM_PWRCTRL_RATES];		/**< reg and local power limit */
+	uint8 txpwr_local_max;				/**< local max according to the AP */
+
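+/* Illustrative walk of the variable-length wl_dfs_status_all_t defined
+ * above ('all' is a hypothetical pointer into a response buffer holding at
+ * least num_sub_status entries):
+ *
+ *	uint16 i;
+ *	for (i = 0; i < all->num_sub_status; i++) {
+ *		const wl_dfs_sub_status_t *sub = &all->dfs_sub_status[i];
+ *		(inspect sub->state, sub->duration, sub->chanspec here)
+ *	}
+ */
+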
uint8 txpwr_local_constraint; /**< local constraint according to the AP */ + uint8 txpwr_chan_reg_max; /**< Regulatory max for this channel */ + uint8 txpwr_target[2][NUM_PWRCTRL_RATES]; /**< Latest target for 2.4 and 5 Ghz */ + uint8 txpwr_est_Pout[2]; /**< Latest estimate for 2.4 and 5 Ghz */ + uint8 txpwr_opo[NUM_PWRCTRL_RATES]; /**< On G phy, OFDM power offset */ + uint8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES]; /**< Max CCK power for this band (SROM) */ + uint8 txpwr_bphy_ofdm_max; /**< Max OFDM power for this band (SROM) */ + uint8 txpwr_aphy_max[NUM_PWRCTRL_RATES]; /**< Max power for A band (SROM) */ + int8 txpwr_antgain[2]; /**< Ant gain for each band - from SROM */ + uint8 txpwr_est_Pout_gofdm; /**< Pwr estimate for 2.4 OFDM */ +} tx_power_legacy_t; + +#define WL_TX_POWER_RATES_LEGACY 45 +#define WL_TX_POWER_MCS20_FIRST 12 +#define WL_TX_POWER_MCS20_NUM 16 +#define WL_TX_POWER_MCS40_FIRST 28 +#define WL_TX_POWER_MCS40_NUM 17 + +typedef struct { + uint32 flags; + chanspec_t chanspec; /* txpwr report for this channel */ + chanspec_t local_chanspec; /* channel on which we are associated */ + uint8 local_max; /* local max according to the AP */ + uint8 local_constraint; /* local constraint according to the AP */ + int8 antgain[2]; /* Ant gain for each band - from SROM */ + uint8 rf_cores; /* count of RF Cores being reported */ + uint8 est_Pout[4]; /* Latest tx power out estimate per RF + * chain without adjustment + */ + uint8 est_Pout_cck; /* Latest CCK tx power out estimate */ + uint8 user_limit[WL_TX_POWER_RATES_LEGACY]; /* User limit */ + uint8 reg_limit[WL_TX_POWER_RATES_LEGACY]; /* Regulatory power limit */ + uint8 board_limit[WL_TX_POWER_RATES_LEGACY]; /* Max power board can support (SROM) */ + uint8 target[WL_TX_POWER_RATES_LEGACY]; /* Latest target power */ +} tx_power_legacy2_t; + +#define WL_NUM_2x2_ELEMENTS 4 +#define WL_NUM_3x3_ELEMENTS 6 +#define WL_NUM_4x4_ELEMENTS 10 + +typedef struct { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint32 flags; + chanspec_t chanspec; /**< txpwr report for this channel */ + chanspec_t local_chanspec; /**< channel on which we are associated */ + uint32 buflen; /**< ppr buffer length */ + uint8 pprbuf[1]; /**< Latest target power buffer */ +} wl_txppr_t; + +#define WL_TXPPR_VERSION 1 +#define WL_TXPPR_LENGTH (sizeof(wl_txppr_t)) +#define TX_POWER_T_VERSION 45 +/* number of ppr serialization buffers, it should be reg, board and target */ +#define WL_TXPPR_SER_BUF_NUM (3) + +typedef struct chanspec_txpwr_max { + chanspec_t chanspec; /* chanspec */ + uint8 txpwr_max; /* max txpwr in all the rates */ + uint8 padding; +} chanspec_txpwr_max_t; + +typedef struct wl_chanspec_txpwr_max { + uint16 ver; /**< version of this struct */ + uint16 len; /**< length in bytes of this structure */ + uint32 count; /**< number of elements of (chanspec, txpwr_max) pair */ + chanspec_txpwr_max_t txpwr[1]; /**< array of (chanspec, max_txpwr) pair */ +} wl_chanspec_txpwr_max_t; + +#define WL_CHANSPEC_TXPWR_MAX_VER 1 +#define WL_CHANSPEC_TXPWR_MAX_LEN (sizeof(wl_chanspec_txpwr_max_t)) + +typedef struct tx_inst_power { + uint8 txpwr_est_Pout[2]; /**< Latest estimate for 2.4 and 5 Ghz */ + uint8 txpwr_est_Pout_gofdm; /**< Pwr estimate for 2.4 OFDM */ +} tx_inst_power_t; + +#define WL_NUM_TXCHAIN_MAX 4 +typedef struct wl_txchain_pwr_offsets { + int8 offset[WL_NUM_TXCHAIN_MAX]; /**< quarter dBm signed offset for each chain */ +} wl_txchain_pwr_offsets_t; +/* maximum channels returned by the get valid channels iovar 
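+ * An illustrative query sketch, assuming the usual uint32-list container
+ * this interface family uses for channel lists:
+ *
+ *	struct { uint32 count; uint32 element[WL_NUMCHANNELS]; } list;
+ *	list.count = WL_NUMCHANNELS;
+ *	(issue WLC_GET_VALID_CHANNELS with &list, sizeof(list); on return
+ *	 list.count holds the number of valid entries in 'element')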
*/ +#define WL_NUMCHANNELS 64 + +/* + * Join preference iovar value is an array of tuples. Each tuple has a one-byte type, + * a one-byte length, and a variable length value. RSSI type tuple must be present + * in the array. + * + * Types are defined in "join preference types" section. + * + * Length is the value size in octets. It is reserved for WL_JOIN_PREF_WPA type tuple + * and must be set to zero. + * + * Values are defined below. + * + * 1. RSSI - 2 octets + * offset 0: reserved + * offset 1: reserved + * + * 2. WPA - 2 + 12 * n octets (n is # tuples defined below) + * offset 0: reserved + * offset 1: # of tuples + * offset 2: tuple 1 + * offset 14: tuple 2 + * ... + * offset 2 + 12 * (n - 1) octets: tuple n + * + * struct wpa_cfg_tuple { + * uint8 akm[DOT11_OUI_LEN+1]; akm suite + * uint8 ucipher[DOT11_OUI_LEN+1]; unicast cipher suite + * uint8 mcipher[DOT11_OUI_LEN+1]; multicast cipher suite + * }; + * + * multicast cipher suite can be specified as a specific cipher suite or WL_WPA_ACP_MCS_ANY. + * + * 3. BAND - 2 octets + * offset 0: reserved + * offset 1: see "band preference" and "band types" + * + * 4. BAND RSSI - 2 octets + * offset 0: band types + * offset 1: +ve RSSI boost value in dB + */ + +struct tsinfo_arg { + uint8 octets[3]; +}; + +#define RATE_CCK_1MBPS 0 +#define RATE_CCK_2MBPS 1 +#define RATE_CCK_5_5MBPS 2 +#define RATE_CCK_11MBPS 3 + +#define RATE_LEGACY_OFDM_6MBPS 0 +#define RATE_LEGACY_OFDM_9MBPS 1 +#define RATE_LEGACY_OFDM_12MBPS 2 +#define RATE_LEGACY_OFDM_18MBPS 3 +#define RATE_LEGACY_OFDM_24MBPS 4 +#define RATE_LEGACY_OFDM_36MBPS 5 +#define RATE_LEGACY_OFDM_48MBPS 6 +#define RATE_LEGACY_OFDM_54MBPS 7 + +#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION 1 + +typedef struct wl_bsstrans_rssi { + int8 rssi_2g; /**< RSSI in dbm for 2.4 G */ + int8 rssi_5g; /**< RSSI in dbm for 5G, unused for cck */ +} wl_bsstrans_rssi_t; + +#define RSSI_RATE_MAP_MAX_STREAMS 4 /**< max streams supported */ + +/* RSSI to rate mapping, all 20Mhz, no SGI */ +typedef struct wl_bsstrans_rssi_rate_map { + uint16 ver; + uint16 len; /* length of entire structure */ + wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /* 2.4G only */ + wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /* 6 to 54mbps */ + wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */ + wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT]; /* MCS0-9 */ +} wl_bsstrans_rssi_rate_map_t; + +#define WL_BSSTRANS_ROAMTHROTTLE_VERSION 1 + +/* Configure number of scans allowed per throttle period */ +typedef struct wl_bsstrans_roamthrottle { + uint16 ver; + uint16 period; + uint16 scans_allowed; +} wl_bsstrans_roamthrottle_t; + +#define NFIFO 6 /**< # tx/rx fifopairs */ +#define NREINITREASONCOUNT 8 +#define REINITREASONIDX(_x) (((_x) < NREINITREASONCOUNT) ? (_x) : 0) + +#define WL_CNT_T_VERSION 30 /**< current version of wl_cnt_t struct */ +#define WL_CNT_VERSION_6 6 +#define WL_CNT_VERSION_11 11 + +#define WLC_WITH_XTLV_CNT + +/* + * tlv IDs uniquely identifies counter component + * packed into wl_cmd_t container + */ +enum wl_cnt_xtlv_id { + WL_CNT_XTLV_WLC = 0x100, /**< WLC layer counters */ + WL_CNT_XTLV_CNTV_LE10_UCODE = 0x200, /**< wl counter ver < 11 UCODE MACSTAT */ + WL_CNT_XTLV_LT40_UCODE_V1 = 0x300, /**< corerev < 40 UCODE MACSTAT */ + WL_CNT_XTLV_GE40_UCODE_V1 = 0x400, /**< corerev >= 40 UCODE MACSTAT */ + WL_CNT_XTLV_GE64_UCODEX_V1 = 0x800 /* corerev >= 64 UCODEX MACSTAT */ +}; + +/* The number of variables in wl macstat cnt struct. 
+ * (wl_cnt_ge40mcst_v1_t, wl_cnt_lt40mcst_v1_t, wl_cnt_v_le10_mcst_t) + */ +#define WL_CNT_MCST_VAR_NUM 64 +/* sizeof(wl_cnt_ge40mcst_v1_t), sizeof(wl_cnt_lt40mcst_v1_t), and sizeof(wl_cnt_v_le10_mcst_t) */ +#define WL_CNT_MCST_STRUCT_SZ ((uint)sizeof(uint32) * WL_CNT_MCST_VAR_NUM) + +#define INVALID_CNT_VAL (uint32)(-1) +#define WL_CNT_MCXST_STRUCT_SZ ((uint)sizeof(wl_cnt_ge64mcxst_v1_t)) + +#define WL_XTLV_CNTBUF_MAX_SIZE ((uint)(OFFSETOF(wl_cnt_info_t, data)) + \ + (uint)BCM_XTLV_HDR_SIZE + (uint)sizeof(wl_cnt_wlc_t) + \ + (uint)BCM_XTLV_HDR_SIZE + WL_CNT_MCST_STRUCT_SZ + \ + (uint)BCM_XTLV_HDR_SIZE + WL_CNT_MCXST_STRUCT_SZ) + +#define WL_CNTBUF_MAX_SIZE MAX(WL_XTLV_CNTBUF_MAX_SIZE, (uint)sizeof(wl_cnt_ver_11_t)) + +/* Top structure of counters IOVar buffer */ +typedef struct { + uint16 version; /**< see definition of WL_CNT_T_VERSION */ + uint16 datalen; /**< length of data including all paddings. */ + uint8 data [1]; /**< variable length payload: + * 1 or more bcm_xtlv_t type of tuples. + * each tuple is padded to multiple of 4 bytes. + * 'datalen' field of this structure includes all paddings. + */ +} wl_cnt_info_t; + +/* wlc layer counters */ +typedef struct { + /* transmit stat counters */ + uint32 txframe; /**< tx data frames */ + uint32 txbyte; /**< tx data bytes */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txerror; /**< tx data errors (derived: sum of others) */ + uint32 txctl; /**< tx management frames */ + uint32 txprshort; /**< tx short preamble frames */ + uint32 txserr; /**< tx status errors */ + uint32 txnobuf; /**< tx out of buffers errors */ + uint32 txnoassoc; /**< tx discard because we're not associated */ + uint32 txrunt; /**< tx runt frames */ + uint32 txchit; /**< tx header cache hit (fastpath) */ + uint32 txcmiss; /**< tx header cache miss (slowpath) */ + + /* transmit chip error counters */ + uint32 txuflo; /**< tx fifo underflows */ + uint32 txphyerr; /**< tx phy errors (indicated in tx status) */ + uint32 txphycrs; + + /* receive stat counters */ + uint32 rxframe; /**< rx data frames */ + uint32 rxbyte; /**< rx data bytes */ + uint32 rxerror; /**< rx data errors (derived: sum of others) */ + uint32 rxctl; /**< rx management frames */ + uint32 rxnobuf; /**< rx out of buffers errors */ + uint32 rxnondata; /**< rx non data frames in the data channel errors */ + uint32 rxbadds; /**< rx bad DS errors */ + uint32 rxbadcm; /**< rx bad control or management frames */ + uint32 rxfragerr; /**< rx fragmentation errors */ + uint32 rxrunt; /**< rx runt frames */ + uint32 rxgiant; /**< rx giant frames */ + uint32 rxnoscb; /**< rx no scb error */ + uint32 rxbadproto; /**< rx invalid frames */ + uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */ + uint32 rxbadda; /**< rx frames tossed for invalid da */ + uint32 rxfilter; /**< rx frames filtered out */ + + /* receive chip error counters */ + uint32 rxoflo; /**< rx fifo overflow errors */ + uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */ + + uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */ + uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */ + uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */ + + /* misc counters */ + uint32 dmade; /**< tx/rx dma descriptor errors */ + uint32 dmada; /**< tx/rx dma data errors */ + uint32 dmape; /**< tx/rx dma descriptor protocol errors */ + uint32 reset; /**< reset count */ + uint32 tbtt; /**< cnts the TBTT int's */ + uint32 txdmawar; + uint32 pkt_callback_reg_fail; /**< callbacks register 
failure */ + + /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */ + uint32 txfrag; /**< dot11TransmittedFragmentCount */ + uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */ + uint32 txfail; /**< dot11FailedCount */ + uint32 txretry; /**< dot11RetryCount */ + uint32 txretrie; /**< dot11MultipleRetryCount */ + uint32 rxdup; /**< dot11FrameduplicateCount */ + uint32 txrts; /**< dot11RTSSuccessCount */ + uint32 txnocts; /**< dot11RTSFailureCount */ + uint32 txnoack; /**< dot11ACKFailureCount */ + uint32 rxfrag; /**< dot11ReceivedFragmentCount */ + uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /**< dot11FCSErrorCount */ + uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) */ + uint32 rxundec; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /**< TKIPReplays */ + uint32 ccmpfmterr; /**< CCMPFormatErrors */ + uint32 ccmpreplay; /**< CCMPReplays */ + uint32 ccmpundec; /**< CCMPDecryptErrors */ + uint32 fourwayfail; /**< FourWayHandshakeFailures */ + uint32 wepundec; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr; /**< dot11WEPICVErrorCount */ + uint32 decsuccess; /**< DecryptSuccessCount */ + uint32 tkipicverr; /**< TKIPICVErrorCount */ + uint32 wepexcluded; /**< dot11WEPExcludedCount */ + + uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */ + uint32 psmwds; /**< Count PSM watchdogs */ + uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */ + + /* MBSS counters, AP only */ + uint32 prq_entries_handled; /**< PRQ entries read in */ + uint32 prq_undirected_entries; /**< which were bcast bss & ssid */ + uint32 prq_bad_entries; /**< which could not be translated to info */ + uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... 
*/ + uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */ + uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /* packets rx at 1Mbps */ + uint32 rx2mbps; /* packets rx at 2Mbps */ + uint32 rx5mbps5; /* packets rx at 5.5Mbps */ + uint32 rx6mbps; /* packets rx at 6Mbps */ + uint32 rx9mbps; /* packets rx at 9Mbps */ + uint32 rx11mbps; /* packets rx at 11Mbps */ + uint32 rx12mbps; /* packets rx at 12Mbps */ + uint32 rx18mbps; /* packets rx at 18Mbps */ + uint32 rx24mbps; /* packets rx at 24Mbps */ + uint32 rx36mbps; /* packets rx at 36Mbps */ + uint32 rx48mbps; /* packets rx at 48Mbps */ + uint32 rx54mbps; /* packets rx at 54Mbps */ + uint32 rx108mbps; /* packets rx at 108mbps */ + uint32 rx162mbps; /* packets rx at 162mbps */ + uint32 rx216mbps; /* packets rx at 216 mbps */ + uint32 rx270mbps; /* packets rx at 270 mbps */ + uint32 rx324mbps; /* packets rx at 324 mbps */ + uint32 rx378mbps; /* packets rx at 378 mbps */ + uint32 rx432mbps; /* packets rx at 432 mbps */ + uint32 rx486mbps; /* packets rx at 486 mbps */ + uint32 rx540mbps; /* packets rx at 540 mbps */ + + uint32 rfdisable; /**< count of radio disables */ + + uint32 txexptime; /**< Tx frames suppressed due to timer expiration */ + + uint32 txmpdu_sgi; /**< count for sgi transmit */ + uint32 rxmpdu_sgi; /**< count for sgi received */ + uint32 txmpdu_stbc; /**< count for stbc transmit */ + uint32 rxmpdu_stbc; /**< count for stbc received */ + + uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /**< TKIPReplays */ + uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /**< CCMPReplays */ + uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */ + uint32 fourwayfail_mcst; /**< FourWayHandshakeFailures */ + uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /**< DecryptSuccessCount */ + uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */ + + uint32 dma_hang; /**< count for dma hang */ + uint32 reinit; /**< count for reinit */ + + uint32 pstatxucast; /**< count of ucast frames xmitted on all psta assoc */ + uint32 pstatxnoassoc; /**< count of txnoassoc frames xmitted on all psta assoc */ + uint32 pstarxucast; /**< count of ucast frames received on all psta assoc */ + uint32 pstarxbcmc; /**< count of bcmc frames received on all psta */ + uint32 pstatxbcmc; /**< count of bcmc frames transmitted on all psta */ + + uint32 cso_passthrough; /* hw cso required but passthrough */ + uint32 cso_normal; /**< hw cso hdr for normal process */ + uint32 chained; /**< number of frames chained */ + uint32 chainedsz1; /**< number of chain size 1 frames */ + uint32 unchained; /**< number of frames not chained */ + uint32 maxchainsz; /**< max chain size so far */ + uint32 currchainsz; /**< current chain size */ + uint32 pciereset; /**< Secondary Bus Reset issued by driver */ + uint32 cfgrestore; /**< configspace restore by driver */ + uint32 reinitreason[NREINITREASONCOUNT]; /* reinitreason counters; 0: Unknown reason */ + uint32 rxrtry; + + uint32 rxmpdu_mu; /* Number of MU MPDUs received */ + + /* detailed control/management frames */ + uint32 txbar; /**< Number of TX BAR */ + uint32 rxbar; /**< Number 
of RX BAR */ + uint32 txpspoll; /**< Number of TX PS-poll */ + uint32 rxpspoll; /**< Number of RX PS-poll */ + uint32 txnull; /**< Number of TX NULL_DATA */ + uint32 rxnull; /**< Number of RX NULL_DATA */ + uint32 txqosnull; /**< Number of TX NULL_QoSDATA */ + uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */ + uint32 txassocreq; /**< Number of TX ASSOC request */ + uint32 rxassocreq; /**< Number of RX ASSOC request */ + uint32 txreassocreq; /**< Number of TX REASSOC request */ + uint32 rxreassocreq; /**< Number of RX REASSOC request */ + uint32 txdisassoc; /**< Number of TX DISASSOC */ + uint32 rxdisassoc; /**< Number of RX DISASSOC */ + uint32 txassocrsp; /**< Number of TX ASSOC response */ + uint32 rxassocrsp; /**< Number of RX ASSOC response */ + uint32 txreassocrsp; /**< Number of TX REASSOC response */ + uint32 rxreassocrsp; /**< Number of RX REASSOC response */ + uint32 txauth; /**< Number of TX AUTH */ + uint32 rxauth; /**< Number of RX AUTH */ + uint32 txdeauth; /**< Number of TX DEAUTH */ + uint32 rxdeauth; /**< Number of RX DEAUTH */ + uint32 txprobereq; /**< Number of TX probe request */ + uint32 rxprobereq; /**< Number of RX probe request */ + uint32 txprobersp; /**< Number of TX probe response */ + uint32 rxprobersp; /**< Number of RX probe response */ + uint32 txaction; /**< Number of TX action frame */ + uint32 rxaction; /**< Number of RX action frame */ +} wl_cnt_wlc_t; + +/* MACXSTAT counters for ucodex (corerev >= 64) */ +typedef struct { + uint32 macxsusp; + uint32 m2vmsg; + uint32 v2mmsg; + uint32 mboxout; + uint32 musnd; + uint32 sfb2v; +} wl_cnt_ge64mcxst_v1_t; + +/* MACSTAT counters for ucode (corerev >= 40) */ +typedef struct { + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 txampdu; /**< number of AMPDUs transmitted */ + uint32 txmpdu; /**< number of MPDUs transmitted */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */ + uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastobss; /* number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */ + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< number of receive fifo 1 overflows */ + uint32 rxhlovfl; /**< number of length / header fifo overflows */ + uint32 missbcn_dbg; /**< number of beacon missed to receive */ + uint32 pmqovfl; /**< number of PMQ overflows */ + uint32 rxcgprqfrm; /**< number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */ + uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */ + uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ +} wl_cnt_ge40mcst_v1_t; + +/* MACSTAT counters for ucode (corerev < 40) */ +typedef struct { + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 txampdu; /**< number of AMPDUs transmitted */ + uint32 txmpdu; /**< number of MPDUs transmitted */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. passing parity check) + */ + uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */ + uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */ + uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdtucastobss; /* number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */ + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 dbgoff46; + uint32 dbgoff47; + uint32 dbgoff48; /**< Used for counting txstatus queue overflow (corerev <= 4) */ + uint32 pmqovfl; /**< number of PMQ overflows */ + uint32 rxcgprqfrm; /**< number of received Probe requests that 
made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */ + uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */ + uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 phywatch; + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ +} wl_cnt_lt40mcst_v1_t; + +/* MACSTAT counters for "wl counter" version <= 10 */ +typedef struct { + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */ + uint32 PAD0; /**< number of MPDUs transmitted */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not + * data/control/management + */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdfrmucastmbss; /* number of received DATA frames with good FCS and matching RA */ + uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ + uint32 rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 PAD1; + uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */ + uint32 pmqovfl; /**< number of PMQ overflows */ + uint32 rxcgprqfrm; /**< number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /**< Tx Probe Response Fail. 
AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 rxnack; /**< obsolete */ + uint32 frmscons; /**< obsolete */ + uint32 txnack; /**< obsolete */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 rxtoolate; /**< receive too late */ + uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */ +} wl_cnt_v_le10_mcst_t; + +typedef struct { + uint16 version; /**< see definition of WL_CNT_T_VERSION */ + uint16 length; /**< length of entire structure */ + + /* transmit stat counters */ + uint32 txframe; /**< tx data frames */ + uint32 txbyte; /**< tx data bytes */ + uint32 txretrans; /**< tx mac retransmits */ + uint32 txerror; /**< tx data errors (derived: sum of others) */ + uint32 txctl; /**< tx management frames */ + uint32 txprshort; /**< tx short preamble frames */ + uint32 txserr; /**< tx status errors */ + uint32 txnobuf; /**< tx out of buffers errors */ + uint32 txnoassoc; /**< tx discard because we're not associated */ + uint32 txrunt; /**< tx runt frames */ + uint32 txchit; /**< tx header cache hit (fastpath) */ + uint32 txcmiss; /**< tx header cache miss (slowpath) */ + + /* transmit chip error counters */ + uint32 txuflo; /**< tx fifo underflows */ + uint32 txphyerr; /**< tx phy errors (indicated in tx status) */ + uint32 txphycrs; + + /* receive stat counters */ + uint32 rxframe; /**< rx data frames */ + uint32 rxbyte; /**< rx data bytes */ + uint32 rxerror; /**< rx data errors (derived: sum of others) */ + uint32 rxctl; /**< rx management frames */ + uint32 rxnobuf; /**< rx out of buffers errors */ + uint32 rxnondata; /**< rx non data frames in the data channel errors */ + uint32 rxbadds; /**< rx bad DS errors */ + uint32 rxbadcm; /**< rx bad control or management frames */ + uint32 rxfragerr; /**< rx fragmentation errors */ + uint32 rxrunt; /**< rx runt frames */ + uint32 rxgiant; /**< rx giant frames */ + uint32 rxnoscb; /**< rx no scb error */ + uint32 rxbadproto; /**< rx invalid frames */ + uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */ + uint32 rxbadda; /**< rx frames tossed for invalid da */ + uint32 rxfilter; /**< rx frames filtered out */ + + /* receive chip error counters */ + uint32 rxoflo; /**< rx fifo overflow errors */ + uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */ + + uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */ + uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */ + uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */ + + /* misc counters */ + uint32 dmade; /**< tx/rx dma descriptor errors */ + uint32 dmada; /**< tx/rx dma data errors */ + uint32 dmape; /**< tx/rx dma descriptor protocol errors */ + uint32 reset; /**< reset count */ + uint32 tbtt; /**< cnts the TBTT int's */ + uint32 txdmawar; + uint32 pkt_callback_reg_fail; /**< callbacks register failure */ + + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /**< total number of frames sent, incl. 
Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /**< number of RTS sent out by the MAC */ + uint32 txctsfrm; /**< number of CTS sent out by the MAC */ + uint32 txackfrm; /**< number of ACK frames sent out */ + uint32 txdnlfrm; /**< Not used */ + uint32 txbcnfrm; /**< beacons transmitted */ + uint32 txfunfl[6]; /**< per-fifo tx underflows */ + uint32 rxtoolate; /**< receive too late */ + uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */ + uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not + * data/control/management + */ + uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /**< parity check of the PLCP header failed */ + uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /**< Number of received frames with a good PLCP + * (i.e. passing parity check) + */ + uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */ + uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ + uint32 rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /**< beacons received from member of BSS */ + uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /**< beacons received from other BSS */ + uint32 rxrsptmout; /**< Number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxf0ovfl; /**< Number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */ + uint32 pmqovfl; /**< Number of PMQ overflows */ + uint32 rxcgprqfrm; /**< Number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; 
/**< Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /**< Number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 rxnack; /**< obsolete */ + uint32 frmscons; /**< obsolete */ + uint32 txnack; /**< obsolete */ + uint32 rxback; /**< blockack rxcnt */ + uint32 txback; /**< blockack txcnt */ + + /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */ + uint32 txfrag; /**< dot11TransmittedFragmentCount */ + uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */ + uint32 txfail; /**< dot11FailedCount */ + uint32 txretry; /**< dot11RetryCount */ + uint32 txretrie; /**< dot11MultipleRetryCount */ + uint32 rxdup; /**< dot11FrameduplicateCount */ + uint32 txrts; /**< dot11RTSSuccessCount */ + uint32 txnocts; /**< dot11RTSFailureCount */ + uint32 txnoack; /**< dot11ACKFailureCount */ + uint32 rxfrag; /**< dot11ReceivedFragmentCount */ + uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /**< dot11FCSErrorCount */ + uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) */ + uint32 rxundec; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /**< TKIPReplays */ + uint32 ccmpfmterr; /**< CCMPFormatErrors */ + uint32 ccmpreplay; /**< CCMPReplays */ + uint32 ccmpundec; /**< CCMPDecryptErrors */ + uint32 fourwayfail; /**< FourWayHandshakeFailures */ + uint32 wepundec; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr; /**< dot11WEPICVErrorCount */ + uint32 decsuccess; /**< DecryptSuccessCount */ + uint32 tkipicverr; /**< TKIPICVErrorCount */ + uint32 wepexcluded; /**< dot11WEPExcludedCount */ + + uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */ + uint32 psmwds; /**< Count PSM watchdogs */ + uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */ + + /* MBSS counters, AP only */ + uint32 prq_entries_handled; /**< PRQ entries read in */ + uint32 prq_undirected_entries; /**< which were bcast bss & ssid */ + uint32 prq_bad_entries; /**< which could not be translated to info */ + uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... 
*/ + uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */ + uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /* packets rx at 1Mbps */ + uint32 rx2mbps; /* packets rx at 2Mbps */ + uint32 rx5mbps5; /* packets rx at 5.5Mbps */ + uint32 rx6mbps; /* packets rx at 6Mbps */ + uint32 rx9mbps; /* packets rx at 9Mbps */ + uint32 rx11mbps; /* packets rx at 11Mbps */ + uint32 rx12mbps; /* packets rx at 12Mbps */ + uint32 rx18mbps; /* packets rx at 18Mbps */ + uint32 rx24mbps; /* packets rx at 24Mbps */ + uint32 rx36mbps; /* packets rx at 36Mbps */ + uint32 rx48mbps; /* packets rx at 48Mbps */ + uint32 rx54mbps; /* packets rx at 54Mbps */ + uint32 rx108mbps; /* packets rx at 108mbps */ + uint32 rx162mbps; /* packets rx at 162mbps */ + uint32 rx216mbps; /* packets rx at 216 mbps */ + uint32 rx270mbps; /* packets rx at 270 mbps */ + uint32 rx324mbps; /* packets rx at 324 mbps */ + uint32 rx378mbps; /* packets rx at 378 mbps */ + uint32 rx432mbps; /* packets rx at 432 mbps */ + uint32 rx486mbps; /* packets rx at 486 mbps */ + uint32 rx540mbps; /* packets rx at 540 mbps */ + + /* pkteng rx frame stats */ + uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ + + uint32 rfdisable; /**< count of radio disables */ + uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */ + uint32 bphy_badplcp; + + uint32 txexptime; /**< Tx frames suppressed due to timer expiration */ + + uint32 txmpdu_sgi; /**< count for sgi transmit */ + uint32 rxmpdu_sgi; /**< count for sgi received */ + uint32 txmpdu_stbc; /**< count for stbc transmit */ + uint32 rxmpdu_stbc; /**< count for stbc received */ + + uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */ + uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /**< TKIPReplays */ + uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /**< CCMPReplays */ + uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */ + uint32 fourwayfail_mcst; /**< FourWayHandshakeFailures */ + uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /**< DecryptSuccessCount */ + uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */ + + uint32 dma_hang; /**< count for dma hang */ + uint32 reinit; /**< count for reinit */ + + uint32 pstatxucast; /**< count of ucast frames xmitted on all psta assoc */ + uint32 pstatxnoassoc; /**< count of txnoassoc frames xmitted on all psta assoc */ + uint32 pstarxucast; /**< count of ucast frames received on all psta assoc */ + uint32 pstarxbcmc; /**< count of bcmc frames received on all psta */ + uint32 pstatxbcmc; /**< count of bcmc frames transmitted on all psta */ + + uint32 cso_passthrough; /* hw cso required but passthrough */ + uint32 cso_normal; /**< hw cso hdr for normal process */ + uint32 chained; /**< number of frames chained */ + uint32 chainedsz1; /**< number of chain size 1 frames */ + uint32 unchained; /**< number of frames not chained */ + uint32 maxchainsz; /**< max chain size so far */ + uint32 currchainsz; /**< current chain size */ + uint32 rxdrop20s; /**< drop secondary cnt */ + uint32 pciereset; /**< Secondary Bus Reset issued by driver */ + uint32 cfgrestore; /**< 
configspace restore by driver */ + uint32 reinitreason[NREINITREASONCOUNT]; /* reinitreason counters; 0: Unknown reason */ + uint32 rxrtry; /**< num of received packets with retry bit on */ + uint32 txmpdu; /**< macstat cnt only valid in ver 11. number of MPDUs txed. */ + uint32 rxnodelim; /**< macstat cnt only valid in ver 11. + * number of occasions that no valid delimiter is detected + * by ampdu parser. + */ + uint32 rxmpdu_mu; /* Number of MU MPDUs received */ + + /* detailed control/management frames */ + uint32 txbar; /**< Number of TX BAR */ + uint32 rxbar; /**< Number of RX BAR */ + uint32 txpspoll; /**< Number of TX PS-poll */ + uint32 rxpspoll; /**< Number of RX PS-poll */ + uint32 txnull; /**< Number of TX NULL_DATA */ + uint32 rxnull; /**< Number of RX NULL_DATA */ + uint32 txqosnull; /**< Number of TX NULL_QoSDATA */ + uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */ + uint32 txassocreq; /**< Number of TX ASSOC request */ + uint32 rxassocreq; /**< Number of RX ASSOC request */ + uint32 txreassocreq; /**< Number of TX REASSOC request */ + uint32 rxreassocreq; /**< Number of RX REASSOC request */ + uint32 txdisassoc; /**< Number of TX DISASSOC */ + uint32 rxdisassoc; /**< Number of RX DISASSOC */ + uint32 txassocrsp; /**< Number of TX ASSOC response */ + uint32 rxassocrsp; /**< Number of RX ASSOC response */ + uint32 txreassocrsp; /**< Number of TX REASSOC response */ + uint32 rxreassocrsp; /**< Number of RX REASSOC response */ + uint32 txauth; /**< Number of TX AUTH */ + uint32 rxauth; /**< Number of RX AUTH */ + uint32 txdeauth; /**< Number of TX DEAUTH */ + uint32 rxdeauth; /**< Number of RX DEAUTH */ + uint32 txprobereq; /**< Number of TX probe request */ + uint32 rxprobereq; /**< Number of RX probe request */ + uint32 txprobersp; /**< Number of TX probe response */ + uint32 rxprobersp; /**< Number of RX probe response */ + uint32 txaction; /**< Number of TX action frame */ + uint32 rxaction; /**< Number of RX action frame */ + +} wl_cnt_ver_11_t; + +typedef struct { + uint16 version; /* see definition of WL_CNT_T_VERSION */ + uint16 length; /* length of entire structure */ + + /* transmit stat counters */ + uint32 txframe; /* tx data frames */ + uint32 txbyte; /* tx data bytes */ + uint32 txretrans; /* tx mac retransmits */ + uint32 txerror; /* tx data errors (derived: sum of others) */ + uint32 txctl; /* tx management frames */ + uint32 txprshort; /* tx short preamble frames */ + uint32 txserr; /* tx status errors */ + uint32 txnobuf; /* tx out of buffers errors */ + uint32 txnoassoc; /* tx discard because we're not associated */ + uint32 txrunt; /* tx runt frames */ + uint32 txchit; /* tx header cache hit (fastpath) */ + uint32 txcmiss; /* tx header cache miss (slowpath) */ + + /* transmit chip error counters */ + uint32 txuflo; /* tx fifo underflows */ + uint32 txphyerr; /* tx phy errors (indicated in tx status) */ + uint32 txphycrs; + + /* receive stat counters */ + uint32 rxframe; /* rx data frames */ + uint32 rxbyte; /* rx data bytes */ + uint32 rxerror; /* rx data errors (derived: sum of others) */ + uint32 rxctl; /* rx management frames */ + uint32 rxnobuf; /* rx out of buffers errors */ + uint32 rxnondata; /* rx non data frames in the data channel errors */ + uint32 rxbadds; /* rx bad DS errors */ + uint32 rxbadcm; /* rx bad control or management frames */ + uint32 rxfragerr; /* rx fragmentation errors */ + uint32 rxrunt; /* rx runt frames */ + uint32 rxgiant; /* rx giant frames */ + uint32 rxnoscb; /* rx no scb error */ + uint32 rxbadproto; /* rx invalid 
frames */ + uint32 rxbadsrcmac; /* rx frames with Invalid Src Mac */ + uint32 rxbadda; /* rx frames tossed for invalid da */ + uint32 rxfilter; /* rx frames filtered out */ + + /* receive chip error counters */ + uint32 rxoflo; /* rx fifo overflow errors */ + uint32 rxuflo[NFIFO]; /* rx dma descriptor underflow errors */ + + uint32 d11cnt_txrts_off; /* d11cnt txrts value when reset d11cnt */ + uint32 d11cnt_rxcrc_off; /* d11cnt rxcrc value when reset d11cnt */ + uint32 d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */ + + /* misc counters */ + uint32 dmade; /* tx/rx dma descriptor errors */ + uint32 dmada; /* tx/rx dma data errors */ + uint32 dmape; /* tx/rx dma descriptor protocol errors */ + uint32 reset; /* reset count */ + uint32 tbtt; /* cnts the TBTT int's */ + uint32 txdmawar; + uint32 pkt_callback_reg_fail; /* callbacks register failure */ + + /* MAC counters: 32-bit version of d11.h's macstat_t */ + uint32 txallfrm; /* total number of frames sent, incl. Data, ACK, RTS, CTS, + * Control Management (includes retransmissions) + */ + uint32 txrtsfrm; /* number of RTS sent out by the MAC */ + uint32 txctsfrm; /* number of CTS sent out by the MAC */ + uint32 txackfrm; /* number of ACK frames sent out */ + uint32 txdnlfrm; /* Not used */ + uint32 txbcnfrm; /* beacons transmitted */ + uint32 txfunfl[6]; /* per-fifo tx underflows */ + uint32 rxtoolate; /* receive too late */ + uint32 txfbw; /* transmit at fallback bw (dynamic bw) */ + uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS + * or BCN) + */ + uint32 txphyerror; /* Transmit phy error, type of error is reported in tx-status for + * driver enqueued frames + */ + uint32 rxfrmtoolong; /* Received frame longer than legal limit (2346 bytes) */ + uint32 rxfrmtooshrt; /* Received frame did not contain enough bytes for its frame type */ + uint32 rxinvmachdr; /* Either the protocol version != 0 or frame type not + * data/control/management + */ + uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */ + uint32 rxbadplcp; /* parity check of the PLCP header failed */ + uint32 rxcrsglitch; /* PHY was able to correlate the preamble but not the header */ + uint32 rxstrt; /* Number of received frames with a good PLCP + * (i.e. 
passing parity check) + */ + uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */ + uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ + uint32 rxcfrmucast; /* number of received CNTRL frames with good FCS and matching RA */ + uint32 rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */ + uint32 rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */ + uint32 rxackucast; /* number of ucast ACKS received (good FCS) */ + uint32 rxdfrmocast; /* number of received DATA frames (good FCS and not matching RA) */ + uint32 rxmfrmocast; /* number of received MGMT frames (good FCS and not matching RA) */ + uint32 rxcfrmocast; /* number of received CNTRL frame (good FCS and not matching RA) */ + uint32 rxrtsocast; /* number of received RTS not addressed to the MAC */ + uint32 rxctsocast; /* number of received CTS not addressed to the MAC */ + uint32 rxdfrmmcast; /* number of RX Data multicast frames received by the MAC */ + uint32 rxmfrmmcast; /* number of RX Management multicast frames received by the MAC */ + uint32 rxcfrmmcast; /* number of RX Control multicast frames received by the MAC + * (unlikely to see these) + */ + uint32 rxbeaconmbss; /* beacons received from member of BSS */ + uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from + * other BSS (WDS FRAME) + */ + uint32 rxbeaconobss; /* beacons received from other BSS */ + uint32 rxrsptmout; /* Number of response timeouts for transmitted frames + * expecting a response + */ + uint32 bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */ + uint32 rxf0ovfl; /* Number of receive fifo 0 overflows */ + uint32 rxf1ovfl; /* Number of receive fifo 1 overflows (obsolete) */ + uint32 rxf2ovfl; /* Number of receive fifo 2 overflows (obsolete) */ + uint32 txsfovfl; /* Number of transmit status fifo overflows (obsolete) */ + uint32 pmqovfl; /* Number of PMQ overflows */ + uint32 rxcgprqfrm; /* Number of received Probe requests that made it into + * the PRQ fifo + */ + uint32 rxcgprsqovfl; /* Rx Probe Request Que overflow in the AP */ + uint32 txcgprsfail; /* Tx Probe Response Fail. AP sent probe response but did + * not get ACK + */ + uint32 txcgprssuc; /* Tx Probe Response Success (ACK was received) */ + uint32 prs_timeout; /* Number of probe requests that were dropped from the PRQ + * fifo because a probe response could not be sent out within + * the time limit defined in M_PRS_MAXTIME + */ + uint32 rxnack; + uint32 frmscons; + uint32 txnack; /* obsolete */ + uint32 rxback; /* blockack rxcnt */ + uint32 txback; /* blockack txcnt */ + + /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */ + uint32 txfrag; /* dot11TransmittedFragmentCount */ + uint32 txmulti; /* dot11MulticastTransmittedFrameCount */ + uint32 txfail; /* dot11FailedCount */ + uint32 txretry; /* dot11RetryCount */ + uint32 txretrie; /* dot11MultipleRetryCount */ + uint32 rxdup; /* dot11FrameduplicateCount */ + uint32 txrts; /* dot11RTSSuccessCount */ + uint32 txnocts; /* dot11RTSFailureCount */ + uint32 txnoack; /* dot11ACKFailureCount */ + uint32 rxfrag; /* dot11ReceivedFragmentCount */ + uint32 rxmulti; /* dot11MulticastReceivedFrameCount */ + uint32 rxcrc; /* dot11FCSErrorCount */ + uint32 txfrmsnt; /* dot11TransmittedFrameCount (bogus MIB?) 
*/ + uint32 rxundec; /* dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill; /* TKIPLocalMICFailures */ + uint32 tkipcntrmsr; /* TKIPCounterMeasuresInvoked */ + uint32 tkipreplay; /* TKIPReplays */ + uint32 ccmpfmterr; /* CCMPFormatErrors */ + uint32 ccmpreplay; /* CCMPReplays */ + uint32 ccmpundec; /* CCMPDecryptErrors */ + uint32 fourwayfail; /* FourWayHandshakeFailures */ + uint32 wepundec; /* dot11WEPUndecryptableCount */ + uint32 wepicverr; /* dot11WEPICVErrorCount */ + uint32 decsuccess; /* DecryptSuccessCount */ + uint32 tkipicverr; /* TKIPICVErrorCount */ + uint32 wepexcluded; /* dot11WEPExcludedCount */ + + uint32 rxundec_mcst; /* dot11WEPUndecryptableCount */ + + /* WPA2 counters (see rxundec for DecryptFailureCount) */ + uint32 tkipmicfaill_mcst; /* TKIPLocalMICFailures */ + uint32 tkipcntrmsr_mcst; /* TKIPCounterMeasuresInvoked */ + uint32 tkipreplay_mcst; /* TKIPReplays */ + uint32 ccmpfmterr_mcst; /* CCMPFormatErrors */ + uint32 ccmpreplay_mcst; /* CCMPReplays */ + uint32 ccmpundec_mcst; /* CCMPDecryptErrors */ + uint32 fourwayfail_mcst; /* FourWayHandshakeFailures */ + uint32 wepundec_mcst; /* dot11WEPUndecryptableCount */ + uint32 wepicverr_mcst; /* dot11WEPICVErrorCount */ + uint32 decsuccess_mcst; /* DecryptSuccessCount */ + uint32 tkipicverr_mcst; /* TKIPICVErrorCount */ + uint32 wepexcluded_mcst; /* dot11WEPExcludedCount */ + + uint32 txchanrej; /* Tx frames suppressed due to channel rejection */ + uint32 txexptime; /* Tx frames suppressed due to timer expiration */ + uint32 psmwds; /* Count PSM watchdogs */ + uint32 phywatchdog; /* Count Phy watchdogs (triggered by ucode) */ + + /* MBSS counters, AP only */ + uint32 prq_entries_handled; /* PRQ entries read in */ + uint32 prq_undirected_entries; /* which were bcast bss & ssid */ + uint32 prq_bad_entries; /* which could not be translated to info */ + uint32 atim_suppress_count; /* TX suppressions on ATIM fifo */ + uint32 bcn_template_not_ready; /* Template marked in use on send bcn ... 
*/ + uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */ + uint32 late_tbtt_dpc; /* TBTT DPC did not happen in time */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /* packets rx at 1Mbps */ + uint32 rx2mbps; /* packets rx at 2Mbps */ + uint32 rx5mbps5; /* packets rx at 5.5Mbps */ + uint32 rx6mbps; /* packets rx at 6Mbps */ + uint32 rx9mbps; /* packets rx at 9Mbps */ + uint32 rx11mbps; /* packets rx at 11Mbps */ + uint32 rx12mbps; /* packets rx at 12Mbps */ + uint32 rx18mbps; /* packets rx at 18Mbps */ + uint32 rx24mbps; /* packets rx at 24Mbps */ + uint32 rx36mbps; /* packets rx at 36Mbps */ + uint32 rx48mbps; /* packets rx at 48Mbps */ + uint32 rx54mbps; /* packets rx at 54Mbps */ + uint32 rx108mbps; /* packets rx at 108mbps */ + uint32 rx162mbps; /* packets rx at 162mbps */ + uint32 rx216mbps; /* packets rx at 216 mbps */ + uint32 rx270mbps; /* packets rx at 270 mbps */ + uint32 rx324mbps; /* packets rx at 324 mbps */ + uint32 rx378mbps; /* packets rx at 378 mbps */ + uint32 rx432mbps; /* packets rx at 432 mbps */ + uint32 rx486mbps; /* packets rx at 486 mbps */ + uint32 rx540mbps; /* packets rx at 540 mbps */ + + /* pkteng rx frame stats */ + uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ + uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ + + uint32 rfdisable; /* count of radio disables */ + uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */ + uint32 bphy_badplcp; + + uint32 txmpdu_sgi; /* count for sgi transmit */ + uint32 rxmpdu_sgi; /* count for sgi received */ + uint32 txmpdu_stbc; /* count for stbc transmit */ + uint32 rxmpdu_stbc; /* count for stbc received */ + + uint32 rxdrop20s; /* drop secondary cnt */ +} wl_cnt_ver_6_t; + +#define WL_DELTA_STATS_T_VERSION 2 /* current version of wl_delta_stats_t struct */ + +typedef struct { + uint16 version; /* see definition of WL_DELTA_STATS_T_VERSION */ + uint16 length; /* length of entire structure */ + + /* transmit stat counters */ + uint32 txframe; /* tx data frames */ + uint32 txbyte; /* tx data bytes */ + uint32 txretrans; /* tx mac retransmits */ + uint32 txfail; /* tx failures */ + + /* receive stat counters */ + uint32 rxframe; /* rx data frames */ + uint32 rxbyte; /* rx data bytes */ + + /* per-rate receive stat counters */ + uint32 rx1mbps; /* packets rx at 1Mbps */ + uint32 rx2mbps; /* packets rx at 2Mbps */ + uint32 rx5mbps5; /* packets rx at 5.5Mbps */ + uint32 rx6mbps; /* packets rx at 6Mbps */ + uint32 rx9mbps; /* packets rx at 9Mbps */ + uint32 rx11mbps; /* packets rx at 11Mbps */ + uint32 rx12mbps; /* packets rx at 12Mbps */ + uint32 rx18mbps; /* packets rx at 18Mbps */ + uint32 rx24mbps; /* packets rx at 24Mbps */ + uint32 rx36mbps; /* packets rx at 36Mbps */ + uint32 rx48mbps; /* packets rx at 48Mbps */ + uint32 rx54mbps; /* packets rx at 54Mbps */ + uint32 rx108mbps; /* packets rx at 108mbps */ + uint32 rx162mbps; /* packets rx at 162mbps */ + uint32 rx216mbps; /* packets rx at 216 mbps */ + uint32 rx270mbps; /* packets rx at 270 mbps */ + uint32 rx324mbps; /* packets rx at 324 mbps */ + uint32 rx378mbps; /* packets rx at 378 mbps */ + uint32 rx432mbps; /* packets rx at 432 mbps */ + uint32 rx486mbps; /* packets rx at 486 mbps */ + uint32 rx540mbps; /* packets rx at 540 mbps */ + + /* phy stats */ + uint32 rxbadplcp; + uint32 rxcrsglitch; + uint32 bphy_rxcrsglitch; + uint32 bphy_badplcp; + +} wl_delta_stats_t; + +typedef struct { + uint32 packets; + uint32 bytes; +} wl_traffic_stats_t; + +typedef struct { + uint16 version; /* see 
definition of WL_WME_CNT_VERSION */ + uint16 length; /* length of entire structure */ + + wl_traffic_stats_t tx[AC_COUNT]; /* Packets transmitted */ + wl_traffic_stats_t tx_failed[AC_COUNT]; /* Packets dropped or failed to transmit */ + wl_traffic_stats_t rx[AC_COUNT]; /* Packets received */ + wl_traffic_stats_t rx_failed[AC_COUNT]; /* Packets failed to receive */ + + wl_traffic_stats_t forward[AC_COUNT]; /* Packets forwarded by AP */ + + wl_traffic_stats_t tx_expired[AC_COUNT]; /* packets dropped due to lifetime expiry */ + +} wl_wme_cnt_t; + +struct wl_msglevel2 { + uint32 low; + uint32 high; +}; + +typedef struct wl_mkeep_alive_pkt { + uint16 version; /* Version for mkeep_alive */ + uint16 length; /* length of fixed parameters in the structure */ + uint32 period_msec; + uint16 len_bytes; + uint8 keep_alive_id; /* 0 - 3 for N = 4 */ + uint8 data[1]; +} wl_mkeep_alive_pkt_t; + +#define WL_MKEEP_ALIVE_VERSION 1 +#define WL_MKEEP_ALIVE_FIXED_LEN OFFSETOF(wl_mkeep_alive_pkt_t, data) +#define WL_MKEEP_ALIVE_PRECISION 500 + +/* TCP Keep-Alive conn struct */ +typedef struct wl_mtcpkeep_alive_conn_pkt { + struct ether_addr saddr; /* src mac address */ + struct ether_addr daddr; /* dst mac address */ + struct ipv4_addr sipaddr; /* source IP addr */ + struct ipv4_addr dipaddr; /* dest IP addr */ + uint16 sport; /* src port */ + uint16 dport; /* dest port */ + uint32 seq; /* seq number */ + uint32 ack; /* ACK number */ + uint16 tcpwin; /* TCP window */ +} wl_mtcpkeep_alive_conn_pkt_t; + +/* TCP Keep-Alive interval struct */ +typedef struct wl_mtcpkeep_alive_timers_pkt { + uint16 interval; /* interval timer */ + uint16 retry_interval; /* retry_interval timer */ + uint16 retry_count; /* retry_count */ +} wl_mtcpkeep_alive_timers_pkt_t; + +typedef struct wake_info { + uint32 wake_reason; + uint32 wake_info_len; /* size of packet */ + uchar packet[1]; +} wake_info_t; + +typedef struct wake_pkt { + uint32 wake_pkt_len; /* size of packet */ + uchar packet[1]; +} wake_pkt_t; + + +#define WL_MTCPKEEP_ALIVE_VERSION 1 + +#ifdef WLBA + +#define WLC_BA_CNT_VERSION 1 /* current version of wlc_ba_cnt_t */ + +/* block ack related stats */ +typedef struct wlc_ba_cnt { + uint16 version; /* WLC_BA_CNT_VERSION */ + uint16 length; /* length of entire structure */ + + /* transmit stat counters */ + uint32 txpdu; /* pdus sent */ + uint32 txsdu; /* sdus sent */ + uint32 txfc; /* tx side flow controlled packets */ + uint32 txfci; /* tx side flow control initiated */ + uint32 txretrans; /* retransmitted pdus */ + uint32 txbatimer; /* ba resend due to timer */ + uint32 txdrop; /* dropped packets */ + uint32 txaddbareq; /* addba req sent */ + uint32 txaddbaresp; /* addba resp sent */ + uint32 txdelba; /* delba sent */ + uint32 txba; /* ba sent */ + uint32 txbar; /* bar sent */ + uint32 txpad[4]; /* future */ + + /* receive side counters */ + uint32 rxpdu; /* pdus recd */ + uint32 rxqed; /* pdus buffered before sending up */ + uint32 rxdup; /* duplicate pdus */ + uint32 rxnobuf; /* pdus discarded due to no buf */ + uint32 rxaddbareq; /* addba req recd */ + uint32 rxaddbaresp; /* addba resp recd */ + uint32 rxdelba; /* delba recd */ + uint32 rxba; /* ba recd */ + uint32 rxbar; /* bar recd */ + uint32 rxinvba; /* invalid ba recd */ + uint32 rxbaholes; /* ba recd with holes */ + uint32 rxunexp; /* unexpected packets */ + uint32 rxpad[4]; /* future */ +} wlc_ba_cnt_t; +#endif /* WLBA */ + +/* structure for per-tid ampdu control */ +struct ampdu_tid_control { + uint8 tid; /* tid */ + uint8 enable; /* enable/disable */ +}; + +/* 
struct for ampdu tx/rx aggregation control */
+struct ampdu_aggr {
+ int8 aggr_override; /* aggr overridden by the dongle. Not to be set by host. */
+ uint16 conf_TID_bmap; /* bitmap of TIDs to configure */
+ uint16 enab_TID_bmap; /* enable/disable per TID */
+};
+
+/* structure for identifying ea/tid for sending addba/delba */
+struct ampdu_ea_tid {
+ struct ether_addr ea; /* Station address */
+ uint8 tid; /* tid */
+ uint8 initiator; /* 0 is recipient, 1 is originator */
+};
+/* structure for identifying retry/tid for retry_limit_tid/rr_retry_limit_tid */
+struct ampdu_retry_tid {
+ uint8 tid; /* tid */
+ uint8 retry; /* retry value */
+};
+
+#define BDD_FNAME_LEN 32 /* Max length of friendly name */
+typedef struct bdd_fname {
+ uint8 len; /* length of friendly name */
+ uchar name[BDD_FNAME_LEN]; /* friendly name */
+} bdd_fname_t;
+
+/* structure for addts arguments */
+/* For ioctls that take a list of TSPECs */
+struct tslist {
+ int count; /* number of tspecs */
+ struct tsinfo_arg tsinfo[1]; /* variable length array of tsinfo */
+};
+
+#ifdef WLTDLS
+/* structure for tdls iovars */
+typedef struct tdls_iovar {
+ struct ether_addr ea; /* Station address */
+ uint8 mode; /* mode: depends on iovar */
+ chanspec_t chanspec;
+ uint32 pad; /* future */
+} tdls_iovar_t;
+
+#define TDLS_WFD_IE_SIZE 512
+/* structure for tdls wfd ie */
+typedef struct tdls_wfd_ie_iovar {
+ struct ether_addr ea; /* Station address */
+ uint8 mode;
+ uint16 length;
+ uint8 data[TDLS_WFD_IE_SIZE];
+} tdls_wfd_ie_iovar_t;
+#endif /* WLTDLS */
+
+/* structure for addts/delts arguments */
+typedef struct tspec_arg {
+ uint16 version; /* see definition of TSPEC_ARG_VERSION */
+ uint16 length; /* length of entire structure */
+ uint flag; /* bit field */
+ /* TSPEC Arguments */
+ struct tsinfo_arg tsinfo; /* TS Info bit field */
+ uint16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */
+ uint16 max_msdu_size; /* Maximum MSDU Size (bytes) */
+ uint min_srv_interval; /* Minimum Service Interval (us) */
+ uint max_srv_interval; /* Maximum Service Interval (us) */
+ uint inactivity_interval; /* Inactivity Interval (us) */
+ uint suspension_interval; /* Suspension Interval (us) */
+ uint srv_start_time; /* Service Start Time (us) */
+ uint min_data_rate; /* Minimum Data Rate (bps) */
+ uint mean_data_rate; /* Mean Data Rate (bps) */
+ uint peak_data_rate; /* Peak Data Rate (bps) */
+ uint max_burst_size; /* Maximum Burst Size (bytes) */
+ uint delay_bound; /* Delay Bound (us) */
+ uint min_phy_rate; /* Minimum PHY Rate (bps) */
+ uint16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0 to 8.0) */
+ uint16 medium_time; /* Medium Time (32 us/s periods) */
+ uint8 dialog_token; /* dialog token */
+} tspec_arg_t;
+
+/* tspec arg for desired station */
+typedef struct tspec_per_sta_arg {
+ struct ether_addr ea;
+ struct tspec_arg ts;
+} tspec_per_sta_arg_t;
+
+/* structure for max bandwidth for each access category */
+typedef struct wme_max_bandwidth {
+ uint32 ac[AC_COUNT]; /* max bandwidth for each access category */
+} wme_max_bandwidth_t;
+
+#define WL_WME_MBW_PARAMS_IO_BYTES (sizeof(wme_max_bandwidth_t))
+
+#define TSPEC_ARG_VERSION 2 /* current version of wl_tspec_arg_t struct */
+#define TSPEC_ARG_LENGTH 55 /* argument length from tsinfo to medium_time */
+#define TSPEC_DEFAULT_DIALOG_TOKEN 42 /* default dialog token */
+#define TSPEC_DEFAULT_SBW_FACTOR 0x3000 /* default surplus bw */
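The surplus_bw field above packs a 1.0-to-8.0 multiplier into a uint16. A minimal sketch of the conversion, assuming the field uses the 3.13 fixed-point encoding of the 802.11 TSPEC Surplus Bandwidth Allowance (under that assumption the default TSPEC_DEFAULT_SBW_FACTOR of 0x3000 decodes to 1.5); the helper names are illustrative, not part of this header:

	/* Illustrative helpers, assuming 3.13 fixed point: 3 integer bits,
	 * 13 fraction bits, so 1.0 == 1 << 13 == 8192.
	 */
	#include <stdint.h>

	static uint16_t sbw_encode(float factor)	/* e.g. 1.5 -> 0x3000 */
	{
		return (uint16_t)(factor * 8192.0f + 0.5f);
	}

	static float sbw_decode(uint16_t sbw)		/* e.g. 0x3000 -> 1.5 */
	{
		return (float)sbw / 8192.0f;
	}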
+
+#define WL_WOWL_KEEPALIVE_MAX_PACKET_SIZE 80
+#define WLC_WOWL_MAX_KEEPALIVE 2
+
+/* Packet lifetime configuration per ac */
+typedef struct wl_lifetime {
+ uint32 ac; /* access class */
+ uint32 lifetime; /* Packet lifetime value in ms */
+} wl_lifetime_t;
+
+
+/* Channel Switch Announcement param */
+typedef struct wl_chan_switch {
+ uint8 mode; /* value 0 or 1 */
+ uint8 count; /* count # of beacons before switching */
+ chanspec_t chspec; /* chanspec */
+ uint8 reg; /* regulatory class */
+ uint8 frame_type; /* csa frame type, unicast or broadcast */
+} wl_chan_switch_t;
+
+enum {
+ PFN_LIST_ORDER,
+ PFN_RSSI
+};
+
+enum {
+ DISABLE,
+ ENABLE
+};
+
+enum {
+ OFF_ADAPT,
+ SMART_ADAPT,
+ STRICT_ADAPT,
+ SLOW_ADAPT
+};
+
+#define SORT_CRITERIA_BIT 0
+#define AUTO_NET_SWITCH_BIT 1
+#define ENABLE_BKGRD_SCAN_BIT 2
+#define IMMEDIATE_SCAN_BIT 3
+#define AUTO_CONNECT_BIT 4
+#define ENABLE_BD_SCAN_BIT 5
+#define ENABLE_ADAPTSCAN_BIT 6
+#define IMMEDIATE_EVENT_BIT 8
+#define SUPPRESS_SSID_BIT 9
+#define ENABLE_NET_OFFLOAD_BIT 10
+/* report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_BIT 11
+
+#define SORT_CRITERIA_MASK 0x0001
+#define AUTO_NET_SWITCH_MASK 0x0002
+#define ENABLE_BKGRD_SCAN_MASK 0x0004
+#define IMMEDIATE_SCAN_MASK 0x0008
+#define AUTO_CONNECT_MASK 0x0010
+
+#define ENABLE_BD_SCAN_MASK 0x0020
+#define ENABLE_ADAPTSCAN_MASK 0x00c0
+#define IMMEDIATE_EVENT_MASK 0x0100
+#define SUPPRESS_SSID_MASK 0x0200
+#define ENABLE_NET_OFFLOAD_MASK 0x0400
+/* report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_MASK 0x0800
+
+#define PFN_VERSION 2
+#define PFN_SCANRESULT_VERSION 1
+#define MAX_PFN_LIST_COUNT 16
+
+#define PFN_COMPLETE 1
+#define PFN_INCOMPLETE 0
+
+#define DEFAULT_BESTN 2
+#define DEFAULT_MSCAN 0
+#define DEFAULT_REPEAT 10
+#define DEFAULT_EXP 2
+
+#define PFN_PARTIAL_SCAN_BIT 0
+#define PFN_PARTIAL_SCAN_MASK 1
+#define PFN_SWC_RSSI_WINDOW_MAX 8
+#define PFN_SWC_MAX_NUM_APS 16
+#define PFN_HOTLIST_MAX_NUM_APS 64
+
+/* PFN network info structure */
+typedef struct wl_pfn_subnet_info {
+ struct ether_addr BSSID;
+ uint8 channel; /* channel number only */
+ uint8 SSID_len;
+ uint8 SSID[32];
+} wl_pfn_subnet_info_t;
+
+typedef struct wl_pfn_net_info {
+ wl_pfn_subnet_info_t pfnsubnet;
+ int16 RSSI; /* receive signal strength (in dBm) */
+ uint16 timestamp; /* age in seconds */
+} wl_pfn_net_info_t;
+
+typedef struct wl_pfn_lnet_info {
+ wl_pfn_subnet_info_t pfnsubnet; /* BSSID + channel + SSID len + SSID */
+ uint16 flags; /* partial scan, etc */
+ int16 RSSI; /* receive signal strength (in dBm) */
+ uint32 timestamp; /* age in milliseconds */
+ uint16 rtt0; /* estimated distance to this AP in centimeters */
+ uint16 rtt1; /* standard deviation of the distance to this AP in centimeters */
+} wl_pfn_lnet_info_t;
+
+typedef struct wl_pfn_lscanresults {
+ uint32 version;
+ uint32 status;
+ uint32 count;
+ wl_pfn_lnet_info_t netinfo[1];
+} wl_pfn_lscanresults_t;
+
+/* this is used to report on 1-* pfn scan results */
+typedef struct wl_pfn_scanresults {
+ uint32 version;
+ uint32 status;
+ uint32 count;
+ wl_pfn_net_info_t netinfo[1];
+} wl_pfn_scanresults_t;
+
+typedef struct wl_pfn_significant_net {
+ uint16 flags;
+ uint16 channel;
+ struct ether_addr BSSID;
+ int8 rssi[PFN_SWC_RSSI_WINDOW_MAX];
+} wl_pfn_significant_net_t;
+
+
+typedef struct wl_pfn_swc_results {
+ uint32 version;
+ uint32 pkt_count;
+ uint32 total_count;
+ wl_pfn_significant_net_t list[1];
+} wl_pfn_swc_results_t;
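wl_pfn_scanresults_t above is a count-prefixed, variable-length report: count entries of wl_pfn_net_info_t follow the fixed header. A minimal host-side sketch of walking it, assuming buf was already filled in by the PFN results query (the dump function and its printf output are illustrative only):

	/* Illustrative only: iterate the variable-length netinfo[] array. */
	#include <stdio.h>

	static void pfn_dump_results(const void *buf)
	{
		const wl_pfn_scanresults_t *res = (const wl_pfn_scanresults_t *)buf;
		uint32 i;

		if (res->version != PFN_SCANRESULT_VERSION)
			return;		/* unknown layout, do not parse */

		/* status is PFN_COMPLETE or PFN_INCOMPLETE */
		for (i = 0; i < res->count; i++) {
			const wl_pfn_net_info_t *ni = &res->netinfo[i];

			printf("ch %u rssi %d age %us ssid_len %u\n",
			       ni->pfnsubnet.channel, ni->RSSI,
			       ni->timestamp, ni->pfnsubnet.SSID_len);
		}
	}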
+
+/* used to report exactly one scan result, plus detailed scan info in bss_info */
+typedef struct wl_pfn_scanresult {
+ uint32 version;
+ uint32 status;
+ uint32 count;
+ wl_pfn_net_info_t netinfo;
+ wl_bss_info_t bss_info;
+} wl_pfn_scanresult_t;
+
+/* PFN data structure */
+typedef struct wl_pfn_param {
+ int32 version; /* PNO parameters version */
+ int32 scan_freq; /* Scan frequency */
+ int32 lost_network_timeout; /* Timeout in sec. to declare
+ * discovered network as lost
+ */
+ int16 flags; /* Bit field to control features
+ * of PFN such as sort criteria auto
+ * enable switch and background scan
+ */
+ int16 rssi_margin; /* Margin to avoid jitter for choosing a
+ * PFN based on RSSI sort criteria
+ */
+ uint8 bestn; /* number of best networks in each scan */
+ uint8 mscan; /* number of scans recorded */
+ uint8 repeat; /* Minimum number of scan intervals
+ * before scan frequency changes in adaptive scan
+ */
+ uint8 exp; /* Exponent of 2 for maximum scan interval */
+ int32 slow_freq; /* slow scan period */
+} wl_pfn_param_t;
+
+typedef struct wl_pfn_bssid {
+ struct ether_addr macaddr;
+ /* Bit4: suppress_lost, Bit3: suppress_found */
+ uint16 flags;
+} wl_pfn_bssid_t;
+
+typedef struct wl_pfn_significant_bssid {
+ struct ether_addr macaddr;
+ int8 rssi_low_threshold;
+ int8 rssi_high_threshold;
+} wl_pfn_significant_bssid_t;
+#define WL_PFN_SUPPRESSFOUND_MASK 0x08
+#define WL_PFN_SUPPRESSLOST_MASK 0x10
+#define WL_PFN_RSSI_MASK 0xff00
+#define WL_PFN_RSSI_SHIFT 8
+
+typedef struct wl_pfn_cfg {
+ uint32 reporttype;
+ int32 channel_num;
+ uint16 channel_list[WL_NUMCHANNELS];
+ uint32 flags;
+} wl_pfn_cfg_t;
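The *_BIT / *_MASK defines earlier in this section compose into the wl_pfn_param_t flags field. A minimal sketch, assuming the adaptive-scan mode (OFF/SMART/STRICT/SLOW_ADAPT) occupies the two-bit field covered by ENABLE_ADAPTSCAN_MASK (0x00c0), which is what the mask width suggests; the field values chosen here are example settings, not recommendations:

	/* Illustrative only: immediate scan plus smart adaptive scanning. */
	#include <string.h>

	static void pfn_fill_params(wl_pfn_param_t *params)
	{
		memset(params, 0, sizeof(*params));
		params->version = PFN_VERSION;
		params->scan_freq = 30;		/* example: scan every 30 s */
		params->flags = (1 << IMMEDIATE_SCAN_BIT) |
			((SMART_ADAPT << ENABLE_ADAPTSCAN_BIT) & ENABLE_ADAPTSCAN_MASK);
		params->bestn = DEFAULT_BESTN;
		params->mscan = DEFAULT_MSCAN;
		params->repeat = DEFAULT_REPEAT;
		params->exp = DEFAULT_EXP;
	}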
+
+#define CH_BUCKET_REPORT_REGULAR 0
+#define CH_BUCKET_REPORT_FULL_RESULT 2
+#define CH_BUCKET_GSCAN 4
+
+
+typedef struct wl_pfn_gscan_channel_bucket {
+ uint16 bucket_end_index;
+ uint8 bucket_freq_multiple;
+ uint8 report_flag;
+} wl_pfn_gscan_channel_bucket_t;
+
+#define GSCAN_SEND_ALL_RESULTS_MASK (1 << 0)
+#define GSCAN_CFG_FLAGS_ONLY_MASK (1 << 7)
+
+typedef struct wl_pfn_gscan_cfg {
+ /* BIT0 1 = send probes/beacons to HOST
+ * BIT2 Reserved
+ * Add any future flags here
+ * BIT7 1 = no other useful cfg sent
+ */
+ uint8 flags;
+ /* Buffer filled threshold in % to generate an event */
+ uint8 buffer_threshold;
+ /* Number of BSSIDs with a "change" (RSSI threshold crossed, or AP lost)
+ * needed to generate an event
+ */
+ uint8 swc_nbssid_threshold;
+ /* Max=8 (for now) Size of rssi cache buffer */
+ uint8 swc_rssi_window_size;
+ uint16 count_of_channel_buckets;
+ uint16 lost_ap_window;
+ wl_pfn_gscan_channel_bucket_t channel_bucket[1];
+} wl_pfn_gscan_cfg_t;
+
+
+#define WL_PFN_REPORT_ALLNET 0
+#define WL_PFN_REPORT_SSIDNET 1
+#define WL_PFN_REPORT_BSSIDNET 2
+#define WL_PFN_CFG_FLAGS_PROHIBITED 0x00000001 /* Accept and use prohibited channels */
+#define WL_PFN_CFG_FLAGS_RESERVED 0xfffffffe /* Remaining reserved for future use */
+
+typedef struct wl_pfn {
+ wlc_ssid_t ssid; /* ssid name and its length */
+ int32 flags; /* bit2: hidden */
+ int32 infra; /* BSS Vs IBSS */
+ int32 auth; /* Open Vs Closed */
+ int32 wpa_auth; /* WPA type */
+ int32 wsec; /* wsec value */
+} wl_pfn_t;
+
+typedef struct wl_pfn_list {
+ uint32 version;
+ uint32 enabled;
+ uint32 count;
+ wl_pfn_t pfn[1];
+} wl_pfn_list_t;
+
+#define WL_PFN_MAC_OUI_ONLY_MASK 1
+#define WL_PFN_SET_MAC_UNASSOC_MASK 2
+/* To configure pfn_macaddr */
+typedef struct wl_pfn_macaddr_cfg {
+ uint8 version;
+ uint8 flags;
+ struct ether_addr macaddr;
+} wl_pfn_macaddr_cfg_t;
+#define WL_PFN_MACADDR_CFG_VER 1
+typedef BWL_PRE_PACKED_STRUCT struct pfn_olmsg_params_t {
+ wlc_ssid_t ssid;
+ uint32 cipher_type;
+ uint32 auth_type;
+ uint8 channels[4];
+} BWL_POST_PACKED_STRUCT pfn_olmsg_params;
+
+#define WL_PFN_HIDDEN_BIT 2
+#define WL_PFN_HIDDEN_MASK 0x4
+
+#ifndef BESTN_MAX
+#define BESTN_MAX 3
+#endif
+
+#ifndef MSCAN_MAX
+#define MSCAN_MAX 90
+#endif
+
+/*
+ * WLFCTS definition
+ */
+typedef struct wl_txstatus_additional_info {
+ uint32 rspec;
+ uint32 enq_ts;
+ uint32 last_ts;
+ uint32 entry_ts;
+ uint16 seq;
+ uint8 rts_cnt;
+ uint8 tx_cnt;
+} wl_txstatus_additional_info_t;
+
+/* Service discovery */
+typedef struct {
+ uint8 transaction_id; /* Transaction id */
+ uint8 protocol; /* Service protocol type */
+ uint16 query_len; /* Length of query */
+ uint16 response_len; /* Length of response */
+ uint8 qrbuf[1];
+} wl_p2po_qr_t;
+
+typedef struct {
+ uint16 period; /* extended listen period */
+ uint16 interval; /* extended listen interval */
+ uint16 count; /* count to repeat */
+ uint16 pad; /* pad for 32bit align */
+} wl_p2po_listen_t;
+
+/* GAS state machine tunable parameters. Structure field values of 0 mean use the default. */
+typedef struct wl_gas_config {
+ uint16 max_retransmit; /* Max # of firmware/driver retransmits on no Ack
+ * from peer (on top of the ucode retries).
+ */
+ uint16 response_timeout; /* Max time to wait for a GAS-level response
+ * after sending a packet.
+ */
+ uint16 max_comeback_delay; /* Max GAS response comeback delay.
+ * Exceeding this fails the GAS exchange.
+ */
+ uint16 max_retries; /* Max # of GAS state machine retries on failure
+ * of a GAS frame exchange.
+ */
+} wl_gas_config_t;
+
+/* P2P Find Offload parameters */
+typedef BWL_PRE_PACKED_STRUCT struct wl_p2po_find_config {
+ uint16 version; /* Version of this struct */
+ uint16 length; /* sizeof(wl_p2po_find_config_t) */
+ int32 search_home_time; /* P2P search state home time when concurrent
+ * connection exists. -1 for default.
+ */
+ uint8 num_social_channels;
+ /* Number of social channels up to WL_P2P_SOCIAL_CHANNELS_MAX.
+ * 0 means use default social channels.
+ */ + uint8 flags; + uint16 social_channels[1]; /* Variable length array of social channels */ +} BWL_POST_PACKED_STRUCT wl_p2po_find_config_t; +#define WL_P2PO_FIND_CONFIG_VERSION 2 /* value for version field */ + +/* wl_p2po_find_config_t flags */ +#define P2PO_FIND_FLAG_SCAN_ALL_APS 0x01 /* Whether to scan for all APs in the p2po_find + * periodic scans of all channels. + * 0 means scan for only P2P devices. + * 1 means scan for P2P devices plus non-P2P APs. + */ + + +/* For adding a WFDS service to seek */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 seek_hdl; /* unique id chosen by host */ + uint8 addr[6]; /* Seek service from a specific device with this + * MAC address, all 1's for any device. + */ + uint8 service_hash[P2P_WFDS_HASH_LEN]; + uint8 service_name_len; + uint8 service_name[MAX_WFDS_SEEK_SVC_NAME_LEN]; + /* Service name to seek, not null terminated */ + uint8 service_info_req_len; + uint8 service_info_req[1]; /* Service info request, not null terminated. + * Variable length specified by service_info_req_len. + * Maximum length is MAX_WFDS_SEEK_SVC_INFO_LEN. + */ +} BWL_POST_PACKED_STRUCT wl_p2po_wfds_seek_add_t; + +/* For deleting a WFDS service to seek */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 seek_hdl; /* delete service specified by id */ +} BWL_POST_PACKED_STRUCT wl_p2po_wfds_seek_del_t; + + +/* For adding a WFDS service to advertise */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 advertise_hdl; /* unique id chosen by host */ + uint8 service_hash[P2P_WFDS_HASH_LEN]; + uint32 advertisement_id; + uint16 service_config_method; + uint8 service_name_len; + uint8 service_name[MAX_WFDS_SVC_NAME_LEN]; + /* Service name , not null terminated */ + uint8 service_status; + uint16 service_info_len; + uint8 service_info[1]; /* Service info, not null terminated. + * Variable length specified by service_info_len. + * Maximum length is MAX_WFDS_ADV_SVC_INFO_LEN. 
+ */ +} BWL_POST_PACKED_STRUCT wl_p2po_wfds_advertise_add_t; + +/* For deleting a WFDS service to advertise */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 advertise_hdl; /* delete service specified by hdl */ +} BWL_POST_PACKED_STRUCT wl_p2po_wfds_advertise_del_t; + +/* P2P Offload discovery mode for the p2po_state iovar */ +typedef enum { + WL_P2PO_DISC_STOP, + WL_P2PO_DISC_LISTEN, + WL_P2PO_DISC_DISCOVERY +} disc_mode_t; + +/* ANQP offload */ + +#define ANQPO_MAX_QUERY_SIZE 256 +typedef struct { + uint16 max_retransmit; /* ~0 use default, max retransmit on no ACK from peer */ + uint16 response_timeout; /* ~0 use default, msec to wait for resp after tx packet */ + uint16 max_comeback_delay; /* ~0 use default, max comeback delay in resp else fail */ + uint16 max_retries; /* ~0 use default, max retries on failure */ + uint16 query_len; /* length of ANQP query */ + uint8 query_data[1]; /* ANQP encoded query (max ANQPO_MAX_QUERY_SIZE) */ +} wl_anqpo_set_t; + +typedef struct { + uint16 channel; /* channel of the peer */ + struct ether_addr addr; /* addr of the peer */ +} wl_anqpo_peer_t; + +#define ANQPO_MAX_PEER_LIST 64 +typedef struct { + uint16 count; /* number of peers in list */ + wl_anqpo_peer_t peer[1]; /* max ANQPO_MAX_PEER_LIST */ +} wl_anqpo_peer_list_t; + +#define ANQPO_MAX_IGNORE_SSID 64 +typedef struct { + bool is_clear; /* set to clear list (not used on GET) */ + uint16 count; /* number of SSID in list */ + wlc_ssid_t ssid[1]; /* max ANQPO_MAX_IGNORE_SSID */ +} wl_anqpo_ignore_ssid_list_t; + +#define ANQPO_MAX_IGNORE_BSSID 64 +typedef struct { + bool is_clear; /* set to clear list (not used on GET) */ + uint16 count; /* number of addr in list */ + struct ether_addr bssid[1]; /* max ANQPO_MAX_IGNORE_BSSID */ +} wl_anqpo_ignore_bssid_list_t; + + +struct toe_ol_stats_t { + /* Num of tx packets that don't need to be checksummed */ + uint32 tx_summed; + + /* Num of tx packets where checksum is filled by offload engine */ + uint32 tx_iph_fill; + uint32 tx_tcp_fill; + uint32 tx_udp_fill; + uint32 tx_icmp_fill; + + /* Num of rx packets where toe finds out if checksum is good or bad */ + uint32 rx_iph_good; + uint32 rx_iph_bad; + uint32 rx_tcp_good; + uint32 rx_tcp_bad; + uint32 rx_udp_good; + uint32 rx_udp_bad; + uint32 rx_icmp_good; + uint32 rx_icmp_bad; + + /* Num of tx packets in which csum error is injected */ + uint32 tx_tcp_errinj; + uint32 tx_udp_errinj; + uint32 tx_icmp_errinj; + + /* Num of rx packets in which csum error is injected */ + uint32 rx_tcp_errinj; + uint32 rx_udp_errinj; + uint32 rx_icmp_errinj; +}; + +/* Arp offload statistic counts */ +struct arp_ol_stats_t { + uint32 host_ip_entries; /* Host IP table addresses (more than one if multihomed) */ + uint32 host_ip_overflow; /* Host IP table additions skipped due to overflow */ + + uint32 arp_table_entries; /* ARP table entries */ + uint32 arp_table_overflow; /* ARP table additions skipped due to overflow */ + + uint32 host_request; /* ARP requests from host */ + uint32 host_reply; /* ARP replies from host */ + uint32 host_service; /* ARP requests from host serviced by ARP Agent */ + + uint32 peer_request; /* ARP requests received from network */ + uint32 peer_request_drop; /* ARP requests from network that were dropped */ + uint32 peer_reply; /* ARP replies received from network */ + uint32 peer_reply_drop; /* ARP replies from network that were dropped */ + uint32 peer_service; /* ARP request from host serviced by ARP Agent */ +}; + +/* NS offload statistic counts */ +struct nd_ol_stats_t { + uint32 
host_ip_entries; /* Host IP table addresses (more than one if multihomed) */
+ uint32 host_ip_overflow; /* Host IP table additions skipped due to overflow */
+ uint32 peer_request; /* NS requests received from network */
+ uint32 peer_request_drop; /* NS requests from network that were dropped */
+ uint32 peer_reply_drop; /* NA replies from network that were dropped */
+ uint32 peer_service; /* NS request from host serviced by firmware */
+};
+
+/*
+ * Keep-alive packet offloading.
+ */
+
+/* NAT keep-alive packets format: specifies the re-transmission period, the packet
+ * length, and packet contents.
+ */
+typedef struct wl_keep_alive_pkt {
+ uint32 period_msec; /* Retransmission period (0 to disable packet re-transmits) */
+ uint16 len_bytes; /* Size of packet to transmit (0 to disable packet re-transmits) */
+ uint8 data[1]; /* Variable length packet to transmit. Contents should include
+ * entire ethernet packet (enet header, IP header, UDP header,
+ * and UDP payload) in network byte order.
+ */
+} wl_keep_alive_pkt_t;
+
+#define WL_KEEP_ALIVE_FIXED_LEN OFFSETOF(wl_keep_alive_pkt_t, data)
+
+
+/*
+ * Dongle pattern matching filter.
+ */
+
+#define MAX_WAKE_PACKET_CACHE_BYTES 128 /* Maximum cached wake packet */
+
+#define MAX_WAKE_PACKET_BYTES (DOT11_A3_HDR_LEN + \
+ DOT11_QOS_LEN + \
+ sizeof(struct dot11_llc_snap_header) + \
+ ETHER_MAX_DATA)
+
+typedef struct pm_wake_packet {
+ uint32 status; /* Whether the wake reason is a packet (if so, the other fields are valid) */
+ uint32 pattern_id; /* Pattern ID that matched */
+ uint32 original_packet_size;
+ uint32 saved_packet_size;
+ uchar packet[MAX_WAKE_PACKET_CACHE_BYTES];
+} pm_wake_packet_t;
+
+/* Packet filter types */
+typedef enum wl_pkt_filter_type {
+ WL_PKT_FILTER_TYPE_PATTERN_MATCH=0, /* Pattern matching filter */
+ WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH=1, /* Magic packet match */
+ WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH=2, /* A pattern list (match all to match filter) */
+ WL_PKT_FILTER_TYPE_ENCRYPTED_PATTERN_MATCH=3, /* SECURE WOWL magic / net pattern match */
+} wl_pkt_filter_type_t;
+
+#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t
+
+/* String mapping for types that may be used by applications or debug */
+#define WL_PKT_FILTER_TYPE_NAMES \
+ { "PATTERN", WL_PKT_FILTER_TYPE_PATTERN_MATCH }, \
+ { "MAGIC", WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH }, \
+ { "PATLIST", WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH }
+
+/* A secured WOWL packet is encrypted and needs to be decrypted before checking the filter match */
+typedef struct wl_pkt_decrypter {
+ uint8* (*dec_cb)(void* dec_ctx, const void *sdu, int sending);
+ void* dec_ctx;
+} wl_pkt_decrypter_t;
+
+/* Pattern matching filter. Specifies an offset within received packets to
+ * start matching, the pattern to match, the size of the pattern, and a bitmask
+ * that indicates which bits within the pattern should be matched.
+ */
+typedef struct wl_pkt_filter_pattern {
+ uint32 offset; /* Offset within received packet to start pattern matching.
+ * Offset '0' is the first byte of the ethernet header.
+ */
+ uint32 size_bytes; /* Size of the pattern. Bitmask must be the same size. */
+ uint8 mask_and_pattern[1]; /* Variable length mask and pattern data. The mask starts
+ * at offset 0 and the pattern immediately follows the mask.
+ * For a secured pattern, the decrypter pointer is placed at
+ * the beginning, with mask and pattern shifted accordingly.
+ */
+} wl_pkt_filter_pattern_t;
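Several of these iovar structures end in a nominal one-element array that actually carries variable-length data; the _FIXED_LEN OFFSETOF macros defined just below give the fixed-header size to which the variable part is added. A minimal host-side sketch of sizing and filling a pattern filter for "pkt_filter_add" (the allocation and the eventual iovar call are illustrative, not part of this header):

	/* Illustrative only: mask_and_pattern[] carries size_bytes of mask
	 * followed by size_bytes of pattern, so the total buffer length is
	 * the fixed headers plus 2 * size_bytes.
	 */
	#include <stdlib.h>
	#include <string.h>

	static wl_pkt_filter_t *build_pattern_filter(uint32 id, uint32 offset,
			const uint8 *mask, const uint8 *pattern, uint32 size_bytes)
	{
		uint32 len = WL_PKT_FILTER_FIXED_LEN +
			     WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * size_bytes;
		wl_pkt_filter_t *f = (wl_pkt_filter_t *)malloc(len);

		if (f == NULL)
			return NULL;
		f->id = id;
		f->type = WL_PKT_FILTER_TYPE_PATTERN_MATCH;
		f->negate_match = 0;
		f->u.pattern.offset = offset;	/* 0 = first byte of ethernet header */
		f->u.pattern.size_bytes = size_bytes;
		memcpy(f->u.pattern.mask_and_pattern, mask, size_bytes);
		memcpy(f->u.pattern.mask_and_pattern + size_bytes, pattern, size_bytes);
		return f;			/* pass (f, len) to the iovar set call */
	}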
+
+/* A pattern list is a numerically specified list of modified pattern structures. */
+typedef struct wl_pkt_filter_pattern_listel {
+ uint16 rel_offs; /* Offset to begin match (relative to 'base' below) */
+ uint16 base_offs; /* Base for offset (defined below) */
+ uint16 size_bytes; /* Size of mask/pattern */
+ uint16 match_flags; /* Additional flags controlling the match */
+ uint8 mask_and_data[1]; /* Variable length mask followed by data, each size_bytes */
+} wl_pkt_filter_pattern_listel_t;
+
+typedef struct wl_pkt_filter_pattern_list {
+ uint8 list_cnt; /* Number of elements in the list */
+ uint8 PAD1[1]; /* Reserved (possible version: reserved) */
+ uint16 totsize; /* Total size of this pattern list (includes this struct) */
+ wl_pkt_filter_pattern_listel_t patterns[1]; /* Variable number of list elements */
+} wl_pkt_filter_pattern_list_t;
+
+/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
+typedef struct wl_pkt_filter {
+ uint32 id; /* Unique filter id, specified by app. */
+ uint32 type; /* Filter type (WL_PKT_FILTER_TYPE_xxx). */
+ uint32 negate_match; /* Negate the result of filter matches */
+ union { /* Filter definitions */
+ wl_pkt_filter_pattern_t pattern; /* Pattern matching filter */
+ wl_pkt_filter_pattern_list_t patlist; /* List of patterns to match */
+ } u;
+} wl_pkt_filter_t;
+
+/* IOVAR "tcp_keep_set" parameter. Used to install TCP keep-alive parameters. */
+typedef struct wl_tcp_keep_set {
+ uint32 val1;
+ uint32 val2;
+} wl_tcp_keep_set_t;
+
+#define WL_PKT_FILTER_FIXED_LEN OFFSETOF(wl_pkt_filter_t, u)
+#define WL_PKT_FILTER_PATTERN_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern)
+#define WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_list_t, patterns)
+#define WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN \
+ OFFSETOF(wl_pkt_filter_pattern_listel_t, mask_and_data)
+
+/* IOVAR "pkt_filter_enable" parameter. */
+typedef struct wl_pkt_filter_enable {
+ uint32 id; /* Unique filter id */
+ uint32 enable; /* Enable/disable bool */
+} wl_pkt_filter_enable_t;
+
+/* IOVAR "pkt_filter_list" parameter. Used to retrieve a list of installed filters. */
+typedef struct wl_pkt_filter_list {
+ uint32 num; /* Number of installed packet filters */
+ wl_pkt_filter_t filter[1]; /* Variable array of packet filters. */
+} wl_pkt_filter_list_t;
+
+#define WL_PKT_FILTER_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_list_t, filter)
+
+/* IOVAR "pkt_filter_stats" parameter. Used to retrieve debug statistics. */
+typedef struct wl_pkt_filter_stats {
+ uint32 num_pkts_matched; /* # filter matches for specified filter id */
+ uint32 num_pkts_forwarded; /* # packets fwded from dongle to host for all filters */
+ uint32 num_pkts_discarded; /* # packets discarded by dongle for all filters */
+} wl_pkt_filter_stats_t;
+
+/* IOVAR "pkt_filter_ports" parameter. Configure TCP/UDP port filters.
*/
+typedef struct wl_pkt_filter_ports {
+ uint8 version; /* Be proper */
+ uint8 reserved; /* Be really proper */
+ uint16 count; /* Number of ports following */
+ /* End of fixed data */
+ uint16 ports[1]; /* Placeholder for ports[] */
+} wl_pkt_filter_ports_t;
+
+#define WL_PKT_FILTER_PORTS_FIXED_LEN OFFSETOF(wl_pkt_filter_ports_t, ports)
+
+#define WL_PKT_FILTER_PORTS_VERSION 0
+#define WL_PKT_FILTER_PORTS_MAX 128
+
+#define RSN_REPLAY_LEN 8
+typedef struct _gtkrefresh {
+ uchar KCK[RSN_KCK_LENGTH];
+ uchar KEK[RSN_KEK_LENGTH];
+ uchar ReplayCounter[RSN_REPLAY_LEN];
+} gtk_keyinfo_t, *pgtk_keyinfo_t;
+
+/* Sequential Commands ioctl */
+typedef struct wl_seq_cmd_ioctl {
+ uint32 cmd; /* common ioctl definition */
+ uint32 len; /* length of user buffer */
+} wl_seq_cmd_ioctl_t;
+
+#define WL_SEQ_CMD_ALIGN_BYTES 4
+
+/* These are the set of get IOCTLs that should be allowed when using
+ * IOCTL sequence commands. These are issued implicitly by wl.exe each time
+ * it is invoked. We never want to buffer these, or else wl.exe will stop working.
+ */
+#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \
+ (((cmd) == WLC_GET_MAGIC) || \
+ ((cmd) == WLC_GET_VERSION) || \
+ ((cmd) == WLC_GET_AP) || \
+ ((cmd) == WLC_GET_INSTANCE))
+
+typedef struct wl_pkteng {
+ uint32 flags;
+ uint32 delay; /* Inter-packet delay */
+ uint32 nframes; /* Number of frames */
+ uint32 length; /* Packet length */
+ uint8 seqno; /* Enable/disable sequence no. */
+ struct ether_addr dest; /* Destination address */
+ struct ether_addr src; /* Source address */
+} wl_pkteng_t;
+
+typedef struct wl_pkteng_stats {
+ uint32 lostfrmcnt; /* RX PER test: no of frames lost (skip seqno) */
+ int32 rssi; /* RSSI */
+ int32 snr; /* signal to noise ratio */
+ uint16 rxpktcnt[NUM_80211_RATES+1];
+ uint8 rssi_qdb; /* qdB portion of the computed rssi */
+} wl_pkteng_stats_t;
+
+typedef struct wl_txcal_params {
+ wl_pkteng_t pkteng;
+ uint8 gidx_start;
+ int8 gidx_step;
+ uint8 gidx_stop;
+} wl_txcal_params_t;
+
+
+typedef enum {
+ wowl_pattern_type_bitmap = 0,
+ wowl_pattern_type_arp,
+ wowl_pattern_type_na
+} wowl_pattern_type_t;
+
+typedef struct wl_wowl_pattern {
+ uint32 masksize; /* Size of the mask in #of bytes */
+ uint32 offset; /* Pattern byte offset in packet */
+ uint32 patternoffset; /* Offset of start of pattern in the structure */
+ uint32 patternsize; /* Size of the pattern itself in #of bytes */
+ uint32 id; /* id */
+ uint32 reasonsize; /* Size of the wakeup reason code */
+ wowl_pattern_type_t type; /* Type of pattern */
+ /* The mask follows the structure above */
+ /* The pattern follows the mask, at 'patternoffset' from the start of the structure */
+} wl_wowl_pattern_t;
+
+typedef struct wl_wowl_pattern_list {
+ uint count;
+ wl_wowl_pattern_t pattern[1];
+} wl_wowl_pattern_list_t;
+
+typedef struct wl_wowl_wakeind {
+ uint8 pci_wakeind; /* Whether PCI PMECSR PMEStatus bit was set */
+ uint32 ucode_wakeind; /* What wakeup-event indication was set by ucode */
+} wl_wowl_wakeind_t;
+
+typedef struct {
+ uint32 pktlen; /* size of packet */
+ void *sdu;
+} tcp_keepalive_wake_pkt_infop_t;
+
+/* per AC rate control related data structure */
+typedef struct wl_txrate_class {
+ uint8 init_rate;
+ uint8 min_rate;
+ uint8 max_rate;
+} wl_txrate_class_t;
+
+/* structure for Overlap BSS scan arguments */
+typedef struct wl_obss_scan_arg {
+ int16 passive_dwell;
+ int16 active_dwell;
+ int16 bss_widthscan_interval;
+ int16 passive_total;
+ int16 active_total;
+ int16 chanwidth_transition_delay;
+ int16 activity_threshold;
+} wl_obss_scan_arg_t;
+
+#define WL_OBSS_SCAN_PARAM_LEN sizeof(wl_obss_scan_arg_t)
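wl_wowl_pattern_t above is another fixed-header-plus-trailing-data layout: per its comments, the mask bytes begin immediately after the structure and patternoffset records where the pattern starts, measured from the start of the structure. A minimal sketch of laying one out, with the buffer handling illustrative only:

	/* Illustrative only: place mask and pattern behind wl_wowl_pattern_t. */
	#include <stdlib.h>
	#include <string.h>

	static wl_wowl_pattern_t *build_wowl_pattern(const uint8 *mask, uint32 masksize,
			const uint8 *pattern, uint32 patternsize, uint32 offset)
	{
		uint32 len = sizeof(wl_wowl_pattern_t) + masksize + patternsize;
		wl_wowl_pattern_t *p = (wl_wowl_pattern_t *)calloc(1, len);

		if (p == NULL)
			return NULL;
		p->masksize = masksize;
		p->offset = offset;		/* byte offset within the packet */
		p->patternoffset = sizeof(wl_wowl_pattern_t) + masksize;
		p->patternsize = patternsize;
		p->type = wowl_pattern_type_bitmap;
		memcpy((uint8 *)p + sizeof(*p), mask, masksize);
		memcpy((uint8 *)p + p->patternoffset, pattern, patternsize);
		return p;			/* pass (p, len) to the wowl_pattern iovar */
	}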
+
+/* RSSI event notification configuration. */
+typedef struct wl_rssi_event {
+ uint32 rate_limit_msec; /* # of events posted to application will be limited to
+ * one per specified period (0 to disable rate limit).
+ */
+ uint8 num_rssi_levels; /* Number of entries in rssi_levels[] below */
+ int8 rssi_levels[MAX_RSSI_LEVELS]; /* Variable number of RSSI levels. An event
+ * will be posted each time the RSSI of received
+ * beacons/packets crosses a level.
+ */
+} wl_rssi_event_t;
+
+/* CCA based channel quality event configuration */
+#define WL_CHAN_QUAL_CCA 0
+#define WL_CHAN_QUAL_NF 1
+#define WL_CHAN_QUAL_NF_LTE 2
+#define WL_CHAN_QUAL_TOTAL 3
+
+#define MAX_CHAN_QUAL_LEVELS 8
+
+typedef struct wl_chan_qual_metric {
+ uint8 id; /* metric ID */
+ uint8 num_levels; /* Number of entries in htol[]/ltoh[] below */
+ uint16 flags;
+ int16 htol[MAX_CHAN_QUAL_LEVELS]; /* threshold level array: hi-to-lo */
+ int16 ltoh[MAX_CHAN_QUAL_LEVELS]; /* threshold level array: lo-to-hi */
+} wl_chan_qual_metric_t;
+
+typedef struct wl_chan_qual_event {
+ uint32 rate_limit_msec; /* # of events posted to application will be limited to
+ * one per specified period (0 to disable rate limit).
+ */
+ uint16 flags;
+ uint16 num_metrics;
+ wl_chan_qual_metric_t metric[WL_CHAN_QUAL_TOTAL]; /* metric array */
+} wl_chan_qual_event_t;
+
+typedef struct wl_action_obss_coex_req {
+ uint8 info;
+ uint8 num;
+ uint8 ch_list[1];
+} wl_action_obss_coex_req_t;
+
+
+/* IOVar parameter block for small MAC address array with type indicator */
+#define WL_IOV_MAC_PARAM_LEN 4
+
+#define WL_IOV_PKTQ_LOG_PRECS 16
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 num_addrs;
+ char addr_type[WL_IOV_MAC_PARAM_LEN];
+ struct ether_addr ea[WL_IOV_MAC_PARAM_LEN];
+} BWL_POST_PACKED_STRUCT wl_iov_mac_params_t;
+
+/* This is extra info that follows wl_iov_mac_params_t */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 addr_info[WL_IOV_MAC_PARAM_LEN];
+} BWL_POST_PACKED_STRUCT wl_iov_mac_extra_params_t;
+
+/* Combined structure */
+typedef struct {
+ wl_iov_mac_params_t params;
+ wl_iov_mac_extra_params_t extra_params;
+} wl_iov_mac_full_params_t;
+
+/* Parameter block for PKTQ_LOG statistics */
+#define PKTQ_LOG_COUNTERS_V4 \
+ /* packets requested to be stored */ \
+ uint32 requested; \
+ /* packets stored */ \
+ uint32 stored; \
+ /* packets saved, because a lowest priority queue has given away one packet */ \
+ uint32 saved; \
+ /* packets saved, because an older packet from the same queue has been dropped */ \
+ uint32 selfsaved; \
+ /* packets dropped, because pktq is full with higher precedence packets */ \
+ uint32 full_dropped; \
+ /* packets dropped because pktq per that precedence is full */ \
+ uint32 dropped; \
+ /* packets dropped, in order to save one from a queue of a highest priority */ \
+ uint32 sacrificed; \
+ /* packets dropped because of hardware/transmission error */ \
+ uint32 busy; \
+ /* packets re-sent because they were not received */ \
+ uint32 retry; \
+ /* packets retried again (ps pretend) prior to moving power save mode */ \
+ uint32 ps_retry; \
+ /* suppressed packet count */ \
+ uint32 suppress; \
+ /* packets finally dropped after retry limit */ \
+ uint32 retry_drop; \
+ /* the high-water mark of the queue capacity for packets - goes to zero as queue fills */ \
+ uint32 max_avail; \
+ /* the high-water mark of the queue utilisation for packets - ('inverse' of max_avail) */ \
+ uint32 max_used; \
+ /* the maximum capacity of the queue */ \
+ uint32
queue_capacity; \ + /* count of rts attempts that failed to receive cts */ \ + uint32 rtsfail; \ + /* count of packets sent (acked) successfully */ \ + uint32 acked; \ + /* running total of phy rate of packets sent successfully */ \ + uint32 txrate_succ; \ + /* running total of phy 'main' rate */ \ + uint32 txrate_main; \ + /* actual data transferred successfully */ \ + uint32 throughput; \ + /* time difference since last pktq_stats */ \ + uint32 time_delta; + +typedef struct { + PKTQ_LOG_COUNTERS_V4 +} pktq_log_counters_v04_t; + +/* v5 is the same as V4 with extra parameter */ +typedef struct { + PKTQ_LOG_COUNTERS_V4 + /* cumulative time to transmit */ + uint32 airtime; +} pktq_log_counters_v05_t; + +typedef struct { + uint8 num_prec[WL_IOV_MAC_PARAM_LEN]; + pktq_log_counters_v04_t counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS]; + uint32 counter_info[WL_IOV_MAC_PARAM_LEN]; + uint32 pspretend_time_delta[WL_IOV_MAC_PARAM_LEN]; + char headings[1]; +} pktq_log_format_v04_t; + +typedef struct { + uint8 num_prec[WL_IOV_MAC_PARAM_LEN]; + pktq_log_counters_v05_t counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS]; + uint32 counter_info[WL_IOV_MAC_PARAM_LEN]; + uint32 pspretend_time_delta[WL_IOV_MAC_PARAM_LEN]; + char headings[1]; +} pktq_log_format_v05_t; + + +typedef struct { + uint32 version; + wl_iov_mac_params_t params; + union { + pktq_log_format_v04_t v04; + pktq_log_format_v05_t v05; + } pktq_log; +} wl_iov_pktq_log_t; + +/* PKTQ_LOG_AUTO, PKTQ_LOG_DEF_PREC flags introduced in v05, they are ignored by v04 */ +#define PKTQ_LOG_AUTO (1 << 31) +#define PKTQ_LOG_DEF_PREC (1 << 30) + + +#define LEGACY1_WL_PFN_MACADDR_CFG_VER 0 + +#define WL_PFN_MAC_OUI_ONLY_MASK 1 +#define WL_PFN_SET_MAC_UNASSOC_MASK 2 +#define WL_PFN_RESTRICT_LA_MAC_MASK 4 +#define WL_PFN_MACADDR_FLAG_MASK 0x7 + + +/* + * SCB_BS_DATA iovar definitions start. + */ +#define SCB_BS_DATA_STRUCT_VERSION 1 + +/* The actual counters maintained for each station */ +typedef BWL_PRE_PACKED_STRUCT struct { + /* The following counters are a subset of what pktq_stats provides per precedence. */ + uint32 retry; /* packets re-sent because they were not received */ + uint32 retry_drop; /* packets finally dropped after retry limit */ + uint32 rtsfail; /* count of rts attempts that failed to receive cts */ + uint32 acked; /* count of packets sent (acked) successfully */ + uint32 txrate_succ; /* running total of phy rate of packets sent successfully */ + uint32 txrate_main; /* running total of phy 'main' rate */ + uint32 throughput; /* actual data transferred successfully */ + uint32 time_delta; /* time difference since last pktq_stats */ + uint32 airtime; /* cumulative total medium access delay in useconds */ +} BWL_POST_PACKED_STRUCT iov_bs_data_counters_t; + +/* The structure for individual station information. */ +typedef BWL_PRE_PACKED_STRUCT struct { + struct ether_addr station_address; /* The station MAC address */ + uint16 station_flags; /* Bit mask of flags, for future use. */ + iov_bs_data_counters_t station_counters; /* The actual counter values */ +} BWL_POST_PACKED_STRUCT iov_bs_data_record_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 structure_version; /* Structure version number (for wl/wlu matching) */ + uint16 structure_count; /* Number of iov_bs_data_record_t records following */ + iov_bs_data_record_t structure_record[1]; /* 0 - structure_count records */ +} BWL_POST_PACKED_STRUCT iov_bs_data_struct_t; + +/* Bitmask of options that can be passed in to the iovar. 
*/ +enum { + SCB_BS_DATA_FLAG_NO_RESET = (1<<0) /* Do not clear the counters after reading */ +}; +/* + * SCB_BS_DATA iovar definitions end. + */ + +typedef struct wlc_extlog_cfg { + int max_number; + uint16 module; /* bitmap */ + uint8 level; + uint8 flag; + uint16 version; +} wlc_extlog_cfg_t; + +typedef struct log_record { + uint32 time; + uint16 module; + uint16 id; + uint8 level; + uint8 sub_unit; + uint8 seq_num; + int32 arg; + char str[MAX_ARGSTR_LEN]; +} log_record_t; + +typedef struct wlc_extlog_req { + uint32 from_last; + uint32 num; +} wlc_extlog_req_t; + +typedef struct wlc_extlog_results { + uint16 version; + uint16 record_len; + uint32 num; + log_record_t logs[1]; +} wlc_extlog_results_t; + +typedef struct log_idstr { + uint16 id; + uint16 flag; + uint8 arg_type; + const char *fmt_str; +} log_idstr_t; + +#define FMTSTRF_USER 1 + +/* flat ID definitions + * New definitions HAVE TO BE ADDED at the end of the table. Otherwise, it will + * affect backward compatibility with pre-existing apps + */ +typedef enum { + FMTSTR_DRIVER_UP_ID = 0, + FMTSTR_DRIVER_DOWN_ID = 1, + FMTSTR_SUSPEND_MAC_FAIL_ID = 2, + FMTSTR_NO_PROGRESS_ID = 3, + FMTSTR_RFDISABLE_ID = 4, + FMTSTR_REG_PRINT_ID = 5, + FMTSTR_EXPTIME_ID = 6, + FMTSTR_JOIN_START_ID = 7, + FMTSTR_JOIN_COMPLETE_ID = 8, + FMTSTR_NO_NETWORKS_ID = 9, + FMTSTR_SECURITY_MISMATCH_ID = 10, + FMTSTR_RATE_MISMATCH_ID = 11, + FMTSTR_AP_PRUNED_ID = 12, + FMTSTR_KEY_INSERTED_ID = 13, + FMTSTR_DEAUTH_ID = 14, + FMTSTR_DISASSOC_ID = 15, + FMTSTR_LINK_UP_ID = 16, + FMTSTR_LINK_DOWN_ID = 17, + FMTSTR_RADIO_HW_OFF_ID = 18, + FMTSTR_RADIO_HW_ON_ID = 19, + FMTSTR_EVENT_DESC_ID = 20, + FMTSTR_PNP_SET_POWER_ID = 21, + FMTSTR_RADIO_SW_OFF_ID = 22, + FMTSTR_RADIO_SW_ON_ID = 23, + FMTSTR_PWD_MISMATCH_ID = 24, + FMTSTR_FATAL_ERROR_ID = 25, + FMTSTR_AUTH_FAIL_ID = 26, + FMTSTR_ASSOC_FAIL_ID = 27, + FMTSTR_IBSS_FAIL_ID = 28, + FMTSTR_EXTAP_FAIL_ID = 29, + FMTSTR_MAX_ID +} log_fmtstr_id_t; + +#ifdef DONGLEOVERLAYS +typedef struct { + uint32 flags_idx; /* lower 8 bits: overlay index; upper 24 bits: flags */ + uint32 offset; /* offset into overlay region to write code */ + uint32 len; /* overlay code len */ + /* overlay code follows this struct */ +} wl_ioctl_overlay_t; +#endif /* DONGLEOVERLAYS */ + +/* 11k Neighbor Report element (unversioned, deprecated) */ +typedef struct nbr_element { + uint8 id; + uint8 len; + struct ether_addr bssid; + uint32 bssid_info; + uint8 reg; + uint8 channel; + uint8 phytype; + uint8 pad; +} nbr_element_t; + +#define WL_RRM_NBR_RPT_VER 1 +/* 11k Neighbor Report element */ +typedef struct nbr_rpt_elem { + uint8 version; + uint8 id; + uint8 len; + uint8 pad; + struct ether_addr bssid; + uint8 pad_1[2]; + uint32 bssid_info; + uint8 reg; + uint8 channel; + uint8 phytype; + uint8 pad_2; + wlc_ssid_t ssid; + uint8 bss_trans_preference; + uint8 pad_3[3]; +} nbr_rpt_elem_t; + +typedef enum event_msgs_ext_command { + EVENTMSGS_NONE = 0, + EVENTMSGS_SET_BIT = 1, + EVENTMSGS_RESET_BIT = 2, + EVENTMSGS_SET_MASK = 3 +} event_msgs_ext_command_t; + +#define EVENTMSGS_VER 1 +#define EVENTMSGS_EXT_STRUCT_SIZE OFFSETOF(eventmsgs_ext_t, mask[0]) + +/* len- for SET it would be mask size from the application to the firmware */ +/* for GET it would be actual firmware mask size */ +/* maxgetsize - is only used for GET. 
indicate max mask size that the */ +/* application can read from the firmware */ +typedef struct eventmsgs_ext +{ + uint8 ver; + uint8 command; + uint8 len; + uint8 maxgetsize; + uint8 mask[1]; +} eventmsgs_ext_t; + +typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_tput_params { + /* no of host dma descriptors programmed by the firmware before a commit */ + uint16 max_dma_descriptors; + + uint16 host_buf_len; /* length of host buffer */ + dmaaddr_t host_buf_addr; /* physical address for bus_throughput_buf */ +} BWL_POST_PACKED_STRUCT pcie_bus_tput_params_t; +typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_tput_stats { + uint16 time_taken; /* no of secs the test is run */ + uint16 nbytes_per_descriptor; /* no of bytes of data dma'ed per descriptor */ + + /* no of descriptors for which dma is successfully completed within the test time */ + uint32 count; +} BWL_POST_PACKED_STRUCT pcie_bus_tput_stats_t; + +#define MAX_ROAMOFFL_BSSID_NUM 100 + +typedef BWL_PRE_PACKED_STRUCT struct roamoffl_bssid_list { + int32 cnt; + struct ether_addr bssid[1]; +} BWL_POST_PACKED_STRUCT roamoffl_bssid_list_t; + +/* no default structure packing */ +#include <packed_section_end.h> + +typedef struct keepalives_max_idle { + uint16 keepalive_count; /* number of keepalives per bss_max_idle period */ + uint8 mkeepalive_index; /* mkeepalive_index for keepalive frame to be used */ + uint8 PAD; /* to align next field */ + uint16 max_interval; /* seconds */ +} keepalives_max_idle_t; + +#define PM_IGNORE_BCMC_PROXY_ARP (1 << 0) +#define PM_IGNORE_BCMC_ALL_DMS_ACCEPTED (1 << 1) + +/* require strict packing */ +#include <packed_section_start.h> + +/* ##### Power Stats section ##### */ + +#define WL_PWRSTATS_VERSION 2 + +/* Input structure for pwrstats IOVAR */ +typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats_query { + uint16 length; /* Number of entries in type array. */ + uint16 type[1]; /* Types (tags) to retrieve. + * Length 0 (no types) means get all. + */ +} BWL_POST_PACKED_STRUCT wl_pwrstats_query_t; + +/* This structure is for version 2; version 1 will be deprecated by FW */ +typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats { + uint16 version; /* Version = 2 is TLV format */ + uint16 length; /* Length of entire structure */ + uint8 data[1]; /* TLV data, a series of structures, + * each starting with type and length. + * + * Padded as necessary so each section + * starts on a 4-byte boundary. + * + * Both type and len are uint16, but the + * upper nibble of length is reserved so + * valid len values are 0-4095. 
+ */ +} BWL_POST_PACKED_STRUCT wl_pwrstats_t; +#define WL_PWR_STATS_HDRLEN OFFSETOF(wl_pwrstats_t, data) + +/* Type values for the data section */ +#define WL_PWRSTATS_TYPE_PHY 0 /* struct wl_pwr_phy_stats */ +#define WL_PWRSTATS_TYPE_SCAN 1 /* struct wl_pwr_scan_stats */ +#define WL_PWRSTATS_TYPE_USB_HSIC 2 /* struct wl_pwr_usb_hsic_stats */ +#define WL_PWRSTATS_TYPE_PM_AWAKE1 3 /* struct wl_pwr_pm_awake_stats_v1 */ +#define WL_PWRSTATS_TYPE_CONNECTION 4 /* struct wl_pwr_connect_stats; assoc and key-exch time */ +#define WL_PWRSTATS_TYPE_PCIE 6 /* struct wl_pwr_pcie_stats */ +#define WL_PWRSTATS_TYPE_PM_AWAKE2 7 /* struct wl_pwr_pm_awake_stats_v2 */ + +/* Bits for wake reasons */ +#define WLC_PMD_WAKE_SET 0x1 +#define WLC_PMD_PM_AWAKE_BCN 0x2 +#define WLC_PMD_BTA_ACTIVE 0x4 +#define WLC_PMD_SCAN_IN_PROGRESS 0x8 +#define WLC_PMD_RM_IN_PROGRESS 0x10 +#define WLC_PMD_AS_IN_PROGRESS 0x20 +#define WLC_PMD_PM_PEND 0x40 +#define WLC_PMD_PS_POLL 0x80 +#define WLC_PMD_CHK_UNALIGN_TBTT 0x100 +#define WLC_PMD_APSD_STA_UP 0x200 +#define WLC_PMD_TX_PEND_WAR 0x400 +#define WLC_PMD_GPTIMER_STAY_AWAKE 0x800 +#define WLC_PMD_PM2_RADIO_SOFF_PEND 0x2000 +#define WLC_PMD_NON_PRIM_STA_UP 0x4000 +#define WLC_PMD_AP_UP 0x8000 + +typedef BWL_PRE_PACKED_STRUCT struct wlc_pm_debug { + uint32 timestamp; /* timestamp in millisecond */ + uint32 reason; /* reason(s) for staying awake */ +} BWL_POST_PACKED_STRUCT wlc_pm_debug_t; + +/* WL_PWRSTATS_TYPE_PM_AWAKE1 structures (for 6.25 firmware) */ +#define WLC_STA_AWAKE_STATES_MAX_V1 30 +#define WLC_PMD_EVENT_MAX_V1 32 +/* Data sent as part of pwrstats IOVAR (and EXCESS_PM_WAKE event) */ +typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data_v1 { + uint32 curr_time; /* ms */ + uint32 hw_macc; /* HW maccontrol */ + uint32 sw_macc; /* SW maccontrol */ + uint32 pm_dur; /* Total sleep time in PM, msecs */ + uint32 mpc_dur; /* Total sleep time in MPC, msecs */ + + /* int32 drifts = remote - local; +ve drift => local-clk slow */ + int32 last_drift; /* Most recent TSF drift from beacon */ + int32 min_drift; /* Min TSF drift from beacon in magnitude */ + int32 max_drift; /* Max TSF drift from beacon in magnitude */ + + uint32 avg_drift; /* Avg TSF drift from beacon */ + + /* Wake history tracking */ + uint8 pmwake_idx; /* for stepping through pm_state */ + wlc_pm_debug_t pm_state[WLC_STA_AWAKE_STATES_MAX_V1]; /* timestamped wake bits */ + uint32 pmd_event_wake_dur[WLC_PMD_EVENT_MAX_V1]; /* cumulative usecs per wake reason */ + uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */ +} BWL_POST_PACKED_STRUCT pm_awake_data_v1_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats_v1 { + uint16 type; /* WL_PWRSTATS_TYPE_PM_AWAKE */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + pm_awake_data_v1_t awake_data; + uint32 frts_time; /* Cumulative ms spent in frts since driver load */ + uint32 frts_end_cnt; /* No of times frts ended since driver load */ +} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_v1_t; + +/* WL_PWRSTATS_TYPE_PM_AWAKE2 structures */ +/* Data sent as part of pwrstats IOVAR */ +typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data_v2 { + uint32 curr_time; /* ms */ + uint32 hw_macc; /* HW maccontrol */ + uint32 sw_macc; /* SW maccontrol */ + uint32 pm_dur; /* Total sleep time in PM, msecs */ + uint32 mpc_dur; /* Total sleep time in MPC, msecs */ + + /* int32 drifts = remote - local; +ve drift => local-clk slow */ + int32 last_drift; /* Most recent TSF drift from beacon */ + int32 min_drift; /* Min TSF drift from beacon in 
magnitude */ + int32 max_drift; /* Max TSF drift from beacon in magnitude */ + + uint32 avg_drift; /* Avg TSF drift from beacon */ + + /* Wake history tracking */ + + /* pmstate array (type wlc_pm_debug_t) start offset */ + uint16 pm_state_offset; + /* pmstate number of array entries */ + uint16 pm_state_len; + + /* array (type uint32) start offset */ + uint16 pmd_event_wake_dur_offset; + /* pmd_event_wake_dur number of array entries */ + uint16 pmd_event_wake_dur_len; + + uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */ + uint8 pmwake_idx; /* for stepping through pm_state */ + uint8 pad[3]; + uint32 frts_time; /* Cumulative ms spent in frts since driver load */ + uint32 frts_end_cnt; /* No of times frts ended since driver load */ +} BWL_POST_PACKED_STRUCT pm_awake_data_v2_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats_v2 { + uint16 type; /* WL_PWRSTATS_TYPE_PM_AWAKE */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + pm_awake_data_v2_t awake_data; +} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_v2_t; + +/* Original bus structure is for HSIC */ +typedef BWL_PRE_PACKED_STRUCT struct bus_metrics { + uint32 suspend_ct; /* suspend count */ + uint32 resume_ct; /* resume count */ + uint32 disconnect_ct; /* disconnect count */ + uint32 reconnect_ct; /* reconnect count */ + uint32 active_dur; /* msecs in bus, usecs for user */ + uint32 suspend_dur; /* msecs in bus, usecs for user */ + uint32 disconnect_dur; /* msecs in bus, usecs for user */ +} BWL_POST_PACKED_STRUCT bus_metrics_t; + +/* Bus interface info for USB/HSIC */ +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_usb_hsic_stats { + uint16 type; /* WL_PWRSTATS_TYPE_USB_HSIC */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + bus_metrics_t hsic; /* stats from hsic bus driver */ +} BWL_POST_PACKED_STRUCT wl_pwr_usb_hsic_stats_t; + +typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_metrics { + uint32 d3_suspend_ct; /* suspend count */ + uint32 d0_resume_ct; /* resume count */ + uint32 perst_assrt_ct; /* PERST# assert count */ + uint32 perst_deassrt_ct; /* PERST# de-assert count */ + uint32 active_dur; /* msecs */ + uint32 d3_suspend_dur; /* msecs */ + uint32 perst_dur; /* msecs */ + uint32 l0_cnt; /* L0 entry count */ + uint32 l0_usecs; /* L0 duration in usecs */ + uint32 l1_cnt; /* L1 entry count */ + uint32 l1_usecs; /* L1 duration in usecs */ + uint32 l1_1_cnt; /* L1_1ss entry count */ + uint32 l1_1_usecs; /* L1_1ss duration in usecs */ + uint32 l1_2_cnt; /* L1_2ss entry count */ + uint32 l1_2_usecs; /* L1_2ss duration in usecs */ + uint32 l2_cnt; /* L2 entry count */ + uint32 l2_usecs; /* L2 duration in usecs */ + uint32 timestamp; /* Timestamp on when stats are collected */ + uint32 num_h2d_doorbell; /* # of doorbell interrupts - h2d */ + uint32 num_d2h_doorbell; /* # of doorbell interrupts - d2h */ + uint32 num_submissions; /* # of submissions */ + uint32 num_completions; /* # of completions */ + uint32 num_rxcmplt; /* # of rx completions */ + uint32 num_rxcmplt_drbl; /* of drbl interrupts for rx complt. */ + uint32 num_txstatus; /* # of tx completions */ + uint32 num_txstatus_drbl; /* of drbl interrupts for tx complt. 
*/ + uint32 ltr_active_ct; /* # of times chip went to LTR ACTIVE */ + uint32 ltr_active_dur; /* # of msecs chip was in LTR ACTIVE */ + uint32 ltr_sleep_ct; /* # of times chip went to LTR SLEEP */ + uint32 ltr_sleep_dur; /* # of msecs chip was in LTR SLEEP */ + uint32 deepsleep_count; /* # of times chip went to deepsleep */ + uint32 deepsleep_dur; /* # of msecs chip was in deepsleep */ +} BWL_POST_PACKED_STRUCT pcie_bus_metrics_t; + +/* Bus interface info for PCIE */ +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pcie_stats { + uint16 type; /* WL_PWRSTATS_TYPE_PCIE */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + pcie_bus_metrics_t pcie; /* stats from pcie bus driver */ +} BWL_POST_PACKED_STRUCT wl_pwr_pcie_stats_t; + +/* Scan information history per category */ +typedef BWL_PRE_PACKED_STRUCT struct scan_data { + uint32 count; /* Number of scans performed */ + uint32 dur; /* Total time (in us) used */ +} BWL_POST_PACKED_STRUCT scan_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_scan_stats { + uint16 type; /* WL_PWRSTATS_TYPE_SCAN */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + /* Scan history */ + scan_data_t user_scans; /* User-requested scans: (i/e/p)scan */ + scan_data_t assoc_scans; /* Scans initiated by association requests */ + scan_data_t roam_scans; /* Scans initiated by the roam engine */ + scan_data_t pno_scans[8]; /* For future PNO bucketing (BSSID, SSID, etc) */ + scan_data_t other_scans; /* Scan engine usage not assigned to the above */ +} BWL_POST_PACKED_STRUCT wl_pwr_scan_stats_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_connect_stats { + uint16 type; /* WL_PWRSTATS_TYPE_CONNECTION */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + /* Connection (Association + Key exchange) data */ + uint32 count; /* Number of connections performed */ + uint32 dur; /* Total time (in ms) used */ +} BWL_POST_PACKED_STRUCT wl_pwr_connect_stats_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_phy_stats { + uint16 type; /* WL_PWRSTATS_TYPE_PHY */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + uint32 tx_dur; /* TX Active duration in us */ + uint32 rx_dur; /* RX Active duration in us */ +} BWL_POST_PACKED_STRUCT wl_pwr_phy_stats_t; + + +/* ##### End of Power Stats section ##### */ + +/* IPv4 ARP offloads for ndis context */ +BWL_PRE_PACKED_STRUCT struct hostip_id { + struct ipv4_addr ipa; + uint8 id; +} BWL_POST_PACKED_STRUCT; + +/* Return values */ +#define ND_REPLY_PEER 0x1 /* Reply was sent to service NS request from peer */ +#define ND_REQ_SINK 0x2 /* Input packet should be discarded */ +#define ND_FORCE_FORWARD 0x3 /* For the dongle to forward req to HOST */ + +/* Neighbor Solicitation Response Offload IOVAR param */ +typedef BWL_PRE_PACKED_STRUCT struct nd_param { + struct ipv6_addr host_ip[2]; + struct ipv6_addr solicit_ip; + struct ipv6_addr remote_ip; + uint8 host_mac[ETHER_ADDR_LEN]; + uint32 offload_id; +} BWL_POST_PACKED_STRUCT nd_param_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pfn_roam_thresh { + uint32 pfn_alert_thresh; /* time in ms */ + uint32 roam_alert_thresh; /* time in ms */ +} BWL_POST_PACKED_STRUCT wl_pfn_roam_thresh_t; + + +/* Reasons for wl_pmalert_t */ +#define PM_DUR_EXCEEDED (1<<0) +#define MPC_DUR_EXCEEDED (1<<1) +#define ROAM_ALERT_THRESH_EXCEEDED (1<<2) +#define PFN_ALERT_THRESH_EXCEEDED (1<<3) +#define CONST_AWAKE_DUR_ALERT (1<<4) +#define CONST_AWAKE_DUR_RECOVERY (1<<5) + +#define MIN_PM_ALERT_LEN 9 + +/* Data sent in EXCESS_PM_WAKE event */ +#define WL_PM_ALERT_VERSION 3 + +#define MAX_P2P_BSS_DTIM_PRD 4 + 
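Editor's illustration (not part of the patch): both wl_pwrstats_t above and the wl_pmalert_t declared just below carry their payload as a packed series of records that each begin with a uint16 type and a uint16 len, padded so each record starts on a 4-byte boundary, with the upper nibble of len reserved. A minimal host-side walker could look like the sketch below; it assumes that len covers the whole record including its own type/len header and that the buffer byte order matches the host, neither of which this hunk states explicitly.

/* Sketch: iterate the TLV records of a wl_pwrstats_t reply fetched via the
 * "pwrstats" IOVAR (fetch path not shown). Assumptions are noted above.
 */
static void pwrstats_walk_example(const wl_pwrstats_t *ps)
{
	const uint8 *p = ps->data;
	const uint8 *end = (const uint8 *)ps + ps->length;

	while (p + 2 * sizeof(uint16) <= end) {
		uint16 type = ((const uint16 *)(const void *)p)[0];
		uint16 len = ((const uint16 *)(const void *)p)[1] & 0x0FFF; /* upper nibble reserved */

		if (len < 2 * sizeof(uint16) || p + len > end)
			break; /* malformed record; stop walking */

		switch (type) {
		case WL_PWRSTATS_TYPE_PCIE:
			/* record is a wl_pwr_pcie_stats_t */
			break;
		case WL_PWRSTATS_TYPE_SCAN:
			/* record is a wl_pwr_scan_stats_t */
			break;
		default:
			break; /* unknown types are skipped, not fatal */
		}
		p += (len + 3) & ~(uint32)3; /* advance to next 4-byte-aligned record */
	}
}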
+/* This structure is for version 3; version 2 will be deprecated by FW */ +typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert { + uint16 version; /* Version = 3 is TLV format */ + uint16 length; /* Length of entire structure */ + uint32 reasons; /* reason(s) for pm_alert */ + uint8 data[1]; /* TLV data, a series of structures, + * each starting with type and length. + * + * Padded as necessary so each section + * starts on a 4-byte boundary. + * + * Both type and len are uint16, but the + * upper nibble of length is reserved so + * valid len values are 0-4095. + */ +} BWL_POST_PACKED_STRUCT wl_pmalert_t; + +/* Type values for the data section */ +#define WL_PMALERT_FIXED 0 /* struct wl_pmalert_fixed_t, fixed fields */ +#define WL_PMALERT_PMSTATE 1 /* struct wl_pmalert_pmstate_t, variable */ +#define WL_PMALERT_EVENT_DUR 2 /* struct wl_pmalert_event_dur_t, variable */ +#define WL_PMALERT_UCODE_DBG 3 /* struct wl_pmalert_ucode_dbg_t, variable */ +#define WL_PMALERT_PS_ALLOWED_HIST 4 /* struct wl_pmalert_ps_allowed_history, variable */ +#define WL_PMALERT_EXT_UCODE_DBG 5 /* struct wl_pmalert_ext_ucode_dbg_t, variable */ +#define WL_PMALERT_EPM_START_EVENT_DUR 6 /* struct wl_pmalert_event_dur_t, variable */ + +typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_fixed { + uint16 type; /* WL_PMALERT_FIXED */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + uint32 prev_stats_time; /* msecs */ + uint32 curr_time; /* ms */ + uint32 prev_pm_dur; /* msecs */ + uint32 pm_dur; /* Total sleep time in PM, msecs */ + uint32 prev_mpc_dur; /* msecs */ + uint32 mpc_dur; /* Total sleep time in MPC, msecs */ + uint32 hw_macc; /* HW maccontrol */ + uint32 sw_macc; /* SW maccontrol */ + + /* int32 drifts = remote - local; +ve drift -> local-clk slow */ + int32 last_drift; /* Most recent TSF drift from beacon */ + int32 min_drift; /* Min TSF drift from beacon in magnitude */ + int32 max_drift; /* Max TSF drift from beacon in magnitude */ + + uint32 avg_drift; /* Avg TSF drift from beacon */ + uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */ + uint32 frts_time; /* Cumulative ms spent in data frts since driver load */ + uint32 frts_end_cnt; /* No of times frts ended since driver load */ + uint32 prev_frts_dur; /* Data frts duration at start of pm-period */ + uint32 cal_dur; /* Cumulative ms spent in calibration */ + uint32 prev_cal_dur; /* cal duration at start of pm-period */ +} BWL_POST_PACKED_STRUCT wl_pmalert_fixed_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_pmstate { + uint16 type; /* WL_PMALERT_PMSTATE */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + uint8 pmwake_idx; /* for stepping through pm_state */ + uint8 pad[3]; + /* Array of pmstate; len of array is based on tlv len */ + wlc_pm_debug_t pmstate[1]; +} BWL_POST_PACKED_STRUCT wl_pmalert_pmstate_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_event_dur { + uint16 type; /* WL_PMALERT_EVENT_DUR */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + + /* Array of event_dur, len of array is based on tlv len */ + uint32 event_dur[1]; +} BWL_POST_PACKED_STRUCT wl_pmalert_event_dur_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg { + uint16 type; /* WL_PMALERT_UCODE_DBG */ + uint16 len; /* Up to 4K-1, top 4 bits are reserved */ + uint32 macctrl; + uint16 m_p2p_hps; + uint32 psm_brc; + uint32 ifsstat; + uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD]; + uint32 psmdebug[20]; + uint32 phydebug[20]; +} BWL_POST_PACKED_STRUCT wl_pmalert_ucode_dbg_t; + + +/* Structures and 
constants used for "vndr_ie" IOVar interface */ +#define VNDR_IE_CMD_LEN 4 /* length of the set command string: + * "add", "del" (+ NUL) + */ + +#define VNDR_IE_INFO_HDR_LEN (sizeof(uint32)) + +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */ + vndr_ie_t vndr_ie_data; /* vendor IE data */ +} BWL_POST_PACKED_STRUCT vndr_ie_info_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + int iecount; /* number of entries in the vndr_ie_list[] array */ + vndr_ie_info_t vndr_ie_list[1]; /* variable size list of vndr_ie_info_t structs */ +} BWL_POST_PACKED_STRUCT vndr_ie_buf_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + char cmd[VNDR_IE_CMD_LEN]; /* vndr_ie IOVar set command : "add", "del" + NUL */ + vndr_ie_buf_t vndr_ie_buffer; /* buffer containing Vendor IE list information */ +} BWL_POST_PACKED_STRUCT vndr_ie_setbuf_t; + +/* tag_ID/length/value_buffer tuple */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint8 id; + uint8 len; + uint8 data[1]; +} BWL_POST_PACKED_STRUCT tlv_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */ + tlv_t ie_data; /* IE data */ +} BWL_POST_PACKED_STRUCT ie_info_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + int iecount; /* number of entries in the ie_list[] array */ + ie_info_t ie_list[1]; /* variable size list of ie_info_t structs */ +} BWL_POST_PACKED_STRUCT ie_buf_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + char cmd[VNDR_IE_CMD_LEN]; /* ie IOVar set command : "add" + NUL */ + ie_buf_t ie_buffer; /* buffer containing IE list information */ +} BWL_POST_PACKED_STRUCT ie_setbuf_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */ + uint8 id; /* IE type */ +} BWL_POST_PACKED_STRUCT ie_getbuf_t; + +/* structures used to define format of wps ie data from probe requests */ +/* passed up to applications via iovar "prbreq_wpsie" */ +typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_hdr { + struct ether_addr staAddr; + uint16 ieLen; +} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_hdr_t; + +typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data { + sta_prbreq_wps_ie_hdr_t hdr; + uint8 ieData[1]; +} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t; + +typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list { + uint32 totLen; + uint8 ieDataList[1]; +} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t; + + +#ifdef WLMEDIA_TXFAILEVENT +typedef BWL_PRE_PACKED_STRUCT struct { + char dest[ETHER_ADDR_LEN]; /* destination MAC */ + uint8 prio; /* Packet Priority */ + uint8 flags; /* Flags */ + uint32 tsf_l; /* TSF timer low */ + uint32 tsf_h; /* TSF timer high */ + uint16 rates; /* Main Rates */ + uint16 txstatus; /* TX Status */ +} BWL_POST_PACKED_STRUCT txfailinfo_t; +#endif /* WLMEDIA_TXFAILEVENT */ + +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 flags; + chanspec_t chanspec; /* txpwr report for this channel */ + chanspec_t local_chanspec; /* channel on which we are associated */ + uint8 local_max; /* local max according to the AP */ + uint8 local_constraint; /* local constraint according to the AP */ + int8 antgain[2]; /* Ant gain for each band - from SROM */ + uint8 rf_cores; /* count of RF Cores being reported */ + uint8 est_Pout[4]; /* Latest tx power out estimate per RF chain */ + uint8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain w/o adjustment */ + uint8 est_Pout_cck; /* Latest CCK tx power out estimate */ + uint8 tx_power_max[4]; /* Maximum target power among 
all rates */ + uint tx_power_max_rate_ind[4]; /* Index of the rate with the max target power */ + int8 sar; /* SAR limit for display by wl executable */ + int8 channel_bandwidth; /* 20, 40 or 80 MHz bandwidth? */ + uint8 version; /* Version of the data format wlu <--> driver */ + uint8 display_core; /* Displayed curpower core */ + int8 target_offsets[4]; /* Target power offsets for current rate per core */ + uint32 last_tx_ratespec; /* Ratespec for last transmission */ + uint user_target; /* user limit */ + uint32 ppr_len; /* length of each ppr serialization buffer */ + int8 SARLIMIT[MAX_STREAMS_SUPPORTED]; + uint8 pprdata[1]; /* ppr serialization buffer */ +} BWL_POST_PACKED_STRUCT tx_pwr_rpt_t; + +typedef BWL_PRE_PACKED_STRUCT struct { + struct ipv4_addr ipv4_addr; + struct ether_addr nexthop; +} BWL_POST_PACKED_STRUCT ibss_route_entry_t; +typedef BWL_PRE_PACKED_STRUCT struct { + uint32 num_entry; + ibss_route_entry_t route_entry[1]; +} BWL_POST_PACKED_STRUCT ibss_route_tbl_t; + +#define MAX_IBSS_ROUTE_TBL_ENTRY 64 + +#define TXPWR_TARGET_VERSION 0 +typedef BWL_PRE_PACKED_STRUCT struct { + int32 version; /* version number */ + chanspec_t chanspec; /* txpwr report for this channel */ + int8 txpwr[WL_STA_ANT_MAX]; /* Max tx target power, in qdb */ + uint8 rf_cores; /* count of RF Cores being reported */ +} BWL_POST_PACKED_STRUCT txpwr_target_max_t; + +#define BSS_PEER_INFO_PARAM_CUR_VER 0 +/* Input structure for IOV_BSS_PEER_INFO */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 version; + struct ether_addr ea; /* peer MAC address */ +} BWL_POST_PACKED_STRUCT bss_peer_info_param_t; + +#define BSS_PEER_INFO_CUR_VER 0 + +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 version; + struct ether_addr ea; + int32 rssi; + uint32 tx_rate; /* current tx rate */ + uint32 rx_rate; /* current rx rate */ + wl_rateset_t rateset; /* rateset in use */ + uint32 age; /* age in seconds */ +} BWL_POST_PACKED_STRUCT bss_peer_info_t; + +#define BSS_PEER_LIST_INFO_CUR_VER 0 + +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 version; + uint16 bss_peer_info_len; /* length of bss_peer_info_t */ + uint32 count; /* number of peer info */ + bss_peer_info_t peer_info[1]; /* peer info */ +} BWL_POST_PACKED_STRUCT bss_peer_list_info_t; + +#define BSS_PEER_LIST_INFO_FIXED_LEN OFFSETOF(bss_peer_list_info_t, peer_info) + +#define AIBSS_BCN_FORCE_CONFIG_VER_0 0 + +/* structure used to configure AIBSS beacon force xmit */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 version; + uint16 len; + uint32 initial_min_bcn_dur; /* dur in ms to check a bcn in bcn_flood period */ + uint32 min_bcn_dur; /* dur in ms to check a bcn after bcn_flood period */ + uint32 bcn_flood_dur; /* Initial bcn xmit period in ms */ +} BWL_POST_PACKED_STRUCT aibss_bcn_force_config_t; + +#define AIBSS_TXFAIL_CONFIG_VER_0 0 +#define AIBSS_TXFAIL_CONFIG_VER_1 1 +#define AIBSS_TXFAIL_CONFIG_CUR_VER AIBSS_TXFAIL_CONFIG_VER_1 + +/* structure used to configure aibss tx fail event */ +typedef BWL_PRE_PACKED_STRUCT struct { + uint16 version; + uint16 len; + uint32 bcn_timeout; /* dur in seconds to receive 1 bcn */ + uint32 max_tx_retry; /* no of consecutive no acks to send txfail event */ + uint32 max_atim_failure; /* no of consecutive atim failure */ +} BWL_POST_PACKED_STRUCT aibss_txfail_config_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_aibss_if { + uint16 version; + uint16 len; + uint32 flags; + struct ether_addr addr; + chanspec_t chspec; +} BWL_POST_PACKED_STRUCT wl_aibss_if_t; + +typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_entry { + struct 
ipv4_addr ip_addr; + struct ether_addr nexthop; +} BWL_POST_PACKED_STRUCT wlc_ipfo_route_entry_t; + +typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_tbl { + uint32 num_entry; + wlc_ipfo_route_entry_t route_entry[1]; +} BWL_POST_PACKED_STRUCT wlc_ipfo_route_tbl_t; + +#define WL_IPFO_ROUTE_TBL_FIXED_LEN 4 +#define WL_MAX_IPFO_ROUTE_TBL_ENTRY 64 + +/* no strict structure packing */ +#include <packed_section_end.h> + + /* Global ASSERT Logging */ +#define ASSERTLOG_CUR_VER 0x0100 +#define MAX_ASSRTSTR_LEN 64 + + typedef struct assert_record { + uint32 time; + uint8 seq_num; + char str[MAX_ASSRTSTR_LEN]; + } assert_record_t; + + typedef struct assertlog_results { + uint16 version; + uint16 record_len; + uint32 num; + assert_record_t logs[1]; + } assertlog_results_t; + +#define LOGRRC_FIX_LEN 8 +#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type)) + + + /* chanim acs record */ + typedef struct { + bool valid; + uint8 trigger; + chanspec_t selected_chspc; + int8 bgnoise; + uint32 glitch_cnt; + uint8 ccastats; + uint8 chan_idle; + uint timestamp; + } chanim_acs_record_t; + + typedef struct { + chanim_acs_record_t acs_record[CHANIM_ACS_RECORD]; + uint8 count; + uint timestamp; + } wl_acs_record_t; + + typedef struct chanim_stats { + uint32 glitchcnt; /* normalized as per second count */ + uint32 badplcp; /* normalized as per second count */ + uint8 ccastats[CCASTATS_MAX]; /* normalized as 0-255 */ + int8 bgnoise; /* background noise level (in dBm) */ + chanspec_t chanspec; /* ctrl chanspec of the interface */ + uint32 timestamp; /* time stamp at which the stats are collected */ + uint32 bphy_glitchcnt; /* normalized as per second count */ + uint32 bphy_badplcp; /* normalized as per second count */ + uint8 chan_idle; /* normalized as 0~255 */ + } chanim_stats_t; + +#define WL_CHANIM_STATS_VERSION 2 + +typedef struct { + uint32 buflen; + uint32 version; + uint32 count; + chanim_stats_t stats[1]; +} wl_chanim_stats_t; + +#define WL_CHANIM_STATS_FIXED_LEN OFFSETOF(wl_chanim_stats_t, stats) + +/* Noise measurement metrics. 
*/ +#define NOISE_MEASURE_KNOISE 0x1 + +/* scb probe parameter */ +typedef struct { + uint32 scb_timeout; + uint32 scb_activity_time; + uint32 scb_max_probe; +} wl_scb_probe_t; + +/* structure/defines for selective mgmt frame (smf) stats support */ + +#define SMFS_VERSION 1 +/* selected mgmt frame (smf) stats element */ +typedef struct wl_smfs_elem { + uint32 count; + uint16 code; /* SC or RC code */ +} wl_smfs_elem_t; + +typedef struct wl_smf_stats { + uint32 version; + uint16 length; /* reserved for future usage */ + uint8 type; + uint8 codetype; + uint32 ignored_cnt; + uint32 malformed_cnt; + uint32 count_total; /* total count, including the interested group */ + wl_smfs_elem_t elem[1]; +} wl_smf_stats_t; + +#define WL_SMFSTATS_FIXED_LEN OFFSETOF(wl_smf_stats_t, elem) + +enum { + SMFS_CODETYPE_SC, + SMFS_CODETYPE_RC +}; + +typedef enum smfs_type { + SMFS_TYPE_AUTH, + SMFS_TYPE_ASSOC, + SMFS_TYPE_REASSOC, + SMFS_TYPE_DISASSOC_TX, + SMFS_TYPE_DISASSOC_RX, + SMFS_TYPE_DEAUTH_TX, + SMFS_TYPE_DEAUTH_RX, + SMFS_TYPE_MAX +} smfs_type_t; + +#ifdef PHYMON + +#define PHYMON_VERSION 1 + +typedef struct wl_phycal_core_state { + /* Tx IQ/LO calibration coeffs */ + int16 tx_iqlocal_a; + int16 tx_iqlocal_b; + int8 tx_iqlocal_ci; + int8 tx_iqlocal_cq; + int8 tx_iqlocal_di; + int8 tx_iqlocal_dq; + int8 tx_iqlocal_ei; + int8 tx_iqlocal_eq; + int8 tx_iqlocal_fi; + int8 tx_iqlocal_fq; + + /* Rx IQ calibration coeffs */ + int16 rx_iqcal_a; + int16 rx_iqcal_b; + + uint8 tx_iqlocal_pwridx; /* Tx Power Index for Tx IQ/LO calibration */ + uint32 papd_epsilon_table[64]; /* PAPD epsilon table */ + int16 papd_epsilon_offset; /* PAPD epsilon offset */ + uint8 curr_tx_pwrindex; /* Tx power index */ + int8 idle_tssi; /* Idle TSSI */ + int8 est_tx_pwr; /* Estimated Tx Power (dB) */ + int8 est_rx_pwr; /* Estimated Rx Power (dB) from RSSI */ + uint16 rx_gaininfo; /* Rx gain applied on last Rx pkt */ + uint16 init_gaincode; /* initgain required for ACI */ + int8 estirr_tx; + int8 estirr_rx; + +} wl_phycal_core_state_t; + +typedef struct wl_phycal_state { + int version; + int8 num_phy_cores; /* number of cores */ + int8 curr_temperature; /* on-chip temperature sensor reading */ + chanspec_t chspec; /* chanspec for this state */ + bool aci_state; /* ACI state: ON/OFF */ + uint16 crsminpower; /* crsminpower required for ACI */ + uint16 crsminpowerl; /* crsminpowerl required for ACI */ + uint16 crsminpoweru; /* crsminpoweru required for ACI */ + wl_phycal_core_state_t phycal_core[1]; +} wl_phycal_state_t; + +#define WL_PHYCAL_STAT_FIXED_LEN OFFSETOF(wl_phycal_state_t, phycal_core) +#endif /* PHYMON */ + +/* discovery state */ +typedef struct wl_p2p_disc_st { + uint8 state; /* see state */ + chanspec_t chspec; /* valid in listen state */ + uint16 dwell; /* valid in listen state, in ms */ +} wl_p2p_disc_st_t; + +/* scan request */ +typedef struct wl_p2p_scan { + uint8 type; /* 'S' for WLC_SCAN, 'E' for "escan" */ + uint8 reserved[3]; + /* scan or escan parms... 
*/ +} wl_p2p_scan_t; + +/* i/f request */ +typedef struct wl_p2p_if { + struct ether_addr addr; + uint8 type; /* see i/f type */ + chanspec_t chspec; /* for p2p_ifadd GO */ +} wl_p2p_if_t; + +/* i/f query */ +typedef struct wl_p2p_ifq { + uint bsscfgidx; + char ifname[BCM_MSG_IFNAME_MAX]; +} wl_p2p_ifq_t; + +/* OppPS & CTWindow */ +typedef struct wl_p2p_ops { + uint8 ops; /* 0: disable 1: enable */ + uint8 ctw; /* >= 10 */ +} wl_p2p_ops_t; + +/* absence and presence request */ +typedef struct wl_p2p_sched_desc { + uint32 start; + uint32 interval; + uint32 duration; + uint32 count; /* see count */ +} wl_p2p_sched_desc_t; + +typedef struct wl_p2p_sched { + uint8 type; /* see schedule type */ + uint8 action; /* see schedule action */ + uint8 option; /* see schedule option */ + wl_p2p_sched_desc_t desc[1]; +} wl_p2p_sched_t; + +typedef struct wl_p2p_wfds_hash { + uint32 advt_id; + uint16 nw_cfg_method; + uint8 wfds_hash[6]; + uint8 name_len; + uint8 service_name[MAX_WFDS_SVC_NAME_LEN]; +} wl_p2p_wfds_hash_t; + +typedef struct wl_bcmdcs_data { + uint reason; + chanspec_t chspec; +} wl_bcmdcs_data_t; + + +/* NAT configuration */ +typedef struct { + uint32 ipaddr; /* interface ip address */ + uint32 ipaddr_mask; /* interface ip address mask */ + uint32 ipaddr_gateway; /* gateway ip address */ + uint8 mac_gateway[6]; /* gateway mac address */ + uint32 ipaddr_dns; /* DNS server ip address, valid only for public if */ + uint8 mac_dns[6]; /* DNS server mac address, valid only for public if */ + uint8 GUID[38]; /* interface GUID */ +} nat_if_info_t; + +typedef struct { + uint op; /* operation code */ + bool pub_if; /* set for public if, clear for private if */ + nat_if_info_t if_info; /* interface info */ +} nat_cfg_t; + +typedef struct { + int state; /* NAT state returned */ +} nat_state_t; + + +#define BTA_STATE_LOG_SZ 64 + +/* BTAMP state machine states */ +enum { + HCIReset = 1, + HCIReadLocalAMPInfo, + HCIReadLocalAMPASSOC, + HCIWriteRemoteAMPASSOC, + HCICreatePhysicalLink, + HCIAcceptPhysicalLinkRequest, + HCIDisconnectPhysicalLink, + HCICreateLogicalLink, + HCIAcceptLogicalLink, + HCIDisconnectLogicalLink, + HCILogicalLinkCancel, + HCIAmpStateChange, + HCIWriteLogicalLinkAcceptTimeout +}; + +typedef struct flush_txfifo { + uint32 txfifobmp; + uint32 hwtxfifoflush; + struct ether_addr ea; +} flush_txfifo_t; + +enum { + SPATIAL_MODE_2G_IDX = 0, + SPATIAL_MODE_5G_LOW_IDX, + SPATIAL_MODE_5G_MID_IDX, + SPATIAL_MODE_5G_HIGH_IDX, + SPATIAL_MODE_5G_UPPER_IDX, + SPATIAL_MODE_MAX_IDX +}; + +#define WLC_TXCORE_MAX 4 /* max number of txcores supported */ +#define WLC_SUBBAND_MAX 4 /* max number of sub-bands supported */ +typedef struct { + uint8 band2g[WLC_TXCORE_MAX]; + uint8 band5g[WLC_SUBBAND_MAX][WLC_TXCORE_MAX]; +} sar_limit_t; + +#define WLC_TXCAL_CORE_MAX 2 /* max number of txcores supported for txcal */ +#define MAX_NUM_TXCAL_MEAS 128 +#define MAX_NUM_PWR_STEP 40 +#define TXCAL_ROUNDING_FIX 1 +typedef struct wl_txcal_meas { +#ifdef TXCAL_ROUNDING_FIX + uint16 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS]; +#else + uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS]; +#endif /* TXCAL_ROUNDING_FIX */ + int16 pwr[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS]; + uint8 valid_cnt; +} wl_txcal_meas_t; + +typedef struct wl_txcal_power_tssi { + uint8 set_core; + uint8 channel; + int16 tempsense[WLC_TXCAL_CORE_MAX]; + int16 pwr_start[WLC_TXCAL_CORE_MAX]; + uint8 pwr_start_idx[WLC_TXCAL_CORE_MAX]; + uint8 num_entries[WLC_TXCAL_CORE_MAX]; + uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_PWR_STEP]; + bool gen_tbl; +} 
wl_txcal_power_tssi_t; + +/* IOVAR "mempool" parameter. Used to retrieve a list of memory pool statistics. */ +typedef struct wl_mempool_stats { + int num; /* Number of memory pools */ + bcm_mp_stats_t s[1]; /* Variable array of memory pool stats. */ +} wl_mempool_stats_t; + +typedef struct { + uint32 ipaddr; + uint32 ipaddr_netmask; + uint32 ipaddr_gateway; +} nwoe_ifconfig_t; + +/* Traffic management priority classes */ +typedef enum trf_mgmt_priority_class { + trf_mgmt_priority_low = 0, /* Maps to 802.1p BK */ + trf_mgmt_priority_medium = 1, /* Maps to 802.1p BE */ + trf_mgmt_priority_high = 2, /* Maps to 802.1p VI */ + trf_mgmt_priority_nochange = 3, /* do not update the priority */ + trf_mgmt_priority_invalid = (trf_mgmt_priority_nochange + 1) +} trf_mgmt_priority_class_t; + +/* Traffic management configuration parameters */ +typedef struct trf_mgmt_config { + uint32 trf_mgmt_enabled; /* 0 - disabled, 1 - enabled */ + uint32 flags; /* See TRF_MGMT_FLAG_xxx defines */ + uint32 host_ip_addr; /* My IP address to determine subnet */ + uint32 host_subnet_mask; /* My subnet mask */ + uint32 downlink_bandwidth; /* In units of kbps */ + uint32 uplink_bandwidth; /* In units of kbps */ + uint32 min_tx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; /* Minimum guaranteed tx bandwidth */ + uint32 min_rx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; /* Minimum guaranteed rx bandwidth */ +} trf_mgmt_config_t; + +/* Traffic management filter */ +typedef struct trf_mgmt_filter { + struct ether_addr dst_ether_addr; /* His L2 address */ + uint32 dst_ip_addr; /* His IP address */ + uint16 dst_port; /* His L4 port */ + uint16 src_port; /* My L4 port */ + uint16 prot; /* L4 protocol (only TCP or UDP) */ + uint16 flags; /* TBD. For now, this must be zero. */ + trf_mgmt_priority_class_t priority; /* Priority for filtered packets */ + uint32 dscp; /* DSCP */ +} trf_mgmt_filter_t; + +/* Traffic management filter list (variable length) */ +typedef struct trf_mgmt_filter_list { + uint32 num_filters; + trf_mgmt_filter_t filter[1]; +} trf_mgmt_filter_list_t; + +/* Traffic management global info used for all queues */ +typedef struct trf_mgmt_global_info { + uint32 maximum_bytes_per_second; + uint32 maximum_bytes_per_sampling_period; + uint32 total_bytes_consumed_per_second; + uint32 total_bytes_consumed_per_sampling_period; + uint32 total_unused_bytes_per_sampling_period; +} trf_mgmt_global_info_t; + +/* Traffic management shaping info per priority queue */ +typedef struct trf_mgmt_shaping_info { + uint32 gauranteed_bandwidth_percentage; + uint32 guaranteed_bytes_per_second; + uint32 guaranteed_bytes_per_sampling_period; + uint32 num_bytes_produced_per_second; + uint32 num_bytes_consumed_per_second; + uint32 num_queued_packets; /* Number of packets in queue */ + uint32 num_queued_bytes; /* Number of bytes in queue */ +} trf_mgmt_shaping_info_t; + +/* Traffic management shaping info array */ +typedef struct trf_mgmt_shaping_info_array { + trf_mgmt_global_info_t tx_global_shaping_info; + trf_mgmt_shaping_info_t tx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES]; + trf_mgmt_global_info_t rx_global_shaping_info; + trf_mgmt_shaping_info_t rx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES]; +} trf_mgmt_shaping_info_array_t; + + +/* Traffic management statistical counters */ +typedef struct trf_mgmt_stats { + uint32 num_processed_packets; /* Number of packets processed */ + uint32 num_processed_bytes; /* Number of bytes processed */ + uint32 num_discarded_packets; /* Number of packets discarded from queue */ +} trf_mgmt_stats_t; + +/* Traffic 
management statistics array */ +typedef struct trf_mgmt_stats_array { + trf_mgmt_stats_t tx_queue_stats[TRF_MGMT_MAX_PRIORITIES]; + trf_mgmt_stats_t rx_queue_stats[TRF_MGMT_MAX_PRIORITIES]; +} trf_mgmt_stats_array_t; + +typedef struct powersel_params { + /* LPC Params exposed via IOVAR */ + int32 tp_ratio_thresh; /* Throughput ratio threshold */ + uint8 rate_stab_thresh; /* Thresh for rate stability based on nupd */ + uint8 pwr_stab_thresh; /* Number of successes before power step down */ + uint8 pwr_sel_exp_time; /* Time lapse for expiry of database */ +} powersel_params_t; + +typedef struct lpc_params { + /* LPC Params exposed via IOVAR */ + uint8 rate_stab_thresh; /* Thresh for rate stability based on nupd */ + uint8 pwr_stab_thresh; /* Number of successes before power step down */ + uint8 lpc_exp_time; /* Time lapse for expiry of database */ + uint8 pwrup_slow_step; /* Step size for slow step up */ + uint8 pwrup_fast_step; /* Step size for fast step up */ + uint8 pwrdn_slow_step; /* Step size for slow step down */ +} lpc_params_t; + +/* tx pkt delay statistics */ +#define SCB_RETRY_SHORT_DEF 7 /* Default Short retry Limit */ +#define WLPKTDLY_HIST_NBINS 16 /* number of bins used in the Delay histogram */ + +/* structure to store per-AC delay statistics */ +typedef struct scb_delay_stats { + uint32 txmpdu_lost; /* number of MPDUs lost */ + uint32 txmpdu_cnt[SCB_RETRY_SHORT_DEF]; /* retry times histogram */ + uint32 delay_sum[SCB_RETRY_SHORT_DEF]; /* cumulative packet latency */ + uint32 delay_min; /* minimum packet latency observed */ + uint32 delay_max; /* maximum packet latency observed */ + uint32 delay_avg; /* packet latency average */ + uint32 delay_hist[WLPKTDLY_HIST_NBINS]; /* delay histogram */ +} scb_delay_stats_t; + +/* structure for txdelay event */ +typedef struct txdelay_event { + uint8 status; + int rssi; + chanim_stats_t chanim_stats; + scb_delay_stats_t delay_stats[AC_COUNT]; +} txdelay_event_t; + +/* structure for txdelay parameters */ +typedef struct txdelay_params { + uint16 ratio; /* Avg Txdelay Delta */ + uint8 cnt; /* Sample cnt */ + uint8 period; /* Sample period */ + uint8 tune; /* Debug */ +} txdelay_params_t; + +enum { + WNM_SERVICE_DMS = 1, + WNM_SERVICE_FMS = 2, + WNM_SERVICE_TFS = 3 +}; + +/* Definitions for WNM/NPS TCLAS */ +typedef struct wl_tclas { + uint8 user_priority; + uint8 fc_len; + dot11_tclas_fc_t fc; +} wl_tclas_t; + +#define WL_TCLAS_FIXED_SIZE OFFSETOF(wl_tclas_t, fc) + +typedef struct wl_tclas_list { + uint32 num; + wl_tclas_t tclas[1]; +} wl_tclas_list_t; + +/* Definitions for WNM/NPS Traffic Filter Service */ +typedef struct wl_tfs_req { + uint8 tfs_id; + uint8 tfs_actcode; + uint8 tfs_subelem_id; + uint8 send; +} wl_tfs_req_t; + +typedef struct wl_tfs_filter { + uint8 status; /* Status returned by the AP */ + uint8 tclas_proc; /* TCLAS processing value (0:and, 1:or) */ + uint8 tclas_cnt; /* count of all wl_tclas_t in tclas array */ + uint8 tclas[1]; /* VLA of wl_tclas_t */ +} wl_tfs_filter_t; +#define WL_TFS_FILTER_FIXED_SIZE OFFSETOF(wl_tfs_filter_t, tclas) + +typedef struct wl_tfs_fset { + struct ether_addr ea; /* Address of AP/STA involved with this filter set */ + uint8 tfs_id; /* TFS ID field chosen by STA host */ + uint8 status; /* Internal status TFS_STATUS_xxx */ + uint8 actcode; /* Action code DOT11_TFS_ACTCODE_xxx */ + uint8 token; /* Token used in last request frame */ + uint8 notify; /* Notify frame sent/received because of this set */ + uint8 filter_cnt; /* count of all wl_tfs_filter_t in filter array */ + uint8 filter[1]; /* 
VLA of wl_tfs_filter_t */ +} wl_tfs_fset_t; +#define WL_TFS_FSET_FIXED_SIZE OFFSETOF(wl_tfs_fset_t, filter) + +enum { + TFS_STATUS_DISABLED = 0, /* TFS filter set disabled by user */ + TFS_STATUS_DISABLING = 1, /* Empty request just sent to AP */ + TFS_STATUS_VALIDATED = 2, /* Filter set validated by AP (but maybe not enabled!) */ + TFS_STATUS_VALIDATING = 3, /* Filter set just sent to AP */ + TFS_STATUS_NOT_ASSOC = 4, /* STA not associated */ + TFS_STATUS_NOT_SUPPORT = 5, /* TFS not supported by AP */ + TFS_STATUS_DENIED = 6, /* Filter set refused by AP (=> all sets are disabled!) */ +}; + +typedef struct wl_tfs_status { + uint8 fset_cnt; /* count of all wl_tfs_fset_t in fset array */ + wl_tfs_fset_t fset[1]; /* VLA of wl_tfs_fset_t */ +} wl_tfs_status_t; + +typedef struct wl_tfs_set { + uint8 send; /* Immediately register registered sets on AP side */ + uint8 tfs_id; /* ID of a specific set (existing or new), or zero for all */ + uint8 actcode; /* Action code for this filter set */ + uint8 tclas_proc; /* TCLAS processing operator for this filter set */ +} wl_tfs_set_t; + +typedef struct wl_tfs_term { + uint8 del; /* Delete internal set once confirmation received */ + uint8 tfs_id; /* ID of a specific set (existing), or zero for all */ +} wl_tfs_term_t; + + +#define DMS_DEP_PROXY_ARP (1 << 0) + +/* Definitions for WNM/NPS Directed Multicast Service */ +enum { + DMS_STATUS_DISABLED = 0, /* DMS desc disabled by user */ + DMS_STATUS_ACCEPTED = 1, /* Request accepted by AP */ + DMS_STATUS_NOT_ASSOC = 2, /* STA not associated */ + DMS_STATUS_NOT_SUPPORT = 3, /* DMS not supported by AP */ + DMS_STATUS_DENIED = 4, /* Request denied by AP */ + DMS_STATUS_TERM = 5, /* Request terminated by AP */ + DMS_STATUS_REMOVING = 6, /* Remove request just sent */ + DMS_STATUS_ADDING = 7, /* Add request just sent */ + DMS_STATUS_ERROR = 8, /* Non-compliant AP behavior */ + DMS_STATUS_IN_PROGRESS = 9, /* Request just sent */ + DMS_STATUS_REQ_MISMATCH = 10 /* Conditions for sending DMS req not met */ +}; + +typedef struct wl_dms_desc { + uint8 user_id; + uint8 status; + uint8 token; + uint8 dms_id; + uint8 tclas_proc; + uint8 mac_len; /* length of all ether_addr in data array, 0 if STA */ + uint8 tclas_len; /* length of all wl_tclas_t in data array */ + uint8 data[1]; /* VLA of 'ether_addr' and 'wl_tclas_t' (in this order) */ +} wl_dms_desc_t; + +#define WL_DMS_DESC_FIXED_SIZE OFFSETOF(wl_dms_desc_t, data) + +typedef struct wl_dms_status { + uint32 cnt; + wl_dms_desc_t desc[1]; +} wl_dms_status_t; + +typedef struct wl_dms_set { + uint8 send; + uint8 user_id; + uint8 tclas_proc; +} wl_dms_set_t; + +typedef struct wl_dms_term { + uint8 del; + uint8 user_id; +} wl_dms_term_t; + +typedef struct wl_service_term { + uint8 service; + union { + wl_dms_term_t dms; + } u; +} wl_service_term_t; + +/* Definitions for WNM/NPS BSS Transition */ +typedef struct wl_bsstrans_req { + uint16 tbtt; /* time of BSS to end of life, in unit of TBTT */ + uint16 dur; /* time of BSS to keep off, in unit of minute */ + uint8 reqmode; /* request mode of BSS transition request */ + uint8 unicast; /* request by unicast or by broadcast */ +} wl_bsstrans_req_t; + +enum { + BSSTRANS_RESP_AUTO = 0, /* Currently equivalent to ENABLE */ + BSSTRANS_RESP_DISABLE = 1, /* Never answer BSS Trans Req frames */ + BSSTRANS_RESP_ENABLE = 2, /* Always answer Req frames with preset data */ + BSSTRANS_RESP_WAIT = 3, /* Send ind, wait and/or send preset data (NOT IMPL) */ + BSSTRANS_RESP_IMMEDIATE = 4 /* After an ind, set data and send resp (NOT IMPL) */ +}; + 
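Editor's illustration (not part of the patch): the response-policy enum above is used together with the wl_bsstrans_resp_t layout declared immediately below to pre-program how the STA answers a BSS Transition request. A minimal sketch follows; wl_iovar_set() is a hypothetical wrapper for the driver's IOVAR set path, and the IOVAR name "wnm_bsstrans_resp" is an assumption not confirmed by this hunk.

/* Illustrative sketch only. Assumes this header's types are in scope
 * and <string.h> for memset/memcpy; wl_iovar_set() and the
 * "wnm_bsstrans_resp" IOVAR name are assumptions.
 */
static int example_preset_bsstrans_resp(void *wl, const struct ether_addr *preferred)
{
	wl_bsstrans_resp_t resp;

	memset(&resp, 0, sizeof(resp));
	resp.policy = BSSTRANS_RESP_ENABLE;	/* always answer with preset data */
	resp.status = 0;			/* 802.11 status code: accept */
	resp.delay = 0;				/* no BSS termination delay */
	if (preferred != NULL)
		memcpy(&resp.target, preferred, sizeof(resp.target)); /* preset target BSSID */

	return wl_iovar_set(wl, "wnm_bsstrans_resp", &resp, sizeof(resp));
}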
+typedef struct wl_bsstrans_resp { + uint8 policy; + uint8 status; + uint8 delay; + struct ether_addr target; +} wl_bsstrans_resp_t; + +/* "wnm_bsstrans_policy" argument programs behavior after BSSTRANS Req reception. + * BSS-Transition feature is used by multiple programs such as NPS-PF, VE-PF, + * Band-steering, Hotspot 2.0 and customer requirements. Each PF and its test plan + * mandates different behavior on receiving a BSS-transition request. To accommodate + * such divergent behaviors these policies have been created. + */ +enum { + WL_BSSTRANS_POLICY_ROAM_ALWAYS = 0, /* Roam (or disassociate) in all cases */ + WL_BSSTRANS_POLICY_ROAM_IF_MODE = 1, /* Roam only if requested by Request Mode field */ + WL_BSSTRANS_POLICY_ROAM_IF_PREF = 2, /* Roam only if Preferred BSS provided */ + WL_BSSTRANS_POLICY_WAIT = 3, /* Wait for deauth and send Accepted status */ + WL_BSSTRANS_POLICY_PRODUCT = 4, /* Policy for real product use cases (non-pf) */ +}; + +/* Definitions for WNM/NPS TIM Broadcast */ +typedef struct wl_timbc_offset { + int16 offset; /* offset in us */ + uint16 fix_intv; /* override interval sent from STA */ + uint16 rate_override; /* use rate override to send high rate TIM broadcast frame */ + uint8 tsf_present; /* show timestamp in TIM broadcast frame */ +} wl_timbc_offset_t; + +typedef struct wl_timbc_set { + uint8 interval; /* Interval in DTIMs, desired or required. */ + uint8 flags; /* Bitfield described below */ + uint16 rate_min; /* Minimum rate required for High/Low TIM frames. Optional */ + uint16 rate_max; /* Maximum rate required for High/Low TIM frames. Optional */ +} wl_timbc_set_t; + +enum { + WL_TIMBC_SET_TSF_REQUIRED = 1, /* Enable TIMBC only if TSF in TIM frames */ + WL_TIMBC_SET_NO_OVERRIDE = 2, /* ... if AP does not override interval */ + WL_TIMBC_SET_PROXY_ARP = 4, /* ... if AP supports Proxy ARP */ + WL_TIMBC_SET_DMS_ACCEPTED = 8 /* ... 
if all DMS desc have been accepted */ +}; + +typedef struct wl_timbc_status { + uint8 status_sta; /* Status from internal state machine (check below) */ + uint8 status_ap; /* From AP response frame (check 8.4.2.86 from 802.11) */ + uint8 interval; + uint8 pad; + int32 offset; + uint16 rate_high; + uint16 rate_low; +} wl_timbc_status_t; + +enum { + WL_TIMBC_STATUS_DISABLE = 0, /* TIMBC disabled by user */ + WL_TIMBC_STATUS_REQ_MISMATCH = 1, /* AP settings do not match user requirements */ + WL_TIMBC_STATUS_NOT_ASSOC = 2, /* STA not associated */ + WL_TIMBC_STATUS_NOT_SUPPORT = 3, /* TIMBC not supported by AP */ + WL_TIMBC_STATUS_DENIED = 4, /* Req to disable TIMBC sent to AP */ + WL_TIMBC_STATUS_ENABLE = 5 /* TIMBC enabled */ +}; + +/* Definitions for PM2 Dynamic Fast Return To Sleep */ +typedef struct wl_pm2_sleep_ret_ext { + uint8 logic; /* DFRTS logic: see WL_DFRTS_LOGIC_* below */ + uint16 low_ms; /* Low FRTS timeout */ + uint16 high_ms; /* High FRTS timeout */ + uint16 rx_pkts_threshold; /* switching threshold: # rx pkts */ + uint16 tx_pkts_threshold; /* switching threshold: # tx pkts */ + uint16 txrx_pkts_threshold; /* switching threshold: # (tx+rx) pkts */ + uint32 rx_bytes_threshold; /* switching threshold: # rx bytes */ + uint32 tx_bytes_threshold; /* switching threshold: # tx bytes */ + uint32 txrx_bytes_threshold; /* switching threshold: # (tx+rx) bytes */ +} wl_pm2_sleep_ret_ext_t; + +#define WL_DFRTS_LOGIC_OFF 0 /* Feature is disabled */ +#define WL_DFRTS_LOGIC_OR 1 /* OR all non-zero threshold conditions */ +#define WL_DFRTS_LOGIC_AND 2 /* AND all non-zero threshold conditions */ + +/* Values for the passive_on_restricted_mode iovar. When set to non-zero, this iovar + * disables automatic conversions of a channel from passively scanned to + * actively scanned. These values only have an effect for country codes such + * as XZ where some 5 GHz channels are defined to be passively scanned. 
+ */ +#define WL_PASSACTCONV_DISABLE_NONE 0 /* Enable permanent and temporary conversions */ +#define WL_PASSACTCONV_DISABLE_ALL 1 /* Disable permanent and temporary conversions */ +#define WL_PASSACTCONV_DISABLE_PERM 2 /* Disable only permanent conversions */ + +/* Definitions for Reliable Multicast */ +#define WL_RMC_CNT_VERSION 1 +#define WL_RMC_TR_VERSION 1 +#define WL_RMC_MAX_CLIENT 32 +#define WL_RMC_FLAG_INBLACKLIST 1 +#define WL_RMC_FLAG_ACTIVEACKER 2 +#define WL_RMC_FLAG_RELMCAST 4 +#define WL_RMC_MAX_TABLE_ENTRY 4 + +#define WL_RMC_VER 1 +#define WL_RMC_INDEX_ACK_ALL 255 +#define WL_RMC_NUM_OF_MC_STREAMS 4 +#define WL_RMC_MAX_TRS_PER_GROUP 1 +#define WL_RMC_MAX_TRS_IN_ACKALL 1 +#define WL_RMC_ACK_MCAST0 0x02 +#define WL_RMC_ACK_MCAST_ALL 0x01 +#define WL_RMC_ACTF_TIME_MIN 300 /* time in ms */ +#define WL_RMC_ACTF_TIME_MAX 20000 /* time in ms */ +#define WL_RMC_MAX_NUM_TRS 32 /* maximum transmitters allowed */ +#define WL_RMC_ARTMO_MIN 350 /* time in ms */ +#define WL_RMC_ARTMO_MAX 40000 /* time in ms */ + +/* RMC events in action frames */ +enum rmc_opcodes { + RELMCAST_ENTRY_OP_DISABLE = 0, /* Disable multi-cast group */ + RELMCAST_ENTRY_OP_DELETE = 1, /* Delete multi-cast group */ + RELMCAST_ENTRY_OP_ENABLE = 2, /* Enable multi-cast group */ + RELMCAST_ENTRY_OP_ACK_ALL = 3 /* Enable ACK ALL bit in AMT */ +}; + +/* RMC operational modes */ +enum rmc_modes { + WL_RMC_MODE_RECEIVER = 0, /* Receiver mode by default */ + WL_RMC_MODE_TRANSMITTER = 1, /* Transmitter mode using wl ackreq */ + WL_RMC_MODE_INITIATOR = 2 /* Initiator mode using wl ackreq */ +}; + +/* Each RMC mcast client info */ +typedef struct wl_relmcast_client { + uint8 flag; /* status of client such as AR, R, or blacklisted */ + int16 rssi; /* rssi value of RMC client */ + struct ether_addr addr; /* mac address of RMC client */ +} wl_relmcast_client_t; + +/* RMC Counters */ +typedef struct wl_rmc_cnts { + uint16 version; /* see definition of WL_CNT_T_VERSION */ + uint16 length; /* length of entire structure */ + uint16 dupcnt; /* counter for duplicate rmc MPDU */ + uint16 ackreq_err; /* counter for wl ackreq error */ + uint16 af_tx_err; /* error count for action frame transmit */ + uint16 null_tx_err; /* error count for rmc null frame transmit */ + uint16 af_unicast_tx_err; /* error count for rmc unicast frame transmit */ + uint16 mc_no_amt_slot; /* No mcast AMT entry available */ + /* Unused. Keep for rom compatibility */ + uint16 mc_no_glb_slot; /* No mcast entry available in global table */ + uint16 mc_not_mirrored; /* mcast group is not mirrored */ + uint16 mc_existing_tr; /* mcast group is already taken by transmitter */ + uint16 mc_exist_in_amt; /* mcast group is already programmed in amt */ + /* Unused. Keep for rom compatibility */ + uint16 mc_not_exist_in_gbl; /* mcast group is not in global table */ + uint16 mc_not_exist_in_amt; /* mcast group is not in AMT table */ + uint16 mc_utilized; /* mcast address is already taken */ + uint16 mc_taken_other_tr; /* multi-cast address is already taken */ + uint32 rmc_rx_frames_mac; /* no of mc frames received from mac */ + uint32 rmc_tx_frames_mac; /* no of mc frames transmitted to mac */ + uint32 mc_null_ar_cnt; /* no. of times NULL AR is received */ + uint32 mc_ar_role_selected; /* no. of times took AR role */ + uint32 mc_ar_role_deleted; /* no. of times AR role cancelled */ + uint32 mc_noacktimer_expired; /* no. 
of times noack timer expired */ + uint16 mc_no_wl_clk; /* no wl clk detected when trying to access amt */ + uint16 mc_tr_cnt_exceeded; /* No of transmitters in the network exceeded */ +} wl_rmc_cnts_t; + +/* RMC Status */ +typedef struct wl_relmcast_st { + uint8 ver; /* version of RMC */ + uint8 num; /* number of clients detected by transmitter */ + wl_relmcast_client_t clients[WL_RMC_MAX_CLIENT]; + uint16 err; /* error status (used in infra) */ + uint16 actf_time; /* action frame time period */ +} wl_relmcast_status_t; + +/* Entry for each STA/node */ +typedef struct wl_rmc_entry { + /* operation on multi-cast entry such add, + * delete, ack-all + */ + int8 flag; + struct ether_addr addr; /* multi-cast group mac address */ +} wl_rmc_entry_t; + +/* RMC table */ +typedef struct wl_rmc_entry_table { + uint8 index; /* index to a particular mac entry in table */ + uint8 opcode; /* opcodes or operation on entry */ + wl_rmc_entry_t entry[WL_RMC_MAX_TABLE_ENTRY]; +} wl_rmc_entry_table_t; + +typedef struct wl_rmc_trans_elem { + struct ether_addr tr_mac; /* transmitter mac */ + struct ether_addr ar_mac; /* ar mac */ + uint16 artmo; /* AR timeout */ + uint8 amt_idx; /* amt table entry */ + uint16 flag; /* entry will be acked, not acked, programmed, full etc */ +} wl_rmc_trans_elem_t; + +/* RMC transmitters */ +typedef struct wl_rmc_trans_in_network { + uint8 ver; /* version of RMC */ + uint8 num_tr; /* number of transmitters in the network */ + wl_rmc_trans_elem_t trs[WL_RMC_MAX_NUM_TRS]; +} wl_rmc_trans_in_network_t; + +/* To update vendor specific ie for RMC */ +typedef struct wl_rmc_vsie { + uint8 oui[DOT11_OUI_LEN]; + uint16 payload; /* IE Data Payload */ +} wl_rmc_vsie_t; + + +/* structures & defines for proximity detection */ +enum proxd_method { + PROXD_UNDEFINED_METHOD = 0, + PROXD_RSSI_METHOD = 1, + PROXD_TOF_METHOD = 2 +}; + +/* structures for proximity detection device role */ +#define WL_PROXD_MODE_DISABLE 0 +#define WL_PROXD_MODE_NEUTRAL 1 +#define WL_PROXD_MODE_INITIATOR 2 +#define WL_PROXD_MODE_TARGET 3 + +#define WL_PROXD_ACTION_STOP 0 +#define WL_PROXD_ACTION_START 1 + +#define WL_PROXD_FLAG_TARGET_REPORT 0x1 +#define WL_PROXD_FLAG_REPORT_FAILURE 0x2 +#define WL_PROXD_FLAG_INITIATOR_REPORT 0x4 +#define WL_PROXD_FLAG_NOCHANSWT 0x8 +#define WL_PROXD_FLAG_NETRUAL 0x10 +#define WL_PROXD_FLAG_INITIATOR_RPTRTT 0x20 +#define WL_PROXD_FLAG_ONEWAY 0x40 +#define WL_PROXD_FLAG_SEQ_EN 0x80 + +#define WL_PROXD_RANDOM_WAKEUP 0x8000 +#define WL_PROXD_MAXREPORT 8 + +typedef struct wl_proxd_iovar { + uint16 method; /* Proximity Detection method */ + uint16 mode; /* Mode (neutral, initiator, target) */ +} wl_proxd_iovar_t; + +/* + * structures for proximity detection parameters + * consists of two parts, common and method specific params + * common params should be placed at the beginning + */ + +/* require strict packing */ +#include <packed_section_start.h> + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_common { + chanspec_t chanspec; /* channel spec */ + int16 tx_power; /* tx power of Proximity Detection(PD) frames (in dBm) */ + uint16 tx_rate; /* tx rate of PD frames (in 500kbps units) */ + uint16 timeout; /* timeout value */ + uint16 interval; /* interval between neighbor finding attempts (in TU) */ + uint16 duration; /* duration of neighbor finding attempts (in ms) */ +} BWL_POST_PACKED_STRUCT wl_proxd_params_common_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_rssi_method { + chanspec_t chanspec; /* chanspec for home channel */ + int16 tx_power; /* tx power of Proximity Detection frames (in dBm) */ 
+ uint16 tx_rate; /* tx rate of PD frames, 500kbps units */ + uint16 timeout; /* state machine wait timeout of the frames (in ms) */ + uint16 interval; /* interval between neighbor finding attempts (in TU) */ + uint16 duration; /* duration of neighbor finding attempts (in ms) */ + /* method specific ones go after this line */ + int16 rssi_thresh; /* RSSI threshold (in dBm) */ + uint16 maxconvergtmo; /* max wait converge timeout (in ms) */ +} wl_proxd_params_rssi_method_t; + +#define Q1_NS 25 /* Q1 time units */ + +#define TOF_BW_NUM 3 /* number of bandwidths that the TOF can support */ +#define TOF_BW_SEQ_NUM (TOF_BW_NUM+2) /* number of total index */ +enum tof_bw_index { + TOF_BW_20MHZ_INDEX = 0, + TOF_BW_40MHZ_INDEX = 1, + TOF_BW_80MHZ_INDEX = 2, + TOF_BW_SEQTX_INDEX = 3, + TOF_BW_SEQRX_INDEX = 4 +}; + +#define BANDWIDTH_BASE 20 /* base value of bandwidth */ +#define TOF_BW_20MHZ (BANDWIDTH_BASE << TOF_BW_20MHZ_INDEX) +#define TOF_BW_40MHZ (BANDWIDTH_BASE << TOF_BW_40MHZ_INDEX) +#define TOF_BW_80MHZ (BANDWIDTH_BASE << TOF_BW_80MHZ_INDEX) +#define TOF_BW_10MHZ 10 + +#define NFFT_BASE 64 /* base size of fft */ +#define TOF_NFFT_20MHZ (NFFT_BASE << TOF_BW_20MHZ_INDEX) +#define TOF_NFFT_40MHZ (NFFT_BASE << TOF_BW_40MHZ_INDEX) +#define TOF_NFFT_80MHZ (NFFT_BASE << TOF_BW_80MHZ_INDEX) + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_method { + chanspec_t chanspec; /* chanspec for home channel */ + int16 tx_power; /* tx power of Proximity Detection(PD) frames (in dBm) */ + uint16 tx_rate; /* tx rate of PD frames (in 500kbps units) */ + uint16 timeout; /* state machine wait timeout of the frames (in ms) */ + uint16 interval; /* interval between neighbor finding attempts (in TU) */ + uint16 duration; /* duration of neighbor finding attempts (in ms) */ + /* specific for the method go after this line */ + struct ether_addr tgt_mac; /* target mac addr for TOF method */ + uint16 ftm_cnt; /* number of the frames txed by initiator */ + uint16 retry_cnt; /* number of retransmit attempts for ftm frames */ + int16 vht_rate; /* ht or vht rate */ + /* more params required for other methods can be added here */ +} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_method_t; + +typedef struct wl_proxd_seq_config +{ + int16 N_tx_log2; + int16 N_rx_log2; + int16 N_tx_scale; + int16 N_rx_scale; + int16 w_len; + int16 w_offset; +} wl_proxd_seq_config_t; + + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune { + uint32 Ki; /* h/w delay K factor for initiator */ + uint32 Kt; /* h/w delay K factor for target */ + int16 vhtack; /* enable/disable VHT ACK */ + int16 N_log2[TOF_BW_SEQ_NUM]; /* simple threshold crossing */ + int16 w_offset[TOF_BW_NUM]; /* offset of threshold crossing window(per BW) */ + int16 w_len[TOF_BW_NUM]; /* length of threshold crossing window(per BW) */ + int32 maxDT; /* max time difference of T4/T1 or T3/T2 */ + int32 minDT; /* min time difference of T4/T1 or T3/T2 */ + uint8 totalfrmcnt; /* total count of transferred measurement frames */ + uint16 rsv_media; /* reserve media value for TOF */ + uint32 flags; /* flags */ + uint8 core; /* core to use for tx */ + uint8 force_K; /* set to force value of K */ + int16 N_scale[TOF_BW_SEQ_NUM]; /* simple threshold crossing */ + uint8 sw_adj; /* enable sw assisted timestamp adjustment */ + uint8 hw_adj; /* enable hw assisted timestamp adjustment */ + uint8 seq_en; /* enable ranging sequence */ + uint8 ftm_cnt[TOF_BW_SEQ_NUM]; /* number of ftm frames based on bandwidth */ + int16 N_log2_2g; /* simple threshold crossing for 2g channel */ + 
int16 N_scale_2g; /* simple threshold crossing for 2g channel */
+ wl_proxd_seq_config_t seq_5g20;
+} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_tune_t;
+
+typedef struct wl_proxd_params_iovar {
+ uint16 method; /* Proximity Detection method */
+ union {
+ /* common params for pdsvc */
+ wl_proxd_params_common_t cmn_params; /* common parameters */
+ /* method specific */
+ wl_proxd_params_rssi_method_t rssi_params; /* RSSI method parameters */
+ wl_proxd_params_tof_method_t tof_params; /* TOF method parameters */
+ /* tune parameters */
+ wl_proxd_params_tof_tune_t tof_tune; /* TOF tune parameters */
+ } u; /* Method specific optional parameters */
+} wl_proxd_params_iovar_t;
+
+#define PROXD_COLLECT_GET_STATUS 0
+#define PROXD_COLLECT_SET_STATUS 1
+#define PROXD_COLLECT_QUERY_HEADER 2
+#define PROXD_COLLECT_QUERY_DATA 3
+#define PROXD_COLLECT_QUERY_DEBUG 4
+#define PROXD_COLLECT_REMOTE_REQUEST 5
+#define PROXD_COLLECT_DONE 6
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_query {
+ uint32 method; /* method */
+ uint8 request; /* Query request. */
+ uint8 status; /* 0 -- disable, 1 -- enable collection, */
+ /* 2 -- enable collection & debug */
+ uint16 index; /* The current frame index [0 to total_frames - 1]. */
+ uint16 mode; /* Initiator or Target */
+ bool busy; /* tof sm is busy */
+ bool remote; /* Remote collect data */
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_query_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_header {
+ uint16 total_frames; /* The total frames for this collect. */
+ uint16 nfft; /* nfft value */
+ uint16 bandwidth; /* bandwidth */
+ uint16 channel; /* channel number */
+ uint32 chanspec; /* channel spec */
+ uint32 fpfactor; /* avb timer value factor */
+ uint16 fpfactor_shift; /* avb timer value shift bits */
+ int32 distance; /* distance calculated by fw */
+ uint32 meanrtt; /* mean of RTTs */
+ uint32 modertt; /* mode of RTTs */
+ uint32 medianrtt; /* median of RTTs */
+ uint32 sdrtt; /* standard deviation of RTTs */
+ uint32 clkdivisor; /* clock divisor */
+ uint16 chipnum; /* chip type */
+ uint8 chiprev; /* chip revision */
+ uint8 phyver; /* phy version */
+ struct ether_addr loaclMacAddr; /* local mac address */
+ struct ether_addr remoteMacAddr; /* remote mac address */
+ wl_proxd_params_tof_tune_t params;
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_header_t;
+
+
+#ifdef WL_NAN
+/* ********************** NAN wl interface struct types and defs ******************** */
+
+#define WL_NAN_IOCTL_VERSION 0x1
+#define NAN_IOC_BUFSZ 256 /**< some sufficient ioc buff size for our module */
+#define NAN_IOC_BUFSZ_EXT 1024 /* some sufficient ioc buff size for dump commands */
+
+/* wl_nan_sub_cmd may also be used in dhd */
+typedef struct wl_nan_sub_cmd wl_nan_sub_cmd_t;
+typedef int (cmd_handler_t)(void *wl, const wl_nan_sub_cmd_t *cmd, char **argv);
+/* nan cmd list entry */
+struct wl_nan_sub_cmd {
+ char *name;
+ uint8 version; /* cmd version */
+ uint16 id; /* id for the dongle f/w switch/case */
+ uint16 type; /* base type of argument */
+ cmd_handler_t *handler; /* cmd handler */
+};
+
+/* container for nan iovars & events */
+typedef BWL_PRE_PACKED_STRUCT struct wl_nan_ioc {
+ uint16 version; /* interface command or event version */
+ uint16 id; /* nan ioctl cmd ID */
+ uint16 len; /* total length of all tlv records in data[] */
+ uint16 pad; /* pad to be 32 bit alignment */
+ uint8 data[1]; /* var len payload of bcm_xtlv_t type */
+} BWL_POST_PACKED_STRUCT wl_nan_ioc_t;
+
+typedef struct wl_nan_status {
+ uint8 inited;
+ uint8
joined; + uint8 role; + uint8 hop_count; + uint32 chspec; + uint8 amr[8]; /* Anchor Master Rank */ + uint32 cnt_pend_txfrm; /* pending TX frames */ + uint32 cnt_bcn_tx; /* TX disc/sync beacon count */ + uint32 cnt_bcn_rx; /* RX disc/sync beacon count */ + uint32 cnt_svc_disc_tx; /* TX svc disc frame count */ + uint32 cnt_svc_disc_rx; /* RX svc disc frame count */ + struct ether_addr cid; + uint32 chspec_5g; +} wl_nan_status_t; + +typedef struct wl_nan_count { + uint32 cnt_bcn_tx; /* TX disc/sync beacon count */ + uint32 cnt_bcn_rx; /* RX disc/sync beacon count */ + uint32 cnt_svc_disc_tx; /* TX svc disc frame count */ + uint32 cnt_svc_disc_rx; /* RX svc disc frame count */ +} wl_nan_count_t; + +/* various params and ctl swithce for nan_debug instance */ +typedef struct nan_debug_params { + uint8 enabled; /* runtime debuging enabled */ + uint8 collect; /* enables debug svc sdf monitor mode */ + uint16 cmd; /* debug cmd to perform a debug action */ + uint32 msglevel; /* msg level if enabled */ + uint16 status; +} nan_debug_params_t; + +/* time slot */ +#define NAN_MAX_TIMESLOT 32 +typedef struct nan_timeslot { + uint32 abitmap; /* available bitmap */ + uint32 chanlist[NAN_MAX_TIMESLOT]; +} nan_timeslot_t; + +/* nan passive scan params */ +#define NAN_SCAN_MAX_CHCNT 8 +typedef struct nan_scan_params { + uint16 scan_time; + uint16 home_time; + uint16 ms_intvl; /* interval between merge scan */ + uint16 ms_dur; /* duration of merge scan */ + uint16 chspec_num; + uint8 pad[2]; + chanspec_t chspec_list[NAN_SCAN_MAX_CHCNT]; /* act. used 3, 5 rfu */ +} nan_scan_params_t; + +enum wl_nan_role { + WL_NAN_ROLE_AUTO = 0, + WL_NAN_ROLE_NON_MASTER_NON_SYNC = 1, + WL_NAN_ROLE_NON_MASTER_SYNC = 2, + WL_NAN_ROLE_MASTER = 3, + WL_NAN_ROLE_ANCHOR_MASTER = 4 +}; +#define NAN_MASTER_RANK_LEN 8 +/* nan cmd IDs */ +enum wl_nan_cmds { + /* nan cfg /disc & dbg ioctls */ + WL_NAN_CMD_ENABLE = 1, + WL_NAN_CMD_ATTR = 2, + WL_NAN_CMD_NAN_JOIN = 3, + WL_NAN_CMD_LEAVE = 4, + WL_NAN_CMD_MERGE = 5, + WL_NAN_CMD_STATUS = 6, + WL_NAN_CMD_TSRESERVE = 7, + WL_NAN_CMD_TSSCHEDULE = 8, + WL_NAN_CMD_TSRELEASE = 9, + WL_NAN_CMD_OUI = 10, + + WL_NAN_CMD_COUNT = 15, + WL_NAN_CMD_CLEARCOUNT = 16, + + /* discovery engine commands */ + WL_NAN_CMD_PUBLISH = 20, + WL_NAN_CMD_SUBSCRIBE = 21, + WL_NAN_CMD_CANCEL_PUBLISH = 22, + WL_NAN_CMD_CANCEL_SUBSCRIBE = 23, + WL_NAN_CMD_TRANSMIT = 24, + WL_NAN_CMD_CONNECTION = 25, + WL_NAN_CMD_SHOW = 26, + WL_NAN_CMD_STOP = 27, /* stop nan for a given cluster ID */ + /* nan debug iovars & cmds */ + WL_NAN_CMD_SCAN_PARAMS = 46, + WL_NAN_CMD_SCAN = 47, + WL_NAN_CMD_SCAN_RESULTS = 48, + WL_NAN_CMD_EVENT_MASK = 49, + WL_NAN_CMD_EVENT_CHECK = 50, + WL_NAN_CMD_DUMP = 51, + WL_NAN_CMD_CLEAR = 52, + WL_NAN_CMD_RSSI = 53, + + WL_NAN_CMD_DEBUG = 60, + WL_NAN_CMD_TEST1 = 61, + WL_NAN_CMD_TEST2 = 62, + WL_NAN_CMD_TEST3 = 63, + WL_NAN_CMD_DISC_RESULTS = 64 +}; + +/* + * tlv IDs uniquely identifies cmd parameters + * packed into wl_nan_ioc_t container + */ +enum wl_nan_cmd_xtlv_id { + /* 0x00 ~ 0xFF: standard TLV ID whose data format is the same as NAN attribute TLV */ + WL_NAN_XTLV_ZERO = 0, /* used as tlv buf end marker */ +#ifdef NAN_STD_TLV /* rfu, don't use yet */ + WL_NAN_XTLV_MASTER_IND = 1, /* == NAN_ATTR_MASTER_IND, */ + WL_NAN_XTLV_CLUSTER = 2, /* == NAN_ATTR_CLUSTER, */ + WL_NAN_XTLV_VENDOR = 221, /* == NAN_ATTR_VENDOR, */ +#endif + /* 0x02 ~ 0xFF: reserved. 
In case to use with the same data format as NAN attribute TLV */ + /* 0x100 ~ : private TLV ID defined just for NAN command */ + /* common types */ + WL_NAN_XTLV_MAC_ADDR = 0x102, /* used in various cmds */ + WL_NAN_XTLV_REASON = 0x103, + WL_NAN_XTLV_ENABLED = 0x104, + /* explicit types, primarily for discovery engine iovars */ + WL_NAN_XTLV_SVC_PARAMS = 0x120, /* Contains required params: wl_nan_disc_params_t */ + WL_NAN_XTLV_MATCH_RX = 0x121, /* Matching filter to evaluate on receive */ + WL_NAN_XTLV_MATCH_TX = 0x122, /* Matching filter to send */ + WL_NAN_XTLV_SVC_INFO = 0x123, /* Service specific info */ + WL_NAN_XTLV_SVC_NAME = 0x124, /* Optional UTF-8 service name, for debugging. */ + WL_NAN_XTLV_INSTANCE_ID = 0x125, /* Identifies unique publish or subscribe instance */ + WL_NAN_XTLV_PRIORITY = 0x126, /* used in transmit cmd context */ + WL_NAN_XTLV_REQUESTOR_ID = 0x127, /* Requestor instance ID */ + WL_NAN_XTLV_VNDR = 0x128, /* Vendor specific attribute */ + WL_NAN_XTLV_SR_FILTER = 0x129, /* Service Response Filter */ + WL_NAN_XTLV_FOLLOWUP = 0x130, /* Service Info for Follow-Up SDF */ + WL_NAN_XTLV_PEER_INSTANCE_ID = 0x131, /* Used to parse remote instance Id */ + /* explicit types, primarily for NAN MAC iovars */ + WL_NAN_XTLV_DW_LEN = 0x140, /* discovery win length */ + WL_NAN_XTLV_BCN_INTERVAL = 0x141, /* beacon interval, both sync and descovery bcns? */ + WL_NAN_XTLV_CLUSTER_ID = 0x142, + WL_NAN_XTLV_IF_ADDR = 0x143, + WL_NAN_XTLV_MC_ADDR = 0x144, + WL_NAN_XTLV_ROLE = 0x145, + WL_NAN_XTLV_START = 0x146, + + WL_NAN_XTLV_MASTER_PREF = 0x147, + WL_NAN_XTLV_DW_INTERVAL = 0x148, + WL_NAN_XTLV_PTBTT_OVERRIDE = 0x149, + /* nan status command xtlvs */ + WL_NAN_XTLV_MAC_INITED = 0x14a, + WL_NAN_XTLV_MAC_ENABLED = 0x14b, + WL_NAN_XTLV_MAC_CHANSPEC = 0x14c, + WL_NAN_XTLV_MAC_AMR = 0x14d, /* anchormaster rank u8 amr[8] */ + WL_NAN_XTLV_MAC_HOPCNT = 0x14e, + WL_NAN_XTLV_MAC_AMBTT = 0x14f, + WL_NAN_XTLV_MAC_TXRATE = 0x150, + WL_NAN_XTLV_MAC_STATUS = 0x151, /* xtlv payload is nan_status_t */ + WL_NAN_XTLV_NAN_SCANPARAMS = 0x152, /* payload is nan_scan_params_t */ + WL_NAN_XTLV_DEBUGPARAMS = 0x153, /* payload is nan_scan_params_t */ + WL_NAN_XTLV_SUBSCR_ID = 0x154, /* subscriber id */ + WL_NAN_XTLV_PUBLR_ID = 0x155, /* publisher id */ + WL_NAN_XTLV_EVENT_MASK = 0x156, + WL_NAN_XTLV_MASTER_RANK = 0x158, + WL_NAN_XTLV_WARM_UP_TIME = 0x159, + WL_NAN_XTLV_PM_OPTION = 0x15a, + WL_NAN_XTLV_OUI = 0x15b, /* NAN OUI */ + WL_NAN_XTLV_MAC_COUNT = 0x15c, /* xtlv payload is nan_count_t */ + /* nan timeslot management */ + WL_NAN_XTLV_TSRESERVE = 0x160, + WL_NAN_XTLV_TSRELEASE = 0x161, + WL_NAN_XTLV_IDLE_DW_TIMEOUT = 0x162, + WL_NAN_XTLV_IDLE_DW_LEN = 0x163, + WL_NAN_XTLV_RND_FACTOR = 0x164, + WL_NAN_XTLV_SVC_DISC_TXTIME = 0x165, /* svc disc frame tx time in DW */ + WL_NAN_XTLV_OPERATING_BAND = 0x166, + WL_NAN_XTLV_STOP_BCN_TX = 0x167, + WL_NAN_XTLV_CONCUR_SCAN = 0x168, + WL_NAN_XTLV_DUMP_CLR_TYPE = 0x175, /* wl nan dump/clear subtype */ + WL_NAN_XTLV_PEER_RSSI = 0x176, /* xtlv payload for wl nan dump rssi */ + WL_NAN_XTLV_MAC_CHANSPEC_1 = 0x17A, /* to get chanspec[1] */ + WL_NAN_XTLV_DISC_RESULTS = 0x17B, /* get disc results */ + WL_NAN_XTLV_MAC_STATS = 0x17C /* xtlv payload for wl nan dump stats */ +}; + +/* Flag bits for Publish and Subscribe (wl_nan_disc_params_t flags) */ +#define WL_NAN_RANGE_LIMITED 0x0040 +/* Bits specific to Publish */ +/* Unsolicited transmissions */ +#define WL_NAN_PUB_UNSOLICIT 0x1000 +/* Solicited transmissions */ +#define WL_NAN_PUB_SOLICIT 0x2000 +#define WL_NAN_PUB_BOTH 
0x3000 +/* Set for broadcast solicited transmission + * Do not set for unicast solicited transmission + */ +#define WL_NAN_PUB_BCAST 0x4000 +/* Generate event on each solicited transmission */ +#define WL_NAN_PUB_EVENT 0x8000 +/* Used for one-time solicited Publish functions to indicate transmision occurred */ +#define WL_NAN_PUB_SOLICIT_PENDING 0x10000 +/* Follow-up frames */ +#define WL_NAN_FOLLOWUP 0x20000 +/* Bits specific to Subscribe */ +/* Active subscribe mode (Leave unset for passive) */ +#define WL_NAN_SUB_ACTIVE 0x1000 + +/* Special values for time to live (ttl) parameter */ +#define WL_NAN_TTL_UNTIL_CANCEL 0xFFFFFFFF +/* Publish - runs until first transmission + * Subscribe - runs until first DiscoveryResult event + */ +#define WL_NAN_TTL_FIRST 0 + +/* The service hash (service id) is exactly this many bytes. */ +#define WL_NAN_SVC_HASH_LEN 6 + +/* Number of hash functions per bloom filter */ +#define WL_NAN_HASHES_PER_BLOOM 4 + +/* Instance ID type (unique identifier) */ +typedef uint8 wl_nan_instance_id_t; + +/* no. of max last disc results */ +#define WL_NAN_MAX_DISC_RESULTS 3 + +/** Mandatory parameters for publish/subscribe iovars - NAN_TLV_SVC_PARAMS */ +typedef struct wl_nan_disc_params_s { + /* Periodicity of unsolicited/query transmissions, in DWs */ + uint32 period; + /* Time to live in DWs */ + uint32 ttl; + /* Flag bits */ + uint32 flags; + /* Publish or subscribe service id, i.e. hash of the service name */ + uint8 svc_hash[WL_NAN_SVC_HASH_LEN]; + /* pad to make 4 byte alignment, can be used for something else in the future */ + uint8 pad; + /* Publish or subscribe id */ + wl_nan_instance_id_t instance_id; +} wl_nan_disc_params_t; + +/* recent discovery results */ +typedef struct wl_nan_disc_result_s +{ + wl_nan_instance_id_t instance_id; /* instance id of pub/sub req */ + wl_nan_instance_id_t peer_instance_id; /* peer instance id of pub/sub req/resp */ + uint8 svc_hash[WL_NAN_SVC_HASH_LEN]; /* service descp string */ + struct ether_addr peer_mac; /* peer mac address */ +} wl_nan_disc_result_t; + +/* list of recent discovery results */ +typedef struct wl_nan_disc_results_s +{ + wl_nan_disc_result_t disc_result[WL_NAN_MAX_DISC_RESULTS]; +} wl_nan_disc_results_list_t; + +/* +* desovery interface event structures * +*/ + +/* NAN Ranging */ + +/* Bit defines for global flags */ +#define WL_NAN_RANGING_ENABLE 1 /* enable RTT */ +#define WL_NAN_RANGING_RANGED 2 /* Report to host if ranged as target */ +typedef struct nan_ranging_config { + uint32 chanspec; /* Ranging chanspec */ + uint16 timeslot; /* NAN RTT start time slot 1-511 */ + uint16 duration; /* NAN RTT duration in ms */ + struct ether_addr allow_mac; /* peer initiated ranging: the allowed peer mac + * address, a unicast (for one peer) or + * a broadcast for all. 
Setting it to all zeros + * means responding to none,same as not setting + * the flag bit NAN_RANGING_RESPOND + */ + uint16 flags; +} wl_nan_ranging_config_t; + +/* list of peers for self initiated ranging */ +/* Bit defines for per peer flags */ +#define WL_NAN_RANGING_REPORT (1<<0) /* Enable reporting range to target */ +typedef struct nan_ranging_peer { + uint32 chanspec; /* desired chanspec for this peer */ + uint32 abitmap; /* available bitmap */ + struct ether_addr ea; /* peer MAC address */ + uint8 frmcnt; /* frame count */ + uint8 retrycnt; /* retry count */ + uint16 flags; /* per peer flags, report or not */ +} wl_nan_ranging_peer_t; +typedef struct nan_ranging_list { + uint8 count; /* number of MAC addresses */ + uint8 num_peers_done; /* host set to 0, when read, shows number of peers + * completed, success or fail + */ + uint8 num_dws; /* time period to do the ranging, specified in dws */ + uint8 reserve; /* reserved field */ + wl_nan_ranging_peer_t rp[1]; /* variable length array of peers */ +} wl_nan_ranging_list_t; + +/* ranging results, a list for self initiated ranging and one for peer initiated ranging */ +/* There will be one structure for each peer */ +#define WL_NAN_RANGING_STATUS_SUCCESS 1 +#define WL_NAN_RANGING_STATUS_FAIL 2 +#define WL_NAN_RANGING_STATUS_TIMEOUT 3 +#define WL_NAN_RANGING_STATUS_ABORT 4 /* with partial results if sounding count > 0 */ +typedef struct nan_ranging_result { + uint8 status; /* 1: Success, 2: Fail 3: Timeout 4: Aborted */ + uint8 sounding_count; /* number of measurements completed (0 = failure) */ + struct ether_addr ea; /* initiator MAC address */ + uint32 chanspec; /* Chanspec where the ranging was done */ + uint32 timestamp; /* 32bits of the TSF timestamp ranging was completed at */ + uint32 distance; /* mean distance in meters expressed as Q4 number. + * Only valid when sounding_count > 0. Examples: + * 0x08 = 0.5m + * 0x10 = 1m + * 0x18 = 1.5m + * set to 0xffffffff to indicate invalid number + */ + int32 rtt_var; /* standard deviation in 10th of ns of RTTs measured. 
+ * Only valid when sounding_count > 0
+ */
+ struct ether_addr tgtea; /* target MAC address */
+} wl_nan_ranging_result_t;
+typedef struct nan_ranging_event_data {
+ uint8 mode; /* 1: Result of host initiated ranging */
+ /* 2: Result of peer initiated ranging */
+ uint8 reserved;
+ uint8 success_count; /* number of peers completed successfully */
+ uint8 count; /* number of peers in the list */
+ wl_nan_ranging_result_t rr[1]; /* variable array of ranging peers */
+} wl_nan_ranging_event_data_t;
+enum {
+ WL_NAN_RSSI_DATA = 1,
+ WL_NAN_STATS_DATA = 2,
+/*
+ * ***** ADD before this line ****
+ */
+ WL_NAN_INVALID
+};
+
+typedef struct wl_nan_stats {
+ /* general */
+ uint32 cnt_dw; /* DW slots */
+ uint32 cnt_disc_bcn_sch; /* disc beacon slots */
+ uint32 cnt_amr_exp; /* count of ambtt expiries resetting roles */
+ uint32 cnt_bcn_upd; /* count of beacon template updates */
+ uint32 cnt_bcn_tx; /* count of sync & disc bcn tx */
+ uint32 cnt_bcn_rx; /* count of sync & disc bcn rx */
+ uint32 cnt_sync_bcn_tx; /* count of sync bcn tx within DW */
+ uint32 cnt_disc_bcn_tx; /* count of disc bcn tx */
+ uint32 cnt_sdftx_bcmc; /* count of bcast/mcast sdf tx */
+ uint32 cnt_sdftx_uc; /* count of unicast sdf tx */
+ uint32 cnt_sdftx_fail; /* count of unicast sdf tx fails */
+ uint32 cnt_sdf_rx; /* count of sdf rx */
+ /* NAN roles */
+ uint32 cnt_am; /* anchor master */
+ uint32 cnt_master; /* master */
+ uint32 cnt_nms; /* non master sync */
+ uint32 cnt_nmns; /* non master non sync */
+ /* TX */
+ uint32 cnt_err_txtime; /* error in txtime */
+ uint32 cnt_err_unsch_tx; /* tx while not in DW/ disc bcn slot */
+ uint32 cnt_err_bcn_tx; /* beacon tx error */
+ uint32 cnt_sync_bcn_tx_miss; /* no. of times time delta between 2 consecutive
+ * sync beacons is more than dw interval
+ */
+ /* SCANS */
+ uint32 cnt_mrg_scan; /* count of merge scans completed */
+ uint32 cnt_err_ms_rej; /* number of merge scans failed */
+ uint32 cnt_scan_results; /* no. of nan beacons scanned */
+ uint32 cnt_join_scan_rej; /* no. of join scans rejected */
+ uint32 cnt_nan_scan_abort; /* no. of nan scans aborted */
+ /* enable/disable */
+ uint32 cnt_nan_enab; /* no. of times nan feature got enabled */
+ uint32 cnt_nan_disab; /* no.
of times nan feature got disabled */ +} wl_nan_stats_t; + +#define WL_NAN_MAC_MAX_NAN_PEERS 6 +#define WL_NAN_MAC_MAX_RSSI_DATA_PER_PEER 10 + +typedef struct wl_nan_nbr_rssi { + uint8 rx_chan; /* channel number on which bcn rcvd */ + int rssi_raw; /* received rssi value */ + int rssi_avg; /* normalized rssi value */ +} wl_nan_peer_rssi_t; + +typedef struct wl_nan_peer_rssi_entry { + struct ether_addr mac; /* peer mac address */ + uint8 flags; /* TODO:rssi data order: latest first, oldest first etc */ + uint8 rssi_cnt; /* rssi data sample present */ + wl_nan_peer_rssi_t rssi[WL_NAN_MAC_MAX_RSSI_DATA_PER_PEER]; /* RSSI data frm peer */ +} wl_nan_peer_rssi_entry_t; + +#define WL_NAN_PEER_RSSI 0x1 +#define WL_NAN_PEER_RSSI_LIST 0x2 + +typedef struct wl_nan_nbr_rssi_data { + uint8 flags; /* this is a list or single rssi data */ + uint8 peer_cnt; /* number of peers */ + uint16 pad; /* padding */ + wl_nan_peer_rssi_entry_t peers[1]; /* peers data list */ +} wl_nan_peer_rssi_data_t; + +/* ********************* end of NAN section ******************************** */ +#endif /* WL_NAN */ + + +#define RSSI_THRESHOLD_SIZE 16 +#define MAX_IMP_RESP_SIZE 256 + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_rssi_bias { + int32 version; /* version */ + int32 threshold[RSSI_THRESHOLD_SIZE]; /* threshold */ + int32 peak_offset; /* peak offset */ + int32 bias; /* rssi bias */ + int32 gd_delta; /* GD - GD_ADJ */ + int32 imp_resp[MAX_IMP_RESP_SIZE]; /* (Hi*Hi)+(Hr*Hr) */ +} BWL_POST_PACKED_STRUCT wl_proxd_rssi_bias_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_rssi_bias_avg { + int32 avg_threshold[RSSI_THRESHOLD_SIZE]; /* avg threshold */ + int32 avg_peak_offset; /* avg peak offset */ + int32 avg_rssi; /* avg rssi */ + int32 avg_bias; /* avg bias */ +} BWL_POST_PACKED_STRUCT wl_proxd_rssi_bias_avg_t; + +typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_info { + uint16 type; /* type: 0 channel table, 1 channel smoothing table, 2 and 3 seq */ + uint16 index; /* The current frame index, from 1 to total_frames. 
*/
+ uint16 tof_cmd; /* M_TOF_CMD */
+ uint16 tof_rsp; /* M_TOF_RSP */
+ uint16 tof_avb_rxl; /* M_TOF_AVB_RX_L */
+ uint16 tof_avb_rxh; /* M_TOF_AVB_RX_H */
+ uint16 tof_avb_txl; /* M_TOF_AVB_TX_L */
+ uint16 tof_avb_txh; /* M_TOF_AVB_TX_H */
+ uint16 tof_id; /* M_TOF_ID */
+ uint8 tof_frame_type;
+ uint8 tof_frame_bw;
+ int8 tof_rssi;
+ int32 tof_cfo;
+ int32 gd_adj_ns; /* group delay */
+ int32 gd_h_adj_ns; /* group delay + threshold crossing */
+#ifdef RSSI_REFINE
+ wl_proxd_rssi_bias_t rssi_bias; /* RSSI refinement info */
+#endif
+ int16 nfft; /* number of samples stored in H */
+
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_info_t;
+
+#define k_tof_collect_H_pad 1
+#define k_tof_collect_H_size (256+16+k_tof_collect_H_pad)
+#define k_tof_collect_Hraw_size (2*k_tof_collect_H_size)
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_data {
+ wl_proxd_collect_info_t info;
+ uint32 H[k_tof_collect_H_size]; /* raw data read from phy used to adjust timestamps */
+
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_debug_data {
+ uint8 count; /* number of packets */
+ uint8 stage; /* state machine stage */
+ uint8 received; /* received or txed */
+ uint8 paket_type; /* packet type */
+ uint8 category; /* category field */
+ uint8 action; /* action field */
+ uint8 token; /* token number */
+ uint8 follow_token; /* following token number */
+ uint16 index; /* index of the packet */
+ uint16 tof_cmd; /* M_TOF_CMD */
+ uint16 tof_rsp; /* M_TOF_RSP */
+ uint16 tof_avb_rxl; /* M_TOF_AVB_RX_L */
+ uint16 tof_avb_rxh; /* M_TOF_AVB_RX_H */
+ uint16 tof_avb_txl; /* M_TOF_AVB_TX_L */
+ uint16 tof_avb_txh; /* M_TOF_AVB_TX_H */
+ uint16 tof_id; /* M_TOF_ID */
+ uint16 tof_status0; /* M_TOF_STATUS_0 */
+ uint16 tof_status2; /* M_TOF_STATUS_2 */
+ uint16 tof_chsm0; /* M_TOF_CHNSM_0 */
+ uint16 tof_phyctl0; /* M_TOF_PHYCTL0 */
+ uint16 tof_phyctl1; /* M_TOF_PHYCTL1 */
+ uint16 tof_phyctl2; /* M_TOF_PHYCTL2 */
+ uint16 tof_lsig; /* M_TOF_LSIG */
+ uint16 tof_vhta0; /* M_TOF_VHTA0 */
+ uint16 tof_vhta1; /* M_TOF_VHTA1 */
+ uint16 tof_vhta2; /* M_TOF_VHTA2 */
+ uint16 tof_vhtb0; /* M_TOF_VHTB0 */
+ uint16 tof_vhtb1; /* M_TOF_VHTB1 */
+ uint16 tof_apmductl; /* M_TOF_AMPDU_CTL */
+ uint16 tof_apmdudlim; /* M_TOF_AMPDU_DLIM */
+ uint16 tof_apmdulen; /* M_TOF_AMPDU_LEN */
+} BWL_POST_PACKED_STRUCT wl_proxd_debug_data_t;
+
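+/*
+ * Host-side usage sketch for the collect interface above: query the
+ * header first, then fetch each frame by index. Kept under #if 0
+ * because the transport helper proxd_collect_ioctl() is hypothetical;
+ * any iovar get path that moves these packed structs works the same.
+ */
+#if 0
+extern int proxd_collect_ioctl(wl_proxd_collect_query_t *q, void *buf, int len);
+
+static int proxd_dump_collect(void)
+{
+ wl_proxd_collect_query_t q = {0};
+ wl_proxd_collect_header_t hdr;
+ wl_proxd_collect_data_t frame;
+ uint16 i;
+ int err;
+
+ q.method = PROXD_TOF_METHOD;
+ q.request = PROXD_COLLECT_QUERY_HEADER;
+ if ((err = proxd_collect_ioctl(&q, &hdr, sizeof(hdr))) != 0)
+  return err;
+ for (i = 0; i < hdr.total_frames; i++) {
+  q.request = PROXD_COLLECT_QUERY_DATA;
+  q.index = i; /* [0 .. total_frames - 1], per wl_proxd_collect_query_t */
+  if ((err = proxd_collect_ioctl(&q, &frame, sizeof(frame))) != 0)
+   break;
+ }
+ return err;
+}
+#endif
+
+/* version of the wl_wsec_info structure */
+#define WL_WSEC_INFO_VERSION 0x01
+
+/* start enum value for BSS properties */
+#define WL_WSEC_INFO_BSS_BASE 0x0100
+
+/* size of len and type fields of wl_wsec_info_tlv_t struct */
+#define WL_WSEC_INFO_TLV_HDR_LEN OFFSETOF(wl_wsec_info_tlv_t, data)
+
+/* Allowed wl_wsec_info properties; not all of them may be supported.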
*/ +typedef enum { + WL_WSEC_INFO_NONE = 0, + WL_WSEC_INFO_MAX_KEYS = 1, + WL_WSEC_INFO_NUM_KEYS = 2, + WL_WSEC_INFO_NUM_HW_KEYS = 3, + WL_WSEC_INFO_MAX_KEY_IDX = 4, + WL_WSEC_INFO_NUM_REPLAY_CNTRS = 5, + WL_WSEC_INFO_SUPPORTED_ALGOS = 6, + WL_WSEC_INFO_MAX_KEY_LEN = 7, + WL_WSEC_INFO_FLAGS = 8, + /* add global/per-wlc properties above */ + WL_WSEC_INFO_BSS_FLAGS = (WL_WSEC_INFO_BSS_BASE + 1), + WL_WSEC_INFO_BSS_WSEC = (WL_WSEC_INFO_BSS_BASE + 2), + WL_WSEC_INFO_BSS_TX_KEY_ID = (WL_WSEC_INFO_BSS_BASE + 3), + WL_WSEC_INFO_BSS_ALGO = (WL_WSEC_INFO_BSS_BASE + 4), + WL_WSEC_INFO_BSS_KEY_LEN = (WL_WSEC_INFO_BSS_BASE + 5), + /* add per-BSS properties above */ + WL_WSEC_INFO_MAX = 0xffff +} wl_wsec_info_type_t; + +/* tlv used to return wl_wsec_info properties */ +typedef struct { + uint16 type; + uint16 len; /* data length */ + uint8 data[1]; /* data follows */ +} wl_wsec_info_tlv_t; + +/* input/output data type for wsec_info iovar */ +typedef struct wl_wsec_info { + uint8 version; /* structure version */ + uint8 pad[2]; + uint8 num_tlvs; + wl_wsec_info_tlv_t tlvs[1]; /* tlv data follows */ +} wl_wsec_info_t; + +/* + * scan MAC definitions + */ + +/* common iovar struct */ +typedef struct wl_scanmac { + uint16 subcmd_id; /* subcommand id */ + uint16 len; /* total length of data[] */ + uint8 data[1]; /* subcommand data */ +} wl_scanmac_t; + +/* subcommand ids */ +#define WL_SCANMAC_SUBCMD_ENABLE 0 +#define WL_SCANMAC_SUBCMD_BSSCFG 1 /* only GET supported */ +#define WL_SCANMAC_SUBCMD_CONFIG 2 + +/* scanmac enable data struct */ +typedef struct wl_scanmac_enable { + uint8 enable; /* 1 - enable, 0 - disable */ + uint8 pad[3]; /* 4-byte struct alignment */ +} wl_scanmac_enable_t; + +/* scanmac bsscfg data struct */ +typedef struct wl_scanmac_bsscfg { + uint32 bsscfg; /* bsscfg index */ +} wl_scanmac_bsscfg_t; + +/* scanmac config data struct */ +typedef struct wl_scanmac_config { + struct ether_addr mac; /* 6 bytes of MAC address or MAC prefix (i.e. 
OUI) */ + struct ether_addr random_mask; /* randomized bits on each scan */ + uint16 scan_bitmap; /* scans to use this MAC address */ + uint8 pad[2]; /* 4-byte struct alignment */ +} wl_scanmac_config_t; + +/* scan bitmap */ +#define WL_SCANMAC_SCAN_UNASSOC (0x01 << 0) /* unassociated scans */ +#define WL_SCANMAC_SCAN_ASSOC_ROAM (0x01 << 1) /* associated roam scans */ +#define WL_SCANMAC_SCAN_ASSOC_PNO (0x01 << 2) /* associated PNO scans */ +#define WL_SCANMAC_SCAN_ASSOC_HOST (0x01 << 3) /* associated host scans */ + +/* no default structure packing */ +#include + +enum rssi_reason { + RSSI_REASON_UNKNOW = 0, + RSSI_REASON_LOWRSSI = 1, + RSSI_REASON_NSYC = 2, + RSSI_REASON_TIMEOUT = 3 +}; + +enum tof_reason { + TOF_REASON_OK = 0, + TOF_REASON_REQEND = 1, + TOF_REASON_TIMEOUT = 2, + TOF_REASON_NOACK = 3, + TOF_REASON_INVALIDAVB = 4, + TOF_REASON_INITIAL = 5, + TOF_REASON_ABORT = 6 +}; + +enum rssi_state { + RSSI_STATE_POLL = 0, + RSSI_STATE_TPAIRING = 1, + RSSI_STATE_IPAIRING = 2, + RSSI_STATE_THANDSHAKE = 3, + RSSI_STATE_IHANDSHAKE = 4, + RSSI_STATE_CONFIRMED = 5, + RSSI_STATE_PIPELINE = 6, + RSSI_STATE_NEGMODE = 7, + RSSI_STATE_MONITOR = 8, + RSSI_STATE_LAST = 9 +}; + +enum tof_state { + TOF_STATE_IDLE = 0, + TOF_STATE_IWAITM = 1, + TOF_STATE_TWAITM = 2, + TOF_STATE_ILEGACY = 3, + TOF_STATE_IWAITCL = 4, + TOF_STATE_TWAITCL = 5, + TOF_STATE_ICONFIRM = 6, + TOF_STATE_IREPORT = 7 +}; + +enum tof_mode_type { + TOF_LEGACY_UNKNOWN = 0, + TOF_LEGACY_AP = 1, + TOF_NONLEGACY_AP = 2 +}; + +enum tof_way_type { + TOF_TYPE_ONE_WAY = 0, + TOF_TYPE_TWO_WAY = 1, + TOF_TYPE_REPORT = 2 +}; + +enum tof_rate_type { + TOF_FRAME_RATE_VHT = 0, + TOF_FRAME_RATE_LEGACY = 1 +}; + +#define TOF_ADJ_TYPE_NUM 4 /* number of assisted timestamp adjustment */ +enum tof_adj_mode { + TOF_ADJ_SOFTWARE = 0, + TOF_ADJ_HARDWARE = 1, + TOF_ADJ_SEQ = 2, + TOF_ADJ_NONE = 3 +}; + +#define FRAME_TYPE_NUM 4 /* number of frame type */ +enum frame_type { + FRAME_TYPE_CCK = 0, + FRAME_TYPE_OFDM = 1, + FRAME_TYPE_11N = 2, + FRAME_TYPE_11AC = 3 +}; + +typedef struct wl_proxd_status_iovar { + uint16 method; /* method */ + uint8 mode; /* mode */ + uint8 peermode; /* peer mode */ + uint8 state; /* state */ + uint8 reason; /* reason code */ + uint32 distance; /* distance */ + uint32 txcnt; /* tx pkt counter */ + uint32 rxcnt; /* rx pkt counter */ + struct ether_addr peer; /* peer mac address */ + int8 avg_rssi; /* average rssi */ + int8 hi_rssi; /* highest rssi */ + int8 low_rssi; /* lowest rssi */ + uint32 dbgstatus; /* debug status */ + uint16 frame_type_cnt[FRAME_TYPE_NUM]; /* frame types */ + uint8 adj_type_cnt[TOF_ADJ_TYPE_NUM]; /* adj types HW/SW */ +} wl_proxd_status_iovar_t; + +#ifdef NET_DETECT +typedef struct net_detect_adapter_features { + bool wowl_enabled; + bool net_detect_enabled; + bool nlo_enabled; +} net_detect_adapter_features_t; + +typedef enum net_detect_bss_type { + nd_bss_any = 0, + nd_ibss, + nd_ess +} net_detect_bss_type_t; + +typedef struct net_detect_profile { + wlc_ssid_t ssid; + net_detect_bss_type_t bss_type; /* Ignore for now since Phase 1 is only for ESS */ + uint32 cipher_type; /* DOT11_CIPHER_ALGORITHM enumeration values */ + uint32 auth_type; /* DOT11_AUTH_ALGORITHM enumeration values */ +} net_detect_profile_t; + +typedef struct net_detect_profile_list { + uint32 num_nd_profiles; + net_detect_profile_t nd_profile[0]; +} net_detect_profile_list_t; + +typedef struct net_detect_config { + bool nd_enabled; + uint32 scan_interval; + uint32 wait_period; + bool wake_if_connected; + bool wake_if_disconnected; + 
net_detect_profile_list_t nd_profile_list; +} net_detect_config_t; + +typedef enum net_detect_wake_reason { + nd_reason_unknown, + nd_net_detected, + nd_wowl_event, + nd_ucode_error +} net_detect_wake_reason_t; + +typedef struct net_detect_wake_data { + net_detect_wake_reason_t nd_wake_reason; + uint32 nd_wake_date_length; + uint8 nd_wake_data[0]; /* Wake data (currently unused) */ +} net_detect_wake_data_t; + +#endif /* NET_DETECT */ + +/* (unversioned, deprecated) */ +typedef struct bcnreq { + uint8 bcn_mode; + int dur; + int channel; + struct ether_addr da; + uint16 random_int; + wlc_ssid_t ssid; + uint16 reps; +} bcnreq_t; + +#define WL_RRM_BCN_REQ_VER 1 +typedef struct bcn_req { + uint8 version; + uint8 bcn_mode; + uint8 pad_1[2]; + int32 dur; + int32 channel; + struct ether_addr da; + uint16 random_int; + wlc_ssid_t ssid; + uint16 reps; + uint8 req_elements; + uint8 pad_2; + chanspec_list_t chspec_list; +} bcn_req_t; + +typedef struct rrmreq { + struct ether_addr da; + uint8 reg; + uint8 chan; + uint16 random_int; + uint16 dur; + uint16 reps; +} rrmreq_t; + +typedef struct framereq { + struct ether_addr da; + uint8 reg; + uint8 chan; + uint16 random_int; + uint16 dur; + struct ether_addr ta; + uint16 reps; +} framereq_t; + +typedef struct statreq { + struct ether_addr da; + struct ether_addr peer; + uint16 random_int; + uint16 dur; + uint8 group_id; + uint16 reps; +} statreq_t; + +#define WL_RRM_RPT_VER 0 +#define WL_RRM_RPT_MAX_PAYLOAD 256 +#define WL_RRM_RPT_MIN_PAYLOAD 7 +#define WL_RRM_RPT_FALG_ERR 0 +#define WL_RRM_RPT_FALG_GRP_ID_PROPR (1 << 0) +#define WL_RRM_RPT_FALG_GRP_ID_0 (1 << 1) +typedef struct { + uint16 ver; /* version */ + struct ether_addr addr; /* STA MAC addr */ + uint32 timestamp; /* timestamp of the report */ + uint16 flag; /* flag */ + uint16 len; /* length of payload data */ + unsigned char data[WL_RRM_RPT_MAX_PAYLOAD]; +} statrpt_t; + +typedef struct wlc_l2keepalive_ol_params { + uint8 flags; + uint8 prio; + uint16 period_ms; +} wlc_l2keepalive_ol_params_t; + +typedef struct wlc_dwds_config { + uint32 enable; + uint32 mode; /* STA/AP interface */ + struct ether_addr ea; +} wlc_dwds_config_t; + +typedef struct wl_el_set_params_s { + uint8 set; /* Set number */ + uint32 size; /* Size to make/expand */ +} wl_el_set_params_t; + +typedef struct wl_el_tag_params_s { + uint16 tag; + uint8 set; + uint8 flags; +} wl_el_tag_params_t; + +/* Video Traffic Interference Monitor config */ +#define INTFER_VERSION 1 +typedef struct wl_intfer_params { + uint16 version; /* version */ + uint8 period; /* sample period */ + uint8 cnt; /* sample cnt */ + uint8 txfail_thresh; /* non-TCP txfail threshold */ + uint8 tcptxfail_thresh; /* tcptxfail threshold */ +} wl_intfer_params_t; + +typedef struct wl_staprio_cfg { + struct ether_addr ea; /* mac addr */ + uint8 prio; /* scb priority */ +} wl_staprio_cfg_t; + +typedef enum wl_stamon_cfg_cmd_type { + STAMON_CFG_CMD_DEL = 0, + STAMON_CFG_CMD_ADD = 1 +} wl_stamon_cfg_cmd_type_t; + +typedef struct wlc_stamon_sta_config { + wl_stamon_cfg_cmd_type_t cmd; /* 0 - delete, 1 - add */ + struct ether_addr ea; +} wlc_stamon_sta_config_t; + +#ifdef SR_DEBUG +typedef struct /* pmu_reg */{ + uint32 pmu_control; + uint32 pmu_capabilities; + uint32 pmu_status; + uint32 res_state; + uint32 res_pending; + uint32 pmu_timer1; + uint32 min_res_mask; + uint32 max_res_mask; + uint32 pmu_chipcontrol1[4]; + uint32 pmu_regcontrol[5]; + uint32 pmu_pllcontrol[5]; + uint32 pmu_rsrc_up_down_timer[31]; + uint32 rsrc_dep_mask[31]; +} pmu_reg_t; +#endif /* pmu_reg */ + 
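+/*
+ * Usage sketch for the STA-monitor config above. The "sta_monitor"
+ * iovar name and the wl_iovar_set() transport are assumptions, not
+ * part of this header; only the wlc_stamon_sta_config_t usage is.
+ */
+#if 0
+extern int wl_iovar_set(const char *name, void *buf, int len); /* hypothetical */
+
+static int stamon_add_example(const struct ether_addr *sta)
+{
+ wlc_stamon_sta_config_t cfg;
+
+ cfg.cmd = STAMON_CFG_CMD_ADD; /* STAMON_CFG_CMD_DEL removes it again */
+ cfg.ea = *sta; /* peer to monitor */
+ return wl_iovar_set("sta_monitor", &cfg, sizeof(cfg));
+}
+#endif
+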
+typedef struct wl_taf_define { + struct ether_addr ea; /* STA MAC or 0xFF... */ + uint16 version; /* version */ + uint32 sch; /* method index */ + uint32 prio; /* priority */ + uint32 misc; /* used for return value */ + char text[1]; /* used to pass and return ascii text */ +} wl_taf_define_t; + +/* Received Beacons lengths information */ +#define WL_LAST_BCNS_INFO_FIXED_LEN OFFSETOF(wlc_bcn_len_hist_t, bcnlen_ring) +typedef struct wlc_bcn_len_hist { + uint16 ver; /* version field */ + uint16 cur_index; /* current pointed index in ring buffer */ + uint32 max_bcnlen; /* Max beacon length received */ + uint32 min_bcnlen; /* Min beacon length received */ + uint32 ringbuff_len; /* Length of the ring buffer 'bcnlen_ring' */ + uint32 bcnlen_ring[1]; /* ring buffer storing received beacon lengths */ +} wlc_bcn_len_hist_t; + +/* WDS net interface types */ +#define WL_WDSIFTYPE_NONE 0x0 /* The interface type is neither WDS nor DWDS. */ +#define WL_WDSIFTYPE_WDS 0x1 /* The interface is WDS type. */ +#define WL_WDSIFTYPE_DWDS 0x2 /* The interface is DWDS type. */ + +typedef struct wl_bssload_static { + bool is_static; + uint16 sta_count; + uint8 chan_util; + uint16 aac; +} wl_bssload_static_t; + + +/* IO Var Operations - the Value of iov_op In wlc_ap_doiovar */ +typedef enum wlc_ap_iov_operation { + WLC_AP_IOV_OP_DELETE = -1, + WLC_AP_IOV_OP_DISABLE = 0, + WLC_AP_IOV_OP_ENABLE = 1, + WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE = 2, + WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE = 3, + WLC_AP_IOV_OP_MOVE = 4 +} wlc_ap_iov_oper_t; + +/* LTE coex info */ +/* Analogue of HCI Set MWS Signaling cmd */ +typedef struct { + uint16 mws_rx_assert_offset; + uint16 mws_rx_assert_jitter; + uint16 mws_rx_deassert_offset; + uint16 mws_rx_deassert_jitter; + uint16 mws_tx_assert_offset; + uint16 mws_tx_assert_jitter; + uint16 mws_tx_deassert_offset; + uint16 mws_tx_deassert_jitter; + uint16 mws_pattern_assert_offset; + uint16 mws_pattern_assert_jitter; + uint16 mws_inact_dur_assert_offset; + uint16 mws_inact_dur_assert_jitter; + uint16 mws_scan_freq_assert_offset; + uint16 mws_scan_freq_assert_jitter; + uint16 mws_prio_assert_offset_req; +} wci2_config_t; + +/* Analogue of HCI MWS Channel Params */ +typedef struct { + uint16 mws_rx_center_freq; /* MHz */ + uint16 mws_tx_center_freq; + uint16 mws_rx_channel_bw; /* KHz */ + uint16 mws_tx_channel_bw; + uint8 mws_channel_en; + uint8 mws_channel_type; /* Don't care for WLAN? 
*/
+} mws_params_t;
+
+/* MWS wci2 message */
+typedef struct {
+ uint8 mws_wci2_data; /* BT-SIG msg */
+ uint16 mws_wci2_interval; /* Interval in us */
+ uint16 mws_wci2_repeat; /* No of msgs to send */
+} mws_wci2_msg_t;
+
+typedef struct {
+ uint32 config; /* MODE: AUTO (-1), Disable (0), Enable (1) */
+ uint32 status; /* Current state: Disabled (0), Enabled (1) */
+} wl_config_t;
+
+#define WLC_RSDB_MODE_AUTO_MASK 0x80
+#define WLC_RSDB_EXTRACT_MODE(val) ((int8)((val) & (~(WLC_RSDB_MODE_AUTO_MASK))))
+
+#define WL_IF_STATS_T_VERSION 1 /* current version of wl_if_stats structure */
+
+/* per interface counters */
+typedef struct wl_if_stats {
+ uint16 version; /* version of the structure */
+ uint16 length; /* length of the entire structure */
+ uint32 PAD; /* padding */
+
+ /* transmit stat counters */
+ uint64 txframe; /* tx data frames */
+ uint64 txbyte; /* tx data bytes */
+ uint64 txerror; /* tx data errors (derived: sum of others) */
+ uint64 txnobuf; /* tx out of buffer errors */
+ uint64 txrunt; /* tx runt frames */
+ uint64 txfail; /* tx failed frames */
+ uint64 txretry; /* tx retry frames */
+ uint64 txretrie; /* tx multiple retry frames */
+ uint64 txfrmsnt; /* tx sent frames */
+ uint64 txmulti; /* tx multicast sent frames */
+ uint64 txfrag; /* tx fragments sent */
+
+ /* receive stat counters */
+ uint64 rxframe; /* rx data frames */
+ uint64 rxbyte; /* rx data bytes */
+ uint64 rxerror; /* rx data errors (derived: sum of others) */
+ uint64 rxnobuf; /* rx out of buffer errors */
+ uint64 rxrunt; /* rx runt frames */
+ uint64 rxfragerr; /* rx fragment errors */
+ uint64 rxmulti; /* rx multicast frames */
+}
+wl_if_stats_t;
+
+typedef struct wl_band {
+ uint16 bandtype; /* WL_BAND_2G, WL_BAND_5G */
+ uint16 bandunit; /* bandstate[] index */
+ uint16 phytype; /* phytype */
+ uint16 phyrev;
+}
+wl_band_t;
+
+#define WL_WLC_VERSION_T_VERSION 1 /* current version of wlc_version structure */
+
+/* wlc interface version */
+typedef struct wl_wlc_version {
+ uint16 version; /* version of the structure */
+ uint16 length; /* length of the entire structure */
+
+ /* epi version numbers */
+ uint16 epi_ver_major; /* epi major version number */
+ uint16 epi_ver_minor; /* epi minor version number */
+ uint16 epi_rc_num; /* epi RC number */
+ uint16 epi_incr_num; /* epi increment number */
+
+ /* wlc interface version numbers */
+ uint16 wlc_ver_major; /* wlc interface major version number */
+ uint16 wlc_ver_minor; /* wlc interface minor version number */
+}
+wl_wlc_version_t;
+
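+/*
+ * Defensive-read sketch for the versioned blocks above: check version
+ * and length before consuming wl_if_stats_t, since a mismatched driver
+ * may hand back a shorter or newer layout. Illustrative only.
+ */
+#if 0
+static int wl_if_stats_usable(const wl_if_stats_t *s, uint32 buflen)
+{
+ if (buflen < sizeof(*s))
+  return 0; /* short buffer */
+ if (s->version != WL_IF_STATS_T_VERSION)
+  return 0; /* unknown layout */
+ return s->length >= sizeof(*s); /* trust only s->length bytes */
+}
+#endif
+
+/* Version of WLC interface to be returned as a part of wl_wlc_version structure.
+ * For the discussion related to versions update policy refer to
+ * http://hwnbu-twiki.broadcom.com/bin/view/Mwgroup/WlShimAbstractionLayer
+ * For now the policy is to increment WLC_VERSION_MAJOR each time
+ * there is a change that involves both WLC layer and per-port layer.
+ * WLC_VERSION_MINOR is currently not in use.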
+ */ +#define WLC_VERSION_MAJOR 3 +#define WLC_VERSION_MINOR 0 + +/* begin proxd definitions */ +#include + +#define WL_PROXD_API_VERSION 0x0300 /* version 3.0 */ + +/* Minimum supported API version */ +#define WL_PROXD_API_MIN_VERSION 0x0300 + +/* proximity detection methods */ +enum { + WL_PROXD_METHOD_NONE = 0, + WL_PROXD_METHOD_RSVD1 = 1, /* backward compatibility - RSSI, not supported */ + WL_PROXD_METHOD_TOF = 2, + WL_PROXD_METHOD_RSVD2 = 3, /* 11v only - if needed */ + WL_PROXD_METHOD_FTM = 4, /* IEEE rev mc/2014 */ + WL_PROXD_METHOD_MAX +}; +typedef int16 wl_proxd_method_t; + +/* global and method configuration flags */ +enum { + WL_PROXD_FLAG_NONE = 0x00000000, + WL_PROXD_FLAG_RX_ENABLED = 0x00000001, /* respond to requests */ + WL_PROXD_FLAG_RX_RANGE_REQ = 0x00000002, /* 11mc range requests enabled */ + WL_PROXD_FLAG_TX_LCI = 0x00000004, /* transmit location, if available */ + WL_PROXD_FLAG_TX_CIVIC = 0x00000008, /* tx civic loc, if available */ + WL_PROXD_FLAG_RX_AUTO_BURST = 0x00000010, /* respond to requests w/o host action */ + WL_PROXD_FLAG_TX_AUTO_BURST = 0x00000020, /* continue requests w/o host action */ + WL_PROXD_FLAG_AVAIL_PUBLISH = 0x00000040, /* publish availability */ + WL_PROXD_FLAG_AVAIL_SCHEDULE = 0x00000080, /* schedule using availability */ + WL_PROXD_FLAG_ALL = 0xffffffff +}; +typedef uint32 wl_proxd_flags_t; + +#define WL_PROXD_FLAGS_AVAIL (WL_PROXD_FLAG_AVAIL_PUBLISH | \ + WL_PROXD_FLAG_AVAIL_SCHEDULE) + +/* session flags */ +enum { + WL_PROXD_SESSION_FLAG_NONE = 0x00000000, /* no flags */ + WL_PROXD_SESSION_FLAG_INITIATOR = 0x00000001, /* local device is initiator */ + WL_PROXD_SESSION_FLAG_TARGET = 0x00000002, /* local device is target */ + WL_PROXD_SESSION_FLAG_ONE_WAY = 0x00000004, /* (initiated) 1-way rtt */ + WL_PROXD_SESSION_FLAG_AUTO_BURST = 0x00000008, /* created w/ rx_auto_burst */ + WL_PROXD_SESSION_FLAG_PERSIST = 0x00000010, /* good until cancelled */ + WL_PROXD_SESSION_FLAG_RTT_DETAIL = 0x00000020, /* rtt detail in results */ + WL_PROXD_SESSION_FLAG_TOF_COMPAT = 0x00000040, /* TOF compatibility - TBD */ + WL_PROXD_SESSION_FLAG_AOA = 0x00000080, /* AOA along w/ RTT */ + WL_PROXD_SESSION_FLAG_RX_AUTO_BURST = 0x00000100, /* Same as proxd flags above */ + WL_PROXD_SESSION_FLAG_TX_AUTO_BURST = 0x00000200, /* Same as proxd flags above */ + WL_PROXD_SESSION_FLAG_NAN_BSS = 0x00000400, /* Use NAN BSS, if applicable */ + WL_PROXD_SESSION_FLAG_TS1 = 0x00000800, /* e.g. 
FTM1 - cap or rx */ + WL_PROXD_SESSION_FLAG_REPORT_FAILURE= 0x00002000, /* report failure to target */ + WL_PROXD_SESSION_FLAG_INITIATOR_RPT = 0x00004000, /* report distance to target */ + WL_PROXD_SESSION_FLAG_NOCHANSWT = 0x00008000, /* No channel switching */ + WL_PROXD_SESSION_FLAG_NETRUAL = 0x00010000, /* netrual mode */ + WL_PROXD_SESSION_FLAG_SEQ_EN = 0x00020000, /* Toast */ + WL_PROXD_SESSION_FLAG_NO_PARAM_OVRD = 0x00040000, /* no param override from target */ + WL_PROXD_SESSION_FLAG_ASAP = 0x00080000, /* ASAP session */ + WL_PROXD_SESSION_FLAG_REQ_LCI = 0x00100000, /* transmit LCI req */ + WL_PROXD_SESSION_FLAG_REQ_CIV = 0x00200000, /* transmit civic loc req */ + WL_PROXD_SESSION_FLAG_COLLECT = 0x80000000, /* debug - collect */ + WL_PROXD_SESSION_FLAG_ALL = 0xffffffff +}; +typedef uint32 wl_proxd_session_flags_t; + +/* time units - mc supports up to 0.1ns resolution */ +enum { + WL_PROXD_TMU_TU = 0, /* 1024us */ + WL_PROXD_TMU_SEC = 1, + WL_PROXD_TMU_MILLI_SEC = 2, + WL_PROXD_TMU_MICRO_SEC = 3, + WL_PROXD_TMU_NANO_SEC = 4, + WL_PROXD_TMU_PICO_SEC = 5 +}; +typedef int16 wl_proxd_tmu_t; + +/* time interval e.g. 10ns */ +typedef struct wl_proxd_intvl { + uint32 intvl; + wl_proxd_tmu_t tmu; + uint8 pad[2]; +} wl_proxd_intvl_t; + +/* commands that can apply to proxd, method or a session */ +enum { + WL_PROXD_CMD_NONE = 0, + WL_PROXD_CMD_GET_VERSION = 1, + WL_PROXD_CMD_ENABLE = 2, + WL_PROXD_CMD_DISABLE = 3, + WL_PROXD_CMD_CONFIG = 4, + WL_PROXD_CMD_START_SESSION = 5, + WL_PROXD_CMD_BURST_REQUEST = 6, + WL_PROXD_CMD_STOP_SESSION = 7, + WL_PROXD_CMD_DELETE_SESSION = 8, + WL_PROXD_CMD_GET_RESULT = 9, + WL_PROXD_CMD_GET_INFO = 10, + WL_PROXD_CMD_GET_STATUS = 11, + WL_PROXD_CMD_GET_SESSIONS = 12, + WL_PROXD_CMD_GET_COUNTERS = 13, + WL_PROXD_CMD_CLEAR_COUNTERS = 14, + WL_PROXD_CMD_COLLECT = 15, + WL_PROXD_CMD_TUNE = 16, + WL_PROXD_CMD_DUMP = 17, + WL_PROXD_CMD_START_RANGING = 18, + WL_PROXD_CMD_STOP_RANGING = 19, + WL_PROXD_CMD_GET_RANGING_INFO = 20, + WL_PROXD_CMD_IS_TLV_SUPPORTED = 21, + + WL_PROXD_CMD_MAX +}; +typedef int16 wl_proxd_cmd_t; + +/* session ids: + * id 0 is reserved + * ids 1..0x7fff - allocated by host/app + * 0x8000-0xffff - allocated by firmware, used for auto/rx + */ +enum { + WL_PROXD_SESSION_ID_GLOBAL = 0 +}; + +#define WL_PROXD_SID_HOST_MAX 0x7fff +#define WL_PROXD_SID_HOST_ALLOC(_sid) ((_sid) > 0 && (_sid) <= WL_PROXD_SID_HOST_MAX) + +/* maximum number sessions that can be allocated, may be less if tunable */ +#define WL_PROXD_MAX_SESSIONS 16 + +typedef uint16 wl_proxd_session_id_t; + +/* status - TBD BCME_ vs proxd status - range reserved for BCME_ */ +enum { + WL_PROXD_E_POLICY = -1045, + WL_PROXD_E_INCOMPLETE = -1044, + WL_PROXD_E_OVERRIDDEN = -1043, + WL_PROXD_E_ASAP_FAILED = -1042, + WL_PROXD_E_NOTSTARTED = -1041, + WL_PROXD_E_INVALIDAVB = -1040, + WL_PROXD_E_INCAPABLE = -1039, + WL_PROXD_E_MISMATCH = -1038, + WL_PROXD_E_DUP_SESSION = -1037, + WL_PROXD_E_REMOTE_FAIL = -1036, + WL_PROXD_E_REMOTE_INCAPABLE = -1035, + WL_PROXD_E_SCHED_FAIL = -1034, + WL_PROXD_E_PROTO = -1033, + WL_PROXD_E_EXPIRED = -1032, + WL_PROXD_E_TIMEOUT = -1031, + WL_PROXD_E_NOACK = -1030, + WL_PROXD_E_DEFERRED = -1029, + WL_PROXD_E_INVALID_SID = -1028, + WL_PROXD_E_REMOTE_CANCEL = -1027, + WL_PROXD_E_CANCELED = -1026, /* local */ + WL_PROXD_E_INVALID_SESSION = -1025, + WL_PROXD_E_BAD_STATE = -1024, + WL_PROXD_E_ERROR = -1, + WL_PROXD_E_OK = 0 +}; +typedef int32 wl_proxd_status_t; + +/* session states */ +enum { + WL_PROXD_SESSION_STATE_NONE = 0, + WL_PROXD_SESSION_STATE_CREATED = 1, + 
WL_PROXD_SESSION_STATE_CONFIGURED = 2,
+ WL_PROXD_SESSION_STATE_STARTED = 3,
+ WL_PROXD_SESSION_STATE_DELAY = 4,
+ WL_PROXD_SESSION_STATE_USER_WAIT = 5,
+ WL_PROXD_SESSION_STATE_SCHED_WAIT = 6,
+ WL_PROXD_SESSION_STATE_BURST = 7,
+ WL_PROXD_SESSION_STATE_STOPPING = 8,
+ WL_PROXD_SESSION_STATE_ENDED = 9,
+ WL_PROXD_SESSION_STATE_DESTROYING = -1
+};
+typedef int16 wl_proxd_session_state_t;
+
+/* RTT sample flags */
+enum {
+ WL_PROXD_RTT_SAMPLE_NONE = 0x00,
+ WL_PROXD_RTT_SAMPLE_DISCARD = 0x01
+};
+typedef uint8 wl_proxd_rtt_sample_flags_t;
+
+typedef struct wl_proxd_rtt_sample {
+ uint8 id; /* id for the sample - non-zero */
+ wl_proxd_rtt_sample_flags_t flags;
+ int16 rssi;
+ wl_proxd_intvl_t rtt; /* round trip time */
+ uint32 ratespec;
+} wl_proxd_rtt_sample_t;
+
+/* result flags */
+enum {
+ WL_PRXOD_RESULT_FLAG_NONE = 0x0000,
+ WL_PROXD_RESULT_FLAG_NLOS = 0x0001, /* NLOS - if available */
+ WL_PROXD_RESULT_FLAG_LOS = 0x0002, /* LOS - if available */
+ WL_PROXD_RESULT_FLAG_FATAL = 0x0004, /* Fatal error during burst */
+ WL_PROXD_RESULT_FLAG_ALL = 0xffff
+};
+typedef int16 wl_proxd_result_flags_t;
+
+/* rtt measurement result */
+typedef struct wl_proxd_rtt_result {
+ wl_proxd_session_id_t sid;
+ wl_proxd_result_flags_t flags;
+ wl_proxd_status_t status;
+ struct ether_addr peer;
+ wl_proxd_session_state_t state; /* current state */
+ union {
+  wl_proxd_intvl_t retry_after; /* hint for errors */
+  wl_proxd_intvl_t burst_duration; /* burst duration */
+ } u;
+ wl_proxd_rtt_sample_t avg_rtt;
+ uint32 avg_dist; /* 1/256m units */
+ uint16 sd_rtt; /* RTT standard deviation */
+ uint8 num_valid_rtt; /* valid rtt cnt */
+ uint8 num_ftm; /* actual num of ftm cnt */
+ uint16 burst_num; /* in a session */
+ uint16 num_rtt; /* 0 if no detail */
+ wl_proxd_rtt_sample_t rtt[1]; /* variable */
+} wl_proxd_rtt_result_t;
+
+/* aoa measurement result */
+typedef struct wl_proxd_aoa_result {
+ wl_proxd_session_id_t sid;
+ wl_proxd_result_flags_t flags;
+ wl_proxd_status_t status;
+ struct ether_addr peer;
+ wl_proxd_session_state_t state;
+ uint16 burst_num;
+ uint8 pad[2];
+ /* wl_proxd_aoa_sample_t sample_avg; TBD */
+} BWL_POST_PACKED_STRUCT wl_proxd_aoa_result_t;
+
+/* global stats */
+typedef struct wl_proxd_counters {
+ uint32 tx; /* tx frame count */
+ uint32 rx; /* rx frame count */
+ uint32 burst; /* total number of bursts */
+ uint32 sessions; /* total number of sessions */
+ uint32 max_sessions; /* max concurrency */
+ uint32 sched_fail; /* scheduling failures */
+ uint32 timeouts; /* timeouts */
+ uint32 protoerr; /* protocol errors */
+ uint32 noack; /* tx w/o ack */
+ uint32 txfail; /* any tx failure */
+ uint32 lci_req_tx; /* tx LCI requests */
+ uint32 lci_req_rx; /* rx LCI requests */
+ uint32 lci_rep_tx; /* tx LCI reports */
+ uint32 lci_rep_rx; /* rx LCI reports */
+ uint32 civic_req_tx; /* tx civic requests */
+ uint32 civic_req_rx; /* rx civic requests */
+ uint32 civic_rep_tx; /* tx civic reports */
+ uint32 civic_rep_rx; /* rx civic reports */
+ uint32 rctx; /* ranging contexts created */
+ uint32 rctx_done; /* count of ranging done */
+ uint32 publish_err; /* availability publishing errors */
+ uint32 on_chan; /* count of scheduler onchan */
+ uint32 off_chan; /* count of scheduler offchan */
+} wl_proxd_counters_t;
+
+typedef struct wl_proxd_counters wl_proxd_session_counters_t;
+
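+/*
+ * Sketch: delta two snapshots of the global counters above to derive
+ * per-interval rates. proxd_get_counters() is a hypothetical fetch,
+ * e.g. via the WL_PROXD_TLV_ID_COUNTERS output tlv defined below.
+ */
+#if 0
+extern int proxd_get_counters(wl_proxd_counters_t *out); /* hypothetical */
+
+static uint32 proxd_tx_delta_example(void)
+{
+ wl_proxd_counters_t prev, cur;
+
+ proxd_get_counters(&prev);
+ /* ... measurement interval elapses ... */
+ proxd_get_counters(&cur);
+ return cur.tx - prev.tx; /* frames transmitted in the interval */
+}
+#endif
+
+enum {
+ WL_PROXD_CAP_NONE = 0x0000,
+ WL_PROXD_CAP_ALL = 0xffff
+};
+typedef int16 wl_proxd_caps_t;
+
+/* method capabilities */
+enum {
+ WL_PROXD_FTM_CAP_NONE = 0x0000,
+ WL_PROXD_FTM_CAP_FTM1 =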
0x0001
+};
+typedef uint16 wl_proxd_ftm_caps_t;
+
+typedef struct BWL_PRE_PACKED_STRUCT wl_proxd_tlv_id_list {
+ uint16 num_ids;
+ uint16 ids[1];
+} BWL_POST_PACKED_STRUCT wl_proxd_tlv_id_list_t;
+
+typedef struct wl_proxd_session_id_list {
+ uint16 num_ids;
+ wl_proxd_session_id_t ids[1];
+} wl_proxd_session_id_list_t;
+
+/* tlvs returned for get_info on ftm method
+ * configuration:
+ * proxd flags
+ * event mask
+ * debug mask
+ * session defaults (session tlvs)
+ * status tlv - not supported for ftm method
+ * info tlv
+ */
+typedef struct wl_proxd_ftm_info {
+ wl_proxd_ftm_caps_t caps;
+ uint16 max_sessions;
+ uint16 num_sessions;
+ uint16 rx_max_burst;
+} wl_proxd_ftm_info_t;
+
+/* tlvs returned for get_info on session
+ * session config (tlvs)
+ * session info tlv
+ */
+typedef struct wl_proxd_ftm_session_info {
+ uint16 sid;
+ uint8 bss_index;
+ uint8 pad;
+ struct ether_addr bssid;
+ wl_proxd_session_state_t state;
+ wl_proxd_status_t status;
+ uint16 burst_num;
+} wl_proxd_ftm_session_info_t;
+
+typedef struct wl_proxd_ftm_session_status {
+ uint16 sid;
+ wl_proxd_session_state_t state;
+ wl_proxd_status_t status;
+ uint16 burst_num;
+} wl_proxd_ftm_session_status_t;
+
+/* rrm range request */
+typedef struct wl_proxd_range_req {
+ uint16 num_repeat;
+ uint16 init_delay_range; /* in TUs */
+ uint8 pad;
+ uint8 num_nbr; /* number of (possible) neighbors */
+ nbr_element_t nbr[1];
+} wl_proxd_range_req_t;
+
+#define WL_PROXD_LCI_LAT_OFF 0
+#define WL_PROXD_LCI_LONG_OFF 5
+#define WL_PROXD_LCI_ALT_OFF 10
+
+#define WL_PROXD_LCI_GET_LAT(_lci, _lat, _lat_err) { \
+ unsigned _off = WL_PROXD_LCI_LAT_OFF; \
+ _lat_err = (_lci)->data[(_off)] & 0x3f; \
+ _lat = (_lci)->data[(_off)+1]; \
+ _lat |= (_lci)->data[(_off)+2] << 8; \
+ _lat |= (_lci)->data[(_off)+3] << 16; \
+ _lat |= (_lci)->data[(_off)+4] << 24; \
+ _lat <<= 2; \
+ _lat |= (_lci)->data[(_off)] >> 6; \
+}
+
+#define WL_PROXD_LCI_GET_LONG(_lci, _lcilong, _long_err) { \
+ unsigned _off = WL_PROXD_LCI_LONG_OFF; \
+ _long_err = (_lci)->data[(_off)] & 0x3f; \
+ _lcilong = (_lci)->data[(_off)+1]; \
+ _lcilong |= (_lci)->data[(_off)+2] << 8; \
+ _lcilong |= (_lci)->data[(_off)+3] << 16; \
+ _lcilong |= (_lci)->data[(_off)+4] << 24; \
+ _lcilong <<= 2; \
+ _lcilong |= (_lci)->data[(_off)] >> 6; \
+}
+
+#define WL_PROXD_LCI_GET_ALT(_lci, _alt_type, _alt, _alt_err) { \
+ unsigned _off = WL_PROXD_LCI_ALT_OFF; \
+ _alt_type = (_lci)->data[_off] & 0x0f; \
+ _alt_err = (_lci)->data[(_off)] >> 4; \
+ _alt_err |= ((_lci)->data[(_off)+1] & 0x03) << 4; \
+ _alt = (_lci)->data[(_off)+2]; \
+ _alt |= (_lci)->data[(_off)+3] << 8; \
+ _alt |= (_lci)->data[(_off)+4] << 16; \
+ _alt <<= 6; \
+ _alt |= (_lci)->data[(_off) + 1] >> 2; \
+}
+
+#define WL_PROXD_LCI_VERSION(_lci) ((_lci)->data[15] >> 6)
+
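+/*
+ * Decode sketch for the LCI accessors above: latitude/longitude unpack
+ * as 34-bit fixed-point values plus a 6-bit uncertainty, matching the
+ * bit layout the macros walk. struct lci_raw is a hypothetical holder
+ * for the raw 16-byte LCI field the macros index via ->data[].
+ */
+#if 0
+struct lci_raw { uint8 data[16]; };
+
+static void lci_decode_example(const struct lci_raw *lci)
+{
+ int64 lat = 0, lon = 0;
+ uint8 lat_err = 0, long_err = 0;
+
+ WL_PROXD_LCI_GET_LAT(lci, lat, lat_err);
+ WL_PROXD_LCI_GET_LONG(lci, lon, long_err);
+ /* lat/lon now hold the raw fixed-point values; scale as needed */
+}
+#endif
+
+/* availability.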
advertising mechanism bss specific */ +/* availablity flags */ +enum { + WL_PROXD_AVAIL_NONE = 0, + WL_PROXD_AVAIL_NAN_PUBLISHED = 0x0001, + WL_PROXD_AVAIL_SCHEDULED = 0x0002 /* scheduled by proxd */ +}; +typedef int16 wl_proxd_avail_flags_t; + +/* time reference */ +enum { + WL_PROXD_TREF_NONE = 0, + WL_PROXD_TREF_DEV_TSF = 1, + WL_PROXD_TREF_NAN_DW = 2, + WL_PROXD_TREF_TBTT = 3, + WL_PROXD_TREF_MAX /* last entry */ +}; +typedef int16 wl_proxd_time_ref_t; + +/* proxd channel-time slot */ +typedef struct { + wl_proxd_intvl_t start; /* from ref */ + wl_proxd_intvl_t duration; /* from start */ + uint32 chanspec; +} wl_proxd_time_slot_t; + +typedef struct wl_proxd_avail24 { + wl_proxd_avail_flags_t flags; /* for query only */ + wl_proxd_time_ref_t time_ref; + uint16 max_slots; /* for query only */ + uint16 num_slots; + wl_proxd_time_slot_t slots[1]; /* ROM compat - not used */ + wl_proxd_intvl_t repeat; + wl_proxd_time_slot_t ts0[1]; +} wl_proxd_avail24_t; +#define WL_PROXD_AVAIL24_TIMESLOT(_avail24, _i) (&(_avail24)->ts0[(_i)]) +#define WL_PROXD_AVAIL24_TIMESLOT_OFFSET(_avail24) OFFSETOF(wl_proxd_avail24_t, ts0) +#define WL_PROXD_AVAIL24_TIMESLOTS(_avail24) WL_PROXD_AVAIL24_TIMESLOT(_avail24, 0) +#define WL_PROXD_AVAIL24_SIZE(_avail24, _num_slots) (\ + WL_PROXD_AVAIL24_TIMESLOT_OFFSET(_avail24) + \ + (_num_slots) * sizeof(*WL_PROXD_AVAIL24_TIMESLOT(_avail24, 0))) + +typedef struct wl_proxd_avail { + wl_proxd_avail_flags_t flags; /* for query only */ + wl_proxd_time_ref_t time_ref; + uint16 max_slots; /* for query only */ + uint16 num_slots; + wl_proxd_intvl_t repeat; + wl_proxd_time_slot_t slots[1]; +} wl_proxd_avail_t; +#define WL_PROXD_AVAIL_TIMESLOT(_avail, _i) (&(_avail)->slots[(_i)]) +#define WL_PROXD_AVAIL_TIMESLOT_OFFSET(_avail) OFFSETOF(wl_proxd_avail_t, slots) + +#define WL_PROXD_AVAIL_TIMESLOTS(_avail) WL_PROXD_AVAIL_TIMESLOT(_avail, 0) +#define WL_PROXD_AVAIL_SIZE(_avail, _num_slots) (\ + WL_PROXD_AVAIL_TIMESLOT_OFFSET(_avail) + \ + (_num_slots) * sizeof(*WL_PROXD_AVAIL_TIMESLOT(_avail, 0))) + +/* collect support TBD */ + +/* debugging */ +enum { + WL_PROXD_DEBUG_NONE = 0x00000000, + WL_PROXD_DEBUG_LOG = 0x00000001, + WL_PROXD_DEBUG_IOV = 0x00000002, + WL_PROXD_DEBUG_EVENT = 0x00000004, + WL_PROXD_DEBUG_SESSION = 0x00000008, + WL_PROXD_DEBUG_PROTO = 0x00000010, + WL_PROXD_DEBUG_SCHED = 0x00000020, + WL_PROXD_DEBUG_RANGING = 0x00000040, + WL_PROXD_DEBUG_ALL = 0xffffffff +}; +typedef uint32 wl_proxd_debug_mask_t; + +/* tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */ +enum { + WL_PROXD_TLV_ID_NONE = 0, + WL_PROXD_TLV_ID_METHOD = 1, + WL_PROXD_TLV_ID_FLAGS = 2, + WL_PROXD_TLV_ID_CHANSPEC = 3, /* note: uint32 */ + WL_PROXD_TLV_ID_TX_POWER = 4, + WL_PROXD_TLV_ID_RATESPEC = 5, + WL_PROXD_TLV_ID_BURST_DURATION = 6, /* intvl - length of burst */ + WL_PROXD_TLV_ID_BURST_PERIOD = 7, /* intvl - between bursts */ + WL_PROXD_TLV_ID_BURST_FTM_SEP = 8, /* intvl - between FTMs */ + WL_PROXD_TLV_ID_BURST_NUM_FTM = 9, /* uint16 - per burst */ + WL_PROXD_TLV_ID_NUM_BURST = 10, /* uint16 */ + WL_PROXD_TLV_ID_FTM_RETRIES = 11, /* uint16 at FTM level */ + WL_PROXD_TLV_ID_BSS_INDEX = 12, /* uint8 */ + WL_PROXD_TLV_ID_BSSID = 13, + WL_PROXD_TLV_ID_INIT_DELAY = 14, /* intvl - optional, non-standalone only */ + WL_PROXD_TLV_ID_BURST_TIMEOUT = 15, /* expect response within - intvl */ + WL_PROXD_TLV_ID_EVENT_MASK = 16, /* interested events - in/out */ + WL_PROXD_TLV_ID_FLAGS_MASK = 17, /* interested flags - in only */ + WL_PROXD_TLV_ID_PEER_MAC = 18, /* mac address of peer */ + 
WL_PROXD_TLV_ID_FTM_REQ = 19, /* dot11_ftm_req */ + WL_PROXD_TLV_ID_LCI_REQ = 20, + WL_PROXD_TLV_ID_LCI = 21, + WL_PROXD_TLV_ID_CIVIC_REQ = 22, + WL_PROXD_TLV_ID_CIVIC = 23, + WL_PROXD_TLV_ID_AVAIL24 = 24, /* ROM compatibility */ + WL_PROXD_TLV_ID_SESSION_FLAGS = 25, + WL_PROXD_TLV_ID_SESSION_FLAGS_MASK = 26, /* in only */ + WL_PROXD_TLV_ID_RX_MAX_BURST = 27, /* uint16 - limit bursts per session */ + WL_PROXD_TLV_ID_RANGING_INFO = 28, /* ranging info */ + WL_PROXD_TLV_ID_RANGING_FLAGS = 29, /* uint16 */ + WL_PROXD_TLV_ID_RANGING_FLAGS_MASK = 30, /* uint16, in only */ + WL_PROXD_TLV_ID_NAN_MAP_ID = 31, + WL_PROXD_TLV_ID_DEV_ADDR = 32, + WL_PROXD_TLV_ID_AVAIL = 33, /* wl_proxd_avail_t */ + WL_PROXD_TLV_ID_TLV_ID = 34, /* uint16 tlv-id */ + WL_PROXD_TLV_ID_FTM_REQ_RETRIES = 35, /* uint16 FTM request retries */ + + /* output - 512 + x */ + WL_PROXD_TLV_ID_STATUS = 512, + WL_PROXD_TLV_ID_COUNTERS = 513, + WL_PROXD_TLV_ID_INFO = 514, + WL_PROXD_TLV_ID_RTT_RESULT = 515, + WL_PROXD_TLV_ID_AOA_RESULT = 516, + WL_PROXD_TLV_ID_SESSION_INFO = 517, + WL_PROXD_TLV_ID_SESSION_STATUS = 518, + WL_PROXD_TLV_ID_SESSION_ID_LIST = 519, + + /* debug tlvs can be added starting 1024 */ + WL_PROXD_TLV_ID_DEBUG_MASK = 1024, + WL_PROXD_TLV_ID_COLLECT = 1025, /* output only */ + WL_PROXD_TLV_ID_STRBUF = 1026, + + WL_PROXD_TLV_ID_MAX +}; + +typedef struct wl_proxd_tlv { + uint16 id; + uint16 len; + uint8 data[1]; +} wl_proxd_tlv_t; + +/* proxd iovar - applies to proxd, method or session */ +typedef struct wl_proxd_iov { + uint16 version; + uint16 len; + wl_proxd_cmd_t cmd; + wl_proxd_method_t method; + wl_proxd_session_id_t sid; + uint8 pad[2]; + wl_proxd_tlv_t tlvs[1]; /* variable */ +} wl_proxd_iov_t; + +#define WL_PROXD_IOV_HDR_SIZE OFFSETOF(wl_proxd_iov_t, tlvs) + +/* The following event definitions may move to bcmevent.h, but sharing proxd types + * across needs more invasive changes unrelated to proxd + */ +enum { + WL_PROXD_EVENT_NONE = 0, /* not an event, reserved */ + WL_PROXD_EVENT_SESSION_CREATE = 1, + WL_PROXD_EVENT_SESSION_START = 2, + WL_PROXD_EVENT_FTM_REQ = 3, + WL_PROXD_EVENT_BURST_START = 4, + WL_PROXD_EVENT_BURST_END = 5, + WL_PROXD_EVENT_SESSION_END = 6, + WL_PROXD_EVENT_SESSION_RESTART = 7, + WL_PROXD_EVENT_BURST_RESCHED = 8, /* burst rescheduled - e.g. 
partial TSF */
+ WL_PROXD_EVENT_SESSION_DESTROY = 9,
+ WL_PROXD_EVENT_RANGE_REQ = 10,
+ WL_PROXD_EVENT_FTM_FRAME = 11,
+ WL_PROXD_EVENT_DELAY = 12,
+ WL_PROXD_EVENT_VS_INITIATOR_RPT = 13, /* (target) rx initiator-report */
+ WL_PROXD_EVENT_RANGING = 14,
+ WL_PROXD_EVENT_LCI_MEAS_REP = 15, /* LCI measurement report */
+ WL_PROXD_EVENT_CIVIC_MEAS_REP = 16, /* civic measurement report */
+
+ WL_PROXD_EVENT_MAX
+};
+typedef int16 wl_proxd_event_type_t;
+
+/* proxd event mask - up to 32 events for now */
+typedef uint32 wl_proxd_event_mask_t;
+
+#define WL_PROXD_EVENT_MASK_ALL 0xfffffffe
+#define WL_PROXD_EVENT_MASK_EVENT(_event_type) (1 << (_event_type))
+#define WL_PROXD_EVENT_ENABLED(_mask, _event_type) (\
+ ((_mask) & WL_PROXD_EVENT_MASK_EVENT(_event_type)) != 0)
+
+/* proxd event - applies to proxd, method or session */
+typedef struct wl_proxd_event {
+ uint16 version;
+ uint16 len;
+ wl_proxd_event_type_t type;
+ wl_proxd_method_t method;
+ wl_proxd_session_id_t sid;
+ uint8 pad[2];
+ wl_proxd_tlv_t tlvs[1]; /* variable */
+} wl_proxd_event_t;
+
+enum {
+ WL_PROXD_RANGING_STATE_NONE = 0,
+ WL_PROXD_RANGING_STATE_NOTSTARTED = 1,
+ WL_PROXD_RANGING_STATE_INPROGRESS = 2,
+ WL_PROXD_RANGING_STATE_DONE = 3
+};
+typedef int16 wl_proxd_ranging_state_t;
+
+/* proxd ranging flags */
+enum {
+ WL_PROXD_RANGING_FLAG_NONE = 0x0000, /* no flags */
+ WL_PROXD_RANGING_FLAG_DEL_SESSIONS_ON_STOP = 0x0001,
+ WL_PROXD_RANGING_FLAG_ALL = 0xffff
+};
+typedef uint16 wl_proxd_ranging_flags_t;
+
+struct wl_proxd_ranging_info {
+ wl_proxd_status_t status;
+ wl_proxd_ranging_state_t state;
+ wl_proxd_ranging_flags_t flags;
+ uint16 num_sids;
+ uint16 num_done;
+};
+typedef struct wl_proxd_ranging_info wl_proxd_ranging_info_t;
+#include <packed_section_end.h>
+/* end proxd definitions */
+
+/* require strict packing */
+#include <packed_section_start.h>
+/* Data returned by the bssload_report iovar.
+ * This is also the WLC_E_BSS_LOAD event data.
+ */
+typedef BWL_PRE_PACKED_STRUCT struct wl_bssload {
+ uint16 sta_count; /* station count */
+ uint16 aac; /* available admission capacity */
+ uint8 chan_util; /* channel utilization */
+} BWL_POST_PACKED_STRUCT wl_bssload_t;
+
+/* Maximum number of configurable BSS Load levels. The number of BSS Load
+ * ranges is always 1 more than the number of configured levels. eg. if
+ * 3 levels of 10, 20, 30 are configured then this defines 4 load ranges:
+ * 0-10, 11-20, 21-30, 31-255. A WLC_E_BSS_LOAD event is generated each time
+ * the utilization level crosses into another range, subject to the rate limit.
+ */
+#define MAX_BSSLOAD_LEVELS 8
+#define MAX_BSSLOAD_RANGES (MAX_BSSLOAD_LEVELS + 1)
+
+/* BSS Load event notification configuration. */
+typedef struct wl_bssload_cfg {
+ uint32 rate_limit_msec; /* # of events posted to application will be limited to
+ * one per specified period (0 to disable rate limit).
+ */
+ uint8 num_util_levels; /* Number of entries in util_levels[] below */
+ uint8 util_levels[MAX_BSSLOAD_LEVELS];
+ /* Variable number of BSS Load utilization levels in
+ * low to high order. An event will be posted each time
+ * a received beacon's BSS Load IE channel utilization
+ * value crosses a level.
+ */ +} wl_bssload_cfg_t; + +/* Multiple roaming profile support */ +#define WL_MAX_ROAM_PROF_BRACKETS 4 + +#define WL_MAX_ROAM_PROF_VER 1 + +#define WL_ROAM_PROF_NONE (0 << 0) +#define WL_ROAM_PROF_LAZY (1 << 0) +#define WL_ROAM_PROF_NO_CI (1 << 1) +#define WL_ROAM_PROF_SUSPEND (1 << 2) +#define WL_ROAM_PROF_SYNC_DTIM (1 << 6) +#define WL_ROAM_PROF_DEFAULT (1 << 7) /* backward compatible single default profile */ + +#define WL_FACTOR_TABLE_MAX_LIMIT 5 + +typedef struct wl_roam_prof { + int8 roam_flags; /* bit flags */ + int8 roam_trigger; /* RSSI trigger level per profile/RSSI bracket */ + int8 rssi_lower; + int8 roam_delta; + int8 rssi_boost_thresh; /* Min RSSI to qualify for RSSI boost */ + int8 rssi_boost_delta; /* RSSI boost for AP in the other band */ + uint16 nfscan; /* number of full scans to start with */ + uint16 fullscan_period; + uint16 init_scan_period; + uint16 backoff_multiplier; + uint16 max_scan_period; + uint8 channel_usage; + uint8 cu_avg_calc_dur; +} wl_roam_prof_t; + +typedef struct wl_roam_prof_band { + uint32 band; /* Must be just one band */ + uint16 ver; /* version of this struct */ + uint16 len; /* length in bytes of this structure */ + wl_roam_prof_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS]; +} wl_roam_prof_band_t; + +/* Data structures for Interface Create/Remove */ + +#define WL_INTERFACE_CREATE_VER (0) + +/* + * The flags field of wl_interface_create is designed to be + * a bit mask. As of now only Bit 0 and Bit 1 are used as mentioned below. + * The rest of the bits can be used, in case we have to provide + * more information to the dongle. + */ + +/* + * Bit 0 of flags field is used to inform whether the interface requested to + * be created is STA or AP. + * 0 - Create a STA interface + * 1 - Create an AP interface + */ +#define WL_INTERFACE_CREATE_STA (0 << 0) +#define WL_INTERFACE_CREATE_AP (1 << 0) + +/* + * Bit 1 of flags field is used to inform whether MAC is present in the + * data structure or not. + * 0 - Ignore mac_addr field + * 1 - Use the mac_addr field + */ +#define WL_INTERFACE_MAC_DONT_USE (0 << 1) +#define WL_INTERFACE_MAC_USE (1 << 1) +
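+/* Illustrative caller usage (hypothetical; 'mac' stands for a caller-supplied + * uint8 array): request an AP interface bound to a supplied MAC address. + * + * wl_interface_create_t ifc; + * memset(&ifc, 0, sizeof(ifc)); + * ifc.ver = WL_INTERFACE_CREATE_VER; + * ifc.flags = WL_INTERFACE_CREATE_AP | WL_INTERFACE_MAC_USE; + * memcpy(&ifc.mac_addr, mac, ETHER_ADDR_LEN); + */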
+typedef struct wl_interface_create { + uint16 ver; /* version of this struct */ + uint32 flags; /* flags that define the operation */ + struct ether_addr mac_addr; /* Optional MAC address */ +} wl_interface_create_t; + +typedef struct wl_interface_info { + uint16 ver; /* version of this struct */ + struct ether_addr mac_addr; /* MAC address of the interface */ + char ifname[BCM_MSG_IFNAME_MAX]; /* name of interface */ + uint8 bsscfgidx; /* source bsscfg index */ +} wl_interface_info_t; + +/* no default structure packing */ +#include + +#define TBOW_MAX_SSID_LEN 32 +#define TBOW_MAX_PASSPHRASE_LEN 63 + +#define WL_TBOW_SETUPINFO_T_VERSION 1 /* version of tbow_setup_netinfo_t */ +typedef struct tbow_setup_netinfo { + uint32 version; + uint8 opmode; + uint8 pad; + uint8 macaddr[ETHER_ADDR_LEN]; + uint32 ssid_len; + uint8 ssid[TBOW_MAX_SSID_LEN]; + uint8 passphrase_len; + uint8 passphrase[TBOW_MAX_PASSPHRASE_LEN]; + chanspec_t chanspec; +} tbow_setup_netinfo_t; + +typedef enum tbow_ho_opmode { + TBOW_HO_MODE_START_GO = 0, + TBOW_HO_MODE_START_STA, + TBOW_HO_MODE_START_GC, + TBOW_HO_MODE_TEST_GO, + TBOW_HO_MODE_STOP_GO = 0x10, + TBOW_HO_MODE_STOP_STA, + TBOW_HO_MODE_STOP_GC, + TBOW_HO_MODE_TEARDOWN +} tbow_ho_opmode_t; + +/* Beacon trim feature statistics */ +/* Configuration params */ +#define M_BCNTRIM_N (0) /* Enable/Disable Beacon Trim */ +#define M_BCNTRIM_TIMEND (1) /* Waiting time for TIM IE to end */ +#define M_BCNTRIM_TSFTLRN (2) /* TSF tolerance value (usecs) */ +/* PSM internal use */ +#define M_BCNTRIM_PREVBCNLEN (3) /* Beacon length excluding the TIM IE */ +#define M_BCNTRIM_N_COUNTER (4) /* PSM's local beacon trim counter */ +#define M_BCNTRIM_STATE (5) /* PSM's Beacon trim status register */ +#define M_BCNTRIM_TIMLEN (6) /* TIM IE Length */ +#define M_BCNTRIM_BMPCTL (7) /* Bitmap control word */ +#define M_BCNTRIM_TSF_L (8) /* Lower TSF word */ +#define M_BCNTRIM_TSF_ML (9) /* Lower middle TSF word */ +#define M_BCNTRIM_RSSI (10) /* Partial beacon RSSI */ +#define M_BCNTRIM_CHANNEL (11) /* Partial beacon channel */ +/* Trimming Counters */ +#define M_BCNTRIM_SBCNRXED (12) /* Self-BSSID beacon received */ +#define M_BCNTRIM_CANTRIM (13) /* Num of beacons which can be trimmed */ +#define M_BCNTRIM_TRIMMED (14) /* # beacons which were trimmed */ +#define M_BCNTRIM_BCNLENCNG (15) /* # beacons trimmed due to length change */ +#define M_BCNTRIM_TSFADJ (16) /* # beacons not trimmed due to large TSF delta */ +#define M_BCNTRIM_TIMNOTFOUND (17) /* # beacons not trimmed due to TIM missing */ +#define M_RXTSFTMRVAL_WD0 (18) +#define M_RXTSFTMRVAL_WD1 (19) +#define M_RXTSFTMRVAL_WD2 (20) +#define M_RXTSFTMRVAL_WD3 (21) +#define BCNTRIM_STATS_NUMPARAMS (22) /* 16 bit words */ + +#define TXPWRCAP_MAX_NUM_CORES 8 +#define TXPWRCAP_MAX_NUM_ANTENNAS (TXPWRCAP_MAX_NUM_CORES * 2) + +typedef struct wl_txpwrcap_tbl { + uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES]; + /* Stores values for valid antennas */ + int8 pwrcap_cell_on[TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */ + int8 pwrcap_cell_off[TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */ +} wl_txpwrcap_tbl_t; + +/* -------------- dynamic BTCOEX --------------- */ +/* require strict packing */ +#include + +#define DCTL_TROWS 2 /* currently practical number of rows */ +#define DCTL_TROWS_MAX 4 /* 2 extra rows RFU */ +/* DYNCTL profile flags */ +#define DCTL_FLAGS_DYNCTL (1 << 0) /* 1 - enabled, 0 
- legacy only */ +#define DCTL_FLAGS_DESENSE (1 << 1) /* auto desense is enabled */ +#define DCTL_FLAGS_MSWITCH (1 << 2) /* mode switching is enabled */ +/* for now AGG on/off is handled separately */ +#define DCTL_FLAGS_TX_AGG_OFF (1 << 3) /* TBD: allow TX agg Off */ +#define DCTL_FLAGS_RX_AGG_OFF (1 << 4) /* TBD: allow RX agg Off */ +/* used for dry run testing only */ +#define DCTL_FLAGS_DRYRUN (1 << 7) /* Enables dynctl dry run mode */ +#define IS_DYNCTL_ON(prof) ((prof->flags & DCTL_FLAGS_DYNCTL) != 0) +#define IS_DESENSE_ON(prof) ((prof->flags & DCTL_FLAGS_DESENSE) != 0) +#define IS_MSWITCH_ON(prof) ((prof->flags & DCTL_FLAGS_MSWITCH) != 0) +/* desense level currently in use */ +#define DESENSE_OFF 0 +#define DFLT_DESENSE_MID 12 +#define DFLT_DESENSE_HIGH 2 + +/* + * dynctl data points (a set of btpwr & wlrssi thresholds) + * for mode & desense switching + */ +typedef struct btc_thr_data { + int8 mode; /* used by desense sw */ + int8 bt_pwr; /* BT tx power threshold */ + int8 bt_rssi; /* BT rssi threshold */ + /* wl rssi range when mode or desense change may be needed */ + int8 wl_rssi_high; + int8 wl_rssi_low; +} btc_thr_data_t; + +/* dynctl profile data structure */ +#define DCTL_PROFILE_VER 0x01 +typedef BWL_PRE_PACKED_STRUCT struct dctl_prof { + uint8 version; /* dynctl profile version */ + /* dynctl profile flags bit:0 - dynctl On, bit:1 dsns On, bit:2 mode sw On, */ + uint8 flags; /* bit[6:3] reserved, bit7 - Dryrun (sim) - On */ + /* wl desense levels to apply */ + uint8 dflt_dsns_level; + uint8 low_dsns_level; + uint8 mid_dsns_level; + uint8 high_dsns_level; + /* mode switching hysteresis in dBm */ + int8 msw_btrssi_hyster; + /* default btcoex mode */ + uint8 default_btc_mode; + /* num of active rows in mode switching table */ + uint8 msw_rows; + /* num of rows in desense table */ + uint8 dsns_rows; + /* dynctl mode switching data table */ + btc_thr_data_t msw_data[DCTL_TROWS_MAX]; + /* dynctl desense switching data table */ + btc_thr_data_t dsns_data[DCTL_TROWS_MAX]; +} BWL_POST_PACKED_STRUCT dctl_prof_t; + +/* dynctl status info */ +typedef BWL_PRE_PACKED_STRUCT struct dynctl_status { + bool sim_on; /* true if simulation is On */ + uint16 bt_pwr_shm; /* BT per/task power as read from ucode */ + int8 bt_pwr; /* BT pwr extracted & converted to dBm */ + int8 bt_rssi; /* BT rssi in dBm */ + int8 wl_rssi; /* last wl rssi reading used by btcoex */ + uint8 dsns_level; /* current desense level */ + uint8 btc_mode; /* current btcoex mode */ + /* add more status items if needed, pad to 4 BB if needed */ +} BWL_POST_PACKED_STRUCT dynctl_status_t; + +/* dynctl simulation (dryrun data) */ +typedef BWL_PRE_PACKED_STRUCT struct dynctl_sim { + bool sim_on; /* simulation mode on/off */ + int8 btpwr; /* simulated BT power in dBm */ + int8 btrssi; /* simulated BT rssi in dBm */ + int8 wlrssi; /* simulated WL rssi in dBm */ +} BWL_POST_PACKED_STRUCT dynctl_sim_t; +/* no default structure packing */ +#include + +/* PTK key maintained per SCB */ +#define RSN_TEMP_ENCR_KEY_LEN 16 +typedef struct wpa_ptk { + uint8 kck[RSN_KCK_LENGTH]; /* EAPOL-Key Key Confirmation Key (KCK) */ + uint8 kek[RSN_KEK_LENGTH]; /* EAPOL-Key Key Encryption Key (KEK) */ + uint8 tk1[RSN_TEMP_ENCR_KEY_LEN]; /* Temporal Key 1 (TK1) */ + uint8 tk2[RSN_TEMP_ENCR_KEY_LEN]; /* Temporal Key 2 (TK2) */ +} wpa_ptk_t; + +/* GTK key maintained per SCB */ +typedef struct wpa_gtk { + uint32 idx; + uint32 key_len; + uint8 key[DOT11_MAX_KEY_SIZE]; +} wpa_gtk_t; + +/* FBT Auth Response Data structure */ +typedef struct wlc_fbt_auth_resp 
{ + uint8 macaddr[ETHER_ADDR_LEN]; /* station mac address */ + uint8 pad[2]; + uint8 pmk_r1_name[WPA2_PMKID_LEN]; + wpa_ptk_t ptk; /* pairwise key */ + wpa_gtk_t gtk; /* group key */ + uint32 ie_len; + uint8 status; /* Status of parsing FBT authentication + Request in application + */ + uint8 ies[1]; /* IEs contains MDIE, RSNIE, + FBTIE (ANonce, SNonce, R0KH-ID, R1KH-ID) + */ +} wlc_fbt_auth_resp_t; + +/* FBT Action Response frame */ +typedef struct wlc_fbt_action_resp { + uint16 version; /* structure version */ + uint16 length; /* length of structure */ + uint8 macaddr[ETHER_ADDR_LEN]; /* station mac address */ + uint8 data_len; /* len of ie from Category */ + uint8 data[1]; /* data contains category, action, sta address, target ap, + status code, fbt response frame body + */ +} wlc_fbt_action_resp_t; + +#define MACDBG_PMAC_ADDR_INPUT_MAXNUM 16 +#define MACDBG_PMAC_OBJ_TYPE_LEN 8 + +typedef struct _wl_macdbg_pmac_param_t { + char type[MACDBG_PMAC_OBJ_TYPE_LEN]; + uint8 step; + uint8 num; + uint32 bitmap; + bool addr_raw; + uint8 addr_num; + uint16 addr[MACDBG_PMAC_ADDR_INPUT_MAXNUM]; +} wl_macdbg_pmac_param_t; + +/* IOVAR 'svmp_mem' parameter. Used to read/clear svmp memory */ +typedef struct svmp_mem { + uint32 addr; /* offset to read svmp memory from vasip base address */ + uint16 len; /* length in count of uint16's */ + uint16 val; /* set the range of addr/len with a value */ +} svmp_mem_t; + +#define WL_NAN_BAND_STR_SIZE 5 /* sizeof ("auto") */ + +/* Definitions of different NAN Bands */ +enum { /* NAN band selection */ + NAN_BAND_AUTO, + NAN_BAND_B, + NAN_BAND_A, + NAN_BAND_INVALID = 0xFF +}; + +#if defined(WL_LINKSTAT) +typedef struct { + uint32 preamble; + uint32 nss; + uint32 bw; + uint32 rateMcsIdx; + uint32 reserved; + uint32 bitrate; +} wifi_rate; + +typedef struct { + uint16 version; + uint16 length; + uint32 tx_mpdu; + uint32 rx_mpdu; + uint32 mpdu_lost; + uint32 retries; + uint32 retries_short; + uint32 retries_long; + wifi_rate rate; +} wifi_rate_stat_t; + +typedef int32 wifi_radio; + +typedef struct { + uint16 version; + uint16 length; + wifi_radio radio; + uint32 on_time; + uint32 tx_time; + uint32 rx_time; + uint32 on_time_scan; + uint32 on_time_nbd; + uint32 on_time_gscan; + uint32 on_time_roam_scan; + uint32 on_time_pno_scan; + uint32 on_time_hs20; + uint32 num_channels; + uint8 channels[1]; +} wifi_radio_stat; +#endif /* WL_LINKSTAT */ + +#ifdef WL11ULB +/* ULB Mode configured via "ulb_mode" IOVAR */ +enum { + ULB_MODE_DISABLED = 0, + ULB_MODE_STD_ALONE_MODE = 1, /* Standalone ULB Mode */ + ULB_MODE_DYN_MODE = 2, /* Dynamic ULB Mode */ + /* Add all other enums before this */ + MAX_SUPP_ULB_MODES +}; + +/* ULB BWs configured via "ulb_bw" IOVAR during Standalone Mode Only. 
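+ * (For example, writing ULB_BW_10MHZ as the "ulb_bw" value would select + * 10 MHz standalone operation; illustrative usage only.)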
+ * Values of this enumeration are also used to specify 'Current Operational Bandwidth' + * and 'Primary Operational Bandwidth' sub-fields in 'ULB Operations' field (used in + * 'ULB Operations' Attribute or 'ULB Mode Switch' Attribute) + */ +typedef enum { + ULB_BW_DISABLED = 0, + ULB_BW_10MHZ = 1, /* Standalone ULB BW in 10 MHz BW */ + ULB_BW_5MHZ = 2, /* Standalone ULB BW in 5 MHz BW */ + ULB_BW_2P5MHZ = 3, /* Standalone ULB BW in 2.5 MHz BW */ + /* Add all other enums before this */ + MAX_SUPP_ULB_BW +} ulb_bw_type_t; +#endif /* WL11ULB */ + +#if defined(WLRCC) +#define MAX_ROAM_CHANNEL 20 + +typedef struct { + int n; + chanspec_t channels[MAX_ROAM_CHANNEL]; +} wl_roam_channel_list_t; +#endif + + +/* + * Neighbor Discovery Offload: enable NDO feature + * Called by ipv6 event handler when interface comes up + * Set RA rate limit interval value (%) + */ +typedef struct nd_ra_ol_limits { + uint16 version; /* version of the iovar buffer */ + uint16 type; /* type of data provided */ + uint16 length; /* length of the entire structure */ + uint16 pad1; /* pad union to 4 byte boundary */ + union { + struct { + uint16 min_time; /* seconds, min time for RA offload hold */ + uint16 lifetime_percent; + /* percent, lifetime percentage for offload hold time */ + } lifetime_relative; + struct { + uint16 hold_time; /* seconds, RA offload hold time */ + uint16 pad2; /* unused */ + } fixed; + } limits; +} nd_ra_ol_limits_t; + +#define ND_RA_OL_LIMITS_VER 1 + +/* nd_ra_ol_limits sub-types */ +#define ND_RA_OL_LIMITS_REL_TYPE 0 /* relative, percent of RA lifetime */ +#define ND_RA_OL_LIMITS_FIXED_TYPE 1 /* fixed time */ + +/* buffer lengths for the different nd_ra_ol_limits types */ +#define ND_RA_OL_LIMITS_REL_TYPE_LEN 12 +#define ND_RA_OL_LIMITS_FIXED_TYPE_LEN 10 + +#define ND_RA_OL_SET "SET" +#define ND_RA_OL_GET "GET" +#define ND_PARAM_SIZE 50 +#define ND_VALUE_SIZE 5 +#define ND_PARAMS_DELIMETER " " +#define ND_PARAM_VALUE_DELLIMETER '=' +#define ND_LIMIT_STR_FMT ("%50s %50s") + +#define ND_RA_TYPE "TYPE" +#define ND_RA_MIN_TIME "MIN" +#define ND_RA_PER "PER" +#define ND_RA_HOLD "HOLD" + +/* + * Temperature Throttling control mode + */ +typedef struct wl_temp_control { + bool enable; + uint16 control_bit; +} wl_temp_control_t; + +#endif /* _wlioctl_h_ */ diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl_utils.h b/drivers/net/wireless/bcmdhd/include/wlioctl_utils.h new file mode 100644 index 000000000000..c3fe428580b4 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/include/wlioctl_utils.h @@ -0,0 +1,53 @@ +/* + * Custom OID/ioctl related helper functions. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * <> + * + * $Id: wlioctl_utils.h 555740 2015-05-11 10:16:23Z $ + */ + +#ifndef _wlioctl_utils_h_ +#define _wlioctl_utils_h_ + +#include + +#ifndef BCMDRIVER +#define CCA_THRESH_MILLI 14 +#define CCA_THRESH_INTERFERE 6 + +extern cca_congest_channel_req_t * cca_per_chan_summary(cca_congest_channel_req_t *input, + cca_congest_channel_req_t *avg, bool percent); + +extern int cca_analyze(cca_congest_channel_req_t *input[], int num_chans, + uint flags, chanspec_t *answer); +#endif /* BCMDRIVER */ + +extern int wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, + int buflen, uint32 corerev); + +/* Get data pointer of wlc layer counters tuple from xtlv formatted counters IOVar buffer. */ +#define GET_WLCCNT_FROM_CNTBUF(cntbuf) \ + bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)cntbuf)->data, \ + ((wl_cnt_info_t *)cntbuf)->datalen, WL_CNT_XTLV_WLC, \ + NULL, BCM_XTLV_OPTION_ALIGN32) + +#endif /* _wlioctl_utils_h_ */ diff --git a/drivers/net/wireless/bcmdhd/linux_osl.c b/drivers/net/wireless/bcmdhd/linux_osl.c new file mode 100644 index 000000000000..f2abdb43b0e6 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/linux_osl.c @@ -0,0 +1,2652 @@ +/* + * Linux OS Independent Layer + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: linux_osl.c 602478 2015-11-26 04:46:12Z $ + */ + +#define LINUX_PORT + +#include +#include +#include +#include + + +#if !defined(STBLINUX) +#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) +#include +#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */ +#endif /* STBLINUX */ + +#include + +#include +#include +#include +#include + + +#ifdef BCM_SECURE_DMA +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(__ARM_ARCH_7A__) +#include +#include +#endif +#include +#endif /* BCM_SECURE_DMA */ + +#include + + +#ifdef BCM_OBJECT_TRACE +#include +#endif /* BCM_OBJECT_TRACE */ + +#define PCI_CFG_RETRY 10 + +#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */ +#define BCM_MEM_FILENAME_LEN 24 /* Mem. 
filename length */ +#define DUMPBUFSZ 1024 + +/* dependency check */ +#if !defined(BCMPCIE) && defined(DHD_USE_STATIC_CTRLBUF) +#error "DHD_USE_STATIC_CTRLBUF is supported on PCIE targets only" +#endif /* !BCMPCIE && DHD_USE_STATIC_CTRLBUF */ + +#ifdef CONFIG_DHD_USE_STATIC_BUF +#ifdef DHD_USE_STATIC_CTRLBUF +#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1) +#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2) +#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4) + +#define PREALLOC_FREE_MAGIC 0xFEDC +#define PREALLOC_USED_MAGIC 0xFCDE +#else +#define DHD_SKB_HDRSIZE 336 +#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE) +#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE) +#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE) +#endif /* DHD_USE_STATIC_CTRLBUF */ + +#define STATIC_BUF_MAX_NUM 16 +#define STATIC_BUF_SIZE (PAGE_SIZE*2) +#define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE) + +typedef struct bcm_static_buf { + struct semaphore static_sem; + unsigned char *buf_ptr; + unsigned char buf_use[STATIC_BUF_MAX_NUM]; +} bcm_static_buf_t; + +static bcm_static_buf_t *bcm_static_buf = 0; + +#ifdef DHD_USE_STATIC_CTRLBUF +#define STATIC_PKT_4PAGE_NUM 0 +#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE +#elif defined(ENHANCED_STATIC_BUF) +#define STATIC_PKT_4PAGE_NUM 1 +#define DHD_SKB_MAX_BUFSIZE DHD_SKB_4PAGE_BUFSIZE +#else +#define STATIC_PKT_4PAGE_NUM 0 +#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE +#endif /* DHD_USE_STATIC_CTRLBUF */ + +#ifdef DHD_USE_STATIC_CTRLBUF +#define STATIC_PKT_1PAGE_NUM 0 +#define STATIC_PKT_2PAGE_NUM 64 +#else +#define STATIC_PKT_1PAGE_NUM 8 +#define STATIC_PKT_2PAGE_NUM 8 +#endif /* DHD_USE_STATIC_CTRLBUF */ + +#define STATIC_PKT_1_2PAGE_NUM \ + ((STATIC_PKT_1PAGE_NUM) + (STATIC_PKT_2PAGE_NUM)) +#define STATIC_PKT_MAX_NUM \ + ((STATIC_PKT_1_2PAGE_NUM) + (STATIC_PKT_4PAGE_NUM)) + +typedef struct bcm_static_pkt { +#ifdef DHD_USE_STATIC_CTRLBUF + struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM]; + unsigned char pkt_invalid[STATIC_PKT_2PAGE_NUM]; + spinlock_t osl_pkt_lock; + uint32 last_allocated_index; +#else + struct sk_buff *skb_4k[STATIC_PKT_1PAGE_NUM]; + struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM]; +#ifdef ENHANCED_STATIC_BUF + struct sk_buff *skb_16k; +#endif /* ENHANCED_STATIC_BUF */ + struct semaphore osl_pkt_sem; +#endif /* DHD_USE_STATIC_CTRLBUF */ + unsigned char pkt_use[STATIC_PKT_MAX_NUM]; +} bcm_static_pkt_t; + +static bcm_static_pkt_t *bcm_static_skb = 0; + +void* wifi_platform_prealloc(void *adapter, int section, unsigned long size); +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + +typedef struct bcm_mem_link { + struct bcm_mem_link *prev; + struct bcm_mem_link *next; + uint size; + int line; + void *osh; + char file[BCM_MEM_FILENAME_LEN]; +} bcm_mem_link_t; + +struct osl_cmn_info { + atomic_t malloced; + atomic_t pktalloced; /* Number of allocated packet buffers */ + spinlock_t dbgmem_lock; + bcm_mem_link_t *dbgmem_list; + spinlock_t pktalloc_lock; + atomic_t refcount; /* Number of references to this shared structure. 
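+ * osl_attach() takes a reference and osl_detach() frees the + * structure once the count drops to zero.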
*/ +}; +typedef struct osl_cmn_info osl_cmn_t; + +struct osl_info { + osl_pubinfo_t pub; + uint32 flags; /* for specific cases to be handled in the OSL */ +#ifdef CTFPOOL + ctfpool_t *ctfpool; +#endif /* CTFPOOL */ + uint magic; + void *pdev; + uint failed; + uint bustype; + osl_cmn_t *cmn; /* Common OSL related data shared between two OSH's */ + + void *bus_handle; +#ifdef BCMDBG_CTRACE + spinlock_t ctrace_lock; + struct list_head ctrace_list; + int ctrace_num; +#endif /* BCMDBG_CTRACE */ +#ifdef BCM_SECURE_DMA + struct cma_dev *cma; + struct sec_mem_elem *sec_list_512; + struct sec_mem_elem *sec_list_base_512; + struct sec_mem_elem *sec_list_2048; + struct sec_mem_elem *sec_list_base_2048; + struct sec_mem_elem *sec_list_4096; + struct sec_mem_elem *sec_list_base_4096; + phys_addr_t contig_base; + void *contig_base_va; + phys_addr_t contig_base_alloc; + void *contig_base_alloc_va; + phys_addr_t contig_base_alloc_coherent; + void *contig_base_alloc_coherent_va; + phys_addr_t contig_delta_va_pa; + struct { + phys_addr_t pa; + void *va; + bool avail; + } sec_cma_coherent[SEC_CMA_COHERENT_MAX]; + +#endif /* BCM_SECURE_DMA */ +}; +#ifdef BCM_SECURE_DMA +phys_addr_t g_contig_delta_va_pa; +static void osl_sec_dma_setup_contig_mem(osl_t *osh, unsigned long memsize, int regn); +static int osl_sec_dma_alloc_contig_mem(osl_t *osh, unsigned long memsize, int regn); +static void osl_sec_dma_free_contig_mem(osl_t *osh, u32 memsize, int regn); +static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, + bool iscache, bool isdecr); +static void osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size); +static void osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, + sec_mem_elem_t **list); +static void osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, + void *sec_list_base); +static sec_mem_elem_t * osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, + int direction, struct sec_cma_info *ptr_cma_info, uint offset); +static void osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem); +static void osl_sec_dma_init_consistent(osl_t *osh); +static void *osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, + ulong *pap); +static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa); +#endif /* BCM_SECURE_DMA */ + +#ifdef BCM_OBJECT_TRACE +/* don't clear the first 4 bytes, which hold the pkt sn */ +#define OSL_PKTTAG_CLEAR(p) \ +do { \ + struct sk_buff *s = (struct sk_buff *)(p); \ + ASSERT(OSL_PKTTAG_SZ == 32); \ + *(uint32 *)(&s->cb[4]) = 0; \ + *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \ + *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \ + *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \ +} while (0) +#else +#define OSL_PKTTAG_CLEAR(p) \ +do { \ + struct sk_buff *s = (struct sk_buff *)(p); \ + ASSERT(OSL_PKTTAG_SZ == 32); \ + *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \ + *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \ + *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \ + *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \ +} while (0) +#endif /* BCM_OBJECT_TRACE */ + +/* PCMCIA attribute space access macros */ + +/* Global ASSERT type flag */ +uint32 g_assert_type = 1; +module_param(g_assert_type, int, 0); + +static int16 linuxbcmerrormap[] = +{ 0, /* 0 */ + -EINVAL, /* BCME_ERROR */ + -EINVAL, /* BCME_BADARG */ + -EINVAL, /* BCME_BADOPTION */ + -EINVAL, /* BCME_NOTUP */ + -EINVAL, /* BCME_NOTDOWN */ + 
-EINVAL, /* BCME_NOTAP */ + -EINVAL, /* BCME_NOTSTA */ + -EINVAL, /* BCME_BADKEYIDX */ + -EINVAL, /* BCME_RADIOOFF */ + -EINVAL, /* BCME_NOTBANDLOCKED */ + -EINVAL, /* BCME_NOCLK */ + -EINVAL, /* BCME_BADRATESET */ + -EINVAL, /* BCME_BADBAND */ + -E2BIG, /* BCME_BUFTOOSHORT */ + -E2BIG, /* BCME_BUFTOOLONG */ + -EBUSY, /* BCME_BUSY */ + -EINVAL, /* BCME_NOTASSOCIATED */ + -EINVAL, /* BCME_BADSSIDLEN */ + -EINVAL, /* BCME_OUTOFRANGECHAN */ + -EINVAL, /* BCME_BADCHAN */ + -EFAULT, /* BCME_BADADDR */ + -ENOMEM, /* BCME_NORESOURCE */ + -EOPNOTSUPP, /* BCME_UNSUPPORTED */ + -EMSGSIZE, /* BCME_BADLENGTH */ + -EINVAL, /* BCME_NOTREADY */ + -EPERM, /* BCME_EPERM */ + -ENOMEM, /* BCME_NOMEM */ + -EINVAL, /* BCME_ASSOCIATED */ + -ERANGE, /* BCME_RANGE */ + -EINVAL, /* BCME_NOTFOUND */ + -EINVAL, /* BCME_WME_NOT_ENABLED */ + -EINVAL, /* BCME_TSPEC_NOTFOUND */ + -EINVAL, /* BCME_ACM_NOTSUPPORTED */ + -EINVAL, /* BCME_NOT_WME_ASSOCIATION */ + -EIO, /* BCME_SDIO_ERROR */ + -ENODEV, /* BCME_DONGLE_DOWN */ + -EINVAL, /* BCME_VERSION */ + -EIO, /* BCME_TXFAIL */ + -EIO, /* BCME_RXFAIL */ + -ENODEV, /* BCME_NODEVICE */ + -EINVAL, /* BCME_NMODE_DISABLED */ + -ENODATA, /* BCME_NONRESIDENT */ + -EINVAL, /* BCME_SCANREJECT */ + -EINVAL, /* BCME_USAGE_ERROR */ + -EIO, /* BCME_IOCTL_ERROR */ + -EIO, /* BCME_SERIAL_PORT_ERR */ + -EOPNOTSUPP, /* BCME_DISABLED, BCME_NOTENABLED */ + -EIO, /* BCME_DECERR */ + -EIO, /* BCME_ENCERR */ + -EIO, /* BCME_MICERR */ + -ERANGE, /* BCME_REPLAY */ + -EINVAL, /* BCME_IE_NOTFOUND */ + -EINVAL, /* BCME_DATA_NOTFOUND */ + +/* When a new error code is added to bcmutils.h, add an OS- + * specific error translation here as well + */ +/* check if BCME_LAST changed since the last time this function was updated */ +#if BCME_LAST != -53 +#error "You need to add an OS error translation in the linuxbcmerrormap \ + for any new error code defined in bcmutils.h" +#endif +}; +uint lmtest = FALSE; + +/* translate bcmerrors into linux errors */ +int +osl_error(int bcmerror) +{ + if (bcmerror > 0) + bcmerror = 0; + else if (bcmerror < BCME_LAST) + bcmerror = BCME_ERROR; + + /* Array bounds covered by ASSERT in osl_attach */ + return linuxbcmerrormap[-bcmerror]; +} +#ifdef SHARED_OSL_CMN +osl_t * +osl_attach(void *pdev, uint bustype, bool pkttag, void **osl_cmn) +{ +#else +osl_t * +osl_attach(void *pdev, uint bustype, bool pkttag) +{ + void **osl_cmn = NULL; +#endif /* SHARED_OSL_CMN */ + osl_t *osh; + gfp_t flags; + + flags = CAN_SLEEP() ? 
GFP_KERNEL: GFP_ATOMIC; + if (!(osh = kmalloc(sizeof(osl_t), flags))) + return osh; + + ASSERT(osh); + + bzero(osh, sizeof(osl_t)); + + if (osl_cmn == NULL || *osl_cmn == NULL) { + if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) { + kfree(osh); + return NULL; + } + bzero(osh->cmn, sizeof(osl_cmn_t)); + if (osl_cmn) + *osl_cmn = osh->cmn; + atomic_set(&osh->cmn->malloced, 0); + osh->cmn->dbgmem_list = NULL; + spin_lock_init(&(osh->cmn->dbgmem_lock)); + + spin_lock_init(&(osh->cmn->pktalloc_lock)); + + } else { + osh->cmn = *osl_cmn; + } + atomic_add(1, &osh->cmn->refcount); + + bcm_object_trace_init(); + + /* Check that error map has the right number of entries in it */ + ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1)); + + osh->failed = 0; + osh->pdev = pdev; + osh->pub.pkttag = pkttag; + osh->bustype = bustype; + osh->magic = OS_HANDLE_MAGIC; +#ifdef BCM_SECURE_DMA + + osl_sec_dma_setup_contig_mem(osh, CMA_MEMBLOCK, CONT_ARMREGION); + + osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh, + phys_to_page((u32)osh->contig_base_alloc), + CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE); + + osh->contig_base_alloc_coherent = osh->contig_base_alloc; + osl_sec_dma_init_consistent(osh); + + osh->contig_base_alloc += CMA_DMA_DESC_MEMBLOCK; + + osh->contig_base_alloc_va = osl_sec_dma_ioremap(osh, + phys_to_page((u32)osh->contig_base_alloc), CMA_DMA_DATA_MEMBLOCK, TRUE, FALSE); + osh->contig_base_va = osh->contig_base_alloc_va; + + /* + * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, &osh->sec_list_512); + * osh->sec_list_base_512 = osh->sec_list_512; + * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, &osh->sec_list_2048); + * osh->sec_list_base_2048 = osh->sec_list_2048; + */ + osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096); + osh->sec_list_base_4096 = osh->sec_list_4096; + +#endif /* BCM_SECURE_DMA */ + + switch (bustype) { + case PCI_BUS: + case SI_BUS: + case PCMCIA_BUS: + osh->pub.mmbus = TRUE; + break; + case JTAG_BUS: + case SDIO_BUS: + case USB_BUS: + case SPI_BUS: + case RPC_BUS: + osh->pub.mmbus = FALSE; + break; + default: + ASSERT(FALSE); + break; + } + +#ifdef BCMDBG_CTRACE + spin_lock_init(&osh->ctrace_lock); + INIT_LIST_HEAD(&osh->ctrace_list); + osh->ctrace_num = 0; +#endif /* BCMDBG_CTRACE */ + + + return osh; +} + +int osl_static_mem_init(osl_t *osh, void *adapter) +{ +#ifdef CONFIG_DHD_USE_STATIC_BUF + if (!bcm_static_buf && adapter) { + if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter, + 3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) { + printk("can not alloc static buf!\n"); + bcm_static_skb = NULL; + ASSERT(osh->magic == OS_HANDLE_MAGIC); + return -ENOMEM; + } else { + printk("alloc static buf at %p!\n", bcm_static_buf); + } + + sema_init(&bcm_static_buf->static_sem, 1); + + bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE; + } + +#if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF) + if (!bcm_static_skb && adapter) { + int i; + void *skb_buff_ptr = 0; + bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048); + skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0); + if (!skb_buff_ptr) { + printk("cannot alloc static buf!\n"); + bcm_static_buf = NULL; + bcm_static_skb = NULL; + ASSERT(osh->magic == OS_HANDLE_MAGIC); + return -ENOMEM; + } + + bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) * + (STATIC_PKT_MAX_NUM)); + for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { + bcm_static_skb->pkt_use[i] = 0; + } + +#ifdef 
DHD_USE_STATIC_CTRLBUF + spin_lock_init(&bcm_static_skb->osl_pkt_lock); + bcm_static_skb->last_allocated_index = 0; +#else + sema_init(&bcm_static_skb->osl_pkt_sem, 1); +#endif /* DHD_USE_STATIC_CTRLBUF */ + } +#endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */ +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + + return 0; +} + +void osl_set_bus_handle(osl_t *osh, void *bus_handle) +{ + osh->bus_handle = bus_handle; +} + +void* osl_get_bus_handle(osl_t *osh) +{ + return osh->bus_handle; +} + +void +osl_detach(osl_t *osh) +{ + if (osh == NULL) + return; + +#ifdef BCM_SECURE_DMA + osl_sec_dma_free_contig_mem(osh, CMA_MEMBLOCK, CONT_ARMREGION); + osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, osh->sec_list_base_512); + osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, osh->sec_list_base_2048); + osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096); + osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_MEMBLOCK); +#endif /* BCM_SECURE_DMA */ + + + bcm_object_trace_deinit(); + + ASSERT(osh->magic == OS_HANDLE_MAGIC); + atomic_sub(1, &osh->cmn->refcount); + if (atomic_read(&osh->cmn->refcount) == 0) { + kfree(osh->cmn); + } + kfree(osh); +} + +int osl_static_mem_deinit(osl_t *osh, void *adapter) +{ +#ifdef CONFIG_DHD_USE_STATIC_BUF + if (bcm_static_buf) { + bcm_static_buf = 0; + } +#ifdef BCMSDIO + if (bcm_static_skb) { + bcm_static_skb = 0; + } +#endif /* BCMSDIO */ +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + return 0; +} + +static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len) +{ + struct sk_buff *skb; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) + gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL; +#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA) + flags |= GFP_ATOMIC; +#endif +#ifdef DHD_USE_ATOMIC_PKTGET + flags = GFP_ATOMIC; +#endif /* DHD_USE_ATOMIC_PKTGET */ + skb = __dev_alloc_skb(len, flags); +#else + skb = dev_alloc_skb(len); +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */ + return skb; +} + +#ifdef CTFPOOL + +#ifdef CTFPOOL_SPINLOCK +#define CTFPOOL_LOCK(ctfpool, flags) spin_lock_irqsave(&(ctfpool)->lock, flags) +#define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_irqrestore(&(ctfpool)->lock, flags) +#else +#define CTFPOOL_LOCK(ctfpool, flags) spin_lock_bh(&(ctfpool)->lock) +#define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_bh(&(ctfpool)->lock) +#endif /* CTFPOOL_SPINLOCK */ +/* + * Allocate and add an object to packet pool. 
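+ * The pool is kept as a singly linked list threaded through skb->next; + * each new skb is tagged with a pointer back to its pool (CTFPOOLPTR) and + * marked FASTBUF so the free path can recognize and recycle it.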
+ */ +void * +osl_ctfpool_add(osl_t *osh) +{ + struct sk_buff *skb; +#ifdef CTFPOOL_SPINLOCK + unsigned long flags; +#endif /* CTFPOOL_SPINLOCK */ + + if ((osh == NULL) || (osh->ctfpool == NULL)) + return NULL; + + CTFPOOL_LOCK(osh->ctfpool, flags); + ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj); + + /* No need to allocate more objects */ + if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) { + CTFPOOL_UNLOCK(osh->ctfpool, flags); + return NULL; + } + + /* Allocate a new skb and add it to the ctfpool */ + skb = osl_alloc_skb(osh, osh->ctfpool->obj_size); + if (skb == NULL) { + printf("%s: skb alloc of len %d failed\n", __FUNCTION__, + osh->ctfpool->obj_size); + CTFPOOL_UNLOCK(osh->ctfpool, flags); + return NULL; + } + + /* Add to ctfpool */ + skb->next = (struct sk_buff *)osh->ctfpool->head; + osh->ctfpool->head = skb; + osh->ctfpool->fast_frees++; + osh->ctfpool->curr_obj++; + + /* Hijack a skb member to store ptr to ctfpool */ + CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool; + + /* Use bit flag to indicate skb from fast ctfpool */ + PKTFAST(osh, skb) = FASTBUF; + + CTFPOOL_UNLOCK(osh->ctfpool, flags); + + return skb; +} + +/* + * Add new objects to the pool. + */ +void +osl_ctfpool_replenish(osl_t *osh, uint thresh) +{ + if ((osh == NULL) || (osh->ctfpool == NULL)) + return; + + /* Do nothing if no refills are required */ + while ((osh->ctfpool->refills > 0) && (thresh--)) { + osl_ctfpool_add(osh); + osh->ctfpool->refills--; + } +} + +/* + * Initialize the packet pool with specified number of objects. + */ +int32 +osl_ctfpool_init(osl_t *osh, uint numobj, uint size) +{ + gfp_t flags; + + flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC; + osh->ctfpool = kzalloc(sizeof(ctfpool_t), flags); + ASSERT(osh->ctfpool); + + osh->ctfpool->max_obj = numobj; + osh->ctfpool->obj_size = size; + + spin_lock_init(&osh->ctfpool->lock); + + while (numobj--) { + if (!osl_ctfpool_add(osh)) + return -1; + osh->ctfpool->fast_frees--; + } + + return 0; +} + +/* + * Cleanup the packet pool objects. + */ +void +osl_ctfpool_cleanup(osl_t *osh) +{ + struct sk_buff *skb, *nskb; +#ifdef CTFPOOL_SPINLOCK + unsigned long flags; +#endif /* CTFPOOL_SPINLOCK */ + + if ((osh == NULL) || (osh->ctfpool == NULL)) + return; + + CTFPOOL_LOCK(osh->ctfpool, flags); + + skb = osh->ctfpool->head; + + while (skb != NULL) { + nskb = skb->next; + dev_kfree_skb(skb); + skb = nskb; + osh->ctfpool->curr_obj--; + } + + ASSERT(osh->ctfpool->curr_obj == 0); + osh->ctfpool->head = NULL; + CTFPOOL_UNLOCK(osh->ctfpool, flags); + + kfree(osh->ctfpool); + osh->ctfpool = NULL; +} + +void +osl_ctfpool_stats(osl_t *osh, void *b) +{ + struct bcmstrbuf *bb; + + if ((osh == NULL) || (osh->ctfpool == NULL)) + return; + +#ifdef CONFIG_DHD_USE_STATIC_BUF + if (bcm_static_buf) { + bcm_static_buf = 0; + } +#ifdef BCMSDIO + if (bcm_static_skb) { + bcm_static_skb = 0; + } +#endif /* BCMSDIO */ +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + + bb = b; + + ASSERT((osh != NULL) && (bb != NULL)); + + bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n", + osh->ctfpool->max_obj, osh->ctfpool->obj_size, + osh->ctfpool->curr_obj, osh->ctfpool->refills); + bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n", + osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees, + osh->ctfpool->slow_allocs); +} + +static inline struct sk_buff * +osl_pktfastget(osl_t *osh, uint len) +{ + struct sk_buff *skb; +#ifdef CTFPOOL_SPINLOCK + unsigned long flags; +#endif /* CTFPOOL_SPINLOCK */ + + /* Try to do fast allocate. 
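+ * (osl_pktget() falls back to osl_alloc_skb() when this returns NULL.)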
Return null if ctfpool is not in use + * or if there are no items in the ctfpool. + */ + if (osh->ctfpool == NULL) + return NULL; + + CTFPOOL_LOCK(osh->ctfpool, flags); + if (osh->ctfpool->head == NULL) { + ASSERT(osh->ctfpool->curr_obj == 0); + osh->ctfpool->slow_allocs++; + CTFPOOL_UNLOCK(osh->ctfpool, flags); + return NULL; + } + + if (len > osh->ctfpool->obj_size) { + CTFPOOL_UNLOCK(osh->ctfpool, flags); + return NULL; + } + + ASSERT(len <= osh->ctfpool->obj_size); + + /* Get an object from ctfpool */ + skb = (struct sk_buff *)osh->ctfpool->head; + osh->ctfpool->head = (void *)skb->next; + + osh->ctfpool->fast_allocs++; + osh->ctfpool->curr_obj--; + ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head); + CTFPOOL_UNLOCK(osh->ctfpool, flags); + + /* Init skb struct */ + skb->next = skb->prev = NULL; +#if defined(__ARM_ARCH_7A__) + skb->data = skb->head + NET_SKB_PAD; + skb->tail = skb->head + NET_SKB_PAD; +#else + skb->data = skb->head + 16; + skb->tail = skb->head + 16; +#endif /* __ARM_ARCH_7A__ */ + skb->len = 0; + skb->cloned = 0; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14) + skb->list = NULL; +#endif + atomic_set(&skb->users, 1); + + PKTSETCLINK(skb, NULL); + PKTCCLRATTR(skb); + PKTFAST(osh, skb) &= ~(CTFBUF | SKIPCT | CHAINED); + + return skb; +} +#endif /* CTFPOOL */ + +#if defined(BCM_GMAC3) +/* Account for a packet delivered to downstream forwarder. + * Decrement a GMAC forwarder interface's pktalloced count. + */ +void BCMFASTPATH +osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt) +{ + + atomic_sub(skb_cnt, &osh->cmn->pktalloced); +} + +/* Account for a downstream forwarder delivered packet to a WL/DHD driver. + * Increment a GMAC forwarder interface's pktalloced count. + */ +#ifdef BCMDBG_CTRACE +void BCMFASTPATH +osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt, int line, char *file) +#else +void BCMFASTPATH +osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt) +#endif /* BCMDBG_CTRACE */ +{ +#if defined(BCMDBG_CTRACE) + int i; + struct sk_buff *skb; +#endif + +#if defined(BCMDBG_CTRACE) + if (skb_cnt > 1) { + struct sk_buff **skb_array = (struct sk_buff **)skbs; + for (i = 0; i < skb_cnt; i++) { + skb = skb_array[i]; +#if defined(BCMDBG_CTRACE) + ASSERT(!PKTISCHAINED(skb)); + ADD_CTRACE(osh, skb, file, line); +#endif /* BCMDBG_CTRACE */ + } + } else { + skb = (struct sk_buff *)skbs; +#if defined(BCMDBG_CTRACE) + ASSERT(!PKTISCHAINED(skb)); + ADD_CTRACE(osh, skb, file, line); +#endif /* BCMDBG_CTRACE */ + } +#endif + + atomic_add(skb_cnt, &osh->cmn->pktalloced); +} + +#endif /* BCM_GMAC3 */ + +/* Convert a driver packet to native(OS) packet + * In the process, packettag is zeroed out before sending up + * IP code depends on skb->cb to be setup correctly with various options + * In our case, that means it should be 0 + */ +struct sk_buff * BCMFASTPATH +osl_pkt_tonative(osl_t *osh, void *pkt) +{ + struct sk_buff *nskb; +#ifdef BCMDBG_CTRACE + struct sk_buff *nskb1, *nskb2; +#endif + + if (osh->pub.pkttag) + OSL_PKTTAG_CLEAR(pkt); + + /* Decrement the packet counter */ + for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) { + atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced); + +#ifdef BCMDBG_CTRACE + for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) { + if (PKTISCHAINED(nskb1)) { + nskb2 = PKTCLINK(nskb1); + } + else + nskb2 = NULL; + + DEL_CTRACE(osh, nskb1); + } +#endif /* BCMDBG_CTRACE */ + } + return (struct sk_buff *)pkt; +} + +/* Convert a native(OS) packet to driver packet. 
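+ * Each skb in the chain is counted into cmn->pktalloced, mirroring the + * decrement done in osl_pkt_tonative().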
+ * In the process, native packet is destroyed, there is no copying + * Also, a packettag is zeroed out + */ +#ifdef BCMDBG_CTRACE +void * BCMFASTPATH +osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file) +#else +void * BCMFASTPATH +osl_pkt_frmnative(osl_t *osh, void *pkt) +#endif /* BCMDBG_CTRACE */ +{ + struct sk_buff *nskb; +#ifdef BCMDBG_CTRACE + struct sk_buff *nskb1, *nskb2; +#endif + + if (osh->pub.pkttag) + OSL_PKTTAG_CLEAR(pkt); + + /* Increment the packet counter */ + for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) { + atomic_add(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced); + +#ifdef BCMDBG_CTRACE + for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) { + if (PKTISCHAINED(nskb1)) { + nskb2 = PKTCLINK(nskb1); + } + else + nskb2 = NULL; + + ADD_CTRACE(osh, nskb1, file, line); + } +#endif /* BCMDBG_CTRACE */ + } + return (void *)pkt; +} + +/* Return a new packet. zero out pkttag */ +#ifdef BCMDBG_CTRACE +void * BCMFASTPATH +osl_pktget(osl_t *osh, uint len, int line, char *file) +#else +#ifdef BCM_OBJECT_TRACE +void * BCMFASTPATH +osl_pktget(osl_t *osh, uint len, int line, const char *caller) +#else +void * BCMFASTPATH +osl_pktget(osl_t *osh, uint len) +#endif /* BCM_OBJECT_TRACE */ +#endif /* BCMDBG_CTRACE */ +{ + struct sk_buff *skb; + uchar num = 0; + if (lmtest != FALSE) { + get_random_bytes(&num, sizeof(uchar)); + if ((num + 1) <= (256 * lmtest / 100)) + return NULL; + } + +#ifdef CTFPOOL + /* Allocate from local pool */ + skb = osl_pktfastget(osh, len); + if ((skb != NULL) || ((skb = osl_alloc_skb(osh, len)) != NULL)) { +#else /* CTFPOOL */ + if ((skb = osl_alloc_skb(osh, len))) { +#endif /* CTFPOOL */ + skb->tail += len; + skb->len += len; + skb->priority = 0; + +#ifdef BCMDBG_CTRACE + ADD_CTRACE(osh, skb, file, line); +#endif + atomic_inc(&osh->cmn->pktalloced); +#ifdef BCM_OBJECT_TRACE + bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line); +#endif /* BCM_OBJECT_TRACE */ + } + + return ((void*) skb); +} + +#ifdef CTFPOOL +static inline void +osl_pktfastfree(osl_t *osh, struct sk_buff *skb) +{ + ctfpool_t *ctfpool; +#ifdef CTFPOOL_SPINLOCK + unsigned long flags; +#endif /* CTFPOOL_SPINLOCK */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14) + skb->tstamp.tv.sec = 0; +#else + skb->stamp.tv_sec = 0; +#endif + + /* We only need to init the fields that we change */ + skb->dev = NULL; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) + skb->dst = NULL; +#endif + OSL_PKTTAG_CLEAR(skb); + skb->ip_summed = 0; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) + skb_orphan(skb); +#else + skb->destructor = NULL; +#endif + + ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb); + ASSERT(ctfpool != NULL); + + /* Add object to the ctfpool */ + CTFPOOL_LOCK(ctfpool, flags); + skb->next = (struct sk_buff *)ctfpool->head; + ctfpool->head = (void *)skb; + + ctfpool->fast_frees++; + ctfpool->curr_obj++; + + ASSERT(ctfpool->curr_obj <= ctfpool->max_obj); + CTFPOOL_UNLOCK(ctfpool, flags); +} +#endif /* CTFPOOL */ + +/* Free the driver packet. 
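+ * Buffers tagged as ctfpool fast buffers (PKTISFAST) are recycled into + * their pool via osl_pktfastfree(); all others go to dev_kfree_skb_any().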
Free the tag if present */ +#ifdef BCM_OBJECT_TRACE +void BCMFASTPATH +osl_pktfree(osl_t *osh, void *p, bool send, int line, const char *caller) +#else +void BCMFASTPATH +osl_pktfree(osl_t *osh, void *p, bool send) +#endif /* BCM_OBJECT_TRACE */ +{ + struct sk_buff *skb, *nskb; + if (osh == NULL) + return; + + skb = (struct sk_buff*) p; + + if (send && osh->pub.tx_fn) + osh->pub.tx_fn(osh->pub.tx_ctx, p, 0); + + PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE); + +#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF) + if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) { + printk("%s: pkt %p is from static pool\n", + __FUNCTION__, p); + dump_stack(); + return; + } + + if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) { + printk("%s: pkt %p is from static pool and not in use\n", + __FUNCTION__, p); + dump_stack(); + return; + } +#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */ + + /* perversion: we use skb->next to chain multi-skb packets */ + while (skb) { + nskb = skb->next; + skb->next = NULL; + +#ifdef BCMDBG_CTRACE + DEL_CTRACE(osh, skb); +#endif + + +#ifdef BCM_OBJECT_TRACE + bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line); +#endif /* BCM_OBJECT_TRACE */ + +#ifdef CTFPOOL + if (PKTISFAST(osh, skb)) { + if (atomic_read(&skb->users) == 1) + smp_rmb(); + else if (!atomic_dec_and_test(&skb->users)) + goto next_skb; + osl_pktfastfree(osh, skb); + } else +#endif + { + dev_kfree_skb_any(skb); + } +#ifdef CTFPOOL +next_skb: +#endif + atomic_dec(&osh->cmn->pktalloced); + skb = nskb; + } +} + +#ifdef CONFIG_DHD_USE_STATIC_BUF +void* +osl_pktget_static(osl_t *osh, uint len) +{ + int i = 0; + struct sk_buff *skb; +#ifdef DHD_USE_STATIC_CTRLBUF + unsigned long flags; +#endif /* DHD_USE_STATIC_CTRLBUF */ + + if (!bcm_static_skb) + return osl_pktget(osh, len); + + if (len > DHD_SKB_MAX_BUFSIZE) { + printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len); + return osl_pktget(osh, len); + } + +#ifdef DHD_USE_STATIC_CTRLBUF + spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags); + + if (len <= DHD_SKB_2PAGE_BUFSIZE) { + uint32 index; + for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) { + index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM; + bcm_static_skb->last_allocated_index++; + if (bcm_static_skb->skb_8k[index] && + bcm_static_skb->pkt_use[index] == 0) { + break; + } + } + + if ((i != STATIC_PKT_2PAGE_NUM) && + (index >= 0) && (index < STATIC_PKT_2PAGE_NUM)) { + bcm_static_skb->pkt_use[index] = 1; + skb = bcm_static_skb->skb_8k[index]; + skb->data = skb->head; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, NET_SKB_PAD); +#else + skb->tail = skb->data + NET_SKB_PAD; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->data += NET_SKB_PAD; + skb->cloned = 0; + skb->priority = 0; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, len); +#else + skb->tail = skb->data + len; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->len = len; + skb->mac_len = PREALLOC_USED_MAGIC; + spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags); + return skb; + } + } + + spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags); + printk("%s: all static pkt in use!\n", __FUNCTION__); + return NULL; +#else + down(&bcm_static_skb->osl_pkt_sem); + + if (len <= DHD_SKB_1PAGE_BUFSIZE) { + for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { + if (bcm_static_skb->skb_4k[i] && + bcm_static_skb->pkt_use[i] == 0) { + break; + } + } + + if (i != STATIC_PKT_MAX_NUM) { + bcm_static_skb->pkt_use[i] = 1; + + skb = 
bcm_static_skb->skb_4k[i]; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, len); +#else + skb->tail = skb->data + len; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->len = len; + + up(&bcm_static_skb->osl_pkt_sem); + return skb; + } + } + + if (len <= DHD_SKB_2PAGE_BUFSIZE) { + for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) { + if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] && + bcm_static_skb->pkt_use[i] == 0) { + break; + } + } + + if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) { + bcm_static_skb->pkt_use[i] = 1; + skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, len); +#else + skb->tail = skb->data + len; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->len = len; + + up(&bcm_static_skb->osl_pkt_sem); + return skb; + } + } + +#if defined(ENHANCED_STATIC_BUF) + if (bcm_static_skb->skb_16k && + bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) { + bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1; + + skb = bcm_static_skb->skb_16k; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb_set_tail_pointer(skb, len); +#else + skb->tail = skb->data + len; +#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + skb->len = len; + + up(&bcm_static_skb->osl_pkt_sem); + return skb; + } +#endif /* ENHANCED_STATIC_BUF */ + + up(&bcm_static_skb->osl_pkt_sem); + printk("%s: all static pkt in use!\n", __FUNCTION__); + return osl_pktget(osh, len); +#endif /* DHD_USE_STATIC_CTRLBUF */ +} + +void +osl_pktfree_static(osl_t *osh, void *p, bool send) +{ + int i; +#ifdef DHD_USE_STATIC_CTRLBUF + struct sk_buff *skb = (struct sk_buff *)p; + unsigned long flags; +#endif /* DHD_USE_STATIC_CTRLBUF */ + + if (!p) { + return; + } + + if (!bcm_static_skb) { + osl_pktfree(osh, p, send); + return; + } + +#ifdef DHD_USE_STATIC_CTRLBUF + spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags); + + for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) { + if (p == bcm_static_skb->skb_8k[i]) { + if (bcm_static_skb->pkt_use[i] == 0) { + printk("%s: static pkt idx %d(%p) was double freed\n", + __FUNCTION__, i, p); + } else { + bcm_static_skb->pkt_use[i] = 0; + } + + if (skb->mac_len != PREALLOC_USED_MAGIC) { + printk("%s: static pkt idx %d(%p) is not in use\n", + __FUNCTION__, i, p); + } + + skb->mac_len = PREALLOC_FREE_MAGIC; + spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags); + return; + } + } + + spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags); + printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p); +#else + down(&bcm_static_skb->osl_pkt_sem); + for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) { + if (p == bcm_static_skb->skb_4k[i]) { + bcm_static_skb->pkt_use[i] = 0; + up(&bcm_static_skb->osl_pkt_sem); + return; + } + } + + for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) { + if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) { + bcm_static_skb->pkt_use[i] = 0; + up(&bcm_static_skb->osl_pkt_sem); + return; + } + } +#ifdef ENHANCED_STATIC_BUF + if (p == bcm_static_skb->skb_16k) { + bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0; + up(&bcm_static_skb->osl_pkt_sem); + return; + } +#endif + up(&bcm_static_skb->osl_pkt_sem); + osl_pktfree(osh, p, send); +#endif /* DHD_USE_STATIC_CTRLBUF */ +} +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + +uint32 +osl_pci_read_config(osl_t *osh, uint offset, uint size) +{ + uint val = 0; + uint retry = PCI_CFG_RETRY; + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + /* only 4byte access supported */ + ASSERT(size == 4); + + do { 
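+ /* A config read of all 1s typically means the access failed or the + * device is not ready, so retry up to PCI_CFG_RETRY times. + */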
+ pci_read_config_dword(osh->pdev, offset, &val); + if (val != 0xffffffff) + break; + } while (retry--); + + + return (val); +} + +void +osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val) +{ + uint retry = PCI_CFG_RETRY; + + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + /* only 4byte access supported */ + ASSERT(size == 4); + + do { + pci_write_config_dword(osh->pdev, offset, val); + if (offset != PCI_BAR0_WIN) + break; + if (osl_pci_read_config(osh, offset, size) == val) + break; + } while (retry--); + +} + +/* return bus # for the pci device pointed by osh->pdev */ +uint +osl_pci_bus(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + +#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35) + return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus); +#else + return ((struct pci_dev *)osh->pdev)->bus->number; +#endif +} + +/* return slot # for the pci device pointed by osh->pdev */ +uint +osl_pci_slot(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + +#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35) + return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1; +#else + return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn); +#endif +} + +/* return domain # for the pci device pointed by osh->pdev */ +uint +osl_pcie_domain(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + + return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus); +} + +/* return bus # for the pci device pointed by osh->pdev */ +uint +osl_pcie_bus(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + + return ((struct pci_dev *)osh->pdev)->bus->number; +} + +/* return the pci device pointed by osh->pdev */ +struct pci_dev * +osl_pci_device(osl_t *osh) +{ + ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); + + return osh->pdev; +} + +static void +osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write) +{ +} + +void +osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size) +{ + osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE); +} + +void +osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size) +{ + osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE); +} + +void * +osl_malloc(osl_t *osh, uint size) +{ + void *addr; + gfp_t flags; + + /* only ASSERT if osh is defined */ + if (osh) + ASSERT(osh->magic == OS_HANDLE_MAGIC); +#ifdef CONFIG_DHD_USE_STATIC_BUF + if (bcm_static_buf) + { + int i = 0; + if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE)) + { + down(&bcm_static_buf->static_sem); + + for (i = 0; i < STATIC_BUF_MAX_NUM; i++) + { + if (bcm_static_buf->buf_use[i] == 0) + break; + } + + if (i == STATIC_BUF_MAX_NUM) + { + up(&bcm_static_buf->static_sem); + printk("all static buff in use!\n"); + goto original; + } + + bcm_static_buf->buf_use[i] = 1; + up(&bcm_static_buf->static_sem); + + bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size); + if (osh) + atomic_add(size, &osh->cmn->malloced); + + return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i)); + } + } +original: +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + + flags = CAN_SLEEP() ? 
GFP_KERNEL: GFP_ATOMIC; + if ((addr = kmalloc(size, flags)) == NULL) { + if (osh) + osh->failed++; + return (NULL); + } + if (osh && osh->cmn) + atomic_add(size, &osh->cmn->malloced); + + return (addr); +} + +void * +osl_mallocz(osl_t *osh, uint size) +{ + void *ptr; + + ptr = osl_malloc(osh, size); + + if (ptr != NULL) { + bzero(ptr, size); + } + + return ptr; +} + +void +osl_mfree(osl_t *osh, void *addr, uint size) +{ +#ifdef CONFIG_DHD_USE_STATIC_BUF + if (bcm_static_buf) + { + if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr + <= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN))) + { + int buf_idx = 0; + + buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE; + + down(&bcm_static_buf->static_sem); + bcm_static_buf->buf_use[buf_idx] = 0; + up(&bcm_static_buf->static_sem); + + if (osh && osh->cmn) { + ASSERT(osh->magic == OS_HANDLE_MAGIC); + atomic_sub(size, &osh->cmn->malloced); + } + return; + } + } +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + if (osh && osh->cmn) { + ASSERT(osh->magic == OS_HANDLE_MAGIC); + + ASSERT(size <= osl_malloced(osh)); + + atomic_sub(size, &osh->cmn->malloced); + } + kfree(addr); +} + +uint +osl_check_memleak(osl_t *osh) +{ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + if (atomic_read(&osh->cmn->refcount) == 1) + return (atomic_read(&osh->cmn->malloced)); + else + return 0; +} + +uint +osl_malloced(osl_t *osh) +{ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + return (atomic_read(&osh->cmn->malloced)); +} + +uint +osl_malloc_failed(osl_t *osh) +{ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + return (osh->failed); +} + + +uint +osl_dma_consistent_align(void) +{ + return (PAGE_SIZE); +} + +void* +osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap) +{ + void *va; + uint16 align = (1 << align_bits); + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + + if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align)) + size += align; + *alloced = size; + +#ifndef BCM_SECURE_DMA +#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) + va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO); + if (va) + *pap = (ulong)__virt_to_phys((ulong)va); +#else + { + dma_addr_t pap_lin; + struct pci_dev *hwdev = osh->pdev; + gfp_t flags; +#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL + flags = GFP_ATOMIC; +#else + flags = GFP_KERNEL; +#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */ + va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags); +#ifdef BCMDMA64OSL + PHYSADDRLOSET(*pap, pap_lin & 0xffffffff); + PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff); +#else + *pap = (dmaaddr_t)pap_lin; +#endif /* BCMDMA64OSL */ + } +#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */ +#else + va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap); +#endif /* BCM_SECURE_DMA */ + return va; +} + +void +osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa) +{ +#ifdef BCMDMA64OSL + dma_addr_t paddr; +#endif /* BCMDMA64OSL */ + ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); + +#ifndef BCM_SECURE_DMA +#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) + kfree(va); +#else +#ifdef BCMDMA64OSL + PHYSADDRTOULONG(pa, paddr); + pci_free_consistent(osh->pdev, size, va, paddr); +#else + pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa); +#endif /* BCMDMA64OSL */ +#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */ +#else + osl_sec_dma_free_consistent(osh, va, size, pa); +#endif /* BCM_SECURE_DMA */ +} + +dmaaddr_t 
BCMFASTPATH
+osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
+{
+ int dir;
+#ifdef BCMDMA64OSL
+ dmaaddr_t ret;
+ dma_addr_t map_addr;
+#endif /* BCMDMA64OSL */
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+
+#ifdef BCMDMA64OSL
+ map_addr = pci_map_single(osh->pdev, va, size, dir);
+ PHYSADDRLOSET(ret, map_addr & 0xffffffff);
+ PHYSADDRHISET(ret, (map_addr >> 32) & 0xffffffff);
+ return ret;
+#else
+ return (pci_map_single(osh->pdev, va, size, dir));
+#endif /* BCMDMA64OSL */
+}
+
+void BCMFASTPATH
+osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction)
+{
+ int dir;
+#ifdef BCMDMA64OSL
+ dma_addr_t paddr;
+#endif /* BCMDMA64OSL */
+
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+
+ dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+#ifdef BCMDMA64OSL
+ PHYSADDRTOULONG(pa, paddr);
+ pci_unmap_single(osh->pdev, paddr, size, dir);
+#else
+ pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
+#endif /* BCMDMA64OSL */
+}
+
+/* OSL function for CPU relax */
+inline void BCMFASTPATH
+osl_cpu_relax(void)
+{
+ cpu_relax();
+}
+
+
+#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) || \
+ defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890))
+
+#include
+
+/*
+ * Note that it is guaranteed that the ring is cache line aligned, but
+ * the messages are not. And we see that __dma_inv_range in
+ * arch/arm64/mm/cache.S invalidates only if the request size is
+ * cache line aligned; if not, it will clean and invalidate.
+ * So it is safer to invalidate the whole ring.
+ *
+ * Also, the latest kernel versions invoke cache maintenance operations
+ * from arch/arm64/mm/dma-mapping.c (__swiotlb_sync_single_for_device)
+ * only if is_device_dma_coherent returns 0. Since we don't have the BSP
+ * source, we assume that this is the case, as we pass NULL for the dev ptr.
+ */
+inline void BCMFASTPATH
+osl_cache_flush(void *va, uint size)
+{
+ /*
+ * Using long for address arithmetic is OK: on Linux it is
+ * 4 bytes on 32-bit and 8 bytes on 64-bit.
+ */
+ unsigned long end_cache_line_start;
+ unsigned long end_addr;
+ unsigned long next_cache_line_start;
+
+ end_addr = (unsigned long)va + size;
+
+ /* Find the cache line boundary just past the range we operate on */
+ end_cache_line_start = (end_addr & ~(L1_CACHE_BYTES - 1));
+ next_cache_line_start = end_cache_line_start + L1_CACHE_BYTES;
+
+ /* Align the start address to a cache line boundary */
+ va = (void *)((unsigned long)va & ~(L1_CACHE_BYTES - 1));
+
+ /* Ensure that size is also aligned and extends partial lines to full */
+ size = next_cache_line_start - (unsigned long)va;
+
+#ifndef BCM_SECURE_DMA
+
+#ifdef CONFIG_ARM64
+ /*
+ * virt_to_dma is not present in arm64/include/dma-mapping.h,
+ * so we have to convert the va to a pa first and then get the
+ * dma addr for it.
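+ *
+ * This assumes phys_to_dma(NULL, pa) is an identity (or at least
+ * device-independent) translation on this platform, consistent with
+ * the NULL dev pointer passed to dma_sync_single_for_device() below.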
+ */
+ {
+ phys_addr_t pa;
+ dma_addr_t dma_addr;
+ pa = virt_to_phys(va);
+ dma_addr = phys_to_dma(NULL, pa);
+ if (size > 0)
+ dma_sync_single_for_device(OSH_NULL, dma_addr, size, DMA_TX);
+ }
+#else
+ if (size > 0)
+ dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TX);
+#endif /* !CONFIG_ARM64 */
+#else
+ phys_addr_t orig_pa = (phys_addr_t)(va - g_contig_delta_va_pa);
+ if (size > 0)
+ dma_sync_single_for_device(OSH_NULL, orig_pa, size, DMA_TX);
+#endif /* defined BCM_SECURE_DMA */
+}
+
+inline void BCMFASTPATH
+osl_cache_inv(void *va, uint size)
+{
+ /*
+ * Using long for address arithmetic is OK: on Linux it is
+ * 4 bytes on 32-bit and 8 bytes on 64-bit.
+ */
+ unsigned long end_cache_line_start;
+ unsigned long end_addr;
+ unsigned long next_cache_line_start;
+
+ end_addr = (unsigned long)va + size;
+
+ /* Find the cache line boundary just past the range we operate on */
+ end_cache_line_start = (end_addr & ~(L1_CACHE_BYTES - 1));
+ next_cache_line_start = end_cache_line_start + L1_CACHE_BYTES;
+
+ /* Align the start address to a cache line boundary */
+ va = (void *)((unsigned long)va & ~(L1_CACHE_BYTES - 1));
+
+ /* Ensure that size is also aligned and extends partial lines to full */
+ size = next_cache_line_start - (unsigned long)va;
+
+#ifndef BCM_SECURE_DMA
+
+#ifdef CONFIG_ARM64
+ /*
+ * virt_to_dma is not present in arm64/include/dma-mapping.h,
+ * so we have to convert the va to a pa first and then get the
+ * dma addr for it.
+ */
+ {
+ phys_addr_t pa;
+ dma_addr_t dma_addr;
+ pa = virt_to_phys(va);
+ dma_addr = phys_to_dma(NULL, pa);
+ dma_sync_single_for_cpu(OSH_NULL, dma_addr, size, DMA_RX);
+ }
+#else
+ dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_RX);
+#endif /* !CONFIG_ARM64 */
+#else
+ phys_addr_t orig_pa = (phys_addr_t)(va - g_contig_delta_va_pa);
+ dma_sync_single_for_cpu(OSH_NULL, orig_pa, size, DMA_RX);
+#endif /* defined BCM_SECURE_DMA */
+}
+
+inline void osl_prefetch(const void *ptr)
+{
+ /* The PLD instruction is not available on ARM64; we don't care for now */
+#ifndef CONFIG_ARM64
+ __asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc");
+#endif
+}
+
+int osl_arch_is_coherent(void)
+{
+ return 0;
+}
+
+
+inline int osl_acp_war_enab(void)
+{
+ return 0;
+}
+
+#endif
+
+#if defined(BCMASSERT_LOG)
+void
+osl_assert(const char *exp, const char *file, int line)
+{
+ char tempbuf[256];
+ const char *basename;
+
+ basename = strrchr(file, '/');
+ /* skip the '/' */
+ if (basename)
+ basename++;
+
+ if (!basename)
+ basename = file;
+
+#ifdef BCMASSERT_LOG
+ snprintf(tempbuf, sizeof(tempbuf), "\"%s\": file \"%s\", line %d\n",
+ exp, basename, line);
+#endif /* BCMASSERT_LOG */
+
+#if defined(BCMASSERT_LOG)
+ switch (g_assert_type) {
+ case 0:
+ panic("%s", tempbuf);
+ break;
+ case 1:
+ printk("%s", tempbuf);
+ break;
+ case 2:
+ printk("%s", tempbuf);
+ BUG();
+ break;
+ default:
+ break;
+ }
+#endif
+
+}
+#endif
+
+void
+osl_delay(uint usec)
+{
+ uint d;
+
+ while (usec > 0) {
+ d = MIN(usec, 1000);
+ udelay(d);
+ usec -= d;
+ }
+}
+
+void
+osl_sleep(uint ms)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+ if (ms < 20)
+ usleep_range(ms*1000, ms*1000 + 1000);
+ else
+#endif
+ msleep(ms);
+}
+
+
+/* Clone a packet.
+ * The pkttag contents are NOT cloned.
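+ *
+ * A minimal usage sketch (hypothetical caller; 'osh' and 'pkt' are assumed
+ * to be a valid OSL handle and an skb obtained via PKTGET):
+ *
+ *	void *clone = osl_pktdup(osh, pkt);
+ *	if (clone == NULL)
+ *		return BCME_NOMEM;	/* clone allocation failed */
+ *	/* pkttag state on 'clone' starts out cleared; re-init it if needed */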
+ */
+#ifdef BCMDBG_CTRACE
+void *
+osl_pktdup(osl_t *osh, void *skb, int line, char *file)
+#else
+#ifdef BCM_OBJECT_TRACE
+void *
+osl_pktdup(osl_t *osh, void *skb, int line, const char *caller)
+#else
+void *
+osl_pktdup(osl_t *osh, void *skb)
+#endif /* BCM_OBJECT_TRACE */
+#endif /* BCMDBG_CTRACE */
+{
+ void * p;
+
+ ASSERT(!PKTISCHAINED(skb));
+
+ /* clear the CTFBUF flag if set and map the rest of the buffer
+ * before cloning.
+ */
+ PKTCTFMAP(osh, skb);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+ if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
+#else
+ if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
+#endif
+ return NULL;
+
+#ifdef CTFPOOL
+ if (PKTISFAST(osh, skb)) {
+ ctfpool_t *ctfpool;
+
+ /* If the buffer allocated from the ctfpool is cloned then
+ * we can't be sure when it will be freed. Since there
+ * is a chance that we will be losing a buffer
+ * from our pool, we increment the refill count for the
+ * object to be allocated later.
+ */
+ ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+ ASSERT(ctfpool != NULL);
+ PKTCLRFAST(osh, p);
+ PKTCLRFAST(osh, skb);
+ ctfpool->refills++;
+ }
+#endif /* CTFPOOL */
+
+ /* Clear PKTC context */
+ PKTSETCLINK(p, NULL);
+ PKTCCLRFLAGS(p);
+ PKTCSETCNT(p, 1);
+ PKTCSETLEN(p, PKTLEN(osh, skb));
+
+ /* skb_clone copies skb->cb; we don't want that */
+ if (osh->pub.pkttag)
+ OSL_PKTTAG_CLEAR(p);
+
+ /* Increment the packet counter */
+ atomic_inc(&osh->cmn->pktalloced);
+#ifdef BCM_OBJECT_TRACE
+ bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line);
+#endif /* BCM_OBJECT_TRACE */
+
+#ifdef BCMDBG_CTRACE
+ ADD_CTRACE(osh, (struct sk_buff *)p, file, line);
+#endif
+ return (p);
+}
+
+#ifdef BCMDBG_CTRACE
+int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+ int ck = FALSE;
+
+ spin_lock_irqsave(&osh->ctrace_lock, flags);
+
+ list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
+ if (pkt == skb) {
+ ck = TRUE;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&osh->ctrace_lock, flags);
+ return ck;
+}
+
+void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b)
+{
+ unsigned long flags;
+ struct sk_buff *skb;
+ int idx = 0;
+ int i, j;
+
+ spin_lock_irqsave(&osh->ctrace_lock, flags);
+
+ if (b != NULL)
+ bcm_bprintf(b, " Total %d skb not freed\n", osh->ctrace_num);
+ else
+ printk(" Total %d skb not freed\n", osh->ctrace_num);
+
+ list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
+ if (b != NULL)
+ bcm_bprintf(b, "[%d] skb %p:\n", ++idx, skb);
+ else
+ printk("[%d] skb %p:\n", ++idx, skb);
+
+ for (i = 0; i < skb->ctrace_count; i++) {
+ j = (skb->ctrace_start + i) % CTRACE_NUM;
+ if (b != NULL)
+ bcm_bprintf(b, " [%s(%d)]\n", skb->func[j], skb->line[j]);
+ else
+ printk(" [%s(%d)]\n", skb->func[j], skb->line[j]);
+ }
+ if (b != NULL)
+ bcm_bprintf(b, "\n");
+ else
+ printk("\n");
+ }
+
+ spin_unlock_irqrestore(&osh->ctrace_lock, flags);
+
+ return;
+}
+#endif /* BCMDBG_CTRACE */
+
+
+/*
+ * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
+ */
+
+/*
+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
+ */ + +uint +osl_pktalloced(osl_t *osh) +{ + if (atomic_read(&osh->cmn->refcount) == 1) + return (atomic_read(&osh->cmn->pktalloced)); + else + return 0; +} + +uint32 +osl_rand(void) +{ + uint32 rand; + + get_random_bytes(&rand, sizeof(rand)); + + return rand; +} + +/* Linux Kernel: File Operations: start */ +void * +osl_os_open_image(char *filename) +{ + struct file *fp; + + fp = filp_open(filename, O_RDONLY, 0); + /* + * 2.6.11 (FC4) supports filp_open() but later revs don't? + * Alternative: + * fp = open_namei(AT_FDCWD, filename, O_RD, 0); + * ??? + */ + if (IS_ERR(fp)) + fp = NULL; + + return fp; +} + +int +osl_os_get_image_block(char *buf, int len, void *image) +{ + struct file *fp = (struct file *)image; + int rdlen; + + if (!image) + return 0; + + rdlen = kernel_read(fp, buf, len, &fp->f_pos); + if (rdlen > 0) + fp->f_pos += rdlen; + + return rdlen; +} + +void +osl_os_close_image(void *image) +{ + if (image) + filp_close((struct file *)image, NULL); +} + +int +osl_os_image_size(void *image) +{ + int len = 0, curroffset; + + if (image) { + /* store the current offset */ + curroffset = generic_file_llseek(image, 0, 1); + /* goto end of file to get length */ + len = generic_file_llseek(image, 0, 2); + /* restore back the offset */ + generic_file_llseek(image, curroffset, 0); + } + return len; +} + +/* Linux Kernel: File Operations: end */ + + +/* APIs to set/get specific quirks in OSL layer */ +void +osl_flag_set(osl_t *osh, uint32 mask) +{ + osh->flags |= mask; +} + +bool +osl_is_flag_set(osl_t *osh, uint32 mask) +{ + return (osh->flags & mask); +} + +#ifdef BCM_SECURE_DMA + +static void +osl_sec_dma_setup_contig_mem(osl_t *osh, unsigned long memsize, int regn) +{ + int ret; + +#if defined(__ARM_ARCH_7A__) + if (regn == CONT_ARMREGION) { + ret = osl_sec_dma_alloc_contig_mem(osh, memsize, regn); + if (ret != BCME_OK) + printk("linux_osl.c: CMA memory access failed\n"); + } +#endif + /* implement the MIPS Here */ +} + +static int +osl_sec_dma_alloc_contig_mem(osl_t *osh, unsigned long memsize, int regn) +{ + u64 addr; + + printk("linux_osl.c: The value of cma mem block size = %ld\n", memsize); + osh->cma = cma_dev_get_cma_dev(regn); + printk("The value of cma = %p\n", osh->cma); + if (!osh->cma) { + printk("linux_osl.c:contig_region index is invalid\n"); + return BCME_ERROR; + } + if (cma_dev_get_mem(osh->cma, &addr, (u32)memsize, SEC_DMA_ALIGN) < 0) { + printk("linux_osl.c: contiguous memory block allocation failure\n"); + return BCME_ERROR; + } + osh->contig_base_alloc = (phys_addr_t)addr; + osh->contig_base = (phys_addr_t)osh->contig_base_alloc; + printk("contig base alloc=%lx \n", (ulong)osh->contig_base_alloc); + + return BCME_OK; +} + +static void +osl_sec_dma_free_contig_mem(osl_t *osh, u32 memsize, int regn) +{ + int ret; + + ret = cma_dev_put_mem(osh->cma, (u64)osh->contig_base, memsize); + if (ret) + printf("%s contig base free failed\n", __FUNCTION__); +} + +static void * +osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size, bool iscache, bool isdecr) +{ + + struct page **map; + int order, i; + void *addr = NULL; + + size = PAGE_ALIGN(size); + order = get_order(size); + + map = kmalloc(sizeof(struct page *) << order, GFP_ATOMIC); + + if (map == NULL) + return NULL; + + for (i = 0; i < (size >> PAGE_SHIFT); i++) + map[i] = page + i; + + if (iscache) { + addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, __pgprot(PAGE_KERNEL)); + if (isdecr) { + osh->contig_delta_va_pa = (phys_addr_t)(addr - page_to_phys(page)); + g_contig_delta_va_pa = osh->contig_delta_va_pa; + } + } + 
else { + +#if defined(__ARM_ARCH_7A__) + addr = vmap(map, size >> PAGE_SHIFT, VM_MAP, + pgprot_noncached(__pgprot(PAGE_KERNEL))); +#endif + if (isdecr) { + osh->contig_delta_va_pa = (phys_addr_t)(addr - page_to_phys(page)); + g_contig_delta_va_pa = osh->contig_delta_va_pa; + } + } + + kfree(map); + return (void *)addr; +} + +static void +osl_sec_dma_iounmap(osl_t *osh, void *contig_base_va, size_t size) +{ + vunmap(contig_base_va); +} + +static void +osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list) +{ + int i; + sec_mem_elem_t *sec_mem_elem; + + if ((sec_mem_elem = kmalloc(sizeof(sec_mem_elem_t)*(max), GFP_ATOMIC)) != NULL) { + + *list = sec_mem_elem; + bzero(sec_mem_elem, sizeof(sec_mem_elem_t)*(max)); + for (i = 0; i < max-1; i++) { + sec_mem_elem->next = (sec_mem_elem + 1); + sec_mem_elem->size = mbsize; + sec_mem_elem->pa_cma = (u32)osh->contig_base_alloc; + sec_mem_elem->vac = osh->contig_base_alloc_va; + + osh->contig_base_alloc += mbsize; + osh->contig_base_alloc_va += mbsize; + + sec_mem_elem = sec_mem_elem + 1; + } + sec_mem_elem->next = NULL; + sec_mem_elem->size = mbsize; + sec_mem_elem->pa_cma = (u32)osh->contig_base_alloc; + sec_mem_elem->vac = osh->contig_base_alloc_va; + + osh->contig_base_alloc += mbsize; + osh->contig_base_alloc_va += mbsize; + + } + else + printf("%s sec mem elem kmalloc failed\n", __FUNCTION__); +} + + +static void +osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base) +{ + if (sec_list_base) + kfree(sec_list_base); +} + +static sec_mem_elem_t * BCMFASTPATH +osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction, + struct sec_cma_info *ptr_cma_info, uint offset) +{ + sec_mem_elem_t *sec_mem_elem = NULL; + + if (size <= 512 && osh->sec_list_512) { + sec_mem_elem = osh->sec_list_512; + osh->sec_list_512 = sec_mem_elem->next; + } + else if (size <= 2048 && osh->sec_list_2048) { + sec_mem_elem = osh->sec_list_2048; + osh->sec_list_2048 = sec_mem_elem->next; + } + else if (osh->sec_list_4096) { + sec_mem_elem = osh->sec_list_4096; + osh->sec_list_4096 = sec_mem_elem->next; + } else { + printf("%s No matching Pool available size=%d \n", __FUNCTION__, size); + return NULL; + } + + if (sec_mem_elem != NULL) { + sec_mem_elem->next = NULL; + + if (ptr_cma_info->sec_alloc_list_tail) { + ptr_cma_info->sec_alloc_list_tail->next = sec_mem_elem; + } + + ptr_cma_info->sec_alloc_list_tail = sec_mem_elem; + if (ptr_cma_info->sec_alloc_list == NULL) + ptr_cma_info->sec_alloc_list = sec_mem_elem; + } + return sec_mem_elem; +} + +static void BCMFASTPATH +osl_sec_dma_free_mem_elem(osl_t *osh, sec_mem_elem_t *sec_mem_elem) +{ + sec_mem_elem->dma_handle = 0x0; + sec_mem_elem->va = NULL; + + if (sec_mem_elem->size == 512) { + sec_mem_elem->next = osh->sec_list_512; + osh->sec_list_512 = sec_mem_elem; + } + else if (sec_mem_elem->size == 2048) { + sec_mem_elem->next = osh->sec_list_2048; + osh->sec_list_2048 = sec_mem_elem; + } + else if (sec_mem_elem->size == 4096) { + sec_mem_elem->next = osh->sec_list_4096; + osh->sec_list_4096 = sec_mem_elem; + } + else + printf("%s free failed size=%d \n", __FUNCTION__, sec_mem_elem->size); +} + +static sec_mem_elem_t * BCMFASTPATH +osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle) +{ + sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list; + sec_mem_elem_t *sec_prv_elem = ptr_cma_info->sec_alloc_list; + + if (sec_mem_elem->dma_handle == dma_handle) { + + ptr_cma_info->sec_alloc_list = 
sec_mem_elem->next; + + if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) { + ptr_cma_info->sec_alloc_list_tail = NULL; + ASSERT(ptr_cma_info->sec_alloc_list == NULL); + } + + return sec_mem_elem; + } + + while (sec_mem_elem != NULL) { + + if (sec_mem_elem->dma_handle == dma_handle) { + + sec_prv_elem->next = sec_mem_elem->next; + if (sec_mem_elem == ptr_cma_info->sec_alloc_list_tail) + ptr_cma_info->sec_alloc_list_tail = sec_prv_elem; + + return sec_mem_elem; + } + sec_prv_elem = sec_mem_elem; + sec_mem_elem = sec_mem_elem->next; + } + return NULL; +} + +static sec_mem_elem_t * +osl_sec_dma_rem_first_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info) +{ + sec_mem_elem_t *sec_mem_elem = ptr_cma_info->sec_alloc_list; + + if (sec_mem_elem) { + + ptr_cma_info->sec_alloc_list = sec_mem_elem->next; + + if (ptr_cma_info->sec_alloc_list == NULL) + ptr_cma_info->sec_alloc_list_tail = NULL; + + return sec_mem_elem; + + } else + return NULL; +} + +static void * BCMFASTPATH +osl_sec_dma_last_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info) +{ + return ptr_cma_info->sec_alloc_list_tail; +} + +dma_addr_t BCMFASTPATH +osl_sec_dma_map_txmeta(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *dmah, void *ptr_cma_info) +{ + sec_mem_elem_t *sec_mem_elem; + struct page *pa_cma_page; + uint loffset; + void *vaorig = va + size; + dma_addr_t dma_handle = 0x0; + /* packet will be the one added with osl_sec_dma_map() just before this call */ + + sec_mem_elem = osl_sec_dma_last_elem(osh, ptr_cma_info); + + if (sec_mem_elem && sec_mem_elem->va == vaorig) { + + pa_cma_page = phys_to_page(sec_mem_elem->pa_cma); + loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1)); + + dma_handle = dma_map_page(osh->cma->dev, pa_cma_page, loffset, size, + (direction == DMA_TX ? 
DMA_TO_DEVICE:DMA_FROM_DEVICE)); + + } else { + printf("%s: error orig va not found va = 0x%p \n", + __FUNCTION__, vaorig); + } + return dma_handle; +} + +dma_addr_t BCMFASTPATH +osl_sec_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, + hnddma_seg_map_t *dmah, void *ptr_cma_info, uint offset) +{ + + sec_mem_elem_t *sec_mem_elem; + struct page *pa_cma_page; + void *pa_cma_kmap_va = NULL; + int *fragva; + uint buflen = 0; + struct sk_buff *skb; + dma_addr_t dma_handle = 0x0; + uint loffset; + int i = 0; + + sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset); + + if (sec_mem_elem == NULL) { + printk("linux_osl.c: osl_sec_dma_map - cma allocation failed\n"); + return 0; + } + sec_mem_elem->va = va; + sec_mem_elem->direction = direction; + pa_cma_page = phys_to_page(sec_mem_elem->pa_cma); + + loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1)); + /* pa_cma_kmap_va = kmap_atomic(pa_cma_page); + * pa_cma_kmap_va += loffset; + */ + + pa_cma_kmap_va = sec_mem_elem->vac; + + if (direction == DMA_TX) { + + if (p == NULL) { + + memcpy(pa_cma_kmap_va+offset, va, size); + buflen = size; + } + else { + for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) { + if (skb_is_nonlinear(skb)) { + + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + fragva = kmap_atomic(skb_frag_page(f)); + memcpy((pa_cma_kmap_va+offset+buflen), + (fragva + f->page_offset), skb_frag_size(f)); + kunmap_atomic(fragva); + buflen += skb_frag_size(f); + } + } + else { + memcpy((pa_cma_kmap_va+offset+buflen), skb->data, skb->len); + buflen += skb->len; + } + } + + } + if (dmah) { + dmah->nsegs = 1; + dmah->origsize = buflen; + } + } + + else if (direction == DMA_RX) + { + buflen = size; + if ((p != NULL) && (dmah != NULL)) { + dmah->nsegs = 1; + dmah->origsize = buflen; + } + } + if (direction == DMA_RX || direction == DMA_TX) { + + dma_handle = dma_map_page(osh->cma->dev, pa_cma_page, loffset+offset, buflen, + (direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE)); + + } + if (dmah) { + dmah->segs[0].addr = dma_handle; + dmah->segs[0].length = buflen; + } + sec_mem_elem->dma_handle = dma_handle; + /* kunmap_atomic(pa_cma_kmap_va-loffset); */ + return dma_handle; +} + +dma_addr_t BCMFASTPATH +osl_sec_dma_dd_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *map) +{ + + struct page *pa_cma_page; + phys_addr_t pa_cma; + dma_addr_t dma_handle = 0x0; + uint loffset; + + pa_cma = (phys_addr_t)(va - osh->contig_delta_va_pa); + pa_cma_page = phys_to_page(pa_cma); + loffset = pa_cma -(pa_cma & ~(PAGE_SIZE-1)); + + dma_handle = dma_map_page(osh->cma->dev, pa_cma_page, loffset, size, + (direction == DMA_TX ? 
DMA_TO_DEVICE:DMA_FROM_DEVICE)); + + return dma_handle; +} + +void BCMFASTPATH +osl_sec_dma_unmap(osl_t *osh, dma_addr_t dma_handle, uint size, int direction, +void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset) +{ + sec_mem_elem_t *sec_mem_elem; + struct page *pa_cma_page; + void *pa_cma_kmap_va = NULL; + uint buflen = 0; + dma_addr_t pa_cma; + void *va; + uint loffset = 0; + int read_count = 0; + BCM_REFERENCE(buflen); + BCM_REFERENCE(read_count); + + sec_mem_elem = osl_sec_dma_find_rem_elem(osh, ptr_cma_info, dma_handle); + if (sec_mem_elem == NULL) { + printf("%s sec_mem_elem is NULL and dma_handle =0x%lx and dir=%d\n", + __FUNCTION__, (ulong)dma_handle, direction); + return; + } + + va = sec_mem_elem->va; + va -= offset; + pa_cma = sec_mem_elem->pa_cma; + + pa_cma_page = phys_to_page(pa_cma); + loffset = sec_mem_elem->pa_cma -(sec_mem_elem->pa_cma & ~(PAGE_SIZE-1)); + + if (direction == DMA_RX) { + + if (p == NULL) { + + /* pa_cma_kmap_va = kmap_atomic(pa_cma_page); + * pa_cma_kmap_va += loffset; + */ + + pa_cma_kmap_va = sec_mem_elem->vac; + + dma_unmap_page(osh->cma->dev, pa_cma, size, DMA_FROM_DEVICE); + memcpy(va, pa_cma_kmap_va, size); + /* kunmap_atomic(pa_cma_kmap_va); */ + } + } else { + dma_unmap_page(osh->cma->dev, pa_cma, size+offset, DMA_TO_DEVICE); + } + + osl_sec_dma_free_mem_elem(osh, sec_mem_elem); +} + +void +osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info) +{ + + sec_mem_elem_t *sec_mem_elem; + + sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info); + + while (sec_mem_elem != NULL) { + + dma_unmap_page(osh->cma->dev, sec_mem_elem->pa_cma, sec_mem_elem->size, + sec_mem_elem->direction == DMA_TX ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + osl_sec_dma_free_mem_elem(osh, sec_mem_elem); + + sec_mem_elem = osl_sec_dma_rem_first_elem(osh, ptr_cma_info); + } +} + +static void +osl_sec_dma_init_consistent(osl_t *osh) +{ + int i; + void *temp_va = osh->contig_base_alloc_coherent_va; + phys_addr_t temp_pa = osh->contig_base_alloc_coherent; + + for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) { + osh->sec_cma_coherent[i].avail = TRUE; + osh->sec_cma_coherent[i].va = temp_va; + osh->sec_cma_coherent[i].pa = temp_pa; + temp_va += SEC_CMA_COHERENT_BLK; + temp_pa += SEC_CMA_COHERENT_BLK; + } +} + +static void * +osl_sec_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, ulong *pap) +{ + + void *temp_va = NULL; + ulong temp_pa = 0; + int i; + + if (size > SEC_CMA_COHERENT_BLK) { + printf("%s unsupported size\n", __FUNCTION__); + return NULL; + } + + for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) { + if (osh->sec_cma_coherent[i].avail == TRUE) { + temp_va = osh->sec_cma_coherent[i].va; + temp_pa = osh->sec_cma_coherent[i].pa; + osh->sec_cma_coherent[i].avail = FALSE; + break; + } + } + + if (i == SEC_CMA_COHERENT_MAX) + printf("%s:No coherent mem: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__, + temp_va, (ulong)temp_pa, size); + + *pap = (unsigned long)temp_pa; + return temp_va; +} + +static void +osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa) +{ + int i = 0; + + for (i = 0; i < SEC_CMA_COHERENT_MAX; i++) { + if (osh->sec_cma_coherent[i].va == va) { + osh->sec_cma_coherent[i].avail = TRUE; + break; + } + } + if (i == SEC_CMA_COHERENT_MAX) + printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__, + va, (ulong)pa, size); +} + +#endif /* BCM_SECURE_DMA */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER) +#include +#include +void +osl_pkt_orphan_partial(struct sk_buff *skb) +{ + uint32 fraction; 
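+ /* cached pointer to the kernel's (unexported) tcp_wfree() destructor,
+ * resolved once below by matching the destructor's symbol name
+ */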
+ static void *p_tcp_wfree = NULL;
+
+ if (!skb->destructor || skb->destructor == sock_wfree)
+ return;
+
+ if (unlikely(!p_tcp_wfree)) {
+ char sym[KSYM_SYMBOL_LEN];
+ sprint_symbol(sym, (unsigned long)skb->destructor);
+ sym[9] = 0;
+ if (!strcmp(sym, "tcp_wfree"))
+ p_tcp_wfree = skb->destructor;
+ else
+ return;
+ }
+
+ if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk))
+ return;
+
+ /* Subtract a portion of the skb's truesize from the socket's
+ * sk_wmem_alloc so that more skbs can be allocated for this
+ * socket, giving a better cushion to meet WiFi device requirements.
+ */
+ fraction = skb->truesize * (TSQ_MULTIPLIER - 1) / TSQ_MULTIPLIER;
+ skb->truesize -= fraction;
+ atomic_sub(fraction, &skb->sk->sk_wmem_alloc);
+}
+#endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */
diff --git a/drivers/net/wireless/bcmdhd/pcie_core.c b/drivers/net/wireless/bcmdhd/pcie_core.c
new file mode 100644
index 000000000000..c36bc62ecdb4
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/pcie_core.c
@@ -0,0 +1,115 @@
+/** @file pcie_core.c
+ *
+ * Contains PCIe related functions that are shared between different driver models (e.g. firmware
+ * builds, DHD builds, BMAC builds), in order to avoid code duplication.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * + * + * <> + * + * $Id: pcie_core.c 591285 2015-10-07 11:56:29Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie_core.h" + +/* local prototypes */ + +/* local variables */ + +/* function definitions */ + +#ifdef BCMDRIVER + +void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs) +{ + uint32 val, i, lsc; + uint16 cfg_offset[] = {PCIECFGREG_STATUS_CMD, PCIECFGREG_PM_CSR, + PCIECFGREG_MSI_CAP, PCIECFGREG_MSI_ADDR_L, + PCIECFGREG_MSI_ADDR_H, PCIECFGREG_MSI_DATA, + PCIECFGREG_LINK_STATUS_CTRL2, PCIECFGREG_RBAR_CTRL, + PCIECFGREG_PML1_SUB_CTRL1, PCIECFGREG_REG_BAR2_CONFIG, + PCIECFGREG_REG_BAR3_CONFIG}; + sbpcieregs_t *pcie = NULL; + uint32 origidx = si_coreidx(sih); + + /* Switch to PCIE2 core */ + pcie = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0); + BCM_REFERENCE(pcie); + ASSERT(pcie != NULL); + + /* Disable/restore ASPM Control to protect the watchdog reset */ + W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL); + lsc = R_REG(osh, &sbpcieregs->configdata); + val = lsc & (~PCIE_ASPM_ENAB); + W_REG(osh, &sbpcieregs->configdata, val); + + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, 4); + OSL_DELAY(100000); + + W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL); + W_REG(osh, &sbpcieregs->configdata, lsc); + + if (sih->buscorerev <= 13) { + /* Write configuration registers back to the shadow registers + * cause shadow registers are cleared out after watchdog reset. + */ + for (i = 0; i < ARRAYSIZE(cfg_offset); i++) { + W_REG(osh, &sbpcieregs->configaddr, cfg_offset[i]); + val = R_REG(osh, &sbpcieregs->configdata); + W_REG(osh, &sbpcieregs->configdata, val); + } + } + si_setcoreidx(sih, origidx); +} + + +/* CRWLPCIEGEN2-117 pcie_pipe_Iddq should be controlled + * by the L12 state from MAC to save power by putting the + * SerDes analog in IDDQ mode + */ +void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs) +{ + sbpcieregs_t *pcie = NULL; + uint crwlpciegen2_117_disable = 0; + uint32 origidx = si_coreidx(sih); + + crwlpciegen2_117_disable = PCIE_PipeIddqDisable0 | PCIE_PipeIddqDisable1; + /* Switch to PCIE2 core */ + pcie = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0); + BCM_REFERENCE(pcie); + ASSERT(pcie != NULL); + + OR_REG(osh, &sbpcieregs->control, + crwlpciegen2_117_disable); + + si_setcoreidx(sih, origidx); +} +#endif /* BCMDRIVER */ diff --git a/drivers/net/wireless/bcmdhd/sbutils.c b/drivers/net/wireless/bcmdhd/sbutils.c new file mode 100644 index 000000000000..0804ef455135 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/sbutils.c @@ -0,0 +1,1108 @@ +/* + * Misc utility routines for accessing chip-specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: sbutils.c 514727 2014-11-12 03:02:48Z $
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "siutils_priv.h"
+
+
+/* local prototypes */
+static uint _sb_coreidx(si_info_t *sii, uint32 sba);
+static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
+ uint ncores);
+static uint32 _sb_coresba(si_info_t *sii);
+static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
+#define SET_SBREG(sii, r, mask, val) \
+ W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
+#define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
+
+/* sonicsrev */
+#define SONICS_2_2 (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
+#define SONICS_2_3 (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)
+
+#define R_SBREG(sii, sbr) sb_read_sbreg((sii), (sbr))
+#define W_SBREG(sii, sbr, v) sb_write_sbreg((sii), (sbr), (v))
+#define AND_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
+#define OR_SBREG(sii, sbr, v) W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
+
+static uint32
+sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
+{
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint8 tmp;
+ uint32 val, intr_val = 0;
+
+
+ /*
+ * Compact flash has only an 11-bit address, while we need a 12-bit address.
+ * MEM_SEG will be OR'd with the other 11 address bits in hardware,
+ * so we program MEM_SEG with the 12th bit when necessary (to access sb registers).
+ * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special
+ */
+ if (PCMCIA(sii)) {
+ INTR_OFF(sii, intr_val);
+ tmp = 1;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+ sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
+ }
+
+ val = R_REG(sii->osh, sbr);
+
+ if (PCMCIA(sii)) {
+ tmp = 0;
+ OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
+ INTR_RESTORE(sii, intr_val);
+ }
+
+ return (val);
+}
+
+static void
+sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
+{
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint8 tmp;
+ volatile uint32 dummy;
+ uint32 intr_val = 0;
+
+
+ /*
+ * Compact flash has only an 11-bit address, while we need a 12-bit address.
+ * MEM_SEG will be OR'd with the other 11 address bits in hardware,
+ * so we program MEM_SEG with the 12th bit when necessary (to access sb registers).
+ * For normal PCMCIA bus(CFTable_regwinsz > 2k), do nothing special + */ + if (PCMCIA(sii)) { + INTR_OFF(sii, intr_val); + tmp = 1; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */ + } + + if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) { + dummy = R_REG(sii->osh, sbr); + BCM_REFERENCE(dummy); + W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff)); + dummy = R_REG(sii->osh, sbr); + BCM_REFERENCE(dummy); + W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff)); + } else + W_REG(sii->osh, sbr, v); + + if (PCMCIA(sii)) { + tmp = 0; + OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1); + INTR_RESTORE(sii, intr_val); + } +} + +uint +sb_coreid(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT); +} + +uint +sb_intflag(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + void *corereg; + sbconfig_t *sb; + uint origidx, intflag, intr_val = 0; + + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + corereg = si_setcore(sih, CC_CORE_ID, 0); + ASSERT(corereg != NULL); + sb = REGS2SB(corereg); + intflag = R_SBREG(sii, &sb->sbflagst); + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); + + return intflag; +} + +uint +sb_flag(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK; +} + +void +sb_setint(si_t *sih, int siflag) +{ + si_info_t *sii; + sbconfig_t *sb; + uint32 vec; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + if (siflag == -1) + vec = 0; + else + vec = 1 << siflag; + W_SBREG(sii, &sb->sbintvec, vec); +} + +/* return core index of the core with address 'sba' */ +static uint +_sb_coreidx(si_info_t *sii, uint32 sba) +{ + uint i; + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + for (i = 0; i < sii->numcores; i ++) + if (sba == cores_info->coresba[i]) + return i; + return BADIDX; +} + +/* return core address of the current core */ +static uint32 +_sb_coresba(si_info_t *sii) +{ + uint32 sbaddr; + + + switch (BUSTYPE(sii->pub.bustype)) { + case SI_BUS: { + sbconfig_t *sb = REGS2SB(sii->curmap); + sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0)); + break; + } + + case PCI_BUS: + sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); + break; + + case PCMCIA_BUS: { + uint8 tmp = 0; + OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1); + sbaddr = (uint32)tmp << 12; + OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1); + sbaddr |= (uint32)tmp << 16; + OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1); + sbaddr |= (uint32)tmp << 24; + break; + } + +#ifdef BCMSDIO + case SPI_BUS: + case SDIO_BUS: + sbaddr = (uint32)(uintptr)sii->curmap; + break; +#endif + + + default: + sbaddr = BADCOREADDR; + break; + } + + return sbaddr; +} + +uint +sb_corevendor(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT); +} + +uint +sb_corerev(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + uint sbidh; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + sbidh = R_SBREG(sii, &sb->sbidhigh); + + return (SBCOREREV(sbidh)); +} + +/* set core-specific control flags */ +void +sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 
val)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ ASSERT((val & ~mask) == 0);
+
+ /* mask and set */
+ w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+ (val << SBTML_SICF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatelow, w);
+}
+
+/* set/clear core-specific control flags */
+uint32
+sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ ASSERT((val & ~mask) == 0);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
+ (val << SBTML_SICF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatelow, w);
+ }
+
+ /* return the new value
+ * for a write operation, the following readback ensures the completion of the write operation.
+ */
+ return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
+}
+
+/* set/clear core-specific status flags */
+uint32
+sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ uint32 w;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ ASSERT((val & ~mask) == 0);
+ ASSERT((mask & ~SISF_CORE_BITS) == 0);
+
+ /* mask and set */
+ if (mask || val) {
+ w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
+ (val << SBTMH_SISF_SHIFT);
+ W_SBREG(sii, &sb->sbtmstatehigh, w);
+ }
+
+ /* return the new value */
+ return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
+}
+
+bool
+sb_iscoreup(si_t *sih)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+
+ sii = SI_INFO(sih);
+ sb = REGS2SB(sii->curmap);
+
+ return ((R_SBREG(sii, &sb->sbtmstatelow) &
+ (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
+ (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+}
+
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
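+ *
+ * For example, a (hypothetical) caller arming the chipcommon watchdog
+ * through this helper would look like:
+ *
+ *	sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
+ *
+ * where 'ticks' is the caller's timeout value; the return value is the
+ * updated register value read back after the write.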
+ */ +uint +sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) +{ + uint origidx = 0; + uint32 *r = NULL; + uint w; + uint intr_val = 0; + bool fast = FALSE; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + ASSERT((val & ~mask) == 0); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sii->pub.bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (uint32 *)((char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (uint32 *)((char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? + PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) { + INTR_OFF(sii, intr_val); + + /* save current core index */ + origidx = si_coreidx(&sii->pub); + + /* switch core */ + r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff); + } + ASSERT(r != NULL); + + /* mask and set */ + if (mask || val) { + if (regoff >= SBCONFIGOFF) { + w = (R_SBREG(sii, r) & ~mask) | val; + W_SBREG(sii, r, w); + } else { + w = (R_REG(sii->osh, r) & ~mask) | val; + W_REG(sii->osh, r, w); + } + } + + /* readback */ + if (regoff >= SBCONFIGOFF) + w = R_SBREG(sii, r); + else { + if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) && + (coreidx == SI_CC_IDX) && + (regoff == OFFSETOF(chipcregs_t, watchdog))) { + w = val; + } else + w = R_REG(sii->osh, r); + } + + if (!fast) { + /* restore core index */ + if (origidx != coreidx) + sb_setcoreidx(&sii->pub, origidx); + + INTR_RESTORE(sii, intr_val); + } + + return (w); +} + +/* + * If there is no need for fiddling with interrupts or core switches (typically silicon + * back plane registers, pci registers and chipcommon registers), this function + * returns the register offset on this core to a mapped address. This address can + * be used for W_REG/R_REG directly. + * + * For accessing registers that would need a core switch, this function will return + * NULL. 
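+ *
+ * A minimal sketch of the intended use (hypothetical; assumes the register
+ * is reachable without a core switch, and 'osh' is the caller's OSL handle):
+ *
+ *	uint32 *r = sb_corereg_addr(sih, coreidx, regoff);
+ *	if (r != NULL)
+ *		val = R_REG(osh, r);	// direct access, no core switch needed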
+ */ +uint32 * +sb_corereg_addr(si_t *sih, uint coreidx, uint regoff) +{ + uint32 *r = NULL; + bool fast = FALSE; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + ASSERT(GOODIDX(coreidx)); + ASSERT(regoff < SI_CORE_SIZE); + + if (coreidx >= SI_MAXCORES) + return 0; + + if (BUSTYPE(sii->pub.bustype) == SI_BUS) { + /* If internal bus, we can always get at everything */ + fast = TRUE; + /* map if does not exist */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx], + SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff); + } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) { + /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ + + if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { + /* Chipc registers are mapped at 12KB */ + + fast = TRUE; + r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); + } else if (sii->pub.buscoreidx == coreidx) { + /* pci registers are at either in the last 2KB of an 8KB window + * or, in pcie and pci rev 13 at 8KB + */ + fast = TRUE; + if (SI_FAST(sii)) + r = (uint32 *)((char *)sii->curmap + + PCI_16KB0_PCIREGS_OFFSET + regoff); + else + r = (uint32 *)((char *)sii->curmap + + ((regoff >= SBCONFIGOFF) ? + PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + + regoff); + } + } + + if (!fast) + return 0; + + return (r); +} + +/* Scan the enumeration space to find all cores starting from the given + * bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba' + * is the default core address at chip POR time and 'regs' is the virtual + * address that the default core is mapped at. 'ncores' is the number of + * cores expected on bus 'sbba'. It returns the total number of cores + * starting from bus 'sbba', inclusive. + */ +#define SB_MAXBUSES 2 +static uint +_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores) +{ + uint next; + uint ncc = 0; + uint i; + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + if (bus >= SB_MAXBUSES) { + SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus)); + return 0; + } + SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores)); + + /* Scan all cores on the bus starting from core 0. + * Core addresses must be contiguous on each bus. + */ + for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) { + cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE); + + /* keep and reuse the initial register mapping */ + if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) { + SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next)); + cores_info->regs[next] = regs; + } + + /* change core to 'next' and read its coreid */ + sii->curmap = _sb_setcoreidx(sii, next); + sii->curidx = next; + + cores_info->coreid[next] = sb_coreid(&sii->pub); + + /* core specific processing... 
*/ + /* chipc provides # cores */ + if (cores_info->coreid[next] == CC_CORE_ID) { + chipcregs_t *cc = (chipcregs_t *)sii->curmap; + uint32 ccrev = sb_corerev(&sii->pub); + + /* determine numcores - this is the total # cores in the chip */ + if (((ccrev == 4) || (ccrev >= 6))) { + ASSERT(cc); + numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >> + CID_CC_SHIFT; + } else { + /* Older chips */ + uint chip = CHIPID(sii->pub.chip); + + if (chip == BCM4306_CHIP_ID) /* < 4306c0 */ + numcores = 6; + else if (chip == BCM4704_CHIP_ID) + numcores = 9; + else if (chip == BCM5365_CHIP_ID) + numcores = 7; + else { + SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n", + chip)); + ASSERT(0); + numcores = 1; + } + } + SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores, + sii->pub.issim ? "QT" : "")); + } + /* scan bridged SB(s) and add results to the end of the list */ + else if (cores_info->coreid[next] == OCP_CORE_ID) { + sbconfig_t *sb = REGS2SB(sii->curmap); + uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1); + uint nsbcc; + + sii->numcores = next + 1; + + if ((nsbba & 0xfff00000) != SI_ENUM_BASE) + continue; + nsbba &= 0xfffff000; + if (_sb_coreidx(sii, nsbba) != BADIDX) + continue; + + nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16; + nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc); + if (sbba == SI_ENUM_BASE) + numcores -= nsbcc; + ncc += nsbcc; + } + } + + SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba)); + + sii->numcores = i + ncc; + return sii->numcores; +} + +/* scan the sb enumerated space to identify all cores */ +void +sb_scan(si_t *sih, void *regs, uint devid) +{ + uint32 origsba; + sbconfig_t *sb; + si_info_t *sii = SI_INFO(sih); + + sb = REGS2SB(sii->curmap); + + sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT; + + /* Save the current core info and validate it later till we know + * for sure what is good and what is bad. + */ + origsba = _sb_coresba(sii); + + /* scan all SB(s) starting from SI_ENUM_BASE */ + sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1); +} + +/* + * This function changes logical "focus" to the indicated core; + * must be called with interrupts off. + * Moreover, callers should keep interrupts off during switching out of and back to d11 core + */ +void * +sb_setcoreidx(si_t *sih, uint coreidx) +{ + si_info_t *sii = SI_INFO(sih); + + if (coreidx >= sii->numcores) + return (NULL); + + /* + * If the user has provided an interrupt mask enabled function, + * then assert interrupts are disabled before switching the core. + */ + ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg)); + + sii->curmap = _sb_setcoreidx(sii, coreidx); + sii->curidx = coreidx; + + return (sii->curmap); +} + +/* This function changes the logical "focus" to the indicated core. + * Return the current core's virtual address. 
+ */ +static void * +_sb_setcoreidx(si_info_t *sii, uint coreidx) +{ + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint32 sbaddr = cores_info->coresba[coreidx]; + void *regs; + + switch (BUSTYPE(sii->pub.bustype)) { + case SI_BUS: + /* map new one */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE); + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + regs = cores_info->regs[coreidx]; + break; + + case PCI_BUS: + /* point bar0 window */ + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr); + regs = sii->curmap; + break; + + case PCMCIA_BUS: { + uint8 tmp = (sbaddr >> 12) & 0x0f; + OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1); + tmp = (sbaddr >> 16) & 0xff; + OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1); + tmp = (sbaddr >> 24) & 0xff; + OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1); + regs = sii->curmap; + break; + } +#ifdef BCMSDIO + case SPI_BUS: + case SDIO_BUS: + /* map new one */ + if (!cores_info->regs[coreidx]) { + cores_info->regs[coreidx] = (void *)(uintptr)sbaddr; + ASSERT(GOODREGS(cores_info->regs[coreidx])); + } + regs = cores_info->regs[coreidx]; + break; +#endif /* BCMSDIO */ + + + default: + ASSERT(0); + regs = NULL; + break; + } + + return regs; +} + +/* Return the address of sbadmatch0/1/2/3 register */ +static volatile uint32 * +sb_admatch(si_info_t *sii, uint asidx) +{ + sbconfig_t *sb; + volatile uint32 *addrm; + + sb = REGS2SB(sii->curmap); + + switch (asidx) { + case 0: + addrm = &sb->sbadmatch0; + break; + + case 1: + addrm = &sb->sbadmatch1; + break; + + case 2: + addrm = &sb->sbadmatch2; + break; + + case 3: + addrm = &sb->sbadmatch3; + break; + + default: + SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx)); + return 0; + } + + return (addrm); +} + +/* Return the number of address spaces in current core */ +int +sb_numaddrspaces(si_t *sih) +{ + si_info_t *sii; + sbconfig_t *sb; + + sii = SI_INFO(sih); + sb = REGS2SB(sii->curmap); + + /* + 1 because of enumeration space */ + return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1; +} + +/* Return the address of the nth address space in the current core */ +uint32 +sb_addrspace(si_t *sih, uint asidx) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx)))); +} + +/* Return the size of the nth address space in the current core */ +uint32 +sb_addrspacesize(si_t *sih, uint asidx) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx)))); +} + + +/* do buffered registers update */ +void +sb_commit(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint origidx; + uint intr_val = 0; + + origidx = sii->curidx; + ASSERT(GOODIDX(origidx)); + + INTR_OFF(sii, intr_val); + + /* switch over to chipcommon core if there is one, else use pci */ + if (sii->pub.ccrev != NOREV) { + chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); + ASSERT(ccregs != NULL); + + /* do the buffer registers update */ + W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT); + W_REG(sii->osh, &ccregs->broadcastdata, 0x0); + } else + ASSERT(0); + + /* restore core index */ + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); +} + +void +sb_core_disable(si_t *sih, uint32 bits) +{ + si_info_t *sii; + volatile uint32 dummy; + sbconfig_t *sb; + + sii = SI_INFO(sih); + + ASSERT(GOODREGS(sii->curmap)); + sb = REGS2SB(sii->curmap); 
+
+ /* if core is already in reset, just return */
+ if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
+ return;
+
+ /* if clocks are not enabled, put into reset and return */
+ if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
+ goto disable;
+
+ /* set target reject and spin until busy is clear (preserve core-specific bits) */
+ OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+ SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
+ if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
+ SI_ERROR(("%s: target state still busy\n", __FUNCTION__));
+
+ if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
+ OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
+ dummy = R_SBREG(sii, &sb->sbimstate);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+ SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
+ }
+
+ /* set reset and reject while enabling the clocks */
+ W_SBREG(sii, &sb->sbtmstatelow,
+ (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+ SBTML_REJ | SBTML_RESET));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(10);
+
+ /* don't forget to clear the initiator reject bit */
+ if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
+ AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);
+
+disable:
+ /* leave reset and reject asserted */
+ W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
+ OSL_DELAY(1);
+}
+
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
+void
+sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+ si_info_t *sii;
+ sbconfig_t *sb;
+ volatile uint32 dummy;
+
+ sii = SI_INFO(sih);
+ ASSERT(GOODREGS(sii->curmap));
+ sb = REGS2SB(sii->curmap);
+
+ /*
+ * Must do the disable sequence first to work for arbitrary current core state.
+ */
+ sb_core_disable(sih, (bits | resetbits));
+
+ /*
+ * Now do the initialization sequence.
+ */
+
+ /* set reset while enabling the clock and forcing it on throughout the core */
+ W_SBREG(sii, &sb->sbtmstatelow,
+ (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+ SBTML_RESET));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+
+ if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
+ W_SBREG(sii, &sb->sbtmstatehigh, 0);
+ }
+ if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
+ AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
+ }
+
+ /* clear reset and allow it to propagate throughout the core */
+ W_SBREG(sii, &sb->sbtmstatelow,
+ ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+
+ /* leave clock enabled */
+ W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
+ dummy = R_SBREG(sii, &sb->sbtmstatelow);
+ BCM_REFERENCE(dummy);
+ OSL_DELAY(1);
+}
+
+/*
+ * Set the initiator timeout for the "master core".
+ * The master core is defined to be the core in control
+ * of the chip and so it issues accesses to non-memory
+ * locations (because of DMA, *any* core can access memory).
+ * + * The routine uses the bus to decide who is the master: + * SI_BUS => mips + * JTAG_BUS => chipc + * PCI_BUS => pci or pcie + * PCMCIA_BUS => pcmcia + * SDIO_BUS => pcmcia + * + * This routine exists so callers can disable initiator + * timeouts so accesses to very slow devices like otp + * won't cause an abort. The routine allows arbitrary + * settings of the service and request timeouts, though. + * + * Returns the timeout state before changing it or -1 + * on error. + */ + +#define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK) + +uint32 +sb_set_initiator_to(si_t *sih, uint32 to, uint idx) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint origidx; + uint intr_val = 0; + uint32 tmp, ret = 0xffffffff; + sbconfig_t *sb; + + + if ((to & ~TO_MASK) != 0) + return ret; + + /* Figure out the master core */ + if (idx == BADIDX) { + switch (BUSTYPE(sii->pub.bustype)) { + case PCI_BUS: + idx = sii->pub.buscoreidx; + break; + case JTAG_BUS: + idx = SI_CC_IDX; + break; + case PCMCIA_BUS: +#ifdef BCMSDIO + case SDIO_BUS: +#endif + idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0); + break; + case SI_BUS: + idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0); + break; + default: + ASSERT(0); + } + if (idx == BADIDX) + return ret; + } + + INTR_OFF(sii, intr_val); + origidx = si_coreidx(sih); + + sb = REGS2SB(sb_setcoreidx(sih, idx)); + + tmp = R_SBREG(sii, &sb->sbimconfiglow); + ret = tmp & TO_MASK; + W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to); + + sb_commit(sih); + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); + return ret; +} + +uint32 +sb_base(uint32 admatch) +{ + uint32 base; + uint type; + + type = admatch & SBAM_TYPE_MASK; + ASSERT(type < 3); + + base = 0; + + if (type == 0) { + base = admatch & SBAM_BASE0_MASK; + } else if (type == 1) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + base = admatch & SBAM_BASE1_MASK; + } else if (type == 2) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + base = admatch & SBAM_BASE2_MASK; + } + + return (base); +} + +uint32 +sb_size(uint32 admatch) +{ + uint32 size; + uint type; + + type = admatch & SBAM_TYPE_MASK; + ASSERT(type < 3); + + size = 0; + + if (type == 0) { + size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1); + } else if (type == 1) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1); + } else if (type == 2) { + ASSERT(!(admatch & SBAM_ADNEG)); /* neg not supported */ + size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1); + } + + return (size); +} + +#if defined(BCMDBG_PHYDUMP) +/* print interesting sbconfig registers */ +void +sb_dumpregs(si_t *sih, struct bcmstrbuf *b) +{ + sbconfig_t *sb; + uint origidx, i, intr_val = 0; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + origidx = sii->curidx; + + INTR_OFF(sii, intr_val); + + for (i = 0; i < sii->numcores; i++) { + sb = REGS2SB(sb_setcoreidx(sih, i)); + + bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]); + + if (sii->pub.socirev > SONICS_2_2) + bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n", + sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0), + sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0)); + + bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x " + "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n", + R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh), 
+ R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate), + R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh)); + } + + sb_setcoreidx(sih, origidx); + INTR_RESTORE(sii, intr_val); +} +#endif diff --git a/drivers/net/wireless/bcmdhd/siutils.c b/drivers/net/wireless/bcmdhd/siutils.c new file mode 100644 index 000000000000..6ec96d0fab2a --- /dev/null +++ b/drivers/net/wireless/bcmdhd/siutils.c @@ -0,0 +1,3245 @@ +/* + * Misc utility routines for accessing chip-specific features + * of the SiliconBackplane-based Broadcom chips. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: siutils.c 552034 2015-04-24 19:00:35Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef BCMPCIEDEV +#include +#endif /* BCMPCIEDEV */ +#include +#include +#include +#include +#ifdef BCMSDIO +#include +#include +#include +#include +#include +#include +#endif /* BCMSDIO */ +#include + +#ifdef BCM_SDRBL +#include +#endif /* BCM_SDRBL */ +#ifdef HNDGCI +#include +#endif /* HNDGCI */ + +#include "siutils_priv.h" + +/** + * A set of PMU registers is clocked in the ILP domain, which has an implication on register write + * behavior: if such a register is written, it takes multiple ILP clocks for the PMU block to absorb + * the write. During that time the 'SlowWritePending' bit in the PMUStatus register is set. 
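+ *
+ * As an illustrative sketch (not part of this patch), the access pattern this
+ * implies, and which si_pmu_corereg() below implements, is roughly:
+ *
+ *   if (si_pmu_is_ilp_sensitive(idx, regoff))
+ *       while (si_corereg(sih, idx, pmustatus_offset, 0, 0) &
+ *              PST_SLOW_WR_PENDING)
+ *           ;   wait for the pending slow write to be absorbed
+ *   si_corereg(sih, idx, regoff, mask, val);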
+ */ +#define PMUREGS_ILP_SENSITIVE(regoff) \ + ((regoff) == OFFSETOF(pmuregs_t, pmutimer) || \ + (regoff) == OFFSETOF(pmuregs_t, pmuwatchdog) || \ + (regoff) == OFFSETOF(pmuregs_t, res_req_timer)) + +#define CHIPCREGS_ILP_SENSITIVE(regoff) \ + ((regoff) == OFFSETOF(chipcregs_t, pmutimer) || \ + (regoff) == OFFSETOF(chipcregs_t, pmuwatchdog) || \ + (regoff) == OFFSETOF(chipcregs_t, res_req_timer)) + +/* local prototypes */ +static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, + uint bustype, void *sdh, char **vars, uint *varsz); +static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh); +static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, + uint *origidx, void *regs); + + +static bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff); + +#ifdef BCMLTECOEX +static void si_config_gcigpio(si_t *sih, uint32 gci_pos, uint8 gcigpio, + uint8 gpioctl_mask, uint8 gpioctl_val); +#endif /* BCMLTECOEX */ + + +/* global variable to indicate reservation/release of gpio's */ +static uint32 si_gpioreservation = 0; + +/* global flag to prevent shared resources from being initialized multiple times in si_attach() */ +#ifdef SR_DEBUG +static const uint32 si_power_island_test_array[] = { + 0x0000, 0x0001, 0x0010, 0x0011, + 0x0100, 0x0101, 0x0110, 0x0111, + 0x1000, 0x1001, 0x1010, 0x1011, + 0x1100, 0x1101, 0x1110, 0x1111 +}; +#endif /* SR_DEBUG */ + +int do_4360_pcie2_war = 0; + +/* global kernel resource */ +static si_info_t ksii; +static si_cores_info_t ksii_cores_info; + +/** + * Allocate an si handle. This function may be called multiple times. + * + * devid - pci device id (used to determine chip#) + * osh - opaque OS handle + * regs - virtual address of initial core registers + * bustype - pci/pcmcia/sb/sdio/etc + * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this + * function set 'vars' to NULL, making dereferencing of this parameter undesired. + * varsz - pointer to int to return the size of the vars + */ +si_t * +si_attach(uint devid, osl_t *osh, void *regs, + uint bustype, void *sdh, char **vars, uint *varsz) +{ + si_info_t *sii; + si_cores_info_t *cores_info; + /* alloc si_info_t */ + if ((sii = MALLOCZ(osh, sizeof (si_info_t))) == NULL) { + SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh))); + return (NULL); + } + + /* alloc si_cores_info_t */ + if ((cores_info = (si_cores_info_t *)MALLOCZ(osh, sizeof (si_cores_info_t))) == NULL) { + SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh))); + MFREE(osh, sii, sizeof(si_info_t)); + return (NULL); + } + sii->cores_info = cores_info; + + if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) { + MFREE(osh, sii, sizeof(si_info_t)); + MFREE(osh, cores_info, sizeof(si_cores_info_t)); + return (NULL); + } + sii->vars = vars ? *vars : NULL; + sii->varsz = varsz ? *varsz : 0; + + return (si_t *)sii; +} + + +static uint32 wd_msticks; /* watchdog timer ticks normalized to ms */ + +/** generic kernel variant of si_attach() */ +si_t * +si_kattach(osl_t *osh) +{ + static bool ksii_attached = FALSE; + si_cores_info_t *cores_info; + + if (!ksii_attached) { + void *regs = NULL; + regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE); + cores_info = (si_cores_info_t *)&ksii_cores_info; + ksii.cores_info = cores_info; + + ASSERT(osh); + if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs, + SI_BUS, NULL, + osh != SI_OSH ? &(ksii.vars) : NULL, + osh != SI_OSH ? 
&(ksii.varsz) : NULL) == NULL) {
+ SI_ERROR(("si_kattach: si_doattach failed\n"));
+ REG_UNMAP(regs);
+ return NULL;
+ }
+ REG_UNMAP(regs);
+
+ /* save ticks normalized to ms for si_watchdog_ms() */
+ if (PMUCTL_ENAB(&ksii.pub)) {
+ /* based on 32KHz ILP clock */
+ wd_msticks = 32;
+ } else {
+ wd_msticks = ALP_CLOCK / 1000;
+ }
+
+ ksii_attached = TRUE;
+ SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n",
+ ksii.pub.ccrev, wd_msticks));
+ }
+
+ return &ksii.pub;
+}
+
+
+static bool
+si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh)
+{
+ /* need to set memseg flag for CF card first before any sb registers access */
+ if (BUSTYPE(bustype) == PCMCIA_BUS)
+ sii->memseg = TRUE;
+
+
+#if defined(BCMSDIO)
+ if (BUSTYPE(bustype) == SDIO_BUS) {
+ int err;
+ uint8 clkset;
+
+ /* Try forcing SDIO core to do ALPAvail request only */
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ if (!err) {
+ uint8 clkval;
+
+ /* If register supported, wait for ALPAvail and then force ALP */
+ clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+ if ((clkval & ~SBSDIO_AVBITS) == clkset) {
+ SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+ if (!SBSDIO_ALPAV(clkval)) {
+ SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n",
+ clkval));
+ return FALSE;
+ }
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkset, &err);
+ OSL_DELAY(65);
+ }
+ }
+
+ /* Also, disable the extra SDIO pull-ups */
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+ }
+
+#endif /* BCMSDIO */
+
+ return TRUE;
+}
+
+static bool
+si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
+ uint *origidx, void *regs)
+{
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ bool pci, pcie, pcie_gen2 = FALSE;
+ uint i;
+ uint pciidx, pcieidx, pcirev, pcierev;
+
+ /* first, enable backplane timeouts */
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_AI)
+ ai_enable_backplane_timeouts(&sii->pub);
+
+ cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
+ ASSERT((uintptr)cc);
+
+ /* get chipcommon rev */
+ sii->pub.ccrev = (int)si_corerev(&sii->pub);
+
+ /* get chipcommon chipstatus */
+ if (sii->pub.ccrev >= 11)
+ sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);
+
+ /* get chipcommon capabilities */
+ sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
+ /* get chipcommon extended capabilities */
+
+ if (sii->pub.ccrev >= 35)
+ sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext);
+
+ /* get pmu rev and caps */
+ if (sii->pub.cccaps & CC_CAP_PMU) {
+ if (AOB_ENAB(&sii->pub)) {
+ uint pmucoreidx;
+ pmuregs_t *pmu;
+ pmucoreidx = si_findcoreidx(&sii->pub, PMU_CORE_ID, 0);
+ pmu = si_setcoreidx(&sii->pub, pmucoreidx);
+ sii->pub.pmucaps = R_REG(sii->osh, &pmu->pmucapabilities);
+ si_setcoreidx(&sii->pub, SI_CC_IDX);
+ } else
+ sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
+
+ sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
+ }
+
+ SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n",
+ sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev,
+ sii->pub.pmucaps));
+
+ /* figure out bus/original core idx */
+ sii->pub.buscoretype = NODEV_CORE_ID;
+ sii->pub.buscorerev = (uint)NOREV;
+ sii->pub.buscoreidx = BADIDX;
+
+ pci = pcie = FALSE;
+ pcirev = pcierev = (uint)NOREV;
+ pciidx = pcieidx = BADIDX;
+
+ for (i = 0; i < sii->numcores; i++) {
+ uint cid, crev;
+
+ si_setcoreidx(&sii->pub, i);
+ cid = si_coreid(&sii->pub);
+ crev = si_corerev(&sii->pub);
+
+ /* Display cores found */
+ SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
+ i, cid, crev, cores_info->coresba[i], cores_info->regs[i]));
+
+ if (BUSTYPE(bustype) == SI_BUS) {
+ /* now look at the chipstatus register to figure out the package */
+ /* for SDIO but downloaded on PCIE dev */
+ if (cid == PCIE2_CORE_ID) {
+ if (BCM43602_CHIP(sii->pub.chip) ||
+ (CHIPID(sii->pub.chip) == BCM4365_CHIP_ID) ||
+ (CHIPID(sii->pub.chip) == BCM4366_CHIP_ID) ||
+ ((CHIPID(sii->pub.chip) == BCM4345_CHIP_ID ||
+ CHIPID(sii->pub.chip) == BCM43454_CHIP_ID) &&
+ CST4345_CHIPMODE_PCIE(sii->pub.chipst))) {
+ pcieidx = i;
+ pcierev = crev;
+ pcie = TRUE;
+ pcie_gen2 = TRUE;
+ }
+ }
+
+ }
+ else if (BUSTYPE(bustype) == PCI_BUS) {
+ if (cid == PCI_CORE_ID) {
+ pciidx = i;
+ pcirev = crev;
+ pci = TRUE;
+ } else if ((cid == PCIE_CORE_ID) || (cid == PCIE2_CORE_ID)) {
+ pcieidx = i;
+ pcierev = crev;
+ pcie = TRUE;
+ if (cid == PCIE2_CORE_ID)
+ pcie_gen2 = TRUE;
+ }
+ } else if ((BUSTYPE(bustype) == PCMCIA_BUS) &&
+ (cid == PCMCIA_CORE_ID)) {
+ sii->pub.buscorerev = crev;
+ sii->pub.buscoretype = cid;
+ sii->pub.buscoreidx = i;
+ }
+#ifdef BCMSDIO
+ else if (((BUSTYPE(bustype) == SDIO_BUS) ||
+ (BUSTYPE(bustype) == SPI_BUS)) &&
+ ((cid == PCMCIA_CORE_ID) ||
+ (cid == SDIOD_CORE_ID))) {
+ sii->pub.buscorerev = crev;
+ sii->pub.buscoretype = cid;
+ sii->pub.buscoreidx = i;
+ }
+#endif /* BCMSDIO */
+
+ /* find the core idx before entering this func. */
+ if ((savewin && (savewin == cores_info->coresba[i])) ||
+ (regs == cores_info->regs[i]))
+ *origidx = i;
+ }
+
+
+#if defined(PCIE_FULL_DONGLE)
+ if (pcie) {
+ if (pcie_gen2)
+ sii->pub.buscoretype = PCIE2_CORE_ID;
+ else
+ sii->pub.buscoretype = PCIE_CORE_ID;
+ sii->pub.buscorerev = pcierev;
+ sii->pub.buscoreidx = pcieidx;
+ }
+ BCM_REFERENCE(pci);
+ BCM_REFERENCE(pcirev);
+ BCM_REFERENCE(pciidx);
+#else
+ if (pci) {
+ sii->pub.buscoretype = PCI_CORE_ID;
+ sii->pub.buscorerev = pcirev;
+ sii->pub.buscoreidx = pciidx;
+ } else if (pcie) {
+ if (pcie_gen2)
+ sii->pub.buscoretype = PCIE2_CORE_ID;
+ else
+ sii->pub.buscoretype = PCIE_CORE_ID;
+ sii->pub.buscorerev = pcierev;
+ sii->pub.buscoreidx = pcieidx;
+ }
+#endif /* defined(PCIE_FULL_DONGLE) */
+
+ SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
+ sii->pub.buscorerev));
+
+ if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) &&
+ (sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (CHIPREV(sii->pub.chiprev) <= 3))
+ OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL);
+
+
+#if defined(BCMSDIO)
+ /* Make sure any on-chip ARM is off (in case strapping is wrong, or downloaded
+ * code was already running).
+ */
+ if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
+ if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
+ si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
+ si_core_disable(&sii->pub, 0);
+ }
+#endif /* BCMSDIO */
+
+ /* return to the original core */
+ si_setcoreidx(&sii->pub, *origidx);
+
+ return TRUE;
+}
+
+
+
+
+uint16
+si_chipid(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ return (sii->chipnew) ?
sii->chipnew : sih->chip; +} + +static void +si_chipid_fixup(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + + ASSERT(sii->chipnew == 0); + switch (sih->chip) { + case BCM43567_CHIP_ID: + sii->chipnew = sih->chip; /* save it */ + sii->pub.chip = BCM43570_CHIP_ID; /* chip class */ + break; + case BCM4358_CHIP_ID: + case BCM43566_CHIP_ID: + sii->chipnew = sih->chip; /* save it */ + sii->pub.chip = BCM43569_CHIP_ID; /* chip class */ + break; + case BCM4356_CHIP_ID: + sii->chipnew = sih->chip; /* save it */ + sii->pub.chip = BCM4354_CHIP_ID; /* chip class */ + break; + default: + break; + } +} + +/** + * Allocate an si handle. This function may be called multiple times. + * + * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this + * function set 'vars' to NULL. + */ +static si_info_t * +si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, + uint bustype, void *sdh, char **vars, uint *varsz) +{ + struct si_pub *sih = &sii->pub; + uint32 w, savewin; + chipcregs_t *cc; + char *pvars = NULL; + uint origidx; +#if !defined(_CFEZ_) || defined(CFG_WL) +#endif + + ASSERT(GOODREGS(regs)); + + savewin = 0; + + sih->buscoreidx = BADIDX; + + sii->curmap = regs; + sii->sdh = sdh; + sii->osh = osh; + sii->second_bar0win = ~0x0; + + + /* check to see if we are a si core mimic'ing a pci core */ + if ((bustype == PCI_BUS) && + (OSL_PCI_READ_CONFIG(sii->osh, PCI_SPROM_CONTROL, sizeof(uint32)) == 0xffffffff)) { + SI_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SI " + "devid:0x%x\n", __FUNCTION__, devid)); + bustype = SI_BUS; + } + + /* find Chipcommon address */ + if (bustype == PCI_BUS) { + savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); + if (!GOODCOREADDR(savewin, SI_ENUM_BASE)) + savewin = SI_ENUM_BASE; + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE); + if (!regs) + return NULL; + cc = (chipcregs_t *)regs; +#ifdef BCMSDIO + } else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) { + cc = (chipcregs_t *)sii->curmap; +#endif + } else { + cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE); + } + + sih->bustype = bustype; + if (bustype != BUSTYPE(bustype)) { + SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n", + bustype, BUSTYPE(bustype))); + return NULL; + } + + /* bus/core/clk setup for register access */ + if (!si_buscore_prep(sii, bustype, devid, sdh)) { + SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype)); + return NULL; + } + + /* ChipID recognition. + * We assume we can read chipid at offset 0 from the regs arg. + * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon), + * some way of recognizing them needs to be added here. 
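+ *
+ * For reference, the chipid register decomposes into the fields used just
+ * below (the CID_* masks and shifts); this is a restatement of that code,
+ * not an addition to it:
+ *
+ *   w = R_REG(osh, &cc->chipid);
+ *   chip = w & CID_ID_MASK;
+ *   rev  = (w & CID_REV_MASK)  >> CID_REV_SHIFT;
+ *   pkg  = (w & CID_PKG_MASK)  >> CID_PKG_SHIFT;
+ *   type = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;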
+ */
+ if (!cc) {
+ SI_ERROR(("%s: chipcommon register space is null \n", __FUNCTION__));
+ return NULL;
+ }
+ w = R_REG(osh, &cc->chipid);
+ if ((w & 0xfffff) == 148277) w -= 65532;
+ sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+ /* Might as well fill in chip id rev & pkg */
+ sih->chip = w & CID_ID_MASK;
+ sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
+ sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
+
+ si_chipid_fixup(sih);
+
+ if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (CHIPREV(sih->chiprev) == 0) &&
+ (sih->chippkg != BCM4329_289PIN_PKG_ID)) {
+ sih->chippkg = BCM4329_182PIN_PKG_ID;
+ }
+ sih->issim = IS_SIM(sih->chippkg);
+
+ /* scan for cores */
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
+ SI_MSG(("Found chip type SB (0x%08x)\n", w));
+ sb_scan(&sii->pub, regs, devid);
+ } else if ((CHIPTYPE(sii->pub.socitype) == SOCI_AI) ||
+ (CHIPTYPE(sii->pub.socitype) == SOCI_NAI)) {
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_AI)
+ SI_MSG(("Found chip type AI (0x%08x)\n", w));
+ else
+ SI_MSG(("Found chip type NAI (0x%08x)\n", w));
+ /* pass chipc address instead of original core base */
+ ai_scan(&sii->pub, (void *)(uintptr)cc, devid);
+ } else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) {
+ SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip));
+ /* pass chipc address instead of original core base */
+ ub_scan(&sii->pub, (void *)(uintptr)cc, devid);
+ } else {
+ SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
+ return NULL;
+ }
+ /* no cores found, bail out */
+ if (sii->numcores == 0) {
+ SI_ERROR(("si_doattach: could not find any cores\n"));
+ return NULL;
+ }
+ /* bus/core/clk setup */
+ origidx = SI_CC_IDX;
+ if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
+ SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
+ goto exit;
+ }
+
+#if !defined(_CFEZ_) || defined(CFG_WL)
+ if (CHIPID(sih->chip) == BCM4322_CHIP_ID && (((sih->chipst & CST4322_SPROM_OTP_SEL_MASK)
+ >> CST4322_SPROM_OTP_SEL_SHIFT) == (CST4322_OTP_PRESENT |
+ CST4322_SPROM_PRESENT))) {
+ SI_ERROR(("%s: Invalid setting: both SPROM and OTP strapped.\n", __FUNCTION__));
+ return NULL;
+ }
+
+ /* assume current core is CC */
+ if ((sii->pub.ccrev == 0x25) && ((CHIPID(sih->chip) == BCM43236_CHIP_ID ||
+ CHIPID(sih->chip) == BCM43235_CHIP_ID ||
+ CHIPID(sih->chip) == BCM43234_CHIP_ID ||
+ CHIPID(sih->chip) == BCM43238_CHIP_ID) &&
+ (CHIPREV(sii->pub.chiprev) <= 2))) {
+
+ if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
+ uint clkdiv;
+ clkdiv = R_REG(osh, &cc->clkdiv);
+ /* otp_clk_div must be an even number; 120/14 < 9 MHz */
+ clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT);
+ W_REG(osh, &cc->clkdiv, clkdiv);
+ SI_ERROR(("%s: set clkdiv to %x\n", __FUNCTION__, clkdiv));
+ }
+ OSL_DELAY(10);
+ }
+
+ if (bustype == PCI_BUS) {
+
+ }
+#endif
+#ifdef BCM_SDRBL
+ /* For the 4360 ROM bootloader in the PCIE case: if SDR is enabled but protection
+ * is not turned on, then we want to hold the ARM in reset.
+ * Bottom line: in the sdrenable case, we allow the ARM to boot only when
+ * protection is turned on.
+ */
+ if (CHIP_HOSTIF_PCIE(&(sii->pub))) {
+ uint32 sflags = si_arm_sflags(&(sii->pub));
+
+ /* If SDR is enabled but protection is not turned on
+ * then we want to force the ARM to WFI.
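+ *
+ * The test below is equivalent to checking that, of the two flags,
+ * exactly SISF_SDRENABLE is set:
+ *
+ *   sdr_on   = (sflags & SISF_SDRENABLE) != 0;
+ *   tcm_prot = (sflags & SISF_TCMPROT)  != 0;
+ *   if (sdr_on && !tcm_prot)
+ *       spin in WFI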
+ */
+ if ((sflags & (SISF_SDRENABLE | SISF_TCMPROT)) == SISF_SDRENABLE) {
+ disable_arm_irq();
+ while (1) {
+ hnd_cpu_wait(sih);
+ }
+ }
+ }
+#endif /* BCM_SDRBL */
+
+ pvars = NULL;
+ BCM_REFERENCE(pvars);
+
+
+
+ if (sii->pub.ccrev >= 20) {
+ uint32 gpiopullup = 0, gpiopulldown = 0;
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(cc != NULL);
+
+ /* 4314/43142 have pin muxing, don't clear gpio bits */
+ if ((CHIPID(sih->chip) == BCM4314_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43142_CHIP_ID)) {
+ gpiopullup |= 0x402e0;
+ gpiopulldown |= 0x20500;
+ }
+
+
+ W_REG(osh, &cc->gpiopullup, gpiopullup);
+ W_REG(osh, &cc->gpiopulldown, gpiopulldown);
+ si_setcoreidx(sih, origidx);
+ }
+
+
+ /* clear any previous epidiag-induced target abort */
+ ASSERT(!si_taclear(sih, FALSE));
+
+
+#ifdef BOOTLOADER_CONSOLE_OUTPUT
+ /* Enable console prints */
+ si_muxenab(sii, 3);
+#endif
+
+ return (sii);
+
+exit:
+
+ return NULL;
+}
+
+/** may be called with core in reset */
+void
+si_detach(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint idx;
+
+
+ if (BUSTYPE(sih->bustype) == SI_BUS)
+ for (idx = 0; idx < SI_MAXCORES; idx++)
+ if (cores_info->regs[idx]) {
+ REG_UNMAP(cores_info->regs[idx]);
+ cores_info->regs[idx] = NULL;
+ }
+
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+ if (cores_info != &ksii_cores_info)
+#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+ MFREE(sii->osh, cores_info, sizeof(si_cores_info_t));
+
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+ if (sii != &ksii)
+#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+ MFREE(sii->osh, sii, sizeof(si_info_t));
+}
+
+void *
+si_osh(si_t *sih)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ return sii->osh;
+}
+
+void
+si_setosh(si_t *sih, osl_t *osh)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+ if (sii->osh != NULL) {
+ SI_ERROR(("osh is already set....\n"));
+ ASSERT(!sii->osh);
+ }
+ sii->osh = osh;
+}
+
+/** register driver interrupt disabling and restoring callback functions */
+void
+si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+ void *intrsenabled_fn, void *intr_arg)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ sii->intr_arg = intr_arg;
+ sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn;
+ sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn;
+ sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn;
+ /* save the current core id. When this function is called, the current core
+ * must be the core which provides the driver functions (il, et, wl, etc.)
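+ *
+ * A hypothetical registration from a driver attach path (the wl_* callback
+ * names and the wl argument are placeholders, not symbols defined in this
+ * file):
+ *
+ *   si_register_intr_callback(sih, (void *)wl_intrsoff,
+ *           (void *)wl_intrsrestore, (void *)wl_intrsenabled, wl);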
+ */ + sii->dev_coreid = cores_info->coreid[sii->curidx]; +} + +void +si_deregister_intr_callback(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + sii->intrsoff_fn = NULL; + sii->intrsrestore_fn = NULL; + sii->intrsenabled_fn = NULL; +} + +uint +si_intflag(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_intflag(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return R_REG(sii->osh, ((uint32 *)(uintptr) + (sii->oob_router + OOB_STATUSA))); + else { + ASSERT(0); + return 0; + } +} + +uint +si_flag(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_flag(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_flag(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_flag(sih); + else { + ASSERT(0); + return 0; + } +} + +uint +si_flag_alt(si_t *sih) +{ + if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_flag_alt(sih); + else { + ASSERT(0); + return 0; + } +} + +void +si_setint(si_t *sih, int siflag) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_setint(sih, siflag); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + ai_setint(sih, siflag); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ub_setint(sih, siflag); + else + ASSERT(0); +} + +uint +si_coreid(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + return cores_info->coreid[sii->curidx]; +} + +uint +si_coreidx(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + return sii->curidx; +} + +void * +si_d11_switch_addrbase(si_t *sih, uint coreunit) +{ + return si_setcore(sih, D11_CORE_ID, coreunit); +} + +/** return the core-type instantiation # of the current core */ +uint +si_coreunit(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint idx; + uint coreid; + uint coreunit; + uint i; + + coreunit = 0; + + idx = sii->curidx; + + ASSERT(GOODREGS(sii->curmap)); + coreid = si_coreid(sih); + + /* count the cores of our type */ + for (i = 0; i < idx; i++) + if (cores_info->coreid[i] == coreid) + coreunit++; + + return (coreunit); +} + +uint +si_corevendor(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corevendor(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_corevendor(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_corevendor(sih); + else { + ASSERT(0); + return 0; + } +} + +bool +si_backplane64(si_t *sih) +{ + return ((sih->cccaps & CC_CAP_BKPLN64) != 0); +} + +uint +si_corerev(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corerev(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_corerev(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_corerev(sih); + else { + ASSERT(0); + return 0; + } +} + + +/* return index of coreid or BADIDX if not found */ +uint +si_findcoreidx(si_t *sih, uint coreid, uint coreunit) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint found; + uint i; + + + found = 0; + + for (i = 0; i < sii->numcores; i++) + if (cores_info->coreid[i] == coreid) { + if (found == coreunit) + return (i); + found++; + } + + return (BADIDX); +} + +/** return total coreunit 
of coreid or zero if not found */ +uint +si_numcoreunits(si_t *sih, uint coreid) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + uint found = 0; + uint i; + + for (i = 0; i < sii->numcores; i++) { + if (cores_info->coreid[i] == coreid) { + found++; + } + } + + return found; +} + +/** return total D11 coreunits */ +uint +BCMRAMFN(si_numd11coreunits)(si_t *sih) +{ + uint found = 0; + + found = si_numcoreunits(sih, D11_CORE_ID); + +#if defined(WLRSDB) && defined(WLRSDB_DISABLED) + /* If RSDB functionality is compiled out, + * then ignore any D11 cores beyond the first + * Used in norsdb dongle build variants for rsdb chip. + */ + found = 1; +#endif /* defined(WLRSDB) && !defined(WLRSDB_DISABLED) */ + + return found; +} + +/** return list of found cores */ +uint +si_corelist(si_t *sih, uint coreid[]) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + bcopy((uchar*)cores_info->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint))); + return (sii->numcores); +} + +/** return current wrapper mapping */ +void * +si_wrapperregs(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curwrap)); + + return (sii->curwrap); +} + +/** return current register mapping */ +void * +si_coreregs(si_t *sih) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + ASSERT(GOODREGS(sii->curmap)); + + return (sii->curmap); +} + +/** + * This function changes logical "focus" to the indicated core; + * must be called with interrupts off. + * Moreover, callers should keep interrupts off during switching out of and back to d11 core + */ +void * +si_setcore(si_t *sih, uint coreid, uint coreunit) +{ + uint idx; + + idx = si_findcoreidx(sih, coreid, coreunit); + if (!GOODIDX(idx)) + return (NULL); + + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_setcoreidx(sih, idx); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_setcoreidx(sih, idx); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_setcoreidx(sih, idx); + else { + ASSERT(0); + return NULL; + } +} + +void * +si_setcoreidx(si_t *sih, uint coreidx) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_setcoreidx(sih, coreidx); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_setcoreidx(sih, coreidx); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_setcoreidx(sih, coreidx); + else { + ASSERT(0); + return NULL; + } +} + +/** Turn off interrupt as required by sb_setcore, before switch core */ +void * +si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val) +{ + void *cc; + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + if (SI_FAST(sii)) { + /* Overloading the origidx variable to remember the coreid, + * this works because the core ids cannot be confused with + * core indices. 
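+ *
+ * Callers pair this with si_restore_core(); an illustrative sketch
+ * (not part of this patch):
+ *
+ *   uint origidx, intr_val;
+ *   chipcregs_t *cc = si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+ *   ... access chipcommon registers ...
+ *   si_restore_core(sih, origidx, intr_val);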
+ */ + *origidx = coreid; + if (coreid == CC_CORE_ID) + return (void *)CCREGS_FAST(sii); + else if (coreid == sih->buscoretype) + return (void *)PCIEREGS(sii); + } + INTR_OFF(sii, *intr_val); + *origidx = sii->curidx; + cc = si_setcore(sih, coreid, 0); + ASSERT(cc != NULL); + + return cc; +} + +/* restore coreidx and restore interrupt */ +void +si_restore_core(si_t *sih, uint coreid, uint intr_val) +{ + si_info_t *sii = SI_INFO(sih); + si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info; + + if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype))) + return; + + si_setcoreidx(sih, coreid); + INTR_RESTORE(sii, intr_val); +} + +int +si_numaddrspaces(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_numaddrspaces(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_numaddrspaces(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_numaddrspaces(sih); + else { + ASSERT(0); + return 0; + } +} + +uint32 +si_addrspace(si_t *sih, uint asidx) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_addrspace(sih, asidx); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_addrspace(sih, asidx); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_addrspace(sih, asidx); + else { + ASSERT(0); + return 0; + } +} + +uint32 +si_addrspacesize(si_t *sih, uint asidx) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_addrspacesize(sih, asidx); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_addrspacesize(sih, asidx); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_addrspacesize(sih, asidx); + else { + ASSERT(0); + return 0; + } +} + +void +si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size) +{ + /* Only supported for SOCI_AI */ + if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + ai_coreaddrspaceX(sih, asidx, addr, size); + else + *size = 0; +} + +uint32 +si_core_cflags(si_t *sih, uint32 mask, uint32 val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_core_cflags(sih, mask, val); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_core_cflags(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_core_cflags(sih, mask, val); + else { + ASSERT(0); + return 0; + } +} + +void +si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_core_cflags_wo(sih, mask, val); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + ai_core_cflags_wo(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ub_core_cflags_wo(sih, mask, val); + else + ASSERT(0); +} + +uint32 +si_core_sflags(si_t *sih, uint32 mask, uint32 val) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_core_sflags(sih, mask, val); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_core_sflags(sih, mask, val); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return ub_core_sflags(sih, mask, val); + else { + ASSERT(0); + return 0; + } +} + +bool +si_iscoreup(si_t *sih) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_iscoreup(sih); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_iscoreup(sih); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + return 
ub_iscoreup(sih);
+ else {
+ ASSERT(0);
+ return FALSE;
+ }
+}
+
+uint
+si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
+{
+ /* only for AI backplane chips */
+ if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return (ai_wrap_reg(sih, offset, mask, val));
+ return 0;
+}
+/* si_backplane_access is used to read a full backplane address from the host for PCIE FD.
+ * It uses the secondary bar-0 window, which lies at an offset of 16K from the primary
+ * bar-0 window, and supports reads/writes of 1/2/4 bytes at a backplane address.
+ * Can be used to read/write
+ * 1. core regs
+ * 2. Wrapper regs
+ * 3. memory
+ * 4. BT area
+ * For any 32 bit backplane address, bits [31 : 12] form the "region" and bits [11 : 0]
+ * the "regoff"; the routine splits these internally.
+ * For reading 4 bytes from reg 0x200 of the d11 core (backplane address 0x18001000),
+ * use it like below:
+ * : si_backplane_access(sih, 0x18001200, 4, &val, TRUE)
+ */
+static int si_backplane_addr_sane(uint addr, uint size)
+{
+ int bcmerror = BCME_OK;
+
+ /* For 2 byte access, address has to be 2 byte aligned */
+ if (size == 2) {
+ if (addr & 0x1) {
+ bcmerror = BCME_ERROR;
+ }
+ }
+ /* For 4 byte access, address has to be 4 byte aligned */
+ if (size == 4) {
+ if (addr & 0x3) {
+ bcmerror = BCME_ERROR;
+ }
+ }
+
+ return bcmerror;
+}
+uint
+si_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read)
+{
+ uint32 *r = NULL;
+ uint32 region = 0;
+ si_info_t *sii = SI_INFO(sih);
+
+ /* Valid only for pcie bus */
+ if (BUSTYPE(sih->bustype) != PCI_BUS) {
+ SI_ERROR(("Valid only for pcie bus \n"));
+ return BCME_ERROR;
+ }
+
+ /* Split addr into region and address offset */
+ region = (addr & (0xFFFFF << 12));
+ addr = addr & 0xFFF;
+
+ /* check for address and size sanity */
+ if (si_backplane_addr_sane(addr, size) != BCME_OK)
+ return BCME_ERROR;
+
+ /* Update window if required */
+ if (sii->second_bar0win != region) {
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, 4, region);
+ sii->second_bar0win = region;
+ }
+
+ /* Compute the effective address:
+ * sii->curmap : bar-0 virtual address
+ * PCI_SECOND_BAR0_OFFSET : secondary bar-0 offset
+ * addr : actual register offset
+ */
+ r = (uint32 *)((char *)sii->curmap + PCI_SECOND_BAR0_OFFSET + addr);
+
+ SI_VMSG(("si curmap %p region %x regaddr %x effective addr %p READ %d\n",
+ (char*)sii->curmap, region, addr, r, read));
+
+ switch (size) {
+ case sizeof(uint8) :
+ if (read)
+ *val = R_REG(sii->osh, (uint8*)r);
+ else
+ W_REG(sii->osh, (uint8*)r, *val);
+ break;
+ case sizeof(uint16) :
+ if (read)
+ *val = R_REG(sii->osh, (uint16*)r);
+ else
+ W_REG(sii->osh, (uint16*)r, *val);
+ break;
+ case sizeof(uint32) :
+ if (read)
+ *val = R_REG(sii->osh, (uint32*)r);
+ else
+ W_REG(sii->osh, (uint32*)r, *val);
+ break;
+
+ default :
+ SI_ERROR(("Invalid size %d \n", size));
+ return (BCME_ERROR);
+ break;
+ }
+
+ return (BCME_OK);
+}
+uint
+si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corereg(sih, coreidx, regoff, mask, val);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_corereg(sih, coreidx, regoff, mask, val);
+ else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
+ return ub_corereg(sih, coreidx, regoff, mask, val);
+ else {
+ ASSERT(0);
+ return 0;
+ }
+}
+
+/** ILP sensitive register access needs special treatment to avoid backplane stalls */
+bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff)
+{
+ if (idx == SI_CC_IDX) {
+ if
(CHIPCREGS_ILP_SENSITIVE(regoff)) + return TRUE; + } else if (PMUREGS_ILP_SENSITIVE(regoff)) { + return TRUE; + } + + return FALSE; +} + +/** 'idx' should refer either to the chipcommon core or the PMU core */ +uint +si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val) +{ + int pmustatus_offset; + + /* prevent backplane stall on double write to 'ILP domain' registers in the PMU */ + if (mask != 0 && sih->pmurev >= 22 && + si_pmu_is_ilp_sensitive(idx, regoff)) { + pmustatus_offset = AOB_ENAB(sih) ? OFFSETOF(pmuregs_t, pmustatus) : + OFFSETOF(chipcregs_t, pmustatus); + + while (si_corereg(sih, idx, pmustatus_offset, 0, 0) & PST_SLOW_WR_PENDING) + {}; + } + + return si_corereg(sih, idx, regoff, mask, val); +} + +/* + * If there is no need for fiddling with interrupts or core switches (typically silicon + * back plane registers, pci registers and chipcommon registers), this function + * returns the register offset on this core to a mapped address. This address can + * be used for W_REG/R_REG directly. + * + * For accessing registers that would need a core switch, this function will return + * NULL. + */ +uint32 * +si_corereg_addr(si_t *sih, uint coreidx, uint regoff) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + return sb_corereg_addr(sih, coreidx, regoff); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + return ai_corereg_addr(sih, coreidx, regoff); + else { + return 0; + } +} + +void +si_core_disable(si_t *sih, uint32 bits) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_core_disable(sih, bits); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + ai_core_disable(sih, bits); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ub_core_disable(sih, bits); +} + +void +si_core_reset(si_t *sih, uint32 bits, uint32 resetbits) +{ + if (CHIPTYPE(sih->socitype) == SOCI_SB) + sb_core_reset(sih, bits, resetbits); + else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)) + ai_core_reset(sih, bits, resetbits); + else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) + ub_core_reset(sih, bits, resetbits); +} + +/** Run bist on current core. Caller needs to take care of core-specific bist hazards */ +int +si_corebist(si_t *sih) +{ + uint32 cflags; + int result = 0; + + /* Read core control flags */ + cflags = si_core_cflags(sih, 0, 0); + + /* Set bist & fgc */ + si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC)); + + /* Wait for bist done */ + SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000); + + if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR) + result = BCME_ERROR; + + /* Reset core control flags */ + si_core_cflags(sih, 0xffff, cflags); + + return result; +} + +static uint32 +factor6(uint32 x) +{ + switch (x) { + case CC_F6_2: return 2; + case CC_F6_3: return 3; + case CC_F6_4: return 4; + case CC_F6_5: return 5; + case CC_F6_6: return 6; + case CC_F6_7: return 7; + default: return 0; + } +} + +/* + * Divide the clock by the divisor with protection for + * a zero divisor. + */ +static uint32 +divide_clock(uint32 clock, uint32 div) +{ + return div ? 
clock / div : 0;
+}
+
+
+/** calculate the speed the SI would run at given a set of clockcontrol values */
+uint32
+si_clock_rate(uint32 pll_type, uint32 n, uint32 m)
+{
+ uint32 n1, n2, clock, m1, m2, m3, mc;
+
+ n1 = n & CN_N1_MASK;
+ n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;
+
+ if (pll_type == PLL_TYPE6) {
+ if (m & CC_T6_MMASK)
+ return CC_T6_M1;
+ else
+ return CC_T6_M0;
+ } else if ((pll_type == PLL_TYPE1) ||
+ (pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE4) ||
+ (pll_type == PLL_TYPE7)) {
+ n1 = factor6(n1);
+ n2 += CC_F5_BIAS;
+ } else if (pll_type == PLL_TYPE2) {
+ n1 += CC_T2_BIAS;
+ n2 += CC_T2_BIAS;
+ ASSERT((n1 >= 2) && (n1 <= 7));
+ ASSERT((n2 >= 5) && (n2 <= 23));
+ } else if (pll_type == PLL_TYPE5) {
+ return (100000000);
+ } else
+ ASSERT(0);
+ /* PLL types 3 and 7 use BASE2 (25 MHz) */
+ if ((pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE7)) {
+ clock = CC_CLOCK_BASE2 * n1 * n2;
+ } else
+ clock = CC_CLOCK_BASE1 * n1 * n2;
+
+ if (clock == 0)
+ return 0;
+
+ m1 = m & CC_M1_MASK;
+ m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
+ m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
+ mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;
+
+ if ((pll_type == PLL_TYPE1) ||
+ (pll_type == PLL_TYPE3) ||
+ (pll_type == PLL_TYPE4) ||
+ (pll_type == PLL_TYPE7)) {
+ m1 = factor6(m1);
+ if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
+ m2 += CC_F5_BIAS;
+ else
+ m2 = factor6(m2);
+ m3 = factor6(m3);
+
+ switch (mc) {
+ case CC_MC_BYPASS: return (clock);
+ case CC_MC_M1: return divide_clock(clock, m1);
+ case CC_MC_M1M2: return divide_clock(clock, m1 * m2);
+ case CC_MC_M1M2M3: return divide_clock(clock, m1 * m2 * m3);
+ case CC_MC_M1M3: return divide_clock(clock, m1 * m3);
+ default: return (0);
+ }
+ } else {
+ ASSERT(pll_type == PLL_TYPE2);
+
+ m1 += CC_T2_BIAS;
+ m2 += CC_T2M2_BIAS;
+ m3 += CC_T2_BIAS;
+ ASSERT((m1 >= 2) && (m1 <= 7));
+ ASSERT((m2 >= 3) && (m2 <= 10));
+ ASSERT((m3 >= 2) && (m3 <= 7));
+
+ if ((mc & CC_T2MC_M1BYP) == 0)
+ clock /= m1;
+ if ((mc & CC_T2MC_M2BYP) == 0)
+ clock /= m2;
+ if ((mc & CC_T2MC_M3BYP) == 0)
+ clock /= m3;
+
+ return (clock);
+ }
+ return 0;
+}
+
+/**
+ * Some chips have multiple host interfaces, but only one of them is active at a time.
+ * Depending on the package option and the chipcommon chip status, return the active
+ * host interface for the given chip.
+ */
+uint
+si_chip_hostif(si_t *sih)
+{
+ uint hosti = 0;
+
+ switch (CHIPID(sih->chip)) {
+
+ CASE_BCM43602_CHIP:
+ hosti = CHIP_HOSTIF_PCIEMODE;
+ break;
+
+ case BCM4360_CHIP_ID:
+ /* chippkg bit-0 == 0 is PCIE only pkgs
+ * chippkg bit-0 == 1 has both PCIE and USB cores enabled
+ */
+ if ((sih->chippkg & 0x1) && (sih->chipst & CST4360_MODE_USB))
+ hosti = CHIP_HOSTIF_USBMODE;
+ else
+ hosti = CHIP_HOSTIF_PCIEMODE;
+
+ break;
+
+ case BCM4335_CHIP_ID:
+ /* TBD: like in 4360, do we need to check pkg?
*/ + if (CST4335_CHIPMODE_USB20D(sih->chipst)) + hosti = CHIP_HOSTIF_USBMODE; + else if (CST4335_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else + hosti = CHIP_HOSTIF_PCIEMODE; + break; + + case BCM4345_CHIP_ID: + case BCM43454_CHIP_ID: + if (CST4345_CHIPMODE_USB20D(sih->chipst) || CST4345_CHIPMODE_HSIC(sih->chipst)) + hosti = CHIP_HOSTIF_USBMODE; + else if (CST4345_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else if (CST4345_CHIPMODE_PCIE(sih->chipst)) + hosti = CHIP_HOSTIF_PCIEMODE; + break; + + case BCM4349_CHIP_GRPID: + if (CST4349_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else if (CST4349_CHIPMODE_PCIE(sih->chipst)) + hosti = CHIP_HOSTIF_PCIEMODE; + break; + + case BCM4350_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM4356_CHIP_ID: + case BCM43556_CHIP_ID: + case BCM43558_CHIP_ID: + case BCM43566_CHIP_ID: + case BCM43568_CHIP_ID: + case BCM43569_CHIP_ID: + case BCM43570_CHIP_ID: + case BCM4358_CHIP_ID: + if (CST4350_CHIPMODE_USB20D(sih->chipst) || + CST4350_CHIPMODE_HSIC20D(sih->chipst) || + CST4350_CHIPMODE_USB30D(sih->chipst) || + CST4350_CHIPMODE_USB30D_WL(sih->chipst) || + CST4350_CHIPMODE_HSIC30D(sih->chipst)) + hosti = CHIP_HOSTIF_USBMODE; + else if (CST4350_CHIPMODE_SDIOD(sih->chipst)) + hosti = CHIP_HOSTIF_SDIOMODE; + else if (CST4350_CHIPMODE_PCIE(sih->chipst)) + hosti = CHIP_HOSTIF_PCIEMODE; + break; + + default: + break; + } + + return hosti; +} + + +/** set chip watchdog reset timer to fire in 'ticks' */ +void +si_watchdog(si_t *sih, uint ticks) +{ + uint nb, maxt; + + if (PMUCTL_ENAB(sih)) { + +#if !defined(_CFEZ_) || defined(CFG_WL) + if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) && + (CHIPREV(sih->chiprev) == 0) && (ticks != 0)) { + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2); + si_setcore(sih, USB20D_CORE_ID, 0); + si_core_disable(sih, 1); + si_setcore(sih, CC_CORE_ID, 0); + } +#endif + + nb = (sih->ccrev < 26) ? 16 : ((sih->ccrev >= 37) ? 32 : 24); + /* The mips compiler uses the sllv instruction, + * so we specially handle the 32-bit case. 
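+ *
+ * On MIPS, sllv masks the shift amount to its low five bits, so a shift
+ * count of 32 behaves like 0 and (1 << nb) - 1 would evaluate to 0
+ * instead of 0xffffffff; hence the explicit 32-bit case below, which
+ * reduces to:
+ *
+ *   maxt = (nb == 32) ? 0xffffffff : ((1 << nb) - 1);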
+ */ + if (nb == 32) + maxt = 0xffffffff; + else + maxt = ((1 << nb) - 1); + + if (ticks == 1) + ticks = 2; + else if (ticks > maxt) + ticks = maxt; + + pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, ~0, ticks); + } else { + maxt = (1 << 28) - 1; + if (ticks > maxt) + ticks = maxt; + + si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks); + } +} + +/** trigger watchdog reset after ms milliseconds */ +void +si_watchdog_ms(si_t *sih, uint32 ms) +{ + si_watchdog(sih, wd_msticks * ms); +} + +uint32 si_watchdog_msticks(void) +{ + return wd_msticks; +} + +bool +si_taclear(si_t *sih, bool details) +{ + return FALSE; +} + + + +/** return the slow clock source - LPO, XTAL, or PCI */ +static uint +si_slowclk_src(si_info_t *sii) +{ + chipcregs_t *cc; + + ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID); + + if (sii->pub.ccrev < 6) { + if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) && + (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32)) & + PCI_CFG_GPIO_SCS)) + return (SCC_SS_PCI); + else + return (SCC_SS_XTAL); + } else if (sii->pub.ccrev < 10) { + cc = (chipcregs_t *)si_setcoreidx(&sii->pub, sii->curidx); + ASSERT(cc); + return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK); + } else /* Insta-clock */ + return (SCC_SS_XTAL); +} + +/** return the ILP (slowclock) min or max frequency */ +static uint +si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc) +{ + uint32 slowclk; + uint div; + + ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID); + + /* shouldn't be here unless we've established the chip has dynamic clk control */ + ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL); + + slowclk = si_slowclk_src(sii); + if (sii->pub.ccrev < 6) { + if (slowclk == SCC_SS_PCI) + return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64)); + else + return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32)); + } else if (sii->pub.ccrev < 10) { + div = 4 * + (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1); + if (slowclk == SCC_SS_LPO) + return (max_freq ? LPOMAXFREQ : LPOMINFREQ); + else if (slowclk == SCC_SS_XTAL) + return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div)); + else if (slowclk == SCC_SS_PCI) + return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div)); + else + ASSERT(0); + } else { + /* Chipc rev 10 is InstaClock */ + div = R_REG(sii->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT; + div = 4 * (div + 1); + return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div)); + } + return (0); +} + +static void +si_clkctl_setdelay(si_info_t *sii, void *chipcregs) +{ + chipcregs_t *cc = (chipcregs_t *)chipcregs; + uint slowmaxfreq, pll_delay, slowclk; + uint pll_on_delay, fref_sel_delay; + + pll_delay = PLL_DELAY; + + /* If the slow clock is not sourced by the xtal then add the xtal_on_delay + * since the xtal will also be powered down by dynamic clk control logic. + */ + + slowclk = si_slowclk_src(sii); + if (slowclk != SCC_SS_XTAL) + pll_delay += XTAL_ON_DELAY; + + /* Starting with 4318 it is ILP that is used for the delays */ + slowmaxfreq = si_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? 
FALSE : TRUE, cc); + + pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000; + fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000; + + W_REG(sii->osh, &cc->pll_on_delay, pll_on_delay); + W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay); +} + +/** initialize power control delay registers */ +void +si_clkctl_init(si_t *sih) +{ + si_info_t *sii; + uint origidx = 0; + chipcregs_t *cc; + bool fast; + + if (!CCCTL_ENAB(sih)) + return; + + sii = SI_INFO(sih); + fast = SI_FAST(sii); + if (!fast) { + origidx = sii->curidx; + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) + return; + } else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL) + return; + ASSERT(cc != NULL); + + /* set all Instaclk chip ILP to 1 MHz */ + if (sih->ccrev >= 10) + SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK, + (ILP_DIV_1MHZ << SYCC_CD_SHIFT)); + + si_clkctl_setdelay(sii, (void *)(uintptr)cc); + + OSL_DELAY(20000); + + if (!fast) + si_setcoreidx(sih, origidx); +} + + +/** change logical "focus" to the gpio core for optimized access */ +void * +si_gpiosetcore(si_t *sih) +{ + return (si_setcoreidx(sih, SI_CC_IDX)); +} + +/** + * mask & set gpiocontrol bits. + * If a gpiocontrol bit is set to 0, chipcommon controls the corresponding GPIO pin. + * If a gpiocontrol bit is set to 1, the GPIO pin is no longer a GPIO and becomes dedicated + * to some chip-specific purpose. + */ +uint32 +si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + regoff = 0; + + /* gpios could be shared on router platforms + * ignore reservation if it's high priority (e.g., test apps) + */ + if ((priority != GPIO_HI_PRIORITY) && + (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? (si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpiocontrol); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/** mask&set gpio output enable bits */ +uint32 +si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + regoff = 0; + + /* gpios could be shared on router platforms + * ignore reservation if it's high priority (e.g., test apps) + */ + if ((priority != GPIO_HI_PRIORITY) && + (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? (si_gpioreservation & mask) : + ((si_gpioreservation | mask) & ~(si_gpioreservation)); + val &= mask; + } + + regoff = OFFSETOF(chipcregs_t, gpioouten); + return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); +} + +/** mask&set gpio output bits */ +uint32 +si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority) +{ + uint regoff; + + regoff = 0; + + /* gpios could be shared on router platforms + * ignore reservation if it's high priority (e.g., test apps) + */ + if ((priority != GPIO_HI_PRIORITY) && + (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { + mask = priority ? 
(si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpioout);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/** reserve one gpio */
+uint32
+si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+ /* only cores on SI_BUS share GPIO's and only application users need to
+ * reserve/release GPIO
+ */
+ if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+ ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+ return 0xffffffff;
+ }
+ /* make sure only one bit is set */
+ if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+ ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+ return 0xffffffff;
+ }
+
+ /* already reserved */
+ if (si_gpioreservation & gpio_bitmask)
+ return 0xffffffff;
+ /* set reservation */
+ si_gpioreservation |= gpio_bitmask;
+
+ return si_gpioreservation;
+}
+
+/**
+ * release one gpio.
+ *
+ * releasing the gpio doesn't change the current value on the GPIO; the last
+ * written value persists until someone overwrites it.
+ */
+uint32
+si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority)
+{
+ /* only cores on SI_BUS share GPIO's and only application users need to
+ * reserve/release GPIO
+ */
+ if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
+ ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
+ return 0xffffffff;
+ }
+ /* make sure only one bit is set */
+ if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
+ ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
+ return 0xffffffff;
+ }
+
+ /* already released */
+ if (!(si_gpioreservation & gpio_bitmask))
+ return 0xffffffff;
+
+ /* clear reservation */
+ si_gpioreservation &= ~gpio_bitmask;
+
+ return si_gpioreservation;
+}
+
+/* return the current gpioin register value */
+uint32
+si_gpioin(si_t *sih)
+{
+ uint regoff;
+
+ regoff = OFFSETOF(chipcregs_t, gpioin);
+ return (si_corereg(sih, SI_CC_IDX, regoff, 0, 0));
+}
+
+/* mask&set gpio interrupt polarity bits */
+uint32
+si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ /* gpios could be shared on router platforms */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiointpolarity);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* mask&set gpio interrupt mask bits */
+uint32
+si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
+{
+ uint regoff;
+
+ /* gpios could be shared on router platforms */
+ if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+ mask = priority ? (si_gpioreservation & mask) :
+ ((si_gpioreservation | mask) & ~(si_gpioreservation));
+ val &= mask;
+ }
+
+ regoff = OFFSETOF(chipcregs_t, gpiointmask);
+ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
+}
+
+/* assign the gpio to an led */
+uint32
+si_gpioled(si_t *sih, uint32 mask, uint32 val)
+{
+ if (sih->ccrev < 16)
+ return 0xffffffff;
+
+ /* gpio led powersave reg */
+ return (si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val));
+}
+
+/* mask&set gpio timer val */
+uint32
+si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval)
+{
+ if (sih->ccrev < 16)
+ return 0xffffffff;
+
+ return (si_corereg(sih, SI_CC_IDX,
+ OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval));
+}
+
+uint32
+si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val)
+{
+ uint offs;
+
+ if (sih->ccrev < 20)
+ return 0xffffffff;
+
+ offs = (updown ? OFFSETOF(chipcregs_t, gpiopulldown) : OFFSETOF(chipcregs_t, gpiopullup));
+ return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+uint32
+si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val)
+{
+ uint offs;
+
+ if (sih->ccrev < 11)
+ return 0xffffffff;
+
+ if (regtype == GPIO_REGEVT)
+ offs = OFFSETOF(chipcregs_t, gpioevent);
+ else if (regtype == GPIO_REGEVT_INTMSK)
+ offs = OFFSETOF(chipcregs_t, gpioeventintmask);
+ else if (regtype == GPIO_REGEVT_INTPOL)
+ offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
+ else
+ return 0xffffffff;
+
+ return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
+}
+
+uint32
+si_gpio_int_enable(si_t *sih, bool enable)
+{
+ uint offs;
+
+ if (sih->ccrev < 11)
+ return 0xffffffff;
+
+ offs = OFFSETOF(chipcregs_t, intmask);
+ return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
+}
+
+/** Return the size of the specified SYSMEM bank */
+static uint
+sysmem_banksize(si_info_t *sii, sysmemregs_t *regs, uint8 idx, uint8 mem_type)
+{
+ uint banksize, bankinfo;
+ uint bankidx = idx | (mem_type << SYSMEM_BANKIDX_MEMTYPE_SHIFT);
+
+ ASSERT(mem_type <= SYSMEM_MEMTYPE_DEVRAM);
+
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ banksize = SYSMEM_BANKINFO_SZBASE * ((bankinfo & SYSMEM_BANKINFO_SZMASK) + 1);
+ return banksize;
+}
+
+/** Return the RAM size of the SYSMEM core */
+uint32
+si_sysmem_size(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint origidx;
+ uint intr_val = 0;
+
+ sysmemregs_t *regs;
+ bool wasup;
+ uint32 coreinfo;
+ uint memsize = 0;
+ uint8 i;
+ uint nb;
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SYSMEM core */
+ if (!(regs = si_setcore(sih, SYSMEM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+ coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+ nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ for (i = 0; i < nb; i++)
+ memsize += sysmem_banksize(sii, regs, i, SYSMEM_MEMTYPE_RAM);
+
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
+
+/** Return the size of the specified SOCRAM bank */
+static uint
+socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type)
+{
+ uint banksize, bankinfo;
+ uint bankidx = idx | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+
+ ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM);
+
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ banksize = SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1);
+ return banksize;
+}
+
+void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 bankpda)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint origidx;
+ uint intr_val = 0;
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 16) {
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ W_REG(sii->osh, &regs->bankpda, bankpda);
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+}
+
+void
+si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint origidx;
+ uint intr_val = 0;
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ if (!set)
+ *enable = *protect = *remap = 0;
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 10) {
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+ uint32 bankidx, bankinfo;
+
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+ for (i = 0; i < nb; i++) {
+ bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ if (set) {
+ bankinfo &= ~SOCRAM_BANKINFO_DEVRAMSEL_MASK;
+ bankinfo &= ~SOCRAM_BANKINFO_DEVRAMPRO_MASK;
+ bankinfo &= ~SOCRAM_BANKINFO_DEVRAMREMAP_MASK;
+ if (*enable) {
+ bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMSEL_SHIFT);
+ if (*protect)
+ bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMPRO_SHIFT);
+ if ((corerev >= 16) && *remap)
+ bankinfo |=
+ (1 << SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT);
+ }
+ W_REG(sii->osh, &regs->bankinfo, bankinfo);
+ }
+ else if (i == 0) {
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMSEL_MASK) {
+ *enable = 1;
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMPRO_MASK)
+ *protect = 1;
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK)
+ *remap = 1;
+ }
+ }
+ }
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+}
+
+bool
+si_socdevram_remap_isenb(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint origidx;
+ uint intr_val = 0;
+ sbsocramregs_t *regs;
+ bool wasup, remap = FALSE;
+ uint corerev;
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+ uint32 bankidx, bankinfo;
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 16) {
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+
+bool
+si_socdevram_remap_isenb(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint origidx;
+ uint intr_val = 0;
+ sbsocramregs_t *regs;
+ bool wasup, remap = FALSE;
+ uint corerev;
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+ uint32 bankidx, bankinfo;
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 16) {
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
+ for (i = 0; i < nb; i++) {
+ bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) {
+ remap = TRUE;
+ break;
+ }
+ }
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+ return remap;
+}
+
+bool
+si_socdevram_pkg(si_t *sih)
+{
+ if (si_socdevram_size(sih) > 0)
+ return TRUE;
+ else
+ return FALSE;
+}
+
+uint32
+si_socdevram_size(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint origidx;
+ uint intr_val = 0;
+ uint32 memsize = 0;
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 10) {
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
+ for (i = 0; i < nb; i++)
+ memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
+
+uint32
+si_socdevram_remap_size(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint origidx;
+ uint intr_val = 0;
+ uint32 memsize = 0, banksz;
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+ uint32 extcinfo;
+ uint8 nb;
+ uint8 i;
+ uint32 bankidx, bankinfo;
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 16) {
+ extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
+ nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
+
+ /*
+ * FIX: A0 issue: max addressable is 512KB instead of 640KB.
+ * Only four banks are accessible to the ARM.
+ */
+ if ((corerev == 16) && (nb == 5))
+ nb = 4;
+
+ for (i = 0; i < nb; i++) {
+ bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) {
+ banksz = socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
+ memsize += banksz;
+ } else {
+ /* Account only consecutive banks for now */
+ break;
+ }
+ }
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
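+
+/*
+ * Illustrative note: si_socdevram_remap_size() above only counts a leading
+ * run of remapped banks ("Account only consecutive banks"). For example,
+ * with five DEVRAM banks flagged {remap, remap, normal, remap, remap}, only
+ * the first two contribute, so the reported size is 2 * banksize, not 4.
+ */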
+
+/** Return the RAM size of the SOCRAM core */
+uint32
+si_socram_size(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint origidx;
+ uint intr_val = 0;
+
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+ uint32 coreinfo;
+ uint memsize = 0;
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+ corerev = si_corerev(sih);
+ coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+ /* Calculate size from coreinfo based on rev */
+ if (corerev == 0)
+ memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
+ else if (corerev < 3) {
+ memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
+ memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ } else if ((corerev <= 7) || (corerev == 12)) {
+ uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
+ uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
+ if (lss != 0)
+ nb--;
+ memsize = nb * (1 << (bsz + SR_BSZ_BASE));
+ if (lss != 0)
+ memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
+ } else {
+ uint8 i;
+ uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ for (i = 0; i < nb; i++)
+ memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
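+
+/*
+ * Worked example (illustrative; assumes the usual SR_BSZ_BASE value of 14):
+ * for a corerev < 3 SOCRAM, a coreinfo with SRBSZ == 1 and SRNB == 4
+ * decodes to memsize = (1 << (14 + 1)) * 4 = 32 KB * 4 = 128 KB. For
+ * corerev 0 the size is simply 1 << (16 + (coreinfo & SRCI_MS0_MASK)),
+ * i.e. 64 KB when the field is 0.
+ */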
+
+
+/** Return the TCM-RAM size of the ARMCR4 core. */
+uint32
+si_tcm_size(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint origidx;
+ uint intr_val = 0;
+ uint8 *regs;
+ bool wasup;
+ uint32 corecap;
+ uint memsize = 0;
+ uint32 nab = 0;
+ uint32 nbb = 0;
+ uint32 totb = 0;
+ uint32 bxinfo = 0;
+ uint32 idx = 0;
+ uint32 *arm_cap_reg;
+ uint32 *arm_bidx;
+ uint32 *arm_binfo;
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to CR4 core */
+ if (!(regs = si_setcore(sih, ARMCR4_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size. If in reset, come out of reset,
+ * but remain in halt
+ */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, SICF_CPUHALT, SICF_CPUHALT);
+
+ arm_cap_reg = (uint32 *)(regs + SI_CR4_CAP);
+ corecap = R_REG(sii->osh, arm_cap_reg);
+
+ nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
+ nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
+ totb = nab + nbb;
+
+ arm_bidx = (uint32 *)(regs + SI_CR4_BANKIDX);
+ arm_binfo = (uint32 *)(regs + SI_CR4_BANKINFO);
+ for (idx = 0; idx < totb; idx++) {
+ W_REG(sii->osh, arm_bidx, idx);
+
+ bxinfo = R_REG(sii->osh, arm_binfo);
+ memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
+
+bool
+si_has_flops(si_t *sih)
+{
+ uint origidx, cr4_rev;
+
+ /* Find out CR4 core revision */
+ origidx = si_coreidx(sih);
+ if (si_setcore(sih, ARMCR4_CORE_ID, 0)) {
+ cr4_rev = si_corerev(sih);
+ si_setcoreidx(sih, origidx);
+
+ if (cr4_rev == 1 || cr4_rev >= 3)
+ return TRUE;
+ }
+ return FALSE;
+}
+
+uint32
+si_socram_srmem_size(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint origidx;
+ uint intr_val = 0;
+
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+ uint32 coreinfo;
+ uint memsize = 0;
+
+ if ((CHIPID(sih->chip) == BCM4334_CHIP_ID) && (CHIPREV(sih->chiprev) < 2)) {
+ return (32 * 1024);
+ }
+
+ if (CHIPID(sih->chip) == BCM43430_CHIP_ID) {
+ return (64 * 1024);
+ }
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+ corerev = si_corerev(sih);
+ coreinfo = R_REG(sii->osh, &regs->coreinfo);
+
+ /* Calculate size from coreinfo based on rev */
+ if (corerev >= 16) {
+ uint8 i;
+ uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ for (i = 0; i < nb; i++) {
+ W_REG(sii->osh, &regs->bankidx, i);
+ if (R_REG(sii->osh, &regs->bankinfo) & SOCRAM_BANKINFO_RETNTRAM_MASK)
+ memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
+ }
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+
+ return memsize;
+}
+
+
+#if !defined(_CFEZ_) || defined(CFG_WL)
+void
+si_btcgpiowar(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint origidx;
+ uint intr_val = 0;
+ chipcregs_t *cc;
+
+ /* Make sure that there is ChipCommon core present &&
+ * UART_TX is strapped to 1
+ */
+ if (!(sih->cccaps & CC_CAP_UARTGPIO))
+ return;
+
+ /* si_corereg cannot be used as we have to guarantee 8-bit read/writes */
+ INTR_OFF(sii, intr_val);
+
+ origidx = si_coreidx(sih);
+
+ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ ASSERT(cc != NULL);
+
+ W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04);
+
+ /* restore the original index */
+ si_setcoreidx(sih, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+}
+
+void
+si_chipcontrl_btshd0_4331(si_t *sih, bool on)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 val;
+ uint intr_val = 0;
+
+ INTR_OFF(sii, intr_val);
+
+
origidx = si_coreidx(sih); + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + + val = R_REG(sii->osh, &cc->chipcontrol); + + /* bt_shd0 controls are same for 4331 chiprevs 0 and 1, packages 12x9 and 12x12 */ + if (on) { + /* Enable bt_shd0 on gpio4: */ + val |= (CCTRL4331_BT_SHD0_ON_GPIO4); + W_REG(sii->osh, &cc->chipcontrol, val); + } else { + val &= ~(CCTRL4331_BT_SHD0_ON_GPIO4); + W_REG(sii->osh, &cc->chipcontrol, val); + } + + /* restore the original index */ + si_setcoreidx(sih, origidx); + + INTR_RESTORE(sii, intr_val); +} + +void +si_chipcontrl_restore(si_t *sih, uint32 val) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + W_REG(sii->osh, &cc->chipcontrol, val); + si_setcoreidx(sih, origidx); +} + +uint32 +si_chipcontrl_read(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + uint32 val; + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return -1; + } + val = R_REG(sii->osh, &cc->chipcontrol); + si_setcoreidx(sih, origidx); + return val; +} + +void +si_chipcontrl_epa4331(si_t *sih, bool on) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + uint32 val; + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + val = R_REG(sii->osh, &cc->chipcontrol); + + if (on) { + if (sih->chippkg == 9 || sih->chippkg == 0xb) { + val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5); + /* Ext PA Controls for 4331 12x9 Package */ + W_REG(sii->osh, &cc->chipcontrol, val); + } else { + /* Ext PA Controls for 4331 12x12 Package */ + if (CHIPREV(sih->chiprev) > 0) { + W_REG(sii->osh, &cc->chipcontrol, val | + (CCTRL4331_EXTPA_EN) | (CCTRL4331_EXTPA_EN2)); + } else { + W_REG(sii->osh, &cc->chipcontrol, val | (CCTRL4331_EXTPA_EN)); + } + } + } else { + val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_EN2 | CCTRL4331_EXTPA_ON_GPIO2_5); + W_REG(sii->osh, &cc->chipcontrol, val); + } + + si_setcoreidx(sih, origidx); +} + +/** switch muxed pins, on: SROM, off: FEMCTRL. Called for a family of ac chips, not just 4360. 
*/ +void +si_chipcontrl_srom4360(si_t *sih, bool on) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + uint32 val; + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + val = R_REG(sii->osh, &cc->chipcontrol); + + if (on) { + val &= ~(CCTRL4360_SECI_MODE | + CCTRL4360_BTSWCTRL_MODE | + CCTRL4360_EXTRA_FEMCTRL_MODE | + CCTRL4360_BT_LGCY_MODE | + CCTRL4360_CORE2FEMCTRL4_ON); + + W_REG(sii->osh, &cc->chipcontrol, val); + } else { + } + + si_setcoreidx(sih, origidx); +} + +void +si_clk_srom4365(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + uint32 val; + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + val = R_REG(sii->osh, &cc->clkdiv2); + W_REG(sii->osh, &cc->clkdiv2, ((val&~0xf) | 0x4)); + + si_setcoreidx(sih, origidx); +} + +void +si_d11rsdb_core1_alt_reg_clk_dis(si_t *sih) +{ +#if defined(WLRSDB) && !defined(WLRSDB_DISABLED) + ai_d11rsdb_core1_alt_reg_clk_dis(sih); +#endif /* defined(WLRSDB) && !defined(WLRSDB_DISABLED) */ +} + +void +si_d11rsdb_core1_alt_reg_clk_en(si_t *sih) +{ +#if defined(WLRSDB) && !defined(WLRSDB_DISABLED) + ai_d11rsdb_core1_alt_reg_clk_en(sih); +#endif /* defined(WLRSDB) && !defined(WLRSDB_DISABLED) */ +} + +void +si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl) +{ + si_info_t *sii; + chipcregs_t *cc; + uint origidx; + uint32 val; + bool sel_chip; + + sel_chip = (CHIPID(sih->chip) == BCM4331_CHIP_ID) || + (CHIPID(sih->chip) == BCM43431_CHIP_ID); + sel_chip &= ((sih->chippkg == 9 || sih->chippkg == 0xb)); + + if (!sel_chip) + return; + + sii = SI_INFO(sih); + origidx = si_coreidx(sih); + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + + val = R_REG(sii->osh, &cc->chipcontrol); + + if (enter_wowl) { + val |= CCTRL4331_EXTPA_EN; + W_REG(sii->osh, &cc->chipcontrol, val); + } else { + val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5); + W_REG(sii->osh, &cc->chipcontrol, val); + } + si_setcoreidx(sih, origidx); +} +#endif + +uint +si_pll_reset(si_t *sih) +{ + uint err = 0; + + return (err); +} + +/** Enable BT-COEX & Ex-PA for 4313 */ +void +si_epa_4313war(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + + /* EPA Fix */ + W_REG(sii->osh, &cc->gpiocontrol, + R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK); + + si_setcoreidx(sih, origidx); +} + +void +si_clk_pmu_htavail_set(si_t *sih, bool set_clear) +{ +} + +void +si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag) +{ +} + +/** Re-enable synth_pwrsw resource in min_res_mask for 4313 */ +void +si_pmu_synth_pwrsw_4313_war(si_t *sih) +{ +} + +/** WL/BT control for 4313 btcombo boards >= P250 */ +void +si_btcombo_p250_4313_war(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + W_REG(sii->osh, &cc->gpiocontrol, + R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_5_6_EN_MASK); + + W_REG(sii->osh, &cc->gpioouten, + R_REG(sii->osh, 
&cc->gpioouten) | GPIO_CTRL_5_6_EN_MASK); + + si_setcoreidx(sih, origidx); +} +void +si_btc_enable_chipcontrol(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + + /* BT fix */ + W_REG(sii->osh, &cc->chipcontrol, + R_REG(sii->osh, &cc->chipcontrol) | CC_BTCOEX_EN_MASK); + + si_setcoreidx(sih, origidx); +} +void +si_btcombo_43228_war(si_t *sih) +{ + si_info_t *sii = SI_INFO(sih); + chipcregs_t *cc; + uint origidx = si_coreidx(sih); + + if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { + SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__)); + return; + } + + W_REG(sii->osh, &cc->gpioouten, GPIO_CTRL_7_6_EN_MASK); + W_REG(sii->osh, &cc->gpioout, GPIO_OUT_7_EN_MASK); + + si_setcoreidx(sih, origidx); +} + +/** check if the device is removed */ +bool +si_deviceremoved(si_t *sih) +{ + uint32 w; + + switch (BUSTYPE(sih->bustype)) { + case PCI_BUS: + ASSERT(SI_INFO(sih)->osh != NULL); + w = OSL_PCI_READ_CONFIG(SI_INFO(sih)->osh, PCI_CFG_VID, sizeof(uint32)); + if ((w & 0xFFFF) != VENDOR_BROADCOM) + return TRUE; + break; + } + return FALSE; +} + +bool +si_is_sprom_available(si_t *sih) +{ + if (sih->ccrev >= 31) { + si_info_t *sii; + uint origidx; + chipcregs_t *cc; + uint32 sromctrl; + + if ((sih->cccaps & CC_CAP_SROM) == 0) + return FALSE; + + sii = SI_INFO(sih); + origidx = sii->curidx; + cc = si_setcoreidx(sih, SI_CC_IDX); + ASSERT(cc); + sromctrl = R_REG(sii->osh, &cc->sromcontrol); + si_setcoreidx(sih, origidx); + return (sromctrl & SRC_PRESENT); + } + + switch (CHIPID(sih->chip)) { + case BCM4312_CHIP_ID: + return ((sih->chipst & CST4312_SPROM_OTP_SEL_MASK) != CST4312_OTP_SEL); + case BCM4325_CHIP_ID: + return (sih->chipst & CST4325_SPROM_SEL) != 0; + case BCM4322_CHIP_ID: case BCM43221_CHIP_ID: case BCM43231_CHIP_ID: + case BCM43222_CHIP_ID: case BCM43111_CHIP_ID: case BCM43112_CHIP_ID: + case BCM4342_CHIP_ID: { + uint32 spromotp; + spromotp = (sih->chipst & CST4322_SPROM_OTP_SEL_MASK) >> + CST4322_SPROM_OTP_SEL_SHIFT; + return (spromotp & CST4322_SPROM_PRESENT) != 0; + } + case BCM4329_CHIP_ID: + return (sih->chipst & CST4329_SPROM_SEL) != 0; + case BCM4315_CHIP_ID: + return (sih->chipst & CST4315_SPROM_SEL) != 0; + case BCM4319_CHIP_ID: + return (sih->chipst & CST4319_SPROM_SEL) != 0; + case BCM4336_CHIP_ID: + case BCM43362_CHIP_ID: + return (sih->chipst & CST4336_SPROM_PRESENT) != 0; + case BCM4330_CHIP_ID: + return (sih->chipst & CST4330_SPROM_PRESENT) != 0; + case BCM4313_CHIP_ID: + return (sih->chipst & CST4313_SPROM_PRESENT) != 0; + case BCM4331_CHIP_ID: + case BCM43431_CHIP_ID: + return (sih->chipst & CST4331_SPROM_PRESENT) != 0; + case BCM43239_CHIP_ID: + return ((sih->chipst & CST43239_SPROM_MASK) && + !(sih->chipst & CST43239_SFLASH_MASK)); + case BCM4324_CHIP_ID: + case BCM43242_CHIP_ID: + return ((sih->chipst & CST4324_SPROM_MASK) && + !(sih->chipst & CST4324_SFLASH_MASK)); + case BCM4335_CHIP_ID: + case BCM4345_CHIP_ID: + case BCM43454_CHIP_ID: + return ((sih->chipst & CST4335_SPROM_MASK) && + !(sih->chipst & CST4335_SFLASH_MASK)); + case BCM4349_CHIP_GRPID: + return (sih->chipst & CST4349_SPROM_PRESENT) != 0; + break; + case BCM4350_CHIP_ID: + case BCM4354_CHIP_ID: + case BCM4356_CHIP_ID: + case BCM43556_CHIP_ID: + case BCM43558_CHIP_ID: + case BCM43566_CHIP_ID: + case BCM43568_CHIP_ID: + case BCM43569_CHIP_ID: + case BCM43570_CHIP_ID: + case BCM4358_CHIP_ID: + 
return (sih->chipst & CST4350_SPROM_PRESENT) != 0;
+ CASE_BCM43602_CHIP:
+ return (sih->chipst & CST43602_SPROM_PRESENT) != 0;
+ case BCM43131_CHIP_ID:
+ case BCM43217_CHIP_ID:
+ case BCM43227_CHIP_ID:
+ case BCM43228_CHIP_ID:
+ case BCM43428_CHIP_ID:
+ return (sih->chipst & CST43228_OTP_PRESENT) != CST43228_OTP_PRESENT;
+ default:
+ return TRUE;
+ }
+}
+
+
+uint32 si_get_sromctl(si_t *sih)
+{
+ chipcregs_t *cc;
+ uint origidx = si_coreidx(sih);
+ uint32 sromctl;
+ osl_t *osh = si_osh(sih);
+
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT((uintptr)cc);
+
+ sromctl = R_REG(osh, &cc->sromcontrol);
+
+ /* return to the original core */
+ si_setcoreidx(sih, origidx);
+ return sromctl;
+}
+
+int si_set_sromctl(si_t *sih, uint32 value)
+{
+ chipcregs_t *cc;
+ uint origidx = si_coreidx(sih);
+ osl_t *osh = si_osh(sih);
+ int ret = BCME_OK;
+
+ cc = si_setcoreidx(sih, SI_CC_IDX);
+ ASSERT((uintptr)cc);
+
+ /* get chipcommon rev */
+ if (si_corerev(sih) >= 32) {
+ /* SpromCtrl is only accessible if CoreCapabilities.SpromSupported and
+ * SpromPresent is 1.
+ */
+ if ((R_REG(osh, &cc->capabilities) & CC_CAP_SROM) != 0 &&
+ (R_REG(osh, &cc->sromcontrol) & SRC_PRESENT)) {
+ W_REG(osh, &cc->sromcontrol, value);
+ } else {
+ ret = BCME_NODEVICE;
+ }
+ } else {
+ ret = BCME_UNSUPPORTED;
+ }
+
+ /* return to the original core */
+ si_setcoreidx(sih, origidx);
+
+ return ret;
+}
+
+uint
+si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val)
+{
+ uint origidx, intr_val = 0;
+ uint ret_val;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+ origidx = si_coreidx(sih);
+
+ INTR_OFF(sii, intr_val);
+ si_setcoreidx(sih, coreidx);
+
+ ret_val = si_wrapperreg(sih, offset, mask, val);
+
+ /* return to the original core */
+ si_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, intr_val);
+ return ret_val;
+}
+
+
+/* Clean up the host-armed timer when the ARM has been halted without a
+ * chance to clean up its own resources. If it is left armed, an interrupt
+ * from a software timer can still request the HT clock while the ARM is halted.
+ */ +uint32 +si_pmu_res_req_timer_clr(si_t *sih) +{ + uint32 mask; + + mask = PRRT_REQ_ACTIVE | PRRT_INTEN | PRRT_HT_REQ; + if (CHIPID(sih->chip) != BCM4328_CHIP_ID) + mask <<= 14; + /* clear mask bits */ + pmu_corereg(sih, SI_CC_IDX, res_req_timer, mask, 0); + /* readback to ensure write completes */ + return pmu_corereg(sih, SI_CC_IDX, res_req_timer, 0, 0); +} + +/** turn on/off rfldo */ +void +si_pmu_rfldo(si_t *sih, bool on) +{ +} + + +#ifdef SURVIVE_PERST_ENAB +static uint32 +si_pcie_survive_perst(si_t *sih, uint32 mask, uint32 val) +{ + si_info_t *sii; + + sii = SI_INFO(sih); + + if (!PCIE(sii)) + return (0); + + return pcie_survive_perst(sii->pch, mask, val); +} + +static void +si_watchdog_reset(si_t *sih) +{ + uint32 i; + + /* issue a watchdog reset */ + pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, 2, 2); + /* do busy wait for 20ms */ + for (i = 0; i < 2000; i++) { + OSL_DELAY(10); + } +} +#endif /* SURVIVE_PERST_ENAB */ + +void +si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 sperst_val) +{ +#ifdef SURVIVE_PERST_ENAB + if (BUSTYPE(sih->bustype) != PCI_BUS) + return; + + if ((CHIPID(sih->chip) != BCM4360_CHIP_ID && CHIPID(sih->chip) != BCM4352_CHIP_ID) || + (CHIPREV(sih->chiprev) >= 4)) + return; + + if (reset) { + si_info_t *sii = SI_INFO(sih); + uint32 bar0win, bar0win_after; + + /* save the bar0win */ + bar0win = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); + + si_watchdog_reset(sih); + + bar0win_after = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32)); + if (bar0win_after != bar0win) { + SI_ERROR(("%s: bar0win before %08x, bar0win after %08x\n", + __FUNCTION__, bar0win, bar0win_after)); + OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32), bar0win); + } + } + if (sperst_mask) { + /* enable survive perst */ + si_pcie_survive_perst(sih, sperst_mask, sperst_val); + } +#endif /* SURVIVE_PERST_ENAB */ +} + +void +si_pcie_ltr_war(si_t *sih) +{ +} + +void +si_pcie_hw_LTR_war(si_t *sih) +{ +} + +void +si_pciedev_reg_pm_clk_period(si_t *sih) +{ +} + +void +si_pciedev_crwlpciegen2(si_t *sih) +{ +} + +void +si_pcie_prep_D3(si_t *sih, bool enter_D3) +{ +} + + + +void +si_pll_sr_reinit(si_t *sih) +{ +} + +void +si_pll_closeloop(si_t *sih) +{ +#if defined(SAVERESTORE) + uint32 data; + + /* disable PLL open loop operation */ + switch (CHIPID(sih->chip)) { +#ifdef SAVERESTORE + case BCM43430_CHIP_ID: + if (SR_ENAB() && sr_isenab(sih)) { + /* read back the pll openloop state */ + data = si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8, 0, 0); + /* current mode is openloop (possible POR) */ + if ((data & PMU1_PLLCTL8_OPENLOOP_MASK) != 0) { + si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8, + PMU1_PLLCTL8_OPENLOOP_MASK, 0); + si_pmu_pllupd(sih); + } + } + break; +#endif /* SAVERESTORE */ + default: + /* any unsupported chip bail */ + return; + } +#endif +} diff --git a/drivers/net/wireless/bcmdhd/siutils_priv.h b/drivers/net/wireless/bcmdhd/siutils_priv.h new file mode 100644 index 000000000000..090d7913631c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/siutils_priv.h @@ -0,0 +1,290 @@ +/* + * Include file private to the SOC Interconnect support files. 
+ * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: siutils_priv.h 520760 2014-12-15 00:54:16Z $ + */ + +#ifndef _siutils_priv_h_ +#define _siutils_priv_h_ + +#define SI_ERROR(args) + +#define SI_MSG(args) + +#ifdef BCMDBG_SI +#define SI_VMSG(args) printf args +#else +#define SI_VMSG(args) +#endif + +#define IS_SIM(chippkg) ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID)) + +typedef uint32 (*si_intrsoff_t)(void *intr_arg); +typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg); +typedef bool (*si_intrsenabled_t)(void *intr_arg); + + +#define SI_GPIO_MAX 16 + +typedef struct gci_gpio_item { + void *arg; + uint8 gci_gpio; + uint8 status; + gci_gpio_handler_t handler; + struct gci_gpio_item *next; +} gci_gpio_item_t; + + +typedef struct si_cores_info { + void *regs[SI_MAXCORES]; /* other regs va */ + + uint coreid[SI_MAXCORES]; /* id of each core */ + uint32 coresba[SI_MAXCORES]; /* backplane address of each core */ + void *regs2[SI_MAXCORES]; /* va of each core second register set (usbh20) */ + uint32 coresba2[SI_MAXCORES]; /* address of each core second register set (usbh20) */ + uint32 coresba_size[SI_MAXCORES]; /* backplane address space size */ + uint32 coresba2_size[SI_MAXCORES]; /* second address space size */ + + void *wrappers[SI_MAXCORES]; /* other cores wrapper va */ + uint32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */ + + void *wrappers2[SI_MAXCORES]; /* other cores wrapper va */ + uint32 wrapba2[SI_MAXCORES]; /* address of controlling wrapper */ + + uint32 cia[SI_MAXCORES]; /* erom cia entry for each core */ + uint32 cib[SI_MAXCORES]; /* erom cia entry for each core */ +} si_cores_info_t; + +/* misc si info needed by some of the routines */ +typedef struct si_info { + struct si_pub pub; /* back plane public state (must be first field) */ + + void *osh; /* osl os handle */ + void *sdh; /* bcmsdh handle */ + + uint dev_coreid; /* the core provides driver functions */ + void *intr_arg; /* interrupt callback function arg */ + si_intrsoff_t intrsoff_fn; /* turns chip interrupts off */ + si_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */ + si_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */ + + void *pch; /* PCI/E core handle */ + + bool memseg; /* flag to toggle MEM_SEG register */ + + char *vars; + uint varsz; + + void *curmap; /* current regs va */ + + uint curidx; /* current core index */ + uint numcores; /* # discovered 
cores */
+
+ void *curwrap; /* current wrapper va */
+
+ uint32 oob_router; /* oob router registers for axi */
+
+ void *cores_info;
+ gci_gpio_item_t *gci_gpio_head; /* gci gpio interrupts head */
+ uint chipnew; /* new chip number */
+ uint second_bar0win; /* Backplane region */
+ uint num_br; /* # discovered bridges */
+ uint32 br_wrapba[SI_MAXBR]; /* address of bridge controlling wrapper */
+ uint32 xtalfreq;
+} si_info_t;
+
+
+#define SI_INFO(sih) ((si_info_t *)(uintptr)sih)
+
+#define GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
+ ISALIGNED((x), SI_CORE_SIZE))
+#define GOODREGS(regs) ((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE))
+#define BADCOREADDR 0
+#define GOODIDX(idx) (((uint)idx) < SI_MAXCORES)
+#define NOREV -1 /* Invalid rev */
+
+#define PCI(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+ ((si)->pub.buscoretype == PCI_CORE_ID))
+
+#define PCIE_GEN1(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+ ((si)->pub.buscoretype == PCIE_CORE_ID))
+
+#define PCIE_GEN2(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \
+ ((si)->pub.buscoretype == PCIE2_CORE_ID))
+
+#define PCIE(si) (PCIE_GEN1(si) || PCIE_GEN2(si))
+
+#define PCMCIA(si) ((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE))
+
+/* Newer chips can access PCI/PCIE and CC core without requiring to change
+ * PCI BAR0 WIN
+ */
+#define SI_FAST(si) (PCIE(si) || (PCI(si) && ((si)->pub.buscorerev >= 13)))
+
+#define PCIEREGS(si) (((char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET))
+#define CCREGS_FAST(si) (((char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET))
+
+/*
+ * Macros to disable/restore function core (D11, ENET, ILINE20, etc.) interrupts
+ * before/after core switching, to avoid invalid register accesses inside the ISR.
+ */
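+
+/*
+ * Usage sketch: INTR_OFF/INTR_RESTORE below reference a local "cores_info"
+ * variable in the calling scope, which is why the si_xxx() routines declare
+ * one even when it is otherwise unused. The canonical pattern around a core
+ * switch (illustrative; in this driver's convention a si_corereg() mask of 0
+ * performs a plain read):
+ */
+#if 0 /* illustrative example only */
+static uint32 example_cc_read(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint origidx, intr_val = 0;
+ uint32 val = 0;
+
+ INTR_OFF(sii, intr_val); /* block function core interrupts */
+ origidx = si_coreidx(sih); /* remember the current core */
+ if (si_setcore(sih, CC_CORE_ID, 0) != NULL)
+ val = si_corereg(sih, SI_CC_IDX, 0, 0, 0); /* read chipcommon reg 0 */
+ si_setcoreidx(sih, origidx); /* switch back */
+ INTR_RESTORE(sii, intr_val); /* re-enable interrupts */
+ return val;
+}
+#endif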
+#define INTR_OFF(si, intr_val) \
+ if ((si)->intrsoff_fn && (cores_info)->coreid[(si)->curidx] == (si)->dev_coreid) { \
+ intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
+#define INTR_RESTORE(si, intr_val) \
+ if ((si)->intrsrestore_fn && (cores_info)->coreid[(si)->curidx] == (si)->dev_coreid) { \
+ (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
+
+/* dynamic clock control defines */
+#define LPOMINFREQ 25000 /* low power oscillator min */
+#define LPOMAXFREQ 43000 /* low power oscillator max */
+#define XTALMINFREQ 19800000 /* 20 MHz - 1% */
+#define XTALMAXFREQ 20200000 /* 20 MHz + 1% */
+#define PCIMINFREQ 25000000 /* 25 MHz */
+#define PCIMAXFREQ 34000000 /* 33 MHz + fudge */
+
+#define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */
+#define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */
+
+/* Force fast clock for 4360b0 */
+#define PCI_FORCEHT(si) \
+ (((PCIE_GEN1(si)) && (CHIPID(si->pub.chip) == BCM4311_CHIP_ID) && \
+ ((CHIPREV(si->pub.chiprev) <= 1))) || \
+ ((PCI(si) || PCIE_GEN1(si)) && (CHIPID(si->pub.chip) == BCM4321_CHIP_ID)) || \
+ (PCIE_GEN1(si) && (CHIPID(si->pub.chip) == BCM4716_CHIP_ID)) || \
+ (PCIE_GEN1(si) && (CHIPID(si->pub.chip) == BCM4748_CHIP_ID)))
+
+/* GPIO Based LED powersave defines */
+#define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */
+#define DEFAULT_GPIO_OFFTIME 90 /* Default: 90% off */
+
+#ifndef DEFAULT_GPIOTIMERVAL
+#define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
+#endif
+
+/* Silicon Backplane externs */
+extern void sb_scan(si_t *sih, void *regs, uint devid);
+extern uint sb_coreid(si_t *sih);
+extern uint sb_intflag(si_t *sih);
+extern uint sb_flag(si_t *sih);
+extern void sb_setint(si_t *sih, int siflag);
+extern uint sb_corevendor(si_t *sih);
+extern uint sb_corerev(si_t *sih);
+extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint32 *sb_corereg_addr(si_t *sih, uint coreidx, uint regoff);
+extern bool sb_iscoreup(si_t *sih);
+extern void *sb_setcoreidx(si_t *sih, uint coreidx);
+extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
+extern uint32 sb_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern void sb_commit(si_t *sih);
+extern uint32 sb_base(uint32 admatch);
+extern uint32 sb_size(uint32 admatch);
+extern void sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void sb_core_disable(si_t *sih, uint32 bits);
+extern uint32 sb_addrspace(si_t *sih, uint asidx);
+extern uint32 sb_addrspacesize(si_t *sih, uint asidx);
+extern int sb_numaddrspaces(si_t *sih);
+
+extern uint32 sb_set_initiator_to(si_t *sih, uint32 to, uint idx);
+
+extern bool sb_taclear(si_t *sih, bool details);
+
+#if defined(BCMDBG_PHYDUMP)
+extern void sb_dumpregs(si_t *sih, struct bcmstrbuf *b);
+#endif
+
+/* Wake-on-wireless-LAN (WOWL) */
+extern bool sb_pci_pmecap(si_t *sih);
+struct osl_info;
+extern bool sb_pci_fastpmecap(struct osl_info *osh);
+extern bool sb_pci_pmeclr(si_t *sih);
+extern void sb_pci_pmeen(si_t *sih);
+extern uint sb_pcie_readreg(void *sih, uint addrtype, uint offset);
+
+/* AMBA Interconnect exported externs */
+extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
+ void *sdh, char **vars, uint *varsz);
+extern si_t *ai_kattach(osl_t *osh);
+extern void ai_scan(si_t *sih, void *regs, uint devid);
+
+extern uint ai_flag(si_t *sih);
+extern uint ai_flag_alt(si_t *sih);
+extern void ai_setint(si_t *sih, int siflag);
+extern uint ai_coreidx(si_t *sih);
+extern
uint ai_corevendor(si_t *sih); +extern uint ai_corerev(si_t *sih); +extern uint32 *ai_corereg_addr(si_t *sih, uint coreidx, uint regoff); +extern bool ai_iscoreup(si_t *sih); +extern void *ai_setcoreidx(si_t *sih, uint coreidx); +extern void *ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx); +extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val); +extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); +extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val); +extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); +extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits); +extern void ai_d11rsdb_core_reset(si_t *sih, uint32 bits, + uint32 resetbits, void *p, void *s); +extern void ai_d11rsdb_core1_alt_reg_clk_en(si_t *sih); +extern void ai_d11rsdb_core1_alt_reg_clk_dis(si_t *sih); + +extern void ai_core_disable(si_t *sih, uint32 bits); +extern void ai_d11rsdb_core_disable(const si_info_t *sii, uint32 bits, + aidmp_t *pmacai, aidmp_t *smacai); +extern int ai_numaddrspaces(si_t *sih); +extern uint32 ai_addrspace(si_t *sih, uint asidx); +extern uint32 ai_addrspacesize(si_t *sih, uint asidx); +extern void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size); +extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val); +extern void ai_enable_backplane_timeouts(si_t *sih); +extern void ai_clear_backplane_to(si_t *sih); + +#if defined(BCMDBG_PHYDUMP) +extern void ai_dumpregs(si_t *sih, struct bcmstrbuf *b); +#endif + + +#define ub_scan(a, b, c) do {} while (0) +#define ub_flag(a) (0) +#define ub_setint(a, b) do {} while (0) +#define ub_coreidx(a) (0) +#define ub_corevendor(a) (0) +#define ub_corerev(a) (0) +#define ub_iscoreup(a) (0) +#define ub_setcoreidx(a, b) (0) +#define ub_core_cflags(a, b, c) (0) +#define ub_core_cflags_wo(a, b, c) do {} while (0) +#define ub_core_sflags(a, b, c) (0) +#define ub_corereg(a, b, c, d, e) (0) +#define ub_core_reset(a, b, c) do {} while (0) +#define ub_core_disable(a, b) do {} while (0) +#define ub_numaddrspaces(a) (0) +#define ub_addrspace(a, b) (0) +#define ub_addrspacesize(a, b) (0) +#define ub_view(a, b) do {} while (0) +#define ub_dumpregs(a, b) do {} while (0) + +#endif /* _siutils_priv_h_ */ diff --git a/drivers/net/wireless/bcmdhd/uamp_api.h b/drivers/net/wireless/bcmdhd/uamp_api.h new file mode 100644 index 000000000000..0d04a9d86037 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/uamp_api.h @@ -0,0 +1,181 @@ +/* + * Name: uamp_api.h + * + * Description: Universal AMP API + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <>
+ *
+ * $Id: uamp_api.h 514727 2014-11-12 03:02:48Z $
+ *
+ */
+
+
+#ifndef UAMP_API_H
+#define UAMP_API_H
+
+
+#include "typedefs.h"
+
+
+/*****************************************************************************
+** Constant and Type Definitions
+******************************************************************************
+*/
+
+#define BT_API
+
+/* Types. */
+typedef bool BOOLEAN;
+typedef uint8 UINT8;
+typedef uint16 UINT16;
+
+
+/* UAMP identifiers */
+#define UAMP_ID_1 1
+#define UAMP_ID_2 2
+typedef UINT8 tUAMP_ID;
+
+/* UAMP event ids (used by UAMP_CBACK) */
+#define UAMP_EVT_RX_READY 0 /* Data from AMP controller is ready to be read */
+#define UAMP_EVT_CTLR_REMOVED 1 /* Controller removed */
+#define UAMP_EVT_CTLR_READY 2 /* Controller added/ready */
+typedef UINT8 tUAMP_EVT;
+
+
+/* UAMP Channels */
+#define UAMP_CH_HCI_CMD 0 /* HCI Command channel */
+#define UAMP_CH_HCI_EVT 1 /* HCI Event channel */
+#define UAMP_CH_HCI_DATA 2 /* HCI ACL Data channel */
+typedef UINT8 tUAMP_CH;
+
+/* tUAMP_EVT_DATA: union for event-specific data, used by UAMP_CBACK */
+typedef union {
+ tUAMP_CH channel; /* UAMP_EVT_RX_READY: channel for which rx occurred */
+} tUAMP_EVT_DATA;
+
+
+/*****************************************************************************
+**
+** Function: UAMP_CBACK
+**
+** Description: Callback for events. Register callback using UAMP_Init.
+**
+** Parameters amp_id: AMP device identifier that generated the event
+** amp_evt: event id
+** p_amp_evt_data: pointer to event-specific data
+**
+******************************************************************************
+*/
+typedef void (*tUAMP_CBACK)(tUAMP_ID amp_id, tUAMP_EVT amp_evt, tUAMP_EVT_DATA *p_amp_evt_data);
+
+/*****************************************************************************
+** external function declarations
+******************************************************************************
+*/
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/*****************************************************************************
+**
+** Function: UAMP_Init
+**
+** Description: Initialize UAMP driver
+**
+** Parameters p_cback: Callback function for UAMP event notification
+**
+******************************************************************************
+*/
+BT_API BOOLEAN UAMP_Init(tUAMP_CBACK p_cback);
+
+
+/*****************************************************************************
+**
+** Function: UAMP_Open
+**
+** Description: Open connection to local AMP device.
+**
+** Parameters amp_id: Application specific AMP identifier. This value
+** will be included in AMP messages sent to the
+** BTU task, to identify source of the message
+**
+******************************************************************************
+*/
+BT_API BOOLEAN UAMP_Open(tUAMP_ID amp_id);
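+
+/*
+ * Usage sketch (illustrative; the buffer size is an arbitrary example
+ * choice): a minimal client registers a callback with UAMP_Init(), opens
+ * the controller, and services UAMP_EVT_RX_READY by reading the channel
+ * named in the event data.
+ */
+#if 0 /* illustrative example only */
+static void example_uamp_cback(tUAMP_ID amp_id, tUAMP_EVT amp_evt,
+ tUAMP_EVT_DATA *p_amp_evt_data)
+{
+ UINT8 buf[1024];
+
+ if (amp_evt == UAMP_EVT_RX_READY) {
+ /* drain the channel that signalled readiness */
+ (void)UAMP_Read(amp_id, buf, sizeof(buf), p_amp_evt_data->channel);
+ }
+}
+
+static BOOLEAN example_uamp_start(void)
+{
+ if (!UAMP_Init(example_uamp_cback))
+ return FALSE;
+ return UAMP_Open(UAMP_ID_1);
+}
+#endif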
+
+/*****************************************************************************
+**
+** Function: UAMP_Close
+**
+** Description: Close connection to local AMP device.
+**
+** Parameters amp_id: Application specific AMP identifier.
+**
+******************************************************************************
+*/
+BT_API void UAMP_Close(tUAMP_ID amp_id);
+
+
+/*****************************************************************************
+**
+** Function: UAMP_Write
+**
+** Description: Send buffer to AMP device. Frees GKI buffer when done.
+**
+**
+** Parameters: amp_id: AMP identifier.
+** p_buf: pointer to buffer to write
+** num_bytes: number of bytes to write
+** channel: UAMP_CH_HCI_ACL, or UAMP_CH_HCI_CMD
+**
+** Returns: number of bytes written
+**
+******************************************************************************
+*/
+BT_API UINT16 UAMP_Write(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 num_bytes, tUAMP_CH channel);
+
+/*****************************************************************************
+**
+** Function: UAMP_Read
+**
+** Description: Read incoming data from AMP. Call after receiving a
+** UAMP_EVT_RX_READY callback event.
+**
+** Parameters: amp_id: AMP identifier.
+** p_buf: pointer to buffer for holding incoming AMP data
+** buf_size: size of p_buf
+** channel: UAMP_CH_HCI_ACL, or UAMP_CH_HCI_EVT
+**
+** Returns: number of bytes read
+**
+******************************************************************************
+*/
+BT_API UINT16 UAMP_Read(tUAMP_ID amp_id, UINT8 *p_buf, UINT16 buf_size, tUAMP_CH channel);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* UAMP_API_H */
diff --git a/drivers/net/wireless/bcmdhd/wl_android.c b/drivers/net/wireless/bcmdhd/wl_android.c
new file mode 100644
index 000000000000..a1f4ba77fac2
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_android.c
@@ -0,0 +1,3357 @@
+/*
+ * Linux cfg80211 driver - Android related functions
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ * + * + * <> + * + * $Id: wl_android.c 608788 2015-12-29 10:59:33Z $ + */ + +#include +#include +#include +#ifdef CONFIG_COMPAT +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef PNO_SUPPORT +#include +#endif +#ifdef BCMSDIO +#include +#endif +#ifdef WL_CFG80211 +#include +#endif +#ifdef WL_NAN +#include +#endif /* WL_NAN */ +#ifdef DHDTCPACK_SUPPRESS +#include +#endif /* DHDTCPACK_SUPPRESS */ + +/* + * Android private command strings, PLEASE define new private commands here + * so they can be updated easily in the future (if needed) + */ + +#define CMD_START "START" +#define CMD_STOP "STOP" +#define CMD_SCAN_ACTIVE "SCAN-ACTIVE" +#define CMD_SCAN_PASSIVE "SCAN-PASSIVE" +#define CMD_RSSI "RSSI" +#define CMD_LINKSPEED "LINKSPEED" +#define CMD_RXFILTER_START "RXFILTER-START" +#define CMD_RXFILTER_STOP "RXFILTER-STOP" +#define CMD_RXFILTER_ADD "RXFILTER-ADD" +#define CMD_RXFILTER_REMOVE "RXFILTER-REMOVE" +#define CMD_BTCOEXSCAN_START "BTCOEXSCAN-START" +#define CMD_BTCOEXSCAN_STOP "BTCOEXSCAN-STOP" +#define CMD_BTCOEXMODE "BTCOEXMODE" +#define CMD_SETSUSPENDOPT "SETSUSPENDOPT" +#define CMD_SETSUSPENDMODE "SETSUSPENDMODE" +#define CMD_P2P_DEV_ADDR "P2P_DEV_ADDR" +#define CMD_SETFWPATH "SETFWPATH" +#define CMD_SETBAND "SETBAND" +#define CMD_GETBAND "GETBAND" +#define CMD_COUNTRY "COUNTRY" +#define CMD_P2P_SET_NOA "P2P_SET_NOA" +#if !defined WL_ENABLE_P2P_IF +#define CMD_P2P_GET_NOA "P2P_GET_NOA" +#endif /* WL_ENABLE_P2P_IF */ +#define CMD_P2P_SD_OFFLOAD "P2P_SD_" +#define CMD_P2P_LISTEN_OFFLOAD "P2P_LO_" +#define CMD_P2P_SET_PS "P2P_SET_PS" +#define CMD_P2P_ECSA "P2P_ECSA" +#define CMD_SET_AP_WPS_P2P_IE "SET_AP_WPS_P2P_IE" +#define CMD_SETROAMMODE "SETROAMMODE" +#define CMD_SETIBSSBEACONOUIDATA "SETIBSSBEACONOUIDATA" +#define CMD_MIRACAST "MIRACAST" +#ifdef WL_NAN +#define CMD_NAN "NAN_" +#endif /* WL_NAN */ +#define CMD_COUNTRY_DELIMITER "/" +#ifdef WL11ULB +#define CMD_ULB_MODE "ULB_MODE" +#define CMD_ULB_BW "ULB_BW" +#endif /* WL11ULB */ + +#if defined(WL_SUPPORT_AUTO_CHANNEL) +#define CMD_GET_BEST_CHANNELS "GET_BEST_CHANNELS" +#endif /* WL_SUPPORT_AUTO_CHANNEL */ + +#define CMD_80211_MODE "MODE" /* 802.11 mode a/b/g/n/ac */ +#define CMD_CHANSPEC "CHANSPEC" +#define CMD_DATARATE "DATARATE" +#define CMD_ASSOC_CLIENTS "ASSOCLIST" +#define CMD_SET_CSA "SETCSA" +#ifdef WL_SUPPORT_AUTO_CHANNEL +#define CMD_SET_HAPD_AUTO_CHANNEL "HAPD_AUTO_CHANNEL" +#endif /* WL_SUPPORT_AUTO_CHANNEL */ +#ifdef CUSTOMER_HW4_PRIVATE_CMD +#ifdef SUPPORT_SET_LPC +#define CMD_HAPD_LPC_ENABLED "HAPD_LPC_ENABLED" +#endif /* SUPPORT_SET_LPC */ +#ifdef SUPPORT_TRIGGER_HANG_EVENT +#define CMD_TEST_FORCE_HANG "TEST_FORCE_HANG" +#endif /* SUPPORT_TRIGGER_HANG_EVENT */ +#ifdef TEST_TX_POWER_CONTROL +#define CMD_TEST_SET_TX_POWER "TEST_SET_TX_POWER" +#define CMD_TEST_GET_TX_POWER "TEST_GET_TX_POWER" +#endif /* TEST_TX_POWER_CONTROL */ +#define CMD_SARLIMIT_TX_CONTROL "SET_TX_POWER_CALLING" +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ +#define CMD_KEEP_ALIVE "KEEPALIVE" + + +#ifdef PNO_SUPPORT +#define CMD_PNOSSIDCLR_SET "PNOSSIDCLR" +#define CMD_PNOSETUP_SET "PNOSETUP " +#define CMD_PNOENABLE_SET "PNOFORCE" +#define CMD_PNODEBUG_SET "PNODEBUG" +#define CMD_WLS_BATCHING "WLS_BATCHING" +#endif /* PNO_SUPPORT */ + +#define CMD_HAPD_MAC_FILTER "HAPD_MAC_FILTER" + +#ifdef CUSTOMER_HW4_PRIVATE_CMD + + +#if defined(SUPPORT_RANDOM_MAC_SCAN) +#define ENABLE_RANDOM_MAC "ENABLE_RANDOM_MAC" +#define DISABLE_RANDOM_MAC "DISABLE_RANDOM_MAC" +#endif /* SUPPORT_RANDOM_MAC_SCAN */ + 
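+/*
+ * Illustrative note: each CMD_* string above is matched by prefix against
+ * the buffer user space hands in through the driver's private ioctl,
+ * typically like this (sketch):
+ *
+ *	if (strnicmp(command, CMD_SETSUSPENDMODE,
+ *			strlen(CMD_SETSUSPENDMODE)) == 0)
+ *		bytes_written = wl_android_set_suspendmode(net, command, priv_cmd.total_len);
+ */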
+ +#define CMD_CHANGE_RL "CHANGE_RL" +#define CMD_RESTORE_RL "RESTORE_RL" + +#define CMD_SET_RMC_ENABLE "SETRMCENABLE" +#define CMD_SET_RMC_TXRATE "SETRMCTXRATE" +#define CMD_SET_RMC_ACTPERIOD "SETRMCACTIONPERIOD" +#define CMD_SET_RMC_IDLEPERIOD "SETRMCIDLEPERIOD" +#define CMD_SET_RMC_LEADER "SETRMCLEADER" +#define CMD_SET_RMC_EVENT "SETRMCEVENT" + +#define CMD_SET_SCSCAN "SETSINGLEANT" +#define CMD_GET_SCSCAN "GETSINGLEANT" + +/* FCC_PWR_LIMIT_2G */ +#define CUSTOMER_HW4_ENABLE 0 +#define CUSTOMER_HW4_DISABLE -1 +#define CUSTOMER_HW4_EN_CONVERT(i) (i += 1) + +#ifdef WLTDLS +#define CMD_TDLS_RESET "TDLS_RESET" +#endif /* WLTDLS */ + +#ifdef IPV6_NDO_SUPPORT +#define CMD_NDRA_LIMIT "NDRA_LIMIT" +#endif /* IPV6_NDO_SUPPORT */ + +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ + + +#define CMD_ROAM_OFFLOAD "SETROAMOFFLOAD" +#define CMD_ROAM_OFFLOAD_APLIST "SETROAMOFFLAPLIST" +#define CMD_INTERFACE_CREATE "INTERFACE_CREATE" +#define CMD_INTERFACE_DELETE "INTERFACE_DELETE" + +#if defined(DHD_ENABLE_BIGDATA_LOGGING) +#define CMD_GET_BSS_INFO "GETBSSINFO" +#define CMD_GET_ASSOC_REJECT_INFO "GETASSOCREJECTINFO" +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + +#ifdef P2PRESP_WFDIE_SRC +#define CMD_P2P_SET_WFDIE_RESP "P2P_SET_WFDIE_RESP" +#define CMD_P2P_GET_WFDIE_RESP "P2P_GET_WFDIE_RESP" +#endif /* P2PRESP_WFDIE_SRC */ + +#define CMD_DFS_AP_MOVE "DFS_AP_MOVE" +#define CMD_WBTEXT_ENABLE "WBTEXT_ENABLE" +#define CMD_WBTEXT_PROFILE_CONFIG "WBTEXT_PROFILE_CONFIG" +#define CMD_WBTEXT_WEIGHT_CONFIG "WBTEXT_WEIGHT_CONFIG" +#define CMD_WBTEXT_TABLE_CONFIG "WBTEXT_TABLE_CONFIG" +#define CMD_WBTEXT_DELTA_CONFIG "WBTEXT_DELTA_CONFIG" + +#ifdef WLWFDS +#define CMD_ADD_WFDS_HASH "ADD_WFDS_HASH" +#define CMD_DEL_WFDS_HASH "DEL_WFDS_HASH" +#endif /* WLWFDS */ + +#ifdef SET_RPS_CPUS +#define CMD_RPSMODE "RPSMODE" +#endif /* SET_RPS_CPUS */ + +#ifdef BT_WIFI_HANDOVER +#define CMD_TBOW_TEARDOWN "TBOW_TEARDOWN" +#endif /* BT_WIFI_HANDOVER */ + +#define CMD_MURX_BFE_CAP "MURX_BFE_CAP" + +/* miracast related definition */ +#define MIRACAST_MODE_OFF 0 +#define MIRACAST_MODE_SOURCE 1 +#define MIRACAST_MODE_SINK 2 + +#ifndef MIRACAST_AMPDU_SIZE +#define MIRACAST_AMPDU_SIZE 8 +#endif + +#ifndef MIRACAST_MCHAN_ALGO +#define MIRACAST_MCHAN_ALGO 1 +#endif + +#ifndef MIRACAST_MCHAN_BW +#define MIRACAST_MCHAN_BW 25 +#endif + +#ifdef CONNECTION_STATISTICS +#define CMD_GET_CONNECTION_STATS "GET_CONNECTION_STATS" + +struct connection_stats { + u32 txframe; + u32 txbyte; + u32 txerror; + u32 rxframe; + u32 rxbyte; + u32 txfail; + u32 txretry; + u32 txretrie; + u32 txrts; + u32 txnocts; + u32 txexptime; + u32 txrate; + u8 chan_idle; +}; +#endif /* CONNECTION_STATISTICS */ + +static LIST_HEAD(miracast_resume_list); +static u8 miracast_cur_mode; + +#ifdef DHD_LOG_DUMP +#define CMD_NEW_DEBUG_PRINT_DUMP "DEBUG_DUMP" +extern void dhd_schedule_log_dump(dhd_pub_t *dhdp); +extern int dhd_bus_mem_dump(dhd_pub_t *dhd); +#endif /* DHD_LOG_DUMP */ +#ifdef DHD_TRACE_WAKE_LOCK +extern void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp); +#endif /* DHD_TRACE_WAKE_LOCK */ + +struct io_cfg { + s8 *iovar; + s32 param; + u32 ioctl; + void *arg; + u32 len; + struct list_head list; +}; + +typedef struct _android_wifi_priv_cmd { + char *buf; + int used_len; + int total_len; +} android_wifi_priv_cmd; + +#ifdef CONFIG_COMPAT +typedef struct _compat_android_wifi_priv_cmd { + compat_caddr_t buf; + int used_len; + int total_len; +} compat_android_wifi_priv_cmd; +#endif /* CONFIG_COMPAT */ + +#if defined(BCMFW_ROAM_ENABLE) +#define CMD_SET_ROAMPREF "SET_ROAMPREF" + +#define 
MAX_NUM_SUITES 10
+#define WIDTH_AKM_SUITE 8
+#define JOIN_PREF_RSSI_LEN 0x02
+#define JOIN_PREF_RSSI_SIZE 4 /* RSSI pref header size in bytes */
+#define JOIN_PREF_WPA_HDR_SIZE 4 /* WPA pref header size in bytes */
+#define JOIN_PREF_WPA_TUPLE_SIZE 12 /* Tuple size in bytes */
+#define JOIN_PREF_MAX_WPA_TUPLES 16
+#define MAX_BUF_SIZE (JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE + \
+ (JOIN_PREF_WPA_TUPLE_SIZE * JOIN_PREF_MAX_WPA_TUPLES))
+#endif /* BCMFW_ROAM_ENABLE */
+
+
+/**
+ * Extern function declarations (TODO: move them to dhd_linux.h)
+ */
+int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
+int dhd_dev_init_ioctl(struct net_device *dev);
+#ifdef WL_CFG80211
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command);
+#else
+int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
+{ return 0; }
+int wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
+{ return 0; }
+int wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len)
+{ return 0; }
+#endif /* WL_CFG80211 */
+
+
+#ifdef ENABLE_4335BT_WAR
+extern int bcm_bt_lock(int cookie);
+extern void bcm_bt_unlock(int cookie);
+static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24; /* cookie is "WiFi" */
+#endif /* ENABLE_4335BT_WAR */
+
+extern bool ap_fw_loaded;
+extern char iface_name[IFNAMSIZ];
+
+/**
+ * Local (static) functions and variables
+ */
+
+/* Initialize g_wifi_on to 1 so dhd_bus_start will be called for the first
+ * time (only) in dhd_open; subsequent wifi on will be handled by
+ * wl_android_wifi_on
+ */
+static int g_wifi_on = TRUE;
+
+/**
+ * Local (static) function definitions
+ */
+
+#ifdef WLWFDS
+static int wl_android_set_wfds_hash(
+ struct net_device *dev, char *command, int total_len, bool enable)
+{
+ int error = 0;
+ wl_p2p_wfds_hash_t *wfds_hash = NULL;
+ char *smbuf = NULL;
+ smbuf = kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+
+ if (smbuf == NULL) {
+ DHD_ERROR(("%s: failed to allocate memory %d bytes\n",
+ __FUNCTION__, WLC_IOCTL_MAXLEN));
+ return -ENOMEM;
+ }
+
+ if (enable) {
+ wfds_hash = (wl_p2p_wfds_hash_t *)(command + strlen(CMD_ADD_WFDS_HASH) + 1);
+ error = wldev_iovar_setbuf(dev, "p2p_add_wfds_hash", wfds_hash,
+ sizeof(wl_p2p_wfds_hash_t), smbuf, WLC_IOCTL_MAXLEN, NULL);
+ }
+ else {
+ wfds_hash = (wl_p2p_wfds_hash_t *)(command + strlen(CMD_DEL_WFDS_HASH) + 1);
+ error = wldev_iovar_setbuf(dev, "p2p_del_wfds_hash", wfds_hash,
+ sizeof(wl_p2p_wfds_hash_t), smbuf, WLC_IOCTL_MAXLEN, NULL);
+ }
+
+ if (error) {
+ DHD_ERROR(("%s: failed to %s, error=%d\n", __FUNCTION__, command, error));
+ }
+
+ if (smbuf)
+ kfree(smbuf);
+ return error;
+}
+#endif /* WLWFDS */
+
+static int wl_android_get_link_speed(struct net_device *net, char *command, int total_len)
+{
+ int link_speed;
+ int bytes_written;
+ int error;
+
+ error = wldev_get_link_speed(net, &link_speed);
+ if (error)
+ return -1;
+
+ /* Convert Kbps to Android Mbps */
+ link_speed = link_speed / 1000;
+ bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed);
+ DHD_INFO(("%s: command result is %s\n", __FUNCTION__, command));
+ return bytes_written;
+}
+
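+/*
+ * Illustrative note: the GET-style handlers in this file follow one
+ * convention, visible in wl_android_get_link_speed() above: the reply is
+ * snprintf()'d into the same "command" buffer that carried the request,
+ * and the handler returns the number of bytes written (negative on error)
+ * for the private-ioctl dispatcher to copy back to user space.
+ */
+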
+static int wl_android_get_rssi(struct net_device *net, char *command, int total_len)
+{
+ wlc_ssid_t ssid = {0};
+ int bytes_written = 0;
+ int error = 0;
+ scb_val_t scbval;
+ char *delim = NULL;
+
+ delim = strchr(command, ' ');
+ /* In AP/GO mode the rssi command carries a station MAC address
+ * argument; in STA/GC mode it comes without an argument.
+ */
+ if (delim) {
+ /* AP/GO mode: parse the station address */
+ DHD_TRACE(("%s: cmd:%s\n", __FUNCTION__, delim));
+ /* skip the space after the command keyword */
+ delim++;
+ if (!(bcm_ether_atoe((delim), &scbval.ea)))
+ {
+ DHD_ERROR(("%s:address err\n", __FUNCTION__));
+ return -1;
+ }
+ scbval.val = htod32(0);
+ DHD_TRACE(("%s: address:"MACDBG, __FUNCTION__, MAC2STRDBG(scbval.ea.octet)));
+ }
+ else {
+ /* STA/GC mode */
+ memset(&scbval, 0, sizeof(scb_val_t));
+ }
+
+ error = wldev_get_rssi(net, &scbval);
+ if (error)
+ return -1;
+
+ error = wldev_get_ssid(net, &ssid);
+ if (error)
+ return -1;
+ if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) {
+ DHD_ERROR(("%s: wldev_get_ssid failed\n", __FUNCTION__));
+ } else {
+ memcpy(command, ssid.SSID, ssid.SSID_len);
+ bytes_written = ssid.SSID_len;
+ }
+ bytes_written += snprintf(&command[bytes_written], total_len - bytes_written,
+ " rssi %d", scbval.val);
+ DHD_TRACE(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written));
+ return bytes_written;
+}
+
+static int wl_android_set_suspendopt(struct net_device *dev, char *command, int total_len)
+{
+ int suspend_flag;
+ int ret_now;
+ int ret = 0;
+
+ suspend_flag = *(command + strlen(CMD_SETSUSPENDOPT) + 1) - '0';
+
+ if (suspend_flag != 0) {
+ suspend_flag = 1;
+ }
+ ret_now = net_os_set_suspend_disable(dev, suspend_flag);
+
+ if (ret_now != suspend_flag) {
+ if (!(ret = net_os_set_suspend(dev, ret_now, 1))) {
+ DHD_INFO(("%s: Suspend Flag %d -> %d\n",
+ __FUNCTION__, ret_now, suspend_flag));
+ } else {
+ DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+ }
+ }
+
+ return ret;
+}
+
+static int wl_android_set_suspendmode(struct net_device *dev, char *command, int total_len)
+{
+ int ret = 0;
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND) || !defined(DHD_USE_EARLYSUSPEND)
+ int suspend_flag;
+
+ suspend_flag = *(command + strlen(CMD_SETSUSPENDMODE) + 1) - '0';
+ if (suspend_flag != 0)
+ suspend_flag = 1;
+
+ if (!(ret = net_os_set_suspend(dev, suspend_flag, 0)))
+ DHD_INFO(("%s: Suspend Mode %d\n", __FUNCTION__, suspend_flag));
+ else
+ DHD_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+#endif
+
+ return ret;
+}
+
+int wl_android_get_80211_mode(struct net_device *dev, char *command, int total_len)
+{
+ uint8 mode[4];
+ int error = 0;
+ int bytes_written = 0;
+
+ error = wldev_get_mode(dev, mode);
+ if (error)
+ return -1;
+
+ DHD_INFO(("%s: mode:%s\n", __FUNCTION__, mode));
+ bytes_written = snprintf(command, total_len, "%s %s", CMD_80211_MODE, mode);
+ DHD_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command));
+ return bytes_written;
+
+}
+
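+/*
+ * Worked example (illustrative; assumes the usual CH_10MHZ_APART == 2 and
+ * CH_20MHZ_APART == 4) for the decoding done by wl_android_get_chanspec()
+ * below: an 80 MHz chanspec with center channel 42 yields, per control
+ * sideband,
+ *	LL: 42 - (2 + 4) = 36	LU: 42 - 2 = 40
+ *	UL: 42 + 2 = 44		UU: 42 + (2 + 4) = 48
+ */
+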
+extern chanspec_t
+wl_chspec_driver_to_host(chanspec_t chanspec);
+int wl_android_get_chanspec(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int bytes_written = 0;
+ int chsp = {0};
+ uint16 band = 0;
+ uint16 bw = 0;
+ uint16 channel = 0;
+ u32 sb = 0;
+ chanspec_t chanspec;
+
+ /* command is
+ * driver chanspec
+ */
+ error = wldev_iovar_getint(dev, "chanspec", &chsp);
+ if (error)
+ return -1;
+
+ chanspec = wl_chspec_driver_to_host(chsp);
+ DHD_INFO(("%s:return value of chanspec:%x\n", __FUNCTION__, chanspec));
+
+ channel = chanspec & WL_CHANSPEC_CHAN_MASK;
+ band = chanspec & WL_CHANSPEC_BAND_MASK;
+ bw = chanspec & WL_CHANSPEC_BW_MASK;
+
+ DHD_INFO(("%s:channel:%d band:%d bandwidth:%d\n",
+ __FUNCTION__, channel, band, bw));
+
+ if (bw == WL_CHANSPEC_BW_80)
+ bw = WL_CH_BANDWIDTH_80MHZ;
+ else if (bw == WL_CHANSPEC_BW_40)
+ bw = WL_CH_BANDWIDTH_40MHZ;
+ else if (bw == WL_CHANSPEC_BW_20)
+ bw = WL_CH_BANDWIDTH_20MHZ;
+ else
+ bw = WL_CH_BANDWIDTH_20MHZ;
+
+ if (bw == WL_CH_BANDWIDTH_40MHZ) {
+ if (CHSPEC_SB_UPPER(chanspec)) {
+ channel += CH_10MHZ_APART;
+ } else {
+ channel -= CH_10MHZ_APART;
+ }
+ }
+ else if (bw == WL_CH_BANDWIDTH_80MHZ) {
+ sb = chanspec & WL_CHANSPEC_CTL_SB_MASK;
+ if (sb == WL_CHANSPEC_CTL_SB_LL) {
+ channel -= (CH_10MHZ_APART + CH_20MHZ_APART);
+ } else if (sb == WL_CHANSPEC_CTL_SB_LU) {
+ channel -= CH_10MHZ_APART;
+ } else if (sb == WL_CHANSPEC_CTL_SB_UL) {
+ channel += CH_10MHZ_APART;
+ } else {
+ /* WL_CHANSPEC_CTL_SB_UU */
+ channel += (CH_10MHZ_APART + CH_20MHZ_APART);
+ }
+ }
+ bytes_written = snprintf(command, total_len, "%s channel %d band %s bw %d", CMD_CHANSPEC,
+ channel, band == WL_CHANSPEC_BAND_5G ? "5G":"2G", bw);
+
+ DHD_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command));
+ return bytes_written;
+
+}
+
+/* Returns the current datarate; rates reported by firmware are in units of 500 kbps. */
+int wl_android_get_datarate(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int datarate = 0;
+ int bytes_written = 0;
+
+ error = wldev_get_datarate(dev, &datarate);
+ if (error)
+ return -1;
+
+ DHD_INFO(("%s:datarate:%d\n", __FUNCTION__, datarate));
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_DATARATE, (datarate/2));
+ return bytes_written;
+}
+int wl_android_get_assoclist(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int bytes_written = 0;
+ uint i;
+ char mac_buf[MAX_NUM_OF_ASSOCLIST *
+ sizeof(struct ether_addr) + sizeof(uint)] = {0};
+ struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+
+ DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
+
+ assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST);
+
+ error = wldev_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, sizeof(mac_buf), false);
+ if (error)
+ return -1;
+
+ assoc_maclist->count = dtoh32(assoc_maclist->count);
+ bytes_written = snprintf(command, total_len, "%s listcount: %d Stations:",
+ CMD_ASSOC_CLIENTS, assoc_maclist->count);
+
+ for (i = 0; i < assoc_maclist->count; i++) {
+ bytes_written += snprintf(command + bytes_written,
+ total_len - bytes_written, " " MACDBG,
+ MAC2STRDBG(assoc_maclist->ea[i].octet));
+ }
+ return bytes_written;
+
+}
+extern chanspec_t
+wl_chspec_host_to_driver(chanspec_t chanspec);
+static int wl_android_set_csa(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ char smbuf[WLC_IOCTL_SMLEN];
+ wl_chan_switch_t csa_arg;
+ u32 chnsp = 0;
+ int err = 0;
+
+ DHD_INFO(("%s: command:%s\n", __FUNCTION__, command));
+
+ command = (command + strlen(CMD_SET_CSA));
+ /* Argument order is mode, count, channel */
+ if (!*++command) {
+ DHD_ERROR(("%s:error missing arguments\n", __FUNCTION__));
+ return -1;
+ }
+ csa_arg.mode = bcm_atoi(command);
+
+ if (csa_arg.mode != 0 && csa_arg.mode != 1) {
+ DHD_ERROR(("Invalid mode\n"));
+ return -1;
+ }
+
+ if (!*++command) {
+ DHD_ERROR(("%s:error missing count\n", __FUNCTION__));
+ return -1;
+ }
+ command++;
+ csa_arg.count = bcm_atoi(command);
+
+ csa_arg.reg = 0;
+ csa_arg.chspec = 0;
+ command += 2;
+ if (!*command) {
+ DHD_ERROR(("%s:error missing channel\n", __FUNCTION__));
+ return -1;
+ }
+
+ chnsp = wf_chspec_aton(command);
+ if (chnsp == 0) {
+ DHD_ERROR(("%s:chsp is not correct\n", __FUNCTION__));
+ return -1;
+ }
+ chnsp = wl_chspec_host_to_driver(chnsp);
+
csa_arg.chspec = chnsp; + + if (chnsp & WL_CHANSPEC_BAND_5G) { + u32 chanspec = chnsp; + err = wldev_iovar_getint(dev, "per_chan_info", &chanspec); + if (!err) { + if ((chanspec & WL_CHAN_RADAR) || (chanspec & WL_CHAN_PASSIVE)) { + DHD_ERROR(("Channel is radar sensitive\n")); + return -1; + } + if (chanspec == 0) { + DHD_ERROR(("Invalid hw channel\n")); + return -1; + } + } else { + DHD_ERROR(("does not support per_chan_info\n")); + return -1; + } + DHD_INFO(("non radar sensitivity\n")); + } + error = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg), + smbuf, sizeof(smbuf), NULL); + if (error) { + DHD_ERROR(("%s:set csa failed:%d\n", __FUNCTION__, error)); + return -1; + } + return 0; +} +static int wl_android_get_band(struct net_device *dev, char *command, int total_len) +{ + uint band; + int bytes_written; + int error; + + error = wldev_get_band(dev, &band); + if (error) + return -1; + bytes_written = snprintf(command, total_len, "Band %d", band); + return bytes_written; +} + +#ifdef CUSTOMER_HW4_PRIVATE_CMD + +#ifdef FCC_PWR_LIMIT_2G +int +wl_android_set_fcc_pwr_limit_2g(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int enable = 0; + + sscanf(command+sizeof("SET_FCC_CHANNEL"), "%d", &enable); + + if ((enable != CUSTOMER_HW4_ENABLE) && (enable != CUSTOMER_HW4_DISABLE)) { + DHD_ERROR(("%s: Invalid data\n", __FUNCTION__)); + return BCME_ERROR; + } + + CUSTOMER_HW4_EN_CONVERT(enable); + + DHD_ERROR(("%s: fccpwrlimit2g set (%d)\n", __FUNCTION__, enable)); + error = wldev_iovar_setint(dev, "fccpwrlimit2g", enable); + if (error) { + DHD_ERROR(("%s: fccpwrlimit2g set returned (%d)\n", __FUNCTION__, error)); + return BCME_ERROR; + } + + return error; +} + +int +wl_android_get_fcc_pwr_limit_2g(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int enable = 0; + int bytes_written = 0; + + error = wldev_iovar_getint(dev, "fccpwrlimit2g", &enable); + if (error) { + DHD_ERROR(("%s: fccpwrlimit2g get error (%d)\n", __FUNCTION__, error)); + return BCME_ERROR; + } + DHD_ERROR(("%s: fccpwrlimit2g get (%d)\n", __FUNCTION__, enable)); + + bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_FCC_PWR_LIMIT_2G, enable); + + return bytes_written; +} +#endif /* FCC_PWR_LIMIT_2G */ + +#ifdef IPV6_NDO_SUPPORT +int +wl_android_nd_ra_limit(struct net_device *dev, char *command, int total_len) +{ + int err = 0; + int bytes_written = 0; + uint tokens; + char *pos, *token, *delim; + char smbuf[WLC_IOCTL_SMLEN]; + char param[ND_PARAM_SIZE+1], value[ND_VALUE_SIZE+1]; + uint16 type = 0xff, min = 0, per = 0, hold = 0; + nd_ra_ol_limits_t ra_ol_limit; + + WL_TRACE(("command=%s, len=%d\n", command, total_len)); + pos = command + strlen(CMD_NDRA_LIMIT) + 1; + memset(&ra_ol_limit, 0, sizeof(nd_ra_ol_limits_t)); + + if (!strncmp(pos, ND_RA_OL_SET, strlen(ND_RA_OL_SET))) { + WL_TRACE(("SET NDRA_LIMIT\n")); + pos += strlen(ND_RA_OL_SET) + 1; + while ((token = strsep(&pos, ND_PARAMS_DELIMETER)) != NULL) { + memset(param, 0, sizeof(param)); + memset(value, 0, sizeof(value)); + + delim = strchr(token, ND_PARAM_VALUE_DELLIMETER); + if (delim != NULL) + *delim = ' '; + + tokens = sscanf(token, ND_LIMIT_STR_FMT, param, value); + if (!strncmp(param, ND_RA_TYPE, strlen(ND_RA_TYPE))) { + type = simple_strtol(value, NULL, 0); + } else if (!strncmp(param, ND_RA_MIN_TIME, strlen(ND_RA_MIN_TIME))) { + min = simple_strtol(value, NULL, 0); + } else if (!strncmp(param, ND_RA_PER, strlen(ND_RA_PER))) { + per = simple_strtol(value, NULL, 0); + if (per > 100) { + 
WL_ERR(("Invalid PERCENT %d\n", per)); + err = BCME_BADARG; + goto exit; + } + } else if (!strncmp(param, ND_RA_HOLD, strlen(ND_RA_HOLD))) { + hold = simple_strtol(value, NULL, 0); + } + } + + ra_ol_limit.version = htod32(ND_RA_OL_LIMITS_VER); + ra_ol_limit.type = htod32(type); + if (type == ND_RA_OL_LIMITS_REL_TYPE) { + if ((min == 0) || (per == 0)) { + WL_ERR(("Invalid min_time %d, percent %d\n", min, per)); + err = BCME_BADARG; + goto exit; + } + ra_ol_limit.length = htod32(ND_RA_OL_LIMITS_REL_TYPE_LEN); + ra_ol_limit.limits.lifetime_relative.min_time = htod32(min); + ra_ol_limit.limits.lifetime_relative.lifetime_percent = htod32(per); + } else if (type == ND_RA_OL_LIMITS_FIXED_TYPE) { + if (hold == 0) { + WL_ERR(("Invalid hold_time %d\n", hold)); + err = BCME_BADARG; + goto exit; + } + ra_ol_limit.length = htod32(ND_RA_OL_LIMITS_FIXED_TYPE_LEN); + ra_ol_limit.limits.fixed.hold_time = htod32(hold); + } else { + WL_ERR(("unknown TYPE %d\n", type)); + err = BCME_BADARG; + goto exit; + } + + err = wldev_iovar_setbuf(dev, "nd_ra_limit_intv", &ra_ol_limit, + sizeof(nd_ra_ol_limits_t), smbuf, sizeof(smbuf), NULL); + if (err) { + WL_ERR(("Failed to set nd_ra_limit_intv, error = %d\n", err)); + goto exit; + } + + WL_TRACE(("TYPE %d, MIN %d, PER %d, HOLD %d\n", type, min, per, hold)); + } else if (!strncmp(pos, ND_RA_OL_GET, strlen(ND_RA_OL_GET))) { + WL_TRACE(("GET NDRA_LIMIT\n")); + err = wldev_iovar_getbuf(dev, "nd_ra_limit_intv", NULL, 0, + smbuf, sizeof(smbuf), NULL); + if (err) { + WL_ERR(("Failed to get nd_ra_limit_intv, error = %d\n", err)); + goto exit; + } + + memcpy(&ra_ol_limit, (uint8 *)smbuf, sizeof(nd_ra_ol_limits_t)); + type = ra_ol_limit.type; + if (ra_ol_limit.version != ND_RA_OL_LIMITS_VER) { + WL_ERR(("Invalid Version %d\n", ra_ol_limit.version)); + err = BCME_VERSION; + goto exit; + } + + if (ra_ol_limit.type == ND_RA_OL_LIMITS_REL_TYPE) { + min = ra_ol_limit.limits.lifetime_relative.min_time; + per = ra_ol_limit.limits.lifetime_relative.lifetime_percent; + WL_ERR(("TYPE %d, MIN %d, PER %d\n", type, min, per)); + bytes_written = snprintf(command, total_len, + "%s GET TYPE %d, MIN %d, PER %d", CMD_NDRA_LIMIT, type, min, per); + } else if (ra_ol_limit.type == ND_RA_OL_LIMITS_FIXED_TYPE) { + hold = ra_ol_limit.limits.fixed.hold_time; + WL_ERR(("TYPE %d, HOLD %d\n", type, hold)); + bytes_written = snprintf(command, total_len, + "%s GET TYPE %d, HOLD %d", CMD_NDRA_LIMIT, type, hold); + } else { + WL_ERR(("unknown TYPE %d\n", type)); + err = BCME_ERROR; + goto exit; + } + + return bytes_written; + } else { + WL_ERR(("unknown command\n")); + err = BCME_ERROR; + goto exit; + } + +exit: + return err; +} +#endif /* IPV6_NDO_SUPPORT */ +#ifdef WLTDLS +int wl_android_tdls_reset(struct net_device *dev) +{ + int ret = 0; + ret = dhd_tdls_enable(dev, false, false, NULL); + if (ret < 0) { + DHD_ERROR(("Disable tdls failed. %d\n", ret)); + return ret; + } + ret = dhd_tdls_enable(dev, true, true, NULL); + if (ret < 0) { + DHD_ERROR(("enable tdls failed. 
%d\n", ret)); + return ret; + } + return 0; +} +#endif /* WLTDLS */ +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ +static int wl_android_wbtext(struct net_device *dev, char *command, int total_len) +{ + int error = 0, argc = 0; + int data, bytes_written; + + argc = sscanf(command+sizeof("WBTEXT_ENABLE"), "%d", &data); + if (!argc) { + error = wldev_iovar_getint(dev, "wnm_bsstrans_resp", &data); + if (error) { + DHD_ERROR(("%s: Failed to set wbtext error = %d\n", + __FUNCTION__, error)); + } + bytes_written = snprintf(command, total_len, "WBTEXT %s\n", + (data == WL_BSSTRANS_POLICY_PRODUCT)? "ENABLED" : "DISABLED"); + return bytes_written; + } else { + if (data) + data = WL_BSSTRANS_POLICY_PRODUCT; + + error = wldev_iovar_setint(dev, "wnm_bsstrans_resp", data); + if (error) { + DHD_ERROR(("%s: Failed to set wbtext error = %d\n", + __FUNCTION__, error)); + } + } + return error; +} + +#ifdef PNO_SUPPORT +#define PNO_PARAM_SIZE 50 +#define VALUE_SIZE 50 +#define LIMIT_STR_FMT ("%50s %50s") +static int +wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len) +{ + int err = BCME_OK; + uint i, tokens; + char *pos, *pos2, *token, *token2, *delim; + char param[PNO_PARAM_SIZE], value[VALUE_SIZE]; + struct dhd_pno_batch_params batch_params; + DHD_PNO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); + if (total_len < strlen(CMD_WLS_BATCHING)) { + DHD_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len)); + err = BCME_ERROR; + goto exit; + } + pos = command + strlen(CMD_WLS_BATCHING) + 1; + memset(&batch_params, 0, sizeof(struct dhd_pno_batch_params)); + + if (!strncmp(pos, PNO_BATCHING_SET, strlen(PNO_BATCHING_SET))) { + pos += strlen(PNO_BATCHING_SET) + 1; + while ((token = strsep(&pos, PNO_PARAMS_DELIMETER)) != NULL) { + memset(param, 0, sizeof(param)); + memset(value, 0, sizeof(value)); + if (token == NULL || !*token) + break; + if (*token == '\0') + continue; + delim = strchr(token, PNO_PARAM_VALUE_DELLIMETER); + if (delim != NULL) + *delim = ' '; + + tokens = sscanf(token, LIMIT_STR_FMT, param, value); + if (!strncmp(param, PNO_PARAM_SCANFREQ, strlen(PNO_PARAM_SCANFREQ))) { + batch_params.scan_fr = simple_strtol(value, NULL, 0); + DHD_PNO(("scan_freq : %d\n", batch_params.scan_fr)); + } else if (!strncmp(param, PNO_PARAM_BESTN, strlen(PNO_PARAM_BESTN))) { + batch_params.bestn = simple_strtol(value, NULL, 0); + DHD_PNO(("bestn : %d\n", batch_params.bestn)); + } else if (!strncmp(param, PNO_PARAM_MSCAN, strlen(PNO_PARAM_MSCAN))) { + batch_params.mscan = simple_strtol(value, NULL, 0); + DHD_PNO(("mscan : %d\n", batch_params.mscan)); + } else if (!strncmp(param, PNO_PARAM_CHANNEL, strlen(PNO_PARAM_CHANNEL))) { + i = 0; + pos2 = value; + tokens = sscanf(value, "<%s>", value); + if (tokens != 1) { + err = BCME_ERROR; + DHD_ERROR(("%s : invalid format for channel" + " <> params\n", __FUNCTION__)); + goto exit; + } + while ((token2 = strsep(&pos2, + PNO_PARAM_CHANNEL_DELIMETER)) != NULL) { + if (token2 == NULL || !*token2) + break; + if (*token2 == '\0') + continue; + if (*token2 == 'A' || *token2 == 'B') { + batch_params.band = (*token2 == 'A')? + WLC_BAND_5G : WLC_BAND_2G; + DHD_PNO(("band : %s\n", + (*token2 == 'A')? 
"A" : "B")); + } else { + batch_params.chan_list[i++] = + simple_strtol(token2, NULL, 0); + batch_params.nchan++; + DHD_PNO(("channel :%d\n", + batch_params.chan_list[i-1])); + } + } + } else if (!strncmp(param, PNO_PARAM_RTT, strlen(PNO_PARAM_RTT))) { + batch_params.rtt = simple_strtol(value, NULL, 0); + DHD_PNO(("rtt : %d\n", batch_params.rtt)); + } else { + DHD_ERROR(("%s : unknown param: %s\n", __FUNCTION__, param)); + err = BCME_ERROR; + goto exit; + } + } + err = dhd_dev_pno_set_for_batch(dev, &batch_params); + if (err < 0) { + DHD_ERROR(("failed to configure batch scan\n")); + } else { + memset(command, 0, total_len); + err = snprintf(command, total_len, "%d", err); + } + } else if (!strncmp(pos, PNO_BATCHING_GET, strlen(PNO_BATCHING_GET))) { + err = dhd_dev_pno_get_for_batch(dev, command, total_len); + if (err < 0) { + DHD_ERROR(("failed to getting batching results\n")); + } else { + err = strlen(command); + } + } else if (!strncmp(pos, PNO_BATCHING_STOP, strlen(PNO_BATCHING_STOP))) { + err = dhd_dev_pno_stop_for_batch(dev); + if (err < 0) { + DHD_ERROR(("failed to stop batching scan\n")); + } else { + memset(command, 0, total_len); + err = snprintf(command, total_len, "OK"); + } + } else { + DHD_ERROR(("%s : unknown command\n", __FUNCTION__)); + err = BCME_ERROR; + goto exit; + } +exit: + return err; +} +#ifndef WL_SCHED_SCAN +static int wl_android_set_pno_setup(struct net_device *dev, char *command, int total_len) +{ + wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT]; + int res = -1; + int nssid = 0; + cmd_tlv_t *cmd_tlv_temp; + char *str_ptr; + int tlv_size_left; + int pno_time = 0; + int pno_repeat = 0; + int pno_freq_expo_max = 0; + +#ifdef PNO_SET_DEBUG + int i; + char pno_in_example[] = { + 'P', 'N', 'O', 'S', 'E', 'T', 'U', 'P', ' ', + 'S', '1', '2', '0', + 'S', + 0x05, + 'd', 'l', 'i', 'n', 'k', + 'S', + 0x04, + 'G', 'O', 'O', 'G', + 'T', + '0', 'B', + 'R', + '2', + 'M', + '2', + 0x00 + }; +#endif /* PNO_SET_DEBUG */ + DHD_PNO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len)); + + if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) { + DHD_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len)); + goto exit_proc; + } +#ifdef PNO_SET_DEBUG + memcpy(command, pno_in_example, sizeof(pno_in_example)); + total_len = sizeof(pno_in_example); +#endif + str_ptr = command + strlen(CMD_PNOSETUP_SET); + tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET); + + cmd_tlv_temp = (cmd_tlv_t *)str_ptr; + memset(ssids_local, 0, sizeof(ssids_local)); + + if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) && + (cmd_tlv_temp->version == PNO_TLV_VERSION) && + (cmd_tlv_temp->subtype == PNO_TLV_SUBTYPE_LEGACY_PNO)) { + + str_ptr += sizeof(cmd_tlv_t); + tlv_size_left -= sizeof(cmd_tlv_t); + + if ((nssid = wl_iw_parse_ssid_list_tlv(&str_ptr, ssids_local, + MAX_PFN_LIST_COUNT, &tlv_size_left)) <= 0) { + DHD_ERROR(("SSID is not presented or corrupted ret=%d\n", nssid)); + goto exit_proc; + } else { + if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) { + DHD_ERROR(("%s scan duration corrupted field size %d\n", + __FUNCTION__, tlv_size_left)); + goto exit_proc; + } + str_ptr++; + pno_time = simple_strtoul(str_ptr, &str_ptr, 16); + DHD_PNO(("%s: pno_time=%d\n", __FUNCTION__, pno_time)); + + if (str_ptr[0] != 0) { + if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) { + DHD_ERROR(("%s pno repeat : corrupted field\n", + __FUNCTION__)); + goto exit_proc; + } + str_ptr++; + pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16); + DHD_PNO(("%s :got pno_repeat=%d\n", __FUNCTION__, 
pno_repeat)); + if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) { + DHD_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n", + __FUNCTION__)); + goto exit_proc; + } + str_ptr++; + pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16); + DHD_PNO(("%s: pno_freq_expo_max=%d\n", + __FUNCTION__, pno_freq_expo_max)); + } + } + } else { + DHD_ERROR(("%s get wrong TLV command\n", __FUNCTION__)); + goto exit_proc; + } + + res = dhd_dev_pno_set_for_ssid(dev, ssids_local, nssid, pno_time, pno_repeat, + pno_freq_expo_max, NULL, 0); +exit_proc: + return res; +} +#endif /* !WL_SCHED_SCAN */ +#endif /* PNO_SUPPORT */ + +static int wl_android_get_p2p_dev_addr(struct net_device *ndev, char *command, int total_len) +{ + int ret; + int bytes_written = 0; + + ret = wl_cfg80211_get_p2p_dev_addr(ndev, (struct ether_addr*)command); + if (ret) + return 0; + bytes_written = sizeof(struct ether_addr); + return bytes_written; +} + + +int +wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist) +{ + int i, j, match; + int ret = 0; + char mac_buf[MAX_NUM_OF_ASSOCLIST * + sizeof(struct ether_addr) + sizeof(uint)] = {0}; + struct maclist *assoc_maclist = (struct maclist *)mac_buf; + + /* set filtering mode */ + if ((ret = wldev_ioctl(dev, WLC_SET_MACMODE, &macmode, sizeof(macmode), true)) != 0) { + DHD_ERROR(("%s : WLC_SET_MACMODE error=%d\n", __FUNCTION__, ret)); + return ret; + } + if (macmode != MACLIST_MODE_DISABLED) { + /* set the MAC filter list */ + if ((ret = wldev_ioctl(dev, WLC_SET_MACLIST, maclist, + sizeof(int) + sizeof(struct ether_addr) * maclist->count, true)) != 0) { + DHD_ERROR(("%s : WLC_SET_MACLIST error=%d\n", __FUNCTION__, ret)); + return ret; + } + /* get the current list of associated STAs */ + assoc_maclist->count = MAX_NUM_OF_ASSOCLIST; + if ((ret = wldev_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, + sizeof(mac_buf), false)) != 0) { + DHD_ERROR(("%s : WLC_GET_ASSOCLIST error=%d\n", __FUNCTION__, ret)); + return ret; + } + /* do we have any STA associated? 
*/ + if (assoc_maclist->count) { + /* iterate each associated STA */ + for (i = 0; i < assoc_maclist->count; i++) { + match = 0; + /* compare with each entry */ + for (j = 0; j < maclist->count; j++) { + DHD_INFO(("%s : associated="MACDBG " list="MACDBG "\n", + __FUNCTION__, MAC2STRDBG(assoc_maclist->ea[i].octet), + MAC2STRDBG(maclist->ea[j].octet))); + if (memcmp(assoc_maclist->ea[i].octet, + maclist->ea[j].octet, ETHER_ADDR_LEN) == 0) { + match = 1; + break; + } + } + /* do conditional deauth */ + /* "if not in the allow list" or "if in the deny list" */ + if ((macmode == MACLIST_MODE_ALLOW && !match) || + (macmode == MACLIST_MODE_DENY && match)) { + scb_val_t scbval; + + scbval.val = htod32(1); + memcpy(&scbval.ea, &assoc_maclist->ea[i], + ETHER_ADDR_LEN); + if ((ret = wldev_ioctl(dev, + WLC_SCB_DEAUTHENTICATE_FOR_REASON, + &scbval, sizeof(scb_val_t), true)) != 0) + DHD_ERROR(("%s WLC_SCB_DEAUTHENTICATE error=%d\n", + __FUNCTION__, ret)); + } + } + } + } + return ret; +} + +/* + * HAPD_MAC_FILTER mac_mode mac_cnt mac_addr1 mac_addr2 + * + */ +static int +wl_android_set_mac_address_filter(struct net_device *dev, const char* str) +{ + int i; + int ret = 0; + int macnum = 0; + int macmode = MACLIST_MODE_DISABLED; + struct maclist *list; + char eabuf[ETHER_ADDR_STR_LEN]; + char *token; + + /* string should look like below (macmode/macnum/maclist) */ + /* 1 2 00:11:22:33:44:55 00:11:22:33:44:ff */ + + /* get the MAC filter mode */ + token = strsep((char**)&str, " "); + if (!token) { + return -1; + } + macmode = bcm_atoi(token); + + if (macmode < MACLIST_MODE_DISABLED || macmode > MACLIST_MODE_ALLOW) { + DHD_ERROR(("%s : invalid macmode %d\n", __FUNCTION__, macmode)); + return -1; + } + + token = strsep((char**)&str, " "); + if (!token) { + return -1; + } + macnum = bcm_atoi(token); + if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) { + DHD_ERROR(("%s : invalid number of MAC address entries %d\n", + __FUNCTION__, macnum)); + return -1; + } + /* allocate memory for the MAC list */ + list = (struct maclist*)kmalloc(sizeof(int) + + sizeof(struct ether_addr) * macnum, GFP_KERNEL); + if (!list) { + DHD_ERROR(("%s : failed to allocate memory\n", __FUNCTION__)); + return -1; + } + /* prepare the MAC list */ + list->count = htod32(macnum); + bzero((char *)eabuf, ETHER_ADDR_STR_LEN); + for (i = 0; i < list->count; i++) { + strncpy(eabuf, strsep((char**)&str, " "), ETHER_ADDR_STR_LEN - 1); + if (!(ret = bcm_ether_atoe(eabuf, &list->ea[i]))) { + DHD_ERROR(("%s : mac parsing err index=%d, addr=%s\n", + __FUNCTION__, i, eabuf)); + list->count--; + break; + } + DHD_INFO(("%s : %d/%d MACADDR=%s", __FUNCTION__, i, list->count, eabuf)); + } + /* set the list */ + if ((ret = wl_android_set_ap_mac_list(dev, macmode, list)) != 0) + DHD_ERROR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret)); + + kfree(list); + + return 0; +} + +/** + * Global function definitions (declared in wl_android.h) + */ + +int wl_android_wifi_on(struct net_device *dev) +{ + int ret = 0; + int retry = POWERUP_MAX_RETRY; + + DHD_ERROR(("%s in\n", __FUNCTION__)); + if (!dev) { + DHD_ERROR(("%s: dev is null\n", __FUNCTION__)); + return -EINVAL; + } + + dhd_net_if_lock(dev); + if (!g_wifi_on) { + do { + dhd_net_wifi_platform_set_power(dev, TRUE, WIFI_TURNON_DELAY); +#ifdef BCMSDIO + ret = dhd_net_bus_resume(dev, 0); +#endif /* BCMSDIO */ +#ifdef BCMPCIE + ret = dhd_net_bus_devreset(dev, FALSE); +#endif /* BCMPCIE */ + if (ret == 0) { + break; + } + DHD_ERROR(("\nfailed to power up wifi chip, retry again (%d left) **\n\n", + retry)); 
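+ /* Power-cycle before the next attempt: put the bus back in
+ * reset (PCIe) and switch the regulator off, so every retry
+ * starts from a clean power-on state.
+ */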
+#ifdef BCMPCIE
+ dhd_net_bus_devreset(dev, TRUE);
+#endif /* BCMPCIE */
+ dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
+ } while (retry-- > 0);
+ if (ret != 0) {
+ DHD_ERROR(("\nfailed to power up wifi chip, max retry reached **\n\n"));
+ goto exit;
+ }
+#ifdef BCMSDIO
+ ret = dhd_net_bus_devreset(dev, FALSE);
+ dhd_net_bus_resume(dev, 1);
+#endif /* BCMSDIO */
+
+#ifndef BCMPCIE
+ if (!ret) {
+ if (dhd_dev_init_ioctl(dev) < 0) {
+ ret = -EFAULT;
+ }
+ }
+#endif /* !BCMPCIE */
+ g_wifi_on = TRUE;
+ }
+
+exit:
+ dhd_net_if_unlock(dev);
+
+ return ret;
+}
+
+int wl_android_wifi_off(struct net_device *dev, bool on_failure)
+{
+ int ret = 0;
+
+ DHD_ERROR(("%s in\n", __FUNCTION__));
+ if (!dev) {
+ DHD_TRACE(("%s: dev is null\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ dhd_net_if_lock(dev);
+ if (g_wifi_on || on_failure) {
+#if defined(BCMSDIO) || defined(BCMPCIE)
+ ret = dhd_net_bus_devreset(dev, TRUE);
+#ifdef BCMSDIO
+ dhd_net_bus_suspend(dev);
+#endif /* BCMSDIO */
+#endif /* BCMSDIO || BCMPCIE */
+ dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
+ g_wifi_on = FALSE;
+ }
+ dhd_net_if_unlock(dev);
+
+ return ret;
+}
+
+static int wl_android_set_fwpath(struct net_device *net, char *command, int total_len)
+{
+ if ((strlen(command) - strlen(CMD_SETFWPATH)) > MOD_PARAM_PATHLEN)
+ return -1;
+ return dhd_net_set_fw_path(net, command + strlen(CMD_SETFWPATH) + 1);
+}
+
+#ifdef CONNECTION_STATISTICS
+static int
+wl_chanim_stats(struct net_device *dev, u8 *chan_idle)
+{
+ int err;
+ wl_chanim_stats_t *list;
+ /* Parameter _and_ returned buffer of chanim_stats. */
+ wl_chanim_stats_t param;
+ u8 result[WLC_IOCTL_SMLEN];
+ chanim_stats_t *stats;
+
+ memset(&param, 0, sizeof(param));
+ memset(result, 0, sizeof(result));
+
+ param.buflen = htod32(sizeof(wl_chanim_stats_t));
+ param.count = htod32(WL_CHANIM_COUNT_ONE);
+
+ if ((err = wldev_iovar_getbuf(dev, "chanim_stats", (char*)&param, sizeof(wl_chanim_stats_t),
+ (char*)result, sizeof(result), 0)) < 0) {
+ WL_ERR(("Failed to get chanim results %d \n", err));
+ return err;
+ }
+
+ list = (wl_chanim_stats_t*)result;
+
+ list->buflen = dtoh32(list->buflen);
+ list->version = dtoh32(list->version);
+ list->count = dtoh32(list->count);
+
+ if (list->buflen == 0) {
+ list->version = 0;
+ list->count = 0;
+ } else if (list->version != WL_CHANIM_STATS_VERSION) {
+ WL_ERR(("Sorry, firmware has wl_chanim_stats version %d "
+ "but driver supports only version %d.\n",
+ list->version, WL_CHANIM_STATS_VERSION));
+ list->buflen = 0;
+ list->count = 0;
+ }
+
+ stats = list->stats;
+ stats->glitchcnt = dtoh32(stats->glitchcnt);
+ stats->badplcp = dtoh32(stats->badplcp);
+ stats->chanspec = dtoh16(stats->chanspec);
+ stats->timestamp = dtoh32(stats->timestamp);
+ stats->chan_idle = dtoh32(stats->chan_idle);
+
+ WL_INFORM(("chanspec: 0x%4x glitch: %d badplcp: %d idle: %d timestamp: %d\n",
+ stats->chanspec, stats->glitchcnt, stats->badplcp, stats->chan_idle,
+ stats->timestamp));
+
+ *chan_idle = stats->chan_idle;
+
+ return (err);
+}
+
+static int
+wl_android_get_connection_stats(struct net_device *dev, char *command, int total_len)
+{
+ wl_cnt_t* cnt = NULL;
+#ifndef DISABLE_IF_COUNTERS
+ wl_if_stats_t* if_stats = NULL;
+#endif /* DISABLE_IF_COUNTERS */
+
+ int link_speed = 0;
+ struct connection_stats *output;
+ unsigned int bufsize = 0;
+ int bytes_written = -1;
+ int ret = 0;
+
+ WL_INFORM(("%s: enter Get Connection Stats\n", __FUNCTION__));
+
+ if (total_len <= 0) {
+ WL_ERR(("%s: invalid buffer size %d\n", __FUNCTION__,
total_len)); + goto error; + } + + bufsize = total_len; + if (bufsize < sizeof(struct connection_stats)) { + WL_ERR(("%s: not enough buffer size, provided=%u, requires=%zu\n", + __FUNCTION__, bufsize, + sizeof(struct connection_stats))); + goto error; + } + + output = (struct connection_stats *)command; + +#ifndef DISABLE_IF_COUNTERS + if ((if_stats = kmalloc(sizeof(*if_stats), GFP_KERNEL)) == NULL) { + WL_ERR(("%s(%d): kmalloc failed\n", __FUNCTION__, __LINE__)); + goto error; + } + memset(if_stats, 0, sizeof(*if_stats)); + + ret = wldev_iovar_getbuf(dev, "if_counters", NULL, 0, + (char *)if_stats, sizeof(*if_stats), NULL); + if (ret) { + WL_ERR(("%s: if_counters not supported ret=%d\n", + __FUNCTION__, ret)); + + /* In case if_stats IOVAR is not supported, get information from counters. */ +#endif /* DISABLE_IF_COUNTERS */ + if ((cnt = kmalloc(sizeof(*cnt), GFP_KERNEL)) == NULL) { + WL_ERR(("%s(%d): kmalloc failed\n", __FUNCTION__, __LINE__)); + goto error; + } + memset(cnt, 0, sizeof(*cnt)); + + ret = wldev_iovar_getbuf(dev, "counters", NULL, 0, + (char *)cnt, sizeof(wl_cnt_t), NULL); + if (ret) { + WL_ERR(("%s: wldev_iovar_getbuf() failed, ret=%d\n", + __FUNCTION__, ret)); + goto error; + } + + if (dtoh16(cnt->version) > WL_CNT_T_VERSION) { + WL_ERR(("%s: incorrect version of wl_cnt_t, expected=%u got=%u\n", + __FUNCTION__, WL_CNT_T_VERSION, cnt->version)); + goto error; + } + + output->txframe = dtoh32(cnt->txframe); + output->txbyte = dtoh32(cnt->txbyte); + output->txerror = dtoh32(cnt->txerror); + output->rxframe = dtoh32(cnt->rxframe); + output->rxbyte = dtoh32(cnt->rxbyte); + output->txfail = dtoh32(cnt->txfail); + output->txretry = dtoh32(cnt->txretry); + output->txretrie = dtoh32(cnt->txretrie); + output->txrts = dtoh32(cnt->txrts); + output->txnocts = dtoh32(cnt->txnocts); + output->txexptime = dtoh32(cnt->txexptime); +#ifndef DISABLE_IF_COUNTERS + } else { + /* Populate from if_stats. */ + if (dtoh16(if_stats->version) > WL_IF_STATS_T_VERSION) { + WL_ERR(("%s: incorrect version of wl_if_stats_t, expected=%u got=%u\n", + __FUNCTION__, WL_IF_STATS_T_VERSION, if_stats->version)); + goto error; + } + + output->txframe = (uint32)dtoh64(if_stats->txframe); + output->txbyte = (uint32)dtoh64(if_stats->txbyte); + output->txerror = (uint32)dtoh64(if_stats->txerror); + output->rxframe = (uint32)dtoh64(if_stats->rxframe); + output->rxbyte = (uint32)dtoh64(if_stats->rxbyte); + output->txfail = (uint32)dtoh64(if_stats->txfail); + output->txretry = (uint32)dtoh64(if_stats->txretry); + output->txretrie = (uint32)dtoh64(if_stats->txretrie); + /* Unavailable */ + output->txrts = 0; + output->txnocts = 0; + output->txexptime = 0; + } +#endif /* DISABLE_IF_COUNTERS */ + + /* link_speed is in kbps */ + ret = wldev_get_link_speed(dev, &link_speed); + if (ret || link_speed < 0) { + WL_ERR(("%s: wldev_get_link_speed() failed, ret=%d, speed=%d\n", + __FUNCTION__, ret, link_speed)); + goto error; + } + + output->txrate = link_speed; + + /* Channel idle ratio. 
*/
+ if (wl_chanim_stats(dev, &(output->chan_idle)) < 0) {
+ output->chan_idle = 0;
+ };
+
+ bytes_written = sizeof(struct connection_stats);
+
+error:
+#ifndef DISABLE_IF_COUNTERS
+ if (if_stats) {
+ kfree(if_stats);
+ }
+#endif /* DISABLE_IF_COUNTERS */
+ if (cnt) {
+ kfree(cnt);
+ }
+
+ return bytes_written;
+}
+#endif /* CONNECTION_STATISTICS */
+
+
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+
+/* SoftAP feature */
+#define APCS_BAND_2G_LEGACY1 20
+#define APCS_BAND_2G_LEGACY2 0
+#define APCS_BAND_AUTO "band=auto"
+#define APCS_BAND_2G "band=2g"
+#define APCS_BAND_5G "band=5g"
+#define APCS_MAX_2G_CHANNELS 11
+#define APCS_MAX_RETRY 10
+#define APCS_DEFAULT_2G_CH 1
+#define APCS_DEFAULT_5G_CH 149
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+static int
+wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str,
+ char* command, int total_len)
+{
+ int channel = 0;
+ int chosen = 0;
+ int retry = 0;
+ int ret = 0;
+ int spect = 0;
+ u8 *reqbuf = NULL;
+ uint32 band = WLC_BAND_2G;
+ uint32 buf_size;
+
+ if (cmd_str) {
+ WL_INFORM(("Command: %s len:%d \n", cmd_str, (int)strlen(cmd_str)));
+ if (strncmp(cmd_str, APCS_BAND_AUTO, strlen(APCS_BAND_AUTO)) == 0) {
+ band = WLC_BAND_AUTO;
+ } else if (strncmp(cmd_str, APCS_BAND_5G, strlen(APCS_BAND_5G)) == 0) {
+ band = WLC_BAND_5G;
+ } else if (strncmp(cmd_str, APCS_BAND_2G, strlen(APCS_BAND_2G)) == 0) {
+ band = WLC_BAND_2G;
+ } else {
+ /*
+ * For backward compatibility: Some platforms used to issue argument 20 or 0
+ * to enforce the 2G channel selection
+ */
+ channel = bcm_atoi(cmd_str);
+ if ((channel == APCS_BAND_2G_LEGACY1) ||
+ (channel == APCS_BAND_2G_LEGACY2)) {
+ band = WLC_BAND_2G;
+ } else {
+ WL_ERR(("Invalid argument\n"));
+ return -EINVAL;
+ }
+ }
+ } else {
+ /* If no argument is provided, default to 2G */
+ WL_ERR(("No argument given, default to 2.4G scan\n"));
+ band = WLC_BAND_2G;
+ }
+ WL_INFORM(("HAPD_AUTO_CHANNEL = %d, band=%d \n", channel, band));
+
+ if ((ret = wldev_ioctl(dev, WLC_GET_SPECT_MANAGMENT, &spect, sizeof(spect), false)) < 0) {
+ WL_ERR(("ACS: error getting the spect\n"));
+ goto done;
+ }
+
+ if (spect > 0) {
+ /* If an STA is connected, return the STA channel; otherwise
+ * ACS can be issued: set spect to 0 and proceed with ACS
+ */
+ channel = wl_cfg80211_get_sta_channel();
+ if (channel) {
+ channel = (channel <= CH_MAX_2G_CHANNEL) ? channel : APCS_DEFAULT_2G_CH;
+ goto done2;
+ }
+
+ if ((ret = wl_cfg80211_set_spect(dev, 0)) < 0) {
+ WL_ERR(("ACS: error while setting spect\n"));
+ goto done;
+ }
+ }
+
+ reqbuf = kzalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL);
+ if (reqbuf == NULL) {
+ WL_ERR(("failed to allocate chanspec buffer\n"));
+ return -ENOMEM;
+ }
+
+ if (band == WLC_BAND_AUTO) {
+ WL_INFORM(("ACS full channel scan \n"));
+ reqbuf[0] = htod32(0);
+ } else if (band == WLC_BAND_5G) {
+ WL_INFORM(("ACS 5G band scan \n"));
+ if ((ret = wl_cfg80211_get_chanspecs_5g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) {
+ WL_ERR(("ACS 5g chanspec retrieval failed! \n"));
+ goto done;
+ }
+ } else if (band == WLC_BAND_2G) {
+ /*
+ * If the channel argument is not provided, or argument 20 is provided,
+ * restrict the channel to 2GHz, 20MHz BW, no SB
+ */
+ WL_INFORM(("ACS 2G band scan \n"));
+ if ((ret = wl_cfg80211_get_chanspecs_2g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) {
+ WL_ERR(("ACS 2g chanspec retrieval failed! \n"));
+ goto done;
+ }
+ } else {
+ WL_ERR(("ACS: No band chosen\n"));
+ goto done2;
+ }
+
+ buf_size = (band == WLC_BAND_AUTO) ?
sizeof(int) : CHANSPEC_BUF_SIZE; + ret = wldev_ioctl(dev, WLC_START_CHANNEL_SEL, (void *)reqbuf, + buf_size, true); + if (ret < 0) { + WL_ERR(("can't start auto channel scan, err = %d\n", ret)); + channel = 0; + goto done; + } + + /* Wait for auto channel selection, max 3000 ms */ + if ((band == WLC_BAND_2G) || (band == WLC_BAND_5G)) { + OSL_SLEEP(500); + } else { + /* + * Full channel scan at the minimum takes 1.2secs + * even with parallel scan. max wait time: 3500ms + */ + OSL_SLEEP(1000); + } + + retry = APCS_MAX_RETRY; + while (retry--) { + ret = wldev_ioctl(dev, WLC_GET_CHANNEL_SEL, &chosen, + sizeof(chosen), false); + if (ret < 0) { + chosen = 0; + } else { + chosen = dtoh32(chosen); + } + + if (chosen) { + int chosen_band; + int apcs_band; +#ifdef D11AC_IOTYPES + if (wl_cfg80211_get_ioctl_version() == 1) { + channel = LCHSPEC_CHANNEL((chanspec_t)chosen); + } else { + channel = CHSPEC_CHANNEL((chanspec_t)chosen); + } +#else + channel = CHSPEC_CHANNEL((chanspec_t)chosen); +#endif /* D11AC_IOTYPES */ + apcs_band = (band == WLC_BAND_AUTO) ? WLC_BAND_2G : band; + chosen_band = (channel <= CH_MAX_2G_CHANNEL) ? WLC_BAND_2G : WLC_BAND_5G; + if (apcs_band == chosen_band) { + WL_ERR(("selected channel = %d\n", channel)); + break; + } + } + WL_INFORM(("%d tried, ret = %d, chosen = 0x%x\n", + (APCS_MAX_RETRY - retry), ret, chosen)); + OSL_SLEEP(250); + } + +done: + if ((retry == 0) || (ret < 0)) { + /* On failure, fallback to a default channel */ + if ((band == WLC_BAND_5G)) { + channel = APCS_DEFAULT_5G_CH; + } else { + channel = APCS_DEFAULT_2G_CH; + } + WL_ERR(("ACS failed. Fall back to default channel (%d) \n", channel)); + } +done2: + if (spect > 0) { + if ((ret = wl_cfg80211_set_spect(dev, spect) < 0)) { + WL_ERR(("ACS: error while setting spect\n")); + } + } + + if (reqbuf) { + kfree(reqbuf); + } + + if (channel) { + snprintf(command, 4, "%d", channel); + WL_INFORM(("command result is %s \n", command)); + return strlen(command); + } else { + return ret; + } +} +#endif /* WL_SUPPORT_AUTO_CHANNEL */ + +#ifdef CUSTOMER_HW4_PRIVATE_CMD + + +#ifdef SUPPORT_SET_LPC +static int +wl_android_set_lpc(struct net_device *dev, const char* string_num) +{ + int lpc_enabled, ret; + s32 val = 1; + + lpc_enabled = bcm_atoi(string_num); + DHD_INFO(("%s : HAPD_LPC_ENABLED = %d\n", __FUNCTION__, lpc_enabled)); + + ret = wldev_ioctl(dev, WLC_DOWN, &val, sizeof(s32), true); + if (ret < 0) + DHD_ERROR(("WLC_DOWN error %d\n", ret)); + + wldev_iovar_setint(dev, "lpc", lpc_enabled); + + ret = wldev_ioctl(dev, WLC_UP, &val, sizeof(s32), true); + if (ret < 0) + DHD_ERROR(("WLC_UP error %d\n", ret)); + + return 1; +} +#endif /* SUPPORT_SET_LPC */ + +static int +wl_android_ch_res_rl(struct net_device *dev, bool change) +{ + int error = 0; + s32 srl = 7; + s32 lrl = 4; + printk("%s enter\n", __FUNCTION__); + if (change) { + srl = 4; + lrl = 2; + } + error = wldev_ioctl(dev, WLC_SET_SRL, &srl, sizeof(s32), true); + if (error) { + DHD_ERROR(("Failed to set SRL, error = %d\n", error)); + } + error = wldev_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(s32), true); + if (error) { + DHD_ERROR(("Failed to set LRL, error = %d\n", error)); + } + return error; +} + + +static int +wl_android_rmc_enable(struct net_device *net, int rmc_enable) +{ + int err; + + err = wldev_iovar_setint(net, "rmc_ackreq", rmc_enable); + return err; +} + +static int +wl_android_rmc_set_leader(struct net_device *dev, const char* straddr) +{ + int error = BCME_OK; + char smbuf[WLC_IOCTL_SMLEN]; + wl_rmc_entry_t rmc_entry; + DHD_INFO(("%s: Set new RMC leader 
%s\n", __FUNCTION__, straddr)); + + memset(&rmc_entry, 0, sizeof(wl_rmc_entry_t)); + if (!bcm_ether_atoe(straddr, &rmc_entry.addr)) { + if (strlen(straddr) == 1 && bcm_atoi(straddr) == 0) { + DHD_INFO(("%s: Set auto leader selection mode\n", __FUNCTION__)); + memset(&rmc_entry, 0, sizeof(wl_rmc_entry_t)); + } else { + DHD_ERROR(("%s: No valid mac address provided\n", + __FUNCTION__)); + return BCME_ERROR; + } + } + + error = wldev_iovar_setbuf(dev, "rmc_ar", &rmc_entry, sizeof(wl_rmc_entry_t), + smbuf, sizeof(smbuf), NULL); + + if (error != BCME_OK) { + DHD_ERROR(("%s: Unable to set RMC leader, error = %d\n", + __FUNCTION__, error)); + } + + return error; +} + +static int wl_android_set_rmc_event(struct net_device *dev, char *command, int total_len) +{ + int err = 0; + int pid = 0; + + if (sscanf(command, CMD_SET_RMC_EVENT " %d", &pid) <= 0) { + WL_ERR(("Failed to get Parameter from : %s\n", command)); + return -1; + } + + /* set pid, and if the event was happened, let's send a notification through netlink */ + wl_cfg80211_set_rmc_pid(pid); + + WL_DBG(("RMC pid=%d\n", pid)); + + return err; +} + +int wl_android_get_singlecore_scan(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int bytes_written = 0; + int mode = 0; + + error = wldev_iovar_getint(dev, "scan_ps", &mode); + if (error) { + DHD_ERROR(("%s: Failed to get single core scan Mode, error = %d\n", + __FUNCTION__, error)); + return -1; + } + + bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_SCSCAN, mode); + + return bytes_written; +} + +int wl_android_set_singlecore_scan(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int mode = 0; + + if (sscanf(command, "%*s %d", &mode) != 1) { + DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__)); + return -1; + } + + error = wldev_iovar_setint(dev, "scan_ps", mode); + if (error) { + DHD_ERROR(("%s[1]: Failed to set Mode %d, error = %d\n", + __FUNCTION__, mode, error)); + return -1; + } + + return error; +} +#ifdef TEST_TX_POWER_CONTROL +static int +wl_android_set_tx_power(struct net_device *dev, const char* string_num) +{ + int err = 0; + s32 dbm; + enum nl80211_tx_power_setting type; + + dbm = bcm_atoi(string_num); + + if (dbm < -1) { + DHD_ERROR(("%s: dbm is negative...\n", __FUNCTION__)); + return -EINVAL; + } + + if (dbm == -1) + type = NL80211_TX_POWER_AUTOMATIC; + else + type = NL80211_TX_POWER_FIXED; + + err = wl_set_tx_power(dev, type, dbm); + if (unlikely(err)) { + DHD_ERROR(("%s: error (%d)\n", __FUNCTION__, err)); + return err; + } + + return 1; +} + +static int +wl_android_get_tx_power(struct net_device *dev, char *command, int total_len) +{ + int err; + int bytes_written; + s32 dbm = 0; + + err = wl_get_tx_power(dev, &dbm); + if (unlikely(err)) { + DHD_ERROR(("%s: error (%d)\n", __FUNCTION__, err)); + return err; + } + + bytes_written = snprintf(command, total_len, "%s %d", + CMD_TEST_GET_TX_POWER, dbm); + + DHD_ERROR(("%s: GET_TX_POWER: dBm=%d\n", __FUNCTION__, dbm)); + + return bytes_written; +} +#endif /* TEST_TX_POWER_CONTROL */ + +static int +wl_android_set_sarlimit_txctrl(struct net_device *dev, const char* string_num) +{ + int err = 0; + int setval = 0; + s32 mode = bcm_atoi(string_num); + + /* As Samsung specific and their requirement, '0' means activate sarlimit + * and '-1' means back to normal state (deactivate sarlimit) + */ + if (mode == 0) { + DHD_INFO(("%s: SAR limit control activated\n", __FUNCTION__)); + setval = 1; + } else if (mode == -1) { + DHD_INFO(("%s: SAR limit control deactivated\n", 
__FUNCTION__)); + setval = 0; + } else { + return -EINVAL; + } + + err = wldev_iovar_setint(dev, "sar_enable", setval); + if (unlikely(err)) { + DHD_ERROR(("%s: error (%d)\n", __FUNCTION__, err)); + return err; + } + return 1; +} +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ + +int wl_android_set_roam_mode(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int mode = 0; + + if (sscanf(command, "%*s %d", &mode) != 1) { + DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__)); + return -1; + } + + error = wldev_iovar_setint(dev, "roam_off", mode); + if (error) { + DHD_ERROR(("%s: Failed to set roaming Mode %d, error = %d\n", + __FUNCTION__, mode, error)); + return -1; + } + else + DHD_ERROR(("%s: succeeded to set roaming Mode %d, error = %d\n", + __FUNCTION__, mode, error)); + return 0; +} + +int wl_android_set_ibss_beacon_ouidata(struct net_device *dev, char *command, int total_len) +{ + char ie_buf[VNDR_IE_MAX_LEN]; + char *ioctl_buf = NULL; + char hex[] = "XX"; + char *pcmd = NULL; + int ielen = 0, datalen = 0, idx = 0, tot_len = 0; + vndr_ie_setbuf_t *vndr_ie = NULL; + s32 iecount; + uint32 pktflag; + u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + s32 err = BCME_OK; + + /* Check the VSIE (Vendor Specific IE) which was added. + * If exist then send IOVAR to delete it + */ + if (wl_cfg80211_ibss_vsie_delete(dev) != BCME_OK) { + return -EINVAL; + } + + pcmd = command + strlen(CMD_SETIBSSBEACONOUIDATA) + 1; + for (idx = 0; idx < DOT11_OUI_LEN; idx++) { + hex[0] = *pcmd++; + hex[1] = *pcmd++; + ie_buf[idx] = (uint8)simple_strtoul(hex, NULL, 16); + } + pcmd++; + while ((*pcmd != '\0') && (idx < VNDR_IE_MAX_LEN)) { + hex[0] = *pcmd++; + hex[1] = *pcmd++; + ie_buf[idx++] = (uint8)simple_strtoul(hex, NULL, 16); + datalen++; + } + tot_len = sizeof(vndr_ie_setbuf_t) + (datalen - 1); + vndr_ie = (vndr_ie_setbuf_t *) kzalloc(tot_len, kflags); + if (!vndr_ie) { + WL_ERR(("IE memory alloc failed\n")); + return -ENOMEM; + } + /* Copy the vndr_ie SET command ("add"/"del") to the buffer */ + strncpy(vndr_ie->cmd, "add", VNDR_IE_CMD_LEN - 1); + vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0'; + + /* Set the IE count - the buffer contains only 1 IE */ + iecount = htod32(1); + memcpy((void *)&vndr_ie->vndr_ie_buffer.iecount, &iecount, sizeof(s32)); + + /* Set packet flag to indicate that BEACON's will contain this IE */ + pktflag = htod32(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG); + memcpy((void *)&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag, + sizeof(u32)); + /* Set the IE ID */ + vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = (uchar) DOT11_MNG_PROPR_ID; + + memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, &ie_buf, + DOT11_OUI_LEN); + memcpy(&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data, + &ie_buf[DOT11_OUI_LEN], datalen); + + ielen = DOT11_OUI_LEN + datalen; + vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = (uchar) ielen; + + ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL); + if (!ioctl_buf) { + WL_ERR(("ioctl memory alloc failed\n")); + if (vndr_ie) { + kfree(vndr_ie); + } + return -ENOMEM; + } + memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN); /* init the buffer */ + err = wldev_iovar_setbuf(dev, "ie", vndr_ie, tot_len, ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + + + if (err != BCME_OK) { + err = -EINVAL; + if (vndr_ie) { + kfree(vndr_ie); + } + } + else { + /* do NOT free 'vndr_ie' for the next process */ + wl_cfg80211_ibss_vsie_set_buffer(vndr_ie, tot_len); + } + + if (ioctl_buf) { + kfree(ioctl_buf); + } + + return err; +} + 
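+/* The OUI/data parser above decodes two ASCII hex characters per octet.
+ * A minimal stand-alone sketch of that convention follows for reference;
+ * wl_hex_pairs_to_bytes is a hypothetical helper, guarded out because it
+ * is not called anywhere in this driver.
+ */
+#if 0
+static int
+wl_hex_pairs_to_bytes(const char *src, uint8 *dst, int dst_len)
+{
+ char hex[3] = {0};
+ int n = 0;
+
+ /* consume "00904c..." style input, two nibbles per output byte */
+ while (src[0] && src[1] && (n < dst_len)) {
+ hex[0] = *src++;
+ hex[1] = *src++;
+ dst[n++] = (uint8)simple_strtoul(hex, NULL, 16);
+ }
+ return n; /* number of octets written */
+}
+#endif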
+#if defined(BCMFW_ROAM_ENABLE) +static int +wl_android_set_roampref(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + char smbuf[WLC_IOCTL_SMLEN]; + uint8 buf[MAX_BUF_SIZE]; + uint8 *pref = buf; + char *pcmd; + int num_ucipher_suites = 0; + int num_akm_suites = 0; + wpa_suite_t ucipher_suites[MAX_NUM_SUITES]; + wpa_suite_t akm_suites[MAX_NUM_SUITES]; + int num_tuples = 0; + int total_bytes = 0; + int total_len_left; + int i, j; + char hex[] = "XX"; + + pcmd = command + strlen(CMD_SET_ROAMPREF) + 1; + total_len_left = total_len - strlen(CMD_SET_ROAMPREF) + 1; + + num_akm_suites = simple_strtoul(pcmd, NULL, 16); + /* Increment for number of AKM suites field + space */ + pcmd += 3; + total_len_left -= 3; + + /* check to make sure pcmd does not overrun */ + if (total_len_left < (num_akm_suites * WIDTH_AKM_SUITE)) + return -1; + + memset(buf, 0, sizeof(buf)); + memset(akm_suites, 0, sizeof(akm_suites)); + memset(ucipher_suites, 0, sizeof(ucipher_suites)); + + /* Save the AKM suites passed in the command */ + for (i = 0; i < num_akm_suites; i++) { + /* Store the MSB first, as required by join_pref */ + for (j = 0; j < 4; j++) { + hex[0] = *pcmd++; + hex[1] = *pcmd++; + buf[j] = (uint8)simple_strtoul(hex, NULL, 16); + } + memcpy((uint8 *)&akm_suites[i], buf, sizeof(uint32)); + } + + total_len_left -= (num_akm_suites * WIDTH_AKM_SUITE); + num_ucipher_suites = simple_strtoul(pcmd, NULL, 16); + /* Increment for number of cipher suites field + space */ + pcmd += 3; + total_len_left -= 3; + + if (total_len_left < (num_ucipher_suites * WIDTH_AKM_SUITE)) + return -1; + + /* Save the cipher suites passed in the command */ + for (i = 0; i < num_ucipher_suites; i++) { + /* Store the MSB first, as required by join_pref */ + for (j = 0; j < 4; j++) { + hex[0] = *pcmd++; + hex[1] = *pcmd++; + buf[j] = (uint8)simple_strtoul(hex, NULL, 16); + } + memcpy((uint8 *)&ucipher_suites[i], buf, sizeof(uint32)); + } + + /* Join preference for RSSI + * Type : 1 byte (0x01) + * Length : 1 byte (0x02) + * Value : 2 bytes (reserved) + */ + *pref++ = WL_JOIN_PREF_RSSI; + *pref++ = JOIN_PREF_RSSI_LEN; + *pref++ = 0; + *pref++ = 0; + + /* Join preference for WPA + * Type : 1 byte (0x02) + * Length : 1 byte (not used) + * Value : (variable length) + * reserved: 1 byte + * count : 1 byte (no of tuples) + * Tuple1 : 12 bytes + * akm[4] + * ucipher[4] + * mcipher[4] + * Tuple2 : 12 bytes + * Tuplen : 12 bytes + */ + num_tuples = num_akm_suites * num_ucipher_suites; + if (num_tuples != 0) { + if (num_tuples <= JOIN_PREF_MAX_WPA_TUPLES) { + *pref++ = WL_JOIN_PREF_WPA; + *pref++ = 0; + *pref++ = 0; + *pref++ = (uint8)num_tuples; + total_bytes = JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE + + (JOIN_PREF_WPA_TUPLE_SIZE * num_tuples); + } else { + DHD_ERROR(("%s: Too many wpa configs for join_pref \n", __FUNCTION__)); + return -1; + } + } else { + /* No WPA config, configure only RSSI preference */ + total_bytes = JOIN_PREF_RSSI_SIZE; + } + + /* akm-ucipher-mcipher tuples in the format required for join_pref */ + for (i = 0; i < num_ucipher_suites; i++) { + for (j = 0; j < num_akm_suites; j++) { + memcpy(pref, (uint8 *)&akm_suites[j], WPA_SUITE_LEN); + pref += WPA_SUITE_LEN; + memcpy(pref, (uint8 *)&ucipher_suites[i], WPA_SUITE_LEN); + pref += WPA_SUITE_LEN; + /* Set to 0 to match any available multicast cipher */ + memset(pref, 0, WPA_SUITE_LEN); + pref += WPA_SUITE_LEN; + } + } + + prhex("join pref", (uint8 *)buf, total_bytes); + error = wldev_iovar_setbuf(dev, "join_pref", buf, total_bytes, smbuf, 
sizeof(smbuf), NULL); + if (error) { + DHD_ERROR(("Failed to set join_pref, error = %d\n", error)); + } + return error; +} +#endif /* defined(BCMFW_ROAM_ENABLE */ + +static int +wl_android_iolist_add(struct net_device *dev, struct list_head *head, struct io_cfg *config) +{ + struct io_cfg *resume_cfg; + s32 ret; + + resume_cfg = kzalloc(sizeof(struct io_cfg), GFP_KERNEL); + if (!resume_cfg) + return -ENOMEM; + + if (config->iovar) { + ret = wldev_iovar_getint(dev, config->iovar, &resume_cfg->param); + if (ret) { + DHD_ERROR(("%s: Failed to get current %s value\n", + __FUNCTION__, config->iovar)); + goto error; + } + + ret = wldev_iovar_setint(dev, config->iovar, config->param); + if (ret) { + DHD_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__, + config->iovar, config->param)); + goto error; + } + + resume_cfg->iovar = config->iovar; + } else { + resume_cfg->arg = kzalloc(config->len, GFP_KERNEL); + if (!resume_cfg->arg) { + ret = -ENOMEM; + goto error; + } + ret = wldev_ioctl(dev, config->ioctl, resume_cfg->arg, config->len, false); + if (ret) { + DHD_ERROR(("%s: Failed to get ioctl %d\n", __FUNCTION__, + config->ioctl)); + goto error; + } + ret = wldev_ioctl(dev, config->ioctl + 1, config->arg, config->len, true); + if (ret) { + DHD_ERROR(("%s: Failed to set %s to %d\n", __FUNCTION__, + config->iovar, config->param)); + goto error; + } + if (config->ioctl + 1 == WLC_SET_PM) + wl_cfg80211_update_power_mode(dev); + resume_cfg->ioctl = config->ioctl; + resume_cfg->len = config->len; + } + + list_add(&resume_cfg->list, head); + + return 0; +error: + kfree(resume_cfg->arg); + kfree(resume_cfg); + return ret; +} + +static void +wl_android_iolist_resume(struct net_device *dev, struct list_head *head) +{ + struct io_cfg *config; + struct list_head *cur, *q; + s32 ret = 0; + + list_for_each_safe(cur, q, head) { + config = list_entry(cur, struct io_cfg, list); + if (config->iovar) { + if (!ret) + ret = wldev_iovar_setint(dev, config->iovar, + config->param); + } else { + if (!ret) + ret = wldev_ioctl(dev, config->ioctl + 1, + config->arg, config->len, true); + if (config->ioctl + 1 == WLC_SET_PM) + wl_cfg80211_update_power_mode(dev); + kfree(config->arg); + } + list_del(cur); + kfree(config); + } +} +#ifdef WL11ULB +static int +wl_android_set_ulb_mode(struct net_device *dev, char *command, int total_len) +{ + int mode = 0; + + DHD_INFO(("set ulb mode (%s) \n", command)); + if (sscanf(command, "%*s %d", &mode) != 1) { + DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__)); + return -1; + } + return wl_cfg80211_set_ulb_mode(dev, mode); +} +static int +wl_android_set_ulb_bw(struct net_device *dev, char *command, int total_len) +{ + int bw = 0; + u8 *pos; + char *ifname = NULL; + DHD_INFO(("set ulb bw (%s) \n", command)); + + /* + * For sta/ap: IFNAME= DRIVER ULB_BW ifname + * For p2p: IFNAME=wlan0 DRIVER ULB_BW p2p-dev-wlan0 + */ + if (total_len < strlen(CMD_ULB_BW) + 2) + return -EINVAL; + + pos = command + strlen(CMD_ULB_BW) + 1; + bw = bcm_atoi(pos); + + if ((strlen(pos) >= 5)) { + ifname = pos + 2; + } + + DHD_INFO(("[ULB] ifname:%s ulb_bw:%d \n", ifname, bw)); + return wl_cfg80211_set_ulb_bw(dev, bw, ifname); +} +#endif /* WL11ULB */ +static int +wl_android_set_miracast(struct net_device *dev, char *command, int total_len) +{ + int mode, val; + int ret = 0; + struct io_cfg config; + + if (sscanf(command, "%*s %d", &mode) != 1) { + DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__)); + return -1; + } + + DHD_INFO(("%s: enter miracast mode %d\n", __FUNCTION__, mode)); + + if 
(miracast_cur_mode == mode) {
+ return 0;
+ }
+
+ wl_android_iolist_resume(dev, &miracast_resume_list);
+ miracast_cur_mode = MIRACAST_MODE_OFF;
+
+ switch (mode) {
+ case MIRACAST_MODE_SOURCE:
+ /* setting mchan_algo to platform specific value */
+ config.iovar = "mchan_algo";
+
+ ret = wldev_ioctl(dev, WLC_GET_BCNPRD, &val, sizeof(int), false);
+ if (!ret && val > 100) {
+ config.param = 0;
+ DHD_ERROR(("%s: Connected station's beacon interval: "
+ "%d and set mchan_algo to %d \n",
+ __FUNCTION__, val, config.param));
+ } else {
+ config.param = MIRACAST_MCHAN_ALGO;
+ }
+ ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+ if (ret) {
+ goto resume;
+ }
+
+ /* setting mchan_bw to platform specific value */
+ config.iovar = "mchan_bw";
+ config.param = MIRACAST_MCHAN_BW;
+ ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+ if (ret) {
+ goto resume;
+ }
+
+ /* setting ampdu_mpdu to platform specific value */
+ config.iovar = "ampdu_mpdu";
+ config.param = MIRACAST_AMPDU_SIZE;
+ ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+ if (ret) {
+ goto resume;
+ }
+ /* FALLTHROUGH */
+ /* Source mode shares most configurations with sink mode.
+ * Fall through here to avoid code duplication
+ */
+ case MIRACAST_MODE_SINK:
+ /* disable internal roaming */
+ config.iovar = "roam_off";
+ config.param = 1;
+ ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+ if (ret) {
+ goto resume;
+ }
+
+ /* turn off PM */
+ ret = wldev_ioctl(dev, WLC_GET_PM, &val, sizeof(val), false);
+ if (ret) {
+ goto resume;
+ }
+
+ if (val != PM_OFF) {
+ val = PM_OFF;
+ config.iovar = NULL;
+ config.ioctl = WLC_GET_PM;
+ config.arg = &val;
+ config.len = sizeof(int);
+ ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+ if (ret) {
+ goto resume;
+ }
+ }
+ break;
+ case MIRACAST_MODE_OFF:
+ default:
+ break;
+ }
+ miracast_cur_mode = mode;
+
+ return 0;
+
+resume:
+ DHD_ERROR(("%s: turn off miracast mode because of err %d\n", __FUNCTION__, ret));
+ wl_android_iolist_resume(dev, &miracast_resume_list);
+ return ret;
+}
+
+#define NETLINK_OXYGEN 30
+#define AIBSS_BEACON_TIMEOUT 10
+
+static struct sock *nl_sk = NULL;
+
+static void wl_netlink_recv(struct sk_buff *skb)
+{
+ WL_ERR(("netlink_recv called\n"));
+}
+
+static int wl_netlink_init(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+ struct netlink_kernel_cfg cfg = {
+ .input = wl_netlink_recv,
+ };
+#endif
+
+ if (nl_sk != NULL) {
+ WL_ERR(("nl_sk already exists\n"));
+ return BCME_ERROR;
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+ nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN,
+ 0, wl_netlink_recv, NULL, THIS_MODULE);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+ nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, THIS_MODULE, &cfg);
+#else
+ nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, &cfg);
+#endif
+
+ if (nl_sk == NULL) {
+ WL_ERR(("nl_sk is not ready\n"));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
+static void wl_netlink_deinit(void)
+{
+ if (nl_sk) {
+ netlink_kernel_release(nl_sk);
+ nl_sk = NULL;
+ }
+}
+
+s32
+wl_netlink_send_msg(int pid, int type, int seq, void *data, size_t size)
+{
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh = NULL;
+ int ret = -1;
+
+ if (nl_sk == NULL) {
+ WL_ERR(("nl_sk was not initialized\n"));
+ goto nlmsg_failure;
+ }
+
+ skb = alloc_skb(NLMSG_SPACE(size), GFP_ATOMIC);
+ if (skb == NULL) {
+ WL_ERR(("failed to allocate memory\n"));
+ goto nlmsg_failure;
+ }
+
+ nlh = nlmsg_put(skb,
0, 0, 0, size, 0); + if (nlh == NULL) { + WL_ERR(("failed to build nlmsg, skb_tailroom:%d, nlmsg_total_size:%d\n", + skb_tailroom(skb), nlmsg_total_size(size))); + dev_kfree_skb(skb); + goto nlmsg_failure; + } + + memcpy(nlmsg_data(nlh), data, size); + nlh->nlmsg_seq = seq; + nlh->nlmsg_type = type; + + /* netlink_unicast() takes ownership of the skb and frees it itself. */ + ret = netlink_unicast(nl_sk, skb, pid, 0); + WL_DBG(("netlink_unicast() pid=%d, ret=%d\n", pid, ret)); + +nlmsg_failure: + return ret; +} + + +int wl_keep_alive_set(struct net_device *dev, char* extra, int total_len) +{ + char buf[256]; + const char *str; + wl_mkeep_alive_pkt_t mkeep_alive_pkt; + wl_mkeep_alive_pkt_t *mkeep_alive_pktp; + int buf_len; + int str_len; + int res = -1; + uint period_msec = 0; + + if (extra == NULL) + { + DHD_ERROR(("%s: extra is NULL\n", __FUNCTION__)); + return -1; + } + if (sscanf(extra, "%d", &period_msec) != 1) + { + DHD_ERROR(("%s: sscanf error. check period_msec value\n", __FUNCTION__)); + return -EINVAL; + } + DHD_ERROR(("%s: period_msec is %d\n", __FUNCTION__, period_msec)); + + memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t)); + + str = "mkeep_alive"; + str_len = strlen(str); + strncpy(buf, str, str_len); + buf[ str_len ] = '\0'; + mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1); + mkeep_alive_pkt.period_msec = period_msec; + buf_len = str_len + 1; + mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION); + mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN); + + /* Setup keep alive zero for null packet generation */ + mkeep_alive_pkt.keep_alive_id = 0; + mkeep_alive_pkt.len_bytes = 0; + buf_len += WL_MKEEP_ALIVE_FIXED_LEN; + /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and + * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no + * guarantee that the buffer is properly aligned. + */ + memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN); + + if ((res = wldev_ioctl(dev, WLC_SET_VAR, buf, buf_len, TRUE)) < 0) + { + DHD_ERROR(("%s:keep_alive set failed. res[%d]\n", __FUNCTION__, res)); + } + else + { + DHD_ERROR(("%s:keep_alive set ok. 
res[%d]\n", __FUNCTION__, res)); + } + + return res; +} + +static const char * +get_string_by_separator(char *result, int result_len, const char *src, char separator) +{ + char *end = result + result_len - 1; + while ((result != end) && (*src != separator) && (*src)) { + *result++ = *src++; + } + *result = 0; + if (*src == separator) { + ++src; + } + return src; +} + +int +wl_android_set_roam_offload_bssid_list(struct net_device *dev, const char *cmd) +{ + char sbuf[32]; + int i, cnt, size, err, ioctl_buf_len; + roamoffl_bssid_list_t *bssid_list; + const char *str = cmd; + char *ioctl_buf; + dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(); + + str = get_string_by_separator(sbuf, 32, str, ','); + cnt = bcm_atoi(sbuf); + cnt = MIN(cnt, MAX_ROAMOFFL_BSSID_NUM); + + if ((cnt > 0) && + (((dhdp->op_mode & DHD_FLAG_STA_MODE) && (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) || + FALSE)) { + WL_ERR(("Can't set ROAMOFFL_BSSID when enabled STA-SoftAP or WES\n")); + return -EINVAL; + } + + size = sizeof(int32) + sizeof(struct ether_addr) * cnt; + WL_ERR(("ROAM OFFLOAD BSSID LIST %d BSSIDs, size %d\n", cnt, size)); + bssid_list = kmalloc(size, GFP_KERNEL); + if (bssid_list == NULL) { + WL_ERR(("%s: memory alloc for bssid list(%d) failed\n", + __FUNCTION__, size)); + return -ENOMEM; + } + ioctl_buf_len = size + 64; + ioctl_buf = kmalloc(ioctl_buf_len, GFP_KERNEL); + if (ioctl_buf == NULL) { + WL_ERR(("%s: memory alloc for ioctl_buf(%d) failed\n", + __FUNCTION__, ioctl_buf_len)); + kfree(bssid_list); + return -ENOMEM; + } + + for (i = 0; i < cnt; i++) { + str = get_string_by_separator(sbuf, 32, str, ','); + bcm_ether_atoe(sbuf, &bssid_list->bssid[i]); + } + + bssid_list->cnt = (int32)cnt; + err = wldev_iovar_setbuf(dev, "roamoffl_bssid_list", + bssid_list, size, ioctl_buf, ioctl_buf_len, NULL); + kfree(bssid_list); + kfree(ioctl_buf); + + return err; +} + +#ifdef P2PRESP_WFDIE_SRC +static int wl_android_get_wfdie_resp(struct net_device *dev, char *command, int total_len) +{ + int error = 0; + int bytes_written = 0; + int only_resp_wfdsrc = 0; + + error = wldev_iovar_getint(dev, "p2p_only_resp_wfdsrc", &only_resp_wfdsrc); + if (error) { + DHD_ERROR(("%s: Failed to get the mode for only_resp_wfdsrc, error = %d\n", + __FUNCTION__, error)); + return -1; + } + + bytes_written = snprintf(command, total_len, "%s %d", + CMD_P2P_GET_WFDIE_RESP, only_resp_wfdsrc); + + return bytes_written; +} + +static int wl_android_set_wfdie_resp(struct net_device *dev, int only_resp_wfdsrc) +{ + int error = 0; + + error = wldev_iovar_setint(dev, "p2p_only_resp_wfdsrc", only_resp_wfdsrc); + if (error) { + DHD_ERROR(("%s: Failed to set only_resp_wfdsrc %d, error = %d\n", + __FUNCTION__, only_resp_wfdsrc, error)); + return -1; + } + + return 0; +} +#endif /* P2PRESP_WFDIE_SRC */ + +#ifdef BT_WIFI_HANDOVER +static int +wl_tbow_teardown(struct net_device *dev, char *command, int total_len) +{ + int err = BCME_OK; + char buf[WLC_IOCTL_SMLEN]; + tbow_setup_netinfo_t netinfo; + memset(&netinfo, 0, sizeof(netinfo)); + netinfo.opmode = TBOW_HO_MODE_TEARDOWN; + + err = wldev_iovar_setbuf_bsscfg(dev, "tbow_doho", &netinfo, + sizeof(tbow_setup_netinfo_t), buf, WLC_IOCTL_SMLEN, 0, NULL); + if (err < 0) { + WL_ERR(("tbow_doho iovar error %d\n", err)); + return err; + } + return err; +} +#endif /* BT_WIFI_HANOVER */ + +#ifdef SET_RPS_CPUS +static int +wl_android_set_rps_cpus(struct net_device *dev, char *command, int total_len) +{ + int error, enable; + + enable = command[strlen(CMD_RPSMODE) + 1] - '0'; + error = dhd_rps_cpus_enable(dev, enable); + +#if 
defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE) && defined(WL_CFG80211)
+ if (!error) {
+ void *dhdp = wl_cfg80211_get_dhdp();
+ if (enable) {
+ DHD_TRACE(("%s : set ack suppress. TCPACK_SUP_HOLD.\n", __FUNCTION__));
+ dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD);
+ } else {
+ DHD_TRACE(("%s : clear ack suppress.\n", __FUNCTION__));
+ dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+ }
+ }
+#endif /* DHDTCPACK_SUPPRESS && BCMPCIE && WL_CFG80211 */
+
+ return error;
+}
+#endif /* SET_RPS_CPUS */
+#ifdef P2P_LISTEN_OFFLOADING
+s32
+wl_cfg80211_p2plo_offload(struct net_device *dev, char *cmd, char* buf, int len)
+{
+ int ret = 0;
+
+ WL_ERR(("Entry cmd:%s arg_len:%d \n", cmd, len));
+
+ if (strncmp(cmd, "P2P_LO_START", strlen("P2P_LO_START")) == 0) {
+ ret = wl_cfg80211_p2plo_listen_start(dev, buf, len);
+ } else if (strncmp(cmd, "P2P_LO_STOP", strlen("P2P_LO_STOP")) == 0) {
+ ret = wl_cfg80211_p2plo_listen_stop(dev);
+ } else {
+ WL_ERR(("Request for Unsupported CMD:%s \n", buf));
+ ret = -EINVAL;
+ }
+ return ret;
+}
+#endif /* P2P_LISTEN_OFFLOADING */
+
+int
+wl_android_murx_bfe_cap(struct net_device *dev, int val)
+{
+ int err = BCME_OK;
+ int iface_count = wl_cfg80211_iface_count();
+
+ if (iface_count > 1) {
+ DHD_ERROR(("%s: murx_bfe_cap change is not allowed when "
+ "there are multiple interfaces\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ /* Now there is only a single interface */
+ err = wldev_iovar_setint(dev, "murx_bfe_cap", val);
+ if (err) {
+ DHD_ERROR(("%s: Failed to set murx_bfe_cap IOVAR to %d, "
+ "error %d\n", __FUNCTION__, val, err));
+ err = -EINVAL;
+ }
+ return err;
+}
+
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+#define PRIVATE_COMMAND_MAX_LEN 8192
+ int ret = 0;
+ char *command = NULL;
+ int bytes_written = 0;
+ android_wifi_priv_cmd priv_cmd;
+
+ net_os_wake_lock(net);
+
+ if (!ifr->ifr_data) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall()) {
+ compat_android_wifi_priv_cmd compat_priv_cmd;
+ if (copy_from_user(&compat_priv_cmd, ifr->ifr_data,
+ sizeof(compat_android_wifi_priv_cmd))) {
+ ret = -EFAULT;
+ goto exit;
+
+ }
+ priv_cmd.buf = compat_ptr(compat_priv_cmd.buf);
+ priv_cmd.used_len = compat_priv_cmd.used_len;
+ priv_cmd.total_len = compat_priv_cmd.total_len;
+ } else
+#endif /* CONFIG_COMPAT */
+ {
+ if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) {
+ ret = -EFAULT;
+ goto exit;
+ }
+ }
+ if ((priv_cmd.total_len > PRIVATE_COMMAND_MAX_LEN) || (priv_cmd.total_len < 0)) {
+ DHD_ERROR(("%s: private command too long\n", __FUNCTION__));
+ ret = -EINVAL;
+ goto exit;
+ }
+ command = kmalloc((priv_cmd.total_len + 1), GFP_KERNEL);
+ if (!command)
+ {
+ DHD_ERROR(("%s: failed to allocate memory\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
+ ret = -EFAULT;
+ goto exit;
+ }
+ command[priv_cmd.total_len] = '\0';
+
+ DHD_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name));
+
+ if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) {
+ DHD_INFO(("%s, Received regular START command\n", __FUNCTION__));
+ bytes_written = wl_android_wifi_on(net);
+ }
+ else if (strnicmp(command, CMD_SETFWPATH, strlen(CMD_SETFWPATH)) == 0) {
+ bytes_written = wl_android_set_fwpath(net, command, priv_cmd.total_len);
+ }
+
+ if (!g_wifi_on) {
+ DHD_ERROR(("%s: Ignore private cmd \"%s\" - iface %s is down\n",
+ __FUNCTION__, command, ifr->ifr_name));
+ ret = 0;
+ goto exit;
+ }
+
+ if
+	if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) {
+		bytes_written = wl_android_wifi_off(net, FALSE);
+	}
+	else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) {
+		wl_cfg80211_set_passive_scan(net, command);
+	}
+	else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) {
+		wl_cfg80211_set_passive_scan(net, command);
+	}
+	else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) {
+		bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_LINKSPEED, strlen(CMD_LINKSPEED)) == 0) {
+		bytes_written = wl_android_get_link_speed(net, command, priv_cmd.total_len);
+	}
+#ifdef PKT_FILTER_SUPPORT
+	else if (strnicmp(command, CMD_RXFILTER_START, strlen(CMD_RXFILTER_START)) == 0) {
+		bytes_written = net_os_enable_packet_filter(net, 1);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_STOP, strlen(CMD_RXFILTER_STOP)) == 0) {
+		bytes_written = net_os_enable_packet_filter(net, 0);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_ADD, strlen(CMD_RXFILTER_ADD)) == 0) {
+		int filter_num = *(command + strlen(CMD_RXFILTER_ADD) + 1) - '0';
+		bytes_written = net_os_rxfilter_add_remove(net, TRUE, filter_num);
+	}
+	else if (strnicmp(command, CMD_RXFILTER_REMOVE, strlen(CMD_RXFILTER_REMOVE)) == 0) {
+		int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0';
+		bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num);
+	}
+#endif /* PKT_FILTER_SUPPORT */
+	else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) {
+		/* TBD: BTCOEXSCAN-START */
+	}
+	else if (strnicmp(command, CMD_BTCOEXSCAN_STOP, strlen(CMD_BTCOEXSCAN_STOP)) == 0) {
+		/* TBD: BTCOEXSCAN-STOP */
+	}
+	else if (strnicmp(command, CMD_BTCOEXMODE, strlen(CMD_BTCOEXMODE)) == 0) {
+#ifdef WL_CFG80211
+		void *dhdp = wl_cfg80211_get_dhdp();
+		bytes_written = wl_cfg80211_set_btcoex_dhcp(net, dhdp, command);
+#else
+#ifdef PKT_FILTER_SUPPORT
+		uint mode = *(command + strlen(CMD_BTCOEXMODE) + 1) - '0';
+
+		if (mode == 1)
+			net_os_enable_packet_filter(net, 0); /* DHCP starts */
+		else
+			net_os_enable_packet_filter(net, 1); /* DHCP ends */
+#endif /* PKT_FILTER_SUPPORT */
+#endif /* WL_CFG80211 */
+	}
+	else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) {
+		bytes_written = wl_android_set_suspendopt(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_SETSUSPENDMODE, strlen(CMD_SETSUSPENDMODE)) == 0) {
+		bytes_written = wl_android_set_suspendmode(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
+		uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
+		bytes_written = wldev_set_band(net, band);
+	}
+	else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) {
+		bytes_written = wl_android_get_band(net, command, priv_cmd.total_len);
+	}
+#ifdef WL_CFG80211
+	/* The CUSTOMER_SET_COUNTRY feature is defined only for the GGSM model */
+	else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
+		/*
+		 * Usage examples:
+		 * DRIVER COUNTRY US
+		 * DRIVER COUNTRY US/7
+		 */
+		char *country_code = command + strlen(CMD_COUNTRY) + 1;
+		char *rev_info_delim = country_code + 2; /* 2 bytes of country code */
+		int revinfo = -1;
+		if ((rev_info_delim) &&
+			(strnicmp(rev_info_delim, CMD_COUNTRY_DELIMITER,
+			strlen(CMD_COUNTRY_DELIMITER)) == 0) &&
+			(rev_info_delim + 1)) {
+			revinfo = bcm_atoi(rev_info_delim + 1);
+		}
+		bytes_written = wldev_set_country(net, country_code, true, true, revinfo);
+#ifdef FCC_PWR_LIMIT_2G
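+		/* (Editor's note: the FCC 2 GHz power limit is unconditionally
+		 * deactivated below after a country change, so a previously
+		 * applied per-country power cap does not persist under the new
+		 * regulatory settings.)
+		 */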
+		if (wldev_iovar_setint(net, "fccpwrlimit2g", FALSE)) {
+			DHD_ERROR(("%s: fccpwrlimit2g deactivation failed\n", __FUNCTION__));
+		} else {
+			DHD_ERROR(("%s: fccpwrlimit2g is deactivated\n", __FUNCTION__));
+		}
+#endif /* FCC_PWR_LIMIT_2G */
+	}
+#endif /* WL_CFG80211 */
+	else if (strnicmp(command, CMD_SET_CSA, strlen(CMD_SET_CSA)) == 0) {
+		bytes_written = wl_android_set_csa(net, command, priv_cmd.total_len);
+	} else if (strnicmp(command, CMD_80211_MODE, strlen(CMD_80211_MODE)) == 0) {
+		bytes_written = wl_android_get_80211_mode(net, command, priv_cmd.total_len);
+	} else if (strnicmp(command, CMD_CHANSPEC, strlen(CMD_CHANSPEC)) == 0) {
+		bytes_written = wl_android_get_chanspec(net, command, priv_cmd.total_len);
+	} else if (strnicmp(command, CMD_DATARATE, strlen(CMD_DATARATE)) == 0) {
+		bytes_written = wl_android_get_datarate(net, command, priv_cmd.total_len);
+	} else if (strnicmp(command, CMD_ASSOC_CLIENTS, strlen(CMD_ASSOC_CLIENTS)) == 0) {
+		bytes_written = wl_android_get_assoclist(net, command, priv_cmd.total_len);
+	}
+
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef WLTDLS
+	else if (strnicmp(command, CMD_TDLS_RESET, strlen(CMD_TDLS_RESET)) == 0) {
+		bytes_written = wl_android_tdls_reset(net);
+	}
+#endif /* WLTDLS */
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+
+#ifdef PNO_SUPPORT
+	else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) {
+		bytes_written = dhd_dev_pno_stop_for_ssid(net);
+	}
+#ifndef WL_SCHED_SCAN
+	else if (strnicmp(command, CMD_PNOSETUP_SET, strlen(CMD_PNOSETUP_SET)) == 0) {
+		bytes_written = wl_android_set_pno_setup(net, command, priv_cmd.total_len);
+	}
+#endif /* !WL_SCHED_SCAN */
+	else if (strnicmp(command, CMD_PNOENABLE_SET, strlen(CMD_PNOENABLE_SET)) == 0) {
+		int enable = *(command + strlen(CMD_PNOENABLE_SET) + 1) - '0';
+		bytes_written = (enable)? 0 : dhd_dev_pno_stop_for_ssid(net);
+	}
+	else if (strnicmp(command, CMD_WLS_BATCHING, strlen(CMD_WLS_BATCHING)) == 0) {
+		bytes_written = wls_parse_batching_cmd(net, command, priv_cmd.total_len);
+	}
+#endif /* PNO_SUPPORT */
+	else if (strnicmp(command, CMD_P2P_DEV_ADDR, strlen(CMD_P2P_DEV_ADDR)) == 0) {
+		bytes_written = wl_android_get_p2p_dev_addr(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_P2P_SET_NOA, strlen(CMD_P2P_SET_NOA)) == 0) {
+		int skip = strlen(CMD_P2P_SET_NOA) + 1;
+		bytes_written = wl_cfg80211_set_p2p_noa(net, command + skip,
+			priv_cmd.total_len - skip);
+	}
+#ifdef P2P_LISTEN_OFFLOADING
+	else if (strnicmp(command, CMD_P2P_LISTEN_OFFLOAD, strlen(CMD_P2P_LISTEN_OFFLOAD)) == 0) {
+		u8 *sub_command = strchr(command, ' ');
+		bytes_written = wl_cfg80211_p2plo_offload(net, command, sub_command,
+			sub_command ?
strlen(sub_command) : 0); + } +#endif /* P2P_LISTEN_OFFLOADING */ +#ifdef WL_NAN + else if (strnicmp(command, CMD_NAN, strlen(CMD_NAN)) == 0) { + bytes_written = wl_cfg80211_nan_cmd_handler(net, command, + priv_cmd.total_len); + } +#endif /* WL_NAN */ +#if !defined WL_ENABLE_P2P_IF + else if (strnicmp(command, CMD_P2P_GET_NOA, strlen(CMD_P2P_GET_NOA)) == 0) { + bytes_written = wl_cfg80211_get_p2p_noa(net, command, priv_cmd.total_len); + } +#endif /* WL_ENABLE_P2P_IF */ + else if (strnicmp(command, CMD_P2P_SET_PS, strlen(CMD_P2P_SET_PS)) == 0) { + int skip = strlen(CMD_P2P_SET_PS) + 1; + bytes_written = wl_cfg80211_set_p2p_ps(net, command + skip, + priv_cmd.total_len - skip); + } + else if (strnicmp(command, CMD_P2P_ECSA, strlen(CMD_P2P_ECSA)) == 0) { + int skip = strlen(CMD_P2P_ECSA) + 1; + bytes_written = wl_cfg80211_set_p2p_ecsa(net, command + skip, + priv_cmd.total_len - skip); + } +#ifdef WL_CFG80211 + else if (strnicmp(command, CMD_SET_AP_WPS_P2P_IE, + strlen(CMD_SET_AP_WPS_P2P_IE)) == 0) { + int skip = strlen(CMD_SET_AP_WPS_P2P_IE) + 3; + bytes_written = wl_cfg80211_set_wps_p2p_ie(net, command + skip, + priv_cmd.total_len - skip, *(command + skip - 2) - '0'); + } +#endif /* WL_CFG80211 */ +#if defined(WL_SUPPORT_AUTO_CHANNEL) + else if (strnicmp(command, CMD_GET_BEST_CHANNELS, + strlen(CMD_GET_BEST_CHANNELS)) == 0) { + bytes_written = wl_cfg80211_get_best_channels(net, command, + priv_cmd.total_len); + } +#endif /* WL_SUPPORT_AUTO_CHANNEL */ +#if defined(WL_SUPPORT_AUTO_CHANNEL) + else if (strnicmp(command, CMD_SET_HAPD_AUTO_CHANNEL, + strlen(CMD_SET_HAPD_AUTO_CHANNEL)) == 0) { + int skip = strlen(CMD_SET_HAPD_AUTO_CHANNEL) + 1; + bytes_written = wl_android_set_auto_channel(net, (const char*)command+skip, command, + priv_cmd.total_len); + } +#endif /* WL_SUPPORT_AUTO_CHANNEL */ +#ifdef CUSTOMER_HW4_PRIVATE_CMD +#ifdef SUPPORT_SET_LPC + else if (strnicmp(command, CMD_HAPD_LPC_ENABLED, + strlen(CMD_HAPD_LPC_ENABLED)) == 0) { + int skip = strlen(CMD_HAPD_LPC_ENABLED) + 3; + wl_android_set_lpc(net, (const char*)command+skip); + } +#endif /* SUPPORT_SET_LPC */ +#ifdef SUPPORT_TRIGGER_HANG_EVENT + else if (strnicmp(command, CMD_TEST_FORCE_HANG, + strlen(CMD_TEST_FORCE_HANG)) == 0) { + int skip = strlen(CMD_TEST_FORCE_HANG) + 1; + net_os_send_hang_message_reason(net, (const char*)command+skip); + } +#endif /* SUPPORT_TRIGGER_HANG_EVENT */ + else if (strnicmp(command, CMD_CHANGE_RL, strlen(CMD_CHANGE_RL)) == 0) + bytes_written = wl_android_ch_res_rl(net, true); + else if (strnicmp(command, CMD_RESTORE_RL, strlen(CMD_RESTORE_RL)) == 0) + bytes_written = wl_android_ch_res_rl(net, false); + else if (strnicmp(command, CMD_SET_RMC_ENABLE, strlen(CMD_SET_RMC_ENABLE)) == 0) { + int rmc_enable = *(command + strlen(CMD_SET_RMC_ENABLE) + 1) - '0'; + bytes_written = wl_android_rmc_enable(net, rmc_enable); + } + else if (strnicmp(command, CMD_SET_RMC_TXRATE, strlen(CMD_SET_RMC_TXRATE)) == 0) { + int rmc_txrate; + sscanf(command, "%*s %10d", &rmc_txrate); + bytes_written = wldev_iovar_setint(net, "rmc_txrate", rmc_txrate * 2); + } + else if (strnicmp(command, CMD_SET_RMC_ACTPERIOD, strlen(CMD_SET_RMC_ACTPERIOD)) == 0) { + int actperiod; + sscanf(command, "%*s %10d", &actperiod); + bytes_written = wldev_iovar_setint(net, "rmc_actf_time", actperiod); + } + else if (strnicmp(command, CMD_SET_RMC_IDLEPERIOD, strlen(CMD_SET_RMC_IDLEPERIOD)) == 0) { + int acktimeout; + sscanf(command, "%*s %10d", &acktimeout); + acktimeout *= 1000; + bytes_written = wldev_iovar_setint(net, "rmc_acktmo", acktimeout); + } + else 
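+	/* (Editor's note on the RMC conversions above: Broadcom rate iovars
+	 * are commonly expressed in 500 kbps units, which would explain the
+	 * "rmc_txrate" value being written as Mbps * 2; likewise the idle
+	 * period is multiplied by 1000 before being written to "rmc_acktmo",
+	 * suggesting a ms-to-us conversion. Both are inferences from the
+	 * code, not documented iovar contracts.)
+	 */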
if (strnicmp(command, CMD_SET_RMC_LEADER, strlen(CMD_SET_RMC_LEADER)) == 0) { + int skip = strlen(CMD_SET_RMC_LEADER) + 1; + bytes_written = wl_android_rmc_set_leader(net, (const char*)command+skip); + } + else if (strnicmp(command, CMD_SET_RMC_EVENT, + strlen(CMD_SET_RMC_EVENT)) == 0) + bytes_written = wl_android_set_rmc_event(net, command, priv_cmd.total_len); + else if (strnicmp(command, CMD_GET_SCSCAN, strlen(CMD_GET_SCSCAN)) == 0) { + bytes_written = wl_android_get_singlecore_scan(net, command, priv_cmd.total_len); + } + else if (strnicmp(command, CMD_SET_SCSCAN, strlen(CMD_SET_SCSCAN)) == 0) { + bytes_written = wl_android_set_singlecore_scan(net, command, priv_cmd.total_len); + } +#ifdef TEST_TX_POWER_CONTROL + else if (strnicmp(command, CMD_TEST_SET_TX_POWER, + strlen(CMD_TEST_SET_TX_POWER)) == 0) { + int skip = strlen(CMD_TEST_SET_TX_POWER) + 1; + wl_android_set_tx_power(net, (const char*)command+skip); + } + else if (strnicmp(command, CMD_TEST_GET_TX_POWER, + strlen(CMD_TEST_GET_TX_POWER)) == 0) { + wl_android_get_tx_power(net, command, priv_cmd.total_len); + } +#endif /* TEST_TX_POWER_CONTROL */ + else if (strnicmp(command, CMD_SARLIMIT_TX_CONTROL, + strlen(CMD_SARLIMIT_TX_CONTROL)) == 0) { + int skip = strlen(CMD_SARLIMIT_TX_CONTROL) + 1; + wl_android_set_sarlimit_txctrl(net, (const char*)command+skip); + } +#ifdef IPV6_NDO_SUPPORT + else if (strnicmp(command, CMD_NDRA_LIMIT, strlen(CMD_NDRA_LIMIT)) == 0) { + bytes_written = wl_android_nd_ra_limit(net, command, priv_cmd.total_len); + } +#endif /* IPV6_NDO_SUPPORT */ +#endif /* CUSTOMER_HW4_PRIVATE_CMD */ + else if (strnicmp(command, CMD_HAPD_MAC_FILTER, strlen(CMD_HAPD_MAC_FILTER)) == 0) { + int skip = strlen(CMD_HAPD_MAC_FILTER) + 1; + wl_android_set_mac_address_filter(net, (const char*)command+skip); + } + else if (strnicmp(command, CMD_SETROAMMODE, strlen(CMD_SETROAMMODE)) == 0) + bytes_written = wl_android_set_roam_mode(net, command, priv_cmd.total_len); +#if defined(BCMFW_ROAM_ENABLE) + else if (strnicmp(command, CMD_SET_ROAMPREF, strlen(CMD_SET_ROAMPREF)) == 0) { + bytes_written = wl_android_set_roampref(net, command, priv_cmd.total_len); + } +#endif /* BCMFW_ROAM_ENABLE */ + else if (strnicmp(command, CMD_MIRACAST, strlen(CMD_MIRACAST)) == 0) + bytes_written = wl_android_set_miracast(net, command, priv_cmd.total_len); +#ifdef WL11ULB + else if (strnicmp(command, CMD_ULB_MODE, strlen(CMD_ULB_MODE)) == 0) + bytes_written = wl_android_set_ulb_mode(net, command, priv_cmd.total_len); + else if (strnicmp(command, CMD_ULB_BW, strlen(CMD_ULB_BW)) == 0) + bytes_written = wl_android_set_ulb_bw(net, command, priv_cmd.total_len); +#endif /* WL11ULB */ + else if (strnicmp(command, CMD_SETIBSSBEACONOUIDATA, strlen(CMD_SETIBSSBEACONOUIDATA)) == 0) + bytes_written = wl_android_set_ibss_beacon_ouidata(net, + command, priv_cmd.total_len); + else if (strnicmp(command, CMD_KEEP_ALIVE, strlen(CMD_KEEP_ALIVE)) == 0) { + int skip = strlen(CMD_KEEP_ALIVE) + 1; + bytes_written = wl_keep_alive_set(net, command + skip, priv_cmd.total_len - skip); + } + else if (strnicmp(command, CMD_ROAM_OFFLOAD, strlen(CMD_ROAM_OFFLOAD)) == 0) { + int enable = *(command + strlen(CMD_ROAM_OFFLOAD) + 1) - '0'; + bytes_written = wl_cfg80211_enable_roam_offload(net, enable); + } + else if (strnicmp(command, CMD_ROAM_OFFLOAD_APLIST, strlen(CMD_ROAM_OFFLOAD_APLIST)) == 0) { + bytes_written = wl_android_set_roam_offload_bssid_list(net, + command + strlen(CMD_ROAM_OFFLOAD_APLIST) + 1); + } +#if defined(WL_VIRTUAL_APSTA) + else if (strnicmp(command, CMD_INTERFACE_CREATE, 
strlen(CMD_INTERFACE_CREATE)) == 0) {
+		char *name = (command + strlen(CMD_INTERFACE_CREATE) +1);
+		WL_INFORM(("Creating %s interface\n", name));
+		bytes_written = wl_cfg80211_interface_create(net, name);
+	}
+	else if (strnicmp(command, CMD_INTERFACE_DELETE, strlen(CMD_INTERFACE_DELETE)) == 0) {
+		char *name = (command + strlen(CMD_INTERFACE_DELETE) +1);
+		WL_INFORM(("Deleting %s interface\n", name));
+		bytes_written = wl_cfg80211_interface_delete(net, name);
+	}
+#endif /* defined (WL_VIRTUAL_APSTA) */
+#ifdef P2PRESP_WFDIE_SRC
+	else if (strnicmp(command, CMD_P2P_SET_WFDIE_RESP,
+		strlen(CMD_P2P_SET_WFDIE_RESP)) == 0) {
+		int mode = *(command + strlen(CMD_P2P_SET_WFDIE_RESP) + 1) - '0';
+		bytes_written = wl_android_set_wfdie_resp(net, mode);
+	} else if (strnicmp(command, CMD_P2P_GET_WFDIE_RESP,
+		strlen(CMD_P2P_GET_WFDIE_RESP)) == 0) {
+		bytes_written = wl_android_get_wfdie_resp(net, command, priv_cmd.total_len);
+	}
+#endif /* P2PRESP_WFDIE_SRC */
+	else if (strnicmp(command, CMD_DFS_AP_MOVE, strlen(CMD_DFS_AP_MOVE)) == 0) {
+		char *data = (command + strlen(CMD_DFS_AP_MOVE) +1);
+		bytes_written = wl_cfg80211_dfs_ap_move(net, data, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_WBTEXT_ENABLE, strlen(CMD_WBTEXT_ENABLE)) == 0) {
+		bytes_written = wl_android_wbtext(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_WBTEXT_PROFILE_CONFIG,
+		strlen(CMD_WBTEXT_PROFILE_CONFIG)) == 0) {
+		char *data = (command + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+		bytes_written = wl_cfg80211_wbtext_config(net, data, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_WBTEXT_WEIGHT_CONFIG,
+		strlen(CMD_WBTEXT_WEIGHT_CONFIG)) == 0) {
+		char *data = (command + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+		bytes_written = wl_cfg80211_wbtext_weight_config(net, data,
+			command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_WBTEXT_TABLE_CONFIG,
+		strlen(CMD_WBTEXT_TABLE_CONFIG)) == 0) {
+		char *data = (command + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+		bytes_written = wl_cfg80211_wbtext_table_config(net, data,
+			command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_WBTEXT_DELTA_CONFIG,
+		strlen(CMD_WBTEXT_DELTA_CONFIG)) == 0) {
+		char *data = (command + strlen(CMD_WBTEXT_DELTA_CONFIG) + 1);
+		bytes_written = wl_cfg80211_wbtext_delta_config(net, data,
+			command, priv_cmd.total_len);
+	}
+#ifdef SET_RPS_CPUS
+	else if (strnicmp(command, CMD_RPSMODE, strlen(CMD_RPSMODE)) == 0) {
+		bytes_written = wl_android_set_rps_cpus(net, command, priv_cmd.total_len);
+	}
+#endif /* SET_RPS_CPUS */
+#ifdef WLWFDS
+	else if (strnicmp(command, CMD_ADD_WFDS_HASH, strlen(CMD_ADD_WFDS_HASH)) == 0) {
+		bytes_written = wl_android_set_wfds_hash(net, command, priv_cmd.total_len, 1);
+	}
+	else if (strnicmp(command, CMD_DEL_WFDS_HASH, strlen(CMD_DEL_WFDS_HASH)) == 0) {
+		bytes_written = wl_android_set_wfds_hash(net, command, priv_cmd.total_len, 0);
+	}
+#endif /* WLWFDS */
+#ifdef BT_WIFI_HANDOVER
+	else if (strnicmp(command, CMD_TBOW_TEARDOWN, strlen(CMD_TBOW_TEARDOWN)) == 0) {
+		ret = wl_tbow_teardown(net, command, priv_cmd.total_len);
+	}
+#endif /* BT_WIFI_HANDOVER */
+#ifdef FCC_PWR_LIMIT_2G
+	else if (strnicmp(command, CMD_GET_FCC_PWR_LIMIT_2G,
+		strlen(CMD_GET_FCC_PWR_LIMIT_2G)) == 0) {
+		bytes_written = wl_android_get_fcc_pwr_limit_2g(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_SET_FCC_PWR_LIMIT_2G,
+		strlen(CMD_SET_FCC_PWR_LIMIT_2G)) == 0) {
+		bytes_written = wl_android_set_fcc_pwr_limit_2g(net, command, priv_cmd.total_len);
+	}
+#endif /* FCC_PWR_LIMIT_2G */
+	else if (strnicmp(command, CMD_MURX_BFE_CAP,
+		strlen(CMD_MURX_BFE_CAP)) == 0) {
+		uint val = *(command + strlen(CMD_MURX_BFE_CAP) + 1) - '0';
+		bytes_written = wl_android_murx_bfe_cap(net, val);
+	}
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+	else if (strnicmp(command, CMD_GET_BSS_INFO, strlen(CMD_GET_BSS_INFO)) == 0) {
+		bytes_written = wl_cfg80211_get_bss_info(net, command, priv_cmd.total_len);
+	}
+	else if (strnicmp(command, CMD_GET_ASSOC_REJECT_INFO, strlen(CMD_GET_ASSOC_REJECT_INFO))
+		== 0) {
+		bytes_written = wl_cfg80211_get_connect_failed_status(net, command,
+			priv_cmd.total_len);
+	}
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+	else if (strnicmp(command, ENABLE_RANDOM_MAC, strlen(ENABLE_RANDOM_MAC)) == 0) {
+		bytes_written = wl_cfg80211_set_random_mac(net, TRUE);
+	} else if (strnicmp(command, DISABLE_RANDOM_MAC, strlen(DISABLE_RANDOM_MAC)) == 0) {
+		bytes_written = wl_cfg80211_set_random_mac(net, FALSE);
+	}
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+#ifdef DHD_LOG_DUMP
+	else if (strnicmp(command, CMD_NEW_DEBUG_PRINT_DUMP,
+		strlen(CMD_NEW_DEBUG_PRINT_DUMP)) == 0) {
+		dhd_pub_t *dhdp = wl_cfg80211_get_dhdp();
+#ifdef DHD_TRACE_WAKE_LOCK
+		dhd_wk_lock_stats_dump(dhdp);
+#endif /* DHD_TRACE_WAKE_LOCK */
+		dhd_schedule_log_dump(dhdp);
+#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+		dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP;
+		dhd_bus_mem_dump(dhdp);
+#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */
+	}
+#endif /* DHD_LOG_DUMP */
+#ifdef CONNECTION_STATISTICS
+	else if (strnicmp(command, CMD_GET_CONNECTION_STATS,
+		strlen(CMD_GET_CONNECTION_STATS)) == 0) {
+		bytes_written = wl_android_get_connection_stats(net, command,
+			priv_cmd.total_len);
+	}
+#endif /* CONNECTION_STATISTICS */
+	else {
+		DHD_ERROR(("Unknown PRIVATE command %s - ignored\n", command));
+		snprintf(command, 3, "OK");
+		bytes_written = strlen("OK");
+	}
+
+	if (bytes_written >= 0) {
+		if ((bytes_written == 0) && (priv_cmd.total_len > 0))
+			command[0] = '\0';
+		if (bytes_written >= priv_cmd.total_len) {
+			DHD_ERROR(("%s: bytes_written = %d\n", __FUNCTION__, bytes_written));
+			bytes_written = priv_cmd.total_len;
+		} else {
+			bytes_written++;
+		}
+		priv_cmd.used_len = bytes_written;
+		if (copy_to_user(priv_cmd.buf, command, bytes_written)) {
+			DHD_ERROR(("%s: failed to copy data to user buffer\n", __FUNCTION__));
+			ret = -EFAULT;
+		}
+	}
+	else {
+		ret = bytes_written;
+	}
+
+exit:
+	net_os_wake_unlock(net);
+	if (command) {
+		kfree(command);
+	}
+
+	return ret;
+}
+
+int wl_android_init(void)
+{
+	int ret = 0;
+
+#ifdef ENABLE_INSMOD_NO_FW_LOAD
+	dhd_download_fw_on_driverload = FALSE;
+#endif /* ENABLE_INSMOD_NO_FW_LOAD */
+	if (!iface_name[0]) {
+		memset(iface_name, 0, IFNAMSIZ);
+		bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ);
+	}
+
+	wl_netlink_init();
+
+	return ret;
+}
+
+int wl_android_exit(void)
+{
+	int ret = 0;
+	struct io_cfg *cur, *q;
+
+	wl_netlink_deinit();
+
+	list_for_each_entry_safe(cur, q, &miracast_resume_list, list) {
+		list_del(&cur->list);
+		kfree(cur);
+	}
+
+	return ret;
+}
+
+void wl_android_post_init(void)
+{
+
+#ifdef ENABLE_4335BT_WAR
+	bcm_bt_unlock(lock_cookie_wifi);
+	printk("%s: btlock released\n", __FUNCTION__);
+#endif /* ENABLE_4335BT_WAR */
+
+	if (!dhd_download_fw_on_driverload)
+		g_wifi_on = FALSE;
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_android.h b/drivers/net/wireless/bcmdhd/wl_android.h
new file mode 100644
index 000000000000..14ffc6820a44
--- /dev/null
+++ 
b/drivers/net/wireless/bcmdhd/wl_android.h @@ -0,0 +1,80 @@ +/* + * Linux cfg80211 driver - Android related functions + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_android.h 608194 2015-12-24 04:34:35Z $ + */ + +#include +#include +#include + +/* If any feature uses the Generic Netlink Interface, put it here to enable WL_GENL + * automatically + */ +#if defined(BT_WIFI_HANDOVER) || defined(WL_NAN) +#define WL_GENL +#endif + + + +/** + * Android platform dependent functions, feel free to add Android specific functions here + * (save the macros in dhd). Please do NOT declare functions that are NOT exposed to dhd + * or cfg, define them as static in wl_android.c + */ + +/** + * wl_android_init will be called from module init function (dhd_module_init now), similarly + * wl_android_exit will be called from module exit function (dhd_module_cleanup now) + */ +int wl_android_init(void); +int wl_android_exit(void); +void wl_android_post_init(void); +int wl_android_wifi_on(struct net_device *dev); +int wl_android_wifi_off(struct net_device *dev, bool on_failure); +int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd); + +s32 wl_netlink_send_msg(int pid, int type, int seq, void *data, size_t size); + +/* hostap mac mode */ +#define MACLIST_MODE_DISABLED 0 +#define MACLIST_MODE_DENY 1 +#define MACLIST_MODE_ALLOW 2 + +/* max number of assoc list */ +#define MAX_NUM_OF_ASSOCLIST 64 + +/* Bandwidth */ +#define WL_CH_BANDWIDTH_20MHZ 20 +#define WL_CH_BANDWIDTH_40MHZ 40 +#define WL_CH_BANDWIDTH_80MHZ 80 +/* max number of mac filter list + * restrict max number to 10 as maximum cmd string size is 255 + */ +#define MAX_NUM_MAC_FILT 10 + +int wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist); +int wl_android_set_roam_offload_bssid_list(struct net_device *dev, const char *cmd); diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.c b/drivers/net/wireless/bcmdhd/wl_cfg80211.c new file mode 100644 index 000000000000..90e7a9c2f1a6 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.c @@ -0,0 +1,17335 @@ +/* + * Linux cfg80211 driver + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at 
http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_cfg80211.c 610196 2016-01-06 11:20:45Z $ + */ +/* */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef PNO_SUPPORT +#include +#endif /* PNO_SUPPORT */ + +#if defined(WL_VENDOR_EXT_SUPPORT) +#include +#endif /* defined(WL_VENDOR_EXT_SUPPORT) */ + +#ifdef WL_NAN +#include +#endif /* WL_NAN */ + +#ifdef PROP_TXSTATUS +#include +#endif + +#ifdef BCMPCIE +#include +#endif + +#ifdef WL11U +#if !defined(WL_ENABLE_P2P_IF) && !defined(WL_CFG80211_P2P_DEV_IF) +#error You should enable 'WL_ENABLE_P2P_IF' or 'WL_CFG80211_P2P_DEV_IF' \ + according to Kernel version and is supported only in Android-JB +#endif /* !WL_ENABLE_P2P_IF && !WL_CFG80211_P2P_DEV_IF */ +#endif /* WL11U */ + + +#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED)) + +static struct device *cfg80211_parent_dev = NULL; +/* g_bcm_cfg should be static. 
Do not change */
+static struct bcm_cfg80211 *g_bcm_cfg = NULL;
+#ifdef CUSTOMER_HW4_DEBUG
+u32 wl_dbg_level = WL_DBG_ERR | WL_DBG_P2P_ACTION;
+#else
+u32 wl_dbg_level = WL_DBG_ERR;
+#endif /* CUSTOMER_HW4_DEBUG */
+
+#define MAX_WAIT_TIME 1500
+#ifdef WLAIBSS_MCHAN
+#define IBSS_IF_NAME "ibss%d"
+#endif /* WLAIBSS_MCHAN */
+
+#ifdef VSDB
+/* sleep time to keep STA's connecting or connection for continuous af tx or finding a peer */
+#define DEFAULT_SLEEP_TIME_VSDB 120
+#define OFF_CHAN_TIME_THRESHOLD_MS 200
+#define AF_RETRY_DELAY_TIME 40
+
+/* if sta is connected or connecting, sleep for a while before retry af tx or finding a peer */
+#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg) \
+	do { \
+		if (wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg)) || \
+			wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) { \
+			OSL_SLEEP(DEFAULT_SLEEP_TIME_VSDB); \
+		} \
+	} while (0)
+#else /* VSDB */
+/* if not VSDB, do nothing */
+#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg)
+#endif /* VSDB */
+
+#ifdef WL_CFG80211_SYNC_GON
+#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) \
+	(wl_get_drv_status_all(cfg, SENDING_ACT_FRM) || \
+	wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN))
+#else
+#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) wl_get_drv_status_all(cfg, SENDING_ACT_FRM)
+#endif /* WL_CFG80211_SYNC_GON */
+
+#define DNGL_FUNC(func, parameters) func parameters
+#define COEX_DHCP
+
+#define WLAN_EID_SSID 0
+#define CH_MIN_5G_CHANNEL 34
+#define CH_MIN_2G_CHANNEL 1
+#define ACTIVE_SCAN 1
+#define PASSIVE_SCAN 0
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+(entry) = list_first_entry((ptr), type, member); \
+_Pragma("GCC diagnostic pop") \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+entry = container_of((ptr), type, member); \
+_Pragma("GCC diagnostic pop") \
+
+#else
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+(entry) = list_first_entry((ptr), type, member); \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+entry = container_of((ptr), type, member); \
+
+#endif /* STRICT_GCC_WARNINGS */
+
+enum rmc_event_type {
+	RMC_EVENT_NONE,
+	RMC_EVENT_LEADER_CHECK_FAIL
+};
+
+/* This is to override regulatory domains defined in the cfg80211 module (reg.c)
+ * By default the world regulatory domain defined in reg.c puts the flags
+ * NL80211_RRF_PASSIVE_SCAN and NL80211_RRF_NO_IBSS on 5GHz channels (for 36..48 and 149..165).
+ * With respect to these flags, wpa_supplicant doesn't start p2p operations on 5GHz channels.
+ * All the changes to the world regulatory domain are to be done here.
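+ * (Editor's note: concretely, the REG_RULE() entries below pass 0 for the
+ * flags argument, i.e. no NL80211_RRF_PASSIVE_SCAN / NL80211_RRF_NO_IBSS
+ * restrictions on the listed 2.4 GHz and 5 GHz ranges.)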
+ *
+ * This definition requires disabling the missing-field-initializer warning,
+ * as the ieee80211_regdomain definition differs between plain Linux and Android
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"")
+#endif
+static const struct ieee80211_regdomain brcm_regdom = {
+	.n_reg_rules = 4,
+	.alpha2 = "99",
+	.reg_rules = {
+		/* IEEE 802.11b/g, channels 1..11 */
+		REG_RULE(2412-10, 2472+10, 40, 6, 20, 0),
+		/* If any */
+		/* IEEE 802.11 channel 14 - Only JP enables
+		 * this and for 802.11b only
+		 */
+		REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
+		/* IEEE 802.11a, channel 36..64 */
+		REG_RULE(5150-10, 5350+10, 40, 6, 20, 0),
+		/* IEEE 802.11a, channel 100..165 */
+		REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), }
+};
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+	4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
+	(defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF))
+static const struct ieee80211_iface_limit common_if_limits[] = {
+	{
+		/*
+		 * Driver can support up to 2 AP's
+		 */
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_AP),
+	},
+	{
+		/*
+		 * During P2P-GO removal, P2P-GO is first changed to STA and only later
+		 * removed. So the maximum possible number of STA interfaces is set
+		 * according to the kernel version:
+		 *
+		 * less than linux-3.8 - max:3 (wlan0 + p2p0 + group removal of p2p-p2p0-x)
+		 * linux-3.8 and above - max:2 (wlan0 + group removal of p2p-wlan0-x)
+		 */
+#ifdef WL_ENABLE_P2P_IF
+		.max = 3,
+#else
+		.max = 2,
+#endif /* WL_ENABLE_P2P_IF */
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT),
+	},
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
+#endif /* WL_CFG80211_P2P_DEV_IF */
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_ADHOC),
+	},
+};
+#ifdef BCM4330_CHIP
+#define NUM_DIFF_CHANNELS 1
+#else
+#define NUM_DIFF_CHANNELS 2
+#endif
+static const struct ieee80211_iface_combination
+common_iface_combinations[] = {
+	{
+		.num_different_channels = NUM_DIFF_CHANNELS,
+		/*
+		 * max_interfaces = 4
+		 * The max no of interfaces will be used in the dual p2p case.
+		 * {STA, P2P Device, P2P Group 1, P2P Group 2}. Though we
+		 * will not be using the STA functionality in this case, it
+		 * will remain registered as it is the primary interface.
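+		 *
+		 * (Editor's note: cfg80211 validates interface add/change
+		 * requests against this matrix, so e.g. STA + P2P device +
+		 * two P2P groups may coexist, spread over at most
+		 * NUM_DIFF_CHANNELS distinct channels.)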
+		 */
+		.max_interfaces = 4,
+		.limits = common_if_limits,
+		.n_limits = ARRAY_SIZE(common_if_limits),
+	},
+};
+#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */
+
+/* Data Element Definitions */
+#define WPS_ID_CONFIG_METHODS 0x1008
+#define WPS_ID_REQ_TYPE 0x103A
+#define WPS_ID_DEVICE_NAME 0x1011
+#define WPS_ID_VERSION 0x104A
+#define WPS_ID_DEVICE_PWD_ID 0x1012
+#define WPS_ID_REQ_DEV_TYPE 0x106A
+#define WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS 0x1053
+#define WPS_ID_PRIM_DEV_TYPE 0x1054
+
+/* Device Password ID */
+#define DEV_PW_DEFAULT 0x0000
+#define DEV_PW_USER_SPECIFIED 0x0001
+#define DEV_PW_MACHINE_SPECIFIED 0x0002
+#define DEV_PW_REKEY 0x0003
+#define DEV_PW_PUSHBUTTON 0x0004
+#define DEV_PW_REGISTRAR_SPECIFIED 0x0005
+
+/* Config Methods */
+#define WPS_CONFIG_USBA 0x0001
+#define WPS_CONFIG_ETHERNET 0x0002
+#define WPS_CONFIG_LABEL 0x0004
+#define WPS_CONFIG_DISPLAY 0x0008
+#define WPS_CONFIG_EXT_NFC_TOKEN 0x0010
+#define WPS_CONFIG_INT_NFC_TOKEN 0x0020
+#define WPS_CONFIG_NFC_INTERFACE 0x0040
+#define WPS_CONFIG_PUSHBUTTON 0x0080
+#define WPS_CONFIG_KEYPAD 0x0100
+#define WPS_CONFIG_VIRT_PUSHBUTTON 0x0280
+#define WPS_CONFIG_PHY_PUSHBUTTON 0x0480
+#define WPS_CONFIG_VIRT_DISPLAY 0x2008
+#define WPS_CONFIG_PHY_DISPLAY 0x4008
+
+#define PM_BLOCK 1
+#define PM_ENABLE 0
+
+
+#define WL_AKM_SUITE_SHA256_1X 0x000FAC05
+#define WL_AKM_SUITE_SHA256_PSK 0x000FAC06
+
+#ifndef IBSS_COALESCE_ALLOWED
+#define IBSS_COALESCE_ALLOWED 0
+#endif
+
+#ifndef IBSS_INITIAL_SCAN_ALLOWED
+#define IBSS_INITIAL_SCAN_ALLOWED 0
+#endif
+
+#define CUSTOM_RETRY_MASK 0xff000000 /* Mask for retry counter of custom dwell time */
+#define LONG_LISTEN_TIME 2000
+
+#define MAX_SCAN_ABORT_WAIT_CNT 20
+#define WAIT_SCAN_ABORT_OSL_SLEEP_TIME 10
+
+#define IDSUP_4WAY_HANDSHAKE_TIMEOUT 10000
+enum idsup_event_type {
+	IDSUP_EVENT_SUCCESS = 0,
+	IDSUP_EVENT_4WAY_HANDSHAKE_TIMEOUT
+};
+/*
+ * cfg80211_ops api/callback list
+ */
+static s32 wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+	const struct ether_addr *sa, const struct ether_addr *bssid,
+	u8 **pheader, u32 *body_len, u8 *pbody);
+static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request,
+	struct cfg80211_ssid *this_ssid);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request);
+#else
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
+#ifdef WLAIBSS_MCHAN
+static bcm_struct_cfgdev* bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name);
+static s32 bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev);
+#endif /* WLAIBSS_MCHAN */
+static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_ibss_params *params);
+static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy,
+	struct net_device *dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
+	struct net_device *dev, const u8 *mac,
+	struct station_info *sinfo);
+#else
+static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
+	struct net_device *dev, u8 *mac,
+	struct station_info *sinfo);
+#endif
+static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+	struct net_device *dev, bool enabled,
+	s32 timeout);
+static int
wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+	u16 reason_code);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+	enum nl80211_tx_power_setting type, s32 mbm);
+#else
+static s32
+wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+	enum nl80211_tx_power_setting type, s32 dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy,
+	struct wireless_dev *wdev, s32 *dbm);
+#else
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy,
+	struct net_device *dev,
+	u8 key_idx, bool unicast, bool multicast);
+static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	struct key_params *params);
+static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr);
+static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr,
+	void *cookie, void (*callback) (void *cookie,
+	struct key_params *params));
+static s32 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+	struct net_device *dev, u8 key_idx);
+static s32 wl_cfg80211_resume(struct wiphy *wiphy);
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+	2, 0))
+static s32 wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+	bcm_struct_cfgdev *cfgdev, u64 cookie);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+static s32 wl_cfg80211_del_station(
+	struct wiphy *wiphy, struct net_device *ndev,
+	struct station_del_parameters *params);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32 wl_cfg80211_del_station(struct wiphy *wiphy,
+	struct net_device *ndev, const u8* mac_addr);
+#else
+static s32 wl_cfg80211_del_station(struct wiphy *wiphy,
+	struct net_device *ndev, u8* mac_addr);
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32 wl_cfg80211_change_station(struct wiphy *wiphy,
+	struct net_device *dev, const u8 *mac, struct station_parameters *params);
+#else
+static s32 wl_cfg80211_change_station(struct wiphy *wiphy,
+	struct net_device *dev, u8 *mac, struct station_parameters *params);
+#endif
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0) */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
+#else
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy);
+#endif /* KERNEL_VERSION(2, 6, 39) || WL_COMPAT_WIRELESS */
+static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+	struct cfg80211_pmksa *pmksa);
+static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
+	struct net_device *dev);
+static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
+static void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg);
+static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, bool aborted, bool fw_abort);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
+#if (defined(CONFIG_ARCH_MSM) &&
defined(TDLS_MGMT_VERSION2)) || (LINUX_VERSION_CODE < \ + KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) +static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, const u8 *data, size_t len); +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) +static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, const u8 *data, size_t len); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) +static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, bool initiator, const u8 *data, size_t len); +#else +static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, + u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, const u8 *data, + size_t len); +#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +static s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, enum nl80211_tdls_operation oper); +#else +static s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, + u8 *peer, enum nl80211_tdls_operation oper); +#endif +#endif +#ifdef WL_SCHED_SCAN +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) +static int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid); +#else +static int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev); +#endif +#endif +#if defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) +bcm_struct_cfgdev* +wl_cfg80211_create_iface(struct wiphy *wiphy, enum nl80211_iftype + iface_type, u8 *mac_addr, const char *name); +s32 +wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev); +#endif /* defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) */ + +s32 wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg, + struct net_device *ndev, s32 bsscfg_idx, + enum nl80211_iftype iface_type, s32 del, u8 *addr); +s32 wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg, + struct net_device *ndev, s32 bsscfg_idx, + enum nl80211_iftype iface_type, s32 del, u8 *addr); +chanspec_t wl_chspec_driver_to_host(chanspec_t chanspec); +chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec); +#ifdef WL11ULB +static s32 wl_cfg80211_get_ulb_bw(struct wireless_dev *wdev); +static chanspec_t wl_cfg80211_ulb_get_min_bw_chspec(struct wireless_dev *wdev, s32 bssidx); +static s32 wl_cfg80211_ulbbw_to_ulbchspec(u32 ulb_bw); +#else +static inline chanspec_t wl_cfg80211_ulb_get_min_bw_chspec( + struct wireless_dev *wdev, s32 bssidx) +{ + return WL_CHANSPEC_BW_20; +} +#endif /* WL11ULB */ + +/* + * event & event Q handlers for cfg80211 interfaces + */ +static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg); +static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg); +static s32 wl_event_handler(void *data); +static void wl_init_eq(struct bcm_cfg80211 *cfg); +static void wl_flush_eq(struct bcm_cfg80211 *cfg); +static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg); +static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags); +static void wl_init_eq_lock(struct bcm_cfg80211 *cfg); +static void wl_init_event_handler(struct bcm_cfg80211 *cfg); +static struct wl_event_q 
*wl_deq_event(struct bcm_cfg80211 *cfg);
+static s32 wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 type,
+	const wl_event_msg_t *msg, void *data);
+static void wl_put_event(struct wl_event_q *e);
+static void wl_wakeup_event(struct bcm_cfg80211 *cfg);
+static s32 wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_notify_connect_status(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_notify_roaming_status(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data, bool completed);
+static s32 wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+static s32 wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#ifdef BT_WIFI_HANDOVER
+static s32 wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+#endif /* BT_WIFI_HANDOVER */
+#ifdef WL_SCHED_SCAN
+static s32
+wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, void *data);
+#endif /* WL_SCHED_SCAN */
+#ifdef PNO_SUPPORT
+static s32 wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif /* PNO_SUPPORT */
+#ifdef GSCAN_SUPPORT
+static s32 wl_notify_gscan_event(struct bcm_cfg80211 *wl, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif /* GSCAN_SUPPORT */
+static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
+	enum wl_status state, bool set);
+#ifdef DHD_LOSSLESS_ROAMING
+static s32 wl_notify_roam_prep_status(struct bcm_cfg80211 *cfg,
+	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg);
+#endif /* DHD_LOSSLESS_ROAMING */
+#ifdef CUSTOM_EVENT_PM_WAKE
+static s32 wl_check_pmstatus(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif /* CUSTOM_EVENT_PM_WAKE */
+
+#ifdef WLTDLS
+static s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#endif /* WLTDLS */
+/*
+ * register/deregister parent device
+ */
+static void wl_cfg80211_clear_parent_dev(void);
+/*
+ * ioctl utilities
+ */
+
+/*
+ * cfg80211 set_wiphy_params utilities
+ */
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_rts(struct net_device *dev, u32 frag_threshold);
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l);
+
+/*
+ * cfg profile utilities
+ */
+static s32 wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const wl_event_msg_t *e, const void *data, s32 item);
+static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item);
+static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+
+/*
+ * cfg80211 connect utilities
+ */
+static s32 wl_set_wpa_version(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_auth_type(struct net_device *dev,
+	struct
cfg80211_connect_params *sme);
+static s32 wl_set_set_cipher(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_key_mgmt(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_set_set_sharedkey(struct net_device *dev,
+	struct cfg80211_connect_params *sme);
+static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+static s32 wl_ch_to_chanspec(struct net_device *dev, int ch,
+	struct wl_join_params *join_params, size_t *join_params_size);
+void wl_cfg80211_clear_security(struct bcm_cfg80211 *cfg);
+
+/*
+ * information element utilities
+ */
+static void wl_rst_ie(struct bcm_cfg80211 *cfg);
+static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v);
+static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, const u8 *ie_stream, u32 *ie_size,
+	bool roam);
+static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size);
+static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size);
+static u32 wl_get_ielen(struct bcm_cfg80211 *cfg);
+
+#ifdef WL11U
+bcm_tlv_t *
+wl_cfg80211_find_interworking_ie(u8 *parse, u32 len);
+static s32
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
+	uint8 ie_id, uint8 *data, uint8 data_len);
+#endif /* WL11U */
+
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev, void *data);
+static void wl_free_wdev(struct bcm_cfg80211 *cfg);
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+static int
+#else
+static void
+#endif /* kernel version < 3.10.11 */
+wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+static s32 wl_inform_bss(struct bcm_cfg80211 *cfg);
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam);
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam);
+static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
+s32 wl_cfg80211_channel_to_freq(u32 channel);
+
+
+static void wl_cfg80211_work_handler(struct work_struct *work);
+static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, const u8 *mac_addr,
+	struct key_params *params);
+/*
+ * key endianness swap utilities
+ */
+static void swap_key_from_BE(struct wl_wsec_key *key);
+static void swap_key_to_BE(struct wl_wsec_key *key);
+
+/*
+ * bcm_cfg80211 memory init/deinit utilities
+ */
+static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg);
+static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg);
+
+static void wl_delay(u32 ms);
+
+/*
+ * ibss mode utilities
+ */
+static bool wl_is_ibssmode(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg);
+
+/*
+ * link up/down , default configuration utilities
+ */
+static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg);
+static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg);
+static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e);
+static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e,
+	struct net_device *ndev);
+static bool wl_is_nonetwork(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e);
+static void wl_link_up(struct bcm_cfg80211 *cfg);
+static void wl_link_down(struct bcm_cfg80211 *cfg);
+static s32 wl_config_ifmode(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 iftype);
+static void wl_init_conf(struct wl_conf *conf);
+static s32
wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info *if_event_info,
+	struct net_device* ndev);
+
+int wl_cfg80211_get_ioctl_version(void);
+
+/*
+ * find most significant bit set
+ */
+static __used u32 wl_find_msb(u16 bit16);
+
+/*
+ * rfkill support
+ */
+static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup);
+static int wl_rfkill_set(void *data, bool blocked);
+#ifdef DEBUGFS_CFG80211
+static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg);
+static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg);
+#endif
+
+static wl_scan_params_t *wl_cfg80211_scan_alloc_params(int channel,
+	int nprobes, int *out_params_size);
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role);
+
+#ifdef WL_CFG80211_ACL
+/* ACL */
+static int wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev,
+	const struct cfg80211_acl_data *acl);
+#endif /* WL_CFG80211_ACL */
+
+/*
+ * Some external functions, TODO: move them to dhd_linux.h
+ */
+int dhd_add_monitor(char *name, struct net_device **new_ndev);
+int dhd_del_monitor(struct net_device *ndev);
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+
+
+#ifdef DHD_IFDEBUG
+void wl_dump_ifinfo(struct bcm_cfg80211 *cfg);
+#endif
+
+#ifdef P2P_LISTEN_OFFLOADING
+s32 wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
+
+static int wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	const struct ether_addr *bssid);
+
+static int bw2cap[] = { 0, 0, WLC_BW_CAP_20MHZ, WLC_BW_CAP_40MHZ, WLC_BW_CAP_80MHZ,
+	WLC_BW_CAP_160MHZ, WLC_BW_CAP_160MHZ };
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
+#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
+	cfg80211_disconnected(dev, reason, ie, len, gfp);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0))
+#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
+	cfg80211_disconnected(dev, reason, ie, len, loc_gen, gfp);
+#endif
+
+#ifdef RSSI_OFFSET
+static s32 wl_rssi_offset(s32 rssi)
+{
+	rssi += RSSI_OFFSET;
+	if (rssi > 0)
+		rssi = 0;
+	return rssi;
+}
+#else
+#define wl_rssi_offset(x) x
+#endif
+
+#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || \
+	(akm) == RSN_AKM_UNSPECIFIED || \
+	(akm) == RSN_AKM_PSK)
+
+
+extern int dhd_wait_pend8021x(struct net_device *dev);
+#ifdef PROP_TXSTATUS_VSDB
+extern int disable_proptx;
+#endif /* PROP_TXSTATUS_VSDB */
+
+
+extern int passive_channel_skip;
+
+static s32
+wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+static s32
+wl_csa_complete_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+	const wl_event_msg_t *e, void *data);
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0)) && (LINUX_VERSION_CODE <= \
+	KERNEL_VERSION(3, 7, 0)))
+struct chan_info {
+	int freq;
+	int chan_type;
+};
+#endif
+
+
+#if (WL_DBG_LEVEL > 0)
+#define WL_DBG_ESTR_MAX 50
+static s8 wl_dbg_estr[][WL_DBG_ESTR_MAX] = {
+	"SET_SSID", "JOIN", "START", "AUTH", "AUTH_IND",
+	"DEAUTH", "DEAUTH_IND", "ASSOC", "ASSOC_IND", "REASSOC",
+	"REASSOC_IND", "DISASSOC", "DISASSOC_IND", "QUIET_START", "QUIET_END",
+	"BEACON_RX", "LINK", "MIC_ERROR", "NDIS_LINK", "ROAM",
+	"TXFAIL", "PMKID_CACHE", "RETROGRADE_TSF", "PRUNE", "AUTOAUTH",
+	"EAPOL_MSG", "SCAN_COMPLETE", "ADDTS_IND", "DELTS_IND", "BCNSENT_IND",
+	"BCNRX_MSG", "BCNLOST_MSG", "ROAM_PREP", "PFN_NET_FOUND",
+	"PFN_NET_LOST",
+	"RESET_COMPLETE", "JOIN_START", "ROAM_START",
"ASSOC_START", + "IBSS_ASSOC", + "RADIO", "PSM_WATCHDOG", + "WLC_E_XXX_ASSOC_START", "WLC_E_XXX_ASSOC_ABORT", + "PROBREQ_MSG", + "SCAN_CONFIRM_IND", "PSK_SUP", "COUNTRY_CODE_CHANGED", + "EXCEEDED_MEDIUM_TIME", "ICV_ERROR", + "UNICAST_DECODE_ERROR", "MULTICAST_DECODE_ERROR", "TRACE", + "WLC_E_BTA_HCI_EVENT", "IF", "WLC_E_P2P_DISC_LISTEN_COMPLETE", + "RSSI", "PFN_SCAN_COMPLETE", "WLC_E_EXTLOG_MSG", + "ACTION_FRAME", "ACTION_FRAME_COMPLETE", "WLC_E_PRE_ASSOC_IND", + "WLC_E_PRE_REASSOC_IND", "WLC_E_CHANNEL_ADOPTED", "WLC_E_AP_STARTED", + "WLC_E_DFS_AP_STOP", "WLC_E_DFS_AP_RESUME", "WLC_E_WAI_STA_EVENT", + "WLC_E_WAI_MSG", "WLC_E_ESCAN_RESULT", "WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE", + "WLC_E_PROBRESP_MSG", "WLC_E_P2P_PROBREQ_MSG", "WLC_E_DCS_REQUEST", "WLC_E_FIFO_CREDIT_MAP", + "WLC_E_ACTION_FRAME_RX", "WLC_E_WAKE_EVENT", "WLC_E_RM_COMPLETE" +}; +#endif /* WL_DBG_LEVEL */ + +#define CHAN2G(_channel, _freq, _flags) { \ + .band = NL80211_BAND_2GHZ , \ + .center_freq = (_freq), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define CHAN5G(_channel, _flags) { \ + .band = NL80211_BAND_5GHZ , \ + .center_freq = 5000 + (5 * (_channel)), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define RATE_TO_BASE100KBPS(rate) (((rate) * 10) / 2) +#define RATETAB_ENT(_rateid, _flags) \ + { \ + .bitrate = RATE_TO_BASE100KBPS(_rateid), \ + .hw_value = (_rateid), \ + .flags = (_flags), \ + } + +static struct ieee80211_rate __wl_rates[] = { + RATETAB_ENT(DOT11_RATE_1M, 0), + RATETAB_ENT(DOT11_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(DOT11_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(DOT11_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(DOT11_RATE_6M, 0), + RATETAB_ENT(DOT11_RATE_9M, 0), + RATETAB_ENT(DOT11_RATE_12M, 0), + RATETAB_ENT(DOT11_RATE_18M, 0), + RATETAB_ENT(DOT11_RATE_24M, 0), + RATETAB_ENT(DOT11_RATE_36M, 0), + RATETAB_ENT(DOT11_RATE_48M, 0), + RATETAB_ENT(DOT11_RATE_54M, 0) +}; + +#define wl_a_rates (__wl_rates + 4) +#define wl_a_rates_size 8 +#define wl_g_rates (__wl_rates + 0) +#define wl_g_rates_size 12 + +static struct ieee80211_channel __wl_2ghz_channels[] = { + CHAN2G(1, 2412, 0), + CHAN2G(2, 2417, 0), + CHAN2G(3, 2422, 0), + CHAN2G(4, 2427, 0), + CHAN2G(5, 2432, 0), + CHAN2G(6, 2437, 0), + CHAN2G(7, 2442, 0), + CHAN2G(8, 2447, 0), + CHAN2G(9, 2452, 0), + CHAN2G(10, 2457, 0), + CHAN2G(11, 2462, 0), + CHAN2G(12, 2467, 0), + CHAN2G(13, 2472, 0), + CHAN2G(14, 2484, 0) +}; + +static struct ieee80211_channel __wl_5ghz_a_channels[] = { + CHAN5G(34, 0), CHAN5G(36, 0), + CHAN5G(38, 0), CHAN5G(40, 0), + CHAN5G(42, 0), CHAN5G(44, 0), + CHAN5G(46, 0), CHAN5G(48, 0), + CHAN5G(52, 0), CHAN5G(56, 0), + CHAN5G(60, 0), CHAN5G(64, 0), + CHAN5G(100, 0), CHAN5G(104, 0), + CHAN5G(108, 0), CHAN5G(112, 0), + CHAN5G(116, 0), CHAN5G(120, 0), + CHAN5G(124, 0), CHAN5G(128, 0), + CHAN5G(132, 0), CHAN5G(136, 0), + CHAN5G(140, 0), CHAN5G(144, 0), + CHAN5G(149, 0), CHAN5G(153, 0), + CHAN5G(157, 0), CHAN5G(161, 0), + CHAN5G(165, 0) +}; + +static struct ieee80211_supported_band __wl_band_2ghz = { + .band = NL80211_BAND_2GHZ , + .channels = __wl_2ghz_channels, + .n_channels = ARRAY_SIZE(__wl_2ghz_channels), + .bitrates = wl_g_rates, + .n_bitrates = wl_g_rates_size +}; + +static struct ieee80211_supported_band __wl_band_5ghz_a = { + .band = NL80211_BAND_5GHZ , + .channels = __wl_5ghz_a_channels, + .n_channels = ARRAY_SIZE(__wl_5ghz_a_channels), + .bitrates = wl_a_rates, + 
.n_bitrates = wl_a_rates_size
+};
+
+static const u32 __wl_cipher_suites[] = {
+	WLAN_CIPHER_SUITE_WEP40,
+	WLAN_CIPHER_SUITE_WEP104,
+	WLAN_CIPHER_SUITE_TKIP,
+	WLAN_CIPHER_SUITE_CCMP,
+	WLAN_CIPHER_SUITE_AES_CMAC,
+};
+
+#ifdef WL_SUPPORT_ACS
+/*
+ * The firmware code required for this feature to work is currently under
+ * the BCMINTERNAL flag. In future, if this is to be enabled, we need to bring
+ * the required firmware code out of the BCMINTERNAL flag.
+ */
+struct wl_dump_survey {
+	u32 obss;
+	u32 ibss;
+	u32 no_ctg;
+	u32 no_pckt;
+	u32 tx;
+	u32 idle;
+};
+#endif /* WL_SUPPORT_ACS */
+
+
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+static int maxrxpktglom = 0;
+#endif
+
+/* IOCtl version read from targeted driver */
+static int ioctl_version;
+#ifdef DEBUGFS_CFG80211
+#define S_SUBLOGLEVEL 20
+static const struct {
+	u32 log_level;
+	char *sublogname;
+} sublogname_map[] = {
+	{WL_DBG_ERR, "ERR"},
+	{WL_DBG_INFO, "INFO"},
+	{WL_DBG_DBG, "DBG"},
+	{WL_DBG_SCAN, "SCAN"},
+	{WL_DBG_TRACE, "TRACE"},
+	{WL_DBG_P2P_ACTION, "P2PACTION"}
+};
+#endif
+
+#ifdef CUSTOMER_HW4_DEBUG
+uint prev_dhd_console_ms = 0;
+u32 prev_wl_dbg_level = 0;
+bool wl_scan_timeout_dbg_enabled = 0;
+static void wl_scan_timeout_dbg_set(void);
+static void wl_scan_timeout_dbg_clear(void);
+
+static void wl_scan_timeout_dbg_set(void)
+{
+	WL_ERR(("Enter \n"));
+	prev_dhd_console_ms = dhd_console_ms;
+	prev_wl_dbg_level = wl_dbg_level;
+
+	dhd_console_ms = 1;
+	wl_dbg_level |= (WL_DBG_ERR | WL_DBG_P2P_ACTION | WL_DBG_SCAN);
+
+	wl_scan_timeout_dbg_enabled = 1;
+}
+static void wl_scan_timeout_dbg_clear(void)
+{
+	WL_ERR(("Enter \n"));
+	dhd_console_ms = prev_dhd_console_ms;
+	wl_dbg_level = prev_wl_dbg_level;
+
+	wl_scan_timeout_dbg_enabled = 0;
+}
+#endif /* CUSTOMER_HW4_DEBUG */
+
+/* watchdog timer for disconnecting when fw is not associated for FW_ASSOC_WATCHDOG_TIME ms */
+uint32 fw_assoc_watchdog_ms = 0;
+bool fw_assoc_watchdog_started = 0;
+#define FW_ASSOC_WATCHDOG_TIME 10 * 1000 /* msec */
+
+#ifdef DHD_IFDEBUG
+
+void wl_dump_ifinfo(struct bcm_cfg80211 *cfg)
+{
+	WL_ERR(("cfg=%p\n", cfg));
+	if (cfg) {
+		WL_ERR(("cfg->wdev=%p\n", bcmcfg_to_prmry_wdev(cfg)));
+		if (bcmcfg_to_prmry_wdev(cfg)) {
+			WL_ERR(("cfg->wdev->wiphy=%p\n", bcmcfg_to_wiphy(cfg)));
+			WL_ERR(("cfg->wdev->netdev=%p\n", bcmcfg_to_prmry_ndev(cfg)));
+		}
+	}
+}
+#endif
+
+static void wl_add_remove_pm_enable_work(struct bcm_cfg80211 *cfg,
+	enum wl_pm_workq_act_type type)
+{
+	u16 wq_duration = 0;
+
+	if (cfg == NULL)
+		return;
+
+	mutex_lock(&cfg->pm_sync);
+	/*
+	 * Make the cancel and schedule work parts mutually exclusive,
+	 * so that while cancelling we are sure that there is no
+	 * work getting scheduled.
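+	 * (Editor's note: callers pass WL_PM_WORKQ_SHORT for the normal
+	 * WL_PM_ENABLE_TIMEOUT window or WL_PM_WORKQ_LONG to double it; a
+	 * wake lock is held for as long as the work stays queued, judging
+	 * by the DHD_OS_WAKE_LOCK/UNLOCK pairing below.)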
+ */ + if (delayed_work_pending(&cfg->pm_enable_work)) { + cancel_delayed_work_sync(&cfg->pm_enable_work); + DHD_OS_WAKE_UNLOCK(cfg->pub); + } + + if (type == WL_PM_WORKQ_SHORT) { + wq_duration = WL_PM_ENABLE_TIMEOUT; + } else if (type == WL_PM_WORKQ_LONG) { + wq_duration = (WL_PM_ENABLE_TIMEOUT*2); + } + if (wq_duration) { + DHD_OS_WAKE_LOCK(cfg->pub); + schedule_delayed_work(&cfg->pm_enable_work, + msecs_to_jiffies((const unsigned int)wq_duration)); + } + mutex_unlock(&cfg->pm_sync); +} + +/* Return a new chanspec given a legacy chanspec + * Returns INVCHANSPEC on error + */ +static chanspec_t +wl_chspec_from_legacy(chanspec_t legacy_chspec) +{ + chanspec_t chspec; + + /* get the channel number */ + chspec = LCHSPEC_CHANNEL(legacy_chspec); + + /* convert the band */ + if (LCHSPEC_IS2G(legacy_chspec)) { + chspec |= WL_CHANSPEC_BAND_2G; + } else { + chspec |= WL_CHANSPEC_BAND_5G; + } + + /* convert the bw and sideband */ + if (LCHSPEC_IS20(legacy_chspec)) { + chspec |= WL_CHANSPEC_BW_20; + } else { + chspec |= WL_CHANSPEC_BW_40; + if (LCHSPEC_CTL_SB(legacy_chspec) == WL_LCHANSPEC_CTL_SB_LOWER) { + chspec |= WL_CHANSPEC_CTL_SB_L; + } else { + chspec |= WL_CHANSPEC_CTL_SB_U; + } + } + + if (wf_chspec_malformed(chspec)) { + WL_ERR(("wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n", + chspec)); + return INVCHANSPEC; + } + + return chspec; +} + +/* Return a legacy chanspec given a new chanspec + * Returns INVCHANSPEC on error + */ +static chanspec_t +wl_chspec_to_legacy(chanspec_t chspec) +{ + chanspec_t lchspec; + + if (wf_chspec_malformed(chspec)) { + WL_ERR(("wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n", + chspec)); + return INVCHANSPEC; + } + + /* get the channel number */ + lchspec = CHSPEC_CHANNEL(chspec); + + /* convert the band */ + if (CHSPEC_IS2G(chspec)) { + lchspec |= WL_LCHANSPEC_BAND_2G; + } else { + lchspec |= WL_LCHANSPEC_BAND_5G; + } + + /* convert the bw and sideband */ + if (CHSPEC_IS20(chspec)) { + lchspec |= WL_LCHANSPEC_BW_20; + lchspec |= WL_LCHANSPEC_CTL_SB_NONE; + } else if (CHSPEC_IS40(chspec)) { + lchspec |= WL_LCHANSPEC_BW_40; + if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_L) { + lchspec |= WL_LCHANSPEC_CTL_SB_LOWER; + } else { + lchspec |= WL_LCHANSPEC_CTL_SB_UPPER; + } + } else { + /* cannot express the bandwidth */ + char chanbuf[CHANSPEC_STR_LEN]; + WL_ERR(( + "wl_chspec_to_legacy: unable to convert chanspec %s (0x%04X) " + "to pre-11ac format\n", + wf_chspec_ntoa(chspec, chanbuf), chspec)); + return INVCHANSPEC; + } + + return lchspec; +} + +/* given a chanspec value, do the endian and chanspec version conversion to + * a chanspec_t value + * Returns INVCHANSPEC on error + */ +chanspec_t +wl_chspec_host_to_driver(chanspec_t chanspec) +{ + if (ioctl_version == 1) { + chanspec = wl_chspec_to_legacy(chanspec); + if (chanspec == INVCHANSPEC) { + return chanspec; + } + } + chanspec = htodchanspec(chanspec); + + return chanspec; +} + +/* given a channel value, do the endian and chanspec version conversion to + * a chanspec_t value + * Returns INVCHANSPEC on error + */ +chanspec_t +wl_ch_host_to_driver(s32 bssidx, u16 channel) +{ + chanspec_t chanspec; + + chanspec = channel & WL_CHANSPEC_CHAN_MASK; + + if (channel <= CH_MAX_2G_CHANNEL) + chanspec |= WL_CHANSPEC_BAND_2G; + else + chanspec |= WL_CHANSPEC_BAND_5G; + + chanspec |= wl_cfg80211_ulb_get_min_bw_chspec(NULL, bssidx); + + chanspec |= WL_CHANSPEC_CTL_SB_NONE; + + return wl_chspec_host_to_driver(chanspec); +} + +/* given a chanspec value from the driver, do the endian and chanspec 
version conversion to + * a chanspec_t value + * Returns INVCHANSPEC on error + */ +chanspec_t +wl_chspec_driver_to_host(chanspec_t chanspec) +{ + chanspec = dtohchanspec(chanspec); + if (ioctl_version == 1) { + chanspec = wl_chspec_from_legacy(chanspec); + } + + return chanspec; +} + +/* + * convert ASCII string to MAC address (colon-delimited format) + * eg: 00:11:22:33:44:55 + */ +int +wl_cfg80211_ether_atoe(const char *a, struct ether_addr *n) +{ + char *c = NULL; + int count = 0; + + memset(n, 0, ETHER_ADDR_LEN); + for (;;) { + n->octet[count++] = (uint8)simple_strtoul(a, &c, 16); + if (!*c++ || count == ETHER_ADDR_LEN) + break; + a = c; + } + return (count == ETHER_ADDR_LEN); +} + +/* There isn't a lot of sense in it, but you can transmit anything you like */ +static const struct ieee80211_txrx_stypes +wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = { + [NL80211_IFTYPE_ADHOC] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) + }, + [NL80211_IFTYPE_STATION] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_AP] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4) + }, + [NL80211_IFTYPE_AP_VLAN] = { + /* copy AP */ + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4) + }, + [NL80211_IFTYPE_P2P_CLIENT] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_P2P_GO] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4) + }, +#if defined(WL_CFG80211_P2P_DEV_IF) + [NL80211_IFTYPE_P2P_DEVICE] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, +#endif /* WL_CFG80211_P2P_DEV_IF */ +}; + +static void swap_key_from_BE(struct wl_wsec_key *key) +{ + key->index = htod32(key->index); + key->len = htod32(key->len); + key->algo = htod32(key->algo); + key->flags = htod32(key->flags); + key->rxiv.hi = htod32(key->rxiv.hi); + key->rxiv.lo = htod16(key->rxiv.lo); + key->iv_initialized = htod32(key->iv_initialized); +} + +static void swap_key_to_BE(struct wl_wsec_key *key) +{ + key->index = dtoh32(key->index); + key->len = dtoh32(key->len); + key->algo = dtoh32(key->algo); + key->flags = dtoh32(key->flags); + key->rxiv.hi = dtoh32(key->rxiv.hi); + key->rxiv.lo = dtoh16(key->rxiv.lo); + key->iv_initialized = dtoh32(key->iv_initialized); +} + +/* Dump the contents of the encoded wps ie buffer and get pbc value */ +static void +wl_validate_wps_ie(char *wps_ie, s32 wps_ie_len, bool *pbc) +{ + #define WPS_IE_FIXED_LEN 6 + u16 len; + u8 *subel = NULL; + u16 subelt_id; + u16 subelt_len; + u16 val; + u8 *valptr = (uint8*) &val; + if (wps_ie == NULL || wps_ie_len < WPS_IE_FIXED_LEN) { + WL_ERR(("invalid argument : NULL\n")); + return; + } + len = (u16)wps_ie[TLV_LEN_OFF]; + + if (len > wps_ie_len) { 
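+		/* TLV length claims more bytes than the caller provided */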
+ WL_ERR(("invalid length len %d, wps ie len %d\n", len, wps_ie_len)); + return; + } + WL_DBG(("wps_ie len=%d\n", len)); + len -= 4; /* for the WPS IE's OUI, oui_type fields */ + subel = wps_ie + WPS_IE_FIXED_LEN; + while (len >= 4) { /* must have attr id, attr len fields */ + valptr[0] = *subel++; + valptr[1] = *subel++; + subelt_id = HTON16(val); + + valptr[0] = *subel++; + valptr[1] = *subel++; + subelt_len = HTON16(val); + + len -= 4; /* for the attr id, attr len fields */ + len -= subelt_len; /* for the remaining fields in this attribute */ + WL_DBG((" subel=%p, subelt_id=0x%x subelt_len=%u\n", + subel, subelt_id, subelt_len)); + + if (subelt_id == WPS_ID_VERSION) { + WL_DBG((" attr WPS_ID_VERSION: %u\n", *subel)); + } else if (subelt_id == WPS_ID_REQ_TYPE) { + WL_DBG((" attr WPS_ID_REQ_TYPE: %u\n", *subel)); + } else if (subelt_id == WPS_ID_CONFIG_METHODS) { + valptr[0] = *subel; + valptr[1] = *(subel + 1); + WL_DBG((" attr WPS_ID_CONFIG_METHODS: %x\n", HTON16(val))); + } else if (subelt_id == WPS_ID_DEVICE_NAME) { + char devname[100]; + size_t namelen = MIN(subelt_len, sizeof(devname)); + if (namelen) { + memcpy(devname, subel, namelen); + devname[namelen - 1] = '\0'; + WL_DBG((" attr WPS_ID_DEVICE_NAME: %s (len %u)\n", + devname, subelt_len)); + } + } else if (subelt_id == WPS_ID_DEVICE_PWD_ID) { + valptr[0] = *subel; + valptr[1] = *(subel + 1); + WL_DBG((" attr WPS_ID_DEVICE_PWD_ID: %u\n", HTON16(val))); + *pbc = (HTON16(val) == DEV_PW_PUSHBUTTON) ? true : false; + } else if (subelt_id == WPS_ID_PRIM_DEV_TYPE) { + valptr[0] = *subel; + valptr[1] = *(subel + 1); + WL_DBG((" attr WPS_ID_PRIM_DEV_TYPE: cat=%u \n", HTON16(val))); + valptr[0] = *(subel + 6); + valptr[1] = *(subel + 7); + WL_DBG((" attr WPS_ID_PRIM_DEV_TYPE: subcat=%u\n", HTON16(val))); + } else if (subelt_id == WPS_ID_REQ_DEV_TYPE) { + valptr[0] = *subel; + valptr[1] = *(subel + 1); + WL_DBG((" attr WPS_ID_REQ_DEV_TYPE: cat=%u\n", HTON16(val))); + valptr[0] = *(subel + 6); + valptr[1] = *(subel + 7); + WL_DBG((" attr WPS_ID_REQ_DEV_TYPE: subcat=%u\n", HTON16(val))); + } else if (subelt_id == WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS) { + valptr[0] = *subel; + valptr[1] = *(subel + 1); + WL_DBG((" attr WPS_ID_SELECTED_REGISTRAR_CONFIG_METHODS" + ": cat=%u\n", HTON16(val))); + } else { + WL_DBG((" unknown attr 0x%x\n", subelt_id)); + } + + subel += subelt_len; + } +} + +s32 wl_set_tx_power(struct net_device *dev, + enum nl80211_tx_power_setting type, s32 dbm) +{ + s32 err = 0; + s32 disable = 0; + s32 txpwrqdbm; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + /* Make sure radio is off or on as far as software is concerned */ + disable = WL_RADIO_SW_DISABLE << 16; + disable = htod32(disable); + err = wldev_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable), true); + if (unlikely(err)) { + WL_ERR(("WLC_SET_RADIO error (%d)\n", err)); + return err; + } + + if (dbm > 0xffff) + dbm = 0xffff; + txpwrqdbm = dbm * 4; + err = wldev_iovar_setbuf_bsscfg(dev, "qtxpower", (void *)&txpwrqdbm, + sizeof(txpwrqdbm), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, + &cfg->ioctl_buf_sync); + if (unlikely(err)) + WL_ERR(("qtxpower error (%d)\n", err)); + else + WL_ERR(("dBm=%d, txpwrqdbm=0x%x\n", dbm, txpwrqdbm)); + + return err; +} + +s32 wl_get_tx_power(struct net_device *dev, s32 *dbm) +{ + s32 err = 0; + s32 txpwrdbm; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + err = wldev_iovar_getbuf_bsscfg(dev, "qtxpower", + NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + return err; + } + 
+ memcpy(&txpwrdbm, cfg->ioctl_buf, sizeof(txpwrdbm)); + txpwrdbm = dtoh32(txpwrdbm); + *dbm = (txpwrdbm & ~WL_TXPWR_OVERRIDE) / 4; + + WL_INFORM(("dBm=%d, txpwrdbm=0x%x\n", *dbm, txpwrdbm)); + + return err; +} + +static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy) +{ + chanspec_t chspec; + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *dev = bcmcfg_to_prmry_ndev(cfg); + struct ether_addr bssid; + struct wl_bss_info *bss = NULL; + s32 bssidx = 0; /* Explicitly set to primary bssidx */ + + if ((err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), false))) { + /* STA interface is not associated. So start the new interface on a temp + * channel . Later proper channel will be applied by the above framework + * via set_channel (cfg80211 API). + */ + WL_DBG(("Not associated. Return a temp channel. \n")); + return wl_ch_host_to_driver(bssidx, WL_P2P_TEMP_CHAN); + } + + + *(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX); + if ((err = wldev_ioctl(dev, WLC_GET_BSS_INFO, cfg->extra_buf, + WL_EXTRA_BUF_MAX, false))) { + WL_ERR(("Failed to get associated bss info, use temp channel \n")); + chspec = wl_ch_host_to_driver(bssidx, WL_P2P_TEMP_CHAN); + } + else { + bss = (struct wl_bss_info *) (cfg->extra_buf + 4); + chspec = bss->chanspec; + + WL_DBG(("Valid BSS Found. chanspec:%d \n", chspec)); + } + return chspec; +} + +static bcm_struct_cfgdev * +wl_cfg80211_add_monitor_if(char *name) +{ +#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF) + WL_INFORM(("wl_cfg80211_add_monitor_if: No more support monitor interface\n")); + return ERR_PTR(-EOPNOTSUPP); +#else + struct net_device* ndev = NULL; + + dhd_add_monitor(name, &ndev); + WL_INFORM(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev)); + return ndev_to_cfgdev(ndev); +#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */ +} + +static bcm_struct_cfgdev * +wl_cfg80211_add_virtual_iface(struct wiphy *wiphy, +#if defined(WL_CFG80211_P2P_DEV_IF) + const char *name, +#else + char *name, +#endif /* WL_CFG80211_P2P_DEV_IF */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) + unsigned char name_assign_type, +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + enum nl80211_iftype type, +#else + enum nl80211_iftype type, u32 *flags, +#endif + struct vif_params *params) +{ + s32 err = -ENODEV; + s32 timeout = -1; + s32 wlif_type = -1; + s32 mode = 0; + s32 val = 0; + s32 cfg_type; + s32 dhd_mode = 0; + chanspec_t chspec; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *primary_ndev; + struct net_device *new_ndev; + struct ether_addr primary_mac; +#ifdef WL_VIRTUAL_APSTA + bcm_struct_cfgdev *new_cfgdev; +#endif /* WL_VIRTUAL_APSTA */ +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + s32 up = 1; + dhd_pub_t *dhd; + bool enabled; +#endif +#endif /* PROP_TXSTATUS_VSDB */ +#if defined(SUPPORT_AP_POWERSAVE) + dhd_pub_t *dhd; +#endif /* SUPPORT_AP_POWERSAVE */ + bool hang_required = false; + + if (!cfg) + return ERR_PTR(-EINVAL); + +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + dhd = (dhd_pub_t *)(cfg->pub); +#endif +#endif /* PROP_TXSTATUS_VSDB */ +#if defined(SUPPORT_AP_POWERSAVE) + dhd = (dhd_pub_t *)(cfg->pub); +#endif /* SUPPORT_AP_POWERSAVE */ + + /* Use primary I/F for sending cmds down to firmware */ + primary_ndev = bcmcfg_to_prmry_ndev(cfg); + + if (unlikely(!wl_get_drv_status(cfg, READY, primary_ndev))) { + WL_ERR(("device is not ready\n")); + return ERR_PTR(-ENODEV); + } + + 
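/* Map the requested cfg80211 iftype to a firmware interface type;
+	 * unsupported types bail out in the switch below.
+	 */
+	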
WL_DBG(("if name: %s, type: %d\n", name, type)); + switch (type) { + case NL80211_IFTYPE_ADHOC: +#ifdef WLAIBSS_MCHAN + return bcm_cfg80211_add_ibss_if(wiphy, (char *)name); +#endif /* WLAIBSS_MCHAN */ + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_MESH_POINT: + WL_ERR(("Unsupported interface type\n")); + mode = WL_MODE_IBSS; + return NULL; + case NL80211_IFTYPE_MONITOR: + return wl_cfg80211_add_monitor_if((char *)name); +#if defined(WL_CFG80211_P2P_DEV_IF) + case NL80211_IFTYPE_P2P_DEVICE: + cfg->down_disc_if = FALSE; + return wl_cfgp2p_add_p2p_disc_if(cfg); +#endif /* WL_CFG80211_P2P_DEV_IF */ + case NL80211_IFTYPE_STATION: +#ifdef WL_VIRTUAL_APSTA +#ifdef WLAIBSS_MCHAN + if (cfg->ibss_cfgdev) { + WL_ERR(("AIBSS is already operational. " + " AIBSS & DUALSTA can't be used together \n")); + return ERR_PTR(-ENOMEM); + } +#endif /* WLAIBSS_MCHAN */ + if (!name) { + WL_ERR(("Interface name not provided \n")); + return ERR_PTR(-ENODEV); + } + + if (wl_cfgp2p_vif_created(cfg)) { + WL_ERR(("Could not create new iface." + "Already one p2p interface is running")); + return ERR_PTR(-ENODEV); + } + new_cfgdev = wl_cfg80211_create_iface(cfg->wdev->wiphy, + NL80211_IFTYPE_STATION, NULL, name); + if (!new_cfgdev) + return ERR_PTR(-ENOMEM); + else + return new_cfgdev; +#endif /* WL_VIRTUAL_APSTA */ + case NL80211_IFTYPE_P2P_CLIENT: + wlif_type = WL_P2P_IF_CLIENT; + mode = WL_MODE_BSS; + break; + case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_AP: + wlif_type = WL_P2P_IF_GO; + mode = WL_MODE_AP; + break; + default: + WL_ERR(("Unsupported interface type\n")); + return ERR_PTR(-ENODEV); + break; + } + + if (!name) { + WL_ERR(("name is NULL\n")); + return ERR_PTR(-ENODEV); + } + if (cfg->p2p_supported && (wlif_type != -1)) { + ASSERT(cfg->p2p); /* ensure expectation of p2p initialization */ + +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + if (!dhd) + return ERR_PTR(-ENODEV); +#endif +#endif /* PROP_TXSTATUS_VSDB */ + if (!cfg->p2p) + return ERR_PTR(-ENODEV); + + if (cfg->cfgdev_bssidx != -1) { + WL_ERR(("Failed to start p2p, Maximum no of interface reached")); + return ERR_PTR(-ENODEV); + } + + if (cfg->p2p && !cfg->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) { + p2p_on(cfg) = true; + wl_cfgp2p_set_firm_p2p(cfg); + wl_cfgp2p_init_discovery(cfg); + get_primary_mac(cfg, &primary_mac); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); + } + + strncpy(cfg->p2p->vir_ifname, name, IFNAMSIZ - 1); + cfg->p2p->vir_ifname[IFNAMSIZ - 1] = '\0'; + + wl_cfg80211_scan_abort(cfg); +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + if (!cfg->wlfc_on && !disable_proptx) { + dhd_wlfc_get_enable(dhd, &enabled); + if (!enabled && dhd->op_mode != DHD_FLAG_HOSTAP_MODE && + dhd->op_mode != DHD_FLAG_IBSS_MODE) { + dhd_wlfc_init(dhd); + err = wldev_ioctl(primary_ndev, WLC_UP, &up, sizeof(s32), true); + if (err < 0) + WL_ERR(("WLC_UP return err:%d\n", err)); + } + cfg->wlfc_on = true; + } +#endif +#endif /* PROP_TXSTATUS_VSDB */ + + /* Dual p2p doesn't support multiple P2PGO interfaces, + * p2p_go_count is the counter for GO creation + * requests. + */ + if ((cfg->p2p->p2p_go_count > 0) && (type == NL80211_IFTYPE_P2P_GO)) { + WL_ERR(("Fw doesnot support multiple Go")); + return ERR_PTR(-ENOMEM); + } + /* In concurrency case, STA may be already associated in a particular channel. + * so retrieve the current channel of primary interface and then start the virtual + * interface on that. 
+ */ + chspec = wl_cfg80211_get_shared_freq(wiphy); + + /* For P2P mode, use P2P-specific driver features to create the + * bss: "cfg p2p_ifadd" + */ + if (wl_check_dongle_idle(wiphy) != TRUE) { + WL_ERR(("FW is busy to add interface")); + return ERR_PTR(-ENOMEM); + } + wl_set_p2p_status(cfg, IF_ADDING); + memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info)); + if (wlif_type == WL_P2P_IF_GO) + wldev_iovar_setint(primary_ndev, "mpc", 0); + cfg_type = wl_cfgp2p_get_conn_idx(cfg); + if (cfg_type == BCME_ERROR) { + wl_clr_p2p_status(cfg, IF_ADDING); + WL_ERR(("Failed to get connection idx for p2p interface")); + goto fail; + } + err = wl_cfgp2p_ifadd(cfg, wl_to_p2p_bss_macaddr(cfg, cfg_type), + htod32(wlif_type), chspec); + if (unlikely(err)) { + wl_clr_p2p_status(cfg, IF_ADDING); + WL_ERR((" virtual iface add failed (%d) \n", err)); + return ERR_PTR(-ENOMEM); + } + + timeout = wait_event_interruptible_timeout(cfg->netif_change_event, + ((wl_get_p2p_status(cfg, IF_ADDING) == false) && + (cfg->if_event_info.valid)), + msecs_to_jiffies(MAX_WAIT_TIME)); + + if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) { + struct wireless_dev *vwdev; + int pm_mode = PM_ENABLE; + wl_if_event_info *event = &cfg->if_event_info; + /* IF_ADD event has come back, we can proceed to to register + * the new interface now, use the interface name provided by caller (thus + * ignore the one from wlc) + */ + new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, cfg->p2p->vir_ifname, + event->mac, event->bssidx, event->name); + if (new_ndev == NULL) + goto fail; + + wl_to_p2p_bss_ndev(cfg, cfg_type) = new_ndev; + wl_to_p2p_bss_bssidx(cfg, cfg_type) = event->bssidx; + vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL); + if (unlikely(!vwdev)) { + WL_ERR(("Could not allocate wireless device\n")); + err = -ENOMEM; + goto fail; + } + vwdev->wiphy = cfg->wdev->wiphy; + WL_INFORM(("virtual interface(%s) is created\n", cfg->p2p->vir_ifname)); + if (type == NL80211_IFTYPE_P2P_GO) { + cfg->p2p->p2p_go_count++; + } + vwdev->iftype = type; +#ifdef DHD_IFDEBUG + WL_ERR(("new_ndev: %p\n", new_ndev)); +#endif + vwdev->netdev = new_ndev; + new_ndev->ieee80211_ptr = vwdev; + SET_NETDEV_DEV(new_ndev, wiphy_dev(vwdev->wiphy)); + wl_set_drv_status(cfg, READY, new_ndev); + wl_set_mode_by_netdev(cfg, new_ndev, mode); + + if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) { + wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev); + err = -ENODEV; + goto fail; + } + err = wl_alloc_netinfo(cfg, new_ndev, vwdev, mode, pm_mode, event->bssidx); + if (unlikely(err != 0)) { + WL_ERR(("Allocation of netinfo failed (%d) \n", err)); + goto fail; + } + val = 1; + /* Disable firmware roaming for P2P interface */ + wldev_iovar_setint(new_ndev, "roam_off", val); +#ifdef WL11ULB + if (cfg->p2p_wdev && is_p2p_group_iface(new_ndev->ieee80211_ptr)) { + u32 ulb_bw = wl_cfg80211_get_ulb_bw(cfg->p2p_wdev); + if (ulb_bw) { + /* Apply ULB BW settings on the newly spawned interface */ + WL_DBG(("[ULB] Applying ULB BW for the newly" + "created P2P interface \n")); + if (wl_cfg80211_set_ulb_bw(new_ndev, + ulb_bw, new_ndev->name) < 0) { + /* + * If ulb_bw set failed, fail the iface creation. + * wl_dealloc_netinfo_by_wdev will be called by the + * unregister notifier. 
+ */ + wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev); + err = -EINVAL; + goto fail; + } + } + } +#endif /* WL11ULB */ + + if (mode != WL_MODE_AP) + wldev_iovar_setint(new_ndev, "buf_key_b4_m4", 1); + + WL_ERR((" virtual interface(%s) is " + "created net attach done\n", cfg->p2p->vir_ifname)); + if (mode == WL_MODE_AP) + wl_set_drv_status(cfg, CONNECTED, new_ndev); +#ifdef SUPPORT_AP_POWERSAVE + if (mode == WL_MODE_AP) { + dhd_set_ap_powersave(dhd, 0, TRUE); + } +#endif /* SUPPORT_AP_POWERSAVE */ + if (type == NL80211_IFTYPE_P2P_CLIENT) + dhd_mode = DHD_FLAG_P2P_GC_MODE; + else if (type == NL80211_IFTYPE_P2P_GO) + dhd_mode = DHD_FLAG_P2P_GO_MODE; + DNGL_FUNC(dhd_cfg80211_set_p2p_info, (cfg, dhd_mode)); + /* reinitialize completion to clear previous count */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) + INIT_COMPLETION(cfg->iface_disable); +#else + init_completion(&cfg->iface_disable); +#endif + return ndev_to_cfgdev(new_ndev); + } else { + wl_clr_p2p_status(cfg, IF_ADDING); + WL_ERR((" virtual interface(%s) is not created \n", cfg->p2p->vir_ifname)); + + WL_ERR(("left timeout : %d\n", timeout)); + WL_ERR(("IF_ADDING status : %d\n", wl_get_p2p_status(cfg, IF_ADDING))); + WL_ERR(("event valid : %d\n", cfg->if_event_info.valid)); + + wl_clr_p2p_status(cfg, GO_NEG_PHASE); + wl_set_p2p_status(cfg, IF_DELETING); + + err = wl_cfgp2p_ifdel(cfg, wl_to_p2p_bss_macaddr(cfg, cfg_type)); + if (err == BCME_OK) { + timeout = wait_event_interruptible_timeout(cfg->netif_change_event, + ((wl_get_p2p_status(cfg, IF_DELETING) == false) && + (cfg->if_event_info.valid)), + msecs_to_jiffies(MAX_WAIT_TIME)); + if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) && + cfg->if_event_info.valid) { + /* + * Should indicate upper layer this failure case of p2p + * interface creation + */ + WL_ERR(("IFDEL operation done\n")); + } else { + WL_ERR(("IFDEL didn't complete properly\n")); + hang_required = true; + } + } else { + hang_required = true; + } + + if (hang_required) { + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + + WL_ERR(("p2p_ifdel failed, error %d, sent HANG event to %s\n", + err, ndev->name)); + dhd->hang_reason = HANG_REASON_P2P_IFACE_DEL_FAILURE; + net_os_send_hang_message(ndev); + } + + memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ); + wl_to_p2p_bss_bssidx(cfg, cfg_type) = -1; +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + dhd_wlfc_get_enable(dhd, &enabled); + if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE && + dhd->op_mode != DHD_FLAG_IBSS_MODE) { + dhd_wlfc_deinit(dhd); + cfg->wlfc_on = false; + } +#endif +#endif /* PROP_TXSTATUS_VSDB */ + /* + * Returns -ENODEV to upperlayer to indicate that DHD + * failed to create p2p interface + */ + err = -ENODEV; + } + } +fail: + if (wlif_type == WL_P2P_IF_GO) + wldev_iovar_setint(primary_ndev, "mpc", 1); + return ERR_PTR(err); +} + +static s32 +wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev) +{ + struct net_device *dev = NULL; + struct ether_addr p2p_mac; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 timeout = -1; + s32 ret = 0; + s32 index = -1; + s32 type = -1; +#ifdef CUSTOM_SET_CPUCORE + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); +#endif /* CUSTOM_SET_CPUCORE */ + WL_DBG(("Enter\n")); + +#ifdef CUSTOM_SET_CPUCORE + dhd->chan_isvht80 &= ~DHD_FLAG_P2P_MODE; + if (!(dhd->chan_isvht80)) + dhd_set_cpucore(dhd, FALSE); +#endif /* CUSTOM_SET_CPUCORE */ +#ifdef WL_CFG80211_P2P_DEV_IF + if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) { + 
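/* The P2P discovery interface has no net_device of its own; it is
+		 * torn down via the cfgp2p helper or deferred through
+		 * down_disc_if, depending on dhd_download_fw_on_driverload.
+		 */
+		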
if (dhd_download_fw_on_driverload) { + return wl_cfgp2p_del_p2p_disc_if(cfgdev, cfg); + } else { + cfg->down_disc_if = TRUE; + return 0; + } + } +#endif /* WL_CFG80211_P2P_DEV_IF */ + dev = cfgdev_to_wlc_ndev(cfgdev, cfg); + +#ifdef WLAIBSS_MCHAN + if (cfgdev == cfg->ibss_cfgdev) + return bcm_cfg80211_del_ibss_if(wiphy, cfgdev); +#endif /* WLAIBSS_MCHAN */ + +#ifdef WL_VIRTUAL_APSTA + if (cfgdev == cfg->bss_cfgdev) + return wl_cfg80211_del_iface(wiphy, cfgdev); +#endif /* WL_VIRTUAL_APSTA */ + if ((index = wl_get_bssidx_by_wdev(cfg, cfgdev_to_wdev(cfgdev))) < 0) { + WL_ERR(("Find p2p index from wdev failed\n")); + return BCME_ERROR; + } + if (wl_check_dongle_idle(wiphy) != TRUE) { + WL_ERR(("FW is busy to add interface")); + return BCME_ERROR; + } + if (cfg->p2p_supported) { + if (wl_cfgp2p_find_type(cfg, index, &type) != BCME_OK) + return BCME_ERROR; + memcpy(p2p_mac.octet, wl_to_p2p_bss_macaddr(cfg, type).octet, ETHER_ADDR_LEN); + + /* Clear GO_NEG_PHASE bit to take care of GO-NEG-FAIL cases + */ + WL_DBG(("P2P: GO_NEG_PHASE status cleared ")); + wl_clr_p2p_status(cfg, GO_NEG_PHASE); + if (wl_cfgp2p_vif_created(cfg)) { + if (wl_get_drv_status(cfg, SCANNING, dev)) { + wl_notify_escan_complete(cfg, dev, true, true); + } + wldev_iovar_setint(dev, "mpc", 1); + /* Delete pm_enable_work */ + wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL); + + /* for GC */ + if (wl_get_drv_status(cfg, DISCONNECTING, dev) && + (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP)) { + WL_ERR(("Wait for Link Down event for GC !\n")); + wait_for_completion_timeout + (&cfg->iface_disable, msecs_to_jiffies(500)); + } + + memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info)); + wl_set_p2p_status(cfg, IF_DELETING); + DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (cfg)); + + /* for GO */ + if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) { + wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false); + cfg->p2p->p2p_go_count--; + /* disable interface before bsscfg free */ + ret = wl_cfgp2p_ifdisable(cfg, &p2p_mac); + /* if fw doesn't support "ifdis", + do not wait for link down of ap mode + */ + if (ret == 0) { + WL_ERR(("Wait for Link Down event for GO !!!\n")); + wait_for_completion_timeout(&cfg->iface_disable, + msecs_to_jiffies(500)); + } else if (ret != BCME_UNSUPPORTED) { + msleep(300); + } + } + wl_cfg80211_clear_per_bss_ies(cfg, index); + + if (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP) + wldev_iovar_setint(dev, "buf_key_b4_m4", 0); + memcpy(p2p_mac.octet, wl_to_p2p_bss_macaddr(cfg, type).octet, + ETHER_ADDR_LEN); + CFGP2P_INFO(("primary idx %d : cfg p2p_ifdis "MACDBG"\n", + dev->ifindex, MAC2STRDBG(p2p_mac.octet))); + + /* delete interface after link down */ + ret = wl_cfgp2p_ifdel(cfg, &p2p_mac); + if (ret != BCME_OK) { + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + + WL_ERR(("p2p_ifdel failed, error %d, sent HANG event to %s\n", + ret, ndev->name)); + dhd->hang_reason = HANG_REASON_P2P_IFACE_DEL_FAILURE; + net_os_send_hang_message(ndev); + } else { + /* Wait for IF_DEL operation to be finished */ + timeout = wait_event_interruptible_timeout(cfg->netif_change_event, + ((wl_get_p2p_status(cfg, IF_DELETING) == false) && + (cfg->if_event_info.valid)), + msecs_to_jiffies(MAX_WAIT_TIME)); + if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) && + cfg->if_event_info.valid) { + + WL_DBG(("IFDEL operation done\n")); + wl_cfg80211_handle_ifdel(cfg, &cfg->if_event_info, dev); + } else { + WL_ERR(("IFDEL didn't complete properly\n")); + } + } + + ret = 
dhd_del_monitor(dev); + if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) { + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL((dhd_pub_t *)(cfg->pub)); + } + } + } + return ret; +} + +static s32 +wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + enum nl80211_iftype type, +#else + enum nl80211_iftype type, u32 *flags, +#endif + struct vif_params *params) +{ + s32 ap = 0; + s32 infra = 0; + s32 ibss = 0; + s32 wlif_type; + s32 mode = 0; + s32 err = BCME_OK; + s32 index; + s32 conn_idx = -1; + chanspec_t chspec; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg); + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + + WL_DBG(("Enter type %d\n", type)); + switch (type) { + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_MESH_POINT: + ap = 1; + WL_ERR(("type (%d) : currently we do not support this type\n", + type)); + break; + case NL80211_IFTYPE_ADHOC: + mode = WL_MODE_IBSS; + ibss = 1; + break; + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + mode = WL_MODE_BSS; + infra = 1; + break; + case NL80211_IFTYPE_AP: + dhd->op_mode |= DHD_FLAG_HOSTAP_MODE; + /* intentional fall through */ + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_P2P_GO: + mode = WL_MODE_AP; + ap = 1; + break; + default: + return -EINVAL; + } + if (!dhd) + return -EINVAL; + + /* If any scan is going on, abort it */ + if (wl_get_drv_status_all(cfg, SCANNING)) { + int wait_cnt = MAX_SCAN_ABORT_WAIT_CNT; + WL_ERR(("Scan in progress. Aborting the scan!\n")); + wl_cfg80211_scan_abort(cfg); + while (wl_get_drv_status_all(cfg, SCANNING) && wait_cnt) { + WL_DBG(("Waiting for SCANNING terminated, wait_cnt: %d\n", wait_cnt)); + wait_cnt--; + OSL_SLEEP(WAIT_SCAN_ABORT_OSL_SLEEP_TIME); + } + if (wl_get_drv_status_all(cfg, SCANNING)) { + wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true); + } + } + + if (wl_check_dongle_idle(wiphy) != TRUE) { + WL_ERR(("FW is busy to add interface")); + return -EINVAL; + } + if (ap) { + wl_set_mode_by_netdev(cfg, ndev, mode); + if (is_p2p_group_iface(ndev->ieee80211_ptr) && + cfg->p2p && wl_cfgp2p_vif_created(cfg)) { + WL_DBG(("p2p_vif_created p2p_on (%d)\n", p2p_on(cfg))); + wldev_iovar_setint(ndev, "mpc", 0); + wl_notify_escan_complete(cfg, ndev, true, true); + + /* Dual p2p doesn't support multiple P2PGO interfaces, + * p2p_go_count is the counter for GO creation + * requests. + */ + if ((cfg->p2p->p2p_go_count > 0) && (type == NL80211_IFTYPE_P2P_GO)) { + wl_set_mode_by_netdev(cfg, ndev, WL_MODE_BSS); + WL_ERR(("Fw doesnot support multiple GO ")); + return BCME_ERROR; + } + /* In concurrency case, STA may be already associated in a particular + * channel. so retrieve the current channel of primary interface and + * then start the virtual interface on that. 
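+			 * (Same shared-channel logic as in the interface-add path above.)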
+ */ + chspec = wl_cfg80211_get_shared_freq(wiphy); + index = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr); + if (index < 0) { + WL_ERR(("Find p2p index from ndev(%p) failed\n", ndev)); + return BCME_ERROR; + } + if (wl_cfgp2p_find_type(cfg, index, &conn_idx) != BCME_OK) + return BCME_ERROR; + + wlif_type = WL_P2P_IF_GO; + WL_DBG(("%s : ap (%d), infra (%d), iftype (%d) conn_idx (%d)\n", + ndev->name, ap, infra, type, conn_idx)); + wl_set_p2p_status(cfg, IF_CHANGING); + wl_clr_p2p_status(cfg, IF_CHANGED); + wl_cfgp2p_ifchange(cfg, wl_to_p2p_bss_macaddr(cfg, conn_idx), + htod32(wlif_type), chspec, conn_idx); + wait_event_interruptible_timeout(cfg->netif_change_event, + (wl_get_p2p_status(cfg, IF_CHANGED) == true), + msecs_to_jiffies(MAX_WAIT_TIME)); + wl_set_mode_by_netdev(cfg, ndev, mode); + dhd->op_mode &= ~DHD_FLAG_P2P_GC_MODE; + dhd->op_mode |= DHD_FLAG_P2P_GO_MODE; + wl_clr_p2p_status(cfg, IF_CHANGING); + wl_clr_p2p_status(cfg, IF_CHANGED); + if (mode == WL_MODE_AP) + wl_set_drv_status(cfg, CONNECTED, ndev); +#ifdef SUPPORT_AP_POWERSAVE + dhd_set_ap_powersave(dhd, 0, TRUE); +#endif /* SUPPORT_AP_POWERSAVE */ + } else if (((ndev == primary_ndev) || + (ndev == ((struct net_device *)cfgdev_to_ndev(cfg->bss_cfgdev)))) && + !wl_get_drv_status(cfg, AP_CREATED, ndev)) { + wl_set_drv_status(cfg, AP_CREATING, ndev); + } else { + WL_ERR(("Cannot change the interface for GO or SOFTAP\n")); + return -EINVAL; + } + } else { + /* P2P GO interface deletion is handled on the basis of role type (AP). + * So avoid changing role for p2p type. + */ + if (ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) + wl_set_mode_by_netdev(cfg, ndev, mode); + WL_DBG(("Change_virtual_iface for transition from GO/AP to client/STA")); +#ifdef SUPPORT_AP_POWERSAVE + dhd_set_ap_powersave(dhd, 0, FALSE); +#endif /* SUPPORT_AP_POWERSAVE */ + } + + if (ibss) { + infra = 0; + wl_set_mode_by_netdev(cfg, ndev, mode); + err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(s32), true); + if (err < 0) { + WL_ERR(("SET Adhoc error %d\n", err)); + return -EINVAL; + } + } + + ndev->ieee80211_ptr->iftype = type; + return 0; +} + +s32 +wl_cfg80211_notify_ifadd(int ifidx, char *name, uint8 *mac, uint8 bssidx) +{ + bool ifadd_expected = FALSE; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + /* P2P may send WLC_E_IF_ADD and/or WLC_E_IF_CHANGE during IF updating ("p2p_ifupd") + * redirect the IF_ADD event to ifchange as it is not a real "new" interface + */ + if (wl_get_p2p_status(cfg, IF_CHANGING)) + return wl_cfg80211_notify_ifchange(ifidx, name, mac, bssidx); + + /* Okay, we are expecting IF_ADD (as IF_ADDING is true) */ + if (wl_get_p2p_status(cfg, IF_ADDING)) { + ifadd_expected = TRUE; + wl_clr_p2p_status(cfg, IF_ADDING); + } else if (cfg->bss_pending_op) { + ifadd_expected = TRUE; + cfg->bss_pending_op = FALSE; + } + + if (ifadd_expected) { + wl_if_event_info *if_event_info = &cfg->if_event_info; + + if_event_info->valid = TRUE; + if_event_info->ifidx = ifidx; + if_event_info->bssidx = bssidx; + strncpy(if_event_info->name, name, IFNAMSIZ); + if_event_info->name[IFNAMSIZ] = '\0'; + if (mac) + memcpy(if_event_info->mac, mac, ETHER_ADDR_LEN); + wake_up_interruptible(&cfg->netif_change_event); + return BCME_OK; + } + + return BCME_ERROR; +} + +s32 +wl_cfg80211_notify_ifdel(int ifidx, char *name, uint8 *mac, uint8 bssidx) +{ + bool ifdel_expected = FALSE; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + wl_if_event_info *if_event_info = &cfg->if_event_info; + + if (wl_get_p2p_status(cfg, IF_DELETING)) { + ifdel_expected = TRUE; + 
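/* Ack the IF_DELETING state set by the deleting context */
+		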
wl_clr_p2p_status(cfg, IF_DELETING); + } else if (cfg->bss_pending_op) { + ifdel_expected = TRUE; + cfg->bss_pending_op = FALSE; + } + + if (ifdel_expected) { + if_event_info->valid = TRUE; + if_event_info->ifidx = ifidx; + if_event_info->bssidx = bssidx; + wake_up_interruptible(&cfg->netif_change_event); + return BCME_OK; + } + + return BCME_ERROR; +} + +s32 +wl_cfg80211_notify_ifchange(int ifidx, char *name, uint8 *mac, uint8 bssidx) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + if (wl_get_p2p_status(cfg, IF_CHANGING)) { + wl_set_p2p_status(cfg, IF_CHANGED); + wake_up_interruptible(&cfg->netif_change_event); + return BCME_OK; + } + + return BCME_ERROR; +} + +static s32 wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info *if_event_info, + struct net_device* ndev) +{ + s32 type = -1; + s32 bssidx = -1; +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + bool enabled; +#endif +#endif /* PROP_TXSTATUS_VSDB */ + + bssidx = if_event_info->bssidx; + if (bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) && + bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2)) { + WL_ERR(("got IF_DEL for if %d, not owned by cfg driver\n", bssidx)); + return BCME_ERROR; + } + + if (p2p_is_on(cfg) && wl_cfgp2p_vif_created(cfg)) { + if (cfg->scan_request && (cfg->escan_info.ndev == ndev)) { + /* Abort any pending scan requests */ + cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + WL_DBG(("ESCAN COMPLETED\n")); + wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, false); + } + + memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ); + if (wl_cfgp2p_find_type(cfg, bssidx, &type) == BCME_OK) { + /* Update P2P data */ + wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, type)); + wl_to_p2p_bss_ndev(cfg, type) = NULL; + wl_to_p2p_bss_bssidx(cfg, type) = -1; + } else if (wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr) < 0) { + WL_ERR(("bssidx not known for the given ndev as per net_info data \n")); + return BCME_ERROR; + } + +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + dhd_wlfc_get_enable(dhd, &enabled); + if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE && + dhd->op_mode != DHD_FLAG_IBSS_MODE) { + dhd_wlfc_deinit(dhd); + cfg->wlfc_on = false; + } +#endif +#endif /* PROP_TXSTATUS_VSDB */ + } + + dhd_net_if_lock(ndev); + wl_cfg80211_remove_if(cfg, if_event_info->ifidx, ndev); + dhd_net_if_unlock(ndev); + + return BCME_OK; +} + +/* Find listen channel */ +static s32 wl_find_listen_channel(struct bcm_cfg80211 *cfg, + const u8 *ie, u32 ie_len) +{ + wifi_p2p_ie_t *p2p_ie; + u8 *end, *pos; + s32 listen_channel; + +/* unfortunately const cast required here - function is + * a callback so its signature must not be changed + * and cascade of changing wl_cfgp2p_find_p2pie + * causes need for const cast in other places + */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + pos = (u8 *)ie; +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + p2p_ie = wl_cfgp2p_find_p2pie(pos, ie_len); + + if (p2p_ie == NULL) + return 0; + + pos = p2p_ie->subelts; + end = p2p_ie->subelts + (p2p_ie->len - 4); + + CFGP2P_DBG((" found p2p ie ! 
length %d \n",
+		p2p_ie->len));
+
+	while (pos < end) {
+		uint16 attr_len;
+		if (pos + 2 >= end) {
+			CFGP2P_DBG((" -- Invalid P2P attribute"));
+			return 0;
+		}
+		attr_len = ((uint16) (((pos + 1)[1] << 8) | (pos + 1)[0]));
+
+		if (pos + 3 + attr_len > end) {
+			CFGP2P_DBG(("P2P: Attribute underflow "
+				"(len=%u left=%d)",
+				attr_len, (int) (end - pos - 3)));
+			return 0;
+		}
+
+		/* If the Listen Channel attribute id is 6 and the value is valid,
+		 * return the listen channel
+		 */
+		if (pos[0] == 6) {
+			/* listen channel subel length format
+			 * 1(id) + 2(len) + 3(country) + 1(op. class) + 1(chan num)
+			 */
+			listen_channel = pos[1 + 2 + 3 + 1];
+
+			if (listen_channel == SOCIAL_CHAN_1 ||
+				listen_channel == SOCIAL_CHAN_2 ||
+				listen_channel == SOCIAL_CHAN_3) {
+				CFGP2P_DBG((" Found my Listen Channel %d \n", listen_channel));
+				return listen_channel;
+			}
+		}
+		pos += 3 + attr_len;
+	}
+	return 0;
+}
+
+static void wl_scan_prep(struct wl_scan_params *params, struct cfg80211_scan_request *request)
+{
+	u32 n_ssids;
+	u32 n_channels;
+	u16 channel;
+	chanspec_t chanspec;
+	s32 i = 0, j = 0, offset;
+	char *ptr;
+	wlc_ssid_t ssid;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	struct wireless_dev *wdev;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = 0;
+	params->nprobes = -1;
+	params->active_time = -1;
+	params->passive_time = -1;
+	params->home_time = -1;
+	params->channel_num = 0;
+	memset(&params->ssid, 0, sizeof(wlc_ssid_t));
+
+	WL_SCAN(("Preparing Scan request\n"));
+	WL_SCAN(("nprobes=%d\n", params->nprobes));
+	WL_SCAN(("active_time=%d\n", params->active_time));
+	WL_SCAN(("passive_time=%d\n", params->passive_time));
+	WL_SCAN(("home_time=%d\n", params->home_time));
+	WL_SCAN(("scan_type=%d\n", params->scan_type));
+
+	params->nprobes = htod32(params->nprobes);
+	params->active_time = htod32(params->active_time);
+	params->passive_time = htod32(params->passive_time);
+	params->home_time = htod32(params->home_time);
+
+	/* if request is null just exit so it will be an all-channel broadcast scan */
+	if (!request)
+		return;
+
+	n_ssids = request->n_ssids;
+	n_channels = request->n_channels;
+
+	/* Copy channel array if applicable */
+	WL_SCAN(("### List of channelspecs to scan ###\n"));
+	if (n_channels > 0) {
+		for (i = 0; i < n_channels; i++) {
+			chanspec = 0;
+			channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq);
+			/* Skip DFS channels for the secondary interface */
+			if ((cfg->escan_info.ndev != bcmcfg_to_prmry_ndev(cfg)) &&
+				(request->channels[i]->flags &
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+				(IEEE80211_CHAN_RADAR | IEEE80211_CHAN_PASSIVE_SCAN)))
+#else
+				(IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)))
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
+				continue;
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+			wdev = request->wdev;
+#else
+			wdev = request->dev->ieee80211_ptr;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+			chanspec = wl_cfg80211_ulb_get_min_bw_chspec(wdev, -1);
+			if (chanspec == INVCHANSPEC) {
+				WL_ERR(("Invalid chanspec! 
Skipping channel\n")); + continue; + } + + if (request->channels[i]->band == NL80211_BAND_2GHZ ) { + chanspec |= WL_CHANSPEC_BAND_2G; + } else { + chanspec |= WL_CHANSPEC_BAND_5G; + } + params->channel_list[j] = channel; + params->channel_list[j] &= WL_CHANSPEC_CHAN_MASK; + params->channel_list[j] |= chanspec; + WL_SCAN(("Chan : %d, Channel spec: %x \n", + channel, params->channel_list[j])); + params->channel_list[j] = wl_chspec_host_to_driver(params->channel_list[j]); + j++; + } + } else { + WL_SCAN(("Scanning all channels\n")); + } + n_channels = j; + /* Copy ssid array if applicable */ + WL_SCAN(("### List of SSIDs to scan ###\n")); + if (n_ssids > 0) { + offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16); + offset = roundup(offset, sizeof(u32)); + ptr = (char*)params + offset; + for (i = 0; i < n_ssids; i++) { + memset(&ssid, 0, sizeof(wlc_ssid_t)); + ssid.SSID_len = request->ssids[i].ssid_len; + memcpy(ssid.SSID, request->ssids[i].ssid, ssid.SSID_len); + if (!ssid.SSID_len) + WL_SCAN(("%d: Broadcast scan\n", i)); + else + WL_SCAN(("%d: scan for %s size =%d\n", i, + ssid.SSID, ssid.SSID_len)); + memcpy(ptr, &ssid, sizeof(wlc_ssid_t)); + ptr += sizeof(wlc_ssid_t); + } + } else { + WL_SCAN(("Broadcast scan\n")); + } + /* Adding mask to channel numbers */ + params->channel_num = + htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) | + (n_channels & WL_SCAN_PARAMS_COUNT_MASK)); + + if (n_channels == 1) { + params->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS); + params->nprobes = htod32(params->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS); + } +} + +static s32 +wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size) +{ + wl_uint32_list_t *list; + s32 err = BCME_OK; + if (valid_chan_list == NULL || size <= 0) + return -ENOMEM; + + memset(valid_chan_list, 0, size); + list = (wl_uint32_list_t *)(void *) valid_chan_list; + list->count = htod32(WL_NUMCHANNELS); + err = wldev_ioctl(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size, false); + if (err != 0) { + WL_ERR(("get channels failed with %d\n", err)); + } + + return err; +} + +#if defined(USE_INITIAL_SHORT_DWELL_TIME) +#define FIRST_SCAN_ACTIVE_DWELL_TIME_MS 40 +bool g_first_broadcast_scan = TRUE; +#endif + +static s32 +wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev, + struct cfg80211_scan_request *request, uint16 action) +{ + s32 err = BCME_OK; + u32 n_channels; + u32 n_ssids; + s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params)); + wl_escan_params_t *params = NULL; + u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)]; + u32 num_chans = 0; + s32 channel; + u32 n_valid_chan; + s32 search_state = WL_P2P_DISC_ST_SCAN; + u32 i, j, n_nodfs = 0; + u16 *default_chan_list = NULL; + wl_uint32_list_t *list; + s32 bssidx = -1; + struct net_device *dev = NULL; +#if defined(USE_INITIAL_SHORT_DWELL_TIME) + bool is_first_init_2g_scan = false; +#endif + p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_PURPOSE_MIN; + + WL_DBG(("Enter \n")); + + /* scan request can come with empty request : perform all default scan */ + if (!cfg) { + err = -EINVAL; + goto exit; + } + if (!cfg->p2p_supported || !p2p_scan(cfg)) { + /* LEGACY SCAN TRIGGER */ + WL_ERR((" LEGACY E-SCAN START\n")); + +#if defined(USE_INITIAL_SHORT_DWELL_TIME) + if (!request) { + err = -EINVAL; + goto exit; + } + if (ndev == bcmcfg_to_prmry_ndev(cfg) && g_first_broadcast_scan == true) { + is_first_init_2g_scan = true; + g_first_broadcast_scan = false; + } +#endif + + /* if scan request is not empty parse 
scan request parameters */
+		if (request != NULL) {
+			n_channels = request->n_channels;
+			n_ssids = request->n_ssids;
+			if (n_channels % 2)
+				/* If n_channels is odd, add a pad of u16 */
+				params_size += sizeof(u16) * (n_channels + 1);
+			else
+				params_size += sizeof(u16) * n_channels;
+
+			/* Allocate space for populating ssids in wl_escan_params_t struct */
+			params_size += sizeof(struct wlc_ssid) * n_ssids;
+		}
+		params = (wl_escan_params_t *) kzalloc(params_size, GFP_KERNEL);
+		if (params == NULL) {
+			err = -ENOMEM;
+			goto exit;
+		}
+		wl_scan_prep(&params->params, request);
+
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+		/* Override active_time to reduce scan time if it's the first broadcast scan. */
+		if (is_first_init_2g_scan)
+			params->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
+#endif
+
+		params->version = htod32(ESCAN_REQ_VERSION);
+		params->action = htod16(action);
+		wl_escan_set_sync_id(params->sync_id, cfg);
+		wl_escan_set_type(cfg, WL_SCANTYPE_LEGACY);
+		if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
+			WL_ERR(("ioctl buffer length not sufficient\n"));
+			kfree(params);
+			err = -ENOMEM;
+			goto exit;
+		}
+		if (cfg->active_scan == PASSIVE_SCAN) {
+			params->params.scan_type = DOT11_SCANTYPE_PASSIVE;
+			WL_DBG(("Passive scan_type %d \n", params->params.scan_type));
+		}
+
+		bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+
+		err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
+			cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+		WL_ERR(("LEGACY_SCAN sync ID: %d, bssidx: %d\n", params->sync_id, bssidx));
+		if (unlikely(err)) {
+			if (err == BCME_EPERM)
+				/* Scan not permitted at this point in time */
+				WL_DBG((" Escan not permitted at this time (%d)\n", err));
+			else
+				WL_ERR((" Escan set error (%d)\n", err));
+		}
+		kfree(params);
+	}
+	else if (p2p_is_on(cfg) && p2p_scan(cfg)) {
+		/* P2P SCAN TRIGGER */
+		s32 _freq = 0;
+		n_nodfs = 0;
+		if (request && request->n_channels) {
+			num_chans = request->n_channels;
+			WL_ERR((" channel count: %d\n", num_chans));
+			default_chan_list = kzalloc(num_chans * sizeof(*default_chan_list),
+				GFP_KERNEL);
+			if (default_chan_list == NULL) {
+				WL_ERR(("channel list allocation failed \n"));
+				err = -ENOMEM;
+				goto exit;
+			}
+			if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) {
+#ifdef P2P_SKIP_DFS
+				int is_printed = false;
+#endif /* P2P_SKIP_DFS */
+				list = (wl_uint32_list_t *) chan_buf;
+				n_valid_chan = dtoh32(list->count);
+				for (i = 0; i < num_chans; i++)
+				{
+					_freq = request->channels[i]->center_freq;
+					channel = ieee80211_frequency_to_channel(_freq);
+
+					/* ignore DFS channels */
+					if (request->channels[i]->flags &
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+						(IEEE80211_CHAN_NO_IR
+						| IEEE80211_CHAN_RADAR))
+#else
+						(IEEE80211_CHAN_RADAR
+						| IEEE80211_CHAN_PASSIVE_SCAN))
+#endif
+						continue;
+#ifdef P2P_SKIP_DFS
+					if (channel >= 52 && channel <= 144) {
+						if (is_printed == false) {
+							WL_ERR(("SKIP DFS CHANs(52~144)\n"));
+							is_printed = true;
+						}
+						continue;
+					}
+#endif /* P2P_SKIP_DFS */
+
+					for (j = 0; j < n_valid_chan; j++) {
+						/* allow only channels supported under the
+						 * current regulatory domain
+						 */
+						if (channel == (dtoh32(list->element[j])))
+							default_chan_list[n_nodfs++] =
+								channel;
+					}
+
+				}
+			}
+			if (num_chans == SOCIAL_CHAN_CNT && (
+				(default_chan_list[0] == SOCIAL_CHAN_1) &&
+				(default_chan_list[1] == SOCIAL_CHAN_2) &&
+				(default_chan_list[2] == SOCIAL_CHAN_3))) {
+				/* SOCIAL CHANNELS 1, 6, 11 */
+				search_state = WL_P2P_DISC_ST_SEARCH;
+				p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+				WL_INFORM(("P2P 
SEARCH PHASE START \n")); + } else if (((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1)) && + (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) || + ((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2)) && + (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP))) { + /* If you are already a GO, then do SEARCH only */ + WL_INFORM(("Already a GO. Do SEARCH Only")); + search_state = WL_P2P_DISC_ST_SEARCH; + num_chans = n_nodfs; + p2p_scan_purpose = P2P_SCAN_NORMAL; + + } else if (num_chans == 1) { + p2p_scan_purpose = P2P_SCAN_CONNECT_TRY; + } else if (num_chans == SOCIAL_CHAN_CNT + 1) { + /* SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan supported by + * the supplicant + */ + p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL; + } else { + WL_INFORM(("P2P SCAN STATE START \n")); + num_chans = n_nodfs; + p2p_scan_purpose = P2P_SCAN_NORMAL; + } + } else { + err = -EINVAL; + goto exit; + } + err = wl_cfgp2p_escan(cfg, ndev, ACTIVE_SCAN, num_chans, default_chan_list, + search_state, action, + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE), NULL, + p2p_scan_purpose); + + if (!err) + cfg->p2p->search_state = search_state; + + kfree(default_chan_list); + } +exit: + if (unlikely(err)) { + /* Don't print Error incase of Scan suppress */ + if ((err == BCME_EPERM) && cfg->scan_suppressed) + WL_DBG(("Escan failed: Scan Suppressed \n")); + else + WL_ERR(("error (%d)\n", err)); + } + return err; +} + + +static s32 +wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request) +{ + s32 err = BCME_OK; + s32 passive_scan; + s32 passive_scan_time; + s32 passive_scan_time_org; + wl_scan_results_t *results; + WL_SCAN(("Enter \n")); + + results = wl_escan_get_buf(cfg, FALSE); + results->version = 0; + results->count = 0; + results->buflen = WL_SCAN_RESULTS_FIXED_SIZE; + + cfg->escan_info.ndev = ndev; + cfg->escan_info.wiphy = wiphy; + cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANING; + passive_scan = cfg->active_scan ? 0 : 1; + err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN, + &passive_scan, sizeof(passive_scan), true); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + goto exit; + } + + if (passive_channel_skip) { + + err = wldev_ioctl(ndev, WLC_GET_SCAN_PASSIVE_TIME, + &passive_scan_time_org, sizeof(passive_scan_time_org), false); + if (unlikely(err)) { + WL_ERR(("== error (%d)\n", err)); + goto exit; + } + + WL_SCAN(("PASSIVE SCAN time : %d \n", passive_scan_time_org)); + + passive_scan_time = 0; + err = wldev_ioctl(ndev, WLC_SET_SCAN_PASSIVE_TIME, + &passive_scan_time, sizeof(passive_scan_time), true); + if (unlikely(err)) { + WL_ERR(("== error (%d)\n", err)); + goto exit; + } + + WL_SCAN(("PASSIVE SCAN SKIPED!! (passive_channel_skip:%d) \n", + passive_channel_skip)); + } + + err = wl_run_escan(cfg, ndev, request, WL_SCAN_ACTION_START); + + if (passive_channel_skip) { + err = wldev_ioctl(ndev, WLC_SET_SCAN_PASSIVE_TIME, + &passive_scan_time_org, sizeof(passive_scan_time_org), true); + if (unlikely(err)) { + WL_ERR(("== error (%d)\n", err)); + goto exit; + } + + WL_SCAN(("PASSIVE SCAN RECOVERED!! 
(passive_scan_time_org:%d) \n", + passive_scan_time_org)); + } + +exit: + return err; +} + +static s32 +__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request, + struct cfg80211_ssid *this_ssid) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct cfg80211_ssid *ssids; + struct ether_addr primary_mac; + bool p2p_ssid; +#ifdef WL11U + bcm_tlv_t *interworking_ie; +#endif + s32 err = 0; + s32 bssidx = -1; + s32 i; + + unsigned long flags; + static s32 busy_count = 0; +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + struct net_device *remain_on_channel_ndev = NULL; +#endif + + /* + * Hostapd triggers scan before starting automatic channel selection + * to collect channel characteristics. However firmware scan engine + * doesn't support any channel characteristics collection along with + * scan. Hence return scan success. + */ + if (request && (scan_req_iftype(request) == NL80211_IFTYPE_AP)) { + WL_INFORM(("Scan Command on SoftAP Interface. Ignoring...\n")); + return 0; + } + + ndev = ndev_to_wlc_ndev(ndev, cfg); + + if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) { + WL_ERR(("Sending Action Frames. Try it again.\n")); + return -EAGAIN; + } + + WL_DBG(("Enter wiphy (%p)\n", wiphy)); + if (wl_get_drv_status_all(cfg, SCANNING)) { + if (cfg->scan_request == NULL) { + wl_clr_drv_status_all(cfg, SCANNING); + WL_DBG(("<<<<<<<<<<>>>>>>>>>>\n")); + } else { + WL_ERR(("Scanning already\n")); + return -EAGAIN; + } + } + if (wl_get_drv_status(cfg, SCAN_ABORTING, ndev)) { + WL_ERR(("Scanning being aborted\n")); + return -EAGAIN; + } + if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) { + WL_ERR(("request null or n_ssids > WL_SCAN_PARAMS_SSID_MAX\n")); + return -EOPNOTSUPP; + } + +#ifdef P2P_LISTEN_OFFLOADING + if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) { + WL_ERR(("P2P_FIND: Discovery offload is in progress\n")); + return -EAGAIN; + } +#endif /* P2P_LISTEN_OFFLOADING */ + +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + remain_on_channel_ndev = wl_cfg80211_get_remain_on_channel_ndev(cfg); + if (remain_on_channel_ndev) { + WL_DBG(("Remain_on_channel bit is set, somehow it didn't get cleared\n")); + wl_notify_escan_complete(cfg, remain_on_channel_ndev, true, true); + } +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + + + /* Arm scan timeout timer */ + mod_timer(&cfg->scan_timeout, jiffies + msecs_to_jiffies(WL_SCAN_TIMER_INTERVAL_MS)); + if (request) { /* scan bss */ + ssids = request->ssids; + p2p_ssid = false; + for (i = 0; i < request->n_ssids; i++) { + if (ssids[i].ssid_len && + IS_P2P_SSID(ssids[i].ssid, ssids[i].ssid_len)) { + p2p_ssid = true; + break; + } + } + if (p2p_ssid) { + if (cfg->p2p_supported) { + /* p2p scan trigger */ + if (p2p_on(cfg) == false) { + /* p2p on at the first time */ + p2p_on(cfg) = true; + wl_cfgp2p_set_firm_p2p(cfg); + get_primary_mac(cfg, &primary_mac); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); +#if defined(P2P_IE_MISSING_FIX) + cfg->p2p_prb_noti = false; +#endif + } + wl_clr_p2p_status(cfg, GO_NEG_PHASE); + WL_DBG(("P2P: GO_NEG_PHASE status cleared \n")); + p2p_scan(cfg) = true; + } + } else { + /* legacy scan trigger + * So, we have to disable p2p discovery if p2p discovery is on + */ + if (cfg->p2p_supported) { + p2p_scan(cfg) = false; + /* If Netdevice is not equals to primary and p2p is on + * , we will do p2p scan using P2PAPI_BSSCFG_DEVICE. 
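+			 * Discovery search is disabled first so that the legacy scan
+			 * below is not interleaved with P2P find.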
+ */ + + if (p2p_scan(cfg) == false) { + if (wl_get_p2p_status(cfg, DISCOVERY_ON)) { + err = wl_cfgp2p_discover_enable_search(cfg, + false); + if (unlikely(err)) { + goto scan_out; + } + + } + } + } + if (!cfg->p2p_supported || !p2p_scan(cfg)) { + if ((bssidx = wl_get_bssidx_by_wdev(cfg, + ndev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from ndev(%p) failed\n", + ndev)); + err = BCME_ERROR; + goto scan_out; + } +#ifdef WL11U + if ((interworking_ie = wl_cfg80211_find_interworking_ie( + (u8 *)request->ie, request->ie_len)) != NULL) { + err = wl_cfg80211_add_iw_ie(cfg, ndev, bssidx, + VNDR_IE_CUSTOM_FLAG, interworking_ie->id, + interworking_ie->data, interworking_ie->len); + + if (unlikely(err)) { + WL_ERR(("Failed to add interworking IE")); + } + } else if (cfg->iw_ie_len != 0) { + /* we have to clear IW IE and disable gratuitous APR */ + wl_cfg80211_add_iw_ie(cfg, ndev, bssidx, + VNDR_IE_CUSTOM_FLAG, + DOT11_MNG_INTERWORKING_ID, + 0, 0); + + (void)wldev_iovar_setint_bsscfg(ndev, "grat_arp", 0, + bssidx); + cfg->wl11u = FALSE; + cfg->iw_ie_len = 0; + memset(cfg->iw_ie, 0, IW_IES_MAX_BUF_LEN); + /* we don't care about error */ + } +#endif /* WL11U */ + err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(ndev), + bssidx, VNDR_IE_PRBREQ_FLAG, request->ie, + request->ie_len); + + if (unlikely(err)) { + goto scan_out; + } + + } + } + } else { /* scan in ibss */ + ssids = this_ssid; + } + + if (request && cfg->p2p_supported && !p2p_scan(cfg)) { + WL_TRACE_HW4(("START SCAN\n")); + DHD_OS_SCAN_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub), + SCAN_WAKE_LOCK_TIMEOUT); + DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub)); + } + + if (cfg->p2p_supported) { + if (p2p_on(cfg) && p2p_scan(cfg)) { + + /* find my listen channel */ + cfg->afx_hdl->my_listen_chan = + wl_find_listen_channel(cfg, request->ie, + request->ie_len); + err = wl_cfgp2p_enable_discovery(cfg, ndev, + request->ie, request->ie_len); + + if (unlikely(err)) { + goto scan_out; + } + } + } + err = wl_do_escan(cfg, wiphy, ndev, request); + if (likely(!err)) + goto scan_success; + else + goto scan_out; + +scan_success: + busy_count = 0; + cfg->scan_request = request; + wl_set_drv_status(cfg, SCANNING, ndev); + + return 0; + +scan_out: + if (err == BCME_BUSY || err == BCME_NOTREADY) { + WL_ERR(("Scan err = (%d), busy?%d", err, -EBUSY)); + err = -EBUSY; + } else if ((err == BCME_EPERM) && cfg->scan_suppressed) { + WL_ERR(("Scan not permitted due to scan suppress\n")); + err = -EPERM; + } else { + /* For all other fw errors, use a generic error code as return + * value to cfg80211 stack + */ + err = -EAGAIN; + } + +#define SCAN_EBUSY_RETRY_LIMIT 20 + if (err == -EBUSY) { + if (busy_count++ > SCAN_EBUSY_RETRY_LIMIT) { + struct ether_addr bssid; + s32 ret = 0; +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); +#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */ + busy_count = 0; + WL_ERR(("Unusual continuous EBUSY error, %d %d %d %d %d %d %d %d %d\n", + wl_get_drv_status(cfg, SCANNING, ndev), + wl_get_drv_status(cfg, SCAN_ABORTING, ndev), + wl_get_drv_status(cfg, CONNECTING, ndev), + wl_get_drv_status(cfg, CONNECTED, ndev), + wl_get_drv_status(cfg, DISCONNECTING, ndev), + wl_get_drv_status(cfg, AP_CREATING, ndev), + wl_get_drv_status(cfg, AP_CREATED, ndev), + wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev), + wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev))); + +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + if (dhdp->memdump_enabled) { + dhdp->memdump_type 
+#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+			if (dhdp->memdump_enabled) {
+				dhdp->memdump_type = DUMP_TYPE_SCAN_BUSY;
+				dhd_bus_mem_dump(dhdp);
+			}
+#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */
+
+			bzero(&bssid, sizeof(bssid));
+			if ((ret = wldev_ioctl(ndev, WLC_GET_BSSID,
+				&bssid, ETHER_ADDR_LEN, false)) == 0)
+				WL_ERR(("FW is connected with " MACDBG "\n",
+					MAC2STRDBG(bssid.octet)));
+			else
+				WL_ERR(("GET BSSID failed with %d\n", ret));
+
+			wl_cfg80211_scan_abort(cfg);
+
+		} else {
+			/* Hold the context for 500msec, so that 10 subsequent scans
+			 * can give a buffer of 5sec which is enough to
+			 * cover any on-going scan in the firmware
+			 */
+			WL_DBG(("Enforcing delay for EBUSY case \n"));
+			msleep(500);
+		}
+	} else {
+		busy_count = 0;
+	}
+
+	wl_clr_drv_status(cfg, SCANNING, ndev);
+	if (timer_pending(&cfg->scan_timeout))
+		del_timer_sync(&cfg->scan_timeout);
+	DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+	spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+	cfg->scan_request = NULL;
+	spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+	return err;
+}
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
+#else
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+	struct cfg80211_scan_request *request)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+	s32 err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	struct net_device *ndev = wdev_to_wlc_ndev(request->wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
+	WL_DBG(("Enter\n"));
+	RETURN_EIO_IF_NOT_UP(cfg);
+
+	if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+		if (wl_cfg_multip2p_operational(cfg)) {
+			WL_ERR(("wlan0 scan failed, p2p devices are operational"));
+			return -ENODEV;
+		}
+	}
+
+	mutex_lock(&cfg->usr_sync);
+	err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
+	if (unlikely(err)) {
+		WL_ERR(("scan error (%d)\n", err));
+	}
+	mutex_unlock(&cfg->usr_sync);
+
+	return err;
+}
+
+static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
+{
+	s32 err = 0;
+
+	err = wldev_iovar_setint(dev, "rtsthresh", rts_threshold);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold)
+{
+	s32 err = 0;
+
+	err = wldev_iovar_setint_bsscfg(dev, "fragthresh", frag_threshold, 0);
+	if (unlikely(err)) {
+		WL_ERR(("Error (%d)\n", err));
+		return err;
+	}
+	return err;
+}
+
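+/* Plumb a dot11 retry limit: 'l' selects the long retry limit
+ * (WLC_SET_LRL, used for frames longer than the RTS threshold),
+ * otherwise the short retry limit (WLC_SET_SRL). The count is
+ * converted to dongle byte order with htod32() before the ioctl.
+ */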
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
+{
+	s32 err = 0;
+	u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL);
+
+	retry = htod32(retry);
+	err = wldev_ioctl(dev, cmd, &retry, sizeof(retry), true);
+	if (unlikely(err)) {
+		WL_ERR(("cmd (%d), error (%d)\n", cmd, err));
+		return err;
+	}
+	return err;
+}
+
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	s32 err = 0;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	WL_DBG(("Enter\n"));
+	if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
+		(cfg->conf->rts_threshold != wiphy->rts_threshold)) {
+		cfg->conf->rts_threshold = wiphy->rts_threshold;
+		err = wl_set_rts(ndev, cfg->conf->rts_threshold);
+		if (err)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
+		(cfg->conf->frag_threshold != wiphy->frag_threshold)) {
+		cfg->conf->frag_threshold = wiphy->frag_threshold;
+		err = wl_set_frag(ndev, cfg->conf->frag_threshold);
+		if (err)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_RETRY_LONG &&
+		(cfg->conf->retry_long != wiphy->retry_long)) {
+		cfg->conf->retry_long = wiphy->retry_long;
+		err = wl_set_retry(ndev, cfg->conf->retry_long, true);
+		if (err)
+			return err;
+	}
+	if (changed & WIPHY_PARAM_RETRY_SHORT &&
+		(cfg->conf->retry_short != wiphy->retry_short)) {
+		cfg->conf->retry_short = wiphy->retry_short;
+		err = wl_set_retry(ndev, cfg->conf->retry_short, false);
+		if (err) {
+			return err;
+		}
+	}
+
+	return err;
+}
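+/* Translate a control channel number into the widest chanspec the
+ * firmware can actually use: walk the "chanspecs" iovar results and
+ * keep the candidate with the largest bandwidth not exceeding bw_cap.
+ * 2.4 GHz channels are only matched at 20 MHz. Returns 0 if no usable
+ * chanspec is found.
+ */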
+static chanspec_t
+channel_to_chanspec(struct wiphy *wiphy, struct net_device *dev, u32 channel, u32 bw_cap)
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	u8 *buf = NULL;
+	wl_uint32_list_t *list;
+	int err = BCME_OK;
+	chanspec_t c = 0, ret_c = 0;
+	int bw = 0, tmp_bw = 0;
+	int i;
+	u32 tmp_c;
+	gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+#define LOCAL_BUF_SIZE 1024
+	buf = (u8 *) kzalloc(LOCAL_BUF_SIZE, kflags);
+	if (!buf) {
+		WL_ERR(("buf memory alloc failed\n"));
+		goto exit;
+	}
+	list = (wl_uint32_list_t *)(void *)buf;
+	list->count = htod32(WL_NUMCHANSPECS);
+	err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
+		0, buf, LOCAL_BUF_SIZE, 0, &cfg->ioctl_buf_sync);
+	if (err != BCME_OK) {
+		WL_ERR(("get chanspecs failed with %d\n", err));
+		goto exit;
+	}
+	for (i = 0; i < dtoh32(list->count); i++) {
+		c = dtoh32(list->element[i]);
+		if (channel <= CH_MAX_2G_CHANNEL) {
+			if (!CHSPEC_IS20(c))
+				continue;
+			if (channel == CHSPEC_CHANNEL(c)) {
+				ret_c = c;
+				bw = 20;
+				goto exit;
+			}
+		}
+		tmp_c = wf_chspec_ctlchan(c);
+		tmp_bw = bw2cap[CHSPEC_BW(c) >> WL_CHANSPEC_BW_SHIFT];
+		if (tmp_c != channel)
+			continue;
+
+		if ((tmp_bw > bw) && (tmp_bw <= bw_cap)) {
+			bw = tmp_bw;
+			ret_c = c;
+			if (bw == bw_cap)
+				goto exit;
+		}
+	}
+exit:
+	if (buf)
+		kfree(buf);
+#undef LOCAL_BUF_SIZE
+	WL_INFORM(("return chanspec %x %d\n", ret_c, bw));
+	return ret_c;
+}
+
+void
+wl_cfg80211_ibss_vsie_set_buffer(vndr_ie_setbuf_t *ibss_vsie, int ibss_vsie_len)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+	if (cfg != NULL && ibss_vsie != NULL) {
+		if (cfg->ibss_vsie != NULL) {
+			kfree(cfg->ibss_vsie);
+		}
+		cfg->ibss_vsie = ibss_vsie;
+		cfg->ibss_vsie_len = ibss_vsie_len;
+	}
+}
+
+static void
+wl_cfg80211_ibss_vsie_free(struct bcm_cfg80211 *cfg)
+{
+	/* free & initialize VSIE (Vendor Specific IE) */
+	if (cfg->ibss_vsie != NULL) {
+		kfree(cfg->ibss_vsie);
+		cfg->ibss_vsie = NULL;
+		cfg->ibss_vsie_len = 0;
+	}
+}
+
+s32
+wl_cfg80211_ibss_vsie_delete(struct net_device *dev)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	char *ioctl_buf = NULL;
+	s32 ret = BCME_OK;
+
+	if (cfg != NULL && cfg->ibss_vsie != NULL) {
+		ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+		if (!ioctl_buf) {
+			WL_ERR(("ioctl memory alloc failed\n"));
+			return -ENOMEM;
+		}
+
+		/* change the command from "add" to "del" */
+		strncpy(cfg->ibss_vsie->cmd, "del", VNDR_IE_CMD_LEN - 1);
+		cfg->ibss_vsie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+		ret = wldev_iovar_setbuf(dev, "ie",
+			cfg->ibss_vsie, cfg->ibss_vsie_len,
+			ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+		WL_ERR(("ret=%d\n", ret));
+
+		if (ret == BCME_OK) {
+			/* free & initialize VSIE */
+			kfree(cfg->ibss_vsie);
+			cfg->ibss_vsie = NULL;
+			cfg->ibss_vsie_len = 0;
+		}
+
+		if (ioctl_buf) {
+			kfree(ioctl_buf);
+		}
+	}
+
+	return ret;
+}
+
+#ifdef WLAIBSS_MCHAN
+static bcm_struct_cfgdev*
+bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct wireless_dev* wdev = NULL;
+	struct net_device *new_ndev = NULL;
+	struct net_device *primary_ndev = NULL;
+	s32 timeout;
+	wl_aibss_if_t aibss_if;
+	wl_if_event_info *event = NULL;
+
+	if (cfg->ibss_cfgdev != NULL) {
+		WL_ERR(("IBSS interface %s already exists\n", name));
+		return NULL;
+	}
+
+	WL_ERR(("Try to create IBSS interface %s\n", name));
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+	/* generate a new MAC address for the IBSS interface */
+	get_primary_mac(cfg, &cfg->ibss_if_addr);
+	cfg->ibss_if_addr.octet[4] ^= 0x40;
+	memset(&aibss_if, 0, sizeof(aibss_if));
+	memcpy(&aibss_if.addr, &cfg->ibss_if_addr, sizeof(aibss_if.addr));
+	aibss_if.chspec = 0;
+	aibss_if.len = sizeof(aibss_if);
+
+	cfg->bss_pending_op = TRUE;
+	memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
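+	/* Ask the firmware to create the AIBSS interface. Creation is
+	 * asynchronous: the dongle answers with an interface-add event,
+	 * whose handler clears bss_pending_op, so we issue the iovar and
+	 * then sleep on netif_change_event below.
+	 */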
+	err = wldev_iovar_setbuf(primary_ndev, "aibss_ifadd", &aibss_if,
+		sizeof(aibss_if), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, NULL);
+	if (err) {
+		WL_ERR(("IOVAR aibss_ifadd failed with error %d\n", err));
+		goto fail;
+	}
+	timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+		!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+	if (timeout <= 0 || cfg->bss_pending_op)
+		goto fail;
+
+	event = &cfg->if_event_info;
+	/* By calling wl_cfg80211_allocate_if (dhd_allocate_if eventually) we give the control
+	 * over this net_device interface to dhd_linux, hence the interface is managed by
+	 * dhd_linux and will be freed by dhd_detach unless it gets unregistered before that. The
+	 * wireless_dev instance new_ndev->ieee80211_ptr associated with this net_device will
+	 * be freed by wl_dealloc_netinfo
+	 */
+	new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, event->name,
+		event->mac, event->bssidx, event->name);
+	if (new_ndev == NULL)
+		goto fail;
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (wdev == NULL)
+		goto fail;
+	wdev->wiphy = wiphy;
+	wdev->iftype = NL80211_IFTYPE_ADHOC;
+	wdev->netdev = new_ndev;
+	new_ndev->ieee80211_ptr = wdev;
+	SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
+
+	/* rtnl lock must have been acquired, if this is not the case, wl_cfg80211_register_if
+	 * needs to be modified to take one parameter (bool need_rtnl_lock)
+	 */
+	ASSERT_RTNL();
+	if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK)
+		goto fail;
+
+	wl_alloc_netinfo(cfg, new_ndev, wdev, WL_MODE_IBSS, PM_ENABLE, event->bssidx);
+	cfg->ibss_cfgdev = ndev_to_cfgdev(new_ndev);
+	WL_ERR(("IBSS interface %s created\n", new_ndev->name));
+	return cfg->ibss_cfgdev;
+
+fail:
+	WL_ERR(("failed to create IBSS interface %s \n", name));
+	cfg->bss_pending_op = FALSE;
+	if (new_ndev)
+		wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
+	if (wdev)
+		kfree(wdev);
+	return NULL;
+}
+
+static s32
+bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+	int err = 0;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct net_device *ndev = NULL;
+	struct net_device *primary_ndev = NULL;
+	s32 timeout;
+
+	if (!cfgdev || cfg->ibss_cfgdev != cfgdev || ETHER_ISNULLADDR(&cfg->ibss_if_addr.octet))
+		return -EINVAL;
+	ndev = (struct net_device *)cfgdev_to_ndev(cfg->ibss_cfgdev);
+	primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+	cfg->bss_pending_op = TRUE;
+	memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+	err = wldev_iovar_setbuf(primary_ndev, "aibss_ifdel", &cfg->ibss_if_addr,
+		sizeof(cfg->ibss_if_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, NULL);
+	if (err) {
+		WL_ERR(("IOVAR aibss_ifdel failed with error %d\n", err));
+		goto fail;
+	}
+	timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+		!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+	if (timeout <= 0 || cfg->bss_pending_op) {
+		WL_ERR(("timeout in waiting IF_DEL event\n"));
+		goto fail;
+	}
+
+	wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev);
+	cfg->ibss_cfgdev = NULL;
+	return 0;
+
+fail:
+	cfg->bss_pending_op = FALSE;
+	return -1;
+}
+#endif /* WLAIBSS_MCHAN */
+
+s32
+wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg,
+	struct net_device *ndev, s32 bsscfg_idx,
+	enum nl80211_iftype iface_type, s32 del, u8 *addr)
+{
+	wl_interface_create_t iface;
+	s32 ret;
+	wl_interface_info_t *info;
+
+	bzero(&iface, sizeof(wl_interface_create_t));
+
+	iface.ver = WL_INTERFACE_CREATE_VER;
+
+	if (iface_type == NL80211_IFTYPE_AP)
+		iface.flags = WL_INTERFACE_CREATE_AP;
+	else
+		iface.flags = WL_INTERFACE_CREATE_STA;
+
+	if (del) {
+		ret = wldev_iovar_setbuf(ndev, 
"interface_remove", + NULL, 0, cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL); + } else { + if (addr) { + memcpy(&iface.mac_addr.octet, addr, ETH_ALEN); + iface.flags |= WL_INTERFACE_MAC_USE; + } + ret = wldev_iovar_getbuf(ndev, "interface_create", + &iface, sizeof(wl_interface_create_t), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (ret == 0) { + /* success */ + info = (wl_interface_info_t *)cfg->ioctl_buf; + WL_DBG(("wl interface create success!! bssidx:%d \n", + info->bsscfgidx)); + ret = info->bsscfgidx; + } + } + + if (ret < 0) + WL_ERR(("Interface %s failed!! ret %d\n", + del ? "remove" : "create", ret)); + + return ret; +} + + +s32 +wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg, + struct net_device *ndev, s32 bsscfg_idx, + enum nl80211_iftype iface_type, s32 del, u8 *addr) +{ + s32 ret = BCME_OK; + s32 val = 0; + + struct { + s32 cfg; + s32 val; + struct ether_addr ea; + } bss_setbuf; + + WL_INFORM(("iface_type:%d del:%d \n", iface_type, del)); + + bzero(&bss_setbuf, sizeof(bss_setbuf)); + + /* AP=3, STA=2, up=1, down=0, val=-1 */ + if (del) { + val = -1; + } else if (iface_type == NL80211_IFTYPE_AP) { + /* AP Interface */ + WL_DBG(("Adding AP Interface \n")); + val = 3; + } else if (iface_type == NL80211_IFTYPE_STATION) { + WL_DBG(("Adding STA Interface \n")); + val = 2; + } else { + WL_ERR((" add_del_bss NOT supported for IFACE type:0x%x", iface_type)); + return -EINVAL; + } + + bss_setbuf.cfg = htod32(bsscfg_idx); + bss_setbuf.val = htod32(val); + + if (addr) { + memcpy(&bss_setbuf.ea.octet, addr, ETH_ALEN); + } + + ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (ret != 0) + WL_ERR(("'bss %d' failed with %d\n", val, ret)); + + return ret; +} + +#if defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) +/* Create a Generic Network Interface and initialize it depending up on + * the interface type + */ +bcm_struct_cfgdev* +wl_cfg80211_create_iface(struct wiphy *wiphy, + enum nl80211_iftype iface_type, + u8 *mac_addr, const char *name) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *new_ndev = NULL; + struct net_device *primary_ndev = NULL; + s32 ret = BCME_OK; + s32 bsscfg_idx = 0; + u32 timeout; + wl_if_event_info *event = NULL; + struct wireless_dev *wdev = NULL; + u8 addr[ETH_ALEN]; + + WL_DBG(("Enter\n")); + + if (!name) { + WL_ERR(("Interface name not provided\n")); + return NULL; + } + + primary_ndev = bcmcfg_to_prmry_ndev(cfg); + +#ifdef DHD_IFDEBUG + WL_ERR(("cfg=%p, primary_ndev=%p, ifname=%s\n", cfg, primary_ndev, name)); +#endif + + /* If any scan is going on, abort it */ + if (wl_get_drv_status_all(cfg, SCANNING)) { + int wait_cnt = MAX_SCAN_ABORT_WAIT_CNT; + WL_ERR(("Scan in progress. 
Aborting the scan!\n")); + wl_cfg80211_scan_abort(cfg); + while (wl_get_drv_status_all(cfg, SCANNING) && wait_cnt) { + WL_DBG(("Waiting for SCANNING terminated, wait_cnt: %d\n", wait_cnt)); + wait_cnt--; + OSL_SLEEP(WAIT_SCAN_ABORT_OSL_SLEEP_TIME); + } + if (!wait_cnt && wl_get_drv_status_all(cfg, SCANNING)) { + WL_ERR(("Failed to abort scan\n")); + return NULL; + } + } + + primary_ndev = bcmcfg_to_prmry_ndev(cfg); + if (likely(!mac_addr)) { + /* Use primary MAC with the locally administered bit for the + * Secondary STA I/F + */ + memcpy(addr, primary_ndev->dev_addr, ETH_ALEN); + addr[0] |= 0x02; + } else { + /* Use the application provided mac address (if any) */ + memcpy(addr, mac_addr, ETH_ALEN); + } + + if ((iface_type != NL80211_IFTYPE_STATION) && (iface_type != NL80211_IFTYPE_AP)) { + WL_ERR(("IFACE type:%d not supported. STA " + "or AP IFACE is only supported\n", iface_type)); + return NULL; + } + + cfg->bss_pending_op = TRUE; + memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info)); + + /* De-initialize the p2p discovery interface, if operational */ + if (p2p_is_on(cfg)) { + WL_DBG(("Disabling P2P Discovery Interface \n")); +#ifdef WL_CFG80211_P2P_DEV_IF + ret = wl_cfg80211_scan_stop(bcmcfg_to_p2p_wdev(cfg)); +#else + ret = wl_cfg80211_scan_stop(cfg->p2p_net); +#endif + if (unlikely(ret < 0)) { + CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret)); + } + +#ifdef DHD_IFDEBUG + WL_ERR(("call wl_cfgp2p_disable_discovery()\n")); +#endif + wl_cfgp2p_disable_discovery(cfg); + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0; + p2p_on(cfg) = false; + } + + /* + * Intialize the firmware I/F. + */ + ret = wl_cfg80211_interface_ops(cfg, primary_ndev, bsscfg_idx, + NL80211_IFTYPE_STATION, 0, addr); + if (ret == BCME_UNSUPPORTED) { + /* Use bssidx 1 by default */ + bsscfg_idx = 1; + if ((ret = wl_cfg80211_add_del_bss(cfg, primary_ndev, + bsscfg_idx, iface_type, 0, addr)) < 0) { + return NULL; + } + } else if (ret < 0) { + WL_ERR(("Interface create failed!! ret:%d \n", ret)); + goto fail; + } else { + /* Success */ + bsscfg_idx = ret; + } + + WL_DBG(("Interface created!! bssidx:%d \n", bsscfg_idx)); + + /* + * Wait till the firmware send a confirmation event back. + */ + WL_DBG(("Wait for the FW I/F Event\n")); + timeout = wait_event_interruptible_timeout(cfg->netif_change_event, + !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME)); + if (timeout <= 0 || cfg->bss_pending_op) { + WL_ERR(("ADD_IF event, didn't come. Return \n")); + goto fail; + } + + /* + * Since FW operation is successful,we can go ahead with the + * the host interface creation. + */ + event = &cfg->if_event_info; + new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, + (char*)name, addr, event->bssidx, event->name); + if (!new_ndev) { + WL_ERR(("I/F allocation failed! \n")); + goto fail; + } else + WL_DBG(("I/F allocation succeeded! ifidx:0x%x bssidx:0x%x \n", + event->ifidx, event->bssidx)); + + wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); + if (!wdev) { + WL_ERR(("wireless_dev alloc failed! \n")); + goto fail; + } + + wdev->wiphy = wiphy; + wdev->iftype = iface_type; + new_ndev->ieee80211_ptr = wdev; + SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy)); + +#ifdef DHD_IFDEBUG + WL_ERR(("wdev=%p, new_ndev=%p\n", wdev, new_ndev)); +#endif + + /* RTNL lock must have been acquired. 
*/ + ASSERT_RTNL(); + + /* Set the locally administed mac addr, if not applied already */ + if (memcmp(addr, event->mac, ETH_ALEN) != 0) { + ret = wldev_iovar_setbuf_bsscfg(primary_ndev, "cur_etheraddr", + addr, ETH_ALEN, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, + event->bssidx, &cfg->ioctl_buf_sync); + if (unlikely(ret)) { + WL_ERR(("set cur_etheraddr Error (%d)\n", ret)); + goto fail; + } + memcpy(new_ndev->dev_addr, addr, ETH_ALEN); + } + + if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) { + WL_ERR(("IFACE register failed \n")); + goto fail; + } + + /* Initialize with the station mode params */ + wl_alloc_netinfo(cfg, new_ndev, wdev, + (iface_type == NL80211_IFTYPE_STATION) ? + WL_MODE_BSS : WL_MODE_AP, PM_ENABLE, event->bssidx); + cfg->bss_cfgdev = ndev_to_cfgdev(new_ndev); + cfg->cfgdev_bssidx = event->bssidx; + + WL_DBG(("Host Network Interface for Secondary I/F created")); + +#ifdef DHD_IFDEBUG + WL_ERR(("cfg->bss_cfgdev=%p\n", cfg->bss_cfgdev)); +#endif + + return cfg->bss_cfgdev; + +fail: + cfg->bss_pending_op = FALSE; + cfg->cfgdev_bssidx = -1; + if (wdev) + kfree(wdev); + if (new_ndev) + wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev); + +#ifdef DHD_IFDEBUG + WL_ERR(("failed!!!\n")); +#endif + + return NULL; +} + +s32 +wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *ndev = NULL; + struct net_device *primary_ndev = NULL; + s32 ret = BCME_OK; + s32 bsscfg_idx = 1; + u32 timeout; + u32 ifidx; + enum nl80211_iftype iface_type = NL80211_IFTYPE_STATION; + + WL_ERR(("Enter\n")); + + if (!cfg->bss_cfgdev) + return 0; + + /* If any scan is going on, abort it */ + if (wl_get_drv_status_all(cfg, SCANNING)) { + WL_ERR(("Scan in progress. Aborting the scan!\n")); + wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true); + } + + ndev = (struct net_device *)cfgdev_to_ndev(cfg->bss_cfgdev); + primary_ndev = bcmcfg_to_prmry_ndev(cfg); + +#ifdef DHD_IFDEBUG + WL_ERR(("cfg->bss_cfgdev=%p, ndev=%p, primary_ndev=%p\n", + cfg->bss_cfgdev, ndev, primary_ndev)); +#endif + + cfg->bss_pending_op = TRUE; + memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info)); + + /* Delete the firmware interface. 
"interface_remove" command + * should go on the interface to be deleted + */ + ret = wl_cfg80211_interface_ops(cfg, ndev, cfg->cfgdev_bssidx, + NL80211_IFTYPE_STATION, 1, NULL); + if (ret == BCME_UNSUPPORTED) { + if ((ret = wl_cfg80211_add_del_bss(cfg, ndev, + bsscfg_idx, iface_type, true, NULL)) < 0) { + WL_ERR(("DEL bss failed ret:%d \n", ret)); + goto exit; + } + } else if (ret < 0) { + WL_ERR(("Interface DEL failed ret:%d \n", ret)); + goto exit; + } + + timeout = wait_event_interruptible_timeout(cfg->netif_change_event, + !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME)); + if (timeout <= 0 || cfg->bss_pending_op) { + WL_ERR(("timeout in waiting IF_DEL event\n")); + } + +exit: + ifidx = dhd_net2idx(((struct dhd_pub *)(cfg->pub))->info, ndev); + wl_cfg80211_remove_if(cfg, ifidx, ndev); + cfg->bss_cfgdev = NULL; + cfg->cfgdev_bssidx = -1; + cfg->bss_pending_op = FALSE; + + WL_ERR(("IF_DEL Done.\n")); + + return ret; +} +#endif /* defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) */ + +static s32 +wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_ibss_params *params) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct cfg80211_bss *bss; + struct ieee80211_channel *chan; + struct wl_join_params join_params; + int scan_suppress; + struct cfg80211_ssid ssid; + s32 scan_retry = 0; + s32 err = 0; + size_t join_params_size; + chanspec_t chanspec = 0; + u32 param[2] = {0, 0}; + u32 bw_cap = 0; + + WL_TRACE(("In\n")); + RETURN_EIO_IF_NOT_UP(cfg); + WL_INFORM(("JOIN BSSID:" MACDBG "\n", MAC2STRDBG(params->bssid))); + if (!params->ssid || params->ssid_len <= 0) { + WL_ERR(("Invalid parameter\n")); + return -EINVAL; + } +#if defined(WL_CFG80211_P2P_DEV_IF) + chan = params->chandef.chan; +#else + chan = params->channel; +#endif /* WL_CFG80211_P2P_DEV_IF */ + if (chan) + cfg->channel = ieee80211_frequency_to_channel(chan->center_freq); + if (wl_get_drv_status(cfg, CONNECTED, dev)) { + struct wlc_ssid *lssid = (struct wlc_ssid *)wl_read_prof(cfg, dev, WL_PROF_SSID); + u8 *bssid = (u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID); + u32 *channel = (u32 *)wl_read_prof(cfg, dev, WL_PROF_CHAN); + if (!params->bssid || ((memcmp(params->bssid, bssid, ETHER_ADDR_LEN) == 0) && + (memcmp(params->ssid, lssid->SSID, lssid->SSID_len) == 0) && + (*channel == cfg->channel))) { + WL_ERR(("Connection already existed to " MACDBG "\n", + MAC2STRDBG((u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID)))); + return -EISCONN; + } + WL_ERR(("Ignore Previous connecton to %s (" MACDBG ")\n", + lssid->SSID, MAC2STRDBG(bssid))); + } + + /* remove the VSIE */ + wl_cfg80211_ibss_vsie_delete(dev); + + bss = cfg80211_get_ibss(wiphy, NULL, params->ssid, params->ssid_len); + if (!bss) { + if (IBSS_INITIAL_SCAN_ALLOWED == TRUE) { + memcpy(ssid.ssid, params->ssid, params->ssid_len); + ssid.ssid_len = params->ssid_len; + do { + if (unlikely + (__wl_cfg80211_scan(wiphy, dev, NULL, &ssid) == + -EBUSY)) { + wl_delay(150); + } else { + break; + } + } while (++scan_retry < WL_SCAN_RETRY_MAX); + + /* rtnl lock code is removed here. don't see why rtnl lock + * needs to be released. + */ + + /* wait 4 secons till scan done.... 
*/ + schedule_timeout_interruptible(msecs_to_jiffies(4000)); + + bss = cfg80211_get_ibss(wiphy, NULL, + params->ssid, params->ssid_len); + } + } + if (bss && ((IBSS_COALESCE_ALLOWED == TRUE) || + ((IBSS_COALESCE_ALLOWED == FALSE) && params->bssid && + !memcmp(bss->bssid, params->bssid, ETHER_ADDR_LEN)))) { + cfg->ibss_starter = false; + WL_DBG(("Found IBSS\n")); + } else { + cfg->ibss_starter = true; + } + if (chan) { + if (chan->band == NL80211_BAND_5GHZ ) + param[0] = WLC_BAND_5G; + else if (chan->band == NL80211_BAND_2GHZ ) + param[0] = WLC_BAND_2G; + err = wldev_iovar_getint(dev, "bw_cap", param); + if (unlikely(err)) { + WL_ERR(("Get bw_cap Failed (%d)\n", err)); + return err; + } + bw_cap = param[0]; + chanspec = channel_to_chanspec(wiphy, dev, cfg->channel, bw_cap); + } + /* + * Join with specific BSSID and cached SSID + * If SSID is zero join based on BSSID only + */ + memset(&join_params, 0, sizeof(join_params)); + memcpy((void *)join_params.ssid.SSID, (void *)params->ssid, + params->ssid_len); + join_params.ssid.SSID_len = htod32(params->ssid_len); + if (params->bssid) { + memcpy(&join_params.params.bssid, params->bssid, ETHER_ADDR_LEN); + err = wldev_ioctl(dev, WLC_SET_DESIRED_BSSID, &join_params.params.bssid, + ETHER_ADDR_LEN, true); + if (unlikely(err)) { + WL_ERR(("Error (%d)\n", err)); + return err; + } + } else + memset(&join_params.params.bssid, 0, ETHER_ADDR_LEN); + wldev_iovar_setint(dev, "ibss_coalesce_allowed", IBSS_COALESCE_ALLOWED); + + if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) { + scan_suppress = TRUE; + /* Set the SCAN SUPPRESS Flag in the firmware to skip join scan */ + err = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS, + &scan_suppress, sizeof(int), true); + if (unlikely(err)) { + WL_ERR(("Scan Suppress Setting Failed (%d)\n", err)); + return err; + } + } + + join_params.params.chanspec_list[0] = chanspec; + join_params.params.chanspec_num = 1; + wldev_iovar_setint(dev, "chanspec", chanspec); + join_params_size = sizeof(join_params); + + /* Disable Authentication, IBSS will add key if it required */ + wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED); + wldev_iovar_setint(dev, "wsec", 0); + + + err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, + join_params_size, true); + if (unlikely(err)) { + WL_ERR(("Error (%d)\n", err)); + return err; + } + + if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) { + scan_suppress = FALSE; + /* Reset the SCAN SUPPRESS Flag */ + err = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS, + &scan_suppress, sizeof(int), true); + if (unlikely(err)) { + WL_ERR(("Reset Scan Suppress Flag Failed (%d)\n", err)); + return err; + } + } + wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID); + wl_update_prof(cfg, dev, NULL, &cfg->channel, WL_PROF_CHAN); + cfg->rmc_event_seq = 0; /* initialize rmcfail sequence */ + return err; +} + +static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 err = 0; + scb_val_t scbval; + u8 *curbssid; + + RETURN_EIO_IF_NOT_UP(cfg); + wl_link_down(cfg); + + WL_ERR(("Leave IBSS\n")); + curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID); + wl_set_drv_status(cfg, DISCONNECTING, dev); + scbval.val = 0; + memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN); + err = wldev_ioctl(dev, WLC_DISASSOC, &scbval, + sizeof(scb_val_t), true); + if (unlikely(err)) { + wl_clr_drv_status(cfg, DISCONNECTING, dev); + WL_ERR(("error(%d)\n", err)); + return err; + } + + /* remove the VSIE */ + wl_cfg80211_ibss_vsie_delete(dev); + + return err; +} + + +static s32 
+wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wl_security *sec; + s32 val = 0; + s32 err = 0; + s32 bssidx; + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) + val = WPA_AUTH_PSK | + WPA_AUTH_UNSPECIFIED; + else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) + val = WPA2_AUTH_PSK| + WPA2_AUTH_UNSPECIFIED; + else + val = WPA_AUTH_DISABLED; + + if (is_wps_conn(sme)) + val = WPA_AUTH_DISABLED; + + WL_DBG(("setting wpa_auth to 0x%0x\n", val)); + err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx); + if (unlikely(err)) { + WL_ERR(("set wpa_auth failed (%d)\n", err)); + return err; + } + sec = wl_read_prof(cfg, dev, WL_PROF_SEC); + sec->wpa_versions = sme->crypto.wpa_versions; + return err; +} + + +static s32 +wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wl_security *sec; + s32 val = 0; + s32 err = 0; + s32 bssidx; + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + switch (sme->auth_type) { + case NL80211_AUTHTYPE_OPEN_SYSTEM: + val = WL_AUTH_OPEN_SYSTEM; + WL_DBG(("open system\n")); + break; + case NL80211_AUTHTYPE_SHARED_KEY: + val = WL_AUTH_SHARED_KEY; + WL_DBG(("shared key\n")); + break; + case NL80211_AUTHTYPE_AUTOMATIC: + val = WL_AUTH_OPEN_SHARED; + WL_DBG(("automatic\n")); + break; + default: + val = 2; + WL_ERR(("invalid auth type (%d)\n", sme->auth_type)); + break; + } + + err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx); + if (unlikely(err)) { + WL_ERR(("set auth failed (%d)\n", err)); + return err; + } + sec = wl_read_prof(cfg, dev, WL_PROF_SEC); + sec->auth_type = sme->auth_type; + return err; +} + +static s32 +wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wl_security *sec; + s32 pval = 0; + s32 gval = 0; + s32 err = 0; + s32 wsec_val = 0; + + s32 bssidx; + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + if (sme->crypto.n_ciphers_pairwise) { + switch (sme->crypto.ciphers_pairwise[0]) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + pval = WEP_ENABLED; + break; + case WLAN_CIPHER_SUITE_TKIP: + pval = TKIP_ENABLED; + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_AES_CMAC: + pval = AES_ENABLED; + break; + default: + WL_ERR(("invalid cipher pairwise (%d)\n", + sme->crypto.ciphers_pairwise[0])); + return -EINVAL; + } + } + if (sme->crypto.cipher_group) { + switch (sme->crypto.cipher_group) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + gval = WEP_ENABLED; + break; + case WLAN_CIPHER_SUITE_TKIP: + gval = TKIP_ENABLED; + break; + case WLAN_CIPHER_SUITE_CCMP: + gval = AES_ENABLED; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + gval = AES_ENABLED; + break; + default: + WL_ERR(("invalid cipher group (%d)\n", + sme->crypto.cipher_group)); + return -EINVAL; + } + } + + WL_DBG(("pval (%d) gval (%d)\n", pval, gval)); + + if (is_wps_conn(sme)) { + if (sme->privacy) + err = wldev_iovar_setint_bsscfg(dev, "wsec", 4, bssidx); + else + /* WPS-2.0 
allows no security */ + err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx); + } else { + WL_DBG((" NO, is_wps_conn, Set pval | gval to WSEC")); + wsec_val = pval | gval; + + WL_DBG((" Set WSEC to fW 0x%x \n", wsec_val)); + err = wldev_iovar_setint_bsscfg(dev, "wsec", + wsec_val, bssidx); + } + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + return err; + } + + sec = wl_read_prof(cfg, dev, WL_PROF_SEC); + sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0]; + sec->cipher_group = sme->crypto.cipher_group; + + return err; +} + +static s32 +wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wl_security *sec; + s32 val = 0; + s32 err = 0; + s32 bssidx; + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + if (sme->crypto.n_akm_suites) { + err = wldev_iovar_getint(dev, "wpa_auth", &val); + if (unlikely(err)) { + WL_ERR(("could not get wpa_auth (%d)\n", err)); + return err; + } + if (val & (WPA_AUTH_PSK | + WPA_AUTH_UNSPECIFIED)) { + switch (sme->crypto.akm_suites[0]) { + case WLAN_AKM_SUITE_8021X: + val = WPA_AUTH_UNSPECIFIED; + break; + case WLAN_AKM_SUITE_PSK: + val = WPA_AUTH_PSK; + break; + default: + WL_ERR(("invalid akm suite (0x%x)\n", + sme->crypto.akm_suites[0])); + return -EINVAL; + } + } else if (val & (WPA2_AUTH_PSK | + WPA2_AUTH_UNSPECIFIED)) { + switch (sme->crypto.akm_suites[0]) { + case WLAN_AKM_SUITE_8021X: + val = WPA2_AUTH_UNSPECIFIED; + break; + case WLAN_AKM_SUITE_PSK: + val = WPA2_AUTH_PSK; + break; + default: + WL_ERR(("invalid akm suite (0x%x)\n", + sme->crypto.akm_suites[0])); + return -EINVAL; + } + } + + + WL_DBG(("setting wpa_auth to 0x%x\n", val)); + + err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx); + if (unlikely(err)) { + WL_ERR(("could not set wpa_auth (%d)\n", err)); + return err; + } + } + sec = wl_read_prof(cfg, dev, WL_PROF_SEC); + sec->wpa_auth = sme->crypto.akm_suites[0]; + + return err; +} + +static s32 +wl_set_set_sharedkey(struct net_device *dev, + struct cfg80211_connect_params *sme) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wl_security *sec; + struct wl_wsec_key key; + s32 val; + s32 err = 0; + s32 bssidx; + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + WL_DBG(("key len (%d)\n", sme->key_len)); + if (sme->key_len) { + sec = wl_read_prof(cfg, dev, WL_PROF_SEC); + WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n", + sec->wpa_versions, sec->cipher_pairwise)); + if (!(sec->wpa_versions & (NL80211_WPA_VERSION_1 | + NL80211_WPA_VERSION_2)) && + (sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 | + WLAN_CIPHER_SUITE_WEP104))) + { + memset(&key, 0, sizeof(key)); + key.len = (u32) sme->key_len; + key.index = (u32) sme->key_idx; + if (unlikely(key.len > sizeof(key.data))) { + WL_ERR(("Too long key length (%u)\n", key.len)); + return -EINVAL; + } + memcpy(key.data, sme->key, key.len); + key.flags = WL_PRIMARY_KEY; + switch (sec->cipher_pairwise) { + case WLAN_CIPHER_SUITE_WEP40: + key.algo = CRYPTO_ALGO_WEP1; + break; + case WLAN_CIPHER_SUITE_WEP104: + key.algo = CRYPTO_ALGO_WEP128; + break; + default: + WL_ERR(("Invalid algorithm (%d)\n", + sme->crypto.ciphers_pairwise[0])); + return -EINVAL; + } + /* Set the new key/index */ + WL_DBG(("key length (%d) key index (%d) algo (%d)\n", + key.len, key.index, 
key.algo)); + WL_DBG(("key \"%s\"\n", key.data)); + swap_key_from_BE(&key); + err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("WLC_SET_KEY error (%d)\n", err)); + return err; + } + if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) { + WL_DBG(("set auth_type to shared key\n")); + val = WL_AUTH_SHARED_KEY; /* shared key */ + err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx); + if (unlikely(err)) { + WL_ERR(("set auth failed (%d)\n", err)); + return err; + } + } + } + } + return err; +} + +#if defined(ESCAN_RESULT_PATCH) +static u8 connect_req_bssid[6]; +static u8 broad_bssid[6]; +#endif /* ESCAN_RESULT_PATCH */ + + + +#if defined(CUSTOM_SET_CPUCORE) || defined(CONFIG_TCPACK_FASTTX) +static bool wl_get_chan_isvht80(struct net_device *net, dhd_pub_t *dhd) +{ + u32 chanspec = 0; + bool isvht80 = 0; + + if (wldev_iovar_getint(net, "chanspec", (s32 *)&chanspec) == BCME_OK) + chanspec = wl_chspec_driver_to_host(chanspec); + + isvht80 = chanspec & WL_CHANSPEC_BW_80; + WL_INFO(("%s: chanspec(%x:%d)\n", __FUNCTION__, chanspec, isvht80)); + + return isvht80; +} +#endif /* CUSTOM_SET_CPUCORE || CONFIG_TCPACK_FASTTX */ + +static s32 +wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_connect_params *sme) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct ieee80211_channel *chan = sme->channel; + wl_extjoin_params_t *ext_join_params; + struct wl_join_params join_params; + size_t join_params_size; + s32 err = 0; + wpa_ie_fixed_t *wpa_ie; + bcm_tlv_t *wpa2_ie; + u8* wpaie = 0; + u32 wpaie_len = 0; + u32 chan_cnt = 0; + struct ether_addr bssid; + s32 bssidx = -1; + int ret; + int wait_cnt; + + WL_DBG(("In\n")); + +#if defined(SUPPORT_RANDOM_MAC_SCAN) + wl_cfg80211_set_random_mac(dev, FALSE); +#endif /* SUPPORT_RANDOM_MAC_SCAN */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) + if (sme->channel_hint) { + chan = sme->channel_hint; + WL_DBG(("channel_hint (%d), channel_hint center_freq (%d)\n", + ieee80211_frequency_to_channel(sme->channel_hint->center_freq), + sme->channel_hint->center_freq)); + } + if (sme->bssid_hint) { + sme->bssid = sme->bssid_hint; + WL_DBG(("bssid_hint "MACDBG" \n", MAC2STRDBG(sme->bssid_hint))); + } +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */ + + if (unlikely(!sme->ssid)) { + WL_ERR(("Invalid ssid\n")); + return -EOPNOTSUPP; + } + + if (unlikely(sme->ssid_len > DOT11_MAX_SSID_LEN)) { + WL_ERR(("Invalid SSID info: SSID=%s, length=%zd\n", + sme->ssid, sme->ssid_len)); + return -EINVAL; + } + + RETURN_EIO_IF_NOT_UP(cfg); + + /* + * Cancel ongoing scan to sync up with sme state machine of cfg80211. + */ +#if (defined(BCM4359_CHIP) || !defined(ESCAN_RESULT_PATCH)) + if (cfg->scan_request) { + WL_TRACE_HW4(("Aborting the scan! 
\n")); + wl_cfg80211_scan_abort(cfg); + wait_cnt = MAX_SCAN_ABORT_WAIT_CNT; + while (wl_get_drv_status(cfg, SCANNING, dev) && wait_cnt) { + WL_DBG(("Waiting for SCANNING terminated, wait_cnt: %d\n", wait_cnt)); + wait_cnt--; + OSL_SLEEP(WAIT_SCAN_ABORT_OSL_SLEEP_TIME); + } + if (wl_get_drv_status(cfg, SCANNING, dev)) { + wl_notify_escan_complete(cfg, dev, true, true); + } + } +#endif +#ifdef WL_SCHED_SCAN + if (cfg->sched_scan_req) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg), 0); +#else + wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg)); +#endif + } +#endif +#if defined(ESCAN_RESULT_PATCH) + if (sme->bssid) + memcpy(connect_req_bssid, sme->bssid, ETHER_ADDR_LEN); + else + bzero(connect_req_bssid, ETHER_ADDR_LEN); + bzero(broad_bssid, ETHER_ADDR_LEN); +#endif +#if defined(USE_DYNAMIC_MAXPKT_RXGLOM) + maxrxpktglom = 0; +#endif + bzero(&bssid, sizeof(bssid)); + if (!wl_get_drv_status(cfg, CONNECTED, dev)&& + (ret = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false)) == 0) { + if (!ETHER_ISNULLADDR(&bssid)) { + scb_val_t scbval; + wl_set_drv_status(cfg, DISCONNECTING, dev); + scbval.val = DOT11_RC_DISASSOC_LEAVING; + memcpy(&scbval.ea, &bssid, ETHER_ADDR_LEN); + scbval.val = htod32(scbval.val); + + WL_DBG(("drv status CONNECTED is not set, but connected in FW!" MACDBG "/n", + MAC2STRDBG(bssid.octet))); + err = wldev_ioctl(dev, WLC_DISASSOC, &scbval, + sizeof(scb_val_t), true); + if (unlikely(err)) { + wl_clr_drv_status(cfg, DISCONNECTING, dev); + WL_ERR(("error (%d)\n", err)); + return err; + } + wait_cnt = 500/10; + while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) { + WL_DBG(("Waiting for disconnection terminated, wait_cnt: %d\n", + wait_cnt)); + wait_cnt--; + OSL_SLEEP(10); + } + } else + WL_DBG(("Currently not associated!\n")); + } else { + /* if status is DISCONNECTING, wait for disconnection terminated max 500 ms */ + wait_cnt = 200/10; + while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) { + WL_DBG(("Waiting for disconnection terminated, wait_cnt: %d\n", wait_cnt)); + wait_cnt--; + OSL_SLEEP(10); + } + if (wl_get_drv_status(cfg, DISCONNECTING, dev)) { + WL_ERR(("Force clear DISCONNECTING status!\n")); + wl_clr_drv_status(cfg, DISCONNECTING, dev); + } + } + + /* Clean BSSID */ + bzero(&bssid, sizeof(bssid)); + if (!wl_get_drv_status(cfg, DISCONNECTING, dev)) + wl_update_prof(cfg, dev, NULL, (void *)&bssid, WL_PROF_BSSID); + + if (p2p_is_on(cfg) && (dev != bcmcfg_to_prmry_ndev(cfg))) { + /* we only allow to connect using virtual interface in case of P2P */ + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", + dev->ieee80211_ptr)); + return BCME_ERROR; + } + wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, + VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len); + } else if (dev == bcmcfg_to_prmry_ndev(cfg)) { + /* find the RSN_IE */ + if ((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len, + DOT11_MNG_RSN_ID)) != NULL) { + WL_DBG((" WPA2 IE is found\n")); + } + /* find the WPA_IE */ + if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)sme->ie, + sme->ie_len)) != NULL) { + WL_DBG((" WPA IE is found\n")); + } + if (wpa_ie != NULL || wpa2_ie != NULL) { + wpaie = (wpa_ie != NULL) ? (u8 *)wpa_ie : (u8 *)wpa2_ie; + wpaie_len = (wpa_ie != NULL) ? 
wpa_ie->length : wpa2_ie->len;
+			wpaie_len += WPA_RSN_IE_TAG_FIXED_LEN;
+			err = wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len,
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+			if (unlikely(err)) {
+				WL_ERR(("wpaie set error (%d)\n", err));
+				return err;
+			}
+		} else {
+			err = wldev_iovar_setbuf(dev, "wpaie", NULL, 0,
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+			if (unlikely(err)) {
+				WL_ERR(("wpaie set error (%d)\n", err));
+				return err;
+			}
+		}
+
+		if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+			WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+			return BCME_ERROR;
+		}
+		err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+			VNDR_IE_ASSOCREQ_FLAG, (const u8 *)sme->ie, sme->ie_len);
+		if (unlikely(err)) {
+			return err;
+		}
+	}
+	if (chan) {
+		/* If RCC is not enabled, use the channel provided by userspace */
+		cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
+		chan_cnt = 1;
+		WL_DBG(("channel (%d), center_freq (%d), %d channels\n", cfg->channel,
+			chan->center_freq, chan_cnt));
+	} else {
+		/*
+		 * No channel information from user space. If RCC is enabled, the RCC
+		 * would prepare the channel list, else no channel would be provided
+		 * and firmware would need to do a full channel scan.
+		 */
+		WL_DBG(("No channel info from user space\n"));
+		cfg->channel = 0;
+	}
+	WL_DBG(("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len));
+	WL_DBG(("3. set wpa version \n"));
+	err = wl_set_wpa_version(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid wpa_version\n"));
+		return err;
+	}
+	err = wl_set_auth_type(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid auth type\n"));
+		return err;
+	}
+
+	err = wl_set_set_cipher(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid cipher\n"));
+		return err;
+	}
+
+	err = wl_set_key_mgmt(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid key mgmt\n"));
+		return err;
+	}
+
+	err = wl_set_set_sharedkey(dev, sme);
+	if (unlikely(err)) {
+		WL_ERR(("Invalid shared key\n"));
+		return err;
+	}
+
+	/*
+	 * Join with specific BSSID and cached SSID
+	 * If SSID is zero join based on BSSID only
+	 */
+	join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE +
+		chan_cnt * sizeof(chanspec_t);
+	ext_join_params = (wl_extjoin_params_t*)kzalloc(join_params_size, GFP_KERNEL);
+	if (ext_join_params == NULL) {
+		err = -ENOMEM;
+		wl_clr_drv_status(cfg, CONNECTING, dev);
+		goto exit;
+	}
+	ext_join_params->ssid.SSID_len = min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len);
+	memcpy(&ext_join_params->ssid.SSID, sme->ssid, ext_join_params->ssid.SSID_len);
+	wl_update_prof(cfg, dev, NULL, &ext_join_params->ssid, WL_PROF_SSID);
+	ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
+	/* increase dwell time to receive probe response or detect Beacon
+	 * from target AP in noisy air, only during connect command
+	 */
+	ext_join_params->scan.active_time = chan_cnt ? WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS : -1;
+	ext_join_params->scan.passive_time = chan_cnt ? WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS : -1;
+	/* Set up join scan parameters */
+	ext_join_params->scan.scan_type = -1;
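+	/* The probe count is derived from the active dwell time when the
+	 * target channel is known; -1 appears to leave the firmware default
+	 * in place (as with the dwell times above on a full-channel join).
+	 */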
+	ext_join_params->scan.nprobes = chan_cnt ?
+		(ext_join_params->scan.active_time/WL_SCAN_JOIN_PROBE_INTERVAL_MS) : -1;
+	ext_join_params->scan.home_time = -1;
+
+	if (sme->bssid)
+		memcpy(&ext_join_params->assoc.bssid, sme->bssid, ETH_ALEN);
+	else
+		memcpy(&ext_join_params->assoc.bssid, &ether_bcast, ETH_ALEN);
+	ext_join_params->assoc.chanspec_num = chan_cnt;
+	if (chan_cnt) {
+		if (cfg->channel) {
+			/*
+			 * Use the channel provided by userspace
+			 */
+			u16 channel, band, bw, ctl_sb;
+			chanspec_t chspec;
+			channel = cfg->channel;
+			band = (channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G
+				: WL_CHANSPEC_BAND_5G;
+
+			/* Get min_bw set for the interface */
+			bw = wl_cfg80211_ulb_get_min_bw_chspec(dev->ieee80211_ptr, bssidx);
+			if (bw == INVCHANSPEC) {
+				WL_ERR(("Invalid chanspec \n"));
+				kfree(ext_join_params);
+				return BCME_ERROR;
+			}
+
+			ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+			chspec = (channel | band | bw | ctl_sb);
+			ext_join_params->assoc.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+			ext_join_params->assoc.chanspec_list[0] |= chspec;
+			ext_join_params->assoc.chanspec_list[0] =
+				wl_chspec_host_to_driver(ext_join_params->assoc.chanspec_list[0]);
+		}
+	}
+	ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num);
+	if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+		WL_INFORM(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID,
+			ext_join_params->ssid.SSID_len));
+	}
+	wl_set_drv_status(cfg, CONNECTING, dev);
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		kfree(ext_join_params);
+		return BCME_ERROR;
+	}
+	err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size,
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+	if (cfg->rcc_enabled) {
+		WL_ERR(("Connecting with" MACDBG " ssid \"%s\", len (%d) with rcc channels \n\n",
+			MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
+			ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len));
+	} else {
+		WL_ERR(("Connecting with" MACDBG " ssid \"%s\", len (%d) channel=%d\n\n",
+			MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
+			ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, cfg->channel));
+	}
+
+	kfree(ext_join_params);
+	if (err) {
+		wl_clr_drv_status(cfg, CONNECTING, dev);
+		if (err == BCME_UNSUPPORTED) {
+			WL_DBG(("join iovar is not supported\n"));
+			goto set_ssid;
+		} else {
+			WL_ERR(("error (%d)\n", err));
+			goto exit;
+		}
+	} else
+		goto exit;
+
+set_ssid:
+	memset(&join_params, 0, sizeof(join_params));
+	join_params_size = sizeof(join_params.ssid);
+
+	join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len);
+	memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
+	join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
+	wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+	if (sme->bssid)
+		memcpy(&join_params.params.bssid, sme->bssid, ETH_ALEN);
+	else
+		memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
+
+	if (wl_ch_to_chanspec(dev, cfg->channel, &join_params, &join_params_size) < 0) {
+		WL_ERR(("Invalid chanspec\n"));
+		return -EINVAL;
+	}
+
+	WL_DBG(("join_param_size %zu\n", join_params_size));
+
+	if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+		WL_INFORM(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
+			join_params.ssid.SSID_len));
+	}
+	wl_set_drv_status(cfg, CONNECTING, dev);
+	err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, true);
+	if (err) {
+		WL_ERR(("error (%d)\n", err));
+		
wl_clr_drv_status(cfg, CONNECTING, dev); + } +exit: + return err; +} + +static s32 +wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, + u16 reason_code) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + scb_val_t scbval; + bool act = false; + s32 err = 0; + u8 *curbssid; +#ifdef CUSTOM_SET_CPUCORE + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); +#endif /* CUSTOM_SET_CPUCORE */ + WL_ERR(("Reason %d\n", reason_code)); + RETURN_EIO_IF_NOT_UP(cfg); + act = *(bool *) wl_read_prof(cfg, dev, WL_PROF_ACT); + curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID); +#ifdef ESCAN_RESULT_PATCH + if (wl_get_drv_status(cfg, CONNECTING, dev) && curbssid && + (memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0)) { + WL_ERR(("Disconnecting from connecting device: " MACDBG "\n", + MAC2STRDBG(curbssid))); + act = true; + } +#endif /* ESCAN_RESULT_PATCH */ + + if (act) { + /* + * Cancel ongoing scan to sync up with sme state machine of cfg80211. + */ +#if !defined(ESCAN_RESULT_PATCH) + /* Let scan aborted by F/W */ + if (cfg->scan_request) { + WL_TRACE_HW4(("Aborting the scan! \n")); + wl_notify_escan_complete(cfg, dev, true, true); + } +#endif /* ESCAN_RESULT_PATCH */ + if (wl_get_drv_status(cfg, CONNECTING, dev) || + wl_get_drv_status(cfg, CONNECTED, dev)) { + wl_set_drv_status(cfg, DISCONNECTING, dev); + scbval.val = reason_code; + memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN); + scbval.val = htod32(scbval.val); + err = wldev_ioctl(dev, WLC_DISASSOC, &scbval, + sizeof(scb_val_t), true); + if (unlikely(err)) { + wl_clr_drv_status(cfg, DISCONNECTING, dev); + WL_ERR(("error (%d)\n", err)); + return err; + } +#if defined(BCM4358_CHIP) + WL_ERR(("Wait for complete of disconnecting \n")); + OSL_SLEEP(200); +#endif /* BCM4358_CHIP */ + } + } +#ifdef CUSTOM_SET_CPUCORE + /* set default cpucore */ + if (dev == bcmcfg_to_prmry_ndev(cfg)) { + dhd->chan_isvht80 &= ~DHD_FLAG_STA_MODE; + if (!(dhd->chan_isvht80)) + dhd_set_cpucore(dhd, FALSE); + } +#endif /* CUSTOM_SET_CPUCORE */ + + return err; +} + +#if defined(WL_CFG80211_P2P_DEV_IF) +static s32 +wl_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, + enum nl80211_tx_power_setting type, s32 mbm) +#else +static s32 +wl_cfg80211_set_tx_power(struct wiphy *wiphy, + enum nl80211_tx_power_setting type, s32 dbm) +#endif /* WL_CFG80211_P2P_DEV_IF */ +{ + + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + s32 err = 0; +#if defined(WL_CFG80211_P2P_DEV_IF) + s32 dbm = MBM_TO_DBM(mbm); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || \ + defined(WL_COMPAT_WIRELESS) || defined(WL_SUPPORT_BACKPORTED_KPATCHES) + dbm = MBM_TO_DBM(dbm); +#endif /* WL_CFG80211_P2P_DEV_IF */ + + RETURN_EIO_IF_NOT_UP(cfg); + switch (type) { + case NL80211_TX_POWER_AUTOMATIC: + break; + case NL80211_TX_POWER_LIMITED: + if (dbm < 0) { + WL_ERR(("TX_POWER_LIMITTED - dbm is negative\n")); + return -EINVAL; + } + break; + case NL80211_TX_POWER_FIXED: + if (dbm < 0) { + WL_ERR(("TX_POWER_FIXED - dbm is negative..\n")); + return -EINVAL; + } + break; + } + + err = wl_set_tx_power(ndev, type, dbm); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + return err; + } + + cfg->conf->tx_power = dbm; + + return err; +} + +#if defined(WL_CFG80211_P2P_DEV_IF) +static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, + struct wireless_dev *wdev, s32 *dbm) +#else +static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm) +#endif /* WL_CFG80211_P2P_DEV_IF */ +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); 
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + s32 err = 0; + + RETURN_EIO_IF_NOT_UP(cfg); + err = wl_get_tx_power(ndev, dbm); + if (unlikely(err)) + WL_ERR(("error (%d)\n", err)); + + return err; +} + +static s32 +wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, bool unicast, bool multicast) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + u32 index; + s32 wsec; + s32 err = 0; + s32 bssidx; + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from dev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + WL_DBG(("key index (%d)\n", key_idx)); + RETURN_EIO_IF_NOT_UP(cfg); + err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx); + if (unlikely(err)) { + WL_ERR(("WLC_GET_WSEC error (%d)\n", err)); + return err; + } + if (wsec == WEP_ENABLED) { + /* Just select a new current key */ + index = (u32) key_idx; + index = htod32(index); + err = wldev_ioctl(dev, WLC_SET_KEY_PRIMARY, &index, + sizeof(index), true); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + } + } + return err; +} + +static s32 +wl_add_keyext(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, const u8 *mac_addr, struct key_params *params) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct wl_wsec_key key; + s32 err = 0; + s32 bssidx; + s32 mode = wl_get_mode_by_netdev(cfg, dev); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + memset(&key, 0, sizeof(key)); + key.index = (u32) key_idx; + + if (!ETHER_ISMULTI(mac_addr)) + memcpy((char *)&key.ea, (const void *)mac_addr, ETHER_ADDR_LEN); + key.len = (u32) params->key_len; + + /* check for key index change */ + if (key.len == 0) { + /* key delete */ + swap_key_from_BE(&key); + err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("key delete error (%d)\n", err)); + return err; + } + } else { + if (key.len > sizeof(key.data)) { + WL_ERR(("Invalid key length (%d)\n", key.len)); + return -EINVAL; + } + WL_DBG(("Setting the key index %d\n", key.index)); + memcpy(key.data, params->key, key.len); + + if ((mode == WL_MODE_BSS) && + (params->cipher == WLAN_CIPHER_SUITE_TKIP)) { + u8 keybuf[8]; + memcpy(keybuf, &key.data[24], sizeof(keybuf)); + memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); + memcpy(&key.data[16], keybuf, sizeof(keybuf)); + } + + /* if IW_ENCODE_EXT_RX_SEQ_VALID set */ + if (params->seq && params->seq_len == 6) { + /* rx iv */ + u8 *ivptr; + ivptr = (u8 *) params->seq; + key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) | + (ivptr[3] << 8) | ivptr[2]; + key.rxiv.lo = (ivptr[1] << 8) | ivptr[0]; + key.iv_initialized = true; + } + + switch (params->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + key.algo = CRYPTO_ALGO_WEP1; + WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n")); + break; + case WLAN_CIPHER_SUITE_WEP104: + key.algo = CRYPTO_ALGO_WEP128; + WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n")); + break; + case WLAN_CIPHER_SUITE_TKIP: + key.algo = CRYPTO_ALGO_TKIP; + WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n")); + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + key.algo = CRYPTO_ALGO_AES_CCM; + WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n")); + break; + case WLAN_CIPHER_SUITE_CCMP: + key.algo = CRYPTO_ALGO_AES_CCM; + WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n")); + break; + default: + WL_ERR(("Invalid cipher (0x%x)\n", params->cipher)); + 
return -EINVAL; + } + swap_key_from_BE(&key); + /* need to guarantee EAPOL 4/4 send out before set key */ + dhd_wait_pend8021x(dev); + err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("WLC_SET_KEY error (%d)\n", err)); + return err; + } + } + return err; +} + +int +wl_cfg80211_enable_roam_offload(struct net_device *dev, int enable) +{ + int err; + wl_eventmsg_buf_t ev_buf; + + if (dev != bcmcfg_to_prmry_ndev(g_bcm_cfg)) { + /* roam offload is only for the primary device */ + return -1; + } + err = wldev_iovar_setint(dev, "roam_offload", enable); + if (err) + return err; + + if (enable) { + err = wldev_iovar_setint(dev, "sup_wpa_tmo", IDSUP_4WAY_HANDSHAKE_TIMEOUT); + if (err) { + WL_INFORM(("Setting 'sup_wpa_tmo' failed, err=%d\n", err)); + } + } + + bzero(&ev_buf, sizeof(wl_eventmsg_buf_t)); + wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_REQ_IE, !enable); + wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_RESP_IE, !enable); + wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_REASSOC, !enable); + wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_JOIN, !enable); + wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ROAM, !enable); + err = wl_cfg80211_apply_eventbuffer(dev, g_bcm_cfg, &ev_buf); + if (!err) { + g_bcm_cfg->roam_offload = enable; + } + return err; +} + +#if defined(WL_VIRTUAL_APSTA) +int +wl_cfg80211_interface_create(struct net_device *dev, char *name) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + bcm_struct_cfgdev *new_cfgdev; + + new_cfgdev = wl_cfg80211_create_iface(cfg->wdev->wiphy, + NL80211_IFTYPE_STATION, NULL, name); + if (!new_cfgdev) { + return BCME_ERROR; + } + else { + WL_DBG(("Iface %s created successfuly\n", name)); + return BCME_OK; + } +} + +int +wl_cfg80211_interface_delete(struct net_device *dev, char *name) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_info *iter, *next; + int err = BCME_ERROR; + + if (name == NULL) { + return BCME_ERROR; + } + + for_each_ndev(cfg, iter, next) { + if (iter->ndev) { + if (strcmp(iter->ndev->name, name) == 0) { + err = wl_cfg80211_del_iface(cfg->wdev->wiphy, cfg->bss_cfgdev); + break; + } + } + } + if (!err) { + WL_DBG(("Iface %s deleted successfuly", name)); + } + return err; +} +#endif /* defined (WL_VIRTUAL_APSTA) */ + +static s32 +wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, + u8 key_idx, bool pairwise, const u8 *mac_addr, + struct key_params *params) +{ + struct wl_wsec_key key; + s32 val = 0; + s32 wsec = 0; + s32 err = 0; + u8 keybuf[8]; + s32 bssidx = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 mode = wl_get_mode_by_netdev(cfg, dev); + WL_DBG(("key index (%d)\n", key_idx)); + RETURN_EIO_IF_NOT_UP(cfg); + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from dev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + if (mac_addr && + ((params->cipher != WLAN_CIPHER_SUITE_WEP40) && + (params->cipher != WLAN_CIPHER_SUITE_WEP104))) { + wl_add_keyext(wiphy, dev, key_idx, mac_addr, params); + goto exit; + } + memset(&key, 0, sizeof(key)); + /* Clear any buffered wep key */ + memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key)); + + key.len = (u32) params->key_len; + key.index = (u32) key_idx; + + if (unlikely(key.len > sizeof(key.data))) { + WL_ERR(("Too long key length (%u)\n", key.len)); + return -EINVAL; + } + memcpy(key.data, params->key, key.len); + + key.flags = WL_PRIMARY_KEY; + switch (params->cipher) { + 
+	case WLAN_CIPHER_SUITE_WEP40:
+		key.algo = CRYPTO_ALGO_WEP1;
+		val = WEP_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+		break;
+	case WLAN_CIPHER_SUITE_WEP104:
+		key.algo = CRYPTO_ALGO_WEP128;
+		val = WEP_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		key.algo = CRYPTO_ALGO_TKIP;
+		val = TKIP_ENABLED;
+		/* wpa_supplicant switches the third and fourth quarters of the TKIP key */
+		if (mode == WL_MODE_BSS) {
+			bcopy(&key.data[24], keybuf, sizeof(keybuf));
+			bcopy(&key.data[16], &key.data[24], sizeof(keybuf));
+			bcopy(keybuf, &key.data[16], sizeof(keybuf));
+		}
+		WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+		break;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		key.algo = CRYPTO_ALGO_AES_CCM;
+		val = AES_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		key.algo = CRYPTO_ALGO_AES_CCM;
+		val = AES_ENABLED;
+		WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+		break;
+	default:
+		WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+		return -EINVAL;
+	}
+
+	/* Set the new key/index */
+	if ((mode == WL_MODE_IBSS) && (val & (TKIP_ENABLED | AES_ENABLED))) {
+		WL_ERR(("IBSS key set\n"));
+		wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_NONE);
+	}
+	swap_key_from_BE(&key);
+	if ((params->cipher == WLAN_CIPHER_SUITE_WEP40) ||
+		(params->cipher == WLAN_CIPHER_SUITE_WEP104)) {
+		/*
+		 * For AP role, since we are doing a wl down before bringing up AP,
+		 * the plumbed keys will be lost. So for AP once we bring up AP, we
+		 * need to plumb keys again. So buffer the keys for future use. This
+		 * is more like a WAR. If firmware later has the capability to do
+		 * interface upgrade without doing a "wl down" and "wl apsta 0", then
+		 * this will not be required.
+		 */
+		WL_DBG(("Buffering WEP Keys \n"));
+		memcpy(&cfg->wep_key, &key, sizeof(struct wl_wsec_key));
+	}
+	err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
+		WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+		return err;
+	}
+
+exit:
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("get wsec error (%d)\n", err));
+		return err;
+	}
+
+	wsec |= val;
+	err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("set wsec error (%d)\n", err));
+		return err;
+	}
+
+	return err;
+}
+
+static s32
+wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr)
+{
+	struct wl_wsec_key key;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	s32 err = 0;
+	s32 bssidx;
+
+	WL_DBG(("Enter. key_idx: %d\n", key_idx));
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+
+	if ((key_idx >= DOT11_MAX_DEFAULT_KEYS) && (key_idx < DOT11_MAX_DEFAULT_KEYS+2))
+		return -EINVAL;
+
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memset(&key, 0, sizeof(key));
+
+	key.flags = WL_PRIMARY_KEY;
+	key.algo = CRYPTO_ALGO_OFF;
+	key.index = (u32) key_idx;
+
+	/* Set the new key/index */
+	swap_key_from_BE(&key);
+	err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
+		WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+	if (unlikely(err)) {
+		if (err == -EINVAL) {
+			if (key.index >= DOT11_MAX_DEFAULT_KEYS) {
+				/* we ignore this key index in this case */
+				WL_DBG(("invalid key index (%d)\n", key_idx));
+			}
+		} else {
+			WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+		}
+		return err;
+	}
+	return err;
+}
+
+static s32
+wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+	u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
+	void (*callback) (void *cookie, struct key_params * params))
+{
+	struct key_params params;
+	struct wl_wsec_key key;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	struct wl_security *sec;
+	s32 wsec;
+	s32 err = 0;
+	s32 bssidx;
+
+	if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+		WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
+		return BCME_ERROR;
+	}
+	WL_DBG(("key index (%d)\n", key_idx));
+	RETURN_EIO_IF_NOT_UP(cfg);
+	memset(&key, 0, sizeof(key));
+	key.index = key_idx;
+	swap_key_to_BE(&key);
+	memset(&params, 0, sizeof(params));
+	params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len);
+	memcpy((void *)params.key, key.data, params.key_len);
+
+	err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
+	if (unlikely(err)) {
+		WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+		return err;
+	}
+	switch (WSEC_ENABLED(wsec)) {
+		case WEP_ENABLED:
+			sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+			if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
+				params.cipher = WLAN_CIPHER_SUITE_WEP40;
+				WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+			} else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
+				params.cipher = WLAN_CIPHER_SUITE_WEP104;
+				WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+			}
+			break;
+		case TKIP_ENABLED:
+			params.cipher = WLAN_CIPHER_SUITE_TKIP;
+			WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+			break;
+		case AES_ENABLED:
+			params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+			WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+			break;
+		default:
+			WL_ERR(("Invalid algo (0x%x)\n", wsec));
+			return -EINVAL;
+	}
+
+	callback(cookie, &params);
+	return err;
+}
+
+static s32
+wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+	struct net_device *dev, u8 key_idx)
+{
+	WL_INFORM(("Not supported\n"));
+	return -EOPNOTSUPP;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+	const u8 *mac, struct station_info *sinfo)
+#else
+static s32
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+	u8 *mac, struct station_info *sinfo)
+#endif
+{
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+	scb_val_t scb_val;
+	s32 rssi;
+	s32 rate;
+	s32 err = 0;
+	sta_info_t *sta;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+	s8 eabuf[ETHER_ADDR_STR_LEN];
+#endif
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	bool fw_assoc_state = FALSE;
+	u32 dhd_assoc_state = 0;
+	RETURN_EIO_IF_NOT_UP(cfg);
+	if
(wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) { + err = wldev_iovar_getbuf(dev, "sta_info", (struct ether_addr *)mac, + ETHER_ADDR_LEN, cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync); + if (err < 0) { + WL_ERR(("GET STA INFO failed, %d\n", err)); + return err; + } + sinfo->filled = STA_INFO_BIT(INFO_INACTIVE_TIME); + sta = (sta_info_t *)cfg->ioctl_buf; + sta->len = dtoh16(sta->len); + sta->cap = dtoh16(sta->cap); + sta->flags = dtoh32(sta->flags); + sta->idle = dtoh32(sta->idle); + sta->in = dtoh32(sta->in); + sinfo->inactive_time = sta->idle * 1000; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) + if (sta->flags & WL_STA_ASSOC) { + sinfo->filled |= STA_INFO_BIT(INFO_CONNECTED_TIME); + sinfo->connected_time = sta->in; + } + WL_INFORM(("STA %s : idle time : %d sec, connected time :%d ms\n", + bcm_ether_ntoa((const struct ether_addr *)mac, eabuf), sinfo->inactive_time, + sta->idle * 1000)); +#endif + } else if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_BSS || + wl_get_mode_by_netdev(cfg, dev) == WL_MODE_IBSS) { + get_pktcnt_t pktcnt; + u8 *curmacp; + + if (cfg->roam_offload) { + struct ether_addr bssid; + err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false); + if (err) { + WL_ERR(("Failed to get current BSSID\n")); + } else { + if (!ETHER_ISNULLADDR(&bssid.octet) && + memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) { + /* roaming is detected */ + err = wl_cfg80211_delayed_roam(cfg, dev, &bssid); + if (err) + WL_ERR(("Failed to handle the delayed roam, " + "err=%d", err)); + mac = (u8 *)bssid.octet; + } + } + } + dhd_assoc_state = wl_get_drv_status(cfg, CONNECTED, dev); + fw_assoc_state = dhd_is_associated(dhd, 0, &err); + if (!dhd_assoc_state || !fw_assoc_state) { + WL_ERR(("NOT assoc\n")); + if (err == -ERESTARTSYS) + return err; + if (!dhd_assoc_state) { + WL_TRACE_HW4(("drv state is not connected \n")); + } + if (!fw_assoc_state) { + WL_TRACE_HW4(("fw state is not associated \n")); + } + /* Disconnect due to fw is not associated for FW_ASSOC_WATCHDOG_TIME ms. + * 'err == 0' of dhd_is_associated() and '!fw_assoc_state' + * means that BSSID is null. + */ + if (dhd_assoc_state && !fw_assoc_state && !err) { + if (!fw_assoc_watchdog_started) { + fw_assoc_watchdog_ms = OSL_SYSUPTIME(); + fw_assoc_watchdog_started = TRUE; + WL_TRACE_HW4(("fw_assoc_watchdog_started \n")); + } else { + if (OSL_SYSUPTIME() - fw_assoc_watchdog_ms > + FW_ASSOC_WATCHDOG_TIME) { + fw_assoc_watchdog_started = FALSE; + err = -ENODEV; + WL_TRACE_HW4(("fw is not associated for %d ms \n", + (OSL_SYSUPTIME() - fw_assoc_watchdog_ms))); + goto get_station_err; + } + } + } + err = -ENODEV; + return err; + } + fw_assoc_watchdog_started = FALSE; + curmacp = wl_read_prof(cfg, dev, WL_PROF_BSSID); + if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) { + WL_ERR(("Wrong Mac address: "MACDBG" != "MACDBG"\n", + MAC2STRDBG(mac), MAC2STRDBG(curmacp))); + } + + /* Report the current tx rate */ + err = wldev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate), false); + if (err) { + WL_ERR(("Could not get rate (%d)\n", err)); + } else { +#if defined(USE_DYNAMIC_MAXPKT_RXGLOM) + int rxpktglom; +#endif + rate = dtoh32(rate); + sinfo->filled |= STA_INFO_BIT(INFO_TX_BITRATE); + sinfo->txrate.legacy = rate * 5; + WL_DBG(("Rate %d Mbps\n", (rate / 2))); +#if defined(USE_DYNAMIC_MAXPKT_RXGLOM) + rxpktglom = ((rate/2) > 150) ? 
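+			/* rate is in 500 kbps units, so rate/2 is Mbps; use the deeper rx glom above 150 Mbps */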
20 : 10; + + if (maxrxpktglom != rxpktglom) { + maxrxpktglom = rxpktglom; + WL_DBG(("Rate %d Mbps, update bus:maxtxpktglom=%d\n", (rate/2), + maxrxpktglom)); + err = wldev_iovar_setbuf(dev, "bus:maxtxpktglom", + (char*)&maxrxpktglom, 4, cfg->ioctl_buf, + WLC_IOCTL_MAXLEN, NULL); + if (err < 0) { + WL_ERR(("set bus:maxtxpktglom failed, %d\n", err)); + } + } +#endif + } + + memset(&scb_val, 0, sizeof(scb_val)); + scb_val.val = 0; + err = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val, + sizeof(scb_val_t), false); + if (err) { + WL_ERR(("Could not get rssi (%d)\n", err)); + goto get_station_err; + } + rssi = wl_rssi_offset(dtoh32(scb_val.val)); + sinfo->filled |= STA_INFO_BIT(INFO_SIGNAL); + sinfo->signal = rssi; + WL_DBG(("RSSI %d dBm\n", rssi)); + err = wldev_ioctl(dev, WLC_GET_PKTCNTS, &pktcnt, + sizeof(pktcnt), false); + if (!err) { + sinfo->filled |= (STA_INFO_BIT(INFO_RX_PACKETS) | + STA_INFO_BIT(INFO_RX_DROP_MISC) | + STA_INFO_BIT(INFO_TX_PACKETS) | + STA_INFO_BIT(INFO_TX_FAILED)); + sinfo->rx_packets = pktcnt.rx_good_pkt; + sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt; + sinfo->tx_packets = pktcnt.tx_good_pkt; + sinfo->tx_failed = pktcnt.tx_bad_pkt; + } +get_station_err: + if (err && (err != -ERESTARTSYS)) { + /* Disconnect due to zero BSSID or error to get RSSI */ + WL_ERR(("force cfg80211_disconnected: %d\n", err)); + wl_clr_drv_status(cfg, CONNECTED, dev); + CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL); + wl_link_down(cfg); + } + } + else { + WL_ERR(("Invalid device mode %d\n", wl_get_mode_by_netdev(cfg, dev))); + } + + return err; +} + +static s32 +wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, + bool enabled, s32 timeout) +{ + s32 pm; + s32 err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_info *_net_info = wl_get_netinfo_by_netdev(cfg, dev); + + RETURN_EIO_IF_NOT_UP(cfg); + WL_DBG(("Enter\n")); + if (cfg->p2p_net == dev || _net_info == NULL || + !wl_get_drv_status(cfg, CONNECTED, dev) || + (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_BSS && + wl_get_mode_by_netdev(cfg, dev) != WL_MODE_IBSS)) { + return err; + } + /* Enlarge pm_enable_work */ + wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_LONG); + + pm = enabled ? PM_FAST : PM_OFF; + if (_net_info->pm_block) { + WL_ERR(("%s:Do not enable the power save for pm_block %d\n", + dev->name, _net_info->pm_block)); + pm = PM_OFF; + } + pm = htod32(pm); + WL_DBG(("%s:power save %s\n", dev->name, (pm ? "enabled" : "disabled"))); + err = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), true); + if (unlikely(err)) { + if (err == -ENODEV) + WL_DBG(("net_device is not ready yet\n")); + else + WL_ERR(("error (%d)\n", err)); + return err; + } + wl_cfg80211_update_power_mode(dev); + return err; +} + +void wl_cfg80211_update_power_mode(struct net_device *dev) +{ + int err, pm = -1; + + err = wldev_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm), true); + if (err) + WL_ERR(("%s:error (%d)\n", __FUNCTION__, err)); + else if (pm != -1 && dev->ieee80211_ptr) + dev->ieee80211_ptr->ps = (pm == PM_OFF) ? 
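+		/* mirror the firmware PM mode into cfg80211's power-save flag */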
false : true; +} + +void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + if (strcmp(command, "SCAN-ACTIVE") == 0) { + cfg->active_scan = 1; + } else if (strcmp(command, "SCAN-PASSIVE") == 0) { + cfg->active_scan = 0; + } else + WL_ERR(("Unknown command \n")); +} + +static __used u32 wl_find_msb(u16 bit16) +{ + u32 ret = 0; + + if (bit16 & 0xff00) { + ret += 8; + bit16 >>= 8; + } + + if (bit16 & 0xf0) { + ret += 4; + bit16 >>= 4; + } + + if (bit16 & 0xc) { + ret += 2; + bit16 >>= 2; + } + + if (bit16 & 2) + ret += bit16 & 2; + else if (bit16) + ret += bit16; + + return ret; +} + +static s32 wl_cfg80211_resume(struct wiphy *wiphy) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + s32 err = BCME_OK; + + if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) { + WL_INFORM(("device is not ready\n")); + return err; + } + + return err; +} + + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) +static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow) +#else +static s32 wl_cfg80211_suspend(struct wiphy *wiphy) +#endif /* KERNEL_VERSION(2, 6, 39) || WL_COMPAT_WIRELES */ +{ + s32 err = BCME_OK; +#ifdef DHD_CLEAR_ON_SUSPEND + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_info *iter, *next; + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + struct cfg80211_scan_info info = {}; + unsigned long flags; + if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) { + WL_INFORM(("device is not ready : status (%d)\n", + (int)cfg->status)); + return err; + } + for_each_ndev(cfg, iter, next) { + /* p2p discovery iface doesn't have a ndev associated with it (for kernel > 3.8) */ + if (iter->ndev) + wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev); + } + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); + if (cfg->scan_request) { + info.aborted = true; + cfg80211_scan_done(cfg->scan_request, &info); + cfg->scan_request = NULL; + } + for_each_ndev(cfg, iter, next) { + if (iter->ndev) { + wl_clr_drv_status(cfg, SCANNING, iter->ndev); + wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev); + } + } + spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); + for_each_ndev(cfg, iter, next) { + if (iter->ndev) { + if (wl_get_drv_status(cfg, CONNECTING, iter->ndev)) { + wl_bss_connect_done(cfg, iter->ndev, NULL, NULL, false); + } + } + } +#endif /* DHD_CLEAR_ON_SUSPEND */ + + + return err; +} + +static s32 +wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list, + s32 err) +{ + int i, j; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg); + + if (!pmk_list) { + WL_INFORM(("pmk_list is NULL\n")); + return -EINVAL; + } + /* pmk list is supported only for STA interface i.e. 
primary interface + * Refer code wlc_bsscfg.c->wlc_bsscfg_sta_init + */ + if (primary_dev != dev) { + WL_INFORM(("Not supporting Flushing pmklist on virtual" + " interfaces than primary interface\n")); + return err; + } + + WL_DBG(("No of elements %d\n", pmk_list->pmkids.npmkid)); + for (i = 0; i < pmk_list->pmkids.npmkid; i++) { + WL_DBG(("PMKID[%d]: %pM =\n", i, + &pmk_list->pmkids.pmkid[i].BSSID)); + for (j = 0; j < WPA2_PMKID_LEN; j++) { + WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j])); + } + } + if (likely(!err)) { + err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list, + sizeof(*pmk_list), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + } + + return err; +} + +static s32 +wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_pmksa *pmksa) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 err = 0; + int i; + + RETURN_EIO_IF_NOT_UP(cfg); + for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++) + if (!memcmp(pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID, + ETHER_ADDR_LEN)) + break; + if (i < WL_NUM_PMKIDS_MAX) { + memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid, + ETHER_ADDR_LEN); + memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid, + WPA2_PMKID_LEN); + if (i == cfg->pmk_list->pmkids.npmkid) + cfg->pmk_list->pmkids.npmkid++; + } else { + err = -EINVAL; + } + WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n", + &cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1].BSSID)); + for (i = 0; i < WPA2_PMKID_LEN; i++) { + WL_DBG(("%02x\n", + cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1]. + PMKID[i])); + } + + err = wl_update_pmklist(dev, cfg->pmk_list, err); + + return err; +} + +static s32 +wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_pmksa *pmksa) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + + struct _pmkid_list pmkid = {.npmkid = 0}; + s32 err = 0; + int i; + + RETURN_EIO_IF_NOT_UP(cfg); + memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN); + memcpy(pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN); + + WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n", + &pmkid.pmkid[0].BSSID)); + for (i = 0; i < WPA2_PMKID_LEN; i++) { + WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i])); + } + + for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++) + if (!memcmp + (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID, + ETHER_ADDR_LEN)) + break; + + if ((cfg->pmk_list->pmkids.npmkid > 0) && + (i < cfg->pmk_list->pmkids.npmkid)) { + memset(&cfg->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t)); + for (; i < (cfg->pmk_list->pmkids.npmkid - 1); i++) { + memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID, + &cfg->pmk_list->pmkids.pmkid[i + 1].BSSID, + ETHER_ADDR_LEN); + memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID, + &cfg->pmk_list->pmkids.pmkid[i + 1].PMKID, + WPA2_PMKID_LEN); + } + cfg->pmk_list->pmkids.npmkid--; + } else { + err = -EINVAL; + } + + err = wl_update_pmklist(dev, cfg->pmk_list, err); + + return err; + +} + +static s32 +wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 err = 0; + RETURN_EIO_IF_NOT_UP(cfg); + memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list)); + err = wl_update_pmklist(dev, cfg->pmk_list, err); + return err; + +} + +static wl_scan_params_t * +wl_cfg80211_scan_alloc_params(int channel, int nprobes, int *out_params_size) +{ + wl_scan_params_t *params; + int params_size; + int num_chans; + int bssidx = 0; + + *out_params_size = 0; + + /* Our scan params only 
need space for 1 channel and 0 ssids */
+	params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
+	params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
+	if (params == NULL) {
+		WL_ERR(("mem alloc failed (%d bytes)\n", params_size));
+		return params;
+	}
+	memset(params, 0, params_size);
+	params->nprobes = nprobes;
+
+	num_chans = (channel == 0) ? 0 : 1;
+
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	params->bss_type = DOT11_BSSTYPE_ANY;
+	params->scan_type = DOT11_SCANTYPE_ACTIVE;
+	params->nprobes = htod32(1);
+	params->active_time = htod32(-1);
+	params->passive_time = htod32(-1);
+	params->home_time = htod32(10);
+	if (channel == -1)
+		params->channel_list[0] = htodchanspec(channel);
+	else
+		params->channel_list[0] = wl_ch_host_to_driver(bssidx, channel);
+
+	/* Our scan params have 1 channel and 0 ssids */
+	params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+		(num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+	*out_params_size = params_size;	/* rtn size to the caller */
+	return params;
+}
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct ieee80211_channel *channel, unsigned int duration, u64 *cookie)
+#else
+static s32
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	struct ieee80211_channel * channel,
+	enum nl80211_channel_type channel_type,
+	unsigned int duration, u64 *cookie)
+#endif /* WL_CFG80211_P2P_DEV_IF */
+{
+	s32 target_channel;
+	s32 err = BCME_OK;
+	struct ether_addr primary_mac;
+	struct net_device *ndev = NULL;
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+	ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+	WL_DBG(("Enter, channel: %d, duration ms (%d) SCANNING ?? %s \n",
+		ieee80211_frequency_to_channel(channel->center_freq),
+		duration, (wl_get_drv_status(cfg, SCANNING, ndev)) ? "YES":"NO"));
+
+	if (!cfg->p2p) {
+		WL_ERR(("cfg->p2p is not initialized\n"));
+		err = BCME_ERROR;
+		goto exit;
+	}
+
+#ifdef P2P_LISTEN_OFFLOADING
+	if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+		WL_ERR(("P2P_FIND: Discovery offload is in progress\n"));
+		return -EAGAIN;
+	}
+#endif /* P2P_LISTEN_OFFLOADING */
+
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	if (wl_get_drv_status_all(cfg, SCANNING)) {
+		wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+	}
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+	target_channel = ieee80211_frequency_to_channel(channel->center_freq);
+	memcpy(&cfg->remain_on_chan, channel, sizeof(struct ieee80211_channel));
+#if defined(WL_ENABLE_P2P_IF)
+	cfg->remain_on_chan_type = channel_type;
+#endif /* WL_ENABLE_P2P_IF */
+	*cookie = wl_cfg80211_get_new_roc_id(cfg);
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	if (wl_get_drv_status(cfg, SCANNING, ndev)) {
+		struct timer_list *_timer;
+		WL_DBG(("scan is running. 
go to fake listen state\n")); + + if (duration > LONG_LISTEN_TIME) { + wl_cfg80211_scan_abort(cfg); + } else { + wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev); + + if (timer_pending(&cfg->p2p->listen_timer)) { + WL_DBG(("cancel current listen timer \n")); + del_timer_sync(&cfg->p2p->listen_timer); + } + + _timer = &cfg->p2p->listen_timer; + wl_clr_p2p_status(cfg, LISTEN_EXPIRED); + + cfg->p2p->bcm_cfg = cfg; + INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration, 0); + + err = BCME_OK; + goto exit; + } + } +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + +#ifdef WL_CFG80211_SYNC_GON + if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) { + /* do not enter listen mode again if we are in listen mode already for next af. + * remain on channel completion will be returned by waiting next af completion. + */ +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev); +#else + wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev); +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + goto exit; + } +#endif /* WL_CFG80211_SYNC_GON */ + if (cfg->p2p && !cfg->p2p->on) { + /* In case of p2p_listen command, supplicant send remain_on_channel + * without turning on P2P + */ + get_primary_mac(cfg, &primary_mac); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); + p2p_on(cfg) = true; + } + + if (p2p_is_on(cfg)) { + err = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0); + if (unlikely(err)) { + goto exit; + } +#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev); +#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + err = wl_cfgp2p_discover_listen(cfg, target_channel, duration); + +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + if (err == BCME_OK) { + wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev); + } else { + /* if failed, firmware may be internal scanning state. 
+ * so other scan request shall not abort it + */ + wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev); + } +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + /* WAR: set err = ok to prevent cookie mismatch in wpa_supplicant + * and expire timer will send a completion to the upper layer + */ + err = BCME_OK; + } + +exit: + if (err == BCME_OK) { + WL_INFORM(("Success\n")); +#if defined(WL_CFG80211_P2P_DEV_IF) + cfg80211_ready_on_channel(cfgdev, *cookie, channel, + duration, GFP_KERNEL); +#else + cfg80211_ready_on_channel(cfgdev, *cookie, channel, + channel_type, duration, GFP_KERNEL); +#endif /* WL_CFG80211_P2P_DEV_IF */ + } else { + WL_ERR(("Fail to Set (err=%d cookie:%llu)\n", err, *cookie)); + } + return err; +} + +static s32 +wl_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy, + bcm_struct_cfgdev *cfgdev, u64 cookie) +{ + s32 err = 0; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + +#ifdef P2PLISTEN_AP_SAMECHN + struct net_device *dev; +#endif /* P2PLISTEN_AP_SAMECHN */ + + RETURN_EIO_IF_NOT_UP(cfg); +#if defined(WL_CFG80211_P2P_DEV_IF) + if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) { + WL_DBG((" enter ) on P2P dedicated discover interface\n")); + } +#else + WL_DBG((" enter ) netdev_ifidx: %d \n", cfgdev->ifindex)); +#endif /* WL_CFG80211_P2P_DEV_IF */ + +#ifdef P2PLISTEN_AP_SAMECHN + if (cfg && cfg->p2p_resp_apchn_status) { + dev = bcmcfg_to_prmry_ndev(cfg); + wl_cfg80211_set_p2p_resp_ap_chn(dev, 0); + cfg->p2p_resp_apchn_status = false; + WL_DBG(("p2p_resp_apchn_status Turn OFF \n")); + } +#endif /* P2PLISTEN_AP_SAMECHN */ + + if (cfg->last_roc_id == cookie) { + wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE)); + } else { + WL_ERR(("%s : ignore, request cookie(%llu) is not matched. (cur : %llu)\n", + __FUNCTION__, cookie, cfg->last_roc_id)); + } + + return err; +} + +static void +wl_cfg80211_afx_handler(struct work_struct *work) +{ + struct afx_hdl *afx_instance; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + s32 ret = BCME_OK; + + BCM_SET_CONTAINER_OF(afx_instance, work, struct afx_hdl, work); + if (afx_instance != NULL && cfg->afx_hdl->is_active) { + if (cfg->afx_hdl->is_listen && cfg->afx_hdl->my_listen_chan) { + ret = wl_cfgp2p_discover_listen(cfg, cfg->afx_hdl->my_listen_chan, + (100 * (1 + (RANDOM32() % 3)))); /* 100ms ~ 300ms */ + } else { + ret = wl_cfgp2p_act_frm_search(cfg, cfg->afx_hdl->dev, + cfg->afx_hdl->bssidx, cfg->afx_hdl->peer_listen_chan, + NULL); + } + if (unlikely(ret != BCME_OK)) { + WL_ERR(("ERROR occurred! returned value is (%d)\n", ret)); + if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) + complete(&cfg->act_frm_scan); + } + } +} + +static s32 +wl_cfg80211_af_searching_channel(struct bcm_cfg80211 *cfg, struct net_device *dev) +{ + u32 max_retry = WL_CHANNEL_SYNC_RETRY; + bool is_p2p_gas = false; + + if (dev == NULL) + return -1; + + WL_DBG((" enter ) \n")); + + wl_set_drv_status(cfg, FINDING_COMMON_CHANNEL, dev); + cfg->afx_hdl->is_active = TRUE; + + if (cfg->afx_hdl->pending_tx_act_frm) { + wl_action_frame_t *action_frame; + action_frame = &(cfg->afx_hdl->pending_tx_act_frm->action_frame); + if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len)) + is_p2p_gas = true; + } + + /* Loop to wait until we find a peer's channel or the + * pending action frame tx is cancelled. 
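+	 * Each retry first searches on the peer's listen channel, then listens on our own listen channel.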
+ */ + while ((cfg->afx_hdl->retry < max_retry) && + (cfg->afx_hdl->peer_chan == WL_INVALID)) { + cfg->afx_hdl->is_listen = FALSE; + wl_set_drv_status(cfg, SCANNING, dev); + WL_DBG(("Scheduling the action frame for sending.. retry %d\n", + cfg->afx_hdl->retry)); + /* search peer on peer's listen channel */ + schedule_work(&cfg->afx_hdl->work); + wait_for_completion_timeout(&cfg->act_frm_scan, + msecs_to_jiffies(WL_AF_SEARCH_TIME_MAX)); + + if ((cfg->afx_hdl->peer_chan != WL_INVALID) || + !(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev))) + break; + + if (is_p2p_gas) + break; + + if (cfg->afx_hdl->my_listen_chan) { + WL_DBG(("Scheduling Listen peer in my listen channel = %d\n", + cfg->afx_hdl->my_listen_chan)); + /* listen on my listen channel */ + cfg->afx_hdl->is_listen = TRUE; + schedule_work(&cfg->afx_hdl->work); + wait_for_completion_timeout(&cfg->act_frm_scan, + msecs_to_jiffies(WL_AF_SEARCH_TIME_MAX)); + } + if ((cfg->afx_hdl->peer_chan != WL_INVALID) || + !(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev))) + break; + + cfg->afx_hdl->retry++; + + WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg); + } + + cfg->afx_hdl->is_active = FALSE; + + wl_clr_drv_status(cfg, SCANNING, dev); + wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, dev); + + return (cfg->afx_hdl->peer_chan); +} + +struct p2p_config_af_params { + s32 max_tx_retry; /* max tx retry count if tx no ack */ + /* To make sure to send successfully action frame, we have to turn off mpc + * 0: off, 1: on, (-1): do nothing + */ + s32 mpc_onoff; +#ifdef WL_CFG80211_SYNC_GON + bool extra_listen; +#endif + bool search_channel; /* 1: search peer's channel to send af */ +}; + +static s32 +wl_cfg80211_config_p2p_pub_af_tx(struct wiphy *wiphy, + wl_action_frame_t *action_frame, wl_af_params_t *af_params, + struct p2p_config_af_params *config_af_params) +{ + s32 err = BCME_OK; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + wifi_p2p_pub_act_frame_t *act_frm = + (wifi_p2p_pub_act_frame_t *) (action_frame->data); + + /* initialize default value */ +#ifdef WL_CFG80211_SYNC_GON + config_af_params->extra_listen = true; +#endif + config_af_params->search_channel = false; + config_af_params->max_tx_retry = WL_AF_TX_MAX_RETRY; + config_af_params->mpc_onoff = -1; + cfg->next_af_subtype = P2P_PAF_SUBTYPE_INVALID; + + switch (act_frm->subtype) { + case P2P_PAF_GON_REQ: { + WL_DBG(("P2P: GO_NEG_PHASE status set \n")); + wl_set_p2p_status(cfg, GO_NEG_PHASE); + + config_af_params->mpc_onoff = 0; + config_af_params->search_channel = true; + cfg->next_af_subtype = act_frm->subtype + 1; + + /* increase dwell time to wait for RESP frame */ + af_params->dwell_time = WL_MED_DWELL_TIME; + + break; + } + case P2P_PAF_GON_RSP: { + cfg->next_af_subtype = act_frm->subtype + 1; + /* increase dwell time to wait for CONF frame */ + af_params->dwell_time = WL_MED_DWELL_TIME + 100; + break; + } + case P2P_PAF_GON_CONF: { + /* If we reached till GO Neg confirmation reset the filter */ + WL_DBG(("P2P: GO_NEG_PHASE status cleared \n")); + wl_clr_p2p_status(cfg, GO_NEG_PHASE); + + /* turn on mpc again if go nego is done */ + config_af_params->mpc_onoff = 1; + + /* minimize dwell time */ + af_params->dwell_time = WL_MIN_DWELL_TIME; + +#ifdef WL_CFG80211_SYNC_GON + config_af_params->extra_listen = false; +#endif /* WL_CFG80211_SYNC_GON */ + break; + } + case P2P_PAF_INVITE_REQ: { + config_af_params->search_channel = true; + cfg->next_af_subtype = act_frm->subtype + 1; + + /* increase dwell time */ + af_params->dwell_time = WL_MED_DWELL_TIME; + break; + } + case 
P2P_PAF_INVITE_RSP: + /* minimize dwell time */ + af_params->dwell_time = WL_MIN_DWELL_TIME; +#ifdef WL_CFG80211_SYNC_GON + config_af_params->extra_listen = false; +#endif /* WL_CFG80211_SYNC_GON */ + break; + case P2P_PAF_DEVDIS_REQ: { + if (IS_ACTPUB_WITHOUT_GROUP_ID(&act_frm->elts[0], + action_frame->len)) { + config_af_params->search_channel = true; + } + + cfg->next_af_subtype = act_frm->subtype + 1; + /* maximize dwell time to wait for RESP frame */ + af_params->dwell_time = WL_LONG_DWELL_TIME; + break; + } + case P2P_PAF_DEVDIS_RSP: + /* minimize dwell time */ + af_params->dwell_time = WL_MIN_DWELL_TIME; +#ifdef WL_CFG80211_SYNC_GON + config_af_params->extra_listen = false; +#endif /* WL_CFG80211_SYNC_GON */ + break; + case P2P_PAF_PROVDIS_REQ: { + if (IS_ACTPUB_WITHOUT_GROUP_ID(&act_frm->elts[0], + action_frame->len)) { + config_af_params->search_channel = true; + } + + config_af_params->mpc_onoff = 0; + cfg->next_af_subtype = act_frm->subtype + 1; + /* increase dwell time to wait for RESP frame */ + af_params->dwell_time = WL_MED_DWELL_TIME; + break; + } + case P2P_PAF_PROVDIS_RSP: { + cfg->next_af_subtype = P2P_PAF_GON_REQ; + af_params->dwell_time = WL_MIN_DWELL_TIME; +#ifdef WL_CFG80211_SYNC_GON + config_af_params->extra_listen = false; +#endif /* WL_CFG80211_SYNC_GON */ + break; + } + default: + WL_DBG(("Unknown p2p pub act frame subtype: %d\n", + act_frm->subtype)); + err = BCME_BADARG; + } + return err; +} + +#ifdef WL11U +static bool +wl_cfg80211_check_DFS_channel(struct bcm_cfg80211 *cfg, wl_af_params_t *af_params, + void *frame, u16 frame_len) +{ + struct wl_scan_results *bss_list; + struct wl_bss_info *bi = NULL; + bool result = false; + s32 i; + chanspec_t chanspec; + + /* If DFS channel is 52~148, check to block it or not */ + if (af_params && + (af_params->channel >= 52 && af_params->channel <= 148)) { + if (!wl_cfgp2p_is_p2p_action(frame, frame_len)) { + bss_list = cfg->bss_list; + bi = next_bss(bss_list, bi); + for_each_bss(bss_list, bi, i) { + chanspec = wl_chspec_driver_to_host(bi->chanspec); + if (CHSPEC_IS5G(chanspec) && + ((bi->ctl_ch ? 
bi->ctl_ch : CHSPEC_CHANNEL(chanspec)) + == af_params->channel)) { + result = true; /* do not block the action frame */ + break; + } + } + } + } + else { + result = true; + } + + WL_DBG(("result=%s", result?"true":"false")); + return result; +} +#endif /* WL11U */ +static bool +wl_cfg80211_check_dwell_overflow(int32 requested_dwell, ulong dwell_jiffies) +{ + if ((requested_dwell & CUSTOM_RETRY_MASK) && + (jiffies_to_msecs(jiffies - dwell_jiffies) > + (requested_dwell & ~CUSTOM_RETRY_MASK))) { + WL_ERR(("Action frame TX retry time over dwell time!\n")); + return true; + } + return false; +} + +static bool +wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev, + bcm_struct_cfgdev *cfgdev, wl_af_params_t *af_params, + wl_action_frame_t *action_frame, u16 action_frame_len, s32 bssidx) +{ +#ifdef WL11U + struct net_device *ndev = NULL; +#endif /* WL11U */ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + bool ack = false; + u8 category, action; + s32 tx_retry; + struct p2p_config_af_params config_af_params; + struct net_info *netinfo; +#ifdef VSDB + ulong off_chan_started_jiffies = 0; +#endif + ulong dwell_jiffies = 0; + bool dwell_overflow = false; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + + int32 requested_dwell = af_params->dwell_time; + + /* Add the default dwell time + * Dwell time to stay off-channel to wait for a response action frame + * after transmitting an GO Negotiation action frame + */ + af_params->dwell_time = WL_DWELL_TIME; + +#ifdef WL11U +#if defined(WL_CFG80211_P2P_DEV_IF) + ndev = dev; +#else + ndev = ndev_to_cfgdev(cfgdev); +#endif /* WL_CFG80211_P2P_DEV_IF */ +#endif /* WL11U */ + + category = action_frame->data[DOT11_ACTION_CAT_OFF]; + action = action_frame->data[DOT11_ACTION_ACT_OFF]; + + /* initialize variables */ + tx_retry = 0; + cfg->next_af_subtype = P2P_PAF_SUBTYPE_INVALID; + config_af_params.max_tx_retry = WL_AF_TX_MAX_RETRY; + config_af_params.mpc_onoff = -1; + config_af_params.search_channel = false; +#ifdef WL_CFG80211_SYNC_GON + config_af_params.extra_listen = false; +#endif + + /* config parameters */ + /* Public Action Frame Process - DOT11_ACTION_CAT_PUBLIC */ + if (category == DOT11_ACTION_CAT_PUBLIC) { + if ((action == P2P_PUB_AF_ACTION) && + (action_frame_len >= sizeof(wifi_p2p_pub_act_frame_t))) { + /* p2p public action frame process */ + if (BCME_OK != wl_cfg80211_config_p2p_pub_af_tx(wiphy, + action_frame, af_params, &config_af_params)) { + WL_DBG(("Unknown subtype.\n")); + } + + } else if (action_frame_len >= sizeof(wifi_p2psd_gas_pub_act_frame_t)) { + /* service discovery process */ + if (action == P2PSD_ACTION_ID_GAS_IREQ || + action == P2PSD_ACTION_ID_GAS_CREQ) { + /* configure service discovery query frame */ + + config_af_params.search_channel = true; + + /* save next af suptype to cancel remained dwell time */ + cfg->next_af_subtype = action + 1; + + af_params->dwell_time = WL_MED_DWELL_TIME; + if (requested_dwell & CUSTOM_RETRY_MASK) { + config_af_params.max_tx_retry = + (requested_dwell & CUSTOM_RETRY_MASK) >> 24; + af_params->dwell_time = + (requested_dwell & ~CUSTOM_RETRY_MASK); + WL_DBG(("Custom retry(%d) and dwell time(%d) is set.\n", + config_af_params.max_tx_retry, + af_params->dwell_time)); + } + } else if (action == P2PSD_ACTION_ID_GAS_IRESP || + action == P2PSD_ACTION_ID_GAS_CRESP) { + /* configure service discovery response frame */ + af_params->dwell_time = WL_MIN_DWELL_TIME; + } else { + WL_DBG(("Unknown action type: %d\n", action)); + } + } else { + WL_DBG(("Unknown Frame: category 0x%x, action 0x%x, 
length %d\n", + category, action, action_frame_len)); + } + } else if (category == P2P_AF_CATEGORY) { + /* do not configure anything. it will be sent with a default configuration */ + } else { + WL_DBG(("Unknown Frame: category 0x%x, action 0x%x\n", + category, action)); + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + wl_clr_drv_status(cfg, SENDING_ACT_FRM, dev); + return false; + } + } + + /* To make sure to send successfully action frame, we have to turn off mpc */ + if (config_af_params.mpc_onoff == 0) { + wldev_iovar_setint(dev, "mpc", 0); + } + + netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx); + /* validate channel and p2p ies */ + if (config_af_params.search_channel && IS_P2P_SOCIAL(af_params->channel) && + netinfo && netinfo->bss.ies.probe_req_ie_len) { + config_af_params.search_channel = true; + } else { + config_af_params.search_channel = false; + } +#ifdef WL11U + if (ndev == bcmcfg_to_prmry_ndev(cfg)) + config_af_params.search_channel = false; +#endif /* WL11U */ + +#ifdef VSDB + /* if connecting on primary iface, sleep for a while before sending af tx for VSDB */ + if (wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) { + OSL_SLEEP(50); + } +#endif + + /* if scan is ongoing, abort current scan. */ + if (wl_get_drv_status_all(cfg, SCANNING)) { + wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true); + } + + /* Abort P2P listen */ + if (discover_cfgdev(cfgdev, cfg)) { + if (cfg->p2p_supported && cfg->p2p) { + wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE)); + } + } + +#ifdef WL11U + /* handling DFS channel exceptions */ + if (!wl_cfg80211_check_DFS_channel(cfg, af_params, action_frame->data, action_frame->len)) { + return false; /* the action frame was blocked */ + } +#endif /* WL11U */ + + /* set status and destination address before sending af */ + if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) { + /* set this status to cancel the remained dwell time in rx process */ + wl_set_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev); + } + wl_set_drv_status(cfg, SENDING_ACT_FRM, dev); + memcpy(cfg->afx_hdl->tx_dst_addr.octet, + af_params->action_frame.da.octet, + sizeof(cfg->afx_hdl->tx_dst_addr.octet)); + + /* save af_params for rx process */ + cfg->afx_hdl->pending_tx_act_frm = af_params; + + if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len)) { + WL_DBG(("Set GAS action frame config.\n")); + config_af_params.search_channel = false; + config_af_params.max_tx_retry = 1; + } + + /* search peer's channel */ + if (config_af_params.search_channel) { + /* initialize afx_hdl */ + if ((cfg->afx_hdl->bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + goto exit; + } + cfg->afx_hdl->dev = dev; + cfg->afx_hdl->retry = 0; + cfg->afx_hdl->peer_chan = WL_INVALID; + + if (wl_cfg80211_af_searching_channel(cfg, dev) == WL_INVALID) { + WL_ERR(("couldn't find peer's channel.\n")); + wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len, + af_params->channel); + goto exit; + } + + wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev); + /* + * Abort scan even for VSDB scenarios. Scan gets aborted in firmware + * but after the check of piggyback algorithm. + * To take care of current piggback algo, lets abort the scan here itself. + */ + wl_notify_escan_complete(cfg, dev, true, true); + /* Suspend P2P discovery's search-listen to prevent it from + * starting a scan or changing the channel. 
+		 */
+		if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+			WL_ERR(("Can not disable discovery mode\n"));
+			goto exit;
+		}
+
+		/* update channel */
+		af_params->channel = cfg->afx_hdl->peer_chan;
+	}
+
+#ifdef VSDB
+	off_chan_started_jiffies = jiffies;
+#endif /* VSDB */
+
+	wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len, af_params->channel);
+
+	wl_cfgp2p_need_wait_actfrmae(cfg, action_frame->data, action_frame->len, true);
+
+	dwell_jiffies = jiffies;
+	/* Now send a tx action frame */
+	ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ? false : true;
+	dwell_overflow = wl_cfg80211_check_dwell_overflow(requested_dwell, dwell_jiffies);
+
+	/* if it failed, retry. The tx_retry_max value is configured by .... */
+	while ((ack == false) && (tx_retry++ < config_af_params.max_tx_retry) &&
+		!dwell_overflow) {
+#ifdef VSDB
+		if (af_params->channel) {
+			if (jiffies_to_msecs(jiffies - off_chan_started_jiffies) >
+				OFF_CHAN_TIME_THRESHOLD_MS) {
+				WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg);
+				off_chan_started_jiffies = jiffies;
+			} else
+				OSL_SLEEP(AF_RETRY_DELAY_TIME);
+		}
+#endif /* VSDB */
+		ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ?
+			false : true;
+		dwell_overflow = wl_cfg80211_check_dwell_overflow(requested_dwell, dwell_jiffies);
+	}
+
+	if (ack == false) {
+		WL_ERR(("Failed to send Action Frame(retry %d)\n", tx_retry));
+	}
+	WL_DBG(("Complete to send action frame\n"));
+exit:
+	/* Clear SENDING_ACT_FRM after all sending af is done */
+	wl_clr_drv_status(cfg, SENDING_ACT_FRM, dev);
+
+#ifdef WL_CFG80211_SYNC_GON
+	/* WAR: sometimes dongle does not keep the dwell time of 'actframe'.
+	 * if we couldn't get the next action response frame and dongle does not keep
+	 * the dwell time, go to listen state again to get next action response frame.
+	 */
+	if (ack && config_af_params.extra_listen &&
+		wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM) &&
+		cfg->af_sent_channel == cfg->afx_hdl->my_listen_chan) {
+		s32 extra_listen_time;
+
+		extra_listen_time = af_params->dwell_time -
+			jiffies_to_msecs(jiffies - cfg->af_tx_sent_jiffies);
+
+		if (extra_listen_time > 50) {
+			wl_set_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, dev);
+			WL_DBG(("Wait more time! actual af time:%d,"
+				"calculated extra listen:%d\n",
+				af_params->dwell_time, extra_listen_time));
+			if (wl_cfgp2p_discover_listen(cfg, cfg->af_sent_channel,
+				extra_listen_time + 100) == BCME_OK) {
+				wait_for_completion_timeout(&cfg->wait_next_af,
+					msecs_to_jiffies(extra_listen_time + 100 + 300));
+			}
+			wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, dev);
+		}
+	}
+#endif /* WL_CFG80211_SYNC_GON */
+	wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev);
+
+	if (cfg->afx_hdl->pending_tx_act_frm)
+		cfg->afx_hdl->pending_tx_act_frm = NULL;
+
+	WL_INFORM(("-- sending Action Frame is %s, listen chan: %d\n",
+		(ack) ?
"Succeeded!!":"Failed!!", cfg->afx_hdl->my_listen_chan)); + + + /* if all done, turn mpc on again */ + if (config_af_params.mpc_onoff == 1) { + wldev_iovar_setint(dev, "mpc", 1); + } + + return ack; +} + +#define MAX_NUM_OF_ASSOCIATED_DEV 64 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) +static s32 +wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev, + struct cfg80211_mgmt_tx_params *params, u64 *cookie) +#else +static s32 +wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev, + struct ieee80211_channel *channel, bool offchan, +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)) + enum nl80211_channel_type channel_type, + bool channel_type_valid, +#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0) */ + unsigned int wait, const u8* buf, size_t len, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + bool no_cck, +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) + bool dont_wait_for_ack, +#endif + u64 *cookie) +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */ +{ + wl_action_frame_t *action_frame; + wl_af_params_t *af_params; + scb_val_t scb_val; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) + struct ieee80211_channel *channel = params->chan; + const u8 *buf = params->buf; + size_t len = params->len; +#endif + const struct ieee80211_mgmt *mgmt; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *dev = NULL; + s32 err = BCME_OK; + s32 bssidx = 0; + u32 id; + bool ack = false; + s8 eabuf[ETHER_ADDR_STR_LEN]; + + WL_DBG(("Enter \n")); + + dev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + if (!dev) { + WL_ERR(("dev is NULL\n")); + return -EINVAL; + } + + /* set bsscfg idx for iovar (wlan0: P2PAPI_BSSCFG_PRIMARY, p2p: P2PAPI_BSSCFG_DEVICE) */ + if (discover_cfgdev(cfgdev, cfg)) { + if (!cfg->p2p_supported || !cfg->p2p) { + WL_ERR(("P2P doesn't setup completed yet\n")); + return -EINVAL; + } + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + } + else { + if ((bssidx = wl_get_bssidx_by_wdev(cfg, cfgdev_to_wdev(cfgdev))) < 0) { + WL_ERR(("Find p2p index failed\n")); + return BCME_ERROR; + } + } + + WL_DBG(("TX target bssidx=%d\n", bssidx)); + + if (p2p_is_on(cfg)) { + /* Suspend P2P discovery search-listen to prevent it from changing the + * channel. 
+ */ + if ((err = wl_cfgp2p_discover_enable_search(cfg, false)) < 0) { + WL_ERR(("Can not disable discovery mode\n")); + return -EFAULT; + } + } + *cookie = 0; + id = cfg->send_action_id++; + if (id == 0) + id = cfg->send_action_id++; + *cookie = id; + mgmt = (const struct ieee80211_mgmt *)buf; + if (ieee80211_is_mgmt(mgmt->frame_control)) { + if (ieee80211_is_probe_resp(mgmt->frame_control)) { + s32 ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN; + s32 ie_len = len - ie_offset; + if ((dev == bcmcfg_to_prmry_ndev(cfg)) && cfg->p2p) { + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + } + wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, + VNDR_IE_PRBRSP_FLAG, (const u8 *)(buf + ie_offset), ie_len); + cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL); +#if defined(P2P_IE_MISSING_FIX) + if (!cfg->p2p_prb_noti) { + cfg->p2p_prb_noti = true; + WL_DBG(("%s: TX 802_1X Probe Response first time.\n", + __FUNCTION__)); + } +#endif + goto exit; + } else if (ieee80211_is_disassoc(mgmt->frame_control) || + ieee80211_is_deauth(mgmt->frame_control)) { + char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV * + sizeof(struct ether_addr) + sizeof(uint)] = {0}; + int num_associated = 0; + struct maclist *assoc_maclist = (struct maclist *)mac_buf; + if (!bcmp((const uint8 *)BSSID_BROADCAST, + (const struct ether_addr *)mgmt->da, ETHER_ADDR_LEN)) { + assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV; + err = wldev_ioctl(dev, WLC_GET_ASSOCLIST, + assoc_maclist, sizeof(mac_buf), false); + if (err < 0) + WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err)); + else + num_associated = assoc_maclist->count; + } + memcpy(scb_val.ea.octet, mgmt->da, ETH_ALEN); + scb_val.val = mgmt->u.disassoc.reason_code; + err = wldev_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val, + sizeof(scb_val_t), true); + if (err < 0) + WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON error %d\n", err)); + WL_ERR(("Disconnect STA : %s scb_val.val %d\n", + bcm_ether_ntoa((const struct ether_addr *)mgmt->da, eabuf), + scb_val.val)); + + if (num_associated > 0 && ETHER_ISBCAST(mgmt->da)) + wl_delay(400); + + cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL); + goto exit; + + } else if (ieee80211_is_action(mgmt->frame_control)) { + /* Abort the dwell time of any previous off-channel + * action frame that may be still in effect. Sending + * off-channel action frames relies on the driver's + * scan engine. If a previous off-channel action frame + * tx is still in progress (including the dwell time), + * then this new action frame will not be sent out. + */ +/* Do not abort scan for VSDB. Scan will be aborted in firmware if necessary. + * And previous off-channel action frame must be ended before new af tx. 
+	 */
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+			wl_notify_escan_complete(cfg, dev, true, true);
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+		}
+
+	} else {
+		WL_ERR(("Driver only allows MGMT packet type\n"));
+		goto exit;
+	}
+
+	af_params = (wl_af_params_t *) kzalloc(WL_WIFI_AF_PARAMS_SIZE, GFP_KERNEL);
+
+	if (af_params == NULL)
+	{
+		WL_ERR(("unable to allocate frame\n"));
+		return -ENOMEM;
+	}
+
+	action_frame = &af_params->action_frame;
+
+	/* Add the packet Id */
+	action_frame->packetId = *cookie;
+	WL_DBG(("action frame %d\n", action_frame->packetId));
+	/* Add BSSID */
+	memcpy(&action_frame->da, &mgmt->da[0], ETHER_ADDR_LEN);
+	memcpy(&af_params->BSSID, &mgmt->bssid[0], ETHER_ADDR_LEN);
+
+	/* Add the length, excluding the 802.11 header */
+	action_frame->len = len - DOT11_MGMT_HDR_LEN;
+	WL_DBG(("action_frame->len: %d\n", action_frame->len));
+
+	/* Add the channel */
+	af_params->channel =
+		ieee80211_frequency_to_channel(channel->center_freq);
+	/* Save listen_chan for searching common channel */
+	cfg->afx_hdl->peer_listen_chan = af_params->channel;
+	WL_DBG(("channel from upper layer %d\n", cfg->afx_hdl->peer_listen_chan));
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+	af_params->dwell_time = params->wait;
+#else
+	af_params->dwell_time = wait;
+#endif
+
+	memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], action_frame->len);
+
+	ack = wl_cfg80211_send_action_frame(wiphy, dev, cfgdev, af_params,
+		action_frame, action_frame->len, bssidx);
+	cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, ack, GFP_KERNEL);
+
+	kfree(af_params);
+exit:
+	return err;
+}
+
+
+static void
+wl_cfg80211_mgmt_frame_register(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+	u16 frame_type, bool reg)
+{
+
+	WL_DBG(("frame_type: %x, reg: %d\n", frame_type, reg));
+
+	if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
+		return;
+
+	return;
+}
+
+
+static s32
+wl_cfg80211_change_bss(struct wiphy *wiphy,
+	struct net_device *dev,
+	struct bss_parameters *params)
+{
+	s32 err = 0;
+	s32 ap_isolate = 0;
+
+	if (params->use_cts_prot >= 0) {
+	}
+
+	if (params->use_short_preamble >= 0) {
+	}
+
+	if (params->use_short_slot_time >= 0) {
+	}
+
+	if (params->basic_rates) {
+	}
+
+	if (params->ap_isolate >= 0) {
+		ap_isolate = params->ap_isolate;
+		err = wldev_iovar_setint(dev, "ap_isolate", ap_isolate);
+		if (unlikely(err))
+		{
+			WL_ERR(("set ap_isolate Error (%d)\n", err));
+		}
+	}
+
+	if (params->ht_opmode >= 0) {
+	}
+
+
+	return 0;
+}
+
+static s32
+wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
+	struct ieee80211_channel *chan,
+	enum nl80211_channel_type channel_type)
+{
+	s32 _chan;
+	chanspec_t chspec = 0;
+	chanspec_t fw_chspec = 0;
+	u32 bw = WL_CHANSPEC_BW_20;
+#ifdef WL11ULB
+	u32 ulb_bw = wl_cfg80211_get_ulb_bw(dev->ieee80211_ptr);
+#endif /* WL11ULB */
+
+	s32 err = BCME_OK;
+	s32 bw_cap = 0;
+	struct {
+		u32 band;
+		u32 bw_cap;
+	} param = {0, 0};
+	struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#ifdef CUSTOM_SET_CPUCORE
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE */
+
+	dev = ndev_to_wlc_ndev(dev, cfg);
+	_chan = ieee80211_frequency_to_channel(chan->center_freq);
+	WL_ERR(("netdev_ifidx(%d), chan_type(%d) target channel(%d) \n",
+		dev->ifindex, channel_type, _chan));
+
+
+#ifdef WL11ULB
+	if (ulb_bw) {
+		WL_DBG(("[ULB] setting AP/GO BW to ulb_bw 0x%x \n", ulb_bw));
+		bw = wl_cfg80211_ulbbw_to_ulbchspec(ulb_bw);
+		goto set_channel;
+	}
+#endif /* WL11ULB */
+	if (chan->band ==
NL80211_BAND_5GHZ ) { + param.band = WLC_BAND_5G; + err = wldev_iovar_getbuf(dev, "bw_cap", ¶m, sizeof(param), + cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync); + if (err) { + if (err != BCME_UNSUPPORTED) { + WL_ERR(("bw_cap failed, %d\n", err)); + return err; + } else { + err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap); + if (err) { + WL_ERR(("error get mimo_bw_cap (%d)\n", err)); + } + if (bw_cap != WLC_N_BW_20ALL) + bw = WL_CHANSPEC_BW_40; + } + } else { + if (WL_BW_CAP_80MHZ(cfg->ioctl_buf[0])) + bw = WL_CHANSPEC_BW_80; + else if (WL_BW_CAP_40MHZ(cfg->ioctl_buf[0])) + bw = WL_CHANSPEC_BW_40; + else + bw = WL_CHANSPEC_BW_20; + + } + + } else if (chan->band == NL80211_BAND_2GHZ ) + bw = WL_CHANSPEC_BW_20; +set_channel: + chspec = wf_channel2chspec(_chan, bw); + if (wf_chspec_valid(chspec)) { + fw_chspec = wl_chspec_host_to_driver(chspec); + if (fw_chspec != INVCHANSPEC) { + if ((err = wldev_iovar_setint(dev, "chanspec", + fw_chspec)) == BCME_BADCHAN) { + if (bw == WL_CHANSPEC_BW_80) + goto change_bw; + err = wldev_ioctl(dev, WLC_SET_CHANNEL, + &_chan, sizeof(_chan), true); + if (err < 0) { + WL_ERR(("WLC_SET_CHANNEL error %d" + "chip may not be supporting this channel\n", err)); + } + } else if (err) { + WL_ERR(("failed to set chanspec error %d\n", err)); + } + } else { + WL_ERR(("failed to convert host chanspec to fw chanspec\n")); + err = BCME_ERROR; + } + } else { +change_bw: + if (bw == WL_CHANSPEC_BW_80) + bw = WL_CHANSPEC_BW_40; + else if (bw == WL_CHANSPEC_BW_40) + bw = WL_CHANSPEC_BW_20; + else + bw = 0; + if (bw) + goto set_channel; + WL_ERR(("Invalid chanspec 0x%x\n", chspec)); + err = BCME_ERROR; + } +#ifdef CUSTOM_SET_CPUCORE + if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) { + WL_DBG(("SoftAP mode do not need to set cpucore\n")); + } else if (chspec & WL_CHANSPEC_BW_80) { + /* SoftAp only mode do not need to set cpucore */ + if ((dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) && + dev != bcmcfg_to_prmry_ndev(cfg)) { + /* Soft AP on virtual Iface (AP+STA case) */ + dhd->chan_isvht80 |= DHD_FLAG_HOSTAP_MODE; + dhd_set_cpucore(dhd, TRUE); + } else if (is_p2p_group_iface(dev->ieee80211_ptr)) { + /* If P2P IF is vht80 */ + dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE; + dhd_set_cpucore(dhd, TRUE); + } + } +#endif /* CUSTOM_SET_CPUCORE */ + if (!err && (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) { + /* Update AP/GO operating channel */ + cfg->ap_oper_channel = _chan; + } + return err; +} + +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST +struct net_device * +wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg) +{ + struct net_info *_net_info, *next; + list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) { + if (_net_info->ndev && + test_bit(WL_STATUS_REMAINING_ON_CHANNEL, &_net_info->sme_state)) + return _net_info->ndev; + } + return NULL; +} +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + +static s32 +wl_validate_opensecurity(struct net_device *dev, s32 bssidx, bool privacy) +{ + s32 err = BCME_OK; + u32 wpa_val; + s32 wsec = 0; + + /* set auth */ + err = wldev_iovar_setint_bsscfg(dev, "auth", 0, bssidx); + if (err < 0) { + WL_ERR(("auth error %d\n", err)); + return BCME_ERROR; + } + + if (privacy) { + /* If privacy bit is set in open mode, then WEP would be enabled */ + wsec = WEP_ENABLED; + WL_DBG(("Setting wsec to %d for WEP \n", wsec)); + } + + /* set wsec */ + err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx); + if (err < 0) { + WL_ERR(("wsec error %d\n", err)); + return BCME_ERROR; + } + + /* set upper-layer auth */ + if 
(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_ADHOC) + wpa_val = WPA_AUTH_NONE; + else + wpa_val = WPA_AUTH_DISABLED; + err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_val, bssidx); + if (err < 0) { + WL_ERR(("wpa_auth error %d\n", err)); + return BCME_ERROR; + } + + return 0; +} + +static s32 +wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx) +{ + s32 len = 0; + s32 err = BCME_OK; + u16 auth = 0; /* d11 open authentication */ + u32 wsec; + u32 pval = 0; + u32 gval = 0; + u32 wpa_auth = 0; + wpa_suite_mcast_t *mcast; + wpa_suite_ucast_t *ucast; + wpa_suite_auth_key_mgmt_t *mgmt; + wpa_pmkid_list_t *pmkid; + int cnt = 0; + + u16 suite_count; + u8 rsn_cap[2]; + u32 wme_bss_disable; + + if (wpa2ie == NULL) + goto exit; + + WL_DBG(("Enter \n")); + len = wpa2ie->len - WPA2_VERSION_LEN; + /* check the mcast cipher */ + mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN]; + switch (mcast->type) { + case WPA_CIPHER_NONE: + gval = 0; + break; + case WPA_CIPHER_WEP_40: + case WPA_CIPHER_WEP_104: + gval = WEP_ENABLED; + break; + case WPA_CIPHER_TKIP: + gval = TKIP_ENABLED; + break; + case WPA_CIPHER_AES_CCM: + gval = AES_ENABLED; + break; + default: + WL_ERR(("No Security Info\n")); + break; + } + if ((len -= WPA_SUITE_LEN) <= 0) + return BCME_BADLEN; + + /* check the unicast cipher */ + ucast = (wpa_suite_ucast_t *)&mcast[1]; + suite_count = ltoh16_ua(&ucast->count); + switch (ucast->list[0].type) { + case WPA_CIPHER_NONE: + pval = 0; + break; + case WPA_CIPHER_WEP_40: + case WPA_CIPHER_WEP_104: + pval = WEP_ENABLED; + break; + case WPA_CIPHER_TKIP: + pval = TKIP_ENABLED; + break; + case WPA_CIPHER_AES_CCM: + pval = AES_ENABLED; + break; + default: + WL_ERR(("No Security Info\n")); + } + if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) <= 0) + return BCME_BADLEN; + + /* FOR WPS , set SEC_OW_ENABLED */ + wsec = (pval | gval | SES_OW_ENABLED); + /* check the AKM */ + mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count]; + suite_count = cnt = ltoh16_ua(&mgmt->count); + while (cnt--) { + switch (mgmt->list[cnt].type) { + case RSN_AKM_NONE: + wpa_auth |= WPA_AUTH_NONE; + break; + case RSN_AKM_UNSPECIFIED: + wpa_auth |= WPA2_AUTH_UNSPECIFIED; + break; + case RSN_AKM_PSK: + wpa_auth |= WPA2_AUTH_PSK; + break; + default: + WL_ERR(("No Key Mgmt Info\n")); + } + } + + if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) { + rsn_cap[0] = *(u8 *)&mgmt->list[suite_count]; + rsn_cap[1] = *((u8 *)&mgmt->list[suite_count] + 1); + + if (rsn_cap[0] & (RSN_CAP_16_REPLAY_CNTRS << RSN_CAP_PTK_REPLAY_CNTR_SHIFT)) { + wme_bss_disable = 0; + } else { + wme_bss_disable = 1; + } + + + /* set wme_bss_disable to sync RSN Capabilities */ + err = wldev_iovar_setint_bsscfg(dev, "wme_bss_disable", wme_bss_disable, bssidx); + if (err < 0) { + WL_ERR(("wme_bss_disable error %d\n", err)); + return BCME_ERROR; + } + } else { + WL_DBG(("There is no RSN Capabilities. remained len %d\n", len)); + } + + len -= RSN_CAP_LEN; + if (len >= WPA2_PMKID_COUNT_LEN) { + pmkid = (wpa_pmkid_list_t *)((u8 *)&mgmt->list[suite_count] + RSN_CAP_LEN); + cnt = ltoh16_ua(&pmkid->count); + if (cnt != 0) { + WL_ERR(("AP has non-zero PMKID count. 
Wrong!\n")); + return BCME_ERROR; + } + /* since PMKID cnt is known to be 0 for AP, */ + /* so don't bother to send down this info to firmware */ + } + + + /* set auth */ + err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx); + if (err < 0) { + WL_ERR(("auth error %d\n", err)); + return BCME_ERROR; + } + + /* set wsec */ + err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx); + if (err < 0) { + WL_ERR(("wsec error %d\n", err)); + return BCME_ERROR; + } + + + /* set upper-layer auth */ + err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx); + if (err < 0) { + WL_ERR(("wpa_auth error %d\n", err)); + return BCME_ERROR; + } +exit: + return 0; +} + +static s32 +wl_validate_wpaie(struct net_device *dev, wpa_ie_fixed_t *wpaie, s32 bssidx) +{ + wpa_suite_mcast_t *mcast; + wpa_suite_ucast_t *ucast; + wpa_suite_auth_key_mgmt_t *mgmt; + u16 auth = 0; /* d11 open authentication */ + u16 count; + s32 err = BCME_OK; + s32 len = 0; + u32 i; + u32 wsec; + u32 pval = 0; + u32 gval = 0; + u32 wpa_auth = 0; + u32 tmp = 0; + + if (wpaie == NULL) + goto exit; + WL_DBG(("Enter \n")); + len = wpaie->length; /* value length */ + len -= WPA_IE_TAG_FIXED_LEN; + /* check for multicast cipher suite */ + if (len < WPA_SUITE_LEN) { + WL_INFORM(("no multicast cipher suite\n")); + goto exit; + } + + /* pick up multicast cipher */ + mcast = (wpa_suite_mcast_t *)&wpaie[1]; + len -= WPA_SUITE_LEN; + if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) { + if (IS_WPA_CIPHER(mcast->type)) { + tmp = 0; + switch (mcast->type) { + case WPA_CIPHER_NONE: + tmp = 0; + break; + case WPA_CIPHER_WEP_40: + case WPA_CIPHER_WEP_104: + tmp = WEP_ENABLED; + break; + case WPA_CIPHER_TKIP: + tmp = TKIP_ENABLED; + break; + case WPA_CIPHER_AES_CCM: + tmp = AES_ENABLED; + break; + default: + WL_ERR(("No Security Info\n")); + } + gval |= tmp; + } + } + /* Check for unicast suite(s) */ + if (len < WPA_IE_SUITE_COUNT_LEN) { + WL_INFORM(("no unicast suite\n")); + goto exit; + } + /* walk thru unicast cipher list and pick up what we recognize */ + ucast = (wpa_suite_ucast_t *)&mcast[1]; + count = ltoh16_ua(&ucast->count); + len -= WPA_IE_SUITE_COUNT_LEN; + for (i = 0; i < count && len >= WPA_SUITE_LEN; + i++, len -= WPA_SUITE_LEN) { + if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) { + if (IS_WPA_CIPHER(ucast->list[i].type)) { + tmp = 0; + switch (ucast->list[i].type) { + case WPA_CIPHER_NONE: + tmp = 0; + break; + case WPA_CIPHER_WEP_40: + case WPA_CIPHER_WEP_104: + tmp = WEP_ENABLED; + break; + case WPA_CIPHER_TKIP: + tmp = TKIP_ENABLED; + break; + case WPA_CIPHER_AES_CCM: + tmp = AES_ENABLED; + break; + default: + WL_ERR(("No Security Info\n")); + } + pval |= tmp; + } + } + } + len -= (count - i) * WPA_SUITE_LEN; + /* Check for auth key management suite(s) */ + if (len < WPA_IE_SUITE_COUNT_LEN) { + WL_INFORM((" no auth key mgmt suite\n")); + goto exit; + } + /* walk thru auth management suite list and pick up what we recognize */ + mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count]; + count = ltoh16_ua(&mgmt->count); + len -= WPA_IE_SUITE_COUNT_LEN; + for (i = 0; i < count && len >= WPA_SUITE_LEN; + i++, len -= WPA_SUITE_LEN) { + if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) { + if (IS_WPA_AKM(mgmt->list[i].type)) { + tmp = 0; + switch (mgmt->list[i].type) { + case RSN_AKM_NONE: + tmp = WPA_AUTH_NONE; + break; + case RSN_AKM_UNSPECIFIED: + tmp = WPA_AUTH_UNSPECIFIED; + break; + case RSN_AKM_PSK: + tmp = WPA_AUTH_PSK; + break; + default: + WL_ERR(("No Key Mgmt Info\n")); + } + wpa_auth |= tmp; + } + } + + } + /* 
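note: the constant actually used below is SES_OW_ENABLED (settings overwrite), which lets WPS rewrite the security settings -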
FOR WPS , set SEC_OW_ENABLED */ + wsec = (pval | gval | SES_OW_ENABLED); + /* set auth */ + err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx); + if (err < 0) { + WL_ERR(("auth error %d\n", err)); + return BCME_ERROR; + } + /* set wsec */ + err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx); + if (err < 0) { + WL_ERR(("wsec error %d\n", err)); + return BCME_ERROR; + } + /* set upper-layer auth */ + err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx); + if (err < 0) { + WL_ERR(("wpa_auth error %d\n", err)); + return BCME_ERROR; + } +exit: + return 0; +} + + +static s32 +wl_cfg80211_bcn_validate_sec( + struct net_device *dev, + struct parsed_ies *ies, + u32 dev_role, + s32 bssidx, + bool privacy) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + wl_cfgbss_t *bss = wl_get_cfgbss_by_wdev(cfg, dev->ieee80211_ptr); + + if (!bss) { + WL_ERR(("cfgbss is NULL \n")); + return BCME_ERROR; + } + + if (dev_role == NL80211_IFTYPE_P2P_GO && (ies->wpa2_ie)) { + /* For P2P GO, the sec type is WPA2-PSK */ + WL_DBG(("P2P GO: validating wpa2_ie")); + if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0) + return BCME_ERROR; + + } else if (dev_role == NL80211_IFTYPE_AP) { + + WL_DBG(("SoftAP: validating security")); + /* If wpa2_ie or wpa_ie is present validate it */ + + if ((ies->wpa2_ie || ies->wpa_ie) && + ((wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0 || + wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0))) { + bss->security_mode = false; + return BCME_ERROR; + } + + bss->security_mode = true; + if (bss->rsn_ie) { + kfree(bss->rsn_ie); + bss->rsn_ie = NULL; + } + if (bss->wpa_ie) { + kfree(bss->wpa_ie); + bss->wpa_ie = NULL; + } + if (bss->wps_ie) { + kfree(bss->wps_ie); + bss->wps_ie = NULL; + } + if (ies->wpa_ie != NULL) { + /* WPAIE */ + bss->rsn_ie = NULL; + bss->wpa_ie = kmemdup(ies->wpa_ie, + ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + } else if (ies->wpa2_ie != NULL) { + /* RSNIE */ + bss->wpa_ie = NULL; + bss->rsn_ie = kmemdup(ies->wpa2_ie, + ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + } + if (!ies->wpa2_ie && !ies->wpa_ie) { + wl_validate_opensecurity(dev, bssidx, privacy); + bss->security_mode = false; + } + + if (ies->wps_ie) { + bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL); + } + } + + return 0; + +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) +static s32 wl_cfg80211_bcn_set_params( + struct cfg80211_ap_settings *info, + struct net_device *dev, + u32 dev_role, s32 bssidx) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + s32 err = BCME_OK; + + WL_DBG(("interval (%d) \ndtim_period (%d) \n", + info->beacon_interval, info->dtim_period)); + + if (info->beacon_interval) { + if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD, + &info->beacon_interval, sizeof(s32), true)) < 0) { + WL_ERR(("Beacon Interval Set Error, %d\n", err)); + return err; + } + } + + if (info->dtim_period) { + if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD, + &info->dtim_period, sizeof(s32), true)) < 0) { + WL_ERR(("DTIM Interval Set Error, %d\n", err)); + return err; + } + } + + if ((info->ssid) && (info->ssid_len > 0) && + (info->ssid_len <= 32)) { + WL_DBG(("SSID (%s) len:%zd \n", info->ssid, info->ssid_len)); + if (dev_role == NL80211_IFTYPE_AP) { + /* Store the hostapd SSID */ + memset(cfg->hostapd_ssid.SSID, 0x00, 32); + memcpy(cfg->hostapd_ssid.SSID, info->ssid, info->ssid_len); + cfg->hostapd_ssid.SSID_len = info->ssid_len; + } else { + /* P2P GO */ + memset(cfg->p2p->ssid.SSID, 0x00, 32); + memcpy(cfg->p2p->ssid.SSID, info->ssid, 
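/* ssid_len was bounds-checked (0 < len <= 32) by the enclosing if */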
info->ssid_len); + cfg->p2p->ssid.SSID_len = info->ssid_len; + } + } + + if (info->hidden_ssid) { + if ((err = wldev_iovar_setint(dev, "closednet", 1)) < 0) + WL_ERR(("failed to set hidden : %d\n", err)); + WL_DBG(("hidden_ssid_enum_val: %d \n", info->hidden_ssid)); + } + + return err; +} +#endif + +static s32 +wl_cfg80211_parse_ies(u8 *ptr, u32 len, struct parsed_ies *ies) +{ + s32 err = BCME_OK; + + memset(ies, 0, sizeof(struct parsed_ies)); + + /* find the WPSIE */ + if ((ies->wps_ie = wl_cfgp2p_find_wpsie(ptr, len)) != NULL) { + WL_DBG(("WPSIE in beacon \n")); + ies->wps_ie_len = ies->wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN; + } else { + WL_ERR(("No WPSIE in beacon \n")); + } + + /* find the RSN_IE */ + if ((ies->wpa2_ie = bcm_parse_tlvs(ptr, len, + DOT11_MNG_RSN_ID)) != NULL) { + WL_DBG((" WPA2 IE found\n")); + ies->wpa2_ie_len = ies->wpa2_ie->len; + } + + /* find the WPA_IE */ + if ((ies->wpa_ie = wl_cfgp2p_find_wpaie(ptr, len)) != NULL) { + WL_DBG((" WPA found\n")); + ies->wpa_ie_len = ies->wpa_ie->length; + } + + return err; + +} + +#define MAX_AP_LINK_WAIT_TIME 10000 +static s32 +wl_cfg80211_bcn_bringup_ap( + struct net_device *dev, + struct parsed_ies *ies, + u32 dev_role, s32 bssidx) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wl_join_params join_params; + struct wiphy *wiphy; + bool is_bssup = false; + s32 infra = 1; + s32 join_params_size = 0; + s32 ap = 1; + s32 pm; + s32 wsec; +#ifdef SOFTAP_UAPSD_OFF + uint32 wme_apsd = 0; +#endif /* SOFTAP_UAPSD_OFF */ + s32 err = BCME_OK; + s32 is_rsdb_supported = BCME_ERROR; + u32 timeout; +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); +#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */ + + is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE); + if (is_rsdb_supported < 0) + return (-ENODEV); + + WL_DBG(("Enter dev_role:%d bssidx:%d\n", dev_role, bssidx)); + + /* Common code for SoftAP and P2P GO */ + wiphy = bcmcfg_to_wiphy(cfg); + if (wl_check_dongle_idle(wiphy) != TRUE) { + WL_ERR(("FW is busy to add interface")); + return -EINVAL; + } + wldev_iovar_setint(dev, "mpc", 0); + + wl_clr_drv_status(cfg, AP_CREATED, dev); + + if (dev_role == NL80211_IFTYPE_P2P_GO) { + is_bssup = wl_cfgp2p_bss_isup(dev, bssidx); + if (!is_bssup && (ies->wpa2_ie != NULL)) { + + err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true); + if (err < 0) { + WL_ERR(("SET INFRA error %d\n", err)); + goto exit; + } + + err = wldev_iovar_setbuf_bsscfg(dev, "ssid", &cfg->p2p->ssid, + sizeof(cfg->p2p->ssid), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, + bssidx, &cfg->ioctl_buf_sync); + if (err < 0) { + WL_ERR(("GO SSID setting error %d\n", err)); + goto exit; + } + + /* Do abort scan before creating GO */ + wl_cfg80211_scan_abort(cfg); + + if ((err = wl_cfgp2p_bss(cfg, dev, bssidx, 1)) < 0) { + WL_ERR(("GO Bring up error %d\n", err)); + goto exit; + } + } else + WL_DBG(("Bss is already up\n")); + } else if ((dev_role == NL80211_IFTYPE_AP) && + (wl_get_drv_status(cfg, AP_CREATING, dev))) { + + /* Device role SoftAP */ + WL_DBG(("Creating AP bssidx:%d dev_role:%d\n", bssidx, dev_role)); + + /* Clear the status bit after use */ + wl_clr_drv_status(cfg, AP_CREATING, dev); + + /* AP on primary Interface */ + if (bssidx == 0) { + if (is_rsdb_supported) { + if ((err = wl_cfg80211_add_del_bss(cfg, dev, bssidx, + NL80211_IFTYPE_AP, 0, NULL)) < 0) { + WL_ERR(("wl add_del_bss returned error:%d\n", err)); + goto exit; + } + } else if (is_rsdb_supported == 0) { + /* AP mode switch not 
supported. Try setting up AP explicitly */ + err = wldev_ioctl(dev, WLC_DOWN, &ap, sizeof(s32), true); + if (err < 0) { + WL_ERR(("WLC_DOWN error %d\n", err)); + goto exit; + } + err = wldev_iovar_setint(dev, "apsta", 0); + if (err < 0) { + WL_ERR(("wl apsta 0 error %d\n", err)); + goto exit; + } + + if ((err = wldev_ioctl(dev, + WLC_SET_AP, &ap, sizeof(s32), true)) < 0) { + WL_ERR(("setting AP mode failed %d \n", err)); + goto exit; + } + + } + + pm = 0; + if ((err = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), true)) != 0) { + WL_ERR(("wl PM 0 returned error:%d\n", err)); + goto exit; + } + + err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true); + if (err < 0) { + WL_ERR(("SET INFRA error %d\n", err)); + goto exit; + } + } else if (cfg->cfgdev_bssidx && (bssidx == cfg->cfgdev_bssidx)) { + + WL_DBG(("Bringup SoftAP on virtual Interface bssidx:%d \n", bssidx)); + + if ((err = wl_cfg80211_add_del_bss(cfg, dev, + bssidx, NL80211_IFTYPE_AP, 0, NULL)) < 0) { + WL_ERR(("wl bss ap returned error:%d\n", err)); + goto exit; + } + + } + +#ifdef SOFTAP_UAPSD_OFF + err = wldev_iovar_setbuf_bsscfg(dev, "wme_apsd", &wme_apsd, sizeof(wme_apsd), + cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync); + if (err < 0) { + WL_ERR(("failed to disable uapsd, error=%d\n", err)); + } +#endif /* SOFTAP_UAPSD_OFF */ + + err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true); + if (unlikely(err)) { + WL_ERR(("WLC_UP error (%d)\n", err)); + goto exit; + } + + err = wldev_iovar_getint(dev, "wsec", (s32 *)&wsec); + if (unlikely(err)) { + WL_ERR(("Could not get wsec %d\n", err)); + goto exit; + } + if ((wsec == WEP_ENABLED) && cfg->wep_key.len) { + WL_DBG(("Applying buffered WEP KEY \n")); + err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &cfg->wep_key, + sizeof(struct wl_wsec_key), cfg->ioctl_buf, + WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync); + /* clear the key after use */ + memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key)); + if (unlikely(err)) { + WL_ERR(("WLC_SET_KEY error (%d)\n", err)); + goto exit; + } + } + + memset(&join_params, 0, sizeof(join_params)); + /* join parameters starts with ssid */ + join_params_size = sizeof(join_params.ssid); + memcpy(join_params.ssid.SSID, cfg->hostapd_ssid.SSID, + cfg->hostapd_ssid.SSID_len); + join_params.ssid.SSID_len = htod32(cfg->hostapd_ssid.SSID_len); + + /* create softap */ + if ((err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, + join_params_size, true)) != 0) { + WL_ERR(("SoftAP/GO set ssid failed! \n")); + goto exit; + } else { + WL_DBG((" SoftAP SSID \"%s\" \n", join_params.ssid.SSID)); + } + + if (bssidx != 0) { + /* AP on Virtual Interface */ + if ((err = wl_cfgp2p_bss(cfg, dev, bssidx, 1)) < 0) { + WL_ERR(("GO Bring up error %d\n", err)); + goto exit; + } + } + + } + /* Wait for Linkup event to mark successful AP/GO bring up */ + timeout = wait_event_interruptible_timeout(cfg->netif_change_event, + wl_get_drv_status(cfg, AP_CREATED, dev), msecs_to_jiffies(MAX_AP_LINK_WAIT_TIME)); + if (timeout <= 0 || !wl_get_drv_status(cfg, AP_CREATED, dev)) { + WL_ERR(("Link up didn't come for AP interface. AP/GO creation failed! 
\n")); +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + if (dhdp->memdump_enabled) { + dhdp->memdump_type = DUMP_TYPE_AP_LINKUP_FAILURE; + dhd_bus_mem_dump(dhdp); + } +#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */ + err = -ENODEV; + goto exit; + } + +exit: + if (cfg->wep_key.len) + memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key)); + return err; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) +s32 +wl_cfg80211_parse_ap_ies( + struct net_device *dev, + struct cfg80211_beacon_data *info, + struct parsed_ies *ies) +{ + struct parsed_ies prb_ies; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + u8 *vndr = NULL; + u32 vndr_ie_len = 0; + s32 err = BCME_OK; + + /* Parse Beacon IEs */ + if (wl_cfg80211_parse_ies((u8 *)info->tail, + info->tail_len, ies) < 0) { + WL_ERR(("Beacon get IEs failed \n")); + err = -EINVAL; + goto fail; + } + + vndr = (u8 *)info->proberesp_ies; + vndr_ie_len = info->proberesp_ies_len; + + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + /* SoftAP mode */ + struct ieee80211_mgmt *mgmt; + mgmt = (struct ieee80211_mgmt *)info->probe_resp; + if (mgmt != NULL) { + vndr = (u8 *)&mgmt->u.probe_resp.variable; + vndr_ie_len = info->probe_resp_len - + offsetof(struct ieee80211_mgmt, u.probe_resp.variable); + } + } + + /* Parse Probe Response IEs */ + if (wl_cfg80211_parse_ies(vndr, vndr_ie_len, &prb_ies) < 0) { + WL_ERR(("PROBE RESP get IEs failed \n")); + err = -EINVAL; + } + +fail: + + return err; +} + +s32 +wl_cfg80211_set_ies( + struct net_device *dev, + struct cfg80211_beacon_data *info, + s32 bssidx) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + u8 *vndr = NULL; + u32 vndr_ie_len = 0; + s32 err = BCME_OK; + + /* Set Beacon IEs to FW */ + if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, + VNDR_IE_BEACON_FLAG, (const u8 *)info->tail, + info->tail_len)) < 0) { + WL_ERR(("Set Beacon IE Failed \n")); + } else { + WL_DBG(("Applied Vndr IEs for Beacon \n")); + } + + vndr = (u8 *)info->proberesp_ies; + vndr_ie_len = info->proberesp_ies_len; + + if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) { + /* SoftAP mode */ + struct ieee80211_mgmt *mgmt; + mgmt = (struct ieee80211_mgmt *)info->probe_resp; + if (mgmt != NULL) { + vndr = (u8 *)&mgmt->u.probe_resp.variable; + vndr_ie_len = info->probe_resp_len - + offsetof(struct ieee80211_mgmt, u.probe_resp.variable); + } + } + + /* Set Probe Response IEs to FW */ + if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, + VNDR_IE_PRBRSP_FLAG, vndr, vndr_ie_len)) < 0) { + WL_ERR(("Set Probe Resp IE Failed \n")); + } else { + WL_DBG(("Applied Vndr IEs for Probe Resp \n")); + } + + return err; +} +#endif + +static s32 wl_cfg80211_hostapd_sec( + struct net_device *dev, + struct parsed_ies *ies, + s32 bssidx) +{ + bool update_bss = 0; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + wl_cfgbss_t *bss = wl_get_cfgbss_by_wdev(cfg, dev->ieee80211_ptr); + + if (!bss) { + WL_ERR(("cfgbss is NULL \n")); + return -EINVAL; + } + + if (ies->wps_ie) { + if (bss->wps_ie && + memcmp(bss->wps_ie, ies->wps_ie, ies->wps_ie_len)) { + WL_DBG((" WPS IE is changed\n")); + kfree(bss->wps_ie); + bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL); + } else if (bss->wps_ie == NULL) { + WL_DBG((" WPS IE is added\n")); + bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL); + } + + if ((ies->wpa_ie != NULL || ies->wpa2_ie != NULL)) { + if (!bss->security_mode) { + /* change from open mode to security 
mode */ + update_bss = true; + if (ies->wpa_ie != NULL) { + bss->wpa_ie = kmemdup(ies->wpa_ie, + ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + } else { + bss->rsn_ie = kmemdup(ies->wpa2_ie, + ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + } + } else if (bss->wpa_ie) { + /* change from WPA2 mode to WPA mode */ + if (ies->wpa_ie != NULL) { + update_bss = true; + kfree(bss->rsn_ie); + bss->rsn_ie = NULL; + bss->wpa_ie = kmemdup(ies->wpa_ie, + ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + } else if (memcmp(bss->rsn_ie, + ies->wpa2_ie, ies->wpa2_ie->len + + WPA_RSN_IE_TAG_FIXED_LEN)) { + update_bss = true; + kfree(bss->rsn_ie); + bss->rsn_ie = kmemdup(ies->wpa2_ie, + ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN, + GFP_KERNEL); + bss->wpa_ie = NULL; + } + } + if (update_bss) { + bss->security_mode = true; + wl_cfgp2p_bss(cfg, dev, bssidx, 0); + if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0 || + wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0) { + return BCME_ERROR; + } + wl_cfgp2p_bss(cfg, dev, bssidx, 1); + } + } + } else { + WL_ERR(("No WPSIE in beacon \n")); + } + return 0; +} + +#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \ + 2, 0)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) +static s32 +wl_cfg80211_del_station( + struct wiphy *wiphy, struct net_device *ndev, + struct station_del_parameters *params) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +static s32 +wl_cfg80211_del_station( + struct wiphy *wiphy, + struct net_device *ndev, + const u8* mac_addr) +#else +static s32 +wl_cfg80211_del_station( + struct wiphy *wiphy, + struct net_device *ndev, + u8* mac_addr) +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */ +{ + struct net_device *dev; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + scb_val_t scb_val; + s8 eabuf[ETHER_ADDR_STR_LEN]; + int err; + char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV * + sizeof(struct ether_addr) + sizeof(uint)] = {0}; + struct maclist *assoc_maclist = (struct maclist *)mac_buf; + int num_associated = 0; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) + const u8 *mac_addr = params->mac; +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */ + + WL_DBG(("Entry\n")); + if (mac_addr == NULL) { + WL_DBG(("mac_addr is NULL ignore it\n")); + return 0; + } + + dev = ndev_to_wlc_ndev(ndev, cfg); + + if (p2p_is_on(cfg)) { + /* Suspend P2P discovery search-listen to prevent it from changing the + * channel. 
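+ * Otherwise the deauth below could be sent while the radio sits on a P2P listen channel.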
+ */ + if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) { + WL_ERR(("Can not disable discovery mode\n")); + return -EFAULT; + } + } + + assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV; + err = wldev_ioctl(ndev, WLC_GET_ASSOCLIST, + assoc_maclist, sizeof(mac_buf), false); + if (err < 0) + WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err)); + else + num_associated = assoc_maclist->count; + + memcpy(scb_val.ea.octet, mac_addr, ETHER_ADDR_LEN); + scb_val.val = DOT11_RC_DEAUTH_LEAVING; + err = wldev_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val, + sizeof(scb_val_t), true); + if (err < 0) + WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON err %d\n", err)); + WL_ERR(("Disconnect STA : %s scb_val.val %d\n", + bcm_ether_ntoa((const struct ether_addr *)mac_addr, eabuf), + scb_val.val)); + + if (num_associated > 0 && ETHER_ISBCAST(mac_addr)) + wl_delay(400); + + return 0; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) +static s32 +wl_cfg80211_change_station( + struct wiphy *wiphy, + struct net_device *dev, + const u8 *mac, + struct station_parameters *params) +#else +static s32 +wl_cfg80211_change_station( + struct wiphy *wiphy, + struct net_device *dev, + u8 *mac, + struct station_parameters *params) +#endif +{ + int err; +#ifdef DHD_LOSSLESS_ROAMING + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); +#endif + + WL_DBG(("SCB_AUTHORIZE mac_addr:"MACDBG" sta_flags_mask:0x%x " + "sta_flags_set:0x%x iface:%s \n", MAC2STRDBG(mac), + params->sta_flags_mask, params->sta_flags_set, dev->name)); + + /* Processing only authorize/de-authorize flag for now */ + if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) { + WL_ERR(("WLC_SCB_AUTHORIZE sta_flags_mask not set \n")); + return -ENOTSUPP; + } + + if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) + err = wldev_ioctl(dev, WLC_SCB_DEAUTHORIZE, (u8 *)mac, ETH_ALEN, true); +#else + err = wldev_ioctl(dev, WLC_SCB_DEAUTHORIZE, mac, ETH_ALEN, true); +#endif + if (err) + WL_ERR(("WLC_SCB_DEAUTHORIZE error (%d)\n", err)); + return err; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) + err = wldev_ioctl(dev, WLC_SCB_AUTHORIZE, (u8 *)mac, ETH_ALEN, true); +#else + err = wldev_ioctl(dev, WLC_SCB_AUTHORIZE, mac, ETH_ALEN, true); +#endif + if (err) + WL_ERR(("WLC_SCB_AUTHORIZE error (%d)\n", err)); +#ifdef DHD_LOSSLESS_ROAMING + wl_del_roam_timeout(cfg); +#endif + return err; +} +#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */ + +static s32 +wl_cfg80211_set_scb_timings( + struct bcm_cfg80211 *cfg, + struct net_device *dev) +{ + int err; + u32 ps_pretend; + wl_scb_probe_t scb_probe; + + bzero(&scb_probe, sizeof(wl_scb_probe_t)); + scb_probe.scb_timeout = WL_SCB_TIMEOUT; + scb_probe.scb_activity_time = WL_SCB_ACTIVITY_TIME; + scb_probe.scb_max_probe = WL_SCB_MAX_PROBE; + err = wldev_iovar_setbuf(dev, "scb_probe", (void *)&scb_probe, + sizeof(wl_scb_probe_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN, + &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("set 'scb_probe' failed, error = %d\n", err)); + return err; + } + + ps_pretend = MAX(WL_SCB_MAX_PROBE / 2, WL_MIN_PSPRETEND_THRESHOLD); + err = wldev_iovar_setint(dev, "pspretend_threshold", ps_pretend); + if (unlikely(err)) { + if (err == BCME_UNSUPPORTED) { + /* Ignore error if fw doesn't support the iovar */ + WL_DBG(("wl pspretend_threshold %d set error %d\n", + ps_pretend, err)); + } else { + WL_ERR(("wl pspretend_threshold %d set error %d\n", + ps_pretend, err)); + return err; 
+ } + } + + return 0; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) +static s32 +wl_cfg80211_start_ap( + struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_ap_settings *info) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 err = BCME_OK; + struct parsed_ies ies; + s32 bssidx = 0; + u32 dev_role = 0; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + + WL_DBG(("Enter \n")); + +#if defined(SUPPORT_RANDOM_MAC_SCAN) + wl_cfg80211_set_random_mac(dev, FALSE); +#endif /* SUPPORT_RANDOM_MAC_SCAN */ + + if ((dev == bcmcfg_to_prmry_ndev(cfg)) || + (dev == ((struct net_device *)cfgdev_to_ndev(cfg->bss_cfgdev)))) { + WL_DBG(("Start AP req on iface: %s \n", dev->name)); + dev_role = NL80211_IFTYPE_AP; + } +#if defined(WL_ENABLE_P2P_IF) + else if (dev == cfg->p2p_net) { + /* Group Add request on p2p0 */ + WL_DBG(("Start AP req on P2P iface: GO\n")); + dev = bcmcfg_to_prmry_ndev(cfg); + dev_role = NL80211_IFTYPE_P2P_GO; + } +#endif /* WL_ENABLE_P2P_IF */ + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + if (p2p_is_on(cfg) && (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO)) { + dev_role = NL80211_IFTYPE_P2P_GO; + } else if (dev_role == NL80211_IFTYPE_AP) { + dhd->op_mode |= DHD_FLAG_HOSTAP_MODE; + /* + * Enabling Softap is causing issues with STA NDO operations + * as NDO is not interface specific. So disable NDO while + * Softap is enabled + */ + err = dhd_ndo_enable(dhd, FALSE); + WL_DBG(("%s: Disabling NDO on Hostapd mode %d\n", __FUNCTION__, err)); + if (err) { + /* Non fatal error. */ + WL_ERR(("%s: Disabling NDO Failed %d\n", __FUNCTION__, err)); + } else { + cfg->revert_ndo_disable = true; + } + +#ifdef PKT_FILTER_SUPPORT + /* Disable packet filter */ + if (dhd->early_suspended) { + WL_ERR(("Disable pkt_filter\n")); + dhd_enable_packet_filter(0, dhd); + } +#endif /* PKT_FILTER_SUPPORT */ +#ifdef ARP_OFFLOAD_SUPPORT + /* IF SoftAP is enabled, disable arpoe */ + dhd_arp_offload_set(dhd, 0); + dhd_arp_offload_enable(dhd, FALSE); +#endif /* ARP_OFFLOAD_SUPPORT */ + if ((dhd->op_mode & DHD_FLAG_STA_MODE) && wl_cfg80211_is_roam_offload()) { + WL_ERR(("Cleare roam_offload_bssid_list at STA-SoftAP MODE.\n")); + wl_android_set_roam_offload_bssid_list(dev, "0"); + } + } else { + /* only AP or GO role need to be handled here. */ + err = -EINVAL; + goto fail; + } + + if (!check_dev_role_integrity(cfg, dev_role)) { + err = -EINVAL; + goto fail; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + if ((err = wl_cfg80211_set_channel(wiphy, dev, + dev->ieee80211_ptr->preset_chandef.chan, + NL80211_CHAN_HT20) < 0)) { + WL_ERR(("Set channel failed \n")); + goto fail; + } +#endif + + if ((err = wl_cfg80211_bcn_set_params(info, dev, + dev_role, bssidx)) < 0) { + WL_ERR(("Beacon params set failed \n")); + goto fail; + } + + /* Parse IEs */ + if ((err = wl_cfg80211_parse_ap_ies(dev, &info->beacon, &ies)) < 0) { + WL_ERR(("Set IEs failed \n")); + goto fail; + } + + if ((err = wl_cfg80211_bcn_validate_sec(dev, &ies, + dev_role, bssidx, info->privacy)) < 0) + { + WL_ERR(("Beacon set security failed \n")); + goto fail; + } + + if ((err = wl_cfg80211_bcn_bringup_ap(dev, &ies, + dev_role, bssidx)) < 0) { + WL_ERR(("Beacon bring up AP/GO failed \n")); + goto fail; + } + + /* Set GC/STA SCB expiry timings. 
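(scb_probe: WL_SCB_TIMEOUT / WL_SCB_ACTIVITY_TIME / WL_SCB_MAX_PROBE, plus a pspretend threshold where the firmware supports it)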
*/ + if ((err = wl_cfg80211_set_scb_timings(cfg, dev))) { + WL_ERR(("scb setting failed \n")); + goto fail; + } + + WL_DBG(("** AP/GO Created **\n")); + +#ifdef WL_CFG80211_ACL + /* Enfoce Admission Control. */ + if ((err = wl_cfg80211_set_mac_acl(wiphy, dev, info->acl)) < 0) { + WL_ERR(("Set ACL failed\n")); + } +#endif /* WL_CFG80211_ACL */ + + /* Set IEs to FW */ + if ((err = wl_cfg80211_set_ies(dev, &info->beacon, bssidx)) < 0) + WL_ERR(("Set IEs failed \n")); + + /* Enable Probe Req filter, WPS-AP certification 4.2.13 */ + if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) { + bool pbc = 0; + wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc); + if (pbc) { + WL_DBG(("set WLC_E_PROBREQ_MSG\n")); + wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true); + } + } + +fail: + if (err) { + WL_ERR(("ADD/SET beacon failed\n")); + wldev_iovar_setint(dev, "mpc", 1); + if (dev_role == NL80211_IFTYPE_AP) { + dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE; + +#ifdef PKT_FILTER_SUPPORT + /* Enable packet filter */ + if (dhd->early_suspended) { + WL_ERR(("Enable pkt_filter\n")); + dhd_enable_packet_filter(1, dhd); + } +#endif /* PKT_FILTER_SUPPORT */ + } + } + + return err; +} + +static s32 +wl_cfg80211_stop_ap( + struct wiphy *wiphy, + struct net_device *dev) +{ + int err = 0; + u32 dev_role = 0; + int infra = 0; + int ap = 0; + s32 bssidx = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 is_rsdb_supported = BCME_ERROR; + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + + WL_DBG(("Enter \n")); + + is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE); + if (is_rsdb_supported < 0) + return (-ENODEV); + + wl_clr_drv_status(cfg, AP_CREATING, dev); + wl_clr_drv_status(cfg, AP_CREATED, dev); + cfg->ap_oper_channel = 0; + + if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) { + dev_role = NL80211_IFTYPE_AP; + WL_DBG(("stopping AP operation\n")); + } else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) { + dev_role = NL80211_IFTYPE_P2P_GO; + WL_DBG(("stopping P2P GO operation\n")); + } else { + WL_ERR(("no AP/P2P GO interface is operational.\n")); + return -EINVAL; + } + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + if (!check_dev_role_integrity(cfg, dev_role)) { + WL_ERR(("role integrity check failed \n")); + err = -EINVAL; + goto exit; + } + + if ((err = wl_cfgp2p_bss(cfg, dev, bssidx, 0)) < 0) { + WL_ERR(("bss down error %d\n", err)); + } + + if (dev_role == NL80211_IFTYPE_AP) { + if (cfg->revert_ndo_disable == true) { + err = dhd_ndo_enable(dhd, TRUE); + WL_DBG(("%s: Enabling back NDO on Softap turn off %d\n", + __FUNCTION__, err)); + if (err) { + WL_ERR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, err)); + } + cfg->revert_ndo_disable = false; + } + +#ifdef PKT_FILTER_SUPPORT + /* Enable packet filter */ + if (dhd->early_suspended) { + WL_ERR(("Enable pkt_filter\n")); + dhd_enable_packet_filter(1, dhd); + } +#endif /* PKT_FILTER_SUPPORT */ +#ifdef ARP_OFFLOAD_SUPPORT + /* IF SoftAP is disabled, enable arpoe back for STA mode. */ + dhd_arp_offload_set(dhd, dhd_arp_mode); + dhd_arp_offload_enable(dhd, TRUE); +#endif /* ARP_OFFLOAD_SUPPORT */ + /* + * Bring down the AP interface by changing role to STA. 
+ * Don't do a down or "WLC_SET_AP 0" since the shared + * interface may be still running + */ + if (is_rsdb_supported) { + if ((err = wl_cfg80211_add_del_bss(cfg, dev, + bssidx, NL80211_IFTYPE_STATION, 0, NULL)) < 0) { + if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), + true)) < 0) { + WL_ERR(("setting AP mode failed %d \n", err)); + err = -ENOTSUPP; + goto exit; + } + } + } else if (is_rsdb_supported == 0) { + err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true); + if (err < 0) { + WL_ERR(("SET INFRA error %d\n", err)); + err = -ENOTSUPP; + goto exit; + } + err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true); + if (unlikely(err)) { + WL_ERR(("WLC_UP error (%d)\n", err)); + err = -EINVAL; + goto exit; + } + } + + /* Turn on the MPC */ + wldev_iovar_setint(dev, "mpc", 1); + + wl_cfg80211_clear_per_bss_ies(cfg, bssidx); + } else { + WL_DBG(("Stopping P2P GO \n")); + DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE((dhd_pub_t *)(cfg->pub), + DHD_EVENT_TIMEOUT_MS*3); + DHD_OS_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub)); + } + +exit: + + if (dev_role == NL80211_IFTYPE_AP) { + /* clear the AP mode */ + dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE; + } + return err; +} + +static s32 +wl_cfg80211_change_beacon( + struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_beacon_data *info) +{ + s32 err = BCME_OK; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct parsed_ies ies; + u32 dev_role = 0; + s32 bssidx = 0; + bool pbc = 0; + + WL_DBG(("Enter \n")); + + if (dev == bcmcfg_to_prmry_ndev(cfg)) { + dev_role = NL80211_IFTYPE_AP; + } +#if defined(WL_ENABLE_P2P_IF) + else if (dev == cfg->p2p_net) { + /* Group Add request on p2p0 */ + dev = bcmcfg_to_prmry_ndev(cfg); + dev_role = NL80211_IFTYPE_P2P_GO; + } +#endif /* WL_ENABLE_P2P_IF */ + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) { + dev_role = NL80211_IFTYPE_P2P_GO; + } + + if (!check_dev_role_integrity(cfg, dev_role)) { + err = -EINVAL; + goto fail; + } + + if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) { + WL_ERR(("P2P already down status!\n")); + err = BCME_ERROR; + goto fail; + } + + /* Parse IEs */ + if ((err = wl_cfg80211_parse_ap_ies(dev, info, &ies)) < 0) { + WL_ERR(("Parse IEs failed \n")); + goto fail; + } + + /* Set IEs to FW */ + if ((err = wl_cfg80211_set_ies(dev, info, bssidx)) < 0) { + WL_ERR(("Set IEs failed \n")); + goto fail; + } + + if (dev_role == NL80211_IFTYPE_AP) { + if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) { + WL_ERR(("Hostapd update sec failed \n")); + err = -EINVAL; + goto fail; + } + /* Enable Probe Req filter, WPS-AP certification 4.2.13 */ + if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) { + wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc); + WL_DBG((" WPS AP, wps_ie is exists pbc=%d\n", pbc)); + if (pbc) + wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true); + else + wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false); + } + } + +fail: + return err; +} +#else +static s32 +wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev, + struct beacon_parameters *info) +{ + s32 err = BCME_OK; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + s32 ie_offset = 0; + s32 bssidx = 0; + u32 dev_role = NL80211_IFTYPE_AP; + struct parsed_ies ies; + bcm_tlv_t *ssid_ie; + bool pbc = 0; + bool privacy; + bool is_bss_up = 0; + dhd_pub_t *dhd = (dhd_pub_t 
*)(cfg->pub); + + WL_DBG(("interval (%d) dtim_period (%d) head_len (%d) tail_len (%d)\n", + info->interval, info->dtim_period, info->head_len, info->tail_len)); + + if (dev == bcmcfg_to_prmry_ndev(cfg)) { + dev_role = NL80211_IFTYPE_AP; + } +#if defined(WL_ENABLE_P2P_IF) + else if (dev == cfg->p2p_net) { + /* Group Add request on p2p0 */ + dev = bcmcfg_to_prmry_ndev(cfg); + dev_role = NL80211_IFTYPE_P2P_GO; + } +#endif /* WL_ENABLE_P2P_IF */ + + if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) { + WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr)); + return BCME_ERROR; + } + + if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) { + dev_role = NL80211_IFTYPE_P2P_GO; + } else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) { + dhd->op_mode |= DHD_FLAG_HOSTAP_MODE; + } + + if (!check_dev_role_integrity(cfg, dev_role)) { + err = -ENODEV; + goto fail; + } + + if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) { + WL_ERR(("P2P already down status!\n")); + err = BCME_ERROR; + goto fail; + } + + ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN; + /* find the SSID */ + if ((ssid_ie = bcm_parse_tlvs((u8 *)&info->head[ie_offset], + info->head_len - ie_offset, + DOT11_MNG_SSID_ID)) != NULL) { + if (dev_role == NL80211_IFTYPE_AP) { + /* Store the hostapd SSID */ + memset(&cfg->hostapd_ssid.SSID[0], 0x00, 32); + memcpy(&cfg->hostapd_ssid.SSID[0], ssid_ie->data, ssid_ie->len); + cfg->hostapd_ssid.SSID_len = ssid_ie->len; + } else { + /* P2P GO */ + memset(&cfg->p2p->ssid.SSID[0], 0x00, 32); + memcpy(cfg->p2p->ssid.SSID, ssid_ie->data, ssid_ie->len); + cfg->p2p->ssid.SSID_len = ssid_ie->len; + } + } + + if (wl_cfg80211_parse_ies((u8 *)info->tail, + info->tail_len, &ies) < 0) { + WL_ERR(("Beacon get IEs failed \n")); + err = -EINVAL; + goto fail; + } + + if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, + VNDR_IE_BEACON_FLAG, (u8 *)info->tail, + info->tail_len)) < 0) { + WL_ERR(("Beacon set IEs failed \n")); + goto fail; + } else { + WL_DBG(("Applied Vndr IEs for Beacon \n")); + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx, + VNDR_IE_PRBRSP_FLAG, (u8 *)info->proberesp_ies, + info->proberesp_ies_len)) < 0) { + WL_ERR(("ProbeRsp set IEs failed \n")); + goto fail; + } else { + WL_DBG(("Applied Vndr IEs for ProbeRsp \n")); + } +#endif + + is_bss_up = wl_cfgp2p_bss_isup(dev, bssidx); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + privacy = info->privacy; +#else + privacy = 0; +#endif + if (!is_bss_up && + (wl_cfg80211_bcn_validate_sec(dev, &ies, dev_role, bssidx, privacy) < 0)) + { + WL_ERR(("Beacon set security failed \n")); + err = -EINVAL; + goto fail; + } + + /* Set BI and DTIM period */ + if (info->interval) { + if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD, + &info->interval, sizeof(s32), true)) < 0) { + WL_ERR(("Beacon Interval Set Error, %d\n", err)); + return err; + } + } + if (info->dtim_period) { + if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD, + &info->dtim_period, sizeof(s32), true)) < 0) { + WL_ERR(("DTIM Interval Set Error, %d\n", err)); + return err; + } + } + + /* If bss is already up, skip bring up */ + if (!is_bss_up && + (err = wl_cfg80211_bcn_bringup_ap(dev, &ies, dev_role, bssidx)) < 0) + { + WL_ERR(("Beacon bring up AP/GO failed \n")); + goto fail; + } + + /* Set GC/STA SCB expiry timings. 
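(same wl_cfg80211_set_scb_timings call as the cfg80211 start_ap path)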
*/ + if ((err = wl_cfg80211_set_scb_timings(cfg, dev))) { + WL_ERR(("scb setting failed \n")); + goto fail; + } + + if (wl_get_drv_status(cfg, AP_CREATED, dev)) { + /* Soft AP already running. Update changed params */ + if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) { + WL_ERR(("Hostapd update sec failed \n")); + err = -EINVAL; + goto fail; + } + } + + /* Enable Probe Req filter */ + if (((dev_role == NL80211_IFTYPE_P2P_GO) || + (dev_role == NL80211_IFTYPE_AP)) && (ies.wps_ie != NULL)) { + wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc); + if (pbc) + wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true); + } + + WL_DBG(("** ADD/SET beacon done **\n")); + +fail: + if (err) { + WL_ERR(("ADD/SET beacon failed\n")); + wldev_iovar_setint(dev, "mpc", 1); + if (dev_role == NL80211_IFTYPE_AP) { + /* clear the AP mode */ + dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE; + } + } + return err; + +} +#endif + +#ifdef WL_SCHED_SCAN +#define PNO_TIME 30 +#define PNO_REPEAT 4 +#define PNO_FREQ_EXPO_MAX 2 +static bool +is_ssid_in_list(struct cfg80211_ssid *ssid, struct cfg80211_ssid *ssid_list, int count) +{ + int i; + + if (!ssid || !ssid_list) + return FALSE; + + for (i = 0; i < count; i++) { + if (ssid->ssid_len == ssid_list[i].ssid_len) { + if (strncmp(ssid->ssid, ssid_list[i].ssid, ssid->ssid_len) == 0) + return TRUE; + } + } + return FALSE; +} + +static int +wl_cfg80211_sched_scan_start(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_sched_scan_request *request) +{ + ushort pno_time = PNO_TIME; + int pno_repeat = PNO_REPEAT; + int pno_freq_expo_max = PNO_FREQ_EXPO_MAX; + wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT]; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct cfg80211_ssid *ssid = NULL; + struct cfg80211_ssid *hidden_ssid_list = NULL; + int ssid_cnt = 0; + int i; + int ret = 0; + + if (!request) { + WL_ERR(("Sched scan request was NULL\n")); + return -EINVAL; + } + + WL_DBG(("Enter \n")); + WL_PNO((">>> SCHED SCAN START\n")); + WL_PNO(("Enter n_match_sets:%d n_ssids:%d \n", + request->n_match_sets, request->n_ssids)); + WL_PNO(("ssids:%d pno_time:%d pno_repeat:%d pno_freq:%d \n", + request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max)); + + + if (!request->n_ssids || !request->n_match_sets) { + WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids)); + return -EINVAL; + } + + memset(&ssids_local, 0, sizeof(ssids_local)); + + if (request->n_ssids > 0) { + hidden_ssid_list = request->ssids; + } + + for (i = 0; i < request->n_match_sets && ssid_cnt < MAX_PFN_LIST_COUNT; i++) { + ssid = &request->match_sets[i].ssid; + /* No need to include null ssid */ + if (ssid->ssid_len) { + memcpy(ssids_local[ssid_cnt].SSID, ssid->ssid, ssid->ssid_len); + ssids_local[ssid_cnt].SSID_len = ssid->ssid_len; + if (is_ssid_in_list(ssid, hidden_ssid_list, request->n_ssids)) { + ssids_local[ssid_cnt].hidden = TRUE; + WL_PNO((">>> PNO hidden SSID (%s) \n", ssid->ssid)); + } else { + ssids_local[ssid_cnt].hidden = FALSE; + WL_PNO((">>> PNO non-hidden SSID (%s) \n", ssid->ssid)); + } + ssid_cnt++; + } + } + + if (ssid_cnt) { + if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, ssid_cnt, + pno_time, pno_repeat, pno_freq_expo_max, NULL, 0)) < 0) { + WL_ERR(("PNO setup failed!! 
ret=%d \n", ret)); + return -EINVAL; + } + cfg->sched_scan_req = request; + } else { + return -EINVAL; + } + + return 0; +} + +static int +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) +wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid) +#else +wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev) +#endif +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + + WL_DBG(("Enter \n")); + WL_PNO((">>> SCHED SCAN STOP\n")); + + if (dhd_dev_pno_stop_for_ssid(dev) < 0) + WL_ERR(("PNO Stop for SSID failed")); + + if (cfg->scan_request && cfg->sched_scan_running) { + WL_PNO((">>> Sched scan running. Aborting it..\n")); + wl_notify_escan_complete(cfg, dev, true, true); + } + + cfg->sched_scan_req = NULL; + cfg->sched_scan_running = FALSE; + + return 0; +} +#endif /* WL_SCHED_SCAN */ + +#ifdef WL_SUPPORT_ACS +/* + * Currently the dump_obss IOVAR is returning string as output so we need to + * parse the output buffer in an unoptimized way. Going forward if we get the + * IOVAR output in binary format this method can be optimized + */ +static int wl_parse_dump_obss(char *buf, struct wl_dump_survey *survey) +{ + int i; + char *token; + char delim[] = " \n"; + + token = strsep(&buf, delim); + while (token != NULL) { + if (!strcmp(token, "OBSS")) { + for (i = 0; i < OBSS_TOKEN_IDX; i++) + token = strsep(&buf, delim); + survey->obss = simple_strtoul(token, NULL, 10); + } + + if (!strcmp(token, "IBSS")) { + for (i = 0; i < IBSS_TOKEN_IDX; i++) + token = strsep(&buf, delim); + survey->ibss = simple_strtoul(token, NULL, 10); + } + + if (!strcmp(token, "TXDur")) { + for (i = 0; i < TX_TOKEN_IDX; i++) + token = strsep(&buf, delim); + survey->tx = simple_strtoul(token, NULL, 10); + } + + if (!strcmp(token, "Category")) { + for (i = 0; i < CTG_TOKEN_IDX; i++) + token = strsep(&buf, delim); + survey->no_ctg = simple_strtoul(token, NULL, 10); + } + + if (!strcmp(token, "Packet")) { + for (i = 0; i < PKT_TOKEN_IDX; i++) + token = strsep(&buf, delim); + survey->no_pckt = simple_strtoul(token, NULL, 10); + } + + if (!strcmp(token, "Opp(time):")) { + for (i = 0; i < IDLE_TOKEN_IDX; i++) + token = strsep(&buf, delim); + survey->idle = simple_strtoul(token, NULL, 10); + } + + token = strsep(&buf, delim); + } + + return 0; +} + +static int wl_dump_obss(struct net_device *ndev, cca_msrmnt_query req, + struct wl_dump_survey *survey) +{ + cca_stats_n_flags *results; + char *buf; + int retry, err; + + buf = kzalloc(sizeof(char) * WLC_IOCTL_MAXLEN, GFP_KERNEL); + if (unlikely(!buf)) { + WL_ERR(("%s: buf alloc failed\n", __func__)); + return -ENOMEM; + } + + retry = IOCTL_RETRY_COUNT; + while (retry--) { + err = wldev_iovar_getbuf(ndev, "dump_obss", &req, sizeof(req), + buf, WLC_IOCTL_MAXLEN, NULL); + if (err >= 0) { + break; + } + WL_DBG(("attempt = %d, err = %d, \n", + (IOCTL_RETRY_COUNT - retry), err)); + } + + if (retry <= 0) { + WL_ERR(("failure, dump_obss IOVAR failed\n")); + err = -EINVAL; + goto exit; + } + + results = (cca_stats_n_flags *)(buf); + wl_parse_dump_obss(results->buf, survey); + kfree(buf); + + return 0; +exit: + kfree(buf); + return err; +} + +static int wl_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev, + int idx, struct survey_info *info) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct wl_dump_survey *survey; + struct ieee80211_supported_band *band; + struct ieee80211_channel*chan; + cca_msrmnt_query req; + int val, err, noise, retry; + + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + if (!(dhd->op_mode & 
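/* the OBSS survey is implemented for SoftAP mode only */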
DHD_FLAG_HOSTAP_MODE)) { + return -ENOENT; + } + band = wiphy->bands[NL80211_BAND_2GHZ ]; + if (band && idx >= band->n_channels) { + idx -= band->n_channels; + band = NULL; + } + + if (!band || idx >= band->n_channels) { + /* Move to 5G band */ + band = wiphy->bands[NL80211_BAND_5GHZ ]; + if (idx >= band->n_channels) { + return -ENOENT; + } + } + + chan = &band->channels[idx]; + /* Setting current channel to the requested channel */ + if ((err = wl_cfg80211_set_channel(wiphy, ndev, chan, + NL80211_CHAN_HT20) < 0)) { + WL_ERR(("Set channel failed \n")); + } + + if (!idx) { + /* Disable mpc */ + val = 0; + err = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val, + sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, + &cfg->ioctl_buf_sync); + if (err < 0) { + WL_ERR(("set 'mpc' failed, error = %d\n", err)); + } + + /* Set interface up, explicitly. */ + val = 1; + err = wldev_ioctl(ndev, WLC_UP, (void *)&val, sizeof(val), true); + if (err < 0) { + WL_ERR(("set interface up failed, error = %d\n", err)); + } + } + + /* Get noise value */ + retry = IOCTL_RETRY_COUNT; + while (retry--) { + err = wldev_ioctl(ndev, WLC_GET_PHY_NOISE, &noise, + sizeof(noise), false); + if (err >= 0) { + break; + } + WL_DBG(("attempt = %d, err = %d, \n", + (IOCTL_RETRY_COUNT - retry), err)); + } + + if (retry <= 0) { + WL_ERR(("Get Phy Noise failed, error = %d\n", err)); + noise = CHAN_NOISE_DUMMY; + } + + survey = (struct wl_dump_survey *) kzalloc(sizeof(struct wl_dump_survey), + GFP_KERNEL); + if (unlikely(!survey)) { + WL_ERR(("%s: alloc failed\n", __func__)); + return -ENOMEM; + } + + /* Start Measurement for obss stats on current channel */ + req.msrmnt_query = 0; + req.time_req = ACS_MSRMNT_DELAY; + if ((err = wl_dump_obss(ndev, req, survey)) < 0) { + goto exit; + } + + /* + * Wait for the meaurement to complete, adding a buffer value of 10 to take + * into consideration any delay in IOVAR completion + */ + msleep(ACS_MSRMNT_DELAY + 10); + + /* Issue IOVAR to collect measurement results */ + req.msrmnt_query = 1; + if ((err = wl_dump_obss(ndev, req, survey)) < 0) { + goto exit; + } + + info->channel = chan; + info->noise = noise; + info->channel_time = ACS_MSRMNT_DELAY; + info->channel_time_busy = ACS_MSRMNT_DELAY - survey->idle; + info->channel_time_rx = survey->obss + survey->ibss + survey->no_ctg + + survey->no_pckt; + info->channel_time_tx = survey->tx; + info->filled = SURVEY_INFO_NOISE_DBM |SURVEY_INFO_CHANNEL_TIME | + SURVEY_INFO_CHANNEL_TIME_BUSY | SURVEY_INFO_CHANNEL_TIME_RX | + SURVEY_INFO_CHANNEL_TIME_TX; + kfree(survey); + + return 0; +exit: + kfree(survey); + return err; +} +#endif /* WL_SUPPORT_ACS */ + +static struct cfg80211_ops wl_cfg80211_ops = { + .add_virtual_intf = wl_cfg80211_add_virtual_iface, + .del_virtual_intf = wl_cfg80211_del_virtual_iface, + .change_virtual_intf = wl_cfg80211_change_virtual_iface, +#if defined(WL_CFG80211_P2P_DEV_IF) + .start_p2p_device = wl_cfgp2p_start_p2p_device, + .stop_p2p_device = wl_cfgp2p_stop_p2p_device, +#endif /* WL_CFG80211_P2P_DEV_IF */ + .scan = wl_cfg80211_scan, + .set_wiphy_params = wl_cfg80211_set_wiphy_params, + .join_ibss = wl_cfg80211_join_ibss, + .leave_ibss = wl_cfg80211_leave_ibss, + .get_station = wl_cfg80211_get_station, + .set_tx_power = wl_cfg80211_set_tx_power, + .get_tx_power = wl_cfg80211_get_tx_power, + .add_key = wl_cfg80211_add_key, + .del_key = wl_cfg80211_del_key, + .get_key = wl_cfg80211_get_key, + .set_default_key = wl_cfg80211_config_default_key, + .set_default_mgmt_key = wl_cfg80211_config_default_mgmt_key, + .set_power_mgmt = 
wl_cfg80211_set_power_mgmt, + .connect = wl_cfg80211_connect, + .disconnect = wl_cfg80211_disconnect, + .suspend = wl_cfg80211_suspend, + .resume = wl_cfg80211_resume, + .set_pmksa = wl_cfg80211_set_pmksa, + .del_pmksa = wl_cfg80211_del_pmksa, + .flush_pmksa = wl_cfg80211_flush_pmksa, + .remain_on_channel = wl_cfg80211_remain_on_channel, + .cancel_remain_on_channel = wl_cfg80211_cancel_remain_on_channel, + .mgmt_tx = wl_cfg80211_mgmt_tx, + .mgmt_frame_register = wl_cfg80211_mgmt_frame_register, + .change_bss = wl_cfg80211_change_bss, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) + .set_channel = wl_cfg80211_set_channel, +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) + .set_beacon = wl_cfg80211_add_set_beacon, + .add_beacon = wl_cfg80211_add_set_beacon, +#else + .change_beacon = wl_cfg80211_change_beacon, + .start_ap = wl_cfg80211_start_ap, + .stop_ap = wl_cfg80211_stop_ap, +#endif +#ifdef WL_SCHED_SCAN + .sched_scan_start = wl_cfg80211_sched_scan_start, + .sched_scan_stop = wl_cfg80211_sched_scan_stop, +#endif /* WL_SCHED_SCAN */ +#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \ + 2, 0)) + .del_station = wl_cfg80211_del_station, + .change_station = wl_cfg80211_change_station, + .mgmt_tx_cancel_wait = wl_cfg80211_mgmt_tx_cancel_wait, +#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VERSION >= (3,2,0) */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) + .tdls_mgmt = wl_cfg80211_tdls_mgmt, + .tdls_oper = wl_cfg80211_tdls_oper, +#endif +#ifdef WL_SUPPORT_ACS + .dump_survey = wl_cfg80211_dump_survey, +#endif /* WL_SUPPORT_ACS */ +#ifdef WL_CFG80211_ACL + .set_mac_acl = wl_cfg80211_set_mac_acl, +#endif /* WL_CFG80211_ACL */ +}; + +s32 wl_mode_to_nl80211_iftype(s32 mode) +{ + s32 err = 0; + + switch (mode) { + case WL_MODE_BSS: + return NL80211_IFTYPE_STATION; + case WL_MODE_IBSS: + return NL80211_IFTYPE_ADHOC; + case WL_MODE_AP: + return NL80211_IFTYPE_AP; + default: + return NL80211_IFTYPE_UNSPECIFIED; + } + + return err; +} + +#ifdef CONFIG_CFG80211_INTERNAL_REGDB +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) +static int +#else +static void +#endif /* kernel version < 3.9.0 */ +wl_cfg80211_reg_notifier( + struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy); + int ret = 0; + int revinfo = -1; + + if (!request || !cfg) { + WL_ERR(("Invalid arg\n")); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) + return -EINVAL; +#else + return; +#endif /* kernel version < 3.9.0 */ + } + + WL_DBG(("ccode: %c%c Initiator: %d\n", + request->alpha2[0], request->alpha2[1], request->initiator)); + + /* We support only REGDOM_SET_BY_USER as of now */ + if ((request->initiator != NL80211_REGDOM_SET_BY_USER) && + (request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE)) { + WL_ERR(("reg_notifier for intiator:%d not supported : set default\n", + request->initiator)); + /* in case of no supported country by regdb + lets driver setup platform default Locale + */ + } + + WL_ERR(("Set country code %c%c from %s\n", + request->alpha2[0], request->alpha2[1], + ((request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) ? " 11d AP" : "User"))); + + if ((ret = wldev_set_country(bcmcfg_to_prmry_ndev(cfg), request->alpha2, + false, (request->initiator == NL80211_REGDOM_SET_BY_USER ? 
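/* true only for user-initiated regulatory requests */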
true : false), + revinfo)) < 0) { + WL_ERR(("set country failed: %d\n", ret)); + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) + return ret; +#else + return; +#endif /* kernel version < 3.9.0 */ +} +#endif /* CONFIG_CFG80211_INTERNAL_REGDB */ + +#ifdef CONFIG_PM +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) +static const struct wiphy_wowlan_support brcm_wowlan_support = { + .flags = WIPHY_WOWLAN_ANY, + .n_patterns = WL_WOWLAN_MAX_PATTERNS, + .pattern_min_len = WL_WOWLAN_MIN_PATTERN_LEN, + .pattern_max_len = WL_WOWLAN_MAX_PATTERN_LEN, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) + .max_pkt_offset = WL_WOWLAN_MAX_PATTERN_LEN, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */ +}; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) +static struct cfg80211_wowlan brcm_wowlan_config = { + .disconnect = true, + .gtk_rekey_failure = true, + .eap_identity_req = true, + .four_way_handshake = true, +}; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */ +#endif /* CONFIG_PM */ + +static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev, void *context) +{ + s32 err = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) + dhd_pub_t *dhd = (dhd_pub_t *)context; + BCM_REFERENCE(dhd); + + if (!dhd) { + WL_ERR(("DHD is NULL!!")); + err = -ENODEV; + return err; + } +#endif + + wdev->wiphy = + wiphy_new(&wl_cfg80211_ops, sizeof(struct bcm_cfg80211)); + if (unlikely(!wdev->wiphy)) { + WL_ERR(("Could not allocate wiphy device\n")); + err = -ENOMEM; + return err; + } + set_wiphy_dev(wdev->wiphy, sdiofunc_dev); + wdev->wiphy->max_scan_ie_len = WL_SCAN_IE_LEN_MAX; + /* Report how many SSIDs the driver can support per scan request */ + wdev->wiphy->max_scan_ssids = WL_SCAN_PARAMS_SSID_MAX; + wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX; +#ifdef WL_SCHED_SCAN + wdev->wiphy->max_sched_scan_ssids = MAX_PFN_LIST_COUNT; + wdev->wiphy->max_match_sets = MAX_PFN_LIST_COUNT; + wdev->wiphy->max_sched_scan_ie_len = WL_SCAN_IE_LEN_MAX; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) + wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; +#endif +#endif /* WL_SCHED_SCAN */ + wdev->wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) + | BIT(NL80211_IFTYPE_ADHOC) +#if !defined(WL_ENABLE_P2P_IF) && !defined(WL_CFG80211_P2P_DEV_IF) + | BIT(NL80211_IFTYPE_MONITOR) +#endif /* !WL_ENABLE_P2P_IF && !WL_CFG80211_P2P_DEV_IF */ +#if defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF) + | BIT(NL80211_IFTYPE_P2P_CLIENT) + | BIT(NL80211_IFTYPE_P2P_GO) +#endif /* WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF */ +#if defined(WL_CFG80211_P2P_DEV_IF) + | BIT(NL80211_IFTYPE_P2P_DEVICE) +#endif /* WL_CFG80211_P2P_DEV_IF */ + | BIT(NL80211_IFTYPE_AP); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \ + (defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF)) + WL_DBG(("Setting interface combinations for common mode\n")); + wdev->wiphy->iface_combinations = common_iface_combinations; + wdev->wiphy->n_iface_combinations = + ARRAY_SIZE(common_iface_combinations); +#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */ + + wdev->wiphy->bands[NL80211_BAND_2GHZ] = &__wl_band_2ghz; + + wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; + wdev->wiphy->cipher_suites = __wl_cipher_suites; + wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites); + wdev->wiphy->max_remain_on_channel_duration = 5000; + 
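/* cap remain-on-channel dwell requests at 5000 ms */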
wdev->wiphy->mgmt_stypes = wl_cfg80211_default_mgmt_stypes; +#ifndef WL_POWERSAVE_DISABLED + wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; +#else + wdev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; +#endif /* !WL_POWERSAVE_DISABLED */ + wdev->wiphy->flags |= WIPHY_FLAG_NETNS_OK | + WIPHY_FLAG_4ADDR_AP | +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) + WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS | +#endif + WIPHY_FLAG_4ADDR_STATION; +#if ((defined(ROAM_ENABLE) || defined(BCMFW_ROAM_ENABLE)) && (LINUX_VERSION_CODE >= \ + KERNEL_VERSION(3, 2, 0))) + /* + * If FW ROAM flag is advertised, upper layer wouldn't provide + * the bssid & freq in the connect command. This will result a + * delay in initial connection time due to firmware doing a full + * channel scan to figure out the channel & bssid. However kernel + * ver >= 3.15, provides bssid_hint & freq_hint and hence kernel + * ver >= 3.15 won't have any issue. So if this flags need to be + * advertised for kernel < 3.15, suggest to use RCC along with it + * to avoid the initial connection delay. + */ + wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM; +#endif +#ifdef UNSET_FW_ROAM_WIPHY_FLAG + wdev->wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_FW_ROAM; +#endif /* UNSET_FW_ROAM_WIPHY_FLAG */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) + wdev->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + WIPHY_FLAG_OFFCHAN_TX; +#endif +#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \ + 4, 0)) + /* From 3.4 kernel ownards AP_SME flag can be advertised + * to remove the patch from supplicant + */ + wdev->wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME; + +#ifdef WL_CFG80211_ACL + /* Configure ACL capabilities. */ + wdev->wiphy->max_acl_mac_addrs = MAX_NUM_MAC_FILT; +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) + /* Supplicant distinguish between the SoftAP mode and other + * modes (e.g. P2P, WPS, HS2.0) when it builds the probe + * response frame from Supplicant MR1 and Kernel 3.4.0 or + * later version. To add Vendor specific IE into the + * probe response frame in case of SoftAP mode, + * AP_PROBE_RESP_OFFLOAD flag is set to wiphy->flags variable. + */ + if (dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) { + wdev->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; + wdev->wiphy->probe_resp_offload = 0; + } +#endif +#endif /* WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) */ + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) + wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; +#endif + +#if defined(CONFIG_PM) && defined(WL_CFG80211_P2P_DEV_IF) + /* + * From linux-3.10 kernel, wowlan packet filter is mandated to avoid the + * disconnection of connected network before suspend. So a dummy wowlan + * filter is configured for kernels linux-3.8 and above. + */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) + wdev->wiphy->wowlan = &brcm_wowlan_support; + /* If this is not provided cfg stack will get disconnect + * during suspend. 
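+ * (cfg80211's suspend path disconnects when no WoWLAN configuration is present)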
+ */ + wdev->wiphy->wowlan_config = &brcm_wowlan_config; +#else + wdev->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; + wdev->wiphy->wowlan.n_patterns = WL_WOWLAN_MAX_PATTERNS; + wdev->wiphy->wowlan.pattern_min_len = WL_WOWLAN_MIN_PATTERN_LEN; + wdev->wiphy->wowlan.pattern_max_len = WL_WOWLAN_MAX_PATTERN_LEN; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) + wdev->wiphy->wowlan.max_pkt_offset = WL_WOWLAN_MAX_PATTERN_LEN; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */ +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */ +#endif /* CONFIG_PM && WL_CFG80211_P2P_DEV_IF */ + + WL_DBG(("Registering custom regulatory)\n")); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) + wdev->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; +#else + wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; +#endif + wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom); +#if defined(WL_VENDOR_EXT_SUPPORT) + WL_ERR(("Registering Vendor80211\n")); + err = wl_cfgvendor_attach(wdev->wiphy); + if (unlikely(err < 0)) { + WL_ERR(("Couldn not attach vendor commands (%d)\n", err)); + } +#endif /* defined(WL_VENDOR_EXT_SUPPORT) */ + /* Now we can register wiphy with cfg80211 module */ + err = wiphy_register(wdev->wiphy); + if (unlikely(err < 0)) { + WL_ERR(("Couldn not register wiphy device (%d)\n", err)); + wiphy_free(wdev->wiphy); + } + +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && (LINUX_VERSION_CODE <= \ + KERNEL_VERSION(3, 3, 0))) && defined(WL_IFACE_COMB_NUM_CHANNELS) + wdev->wiphy->flags &= ~WIPHY_FLAG_ENFORCE_COMBINATIONS; +#endif + + return err; +} + +static void wl_free_wdev(struct bcm_cfg80211 *cfg) +{ + struct wireless_dev *wdev = cfg->wdev; + struct wiphy *wiphy = NULL; + if (!wdev) { + WL_ERR(("wdev is invalid\n")); + return; + } + if (wdev->wiphy) { + wiphy = wdev->wiphy; + +#if defined(WL_VENDOR_EXT_SUPPORT) + wl_cfgvendor_detach(wdev->wiphy); +#endif /* if defined(WL_VENDOR_EXT_SUPPORT) */ +#if defined(CONFIG_PM) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) + /* Reset wowlan & wowlan_config before Unregister to avoid Kernel Panic */ + WL_DBG(("wl_free_wdev Clearing wowlan Config \n")); + wdev->wiphy->wowlan = NULL; + wdev->wiphy->wowlan_config = NULL; +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */ +#endif + wiphy_unregister(wdev->wiphy); + wdev->wiphy->dev.parent = NULL; + wdev->wiphy = NULL; + } + + wl_delete_all_netinfo(cfg); + if (wiphy) + wiphy_free(wiphy); + + /* PLEASE do NOT call any function after wiphy_free, the driver's private structure "cfg", + * which is the private part of wiphy, has been freed in wiphy_free !!!!!!!!!!! 
+ */ +} + +static s32 wl_inform_bss(struct bcm_cfg80211 *cfg) +{ + struct wl_scan_results *bss_list; + struct wl_bss_info *bi = NULL; /* must be initialized */ + s32 err = 0; + s32 i; + + bss_list = cfg->bss_list; + WL_DBG(("scanned AP count (%d)\n", bss_list->count)); + bi = next_bss(bss_list, bi); + for_each_bss(bss_list, bi, i) { + err = wl_inform_single_bss(cfg, bi, false); + if (unlikely(err)) + break; + } + return err; +} + +static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam) +{ + struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); + struct ieee80211_mgmt *mgmt; + struct ieee80211_channel *channel; + struct ieee80211_supported_band *band; + struct wl_cfg80211_bss_info *notif_bss_info; + struct wl_scan_req *sr = wl_to_sr(cfg); + struct beacon_proberesp *beacon_proberesp; + struct cfg80211_bss *cbss = NULL; + s32 mgmt_type; + s32 signal; + u32 freq; + s32 err = 0; + gfp_t aflags; + + if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) { + WL_DBG(("Beacon is larger than buffer. Discarding\n")); + return err; + } + aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL; + notif_bss_info = kzalloc(sizeof(*notif_bss_info) + sizeof(*mgmt) + - sizeof(u8) + WL_BSS_INFO_MAX, aflags); + if (unlikely(!notif_bss_info)) { + WL_ERR(("notif_bss_info alloc failed\n")); + return -ENOMEM; + } + mgmt = (struct ieee80211_mgmt *)notif_bss_info->frame_buf; + notif_bss_info->channel = + wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec)); + + if (notif_bss_info->channel <= CH_MAX_2G_CHANNEL) + band = wiphy->bands[NL80211_BAND_2GHZ ]; + else + band = wiphy->bands[NL80211_BAND_5GHZ ]; + if (!band) { + WL_ERR(("No valid band")); + kfree(notif_bss_info); + return -EINVAL; + } + notif_bss_info->rssi = wl_rssi_offset(dtoh16(bi->RSSI)); + memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN); + mgmt_type = cfg->active_scan ? + IEEE80211_STYPE_PROBE_RESP : IEEE80211_STYPE_BEACON; + if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) { + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | mgmt_type); + } + beacon_proberesp = cfg->active_scan ? 
+ (struct beacon_proberesp *)&mgmt->u.probe_resp : + (struct beacon_proberesp *)&mgmt->u.beacon; + beacon_proberesp->timestamp = 0; + beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period); + beacon_proberesp->capab_info = cpu_to_le16(bi->capability); + wl_rst_ie(cfg); + wl_update_hidden_ap_ie(bi, ((u8 *) bi) + bi->ie_offset, &bi->ie_length, roam); + wl_mrg_ie(cfg, ((u8 *) bi) + bi->ie_offset, bi->ie_length); + wl_cp_ie(cfg, beacon_proberesp->variable, WL_BSS_INFO_MAX - + offsetof(struct wl_cfg80211_bss_info, frame_buf)); + notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt, + u.beacon.variable) + wl_get_ielen(cfg); +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) + freq = ieee80211_channel_to_frequency(notif_bss_info->channel); + (void)band->band; +#else + freq = ieee80211_channel_to_frequency(notif_bss_info->channel, band->band); +#endif + if (freq == 0) { + WL_ERR(("Invalid channel, fail to chcnage channel to freq\n")); + kfree(notif_bss_info); + return -EINVAL; + } + channel = ieee80211_get_channel(wiphy, freq); + if (unlikely(!channel)) { + WL_ERR(("ieee80211_get_channel error\n")); + kfree(notif_bss_info); + return -EINVAL; + } + WL_DBG(("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM" + "mgmt_type %d frame_len %d\n", bi->SSID, + notif_bss_info->rssi, notif_bss_info->channel, + mgmt->u.beacon.capab_info, &bi->BSSID, mgmt_type, + notif_bss_info->frame_len)); + + signal = notif_bss_info->rssi * 100; + if (!mgmt->u.probe_resp.timestamp) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) + struct timespec ts; + get_monotonic_boottime(&ts); + mgmt->u.probe_resp.timestamp = ((u64)ts.tv_sec*1000000) + + ts.tv_nsec / 1000; +#else + struct timeval tv; + do_gettimeofday(&tv); + mgmt->u.probe_resp.timestamp = ((u64)tv.tv_sec*1000000) + + tv.tv_usec; +#endif + } + + + cbss = cfg80211_inform_bss_frame(wiphy, channel, mgmt, + le16_to_cpu(notif_bss_info->frame_len), signal, aflags); + if (unlikely(!cbss)) { + WL_ERR(("cfg80211_inform_bss_frame error\n")); + kfree(notif_bss_info); + return -EINVAL; + } + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) + cfg80211_put_bss(wiphy, cbss); +#else + cfg80211_put_bss(cbss); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */ + kfree(notif_bss_info); + return err; +} + +static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e, struct net_device *ndev) +{ + u32 event = ntoh32(e->event_type); + u32 status = ntoh32(e->status); + u16 flags = ntoh16(e->flags); + + WL_DBG(("event %d, status %d flags %x\n", event, status, flags)); + if (event == WLC_E_SET_SSID) { + if (status == WLC_E_STATUS_SUCCESS) { + if (!wl_is_ibssmode(cfg, ndev)) + return true; + } + } else if (event == WLC_E_LINK) { + if (flags & WLC_EVENT_MSG_LINK) + return true; + } + + WL_DBG(("wl_is_linkup false\n")); + return false; +} + +static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e) +{ + u32 event = ntoh32(e->event_type); + u16 flags = ntoh16(e->flags); + + if (event == WLC_E_DEAUTH_IND || + event == WLC_E_DISASSOC_IND || + event == WLC_E_DISASSOC || + event == WLC_E_DEAUTH) { +#if (WL_DBG_LEVEL > 0) + WL_ERR(("Link down Reason : WLC_E_%s\n", wl_dbg_estr[event])); +#endif /* (WL_DBG_LEVEL > 0) */ + return true; + } else if (event == WLC_E_LINK) { + if (!(flags & WLC_EVENT_MSG_LINK)) { +#if (WL_DBG_LEVEL > 0) + WL_ERR(("Link down Reason : WLC_E_%s\n", wl_dbg_estr[event])); +#endif /* (WL_DBG_LEVEL > 0) */ + return true; + } + } + + return false; +} + +static bool wl_is_nonetwork(struct 
bcm_cfg80211 *cfg, const wl_event_msg_t *e) +{ + u32 event = ntoh32(e->event_type); + u32 status = ntoh32(e->status); + + if (event == WLC_E_LINK && status == WLC_E_STATUS_NO_NETWORKS) + return true; + if (event == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS) + return true; + + return false; +} + +/* The mainline kernel >= 3.2.0 has support for indicating new/del station + * to AP/P2P GO via events. If this change is backported to kernel for which + * this driver is being built, then define WL_CFG80211_STA_EVENT. You + * should use this new/del sta event mechanism for BRCM supplicant >= 22. + */ +static s32 +wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + s32 err = 0; + u32 event = ntoh32(e->event_type); + u32 reason = ntoh32(e->reason); + u32 len = ntoh32(e->datalen); + u32 status = ntoh32(e->status); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT) + bool isfree = false; + u8 *mgmt_frame; + u8 bsscfgidx = e->bsscfgidx; + s32 freq; + s32 channel; + u8 *body = NULL; + u16 fc = 0; + + struct ieee80211_supported_band *band; + struct ether_addr da; + struct ether_addr bssid; + struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); + channel_info_t ci; +#else + struct station_info sinfo; +#endif + + WL_DBG(("event %d status %d reason %d\n", event, ntoh32(e->status), reason)); + /* if link down, bsscfg is disabled. */ + if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS && + wl_get_p2p_status(cfg, IF_DELETING) && (ndev != bcmcfg_to_prmry_ndev(cfg))) { + wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false); + WL_INFORM(("AP mode link down !! \n")); + complete(&cfg->iface_disable); + return 0; + } + + if ((event == WLC_E_LINK) && (status == WLC_E_STATUS_SUCCESS) && + (reason == WLC_E_REASON_INITIAL_ASSOC) && + (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP)) { + if (!wl_get_drv_status(cfg, AP_CREATED, ndev)) { + /* AP/GO brought up successfull in firmware */ + WL_ERR(("** AP/GO Link up event **\n")); + wl_set_drv_status(cfg, AP_CREATED, ndev); + wake_up_interruptible(&cfg->netif_change_event); + return 0; + } + } + + if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND || event == WLC_E_DEAUTH) { + WL_ERR(("event %s(%d) status %d reason %d\n", + bcmevent_get_name(event), event, ntoh32(e->status), reason)); + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT) + WL_DBG(("Enter \n")); + if (!len && (event == WLC_E_DEAUTH)) { + len = 2; /* reason code field */ + data = &reason; + } + if (len) { + body = kzalloc(len, GFP_KERNEL); + + if (body == NULL) { + WL_ERR(("wl_notify_connect_status: Failed to allocate body\n")); + return WL_INVALID; + } + } + memset(&bssid, 0, ETHER_ADDR_LEN); + WL_DBG(("Enter event %d ndev %p\n", event, ndev)); + if (wl_get_mode_by_netdev(cfg, ndev) == WL_INVALID) { + kfree(body); + return WL_INVALID; + } + if (len) + memcpy(body, data, len); + + wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr", + NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync); + memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN); + err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false); + switch (event) { + case WLC_E_ASSOC_IND: + fc = FC_ASSOC_REQ; + break; + case WLC_E_REASSOC_IND: + fc = FC_REASSOC_REQ; + break; + case WLC_E_DISASSOC_IND: + fc = FC_DISASSOC; + break; + case WLC_E_DEAUTH_IND: + fc = FC_DISASSOC; + break; + case WLC_E_DEAUTH: + fc = FC_DISASSOC; + break; + default: + fc = 0; + goto exit; 
+ } + if ((err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci), false))) { + kfree(body); + return err; + } + + channel = dtoh32(ci.hw_channel); + if (channel <= CH_MAX_2G_CHANNEL) + band = wiphy->bands[NL80211_BAND_2GHZ ]; + else + band = wiphy->bands[NL80211_BAND_5GHZ ]; + if (!band) { + WL_ERR(("No valid band")); + if (body) + kfree(body); + return -EINVAL; + } +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) + freq = ieee80211_channel_to_frequency(channel); + (void)band->band; +#else + freq = ieee80211_channel_to_frequency(channel, band->band); +#endif + + err = wl_frame_get_mgmt(fc, &da, &e->addr, &bssid, + &mgmt_frame, &len, body); + if (err < 0) + goto exit; + isfree = true; + + if (event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) { +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) && (LINUX_VERSION_CODE < \ + KERNEL_VERSION(3, 18, 0))) + cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC); + +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len); +#else + cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC); +#endif + } else if (event == WLC_E_DISASSOC_IND) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) + cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC); +#else + cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC); +#endif + } else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) + cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC); +#else + cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC); +#endif + } + +exit: + if (isfree) + kfree(mgmt_frame); + if (body) + kfree(body); +#else /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */ + sinfo.filled = 0; + if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) && + reason == DOT11_SC_SUCCESS) { + /* Linux ver >= 4.0 assoc_req_ies_len is used instead of + * STATION_INFO_ASSOC_REQ_IES flag + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) + sinfo.filled = STA_INFO_BIT(INFO_ASSOC_REQ_IES); +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) */ + if (!data) { + WL_ERR(("No IEs present in ASSOC/REASSOC_IND")); + return -EINVAL; + } + sinfo.assoc_req_ies = data; + sinfo.assoc_req_ies_len = len; + cfg80211_new_sta(ndev, e->addr.octet, &sinfo, GFP_ATOMIC); + } else if (event == WLC_E_DISASSOC_IND) { + cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC); + } else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) { + cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC); + } +#endif + return err; +} + +#if defined(DHD_ENABLE_BIGDATA_LOGGING) +#define MAX_ASSOC_REJECT_ERR_STATUS 5 +int wl_get_connect_failed_status(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e) +{ + u32 status = ntoh32(e->status); + + cfg->assoc_reject_status = 0; + + if (status == WLC_E_STATUS_FAIL) { + WL_ERR(("auth assoc status event=%d e->status %d e->reason %d \n", + ntoh32(cfg->event_auth_assoc.event_type), + (int)ntoh32(cfg->event_auth_assoc.status), + (int)ntoh32(cfg->event_auth_assoc.reason))); + + switch ((int)ntoh32(cfg->event_auth_assoc.status)) { + case WLC_E_STATUS_NO_ACK: + cfg->assoc_reject_status = 1; + break; + case WLC_E_STATUS_FAIL: + cfg->assoc_reject_status = 2; + break; + case WLC_E_STATUS_UNSOLICITED: + cfg->assoc_reject_status = 3; + break; + case WLC_E_STATUS_TIMEOUT: + cfg->assoc_reject_status = 4; + break; + case WLC_E_STATUS_ABORT: + cfg->assoc_reject_status = 5; + break; + default: + break; + } 
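+ /* A non-zero assoc_reject_status now encodes the failure cause
+ * (1..MAX_ASSOC_REJECT_ERR_STATUS); assoc-stage failures are offset
+ * by MAX_ASSOC_REJECT_ERR_STATUS below so the consumer of
+ * assoc_reject.status can tell auth and assoc errors apart.
+ */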
+ if (cfg->assoc_reject_status) {
+ if (ntoh32(cfg->event_auth_assoc.event_type) == WLC_E_ASSOC) {
+ cfg->assoc_reject_status += MAX_ASSOC_REJECT_ERR_STATUS;
+ }
+ }
+ }
+
+ WL_ERR(("assoc_reject_status %d \n", cfg->assoc_reject_status));
+
+ return 0;
+}
+
+s32 wl_cfg80211_get_connect_failed_status(struct net_device *dev, char* cmd, int total_len)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ int bytes_written = 0;
+
+ cfg = g_bcm_cfg;
+
+ if (cfg == NULL) {
+ return -1;
+ }
+
+ memset(cmd, 0, total_len);
+ bytes_written = snprintf(cmd, 30, "assoc_reject.status %d", cfg->assoc_reject_status);
+
+ WL_ERR(("cmd: %s \n", cmd));
+
+ return bytes_written;
+}
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
+static s32
+wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e)
+{
+ u32 reason = ntoh32(e->reason);
+ u32 event = ntoh32(e->event_type);
+ struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+ WL_DBG(("event type : %d, reason : %d\n", event, reason));
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ memcpy(&cfg->event_auth_assoc, e, sizeof(wl_event_msg_t));
+ WL_ERR(("event=%d status %d reason %d \n",
+ ntoh32(cfg->event_auth_assoc.event_type),
+ ntoh32(cfg->event_auth_assoc.status),
+ ntoh32(cfg->event_auth_assoc.reason)));
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+ if (sec) {
+ switch (event) {
+ case WLC_E_ASSOC:
+ case WLC_E_AUTH:
+ sec->auth_assoc_res_status = reason;
+ default:
+ break;
+ }
+ } else
+ WL_ERR(("sec is NULL\n"));
+ return 0;
+}
+
+static s32
+wl_notify_connect_status_ibss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ u32 event = ntoh32(e->event_type);
+ u16 flags = ntoh16(e->flags);
+ u32 status = ntoh32(e->status);
+ bool active;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ struct ieee80211_channel *channel = NULL;
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ u32 chanspec, chan;
+ u32 freq, band;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
+
+ if (event == WLC_E_JOIN) {
+ WL_DBG(("joined in IBSS network\n"));
+ }
+ if (event == WLC_E_START) {
+ WL_DBG(("started IBSS network\n"));
+ }
+ if (event == WLC_E_JOIN || event == WLC_E_START ||
+ (event == WLC_E_LINK && (flags == WLC_EVENT_MSG_LINK))) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ err = wldev_iovar_getint(ndev, "chanspec", (s32 *)&chanspec);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get chanspec %d\n", err));
+ return err;
+ }
+ chan = wf_chspec_ctlchan(wl_chspec_driver_to_host(chanspec));
+ band = (chan <= CH_MAX_2G_CHANNEL) ?
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ freq = ieee80211_channel_to_frequency(chan, band);
+ channel = ieee80211_get_channel(wiphy, freq);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ /* ROAM or Redundant */
+ u8 *cur_bssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ if (memcmp(cur_bssid, &e->addr, ETHER_ADDR_LEN) == 0) {
+ WL_DBG(("IBSS connected event from same BSSID("
+ MACDBG "), ignore it\n", MAC2STRDBG(cur_bssid)));
+ return err;
+ }
+ WL_INFORM(("IBSS BSSID is changed from " MACDBG " to " MACDBG "\n",
+ MAC2STRDBG(cur_bssid), MAC2STRDBG((const u8 *)&e->addr)));
+ wl_get_assoc_ies(cfg, ndev);
+ wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
+ wl_update_bss_info(cfg, ndev, false);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, channel, GFP_KERNEL);
+#else
+ cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, GFP_KERNEL);
+#endif
+ }
+ else {
+ /* New connection */
+ WL_INFORM(("IBSS connected to " MACDBG "\n",
+ MAC2STRDBG((const u8 *)&e->addr)));
+ wl_link_up(cfg);
+ wl_get_assoc_ies(cfg, ndev);
+ wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
+ wl_update_bss_info(cfg, ndev, false);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, channel, GFP_KERNEL);
+#else
+ cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, GFP_KERNEL);
+#endif
+ wl_set_drv_status(cfg, CONNECTED, ndev);
+ active = true;
+ wl_update_prof(cfg, ndev, NULL, (const void *)&active, WL_PROF_ACT);
+ }
+ } else if ((event == WLC_E_LINK && !(flags & WLC_EVENT_MSG_LINK)) ||
+ event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND) {
+ wl_clr_drv_status(cfg, CONNECTED, ndev);
+ wl_link_down(cfg);
+ wl_init_prof(cfg, ndev);
+ }
+ else if (event == WLC_E_SET_SSID && status == WLC_E_STATUS_NO_NETWORKS) {
+ WL_DBG(("no action - join fail (IBSS mode)\n"));
+ }
+ else {
+ WL_DBG(("no action (IBSS mode)\n"));
+ }
+ return err;
+}
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+#define WiFiALL_OUI "\x50\x6F\x9A" /* Wi-FiAll OUI */
+#define WiFiALL_OUI_LEN 3
+#define WiFiALL_OUI_TYPE 16
+
+int wl_get_bss_info(struct bcm_cfg80211 *cfg, struct net_device *dev, uint8 *mac)
+{
+ s32 err = 0;
+ struct wl_bss_info *bi;
+ uint8 eabuf[ETHER_ADDR_LEN];
+ u32 rate, channel, freq, supported_rate, nss = 0, mcs_map, mode_80211 = 0;
+ char rate_str[4];
+ u8 *ie = NULL;
+ u32 ie_len;
+ struct wiphy *wiphy;
+ struct cfg80211_bss *bss;
+ bcm_tlv_t *interworking_ie = NULL;
+ bcm_tlv_t *tlv_ie = NULL;
+ bcm_tlv_t *vht_ie = NULL;
+ vndr_ie_t *vndrie;
+ int16 ie_11u_rel_num = -1, ie_mu_mimo_cap = -1;
+ u32 i, remained_len, count = 0;
+ char roam_count_str[4], akm_str[4];
+ s32 val = 0;
+
+ /* get BSS information */
+
+ strncpy(cfg->bss_info, "x x x x x x x x x x x x x", GET_BSS_INFO_LEN);
+
+ *(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+
+ err = wldev_ioctl(dev, WLC_GET_BSS_INFO, cfg->extra_buf, WL_EXTRA_BUF_MAX, false);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get bss info %d\n", err));
+ cfg->roam_count = 0;
+ return -1;
+ }
+
+ if (!mac) {
+ WL_ERR(("mac is null \n"));
+ cfg->roam_count = 0;
+ return -1;
+ }
+
+ memcpy(eabuf, mac, ETHER_ADDR_LEN);
+
+ bi = (struct wl_bss_info *)(cfg->extra_buf + 4);
+ channel = wf_chspec_ctlchan(bi->chanspec);
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
+ freq = ieee80211_channel_to_frequency(channel);
+#else
+ if (channel > 14) {
+ freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_5GHZ);
+ } else {
+ freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
+ }
+#endif
+
+ err = wldev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate), false);
+ if (err) {
+ WL_ERR(("Could not get rate (%d)\n", err));
+ snprintf(rate_str, sizeof(rate_str), "x"); // Unknown
+
+ } else {
+ rate = dtoh32(rate);
+ snprintf(rate_str, sizeof(rate_str), "%d", (rate/2));
+ }
+
+ // supported maximum rate
+ supported_rate = (bi->rateset.rates[bi->rateset.count - 1] & 0x7f) / 2;
+
+ if (supported_rate < 12) {
+ mode_80211 = 0; // 11b mode (11b maximum rate is 11 Mbps)
+ } else {
+ // not an HT-capable case
+ if (channel > 14) {
+ mode_80211 = 3; // 11a mode
+ } else {
+ mode_80211 = 1; // 11g mode
+ }
+ }
+
+ if (bi->n_cap) {
+ /* check Rx MCS Map for HT */
+ nss = 0;
+ mode_80211 = 2;
+ for (i = 0; i < MAX_STREAMS_SUPPORTED; i++) {
+ int8 bitmap = 0xFF;
+ if (i == MAX_STREAMS_SUPPORTED-1) {
+ bitmap = 0x7F;
+ }
+ if (bi->basic_mcs[i] & bitmap) {
+ nss++;
+ }
+ }
+ }
+
+ if (bi->vht_cap) {
+ nss = 0;
+ mode_80211 = 4;
+ for (i = 1; i <= VHT_CAP_MCS_MAP_NSS_MAX; i++) {
+ mcs_map = VHT_MCS_MAP_GET_MCS_PER_SS(i, dtoh16(bi->vht_rxmcsmap));
+ if (mcs_map != VHT_CAP_MCS_MAP_NONE) {
+ nss++;
+ }
+ }
+ }
+
+ if (nss) {
+ nss = nss - 1;
+ }
+
+ wiphy = bcmcfg_to_wiphy(cfg);
+ bss = cfg80211_get_bss(wiphy, NULL, eabuf,
+ bi->SSID, strlen(bi->SSID), WLAN_CAPABILITY_ESS,
+ WLAN_CAPABILITY_ESS);
+
+ if (!bss) {
+ WL_ERR(("Could not find the AP\n"));
+ } else {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ ie = (u8 *)bss->ies->data;
+ ie_len = bss->ies->len;
+#else
+ ie = bss->information_elements;
+ ie_len = bss->len_information_elements;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ }
+
+ if (ie) {
+ ie_mu_mimo_cap = 0;
+ ie_11u_rel_num = 0;
+
+ if (bi->vht_cap) {
+ if ((vht_ie = bcm_parse_tlvs(ie, (u32)ie_len,
+ DOT11_MNG_VHT_CAP_ID)) != NULL) {
+ ie_mu_mimo_cap = (vht_ie->data[2] & 0x08) >> 3;
+ }
+ }
+
+ if ((interworking_ie = bcm_parse_tlvs(ie, (u32)ie_len,
+ DOT11_MNG_INTERWORKING_ID)) != NULL) {
+ if ((tlv_ie = bcm_parse_tlvs(ie, (u32)ie_len, DOT11_MNG_VS_ID)) != NULL) {
+ remained_len = ie_len;
+
+ while (tlv_ie) {
+ if (count > MAX_VNDR_IE_NUMBER)
+ break;
+
+ if (tlv_ie->id == DOT11_MNG_VS_ID) {
+ vndrie = (vndr_ie_t *) tlv_ie;
+
+ if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
+ WL_ERR(("%s: invalid vndr ie."
+ " length is too small %d\n",
+ __FUNCTION__, vndrie->len));
+ break;
+ }
+
+ if (!bcmp(vndrie->oui,
+ (u8*)WiFiALL_OUI, WiFiALL_OUI_LEN) &&
+ (vndrie->data[0] == WiFiALL_OUI_TYPE))
+ {
+ WL_ERR(("Found Wi-FiAll OUI.\n"));
+ ie_11u_rel_num = vndrie->data[1];
+ ie_11u_rel_num = (ie_11u_rel_num & 0xf0)>>4;
+ ie_11u_rel_num += 1;
+
+ break;
+ }
+ }
+ count++;
+ tlv_ie = bcm_next_tlv(tlv_ie, &remained_len);
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < bi->SSID_len; i++) {
+ if (bi->SSID[i] == ' ') {
+ bi->SSID[i] = '_';
+ }
+ }
+
+ // 0 : None, 1 : OKC, 2 : FT, 3 : CCKM
+ err = wldev_iovar_getint(dev, "wpa_auth", &val);
+ if (unlikely(err)) {
+ WL_ERR(("could not get wpa_auth (%d)\n", err));
+ snprintf(akm_str, sizeof(akm_str), "x"); // Unknown
+ } else {
+ WL_ERR(("wpa_auth val %d \n", val));
+#if defined(BCMEXTCCX)
+ if (val & (WPA_AUTH_CCKM | WPA2_AUTH_CCKM)) {
+ snprintf(akm_str, sizeof(akm_str), "3");
+ } else
+#endif
+ if (val & WPA2_AUTH_FT) {
+ snprintf(akm_str, sizeof(akm_str), "2");
+ } else if (val & (WPA_AUTH_UNSPECIFIED | WPA2_AUTH_UNSPECIFIED)) {
+ snprintf(akm_str, sizeof(akm_str), "1");
+ } else {
+ snprintf(akm_str, sizeof(akm_str), "0");
+ }
+ }
+
+ if (cfg->roam_offload) {
+ snprintf(roam_count_str, sizeof(roam_count_str), "x"); // Unknown
+ } else {
+ snprintf(roam_count_str, sizeof(roam_count_str), "%d", cfg->roam_count);
+ }
+ cfg->roam_count = 0;
+
+ WL_ERR(("BSSID:" MACDBG " SSID %s \n", MAC2STRDBG(eabuf), bi->SSID));
+ WL_ERR(("freq:%d, BW:%s, RSSI:%d dBm, Rate:%d Mbps, 11mode:%d, stream:%d,"
+ "MU-MIMO:%d, Passpoint:%d, SNR:%d, Noise:%d, \n"
+ "akm:%s roam:%s \n",
+ freq, wf_chspec_to_bw_str(bi->chanspec),
+ dtoh32(bi->RSSI), (rate / 2), mode_80211, nss,
+ ie_mu_mimo_cap, ie_11u_rel_num, bi->SNR, bi->phy_noise,
+ akm_str, roam_count_str));
+
+ if (ie) {
+ snprintf(cfg->bss_info, GET_BSS_INFO_LEN,
+ "%02x:%02x:%02x %d %s %d %s %d %d %d %d %d %d %s %s",
+ eabuf[0], eabuf[1], eabuf[2],
+ freq, wf_chspec_to_bw_str(bi->chanspec),
+ dtoh32(bi->RSSI), rate_str, mode_80211, nss,
+ ie_mu_mimo_cap, ie_11u_rel_num,
+ bi->SNR, bi->phy_noise, akm_str, roam_count_str);
+ } else {
+ // ie_mu_mimo_cap and ie_11u_rel_num are unknown.
+ snprintf(cfg->bss_info, GET_BSS_INFO_LEN,
+ "%02x:%02x:%02x %d %s %d %s %d %d x x %d %d %s %s",
+ eabuf[0], eabuf[1], eabuf[2],
+ freq, wf_chspec_to_bw_str(bi->chanspec),
+ dtoh32(bi->RSSI), rate_str, mode_80211, nss,
+ bi->SNR, bi->phy_noise, akm_str, roam_count_str);
+ }
+
+
+ return 0;
+}
+
+s32 wl_cfg80211_get_bss_info(struct net_device *dev, char* cmd, int total_len)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+
+ cfg = g_bcm_cfg;
+
+ if (cfg == NULL) {
+ return -1;
+ }
+
+ memset(cmd, 0, total_len);
+ memcpy(cmd, cfg->bss_info, GET_BSS_INFO_LEN);
+
+ WL_ERR(("cmd: %s \n", cmd));
+
+ return GET_BSS_INFO_LEN;
+}
+
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
+static s32
+wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ bool act;
+ struct net_device *ndev = NULL;
+ s32 err = 0;
+ u32 event = ntoh32(e->event_type);
+ struct wiphy *wiphy = NULL;
+ struct cfg80211_bss *bss = NULL;
+ struct wlc_ssid *ssid = NULL;
+ u8 *bssid = 0;
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+ err = wl_notify_connect_status_ap(cfg, ndev, e, data);
+ } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS) {
+ err = wl_notify_connect_status_ibss(cfg, ndev, e, data);
+ } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_BSS) {
+ WL_DBG(("wl_notify_connect_status : event %d status : %d ndev %p\n",
+ ntoh32(e->event_type), ntoh32(e->status), ndev));
+ if (event == WLC_E_ASSOC || event == WLC_E_AUTH) {
+ wl_get_auth_assoc_status(cfg, ndev, e);
+ return 0;
+ }
+ DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
+ if (wl_is_linkup(cfg, e, ndev)) {
+ wl_link_up(cfg);
+ act = true;
+ if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+#ifdef DHD_LOSSLESS_ROAMING
+ bool is_connected = wl_get_drv_status(cfg, CONNECTED, ndev);
+#endif
+
+ WL_ERR(("wl_bss_connect_done succeeded with " MACDBG "\n",
+ MAC2STRDBG((const u8*)(&e->addr))));
+ wl_bss_connect_done(cfg, ndev, e, data, true);
+ WL_DBG(("joined in BSS network \"%s\"\n",
+ ((struct wlc_ssid *)
+ wl_read_prof(cfg, ndev, WL_PROF_SSID))->SSID));
+#ifdef DHD_LOSSLESS_ROAMING
+ if (event == WLC_E_LINK && is_connected &&
+ !cfg->roam_offload) {
+ wl_bss_roaming_done(cfg, ndev, e, data);
+ }
+#endif /* DHD_LOSSLESS_ROAMING */
+
+ }
+ wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
+ wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
+
+ } else if (wl_is_linkdown(cfg, e)) {
+#ifdef DHD_LOSSLESS_ROAMING
+ wl_del_roam_timeout(cfg);
+#endif
+#ifdef P2PLISTEN_AP_SAMECHN
+ if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ wl_cfg80211_set_p2p_resp_ap_chn(ndev, 0);
+ cfg->p2p_resp_apchn_status = false;
+ WL_DBG(("p2p_resp_apchn_status Turn OFF \n"));
+ }
+#endif /* P2PLISTEN_AP_SAMECHN */
+ wl_cfg80211_cancel_scan(cfg);
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ wl_get_bss_info(cfg, ndev, (u8*)(&e->addr));
+ }
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+ /* Explicitly calling unlink to remove BSS in CFG */
+ wiphy = bcmcfg_to_wiphy(cfg);
+ ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
+ bssid = (u8 *)wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ if (ssid && bssid) {
+ bss = cfg80211_get_bss(wiphy, NULL, bssid,
+ ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS,
+ WLAN_CAPABILITY_ESS);
+ if (bss) {
+ cfg80211_unlink_bss(wiphy, bss);
+ }
+ }
+
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ scb_val_t scbval;
+ u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ s32 reason = 0;
+ struct ether_addr bssid_dongle;
+ struct ether_addr bssid_null = {{0, 0, 0, 0, 0, 0}};
+
+ if (event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND)
+ reason = ntoh32(e->reason);
+ /* WLAN_REASON_UNSPECIFIED is used for hang up event in Android */
+ reason = (reason == WLAN_REASON_UNSPECIFIED)? 0 : reason;
+
+ WL_ERR(("link down if %s may call cfg80211_disconnected. "
+ "event : %d, reason=%d from " MACDBG "\n",
+ ndev->name, event, ntoh32(e->reason),
+ MAC2STRDBG((const u8*)(&e->addr))));
+
+ /* roam offload does not sync BSSID always, get it from dongle */
+ if (cfg->roam_offload) {
+ if (wldev_ioctl(ndev, WLC_GET_BSSID, &bssid_dongle,
+ sizeof(bssid_dongle), false) == BCME_OK) {
+ /* if not roam case, it would return null bssid */
+ if (memcmp(&bssid_dongle, &bssid_null,
+ ETHER_ADDR_LEN) != 0) {
+ curbssid = (u8 *)&bssid_dongle;
+ }
+ }
+ }
+ if (memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) != 0) {
+ bool fw_assoc_state = TRUE;
+ dhd_pub_t *dhd = (dhd_pub_t *)cfg->pub;
+ fw_assoc_state = dhd_is_associated(dhd, e->ifidx, &err);
+ if (!fw_assoc_state) {
+ WL_ERR(("FW not associated, send up the event even"
+ " though the BSSID differs. cur: " MACDBG
+ " event: " MACDBG"\n",
+ MAC2STRDBG(curbssid),
+ MAC2STRDBG((const u8*)(&e->addr))));
+ } else {
+ WL_ERR(("BSSID of event is not the connected BSSID"
+ "(ignore it) cur: " MACDBG
+ " event: " MACDBG"\n",
+ MAC2STRDBG(curbssid),
+ MAC2STRDBG((const u8*)(&e->addr))));
+ return 0;
+ }
+ }
+ wl_clr_drv_status(cfg, CONNECTED, ndev);
+ if (! wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+ /* To make sure of the disconnect, explicitly send a
+ * disassoc for the BSSID 00:00:00:00:00:00 issue
+ */
+ scbval.val = WLAN_REASON_DEAUTH_LEAVING;
+
+ memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+ scbval.val = htod32(scbval.val);
+ err = wldev_ioctl(ndev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t), true);
+ if (err < 0) {
+ WL_ERR(("WLC_DISASSOC error %d\n", err));
+ err = 0;
+ }
+ CFG80211_DISCONNECTED(ndev, reason, NULL, 0,
+ false, GFP_KERNEL);
+ wl_link_down(cfg);
+ wl_init_prof(cfg, ndev);
+ memset(&cfg->last_roamed_addr, 0, ETHER_ADDR_LEN);
+ }
+ }
+ else if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
+ WL_ERR(("link down, during connecting\n"));
+#ifdef ESCAN_RESULT_PATCH
+ if ((memcmp(connect_req_bssid, broad_bssid, ETHER_ADDR_LEN) == 0) ||
+ (memcmp(&e->addr, broad_bssid, ETHER_ADDR_LEN) == 0) ||
+ (memcmp(&e->addr, connect_req_bssid, ETHER_ADDR_LEN) == 0))
+ /* In case this event comes while associating with another AP */
+#endif /* ESCAN_RESULT_PATCH */
+ wl_bss_connect_done(cfg, ndev, e, data, false);
+ }
+ wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+
+ /* if link down, bsscfg is disabled */
+ if (ndev != bcmcfg_to_prmry_ndev(cfg))
+ complete(&cfg->iface_disable);
+
+ } else if (wl_is_nonetwork(cfg, e)) {
+ WL_ERR(("connect failed event=%d e->status %d e->reason %d \n",
+ event, (int)ntoh32(e->status), (int)ntoh32(e->reason)));
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ if (event == WLC_E_SET_SSID) {
+ wl_get_connect_failed_status(cfg, e);
+ }
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+ /* Clean up any pending scan request */
+ wl_cfg80211_cancel_scan(cfg);
+ if (wl_get_drv_status(cfg, CONNECTING, ndev))
+ wl_bss_connect_done(cfg, ndev, e, data, false);
+ } else {
+ WL_DBG(("%s nothing\n", __FUNCTION__));
+ }
+ DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
+ }
+ else {
+ WL_ERR(("Invalid ndev status %d\n", wl_get_mode_by_netdev(cfg, ndev)));
+ }
+ return err;
+}
+
+void wl_cfg80211_set_rmc_pid(int pid)
+{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ if (pid > 0)
+ cfg->rmc_event_pid = pid;
+ WL_DBG(("set pid for rmc event : pid=%d\n", pid));
+}
+
+#ifdef WL_RELMCAST
+static s32
+wl_notify_rmc_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ u32 evt = ntoh32(e->event_type);
+ u32 reason = ntoh32(e->reason);
+ int ret = -1;
+
+ switch (reason) {
+ case WLC_E_REASON_RMC_AR_LOST:
+ case WLC_E_REASON_RMC_AR_NO_ACK:
+ if (cfg->rmc_event_pid != 0) {
+ ret = wl_netlink_send_msg(cfg->rmc_event_pid,
+ RMC_EVENT_LEADER_CHECK_FAIL,
+ cfg->rmc_event_seq++, NULL, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ WL_DBG(("rmcevent : evt=%d, pid=%d, ret=%d\n", evt, cfg->rmc_event_pid, ret));
+ return ret;
+}
+#endif /* WL_RELMCAST */
+static s32
+wl_notify_roaming_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ bool act;
+ struct net_device *ndev = NULL;
+ s32 err = 0;
+ u32 event = be32_to_cpu(e->event_type);
+ u32 status = be32_to_cpu(e->status);
+#ifdef DHD_LOSSLESS_ROAMING
+ struct wl_security *sec;
+#endif
+ WL_DBG(("Enter \n"));
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ if ((!cfg->disable_roam_event) && (event == WLC_E_BSSID)) {
+ wl_add_remove_eventmsg(ndev, WLC_E_ROAM, false);
+ cfg->disable_roam_event = TRUE;
+ }
+
+ if ((cfg->disable_roam_event) && (event == WLC_E_ROAM))
+ return err;
+
+ if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status == WLC_E_STATUS_SUCCESS) {
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+#ifdef DHD_LOSSLESS_ROAMING
+ if (cfg->roam_offload) {
+ wl_bss_roaming_done(cfg, ndev, e, data);
+ wl_del_roam_timeout(cfg);
+ }
+ else {
+ sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+ /* In order to reduce roaming delay, wl_bss_roaming_done is
+ * called early with the WLC_E_LINK event. It is called from
+ * here only if the WLC_E_LINK event is blocked for a specific
+ * security type.
+ */
+ if (IS_AKM_SUITE_FT(sec)) {
+ wl_bss_roaming_done(cfg, ndev, e, data);
+ }
+ /* Roam timer is deleted mostly from wl_cfg80211_change_station
+ * after roaming is finished successfully. We need to delete
+ * the timer from here only for some security types that aren't
+ * using wl_cfg80211_change_station to authorize SCB
+ */
+ if (IS_AKM_SUITE_FT(sec) || IS_AKM_SUITE_CCKM(sec)) {
+ wl_del_roam_timeout(cfg);
+ }
+ }
+#else
+ wl_bss_roaming_done(cfg, ndev, e, data);
+#endif /* DHD_LOSSLESS_ROAMING */
+ } else {
+ wl_bss_connect_done(cfg, ndev, e, data, true);
+ }
+ act = true;
+ wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
+ wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
+ }
+#ifdef DHD_LOSSLESS_ROAMING
+ else if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status != WLC_E_STATUS_SUCCESS) {
+ wl_del_roam_timeout(cfg);
+ }
+#endif
+ return err;
+}
+
+#ifdef QOS_MAP_SET
+/* set the given user priority for the DSCP range from low to high */
+static bool
+up_table_set(uint8 *up_table, uint8 up, uint8 low, uint8 high)
+{
+ int i;
+
+ if (up > 7 || low > high || low >= UP_TABLE_MAX || high >= UP_TABLE_MAX) {
+ return FALSE;
+ }
+
+ for (i = low; i <= high; i++) {
+ up_table[i] = up;
+ }
+
+ return TRUE;
+}
+
+/* set user priority table */
+static void
+wl_set_up_table(uint8 *up_table, bcm_tlv_t *qos_map_ie)
+{
+ uint8 len;
+
+ if (up_table == NULL || qos_map_ie == NULL) {
+ return;
+ }
+
+ /* clear table to check table was set or not */
+ memset(up_table, 0xff, UP_TABLE_MAX);
+
+ /* length of QoS Map IE must be 16+n*2, n is number of exceptions */
+ if (qos_map_ie != NULL && qos_map_ie->id == DOT11_MNG_QOS_MAP_ID &&
+ (len = qos_map_ie->len) >= QOS_MAP_FIXED_LENGTH &&
+ (len % 2) == 0) {
+ uint8 *except_ptr = (uint8 *)qos_map_ie->data;
+ uint8 except_len = len - QOS_MAP_FIXED_LENGTH;
+ uint8 *range_ptr = except_ptr + except_len;
+ int i;
+
+ /* fill in ranges */
+ for (i = 0; i < QOS_MAP_FIXED_LENGTH; i += 2) {
+ uint8 low = range_ptr[i];
+ uint8 high = range_ptr[i + 1];
+ if (low == 255 && high == 255) {
+ continue;
+ }
+
+ if (!up_table_set(up_table, i / 2, low, high)) {
+ /* clear the table on failure */
+ memset(up_table, 0xff, UP_TABLE_MAX);
+ return;
+ }
+ }
+
+ /* update exceptions */
+ for (i = 0; i < except_len; i += 2) {
+ uint8 dscp = except_ptr[i];
+ uint8 up = except_ptr[i+1];
+
+ /* exceptions with invalid dscp/up are ignored */
+ up_table_set(up_table, up, dscp, dscp);
+ }
+ }
+
+ if (wl_dbg_level & WL_DBG_DBG) {
+ prhex("UP table", up_table, UP_TABLE_MAX);
+ }
+}
+
+/* get user priority table */
+uint8 *
+wl_get_up_table(void)
+{
+ return (uint8 *)(g_bcm_cfg->up_table);
+}
+#endif /* QOS_MAP_SET */
+
+#ifdef DHD_LOSSLESS_ROAMING
+static s32
+wl_notify_roam_prep_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ struct wl_security *sec;
+ struct net_device *ndev;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+ /* Disable Lossless Roaming for the specific AKM suite.
+ * Any other AKM suite can be added below if the transition time
+ * is delayed because of Lossless Roaming
+ * and it causes a certification failure
+ */
+ if (IS_AKM_SUITE_FT(sec)) {
+ return err;
+ }
+
+ dhdp->dequeue_prec_map = 1 << PRIO_8021D_NC;
+ /* Restore flow control */
+ dhd_txflowcontrol(dhdp, ALL_INTERFACES, OFF);
+
+ mod_timer(&cfg->roam_timeout, jiffies + msecs_to_jiffies(WL_ROAM_TIMEOUT_MS));
+
+ return err;
+}
+#endif /* DHD_LOSSLESS_ROAMING */
+
+static s32
+wl_notify_idsup_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+#if defined(WL_VENDOR_EXT_SUPPORT)
+ u32 idsup_status;
+ u32 reason = ntoh32(e->reason);
+ struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+#endif /* defined(WL_VENDOR_EXT_SUPPORT) */
+
+ if (cfg->roam_offload) {
+#if defined(WL_VENDOR_EXT_SUPPORT)
+ switch (reason) {
+ case WLC_E_SUP_WPA_PSK_TMO:
+ idsup_status = IDSUP_EVENT_4WAY_HANDSHAKE_TIMEOUT;
+ break;
+ case WLC_E_SUP_OTHER:
+ idsup_status = IDSUP_EVENT_SUCCESS;
+ break;
+ default:
+ WL_ERR(("Other type at IDSUP. "
+ "event=%d e->status %d e->reason %d \n",
+ (int)ntoh32(e->event_type), (int)ntoh32(e->status),
+ (int)ntoh32(e->reason)));
+ return err;
+ }
+
+ err = wl_cfgvendor_send_async_event(wiphy, ndev,
+ BRCM_VENDOR_EVENT_IDSUP_STATUS, &idsup_status, sizeof(u32));
+#endif /* defined(WL_VENDOR_EXT_SUPPORT) */
+ }
+ return err;
+}
+
+#ifdef CUSTOM_EVENT_PM_WAKE
+static s32
+wl_check_pmstatus(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ struct net_device *ndev = NULL;
+ u8 *pbuf = NULL;
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ pbuf = kzalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+ if (pbuf == NULL) {
+ WL_ERR(("failed to allocate local pbuf\n"));
+ return -ENOMEM;
+ }
+
+ err = wldev_iovar_getbuf_bsscfg(ndev, "dump",
+ "pm", strlen("pm"), pbuf, WLC_IOCTL_MEDLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err) {
+ WL_ERR(("dump ioctl err = %d", err));
+ } else {
+ WL_ERR(("PM status : %s\n", pbuf));
+ }
+
+ if (pbuf) {
+ kfree(pbuf);
+ }
+ return err;
+}
+#endif /* CUSTOM_EVENT_PM_WAKE */
+
+static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ wl_assoc_info_t assoc_info;
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ s32 err = 0;
+#ifdef QOS_MAP_SET
+ bcm_tlv_t * qos_map_ie = NULL;
+#endif /* QOS_MAP_SET */
+
+ WL_DBG(("Enter \n"));
+ err = wldev_iovar_getbuf(ndev, "assoc_info", NULL, 0, cfg->extra_buf,
+ WL_ASSOC_INFO_MAX, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("could not get assoc info (%d)\n", err));
+ return err;
+ }
+ memcpy(&assoc_info, cfg->extra_buf, sizeof(wl_assoc_info_t));
+ assoc_info.req_len = htod32(assoc_info.req_len);
+ assoc_info.resp_len = htod32(assoc_info.resp_len);
+ assoc_info.flags = htod32(assoc_info.flags);
+ if (conn_info->req_ie_len) {
+ conn_info->req_ie_len = 0;
+ bzero(conn_info->req_ie, sizeof(conn_info->req_ie));
+ }
+ if (conn_info->resp_ie_len) {
+ conn_info->resp_ie_len = 0;
+ bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie));
+ }
+ if (assoc_info.req_len) {
+ err = wldev_iovar_getbuf(ndev, "assoc_req_ies", NULL, 0, cfg->extra_buf,
+ WL_ASSOC_INFO_MAX, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("could not get assoc req (%d)\n", err));
+ return err;
+ }
+ conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req);
+ if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) {
+ conn_info->req_ie_len -= ETHER_ADDR_LEN;
+ }
+ if (conn_info->req_ie_len <= MAX_REQ_LINE)
+ memcpy(conn_info->req_ie, cfg->extra_buf, conn_info->req_ie_len);
+ else {
+ WL_ERR(("IE size %d above max %d size \n",
+ conn_info->req_ie_len, MAX_REQ_LINE));
+ return err;
+ }
+ } else {
+ conn_info->req_ie_len = 0;
+ }
+ if (assoc_info.resp_len) {
+ err = wldev_iovar_getbuf(ndev, "assoc_resp_ies", NULL, 0, cfg->extra_buf,
+ WL_ASSOC_INFO_MAX, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("could not get assoc resp (%d)\n", err));
+ return err;
+ }
+ conn_info->resp_ie_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp);
+ if (conn_info->resp_ie_len <= MAX_REQ_LINE) {
+ memcpy(conn_info->resp_ie, cfg->extra_buf, conn_info->resp_ie_len);
+ } else {
+ WL_ERR(("IE size %d above max %d size \n",
+ conn_info->resp_ie_len, MAX_REQ_LINE));
+ return err;
+ }
+
+#ifdef QOS_MAP_SET
+ /* find qos map set ie */
+ if ((qos_map_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
+ DOT11_MNG_QOS_MAP_ID)) != NULL) {
+ WL_DBG((" QoS map set IE found in assoc response\n"));
+ if (!cfg->up_table) {
+ cfg->up_table = kmalloc(UP_TABLE_MAX, GFP_KERNEL);
+ }
+ wl_set_up_table(cfg->up_table, qos_map_ie);
+ } else {
+ kfree(cfg->up_table);
+ cfg->up_table = NULL;
+ }
+#endif /* QOS_MAP_SET */
+ } else {
+ conn_info->resp_ie_len = 0;
+ }
+ WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len,
+ conn_info->resp_ie_len));
+
+ return err;
+}
+
+static s32 wl_ch_to_chanspec(struct net_device *dev, int ch, struct wl_join_params *join_params,
+ size_t *join_params_size)
+{
+ struct bcm_cfg80211 *cfg;
+ s32 bssidx = -1;
+ chanspec_t chanspec = 0, chspec;
+
+ if (ch != 0) {
+ cfg = (struct bcm_cfg80211 *)wiphy_priv(dev->ieee80211_ptr->wiphy);
+ if (cfg && cfg->rcc_enabled) {
+ } else {
+ join_params->params.chanspec_num = 1;
+ join_params->params.chanspec_list[0] = ch;
+
+ if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
+
+ /* Get the min_bw set for the interface */
+ chspec = wl_cfg80211_ulb_get_min_bw_chspec(dev->ieee80211_ptr, bssidx);
+ if (chspec == INVCHANSPEC) {
+ WL_ERR(("Invalid chanspec \n"));
+ return -EINVAL;
+ }
+ chanspec |= chspec;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+
+ *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+ join_params->params.chanspec_num * sizeof(chanspec_t);
+
+ join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+ join_params->params.chanspec_list[0] |= chanspec;
+ join_params->params.chanspec_list[0] =
+ wl_chspec_host_to_driver(join_params->params.chanspec_list[0]);
+
+ join_params->params.chanspec_num =
+ htod32(join_params->params.chanspec_num);
+ }
+
+ WL_DBG(("join_params->params.chanspec_list[0]= %X, %d channels\n",
+ join_params->params.chanspec_list[0],
+ join_params->params.chanspec_num));
+ }
+ return 0;
+}
+
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam)
+{
+ struct wl_bss_info *bi;
+ struct wlc_ssid *ssid;
+ struct bcm_tlv *tim;
+ s32 beacon_interval;
+ s32 dtim_period;
+ size_t ie_len;
+ u8 *ie;
+ u8 *curbssid;
+ s32 err = 0;
+ struct wiphy *wiphy;
+ u32 channel;
+
+ wiphy = bcmcfg_to_wiphy(cfg);
+
+ ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
+ curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+
+ mutex_lock(&cfg->usr_sync);
+
+ *(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+ err = wldev_ioctl(ndev, WLC_GET_BSS_INFO,
+ cfg->extra_buf, WL_EXTRA_BUF_MAX, false);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get bss info %d\n", err));
+ goto update_bss_info_out;
+ }
+ bi = (struct wl_bss_info *)(cfg->extra_buf + 4);
+ channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+ wl_update_prof(cfg, ndev, NULL, &channel, WL_PROF_CHAN);
+
+ if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) {
+ WL_ERR(("Bssid doesn't match\n"));
+ err = -EIO;
+ goto update_bss_info_out;
+ }
+ err = wl_inform_single_bss(cfg, bi, roam);
+ if (unlikely(err))
+ goto update_bss_info_out;
+
+ ie = ((u8 *)bi) + bi->ie_offset;
+ ie_len = bi->ie_length;
+ beacon_interval = cpu_to_le16(bi->beacon_period);
+ tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
+ if (tim) {
+ dtim_period = tim->data[1];
+ } else {
+ /*
+ * active scan was done so we could not get dtim
+ * information out of probe response.
+ * so we specifically query dtim information.
+ */
+ err = wldev_ioctl(ndev, WLC_GET_DTIMPRD,
+ &dtim_period, sizeof(dtim_period), false);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err));
+ goto update_bss_info_out;
+ }
+ }
+
+ wl_update_prof(cfg, ndev, NULL, &beacon_interval, WL_PROF_BEACONINT);
+ wl_update_prof(cfg, ndev, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
+
+update_bss_info_out:
+ if (unlikely(err)) {
+ WL_ERR(("Failed with error %d\n", err));
+ }
+ mutex_unlock(&cfg->usr_sync);
+ return err;
+}
+
+static s32
+wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ s32 err = 0;
+ u8 *curbssid;
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ struct ieee80211_supported_band *band;
+ struct ieee80211_channel *notify_channel = NULL;
+ u32 *channel;
+ u32 freq;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+ struct cfg80211_roam_info roam_info = {};
+#endif
+#endif
+
+
+ if (memcmp(&cfg->last_roamed_addr, &e->addr, ETHER_ADDR_LEN) == 0) {
+ WL_INFORM(("BSSID already updated\n"));
+ return err;
+ }
+
+ /* Skip calling cfg80211_roamed if the current bssid and
+ * the roamed bssid are the same. Also clear the roam_timeout timer.
+ */
+ curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ if (memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) == 0) {
+ WL_ERR(("BSS already present, Skipping roamed event to upper layer\n"));
+#ifdef DHD_LOSSLESS_ROAMING
+ wl_del_roam_timeout(cfg);
+#endif /* DHD_LOSSLESS_ROAMING */
+ return err;
+ }
+
+ wl_get_assoc_ies(cfg, ndev);
+ wl_update_prof(cfg, ndev, NULL, (const void *)(e->addr.octet), WL_PROF_BSSID);
+ curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ wl_update_bss_info(cfg, ndev, true);
+ wl_update_pmklist(ndev, cfg->pmk_list, err);
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+ /* channel info for cfg80211_roamed introduced in 2.6.39-rc1 */
+ channel = (u32 *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
+ if (*channel <= CH_MAX_2G_CHANNEL)
+ band = wiphy->bands[NL80211_BAND_2GHZ];
+ else
+ band = wiphy->bands[NL80211_BAND_5GHZ];
+ freq = ieee80211_channel_to_frequency(*channel, band->band);
+ notify_channel = ieee80211_get_channel(wiphy, freq);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+ roam_info.channel = notify_channel;
+ roam_info.bssid = curbssid;
+ roam_info.req_ie = conn_info->req_ie;
+ roam_info.req_ie_len = conn_info->req_ie_len;
+ roam_info.resp_ie = conn_info->resp_ie;
+ roam_info.resp_ie_len = conn_info->resp_ie_len;
+#endif
+#endif
+ WL_ERR(("wl_bss_roaming_done succeeded to " MACDBG "\n",
+ MAC2STRDBG((const u8*)(&e->addr))));
+
+ cfg80211_roamed(ndev,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+ &roam_info,
+#else
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+ notify_channel,
+#endif
+ curbssid,
+ conn_info->req_ie, conn_info->req_ie_len,
+ conn_info->resp_ie, conn_info->resp_ie_len,
+#endif
+ GFP_KERNEL);
+ WL_DBG(("Report roaming result\n"));
+
+ memcpy(&cfg->last_roamed_addr, (void *)&e->addr, ETHER_ADDR_LEN);
+ wl_set_drv_status(cfg, CONNECTED, ndev);
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ cfg->roam_count++;
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
+ return err;
+}
+
+static s32
+wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data, bool completed)
+{
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+#if defined(CUSTOM_SET_CPUCORE)
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif
+ s32 err = 0;
+ u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ if (!sec) {
+ WL_ERR(("sec is NULL\n"));
+ return -ENODEV;
+ }
+ WL_DBG((" enter\n"));
+#ifdef ESCAN_RESULT_PATCH
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ if (memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0) {
+ WL_DBG((" Connected event of connected device e=%d s=%d, ignore it\n",
+ ntoh32(e->event_type), ntoh32(e->status)));
+ return err;
+ }
+ }
+ if (memcmp(curbssid, broad_bssid, ETHER_ADDR_LEN) == 0 &&
+ memcmp(broad_bssid, connect_req_bssid, ETHER_ADDR_LEN) != 0) {
+ WL_DBG(("copy bssid\n"));
+ memcpy(curbssid, connect_req_bssid, ETHER_ADDR_LEN);
+ }
+
+#else
+ if (cfg->scan_request) {
+ wl_notify_escan_complete(cfg, ndev, true, true);
+ }
+#endif /* ESCAN_RESULT_PATCH */
+ if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
+ wl_cfg80211_scan_abort(cfg);
+ wl_clr_drv_status(cfg, CONNECTING, ndev);
+ if (completed) {
+ wl_get_assoc_ies(cfg, ndev);
+ wl_update_prof(cfg, ndev, NULL, (const void *)(e->addr.octet),
+ WL_PROF_BSSID);
+ curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ wl_update_bss_info(cfg, ndev, false);
+ wl_update_pmklist(ndev, cfg->pmk_list, err);
+ wl_set_drv_status(cfg, CONNECTED, ndev);
+ if (ndev != bcmcfg_to_prmry_ndev(cfg)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+ init_completion(&cfg->iface_disable);
+#else
+ /* reinitialize completion to clear previous count */
+ INIT_COMPLETION(cfg->iface_disable);
+#endif
+ }
+#ifdef CUSTOM_SET_CPUCORE
+ if (wl_get_chan_isvht80(ndev, dhd)) {
+ if (ndev == bcmcfg_to_prmry_ndev(cfg))
+ dhd->chan_isvht80 |= DHD_FLAG_STA_MODE; /* STA mode */
+ else if (is_p2p_group_iface(ndev->ieee80211_ptr))
+ dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE; /* p2p mode */
+ dhd_set_cpucore(dhd, TRUE);
+ }
+#endif /* CUSTOM_SET_CPUCORE */
+
+ }
+ cfg80211_connect_result(ndev,
+ curbssid,
+ conn_info->req_ie,
+ conn_info->req_ie_len,
+ conn_info->resp_ie,
+ conn_info->resp_ie_len,
+ completed ? WLAN_STATUS_SUCCESS :
+ (sec->auth_assoc_res_status) ?
+ sec->auth_assoc_res_status :
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ if (completed)
+ WL_INFORM(("Report connect result - connection succeeded\n"));
+ else
+ WL_ERR(("Report connect result - connection failed\n"));
+ }
+#ifdef CONFIG_TCPACK_FASTTX
+ if (wl_get_chan_isvht80(ndev, dhd))
+ wldev_iovar_setint(ndev, "tcpack_fast_tx", 0);
+ else
+ wldev_iovar_setint(ndev, "tcpack_fast_tx", 1);
+#endif /* CONFIG_TCPACK_FASTTX */
+
+ return err;
+}
+
+static s32
+wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct net_device *ndev = NULL;
+ u16 flags = ntoh16(e->flags);
+ enum nl80211_key_type key_type;
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ mutex_lock(&cfg->usr_sync);
+ if (flags & WLC_EVENT_MSG_GROUP)
+ key_type = NL80211_KEYTYPE_GROUP;
+ else
+ key_type = NL80211_KEYTYPE_PAIRWISE;
+
+ cfg80211_michael_mic_failure(ndev, (const u8 *)&e->addr, key_type, -1,
+ NULL, GFP_KERNEL);
+ mutex_unlock(&cfg->usr_sync);
+
+ return 0;
+}
+
+#ifdef BT_WIFI_HANDOVER
+static s32
+wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct net_device *ndev = NULL;
+ u32 event = ntoh32(e->event_type);
+ u32 datalen = ntoh32(e->datalen);
+ s32 err;
+
+ WL_ERR(("wl_notify_bt_wifi_handover_req: event_type : %d, datalen : %d\n", event, datalen));
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ err = wl_genl_send_msg(ndev, event, data, (u16)datalen, 0, 0);
+
+ return err;
+}
+#endif /* BT_WIFI_HANDOVER */
+
+#ifdef PNO_SUPPORT
+static s32
+wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct net_device *ndev = NULL;
+
+ WL_ERR((">>> PNO Event\n"));
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+#ifndef WL_SCHED_SCAN
+ mutex_lock(&cfg->usr_sync);
+ /* TODO: Use cfg80211_sched_scan_results(wiphy); */
+ CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
+ mutex_unlock(&cfg->usr_sync);
+#else
+ /* If cfg80211 scheduled scan is supported, report the pno results via sched
+ * scan results
+ */
+ wl_notify_sched_scan_results(cfg, ndev, e, data);
+#endif /* WL_SCHED_SCAN */
+ return 0;
+}
+#endif /* PNO_SUPPORT */
+
+#ifdef GSCAN_SUPPORT
+static s32
+wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ u32 event = be32_to_cpu(e->event_type);
+ void *ptr;
+ int send_evt_bytes = 0;
+ int batch_event_result_dummy = 0;
+ struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ u32 len = ntoh32(e->datalen);
+
+ switch (event) {
+ case WLC_E_PFN_SWC:
+ ptr = dhd_dev_swc_scan_event(ndev, data, &send_evt_bytes);
+ if (send_evt_bytes) {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_GSCAN_SIGNIFICANT_EVENT, ptr, send_evt_bytes);
+ kfree(ptr);
+ }
+ break;
+ case WLC_E_PFN_BEST_BATCHING:
+ err = dhd_dev_retrieve_batch_scan(ndev);
+ if (err < 0) {
+ WL_ERR(("Batch retrieval already in progress %d\n", err));
+ } else {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_GSCAN_BATCH_SCAN_EVENT,
+ &batch_event_result_dummy, sizeof(int));
+ }
+ break;
+ case WLC_E_PFN_SCAN_COMPLETE:
+ batch_event_result_dummy = WIFI_SCAN_COMPLETE;
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_COMPLETE_EVENT,
+ &batch_event_result_dummy, sizeof(int));
+ break;
+ case WLC_E_PFN_BSSID_NET_FOUND:
+ ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
+ HOTLIST_FOUND);
+ if (ptr) {
+ wl_cfgvendor_send_hotlist_event(wiphy, ndev,
+ ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT);
+ dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_FOUND);
+ }
+ break;
+ case WLC_E_PFN_BSSID_NET_LOST:
+ /* WLC_E_PFN_BSSID_NET_LOST shares its event code with
+ * WLC_E_PFN_SCAN_ALLGONE. We currently do not use
+ * WLC_E_PFN_SCAN_ALLGONE, so if we get it, ignore it.
+ */
+ if (len) {
+ ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
+ HOTLIST_LOST);
+ if (ptr) {
+ wl_cfgvendor_send_hotlist_event(wiphy, ndev,
+ ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT);
+ dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_LOST);
+ }
+ }
+ break;
+ case WLC_E_PFN_GSCAN_FULL_RESULT:
+ ptr = dhd_dev_process_full_gscan_result(ndev, data, &send_evt_bytes);
+ if (ptr) {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_FULL_RESULTS_EVENT, ptr, send_evt_bytes);
+ kfree(ptr);
+ }
+ break;
+ default:
+ WL_ERR(("%s: Unexpected event! - %d\n", __FUNCTION__, event));
+
+ }
+ return err;
+}
+#endif /* GSCAN_SUPPORT */
+
+static s32
+wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct channel_info channel_inform;
+ struct wl_scan_results *bss_list;
+ struct net_device *ndev = NULL;
+ struct cfg80211_scan_info info = {};
+ u32 len = WL_SCAN_BUF_MAX;
+ s32 err = 0;
+ unsigned long flags;
+
+ WL_DBG(("Enter \n"));
+ if (!wl_get_drv_status(cfg, SCANNING, ndev)) {
+ WL_ERR(("scan is not ready \n"));
+ return err;
+ }
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ mutex_lock(&cfg->usr_sync);
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+ err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &channel_inform,
+ sizeof(channel_inform), false);
+ if (unlikely(err)) {
+ WL_ERR(("scan busy (%d)\n", err));
+ goto scan_done_out;
+ }
+ channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
+ if (unlikely(channel_inform.scan_channel)) {
+
+ WL_DBG(("channel_inform.scan_channel (%d)\n",
+ channel_inform.scan_channel));
+ }
+ cfg->bss_list = cfg->scan_results;
+ bss_list = cfg->bss_list;
+ memset(bss_list, 0, len);
+ bss_list->buflen = htod32(len);
+ err = wldev_ioctl(ndev, WLC_SCAN_RESULTS, bss_list, len, false);
+ if (unlikely(err) && unlikely(!cfg->scan_suppressed)) {
+ WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
+ err = -EINVAL;
+ goto scan_done_out;
+ }
+ bss_list->buflen = dtoh32(bss_list->buflen);
+ bss_list->version = dtoh32(bss_list->version);
+ bss_list->count = dtoh32(bss_list->count);
+
+ err = wl_inform_bss(cfg);
+
+scan_done_out:
+ del_timer_sync(&cfg->scan_timeout);
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ if (cfg->scan_request) {
+ info.aborted = false;
+ cfg80211_scan_done(cfg->scan_request, &info);
+ cfg->scan_request = NULL;
+ }
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+ WL_DBG(("cfg80211_scan_done\n"));
+ mutex_unlock(&cfg->usr_sync);
+ return err;
+}
+
+static s32
+wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+ const struct ether_addr *sa, const struct ether_addr *bssid,
+ u8 **pheader, u32 *body_len, u8 *pbody)
+{
+ struct dot11_management_header *hdr;
+ u32 totlen = 0;
+ s32 err = 0;
+ u8 *offset;
+ u32 prebody_len = *body_len;
+ switch (fc) {
+ case FC_ASSOC_REQ:
+ /* capability, listen interval */
+ totlen = DOT11_ASSOC_REQ_FIXED_LEN;
+ *body_len += DOT11_ASSOC_REQ_FIXED_LEN;
+ break;
+
+ case FC_REASSOC_REQ:
+ /* capability, listen interval, ap address */
+ totlen = DOT11_REASSOC_REQ_FIXED_LEN;
+ *body_len += DOT11_REASSOC_REQ_FIXED_LEN;
+ break;
+ }
+ totlen += DOT11_MGMT_HDR_LEN + prebody_len;
+ *pheader = kzalloc(totlen, GFP_KERNEL);
+ if (*pheader == NULL) {
+ WL_ERR(("memory alloc failed \n"));
+ return -ENOMEM;
+ }
+ hdr = (struct dot11_management_header *) (*pheader);
+ hdr->fc = htol16(fc);
+ hdr->durid = 0;
+ hdr->seq = 0;
+ offset = (u8*)(hdr + 1) + (totlen - DOT11_MGMT_HDR_LEN - prebody_len);
+ bcopy((const char*)da, (u8*)&hdr->da, ETHER_ADDR_LEN);
+ bcopy((const char*)sa, (u8*)&hdr->sa, ETHER_ADDR_LEN);
+ bcopy((const char*)bssid, (u8*)&hdr->bssid, ETHER_ADDR_LEN);
+ if ((pbody != NULL) && prebody_len)
+ bcopy((const char*)pbody, offset, prebody_len);
+ *body_len = totlen;
+ return err;
+}
+
+
+void
+wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ if (timer_pending(&cfg->p2p->listen_timer)) {
+ del_timer_sync(&cfg->p2p->listen_timer);
+ }
+ if (cfg->afx_hdl != NULL) {
+ if (cfg->afx_hdl->dev != NULL) {
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, cfg->afx_hdl->dev);
+ }
+ cfg->afx_hdl->peer_chan = WL_INVALID;
+ }
+ complete(&cfg->act_frm_scan);
+ WL_DBG(("*** Wake UP ** Working afx searching is cleared\n"));
+ } else if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) {
+ if (!(wl_get_p2p_status(cfg, ACTION_TX_COMPLETED) ||
+ wl_get_p2p_status(cfg, ACTION_TX_NOACK)))
+ wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
+
+ WL_DBG(("*** Wake UP ** abort actframe iovar\n"));
+ /* if channel is not zero, "actframe" uses off channel scan.
+ * So abort scan for off channel completion.
+ */
+ if (cfg->af_sent_channel)
+ wl_cfg80211_scan_abort(cfg);
+ }
+#ifdef WL_CFG80211_SYNC_GON
+ else if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+ WL_DBG(("*** Wake UP ** abort listen for next af frame\n"));
+ /* So abort scan to cancel listen */
+ wl_cfg80211_scan_abort(cfg);
+ }
+#endif /* WL_CFG80211_SYNC_GON */
+}
+
+#if defined(WLTDLS)
+bool wl_cfg80211_is_tdls_tunneled_frame(void *frame, u32 frame_len)
+{
+ unsigned char *data;
+
+ if (frame == NULL) {
+ WL_ERR(("Invalid frame \n"));
+ return false;
+ }
+
+ if (frame_len < 5) {
+ WL_ERR(("Invalid frame length [%d] \n", frame_len));
+ return false;
+ }
+
+ data = frame;
+
+ if (!memcmp(data, TDLS_TUNNELED_PRB_REQ, 5) ||
+ !memcmp(data, TDLS_TUNNELED_PRB_RESP, 5)) {
+ WL_DBG(("TDLS Vendor Specific Received type\n"));
+ return true;
+ }
+
+ return false;
+}
+#endif /* WLTDLS */
+
+
+int wl_cfg80211_get_ioctl_version(void)
+{
+ return ioctl_version;
+}
+
+static s32
+wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct ieee80211_supported_band *band;
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ struct ether_addr da;
+ struct ether_addr bssid;
+ bool isfree = false;
+ s32 err = 0;
+ s32 freq;
+ struct net_device *ndev = NULL;
+ wifi_p2p_pub_act_frame_t *act_frm = NULL;
+ wifi_p2p_action_frame_t *p2p_act_frm = NULL;
+ wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL;
+#if defined(WLTDLS) && defined(TDLS_MSG_ONLY_WFD)
+ dhd_pub_t *dhdp;
+#endif /* WLTDLS && TDLS_MSG_ONLY_WFD */
+ wl_event_rx_frame_data_t *rxframe =
+ (wl_event_rx_frame_data_t*)data;
+ u32 event = ntoh32(e->event_type);
+ u8 *mgmt_frame;
+ u8 bsscfgidx = e->bsscfgidx;
+ u32 mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t);
+ u16 channel = ((ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK));
+
+ memset(&bssid, 0, ETHER_ADDR_LEN);
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ if (channel <= CH_MAX_2G_CHANNEL)
+ band = wiphy->bands[NL80211_BAND_2GHZ];
+ else
+ band = wiphy->bands[NL80211_BAND_5GHZ];
+ if (!band) {
+ WL_ERR(("No valid band"));
+ return -EINVAL;
+ }
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
+ freq = ieee80211_channel_to_frequency(channel);
+ (void)band->band;
+#else
+ freq = ieee80211_channel_to_frequency(channel, band->band);
+#endif
+ if (event == WLC_E_ACTION_FRAME_RX) {
+ wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
+ NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync);
+
+ err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
+ if (err < 0)
+ WL_ERR(("WLC_GET_BSSID error %d\n", err));
+ memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+ err = wl_frame_get_mgmt(FC_ACTION, &da, &e->addr, &bssid,
+ &mgmt_frame, &mgmt_frame_len,
+ (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1));
+ if (err < 0) {
+ WL_ERR(("Error in receiving action frame len %d channel %d freq %d\n",
+ mgmt_frame_len, channel, freq));
+ goto exit;
+ }
+ isfree = true;
+ if (wl_cfgp2p_is_pub_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+ act_frm = (wifi_p2p_pub_act_frame_t *)
+ (&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+ } else if (wl_cfgp2p_is_p2p_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+ p2p_act_frm = (wifi_p2p_action_frame_t *)
+ (&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+ (void) p2p_act_frm;
+ } else if (wl_cfgp2p_is_gas_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
+
+ sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)
+ (&mgmt_frame[DOT11_MGMT_HDR_LEN]);
+ if (sd_act_frm && wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
+ if (cfg->next_af_subtype == sd_act_frm->action) {
+ WL_DBG(("We got the right next frame of SD!(%d)\n",
+ sd_act_frm->action));
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+ /* Stop waiting for next AF.
*/
+					wl_stop_wait_next_action_frame(cfg, ndev);
+				}
+			}
+			(void) sd_act_frm;
+#ifdef WLTDLS
+		} else if ((mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_AF_CATEGORY) ||
+				(wl_cfg80211_is_tdls_tunneled_frame(
+					&mgmt_frame[DOT11_MGMT_HDR_LEN],
+					mgmt_frame_len - DOT11_MGMT_HDR_LEN))) {
+			if (mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_AF_CATEGORY) {
+				WL_ERR((" TDLS Action Frame Received type = %d \n",
+					mgmt_frame[DOT11_MGMT_HDR_LEN + 1]));
+			}
+#ifdef TDLS_MSG_ONLY_WFD
+			dhdp = (dhd_pub_t *)(cfg->pub);
+			if (!dhdp->tdls_mode) {
+				WL_DBG((" TDLS Frame filtered \n"));
+				return 0;
+			}
+#else
+			if (mgmt_frame[DOT11_MGMT_HDR_LEN + 1] == TDLS_ACTION_SETUP_RESP) {
+				cfg->tdls_mgmt_frame = mgmt_frame;
+				cfg->tdls_mgmt_frame_len = mgmt_frame_len;
+				cfg->tdls_mgmt_freq = freq;
+				return 0;
+			}
+#endif /* TDLS_MSG_ONLY_WFD */
+#endif /* WLTDLS */
+#ifdef QOS_MAP_SET
+		} else if (mgmt_frame[DOT11_MGMT_HDR_LEN] == DOT11_ACTION_CAT_QOS) {
+			/* update QoS map set table */
+			bcm_tlv_t * qos_map_ie = NULL;
+			if ((qos_map_ie = bcm_parse_tlvs(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+					mgmt_frame_len - DOT11_MGMT_HDR_LEN,
+					DOT11_MNG_QOS_MAP_ID)) != NULL) {
+				WL_DBG((" QoS map set IE found in QoS action frame\n"));
+				if (!cfg->up_table)
+					cfg->up_table = kmalloc(UP_TABLE_MAX, GFP_KERNEL);
+				/* kmalloc() may fail; only update the table if it exists */
+				if (cfg->up_table)
+					wl_set_up_table(cfg->up_table, qos_map_ie);
+			} else {
+				kfree(cfg->up_table);
+				cfg->up_table = NULL;
+			}
+#endif /* QOS_MAP_SET */
+		} else {
+			/*
+			 * If we got a normal action frame and ndev is p2p0,
+			 * we have to change ndev from p2p0 to wlan0.
+			 */
+
+			if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
+				u8 action = 0;
+				if (wl_get_public_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+					mgmt_frame_len - DOT11_MGMT_HDR_LEN, &action) != BCME_OK) {
+					WL_DBG(("Received action is not a public action frame\n"));
+				} else if (cfg->next_af_subtype == action) {
+					WL_DBG(("Received action is the awaited action (%d)\n",
+						action));
+					wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+					/* Stop waiting for next AF. */
+					wl_stop_wait_next_action_frame(cfg, ndev);
+				}
+			}
+		}
+
+		if (act_frm) {
+
+			if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
+				if (cfg->next_af_subtype == act_frm->subtype) {
+					WL_DBG(("We got the right next frame!(%d)\n",
+						act_frm->subtype));
+					wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+					if (cfg->next_af_subtype == P2P_PAF_GON_CONF) {
+						OSL_SLEEP(20);
+					}
+
+					/* Stop waiting for next AF.
*/ + wl_stop_wait_next_action_frame(cfg, ndev); + } + } + } + + wl_cfgp2p_print_actframe(false, &mgmt_frame[DOT11_MGMT_HDR_LEN], + mgmt_frame_len - DOT11_MGMT_HDR_LEN, channel); + /* + * After complete GO Negotiation, roll back to mpc mode + */ + if (act_frm && ((act_frm->subtype == P2P_PAF_GON_CONF) || + (act_frm->subtype == P2P_PAF_PROVDIS_RSP))) { + wldev_iovar_setint(ndev, "mpc", 1); + } + if (act_frm && (act_frm->subtype == P2P_PAF_GON_CONF)) { + WL_DBG(("P2P: GO_NEG_PHASE status cleared \n")); + wl_clr_p2p_status(cfg, GO_NEG_PHASE); + } + } else if (event == WLC_E_PROBREQ_MSG) { + + /* Handle probe reqs frame + * WPS-AP certification 4.2.13 + */ + struct parsed_ies prbreq_ies; + u32 prbreq_ie_len = 0; + bool pbc = 0; + + WL_DBG((" Event WLC_E_PROBREQ_MSG received\n")); + mgmt_frame = (u8 *)(data); + mgmt_frame_len = ntoh32(e->datalen); + + prbreq_ie_len = mgmt_frame_len - DOT11_MGMT_HDR_LEN; + + /* Parse prob_req IEs */ + if (wl_cfg80211_parse_ies(&mgmt_frame[DOT11_MGMT_HDR_LEN], + prbreq_ie_len, &prbreq_ies) < 0) { + WL_ERR(("Prob req get IEs failed\n")); + return 0; + } + if (prbreq_ies.wps_ie != NULL) { + wl_validate_wps_ie((char *)prbreq_ies.wps_ie, prbreq_ies.wps_ie_len, &pbc); + WL_DBG((" wps_ie exist pbc = %d\n", pbc)); + /* if pbc method, send prob_req mgmt frame to upper layer */ + if (!pbc) + return 0; + } else + return 0; + } else { + mgmt_frame = (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1); + + /* wpa supplicant use probe request event for restarting another GON Req. + * but it makes GON Req repetition. + * so if src addr of prb req is same as my target device, + * do not send probe request event during sending action frame. + */ + if (event == WLC_E_P2P_PROBREQ_MSG) { + WL_DBG((" Event %s\n", (event == WLC_E_P2P_PROBREQ_MSG) ? 
+ "WLC_E_P2P_PROBREQ_MSG":"WLC_E_PROBREQ_MSG")); + + + /* Filter any P2P probe reqs arriving during the + * GO-NEG Phase + */ + if (cfg->p2p && +#if defined(P2P_IE_MISSING_FIX) + cfg->p2p_prb_noti && +#endif + wl_get_p2p_status(cfg, GO_NEG_PHASE)) { + WL_DBG(("Filtering P2P probe_req while " + "being in GO-Neg state\n")); + return 0; + } + } + } + + if (discover_cfgdev(cfgdev, cfg)) + WL_DBG(("Rx Managment frame For P2P Discovery Interface \n")); + else + WL_DBG(("Rx Managment frame For Iface (%s) \n", ndev->name)); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) + cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0); +#elif(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) + cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0, GFP_ATOMIC); +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \ + defined(WL_COMPAT_WIRELESS) + cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC); +#else + cfg80211_rx_mgmt(cfgdev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC); +#endif /* LINUX_VERSION >= VERSION(3, 14, 0) */ + + WL_DBG(("mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n", + mgmt_frame_len, ntoh32(e->datalen), channel, freq)); +exit: + if (isfree) + kfree(mgmt_frame); + return 0; +} + +#ifdef WL_SCHED_SCAN +/* If target scan is not reliable, set the below define to "1" to do a + * full escan + */ +#define FULL_ESCAN_ON_PFN_NET_FOUND 0 +static s32 +wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const wl_event_msg_t *e, void *data) +{ + wl_pfn_net_info_t *netinfo, *pnetinfo; + struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); + int err = 0; + struct cfg80211_scan_request *request = NULL; + struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT]; + struct ieee80211_channel *channel = NULL; + int channel_req = 0; + int band = 0; + struct wl_pfn_scanresults *pfn_result = (struct wl_pfn_scanresults *)data; + int n_pfn_results = pfn_result->count; + + WL_DBG(("Enter\n")); + + if (e->event_type == WLC_E_PFN_NET_LOST) { + WL_PNO(("PFN NET LOST event. Do Nothing \n")); + return 0; + } + WL_PNO((">>> PFN NET FOUND event. count:%d \n", n_pfn_results)); + if (n_pfn_results > 0) { + int i; + + if (n_pfn_results > MAX_PFN_LIST_COUNT) + n_pfn_results = MAX_PFN_LIST_COUNT; + pnetinfo = (wl_pfn_net_info_t *)(data + sizeof(wl_pfn_scanresults_t) + - sizeof(wl_pfn_net_info_t)); + + memset(&ssid, 0x00, sizeof(ssid)); + + request = kzalloc(sizeof(*request) + + sizeof(*request->channels) * n_pfn_results, + GFP_KERNEL); + channel = (struct ieee80211_channel *)kzalloc( + (sizeof(struct ieee80211_channel) * n_pfn_results), + GFP_KERNEL); + if (!request || !channel) { + WL_ERR(("No memory")); + err = -ENOMEM; + goto out_err; + } + + request->wiphy = wiphy; + + for (i = 0; i < n_pfn_results; i++) { + netinfo = &pnetinfo[i]; + if (!netinfo) { + WL_ERR(("Invalid netinfo ptr. index:%d", i)); + err = -EINVAL; + goto out_err; + } + WL_PNO((">>> SSID:%s Channel:%d \n", + netinfo->pfnsubnet.SSID, netinfo->pfnsubnet.channel)); + /* PFN result doesn't have all the info which are required by the supplicant + * (For e.g IEs) Do a target Escan so that sched scan results are reported + * via wl_inform_single_bss in the required format. Escan does require the + * scan request in the form of cfg80211_scan_request. For timebeing, create + * cfg80211_scan_request one out of the received PNO event. 
+ */ + ssid[i].ssid_len = MIN(DOT11_MAX_SSID_LEN, netinfo->pfnsubnet.SSID_len); + memcpy(ssid[i].ssid, netinfo->pfnsubnet.SSID, + ssid[i].ssid_len); + request->n_ssids++; + + channel_req = netinfo->pfnsubnet.channel; + band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ + : NL80211_BAND_5GHZ; + channel[i].center_freq = ieee80211_channel_to_frequency(channel_req, band); + channel[i].band = band; + channel[i].flags |= IEEE80211_CHAN_NO_HT40; + request->channels[i] = &channel[i]; + request->n_channels++; + } + + /* assign parsed ssid array */ + if (request->n_ssids) + request->ssids = &ssid[0]; + + if (wl_get_drv_status_all(cfg, SCANNING)) { + /* Abort any on-going scan */ + wl_notify_escan_complete(cfg, ndev, true, true); + } + + if (wl_get_p2p_status(cfg, DISCOVERY_ON)) { + WL_PNO((">>> P2P discovery was ON. Disabling it\n")); + err = wl_cfgp2p_discover_enable_search(cfg, false); + if (unlikely(err)) { + wl_clr_drv_status(cfg, SCANNING, ndev); + goto out_err; + } + p2p_scan(cfg) = false; + } + + wl_set_drv_status(cfg, SCANNING, ndev); +#if FULL_ESCAN_ON_PFN_NET_FOUND + WL_PNO((">>> Doing Full ESCAN on PNO event\n")); + err = wl_do_escan(cfg, wiphy, ndev, NULL); +#else + WL_PNO((">>> Doing targeted ESCAN on PNO event\n")); + err = wl_do_escan(cfg, wiphy, ndev, request); +#endif + if (err) { + wl_clr_drv_status(cfg, SCANNING, ndev); + goto out_err; + } + cfg->sched_scan_running = TRUE; + } + else { + WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n")); + } +out_err: + if (request) + kfree(request); + if (channel) + kfree(channel); + return err; +} +#endif /* WL_SCHED_SCAN */ + +static void wl_init_conf(struct wl_conf *conf) +{ + WL_DBG(("Enter \n")); + conf->frag_threshold = (u32)-1; + conf->rts_threshold = (u32)-1; + conf->retry_short = (u32)-1; + conf->retry_long = (u32)-1; + conf->tx_power = -1; +} + +static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev) +{ + unsigned long flags; + struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev); + + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); + memset(profile, 0, sizeof(struct wl_profile)); + spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); +} + +static void wl_init_event_handler(struct bcm_cfg80211 *cfg) +{ + memset(cfg->evt_handler, 0, sizeof(cfg->evt_handler)); + + cfg->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status; + cfg->evt_handler[WLC_E_AUTH] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_ASSOC] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_LINK] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_DEAUTH_IND] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_DEAUTH] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_DISASSOC_IND] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_ASSOC_IND] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_REASSOC_IND] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_ROAM] = wl_notify_roaming_status; + cfg->evt_handler[WLC_E_MIC_ERROR] = wl_notify_mic_status; + cfg->evt_handler[WLC_E_SET_SSID] = wl_notify_connect_status; + cfg->evt_handler[WLC_E_ACTION_FRAME_RX] = wl_notify_rx_mgmt_frame; + cfg->evt_handler[WLC_E_PROBREQ_MSG] = wl_notify_rx_mgmt_frame; + cfg->evt_handler[WLC_E_P2P_PROBREQ_MSG] = wl_notify_rx_mgmt_frame; + cfg->evt_handler[WLC_E_P2P_DISC_LISTEN_COMPLETE] = wl_cfgp2p_listen_complete; + cfg->evt_handler[WLC_E_ACTION_FRAME_COMPLETE] = wl_cfgp2p_action_tx_complete; + cfg->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete; + cfg->evt_handler[WLC_E_JOIN] = wl_notify_connect_status; + 
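/* The table is indexed directly by firmware event type: wl_event_handler()
+	 * looks up cfg->evt_handler[e->etype] for each dequeued event, so only
+	 * codes below WLC_E_LAST may be mapped here. Several events deliberately
+	 * share wl_notify_connect_status(), which tells them apart via the
+	 * event type carried in the message.
+	 */
+	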
cfg->evt_handler[WLC_E_START] = wl_notify_connect_status; +#ifdef PNO_SUPPORT + cfg->evt_handler[WLC_E_PFN_NET_FOUND] = wl_notify_pfn_status; +#endif /* PNO_SUPPORT */ +#ifdef GSCAN_SUPPORT + cfg->evt_handler[WLC_E_PFN_BEST_BATCHING] = wl_notify_gscan_event; + cfg->evt_handler[WLC_E_PFN_SCAN_COMPLETE] = wl_notify_gscan_event; + cfg->evt_handler[WLC_E_PFN_GSCAN_FULL_RESULT] = wl_notify_gscan_event; + cfg->evt_handler[WLC_E_PFN_SWC] = wl_notify_gscan_event; + cfg->evt_handler[WLC_E_PFN_BSSID_NET_FOUND] = wl_notify_gscan_event; + cfg->evt_handler[WLC_E_PFN_BSSID_NET_LOST] = wl_notify_gscan_event; +#endif /* GSCAN_SUPPORT */ +#ifdef WLTDLS + cfg->evt_handler[WLC_E_TDLS_PEER_EVENT] = wl_tdls_event_handler; +#endif /* WLTDLS */ + cfg->evt_handler[WLC_E_BSSID] = wl_notify_roaming_status; +#ifdef WL_RELMCAST + cfg->evt_handler[WLC_E_RMC_EVENT] = wl_notify_rmc_status; +#endif +#ifdef BT_WIFI_HANDOVER + cfg->evt_handler[WLC_E_BT_WIFI_HANDOVER_REQ] = wl_notify_bt_wifi_handover_req; +#endif +#ifdef WL_NAN + cfg->evt_handler[WLC_E_NAN] = wl_cfgnan_notify_nan_status; + cfg->evt_handler[WLC_E_PROXD] = wl_cfgnan_notify_proxd_status; +#endif /* WL_NAN */ + cfg->evt_handler[WLC_E_CSA_COMPLETE_IND] = wl_csa_complete_ind; +#ifdef DHD_LOSSLESS_ROAMING + cfg->evt_handler[WLC_E_ROAM_PREP] = wl_notify_roam_prep_status; +#endif + cfg->evt_handler[WLC_E_AP_STARTED] = wl_ap_start_ind; +#ifdef CUSTOM_EVENT_PM_WAKE + cfg->evt_handler[WLC_E_EXCESS_PM_WAKE_EVENT] = wl_check_pmstatus; +#endif /* CUSTOM_EVENT_PM_WAKE */ + cfg->evt_handler[WLC_E_PSK_SUP] = wl_notify_idsup_status; +} + +#if defined(STATIC_WL_PRIV_STRUCT) +static void +wl_init_escan_result_buf(struct bcm_cfg80211 *cfg) +{ + cfg->escan_info.escan_buf = DHD_OS_PREALLOC(cfg->pub, + DHD_PREALLOC_WIPHY_ESCAN0, ESCAN_BUF_SIZE); + bzero(cfg->escan_info.escan_buf, ESCAN_BUF_SIZE); +} + +static void +wl_deinit_escan_result_buf(struct bcm_cfg80211 *cfg) +{ + cfg->escan_info.escan_buf = NULL; + +} +#endif /* STATIC_WL_PRIV_STRUCT */ + +static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg) +{ + WL_DBG(("Enter \n")); + + cfg->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL); + if (unlikely(!cfg->scan_results)) { + WL_ERR(("Scan results alloc failed\n")); + goto init_priv_mem_out; + } + cfg->conf = (void *)kzalloc(sizeof(*cfg->conf), GFP_KERNEL); + if (unlikely(!cfg->conf)) { + WL_ERR(("wl_conf alloc failed\n")); + goto init_priv_mem_out; + } + cfg->scan_req_int = + (void *)kzalloc(sizeof(*cfg->scan_req_int), GFP_KERNEL); + if (unlikely(!cfg->scan_req_int)) { + WL_ERR(("Scan req alloc failed\n")); + goto init_priv_mem_out; + } + cfg->ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL); + if (unlikely(!cfg->ioctl_buf)) { + WL_ERR(("Ioctl buf alloc failed\n")); + goto init_priv_mem_out; + } + cfg->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL); + if (unlikely(!cfg->escan_ioctl_buf)) { + WL_ERR(("Ioctl buf alloc failed\n")); + goto init_priv_mem_out; + } + cfg->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); + if (unlikely(!cfg->extra_buf)) { + WL_ERR(("Extra buf alloc failed\n")); + goto init_priv_mem_out; + } + cfg->pmk_list = (void *)kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL); + if (unlikely(!cfg->pmk_list)) { + WL_ERR(("pmk list alloc failed\n")); + goto init_priv_mem_out; + } +#if defined(STATIC_WL_PRIV_STRUCT) + cfg->conn_info = (void *)kzalloc(sizeof(*cfg->conn_info), GFP_KERNEL); + if (unlikely(!cfg->conn_info)) { + WL_ERR(("cfg->conn_info alloc failed\n")); + goto init_priv_mem_out; + } + cfg->ie = (void 
*)kzalloc(sizeof(*cfg->ie), GFP_KERNEL); + if (unlikely(!cfg->ie)) { + WL_ERR(("cfg->ie alloc failed\n")); + goto init_priv_mem_out; + } + wl_init_escan_result_buf(cfg); +#endif /* STATIC_WL_PRIV_STRUCT */ + cfg->afx_hdl = (void *)kzalloc(sizeof(*cfg->afx_hdl), GFP_KERNEL); + if (unlikely(!cfg->afx_hdl)) { + WL_ERR(("afx hdl alloc failed\n")); + goto init_priv_mem_out; + } else { + init_completion(&cfg->act_frm_scan); + init_completion(&cfg->wait_next_af); + + INIT_WORK(&cfg->afx_hdl->work, wl_cfg80211_afx_handler); + } +#ifdef WLTDLS + if (cfg->tdls_mgmt_frame) { + kfree(cfg->tdls_mgmt_frame); + cfg->tdls_mgmt_frame = NULL; + } +#endif /* WLTDLS */ + return 0; + +init_priv_mem_out: + wl_deinit_priv_mem(cfg); + + return -ENOMEM; +} + +static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg) +{ + kfree(cfg->scan_results); + cfg->scan_results = NULL; + kfree(cfg->conf); + cfg->conf = NULL; + kfree(cfg->scan_req_int); + cfg->scan_req_int = NULL; + kfree(cfg->ioctl_buf); + cfg->ioctl_buf = NULL; + kfree(cfg->escan_ioctl_buf); + cfg->escan_ioctl_buf = NULL; + kfree(cfg->extra_buf); + cfg->extra_buf = NULL; + kfree(cfg->pmk_list); + cfg->pmk_list = NULL; +#if defined(STATIC_WL_PRIV_STRUCT) + kfree(cfg->conn_info); + cfg->conn_info = NULL; + kfree(cfg->ie); + cfg->ie = NULL; + wl_deinit_escan_result_buf(cfg); +#endif /* STATIC_WL_PRIV_STRUCT */ + if (cfg->afx_hdl) { + cancel_work_sync(&cfg->afx_hdl->work); + kfree(cfg->afx_hdl); + cfg->afx_hdl = NULL; + } + +} + +static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg) +{ + int ret = 0; + WL_DBG(("Enter \n")); + + /* Do not use DHD in cfg driver */ + cfg->event_tsk.thr_pid = -1; + + PROC_START(wl_event_handler, cfg, &cfg->event_tsk, 0, "wl_event_handler"); + if (cfg->event_tsk.thr_pid < 0) + ret = -ENOMEM; + return ret; +} + +static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg) +{ + if (cfg->event_tsk.thr_pid >= 0) + PROC_STOP(&cfg->event_tsk); +} + +void wl_terminate_event_handler(void) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + if (cfg) { + wl_destroy_event_handler(cfg); + wl_flush_eq(cfg); + } +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +static void wl_scan_timeout(struct timer_list *t) +#else +static void wl_scan_timeout(unsigned long data) +#endif +{ + wl_event_msg_t msg; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + struct bcm_cfg80211 *cfg = from_timer(cfg, t, scan_timeout); +#else + struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data; +#endif + struct wireless_dev *wdev = NULL; + struct net_device *ndev = NULL; + struct wl_scan_results *bss_list; + struct wl_bss_info *bi = NULL; + s32 i; + u32 channel; +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); + uint32 prev_memdump_mode = dhdp->memdump_enabled; +#endif /* DHD_DEBUG && BCMPCIE */ + + if (!(cfg->scan_request)) { + WL_ERR(("timer expired but no scan request\n")); + return; + } + + bss_list = wl_escan_get_buf(cfg, FALSE); + if (!bss_list) { + WL_ERR(("bss_list is null. 
Didn't receive any partial scan results\n")); + } else { + WL_ERR(("scanned AP count (%d)\n", bss_list->count)); + + bi = next_bss(bss_list, bi); + for_each_bss(bss_list, bi, i) { + channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec)); + WL_ERR(("SSID :%s Channel :%d\n", bi->SSID, channel)); + } + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) + if (cfg->scan_request->dev) + wdev = cfg->scan_request->dev->ieee80211_ptr; +#else + wdev = cfg->scan_request->wdev; +#endif /* LINUX_VERSION < KERNEL_VERSION(3, 6, 0) */ + if (!wdev) { + WL_ERR(("No wireless_dev present\n")); + return; + } + ndev = wdev_to_wlc_ndev(wdev, cfg); + + bzero(&msg, sizeof(wl_event_msg_t)); + WL_ERR(("timer expired\n")); +#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP) + if (dhdp->memdump_enabled) { + dhdp->memdump_enabled = DUMP_MEMFILE; + dhdp->memdump_type = DUMP_TYPE_SCAN_TIMEOUT; + dhd_bus_mem_dump(dhdp); + dhdp->memdump_enabled = prev_memdump_mode; + } +#endif /* DHD_DEBUG && BCMPCIE */ + msg.event_type = hton32(WLC_E_ESCAN_RESULT); + msg.status = hton32(WLC_E_STATUS_TIMEOUT); + msg.reason = 0xFFFFFFFF; + wl_cfg80211_event(ndev, &msg, NULL); +#ifdef CUSTOMER_HW4_DEBUG + if (!wl_scan_timeout_dbg_enabled) + wl_scan_timeout_dbg_set(); +#endif /* CUSTOMER_HW4_DEBUG */ +} + +#ifdef DHD_LOSSLESS_ROAMING +static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg) +{ + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); + + /* restore prec_map to ALLPRIO */ + dhdp->dequeue_prec_map = ALLPRIO; + if (timer_pending(&cfg->roam_timeout)) { + del_timer_sync(&cfg->roam_timeout); + } + +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +static void wl_roam_timeout(struct timer_list *t) +#else +static void wl_roam_timeout(unsigned long data) +#endif +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + struct bcm_cfg80211 *cfg = from_timer(cfg, t, roam_timeout); +#else + struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data; +#endif + dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub); + + WL_ERR(("roam timer expired\n")); + + /* restore prec_map to ALLPRIO */ + dhdp->dequeue_prec_map = ALLPRIO; +} + +#endif /* DHD_LOSSLESS_ROAMING */ + +static s32 +wl_cfg80211_netdev_notifier_call(struct notifier_block * nb, + unsigned long state, void *ptr) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)) + struct net_device *dev = ptr; +#else + struct net_device *dev = netdev_notifier_info_to_dev(ptr); +#endif /* LINUX_VERSION < VERSION(3, 11, 0) */ + struct wireless_dev *wdev = ndev_to_wdev(dev); + struct bcm_cfg80211 *cfg = g_bcm_cfg; + +#ifdef DHD_IFDEBUG + WL_ERR(("Enter \n")); +#endif + + if (!wdev || !cfg || dev == bcmcfg_to_prmry_ndev(cfg)) + return NOTIFY_DONE; + + switch (state) { + case NETDEV_DOWN: + { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)) + int max_wait_timeout = 2; + int max_wait_count = 100; + int refcnt = 0; + unsigned long limit = jiffies + max_wait_timeout * HZ; +#ifdef DHD_IFDEBUG + WL_ERR(("NETDEV_DOWN(+) wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev)); +#endif + while (work_pending(&wdev->cleanup_work)) { + if (refcnt%5 == 0) { + WL_ERR(("[NETDEV_DOWN] wait for " + "complete of cleanup_work" + " (%d th)\n", refcnt)); + } + if (!time_before(jiffies, limit)) { + WL_ERR(("[NETDEV_DOWN] cleanup_work" + " of CFG80211 is not" + " completed in %d sec\n", + max_wait_timeout)); + break; + } + if (refcnt >= max_wait_count) { + WL_ERR(("[NETDEV_DOWN] cleanup_work" + " of CFG80211 is not" + " completed in %d loop\n", + max_wait_count)); + break; + } + 
set_current_state(TASK_INTERRUPTIBLE);
+				(void)schedule_timeout(100);
+				set_current_state(TASK_RUNNING);
+				refcnt++;
+			}
+#ifdef DHD_IFDEBUG
+			WL_ERR(("NETDEV_DOWN(-) wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev));
+#endif
+#endif /* LINUX_VERSION < VERSION(3, 11, 0) */
+			break;
+		}
+		case NETDEV_UNREGISTER:
+#ifdef DHD_IFDEBUG
+			WL_ERR(("NETDEV_UNREGISTER(+) wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev));
+#endif
+			/* after calling list_del_rcu(&wdev->list) */
+			wl_cfg80211_clear_per_bss_ies(cfg,
+				wl_get_bssidx_by_wdev(cfg, wdev));
+			wl_dealloc_netinfo_by_wdev(cfg, wdev);
+#ifdef DHD_IFDEBUG
+			WL_ERR(("NETDEV_UNREGISTER(-) wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev));
+#endif
+			break;
+		case NETDEV_GOING_DOWN:
+			/*
+			 * At NETDEV_DOWN, wdev_cleanup_work() will be called. Before
+			 * it runs, it checks whether a scan is still in progress; if
+			 * one is, wdev_cleanup_work() hits a WARN_ON and forces the
+			 * scan done. Abort any pending scan here first to avoid that.
+			 */
+#ifdef DHD_IFDEBUG
+			WL_ERR(("NETDEV_GOING_DOWN wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev));
+#endif
+			if (wl_get_drv_status(cfg, SCANNING, dev))
+				wl_notify_escan_complete(cfg, dev, true, true);
+			break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block wl_cfg80211_netdev_notifier = {
+	.notifier_call = wl_cfg80211_netdev_notifier_call,
+};
+
+/*
+ * Make sure we won't register the same notifier twice; otherwise a loop is
+ * likely to be created in the kernel notifier linked list (with 'next'
+ * pointing to itself).
+ */
+static bool wl_cfg80211_netdev_notifier_registered = FALSE;
+
+static void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg)
+{
+	struct wireless_dev *wdev = NULL;
+	struct net_device *ndev = NULL;
+
+	if (!cfg->scan_request)
+		return;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+	if (cfg->scan_request->dev)
+		wdev = cfg->scan_request->dev->ieee80211_ptr;
+#else
+	wdev = cfg->scan_request->wdev;
+#endif /* LINUX_VERSION < KERNEL_VERSION(3, 6, 0) */
+
+	if (!wdev) {
+		WL_ERR(("No wireless_dev present\n"));
+		return;
+	}
+
+	ndev = wdev_to_wlc_ndev(wdev, cfg);
+	wl_notify_escan_complete(cfg, ndev, true, true);
+	WL_ERR(("Scan aborted!\n"));
+}
+
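+/* How the abort below works: wl_cfg80211_scan_alloc_params(-1, 0, &params_size)
+ * builds a WLC_SCAN request with room for a single channel and zero SSIDs, and
+ * the -1 channel value is what the firmware scan engine treats as a request to
+ * abort the scan in progress. The !in_atomic() guard exists because the ioctl
+ * path may sleep.
+ */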
\n")); +} + +static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg) +{ + wl_scan_params_t *params = NULL; + s32 params_size = 0; + s32 err = BCME_OK; + struct net_device *dev = bcmcfg_to_prmry_ndev(cfg); + if (!in_atomic()) { + /* Our scan params only need space for 1 channel and 0 ssids */ + params = wl_cfg80211_scan_alloc_params(-1, 0, ¶ms_size); + if (params == NULL) { + WL_ERR(("scan params allocation failed \n")); + err = -ENOMEM; + } else { + /* Do a scan abort to stop the driver's scan engine */ + err = wldev_ioctl(dev, WLC_SCAN, params, params_size, true); + if (err < 0) { + WL_ERR(("scan abort failed \n")); + } + kfree(params); + } + } +#ifdef WLTDLS + if (cfg->tdls_mgmt_frame) { + kfree(cfg->tdls_mgmt_frame); + cfg->tdls_mgmt_frame = NULL; + } +#endif /* WLTDLS */ +} + +static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg, + struct net_device *ndev, + bool aborted, bool fw_abort) +{ + s32 err = BCME_OK; + unsigned long flags; + struct net_device *dev; + struct cfg80211_scan_info info = {}; + WL_DBG(("Enter \n")); + + mutex_lock(&cfg->scan_complete); + + if (!ndev) { + WL_ERR(("ndev is null\n")); + err = BCME_ERROR; + goto out; + } + + if (cfg->escan_info.ndev != ndev) { + WL_ERR(("ndev is different %p %p\n", cfg->escan_info.ndev, ndev)); + err = BCME_ERROR; + goto out; + } + + if (cfg->scan_request) { + dev = bcmcfg_to_prmry_ndev(cfg); +#if defined(WL_ENABLE_P2P_IF) + if (cfg->scan_request->dev != cfg->p2p_net) + dev = cfg->scan_request->dev; +#elif defined(WL_CFG80211_P2P_DEV_IF) + if (cfg->scan_request->wdev->iftype != NL80211_IFTYPE_P2P_DEVICE) { +#ifdef DHD_IFDEBUG + WL_ERR(("%s: dev: %p\n", __FUNCTION__, cfg->scan_request->wdev->netdev)); +#endif + dev = cfg->scan_request->wdev->netdev; + } +#endif /* WL_ENABLE_P2P_IF */ + } + else { + WL_DBG(("cfg->scan_request is NULL may be internal scan." 
+ "doing scan_abort for ndev %p primary %p", + ndev, bcmcfg_to_prmry_ndev(cfg))); + dev = ndev; + } + if (fw_abort && !in_atomic()) + wl_cfg80211_scan_abort(cfg); + if (timer_pending(&cfg->scan_timeout)) + del_timer_sync(&cfg->scan_timeout); +#if defined(ESCAN_RESULT_PATCH) + if (likely(cfg->scan_request)) { + cfg->bss_list = wl_escan_get_buf(cfg, aborted); + wl_inform_bss(cfg); + } +#endif /* ESCAN_RESULT_PATCH */ + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); +#ifdef WL_SCHED_SCAN + if (cfg->sched_scan_req && !cfg->scan_request) { + WL_PNO((">>> REPORTING SCHED SCAN RESULTS \n")); + if (!aborted) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) + cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy, 0); +#else + cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy); +#endif + + cfg->sched_scan_running = FALSE; + cfg->sched_scan_req = NULL; + } +#endif /* WL_SCHED_SCAN */ + if (likely(cfg->scan_request)) { + info.aborted = aborted; + cfg80211_scan_done(cfg->scan_request, &info); + cfg->scan_request = NULL; + DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub)); + DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub)); + } + if (p2p_is_on(cfg)) + wl_clr_p2p_status(cfg, SCANNING); + wl_clr_drv_status(cfg, SCANNING, dev); + spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); + +out: + mutex_unlock(&cfg->scan_complete); + return err; +} + +#ifdef ESCAN_BUF_OVERFLOW_MGMT +static void +wl_cfg80211_find_removal_candidate(wl_bss_info_t *bss, removal_element_t *candidate) +{ + int idx; + for (idx = 0; idx < BUF_OVERFLOW_MGMT_COUNT; idx++) { + int len = BUF_OVERFLOW_MGMT_COUNT - idx - 1; + if (bss->RSSI < candidate[idx].RSSI) { + if (len) + memcpy(&candidate[idx + 1], &candidate[idx], + sizeof(removal_element_t) * len); + candidate[idx].RSSI = bss->RSSI; + candidate[idx].length = bss->length; + memcpy(&candidate[idx].BSSID, &bss->BSSID, ETHER_ADDR_LEN); + return; + } + } +} + +static void +wl_cfg80211_remove_lowRSSI_info(wl_scan_results_t *list, removal_element_t *candidate, + wl_bss_info_t *bi) +{ + int idx1, idx2; + int total_delete_len = 0; + for (idx1 = 0; idx1 < BUF_OVERFLOW_MGMT_COUNT; idx1++) { + int cur_len = WL_SCAN_RESULTS_FIXED_SIZE; + wl_bss_info_t *bss = NULL; + if (candidate[idx1].RSSI >= bi->RSSI) + continue; + for (idx2 = 0; idx2 < list->count; idx2++) { + bss = bss ? 
(wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) : + list->bss_info; + if (!bcmp(&candidate[idx1].BSSID, &bss->BSSID, ETHER_ADDR_LEN) && + candidate[idx1].RSSI == bss->RSSI && + candidate[idx1].length == dtoh32(bss->length)) { + u32 delete_len = dtoh32(bss->length); + WL_DBG(("delete scan info of " MACDBG " to add new AP\n", + MAC2STRDBG(bss->BSSID.octet))); + if (idx2 < list->count -1) { + memmove((u8 *)bss, (u8 *)bss + delete_len, + list->buflen - cur_len - delete_len); + } + list->buflen -= delete_len; + list->count--; + total_delete_len += delete_len; + /* if delete_len is greater than or equal to result length */ + if (total_delete_len >= bi->length) { + return; + } + break; + } + cur_len += dtoh32(bss->length); + } + } +} +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + +static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + s32 err = BCME_OK; + s32 status = ntoh32(e->status); + wl_bss_info_t *bi; + wl_escan_result_t *escan_result; + wl_bss_info_t *bss = NULL; + wl_scan_results_t *list; + wifi_p2p_ie_t * p2p_ie; + struct net_device *ndev = NULL; + u32 bi_length; + u32 i; + u8 *p2p_dev_addr = NULL; + + WL_DBG((" enter event type : %d, status : %d \n", + ntoh32(e->event_type), ntoh32(e->status))); + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + mutex_lock(&cfg->usr_sync); + /* P2P SCAN is coming from primary interface */ + if (wl_get_p2p_status(cfg, SCANNING)) { + if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) + ndev = cfg->afx_hdl->dev; + else + ndev = cfg->escan_info.ndev; + + } + if (!ndev || (!wl_get_drv_status(cfg, SCANNING, ndev) && !cfg->sched_scan_running)) { + WL_ERR(("escan is not ready ndev %p drv_status 0x%x e_type %d e_states %d\n", + ndev, wl_get_drv_status(cfg, SCANNING, ndev), + ntoh32(e->event_type), ntoh32(e->status))); + goto exit; + } + escan_result = (wl_escan_result_t *)data; + + if (status == WLC_E_STATUS_PARTIAL) { + WL_INFORM(("WLC_E_STATUS_PARTIAL \n")); + if (!escan_result) { + WL_ERR(("Invalid escan result (NULL pointer)\n")); + goto exit; + } + if (dtoh16(escan_result->bss_count) != 1) { + WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count)); + goto exit; + } + bi = escan_result->bss_info; + if (!bi) { + WL_ERR(("Invalid escan bss info (NULL pointer)\n")); + goto exit; + } + bi_length = dtoh32(bi->length); + if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) { + WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length)); + goto exit; + } + if (wl_escan_check_sync_id(status, escan_result->sync_id, + cfg->escan_info.cur_sync_id) < 0) + goto exit; + + if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) { + if (dtoh16(bi->capability) & DOT11_CAP_IBSS) { + WL_DBG(("Ignoring IBSS result\n")); + goto exit; + } + } + + if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) { + p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length); + if (p2p_dev_addr && !memcmp(p2p_dev_addr, + cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) { + s32 channel = wf_chspec_ctlchan( + wl_chspec_driver_to_host(bi->chanspec)); + + if ((channel > MAXCHANNEL) || (channel <= 0)) + channel = WL_INVALID; + else + WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found," + " channel : %d\n", + MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet), + channel)); + + wl_clr_p2p_status(cfg, SCANNING); + cfg->afx_hdl->peer_chan = channel; + complete(&cfg->act_frm_scan); + goto exit; + } + + } else { + int cur_len = WL_SCAN_RESULTS_FIXED_SIZE; +#ifdef 
ESCAN_BUF_OVERFLOW_MGMT + removal_element_t candidate[BUF_OVERFLOW_MGMT_COUNT]; + int remove_lower_rssi = FALSE; + + bzero(candidate, sizeof(removal_element_t)*BUF_OVERFLOW_MGMT_COUNT); +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + + list = wl_escan_get_buf(cfg, FALSE); + if (scan_req_match(cfg)) { + /* p2p scan && allow only probe response */ + if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) && + (bi->flags & WL_BSS_FLAGS_FROM_BEACON)) + goto exit; + if ((p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset, + bi->ie_length)) == NULL) { + WL_ERR(("Couldn't find P2PIE in probe" + " response/beacon\n")); + goto exit; + } + } +#ifdef ESCAN_BUF_OVERFLOW_MGMT + if (bi_length > ESCAN_BUF_SIZE - list->buflen) + remove_lower_rssi = TRUE; +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + + for (i = 0; i < list->count; i++) { + bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) + : list->bss_info; +#ifdef ESCAN_BUF_OVERFLOW_MGMT + WL_TRACE(("%s("MACDBG"), i=%d bss: RSSI %d list->count %d\n", + bss->SSID, MAC2STRDBG(bss->BSSID.octet), + i, bss->RSSI, list->count)); + + if (remove_lower_rssi) + wl_cfg80211_find_removal_candidate(bss, candidate); +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + + if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) && + (CHSPEC_BAND(wl_chspec_driver_to_host(bi->chanspec)) + == CHSPEC_BAND(wl_chspec_driver_to_host(bss->chanspec))) && + bi->SSID_len == bss->SSID_len && + !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) { + + /* do not allow beacon data to update + *the data recd from a probe response + */ + if (!(bss->flags & WL_BSS_FLAGS_FROM_BEACON) && + (bi->flags & WL_BSS_FLAGS_FROM_BEACON)) + goto exit; + + WL_DBG(("%s("MACDBG"), i=%d prev: RSSI %d" + " flags 0x%x, new: RSSI %d flags 0x%x\n", + bss->SSID, MAC2STRDBG(bi->BSSID.octet), i, + bss->RSSI, bss->flags, bi->RSSI, bi->flags)); + + if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == + (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) { + /* preserve max RSSI if the measurements are + * both on-channel or both off-channel + */ + WL_SCAN(("%s("MACDBG"), same onchan" + ", RSSI: prev %d new %d\n", + bss->SSID, MAC2STRDBG(bi->BSSID.octet), + bss->RSSI, bi->RSSI)); + bi->RSSI = MAX(bss->RSSI, bi->RSSI); + } else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) && + (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) { + /* preserve the on-channel rssi measurement + * if the new measurement is off channel + */ + WL_SCAN(("%s("MACDBG"), prev onchan" + ", RSSI: prev %d new %d\n", + bss->SSID, MAC2STRDBG(bi->BSSID.octet), + bss->RSSI, bi->RSSI)); + bi->RSSI = bss->RSSI; + bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL; + } + if (dtoh32(bss->length) != bi_length) { + u32 prev_len = dtoh32(bss->length); + + WL_SCAN(("bss info replacement" + " is occured(bcast:%d->probresp%d)\n", + bss->ie_length, bi->ie_length)); + WL_DBG(("%s("MACDBG"), replacement!(%d -> %d)\n", + bss->SSID, MAC2STRDBG(bi->BSSID.octet), + prev_len, bi_length)); + + if (list->buflen - prev_len + bi_length + > ESCAN_BUF_SIZE) { + WL_ERR(("Buffer is too small: keep the" + " previous result of this AP\n")); + /* Only update RSSI */ + bss->RSSI = bi->RSSI; + bss->flags |= (bi->flags + & WL_BSS_FLAGS_RSSI_ONCHANNEL); + goto exit; + } + + if (i < list->count - 1) { + /* memory copy required by this case only */ + memmove((u8 *)bss + bi_length, + (u8 *)bss + prev_len, + list->buflen - cur_len - prev_len); + } + list->buflen -= prev_len; + list->buflen += bi_length; + } + list->version = dtoh32(bi->version); + memcpy((u8 *)bss, (u8 *)bi, bi_length); + goto exit; + } + cur_len += 
dtoh32(bss->length); + } + if (bi_length > ESCAN_BUF_SIZE - list->buflen) { +#ifdef ESCAN_BUF_OVERFLOW_MGMT + wl_cfg80211_remove_lowRSSI_info(list, candidate, bi); + if (bi_length > ESCAN_BUF_SIZE - list->buflen) { + WL_DBG(("RSSI(" MACDBG ") is too low(%d) to add Buffer\n", + MAC2STRDBG(bi->BSSID.octet), bi->RSSI)); + goto exit; + } +#else + WL_ERR(("Buffer is too small: ignoring\n")); + goto exit; +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + } + + memcpy(&(((char *)list)[list->buflen]), bi, bi_length); + list->version = dtoh32(bi->version); + list->buflen += bi_length; + list->count++; + + /* + * !Broadcast && number of ssid = 1 && number of channels =1 + * means specific scan to association + */ + if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) { + WL_ERR(("P2P assoc scan fast aborted.\n")); + wl_notify_escan_complete(cfg, cfg->escan_info.ndev, false, true); + goto exit; + } + } + } + else if (status == WLC_E_STATUS_SUCCESS) { + cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + wl_escan_print_sync_id(status, cfg->escan_info.cur_sync_id, + escan_result->sync_id); + + if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) { + WL_INFORM(("ACTION FRAME SCAN DONE\n")); + wl_clr_p2p_status(cfg, SCANNING); + wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev); + if (cfg->afx_hdl->peer_chan == WL_INVALID) + complete(&cfg->act_frm_scan); + } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) { + WL_INFORM(("ESCAN COMPLETED\n")); + cfg->bss_list = wl_escan_get_buf(cfg, FALSE); + if (!scan_req_match(cfg)) { + WL_TRACE_HW4(("SCAN COMPLETED: scanned AP count=%d\n", + cfg->bss_list->count)); + } + wl_inform_bss(cfg); + wl_notify_escan_complete(cfg, ndev, false, false); + } + wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT); +#ifdef CUSTOMER_HW4_DEBUG + if (wl_scan_timeout_dbg_enabled) + wl_scan_timeout_dbg_clear(); +#endif /* CUSTOMER_HW4_DEBUG */ + } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) || + (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) || + (status == WLC_E_STATUS_NEWASSOC)) { + /* Handle all cases of scan abort */ + cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + wl_escan_print_sync_id(status, escan_result->sync_id, + cfg->escan_info.cur_sync_id); + WL_DBG(("ESCAN ABORT reason: %d\n", status)); + if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) { + WL_INFORM(("ACTION FRAME SCAN DONE\n")); + wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev); + wl_clr_p2p_status(cfg, SCANNING); + if (cfg->afx_hdl->peer_chan == WL_INVALID) + complete(&cfg->act_frm_scan); + } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) { + WL_INFORM(("ESCAN ABORTED\n")); + cfg->bss_list = wl_escan_get_buf(cfg, TRUE); + if (!scan_req_match(cfg)) { + WL_TRACE_HW4(("scan_req_match=0: scanned AP count=%d\n", + cfg->bss_list->count)); + } + wl_inform_bss(cfg); + wl_notify_escan_complete(cfg, ndev, true, false); + } else { + /* If there is no pending host initiated scan, do nothing */ + WL_DBG(("ESCAN ABORT: No pending scans. 
Ignoring event.\n")); + } + wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT); + } else if (status == WLC_E_STATUS_TIMEOUT) { + WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request)); + WL_ERR(("reason[0x%x]\n", e->reason)); + if (e->reason == 0xFFFFFFFF) { + wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true); + } + } else { + WL_ERR(("unexpected Escan Event %d : abort\n", status)); + cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + wl_escan_print_sync_id(status, escan_result->sync_id, + cfg->escan_info.cur_sync_id); + if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) { + WL_INFORM(("ACTION FRAME SCAN DONE\n")); + wl_clr_p2p_status(cfg, SCANNING); + wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev); + if (cfg->afx_hdl->peer_chan == WL_INVALID) + complete(&cfg->act_frm_scan); + } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) { + cfg->bss_list = wl_escan_get_buf(cfg, TRUE); + if (!scan_req_match(cfg)) { + WL_TRACE_HW4(("SCAN ABORTED(UNEXPECTED): " + "scanned AP count=%d\n", + cfg->bss_list->count)); + } + wl_inform_bss(cfg); + wl_notify_escan_complete(cfg, ndev, true, false); + } + wl_escan_increment_sync_id(cfg, 2); + } +exit: + mutex_unlock(&cfg->usr_sync); + return err; +} + +static void wl_cfg80211_concurrent_roam(struct bcm_cfg80211 *cfg, int enable) +{ + u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED); + bool p2p_connected = wl_cfgp2p_vif_created(cfg); + struct net_info *iter, *next; + + if (!cfg->roamoff_on_concurrent) + return; + if (enable && (p2p_connected||(connected_cnt > 1))) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + if (iter->ndev && iter->wdev && + iter->wdev->iftype == NL80211_IFTYPE_STATION) { + if (wldev_iovar_setint(iter->ndev, "roam_off", TRUE) + == BCME_OK) { + iter->roam_off = TRUE; + } + else { + WL_ERR(("error to enable roam_off\n")); + } + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + } + else if (!enable) { +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + if (iter->ndev && iter->wdev && + iter->wdev->iftype == NL80211_IFTYPE_STATION) { + if (iter->roam_off != WL_INVALID) { + if (wldev_iovar_setint(iter->ndev, "roam_off", FALSE) + == BCME_OK) { + iter->roam_off = FALSE; + } + else { + WL_ERR(("error to disable roam_off\n")); + } + } + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + } + return; +} + +static void wl_cfg80211_determine_vsdb_mode(struct bcm_cfg80211 *cfg) +{ + struct net_info *iter, *next; + u32 ctl_chan = 0; + u32 chanspec = 0; + u32 pre_ctl_chan = 0; + u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED); + cfg->vsdb_mode = false; + + if (connected_cnt <= 1) { + return; + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + /* p2p discovery iface ndev 
could be null */ + if (iter->ndev) { + chanspec = 0; + ctl_chan = 0; + if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) { + if (wldev_iovar_getint(iter->ndev, "chanspec", + (s32 *)&chanspec) == BCME_OK) { + chanspec = wl_chspec_driver_to_host(chanspec); + ctl_chan = wf_chspec_ctlchan(chanspec); + wl_update_prof(cfg, iter->ndev, NULL, + &ctl_chan, WL_PROF_CHAN); + } + if (!cfg->vsdb_mode) { + if (!pre_ctl_chan && ctl_chan) + pre_ctl_chan = ctl_chan; + else if (pre_ctl_chan && (pre_ctl_chan != ctl_chan)) { + cfg->vsdb_mode = true; + } + } + } + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + WL_ERR(("%s concurrency is enabled\n", cfg->vsdb_mode ? "Multi Channel" : "Same Channel")); + return; +} + +#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF) +extern int g_frameburst; +#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */ + +static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info, + enum wl_status state, bool set) +{ + s32 pm = PM_FAST; + s32 err = BCME_OK; + u32 mode; + u32 chan = 0; + struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg); + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + if (dhd->busstate == DHD_BUS_DOWN) { + WL_ERR(("%s : busstate is DHD_BUS_DOWN!\n", __FUNCTION__)); + return 0; + } + WL_DBG(("Enter state %d set %d _net_info->pm_restore %d iface %s\n", + state, set, _net_info->pm_restore, _net_info->ndev->name)); + + if (state != WL_STATUS_CONNECTED) + return 0; + mode = wl_get_mode_by_netdev(cfg, _net_info->ndev); + if (set) { + wl_cfg80211_concurrent_roam(cfg, 1); + wl_cfg80211_determine_vsdb_mode(cfg); + if (mode == WL_MODE_AP) { + if (wl_add_remove_eventmsg(primary_dev, WLC_E_P2P_PROBREQ_MSG, false)) + WL_ERR((" failed to unset WLC_E_P2P_PROPREQ_MSG\n")); + } + + pm = PM_OFF; + if ((err = wldev_ioctl(_net_info->ndev, WLC_SET_PM, &pm, + sizeof(pm), true)) != 0) { + if (err == -ENODEV) + WL_DBG(("%s:netdev not ready\n", + _net_info->ndev->name)); + else + WL_ERR(("%s:error (%d)\n", + _net_info->ndev->name, err)); + + wl_cfg80211_update_power_mode(_net_info->ndev); + } + wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_SHORT); +#if defined(WLTDLS) + if (wl_cfg80211_is_concurrent_mode()) { + err = wldev_iovar_setint(primary_dev, "tdls_enable", 0); + } +#endif /* defined(WLTDLS) */ + +#ifdef DISABLE_FRAMEBURST_VSDB +#ifdef USE_WFA_CERT_CONF + if (g_frameburst) +#endif /* USE_WFA_CERT_CONF */ + { + if (wl_cfg80211_is_concurrent_mode()) { + int frameburst = 0; + if (wldev_ioctl(primary_dev, WLC_SET_FAKEFRAG, &frameburst, + sizeof(frameburst), true) != 0) { + WL_DBG(("frameburst set error\n")); + } + WL_DBG(("Frameburst Disabled\n")); + } + } +#endif /* DISABLE_FRAMEBURST_VSDB */ + } else { /* clear */ + chan = 0; + /* clear chan information when the net device is disconnected */ + wl_update_prof(cfg, _net_info->ndev, NULL, &chan, WL_PROF_CHAN); + wl_cfg80211_determine_vsdb_mode(cfg); + if (primary_dev == _net_info->ndev) { + pm = PM_FAST; + if ((err = wldev_ioctl(_net_info->ndev, WLC_SET_PM, &pm, + sizeof(pm), true)) != 0) { + if (err == -ENODEV) + WL_DBG(("%s:netdev not ready\n", + _net_info->ndev->name)); + else + WL_ERR(("%s:error (%d)\n", + _net_info->ndev->name, err)); + + wl_cfg80211_update_power_mode(_net_info->ndev); + } + } + + wl_cfg80211_concurrent_roam(cfg, 0); +#if defined(WLTDLS) + if (!wl_cfg80211_is_concurrent_mode()) { + err = wldev_iovar_setint(primary_dev, "tdls_enable", 1); + } 
+#endif /* defined(WLTDLS) */ + +#ifdef DISABLE_FRAMEBURST_VSDB +#ifdef USE_WFA_CERT_CONF + if (g_frameburst) +#endif /* USE_WFA_CERT_CONF */ + { + int frameburst = 1; + if (wldev_ioctl(primary_dev, WLC_SET_FAKEFRAG, &frameburst, + sizeof(frameburst), true) != 0) { + WL_DBG(("frameburst set error\n")); + } + WL_DBG(("Frameburst Enabled\n")); + } +#endif /* DISABLE_FRAMEBURST_VSDB */ + } + return err; +} +static s32 wl_init_scan(struct bcm_cfg80211 *cfg) +{ + int err = 0; + + cfg->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler; + cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + wl_escan_init_sync_id(cfg); + + /* Init scan_timeout timer */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + timer_setup(&cfg->scan_timeout, wl_scan_timeout, 0); +#else + init_timer(&cfg->scan_timeout); + cfg->scan_timeout.data = (unsigned long) cfg; + cfg->scan_timeout.function = wl_scan_timeout; +#endif + + return err; +} + +#ifdef DHD_LOSSLESS_ROAMING +static s32 wl_init_roam_timeout(struct bcm_cfg80211 *cfg) +{ + int err = 0; + + /* Init roam timer */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + timer_setup(&cfg->roam_timeout, wl_roam_timeout, 0); +#else + init_timer(&cfg->roam_timeout); + cfg->roam_timeout.data = (unsigned long) cfg; + cfg->roam_timeout.function = wl_roam_timeout; + +#endif + return err; +} +#endif /* DHD_LOSSLESS_ROAMING */ + +static s32 wl_init_priv(struct bcm_cfg80211 *cfg) +{ + struct wiphy *wiphy = bcmcfg_to_wiphy(cfg); + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + s32 err = 0; + + cfg->scan_request = NULL; + cfg->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT); + cfg->roam_on = false; + cfg->active_scan = true; + cfg->rf_blocked = false; + cfg->vsdb_mode = false; +#if defined(BCMSDIO) + cfg->wlfc_on = false; +#endif + cfg->roamoff_on_concurrent = true; + cfg->disable_roam_event = false; + cfg->cfgdev_bssidx = -1; + /* register interested state */ + set_bit(WL_STATUS_CONNECTED, &cfg->interrested_state); + spin_lock_init(&cfg->cfgdrv_lock); + mutex_init(&cfg->ioctl_buf_sync); + init_waitqueue_head(&cfg->netif_change_event); + init_completion(&cfg->send_af_done); + init_completion(&cfg->iface_disable); + wl_init_eq(cfg); + err = wl_init_priv_mem(cfg); + if (err) + return err; + if (wl_create_event_handler(cfg)) + return -ENOMEM; + wl_init_event_handler(cfg); + mutex_init(&cfg->usr_sync); + mutex_init(&cfg->event_sync); + mutex_init(&cfg->scan_complete); + err = wl_init_scan(cfg); + if (err) + return err; +#ifdef DHD_LOSSLESS_ROAMING + err = wl_init_roam_timeout(cfg); + if (err) { + return err; + } +#endif /* DHD_LOSSLESS_ROAMING */ + wl_init_conf(cfg->conf); + wl_init_prof(cfg, ndev); + wl_link_down(cfg); + DNGL_FUNC(dhd_cfg80211_init, (cfg)); + + return err; +} + +static void wl_deinit_priv(struct bcm_cfg80211 *cfg) +{ + DNGL_FUNC(dhd_cfg80211_deinit, (cfg)); + wl_destroy_event_handler(cfg); + wl_flush_eq(cfg); + wl_link_down(cfg); + del_timer_sync(&cfg->scan_timeout); +#ifdef DHD_LOSSLESS_ROAMING + del_timer_sync(&cfg->roam_timeout); +#endif + wl_deinit_priv_mem(cfg); + if (wl_cfg80211_netdev_notifier_registered) { + wl_cfg80211_netdev_notifier_registered = FALSE; + unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier); + } +} + +#if defined(WL_ENABLE_P2P_IF) +static s32 wl_cfg80211_attach_p2p(void) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + WL_TRACE(("Enter \n")); + + if (wl_cfgp2p_register_ndev(cfg) < 0) { + WL_ERR(("P2P attach failed. 
\n")); + return -ENODEV; + } + + return 0; +} + +static s32 wl_cfg80211_detach_p2p(void) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wireless_dev *wdev; + + WL_DBG(("Enter \n")); + if (!cfg) { + WL_ERR(("Invalid Ptr\n")); + return -EINVAL; + } else + wdev = cfg->p2p_wdev; + + if (!wdev) { + WL_ERR(("Invalid Ptr\n")); + return -EINVAL; + } + + wl_cfgp2p_unregister_ndev(cfg); + + cfg->p2p_wdev = NULL; + cfg->p2p_net = NULL; + WL_DBG(("Freeing 0x%p \n", wdev)); + kfree(wdev); + + return 0; +} +#endif + +s32 wl_cfg80211_attach_post(struct net_device *ndev) +{ + struct bcm_cfg80211 * cfg = NULL; + s32 err = 0; + s32 ret = 0; + WL_TRACE(("In\n")); + if (unlikely(!ndev)) { + WL_ERR(("ndev is invaild\n")); + return -ENODEV; + } + cfg = g_bcm_cfg; + if (unlikely(!cfg)) { + WL_ERR(("cfg is invaild\n")); + return -EINVAL; + } + if (!wl_get_drv_status(cfg, READY, ndev)) { + if (cfg->wdev) { + ret = wl_cfgp2p_supported(cfg, ndev); + if (ret > 0) { +#if !defined(WL_ENABLE_P2P_IF) + cfg->wdev->wiphy->interface_modes |= + (BIT(NL80211_IFTYPE_P2P_CLIENT)| + BIT(NL80211_IFTYPE_P2P_GO)); +#endif /* !WL_ENABLE_P2P_IF */ + if ((err = wl_cfgp2p_init_priv(cfg)) != 0) + goto fail; + +#if defined(WL_ENABLE_P2P_IF) + if (cfg->p2p_net) { + /* Update MAC addr for p2p0 interface here. */ + memcpy(cfg->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN); + cfg->p2p_net->dev_addr[0] |= 0x02; + WL_ERR(("%s: p2p_dev_addr="MACDBG "\n", + cfg->p2p_net->name, + MAC2STRDBG(cfg->p2p_net->dev_addr))); + } else { + WL_ERR(("p2p_net not yet populated." + " Couldn't update the MAC Address for p2p0 \n")); + return -ENODEV; + } +#endif /* WL_ENABLE_P2P_IF */ + cfg->p2p_supported = true; + } else if (ret == 0) { + if ((err = wl_cfgp2p_init_priv(cfg)) != 0) + goto fail; + } else { + /* SDIO bus timeout */ + err = -ENODEV; + goto fail; + } + } + } + wl_set_drv_status(cfg, READY, ndev); +fail: + return err; +} + +s32 wl_cfg80211_attach(struct net_device *ndev, void *context) +{ + struct wireless_dev *wdev; + struct bcm_cfg80211 *cfg; + s32 err = 0; + struct device *dev; + + WL_TRACE(("In\n")); + if (!ndev) { + WL_ERR(("ndev is invaild\n")); + return -ENODEV; + } + WL_DBG(("func %p\n", wl_cfg80211_get_parent_dev())); + dev = wl_cfg80211_get_parent_dev(); + + wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); + if (unlikely(!wdev)) { + WL_ERR(("Could not allocate wireless device\n")); + return -ENOMEM; + } + err = wl_setup_wiphy(wdev, dev, context); + if (unlikely(err)) { + kfree(wdev); + return -ENOMEM; + } + wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS); + cfg = (struct bcm_cfg80211 *)wiphy_priv(wdev->wiphy); + cfg->wdev = wdev; + cfg->pub = context; + INIT_LIST_HEAD(&cfg->net_list); + spin_lock_init(&cfg->net_list_sync); + ndev->ieee80211_ptr = wdev; + SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); + wdev->netdev = ndev; + cfg->state_notifier = wl_notifier_change_state; + err = wl_alloc_netinfo(cfg, ndev, wdev, WL_MODE_BSS, PM_ENABLE, 0); + if (err) { + WL_ERR(("Failed to alloc net_info (%d)\n", err)); + goto cfg80211_attach_out; + } + err = wl_init_priv(cfg); + if (err) { + WL_ERR(("Failed to init iwm_priv (%d)\n", err)); + goto cfg80211_attach_out; + } + + err = wl_setup_rfkill(cfg, TRUE); + if (err) { + WL_ERR(("Failed to setup rfkill %d\n", err)); + goto cfg80211_attach_out; + } +#ifdef DEBUGFS_CFG80211 + err = wl_setup_debugfs(cfg); + if (err) { + WL_ERR(("Failed to setup debugfs %d\n", err)); + goto cfg80211_attach_out; + } +#endif + if (!wl_cfg80211_netdev_notifier_registered) { + wl_cfg80211_netdev_notifier_registered = TRUE; + 
err = register_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+ if (err) {
+ wl_cfg80211_netdev_notifier_registered = FALSE;
+ WL_ERR(("Failed to register notifier %d\n", err));
+ goto cfg80211_attach_out;
+ }
+ }
+#if defined(COEX_DHCP)
+ cfg->btcoex_info = wl_cfg80211_btcoex_init(cfg->wdev->netdev);
+ if (!cfg->btcoex_info)
+ goto cfg80211_attach_out;
+#endif
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+ cfg->random_mac_enabled = FALSE;
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+ g_bcm_cfg = cfg;
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+ wdev->wiphy->reg_notifier = wl_cfg80211_reg_notifier;
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+#if defined(WL_ENABLE_P2P_IF)
+ err = wl_cfg80211_attach_p2p();
+ if (err)
+ goto cfg80211_attach_out;
+#endif
+
+ INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
+ mutex_init(&cfg->pm_sync);
+
+ return err;
+
+cfg80211_attach_out:
+ wl_setup_rfkill(cfg, FALSE);
+ wl_free_wdev(cfg);
+ return err;
+}
+
+void wl_cfg80211_detach(void *para)
+{
+ struct bcm_cfg80211 *cfg;
+
+ (void)para;
+ cfg = g_bcm_cfg;
+
+ WL_TRACE(("In\n"));
+
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
+
+#if defined(COEX_DHCP)
+ wl_cfg80211_btcoex_deinit();
+ cfg->btcoex_info = NULL;
+#endif
+
+ wl_setup_rfkill(cfg, FALSE);
+#ifdef DEBUGFS_CFG80211
+ wl_free_debugfs(cfg);
+#endif
+ if (cfg->p2p_supported) {
+ if (timer_pending(&cfg->p2p->listen_timer))
+ del_timer_sync(&cfg->p2p->listen_timer);
+ wl_cfgp2p_deinit_priv(cfg);
+ }
+
+ if (timer_pending(&cfg->scan_timeout))
+ del_timer_sync(&cfg->scan_timeout);
+#ifdef DHD_LOSSLESS_ROAMING
+ if (timer_pending(&cfg->roam_timeout)) {
+ del_timer_sync(&cfg->roam_timeout);
+ }
+#endif /* DHD_LOSSLESS_ROAMING */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ if (cfg->p2p_wdev)
+ wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_ENABLE_P2P_IF)
+ wl_cfg80211_detach_p2p();
+#endif
+
+ wl_cfg80211_ibss_vsie_free(cfg);
+ wl_cfg80211_clear_mgmt_vndr_ies(cfg);
+ wl_deinit_priv(cfg);
+ g_bcm_cfg = NULL;
+ wl_cfg80211_clear_parent_dev();
+ wl_free_wdev(cfg);
+ /* PLEASE do NOT call any function after wl_free_wdev: the driver's private
+ * structure "cfg", which is the private part of wiphy, has been freed in
+ * wl_free_wdev!
+ */
+}
+
+static void wl_wakeup_event(struct bcm_cfg80211 *cfg)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+ if (dhd->up && (cfg->event_tsk.thr_pid >= 0)) {
+ up(&cfg->event_tsk.sema);
+ }
+}
+
+static s32 wl_event_handler(void *data)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ struct wl_event_q *e;
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ struct wireless_dev *wdev = NULL;
+
+ cfg = (struct bcm_cfg80211 *)tsk->parent;
+
+ WL_ERR(("tsk Enter, tsk = 0x%p\n", tsk));
+
+ while (down_interruptible(&tsk->sema) == 0) {
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated) {
+ break;
+ }
+ while ((e = wl_deq_event(cfg))) {
+ WL_DBG(("event type (%d), ifidx: %d bssidx: %d \n",
+ e->etype, e->emsg.ifidx, e->emsg.bsscfgidx));
+
+ if (e->emsg.ifidx > WL_MAX_IFS) {
+ WL_ERR((" Event ifidx not in range. val:%d \n", e->emsg.ifidx));
+ goto fail;
+ }
+
+ if (!(wdev = wl_get_wdev_by_bssidx(cfg, e->emsg.bsscfgidx))) {
+ /* WLC_E_IF events are handled by wl_host_event */
+ if (e->etype != WLC_E_IF)
+ WL_ERR(("No wdev corresponding to bssidx: 0x%x found!"
+ " Ignoring event.\n", e->emsg.bsscfgidx)); + } else if (e->etype < WLC_E_LAST && cfg->evt_handler[e->etype]) { + dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub); + if (dhd->busstate == DHD_BUS_DOWN) { + WL_ERR((": BUS is DOWN.\n")); + } else { +#ifdef DHD_IFDEBUG + if (cfg->iface_cnt == 0) { + wl_dump_ifinfo(cfg); + } +#endif + cfg->evt_handler[e->etype](cfg, wdev_to_cfgdev(wdev), + &e->emsg, e->edata); + } + } else { + WL_DBG(("Unknown Event (%d): ignoring\n", e->etype)); + } +fail: + wl_put_event(e); + DHD_EVENT_WAKE_UNLOCK(cfg->pub); + } + } + WL_ERR(("was terminated\n")); + complete_and_exit(&tsk->completed, 0); + return 0; +} + +void +wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data) +{ + u32 event_type = ntoh32(e->event_type); + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_info *netinfo; + +#if (WL_DBG_LEVEL > 0) + s8 *estr = (event_type <= sizeof(wl_dbg_estr) / WL_DBG_ESTR_MAX - 1) ? + wl_dbg_estr[event_type] : (s8 *) "Unknown"; + WL_DBG(("event_type (%d):" "WLC_E_" "%s\n", event_type, estr)); +#endif /* (WL_DBG_LEVEL > 0) */ + + if (cfg->event_tsk.thr_pid == -1) { + WL_ERR(("Event handler is not created\n")); + return; + } + + if ((cfg == NULL) || (cfg->p2p_supported && cfg->p2p == NULL)) { + WL_ERR(("Stale event ignored\n")); + return; + } + + if (wl_get_p2p_status(cfg, IF_CHANGING) || wl_get_p2p_status(cfg, IF_ADDING)) { + WL_ERR(("during IF change, ignore event %d\n", event_type)); + return; + } + +#ifdef DHD_IFDEBUG + if (event_type != WLC_E_ESCAN_RESULT) { + WL_ERR(("Event_type %d , status : %d, reason : %d, bssidx:%d \n", + event_type, ntoh32(e->status), ntoh32(e->reason), e->bsscfgidx)); + } +#endif + netinfo = wl_get_netinfo_by_bssidx(cfg, e->bsscfgidx); + if (!netinfo) { + /* Since the netinfo entry is not there, the netdev entry is not + * created via cfg80211 interface. so the event is not of interest + * to the cfg80211 layer. 
+ */
+ WL_ERR(("ignore event %d, not interested\n", event_type));
+ return;
+ }
+
+ if (event_type == WLC_E_PFN_NET_FOUND) {
+ WL_DBG((" PNOEVENT: PNO_NET_FOUND\n"));
+ }
+ else if (event_type == WLC_E_PFN_NET_LOST) {
+ WL_DBG((" PNOEVENT: PNO_NET_LOST\n"));
+ }
+
+ DHD_EVENT_WAKE_LOCK(cfg->pub);
+ if (likely(!wl_enq_event(cfg, ndev, event_type, e, data))) {
+ wl_wakeup_event(cfg);
+ } else {
+ DHD_EVENT_WAKE_UNLOCK(cfg->pub);
+ }
+}
+
+static void wl_init_eq(struct bcm_cfg80211 *cfg)
+{
+ wl_init_eq_lock(cfg);
+ INIT_LIST_HEAD(&cfg->eq_list);
+}
+
+static void wl_flush_eq(struct bcm_cfg80211 *cfg)
+{
+ struct wl_event_q *e;
+ unsigned long flags;
+
+ flags = wl_lock_eq(cfg);
+ while (!list_empty_careful(&cfg->eq_list)) {
+ BCM_SET_LIST_FIRST_ENTRY(e, &cfg->eq_list, struct wl_event_q, eq_list);
+ list_del(&e->eq_list);
+ kfree(e);
+ }
+ wl_unlock_eq(cfg, flags);
+}
+
+/*
+* retrieve first queued event from head
+*/
+
+static struct wl_event_q *wl_deq_event(struct bcm_cfg80211 *cfg)
+{
+ struct wl_event_q *e = NULL;
+ unsigned long flags;
+
+ flags = wl_lock_eq(cfg);
+ if (likely(!list_empty(&cfg->eq_list))) {
+ BCM_SET_LIST_FIRST_ENTRY(e, &cfg->eq_list, struct wl_event_q, eq_list);
+ list_del(&e->eq_list);
+ }
+ wl_unlock_eq(cfg, flags);
+
+ return e;
+}
+
+/*
+ * push event to tail of the queue
+ */
+
+static s32
+wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 event,
+ const wl_event_msg_t *msg, void *data)
+{
+ struct wl_event_q *e;
+ s32 err = 0;
+ uint32 evtq_size;
+ uint32 data_len;
+ unsigned long flags;
+ gfp_t aflags;
+
+ data_len = 0;
+ if (data)
+ data_len = ntoh32(msg->datalen);
+ evtq_size = sizeof(struct wl_event_q) + data_len;
+ aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+ e = kzalloc(evtq_size, aflags);
+ if (unlikely(!e)) {
+ WL_ERR(("event alloc failed\n"));
+ return -ENOMEM;
+ }
+ e->etype = event;
+ memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
+ if (data)
+ memcpy(e->edata, data, data_len);
+ flags = wl_lock_eq(cfg);
+ list_add_tail(&e->eq_list, &cfg->eq_list);
+ wl_unlock_eq(cfg, flags);
+
+ return err;
+}
+
+static void wl_put_event(struct wl_event_q *e)
+{
+ kfree(e);
+}
+
+static s32 wl_config_ifmode(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 iftype)
+{
+ s32 infra = 0;
+ s32 err = 0;
+ s32 mode = 0;
+ switch (iftype) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_WDS:
+ WL_ERR(("type (%d): currently we do not support this mode\n",
+ iftype));
+ err = -EINVAL;
+ return err;
+ case NL80211_IFTYPE_ADHOC:
+ mode = WL_MODE_IBSS;
+ break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ mode = WL_MODE_BSS;
+ infra = 1;
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ mode = WL_MODE_AP;
+ infra = 1;
+ break;
+ default:
+ err = -EINVAL;
+ WL_ERR(("invalid type (%d)\n", iftype));
+ return err;
+ }
+ infra = htod32(infra);
+ err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_INFRA error (%d)\n", err));
+ return err;
+ }
+
+ wl_set_mode_by_netdev(cfg, ndev, mode);
+
+ return 0;
+}
+
+void wl_cfg80211_add_to_eventbuffer(struct wl_eventmsg_buf *ev, u16 event, bool set)
+{
+ if (!ev || (event > WLC_E_LAST))
+ return;
+
+ if (ev->num < MAX_EVENT_BUF_NUM) {
+ ev->event[ev->num].type = event;
+ ev->event[ev->num].set = set;
+ ev->num++;
+ } else {
+ WL_ERR(("eventbuffer doesn't support > %u events. 
Update" + " the define MAX_EVENT_BUF_NUM \n", MAX_EVENT_BUF_NUM)); + ASSERT(0); + } +} + +s32 wl_cfg80211_apply_eventbuffer( + struct net_device *ndev, + struct bcm_cfg80211 *cfg, + wl_eventmsg_buf_t *ev) +{ + char eventmask[WL_EVENTING_MASK_LEN]; + int i, ret = 0; + s8 iovbuf[WL_EVENTING_MASK_LEN + 12]; + + if (!ev || (!ev->num)) + return -EINVAL; + + mutex_lock(&cfg->event_sync); + + /* Read event_msgs mask */ + bcm_mkiovar("event_msgs", NULL, 0, iovbuf, + sizeof(iovbuf)); + ret = wldev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf), false); + if (unlikely(ret)) { + WL_ERR(("Get event_msgs error (%d)\n", ret)); + goto exit; + } + memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN); + + /* apply the set bits */ + for (i = 0; i < ev->num; i++) { + if (ev->event[i].set) + setbit(eventmask, ev->event[i].type); + else + clrbit(eventmask, ev->event[i].type); + } + + /* Write updated Event mask */ + bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, + sizeof(iovbuf)); + ret = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true); + if (unlikely(ret)) { + WL_ERR(("Set event_msgs error (%d)\n", ret)); + } + +exit: + mutex_unlock(&cfg->event_sync); + return ret; +} + +s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add) +{ + s8 iovbuf[WL_EVENTING_MASK_LEN + 12]; + s8 eventmask[WL_EVENTING_MASK_LEN]; + s32 err = 0; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + if (!ndev || !cfg) + return -ENODEV; + + mutex_lock(&cfg->event_sync); + + /* Setup event_msgs */ + bcm_mkiovar("event_msgs", NULL, 0, iovbuf, + sizeof(iovbuf)); + err = wldev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf), false); + if (unlikely(err)) { + WL_ERR(("Get event_msgs error (%d)\n", err)); + goto eventmsg_out; + } + memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN); + if (add) { + setbit(eventmask, event); + } else { + clrbit(eventmask, event); + } + bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, + sizeof(iovbuf)); + err = wldev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf), true); + if (unlikely(err)) { + WL_ERR(("Set event_msgs error (%d)\n", err)); + goto eventmsg_out; + } + +eventmsg_out: + mutex_unlock(&cfg->event_sync); + return err; +} + +static int wl_construct_reginfo(struct bcm_cfg80211 *cfg, s32 bw_cap) +{ + struct net_device *dev = bcmcfg_to_prmry_ndev(cfg); + struct ieee80211_channel *band_chan_arr = NULL; + wl_uint32_list_t *list; + u32 i, j, index, n_2g, n_5g, band, channel, array_size; + u32 *n_cnt = NULL; + chanspec_t c = 0; + s32 err = BCME_OK; + bool update; + bool ht40_allowed; + u8 *pbuf = NULL; + bool dfs_radar_disabled = FALSE; + +#define LOCAL_BUF_LEN 1024 + pbuf = kzalloc(LOCAL_BUF_LEN, GFP_KERNEL); + + if (pbuf == NULL) { + WL_ERR(("failed to allocate local buf\n")); + return -ENOMEM; + } + list = (wl_uint32_list_t *)(void *)pbuf; + list->count = htod32(WL_NUMCHANSPECS); + + + err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL, + 0, pbuf, LOCAL_BUF_LEN, 0, &cfg->ioctl_buf_sync); + if (err != 0) { + WL_ERR(("get chanspecs failed with %d\n", err)); + kfree(pbuf); + return err; + } +#undef LOCAL_BUF_LEN + + list = (wl_uint32_list_t *)(void *)pbuf; + band = array_size = n_2g = n_5g = 0; + for (i = 0; i < dtoh32(list->count); i++) { + index = 0; + update = false; + ht40_allowed = false; + c = (chanspec_t)dtoh32(list->element[i]); + c = wl_chspec_driver_to_host(c); + channel = wf_chspec_ctlchan(c); + + if (!CHSPEC_IS40(c) && ! 
CHSPEC_IS20(c)) {
+ WL_DBG(("HT80/160/80p80 center channel: %d\n", channel));
+ continue;
+ }
+ if (CHSPEC_IS2G(c) && (channel >= CH_MIN_2G_CHANNEL) &&
+ (channel <= CH_MAX_2G_CHANNEL)) {
+ band_chan_arr = __wl_2ghz_channels;
+ array_size = ARRAYSIZE(__wl_2ghz_channels);
+ n_cnt = &n_2g;
+ band = NL80211_BAND_2GHZ;
+ ht40_allowed = (bw_cap == WLC_N_BW_40ALL) ? true : false;
+ } else if (CHSPEC_IS5G(c) && channel >= CH_MIN_5G_CHANNEL) {
+ band_chan_arr = __wl_5ghz_a_channels;
+ array_size = ARRAYSIZE(__wl_5ghz_a_channels);
+ n_cnt = &n_5g;
+ band = NL80211_BAND_5GHZ;
+ ht40_allowed = (bw_cap == WLC_N_BW_20ALL) ? false : true;
+ } else {
+ WL_ERR(("Invalid channel spec 0x%x.\n", c));
+ continue;
+ }
+ if (!ht40_allowed && CHSPEC_IS40(c))
+ continue;
+ for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) {
+ if (band_chan_arr[j].hw_value == channel) {
+ update = true;
+ break;
+ }
+ }
+ if (update)
+ index = j;
+ else
+ index = *n_cnt;
+ if (index < array_size) {
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
+ band_chan_arr[index].center_freq =
+ ieee80211_channel_to_frequency(channel);
+#else
+ band_chan_arr[index].center_freq =
+ ieee80211_channel_to_frequency(channel, band);
+#endif
+ band_chan_arr[index].hw_value = channel;
+
+ if (CHSPEC_IS40(c) && ht40_allowed) {
+ /* assuming the order is HT20, HT40 Upper,
+ * HT40 lower from chanspecs
+ */
+ u32 ht40_flag = band_chan_arr[index].flags & IEEE80211_CHAN_NO_HT40;
+ if (CHSPEC_SB_UPPER(c)) {
+ if (ht40_flag == IEEE80211_CHAN_NO_HT40)
+ band_chan_arr[index].flags &=
+ ~IEEE80211_CHAN_NO_HT40;
+ band_chan_arr[index].flags |= IEEE80211_CHAN_NO_HT40PLUS;
+ } else {
+ /* It should be one of
+ * IEEE80211_CHAN_NO_HT40 or IEEE80211_CHAN_NO_HT40PLUS
+ */
+ band_chan_arr[index].flags &= ~IEEE80211_CHAN_NO_HT40;
+ if (ht40_flag == IEEE80211_CHAN_NO_HT40)
+ band_chan_arr[index].flags |=
+ IEEE80211_CHAN_NO_HT40MINUS;
+ }
+ } else {
+ band_chan_arr[index].flags = IEEE80211_CHAN_NO_HT40;
+ if (!dfs_radar_disabled) {
+ if (band == NL80211_BAND_2GHZ)
+ channel |= WL_CHANSPEC_BAND_2G;
+ else
+ channel |= WL_CHANSPEC_BAND_5G;
+ channel |= WL_CHANSPEC_BW_20;
+ channel = wl_chspec_host_to_driver(channel);
+ err = wldev_iovar_getint(dev, "per_chan_info", &channel);
+ if (!err) {
+ if (channel & WL_CHAN_RADAR) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+ band_chan_arr[index].flags |=
+ (IEEE80211_CHAN_RADAR
+ | IEEE80211_CHAN_NO_IBSS);
+#else
+ band_chan_arr[index].flags |=
+ IEEE80211_CHAN_RADAR;
+#endif
+ }
+
+ if (channel & WL_CHAN_PASSIVE)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+ band_chan_arr[index].flags |=
+ IEEE80211_CHAN_PASSIVE_SCAN;
+#else
+ band_chan_arr[index].flags |=
+ IEEE80211_CHAN_NO_IR;
+#endif
+ } else if (err == BCME_UNSUPPORTED) {
+ dfs_radar_disabled = TRUE;
+ WL_ERR(("does not support per_chan_info\n"));
+ }
+ }
+ }
+ if (!update)
+ (*n_cnt)++;
+ }
+
+ }
+ __wl_band_2ghz.n_channels = n_2g;
+ __wl_band_5ghz_a.n_channels = n_5g;
+ kfree(pbuf);
+ return err;
+}
+
+s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify)
+{
+ struct wiphy *wiphy;
+ struct net_device *dev;
+ u32 bandlist[3];
+ u32 nband = 0;
+ u32 i = 0;
+ s32 err = 0;
+ s32 index = 0;
+ s32 nmode = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+ u32 j = 0;
+ s32 vhtmode = 0;
+ s32 txstreams = 0;
+ s32 rxstreams = 0;
+ s32 ldpc_cap = 0;
+ s32 stbc_rx = 0;
+ s32 stbc_tx = 0;
+ s32 txbf_bfe_cap = 0;
+ s32 txbf_bfr_cap = 0;
+#endif
+ bool rollback_lock = false;
+ s32 bw_cap = 0;
+ s32 cur_band = -1;
+ struct 
ieee80211_supported_band *bands[NUM_NL80211_BANDS] = {NULL, }; + + if (cfg == NULL) { + cfg = g_bcm_cfg; + mutex_lock(&cfg->usr_sync); + rollback_lock = true; + } + dev = bcmcfg_to_prmry_ndev(cfg); + + memset(bandlist, 0, sizeof(bandlist)); + err = wldev_ioctl(dev, WLC_GET_BANDLIST, bandlist, + sizeof(bandlist), false); + if (unlikely(err)) { + WL_ERR(("error read bandlist (%d)\n", err)); + goto end_bands; + } + err = wldev_ioctl(dev, WLC_GET_BAND, &cur_band, + sizeof(s32), false); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + goto end_bands; + } + + err = wldev_iovar_getint(dev, "nmode", &nmode); + if (unlikely(err)) { + WL_ERR(("error reading nmode (%d)\n", err)); + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + err = wldev_iovar_getint(dev, "vhtmode", &vhtmode); + if (unlikely(err)) { + WL_ERR(("error reading vhtmode (%d)\n", err)); + } + + if (vhtmode) { + err = wldev_iovar_getint(dev, "txstreams", &txstreams); + if (unlikely(err)) { + WL_ERR(("error reading txstreams (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "rxstreams", &rxstreams); + if (unlikely(err)) { + WL_ERR(("error reading rxstreams (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "ldpc_cap", &ldpc_cap); + if (unlikely(err)) { + WL_ERR(("error reading ldpc_cap (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "stbc_rx", &stbc_rx); + if (unlikely(err)) { + WL_ERR(("error reading stbc_rx (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "stbc_tx", &stbc_tx); + if (unlikely(err)) { + WL_ERR(("error reading stbc_tx (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "txbf_bfe_cap", &txbf_bfe_cap); + if (unlikely(err)) { + WL_ERR(("error reading txbf_bfe_cap (%d)\n", err)); + } + + err = wldev_iovar_getint(dev, "txbf_bfr_cap", &txbf_bfr_cap); + if (unlikely(err)) { + WL_ERR(("error reading txbf_bfr_cap (%d)\n", err)); + } + } +#endif + + /* For nmode and vhtmode check bw cap */ + if (nmode || +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + vhtmode || +#endif + 0) { + err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap); + if (unlikely(err)) { + WL_ERR(("error get mimo_bw_cap (%d)\n", err)); + } + } + + err = wl_construct_reginfo(cfg, bw_cap); + if (err) { + WL_ERR(("wl_construct_reginfo() fails err=%d\n", err)); + if (err != BCME_UNSUPPORTED) + goto end_bands; + err = 0; + } + wiphy = bcmcfg_to_wiphy(cfg); + nband = bandlist[0]; + + for (i = 1; i <= nband && i < ARRAYSIZE(bandlist); i++) { + index = -1; + if (bandlist[i] == WLC_BAND_5G && __wl_band_5ghz_a.n_channels > 0) { + bands[NL80211_BAND_5GHZ ] = + &__wl_band_5ghz_a; + index = NL80211_BAND_5GHZ ; + if (nmode && (bw_cap == WLC_N_BW_40ALL || bw_cap == WLC_N_BW_20IN2G_40IN5G)) + bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + /* VHT capabilities. */ + if (vhtmode) { + /* Supported */ + bands[index]->vht_cap.vht_supported = TRUE; + + for (j = 1; j <= VHT_CAP_MCS_MAP_NSS_MAX; j++) { + /* TX stream rates. */ + if (j <= txstreams) { + VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_0_9, + bands[index]->vht_cap.vht_mcs.tx_mcs_map); + } else { + VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_NONE, + bands[index]->vht_cap.vht_mcs.tx_mcs_map); + } + + /* RX stream rates. 
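+ * Mirrors the TX loop above: spatial streams up to rxstreams
+ * advertise MCS 0-9, and the remaining streams are marked as not
+ * supported in the VHT MCS map.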
*/ + if (j <= rxstreams) { + VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_0_9, + bands[index]->vht_cap.vht_mcs.rx_mcs_map); + } else { + VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_NONE, + bands[index]->vht_cap.vht_mcs.rx_mcs_map); + } + } + + + /* Capabilities */ + /* 80 MHz is mandatory */ + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_SHORT_GI_80; + + if (WL_BW_CAP_160MHZ(bw_cap)) { + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_SHORT_GI_160; + } + + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; + + if (ldpc_cap) + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_RXLDPC; + + if (stbc_tx) + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_TXSTBC; + + if (stbc_rx) + bands[index]->vht_cap.cap |= + (stbc_rx << VHT_CAP_INFO_RX_STBC_SHIFT); + + if (txbf_bfe_cap) + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; + + if (txbf_bfr_cap) { + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; + } + + if (txbf_bfe_cap || txbf_bfr_cap) { + bands[index]->vht_cap.cap |= + (2 << VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT); + bands[index]->vht_cap.cap |= + ((txstreams - 1) << + VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT); + bands[index]->vht_cap.cap |= + IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB; + } + + /* AMPDU length limit, support max 1MB (2 ^ (13 + 7)) */ + bands[index]->vht_cap.cap |= + (7 << VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT); + WL_INFORM(("%s band[%d] vht_enab=%d vht_cap=%08x " + "vht_rx_mcs_map=%04x vht_tx_mcs_map=%04x\n", + __FUNCTION__, index, + bands[index]->vht_cap.vht_supported, + bands[index]->vht_cap.cap, + bands[index]->vht_cap.vht_mcs.rx_mcs_map, + bands[index]->vht_cap.vht_mcs.tx_mcs_map)); + } +#endif + } + else if (bandlist[i] == WLC_BAND_2G && __wl_band_2ghz.n_channels > 0) { + bands[NL80211_BAND_2GHZ ] = + &__wl_band_2ghz; + index = NL80211_BAND_2GHZ ; + if (bw_cap == WLC_N_BW_40ALL) + bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; + } + + if ((index >= 0) && nmode) { + bands[index]->ht_cap.cap |= + (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40); + bands[index]->ht_cap.ht_supported = TRUE; + bands[index]->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + bands[index]->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16; + /* An HT shall support all EQM rates for one spatial stream */ + bands[index]->ht_cap.mcs.rx_mask[0] = 0xff; + } + + } + + wiphy->bands[NL80211_BAND_2GHZ ] = bands[NL80211_BAND_2GHZ ]; + wiphy->bands[NL80211_BAND_5GHZ ] = bands[NL80211_BAND_5GHZ ]; + + /* check if any bands populated otherwise makes 2Ghz as default */ + if (wiphy->bands[NL80211_BAND_2GHZ ] == NULL && + wiphy->bands[NL80211_BAND_5GHZ ] == NULL) { + /* Setup 2Ghz band as default */ + wiphy->bands[NL80211_BAND_2GHZ ] = &__wl_band_2ghz; + } + + if (notify) + wiphy_apply_custom_regulatory(wiphy, &brcm_regdom); + + end_bands: + if (rollback_lock) + mutex_unlock(&cfg->usr_sync); + return err; +} + +static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg) +{ + s32 err = 0; + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + struct wireless_dev *wdev = ndev->ieee80211_ptr; + + WL_DBG(("In\n")); + + err = dhd_config_dongle(cfg); + if (unlikely(err)) + return err; + + err = wl_config_ifmode(cfg, ndev, wdev->iftype); + if (unlikely(err && err != -EINPROGRESS)) { + WL_ERR(("wl_config_ifmode failed\n")); + if (err == -1) { + WL_ERR(("return error %d\n", err)); + return err; + } + } + err = wl_update_wiphybands(cfg, true); + if (unlikely(err)) { + 
WL_ERR(("wl_update_wiphybands failed\n")); + if (err == -1) { + WL_ERR(("return error %d\n", err)); + return err; + } + } + + err = wl_create_event_handler(cfg); + if (err) { + WL_ERR(("wl_create_event_handler failed\n")); + return err; + } + wl_init_event_handler(cfg); + + err = wl_init_scan(cfg); + if (err) { + WL_ERR(("wl_init_scan failed\n")); + return err; + } +#ifdef DHD_LOSSLESS_ROAMING + if (timer_pending(&cfg->roam_timeout)) { + del_timer_sync(&cfg->roam_timeout); + } +#endif /* DHD_LOSSLESS_ROAMING */ + + err = dhd_monitor_init(cfg->pub); + + wl_set_drv_status(cfg, READY, ndev); + return err; +} + +static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg) +{ + s32 err = 0; + unsigned long flags; + struct net_info *iter, *next; + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + struct cfg80211_scan_info info = {}; +#if defined(WL_CFG80211) && defined(WL_ENABLE_P2P_IF) + struct net_device *p2p_net = cfg->p2p_net; +#endif +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); +#endif +#endif /* PROP_TXSTATUS_VSDB */ + WL_DBG(("In\n")); + /* Delete pm_enable_work */ + wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL); + +#ifdef WL_NAN + wl_cfgnan_stop_handler(ndev, g_bcm_cfg, NULL, 0, NULL); +#endif /* WL_NAN */ + + if (cfg->p2p_supported) { + wl_clr_p2p_status(cfg, GO_NEG_PHASE); +#ifdef PROP_TXSTATUS_VSDB +#if defined(BCMSDIO) + if (wl_cfgp2p_vif_created(cfg)) { + bool enabled = false; + dhd_wlfc_get_enable(dhd, &enabled); + if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE && + dhd->op_mode != DHD_FLAG_IBSS_MODE) { + dhd_wlfc_deinit(dhd); + cfg->wlfc_on = false; + } + } +#endif +#endif /* PROP_TXSTATUS_VSDB */ + } + + + /* If primary BSS is operational (for e.g SoftAP), bring it down */ + if (wl_cfgp2p_bss_isup(ndev, 0)) { + if (wl_cfgp2p_bss(cfg, ndev, 0, 0) < 0) + WL_ERR(("BSS down failed \n")); + } + + /* Check if cfg80211 interface is already down */ + if (!wl_get_drv_status(cfg, READY, ndev)) + return err; /* it is even not ready */ + + /* clear all the security setting on primary Interface */ + wl_cfg80211_clear_security(cfg); + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + if (iter->ndev) /* p2p discovery iface is null */ + wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + +#ifdef P2P_LISTEN_OFFLOADING + wl_cfg80211_p2plo_deinit(cfg); +#endif /* P2P_LISTEN_OFFLOADING */ + + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); + if (cfg->scan_request) { + info.aborted = true; + cfg80211_scan_done(cfg->scan_request, &info); + cfg->scan_request = NULL; + } + spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + /* p2p discovery iface ndev ptr could be null */ + if (iter->ndev == NULL) + continue; + wl_clr_drv_status(cfg, READY, iter->ndev); + wl_clr_drv_status(cfg, SCANNING, iter->ndev); + wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev); + wl_clr_drv_status(cfg, CONNECTING, iter->ndev); + wl_clr_drv_status(cfg, CONNECTED, 
iter->ndev); + wl_clr_drv_status(cfg, DISCONNECTING, iter->ndev); + wl_clr_drv_status(cfg, AP_CREATED, iter->ndev); + wl_clr_drv_status(cfg, AP_CREATING, iter->ndev); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + bcmcfg_to_prmry_ndev(cfg)->ieee80211_ptr->iftype = + NL80211_IFTYPE_STATION; +#if defined(WL_CFG80211) && defined(WL_ENABLE_P2P_IF) + if (p2p_net) + dev_close(p2p_net); +#endif + + /* Avoid deadlock from wl_cfg80211_down */ + mutex_unlock(&cfg->usr_sync); + wl_destroy_event_handler(cfg); + mutex_lock(&cfg->usr_sync); + wl_flush_eq(cfg); + wl_link_down(cfg); + if (cfg->p2p_supported) { + if (timer_pending(&cfg->p2p->listen_timer)) + del_timer_sync(&cfg->p2p->listen_timer); + wl_cfgp2p_down(cfg); + } + + if (timer_pending(&cfg->scan_timeout)) { + del_timer_sync(&cfg->scan_timeout); + } + + DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub)); + + dhd_monitor_uninit(); +#ifdef WLAIBSS_MCHAN + bcm_cfg80211_del_ibss_if(cfg->wdev->wiphy, cfg->ibss_cfgdev); +#endif /* WLAIBSS_MCHAN */ + +#if defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) + /* Clean up if not removed already */ + if (cfg->bss_cfgdev) + wl_cfg80211_del_iface(cfg->wdev->wiphy, cfg->bss_cfgdev); +#endif /* defined (WL_VIRTUAL_APSTA) || defined (DUAL_STA_STATIC_IF) */ + +#ifdef WL11U + /* Clear interworking element. */ + if (cfg->wl11u) { + cfg->wl11u = FALSE; + cfg->iw_ie_len = 0; + memset(cfg->iw_ie, 0, IW_IES_MAX_BUF_LEN); + } +#endif /* WL11U */ + +#ifdef CUSTOMER_HW4_DEBUG + if (wl_scan_timeout_dbg_enabled) + wl_scan_timeout_dbg_clear(); +#endif /* CUSTOMER_HW4_DEBUG */ + + cfg->disable_roam_event = false; + + DNGL_FUNC(dhd_cfg80211_down, (cfg)); + +#ifdef DHD_IFDEBUG + /* Printout all netinfo entries */ + wl_probe_wdev_all(cfg); +#endif /* DHD_IFDEBUG */ + + return err; +} + +s32 wl_cfg80211_up(void *para) +{ + struct bcm_cfg80211 *cfg; + s32 err = 0; + int val = 1; + dhd_pub_t *dhd; +#ifdef DISABLE_PM_BCNRX + s32 interr = 0; + uint param = 0; + s8 iovbuf[WLC_IOCTL_SMLEN]; +#endif /* DISABLE_PM_BCNRX */ + + (void)para; + WL_DBG(("In\n")); + cfg = g_bcm_cfg; + + if ((err = wldev_ioctl(bcmcfg_to_prmry_ndev(cfg), WLC_GET_VERSION, &val, + sizeof(int), false) < 0)) { + WL_ERR(("WLC_GET_VERSION failed, err=%d\n", err)); + return err; + } + val = dtoh32(val); + if (val != WLC_IOCTL_VERSION && val != 1) { + WL_ERR(("Version mismatch, please upgrade. 
Got %d, expected %d or 1\n",
+ val, WLC_IOCTL_VERSION));
+ return BCME_VERSION;
+ }
+ ioctl_version = val;
+ WL_TRACE(("WLC_GET_VERSION=%d\n", ioctl_version));
+
+ mutex_lock(&cfg->usr_sync);
+ dhd = (dhd_pub_t *)(cfg->pub);
+ if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ err = wl_cfg80211_attach_post(bcmcfg_to_prmry_ndev(cfg));
+ if (unlikely(err)) {
+ mutex_unlock(&cfg->usr_sync);
+ return err;
+ }
+ }
+ err = __wl_cfg80211_up(cfg);
+ if (unlikely(err))
+ WL_ERR(("__wl_cfg80211_up failed\n"));
+
+ /* IOVAR configurations with 'up' condition */
+#ifdef DISABLE_PM_BCNRX
+ bcm_mkiovar("pm_bcnrx", (char *)&param, 4, iovbuf, sizeof(iovbuf));
+ interr = wldev_ioctl(bcmcfg_to_prmry_ndev(cfg), WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+ if (unlikely(interr))
+ WL_ERR(("Set pm_bcnrx returned (%d)\n", interr));
+#endif /* DISABLE_PM_BCNRX */
+
+ mutex_unlock(&cfg->usr_sync);
+
+#ifdef WLAIBSS_MCHAN
+ bcm_cfg80211_add_ibss_if(cfg->wdev->wiphy, IBSS_IF_NAME);
+#endif /* WLAIBSS_MCHAN */
+
+#ifdef DUAL_STA_STATIC_IF
+#ifdef WL_VIRTUAL_APSTA
+#error "Both DUAL STA and DUAL_STA_STATIC_IF can't be enabled together"
+#endif
+ /* Static interface support is currently available only for STA-only builds (without P2P) */
+ wl_cfg80211_create_iface(cfg->wdev->wiphy, NL80211_IFTYPE_STATION, NULL, "wlan%d");
+#endif /* DUAL_STA_STATIC_IF */
+
+ return err;
+}
+
+/* Private Event to Supplicant with indication that chip hangs */
+int wl_cfg80211_hang(struct net_device *dev, u16 reason)
+{
+ struct bcm_cfg80211 *cfg;
+ dhd_pub_t *dhd;
+#if defined(SOFTAP_SEND_HANGEVT)
+ /* specific MAC address used for the hang event */
+ uint8 hang_mac[ETHER_ADDR_LEN] = {0x11, 0x11, 0x11, 0x11, 0x11, 0x11};
+#endif /* SOFTAP_SEND_HANGEVT */
+ if (!g_bcm_cfg) {
+ return BCME_ERROR;
+ }
+
+ cfg = g_bcm_cfg;
+ dhd = (dhd_pub_t *)(cfg->pub);
+
+#ifdef DHD_USE_EXTENDED_HANG_REASON
+ if (dhd->hang_reason != 0) {
+ reason = dhd->hang_reason;
+ }
+#endif /* DHD_USE_EXTENDED_HANG_REASON */
+
+ WL_ERR(("In : chip crash eventing, reason=0x%x\n", (uint32)(dhd->hang_reason)));
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
+#if defined(SOFTAP_SEND_HANGEVT)
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ cfg80211_del_sta(dev, hang_mac, GFP_ATOMIC);
+ } else
+#endif /* SOFTAP_SEND_HANGEVT */
+ {
+ CFG80211_DISCONNECTED(dev, reason, NULL, 0, false, GFP_KERNEL);
+ }
+ if (cfg != NULL) {
+ wl_link_down(cfg);
+ }
+ return 0;
+}
+
+s32 wl_cfg80211_down(void *para)
+{
+ struct bcm_cfg80211 *cfg;
+ s32 err = 0;
+
+ (void)para;
+ WL_DBG(("In\n"));
+ cfg = g_bcm_cfg;
+ mutex_lock(&cfg->usr_sync);
+ err = __wl_cfg80211_down(cfg);
+ mutex_unlock(&cfg->usr_sync);
+
+ return err;
+}
+
+static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item)
+{
+ unsigned long flags;
+ void *rptr = NULL;
+ struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
+
+ if (!profile)
+ return NULL;
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ switch (item) {
+ case WL_PROF_SEC:
+ rptr = &profile->sec;
+ break;
+ case WL_PROF_ACT:
+ rptr = &profile->active;
+ break;
+ case WL_PROF_BSSID:
+ rptr = profile->bssid;
+ break;
+ case WL_PROF_SSID:
+ rptr = &profile->ssid;
+ break;
+ case WL_PROF_CHAN:
+ rptr = &profile->channel;
+ break;
+ }
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+ if (!rptr)
+ WL_ERR(("invalid item (%d)\n", item));
+ return rptr;
+}
+
+static s32
+wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, const void *data, s32 item)
+{
+ s32 err = 0;
+ const struct wlc_ssid 
*ssid; + unsigned long flags; + struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev); + + if (!profile) + return WL_INVALID; + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); + switch (item) { + case WL_PROF_SSID: + ssid = (const wlc_ssid_t *) data; + memset(profile->ssid.SSID, 0, + sizeof(profile->ssid.SSID)); + memcpy(profile->ssid.SSID, ssid->SSID, ssid->SSID_len); + profile->ssid.SSID_len = ssid->SSID_len; + break; + case WL_PROF_BSSID: + if (data) + memcpy(profile->bssid, data, ETHER_ADDR_LEN); + else + memset(profile->bssid, 0, ETHER_ADDR_LEN); + break; + case WL_PROF_SEC: + memcpy(&profile->sec, data, sizeof(profile->sec)); + break; + case WL_PROF_ACT: + profile->active = *(const bool *)data; + break; + case WL_PROF_BEACONINT: + profile->beacon_interval = *(const u16 *)data; + break; + case WL_PROF_DTIMPERIOD: + profile->dtim_period = *(const u8 *)data; + break; + case WL_PROF_CHAN: + profile->channel = *(const u32*)data; + break; + default: + err = -EOPNOTSUPP; + break; + } + spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); + + if (err == -EOPNOTSUPP) + WL_ERR(("unsupported item (%d)\n", item)); + + return err; +} + +void wl_cfg80211_dbg_level(u32 level) +{ + /* + * prohibit to change debug level + * by insmod parameter. + * eventually debug level will be configured + * in compile time by using CONFIG_XXX + */ + /* wl_dbg_level = level; */ +} + +static bool wl_is_ibssmode(struct bcm_cfg80211 *cfg, struct net_device *ndev) +{ + return wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS; +} + +static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg) +{ + return cfg->ibss_starter; +} + +static void wl_rst_ie(struct bcm_cfg80211 *cfg) +{ + struct wl_ie *ie = wl_to_ie(cfg); + + ie->offset = 0; +} + +static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v) +{ + struct wl_ie *ie = wl_to_ie(cfg); + s32 err = 0; + + if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) { + WL_ERR(("ei crosses buffer boundary\n")); + return -ENOSPC; + } + ie->buf[ie->offset] = t; + ie->buf[ie->offset + 1] = l; + memcpy(&ie->buf[ie->offset + 2], v, l); + ie->offset += l + 2; + + return err; +} + +static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, const u8 *ie_stream, u32 *ie_size, + bool roam) +{ + u8 *ssidie; + /* cfg80211_find_ie defined in kernel returning const u8 */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + ssidie = (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ie_stream, *ie_size); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + if (!ssidie) + return; + if (ssidie[1] != bi->SSID_len) { + if (ssidie[1]) { + WL_ERR(("%s: Wrong SSID len: %d != %d\n", + __FUNCTION__, ssidie[1], bi->SSID_len)); + } + if (roam) { + WL_ERR(("Changing the SSID Info.\n")); + memmove(ssidie + bi->SSID_len + 2, + (ssidie + 2) + ssidie[1], + *ie_size - (ssidie + 2 + ssidie[1] - ie_stream)); + memcpy(ssidie + 2, bi->SSID, bi->SSID_len); + *ie_size = *ie_size + bi->SSID_len - ssidie[1]; + ssidie[1] = bi->SSID_len; + } + return; + } + if (*(ssidie + 2) == '\0') + memcpy(ssidie + 2, bi->SSID, bi->SSID_len); + return; +} + +static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size) +{ + struct wl_ie *ie = wl_to_ie(cfg); + s32 err = 0; + + if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) { + WL_ERR(("ei_stream 
crosses buffer boundary\n")); + return -ENOSPC; + } + memcpy(&ie->buf[ie->offset], ie_stream, ie_size); + ie->offset += ie_size; + + return err; +} + +static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size) +{ + struct wl_ie *ie = wl_to_ie(cfg); + s32 err = 0; + + if (unlikely(ie->offset > dst_size)) { + WL_ERR(("dst_size is not enough\n")); + return -ENOSPC; + } + memcpy(dst, &ie->buf[0], ie->offset); + + return err; +} + +static u32 wl_get_ielen(struct bcm_cfg80211 *cfg) +{ + struct wl_ie *ie = wl_to_ie(cfg); + + return ie->offset; +} + +static void wl_link_up(struct bcm_cfg80211 *cfg) +{ + cfg->link_up = true; +} + +static void wl_link_down(struct bcm_cfg80211 *cfg) +{ + struct wl_connect_info *conn_info = wl_to_conn(cfg); + + WL_DBG(("In\n")); + cfg->link_up = false; + conn_info->req_ie_len = 0; + conn_info->resp_ie_len = 0; +} + +static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg) +{ + unsigned long flags; + + spin_lock_irqsave(&cfg->eq_lock, flags); + return flags; +} + +static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags) +{ + spin_unlock_irqrestore(&cfg->eq_lock, flags); +} + +static void wl_init_eq_lock(struct bcm_cfg80211 *cfg) +{ + spin_lock_init(&cfg->eq_lock); +} + +static void wl_delay(u32 ms) +{ + if (in_atomic() || (ms < jiffies_to_msecs(1))) { + OSL_DELAY(ms*1000); + } else { + OSL_SLEEP(ms); + } +} + +s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct ether_addr primary_mac; + if (!cfg->p2p) + return -1; + if (!p2p_is_on(cfg)) { + get_primary_mac(cfg, &primary_mac); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); + } else { + memcpy(p2pdev_addr->octet, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE).octet, + ETHER_ADDR_LEN); + } + + return 0; +} +s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len) +{ + struct bcm_cfg80211 *cfg; + + cfg = g_bcm_cfg; + + return wl_cfgp2p_set_p2p_noa(cfg, net, buf, len); +} + +s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len) +{ + struct bcm_cfg80211 *cfg; + cfg = g_bcm_cfg; + + return wl_cfgp2p_get_p2p_noa(cfg, net, buf, len); +} + +s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len) +{ + struct bcm_cfg80211 *cfg; + cfg = g_bcm_cfg; + + return wl_cfgp2p_set_p2p_ps(cfg, net, buf, len); +} + +s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len) +{ + struct bcm_cfg80211 *cfg; + cfg = g_bcm_cfg; + + return wl_cfgp2p_set_p2p_ecsa(cfg, net, buf, len); +} + +#ifdef P2PLISTEN_AP_SAMECHN +s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable) +{ + s32 ret = wldev_iovar_setint(net, "p2p_resp_ap_chn", enable); + + if ((ret == 0) && enable) { + /* disable PM for p2p responding on infra AP channel */ + s32 pm = PM_OFF; + + ret = wldev_ioctl(net, WLC_SET_PM, &pm, sizeof(pm), true); + } + + return ret; +} +#endif /* P2PLISTEN_AP_SAMECHN */ + +s32 wl_cfg80211_channel_to_freq(u32 channel) +{ + int freq = 0; + +#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) + freq = ieee80211_channel_to_frequency(channel); +#else + { + u16 band = 0; + if (channel <= CH_MAX_2G_CHANNEL) + band = NL80211_BAND_2GHZ ; + else + band = NL80211_BAND_5GHZ ; + freq = ieee80211_channel_to_frequency(channel, band); + } +#endif + return freq; +} + + +#ifdef WLTDLS +static s32 +wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) { + + struct net_device *ndev = NULL; + u32 reason = ntoh32(e->reason); 
+ s8 *msg = NULL;
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ switch (reason) {
+ case WLC_E_TDLS_PEER_DISCOVERED:
+ msg = " TDLS PEER DISCOVERED ";
+ break;
+ case WLC_E_TDLS_PEER_CONNECTED:
+#ifdef PCIE_FULL_DONGLE
+ dhd_tdls_update_peer_info(ndev, TRUE, (uint8 *)&e->addr.octet[0]);
+#endif /* PCIE_FULL_DONGLE */
+ if (cfg->tdls_mgmt_frame) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
+ 0);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
+ 0, GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
+ GFP_ATOMIC);
+#else
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
+ GFP_ATOMIC);
+#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */
+ }
+ msg = " TDLS PEER CONNECTED ";
+ break;
+ case WLC_E_TDLS_PEER_DISCONNECTED:
+#ifdef PCIE_FULL_DONGLE
+ dhd_tdls_update_peer_info(ndev, FALSE, (uint8 *)&e->addr.octet[0]);
+#endif /* PCIE_FULL_DONGLE */
+ if (cfg->tdls_mgmt_frame) {
+ kfree(cfg->tdls_mgmt_frame);
+ cfg->tdls_mgmt_frame = NULL;
+ cfg->tdls_mgmt_freq = 0;
+ }
+ msg = "TDLS PEER DISCONNECTED ";
+ break;
+ }
+ if (msg) {
+ WL_ERR(("%s: " MACDBG " on %s ndev\n", msg, MAC2STRDBG((u8*)(&e->addr)),
+ (bcmcfg_to_prmry_ndev(cfg) == ndev) ? "primary" : "secondary"));
+ }
+ return 0;
+
+}
+#endif /* WLTDLS */
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
+static s32
+#if (defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)) || (LINUX_VERSION_CODE < \
+ KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, const u8 *data, size_t len)
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, const u8 *data, size_t len)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, bool initiator, const u8 *data, size_t len)
+#else
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, const u8 *data,
+ size_t len)
+#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+{
+ s32 ret = 0;
+#ifdef WLTDLS
+ struct bcm_cfg80211 *cfg;
+ tdls_wfd_ie_iovar_t info;
+ memset(&info, 0, sizeof(tdls_wfd_ie_iovar_t));
+ cfg = g_bcm_cfg;
+
+#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)
+ /* Some customer platforms back-ported this feature from kernel 3.15 to
+ * kernel 3.10, which causes a build error
+ */
+ BCM_REFERENCE(peer_capability);
+#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+
+ switch (action_code) {
+ /* We need to set TDLS Wifi Display IE to firmware
+ * using tdls_wfd_ie iovar
+ */
+ case WLAN_TDLS_SET_PROBE_WFD_IE:
+ WL_ERR(("%s WLAN_TDLS_SET_PROBE_WFD_IE\n", __FUNCTION__));
+ info.mode = TDLS_WFD_PROBE_IE_TX;
+ memcpy(&info.data, data, len);
+ info.length 
= len;
+ break;
+ case WLAN_TDLS_SET_SETUP_WFD_IE:
+ WL_ERR(("%s WLAN_TDLS_SET_SETUP_WFD_IE\n", __FUNCTION__));
+ info.mode = TDLS_WFD_IE_TX;
+ memcpy(&info.data, data, len);
+ info.length = len;
+ break;
+ case WLAN_TDLS_SET_WFD_ENABLED:
+ WL_ERR(("%s WLAN_TDLS_SET_MODE_WFD_ENABLED\n", __FUNCTION__));
+ dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), true);
+ goto out;
+ case WLAN_TDLS_SET_WFD_DISABLED:
+ WL_ERR(("%s WLAN_TDLS_SET_MODE_WFD_DISABLED\n", __FUNCTION__));
+ dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), false);
+ goto out;
+ default:
+ WL_ERR(("Unsupported action code: %d\n", action_code));
+ goto out;
+ }
+
+ ret = wldev_iovar_setbuf(dev, "tdls_wfd_ie", &info, sizeof(info),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+ if (ret) {
+ WL_ERR(("tdls_wfd_ie error %d\n", ret));
+ }
+out:
+#endif /* WLTDLS */
+ return ret;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32
+wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, enum nl80211_tdls_operation oper)
+#else
+static s32
+wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, enum nl80211_tdls_operation oper)
+#endif
+{
+ s32 ret = 0;
+#ifdef WLTDLS
+ struct bcm_cfg80211 *cfg;
+ tdls_iovar_t info;
+ dhd_pub_t *dhdp;
+ bool tdls_auto_mode = false;
+ cfg = g_bcm_cfg;
+ dhdp = (dhd_pub_t *)(cfg->pub);
+ memset(&info, 0, sizeof(tdls_iovar_t));
+ if (peer) {
+ memcpy(&info.ea, peer, ETHER_ADDR_LEN);
+ } else {
+ return -1;
+ }
+ switch (oper) {
+ case NL80211_TDLS_DISCOVERY_REQ:
+ /* If the discovery request is broadcast then we need to set
+ * info.mode to Tunneled Probe Request
+ */
+ if (memcmp(peer, (const uint8 *)BSSID_BROADCAST, ETHER_ADDR_LEN) == 0) {
+ info.mode = TDLS_MANUAL_EP_WFD_TPQ;
+ WL_ERR(("%s TDLS TUNNELED PROBE REQUEST\n", __FUNCTION__));
+ } else {
+ info.mode = TDLS_MANUAL_EP_DISCOVERY;
+ }
+ break;
+ case NL80211_TDLS_SETUP:
+ if (dhdp->tdls_mode == true) {
+ info.mode = TDLS_MANUAL_EP_CREATE;
+ tdls_auto_mode = false;
+ ret = dhd_tdls_enable(dev, false, tdls_auto_mode, NULL);
+ if (ret < 0) {
+ return ret;
+ }
+ } else {
+ tdls_auto_mode = true;
+ }
+ break;
+ case NL80211_TDLS_TEARDOWN:
+ info.mode = TDLS_MANUAL_EP_DELETE;
+ break;
+ default:
+ WL_ERR(("Unsupported operation: %d\n", oper));
+ goto out;
+ }
+ /* turn on TDLS */
+ ret = dhd_tdls_enable(dev, true, tdls_auto_mode, NULL);
+ if (ret < 0) {
+ return ret;
+ }
+ if (info.mode) {
+ ret = wldev_iovar_setbuf(dev, "tdls_endpoint", &info, sizeof(info),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (ret) {
+ WL_ERR(("tdls_endpoint error %d\n", ret));
+ }
+ }
+out:
+#endif /* WLTDLS */
+ return ret;
+}
+#endif
+
+s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *ndev, char *buf, int len,
+ enum wl_management_type type)
+{
+ struct bcm_cfg80211 *cfg;
+ s32 ret = 0;
+ struct ether_addr primary_mac;
+ s32 bssidx = 0;
+ s32 pktflag = 0;
+ cfg = g_bcm_cfg;
+
+ if (wl_get_drv_status(cfg, AP_CREATING, ndev)) {
+ /* Vendor IEs should be set to FW
+ * after SoftAP interface is brought up
+ */
+ WL_DBG(("Skipping set IE since AP is not up \n"));
+ goto exit;
+ } else if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ /* Either stand alone AP case or P2P discovery */
+ if (wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+ /* Stand alone AP case on primary interface */
+ WL_DBG(("Apply IEs for Primary AP Interface \n"));
+ bssidx = 0;
+ } else {
+ /* P2P Discovery case (p2p listen) */
+ if (!cfg->p2p->on) {
+ /* Turn on Discovery interface */
+ get_primary_mac(cfg, 
&primary_mac); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); + p2p_on(cfg) = true; + ret = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0); + if (unlikely(ret)) { + WL_ERR(("Enable discovery failed \n")); + goto exit; + } + } + WL_DBG(("Apply IEs for P2P Discovery Iface \n")); + ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY); + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + } + } else { + /* Virtual AP/ P2P Group Interface */ + WL_DBG(("Apply IEs for iface:%s\n", ndev->name)); + bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr); + } + + if (ndev != NULL) { + switch (type) { + case WL_BEACON: + pktflag = VNDR_IE_BEACON_FLAG; + break; + case WL_PROBE_RESP: + pktflag = VNDR_IE_PRBRSP_FLAG; + break; + case WL_ASSOC_RESP: + pktflag = VNDR_IE_ASSOCRSP_FLAG; + break; + } + if (pktflag) { + ret = wl_cfg80211_set_mgmt_vndr_ies(cfg, + ndev_to_cfgdev(ndev), bssidx, pktflag, buf, len); + } + } +exit: + return ret; +} + +#ifdef WL_SUPPORT_AUTO_CHANNEL +static s32 +wl_cfg80211_set_auto_channel_scan_state(struct net_device *ndev) +{ + u32 val = 0; + s32 ret = BCME_ERROR; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wiphy *wiphy; + /* Disable mpc, to avoid automatic interface down. */ + val = 0; + + wiphy = bcmcfg_to_wiphy(cfg); + if (wl_check_dongle_idle(wiphy) != TRUE) { + WL_ERR(("FW is busy to add interface")); + return ret; + } + ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val, + sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, + &cfg->ioctl_buf_sync); + if (ret < 0) { + WL_ERR(("set 'mpc' failed, error = %d\n", ret)); + goto done; + } + + /* Set interface up, explicitly. */ + val = 1; + + ret = wldev_ioctl(ndev, WLC_UP, (void *)&val, sizeof(val), true); + if (ret < 0) { + WL_ERR(("set interface up failed, error = %d\n", ret)); + goto done; + } + + /* Stop all scan explicitly, till auto channel selection complete. */ + wl_set_drv_status(cfg, SCANNING, ndev); + if (cfg->escan_info.ndev == NULL) { + ret = BCME_OK; + goto done; + } + ret = wl_notify_escan_complete(cfg, ndev, true, true); + if (ret < 0) { + WL_ERR(("set scan abort failed, error = %d\n", ret)); + goto done; + } + +done: + return ret; +} + +static bool +wl_cfg80211_valid_channel_p2p(int channel) +{ + bool valid = false; + + /* channel 1 to 14 */ + if ((channel >= 1) && (channel <= 14)) { + valid = true; + } + /* channel 36 to 48 */ + else if ((channel >= 36) && (channel <= 48)) { + valid = true; + } + /* channel 149 to 161 */ + else if ((channel >= 149) && (channel <= 161)) { + valid = true; + } + else { + valid = false; + WL_INFORM(("invalid P2P chanspec, channel = %d\n", channel)); + } + + return valid; +} + +s32 +wl_cfg80211_get_chanspecs_2g(struct net_device *ndev, void *buf, s32 buflen) +{ + s32 ret = BCME_ERROR; + struct bcm_cfg80211 *cfg = NULL; + wl_uint32_list_t *list = NULL; + chanspec_t chanspec = 0; + + memset(buf, 0, buflen); + + cfg = g_bcm_cfg; + list = (wl_uint32_list_t *)buf; + list->count = htod32(WL_NUMCHANSPECS); + + /* Restrict channels to 2.4GHz, 20MHz BW, no SB. 
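+ * (Only the band, bandwidth and sideband bits are set, so the
+ * "chanspecs" iovar below returns every matching 2.4GHz/20MHz
+ * chanspec the firmware supports.)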
*/
+ chanspec |= (WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_20 |
+ WL_CHANSPEC_CTL_SB_NONE);
+ chanspec = wl_chspec_host_to_driver(chanspec);
+
+ ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
+ sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
+ if (ret < 0) {
+ WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
+ }
+
+ return ret;
+}
+
+s32
+wl_cfg80211_get_chanspecs_5g(struct net_device *ndev, void *buf, s32 buflen)
+{
+ u32 channel = 0;
+ s32 ret = BCME_ERROR;
+ s32 i = 0;
+ s32 j = 0;
+ struct bcm_cfg80211 *cfg = NULL;
+ wl_uint32_list_t *list = NULL;
+ chanspec_t chanspec = 0;
+
+ memset(buf, 0, buflen);
+
+ cfg = g_bcm_cfg;
+ list = (wl_uint32_list_t *)buf;
+ list->count = htod32(WL_NUMCHANSPECS);
+
+ /* Restrict channels to 5GHz, 20MHz BW, no SB. */
+ chanspec |= (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_20 |
+ WL_CHANSPEC_CTL_SB_NONE);
+ chanspec = wl_chspec_host_to_driver(chanspec);
+
+ ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
+ sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
+ if (ret < 0) {
+ WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
+ goto done;
+ }
+
+ /* Skip DFS and invalid P2P channels. */
+ for (i = 0, j = 0; i < dtoh32(list->count); i++) {
+ chanspec = (chanspec_t) dtoh32(list->element[i]);
+ channel = CHSPEC_CHANNEL(chanspec);
+
+ ret = wldev_iovar_getint(ndev, "per_chan_info", &channel);
+ if (ret < 0) {
+ WL_ERR(("get 'per_chan_info' failed, error = %d\n", ret));
+ goto done;
+ }
+
+ if (CHANNEL_IS_RADAR(channel) ||
+ !(wl_cfg80211_valid_channel_p2p(CHSPEC_CHANNEL(chanspec)))) {
+ continue;
+ } else {
+ list->element[j] = list->element[i];
+ }
+
+ j++;
+ }
+
+ list->count = j;
+
+done:
+ return ret;
+}
+
+static s32
+wl_cfg80211_get_best_channel(struct net_device *ndev, void *buf, int buflen,
+ int *channel)
+{
+ s32 ret = BCME_ERROR;
+ int chosen = 0;
+ int retry = 0;
+
+ /* Start auto channel selection scan. */
+ ret = wldev_ioctl(ndev, WLC_START_CHANNEL_SEL, buf, buflen, true);
+ if (ret < 0) {
+ WL_ERR(("can't start auto channel scan, error = %d\n", ret));
+ *channel = 0;
+ goto done;
+ }
+
+ /* Wait for auto channel selection, worst case possible delay is 5250ms. */
+ retry = CHAN_SEL_RETRY_COUNT;
+
+ while (retry--) {
+ OSL_SLEEP(CHAN_SEL_IOCTL_DELAY);
+
+ ret = wldev_ioctl(ndev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen),
+ false);
+ if ((ret == 0) && (dtoh32(chosen) != 0)) {
+ *channel = (u16)(chosen & 0x00FF);
+ WL_INFORM(("selected channel = %d\n", *channel));
+ break;
+ }
+ WL_INFORM(("attempt = %d, ret = %d, chosen = %d\n",
+ (CHAN_SEL_RETRY_COUNT - retry), ret, dtoh32(chosen)));
+ }
+
+ if (retry <= 0) {
+ WL_ERR(("failure, auto channel selection timed out\n"));
+ *channel = 0;
+ ret = BCME_ERROR;
+ }
+
+done:
+ return ret;
+}
+
+static s32
+wl_cfg80211_restore_auto_channel_scan_state(struct net_device *ndev)
+{
+ u32 val = 0;
+ s32 ret = BCME_ERROR;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+ /* Clear scan stop driver status. */
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+
+ /* Enable mpc back to 1, irrespective of initial state. 
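+ * (mpc was forced to 0 in wl_cfg80211_set_auto_channel_scan_state()
+ * so the interface would stay up during the scan; unconditionally
+ * re-enable it here.)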
*/ + val = 1; + + ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val, + sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, + &cfg->ioctl_buf_sync); + if (ret < 0) { + WL_ERR(("set 'mpc' failed, error = %d\n", ret)); + } + + return ret; +} + +s32 +wl_cfg80211_get_best_channels(struct net_device *dev, char* cmd, int total_len) +{ + int channel = 0; + s32 ret = BCME_ERROR; + u8 *buf = NULL; + char *pos = cmd; + struct bcm_cfg80211 *cfg = NULL; + struct net_device *ndev = NULL; + + memset(cmd, 0, total_len); + + buf = kmalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL); + if (buf == NULL) { + WL_ERR(("failed to allocate chanspec buffer\n")); + return -ENOMEM; + } + + /* + * Always use primary interface, irrespective of interface on which + * command came. + */ + cfg = g_bcm_cfg; + ndev = bcmcfg_to_prmry_ndev(cfg); + + /* + * Make sure that FW and driver are in right state to do auto channel + * selection scan. + */ + ret = wl_cfg80211_set_auto_channel_scan_state(ndev); + if (ret < 0) { + WL_ERR(("can't set auto channel scan state, error = %d\n", ret)); + goto done; + } + + /* Best channel selection in 2.4GHz band. */ + ret = wl_cfg80211_get_chanspecs_2g(ndev, (void *)buf, CHANSPEC_BUF_SIZE); + if (ret < 0) { + WL_ERR(("can't get chanspecs in 2.4GHz, error = %d\n", ret)); + goto done; + } + + ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE, + &channel); + if (ret < 0) { + WL_ERR(("can't select best channel scan in 2.4GHz, error = %d\n", ret)); + goto done; + } + + if (CHANNEL_IS_2G(channel)) { + channel = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ ); + } else { + WL_ERR(("invalid 2.4GHz channel, channel = %d\n", channel)); + channel = 0; + } + + pos += snprintf(pos, total_len, "%04d ", channel); + + /* Best channel selection in 5GHz band. */ + ret = wl_cfg80211_get_chanspecs_5g(ndev, (void *)buf, CHANSPEC_BUF_SIZE); + if (ret < 0) { + WL_ERR(("can't get chanspecs in 5GHz, error = %d\n", ret)); + goto done; + } + + ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE, + &channel); + if (ret < 0) { + WL_ERR(("can't select best channel scan in 5GHz, error = %d\n", ret)); + goto done; + } + + if (CHANNEL_IS_5G(channel)) { + channel = ieee80211_channel_to_frequency(channel, NL80211_BAND_5GHZ ); + } else { + WL_ERR(("invalid 5GHz channel, channel = %d\n", channel)); + channel = 0; + } + + pos += snprintf(pos, total_len, "%04d ", channel); + + /* Set overall best channel same as 5GHz best channel. */ + pos += snprintf(pos, total_len, "%04d ", channel); + +done: + if (NULL != buf) { + kfree(buf); + } + + /* Restore FW and driver back to normal state. */ + ret = wl_cfg80211_restore_auto_channel_scan_state(ndev); + if (ret < 0) { + WL_ERR(("can't restore auto channel scan state, error = %d\n", ret)); + } + + return (pos - cmd); +} +#endif /* WL_SUPPORT_AUTO_CHANNEL */ + +static const struct rfkill_ops wl_rfkill_ops = { + .set_block = wl_rfkill_set +}; + +static int wl_rfkill_set(void *data, bool blocked) +{ + struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data; + + WL_DBG(("Enter \n")); + WL_DBG(("RF %s\n", blocked ? 
"blocked" : "unblocked")); + + if (!cfg) + return -EINVAL; + + cfg->rf_blocked = blocked; + + return 0; +} + +static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup) +{ + s32 err = 0; + + WL_DBG(("Enter \n")); + if (!cfg) + return -EINVAL; + if (setup) { + cfg->rfkill = rfkill_alloc("brcmfmac-wifi", + wl_cfg80211_get_parent_dev(), + RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)cfg); + + if (!cfg->rfkill) { + err = -ENOMEM; + goto err_out; + } + + err = rfkill_register(cfg->rfkill); + + if (err) + rfkill_destroy(cfg->rfkill); + } else { + if (!cfg->rfkill) { + err = -ENOMEM; + goto err_out; + } + + rfkill_unregister(cfg->rfkill); + rfkill_destroy(cfg->rfkill); + } + +err_out: + return err; +} + +#ifdef DEBUGFS_CFG80211 +/** +* Format : echo "SCAN:1 DBG:1" > /sys/kernel/debug/dhd/debug_level +* to turn on SCAN and DBG log. +* To turn off SCAN partially, echo "SCAN:0" > /sys/kernel/debug/dhd/debug_level +* To see current setting of debug level, +* cat /sys/kernel/debug/dhd/debug_level +*/ +static ssize_t +wl_debuglevel_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)], sublog[S_SUBLOGLEVEL]; + char *params, *token, *colon; + uint i, tokens, log_on = 0; + memset(tbuf, 0, sizeof(tbuf)); + memset(sublog, 0, sizeof(sublog)); + if (copy_from_user(&tbuf, userbuf, min_t(size_t, (sizeof(tbuf) - 1), count))) + return -EFAULT; + + params = &tbuf[0]; + colon = strchr(params, '\n'); + if (colon != NULL) + *colon = '\0'; + while ((token = strsep(¶ms, " ")) != NULL) { + memset(sublog, 0, sizeof(sublog)); + if (token == NULL || !*token) + break; + if (*token == '\0') + continue; + colon = strchr(token, ':'); + if (colon != NULL) { + *colon = ' '; + } + tokens = sscanf(token, "%s %u", sublog, &log_on); + if (colon != NULL) + *colon = ':'; + + if (tokens == 2) { + for (i = 0; i < ARRAYSIZE(sublogname_map); i++) { + if (!strncmp(sublog, sublogname_map[i].sublogname, + strlen(sublogname_map[i].sublogname))) { + if (log_on) + wl_dbg_level |= + (sublogname_map[i].log_level); + else + wl_dbg_level &= + ~(sublogname_map[i].log_level); + } + } + } else + WL_ERR(("%s: can't parse '%s' as a " + "SUBMODULE:LEVEL (%d tokens)\n", + tbuf, token, tokens)); + + + } + return count; +} + +static ssize_t +wl_debuglevel_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char *param; + char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)]; + uint i; + memset(tbuf, 0, sizeof(tbuf)); + param = &tbuf[0]; + for (i = 0; i < ARRAYSIZE(sublogname_map); i++) { + param += snprintf(param, sizeof(tbuf) - 1, "%s:%d ", + sublogname_map[i].sublogname, + (wl_dbg_level & sublogname_map[i].log_level) ? 
1 : 0); + } + *param = '\n'; + return simple_read_from_buffer(user_buf, count, ppos, tbuf, strlen(&tbuf[0])); + +} +static const struct file_operations fops_debuglevel = { + .open = NULL, + .write = wl_debuglevel_write, + .read = wl_debuglevel_read, + .owner = THIS_MODULE, + .llseek = NULL, +}; + +static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg) +{ + s32 err = 0; + struct dentry *_dentry; + if (!cfg) + return -EINVAL; + cfg->debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (!cfg->debugfs || IS_ERR(cfg->debugfs)) { + if (cfg->debugfs == ERR_PTR(-ENODEV)) + WL_ERR(("Debugfs is not enabled on this kernel\n")); + else + WL_ERR(("Can not create debugfs directory\n")); + cfg->debugfs = NULL; + goto exit; + + } + _dentry = debugfs_create_file("debug_level", S_IRUSR | S_IWUSR, + cfg->debugfs, cfg, &fops_debuglevel); + if (!_dentry || IS_ERR(_dentry)) { + WL_ERR(("failed to create debug_level debug file\n")); + wl_free_debugfs(cfg); + } +exit: + return err; +} +static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg) +{ + if (!cfg) + return -EINVAL; + if (cfg->debugfs) + debugfs_remove_recursive(cfg->debugfs); + cfg->debugfs = NULL; + return 0; +} +#endif /* DEBUGFS_CFG80211 */ + +struct device *wl_cfg80211_get_parent_dev(void) +{ + return cfg80211_parent_dev; +} + +void wl_cfg80211_set_parent_dev(void *dev) +{ + cfg80211_parent_dev = dev; +} + +static void wl_cfg80211_clear_parent_dev(void) +{ + cfg80211_parent_dev = NULL; +} + +void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac) +{ + wldev_iovar_getbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr", NULL, + 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); + memcpy(mac->octet, cfg->ioctl_buf, ETHER_ADDR_LEN); +} +static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role) +{ + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); + if (((dev_role == NL80211_IFTYPE_AP) && + !(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) || + ((dev_role == NL80211_IFTYPE_P2P_GO) && + !(dhd->op_mode & DHD_FLAG_P2P_GO_MODE))) + { + WL_ERR(("device role select failed role:%d op_mode:%d \n", dev_role, dhd->op_mode)); + return false; + } + return true; +} + +int wl_cfg80211_do_driver_init(struct net_device *net) +{ + struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net); + + if (!cfg || !cfg->wdev) + return -EINVAL; + + if (dhd_do_driver_init(cfg->wdev->netdev) < 0) + return -1; + + return 0; +} + +void wl_cfg80211_enable_trace(bool set, u32 level) +{ + if (set) + wl_dbg_level = level & WL_DBG_LEVEL; + else + wl_dbg_level |= (WL_DBG_LEVEL & level); +} +#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \ + 2, 0)) +static s32 +wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, + bcm_struct_cfgdev *cfgdev, u64 cookie) +{ + /* CFG80211 checks for tx_cancel_wait callback when ATTR_DURATION + * is passed with CMD_FRAME. This callback is supposed to cancel + * the OFFCHANNEL Wait. Since we are already taking care of that + * with the tx_mgmt logic, do nothing here. 
+ */ + + return 0; +} +#endif /* WL_SUPPORT_BACKPORTED_PATCHES || KERNEL >= 3.2.0 */ + +#ifdef WL11U +bcm_tlv_t * +wl_cfg80211_find_interworking_ie(u8 *parse, u32 len) +{ + bcm_tlv_t *ie; + + while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_INTERWORKING_ID))) { + return (bcm_tlv_t *)ie; + } + return NULL; +} + + +static s32 +wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag, + uint8 ie_id, uint8 *data, uint8 data_len) +{ + s32 err = BCME_OK; + s32 buf_len; + s32 iecount; + ie_setbuf_t *ie_setbuf; + + if (ie_id != DOT11_MNG_INTERWORKING_ID) + return BCME_UNSUPPORTED; + + /* Validate the pktflag parameter */ + if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG | + VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG | + VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG| + VNDR_IE_CUSTOM_FLAG))) { + WL_ERR(("cfg80211 Add IE: Invalid packet flag 0x%x\n", pktflag)); + return -1; + } + + /* use VNDR_IE_CUSTOM_FLAG flags for none vendor IE . currently fixed value */ + pktflag = htod32(pktflag); + + buf_len = sizeof(ie_setbuf_t) + data_len - 1; + ie_setbuf = (ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL); + + if (!ie_setbuf) { + WL_ERR(("Error allocating buffer for IE\n")); + return -ENOMEM; + } + + if (cfg->iw_ie_len == data_len && !memcmp(cfg->iw_ie, data, data_len)) { + WL_ERR(("Previous IW IE is equals to current IE\n")); + err = BCME_OK; + goto exit; + } + + strncpy(ie_setbuf->cmd, "add", VNDR_IE_CMD_LEN - 1); + ie_setbuf->cmd[VNDR_IE_CMD_LEN - 1] = '\0'; + + /* Buffer contains only 1 IE */ + iecount = htod32(1); + memcpy((void *)&ie_setbuf->ie_buffer.iecount, &iecount, sizeof(int)); + memcpy((void *)&ie_setbuf->ie_buffer.ie_list[0].pktflag, &pktflag, sizeof(uint32)); + + /* Now, add the IE to the buffer */ + ie_setbuf->ie_buffer.ie_list[0].ie_data.id = ie_id; + + /* if already set with previous values, delete it first */ + if (cfg->iw_ie_len != 0) { + WL_DBG(("Different IW_IE was already set. 
clear first\n"));
+
+		ie_setbuf->ie_buffer.ie_list[0].ie_data.len = 0;
+
+		err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+		if (err != BCME_OK)
+			goto exit;
+	}
+
+	ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len;
+	memcpy((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], data, data_len);
+
+	err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+	if (err == BCME_OK) {
+		memcpy(cfg->iw_ie, data, data_len);
+		cfg->iw_ie_len = data_len;
+		cfg->wl11u = TRUE;
+
+		err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx);
+	}
+
+exit:
+	if (ie_setbuf)
+		kfree(ie_setbuf);
+	return err;
+}
+#endif /* WL11U */
+
+s32
+wl_cfg80211_dfs_ap_move(struct net_device *ndev, char *data, char *command, int total_len)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	char ioctl_buf[50];
+	int err = 0;
+	uint32 val = 0;
+	chanspec_t chanspec = 0;
+	int abort;
+	int bytes_written = 0;
+	wl_dfs_ap_move_status_t *status;
+	char chanbuf[CHANSPEC_STR_LEN];
+	const char *dfs_state_str[DFS_SCAN_S_MAX] = {
+		"Radar Free On Channel",
+		"Radar Found On Channel",
+		"Radar Scan In Progress",
+		"Radar Scan Aborted",
+		"RSDB Mode switch in Progress For Scan"
+	};
+	if (ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
+		bytes_written = snprintf(command, total_len, "AP is not UP\n");
+		return bytes_written;
+	}
+	if (!*data) {
+		if ((err = wldev_iovar_getbuf(ndev, "dfs_ap_move", NULL, 0,
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) {
+			WL_ERR(("getting dfs_ap_move status failed with err=%d \n", err));
+			return err;
+		}
+		status = (wl_dfs_ap_move_status_t *)cfg->ioctl_buf;
+
+		if (status->version != WL_DFS_AP_MOVE_VERSION) {
+			err = BCME_UNSUPPORTED;
+			WL_ERR(("err=%d version=%d\n", err, status->version));
+			return err;
+		}
+
+		if (status->move_status != (int8) DFS_SCAN_S_IDLE) {
+			chanspec = wl_chspec_driver_to_host(status->chanspec);
+			if (chanspec != 0 && chanspec != INVCHANSPEC) {
+				wf_chspec_ntoa(chanspec, chanbuf);
+				bytes_written = snprintf(command, total_len,
+					"AP Target Chanspec %s (0x%x)\n", chanbuf, chanspec);
+			}
+			bytes_written += snprintf(command + bytes_written,
+				total_len - bytes_written,
+				"%s\n", dfs_state_str[status->move_status]);
+			return bytes_written;
+		} else {
+			bytes_written = snprintf(command, total_len, "dfs AP move in IDLE state\n");
+			return bytes_written;
+		}
+
+	}
+
+	abort = bcm_atoi(data);
+	if (abort == -1) {
+		if ((err = wldev_iovar_setbuf(ndev, "dfs_ap_move", &abort,
+				sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
+			WL_ERR(("setting dfs_ap_move failed with err %d\n", err));
+			return err;
+		}
+	} else {
+		chanspec = wf_chspec_aton(data);
+		if (chanspec != 0) {
+			val = wl_chspec_host_to_driver(chanspec);
+			if (val != INVCHANSPEC) {
+				if ((err = wldev_iovar_setbuf(ndev, "dfs_ap_move", &val,
+						sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
+					WL_ERR(("setting dfs_ap_move failed with err %d\n", err));
+					return err;
+				}
+				WL_DBG(("set dfs_ap_move successful\n"));
+			} else {
+				err = BCME_USAGE_ERROR;
+			}
+		}
+	}
+	return err;
+}
+
+s32
+wl_cfg80211_wbtext_config(struct net_device *ndev, char *data, char *command, int total_len)
+{
+	uint i = 0;
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	wl_roam_prof_band_t *rp;
+	int err = -EINVAL, bytes_written = 0;
+	size_t len = strlen(data);
+	int rp_len = 0;
+	data[len] = '\0';
+	rp = (wl_roam_prof_band_t *) kzalloc(sizeof(*rp)
+		* WL_MAX_ROAM_PROF_BRACKETS, GFP_KERNEL);
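+	/*
+	 * Illustrative note, not part of the original patch: judging from the
+	 * parsing below, the command data is expected to look like
+	 *
+	 *     b 65 -65 10 600 75 -75 20 600
+	 *
+	 * i.e. a band prefix ('b' = 2.4GHz, 'a' = 5GHz) followed by up to two
+	 * space-separated <roam_trigger> <rssi_lower> <channel_usage>
+	 * <cu_avg_calc_dur> brackets; the numeric values here are made-up
+	 * examples. With nothing after the band prefix, the current roam
+	 * profile is read back from the fw instead.
+	 */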
+	if (unlikely(!rp)) {
+		WL_ERR(("%s: failed to allocate memory\n", __func__));
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	rp->ver = WL_MAX_ROAM_PROF_VER;
+	if (*data && (!strncmp(data, "b", 1))) {
+		rp->band = WLC_BAND_2G;
+	} else if (*data && (!strncmp(data, "a", 1))) {
+		rp->band = WLC_BAND_5G;
+	} else {
+		err = snprintf(command, total_len, "Missing band\n");
+		goto exit;
+	}
+	data++;
+	rp->len = 0;
+	/* Get the current roam profile from the fw */
+	if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp),
+			cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) {
+		WL_ERR(("Getting roam_profile failed with err=%d \n", err));
+		goto exit;
+	}
+	memcpy(rp, cfg->ioctl_buf, sizeof(*rp) * WL_MAX_ROAM_PROF_BRACKETS);
+	/* check the roam_prof version */
+	if (rp->ver != WL_MAX_ROAM_PROF_VER) {
+		WL_ERR(("bad version (=%d) in return data\n", rp->ver));
+		err = -EINVAL;
+		goto exit;
+	}
+	if ((rp->len % sizeof(wl_roam_prof_t)) != 0) {
+		WL_ERR(("bad length (=%d) in return data\n", rp->len));
+		err = -EINVAL;
+		goto exit;
+	}
+
+	if (!*data) {
+		for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+			/* Print the roam profile data read from the fw and stop
+			 * as soon as one of the conditions below is hit: the
+			 * remaining buffer length is smaller than a roam
+			 * profile entry, or there is no valid entry left.
+			 */
+			if (((i * sizeof(wl_roam_prof_t)) > rp->len) ||
+				(rp->roam_prof[i].fullscan_period == 0)) {
+				break;
+			}
+			bytes_written += snprintf(command+bytes_written,
+				total_len - bytes_written,
+				"RSSI[%d,%d] CU(trigger:%d%%: duration:%ds)\n",
+				rp->roam_prof[i].roam_trigger, rp->roam_prof[i].rssi_lower,
+				rp->roam_prof[i].channel_usage,
+				rp->roam_prof[i].cu_avg_calc_dur);
+		}
+		err = bytes_written;
+		goto exit;
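+		/*
+		 * Illustrative note, not part of the original patch: with the
+		 * format string above, one profile bracket prints as e.g.
+		 *
+		 *     RSSI[-65,-75] CU(trigger:10%: duration:600s)
+		 *
+		 * where the numbers are made-up example values for
+		 * roam_trigger, rssi_lower, channel_usage and cu_avg_calc_dur.
+		 */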
+	} else {
+		for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+			/* Walk the roam profile data read from the fw and stop
+			 * as soon as one of the conditions below is hit: the
+			 * remaining buffer length is smaller than a roam
+			 * profile entry, or there is no valid entry left.
+			 */
+			if (((i * sizeof(wl_roam_prof_t)) > rp->len) ||
+				(rp->roam_prof[i].fullscan_period == 0)) {
+				break;
+			}
+		}
+		/* Do not set roam_prof from upper layer if fw doesn't have 2 rows */
+		if (i != 2) {
+			WL_ERR(("FW must have 2 rows to fill roam_prof\n"));
+			err = -EINVAL;
+			goto exit;
+		}
+		/* set the new roam profile in the fw */
+		data++;
+		for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+			rp->roam_prof[i].roam_trigger = simple_strtol(data, &data, 10);
+			data++;
+			rp->roam_prof[i].rssi_lower = simple_strtol(data, &data, 10);
+			data++;
+			rp->roam_prof[i].channel_usage = simple_strtol(data, &data, 10);
+			data++;
+			rp->roam_prof[i].cu_avg_calc_dur = simple_strtol(data, &data, 10);
+
+			rp_len += sizeof(wl_roam_prof_t);
+			if (*data == '\0') {
+				break;
+			}
+			data++;
+		}
+		if (i != 1) {
+			WL_ERR(("Only two roam_prof rows supported.\n"));
+			err = -EINVAL;
+			goto exit;
+		}
+		rp->len = rp_len;
+		if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp,
+				sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL)) < 0) {
+			WL_ERR(("setting roam_profile failed with err %d\n", err));
+		}
+	}
+exit:
+	if (rp) {
+		kfree(rp);
+	}
+	return err;
+}
+
+int wl_cfg80211_wbtext_weight_config(struct net_device *ndev, char *data,
+	char *command, int total_len)
+{
+	struct bcm_cfg80211 *cfg = g_bcm_cfg;
+	int bytes_written = 0, err = -EINVAL, argc = 0;
+	char rssi[5], band[5], weight[5];
+	char *endptr = NULL;
+	wnm_bss_select_weight_cfg_t *bwcfg;
+
+	bwcfg = kzalloc(sizeof(*bwcfg), GFP_KERNEL);
+	if (unlikely(!bwcfg)) {
+		WL_ERR(("%s: failed to allocate memory\n", __func__));
+		err = -ENOMEM;
+		goto exit;
+	}
+	bwcfg->version = WNM_BSSLOAD_MONITOR_VERSION;
+	bwcfg->type = 0;
+	bwcfg->weight = 0;
+
+	argc = sscanf(data, "%4s %4s %4s", rssi, band, weight);
+
+	if (!strcasecmp(rssi, "rssi"))
+		bwcfg->type = WNM_BSS_SELECT_TYPE_RSSI;
+	else if (!strcasecmp(rssi, "cu"))
+		bwcfg->type = WNM_BSS_SELECT_TYPE_CU;
+	else {
+		/* Usage DRIVER WBTEXT_WEIGHT_CONFIG */
+		WL_ERR(("%s: Command usage error\n", __func__));
+		goto exit;
+	}
+
+	if (!strcasecmp(band, "a"))
+		bwcfg->band = WLC_BAND_5G;
+	else if (!strcasecmp(band, "b"))
+		bwcfg->band = WLC_BAND_2G;
+	else if (!strcasecmp(band, "all"))
+		bwcfg->band = WLC_BAND_ALL;
+	else {
+		WL_ERR(("%s: Command usage error\n", __func__));
+		goto exit;
+	}
+
+	if (argc == 2) {
+		/* If there is no data after band, get wnm_bss_select_weight from the fw */
+		if (bwcfg->band == WLC_BAND_ALL) {
+			WL_ERR(("band option \"all\" is for set only, not get\n"));
+			goto exit;
+		}
+		if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_weight", bwcfg,
+				sizeof(*bwcfg),
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) {
+			WL_ERR(("Getting wnm_bss_select_weight failed with err=%d \n", err));
+			goto exit;
+		}
+		memcpy(bwcfg, cfg->ioctl_buf, sizeof(*bwcfg));
+		bytes_written = snprintf(command, total_len, "%s %s weight = %d\n",
+			(bwcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? "RSSI" : "CU",
+			(bwcfg->band == WLC_BAND_2G) ? "2G" : "5G", bwcfg->weight);
"2G" : "5G", bwcfg->weight); + err = bytes_written; + goto exit; + } else { + /* if weight is non integer returns command usage error */ + bwcfg->weight = simple_strtol(weight, &endptr, 0); + if (*endptr != '\0') { + WL_ERR(("%s: Command usage error", __func__)); + goto exit; + } + /* setting weight for iovar wnm_bss_select_weight to fw */ + if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_weight", bwcfg, + sizeof(*bwcfg), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) { + WL_ERR(("Getting wnm_bss_select_weight failed with err=%d\n", err)); + } + } +exit: + if (bwcfg) { + kfree(bwcfg); + } + return err; +} + +/* WBTEXT_TUPLE_MIN_LEN_CHECK :strlen(low)+" "+strlen(high)+" "+strlen(factor) */ +#define WBTEXT_TUPLE_MIN_LEN_CHECK 5 + +int wl_cfg80211_wbtext_table_config(struct net_device *ndev, char *data, + char *command, int total_len) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + int bytes_written = 0, err = -EINVAL; + char rssi[5], band[5]; + int btcfg_len = 0, i = 0, parsed_len = 0; + wnm_bss_select_factor_cfg_t *btcfg; + size_t slen = strlen(data); + char *start_addr = NULL; + data[slen] = '\0'; + + btcfg = kzalloc((sizeof(*btcfg) + sizeof(*btcfg) * + WL_FACTOR_TABLE_MAX_LIMIT), GFP_KERNEL); + if (unlikely(!btcfg)) { + WL_ERR(("%s: failed to allocate memory\n", __func__)); + err = -ENOMEM; + goto exit; + } + + btcfg->version = WNM_BSS_SELECT_FACTOR_VERSION; + btcfg->band = WLC_BAND_AUTO; + btcfg->type = 0; + btcfg->count = 0; + + sscanf(data, "%s %s", rssi, band); + + if (!strcasecmp(rssi, "rssi")) { + btcfg->type = WNM_BSS_SELECT_TYPE_RSSI; + } + else if (!strcasecmp(rssi, "cu")) { + btcfg->type = WNM_BSS_SELECT_TYPE_CU; + } + else { + WL_ERR(("%s: Command usage error\n", __func__)); + goto exit; + } + + if (!strcasecmp(band, "a")) { + btcfg->band = WLC_BAND_5G; + } + else if (!strcasecmp(band, "b")) { + btcfg->band = WLC_BAND_2G; + } + else if (!strcasecmp(band, "all")) { + btcfg->band = WLC_BAND_ALL; + } + else { + WL_ERR(("%s: Command usage, Wrong band\n", __func__)); + goto exit; + } + + if ((slen - 1) == (strlen(rssi) + strlen(band))) { + /* Getting factor table using iovar 'wnm_bss_select_table' from fw */ + if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_table", btcfg, + sizeof(*btcfg), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) { + WL_ERR(("Getting wnm_bss_select_table failed with err=%d \n", err)); + goto exit; + } + memcpy(btcfg, cfg->ioctl_buf, sizeof(*btcfg)); + memcpy(btcfg, cfg->ioctl_buf, (btcfg->count+1) * sizeof(*btcfg)); + + bytes_written += snprintf(command + bytes_written, total_len, + "No of entries in table: %d\n", btcfg->count); + bytes_written += snprintf(command + bytes_written, total_len, "%s factor table\n", + (btcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? 
"RSSI" : "CU"); + bytes_written += snprintf(command + bytes_written, total_len, + "low\thigh\tfactor\n"); + for (i = 0; i <= btcfg->count-1; i++) { + bytes_written += snprintf(command + bytes_written, total_len, + "%d\t%d\t%d\n", btcfg->params[i].low, btcfg->params[i].high, + btcfg->params[i].factor); + } + err = bytes_written; + goto exit; + } else { + memset(btcfg->params, 0, sizeof(*btcfg) * WL_FACTOR_TABLE_MAX_LIMIT); + data += (strlen(rssi) + strlen(band) + 2); + start_addr = data; + slen = slen - (strlen(rssi) + strlen(band) + 2); + for (i = 0; i < WL_FACTOR_TABLE_MAX_LIMIT; i++) { + if (parsed_len + WBTEXT_TUPLE_MIN_LEN_CHECK <= slen) { + btcfg->params[i].low = simple_strtol(data, &data, 10); + data++; + btcfg->params[i].high = simple_strtol(data, &data, 10); + data++; + btcfg->params[i].factor = simple_strtol(data, &data, 10); + btcfg->count++; + if (*data == '\0') { + break; + } + data++; + parsed_len = data - start_addr; + } else { + WL_ERR(("%s:Command usage:less no of args\n", __func__)); + goto exit; + } + } + btcfg_len = sizeof(*btcfg) + ((btcfg->count) * sizeof(*btcfg)); + if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_table", btcfg, btcfg_len, + cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL)) < 0) { + WL_ERR(("seting wnm_bss_select_table failed with err %d\n", err)); + goto exit; + } + } +exit: + if (btcfg) { + kfree(btcfg); + } + return err; +} + +s32 +wl_cfg80211_wbtext_delta_config(struct net_device *ndev, char *data, char *command, int total_len) +{ + uint i = 0; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + int err = -EINVAL, bytes_written = 0, argc = 0, val, len = 0; + char delta[5], band[5], *endptr = NULL; + wl_roam_prof_band_t *rp; + + rp = (wl_roam_prof_band_t *) kzalloc(sizeof(*rp) + * WL_MAX_ROAM_PROF_BRACKETS, GFP_KERNEL); + if (unlikely(!rp)) { + WL_ERR(("%s: failed to allocate memory\n", __func__)); + err = -ENOMEM; + goto exit; + } + + argc = sscanf(data, "%s %s", band, delta); + if (!strcasecmp(band, "a")) + rp->band = WLC_BAND_5G; + else if (!strcasecmp(band, "b")) + rp->band = WLC_BAND_2G; + else { + WL_ERR(("%s: Missing band\n", __func__)); + goto exit; + } + /* Getting roam profile from fw */ + if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) { + WL_ERR(("Getting roam_profile failed with err=%d \n", err)); + goto exit; + } + memcpy(rp, cfg->ioctl_buf, sizeof(wl_roam_prof_band_t)); + if (rp->ver != WL_MAX_ROAM_PROF_VER) { + WL_ERR(("bad version (=%d) in return data\n", rp->ver)); + err = -EINVAL; + goto exit; + } + if ((rp->len % sizeof(wl_roam_prof_t)) != 0) { + WL_ERR(("bad length (=%d) in return data\n", rp->len)); + err = -EINVAL; + goto exit; + } + + if (argc == 2) { + /* if delta is non integer returns command usage error */ + val = simple_strtol(delta, &endptr, 0); + if (*endptr != '\0') { + WL_ERR(("%s: Command usage error", __func__)); + goto exit; + } + for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) { + /* + * Checking contents of roam profile data from fw and exits + * if code hits below condtion. If remaining length of buffer is + * less than roam profile size or if there is no valid entry. + */ + if (((i * sizeof(wl_roam_prof_t)) > rp->len) || + (rp->roam_prof[i].fullscan_period == 0)) { + break; + } + if (rp->roam_prof[i].channel_usage != 0) { + rp->roam_prof[i].roam_delta = val; + } + len += sizeof(wl_roam_prof_t); + } + } + else { + if (rp->roam_prof[i].channel_usage != 0) { + bytes_written = snprintf(command, total_len, + "%s Delta %d\n", (rp->band == WLC_BAND_2G) ? 
"2G" : "5G", + rp->roam_prof[0].roam_delta); + } + err = bytes_written; + goto exit; + } + rp->len = len; + if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp, + sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL)) < 0) { + WL_ERR(("seting roam_profile failed with err %d\n", err)); + } +exit : + if (rp) { + kfree(rp); + } + return err; +} + + +int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev) +{ + struct bcm_cfg80211 *cfg = NULL; + struct net_device *ndev = NULL; + struct cfg80211_scan_info info = {}; + unsigned long flags; + int clear_flag = 0; + int ret = 0; + + WL_TRACE(("Enter\n")); + + cfg = g_bcm_cfg; + if (!cfg) + return -EINVAL; + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + spin_lock_irqsave(&cfg->cfgdrv_lock, flags); +#ifdef WL_CFG80211_P2P_DEV_IF + if (cfg->scan_request && cfg->scan_request->wdev == cfgdev) { +#else + if (cfg->scan_request && cfg->scan_request->dev == cfgdev) { +#endif + info.aborted = true; + cfg80211_scan_done(cfg->scan_request, &info); + cfg->scan_request = NULL; + clear_flag = 1; + } + spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags); + + if (clear_flag) + wl_clr_drv_status(cfg, SCANNING, ndev); + + return ret; +} + +bool wl_cfg80211_is_concurrent_mode(void) +{ + if ((g_bcm_cfg) && (wl_get_drv_status_all(g_bcm_cfg, CONNECTED) > 1)) { + return true; + } else { + return false; + } +} + +void* wl_cfg80211_get_dhdp() +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + return cfg->pub; +} + +bool wl_cfg80211_is_p2p_active(void) +{ + return (g_bcm_cfg && g_bcm_cfg->p2p); +} + +bool wl_cfg80211_is_roam_offload(void) +{ + return (g_bcm_cfg && g_bcm_cfg->roam_offload); +} + +bool wl_cfg80211_is_event_from_connected_bssid(const wl_event_msg_t *e, int ifidx) +{ + dhd_pub_t *dhd = NULL; + struct net_device *ndev = NULL; + u8 *curbssid = NULL; + + dhd = (dhd_pub_t *)(g_bcm_cfg->pub); + + if (dhd) { + ndev = dhd_idx2net(dhd, ifidx); + } + + if (!dhd || !ndev) { + return false; + } + + curbssid = wl_read_prof(g_bcm_cfg, ndev, WL_PROF_BSSID); + + return memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) == 0; +} + +static void wl_cfg80211_work_handler(struct work_struct * work) +{ + struct bcm_cfg80211 *cfg = NULL; + struct net_info *iter, *next; + s32 err = BCME_OK; + s32 pm = PM_FAST; + BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, pm_enable_work.work); + WL_DBG(("Enter \n")); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + /* p2p discovery iface ndev could be null */ + if (iter->ndev) { + if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev) || + (wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_BSS && + wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_IBSS)) + continue; + if (iter->ndev) { + if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM, + &pm, sizeof(pm), true)) != 0) { + if (err == -ENODEV) + WL_DBG(("%s:netdev not ready\n", + iter->ndev->name)); + else + WL_ERR(("%s:error (%d)\n", + iter->ndev->name, err)); + } else + wl_cfg80211_update_power_mode(iter->ndev); + } + } + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) + _Pragma("GCC diagnostic pop") +#endif + DHD_OS_WAKE_UNLOCK(cfg->pub); +} + +u8 +wl_get_action_category(void *frame, u32 frame_len) +{ + u8 category; + u8 *ptr = (u8 *)frame; + if (frame == NULL) + return DOT11_ACTION_CAT_ERR_MASK; + if (frame_len < DOT11_ACTION_HDR_LEN) + return 
DOT11_ACTION_CAT_ERR_MASK; + category = ptr[DOT11_ACTION_CAT_OFF]; + WL_INFORM(("Action Category: %d\n", category)); + return category; +} + +int +wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action) +{ + u8 *ptr = (u8 *)frame; + if (frame == NULL || ret_action == NULL) + return BCME_ERROR; + if (frame_len < DOT11_ACTION_HDR_LEN) + return BCME_ERROR; + if (DOT11_ACTION_CAT_PUBLIC != wl_get_action_category(frame, frame_len)) + return BCME_ERROR; + *ret_action = ptr[DOT11_ACTION_ACT_OFF]; + WL_INFORM(("Public Action : %d\n", *ret_action)); + return BCME_OK; +} + + +static int +wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev, + const struct ether_addr *bssid) +{ + s32 err; + wl_event_msg_t e; + + bzero(&e, sizeof(e)); + e.event_type = cpu_to_be32(WLC_E_BSSID); + memcpy(&e.addr, bssid, ETHER_ADDR_LEN); + /* trigger the roam event handler */ + WL_INFORM(("Delayed roam to " MACDBG "\n", MAC2STRDBG((u8*)(bssid)))); + err = wl_notify_roaming_status(cfg, ndev_to_cfgdev(ndev), &e, NULL); + + return err; +} + +static s32 +wl_cfg80211_parse_vndr_ies(u8 *parse, u32 len, + struct parsed_vndr_ies *vndr_ies) +{ + s32 err = BCME_OK; + vndr_ie_t *vndrie; + bcm_tlv_t *ie; + struct parsed_vndr_ie_info *parsed_info; + u32 count = 0; + s32 remained_len; + + remained_len = (s32)len; + memset(vndr_ies, 0, sizeof(*vndr_ies)); + + WL_INFORM(("---> len %d\n", len)); + ie = (bcm_tlv_t *) parse; + if (!bcm_valid_tlv(ie, remained_len)) + ie = NULL; + while (ie) { + if (count >= MAX_VNDR_IE_NUMBER) + break; + if (ie->id == DOT11_MNG_VS_ID) { + vndrie = (vndr_ie_t *) ie; + /* len should be bigger than OUI length + one data length at least */ + if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) { + WL_ERR(("%s: invalid vndr ie. length is too small %d\n", + __FUNCTION__, vndrie->len)); + goto end; + } + /* if wpa or wme ie, do not add ie */ + if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) && + ((vndrie->data[0] == WPA_OUI_TYPE) || + (vndrie->data[0] == WME_OUI_TYPE))) { + CFGP2P_DBG(("Found WPA/WME oui. Do not add it\n")); + goto end; + } + + parsed_info = &vndr_ies->ie_info[count++]; + + /* save vndr ie information */ + parsed_info->ie_ptr = (char *)vndrie; + parsed_info->ie_len = (vndrie->len + TLV_HDR_LEN); + memcpy(&parsed_info->vndrie, vndrie, sizeof(vndr_ie_t)); + vndr_ies->count = count; + + WL_DBG(("\t ** OUI %02x %02x %02x, type 0x%02x len:%d\n", + parsed_info->vndrie.oui[0], parsed_info->vndrie.oui[1], + parsed_info->vndrie.oui[2], parsed_info->vndrie.data[0], + parsed_info->ie_len)); + } +end: + ie = bcm_next_tlv(ie, &remained_len); + } + return err; +} + +s32 +wl_cfg80211_clear_per_bss_ies(struct bcm_cfg80211 *cfg, s32 bssidx) +{ + s32 index; + struct net_info *netinfo; + s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG, + VNDR_IE_ASSOCRSP_FLAG, VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG}; + + netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx); + if (!netinfo || !netinfo->wdev) { + WL_ERR(("netinfo or netinfo->wdev is NULL\n")); + return -1; + } + + WL_DBG(("clear management vendor IEs for bssidx:%d \n", bssidx)); + /* Clear the IEs set in the firmware so that host is in sync with firmware */ + for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) { + if (wl_cfg80211_set_mgmt_vndr_ies(cfg, wdev_to_cfgdev(netinfo->wdev), + bssidx, vndrie_flag[index], NULL, 0) < 0) + WL_ERR(("vndr_ies clear failed. Ignoring.. 
\n")); + } + + return 0; +} + +s32 +wl_cfg80211_clear_mgmt_vndr_ies(struct bcm_cfg80211 *cfg) +{ + struct net_info *iter, *next; + + WL_DBG(("clear management vendor IEs \n")); +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + for_each_ndev(cfg, iter, next) { + wl_cfg80211_clear_per_bss_ies(cfg, iter->bssidx); + } +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + return 0; +} + +#define WL_VNDR_IE_MAXLEN 2048 +static s8 g_mgmt_ie_buf[WL_VNDR_IE_MAXLEN]; +int +wl_cfg80211_set_mgmt_vndr_ies(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + s32 bssidx, s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len) +{ + struct net_device *ndev = NULL; + s32 ret = BCME_OK; + u8 *curr_ie_buf = NULL; + u8 *mgmt_ie_buf = NULL; + u32 mgmt_ie_buf_len = 0; + u32 *mgmt_ie_len = 0; + u32 del_add_ie_buf_len = 0; + u32 total_ie_buf_len = 0; + u32 parsed_ie_buf_len = 0; + struct parsed_vndr_ies old_vndr_ies; + struct parsed_vndr_ies new_vndr_ies; + s32 i; + u8 *ptr; + s32 remained_buf_len; + wl_bss_vndr_ies_t *ies = NULL; + struct net_info *netinfo; + + WL_DBG(("Enter. pktflag:0x%x bssidx:%x vnd_ie_len:%d \n", + pktflag, bssidx, vndr_ie_len)); + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + if (bssidx > WL_MAX_IFS) { + WL_ERR(("bssidx > supported concurrent Ifaces \n")); + return -EINVAL; + } + + netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx); + if (!netinfo) { + WL_ERR(("net_info ptr is NULL \n")); + return -EINVAL; + } + + /* Clear the global buffer */ + memset(g_mgmt_ie_buf, 0, sizeof(g_mgmt_ie_buf)); + curr_ie_buf = g_mgmt_ie_buf; + ies = &netinfo->bss.ies; + + switch (pktflag) { + case VNDR_IE_PRBRSP_FLAG : + mgmt_ie_buf = ies->probe_res_ie; + mgmt_ie_len = &ies->probe_res_ie_len; + mgmt_ie_buf_len = sizeof(ies->probe_res_ie); + break; + case VNDR_IE_ASSOCRSP_FLAG : + mgmt_ie_buf = ies->assoc_res_ie; + mgmt_ie_len = &ies->assoc_res_ie_len; + mgmt_ie_buf_len = sizeof(ies->assoc_res_ie); + break; + case VNDR_IE_BEACON_FLAG : + mgmt_ie_buf = ies->beacon_ie; + mgmt_ie_len = &ies->beacon_ie_len; + mgmt_ie_buf_len = sizeof(ies->beacon_ie); + break; + case VNDR_IE_PRBREQ_FLAG : + mgmt_ie_buf = ies->probe_req_ie; + mgmt_ie_len = &ies->probe_req_ie_len; + mgmt_ie_buf_len = sizeof(ies->probe_req_ie); + break; + case VNDR_IE_ASSOCREQ_FLAG : + mgmt_ie_buf = ies->assoc_req_ie; + mgmt_ie_len = &ies->assoc_req_ie_len; + mgmt_ie_buf_len = sizeof(ies->assoc_req_ie); + break; + default: + mgmt_ie_buf = NULL; + mgmt_ie_len = NULL; + WL_ERR(("not suitable packet type (%d)\n", pktflag)); + return BCME_ERROR; + } + + if (vndr_ie_len > mgmt_ie_buf_len) { + WL_ERR(("extra IE size too big\n")); + ret = -ENOMEM; + } else { + /* parse and save new vndr_ie in curr_ie_buff before comparing it */ + if (vndr_ie && vndr_ie_len && curr_ie_buf) { + ptr = curr_ie_buf; +/* must discard vndr_ie constness, attempt to change vndr_ie arg to non-const + * causes cascade of errors in other places, fix involves const casts there + */ +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic push") +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#endif + if ((ret = wl_cfg80211_parse_vndr_ies((u8 *)vndr_ie, + vndr_ie_len, &new_vndr_ies)) < 0) { + WL_ERR(("parse vndr ie failed \n")); + goto exit; + 
} +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) +_Pragma("GCC diagnostic pop") +#endif + for (i = 0; i < new_vndr_ies.count; i++) { + struct parsed_vndr_ie_info *vndrie_info = + &new_vndr_ies.ie_info[i]; + + if ((parsed_ie_buf_len + vndrie_info->ie_len) > WL_VNDR_IE_MAXLEN) { + WL_ERR(("IE size is too big (%d > %d)\n", + parsed_ie_buf_len, WL_VNDR_IE_MAXLEN)); + ret = -EINVAL; + goto exit; + } + + memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr, + vndrie_info->ie_len); + parsed_ie_buf_len += vndrie_info->ie_len; + } + } + + if (mgmt_ie_buf != NULL) { + if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) && + (memcmp(mgmt_ie_buf, curr_ie_buf, parsed_ie_buf_len) == 0)) { + WL_INFORM(("Previous mgmt IE is equals to current IE")); + goto exit; + } + + /* parse old vndr_ie */ + if ((ret = wl_cfg80211_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len, + &old_vndr_ies)) < 0) { + WL_ERR(("parse vndr ie failed \n")); + goto exit; + } + /* make a command to delete old ie */ + for (i = 0; i < old_vndr_ies.count; i++) { + struct parsed_vndr_ie_info *vndrie_info = + &old_vndr_ies.ie_info[i]; + + WL_INFORM(("DELETED ID : %d, Len: %d , OUI:%02x:%02x:%02x\n", + vndrie_info->vndrie.id, vndrie_info->vndrie.len, + vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1], + vndrie_info->vndrie.oui[2])); + + del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf, + pktflag, vndrie_info->vndrie.oui, + vndrie_info->vndrie.id, + vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN, + vndrie_info->ie_len - VNDR_IE_FIXED_LEN, + "del"); + + curr_ie_buf += del_add_ie_buf_len; + total_ie_buf_len += del_add_ie_buf_len; + } + } + + *mgmt_ie_len = 0; + /* Add if there is any extra IE */ + if (mgmt_ie_buf && parsed_ie_buf_len) { + ptr = mgmt_ie_buf; + + remained_buf_len = mgmt_ie_buf_len; + + /* make a command to add new ie */ + for (i = 0; i < new_vndr_ies.count; i++) { + struct parsed_vndr_ie_info *vndrie_info = + &new_vndr_ies.ie_info[i]; + + WL_INFORM(("ADDED ID : %d, Len: %d(%d), OUI:%02x:%02x:%02x\n", + vndrie_info->vndrie.id, vndrie_info->vndrie.len, + vndrie_info->ie_len - 2, + vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1], + vndrie_info->vndrie.oui[2])); + + del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf, + pktflag, vndrie_info->vndrie.oui, + vndrie_info->vndrie.id, + vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN, + vndrie_info->ie_len - VNDR_IE_FIXED_LEN, + "add"); + + /* verify remained buf size before copy data */ + if (remained_buf_len >= vndrie_info->ie_len) { + remained_buf_len -= vndrie_info->ie_len; + } else { + WL_ERR(("no space in mgmt_ie_buf: pktflag = %d, " + "found vndr ies # = %d(cur %d), remained len %d, " + "cur mgmt_ie_len %d, new ie len = %d\n", + pktflag, new_vndr_ies.count, i, remained_buf_len, + *mgmt_ie_len, vndrie_info->ie_len)); + break; + } + + /* save the parsed IE in cfg struct */ + memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr, + vndrie_info->ie_len); + *mgmt_ie_len += vndrie_info->ie_len; + curr_ie_buf += del_add_ie_buf_len; + total_ie_buf_len += del_add_ie_buf_len; + } + } + + if (total_ie_buf_len && cfg->ioctl_buf != NULL) { + ret = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", g_mgmt_ie_buf, + total_ie_buf_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, + bssidx, &cfg->ioctl_buf_sync); + if (ret) + WL_ERR(("vndr ie set error : %d\n", ret)); + } + } +exit: + +return ret; +} + +#ifdef WL_CFG80211_ACL +static int +wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev, + const struct cfg80211_acl_data 
*acl)
+{
+	int i;
+	int ret = 0;
+	int macnum = 0;
+	int macmode = MACLIST_MODE_DISABLED;
+	struct maclist *list;
+
+	/* get the MAC filter mode */
+	if (acl && acl->acl_policy == NL80211_ACL_POLICY_DENY_UNLESS_LISTED) {
+		macmode = MACLIST_MODE_ALLOW;
+	} else if (acl && acl->acl_policy == NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED &&
+		acl->n_acl_entries) {
+		macmode = MACLIST_MODE_DENY;
+	}
+
+	/* if acl == NULL, macmode stays disabled */
+	if (macmode == MACLIST_MODE_DISABLED) {
+		if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, NULL)) != 0)
+			WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+
+		return ret;
+	}
+
+	macnum = acl->n_acl_entries;
+	if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
+		WL_ERR(("%s : invalid number of MAC address entries %d\n",
+			__FUNCTION__, macnum));
+		return -1;
+	}
+
+	/* allocate memory for the MAC list */
+	list = (struct maclist*)kmalloc(sizeof(int) +
+		sizeof(struct ether_addr) * macnum, GFP_KERNEL);
+	if (!list) {
+		WL_ERR(("%s : failed to allocate memory\n", __FUNCTION__));
+		return -1;
+	}
+
+	/* prepare the MAC list */
+	list->count = htod32(macnum);
+	for (i = 0; i < macnum; i++) {
+		memcpy(&list->ea[i], &acl->mac_addrs[i], ETHER_ADDR_LEN);
+	}
+	/* set the list */
+	if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, list)) != 0)
+		WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+
+	kfree(list);
+
+	return ret;
+}
+#endif /* WL_CFG80211_ACL */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+int wl_chspec_chandef(chanspec_t chanspec,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	struct cfg80211_chan_def *chandef,
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && \
+	(LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+	struct chan_info *chaninfo,
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) */
+	struct wiphy *wiphy)
+{
+	uint16 freq = 0;
+	int chan_type = 0;
+	int channel = 0;
+	struct ieee80211_channel *chan;
+
+	if (!chandef) {
+		return -1;
+	}
+	channel = CHSPEC_CHANNEL(chanspec);
+
+	switch (CHSPEC_BW(chanspec)) {
+	case WL_CHANSPEC_BW_20:
+		chan_type = NL80211_CHAN_HT20;
+		break;
+	case WL_CHANSPEC_BW_40:
+	{
+		if (CHSPEC_SB_UPPER(chanspec)) {
+			channel += CH_10MHZ_APART;
+		} else {
+			channel -= CH_10MHZ_APART;
+		}
+	}
+		chan_type = NL80211_CHAN_HT40PLUS;
+		break;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	case WL_CHANSPEC_BW_80:
+	case WL_CHANSPEC_BW_8080:
+	{
+		uint16 sb = CHSPEC_CTL_SB(chanspec);
+
+		if (sb == WL_CHANSPEC_CTL_SB_LL) {
+			channel -= (CH_10MHZ_APART + CH_20MHZ_APART);
+		} else if (sb == WL_CHANSPEC_CTL_SB_LU) {
+			channel -= CH_10MHZ_APART;
+		} else if (sb == WL_CHANSPEC_CTL_SB_UL) {
+			channel += CH_10MHZ_APART;
+		} else {
+			/* WL_CHANSPEC_CTL_SB_UU */
+			channel += (CH_10MHZ_APART + CH_20MHZ_APART);
+		}
+
+		if (sb == WL_CHANSPEC_CTL_SB_LL || sb == WL_CHANSPEC_CTL_SB_LU)
+			chan_type = NL80211_CHAN_HT40MINUS;
+		else if (sb == WL_CHANSPEC_CTL_SB_UL || sb == WL_CHANSPEC_CTL_SB_UU)
+			chan_type = NL80211_CHAN_HT40PLUS;
+	}
+		break;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) */
+	default:
+		chan_type = NL80211_CHAN_HT20;
+		break;
+	}
+
+	if (CHSPEC_IS5G(chanspec))
+		freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_5GHZ);
+	else
+		freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
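+	/*
+	 * Illustrative note, not part of the original patch: after the
+	 * sideband adjustment above, "channel" holds the control channel, so
+	 * e.g. 5GHz channel 36 resolves to freq 5180 (MHz) and 2.4GHz
+	 * channel 1 to freq 2412 via ieee80211_channel_to_frequency().
+	 */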
+	chan = ieee80211_get_channel(wiphy, freq);
+	WL_DBG(("channel:%d freq:%d chan_type: %d chan_ptr:%p \n",
+		channel, freq, chan_type, chan));
+
+	if (unlikely(!chan)) {
+		/* fw and cfg80211 channel lists are not in sync */
+		WL_ERR(("Couldn't find matching channel in wiphy channel list \n"));
+		ASSERT(0);
+		return -EINVAL;
+	}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	cfg80211_chandef_create(chandef, chan, chan_type);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && \
+	(LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+	chaninfo->freq = freq;
+	chaninfo->chan_type = chan_type;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) */
+	return 0;
+}
+
+void
+wl_cfg80211_ch_switch_notify(struct net_device *dev, uint16 chanspec, struct wiphy *wiphy)
+{
+	u32 freq;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	struct cfg80211_chan_def chandef;
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && \
+	(LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+	struct chan_info chaninfo;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) */
+
+	if (!wiphy) {
+		WL_ERR(("wiphy is null\n"));
+		return;
+	}
+#ifndef ALLOW_CHSW_EVT
+	/* Channel switch support is only for AP/GO/ADHOC/MESH */
+	if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION ||
+		dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) {
+		WL_ERR(("No channel switch notify support for STA/GC\n"));
+		return;
+	}
+#endif /* !ALLOW_CHSW_EVT */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	if (wl_chspec_chandef(chanspec, &chandef, wiphy)) {
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && \
+	(LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+	if (wl_chspec_chandef(chanspec, &chaninfo, wiphy)) {
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) */
+		WL_ERR(("chspec_chandef failed\n"));
+		return;
+	}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	freq = chandef.chan ? chandef.chan->center_freq : chandef.center_freq1;
+	cfg80211_ch_switch_notify(dev, &chandef);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && \
+	(LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+	freq = chaninfo.freq;
+	cfg80211_ch_switch_notify(dev, chaninfo.freq, chaninfo.chan_type);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) */
+
+	WL_ERR(("Channel switch notification for freq: %d chanspec: 0x%x\n", freq, chanspec));
+	return;
+}
+#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
+
+#ifdef WL11ULB
+s32
+wl_cfg80211_set_ulb_mode(struct net_device *dev, int mode)
+{
+	int ret;
+	int cur_mode;
+
+	ret = wldev_iovar_getint(dev, "ulb_mode", &cur_mode);
+	if (unlikely(ret)) {
+		WL_ERR(("[ULB] ulb_mode get failed. ret:%d \n", ret));
+		return ret;
+	}
+
+	if (cur_mode == mode) {
+		/* If the requested mode is the same as the current mode,
+		 * do nothing (avoid an unnecessary wl down and up).
+		 */
+		WL_INFORM(("[ULB] No change in ulb_mode. Do nothing.\n"));
+		return 0;
+	}
+
+	/* setting ulb_mode requires wl to be down */
+	ret = wldev_ioctl(dev, WLC_DOWN, NULL, 0, true);
+	if (unlikely(ret)) {
+		WL_ERR(("[ULB] WLC_DOWN command failed:[%d]\n", ret));
+		return ret;
+	}
+
+	if (mode >= MAX_SUPP_ULB_MODES) {
+		WL_ERR(("[ULB] unsupported ulb_mode :[%d]\n", mode));
+		return -EINVAL;
+	}
+
+	ret = wldev_iovar_setint(dev, "ulb_mode", mode);
+	if (unlikely(ret)) {
+		WL_ERR(("[ULB] ulb_mode set failed. 
ret:%d \n", ret)); + return ret; + } + + ret = wldev_ioctl(dev, WLC_UP, NULL, 0, true); + if (unlikely(ret)) { + WL_ERR(("[ULB] WLC_DOWN command failed:[%d]\n", ret)); + return ret; + } + + WL_DBG(("[ULB] ulb_mode set to %d successfully \n", mode)); + + return ret; +} + +static s32 +wl_cfg80211_ulbbw_to_ulbchspec(u32 bw) +{ + if (bw == ULB_BW_DISABLED) { + return WL_CHANSPEC_BW_20; + } else if (bw == ULB_BW_10MHZ) { + return WL_CHANSPEC_BW_10; + } else if (bw == ULB_BW_5MHZ) { + return WL_CHANSPEC_BW_5; + } else if (bw == ULB_BW_2P5MHZ) { + return WL_CHANSPEC_BW_2P5; + } else { + WL_ERR(("[ULB] unsupported value for ulb_bw \n")); + return -EINVAL; + } +} + +static chanspec_t +wl_cfg80211_ulb_get_min_bw_chspec(struct wireless_dev *wdev, s32 bssidx) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_info *_netinfo; + + /* + * Return the chspec value corresponding to the + * BW setting for a particular interface + */ + if (wdev) { + /* if wdev is provided, use it */ + _netinfo = wl_get_netinfo_by_wdev(cfg, wdev); + } else if (bssidx >= 0) { + /* if wdev is not provided, use it */ + _netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx); + } else { + WL_ERR(("[ULB] wdev/bssidx not provided\n")); + return INVCHANSPEC; + } + + if (unlikely(!_netinfo)) { + WL_ERR(("[ULB] net_info is null \n")); + return INVCHANSPEC; + } + + if (_netinfo->ulb_bw) { + WL_DBG(("[ULB] wdev_ptr:%p ulb_bw:0x%x \n", _netinfo->wdev, _netinfo->ulb_bw)); + return wl_cfg80211_ulbbw_to_ulbchspec(_netinfo->ulb_bw); + } else { + return WL_CHANSPEC_BW_20; + } +} + +static s32 +wl_cfg80211_get_ulb_bw(struct wireless_dev *wdev) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_info *_netinfo = wl_get_netinfo_by_wdev(cfg, wdev); + + /* + * Return the ulb_bw setting for a + * particular interface + */ + if (unlikely(!_netinfo)) { + WL_ERR(("[ULB] net_info is null \n")); + return -1; + } + + return _netinfo->ulb_bw; +} + +s32 +wl_cfg80211_set_ulb_bw(struct net_device *dev, + u32 ulb_bw, char *ifname) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + int ret; + int mode; + struct net_info *_netinfo = NULL, *iter, *next; + u32 bssidx; + enum nl80211_iftype iftype; + + if (!ifname) + return -EINVAL; + + WL_DBG(("[ULB] Enter. 
bw_type:%d \n", ulb_bw)); + + ret = wldev_iovar_getint(dev, "ulb_mode", &mode); + if (unlikely(ret)) { + WL_ERR(("[ULB] ulb_mode not supported \n")); + return ret; + } + + if (mode != ULB_MODE_STD_ALONE_MODE) { + WL_ERR(("[ULB] ulb bw modification allowed only in stand-alone mode\n")); + return -EINVAL; + } + + if (ulb_bw >= MAX_SUPP_ULB_BW) { + WL_ERR(("[ULB] unsupported value (%d) for ulb_bw \n", ulb_bw)); + return -EINVAL; + } + +#ifdef WL_CFG80211_P2P_DEV_IF + if (strcmp(ifname, "p2p-dev-wlan0") == 0) { + iftype = NL80211_IFTYPE_P2P_DEVICE; + /* Use wdev corresponding to the dedicated p2p discovery interface */ + if (likely(cfg->p2p_wdev)) { + _netinfo = wl_get_netinfo_by_wdev(cfg, cfg->p2p_wdev); + } else { + return -ENODEV; + } + } +#endif /* WL_CFG80211_P2P_DEV_IF */ + if (!_netinfo) { + for_each_ndev(cfg, iter, next) { + if (iter->ndev) { + if (strncmp(iter->ndev->name, ifname, strlen(ifname)) == 0) { + _netinfo = wl_get_netinfo_by_netdev(cfg, iter->ndev); + iftype = NL80211_IFTYPE_STATION; + } + } + } + } + + if (!_netinfo) + return -ENODEV; + bssidx = _netinfo->bssidx; + _netinfo->ulb_bw = ulb_bw; + + + WL_DBG(("[ULB] Applying ulb_bw:%d for bssidx:%d \n", ulb_bw, bssidx)); + ret = wldev_iovar_setbuf_bsscfg(dev, "ulb_bw", (void *)&ulb_bw, 4, + cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, + &cfg->ioctl_buf_sync); + if (unlikely(ret)) { + WL_ERR(("[ULB] ulb_bw set failed. ret:%d \n", ret)); + return ret; + } + + return ret; +} +#endif /* WL11ULB */ + +static void +wl_ap_channel_ind(struct bcm_cfg80211 *cfg, + struct net_device *ndev, + chanspec_t chanspec) +{ + u32 channel = LCHSPEC_CHANNEL(chanspec); + + WL_DBG(("(%s) AP channel:%d chspec:0x%x \n", + ndev->name, channel, chanspec)); + if (cfg->ap_oper_channel && (cfg->ap_oper_channel != channel)) { + /* + * If cached channel is different from the channel indicated + * by the event, notify user space about the channel switch. 
+ */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) + wl_cfg80211_ch_switch_notify(ndev, chanspec, bcmcfg_to_wiphy(cfg)); +#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */ + cfg->ap_oper_channel = channel; + } +} + +static s32 +wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, +const wl_event_msg_t *e, void *data) +{ + struct net_device *ndev = NULL; + chanspec_t chanspec; + u32 channel; + + WL_DBG(("Enter\n")); + if (unlikely(e->status)) { + WL_ERR(("status:0x%x \n", e->status)); + return -1; + } + + if (!data) { + return -EINVAL; + } + + if (likely(cfgdev)) { + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + chanspec = *((chanspec_t *)data); + channel = LCHSPEC_CHANNEL(chanspec); + + if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) { + /* For AP/GO role */ + wl_ap_channel_ind(cfg, ndev, chanspec); + } + } + + return 0; +} + +static s32 +wl_csa_complete_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, +const wl_event_msg_t *e, void *data) +{ + int error = 0; + u32 chanspec = 0; + struct net_device *ndev = NULL; + struct wiphy *wiphy = NULL; + + WL_DBG(("Enter\n")); + if (unlikely(e->status)) { + WL_ERR(("status:0x%x \n", e->status)); + return -1; + } + + if (likely(cfgdev)) { + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + wiphy = bcmcfg_to_wiphy(cfg); + error = wldev_iovar_getint(ndev, "chanspec", &chanspec); + if (unlikely(error)) { + WL_ERR(("Get chanspec error: %d \n", error)); + return -1; + } + + if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) { + /* For AP/GO role */ + wl_ap_channel_ind(cfg, ndev, chanspec); + } else { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) + wl_cfg80211_ch_switch_notify(ndev, chanspec, wiphy); +#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */ + } + + } + + return 0; +} + +#ifdef WL_NAN +int +wl_cfg80211_nan_cmd_handler(struct net_device *ndev, char *cmd, int cmd_len) +{ + return wl_cfgnan_cmd_handler(ndev, g_bcm_cfg, cmd, cmd_len); +} +#endif /* WL_NAN */ + +void wl_cfg80211_clear_security(struct bcm_cfg80211 *cfg) +{ + struct net_device *dev = bcmcfg_to_prmry_ndev(cfg); + int err; + + /* Clear the security settings on the primary Interface */ + err = wldev_iovar_setint(dev, "wsec", 0); + if (unlikely(err)) { + WL_ERR(("wsec clear failed \n")); + } + err = wldev_iovar_setint(dev, "auth", 0); + if (unlikely(err)) { + WL_ERR(("auth clear failed \n")); + } + err = wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED); + if (unlikely(err)) { + WL_ERR(("wpa_auth clear failed \n")); + } +} + +#ifdef WL_CFG80211_P2P_DEV_IF +void wl_cfg80211_del_p2p_wdev(void) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct wireless_dev *wdev = NULL; + + WL_DBG(("Enter \n")); + if (!cfg) { + WL_ERR(("Invalid Ptr\n")); + return; + } else { + wdev = cfg->p2p_wdev; + } + + if (wdev && cfg->down_disc_if) { + wl_cfgp2p_del_p2p_disc_if(wdev, cfg); + cfg->down_disc_if = FALSE; + } +} +#endif /* WL_CFG80211_P2P_DEV_IF */ + +#if defined(WL_SUPPORT_AUTO_CHANNEL) +int +wl_cfg80211_set_spect(struct net_device *dev, int spect) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + int down = 1; + int up = 1; + int err = BCME_OK; + + if (!wl_get_drv_status_all(cfg, CONNECTED)) { + err = wldev_ioctl(dev, WLC_DOWN, &down, sizeof(down), true); + if (err) { + WL_ERR(("%s: WLC_DOWN failed: code: %d\n", __func__, err)); + return err; + } + + err = wldev_ioctl(dev, WLC_SET_SPECT_MANAGMENT, &spect, sizeof(spect), true); + if (err) { + WL_ERR(("%s: error setting spect: code: %d\n", __func__, err)); + return err; + } + + err = wldev_ioctl(dev, WLC_UP, &up, sizeof(up), true); + if 
(err) { + WL_ERR(("%s: WLC_UP failed: code: %d\n", __func__, err)); + return err; + } + } + return err; +} + +int +wl_cfg80211_get_sta_channel(void) +{ + struct net_device *ndev = bcmcfg_to_prmry_ndev(g_bcm_cfg); + int channel = 0; + + if (wl_get_drv_status(g_bcm_cfg, CONNECTED, ndev)) { + channel = g_bcm_cfg->channel; + } + return channel; +} +#endif /* WL_SUPPORT_AUTO_CHANNEL */ +#ifdef P2P_LISTEN_OFFLOADING +s32 +wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg) +{ + s32 bssidx; + int ret = 0; + int p2plo_pause = 0; + if (!cfg || !cfg->p2p) { + WL_ERR(("Wl %p or cfg->p2p %p is null\n", + cfg, cfg ? cfg->p2p : 0)); + return 0; + } + + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + ret = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), + "p2po_stop", (void*)&p2plo_pause, sizeof(p2plo_pause), + cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL); + if (ret < 0) { + WL_ERR(("p2po_stop Failed :%d\n", ret)); + } + + return ret; +} +s32 +wl_cfg80211_p2plo_listen_start(struct net_device *dev, u8 *buf, int len) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + wl_p2plo_listen_t p2plo_listen; + int ret = -EAGAIN; + int channel = 0; + int period = 0; + int interval = 0; + int count = 0; + + if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) { + WL_ERR(("Sending Action Frames. Try it again.\n")); + goto exit; + } + + if (wl_get_drv_status_all(cfg, SCANNING)) { + WL_ERR(("Scanning already\n")); + goto exit; + } + + if (wl_get_drv_status(cfg, SCAN_ABORTING, dev)) { + WL_ERR(("Scanning being aborted\n")); + goto exit; + } + + if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) { + WL_ERR(("p2p listen offloading already running\n")); + goto exit; + } + + /* Just in case if it is not enabled */ + if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) { + WL_ERR(("cfgp2p_enable discovery failed")); + goto exit; + } + + bzero(&p2plo_listen, sizeof(wl_p2plo_listen_t)); + + if (len) { + sscanf(buf, " %10d %10d %10d %10d", &channel, &period, &interval, &count); + if ((channel == 0) || (period == 0) || + (interval == 0) || (count == 0)) { + WL_ERR(("Wrong argument %d/%d/%d/%d \n", + channel, period, interval, count)); + ret = -EAGAIN; + goto exit; + } + p2plo_listen.period = period; + p2plo_listen.interval = interval; + p2plo_listen.count = count; + + WL_ERR(("channel:%d period:%d, interval:%d count:%d\n", + channel, period, interval, count)); + } else { + WL_ERR(("Argument len is wrong.\n")); + ret = -EAGAIN; + goto exit; + } + + if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel, + sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN, + bssidx, &cfg->ioctl_buf_sync)) < 0) { + WL_ERR(("p2po_listen_channel Failed :%d\n", ret)); + goto exit; + } + + if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&p2plo_listen, + sizeof(wl_p2plo_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN, + bssidx, &cfg->ioctl_buf_sync)) < 0) { + WL_ERR(("p2po_listen Failed :%d\n", ret)); + goto exit; + } + + wl_set_p2p_status(cfg, DISC_IN_PROGRESS); + cfg->last_roc_id = P2PO_COOKIE; +exit : + return ret; +} +s32 +wl_cfg80211_p2plo_listen_stop(struct net_device *dev) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + int ret = -EAGAIN; + + if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop", NULL, + 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, + bssidx, &cfg->ioctl_buf_sync)) < 0) { + WL_ERR(("p2po_stop Failed :%d\n", ret)); + goto exit; + } + +exit: + return ret; +} +#endif /* 
P2P_LISTEN_OFFLOADING */ +u64 +wl_cfg80211_get_new_roc_id(struct bcm_cfg80211 *cfg) +{ + u64 id = 0; + id = ++cfg->last_roc_id; +#ifdef P2P_LISTEN_OFFLOADING + if (id == P2PO_COOKIE) { + id = ++cfg->last_roc_id; + } +#endif /* P2P_LISTEN_OFFLOADING */ + if (id == 0) + id = ++cfg->last_roc_id; + return id; +} + +#if defined(SUPPORT_RANDOM_MAC_SCAN) +int +wl_cfg80211_set_random_mac(struct net_device *dev, bool enable) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + int ret; + + if (cfg->random_mac_enabled == enable) { + WL_ERR(("Random MAC already %s\n", enable ? "Enabled" : "Disabled")); + return BCME_OK; + } + + if (enable) { + ret = wl_cfg80211_random_mac_enable(dev); + } else { + ret = wl_cfg80211_random_mac_disable(dev); + } + + if (!ret) { + cfg->random_mac_enabled = enable; + } + + return ret; +} + +int +wl_cfg80211_random_mac_enable(struct net_device *dev) +{ + u8 current_mac[ETH_ALEN] = {0, }; + s32 err = BCME_ERROR; + uint8 buffer[20] = {0, }; + wl_scanmac_t *sm = NULL; + int len = 0; + wl_scanmac_enable_t *sm_enable = NULL; + wl_scanmac_config_t *sm_config = NULL; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + if (wl_get_drv_status_all(cfg, CONNECTED) || wl_get_drv_status_all(cfg, CONNECTING) || + wl_get_drv_status_all(cfg, AP_CREATED) || wl_get_drv_status_all(cfg, AP_CREATING)) { + WL_ERR(("Fail to Set random mac, current state is wrong\n")); + return err; + } + + /* Read current mac address */ + err = wldev_iovar_getbuf_bsscfg(dev, "cur_etheraddr", + NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); + + if (err != BCME_OK) { + WL_ERR(("failed to get current dongle mac address\n")); + return err; + } + + memcpy(current_mac, cfg->ioctl_buf, ETH_ALEN); + + /* Enable scan mac */ + sm = (wl_scanmac_t *)buffer; + sm_enable = (wl_scanmac_enable_t *)sm->data; + sm->len = sizeof(*sm_enable); + sm_enable->enable = 1; + len = OFFSETOF(wl_scanmac_t, data) + sm->len; + sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE; + + err = wldev_iovar_setbuf_bsscfg(dev, "scanmac", + sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); + + if (err != BCME_OK) { + WL_ERR(("failed to enable scanmac, err=%d\n", err)); + return err; + } + + /* Configure scanmac */ + memset(buffer, 0x0, sizeof(buffer)); + sm_config = (wl_scanmac_config_t *)sm->data; + sm->len = sizeof(*sm_config); + sm->subcmd_id = WL_SCANMAC_SUBCMD_CONFIG; + sm_config->scan_bitmap = WL_SCANMAC_SCAN_UNASSOC; + + /* Set current mac address */ + memcpy(&sm_config->mac.octet, current_mac, ETH_ALEN); + sm_config->mac.octet[3] = 0x0; + sm_config->mac.octet[4] = 0x0; + sm_config->mac.octet[5] = 0x0; + + /* Set randomize mac address(last 3bytes) */ + memset(&sm_config->random_mask.octet, 0x0, ETH_ALEN); + sm_config->random_mask.octet[3] = 0xff; + sm_config->random_mask.octet[4] = 0xff; + sm_config->random_mask.octet[5] = 0xff; + + len = OFFSETOF(wl_scanmac_t, data) + sm->len; + + err = wldev_iovar_setbuf_bsscfg(dev, "scanmac", + sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); + + if (err != BCME_OK) { + WL_ERR(("failed scanmac configuration\n")); + + /* Disable scan mac for clean-up */ + wl_cfg80211_random_mac_disable(dev); + return err; + } + + WL_ERR(("random MAC enable done")); + return err; +} + +int +wl_cfg80211_random_mac_disable(struct net_device *dev) +{ + s32 err = BCME_ERROR; + uint8 buffer[20] = {0, }; + wl_scanmac_t *sm = NULL; + int len = 0; + wl_scanmac_enable_t *sm_enable = NULL; + struct bcm_cfg80211 *cfg = g_bcm_cfg; + + sm = (wl_scanmac_t *)buffer; + sm_enable = (wl_scanmac_enable_t 
*)sm->data; + sm->len = sizeof(*sm_enable); + sm_enable->enable = 0; + len = OFFSETOF(wl_scanmac_t, data) + sm->len; + + sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE; + + err = wldev_iovar_setbuf_bsscfg(dev, "scanmac", + sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync); + + if (err != BCME_OK) { + WL_ERR(("failed to disable scanmac, err=%d\n", err)); + return err; + } + + WL_ERR(("random MAC disable done\n")); + return err; +} +#endif /* SUPPORT_RANDOM_MAC_SCAN */ + +int +wl_cfg80211_iface_count(void) +{ + struct bcm_cfg80211 *cfg = g_bcm_cfg; + struct net_info *iter, *next; + int iface_count = 0; + + for_each_ndev(cfg, iter, next) { + if (iter->ndev) { + iface_count++; + } + } + return iface_count; +} + +#ifdef DHD_LOG_DUMP +struct bcm_cfg80211* +wl_get_bcm_cfg80211_ptr(void) +{ + return g_bcm_cfg; +} +#endif /* DHD_LOG_DUMP */ + +#define CHECK_DONGLE_IDLE_TIME 50 +#define CHECK_DONGLE_IDLE_CNT 100 +int +wl_check_dongle_idle(struct wiphy *wiphy) +{ + int error = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct net_device *primary_ndev; + int retry = 0; + struct channel_info ci; + if (!cfg) + return FALSE; + /* Use primary I/F for sending cmds down to firmware */ + primary_ndev = bcmcfg_to_prmry_ndev(cfg); + + while (retry++ < CHECK_DONGLE_IDLE_CNT) { + error = wldev_ioctl(primary_ndev, WLC_GET_CHANNEL, &ci, sizeof(ci), false); + if (error != BCME_OK || ci.scan_channel != 0) { + WL_ERR(("Firmware is busy(err:%d scan channel:%d). wait %dms\n", + error, ci.scan_channel, CHECK_DONGLE_IDLE_TIME)); + } else { + break; + } + wl_delay(CHECK_DONGLE_IDLE_TIME); + } + if (retry >= CHECK_DONGLE_IDLE_CNT) { + WL_ERR(("DONGLE is BUSY too long\n")); + return FALSE; + } + WL_DBG(("DONGLE is idle\n")); + return TRUE; +} diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.h b/drivers/net/wireless/bcmdhd/wl_cfg80211.h new file mode 100644 index 000000000000..945ab5801b6b --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.h @@ -0,0 +1,1442 @@ +/* + * Linux cfg80211 driver + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_cfg80211.h 608788 2015-12-29 10:59:33Z $ + */ + +/** + * Older Linux versions support the 'iw' interface, more recent ones the 'cfg80211' interface. 
+ */ + +#ifndef _wl_cfg80211_h_ +#define _wl_cfg80211_h_ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct wl_conf; +struct wl_iface; +struct bcm_cfg80211; +struct wl_security; +struct wl_ibss; + + +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh64(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) + +#define WL_DBG_NONE 0 +#define WL_DBG_P2P_ACTION (1 << 5) +#define WL_DBG_TRACE (1 << 4) +#define WL_DBG_SCAN (1 << 3) +#define WL_DBG_DBG (1 << 2) +#define WL_DBG_INFO (1 << 1) +#define WL_DBG_ERR (1 << 0) + +#ifdef DHD_LOG_DUMP +extern void dhd_log_dump_print(const char *fmt, ...); +extern char *dhd_log_dump_get_timestamp(void); +struct bcm_cfg80211 *wl_get_bcm_cfg80211_ptr(void); +#endif /* DHD_LOG_DUMP */ + +/* 0 invalidates all debug messages. default is 1 */ +#define WL_DBG_LEVEL 0xFF + +#ifdef CUSTOMER_HW4_DEBUG +#define CFG80211_ERROR_TEXT "CFG80211-INFO2) " +#else +#define CFG80211_ERROR_TEXT "CFG80211-ERROR) " +#endif /* CUSTOMER_HW4_DEBUG */ + +#if defined(DHD_DEBUG) +#ifdef DHD_LOG_DUMP +#define WL_ERR(args) \ +do { \ + if (wl_dbg_level & WL_DBG_ERR) { \ + printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \ + printk args; \ + dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } \ +} while (0) +#else +#define WL_ERR(args) \ +do { \ + if (wl_dbg_level & WL_DBG_ERR) { \ + printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \ + printk args; \ + } \ +} while (0) +#endif /* DHD_LOG_DUMP */ +#else /* defined(DHD_DEBUG) */ +#define WL_ERR(args) \ +do { \ + if ((wl_dbg_level & WL_DBG_ERR) && net_ratelimit()) { \ + printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \ + printk args; \ + } \ +} while (0) +#endif /* defined(DHD_DEBUG) */ + +#ifdef WL_INFORM +#undef WL_INFORM +#endif + +#define WL_INFORM(args) \ +do { \ + if (wl_dbg_level & WL_DBG_INFO) { \ + printk(KERN_INFO "CFG80211-INFO) %s : ", __func__); \ + printk args; \ + } \ +} while (0) + + +#ifdef WL_SCAN +#undef WL_SCAN +#endif +#define WL_SCAN(args) \ +do { \ + if (wl_dbg_level & WL_DBG_SCAN) { \ + printk(KERN_INFO "CFG80211-SCAN) %s :", __func__); \ + printk args; \ + } \ +} while (0) +#ifdef WL_TRACE +#undef WL_TRACE +#endif +#define WL_TRACE(args) \ +do { \ + if (wl_dbg_level & WL_DBG_TRACE) { \ + printk(KERN_INFO "CFG80211-TRACE) %s :", __func__); \ + printk args; \ + } \ +} while (0) +#ifdef WL_TRACE_HW4 +#undef WL_TRACE_HW4 +#endif +#ifdef CUSTOMER_HW4_DEBUG +#define WL_TRACE_HW4(args) \ +do { \ + if (wl_dbg_level & WL_DBG_ERR) { \ + printk(KERN_INFO "CFG80211-TRACE) %s : ", __func__); \ + printk args; \ + } \ +} while (0) +#else +#define WL_TRACE_HW4 WL_TRACE +#endif /* CUSTOMER_HW4_DEBUG */ +#if (WL_DBG_LEVEL > 0) +#define WL_DBG(args) \ +do { \ + if (wl_dbg_level & WL_DBG_DBG) { \ + printk(KERN_DEBUG "CFG80211-DEBUG) %s :", __func__); \ + printk args; \ + } \ +} while (0) +#else /* !(WL_DBG_LEVEL > 0) */ +#define WL_DBG(args) +#endif /* (WL_DBG_LEVEL > 0) */ +#define WL_PNO(x) +#define WL_SD(x) + + +#define WL_SCAN_RETRY_MAX 3 +#define WL_NUM_PMKIDS_MAX MAXPMKID +#define WL_SCAN_BUF_MAX (1024 * 8) +#define WL_TLV_INFO_MAX 1500 +#define WL_SCAN_IE_LEN_MAX 2048 +#define WL_BSS_INFO_MAX 2048 +#define WL_ASSOC_INFO_MAX 512 +#define WL_IOCTL_LEN_MAX 2048 +#define WL_EXTRA_BUF_MAX 2048 +#define WL_SCAN_ERSULTS_LAST (WL_SCAN_RESULTS_NO_MEM+1) +#define WL_AP_MAX 256 +#define WL_FILE_NAME_MAX 256 +#define WL_DWELL_TIME 200 +#define 
WL_MED_DWELL_TIME 400 +#define WL_MIN_DWELL_TIME 100 +#define WL_LONG_DWELL_TIME 1000 +#define IFACE_MAX_CNT 4 +#define WL_SCAN_CONNECT_DWELL_TIME_MS 200 +#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20 +#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320 +#define WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 400 +#define WL_AF_TX_MAX_RETRY 5 + +#define WL_AF_SEARCH_TIME_MAX 450 +#define WL_AF_TX_EXTRA_TIME_MAX 200 + +#define WL_SCAN_TIMER_INTERVAL_MS 10000 /* Scan timeout */ +#define WL_CHANNEL_SYNC_RETRY 5 +#define WL_INVALID -1 + +#ifdef DHD_LOSSLESS_ROAMING +#define WL_ROAM_TIMEOUT_MS 1000 /* Roam timeout */ +#endif +/* Bring down SCB Timeout to 20secs from 60secs default */ +#ifndef WL_SCB_TIMEOUT +#define WL_SCB_TIMEOUT 20 +#endif + +#ifndef WL_SCB_ACTIVITY_TIME +#define WL_SCB_ACTIVITY_TIME 5 +#endif + +#ifndef WL_SCB_MAX_PROBE +#define WL_SCB_MAX_PROBE 3 +#endif + +#ifndef WL_MIN_PSPRETEND_THRESHOLD +#define WL_MIN_PSPRETEND_THRESHOLD 2 +#endif + +/* SCAN_SUPPRESS timer values in ms */ +#define WL_SCAN_SUPPRESS_TIMEOUT 31000 /* default Framwork DHCP timeout is 30 sec */ +#define WL_SCAN_SUPPRESS_RETRY 3000 + +#define WL_PM_ENABLE_TIMEOUT 10000 + +/* cfg80211 wowlan definitions */ +#define WL_WOWLAN_MAX_PATTERNS 8 +#define WL_WOWLAN_MIN_PATTERN_LEN 1 +#define WL_WOWLAN_MAX_PATTERN_LEN 255 +#define WL_WOWLAN_PKT_FILTER_ID_FIRST 201 +#define WL_WOWLAN_PKT_FILTER_ID_LAST (WL_WOWLAN_PKT_FILTER_ID_FIRST + \ + WL_WOWLAN_MAX_PATTERNS - 1) + +#ifdef WLTDLS +#define TDLS_TUNNELED_PRB_REQ "\x7f\x50\x6f\x9a\04" +#define TDLS_TUNNELED_PRB_RESP "\x7f\x50\x6f\x9a\05" +#endif /* WLTDLS */ + + +/* driver status */ +enum wl_status { + WL_STATUS_READY = 0, + WL_STATUS_SCANNING, + WL_STATUS_SCAN_ABORTING, + WL_STATUS_CONNECTING, + WL_STATUS_CONNECTED, + WL_STATUS_DISCONNECTING, + WL_STATUS_AP_CREATING, + WL_STATUS_AP_CREATED, + /* whole sending action frame procedure: + * includes a) 'finding common channel' for public action request frame + * and b) 'sending af via 'actframe' iovar' + */ + WL_STATUS_SENDING_ACT_FRM, + /* find a peer to go to a common channel before sending public action req frame */ + WL_STATUS_FINDING_COMMON_CHANNEL, + /* waiting for next af to sync time of supplicant. + * it includes SENDING_ACT_FRM and WAITING_NEXT_ACT_FRM_LISTEN + */ + WL_STATUS_WAITING_NEXT_ACT_FRM, +#ifdef WL_CFG80211_SYNC_GON + /* go to listen state to wait for next af after SENDING_ACT_FRM */ + WL_STATUS_WAITING_NEXT_ACT_FRM_LISTEN, +#endif /* WL_CFG80211_SYNC_GON */ + /* it will be set when upper layer requests listen and succeed in setting listen mode. + * if set, other scan request can abort current listen state + */ + WL_STATUS_REMAINING_ON_CHANNEL, +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + /* it's fake listen state to keep current scan state. + * it will be set when upper layer requests listen but scan is running. then just run + * a expire timer without actual listen state. + * if set, other scan request does not need to abort scan. 
+ */ + WL_STATUS_FAKE_REMAINING_ON_CHANNEL +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ +}; + +/* wi-fi mode */ +enum wl_mode { + WL_MODE_BSS, + WL_MODE_IBSS, + WL_MODE_AP +}; + +/* driver profile list */ +enum wl_prof_list { + WL_PROF_MODE, + WL_PROF_SSID, + WL_PROF_SEC, + WL_PROF_IBSS, + WL_PROF_BAND, + WL_PROF_CHAN, + WL_PROF_BSSID, + WL_PROF_ACT, + WL_PROF_BEACONINT, + WL_PROF_DTIMPERIOD +}; + +/* donlge escan state */ +enum wl_escan_state { + WL_ESCAN_STATE_IDLE, + WL_ESCAN_STATE_SCANING +}; +/* fw downloading status */ +enum wl_fw_status { + WL_FW_LOADING_DONE, + WL_NVRAM_LOADING_DONE +}; + +enum wl_management_type { + WL_BEACON = 0x1, + WL_PROBE_RESP = 0x2, + WL_ASSOC_RESP = 0x4 +}; + +enum wl_pm_workq_act_type { + WL_PM_WORKQ_SHORT, + WL_PM_WORKQ_LONG, + WL_PM_WORKQ_DEL +}; + +/* beacon / probe_response */ +struct beacon_proberesp { + __le64 timestamp; + __le16 beacon_int; + __le16 capab_info; + u8 variable[0]; +} __attribute__ ((packed)); + +/* driver configuration */ +struct wl_conf { + u32 frag_threshold; + u32 rts_threshold; + u32 retry_short; + u32 retry_long; + s32 tx_power; + struct ieee80211_channel channel; +}; + +typedef s32(*EVENT_HANDLER) (struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); + +/* bss inform structure for cfg80211 interface */ +struct wl_cfg80211_bss_info { + u16 band; + u16 channel; + s16 rssi; + u16 frame_len; + u8 frame_buf[1]; +}; + +/* basic structure of scan request */ +struct wl_scan_req { + struct wlc_ssid ssid; +}; + +/* basic structure of information element */ +struct wl_ie { + u16 offset; + u8 buf[WL_TLV_INFO_MAX]; +}; + +/* event queue for cfg80211 main event */ +struct wl_event_q { + struct list_head eq_list; + u32 etype; + wl_event_msg_t emsg; + s8 edata[1]; +}; + +/* security information with currently associated ap */ +struct wl_security { + u32 wpa_versions; + u32 auth_type; + u32 cipher_pairwise; + u32 cipher_group; + u32 wpa_auth; + u32 auth_assoc_res_status; +}; + +/* ibss information for currently joined ibss network */ +struct wl_ibss { + u8 beacon_interval; /* in millisecond */ + u8 atim; /* in millisecond */ + s8 join_only; + u8 band; + u8 channel; +}; + +typedef struct wl_bss_vndr_ies { + u8 probe_req_ie[VNDR_IES_BUF_LEN]; + u8 probe_res_ie[VNDR_IES_MAX_BUF_LEN]; + u8 assoc_req_ie[VNDR_IES_BUF_LEN]; + u8 assoc_res_ie[VNDR_IES_BUF_LEN]; + u8 beacon_ie[VNDR_IES_MAX_BUF_LEN]; + u32 probe_req_ie_len; + u32 probe_res_ie_len; + u32 assoc_req_ie_len; + u32 assoc_res_ie_len; + u32 beacon_ie_len; +} wl_bss_vndr_ies_t; + +typedef struct wl_cfgbss { + u8 *wpa_ie; + u8 *rsn_ie; + u8 *wps_ie; + bool security_mode; + struct wl_bss_vndr_ies ies; /* Common for STA, P2P GC, GO, AP, P2P Disc Interface */ +} wl_cfgbss_t; + +/* cfg driver profile */ +struct wl_profile { + u32 mode; + s32 band; + u32 channel; + struct wlc_ssid ssid; + struct wl_security sec; + struct wl_ibss ibss; + u8 bssid[ETHER_ADDR_LEN]; + u16 beacon_interval; + u8 dtim_period; + bool active; +}; + +struct net_info { + struct net_device *ndev; + struct wireless_dev *wdev; + struct wl_profile profile; + s32 mode; + s32 roam_off; + unsigned long sme_state; + bool pm_restore; + bool pm_block; + s32 pm; + s32 bssidx; + wl_cfgbss_t bss; + u32 ulb_bw; + struct list_head list; /* list of all net_info structure */ +}; + +/* association inform */ +#define MAX_REQ_LINE 1024 +struct wl_connect_info { + u8 req_ie[MAX_REQ_LINE]; + s32 req_ie_len; + u8 resp_ie[MAX_REQ_LINE]; + s32 resp_ie_len; +}; + +/* firmware /nvram downloading controller 
*/ +struct wl_fw_ctrl { + const struct firmware *fw_entry; + unsigned long status; + u32 ptr; + s8 fw_name[WL_FILE_NAME_MAX]; + s8 nvram_name[WL_FILE_NAME_MAX]; +}; + +/* assoc ie length */ +struct wl_assoc_ielen { + u32 req_len; + u32 resp_len; +}; + +/* wpa2 pmk list */ +struct wl_pmk_list { + pmkid_list_t pmkids; + pmkid_t foo[MAXPMKID - 1]; +}; + +#ifdef DHD_MAX_IFS +#define WL_MAX_IFS DHD_MAX_IFS +#else +#define WL_MAX_IFS 16 +#endif + +#define ESCAN_BUF_SIZE (64 * 1024) + +struct escan_info { + u32 escan_state; +#if defined(STATIC_WL_PRIV_STRUCT) +#ifndef CONFIG_DHD_USE_STATIC_BUF +#error STATIC_WL_PRIV_STRUCT should be used with CONFIG_DHD_USE_STATIC_BUF +#endif /* CONFIG_DHD_USE_STATIC_BUF */ + u8 *escan_buf; +#else + u8 escan_buf[ESCAN_BUF_SIZE]; +#endif /* STATIC_WL_PRIV_STRUCT */ + struct wiphy *wiphy; + struct net_device *ndev; +}; + +#ifdef ESCAN_BUF_OVERFLOW_MGMT +#define BUF_OVERFLOW_MGMT_COUNT 3 +typedef struct { + int RSSI; + int length; + struct ether_addr BSSID; +} removal_element_t; +#endif /* ESCAN_BUF_OVERFLOW_MGMT */ + +struct afx_hdl { + wl_af_params_t *pending_tx_act_frm; + struct ether_addr tx_dst_addr; + struct net_device *dev; + struct work_struct work; + s32 bssidx; + u32 retry; + s32 peer_chan; + s32 peer_listen_chan; /* search channel: configured by upper layer */ + s32 my_listen_chan; /* listen chanel: extract it from prb req or gon req */ + bool is_listen; + bool ack_recv; + bool is_active; +}; + +struct parsed_ies { + wpa_ie_fixed_t *wps_ie; + u32 wps_ie_len; + wpa_ie_fixed_t *wpa_ie; + u32 wpa_ie_len; + bcm_tlv_t *wpa2_ie; + u32 wpa2_ie_len; +}; + + +#ifdef P2P_LISTEN_OFFLOADING +typedef struct { + uint16 period; /* listen offload period */ + uint16 interval; /* listen offload interval */ + uint16 count; /* listen offload count */ + uint16 pad; /* pad for 32bit align */ +} wl_p2plo_listen_t; +#endif /* P2P_LISTEN_OFFLOADING */ + +#ifdef WL11U +/* Max length of Interworking element */ +#define IW_IES_MAX_BUF_LEN 9 +#endif +#define MAX_EVENT_BUF_NUM 16 +typedef struct wl_eventmsg_buf { + u16 num; + struct { + u16 type; + bool set; + } event [MAX_EVENT_BUF_NUM]; +} wl_eventmsg_buf_t; + +typedef struct wl_if_event_info { + bool valid; + int ifidx; + int bssidx; + uint8 mac[ETHER_ADDR_LEN]; + char name[IFNAMSIZ+1]; +} wl_if_event_info; + +#if defined(DHD_ENABLE_BIGDATA_LOGGING) +#define GET_BSS_INFO_LEN 90 +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + +/* private data of cfg80211 interface */ +struct bcm_cfg80211 { + struct wireless_dev *wdev; /* representing cfg cfg80211 device */ + + struct wireless_dev *p2p_wdev; /* representing cfg cfg80211 device for P2P */ + struct net_device *p2p_net; /* reference to p2p0 interface */ + + struct wl_conf *conf; + struct cfg80211_scan_request *scan_request; /* scan request object */ + EVENT_HANDLER evt_handler[WLC_E_LAST]; + struct list_head eq_list; /* used for event queue */ + struct list_head net_list; /* used for struct net_info */ + spinlock_t net_list_sync; /* to protect scan status (and others if needed) */ + spinlock_t eq_lock; /* for event queue synchronization */ + spinlock_t cfgdrv_lock; /* to protect scan status (and others if needed) */ + struct completion act_frm_scan; + struct completion iface_disable; + struct completion wait_next_af; + struct mutex usr_sync; /* maily for up/down synchronization */ + struct mutex scan_complete; /* serialize scan_complete call */ + struct wl_scan_results *bss_list; + struct wl_scan_results *scan_results; + + /* scan request object for internal purpose */ + struct wl_scan_req 
*scan_req_int; + /* information element object for internal purpose */ +#if defined(STATIC_WL_PRIV_STRUCT) + struct wl_ie *ie; +#else + struct wl_ie ie; +#endif + + /* association information container */ +#if defined(STATIC_WL_PRIV_STRUCT) + struct wl_connect_info *conn_info; +#else + struct wl_connect_info conn_info; +#endif +#ifdef DEBUGFS_CFG80211 + struct dentry *debugfs; +#endif /* DEBUGFS_CFG80211 */ + struct wl_pmk_list *pmk_list; /* wpa2 pmk list */ + tsk_ctl_t event_tsk; /* task of main event handler thread */ + void *pub; + u32 iface_cnt; + u32 channel; /* current channel */ + u32 af_sent_channel; /* channel action frame is sent */ + /* next af subtype to cancel the remained dwell time in rx process */ + u8 next_af_subtype; +#ifdef WL_CFG80211_SYNC_GON + ulong af_tx_sent_jiffies; +#endif /* WL_CFG80211_SYNC_GON */ + struct escan_info escan_info; /* escan information */ + bool active_scan; /* current scan mode */ + bool ibss_starter; /* indicates this sta is ibss starter */ + bool link_up; /* link/connection up flag */ + + /* indicate whether chip to support power save mode */ + bool pwr_save; + bool roam_on; /* on/off switch for self-roaming */ + bool scan_tried; /* indicates if first scan attempted */ +#if defined(BCMSDIO) || defined(BCMPCIE) + bool wlfc_on; +#endif + bool vsdb_mode; + bool roamoff_on_concurrent; + u8 *ioctl_buf; /* ioctl buffer */ + struct mutex ioctl_buf_sync; + u8 *escan_ioctl_buf; + u8 *extra_buf; /* maily to grab assoc information */ + struct dentry *debugfsdir; + struct rfkill *rfkill; + bool rf_blocked; + struct ieee80211_channel remain_on_chan; + enum nl80211_channel_type remain_on_chan_type; + u64 send_action_id; + u64 last_roc_id; + wait_queue_head_t netif_change_event; + wl_if_event_info if_event_info; + struct completion send_af_done; + struct afx_hdl *afx_hdl; + struct p2p_info *p2p; + bool p2p_supported; + void *btcoex_info; + struct timer_list scan_timeout; /* Timer for catch scan event timeout */ +#if defined(P2P_IE_MISSING_FIX) + bool p2p_prb_noti; +#endif + s32(*state_notifier) (struct bcm_cfg80211 *cfg, + struct net_info *_net_info, enum wl_status state, bool set); + unsigned long interrested_state; + wlc_ssid_t hostapd_ssid; +#ifdef WL11U + bool wl11u; + u8 iw_ie[IW_IES_MAX_BUF_LEN]; + u32 iw_ie_len; +#endif /* WL11U */ + bool sched_scan_running; /* scheduled scan req status */ +#ifdef WL_SCHED_SCAN + struct cfg80211_sched_scan_request *sched_scan_req; /* scheduled scan req */ +#endif /* WL_SCHED_SCAN */ + bool scan_suppressed; + struct timer_list scan_supp_timer; + struct work_struct wlan_work; + struct mutex event_sync; /* maily for up/down synchronization */ + bool disable_roam_event; + struct delayed_work pm_enable_work; + struct mutex pm_sync; /* mainly for pm work synchronization */ + + vndr_ie_setbuf_t *ibss_vsie; /* keep the VSIE for IBSS */ + int ibss_vsie_len; + u32 rmc_event_pid; + u32 rmc_event_seq; +#ifdef WLAIBSS_MCHAN + struct ether_addr ibss_if_addr; + bcm_struct_cfgdev *ibss_cfgdev; /* For AIBSS */ +#endif /* WLAIBSS_MCHAN */ + bcm_struct_cfgdev *bss_cfgdev; /* For DUAL STA/STA+AP */ + s32 cfgdev_bssidx; + bool bss_pending_op; /* indicate where there is a pending IF operation */ + int roam_offload; +#ifdef WL_NAN + bool nan_enable; + bool nan_running; +#endif /* WL_NAN */ +#ifdef WL_CFG80211_P2P_DEV_IF + bool down_disc_if; +#endif /* WL_CFG80211_P2P_DEV_IF */ +#ifdef P2PLISTEN_AP_SAMECHN + bool p2p_resp_apchn_status; +#endif /* P2PLISTEN_AP_SAMECHN */ + struct wl_wsec_key wep_key; +#ifdef WLTDLS + u8 *tdls_mgmt_frame; + u32 
tdls_mgmt_frame_len; + s32 tdls_mgmt_freq; +#endif /* WLTDLS */ + bool need_wait_afrx; +#ifdef QOS_MAP_SET + uint8 *up_table; /* user priority table, size is UP_TABLE_MAX */ +#endif /* QOS_MAP_SET */ + struct ether_addr last_roamed_addr; +#ifdef DHD_LOSSLESS_ROAMING + struct timer_list roam_timeout; /* Timer for catch roam timeout */ +#endif + bool rcc_enabled; /* flag for Roam channel cache feature */ +#if defined(DHD_ENABLE_BIGDATA_LOGGING) + char bss_info[GET_BSS_INFO_LEN]; + wl_event_msg_t event_auth_assoc; + u32 assoc_reject_status; + u32 roam_count; +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + u16 ap_oper_channel; + bool revert_ndo_disable; +#if defined(SUPPORT_RANDOM_MAC_SCAN) + bool random_mac_enabled; +#endif /* SUPPORT_RANDOM_MAC_SCAN */ +}; + +#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \ + 4 && __GNUC_MINOR__ >= 6)) + +#define BCM_LIST_FOR_EACH_ENTRY_SAFE(pos, next, head, member) \ +_Pragma("GCC diagnostic push") \ +_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \ +list_for_each_entry_safe((pos), (next), (head), member) \ +_Pragma("GCC diagnostic pop") \ + +#else +#define BCM_LIST_FOR_EACH_ENTRY_SAFE(pos, next, head, member) \ +list_for_each_entry_safe((pos), (next), (head), member) \ + +#endif /* STRICT_GCC_WARNINGS */ + +static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss) +{ + return bss = bss ? + (struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info; +} + +static inline void +wl_probe_wdev_all(struct bcm_cfg80211 *cfg) +{ + struct net_info *_net_info, *next; + unsigned long int flags; + int idx = 0; + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, + &cfg->net_list, list) { + WL_ERR(("%s: net_list[%d] bssidx: %d, " + "ndev: %p, wdev: %p \n", __FUNCTION__, + idx++, _net_info->bssidx, + _net_info->ndev, _net_info->wdev)); + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return; +} + +static inline struct net_info * +wl_get_netinfo_by_bssidx(struct bcm_cfg80211 *cfg, s32 bssidx) +{ + struct net_info *_net_info, *next, *info = NULL; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if ((bssidx >= 0) && (_net_info->bssidx == bssidx)) { + info = _net_info; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return info; +} + +static inline void +wl_dealloc_netinfo_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev) +{ + struct net_info *_net_info, *next; + unsigned long int flags; + +#ifdef DHD_IFDEBUG + WL_ERR(("dealloc_netinfo enter wdev=%p \n", wdev)); +#endif + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (wdev && (_net_info->wdev == wdev)) { + wl_cfgbss_t *bss = &_net_info->bss; + + kfree(bss->wpa_ie); + bss->wpa_ie = NULL; + kfree(bss->rsn_ie); + bss->rsn_ie = NULL; + kfree(bss->wps_ie); + bss->wps_ie = NULL; + list_del(&_net_info->list); + cfg->iface_cnt--; + kfree(_net_info); + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); +#ifdef DHD_IFDEBUG + WL_ERR(("dealloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt)); +#endif +} + +static inline s32 +wl_alloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev, + struct wireless_dev * wdev, s32 mode, bool pm_block, u8 bssidx) +{ + struct net_info *_net_info; + s32 err = 0; + unsigned long int flags; +#ifdef DHD_IFDEBUG + 
WL_ERR(("alloc_netinfo enter bssidx=%d wdev=%p ndev=%p\n", bssidx, wdev, ndev)); +#endif + /* Check whether there is any duplicate entry for the + * same bssidx * + */ + if ((_net_info = wl_get_netinfo_by_bssidx(cfg, bssidx))) { + /* We have a duplicate entry for the same bssidx + * already present which shouldn't have been the case. + * Attempt recovery. + */ + WL_ERR(("Duplicate entry for bssidx=%d present\n", bssidx)); + wl_probe_wdev_all(cfg); +#ifdef DHD_DEBUG + ASSERT(0); +#endif /* DHD_DEBUG */ + WL_ERR(("Removing the Dup entry for bssidx=%d \n", bssidx)); + wl_dealloc_netinfo_by_wdev(cfg, _net_info->wdev); + } + if (cfg->iface_cnt == IFACE_MAX_CNT) + return -ENOMEM; + _net_info = kzalloc(sizeof(struct net_info), GFP_KERNEL); + if (!_net_info) + err = -ENOMEM; + else { + _net_info->mode = mode; + _net_info->ndev = ndev; + _net_info->wdev = wdev; + _net_info->pm_restore = 0; + _net_info->pm = 0; + _net_info->pm_block = pm_block; + _net_info->roam_off = WL_INVALID; + _net_info->bssidx = bssidx; + spin_lock_irqsave(&cfg->net_list_sync, flags); + cfg->iface_cnt++; + list_add(&_net_info->list, &cfg->net_list); + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + } +#ifdef DHD_IFDEBUG + WL_ERR(("alloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt)); +#endif + return err; +} + +static inline void +wl_delete_all_netinfo(struct bcm_cfg80211 *cfg) +{ + struct net_info *_net_info, *next; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + wl_cfgbss_t *bss = &_net_info->bss; + + kfree(bss->wpa_ie); + bss->wpa_ie = NULL; + kfree(bss->rsn_ie); + bss->rsn_ie = NULL; + kfree(bss->wps_ie); + bss->wps_ie = NULL; + list_del(&_net_info->list); + if (_net_info->wdev) + kfree(_net_info->wdev); + kfree(_net_info); + } + cfg->iface_cnt = 0; + spin_unlock_irqrestore(&cfg->net_list_sync, flags); +} +static inline u32 +wl_get_status_all(struct bcm_cfg80211 *cfg, s32 status) + +{ + struct net_info *_net_info, *next; + u32 cnt = 0; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (_net_info->ndev && + test_bit(status, &_net_info->sme_state)) + cnt++; + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return cnt; +} +static inline void +wl_set_status_all(struct bcm_cfg80211 *cfg, s32 status, u32 op) +{ + struct net_info *_net_info, *next; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + switch (op) { + case 1: + break; /* set all status is not allowed */ + case 2: + /* + * Release the spinlock before calling notifier. 
Else there + * will be nested calls + */ + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + clear_bit(status, &_net_info->sme_state); + if (cfg->state_notifier && + test_bit(status, &(cfg->interrested_state))) + cfg->state_notifier(cfg, _net_info, status, false); + return; + case 4: + break; /* change all status is not allowed */ + default: + break; /* unknown operation */ + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); +} +static inline void +wl_set_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status, + struct net_device *ndev, u32 op) +{ + + struct net_info *_net_info, *next; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) { + switch (op) { + case 1: + /* + * Release the spinlock before calling notifier. Else there + * will be nested calls + */ + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + set_bit(status, &_net_info->sme_state); + if (cfg->state_notifier && + test_bit(status, &(cfg->interrested_state))) + cfg->state_notifier(cfg, _net_info, status, true); + return; + case 2: + /* + * Release the spinlock before calling notifier. Else there + * will be nested calls + */ + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + clear_bit(status, &_net_info->sme_state); + if (cfg->state_notifier && + test_bit(status, &(cfg->interrested_state))) + cfg->state_notifier(cfg, _net_info, status, false); + return; + case 4: + change_bit(status, &_net_info->sme_state); + break; + } + } + + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + +} + +static inline wl_cfgbss_t * +wl_get_cfgbss_by_wdev(struct bcm_cfg80211 *cfg, + struct wireless_dev *wdev) +{ + struct net_info *_net_info, *next; + wl_cfgbss_t *bss = NULL; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (wdev && (_net_info->wdev == wdev)) { + bss = &_net_info->bss; + break; + } + } + + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return bss; +} + +static inline u32 +wl_get_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status, + struct net_device *ndev) +{ + struct net_info *_net_info, *next; + u32 stat = 0; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) { + stat = test_bit(status, &_net_info->sme_state); + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return stat; +} + +static inline s32 +wl_get_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev) +{ + struct net_info *_net_info, *next; + s32 mode = -1; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) { + mode = _net_info->mode; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return mode; +} + +static inline void +wl_set_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev, + s32 mode) +{ + struct net_info *_net_info, *next; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) + _net_info->mode = mode; + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); +} + +static inline s32 
+wl_get_bssidx_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev) +{ + struct net_info *_net_info, *next; + s32 bssidx = -1; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (_net_info->wdev && (_net_info->wdev == wdev)) { + bssidx = _net_info->bssidx; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return bssidx; +} + +static inline struct wireless_dev * +wl_get_wdev_by_bssidx(struct bcm_cfg80211 *cfg, s32 bssidx) +{ + struct net_info *_net_info, *next; + struct wireless_dev *wdev = NULL; + unsigned long int flags; + + if (bssidx < 0) + return NULL; + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (_net_info->bssidx == bssidx) { + wdev = _net_info->wdev; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return wdev; +} + +static inline struct wl_profile * +wl_get_profile_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev) +{ + struct net_info *_net_info, *next; + struct wl_profile *prof = NULL; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) { + prof = &_net_info->profile; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return prof; +} +static inline struct net_info * +wl_get_netinfo_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev) +{ + struct net_info *_net_info, *next, *info = NULL; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (ndev && (_net_info->ndev == ndev)) { + info = _net_info; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return info; +} + +static inline struct net_info * +wl_get_netinfo_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev) +{ + struct net_info *_net_info, *next, *info = NULL; + unsigned long int flags; + + spin_lock_irqsave(&cfg->net_list_sync, flags); + BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) { + if (wdev && (_net_info->wdev == wdev)) { + info = _net_info; + break; + } + } + spin_unlock_irqrestore(&cfg->net_list_sync, flags); + return info; +} + +#define is_p2p_group_iface(wdev) (((wdev->iftype == NL80211_IFTYPE_P2P_GO) || \ + (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) ? 1 : 0) +#define bcmcfg_to_wiphy(cfg) (cfg->wdev->wiphy) +#define bcmcfg_to_prmry_ndev(cfg) (cfg->wdev->netdev) +#define bcmcfg_to_prmry_wdev(cfg) (cfg->wdev) +#define bcmcfg_to_p2p_wdev(cfg) (cfg->p2p_wdev) +#define ndev_to_wl(n) (wdev_to_wl(n->ieee80211_ptr)) +#define ndev_to_wdev(ndev) (ndev->ieee80211_ptr) +#define wdev_to_ndev(wdev) (wdev->netdev) + +#if defined(WL_ENABLE_P2P_IF) +#define ndev_to_wlc_ndev(ndev, cfg) ((ndev == cfg->p2p_net) ? \ + bcmcfg_to_prmry_ndev(cfg) : ndev) +#else +#define ndev_to_wlc_ndev(ndev, cfg) (ndev) +#endif /* WL_ENABLE_P2P_IF */ + +#define wdev_to_wlc_ndev(wdev, cfg) \ + (wdev_to_ndev(wdev) ? 
\ + wdev_to_ndev(wdev) : bcmcfg_to_prmry_ndev(cfg)) +#if defined(WL_CFG80211_P2P_DEV_IF) +#define cfgdev_to_wlc_ndev(cfgdev, cfg) wdev_to_wlc_ndev(cfgdev, cfg) +#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_wdev(cfg) +#elif defined(WL_ENABLE_P2P_IF) +#define cfgdev_to_wlc_ndev(cfgdev, cfg) ndev_to_wlc_ndev(cfgdev, cfg) +#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_ndev(cfg) +#else +#define cfgdev_to_wlc_ndev(cfgdev, cfg) (cfgdev) +#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) (cfgdev) +#endif /* WL_CFG80211_P2P_DEV_IF */ + +#if defined(WL_CFG80211_P2P_DEV_IF) +#define cfgdev_to_wdev(cfgdev) (cfgdev) +#define ndev_to_cfgdev(ndev) ndev_to_wdev(ndev) +#define cfgdev_to_ndev(cfgdev) (cfgdev ? (cfgdev->netdev) : NULL) +#define wdev_to_cfgdev(cfgdev) (cfgdev) +#define discover_cfgdev(cfgdev, cfg) (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) +#else +#define cfgdev_to_wdev(cfgdev) (cfgdev->ieee80211_ptr) +#define wdev_to_cfgdev(cfgdev) cfgdev ? (cfgdev->netdev) : NULL +#define ndev_to_cfgdev(ndev) (ndev) +#define cfgdev_to_ndev(cfgdev) (cfgdev) +#define discover_cfgdev(cfgdev, cfg) (cfgdev == cfg->p2p_net) +#endif /* WL_CFG80211_P2P_DEV_IF */ + +#if defined(WL_CFG80211_P2P_DEV_IF) +#define scan_req_match(cfg) (((cfg) && (cfg->scan_request) && \ + (cfg->scan_request->wdev == cfg->p2p_wdev)) ? true : false) +#elif defined(WL_ENABLE_P2P_IF) +#define scan_req_match(cfg) (((cfg) && (cfg->scan_request) && \ + (cfg->scan_request->dev == cfg->p2p_net)) ? true : false) +#else +#define scan_req_match(cfg) (((cfg) && p2p_is_on(cfg) && p2p_scan(cfg)) ? \ + true : false) +#endif /* WL_CFG80211_P2P_DEV_IF */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) +#define scan_req_iftype(req) (req->dev->ieee80211_ptr->iftype) +#else +#define scan_req_iftype(req) (req->wdev->iftype) +#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) */ + +#define wl_to_sr(w) (w->scan_req_int) +#if defined(STATIC_WL_PRIV_STRUCT) +#define wl_to_ie(w) (w->ie) +#define wl_to_conn(w) (w->conn_info) +#else +#define wl_to_ie(w) (&w->ie) +#define wl_to_conn(w) (&w->conn_info) +#endif +#define wiphy_from_scan(w) (w->escan_info.wiphy) +#define wl_get_drv_status_all(cfg, stat) \ + (wl_get_status_all(cfg, WL_STATUS_ ## stat)) +#define wl_get_drv_status(cfg, stat, ndev) \ + (wl_get_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev)) +#define wl_set_drv_status(cfg, stat, ndev) \ + (wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 1)) +#define wl_clr_drv_status(cfg, stat, ndev) \ + (wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 2)) +#define wl_clr_drv_status_all(cfg, stat) \ + (wl_set_status_all(cfg, WL_STATUS_ ## stat, 2)) +#define wl_chg_drv_status(cfg, stat, ndev) \ + (wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 4)) + +#define for_each_bss(list, bss, __i) \ + for (__i = 0; __i < list->count && __i < WL_AP_MAX; __i++, bss = next_bss(list, bss)) + +#define for_each_ndev(cfg, iter, next) \ + list_for_each_entry_safe(iter, next, &cfg->net_list, list) + +/* In case of WPS from wpa_supplicant, pairwise siute and group suite is 0. 
+ * In addtion to that, wpa_version is WPA_VERSION_1 + */ +#define is_wps_conn(_sme) \ + ((wl_cfgp2p_find_wpsie((u8 *)_sme->ie, _sme->ie_len) != NULL) && \ + (!_sme->crypto.n_ciphers_pairwise) && \ + (!_sme->crypto.cipher_group)) + +#define IS_AKM_SUITE_FT(sec) false + +#define IS_AKM_SUITE_CCKM(sec) false + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) +#define STA_INFO_BIT(info) (1ul << NL80211_STA_ ## info) +#define strnicmp(str1, str2, len) strncasecmp((str1), (str2), (len)) +#else +#define STA_INFO_BIT(info) (STATION_ ## info) +#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) */ + +extern s32 wl_cfg80211_attach(struct net_device *ndev, void *context); +extern s32 wl_cfg80211_attach_post(struct net_device *ndev); +extern void wl_cfg80211_detach(void *para); + +extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e, + void *data); +void wl_cfg80211_set_parent_dev(void *dev); +struct device *wl_cfg80211_get_parent_dev(void); + +/* clear IEs */ +extern s32 wl_cfg80211_clear_mgmt_vndr_ies(struct bcm_cfg80211 *cfg); +extern s32 wl_cfg80211_clear_per_bss_ies(struct bcm_cfg80211 *cfg, s32 bssidx); + +extern s32 wl_cfg80211_up(void *para); +extern s32 wl_cfg80211_down(void *para); +extern s32 wl_cfg80211_notify_ifadd(int ifidx, char *name, uint8 *mac, uint8 bssidx); +extern s32 wl_cfg80211_notify_ifdel(int ifidx, char *name, uint8 *mac, uint8 bssidx); +extern s32 wl_cfg80211_notify_ifchange(int ifidx, char *name, uint8 *mac, uint8 bssidx); +extern struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name, + uint8 *mac, uint8 bssidx, char *dngl_name); +extern int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev); +extern int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev); +extern int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev); +extern bool wl_cfg80211_is_concurrent_mode(void); +extern void* wl_cfg80211_get_dhdp(void); +extern bool wl_cfg80211_is_p2p_active(void); +extern bool wl_cfg80211_is_roam_offload(void); +extern bool wl_cfg80211_is_event_from_connected_bssid(const wl_event_msg_t *e, int ifidx); +extern void wl_cfg80211_dbg_level(u32 level); +extern s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr); +extern s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len); +extern s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len); +extern s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len, + enum wl_management_type type); +extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len); +extern s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len); +#ifdef WL11ULB +extern s32 wl_cfg80211_set_ulb_mode(struct net_device *dev, int mode); +extern s32 wl_cfg80211_set_ulb_bw(struct net_device *dev, + u32 ulb_bw, char *ifname); +#endif /* WL11ULB */ +#ifdef P2PLISTEN_AP_SAMECHN +extern s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable); +#endif /* P2PLISTEN_AP_SAMECHN */ + +/* btcoex functions */ +void* wl_cfg80211_btcoex_init(struct net_device *ndev); +void wl_cfg80211_btcoex_deinit(void); + +#ifdef WL_SUPPORT_AUTO_CHANNEL +#define CHANSPEC_BUF_SIZE 1024 +#define CHAN_SEL_IOCTL_DELAY 300 +#define CHAN_SEL_RETRY_COUNT 15 +#define CHANNEL_IS_RADAR(channel) (((channel & WL_CHAN_RADAR) || \ + (channel & WL_CHAN_PASSIVE)) ? true : false) +#define CHANNEL_IS_2G(channel) (((channel >= 1) && (channel <= 14)) ? 
\ + true : false) +#define CHANNEL_IS_5G(channel) (((channel >= 36) && (channel <= 165)) ? \ + true : false) +extern s32 wl_cfg80211_get_best_channels(struct net_device *dev, char* command, + int total_len); +#endif /* WL_SUPPORT_AUTO_CHANNEL */ +extern int wl_cfg80211_ether_atoe(const char *a, struct ether_addr *n); +extern int wl_cfg80211_hang(struct net_device *dev, u16 reason); +extern s32 wl_mode_to_nl80211_iftype(s32 mode); +int wl_cfg80211_do_driver_init(struct net_device *net); +void wl_cfg80211_enable_trace(bool set, u32 level); +extern s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify); +extern s32 wl_cfg80211_if_is_group_owner(void); +extern chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec); +extern chanspec_t wl_ch_host_to_driver(s32 bssidx, u16 channel); +extern s32 wl_set_tx_power(struct net_device *dev, + enum nl80211_tx_power_setting type, s32 dbm); +extern s32 wl_get_tx_power(struct net_device *dev, s32 *dbm); +extern s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add); +extern void wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev); +extern void wl_cfg80211_add_to_eventbuffer(wl_eventmsg_buf_t *ev, u16 event, bool set); +extern s32 wl_cfg80211_apply_eventbuffer(struct net_device *ndev, + struct bcm_cfg80211 *cfg, wl_eventmsg_buf_t *ev); +extern void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac); +extern void wl_cfg80211_update_power_mode(struct net_device *dev); +extern void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command); +extern void wl_terminate_event_handler(void); +#if defined(DHD_ENABLE_BIGDATA_LOGGING) +extern s32 wl_cfg80211_get_bss_info(struct net_device *dev, char* cmd, int total_len); +extern s32 wl_cfg80211_get_connect_failed_status(struct net_device *dev, char* cmd, int total_len); +#endif /* DHD_ENABLE_BIGDATA_LOGGING */ + +#define SCAN_BUF_CNT 2 +#define SCAN_BUF_NEXT 1 +#define WL_SCANTYPE_LEGACY 0x1 +#define WL_SCANTYPE_P2P 0x2 +#define wl_escan_set_sync_id(a, b) ((a) = htod16(0x1234)) +#define wl_escan_set_type(a, b) +#define wl_escan_get_buf(a, b) ((wl_scan_results_t *) (a)->escan_info.escan_buf) +#define wl_escan_check_sync_id(a, b, c) 0 +#define wl_escan_print_sync_id(a, b, c) +#define wl_escan_increment_sync_id(a, b) +#define wl_escan_init_sync_id(a) +extern void wl_cfg80211_ibss_vsie_set_buffer(vndr_ie_setbuf_t *ibss_vsie, int ibss_vsie_len); +extern s32 wl_cfg80211_ibss_vsie_delete(struct net_device *dev); +extern void wl_cfg80211_set_rmc_pid(int pid); +extern int wl_cfg80211_set_mgmt_vndr_ies(struct bcm_cfg80211 *cfg, + bcm_struct_cfgdev *cfgdev, s32 bssidx, s32 pktflag, + const u8 *vndr_ie, u32 vndr_ie_len); + + +/* Action frame specific functions */ +extern u8 wl_get_action_category(void *frame, u32 frame_len); +extern int wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action); + +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST +struct net_device *wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg); +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + +#ifdef WL_SUPPORT_ACS +#define ACS_MSRMNT_DELAY 1000 /* dump_obss delay in ms */ +#define IOCTL_RETRY_COUNT 5 +#define CHAN_NOISE_DUMMY -80 +#define OBSS_TOKEN_IDX 15 +#define IBSS_TOKEN_IDX 15 +#define TX_TOKEN_IDX 14 +#define CTG_TOKEN_IDX 13 +#define PKT_TOKEN_IDX 15 +#define IDLE_TOKEN_IDX 12 +#endif /* WL_SUPPORT_ACS */ + +extern int wl_cfg80211_get_ioctl_version(void); +extern int wl_cfg80211_enable_roam_offload(struct net_device *dev, int enable); 
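
The CHANNEL_IS_RADAR/CHANNEL_IS_2G/CHANNEL_IS_5G predicates defined above (under WL_SUPPORT_AUTO_CHANNEL) classify the per-channel info that wl_cfg80211_get_best_channels() pulls from firmware: RADAR tests the WL_CHAN_RADAR/WL_CHAN_PASSIVE flag bits of a channel info word, while the 2G/5G checks test the plain channel number. A minimal sketch of how an auto-channel pass might use them follows; count_acs_candidates and the 0xff channel-number mask are assumptions for illustration, not part of the patch:

/* Hypothetical helper: count usable auto-channel candidates per band.
 * chan_info[] holds per-channel info words from firmware; the low byte
 * is assumed to carry the channel number.
 */
static int count_acs_candidates(const u32 *chan_info, int nchan, bool want_5g)
{
	int i, cnt = 0;

	for (i = 0; i < nchan; i++) {
		u32 ch = chan_info[i] & 0xff;	/* assumed channel-number mask */

		if (CHANNEL_IS_RADAR(chan_info[i]))
			continue;	/* skip DFS/passive channels */
		if (want_5g ? CHANNEL_IS_5G(ch) : CHANNEL_IS_2G(ch))
			cnt++;
	}
	return cnt;
}
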
+extern s32 wl_cfg80211_dfs_ap_move(struct net_device *ndev, char *data, + char *command, int total_len); +extern s32 wl_cfg80211_wbtext_config(struct net_device *ndev, char *data, + char *command, int total_len); +extern int wl_cfg80211_wbtext_weight_config(struct net_device *ndev, char *data, + char *command, int total_len); +extern int wl_cfg80211_wbtext_table_config(struct net_device *ndev, char *data, + char *command, int total_len); +extern s32 wl_cfg80211_wbtext_delta_config(struct net_device *ndev, char *data, + char *command, int total_len); +extern s32 wl_cfg80211_get_chanspecs_2g(struct net_device *ndev, + void *buf, s32 buflen); +extern s32 wl_cfg80211_get_chanspecs_5g(struct net_device *ndev, + void *buf, s32 buflen); +#if defined(WL_VIRTUAL_APSTA) +extern int wl_cfg80211_interface_create(struct net_device *dev, char *name); +extern int wl_cfg80211_interface_delete(struct net_device *dev, char *name); +#endif /* defined (WL_VIRTUAL_APSTA) */ + +#ifdef WL_NAN +extern int wl_cfg80211_nan_cmd_handler(struct net_device *ndev, char *cmd, + int cmd_len); +#endif /* WL_NAN */ + +#ifdef WL_CFG80211_P2P_DEV_IF +extern void wl_cfg80211_del_p2p_wdev(void); +#endif /* WL_CFG80211_P2P_DEV_IF */ + +#if defined(WL_SUPPORT_AUTO_CHANNEL) +extern int wl_cfg80211_set_spect(struct net_device *dev, int spect); +extern int wl_cfg80211_get_sta_channel(void); +#endif /* WL_SUPPORT_AUTO_CHANNEL */ + +#ifdef P2P_LISTEN_OFFLOADING +extern s32 wl_cfg80211_p2plo_listen_start(struct net_device *dev, u8 *buf, int len); +extern s32 wl_cfg80211_p2plo_listen_stop(struct net_device *dev); +#endif /* P2P_LISTEN_OFFLOADING */ + +#define RETURN_EIO_IF_NOT_UP(wlpriv) \ +do { \ + struct net_device *checkSysUpNDev = bcmcfg_to_prmry_ndev(wlpriv); \ + if (unlikely(!wl_get_drv_status(wlpriv, READY, checkSysUpNDev))) { \ + WL_INFORM(("device is not ready\n")); \ + return -EIO; \ + } \ +} while (0) + +#ifdef QOS_MAP_SET +extern uint8 *wl_get_up_table(void); +#endif /* QOS_MAP_SET */ + +#define P2PO_COOKIE 65535 +u64 wl_cfg80211_get_new_roc_id(struct bcm_cfg80211 *cfg); +#if defined(SUPPORT_RANDOM_MAC_SCAN) +int wl_cfg80211_set_random_mac(struct net_device *dev, bool enable); +int wl_cfg80211_random_mac_enable(struct net_device *dev); +int wl_cfg80211_random_mac_disable(struct net_device *dev); +#endif /* SUPPORT_RANDOM_MAC_SCAN */ +int wl_cfg80211_iface_count(void); +int wl_check_dongle_idle(struct wiphy *wiphy); +#endif /* _wl_cfg80211_h_ */ diff --git a/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c b/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c new file mode 100644 index 000000000000..1aaa8fe4bfe6 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfg_btcoex.c @@ -0,0 +1,564 @@ +/* + * Linux cfg80211 driver - Dongle Host Driver (DHD) related + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. 
An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <>
 *
 * $Id: wl_cfg_btcoex.c 514727 2014-11-12 03:02:48Z $
 */

#include

#include
#include
#include
#include
#include
#include
#include
#include

#ifdef PKT_FILTER_SUPPORT
extern uint dhd_pkt_filter_enable;
extern uint dhd_master_mode;
extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
#endif

struct btcoex_info {
	struct timer_list timer;
	u32 timer_ms;
	u32 timer_on;
	u32 ts_dhcp_start;	/* ms ts: record time stats */
	u32 ts_dhcp_ok;		/* ms ts: record time stats */
	bool dhcp_done;	/* flag, indicates that host is done with
			 * dhcp before t1/t2 expiration
			 */
	s32 bt_state;
	struct work_struct work;
	struct net_device *dev;
};

static struct btcoex_info *btcoex_info_loc = NULL;

/* TODO: clean up the BT-Coex code, it still has some legacy ioctl/iovar functions */

/* use New SCO/eSCO smart YG suppression */
#define BT_DHCP_eSCO_FIX
/* this flag boosts wifi pkt priority to max; caution: not fair to SCO */
#define BT_DHCP_USE_FLAGS
/* T1: start SCO/eSCO priority suppression */
#define BT_DHCP_OPPR_WIN_TIME	2500
/* T2: turn off SCO/eSCO suppression (timeout) */
#define BT_DHCP_FLAG_FORCE_TIME	5500

enum wl_cfg80211_btcoex_status {
	BT_DHCP_IDLE,
	BT_DHCP_START,
	BT_DHCP_OPPR_WIN,
	BT_DHCP_FLAG_FORCE_TIMEOUT
};

/*
 * get named driver variable (uint register) value and return error indication
 * calling example: dev_wlc_intvar_get_reg(dev, "btc_params", 66, &reg_value)
 */
static int
dev_wlc_intvar_get_reg(struct net_device *dev, char *name,
	uint reg, int *retval)
{
	union {
		char buf[WLC_IOCTL_SMLEN];
		int val;
	} var;
	int error;

	bcm_mkiovar(name, (char *)(&reg), sizeof(reg),
		(char *)(&var), sizeof(var.buf));
	error = wldev_ioctl(dev, WLC_GET_VAR, (char *)(&var), sizeof(var.buf), false);

	*retval = dtoh32(var.val);
	return (error);
}

static int
dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
	char ioctlbuf_local[1024];
#else
	static char ioctlbuf_local[1024];
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */

	bcm_mkiovar(name, buf, len, ioctlbuf_local, sizeof(ioctlbuf_local));

	return (wldev_ioctl(dev, WLC_SET_VAR, ioctlbuf_local, sizeof(ioctlbuf_local), true));
}
/*
 * set named driver variable (uint register) value and return error indication
 * calling example: dev_wlc_intvar_set_reg(dev, "btc_params", 66, value)
 */
static int
dev_wlc_intvar_set_reg(struct net_device *dev, char *name, char *addr, char * val)
{
	char reg_addr[8];

	memset(reg_addr, 0, sizeof(reg_addr));
	memcpy((char *)&reg_addr[0], (char *)addr, 4);
	memcpy((char *)&reg_addr[4], (char *)val, 4);

	return (dev_wlc_bufvar_set(dev, name, (char *)&reg_addr[0], sizeof(reg_addr)));
}

static bool btcoex_is_sco_active(struct net_device *dev)
{
	int ioc_res = 0;
	bool res = FALSE;
	int sco_id_cnt = 0;
	int param27;
	int i;

	for (i = 0; i < 12; i++) {

		ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27);

		WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27));

		if (ioc_res < 0) {
			WL_ERR(("ioc read btc params error\n"));
			break;
		}

		if ((param27 & 0x6) == 2) { /* count both sco & esco */
			sco_id_cnt++;
		}

		if (sco_id_cnt > 2) {
			WL_TRACE(("sco/esco detected, pkt id_cnt:%d samples:%d\n",
				sco_id_cnt, i));
			res = TRUE;
			break;
		}

		OSL_SLEEP(5);
	}

	return res;
}

#if defined(BT_DHCP_eSCO_FIX)
/* Enhanced BT COEX settings for eSCO compatibility during DHCP window */
static int set_btc_esco_params(struct net_device *dev, bool trump_sco)
{
	static bool saved_status = FALSE;

	char buf_reg50va_dhcp_on[8] =
		{ 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 };
	char buf_reg51va_dhcp_on[8] =
		{ 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
	char buf_reg64va_dhcp_on[8] =
		{ 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
	char buf_reg65va_dhcp_on[8] =
		{ 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
	char buf_reg71va_dhcp_on[8] =
		{ 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
	uint32 regaddr;
	static uint32 saved_reg50;
	static uint32 saved_reg51;
	static uint32 saved_reg64;
	static uint32 saved_reg65;
	static uint32 saved_reg71;

	if (trump_sco) {
		/* this should reduce eSCO aggressive retransmit
		 * w/o breaking it
		 */

		/* 1st save current */
		WL_TRACE(("Do new SCO/eSCO coex algo {save &"
			"override}\n"));
		if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) &&
			(!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) &&
			(!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) &&
			(!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) &&
			(!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) {
			saved_status = TRUE;
			WL_TRACE(("saved bt_params[50,51,64,65,71]:"
				"0x%x 0x%x 0x%x 0x%x 0x%x\n",
				saved_reg50, saved_reg51,
				saved_reg64, saved_reg65, saved_reg71));
		} else {
			WL_ERR((":%s: save btc_params failed\n",
				__FUNCTION__));
			saved_status = FALSE;
			return -1;
		}

		WL_TRACE(("override with [50,51,64,65,71]:"
			"0x%x 0x%x 0x%x 0x%x 0x%x\n",
			*(u32 *)(buf_reg50va_dhcp_on+4),
			*(u32 *)(buf_reg51va_dhcp_on+4),
			*(u32 *)(buf_reg64va_dhcp_on+4),
			*(u32 *)(buf_reg65va_dhcp_on+4),
			*(u32 *)(buf_reg71va_dhcp_on+4)));

		dev_wlc_bufvar_set(dev, "btc_params",
			(char *)&buf_reg50va_dhcp_on[0], 8);
		dev_wlc_bufvar_set(dev, "btc_params",
			(char *)&buf_reg51va_dhcp_on[0], 8);
		dev_wlc_bufvar_set(dev, "btc_params",
			(char *)&buf_reg64va_dhcp_on[0], 8);
		dev_wlc_bufvar_set(dev, "btc_params",
			(char *)&buf_reg65va_dhcp_on[0], 8);
		dev_wlc_bufvar_set(dev, "btc_params",
			(char *)&buf_reg71va_dhcp_on[0], 8);

		saved_status = TRUE;
	} else if (saved_status) {
		/* restore previously saved bt params */
		WL_TRACE(("Do new SCO/eSCO coex algo {restore}\n"));

		regaddr = 50;
		dev_wlc_intvar_set_reg(dev, "btc_params",
			(char *)&regaddr, (char *)&saved_reg50);
		regaddr = 51;
		dev_wlc_intvar_set_reg(dev, "btc_params",
			(char *)&regaddr, (char *)&saved_reg51);
		regaddr = 64;
		dev_wlc_intvar_set_reg(dev, "btc_params",
			(char *)&regaddr, (char *)&saved_reg64);
		regaddr = 65;
		dev_wlc_intvar_set_reg(dev, "btc_params",
			(char *)&regaddr, (char *)&saved_reg65);
		regaddr = 71;
		dev_wlc_intvar_set_reg(dev, "btc_params",
			(char *)&regaddr, (char *)&saved_reg71);

		WL_TRACE(("restore bt_params[50,51,64,65,71]:"
			"0x%x 0x%x 0x%x 0x%x 0x%x\n",
			saved_reg50, saved_reg51, saved_reg64,
			saved_reg65, saved_reg71));

		saved_status = FALSE;
	} else {
		WL_ERR((":%s attempt to restore BTCOEX params that were not saved\n",
			__FUNCTION__));
		return -1;
	}
	return 0;
}
#endif /* BT_DHCP_eSCO_FIX */
+
+static void +wl_cfg80211_bt_setflag(struct net_device *dev, bool set) +{ +#if defined(BT_DHCP_USE_FLAGS) + char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 }; + char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00}; +#endif + + +#if defined(BT_DHCP_eSCO_FIX) + /* set = 1, save & turn on 0 - off & restore prev settings */ + set_btc_esco_params(dev, set); +#endif + +#if defined(BT_DHCP_USE_FLAGS) + WL_TRACE(("WI-FI priority boost via bt flags, set:%d\n", set)); + if (set == TRUE) + /* Forcing bt_flag7 */ + dev_wlc_bufvar_set(dev, "btc_flags", + (char *)&buf_flag7_dhcp_on[0], + sizeof(buf_flag7_dhcp_on)); + else + /* Restoring default bt flag7 */ + dev_wlc_bufvar_set(dev, "btc_flags", + (char *)&buf_flag7_default[0], + sizeof(buf_flag7_default)); +#endif +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +static void wl_cfg80211_bt_timerfunc(struct timer_list *t) +#else +static void wl_cfg80211_bt_timerfunc(ulong data) +#endif +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + struct btcoex_info *bt_local = from_timer(bt_local, t, timer); +#else + struct btcoex_info *bt_local = (struct btcoex_info *)data; +#endif + WL_TRACE(("Enter\n")); + bt_local->timer_on = 0; + schedule_work(&bt_local->work); +} + +static void wl_cfg80211_bt_handler(struct work_struct *work) +{ + struct btcoex_info *btcx_inf; + + btcx_inf = container_of(work, struct btcoex_info, work); + + if (btcx_inf->timer_on) { + btcx_inf->timer_on = 0; + del_timer_sync(&btcx_inf->timer); + } + + switch (btcx_inf->bt_state) { + case BT_DHCP_START: + /* DHCP started + * provide OPPORTUNITY window to get DHCP address + */ + WL_TRACE(("bt_dhcp stm: started \n")); + + btcx_inf->bt_state = BT_DHCP_OPPR_WIN; + mod_timer(&btcx_inf->timer, + jiffies + msecs_to_jiffies(BT_DHCP_OPPR_WIN_TIME)); + btcx_inf->timer_on = 1; + break; + + case BT_DHCP_OPPR_WIN: + if (btcx_inf->dhcp_done) { + WL_TRACE(("DHCP Done before T1 expiration\n")); + goto btc_coex_idle; + } + + /* DHCP is not over yet, start lowering BT priority + * enforce btc_params + flags if necessary + */ + WL_TRACE(("DHCP T1:%d expired\n", BT_DHCP_OPPR_WIN_TIME)); + if (btcx_inf->dev) + wl_cfg80211_bt_setflag(btcx_inf->dev, TRUE); + btcx_inf->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT; + mod_timer(&btcx_inf->timer, + jiffies + msecs_to_jiffies(BT_DHCP_FLAG_FORCE_TIME)); + btcx_inf->timer_on = 1; + break; + + case BT_DHCP_FLAG_FORCE_TIMEOUT: + if (btcx_inf->dhcp_done) { + WL_TRACE(("DHCP Done before T2 expiration\n")); + } else { + /* Noo dhcp during T1+T2, restore BT priority */ + WL_TRACE(("DHCP wait interval T2:%d msec expired\n", + BT_DHCP_FLAG_FORCE_TIME)); + } + + /* Restoring default bt priority */ + if (btcx_inf->dev) + wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE); +btc_coex_idle: + btcx_inf->bt_state = BT_DHCP_IDLE; + btcx_inf->timer_on = 0; + break; + + default: + WL_ERR(("error g_status=%d !!!\n", btcx_inf->bt_state)); + if (btcx_inf->dev) + wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE); + btcx_inf->bt_state = BT_DHCP_IDLE; + btcx_inf->timer_on = 0; + break; + } + + net_os_wake_unlock(btcx_inf->dev); +} + +void* wl_cfg80211_btcoex_init(struct net_device *ndev) +{ + struct btcoex_info *btco_inf = NULL; + + btco_inf = kmalloc(sizeof(struct btcoex_info), GFP_KERNEL); + if (!btco_inf) + return NULL; + + btco_inf->bt_state = BT_DHCP_IDLE; + btco_inf->ts_dhcp_start = 0; + btco_inf->ts_dhcp_ok = 0; + /* Set up timer for BT */ + btco_inf->timer_ms = 10; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + timer_setup(&btco_inf->timer, 
wl_cfg80211_bt_timerfunc, btco_inf->timer_ms); +#else + init_timer(&btco_inf->timer); + btco_inf->timer.data = (ulong)btco_inf; + btco_inf->timer.function = wl_cfg80211_bt_timerfunc; +#endif + + btco_inf->dev = ndev; + + INIT_WORK(&btco_inf->work, wl_cfg80211_bt_handler); + + btcoex_info_loc = btco_inf; + return btco_inf; +} + +void wl_cfg80211_btcoex_deinit() +{ + if (!btcoex_info_loc) + return; + + if (btcoex_info_loc->timer_on) { + btcoex_info_loc->timer_on = 0; + del_timer_sync(&btcoex_info_loc->timer); + } + + cancel_work_sync(&btcoex_info_loc->work); + + kfree(btcoex_info_loc); +} + +int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command) +{ + + struct btcoex_info *btco_inf = btcoex_info_loc; + char powermode_val = 0; + char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 }; + char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 }; + char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 }; + + uint32 regaddr; + static uint32 saved_reg66; + static uint32 saved_reg41; + static uint32 saved_reg68; + static bool saved_status = FALSE; + + char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00}; + + /* Figure out powermode 1 or o command */ + strncpy((char *)&powermode_val, command + strlen("BTCOEXMODE") +1, 1); + + if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) { + WL_TRACE_HW4(("DHCP session starts\n")); + + +#ifdef PKT_FILTER_SUPPORT + dhd->dhcp_in_progress = 1; + + if (dhd->early_suspended) { + WL_TRACE_HW4(("DHCP in progressing , disable packet filter!!!\n")); + dhd_enable_packet_filter(0, dhd); + } +#endif + + /* Retrieve and saved orig regs value */ + if ((saved_status == FALSE) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 66, &saved_reg66)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 41, &saved_reg41)) && + (!dev_wlc_intvar_get_reg(dev, "btc_params", 68, &saved_reg68))) { + saved_status = TRUE; + WL_TRACE(("Saved 0x%x 0x%x 0x%x\n", + saved_reg66, saved_reg41, saved_reg68)); + + /* Disable PM mode during dhpc session */ + + /* Disable PM mode during dhpc session */ + /* Start BT timer only for SCO connection */ + if (btcoex_is_sco_active(dev)) { + /* btc_params 66 */ + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg66va_dhcp_on[0], + sizeof(buf_reg66va_dhcp_on)); + /* btc_params 41 0x33 */ + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg41va_dhcp_on[0], + sizeof(buf_reg41va_dhcp_on)); + /* btc_params 68 0x190 */ + dev_wlc_bufvar_set(dev, "btc_params", + (char *)&buf_reg68va_dhcp_on[0], + sizeof(buf_reg68va_dhcp_on)); + saved_status = TRUE; + + btco_inf->bt_state = BT_DHCP_START; + btco_inf->timer_on = 1; + mod_timer(&btco_inf->timer, btco_inf->timer.expires); + WL_TRACE(("enable BT DHCP Timer\n")); + } + } + else if (saved_status == TRUE) { + WL_ERR(("was called w/o DHCP OFF. 
+		}
+	}
+	else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+
+
+
+#ifdef PKT_FILTER_SUPPORT
+		dhd->dhcp_in_progress = 0;
+		WL_TRACE_HW4(("DHCP is complete\n"));
+
+		/* Enable packet filtering */
+		if (dhd->early_suspended) {
+			WL_TRACE_HW4(("DHCP is complete, enable packet filter!!!\n"));
+			dhd_enable_packet_filter(1, dhd);
+		}
+#endif /* PKT_FILTER_SUPPORT */
+
+		/* Restoring PM mode */
+
+		/* Stop any bt timer because DHCP session is done */
+		WL_TRACE(("disable BT DHCP Timer\n"));
+		if (btco_inf->timer_on) {
+			btco_inf->timer_on = 0;
+			del_timer_sync(&btco_inf->timer);
+
+			if (btco_inf->bt_state != BT_DHCP_IDLE) {
+				/* need to restore original btc flags & extra btc params */
+				WL_TRACE(("bt->bt_state:%d\n", btco_inf->bt_state));
+				/* wake up btcoex thread to restore btc flags+params */
+				schedule_work(&btco_inf->work);
+			}
+		}
+
+		/* Restore the btc_flags parameter anyway */
+		if (saved_status == TRUE)
+			dev_wlc_bufvar_set(dev, "btc_flags",
+				(char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+
+		/* Restore original values */
+		if (saved_status == TRUE) {
+			regaddr = 66;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg66);
+			regaddr = 41;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg41);
+			regaddr = 68;
+			dev_wlc_intvar_set_reg(dev, "btc_params",
+				(char *)&regaddr, (char *)&saved_reg68);
+
+			WL_TRACE(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n",
+				saved_reg66, saved_reg41, saved_reg68));
+		}
+		saved_status = FALSE;
+
+	}
+	else {
+		WL_ERR(("Unknown power setting, ignored\n"));
+	}
+
+	snprintf(command, 3, "OK");
+
+	return (strlen("OK"));
+}
diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.c b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
new file mode 100644
index 000000000000..35901ab1b835
--- /dev/null
+++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c
@@ -0,0 +1,2591 @@
+/*
+ * Linux cfgp2p driver
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * + * + * <> + * + * $Id: wl_cfgp2p.c 604795 2015-12-08 13:45:42Z $ + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static s8 scanparambuf[WLC_IOCTL_SMLEN]; +static bool +wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type); + +static s32 wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev, + struct wireless_dev *wdev, bool notify); + +#if defined(WL_ENABLE_P2P_IF) +static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev); +static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd); +static int wl_cfgp2p_if_open(struct net_device *net); +static int wl_cfgp2p_if_stop(struct net_device *net); + +static const struct net_device_ops wl_cfgp2p_if_ops = { + .ndo_open = wl_cfgp2p_if_open, + .ndo_stop = wl_cfgp2p_if_stop, + .ndo_do_ioctl = wl_cfgp2p_do_ioctl, + .ndo_start_xmit = wl_cfgp2p_start_xmit, +}; +#endif /* WL_ENABLE_P2P_IF */ + + +bool wl_cfgp2p_is_pub_action(void *frame, u32 frame_len) +{ + wifi_p2p_pub_act_frame_t *pact_frm; + + if (frame == NULL) + return false; + pact_frm = (wifi_p2p_pub_act_frame_t *)frame; + if (frame_len < sizeof(wifi_p2p_pub_act_frame_t) -1) + return false; + + if (pact_frm->category == P2P_PUB_AF_CATEGORY && + pact_frm->action == P2P_PUB_AF_ACTION && + pact_frm->oui_type == P2P_VER && + memcmp(pact_frm->oui, P2P_OUI, sizeof(pact_frm->oui)) == 0) { + return true; + } + + return false; +} + +bool wl_cfgp2p_is_p2p_action(void *frame, u32 frame_len) +{ + wifi_p2p_action_frame_t *act_frm; + + if (frame == NULL) + return false; + act_frm = (wifi_p2p_action_frame_t *)frame; + if (frame_len < sizeof(wifi_p2p_action_frame_t) -1) + return false; + + if (act_frm->category == P2P_AF_CATEGORY && + act_frm->type == P2P_VER && + memcmp(act_frm->OUI, P2P_OUI, DOT11_OUI_LEN) == 0) { + return true; + } + + return false; +} + +#define GAS_RESP_LEN 2 +#define DOUBLE_TLV_BODY_OFF 4 +#define GAS_RESP_OFFSET 4 +#define GAS_CRESP_OFFSET 5 + +bool wl_cfgp2p_find_gas_subtype(u8 subtype, u8* data, u32 len) +{ + bcm_tlv_t *ie = (bcm_tlv_t *)data; + u8 *frame = NULL; + u16 id, flen; + + /* Skipped first ANQP Element, if frame has anqp elemnt */ + ie = bcm_parse_tlvs(ie, (int)len, DOT11_MNG_ADVERTISEMENT_ID); + + if (ie == NULL) + return false; + + frame = (uint8 *)ie + ie->len + TLV_HDR_LEN + GAS_RESP_LEN; + id = ((u16) (((frame)[1] << 8) | (frame)[0])); + flen = ((u16) (((frame)[3] << 8) | (frame)[2])); + + /* If the contents match the OUI and the type */ + if (flen >= WFA_OUI_LEN + 1 && + id == P2PSD_GAS_NQP_INFOID && + !bcmp(&frame[DOUBLE_TLV_BODY_OFF], (const uint8*)WFA_OUI, WFA_OUI_LEN) && + subtype == frame[DOUBLE_TLV_BODY_OFF+WFA_OUI_LEN]) { + return true; + } + + return false; +} + +bool wl_cfgp2p_is_gas_action(void *frame, u32 frame_len) +{ + + wifi_p2psd_gas_pub_act_frame_t *sd_act_frm; + + if (frame == NULL) + return false; + + sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame; + if (frame_len < (sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1)) + return false; + if (sd_act_frm->category != P2PSD_ACTION_CATEGORY) + return false; + +#ifdef WL11U + if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP) + return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE, + (u8 *)sd_act_frm->query_data + GAS_RESP_OFFSET, + frame_len); + + else if 
(sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
+		return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE,
+			(u8 *)sd_act_frm->query_data + GAS_CRESP_OFFSET,
+			frame_len);
+	else if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ)
+		return true;
+	else
+		return false;
+#else
+	if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ ||
+		sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
+		return true;
+	else
+		return false;
+#endif /* WL11U */
+}
+
+bool wl_cfgp2p_is_p2p_gas_action(void *frame, u32 frame_len)
+{
+
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+
+	if (frame == NULL)
+		return false;
+
+	sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+	if (frame_len < (sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1))
+		return false;
+	if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
+		return false;
+
+	if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ)
+		return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE,
+			(u8 *)sd_act_frm->query_data,
+			frame_len);
+	else
+		return false;
+}
+
+void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel)
+{
+	wifi_p2p_pub_act_frame_t *pact_frm;
+	wifi_p2p_action_frame_t *act_frm;
+	wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+	if (!frame || frame_len <= 2)
+		return;
+
+	if (wl_cfgp2p_is_pub_action(frame, frame_len)) {
+		pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
+		switch (pact_frm->subtype) {
+		case P2P_PAF_GON_REQ:
+			CFGP2P_ACTION(("%s P2P Group Owner Negotiation Req Frame,"
+				" channel=%d\n", (tx)? "TX": "RX", channel));
+			break;
+		case P2P_PAF_GON_RSP:
+			CFGP2P_ACTION(("%s P2P Group Owner Negotiation Rsp Frame,"
+				" channel=%d\n", (tx)? "TX": "RX", channel));
+			break;
+		case P2P_PAF_GON_CONF:
+			CFGP2P_ACTION(("%s P2P Group Owner Negotiation Confirm Frame,"
+				" channel=%d\n", (tx)? "TX": "RX", channel));
+			break;
+		case P2P_PAF_INVITE_REQ:
+			CFGP2P_ACTION(("%s P2P Invitation Request Frame,"
+				" channel=%d\n", (tx)? "TX": "RX", channel));
+			break;
+		case P2P_PAF_INVITE_RSP:
+			CFGP2P_ACTION(("%s P2P Invitation Response Frame,"
+				" channel=%d\n", (tx)? "TX": "RX", channel));
+			break;
+		case P2P_PAF_DEVDIS_REQ:
+			CFGP2P_ACTION(("%s P2P Device Discoverability Request Frame,"
+				" channel=%d\n", (tx)? "TX": "RX", channel));
+			break;
+		case P2P_PAF_DEVDIS_RSP:
+			CFGP2P_ACTION(("%s P2P Device Discoverability Response Frame,"
+				" channel=%d\n", (tx)? "TX": "RX", channel));
+			break;
+		case P2P_PAF_PROVDIS_REQ:
+			CFGP2P_ACTION(("%s P2P Provision Discovery Request Frame,"
+				" channel=%d\n", (tx)? "TX": "RX", channel));
+			break;
+		case P2P_PAF_PROVDIS_RSP:
+			CFGP2P_ACTION(("%s P2P Provision Discovery Response Frame,"
+				" channel=%d\n", (tx)? "TX": "RX", channel));
+			break;
+		default:
+			CFGP2P_ACTION(("%s Unknown Public Action Frame,"
+				" channel=%d\n", (tx)? "TX": "RX", channel));
+
+		}
+
+	} else if (wl_cfgp2p_is_p2p_action(frame, frame_len)) {
+		act_frm = (wifi_p2p_action_frame_t *)frame;
+		switch (act_frm->subtype) {
+		case P2P_AF_NOTICE_OF_ABSENCE:
+			CFGP2P_ACTION(("%s P2P Notice of Absence Frame,"
+				" channel=%d\n", (tx)? "TX": "RX", channel));
+			break;
+		case P2P_AF_PRESENCE_REQ:
+			CFGP2P_ACTION(("%s P2P Presence Request Frame,"
+				" channel=%d\n", (tx)? "TX": "RX", channel));
+			break;
+		case P2P_AF_PRESENCE_RSP:
+			CFGP2P_ACTION(("%s P2P Presence Response Frame,"
+				" channel=%d\n", (tx)? "TX": "RX", channel));
+			break;
+		case P2P_AF_GO_DISC_REQ:
+			CFGP2P_ACTION(("%s P2P Discoverability Request Frame,"
+				" channel=%d\n", (tx)? "TX": "RX", channel));
+			break;
+		default:
+			CFGP2P_ACTION(("%s Unknown P2P Action Frame,"
+				" channel=%d\n", (tx)? "TX": "RX", channel));
+		}
+
+	} else if (wl_cfgp2p_is_gas_action(frame, frame_len)) {
+		sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+		switch (sd_act_frm->action) {
+		case P2PSD_ACTION_ID_GAS_IREQ:
+			CFGP2P_ACTION(("%s GAS Initial Request,"
+				" channel=%d\n", (tx)? "TX" : "RX", channel));
+			break;
+		case P2PSD_ACTION_ID_GAS_IRESP:
+			CFGP2P_ACTION(("%s GAS Initial Response,"
+				" channel=%d\n", (tx)? "TX" : "RX", channel));
+			break;
+		case P2PSD_ACTION_ID_GAS_CREQ:
+			CFGP2P_ACTION(("%s GAS Comeback Request,"
+				" channel=%d\n", (tx)? "TX" : "RX", channel));
+			break;
+		case P2PSD_ACTION_ID_GAS_CRESP:
+			CFGP2P_ACTION(("%s GAS Comeback Response,"
+				" channel=%d\n", (tx)? "TX" : "RX", channel));
+			break;
+		default:
+			CFGP2P_ACTION(("%s Unknown GAS Frame,"
+				" channel=%d\n", (tx)? "TX" : "RX", channel));
+		}
+
+
+	}
+}
+
+/*
+ * Initialize variables related to P2P
+ *
+ */
+s32
+wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg)
+{
+	if (!(cfg->p2p = kzalloc(sizeof(struct p2p_info), GFP_KERNEL))) {
+		CFGP2P_ERR(("struct p2p_info allocation failed\n"));
+		return -ENOMEM;
+	}
+
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY) = bcmcfg_to_prmry_ndev(cfg);
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY) = 0;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1) = NULL;
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) = -1;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2) = NULL;
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) = -1;
+	return BCME_OK;
+
+}
+/*
+ * Deinitialize variables related to P2P
+ *
+ */
+void
+wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg)
+{
+	CFGP2P_ERR(("In\n"));
+	if (cfg->p2p) {
+		kfree(cfg->p2p);
+		cfg->p2p = NULL;
+	}
+	cfg->p2p_supported = 0;
+}
+/*
+ * Set P2P functions into firmware
+ */
+s32
+wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg)
+{
+	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+	struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } };
+	s32 ret = BCME_OK;
+	s32 val = 0;
+	/* Do we have to check whether APSTA is enabled or not ? */
+	ret = wldev_iovar_getint(ndev, "apsta", &val);
+	if (ret < 0) {
+		CFGP2P_ERR(("get apsta error %d\n", ret));
+		return ret;
+	}
+	if (val == 0) {
+		val = 1;
+		ret = wldev_ioctl(ndev, WLC_DOWN, &val, sizeof(s32), true);
+		if (ret < 0) {
+			CFGP2P_ERR(("WLC_DOWN error %d\n", ret));
+			return ret;
+		}
+
+		ret = wldev_iovar_setint(ndev, "apsta", val);
+		if (ret < 0) {
+			/* return error and fail the initialization */
+			CFGP2P_ERR(("wl apsta %d set error. 
ret: %d\n", val, ret)); + return ret; + } + + ret = wldev_ioctl(ndev, WLC_UP, &val, sizeof(s32), true); + if (ret < 0) { + CFGP2P_ERR(("WLC_UP error %d\n", ret)); + return ret; + } + } + + /* In case of COB type, firmware has default mac address + * After Initializing firmware, we have to set current mac address to + * firmware for P2P device address + */ + ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", &null_eth_addr, + sizeof(null_eth_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &cfg->ioctl_buf_sync); + if (ret && ret != BCME_UNSUPPORTED) { + CFGP2P_ERR(("failed to update device address ret %d\n", ret)); + } + return ret; +} + +int wl_cfg_multip2p_operational(struct bcm_cfg80211 *cfg) +{ + if (!cfg->p2p) { + CFGP2P_DBG(("p2p not enabled! \n")); + return false; + } + + if ((wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) != -1) && + (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) != -1)) + return true; + else + return false; +} + +/* Create a new P2P BSS. + * Parameters: + * @mac : MAC address of the BSS to create + * @if_type : interface type: WL_P2P_IF_GO or WL_P2P_IF_CLIENT + * @chspec : chspec to use if creating a GO BSS. + * Returns 0 if success. + */ +s32 +wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, + chanspec_t chspec) +{ + wl_p2p_if_t ifreq; + s32 err; + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + + ifreq.type = if_type; + ifreq.chspec = chspec; + memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet)); + + CFGP2P_DBG(("---cfg p2p_ifadd "MACDBG" %s %u\n", + MAC2STRDBG(ifreq.addr.octet), + (if_type == WL_P2P_IF_GO) ? "go" : "client", + (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT)); + + err = wldev_iovar_setbuf(ndev, "p2p_ifadd", &ifreq, sizeof(ifreq), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (unlikely(err < 0)) { + printk("'cfg p2p_ifadd' error %d\n", err); + return err; + } + + return err; +} + +/* Disable a P2P BSS. + * Parameters: + * @mac : MAC address of the BSS to disable + * Returns 0 if success. + */ +s32 +wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac) +{ + s32 ret; + struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg); + + CFGP2P_INFO(("------primary idx %d : cfg p2p_ifdis "MACDBG"\n", + netdev->ifindex, MAC2STRDBG(mac->octet))); + ret = wldev_iovar_setbuf(netdev, "p2p_ifdis", mac, sizeof(*mac), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (unlikely(ret < 0)) { + printk("'cfg p2p_ifdis' error %d\n", ret); + } + return ret; +} + +/* Delete a P2P BSS. + * Parameters: + * @mac : MAC address of the BSS to delete + * Returns 0 if success. + */ +s32 +wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac) +{ + s32 ret; + struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg); + + CFGP2P_INFO(("------primary idx %d : cfg p2p_ifdel "MACDBG"\n", + netdev->ifindex, MAC2STRDBG(mac->octet))); + ret = wldev_iovar_setbuf(netdev, "p2p_ifdel", mac, sizeof(*mac), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (unlikely(ret < 0)) { + printk("'cfg p2p_ifdel' error %d\n", ret); + } + return ret; +} + +/* Change a P2P Role. + * Parameters: + * @mac : MAC address of the BSS to change a role + * Returns 0 if success. 
+ */ +s32 +wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, + chanspec_t chspec, s32 conn_idx) +{ + wl_p2p_if_t ifreq; + s32 err; + + struct net_device *netdev = wl_to_p2p_bss_ndev(cfg, conn_idx); + + ifreq.type = if_type; + ifreq.chspec = chspec; + memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet)); + + CFGP2P_INFO(("---cfg p2p_ifchange "MACDBG" %s %u" + " chanspec 0x%04x\n", MAC2STRDBG(ifreq.addr.octet), + (if_type == WL_P2P_IF_GO) ? "go" : "client", + (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT, + ifreq.chspec)); + + err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq), + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (unlikely(err < 0)) { + printk("'cfg p2p_ifupd' error %d\n", err); + } else if (if_type == WL_P2P_IF_GO) { + cfg->p2p->p2p_go_count++; + } + return err; +} + + +/* Get the index of a created P2P BSS. + * Parameters: + * @mac : MAC address of the created BSS + * @index : output: index of created BSS + * Returns 0 if success. + */ +s32 +wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index) +{ + s32 ret; + u8 getbuf[64]; + struct net_device *dev = bcmcfg_to_prmry_ndev(cfg); + + CFGP2P_INFO(("---cfg p2p_if "MACDBG"\n", MAC2STRDBG(mac->octet))); + + ret = wldev_iovar_getbuf_bsscfg(dev, "p2p_if", mac, sizeof(*mac), getbuf, + sizeof(getbuf), wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY), NULL); + + if (ret == 0) { + memcpy(index, getbuf, sizeof(s32)); + CFGP2P_INFO(("---cfg p2p_if ==> %d\n", *index)); + } + + return ret; +} + +static s32 +wl_cfgp2p_set_discovery(struct bcm_cfg80211 *cfg, s32 on) +{ + s32 ret = BCME_OK; + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + CFGP2P_DBG(("enter\n")); + + ret = wldev_iovar_setint(ndev, "p2p_disc", on); + + if (unlikely(ret < 0)) { + CFGP2P_ERR(("p2p_disc %d error %d\n", on, ret)); + } + + return ret; +} + +/* Set the WL driver's P2P mode. + * Parameters : + * @mode : is one of WL_P2P_DISC_ST_{SCAN,LISTEN,SEARCH}. 
+ * @channel : the channel to listen on
+ * @listen_ms : the time (milliseconds) to wait
+ * @bssidx : bss index for BSSCFG
+ * Returns 0 if success
+ */
+
+s32
+wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode, u32 channel, u16 listen_ms, int bssidx)
+{
+	wl_p2p_disc_st_t discovery_mode;
+	s32 ret;
+	struct net_device *dev;
+	CFGP2P_DBG(("enter\n"));
+
+	if (unlikely(bssidx == WL_INVALID)) {
+		CFGP2P_ERR((" %d index out of range\n", bssidx));
+		return -1;
+	}
+
+	dev = wl_cfgp2p_find_ndev(cfg, bssidx);
+	if (unlikely(dev == NULL)) {
+		CFGP2P_ERR(("bssidx %d is not assigned\n", bssidx));
+		return BCME_NOTFOUND;
+	}
+
+#ifdef P2PLISTEN_AP_SAMECHN
+	CFGP2P_DBG(("p2p0 listen channel %d AP connection chan %d \n",
+		channel, cfg->channel));
+	if ((mode == WL_P2P_DISC_ST_LISTEN) && (cfg->channel == channel)) {
+		struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+		if (cfg->p2p_resp_apchn_status) {
+			CFGP2P_DBG(("p2p_resp_apchn_status already ON \n"));
+			return BCME_OK;
+		}
+
+		if (wl_get_drv_status(cfg, CONNECTED, primary_ndev)) {
+			ret = wl_cfg80211_set_p2p_resp_ap_chn(primary_ndev, 1);
+			cfg->p2p_resp_apchn_status = true;
+			CFGP2P_DBG(("p2p_resp_apchn_status ON \n"));
+			return ret;
+		}
+	}
+#endif /* P2PLISTEN_AP_SAMECHN */
+
+	/* Put the WL driver into P2P Listen Mode to respond to P2P probe reqs */
+	discovery_mode.state = mode;
+	discovery_mode.chspec = wl_ch_host_to_driver(bssidx, channel);
+	discovery_mode.dwell = listen_ms;
+	ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode,
+		sizeof(discovery_mode), cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+		bssidx, &cfg->ioctl_buf_sync);
+
+	return ret;
+}
+
+/* Get the index of the P2P Discovery BSS */
+static s32
+wl_cfgp2p_get_disc_idx(struct bcm_cfg80211 *cfg, s32 *index)
+{
+	s32 ret;
+	struct net_device *dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+
+	ret = wldev_iovar_getint(dev, "p2p_dev", index);
+	CFGP2P_INFO(("p2p_dev bsscfg_idx=%d ret=%d\n", *index, ret));
+
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("'p2p_dev' error %d\n", ret));
+		return ret;
+	}
+	return ret;
+}
+
+int wl_cfgp2p_get_conn_idx(struct bcm_cfg80211 *cfg)
+{
+	int i;
+	s32 connected_cnt;
+	dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+	if (!dhd)
+		return (-ENODEV);
+	for (i = P2PAPI_BSSCFG_CONNECTION1; i < P2PAPI_BSSCFG_MAX; i++) {
+		if (wl_to_p2p_bss_bssidx(cfg, i) == -1) {
+			if (i == P2PAPI_BSSCFG_CONNECTION2) {
+				if (!(dhd->op_mode & DHD_FLAG_MP2P_MODE)) {
+					CFGP2P_ERR(("Multi p2p not supported\n"));
+					return BCME_ERROR;
+				}
+				if ((connected_cnt = wl_get_drv_status_all(cfg, CONNECTED)) > 1) {
+					CFGP2P_ERR(("Failed to create second p2p interface. "
+						"Already one connection exists\n"));
+					return BCME_ERROR;
+				}
+			}
+			return i;
+		}
+	}
+	return BCME_ERROR;
+}
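/*
 * Sketch of the connection-slot pick in wl_cfgp2p_get_conn_idx() above:
 * the second connection slot is only usable in multi-P2P mode and only
 * while at most one connection exists.  Standalone C with illustrative
 * slot constants, not the driver's enums.
 */
#include <stdio.h>

#define SLOT_CONN1 2
#define SLOT_CONN2 3
#define SLOT_MAX   4

static int pick_conn_slot(const int bssidx[SLOT_MAX], int mp2p_mode, int connected_cnt)
{
	int i;
	for (i = SLOT_CONN1; i < SLOT_MAX; i++) {
		if (bssidx[i] != -1)
			continue;		/* slot already in use */
		if (i == SLOT_CONN2 && (!mp2p_mode || connected_cnt > 1))
			return -1;		/* second connection not allowed */
		return i;
	}
	return -1;				/* no free slot */
}

int main(void)
{
	int bssidx[SLOT_MAX] = { 0, 1, 5, -1 };	/* CONN1 busy, CONN2 free */
	printf("picked slot %d\n", pick_conn_slot(bssidx, 1, 1));
	return 0;
}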
+
+s32
+wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg)
+{
+
+	s32 bssidx = 0;
+	s32 ret = BCME_OK;
+
+	CFGP2P_DBG(("enter\n"));
+
+	if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) > 0) {
+		CFGP2P_ERR(("do nothing, already initialized\n"));
+		return ret;
+	}
+
+	ret = wl_cfgp2p_set_discovery(cfg, 1);
+	if (ret < 0) {
+		CFGP2P_ERR(("set discovery error\n"));
+		return ret;
+	}
+	/* Enable P2P Discovery in the WL Driver */
+	ret = wl_cfgp2p_get_disc_idx(cfg, &bssidx);
+
+	if (ret < 0) {
+		return ret;
+	}
+	/* In the CFG80211 case, check whether the p2p_discovery interface has an allocated p2p_wdev */
+	if (!cfg->p2p_wdev) {
+		CFGP2P_ERR(("p2p_wdev is NULL.\n"));
+		return BCME_NODEVICE;
+	}
+	/* Make an entry in the netinfo */
+	wl_alloc_netinfo(cfg, NULL, cfg->p2p_wdev, WL_MODE_BSS, 0, bssidx);
+
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) =
+		wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = bssidx;
+
+	/* Set the initial discovery state to SCAN */
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+		wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+
+	if (unlikely(ret != 0)) {
+		CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+		wl_cfgp2p_set_discovery(cfg, 0);
+		wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+		wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+		return 0;
+	}
+	return ret;
+}
+
+/* Deinitialize P2P Discovery
+ * Parameters :
+ * @cfg : wl_private data
+ * Returns 0 if success
+ */
+static s32
+wl_cfgp2p_deinit_discovery(struct bcm_cfg80211 *cfg)
+{
+	s32 ret = BCME_OK;
+	s32 bssidx;
+
+	CFGP2P_DBG(("enter\n"));
+	bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	if (bssidx <= 0) {
+		CFGP2P_ERR(("do nothing, not initialized\n"));
+		return -1;
+	}
+
+	/* Clear our saved WPS and P2P IEs for the discovery BSS */
+	wl_cfg80211_clear_per_bss_ies(cfg, bssidx);
+
+	/* Set the discovery state to SCAN */
+	wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+		bssidx);
+	/* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */
+	ret = wl_cfgp2p_set_discovery(cfg, 0);
+
+	/* Remove the p2p disc entry in the netinfo */
+#ifdef DHD_IFDEBUG
+	WL_ERR(("dealloc_net_info by wdev=%p\n", cfg->p2p_wdev));
+#endif
+	wl_dealloc_netinfo_by_wdev(cfg, cfg->p2p_wdev);
+
+	wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = WL_INVALID;
+	wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+
+	return ret;
+
+}
+/* Enable P2P Discovery
+ * Parameters:
+ * @cfg : wl_private data
+ * @ie : probe request ie (WPS IE + P2P IE)
+ * @ie_len : probe request ie length
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	const u8 *ie, u32 ie_len)
+{
+	s32 ret = BCME_OK;
+	s32 bssidx;
+
+	CFGP2P_DBG(("enter\n"));
+	if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+		CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n"));
+		goto set_ie;
+	}
+
+	ret = wl_cfgp2p_init_discovery(cfg);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR((" init discovery error %d\n", ret));
+		goto exit;
+	}
+
+	wl_set_p2p_status(cfg, DISCOVERY_ON);
+	/* Set wsec to any non-zero value in the discovery bsscfg to ensure our
+	 * P2P probe responses have the privacy bit set in the 802.11 WPA IE.
+	 * Some peer devices may not initiate WPS with us if this bit is not set.
+	 */
+	ret = wldev_iovar_setint_bsscfg(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE),
+		"wsec", AES_ENABLED, wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR((" wsec error %d\n", ret));
+	}
+set_ie:
+	if (ie_len) {
+
+		if (bcmcfg_to_prmry_ndev(cfg) == dev) {
+			bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+		} else if ((bssidx = wl_get_bssidx_by_wdev(cfg, cfg->p2p_wdev)) < 0) {
+			WL_ERR(("Find p2p index from wdev(%p) failed\n", cfg->p2p_wdev));
+			return BCME_ERROR;
+		}
+
+		ret = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev),
+			bssidx,
+			VNDR_IE_PRBREQ_FLAG, ie, ie_len);
+
+		if (unlikely(ret < 0)) {
+			CFGP2P_ERR(("set probreq ie error %d\n", ret));
+			goto exit;
+		}
+	}
+exit:
+	return ret;
+}
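/*
 * Minimal sketch of the discovery bring-up sequence implemented above:
 * enable discovery, look up the dedicated bsscfg index, park the state
 * machine in SCAN, and unwind on failure.  The helpers here are stubs
 * standing in for the wldev_* iovar calls; illustrative only.
 */
#include <stdio.h>

static int set_p2p_disc(int on)  { printf("p2p_disc %d\n", on); return 0; }
static int get_disc_bssidx(void) { return 1; }
static int set_disc_state_scan(int bssidx) { printf("p2p_state SCAN @%d\n", bssidx); return 0; }

static int init_discovery(int *disc_bssidx)
{
	if (set_p2p_disc(1) < 0)
		return -1;
	*disc_bssidx = get_disc_bssidx();
	if (set_disc_state_scan(*disc_bssidx) < 0) {
		set_p2p_disc(0);	/* unwind: delete the discovery bsscfg */
		return -1;
	}
	return 0;
}

int main(void)
{
	int idx;
	if (init_discovery(&idx) == 0)
		printf("discovery ready, bssidx=%d\n", idx);
	return 0;
}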
+
+/* Disable P2P Discovery
+ * Parameters:
+ * @cfg : wl_private_data
+ * Returns 0 if success.
+ */
+s32
+wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg)
+{
+	s32 ret = BCME_OK;
+	s32 bssidx;
+
+	CFGP2P_DBG((" enter\n"));
+	wl_clr_p2p_status(cfg, DISCOVERY_ON);
+
+#ifdef DHD_IFDEBUG
+	WL_ERR(("%s: (cfg)->p2p->bss[type].bssidx: %d\n",
+		__FUNCTION__, (cfg)->p2p->bss[P2PAPI_BSSCFG_DEVICE].bssidx));
+#endif
+	bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+	if (bssidx <= 0) {
+		CFGP2P_ERR((" do nothing, not initialized\n"));
+		return 0;
+	}
+
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+		bssidx);
+	if (unlikely(ret < 0)) {
+		CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
+	}
+	/* Do a scan abort to stop the driver's scan engine in case it is still
+	 * waiting out an action frame tx dwell time.
+	 */
+	wl_clr_p2p_status(cfg, DISCOVERY_ON);
+	ret = wl_cfgp2p_deinit_discovery(cfg);
+
+	return ret;
+}
+
+s32
+wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active,
+	u32 num_chans, u16 *channels,
+	s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
+	p2p_scan_purpose_t p2p_scan_purpose)
+{
+	s32 ret = BCME_OK;
+	s32 memsize;
+	s32 eparams_size;
+	u32 i;
+	s8 *memblk;
+	wl_p2p_scan_t *p2p_params;
+	wl_escan_params_t *eparams;
+	wlc_ssid_t ssid;
+	/* Scan parameters */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 80
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 60
+#define P2PAPI_SCAN_NPROBS_TIME_MS 30
+#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
+
+	struct net_device *pri_dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+	/* Allocate scan params which need space for 3 channels and 0 ssids */
+	eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
+		OFFSETOF(wl_escan_params_t, params)) +
+		num_chans * sizeof(eparams->params.channel_list[0]);
+
+	memsize = sizeof(wl_p2p_scan_t) + eparams_size;
+	memblk = scanparambuf;
+	if (memsize > sizeof(scanparambuf)) {
+		CFGP2P_ERR((" scanpar buf too small (%u > %zu)\n",
+			memsize, sizeof(scanparambuf)));
+		return -1;
+	}
+	memset(memblk, 0, memsize);
+	memset(cfg->ioctl_buf, 0, WLC_IOCTL_MAXLEN);
+	if (search_state == WL_P2P_DISC_ST_SEARCH) {
+		/*
+		 * If we are in SEARCH state, we don't need to set the SSID explicitly,
+		 * because the dongle uses the P2P WILDCARD SSID internally by default
+		 */
+		wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
+		/* use null ssid */
+		ssid.SSID_len = 0;
+		memset(&ssid.SSID, 0, sizeof(ssid.SSID));
+	} else if (search_state == WL_P2P_DISC_ST_SCAN) {
+		/* SCAN STATE 802.11 SCAN
+		 * The WFD supplicant issues p2p_find with type=progressive or type=full.
+		 * For a p2p_find command with type=progressive we have to set the SSID
+		 * to the P2P WILDCARD SSID, because otherwise a plain broadcast scan
+		 * would be done
+		 */
+		wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
+		/* use wild card ssid */
+		ssid.SSID_len = WL_P2P_WILDCARD_SSID_LEN;
+		memset(&ssid.SSID, 0, sizeof(ssid.SSID));
+		memcpy(&ssid.SSID, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN);
+	} else {
+		CFGP2P_ERR((" invalid search state %d\n", search_state));
+		return -1;
+	}
+
+
+	/* Fill in the P2P scan structure at the start of the iovar param block */
+	p2p_params = (wl_p2p_scan_t*) memblk;
+	p2p_params->type = 'E';
+	/* Fill in the Scan structure that follows the P2P scan structure */
+	eparams = (wl_escan_params_t*) (p2p_params + 1);
+	eparams->params.bss_type = DOT11_BSSTYPE_ANY;
+	if (active)
+		eparams->params.scan_type = DOT11_SCANTYPE_ACTIVE;
+	else
+		eparams->params.scan_type = DOT11_SCANTYPE_PASSIVE;
+
+	if (tx_dst_addr == NULL)
+		memcpy(&eparams->params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+	else
+		memcpy(&eparams->params.bssid, tx_dst_addr, ETHER_ADDR_LEN);
+
+	if (ssid.SSID_len)
+		memcpy(&eparams->params.ssid, &ssid, sizeof(wlc_ssid_t));
+
+	eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
+
+	switch (p2p_scan_purpose) {
+	case P2P_SCAN_SOCIAL_CHANNEL:
+		eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS);
+		break;
+	case P2P_SCAN_AFX_PEER_NORMAL:
+	case P2P_SCAN_AFX_PEER_REDUCED:
+		eparams->params.active_time = htod32(P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS);
+		break;
+	case P2P_SCAN_CONNECT_TRY:
+		eparams->params.active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+		break;
+	default:
+		if (wl_get_drv_status_all(cfg, CONNECTED))
+			eparams->params.active_time = -1;
+		else
+			eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS);
+		break;
+	}
+
+	if (p2p_scan_purpose == P2P_SCAN_CONNECT_TRY)
+		eparams->params.nprobes = htod32(eparams->params.active_time /
+			WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+	else
+		eparams->params.nprobes = htod32((eparams->params.active_time /
+			P2PAPI_SCAN_NPROBS_TIME_MS));
+
+
+	if (eparams->params.nprobes <= 0)
+		eparams->params.nprobes = 1;
+	CFGP2P_DBG(("nprobes # %d, active_time %d\n",
+		eparams->params.nprobes, eparams->params.active_time));
+	eparams->params.passive_time = htod32(-1);
+	eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+		(num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+
+	for (i = 0; i < num_chans; i++) {
+		eparams->params.channel_list[i] = wl_ch_host_to_driver(bssidx, channels[i]);
+	}
+	eparams->version = htod32(ESCAN_REQ_VERSION);
+	eparams->action = htod16(action);
+	wl_escan_set_sync_id(eparams->sync_id, cfg);
+	wl_escan_set_type(cfg, WL_SCANTYPE_P2P);
+	CFGP2P_INFO(("SCAN CHANNELS : "));
+
+	for (i = 0; i < num_chans; i++) {
+		if (i == 0) CFGP2P_INFO(("%d", channels[i]));
+		else CFGP2P_INFO((",%d", channels[i]));
+	}
+
+	CFGP2P_INFO(("\n"));
+
+	ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan",
+		memblk, memsize, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+	WL_SCAN(("P2P_SEARCH sync ID: %d, bssidx: %d\n", eparams->sync_id, bssidx));
+	if (ret == BCME_OK)
+		wl_set_p2p_status(cfg, SCANNING);
+	return ret;
+}
+
+/* Search to reach a common channel to send an action frame on
+ * Parameters:
+ * @cfg : wl_private data
+ * @ndev : net device for bssidx
+ * @bssidx : bssidx for BSS
+ * Returns 0 if success.
+ */ +s32 +wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev, + s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr) +{ + s32 ret = 0; + u32 chan_cnt = 0; + u16 *default_chan_list = NULL; + p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_AFX_PEER_NORMAL; + if (!p2p_is_on(cfg) || ndev == NULL || bssidx == WL_INVALID) + return -EINVAL; + WL_TRACE_HW4((" Enter\n")); + if (bssidx == wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY)) + bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE); + if (channel) + chan_cnt = AF_PEER_SEARCH_CNT; + else + chan_cnt = SOCIAL_CHAN_CNT; + + if (cfg->afx_hdl->pending_tx_act_frm && cfg->afx_hdl->is_active) { + wl_action_frame_t *action_frame; + action_frame = &(cfg->afx_hdl->pending_tx_act_frm->action_frame); + if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len)) { + chan_cnt = 1; + p2p_scan_purpose = P2P_SCAN_AFX_PEER_REDUCED; + } + } + + default_chan_list = kzalloc(chan_cnt * sizeof(*default_chan_list), GFP_KERNEL); + if (default_chan_list == NULL) { + CFGP2P_ERR(("channel list allocation failed \n")); + ret = -ENOMEM; + goto exit; + } + if (channel) { + u32 i; + /* insert same channel to the chan_list */ + for (i = 0; i < chan_cnt; i++) { + default_chan_list[i] = channel; + } + } else { + default_chan_list[0] = SOCIAL_CHAN_1; + default_chan_list[1] = SOCIAL_CHAN_2; + default_chan_list[2] = SOCIAL_CHAN_3; + } + ret = wl_cfgp2p_escan(cfg, ndev, true, chan_cnt, + default_chan_list, WL_P2P_DISC_ST_SEARCH, + WL_SCAN_ACTION_START, bssidx, NULL, p2p_scan_purpose); + kfree(default_chan_list); +exit: + return ret; +} + +/* Check whether pointed-to IE looks like WPA. */ +#define wl_cfgp2p_is_wpa_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \ + (const uint8 *)WPS_OUI, WPS_OUI_LEN, WPA_OUI_TYPE) +/* Check whether pointed-to IE looks like WPS. */ +#define wl_cfgp2p_is_wps_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \ + (const uint8 *)WPS_OUI, WPS_OUI_LEN, WPS_OUI_TYPE) +/* Check whether the given IE looks like WFA P2P IE. */ +#define wl_cfgp2p_is_p2p_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \ + (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_P2P) +/* Check whether the given IE looks like WFA WFDisplay IE. */ +#ifndef WFA_OUI_TYPE_WFD +#define WFA_OUI_TYPE_WFD 0x0a /* WiFi Display OUI TYPE */ +#endif +#define wl_cfgp2p_is_wfd_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \ + (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_WFD) + + +/* Is any of the tlvs the expected entry? If + * not update the tlvs buffer pointer/length. 
+ */ +static bool +wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type) +{ + /* If the contents match the OUI and the type */ + if (ie[TLV_LEN_OFF] >= oui_len + 1 && + !bcmp(&ie[TLV_BODY_OFF], oui, oui_len) && + type == ie[TLV_BODY_OFF + oui_len]) { + return TRUE; + } + + if (tlvs == NULL) + return FALSE; + /* point to the next ie */ + ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN; + /* calculate the length of the rest of the buffer */ + *tlvs_len -= (int)(ie - *tlvs); + /* update the pointer to the start of the buffer */ + *tlvs = ie; + + return FALSE; +} + +wpa_ie_fixed_t * +wl_cfgp2p_find_wpaie(u8 *parse, u32 len) +{ + bcm_tlv_t *ie; + + while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) { + if (wl_cfgp2p_is_wpa_ie((u8*)ie, &parse, &len)) { + return (wpa_ie_fixed_t *)ie; + } + } + return NULL; +} + +wpa_ie_fixed_t * +wl_cfgp2p_find_wpsie(u8 *parse, u32 len) +{ + bcm_tlv_t *ie; + + while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) { + if (wl_cfgp2p_is_wps_ie((u8*)ie, &parse, &len)) { + return (wpa_ie_fixed_t *)ie; + } + } + return NULL; +} + +wifi_p2p_ie_t * +wl_cfgp2p_find_p2pie(u8 *parse, u32 len) +{ + bcm_tlv_t *ie; + + while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) { + if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len)) { + return (wifi_p2p_ie_t *)ie; + } + } + return NULL; +} + +wifi_wfd_ie_t * +wl_cfgp2p_find_wfdie(u8 *parse, u32 len) +{ + bcm_tlv_t *ie; + + while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) { + if (wl_cfgp2p_is_wfd_ie((uint8*)ie, &parse, &len)) { + return (wifi_wfd_ie_t *)ie; + } + } + return NULL; +} +u32 +wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag, + s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd) +{ + vndr_ie_setbuf_t hdr; /* aligned temporary vndr_ie buffer header */ + s32 iecount; + u32 data_offset; + + /* Validate the pktflag parameter */ + if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG | + VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG | + VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG))) { + CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag)); + return -1; + } + + /* Copy the vndr_ie SET command ("add"/"del") to the buffer */ + strncpy(hdr.cmd, add_del_cmd, VNDR_IE_CMD_LEN - 1); + hdr.cmd[VNDR_IE_CMD_LEN - 1] = '\0'; + + /* Set the IE count - the buffer contains only 1 IE */ + iecount = htod32(1); + memcpy((void *)&hdr.vndr_ie_buffer.iecount, &iecount, sizeof(s32)); + + /* Copy packet flags that indicate which packets will contain this IE */ + pktflag = htod32(pktflag); + memcpy((void *)&hdr.vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag, + sizeof(u32)); + + /* Add the IE ID to the buffer */ + hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = ie_id; + + /* Add the IE length to the buffer */ + hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = + (uint8) VNDR_IE_MIN_LEN + datalen; + + /* Add the IE OUI to the buffer */ + hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[0] = oui[0]; + hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[1] = oui[1]; + hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui[2] = oui[2]; + + /* Copy the aligned temporary vndr_ie buffer header to the IE buffer */ + memcpy(iebuf, &hdr, sizeof(hdr) - 1); + + /* Copy the IE data to the IE buffer */ + data_offset = + (u8*)&hdr.vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data[0] - + (u8*)&hdr; + memcpy(iebuf + data_offset, data, datalen); + return data_offset + datalen; + +} + +struct net_device * +wl_cfgp2p_find_ndev(struct 
bcm_cfg80211 *cfg, s32 bssidx) +{ + u32 i; + struct net_device *ndev = NULL; + if (bssidx < 0) { + CFGP2P_ERR((" bsscfg idx is invalid\n")); + goto exit; + } + + for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) { + if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) { + ndev = wl_to_p2p_bss_ndev(cfg, i); + break; + } + } + +exit: + return ndev; +} +/* + * Search the driver array idx based on bssidx argument + * Parameters: Note that this idx is applicable only + * for primary and P2P interfaces. The virtual AP/STA is not + * covered here. + * @cfg : wl_private data + * @bssidx : bssidx which indicate bsscfg->idx of firmware. + * @type : output arg to store array idx of p2p->bss. + * Returns error + */ + +s32 +wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type) +{ + u32 i; + if (bssidx < 0 || type == NULL) { + CFGP2P_ERR((" argument is invalid\n")); + goto exit; + } + + for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) { + if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) { + *type = i; + return BCME_OK; + } + } + +exit: + return BCME_BADARG; +} + +/* + * Callback function for WLC_E_P2P_DISC_LISTEN_COMPLETE + */ +s32 +wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + s32 ret = BCME_OK; + struct net_device *ndev = NULL; + + if (!cfg || !cfg->p2p || !cfgdev) + return BCME_ERROR; + + CFGP2P_DBG((" Enter\n")); + +#ifdef DHD_IFDEBUG + WL_ERR(("%s: cfg: %p, cfgdev: %p, cfg->wdev: %p, cfg->p2p_wdev: %p\n", + __FUNCTION__, cfg, cfgdev, cfg->wdev, cfg->p2p_wdev)); +#endif + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + +#ifdef P2P_LISTEN_OFFLOADING + if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) { + wl_clr_p2p_status(cfg, DISC_IN_PROGRESS); + CFGP2P_ERR(("DISC_IN_PROGRESS cleared\n")); + if (ndev && (ndev->ieee80211_ptr != NULL)) { +#if defined(WL_CFG80211_P2P_DEV_IF) + if (cfgdev && ((struct wireless_dev *)cfgdev)->wiphy) { + cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id, + &cfg->remain_on_chan, GFP_KERNEL); + } else { + CFGP2P_ERR(("Invalid cfgdev. 
Dropping the "
+					"remain_on_channel_expired event.\n"));
+			}
+#else
+			cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
+				&cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+		}
+	}
+#endif /* P2P_LISTEN_OFFLOADING */
+
+	if (wl_get_p2p_status(cfg, LISTEN_EXPIRED) == 0) {
+		wl_set_p2p_status(cfg, LISTEN_EXPIRED);
+		if (timer_pending(&cfg->p2p->listen_timer)) {
+			del_timer_sync(&cfg->p2p->listen_timer);
+		}
+
+		if (cfg->afx_hdl->is_listen == TRUE &&
+			wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+			WL_DBG(("Listen DONE for action frame\n"));
+			complete(&cfg->act_frm_scan);
+		}
+#ifdef WL_CFG80211_SYNC_GON
+		else if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+			wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, ndev);
+			WL_DBG(("Listen DONE and wake up wait_next_af !!(%d)\n",
+				jiffies_to_msecs(jiffies - cfg->af_tx_sent_jiffies)));
+
+			if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM))
+				wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+			complete(&cfg->wait_next_af);
+		}
+#endif /* WL_CFG80211_SYNC_GON */
+
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+		if (wl_get_drv_status_all(cfg, REMAINING_ON_CHANNEL)) {
+#else
+		if (wl_get_drv_status_all(cfg, REMAINING_ON_CHANNEL) ||
+			wl_get_drv_status_all(cfg, FAKE_REMAINING_ON_CHANNEL)) {
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+			WL_DBG(("Listen DONE for remain on channel expired\n"));
+			wl_clr_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+			wl_clr_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+			if (ndev && (ndev->ieee80211_ptr != NULL)) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+				if (cfgdev && ((struct wireless_dev *)cfgdev)->wiphy) {
+					/*
+					 * To prevent a kernel panic:
+					 * cfgdev->wiphy may be invalid, so add an explicit check
+					 */
+					cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
+						&cfg->remain_on_chan, GFP_KERNEL);
+				} else {
+					CFGP2P_ERR(("Invalid cfgdev. Dropping the "
+						"remain_on_channel_expired event.\n"));
+				}
+#else
+				cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
+					&cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+			}
+		}
+		if (wl_add_remove_eventmsg(bcmcfg_to_prmry_ndev(cfg),
+			WLC_E_P2P_PROBREQ_MSG, false) != BCME_OK) {
+			CFGP2P_ERR((" failed to unset WLC_E_P2P_PROBREQ_MSG\n"));
+		}
+	} else
+		wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+
+	return ret;
+
+}
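/*
 * The listen-expired timer callback below cannot call cfg80211 APIs from
 * timer context, so it synthesizes a WLC_E_P2P_DISC_LISTEN_COMPLETE event
 * and lets the event thread do the reporting.  Its 4.15+ variant recovers
 * the enclosing struct with from_timer(), which is just container_of() on
 * the embedded timer_list.  A minimal userspace sketch of that pattern
 * (stand-in types, not the driver's):
 */
#include <stdio.h>
#include <stddef.h>

/* container_of: recover the enclosing struct from a member pointer */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_list { int expires; };	/* stand-in for the kernel type */
struct p2p_info  { int bssidx; struct timer_list listen_timer; };

/* What from_timer(ip2p, t, listen_timer) expands to in the 4.15+ path */
static void listen_expired(struct timer_list *t)
{
	struct p2p_info *ip2p = container_of(t, struct p2p_info, listen_timer);
	printf("listen expired on bssidx %d\n", ip2p->bssidx);
}

int main(void)
{
	struct p2p_info p2p = { .bssidx = 1 };
	listen_expired(&p2p.listen_timer);	/* the kernel timer core would invoke this */
	return 0;
}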
+
+/*
+ * Timer expire callback function for LISTEN
+ * We can't report cfg80211_remain_on_channel_expired from timer ISR context,
+ * so let's do it from thread context.
+ */
+void
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
+wl_cfgp2p_listen_expired(struct timer_list *t)
+#else
+wl_cfgp2p_listen_expired(unsigned long data)
+#endif
+{
+	wl_event_msg_t msg;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
+	struct p2p_info *ip2p;
+	struct bcm_cfg80211 *cfg;
+
+	ip2p = from_timer(ip2p, t, listen_timer);
+	cfg = ip2p->bcm_cfg;
+#else
+	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *) data;
+#endif
+	CFGP2P_DBG((" Enter\n"));
+	bzero(&msg, sizeof(wl_event_msg_t));
+	msg.event_type = hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE);
+	msg.bsscfgidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+#if defined(WL_ENABLE_P2P_IF)
+	wl_cfg80211_event(cfg->p2p_net ? cfg->p2p_net :
+		wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE), &msg, NULL);
+#else
+	wl_cfg80211_event(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE), &msg,
+		NULL);
+#endif /* WL_ENABLE_P2P_IF */
+}
+/*
+ * Routine for cancelling the P2P LISTEN
+ */
+static s32
+wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+	struct wireless_dev *wdev, bool notify)
+{
+	WL_DBG(("Enter \n"));
+	/* Irrespective of whether the timer is running or not, reset
+	 * the LISTEN state.
+	 */
+	if (timer_pending(&cfg->p2p->listen_timer)) {
+		del_timer_sync(&cfg->p2p->listen_timer);
+		if (notify) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+			if (wdev)
+				cfg80211_remain_on_channel_expired(wdev, cfg->last_roc_id,
+					&cfg->remain_on_chan, GFP_KERNEL);
+#else
+			if (ndev && ndev->ieee80211_ptr)
+				cfg80211_remain_on_channel_expired(ndev, cfg->last_roc_id,
+					&cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+		}
+	}
+	return 0;
+}
+/*
+ * Do a P2P Listen on the given channel for the given duration.
+ * A listen consists of sitting idle and responding to P2P probe requests
+ * with a P2P probe response.
+ *
+ * This function assumes dongle p2p device discovery is already enabled.
+ * Parameters :
+ * @cfg : wl_private data
+ * @channel : channel to listen on
+ * @duration_ms : the time (milliseconds) to wait
+ */
+s32
+wl_cfgp2p_discover_listen(struct bcm_cfg80211 *cfg, s32 channel, u32 duration_ms)
+{
+#define EXTRA_DELAY_TIME 100
+	s32 ret = BCME_OK;
+	struct timer_list *_timer;
+	s32 extra_delay;
+	struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
+
+	CFGP2P_DBG((" Enter Listen Channel : %d, Duration : %d\n", channel, duration_ms));
+	if (unlikely(wl_get_p2p_status(cfg, DISCOVERY_ON) == 0)) {
+
+		CFGP2P_ERR((" Discovery is not set, so we have nothing to do\n"));
+
+		ret = BCME_NOTREADY;
+		goto exit;
+	}
+	if (timer_pending(&cfg->p2p->listen_timer)) {
+		CFGP2P_DBG(("previous LISTEN is not completed yet\n"));
+		goto exit;
+
+	}
+#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+	else
+		wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+	if (wl_add_remove_eventmsg(netdev, WLC_E_P2P_PROBREQ_MSG, true) != BCME_OK) {
+		CFGP2P_ERR((" failed to set WLC_E_P2P_PROBREQ_MSG\n"));
+	}
+
+	ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms,
+		wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+	_timer = &cfg->p2p->listen_timer;
+
+	/* We will wait to receive WLC_E_P2P_DISC_LISTEN_COMPLETE from the dongle;
+	 * otherwise we will wait up to duration_ms + 100ms + duration / 10
+	 */
+	if (ret == BCME_OK) {
+		extra_delay = EXTRA_DELAY_TIME + (duration_ms / 10);
+	} else {
+		/* If setting listen failed, we don't need to wait the whole duration.
*/ + duration_ms = 100 + duration_ms / 20; + extra_delay = 0; + } + + cfg->p2p->bcm_cfg = cfg; + INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration_ms, extra_delay); +#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST + wl_clr_p2p_status(cfg, LISTEN_EXPIRED); +#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */ + +#undef EXTRA_DELAY_TIME +exit: + return ret; +} + + +s32 +wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable) +{ + s32 ret = BCME_OK; + CFGP2P_DBG((" Enter\n")); + if (!wl_get_p2p_status(cfg, DISCOVERY_ON)) { + + CFGP2P_DBG((" do nothing, discovery is off\n")); + return ret; + } + if (wl_get_p2p_status(cfg, SEARCH_ENABLED) == enable) { + CFGP2P_DBG(("already : %d\n", enable)); + return ret; + } + + wl_chg_p2p_status(cfg, SEARCH_ENABLED); + /* When disabling Search, reset the WL driver's p2p discovery state to + * WL_P2P_DISC_ST_SCAN. + */ + if (!enable) { + wl_clr_p2p_status(cfg, SCANNING); + ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, + wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE)); + } + + return ret; +} + +/* + * Callback function for WLC_E_ACTION_FRAME_COMPLETE, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE + */ +s32 +wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data) +{ + s32 ret = BCME_OK; + u32 event_type = ntoh32(e->event_type); + u32 status = ntoh32(e->status); + struct net_device *ndev = NULL; + CFGP2P_DBG((" Enter\n")); + + ndev = cfgdev_to_wlc_ndev(cfgdev, cfg); + + if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) { + if (event_type == WLC_E_ACTION_FRAME_COMPLETE) { + + CFGP2P_INFO((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status)); + if (status == WLC_E_STATUS_SUCCESS) { + wl_set_p2p_status(cfg, ACTION_TX_COMPLETED); + CFGP2P_DBG(("WLC_E_ACTION_FRAME_COMPLETE : ACK\n")); + if (!cfg->need_wait_afrx && cfg->af_sent_channel) { + CFGP2P_DBG(("no need to wait next AF.\n")); + wl_stop_wait_next_action_frame(cfg, ndev); + } + } + else if (!wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) { + wl_set_p2p_status(cfg, ACTION_TX_NOACK); + CFGP2P_INFO(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n")); + wl_stop_wait_next_action_frame(cfg, ndev); + } + } else { + CFGP2P_INFO((" WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE is received," + "status : %d\n", status)); + + if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) + complete(&cfg->send_af_done); + } + } + return ret; +} +/* Send an action frame immediately without doing channel synchronization. + * + * This function does not wait for a completion event before returning. + * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action + * frame is transmitted. + * The WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE event will be received when an + * 802.11 ack has been received for the sent action frame. 
+ */
+s32
+wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev,
+	wl_af_params_t *af_params, s32 bssidx)
+{
+	s32 ret = BCME_OK;
+	s32 evt_ret = BCME_OK;
+	s32 timeout = 0;
+	wl_eventmsg_buf_t buf;
+
+
+	CFGP2P_INFO(("\n"));
+	CFGP2P_INFO(("channel : %u, dwell time : %u\n",
+		af_params->channel, af_params->dwell_time));
+
+	wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED);
+	wl_clr_p2p_status(cfg, ACTION_TX_NOACK);
+
+	bzero(&buf, sizeof(wl_eventmsg_buf_t));
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, true);
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, true);
+	if ((evt_ret = wl_cfg80211_apply_eventbuffer(bcmcfg_to_prmry_ndev(cfg), cfg, &buf)) < 0)
+		return evt_ret;
+
+	cfg->af_sent_channel = af_params->channel;
+#ifdef WL_CFG80211_SYNC_GON
+	cfg->af_tx_sent_jiffies = jiffies;
+#endif /* WL_CFG80211_SYNC_GON */
+
+	ret = wldev_iovar_setbuf_bsscfg(dev, "actframe", af_params, sizeof(*af_params),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+	if (ret < 0) {
+		CFGP2P_ERR((" sending action frame failed\n"));
+		goto exit;
+	}
+
+	timeout = wait_for_completion_timeout(&cfg->send_af_done,
+		msecs_to_jiffies(af_params->dwell_time + WL_AF_TX_EXTRA_TIME_MAX));
+
+	if (timeout >= 0 && wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
+		CFGP2P_INFO(("tx action frame operation is completed\n"));
+		ret = BCME_OK;
+	} else if (ETHER_ISBCAST(&cfg->afx_hdl->tx_dst_addr)) {
+		CFGP2P_INFO(("bcast tx action frame operation is completed\n"));
+		ret = BCME_OK;
+	} else {
+		ret = BCME_ERROR;
+		CFGP2P_INFO(("tx action frame operation failed\n"));
+	}
+	/* clear status bit for action tx */
+	wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED);
+	wl_clr_p2p_status(cfg, ACTION_TX_NOACK);
+
+exit:
+	CFGP2P_INFO((" via act frame iovar : status = %d\n", ret));
+
+	bzero(&buf, sizeof(wl_eventmsg_buf_t));
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, false);
+	wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, false);
+	if ((evt_ret = wl_cfg80211_apply_eventbuffer(bcmcfg_to_prmry_ndev(cfg), cfg, &buf)) < 0) {
+		WL_ERR(("reverting TX frame events failed\n"));
+		return evt_ret;
+	}
+
+	return ret;
+}
+
+/* Generate our P2P Device Address and P2P Interface Address from our primary
+ * MAC address.
+ */
+void
+wl_cfgp2p_generate_bss_mac(struct bcm_cfg80211 *cfg, struct ether_addr *primary_addr)
+{
+	struct ether_addr *mac_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE);
+	struct ether_addr *int_addr;
+
+	memcpy(mac_addr, primary_addr, sizeof(struct ether_addr));
+	mac_addr->octet[0] |= 0x02;
+	WL_DBG(("P2P Discovery address:"MACDBG "\n", MAC2STRDBG(mac_addr->octet)));
+
+	int_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_CONNECTION1);
+	memcpy(int_addr, mac_addr, sizeof(struct ether_addr));
+	int_addr->octet[4] ^= 0x80;
+	WL_DBG(("Primary P2P Interface address:"MACDBG "\n", MAC2STRDBG(int_addr->octet)));
+
+	int_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_CONNECTION2);
+	memcpy(int_addr, mac_addr, sizeof(struct ether_addr));
+	int_addr->octet[4] ^= 0x90;
+}
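/*
 * wl_cfgp2p_generate_bss_mac() above derives every P2P address from the
 * primary MAC: setting bit 1 of octet 0 marks the address as locally
 * administered, and XORing octet 4 keeps the device and interface
 * addresses distinct.  A self-contained sketch of the same arithmetic
 * (the primary MAC below is made up):
 */
#include <stdio.h>
#include <string.h>

typedef unsigned char u8;

static void print_mac(const char *tag, const u8 m[6])
{
	printf("%s %02x:%02x:%02x:%02x:%02x:%02x\n",
	       tag, m[0], m[1], m[2], m[3], m[4], m[5]);
}

int main(void)
{
	u8 primary[6] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
	u8 dev_addr[6], if1[6], if2[6];

	memcpy(dev_addr, primary, 6);
	dev_addr[0] |= 0x02;		/* locally administered bit, as in the driver */
	memcpy(if1, dev_addr, 6);
	if1[4] ^= 0x80;			/* CONNECTION1 interface address */
	memcpy(if2, dev_addr, 6);
	if2[4] ^= 0x90;			/* CONNECTION2 interface address */

	print_mac("device    :", dev_addr);
	print_mac("interface1:", if1);
	print_mac("interface2:", if2);
	return 0;
}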
+
+/* Change the P2P IF address to the virtual interface MAC address */
+void
+wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id)
+{
+	wifi_p2p_ie_t *ie = (wifi_p2p_ie_t*) buf;
+	u16 len = ie->len;
+	u8 *subel;
+	u8 subelt_id;
+	u16 subelt_len;
+	CFGP2P_DBG((" Enter\n"));
+
+	/* Point subel to the P2P IE's subelt field.
+	 * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+	 */
+	subel = ie->subelts;
+	len -= 4;	/* exclude OUI + OUI_TYPE */
+
+	while (len >= 3) {
+		/* attribute id */
+		subelt_id = *subel;
+		subel += 1;
+		len -= 1;
+
+		/* 2-byte little endian */
+		subelt_len = *subel++;
+		subelt_len |= *subel++ << 8;
+
+		len -= 2;
+		len -= subelt_len;	/* for the remaining subelt fields */
+
+		if (subelt_id == element_id) {
+			if (subelt_id == P2P_SEID_INTINTADDR) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Intended P2P Interface Address ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_DEV_ID) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Device ID ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_DEV_INFO) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("Device INFO ATTR FOUND\n"));
+			} else if (subelt_id == P2P_SEID_GROUP_ID) {
+				memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN);
+				CFGP2P_INFO(("GROUP ID ATTR FOUND\n"));
+			}
+			return;
+		} else {
+			CFGP2P_DBG(("OTHER id : %d\n", subelt_id));
+		}
+		subel += subelt_len;
+	}
+}
+/*
+ * Check if a BSS is up.
+ * This is a common implementation called by most OSL implementations of
+ * p2posl_bss_isup(). DO NOT call this function directly from the
+ * common code -- call p2posl_bss_isup() instead to allow the OSL to
+ * override the common implementation if necessary.
+ */
+bool
+wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx)
+{
+	s32 result, val;
+	bool isup = false;
+	s8 getbuf[64];
+
+	/* Check if the BSS is up */
+	*(int*)getbuf = -1;
+	result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx,
+		sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0, NULL);
+	if (result != 0) {
+		CFGP2P_ERR(("'cfg bss -C %d' failed: %d\n", bsscfg_idx, result));
+		CFGP2P_ERR(("NOTE: this ioctl error is normal "
+			"when the BSS has not been created yet.\n"));
+	} else {
+		val = *(int*)getbuf;
+		val = dtoh32(val);
+		CFGP2P_INFO(("---cfg bss -C %d ==> %d\n", bsscfg_idx, val));
+		isup = (val ? TRUE : FALSE);
+	}
+	return isup;
+}
+
+
+/* Bring up or down a BSS */
+s32
+wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 is_up)
+{
+	s32 ret = BCME_OK;
+	s32 val = is_up ? 1 : 0;
+
+	struct {
+		s32 cfg;
+		s32 val;
+	} bss_setbuf;
+
+	bss_setbuf.cfg = htod32(bsscfg_idx);
+	bss_setbuf.val = htod32(val);
+	CFGP2P_INFO(("---cfg bss -C %d %s\n", bsscfg_idx, is_up ? "up" : "down"));
+	ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+		cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+	if (ret != 0) {
+		CFGP2P_ERR(("'bss %d' failed with %d\n", is_up, ret));
+	}
+
+	return ret;
+}
+
+/* Check if 'p2p' is supported in the driver */
+s32
+wl_cfgp2p_supported(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+	s32 ret = BCME_OK;
+	s32 p2p_supported = 0;
+	ret = wldev_iovar_getint(ndev, "p2p",
+		&p2p_supported);
+	if (ret < 0) {
+		if (ret == BCME_UNSUPPORTED) {
+			CFGP2P_INFO(("p2p is unsupported\n"));
+			return 0;
+		} else {
+			CFGP2P_ERR(("cfg p2p error %d\n", ret));
+			return ret;
+		}
+	}
+	if (p2p_supported == 1) {
+		CFGP2P_INFO(("p2p is supported\n"));
+	} else {
+		CFGP2P_INFO(("p2p is unsupported\n"));
+		p2p_supported = 0;
+	}
+	return p2p_supported;
+}
+/* Cleanup P2P resources */
+s32
+wl_cfgp2p_down(struct bcm_cfg80211 *cfg)
+{
+	struct net_device *ndev = NULL;
+	struct wireless_dev *wdev = NULL;
+	s32 i = 0, index = -1;
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+	ndev = bcmcfg_to_prmry_ndev(cfg);
+	wdev = bcmcfg_to_p2p_wdev(cfg);
+#elif defined(WL_ENABLE_P2P_IF)
+	ndev = cfg->p2p_net ? 
cfg->p2p_net : bcmcfg_to_prmry_ndev(cfg); + wdev = ndev_to_wdev(ndev); +#endif /* WL_CFG80211_P2P_DEV_IF */ + + wl_cfgp2p_cancel_listen(cfg, ndev, wdev, TRUE); + wl_cfgp2p_disable_discovery(cfg); + +#if defined(WL_CFG80211_P2P_DEV_IF) && !defined(KEEP_WIFION_OPTION) + if (cfg->p2p_wdev) { + /* If p2p wdev is left out, clean it up */ + WL_ERR(("Clean up the p2p discovery IF\n")); + wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg); + } +#endif /* WL_CFG80211_P2P_DEV_IF !defined(KEEP_WIFION_OPTION) */ + + for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) { + index = wl_to_p2p_bss_bssidx(cfg, i); + if (index != WL_INVALID) + wl_cfg80211_clear_per_bss_ies(cfg, index); + } + wl_cfgp2p_deinit_priv(cfg); + return 0; +} + +int wl_cfgp2p_vif_created(struct bcm_cfg80211 *cfg) +{ + if (cfg->p2p && ((wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) != -1) || + (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) != -1))) + return true; + else + return false; + +} + +s32 +wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len) +{ + s32 ret = -1; + int count, start, duration; + wl_p2p_sched_t dongle_noa; + s32 bssidx, type; + int iovar_len = sizeof(dongle_noa); + CFGP2P_DBG((" Enter\n")); + + memset(&dongle_noa, 0, sizeof(dongle_noa)); + + if (wl_cfgp2p_vif_created(cfg)) { + cfg->p2p->noa.desc[0].start = 0; + + sscanf(buf, "%10d %10d %10d", &count, &start, &duration); + CFGP2P_DBG(("set_p2p_noa count %d start %d duration %d\n", + count, start, duration)); + if (count != -1) + cfg->p2p->noa.desc[0].count = count; + + /* supplicant gives interval as start */ + if (start != -1) + cfg->p2p->noa.desc[0].interval = start; + + if (duration != -1) + cfg->p2p->noa.desc[0].duration = duration; + + if (cfg->p2p->noa.desc[0].count != 255 && cfg->p2p->noa.desc[0].count != 0) { + cfg->p2p->noa.desc[0].start = 200; + dongle_noa.type = WL_P2P_SCHED_TYPE_REQ_ABS; + dongle_noa.action = WL_P2P_SCHED_ACTION_GOOFF; + dongle_noa.option = WL_P2P_SCHED_OPTION_TSFOFS; + } + else if (cfg->p2p->noa.desc[0].count == 0) { + cfg->p2p->noa.desc[0].start = 0; + dongle_noa.type = WL_P2P_SCHED_TYPE_ABS; + dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL; + dongle_noa.action = WL_P2P_SCHED_ACTION_RESET; + } + else { + /* Continuous NoA interval. 
*/ + dongle_noa.action = WL_P2P_SCHED_ACTION_DOZE; + dongle_noa.type = WL_P2P_SCHED_TYPE_ABS; + if ((cfg->p2p->noa.desc[0].interval == 102) || + (cfg->p2p->noa.desc[0].interval == 100)) { + cfg->p2p->noa.desc[0].start = 100 - + cfg->p2p->noa.desc[0].duration; + dongle_noa.option = WL_P2P_SCHED_OPTION_BCNPCT; + } + else { + dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL; + } + } + /* Put the noa descriptor in dongle format for dongle */ + dongle_noa.desc[0].count = htod32(cfg->p2p->noa.desc[0].count); + if (dongle_noa.option == WL_P2P_SCHED_OPTION_BCNPCT) { + dongle_noa.desc[0].start = htod32(cfg->p2p->noa.desc[0].start); + dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration); + } + else { + dongle_noa.desc[0].start = htod32(cfg->p2p->noa.desc[0].start*1000); + dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration*1000); + } + dongle_noa.desc[0].interval = htod32(cfg->p2p->noa.desc[0].interval*1000); + bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr); + if (wl_cfgp2p_find_type(cfg, bssidx, &type) != BCME_OK) + return BCME_ERROR; + + if (dongle_noa.action == WL_P2P_SCHED_ACTION_RESET) { + iovar_len -= sizeof(wl_p2p_sched_desc_t); + } + + ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(cfg, type), + "p2p_noa", &dongle_noa, iovar_len, cfg->ioctl_buf, + WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + + if (ret < 0) { + CFGP2P_ERR(("fw set p2p_noa failed %d\n", ret)); + } + } + else { + CFGP2P_ERR(("ERROR: set_noa in non-p2p mode\n")); + } + return ret; +} +s32 +wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int buf_len) +{ + + wifi_p2p_noa_desc_t *noa_desc; + int len = 0, i; + char _buf[200]; + + CFGP2P_DBG((" Enter\n")); + buf[0] = '\0'; + if (wl_cfgp2p_vif_created(cfg)) { + if (cfg->p2p->noa.desc[0].count || cfg->p2p->ops.ops) { + _buf[0] = 1; /* noa index */ + _buf[1] = (cfg->p2p->ops.ops ? 
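/* bit 7 = OppPS enable, bits 6..0 = CTWindow, per the P2P NoA attribute layout */ 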
0x80: 0) |
+				(cfg->p2p->ops.ctw & 0x7f); /* ops + ctw */
+			len += 2;
+			if (cfg->p2p->noa.desc[0].count) {
+				noa_desc = (wifi_p2p_noa_desc_t*)&_buf[len];
+				noa_desc->cnt_type = cfg->p2p->noa.desc[0].count;
+				noa_desc->duration = cfg->p2p->noa.desc[0].duration;
+				noa_desc->interval = cfg->p2p->noa.desc[0].interval;
+				noa_desc->start = cfg->p2p->noa.desc[0].start;
+				len += sizeof(wifi_p2p_noa_desc_t);
+			}
+			if (buf_len <= len * 2) {
+				CFGP2P_ERR(("ERROR: buf_len %d is not enough for "
+					"returning noa in string format\n", buf_len));
+				return -1;
+			}
+			/* We have to convert the buffer data into ASCII strings */
+			for (i = 0; i < len; i++) {
+				snprintf(buf, 3, "%02x", _buf[i]);
+				buf += 2;
+			}
+			/* buf now points one past the last hex digit and snprintf
+			 * has already NUL-terminated each pair, so terminate in
+			 * place rather than indexing past the advanced pointer.
+			 */
+			*buf = '\0';
+		}
+	}
+	else {
+		CFGP2P_ERR(("ERROR: get_noa in non-p2p mode\n"));
+		return -1;
+	}
+	return len * 2;
+}
+s32
+wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+	int ps, ctw;
+	int ret = -1;
+	s32 legacy_ps;
+	s32 conn_idx;
+	s32 bssidx;
+	struct net_device *dev;
+
+	CFGP2P_DBG((" Enter\n"));
+	if (wl_cfgp2p_vif_created(cfg)) {
+		sscanf(buf, "%10d %10d %10d", &legacy_ps, &ps, &ctw);
+		CFGP2P_DBG((" Enter legacy_ps %d ps %d ctw %d\n", legacy_ps, ps, ctw));
+
+		bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+		if (wl_cfgp2p_find_type(cfg, bssidx, &conn_idx) != BCME_OK)
+			return BCME_ERROR;
+		dev = wl_to_p2p_bss_ndev(cfg, conn_idx);
+		if (ctw != -1) {
+			cfg->p2p->ops.ctw = ctw;
+			ret = 0;
+		}
+		if (ps != -1) {
+			cfg->p2p->ops.ops = ps;
+			ret = wldev_iovar_setbuf(dev,
+				"p2p_ops", &cfg->p2p->ops, sizeof(cfg->p2p->ops),
+				cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+			if (ret < 0) {
+				CFGP2P_ERR(("fw set p2p_ops failed %d\n", ret));
+			}
+		}
+
+		if ((legacy_ps != -1) && ((legacy_ps == PM_MAX) || (legacy_ps == PM_OFF))) {
+			ret = wldev_ioctl(dev,
+				WLC_SET_PM, &legacy_ps, sizeof(legacy_ps), true);
+			if (unlikely(ret))
+				CFGP2P_ERR(("error (%d)\n", ret));
+			wl_cfg80211_update_power_mode(dev);
+		}
+		else
+			CFGP2P_ERR(("illegal setting\n"));
+	}
+	else {
+		CFGP2P_ERR(("ERROR: set_p2p_ps in non-p2p mode\n"));
+		ret = -1;
+	}
+	return ret;
+}
+
+s32
+wl_cfgp2p_set_p2p_ecsa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+	int ch, bw;
+	s32 conn_idx;
+	s32 bssidx;
+	struct net_device *dev;
+	char smbuf[WLC_IOCTL_SMLEN];
+	wl_chan_switch_t csa_arg;
+	u32 chnsp = 0;
+	int err = 0;
+
+	CFGP2P_DBG((" Enter\n"));
+	if (wl_cfgp2p_vif_created(cfg)) {
+		sscanf(buf, "%10d %10d", &ch, &bw);
+		CFGP2P_DBG(("Enter ch %d bw %d\n", ch, bw));
+
+		bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+		if (wl_cfgp2p_find_type(cfg, bssidx, &conn_idx) != BCME_OK) {
+			return BCME_ERROR;
+		}
+		dev = wl_to_p2p_bss_ndev(cfg, conn_idx);
+		if (ch <= 0 || bw <= 0) {
+			CFGP2P_ERR(("Channel and bandwidth must be positive!\n"));
+			return BCME_ERROR;
+		}
+
+		csa_arg.mode = DOT11_CSA_MODE_ADVISORY;
+		csa_arg.count = P2P_ECSA_CNT;
+		csa_arg.reg = 0;
+
+		sprintf(buf, "%d/%d", ch, bw);
+		chnsp = wf_chspec_aton(buf);
+		if (chnsp == 0) {
+			CFGP2P_ERR(("%s: chanspec is not correct\n", __FUNCTION__));
+			return BCME_ERROR;
+		}
+		chnsp = wl_chspec_host_to_driver(chnsp);
+		csa_arg.chspec = chnsp;
+
+		err = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg),
+			smbuf, sizeof(smbuf), NULL);
+		if (err) {
+			CFGP2P_ERR(("%s: set p2p_ecsa failed: %d\n", __FUNCTION__, err));
+			return BCME_ERROR;
+		}
+	} else {
+		CFGP2P_ERR(("ERROR: set_p2p_ecsa in non-p2p mode\n"));
+		return BCME_ERROR;
+	}
+	return BCME_OK;
+}
+
+u8 *
+wl_cfgp2p_retreive_p2pattrib(void *buf, u8 
element_id)
+{
+	wifi_p2p_ie_t *ie = NULL;
+	u16 len = 0;
+	u8 *subel;
+	u8 subelt_id;
+	u16 subelt_len;
+
+	if (!buf) {
+		WL_ERR(("P2P IE not present"));
+		return NULL;
+	}
+
+	ie = (wifi_p2p_ie_t*) buf;
+	len = ie->len;
+
+	/* Point subel to the P2P IE's subelt field.
+	 * Subtract the preceding fields (id, len, OUI, oui_type) from the length.
+	 */
+	subel = ie->subelts;
+	len -= 4;	/* exclude OUI + OUI_TYPE */
+
+	while (len >= 3) {
+		/* attribute id */
+		subelt_id = *subel;
+		subel += 1;
+		len -= 1;
+
+		/* 2-byte little endian */
+		subelt_len = *subel++;
+		subelt_len |= *subel++ << 8;
+
+		len -= 2;
+		len -= subelt_len;	/* for the remaining subelt fields */
+
+		if (subelt_id == element_id) {
+			/* This will point to start of subelement attrib after
+			 * attribute id & len
+			 */
+			return subel;
+		}
+
+		/* Go to next subelement */
+		subel += subelt_len;
+	}
+
+	/* Not Found */
+	return NULL;
+}
+
+#define P2P_GROUP_CAPAB_GO_BIT	0x01
+
+u8*
+wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib)
+{
+	bcm_tlv_t *ie;
+	u8* pAttrib;
+
+	CFGP2P_INFO(("Starting parsing parse %p attrib %d remaining len %d ", parse, attrib, len));
+	while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+		if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len) == TRUE) {
+			/* Have the P2P IE. Now check for the attribute */
+			if ((pAttrib = wl_cfgp2p_retreive_p2pattrib(parse, attrib)) != NULL) {
+				CFGP2P_INFO(("P2P attribute %d was found at parse %p",
+					attrib, parse));
+				return pAttrib;
+			}
+			else {
+				parse += (ie->len + TLV_HDR_LEN);
+				len -= (ie->len + TLV_HDR_LEN);
+				CFGP2P_INFO(("P2P attribute %d not found. Moving parse"
+					" to %p len to %d", attrib, parse, len));
+			}
+		}
+		else {
+			/* It was not a P2P IE. parse will get updated automatically to next TLV */
+			CFGP2P_INFO(("It was not a P2P IE. parse %p len %d", parse, len));
+		}
+	}
+	CFGP2P_ERR(("P2P attribute %d was NOT found", attrib));
+	return NULL;
+}
+
+u8 *
+wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length)
+{
+	u8 *capability = NULL;
+	bool p2p_go = 0;
+	u8 *ptr = NULL;
+
+	if ((capability = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+		bi->ie_length, P2P_SEID_P2P_INFO)) == NULL) {
+		WL_ERR(("P2P Capability attribute not found"));
+		return NULL;
+	}
+
+	/* Check Group capability for Group Owner bit */
+	p2p_go = capability[1] & P2P_GROUP_CAPAB_GO_BIT;
+	if (!p2p_go) {
+		return bi->BSSID.octet;
+	}
+
+	/* In probe responses, DEVICE INFO attribute will be present */
+	if (!(ptr = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+		bi->ie_length, P2P_SEID_DEV_INFO))) {
+		/* If DEVICE_INFO is not found, this might be a beacon frame.
+		 * Check for DEVICE_ID in the beacon frame.
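+		 * (Probe responses from a GO carry the Device Info attribute,
+		 * while GO beacons carry only the Device ID attribute, so both
+		 * are tried before giving up.)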
+		 */
+		ptr = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
+			bi->ie_length, P2P_SEID_DEV_ID);
+	}
+
+	if (!ptr)
+		WL_ERR((" Neither DEVICE_ID nor DEVICE_INFO attribute present in P2P IE "));
+
+	return ptr;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+static void
+wl_cfgp2p_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+	snprintf(info->driver, sizeof(info->driver), "p2p");
+	snprintf(info->version, sizeof(info->version), "%lu", (unsigned long)(0));
+}
+
+struct ethtool_ops cfgp2p_ethtool_ops = {
+	.get_drvinfo = wl_cfgp2p_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+#if defined(WL_ENABLE_P2P_IF)
+s32
+wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg)
+{
+	int ret = 0;
+	struct net_device* net = NULL;
+	struct wireless_dev *wdev = NULL;
+	uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x33, 0x22, 0x11 };
+
+	if (cfg->p2p_net) {
+		CFGP2P_ERR(("p2p_net defined already.\n"));
+		return -EINVAL;
+	}
+
+	/* Allocate etherdev, including space for private structure */
+	if (!(net = alloc_etherdev(sizeof(struct bcm_cfg80211 *)))) {
+		CFGP2P_ERR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
+		return -ENODEV;
+	}
+
+	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+	if (unlikely(!wdev)) {
+		WL_ERR(("Could not allocate wireless device\n"));
+		free_netdev(net);
+		return -ENOMEM;
+	}
+
+	strncpy(net->name, "p2p%d", sizeof(net->name) - 1);
+	net->name[IFNAMSIZ - 1] = '\0';
+
+	/* Copy the reference to bcm_cfg80211 */
+	memcpy((void *)netdev_priv(net), &cfg, sizeof(struct bcm_cfg80211 *));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+	ASSERT(!net->open);
+	net->do_ioctl = wl_cfgp2p_do_ioctl;
+	net->hard_start_xmit = wl_cfgp2p_start_xmit;
+	net->open = wl_cfgp2p_if_open;
+	net->stop = wl_cfgp2p_if_stop;
+#else
+	ASSERT(!net->netdev_ops);
+	net->netdev_ops = &wl_cfgp2p_if_ops;
+#endif
+
+	/* Register with a dummy MAC addr */
+	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+
+	wdev->wiphy = cfg->wdev->wiphy;
+
+	wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+
+	net->ieee80211_ptr = wdev;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+	net->ethtool_ops = &cfgp2p_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+	SET_NETDEV_DEV(net, wiphy_dev(wdev->wiphy));
+
+	/* Associate p2p0 network interface with new wdev */
+	wdev->netdev = net;
+
+	ret = register_netdev(net);
+	if (ret) {
+		CFGP2P_ERR((" register_netdev failed (%d)\n", ret));
+		free_netdev(net);
+		kfree(wdev);
+		return -ENODEV;
+	}
+
+	/* store p2p net ptr for further reference. Note that iflist won't have this
+	 * entry as the corresponding firmware interface is a "Hidden" interface. 
+ */ + cfg->p2p_wdev = wdev; + cfg->p2p_net = net; + + printk("%s: P2P Interface Registered\n", net->name); + + return ret; +} + +s32 +wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg) +{ + + if (!cfg || !cfg->p2p_net) { + CFGP2P_ERR(("Invalid Ptr\n")); + return -EINVAL; + } + + unregister_netdev(cfg->p2p_net); + free_netdev(cfg->p2p_net); + + return 0; +} +static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + + if (skb) + { + CFGP2P_DBG(("(%s) is not used for data operations.Droping the packet.\n", + ndev->name)); + dev_kfree_skb_any(skb); + } + + return 0; +} + +static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd) +{ + int ret = 0; + struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net); + struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg); + + /* There is no ifidx corresponding to p2p0 in our firmware. So we should + * not Handle any IOCTL cmds on p2p0 other than ANDROID PRIVATE CMDs. + * For Android PRIV CMD handling map it to primary I/F + */ + if (cmd == SIOCDEVPRIVATE+1) { + ret = wl_android_priv_cmd(ndev, ifr, cmd); + + } else { + CFGP2P_ERR(("%s: IOCTL req 0x%x on p2p0 I/F. Ignoring. \n", + __FUNCTION__, cmd)); + return -1; + } + + return ret; +} +#endif + +#if defined(WL_ENABLE_P2P_IF) +static int wl_cfgp2p_if_open(struct net_device *net) +{ + struct wireless_dev *wdev = net->ieee80211_ptr; + + if (!wdev || !wl_cfg80211_is_p2p_active()) + return -EINVAL; + WL_TRACE(("Enter\n")); +#if !defined(WL_IFACE_COMB_NUM_CHANNELS) + /* If suppose F/W download (ifconfig wlan0 up) hasn't been done by now, + * do it here. This will make sure that in concurrent mode, supplicant + * is not dependent on a particular order of interface initialization. + * i.e you may give wpa_supp -iwlan0 -N -ip2p0 or wpa_supp -ip2p0 -N + * -iwlan0. + */ + wdev->wiphy->interface_modes |= (BIT(NL80211_IFTYPE_P2P_CLIENT) + | BIT(NL80211_IFTYPE_P2P_GO)); +#endif /* !WL_IFACE_COMB_NUM_CHANNELS */ + wl_cfg80211_do_driver_init(net); + + return 0; +} + +static int wl_cfgp2p_if_stop(struct net_device *net) +{ + struct wireless_dev *wdev = net->ieee80211_ptr; + + if (!wdev) + return -EINVAL; + + wl_cfg80211_scan_stop(net); + +#if !defined(WL_IFACE_COMB_NUM_CHANNELS) + wdev->wiphy->interface_modes = (wdev->wiphy->interface_modes) + & (~(BIT(NL80211_IFTYPE_P2P_CLIENT)| + BIT(NL80211_IFTYPE_P2P_GO))); +#endif /* !WL_IFACE_COMB_NUM_CHANNELS */ + return 0; +} + +bool wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops) +{ + return (if_ops == &wl_cfgp2p_if_ops); +} +#endif /* WL_ENABLE_P2P_IF */ + +#if defined(WL_CFG80211_P2P_DEV_IF) +struct wireless_dev * +wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg) +{ + struct wireless_dev *wdev = NULL; + struct ether_addr primary_mac; + + if (!cfg || !cfg->p2p_supported) + return ERR_PTR(-EINVAL); + + WL_TRACE(("Enter\n")); + + if (cfg->p2p_wdev) { +#ifndef EXPLICIT_DISCIF_CLEANUP + dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub); +#endif /* EXPLICIT_DISCIF_CLEANUP */ + /* + * This is not expected. This can happen due to + * supplicant crash/unclean de-initialization which + * didn't free the p2p discovery interface. Indicate + * driver hang to user space so that the framework + * can rei-init the Wi-Fi. + */ + CFGP2P_ERR(("p2p_wdev defined already.\n")); + wl_probe_wdev_all(cfg); +#ifdef EXPLICIT_DISCIF_CLEANUP + /* + * CUSTOMER_HW4 design doesn't delete the p2p discovery + * interface on ifconfig wlan0 down context which comes + * without a preceeding NL80211_CMD_DEL_INTERFACE for p2p + * discovery. 
But during supplicant crash the DEL_IFACE + * command will not happen and will cause a left over iface + * even after ifconfig wlan0 down. So delete the iface + * first and then indicate the HANG event + */ + wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg); +#else + dhd->hang_reason = HANG_REASON_P2P_IFACE_DEL_FAILURE; + net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg)); + return ERR_PTR(-ENODEV); +#endif /* EXPLICIT_DISCIF_CLEANUP */ + } + + wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); + if (unlikely(!wdev)) { + WL_ERR(("Could not allocate wireless device\n")); + return ERR_PTR(-ENOMEM); + } + + memset(&primary_mac, 0, sizeof(primary_mac)); + get_primary_mac(cfg, &primary_mac); + wl_cfgp2p_generate_bss_mac(cfg, &primary_mac); + + wdev->wiphy = cfg->wdev->wiphy; + wdev->iftype = NL80211_IFTYPE_P2P_DEVICE; + memcpy(wdev->address, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE), ETHER_ADDR_LEN); + + + /* store p2p wdev ptr for further reference. */ + cfg->p2p_wdev = wdev; + + CFGP2P_ERR(("P2P interface registered\n")); +#ifdef DHD_IFDEBUG + WL_ERR(("%s: wdev: %p, wdev->net: %p\n", __FUNCTION__, wdev, wdev->netdev)); +#endif + return wdev; +} + +int +wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev) +{ + int ret = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + + if (!cfg) + return -EINVAL; + + WL_TRACE(("Enter\n")); + + ret = wl_cfgp2p_set_firm_p2p(cfg); + if (unlikely(ret < 0)) { + CFGP2P_ERR(("Set P2P in firmware failed, ret=%d\n", ret)); + goto exit; + } + + ret = wl_cfgp2p_enable_discovery(cfg, bcmcfg_to_prmry_ndev(cfg), NULL, 0); + if (unlikely(ret < 0)) { + CFGP2P_ERR(("P2P enable discovery failed, ret=%d\n", ret)); + goto exit; + } + + p2p_on(cfg) = true; +#if defined(P2P_IE_MISSING_FIX) + cfg->p2p_prb_noti = false; +#endif + + CFGP2P_DBG(("P2P interface started\n")); + +exit: + return ret; +} + +void +wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev) +{ + int ret = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + + if (!cfg) + return; + + CFGP2P_DBG(("Enter\n")); + + ret = wl_cfg80211_scan_stop(wdev); + if (unlikely(ret < 0)) { + CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret)); + } + + if (!cfg->p2p) + return; + + ret = wl_cfgp2p_disable_discovery(cfg); + if (unlikely(ret < 0)) { + CFGP2P_ERR(("P2P disable discovery failed, ret=%d\n", ret)); + } + + p2p_on(cfg) = false; + + CFGP2P_DBG(("Exit. 
P2P interface stopped\n")); + + return; +} + +int +wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg) +{ + bool rollback_lock = false; + + if (!wdev) + return -EINVAL; + + WL_TRACE(("Enter\n")); +#ifdef DHD_IFDEBUG + WL_ERR(("%s: wdev: %p, wdev->net: %p\n", __FUNCTION__, wdev, wdev->netdev)); +#endif + + if (!rtnl_is_locked()) { + rtnl_lock(); + rollback_lock = true; + } + + cfg80211_unregister_wdev(wdev); + + if (rollback_lock) + rtnl_unlock(); + + synchronize_rcu(); + + kfree(wdev); + + if (cfg) + cfg->p2p_wdev = NULL; + + CFGP2P_ERR(("P2P interface unregistered\n")); + + return 0; +} +#endif /* WL_CFG80211_P2P_DEV_IF */ + +void +wl_cfgp2p_need_wait_actfrmae(struct bcm_cfg80211 *cfg, void *frame, u32 frame_len, bool tx) +{ + wifi_p2p_pub_act_frame_t *pact_frm; + int status = 0; + + if (!frame || (frame_len < (sizeof(*pact_frm) + WL_P2P_AF_STATUS_OFFSET - 1))) { + return; + } + + if (wl_cfgp2p_is_pub_action(frame, frame_len)) { + pact_frm = (wifi_p2p_pub_act_frame_t *)frame; + if (pact_frm->subtype == P2P_PAF_GON_RSP && tx) { + CFGP2P_ACTION(("Check TX P2P Group Owner Negotiation Rsp Frame status\n")); + status = pact_frm->elts[WL_P2P_AF_STATUS_OFFSET]; + if (status) { + cfg->need_wait_afrx = false; + return; + } + } + } + + cfg->need_wait_afrx = true; + return; +} + +int +wl_cfgp2p_is_p2p_specific_scan(struct cfg80211_scan_request *request) +{ + if (request && (request->n_ssids == 1) && + (request->n_channels == 1) && + IS_P2P_SSID(request->ssids[0].ssid, WL_P2P_WILDCARD_SSID_LEN) && + (request->ssids[0].ssid_len > WL_P2P_WILDCARD_SSID_LEN)) { + return true; + } + return false; +} diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.h b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h new file mode 100644 index 000000000000..aab1061b43a7 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h @@ -0,0 +1,458 @@ +/* + * Linux cfgp2p driver + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_cfgp2p.h 608203 2015-12-24 05:30:44Z $ + */ +#ifndef _wl_cfgp2p_h_ +#define _wl_cfgp2p_h_ +#include +#include + +struct bcm_cfg80211; +extern u32 wl_dbg_level; + +typedef struct wifi_p2p_ie wifi_wfd_ie_t; +/* Enumeration of the usages of the BSSCFGs used by the P2P Library. Do not + * confuse this with a bsscfg index. This value is an index into the + * saved_ie[] array of structures which in turn contains a bsscfg index field. 
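+ * P2PAPI_BSSCFG_DEVICE is the dedicated discovery bsscfg, while the
+ * two P2PAPI_BSSCFG_CONNECTION entries cover concurrent P2P links.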
+ */ +typedef enum { + P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */ + P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */ + P2PAPI_BSSCFG_CONNECTION1, /* maps to driver's P2P connection bsscfg */ + P2PAPI_BSSCFG_CONNECTION2, + P2PAPI_BSSCFG_MAX +} p2p_bsscfg_type_t; + +typedef enum { + P2P_SCAN_PURPOSE_MIN, + P2P_SCAN_SOCIAL_CHANNEL, /* scan for social channel */ + P2P_SCAN_AFX_PEER_NORMAL, /* scan for action frame search */ + P2P_SCAN_AFX_PEER_REDUCED, /* scan for action frame search with short time */ + P2P_SCAN_DURING_CONNECTED, /* scan during connected status */ + P2P_SCAN_CONNECT_TRY, /* scan for connecting */ + P2P_SCAN_NORMAL, /* scan during not-connected status */ + P2P_SCAN_PURPOSE_MAX +} p2p_scan_purpose_t; + +/* vendor ies max buffer length for probe response or beacon */ +#define VNDR_IES_MAX_BUF_LEN 1400 +/* normal vendor ies buffer length */ +#define VNDR_IES_BUF_LEN 512 + +struct p2p_bss { + s32 bssidx; + struct net_device *dev; + void *private_data; + struct ether_addr mac_addr; +}; + +struct p2p_info { + bool on; /* p2p on/off switch */ + bool scan; + int16 search_state; + s8 vir_ifname[IFNAMSIZ]; + unsigned long status; + struct p2p_bss bss[P2PAPI_BSSCFG_MAX]; + struct timer_list listen_timer; + struct bcm_cfg80211 *bcm_cfg; + wl_p2p_sched_t noa; + wl_p2p_ops_t ops; + wlc_ssid_t ssid; + s8 p2p_go_count; +}; + +#define MAX_VNDR_IE_NUMBER 10 + +struct parsed_vndr_ie_info { + char *ie_ptr; + u32 ie_len; /* total length including id & length field */ + vndr_ie_t vndrie; +}; + +struct parsed_vndr_ies { + u32 count; + struct parsed_vndr_ie_info ie_info[MAX_VNDR_IE_NUMBER]; +}; + +/* dongle status */ +enum wl_cfgp2p_status { + WLP2P_STATUS_DISCOVERY_ON = 0, + WLP2P_STATUS_SEARCH_ENABLED, + WLP2P_STATUS_IF_ADDING, + WLP2P_STATUS_IF_DELETING, + WLP2P_STATUS_IF_CHANGING, + WLP2P_STATUS_IF_CHANGED, + WLP2P_STATUS_LISTEN_EXPIRED, + WLP2P_STATUS_ACTION_TX_COMPLETED, + WLP2P_STATUS_ACTION_TX_NOACK, + WLP2P_STATUS_SCANNING, + WLP2P_STATUS_GO_NEG_PHASE, + WLP2P_STATUS_DISC_IN_PROGRESS +}; + + +#define wl_to_p2p_bss_ndev(cfg, type) ((cfg)->p2p->bss[type].dev) +#define wl_to_p2p_bss_bssidx(cfg, type) ((cfg)->p2p->bss[type].bssidx) +#define wl_to_p2p_bss_macaddr(cfg, type) &((cfg)->p2p->bss[type].mac_addr) +#define wl_to_p2p_bss_saved_ie(cfg, type) ((cfg)->p2p->bss[type].saved_ie) +#define wl_to_p2p_bss_private(cfg, type) ((cfg)->p2p->bss[type].private_data) +#define wl_to_p2p_bss(cfg, type) ((cfg)->p2p->bss[type]) +#define wl_get_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \ + test_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status)) +#define wl_set_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \ + set_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status)) +#define wl_clr_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \ + clear_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status)) +#define wl_chg_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 
0 : \ + change_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status)) +#define p2p_on(cfg) ((cfg)->p2p->on) +#define p2p_scan(cfg) ((cfg)->p2p->scan) +#define p2p_is_on(cfg) ((cfg)->p2p && (cfg)->p2p->on) + +/* dword align allocation */ +#define WLC_IOCTL_MAXLEN 8192 + +#ifdef CUSTOMER_HW4_DEBUG +#define CFGP2P_ERROR_TEXT "CFGP2P-INFO2) " +#else +#define CFGP2P_ERROR_TEXT "CFGP2P-ERROR) " +#endif /* CUSTOMER_HW4_DEBUG */ + +#ifdef DHD_LOG_DUMP +#define CFGP2P_ERR(args) \ + do { \ + if (wl_dbg_level & WL_DBG_ERR) { \ + printk(KERN_INFO CFGP2P_ERROR_TEXT "%s : ", __func__); \ + printk args; \ + dhd_log_dump_print("[%s] %s: ", \ + dhd_log_dump_get_timestamp(), __func__); \ + dhd_log_dump_print args; \ + } \ + } while (0) +#else +#define CFGP2P_ERR(args) \ + do { \ + if (wl_dbg_level & WL_DBG_ERR) { \ + printk(KERN_INFO CFGP2P_ERROR_TEXT "%s : ", __func__); \ + printk args; \ + } \ + } while (0) +#endif /* DHD_LOG_DUMP */ +#define CFGP2P_INFO(args) \ + do { \ + if (wl_dbg_level & WL_DBG_INFO) { \ + printk(KERN_INFO "CFGP2P-INFO) %s : ", __func__); \ + printk args; \ + } \ + } while (0) +#define CFGP2P_DBG(args) \ + do { \ + if (wl_dbg_level & WL_DBG_DBG) { \ + printk(KERN_DEBUG "CFGP2P-DEBUG) %s :", __func__); \ + printk args; \ + } \ + } while (0) + +#define CFGP2P_ACTION(args) \ + do { \ + if (wl_dbg_level & WL_DBG_P2P_ACTION) { \ + printk(KERN_DEBUG "CFGP2P-ACTION) %s :", __func__); \ + printk args; \ + } \ + } while (0) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +#define INIT_TIMER(timer, func, duration, extra_delay) \ + do { \ + timer_setup(timer, func, duration + extra_delay); \ + add_timer(timer); \ + } while (0); +#else +#define INIT_TIMER(timer, func, duration, extra_delay) \ + do { \ + init_timer(timer); \ + timer->function = func; \ + timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \ + timer->data = (unsigned long) cfg; \ + add_timer(timer); \ + } while (0); +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) && !defined(WL_CFG80211_P2P_DEV_IF) +#define WL_CFG80211_P2P_DEV_IF + +#ifdef WL_ENABLE_P2P_IF +#undef WL_ENABLE_P2P_IF +#endif + +#ifdef WL_SUPPORT_BACKPORTED_KPATCHES +#undef WL_SUPPORT_BACKPORTED_KPATCHES +#endif +#else +#ifdef WLP2P +#ifndef WL_ENABLE_P2P_IF +/* Enable P2P network Interface if P2P support is enabled */ +#define WL_ENABLE_P2P_IF +#endif /* WL_ENABLE_P2P_IF */ +#endif /* WLP2P */ +#endif /* (LINUX_VERSION >= VERSION(3, 8, 0)) */ + +#ifndef WL_CFG80211_P2P_DEV_IF +#endif /* WL_CFG80211_P2P_DEV_IF */ + +#if defined(WL_ENABLE_P2P_IF) && (defined(WL_CFG80211_P2P_DEV_IF) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))) +#error Disable 'WL_ENABLE_P2P_IF', if 'WL_CFG80211_P2P_DEV_IF' is enabled \ + or kernel version is 3.8.0 or above +#endif /* WL_ENABLE_P2P_IF && (WL_CFG80211_P2P_DEV_IF || (LINUX_VERSION >= VERSION(3, 8, 0))) */ + +#if !defined(WLP2P) && (defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)) +#error WLP2P not defined +#endif /* !WLP2P && (WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF) */ + +#if defined(WL_CFG80211_P2P_DEV_IF) +#define bcm_struct_cfgdev struct wireless_dev +#else +#define bcm_struct_cfgdev struct net_device +#endif /* WL_CFG80211_P2P_DEV_IF */ + +#define P2P_ECSA_CNT 50 + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +extern void +wl_cfgp2p_listen_expired(struct timer_list *t); +#else +extern void +wl_cfgp2p_listen_expired(unsigned long data); +#endif + +extern bool +wl_cfgp2p_is_pub_action(void *frame, u32 frame_len); +extern bool +wl_cfgp2p_is_p2p_action(void *frame, u32 
frame_len); +extern bool +wl_cfgp2p_is_gas_action(void *frame, u32 frame_len); +extern bool +wl_cfgp2p_find_gas_subtype(u8 subtype, u8* data, u32 len); +extern bool +wl_cfgp2p_is_p2p_gas_action(void *frame, u32 frame_len); +extern void +wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel); +extern s32 +wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg); +extern void +wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg); +extern s32 +wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg); +extern s32 +wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode, + u32 channel, u16 listen_ms, int bssidx); +extern s32 +wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, + chanspec_t chspec); +extern s32 +wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac); +extern s32 +wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac); +extern s32 +wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, + chanspec_t chspec, s32 conn_idx); + +extern s32 +wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index); + +extern s32 +wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg); +extern s32 +wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev, const u8 *ie, + u32 ie_len); +extern s32 +wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg); +extern s32 +wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active, u32 num_chans, + u16 *channels, + s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr, + p2p_scan_purpose_t p2p_scan_purpose); + +extern s32 +wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev, + s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr); + +extern wpa_ie_fixed_t * +wl_cfgp2p_find_wpaie(u8 *parse, u32 len); + +extern wpa_ie_fixed_t * +wl_cfgp2p_find_wpsie(u8 *parse, u32 len); + +extern wifi_p2p_ie_t * +wl_cfgp2p_find_p2pie(u8 *parse, u32 len); + +extern wifi_wfd_ie_t * +wl_cfgp2p_find_wfdie(u8 *parse, u32 len); +extern s32 +wl_cfgp2p_set_management_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, + s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len); +extern s32 +wl_cfgp2p_clear_management_ie(struct bcm_cfg80211 *cfg, s32 bssidx); + +extern struct net_device * +wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx); +extern s32 +wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type); + + +extern s32 +wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); +extern s32 +wl_cfgp2p_discover_listen(struct bcm_cfg80211 *cfg, s32 channel, u32 duration_ms); + +extern s32 +wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable); + +extern s32 +wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev, + const wl_event_msg_t *e, void *data); + +extern s32 +wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev, + wl_af_params_t *af_params, s32 bssidx); + +extern void +wl_cfgp2p_generate_bss_mac(struct bcm_cfg80211 *cfg, struct ether_addr *primary_addr); + +extern void +wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id); +extern bool +wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx); + +extern s32 +wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 up); + + +extern s32 +wl_cfgp2p_supported(struct bcm_cfg80211 *cfg, struct net_device *ndev); + +extern s32 +wl_cfgp2p_down(struct 
bcm_cfg80211 *cfg); + +extern s32 +wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len); + +extern s32 +wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len); + +extern s32 +wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len); + +extern s32 +wl_cfgp2p_set_p2p_ecsa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len); + +extern u8 * +wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id); + +extern u8* +wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib); + +extern u8 * +wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length); + +extern s32 +wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg); + +extern s32 +wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg); + +extern bool +wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops); + +extern u32 +wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag, + s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd); + +extern int wl_cfgp2p_get_conn_idx(struct bcm_cfg80211 *cfg); + +extern +int wl_cfg_multip2p_operational(struct bcm_cfg80211 *cfg); + +extern +int wl_cfgp2p_vif_created(struct bcm_cfg80211 *cfg); + +#if defined(WL_CFG80211_P2P_DEV_IF) +extern struct wireless_dev * +wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg); + +extern int +wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev); + +extern void +wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev); + +extern int +wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg); + +#endif /* WL_CFG80211_P2P_DEV_IF */ + +extern void +wl_cfgp2p_need_wait_actfrmae(struct bcm_cfg80211 *cfg, void *frame, u32 frame_len, bool tx); + +extern int +wl_cfgp2p_is_p2p_specific_scan(struct cfg80211_scan_request *request); + +/* WiFi Direct */ +#define SOCIAL_CHAN_1 1 +#define SOCIAL_CHAN_2 6 +#define SOCIAL_CHAN_3 11 +#define IS_P2P_SOCIAL_CHANNEL(channel) ((channel == SOCIAL_CHAN_1) || \ + (channel == SOCIAL_CHAN_2) || \ + (channel == SOCIAL_CHAN_3)) +#define SOCIAL_CHAN_CNT 3 +#define AF_PEER_SEARCH_CNT 2 +#define WL_P2P_WILDCARD_SSID "DIRECT-" +#define WL_P2P_WILDCARD_SSID_LEN 7 +#define WL_P2P_INTERFACE_PREFIX "p2p" +#define WL_P2P_TEMP_CHAN 11 +#define WL_P2P_AF_STATUS_OFFSET 9 + +/* If the provision discovery is for JOIN operations, + * or the device discoverablity frame is destined to GO + * then we need not do an internal scan to find GO. 
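+ * (The join case is recognized by the absence of the P2P Group ID
+ * attribute, which is exactly what IS_ACTPUB_WITHOUT_GROUP_ID below tests.)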
+ */ +#define IS_ACTPUB_WITHOUT_GROUP_ID(p2p_ie, len) \ + (wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_GROUP_ID) == NULL) + +#define IS_GAS_REQ(frame, len) (wl_cfgp2p_is_gas_action(frame, len) && \ + ((frame->action == P2PSD_ACTION_ID_GAS_IREQ) || \ + (frame->action == P2PSD_ACTION_ID_GAS_CREQ))) + +#define IS_P2P_PUB_ACT_RSP_SUBTYPE(subtype) ((subtype == P2P_PAF_GON_RSP) || \ + ((subtype == P2P_PAF_GON_CONF) || \ + (subtype == P2P_PAF_INVITE_RSP) || \ + (subtype == P2P_PAF_PROVDIS_RSP))) +#define IS_P2P_SOCIAL(ch) ((ch == SOCIAL_CHAN_1) || (ch == SOCIAL_CHAN_2) || (ch == SOCIAL_CHAN_3)) +#define IS_P2P_SSID(ssid, len) (!memcmp(ssid, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN) && \ + (len == WL_P2P_WILDCARD_SSID_LEN)) +#endif /* _wl_cfgp2p_h_ */ diff --git a/drivers/net/wireless/bcmdhd/wl_cfgvendor.c b/drivers/net/wireless/bcmdhd/wl_cfgvendor.c new file mode 100644 index 000000000000..a8176a2a53ba --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfgvendor.c @@ -0,0 +1,1470 @@ +/* + * Linux cfg80211 Vendor Extension Code + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_cfgvendor.c 605796 2015-12-11 13:45:36Z $ + */ + +/* + * New vendor interface additon to nl80211/cfg80211 to allow vendors + * to implement proprietary features over the cfg80211 stack. +*/ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef PNO_SUPPORT +#include +#endif /* PNO_SUPPORT */ +#ifdef RTT_SUPPORT +#include +#endif /* RTT_SUPPORT */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifdef PROP_TXSTATUS +#include +#endif +#include + +#if defined(WL_VENDOR_EXT_SUPPORT) +/* + * This API is to be used for asynchronous vendor events. This + * shouldn't be used in response to a vendor command from its + * do_it handler context (instead wl_cfgvendor_send_cmd_reply should + * be used). + */ +int wl_cfgvendor_send_async_event(struct wiphy *wiphy, + struct net_device *dev, int event_id, const void *data, int len) +{ + u16 kflags; + struct sk_buff *skb; + + kflags = in_atomic() ? 
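/* atomic context must not sleep, so avoid GFP_KERNEL there */ 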
GFP_ATOMIC : GFP_KERNEL; + + /* Alloc the SKB for vendor_event */ +#if defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC) + skb = cfg80211_vendor_event_alloc(wiphy, NULL, len, event_id, kflags); +#else + skb = cfg80211_vendor_event_alloc(wiphy, len, event_id, kflags); +#endif /* CONFIG_ARCH_MSM && SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC */ + if (!skb) { + WL_ERR(("skb alloc failed")); + return -ENOMEM; + } + + /* Push the data to the skb */ + nla_put_nohdr(skb, len, data); + + cfg80211_vendor_event(skb, kflags); + + return 0; +} + +static int +wl_cfgvendor_send_cmd_reply(struct wiphy *wiphy, + struct net_device *dev, const void *data, int len) +{ + struct sk_buff *skb; + + /* Alloc the SKB for vendor_event */ + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, len); + if (unlikely(!skb)) { + WL_ERR(("skb alloc failed")); + return -ENOMEM; + } + + /* Push the data to the skb */ + nla_put_nohdr(skb, len, data); + + return cfg80211_vendor_cmd_reply(skb); +} + +static int +wl_cfgvendor_get_feature_set(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int reply; + + reply = dhd_dev_get_feature_set(bcmcfg_to_prmry_ndev(cfg)); + + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + &reply, sizeof(int)); + if (unlikely(err)) + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + + return err; +} + +static int +wl_cfgvendor_get_feature_set_matrix(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + struct sk_buff *skb; + int *reply; + int num, mem_needed, i; + + reply = dhd_dev_get_feature_set_matrix(bcmcfg_to_prmry_ndev(cfg), &num); + + if (!reply) { + WL_ERR(("Could not get feature list matrix\n")); + err = -EINVAL; + return err; + } + mem_needed = VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * num) + + ATTRIBUTE_U32_LEN; + + /* Alloc the SKB for vendor_event */ + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed); + if (unlikely(!skb)) { + WL_ERR(("skb alloc failed")); + err = -ENOMEM; + goto exit; + } + + nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, num); + for (i = 0; i < num; i++) { + nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_FEATURE_SET, reply[i]); + } + + err = cfg80211_vendor_cmd_reply(skb); + + if (unlikely(err)) + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + +exit: + kfree(reply); + return err; +} + +static int +wl_cfgvendor_set_pno_mac_oui(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int type; + uint8 pno_random_mac_oui[DOT11_OUI_LEN]; + + type = nla_type(data); + + if (type == ANDR_WIFI_ATTRIBUTE_PNO_RANDOM_MAC_OUI) { + memcpy(pno_random_mac_oui, nla_data(data), DOT11_OUI_LEN); + + err = dhd_dev_pno_set_mac_oui(bcmcfg_to_prmry_ndev(cfg), pno_random_mac_oui); + + if (unlikely(err)) + WL_ERR(("Bad OUI, could not set:%d \n", err)); + + + } else { + err = -1; + } + + return err; +} + +#ifdef CUSTOM_FORCE_NODFS_FLAG +static int +wl_cfgvendor_set_nodfs_flag(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int type; + u32 nodfs; + + type = nla_type(data); + if (type == ANDR_WIFI_ATTRIBUTE_NODFS_SET) { + nodfs = nla_get_u32(data); + err = dhd_dev_set_nodfs(bcmcfg_to_prmry_ndev(cfg), nodfs); + } else { + err = -1; + } + return err; +} +#endif /* 
CUSTOM_FORCE_NODFS_FLAG */ + +#ifdef GSCAN_SUPPORT +int +wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy, + struct net_device *dev, void *data, int len, wl_vendor_event_t event) +{ + u16 kflags; + const void *ptr; + struct sk_buff *skb; + int malloc_len, total, iter_cnt_to_send, cnt; + gscan_results_cache_t *cache = (gscan_results_cache_t *)data; + total = len/sizeof(wifi_gscan_result_t); + while (total > 0) { + malloc_len = (total * sizeof(wifi_gscan_result_t)) + VENDOR_DATA_OVERHEAD; + if (malloc_len > NLMSG_DEFAULT_SIZE) { + malloc_len = NLMSG_DEFAULT_SIZE; + } + iter_cnt_to_send = + (malloc_len - VENDOR_DATA_OVERHEAD)/sizeof(wifi_gscan_result_t); + total = total - iter_cnt_to_send; + + kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + + /* Alloc the SKB for vendor_event */ +#if defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC) + skb = cfg80211_vendor_event_alloc(wiphy, NULL, malloc_len, event, kflags); +#else + skb = cfg80211_vendor_event_alloc(wiphy, malloc_len, event, kflags); +#endif /* CONFIG_ARCH_MSM && SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC */ + if (!skb) { + WL_ERR(("skb alloc failed")); + return -ENOMEM; + } + + while (cache && iter_cnt_to_send) { + ptr = (const void *) &cache->results[cache->tot_consumed]; + + if (iter_cnt_to_send < (cache->tot_count - cache->tot_consumed)) { + cnt = iter_cnt_to_send; + } else { + cnt = (cache->tot_count - cache->tot_consumed); + } + + iter_cnt_to_send -= cnt; + cache->tot_consumed += cnt; + /* Push the data to the skb */ + nla_append(skb, cnt * sizeof(wifi_gscan_result_t), ptr); + if (cache->tot_consumed == cache->tot_count) { + cache = cache->next; + } + + } + + cfg80211_vendor_event(skb, kflags); + } + + return 0; +} + + +static int +wl_cfgvendor_gscan_get_capabilities(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + dhd_pno_gscan_capabilities_t *reply = NULL; + uint32 reply_len = 0; + + + reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_GET_CAPABILITIES, NULL, &reply_len); + if (!reply) { + WL_ERR(("Could not get capabilities\n")); + err = -EINVAL; + return err; + } + + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + reply, reply_len); + + if (unlikely(err)) { + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + } + + kfree(reply); + return err; +} + +static int +wl_cfgvendor_gscan_get_channel_list(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0, type, band; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + uint16 *reply = NULL; + uint32 reply_len = 0, num_channels, mem_needed; + struct sk_buff *skb; + + type = nla_type(data); + + if (type == GSCAN_ATTRIBUTE_BAND) { + band = nla_get_u32(data); + } else { + return -EINVAL; + } + + reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_GET_CHANNEL_LIST, &band, &reply_len); + + if (!reply) { + WL_ERR(("Could not get channel list\n")); + err = -EINVAL; + return err; + } + num_channels = reply_len/ sizeof(uint32); + mem_needed = reply_len + VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * 2); + + /* Alloc the SKB for vendor_event */ + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed); + if (unlikely(!skb)) { + WL_ERR(("skb alloc failed")); + err = -ENOMEM; + goto exit; + } + + nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_CHANNELS, num_channels); + nla_put(skb, GSCAN_ATTRIBUTE_CHANNEL_LIST, reply_len, reply); + + err = cfg80211_vendor_cmd_reply(skb); + + if 
(unlikely(err)) { + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + } +exit: + kfree(reply); + return err; +} + +static int +wl_cfgvendor_gscan_get_batch_results(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + gscan_results_cache_t *results, *iter; + uint32 reply_len, complete = 0, num_results_iter; + int32 mem_needed; + wifi_gscan_result_t *ptr; + uint16 num_scan_ids, num_results; + struct sk_buff *skb; + struct nlattr *scan_hdr; + + dhd_dev_wait_batch_results_complete(bcmcfg_to_prmry_ndev(cfg)); + dhd_dev_pno_lock_access_batch_results(bcmcfg_to_prmry_ndev(cfg)); + results = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_GET_BATCH_RESULTS, NULL, &reply_len); + + if (!results) { + WL_ERR(("No results to send %d\n", err)); + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + results, 0); + + if (unlikely(err)) + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg)); + return err; + } + num_scan_ids = reply_len & 0xFFFF; + num_results = (reply_len & 0xFFFF0000) >> 16; + mem_needed = (num_results * sizeof(wifi_gscan_result_t)) + + (num_scan_ids * GSCAN_BATCH_RESULT_HDR_LEN) + + VENDOR_REPLY_OVERHEAD + SCAN_RESULTS_COMPLETE_FLAG_LEN; + + if (mem_needed > (int32)NLMSG_DEFAULT_SIZE) { + mem_needed = (int32)NLMSG_DEFAULT_SIZE; + complete = 0; + } else { + complete = 1; + } + + WL_TRACE(("complete %d mem_needed %d max_mem %d\n", complete, mem_needed, + (int)NLMSG_DEFAULT_SIZE)); + /* Alloc the SKB for vendor_event */ + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed); + if (unlikely(!skb)) { + WL_ERR(("skb alloc failed")); + dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg)); + return -ENOMEM; + } + iter = results; + + nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE, complete); + mem_needed = mem_needed - (SCAN_RESULTS_COMPLETE_FLAG_LEN + VENDOR_REPLY_OVERHEAD); + while (iter && ((mem_needed - GSCAN_BATCH_RESULT_HDR_LEN) > 0)) { + + scan_hdr = nla_nest_start(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS); + nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id); + nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag); + + num_results_iter = + (mem_needed - GSCAN_BATCH_RESULT_HDR_LEN)/sizeof(wifi_gscan_result_t); + + if ((iter->tot_count - iter->tot_consumed) < num_results_iter) + num_results_iter = iter->tot_count - iter->tot_consumed; + nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter); + if (num_results_iter) { + ptr = &iter->results[iter->tot_consumed]; + iter->tot_consumed += num_results_iter; + nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS, + num_results_iter * sizeof(wifi_gscan_result_t), ptr); + } + nla_nest_end(skb, scan_hdr); + mem_needed -= GSCAN_BATCH_RESULT_HDR_LEN + + (num_results_iter * sizeof(wifi_gscan_result_t)); + iter = iter->next; + } + + dhd_dev_gscan_batch_cache_cleanup(bcmcfg_to_prmry_ndev(cfg)); + dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg)); + + return cfg80211_vendor_cmd_reply(skb); +} + +static int +wl_cfgvendor_initiate_gscan(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int type, tmp = len; + int run = 0xFF; + int flush = 0; + const struct nlattr *iter; + + nla_for_each_attr(iter, data, len, tmp) { + type = nla_type(iter); + if (type == GSCAN_ATTRIBUTE_ENABLE_FEATURE) + run = nla_get_u32(iter); + else if 
(type == GSCAN_ATTRIBUTE_FLUSH_FEATURE) + flush = nla_get_u32(iter); + } + + if (run != 0xFF) { + err = dhd_dev_pno_run_gscan(bcmcfg_to_prmry_ndev(cfg), run, flush); + + if (unlikely(err)) { + WL_ERR(("Could not run gscan:%d \n", err)); + } + return err; + } else { + return -EINVAL; + } + + +} + +static int +wl_cfgvendor_enable_full_scan_result(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int type; + bool real_time = FALSE; + + type = nla_type(data); + + if (type == GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS) { + real_time = nla_get_u32(data); + + err = dhd_dev_pno_enable_full_scan_result(bcmcfg_to_prmry_ndev(cfg), real_time); + + if (unlikely(err)) { + WL_ERR(("Could not run gscan:%d \n", err)); + } + + } else { + err = -EINVAL; + } + + return err; +} + +static int +wl_cfgvendor_set_scan_cfg(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + gscan_scan_params_t *scan_param; + int j = 0; + int type, tmp, tmp1, tmp2, k = 0; + const struct nlattr *iter, *iter1, *iter2; + struct dhd_pno_gscan_channel_bucket *ch_bucket; + + scan_param = kzalloc(sizeof(gscan_scan_params_t), GFP_KERNEL); + if (!scan_param) { + WL_ERR(("Could not set GSCAN scan cfg, mem alloc failure\n")); + err = -EINVAL; + return err; + + } + + scan_param->scan_fr = PNO_SCAN_MIN_FW_SEC; + nla_for_each_attr(iter, data, len, tmp) { + type = nla_type(iter); + + if (j >= GSCAN_MAX_CH_BUCKETS) { + break; + } + + switch (type) { + case GSCAN_ATTRIBUTE_BASE_PERIOD: + scan_param->scan_fr = nla_get_u32(iter)/1000; + break; + case GSCAN_ATTRIBUTE_NUM_BUCKETS: + scan_param->nchannel_buckets = nla_get_u32(iter); + break; + case GSCAN_ATTRIBUTE_CH_BUCKET_1: + case GSCAN_ATTRIBUTE_CH_BUCKET_2: + case GSCAN_ATTRIBUTE_CH_BUCKET_3: + case GSCAN_ATTRIBUTE_CH_BUCKET_4: + case GSCAN_ATTRIBUTE_CH_BUCKET_5: + case GSCAN_ATTRIBUTE_CH_BUCKET_6: + case GSCAN_ATTRIBUTE_CH_BUCKET_7: + nla_for_each_nested(iter1, iter, tmp1) { + type = nla_type(iter1); + ch_bucket = + scan_param->channel_bucket; + + switch (type) { + case GSCAN_ATTRIBUTE_BUCKET_ID: + break; + case GSCAN_ATTRIBUTE_BUCKET_PERIOD: + ch_bucket[j].bucket_freq_multiple = + nla_get_u32(iter1)/1000; + break; + case GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS: + ch_bucket[j].num_channels = + nla_get_u32(iter1); + break; + case GSCAN_ATTRIBUTE_BUCKET_CHANNELS: + nla_for_each_nested(iter2, iter1, tmp2) { + if (k >= PFN_SWC_RSSI_WINDOW_MAX) + break; + ch_bucket[j].chan_list[k] = + nla_get_u32(iter2); + k++; + } + k = 0; + break; + case GSCAN_ATTRIBUTE_BUCKETS_BAND: + ch_bucket[j].band = (uint16) + nla_get_u32(iter1); + break; + case GSCAN_ATTRIBUTE_REPORT_EVENTS: + ch_bucket[j].report_flag = (uint8) + nla_get_u32(iter1); + break; + default: + WL_ERR(("bucket attribute type error %d\n", + type)); + break; + } + } + j++; + break; + default: + WL_ERR(("Unknown type %d\n", type)); + break; + } + } + + if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_SCAN_CFG_ID, scan_param, 0) < 0) { + WL_ERR(("Could not set GSCAN scan cfg\n")); + err = -EINVAL; + } + + kfree(scan_param); + return err; + +} + +static int +wl_cfgvendor_hotlist_cfg(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + gscan_hotlist_scan_params_t *hotlist_params; + int tmp, tmp1, tmp2, type, j = 0, dummy; + const struct nlattr *outer, *inner, *iter; + 
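/* GSCAN_ATTRIBUTE_HOTLIST_BSSIDS arrives as a nested list: each entry
+	 * carries a BSSID plus its low/high RSSI thresholds, parsed below.
+	 */
+	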
uint8 flush = 0; + struct bssid_t *pbssid; + + hotlist_params = (gscan_hotlist_scan_params_t *)kzalloc(len, GFP_KERNEL); + if (!hotlist_params) { + WL_ERR(("Cannot Malloc mem to parse config commands size - %d bytes \n", len)); + return -ENOMEM; + } + + hotlist_params->lost_ap_window = GSCAN_LOST_AP_WINDOW_DEFAULT; + + nla_for_each_attr(iter, data, len, tmp2) { + type = nla_type(iter); + switch (type) { + case GSCAN_ATTRIBUTE_HOTLIST_BSSIDS: + pbssid = hotlist_params->bssid; + nla_for_each_nested(outer, iter, tmp) { + nla_for_each_nested(inner, outer, tmp1) { + type = nla_type(inner); + + switch (type) { + case GSCAN_ATTRIBUTE_BSSID: + memcpy(&(pbssid[j].macaddr), + nla_data(inner), ETHER_ADDR_LEN); + break; + case GSCAN_ATTRIBUTE_RSSI_LOW: + pbssid[j].rssi_reporting_threshold = + (int8) nla_get_u8(inner); + break; + case GSCAN_ATTRIBUTE_RSSI_HIGH: + dummy = (int8) nla_get_u8(inner); + break; + default: + WL_ERR(("ATTR unknown %d\n", + type)); + break; + } + } + j++; + } + hotlist_params->nbssid = j; + break; + case GSCAN_ATTRIBUTE_HOTLIST_FLUSH: + flush = nla_get_u8(iter); + break; + case GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE: + hotlist_params->lost_ap_window = nla_get_u32(iter); + break; + default: + WL_ERR(("Unknown type %d\n", type)); + break; + } + + } + + if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_GEOFENCE_SCAN_CFG_ID, + hotlist_params, flush) < 0) { + WL_ERR(("Could not set GSCAN HOTLIST cfg\n")); + err = -EINVAL; + goto exit; + } +exit: + kfree(hotlist_params); + return err; +} +static int +wl_cfgvendor_set_batch_scan_cfg(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0, tmp, type; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + gscan_batch_params_t batch_param; + const struct nlattr *iter; + + batch_param.mscan = batch_param.bestn = 0; + batch_param.buffer_threshold = GSCAN_BATCH_NO_THR_SET; + + nla_for_each_attr(iter, data, len, tmp) { + type = nla_type(iter); + + switch (type) { + case GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN: + batch_param.bestn = nla_get_u32(iter); + break; + case GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE: + batch_param.mscan = nla_get_u32(iter); + break; + case GSCAN_ATTRIBUTE_REPORT_THRESHOLD: + batch_param.buffer_threshold = nla_get_u32(iter); + break; + default: + WL_ERR(("Unknown type %d\n", type)); + break; + } + } + + if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_BATCH_SCAN_CFG_ID, + &batch_param, 0) < 0) { + WL_ERR(("Could not set batch cfg\n")); + err = -EINVAL; + return err; + } + + return err; +} + +static int +wl_cfgvendor_significant_change_cfg(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + gscan_swc_params_t *significant_params; + int tmp, tmp1, tmp2, type, j = 0; + const struct nlattr *outer, *inner, *iter; + uint8 flush = 0; + wl_pfn_significant_bssid_t *bssid; + + significant_params = (gscan_swc_params_t *) kzalloc(len, GFP_KERNEL); + if (!significant_params) { + WL_ERR(("Cannot Malloc mem to parse config commands size - %d bytes \n", len)); + return -ENOMEM; + } + + nla_for_each_attr(iter, data, len, tmp2) { + type = nla_type(iter); + + switch (type) { + case GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH: + flush = nla_get_u8(iter); + break; + case GSCAN_ATTRIBUTE_RSSI_SAMPLE_SIZE: + significant_params->rssi_window = nla_get_u16(iter); + break; + case GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE: + significant_params->lost_ap_window = nla_get_u16(iter); + break; + case 
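/* swc_threshold: how many APs must breach their RSSI bounds to trigger a significant-change event */ 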
GSCAN_ATTRIBUTE_MIN_BREACHING: + significant_params->swc_threshold = nla_get_u16(iter); + break; + case GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_BSSIDS: + bssid = significant_params->bssid_elem_list; + nla_for_each_nested(outer, iter, tmp) { + nla_for_each_nested(inner, outer, tmp1) { + switch (nla_type(inner)) { + case GSCAN_ATTRIBUTE_BSSID: + memcpy(&(bssid[j].macaddr), + nla_data(inner), + ETHER_ADDR_LEN); + break; + case GSCAN_ATTRIBUTE_RSSI_HIGH: + bssid[j].rssi_high_threshold + = (int8) nla_get_u8(inner); + break; + case GSCAN_ATTRIBUTE_RSSI_LOW: + bssid[j].rssi_low_threshold + = (int8) nla_get_u8(inner); + break; + default: + WL_ERR(("ATTR unknown %d\n", + type)); + break; + } + } + j++; + } + break; + default: + WL_ERR(("Unknown type %d\n", type)); + break; + } + } + significant_params->nbssid = j; + + if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg), + DHD_PNO_SIGNIFICANT_SCAN_CFG_ID, + significant_params, flush) < 0) { + WL_ERR(("Could not set GSCAN significant cfg\n")); + err = -EINVAL; + goto exit; + } +exit: + kfree(significant_params); + return err; +} +#endif /* GSCAN_SUPPORT */ + +#ifdef RTT_SUPPORT +void +wl_cfgvendor_rtt_evt(void *ctx, void *rtt_data) +{ + struct wireless_dev *wdev = (struct wireless_dev *)ctx; + struct wiphy *wiphy; + struct sk_buff *skb; + uint32 tot_len = NLMSG_DEFAULT_SIZE, entry_len = 0; + gfp_t kflags; + rtt_report_t *rtt_report = NULL; + rtt_result_t *rtt_result = NULL; + struct list_head *rtt_list; + wiphy = wdev->wiphy; + + WL_DBG(("In\n")); + /* Push the data to the skb */ + if (!rtt_data) { + WL_ERR(("rtt_data is NULL\n")); + goto exit; + } + rtt_list = (struct list_head *)rtt_data; + kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL; + /* Alloc the SKB for vendor_event */ +#if defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC) + skb = cfg80211_vendor_event_alloc(wiphy, NULL, tot_len, GOOGLE_RTT_COMPLETE_EVENT, kflags); +#else + skb = cfg80211_vendor_event_alloc(wiphy, tot_len, GOOGLE_RTT_COMPLETE_EVENT, kflags); +#endif /* CONFIG_ARCH_MSM && SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC */ + if (!skb) { + WL_ERR(("skb alloc failed")); + goto exit; + } + /* fill in the rtt results on each entry */ + list_for_each_entry(rtt_result, rtt_list, list) { + entry_len = 0; + entry_len = sizeof(rtt_report_t); + rtt_report = kzalloc(entry_len, kflags); + if (!rtt_report) { + WL_ERR(("rtt_report alloc failed")); + kfree_skb(skb); + goto exit; + } + rtt_report->addr = rtt_result->peer_mac; + rtt_report->num_measurement = 1; /* ONE SHOT */ + rtt_report->status = rtt_result->err_code; + rtt_report->type = + (rtt_result->TOF_type == TOF_TYPE_ONE_WAY) ? 
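/* map the measured TOF type onto the HAL's one-way/two-way report type */ 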
RTT_ONE_WAY: RTT_TWO_WAY; + rtt_report->peer = rtt_result->target_info->peer; + rtt_report->channel = rtt_result->target_info->channel; + rtt_report->rssi = rtt_result->avg_rssi; + /* tx_rate */ + rtt_report->tx_rate = rtt_result->tx_rate; + /* RTT */ + rtt_report->rtt = rtt_result->meanrtt; + rtt_report->rtt_sd = rtt_result->sdrtt/10; + /* convert to centi meter */ + if (rtt_result->distance != 0xffffffff) + rtt_report->distance = (rtt_result->distance >> 2) * 25; + else /* invalid distance */ + rtt_report->distance = -1; + rtt_report->ts = rtt_result->ts; + nla_append(skb, entry_len, rtt_report); + kfree(rtt_report); + } + cfg80211_vendor_event(skb, kflags); +exit: + return; +} + +static int +wl_cfgvendor_rtt_set_config(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) { + int err = 0, rem, rem1, rem2, type; + rtt_config_params_t rtt_param; + rtt_target_info_t* rtt_target = NULL; + const struct nlattr *iter, *iter1, *iter2; + int8 eabuf[ETHER_ADDR_STR_LEN]; + int8 chanbuf[CHANSPEC_STR_LEN]; + int32 feature_set = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + feature_set = dhd_dev_get_feature_set(bcmcfg_to_prmry_ndev(cfg)); + + WL_DBG(("In\n")); + err = dhd_dev_rtt_register_noti_callback(wdev->netdev, wdev, wl_cfgvendor_rtt_evt); + if (err < 0) { + WL_ERR(("failed to register rtt_noti_callback\n")); + goto exit; + } + memset(&rtt_param, 0, sizeof(rtt_param)); + nla_for_each_attr(iter, data, len, rem) { + type = nla_type(iter); + switch (type) { + case RTT_ATTRIBUTE_TARGET_CNT: + rtt_param.rtt_target_cnt = nla_get_u8(iter); + if (rtt_param.rtt_target_cnt > RTT_MAX_TARGET_CNT) { + WL_ERR(("exceed max target count : %d\n", + rtt_param.rtt_target_cnt)); + err = BCME_RANGE; + goto exit; + } + break; + case RTT_ATTRIBUTE_TARGET_INFO: + rtt_target = rtt_param.target_info; + nla_for_each_nested(iter1, iter, rem1) { + nla_for_each_nested(iter2, iter1, rem2) { + type = nla_type(iter2); + switch (type) { + case RTT_ATTRIBUTE_TARGET_MAC: + memcpy(&rtt_target->addr, nla_data(iter2), + ETHER_ADDR_LEN); + break; + case RTT_ATTRIBUTE_TARGET_TYPE: + rtt_target->type = nla_get_u8(iter2); + if (!(feature_set & WIFI_FEATURE_D2D_RTT)) { + if (rtt_target->type == RTT_TWO_WAY || + rtt_target->type == RTT_INVALID) { + WL_ERR(("doesn't support RTT type" + " : %d\n", + rtt_target->type)); + err = -EINVAL; + goto exit; + } else if (rtt_target->type == RTT_AUTO) { + rtt_target->type = RTT_ONE_WAY; + } + } else if (rtt_target->type == RTT_INVALID) { + WL_ERR(("doesn't support RTT type" + " : %d\n", + rtt_target->type)); + err = -EINVAL; + goto exit; + } + break; + case RTT_ATTRIBUTE_TARGET_PEER: + rtt_target->peer = nla_get_u8(iter2); + if (rtt_target->peer != RTT_PEER_AP) { + WL_ERR(("doesn't support peer type : %d\n", + rtt_target->peer)); + err = -EINVAL; + goto exit; + } + break; + case RTT_ATTRIBUTE_TARGET_CHAN: + memcpy(&rtt_target->channel, nla_data(iter2), + sizeof(rtt_target->channel)); + break; + case RTT_ATTRIBUTE_TARGET_MODE: + rtt_target->continuous = nla_get_u8(iter2); + break; + case RTT_ATTRIBUTE_TARGET_INTERVAL: + rtt_target->interval = nla_get_u32(iter2); + break; + case RTT_ATTRIBUTE_TARGET_NUM_MEASUREMENT: + rtt_target->measure_cnt = nla_get_u32(iter2); + break; + case RTT_ATTRIBUTE_TARGET_NUM_PKT: + rtt_target->ftm_cnt = nla_get_u32(iter2); + break; + case RTT_ATTRIBUTE_TARGET_NUM_RETRY: + rtt_target->retry_cnt = nla_get_u32(iter2); + } + } + /* convert to chanspec value */ + rtt_target->chanspec = + dhd_rtt_convert_to_chspec(rtt_target->channel); + if 
(rtt_target->chanspec == 0) { + WL_ERR(("Channel is not valid \n")); + err = -EINVAL; + goto exit; + } + WL_INFORM(("Target addr %s, Channel : %s for RTT \n", + bcm_ether_ntoa((const struct ether_addr *)&rtt_target->addr, + eabuf), + wf_chspec_ntoa(rtt_target->chanspec, chanbuf))); + rtt_target++; + } + break; + } + } + WL_DBG(("leave :target_cnt : %d\n", rtt_param.rtt_target_cnt)); + if (dhd_dev_rtt_set_cfg(bcmcfg_to_prmry_ndev(cfg), &rtt_param) < 0) { + WL_ERR(("Could not set RTT configuration\n")); + err = -EINVAL; + } +exit: + return err; +} + +static int +wl_cfgvendor_rtt_cancel_config(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int err = 0, rem, type, target_cnt = 0; + int target_cnt_chk = 0; + const struct nlattr *iter; + struct ether_addr *mac_list = NULL, *mac_addr = NULL; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + + nla_for_each_attr(iter, data, len, rem) { + type = nla_type(iter); + switch (type) { + case RTT_ATTRIBUTE_TARGET_CNT: + if (mac_list != NULL) { + WL_ERR(("mac_list is not NULL\n")); + err = -EINVAL; + goto exit; + } + target_cnt = nla_get_u8(iter); + mac_list = (struct ether_addr *)kzalloc(target_cnt * ETHER_ADDR_LEN, + GFP_KERNEL); + if (mac_list == NULL) { + WL_ERR(("failed to allocate mem for mac list\n")); + err = -ENOMEM; + goto exit; + } + mac_addr = &mac_list[0]; + break; + case RTT_ATTRIBUTE_TARGET_MAC: + if (mac_addr) { + memcpy(mac_addr++, nla_data(iter), ETHER_ADDR_LEN); + target_cnt_chk++; + if (target_cnt_chk > target_cnt) { + WL_ERR(("over target count\n")); + err = -EINVAL; + goto exit; + } + break; + } else { + WL_ERR(("mac_list is NULL\n")); + err = -EINVAL; + goto exit; + } + } + } + if (dhd_dev_rtt_cancel_cfg(bcmcfg_to_prmry_ndev(cfg), mac_list, target_cnt) < 0) { + WL_ERR(("Could not cancel RTT configuration\n")); + err = -EINVAL; + goto exit; + } + +exit: + if (mac_list) { + kfree(mac_list); + } + return err; +} +static int +wl_cfgvendor_rtt_get_capability(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int err = 0; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + rtt_capabilities_t capability; + + err = dhd_dev_rtt_capability(bcmcfg_to_prmry_ndev(cfg), &capability); + if (unlikely(err)) { + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + goto exit; + } + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + &capability, sizeof(capability)); + + if (unlikely(err)) { + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + } +exit: + return err; +} + +#endif /* RTT_SUPPORT */ + +static int +wl_cfgvendor_priv_bcm_handler(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int err = 0; + int data_len = 0; + + WL_INFORM(("%s: Enter \n", __func__)); + + bzero(cfg->ioctl_buf, WLC_IOCTL_MAXLEN); + + if (strncmp((char *)data, BRCM_VENDOR_SCMD_CAPA, strlen(BRCM_VENDOR_SCMD_CAPA)) == 0) { + err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "cap", NULL, 0, + cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + return err; + } + data_len = strlen(cfg->ioctl_buf); + cfg->ioctl_buf[data_len] = '\0'; + } + + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + cfg->ioctl_buf, data_len+1); + if (unlikely(err)) + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + else + WL_INFORM(("Vendor Command reply sent successfully!\n")); + + return err; +} + +#ifdef LINKSTAT_SUPPORT +#define NUM_RATE 32 +#define NUM_PEER 1 +#define NUM_CHAN 11 +#define HEADER_SIZE sizeof(ver_len) +static
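+/* Reply layout assembled by the handler below (reconstructed from the
+ * pointer arithmetic in the function; there is no separate on-wire
+ * header):
+ *
+ *   wifi_radio_stat                  minus the leading ver/len word
+ *   wifi_channel_stat  x NUM_CHAN
+ *   wifi_iface_stat
+ *   wifi_peer_info     x NUM_PEER
+ *   wifi_rate_stat     x NUM_RATE    each minus the ver/len word
+ *
+ * HEADER_SIZE == sizeof(ver_len): every firmware iovar blob starts
+ * with a version/length word that is stripped before copying.
+ */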
int wl_cfgvendor_lstats_get_info(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + static char iovar_buf[WLC_IOCTL_MAXLEN]; + struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); + int err = 0, i; + wifi_iface_stat *iface; + wifi_radio_stat *radio; + wl_wme_cnt_t *wl_wme_cnt; + wl_cnt_v_le10_mcst_t *macstat_cnt; + wl_cnt_wlc_t *wlc_cnt; + scb_val_t scbval; + char *output; + + WL_INFORM(("%s: Enter \n", __func__)); + RETURN_EIO_IF_NOT_UP(cfg); + + bzero(cfg->ioctl_buf, WLC_IOCTL_MAXLEN); + bzero(iovar_buf, WLC_IOCTL_MAXLEN); + + output = cfg->ioctl_buf; + + err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "radiostat", NULL, 0, + iovar_buf, WLC_IOCTL_MAXLEN, NULL); + if (err != BCME_OK && err != BCME_UNSUPPORTED) { + WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wifi_radio_stat))); + return err; + } + radio = (wifi_radio_stat *)iovar_buf; + radio->num_channels = NUM_CHAN; + memcpy(output, iovar_buf+HEADER_SIZE, sizeof(wifi_radio_stat)-HEADER_SIZE); + + output += (sizeof(wifi_radio_stat) - HEADER_SIZE); + output += (NUM_CHAN*sizeof(wifi_channel_stat)); + + err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "wme_counters", NULL, 0, + iovar_buf, WLC_IOCTL_MAXLEN, NULL); + if (unlikely(err)) { + WL_ERR(("error (%d)\n", err)); + return err; + } + wl_wme_cnt = (wl_wme_cnt_t *)iovar_buf; + iface = (wifi_iface_stat *)output; + + iface->ac[WIFI_AC_VO].ac = WIFI_AC_VO; + iface->ac[WIFI_AC_VO].tx_mpdu = wl_wme_cnt->tx[AC_VO].packets; + iface->ac[WIFI_AC_VO].rx_mpdu = wl_wme_cnt->rx[AC_VO].packets; + iface->ac[WIFI_AC_VO].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_VO].packets; + + iface->ac[WIFI_AC_VI].ac = WIFI_AC_VI; + iface->ac[WIFI_AC_VI].tx_mpdu = wl_wme_cnt->tx[AC_VI].packets; + iface->ac[WIFI_AC_VI].rx_mpdu = wl_wme_cnt->rx[AC_VI].packets; + iface->ac[WIFI_AC_VI].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_VI].packets; + + iface->ac[WIFI_AC_BE].ac = WIFI_AC_BE; + iface->ac[WIFI_AC_BE].tx_mpdu = wl_wme_cnt->tx[AC_BE].packets; + iface->ac[WIFI_AC_BE].rx_mpdu = wl_wme_cnt->rx[AC_BE].packets; + iface->ac[WIFI_AC_BE].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_BE].packets; + + iface->ac[WIFI_AC_BK].ac = WIFI_AC_BK; + iface->ac[WIFI_AC_BK].tx_mpdu = wl_wme_cnt->tx[AC_BK].packets; + iface->ac[WIFI_AC_BK].rx_mpdu = wl_wme_cnt->rx[AC_BK].packets; + iface->ac[WIFI_AC_BK].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_BK].packets; + bzero(iovar_buf, WLC_IOCTL_MAXLEN); + + err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "counters", NULL, 0, + iovar_buf, WLC_IOCTL_MAXLEN, NULL); + if (unlikely(err)) { + WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wl_cnt_wlc_t))); + return err; + } + + /* Translate traditional (ver <= 10) counters struct to new xtlv type struct */ + err = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WL_CNTBUF_MAX_SIZE, 0); + if (err != BCME_OK) { + WL_ERR(("%s wl_cntbuf_to_xtlv_format ERR %d\n", __FUNCTION__, err)); + return err; + } + + if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) { + WL_ERR(("%s wlc_cnt NULL!\n", __FUNCTION__)); + return BCME_ERROR; + } + + iface->ac[WIFI_AC_BE].retries = wlc_cnt->txretry; + + if ((macstat_cnt = bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)iovar_buf)->data, + ((wl_cnt_info_t *)iovar_buf)->datalen, + WL_CNT_XTLV_CNTV_LE10_UCODE, NULL, + BCM_XTLV_OPTION_ALIGN32)) == NULL) { + macstat_cnt = bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)iovar_buf)->data, + ((wl_cnt_info_t *)iovar_buf)->datalen, + WL_CNT_XTLV_LT40_UCODE_V1, NULL, + BCM_XTLV_OPTION_ALIGN32); + } + + if (macstat_cnt == NULL) { + WL_ERR(("%s: macstat_cnt NULL!\n", __FUNCTION__)); + return BCME_ERROR; + } + + iface->beacon_rx = macstat_cnt->rxbeaconmbss; + + err = wldev_get_rssi(bcmcfg_to_prmry_ndev(cfg), &scbval); + if (unlikely(err)) { + WL_ERR(("get_rssi error (%d)\n", err)); + return err; + } + iface->rssi_mgmt = scbval.val; + + iface->num_peers = NUM_PEER; + iface->peer_info->num_rate = NUM_RATE; + + bzero(iovar_buf, WLC_IOCTL_MAXLEN); + output = (char *)iface + sizeof(wifi_iface_stat) + NUM_PEER*sizeof(wifi_peer_info); + + err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "ratestat", NULL, 0, + iovar_buf, WLC_IOCTL_MAXLEN, NULL); + if (err != BCME_OK && err != BCME_UNSUPPORTED) { + WL_ERR(("error (%d) - size = %zu\n", err, NUM_RATE*sizeof(wifi_rate_stat))); + return err; + } + for (i = 0; i < NUM_RATE; i++) + memcpy(output, iovar_buf+HEADER_SIZE+i*sizeof(wifi_rate_stat), + sizeof(wifi_rate_stat)-HEADER_SIZE); + + err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg), + cfg->ioctl_buf, + sizeof(wifi_radio_stat)-HEADER_SIZE + + NUM_CHAN*sizeof(wifi_channel_stat) + + sizeof(wifi_iface_stat)+NUM_PEER*sizeof(wifi_peer_info) + + NUM_RATE*(sizeof(wifi_rate_stat)-HEADER_SIZE)); + if (unlikely(err)) + WL_ERR(("Vendor Command reply failed ret:%d \n", err)); + + return err; +} +#endif /* LINKSTAT_SUPPORT */ + +static const struct wiphy_vendor_command wl_vendor_cmds [] = { + { + { + .vendor_id = OUI_BRCM, + .subcmd = BRCM_VENDOR_SCMD_BCM_STR + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_priv_bcm_handler + }, +#ifdef GSCAN_SUPPORT + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_GET_CAPABILITIES + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_gscan_get_capabilities + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_SET_CONFIG + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_set_scan_cfg + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_SET_SCAN_CONFIG + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_set_batch_scan_cfg + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_ENABLE_GSCAN + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_initiate_gscan + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_enable_full_scan_result + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_SET_HOTLIST + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_hotlist_cfg + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_SET_SIGNIFICANT_CHANGE_CONFIG + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_significant_change_cfg + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_GET_SCAN_RESULTS + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_gscan_get_batch_results + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = GSCAN_SUBCMD_GET_CHANNEL_LIST + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_gscan_get_channel_list + }, +#endif /* GSCAN_SUPPORT */ +#ifdef RTT_SUPPORT + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = RTT_SUBCMD_SET_CONFIG + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, +
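+ /* Dispatch sketch: cfg80211 matches NL80211_CMD_VENDOR requests
+  * against the (vendor_id, subcmd) pair of each entry in this table
+  * and calls .doit with the raw NL80211_ATTR_VENDOR_DATA payload.
+  * A libnl-based caller would look roughly like this (illustrative
+  * only; nl80211_id stands for the resolved nl80211 family id):
+  *
+  *   genlmsg_put(msg, 0, 0, nl80211_id, 0, 0, NL80211_CMD_VENDOR, 0);
+  *   nla_put_u32(msg, NL80211_ATTR_VENDOR_ID, OUI_GOOGLE);
+  *   nla_put_u32(msg, NL80211_ATTR_VENDOR_SUBCMD, RTT_SUBCMD_SET_CONFIG);
+  *   nla_put(msg, NL80211_ATTR_VENDOR_DATA, len, data);
+  */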
.doit = wl_cfgvendor_rtt_set_config + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = RTT_SUBCMD_CANCEL_CONFIG + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_rtt_cancel_config + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = RTT_SUBCMD_GETCAPABILITY + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_rtt_get_capability + }, +#endif /* RTT_SUPPORT */ + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_get_feature_set + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_get_feature_set_matrix + }, + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = ANDR_WIFI_PNO_RANDOM_MAC_OUI + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_set_pno_mac_oui + }, +#ifdef CUSTOM_FORCE_NODFS_FLAG + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = ANDR_WIFI_NODFS_CHANNELS + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_set_nodfs_flag + + }, +#endif /* CUSTOM_FORCE_NODFS_FLAG */ +#ifdef LINKSTAT_SUPPORT + { + { + .vendor_id = OUI_GOOGLE, + .subcmd = LSTATS_SUBCMD_GET_INFO + }, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wl_cfgvendor_lstats_get_info + }, +#endif /* LINKSTAT_SUPPORT */ +}; + +static const struct nl80211_vendor_cmd_info wl_vendor_events [] = { + { OUI_BRCM, BRCM_VENDOR_EVENT_UNSPEC }, + { OUI_BRCM, BRCM_VENDOR_EVENT_PRIV_STR }, +#ifdef GSCAN_SUPPORT + { OUI_GOOGLE, GOOGLE_GSCAN_SIGNIFICANT_EVENT }, + { OUI_GOOGLE, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT }, + { OUI_GOOGLE, GOOGLE_GSCAN_BATCH_SCAN_EVENT }, + { OUI_GOOGLE, GOOGLE_SCAN_FULL_RESULTS_EVENT }, +#endif /* GSCAN_SUPPORT */ +#ifdef RTT_SUPPORT + { OUI_GOOGLE, GOOGLE_RTT_COMPLETE_EVENT }, +#endif /* RTT_SUPPORT */ +#ifdef GSCAN_SUPPORT + { OUI_GOOGLE, GOOGLE_SCAN_COMPLETE_EVENT }, + { OUI_GOOGLE, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT }, +#endif /* GSCAN_SUPPORT */ + { OUI_BRCM, BRCM_VENDOR_EVENT_IDSUP_STATUS } +}; + +int wl_cfgvendor_attach(struct wiphy *wiphy) +{ + + WL_INFORM(("Vendor: Register BRCM cfg80211 vendor cmd(0x%x) interface \n", + NL80211_CMD_VENDOR)); + + wiphy->vendor_commands = wl_vendor_cmds; + wiphy->n_vendor_commands = ARRAY_SIZE(wl_vendor_cmds); + wiphy->vendor_events = wl_vendor_events; + wiphy->n_vendor_events = ARRAY_SIZE(wl_vendor_events); + + return 0; +} + +int wl_cfgvendor_detach(struct wiphy *wiphy) +{ + WL_INFORM(("Vendor: Unregister BRCM cfg80211 vendor interface \n")); + + wiphy->vendor_commands = NULL; + wiphy->vendor_events = NULL; + wiphy->n_vendor_commands = 0; + wiphy->n_vendor_events = 0; + + return 0; +} +#endif /* defined(WL_VENDOR_EXT_SUPPORT) */ diff --git a/drivers/net/wireless/bcmdhd/wl_cfgvendor.h b/drivers/net/wireless/bcmdhd/wl_cfgvendor.h new file mode 100644 index 000000000000..0d9b4842931d --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_cfgvendor.h @@ -0,0 +1,267 @@ +/* + * Linux cfg80211 Vendor Extension Code + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at 
http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_cfgvendor.h 605796 2015-12-11 13:45:36Z $ + */ + + +#ifndef _wl_cfgvendor_h_ +#define _wl_cfgvendor_h_ + +#define OUI_BRCM 0x001018 +#define OUI_GOOGLE 0x001A11 +#define BRCM_VENDOR_SUBCMD_PRIV_STR 1 +#define ATTRIBUTE_U32_LEN (NLA_HDRLEN + 4) +#define VENDOR_ID_OVERHEAD ATTRIBUTE_U32_LEN +#define VENDOR_SUBCMD_OVERHEAD ATTRIBUTE_U32_LEN +#define VENDOR_DATA_OVERHEAD (NLA_HDRLEN) + +#define SCAN_RESULTS_COMPLETE_FLAG_LEN ATTRIBUTE_U32_LEN +#define SCAN_INDEX_HDR_LEN (NLA_HDRLEN) +#define SCAN_ID_HDR_LEN ATTRIBUTE_U32_LEN +#define SCAN_FLAGS_HDR_LEN ATTRIBUTE_U32_LEN +#define GSCAN_NUM_RESULTS_HDR_LEN ATTRIBUTE_U32_LEN +#define GSCAN_RESULTS_HDR_LEN (NLA_HDRLEN) +#define GSCAN_BATCH_RESULT_HDR_LEN (SCAN_INDEX_HDR_LEN + SCAN_ID_HDR_LEN + \ + SCAN_FLAGS_HDR_LEN + \ + GSCAN_NUM_RESULTS_HDR_LEN + \ + GSCAN_RESULTS_HDR_LEN) + +#define VENDOR_REPLY_OVERHEAD (VENDOR_ID_OVERHEAD + \ + VENDOR_SUBCMD_OVERHEAD + \ + VENDOR_DATA_OVERHEAD) + +#define GSCAN_ATTR_SET1 10 +#define GSCAN_ATTR_SET2 20 +#define GSCAN_ATTR_SET3 30 +#define GSCAN_ATTR_SET4 40 +#define GSCAN_ATTR_SET5 50 +#define GSCAN_ATTR_SET6 60 + +typedef enum { + /* don't use 0 as a valid subcommand */ + VENDOR_NL80211_SUBCMD_UNSPECIFIED, + + /* define all vendor startup commands between 0x0 and 0x0FFF */ + VENDOR_NL80211_SUBCMD_RANGE_START = 0x0001, + VENDOR_NL80211_SUBCMD_RANGE_END = 0x0FFF, + + /* define all GScan related commands between 0x1000 and 0x10FF */ + ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START = 0x1000, + ANDROID_NL80211_SUBCMD_GSCAN_RANGE_END = 0x10FF, + + /* define all NearbyDiscovery related commands between 0x1100 and 0x11FF */ + ANDROID_NL80211_SUBCMD_NBD_RANGE_START = 0x1100, + ANDROID_NL80211_SUBCMD_NBD_RANGE_END = 0x11FF, + + /* define all RTT related commands between 0x1100 and 0x11FF */ + ANDROID_NL80211_SUBCMD_RTT_RANGE_START = 0x1100, + ANDROID_NL80211_SUBCMD_RTT_RANGE_END = 0x11FF, + + ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START = 0x1200, + ANDROID_NL80211_SUBCMD_LSTATS_RANGE_END = 0x12FF, + + ANDROID_NL80211_SUBCMD_TDLS_RANGE_START = 0x1300, + ANDROID_NL80211_SUBCMD_TDLS_RANGE_END = 0x13FF, + /* This is reserved for future usage */ + +} ANDROID_VENDOR_SUB_COMMAND; + +enum andr_vendor_subcmd { + GSCAN_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START, + GSCAN_SUBCMD_SET_CONFIG, + GSCAN_SUBCMD_SET_SCAN_CONFIG, + GSCAN_SUBCMD_ENABLE_GSCAN, + GSCAN_SUBCMD_GET_SCAN_RESULTS, + GSCAN_SUBCMD_SCAN_RESULTS, + GSCAN_SUBCMD_SET_HOTLIST, + GSCAN_SUBCMD_SET_SIGNIFICANT_CHANGE_CONFIG, + GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS, + GSCAN_SUBCMD_GET_CHANNEL_LIST, + /* ANDR_WIFI_XXX although not related to gscan are defined here */ + ANDR_WIFI_SUBCMD_GET_FEATURE_SET, + 
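+ /* The ANDR_WIFI_* subcommands above and below simply continue the
+  * GSCAN numbering block rather than taking a range of their own.
+  * Note also that ANDROID_NL80211_SUBCMD_NBD_RANGE_* and
+  * ANDROID_NL80211_SUBCMD_RTT_RANGE_* above are both defined as
+  * 0x1100-0x11FF, so the two groups would collide if NBD subcommands
+  * were ever added.
+  */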
ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX, + ANDR_WIFI_PNO_RANDOM_MAC_OUI, + ANDR_WIFI_NODFS_CHANNELS, + RTT_SUBCMD_SET_CONFIG = ANDROID_NL80211_SUBCMD_RTT_RANGE_START, + RTT_SUBCMD_CANCEL_CONFIG, + RTT_SUBCMD_GETCAPABILITY, + + LSTATS_SUBCMD_GET_INFO = ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START, + /* Add more sub commands here */ + VENDOR_SUBCMD_MAX +}; + +enum gscan_attributes { + GSCAN_ATTRIBUTE_NUM_BUCKETS = GSCAN_ATTR_SET1, + GSCAN_ATTRIBUTE_BASE_PERIOD, + GSCAN_ATTRIBUTE_BUCKETS_BAND, + GSCAN_ATTRIBUTE_BUCKET_ID, + GSCAN_ATTRIBUTE_BUCKET_PERIOD, + GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS, + GSCAN_ATTRIBUTE_BUCKET_CHANNELS, + GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN, + GSCAN_ATTRIBUTE_REPORT_THRESHOLD, + GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE, + GSCAN_ATTRIBUTE_BAND = GSCAN_ATTRIBUTE_BUCKETS_BAND, + + GSCAN_ATTRIBUTE_ENABLE_FEATURE = GSCAN_ATTR_SET2, + GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE, + GSCAN_ATTRIBUTE_FLUSH_FEATURE, + GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS, + GSCAN_ATTRIBUTE_REPORT_EVENTS, + /* remaining reserved for additional attributes */ + GSCAN_ATTRIBUTE_NUM_OF_RESULTS = GSCAN_ATTR_SET3, + GSCAN_ATTRIBUTE_FLUSH_RESULTS, + GSCAN_ATTRIBUTE_SCAN_RESULTS, /* flat array of wifi_scan_result */ + GSCAN_ATTRIBUTE_SCAN_ID, /* indicates scan number */ + GSCAN_ATTRIBUTE_SCAN_FLAGS, /* indicates if scan was aborted */ + GSCAN_ATTRIBUTE_AP_FLAGS, /* flags on significant change event */ + GSCAN_ATTRIBUTE_NUM_CHANNELS, + GSCAN_ATTRIBUTE_CHANNEL_LIST, + + /* remaining reserved for additional attributes */ + + GSCAN_ATTRIBUTE_SSID = GSCAN_ATTR_SET4, + GSCAN_ATTRIBUTE_BSSID, + GSCAN_ATTRIBUTE_CHANNEL, + GSCAN_ATTRIBUTE_RSSI, + GSCAN_ATTRIBUTE_TIMESTAMP, + GSCAN_ATTRIBUTE_RTT, + GSCAN_ATTRIBUTE_RTTSD, + + /* remaining reserved for additional attributes */ + + GSCAN_ATTRIBUTE_HOTLIST_BSSIDS = GSCAN_ATTR_SET5, + GSCAN_ATTRIBUTE_RSSI_LOW, + GSCAN_ATTRIBUTE_RSSI_HIGH, + GSCAN_ATTRIBUTE_HOSTLIST_BSSID_ELEM, + GSCAN_ATTRIBUTE_HOTLIST_FLUSH, + + /* remaining reserved for additional attributes */ + GSCAN_ATTRIBUTE_RSSI_SAMPLE_SIZE = GSCAN_ATTR_SET6, + GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE, + GSCAN_ATTRIBUTE_MIN_BREACHING, + GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_BSSIDS, + GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH, + GSCAN_ATTRIBUTE_MAX +}; + +enum gscan_bucket_attributes { + GSCAN_ATTRIBUTE_CH_BUCKET_1, + GSCAN_ATTRIBUTE_CH_BUCKET_2, + GSCAN_ATTRIBUTE_CH_BUCKET_3, + GSCAN_ATTRIBUTE_CH_BUCKET_4, + GSCAN_ATTRIBUTE_CH_BUCKET_5, + GSCAN_ATTRIBUTE_CH_BUCKET_6, + GSCAN_ATTRIBUTE_CH_BUCKET_7 +}; + +enum gscan_ch_attributes { + GSCAN_ATTRIBUTE_CH_ID_1, + GSCAN_ATTRIBUTE_CH_ID_2, + GSCAN_ATTRIBUTE_CH_ID_3, + GSCAN_ATTRIBUTE_CH_ID_4, + GSCAN_ATTRIBUTE_CH_ID_5, + GSCAN_ATTRIBUTE_CH_ID_6, + GSCAN_ATTRIBUTE_CH_ID_7 +}; + +enum rtt_attributes { + RTT_ATTRIBUTE_TARGET_CNT, + RTT_ATTRIBUTE_TARGET_INFO, + RTT_ATTRIBUTE_TARGET_MAC, + RTT_ATTRIBUTE_TARGET_TYPE, + RTT_ATTRIBUTE_TARGET_PEER, + RTT_ATTRIBUTE_TARGET_CHAN, + RTT_ATTRIBUTE_TARGET_MODE, + RTT_ATTRIBUTE_TARGET_INTERVAL, + RTT_ATTRIBUTE_TARGET_NUM_MEASUREMENT, + RTT_ATTRIBUTE_TARGET_NUM_PKT, + RTT_ATTRIBUTE_TARGET_NUM_RETRY +}; + +typedef enum wl_vendor_event { + BRCM_VENDOR_EVENT_UNSPEC, + BRCM_VENDOR_EVENT_PRIV_STR, + GOOGLE_GSCAN_SIGNIFICANT_EVENT, + GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT, + GOOGLE_GSCAN_BATCH_SCAN_EVENT, + GOOGLE_SCAN_FULL_RESULTS_EVENT, + GOOGLE_RTT_COMPLETE_EVENT, + GOOGLE_SCAN_COMPLETE_EVENT, + GOOGLE_GSCAN_GEOFENCE_LOST_EVENT, + BRCM_VENDOR_EVENT_IDSUP_STATUS +} wl_vendor_event_t; + +enum andr_wifi_attr { + ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, + 
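+ /* These attribute IDs, like the GSCAN_ATTR_SET1..SET6 blocks above
+  * (spaced ten apart so each group can grow without renumbering),
+  * appear to mirror the corresponding Android Wi-Fi HAL definitions
+  * and must stay in sync with userspace.
+  */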
ANDR_WIFI_ATTRIBUTE_FEATURE_SET, + ANDR_WIFI_ATTRIBUTE_PNO_RANDOM_MAC_OUI, + ANDR_WIFI_ATTRIBUTE_NODFS_SET +}; + +typedef enum wl_vendor_gscan_attribute { + ATTR_START_GSCAN, + ATTR_STOP_GSCAN, + ATTR_SET_SCAN_BATCH_CFG_ID, /* set batch scan params */ + ATTR_SET_SCAN_GEOFENCE_CFG_ID, /* set list of bssids to track */ + ATTR_SET_SCAN_SIGNIFICANT_CFG_ID, /* set list of bssids, rssi threshold etc.. */ + ATTR_SET_SCAN_CFG_ID, /* set common scan config params here */ + ATTR_GET_GSCAN_CAPABILITIES_ID, + /* Add more sub commands here */ + ATTR_GSCAN_MAX +} wl_vendor_gscan_attribute_t; + +typedef enum gscan_batch_attribute { + ATTR_GSCAN_BATCH_BESTN, + ATTR_GSCAN_BATCH_MSCAN, + ATTR_GSCAN_BATCH_BUFFER_THRESHOLD +} gscan_batch_attribute_t; + +typedef enum gscan_geofence_attribute { + ATTR_GSCAN_NUM_HOTLIST_BSSID, + ATTR_GSCAN_HOTLIST_BSSID +} gscan_geofence_attribute_t; + +typedef enum gscan_complete_event { + WIFI_SCAN_BUFFER_FULL, + WIFI_SCAN_COMPLETE +} gscan_complete_event_t; + +#if defined(WL_VENDOR_EXT_SUPPORT) || defined(CONFIG_BCMDHD_VENDOR_EXT) +extern int wl_cfgvendor_attach(struct wiphy *wiphy); +extern int wl_cfgvendor_detach(struct wiphy *wiphy); +extern int wl_cfgvendor_send_async_event(struct wiphy *wiphy, + struct net_device *dev, int event_id, const void *data, int len); +extern int wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy, + struct net_device *dev, void *data, int len, wl_vendor_event_t event); +#else +static INLINE int wl_cfgvendor_attach(struct wiphy *wiphy) { return 0; } +static INLINE int wl_cfgvendor_detach(struct wiphy *wiphy) { return 0; } +#endif /* defined(WL_VENDOR_EXT_SUPPORT) || defined(CONFIG_BCMDHD_VENDOR_EXT) */ + +#endif /* _wl_cfgvendor_h_ */ diff --git a/drivers/net/wireless/bcmdhd/wl_dbg.h b/drivers/net/wireless/bcmdhd/wl_dbg.h new file mode 100644 index 000000000000..291911611daa --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_dbg.h @@ -0,0 +1,211 @@ +/* + * Minimal debug/trace/assert driver definitions for + * Broadcom 802.11 Networking Adapter. + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_dbg.h 519338 2014-12-05 21:23:30Z $ + */ + + +#ifndef _wl_dbg_h_ +#define _wl_dbg_h_ + +/* wl_msg_level is a bit vector with defs in wlioctl.h */ +extern uint32 wl_msg_level; +extern uint32 wl_msg_level2; + +#define WL_TIMESTAMP() + +#define WL_PRINT(args) do { WL_TIMESTAMP(); printf args; } while (0) + +#if defined(EVENT_LOG_COMPILE) && defined(WLMSG_SRSCAN) +#define _WL_SRSCAN(fmt, ...)
EVENT_LOG(EVENT_LOG_TAG_SRSCAN, fmt, ##__VA_ARGS__) +#define WL_SRSCAN(args) _WL_SRSCAN args +#else +#define WL_SRSCAN(args) +#endif + +#if defined(BCMCONDITIONAL_LOGGING) + +/* Ideally this should be some include file that vendors can include to conditionalize logging */ + +/* DBGONLY() macro to reduce ifdefs in code for statements that are only needed when + * BCMDBG is defined. + */ +#define DBGONLY(x) + +/* To disable a message completely ... until you need it again */ +#define WL_NONE(args) +#define WL_ERROR(args) do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args);} while (0) +#define WL_TRACE(args) +#define WL_PRHDRS_MSG(args) +#define WL_PRHDRS(i, p, f, t, r, l) +#define WL_PRPKT(m, b, n) +#define WL_INFORM(args) +#define WL_TMP(args) +#define WL_OID(args) +#define WL_RATE(args) do {if (wl_msg_level & WL_RATE_VAL) WL_PRINT(args);} while (0) +#define WL_ASSOC(args) do {if (wl_msg_level & WL_ASSOC_VAL) WL_PRINT(args);} while (0) +#define WL_PRUSR(m, b, n) +#define WL_PS(args) do {if (wl_msg_level & WL_PS_VAL) WL_PRINT(args);} while (0) + +#define WL_PORT(args) +#define WL_DUAL(args) +#define WL_REGULATORY(args) do {if (wl_msg_level & WL_REGULATORY_VAL) WL_PRINT(args);} while (0) + +#define WL_MPC(args) +#define WL_APSTA(args) +#define WL_APSTA_BCN(args) +#define WL_APSTA_TX(args) +#define WL_APSTA_TSF(args) +#define WL_APSTA_BSSID(args) +#define WL_BA(args) +#define WL_MBSS(args) +#define WL_PROTO(args) + +#define WL_CAC(args) do {if (wl_msg_level & WL_CAC_VAL) WL_PRINT(args);} while (0) +#define WL_AMSDU(args) +#define WL_AMPDU(args) +#define WL_FFPLD(args) +#define WL_MCHAN(args) + +#define WL_DFS(args) +#define WL_WOWL(args) +#define WL_DPT(args) +#define WL_ASSOC_OR_DPT(args) +#define WL_SCAN(args) do {if (wl_msg_level2 & WL_SCAN_VAL) WL_PRINT(args);} while (0) +#define WL_COEX(args) +#define WL_RTDC(w, s, i, j) +#define WL_RTDC2(w, s, i, j) +#define WL_CHANINT(args) +#define WL_BTA(args) +#define WL_P2P(args) +#define WL_ITFR(args) +#define WL_TDLS(args) +#define WL_MCNX(args) +#define WL_PROT(args) +#define WL_PSTA(args) +#define WL_WFDS(m, b, n) +#define WL_TRF_MGMT(args) +#define WL_L2FILTER(args) +#define WL_MQ(args) +#define WL_TXBF(args) +#define WL_P2PO(args) +#define WL_ROAM(args) +#define WL_WNM(args) + + +#define WL_AMPDU_UPDN(args) +#define WL_AMPDU_RX(args) +#define WL_AMPDU_ERR(args) +#define WL_AMPDU_TX(args) +#define WL_AMPDU_CTL(args) +#define WL_AMPDU_HW(args) +#define WL_AMPDU_HWTXS(args) +#define WL_AMPDU_HWDBG(args) +#define WL_AMPDU_STAT(args) +#define WL_AMPDU_ERR_ON() 0 +#define WL_AMPDU_HW_ON() 0 +#define WL_AMPDU_HWTXS_ON() 0 + +#define WL_APSTA_UPDN(args) +#define WL_APSTA_RX(args) +#define WL_WSEC(args) +#define WL_WSEC_DUMP(args) +#define WL_PCIE(args) +#define WL_TSLOG(w, s, i, j) +#define WL_FBT(args) + +#define WL_ERROR_ON() (wl_msg_level & WL_ERROR_VAL) +#define WL_TRACE_ON() 0 +#define WL_PRHDRS_ON() 0 +#define WL_PRPKT_ON() 0 +#define WL_INFORM_ON() 0 +#define WL_TMP_ON() 0 +#define WL_OID_ON() 0 +#define WL_RATE_ON() (wl_msg_level & WL_RATE_VAL) +#define WL_ASSOC_ON() (wl_msg_level & WL_ASSOC_VAL) +#define WL_PRUSR_ON() 0 +#define WL_PS_ON() (wl_msg_level & WL_PS_VAL) +#define WL_PORT_ON() 0 +#define WL_WSEC_ON() 0 +#define WL_WSEC_DUMP_ON() 0 +#define WL_MPC_ON() 0 +#define WL_REGULATORY_ON() (wl_msg_level & WL_REGULATORY_VAL) +#define WL_APSTA_ON() 0 +#define WL_DFS_ON() 0 +#define WL_MBSS_ON() 0 +#define WL_CAC_ON() (wl_msg_level & WL_CAC_VAL) +#define WL_AMPDU_ON() 0 +#define WL_DPT_ON() 0 +#define WL_WOWL_ON() 0 +#define WL_SCAN_ON() 
(wl_msg_level2 & WL_SCAN_VAL) +#define WL_BTA_ON() 0 +#define WL_P2P_ON() 0 +#define WL_ITFR_ON() 0 +#define WL_MCHAN_ON() 0 +#define WL_TDLS_ON() 0 +#define WL_MCNX_ON() 0 +#define WL_PROT_ON() 0 +#define WL_PSTA_ON() 0 +#define WL_TRF_MGMT_ON() 0 +#define WL_LPC_ON() 0 +#define WL_L2FILTER_ON() 0 +#define WL_TXBF_ON() 0 +#define WL_P2PO_ON() 0 +#define WL_TSLOG_ON() 0 +#define WL_WNM_ON() 0 +#define WL_PCIE_ON() 0 + +#else /* !BCMDBG */ + +/* DBGONLY() macro to reduce ifdefs in code for statements that are only needed when + * BCMDBG is defined. + */ +#define DBGONLY(x) + +/* To disable a message completely ... until you need it again */ +#define WL_NONE(args) + +#define WL_ERROR(args) +#define WL_TRACE(args) +#define WL_APSTA_UPDN(args) +#define WL_APSTA_RX(args) +#ifdef WLMSG_WSEC +#define WL_WSEC(args) WL_PRINT(args) +#define WL_WSEC_DUMP(args) WL_PRINT(args) +#else +#define WL_WSEC(args) +#define WL_WSEC_DUMP(args) +#endif +#define WL_PCIE(args) do {if (wl_msg_level2 & WL_PCIE_VAL) WL_PRINT(args);} while (0) +#define WL_PCIE_ON() (wl_msg_level2 & WL_PCIE_VAL) +#define WL_PFN(args) do {if (wl_msg_level & WL_PFN_VAL) WL_PRINT(args);} while (0) +#define WL_PFN_ON() (wl_msg_level & WL_PFN_VAL) +#endif + +extern uint32 wl_msg_level; +extern uint32 wl_msg_level2; +#endif /* _wl_dbg_h_ */ diff --git a/drivers/net/wireless/bcmdhd/wl_iw.c b/drivers/net/wireless/bcmdhd/wl_iw.c new file mode 100644 index 000000000000..92d9aa1614a2 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_iw.c @@ -0,0 +1,3811 @@ +/* + * Linux Wireless Extensions support + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_iw.c 591286 2015-10-07 11:59:26Z $ + */ + +#if defined(USE_IW) +#define LINUX_PORT + +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include + +typedef const struct si_pub si_t; + +#include +#include + + +/* Broadcom extensions to WEXT, linux upstream has obsoleted WEXT */ +#ifndef IW_AUTH_KEY_MGMT_FT_802_1X +#define IW_AUTH_KEY_MGMT_FT_802_1X 0x04 +#endif + +#ifndef IW_AUTH_KEY_MGMT_FT_PSK +#define IW_AUTH_KEY_MGMT_FT_PSK 0x08 +#endif + +#ifndef IW_ENC_CAPA_FW_ROAM_ENABLE +#define IW_ENC_CAPA_FW_ROAM_ENABLE 0x00000020 +#endif + + +/* FC9: wireless.h 2.6.25-14.fc9.i686 is missing these, even though WIRELESS_EXT is set to latest + * version 22. 
+ */ +#ifndef IW_ENCODE_ALG_PMK +#define IW_ENCODE_ALG_PMK 4 +#endif +#ifndef IW_ENC_CAPA_4WAY_HANDSHAKE +#define IW_ENC_CAPA_4WAY_HANDSHAKE 0x00000010 +#endif +/* End FC9. */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) +#include +#endif +#if defined(SOFTAP) +struct net_device *ap_net_dev = NULL; +tsk_ctl_t ap_eth_ctl; /* apsta AP netdev waiter thread */ +#endif /* SOFTAP */ + +extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status, + uint32 reason, char* stringBuf, uint buflen); + +uint wl_msg_level = WL_ERROR_VAL; + +#define MAX_WLIW_IOCTL_LEN 1024 + +/* IOCTL swapping mode for Big Endian host with Little Endian dongle. Default to off */ +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) + +extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); +extern int dhd_wait_pend8021x(struct net_device *dev); + +#if WIRELESS_EXT < 19 +#define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST) +#define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST) +#endif /* WIRELESS_EXT < 19 */ + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) +#define DAEMONIZE(a) do { \ + allow_signal(SIGKILL); \ + allow_signal(SIGTERM); \ + } while (0) +#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))) +#define DAEMONIZE(a) daemonize(a); \ + allow_signal(SIGKILL); \ + allow_signal(SIGTERM); +#else /* Linux 2.4 (w/o preemption patch) */ +#define RAISE_RX_SOFTIRQ() \ + cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ) +#define DAEMONIZE(a) daemonize(); \ + do { if (a) \ + strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \ + } while (0); +#endif /* LINUX_VERSION_CODE */ + +#define ISCAN_STATE_IDLE 0 +#define ISCAN_STATE_SCANING 1 + +/* the buf lengh can be WLC_IOCTL_MAXLEN (8K) to reduce iteration */ +#define WLC_IW_ISCAN_MAXLEN 2048 +typedef struct iscan_buf { + struct iscan_buf * next; + char iscan_buf[WLC_IW_ISCAN_MAXLEN]; +} iscan_buf_t; + +typedef struct iscan_info { + struct net_device *dev; + struct timer_list timer; + uint32 timer_ms; + uint32 timer_on; + int iscan_state; + iscan_buf_t * list_hdr; + iscan_buf_t * list_cur; + + /* Thread to work on iscan */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + struct task_struct *kthread; +#endif + long sysioc_pid; + struct semaphore sysioc_sem; + struct completion sysioc_exited; + + + char ioctlbuf[WLC_IOCTL_SMLEN]; +} iscan_info_t; +iscan_info_t *g_iscan = NULL; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +static void wl_iw_timerfunc(struct timer_list *t); +#else +static void wl_iw_timerfunc(ulong data); +#endif +static void wl_iw_set_event_mask(struct net_device *dev); +static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action); + +/* priv_link becomes netdev->priv and is the link between netdev and wlif struct */ +typedef struct priv_link { + wl_iw_t *wliw; +} priv_link_t; + +/* dev to priv_link */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) +#define WL_DEV_LINK(dev) (priv_link_t*)(dev->priv) +#else +#define WL_DEV_LINK(dev) (priv_link_t*)netdev_priv(dev) +#endif + +/* dev to wl_iw_t */ +#define IW_DEV_IF(dev) ((wl_iw_t*)(WL_DEV_LINK(dev))->wliw) + +static void swap_key_from_BE( + wl_wsec_key_t *key +) +{ + key->index = htod32(key->index); + key->len = htod32(key->len); + key->algo = htod32(key->algo); + key->flags = htod32(key->flags); + key->rxiv.hi = htod32(key->rxiv.hi); + key->rxiv.lo = htod16(key->rxiv.lo); + 
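+ /* With the identity htod32()/htod16() macros defined above (the
+  * big-endian-host swapping mode is off by default), this function and
+  * swap_key_to_BE() below compile down to plain self-assignments; they
+  * only perform real byte swaps when the htod/dtoh macros are
+  * redefined for a big-endian host driving a little-endian dongle.
+  */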
key->iv_initialized = htod32(key->iv_initialized); +} + +static void swap_key_to_BE( + wl_wsec_key_t *key +) +{ + key->index = dtoh32(key->index); + key->len = dtoh32(key->len); + key->algo = dtoh32(key->algo); + key->flags = dtoh32(key->flags); + key->rxiv.hi = dtoh32(key->rxiv.hi); + key->rxiv.lo = dtoh16(key->rxiv.lo); + key->iv_initialized = dtoh32(key->iv_initialized); +} + +static int +dev_wlc_ioctl( + struct net_device *dev, + int cmd, + void *arg, + int len +) +{ + struct ifreq ifr; + wl_ioctl_t ioc; + mm_segment_t fs; + int ret; + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = cmd; + ioc.buf = arg; + ioc.len = len; + + strncpy(ifr.ifr_name, dev->name, sizeof(ifr.ifr_name)); + ifr.ifr_name[sizeof(ifr.ifr_name) - 1] = '\0'; + ifr.ifr_data = (caddr_t) &ioc; + + fs = get_fs(); + set_fs(get_ds()); +#if defined(WL_USE_NETDEV_OPS) + ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE); +#else + ret = dev->do_ioctl(dev, &ifr, SIOCDEVPRIVATE); +#endif + set_fs(fs); + + return ret; +} + +/* +set named driver variable to int value and return error indication +calling example: dev_wlc_intvar_set(dev, "arate", rate) +*/ + +static int +dev_wlc_intvar_set( + struct net_device *dev, + char *name, + int val) +{ + char buf[WLC_IOCTL_SMLEN]; + uint len; + + val = htod32(val); + len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf)); + ASSERT(len); + + return (dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len)); +} + +static int +dev_iw_iovar_setbuf( + struct net_device *dev, + char *iovar, + void *param, + int paramlen, + void *bufptr, + int buflen) +{ + int iolen; + + iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen); + ASSERT(iolen); + BCM_REFERENCE(iolen); + + return (dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen)); +} + +static int +dev_iw_iovar_getbuf( + struct net_device *dev, + char *iovar, + void *param, + int paramlen, + void *bufptr, + int buflen) +{ + int iolen; + + iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen); + ASSERT(iolen); + BCM_REFERENCE(iolen); + + return (dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen)); +} + +#if WIRELESS_EXT > 17 +static int +dev_wlc_bufvar_set( + struct net_device *dev, + char *name, + char *buf, int len) +{ + char *ioctlbuf; + uint buflen; + int error; + + ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL); + if (!ioctlbuf) + return -ENOMEM; + + buflen = bcm_mkiovar(name, buf, len, ioctlbuf, MAX_WLIW_IOCTL_LEN); + ASSERT(buflen); + error = dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen); + + kfree(ioctlbuf); + return error; +} +#endif /* WIRELESS_EXT > 17 */ + +/* +get named driver variable to int value and return error indication +calling example: dev_wlc_bufvar_get(dev, "arate", &rate) +*/ + +static int +dev_wlc_bufvar_get( + struct net_device *dev, + char *name, + char *buf, int buflen) +{ + char *ioctlbuf; + int error; + + uint len; + + ioctlbuf = kmalloc(MAX_WLIW_IOCTL_LEN, GFP_KERNEL); + if (!ioctlbuf) + return -ENOMEM; + len = bcm_mkiovar(name, NULL, 0, ioctlbuf, MAX_WLIW_IOCTL_LEN); + ASSERT(len); + BCM_REFERENCE(len); + error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf, MAX_WLIW_IOCTL_LEN); + if (!error) + bcopy(ioctlbuf, buf, buflen); + + kfree(ioctlbuf); + return (error); +} + +/* +get named driver variable to int value and return error indication +calling example: dev_wlc_intvar_get(dev, "arate", &rate) +*/ + +static int +dev_wlc_intvar_get( + struct net_device *dev, + char *name, + int *retval) +{ + union { + char buf[WLC_IOCTL_SMLEN]; + int val; + } var; + int error; + + uint len; + uint data_null; 
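+ /* The union above overlays the scratch buffer and an int:
+  * bcm_mkiovar() serializes "name\0" plus a zero-length parameter into
+  * var.buf, WLC_GET_VAR overwrites the same buffer with the reply, and
+  * the first four bytes are then read back through var.val. data_null
+  * exists only to hand bcm_mkiovar() a valid parameter pointer.
+  */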
+ + len = bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf)); + ASSERT(len); + error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len); + + *retval = dtoh32(var.val); + + return (error); +} + +/* Maintain backward compatibility */ +#if WIRELESS_EXT < 13 +struct iw_request_info +{ + __u16 cmd; /* Wireless Extension command */ + __u16 flags; /* More to come ;-) */ +}; + +typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info, + void *wrqu, char *extra); +#endif /* WIRELESS_EXT < 13 */ + +#if WIRELESS_EXT > 12 +static int +wl_iw_set_leddc( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int dc = *(int *)extra; + int error; + + error = dev_wlc_intvar_set(dev, "leddc", dc); + return error; +} + +static int +wl_iw_set_vlanmode( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int mode = *(int *)extra; + int error; + + mode = htod32(mode); + error = dev_wlc_intvar_set(dev, "vlan_mode", mode); + return error; +} + +static int +wl_iw_set_pm( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + int pm = *(int *)extra; + int error; + + pm = htod32(pm); + error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm)); + return error; +} + +#if WIRELESS_EXT > 17 +#endif /* WIRELESS_EXT > 17 */ +#endif /* WIRELESS_EXT > 12 */ + +int +wl_iw_send_priv_event( + struct net_device *dev, + char *flag +) +{ + union iwreq_data wrqu; + char extra[IW_CUSTOM_MAX + 1]; + int cmd; + + cmd = IWEVCUSTOM; + memset(&wrqu, 0, sizeof(wrqu)); + if (strlen(flag) > sizeof(extra)) + return -1; + + strncpy(extra, flag, sizeof(extra)); + extra[sizeof(extra) - 1] = '\0'; + wrqu.data.length = strlen(extra); + wireless_send_event(dev, cmd, &wrqu, extra); + WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra)); + + return 0; +} + +static int +wl_iw_config_commit( + struct net_device *dev, + struct iw_request_info *info, + void *zwrq, + char *extra +) +{ + wlc_ssid_t ssid; + int error; + struct sockaddr bssid; + + WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) + return error; + + ssid.SSID_len = dtoh32(ssid.SSID_len); + + if (!ssid.SSID_len) + return 0; + + bzero(&bssid, sizeof(struct sockaddr)); + if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) { + WL_ERROR(("%s: WLC_REASSOC failed (%d)\n", __FUNCTION__, error)); + return error; + } + + return 0; +} + +static int +wl_iw_get_name( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *cwrq, + char *extra +) +{ + int phytype, err; + uint band[3]; + char cap[5]; + + WL_TRACE(("%s: SIOCGIWNAME\n", dev->name)); + + cap[0] = 0; + if ((err = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))) < 0) + goto done; + if ((err = dev_wlc_ioctl(dev, WLC_GET_BANDLIST, band, sizeof(band))) < 0) + goto done; + + band[0] = dtoh32(band[0]); + switch (phytype) { + case WLC_PHY_TYPE_A: + strncpy(cap, "a", sizeof(cap)); + break; + case WLC_PHY_TYPE_B: + strncpy(cap, "b", sizeof(cap)); + break; + case WLC_PHY_TYPE_G: + if (band[0] >= 2) + strncpy(cap, "abg", sizeof(cap)); + else + strncpy(cap, "bg", sizeof(cap)); + break; + case WLC_PHY_TYPE_N: + if (band[0] >= 2) + strncpy(cap, "abgn", sizeof(cap)); + else + strncpy(cap, "bgn", sizeof(cap)); + break; + } +done: + (void)snprintf(cwrq->name, IFNAMSIZ, "IEEE 802.11%s", cap); + + return 0; +} + +static int +wl_iw_set_freq( + 
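+ /* SIOCSIWFREQ carries either a channel number (e == 0 and
+  * m < MAXCHANNEL) or a frequency of m x 10^e Hz. The body below
+  * normalizes the frequency form to MHz (e == 6) and converts with
+  * wf_mhz2channel(). Worked example: 2.437 GHz arriving as m = 2437,
+  * e = 6 maps to 2.4 GHz channel 6; 4000-5000 MHz inputs use the
+  * Japan 4.9 GHz channelization start factor instead.
+  */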
struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *fwrq, + char *extra +) +{ + int error, chan; + uint sf = 0; + + WL_TRACE(("%s: SIOCSIWFREQ\n", dev->name)); + + /* Setting by channel number */ + if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) { + chan = fwrq->m; + } + + /* Setting by frequency */ + else { + /* Convert to MHz as best we can */ + if (fwrq->e >= 6) { + fwrq->e -= 6; + while (fwrq->e--) + fwrq->m *= 10; + } else if (fwrq->e < 6) { + while (fwrq->e++ < 6) + fwrq->m /= 10; + } + /* handle 4.9GHz frequencies as Japan 4 GHz based channelization */ + if (fwrq->m > 4000 && fwrq->m < 5000) + sf = WF_CHAN_FACTOR_4_G; /* start factor for 4 GHz */ + + chan = wf_mhz2channel(fwrq->m, sf); + } + chan = htod32(chan); + if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan)))) + return error; + + /* -EINPROGRESS: Call commit handler */ + return -EINPROGRESS; +} + +static int +wl_iw_get_freq( + struct net_device *dev, + struct iw_request_info *info, + struct iw_freq *fwrq, + char *extra +) +{ + channel_info_t ci; + int error; + + WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci)))) + return error; + + /* Return radio channel in channel form */ + fwrq->m = dtoh32(ci.hw_channel); + fwrq->e = dtoh32(0); + return 0; +} + +static int +wl_iw_set_mode( + struct net_device *dev, + struct iw_request_info *info, + __u32 *uwrq, + char *extra +) +{ + int infra = 0, ap = 0, error = 0; + + WL_TRACE(("%s: SIOCSIWMODE\n", dev->name)); + + switch (*uwrq) { + case IW_MODE_MASTER: + infra = ap = 1; + break; + case IW_MODE_ADHOC: + case IW_MODE_AUTO: + break; + case IW_MODE_INFRA: + infra = 1; + break; + default: + return -EINVAL; + } + infra = htod32(infra); + ap = htod32(ap); + + if ((error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra))) || + (error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap)))) + return error; + + /* -EINPROGRESS: Call commit handler */ + return -EINPROGRESS; +} + +static int +wl_iw_get_mode( + struct net_device *dev, + struct iw_request_info *info, + __u32 *uwrq, + char *extra +) +{ + int error, infra = 0, ap = 0; + + WL_TRACE(("%s: SIOCGIWMODE\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra))) || + (error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap)))) + return error; + + infra = dtoh32(infra); + ap = dtoh32(ap); + *uwrq = infra ? ap ? 
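+ /* Mapping produced by the nested ternary below:
+  *   infra == 0              -> IW_MODE_ADHOC
+  *   infra != 0 && ap == 0   -> IW_MODE_INFRA
+  *   infra != 0 && ap != 0   -> IW_MODE_MASTER
+  */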
IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC; + + return 0; +} + +static int +wl_iw_get_range( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + struct iw_range *range = (struct iw_range *) extra; + static int channels[MAXCHANNEL+1]; + wl_uint32_list_t *list = (wl_uint32_list_t *) channels; + wl_rateset_t rateset; + int error, i, k; + uint sf, ch; + + int phytype; + int bw_cap = 0, sgi_tx = 0, nmode = 0; + channel_info_t ci; + uint8 nrate_list2copy = 0; + uint16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130}, + {14, 29, 43, 58, 87, 116, 130, 144}, + {27, 54, 81, 108, 162, 216, 243, 270}, + {30, 60, 90, 120, 180, 240, 270, 300}}; + int fbt_cap = 0; + + WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name)); + + if (!extra) + return -EINVAL; + + dwrq->length = sizeof(struct iw_range); + memset(range, 0, sizeof(*range)); + + /* We don't use nwids */ + range->min_nwid = range->max_nwid = 0; + + /* Set available channels/frequencies */ + list->count = htod32(MAXCHANNEL); + if ((error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels, sizeof(channels)))) + return error; + for (i = 0; i < dtoh32(list->count) && i < IW_MAX_FREQUENCIES; i++) { + range->freq[i].i = dtoh32(list->element[i]); + + ch = dtoh32(list->element[i]); + if (ch <= CH_MAX_2G_CHANNEL) + sf = WF_CHAN_FACTOR_2_4_G; + else + sf = WF_CHAN_FACTOR_5_G; + + range->freq[i].m = wf_channel2mhz(ch, sf); + range->freq[i].e = 6; + } + range->num_frequency = range->num_channels = i; + + /* Link quality (use NDIS cutoffs) */ + range->max_qual.qual = 5; + /* Signal level (use RSSI) */ + range->max_qual.level = 0x100 - 200; /* -200 dBm */ + /* Noise level (use noise) */ + range->max_qual.noise = 0x100 - 200; /* -200 dBm */ + /* Signal level threshold range (?) 
*/ + range->sensitivity = 65535; + +#if WIRELESS_EXT > 11 + /* Link quality (use NDIS cutoffs) */ + range->avg_qual.qual = 3; + /* Signal level (use RSSI) */ + range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD; + /* Noise level (use noise) */ + range->avg_qual.noise = 0x100 - 75; /* -75 dBm */ +#endif /* WIRELESS_EXT > 11 */ + + /* Set available bitrates */ + if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset)))) + return error; + rateset.count = dtoh32(rateset.count); + range->num_bitrates = rateset.count; + for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++) + range->bitrate[i] = (rateset.rates[i] & 0x7f) * 500000; /* convert to bps */ + if ((error = dev_wlc_intvar_get(dev, "nmode", &nmode))) + return error; + if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype)))) + return error; + if (nmode == 1 && (((phytype == WLC_PHY_TYPE_LCN) || + (phytype == WLC_PHY_TYPE_LCN40)))) { + if ((error = dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap))) + return error; + if ((error = dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx))) + return error; + if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t)))) + return error; + ci.hw_channel = dtoh32(ci.hw_channel); + + if (bw_cap == 0 || + (bw_cap == 2 && ci.hw_channel <= 14)) { + if (sgi_tx == 0) + nrate_list2copy = 0; + else + nrate_list2copy = 1; + } + if (bw_cap == 1 || + (bw_cap == 2 && ci.hw_channel >= 36)) { + if (sgi_tx == 0) + nrate_list2copy = 2; + else + nrate_list2copy = 3; + } + range->num_bitrates += 8; + ASSERT(range->num_bitrates < IW_MAX_BITRATES); + for (k = 0; i < range->num_bitrates; k++, i++) { + /* convert to bps */ + range->bitrate[i] = (nrate_list[nrate_list2copy][k]) * 500000; + } + } + + /* Set an indication of the max TCP throughput + * in bit/s that we can expect using this interface. + * May be use for QoS stuff... 
Jean II + */ + if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i)))) + return error; + i = dtoh32(i); + if (i == WLC_PHY_TYPE_A) + range->throughput = 24000000; /* 24 Mbits/s */ + else + range->throughput = 1500000; /* 1.5 Mbits/s */ + + /* RTS and fragmentation thresholds */ + range->min_rts = 0; + range->max_rts = 2347; + range->min_frag = 256; + range->max_frag = 2346; + + range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS; + range->num_encoding_sizes = 4; + range->encoding_size[0] = WEP1_KEY_SIZE; + range->encoding_size[1] = WEP128_KEY_SIZE; +#if WIRELESS_EXT > 17 + range->encoding_size[2] = TKIP_KEY_SIZE; +#else + range->encoding_size[2] = 0; +#endif + range->encoding_size[3] = AES_KEY_SIZE; + + /* Do not support power micro-management */ + range->min_pmp = 0; + range->max_pmp = 0; + range->min_pmt = 0; + range->max_pmt = 0; + range->pmp_flags = 0; + range->pm_capa = 0; + + /* Transmit Power - values are in mW */ + range->num_txpower = 2; + range->txpower[0] = 1; + range->txpower[1] = 255; + range->txpower_capa = IW_TXPOW_MWATT; + +#if WIRELESS_EXT > 10 + range->we_version_compiled = WIRELESS_EXT; + range->we_version_source = 19; + + /* Only support retry limits */ + range->retry_capa = IW_RETRY_LIMIT; + range->retry_flags = IW_RETRY_LIMIT; + range->r_time_flags = 0; + /* SRL and LRL limits */ + range->min_retry = 1; + range->max_retry = 255; + /* Retry lifetime limits unsupported */ + range->min_r_time = 0; + range->max_r_time = 0; +#endif /* WIRELESS_EXT > 10 */ + +#if WIRELESS_EXT > 17 + range->enc_capa = IW_ENC_CAPA_WPA; + range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP; + range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP; + range->enc_capa |= IW_ENC_CAPA_WPA2; + + /* Determine driver FBT capability. */ + if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) { + if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) { + /* Tell the host (e.g. 
wpa_supplicant) to let driver do the handshake */ + range->enc_capa |= IW_ENC_CAPA_4WAY_HANDSHAKE; + } + } + +#ifdef BCMFW_ROAM_ENABLE_WEXT + /* Advertise firmware roam capability to the external supplicant */ + range->enc_capa |= IW_ENC_CAPA_FW_ROAM_ENABLE; +#endif /* BCMFW_ROAM_ENABLE_WEXT */ + + /* Event capability (kernel) */ + IW_EVENT_CAPA_SET_KERNEL(range->event_capa); + /* Event capability (driver) */ + IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); + IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); + IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP); + IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE); + IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCREQIE); + IW_EVENT_CAPA_SET(range->event_capa, IWEVASSOCRESPIE); + IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND); + +#if WIRELESS_EXT >= 22 && defined(IW_SCAN_CAPA_ESSID) + /* FC7 wireless.h defines EXT 22 but doesn't define scan_capa bits */ + range->scan_capa = IW_SCAN_CAPA_ESSID; +#endif +#endif /* WIRELESS_EXT > 17 */ + + return 0; +} + +static int +rssi_to_qual(int rssi) +{ + if (rssi <= WL_IW_RSSI_NO_SIGNAL) + return 0; + else if (rssi <= WL_IW_RSSI_VERY_LOW) + return 1; + else if (rssi <= WL_IW_RSSI_LOW) + return 2; + else if (rssi <= WL_IW_RSSI_GOOD) + return 3; + else if (rssi <= WL_IW_RSSI_VERY_GOOD) + return 4; + else + return 5; +} + +static int +wl_iw_set_spy( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_iw_t *iw = IW_DEV_IF(dev); + struct sockaddr *addr = (struct sockaddr *) extra; + int i; + + WL_TRACE(("%s: SIOCSIWSPY\n", dev->name)); + + if (!extra) + return -EINVAL; + + iw->spy_num = MIN(ARRAYSIZE(iw->spy_addr), dwrq->length); + for (i = 0; i < iw->spy_num; i++) + memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN); + memset(iw->spy_qual, 0, sizeof(iw->spy_qual)); + + return 0; +} + +static int +wl_iw_get_spy( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_iw_t *iw = IW_DEV_IF(dev); + struct sockaddr *addr = (struct sockaddr *) extra; + struct iw_quality *qual = (struct iw_quality *) &addr[iw->spy_num]; + int i; + + WL_TRACE(("%s: SIOCGIWSPY\n", dev->name)); + + if (!extra) + return -EINVAL; + + dwrq->length = iw->spy_num; + for (i = 0; i < iw->spy_num; i++) { + memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN); + addr[i].sa_family = AF_UNIX; + memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality)); + iw->spy_qual[i].updated = 0; + } + + return 0; +} + +static int +wl_iw_set_wap( + struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *awrq, + char *extra +) +{ + int error = -EINVAL; + + WL_TRACE(("%s: SIOCSIWAP\n", dev->name)); + + if (awrq->sa_family != ARPHRD_ETHER) { + WL_ERROR(("%s: Invalid Header...sa_family\n", __FUNCTION__)); + return -EINVAL; + } + + /* Ignore "auto" or "off" */ + if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) { + scb_val_t scbval; + bzero(&scbval, sizeof(scb_val_t)); + if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) { + WL_ERROR(("%s: WLC_DISASSOC failed (%d).\n", __FUNCTION__, error)); + } + return 0; + } + /* WL_ASSOC(("Assoc to %s\n", bcm_ether_ntoa((struct ether_addr *)&(awrq->sa_data), + * eabuf))); + */ + /* Reassociate to the specified AP */ + if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, awrq->sa_data, ETHER_ADDR_LEN))) { + WL_ERROR(("%s: WLC_REASSOC failed (%d).\n", __FUNCTION__, error)); + return error; + } + + return 0; +} + +static int +wl_iw_get_wap( 
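+ /* Counterpart to wl_iw_set_wap() above, where a broadcast or null
+  * BSSID from userspace means "auto"/"off" and triggers WLC_DISASSOC
+  * rather than a reassociation. Here any WLC_GET_BSSID error is
+  * deliberately ignored so a down or disassociated interface simply
+  * reports the all-zero address written just before the call.
+  */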
+ struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *awrq, + char *extra +) +{ + WL_TRACE(("%s: SIOCGIWAP\n", dev->name)); + + awrq->sa_family = ARPHRD_ETHER; + memset(awrq->sa_data, 0, ETHER_ADDR_LEN); + + /* Ignore error (may be down or disassociated) */ + (void) dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN); + + return 0; +} + +#if WIRELESS_EXT > 17 +static int +wl_iw_mlme( + struct net_device *dev, + struct iw_request_info *info, + struct sockaddr *awrq, + char *extra +) +{ + struct iw_mlme *mlme; + scb_val_t scbval; + int error = -EINVAL; + + WL_TRACE(("%s: SIOCSIWMLME\n", dev->name)); + + mlme = (struct iw_mlme *)extra; + if (mlme == NULL) { + WL_ERROR(("Invalid ioctl data.\n")); + return error; + } + + scbval.val = mlme->reason_code; + bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN); + + if (mlme->cmd == IW_MLME_DISASSOC) { + scbval.val = htod32(scbval.val); + error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)); + } + else if (mlme->cmd == IW_MLME_DEAUTH) { + scbval.val = htod32(scbval.val); + error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval, + sizeof(scb_val_t)); + } + else { + WL_ERROR(("%s: Invalid ioctl data.\n", __FUNCTION__)); + return error; + } + + return error; +} +#endif /* WIRELESS_EXT > 17 */ + +static int +wl_iw_get_aplist( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_scan_results_t *list; + struct sockaddr *addr = (struct sockaddr *) extra; + struct iw_quality qual[IW_MAX_AP]; + wl_bss_info_t *bi = NULL; + int error, i; + uint buflen = dwrq->length; + + WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name)); + + if (!extra) + return -EINVAL; + + /* Get scan results (too large to put on the stack) */ + list = kmalloc(buflen, GFP_KERNEL); + if (!list) + return -ENOMEM; + memset(list, 0, buflen); + list->buflen = htod32(buflen); + if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) { + WL_ERROR(("%d: Scan results error %d\n", __LINE__, error)); + kfree(list); + return error; + } + list->buflen = dtoh32(list->buflen); + list->version = dtoh32(list->version); + list->count = dtoh32(list->count); + ASSERT(list->version == WL_BSS_INFO_VERSION); + + for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) { + bi = bi ? 
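+ /* wl_scan_results_t packs variable-length wl_bss_info_t records back
+  * to back; each record's length field gives the offset to the next.
+  * The ternary below seeds the walk at list->bss_info and then
+  * advances by dtoh32(bi->length). Written out, the equivalent loop
+  * is (sketch):
+  *
+  *   bi = list->bss_info;
+  *   for (i = 0; i < list->count; i++) {
+  *       ... use bi ...
+  *       bi = (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length));
+  *   }
+  */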
(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; + ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + + buflen)); + + /* Infrastructure only */ + if (!(dtoh16(bi->capability) & DOT11_CAP_ESS)) + continue; + + /* BSSID */ + memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN); + addr[dwrq->length].sa_family = ARPHRD_ETHER; + qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI)); + qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI); + qual[dwrq->length].noise = 0x100 + bi->phy_noise; + + /* Updated qual, level, and noise */ +#if WIRELESS_EXT > 18 + qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; +#else + qual[dwrq->length].updated = 7; +#endif /* WIRELESS_EXT > 18 */ + + dwrq->length++; + } + + kfree(list); + + if (dwrq->length) { + memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length); + /* Provided qual */ + dwrq->flags = 1; + } + + return 0; +} + +static int +wl_iw_iscan_get_aplist( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_scan_results_t *list; + iscan_buf_t * buf; + iscan_info_t *iscan = g_iscan; + + struct sockaddr *addr = (struct sockaddr *) extra; + struct iw_quality qual[IW_MAX_AP]; + wl_bss_info_t *bi = NULL; + int i; + + WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name)); + + if (!extra) + return -EINVAL; + + if ((!iscan) || (iscan->sysioc_pid < 0)) { + return wl_iw_get_aplist(dev, info, dwrq, extra); + } + + buf = iscan->list_hdr; + /* Get scan results (too large to put on the stack) */ + while (buf) { + list = &((wl_iscan_results_t*)buf->iscan_buf)->results; + ASSERT(list->version == WL_BSS_INFO_VERSION); + + bi = NULL; + for (i = 0, dwrq->length = 0; i < list->count && dwrq->length < IW_MAX_AP; i++) { + bi = bi ? 
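+ /* Same packed-record walk as wl_iw_get_aplist() above, except the
+  * results live in a chain of iscan_buf_t buffers (list_hdr -> next),
+  * each holding at most WLC_IW_ISCAN_MAXLEN bytes, so bi is re-seeded
+  * from each buffer's embedded wl_iscan_results_t as the outer while
+  * loop advances.
+  */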
(wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; + ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + + WLC_IW_ISCAN_MAXLEN)); + + /* Infrastructure only */ + if (!(dtoh16(bi->capability) & DOT11_CAP_ESS)) + continue; + + /* BSSID */ + memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN); + addr[dwrq->length].sa_family = ARPHRD_ETHER; + qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI)); + qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI); + qual[dwrq->length].noise = 0x100 + bi->phy_noise; + + /* Updated qual, level, and noise */ +#if WIRELESS_EXT > 18 + qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; +#else + qual[dwrq->length].updated = 7; +#endif /* WIRELESS_EXT > 18 */ + + dwrq->length++; + } + buf = buf->next; + } + if (dwrq->length) { + memcpy(&addr[dwrq->length], qual, sizeof(struct iw_quality) * dwrq->length); + /* Provided qual */ + dwrq->flags = 1; + } + + return 0; +} + +#if WIRELESS_EXT > 13 +static int +wl_iw_set_scan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + wlc_ssid_t ssid; + + WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name)); + + /* default Broadcast scan */ + memset(&ssid, 0, sizeof(ssid)); + +#if WIRELESS_EXT > 17 + /* check for given essid */ + if (wrqu->data.length == sizeof(struct iw_scan_req)) { + if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { + struct iw_scan_req *req = (struct iw_scan_req *)extra; + ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len); + memcpy(ssid.SSID, req->essid, ssid.SSID_len); + ssid.SSID_len = htod32(ssid.SSID_len); + } + } +#endif + /* Ignore error (most likely scan in progress) */ + (void) dev_wlc_ioctl(dev, WLC_SCAN, &ssid, sizeof(ssid)); + + return 0; +} + +static int +wl_iw_iscan_set_scan( + struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *wrqu, + char *extra +) +{ + wlc_ssid_t ssid; + iscan_info_t *iscan = g_iscan; + + WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name)); + + /* use backup if our thread is not successful */ + if ((!iscan) || (iscan->sysioc_pid < 0)) { + return wl_iw_set_scan(dev, info, wrqu, extra); + } + if (iscan->iscan_state == ISCAN_STATE_SCANING) { + return 0; + } + + /* default Broadcast scan */ + memset(&ssid, 0, sizeof(ssid)); + +#if WIRELESS_EXT > 17 + /* check for given essid */ + if (wrqu->data.length == sizeof(struct iw_scan_req)) { + if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { + struct iw_scan_req *req = (struct iw_scan_req *)extra; + ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len); + memcpy(ssid.SSID, req->essid, ssid.SSID_len); + ssid.SSID_len = htod32(ssid.SSID_len); + } + } +#endif + + iscan->list_cur = iscan->list_hdr; + iscan->iscan_state = ISCAN_STATE_SCANING; + + + wl_iw_set_event_mask(dev); + wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START); + + iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms); + add_timer(&iscan->timer); + iscan->timer_on = 1; + + return 0; +} + +#if WIRELESS_EXT > 17 +static bool +ie_is_wpa_ie(uint8 **wpaie, uint8 **tlvs, int *tlvs_len) +{ +/* Is this body of this tlvs entry a WPA entry? 
If */ +/* not update the tlvs buffer pointer/length */ + uint8 *ie = *wpaie; + + /* If the contents match the WPA_OUI and type=1 */ + if ((ie[1] >= 6) && + !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) { + return TRUE; + } + + /* point to the next ie */ + ie += ie[1] + 2; + /* calculate the length of the rest of the buffer */ + *tlvs_len -= (int)(ie - *tlvs); + /* update the pointer to the start of the buffer */ + *tlvs = ie; + return FALSE; +} + +static bool +ie_is_wps_ie(uint8 **wpsie, uint8 **tlvs, int *tlvs_len) +{ +/* Is this body of this tlvs entry a WPS entry? If */ +/* not update the tlvs buffer pointer/length */ + uint8 *ie = *wpsie; + + /* If the contents match the WPA_OUI and type=4 */ + if ((ie[1] >= 4) && + !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) { + return TRUE; + } + + /* point to the next ie */ + ie += ie[1] + 2; + /* calculate the length of the rest of the buffer */ + *tlvs_len -= (int)(ie - *tlvs); + /* update the pointer to the start of the buffer */ + *tlvs = ie; + return FALSE; +} +#endif /* WIRELESS_EXT > 17 */ + + +static int +wl_iw_handle_scanresults_ies(char **event_p, char *end, + struct iw_request_info *info, wl_bss_info_t *bi) +{ +#if WIRELESS_EXT > 17 + struct iw_event iwe; + char *event; + + event = *event_p; + if (bi->ie_length) { + /* look for wpa/rsn ies in the ie list... */ + bcm_tlv_t *ie; + uint8 *ptr = ((uint8 *)bi) + bi->ie_offset; + int ptr_len = bi->ie_length; + + /* OSEN IE */ + if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_VS_ID)) && + ie->len > WFA_OUI_LEN + 1 && + !bcmp((const void *)&ie->data[0], (const void *)WFA_OUI, WFA_OUI_LEN) && + ie->data[WFA_OUI_LEN] == WFA_OUI_TYPE_OSEN) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); + } + ptr = ((uint8 *)bi) + bi->ie_offset; + + if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); + } + ptr = ((uint8 *)bi) + bi->ie_offset; + + if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_MDIE_ID))) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); + } + ptr = ((uint8 *)bi) + bi->ie_offset; + + while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) { + /* look for WPS IE */ + if (ie_is_wps_ie(((uint8 **)&ie), &ptr, &ptr_len)) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); + break; + } + } + + ptr = ((uint8 *)bi) + bi->ie_offset; + ptr_len = bi->ie_length; + while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) { + if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie->len + 2; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie); + break; + } + } + + *event_p = event; + } + +#endif /* WIRELESS_EXT > 17 */ + return 0; +} +static int +wl_iw_get_scan( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + channel_info_t ci; + wl_scan_results_t *list; + struct iw_event iwe; + wl_bss_info_t *bi = NULL; + int error, i, j; + char *event = extra, *end = extra + dwrq->length, *value; + uint buflen = dwrq->length; + + WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name)); + + if (!extra) + return -EINVAL; + + /* Check for scan in progress */ + if ((error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, 
sizeof(ci)))) + return error; + ci.scan_channel = dtoh32(ci.scan_channel); + if (ci.scan_channel) + return -EAGAIN; + + /* Get scan results (too large to put on the stack) */ + list = kmalloc(buflen, GFP_KERNEL); + if (!list) + return -ENOMEM; + memset(list, 0, buflen); + list->buflen = htod32(buflen); + if ((error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen))) { + kfree(list); + return error; + } + list->buflen = dtoh32(list->buflen); + list->version = dtoh32(list->version); + list->count = dtoh32(list->count); + + ASSERT(list->version == WL_BSS_INFO_VERSION); + + for (i = 0; i < list->count && i < IW_MAX_AP; i++) { + bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; + ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + + buflen)); + + /* First entry must be the BSSID */ + iwe.cmd = SIOCGIWAP; + iwe.u.ap_addr.sa_family = ARPHRD_ETHER; + memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN); + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN); + + /* SSID */ + iwe.u.data.length = dtoh32(bi->SSID_len); + iwe.cmd = SIOCGIWESSID; + iwe.u.data.flags = 1; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID); + + /* Mode */ + if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) { + iwe.cmd = SIOCGIWMODE; + if (dtoh16(bi->capability) & DOT11_CAP_ESS) + iwe.u.mode = IW_MODE_INFRA; + else + iwe.u.mode = IW_MODE_ADHOC; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN); + } + + /* Channel */ + iwe.cmd = SIOCGIWFREQ; + + iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec), + (CHSPEC_IS2G(bi->chanspec)) ? + WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G); + iwe.u.freq.e = 6; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN); + + /* Channel quality */ + iwe.cmd = IWEVQUAL; + iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI)); + iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI); + iwe.u.qual.noise = 0x100 + bi->phy_noise; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN); + + wl_iw_handle_scanresults_ies(&event, end, info, bi); + + /* Encryption */ + iwe.cmd = SIOCGIWENCODE; + if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY) + iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; + else + iwe.u.data.flags = IW_ENCODE_DISABLED; + iwe.u.data.length = 0; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event); + + /* Rates */ + if (bi->rateset.count) { + value = event + IW_EV_LCP_LEN; + iwe.cmd = SIOCGIWRATE; + /* Those two flags are ignored... 
*/ + iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; + for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) { + iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000; + value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe, + IW_EV_PARAM_LEN); + } + event = value; + } + } + + kfree(list); + + dwrq->length = event - extra; + dwrq->flags = 0; /* todo */ + + return 0; +} + +static int +wl_iw_iscan_get_scan( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_scan_results_t *list; + struct iw_event iwe; + wl_bss_info_t *bi = NULL; + int ii, j; + int apcnt; + char *event = extra, *end = extra + dwrq->length, *value; + iscan_info_t *iscan = g_iscan; + iscan_buf_t * p_buf; + + WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name)); + + if (!extra) + return -EINVAL; + + /* use backup if our thread is not successful */ + if ((!iscan) || (iscan->sysioc_pid < 0)) { + return wl_iw_get_scan(dev, info, dwrq, extra); + } + + /* Check for scan in progress */ + if (iscan->iscan_state == ISCAN_STATE_SCANING) + return -EAGAIN; + + apcnt = 0; + p_buf = iscan->list_hdr; + /* Get scan results */ + while (p_buf != iscan->list_cur) { + list = &((wl_iscan_results_t*)p_buf->iscan_buf)->results; + + if (list->version != WL_BSS_INFO_VERSION) { + WL_ERROR(("list->version %d != WL_BSS_INFO_VERSION\n", list->version)); + } + + bi = NULL; + for (ii = 0; ii < list->count && apcnt < IW_MAX_AP; apcnt++, ii++) { + bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : list->bss_info; + ASSERT(((uintptr)bi + dtoh32(bi->length)) <= ((uintptr)list + + WLC_IW_ISCAN_MAXLEN)); + + /* overflow check cover fields before wpa IEs */ + if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN + + IW_EV_QUAL_LEN >= end) + return -E2BIG; + /* First entry must be the BSSID */ + iwe.cmd = SIOCGIWAP; + iwe.u.ap_addr.sa_family = ARPHRD_ETHER; + memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN); + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN); + + /* SSID */ + iwe.u.data.length = dtoh32(bi->SSID_len); + iwe.cmd = SIOCGIWESSID; + iwe.u.data.flags = 1; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID); + + /* Mode */ + if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) { + iwe.cmd = SIOCGIWMODE; + if (dtoh16(bi->capability) & DOT11_CAP_ESS) + iwe.u.mode = IW_MODE_INFRA; + else + iwe.u.mode = IW_MODE_ADHOC; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN); + } + + /* Channel */ + iwe.cmd = SIOCGIWFREQ; + + iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec), + (CHSPEC_IS2G(bi->chanspec)) ? 
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G); + iwe.u.freq.e = 6; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN); + + /* Channel quality */ + iwe.cmd = IWEVQUAL; + iwe.u.qual.qual = rssi_to_qual(dtoh16(bi->RSSI)); + iwe.u.qual.level = 0x100 + dtoh16(bi->RSSI); + iwe.u.qual.noise = 0x100 + bi->phy_noise; + event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN); + + wl_iw_handle_scanresults_ies(&event, end, info, bi); + + /* Encryption */ + iwe.cmd = SIOCGIWENCODE; + if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY) + iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; + else + iwe.u.data.flags = IW_ENCODE_DISABLED; + iwe.u.data.length = 0; + event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event); + + /* Rates */ + if (bi->rateset.count <= sizeof(bi->rateset.rates)) { + if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end) + return -E2BIG; + + value = event + IW_EV_LCP_LEN; + iwe.cmd = SIOCGIWRATE; + /* Those two flags are ignored... */ + iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; + for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) { + iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000; + value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe, + IW_EV_PARAM_LEN); + } + event = value; + } + } + p_buf = p_buf->next; + } /* while (p_buf) */ + + dwrq->length = event - extra; + dwrq->flags = 0; /* todo */ + + return 0; +} + +#endif /* WIRELESS_EXT > 13 */ + + +static int +wl_iw_set_essid( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wlc_ssid_t ssid; + int error; + + WL_TRACE(("%s: SIOCSIWESSID\n", dev->name)); + + /* default Broadcast SSID */ + memset(&ssid, 0, sizeof(ssid)); + if (dwrq->length && extra) { +#if WIRELESS_EXT > 20 + ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length); +#else + ssid.SSID_len = MIN(sizeof(ssid.SSID), dwrq->length-1); +#endif + memcpy(ssid.SSID, extra, ssid.SSID_len); + ssid.SSID_len = htod32(ssid.SSID_len); + + if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid)))) + return error; + } + /* If essid null then it is "iwconfig essid off" command */ + else { + scb_val_t scbval; + bzero(&scbval, sizeof(scb_val_t)); + if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) + return error; + } + return 0; +} + +static int +wl_iw_get_essid( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wlc_ssid_t ssid; + int error; + + WL_TRACE(("%s: SIOCGIWESSID\n", dev->name)); + + if (!extra) + return -EINVAL; + + if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) { + WL_ERROR(("Error getting the SSID\n")); + return error; + } + + ssid.SSID_len = dtoh32(ssid.SSID_len); + + /* Get the current SSID */ + memcpy(extra, ssid.SSID, ssid.SSID_len); + + dwrq->length = ssid.SSID_len; + + dwrq->flags = 1; /* active */ + + return 0; +} + +static int +wl_iw_set_nick( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_iw_t *iw = IW_DEV_IF(dev); + WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name)); + + if (!extra) + return -EINVAL; + + /* Check the size of the string */ + if (dwrq->length > sizeof(iw->nickname)) + return -E2BIG; + + memcpy(iw->nickname, extra, dwrq->length); + iw->nickname[dwrq->length - 1] = '\0'; + + return 0; +} + +static int +wl_iw_get_nick( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_iw_t *iw = IW_DEV_IF(dev); + 
WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name)); + + if (!extra) + return -EINVAL; + + strcpy(extra, iw->nickname); + dwrq->length = strlen(extra) + 1; + + return 0; +} + +static int wl_iw_set_rate( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + wl_rateset_t rateset; + int error, rate, i, error_bg, error_a; + + WL_TRACE(("%s: SIOCSIWRATE\n", dev->name)); + + /* Get current rateset */ + if ((error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset, sizeof(rateset)))) + return error; + + rateset.count = dtoh32(rateset.count); + + if (vwrq->value < 0) { + /* Select maximum rate */ + rate = rateset.rates[rateset.count - 1] & 0x7f; + } else if (vwrq->value < rateset.count) { + /* Select rate by rateset index */ + rate = rateset.rates[vwrq->value] & 0x7f; + } else { + /* Specified rate in bps */ + rate = vwrq->value / 500000; + } + + if (vwrq->fixed) { + /* + Set rate override, + Since the is a/b/g-blind, both a/bg_rate are enforced. + */ + error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate); + error_a = dev_wlc_intvar_set(dev, "a_rate", rate); + + if (error_bg && error_a) + return (error_bg | error_a); + } else { + /* + clear rate override + Since the is a/b/g-blind, both a/bg_rate are enforced. + */ + /* 0 is for clearing rate override */ + error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0); + /* 0 is for clearing rate override */ + error_a = dev_wlc_intvar_set(dev, "a_rate", 0); + + if (error_bg && error_a) + return (error_bg | error_a); + + /* Remove rates above selected rate */ + for (i = 0; i < rateset.count; i++) + if ((rateset.rates[i] & 0x7f) > rate) + break; + rateset.count = htod32(i); + + /* Set current rateset */ + if ((error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset, sizeof(rateset)))) + return error; + } + + return 0; +} + +static int wl_iw_get_rate( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, rate; + + WL_TRACE(("%s: SIOCGIWRATE\n", dev->name)); + + /* Report the current tx rate */ + if ((error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate)))) + return error; + rate = dtoh32(rate); + vwrq->value = rate * 500000; + + return 0; +} + +static int +wl_iw_set_rts( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, rts; + + WL_TRACE(("%s: SIOCSIWRTS\n", dev->name)); + + if (vwrq->disabled) + rts = DOT11_DEFAULT_RTS_LEN; + else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN) + return -EINVAL; + else + rts = vwrq->value; + + if ((error = dev_wlc_intvar_set(dev, "rtsthresh", rts))) + return error; + + return 0; +} + +static int +wl_iw_get_rts( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, rts; + + WL_TRACE(("%s: SIOCGIWRTS\n", dev->name)); + + if ((error = dev_wlc_intvar_get(dev, "rtsthresh", &rts))) + return error; + + vwrq->value = rts; + vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN); + vwrq->fixed = 1; + + return 0; +} + +static int +wl_iw_set_frag( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, frag; + + WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name)); + + if (vwrq->disabled) + frag = DOT11_DEFAULT_FRAG_LEN; + else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN) + return -EINVAL; + else + frag = vwrq->value; + + if ((error = dev_wlc_intvar_set(dev, "fragthresh", frag))) + return error; + + return 0; +} + +static int 
+wl_iw_get_frag( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, fragthreshold; + + WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name)); + + if ((error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold))) + return error; + + vwrq->value = fragthreshold; + vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN); + vwrq->fixed = 1; + + return 0; +} + +static int +wl_iw_set_txpow( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, disable; + uint16 txpwrmw; + WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name)); + + /* Make sure radio is off or on as far as software is concerned */ + disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0; + disable += WL_RADIO_SW_DISABLE << 16; + + disable = htod32(disable); + if ((error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable)))) + return error; + + /* If Radio is off, nothing more to do */ + if (disable & WL_RADIO_SW_DISABLE) + return 0; + + /* Only handle mW */ + if (!(vwrq->flags & IW_TXPOW_MWATT)) + return -EINVAL; + + /* Value < 0 means just "on" or "off" */ + if (vwrq->value < 0) + return 0; + + if (vwrq->value > 0xffff) txpwrmw = 0xffff; + else txpwrmw = (uint16)vwrq->value; + + + error = dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw))); + return error; +} + +static int +wl_iw_get_txpow( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, disable, txpwrdbm; + uint8 result; + + WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable))) || + (error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm))) + return error; + + disable = dtoh32(disable); + result = (uint8)(txpwrdbm & ~WL_TXPWR_OVERRIDE); + vwrq->value = (int32)bcm_qdbm_to_mw(result); + vwrq->fixed = 0; + vwrq->disabled = (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 
1 : 0; + vwrq->flags = IW_TXPOW_MWATT; + + return 0; +} + +#if WIRELESS_EXT > 10 +static int +wl_iw_set_retry( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, lrl, srl; + + WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name)); + + /* Do not handle "off" or "lifetime" */ + if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME)) + return -EINVAL; + + /* Handle "[min|max] limit" */ + if (vwrq->flags & IW_RETRY_LIMIT) { + /* "max limit" or just "limit" */ +#if WIRELESS_EXT > 20 + if ((vwrq->flags & IW_RETRY_LONG) ||(vwrq->flags & IW_RETRY_MAX) || + !((vwrq->flags & IW_RETRY_SHORT) || (vwrq->flags & IW_RETRY_MIN))) { +#else + if ((vwrq->flags & IW_RETRY_MAX) || !(vwrq->flags & IW_RETRY_MIN)) { +#endif /* WIRELESS_EXT > 20 */ + + lrl = htod32(vwrq->value); + if ((error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(lrl)))) + return error; + } + /* "min limit" or just "limit" */ +#if WIRELESS_EXT > 20 + if ((vwrq->flags & IW_RETRY_SHORT) ||(vwrq->flags & IW_RETRY_MIN) || + !((vwrq->flags & IW_RETRY_LONG) || (vwrq->flags & IW_RETRY_MAX))) { +#else + if ((vwrq->flags & IW_RETRY_MIN) || !(vwrq->flags & IW_RETRY_MAX)) { +#endif /* WIRELESS_EXT > 20 */ + + srl = htod32(vwrq->value); + if ((error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl, sizeof(srl)))) + return error; + } + } + + return 0; +} + +static int +wl_iw_get_retry( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, lrl, srl; + + WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name)); + + vwrq->disabled = 0; /* Can't be disabled */ + + /* Do not handle lifetime queries */ + if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) + return -EINVAL; + + /* Get retry limits */ + if ((error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl))) || + (error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl)))) + return error; + + lrl = dtoh32(lrl); + srl = dtoh32(srl); + + /* Note : by default, display the min retry number */ + if (vwrq->flags & IW_RETRY_MAX) { + vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX; + vwrq->value = lrl; + } else { + vwrq->flags = IW_RETRY_LIMIT; + vwrq->value = srl; + if (srl != lrl) + vwrq->flags |= IW_RETRY_MIN; + } + + return 0; +} +#endif /* WIRELESS_EXT > 10 */ + +static int +wl_iw_set_encode( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_wsec_key_t key; + int error, val, wsec; + + WL_TRACE(("%s: SIOCSIWENCODE\n", dev->name)); + + memset(&key, 0, sizeof(key)); + + if ((dwrq->flags & IW_ENCODE_INDEX) == 0) { + /* Find the current key */ + for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) { + val = htod32(key.index); + if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val)))) + return error; + val = dtoh32(val); + if (val) + break; + } + /* Default to 0 */ + if (key.index == DOT11_MAX_DEFAULT_KEYS) + key.index = 0; + } else { + key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1; + if (key.index >= DOT11_MAX_DEFAULT_KEYS) + return -EINVAL; + } + + /* Interpret "off" to mean no encryption */ + wsec = (dwrq->flags & IW_ENCODE_DISABLED) ? 
0 : WEP_ENABLED; + + if ((error = dev_wlc_intvar_set(dev, "wsec", wsec))) + return error; + + /* Old API used to pass a NULL pointer instead of IW_ENCODE_NOKEY */ + if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) { + /* Just select a new current key */ + val = htod32(key.index); + if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val, sizeof(val)))) + return error; + } else { + key.len = dwrq->length; + + if (dwrq->length > sizeof(key.data)) + return -EINVAL; + + memcpy(key.data, extra, dwrq->length); + + key.flags = WL_PRIMARY_KEY; + switch (key.len) { + case WEP1_KEY_SIZE: + key.algo = CRYPTO_ALGO_WEP1; + break; + case WEP128_KEY_SIZE: + key.algo = CRYPTO_ALGO_WEP128; + break; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14) + case TKIP_KEY_SIZE: + key.algo = CRYPTO_ALGO_TKIP; + break; +#endif + case AES_KEY_SIZE: + key.algo = CRYPTO_ALGO_AES_CCM; + break; + default: + return -EINVAL; + } + + /* Set the new key/index */ + swap_key_from_BE(&key); + if ((error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)))) + return error; + } + + /* Interpret "restricted" to mean shared key authentication */ + val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0; + val = htod32(val); + if ((error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val)))) + return error; + + return 0; +} + +static int +wl_iw_get_encode( + struct net_device *dev, + struct iw_request_info *info, + struct iw_point *dwrq, + char *extra +) +{ + wl_wsec_key_t key; + int error, val, wsec, auth; + + WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name)); + + /* assure default values of zero for things we don't touch */ + bzero(&key, sizeof(wl_wsec_key_t)); + + if ((dwrq->flags & IW_ENCODE_INDEX) == 0) { + /* Find the current key */ + for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS; key.index++) { + val = key.index; + if ((error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val, sizeof(val)))) + return error; + val = dtoh32(val); + if (val) + break; + } + } else + key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1; + + if (key.index >= DOT11_MAX_DEFAULT_KEYS) + key.index = 0; + + /* Get info */ + + if ((error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec))) || + (error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth)))) + return error; + + swap_key_to_BE(&key); + + wsec = dtoh32(wsec); + auth = dtoh32(auth); + /* Get key length */ + dwrq->length = MIN(IW_ENCODING_TOKEN_MAX, key.len); + + /* Get flags */ + dwrq->flags = key.index + 1; + if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))) { + /* Interpret "off" to mean no encryption */ + dwrq->flags |= IW_ENCODE_DISABLED; + } + if (auth) { + /* Interpret "restricted" to mean shared key authentication */ + dwrq->flags |= IW_ENCODE_RESTRICTED; + } + + /* Get key */ + if (dwrq->length && extra) + memcpy(extra, key.data, dwrq->length); + + return 0; +} + +static int +wl_iw_set_power( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, pm; + + WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name)); + + pm = vwrq->disabled ? PM_OFF : PM_MAX; + + pm = htod32(pm); + if ((error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm)))) + return error; + + return 0; +} + +static int +wl_iw_get_power( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + int error, pm; + + WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name)); + + if ((error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm)))) + return error; + + pm = dtoh32(pm); + vwrq->disabled = pm ? 
0 : 1;
+ vwrq->flags = IW_POWER_ALL_R;
+
+ return 0;
+}
+
+#if WIRELESS_EXT > 17
+static int
+wl_iw_set_wpaie(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *iwp,
+ char *extra
+)
+{
+ dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);
+
+ return 0;
+}
+
+static int
+wl_iw_get_wpaie(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *iwp,
+ char *extra
+)
+{
+ WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name));
+ iwp->length = 64;
+ dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length);
+ return 0;
+}
+
+static int
+wl_iw_set_encodeext(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
+{
+ wl_wsec_key_t key;
+ int error;
+ struct iw_encode_ext *iwe;
+
+ WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name));
+
+ memset(&key, 0, sizeof(key));
+ iwe = (struct iw_encode_ext *)extra;
+
+ /* disable encryption completely */
+ if (dwrq->flags & IW_ENCODE_DISABLED) {
+
+ }
+
+ /* get the key index */
+ key.index = 0;
+ if (dwrq->flags & IW_ENCODE_INDEX)
+ key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+ key.len = iwe->key_len;
+
+ /* For default WEP keys the driver needs a null ea address instead of the bcast address */
+ if (!ETHER_ISMULTI(iwe->addr.sa_data))
+ bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea, ETHER_ADDR_LEN);
+
+ /* check for key index change */
+ if (key.len == 0) {
+ if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+ WL_WSEC(("Changing the primary key to %d\n", key.index));
+ /* change the key index */
+ key.index = htod32(key.index);
+ error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
+ &key.index, sizeof(key.index));
+ if (error)
+ return error;
+ }
+ /* key delete */
+ else {
+ swap_key_from_BE(&key);
+ error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ if (error)
+ return error;
+ }
+ }
+ /* This case is used to allow an external 802.1x supplicant
+ * to pass the PMK to the in-driver supplicant for use in
+ * the 4-way handshake. 
+ */ + else if (iwe->alg == IW_ENCODE_ALG_PMK) { + int j; + wsec_pmk_t pmk; + char keystring[WSEC_MAX_PSK_LEN + 1]; + char* charptr = keystring; + uint len; + + /* copy the raw hex key to the appropriate format */ + for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) { + (void)snprintf(charptr, 3, "%02x", iwe->key[j]); + charptr += 2; + } + len = strlen(keystring); + pmk.key_len = htod16(len); + bcopy(keystring, pmk.key, len); + pmk.flags = htod16(WSEC_PASSPHRASE); + + error = dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk)); + if (error) + return error; + } + + else { + if (iwe->key_len > sizeof(key.data)) + return -EINVAL; + + WL_WSEC(("Setting the key index %d\n", key.index)); + if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { + WL_WSEC(("key is a Primary Key\n")); + key.flags = WL_PRIMARY_KEY; + } + + bcopy((void *)iwe->key, key.data, iwe->key_len); + + if (iwe->alg == IW_ENCODE_ALG_TKIP) { + uint8 keybuf[8]; + bcopy(&key.data[24], keybuf, sizeof(keybuf)); + bcopy(&key.data[16], &key.data[24], sizeof(keybuf)); + bcopy(keybuf, &key.data[16], sizeof(keybuf)); + } + + /* rx iv */ + if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) { + uchar *ivptr; + ivptr = (uchar *)iwe->rx_seq; + key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) | + (ivptr[3] << 8) | ivptr[2]; + key.rxiv.lo = (ivptr[1] << 8) | ivptr[0]; + key.iv_initialized = TRUE; + } + + switch (iwe->alg) { + case IW_ENCODE_ALG_NONE: + key.algo = CRYPTO_ALGO_OFF; + break; + case IW_ENCODE_ALG_WEP: + if (iwe->key_len == WEP1_KEY_SIZE) + key.algo = CRYPTO_ALGO_WEP1; + else + key.algo = CRYPTO_ALGO_WEP128; + break; + case IW_ENCODE_ALG_TKIP: + key.algo = CRYPTO_ALGO_TKIP; + break; + case IW_ENCODE_ALG_CCMP: + key.algo = CRYPTO_ALGO_AES_CCM; + break; + default: + break; + } + swap_key_from_BE(&key); + + dhd_wait_pend8021x(dev); + + error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key)); + if (error) + return error; + } + return 0; +} + + +#if WIRELESS_EXT > 17 +struct { + pmkid_list_t pmkids; + pmkid_t foo[MAXPMKID-1]; +} pmkid_list; +static int +wl_iw_set_pmksa( + struct net_device *dev, + struct iw_request_info *info, + struct iw_param *vwrq, + char *extra +) +{ + struct iw_pmksa *iwpmksa; + uint i; + char eabuf[ETHER_ADDR_STR_LEN]; + pmkid_t * pmkid_array = pmkid_list.pmkids.pmkid; + + WL_TRACE(("%s: SIOCSIWPMKSA\n", dev->name)); + iwpmksa = (struct iw_pmksa *)extra; + bzero((char *)eabuf, ETHER_ADDR_STR_LEN); + if (iwpmksa->cmd == IW_PMKSA_FLUSH) { + WL_TRACE(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n")); + bzero((char *)&pmkid_list, sizeof(pmkid_list)); + } + if (iwpmksa->cmd == IW_PMKSA_REMOVE) { + pmkid_list_t pmkid, *pmkidptr; + pmkidptr = &pmkid; + bcopy(&iwpmksa->bssid.sa_data[0], &pmkidptr->pmkid[0].BSSID, ETHER_ADDR_LEN); + bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID, WPA2_PMKID_LEN); + { + uint j; + WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_REMOVE - PMKID: %s = ", + bcm_ether_ntoa(&pmkidptr->pmkid[0].BSSID, + eabuf))); + for (j = 0; j < WPA2_PMKID_LEN; j++) + WL_TRACE(("%02x ", pmkidptr->pmkid[0].PMKID[j])); + WL_TRACE(("\n")); + } + for (i = 0; i < pmkid_list.pmkids.npmkid; i++) + if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_array[i].BSSID, + ETHER_ADDR_LEN)) + break; + for (; i < pmkid_list.pmkids.npmkid; i++) { + bcopy(&pmkid_array[i+1].BSSID, + &pmkid_array[i].BSSID, + ETHER_ADDR_LEN); + bcopy(&pmkid_array[i+1].PMKID, + &pmkid_array[i].PMKID, + WPA2_PMKID_LEN); + } + pmkid_list.pmkids.npmkid--; + } + if (iwpmksa->cmd == IW_PMKSA_ADD) { + bcopy(&iwpmksa->bssid.sa_data[0], + &pmkid_array[pmkid_list.pmkids.npmkid].BSSID, + 
ETHER_ADDR_LEN);
+ bcopy(&iwpmksa->pmkid[0], &pmkid_array[pmkid_list.pmkids.npmkid].PMKID,
+ WPA2_PMKID_LEN);
+ {
+ uint j;
+ uint k;
+ k = pmkid_list.pmkids.npmkid;
+ BCM_REFERENCE(k);
+ WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ",
+ bcm_ether_ntoa(&pmkid_array[k].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_TRACE(("%02x ", pmkid_array[k].PMKID[j]));
+ WL_TRACE(("\n"));
+ }
+ pmkid_list.pmkids.npmkid++;
+ }
+ WL_TRACE(("PRINTING pmkid LIST - No of elements %d\n", pmkid_list.pmkids.npmkid));
+ for (i = 0; i < pmkid_list.pmkids.npmkid; i++) {
+ uint j;
+ WL_TRACE(("PMKID[%d]: %s = ", i,
+ bcm_ether_ntoa(&pmkid_array[i].BSSID,
+ eabuf)));
+ for (j = 0; j < WPA2_PMKID_LEN; j++)
+ WL_TRACE(("%02x ", pmkid_array[i].PMKID[j]));
+ WL_TRACE(("\n"));
+ }
+ WL_TRACE(("\n"));
+ dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list, sizeof(pmkid_list));
+ return 0;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+static int
+wl_iw_get_encodeext(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name));
+ return 0;
+}
+
+static int
+wl_iw_set_wpaauth(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error = 0;
+ int paramid;
+ int paramval;
+ uint32 cipher_combined;
+ int val = 0;
+ wl_iw_t *iw = IW_DEV_IF(dev);
+
+ WL_TRACE(("%s: SIOCSIWAUTH\n", dev->name));
+
+ paramid = vwrq->flags & IW_AUTH_INDEX;
+ paramval = vwrq->value;
+
+ WL_TRACE(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n",
+ dev->name, paramid, paramval));
+
+ switch (paramid) {
+
+ case IW_AUTH_WPA_VERSION:
+ /* supported wpa version disabled or wpa or wpa2 */
+ if (paramval & IW_AUTH_WPA_VERSION_DISABLED)
+ val = WPA_AUTH_DISABLED;
+ else if (paramval & (IW_AUTH_WPA_VERSION_WPA))
+ val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
+ else if (paramval & IW_AUTH_WPA_VERSION_WPA2)
+ val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+ WL_TRACE(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val));
+ if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+ return error;
+ break;
+
+ case IW_AUTH_CIPHER_PAIRWISE:
+ case IW_AUTH_CIPHER_GROUP: {
+ int fbt_cap = 0;
+
+ if (paramid == IW_AUTH_CIPHER_PAIRWISE) {
+ iw->pwsec = paramval;
+ }
+ else {
+ iw->gwsec = paramval;
+ }
+
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
+ return error;
+
+ cipher_combined = iw->gwsec | iw->pwsec;
+ val &= ~(WEP_ENABLED | TKIP_ENABLED | AES_ENABLED);
+ if (cipher_combined & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
+ val |= WEP_ENABLED;
+ if (cipher_combined & IW_AUTH_CIPHER_TKIP)
+ val |= TKIP_ENABLED;
+ if (cipher_combined & IW_AUTH_CIPHER_CCMP)
+ val |= AES_ENABLED;
+
+ if (iw->privacy_invoked && !val) {
+ WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming "
+ "we're a WPS enrollee\n", dev->name, __FUNCTION__));
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+ WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ } else if (val) {
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+ WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ }
+
+ if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+ return error;
+
+ /* Ensure in-dongle supplicant is turned on when FBT wants to do the 4-way
+ * handshake. 
+ */
+ if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) {
+ if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) {
+ if ((paramid == IW_AUTH_CIPHER_PAIRWISE) && (val & AES_ENABLED)) {
+ if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 1)))
+ return error;
+ }
+ else if (val == 0) {
+ if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 0)))
+ return error;
+ }
+ }
+ }
+ break;
+ }
+
+ case IW_AUTH_KEY_MGMT:
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+
+ if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+ if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
+ val = WPA_AUTH_PSK;
+ else
+ val = WPA_AUTH_UNSPECIFIED;
+ if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
+ val |= WPA2_AUTH_FT;
+ }
+ else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+ if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
+ val = WPA2_AUTH_PSK;
+ else
+ val = WPA2_AUTH_UNSPECIFIED;
+ if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
+ val |= WPA2_AUTH_FT;
+ }
+ WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+ if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
+ return error;
+ break;
+
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ dev_wlc_bufvar_set(dev, "tkip_countermeasures", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_80211_AUTH_ALG:
+ /* open shared */
+ WL_ERROR(("Setting the D11auth %d\n", paramval));
+ if (paramval & IW_AUTH_ALG_OPEN_SYSTEM)
+ val = 0;
+ else if (paramval & IW_AUTH_ALG_SHARED_KEY)
+ val = 1;
+ else
+ error = 1;
+ if (!error && (error = dev_wlc_intvar_set(dev, "auth", val)))
+ return error;
+ break;
+
+ case IW_AUTH_WPA_ENABLED:
+ if (paramval == 0) {
+ val = 0;
+ WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
+ error = dev_wlc_intvar_set(dev, "wpa_auth", val);
+ return error;
+ }
+ else {
+ /* If WPA is enabled, wpa_auth is set elsewhere */
+ }
+ break;
+
+ case IW_AUTH_DROP_UNENCRYPTED:
+ dev_wlc_bufvar_set(dev, "wsec_restrict", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+ break;
+
+#if WIRELESS_EXT > 17
+
+ case IW_AUTH_ROAMING_CONTROL:
+ WL_TRACE(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+ /* driver control or user space app control */
+ break;
+
+ case IW_AUTH_PRIVACY_INVOKED: {
+ int wsec;
+
+ if (paramval == 0) {
+ iw->privacy_invoked = FALSE;
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+ WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ } else {
+ iw->privacy_invoked = TRUE;
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &wsec)))
+ return error;
+
+ if (!WSEC_ENABLED(wsec)) {
+ /* if privacy is true, but wsec is false, we are a WPS enrollee */
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
+ WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ } else {
+ if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", FALSE))) {
+ WL_WSEC(("Failed to clear iovar is_WPS_enrollee\n"));
+ return error;
+ }
+ }
+ }
+ break;
+ }
+
+
+#endif /* WIRELESS_EXT > 17 */
+
+
+ default:
+ break;
+ }
+ return 0;
+}
+#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK))
+
+static int
+wl_iw_get_wpaauth(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq,
+ char *extra
+)
+{
+ int error;
+ int paramid;
+ int paramval = 0;
+ int val;
+ wl_iw_t *iw = IW_DEV_IF(dev);
+
+ WL_TRACE(("%s: SIOCGIWAUTH\n", 
dev->name));
+
+ paramid = vwrq->flags & IW_AUTH_INDEX;
+
+ switch (paramid) {
+ case IW_AUTH_WPA_VERSION:
+ /* supported wpa version disabled or wpa or wpa2 */
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+ if (val & (WPA_AUTH_NONE | WPA_AUTH_DISABLED))
+ paramval = IW_AUTH_WPA_VERSION_DISABLED;
+ else if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED))
+ paramval = IW_AUTH_WPA_VERSION_WPA;
+ else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED))
+ paramval = IW_AUTH_WPA_VERSION_WPA2;
+ break;
+
+ case IW_AUTH_CIPHER_PAIRWISE:
+ paramval = iw->pwsec;
+ break;
+
+ case IW_AUTH_CIPHER_GROUP:
+ paramval = iw->gwsec;
+ break;
+
+ case IW_AUTH_KEY_MGMT:
+ /* psk, 1x */
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+ if (VAL_PSK(val))
+ paramval = IW_AUTH_KEY_MGMT_PSK;
+ else
+ paramval = IW_AUTH_KEY_MGMT_802_1X;
+
+ break;
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ dev_wlc_bufvar_get(dev, "tkip_countermeasures", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_DROP_UNENCRYPTED:
+ dev_wlc_bufvar_get(dev, "wsec_restrict", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol", (char *)&paramval, 1);
+ break;
+
+ case IW_AUTH_80211_AUTH_ALG:
+ /* open, shared, leap */
+ if ((error = dev_wlc_intvar_get(dev, "auth", &val)))
+ return error;
+ if (!val)
+ paramval = IW_AUTH_ALG_OPEN_SYSTEM;
+ else
+ paramval = IW_AUTH_ALG_SHARED_KEY;
+ break;
+ case IW_AUTH_WPA_ENABLED:
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ return error;
+ if (val)
+ paramval = TRUE;
+ else
+ paramval = FALSE;
+ break;
+
+#if WIRELESS_EXT > 17
+
+ case IW_AUTH_ROAMING_CONTROL:
+ WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
+ /* driver control or user space app control */
+ break;
+
+ case IW_AUTH_PRIVACY_INVOKED:
+ paramval = iw->privacy_invoked;
+ break;
+
+#endif /* WIRELESS_EXT > 17 */
+ }
+ vwrq->value = paramval;
+ return 0;
+}
+#endif /* WIRELESS_EXT > 17 */
+
+static const iw_handler wl_iw_handler[] =
+{
+ (iw_handler) wl_iw_config_commit, /* SIOCSIWCOMMIT */
+ (iw_handler) wl_iw_get_name, /* SIOCGIWNAME */
+ (iw_handler) NULL, /* SIOCSIWNWID */
+ (iw_handler) NULL, /* SIOCGIWNWID */
+ (iw_handler) wl_iw_set_freq, /* SIOCSIWFREQ */
+ (iw_handler) wl_iw_get_freq, /* SIOCGIWFREQ */
+ (iw_handler) wl_iw_set_mode, /* SIOCSIWMODE */
+ (iw_handler) wl_iw_get_mode, /* SIOCGIWMODE */
+ (iw_handler) NULL, /* SIOCSIWSENS */
+ (iw_handler) NULL, /* SIOCGIWSENS */
+ (iw_handler) NULL, /* SIOCSIWRANGE */
+ (iw_handler) wl_iw_get_range, /* SIOCGIWRANGE */
+ (iw_handler) NULL, /* SIOCSIWPRIV */
+ (iw_handler) NULL, /* SIOCGIWPRIV */
+ (iw_handler) NULL, /* SIOCSIWSTATS */
+ (iw_handler) NULL, /* SIOCGIWSTATS */
+ (iw_handler) wl_iw_set_spy, /* SIOCSIWSPY */
+ (iw_handler) wl_iw_get_spy, /* SIOCGIWSPY */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) wl_iw_set_wap, /* SIOCSIWAP */
+ (iw_handler) wl_iw_get_wap, /* SIOCGIWAP */
+#if WIRELESS_EXT > 17
+ (iw_handler) wl_iw_mlme, /* SIOCSIWMLME */
+#else
+ (iw_handler) NULL, /* -- hole -- */
+#endif
+ (iw_handler) wl_iw_iscan_get_aplist, /* SIOCGIWAPLIST */
+#if WIRELESS_EXT > 13
+ (iw_handler) wl_iw_iscan_set_scan, /* SIOCSIWSCAN */
+ (iw_handler) wl_iw_iscan_get_scan, /* SIOCGIWSCAN */
+#else /* WIRELESS_EXT > 13 */
+ (iw_handler) NULL, /* SIOCSIWSCAN */
+ (iw_handler) NULL, /* SIOCGIWSCAN */
+#endif /* WIRELESS_EXT > 13 */
+ (iw_handler) wl_iw_set_essid, /* SIOCSIWESSID */
+ (iw_handler) wl_iw_get_essid, /* 
SIOCGIWESSID */ + (iw_handler) wl_iw_set_nick, /* SIOCSIWNICKN */ + (iw_handler) wl_iw_get_nick, /* SIOCGIWNICKN */ + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) wl_iw_set_rate, /* SIOCSIWRATE */ + (iw_handler) wl_iw_get_rate, /* SIOCGIWRATE */ + (iw_handler) wl_iw_set_rts, /* SIOCSIWRTS */ + (iw_handler) wl_iw_get_rts, /* SIOCGIWRTS */ + (iw_handler) wl_iw_set_frag, /* SIOCSIWFRAG */ + (iw_handler) wl_iw_get_frag, /* SIOCGIWFRAG */ + (iw_handler) wl_iw_set_txpow, /* SIOCSIWTXPOW */ + (iw_handler) wl_iw_get_txpow, /* SIOCGIWTXPOW */ +#if WIRELESS_EXT > 10 + (iw_handler) wl_iw_set_retry, /* SIOCSIWRETRY */ + (iw_handler) wl_iw_get_retry, /* SIOCGIWRETRY */ +#endif /* WIRELESS_EXT > 10 */ + (iw_handler) wl_iw_set_encode, /* SIOCSIWENCODE */ + (iw_handler) wl_iw_get_encode, /* SIOCGIWENCODE */ + (iw_handler) wl_iw_set_power, /* SIOCSIWPOWER */ + (iw_handler) wl_iw_get_power, /* SIOCGIWPOWER */ +#if WIRELESS_EXT > 17 + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) NULL, /* -- hole -- */ + (iw_handler) wl_iw_set_wpaie, /* SIOCSIWGENIE */ + (iw_handler) wl_iw_get_wpaie, /* SIOCGIWGENIE */ + (iw_handler) wl_iw_set_wpaauth, /* SIOCSIWAUTH */ + (iw_handler) wl_iw_get_wpaauth, /* SIOCGIWAUTH */ + (iw_handler) wl_iw_set_encodeext, /* SIOCSIWENCODEEXT */ + (iw_handler) wl_iw_get_encodeext, /* SIOCGIWENCODEEXT */ + (iw_handler) wl_iw_set_pmksa, /* SIOCSIWPMKSA */ +#endif /* WIRELESS_EXT > 17 */ +}; + +#if WIRELESS_EXT > 12 +enum { + WL_IW_SET_LEDDC = SIOCIWFIRSTPRIV, + WL_IW_SET_VLANMODE, + WL_IW_SET_PM, +#if WIRELESS_EXT > 17 +#endif /* WIRELESS_EXT > 17 */ + WL_IW_SET_LAST +}; + +static iw_handler wl_iw_priv_handler[] = { + wl_iw_set_leddc, + wl_iw_set_vlanmode, + wl_iw_set_pm, +#if WIRELESS_EXT > 17 +#endif /* WIRELESS_EXT > 17 */ + NULL +}; + +static struct iw_priv_args wl_iw_priv_args[] = { + { + WL_IW_SET_LEDDC, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, + 0, + "set_leddc" + }, + { + WL_IW_SET_VLANMODE, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, + 0, + "set_vlanmode" + }, + { + WL_IW_SET_PM, + IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, + 0, + "set_pm" + }, +#if WIRELESS_EXT > 17 +#endif /* WIRELESS_EXT > 17 */ + { 0, 0, 0, { 0 } } +}; + +const struct iw_handler_def wl_iw_handler_def = +{ + .num_standard = ARRAYSIZE(wl_iw_handler), + .num_private = ARRAY_SIZE(wl_iw_priv_handler), + .num_private_args = ARRAY_SIZE(wl_iw_priv_args), + .standard = (const iw_handler *) wl_iw_handler, + .private = wl_iw_priv_handler, + .private_args = wl_iw_priv_args, +#if WIRELESS_EXT >= 19 + get_wireless_stats: dhd_get_wireless_stats, +#endif /* WIRELESS_EXT >= 19 */ + }; +#endif /* WIRELESS_EXT > 12 */ + +int +wl_iw_ioctl( + struct net_device *dev, + struct ifreq *rq, + int cmd +) +{ + struct iwreq *wrq = (struct iwreq *) rq; + struct iw_request_info info; + iw_handler handler; + char *extra = NULL; + size_t token_size = 1; + int max_tokens = 0, ret = 0; + + if (cmd < SIOCIWFIRST || + IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) || + !(handler = wl_iw_handler[IW_IOCTL_IDX(cmd)])) + return -EOPNOTSUPP; + + switch (cmd) { + + case SIOCSIWESSID: + case SIOCGIWESSID: + case SIOCSIWNICKN: + case SIOCGIWNICKN: + max_tokens = IW_ESSID_MAX_SIZE + 1; + break; + + case SIOCSIWENCODE: + case SIOCGIWENCODE: +#if WIRELESS_EXT > 17 + case SIOCSIWENCODEEXT: + case SIOCGIWENCODEEXT: +#endif + max_tokens = IW_ENCODING_TOKEN_MAX; + break; + + case SIOCGIWRANGE: + max_tokens = sizeof(struct iw_range); + break; + + case SIOCGIWAPLIST: + token_size = sizeof(struct sockaddr) + 
sizeof(struct iw_quality); + max_tokens = IW_MAX_AP; + break; + +#if WIRELESS_EXT > 13 + case SIOCGIWSCAN: + if (g_iscan) + max_tokens = wrq->u.data.length; + else + max_tokens = IW_SCAN_MAX_DATA; + break; +#endif /* WIRELESS_EXT > 13 */ + + case SIOCSIWSPY: + token_size = sizeof(struct sockaddr); + max_tokens = IW_MAX_SPY; + break; + + case SIOCGIWSPY: + token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality); + max_tokens = IW_MAX_SPY; + break; + default: + break; + } + + if (max_tokens && wrq->u.data.pointer) { + if (wrq->u.data.length > max_tokens) + return -E2BIG; + + if (!(extra = kmalloc(max_tokens * token_size, GFP_KERNEL))) + return -ENOMEM; + + if (copy_from_user(extra, wrq->u.data.pointer, wrq->u.data.length * token_size)) { + kfree(extra); + return -EFAULT; + } + } + + info.cmd = cmd; + info.flags = 0; + + ret = handler(dev, &info, &wrq->u, extra); + + if (extra) { + if (copy_to_user(wrq->u.data.pointer, extra, wrq->u.data.length * token_size)) { + kfree(extra); + return -EFAULT; + } + + kfree(extra); + } + + return ret; +} + +/* Convert a connection status event into a connection status string. + * Returns TRUE if a matching connection status string was found. + */ +bool +wl_iw_conn_status_str(uint32 event_type, uint32 status, uint32 reason, + char* stringBuf, uint buflen) +{ + typedef struct conn_fail_event_map_t { + uint32 inEvent; /* input: event type to match */ + uint32 inStatus; /* input: event status code to match */ + uint32 inReason; /* input: event reason code to match */ + const char* outName; /* output: failure type */ + const char* outCause; /* output: failure cause */ + } conn_fail_event_map_t; + + /* Map of WLC_E events to connection failure strings */ +# define WL_IW_DONT_CARE 9999 + const conn_fail_event_map_t event_map [] = { + /* inEvent inStatus inReason */ + /* outName outCause */ + {WLC_E_SET_SSID, WLC_E_STATUS_SUCCESS, WL_IW_DONT_CARE, + "Conn", "Success"}, + {WLC_E_SET_SSID, WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE, + "Conn", "NoNetworks"}, + {WLC_E_SET_SSID, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, + "Conn", "ConfigMismatch"}, + {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_PRUNE_ENCR_MISMATCH, + "Conn", "EncrypMismatch"}, + {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_RSN_MISMATCH, + "Conn", "RsnMismatch"}, + {WLC_E_AUTH, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE, + "Conn", "AuthTimeout"}, + {WLC_E_AUTH, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, + "Conn", "AuthFail"}, + {WLC_E_AUTH, WLC_E_STATUS_NO_ACK, WL_IW_DONT_CARE, + "Conn", "AuthNoAck"}, + {WLC_E_REASSOC, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE, + "Conn", "ReassocFail"}, + {WLC_E_REASSOC, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE, + "Conn", "ReassocTimeout"}, + {WLC_E_REASSOC, WLC_E_STATUS_ABORT, WL_IW_DONT_CARE, + "Conn", "ReassocAbort"}, + {WLC_E_PSK_SUP, WLC_SUP_KEYED, WL_IW_DONT_CARE, + "Sup", "ConnSuccess"}, + {WLC_E_PSK_SUP, WL_IW_DONT_CARE, WL_IW_DONT_CARE, + "Sup", "WpaHandshakeFail"}, + {WLC_E_DEAUTH_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE, + "Conn", "Deauth"}, + {WLC_E_DISASSOC_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE, + "Conn", "DisassocInd"}, + {WLC_E_DISASSOC, WL_IW_DONT_CARE, WL_IW_DONT_CARE, + "Conn", "Disassoc"} + }; + + const char* name = ""; + const char* cause = NULL; + int i; + + /* Search the event map table for a matching event */ + for (i = 0; i < sizeof(event_map)/sizeof(event_map[0]); i++) { + const conn_fail_event_map_t* row = &event_map[i]; + if (row->inEvent == event_type && + (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) && + (row->inReason == reason || row->inReason == 
WL_IW_DONT_CARE)) { + name = row->outName; + cause = row->outCause; + break; + } + } + + /* If found, generate a connection failure string and return TRUE */ + if (cause) { + memset(stringBuf, 0, buflen); + (void)snprintf(stringBuf, buflen, "%s %s %02d %02d", name, cause, status, reason); + WL_TRACE(("Connection status: %s\n", stringBuf)); + return TRUE; + } else { + return FALSE; + } +} + +#if (WIRELESS_EXT > 14) +/* Check if we have received an event that indicates connection failure + * If so, generate a connection failure report string. + * The caller supplies a buffer to hold the generated string. + */ +static bool +wl_iw_check_conn_fail(wl_event_msg_t *e, char* stringBuf, uint buflen) +{ + uint32 event = ntoh32(e->event_type); + uint32 status = ntoh32(e->status); + uint32 reason = ntoh32(e->reason); + + if (wl_iw_conn_status_str(event, status, reason, stringBuf, buflen)) { + return TRUE; + } else + { + return FALSE; + } +} +#endif /* WIRELESS_EXT > 14 */ + +#ifndef IW_CUSTOM_MAX +#define IW_CUSTOM_MAX 256 /* size of extra buffer used for translation of events */ +#endif /* IW_CUSTOM_MAX */ + +void +wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data) +{ +#if WIRELESS_EXT > 13 + union iwreq_data wrqu; + char extra[IW_CUSTOM_MAX + 1]; + int cmd = 0; + uint32 event_type = ntoh32(e->event_type); + uint16 flags = ntoh16(e->flags); + uint32 datalen = ntoh32(e->datalen); + uint32 status = ntoh32(e->status); + + memset(&wrqu, 0, sizeof(wrqu)); + memset(extra, 0, sizeof(extra)); + + memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN); + wrqu.addr.sa_family = ARPHRD_ETHER; + + switch (event_type) { + case WLC_E_TXFAIL: + cmd = IWEVTXDROP; + break; +#if WIRELESS_EXT > 14 + case WLC_E_JOIN: + case WLC_E_ASSOC_IND: + case WLC_E_REASSOC_IND: + cmd = IWEVREGISTERED; + break; + case WLC_E_DEAUTH_IND: + case WLC_E_DISASSOC_IND: + cmd = SIOCGIWAP; + wrqu.data.length = strlen(extra); + bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN); + bzero(&extra, ETHER_ADDR_LEN); + break; + + case WLC_E_LINK: + cmd = SIOCGIWAP; + wrqu.data.length = strlen(extra); + if (!(flags & WLC_EVENT_MSG_LINK)) { + bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN); + bzero(&extra, ETHER_ADDR_LEN); + } + break; + case WLC_E_ACTION_FRAME: + cmd = IWEVCUSTOM; + if (datalen + 1 <= sizeof(extra)) { + wrqu.data.length = datalen + 1; + extra[0] = WLC_E_ACTION_FRAME; + memcpy(&extra[1], data, datalen); + WL_TRACE(("WLC_E_ACTION_FRAME len %d \n", wrqu.data.length)); + } + break; + + case WLC_E_ACTION_FRAME_COMPLETE: + cmd = IWEVCUSTOM; + if (sizeof(status) + 1 <= sizeof(extra)) { + wrqu.data.length = sizeof(status) + 1; + extra[0] = WLC_E_ACTION_FRAME_COMPLETE; + memcpy(&extra[1], &status, sizeof(status)); + WL_TRACE(("wl_iw_event status %d \n", status)); + } + break; +#endif /* WIRELESS_EXT > 14 */ +#if WIRELESS_EXT > 17 + case WLC_E_MIC_ERROR: { + struct iw_michaelmicfailure *micerrevt = (struct iw_michaelmicfailure *)&extra; + cmd = IWEVMICHAELMICFAILURE; + wrqu.data.length = sizeof(struct iw_michaelmicfailure); + if (flags & WLC_EVENT_MSG_GROUP) + micerrevt->flags |= IW_MICFAILURE_GROUP; + else + micerrevt->flags |= IW_MICFAILURE_PAIRWISE; + memcpy(micerrevt->src_addr.sa_data, &e->addr, ETHER_ADDR_LEN); + micerrevt->src_addr.sa_family = ARPHRD_ETHER; + + break; + } + + case WLC_E_ASSOC_REQ_IE: + cmd = IWEVASSOCREQIE; + wrqu.data.length = datalen; + if (datalen < sizeof(extra)) + memcpy(extra, data, datalen); + break; + + case WLC_E_ASSOC_RESP_IE: + cmd = IWEVASSOCRESPIE; + wrqu.data.length = datalen; + if (datalen < sizeof(extra)) + 
memcpy(extra, data, datalen); + break; + + case WLC_E_PMKID_CACHE: { + struct iw_pmkid_cand *iwpmkidcand = (struct iw_pmkid_cand *)&extra; + pmkid_cand_list_t *pmkcandlist; + pmkid_cand_t *pmkidcand; + int count; + + if (data == NULL) + break; + + cmd = IWEVPMKIDCAND; + pmkcandlist = data; + count = ntoh32_ua((uint8 *)&pmkcandlist->npmkid_cand); + wrqu.data.length = sizeof(struct iw_pmkid_cand); + pmkidcand = pmkcandlist->pmkid_cand; + while (count) { + bzero(iwpmkidcand, sizeof(struct iw_pmkid_cand)); + if (pmkidcand->preauth) + iwpmkidcand->flags |= IW_PMKID_CAND_PREAUTH; + bcopy(&pmkidcand->BSSID, &iwpmkidcand->bssid.sa_data, + ETHER_ADDR_LEN); + wireless_send_event(dev, cmd, &wrqu, extra); + pmkidcand++; + count--; + } + break; + } +#endif /* WIRELESS_EXT > 17 */ + + case WLC_E_SCAN_COMPLETE: +#if WIRELESS_EXT > 14 + cmd = SIOCGIWSCAN; +#endif + WL_TRACE(("event WLC_E_SCAN_COMPLETE\n")); + if ((g_iscan) && (g_iscan->sysioc_pid >= 0) && + (g_iscan->iscan_state != ISCAN_STATE_IDLE)) + up(&g_iscan->sysioc_sem); + break; + + default: + /* Cannot translate event */ + break; + } + + if (cmd) { + if (cmd == SIOCGIWSCAN) + wireless_send_event(dev, cmd, &wrqu, NULL); + else + wireless_send_event(dev, cmd, &wrqu, extra); + } + +#if WIRELESS_EXT > 14 + /* Look for WLC events that indicate a connection failure. + * If found, generate an IWEVCUSTOM event. + */ + memset(extra, 0, sizeof(extra)); + if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) { + cmd = IWEVCUSTOM; + wrqu.data.length = strlen(extra); + wireless_send_event(dev, cmd, &wrqu, extra); + } +#endif /* WIRELESS_EXT > 14 */ + +#endif /* WIRELESS_EXT > 13 */ +} + +static int wl_iw_get_wireless_stats_cbfn(void *ctx, uint8 *data, uint16 type, uint16 len) +{ + struct iw_statistics *wstats = ctx; + int res = BCME_OK; + + switch (type) { + case WL_CNT_XTLV_WLC: { + wl_cnt_wlc_t *cnt = (wl_cnt_wlc_t *)data; + if (len > sizeof(wl_cnt_wlc_t)) { + printf("counter structure length invalid! %d > %d\n", + len, (int)sizeof(wl_cnt_wlc_t)); + } + wstats->discard.nwid = 0; + wstats->discard.code = dtoh32(cnt->rxundec); + wstats->discard.fragment = dtoh32(cnt->rxfragerr); + wstats->discard.retries = dtoh32(cnt->txfail); + wstats->discard.misc = dtoh32(cnt->rxrunt) + dtoh32(cnt->rxgiant); + wstats->miss.beacon = 0; + WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n", + dtoh32(cnt->txframe), dtoh32(cnt->txbyte))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", + dtoh32(cnt->rxundec))); + WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", + dtoh32(cnt->txfail))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", + dtoh32(cnt->rxfragerr))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", + dtoh32(cnt->rxrunt))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", + dtoh32(cnt->rxgiant))); + break; + } + case WL_CNT_XTLV_CNTV_LE10_UCODE: + case WL_CNT_XTLV_LT40_UCODE_V1: + case WL_CNT_XTLV_GE40_UCODE_V1: + { + /* Offsets of rxfrmtoolong and rxbadplcp are the same in + * wl_cnt_v_le10_mcst_t, wl_cnt_lt40mcst_v1_t, and wl_cnt_ge40mcst_v1_t. + * So we can just cast to wl_cnt_v_le10_mcst_t here. + */ + wl_cnt_v_le10_mcst_t *cnt = (wl_cnt_v_le10_mcst_t *)data; + if (len != WL_CNT_MCST_STRUCT_SZ) { + printf("counter structure length mismatch! 
%d != %d\n", + len, WL_CNT_MCST_STRUCT_SZ); + } + WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", + dtoh32(cnt->rxfrmtoolong))); + WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", + dtoh32(cnt->rxbadplcp))); + BCM_REFERENCE(cnt); + break; + } + default: + WL_ERROR(("%s %d: Unsupported type %d\n", __FUNCTION__, __LINE__, type)); + break; + } + return res; +} + +int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats) +{ + int res = 0; + int phy_noise; + int rssi; + scb_val_t scb_val; +#if WIRELESS_EXT > 11 + char *cntbuf = NULL; + wl_cnt_info_t *cntinfo; + uint16 ver; + uint32 corerev = 0; +#endif /* WIRELESS_EXT > 11 */ + + phy_noise = 0; + if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise)))) + goto done; + + phy_noise = dtoh32(phy_noise); + WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n *****", phy_noise)); + + scb_val.val = 0; + if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t)))) + goto done; + + rssi = dtoh32(scb_val.val); + WL_TRACE(("wl_iw_get_wireless_stats rssi=%d ****** \n", rssi)); + if (rssi <= WL_IW_RSSI_NO_SIGNAL) + wstats->qual.qual = 0; + else if (rssi <= WL_IW_RSSI_VERY_LOW) + wstats->qual.qual = 1; + else if (rssi <= WL_IW_RSSI_LOW) + wstats->qual.qual = 2; + else if (rssi <= WL_IW_RSSI_GOOD) + wstats->qual.qual = 3; + else if (rssi <= WL_IW_RSSI_VERY_GOOD) + wstats->qual.qual = 4; + else + wstats->qual.qual = 5; + + /* Wraps to 0 if RSSI is 0 */ + wstats->qual.level = 0x100 + rssi; + wstats->qual.noise = 0x100 + phy_noise; +#if WIRELESS_EXT > 18 + wstats->qual.updated |= (IW_QUAL_ALL_UPDATED | IW_QUAL_DBM); +#else + wstats->qual.updated |= 7; +#endif /* WIRELESS_EXT > 18 */ + +#if WIRELESS_EXT > 11 + WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n *****", WL_CNTBUF_MAX_SIZE)); + + if (WL_CNTBUF_MAX_SIZE > MAX_WLIW_IOCTL_LEN) + { + WL_ERROR(("wl_iw_get_wireless_stats buffer too short %d < %d\n", + WL_CNTBUF_MAX_SIZE, MAX_WLIW_IOCTL_LEN)); + res = BCME_BUFTOOSHORT; + goto done; + } + + cntbuf = kmalloc(WL_CNTBUF_MAX_SIZE, GFP_KERNEL); + if (!cntbuf) { + res = BCME_NOMEM; + goto done; + } + + memset(cntbuf, 0, WL_CNTBUF_MAX_SIZE); + res = dev_wlc_bufvar_get(dev, "counters", cntbuf, WL_CNTBUF_MAX_SIZE); + if (res) + { + WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d ****** \n", res)); + goto done; + } + + cntinfo = (wl_cnt_info_t *)cntbuf; + cntinfo->version = dtoh16(cntinfo->version); + cntinfo->datalen = dtoh16(cntinfo->datalen); + ver = cntinfo->version; + if (ver > WL_CNT_T_VERSION) { + WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n", + WL_CNT_T_VERSION, ver)); + res = BCME_VERSION; + goto done; + } + + if (ver == WL_CNT_VERSION_11) { + wlc_rev_info_t revinfo; + memset(&revinfo, 0, sizeof(revinfo)); + res = dev_wlc_ioctl(dev, WLC_GET_REVINFO, &revinfo, sizeof(revinfo)); + if (res) { + WL_ERROR(("%s: WLC_GET_REVINFO failed %d\n", __FUNCTION__, res)); + goto done; + } + corerev = dtoh32(revinfo.corerev); + } + + res = wl_cntbuf_to_xtlv_format(NULL, cntinfo, WL_CNTBUF_MAX_SIZE, corerev); + if (res) { + WL_ERROR(("%s: wl_cntbuf_to_xtlv_format failed %d\n", __FUNCTION__, res)); + goto done; + } + + if ((res = bcm_unpack_xtlv_buf(wstats, cntinfo->data, cntinfo->datalen, + BCM_XTLV_OPTION_ALIGN32, wl_iw_get_wireless_stats_cbfn))) { + goto done; + } +#endif /* WIRELESS_EXT > 11 */ + +done: +#if WIRELESS_EXT > 11 + if (cntbuf) { + kfree(cntbuf); + } +#endif /* WIRELESS_EXT > 11 */ + return res; +} + +static void +#if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) +wl_iw_timerfunc(struct timer_list *t) +#else +wl_iw_timerfunc(ulong data) +#endif +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + iscan_info_t *iscan = from_timer(iscan, t, timer); +#else + iscan_info_t *iscan = (iscan_info_t *)data; +#endif + iscan->timer_on = 0; + if (iscan->iscan_state != ISCAN_STATE_IDLE) { + WL_TRACE(("timer trigger\n")); + up(&iscan->sysioc_sem); + } +} + +static void +wl_iw_set_event_mask(struct net_device *dev) +{ + char eventmask[WL_EVENTING_MASK_LEN]; + char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ + + dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf)); + bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN); + setbit(eventmask, WLC_E_SCAN_COMPLETE); + dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, + iovbuf, sizeof(iovbuf)); + +} + +static int +wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid) +{ + int err = 0; + + memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN); + params->bss_type = DOT11_BSSTYPE_ANY; + params->scan_type = 0; + params->nprobes = -1; + params->active_time = -1; + params->passive_time = -1; + params->home_time = -1; + params->channel_num = 0; + + params->nprobes = htod32(params->nprobes); + params->active_time = htod32(params->active_time); + params->passive_time = htod32(params->passive_time); + params->home_time = htod32(params->home_time); + if (ssid && ssid->SSID_len) + memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t)); + + return err; +} + +static int +wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action) +{ + int params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_iscan_params_t, params)); + wl_iscan_params_t *params; + int err = 0; + + if (ssid && ssid->SSID_len) { + params_size += sizeof(wlc_ssid_t); + } + params = (wl_iscan_params_t*)kmalloc(params_size, GFP_KERNEL); + if (params == NULL) { + return -ENOMEM; + } + memset(params, 0, params_size); + ASSERT(params_size < WLC_IOCTL_SMLEN); + + err = wl_iw_iscan_prep(&params->params, ssid); + + if (!err) { + params->version = htod32(ISCAN_REQ_VERSION); + params->action = htod16(action); + params->scan_duration = htod16(0); + + /* params_size += OFFSETOF(wl_iscan_params_t, params); */ + (void) dev_iw_iovar_setbuf(iscan->dev, "iscan", params, params_size, + iscan->ioctlbuf, WLC_IOCTL_SMLEN); + } + + kfree(params); + return err; +} + +static uint32 +wl_iw_iscan_get(iscan_info_t *iscan) +{ + iscan_buf_t * buf; + iscan_buf_t * ptr; + wl_iscan_results_t * list_buf; + wl_iscan_results_t list; + wl_scan_results_t *results; + uint32 status; + + /* buffers are allocated on demand */ + if (iscan->list_cur) { + buf = iscan->list_cur; + iscan->list_cur = buf->next; + } + else { + buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL); + if (!buf) + return WL_SCAN_RESULTS_ABORTED; + buf->next = NULL; + if (!iscan->list_hdr) + iscan->list_hdr = buf; + else { + ptr = iscan->list_hdr; + while (ptr->next) { + ptr = ptr->next; + } + ptr->next = buf; + } + } + memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN); + list_buf = (wl_iscan_results_t*)buf->iscan_buf; + results = &list_buf->results; + results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE; + results->version = 0; + results->count = 0; + + memset(&list, 0, sizeof(list)); + list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN); + (void) dev_iw_iovar_getbuf( + iscan->dev, + "iscanresults", + &list, + WL_ISCAN_RESULTS_FIXED_SIZE, + buf->iscan_buf, + WLC_IW_ISCAN_MAXLEN); + results->buflen = dtoh32(results->buflen); + results->version 
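/*
 * Editor's note: in wl_iw_iscan_prep() above, the -1 written into nprobes,
 * active_time, passive_time and home_time tells the firmware to use its own
 * defaults; only non-negative values override them. A sketch of a caller
 * forcing a shorter active dwell (the 40 ms value is an example, not a
 * recommendation):
 *
 *	wl_scan_params_t p;
 *	memset(&p, 0, sizeof(p));
 *	wl_iw_iscan_prep(&p, NULL);
 *	p.active_time = htod32(40);
 */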
= dtoh32(results->version); + results->count = dtoh32(results->count); + WL_TRACE(("results->count = %d\n", results->count)); + + WL_TRACE(("results->buflen = %d\n", results->buflen)); + status = dtoh32(list_buf->status); + return status; +} + +static void wl_iw_send_scan_complete(iscan_info_t *iscan) +{ + union iwreq_data wrqu; + + memset(&wrqu, 0, sizeof(wrqu)); + + /* wext expects to get no data for SIOCGIWSCAN Event */ + wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL); +} + +static int +_iscan_sysioc_thread(void *data) +{ + uint32 status; + iscan_info_t *iscan = (iscan_info_t *)data; + + DAEMONIZE("iscan_sysioc"); + + status = WL_SCAN_RESULTS_PARTIAL; + while (down_interruptible(&iscan->sysioc_sem) == 0) { + if (iscan->timer_on) { + del_timer(&iscan->timer); + iscan->timer_on = 0; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_lock(); +#endif + status = wl_iw_iscan_get(iscan); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_unlock(); +#endif + + switch (status) { + case WL_SCAN_RESULTS_PARTIAL: + WL_TRACE(("iscanresults incomplete\n")); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_lock(); +#endif + /* make sure our buffer size is enough before going next round */ + wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) + rtnl_unlock(); +#endif + /* Reschedule the timer */ + iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms); + add_timer(&iscan->timer); + iscan->timer_on = 1; + break; + case WL_SCAN_RESULTS_SUCCESS: + WL_TRACE(("iscanresults complete\n")); + iscan->iscan_state = ISCAN_STATE_IDLE; + wl_iw_send_scan_complete(iscan); + break; + case WL_SCAN_RESULTS_PENDING: + WL_TRACE(("iscanresults pending\n")); + /* Reschedule the timer */ + iscan->timer.expires = jiffies + msecs_to_jiffies(iscan->timer_ms); + add_timer(&iscan->timer); + iscan->timer_on = 1; + break; + case WL_SCAN_RESULTS_ABORTED: + WL_TRACE(("iscanresults aborted\n")); + iscan->iscan_state = ISCAN_STATE_IDLE; + wl_iw_send_scan_complete(iscan); + break; + default: + WL_TRACE(("iscanresults returned unknown status %d\n", status)); + break; + } + } + complete_and_exit(&iscan->sysioc_exited, 0); +} + +int +wl_iw_attach(struct net_device *dev, void * dhdp) +{ + iscan_info_t *iscan = NULL; + + if (!dev) + return 0; + + iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL); + if (!iscan) + return -ENOMEM; + memset(iscan, 0, sizeof(iscan_info_t)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + iscan->kthread = NULL; +#endif + iscan->sysioc_pid = -1; + /* we only care about main interface so save a global here */ + g_iscan = iscan; + iscan->dev = dev; + iscan->iscan_state = ISCAN_STATE_IDLE; + + + /* Set up the timer */ + iscan->timer_ms = 2000; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) + timer_setup(&iscan->timer, wl_iw_timerfunc, 0); +#else + init_timer(&iscan->timer); + iscan->timer.data = (ulong)iscan; + iscan->timer.function = wl_iw_timerfunc; +#endif + + sema_init(&iscan->sysioc_sem, 0); + init_completion(&iscan->sysioc_exited); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) + iscan->kthread = kthread_run(_iscan_sysioc_thread, iscan, "iscan_sysioc"); + iscan->sysioc_pid = iscan->kthread->pid; +#else + iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0); +#endif + if (iscan->sysioc_pid < 0) + return -ENOMEM; + return 0; +} + +void wl_iw_detach(void) +{ + iscan_buf_t *buf; + iscan_info_t *iscan = g_iscan; + if (!iscan) + return; + if (iscan->sysioc_pid >= 0) { + 
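/*
 * Editor's note: the shutdown handshake between wl_iw_detach() here and
 * _iscan_sysioc_thread() above is signal-driven, not semaphore-driven:
 * KILL_PROC() delivers SIGTERM, down_interruptible() then returns non-zero,
 * the while loop falls through and complete_and_exit() wakes the
 * wait_for_completion() below. The same pattern, reduced to a sketch:
 *
 *	while (down_interruptible(&sem) == 0)	(thread side)
 *		do_work();
 *	complete_and_exit(&exited, 0);
 *
 *	KILL_PROC(pid, SIGTERM);		(detach side)
 *	wait_for_completion(&exited);
 */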
KILL_PROC(iscan->sysioc_pid, SIGTERM); + wait_for_completion(&iscan->sysioc_exited); + } + + while (iscan->list_hdr) { + buf = iscan->list_hdr->next; + kfree(iscan->list_hdr); + iscan->list_hdr = buf; + } + kfree(iscan); + g_iscan = NULL; +} + +#endif /* USE_IW */ diff --git a/drivers/net/wireless/bcmdhd/wl_iw.h b/drivers/net/wireless/bcmdhd/wl_iw.h new file mode 100644 index 000000000000..6b86bb13a32c --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_iw.h @@ -0,0 +1,164 @@ +/* + * Linux Wireless Extensions support + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_iw.h 514727 2014-11-12 03:02:48Z $ + */ + +#ifndef _wl_iw_h_ +#define _wl_iw_h_ + +#include + +#include +#include +#include + +#define WL_SCAN_PARAMS_SSID_MAX 10 +#define GET_SSID "SSID=" +#define GET_CHANNEL "CH=" +#define GET_NPROBE "NPROBE=" +#define GET_ACTIVE_ASSOC_DWELL "ACTIVE=" +#define GET_PASSIVE_ASSOC_DWELL "PASSIVE=" +#define GET_HOME_DWELL "HOME=" +#define GET_SCAN_TYPE "TYPE=" + +#define BAND_GET_CMD "GETBAND" +#define BAND_SET_CMD "SETBAND" +#define DTIM_SKIP_GET_CMD "DTIMSKIPGET" +#define DTIM_SKIP_SET_CMD "DTIMSKIPSET" +#define SETSUSPEND_CMD "SETSUSPENDOPT" +#define PNOSSIDCLR_SET_CMD "PNOSSIDCLR" +/* Lin - Is the extra space needed? */ +#define PNOSETUP_SET_CMD "PNOSETUP " /* TLV command has extra end space */ +#define PNOENABLE_SET_CMD "PNOFORCE" +#define PNODEBUG_SET_CMD "PNODEBUG" +#define TXPOWER_SET_CMD "TXPOWER" + +#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] +#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" + +/* Structure to keep global parameters */ +typedef struct wl_iw_extra_params { + int target_channel; /* target channel */ +} wl_iw_extra_params_t; + +struct cntry_locales_custom { + char iso_abbrev[WLC_CNTRY_BUF_SZ]; /* ISO 3166-1 country abbreviation */ + char custom_locale[WLC_CNTRY_BUF_SZ]; /* Custom firmware locale */ + int32 custom_locale_rev; /* Custom locale revision, default -1 */ +}; +/* ============================================== */ +/* Defines from wlc_pub.h */ +#define WL_IW_RSSI_MINVAL -200 /* Low value, e.g. 
for forcing roam */ +#define WL_IW_RSSI_NO_SIGNAL -91 /* NDIS RSSI link quality cutoffs */ +#define WL_IW_RSSI_VERY_LOW -80 /* Very low quality cutoffs */ +#define WL_IW_RSSI_LOW -70 /* Low quality cutoffs */ +#define WL_IW_RSSI_GOOD -68 /* Good quality cutoffs */ +#define WL_IW_RSSI_VERY_GOOD -58 /* Very good quality cutoffs */ +#define WL_IW_RSSI_EXCELLENT -57 /* Excellent quality cutoffs */ +#define WL_IW_RSSI_INVALID 0 /* invalid RSSI value */ +#define MAX_WX_STRING 80 +#define SSID_FMT_BUF_LEN ((4 * 32) + 1) +#define isprint(c) bcm_isprint(c) +#define WL_IW_SET_ACTIVE_SCAN (SIOCIWFIRSTPRIV+1) +#define WL_IW_GET_RSSI (SIOCIWFIRSTPRIV+3) +#define WL_IW_SET_PASSIVE_SCAN (SIOCIWFIRSTPRIV+5) +#define WL_IW_GET_LINK_SPEED (SIOCIWFIRSTPRIV+7) +#define WL_IW_GET_CURR_MACADDR (SIOCIWFIRSTPRIV+9) +#define WL_IW_SET_STOP (SIOCIWFIRSTPRIV+11) +#define WL_IW_SET_START (SIOCIWFIRSTPRIV+13) + +#define G_SCAN_RESULTS 8*1024 +#define WE_ADD_EVENT_FIX 0x80 +#define G_WLAN_SET_ON 0 +#define G_WLAN_SET_OFF 1 + + +typedef struct wl_iw { + char nickname[IW_ESSID_MAX_SIZE]; + + struct iw_statistics wstats; + + int spy_num; + uint32 pwsec; /* pairwise wsec setting */ + uint32 gwsec; /* group wsec setting */ + bool privacy_invoked; /* IW_AUTH_PRIVACY_INVOKED setting */ + struct ether_addr spy_addr[IW_MAX_SPY]; + struct iw_quality spy_qual[IW_MAX_SPY]; + void *wlinfo; +} wl_iw_t; + +struct wl_ctrl { + struct timer_list *timer; + struct net_device *dev; + long sysioc_pid; + struct semaphore sysioc_sem; + struct completion sysioc_exited; +}; + + +#if WIRELESS_EXT > 12 +#include +extern const struct iw_handler_def wl_iw_handler_def; +#endif /* WIRELESS_EXT > 12 */ + +extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data); +extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats); +int wl_iw_attach(struct net_device *dev, void * dhdp); +int wl_iw_send_priv_event(struct net_device *dev, char *flag); + +void wl_iw_detach(void); + +#define CSCAN_COMMAND "CSCAN " +#define CSCAN_TLV_PREFIX 'S' +#define CSCAN_TLV_VERSION 1 +#define CSCAN_TLV_SUBVERSION 0 +#define CSCAN_TLV_TYPE_SSID_IE 'S' +#define CSCAN_TLV_TYPE_CHANNEL_IE 'C' +#define CSCAN_TLV_TYPE_NPROBE_IE 'N' +#define CSCAN_TLV_TYPE_ACTIVE_IE 'A' +#define CSCAN_TLV_TYPE_PASSIVE_IE 'P' +#define CSCAN_TLV_TYPE_HOME_IE 'H' +#define CSCAN_TLV_TYPE_STYPE_IE 'T' + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) +#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \ + iwe_stream_add_event(info, stream, ends, iwe, extra) +#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \ + iwe_stream_add_value(info, event, value, ends, iwe, event_len) +#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \ + iwe_stream_add_point(info, stream, ends, iwe, extra) +#else +#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \ + iwe_stream_add_event(stream, ends, iwe, extra) +#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \ + iwe_stream_add_value(event, value, ends, iwe, event_len) +#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \ + iwe_stream_add_point(stream, ends, iwe, extra) +#endif + +#endif /* _wl_iw_h_ */ diff --git a/drivers/net/wireless/bcmdhd/wl_linux_mon.c b/drivers/net/wireless/bcmdhd/wl_linux_mon.c new file mode 100644 index 000000000000..65e624092e80 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_linux_mon.c @@ -0,0 +1,406 @@ +/* + * Broadcom Dongle Host 
Driver (DHD), Linux monitor network interface + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_linux_mon.c 514727 2014-11-12 03:02:48Z $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +typedef enum monitor_states +{ + MONITOR_STATE_DEINIT = 0x0, + MONITOR_STATE_INIT = 0x1, + MONITOR_STATE_INTERFACE_ADDED = 0x2, + MONITOR_STATE_INTERFACE_DELETED = 0x4 +} monitor_states_t; +int dhd_add_monitor(char *name, struct net_device **new_ndev); +extern int dhd_start_xmit(struct sk_buff *skb, struct net_device *net); +int dhd_del_monitor(struct net_device *ndev); +int dhd_monitor_init(void *dhd_pub); +int dhd_monitor_uninit(void); + +/** + * Local declarations and definitions (not exposed) + */ +#ifndef DHD_MAX_IFS +#define DHD_MAX_IFS 16 +#endif +#define MON_PRINT(format, ...) printk("DHD-MON: %s " format, __func__, ##__VA_ARGS__) +#define MON_TRACE MON_PRINT + +typedef struct monitor_interface { + int radiotap_enabled; + struct net_device* real_ndev; /* The real interface that the monitor is on */ + struct net_device* mon_ndev; +} monitor_interface; + +typedef struct dhd_linux_monitor { + void *dhd_pub; + monitor_states_t monitor_state; + monitor_interface mon_if[DHD_MAX_IFS]; + struct mutex lock; /* lock to protect mon_if */ +} dhd_linux_monitor_t; + +static dhd_linux_monitor_t g_monitor; + +static struct net_device* lookup_real_netdev(char *name); +static monitor_interface* ndev_to_monif(struct net_device *ndev); +static int dhd_mon_if_open(struct net_device *ndev); +static int dhd_mon_if_stop(struct net_device *ndev); +static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev); +static void dhd_mon_if_set_multicast_list(struct net_device *ndev); +static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr); + +static const struct net_device_ops dhd_mon_if_ops = { + .ndo_open = dhd_mon_if_open, + .ndo_stop = dhd_mon_if_stop, + .ndo_start_xmit = dhd_mon_if_subif_start_xmit, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) + .ndo_set_rx_mode = dhd_mon_if_set_multicast_list, +#else + .ndo_set_multicast_list = dhd_mon_if_set_multicast_list, +#endif + .ndo_set_mac_address = dhd_mon_if_change_mac, +}; + +/** + * Local static function definitions + */ + +/* Look up dhd's net device table to find a match (e.g. 
interface "eth0" is a match for "mon.eth0" + * "p2p-eth0-0" is a match for "mon.p2p-eth0-0") + */ +static struct net_device* lookup_real_netdev(char *name) +{ + struct net_device *ndev_found = NULL; + + int i; + int len = 0; + int last_name_len = 0; + struct net_device *ndev; + + /* We need to find interface "p2p-p2p-0" corresponding to monitor interface "mon-p2p-0". + * Once the mon iface name reaches IFNAMSIZ, it is reset to p2p0-0 and the corresponding mon + * iface would be mon-p2p0-0. + */ + for (i = 0; i < DHD_MAX_IFS; i++) { + ndev = dhd_idx2net(g_monitor.dhd_pub, i); + + /* Skip "p2p" and look for "-p2p0-x" in the monitor interface name. If it + * matches, then this netdev is the corresponding real_netdev. + */ + if (ndev && strstr(ndev->name, "p2p-p2p0")) { + len = strlen("p2p"); + } else { + /* if p2p- is not present, then IFNAMSIZ has been reached and the name + * would have been reset. In this case, look for p2p0-x in mon-p2p0-x + */ + len = 0; + } + if (ndev && strstr(name, (ndev->name + len))) { + if (strlen(ndev->name) > last_name_len) { + ndev_found = ndev; + last_name_len = strlen(ndev->name); + } + } + } + + return ndev_found; +} + +static monitor_interface* ndev_to_monif(struct net_device *ndev) +{ + int i; + + for (i = 0; i < DHD_MAX_IFS; i++) { + if (g_monitor.mon_if[i].mon_ndev == ndev) + return &g_monitor.mon_if[i]; + } + + return NULL; +} + +static int dhd_mon_if_open(struct net_device *ndev) +{ + int ret = 0; + + MON_PRINT("enter\n"); + return ret; +} + +static int dhd_mon_if_stop(struct net_device *ndev) +{ + int ret = 0; + + MON_PRINT("enter\n"); + return ret; +} + +static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + int ret = 0; + int rtap_len; + int qos_len = 0; + int dot11_hdr_len = 24; + int snap_len = 6; + unsigned char *pdata; + unsigned short frame_ctl; + unsigned char src_mac_addr[6]; + unsigned char dst_mac_addr[6]; + struct ieee80211_hdr *dot11_hdr; + struct ieee80211_radiotap_header *rtap_hdr; + monitor_interface* mon_if; + + MON_PRINT("enter\n"); + + mon_if = ndev_to_monif(ndev); + if (mon_if == NULL || mon_if->real_ndev == NULL) { + MON_PRINT(" cannot find matched net dev, skip the packet\n"); + goto fail; + } + + if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) + goto fail; + + rtap_hdr = (struct ieee80211_radiotap_header *)skb->data; + if (unlikely(rtap_hdr->it_version)) + goto fail; + + rtap_len = ieee80211_get_radiotap_len(skb->data); + if (unlikely(skb->len < rtap_len)) + goto fail; + + MON_PRINT("radiotap len (should be 14): %d\n", rtap_len); + + /* Skip the radiotap header */ + skb_pull(skb, rtap_len); + + dot11_hdr = (struct ieee80211_hdr *)skb->data; + frame_ctl = le16_to_cpu(dot11_hdr->frame_control); + /* Only process data frames */ + if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) { + /* Check if this is a Wireless Distribution System (WDS) frame + * which has 4 MAC addresses + */ + if (dot11_hdr->frame_control & 0x0080) + qos_len = 2; + if ((dot11_hdr->frame_control & 0x0300) == 0x0300) + dot11_hdr_len += 6; + + memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr)); + memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr)); + + /* Skip the 802.11 header, QoS (if any) and SNAP, but leave space + * for two MAC addresses + */ + skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2); + pdata = (unsigned char*)skb->data; + memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr)); + memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, 
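/*
 * Editor's note: the skb_pull() above is easier to follow with the arithmetic
 * spelled out for the common case (QoS data frame, no 4-address WDS bits):
 * dot11_hdr_len = 24, qos_len = 2, snap_len = 6 and 2 * sizeof(src_mac_addr)
 * = 12, so the pull is 24 + 2 + 6 - 12 = 20 bytes. That leaves exactly 12
 * bytes of headroom at the front of the payload, which the two memcpy()
 * calls here fill with the destination and source MACs to rebuild an
 * Ethernet-style header before handing the skb to dhd_start_xmit().
 */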
sizeof(src_mac_addr)); + PKTSETPRIO(skb, 0); + + MON_PRINT("if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name); + + /* Use the real net device to transmit the packet */ + ret = dhd_start_xmit(skb, mon_if->real_ndev); + + return ret; + } +fail: + dev_kfree_skb(skb); + return 0; +} + +static void dhd_mon_if_set_multicast_list(struct net_device *ndev) +{ + monitor_interface* mon_if; + + mon_if = ndev_to_monif(ndev); + if (mon_if == NULL || mon_if->real_ndev == NULL) { + MON_PRINT(" cannot find matched net dev, skip the packet\n"); + } else { + MON_PRINT("enter, if name: %s, matched if name %s\n", + ndev->name, mon_if->real_ndev->name); + } +} + +static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr) +{ + int ret = 0; + monitor_interface* mon_if; + + mon_if = ndev_to_monif(ndev); + if (mon_if == NULL || mon_if->real_ndev == NULL) { + MON_PRINT(" cannot find matched net dev, skip the packet\n"); + } else { + MON_PRINT("enter, if name: %s, matched if name %s\n", + ndev->name, mon_if->real_ndev->name); + } + return ret; +} + +/** + * Global function definitions (declared in dhd_linux_mon.h) + */ + +int dhd_add_monitor(char *name, struct net_device **new_ndev) +{ + int i; + int idx = -1; + int ret = 0; + struct net_device* ndev = NULL; + dhd_linux_monitor_t **dhd_mon; + + mutex_lock(&g_monitor.lock); + + MON_TRACE("enter, if name: %s\n", name); + if (!name || !new_ndev) { + MON_PRINT("invalid parameters\n"); + ret = -EINVAL; + goto out; + } + + /* + * Find a vacancy + */ + for (i = 0; i < DHD_MAX_IFS; i++) + if (g_monitor.mon_if[i].mon_ndev == NULL) { + idx = i; + break; + } + if (idx == -1) { + MON_PRINT("exceeds maximum interfaces\n"); + ret = -EFAULT; + goto out; + } + + ndev = alloc_etherdev(sizeof(dhd_linux_monitor_t*)); + if (!ndev) { + MON_PRINT("failed to allocate memory\n"); + ret = -ENOMEM; + goto out; + } + + ndev->type = ARPHRD_IEEE80211_RADIOTAP; + strncpy(ndev->name, name, IFNAMSIZ); + ndev->name[IFNAMSIZ - 1] = 0; + ndev->netdev_ops = &dhd_mon_if_ops; + + ret = register_netdevice(ndev); + if (ret) { + MON_PRINT(" register_netdevice failed (%d)\n", ret); + goto out; + } + + *new_ndev = ndev; + g_monitor.mon_if[idx].radiotap_enabled = TRUE; + g_monitor.mon_if[idx].mon_ndev = ndev; + g_monitor.mon_if[idx].real_ndev = lookup_real_netdev(name); + dhd_mon = (dhd_linux_monitor_t **)netdev_priv(ndev); + *dhd_mon = &g_monitor; + g_monitor.monitor_state = MONITOR_STATE_INTERFACE_ADDED; + MON_PRINT("net device returned: 0x%p\n", ndev); + MON_PRINT("found a matched net device, name %s\n", g_monitor.mon_if[idx].real_ndev->name); + +out: + if (ret && ndev) + free_netdev(ndev); + + mutex_unlock(&g_monitor.lock); + return ret; + +} + +int dhd_del_monitor(struct net_device *ndev) +{ + int i; + if (!ndev) + return -EINVAL; + mutex_lock(&g_monitor.lock); + for (i = 0; i < DHD_MAX_IFS; i++) { + if (g_monitor.mon_if[i].mon_ndev == ndev || + g_monitor.mon_if[i].real_ndev == ndev) { + + g_monitor.mon_if[i].real_ndev = NULL; + unregister_netdevice(g_monitor.mon_if[i].mon_ndev); + free_netdev(g_monitor.mon_if[i].mon_ndev); + g_monitor.mon_if[i].mon_ndev = NULL; + g_monitor.monitor_state = MONITOR_STATE_INTERFACE_DELETED; + break; + } + } + + if (g_monitor.monitor_state != MONITOR_STATE_INTERFACE_DELETED) + MON_PRINT("IF not found in monitor array, is this a monitor IF? 
0x%p\n", ndev); + mutex_unlock(&g_monitor.lock); + + return 0; +} + +int dhd_monitor_init(void *dhd_pub) +{ + if (g_monitor.monitor_state == MONITOR_STATE_DEINIT) { + g_monitor.dhd_pub = dhd_pub; + mutex_init(&g_monitor.lock); + g_monitor.monitor_state = MONITOR_STATE_INIT; + } + return 0; +} + +int dhd_monitor_uninit(void) +{ + int i; + struct net_device *ndev; + mutex_lock(&g_monitor.lock); + if (g_monitor.monitor_state != MONITOR_STATE_DEINIT) { + for (i = 0; i < DHD_MAX_IFS; i++) { + ndev = g_monitor.mon_if[i].mon_ndev; + if (ndev) { + unregister_netdevice(ndev); + free_netdev(ndev); + g_monitor.mon_if[i].real_ndev = NULL; + g_monitor.mon_if[i].mon_ndev = NULL; + } + } + g_monitor.monitor_state = MONITOR_STATE_DEINIT; + } + mutex_unlock(&g_monitor.lock); + return 0; +} diff --git a/drivers/net/wireless/bcmdhd/wl_roam.c b/drivers/net/wireless/bcmdhd/wl_roam.c new file mode 100644 index 000000000000..bddc755f8204 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wl_roam.c @@ -0,0 +1,28 @@ +/* + * Linux roam cache + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wl_roam.c 599089 2015-11-12 10:41:33Z $ + */ diff --git a/drivers/net/wireless/bcmdhd/wldev_common.c b/drivers/net/wireless/bcmdhd/wldev_common.c new file mode 100644 index 000000000000..8478de93b7c8 --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wldev_common.c @@ -0,0 +1,456 @@ +/* + * Common function shared by Linux WEXT, cfg80211 and p2p drivers + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. 
+ * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wldev_common.c 585478 2015-09-10 13:33:58Z $ + */ + +#include +#include +#include +#include + +#include +#include + +#define htod32(i) (i) +#define htod16(i) (i) +#define dtoh32(i) (i) +#define dtoh16(i) (i) +#define htodchanspec(i) (i) +#define dtohchanspec(i) (i) + +#define WLDEV_ERROR(args) \ + do { \ + printk(KERN_ERR "WLDEV-ERROR) "); \ + printk args; \ + } while (0) + +#define WLDEV_INFO(args) \ + do { \ + printk(KERN_INFO "WLDEV-INFO) "); \ + printk args; \ + } while (0) + +extern int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd); + +s32 wldev_ioctl( + struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set) +{ + s32 ret = 0; + struct wl_ioctl ioc; + + + memset(&ioc, 0, sizeof(ioc)); + ioc.cmd = cmd; + ioc.buf = arg; + ioc.len = len; + ioc.set = set; + + ret = dhd_ioctl_entry_local(dev, &ioc, cmd); + + return ret; +} + +/* Format an iovar buffer, not bsscfg indexed. The bsscfg index will be + * taken care of in dhd_ioctl_entry. Internal use only, not exposed to + * wl_iw, wl_cfg80211 and wl_cfgp2p + */ +static s32 wldev_mkiovar( + const s8 *iovar_name, s8 *param, s32 paramlen, + s8 *iovar_buf, u32 buflen) +{ + s32 iolen = 0; + + iolen = bcm_mkiovar(iovar_name, param, paramlen, iovar_buf, buflen); + return iolen; +} + +s32 wldev_iovar_getbuf( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync) +{ + s32 ret = 0; + if (buf_sync) { + mutex_lock(buf_sync); + } + wldev_mkiovar(iovar_name, param, paramlen, buf, buflen); + ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE); + if (buf_sync) + mutex_unlock(buf_sync); + return ret; +} + + +s32 wldev_iovar_setbuf( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync) +{ + s32 ret = 0; + s32 iovar_len; + if (buf_sync) { + mutex_lock(buf_sync); + } + iovar_len = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen); + if (iovar_len > 0) + ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE); + else + ret = BCME_BUFTOOSHORT; + + if (buf_sync) + mutex_unlock(buf_sync); + return ret; +} + +s32 wldev_iovar_setint( + struct net_device *dev, s8 *iovar, s32 val) +{ + s8 iovar_buf[WLC_IOCTL_SMLEN]; + + val = htod32(val); + memset(iovar_buf, 0, sizeof(iovar_buf)); + return wldev_iovar_setbuf(dev, iovar, &val, sizeof(val), iovar_buf, + sizeof(iovar_buf), NULL); +} + + +s32 wldev_iovar_getint( + struct net_device *dev, s8 *iovar, s32 *pval) +{ + s8 iovar_buf[WLC_IOCTL_SMLEN]; + s32 err; + + memset(iovar_buf, 0, sizeof(iovar_buf)); + err = wldev_iovar_getbuf(dev, iovar, pval, sizeof(*pval), iovar_buf, + sizeof(iovar_buf), NULL); + if (err == 0) + { + memcpy(pval, iovar_buf, sizeof(*pval)); + *pval = dtoh32(*pval); + } + return err; +} + +/** Format a bsscfg indexed iovar buffer. The bsscfg index will be + * taken care of in dhd_ioctl_entry. 
Internal use only, not exposed to + * wl_iw, wl_cfg80211 and wl_cfgp2p + */ +s32 wldev_mkiovar_bsscfg( + const s8 *iovar_name, s8 *param, s32 paramlen, + s8 *iovar_buf, s32 buflen, s32 bssidx) +{ + const s8 *prefix = "bsscfg:"; + s8 *p; + u32 prefixlen; + u32 namelen; + u32 iolen; + + if (bssidx == 0) { + return wldev_mkiovar(iovar_name, param, paramlen, + iovar_buf, buflen); + } + + prefixlen = (u32) strlen(prefix); /* length of bsscfg prefix */ + namelen = (u32) strlen(iovar_name) + 1; /* length of iovar name + null */ + iolen = prefixlen + namelen + sizeof(u32) + paramlen; + + if (buflen < 0 || iolen > (u32)buflen) + { + WLDEV_ERROR(("%s: buffer is too short\n", __FUNCTION__)); + return BCME_BUFTOOSHORT; + } + + p = (s8 *)iovar_buf; + + /* copy prefix, no null */ + memcpy(p, prefix, prefixlen); + p += prefixlen; + + /* copy iovar name including null */ + memcpy(p, iovar_name, namelen); + p += namelen; + + /* bss config index as first param */ + bssidx = htod32(bssidx); + memcpy(p, &bssidx, sizeof(u32)); + p += sizeof(u32); + + /* parameter buffer follows */ + if (paramlen) + memcpy(p, param, paramlen); + + return iolen; + +} + +s32 wldev_iovar_getbuf_bsscfg( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync) +{ + s32 ret = 0; + if (buf_sync) { + mutex_lock(buf_sync); + } + + wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx); + ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE); + if (buf_sync) { + mutex_unlock(buf_sync); + } + return ret; + +} + +s32 wldev_iovar_setbuf_bsscfg( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync) +{ + s32 ret = 0; + s32 iovar_len; + if (buf_sync) { + mutex_lock(buf_sync); + } + iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx); + if (iovar_len > 0) + ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE); + else { + ret = BCME_BUFTOOSHORT; + } + + if (buf_sync) { + mutex_unlock(buf_sync); + } + return ret; +} + +s32 wldev_iovar_setint_bsscfg( + struct net_device *dev, s8 *iovar, s32 val, s32 bssidx) +{ + s8 iovar_buf[WLC_IOCTL_SMLEN]; + + val = htod32(val); + memset(iovar_buf, 0, sizeof(iovar_buf)); + return wldev_iovar_setbuf_bsscfg(dev, iovar, &val, sizeof(val), iovar_buf, + sizeof(iovar_buf), bssidx, NULL); +} + + +s32 wldev_iovar_getint_bsscfg( + struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx) +{ + s8 iovar_buf[WLC_IOCTL_SMLEN]; + s32 err; + + memset(iovar_buf, 0, sizeof(iovar_buf)); + err = wldev_iovar_getbuf_bsscfg(dev, iovar, pval, sizeof(*pval), iovar_buf, + sizeof(iovar_buf), bssidx, NULL); + if (err == 0) + { + memcpy(pval, iovar_buf, sizeof(*pval)); + *pval = dtoh32(*pval); + } + return err; +} + +int wldev_get_link_speed( + struct net_device *dev, int *plink_speed) +{ + int error; + + if (!plink_speed) + return -ENOMEM; + error = wldev_ioctl(dev, WLC_GET_RATE, plink_speed, sizeof(int), 0); + if (unlikely(error)) + return error; + + /* Convert internal 500Kbps units to Kbps */ + *plink_speed *= 500; + return error; +} + +int wldev_get_rssi( + struct net_device *dev, scb_val_t *scb_val) +{ + int error; + + if (!scb_val) + return -ENOMEM; + + error = wldev_ioctl(dev, WLC_GET_RSSI, scb_val, sizeof(scb_val_t), 0); + if (unlikely(error)) + return error; + + return error; +} + +int wldev_get_ssid( + struct net_device *dev, wlc_ssid_t *pssid) +{ + int error; + + if (!pssid) + return -ENOMEM; + error = wldev_ioctl(dev, 
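/*
 * Editor's note: wldev_mkiovar_bsscfg() above packs, in order, the literal
 * prefix "bsscfg:" (without NUL), the iovar name (with NUL), the bss index
 * as a 32-bit host-to-dongle integer, then the raw parameter bytes. For a
 * hypothetical 4-byte write to bss index 1 - "mpc" is only an example name,
 * nothing this file defines - the buffer lays out as:
 *
 *	'b' 's' 's' 'c' 'f' 'g' ':'	prefix, 7 bytes, no NUL
 *	'm' 'p' 'c' 0x00		iovar name + NUL
 *	0x01 0x00 0x00 0x00		bssidx = htod32(1)
 *	0x00 0x00 0x00 0x00		4-byte parameter payload
 */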
WLC_GET_SSID, pssid, sizeof(wlc_ssid_t), 0); + if (unlikely(error)) + return error; + pssid->SSID_len = dtoh32(pssid->SSID_len); + return error; +} + +int wldev_get_band( + struct net_device *dev, uint *pband) +{ + int error; + + error = wldev_ioctl(dev, WLC_GET_BAND, pband, sizeof(uint), 0); + return error; +} + +int wldev_set_band( + struct net_device *dev, uint band) +{ + int error = -1; + + if ((band == WLC_BAND_AUTO) || (band == WLC_BAND_5G) || (band == WLC_BAND_2G)) { + error = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), true); + if (!error) + dhd_bus_band_set(dev, band); + } + return error; +} +int wldev_get_datarate(struct net_device *dev, int *datarate) +{ + int error = 0; + + error = wldev_ioctl(dev, WLC_GET_RATE, datarate, sizeof(int), false); + if (error) { + return -1; + } else { + *datarate = dtoh32(*datarate); + } + + return error; +} + +extern chanspec_t +wl_chspec_driver_to_host(chanspec_t chanspec); +#define WL_EXTRA_BUF_MAX 2048 +int wldev_get_mode( + struct net_device *dev, uint8 *cap) +{ + int error = 0; + int chanspec = 0; + uint16 band = 0; + uint16 bandwidth = 0; + wl_bss_info_t *bss = NULL; + char* buf = kmalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); + if (!buf) + return -1; + *(u32*) buf = htod32(WL_EXTRA_BUF_MAX); + error = wldev_ioctl(dev, WLC_GET_BSS_INFO, (void*)buf, WL_EXTRA_BUF_MAX, false); + if (error) { + WLDEV_ERROR(("%s:failed:%d\n", __FUNCTION__, error)); + return -1; + } + bss = (struct wl_bss_info *)(buf + 4); + chanspec = wl_chspec_driver_to_host(bss->chanspec); + + band = chanspec & WL_CHANSPEC_BAND_MASK; + bandwidth = chanspec & WL_CHANSPEC_BW_MASK; + + if (band == WL_CHANSPEC_BAND_2G) { + if (bss->n_cap) + strcpy(cap, "n"); + else + strcpy(cap, "bg"); + } else if (band == WL_CHANSPEC_BAND_5G) { + if (bandwidth == WL_CHANSPEC_BW_80) + strcpy(cap, "ac"); + else if ((bandwidth == WL_CHANSPEC_BW_40) || (bandwidth == WL_CHANSPEC_BW_20)) { + if ((bss->nbss_cap & 0xf00) && (bss->n_cap)) + strcpy(cap, "n|ac"); + else if (bss->n_cap) + strcpy(cap, "n"); + else if (bss->vht_cap) + strcpy(cap, "ac"); + else + strcpy(cap, "a"); + } else { + WLDEV_ERROR(("%s:Mode get failed\n", __FUNCTION__)); + return -1; + } + + } + return error; +} +int wldev_set_country( + struct net_device *dev, char *country_code, bool notify, bool user_enforced, int revinfo) +{ + int error = -1; + wl_country_t cspec = {{0}, 0, {0}}; + scb_val_t scbval; + char smbuf[WLC_IOCTL_SMLEN]; + + if (!country_code) + return error; + + bzero(&scbval, sizeof(scb_val_t)); + error = wldev_iovar_getbuf(dev, "country", NULL, 0, &cspec, sizeof(cspec), NULL); + if (error < 0) { + WLDEV_ERROR(("%s: get country failed = %d\n", __FUNCTION__, error)); + return error; + } + + if ((error < 0) || + dhd_force_country_change(dev) || + (strncmp(country_code, cspec.ccode, WLC_CNTRY_BUF_SZ) != 0)) { + + if (user_enforced) { + bzero(&scbval, sizeof(scb_val_t)); + error = wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), true); + if (error < 0) { + WLDEV_ERROR(("%s: set country failed due to Disassoc error %d\n", + __FUNCTION__, error)); + return error; + } + } + + cspec.rev = revinfo; + memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ); + memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ); + dhd_get_customized_country_code(dev, (char *)&cspec.country_abbrev, &cspec); + error = wldev_iovar_setbuf(dev, "country", &cspec, sizeof(cspec), + smbuf, sizeof(smbuf), NULL); + if (error < 0) { + WLDEV_ERROR(("%s: set country for %s as %s rev %d failed\n", + __FUNCTION__, country_code, cspec.ccode, 
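/*
 * Editor's note: wldev_get_mode() above classifies the BSS by masking the
 * chanspec - band = chanspec & WL_CHANSPEC_BAND_MASK picks 2.4 vs 5 GHz and
 * bandwidth = chanspec & WL_CHANSPEC_BW_MASK picks 20/40/80 MHz - and maps
 * the result plus n_cap/vht_cap onto "bg", "n", "a", "ac" or "n|ac". A
 * caller sketch; note the function never bounds-checks cap, so the caller
 * must size it for the longest possible string:
 *
 *	uint8 cap[8] = "";
 *	if (wldev_get_mode(ndev, cap) == 0)
 *		printk("phy mode: %s\n", cap);
 */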
cspec.rev)); + return error; + } + dhd_bus_country_set(dev, &cspec, notify); + WLDEV_INFO(("%s: set country for %s as %s rev %d\n", + __FUNCTION__, country_code, cspec.ccode, cspec.rev)); + } + return 0; +} diff --git a/drivers/net/wireless/bcmdhd/wldev_common.h b/drivers/net/wireless/bcmdhd/wldev_common.h new file mode 100644 index 000000000000..4cf421cfa19e --- /dev/null +++ b/drivers/net/wireless/bcmdhd/wldev_common.h @@ -0,0 +1,123 @@ +/* + * Common function shared by Linux WEXT, cfg80211 and p2p drivers + * + * Copyright (C) 1999-2016, Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2 (the "GPL"), + * available at http://www.broadcom.com/licenses/GPLv2.php, with the + * following added to such license: + * + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions of + * the license of that module. An independent module is a module which is not + * derived from this software. The special exception does not apply to any + * modifications of the software. + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a license + * other than the GPL, without Broadcom's express prior written consent. + * + * + * <> + * + * $Id: wldev_common.h 556083 2015-05-12 14:03:00Z $ + */ +#ifndef __WLDEV_COMMON_H__ +#define __WLDEV_COMMON_H__ + +#include + +/* wl_dev_ioctl - get/set IOCTLs, will call net_device's do_ioctl (or + * netdev_ops->ndo_do_ioctl in new kernels) + * @dev: the net_device handle + */ +s32 wldev_ioctl( + struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set); + +/** Retrieve named IOVARs, this function calls wl_dev_ioctl with + * WLC_GET_VAR IOCTL code + */ +s32 wldev_iovar_getbuf( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync); + +/** Set named IOVARs, this function calls wl_dev_ioctl with + * WLC_SET_VAR IOCTL code + */ +s32 wldev_iovar_setbuf( + struct net_device *dev, s8 *iovar_name, + void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync); + +s32 wldev_iovar_setint( + struct net_device *dev, s8 *iovar, s32 val); + +s32 wldev_iovar_getint( + struct net_device *dev, s8 *iovar, s32 *pval); + +/** The following function can be implemented if there is a need for bsscfg + * indexed IOVARs + */ + +s32 wldev_mkiovar_bsscfg( + const s8 *iovar_name, s8 *param, s32 paramlen, + s8 *iovar_buf, s32 buflen, s32 bssidx); + +/** Retrieve named and bsscfg indexed IOVARs, this function calls wl_dev_ioctl with + * WLC_GET_VAR IOCTL code + */ +s32 wldev_iovar_getbuf_bsscfg( + struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen, + void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync); + +/** Set named and bsscfg indexed IOVARs, this function calls wl_dev_ioctl with + * WLC_SET_VAR IOCTL code + */ +s32 wldev_iovar_setbuf_bsscfg( + struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen, + void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync); + +s32 wldev_iovar_getint_bsscfg( + struct net_device *dev, s8 *iovar, s32 *pval, s32 
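/*
 * Editor's note: a typical call pattern for the int accessors declared
 * here, as the cfg80211/p2p layers use them - a sketch only, with
 * "roam_off" as an example iovar name rather than anything this header
 * defines, and const-correctness casts omitted for brevity:
 *
 *	s32 val;
 *	if (wldev_iovar_getint(ndev, "roam_off", &val) == 0 && val == 0)
 *		wldev_iovar_setint(ndev, "roam_off", 1);
 */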
bssidx); + +s32 wldev_iovar_setint_bsscfg( + struct net_device *dev, s8 *iovar, s32 val, s32 bssidx); + +extern int dhd_net_set_fw_path(struct net_device *dev, char *fw); +extern int dhd_net_bus_suspend(struct net_device *dev); +extern int dhd_net_bus_resume(struct net_device *dev, uint8 stage); +extern int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, + unsigned long delay_msec); +extern void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code, + wl_country_t *cspec); +extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify); +extern bool dhd_force_country_change(struct net_device *dev); +extern void dhd_bus_band_set(struct net_device *dev, uint band); +extern int wldev_set_country(struct net_device *dev, char *country_code, bool notify, + bool user_enforced, int revinfo); +extern int net_os_wake_lock(struct net_device *dev); +extern int net_os_wake_unlock(struct net_device *dev); +extern int net_os_wake_lock_timeout(struct net_device *dev); +extern int net_os_wake_lock_timeout_enable(struct net_device *dev, int val); +extern int net_os_set_dtim_skip(struct net_device *dev, int val); +extern int net_os_set_suspend_disable(struct net_device *dev, int val); +extern int net_os_set_suspend(struct net_device *dev, int val, int force); +extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, + int max, int *bytes_left); + +/* Get the link speed from dongle, speed is in kbps */ +int wldev_get_link_speed(struct net_device *dev, int *plink_speed); + +int wldev_get_rssi(struct net_device *dev, scb_val_t *prssi); + +int wldev_get_ssid(struct net_device *dev, wlc_ssid_t *pssid); + +int wldev_get_band(struct net_device *dev, uint *pband); +int wldev_get_mode(struct net_device *dev, uint8 *pband); +int wldev_get_datarate(struct net_device *dev, int *datarate); +int wldev_set_band(struct net_device *dev, uint band); + +#endif /* __WLDEV_COMMON_H__ */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index cd587325e286..dd6e27513cc1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -1098,6 +1098,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = { BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430), diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 4157c90ad973..0d635556f6c5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -2841,7 +2841,6 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, struct brcmf_bss_info_le *bi) { struct wiphy *wiphy = cfg_to_wiphy(cfg); - struct ieee80211_channel *notify_channel; struct cfg80211_bss *bss; struct ieee80211_supported_band *band; struct brcmu_chan ch; @@ -2851,7 +2850,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, u16 notify_interval; u8 *notify_ie; size_t notify_ielen; - s32 notify_signal; + struct cfg80211_inform_bss bss_data = {}; if (le32_to_cpu(bi->length) > 
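/*
 * Editor's note: the IWE_STREAM_ADD_* wrappers at the end of wl_iw.h above
 * exist because kernel 2.6.27 added the struct iw_request_info argument to
 * the iwe_stream_add_* helpers; a scan-results formatter written against
 * the wrappers compiles on both sides of that change, e.g.:
 *
 *	struct iw_event iwe;
 *	iwe.cmd = SIOCGIWAP;
 *	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
 *	memcpy(iwe.u.ap_addr.sa_data, bssid, 6);
 *	event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
 *
 * where info, event, end and bssid are the usual WEXT handler locals,
 * assumed here rather than shown.
 */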
WL_BSS_INFO_MAX) { brcmf_err("Bss info is larger than buffer. Discarding\n"); @@ -2871,27 +2870,28 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, band = wiphy->bands[NL80211_BAND_5GHZ]; freq = ieee80211_channel_to_frequency(channel, band->band); - notify_channel = ieee80211_get_channel(wiphy, freq); + bss_data.chan = ieee80211_get_channel(wiphy, freq); + bss_data.scan_width = NL80211_BSS_CHAN_WIDTH_20; + bss_data.boottime_ns = ktime_to_ns(ktime_get_boottime()); notify_capability = le16_to_cpu(bi->capability); notify_interval = le16_to_cpu(bi->beacon_period); notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset); notify_ielen = le32_to_cpu(bi->ie_length); - notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100; + bss_data.signal = (s16)le16_to_cpu(bi->RSSI) * 100; brcmf_dbg(CONN, "bssid: %pM\n", bi->BSSID); brcmf_dbg(CONN, "Channel: %d(%d)\n", channel, freq); brcmf_dbg(CONN, "Capability: %X\n", notify_capability); brcmf_dbg(CONN, "Beacon interval: %d\n", notify_interval); - brcmf_dbg(CONN, "Signal: %d\n", notify_signal); + brcmf_dbg(CONN, "Signal: %d\n", bss_data.signal); - bss = cfg80211_inform_bss(wiphy, notify_channel, - CFG80211_BSS_FTYPE_UNKNOWN, - (const u8 *)bi->BSSID, - 0, notify_capability, - notify_interval, notify_ie, - notify_ielen, notify_signal, - GFP_KERNEL); + bss = cfg80211_inform_bss_data(wiphy, &bss_data, + CFG80211_BSS_FTYPE_UNKNOWN, + (const u8 *)bi->BSSID, + 0, notify_capability, + notify_interval, notify_ie, + notify_ielen, GFP_KERNEL); if (!bss) return -ENOMEM; @@ -6916,7 +6916,7 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy, return; /* ignore non-ISO3166 country codes */ - for (i = 0; i < sizeof(req->alpha2); i++) + for (i = 0; i < 2; i++) if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') { brcmf_err("not an ISO3166 code (0x%02x 0x%02x)\n", req->alpha2[0], req->alpha2[1]); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 2ce675ab40ef..450f2216fac2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -462,25 +462,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac) * @dev_addr: optional device address. * * P2P needs mac addresses for P2P device and interface. If no device - * address it specified, these are derived from the primary net device, ie. - * the permanent ethernet address of the device. + * address it specified, these are derived from a random ethernet + * address. */ static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr) { - struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; - bool local_admin = false; + bool random_addr = false; - if (!dev_addr || is_zero_ether_addr(dev_addr)) { - dev_addr = pri_ifp->mac_addr; - local_admin = true; - } + if (!dev_addr || is_zero_ether_addr(dev_addr)) + random_addr = true; - /* Generate the P2P Device Address. This consists of the device's - * primary MAC address with the locally administered bit set. + /* Generate the P2P Device Address obtaining a random ethernet + * address with the locally administered bit set. */ - memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); - if (local_admin) - p2p->dev_addr[0] |= 0x02; + if (random_addr) + eth_random_addr(p2p->dev_addr); + else + memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); /* Generate the P2P Interface Address. 
If the discovery and connection * BSSCFGs need to simultaneously co-exist, then this address must be diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 613caca7dc02..eccd25febfe6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -2064,7 +2064,7 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt) return head_pad; } -/** +/* * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for * bus layer usage. */ @@ -4096,8 +4096,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, sdio_release_host(sdiodev->func[1]); fail: brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); - device_release_driver(dev); device_release_driver(&sdiodev->func[2]->dev); + device_release_driver(dev); } struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index e8b5ff42f5a8..2c80c722feca 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -53,6 +53,7 @@ #include #include "iwl-config.h" #include "iwl-agn-hw.h" +#include "fw/file.h" /* Highest firmware API version supported */ #define IWL9000_UCODE_API_MAX 34 @@ -72,18 +73,21 @@ #define IWL9000_SMEM_OFFSET 0x400000 #define IWL9000_SMEM_LEN 0x68000 -#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-" +#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-" +#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-" #define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-" #define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-" #define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-" -#define IWL9000_MODULE_FIRMWARE(api) \ - IWL9000_FW_PRE "-" __stringify(api) ".ucode" +#define IWL9000A_MODULE_FIRMWARE(api) \ + IWL9000A_FW_PRE __stringify(api) ".ucode" +#define IWL9000B_MODULE_FIRMWARE(api) \ + IWL9000B_FW_PRE __stringify(api) ".ucode" #define IWL9000RFB_MODULE_FIRMWARE(api) \ - IWL9000RFB_FW_PRE "-" __stringify(api) ".ucode" + IWL9000RFB_FW_PRE __stringify(api) ".ucode" #define IWL9260A_MODULE_FIRMWARE(api) \ - IWL9260A_FW_PRE "-" __stringify(api) ".ucode" + IWL9260A_FW_PRE __stringify(api) ".ucode" #define IWL9260B_MODULE_FIRMWARE(api) \ - IWL9260B_FW_PRE "-" __stringify(api) ".ucode" + IWL9260B_FW_PRE __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_9000 10 @@ -173,6 +177,17 @@ const struct iwl_cfg iwl9260_2ac_cfg = { .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; +const struct iwl_cfg iwl9260_killer_2ac_cfg = { + .name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)", + .fw_name_pre = IWL9260A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + const struct iwl_cfg iwl9270_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9270", .fw_name_pre = IWL9260A_FW_PRE, @@ -193,7 +208,48 @@ const struct iwl_cfg iwl9460_2ac_cfg = { .nvm_ver = IWL9000_NVM_VERSION, .nvm_calib_ver = IWL9000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwl9460_2ac_cfg_soc = { + .name = "Intel(R) Dual Band Wireless AC 9460", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + 
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, + .soc_latency = 5000, +}; + +const struct iwl_cfg iwl9461_2ac_cfg_soc = { + .name = "Intel(R) Dual Band Wireless AC 9461", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, +}; + +const struct iwl_cfg iwl9462_2ac_cfg_soc = { + .name = "Intel(R) Dual Band Wireless AC 9462", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, }; const struct iwl_cfg iwl9560_2ac_cfg = { @@ -205,10 +261,142 @@ const struct iwl_cfg iwl9560_2ac_cfg = { .nvm_ver = IWL9000_NVM_VERSION, .nvm_calib_ver = IWL9000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwl9560_2ac_cfg_soc = { + .name = "Intel(R) Dual Band Wireless AC 9560", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, +}; + +const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = { + .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, +}; + +const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = { + .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, +}; + +const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = { + .name = "Intel(R) Dual Band Wireless AC 9460", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + +const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = { + .name = "Intel(R) Dual Band Wireless AC 9461", + .fw_name_pre = 
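/*
 * Editor's note: the firmware file iwlwifi ultimately requests is the
 * string concatenation fw_name_pre + __stringify(api) + ".ucode", which is
 * why these hunks drop the stray "-" from the *_MODULE_FIRMWARE() macros:
 * the prefixes already end in "-". With IWL9000_UCODE_API_MAX = 34 as
 * defined above:
 *
 *	IWL9000B_MODULE_FIRMWARE(34)
 *		expands to "iwlwifi-9000-pu-b0-jf-b0-" "34" ".ucode"
 *		i.e. "iwlwifi-9000-pu-b0-jf-b0-34.ucode"
 */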
IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + +const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = { + .name = "Intel(R) Dual Band Wireless AC 9462", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + +const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = { + .name = "Intel(R) Dual Band Wireless AC 9560", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + +const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = { + .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + +const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = { + .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, + .soc_latency = 5000, + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK }; -MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c index a440140ed8dd..7eade165b747 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c @@ -80,15 +80,15 @@ #define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" #define IWL_A000_HR_MODULE_FIRMWARE(api) \ - IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_HR_FW_PRE __stringify(api) ".ucode" #define IWL_A000_JF_MODULE_FIRMWARE(api) \ - IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_JF_FW_PRE __stringify(api) ".ucode" #define 
IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \ - IWL_A000_HR_F0_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode" #define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \ - IWL_A000_JF_B0_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode" #define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \ - IWL_A000_HR_A0_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_A000 10 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 5a40092febfb..3bfc657f6b42 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -531,6 +531,8 @@ struct iwl_scan_config_v1 { } __packed; /* SCAN_CONFIG_DB_CMD_API_S */ #define SCAN_TWO_LMACS 2 +#define SCAN_LB_LMAC_IDX 0 +#define SCAN_HB_LMAC_IDX 1 struct iwl_scan_config { __le32 flags; @@ -578,6 +580,7 @@ enum iwl_umac_scan_general_flags { IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9), IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10), IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11), + IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13), }; /** @@ -631,12 +634,17 @@ struct iwl_scan_req_umac_tail { * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority * @general_flags: &enum iwl_umac_scan_general_flags - * @reserved2: for future use and alignment * @scan_start_mac_id: report the scan start TSF time according to this mac TSF * @extended_dwell: dwell time for channels 1, 6 and 11 * @active_dwell: dwell time for active scan * @passive_dwell: dwell time for passive scan * @fragmented_dwell: dwell time for fragmented passive scan + * @adwell_default_n_aps: for adaptive dwell the default number of APs + * per channel + * @adwell_default_n_aps_social: for adaptive dwell the default + * number of APs per social (1,6,11) channel + * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added + * to total scan time * @max_out_time: max out of serving channel time, per LMAC - for CDB there * are 2 LMACs * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs @@ -644,6 +652,8 @@ struct iwl_scan_req_umac_tail { * @channel_flags: &enum iwl_scan_channel_flags * @n_channels: num of channels in scan request * @reserved: for future use and alignment + * @reserved2: for future use and alignment + * @reserved3: for future use and alignment * @data: &struct iwl_scan_channel_cfg_umac and * &struct iwl_scan_req_umac_tail */ @@ -651,41 +661,64 @@ struct iwl_scan_req_umac { __le32 flags; __le32 uid; __le32 ooc_priority; - /* SCAN_GENERAL_PARAMS_API_S_VER_4 */ __le16 general_flags; - u8 reserved2; + u8 reserved; u8 scan_start_mac_id; - u8 extended_dwell; - u8 active_dwell; - u8 passive_dwell; - u8 fragmented_dwell; union { struct { + u8 extended_dwell; + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; __le32 max_out_time; __le32 suspend_time; __le32 scan_priority; - /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ u8 channel_flags; u8 n_channels; - __le16 reserved; + __le16 reserved2; u8 data[]; } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ struct { + u8 extended_dwell; + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; __le32 max_out_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; - /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ u8 channel_flags; 
u8 n_channels; - __le16 reserved; + __le16 reserved2; u8 data[]; } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */ + struct { + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; + u8 adwell_default_n_aps; + u8 adwell_default_n_aps_social; + u8 reserved3; + __le16 adwell_max_budget; + __le32 max_out_time[SCAN_TWO_LMACS]; + __le32 suspend_time[SCAN_TWO_LMACS]; + __le32 scan_priority; + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ + u8 channel_flags; + u8 n_channels; + __le16 reserved2; + u8 data[]; + } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */ }; } __packed; -#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac) +#define IWL_SCAN_REQ_UMAC_SIZE_V7 sizeof(struct iwl_scan_req_umac) +#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \ + 2 * sizeof(u8) - sizeof(__le16)) #define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \ - 2 * sizeof(__le32)) + 2 * sizeof(__le32) - 2 * sizeof(u8) - \ + sizeof(__le16)) /** * struct iwl_umac_scan_abort diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h index 3721a3ed358b..f824bebceb06 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h @@ -211,7 +211,7 @@ enum { * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. - * @T2_V2_START_IMMEDIATELY: start time event immediately + * @TE_V2_START_IMMEDIATELY: start time event immediately * @TE_V2_DEP_OTHER: depends on another time event * @TE_V2_DEP_TSF: depends on a specific time * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC @@ -230,7 +230,7 @@ enum iwl_time_event_policy { TE_V2_NOTIF_HOST_FRAG_END = BIT(5), TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), - T2_V2_START_IMMEDIATELY = BIT(11), + TE_V2_START_IMMEDIATELY = BIT(11), /* placement characteristics */ TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h index 87b4434224a1..dfa111bb411e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -68,6 +68,9 @@ * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames + * @IWL_MVM_DQA_INJECT_MONITOR_QUEUE: a queue reserved for injection using + * monitor mode. Note this queue is the same as the queue for P2P device + * but we can't have active monitor mode along with P2P device anyway. * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure * that we are never left without the possibility to connect to an AP. 
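The three IWL_SCAN_REQ_UMAC_SIZE_* macros above all start from sizeof(struct iwl_scan_req_umac), which is dictated by the largest union variant, v7. The _V6 value subtracts the net 4 bytes by which v7 outgrows v6 (two extra u8 fields plus the __le16 adwell_max_budget, minus the extended_dwell byte that v7 drops), and _V1 subtracts 12: the same 4 bytes plus one __le32 from each of the two-entry max_out_time/suspend_time arrays. A condensed sketch of how the driver picks among them, matching the iwl_mvm_scan_size() hunk later in this diff; the helper name and bool parameters are stand-ins for iwl_mvm_is_adaptive_dwell_supported()/iwl_mvm_has_new_tx_api():

	#include <stdbool.h>
	#include <stddef.h>

	/* assumes fw/api/scan.h (above) is in scope for the three macros */
	static size_t scan_req_umac_base_size(bool adaptive_dwell, bool new_tx_api)
	{
		if (adaptive_dwell)	/* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
			return IWL_SCAN_REQ_UMAC_SIZE_V7;
		if (new_tx_api)		/* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
			return IWL_SCAN_REQ_UMAC_SIZE_V6;
		return IWL_SCAN_REQ_UMAC_SIZE_V1;
	}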
@@ -87,6 +90,7 @@ enum iwl_mvm_dqa_txq { IWL_MVM_DQA_CMD_QUEUE = 0, IWL_MVM_DQA_AUX_QUEUE = 1, IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2, + IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2, IWL_MVM_DQA_GCAST_QUEUE = 3, IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index f5dd7d83cd0a..2fa7ec466275 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -8,6 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -928,7 +930,6 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) out: iwl_fw_free_dump_desc(fwrt); - fwrt->dump.trig = NULL; clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); } IWL_EXPORT_SYMBOL(iwl_fw_error_dump); @@ -1084,6 +1085,14 @@ void iwl_fw_error_dump_wk(struct work_struct *work) fwrt->ops->dump_start(fwrt->ops_ctx)) return; + if (fwrt->ops && fwrt->ops->fw_running && + !fwrt->ops->fw_running(fwrt->ops_ctx)) { + IWL_ERR(fwrt, "Firmware not running - cannot dump error\n"); + iwl_fw_free_dump_desc(fwrt); + clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); + goto out; + } + if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { /* stop recording */ iwl_fw_dbg_stop_recording(fwrt); @@ -1117,7 +1126,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work) iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl); } } - +out: if (fwrt->ops && fwrt->ops->dump_end) fwrt->ops->dump_end(fwrt->ops_ctx); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 9c889a32fe24..72259bff9922 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -8,6 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
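The iwl_fw_error_dump_wk() hunk above bails out early when the op mode reports the firmware is down, instead of poking a dead device, and it learns this through the optional fw_running() hook added to struct iwl_fw_runtime_ops later in this diff. A self-contained sketch of the pattern (structure and names simplified for illustration, not the kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	struct fw_runtime_ops {
		int  (*dump_start)(void *ctx);
		void (*dump_end)(void *ctx);
		bool (*fw_running)(void *ctx);	/* optional, may be NULL */
	};

	struct fw_runtime {
		const struct fw_runtime_ops *ops;
		void *ops_ctx;
	};

	static void dump_worker(struct fw_runtime *fwrt)
	{
		if (fwrt->ops && fwrt->ops->dump_start &&
		    fwrt->ops->dump_start(fwrt->ops_ctx))
			return;

		if (fwrt->ops && fwrt->ops->fw_running &&
		    !fwrt->ops->fw_running(fwrt->ops_ctx)) {
			fprintf(stderr, "firmware not running - cannot dump\n");
			/* skip the dump but still run dump_end(), so whatever
			 * reference dump_start() took is released */
			goto out;
		}

		/* ... stop debug recording, collect the dump, restart ... */
	out:
		if (fwrt->ops && fwrt->ops->dump_end)
			fwrt->ops->dump_end(fwrt->ops_ctx);
	}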
* * Redistribution and use in source and binary forms, with or without @@ -91,6 +93,7 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt) if (fwrt->dump.desc != &iwl_dump_desc_assert) kfree(fwrt->dump.desc); fwrt->dump.desc = NULL; + fwrt->dump.trig = NULL; } void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); @@ -209,8 +212,6 @@ static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt) static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) { - iwl_fw_dbg_stop_recording(fwrt); - fwrt->dump.conf = FW_DBG_INVALID; } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 279248cd9cfb..1b3ad8ef0c79 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -262,6 +262,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30, IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31, /* API Set 1 */ + IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34, IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35, IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37, @@ -433,6 +434,7 @@ enum iwl_fw_phy_cfg { FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS, FW_PHY_CFG_RX_CHAIN_POS = 20, FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS, + FW_PHY_CFG_SHARED_CLK = BIT(31), }; #define IWL_UCODE_MAX_CS 1 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c index 1610722b8099..747eef82cefd 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
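IWL_UCODE_TLV_API_ADAPTIVE_DWELL above is bit 32, i.e. the first bit of the second 32-bit word of the API bitmap, which is why it sits under the "API Set 1" comment. Testing such a bit means indexing into an array of words; fw_has_api() does this via test_bit(), and a minimal standalone illustration (api_bit_set() is a hypothetical helper, not the kernel's) looks like:

	#include <stdbool.h>
	#include <stdint.h>

	static bool api_bit_set(const uint32_t *bitmap, unsigned int bit)
	{
		return (bitmap[bit / 32] >> (bit % 32)) & 1u;
	}

	/* api_bit_set(capa_api_words, 32) -> word 1, bit 0: ADAPTIVE_DWELL */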
* * Redistribution and use in source and binary forms, with or without @@ -174,7 +176,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt, static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, const struct fw_img *image) { - int sec_idx, idx; + int sec_idx, idx, ret; u32 offset = 0; /* @@ -201,17 +203,23 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, */ if (sec_idx >= image->num_sec - 1) { IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n"); - iwl_free_fw_paging(fwrt); - return -EINVAL; + ret = -EINVAL; + goto err; } /* copy the CSS block to the dram */ IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n", sec_idx); + if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) { + IWL_ERR(fwrt, "CSS block is larger than paging size\n"); + ret = -EINVAL; + goto err; + } + memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block), image->sec[sec_idx].data, - fwrt->fw_paging_db[0].fw_paging_size); + image->sec[sec_idx].len); dma_sync_single_for_device(fwrt->trans->dev, fwrt->fw_paging_db[0].fw_paging_phys, fwrt->fw_paging_db[0].fw_paging_size, @@ -232,6 +240,14 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) { struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; + if (block->fw_paging_size > image->sec[sec_idx].len - offset) { + IWL_ERR(fwrt, + "Paging: paging size is larger than remaining data in block %d\n", + idx); + ret = -EINVAL; + goto err; + } + memcpy(page_address(block->fw_paging_block), image->sec[sec_idx].data + offset, block->fw_paging_size); @@ -242,19 +258,32 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, IWL_DEBUG_FW(fwrt, "Paging: copied %d paging bytes to block %d\n", - fwrt->fw_paging_db[idx].fw_paging_size, - idx); + block->fw_paging_size, idx); + + offset += block->fw_paging_size; - offset += fwrt->fw_paging_db[idx].fw_paging_size; + if (offset > image->sec[sec_idx].len) { + IWL_ERR(fwrt, + "Paging: offset goes over section size\n"); + ret = -EINVAL; + goto err; + } } /* copy the last paging block */ if (fwrt->num_of_pages_in_last_blk > 0) { struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; + if (image->sec[sec_idx].len - offset > block->fw_paging_size) { + IWL_ERR(fwrt, + "Paging: last block is larger than paging size\n"); + ret = -EINVAL; + goto err; + } + memcpy(page_address(block->fw_paging_block), image->sec[sec_idx].data + offset, - FW_PAGING_SIZE * fwrt->num_of_pages_in_last_blk); + image->sec[sec_idx].len - offset); dma_sync_single_for_device(fwrt->trans->dev, block->fw_paging_phys, block->fw_paging_size, @@ -266,6 +295,10 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, } return 0; + +err: + iwl_free_fw_paging(fwrt); + return ret; } static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 50cfb6d795a5..fb1ad3c5c93c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -26,6 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
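The iwl_fill_paging_mem() hunks above replace fixed-size memcpy()s with copies bounded by both the remaining bytes of the source firmware section and the destination paging block, so a truncated or malformed image now fails cleanly with -EINVAL, and the new err: path frees every paging block. A minimal sketch of the invariant being enforced; copy_paging_block() is a hypothetical helper, not the driver's API:

	#include <errno.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	static int copy_paging_block(uint8_t *dst, size_t dst_size,
				     const uint8_t *sec, size_t sec_len,
				     size_t *offset, size_t want)
	{
		/* reject images whose section can't cover the requested copy,
		 * or copies that would overrun the paging block */
		if (*offset > sec_len || want > sec_len - *offset ||
		    want > dst_size)
			return -EINVAL;
		memcpy(dst, sec + *offset, want);
		*offset += want;
		return 0;
	}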
* * Redistribution and use in source and binary forms, with or without @@ -68,6 +70,7 @@ struct iwl_fw_runtime_ops { int (*dump_start)(void *ctx); void (*dump_end)(void *ctx); + bool (*fw_running)(void *ctx); }; #define MAX_NUM_LMAC 2 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 71cb1ecde0f7..70f3c327eb4a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -364,6 +364,7 @@ struct iwl_cfg { u32 dccm2_len; u32 smem_offset; u32 smem_len; + u32 soc_latency; u16 nvm_ver; u16 nvm_calib_ver; u16 rx_with_siso_diversity:1, @@ -393,6 +394,7 @@ struct iwl_cfg { u8 max_vht_ampdu_exponent; u8 ucode_api_max; u8 ucode_api_min; + u32 extra_phy_cfg_flags; }; /* @@ -468,9 +470,22 @@ extern const struct iwl_cfg iwl8265_2ac_sdio_cfg; extern const struct iwl_cfg iwl4165_2ac_sdio_cfg; extern const struct iwl_cfg iwl9160_2ac_cfg; extern const struct iwl_cfg iwl9260_2ac_cfg; +extern const struct iwl_cfg iwl9260_killer_2ac_cfg; extern const struct iwl_cfg iwl9270_2ac_cfg; extern const struct iwl_cfg iwl9460_2ac_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg; +extern const struct iwl_cfg iwl9460_2ac_cfg_soc; +extern const struct iwl_cfg iwl9461_2ac_cfg_soc; +extern const struct iwl_cfg iwl9462_2ac_cfg_soc; +extern const struct iwl_cfg iwl9560_2ac_cfg_soc; +extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc; +extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc; +extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk; +extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk; +extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk; +extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk; +extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk; +extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk; extern const struct iwl_cfg iwla000_2ac_cfg_hr; extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwla000_2ac_cfg_jf; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index e90abbfba718..ecd5c1df811c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -117,6 +117,7 @@ #define FH_RSCSR_FRAME_INVALID 0x55550000 #define FH_RSCSR_FRAME_ALIGN 0x40 #define FH_RSCSR_RPA_EN BIT(25) +#define FH_RSCSR_RADA_EN BIT(26) #define FH_RSCSR_RXQ_POS 16 #define FH_RSCSR_RXQ_MASK 0x3F0000 @@ -128,7 +129,8 @@ struct iwl_rx_packet { * 31: flag flush RB request * 30: flag ignore TC (terminal counter) request * 29: flag fast IRQ request - * 28-26: Reserved + * 28-27: Reserved + * 26: RADA enabled * 25: Offload enabled * 24: RPF enabled * 23: RSS enabled diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index e97904c2c4d4..714996187236 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
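FH_RSCSR_RADA_EN, bit 26 in the rx_packet len_n_flags word documented above, reports that the RX accelerator already stripped the MIC/ICV from a decrypted frame; the rxmq.c hunk later in this diff forwards it to mac80211 as RX_FLAG_MIC_STRIPPED / RX_FLAG_ICV_STRIPPED. A toy decoder for the flag, illustrative only:

	#include <stdbool.h>
	#include <stdint.h>

	#define FH_RSCSR_RPA_EN		(1u << 25)
	#define FH_RSCSR_RADA_EN	(1u << 26)	/* new in this change */

	static bool rx_hw_stripped_mic(uint32_t len_n_flags)
	{
		/* HW decrypted the frame and removed the trailing MIC bytes */
		return (len_n_flags & FH_RSCSR_RADA_EN) != 0;
	}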
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1209,9 +1211,6 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, { int ret; - if (!iwl_mvm_firmware_running(mvm)) - return -EIO; - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE); if (ret) return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 83485493a79a..b71a9d11a50f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -435,6 +435,10 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) /* Set parameters */ phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); + + /* set flags extra PHY configuration flags from the device's cfg */ + phy_cfg_cmd.phy_cfg |= cpu_to_le32(mvm->cfg->extra_phy_cfg_flags); + phy_cfg_cmd.calib_control.event_trigger = mvm->fw->default_calib[ucode_type].event_trigger; phy_cfg_cmd.calib_control.flow_trigger = diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index a2bf530eeae4..8ba16fc24e3a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -438,7 +438,8 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) } /* Allocate the CAB queue for softAP and GO interfaces */ - if (vif->type == NL80211_IFTYPE_AP) { + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) { /* * For TVQM this will be overwritten later with the FW assigned * queue value (when queue is enabled). @@ -787,7 +788,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, u32 action) { struct iwl_mac_ctx_cmd cmd = {}; - u32 tfd_queue_msk = 0; + u32 tfd_queue_msk = BIT(mvm->snif_queue); int ret; WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index a9ac872226fd..db1fab9aa1c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -2127,15 +2128,40 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, if (ret) goto out_remove; - ret = iwl_mvm_add_mcast_sta(mvm, vif); - if (ret) - goto out_unbind; - - /* Send the bcast station. At this stage the TBTT and DTIM time events - * are added and applied to the scheduler */ - ret = iwl_mvm_send_add_bcast_sta(mvm, vif); - if (ret) - goto out_rm_mcast; + /* + * This is not very nice, but the simplest: + * For older FWs adding the mcast sta before the bcast station may + * cause assert 0x2b00. + * This is fixed in later FW so make the order of removal depend on + * the TLV + */ + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { + ret = iwl_mvm_add_mcast_sta(mvm, vif); + if (ret) + goto out_unbind; + /* + * Send the bcast station. 
At this stage the TBTT and DTIM time + * events are added and applied to the scheduler + */ + ret = iwl_mvm_send_add_bcast_sta(mvm, vif); + if (ret) { + iwl_mvm_rm_mcast_sta(mvm, vif); + goto out_unbind; + } + } else { + /* + * Send the bcast station. At this stage the TBTT and DTIM time + * events are added and applied to the scheduler + */ + ret = iwl_mvm_send_add_bcast_sta(mvm, vif); + if (ret) + goto out_unbind; + ret = iwl_mvm_add_mcast_sta(mvm, vif); + if (ret) { + iwl_mvm_send_rm_bcast_sta(mvm, vif); + goto out_unbind; + } + } /* must be set before quota calculations */ mvmvif->ap_ibss_active = true; @@ -2165,7 +2191,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_power_update_mac(mvm); mvmvif->ap_ibss_active = false; iwl_mvm_send_rm_bcast_sta(mvm, vif); -out_rm_mcast: iwl_mvm_rm_mcast_sta(mvm, vif); out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); @@ -2703,6 +2728,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, /* enable beacon filtering */ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, + false); + ret = 0; } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { @@ -3468,6 +3497,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, ret = 0; goto out; case NL80211_IFTYPE_STATION: + mvmvif->csa_bcn_pending = false; break; case NL80211_IFTYPE_MONITOR: /* always disable PS when a monitor interface is active */ @@ -3511,7 +3541,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, } if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { - u32 duration = 2 * vif->bss_conf.beacon_int; + u32 duration = 3 * vif->bss_conf.beacon_int; /* iwl_mvm_protect_session() reads directly from the * device (the system time), so make sure it is @@ -3524,6 +3554,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, /* Protect the session to make sure we hear the first * beacon on the new channel. 
*/ + mvmvif->csa_bcn_pending = true; iwl_mvm_protect_session(mvm, vif, duration, duration, vif->bss_conf.beacon_int / 2, true); @@ -3967,6 +3998,7 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_STATION) { struct iwl_mvm_sta *mvmsta; + mvmvif->csa_bcn_pending = false; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 949e63418299..736c176f1fd6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -434,6 +434,9 @@ struct iwl_mvm_vif { bool csa_failed; u16 csa_target_freq; + /* Indicates that we are waiting for a beacon on a new channel */ + bool csa_bcn_pending; + /* TCP Checksum Offload */ netdev_features_t features; @@ -954,6 +957,7 @@ struct iwl_mvm { /* Tx queues */ u16 aux_queue; + u16 snif_queue; u16 probe_queue; u16 p2p_dev_queue; @@ -1042,6 +1046,7 @@ struct iwl_mvm { * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running + * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA */ enum iwl_mvm_status { IWL_MVM_STATUS_HW_RFKILL, @@ -1053,6 +1058,7 @@ enum iwl_mvm_status { IWL_MVM_STATUS_ROC_AUX_RUNNING, IWL_MVM_STATUS_D3_RECONFIG, IWL_MVM_STATUS_FIRMWARE_RUNNING, + IWL_MVM_STATUS_NEED_FLUSH_P2P, }; /* Keep track of completed init configuration */ @@ -1124,6 +1130,12 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); } +static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm) +{ + return fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_ADAPTIVE_DWELL); +} + static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) { /* For now we only use this mode to differentiate between diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 231878969332..54f411b83bea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -553,9 +555,15 @@ static void iwl_mvm_fwrt_dump_end(void *ctx) iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); } +static bool iwl_mvm_fwrt_fw_running(void *ctx) +{ + return iwl_mvm_firmware_running(ctx); +} + static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { .dump_start = iwl_mvm_fwrt_dump_start, .dump_end = iwl_mvm_fwrt_dump_end, + .fw_running = iwl_mvm_fwrt_fw_running, }; static struct iwl_op_mode * @@ -622,6 +630,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->fw_restart = iwlwifi_mod_params.fw_restart ? 
-1 : 0; mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; + mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE; mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 0fe723ca844e..386fdee23eb0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -1881,12 +1881,10 @@ static int rs_switch_to_column(struct iwl_mvm *mvm, struct rs_rate *rate = &search_tbl->rate; const struct rs_tx_column *column = &rs_tx_columns[col_id]; const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column]; - u32 sz = (sizeof(struct iwl_scale_tbl_info) - - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); unsigned long rate_mask = 0; u32 rate_idx = 0; - memcpy(search_tbl, tbl, sz); + memcpy(search_tbl, tbl, offsetof(struct iwl_scale_tbl_info, win)); rate->sgi = column->sgi; rate->ant = column->ant; @@ -2692,7 +2690,8 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, enum nl80211_band band, - struct rs_rate *rate) + struct rs_rate *rate, + bool init) { int i, nentries; unsigned long active_rate; @@ -2746,14 +2745,25 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, */ if (sta->vht_cap.vht_supported && best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { - switch (sta->bandwidth) { - case IEEE80211_STA_RX_BW_160: - case IEEE80211_STA_RX_BW_80: - case IEEE80211_STA_RX_BW_40: + /* + * In AP mode, when a new station associates, rs is initialized + * immediately upon association completion, before the phy + * context is updated with the association parameters, so the + * sta bandwidth might be wider than the phy context allows. + * To avoid this issue, always initialize rs with 20mhz + * bandwidth rate, and after authorization, when the phy context + * is already up-to-date, re-init rs with the correct bw. + */ + u32 bw = init ? 
RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta); + + switch (bw) { + case RATE_MCS_CHAN_WIDTH_40: + case RATE_MCS_CHAN_WIDTH_80: + case RATE_MCS_CHAN_WIDTH_160: initial_rates = rs_optimal_rates_vht; nentries = ARRAY_SIZE(rs_optimal_rates_vht); break; - case IEEE80211_STA_RX_BW_20: + case RATE_MCS_CHAN_WIDTH_20: initial_rates = rs_optimal_rates_vht_20mhz; nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz); break; @@ -2764,7 +2774,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, active_rate = lq_sta->active_siso_rate; rate->type = LQ_VHT_SISO; - rate->bw = rs_bw_from_sta_bw(sta); + rate->bw = bw; } else if (sta->ht_cap.ht_supported && best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { initial_rates = rs_optimal_rates_ht; @@ -2846,7 +2856,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, tbl = &(lq_sta->lq_info[active_tbl]); rate = &tbl->rate; - rs_get_initial_rate(mvm, sta, lq_sta, band, rate); + rs_get_initial_rate(mvm, sta, lq_sta, band, rate, init); rs_init_optimal_rate(mvm, sta, lq_sta); WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 248699c2c4bf..e2196dc35dc6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -71,6 +71,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb); struct iwl_mvm_key_pn *ptk_pn; + int res; u8 tid, keyidx; u8 pn[IEEE80211_CCMP_PN_LEN]; u8 *extiv; @@ -127,12 +128,13 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, pn[4] = extiv[1]; pn[5] = extiv[0]; - if (memcmp(pn, ptk_pn->q[queue].pn[tid], - IEEE80211_CCMP_PN_LEN) <= 0) + res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN); + if (res < 0) + return -1; + if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN)) return -1; - if (!(stats->flag & RX_FLAG_AMSDU_MORE)) - memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); + memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); stats->flag |= RX_FLAG_PN_VALIDATED; return 0; @@ -232,8 +234,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_rx_status *stats, - struct iwl_rx_mpdu_desc *desc, int queue, - u8 *crypt_len) + struct iwl_rx_mpdu_desc *desc, u32 pkt_flags, + int queue, u8 *crypt_len) { u16 status = le16_to_cpu(desc->status); @@ -253,6 +255,8 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, return -1; stats->flag |= RX_FLAG_DECRYPTED; + if (pkt_flags & FH_RSCSR_RADA_EN) + stats->flag |= RX_FLAG_MIC_STRIPPED; *crypt_len = IEEE80211_CCMP_HDR_LEN; return 0; case IWL_RX_MPDU_STATUS_SEC_TKIP: @@ -270,6 +274,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_WEP) *crypt_len = IEEE80211_WEP_IV_LEN; + + if (pkt_flags & FH_RSCSR_RADA_EN) + stats->flag |= RX_FLAG_ICV_STRIPPED; + return 0; case IWL_RX_MPDU_STATUS_SEC_EXT_ENC: if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) @@ -304,28 +312,21 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, } /* - * returns true if a packet outside BA session is a duplicate and - * should be dropped + * returns true if a packet is a duplicate and should be dropped. 
+ * Updates AMSDU PN tracking info */ -static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue, - struct ieee80211_rx_status *rx_status, - struct ieee80211_hdr *hdr, - struct iwl_rx_mpdu_desc *desc) +static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, + struct ieee80211_rx_status *rx_status, + struct ieee80211_hdr *hdr, + struct iwl_rx_mpdu_desc *desc) { struct iwl_mvm_sta *mvm_sta; struct iwl_mvm_rxq_dup_data *dup_data; - u8 baid, tid, sub_frame_idx; + u8 tid, sub_frame_idx; if (WARN_ON(IS_ERR_OR_NULL(sta))) return false; - baid = (le32_to_cpu(desc->reorder_data) & - IWL_RX_MPDU_REORDER_BAID_MASK) >> - IWL_RX_MPDU_REORDER_BAID_SHIFT; - - if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) - return false; - mvm_sta = iwl_mvm_sta_from_mac80211(sta); dup_data = &mvm_sta->dup_data[queue]; @@ -355,6 +356,12 @@ static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue, dup_data->last_sub_frame[tid] >= sub_frame_idx)) return true; + /* Allow same PN as the first subframe for following sub frames */ + if (dup_data->last_seq[tid] == hdr->seq_ctrl && + sub_frame_idx > dup_data->last_sub_frame[tid] && + desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) + rx_status->flag |= RX_FLAG_ALLOW_SAME_PN; + dup_data->last_seq[tid] = hdr->seq_ctrl; dup_data->last_sub_frame[tid] = sub_frame_idx; @@ -810,7 +817,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status = IEEE80211_SKB_RXCB(skb); - if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, queue, &crypt_len)) { + if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, + le32_to_cpu(pkt->len_n_flags), queue, + &crypt_len)) { kfree_skb(skb); return; } @@ -921,7 +930,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (ieee80211_is_data(hdr->frame_control)) iwl_mvm_rx_csum(sta, skb, desc); - if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) { + if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) { kfree_skb(skb); goto out; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 774122fed454..e4fd476e9ccb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -130,6 +130,19 @@ struct iwl_mvm_scan_params { u32 measurement_dwell; }; +static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) +{ + struct iwl_scan_req_umac *cmd = mvm->scan_cmd; + + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) + return (void *)&cmd->v7.data; + + if (iwl_mvm_has_new_tx_api(mvm)) + return (void *)&cmd->v6.data; + + return (void *)&cmd->v1.data; +} + static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) { if (mvm->scan_rx_ant != ANT_NONE) @@ -1075,25 +1088,57 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, { struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type]; + if (iwl_mvm_is_regular_scan(params)) + cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + else + cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); + + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) { + if (params->measurement_dwell) { + cmd->v7.active_dwell = params->measurement_dwell; + cmd->v7.passive_dwell = params->measurement_dwell; + } else { + cmd->v7.active_dwell = IWL_SCAN_DWELL_ACTIVE; + cmd->v7.passive_dwell = IWL_SCAN_DWELL_PASSIVE; + } + cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; + + cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->max_out_time); + 
cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->suspend_time); + if (iwl_mvm_is_cdb_supported(mvm)) { + cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] = + cpu_to_le32(timing->max_out_time); + cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] = + cpu_to_le32(timing->suspend_time); + } + + return; + } + if (params->measurement_dwell) { - cmd->active_dwell = params->measurement_dwell; - cmd->passive_dwell = params->measurement_dwell; - cmd->extended_dwell = params->measurement_dwell; + cmd->v1.active_dwell = params->measurement_dwell; + cmd->v1.passive_dwell = params->measurement_dwell; + cmd->v1.extended_dwell = params->measurement_dwell; } else { - cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE; - cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE; - cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED; + cmd->v1.active_dwell = IWL_SCAN_DWELL_ACTIVE; + cmd->v1.passive_dwell = IWL_SCAN_DWELL_PASSIVE; + cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED; } - cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; + cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; if (iwl_mvm_has_new_tx_api(mvm)) { cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); - cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time); - cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time); + cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->max_out_time); + cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->suspend_time); if (iwl_mvm_is_cdb_supported(mvm)) { - cmd->v6.max_out_time[1] = + cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(timing->max_out_time); - cmd->v6.suspend_time[1] = + cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(timing->suspend_time); } } else { @@ -1102,11 +1147,6 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cmd->v1.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); } - - if (iwl_mvm_is_regular_scan(params)) - cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); - else - cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); } static void @@ -1178,8 +1218,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int type) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; - void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ? 
- (void *)&cmd->v6.data : (void *)&cmd->v1.data; + void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm); struct iwl_scan_req_umac_tail *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels; @@ -1216,7 +1255,10 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; - if (iwl_mvm_has_new_tx_api(mvm)) { + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) { + cmd->v7.channel_flags = channel_flags; + cmd->v7.n_channels = params->n_channels; + } else if (iwl_mvm_has_new_tx_api(mvm)) { cmd->v6.channel_flags = channel_flags; cmd->v6.n_channels = params->n_channels; } else { @@ -1661,8 +1703,10 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm) { int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1; - if (iwl_mvm_has_new_tx_api(mvm)) - base_size = IWL_SCAN_REQ_UMAC_SIZE; + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) + base_size = IWL_SCAN_REQ_UMAC_SIZE_V7; + else if (iwl_mvm_has_new_tx_api(mvm)) + base_size = IWL_SCAN_REQ_UMAC_SIZE_V6; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) return base_size + diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index c4a343534c5e..d31d84eebc5d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1679,7 +1679,8 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, u32 qmask, enum nl80211_iftype iftype, enum iwl_sta_type type) { - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || + sta->sta_id == IWL_MVM_INVALID_STA) { sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) return -ENOSPC; @@ -1700,29 +1701,29 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) sta->sta_id = IWL_MVM_INVALID_STA; } -static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm) +static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue, + u8 sta_id, u8 fifo) { unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? 
mvm->cfg->base_params->wd_timeout : IWL_WATCHDOG_DISABLED; if (iwl_mvm_has_new_tx_api(mvm)) { - int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue, - mvm->aux_sta.sta_id, - IWL_MAX_TID_COUNT, - wdg_timeout); - mvm->aux_queue = queue; + int tvqm_queue = + iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id, + IWL_MAX_TID_COUNT, + wdg_timeout); + *queue = tvqm_queue; } else { struct iwl_trans_txq_scd_cfg cfg = { - .fifo = IWL_MVM_TX_FIFO_MCAST, - .sta_id = mvm->aux_sta.sta_id, + .fifo = fifo, + .sta_id = sta_id, .tid = IWL_MAX_TID_COUNT, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; - iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg, - wdg_timeout); + iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout); } } @@ -1741,7 +1742,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) /* Map Aux queue to fifo - needs to happen before adding Aux station */ if (!iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_enable_aux_queue(mvm); + iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, + mvm->aux_sta.sta_id, + IWL_MVM_TX_FIFO_MCAST); ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, MAC_INDEX_AUX, 0); @@ -1755,7 +1758,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_enable_aux_queue(mvm); + iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue, + mvm->aux_sta.sta_id, + IWL_MVM_TX_FIFO_MCAST); return 0; } @@ -1763,10 +1768,31 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; lockdep_assert_held(&mvm->mutex); - return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, + + /* Map snif queue to fifo - must happen before adding snif station */ + if (!iwl_mvm_has_new_tx_api(mvm)) + iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, + mvm->snif_sta.sta_id, + IWL_MVM_TX_FIFO_BE); + + ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, mvmvif->id, 0); + if (ret) + return ret; + + /* + * For 22000 firmware and on we cannot add queue to a station unknown + * to firmware so enable queue here - after the station was added + */ + if (iwl_mvm_has_new_tx_api(mvm)) + iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue, + mvm->snif_sta.sta_id, + IWL_MVM_TX_FIFO_BE); + + return 0; } int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) @@ -1775,6 +1801,8 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); + iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue, + IWL_MAX_TID_COUNT, 0); ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); @@ -1996,7 +2024,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_MCAST, .sta_id = msta->sta_id, - .tid = IWL_MAX_TID_COUNT, + .tid = 0, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; @@ -2009,6 +2037,17 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) vif->type != NL80211_IFTYPE_ADHOC)) return -ENOTSUPP; + /* + * In IBSS, ieee80211_check_queues() sets the cab_queue to be + * invalid, so make sure we use the queue we want. + * Note that this is done here as we want to avoid making DQA + * changes in mac80211 layer. 
+ */ + if (vif->type == NL80211_IFTYPE_ADHOC) { + vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; + mvmvif->cab_queue = vif->cab_queue; + } + /* * While in previous FWs we had to exclude cab queue from TFD queue * mask, now it is needed as any other queue. @@ -2036,24 +2075,13 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (iwl_mvm_has_new_tx_api(mvm)) { int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue, msta->sta_id, - IWL_MAX_TID_COUNT, + 0, timeout); mvmvif->cab_queue = queue; } else if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_STA_TYPE)) { - /* - * In IBSS, ieee80211_check_queues() sets the cab_queue to be - * invalid, so make sure we use the queue we want. - * Note that this is done here as we want to avoid making DQA - * changes in mac80211 layer. - */ - if (vif->type == NL80211_IFTYPE_ADHOC) { - vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; - mvmvif->cab_queue = vif->cab_queue; - } + IWL_UCODE_TLV_API_STA_TYPE)) iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, &cfg, timeout); - } return 0; } @@ -2072,7 +2100,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0); iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, - IWL_MAX_TID_COUNT, 0); + 0, 0); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); if (ret) @@ -2408,28 +2436,12 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* * Note the possible cases: - * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed - * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free - * one and mark it as reserved - * 3. In DQA mode, but no traffic yet on this TID: same treatment as in - * non-DQA mode, since the TXQ hasn't yet been allocated - * Don't support case 3 for new TX path as it is not expected to happen - * and aggregation will be offloaded soon anyway + * 1. An enabled TXQ - TXQ needs to become agg'ed + * 2. 
The TXQ hasn't yet been enabled, so find a free one and mark + * it as reserved */ txq_id = mvmsta->tid_data[tid].txq_id; - if (iwl_mvm_has_new_tx_api(mvm)) { - if (txq_id == IWL_MVM_INVALID_QUEUE) { - ret = -ENXIO; - goto release_locks; - } - } else if (unlikely(mvm->queue_info[txq_id].status == - IWL_MVM_QUEUE_SHARED)) { - ret = -ENXIO; - IWL_DEBUG_TX_QUEUES(mvm, - "Can't start tid %d agg on shared queue!\n", - tid); - goto release_locks; - } else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) { + if (txq_id == IWL_MVM_INVALID_QUEUE) { txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); @@ -2438,16 +2450,16 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_ERR(mvm, "Failed to allocate agg queue\n"); goto release_locks; } - /* - * TXQ shouldn't be in inactive mode for non-DQA, so getting - * an inactive queue from iwl_mvm_find_free_queue() is - * certainly a bug - */ - WARN_ON(mvm->queue_info[txq_id].status == - IWL_MVM_QUEUE_INACTIVE); /* TXQ hasn't yet been enabled, so mark it only as reserved */ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; + } else if (unlikely(mvm->queue_info[txq_id].status == + IWL_MVM_QUEUE_SHARED)) { + ret = -ENXIO; + IWL_DEBUG_TX_QUEUES(mvm, + "Can't start tid %d agg on shared queue!\n", + tid); + goto release_locks; } spin_unlock(&mvm->queue_info_lock); @@ -2618,8 +2630,10 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, - u16 txq_id) + struct iwl_mvm_tid_data *tid_data) { + u16 txq_id = tid_data->txq_id; + if (iwl_mvm_has_new_tx_api(mvm)) return; @@ -2631,8 +2645,10 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, * allocated through iwl_mvm_enable_txq, so we can just mark it back as * free. 
*/ - if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) + if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) { mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; + tid_data->txq_id = IWL_MVM_INVALID_QUEUE; + } spin_unlock_bh(&mvm->queue_info_lock); } @@ -2663,7 +2679,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvmsta->agg_tids &= ~BIT(tid); - iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id); + iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); switch (tid_data->state) { case IWL_AGG_ON: @@ -2730,7 +2746,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvmsta->agg_tids &= ~BIT(tid); spin_unlock_bh(&mvmsta->lock); - iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id); + iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); if (old_state >= IWL_AGG_ON) { iwl_mvm_drain_sta(mvm, mvmsta, true); @@ -3092,8 +3108,9 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, int ret, size; u32 status; + /* This is a valid situation for GTK removal */ if (sta_id == IWL_MVM_INVALID_STA) - return -EINVAL; + return 0; key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK); @@ -3154,17 +3171,9 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, } sta_id = mvm_sta->sta_id; - if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || - keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || - keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { - ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, - false); - goto end; - } - /* * It is possible that the 'sta' parameter is NULL, and thus - * there is a need to retrieve the sta from the local station + * there is a need to retrieve the sta from the local station * table. */ if (!sta) { @@ -3179,6 +3188,17 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) return -EINVAL; + } else { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + sta_id = mvmvif->mcast_sta.sta_id; + } + + if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { + ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false); + goto end; } /* If the key_offset is not pre-assigned, we need to find a diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 4d0314912e94..342ca1778efd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -35,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
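The iwl_mvm_set_sta_key() hunk above moves the IGTK special case below the station lookup and adds a fallback: when no peer station is passed (i.e. a group key), the key now targets the interface's multicast station. Together with __iwl_mvm_remove_sta_key() treating a missing station as a successful no-op for GTK removal, the group-key paths stop returning spurious errors. A simplified sketch of the resulting contract (types and names hypothetical):

	#include <stdint.h>

	#define INVALID_STA	0xffu	/* stand-in for IWL_MVM_INVALID_STA */

	struct peer { uint8_t sta_id; };
	struct vif  { struct peer mcast_sta; };

	/* group keys (sta == NULL) now resolve to the multicast station */
	static uint8_t key_target_sta(const struct peer *sta,
				      const struct vif *vif)
	{
		return sta ? sta->sta_id : vif->mcast_sta.sta_id;
	}

	/* and removing a key with no station left is a successful no-op */
	static int remove_sta_key(uint8_t sta_id)
	{
		if (sta_id == INVALID_STA)
			return 0;	/* valid situation for GTK removal */
		/* ... build and send the real remove-key command here ... */
		return 0;
	}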
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -132,6 +129,24 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) * executed, and a new time event means a new command. */ iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC); + + /* Do the same for the P2P device queue (STA) */ + if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) { + struct iwl_mvm_vif *mvmvif; + + /* + * NB: access to this pointer would be racy, but the flush bit + * can only be set when we had a P2P-Device VIF, and we have a + * flush of this work in iwl_mvm_prepare_mac_removal() so it's + * not really racy. + */ + + if (!WARN_ON(!mvm->p2p_device_vif)) { + mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif); + iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, + CMD_ASYNC); + } + } } static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) @@ -185,9 +200,13 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const char *errmsg) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + if (vif->type != NL80211_IFTYPE_STATION) return false; - if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) + + if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc && + vif->bss_conf.dtim_period) return false; if (errmsg) IWL_ERR(mvm, "%s\n", errmsg); @@ -331,7 +350,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, * and know the dtim period. */ iwl_mvm_te_check_disconnect(mvm, te_data->vif, - "No association and the time event is over already..."); + "No beacon heard and the time event is over already..."); break; default: break; @@ -603,7 +622,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, time_cmd.repeat = 1; time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_NOTIF_HOST_EVENT_END | - T2_V2_START_IMMEDIATELY); + TE_V2_START_IMMEDIATELY); if (!wait_for_notif) { iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); @@ -796,7 +815,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, time_cmd.repeat = 1; time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_NOTIF_HOST_EVENT_END | - T2_V2_START_IMMEDIATELY); + TE_V2_START_IMMEDIATELY); return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); } @@ -855,10 +874,12 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); - if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) + if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { iwl_mvm_remove_time_event(mvm, mvmvif, te_data); - else + set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); + } else { iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); + } iwl_mvm_roc_finished(mvm); } @@ -904,6 +925,8 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, time_cmd.interval = cpu_to_le32(1); time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_ABSENCE); + if (!apply_time) + time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY); return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 6f2e2af23219..6c014c273922 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -419,11 +419,11 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, { struct ieee80211_key_conf *keyconf = 
info->control.hw_key; u8 *crypto_hdr = skb_frag->data + hdrlen; + enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM; u64 pn; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_CCMP: - case WLAN_CIPHER_SUITE_CCMP_256: iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); break; @@ -447,13 +447,16 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: + type = TX_CMD_SEC_GCMP; + /* Fall through */ + case WLAN_CIPHER_SUITE_CCMP_256: /* TODO: Taking the key from the table might introduce a race * when PTK rekeying is done, having old packets with a PN * based on the old key but the message encrypted with a new * one. * Need to handle this. */ - tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE; + tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE; tx_cmd->key[0] = keyconf->hw_key_idx; iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); break; @@ -645,7 +648,11 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || info.control.vif->type == NL80211_IFTYPE_AP || info.control.vif->type == NL80211_IFTYPE_ADHOC) { - sta_id = mvmvif->bcast_sta.sta_id; + if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE) + sta_id = mvmvif->bcast_sta.sta_id; + else + sta_id = mvmvif->mcast_sta.sta_id; + queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr->frame_control); if (queue < 0) @@ -657,7 +664,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (ap_sta_id != IWL_MVM_INVALID_STA) sta_id = ap_sta_id; } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { - queue = mvm->aux_queue; + queue = mvm->snif_queue; + sta_id = mvm->snif_sta.sta_id; } } @@ -1871,14 +1879,12 @@ int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags) struct iwl_mvm_int_sta *int_sta = sta; struct iwl_mvm_sta *mvm_sta = sta; - if (iwl_mvm_has_new_tx_api(mvm)) { - if (internal) - return iwl_mvm_flush_sta_tids(mvm, int_sta->sta_id, - BIT(IWL_MGMT_TID), flags); + BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) != + offsetof(struct iwl_mvm_sta, sta_id)); + if (iwl_mvm_has_new_tx_api(mvm)) return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, - 0xFF, flags); - } + 0xff | BIT(IWL_MGMT_TID), flags); if (internal) return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 2ea74abad73d..d2cada0ab426 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -603,6 +603,12 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) { + if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) { + IWL_ERR(mvm, + "DEVICE_ENABLED bit is not set. 
Aborting dump.\n"); + return; + } + iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]); if (mvm->error_event_table[1]) @@ -804,12 +810,19 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, }; - bool remove_mac_queue = true; + bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE; int ret; + if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES)) + return -EINVAL; + if (iwl_mvm_has_new_tx_api(mvm)) { spin_lock_bh(&mvm->queue_info_lock); - mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue); + + if (remove_mac_queue) + mvm->hw_queue_to_mac80211[queue] &= + ~BIT(mac80211_queue); + spin_unlock_bh(&mvm->queue_info_lock); iwl_trans_txq_free(mvm->trans, queue); @@ -1143,9 +1156,18 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, unsigned int default_timeout = cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) { + /* + * We can't know when the station is asleep or awake, so we + * must disable the queue hang detection. + */ + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) && + vif && vif->type == NL80211_IFTYPE_AP) + return IWL_WATCHDOG_DISABLED; return iwlmvm_mod_params.tfd_q_hang_detect ? default_timeout : IWL_WATCHDOG_DISABLED; + } trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS); txq_timer = (void *)trigger->data; @@ -1172,6 +1194,8 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, return le32_to_cpu(txq_timer->p2p_go); case NL80211_IFTYPE_P2P_DEVICE: return le32_to_cpu(txq_timer->p2p_device); + case NL80211_IFTYPE_MONITOR: + return default_timeout; default: WARN_ON(1); return mvm->cfg->base_params->wd_timeout; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 858765fed8f8..4cbc6cb8bf89 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -8,6 +8,7 @@ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016-2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -36,6 +37,7 @@ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. 
* Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -465,6 +467,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)}, @@ -483,6 +487,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)}, @@ -508,67 +513,317 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, /* 9000 Series */ - {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0060, 
iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x1550, iwl9260_killer_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 
0x023C, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x02A4, 
iwl9462_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + 
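/* Subsystem IDs 0x1551/0x1552 appear to be the Killer(R) 1550s/1550i
+ * OEM SKUs of the 9560: same silicon, branded subsystem IDs. */
+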
{IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)}, + 
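/* The 0x9DF0 entries dropped earlier in this hunk reappear below as
+ * *_cfg_soc; the _soc variants are assumed to mark the PCH-integrated
+ * (CNVi) flavor of the same device rather than a feature change. */
+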
{IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x1552, 
iwl9560_killer_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)}, /* a000 Series */ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)}, @@ -576,8 +831,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2720, 0x0000, iwla000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0070, iwla000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0078, iwla000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x2720, 0x0030, iwla000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x1080, iwla000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0090, iwla000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwla000_2ax_cfg_hr)}, + #endif /* CONFIG_IWLMVM */ {0} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 4fb7647995c3..9875ab5ce18c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -666,11 +666,15 @@ static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index) return index & (q->n_window - 1); } -static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie, +static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans, struct iwl_txq *txq, int idx) { - return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq, - idx); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (trans->cfg->use_tfh) + idx = iwl_pcie_get_cmd_index(txq, idx); + + return txq->tfds + trans_pcie->tfd_size * idx; } static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index a06b6612b658..ca99c3cf41c2 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -901,6 +901,8 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) } def_rxq = trans_pcie->rxq; + cancel_work_sync(&rba->rx_alloc); + spin_lock(&rba->lock); atomic_set(&rba->req_pending, 0); atomic_set(&rba->req_ready, 0); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index c59f4581e972..ac05fd1e74c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -49,6 +49,7 @@ * *****************************************************************************/ #include "iwl-trans.h" +#include "iwl-prph.h" #include "iwl-context-info.h" #include "internal.h" @@ -156,6 +157,11 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) trans_pcie->is_down = true; + /* Stop dbgc before stopping device */ + iwl_write_prph(trans, DBGC_IN_SAMPLE, 0); + udelay(100); + iwl_write_prph(trans, DBGC_OUT_CTRL, 0); + /* tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); diff --git 
a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 2e3e013ec95a..dffa697d71e0 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1138,6 +1138,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) trans_pcie->is_down = true; + /* Stop dbgc before stopping device */ + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { + iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); + } else { + iwl_write_prph(trans, DBGC_IN_SAMPLE, 0); + udelay(100); + iwl_write_prph(trans, DBGC_OUT_CTRL, 0); + } + /* tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); @@ -1490,14 +1499,13 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int max_irqs, num_irqs, i, ret, nr_online_cpus; + int max_irqs, num_irqs, i, ret; u16 pci_cmd; if (!trans->cfg->mq_rx_supported) goto enable_msi; - nr_online_cpus = num_online_cpus(); - max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES); + max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES); for (i = 0; i < max_irqs; i++) trans_pcie->msix_entries[i].entry = i; @@ -1523,16 +1531,17 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, * Two interrupts less: non rx causes shared with FBQ and RSS. * More than two interrupts: we will use fewer RSS queues. */ - if (num_irqs <= nr_online_cpus) { + if (num_irqs <= max_irqs - 2) { trans_pcie->trans->num_rx_queues = num_irqs + 1; trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX | IWL_SHARED_IRQ_FIRST_RSS; - } else if (num_irqs == nr_online_cpus + 1) { + } else if (num_irqs == max_irqs - 1) { trans_pcie->trans->num_rx_queues = num_irqs; trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX; } else { trans_pcie->trans->num_rx_queues = num_irqs - 1; } + WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES); trans_pcie->alloc_vecs = num_irqs; trans_pcie->msix_enabled = true; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index d74613fcb756..6f45c8148b27 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -171,8 +171,6 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans, static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and * idx is bounded by n_window */ @@ -181,7 +179,7 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) lockdep_assert_held(&txq->lock); iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, - iwl_pcie_get_tfd(trans_pcie, txq, idx)); + iwl_pcie_get_tfd(trans, txq, idx)); /* free SKB */ if (txq->entries) { @@ -367,11 +365,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_cmd_meta *out_meta) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); - struct iwl_tfh_tfd *tfd = - iwl_pcie_get_tfd(trans_pcie, txq, idx); + struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); dma_addr_t tb_phys; bool amsdu; int i, len, tb1_len, tb2_len, hdr_len; @@ -568,8 +564,7 @@ static 
int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, u8 group_id = iwl_cmd_groupid(cmd->id); const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; - struct iwl_tfh_tfd *tfd = - iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); + struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); memset(tfd, 0, sizeof(*tfd)); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index c645d10d3707..4704137a26e0 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -373,7 +373,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i, num_tbs; - void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index); + void *tfd = iwl_pcie_get_tfd(trans, txq, index); /* Sanity check on number of chunks */ num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); @@ -1999,7 +1999,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, } trace_iwlwifi_dev_tx(trans->dev, skb, - iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), + iwl_pcie_get_tfd(trans, txq, txq->write_ptr), trans_pcie->tfd_size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, hdr_len); @@ -2073,7 +2073,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, IEEE80211_CCMP_HDR_LEN : 0; trace_iwlwifi_dev_tx(trans->dev, skb, - iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr), + iwl_pcie_get_tfd(trans, txq, txq->write_ptr), trans_pcie->tfd_size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); @@ -2406,7 +2406,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, IWL_FIRST_TB_SIZE); - tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); + tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); /* Set up entry for this TFD in Tx byte-count array */ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), iwl_pcie_tfd_get_num_tbs(trans, tfd)); diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c index d5a3bf91a03e..ab6d39e12069 100644 --- a/drivers/net/wireless/intersil/p54/main.c +++ b/drivers/net/wireless/intersil/p54/main.c @@ -852,12 +852,11 @@ void p54_unregister_common(struct ieee80211_hw *dev) { struct p54_common *priv = dev->priv; -#ifdef CONFIG_P54_LEDS - p54_unregister_leds(priv); -#endif /* CONFIG_P54_LEDS */ - if (priv->registered) { priv->registered = false; +#ifdef CONFIG_P54_LEDS + p54_unregister_leds(priv); +#endif /* CONFIG_P54_LEDS */ ieee80211_unregister_hw(dev); } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 6467ffac9811..d686ba10fecc 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -727,16 +727,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val) val != PS_MANUAL_POLL) return -EINVAL; - old_ps = data->ps; - data->ps = val; - - local_bh_disable(); if (val == PS_MANUAL_POLL) { + if (data->ps != PS_ENABLED) + return -EINVAL; + local_bh_disable(); ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_ps_poll, data); - data->ps_poll_pending = true; - } else if (old_ps == PS_DISABLED && val != PS_DISABLED) { + local_bh_enable(); + return 0; + } + old_ps = data->ps; + data->ps = val; + + local_bh_disable(); + if (old_ps == PS_DISABLED && val != PS_DISABLED) { ieee80211_iterate_active_interfaces_atomic( 
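/* hwsim_send_nullfunc_ps() is hwsim's iterator callback here: on a
 * PS_DISABLED -> PS-enabled transition it should emit one PM=1 NullFunc
 * frame per active vif, mirroring the hwsim_send_ps_poll path above. */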
data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_nullfunc_ps, data); @@ -3108,6 +3113,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct hwsim_new_radio_params param = { 0 }; const char *hwname = NULL; + int ret; param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG]; param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE]; @@ -3118,6 +3124,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) if (info->attrs[HWSIM_ATTR_CHANNELS]) param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); + if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) { + GENL_SET_ERR_MSG(info, "too many channels specified"); + return -EINVAL; + } + if (info->attrs[HWSIM_ATTR_NO_VIF]) param.no_vif = true; @@ -3142,12 +3153,16 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) { u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]); - if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) + if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) { + kfree(hwname); return -EINVAL; + } param.regd = hwsim_world_regdom_custom[idx]; } - return mac80211_hwsim_new_radio(info, ¶m); + ret = mac80211_hwsim_new_radio(info, ¶m); + kfree(hwname); + return ret; } static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) @@ -3212,7 +3227,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info))) continue; - skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) { res = -ENOMEM; goto out_err; @@ -3412,8 +3427,11 @@ static void __net_exit hwsim_exit_net(struct net *net) continue; list_del(&data->list); - INIT_WORK(&data->destroy_work, destroy_radio); - schedule_work(&data->destroy_work); + spin_unlock_bh(&hwsim_radio_lock); + mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), + NULL); + spin_lock_bh(&hwsim_radio_lock); + } spin_unlock_bh(&hwsim_radio_lock); } diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 32c5074da84c..68aa0c7a8139 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -1116,6 +1116,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); enum nl80211_iftype curr_iftype = dev->ieee80211_ptr->iftype; + if (priv->scan_request) { + mwifiex_dbg(priv->adapter, ERROR, + "change virtual interface: scan in process\n"); + return -EBUSY; + } + switch (curr_iftype) { case NL80211_IFTYPE_ADHOC: switch (type) { diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index cd314946452c..9511f5fe62f4 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2781,7 +2781,10 @@ static void mwifiex_pcie_card_reset_work(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; - pci_reset_function(card->dev); + /* We can't afford to wait here; remove() might be waiting on us. If we + * can't grab the device lock, maybe we'll get another chance later. 
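+ * (pci_try_reset_function() takes the device lock with trylock
+ * semantics, so it is expected to fail fast with -EAGAIN rather than
+ * sleep if remove() already holds the lock.)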
+ */ + pci_try_reset_function(card->dev); } static void mwifiex_pcie_work(struct work_struct *work) diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index f4f2b9b27e32..50890cab8807 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -644,6 +644,9 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) MWIFIEX_FUNC_SHUTDOWN); } + if (adapter->workqueue) + flush_workqueue(adapter->workqueue); + mwifiex_usb_free(card); mwifiex_dbg(adapter, FATAL, diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index 0cd68ffc2c74..51ccf10f4413 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -708,12 +708,14 @@ void mwifiex_hist_data_set(struct mwifiex_private *priv, u8 rx_rate, s8 snr, s8 nflr) { struct mwifiex_histogram_data *phist_data = priv->hist_data; + s8 nf = -nflr; + s8 rssi = snr - nflr; atomic_inc(&phist_data->num_samples); atomic_inc(&phist_data->rx_rate[rx_rate]); - atomic_inc(&phist_data->snr[snr]); - atomic_inc(&phist_data->noise_flr[128 + nflr]); - atomic_inc(&phist_data->sig_str[nflr - snr]); + atomic_inc(&phist_data->snr[snr + 128]); + atomic_inc(&phist_data->noise_flr[nf + 128]); + atomic_inc(&phist_data->sig_str[rssi + 128]); } /* function to reset histogram data during init/reset */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index 69131965a298..146e42a132e7 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -643,11 +643,11 @@ static int qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv) { if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, priv->tx_bd_num)) { - pr_err_ratelimited("reclaim full Tx queue\n"); qtnf_pcie_data_tx_reclaim(priv); if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, priv->tx_bd_num)) { + pr_warn_ratelimited("reclaim full Tx queue\n"); priv->tx_full_count++; return 0; } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index ecc96312a370..6fe0c6abe0d6 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -142,15 +142,25 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, if (!rt2x00dev->ops->hw->set_rts_threshold && (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS | IEEE80211_TX_RC_USE_CTS_PROTECT))) { - if (rt2x00queue_available(queue) <= 1) - goto exit_fail; + if (rt2x00queue_available(queue) <= 1) { + /* + * Recheck for full queue under lock to avoid race + * conditions with rt2x00lib_txdone(). + */ + spin_lock(&queue->tx_lock); + if (rt2x00queue_threshold(queue)) + rt2x00queue_pause_queue(queue); + spin_unlock(&queue->tx_lock); + + goto exit_free_skb; + } if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) - goto exit_fail; + goto exit_free_skb; } if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false))) - goto exit_fail; + goto exit_free_skb; /* * Pausing queue has to be serialized with rt2x00lib_txdone(). 
Note @@ -164,10 +174,6 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, return; - exit_fail: - spin_lock(&queue->tx_lock); - rt2x00queue_pause_queue(queue); - spin_unlock(&queue->tx_lock); exit_free_skb: ieee80211_free_txskb(hw, skb); } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index a2c1ca5c76d1..e1660b92b20c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -372,16 +372,15 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, /* * Determine IFS values - * - Use TXOP_BACKOFF for probe and management frames except beacons + * - Use TXOP_BACKOFF for management frames except beacons * - Use TXOP_SIFS for fragment bursts * - Use TXOP_HTTXOP for everything else * * Note: rt2800 devices won't use CTS protection (if used) * for frames not transmitted with TXOP_HTTXOP */ - if ((ieee80211_is_mgmt(hdr->frame_control) && - !ieee80211_is_beacon(hdr->frame_control)) || - (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) + if (ieee80211_is_mgmt(hdr->frame_control) && + !ieee80211_is_beacon(hdr->frame_control)) txdesc->u.ht.txop = TXOP_BACKOFF; else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) txdesc->u.ht.txop = TXOP_SIFS; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index e2f4f5778267..086aad22743d 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c @@ -57,7 +57,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev, if (status >= 0) return 0; - if (status == -ENODEV) { + if (status == -ENODEV || status == -ENOENT) { /* Device has disappeared. */ clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); break; @@ -321,7 +321,7 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data) status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); if (status) { - if (status == -ENODEV) + if (status == -ENODEV || status == -ENOENT) clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00lib_dmadone(entry); @@ -410,7 +410,7 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data) status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); if (status) { - if (status == -ENODEV) + if (status == -ENODEV || status == -ENOENT) clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00lib_dmadone(entry); diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c index 121b94f09714..9a1d15b3ce45 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c @@ -1450,6 +1450,7 @@ static int rtl8187_probe(struct usb_interface *intf, goto err_free_dev; } mutex_init(&priv->io_mutex); + mutex_init(&priv->conf_mutex); SET_IEEE80211_DEV(dev, &intf->dev); usb_set_intfdata(intf, dev); @@ -1625,7 +1626,6 @@ static int rtl8187_probe(struct usb_interface *intf, printk(KERN_ERR "rtl8187: Cannot register device\n"); goto err_free_dmabuf; } - mutex_init(&priv->conf_mutex); skb_queue_head_init(&priv->b_tx_status.queue); wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n", diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index ea18aa7afecb..ec82c1c3f12e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ 
b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -483,18 +483,21 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw) } -void rtl_deinit_deferred_work(struct ieee80211_hw *hw) +void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq) { struct rtl_priv *rtlpriv = rtl_priv(hw); del_timer_sync(&rtlpriv->works.watchdog_timer); - cancel_delayed_work(&rtlpriv->works.watchdog_wq); - cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); - cancel_delayed_work(&rtlpriv->works.ps_work); - cancel_delayed_work(&rtlpriv->works.ps_rfon_wq); - cancel_delayed_work(&rtlpriv->works.fwevt_wq); - cancel_delayed_work(&rtlpriv->works.c2hcmd_wq); + cancel_delayed_work_sync(&rtlpriv->works.watchdog_wq); + if (ips_wq) + cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); + else + cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq); + cancel_delayed_work_sync(&rtlpriv->works.ps_work); + cancel_delayed_work_sync(&rtlpriv->works.ps_rfon_wq); + cancel_delayed_work_sync(&rtlpriv->works.fwevt_wq); + cancel_delayed_work_sync(&rtlpriv->works.c2hcmd_wq); } EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work); @@ -1664,7 +1667,7 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw, void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv) { struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; - u8 reject_agg, ctrl_agg_size = 0, agg_size; + u8 reject_agg = 0, ctrl_agg_size = 0, agg_size = 0; if (rtlpriv->cfg->ops->get_btc_status()) btc_ops->btc_get_ampdu_cfg(rtlpriv, &reject_agg, diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index b56d1b7f5567..cbbb5be36a09 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -121,7 +121,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw); void rtl_deinit_rfkill(struct ieee80211_hw *hw); void rtl_watch_dog_timer_callback(unsigned long data); -void rtl_deinit_deferred_work(struct ieee80211_hw *hw); +void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq); bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index b5e9877d935c..a9e1239ff21b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -173,16 +173,6 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist) u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv) { - struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params; - - /* override ant_num / ant_path */ - if (mod_params->ant_sel) { - rtlpriv->btcoexist.btc_info.ant_num = - (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1); - - rtlpriv->btcoexist.btc_info.single_ant_path = - (mod_params->ant_sel == 1 ? 0 : 1); - } return rtlpriv->btcoexist.btc_info.single_ant_path; } @@ -193,7 +183,6 @@ u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv) u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) { - struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params; u8 num; if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2) @@ -201,10 +190,6 @@ u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) else num = 1; - /* override ant_num / ant_path */ - if (mod_params->ant_sel) - num = (mod_params->ant_sel == 1 ? 
ANT_X2 : ANT_X1) + 1; - return num; } diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h index f9b87c12db09..ed1725cf091c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h @@ -601,6 +601,7 @@ extern struct btc_coexist gl_bt_coexist; bool exhalbtc_initlize_variables(void); bool exhalbtc_bind_bt_coex_withadapter(void *adapter); +void exhalbtc_power_on_setting(struct btc_coexist *btcoexist); void exhalbtc_init_hw_config(struct btc_coexist *btcoexist, bool wifi_only); void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist); void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c index 7d296a401b6f..c394e0412982 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c @@ -32,6 +32,7 @@ static struct rtl_btc_ops rtl_btc_operation = { .btc_init_variables = rtl_btc_init_variables, .btc_init_hal_vars = rtl_btc_init_hal_vars, + .btc_power_on_setting = rtl_btc_power_on_setting, .btc_init_hw_config = rtl_btc_init_hw_config, .btc_ips_notify = rtl_btc_ips_notify, .btc_lps_notify = rtl_btc_lps_notify, @@ -110,6 +111,11 @@ void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv) */ } +void rtl_btc_power_on_setting(struct rtl_priv *rtlpriv) +{ + exhalbtc_power_on_setting(&gl_bt_coexist); +} + void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv) { u8 bt_exist; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h index ac1253c46f44..c2735e4fa5d3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h @@ -29,6 +29,7 @@ void rtl_btc_init_variables(struct rtl_priv *rtlpriv); void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv); +void rtl_btc_power_on_setting(struct rtl_priv *rtlpriv); void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv); void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type); void rtl_btc_lps_notify(struct rtl_priv *rtlpriv, u8 type); diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index c53cbf3d52bd..b01123138797 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -130,7 +130,6 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context, firmware->size); rtlpriv->rtlhal.wowlan_fwsize = firmware->size; } - rtlpriv->rtlhal.fwsize = firmware->size; release_firmware(firmware); } @@ -196,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw) /* reset sec info */ rtl_cam_reset_sec_info(hw); - rtl_deinit_deferred_work(hw); + rtl_deinit_deferred_work(hw, false); } rtlpriv->intf_ops->adapter_stop(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 08dc8919ef60..457a0f725c8a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -1568,7 +1568,14 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw) dev_kfree_skb_irq(skb); ring->idx = (ring->idx + 1) % ring->entries; } + + if (rtlpriv->use_new_trx_flow) { + rtlpci->tx_ring[i].cur_tx_rp = 0; + rtlpci->tx_ring[i].cur_tx_wp = 0; + } + ring->idx = 0; + ring->entries = 
rtlpci->txringcount[i]; } } spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); @@ -2352,7 +2359,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev) ieee80211_unregister_hw(hw); rtlmac->mac80211_registered = 0; } else { - rtl_deinit_deferred_work(hw); + rtl_deinit_deferred_work(hw, false); rtlpriv->intf_ops->adapter_stop(hw); } rtlpriv->cfg->ops->disable_interrupt(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index 07ee3096f50e..f6d00613c53d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -66,7 +66,7 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); /*<1> Stop all timer */ - rtl_deinit_deferred_work(hw); + rtl_deinit_deferred_work(hw, true); /*<2> Disable Interrupt */ rtlpriv->cfg->ops->disable_interrupt(hw); @@ -287,7 +287,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw) struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); enum rf_pwrstate rtstate; - cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); + cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq); spin_lock(&rtlpriv->locks.ips_lock); if (ppsc->inactiveps) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c index 9cff6bc4049c..cf551785eb08 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c @@ -299,9 +299,6 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, writeVal = 0x00000000; if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1) writeVal = writeVal - 0x06060606; - else if (rtlpriv->dm.dynamic_txhighpower_lvl == - TXHIGHPWRLEVEL_BT2) - writeVal = writeVal; *(p_outwriteval + rf) = writeVal; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index 7eae27f8e173..f9563ae301ad 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -682,7 +682,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct sk_buff *skb = NULL; - + bool rtstatus; u32 totalpacketlen; u8 u1rsvdpageloc[5] = { 0 }; bool b_dlok = false; @@ -768,7 +768,9 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) skb = dev_alloc_skb(totalpacketlen); skb_put_data(skb, &reserved_page_packet, totalpacketlen); - b_dlok = true; + rtstatus = rtl_cmd_send_packet(hw, skb); + if (rtstatus) + b_dlok = true; if (b_dlok) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD , diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 4d47b97adfed..f019eebe41d1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -846,6 +846,9 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw) return false; } + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv); + bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL); rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3)); @@ -1123,7 +1126,8 @@ static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw) /* Configuration Space offset 0x70f BIT7 is used to control L0S */ tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f); - 
_rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7)); + _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7) | + ASPM_L1_LATENCY << 3); /* Configuration Space offset 0x719 Bit3 is for L1 * BIT4 is for clock request @@ -2695,21 +2699,21 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1); rtlpriv->btcoexist.btc_info.single_ant_path = - (value & 0x40); /*0xc3[6]*/ + (value & 0x40 ? ANT_AUX : ANT_MAIN); /*0xc3[6]*/ } else { rtlpriv->btcoexist.btc_info.btcoexist = 0; rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; - rtlpriv->btcoexist.btc_info.single_ant_path = 0; + rtlpriv->btcoexist.btc_info.single_ant_path = ANT_MAIN; } /* override ant_num / ant_path */ if (mod_params->ant_sel) { rtlpriv->btcoexist.btc_info.ant_num = - (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1); + (mod_params->ant_sel == 1 ? ANT_X1 : ANT_X2); rtlpriv->btcoexist.btc_info.single_ant_path = - (mod_params->ant_sel == 1 ? 0 : 1); + (mod_params->ant_sel == 1 ? ANT_AUX : ANT_MAIN); } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 1d431d4bf6d2..b82e5b363c05 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -1122,7 +1122,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr) } if (0 == tmp) { read_addr = REG_DBI_RDATA + addr % 4; - ret = rtl_read_word(rtlpriv, read_addr); + ret = rtl_read_byte(rtlpriv, read_addr); } return ret; } @@ -1164,7 +1164,8 @@ static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw) } tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f); - _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7)); + _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7) | + ASPM_L1_LATENCY << 3); tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719); _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4)); @@ -1372,6 +1373,7 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw) ppsc->wakeup_reason = 0; + do_gettimeofday(&ts); rtlhal->last_suspend_sec = ts.tv_sec; switch (fw_reason) { diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 5590d07d0918..820c42ff5384 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -1150,7 +1150,7 @@ void rtl_usb_disconnect(struct usb_interface *intf) ieee80211_unregister_hw(hw); rtlmac->mac80211_registered = 0; } else { - rtl_deinit_deferred_work(hw); + rtl_deinit_deferred_work(hw, false); rtlpriv->intf_ops->adapter_stop(hw); } /*deinit rfkill */ diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 1ab1024330fb..c73ce334ce6c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -99,6 +99,7 @@ #define RTL_USB_MAX_RX_COUNT 100 #define QBSS_LOAD_SIZE 5 #define MAX_WMMELE_LENGTH 64 +#define ASPM_L1_LATENCY 7 #define TOTAL_CAM_ENTRY 32 @@ -2544,6 +2545,7 @@ struct bt_coexist_info { struct rtl_btc_ops { void (*btc_init_variables) (struct rtl_priv *rtlpriv); void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv); + void (*btc_power_on_setting)(struct rtl_priv *rtlpriv); void (*btc_init_hw_config) (struct rtl_priv *rtlpriv); void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type); void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type); @@ 
-2706,6 +2708,11 @@ enum bt_ant_num { ANT_X1 = 1, }; +enum bt_ant_path { + ANT_MAIN = 0, + ANT_AUX = 1, +}; + enum bt_co_type { BT_2WIRE = 0, BT_ISSC_3WIRE = 1, diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 070dfd68bb83..120b0ff545c1 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -557,28 +557,32 @@ static int bl_write_header(struct rsi_hw *adapter, u8 *flash_content, u32 content_size) { struct rsi_host_intf_ops *hif_ops = adapter->host_intf_ops; - struct bl_header bl_hdr; + struct bl_header *bl_hdr; u32 write_addr, write_len; int status; - bl_hdr.flags = 0; - bl_hdr.image_no = cpu_to_le32(adapter->priv->coex_mode); - bl_hdr.check_sum = cpu_to_le32( - *(u32 *)&flash_content[CHECK_SUM_OFFSET]); - bl_hdr.flash_start_address = cpu_to_le32( - *(u32 *)&flash_content[ADDR_OFFSET]); - bl_hdr.flash_len = cpu_to_le32(*(u32 *)&flash_content[LEN_OFFSET]); + bl_hdr = kzalloc(sizeof(*bl_hdr), GFP_KERNEL); + if (!bl_hdr) + return -ENOMEM; + + bl_hdr->flags = 0; + bl_hdr->image_no = cpu_to_le32(adapter->priv->coex_mode); + bl_hdr->check_sum = + cpu_to_le32(*(u32 *)&flash_content[CHECK_SUM_OFFSET]); + bl_hdr->flash_start_address = + cpu_to_le32(*(u32 *)&flash_content[ADDR_OFFSET]); + bl_hdr->flash_len = cpu_to_le32(*(u32 *)&flash_content[LEN_OFFSET]); write_len = sizeof(struct bl_header); if (adapter->rsi_host_intf == RSI_HOST_INTF_USB) { write_addr = PING_BUFFER_ADDRESS; status = hif_ops->write_reg_multiple(adapter, write_addr, - (u8 *)&bl_hdr, write_len); + (u8 *)bl_hdr, write_len); if (status < 0) { rsi_dbg(ERR_ZONE, "%s: Failed to load Version/CRC structure\n", __func__); - return status; + goto fail; } } else { write_addr = PING_BUFFER_ADDRESS >> 16; @@ -587,20 +591,23 @@ static int bl_write_header(struct rsi_hw *adapter, u8 *flash_content, rsi_dbg(ERR_ZONE, "%s: Unable to set ms word to common reg\n", __func__); - return status; + goto fail; } write_addr = RSI_SD_REQUEST_MASTER | (PING_BUFFER_ADDRESS & 0xFFFF); status = hif_ops->write_reg_multiple(adapter, write_addr, - (u8 *)&bl_hdr, write_len); + (u8 *)bl_hdr, write_len); if (status < 0) { rsi_dbg(ERR_ZONE, "%s: Failed to load Version/CRC structure\n", __func__); - return status; + goto fail; } } - return 0; + status = 0; +fail: + kfree(bl_hdr); + return status; } static u32 read_flash_capacity(struct rsi_hw *adapter) diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index 8d3a4839b6ef..0362967874aa 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -161,7 +161,6 @@ static void rsi_reset_card(struct sdio_func *pfunction) int err; struct mmc_card *card = pfunction->card; struct mmc_host *host = card->host; - s32 bit = (fls(host->ocr_avail) - 1); u8 cmd52_resp; u32 clock, resp, i; u16 rca; @@ -181,7 +180,6 @@ static void rsi_reset_card(struct sdio_func *pfunction) msleep(20); /* Initialize the SDIO card */ - host->ios.vdd = bit; host->ios.chip_select = MMC_CS_DONTCARE; host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; host->ios.power_mode = MMC_POWER_UP; @@ -636,11 +634,14 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, u32 *read_buf, u16 size) { u32 addr_on_bus, *data; - u32 align[2] = {}; u16 ms_addr; int status; - data = PTR_ALIGN(&align[0], 8); + data = kzalloc(RSI_MASTER_REG_BUF_SIZE, GFP_KERNEL); + if (!data) + return -ENOMEM; + + data = PTR_ALIGN(data, 8); ms_addr = (addr >> 16); status = rsi_sdio_master_access_msword(adapter, 
ms_addr); @@ -648,7 +649,7 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, rsi_dbg(ERR_ZONE, "%s: Unable to set ms word to common reg\n", __func__); - return status; + goto err; } addr &= 0xFFFF; @@ -666,7 +667,7 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, (u8 *)data, 4); if (status < 0) { rsi_dbg(ERR_ZONE, "%s: AHB register read failed\n", __func__); - return status; + goto err; } if (size == 2) { if ((addr & 0x3) == 0) @@ -688,17 +689,23 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, *read_buf = *data; } - return 0; +err: + kfree(data); + return status; } static int rsi_sdio_master_reg_write(struct rsi_hw *adapter, unsigned long addr, unsigned long data, u16 size) { - unsigned long data1[2], *data_aligned; + unsigned long *data_aligned; int status; - data_aligned = PTR_ALIGN(&data1[0], 8); + data_aligned = kzalloc(RSI_MASTER_REG_BUF_SIZE, GFP_KERNEL); + if (!data_aligned) + return -ENOMEM; + + data_aligned = PTR_ALIGN(data_aligned, 8); if (size == 2) { *data_aligned = ((data << 16) | (data & 0xFFFF)); @@ -717,6 +724,7 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter, rsi_dbg(ERR_ZONE, "%s: Unable to set ms word to common reg\n", __func__); + kfree(data_aligned); return -EIO; } addr = addr & 0xFFFF; @@ -726,12 +734,12 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter, (adapter, (addr | RSI_SD_REQUEST_MASTER), (u8 *)data_aligned, size); - if (status < 0) { + if (status < 0) rsi_dbg(ERR_ZONE, "%s: Unable to do AHB reg write\n", __func__); - return status; - } - return 0; + + kfree(data_aligned); + return status; } /** @@ -960,17 +968,21 @@ static void ulp_read_write(struct rsi_hw *adapter, u16 addr, u32 data, /*This function resets and re-initializes the chip.*/ static void rsi_reset_chip(struct rsi_hw *adapter) { - __le32 data; + u8 *data; u8 sdio_interrupt_status = 0; u8 request = 1; int ret; + data = kzalloc(sizeof(u32), GFP_KERNEL); + if (!data) + return; + rsi_dbg(INFO_ZONE, "Writing disable to wakeup register\n"); ret = rsi_sdio_write_register(adapter, 0, SDIO_WAKEUP_REG, &request); if (ret < 0) { rsi_dbg(ERR_ZONE, "%s: Failed to write SDIO wakeup register\n", __func__); - return; + goto err; } msleep(20); ret = rsi_sdio_read_register(adapter, RSI_FN1_INT_REGISTER, @@ -978,7 +990,7 @@ static void rsi_reset_chip(struct rsi_hw *adapter) if (ret < 0) { rsi_dbg(ERR_ZONE, "%s: Failed to Read Intr Status Register\n", __func__); - return; + goto err; } rsi_dbg(INFO_ZONE, "%s: Intr Status Register value = %d\n", __func__, sdio_interrupt_status); @@ -988,17 +1000,17 @@ static void rsi_reset_chip(struct rsi_hw *adapter) rsi_dbg(ERR_ZONE, "%s: Unable to set ms word to common reg\n", __func__); - return; + goto err; } - data = TA_HOLD_THREAD_VALUE; + put_unaligned_le32(TA_HOLD_THREAD_VALUE, data); if (rsi_sdio_write_register_multiple(adapter, TA_HOLD_THREAD_REG | RSI_SD_REQUEST_MASTER, - (u8 *)&data, 4)) { + data, 4)) { rsi_dbg(ERR_ZONE, "%s: Unable to hold Thread-Arch processor threads\n", __func__); - return; + goto err; } /* This msleep will ensure Thread-Arch processor to go to hold @@ -1019,6 +1031,9 @@ static void rsi_reset_chip(struct rsi_hw *adapter) * read write operations to complete for chip reset. 
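 *
 * A minimal sketch of the heap-buffer pattern the rsi hunks above
 * switch to, with hypothetical demo_* names (illustrative only, not
 * part of the patch). Buffers handed to an SDIO or USB host controller
 * may be DMA-mapped, and on-stack arrays are not DMA-safe, hence the
 * move from stack variables to kzalloc():
 *
 *	buf = kzalloc(sizeof(u32), GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	status = demo_bus_read(hw, addr, buf, sizeof(u32));
 *	if (!status)
 *		*out = get_unaligned_le32(buf);
 *	kfree(buf);
 *
 * One subtlety: when the allocation is then passed through PTR_ALIGN(),
 * as in rsi_sdio_master_reg_read() above, the pointer originally
 * returned by kzalloc() is the one that must eventually reach kfree().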
*/ msleep(500); +err: + kfree(data); + return; } /** diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 81df09dd2636..f90c10b3c921 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -162,13 +162,13 @@ static int rsi_usb_reg_read(struct usb_device *usbdev, u8 *buf; int status = -ENOMEM; + if (len > RSI_USB_CTRL_BUF_SIZE) + return -EINVAL; + buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL); if (!buf) return status; - if (len > RSI_USB_CTRL_BUF_SIZE) - return -EINVAL; - status = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), USB_VENDOR_REGISTER_READ, @@ -207,13 +207,13 @@ static int rsi_usb_reg_write(struct usb_device *usbdev, u8 *usb_reg_buf; int status = -ENOMEM; + if (len > RSI_USB_CTRL_BUF_SIZE) + return -EINVAL; + usb_reg_buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL); if (!usb_reg_buf) return status; - if (len > RSI_USB_CTRL_BUF_SIZE) - return -EINVAL; - usb_reg_buf[0] = (value & 0x00ff); usb_reg_buf[1] = (value & 0xff00) >> 8; usb_reg_buf[2] = 0x0; diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index 95e4bed57baf..6788fbbdd166 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -46,6 +46,8 @@ enum sdio_interrupt_type { #define PKT_BUFF_AVAILABLE 1 #define FW_ASSERT_IND 2 +#define RSI_MASTER_REG_BUF_SIZE 12 + #define RSI_DEVICE_BUFFER_STATUS_REGISTER 0xf3 #define RSI_FN1_INT_REGISTER 0xf9 #define RSI_SD_REQUEST_MASTER 0x10000 @@ -83,7 +85,7 @@ enum sdio_interrupt_type { #define TA_SOFT_RST_CLR 0 #define TA_SOFT_RST_SET BIT(0) #define TA_PC_ZERO 0 -#define TA_HOLD_THREAD_VALUE cpu_to_le32(0xF) +#define TA_HOLD_THREAD_VALUE 0xF #define TA_RELEASE_THREAD_VALUE cpu_to_le32(0xF) #define TA_BASE_ADDR 0x2200 #define MISC_CFG_BASE_ADDR 0x4105 diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c index a52224836a2b..666b88cb2cfe 100644 --- a/drivers/net/wireless/st/cw1200/sta.c +++ b/drivers/net/wireless/st/cw1200/sta.c @@ -198,7 +198,7 @@ void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv, priv->bss_loss_state++; - skb = ieee80211_nullfunc_get(priv->hw, priv->vif); + skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false); WARN_ON(!skb); if (skb) cw1200_tx(priv->hw, NULL, skb); @@ -2266,7 +2266,7 @@ static int cw1200_upload_null(struct cw1200_common *priv) .rate = 0xFF, }; - frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif); + frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false); if (!frame.skb) return -ENOMEM; diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index 9915d83a4a30..037defd10b91 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -566,7 +566,7 @@ static int wl1251_build_null_data(struct wl1251 *wl) size = sizeof(struct wl12xx_null_data_template); ptr = NULL; } else { - skb = ieee80211_nullfunc_get(wl->hw, wl->vif); + skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false); if (!skb) goto out; size = skb->len; @@ -1200,8 +1200,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw, WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS); enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc; - wl1251_acx_arp_ip_filter(wl, enable, addr); - + ret = wl1251_acx_arp_ip_filter(wl, enable, addr); if (ret < 0) goto out_sleep; } diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 2bfc12fdc929..761cf8573a80 100644 --- 
a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -1069,7 +1069,8 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif) ptr = NULL; } else { skb = ieee80211_nullfunc_get(wl->hw, - wl12xx_wlvif_to_vif(wlvif)); + wl12xx_wlvif_to_vif(wlvif), + false); if (!skb) goto out; size = skb->len; @@ -1096,7 +1097,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl, struct sk_buff *skb = NULL; int ret = -ENOMEM; - skb = ieee80211_nullfunc_get(wl->hw, vif); + skb = ieee80211_nullfunc_get(wl->hw, vif, false); if (!skb) goto out; diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c index 58898b99d3f7..145e10a8be55 100644 --- a/drivers/net/wireless/ti/wlcore/init.c +++ b/drivers/net/wireless/ti/wlcore/init.c @@ -549,6 +549,11 @@ static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret; + /* Disable filtering */ + ret = wl1271_acx_group_address_tbl(wl, wlvif, false, NULL, 0); + if (ret < 0) + return ret; + ret = wl1271_acx_ap_max_tx_retry(wl, wlvif); if (ret < 0) return ret; diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index f8a1fea64e25..219d1a86b92e 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -406,6 +406,11 @@ static int wl1271_suspend(struct device *dev) mmc_pm_flag_t sdio_flags; int ret = 0; + if (!wl) { + dev_err(dev, "no wilink module was probed\n"); + goto out; + } + dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n", wl->wow_enabled); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8b8689c6d887..d5e790dd589a 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -87,6 +87,9 @@ struct netfront_cb { /* IRQ name is queue name with "-tx" or "-rx" appended */ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) +static DECLARE_WAIT_QUEUE_HEAD(module_load_q); +static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); + struct netfront_stats { u64 packets; u64 bytes; @@ -237,7 +240,7 @@ static void rx_refill_timeout(unsigned long data) static int netfront_tx_slot_available(struct netfront_queue *queue) { return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < - (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2); + (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1); } static void xennet_maybe_wake_tx(struct netfront_queue *queue) @@ -349,6 +352,9 @@ static int xennet_open(struct net_device *dev) unsigned int i = 0; struct netfront_queue *queue = NULL; + if (!np->queues) + return -ENODEV; + for (i = 0; i < num_queues; ++i) { queue = &np->queues[i]; napi_enable(&queue->napi); @@ -785,7 +791,7 @@ static int xennet_get_responses(struct netfront_queue *queue, RING_IDX cons = queue->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(queue, cons); grant_ref_t ref = xennet_get_rx_ref(queue, cons); - int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); + int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD); int slots = 1; int err = 0; unsigned long ret; @@ -888,7 +894,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, struct sk_buff *skb, struct sk_buff_head *list) { - struct skb_shared_info *shinfo = skb_shinfo(skb); RING_IDX cons = queue->rx.rsp_cons; struct sk_buff *nskb; @@ -897,15 +902,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, RING_GET_RESPONSE(&queue->rx, ++cons); skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; - if (shinfo->nr_frags == MAX_SKB_FRAGS) { + if (skb_shinfo(skb)->nr_frags == 
MAX_SKB_FRAGS) { unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; BUG_ON(pull_to <= skb_headlen(skb)); __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } - BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS); + BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); - skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag), + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + skb_frag_page(nfrag), rx->offset, rx->status, PAGE_SIZE); skb_shinfo(nskb)->nr_frags = 0; @@ -1324,6 +1330,12 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); + xenbus_switch_state(dev, XenbusStateInitialising); + wait_event(module_load_q, + xenbus_read_driver_state(dev->otherend) != + XenbusStateClosed && + xenbus_read_driver_state(dev->otherend) != + XenbusStateUnknown); return netdev; exit: @@ -1355,18 +1367,8 @@ static int netfront_probe(struct xenbus_device *dev, #ifdef CONFIG_SYSFS info->netdev->sysfs_groups[0] = &xennet_dev_group; #endif - err = register_netdev(info->netdev); - if (err) { - pr_warn("%s: register_netdev err=%d\n", __func__, err); - goto fail; - } return 0; - - fail: - xennet_free_netdev(netdev); - dev_set_drvdata(&dev->dev, NULL); - return err; } static void xennet_end_access(int ref, void *page) @@ -1735,8 +1737,6 @@ static void xennet_destroy_queues(struct netfront_info *info) { unsigned int i; - rtnl_lock(); - for (i = 0; i < info->netdev->real_num_tx_queues; i++) { struct netfront_queue *queue = &info->queues[i]; @@ -1745,8 +1745,6 @@ static void xennet_destroy_queues(struct netfront_info *info) netif_napi_del(&queue->napi); } - rtnl_unlock(); - kfree(info->queues); info->queues = NULL; } @@ -1762,8 +1760,6 @@ static int xennet_create_queues(struct netfront_info *info, if (!info->queues) return -ENOMEM; - rtnl_lock(); - for (i = 0; i < *num_queues; i++) { struct netfront_queue *queue = &info->queues[i]; @@ -1772,7 +1768,7 @@ static int xennet_create_queues(struct netfront_info *info, ret = xennet_init_queue(queue); if (ret < 0) { - dev_warn(&info->netdev->dev, + dev_warn(&info->xbdev->dev, "only created %d queues\n", i); *num_queues = i; break; @@ -1786,10 +1782,8 @@ static int xennet_create_queues(struct netfront_info *info, netif_set_real_num_tx_queues(info->netdev, *num_queues); - rtnl_unlock(); - if (*num_queues == 0) { - dev_err(&info->netdev->dev, "no queues\n"); + dev_err(&info->xbdev->dev, "no queues\n"); return -EINVAL; } return 0; @@ -1826,6 +1820,7 @@ static int talk_to_netback(struct xenbus_device *dev, goto out; } + rtnl_lock(); if (info->queues) xennet_destroy_queues(info); @@ -1836,6 +1831,7 @@ static int talk_to_netback(struct xenbus_device *dev, info->queues = NULL; goto out; } + rtnl_unlock(); /* Create shared ring, alloc event channel -- for each queue */ for (i = 0; i < num_queues; ++i) { @@ -1932,8 +1928,10 @@ static int talk_to_netback(struct xenbus_device *dev, xenbus_transaction_end(xbt, 1); destroy_ring: xennet_disconnect_backend(info); + rtnl_lock(); xennet_destroy_queues(info); out: + rtnl_unlock(); device_unregister(&dev->dev); return err; } @@ -1963,6 +1961,15 @@ static int xennet_connect(struct net_device *dev) netdev_update_features(dev); rtnl_unlock(); + if (dev->reg_state == NETREG_UNINITIALIZED) { + err = register_netdev(dev); + if (err) { + pr_warn("%s: register_netdev err=%d\n", __func__, err); + device_unregister(&np->xbdev->dev); + return err; + } + } + /* * All public and private state should now be sane. 
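 *
 * An illustrative recap of the xen-netfront rework above: probe no
 * longer calls register_netdev(). Instead xennet_create_dev() blocks on
 * module_load_q until the backend leaves Closed/Unknown, registration
 * is deferred to xennet_connect(), and xennet_remove() steps the device
 * through Closing and then Closed on module_unload_q, with
 * netback_changed() issuing the wake_up_all() calls. The underlying
 * pairing is the usual wait-queue idiom (TARGET is a stand-in for
 * whichever xenbus state the waiter needs):
 *
 *	wait_event(q, READ_ONCE(state) == TARGET);	// waiter sleeps
 *	WRITE_ONCE(state, TARGET);			// notifier publishes,
 *	wake_up_all(&q);				// then wakes waiters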
Get * ready to start sending and receiving packets and give the driver @@ -2005,7 +2012,10 @@ static void netback_changed(struct xenbus_device *dev, case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: + break; + case XenbusStateUnknown: + wake_up_all(&module_unload_q); break; case XenbusStateInitWait: @@ -2021,10 +2031,12 @@ break; case XenbusStateClosed: + wake_up_all(&module_unload_q); if (dev->state == XenbusStateClosed) break; /* Missed the backend's CLOSING state -- fallthrough */ case XenbusStateClosing: + wake_up_all(&module_unload_q); xenbus_frontend_closed(dev); break; } @@ -2130,12 +2142,32 @@ static int xennet_remove(struct xenbus_device *dev) dev_dbg(&dev->dev, "%s\n", dev->nodename); + if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { + xenbus_switch_state(dev, XenbusStateClosing); + wait_event(module_unload_q, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosing || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown); + + xenbus_switch_state(dev, XenbusStateClosed); + wait_event(module_unload_q, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown); + } + xennet_disconnect_backend(info); - unregister_netdev(info->netdev); + if (info->netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(info->netdev); - if (info->queues) + if (info->queues) { + rtnl_lock(); xennet_destroy_queues(info); + rtnl_unlock(); + } xennet_free_netdev(info->netdev); return 0; diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c index c4da50e07bbc..08a4f82a2965 100644 --- a/drivers/nfc/fdp/i2c.c +++ b/drivers/nfc/fdp/i2c.c @@ -176,6 +176,16 @@ static int fdp_nci_i2c_read(struct fdp_i2c_phy *phy, struct sk_buff **skb) /* Packet that contains a length */ if (tmp[0] == 0 && tmp[1] == 0) { phy->next_read_size = (tmp[2] << 8) + tmp[3] + 3; + /* + * Ensure next_read_size does not exceed sizeof(tmp) + * for reading that many bytes during next iteration + */ + if (phy->next_read_size > FDP_NCI_I2C_MAX_PAYLOAD) { + dev_dbg(&client->dev, "%s: corrupted packet\n", + __func__); + phy->next_read_size = 5; + goto flush; + } } else { phy->next_read_size = FDP_NCI_I2C_MIN_PAYLOAD; diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index e153e8b64bb8..d5553c47014f 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -62,6 +62,9 @@ struct pn533_usb_phy { struct urb *out_urb; struct urb *in_urb; + struct urb *ack_urb; + u8 *ack_buffer; + struct pn533 *priv; }; @@ -150,13 +153,16 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags) struct pn533_usb_phy *phy = dev->phy; static const u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00}; /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */ - int rc; - phy->out_urb->transfer_buffer = (u8 *)ack; - phy->out_urb->transfer_buffer_length = sizeof(ack); - rc = usb_submit_urb(phy->out_urb, flags); + if (!phy->ack_buffer) { + phy->ack_buffer = kmemdup(ack, sizeof(ack), flags); + if (!phy->ack_buffer) + return -ENOMEM; + } - return rc; + phy->ack_urb->transfer_buffer = phy->ack_buffer; + phy->ack_urb->transfer_buffer_length = sizeof(ack); + return usb_submit_urb(phy->ack_urb, flags); } static int pn533_usb_send_frame(struct pn533 *dev, @@ -375,26 +381,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy) /* Power on the reader (CCID cmd) */ u8 cmd[10] = {PN533_ACR122_PC_TO_RDR_ICCPOWERON, 0, 0, 0,
0, 0, 0, 3, 0, 0}; + char *buffer; + int transferred; int rc; void *cntx; struct pn533_acr122_poweron_rdr_arg arg; dev_dbg(&phy->udev->dev, "%s\n", __func__); + buffer = kmemdup(cmd, sizeof(cmd), GFP_KERNEL); + if (!buffer) + return -ENOMEM; + init_completion(&arg.done); cntx = phy->in_urb->context; /* backup context */ phy->in_urb->complete = pn533_acr122_poweron_rdr_resp; phy->in_urb->context = &arg; - phy->out_urb->transfer_buffer = cmd; - phy->out_urb->transfer_buffer_length = sizeof(cmd); - print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1, cmd, sizeof(cmd), false); - rc = usb_submit_urb(phy->out_urb, GFP_KERNEL); - if (rc) { + rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd), + &transferred, 0); + kfree(buffer); + if (rc || (transferred != sizeof(cmd))) { nfc_err(&phy->udev->dev, "Reader power on cmd error %d\n", rc); return rc; @@ -490,8 +501,9 @@ static int pn533_usb_probe(struct usb_interface *interface, phy->in_urb = usb_alloc_urb(0, GFP_KERNEL); phy->out_urb = usb_alloc_urb(0, GFP_KERNEL); + phy->ack_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!phy->in_urb || !phy->out_urb) + if (!phy->in_urb || !phy->out_urb || !phy->ack_urb) goto error; usb_fill_bulk_urb(phy->in_urb, phy->udev, @@ -501,7 +513,9 @@ static int pn533_usb_probe(struct usb_interface *interface, usb_fill_bulk_urb(phy->out_urb, phy->udev, usb_sndbulkpipe(phy->udev, out_endpoint), NULL, 0, pn533_send_complete, phy); - + usb_fill_bulk_urb(phy->ack_urb, phy->udev, + usb_sndbulkpipe(phy->udev, out_endpoint), + NULL, 0, pn533_send_complete, phy); switch (id->driver_info) { case PN533_DEVICE_STD: @@ -554,6 +568,7 @@ static int pn533_usb_probe(struct usb_interface *interface, error: usb_free_urb(phy->in_urb); usb_free_urb(phy->out_urb); + usb_free_urb(phy->ack_urb); usb_put_dev(phy->udev); kfree(in_buf); @@ -573,10 +588,13 @@ static void pn533_usb_disconnect(struct usb_interface *interface) usb_kill_urb(phy->in_urb); usb_kill_urb(phy->out_urb); + usb_kill_urb(phy->ack_urb); kfree(phy->in_urb->transfer_buffer); usb_free_urb(phy->in_urb); usb_free_urb(phy->out_urb); + usb_free_urb(phy->ack_urb); + kfree(phy->ack_buffer); nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n"); } diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c index fd08be2917e6..3420c5104c94 100644 --- a/drivers/nfc/st21nfca/dep.c +++ b/drivers/nfc/st21nfca/dep.c @@ -217,7 +217,8 @@ static int st21nfca_tm_recv_atr_req(struct nfc_hci_dev *hdev, atr_req = (struct st21nfca_atr_req *)skb->data; - if (atr_req->length < sizeof(struct st21nfca_atr_req)) { + if (atr_req->length < sizeof(struct st21nfca_atr_req) || + atr_req->length > skb->len) { r = -EPROTO; goto exit; } diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index 3a98563d4a12..6e84e120150d 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -320,23 +320,33 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, * AID 81 5 to 16 * PARAMETERS 82 0 to 255 */ - if (skb->len < NFC_MIN_AID_LENGTH + 2 && + if (skb->len < NFC_MIN_AID_LENGTH + 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) return -EPROTO; + /* + * Buffer should have enough space for at least + * two tag fields + two length fields + aid_len (skb->data[1]) + */ + if (skb->len < skb->data[1] + 4) + return -EPROTO; + transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev, skb->len - 2, GFP_KERNEL); transaction->aid_len = skb->data[1]; memcpy(transaction->aid, &skb->data[2], transaction->aid_len); + transaction->params_len = 
skb->data[transaction->aid_len + 3]; - /* Check next byte is PARAMETERS tag (82) */ + /* Check next byte is PARAMETERS tag (82) and the length field */ if (skb->data[transaction->aid_len + 2] != - NFC_EVT_TRANSACTION_PARAMS_TAG) + NFC_EVT_TRANSACTION_PARAMS_TAG || + skb->len < transaction->aid_len + transaction->params_len + 4) { + devm_kfree(dev, transaction); return -EPROTO; + } - transaction->params_len = skb->data[transaction->aid_len + 3]; memcpy(transaction->params, skb->data + transaction->aid_len + 4, transaction->params_len); diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index f58d8e305323..18339b7e88a4 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -998,6 +998,9 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, mw_base = nt->mw_vec[mw_num].phys_addr; mw_size = nt->mw_vec[mw_num].phys_size; + if (max_mw_size && mw_size > max_mw_size) + mw_size = max_mw_size; + tx_size = (unsigned int)mw_size / num_qps_mw; qp_offset = tx_size * (qp_num / mw_count); diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 345acca576b3..1bd7b3734751 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -278,8 +278,6 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) disk->queue = q; disk->flags = GENHD_FL_EXT_DEVT; nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name); - set_capacity(disk, 0); - device_add_disk(dev, disk); if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk)) return -ENOMEM; @@ -292,6 +290,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) } set_capacity(disk, available_disk_size >> SECTOR_SHIFT); + device_add_disk(dev, disk); revalidate_disk(disk); return 0; } diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index d5612bd1cc81..b2feda35966b 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -210,12 +210,12 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping, return ret; } -static int btt_log_read_pair(struct arena_info *arena, u32 lane, - struct log_entry *ent) +static int btt_log_group_read(struct arena_info *arena, u32 lane, + struct log_group *log) { return arena_read_bytes(arena, - arena->logoff + (2 * lane * LOG_ENT_SIZE), ent, - 2 * LOG_ENT_SIZE, 0); + arena->logoff + (lane * LOG_GRP_SIZE), log, + LOG_GRP_SIZE, 0); } static struct dentry *debugfs_root; @@ -255,6 +255,8 @@ static void arena_debugfs_init(struct arena_info *a, struct dentry *parent, debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff); debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off); debugfs_create_x32("flags", S_IRUGO, d, &a->flags); + debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]); + debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]); } static void btt_debugfs_init(struct btt *btt) @@ -273,6 +275,11 @@ static void btt_debugfs_init(struct btt *btt) } } +static u32 log_seq(struct log_group *log, int log_idx) +{ + return le32_to_cpu(log->ent[log_idx].seq); +} + /* * This function accepts two log entries, and uses the * sequence number to find the 'older' entry. @@ -282,8 +289,10 @@ static void btt_debugfs_init(struct btt *btt) * * TODO The logic feels a bit kludge-y. make it better.. 
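 *
 * A worked example, illustrative rather than from the patch: seq values
 * cycle 1 -> 2 -> 3 -> 1, so the older entry of a pair is the one the
 * other follows.
 *
 *	(seq0, seq1) = (1, 2): 2 follows 1, so entry 0 (seq 1) is older
 *	(seq0, seq1) = (3, 1): 1 follows 3 across the wrap, so entry 0
 *			       (seq 3) is older
 *	(seq0, seq1) = (1, 3): 1 follows 3 across the wrap, so entry 1
 *			       (seq 3) is older
 *	duplicate values, or a sum above 5, are impossible with the legal
 *	seq values {1, 2, 3} and are rejected as corruption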
*/ -static int btt_log_get_old(struct log_entry *ent) +static int btt_log_get_old(struct arena_info *a, struct log_group *log) { + int idx0 = a->log_index[0]; + int idx1 = a->log_index[1]; int old; /* @@ -291,23 +300,23 @@ static int btt_log_get_old(struct log_entry *ent) * the next time, the following logic works out to put this * (next) entry into [1] */ - if (ent[0].seq == 0) { - ent[0].seq = cpu_to_le32(1); + if (log_seq(log, idx0) == 0) { + log->ent[idx0].seq = cpu_to_le32(1); return 0; } - if (ent[0].seq == ent[1].seq) + if (log_seq(log, idx0) == log_seq(log, idx1)) return -EINVAL; - if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5) + if (log_seq(log, idx0) + log_seq(log, idx1) > 5) return -EINVAL; - if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) { - if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1) + if (log_seq(log, idx0) < log_seq(log, idx1)) { + if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1) old = 0; else old = 1; } else { - if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1) + if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1) old = 1; else old = 0; @@ -327,17 +336,18 @@ static int btt_log_read(struct arena_info *arena, u32 lane, { int ret; int old_ent, ret_ent; - struct log_entry log[2]; + struct log_group log; - ret = btt_log_read_pair(arena, lane, log); + ret = btt_log_group_read(arena, lane, &log); if (ret) return -EIO; - old_ent = btt_log_get_old(log); + old_ent = btt_log_get_old(arena, &log); if (old_ent < 0 || old_ent > 1) { dev_err(to_dev(arena), "log corruption (%d): lane %d seq [%d, %d]\n", - old_ent, lane, log[0].seq, log[1].seq); + old_ent, lane, log.ent[arena->log_index[0]].seq, + log.ent[arena->log_index[1]].seq); /* TODO set error state? */ return -EIO; } @@ -345,7 +355,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane, ret_ent = (old_flag ? old_ent : (1 - old_ent)); if (ent != NULL) - memcpy(ent, &log[ret_ent], LOG_ENT_SIZE); + memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE); return ret_ent; } @@ -359,17 +369,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane, u32 sub, struct log_entry *ent, unsigned long flags) { int ret; - /* - * Ignore the padding in log_entry for calculating log_half. - * The entry is 'committed' when we write the sequence number, - * and we want to ensure that that is the last thing written. 
- * We don't bother writing the padding as that would be extra - * media wear and write amplification - */ - unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2; - u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE); + u32 group_slot = arena->log_index[sub]; + unsigned int log_half = LOG_ENT_SIZE / 2; void *src = ent; + u64 ns_off; + ns_off = arena->logoff + (lane * LOG_GRP_SIZE) + + (group_slot * LOG_ENT_SIZE); /* split the 16B write into atomic, durable halves */ ret = arena_write_bytes(arena, ns_off, src, log_half, flags); if (ret) @@ -452,7 +458,7 @@ static int btt_log_init(struct arena_info *arena) { size_t logsize = arena->info2off - arena->logoff; size_t chunk_size = SZ_4K, offset = 0; - struct log_entry log; + struct log_entry ent; void *zerobuf; int ret; u32 i; @@ -484,11 +490,11 @@ static int btt_log_init(struct arena_info *arena) } for (i = 0; i < arena->nfree; i++) { - log.lba = cpu_to_le32(i); - log.old_map = cpu_to_le32(arena->external_nlba + i); - log.new_map = cpu_to_le32(arena->external_nlba + i); - log.seq = cpu_to_le32(LOG_SEQ_INIT); - ret = __btt_log_write(arena, i, 0, &log, 0); + ent.lba = cpu_to_le32(i); + ent.old_map = cpu_to_le32(arena->external_nlba + i); + ent.new_map = cpu_to_le32(arena->external_nlba + i); + ent.seq = cpu_to_le32(LOG_SEQ_INIT); + ret = __btt_log_write(arena, i, 0, &ent, 0); if (ret) goto free; } @@ -593,6 +599,123 @@ static int btt_freelist_init(struct arena_info *arena) return 0; } +static bool ent_is_padding(struct log_entry *ent) +{ + return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0) + && (ent->seq == 0); +} + +/* + * Detecting valid log indices: We read a log group (see the comments in btt.h + * for a description of a 'log_group' and its 'slots'), and iterate over its + * four slots. We expect that a padding slot will be all-zeroes, and use this + * to detect a padding slot vs. an actual entry. + * + * If a log_group is in the initial state, i.e. hasn't been used since the + * creation of this BTT layout, it will have three of the four slots with + * zeroes. We skip over these log_groups for the detection of log_index. If + * all log_groups are in the initial state (i.e. the BTT has never been + * written to), it is safe to assume the 'new format' of log entries in slots + * (0, 1). + */ +static int log_set_indices(struct arena_info *arena) +{ + bool idx_set = false, initial_state = true; + int ret, log_index[2] = {-1, -1}; + u32 i, j, next_idx = 0; + struct log_group log; + u32 pad_count = 0; + + for (i = 0; i < arena->nfree; i++) { + ret = btt_log_group_read(arena, i, &log); + if (ret < 0) + return ret; + + for (j = 0; j < 4; j++) { + if (!idx_set) { + if (ent_is_padding(&log.ent[j])) { + pad_count++; + continue; + } else { + /* Skip if index has been recorded */ + if ((next_idx == 1) && + (j == log_index[0])) + continue; + /* valid entry, record index */ + log_index[next_idx] = j; + next_idx++; + } + if (next_idx == 2) { + /* two valid entries found */ + idx_set = true; + } else if (next_idx > 2) { + /* too many valid indices */ + return -ENXIO; + } + } else { + /* + * once the indices have been set, just verify + * that all subsequent log groups are either in + * their initial state or follow the same + * indices. 
+ */ + if (j == log_index[0]) { + /* entry must be 'valid' */ + if (ent_is_padding(&log.ent[j])) + return -ENXIO; + } else if (j == log_index[1]) { + ; + /* + * log_index[1] can be padding if the + * lane never got used and it is still + * in the initial state (three 'padding' + * entries) + */ + } else { + /* entry must be invalid (padding) */ + if (!ent_is_padding(&log.ent[j])) + return -ENXIO; + } + } + } + /* + * If any of the log_groups have more than one valid, + * non-padding entry, then we are no longer in the + * initial_state + */ + if (pad_count < 3) + initial_state = false; + pad_count = 0; + } + + if (!initial_state && !idx_set) + return -ENXIO; + + /* + * If all the entries in the log were in the initial state, + * assume new padding scheme + */ + if (initial_state) + log_index[1] = 1; + + /* + * Only allow the known permutations of log/padding indices, + * i.e. (0, 1), and (0, 2) + */ + if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2))) + ; /* known index possibilities */ + else { + dev_err(to_dev(arena), "Found an unknown padding scheme\n"); + return -ENXIO; + } + + arena->log_index[0] = log_index[0]; + arena->log_index[1] = log_index[1]; + dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]); + dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]); + return 0; +} + static int btt_rtt_init(struct arena_info *arena) { arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL); @@ -649,8 +772,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size, available -= 2 * BTT_PG_SIZE; /* The log takes a fixed amount of space based on nfree */ - logsize = roundup(2 * arena->nfree * sizeof(struct log_entry), - BTT_PG_SIZE); + logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE); available -= logsize; /* Calculate optimal split between map and data area */ @@ -667,6 +789,10 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size, arena->mapoff = arena->dataoff + datasize; arena->logoff = arena->mapoff + mapsize; arena->info2off = arena->logoff + logsize; + + /* Default log indices are (0,1) */ + arena->log_index[0] = 0; + arena->log_index[1] = 1; return arena; } @@ -757,6 +883,13 @@ static int discover_arenas(struct btt *btt) arena->external_lba_start = cur_nlba; parse_arena_meta(arena, super, cur_off); + ret = log_set_indices(arena); + if (ret) { + dev_err(to_dev(arena), + "Unable to deduce log/padding indices\n"); + goto out; + } + mutex_init(&arena->err_lock); ret = btt_freelist_init(arena); if (ret) @@ -1409,8 +1542,6 @@ static int btt_blk_init(struct btt *btt) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue); btt->btt_queue->queuedata = btt; - set_capacity(btt->btt_disk, 0); - device_add_disk(&btt->nd_btt->dev, btt->btt_disk); if (btt_meta_size(btt)) { int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt)); @@ -1422,6 +1553,7 @@ static int btt_blk_init(struct btt *btt) } } set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); + device_add_disk(&btt->nd_btt->dev, btt->btt_disk); btt->nd_btt->size = btt->nlba * (u64)btt->sector_size; revalidate_disk(btt->btt_disk); diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h index 578c2057524d..2609683c4167 100644 --- a/drivers/nvdimm/btt.h +++ b/drivers/nvdimm/btt.h @@ -27,6 +27,7 @@ #define MAP_ERR_MASK (1 << MAP_ERR_SHIFT) #define MAP_LBA_MASK (~((1 << MAP_TRIM_SHIFT) | (1 << MAP_ERR_SHIFT))) #define MAP_ENT_NORMAL 0xC0000000 +#define LOG_GRP_SIZE sizeof(struct log_group) #define LOG_ENT_SIZE sizeof(struct log_entry) #define
ARENA_MIN_SIZE (1UL << 24) /* 16 MB */ #define ARENA_MAX_SIZE (1ULL << 39) /* 512 GB */ @@ -50,12 +51,52 @@ enum btt_init_state { INIT_READY }; +/* + * A log group represents one log 'lane', and consists of four log entries. + * Two of the four entries are valid entries, and the remaining two are + * padding. Due to an old bug in the padding location, we need to perform a + * test to determine the padding scheme being used, and use that scheme + * thereafter. + * + * In kernels prior to 4.15, 'log group' would have actual log entries at + * indices (0, 2) and padding at indices (1, 3), whereas the correct/updated + * format has log entries at indices (0, 1) and padding at indices (2, 3). + * + * Old (pre 4.15) format: + * +-----------------+-----------------+ + * | ent[0] | ent[1] | + * | 16B | 16B | + * | lba/old/new/seq | pad | + * +-----------------------------------+ + * | ent[2] | ent[3] | + * | 16B | 16B | + * | lba/old/new/seq | pad | + * +-----------------+-----------------+ + * + * New format: + * +-----------------+-----------------+ + * | ent[0] | ent[1] | + * | 16B | 16B | + * | lba/old/new/seq | lba/old/new/seq | + * +-----------------------------------+ + * | ent[2] | ent[3] | + * | 16B | 16B | + * | pad | pad | + * +-----------------+-----------------+ + * + * We detect during start-up which format is in use, and set + * arena->log_index[(0, 1)] with the detected format. + */ + struct log_entry { __le32 lba; __le32 old_map; __le32 new_map; __le32 seq; - __le64 padding[2]; +}; + +struct log_group { + struct log_entry ent[4]; }; struct btt_sb { @@ -125,6 +166,7 @@ struct aligned_lock { * @list: List head for list of arenas * @debugfs_dir: Debugfs dentry * @flags: Arena flags - may signify error states. + * @log_index: Indices of the valid log entries in a log_group + * * arena_info is a per-arena handle. Once an arena is narrowed down for an * IO, this struct is passed around for the duration of the IO. @@ -157,6 +199,7 @@ struct arena_info { /* Arena flags */ u32 flags; struct mutex err_lock; + int log_index[2]; }; /** diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index baf283986a7e..2fffd42767c7 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -565,14 +565,18 @@ int nvdimm_revalidate_disk(struct gendisk *disk) { struct device *dev = disk_to_dev(disk)->parent; struct nd_region *nd_region = to_nd_region(dev->parent); - const char *pol = nd_region->ro ? "only" : "write"; + int disk_ro = get_disk_ro(disk); - if (nd_region->ro == get_disk_ro(disk)) + /* + * Upgrade to read-only if the region is read-only; preserve as + * read-only if the disk is already read-only.
+ */ + if (disk_ro || nd_region->ro == disk_ro) return 0; - dev_info(dev, "%s read-%s, marking %s read-%s\n", - dev_name(&nd_region->dev), pol, disk->disk_name, pol); - set_disk_ro(disk, nd_region->ro); + dev_info(dev, "%s read-only, marking %s read-only\n", + dev_name(&nd_region->dev), disk->disk_name); + set_disk_ro(disk, 1); return 0; diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c index e0f0e3ce1a32..0939f064054d 100644 --- a/drivers/nvdimm/dimm.c +++ b/drivers/nvdimm/dimm.c @@ -65,9 +65,12 @@ static int nvdimm_probe(struct device *dev) ndd->ns_next = nd_label_next_nsindex(ndd->ns_current); nd_label_copy(ndd, to_next_namespace_index(ndd), to_current_namespace_index(ndd)); - rc = nd_label_reserve_dpa(ndd); - if (ndd->ns_current >= 0) - nvdimm_set_aliasing(dev); + if (ndd->ns_current >= 0) { + rc = nd_label_reserve_dpa(ndd); + if (rc == 0) + nvdimm_set_aliasing(dev); + } + nvdimm_clear_locked(dev); nvdimm_bus_unlock(dev); if (rc) diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index f0d1b7e5de01..5f1385b96b13 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -200,6 +200,13 @@ void nvdimm_set_locked(struct device *dev) set_bit(NDD_LOCKED, &nvdimm->flags); } +void nvdimm_clear_locked(struct device *dev) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + + clear_bit(NDD_LOCKED, &nvdimm->flags); +} + static void nvdimm_release(struct device *dev) { struct nvdimm *nvdimm = to_nvdimm(dev); diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 9c5f108910e3..de66c02f6140 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -1050,7 +1050,7 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels) nsindex = to_namespace_index(ndd, 0); memset(nsindex, 0, ndd->nsarea.config_size); for (i = 0; i < 2; i++) { - int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT); + int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT); if (rc) return rc; diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 3e4d1e7998da..228bafa4d322 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1620,7 +1620,7 @@ static umode_t namespace_visible(struct kobject *kobj, if (a == &dev_attr_resource.attr) { if (is_namespace_blk(dev)) return 0; - return a->mode; + return 0400; } if (is_namespace_pmem(dev) || is_namespace_blk(dev)) { @@ -1926,7 +1926,7 @@ struct device *create_namespace_pmem(struct nd_region *nd_region, } if (i < nd_region->ndr_mappings) { - struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]); + struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm; /* * Give up if we don't find an instance of a uuid at each @@ -1934,7 +1934,7 @@ struct device *create_namespace_pmem(struct nd_region *nd_region, * find a dimm with two instances of the same uuid. 
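 *
 * A small truth table for the nvdimm_revalidate_disk() hunk above,
 * illustrative only, with disk_ro = get_disk_ro(disk):
 *
 *	disk_ro	nd_region->ro	action
 *	1	any		return 0, never downgrade to read-write
 *	0	0		return 0, nothing to change
 *	0	1		set_disk_ro(disk, 1) and log the change
 *
 * The only transition taken is a read-write disk sitting in a
 * read-only region.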
*/ dev_err(&nd_region->dev, "%s missing label for %pUb\n", - dev_name(ndd->dev), nd_label->uuid); + nvdimm_name(nvdimm), nd_label->uuid); rc = -EINVAL; goto err; } diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 9c758a91372b..156be00e1f76 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -254,6 +254,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, unsigned int len); void nvdimm_set_aliasing(struct device *dev); void nvdimm_set_locked(struct device *dev); +void nvdimm_clear_locked(struct device *dev); struct nd_btt *to_nd_btt(struct device *dev); struct nd_gen_sb { diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 9576c444f0ab..2adada1a5855 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -282,8 +282,16 @@ static struct attribute *nd_pfn_attributes[] = { NULL, }; +static umode_t pfn_visible(struct kobject *kobj, struct attribute *a, int n) +{ + if (a == &dev_attr_resource.attr) + return 0400; + return a->mode; +} + struct attribute_group nd_pfn_attribute_group = { .attrs = nd_pfn_attributes, + .is_visible = pfn_visible, }; static const struct attribute_group *nd_pfn_attribute_groups[] = { @@ -356,9 +364,9 @@ struct device *nd_pfn_create(struct nd_region *nd_region) int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) { u64 checksum, offset; - unsigned long align; enum nd_pfn_mode mode; struct nd_namespace_io *nsio; + unsigned long align, start_pad; struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; struct nd_namespace_common *ndns = nd_pfn->ndns; const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev); @@ -402,6 +410,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) align = le32_to_cpu(pfn_sb->align); offset = le64_to_cpu(pfn_sb->dataoff); + start_pad = le32_to_cpu(pfn_sb->start_pad); if (align == 0) align = 1UL << ilog2(offset); mode = le32_to_cpu(pfn_sb->mode); @@ -460,7 +469,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) return -EBUSY; } - if ((align && !IS_ALIGNED(offset, align)) + if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align)) || !IS_ALIGNED(offset, PAGE_SIZE)) { dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled align: %#lx\n", @@ -574,6 +583,12 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn, return altmap; } +static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys) +{ + return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys), + ALIGN_DOWN(phys, nd_pfn->align)); +} + static int nd_pfn_init(struct nd_pfn *nd_pfn) { u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? 
SZ_128K : 0; @@ -629,13 +644,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) start = nsio->res.start; size = PHYS_SECTION_ALIGN_UP(start + size) - start; if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, - IORES_DESC_NONE) == REGION_MIXED) { + IORES_DESC_NONE) == REGION_MIXED + || !IS_ALIGNED(start + resource_size(&nsio->res), + nd_pfn->align)) { size = resource_size(&nsio->res); - end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size); + end_trunc = start + size - phys_pmem_align_down(nd_pfn, + start + size); } if (start_pad + end_trunc) - dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n", + dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n", dev_name(&ndns->dev), start_pad + end_trunc); /* diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 829d760f651c..abaf38c61220 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -562,8 +562,12 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr) return 0; - if (!is_nd_pmem(dev) && a == &dev_attr_resource.attr) - return 0; + if (a == &dev_attr_resource.attr) { + if (is_nd_pmem(dev)) + return 0400; + else + return 0; + } if (a == &dev_attr_deep_flush.attr) { int has_flush = nvdimm_has_flush(nd_region); diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index 46d6cb1e03bd..8f845de8a8a2 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -18,7 +18,7 @@ config NVME_FABRICS config NVME_RDMA tristate "NVM Express over Fabrics RDMA host driver" - depends on INFINIBAND && BLOCK + depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK select NVME_CORE select NVME_FABRICS select SG_POOL diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 37f9039bb9ca..92ec3d2ef0c0 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -11,7 +11,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
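 *
 * A worked example for the Security Send/Receive helpers added below
 * (illustrative only): both pack CDW10 as (SECP << 24) | (SPSP << 8) |
 * NSSF, so the RPMB values used later, SECP = 0xEA, SPSP = 0x0001 and
 * NSSF = 0, encode to cdw10[0] = 0xEA000100, while cdw10[1] carries the
 * transfer length in bytes. Likewise, going by these hunks, the RPMBS
 * word decoded in nvme_init_rpmb() packs the number of RPMB targets in
 * bits [2:0], the authentication method in [4:3], the total size in
 * 128KB units minus one in [23:16], and the access size in 512B units
 * minus one in [31:24].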
*/ - +#define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__ +#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt) #include #include #include @@ -28,6 +29,7 @@ #include #include #include +#include #include "nvme.h" #include "fabrics.h" @@ -665,6 +667,7 @@ static int nvme_submit_user_cmd(struct request_queue *q, ret = PTR_ERR(meta); goto out_unmap; } + req->cmd_flags |= REQ_INTEGRITY; } } @@ -1356,6 +1359,44 @@ static const struct pr_ops nvme_pr_ops = { .pr_clear = nvme_pr_clear, }; +static int nvme_sec_send(struct nvme_ctrl *ctrl, u8 nssf, u16 spsp, u8 secp, + void *buffer, size_t len) +{ + struct nvme_command cmd; + + dev_err(ctrl->device, "%s target = %hhu SPSP = %hu SECP = %hhX len=%zd\n", + __func__, nssf, spsp, secp, len); + + memset(&cmd, 0, sizeof(cmd)); + cmd.common.opcode = nvme_admin_security_send; + cmd.common.nsid = 0; + cmd.common.cdw10[0] = + cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8 | nssf); + cmd.common.cdw10[1] = cpu_to_le32(len); + + return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, + ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0); +} + +static int nvme_sec_recv(struct nvme_ctrl *ctrl, u8 nssf, u16 spsp, u8 secp, + void *buffer, size_t len) +{ + struct nvme_command cmd; + + dev_err(ctrl->device, "%s target = %hhu SPSP = %hu SECP = %hhX len=%zd\n", + __func__, nssf, spsp, secp, len); + + memset(&cmd, 0, sizeof(cmd)); + cmd.common.opcode = nvme_admin_security_recv; + cmd.common.nsid = 0; + cmd.common.cdw10[0] = + cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8 | nssf); + cmd.common.cdw10[1] = cpu_to_le32(len); + + return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, + ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0); +} + #ifdef CONFIG_BLK_SED_OPAL int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, bool send) @@ -1515,7 +1556,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); } - if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) + if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && + is_power_of_2(ctrl->max_hw_sectors)) blk_queue_chunk_sectors(q, ctrl->max_hw_sectors); blk_queue_virt_boundary(q, ctrl->page_size - 1); if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) @@ -1684,6 +1726,122 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val) } } +#define NVME_SECP_RPMB 0xEA /* Security Protocol EAh is assigned + * for NVMe use (refer to ACS-4) + */ +#define NVME_SPSP_RPMB 0x0001 /* RPMB Target */ +static int nvme_rpmb_cmd_seq(struct device *dev, u8 target, + struct rpmb_cmd *cmds, u32 ncmds) +{ + struct nvme_ctrl *ctrl; + struct rpmb_cmd *cmd; + u32 size; + int ret; + int i; + + ctrl = dev_get_drvdata(dev); + + for (ret = 0, i = 0; i < ncmds && !ret; i++) { + cmd = &cmds[i]; + size = rpmb_ioc_frames_len_nvme(cmd->nframes); + if (cmd->flags & RPMB_F_WRITE) + ret = nvme_sec_send(ctrl, target, + NVME_SPSP_RPMB, NVME_SECP_RPMB, + cmd->frames, size); + else + ret = nvme_sec_recv(ctrl, target, + NVME_SPSP_RPMB, NVME_SECP_RPMB, + cmd->frames, size); + } + + return ret; +} + +static int nvme_rpmb_get_capacity(struct device *dev, u8 target) +{ + struct nvme_ctrl *ctrl; + + ctrl = dev_get_drvdata(dev); + + return ((ctrl->rpmbs >> 16) & 0xFF) + 1; +} + +static struct rpmb_ops nvme_rpmb_dev_ops = { + .cmd_seq = nvme_rpmb_cmd_seq, + .get_capacity = nvme_rpmb_get_capacity, + .type = RPMB_TYPE_NVME, +}; + +static void nvme_rpmb_set_cap(struct nvme_ctrl *ctrl, + struct rpmb_ops *ops) +{ + ops->wr_cnt_max = ((ctrl->rpmbs >> 24) & 
0xFF) + 1; + ops->rd_cnt_max = ops->wr_cnt_max; + ops->block_size = 2; /* 1 sector == 2 half sectors */ + ops->auth_method = (ctrl->rpmbs >> 3) & 0x3; +} + +static void nvme_rpmb_add(struct nvme_ctrl *ctrl) +{ + struct rpmb_dev *rdev; + int ndevs = ctrl->rpmbs & 0x7; + int i; + + nvme_rpmb_set_cap(ctrl, &nvme_rpmb_dev_ops); + + /* Add RPMB partitions */ + for (i = 0; i < ndevs; i++) { + rdev = rpmb_dev_register(ctrl->device, i, &nvme_rpmb_dev_ops); + if (IS_ERR(rdev)) { + dev_warn(ctrl->device, "%s: cannot register to rpmb %ld\n", + dev_name(ctrl->device), PTR_ERR(rdev)); + } + dev_set_drvdata(&rdev->dev, ctrl); + } +} + +static void nvme_rpmb_remove(struct nvme_ctrl *ctrl) +{ + int ndevs = ctrl->rpmbs & 0x7; + int i; + + /* FIXME: target */ + for (i = 0; i < ndevs; i++) + rpmb_dev_unregister_by_device(ctrl->device, i); +} + +int nvme_init_rpmb(struct nvme_ctrl *ctrl) +{ + dev_err(ctrl->device, "RPMBS %X\n", ctrl->rpmbs); + + if ((ctrl->rpmbs & 0x7) == 0x0) { + dev_err(ctrl->device, "RPMBS No partitions\n"); + return 0; + } + + dev_err(ctrl->device, "RPMBS Number of partitions %d\n", + ctrl->rpmbs & 0x7); + dev_err(ctrl->device, "RPMBS Authentication Method: %d\n", + (ctrl->rpmbs >> 3) & 0x3); + dev_err(ctrl->device, "RPMBS Total Size: %d %dK", + (ctrl->rpmbs >> 16) & 0xFF, + (((ctrl->rpmbs >> 16) & 0xFF) + 1) * 128); + dev_err(ctrl->device, "RPMBS Access Size: %d %dB", + (ctrl->rpmbs >> 24) & 0xFF, + (((ctrl->rpmbs >> 24) & 0xFF) + 1) * 512); + + nvme_rpmb_add(ctrl); + + return 0; +} +EXPORT_SYMBOL_GPL(nvme_init_rpmb); + +void nvme_exit_rpmb(struct nvme_ctrl *ctrl) +{ + nvme_rpmb_remove(ctrl); +} +EXPORT_SYMBOL_GPL(nvme_exit_rpmb); + struct nvme_core_quirk_entry { /* * NVMe model and firmware strings are padded with spaces. For @@ -1900,6 +2058,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); } + ctrl->rpmbs = le32_to_cpu(id->rpmbs); + dev_info(ctrl->device, "RPMBS=%08X\n", ctrl->rpmbs); + kfree(id); if (ctrl->apst_enabled && !prev_apst_enabled) @@ -2299,7 +2460,8 @@ static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) mutex_lock(&ctrl->namespaces_mutex); list_for_each_entry(ns, &ctrl->namespaces, list) { if (ns->ns_id == nsid) { - kref_get(&ns->kref); + if (!kref_get_unless_zero(&ns->kref)) + continue; ret = ns; break; } diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 555c976cc2ee..33d060c524e6 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -74,6 +74,7 @@ static struct nvmf_host *nvmf_host_default(void) return NULL; kref_init(&host->ref); + uuid_gen(&host->id); snprintf(host->nqn, NVMF_NQN_SIZE, "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id); @@ -586,6 +587,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->transport); opts->transport = p; break; case NVMF_OPT_NQN: @@ -594,6 +596,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->subsysnqn); opts->subsysnqn = p; nqnlen = strlen(opts->subsysnqn); if (nqnlen >= NVMF_NQN_SIZE) { @@ -605,8 +608,10 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, opts->discovery_nqn = !(strcmp(opts->subsysnqn, NVME_DISC_SUBSYS_NAME)); - if (opts->discovery_nqn) + if (opts->discovery_nqn) { + opts->kato = 0; opts->nr_io_queues = 0; + } break; case NVMF_OPT_TRADDR: p = match_strdup(args); @@ -614,6 +619,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + 
kfree(opts->traddr); opts->traddr = p; break; case NVMF_OPT_TRSVCID: @@ -622,6 +628,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->trsvcid); opts->trsvcid = p; break; case NVMF_OPT_QUEUE_SIZE: @@ -703,6 +710,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -EINVAL; goto out; } + nvmf_host_put(opts->host); opts->host = nvmf_host_add(p); kfree(p); if (!opts->host) { @@ -728,6 +736,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->host_traddr); opts->host_traddr = p; break; case NVMF_OPT_HOST_ID: diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index bf33663218cd..9ff8529a64a9 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -142,4 +142,34 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts); int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size); bool nvmf_should_reconnect(struct nvme_ctrl *ctrl); +static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl, + struct request *rq) +{ + struct nvme_command *cmd = nvme_req(rq)->cmd; + + /* + * We cannot accept any other command until the connect command has + * completed, so only allow connect to pass. + */ + if (!blk_rq_is_passthrough(rq) || + cmd->common.opcode != nvme_fabrics_command || + cmd->fabrics.fctype != nvme_fabrics_type_connect) { + /* + * Reconnecting state means transport disruption, which can take + * a long time and even might fail permanently, fail fast to + * give upper layers a chance to failover. + * Deleting state means that the ctrl will never accept commands + * again, fail it permanently. + */ + if (ctrl->state == NVME_CTRL_RECONNECTING || + ctrl->state == NVME_CTRL_DELETING) { + nvme_req(rq)->status = NVME_SC_ABORT_REQ; + return BLK_STS_IOERR; + } + return BLK_STS_RESOURCE; /* try again later */ + } + + return BLK_STS_OK; +} + #endif /* _NVME_FABRICS_H */ diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index be49d0f79381..7deb7b5d8683 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -41,6 +41,7 @@ enum nvme_fc_queue_flags { NVME_FC_Q_CONNECTED = (1 << 0), + NVME_FC_Q_LIVE = (1 << 1), }; #define NVMEFC_QUEUE_DELAY 3 /* ms units */ @@ -1654,6 +1655,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue) if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) return; + clear_bit(NVME_FC_Q_LIVE, &queue->flags); /* * Current implementation never disconnects a single queue. * It always terminates a whole association. 
So there is never @@ -1661,7 +1663,6 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue) */ queue->connection_id = 0; - clear_bit(NVME_FC_Q_CONNECTED, &queue->flags); } static void @@ -1740,6 +1741,8 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) ret = nvmf_connect_io_queue(&ctrl->ctrl, i); if (ret) break; + + set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); } return ret; @@ -2048,6 +2051,14 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, return BLK_STS_RESOURCE; } +static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue, + struct request *rq) +{ + if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags))) + return nvmf_check_init_req(&queue->ctrl->ctrl, rq); + return BLK_STS_OK; +} + static blk_status_t nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) @@ -2063,6 +2074,10 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, u32 data_len; blk_status_t ret; + ret = nvme_fc_is_ready(queue, rq); + if (unlikely(ret)) + return ret; + ret = nvme_setup_cmd(ns, rq, sqe); if (ret) return ret; @@ -2398,6 +2413,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) if (ret) goto out_disconnect_admin_queue; + set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); + /* * Check controller capabilities * @@ -2859,7 +2876,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, /* initiate nvme ctrl ref counting teardown */ nvme_uninit_ctrl(&ctrl->ctrl); - nvme_put_ctrl(&ctrl->ctrl); /* Remove core ctrl ref. */ nvme_put_ctrl(&ctrl->ctrl); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index d3f3c4447515..950695e74ac6 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -20,6 +20,7 @@ #include #include #include +#include extern unsigned char nvme_io_timeout; #define NVME_IO_TIMEOUT (nvme_io_timeout * HZ) @@ -80,6 +81,11 @@ enum nvme_quirks { * Supports the LighNVM command set if indicated in vs[1]. */ NVME_QUIRK_LIGHTNVM = (1 << 6), + + /* + * Set MEDIUM priority on SQ creation + */ + NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7), }; /* @@ -108,7 +114,7 @@ static inline struct nvme_request *nvme_req(struct request *req) * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was * found empirically. */ -#define NVME_QUIRK_DELAY_AMOUNT 2000 +#define NVME_QUIRK_DELAY_AMOUNT 2300 enum nvme_ctrl_state { NVME_CTRL_NEW, @@ -139,6 +145,7 @@ struct nvme_ctrl { struct work_struct reset_work; struct opal_dev *opal_dev; + struct rpmb_dev *rdev; char name[12]; char serial[20]; @@ -167,6 +174,7 @@ struct nvme_ctrl { u16 kas; u8 npss; u8 apsta; + u32 rpmbs; unsigned int shutdown_timeout; unsigned int kato; bool subsystem; @@ -292,6 +300,8 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl); void nvme_stop_ctrl(struct nvme_ctrl *ctrl); void nvme_put_ctrl(struct nvme_ctrl *ctrl); int nvme_init_identify(struct nvme_ctrl *ctrl); +int nvme_init_rpmb(struct nvme_ctrl *ctrl); +void nvme_exit_rpmb(struct nvme_ctrl *ctrl); void nvme_queue_scan(struct nvme_ctrl *ctrl); void nvme_remove_namespaces(struct nvme_ctrl *ctrl); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 3f5a04c586ce..5ee7ac173480 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -77,7 +77,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); * Represents an NVM Express device. Each nvme_dev is a PCI function. 
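
The NVME_FC_Q_LIVE gating above mirrors the new nvmf_check_init_req() helper: until a queue has completed its fabrics Connect, only the Connect command itself may pass, and everything else either fails fast or is retried depending on controller state. A minimal userspace model of that decision (stand-in names, illustrative only, not driver code):

#include <stdbool.h>
#include <stdio.h>

enum sts { STS_OK, STS_RESOURCE, STS_IOERR };
enum ctrl_state { CTRL_LIVE, CTRL_RECONNECTING, CTRL_DELETING };

struct cmd { bool is_connect; };

/* mirror of the connect-gating decision: queues that are not yet
 * live admit only the fabrics Connect command */
static enum sts check_init_req(enum ctrl_state state, bool queue_live,
                               const struct cmd *c)
{
        if (queue_live || c->is_connect)
                return STS_OK;
        /* transport is going away or churning: fail fast for failover */
        if (state == CTRL_RECONNECTING || state == CTRL_DELETING)
                return STS_IOERR;
        return STS_RESOURCE;    /* transient: ask the block layer to retry */
}

int main(void)
{
        struct cmd io = { .is_connect = false };

        /* prints 2 (STS_IOERR): I/O on a dead queue of a deleting ctrl */
        printf("%d\n", check_init_req(CTRL_DELETING, false, &io));
        return 0;
}
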
*/ struct nvme_dev { - struct nvme_queue **queues; + struct nvme_queue *queues; struct blk_mq_tag_set tagset; struct blk_mq_tag_set admin_tagset; u32 __iomem *dbs; @@ -348,7 +348,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { struct nvme_dev *dev = data; - struct nvme_queue *nvmeq = dev->queues[0]; + struct nvme_queue *nvmeq = &dev->queues[0]; WARN_ON(hctx_idx != 0); WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); @@ -370,7 +370,7 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx) { struct nvme_dev *dev = data; - struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1]; + struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; if (!nvmeq->tags) nvmeq->tags = &dev->tagset.tags[hctx_idx]; @@ -386,7 +386,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req, struct nvme_dev *dev = set->driver_data; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0; - struct nvme_queue *nvmeq = dev->queues[queue_idx]; + struct nvme_queue *nvmeq = &dev->queues[queue_idx]; BUG_ON(!nvmeq); iod->nvmeq = nvmeq; @@ -900,7 +900,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag) static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx) { struct nvme_dev *dev = to_nvme_dev(ctrl); - struct nvme_queue *nvmeq = dev->queues[0]; + struct nvme_queue *nvmeq = &dev->queues[0]; struct nvme_command c; memset(&c, 0, sizeof(c)); @@ -947,9 +947,18 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, struct nvme_queue *nvmeq) { + struct nvme_ctrl *ctrl = &dev->ctrl; struct nvme_command c; int flags = NVME_QUEUE_PHYS_CONTIG; + /* + * Some drives have a bug that auto-enables WRRU if MEDIUM isn't + * set. Since URGENT priority is zeroes, it makes all queues + * URGENT. + */ + if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) + flags |= NVME_SQ_PRIO_MEDIUM; + /* * Note: we (ab)use the fact the the prp fields survive if no data * is attached to the request. @@ -1004,12 +1013,6 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) if (!(csts & NVME_CSTS_CFS) && !nssro) return false; - /* If PCI error recovery process is happening, we cannot reset or - * the recovery mechanism will surely fail. - */ - if (pci_channel_offline(to_pci_dev(dev->dev))) - return false; - return true; } @@ -1040,6 +1043,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) struct nvme_command cmd; u32 csts = readl(dev->bar + NVME_REG_CSTS); + /* If PCI error recovery process is happening, we cannot reset or + * the recovery mechanism will surely fail. 
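
The MEDIUM_PRIO_SQ quirk above exists because URGENT is the all-zeroes encoding of the SQ priority field, so a controller that wrongly honors the field treats every default-created queue as URGENT; setting MEDIUM gives it a benign nonzero value. A small model of the flag encoding (constant values assumed to match include/linux/nvme.h):

#include <stdio.h>

/* queue-creation flag encodings; assumed to match include/linux/nvme.h */
#define NVME_QUEUE_PHYS_CONTIG  (1 << 0)
#define NVME_SQ_PRIO_URGENT     (0 << 1)
#define NVME_SQ_PRIO_MEDIUM     (2 << 1)

int main(void)
{
        int flags = NVME_QUEUE_PHYS_CONTIG;

        /* without the quirk the two priority bits stay 0, i.e. URGENT */
        printf("default priority field: %d\n", (flags >> 1) & 3);

        flags |= NVME_SQ_PRIO_MEDIUM;
        printf("quirked priority field: %d\n", (flags >> 1) & 3);
        return 0;
}
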
+ */ + mb(); + if (pci_channel_offline(to_pci_dev(dev->dev))) + return BLK_EH_RESET_TIMER; + /* * Reset immediately if the controller is failed */ @@ -1136,7 +1146,6 @@ static void nvme_free_queue(struct nvme_queue *nvmeq) if (nvmeq->sq_cmds) dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), nvmeq->sq_cmds, nvmeq->sq_dma_addr); - kfree(nvmeq); } static void nvme_free_queues(struct nvme_dev *dev, int lowest) @@ -1144,10 +1153,8 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest) int i; for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { - struct nvme_queue *nvmeq = dev->queues[i]; dev->ctrl.queue_count--; - dev->queues[i] = NULL; - nvme_free_queue(nvmeq); + nvme_free_queue(&dev->queues[i]); } } @@ -1179,10 +1186,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq) static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) { - struct nvme_queue *nvmeq = dev->queues[0]; + struct nvme_queue *nvmeq = &dev->queues[0]; - if (!nvmeq) - return; if (nvme_suspend_queue(nvmeq)) return; @@ -1223,28 +1228,26 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, int qid, int depth) { - if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) { - unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth), - dev->ctrl.page_size); - nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset; - nvmeq->sq_cmds_io = dev->cmb + offset; - } else { - nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), - &nvmeq->sq_dma_addr, GFP_KERNEL); - if (!nvmeq->sq_cmds) - return -ENOMEM; - } + + /* CMB SQEs will be mapped before creation */ + if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) + return 0; + + nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), + &nvmeq->sq_dma_addr, GFP_KERNEL); + if (!nvmeq->sq_cmds) + return -ENOMEM; return 0; } -static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, - int depth, int node) +static int nvme_alloc_queue(struct nvme_dev *dev, int qid, + int depth, int node) { - struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL, - node); - if (!nvmeq) - return NULL; + struct nvme_queue *nvmeq = &dev->queues[qid]; + + if (dev->ctrl.queue_count > qid) + return 0; nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth), &nvmeq->cq_dma_addr, GFP_KERNEL); @@ -1263,17 +1266,15 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, nvmeq->q_depth = depth; nvmeq->qid = qid; nvmeq->cq_vector = -1; - dev->queues[qid] = nvmeq; dev->ctrl.queue_count++; - return nvmeq; + return 0; free_cqdma: dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes, nvmeq->cq_dma_addr); free_nvmeq: - kfree(nvmeq); - return NULL; + return -ENOMEM; } static int queue_request_irq(struct nvme_queue *nvmeq) @@ -1310,10 +1311,17 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) struct nvme_dev *dev = nvmeq->dev; int result; + if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) { + unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth), + dev->ctrl.page_size); + nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset; + nvmeq->sq_cmds_io = dev->cmb + offset; + } + nvmeq->cq_vector = qid - 1; result = adapter_alloc_cq(dev, qid, nvmeq); if (result < 0) - return result; + goto release_vector; result = adapter_alloc_sq(dev, qid, nvmeq); if (result < 0) @@ -1327,9 +1335,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) return result; release_sq: + 
dev->online_queues--; adapter_delete_sq(dev, qid); release_cq: adapter_delete_cq(dev, qid); + release_vector: + nvmeq->cq_vector = -1; return result; } @@ -1450,14 +1461,12 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) if (result < 0) return result; - nvmeq = dev->queues[0]; - if (!nvmeq) { - nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, - dev_to_node(dev->dev)); - if (!nvmeq) - return -ENOMEM; - } + result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, + dev_to_node(dev->dev)); + if (result) + return result; + nvmeq = &dev->queues[0]; aqa = nvmeq->q_depth - 1; aqa |= aqa << 16; @@ -1487,7 +1496,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev) for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { /* vector == qid - 1, match nvme_create_queue */ - if (!nvme_alloc_queue(dev, i, dev->q_depth, + if (nvme_alloc_queue(dev, i, dev->q_depth, pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) { ret = -ENOMEM; break; @@ -1496,7 +1505,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev) max = min(dev->max_qid, dev->ctrl.queue_count - 1); for (i = dev->online_queues; i <= max; i++) { - ret = nvme_create_queue(dev->queues[i], i); + ret = nvme_create_queue(&dev->queues[i], i); if (ret) break; } @@ -1617,6 +1626,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev) dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), dev->host_mem_descs, dev->host_mem_descs_dma); dev->host_mem_descs = NULL; + dev->nr_host_mem_descs = 0; } static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, @@ -1645,7 +1655,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, if (!bufs) goto out_free_descs; - for (size = 0; size < preferred; size += len) { + for (size = 0; size < preferred && i < max_entries; size += len) { dma_addr_t dma_addr; len = min_t(u64, chunk_size, preferred - size); @@ -1751,12 +1761,12 @@ static int nvme_setup_host_mem(struct nvme_dev *dev) static int nvme_setup_io_queues(struct nvme_dev *dev) { - struct nvme_queue *adminq = dev->queues[0]; + struct nvme_queue *adminq = &dev->queues[0]; struct pci_dev *pdev = to_pci_dev(dev->dev); int result, nr_io_queues; unsigned long size; - nr_io_queues = num_present_cpus(); + nr_io_queues = num_possible_cpus(); result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); if (result < 0) return result; @@ -1877,7 +1887,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues) retry: timeout = ADMIN_TIMEOUT; for (; i > 0; i--, sent++) - if (nvme_delete_queue(dev->queues[i], opcode)) + if (nvme_delete_queue(&dev->queues[i], opcode)) break; while (sent--) { @@ -2062,7 +2072,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) queues = dev->online_queues - 1; for (i = dev->ctrl.queue_count - 1; i > 0; i--) - nvme_suspend_queue(dev->queues[i]); + nvme_suspend_queue(&dev->queues[i]); if (dead) { /* A device might become IO incapable very soon during @@ -2070,7 +2080,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) * queue_count can be 0 here. 
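
For the CMB submission-queue mapping moved into nvme_create_queue() above, each I/O queue's SQEs are carved out of the controller memory buffer at a page-rounded stride. A standalone sketch of that offset arithmetic (example depth and page size, not taken from hardware):

#include <stdio.h>

#define SQE_SIZE        64u     /* sizeof(struct nvme_command) */
#define ROUND_UP(x, y)  ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
        unsigned int depth = 1024, page_size = 4096;
        unsigned int sq_bytes = SQE_SIZE * depth;       /* 64 KiB per queue */
        unsigned int qid;

        /* qid 0 is the admin queue; CMB placement starts at I/O qid 1 */
        for (qid = 1; qid <= 3; qid++)
                printf("qid %u -> CMB offset %u\n", qid,
                       (qid - 1) * ROUND_UP(sq_bytes, page_size));
        return 0;
}
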
*/ if (dev->ctrl.queue_count) - nvme_suspend_queue(dev->queues[0]); + nvme_suspend_queue(&dev->queues[0]); } else { nvme_disable_io_queues(dev, queues); nvme_disable_admin_queue(dev, shutdown); @@ -2171,6 +2181,10 @@ static void nvme_reset_work(struct work_struct *work) if (result) goto out; + result = nvme_init_rpmb(&dev->ctrl); + if (result < 0) + goto out; + if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) { if (!dev->ctrl.opal_dev) dev->ctrl.opal_dev = @@ -2282,7 +2296,7 @@ static int nvme_dev_map(struct nvme_dev *dev) return -ENODEV; } -static unsigned long check_dell_samsung_bug(struct pci_dev *pdev) +static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) { if (pdev->vendor == 0x144d && pdev->device == 0xa802) { /* @@ -2297,6 +2311,17 @@ static unsigned long check_dell_samsung_bug(struct pci_dev *pdev) (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) return NVME_QUIRK_NO_DEEPEST_PS; + } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { + /* + * Samsung SSD 960 EVO drops off the PCIe bus after system + * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as + * within few minutes after bootup on a Coffee Lake board - + * ASUS PRIME Z370-A + */ + if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && + (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || + dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) + return NVME_QUIRK_NO_APST; } return 0; @@ -2315,7 +2340,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); if (!dev) return -ENOMEM; - dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *), + + dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(struct nvme_queue), GFP_KERNEL, node); if (!dev->queues) goto free; @@ -2336,8 +2362,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (result) goto unmap; - quirks |= check_dell_samsung_bug(pdev); + quirks |= check_vendor_combination_bug(pdev); + dev_info(&pdev->dev, "calling nvme_init_ctrl\n"); result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, quirks); if (result) @@ -2489,6 +2516,9 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) static void nvme_error_resume(struct pci_dev *pdev) { + struct nvme_dev *dev = pci_get_drvdata(pdev); + + flush_work(&dev->ctrl.reset_work); pci_cleanup_aer_uncorrect_error_status(pdev); } @@ -2514,11 +2544,14 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_STRIPE_SIZE | NVME_QUIRK_DEALLOCATE_ZEROES, }, { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ - .driver_data = NVME_QUIRK_NO_DEEPEST_PS }, + .driver_data = NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_MEDIUM_PRIO_SQ }, { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, + { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ @@ -2529,6 +2562,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_LIGHTNVM, }, { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */ .driver_data = NVME_QUIRK_LIGHTNVM, }, + { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */ + .driver_data = NVME_QUIRK_LIGHTNVM, }, { 
PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 0ebb539f3bd3..48a831d58e7a 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -67,6 +67,9 @@ struct nvme_rdma_request { struct nvme_request req; struct ib_mr *mr; struct nvme_rdma_qe sqe; + union nvme_result result; + __le16 status; + refcount_t ref; struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS]; u32 num_sge; int nents; @@ -85,7 +88,6 @@ enum nvme_rdma_queue_flags { struct nvme_rdma_queue { struct nvme_rdma_qe *rsp_ring; - atomic_t sig_count; int queue_size; size_t cmnd_capsule_len; struct nvme_rdma_ctrl *ctrl; @@ -518,7 +520,6 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, queue->cmnd_capsule_len = sizeof(struct nvme_command); queue->queue_size = queue_size; - atomic_set(&queue->sig_count, 0); queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue, RDMA_PS_TCP, IB_QPT_RC); @@ -795,7 +796,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, if (error) { dev_err(ctrl->ctrl.device, "prop_get NVME_REG_CAP failed\n"); - goto out_cleanup_queue; + goto out_stop_queue; } ctrl->ctrl.sqsize = @@ -803,23 +804,25 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); if (error) - goto out_cleanup_queue; + goto out_stop_queue; ctrl->ctrl.max_hw_sectors = (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9); error = nvme_init_identify(&ctrl->ctrl); if (error) - goto out_cleanup_queue; + goto out_stop_queue; error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); if (error) - goto out_cleanup_queue; + goto out_stop_queue; return 0; +out_stop_queue: + nvme_rdma_stop_queue(&ctrl->queues[0]); out_cleanup_queue: if (new) blk_cleanup_queue(ctrl->ctrl.admin_q); @@ -1177,6 +1180,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, req->num_sge = 1; req->inline_data = false; req->mr->need_inval = false; + refcount_set(&req->ref, 2); /* send and recv completions */ c->common.flags |= NVME_CMD_SGL_METABUF; @@ -1213,25 +1217,24 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) { - if (unlikely(wc->status != IB_WC_SUCCESS)) - nvme_rdma_wr_error(cq, wc, "SEND"); -} + struct nvme_rdma_qe *qe = + container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); + struct nvme_rdma_request *req = + container_of(qe, struct nvme_rdma_request, sqe); + struct request *rq = blk_mq_rq_from_pdu(req); -/* - * We want to signal completion at least every queue depth/2. This returns the - * largest power of two that is not above half of (queue size + 1) to optimize - * (avoid divisions). 
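
The rewritten send-completion handler above walks back from the completion's wr_cqe to the owning request with two container_of() steps; the same pointer arithmetic in a standalone model (stand-in structs, illustrative only):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct cqe { void (*done)(void); };
struct qe { struct cqe cqe; };
struct req { int tag; struct qe sqe; };

int main(void)
{
        struct req r = { .tag = 42 };
        struct cqe *wr_cqe = &r.sqe.cqe;        /* all the CQ handler gets */

        struct qe *qe = container_of(wr_cqe, struct qe, cqe);
        struct req *back = container_of(qe, struct req, sqe);

        printf("recovered tag %d\n", back->tag);        /* 42 */
        return 0;
}
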
- */ -static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue) -{ - int limit = 1 << ilog2((queue->queue_size + 1) / 2); + if (unlikely(wc->status != IB_WC_SUCCESS)) { + nvme_rdma_wr_error(cq, wc, "SEND"); + return; + } - return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0; + if (refcount_dec_and_test(&req->ref)) + nvme_end_request(rq, req->status, req->result); } static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge, - struct ib_send_wr *first, bool flush) + struct ib_send_wr *first) { struct ib_send_wr wr, *bad_wr; int ret; @@ -1240,31 +1243,12 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, sge->length = sizeof(struct nvme_command), sge->lkey = queue->device->pd->local_dma_lkey; - qe->cqe.done = nvme_rdma_send_done; - wr.next = NULL; wr.wr_cqe = &qe->cqe; wr.sg_list = sge; wr.num_sge = num_sge; wr.opcode = IB_WR_SEND; - wr.send_flags = 0; - - /* - * Unsignalled send completions are another giant desaster in the - * IB Verbs spec: If we don't regularly post signalled sends - * the send queue will fill up and only a QP reset will rescue us. - * Would have been way to obvious to handle this in hardware or - * at least the RDMA stack.. - * - * Always signal the flushes. The magic request used for the flush - * sequencer is not allocated in our driver's tagset and it's - * triggered to be freed by blk_cleanup_queue(). So we need to - * always mark it as signaled to ensure that the "wr_cqe", which is - * embedded in request's payload, is not freed when __ib_process_cq() - * calls wr_cqe->done(). - */ - if (nvme_rdma_queue_sig_limit(queue) || flush) - wr.send_flags |= IB_SEND_SIGNALED; + wr.send_flags = IB_SEND_SIGNALED; if (first) first->next = ≀ @@ -1314,6 +1298,12 @@ static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue) return queue->ctrl->tag_set.tags[queue_idx - 1]; } +static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc) +{ + if (unlikely(wc->status != IB_WC_SUCCESS)) + nvme_rdma_wr_error(cq, wc, "ASYNC"); +} + static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx) { struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg); @@ -1335,10 +1325,12 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx) cmd->common.flags |= NVME_CMD_SGL_METABUF; nvme_rdma_set_sg_null(cmd); + sqe->cqe.done = nvme_rdma_async_done; + ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE); - ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false); + ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL); WARN_ON_ONCE(ret); } @@ -1359,14 +1351,19 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, } req = blk_mq_rq_to_pdu(rq); - if (rq->tag == tag) - ret = 1; + req->status = cqe->status; + req->result = cqe->result; if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) && wc->ex.invalidate_rkey == req->mr->rkey) req->mr->need_inval = false; - nvme_end_request(rq, cqe->status, cqe->result); + if (refcount_dec_and_test(&req->ref)) { + if (rq->tag == tag) + ret = 1; + nvme_end_request(rq, req->status, req->result); + } + return ret; } @@ -1603,31 +1600,11 @@ nvme_rdma_timeout(struct request *rq, bool reserved) * We cannot accept any other command until the Connect command has completed. 
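
Since every send is now signaled, a request sees two completions (the SEND completion and the NVMe response), and the refcount initialized to 2 lets whichever arrives last finish the request, in either order. A userspace sketch of that scheme, with atomic_int standing in for refcount_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct rdma_req_model { atomic_int ref; };

/* analogous to refcount_dec_and_test(): true only for the last put */
static bool req_put(struct rdma_req_model *r)
{
        return atomic_fetch_sub(&r->ref, 1) == 1;
}

int main(void)
{
        struct rdma_req_model r;

        atomic_init(&r.ref, 2);                 /* send + response */

        if (!req_put(&r))                       /* SEND completion */
                printf("send done, response still outstanding\n");
        if (req_put(&r))                        /* NVMe response */
                printf("last reference: complete the request\n");
        return 0;
}
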
*/ static inline blk_status_t -nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq) -{ - if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { - struct nvme_command *cmd = nvme_req(rq)->cmd; - - if (!blk_rq_is_passthrough(rq) || - cmd->common.opcode != nvme_fabrics_command || - cmd->fabrics.fctype != nvme_fabrics_type_connect) { - /* - * reconnecting state means transport disruption, which - * can take a long time and even might fail permanently, - * fail fast to give upper layers a chance to failover. - * deleting state means that the ctrl will never accept - * commands again, fail it permanently. - */ - if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING || - queue->ctrl->ctrl.state == NVME_CTRL_DELETING) { - nvme_req(rq)->status = NVME_SC_ABORT_REQ; - return BLK_STS_IOERR; - } - return BLK_STS_RESOURCE; /* try again later */ - } - } - - return 0; +nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq) +{ + if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) + return nvmf_check_init_req(&queue->ctrl->ctrl, rq); + return BLK_STS_OK; } static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, @@ -1639,14 +1616,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); struct nvme_rdma_qe *sqe = &req->sqe; struct nvme_command *c = sqe->data; - bool flush = false; struct ib_device *dev; blk_status_t ret; int err; WARN_ON_ONCE(rq->tag < 0); - ret = nvme_rdma_queue_is_ready(queue, rq); + ret = nvme_rdma_is_ready(queue, rq); if (unlikely(ret)) return ret; @@ -1668,13 +1644,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, goto err; } + sqe->cqe.done = nvme_rdma_send_done; + ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(struct nvme_command), DMA_TO_DEVICE); - if (req_op(rq) == REQ_OP_FLUSH) - flush = true; err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, - req->mr->need_inval ? &req->reg_wr.wr : NULL, flush); + req->mr->need_inval ? &req->reg_wr.wr : NULL); if (unlikely(err)) { nvme_rdma_unmap_data(queue, rq); goto err; diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index 03e4ab65fe77..48d20c2c1256 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig @@ -27,7 +27,7 @@ config NVME_TARGET_LOOP config NVME_TARGET_RDMA tristate "NVMe over Fabrics RDMA target support" - depends on INFINIBAND + depends on INFINIBAND && INFINIBAND_ADDR_TRANS depends on NVME_TARGET help This enables the NVMe RDMA target support, which allows exporting NVMe diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 645ba7eee35d..240b0d628222 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -505,9 +505,12 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, goto fail; } - /* either variant of SGLs is fine, as we don't support metadata */ - if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF && - (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) { + /* + * For fabrics, PSDT field shall describe metadata pointer (MPTR) that + * contains an address of a single contiguous physical buffer that is + * byte aligned. 
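
The check just below this comment tightens the accepted PSDT encodings to METABUF only. A standalone model of the flags test (the constant values below are an assumption, taken to match the include/linux/nvme.h enum):

#include <stdint.h>
#include <stdio.h>

/* assumed to match the include/linux/nvme.h enum values */
#define NVME_CMD_SGL_METABUF    (1 << 6)
#define NVME_CMD_SGL_METASEG    (1 << 7)
#define NVME_CMD_SGL_ALL        (NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG)

static int sgl_flags_ok(uint8_t flags)
{
        /* only the METABUF (contiguous MPTR) encoding is accepted now */
        return (flags & NVME_CMD_SGL_ALL) == NVME_CMD_SGL_METABUF;
}

int main(void)
{
        printf("%d %d\n",
               sgl_flags_ok(NVME_CMD_SGL_METABUF),      /* 1: still valid */
               sgl_flags_ok(NVME_CMD_SGL_METASEG));     /* 0: now rejected */
        return 0;
}
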
+ */ + if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto fail; } diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 58e010bdda3e..b7a5d1065378 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod { struct work_struct work; } __aligned(sizeof(unsigned long long)); +/* desired maximum for a single sequence - if sg list allows it */ #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) -#define NVMET_FC_MAX_XFR_SGENTS (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE) enum nvmet_fcp_datadir { NVMET_FCP_NODATA, @@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod { struct nvme_fc_cmd_iu cmdiubuf; struct nvme_fc_ersp_iu rspiubuf; dma_addr_t rspdma; + struct scatterlist *next_sg; struct scatterlist *data_sg; int data_sg_cnt; u32 total_length; @@ -532,15 +533,15 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); + /* release the queue lookup reference on the completed IO */ + nvmet_fc_tgt_q_put(queue); + spin_lock_irqsave(&queue->qlock, flags); deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, struct nvmet_fc_defer_fcp_req, req_list); if (!deferfcp) { list_add_tail(&fod->fcp_list, &fod->queue->fod_list); spin_unlock_irqrestore(&queue->qlock, flags); - - /* Release reference taken at queue lookup and fod allocation */ - nvmet_fc_tgt_q_put(queue); return; } @@ -759,6 +760,9 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) tgtport->ops->fcp_req_release(&tgtport->fc_target_port, deferfcp->fcp_req); + /* release the queue lookup reference */ + nvmet_fc_tgt_q_put(queue); + kfree(deferfcp); spin_lock_irqsave(&queue->qlock, flags); @@ -997,8 +1001,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, INIT_LIST_HEAD(&newrec->assoc_list); kref_init(&newrec->ref); ida_init(&newrec->assoc_cnt); - newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS, - template->max_sgl_segments); + newrec->max_sg_cnt = template->max_sgl_segments; ret = nvmet_fc_alloc_ls_iodlist(newrec); if (ret) { @@ -1714,6 +1717,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) ((fod->io_dir == NVMET_FCP_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE)); /* note: write from initiator perspective */ + fod->next_sg = fod->data_sg; return 0; @@ -1871,24 +1875,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_fcp_iod *fod, u8 op) { struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; + struct scatterlist *sg = fod->next_sg; unsigned long flags; - u32 tlen; + u32 remaininglen = fod->total_length - fod->offset; + u32 tlen = 0; int ret; fcpreq->op = op; fcpreq->offset = fod->offset; fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; - tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE, - (fod->total_length - fod->offset)); + /* + * for next sequence: + * break at a sg element boundary + * attempt to keep sequence length capped at + * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to + * be longer if a single sg element is larger + * than that amount. This is done to avoid creating + * a new sg list to use for the tgtport api. 
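
The loop that follows builds each sequence by walking sg elements until the 256k cap or the sg-count limit, always taking at least one element so a single oversized element still makes progress. The same accumulation logic as a standalone model with made-up segment lengths:

#include <stdio.h>

#define MAX_SEQ_LEN     (256 * 1024)

int main(void)
{
        /* made-up DMA segment lengths for one request */
        unsigned int sg_len[] = { 128 * 1024, 128 * 1024, 64 * 1024 };
        unsigned int nsg = 3, max_sg_cnt = 8;
        unsigned int remaining = 320 * 1024;    /* bytes left to send */
        unsigned int i = 0, cnt = 0, tlen = 0;

        while (tlen < remaining && cnt < max_sg_cnt && i < nsg &&
               tlen + sg_len[i] < MAX_SEQ_LEN) {
                tlen += sg_len[i++];
                cnt++;
        }
        /* a single element larger than the cap must still be taken */
        if (tlen < remaining && cnt == 0 && i < nsg) {
                tlen += sg_len[i] < remaining ? sg_len[i] : remaining;
                i++;
                cnt++;
        }
        /* prints: sequence of 131072 bytes in 1 sg entries */
        printf("sequence of %u bytes in %u sg entries\n", tlen, cnt);
        return 0;
}
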
+ */ + fcpreq->sg = sg; + fcpreq->sg_cnt = 0; + while (tlen < remaininglen && + fcpreq->sg_cnt < tgtport->max_sg_cnt && + tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) { + fcpreq->sg_cnt++; + tlen += sg_dma_len(sg); + sg = sg_next(sg); + } + if (tlen < remaininglen && fcpreq->sg_cnt == 0) { + fcpreq->sg_cnt++; + tlen += min_t(u32, sg_dma_len(sg), remaininglen); + sg = sg_next(sg); + } + if (tlen < remaininglen) + fod->next_sg = sg; + else + fod->next_sg = NULL; + fcpreq->transfer_length = tlen; fcpreq->transferred_length = 0; fcpreq->fcp_error = 0; fcpreq->rsplen = 0; - fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE]; - fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE); - /* * If the last READDATA request: check if LLDD supports * combined xfr with response. diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 7b75d9de55ab..c0080f6ab2f5 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -204,6 +204,10 @@ struct fcloop_lport { struct completion unreg_done; }; +struct fcloop_lport_priv { + struct fcloop_lport *lport; +}; + struct fcloop_rport { struct nvme_fc_remote_port *remoteport; struct nvmet_fc_target_port *targetport; @@ -370,6 +374,7 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work) spin_lock(&tfcp_req->reqlock); fcpreq = tfcp_req->fcpreq; + tfcp_req->fcpreq = NULL; spin_unlock(&tfcp_req->reqlock); if (tport->remoteport && fcpreq) { @@ -611,11 +616,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, if (!tfcp_req) /* abort has already been called */ - return; - - if (rport->targetport) - nvmet_fc_rcv_fcp_abort(rport->targetport, - &tfcp_req->tgt_fcp_req); + goto finish; /* break initiator/target relationship for io */ spin_lock(&tfcp_req->reqlock); @@ -623,6 +624,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, tfcp_req->fcpreq = NULL; spin_unlock(&tfcp_req->reqlock); + if (rport->targetport) + nvmet_fc_rcv_fcp_abort(rport->targetport, + &tfcp_req->tgt_fcp_req); + +finish: /* post the aborted io completion */ fcpreq->status = -ECANCELED; schedule_work(&inireq->iniwork); @@ -657,7 +663,8 @@ fcloop_nport_get(struct fcloop_nport *nport) static void fcloop_localport_delete(struct nvme_fc_local_port *localport) { - struct fcloop_lport *lport = localport->private; + struct fcloop_lport_priv *lport_priv = localport->private; + struct fcloop_lport *lport = lport_priv->lport; /* release any threads waiting for the unreg to complete */ complete(&lport->unreg_done); @@ -697,7 +704,7 @@ static struct nvme_fc_port_template fctemplate = { .max_dif_sgl_segments = FCLOOP_SGL_SEGS, .dma_boundary = FCLOOP_DMABOUND_4G, /* sizes of additional private data for data structures */ - .local_priv_sz = sizeof(struct fcloop_lport), + .local_priv_sz = sizeof(struct fcloop_lport_priv), .remote_priv_sz = sizeof(struct fcloop_rport), .lsrqst_priv_sz = sizeof(struct fcloop_lsreq), .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq), @@ -728,11 +735,17 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr, struct fcloop_ctrl_options *opts; struct nvme_fc_local_port *localport; struct fcloop_lport *lport; - int ret; + struct fcloop_lport_priv *lport_priv; + unsigned long flags; + int ret = -ENOMEM; + + lport = kzalloc(sizeof(*lport), GFP_KERNEL); + if (!lport) + return -ENOMEM; opts = kzalloc(sizeof(*opts), GFP_KERNEL); if (!opts) - return -ENOMEM; + goto out_free_lport; ret = fcloop_parse_options(opts, buf); if (ret) @@ -752,23 +765,25 @@ fcloop_create_local_port(struct device *dev, struct device_attribute 
*attr, ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport); if (!ret) { - unsigned long flags; - /* success */ - lport = localport->private; + lport_priv = localport->private; + lport_priv->lport = lport; + lport->localport = localport; INIT_LIST_HEAD(&lport->lport_list); spin_lock_irqsave(&fcloop_lock, flags); list_add_tail(&lport->lport_list, &fcloop_lports); spin_unlock_irqrestore(&fcloop_lock, flags); - - /* mark all of the input buffer consumed */ - ret = count; } out_free_opts: kfree(opts); +out_free_lport: + /* free only if we're going to fail */ + if (ret) + kfree(lport); + return ret ? ret : count; } @@ -790,6 +805,8 @@ __wait_localport_unreg(struct fcloop_lport *lport) wait_for_completion(&lport->unreg_done); + kfree(lport); + return ret; } diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 92628c432926..02aff5cc48bf 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -61,10 +61,15 @@ static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl) return container_of(ctrl, struct nvme_loop_ctrl, ctrl); } +enum nvme_loop_queue_flags { + NVME_LOOP_Q_LIVE = 0, +}; + struct nvme_loop_queue { struct nvmet_cq nvme_cq; struct nvmet_sq nvme_sq; struct nvme_loop_ctrl *ctrl; + unsigned long flags; }; static struct nvmet_port *nvmet_loop_port; @@ -153,6 +158,14 @@ nvme_loop_timeout(struct request *rq, bool reserved) return BLK_EH_HANDLED; } +static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue, + struct request *rq) +{ + if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags))) + return nvmf_check_init_req(&queue->ctrl->ctrl, rq); + return BLK_STS_OK; +} + static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { @@ -162,6 +175,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req); blk_status_t ret; + ret = nvme_loop_is_ready(queue, req); + if (unlikely(ret)) + return ret; + ret = nvme_setup_cmd(ns, req, &iod->cmd); if (ret) return ret; @@ -275,6 +292,7 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = { static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) { + clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); blk_cleanup_queue(ctrl->ctrl.admin_q); blk_mq_free_tag_set(&ctrl->admin_tag_set); @@ -305,8 +323,10 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) { int i; - for (i = 1; i < ctrl->ctrl.queue_count; i++) + for (i = 1; i < ctrl->ctrl.queue_count; i++) { + clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); + } } static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) @@ -346,6 +366,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl) ret = nvmf_connect_io_queue(&ctrl->ctrl, i); if (ret) return ret; + set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); } return 0; @@ -387,6 +408,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) if (error) goto out_cleanup_queue; + set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); + error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap); if (error) { dev_err(ctrl->ctrl.device, diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 76d2bb793afe..3333d417b248 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -1512,15 +1512,17 @@ static struct nvmet_fabrics_ops nvmet_rdma_ops = { static void 
nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data) { - struct nvmet_rdma_queue *queue; + struct nvmet_rdma_queue *queue, *tmp; /* Device is being removed, delete all queues using this device */ mutex_lock(&nvmet_rdma_queue_mutex); - list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) { + list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, + queue_list) { if (queue->dev->device != ib_device) continue; pr_info("Removing queue %d\n", queue->idx); + list_del_init(&queue->queue_list); __nvmet_rdma_queue_disconnect(queue); } mutex_unlock(&nvmet_rdma_queue_mutex); diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index d12e5de78e70..2afafd5d8915 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -1049,6 +1049,8 @@ static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, /* setup the first byte with lsb bits from nvmem */ rc = nvmem_reg_read(nvmem, cell->offset, &v, 1); + if (rc) + goto err; *b++ |= GENMASK(bit_offset - 1, 0) & v; /* setup rest of the byte if any */ @@ -1067,11 +1069,16 @@ static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, /* setup the last byte with msb bits from nvmem */ rc = nvmem_reg_read(nvmem, cell->offset + cell->bytes - 1, &v, 1); + if (rc) + goto err; *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v; } return buf; +err: + kfree(buf); + return ERR_PTR(rc); } /** diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index ce30c9a588a4..b1103e519ef6 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -975,7 +975,7 @@ int __init early_init_dt_scan_chosen_stdout(void) int offset; const char *p, *q, *options = NULL; int l; - const struct earlycon_id *match; + const struct earlycon_id **p_match; const void *fdt = initial_boot_params; offset = fdt_path_offset(fdt, "/chosen"); @@ -1002,7 +1002,10 @@ int __init early_init_dt_scan_chosen_stdout(void) return 0; } - for (match = __earlycon_table; match < __earlycon_table_end; match++) { + for (p_match = __earlycon_table; p_match < __earlycon_table_end; + p_match++) { + const struct earlycon_id *match = *p_match; + if (!match->compatible[0]) continue; @@ -1109,42 +1112,66 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname, return 0; } +/* + * Convert configs to something easy to use in C code + */ +#if defined(CONFIG_CMDLINE_FORCE) +static const int overwrite_incoming_cmdline = 1; +static const int read_dt_cmdline; +static const int concat_cmdline; +#elif defined(CONFIG_CMDLINE_EXTEND) +static const int overwrite_incoming_cmdline; +static const int read_dt_cmdline = 1; +static const int concat_cmdline = 1; +#else /* CMDLINE_FROM_BOOTLOADER */ +static const int overwrite_incoming_cmdline; +static const int read_dt_cmdline = 1; +static const int concat_cmdline; +#endif + +#ifdef CONFIG_CMDLINE +static const char *config_cmdline = CONFIG_CMDLINE; +#else +static const char *config_cmdline = ""; +#endif + int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, int depth, void *data) { - int l; - const char *p; + int l = 0; + const char *p = NULL; + char *cmdline = data; pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname); - if (depth != 1 || !data || + if (depth != 1 || !cmdline || (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) return 0; early_init_dt_check_for_initrd(node); - /* Retrieve command line */ - p = of_get_flat_dt_prop(node, "bootargs", &l); - if (p != NULL && l > 0) - strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE)); - - /* - * 
CONFIG_CMDLINE is meant to be a default in case nothing else - * managed to set the command line, unless CONFIG_CMDLINE_FORCE - * is set in which case we override whatever was found earlier. - */ -#ifdef CONFIG_CMDLINE -#if defined(CONFIG_CMDLINE_EXTEND) - strlcat(data, " ", COMMAND_LINE_SIZE); - strlcat(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#elif defined(CONFIG_CMDLINE_FORCE) - strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#else - /* No arguments from boot loader, use kernel's cmdl*/ - if (!((char *)data)[0]) - strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#endif -#endif /* CONFIG_CMDLINE */ + /* Put CONFIG_CMDLINE in if forced or if data had nothing in it to start */ + if (overwrite_incoming_cmdline || !cmdline[0]) + strlcpy(cmdline, config_cmdline, COMMAND_LINE_SIZE); + + /* Retrieve command line unless forcing */ + if (read_dt_cmdline) + p = of_get_flat_dt_prop(node, "bootargs", &l); + + if (p != NULL && l > 0) { + if (concat_cmdline) { + int cmdline_len; + int copy_len; + strlcat(cmdline, " ", COMMAND_LINE_SIZE); + cmdline_len = strlen(cmdline); + copy_len = COMMAND_LINE_SIZE - cmdline_len - 1; + copy_len = min((int)l, copy_len); + strncpy(cmdline + cmdline_len, p, copy_len); + cmdline[cmdline_len + copy_len] = '\0'; + } else { + strlcpy(cmdline, p, min((int)l, COMMAND_LINE_SIZE)); + } + } pr_debug("Command line is: %s\n", (char*)data); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 98258583abb0..8c1819230ed2 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -228,7 +228,12 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) rc = of_mdiobus_register_phy(mdio, child, addr); else rc = of_mdiobus_register_device(mdio, child, addr); - if (rc) + + if (rc == -ENODEV) + dev_err(&mdio->dev, + "MDIO device at address %d is missing.\n", + addr); + else if (rc) goto unregister; } @@ -252,7 +257,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) if (of_mdiobus_child_is_phy(child)) { rc = of_mdiobus_register_phy(mdio, child, addr); - if (rc) + if (rc && rc != -ENODEV) goto unregister; } } diff --git a/drivers/of/platform.c b/drivers/of/platform.c index ac15d0e3d27d..0f49718c6012 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -533,6 +533,9 @@ int of_platform_device_destroy(struct device *dev, void *data) if (of_node_check_flag(dev->of_node, OF_POPULATED_BUS)) device_for_each_child(dev, NULL, of_platform_device_destroy); + of_node_clear_flag(dev->of_node, OF_POPULATED); + of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); + if (dev->bus == &platform_bus_type) platform_device_unregister(to_platform_device(dev)); #ifdef CONFIG_ARM_AMBA @@ -540,8 +543,6 @@ int of_platform_device_destroy(struct device *dev, void *data) amba_device_unregister(to_amba_device(dev)); #endif - of_node_clear_flag(dev->of_node, OF_POPULATED); - of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); return 0; } EXPORT_SYMBOL_GPL(of_platform_device_destroy); diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c index 99309cb7d372..3bf4b6489fd0 100644 --- a/drivers/of/resolver.c +++ b/drivers/of/resolver.c @@ -129,6 +129,11 @@ static int update_usages_of_a_phandle_reference(struct device_node *overlay, goto err_fail; } + if (offset < 0 || offset + sizeof(__be32) > prop->length) { + err = -EINVAL; + goto err_fail; + } + *(__be32 *)(prop->value + offset) = cpu_to_be32(phandle); } diff --git a/drivers/of/unittest-data/.gitignore b/drivers/of/unittest-data/.gitignore deleted file mode 100644 index 4b3cf8b16de2..000000000000 
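
The three policy constants above reduce to: FORCE overwrites everything with CONFIG_CMDLINE, EXTEND appends the DT bootargs to the incoming string, and the bootloader default lets DT bootargs replace it. A userspace sketch of the resulting behavior (buffer size and strings are arbitrary):

#include <stdio.h>
#include <string.h>

#define CL_SIZE 64

/* force/read_dt/concat correspond to the consts above:
 * FORCE = {1,0,0}, EXTEND = {0,1,1}, FROM_BOOTLOADER = {0,1,0} */
static void build(char *out, const char *boot, const char *dt,
                  const char *config, int force, int read_dt, int concat)
{
        snprintf(out, CL_SIZE, "%s", boot);
        if (force || !out[0])
                snprintf(out, CL_SIZE, "%s", config);
        if (read_dt && dt && dt[0]) {
                if (concat) {
                        strncat(out, " ", CL_SIZE - strlen(out) - 1);
                        strncat(out, dt, CL_SIZE - strlen(out) - 1);
                } else {
                        snprintf(out, CL_SIZE, "%s", dt);
                }
        }
}

int main(void)
{
        char cl[CL_SIZE];

        build(cl, "from-boot", "from-dt", "from-config", 0, 1, 1);
        printf("EXTEND:          %s\n", cl);    /* from-boot from-dt */
        build(cl, "from-boot", "from-dt", "from-config", 1, 0, 0);
        printf("FORCE:           %s\n", cl);    /* from-config */
        build(cl, "from-boot", "from-dt", "from-config", 0, 1, 0);
        printf("FROM_BOOTLOADER: %s\n", cl);    /* from-dt */
        return 0;
}
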
--- a/drivers/of/unittest-data/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -testcases.dtb -testcases.dtb.S diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 168ef0bbabde..985a85f281a8 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -164,20 +164,20 @@ static void __init of_unittest_dynamic(void) /* Add a new property - should pass*/ prop->name = "new-property"; prop->value = "new-property-data"; - prop->length = strlen(prop->value); + prop->length = strlen(prop->value) + 1; unittest(of_add_property(np, prop) == 0, "Adding a new property failed\n"); /* Try to add an existing property - should fail */ prop++; prop->name = "new-property"; prop->value = "new-property-data-should-fail"; - prop->length = strlen(prop->value); + prop->length = strlen(prop->value) + 1; unittest(of_add_property(np, prop) != 0, "Adding an existing property should have failed\n"); /* Try to modify an existing property - should pass */ prop->value = "modify-property-data-should-pass"; - prop->length = strlen(prop->value); + prop->length = strlen(prop->value) + 1; unittest(of_update_property(np, prop) == 0, "Updating an existing property should have passed\n"); @@ -185,7 +185,7 @@ static void __init of_unittest_dynamic(void) prop++; prop->name = "modify-property"; prop->value = "modify-missing-property-data-should-pass"; - prop->length = strlen(prop->value); + prop->length = strlen(prop->value) + 1; unittest(of_update_property(np, prop) == 0, "Updating a missing property should have passed\n"); diff --git a/drivers/opp/Kconfig b/drivers/opp/Kconfig new file mode 100644 index 000000000000..a7fbb93f302c --- /dev/null +++ b/drivers/opp/Kconfig @@ -0,0 +1,13 @@ +config PM_OPP + bool + select SRCU + ---help--- + SOCs have a standard set of tuples consisting of frequency and + voltage pairs that the device will support per voltage domain. This + is called Operating Performance Point or OPP. The actual definitions + of OPP varies over silicon within the same family of devices. + + OPP layer organizes the data internally using device pointers + representing individual voltage domains and provides SOC + implementations a ready to use framework to manage OPPs. + For more information, read diff --git a/drivers/base/power/opp/Makefile b/drivers/opp/Makefile similarity index 100% rename from drivers/base/power/opp/Makefile rename to drivers/opp/Makefile diff --git a/drivers/base/power/opp/core.c b/drivers/opp/core.c similarity index 99% rename from drivers/base/power/opp/core.c rename to drivers/opp/core.c index a6de32530693..d4862775b9f6 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/opp/core.c @@ -296,7 +296,7 @@ int dev_pm_opp_get_opp_count(struct device *dev) opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { count = PTR_ERR(opp_table); - dev_err(dev, "%s: OPP table not found (%d)\n", + dev_dbg(dev, "%s: OPP table not found (%d)\n", __func__, count); return count; } @@ -552,7 +552,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table, } /* Scaling up? 
Scale voltage before frequency */ - if (freq > old_freq) { + if (freq >= old_freq) { ret = _set_opp_voltage(dev, reg, new_supply); if (ret) goto restore_voltage; diff --git a/drivers/base/power/opp/cpu.c b/drivers/opp/cpu.c similarity index 100% rename from drivers/base/power/opp/cpu.c rename to drivers/opp/cpu.c diff --git a/drivers/base/power/opp/debugfs.c b/drivers/opp/debugfs.c similarity index 100% rename from drivers/base/power/opp/debugfs.c rename to drivers/opp/debugfs.c diff --git a/drivers/base/power/opp/of.c b/drivers/opp/of.c similarity index 99% rename from drivers/base/power/opp/of.c rename to drivers/opp/of.c index 0b718886479b..87509cb69f79 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/opp/of.c @@ -397,6 +397,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, ret); _dev_pm_opp_remove_table(opp_table, dev, false); + of_node_put(np); goto put_opp_table; } } diff --git a/drivers/base/power/opp/opp.h b/drivers/opp/opp.h similarity index 100% rename from drivers/base/power/opp/opp.h rename to drivers/opp/opp.h diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index a25fed52f7e9..69bd98421eb1 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -1403,9 +1403,27 @@ lba_hw_init(struct lba_device *d) WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG); } - /* Set HF mode as the default (vs. -1 mode). */ + + /* + * Hard Fail vs. Soft Fail on PCI "Master Abort". + * + * "Master Abort" means the MMIO transaction timed out - usually because + * the device did not respond to an MMIO read. We would like Hard Fail to be + * enabled to find driver problems, though it means the system will + * crash with an HPMC. + * + * In Soft Fail mode "~0L" is returned as the result of a timeout on the + * PCI bus. This matches how PCI buses on x86 and most other + * architectures behave. To increase compatibility with existing (x86) + * PCI hardware and existing Linux drivers, we now enable Soft Fail + * mode on PA-RISC as well. + */ stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL); +#if defined(ENABLE_HARDFAIL) WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL); +#else + WRITE_REG32(stat & ~HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL); +#endif /* ** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal @@ -1692,3 +1710,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask) iounmap(base_addr); } + +/* + * The design of the Diva management card in rp34x0 machines (rp3410, rp3440) + * seems rushed: many built-in components simply don't work. + * The following quirks disable the serial AUX port and the built-in ATI RV100 + * Radeon 7000 graphics card, both of which have no external connectors and + * are therefore useless. Worse, the AUX port occupies ttyS0, which makes + * these the only PA-RISC machines on which ttyS0 cannot be used as the + * boot console.
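
With Soft Fail selected above, a master abort completes the read with all ones instead of taking an HPMC, so a driver can detect a dead or surprise-removed device by checking for that pattern, roughly:

#include <stdint.h>
#include <stdio.h>

/* a master abort now completes the MMIO read with all ones */
static int device_vanished(uint32_t mmio_val)
{
        return mmio_val == UINT32_MAX;
}

int main(void)
{
        printf("%d %d\n",
               device_vanished(0xffffffff),     /* 1: device gone */
               device_vanished(0x00000100));    /* 0: plausible register */
        return 0;
}
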
+ */ +static void quirk_diva_ati_card(struct pci_dev *dev) +{ + if (dev->subsystem_vendor != PCI_VENDOR_ID_HP || + dev->subsystem_device != 0x1292) + return; + + dev_info(&dev->dev, "Hiding Diva built-in ATI card"); + dev->device = 0; +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY, + quirk_diva_ati_card); + +static void quirk_diva_aux_disable(struct pci_dev *dev) +{ + if (dev->subsystem_vendor != PCI_VENDOR_ID_HP || + dev->subsystem_device != 0x1291) + return; + + dev_info(&dev->dev, "Hiding Diva built-in AUX serial device"); + dev->device = 0; +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX, + quirk_diva_aux_disable); diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 489492b608cf..380916bff9e0 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -2646,6 +2646,7 @@ enum parport_pc_pci_cards { netmos_9901, netmos_9865, quatech_sppxp100, + wch_ch382l, }; @@ -2708,6 +2709,7 @@ static struct parport_pc_pci { /* netmos_9901 */ { 1, { { 0, -1 }, } }, /* netmos_9865 */ { 1, { { 0, -1 }, } }, /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, + /* wch_ch382l */ { 1, { { 2, -1 }, } }, }; static const struct pci_device_id parport_pc_pci_tbl[] = { @@ -2797,6 +2799,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = { /* Quatech SPPXP-100 Parallel port PCI ExpressCard */ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, + /* WCH CH382L PCI-E single parallel port card */ + { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l }, { 0, } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl); diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c index 34427a6a15af..362607f727ee 100644 --- a/drivers/pci/dwc/pci-dra7xx.c +++ b/drivers/pci/dwc/pci-dra7xx.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include @@ -594,6 +595,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) int i; int phy_count; struct phy **phy; + struct device_link **link; void __iomem *base; struct resource *res; struct dw_pcie *pci; @@ -649,11 +651,21 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) if (!phy) return -ENOMEM; + link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL); + if (!link) + return -ENOMEM; + for (i = 0; i < phy_count; i++) { snprintf(name, sizeof(name), "pcie-phy%d", i); phy[i] = devm_phy_get(dev, name); if (IS_ERR(phy[i])) return PTR_ERR(phy[i]); + + link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); + if (!link[i]) { + ret = -EINVAL; + goto err_link; + } } dra7xx->base = base; @@ -732,6 +744,10 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) pm_runtime_disable(dev); dra7xx_pcie_disable_phy(dra7xx); +err_link: + while (--i >= 0) + device_link_del(link[i]); + return ret; } diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c index 5596fdedbb94..ea03f1ec12a4 100644 --- a/drivers/pci/dwc/pci-exynos.c +++ b/drivers/pci/dwc/pci-exynos.c @@ -695,7 +695,8 @@ static int __init exynos_pcie_probe(struct platform_device *pdev) return ret; } - if (ep->ops && ep->ops->get_clk_resources) { + if (ep->ops && ep->ops->get_clk_resources && + ep->ops->init_clk_resources) { ret = ep->ops->get_clk_resources(ep); if (ret) return ret; diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c index 5bee3af47588..39405598b22d 100644 --- a/drivers/pci/dwc/pci-keystone.c +++ b/drivers/pci/dwc/pci-keystone.c 
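
The err_link unwind in the dra7xx probe above is the usual partial-initialization idiom: on a failure at index i, exactly the links 0..i-1 that were created get deleted. The shape of that pattern in a standalone model (stand-in setup/undo functions):

#include <stdio.h>

#define N 4

static int setup(int i)  { return i == 2 ? -1 : 0; }    /* fail at 2 */
static void undo(int i)  { printf("undo %d\n", i); }

int main(void)
{
        int i, ret = 0;

        for (i = 0; i < N; i++) {
                ret = setup(i);
                if (ret)
                        goto err;       /* entry i was NOT created */
        }
        return 0;
err:
        while (--i >= 0)        /* unwind only the completed entries */
                undo(i);
        return ret;
}
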
@@ -178,7 +178,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, } /* interrupt controller is in a child node */ - *np_temp = of_find_node_by_name(np_pcie, controller); + *np_temp = of_get_child_by_name(np_pcie, controller); if (!(*np_temp)) { dev_err(dev, "Node for %s is absent\n", controller); return -EINVAL; @@ -187,6 +187,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, temp = of_irq_count(*np_temp); if (!temp) { dev_err(dev, "No IRQ entries in %s\n", controller); + of_node_put(*np_temp); return -EINVAL; } @@ -204,6 +205,8 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, break; } + of_node_put(*np_temp); + if (temp) { *num_irqs = temp; return 0; diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c index d53d5f168363..7c621877a939 100644 --- a/drivers/pci/dwc/pcie-designware-ep.c +++ b/drivers/pci/dwc/pcie-designware-ep.c @@ -197,20 +197,14 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr, static int dw_pcie_ep_get_msi(struct pci_epc *epc) { int val; - u32 lower_addr; - u32 upper_addr; struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - val = dw_pcie_readb_dbi(pci, MSI_MESSAGE_CONTROL); - val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT; - - lower_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32); - upper_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32); - - if (!(lower_addr || upper_addr)) + val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL); + if (!(val & MSI_CAP_MSI_EN_MASK)) return -EINVAL; + val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT; return val; } diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c index 81e2157a7cfb..bc3e2d8d0cce 100644 --- a/drivers/pci/dwc/pcie-designware-host.c +++ b/drivers/pci/dwc/pcie-designware-host.c @@ -607,7 +607,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp) /* setup bus numbers */ val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); val &= 0xff000000; - val |= 0x00010100; + val |= 0x00ff0100; dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val); /* setup command register */ diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h index e5d9d77b778e..cb493bcae8b4 100644 --- a/drivers/pci/dwc/pcie-designware.h +++ b/drivers/pci/dwc/pcie-designware.h @@ -101,6 +101,7 @@ #define MSI_MESSAGE_CONTROL 0x52 #define MSI_CAP_MMC_SHIFT 1 #define MSI_CAP_MME_SHIFT 4 +#define MSI_CAP_MSI_EN_MASK 0x1 #define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT) #define MSI_MESSAGE_ADDR_L32 0x54 #define MSI_MESSAGE_ADDR_U32 0x58 diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c index dc3033cf3c19..efc317e7669d 100644 --- a/drivers/pci/dwc/pcie-kirin.c +++ b/drivers/pci/dwc/pcie-kirin.c @@ -490,7 +490,7 @@ static int kirin_pcie_probe(struct platform_device *pdev) return ret; kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node, - "reset-gpio", 0); + "reset-gpios", 0); if (kirin_pcie->gpio_id_reset < 0) return -ENODEV; diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c index 424fdd6ed1ca..16cec66b1d0b 100644 --- a/drivers/pci/endpoint/pci-ep-cfs.c +++ b/drivers/pci/endpoint/pci-ep-cfs.c @@ -109,7 +109,10 @@ static int pci_epc_epf_link(struct config_item *epc_item, goto err_add_epf; func_no = find_first_zero_bit(&epc_group->function_num_map, - sizeof(epc_group->function_num_map)); + BITS_PER_LONG); + if (func_no >= BITS_PER_LONG) + return -EINVAL; + set_bit(func_no, 
&epc_group->function_num_map); epf->func_no = func_no; diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c index 26ed0c08f209..9bfc22b5da4b 100644 --- a/drivers/pci/host/pci-aardvark.c +++ b/drivers/pci/host/pci-aardvark.c @@ -32,6 +32,7 @@ #define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5 #define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11) #define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12 +#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2 #define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0 #define PCIE_CORE_LINK_L0S_ENTRY BIT(0) #define PCIE_CORE_LINK_TRAINING BIT(5) @@ -103,7 +104,8 @@ #define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C) #define PCIE_ISR1_POWER_STATE_CHANGE BIT(4) #define PCIE_ISR1_FLUSH BIT(5) -#define PCIE_ISR1_ALL_MASK GENMASK(5, 4) +#define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val)) +#define PCIE_ISR1_ALL_MASK GENMASK(11, 4) #define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50) #define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) #define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) @@ -175,8 +177,6 @@ #define PCIE_CONFIG_WR_TYPE0 0xa #define PCIE_CONFIG_WR_TYPE1 0xb -/* PCI_BDF shifts 8bit, so we need extra 4bit shift */ -#define PCIE_BDF(dev) (dev << 4) #define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20) #define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15) #define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12) @@ -299,7 +299,8 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE | (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) | PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE | - PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT; + (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ << + PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT); advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG); /* Program PCIe Control 2 to disable strict ordering */ @@ -440,7 +441,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, u32 reg; int ret; - if (PCI_SLOT(devfn) != 0) { + if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) { *val = 0xffffffff; return PCIBIOS_DEVICE_NOT_FOUND; } @@ -459,7 +460,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, advk_writel(pcie, reg, PIO_CTRL); /* Program the address registers */ - reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where); + reg = PCIE_CONF_ADDR(bus->number, devfn, where); advk_writel(pcie, reg, PIO_ADDR_LS); advk_writel(pcie, 0, PIO_ADDR_MS); @@ -494,7 +495,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, int offset; int ret; - if (PCI_SLOT(devfn) != 0) + if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) return PCIBIOS_DEVICE_NOT_FOUND; if (where % size) @@ -612,9 +613,9 @@ static void advk_pcie_irq_mask(struct irq_data *d) irq_hw_number_t hwirq = irqd_to_hwirq(d); u32 mask; - mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); - mask |= PCIE_ISR0_INTX_ASSERT(hwirq); - advk_writel(pcie, mask, PCIE_ISR0_MASK_REG); + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); + mask |= PCIE_ISR1_INTX_ASSERT(hwirq); + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG); } static void advk_pcie_irq_unmask(struct irq_data *d) @@ -623,9 +624,9 @@ static void advk_pcie_irq_unmask(struct irq_data *d) irq_hw_number_t hwirq = irqd_to_hwirq(d); u32 mask; - mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); - mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq); - advk_writel(pcie, mask, PCIE_ISR0_MASK_REG); + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); + mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq); + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG); } static int 
advk_pcie_irq_map(struct irq_domain *h, @@ -768,29 +769,35 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie) static void advk_pcie_handle_int(struct advk_pcie *pcie) { - u32 val, mask, status; + u32 isr0_val, isr0_mask, isr0_status; + u32 isr1_val, isr1_mask, isr1_status; int i, virq; - val = advk_readl(pcie, PCIE_ISR0_REG); - mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); - status = val & ((~mask) & PCIE_ISR0_ALL_MASK); + isr0_val = advk_readl(pcie, PCIE_ISR0_REG); + isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); + isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK); + + isr1_val = advk_readl(pcie, PCIE_ISR1_REG); + isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); + isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK); - if (!status) { - advk_writel(pcie, val, PCIE_ISR0_REG); + if (!isr0_status && !isr1_status) { + advk_writel(pcie, isr0_val, PCIE_ISR0_REG); + advk_writel(pcie, isr1_val, PCIE_ISR1_REG); return; } /* Process MSI interrupts */ - if (status & PCIE_ISR0_MSI_INT_PENDING) + if (isr0_status & PCIE_ISR0_MSI_INT_PENDING) advk_pcie_handle_msi(pcie); /* Process legacy interrupts */ for (i = 0; i < PCI_NUM_INTX; i++) { - if (!(status & PCIE_ISR0_INTX_ASSERT(i))) + if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i))) continue; - advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i), - PCIE_ISR0_REG); + advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i), + PCIE_ISR1_REG); virq = irq_find_mapping(pcie->irq_domain, i); generic_handle_irq(virq); diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 0fe3ea164ee5..ffc87a956d97 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -53,6 +53,8 @@ #include #include #include +#include + #include #include #include @@ -457,7 +459,6 @@ struct hv_pcibus_device { spinlock_t device_list_lock; /* Protect lists below */ void __iomem *cfg_addr; - struct semaphore enum_sem; struct list_head resources_for_children; struct list_head children; @@ -471,6 +472,8 @@ struct hv_pcibus_device { struct retarget_msi_interrupt retarget_msi_interrupt_params; spinlock_t retarget_msi_interrupt_lock; + + struct workqueue_struct *wq; }; /* @@ -530,6 +533,8 @@ struct hv_pci_compl { s32 completion_status; }; +static void hv_pci_onchannelcallback(void *context); + /** * hv_pci_generic_compl() - Invoked for a completion packet * @context: Set up by the sender of the packet. @@ -563,6 +568,26 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev, static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus); static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus); +/* + * There is no good way to get notified from vmbus_onoffer_rescind(), + * so let's use polling here, since this is not a hot path. + */ +static int wait_for_response(struct hv_device *hdev, + struct completion *comp) +{ + while (true) { + if (hdev->channel->rescind) { + dev_warn_once(&hdev->device, "The device is gone.\n"); + return -ENODEV; + } + + if (wait_for_completion_timeout(comp, HZ / 10)) + break; + } + + return 0; +} + /** * devfn_to_wslot() - Convert from Linux PCI slot to Windows * @devfn: The Linux representation of PCI slot @@ -674,6 +699,31 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where, } } +static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev) +{ + u16 ret; + unsigned long flags; + void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + + PCI_VENDOR_ID; + + spin_lock_irqsave(&hpdev->hbus->config_lock, flags); + + /* Choose the function to be read. 
(See comment above) */ + writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); + /* Make sure the function was chosen before we start reading. */ + mb(); + /* Read from that function's config space. */ + ret = readw(addr); + /* + * mb() is not required here, because the spin_unlock_irqrestore() + * is a barrier. + */ + + spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); + + return ret; +} + /** * _hv_pcifront_write_config() - Internal PCI config write * @hpdev: The PCI driver's representation of the device @@ -879,7 +929,7 @@ static void hv_irq_unmask(struct irq_data *data) int cpu; u64 res; - dest = irq_data_get_affinity_mask(data); + dest = irq_data_get_effective_affinity_mask(data); pdev = msi_desc_to_pci_dev(msi_desc); pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); @@ -1042,6 +1092,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct hv_pci_dev *hpdev; struct pci_bus *pbus; struct pci_dev *pdev; + struct cpumask *dest; + unsigned long flags; struct compose_comp_ctxt comp; struct tran_int_desc *int_desc; struct { @@ -1056,6 +1108,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) int ret; pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); + dest = irq_data_get_effective_affinity_mask(data); pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); @@ -1081,14 +1134,14 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) switch (pci_protocol_version) { case PCI_PROTOCOL_VERSION_1_1: size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, - irq_data_get_affinity_mask(data), + dest, hpdev->desc.win_slot.slot, cfg->vector); break; case PCI_PROTOCOL_VERSION_1_2: size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, - irq_data_get_affinity_mask(data), + dest, hpdev->desc.win_slot.slot, cfg->vector); break; @@ -1118,8 +1171,38 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) * Since this function is called with IRQ locks held, can't * do normal wait for completion; instead poll. */ - while (!try_wait_for_completion(&comp.comp_pkt.host_event)) + while (!try_wait_for_completion(&comp.comp_pkt.host_event)) { + /* 0xFFFF means an invalid PCI VENDOR ID. */ + if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) { + dev_err_once(&hbus->hdev->device, + "the device has gone\n"); + goto free_int_desc; + } + + /* + * When the higher level interrupt code calls us with + * interrupt disabled, we must poll the channel by calling + * the channel callback directly when channel->target_cpu is + * the current CPU. When the higher level interrupt code + * calls us with interrupt enabled, let's add the + * local_irq_save()/restore() to avoid race: + * hv_pci_onchannelcallback() can also run in tasklet. 
+ */ + local_irq_save(flags); + + if (hbus->hdev->channel->target_cpu == smp_processor_id()) + hv_pci_onchannelcallback(hbus); + + local_irq_restore(flags); + + if (hpdev->state == hv_pcichild_ejecting) { + dev_err_once(&hbus->hdev->device, + "the device is being ejected\n"); + goto free_int_desc; + } + udelay(100); + } if (comp.comp_pkt.completion_status < 0) { dev_err(&hbus->hdev->device, @@ -1523,24 +1606,14 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus, if (ret) goto error; - wait_for_completion(&comp_pkt.host_event); + if (wait_for_response(hbus->hdev, &comp_pkt.host_event)) + goto error; hpdev->desc = *desc; refcount_set(&hpdev->refs, 1); get_pcichild(hpdev, hv_pcidev_ref_childlist); spin_lock_irqsave(&hbus->device_list_lock, flags); - /* - * When a device is being added to the bus, we set the PCI domain - * number to be the device serial number, which is non-zero and - * unique on the same VM. The serial numbers start with 1, and - * increase by 1 for each device. So device names including this - * can have shorter names than based on the bus instance UUID. - * Only the first device serial number is used for domain, so the - * domain number will not change after the first device is added. - */ - if (list_empty(&hbus->children)) - hbus->sysdata.domain = desc->ser; list_add_tail(&hpdev->list_entry, &hbus->children); spin_unlock_irqrestore(&hbus->device_list_lock, flags); return hpdev; @@ -1602,12 +1675,8 @@ static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus, * It must also treat the omission of a previously observed device as * notification that the device no longer exists. * - * Note that this function is a work item, and it may not be - * invoked in the order that it was queued. Back to back - * updates of the list of present devices may involve queuing - * multiple work items, and this one may run before ones that - * were sent later. As such, this function only does something - * if is the last one in the queue. + * Note that this function is serialized with hv_eject_device_work(), + * because both are pushed to the ordered workqueue hbus->wq. */ static void pci_devices_present_work(struct work_struct *work) { @@ -1628,11 +1697,6 @@ static void pci_devices_present_work(struct work_struct *work) INIT_LIST_HEAD(&removed); - if (down_interruptible(&hbus->enum_sem)) { - put_hvpcibus(hbus); - return; - } - /* Pull this off the queue and process it if it was the last one. 
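 *
 * A minimal sketch of the ordered-workqueue idiom this rework relies on
 * (hypothetical names): alloc_ordered_workqueue() returns a queue that
 * executes at most one work item at a time, so the present-handling and
 * eject work pushed to hbus->wq can never run concurrently:
 *
 *	wq = alloc_ordered_workqueue("demo", 0);
 *	queue_work(wq, &present_wrk);
 *	queue_work(wq, &eject_wrk);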
*/ spin_lock_irqsave(&hbus->device_list_lock, flags); while (!list_empty(&hbus->dr_list)) { @@ -1649,7 +1713,6 @@ static void pci_devices_present_work(struct work_struct *work) spin_unlock_irqrestore(&hbus->device_list_lock, flags); if (!dr) { - up(&hbus->enum_sem); put_hvpcibus(hbus); return; } @@ -1736,7 +1799,6 @@ static void pci_devices_present_work(struct work_struct *work) break; } - up(&hbus->enum_sem); put_hvpcibus(hbus); kfree(dr); } @@ -1782,7 +1844,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus, spin_unlock_irqrestore(&hbus->device_list_lock, flags); get_hvpcibus(hbus); - schedule_work(&dr_wrk->wrk); + queue_work(hbus->wq, &dr_wrk->wrk); } /** @@ -1860,7 +1922,7 @@ static void hv_pci_eject_device(struct hv_pci_dev *hpdev) get_pcichild(hpdev, hv_pcidev_ref_pnp); INIT_WORK(&hpdev->wrk, hv_eject_device_work); get_hvpcibus(hpdev->hbus); - schedule_work(&hpdev->wrk); + queue_work(hpdev->hbus->wq, &hpdev->wrk); } /** @@ -2027,15 +2089,16 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev) sizeof(struct pci_version_request), (unsigned long)pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (!ret) + ret = wait_for_response(hdev, &comp_pkt.host_event); + if (ret) { dev_err(&hdev->device, - "PCI Pass-through VSP failed sending version reqquest: %#x", + "PCI Pass-through VSP failed to request version: %d", ret); goto exit; } - wait_for_completion(&comp_pkt.host_event); - if (comp_pkt.completion_status >= 0) { pci_protocol_version = pci_protocol_versions[i]; dev_info(&hdev->device, @@ -2244,11 +2307,12 @@ static int hv_pci_enter_d0(struct hv_device *hdev) ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry), (unsigned long)pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (!ret) + ret = wait_for_response(hdev, &comp_pkt.host_event); + if (ret) goto exit; - wait_for_completion(&comp_pkt.host_event); - if (comp_pkt.completion_status < 0) { dev_err(&hdev->device, "PCI Pass-through VSP failed D0 Entry with status %x\n", @@ -2288,11 +2352,10 @@ static int hv_pci_query_relations(struct hv_device *hdev) ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message), 0, VM_PKT_DATA_INBAND, 0); - if (ret) - return ret; + if (!ret) + ret = wait_for_response(hdev, &comp); - wait_for_completion(&comp); - return 0; + return ret; } /** @@ -2362,11 +2425,11 @@ static int hv_send_resources_allocated(struct hv_device *hdev) size_res, (unsigned long)pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (!ret) + ret = wait_for_response(hdev, &comp_pkt.host_event); if (ret) break; - wait_for_completion(&comp_pkt.host_event); - if (comp_pkt.completion_status < 0) { ret = -EPROTO; dev_err(&hdev->device, @@ -2473,13 +2536,18 @@ static int hv_pci_probe(struct hv_device *hdev, spin_lock_init(&hbus->config_lock); spin_lock_init(&hbus->device_list_lock); spin_lock_init(&hbus->retarget_msi_interrupt_lock); - sema_init(&hbus->enum_sem, 1); init_completion(&hbus->remove_event); + hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0, + hbus->sysdata.domain); + if (!hbus->wq) { + ret = -ENOMEM; + goto free_bus; + } ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0, hv_pci_onchannelcallback, hbus); if (ret) - goto free_bus; + goto destroy_wq; hv_set_drvdata(hdev, hbus); @@ -2548,6 +2616,8 @@ static int hv_pci_probe(struct hv_device *hdev, hv_free_config_window(hbus); close: vmbus_close(hdev->channel); +destroy_wq: + destroy_workqueue(hbus->wq); free_bus: free_page((unsigned 
long)hbus); return ret; @@ -2627,6 +2697,7 @@ static int hv_pci_remove(struct hv_device *hdev) irq_domain_free_fwnode(hbus->sysdata.fwnode); put_hvpcibus(hbus); wait_for_completion(&hbus->remove_event); + destroy_workqueue(hbus->wq); free_page((unsigned long)hbus); return 0; } diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index 087645116ecb..c78fd9c2cf8c 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -686,7 +686,6 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) bus = bridge->bus; - pci_scan_child_bus(bus); pci_assign_unassigned_bus_resources(bus); list_for_each_entry(child, &bus->children, node) pcie_bus_configure_settings(child); diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c index a5073a921a04..32228d41f746 100644 --- a/drivers/pci/host/pcie-iproc-platform.c +++ b/drivers/pci/host/pcie-iproc-platform.c @@ -92,6 +92,13 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) pcie->need_ob_cfg = true; } + /* + * DT nodes are not used by all platforms that use the iProc PCIe + * core driver. For platforms that require explicit inbound mapping + * configuration, "dma-ranges" would have been present in DT + */ + pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges"); + /* PHY use is optional */ pcie->phy = devm_phy_get(dev, "pcie-phy"); if (IS_ERR(pcie->phy)) { diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c index 3a8b9d20ee57..c0ecc9f35667 100644 --- a/drivers/pci/host/pcie-iproc.c +++ b/drivers/pci/host/pcie-iproc.c @@ -1396,9 +1396,11 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) } } - ret = iproc_pcie_map_dma_ranges(pcie); - if (ret && ret != -ENOENT) - goto err_power_off_phy; + if (pcie->need_ib_cfg) { + ret = iproc_pcie_map_dma_ranges(pcie); + if (ret && ret != -ENOENT) + goto err_power_off_phy; + } #ifdef CONFIG_ARM pcie->sysdata.private_data = pcie; diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h index a6b55cec9a66..4ac6282f2bfd 100644 --- a/drivers/pci/host/pcie-iproc.h +++ b/drivers/pci/host/pcie-iproc.h @@ -74,6 +74,7 @@ struct iproc_msi; * @ob: outbound mapping related parameters * @ob_map: outbound mapping related parameters specific to the controller * + * @need_ib_cfg: indicates SW needs to configure the inbound mapping window * @ib: inbound mapping related parameters * @ib_map: outbound mapping region related parameters * @@ -101,6 +102,7 @@ struct iproc_pcie { struct iproc_pcie_ob ob; const struct iproc_pcie_ob_map *ob_map; + bool need_ib_cfg; struct iproc_pcie_ib ib; const struct iproc_pcie_ib_map *ib_map; diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c index 4e0b25d09b0c..8f44a7d14bff 100644 --- a/drivers/pci/host/pcie-rcar.c +++ b/drivers/pci/host/pcie-rcar.c @@ -1141,17 +1141,19 @@ static int rcar_pcie_probe(struct platform_device *pdev) INIT_LIST_HEAD(&pcie->resources); - rcar_pcie_parse_request_of_pci_ranges(pcie); + err = rcar_pcie_parse_request_of_pci_ranges(pcie); + if (err) + goto err_free_bridge; err = rcar_pcie_get_resources(pcie); if (err < 0) { dev_err(dev, "failed to request resources: %d\n", err); - goto err_free_bridge; + goto err_free_resource_list; } err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node); if (err) - goto err_free_bridge; + goto err_free_resource_list; pm_runtime_enable(dev); err = pm_runtime_get_sync(dev); @@ -1194,9 +1196,10 @@ static int rcar_pcie_probe(struct platform_device *pdev) 
err_pm_disable: pm_runtime_disable(dev); +err_free_resource_list: + pci_free_resource_list(&pcie->resources); err_free_bridge: pci_free_host_bridge(bridge); - pci_free_resource_list(&pcie->resources); return err; } diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 5ed2dcaa8e27..711875afdd70 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -558,6 +558,7 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot) { unsigned long long sta = 0; struct acpiphp_func *func; + u32 dvid; list_for_each_entry(func, &slot->funcs, sibling) { if (func->flags & FUNC_HAS_STA) { @@ -568,19 +569,27 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot) if (ACPI_SUCCESS(status) && sta) break; } else { - u32 dvid; - - pci_bus_read_config_dword(slot->bus, - PCI_DEVFN(slot->device, - func->function), - PCI_VENDOR_ID, &dvid); - if (dvid != 0xffffffff) { + if (pci_bus_read_dev_vendor_id(slot->bus, + PCI_DEVFN(slot->device, func->function), + &dvid, 0)) { sta = ACPI_STA_ALL; break; } } } + if (!sta) { + /* + * Check for the slot itself since it may be that the + * ACPI slot is a device below PCIe upstream port so in + * that case it may not even be reachable yet. + */ + if (pci_bus_read_dev_vendor_id(slot->bus, + PCI_DEVFN(slot->device, 0), &dvid, 0)) { + sta = ACPI_STA_ALL; + } + } + return (unsigned int)sta; } diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 06109d40c4ac..e7d6cfaf3865 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -134,7 +134,7 @@ struct controller *pcie_init(struct pcie_device *dev); int pcie_init_notification(struct controller *ctrl); int pciehp_enable_slot(struct slot *p_slot); int pciehp_disable_slot(struct slot *p_slot); -void pcie_enable_notification(struct controller *ctrl); +void pcie_reenable_notification(struct controller *ctrl); int pciehp_power_on_slot(struct slot *slot); void pciehp_power_off_slot(struct slot *slot); void pciehp_get_power_status(struct slot *slot, u8 *status); diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 35d84845d5af..1288289cc85d 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -297,7 +297,7 @@ static int pciehp_resume(struct pcie_device *dev) ctrl = get_service_data(dev); /* reinitialize the chipset's event detection logic */ - pcie_enable_notification(ctrl); + pcie_reenable_notification(ctrl); slot = ctrl->slot; diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index e5d5ce9e3010..46c2ee2caf28 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -676,7 +676,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) return handled; } -void pcie_enable_notification(struct controller *ctrl) +static void pcie_enable_notification(struct controller *ctrl) { u16 cmd, mask; @@ -714,6 +714,17 @@ void pcie_enable_notification(struct controller *ctrl) pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd); } +void pcie_reenable_notification(struct controller *ctrl) +{ + /* + * Clear both Presence and Data Link Layer Changed to make sure + * those events still fire after we have re-enabled them. 
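+ * Both bits are sticky write-one-to-clear status bits; while either
+ * one is left set from before the suspend, the port does not signal a
+ * new interrupt for that event, so a presence or link change that
+ * occurred while notification was disabled could otherwise be lost.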
+ */ + pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA, + PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); + pcie_enable_notification(ctrl); +} + static void pcie_disable_notification(struct controller *ctrl) { u16 mask; @@ -852,6 +863,13 @@ struct controller *pcie_init(struct pcie_device *dev) if (pdev->hotplug_user_indicators) slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP); + /* + * We assume no Thunderbolt controllers support Command Complete events, + * but some controllers falsely claim they do. + */ + if (pdev->is_thunderbolt) + slot_cap |= PCI_EXP_SLTCAP_NCCS; + ctrl->slot_cap = slot_cap; mutex_init(&ctrl->ctrl_lock); init_waitqueue_head(&ctrl->queue); diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index ac41c8be9200..0fd8e164339c 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -162,7 +162,6 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset) pci_device_add(virtfn, virtfn->bus); - pci_bus_add_device(virtfn); sprintf(buf, "virtfn%u", id); rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf); if (rc) @@ -173,6 +172,8 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset) kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE); + pci_bus_add_device(virtfn); + return 0; failed2: diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 496ed9130600..ca3c81a5ea07 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -190,7 +190,7 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag); } -static void __iomem *pci_msix_desc_addr(struct msi_desc *desc) +void __iomem *pci_msix_desc_addr(struct msi_desc *desc) { return desc->mask_base + desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; @@ -294,7 +294,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) } } -void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) +void native_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { struct pci_dev *dev = msi_desc_to_pci_dev(entry); diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index a8da543b3814..4708eb9df71b 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -624,7 +624,7 @@ void acpi_pci_add_bus(struct pci_bus *bus) union acpi_object *obj; struct pci_host_bridge *bridge; - if (acpi_pci_disabled || !bus->bridge) + if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge)) return; acpi_pci_slot_enumerate(bus); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 11bd267fc137..bfa8cc18d92c 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -680,13 +680,6 @@ static int pci_pm_prepare(struct device *dev) { struct device_driver *drv = dev->driver; - /* - * Devices having power.ignore_children set may still be necessary for - * suspending their children in the next phase of device suspend. 
- */ - if (dev->power.ignore_children) - pm_runtime_resume(dev); - if (drv && drv->pm && drv->pm->prepare) { int error = drv->pm->prepare(dev); if (error) @@ -805,6 +798,9 @@ static int pci_pm_suspend_noirq(struct device *dev) pci_prepare_to_sleep(pci_dev); } + dev_dbg(dev, "PCI PM: Suspend power state: %s\n", + pci_power_name(pci_dev->current_state)); + pci_pm_set_unknown_state(pci_dev); /* @@ -968,7 +964,12 @@ static int pci_pm_thaw_noirq(struct device *dev) if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume_early(dev); - pci_update_current_state(pci_dev, PCI_D0); + /* + * pci_restore_state() requires the device to be in D0 (because of MSI + * restoration among other things), so force it into D0 in case the + * driver's "freeze" callbacks put it into a low-power state directly. + */ + pci_set_power_state(pci_dev, PCI_D0); pci_restore_state(pci_dev); if (drv && drv->pm && drv->pm->thaw_noirq) @@ -1159,11 +1160,14 @@ static int pci_pm_runtime_suspend(struct device *dev) int error; /* - * If pci_dev->driver is not set (unbound), the device should - * always remain in D0 regardless of the runtime PM status + * If pci_dev->driver is not set (unbound), we leave the device in D0, + * but it may go to D3cold when the bridge above it runtime suspends. + * Save its config space in case that happens. */ - if (!pci_dev->driver) + if (!pci_dev->driver) { + pci_save_state(pci_dev); return 0; + } if (!pm || !pm->runtime_suspend) return -ENOSYS; @@ -1211,16 +1215,18 @@ static int pci_pm_runtime_resume(struct device *dev) const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; /* - * If pci_dev->driver is not set (unbound), the device should - * always remain in D0 regardless of the runtime PM status + * Restoring config space is necessary even if the device is not bound + * to a driver because although we left it in D0, it may have gone to + * D3cold when the bridge above it runtime suspended. */ + pci_restore_standard_config(pci_dev); + if (!pci_dev->driver) return 0; if (!pm || !pm->runtime_resume) return -ENOSYS; - pci_restore_standard_config(pci_dev); pci_fixup_device(pci_fixup_resume_early, pci_dev); pci_enable_wake(pci_dev, PCI_D0, false); pci_fixup_device(pci_fixup_resume, pci_dev); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 00fa4278c1f4..12016e17e11b 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -305,13 +305,16 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr, if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (!val) { - if (pci_is_enabled(pdev)) - pci_disable_device(pdev); - else - result = -EIO; - } else + device_lock(dev); + if (dev->driver) + result = -EBUSY; + else if (val) result = pci_enable_device(pdev); + else if (pci_is_enabled(pdev)) + pci_disable_device(pdev); + else + result = -EIO; + device_unlock(dev); return result < 0 ? 
result : count; } @@ -723,7 +726,7 @@ static ssize_t driver_override_show(struct device *dev, ssize_t len; device_lock(dev); - len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); + len = sprintf(buf, "%s\n", pdev->driver_override); device_unlock(dev); return len; } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 6078dfc11b11..62a0677b32f1 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1892,7 +1892,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable) EXPORT_SYMBOL(pci_pme_active); /** - * pci_enable_wake - enable PCI device as wakeup event source + * __pci_enable_wake - enable PCI device as wakeup event source * @dev: PCI device affected * @state: PCI state from which device will issue wakeup events * @enable: True to enable event generation; false to disable @@ -1910,7 +1910,7 @@ EXPORT_SYMBOL(pci_pme_active); * Error code depending on the platform is returned if both the platform and * the native mechanism fail to enable the generation of wake-up events */ -int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) +static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) { int ret = 0; @@ -1951,6 +1951,23 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) return ret; } + +/** + * pci_enable_wake - change wakeup settings for a PCI device + * @pci_dev: Target device + * @state: PCI state from which device will issue wakeup events + * @enable: Whether or not to enable event generation + * + * If @enable is set, check device_may_wakeup() for the device before calling + * __pci_enable_wake() for it. + */ +int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable) +{ + if (enable && !device_may_wakeup(&pci_dev->dev)) + return -EINVAL; + + return __pci_enable_wake(pci_dev, state, enable); +} EXPORT_SYMBOL(pci_enable_wake); /** @@ -1963,9 +1980,9 @@ EXPORT_SYMBOL(pci_enable_wake); * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI * ordering constraints. * - * This function only returns error code if the device is not capable of - * generating PME# from both D3_hot and D3_cold, and the platform is unable to - * enable wake-up power for it. + * This function only returns error code if the device is not allowed to wake + * up the system from sleep or it is not capable of generating PME# from both + * D3_hot and D3_cold and the platform is unable to enable wake-up power for it. 
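+ *
+ * A minimal sketch of a driver suspend path built on this helper
+ * (hypothetical driver code, shown only to illustrate the calling
+ * convention):
+ *
+ *	pci_save_state(pdev);
+ *	pci_wake_from_d3(pdev, true);
+ *	pci_set_power_state(pdev, PCI_D3hot);
+ *
+ * With the device_may_wakeup() check now inside pci_enable_wake(), such
+ * a request is rejected with -EINVAL for devices whose wakeup has been
+ * disabled from user space, instead of being armed unconditionally.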
*/ int pci_wake_from_d3(struct pci_dev *dev, bool enable) { @@ -2096,7 +2113,7 @@ int pci_finish_runtime_suspend(struct pci_dev *dev) dev->runtime_d3cold = target_state == PCI_D3cold; - pci_enable_wake(dev, target_state, pci_dev_run_wake(dev)); + __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev)); error = pci_set_power_state(dev, target_state); @@ -2120,16 +2137,16 @@ bool pci_dev_run_wake(struct pci_dev *dev) { struct pci_bus *bus = dev->bus; - if (device_can_wakeup(&dev->dev)) - return true; - if (!dev->pme_support) return false; /* PME-capable in principle, but not from the target power state */ - if (!pci_pme_capable(dev, pci_target_state(dev, false))) + if (!pci_pme_capable(dev, pci_target_state(dev, true))) return false; + if (device_can_wakeup(&dev->dev)) + return true; + while (bus->parent) { struct pci_dev *bridge = bus->self; @@ -4356,6 +4373,10 @@ static bool pci_bus_resetable(struct pci_bus *bus) { struct pci_dev *dev; + + if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)) + return false; + list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || (dev->subordinate && !pci_bus_resetable(dev->subordinate))) diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index fdb02c1f94bb..04cdbe55cbe5 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -180,6 +180,8 @@ static inline void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); } +void __iomem *pci_msix_desc_addr(struct msi_desc *desc); + void pci_realloc_get_opt(char *); static inline int pci_no_d1d2(struct pci_dev *dev) diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 890efcc574cb..744805232155 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -390,7 +390,14 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev, * If the error is reported by an end point, we think this * error is related to the upstream link of the end point. */ - pci_walk_bus(dev->bus, cb, &result_data); + if (state == pci_channel_io_normal) + /* + * the error is non fatal so the bus is ok, just invoke + * the callback for the function that logged the error. + */ + cb(dev, &result_data); + else + pci_walk_bus(dev->bus, cb, &result_data); } return result_data.result; diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 83e4a892b14b..bcb96af284a1 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -453,7 +453,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link, /* Choose the greater of the two T_cmn_mode_rstr_time */ val1 = (upreg->l1ss_cap >> 8) & 0xFF; - val2 = (upreg->l1ss_cap >> 8) & 0xFF; + val2 = (dwreg->l1ss_cap >> 8) & 0xFF; if (val1 > val2) link->l1ss.ctl1 |= val1 << 8; else @@ -658,7 +658,7 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) 0xFF00, link->l1ss.ctl1); /* Program LTR L1.2 threshold in both ports */ - pci_clear_and_set_dword(parent, dw_cap_ptr + PCI_L1SS_CTL1, + pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1, 0xE3FF0000, link->l1ss.ctl1); pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1, 0xE3FF0000, link->l1ss.ctl1); @@ -803,10 +803,14 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) /* * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe - * hierarchies. + * hierarchies. 
Note that some PCIe host implementations omit + * the root ports entirely, in which case a downstream port on + * a switch may become the root of the link state chain for all + * its subordinate endpoints. */ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || - pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) { + pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE || + !pdev->bus->parent->self) { link->root = link; } else { struct pcie_link_state *parent; @@ -1061,7 +1065,8 @@ void pci_disable_link_state(struct pci_dev *pdev, int state) } EXPORT_SYMBOL(pci_disable_link_state); -static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) +static int pcie_aspm_set_policy(const char *val, + const struct kernel_param *kp) { int i; struct pcie_link_state *link; @@ -1088,7 +1093,7 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) return 0; } -static int pcie_aspm_get_policy(char *buffer, struct kernel_param *kp) +static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp) { int i, cnt = 0; for (i = 0; i < ARRAY_SIZE(policy_str); i++) diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index fafdb165dd2e..df290aa58dce 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -226,6 +226,9 @@ static void pcie_pme_work_fn(struct work_struct *work) break; pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); + if (rtsta == (u32) ~0) + break; + if (rtsta & PCI_EXP_RTSTA_PME) { /* * Clear PME status of the port. If there are other @@ -273,7 +276,7 @@ static irqreturn_t pcie_pme_irq(int irq, void *context) spin_lock_irqsave(&data->lock, flags); pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); - if (!(rtsta & PCI_EXP_RTSTA_PME)) { + if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) { spin_unlock_irqrestore(&data->lock, flags); return IRQ_NONE; } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index ff94b69738a8..7761d4011a34 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -18,7 +18,6 @@ #include #include #include "pci.h" - #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ #define CARDBUS_RESERVE_BUSNR 3 @@ -41,6 +40,70 @@ struct pci_domain_busn_res { int domain_nr; }; +#define PCI_IGNORE_MAX 8 + +static u16 devices_ignore_table[PCI_IGNORE_MAX]; +static int devices_ignore_cnt; + +static void parse_ignore_device(char *bdf_str) +{ + int fields; + unsigned int bus; + unsigned int dev; + unsigned int func; + + if (devices_ignore_cnt >= PCI_IGNORE_MAX - 1) + return; + + fields = sscanf(bdf_str, "%x:%x:%x", &bus, &dev, &func); + if (fields != 3) + return; + + devices_ignore_table[devices_ignore_cnt++] = + PCI_DEVID(bus, PCI_DEVFN(dev, func)); +} + +static int __init pci_deivces_ignore(char *str) +{ + int len; + char *start, *end; + char bdf[16]; + + devices_ignore_cnt = 0; + + while ((start = strchr(str, '('))) { + + end = strchr(start, ')'); + if (end == NULL) + break; + + len = end - start - 1; + if (len >= 16) /*invalid string*/ + break; + + memcpy((void *)bdf, (void *)(start+1), len); + bdf[len] = '\0'; + parse_ignore_device(bdf); + str = end + 1; + } + + return 1; +} +__setup("pci_devices_ignore=", pci_deivces_ignore); + +static bool device_on_ignore_list(int bus, int dev, int func) +{ + int i; + + for (i = 0; i < devices_ignore_cnt; i++) + if ((PCI_BUS_NUM(devices_ignore_table[i]) == bus) && + (PCI_SLOT(devices_ignore_table[i]) == dev) && + (PCI_FUNC(devices_ignore_table[i]) == func)) + return true; + + return false; +} + static struct resource *get_pci_domain_busn_res(int 
domain_nr) { struct pci_domain_busn_res *r; @@ -516,12 +579,14 @@ static void devm_pci_release_host_bridge_dev(struct device *dev) if (bridge->release_fn) bridge->release_fn(bridge); + + pci_free_resource_list(&bridge->windows); } static void pci_release_host_bridge_dev(struct device *dev) { devm_pci_release_host_bridge_dev(dev); - pci_free_host_bridge(to_pci_host_bridge(dev)); + kfree(to_pci_host_bridge(dev)); } struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) @@ -1076,7 +1141,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) child = pci_add_new_bus(bus, dev, max+1); if (!child) goto out; - pci_bus_insert_busn_res(child, max+1, 0xff); + pci_bus_insert_busn_res(child, max+1, + bus->busn_res.end); } max++; buses = (buses & 0xff000000) @@ -2133,6 +2199,11 @@ struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn) return dev; } + if (device_on_ignore_list(bus->number, + PCI_SLOT(devfn), + PCI_FUNC(devfn))) + return NULL; + dev = pci_scan_device(bus, devfn); if (!dev) return NULL; @@ -2433,6 +2504,10 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus) if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) { if (max - bus->busn_res.start < pci_hotplug_bus_size - 1) max = bus->busn_res.start + pci_hotplug_bus_size - 1; + + /* Do not allocate more buses than we have room left */ + if (max > bus->busn_res.end) + max = bus->busn_res.end; } /* diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 911b3b65c8b2..ec2911c4ee42 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1636,8 +1636,8 @@ static void quirk_pcie_mch(struct pci_dev *pdev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, quirk_pcie_mch); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch); /* * It's possible for the MSI to get corrupted if shpc and acpi @@ -3412,22 +3412,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE, static void quirk_chelsio_extend_vpd(struct pci_dev *dev) { - pci_set_vpd_size(dev, 8192); + int chip = (dev->device & 0xf000) >> 12; + int func = (dev->device & 0x0f00) >> 8; + int prod = (dev->device & 0x00ff) >> 0; + + /* + * If this is a T3-based adapter, there's a 1KB VPD area at offset + * 0xc00 which contains the preferred VPD values. If this is a T4 or + * later based adapter, the special VPD is at offset 0x400 for the + * Physical Functions (the SR-IOV Virtual Functions have no VPD + * Capabilities). The PCI VPD Access core routines will normally + * compute the size of the VPD by parsing the VPD Data Structure at + * offset 0x000. This will result in silent failures when attempting + * to access these other VPD areas which are beyond those computed + * limits. 
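+ *
+ * As a worked example, a hypothetical device ID of 0x5401 decodes to
+ * chip = 0x5, func = 0x4 and prod = 0x01, so the T4-or-later branch
+ * below applies and the VPD size is set to 2048 bytes.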
+ */ + if (chip == 0x0 && prod >= 0x20) + pci_set_vpd_size(dev, 8192); + else if (chip >= 0x4 && func < 0x8) + pci_set_vpd_size(dev, 2048); } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, + quirk_chelsio_extend_vpd); #ifdef CONFIG_ACPI /* @@ -3872,6 +3879,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123, quirk_dma_func1_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128, + quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130, quirk_dma_func1_alias); @@ -3887,11 +3896,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182, /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0, quirk_dma_func1_alias); +/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220, + quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642, quirk_dma_func1_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645, + quirk_dma_func1_alias); /* https://bugs.gentoo.org/show_bug.cgi?id=497630 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB388_ESD, @@ -4212,17 +4226,32 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) #endif } +static bool pci_quirk_cavium_acs_match(struct pci_dev *dev) +{ + /* + * Effectively selects all downstream ports for whole ThunderX 1 + * family by 0xf800 mask (which represents 8 SoCs), while the lower + * bits of device ID are used to indicate which subdevice is used + * within the SoC. + */ + return (pci_is_pcie(dev) && + (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) && + ((dev->device & 0xf800) == 0xa000)); +} + static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags) { /* - * Cavium devices matching this quirk do not perform peer-to-peer - * with other functions, allowing masking out these bits as if they - * were unimplemented in the ACS capability. + * Cavium root ports don't advertise an ACS capability. 
However, + * the RTL internally implements similar protection as if ACS had + * Request Redirection, Completion Redirection, Source Validation, + * and Upstream Forwarding features enabled. Assert that the + * hardware implements and enables equivalent ACS functionality for + * these flags. */ - acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | - PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); + acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF); - if (!((dev->device >= 0xa000) && (dev->device <= 0xa0ff))) + if (!pci_quirk_cavium_acs_match(dev)) return -ENOTTY; return acs_flags ? 0 : 1; @@ -4348,11 +4377,29 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags) * 0xa290-0xa29f PCI Express Root port #{0-16} * 0xa2e7-0xa2ee PCI Express Root port #{17-24} * + * Mobile chipsets are also affected, 7th & 8th Generation + * Specification update confirms ACS errata 22, status no fix: (7th Generation + * Intel Processor Family I/O for U/Y Platforms and 8th Generation Intel + * Processor Family I/O for U Quad Core Platforms Specification Update, + * August 2017, Revision 002, Document#: 334660-002)[6] + * Device IDs from I/O datasheet: (7th Generation Intel Processor Family I/O + * for U/Y Platforms and 8th Generation Intel ® Processor Family I/O for U + * Quad Core Platforms, Vol 1 of 2, August 2017, Document#: 334658-003)[7] + * + * 0x9d10-0x9d1b PCI Express Root port #{1-12} + * + * The 300 series chipset suffers from the same bug so include those root + * ports here as well. + * + * 0xa32c-0xa343 PCI Express Root port #{0-24} + * * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html + * [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html + * [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html */ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev) { @@ -4362,6 +4409,8 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev) switch (dev->device) { case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */ case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */ + case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */ + case 0xa32c ... 
0xa343: /* 300 series */ return true; } @@ -4782,9 +4831,13 @@ static void quirk_no_ext_tags(struct pci_dev *pdev) pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL); } +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); #ifdef CONFIG_PCI_ATS /* diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 73a03d382590..2fa0dbde36b7 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -19,9 +19,9 @@ static void pci_stop_dev(struct pci_dev *dev) pci_pme_active(dev, false); if (dev->is_added) { + device_release_driver(&dev->dev); pci_proc_detach_device(dev); pci_remove_sysfs_dev_files(dev); - device_release_driver(&dev->dev); dev->is_added = 0; } diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index c3b615c94b4b..8c8caec3a72c 100644 --- a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c @@ -452,17 +452,20 @@ static int socket_insert(struct pcmcia_socket *skt) static int socket_suspend(struct pcmcia_socket *skt) { - if (skt->state & SOCKET_SUSPEND) + if ((skt->state & SOCKET_SUSPEND) && !(skt->state & SOCKET_IN_RESUME)) return -EBUSY; mutex_lock(&skt->ops_mutex); - skt->suspended_state = skt->state; + /* store state on first suspend, but not after spurious wakeups */ + if (!(skt->state & SOCKET_IN_RESUME)) + skt->suspended_state = skt->state; skt->socket = dead_socket; skt->ops->set_socket(skt, &skt->socket); if (skt->ops->suspend) skt->ops->suspend(skt); skt->state |= SOCKET_SUSPEND; + skt->state &= ~SOCKET_IN_RESUME; mutex_unlock(&skt->ops_mutex); return 0; } @@ -475,6 +478,7 @@ static int socket_early_resume(struct pcmcia_socket *skt) skt->ops->set_socket(skt, &skt->socket); if (skt->state & SOCKET_PRESENT) skt->resume_status = socket_setup(skt, resume_delay); + skt->state |= SOCKET_IN_RESUME; mutex_unlock(&skt->ops_mutex); return 0; } @@ -484,7 +488,7 @@ static int socket_late_resume(struct pcmcia_socket *skt) int ret = 0; mutex_lock(&skt->ops_mutex); - skt->state &= ~SOCKET_SUSPEND; + skt->state &= ~(SOCKET_SUSPEND | SOCKET_IN_RESUME); mutex_unlock(&skt->ops_mutex); if (!(skt->state & SOCKET_PRESENT)) { diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h index e86cd6b31773..384629ce48f5 100644 --- a/drivers/pcmcia/cs_internal.h +++ b/drivers/pcmcia/cs_internal.h @@ -70,6 +70,7 @@ struct pccard_resource_ops { /* Flags in socket state */ #define SOCKET_PRESENT 0x0008 #define SOCKET_INUSE 0x0010 +#define SOCKET_IN_RESUME 0x0040 #define SOCKET_SUSPEND 0x0080 #define SOCKET_WIN_REQ(i) (0x0100<<(i)) #define SOCKET_CARDBUS 0x8000 diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c index 1161e11fb3cf..afedb8cd1990 100644 --- a/drivers/phy/allwinner/phy-sun4i-usb.c +++ b/drivers/phy/allwinner/phy-sun4i-usb.c @@ -410,11 +410,13 @@ static bool sun4i_usb_phy0_poll(struct sun4i_usb_phy_data *data) return true; /* - * The A31 companion pmic (axp221) does not generate vbus change - * interrupts when the board is driving vbus, so we must poll + * The A31/A23/A33 companion pmics (AXP221/AXP223) do not + * generate vbus 
change interrupts when the board is driving + * vbus using the N_VBUSEN pin on the pmic, so we must poll * when using the pmic for vbus-det _and_ we're driving vbus. */ - if (data->cfg->type == sun6i_a31_phy && + if ((data->cfg->type == sun6i_a31_phy || + data->cfg->type == sun8i_a33_phy) && data->vbus_power_supply && data->phys[0].regulator_on) return true; @@ -885,7 +887,7 @@ static const struct sun4i_usb_phy_cfg sun7i_a20_cfg = { static const struct sun4i_usb_phy_cfg sun8i_a23_cfg = { .num_phys = 2, - .type = sun4i_a10_phy, + .type = sun6i_a31_phy, .disc_thresh = 3, .phyctl_offset = REG_PHYCTL_A10, .dedicated_clocks = true, diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c index 721a2a1c97ef..a63bba12aee4 100644 --- a/drivers/phy/mediatek/phy-mtk-tphy.c +++ b/drivers/phy/mediatek/phy-mtk-tphy.c @@ -438,9 +438,9 @@ static void u2_phy_instance_init(struct mtk_tphy *tphy, u32 index = instance->index; u32 tmp; - /* switch to USB function. (system register, force ip into usb mode) */ + /* switch to USB function, and enable usb pll */ tmp = readl(com + U3P_U2PHYDTM0); - tmp &= ~P2C_FORCE_UART_EN; + tmp &= ~(P2C_FORCE_UART_EN | P2C_FORCE_SUSPENDM); tmp |= P2C_RG_XCVRSEL_VAL(1) | P2C_RG_DATAIN_VAL(0); writel(tmp, com + U3P_U2PHYDTM0); @@ -500,10 +500,8 @@ static void u2_phy_instance_power_on(struct mtk_tphy *tphy, u32 index = instance->index; u32 tmp; - /* (force_suspendm=0) (let suspendm=1, enable usb 480MHz pll) */ tmp = readl(com + U3P_U2PHYDTM0); - tmp &= ~(P2C_FORCE_SUSPENDM | P2C_RG_XCVRSEL); - tmp &= ~(P2C_RG_DATAIN | P2C_DTM0_PART_MASK); + tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN | P2C_DTM0_PART_MASK); writel(tmp, com + U3P_U2PHYDTM0); /* OTG Enable */ @@ -538,7 +536,6 @@ static void u2_phy_instance_power_off(struct mtk_tphy *tphy, tmp = readl(com + U3P_U2PHYDTM0); tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN); - tmp |= P2C_FORCE_SUSPENDM; writel(tmp, com + U3P_U2PHYDTM0); /* OTG Disable */ @@ -546,18 +543,16 @@ static void u2_phy_instance_power_off(struct mtk_tphy *tphy, tmp &= ~PA6_RG_U2_OTG_VBUSCMP_EN; writel(tmp, com + U3P_USBPHYACR6); - /* let suspendm=0, set utmi into analog power down */ - tmp = readl(com + U3P_U2PHYDTM0); - tmp &= ~P2C_RG_SUSPENDM; - writel(tmp, com + U3P_U2PHYDTM0); - udelay(1); - tmp = readl(com + U3P_U2PHYDTM1); tmp &= ~(P2C_RG_VBUSVALID | P2C_RG_AVALID); tmp |= P2C_RG_SESSEND; writel(tmp, com + U3P_U2PHYDTM1); if (tphy->pdata->avoid_rx_sen_degradation && index) { + tmp = readl(com + U3P_U2PHYDTM0); + tmp &= ~(P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM); + writel(tmp, com + U3P_U2PHYDTM0); + tmp = readl(com + U3D_U2PHYDCR0); tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON; writel(tmp, com + U3D_U2PHYDCR0); diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c index accaaaccb662..6601ad0dfb3a 100644 --- a/drivers/phy/motorola/phy-cpcap-usb.c +++ b/drivers/phy/motorola/phy-cpcap-usb.c @@ -310,7 +310,7 @@ static int cpcap_usb_init_irq(struct platform_device *pdev, int irq, error; irq = platform_get_irq_byname(pdev, name); - if (!irq) + if (irq < 0) return -ENODEV; error = devm_request_threaded_irq(ddata->dev, irq, NULL, diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index a268f4d6f3e9..48a365e303e5 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -395,6 +395,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index) if (ret) return ERR_PTR(-ENODEV); + /* This phy type handled by the usb-phy subsystem for now */ + if (of_device_is_compatible(args.np, "usb-nop-xceiv")) + 
return ERR_PTR(-ENODEV); + mutex_lock(&phy_provider_mutex); phy_provider = of_phy_provider_lookup(args.np); if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) { diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index e17f0351ccc2..2526971f9929 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -751,8 +751,6 @@ static int qcom_qmp_phy_poweroff(struct phy *phy) struct qmp_phy *qphy = phy_get_drvdata(phy); struct qcom_qmp *qmp = qphy->qmp; - clk_disable_unprepare(qphy->pipe_clk); - regulator_bulk_disable(qmp->cfg->num_vregs, qmp->vregs); return 0; @@ -936,6 +934,8 @@ static int qcom_qmp_phy_exit(struct phy *phy) const struct qmp_phy_cfg *cfg = qmp->cfg; int i = cfg->num_clks; + clk_disable_unprepare(qphy->pipe_clk); + /* PHY reset */ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c index 6c575244c0fb..af9b7005a2ba 100644 --- a/drivers/phy/qualcomm/phy-qcom-qusb2.c +++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c @@ -178,6 +178,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy) struct device *dev = &qphy->phy->dev; u8 *val; + /* efuse register is optional */ + if (!qphy->cell) + return; + /* * Read efuse register having TUNE2 parameter's high nibble. * If efuse register shows value as 0x0, or if we fail to find diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c index 43865ef340e2..3afba145f2e6 100644 --- a/drivers/phy/qualcomm/phy-qcom-ufs.c +++ b/drivers/phy/qualcomm/phy-qcom-ufs.c @@ -689,3 +689,8 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy) return 0; } EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off); + +MODULE_AUTHOR("Yaniv Gardi "); +MODULE_AUTHOR("Vivek Gautam "); +MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c index f1b24f18e9b2..b0d10934413f 100644 --- a/drivers/phy/rockchip/phy-rockchip-emmc.c +++ b/drivers/phy/rockchip/phy-rockchip-emmc.c @@ -76,6 +76,10 @@ #define PHYCTRL_OTAPDLYSEL_MASK 0xf #define PHYCTRL_OTAPDLYSEL_SHIFT 0x7 +#define PHYCTRL_IS_CALDONE(x) \ + ((((x) >> PHYCTRL_CALDONE_SHIFT) & \ + PHYCTRL_CALDONE_MASK) == PHYCTRL_CALDONE_DONE) + struct rockchip_emmc_phy { unsigned int reg_offset; struct regmap *reg_base; @@ -90,6 +94,7 @@ static int rockchip_emmc_phy_power(struct phy *phy, bool on_off) unsigned int freqsel = PHYCTRL_FREQSEL_200M; unsigned long rate; unsigned long timeout; + int ret; /* * Keep phyctrl_pdb and phyctrl_endll low to allow @@ -160,17 +165,19 @@ static int rockchip_emmc_phy_power(struct phy *phy, bool on_off) PHYCTRL_PDB_SHIFT)); /* - * According to the user manual, it asks driver to - * wait 5us for calpad busy trimming + * According to the user manual, it asks driver to wait 5us for + * calpad busy trimming. However it is documented that this value is + * PVT(A.K.A process,voltage and temperature) relevant, so some + * failure cases are found which indicates we should be more tolerant + * to calpad busy trimming. 
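+ *
+ * regmap_read_poll_timeout() re-reads the register and re-evaluates
+ * the condition on each value read; a sleep_us of 0 makes it busy-poll,
+ * and it gives up with -ETIMEDOUT once timeout_us (50us here) elapses
+ * without PHYCTRL_IS_CALDONE() becoming true.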
*/ - udelay(5); - regmap_read(rk_phy->reg_base, - rk_phy->reg_offset + GRF_EMMCPHY_STATUS, - &caldone); - caldone = (caldone >> PHYCTRL_CALDONE_SHIFT) & PHYCTRL_CALDONE_MASK; - if (caldone != PHYCTRL_CALDONE_DONE) { - pr_err("rockchip_emmc_phy_power: caldone timeout.\n"); - return -ETIMEDOUT; + ret = regmap_read_poll_timeout(rk_phy->reg_base, + rk_phy->reg_offset + GRF_EMMCPHY_STATUS, + caldone, PHYCTRL_IS_CALDONE(caldone), + 0, 50); + if (ret) { + pr_err("%s: caldone failed, ret=%d\n", __func__, ret); + return ret; } /* Set the frequency of the DLL operation */ diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 4307bf0013e1..63e916d4d069 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -75,14 +75,14 @@ MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match); static struct device_node * tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name) { - /* - * of_find_node_by_name() drops a reference, so make sure to grab one. - */ - struct device_node *np = of_node_get(padctl->dev->of_node); + struct device_node *pads, *np; + + pads = of_get_child_by_name(padctl->dev->of_node, "pads"); + if (!pads) + return NULL; - np = of_find_node_by_name(np, "pads"); - if (np) - np = of_find_node_by_name(np, name); + np = of_get_child_by_name(pads, name); + of_node_put(pads); return np; } @@ -90,16 +90,16 @@ tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name) static struct device_node * tegra_xusb_pad_find_phy_node(struct tegra_xusb_pad *pad, unsigned int index) { - /* - * of_find_node_by_name() drops a reference, so make sure to grab one. - */ - struct device_node *np = of_node_get(pad->dev.of_node); + struct device_node *np, *lanes; - np = of_find_node_by_name(np, "lanes"); - if (!np) + lanes = of_get_child_by_name(pad->dev.of_node, "lanes"); + if (!lanes) return NULL; - return of_find_node_by_name(np, pad->soc->lanes[index].name); + np = of_get_child_by_name(lanes, pad->soc->lanes[index].name); + of_node_put(lanes); + + return np; } static int @@ -195,7 +195,7 @@ int tegra_xusb_pad_register(struct tegra_xusb_pad *pad, unsigned int i; int err; - children = of_find_node_by_name(pad->dev.of_node, "lanes"); + children = of_get_child_by_name(pad->dev.of_node, "lanes"); if (!children) return -ENODEV; @@ -444,21 +444,21 @@ static struct device_node * tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type, unsigned int index) { - /* - * of_find_node_by_name() drops a reference, so make sure to grab one. 
- */ - struct device_node *np = of_node_get(padctl->dev->of_node); + struct device_node *ports, *np; + char *name; - np = of_find_node_by_name(np, "ports"); - if (np) { - char *name; + ports = of_get_child_by_name(padctl->dev->of_node, "ports"); + if (!ports) + return NULL; - name = kasprintf(GFP_KERNEL, "%s-%u", type, index); - if (!name) - return ERR_PTR(-ENOMEM); - np = of_find_node_by_name(np, name); - kfree(name); + name = kasprintf(GFP_KERNEL, "%s-%u", type, index); + if (!name) { + of_node_put(ports); + return ERR_PTR(-ENOMEM); } + np = of_get_child_by_name(ports, name); + kfree(name); + of_node_put(ports); return np; } @@ -847,7 +847,7 @@ static void tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl) static int tegra_xusb_padctl_probe(struct platform_device *pdev) { - struct device_node *np = of_node_get(pdev->dev.of_node); + struct device_node *np = pdev->dev.of_node; const struct tegra_xusb_padctl_soc *soc; struct tegra_xusb_padctl *padctl; const struct of_device_id *match; @@ -855,7 +855,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev) int err; /* for backwards compatibility with old device trees */ - np = of_find_node_by_name(np, "pads"); + np = of_get_child_by_name(np, "pads"); if (!np) { dev_warn(&pdev->dev, "deprecated DT, using legacy driver\n"); return tegra_xusb_padctl_legacy_probe(pdev); diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 82cd8b08d71f..a73c794bed03 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -33,7 +33,8 @@ config DEBUG_PINCTRL config PINCTRL_ADI2 bool "ADI pin controller driver" - depends on BLACKFIN + depends on (BF54x || BF60x) + depends on !GPIO_ADI select PINMUX select IRQ_DOMAIN help diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 56fbe4c3e800..c55517312485 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1189,19 +1189,16 @@ struct pinctrl_state *pinctrl_lookup_state(struct pinctrl *p, EXPORT_SYMBOL_GPL(pinctrl_lookup_state); /** - * pinctrl_select_state() - select/activate/program a pinctrl state to HW + * pinctrl_commit_state() - select/activate/program a pinctrl state to HW * @p: the pinctrl handle for the device that requests configuration * @state: the state handle to select/activate/program */ -int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state) +static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state) { struct pinctrl_setting *setting, *setting2; struct pinctrl_state *old_state = p->state; int ret; - if (p->state == state) - return 0; - if (p->state) { /* * For each pinmux setting in the old state, forget SW's record @@ -1265,6 +1262,19 @@ int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state) return ret; } + +/** + * pinctrl_select_state() - select/activate/program a pinctrl state to HW + * @p: the pinctrl handle for the device that requests configuration + * @state: the state handle to select/activate/program + */ +int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state) +{ + if (p->state == state) + return 0; + + return pinctrl_commit_state(p, state); +} EXPORT_SYMBOL_GPL(pinctrl_select_state); static void devm_pinctrl_release(struct device *dev, void *res) @@ -1430,7 +1440,7 @@ void pinctrl_unregister_map(const struct pinctrl_map *map) int pinctrl_force_sleep(struct pinctrl_dev *pctldev) { if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_sleep)) - return pinctrl_select_state(pctldev->p, pctldev->hog_sleep); + return pinctrl_commit_state(pctldev->p, 
pctldev->hog_sleep); return 0; } EXPORT_SYMBOL_GPL(pinctrl_force_sleep); @@ -1442,7 +1452,7 @@ EXPORT_SYMBOL_GPL(pinctrl_force_sleep); int pinctrl_force_default(struct pinctrl_dev *pctldev) { if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_default)) - return pinctrl_select_state(pctldev->p, pctldev->hog_default); + return pinctrl_commit_state(pctldev->p, pctldev->hog_default); return 0; } EXPORT_SYMBOL_GPL(pinctrl_force_default); diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index 1ff6c3573493..c4aa411f5935 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -101,10 +101,11 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np) } static int dt_to_map_one_config(struct pinctrl *p, - struct pinctrl_dev *pctldev, + struct pinctrl_dev *hog_pctldev, const char *statename, struct device_node *np_config) { + struct pinctrl_dev *pctldev = NULL; struct device_node *np_pctldev; const struct pinctrl_ops *ops; int ret; @@ -122,8 +123,12 @@ static int dt_to_map_one_config(struct pinctrl *p, /* OK let's just assume this will appear later then */ return -EPROBE_DEFER; } - if (!pctldev) - pctldev = get_pinctrl_dev_from_of_node(np_pctldev); + /* If we're creating a hog we can use the passed pctldev */ + if (hog_pctldev && (np_pctldev == p->dev->of_node)) { + pctldev = hog_pctldev; + break; + } + pctldev = get_pinctrl_dev_from_of_node(np_pctldev); if (pctldev) break; /* Do not defer probing of hogs (circular loop) */ diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 0f3a02495aeb..beeb7cbb5015 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -46,6 +46,9 @@ #define BYT_TRIG_POS BIT(25) #define BYT_TRIG_LVL BIT(24) #define BYT_DEBOUNCE_EN BIT(20) +#define BYT_GLITCH_FILTER_EN BIT(19) +#define BYT_GLITCH_F_SLOW_CLK BIT(17) +#define BYT_GLITCH_F_FAST_CLK BIT(16) #define BYT_PULL_STR_SHIFT 9 #define BYT_PULL_STR_MASK (3 << BYT_PULL_STR_SHIFT) #define BYT_PULL_STR_2K (0 << BYT_PULL_STR_SHIFT) @@ -1579,6 +1582,9 @@ static int byt_irq_type(struct irq_data *d, unsigned int type) */ value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); + /* Enable glitch filtering */ + value |= BYT_GLITCH_FILTER_EN | BYT_GLITCH_F_SLOW_CLK | + BYT_GLITCH_F_FAST_CLK; writel(value, reg); diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index fadbca907c7c..28d0a145efdb 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1620,6 +1620,22 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) clear_bit(i, chip->irq_valid_mask); } + /* + * The same set of machines in chv_no_valid_mask[] have incorrectly + * configured GPIOs that generate spurious interrupts so we use + * this same list to apply another quirk for them. + * + * See also https://bugzilla.kernel.org/show_bug.cgi?id=197953. + */ + if (!need_valid_mask) { + /* + * Mask all interrupts the community is able to generate + * but leave the ones that can only generate GPEs unmasked. + */ + chv_writel(GENMASK(31, pctrl->community->nirqs), + pctrl->regs + CHV_INTMASK); + } +
/* Clear all interrupts */ chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); diff --git a/drivers/pinctrl/intel/pinctrl-denverton.c b/drivers/pinctrl/intel/pinctrl-denverton.c index 4500880240f2..6572550cfe78 100644 --- a/drivers/pinctrl/intel/pinctrl-denverton.c +++ b/drivers/pinctrl/intel/pinctrl-denverton.c @@ -207,7 +207,7 @@ static const unsigned int dnv_uart0_pins[] = { 60, 61, 64, 65 }; static const unsigned int dnv_uart0_modes[] = { 2, 3, 1, 1 }; static const unsigned int dnv_uart1_pins[] = { 94, 95, 96, 97 }; static const unsigned int dnv_uart2_pins[] = { 60, 61, 62, 63 }; -static const unsigned int dnv_uart2_modes[] = { 1, 1, 2, 2 }; +static const unsigned int dnv_uart2_modes[] = { 1, 2, 2, 2 }; static const unsigned int dnv_emmc_pins[] = { 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, }; diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 71df0f70b61f..3761fd29100f 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -30,8 +30,6 @@ #define PADBAR 0x00c #define GPI_IS 0x100 -#define GPI_GPE_STS 0x140 -#define GPI_GPE_EN 0x160 #define PADOWN_BITS 4 #define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS) @@ -818,7 +816,7 @@ static void intel_gpio_irq_ack(struct irq_data *d) community = intel_get_community(pctrl, pin); if (community) { const struct intel_padgroup *padgrp; - unsigned gpp, gpp_offset; + unsigned gpp, gpp_offset, is_offset; padgrp = intel_community_get_padgroup(community, pin); if (!padgrp) @@ -826,9 +824,10 @@ static void intel_gpio_irq_ack(struct irq_data *d) gpp = padgrp->reg_num; gpp_offset = padgroup_offset(padgrp, pin); + is_offset = community->is_offset + gpp * 4; raw_spin_lock(&pctrl->lock); - writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4); + writel(BIT(gpp_offset), community->regs + is_offset); raw_spin_unlock(&pctrl->lock); } } @@ -843,7 +842,7 @@ static void intel_gpio_irq_enable(struct irq_data *d) community = intel_get_community(pctrl, pin); if (community) { const struct intel_padgroup *padgrp; - unsigned gpp, gpp_offset; + unsigned gpp, gpp_offset, is_offset; unsigned long flags; u32 value; @@ -853,10 +852,11 @@ static void intel_gpio_irq_enable(struct irq_data *d) gpp = padgrp->reg_num; gpp_offset = padgroup_offset(padgrp, pin); + is_offset = community->is_offset + gpp * 4; raw_spin_lock_irqsave(&pctrl->lock, flags); /* Clear interrupt status first to avoid unexpected interrupt */ - writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4); + writel(BIT(gpp_offset), community->regs + is_offset); value = readl(community->regs + community->ie_offset + gpp * 4); value |= BIT(gpp_offset); @@ -991,7 +991,8 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl, const struct intel_padgroup *padgrp = &community->gpps[gpp]; unsigned long pending, enabled, gpp_offset; - pending = readl(community->regs + GPI_IS + padgrp->reg_num * 4); + pending = readl(community->regs + community->is_offset + + padgrp->reg_num * 4); enabled = readl(community->regs + community->ie_offset + padgrp->reg_num * 4); @@ -1241,6 +1242,9 @@ int intel_pinctrl_probe(struct platform_device *pdev, community->regs = regs; community->pad_regs = regs + padbar; + if (!community->is_offset) + community->is_offset = GPI_IS; +
ret = intel_pinctrl_add_padgroups(pctrl, community); if (ret) return ret; @@ -1356,7 +1360,7 @@ static void intel_gpio_irq_init(struct intel_pinctrl *pctrl) for (gpp = 0; gpp < community->ngpps; gpp++) { /* Mask and clear all interrupts */ writel(0, base + community->ie_offset + gpp * 4); - writel(0xffff, base + GPI_IS + gpp * 4); + writel(0xffff, base + community->is_offset + gpp * 4); } } } diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h index 7fdb07753c2d..13b0bd6eb2a2 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.h +++ b/drivers/pinctrl/intel/pinctrl-intel.h @@ -73,6 +73,8 @@ struct intel_padgroup { * @hostown_offset: Register offset of HOSTSW_OWN from @regs. If %0 then it * is assumed that the host owns the pin (rather than * ACPI). + * @is_offset: Register offset of GPI_IS from @regs. If %0 then uses the + * default (%0x100). * @ie_offset: Register offset of GPI_IE from @regs. * @pin_base: Starting pin of pins in this community * @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK, @@ -98,6 +100,7 @@ struct intel_community { unsigned padown_offset; unsigned padcfglock_offset; unsigned hostown_offset; + unsigned is_offset; unsigned ie_offset; unsigned pin_base; unsigned gpp_size; diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index 71b944748304..c5fe7d4a9065 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -408,12 +408,21 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip, { struct armada_37xx_pinctrl *info = gpiochip_get_data(chip); unsigned int reg = OUTPUT_EN; - unsigned int mask; + unsigned int mask, val, ret; armada_37xx_update_reg(®, offset); mask = BIT(offset); - return regmap_update_bits(info->regmap, reg, mask, mask); + ret = regmap_update_bits(info->regmap, reg, mask, mask); + + if (ret) + return ret; + + reg = OUTPUT_VAL; + val = value ? 
mask : 0; + regmap_update_bits(info->regmap, reg, mask, val); + + return 0; } static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset) diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c index b1ca838dd80a..e61e2f8c91ce 100644 --- a/drivers/pinctrl/pinctrl-at91-pio4.c +++ b/drivers/pinctrl/pinctrl-at91-pio4.c @@ -576,8 +576,10 @@ static int atmel_pctl_dt_node_to_map(struct pinctrl_dev *pctldev, for_each_child_of_node(np_config, np) { ret = atmel_pctl_dt_subnode_to_map(pctldev, np, map, &reserved_maps, num_maps); - if (ret < 0) + if (ret < 0) { + of_node_put(np); break; + } } } diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index 9c950bbf07ba..db9cca4a83ff 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c @@ -779,6 +779,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, { int status, ret; bool mirror = false; + struct regmap_config *one_regmap_config = NULL; mutex_init(&mcp->lock); @@ -799,22 +800,36 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, switch (type) { #ifdef CONFIG_SPI_MASTER case MCP_TYPE_S08: - mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, - &mcp23x08_regmap); - mcp->reg_shift = 0; - mcp->chip.ngpio = 8; - mcp->chip.label = "mcp23s08"; - break; - case MCP_TYPE_S17: + switch (type) { + case MCP_TYPE_S08: + one_regmap_config = + devm_kmemdup(dev, &mcp23x08_regmap, + sizeof(struct regmap_config), GFP_KERNEL); + mcp->reg_shift = 0; + mcp->chip.ngpio = 8; + mcp->chip.label = "mcp23s08"; + break; + case MCP_TYPE_S17: + one_regmap_config = + devm_kmemdup(dev, &mcp23x17_regmap, + sizeof(struct regmap_config), GFP_KERNEL); + mcp->reg_shift = 1; + mcp->chip.ngpio = 16; + mcp->chip.label = "mcp23s17"; + break; + } + if (!one_regmap_config) + return -ENOMEM; + + one_regmap_config->name = devm_kasprintf(dev, GFP_KERNEL, "%d", (addr & ~0x40) >> 1); mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, - &mcp23x17_regmap); - mcp->reg_shift = 1; - mcp->chip.ngpio = 16; - mcp->chip.label = "mcp23s17"; + one_regmap_config); break; case MCP_TYPE_S18: + if (!one_regmap_config) + return -ENOMEM; mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, &mcp23x17_regmap); mcp->reg_shift = 1; @@ -891,16 +906,16 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, goto fail; } - ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp); - if (ret < 0) - goto fail; - if (mcp->irq && mcp->irq_controller) { ret = mcp23s08_irq_setup(mcp); if (ret) goto fail; } + ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp); + if (ret < 0) + goto fail; + mcp->pinctrl_desc.name = "mcp23xxx-pinctrl"; mcp->pinctrl_desc.pctlops = &mcp_pinctrl_ops; mcp->pinctrl_desc.confops = &mcp_pinconf_ops; diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index b5cb7858ffdc..a9bc1e01f982 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -1989,8 +1989,16 @@ static int rockchip_gpio_get_direction(struct gpio_chip *chip, unsigned offset) { struct rockchip_pin_bank *bank = gpiochip_get_data(chip); u32 data; + int ret; + ret = clk_enable(bank->clk); + if (ret < 0) { + dev_err(bank->drvdata->dev, + "failed to enable clock for bank %s\n", bank->name); + return ret; + } data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR); + clk_disable(bank->clk); return !(data & BIT(offset)); } diff --git a/drivers/pinctrl/pinctrl-sx150x.c 
b/drivers/pinctrl/pinctrl-sx150x.c index 7450f5118445..70a0228f4e7f 100644 --- a/drivers/pinctrl/pinctrl-sx150x.c +++ b/drivers/pinctrl/pinctrl-sx150x.c @@ -1144,6 +1144,27 @@ static int sx150x_probe(struct i2c_client *client, if (ret) return ret; + /* Pinctrl_desc */ + pctl->pinctrl_desc.name = "sx150x-pinctrl"; + pctl->pinctrl_desc.pctlops = &sx150x_pinctrl_ops; + pctl->pinctrl_desc.confops = &sx150x_pinconf_ops; + pctl->pinctrl_desc.pins = pctl->data->pins; + pctl->pinctrl_desc.npins = pctl->data->npins; + pctl->pinctrl_desc.owner = THIS_MODULE; + + ret = devm_pinctrl_register_and_init(dev, &pctl->pinctrl_desc, + pctl, &pctl->pctldev); + if (ret) { + dev_err(dev, "Failed to register pinctrl device\n"); + return ret; + } + + ret = pinctrl_enable(pctl->pctldev); + if (ret) { + dev_err(dev, "Failed to enable pinctrl device\n"); + return ret; + } + /* Register GPIO controller */ pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL); pctl->gpio.base = -1; @@ -1172,6 +1193,11 @@ static int sx150x_probe(struct i2c_client *client, if (ret) return ret; + ret = gpiochip_add_pin_range(&pctl->gpio, dev_name(dev), + 0, 0, pctl->data->npins); + if (ret) + return ret; + /* Add Interrupt support if an irq is specified */ if (client->irq > 0) { pctl->irq_chip.name = devm_kstrdup(dev, client->name, @@ -1217,20 +1243,6 @@ static int sx150x_probe(struct i2c_client *client, client->irq); } - /* Pinctrl_desc */ - pctl->pinctrl_desc.name = "sx150x-pinctrl"; - pctl->pinctrl_desc.pctlops = &sx150x_pinctrl_ops; - pctl->pinctrl_desc.confops = &sx150x_pinconf_ops; - pctl->pinctrl_desc.pins = pctl->data->pins; - pctl->pinctrl_desc.npins = pctl->data->npins; - pctl->pinctrl_desc.owner = THIS_MODULE; - - pctl->pctldev = pinctrl_register(&pctl->pinctrl_desc, dev, pctl); - if (IS_ERR(pctl->pctldev)) { - dev_err(dev, "Failed to register pinctrl device\n"); - return PTR_ERR(pctl->pctldev); - } - return 0; } diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c index 866aa3ce1ac9..6cf0006d4c8d 100644 --- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c +++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c @@ -436,3 +436,7 @@ int pxa2xx_pinctrl_exit(struct platform_device *pdev) return 0; } EXPORT_SYMBOL_GPL(pxa2xx_pinctrl_exit); + +MODULE_AUTHOR("Robert Jarzmik "); +MODULE_DESCRIPTION("Marvell PXA2xx pinctrl driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c index 071084d3ee9c..afeb4876ffb2 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c @@ -110,12 +110,12 @@ static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = { EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38), EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c), EXYNOS_PIN_BANK_EINTG(7, 0x200, "gpg3", 0x40), - EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"), EXYNOS_PIN_BANK_EINTG(8, 0x240, "gpj0", 0x44), EXYNOS_PIN_BANK_EINTG(6, 0x260, "gpj1", 0x48), EXYNOS_PIN_BANK_EINTG(8, 0x280, "gpj2", 0x4c), EXYNOS_PIN_BANK_EINTG(8, 0x2a0, "gpj3", 0x50), EXYNOS_PIN_BANK_EINTG(5, 0x2c0, "gpj4", 0x54), + EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"), EXYNOS_PIN_BANK_EINTN(8, 0x2e0, "mp01"), EXYNOS_PIN_BANK_EINTN(4, 0x300, "mp02"), EXYNOS_PIN_BANK_EINTN(8, 0x320, "mp03"), @@ -129,7 +129,7 @@ static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = { EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gph3", 0x0c), }; -const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = { +static const struct 
samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = s5pv210_pin_bank, @@ -142,6 +142,11 @@ const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data s5pv210_of_data __initconst = { + .ctrl = s5pv210_pin_ctrl, + .num_ctrl = ARRAY_SIZE(s5pv210_pin_ctrl), +}; + /* Pad retention control code for accessing PMU regmap */ static atomic_t exynos_shared_retention_refcnt; @@ -204,7 +209,7 @@ static const struct samsung_retention_data exynos3250_retention_data __initconst * Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes * two gpio/pin-mux/pinconfig controllers. */ -const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = exynos3250_pin_banks0, @@ -225,6 +230,11 @@ const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data exynos3250_of_data __initconst = { + .ctrl = exynos3250_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos3250_pin_ctrl), +}; + /* pin banks of exynos4210 pin-controller 0 */ static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst = { EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), @@ -308,7 +318,7 @@ static const struct samsung_retention_data exynos4_audio_retention_data __initco * Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes * three gpio/pin-mux/pinconfig controllers. */ -const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = exynos4210_pin_banks0, @@ -334,6 +344,11 @@ const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data exynos4210_of_data __initconst = { + .ctrl = exynos4210_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos4210_pin_ctrl), +}; + /* pin banks of exynos4x12 pin-controller 0 */ static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst = { EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), @@ -396,7 +411,7 @@ static const struct samsung_pin_bank_data exynos4x12_pin_banks3[] __initconst = * Samsung pinctrl driver data for Exynos4x12 SoC. Exynos4x12 SoC includes * four gpio/pin-mux/pinconfig controllers. */ -const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = exynos4x12_pin_banks0, @@ -432,6 +447,11 @@ const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data exynos4x12_of_data __initconst = { + .ctrl = exynos4x12_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos4x12_pin_ctrl), +}; + /* pin banks of exynos5250 pin-controller 0 */ static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = { EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), @@ -492,7 +512,7 @@ static const struct samsung_pin_bank_data exynos5250_pin_banks3[] __initconst = * Samsung pinctrl driver data for Exynos5250 SoC. Exynos5250 SoC includes * four gpio/pin-mux/pinconfig controllers. 
*/ -const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = exynos5250_pin_banks0, @@ -528,6 +548,11 @@ const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data exynos5250_of_data __initconst = { + .ctrl = exynos5250_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos5250_pin_ctrl), +}; + /* pin banks of exynos5260 pin-controller 0 */ static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst = { EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00), @@ -572,7 +597,7 @@ static const struct samsung_pin_bank_data exynos5260_pin_banks2[] __initconst = * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes * three gpio/pin-mux/pinconfig controllers. */ -const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = exynos5260_pin_banks0, @@ -592,6 +617,11 @@ const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data exynos5260_of_data __initconst = { + .ctrl = exynos5260_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos5260_pin_ctrl), +}; + /* pin banks of exynos5410 pin-controller 0 */ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = { EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), @@ -605,7 +635,6 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = EXYNOS_PIN_BANK_EINTG(4, 0x100, "gpc3", 0x20), EXYNOS_PIN_BANK_EINTG(7, 0x120, "gpc1", 0x24), EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpc2", 0x28), - EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"), EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpd1", 0x2c), EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpe0", 0x30), EXYNOS_PIN_BANK_EINTG(2, 0x1C0, "gpe1", 0x34), @@ -616,6 +645,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = EXYNOS_PIN_BANK_EINTG(2, 0x260, "gpg2", 0x48), EXYNOS_PIN_BANK_EINTG(4, 0x280, "gph0", 0x4c), EXYNOS_PIN_BANK_EINTG(8, 0x2A0, "gph1", 0x50), + EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"), EXYNOS_PIN_BANK_EINTN(8, 0x2C0, "gpm7"), EXYNOS_PIN_BANK_EINTN(6, 0x2E0, "gpy0"), EXYNOS_PIN_BANK_EINTN(4, 0x300, "gpy1"), @@ -662,7 +692,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks3[] __initconst = * Samsung pinctrl driver data for Exynos5410 SoC. Exynos5410 SoC includes * four gpio/pin-mux/pinconfig controllers. */ -const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = exynos5410_pin_banks0, @@ -695,6 +725,11 @@ const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data exynos5410_of_data __initconst = { + .ctrl = exynos5410_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos5410_pin_ctrl), +}; + /* pin banks of exynos5420 pin-controller 0 */ static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = { EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00), @@ -779,7 +814,7 @@ static const struct samsung_retention_data exynos5420_retention_data __initconst * Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes * four gpio/pin-mux/pinconfig controllers. 
*/ -const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = exynos5420_pin_banks0, @@ -813,3 +848,8 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { .retention_data = &exynos4_audio_retention_data, }, }; + +const struct samsung_pinctrl_of_match_data exynos5420_of_data __initconst = { + .ctrl = exynos5420_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos5420_pin_ctrl), +}; diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c index 08e9fdb58fd2..0ab88fc268ea 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c @@ -180,7 +180,7 @@ static const struct samsung_retention_data exynos5433_fsys_retention_data __init * Samsung pinctrl driver data for Exynos5433 SoC. Exynos5433 SoC includes * ten gpio/pin-mux/pinconfig controllers. */ -const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = { { /* pin-controller instance 0 data */ .pin_banks = exynos5433_pin_banks0, @@ -265,6 +265,11 @@ const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data exynos5433_of_data __initconst = { + .ctrl = exynos5433_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos5433_pin_ctrl), +}; + /* pin banks of exynos7 pin-controller - ALIVE */ static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = { EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), @@ -344,7 +349,7 @@ static const struct samsung_pin_bank_data exynos7_pin_banks9[] __initconst = { EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), }; -const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = { { /* pin-controller instance 0 Alive data */ .pin_banks = exynos7_pin_banks0, @@ -397,3 +402,8 @@ const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = { .eint_gpio_init = exynos_eint_gpio_init, }, }; + +const struct samsung_pinctrl_of_match_data exynos7_of_data __initconst = { + .ctrl = exynos7_pin_ctrl, + .num_ctrl = ARRAY_SIZE(exynos7_pin_ctrl), +}; diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c index edf27264b603..67da1cf18b68 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c @@ -570,7 +570,7 @@ static const struct samsung_pin_bank_data s3c2412_pin_banks[] __initconst = { PIN_BANK_2BIT(13, 0x080, "gpj"), }; -const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = { { .pin_banks = s3c2412_pin_banks, .nr_banks = ARRAY_SIZE(s3c2412_pin_banks), @@ -578,6 +578,11 @@ const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data s3c2412_of_data __initconst = { + .ctrl = s3c2412_pin_ctrl, + .num_ctrl = ARRAY_SIZE(s3c2412_pin_ctrl), +}; + static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = { PIN_BANK_A(27, 0x000, "gpa"), PIN_BANK_2BIT(11, 0x010, "gpb"), @@ -592,7 +597,7 @@ static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = { PIN_BANK_2BIT(2, 0x100, "gpm"), }; -const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = { 
{ .pin_banks = s3c2416_pin_banks, .nr_banks = ARRAY_SIZE(s3c2416_pin_banks), @@ -600,6 +605,11 @@ const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data s3c2416_of_data __initconst = { + .ctrl = s3c2416_pin_ctrl, + .num_ctrl = ARRAY_SIZE(s3c2416_pin_ctrl), +}; + static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = { PIN_BANK_A(25, 0x000, "gpa"), PIN_BANK_2BIT(11, 0x010, "gpb"), @@ -612,7 +622,7 @@ static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = { PIN_BANK_2BIT(13, 0x0d0, "gpj"), }; -const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = { { .pin_banks = s3c2440_pin_banks, .nr_banks = ARRAY_SIZE(s3c2440_pin_banks), @@ -620,6 +630,11 @@ const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = { }, }; +const struct samsung_pinctrl_of_match_data s3c2440_of_data __initconst = { + .ctrl = s3c2440_pin_ctrl, + .num_ctrl = ARRAY_SIZE(s3c2440_pin_ctrl), +}; + static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = { PIN_BANK_A(28, 0x000, "gpa"), PIN_BANK_2BIT(11, 0x010, "gpb"), @@ -635,10 +650,15 @@ static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = { PIN_BANK_2BIT(2, 0x100, "gpm"), }; -const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = { { .pin_banks = s3c2450_pin_banks, .nr_banks = ARRAY_SIZE(s3c2450_pin_banks), .eint_wkup_init = s3c24xx_eint_init, }, }; + +const struct samsung_pinctrl_of_match_data s3c2450_of_data __initconst = { + .ctrl = s3c2450_pin_ctrl, + .num_ctrl = ARRAY_SIZE(s3c2450_pin_ctrl), +}; diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c index e63663b32907..0bdc1e683181 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c @@ -794,7 +794,7 @@ static const struct samsung_pin_bank_data s3c64xx_pin_banks0[] __initconst = { * Samsung pinctrl driver data for S3C64xx SoC. S3C64xx SoC includes * one gpio/pin-mux/pinconfig controller. 
*/ -const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = { +static const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = { { /* pin-controller instance 1 data */ .pin_banks = s3c64xx_pin_banks0, @@ -803,3 +803,8 @@ const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = { .eint_wkup_init = s3c64xx_eint_eint0_init, }, }; + +const struct samsung_pinctrl_of_match_data s3c64xx_of_data __initconst = { + .ctrl = s3c64xx_pin_ctrl, + .num_ctrl = ARRAY_SIZE(s3c64xx_pin_ctrl), +}; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index e04f7fe0a65d..26e8fab736f1 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -947,12 +947,33 @@ static int samsung_gpiolib_register(struct platform_device *pdev, return 0; } +static const struct samsung_pin_ctrl * +samsung_pinctrl_get_soc_data_for_of_alias(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + const struct samsung_pinctrl_of_match_data *of_data; + int id; + + id = of_alias_get_id(node, "pinctrl"); + if (id < 0) { + dev_err(&pdev->dev, "failed to get alias id\n"); + return NULL; + } + + of_data = of_device_get_match_data(&pdev->dev); + if (id >= of_data->num_ctrl) { + dev_err(&pdev->dev, "invalid alias id %d\n", id); + return NULL; + } + + return &(of_data->ctrl[id]); +} + /* retrieve the soc specific data */ static const struct samsung_pin_ctrl * samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, struct platform_device *pdev) { - int id; struct device_node *node = pdev->dev.of_node; struct device_node *np; const struct samsung_pin_bank_data *bdata; @@ -962,13 +983,9 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, void __iomem *virt_base[SAMSUNG_PINCTRL_NUM_RESOURCES]; unsigned int i; - id = of_alias_get_id(node, "pinctrl"); - if (id < 0) { - dev_err(&pdev->dev, "failed to get alias id\n"); + ctrl = samsung_pinctrl_get_soc_data_for_of_alias(pdev); + if (!ctrl) return ERR_PTR(-ENOENT); - } - ctrl = of_device_get_match_data(&pdev->dev); - ctrl += id; d->suspend = ctrl->suspend; d->resume = ctrl->resume; @@ -1193,41 +1210,41 @@ static int __maybe_unused samsung_pinctrl_resume(struct device *dev) static const struct of_device_id samsung_pinctrl_dt_match[] = { #ifdef CONFIG_PINCTRL_EXYNOS_ARM { .compatible = "samsung,exynos3250-pinctrl", - .data = exynos3250_pin_ctrl }, + .data = &exynos3250_of_data }, { .compatible = "samsung,exynos4210-pinctrl", - .data = exynos4210_pin_ctrl }, + .data = &exynos4210_of_data }, { .compatible = "samsung,exynos4x12-pinctrl", - .data = exynos4x12_pin_ctrl }, + .data = &exynos4x12_of_data }, { .compatible = "samsung,exynos5250-pinctrl", - .data = exynos5250_pin_ctrl }, + .data = &exynos5250_of_data }, { .compatible = "samsung,exynos5260-pinctrl", - .data = exynos5260_pin_ctrl }, + .data = &exynos5260_of_data }, { .compatible = "samsung,exynos5410-pinctrl", - .data = exynos5410_pin_ctrl }, + .data = &exynos5410_of_data }, { .compatible = "samsung,exynos5420-pinctrl", - .data = exynos5420_pin_ctrl }, + .data = &exynos5420_of_data }, { .compatible = "samsung,s5pv210-pinctrl", - .data = s5pv210_pin_ctrl }, + .data = &s5pv210_of_data }, #endif #ifdef CONFIG_PINCTRL_EXYNOS_ARM64 { .compatible = "samsung,exynos5433-pinctrl", - .data = exynos5433_pin_ctrl }, + .data = &exynos5433_of_data }, { .compatible = "samsung,exynos7-pinctrl", - .data = exynos7_pin_ctrl }, + .data = &exynos7_of_data }, #endif #ifdef CONFIG_PINCTRL_S3C64XX { .compatible = 
"samsung,s3c64xx-pinctrl", - .data = s3c64xx_pin_ctrl }, + .data = &s3c64xx_of_data }, #endif #ifdef CONFIG_PINCTRL_S3C24XX { .compatible = "samsung,s3c2412-pinctrl", - .data = s3c2412_pin_ctrl }, + .data = &s3c2412_of_data }, { .compatible = "samsung,s3c2416-pinctrl", - .data = s3c2416_pin_ctrl }, + .data = &s3c2416_of_data }, { .compatible = "samsung,s3c2440-pinctrl", - .data = s3c2440_pin_ctrl }, + .data = &s3c2440_of_data }, { .compatible = "samsung,s3c2450-pinctrl", - .data = s3c2450_pin_ctrl }, + .data = &s3c2450_of_data }, #endif {}, }; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h index 9af07af6cad6..ae932e0c05f2 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.h +++ b/drivers/pinctrl/samsung/pinctrl-samsung.h @@ -285,6 +285,16 @@ struct samsung_pinctrl_drv_data { void (*resume)(struct samsung_pinctrl_drv_data *); }; +/** + * struct samsung_pinctrl_of_match_data: OF match device specific configuration data. + * @ctrl: array of pin controller data. + * @num_ctrl: size of array @ctrl. + */ +struct samsung_pinctrl_of_match_data { + const struct samsung_pin_ctrl *ctrl; + unsigned int num_ctrl; +}; + /** * struct samsung_pin_group: represent group of pins of a pinmux function. * @name: name of the pin group, used to lookup the group. @@ -313,20 +323,20 @@ struct samsung_pmx_func { }; /* list of all exported SoC specific data */ -extern const struct samsung_pin_ctrl exynos3250_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos4210_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos4x12_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos5250_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos5260_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos5410_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos5420_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos5433_pin_ctrl[]; -extern const struct samsung_pin_ctrl exynos7_pin_ctrl[]; -extern const struct samsung_pin_ctrl s3c64xx_pin_ctrl[]; -extern const struct samsung_pin_ctrl s3c2412_pin_ctrl[]; -extern const struct samsung_pin_ctrl s3c2416_pin_ctrl[]; -extern const struct samsung_pin_ctrl s3c2440_pin_ctrl[]; -extern const struct samsung_pin_ctrl s3c2450_pin_ctrl[]; -extern const struct samsung_pin_ctrl s5pv210_pin_ctrl[]; +extern const struct samsung_pinctrl_of_match_data exynos3250_of_data; +extern const struct samsung_pinctrl_of_match_data exynos4210_of_data; +extern const struct samsung_pinctrl_of_match_data exynos4x12_of_data; +extern const struct samsung_pinctrl_of_match_data exynos5250_of_data; +extern const struct samsung_pinctrl_of_match_data exynos5260_of_data; +extern const struct samsung_pinctrl_of_match_data exynos5410_of_data; +extern const struct samsung_pinctrl_of_match_data exynos5420_of_data; +extern const struct samsung_pinctrl_of_match_data exynos5433_of_data; +extern const struct samsung_pinctrl_of_match_data exynos7_of_data; +extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data; +extern const struct samsung_pinctrl_of_match_data s3c2412_of_data; +extern const struct samsung_pinctrl_of_match_data s3c2416_of_data; +extern const struct samsung_pinctrl_of_match_data s3c2440_of_data; +extern const struct samsung_pinctrl_of_match_data s3c2450_of_data; +extern const struct samsung_pinctrl_of_match_data s5pv210_of_data; #endif /* __PINCTRL_SAMSUNG_H */ diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c index 10bd35f8c894..c01ef02d326b 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c 
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c @@ -4826,6 +4826,10 @@ static const char * const can0_groups[] = { "can0_data_d", "can0_data_e", "can0_data_f", + /* + * Retained for backwards compatibility, use can_clk_groups in new + * designs. + */ "can_clk", "can_clk_b", "can_clk_c", @@ -4837,6 +4841,21 @@ static const char * const can1_groups[] = { "can1_data_b", "can1_data_c", "can1_data_d", + /* + * Retained for backwards compatibility, use can_clk_groups in new + * designs. + */ + "can_clk", + "can_clk_b", + "can_clk_c", + "can_clk_d", +}; + +/* + * can_clk_groups allows for independent configuration, use can_clk function + * in new designs. + */ +static const char * const can_clk_groups[] = { "can_clk", "can_clk_b", "can_clk_c", @@ -5308,7 +5327,7 @@ static const char * const vin2_groups[] = { }; static const struct { - struct sh_pfc_function common[56]; + struct sh_pfc_function common[57]; struct sh_pfc_function r8a779x[2]; } pinmux_functions = { .common = { @@ -5316,6 +5335,7 @@ static const struct { SH_PFC_FUNCTION(avb), SH_PFC_FUNCTION(can0), SH_PFC_FUNCTION(can1), + SH_PFC_FUNCTION(can_clk), SH_PFC_FUNCTION(du), SH_PFC_FUNCTION(du0), SH_PFC_FUNCTION(du1), diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c index 95fd0994893a..ad037534aa13 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c @@ -1397,7 +1397,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP16_27_24, AUDIO_CLKOUT_B, SEL_ADG_1), PINMUX_IPSR_MSEL(IP16_27_24, SSI_SCK2_B, SEL_SSI_1), PINMUX_IPSR_MSEL(IP16_27_24, TS_SDEN1_D, SEL_TSIF1_3), - PINMUX_IPSR_MSEL(IP16_27_24, STP_ISEN_1_D, SEL_SSP1_1_2), + PINMUX_IPSR_MSEL(IP16_27_24, STP_ISEN_1_D, SEL_SSP1_1_3), PINMUX_IPSR_MSEL(IP16_27_24, STP_OPWM_0_E, SEL_SSP1_0_4), PINMUX_IPSR_MSEL(IP16_27_24, RIF3_D0_B, SEL_DRIF3_1), PINMUX_IPSR_MSEL(IP16_27_24, TCLK2_B, SEL_TIMER_TMU_1), diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c index 200e1f4f6db9..711333fb2c6e 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c @@ -1,7 +1,7 @@ /* * R8A7796 processor support - PFC hardware block. * - * Copyright (C) 2016 Renesas Electronics Corp. + * Copyright (C) 2016-2017 Renesas Electronics Corp. 
* * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7795.c * @@ -477,7 +477,7 @@ FM(IP16_31_28) IP16_31_28 FM(IP17_31_28) IP17_31_28 #define MOD_SEL1_26 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1) #define MOD_SEL1_25_24 FM(SEL_SSP1_1_0) FM(SEL_SSP1_1_1) FM(SEL_SSP1_1_2) FM(SEL_SSP1_1_3) #define MOD_SEL1_23_22_21 FM(SEL_SSP1_0_0) FM(SEL_SSP1_0_1) FM(SEL_SSP1_0_2) FM(SEL_SSP1_0_3) FM(SEL_SSP1_0_4) F_(0, 0) F_(0, 0) F_(0, 0) -#define MOD_SEL1_20 FM(SEL_SSI_0) FM(SEL_SSI_1) +#define MOD_SEL1_20 FM(SEL_SSI1_0) FM(SEL_SSI1_1) #define MOD_SEL1_19 FM(SEL_SPEED_PULSE_0) FM(SEL_SPEED_PULSE_1) #define MOD_SEL1_18_17 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1) FM(SEL_SIMCARD_2) FM(SEL_SIMCARD_3) #define MOD_SEL1_16 FM(SEL_SDHI2_0) FM(SEL_SDHI2_1) @@ -1224,7 +1224,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP13_11_8, HSCK0), PINMUX_IPSR_MSEL(IP13_11_8, MSIOF1_SCK_D, SEL_MSIOF1_3), PINMUX_IPSR_MSEL(IP13_11_8, AUDIO_CLKB_A, SEL_ADG_B_0), - PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA1_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA1_B, SEL_SSI1_1), PINMUX_IPSR_MSEL(IP13_11_8, TS_SCK0_D, SEL_TSIF0_3), PINMUX_IPSR_MSEL(IP13_11_8, STP_ISCLK_0_D, SEL_SSP1_0_3), PINMUX_IPSR_MSEL(IP13_11_8, RIF0_CLK_C, SEL_DRIF0_2), @@ -1232,14 +1232,14 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP13_15_12, HRX0), PINMUX_IPSR_MSEL(IP13_15_12, MSIOF1_RXD_D, SEL_MSIOF1_3), - PINMUX_IPSR_MSEL(IP13_15_12, SSI_SDATA2_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP13_15_12, SSI_SDATA2_B, SEL_SSI2_1), PINMUX_IPSR_MSEL(IP13_15_12, TS_SDEN0_D, SEL_TSIF0_3), PINMUX_IPSR_MSEL(IP13_15_12, STP_ISEN_0_D, SEL_SSP1_0_3), PINMUX_IPSR_MSEL(IP13_15_12, RIF0_D0_C, SEL_DRIF0_2), PINMUX_IPSR_GPSR(IP13_19_16, HTX0), PINMUX_IPSR_MSEL(IP13_19_16, MSIOF1_TXD_D, SEL_MSIOF1_3), - PINMUX_IPSR_MSEL(IP13_19_16, SSI_SDATA9_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP13_19_16, SSI_SDATA9_B, SEL_SSI9_1), PINMUX_IPSR_MSEL(IP13_19_16, TS_SDAT0_D, SEL_TSIF0_3), PINMUX_IPSR_MSEL(IP13_19_16, STP_ISD_0_D, SEL_SSP1_0_3), PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_C, SEL_DRIF0_2), @@ -1247,7 +1247,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP13_23_20, HCTS0_N), PINMUX_IPSR_MSEL(IP13_23_20, RX2_B, SEL_SCIF2_1), PINMUX_IPSR_MSEL(IP13_23_20, MSIOF1_SYNC_D, SEL_MSIOF1_3), - PINMUX_IPSR_MSEL(IP13_23_20, SSI_SCK9_A, SEL_SSI_0), + PINMUX_IPSR_MSEL(IP13_23_20, SSI_SCK9_A, SEL_SSI9_0), PINMUX_IPSR_MSEL(IP13_23_20, TS_SPSYNC0_D, SEL_TSIF0_3), PINMUX_IPSR_MSEL(IP13_23_20, STP_ISSYNC_0_D, SEL_SSP1_0_3), PINMUX_IPSR_MSEL(IP13_23_20, RIF0_SYNC_C, SEL_DRIF0_2), @@ -1256,7 +1256,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP13_27_24, HRTS0_N), PINMUX_IPSR_MSEL(IP13_27_24, TX2_B, SEL_SCIF2_1), PINMUX_IPSR_MSEL(IP13_27_24, MSIOF1_SS1_D, SEL_MSIOF1_3), - PINMUX_IPSR_MSEL(IP13_27_24, SSI_WS9_A, SEL_SSI_0), + PINMUX_IPSR_MSEL(IP13_27_24, SSI_WS9_A, SEL_SSI9_0), PINMUX_IPSR_MSEL(IP13_27_24, STP_IVCXO27_0_D, SEL_SSP1_0_3), PINMUX_IPSR_MSEL(IP13_27_24, BPFCLK_A, SEL_FM_0), PINMUX_IPSR_GPSR(IP13_27_24, AUDIO_CLKOUT2_A), @@ -1271,7 +1271,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP14_3_0, RX5_A, SEL_SCIF5_0), PINMUX_IPSR_MSEL(IP14_3_0, NFWP_N_A, SEL_NDF_0), PINMUX_IPSR_MSEL(IP14_3_0, AUDIO_CLKA_C, SEL_ADG_A_2), - PINMUX_IPSR_MSEL(IP14_3_0, SSI_SCK2_A, SEL_SSI_0), + PINMUX_IPSR_MSEL(IP14_3_0, SSI_SCK2_A, SEL_SSI2_0), PINMUX_IPSR_MSEL(IP14_3_0, STP_IVCXO27_0_C, SEL_SSP1_0_2), PINMUX_IPSR_GPSR(IP14_3_0, AUDIO_CLKOUT3_A), PINMUX_IPSR_MSEL(IP14_3_0, TCLK1_B, SEL_TIMER_TMU_1), @@ -1280,7 +1280,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP14_7_4, 
TX5_A, SEL_SCIF5_0), PINMUX_IPSR_MSEL(IP14_7_4, MSIOF1_SS2_D, SEL_MSIOF1_3), PINMUX_IPSR_MSEL(IP14_7_4, AUDIO_CLKC_A, SEL_ADG_C_0), - PINMUX_IPSR_MSEL(IP14_7_4, SSI_WS2_A, SEL_SSI_0), + PINMUX_IPSR_MSEL(IP14_7_4, SSI_WS2_A, SEL_SSI2_0), PINMUX_IPSR_MSEL(IP14_7_4, STP_OPWM_0_D, SEL_SSP1_0_3), PINMUX_IPSR_GPSR(IP14_7_4, AUDIO_CLKOUT_D), PINMUX_IPSR_MSEL(IP14_7_4, SPEEDIN_B, SEL_SPEED_PULSE_1), @@ -1308,10 +1308,10 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP14_31_28, MSIOF1_SS2_F, SEL_MSIOF1_5), /* IPSR15 */ - PINMUX_IPSR_MSEL(IP15_3_0, SSI_SDATA1_A, SEL_SSI_0), + PINMUX_IPSR_MSEL(IP15_3_0, SSI_SDATA1_A, SEL_SSI1_0), - PINMUX_IPSR_MSEL(IP15_7_4, SSI_SDATA2_A, SEL_SSI_0), - PINMUX_IPSR_MSEL(IP15_7_4, SSI_SCK1_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP15_7_4, SSI_SDATA2_A, SEL_SSI2_0), + PINMUX_IPSR_MSEL(IP15_7_4, SSI_SCK1_B, SEL_SSI1_1), PINMUX_IPSR_GPSR(IP15_11_8, SSI_SCK349), PINMUX_IPSR_MSEL(IP15_11_8, MSIOF1_SS1_A, SEL_MSIOF1_0), @@ -1397,11 +1397,11 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP16_27_24, RIF1_D1_A, SEL_DRIF1_0), PINMUX_IPSR_MSEL(IP16_27_24, RIF3_D1_A, SEL_DRIF3_0), - PINMUX_IPSR_MSEL(IP16_31_28, SSI_SDATA9_A, SEL_SSI_0), + PINMUX_IPSR_MSEL(IP16_31_28, SSI_SDATA9_A, SEL_SSI9_0), PINMUX_IPSR_MSEL(IP16_31_28, HSCK2_B, SEL_HSCIF2_1), PINMUX_IPSR_MSEL(IP16_31_28, MSIOF1_SS1_C, SEL_MSIOF1_2), PINMUX_IPSR_MSEL(IP16_31_28, HSCK1_A, SEL_HSCIF1_0), - PINMUX_IPSR_MSEL(IP16_31_28, SSI_WS1_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP16_31_28, SSI_WS1_B, SEL_SSI1_1), PINMUX_IPSR_GPSR(IP16_31_28, SCK1), PINMUX_IPSR_MSEL(IP16_31_28, STP_IVCXO27_1_A, SEL_SSP1_1_0), PINMUX_IPSR_MSEL(IP16_31_28, SCK5_A, SEL_SCIF5_0), @@ -1433,7 +1433,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP17_19_16, USB1_PWEN), PINMUX_IPSR_MSEL(IP17_19_16, SIM0_CLK_C, SEL_SIMCARD_2), - PINMUX_IPSR_MSEL(IP17_19_16, SSI_SCK1_A, SEL_SSI_0), + PINMUX_IPSR_MSEL(IP17_19_16, SSI_SCK1_A, SEL_SSI1_0), PINMUX_IPSR_MSEL(IP17_19_16, TS_SCK0_E, SEL_TSIF0_4), PINMUX_IPSR_MSEL(IP17_19_16, STP_ISCLK_0_E, SEL_SSP1_0_4), PINMUX_IPSR_MSEL(IP17_19_16, FMCLK_B, SEL_FM_1), @@ -1443,7 +1443,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP17_23_20, USB1_OVC), PINMUX_IPSR_MSEL(IP17_23_20, MSIOF1_SS2_C, SEL_MSIOF1_2), - PINMUX_IPSR_MSEL(IP17_23_20, SSI_WS1_A, SEL_SSI_0), + PINMUX_IPSR_MSEL(IP17_23_20, SSI_WS1_A, SEL_SSI1_0), PINMUX_IPSR_MSEL(IP17_23_20, TS_SDAT0_E, SEL_TSIF0_4), PINMUX_IPSR_MSEL(IP17_23_20, STP_ISD_0_E, SEL_SSP1_0_4), PINMUX_IPSR_MSEL(IP17_23_20, FMIN_B, SEL_FM_1), @@ -1453,7 +1453,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP17_27_24, USB30_PWEN), PINMUX_IPSR_GPSR(IP17_27_24, AUDIO_CLKOUT_B), - PINMUX_IPSR_MSEL(IP17_27_24, SSI_SCK2_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP17_27_24, SSI_SCK2_B, SEL_SSI2_1), PINMUX_IPSR_MSEL(IP17_27_24, TS_SDEN1_D, SEL_TSIF1_3), PINMUX_IPSR_MSEL(IP17_27_24, STP_ISEN_1_D, SEL_SSP1_1_3), PINMUX_IPSR_MSEL(IP17_27_24, STP_OPWM_0_E, SEL_SSP1_0_4), @@ -1465,7 +1465,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP17_31_28, USB30_OVC), PINMUX_IPSR_GPSR(IP17_31_28, AUDIO_CLKOUT1_B), - PINMUX_IPSR_MSEL(IP17_31_28, SSI_WS2_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP17_31_28, SSI_WS2_B, SEL_SSI2_1), PINMUX_IPSR_MSEL(IP17_31_28, TS_SPSYNC1_D, SEL_TSIF1_3), PINMUX_IPSR_MSEL(IP17_31_28, STP_ISSYNC_1_D, SEL_SSP1_1_3), PINMUX_IPSR_MSEL(IP17_31_28, STP_IVCXO27_0_E, SEL_SSP1_0_4), @@ -1476,7 +1476,7 @@ static const u16 pinmux_data[] = { /* IPSR18 */ PINMUX_IPSR_GPSR(IP18_3_0, GP6_30), PINMUX_IPSR_GPSR(IP18_3_0, AUDIO_CLKOUT2_B), - PINMUX_IPSR_MSEL(IP18_3_0, 
SSI_SCK9_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP18_3_0, SSI_SCK9_B, SEL_SSI9_1), PINMUX_IPSR_MSEL(IP18_3_0, TS_SDEN0_E, SEL_TSIF0_4), PINMUX_IPSR_MSEL(IP18_3_0, STP_ISEN_0_E, SEL_SSP1_0_4), PINMUX_IPSR_MSEL(IP18_3_0, RIF2_D0_B, SEL_DRIF2_1), @@ -1486,7 +1486,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP18_7_4, GP6_31), PINMUX_IPSR_GPSR(IP18_7_4, AUDIO_CLKOUT3_B), - PINMUX_IPSR_MSEL(IP18_7_4, SSI_WS9_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP18_7_4, SSI_WS9_B, SEL_SSI9_1), PINMUX_IPSR_MSEL(IP18_7_4, TS_SPSYNC0_E, SEL_TSIF0_4), PINMUX_IPSR_MSEL(IP18_7_4, STP_ISSYNC_0_E, SEL_SSP1_0_4), PINMUX_IPSR_MSEL(IP18_7_4, RIF2_D1_B, SEL_DRIF2_1), diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c index 4f2a726bbaeb..f5f77432ce6f 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c @@ -428,7 +428,7 @@ static const struct sunxi_desc_pin a64_pins[] = { SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */ - SUNXI_FUNCTION(0x4, "uart0")), /* RX */ + SUNXI_FUNCTION(0x3, "uart0")), /* RX */ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c index bc14e954d7a2..b7ca9a40cc66 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c @@ -145,19 +145,19 @@ static const struct sunxi_desc_pin sun9i_a80_pins[] = { SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x3, "mcsi"), /* MCLK */ - SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PB_EINT14 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 14)), /* PB_EINT14 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x3, "mcsi"), /* SCK */ SUNXI_FUNCTION(0x4, "i2c4"), /* SCK */ - SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PB_EINT15 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 15)), /* PB_EINT15 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x3, "mcsi"), /* SDA */ SUNXI_FUNCTION(0x4, "i2c4"), /* SDA */ - SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PB_EINT16 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 16)), /* PB_EINT16 */ /* Hole */ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0), diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index 1baf720faf69..87e9747d229a 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -54,7 +54,6 @@ static int ec_response_timed_out(void) static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec, struct cros_ec_command *msg) { - struct ec_host_request *request; struct ec_host_response response; u8 sum; int ret = 0; @@ -65,8 +64,6 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec, /* Write buffer */ cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout); - request = (struct ec_host_request *)ec->dout; - /* Here we go */ sum = EC_COMMAND_PROTOCOL_3; cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_CMD, 1, &sum); diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index 8dfa7fcb1248..e7bbdf947bbc 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -60,12 +60,14 @@ static int send_command(struct cros_ec_device *ec_dev, struct cros_ec_command *msg) { int ret; + int (*xfer_fxn)(struct cros_ec_device 
*ec, struct cros_ec_command *msg); if (ec_dev->proto_version > 2) - ret = ec_dev->pkt_xfer(ec_dev, msg); + xfer_fxn = ec_dev->pkt_xfer; else - ret = ec_dev->cmd_xfer(ec_dev, msg); + xfer_fxn = ec_dev->cmd_xfer; + ret = (*xfer_fxn)(ec_dev, msg); if (msg->result == EC_RES_IN_PROGRESS) { int i; struct cros_ec_command *status_msg; @@ -88,7 +90,7 @@ static int send_command(struct cros_ec_device *ec_dev, for (i = 0; i < EC_COMMAND_RETRIES; i++) { usleep_range(10000, 11000); - ret = ec_dev->cmd_xfer(ec_dev, status_msg); + ret = (*xfer_fxn)(ec_dev, status_msg); if (ret < 0) break; diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c index f3baf9973989..24f1630a8b3f 100644 --- a/drivers/platform/chrome/cros_ec_sysfs.c +++ b/drivers/platform/chrome/cros_ec_sysfs.c @@ -187,7 +187,7 @@ static ssize_t show_ec_version(struct device *dev, count += scnprintf(buf + count, PAGE_SIZE - count, "Build info: EC error %d\n", msg->result); else { - msg->data[sizeof(msg->data) - 1] = '\0'; + msg->data[EC_HOST_PARAM_SIZE - 1] = '\0'; count += scnprintf(buf + count, PAGE_SIZE - count, "Build info: %s\n", msg->data); } diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 80b87954f6dd..c853d1a48271 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -1139,6 +1139,23 @@ config SILEAD_DMI with the OS-image for the device. This option supplies the missing information. Enable this for x86 tablets with Silead touchscreens. +config INTEL_PSTORE_PRAM + tristate "Intel pstore RAM backend driver (PRAM BIOS feature)" + depends on ACPI + depends on PSTORE_RAM + ---help--- + This driver provides a RAM backend for pstore, managed by the + BIOS as the PRAM (Persisted RAM buffer) debug feature. + + The PRAM BIOS feature is configurable through BIOS setup or the + PRAM_Conf EFI variable (GUID ecb54cd9-e5ae-4fdc-a971-e877756068f7). + The variable accepts the ASCII values 0, 1, 2 and 3, which + configure the PRAM feature as Disabled, 4 MB, 16 MB and + 64 MB respectively. + + It is safe to say Y; the driver will not bind if your BIOS + doesn't support this feature.
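For illustration, the PRAM_Conf variable described in the help text above can be written from user space through efivarfs. The sketch below is not part of the patch: the variable name and GUID are taken from the help text, while the attribute word (EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS = 0x7) and the 4-byte attribute prefix are the standard efivarfs conventions, assumed here rather than stated by the patch.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/*
 * Illustrative sketch: select the 4 MB PRAM configuration by writing
 * ASCII '1' to the PRAM_Conf EFI variable.  efivarfs expects a 4-byte
 * little-endian attribute word to precede the variable payload.
 */
int main(void)
{
	const char *path = "/sys/firmware/efi/efivars/"
			   "PRAM_Conf-ecb54cd9-e5ae-4fdc-a971-e877756068f7";
	uint32_t attrs = 0x7;	/* NON_VOLATILE | BOOTSERVICE | RUNTIME */
	uint8_t buf[sizeof(attrs) + 1];
	int fd;

	memcpy(buf, &attrs, sizeof(attrs));	/* attribute prefix */
	buf[sizeof(attrs)] = '1';	/* "0".."3": Disabled/4 MB/16 MB/64 MB */

	fd = open(path, O_WRONLY | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
		perror("write");
		close(fd);
		return 1;
	}
	return close(fd) == 0 ? 0 : 1;
}

Note that efivarfs marks existing variables immutable by default on recent kernels, so clearing that flag (chattr -i) may be needed before overwriting an already-present PRAM_Conf.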
+ endif # X86_PLATFORM_DEVICES config PMC_ATOM diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index f9e3ae683bbe..9e056fe7d6c0 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -84,3 +84,4 @@ obj-$(CONFIG_PMC_ATOM) += pmc_atom.o obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o obj-$(CONFIG_MLX_CPLD_PLATFORM) += mlxcpld-hotplug.o obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o +obj-$(CONFIG_INTEL_PSTORE_PRAM) += intel_pstore_pram.o diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index 623d322447a2..7c4eb86c851e 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include @@ -54,7 +53,6 @@ struct apple_gmux_data { bool indexed; struct mutex index_lock; - struct pci_dev *pdev; struct backlight_device *bdev; /* switcheroo data */ @@ -599,23 +597,6 @@ static int gmux_resume(struct device *dev) return 0; } -static struct pci_dev *gmux_get_io_pdev(void) -{ - struct pci_dev *pdev = NULL; - - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev))) { - u16 cmd; - - pci_read_config_word(pdev, PCI_COMMAND, &cmd); - if (!(cmd & PCI_COMMAND_IO)) - continue; - - return pdev; - } - - return NULL; -} - static int is_thunderbolt(struct device *dev, void *data) { return to_pci_dev(dev)->is_thunderbolt; @@ -631,7 +612,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) int ret = -ENXIO; acpi_status status; unsigned long long gpe; - struct pci_dev *pdev = NULL; if (apple_gmux_data) return -EBUSY; @@ -682,7 +662,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) ver_minor = (version >> 16) & 0xff; ver_release = (version >> 8) & 0xff; } else { - pr_info("gmux device not present or IO disabled\n"); + pr_info("gmux device not present\n"); ret = -ENODEV; goto err_release; } @@ -690,23 +670,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor, ver_release, (gmux_data->indexed ? "indexed" : "classic")); - /* - * Apple systems with gmux are EFI based and normally don't use - * VGA. In addition changing IO+MEM ownership between IGP and dGPU - * disables IO/MEM used for backlight control on some systems. - * Lock IO+MEM to GPU with active IO to prevent switch. 
- */ - pdev = gmux_get_io_pdev(); - if (pdev && vga_tryget(pdev, - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM)) { - pr_err("IO+MEM vgaarb-locking for PCI:%s failed\n", - pci_name(pdev)); - ret = -EBUSY; - goto err_release; - } else if (pdev) - pr_info("locked IO for PCI:%s\n", pci_name(pdev)); - gmux_data->pdev = pdev; - memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS); @@ -822,10 +785,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) err_notify: backlight_device_unregister(bdev); err_release: - if (gmux_data->pdev) - vga_put(gmux_data->pdev, - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM); - pci_dev_put(pdev); release_region(gmux_data->iostart, gmux_data->iolen); err_free: kfree(gmux_data); @@ -845,11 +804,6 @@ static void gmux_remove(struct pnp_dev *pnp) &gmux_notify_handler); } - if (gmux_data->pdev) { - vga_put(gmux_data->pdev, - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM); - pci_dev_put(gmux_data->pdev); - } backlight_device_unregister(gmux_data->bdev); release_region(gmux_data->iostart, gmux_data->iolen); diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c index f3796164329e..f086469ea740 100644 --- a/drivers/platform/x86/asus-wireless.c +++ b/drivers/platform/x86/asus-wireless.c @@ -118,6 +118,7 @@ static void asus_wireless_notify(struct acpi_device *adev, u32 event) return; } input_report_key(data->idev, KEY_RFKILL, 1); + input_sync(data->idev); input_report_key(data->idev, KEY_RFKILL, 0); input_sync(data->idev); } @@ -177,8 +178,10 @@ static int asus_wireless_remove(struct acpi_device *adev) { struct asus_wireless_data *data = acpi_driver_data(adev); - if (data->wq) + if (data->wq) { + devm_led_classdev_unregister(&adev->dev, &data->led); destroy_workqueue(data->wq); + } return 0; } diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 48e1541dc8d4..7440f650e81a 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -161,6 +161,16 @@ MODULE_LICENSE("GPL"); static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL }; +static bool ashs_present(void) +{ + int i = 0; + while (ashs_ids[i]) { + if (acpi_dev_found(ashs_ids[i++])) + return true; + } + return false; +} + struct bios_args { u32 arg0; u32 arg1; @@ -962,6 +972,9 @@ static int asus_new_rfkill(struct asus_wmi *asus, static void asus_wmi_rfkill_exit(struct asus_wmi *asus) { + if (asus->driver->wlan_ctrl_by_user && ashs_present()) + return; + asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5"); asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6"); asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7"); @@ -2058,16 +2071,6 @@ static int asus_wmi_fan_init(struct asus_wmi *asus) return 0; } -static bool ashs_present(void) -{ - int i = 0; - while (ashs_ids[i]) { - if (acpi_dev_found(ashs_ids[i++])) - return true; - } - return false; -} - /* * WMI Driver */ diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index f42159fd2031..dd5043a6a114 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -49,6 +49,7 @@ struct quirk_entry { u8 touchpad_led; + u8 kbd_led_levels_off_1; int needs_kbd_timeouts; /* @@ -79,6 +80,10 @@ static struct quirk_entry quirk_dell_xps13_9333 = { .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 }, }; +static struct quirk_entry quirk_dell_latitude_e6410 = { + .kbd_led_levels_off_1 = 1, +}; + static struct 
platform_driver platform_driver = { .driver = { .name = "dell-laptop", @@ -280,6 +285,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = { }, .driver_data = &quirk_dell_xps13_9333, }, + { + .callback = dmi_matched, + .ident = "Dell Latitude E6410", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6410"), + }, + .driver_data = &quirk_dell_latitude_e6410, + }, { } }; @@ -1163,6 +1177,7 @@ static u8 kbd_previous_mode_bit; static bool kbd_led_present; static DEFINE_MUTEX(kbd_led_mutex); +static enum led_brightness kbd_led_level; /* * NOTE: there are three ways to set the keyboard backlight level. @@ -1200,6 +1215,9 @@ static int kbd_get_info(struct kbd_info *info) units = (buffer->output[2] >> 8) & 0xFF; info->levels = (buffer->output[2] >> 16) & 0xFF; + if (quirks && quirks->kbd_led_levels_off_1 && info->levels) + info->levels--; + if (units & BIT(0)) info->seconds = (buffer->output[3] >> 0) & 0xFF; if (units & BIT(1)) @@ -2003,6 +2021,7 @@ static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev) static int kbd_led_level_set(struct led_classdev *led_cdev, enum led_brightness value) { + enum led_brightness new_value = value; struct kbd_state state; struct kbd_state new_state; u16 num; @@ -2032,6 +2051,9 @@ static int kbd_led_level_set(struct led_classdev *led_cdev, } out: + if (ret == 0) + kbd_led_level = new_value; + mutex_unlock(&kbd_led_mutex); return ret; } @@ -2059,6 +2081,9 @@ static int __init kbd_led_init(struct device *dev) if (kbd_led.max_brightness) kbd_led.max_brightness--; } + + kbd_led_level = kbd_led_level_get(NULL); + ret = led_classdev_register(dev, &kbd_led); if (ret) kbd_led_present = false; @@ -2083,13 +2108,25 @@ static void kbd_led_exit(void) static int dell_laptop_notifier_call(struct notifier_block *nb, unsigned long action, void *data) { + bool changed = false; + enum led_brightness new_kbd_led_level; + switch (action) { case DELL_LAPTOP_KBD_BACKLIGHT_BRIGHTNESS_CHANGED: if (!kbd_led_present) break; - led_classdev_notify_brightness_hw_changed(&kbd_led, - kbd_led_level_get(&kbd_led)); + mutex_lock(&kbd_led_mutex); + new_kbd_led_level = kbd_led_level_get(&kbd_led); + if (kbd_led_level != new_kbd_led_level) { + kbd_led_level = new_kbd_led_level; + changed = true; + } + mutex_unlock(&kbd_led_mutex); + + if (changed) + led_classdev_notify_brightness_hw_changed(&kbd_led, + kbd_led_level); break; } diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index b4ed3dc983d5..b4224389febe 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -297,7 +297,7 @@ static int hp_wmi_hw_state(int mask) if (state < 0) return state; - return state & 0x1; + return !!(state & mask); } static int __init hp_wmi_bios_2008_later(void) diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c index 493d8910a74e..7b12abe86b94 100644 --- a/drivers/platform/x86/hp_accel.c +++ b/drivers/platform/x86/hp_accel.c @@ -240,6 +240,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = { AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left), AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd), + AXIS_DMI_MATCH("HPB440G4", "HP ProBook 440 G4", x_inverted), AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), diff --git a/drivers/platform/x86/intel_pmc_ipc.c 
b/drivers/platform/x86/intel_pmc_ipc.c index e03fa31446ca..f317bae1adbd 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -577,15 +577,28 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev, } return (ssize_t)count; } +static ssize_t intel_ssrambase_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + if (ipcdev.telem_punit_ssram_base > TELEM_PUNIT_SSRAM_OFFSET) + return scnprintf(buf, 64, "%llx\n", + ipcdev.telem_punit_ssram_base - TELEM_PUNIT_SSRAM_OFFSET); + else + return scnprintf(buf, 64, "%x\n", 0); +} static DEVICE_ATTR(simplecmd, S_IWUSR, NULL, intel_pmc_ipc_simple_cmd_store); static DEVICE_ATTR(northpeak, S_IWUSR, NULL, intel_pmc_ipc_northpeak_store); +static DEVICE_ATTR(ssrambase, S_IRUGO, + intel_ssrambase_show, NULL); static struct attribute *intel_ipc_attrs[] = { &dev_attr_northpeak.attr, &dev_attr_simplecmd.attr, + &dev_attr_ssrambase.attr, NULL }; diff --git a/drivers/platform/x86/intel_pstore_pram.c b/drivers/platform/x86/intel_pstore_pram.c new file mode 100644 index 000000000000..8c8b1291545f --- /dev/null +++ b/drivers/platform/x86/intel_pstore_pram.c @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2015, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#define SZ_4K 0x00001000 +#define SZ_2M 0x00200000 + +/* PRAM stands for 'Persisted RAM' from BIOS point of view */ +#define ACPI_SIG_PRAM "PRAM" + +/* + * The following parameters match those defined in fs/pstore/ram.c in + * order to keep compatibility between driver interfaces; please refer + * to it for implementation details.
+ */ +static ulong pram_record_size = SZ_4K; +module_param_named(record_size, pram_record_size, ulong, 0400); +MODULE_PARM_DESC(record_size, "size of each dump done on oops/panic"); + +static ulong pram_console_size = SZ_2M; +module_param_named(console_size, pram_console_size, ulong, 0400); +MODULE_PARM_DESC(console_size, "size of kernel console log"); + +static ulong pram_ftrace_size = 2*SZ_4K; +module_param_named(ftrace_size, pram_ftrace_size, ulong, 0400); +MODULE_PARM_DESC(ftrace_size, "size of ftrace log"); + +static int pram_dump_oops = 1; +module_param_named(dump_oops, pram_dump_oops, int, 0600); +MODULE_PARM_DESC(dump_oops, + "set to 1 to dump oopses, 0 to only dump panics (default 1)"); + +static int pram_ecc; +module_param_named(ecc, pram_ecc, int, 0600); +MODULE_PARM_DESC(ecc, + "if non-zero, the option enables SW ECC support, provided by " + "fs/pstore/ram_core.c, and specifies ECC buffer size in bytes " + "(1 is a special value, means 16 bytes ECC)"); + +static struct ramoops_platform_data *pram_data; +static struct platform_device *pram_dev; + +struct acpi_table_pram { + struct acpi_table_header header; + u64 addr; + u32 size; +} __packed; + +static int register_pram_dev(unsigned long mem_address, + unsigned long mem_size) +{ + pram_data = kzalloc(sizeof(*pram_data), GFP_KERNEL); + if (!pram_data) { + pr_err("could not allocate pram_data\n"); + return -ENOMEM; + } + + pram_data->mem_address = mem_address; + pram_data->mem_size = mem_size; + pram_data->record_size = pram_record_size; + pram_data->console_size = pram_console_size; + pram_data->ftrace_size = pram_ftrace_size; + pram_data->dump_oops = pram_dump_oops; + /* + * For backwards compatibility with previous + * fs/pstore/ram_core.c implementation, + * intel_pstore_pram.ecc=1 means 16 bytes ECC. + */ + pram_data->ecc_info.ecc_size = pram_ecc == 1 ?
16 : pram_ecc; + + pram_dev = platform_device_register_data(NULL, "ramoops", -1, + pram_data, sizeof(struct ramoops_platform_data)); + if (IS_ERR(pram_dev)) { + pr_err("could not create platform device: %ld\n", + PTR_ERR(pram_dev)); + kfree(pram_data); + return PTR_ERR(pram_dev); + } + + pr_info("registered pram device, addr=0x%lx, size=0x%lx\n", + (unsigned long)pram_data->mem_address, (unsigned long)pram_data->mem_size); + + return 0; +} + +static int __init intel_pram_init(void) +{ + acpi_status status; + struct acpi_table_pram *pramt; + + status = acpi_get_table(ACPI_SIG_PRAM, 0, + (struct acpi_table_header **)&pramt); + if (status == AE_NOT_FOUND) { + pr_debug("PRAM table not found\n"); + return -ENODEV; + } else if (ACPI_FAILURE(status)) { + const char *msg = acpi_format_exception(status); + pr_err("Failed to get PRAM table: %s\n", msg); + return -EINVAL; + } + + if (!pramt->addr || !pramt->size) { + pr_debug("PRAM: bad address (0x%llx) or size (0x%lx)\n", + (unsigned long long)pramt->addr, + (unsigned long)pramt->size); + return -ENODEV; + } + + return register_pram_dev(pramt->addr, pramt->size); +} +postcore_initcall(intel_pram_init); + +static void __exit intel_pram_exit(void) +{ + platform_device_unregister(pram_dev); + kfree(pram_data); +} +module_exit(intel_pram_exit); diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c index a47a41fc10ad..b5b890127479 100644 --- a/drivers/platform/x86/intel_punit_ipc.c +++ b/drivers/platform/x86/intel_punit_ipc.c @@ -252,28 +252,28 @@ static int intel_punit_get_bars(struct platform_device *pdev) * - GTDRIVER_IPC BASE_IFACE */ res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - if (res) { + if (res && resource_size(res) > 1) { addr = devm_ioremap_resource(&pdev->dev, res); if (!IS_ERR(addr)) punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr; } res = platform_get_resource(pdev, IORESOURCE_MEM, 3); - if (res) { + if (res && resource_size(res) > 1) { addr = devm_ioremap_resource(&pdev->dev, res); if (!IS_ERR(addr)) punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr; } res = platform_get_resource(pdev, IORESOURCE_MEM, 4); - if (res) { + if (res && resource_size(res) > 1) { addr = devm_ioremap_resource(&pdev->dev, res); if (!IS_ERR(addr)) punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr; } res = platform_get_resource(pdev, IORESOURCE_MEM, 5); - if (res) { + if (res && resource_size(res) > 1) { addr = devm_ioremap_resource(&pdev->dev, res); if (!IS_ERR(addr)) punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr; diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c index bc98ef95514a..2da48ecc90c1 100644 --- a/drivers/platform/x86/peaq-wmi.c +++ b/drivers/platform/x86/peaq-wmi.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -64,8 +65,23 @@ static void peaq_wmi_poll(struct input_polled_dev *dev) } } +/* Some other devices (Shuttle XS35) use the same WMI GUID for other purposes */ +static const struct dmi_system_id peaq_dmi_table[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"), + DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"), + }, + }, + {} +}; + static int __init peaq_wmi_init(void) { + /* WMI GUID is not unique, also check for a DMI match */ + if (!dmi_check_system(peaq_dmi_table)) + return -ENODEV; + if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID)) return -ENODEV; @@ -86,6 +102,9 @@ static int __init peaq_wmi_init(void) static void __exit peaq_wmi_exit(void) { + if (!dmi_check_system(peaq_dmi_table)) + return; + if 
(!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID)) return; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 2242d6035d9e..c66efa06ac14 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -214,6 +214,10 @@ enum tpacpi_hkey_event_t { /* AC-related events */ TP_HKEY_EV_AC_CHANGED = 0x6040, /* AC status changed */ + /* Further user-interface events */ + TP_HKEY_EV_PALM_DETECTED = 0x60b0, /* palm hovers keyboard */ + TP_HKEY_EV_PALM_UNDETECTED = 0x60b1, /* palm removed */ + /* Misc */ TP_HKEY_EV_RFKILL_CHANGED = 0x7000, /* rfkill switch changed */ }; @@ -3973,6 +3977,12 @@ static bool hotkey_notify_6xxx(const u32 hkey, *send_acpi_ev = false; break; + case TP_HKEY_EV_PALM_DETECTED: + case TP_HKEY_EV_PALM_UNDETECTED: + /* palm detected hovering the keyboard, forward to user-space + * via netlink for consumption */ + return true; + default: pr_warn("unknown possible thermal alarm or keyboard event received\n"); known = false; @@ -9543,7 +9553,7 @@ static struct ibm_init_struct ibms_init[] __initdata = { }, }; -static int __init set_ibm_param(const char *val, struct kernel_param *kp) +static int __init set_ibm_param(const char *val, const struct kernel_param *kp) { unsigned int i; struct ibm_struct *ibm; diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 0765b1797d4c..7f8fa42a1084 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -1268,5 +1268,5 @@ static void __exit acpi_wmi_exit(void) bus_unregister(&wmi_bus_type); } -subsys_initcall(acpi_wmi_init); +subsys_initcall_sync(acpi_wmi_init); module_exit(acpi_wmi_exit); diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c index 7549c7f74a3c..c03e96e6a041 100644 --- a/drivers/power/reset/zx-reboot.c +++ b/drivers/power/reset/zx-reboot.c @@ -82,3 +82,7 @@ static struct platform_driver zx_reboot_driver = { }, }; module_platform_driver(zx_reboot_driver); + +MODULE_DESCRIPTION("ZTE SoCs reset driver"); +MODULE_AUTHOR("Jun Nie "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c index 4ebbcce45c48..5a76c6d343de 100644 --- a/drivers/power/supply/ab8500_charger.c +++ b/drivers/power/supply/ab8500_charger.c @@ -3218,11 +3218,13 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di) } /* Enable backup battery charging */ - abx500_mask_and_set_register_interruptible(di->dev, + ret = abx500_mask_and_set_register_interruptible(di->dev, AB8500_RTC, AB8500_RTC_CTRL_REG, RTC_BUP_CH_ENA, RTC_BUP_CH_ENA); - if (ret < 0) + if (ret < 0) { dev_err(di->dev, "%s mask and set failed\n", __func__); + goto out; + } if (is_ab8540(di->parent)) { ret = abx500_mask_and_set_register_interruptible(di->dev, diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c index d51ebd1da65e..9dc7590e07cb 100644 --- a/drivers/power/supply/axp288_charger.c +++ b/drivers/power/supply/axp288_charger.c @@ -785,6 +785,14 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info) return 0; } +static void axp288_charger_cancel_work(void *data) +{ + struct axp288_chrg_info *info = data; + + cancel_work_sync(&info->otg.work); + cancel_work_sync(&info->cable.work); +} + static int axp288_charger_probe(struct platform_device *pdev) { int ret, i, pirq; @@ -836,6 +844,11 @@ static int axp288_charger_probe(struct platform_device *pdev) return ret; } + /* Cancel our work on cleanup, register this before the notifiers */ + ret =
devm_add_action(dev, axp288_charger_cancel_work, info); + if (ret) + return ret; + /* Register for extcon notification */ INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker); info->cable.nb[0].notifier_call = axp288_charger_handle_cable0_evt; diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index 8e2c41ded171..8eda2a6b42b8 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -32,6 +33,35 @@ #define BQ25890_IRQ_PIN "bq25890_irq" #define BQ25890_ID 3 +#define BQ25892_ID 0 + +/* pmic charger info */ + +#define PMIC_CHG_DEF_CC 2600000 /* in uA */ +#define PMIC_CHG_DEF_IPRECHG 256000 /* in uA */ +#define PMIC_CHG_DEF_ITERM 128000 /* in uA */ +#define PMIC_CHG_DEF_CV 4350000 /* in uV */ +#define PMIC_CHG_DEF_VSYSMIN 3500000 /* in uV */ +#define PMIC_CHG_DEF_BOOSTV 4700000 /* in uV */ +#define PMIC_CHG_DEF_BOOSTI 500000 /* in uA */ +#define PMIC_CHG_MAX_CC 2600000 /* in uA */ +#define PMIC_CHG_MAX_CV 4350000 /* in uV */ +#define PMIC_CHG_MAX_TEMP 100 /* in DegC */ +#define PMIC_CHG_MIN_TEMP 0 /* in DegC */ + +static struct bq25890_platform_data charger_drvdata = { + .def_cc = PMIC_CHG_DEF_CC, + .def_cv = PMIC_CHG_DEF_CV, + .iterm = PMIC_CHG_DEF_ITERM, + .iprechg = PMIC_CHG_DEF_IPRECHG, + .sysvmin = PMIC_CHG_DEF_VSYSMIN, + .boostv = PMIC_CHG_DEF_BOOSTV, + .boosti = PMIC_CHG_DEF_BOOSTI, + .max_cc = PMIC_CHG_MAX_CC, + .max_cv = PMIC_CHG_MAX_CV, + .min_temp = PMIC_CHG_MIN_TEMP, + .max_temp = PMIC_CHG_MAX_TEMP, +}; enum bq25890_fields { F_EN_HIZ, F_EN_ILIM, F_IILIM, /* Reg00 */ @@ -88,6 +118,7 @@ struct bq25890_state { struct bq25890_device { struct i2c_client *client; + struct bq25890_platform_data *pdata; struct device *dev; struct power_supply *charger; @@ -104,6 +135,11 @@ struct bq25890_device { struct bq25890_state state; struct mutex lock; /* protect state data */ + + int cc; + int cv; + int inlmt; + int chg_cntl_max; }; static const struct regmap_range bq25890_readonly_reg_ranges[] = { @@ -732,6 +768,7 @@ static int bq25890_irq_probe(struct bq25890_device *bq) return gpiod_to_irq(irq); } +#ifdef CONFIG_OF static int bq25890_fw_read_u32_props(struct bq25890_device *bq) { int ret; @@ -791,6 +828,37 @@ static int bq25890_fw_probe(struct bq25890_device *bq) return 0; } +#else +static int bq25890_fw_probe(struct bq25890_device *bq) +{ + struct bq25890_init_data *init = &bq->init_data; + + /* initialize the BXT platform data */ + init->ichg = bq25890_find_idx(bq->pdata->def_cc, TBL_ICHG); + init->vreg = bq25890_find_idx(bq->pdata->def_cv, TBL_VREG); + init->iterm = bq25890_find_idx(bq->pdata->iterm, TBL_ITERM); + init->iprechg = bq25890_find_idx(bq->pdata->iprechg, TBL_IPRECHG); + init->sysvmin = bq25890_find_idx(bq->pdata->sysvmin, TBL_SYSVMIN); + init->boostv = bq25890_find_idx(bq->pdata->boostv, TBL_BOOSTV); + init->boosti = bq25890_find_idx(bq->pdata->boosti, TBL_BOOSTI); + init->treg = bq25890_find_idx(bq->pdata->max_temp, TBL_TREG); + init->boostf = bq->pdata->boostf_low; + init->ilim_en = bq->pdata->en_ilim_pin; + + bq->cc = bq->pdata->def_cc; + bq->cv = bq->pdata->def_cv; + + /* + * divide the limit into 100mA steps so that the + * total available steps will be n + 2 including + * zero and High-Z mode.
+ */ + bq->chg_cntl_max = (bq->pdata->def_cc / 100000) + 2; + + return 0; +} +#endif + static int bq25890_probe(struct i2c_client *client, const struct i2c_device_id *id) @@ -840,12 +908,13 @@ static int bq25890_probe(struct i2c_client *client, return bq->chip_id; } - if (bq->chip_id != BQ25890_ID) { + if ((bq->chip_id != BQ25890_ID) && (bq->chip_id != BQ25892_ID)) { dev_err(dev, "Chip with ID=%d, not supported!\n", bq->chip_id); return -ENODEV; } if (!dev->platform_data) { + bq->pdata = &charger_drvdata; ret = bq25890_fw_probe(bq); if (ret < 0) { dev_err(dev, "Cannot read device properties.\n"); diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c index 08e4fd9ee607..9621d6dd88c6 100644 --- a/drivers/power/supply/ltc2941-battery-gauge.c +++ b/drivers/power/supply/ltc2941-battery-gauge.c @@ -316,15 +316,15 @@ static int ltc294x_get_temperature(const struct ltc294x_info *info, int *val) if (info->id == LTC2942_ID) { reg = LTC2942_REG_TEMPERATURE_MSB; - value = 60000; /* Full-scale is 600 Kelvin */ + value = 6000; /* Full-scale is 600 Kelvin */ } else { reg = LTC2943_REG_TEMPERATURE_MSB; - value = 51000; /* Full-scale is 510 Kelvin */ + value = 5100; /* Full-scale is 510 Kelvin */ } ret = ltc294x_read_regs(info->client, reg, &datar[0], 2); value *= (datar[0] << 8) | datar[1]; - /* Convert to centidegrees */ - *val = value / 0xFFFF - 27215; + /* Convert to tenths of degree Celsius */ + *val = value / 0xFFFF - 2722; return ret; } diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c index 5b556a13f517..9c7eaaeda343 100644 --- a/drivers/power/supply/max17042_battery.c +++ b/drivers/power/supply/max17042_battery.c @@ -1021,6 +1021,7 @@ static int max17042_probe(struct i2c_client *client, i2c_set_clientdata(client, chip); psy_cfg.drv_data = chip; + psy_cfg.of_node = dev->of_node; /* When current is not measured, * CURRENT_NOW and CURRENT_AVG properties should be invisible. 
*/ diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 5204f115970f..da589c3decb2 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -121,7 +121,10 @@ static ssize_t power_supply_show_property(struct device *dev, else if (off >= POWER_SUPPLY_PROP_MODEL_NAME) return sprintf(buf, "%s\n", value.strval); - return sprintf(buf, "%d\n", value.intval); + if (off == POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT) + return sprintf(buf, "%lld\n", value.int64val); + else + return sprintf(buf, "%d\n", value.intval); } static ssize_t power_supply_store_property(struct device *dev, @@ -245,6 +248,12 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(precharge_current), POWER_SUPPLY_ATTR(charge_term_current), POWER_SUPPLY_ATTR(calibrate), + /* Local extensions */ + POWER_SUPPLY_ATTR(usb_hc), + POWER_SUPPLY_ATTR(usb_otg), + POWER_SUPPLY_ATTR(charge_enabled), + /* Local extensions of type int64_t */ + POWER_SUPPLY_ATTR(charge_counter_ext), /* Properties of type `const char *' */ POWER_SUPPLY_ATTR(model_name), POWER_SUPPLY_ATTR(manufacturer), diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 58a97d420572..51364621f77c 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -89,6 +89,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin, case PTP_PF_PHYSYNC: if (chan != 0) return -EINVAL; + break; default: return -EINVAL; } diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c index 5d6ed1507d29..5561b9e190f8 100644 --- a/drivers/pwm/pwm-lpss-platform.c +++ b/drivers/pwm/pwm-lpss-platform.c @@ -74,6 +74,10 @@ static int pwm_lpss_remove_platform(struct platform_device *pdev) return pwm_lpss_remove(lpwm); } +static SIMPLE_DEV_PM_OPS(pwm_lpss_platform_pm_ops, + pwm_lpss_suspend, + pwm_lpss_resume); + static const struct acpi_device_id pwm_lpss_acpi_match[] = { { "80860F09", (unsigned long)&pwm_lpss_byt_info }, { "80862288", (unsigned long)&pwm_lpss_bsw_info }, @@ -86,6 +90,7 @@ static struct platform_driver pwm_lpss_driver_platform = { .driver = { .name = "pwm-lpss", .acpi_match_table = pwm_lpss_acpi_match, + .pm = &pwm_lpss_platform_pm_ops, }, .probe = pwm_lpss_probe_platform, .remove = pwm_lpss_remove_platform, diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c index 8db0d40ccacd..4721a264bac2 100644 --- a/drivers/pwm/pwm-lpss.c +++ b/drivers/pwm/pwm-lpss.c @@ -32,10 +32,13 @@ /* Size of each PWM register space if multiple */ #define PWM_SIZE 0x400 +#define MAX_PWMS 4 + struct pwm_lpss_chip { struct pwm_chip chip; void __iomem *regs; const struct pwm_lpss_boardinfo *info; + u32 saved_ctrl[MAX_PWMS]; }; static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip) @@ -177,6 +180,9 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, unsigned long c; int ret; + if (WARN_ON(info->npwm > MAX_PWMS)) + return ERR_PTR(-ENODEV); + lpwm = devm_kzalloc(dev, sizeof(*lpwm), GFP_KERNEL); if (!lpwm) return ERR_PTR(-ENOMEM); @@ -212,6 +218,30 @@ int pwm_lpss_remove(struct pwm_lpss_chip *lpwm) } EXPORT_SYMBOL_GPL(pwm_lpss_remove); +int pwm_lpss_suspend(struct device *dev) +{ + struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev); + int i; + + for (i = 0; i < lpwm->info->npwm; i++) + lpwm->saved_ctrl[i] = readl(lpwm->regs + i * PWM_SIZE + PWM); + + return 0; +} +EXPORT_SYMBOL_GPL(pwm_lpss_suspend); + +int pwm_lpss_resume(struct device *dev) +{ + struct pwm_lpss_chip *lpwm = 
dev_get_drvdata(dev); + int i; + + for (i = 0; i < lpwm->info->npwm; i++) + writel(lpwm->saved_ctrl[i], lpwm->regs + i * PWM_SIZE + PWM); + + return 0; +} +EXPORT_SYMBOL_GPL(pwm_lpss_resume); + MODULE_DESCRIPTION("PWM driver for Intel LPSS"); MODULE_AUTHOR("Mika Westerberg "); MODULE_LICENSE("GPL v2"); diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h index 98306bb02cfe..7a4238ad1fcb 100644 --- a/drivers/pwm/pwm-lpss.h +++ b/drivers/pwm/pwm-lpss.h @@ -28,5 +28,7 @@ struct pwm_lpss_boardinfo { struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, const struct pwm_lpss_boardinfo *info); int pwm_lpss_remove(struct pwm_lpss_chip *lpwm); +int pwm_lpss_suspend(struct device *dev); +int pwm_lpss_resume(struct device *dev); #endif /* __PWM_LPSS_H */ diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c index 1c85ecc9e7ac..0fcf94ffad32 100644 --- a/drivers/pwm/pwm-rcar.c +++ b/drivers/pwm/pwm-rcar.c @@ -156,8 +156,12 @@ static int rcar_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, if (div < 0) return div; - /* Let the core driver set pwm->period if disabled and duty_ns == 0 */ - if (!pwm_is_enabled(pwm) && !duty_ns) + /* + * Let the core driver set pwm->period if disabled and duty_ns == 0. + * However, this driver should not set the new duty_ns if the current + * duty_cycle is not set + */ + if (!pwm_is_enabled(pwm) && !duty_ns && !pwm->state.duty_cycle) return 0; rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR); diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c index e464582a390a..3439f1e902cb 100644 --- a/drivers/pwm/pwm-stmpe.c +++ b/drivers/pwm/pwm-stmpe.c @@ -145,7 +145,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, break; case 2: - offset = STMPE24XX_PWMIC1; + offset = STMPE24XX_PWMIC2; break; default: diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 5beb0c361076..76afe1449cab 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -963,7 +963,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, req->sgt.sgl, req->sgt.nents, dir); if (nents == -EFAULT) { rmcd_error("Failed to map SG list"); - return -EFAULT; + ret = -EFAULT; + goto err_pg; } ret = do_dma_request(req, xfer, sync, nents); diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c index f541b80f1b54..bd910fe123d9 100644 --- a/drivers/regulator/cpcap-regulator.c +++ b/drivers/regulator/cpcap-regulator.c @@ -222,7 +222,7 @@ static unsigned int cpcap_map_mode(unsigned int mode) case CPCAP_BIT_AUDIO_LOW_PWR: return REGULATOR_MODE_STANDBY; default: - return -EINVAL; + return REGULATOR_MODE_INVALID; } } diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index 0fce06acfaec..a2eb50719c7b 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c @@ -271,8 +271,7 @@ static int gpio_regulator_probe(struct platform_device *pdev) drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL); if (drvdata->desc.name == NULL) { dev_err(&pdev->dev, "Failed to allocate supply name\n"); - ret = -ENOMEM; - goto err; + return -ENOMEM; } if (config->nr_gpios != 0) { @@ -292,7 +291,7 @@ static int gpio_regulator_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Could not obtain regulator setting GPIOs: %d\n", ret); - goto err_memstate; + goto err_memgpio; } } @@ -303,7 +302,7 @@ static int gpio_regulator_probe(struct
platform_device *pdev) if (drvdata->states == NULL) { dev_err(&pdev->dev, "Failed to allocate state data\n"); ret = -ENOMEM; - goto err_memgpio; + goto err_stategpio; } drvdata->nr_states = config->nr_states; @@ -324,7 +323,7 @@ static int gpio_regulator_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, "No regulator type set\n"); ret = -EINVAL; - goto err_memgpio; + goto err_memstate; } /* build initial state from gpio init data. */ @@ -361,22 +360,21 @@ static int gpio_regulator_probe(struct platform_device *pdev) if (IS_ERR(drvdata->dev)) { ret = PTR_ERR(drvdata->dev); dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret); - goto err_stategpio; + goto err_memstate; } platform_set_drvdata(pdev, drvdata); return 0; -err_stategpio: - gpio_free_array(drvdata->gpios, drvdata->nr_gpios); err_memstate: kfree(drvdata->states); +err_stategpio: + gpio_free_array(drvdata->gpios, drvdata->nr_gpios); err_memgpio: kfree(drvdata->gpios); err_name: kfree(drvdata->desc.name); -err: return ret; } diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 14637a01ba2d..a3bf7c993723 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -31,6 +31,7 @@ static void of_get_regulation_constraints(struct device_node *np, struct regulation_constraints *constraints = &(*init_data)->constraints; struct regulator_state *suspend_state; struct device_node *suspend_np; + unsigned int mode; int ret, i; u32 pval; @@ -124,11 +125,11 @@ static void of_get_regulation_constraints(struct device_node *np, if (!of_property_read_u32(np, "regulator-initial-mode", &pval)) { if (desc && desc->of_map_mode) { - ret = desc->of_map_mode(pval); - if (ret == -EINVAL) + mode = desc->of_map_mode(pval); + if (mode == REGULATOR_MODE_INVALID) pr_err("%s: invalid mode %u\n", np->name, pval); else - constraints->initial_mode = ret; + constraints->initial_mode = mode; } else { pr_warn("%s: mapping for mode %d not defined\n", np->name, pval); @@ -163,12 +164,12 @@ static void of_get_regulation_constraints(struct device_node *np, if (!of_property_read_u32(suspend_np, "regulator-mode", &pval)) { if (desc && desc->of_map_mode) { - ret = desc->of_map_mode(pval); - if (ret == -EINVAL) + mode = desc->of_map_mode(pval); + if (mode == REGULATOR_MODE_INVALID) pr_err("%s: invalid mode %u\n", np->name, pval); else - suspend_state->mode = ret; + suspend_state->mode = mode; } else { pr_warn("%s: mapping for mode %d not defined\n", np->name, pval); @@ -305,6 +306,7 @@ int of_regulator_match(struct device *dev, struct device_node *node, dev_err(dev, "failed to parse DT for regulator %s\n", child->name); + of_node_put(child); return -EINVAL; } match->of_node = of_node_get(child); diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index 63922a2167e5..659e516455be 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c @@ -158,6 +158,7 @@ static const struct regulator_ops pfuze100_sw_regulator_ops = { static const struct regulator_ops pfuze100_swb_regulator_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .set_voltage_sel = regulator_set_voltage_sel_regmap, diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c index 72c8b3e1022b..e0a9c445ed67 100644 --- a/drivers/regulator/stm32-vrefbuf.c +++ 
b/drivers/regulator/stm32-vrefbuf.c @@ -51,7 +51,7 @@ static int stm32_vrefbuf_enable(struct regulator_dev *rdev) * arbitrary timeout. */ ret = readl_poll_timeout(priv->base + STM32_VREFBUF_CSR, val, - !(val & STM32_VRR), 650, 10000); + val & STM32_VRR, 650, 10000); if (ret) { dev_err(&rdev->dev, "stm32 vrefbuf timed out!\n"); val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); diff --git a/drivers/regulator/tps68470-regulator.c b/drivers/regulator/tps68470-regulator.c new file mode 100644 index 000000000000..08637d0c7c09 --- /dev/null +++ b/drivers/regulator/tps68470-regulator.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018 Intel Corporation + * + * Regulator driver for TPS68470 PMIC + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) +#define TPS68470_REGULATOR(_name, _id, _of_match, _ops, _n, _vr, \ + _vm, _er, _em, _t, _lr, _nlr) \ + { \ + .name = _name, \ + .id = _id, \ + .of_match = of_match_ptr(_of_match), \ + .regulators_node = of_match_ptr("regulators"), \ + .ops = &_ops, \ + .n_voltages = _n, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .vsel_reg = _vr, \ + .vsel_mask = _vm, \ + .enable_reg = _er, \ + .enable_mask = _em, \ + .volt_table = _t, \ + .linear_ranges = _lr, \ + .n_linear_ranges = _nlr, \ + } +#else +#define TPS68470_REGULATOR(_name, _id, _of_match, _ops, _n, _vr, \ + _vm, _er, _em, _t, _lr, _nlr) \ + { \ + .name = _name, \ + .id = _id, \ + .ops = &_ops, \ + .n_voltages = _n, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .vsel_reg = _vr, \ + .vsel_mask = _vm, \ + .enable_reg = _er, \ + .enable_mask = _em, \ + .volt_table = _t, \ + .linear_ranges = _lr, \ + .n_linear_ranges = _nlr, \ + } +#endif + +static const struct regulator_linear_range tps68470_ldo_ranges[] = { + REGULATOR_LINEAR_RANGE(875000, 0, 125, 17800), +}; + +static const struct regulator_linear_range tps68470_core_ranges[] = { + REGULATOR_LINEAR_RANGE(900000, 0, 42, 25000), +}; + +static int tps68470_regulator_enable(struct regulator_dev *dev) +{ + struct tps68470 *tps = rdev_get_drvdata(dev); + + return tps68470_set_bits(tps, dev->desc->enable_reg, + dev->desc->enable_mask, + dev->desc->enable_mask); +} + +static int tps68470_regulator_disable(struct regulator_dev *dev) +{ + struct tps68470 *tps = rdev_get_drvdata(dev); + + return tps68470_clear_bits(tps, dev->desc->enable_reg, + dev->desc->enable_mask); +} + +/* Operations permitted on DCDCx, LDO2, LDO3 and LDO4 */ +static struct regulator_ops tps68470_regulator_ops = { + .is_enabled = regulator_is_enabled_regmap, + .enable = tps68470_regulator_enable, + .disable = tps68470_regulator_disable, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .list_voltage = regulator_list_voltage_linear_range, + .map_voltage = regulator_map_voltage_linear_range, +}; + +static const struct regulator_desc regulators[] = { + TPS68470_REGULATOR("CORE", TPS68470_CORE, "core", + tps68470_regulator_ops, 43, TPS68470_REG_VDVAL, + TPS68470_VDVAL_DVOLT_MASK, TPS68470_REG_VDCTL, + TPS68470_VDCTL_EN_MASK, + NULL, tps68470_core_ranges, + ARRAY_SIZE(tps68470_core_ranges)), + TPS68470_REGULATOR("ANA", TPS68470_ANA, "ana", + tps68470_regulator_ops, 126, TPS68470_REG_VAVAL, + TPS68470_VAVAL_AVOLT_MASK, TPS68470_REG_VACTL, + TPS68470_VACTL_EN_MASK, + NULL, tps68470_ldo_ranges, + ARRAY_SIZE(tps68470_ldo_ranges)), + TPS68470_REGULATOR("VCM",
TPS68470_VCM, "vcm", + tps68470_regulator_ops, 126, TPS68470_REG_VCMVAL, + TPS68470_VCMVAL_VCVOLT_MASK, TPS68470_REG_VCMCTL, + TPS68470_VCMCTL_EN_MASK, + NULL, tps68470_ldo_ranges, + ARRAY_SIZE(tps68470_ldo_ranges)), + TPS68470_REGULATOR("VIO", TPS68470_VIO, "vio", + tps68470_regulator_ops, 126, TPS68470_REG_VIOVAL, + TPS68470_VIOVAL_IOVOLT_MASK, TPS68470_REG_S_I2C_CTL, + TPS68470_S_I2C_CTL_EN_MASK, + NULL, tps68470_ldo_ranges, + ARRAY_SIZE(tps68470_ldo_ranges)), + +/* + * (1) This register must have the same setting as VIOVAL if the S_IO LDO is + * used to power daisy chained IOs in the receive side. + * (2) If there is no I2C daisy chain it can be set freely. + * + */ + TPS68470_REGULATOR("VSIO", TPS68470_VSIO, "vsio", + tps68470_regulator_ops, 126, TPS68470_REG_VSIOVAL, + TPS68470_VSIOVAL_IOVOLT_MASK, TPS68470_REG_S_I2C_CTL, + TPS68470_S_I2C_CTL_EN_MASK, + NULL, tps68470_ldo_ranges, + ARRAY_SIZE(tps68470_ldo_ranges)), + TPS68470_REGULATOR("AUX1", TPS68470_AUX1, "aux1", + tps68470_regulator_ops, 126, TPS68470_REG_VAUX1VAL, + TPS68470_VAUX1VAL_AUX1VOLT_MASK, + TPS68470_REG_VAUX1CTL, + TPS68470_VAUX1CTL_EN_MASK, + NULL, tps68470_ldo_ranges, + ARRAY_SIZE(tps68470_ldo_ranges)), + TPS68470_REGULATOR("AUX2", TPS68470_AUX2, "aux2", + tps68470_regulator_ops, 126, TPS68470_REG_VAUX2VAL, + TPS68470_VAUX2VAL_AUX2VOLT_MASK, + TPS68470_REG_VAUX2CTL, + TPS68470_VAUX2CTL_EN_MASK, + NULL, tps68470_ldo_ranges, + ARRAY_SIZE(tps68470_ldo_ranges)), +}; + +#define tps68470_reg_init_data(_name, _min_uV, _max_uV)\ +{\ + .constraints = {\ + .name = (const char *)_name,\ + .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE \ + | REGULATOR_CHANGE_STATUS,\ + .min_uV = _min_uV,\ + .max_uV = _max_uV,\ + },\ +} + +struct regulator_init_data tps68470_init[] = { + tps68470_reg_init_data("CORE", 900000, 1950000), + tps68470_reg_init_data("ANA", 875000, 3100000), + tps68470_reg_init_data("VCM", 875000, 3100000), + tps68470_reg_init_data("VIO", 875000, 3100000), + tps68470_reg_init_data("VSIO", 875000, 3100000), + tps68470_reg_init_data("AUX1", 875000, 3100000), + tps68470_reg_init_data("AUX2", 875000, 3100000), +}; + +static int tps68470_regulator_probe(struct platform_device *pdev) +{ + struct tps68470 *tps = dev_get_drvdata(pdev->dev.parent); + struct tps68470_board *pdata = dev_get_platdata(tps->dev); + struct regulator_dev *rdev; + struct regulator_config config = { }; + int i; + + platform_set_drvdata(pdev, tps); + + for (i = 0; i < TPS68470_NUM_REGULATOR; i++) { + /* Register the regulators */ + config.dev = tps->dev; + if (pdata) + config.init_data = pdata->tps68470_init_data[i]; + else + config.init_data = &tps68470_init[i]; + + config.driver_data = tps; + config.regmap = tps->regmap; + + rdev = devm_regulator_register(&pdev->dev, &regulators[i], + &config); + if (IS_ERR(rdev)) { + dev_err(tps->dev, "failed to register %s regulator\n", + pdev->name); + return PTR_ERR(rdev); + } + dev_info(tps->dev, "Registered %s regulator\n", + pdev->name); + } + + return 0; + } + +static struct platform_driver tps68470_regulator_driver = { + .driver = { + .name = "tps68470-regulator", + }, + .probe = tps68470_regulator_probe, +}; + +static int __init tps68470_regulator_init(void) +{ + return platform_driver_register(&tps68470_regulator_driver); +} +subsys_initcall(tps68470_regulator_init); + +static void __exit tps68470_regulator_exit(void) +{ + platform_driver_unregister(&tps68470_regulator_driver); +} +module_exit(tps68470_regulator_exit); + +MODULE_AUTHOR("Zaikuo Wang "); +MODULE_AUTHOR("Tianshu Qiu "); +MODULE_AUTHOR("Jian Xu Zheng ");
+MODULE_AUTHOR("Yuning Pu "); +MODULE_AUTHOR("Rajmohan Mani "); +MODULE_DESCRIPTION("TPS68470 voltage regulator driver"); +MODULE_ALIAS("platform:tps68470-regulator"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index a4456db5849d..884c7505ed91 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -274,7 +274,7 @@ static inline unsigned int twl4030reg_map_mode(unsigned int mode) case RES_STATE_SLEEP: return REGULATOR_MODE_STANDBY; default: - return -EINVAL; + return REGULATOR_MODE_INVALID; } } diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 633268e9d550..05bcbce2013a 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c @@ -339,8 +339,10 @@ static int imx_rproc_probe(struct platform_device *pdev) } dcfg = of_device_get_match_data(dev); - if (!dcfg) - return -EINVAL; + if (!dcfg) { + ret = -EINVAL; + goto err_put_rproc; + } priv = rproc->priv; priv->rproc = rproc; diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c index 2d3d5ac92c06..81ec9b6805fc 100644 --- a/drivers/remoteproc/qcom_q6v5_pil.c +++ b/drivers/remoteproc/qcom_q6v5_pil.c @@ -915,6 +915,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) dev_err(qproc->dev, "unable to resolve mba region\n"); return ret; } + of_node_put(node); qproc->mba_phys = r.start; qproc->mba_size = resource_size(&r); @@ -932,6 +933,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) dev_err(qproc->dev, "unable to resolve mpss region\n"); return ret; } + of_node_put(node); qproc->mpss_phys = qproc->mpss_reloc = r.start; qproc->mpss_size = resource_size(&r); diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 5dcc9bf1c5bc..e8e12c2b1d0e 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -227,6 +227,7 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink, init_completion(&channel->open_req); init_completion(&channel->open_ack); + init_completion(&channel->intent_req_comp); INIT_LIST_HEAD(&channel->done_intents); INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work); @@ -1616,3 +1617,6 @@ void qcom_glink_native_unregister(struct qcom_glink *glink) device_unregister(glink->dev); } EXPORT_SYMBOL_GPL(qcom_glink_native_unregister); + +MODULE_DESCRIPTION("Qualcomm GLINK driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index b01774e9fac0..f1a2147a6d84 100644 --- a/drivers/rpmsg/qcom_smd.c +++ b/drivers/rpmsg/qcom_smd.c @@ -1043,12 +1043,12 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed void *info; int ret; - channel = devm_kzalloc(&edge->dev, sizeof(*channel), GFP_KERNEL); + channel = kzalloc(sizeof(*channel), GFP_KERNEL); if (!channel) return ERR_PTR(-ENOMEM); channel->edge = edge; - channel->name = devm_kstrdup(&edge->dev, name, GFP_KERNEL); + channel->name = kstrdup(name, GFP_KERNEL); if (!channel->name) return ERR_PTR(-ENOMEM); @@ -1098,8 +1098,8 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed return channel; free_name_and_channel: - devm_kfree(&edge->dev, channel->name); - devm_kfree(&edge->dev, channel); + kfree(channel->name); + kfree(channel); return ERR_PTR(ret); } @@ -1320,13 +1320,13 @@ static int qcom_smd_parse_edge(struct device *dev, */ static void qcom_smd_edge_release(struct device *dev) { - struct 
qcom_smd_channel *channel; + struct qcom_smd_channel *channel, *tmp; struct qcom_smd_edge *edge = to_smd_edge(dev); - list_for_each_entry(channel, &edge->channels, list) { - SET_RX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED); - SET_RX_CHANNEL_INFO(channel, head, 0); - SET_RX_CHANNEL_INFO(channel, tail, 0); + list_for_each_entry_safe(channel, tmp, &edge->channels, list) { + list_del(&channel->list); + kfree(channel->name); + kfree(channel); } kfree(edge); diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c index e0996fce3963..6a5b5b16145e 100644 --- a/drivers/rpmsg/rpmsg_char.c +++ b/drivers/rpmsg/rpmsg_char.c @@ -581,4 +581,6 @@ static void rpmsg_chrdev_exit(void) unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX); } module_exit(rpmsg_chrdev_exit); + +MODULE_ALIAS("rpmsg:rpmsg_chrdev"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c index e1cfa06810ef..e79f2a181ad2 100644 --- a/drivers/rtc/hctosys.c +++ b/drivers/rtc/hctosys.c @@ -49,6 +49,11 @@ static int __init rtc_hctosys(void) tv64.tv_sec = rtc_tm_to_time64(&tm); +#if BITS_PER_LONG == 32 + if (tv64.tv_sec > INT_MAX) + goto err_read; +#endif + err = do_settimeofday64(&tv64); dev_info(rtc->dev.parent, diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 8cec9a02c0b8..e4f951e968a4 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -359,6 +359,11 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { int err; + if (!rtc->ops) + return -ENODEV; + else if (!rtc->ops->set_alarm) + return -EINVAL; + err = rtc_valid_tm(&alarm->time); if (err != 0) return err; @@ -779,7 +784,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) } timerqueue_add(&rtc->timerqueue, &timer->node); - if (!next) { + if (!next || ktime_before(timer->node.expires, next->expires)) { struct rtc_wkalrm alarm; int err; alarm.time = rtc_ktime_to_tm(timer->node.expires); diff --git a/drivers/rtc/rtc-ac100.c b/drivers/rtc/rtc-ac100.c index 9e336184491c..8ff9dc3fe5bf 100644 --- a/drivers/rtc/rtc-ac100.c +++ b/drivers/rtc/rtc-ac100.c @@ -137,13 +137,15 @@ static unsigned long ac100_clkout_recalc_rate(struct clk_hw *hw, div = (reg >> AC100_CLKOUT_PRE_DIV_SHIFT) & ((1 << AC100_CLKOUT_PRE_DIV_WIDTH) - 1); prate = divider_recalc_rate(hw, prate, div, - ac100_clkout_prediv, 0); + ac100_clkout_prediv, 0, + AC100_CLKOUT_PRE_DIV_WIDTH); } div = (reg >> AC100_CLKOUT_DIV_SHIFT) & (BIT(AC100_CLKOUT_DIV_WIDTH) - 1); return divider_recalc_rate(hw, prate, div, NULL, - CLK_DIVIDER_POWER_OF_TWO); + CLK_DIVIDER_POWER_OF_TWO, + AC100_CLKOUT_DIV_WIDTH); } static long ac100_clkout_round_rate(struct clk_hw *hw, unsigned long rate, @@ -567,6 +569,12 @@ static int ac100_rtc_probe(struct platform_device *pdev) return chip->irq; } + chip->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(chip->rtc)) + return PTR_ERR(chip->rtc); + + chip->rtc->ops = &ac100_rtc_ops; + ret = devm_request_threaded_irq(&pdev->dev, chip->irq, NULL, ac100_rtc_irq, IRQF_SHARED | IRQF_ONESHOT, @@ -586,17 +594,16 @@ static int ac100_rtc_probe(struct platform_device *pdev) /* clear counter alarm pending interrupts */ regmap_write(chip->regmap, AC100_ALM_INT_STA, AC100_ALM_INT_ENABLE); - chip->rtc = devm_rtc_device_register(&pdev->dev, "rtc-ac100", - &ac100_rtc_ops, THIS_MODULE); - if (IS_ERR(chip->rtc)) { - dev_err(&pdev->dev, "unable to register device\n"); - return PTR_ERR(chip->rtc); - } - ret = ac100_rtc_register_clks(chip); if (ret) return ret; + ret = 
rtc_register_device(chip->rtc); + if (ret) { + dev_err(&pdev->dev, "unable to register device\n"); + return ret; + } + dev_info(&pdev->dev, "RTC enabled\n"); return 0; diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c index 796ac792a381..6cee61201c30 100644 --- a/drivers/rtc/rtc-brcmstb-waketimer.c +++ b/drivers/rtc/rtc-brcmstb-waketimer.c @@ -253,7 +253,7 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev) ret = devm_request_irq(dev, timer->irq, brcmstb_waketmr_irq, 0, "brcmstb-waketimer", timer); if (ret < 0) - return ret; + goto err_clk; timer->reboot_notifier.notifier_call = brcmstb_waketmr_reboot; register_reboot_notifier(&timer->reboot_notifier); @@ -262,12 +262,21 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev) &brcmstb_waketmr_ops, THIS_MODULE); if (IS_ERR(timer->rtc)) { dev_err(dev, "unable to register device\n"); - unregister_reboot_notifier(&timer->reboot_notifier); - return PTR_ERR(timer->rtc); + ret = PTR_ERR(timer->rtc); + goto err_notifier; } dev_info(dev, "registered, with irq %d\n", timer->irq); + return 0; + +err_notifier: + unregister_reboot_notifier(&timer->reboot_notifier); + +err_clk: + if (timer->clk) + clk_disable_unprepare(timer->clk); + return ret; } diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c index d67769265185..a1c44d0c8557 100644 --- a/drivers/rtc/rtc-goldfish.c +++ b/drivers/rtc/rtc-goldfish.c @@ -235,3 +235,5 @@ static struct platform_driver goldfish_rtc = { }; module_platform_driver(goldfish_rtc); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index f4c070ea8384..6620016869cf 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -154,6 +154,8 @@ struct m41t80_data { struct rtc_device *rtc; #ifdef CONFIG_COMMON_CLK struct clk_hw sqw; + unsigned long freq; + unsigned int sqwe; #endif }; @@ -443,43 +445,40 @@ static SIMPLE_DEV_PM_OPS(m41t80_pm, m41t80_suspend, m41t80_resume); #ifdef CONFIG_COMMON_CLK #define sqw_to_m41t80_data(_hw) container_of(_hw, struct m41t80_data, sqw) -static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) +static unsigned long m41t80_decode_freq(int setting) +{ + return (setting == 0) ? 0 : (setting == 1) ? M41T80_SQW_MAX_FREQ : + M41T80_SQW_MAX_FREQ >> setting; +} + +static unsigned long m41t80_get_freq(struct m41t80_data *m41t80) { - struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw); struct i2c_client *client = m41t80->client; int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ? 
M41T80_REG_WDAY : M41T80_REG_SQW; int ret = i2c_smbus_read_byte_data(client, reg_sqw); - unsigned long val = M41T80_SQW_MAX_FREQ; if (ret < 0) return 0; + return m41t80_decode_freq(ret >> 4); +} - ret >>= 4; - if (ret == 0) - val = 0; - else if (ret > 1) - val = val / (1 << ret); - - return val; +static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return sqw_to_m41t80_data(hw)->freq; } static long m41t80_sqw_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) { - int i, freq = M41T80_SQW_MAX_FREQ; - - if (freq <= rate) - return freq; - - for (i = 2; i <= ilog2(M41T80_SQW_MAX_FREQ); i++) { - freq /= 1 << i; - if (freq <= rate) - return freq; - } - - return 0; + if (rate >= M41T80_SQW_MAX_FREQ) + return M41T80_SQW_MAX_FREQ; + if (rate >= M41T80_SQW_MAX_FREQ / 4) + return M41T80_SQW_MAX_FREQ / 4; + if (!rate) + return 0; + return 1 << ilog2(rate); } static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate, @@ -491,17 +490,12 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate, M41T80_REG_WDAY : M41T80_REG_SQW; int reg, ret, val = 0; - if (rate) { - if (!is_power_of_2(rate)) - return -EINVAL; - val = ilog2(rate); - if (val == ilog2(M41T80_SQW_MAX_FREQ)) - val = 1; - else if (val < (ilog2(M41T80_SQW_MAX_FREQ) - 1)) - val = ilog2(M41T80_SQW_MAX_FREQ) - val; - else - return -EINVAL; - } + if (rate >= M41T80_SQW_MAX_FREQ) + val = 1; + else if (rate >= M41T80_SQW_MAX_FREQ / 4) + val = 2; + else if (rate) + val = 15 - ilog2(rate); reg = i2c_smbus_read_byte_data(client, reg_sqw); if (reg < 0) @@ -510,10 +504,9 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate, reg = (reg & 0x0f) | (val << 4); ret = i2c_smbus_write_byte_data(client, reg_sqw, reg); - if (ret < 0) - return ret; - - return -EINVAL; + if (!ret) + m41t80->freq = m41t80_decode_freq(val); + return ret; } static int m41t80_sqw_control(struct clk_hw *hw, bool enable) @@ -530,7 +523,10 @@ static int m41t80_sqw_control(struct clk_hw *hw, bool enable) else ret &= ~M41T80_ALMON_SQWE; - return i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret); + ret = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret); + if (!ret) + m41t80->sqwe = enable; + return ret; } static int m41t80_sqw_prepare(struct clk_hw *hw) @@ -545,14 +541,7 @@ static void m41t80_sqw_unprepare(struct clk_hw *hw) static int m41t80_sqw_is_prepared(struct clk_hw *hw) { - struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw); - struct i2c_client *client = m41t80->client; - int ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); - - if (ret < 0) - return ret; - - return !!(ret & M41T80_ALMON_SQWE); + return sqw_to_m41t80_data(hw)->sqwe; } static const struct clk_ops m41t80_sqw_ops = { @@ -587,6 +576,7 @@ static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80) init.parent_names = NULL; init.num_parents = 0; m41t80->sqw.init = &init; + m41t80->freq = m41t80_get_freq(m41t80); /* optional override of the clockname */ of_property_read_string(node, "clock-output-names", &init.name); @@ -895,7 +885,6 @@ static int m41t80_probe(struct i2c_client *client, { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); int rc = 0; - struct rtc_device *rtc = NULL; struct rtc_time tm; struct m41t80_data *m41t80_data = NULL; bool wakeup_source = false; @@ -919,6 +908,10 @@ static int m41t80_probe(struct i2c_client *client, m41t80_data->features = id->driver_data; i2c_set_clientdata(client, m41t80_data); + m41t80_data->rtc = 
devm_rtc_allocate_device(&client->dev); + if (IS_ERR(m41t80_data->rtc)) + return PTR_ERR(m41t80_data->rtc); + #ifdef CONFIG_OF wakeup_source = of_property_read_bool(client->dev.of_node, "wakeup-source"); @@ -942,15 +935,11 @@ static int m41t80_probe(struct i2c_client *client, device_init_wakeup(&client->dev, true); } - rtc = devm_rtc_device_register(&client->dev, client->name, - &m41t80_rtc_ops, THIS_MODULE); - if (IS_ERR(rtc)) - return PTR_ERR(rtc); + m41t80_data->rtc->ops = &m41t80_rtc_ops; - m41t80_data->rtc = rtc; if (client->irq <= 0) { /* We cannot support UIE mode if we do not have an IRQ line */ - rtc->uie_unsupported = 1; + m41t80_data->rtc->uie_unsupported = 1; } /* Make sure HT (Halt Update) bit is cleared */ @@ -1003,6 +992,11 @@ static int m41t80_probe(struct i2c_client *client, if (m41t80_data->features & M41T80_FEATURE_SQ) m41t80_sqw_register_clk(m41t80_data); #endif + + rc = rtc_register_device(m41t80_data->rtc); + if (rc) + return rc; + return 0; } diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c index e2a946c0e667..60f2250fd96b 100644 --- a/drivers/rtc/rtc-opal.c +++ b/drivers/rtc/rtc-opal.c @@ -57,7 +57,8 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms) static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) { - long rc = OPAL_BUSY; + s64 rc = OPAL_BUSY; + int retries = 10; u32 y_m_d; u64 h_m_s_ms; __be32 __y_m_d; @@ -65,10 +66,17 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); - if (rc == OPAL_BUSY_EVENT) + if (rc == OPAL_BUSY_EVENT) { + msleep(OPAL_BUSY_DELAY_MS); opal_poll_events(NULL); - else - msleep(10); + } else if (rc == OPAL_BUSY) { + msleep(OPAL_BUSY_DELAY_MS); + } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) { + if (retries--) { + msleep(10); /* Wait 10ms before retry */ + rc = OPAL_BUSY; /* go around again */ + } + } } if (rc != OPAL_SUCCESS) @@ -83,17 +91,26 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) { - long rc = OPAL_BUSY; + s64 rc = OPAL_BUSY; + int retries = 10; u32 y_m_d = 0; u64 h_m_s_ms = 0; tm_to_opal(tm, &y_m_d, &h_m_s_ms); + while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_rtc_write(y_m_d, h_m_s_ms); - if (rc == OPAL_BUSY_EVENT) + if (rc == OPAL_BUSY_EVENT) { + msleep(OPAL_BUSY_DELAY_MS); opal_poll_events(NULL); - else - msleep(10); + } else if (rc == OPAL_BUSY) { + msleep(OPAL_BUSY_DELAY_MS); + } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) { + if (retries--) { + msleep(10); /* Wait 10ms before retry */ + rc = OPAL_BUSY; /* go around again */ + } + } } return rc == OPAL_SUCCESS ? 0 : -EIO; diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c index 4bcfb88674d3..34aea38ebfa6 100644 --- a/drivers/rtc/rtc-palmas.c +++ b/drivers/rtc/rtc-palmas.c @@ -45,6 +45,42 @@ struct palmas_rtc { /* Total number of RTC registers needed to set time*/ #define PALMAS_NUM_TIME_REGS (PALMAS_YEARS_REG - PALMAS_SECONDS_REG + 1) +/* + * Special bin2bcd mapping to deal with bcd storage of year. 
+ * + * 0-69 -> 0xD0 + * 70-99 (1970 - 1999) -> 0xD0 - 0xF9 (correctly rolls to 0x00) + * 100-199 (2000 - 2099) -> 0x00 - 0x99 (does not roll to 0xA0 :-( ) + * 200-229 (2100 - 2129) -> 0xA0 - 0xC9 (really for completeness) + * 230- -> 0xC9 + * + * Confirmed: the only transition that does not work correctly for this rtc + * clock is the transition from 2099 to 2100, it proceeds to 2000. We will + * accept this issue since the clock retains and transitions the year correctly + * in all other conditions. + */ +static unsigned char year_bin2bcd(int val) +{ + if (val < 70) + return 0xD0; + if (val < 100) + return bin2bcd(val - 20) | 0x80; /* KISS leverage of bin2bcd */ + if (val >= 230) + return 0xC9; + if (val >= 200) + return bin2bcd(val - 180) | 0x80; + return bin2bcd(val - 100); +} + +static int year_bcd2bin(unsigned char val) +{ + if (val >= 0xD0) + return bcd2bin(val & 0x7F) + 20; + if (val >= 0xA0) + return bcd2bin(val & 0x7F) + 180; + return bcd2bin(val) + 100; +} + static int palmas_rtc_read_time(struct device *dev, struct rtc_time *tm) { unsigned char rtc_data[PALMAS_NUM_TIME_REGS]; @@ -71,7 +107,7 @@ static int palmas_rtc_read_time(struct device *dev, struct rtc_time *tm) tm->tm_hour = bcd2bin(rtc_data[2]); tm->tm_mday = bcd2bin(rtc_data[3]); tm->tm_mon = bcd2bin(rtc_data[4]) - 1; - tm->tm_year = bcd2bin(rtc_data[5]) + 100; + tm->tm_year = year_bcd2bin(rtc_data[5]); return ret; } @@ -87,7 +123,7 @@ static int palmas_rtc_set_time(struct device *dev, struct rtc_time *tm) rtc_data[2] = bin2bcd(tm->tm_hour); rtc_data[3] = bin2bcd(tm->tm_mday); rtc_data[4] = bin2bcd(tm->tm_mon + 1); - rtc_data[5] = bin2bcd(tm->tm_year - 100); + rtc_data[5] = year_bin2bcd(tm->tm_year); /* Stop RTC while updating the RTC time registers */ ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG, @@ -142,7 +178,7 @@ static int palmas_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) alm->time.tm_hour = bcd2bin(alarm_data[2]); alm->time.tm_mday = bcd2bin(alarm_data[3]); alm->time.tm_mon = bcd2bin(alarm_data[4]) - 1; - alm->time.tm_year = bcd2bin(alarm_data[5]) + 100; + alm->time.tm_year = year_bcd2bin(alarm_data[5]); ret = palmas_read(palmas, PALMAS_RTC_BASE, PALMAS_RTC_INTERRUPTS_REG, &int_val); @@ -173,7 +209,7 @@ static int palmas_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) alarm_data[2] = bin2bcd(alm->time.tm_hour); alarm_data[3] = bin2bcd(alm->time.tm_mday); alarm_data[4] = bin2bcd(alm->time.tm_mon + 1); - alarm_data[5] = bin2bcd(alm->time.tm_year - 100); + alarm_data[5] = year_bin2bcd(alm->time.tm_year); ret = palmas_bulk_write(palmas, PALMAS_RTC_BASE, PALMAS_ALARM_SECONDS_REG, alarm_data, PALMAS_NUM_TIME_REGS); diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c index cea6ea4df970..8c836c51a508 100644 --- a/drivers/rtc/rtc-pcf8563.c +++ b/drivers/rtc/rtc-pcf8563.c @@ -422,7 +422,7 @@ static unsigned long pcf8563_clkout_recalc_rate(struct clk_hw *hw, return 0; buf &= PCF8563_REG_CLKO_F_MASK; - return clkout_rates[ret]; + return clkout_rates[buf]; } static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate, diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index e1687e19c59f..a30f24cb6c83 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -308,7 +308,8 @@ static int pl031_remove(struct amba_device *adev) dev_pm_clear_wake_irq(&adev->dev); device_init_wakeup(&adev->dev, false); - free_irq(adev->irq[0], ldata); + if (adev->irq[0]) + free_irq(adev->irq[0], ldata); rtc_device_unregister(ldata->rtc); 
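/*
 * Editor's note on two arithmetic details in the RTC hunks above
 * (illustrative only, not part of the patch):
 *
 * m41t80: the SQW rate nibble maps as val == 1 -> 32768 Hz and
 * val >= 2 -> 32768 >> val, assuming m41t80_decode_freq() (whose body is
 * not in this hunk) implements exactly that mapping. Round trip for
 * 1024 Hz: set_rate computes val = 15 - ilog2(1024) = 5, and
 * 32768 >> 5 = 1024. The encoding has no 16384 Hz step, which is why
 * round_rate() drops straight from M41T80_SQW_MAX_FREQ to MAX_FREQ / 4.
 *
 * palmas: round-trip checks for the year mapping:
 *   1999: tm_year  99 -> bin2bcd(99 - 20)  | 0x80 = 0xF9 -> bcd2bin(0x79) + 20  =  99
 *   2017: tm_year 117 -> bin2bcd(117 - 100)       = 0x17 -> bcd2bin(0x17) + 100 = 117
 *   2100: tm_year 200 -> bin2bcd(200 - 180) | 0x80 = 0xA0 -> bcd2bin(0x20) + 180 = 200
 * The only lossy transition remains the documented 2099 -> 2100 case,
 * where the hardware rolls 0x99 over to 0x00 (year 2000) instead of 0xA0.
 */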
iounmap(ldata->base); kfree(ldata); @@ -381,12 +382,13 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_rtc; } - if (request_irq(adev->irq[0], pl031_interrupt, - vendor->irqflags, "rtc-pl031", ldata)) { - ret = -EIO; - goto out_no_irq; + if (adev->irq[0]) { + ret = request_irq(adev->irq[0], pl031_interrupt, + vendor->irqflags, "rtc-pl031", ldata); + if (ret) + goto out_no_irq; + dev_pm_set_wake_irq(&adev->dev, adev->irq[0]); } - dev_pm_set_wake_irq(&adev->dev, adev->irq[0]); return 0; out_no_irq: diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c index 35c9aada07c8..79c8da54e922 100644 --- a/drivers/rtc/rtc-rk808.c +++ b/drivers/rtc/rtc-rk808.c @@ -416,12 +416,11 @@ static int rk808_rtc_probe(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 1); - rk808_rtc->rtc = devm_rtc_device_register(&pdev->dev, "rk808-rtc", - &rk808_rtc_ops, THIS_MODULE); - if (IS_ERR(rk808_rtc->rtc)) { - ret = PTR_ERR(rk808_rtc->rtc); - return ret; - } + rk808_rtc->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rk808_rtc->rtc)) + return PTR_ERR(rk808_rtc->rtc); + + rk808_rtc->rtc->ops = &rk808_rtc_ops; rk808_rtc->irq = platform_get_irq(pdev, 0); if (rk808_rtc->irq < 0) { @@ -438,9 +437,10 @@ static int rk808_rtc_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n", rk808_rtc->irq, ret); + return ret; } - return ret; + return rtc_register_device(rk808_rtc->rtc); } static struct platform_driver rk808_rtc_driver = { diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c index 026035373ae6..38a12435b5a0 100644 --- a/drivers/rtc/rtc-rp5c01.c +++ b/drivers/rtc/rtc-rp5c01.c @@ -249,16 +249,24 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev) platform_set_drvdata(dev, priv); - rtc = devm_rtc_device_register(&dev->dev, "rtc-rp5c01", &rp5c01_rtc_ops, - THIS_MODULE); + rtc = devm_rtc_allocate_device(&dev->dev); if (IS_ERR(rtc)) return PTR_ERR(rtc); + + rtc->ops = &rp5c01_rtc_ops; + priv->rtc = rtc; error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr); if (error) return error; + error = rtc_register_device(rtc); + if (error) { + sysfs_remove_bin_file(&dev->dev.kobj, &priv->nvram_attr); + return error; + } + return 0; } diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c index d8ef9e052c4f..9af591d5223c 100644 --- a/drivers/rtc/rtc-snvs.c +++ b/drivers/rtc/rtc-snvs.c @@ -132,20 +132,23 @@ static int snvs_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct snvs_rtc_data *data = dev_get_drvdata(dev); unsigned long time; + int ret; rtc_tm_to_time(tm, &time); /* Disable RTC first */ - snvs_rtc_enable(data, false); + ret = snvs_rtc_enable(data, false); + if (ret) + return ret; /* Write 32-bit time to 47-bit timer, leaving 15 LSBs blank */ regmap_write(data->regmap, data->offset + SNVS_LPSRTCLR, time << CNTR_TO_SECS_SH); regmap_write(data->regmap, data->offset + SNVS_LPSRTCMR, time >> (32 - CNTR_TO_SECS_SH)); /* Enable RTC again */ - snvs_rtc_enable(data, true); + ret = snvs_rtc_enable(data, true); - return 0; + return ret; } static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) @@ -288,7 +291,11 @@ static int snvs_rtc_probe(struct platform_device *pdev) regmap_write(data->regmap, data->offset + SNVS_LPSR, 0xffffffff); /* Enable RTC */ - snvs_rtc_enable(data, true); + ret = snvs_rtc_enable(data, true); + if (ret) { + dev_err(&pdev->dev, "failed to enable rtc %d\n", ret); + goto error_rtc_device_register; + } 
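/*
 * Editor's note: the conversions above and below (m41t80, rk808, rp5c01,
 * tps6586x, tps65910, vr41xx) all move to the same two-stage idiom.
 * Minimal sketch with a hypothetical "foo" driver, assuming nothing
 * beyond the devm_rtc_allocate_device()/rtc_register_device() API used
 * in these hunks:
 */
static int foo_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	/* allocate early: later probe failures are cleaned up by devm */
	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &foo_rtc_ops;
	/* ... request IRQs, read hardware state, tweak rtc fields ... */

	/* register last: the device becomes visible to userspace only now */
	return rtc_register_device(rtc);
}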
device_init_wakeup(&pdev->dev, true); diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index 3d2216ccd860..8eb2b6dd36fe 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -74,7 +74,7 @@ #define SUN6I_ALARM_CONFIG_WAKEUP BIT(0) #define SUN6I_LOSC_OUT_GATING 0x0060 -#define SUN6I_LOSC_OUT_GATING_EN BIT(0) +#define SUN6I_LOSC_OUT_GATING_EN_OFFSET 0 /* * Get date values @@ -253,7 +253,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node) &clkout_name); rtc->ext_losc = clk_register_gate(NULL, clkout_name, rtc->hw.init->name, 0, rtc->base + SUN6I_LOSC_OUT_GATING, - SUN6I_LOSC_OUT_GATING_EN, 0, + SUN6I_LOSC_OUT_GATING_EN_OFFSET, 0, &rtc->lock); if (IS_ERR(rtc->ext_losc)) { pr_crit("Couldn't register the LOSC external gate\n"); diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c index a3418a8a3796..97fdc99bfeef 100644 --- a/drivers/rtc/rtc-tps6586x.c +++ b/drivers/rtc/rtc-tps6586x.c @@ -276,14 +276,15 @@ static int tps6586x_rtc_probe(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 1); platform_set_drvdata(pdev, rtc); - rtc->rtc = devm_rtc_device_register(&pdev->dev, dev_name(&pdev->dev), - &tps6586x_rtc_ops, THIS_MODULE); + rtc->rtc = devm_rtc_allocate_device(&pdev->dev); if (IS_ERR(rtc->rtc)) { ret = PTR_ERR(rtc->rtc); - dev_err(&pdev->dev, "RTC device register: ret %d\n", ret); + dev_err(&pdev->dev, "RTC allocate device: ret %d\n", ret); goto fail_rtc_register; } + rtc->rtc->ops = &tps6586x_rtc_ops; + ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL, tps6586x_rtc_irq, IRQF_ONESHOT, @@ -294,6 +295,13 @@ static int tps6586x_rtc_probe(struct platform_device *pdev) goto fail_rtc_register; } disable_irq(rtc->irq); + + ret = rtc_register_device(rtc->rtc); + if (ret) { + dev_err(&pdev->dev, "RTC device register: ret %d\n", ret); + goto fail_rtc_register; + } + return 0; fail_rtc_register: diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c index d0244d7979fc..a56b526db89a 100644 --- a/drivers/rtc/rtc-tps65910.c +++ b/drivers/rtc/rtc-tps65910.c @@ -380,6 +380,10 @@ static int tps65910_rtc_probe(struct platform_device *pdev) if (!tps_rtc) return -ENOMEM; + tps_rtc->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(tps_rtc->rtc)) + return PTR_ERR(tps_rtc->rtc); + /* Clear pending interrupts */ ret = regmap_read(tps65910->regmap, TPS65910_RTC_STATUS, &rtc_reg); if (ret < 0) @@ -421,10 +425,10 @@ static int tps65910_rtc_probe(struct platform_device *pdev) tps_rtc->irq = irq; device_set_wakeup_capable(&pdev->dev, 1); - tps_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, - &tps65910_rtc_ops, THIS_MODULE); - if (IS_ERR(tps_rtc->rtc)) { - ret = PTR_ERR(tps_rtc->rtc); + tps_rtc->rtc->ops = &tps65910_rtc_ops; + + ret = rtc_register_device(tps_rtc->rtc); + if (ret) { dev_err(&pdev->dev, "RTC device register: err %d\n", ret); return ret; } diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c index 560d9a5e0225..a9528083061d 100644 --- a/drivers/rtc/rtc-tx4939.c +++ b/drivers/rtc/rtc-tx4939.c @@ -86,7 +86,8 @@ static int tx4939_rtc_read_time(struct device *dev, struct rtc_time *tm) for (i = 2; i < 6; i++) buf[i] = __raw_readl(&rtcreg->dat); spin_unlock_irq(&pdata->lock); - sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2]; + sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) | + (buf[3] << 8) | buf[2]; rtc_time_to_tm(sec, tm); return rtc_valid_tm(tm); } @@ -147,7 +148,8 @@ static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) alrm->enabled = 
(ctl & TX4939_RTCCTL_ALME) ? 1 : 0; alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 1 : 0; spin_unlock_irq(&pdata->lock); - sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2]; + sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) | + (buf[3] << 8) | buf[2]; rtc_time_to_tm(sec, &alrm->time); return rtc_valid_tm(&alrm->time); } diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c index 7ce22967fd16..7ed010714f29 100644 --- a/drivers/rtc/rtc-vr41xx.c +++ b/drivers/rtc/rtc-vr41xx.c @@ -292,13 +292,14 @@ static int rtc_probe(struct platform_device *pdev) goto err_rtc1_iounmap; } - rtc = devm_rtc_device_register(&pdev->dev, rtc_name, &vr41xx_rtc_ops, - THIS_MODULE); + rtc = devm_rtc_allocate_device(&pdev->dev); if (IS_ERR(rtc)) { retval = PTR_ERR(rtc); goto err_iounmap_all; } + rtc->ops = &vr41xx_rtc_ops; + rtc->max_user_freq = MAX_PERIODIC_RATE; spin_lock_irq(&rtc_lock); @@ -340,6 +341,10 @@ static int rtc_probe(struct platform_device *pdev) dev_info(&pdev->dev, "Real Time Clock of NEC VR4100 series\n"); + retval = rtc_register_device(rtc); + if (retval) + goto err_iounmap_all; + return 0; err_iounmap_all: diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 29f35e29d480..d072f84a8535 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2596,8 +2596,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr) case DASD_CQR_QUEUED: /* request was not started - just set to cleared */ cqr->status = DASD_CQR_CLEARED; - if (cqr->callback_data == DASD_SLEEPON_START_TAG) - cqr->callback_data = DASD_SLEEPON_END_TAG; break; case DASD_CQR_IN_IO: /* request in IO - terminate IO and release again */ @@ -3051,7 +3049,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, cqr->callback_data = req; cqr->status = DASD_CQR_FILLED; cqr->dq = dq; - req->completion_data = cqr; + *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr; + blk_mq_start_request(req); spin_lock(&block->queue_lock); list_add_tail(&cqr->blocklist, &block->ccw_queue); @@ -3075,12 +3074,13 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, */ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) { - struct dasd_ccw_req *cqr = req->completion_data; struct dasd_block *block = req->q->queuedata; struct dasd_device *device; + struct dasd_ccw_req *cqr; unsigned long flags; int rc = 0; + cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)); if (!cqr) return BLK_EH_NOT_HANDLED; @@ -3186,6 +3186,7 @@ static int dasd_alloc_queue(struct dasd_block *block) int rc; block->tag_set.ops = &dasd_mq_ops; + block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *); block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES; block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV; block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; @@ -3917,9 +3918,12 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) wait_event(dasd_flush_wq, (cqr->status != DASD_CQR_CLEAR_PENDING)); - /* mark sleepon requests as ended */ - if (cqr->callback_data == DASD_SLEEPON_START_TAG) - cqr->callback_data = DASD_SLEEPON_END_TAG; + /* + * requeue requests to blocklayer will only work + * for block device requests + */ + if (_dasd_requeue_request(cqr)) + continue; /* remove requests from device and block queue */ list_del_init(&cqr->devlist); @@ -3932,13 +3936,6 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) cqr = refers; } - /* - * requeue requests to blocklayer will only work - * for block device requests - */ - if 
(_dasd_requeue_request(cqr)) - continue; - if (cqr->block) list_del_init(&cqr->blocklist); cqr->block->base->discipline->free_cp( @@ -3955,8 +3952,7 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) list_splice_tail(&requeue_queue, &device->ccw_queue); spin_unlock_irq(get_ccwdev_lock(device->cdev)); } - /* wake up generic waitqueue for eventually ended sleepon requests */ - wake_up(&generic_waitq); + dasd_schedule_device_bh(device); return rc; } diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index c94b606e0df8..ee14d8e45c97 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -2803,6 +2803,16 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) erp = dasd_3990_erp_handle_match_erp(cqr, erp); } + + /* + * For path verification work we need to stick with the path that was + * originally chosen so that the per path configuration data is + * assigned correctly. + */ + if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) { + erp->lpm = cqr->lpm; + } + if (device->features & DASD_FEATURE_ERPLOG) { /* print current erp_chain */ dev_err(&device->cdev->dev, diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index 62f5f04d8f61..5e963fe0e38d 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -592,13 +592,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu, int dasd_alias_add_device(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; - struct alias_lcu *lcu; + __u8 uaddr = private->uid.real_unit_addr; + struct alias_lcu *lcu = private->lcu; unsigned long flags; int rc; - lcu = private->lcu; rc = 0; spin_lock_irqsave(&lcu->lock, flags); + /* + * Check if device and lcu type differ. If so, the uac data may be + * outdated and needs to be updated. 
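+	 * (Editor's illustration: a unit address that reappears as a HyperPAV
+	 * alias while the cached uac entry still records a base device would
+	 * otherwise be added to the LCU with stale configuration data.)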
+ */ + if (private->uid.type != lcu->uac->unit[uaddr].ua_type) { + lcu->flags |= UPDATE_PENDING; + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "uid type mismatch - trigger rescan"); + } if (!(lcu->flags & UPDATE_PENDING)) { rc = _add_device_to_lcu(lcu, device, device); if (rc) diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 8eafcd5fa004..5ede251c52ca 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -530,10 +530,12 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, pfxdata->validity.define_extent = 1; /* private uid is kept up to date, conf_data may be outdated */ - if (startpriv->uid.type != UA_BASE_DEVICE) { + if (startpriv->uid.type == UA_BASE_PAV_ALIAS) pfxdata->validity.verify_base = 1; - if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) - pfxdata->validity.hyper_pav = 1; + + if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) { + pfxdata->validity.verify_base = 1; + pfxdata->validity.hyper_pav = 1; } rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize); @@ -3414,10 +3416,12 @@ static int prepare_itcw(struct itcw *itcw, pfxdata.validity.define_extent = 1; /* private uid is kept up to date, conf_data may be outdated */ - if (startpriv->uid.type != UA_BASE_DEVICE) { + if (startpriv->uid.type == UA_BASE_PAV_ALIAS) + pfxdata.validity.verify_base = 1; + + if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) { pfxdata.validity.verify_base = 1; - if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) - pfxdata.validity.hyper_pav = 1; + pfxdata.validity.hyper_pav = 1; } switch (cmd) { diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 05ac6ba15a53..ecc24a46e71a 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -17,6 +17,8 @@ CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH) CFLAGS_sclp_early_core.o += -march=z900 endif +CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE) + obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \ sclp_early.o sclp_early_core.o diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 7b0b295b2313..69687c16a150 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -451,6 +451,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area) static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) { + struct channel_path *chp; struct chp_link link; struct chp_id chpid; int status; @@ -463,10 +464,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) chpid.id = sei_area->rsid; /* allocate a new channel path structure, if needed */ status = chp_get_status(chpid); - if (status < 0) - chp_new(chpid); - else if (!status) + if (!status) return; + + if (status < 0) { + chp_new(chpid); + } else { + chp = chpid_to_chp(chpid); + mutex_lock(&chp->lock); + chp_update_desc(chp); + mutex_unlock(&chp->lock); + } memset(&link, 0, sizeof(struct chp_link)); link.chpid = chpid; if ((sei_area->vf & 0xc0) != 0) { diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index f98ea674c3d8..28837ad75712 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -796,6 +796,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_set_timeout(cdev, 0); cdev->private->iretry = 255; + cdev->private->async_kill_io_rc = -ETIMEDOUT; ret = ccw_device_cancel_halt_clear(cdev); if (ret == -EBUSY) { 
ccw_device_set_timeout(cdev, 3*HZ); @@ -872,7 +873,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) /* OK, i/o is dead now. Call interrupt handler. */ if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, - ERR_PTR(-EIO)); + ERR_PTR(cdev->private->async_kill_io_rc)); } static void @@ -889,14 +890,16 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_online_verify(cdev, 0); if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, - ERR_PTR(-EIO)); + ERR_PTR(cdev->private->async_kill_io_rc)); } void ccw_device_kill_io(struct ccw_device *cdev) { int ret; + ccw_device_set_timeout(cdev, 0); cdev->private->iretry = 255; + cdev->private->async_kill_io_rc = -EIO; ret = ccw_device_cancel_halt_clear(cdev); if (ret == -EBUSY) { ccw_device_set_timeout(cdev, 3*HZ); diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index cf8c4ac6323a..b22922ec32d1 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -160,7 +160,7 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) } /** - * ccw_device_start_key() - start a s390 channel program with key + * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key * @cdev: target ccw device * @cpa: logical start address of channel program * @intparm: user specific interruption parameter; will be presented back to @@ -171,10 +171,15 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) * @key: storage key to be used for the I/O * @flags: additional flags; defines the action to be performed for I/O * processing. + * @expires: timeout value in jiffies * * Start a S/390 channel program. When the interrupt arrives, the * IRQ handler is called, either immediately, delayed (dev-end missing, * or sense required) or never (no IRQ handler registered). + * This function notifies the device driver if the channel program has not + * completed during the time specified by @expires. If a timeout occurs, the + * channel program is terminated via xsch, hsch or csch, and the device's + * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). 
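+ * Example (editor's sketch, not taken from the patch): give a channel
+ * program ten seconds before the kill path delivers -ETIMEDOUT:
+ *
+ *	rc = ccw_device_start_timeout_key(cdev, cpa, (unsigned long)data,
+ *					  0, PAGE_DEFAULT_KEY, 0, 10 * HZ);
+ *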
* Returns: * %0, if the operation was successful; * -%EBUSY, if the device is busy, or status pending; @@ -183,9 +188,9 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) * Context: * Interrupts disabled, ccw device lock held */ -int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, - unsigned long intparm, __u8 lpm, __u8 key, - unsigned long flags) +int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, + unsigned long intparm, __u8 lpm, __u8 key, + unsigned long flags, int expires) { struct subchannel *sch; int ret; @@ -225,6 +230,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, switch (ret) { case 0: cdev->private->intparm = intparm; + if (expires) + ccw_device_set_timeout(cdev, expires); break; case -EACCES: case -ENODEV: @@ -235,7 +242,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, } /** - * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key + * ccw_device_start_key() - start a s390 channel program with key * @cdev: target ccw device * @cpa: logical start address of channel program * @intparm: user specific interruption parameter; will be presented back to @@ -246,15 +253,10 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, * @key: storage key to be used for the I/O * @flags: additional flags; defines the action to be performed for I/O * processing. - * @expires: timeout value in jiffies * * Start a S/390 channel program. When the interrupt arrives, the * IRQ handler is called, either immediately, delayed (dev-end missing, * or sense required) or never (no IRQ handler registered). - * This function notifies the device driver if the channel program has not - * completed during the time specified by @expires. If a timeout occurs, the - * channel program is terminated via xsch, hsch or csch, and the device's - * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). * Returns: * %0, if the operation was successful; * -%EBUSY, if the device is busy, or status pending; @@ -263,19 +265,12 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, * Context: * Interrupts disabled, ccw device lock held */ -int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, - unsigned long intparm, __u8 lpm, __u8 key, - unsigned long flags, int expires) +int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, + unsigned long intparm, __u8 lpm, __u8 key, + unsigned long flags) { - int ret; - - if (!cdev) - return -ENODEV; - ccw_device_set_timeout(cdev, expires); - ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags); - if (ret != 0) - ccw_device_set_timeout(cdev, 0); - return ret; + return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key, + flags, 0); } /** @@ -490,18 +485,20 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id) EXPORT_SYMBOL(ccw_device_get_id); /** - * ccw_device_tm_start_key() - perform start function + * ccw_device_tm_start_timeout_key() - perform start function * @cdev: ccw device on which to perform the start function * @tcw: transport-command word to be started * @intparm: user defined parameter to be passed to the interrupt handler * @lpm: mask of paths to use * @key: storage key to use for storage access + * @expires: time span in jiffies after which to abort request * * Start the tcw on the given ccw device. Return zero on success, non-zero * otherwise. 
*/ -int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, - unsigned long intparm, u8 lpm, u8 key) +int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm, u8 key, + int expires) { struct subchannel *sch; int rc; @@ -528,37 +525,32 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, return -EACCES; } rc = cio_tm_start_key(sch, tcw, lpm, key); - if (rc == 0) + if (rc == 0) { cdev->private->intparm = intparm; + if (expires) + ccw_device_set_timeout(cdev, expires); + } return rc; } -EXPORT_SYMBOL(ccw_device_tm_start_key); +EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); /** - * ccw_device_tm_start_timeout_key() - perform start function + * ccw_device_tm_start_key() - perform start function * @cdev: ccw device on which to perform the start function * @tcw: transport-command word to be started * @intparm: user defined parameter to be passed to the interrupt handler * @lpm: mask of paths to use * @key: storage key to use for storage access - * @expires: time span in jiffies after which to abort request * * Start the tcw on the given ccw device. Return zero on success, non-zero * otherwise. */ -int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, - unsigned long intparm, u8 lpm, u8 key, - int expires) +int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm, u8 key) { - int ret; - - ccw_device_set_timeout(cdev, expires); - ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key); - if (ret != 0) - ccw_device_set_timeout(cdev, 0); - return ret; + return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0); } -EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); +EXPORT_SYMBOL(ccw_device_tm_start_key); /** * ccw_device_tm_start() - perform start function diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index af571d8d6925..90e4e3a7841b 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -157,6 +157,7 @@ struct ccw_device_private { unsigned long intparm; /* user interruption parameter */ struct qdio_irq *qdio_data; struct irb irb; /* device status */ + int async_kill_io_rc; struct senseid senseid; /* SenseID info */ struct pgid pgid[8]; /* path group IDs per chpid*/ struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index a4ad39ba3873..8941e7caaf4d 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -126,7 +126,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, int start, int count, int auto_ack) { - int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0; + int rc, tmp_count = count, tmp_start = start, nr = q->nr; unsigned int ccq = 0; qperf_inc(q, eqbs); @@ -149,14 +149,7 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, qperf_inc(q, eqbs_partial); DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x", tmp_count); - /* - * Retry once, if that fails bail out and process the - * extracted buffers before trying again. 
- */ - if (!retried++) - goto again; - else - return count - tmp_count; + return count - tmp_count; } DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); @@ -212,7 +205,10 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, return 0; } -/* returns number of examined buffers and their common state in *state */ +/* + * Returns number of examined buffers and their common state in *state. + * Requested number of buffers-to-examine must be > 0. + */ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, unsigned char *state, unsigned int count, int auto_ack, int merge_pending) @@ -223,17 +219,23 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, if (is_qebsm(q)) return qdio_do_eqbs(q, state, bufnr, count, auto_ack); - for (i = 0; i < count; i++) { - if (!__state) { - __state = q->slsb.val[bufnr]; - if (merge_pending && __state == SLSB_P_OUTPUT_PENDING) - __state = SLSB_P_OUTPUT_EMPTY; - } else if (merge_pending) { - if ((q->slsb.val[bufnr] & __state) != __state) - break; - } else if (q->slsb.val[bufnr] != __state) - break; + /* get initial state: */ + __state = q->slsb.val[bufnr]; + if (merge_pending && __state == SLSB_P_OUTPUT_PENDING) + __state = SLSB_P_OUTPUT_EMPTY; + + for (i = 1; i < count; i++) { bufnr = next_buf(bufnr); + + /* merge PENDING into EMPTY: */ + if (merge_pending && + q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING && + __state == SLSB_P_OUTPUT_EMPTY) + continue; + + /* stop if next state differs from initial state: */ + if (q->slsb.val[bufnr] != __state) + break; } *state = __state; return i; diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 48b3866a9ded..35286907c636 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -140,7 +140,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues) int i; for (i = 0; i < nr_queues; i++) { - q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); + q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL); if (!q) return -ENOMEM; @@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data) { struct ciw *ciw; struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; - int rc; memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag)); @@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data) ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); if (!ciw) { DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); - rc = -EINVAL; - goto out_err; + return -EINVAL; } irq_ptr->equeue = *ciw; ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); if (!ciw) { DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); - rc = -EINVAL; - goto out_err; + return -EINVAL; } irq_ptr->aqueue = *ciw; @@ -510,9 +507,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data) irq_ptr->orig_handler = init_data->cdev->handler; init_data->cdev->handler = qdio_int_handler; return 0; -out_err: - qdio_release_memory(irq_ptr); - return rc; } void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index f20b4d66c75f..72ce6ad95767 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -330,6 +330,8 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx) { struct ccw1 *ccw = chain->ch_ccw + idx; + if (ccw_is_test(ccw) || ccw_is_noop(ccw) || ccw_is_tic(ccw)) + return; if (!ccw->count) return; @@ -701,6 +703,10 @@ void cp_free(struct channel_program *cp) * and stores the 
result to ccwchain list. @cp must have been * initialized by a previous call with cp_init(). Otherwise, undefined * behavior occurs. + * For each chain composing the channel program: + * - On entry ch_len holds the count of CCWs to be translated. + * - On exit ch_len is adjusted to the count of successfully translated CCWs. + * This allows cp_free to find in ch_len the count of CCWs to free in a chain. * * The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced * as helpers to do ccw chain translation inside the kernel. Basically @@ -735,11 +741,18 @@ int cp_prefetch(struct channel_program *cp) for (idx = 0; idx < len; idx++) { ret = ccwchain_fetch_one(chain, idx, cp); if (ret) - return ret; + goto out_err; } } return 0; +out_err: + /* Only cleanup the chain elements that were actually translated. */ + chain->ch_len = idx; + list_for_each_entry_continue(chain, &cp->ccwchain_list, next) { + chain->ch_len = 0; + } + return ret; } /** diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c index c30420c517b1..3c800642134e 100644 --- a/drivers/s390/cio/vfio_ccw_fsm.c +++ b/drivers/s390/cio/vfio_ccw_fsm.c @@ -20,12 +20,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private) int ccode; __u8 lpm; unsigned long flags; + int ret; sch = private->sch; spin_lock_irqsave(sch->lock, flags); private->state = VFIO_CCW_STATE_BUSY; - spin_unlock_irqrestore(sch->lock, flags); orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm); @@ -38,10 +38,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private) * Initialize device status information */ sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; - return 0; + ret = 0; + break; case 1: /* Status pending */ case 2: /* Busy */ - return -EBUSY; + ret = -EBUSY; + break; case 3: /* Device/path not operational */ { lpm = orb->cmd.lpm; @@ -51,13 +53,16 @@ static int fsm_io_helper(struct vfio_ccw_private *private) sch->lpm = 0; if (cio_update_schib(sch)) - return -ENODEV; - - return sch->lpm ? -EACCES : -ENODEV; + ret = -ENODEV; + else + ret = sch->lpm ? -EACCES : -ENODEV; + break; } default: - return ccode; + ret = ccode; } + spin_unlock_irqrestore(sch->lock, flags); + return ret; } static void fsm_notoper(struct vfio_ccw_private *private, @@ -124,6 +129,11 @@ static void fsm_io_request(struct vfio_ccw_private *private, if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) { orb = (union orb *)io_region->orb_area; + /* Don't try to build a cp if transport mode is specified. 
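+		 * (Editor's note: a transport-mode ORB points at a TCW channel
+		 * program rather than a CCW chain; the cp translation code only
+		 * handles CCWs, hence the early -EOPNOTSUPP.)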
*/ + if (orb->tm.b) { + io_region->ret_code = -EOPNOTSUPP; + goto err_out; + } io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev), orb); if (io_region->ret_code) diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index b5f4006198b9..a9a56aa9c26b 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -218,8 +218,8 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq, weight += atomic_read(&zq->load); pref_weight += atomic_read(&pref_zq->load); if (weight == pref_weight) - return &zq->queue->total_request_count > - &pref_zq->queue->total_request_count; + return zq->queue->total_request_count > + pref_zq->queue->total_request_count; return weight > pref_weight; } diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 47a13c5723c6..6b1e83539a9d 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -564,9 +564,9 @@ enum qeth_cq { }; struct qeth_ipato { - int enabled; - int invert4; - int invert6; + bool enabled; + bool invert4; + bool invert6; struct list_head entries; }; @@ -580,6 +580,11 @@ struct qeth_cmd_buffer { void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *); }; +static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob) +{ + return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); +} + /** * definition of a qeth channel, used for read and write */ @@ -834,7 +839,7 @@ struct qeth_trap_id { */ static inline int qeth_get_elements_for_range(addr_t start, addr_t end) { - return PFN_UP(end - 1) - PFN_DOWN(start); + return PFN_UP(end) - PFN_DOWN(start); } static inline int qeth_get_micros(void) @@ -985,6 +990,9 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, int qeth_set_features(struct net_device *, netdev_features_t); int qeth_recover_features(struct net_device *); netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); +netdev_features_t qeth_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); int qeth_vm_request_mac(struct qeth_card *card); int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index bae7440abc01..939b5b5e97ef 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -19,6 +19,11 @@ #include #include #include +#include +#include +#include +#include + #include #include @@ -521,8 +526,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) queue == card->qdio.no_in_queues - 1; } - -static int qeth_issue_next_read(struct qeth_card *card) +static int __qeth_issue_next_read(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; @@ -553,6 +557,17 @@ static int qeth_issue_next_read(struct qeth_card *card) return rc; } +static int qeth_issue_next_read(struct qeth_card *card) +{ + int ret; + + spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); + ret = __qeth_issue_next_read(card); + spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); + + return ret; +} + static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) { struct qeth_reply *reply; @@ -956,7 +971,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) spin_lock_irqsave(&card->thread_mask_lock, flags); card->thread_running_mask &= ~thread; spin_unlock_irqrestore(&card->thread_mask_lock, flags); - wake_up(&card->wait_q); + wake_up_all(&card->wait_q); } 
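/*
 * Editor's note on the qeth_get_elements_for_range() fix above: with an
 * exclusive 'end', PFN_UP(end) - PFN_DOWN(start) counts every page the
 * buffer touches, e.g. start = 0x1ffc, end = 0x2004 yields
 * PFN_UP(0x2004) - PFN_DOWN(0x1ffc) = 3 - 1 = 2 elements for eight bytes
 * straddling a page boundary. An empty range (start == end) would still
 * count one page whenever start is not page-aligned, which is why the
 * callers changed below only invoke the helper when start != end.
 */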
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit); @@ -1160,6 +1175,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, } rc = qeth_get_problem(cdev, irb); if (rc) { + card->read_or_write_problem = 1; qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); goto out; @@ -1178,7 +1194,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, return; if (channel == &card->read && channel->state == CH_STATE_UP) - qeth_issue_next_read(card); + __qeth_issue_next_read(card); iob = channel->iob; index = channel->buf_no; @@ -1474,9 +1490,9 @@ static int qeth_setup_card(struct qeth_card *card) qeth_set_intial_options(card); /* IP address takeover */ INIT_LIST_HEAD(&card->ipato.entries); - card->ipato.enabled = 0; - card->ipato.invert4 = 0; - card->ipato.invert6 = 0; + card->ipato.enabled = false; + card->ipato.invert4 = false; + card->ipato.invert6 = false; /* init QDIO stuff */ qeth_init_qdio_info(card); INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); @@ -2068,7 +2084,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, unsigned long flags; struct qeth_reply *reply = NULL; unsigned long timeout, event_timeout; - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = NULL; QETH_CARD_TEXT(card, 2, "sendctl"); @@ -2082,23 +2098,27 @@ int qeth_send_control_data(struct qeth_card *card, int len, } reply->callback = reply_cb; reply->param = reply_param; - if (card->state == CARD_STATE_DOWN) - reply->seqno = QETH_IDX_COMMAND_SEQNO; - else - reply->seqno = card->seqno.ipa++; + init_waitqueue_head(&reply->wait_q); - spin_lock_irqsave(&card->lock, flags); - list_add_tail(&reply->list, &card->cmd_waiter_list); - spin_unlock_irqrestore(&card->lock, flags); QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN); while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; - qeth_prepare_control_data(card, len, iob); - if (IS_IPA(iob->data)) + if (IS_IPA(iob->data)) { + cmd = __ipa_cmd(iob); + cmd->hdr.seqno = card->seqno.ipa++; + reply->seqno = cmd->hdr.seqno; event_timeout = QETH_IPA_TIMEOUT; - else + } else { + reply->seqno = QETH_IDX_COMMAND_SEQNO; event_timeout = QETH_TIMEOUT; + } + qeth_prepare_control_data(card, len, iob); + + spin_lock_irqsave(&card->lock, flags); + list_add_tail(&reply->list, &card->cmd_waiter_list); + spin_unlock_irqrestore(&card->lock, flags); + timeout = jiffies + event_timeout; QETH_CARD_TEXT(card, 6, "noirqpnd"); @@ -2123,9 +2143,8 @@ int qeth_send_control_data(struct qeth_card *card, int len, /* we have only one long running ipassist, since we can ensure process context of this command we can sleep */ - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - if ((cmd->hdr.command == IPA_CMD_SETIP) && - (cmd->hdr.prot_version == QETH_PROT_IPV4)) { + if (cmd && cmd->hdr.command == IPA_CMD_SETIP && + cmd->hdr.prot_version == QETH_PROT_IPV4) { if (!wait_event_timeout(reply->wait_q, atomic_read(&reply->received), event_timeout)) goto time_err; @@ -2889,7 +2908,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card, memset(cmd, 0, sizeof(struct qeth_ipa_cmd)); cmd->hdr.command = command; cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; - cmd->hdr.seqno = card->seqno.ipa; + /* cmd->hdr.seqno is set by qeth_send_control_data() */ cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); cmd->hdr.rel_adapter_no = (__u8) card->info.portno; if (card->options.layer2) @@ -3854,10 +3873,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); int qeth_get_elements_no(struct qeth_card *card, struct sk_buff 
*skb, int extra_elems, int data_offset) { - int elements = qeth_get_elements_for_range( - (addr_t)skb->data + data_offset, - (addr_t)skb->data + skb_headlen(skb)) + - qeth_get_elements_for_frags(skb); + addr_t end = (addr_t)skb->data + skb_headlen(skb); + int elements = qeth_get_elements_for_frags(skb); + addr_t start = (addr_t)skb->data + data_offset; + + if (start != end) + elements += qeth_get_elements_for_range(start, end); if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { QETH_DBF_MESSAGE(2, "Invalid size of IP packet " @@ -5051,8 +5072,6 @@ static void qeth_core_free_card(struct qeth_card *card) QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); qeth_clean_channel(&card->read); qeth_clean_channel(&card->write); - if (card->dev) - free_netdev(card->dev); qeth_free_qdio_buffers(card); unregister_service_level(&card->qeth_service_level); kfree(card); @@ -5440,6 +5459,13 @@ int qeth_poll(struct napi_struct *napi, int budget) } EXPORT_SYMBOL_GPL(qeth_poll); +static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd) +{ + if (!cmd->hdr.return_code) + cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; + return cmd->hdr.return_code; +} + int qeth_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { @@ -6299,7 +6325,7 @@ static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card, (struct qeth_checksum_cmd *)reply->param; QETH_CARD_TEXT(card, 4, "chkdoccb"); - if (cmd->hdr.return_code) + if (qeth_setassparms_inspect_rc(cmd)) return 0; memset(chksum_cb, 0, sizeof(*chksum_cb)); @@ -6505,6 +6531,32 @@ netdev_features_t qeth_fix_features(struct net_device *dev, } EXPORT_SYMBOL_GPL(qeth_fix_features); +netdev_features_t qeth_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + /* GSO segmentation builds skbs with + * a (small) linear part for the headers, and + * page frags for the data. + * Compared to a linear skb, the header-only part consumes an + * additional buffer element. This reduces buffer utilization, and + * hurts throughput. So compress small segments into one element. 
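+ * (Editor's illustration: a ~1400 byte MSS plus roughly 66 bytes of
+ * headers and the usual headroom stays well below SKB_MAX_HEAD(0), so
+ * NETIF_F_SG is cleared and the stack linearizes each such segment into
+ * a single buffer element.)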
+ */ + if (netif_needs_gso(skb, features)) { + /* match skb_segment(): */ + unsigned int doffset = skb->data - skb_mac_header(skb); + unsigned int hsize = skb_shinfo(skb)->gso_size; + unsigned int hroom = skb_headroom(skb); + + /* linearize only if resulting skb allocations are order-0: */ + if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0)) + features &= ~NETIF_F_SG; + } + + return vlan_features_check(skb, features); +} +EXPORT_SYMBOL_GPL(qeth_features_check); + static int __init qeth_core_init(void) { int rc; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 760b023eae95..521293b1f4fa 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -935,8 +935,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) qeth_l2_set_offline(cgdev); if (card->dev) { - netif_napi_del(&card->napi); unregister_netdev(card->dev); + free_netdev(card->dev); card->dev = NULL; } return; @@ -963,6 +963,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = { .ndo_stop = qeth_l2_stop, .ndo_get_stats = qeth_get_stats, .ndo_start_xmit = qeth_l2_hard_start_xmit, + .ndo_features_check = qeth_features_check, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l2_set_rx_mode, .ndo_do_ioctl = qeth_do_ioctl, @@ -1009,6 +1010,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { card->dev->hw_features = NETIF_F_SG; card->dev->vlan_features = NETIF_F_SG; + card->dev->features |= NETIF_F_SG; /* OSA 3S and earlier has no RX/TX support */ if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) { card->dev->hw_features |= NETIF_F_IP_CSUM; @@ -1027,8 +1029,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) card->info.broadcast_capable = 1; qeth_l2_request_initial_mac(card); - card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * - PAGE_SIZE; SET_NETDEV_DEV(card->dev, &card->gdev->dev); netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); netif_carrier_off(card->dev); diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index 194ae9b577cc..8727b9517de8 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h @@ -40,8 +40,40 @@ struct qeth_ipaddr { unsigned int pfxlen; } a6; } u; - }; + +static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1, + struct qeth_ipaddr *a2) +{ + if (a1->proto != a2->proto) + return false; + if (a1->proto == QETH_PROT_IPV6) + return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr); + return a1->u.a4.addr == a2->u.a4.addr; +} + +static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1, + struct qeth_ipaddr *a2) +{ + /* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(), + * so 'proto' and 'addr' match for sure. + * + * For ucast: + * - 'mac' is always 0. + * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching + * values are required to avoid mixups in takeover eligibility. + * + * For mcast, + * - 'mac' is mapped from the IP, and thus always matches. + * - 'mask'/'pfxlen' is always 0. 
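+ * Editor's example: two NORMAL entries for the same IPv4 address but
+ * different netmasks match by IP yet fail the "all" match, so the second
+ * qeth_l3_add_ip() is rejected with -EADDRINUSE instead of silently
+ * mixing takeover-eligibility state.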
+ */ + if (a1->type != a2->type) + return false; + if (a1->proto == QETH_PROT_IPV6) + return a1->u.a6.pfxlen == a2->u.a6.pfxlen; + return a1->u.a4.mask == a2->u.a4.mask; +} + static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr) { u64 ret = 0; @@ -82,7 +114,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *); int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, const u8 *); -int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *); +void qeth_l3_update_ipato(struct qeth_card *card); struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions); int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *); int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index ab661a431f7c..1c62cbbaa66f 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -149,6 +149,24 @@ int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto, return -EINVAL; } +static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card, + struct qeth_ipaddr *query) +{ + u64 key = qeth_l3_ipaddr_hash(query); + struct qeth_ipaddr *addr; + + if (query->is_multicast) { + hash_for_each_possible(card->ip_mc_htable, addr, hnode, key) + if (qeth_l3_addr_match_ip(addr, query)) + return addr; + } else { + hash_for_each_possible(card->ip_htable, addr, hnode, key) + if (qeth_l3_addr_match_ip(addr, query)) + return addr; + } + return NULL; +} + static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) { int i, j; @@ -163,8 +181,8 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) } } -int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, - struct qeth_ipaddr *addr) +static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, + struct qeth_ipaddr *addr) { struct qeth_ipato_entry *ipatoe; u8 addr_bits[128] = {0, }; @@ -173,6 +191,8 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, if (!card->ipato.enabled) return 0; + if (addr->type != QETH_IP_TYPE_NORMAL) + return 0; qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits, (addr->proto == QETH_PROT_IPV4)? 
4:16); @@ -200,34 +220,6 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, return rc; } -inline int -qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2) -{ - return addr1->proto == addr2->proto && - !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) && - !memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac)); -} - -static struct qeth_ipaddr * -qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) -{ - struct qeth_ipaddr *addr; - - if (tmp_addr->is_multicast) { - hash_for_each_possible(card->ip_mc_htable, addr, - hnode, qeth_l3_ipaddr_hash(tmp_addr)) - if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr)) - return addr; - } else { - hash_for_each_possible(card->ip_htable, addr, - hnode, qeth_l3_ipaddr_hash(tmp_addr)) - if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr)) - return addr; - } - - return NULL; -} - int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) { int rc = 0; @@ -242,23 +234,18 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); } - addr = qeth_l3_ip_from_hash(card, tmp_addr); - if (!addr) + addr = qeth_l3_find_addr_by_ip(card, tmp_addr); + if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr)) return -ENOENT; addr->ref_counter--; - if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL || - addr->type == QETH_IP_TYPE_RXIP)) + if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0) return rc; if (addr->in_progress) return -EINPROGRESS; - if (!qeth_card_hw_is_reachable(card)) { - addr->disp_flag = QETH_DISP_ADDR_DELETE; - return 0; - } - - rc = qeth_l3_deregister_addr_entry(card, addr); + if (qeth_card_hw_is_reachable(card)) + rc = qeth_l3_deregister_addr_entry(card, addr); hash_del(&addr->hnode); kfree(addr); @@ -270,6 +257,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) { int rc = 0; struct qeth_ipaddr *addr; + char buf[40]; QETH_CARD_TEXT(card, 4, "addip"); @@ -280,8 +268,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); } - addr = qeth_l3_ip_from_hash(card, tmp_addr); - if (!addr) { + addr = qeth_l3_find_addr_by_ip(card, tmp_addr); + if (addr) { + if (tmp_addr->type != QETH_IP_TYPE_NORMAL) + return -EADDRINUSE; + if (qeth_l3_addr_match_all(addr, tmp_addr)) { + addr->ref_counter++; + return 0; + } + qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u, + buf); + dev_warn(&card->gdev->dev, + "Registering IP address %s failed\n", buf); + return -EADDRINUSE; + } else { addr = qeth_l3_get_addr_buffer(tmp_addr->proto); if (!addr) return -ENOMEM; @@ -289,8 +289,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr)); addr->ref_counter = 1; - if (addr->type == QETH_IP_TYPE_NORMAL && - qeth_l3_is_addr_covered_by_ipato(card, addr)) { + if (qeth_l3_is_addr_covered_by_ipato(card, addr)) { QETH_CARD_TEXT(card, 2, "tkovaddr"); addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; } @@ -322,19 +321,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) (rc == IPA_RC_LAN_OFFLINE)) { addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; if (addr->ref_counter < 1) { - qeth_l3_delete_ip(card, addr); + qeth_l3_deregister_addr_entry(card, addr); + hash_del(&addr->hnode); kfree(addr); } } else { hash_del(&addr->hnode); kfree(addr); } - } else { - if (addr->type == QETH_IP_TYPE_NORMAL || - addr->type == 
QETH_IP_TYPE_RXIP) - addr->ref_counter++; } - return rc; } @@ -402,11 +397,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card) spin_lock_bh(&card->ip_lock); hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { - if (addr->disp_flag == QETH_DISP_ADDR_DELETE) { - qeth_l3_deregister_addr_entry(card, addr); - hash_del(&addr->hnode); - kfree(addr); - } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) { + if (addr->disp_flag == QETH_DISP_ADDR_ADD) { if (addr->proto == QETH_PROT_IPV4) { addr->in_progress = 1; spin_unlock_bh(&card->ip_lock); @@ -604,6 +595,27 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) /* * IP address takeover related functions */ + +/** + * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs. + * + * Caller must hold ip_lock. + */ +void qeth_l3_update_ipato(struct qeth_card *card) +{ + struct qeth_ipaddr *addr; + unsigned int i; + + hash_for_each(card->ip_htable, i, addr, hnode) { + if (addr->type != QETH_IP_TYPE_NORMAL) + continue; + if (qeth_l3_is_addr_covered_by_ipato(card, addr)) + addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; + else + addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG; + } +} + static void qeth_l3_clear_ipato_list(struct qeth_card *card) { struct qeth_ipato_entry *ipatoe, *tmp; @@ -615,6 +627,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card) kfree(ipatoe); } + qeth_l3_update_ipato(card); spin_unlock_bh(&card->ip_lock); } @@ -639,8 +652,10 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, } } - if (!rc) + if (!rc) { list_add_tail(&new->entry, &card->ipato.entries); + qeth_l3_update_ipato(card); + } spin_unlock_bh(&card->ip_lock); @@ -663,6 +678,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card, (proto == QETH_PROT_IPV4)? 4:16) && (ipatoe->mask_bits == mask_bits)) { list_del(&ipatoe->entry); + qeth_l3_update_ipato(card); kfree(ipatoe); } } @@ -697,12 +713,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, return -ENOMEM; spin_lock_bh(&card->ip_lock); - - if (qeth_l3_ip_from_hash(card, ipaddr)) - rc = -EEXIST; - else - qeth_l3_add_ip(card, ipaddr); - + rc = qeth_l3_add_ip(card, ipaddr); spin_unlock_bh(&card->ip_lock); kfree(ipaddr); @@ -765,12 +776,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, return -ENOMEM; spin_lock_bh(&card->ip_lock); - - if (qeth_l3_ip_from_hash(card, ipaddr)) - rc = -EEXIST; - else - qeth_l3_add_ip(card, ipaddr); - + rc = qeth_l3_add_ip(card, ipaddr); spin_unlock_bh(&card->ip_lock); kfree(ipaddr); @@ -1376,9 +1382,11 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) tmp->u.a4.addr = be32_to_cpu(im4->multiaddr); memcpy(tmp->mac, buf, sizeof(tmp->mac)); + tmp->is_multicast = 1; - ipm = qeth_l3_ip_from_hash(card, tmp); + ipm = qeth_l3_find_addr_by_ip(card, tmp); if (ipm) { + /* for mcast, by-IP match means full match */ ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; } else { ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); @@ -1461,8 +1469,9 @@ qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev) sizeof(struct in6_addr)); tmp->is_multicast = 1; - ipm = qeth_l3_ip_from_hash(card, tmp); + ipm = qeth_l3_find_addr_by_ip(card, tmp); if (ipm) { + /* for mcast, by-IP match means full match */ ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; continue; } @@ -1553,7 +1562,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card, addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); if (!addr) - return; + goto out; spin_lock_bh(&card->ip_lock); @@ -1567,6 +1576,7 @@ static 
void qeth_l3_free_vlan_addresses4(struct qeth_card *card, spin_unlock_bh(&card->ip_lock); kfree(addr); +out: in_dev_put(in_dev); } @@ -1591,7 +1601,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card, addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); if (!addr) - return; + goto out; spin_lock_bh(&card->ip_lock); @@ -1606,6 +1616,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card, spin_unlock_bh(&card->ip_lock); kfree(addr); +out: in6_dev_put(in6_dev); #endif /* CONFIG_QETH_IPV6 */ } @@ -2604,11 +2615,12 @@ static void qeth_tso_fill_header(struct qeth_card *card, static int qeth_l3_get_elements_no_tso(struct qeth_card *card, struct sk_buff *skb, int extra_elems) { - addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); - int elements = qeth_get_elements_for_range( - tcpdptr, - (addr_t)skb->data + skb_headlen(skb)) + - qeth_get_elements_for_frags(skb); + addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); + addr_t end = (addr_t)skb->data + skb_headlen(skb); + int elements = qeth_get_elements_for_frags(skb); + + if (start != end) + elements += qeth_get_elements_for_range(start, end); if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { QETH_DBF_MESSAGE(2, @@ -2920,6 +2932,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_stop = qeth_l3_stop, .ndo_get_stats = qeth_get_stats, .ndo_start_xmit = qeth_l3_hard_start_xmit, + .ndo_features_check = qeth_features_check, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_multicast_list, .ndo_do_ioctl = qeth_do_ioctl, @@ -2960,6 +2973,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->vlan_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO; + card->dev->features |= NETIF_F_SG; } } } else if (card->info.type == QETH_CARD_TYPE_IQD) { @@ -2987,8 +3001,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; netif_keep_dst(card->dev); - card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * - PAGE_SIZE; + netif_set_gso_max_size(card->dev, (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * + PAGE_SIZE); SET_NETDEV_DEV(card->dev, &card->gdev->dev); netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); @@ -3032,8 +3046,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) qeth_l3_set_offline(cgdev); if (card->dev) { - netif_napi_del(&card->napi); unregister_netdev(card->dev); + free_netdev(card->dev); card->dev = NULL; } diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 7a829ad77783..1295dd8ec849 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -370,8 +370,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); - struct qeth_ipaddr *addr; - int i, rc = 0; + bool enable; + int rc = 0; if (!card) return -EINVAL; @@ -384,25 +384,18 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, } if (sysfs_streq(buf, "toggle")) { - card->ipato.enabled = (card->ipato.enabled)? 
0 : 1; - } else if (sysfs_streq(buf, "1")) { - card->ipato.enabled = 1; - hash_for_each(card->ip_htable, i, addr, hnode) { - if ((addr->type == QETH_IP_TYPE_NORMAL) && - qeth_l3_is_addr_covered_by_ipato(card, addr)) - addr->set_flags |= - QETH_IPA_SETIP_TAKEOVER_FLAG; - } - } else if (sysfs_streq(buf, "0")) { - card->ipato.enabled = 0; - hash_for_each(card->ip_htable, i, addr, hnode) { - if (addr->set_flags & - QETH_IPA_SETIP_TAKEOVER_FLAG) - addr->set_flags &= - ~QETH_IPA_SETIP_TAKEOVER_FLAG; - } - } else + enable = !card->ipato.enabled; + } else if (kstrtobool(buf, &enable)) { rc = -EINVAL; + goto out; + } + + if (card->ipato.enabled != enable) { + card->ipato.enabled = enable; + spin_lock_bh(&card->ip_lock); + qeth_l3_update_ipato(card); + spin_unlock_bh(&card->ip_lock); + } out: mutex_unlock(&card->conf_mutex); return rc ? rc : count; @@ -428,20 +421,27 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); + bool invert; int rc = 0; if (!card) return -EINVAL; mutex_lock(&card->conf_mutex); - if (sysfs_streq(buf, "toggle")) - card->ipato.invert4 = (card->ipato.invert4)? 0 : 1; - else if (sysfs_streq(buf, "1")) - card->ipato.invert4 = 1; - else if (sysfs_streq(buf, "0")) - card->ipato.invert4 = 0; - else + if (sysfs_streq(buf, "toggle")) { + invert = !card->ipato.invert4; + } else if (kstrtobool(buf, &invert)) { rc = -EINVAL; + goto out; + } + + if (card->ipato.invert4 != invert) { + card->ipato.invert4 = invert; + spin_lock_bh(&card->ip_lock); + qeth_l3_update_ipato(card); + spin_unlock_bh(&card->ip_lock); + } +out: mutex_unlock(&card->conf_mutex); return rc ? rc : count; } @@ -607,20 +607,27 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); + bool invert; int rc = 0; if (!card) return -EINVAL; mutex_lock(&card->conf_mutex); - if (sysfs_streq(buf, "toggle")) - card->ipato.invert6 = (card->ipato.invert6)? 0 : 1; - else if (sysfs_streq(buf, "1")) - card->ipato.invert6 = 1; - else if (sysfs_streq(buf, "0")) - card->ipato.invert6 = 0; - else + if (sysfs_streq(buf, "toggle")) { + invert = !card->ipato.invert6; + } else if (kstrtobool(buf, &invert)) { rc = -EINVAL; + goto out; + } + + if (card->ipato.invert6 != invert) { + card->ipato.invert6 = invert; + spin_lock_bh(&card->ip_lock); + qeth_l3_update_ipato(card); + spin_unlock_bh(&card->ip_lock); + } +out: mutex_unlock(&card->conf_mutex); return rc ? rc : count; } diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index a851d34c642b..04674ce961f1 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c @@ -189,7 +189,7 @@ static struct device_driver smsg_driver = { static void __exit smsg_exit(void) { - cpcmd("SET SMSG IUCV", NULL, 0, NULL); + cpcmd("SET SMSG OFF", NULL, 0, NULL); device_unregister(smsg_dev); iucv_unregister(&smsg_handler, 1); driver_unregister(&smsg_driver); diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index a8b831000b2d..599447032e50 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -4,7 +4,7 @@ * * Debug traces for zfcp. * - * Copyright IBM Corp. 2002, 2017 + * Copyright IBM Corp. 
2002, 2018 */ #define KMSG_COMPONENT "zfcp" @@ -285,6 +285,8 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter, struct list_head *entry; unsigned long flags; + lockdep_assert_held(&adapter->erp_lock); + if (unlikely(!debug_level_enabled(dbf->rec, level))) return; @@ -308,6 +310,27 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter, spin_unlock_irqrestore(&dbf->rec_lock, flags); } +/** + * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock + * @tag: identifier for event + * @adapter: adapter on which the erp_action should run + * @port: remote port involved in the erp_action + * @sdev: scsi device involved in the erp_action + * @want: wanted erp_action + * @need: required erp_action + * + * The adapter->erp_lock must not be held. + */ +void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter, + struct zfcp_port *port, struct scsi_device *sdev, + u8 want, u8 need) +{ + unsigned long flags; + + read_lock_irqsave(&adapter->erp_lock, flags); + zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need); + read_unlock_irqrestore(&adapter->erp_lock, flags); +} /** * zfcp_dbf_rec_run_lvl - trace event related to running recovery @@ -643,6 +666,46 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc, spin_unlock_irqrestore(&dbf->scsi_lock, flags); } +/** + * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks. + * @tag: Identifier for event. + * @adapter: Pointer to zfcp adapter as context for this event. + * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF). + * @ret: Return value of calling function. + * + * This SCSI trace variant does not depend on any of: + * scsi_cmnd, zfcp_fsf_req, scsi_device. + */ +void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter, + unsigned int scsi_id, int ret) +{ + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_dbf_scsi *rec = &dbf->scsi_buf; + unsigned long flags; + static int const level = 1; + + if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level))) + return; + + spin_lock_irqsave(&dbf->scsi_lock, flags); + memset(rec, 0, sizeof(*rec)); + + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); + rec->id = ZFCP_DBF_SCSI_CMND; + rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */ + rec->scsi_retries = ~0; + rec->scsi_allowed = ~0; + rec->fcp_rsp_info = ~0; + rec->scsi_id = scsi_id; + rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN; + rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32); + rec->host_scribble = ~0; + memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE); + + debug_event(dbf->scsi, level, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->scsi_lock, flags); +} + static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size) { struct debug_info *d; diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index cbb8156bf5e0..7aa243a6cdbf 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -35,11 +35,28 @@ enum zfcp_erp_steps { ZFCP_ERP_STEP_LUN_OPENING = 0x2000, }; +/** + * enum zfcp_erp_act_type - Type of ERP action object. + * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery. + * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery. + * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery. + * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery. + * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with + * either of the first four enum values. + * Used to indicate that an ERP action could not be + * set up despite a detected need for some recovery. 
+ * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with + * either of the first four enum values. + * Used to indicate that ERP is not needed because + * the object has ZFCP_STATUS_COMMON_ERP_FAILED. + */ enum zfcp_erp_act_type { ZFCP_ERP_ACTION_REOPEN_LUN = 1, ZFCP_ERP_ACTION_REOPEN_PORT = 2, ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3, ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4, + ZFCP_ERP_ACTION_NONE = 0xc0, + ZFCP_ERP_ACTION_FAILED = 0xe0, }; enum zfcp_erp_act_state { @@ -126,6 +143,49 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) } } +static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter, + struct zfcp_port *port, + struct scsi_device *sdev) +{ + int need = want; + struct zfcp_scsi_dev *zsdev; + + switch (want) { + case ZFCP_ERP_ACTION_REOPEN_LUN: + zsdev = sdev_to_zfcp(sdev); + if (atomic_read(&zsdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) + need = 0; + break; + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: + if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) + need = 0; + break; + case ZFCP_ERP_ACTION_REOPEN_PORT: + if (atomic_read(&port->status) & + ZFCP_STATUS_COMMON_ERP_FAILED) { + need = 0; + /* ensure propagation of failed status to new devices */ + zfcp_erp_set_port_status( + port, ZFCP_STATUS_COMMON_ERP_FAILED); + } + break; + case ZFCP_ERP_ACTION_REOPEN_ADAPTER: + if (atomic_read(&adapter->status) & + ZFCP_STATUS_COMMON_ERP_FAILED) { + need = 0; + /* ensure propagation of failed status to new devices */ + zfcp_erp_set_adapter_status( + adapter, ZFCP_STATUS_COMMON_ERP_FAILED); + } + break; + default: + need = 0; + break; + } + + return need; +} + static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, struct zfcp_port *port, struct scsi_device *sdev) @@ -249,16 +309,27 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, int retval = 1, need; struct zfcp_erp_action *act; - if (!adapter->erp_thread) - return -EIO; + need = zfcp_erp_handle_failed(want, adapter, port, sdev); + if (!need) { + need = ZFCP_ERP_ACTION_FAILED; /* marker for trace */ + goto out; + } + + if (!adapter->erp_thread) { + need = ZFCP_ERP_ACTION_NONE; /* marker for trace */ + retval = -EIO; + goto out; + } need = zfcp_erp_required_act(want, adapter, port, sdev); if (!need) goto out; act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev); - if (!act) + if (!act) { + need |= ZFCP_ERP_ACTION_NONE; /* marker for trace */ goto out; + } atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); ++adapter->erp_total_count; list_add_tail(&act->list, &adapter->erp_ready_head); @@ -269,18 +340,32 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, return retval; } +void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter, + u64 port_name, u32 port_id) +{ + unsigned long flags; + static /* don't waste stack */ struct zfcp_port tmpport; + + write_lock_irqsave(&adapter->erp_lock, flags); + /* Stand-in zfcp port with fields just good enough for + * zfcp_dbf_rec_trig() and zfcp_dbf_set_common(). + * Under lock because tmpport is static.
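
The zfcp changes above pair a lock assertion with a lock-taking wrapper: zfcp_dbf_rec_trig() now states its locking contract with lockdep_assert_held(&adapter->erp_lock), and the new zfcp_dbf_rec_trig_lock() exists for call sites that do not already hold the lock. A minimal user-space sketch of the same convention, using a pthread rwlock and a plain flag in place of lockdep; every name below is illustrative, none comes from zfcp:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t erp_lock = PTHREAD_RWLOCK_INITIALIZER;
static int erp_lock_held;   /* crude stand-in for lockdep state */

/* Caller must hold erp_lock (mirrors zfcp_dbf_rec_trig). */
static void rec_trig(const char *tag)
{
    assert(erp_lock_held);  /* kernel: lockdep_assert_held(&adapter->erp_lock) */
    printf("trace event: %s\n", tag);
}

/* Takes erp_lock itself, for unlocked call sites (mirrors the _lock variant). */
static void rec_trig_lock(const char *tag)
{
    pthread_rwlock_rdlock(&erp_lock);
    erp_lock_held = 1;
    rec_trig(tag);
    erp_lock_held = 0;
    pthread_rwlock_unlock(&erp_lock);
}

int main(void)
{
    rec_trig_lock("scpaddy");   /* same shape as the rport add/delete sites */
    return 0;
}
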
+ */ + atomic_set(&tmpport.status, -1); /* unknown */ + tmpport.wwpn = port_name; + tmpport.d_id = port_id; + zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL, + ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, + ZFCP_ERP_ACTION_NONE); + write_unlock_irqrestore(&adapter->erp_lock, flags); +} + static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear_mask, char *id) { zfcp_erp_adapter_block(adapter, clear_mask); zfcp_scsi_schedule_rports_block(adapter); - /* ensure propagation of failed status to new devices */ - if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { - zfcp_erp_set_adapter_status(adapter, - ZFCP_STATUS_COMMON_ERP_FAILED); - return -EIO; - } return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, NULL, NULL, id, 0); } @@ -299,12 +384,8 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id) zfcp_scsi_schedule_rports_block(adapter); write_lock_irqsave(&adapter->erp_lock, flags); - if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) - zfcp_erp_set_adapter_status(adapter, - ZFCP_STATUS_COMMON_ERP_FAILED); - else - zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, - NULL, NULL, id, 0); + zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, + NULL, NULL, id, 0); write_unlock_irqrestore(&adapter->erp_lock, flags); } @@ -345,9 +426,6 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, zfcp_erp_port_block(port, clear); zfcp_scsi_schedule_rport_block(port); - if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) - return; - zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, port->adapter, port, NULL, id, 0); } @@ -373,12 +451,6 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id) zfcp_erp_port_block(port, clear); zfcp_scsi_schedule_rport_block(port); - if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { - /* ensure propagation of failed status to new devices */ - zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED); - return -EIO; - } - return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, port->adapter, port, NULL, id, 0); } @@ -418,9 +490,6 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, zfcp_erp_lun_block(sdev, clear); - if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) - return; - zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter, zfcp_sdev->port, sdev, id, act_status); } diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 8ca2ab7deaa9..c1092a11e728 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -4,7 +4,7 @@ * * External function declarations. * - * Copyright IBM Corp. 2002, 2016 + * Copyright IBM Corp. 
2002, 2018 */ #ifndef ZFCP_EXT_H @@ -35,6 +35,9 @@ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *); extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *); extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *, struct zfcp_port *, struct scsi_device *, u8, u8); +extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter, + struct zfcp_port *port, + struct scsi_device *sdev, u8 want, u8 need); extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *); extern void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp); @@ -49,10 +52,15 @@ extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *, struct zfcp_fsf_req *); +extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter, + unsigned int scsi_id, int ret); /* zfcp_erp.c */ extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32); extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32); +extern void zfcp_erp_port_forced_no_port_dbf(char *id, + struct zfcp_adapter *adapter, + u64 port_name, u32 port_id); extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *); extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *); extern void zfcp_erp_set_port_status(struct zfcp_port *, u32); diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 4d2ba5682493..0b6f51424745 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -4,7 +4,7 @@ * * Interface to Linux SCSI midlayer. * - * Copyright IBM Corp. 2002, 2017 + * Copyright IBM Corp. 2002, 2018 */ #define KMSG_COMPONENT "zfcp" @@ -181,6 +181,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) if (abrt_req) break; + zfcp_dbf_scsi_abort("abrt_wt", scpnt, NULL); zfcp_erp_wait(adapter); ret = fc_block_scsi_eh(scpnt); if (ret) { @@ -277,6 +278,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) if (fsf_req) break; + zfcp_dbf_scsi_devreset("wait", scpnt, tm_flags, NULL); zfcp_erp_wait(adapter); ret = fc_block_scsi_eh(scpnt); if (ret) { @@ -323,15 +325,16 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; - int ret; + int ret = SUCCESS, fc_ret; zfcp_erp_adapter_reopen(adapter, 0, "schrh_1"); zfcp_erp_wait(adapter); - ret = fc_block_scsi_eh(scpnt); - if (ret) - return ret; + fc_ret = fc_block_scsi_eh(scpnt); + if (fc_ret) + ret = fc_ret; - return SUCCESS; + zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret); + return ret; } struct scsi_transport_template *zfcp_scsi_transport_template; @@ -602,6 +605,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) if (port) { zfcp_erp_port_forced_reopen(port, 0, "sctrpi1"); put_device(&port->dev); + } else { + zfcp_erp_port_forced_no_port_dbf( + "sctrpin", adapter, + rport->port_name /* zfcp_scsi_rport_register */, + rport->port_id /* zfcp_scsi_rport_register */); } } @@ -618,9 +626,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port) ids.port_id = port->d_id; ids.roles = FC_RPORT_ROLE_FCP_TARGET; - zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL, - ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD, - ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD); + zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL, + ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD, + 
ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD); rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids); if (!rport) { dev_err(&port->adapter->ccw_device->dev, @@ -642,9 +650,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port) struct fc_rport *rport = port->rport; if (rport) { - zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL, - ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL, - ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL); + zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL, + ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL, + ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL); fc_remote_port_delete(rport); port->rport = NULL; } diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 00e7968a1d70..a1388842e17e 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -886,6 +886,11 @@ static int twa_chrdev_open(struct inode *inode, struct file *file) unsigned int minor_number; int retval = TW_IOCTL_ERROR_OS_ENODEV; + if (!capable(CAP_SYS_ADMIN)) { + retval = -EACCES; + goto out; + } + minor_number = iminor(inode); if (minor_number >= twa_device_extension_count) goto out; diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index 33261b690774..f6179e3d6953 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -1033,6 +1033,9 @@ static int tw_chrdev_open(struct inode *inode, struct file *file) dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n"); + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + minor_number = iminor(inode); if (minor_number >= tw_device_extension_count) return -ENODEV; diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index af3e4d3f9735..7173ae53c526 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -913,8 +913,15 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex) memset(str, ' ', sizeof(*str)); if (sup_adap_info->adapter_type_text[0]) { - char *cp = sup_adap_info->adapter_type_text; int c; + char *cp; + char *cname = kmemdup(sup_adap_info->adapter_type_text, + sizeof(sup_adap_info->adapter_type_text), + GFP_ATOMIC); + if (!cname) + return; + + cp = cname; if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C')) inqstrcpy("SMC", str->vid); else { @@ -923,7 +930,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex) ++cp; c = *cp; *cp = '\0'; - inqstrcpy(sup_adap_info->adapter_type_text, str->vid); + inqstrcpy(cname, str->vid); *cp = c; while (*cp && *cp != ' ') ++cp; @@ -937,8 +944,8 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex) cp[sizeof(str->pid)] = '\0'; } inqstrcpy (cp, str->pid); - if (c) - cp[sizeof(str->pid)] = c; + + kfree(cname); } else { struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype); diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 403a639574e5..b0b290f7b8dc 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1724,6 +1724,7 @@ struct aac_dev #define FIB_CONTEXT_FLAG_NATIVE_HBA (0x00000010) #define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF (0x00000020) #define FIB_CONTEXT_FLAG_SCSI_CMD (0x00000040) +#define FIB_CONTEXT_FLAG_EH_RESET (0x00000080) /* * Define the command values diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index dfe8e70f8d99..998788a967be 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -752,6 +752,8 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, int wait; unsigned long flags = 0; unsigned long mflags = 0; + struct aac_hba_cmd_req *hbacmd = (struct 
aac_hba_cmd_req *) + fibptr->hw_fib_va; fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA); if (callback) { @@ -762,11 +764,9 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, wait = 1; - if (command == HBA_IU_TYPE_SCSI_CMD_REQ) { - struct aac_hba_cmd_req *hbacmd = - (struct aac_hba_cmd_req *)fibptr->hw_fib_va; + hbacmd->iu_type = command; - hbacmd->iu_type = command; + if (command == HBA_IU_TYPE_SCSI_CMD_REQ) { /* bit1 of request_id must be 0 */ hbacmd->request_id = cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); @@ -1530,9 +1530,10 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) host = aac->scsi_host_ptr; scsi_block_requests(host); aac_adapter_disable_int(aac); - if (aac->thread->pid != current->pid) { + if (aac->thread && aac->thread->pid != current->pid) { spin_unlock_irq(host->host_lock); kthread_stop(aac->thread); + aac->thread = NULL; jafo = 1; } @@ -1583,6 +1584,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) * will ensure that i/o is queisced and the card is flushed in that * case. */ + aac_free_irq(aac); aac_fib_map_free(aac); dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr, aac->comm_phys); @@ -1590,7 +1592,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) aac->comm_phys = 0; kfree(aac->queues); aac->queues = NULL; - aac_free_irq(aac); kfree(aac->fsa_dev); aac->fsa_dev = NULL; @@ -1619,6 +1620,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) aac->name); if (IS_ERR(aac->thread)) { retval = PTR_ERR(aac->thread); + aac->thread = NULL; goto out; } } @@ -1672,14 +1674,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) out: aac->in_reset = 0; scsi_unblock_requests(host); - /* - * Issue bus rescan to catch any configuration that might have - * occurred - */ - if (!retval) { - dev_info(&aac->pdev->dev, "Issuing bus rescan\n"); - scsi_scan_host(host); - } + if (jafo) { spin_lock_irq(host->host_lock); } @@ -2383,19 +2378,19 @@ static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str, goto out; } -int aac_send_safw_hostttime(struct aac_dev *dev, struct timeval *now) +int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now) { struct tm cur_tm; char wellness_str[] = "TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ"; u32 datasize = sizeof(wellness_str); - unsigned long local_time; + time64_t local_time; int ret = -ENODEV; if (!dev->sa_firmware) goto out; - local_time = (u32)(now->tv_sec - (sys_tz.tz_minuteswest * 60)); - time_to_tm(local_time, 0, &cur_tm); + local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60)); + time64_to_tm(local_time, 0, &cur_tm); cur_tm.tm_mon += 1; cur_tm.tm_year += 1900; wellness_str[8] = bin2bcd(cur_tm.tm_hour); @@ -2412,7 +2407,7 @@ int aac_send_safw_hostttime(struct aac_dev *dev, struct timeval *now) return ret; } -int aac_send_hosttime(struct aac_dev *dev, struct timeval *now) +int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now) { int ret = -ENOMEM; struct fib *fibptr; @@ -2424,7 +2419,7 @@ int aac_send_hosttime(struct aac_dev *dev, struct timeval *now) aac_fib_init(fibptr); info = (__le32 *)fib_data(fibptr); - *info = cpu_to_le32(now->tv_sec); + *info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */ ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal, 1, 1, NULL, NULL); @@ -2496,7 +2491,7 @@ int aac_command_thread(void *data) } if (!time_before(next_check_jiffies,next_jiffies) && 
((difference = next_jiffies - jiffies) <= 0)) { - struct timeval now; + struct timespec64 now; int ret; /* Don't even try to talk to adapter if its sick */ @@ -2506,15 +2501,15 @@ int aac_command_thread(void *data) next_check_jiffies = jiffies + ((long)(unsigned)check_interval) * HZ; - do_gettimeofday(&now); + ktime_get_real_ts64(&now); /* Synchronize our watches */ - if (((1000000 - (1000000 / HZ)) > now.tv_usec) - && (now.tv_usec > (1000000 / HZ))) - difference = (((1000000 - now.tv_usec) * HZ) - + 500000) / 1000000; + if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec) + && (now.tv_nsec > (NSEC_PER_SEC / HZ))) + difference = (((NSEC_PER_SEC - now.tv_nsec) * HZ) + + NSEC_PER_SEC / 2) / NSEC_PER_SEC; else { - if (now.tv_usec > 500000) + if (now.tv_nsec > NSEC_PER_SEC / 2) ++now.tv_sec; if (dev->sa_firmware) diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index c9252b138c1f..4917649cacd5 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1037,7 +1037,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd) info = &aac->hba_map[bus][cid]; if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || info->devtype != AAC_DEVTYPE_NATIVE_RAW) { - fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT; + fib->flags |= FIB_CONTEXT_FLAG_EH_RESET; cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER; } } @@ -1565,6 +1565,7 @@ static void __aac_shutdown(struct aac_dev * aac) up(&fib->event_wait); } kthread_stop(aac->thread); + aac->thread = NULL; } aac_send_shutdown(aac); @@ -1690,8 +1691,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) * Map in the registers from the adapter. */ aac->base_size = AAC_MIN_FOOTPRINT_SIZE; - if ((*aac_drivers[index].init)(aac)) + if ((*aac_drivers[index].init)(aac)) { + error = -ENODEV; goto out_unmap; + } if (aac->sync_mode) { if (aac_sync_mode) diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 24388795ee9a..936e8c735656 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -2011,7 +2011,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, * have valid data in the sense buffer that could * confuse the higher levels. 
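
Stepping back to the aacraid commsup.c hunks just above: they are part of the y2038 conversion, replacing struct timeval and do_gettimeofday() with struct timespec64 and ktime_get_real_ts64(), and redoing the "synchronize our watches" arithmetic in nanoseconds with explicit rounding to the nearest tick. A self-contained sketch of that rounding step (the tick rate here is a demo assumption, not taken from the patch):

#include <stdio.h>

#define DEMO_HZ 250LL               /* assumed tick rate, not from the patch */
#define NSEC_PER_SEC 1000000000LL

/* Ticks until the next whole second, rounded to the nearest tick,
 * following the shape of the converted "difference" computation. */
static long long ticks_to_next_second(long long tv_nsec)
{
    return ((NSEC_PER_SEC - tv_nsec) * DEMO_HZ + NSEC_PER_SEC / 2)
            / NSEC_PER_SEC;
}

int main(void)
{
    /* 0.25 s into the second: 0.75 s at 250 Hz is 187.5, rounds to 188. */
    printf("%lld ticks\n", ticks_to_next_second(250000000LL));
    return 0;
}
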
*/ - memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer)); + memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); //printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id); //{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); } /* diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index b2e8c0dfc79c..1aa46d0763a0 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -3135,7 +3135,8 @@ bfad_im_bsg_vendor_request(struct bsg_job *job) struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; - struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job)); + struct Scsi_Host *shost = fc_bsg_to_shost(job); + struct bfad_im_port_s *im_port = bfad_get_im_port(shost); struct bfad_s *bfad = im_port->bfad; struct request_queue *request_q = job->req->q; void *payload_kbuf; @@ -3357,7 +3358,8 @@ int bfad_im_bsg_els_ct_request(struct bsg_job *job) { struct bfa_bsg_data *bsg_data; - struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job)); + struct Scsi_Host *shost = fc_bsg_to_shost(job); + struct bfad_im_port_s *im_port = bfad_get_im_port(shost); struct bfad_s *bfad = im_port->bfad; bfa_bsg_fcpt_t *bsg_fcpt; struct bfad_fcxp *drv_fcxp; diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c index 8dcd8c70c7ee..05f523971348 100644 --- a/drivers/scsi/bfa/bfad_debugfs.c +++ b/drivers/scsi/bfa/bfad_debugfs.c @@ -255,7 +255,8 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf, struct bfad_s *bfad = port->bfad; struct bfa_s *bfa = &bfad->bfa; struct bfa_ioc_s *ioc = &bfa->ioc; - int addr, len, rc, i; + int addr, rc, i; + u32 len; u32 *regbuf; void __iomem *rb, *reg_addr; unsigned long flags; @@ -266,7 +267,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf, return PTR_ERR(kern_buf); rc = sscanf(kern_buf, "%x:%x", &addr, &len); - if (rc < 2) { + if (rc < 2 || len > (UINT_MAX >> 2)) { printk(KERN_INFO "bfad[%d]: %s failed to read user buf\n", bfad->inst_no, __func__); diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 24e657a4ec80..c05d6e91e4bd 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -546,6 +546,7 @@ int bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, struct device *dev) { + struct bfad_im_port_pointer *im_portp; int error = 1; mutex_lock(&bfad_mutex); @@ -564,7 +565,8 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, goto out_free_idr; } - im_port->shost->hostdata[0] = (unsigned long)im_port; + im_portp = shost_priv(im_port->shost); + im_portp->p = im_port; im_port->shost->unique_id = im_port->idr_id; im_port->shost->this_id = -1; im_port->shost->max_id = MAX_FCP_TARGET; @@ -748,7 +750,7 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) sht->sg_tablesize = bfad->cfg_data.io_max_sge; - return scsi_host_alloc(sht, sizeof(unsigned long)); + return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer)); } void diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h index c81ec2a77ef5..06ce4ba2b7bc 100644 --- a/drivers/scsi/bfa/bfad_im.h +++ b/drivers/scsi/bfa/bfad_im.h @@ -69,6 +69,16 @@ struct bfad_im_port_s { struct fc_vport *fc_vport; }; +struct bfad_im_port_pointer { + struct bfad_im_port_s *p; +}; + +static inline struct bfad_im_port_s 
*bfad_get_im_port(struct Scsi_Host *host) +{ + struct bfad_im_port_pointer *im_portp = shost_priv(host); + return im_portp->p; +} + enum bfad_itnim_state { ITNIM_STATE_NONE, ITNIM_STATE_ONLINE, diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 5b6153f23f01..6626b28ba8fe 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1865,6 +1865,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, /* we will not receive ABTS response for this IO */ BNX2FC_IO_DBG(io_req, "Timer context finished processing " "this scsi cmd\n"); + return; } /* Cancel the timeout_work, as we received IO completion */ diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 1d02cf9fe06c..30d5f0ef29bb 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1575,6 +1575,7 @@ static void release_offload_resources(struct cxgbi_sock *csk) csk, csk->state, csk->flags, csk->tid); cxgbi_sock_free_cpl_skbs(csk); + cxgbi_sock_purge_write_queue(csk); if (csk->wr_cred != csk->wr_max_cred) { cxgbi_sock_purge_wr_queue(csk); cxgbi_sock_reset_wr_list(csk); diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 76b8b7eed0c0..737314cac8d8 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -620,6 +620,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) cmd->parent = afu; cmd->hwq_index = hwq_index; + cmd->sa.ioasc = 0; cmd->rcb.ctx_id = hwq->ctx_hndl; cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel); @@ -945,9 +946,9 @@ static void cxlflash_remove(struct pci_dev *pdev) return; } - /* If a Task Management Function is active, wait for it to complete - * before continuing with remove. 
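
The bfa changes above retire the old trick of parking a driver pointer in shost->hostdata[0] behind a cast; instead, a typed struct bfad_im_port_pointer is sized into the Scsi_Host private area and read back through the new bfad_get_im_port() accessor. Roughly the same pattern in plain user-space C; the host type and allocator here are stand-ins, not SCSI midlayer API:

#include <stdio.h>
#include <stdlib.h>

struct im_port { int id; };                     /* the real driver object */
struct im_port_pointer { struct im_port *p; };  /* typed wrapper */

struct host {                                   /* stand-in for Scsi_Host */
    int unique_id;
    unsigned char priv[];                       /* the shost_priv() area */
};

static struct host *host_alloc(size_t privsize)
{
    return calloc(1, sizeof(struct host) + privsize);
}

static void *host_priv(struct host *h)
{
    return h->priv;
}

/* Mirrors bfad_get_im_port(): one typed accessor, no hostdata[0] cast. */
static struct im_port *get_im_port(struct host *h)
{
    struct im_port_pointer *pp = host_priv(h);
    return pp->p;
}

int main(void)
{
    struct im_port port = { .id = 7 };
    struct host *h = host_alloc(sizeof(struct im_port_pointer));

    if (!h)
        return 1;
    ((struct im_port_pointer *)host_priv(h))->p = &port;
    printf("im_port id = %d\n", get_im_port(h)->id);
    free(h);
    return 0;
}
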
- */ + /* Yield to running recovery threads before continuing with remove */ + wait_event(cfg->reset_waitq, cfg->state != STATE_RESET && + cfg->state != STATE_PROBING); spin_lock_irqsave(&cfg->tmf_slock, lock_flags); if (cfg->tmf_active) wait_event_interruptible_lock_irq(cfg->tmf_waitq, @@ -1302,7 +1303,10 @@ static void afu_err_intr_init(struct afu *afu) for (i = 0; i < afu->num_hwqs; i++) { hwq = get_hwq(afu, i); - writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl); + reg = readq_be(&hwq->host_map->ctx_ctrl); + WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0); + reg |= SISL_MSI_SYNC_ERROR; + writeq_be(reg, &hwq->host_map->ctx_ctrl); writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask); } } diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h index 09daa86670fc..0892fb1f0a1e 100644 --- a/drivers/scsi/cxlflash/sislite.h +++ b/drivers/scsi/cxlflash/sislite.h @@ -284,6 +284,7 @@ struct sisl_host_map { __be64 cmd_room; __be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */ #define SISL_CTX_CTRL_UNMAP_SECTOR 0x8000000000000000ULL /* b0 */ +#define SISL_CTX_CTRL_LISN_MASK (0xFFULL) __be64 mbox_w; /* restricted use */ __be64 sq_start; /* Submission Queue (R/W): write sequence and */ __be64 sq_end; /* inclusion semantics are the same as RRQ */ diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index 375c536cbc68..c5eb0c468f0b 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -32,13 +32,13 @@ MODULE_AUTHOR("Open-FCoE.org"); MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs"); MODULE_LICENSE("GPL v2"); -static int fcoe_transport_create(const char *, struct kernel_param *); -static int fcoe_transport_destroy(const char *, struct kernel_param *); +static int fcoe_transport_create(const char *, const struct kernel_param *); +static int fcoe_transport_destroy(const char *, const struct kernel_param *); static int fcoe_transport_show(char *buffer, const struct kernel_param *kp); static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device); static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device); -static int fcoe_transport_enable(const char *, struct kernel_param *); -static int fcoe_transport_disable(const char *, struct kernel_param *); +static int fcoe_transport_enable(const char *, const struct kernel_param *); +static int fcoe_transport_disable(const char *, const struct kernel_param *); static int libfcoe_device_notification(struct notifier_block *notifier, ulong event, void *ptr); @@ -865,7 +865,8 @@ EXPORT_SYMBOL(fcoe_ctlr_destroy_store); * * Returns: 0 for success */ -static int fcoe_transport_create(const char *buffer, struct kernel_param *kp) +static int fcoe_transport_create(const char *buffer, + const struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; @@ -930,7 +931,8 @@ static int fcoe_transport_create(const char *buffer, struct kernel_param *kp) * * Returns: 0 for success */ -static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp) +static int fcoe_transport_destroy(const char *buffer, + const struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; @@ -974,7 +976,8 @@ static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp) * * Returns: 0 for success */ -static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp) +static int fcoe_transport_disable(const char *buffer, + const struct kernel_param 
*kp) { int rc = -ENODEV; struct net_device *netdev = NULL; @@ -1008,7 +1011,8 @@ static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp) * * Returns: 0 for success */ -static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp) +static int fcoe_transport_enable(const char *buffer, + const struct kernel_param *kp) { int rc = -ENODEV; struct net_device *netdev = NULL; diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 16664f2e15fb..8fa9bb336ad4 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -185,13 +185,16 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; + if (!task->lldd_task) + return; + + task->lldd_task = NULL; + if (!sas_protocol_ata(task->task_proto)) if (slot->n_elem) dma_unmap_sg(dev, task->scatter, slot->n_elem, task->data_dir); - task->lldd_task = NULL; - if (sas_dev) atomic64_dec(&sas_dev->running_req); } @@ -199,8 +202,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, if (slot->buf) dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma); - list_del_init(&slot->entry); + slot->buf = NULL; slot->task = NULL; slot->port = NULL; hisi_sas_slot_index_free(hisi_hba, slot->idx); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 2e5fa9717be8..871962b2e2f6 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -328,10 +328,11 @@ enum { #define DIR_TO_DEVICE 2 #define DIR_RESERVED 3 -#define CMD_IS_UNCONSTRAINT(cmd) \ - ((cmd == ATA_CMD_READ_LOG_EXT) || \ - (cmd == ATA_CMD_READ_LOG_DMA_EXT) || \ - (cmd == ATA_CMD_DEV_RESET)) +#define FIS_CMD_IS_UNCONSTRAINED(fis) \ + ((fis.command == ATA_CMD_READ_LOG_EXT) || \ + (fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \ + ((fis.command == ATA_CMD_DEV_RESET) && \ + ((fis.control & ATA_SRST) != 0))) static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) { @@ -1044,7 +1045,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba, << CMD_HDR_FRAME_TYPE_OFF; dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; - if (CMD_IS_UNCONSTRAINT(task->ata_task.fis.command)) + if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis)) dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF; hdr->dw1 = cpu_to_le32(dw1); diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index fe3a0da3ec97..ef22b275d050 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -318,6 +318,9 @@ static void scsi_host_dev_release(struct device *dev) scsi_proc_hostdir_rm(shost->hostt); + /* Wait for functions invoked through call_rcu(&shost->rcu, ...) 
*/ + rcu_barrier(); + if (shost->tmf_work_q) destroy_workqueue(shost->tmf_work_q); if (shost->ehandler) @@ -471,6 +474,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) shost->dma_boundary = 0xffffffff; shost->use_blk_mq = scsi_use_blk_mq; + shost->use_blk_mq = scsi_use_blk_mq || shost->hostt->force_blk_mq; device_initialize(&shost->shost_gendev); dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 4ed3d26ffdde..5b4b7f9be2d7 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -1040,11 +1040,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c, c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); if (unlikely(!h->msix_vectors)) return; - if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) - c->Header.ReplyQueue = - raw_smp_processor_id() % h->nreply_queues; - else - c->Header.ReplyQueue = reply_queue % h->nreply_queues; + c->Header.ReplyQueue = reply_queue; } } @@ -1058,10 +1054,7 @@ static void set_ioaccel1_performant_mode(struct ctlr_info *h, * Tell the controller to post the reply to the queue for this * processor. This seems to give the best I/O throughput. */ - if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) - cp->ReplyQueue = smp_processor_id() % h->nreply_queues; - else - cp->ReplyQueue = reply_queue % h->nreply_queues; + cp->ReplyQueue = reply_queue; /* * Set the bits in the address sent down to include: * - performant mode bit (bit 0) @@ -1082,10 +1075,7 @@ static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h, /* Tell the controller to post the reply to the queue for this * processor. This seems to give the best I/O throughput. */ - if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) - cp->reply_queue = smp_processor_id() % h->nreply_queues; - else - cp->reply_queue = reply_queue % h->nreply_queues; + cp->reply_queue = reply_queue; /* Set the bits in the address sent down to include: * - performant mode bit not used in ioaccel mode 2 * - pull count (bits 0-3) @@ -1104,10 +1094,7 @@ static void set_ioaccel2_performant_mode(struct ctlr_info *h, * Tell the controller to post the reply to the queue for this * processor. This seems to give the best I/O throughput. */ - if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) - cp->reply_queue = smp_processor_id() % h->nreply_queues; - else - cp->reply_queue = reply_queue % h->nreply_queues; + cp->reply_queue = reply_queue; /* * Set the bits in the address sent down to include: * - performant mode bit not used in ioaccel mode 2 @@ -1152,6 +1139,8 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h, { dial_down_lockup_detection_during_fw_flash(h, c); atomic_inc(&h->commands_outstanding); + + reply_queue = h->reply_map[raw_smp_processor_id()]; switch (c->cmd_type) { case CMD_IOACCEL1: set_ioaccel1_performant_mode(h, c, reply_queue); @@ -7244,6 +7233,26 @@ static void hpsa_disable_interrupt_mode(struct ctlr_info *h) h->msix_vectors = 0; } +static void hpsa_setup_reply_map(struct ctlr_info *h) +{ + const struct cpumask *mask; + unsigned int queue, cpu; + + for (queue = 0; queue < h->msix_vectors; queue++) { + mask = pci_irq_get_affinity(h->pdev, queue); + if (!mask) + goto fallback; + + for_each_cpu(cpu, mask) + h->reply_map[cpu] = queue; + } + return; + +fallback: + for_each_possible_cpu(cpu) + h->reply_map[cpu] = 0; +} + /* If MSI/MSI-X is supported by the kernel we will try to enable it on * controllers that are capable. If not, we use legacy INTx mode. 
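
hpsa_setup_reply_map() above precomputes, once at probe time, which reply queue each CPU should use, based on the MSI-X affinity masks returned by pci_irq_get_affinity(); the submission hot path then becomes a single per-CPU table lookup instead of a modulo on the current processor ID, with an all-zeroes fallback when no mask is available. A user-space sketch of that precomputation, with hard-coded bitmasks standing in for the affinity query:

#include <stdio.h>

#define NR_CPUS 8
#define NR_QUEUES 2

/* Fake affinity masks: queue 0 -> CPUs 0-3, queue 1 -> CPUs 4-7. */
static const unsigned long affinity[NR_QUEUES] = { 0x0f, 0xf0 };

static unsigned int reply_map[NR_CPUS];

static void setup_reply_map(void)
{
    unsigned int queue, cpu;

    for (queue = 0; queue < NR_QUEUES; queue++) {
        if (!affinity[queue])
            goto fallback;      /* no mask: route everything to queue 0 */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
            if (affinity[queue] & (1UL << cpu))
                reply_map[cpu] = queue;
    }
    return;

fallback:
    for (cpu = 0; cpu < NR_CPUS; cpu++)
        reply_map[cpu] = 0;
}

int main(void)
{
    unsigned int cpu;

    setup_reply_map();
    for (cpu = 0; cpu < NR_CPUS; cpu++)
        printf("cpu %u -> reply queue %u\n", cpu, reply_map[cpu]);
    return 0;
}
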
*/ @@ -7639,6 +7648,10 @@ static int hpsa_pci_init(struct ctlr_info *h) err = hpsa_interrupt_mode(h); if (err) goto clean1; + + /* setup mapping between CPU and reply queue */ + hpsa_setup_reply_map(h); + err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); if (err) goto clean2; /* intmode+region, pci */ @@ -8284,6 +8297,28 @@ static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h, return wq; } +static void hpda_free_ctlr_info(struct ctlr_info *h) +{ + kfree(h->reply_map); + kfree(h); +} + +static struct ctlr_info *hpda_alloc_ctlr_info(void) +{ + struct ctlr_info *h; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return NULL; + + h->reply_map = kzalloc(sizeof(*h->reply_map) * nr_cpu_ids, GFP_KERNEL); + if (!h->reply_map) { + kfree(h); + return NULL; + } + return h; +} + static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int dac, rc; @@ -8321,7 +8356,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * the driver. See comments in hpsa.h for more info. */ BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); - h = kzalloc(sizeof(*h), GFP_KERNEL); + h = hpda_alloc_ctlr_info(); if (!h) { dev_err(&pdev->dev, "Failed to allocate controller head\n"); return -ENOMEM; @@ -8638,7 +8673,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h) kfree(options); } -static void hpsa_shutdown(struct pci_dev *pdev) +static void __hpsa_shutdown(struct pci_dev *pdev) { struct ctlr_info *h; @@ -8653,6 +8688,12 @@ static void hpsa_shutdown(struct pci_dev *pdev) hpsa_disable_interrupt_mode(h); /* pci_init 2 */ } +static void hpsa_shutdown(struct pci_dev *pdev) +{ + __hpsa_shutdown(pdev); + pci_disable_device(pdev); +} + static void hpsa_free_device_info(struct ctlr_info *h) { int i; @@ -8684,6 +8725,8 @@ static void hpsa_remove_one(struct pci_dev *pdev) destroy_workqueue(h->rescan_ctlr_wq); destroy_workqueue(h->resubmit_wq); + hpsa_delete_sas_host(h); + /* * Call before disabling interrupts. 
* scsi_remove_host can trigger I/O operations especially @@ -8694,7 +8737,7 @@ static void hpsa_remove_one(struct pci_dev *pdev) scsi_remove_host(h->scsi_host); /* init_one 8 */ /* includes hpsa_free_irqs - init_one 4 */ /* includes hpsa_disable_interrupt_mode - pci_init 2 */ - hpsa_shutdown(pdev); + __hpsa_shutdown(pdev); hpsa_free_device_info(h); /* scan */ @@ -8718,9 +8761,7 @@ static void hpsa_remove_one(struct pci_dev *pdev) h->lockup_detected = NULL; /* init_one 2 */ /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */ - hpsa_delete_sas_host(h); - - kfree(h); /* init_one 1 */ + hpda_free_ctlr_info(h); /* init_one 1 */ } static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, @@ -9207,9 +9248,9 @@ static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy) struct sas_phy *phy = hpsa_sas_phy->phy; sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy); - sas_phy_free(phy); if (hpsa_sas_phy->added_to_port) list_del(&hpsa_sas_phy->phy_list_entry); + sas_phy_delete(phy); kfree(hpsa_sas_phy); } diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 018f980a701c..fb9f5e7f8209 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -158,6 +158,7 @@ struct bmic_controller_parameters { #pragma pack() struct ctlr_info { + unsigned int *reply_map; int ctlr; char devname[8]; char *product_name; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 9a0696f68f37..b81a53c4a9a8 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -367,7 +367,7 @@ enum ibmvfc_fcp_rsp_info_codes { }; struct ibmvfc_fcp_rsp_info { - __be16 reserved; + u8 reserved[3]; u8 rsp_code; u8 reserved2[4]; }__attribute__((packed, aligned (2))); diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index ac879745ef80..18a409bb9e0c 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c @@ -291,7 +291,7 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost, * Note: We have not moved the current phy_index so we will actually * compare the startting phy with itself. * This is expected and required to add the phy to the port. 
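
Worth pausing on the one-line ibmvfc.h change a few hunks above: the FCP response-info block carries three reserved bytes before rsp_code, so declaring the pad as a __be16 made the driver read rsp_code one byte too early. A quick offsetof() check of the two layouts (user-space C with GCC/Clang attributes; the struct names are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rsp_info_old {               /* buggy: rsp_code at offset 2 */
    uint16_t reserved;
    uint8_t rsp_code;
    uint8_t reserved2[4];
} __attribute__((packed, aligned(2)));

struct rsp_info_new {               /* fixed: rsp_code at offset 3 */
    uint8_t reserved[3];
    uint8_t rsp_code;
    uint8_t reserved2[4];
} __attribute__((packed, aligned(2)));

int main(void)
{
    printf("old rsp_code offset: %zu\n",
           offsetof(struct rsp_info_old, rsp_code));    /* prints 2 */
    printf("new rsp_code offset: %zu\n",
           offsetof(struct rsp_info_new, rsp_code));    /* prints 3 */
    return 0;
}
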
*/ - while (phy_index < SCI_MAX_PHYS) { + for (; phy_index < SCI_MAX_PHYS; phy_index++) { if ((phy_mask & (1 << phy_index)) == 0) continue; sci_phy_get_sas_address(&ihost->phys[phy_index], @@ -311,7 +311,6 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost, &ihost->phys[phy_index]); assigned_phy_mask |= (1 << phy_index); - phy_index++; } } diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 4d934d6c3e13..e11eff6b0e97 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -952,6 +953,13 @@ static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev) static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev) { + struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host); + struct iscsi_session *session = tcp_sw_host->session; + struct iscsi_conn *conn = session->leadconn; + + if (conn->datadgst_en) + sdev->request_queue->backing_dev_info->capabilities + |= BDI_CAP_STABLE_WRITES; blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY); blk_queue_dma_alignment(sdev->request_queue, 0); return 0; diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index f8dc1601efd5..bddbe2da5283 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1696,6 +1696,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) */ switch (session->state) { case ISCSI_STATE_FAILED: + /* + * cmds should fail during shutdown, if the session + * state is bad, allowing completion to happen + */ + if (unlikely(system_state != SYSTEM_RUNNING)) { + reason = FAILURE_SESSION_FAILED; + sc->result = DID_NO_CONNECT << 16; + break; + } case ISCSI_STATE_IN_RECOVERY: reason = FAILURE_SESSION_IN_RECOVERY; sc->result = DID_IMM_RETRY << 16; @@ -1980,6 +1989,19 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) } if (session->state != ISCSI_STATE_LOGGED_IN) { + /* + * During shutdown, if session is prematurely disconnected, + * recovery won't happen and there will be hung cmds. Not + * handling cmds would trigger EH, also bad in this case. + * Instead, handle cmd, allow completion to happen and let + * upper layer to deal with the result. + */ + if (unlikely(system_state != SYSTEM_RUNNING)) { + sc->result = DID_NO_CONNECT << 16; + ISCSI_DBG_EH(session, "sc on shutdown, handled\n"); + rc = BLK_EH_HANDLED; + goto done; + } /* * We are probably in the middle of iscsi recovery so let * that complete and handle the error. @@ -2084,7 +2106,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) task->last_timeout = jiffies; spin_unlock(&session->frwd_lock); ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? 
- "timer reset" : "nh"); + "timer reset" : "shutdown or nh"); return rc; } EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out); diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 6b4fd2375178..e2ea389fbec3 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -293,6 +293,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) phy->phy->minimum_linkrate = dr->pmin_linkrate; phy->phy->maximum_linkrate = dr->pmax_linkrate; phy->phy->negotiated_linkrate = phy->linkrate; + phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED); skip: if (new_phy) @@ -686,7 +687,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy) res = smp_execute_task(dev, req, RPEL_REQ_SIZE, resp, RPEL_RESP_SIZE); - if (!res) + if (res) goto out; phy->invalid_dword_count = scsi_to_u32(&resp[12]); @@ -695,6 +696,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy) phy->phy_reset_problem_count = scsi_to_u32(&resp[24]); out: + kfree(req); kfree(resp); return res; @@ -2145,7 +2147,7 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, struct sas_rphy *rphy) { struct domain_device *dev; - unsigned int reslen = 0; + unsigned int rcvlen = 0; int ret = -EINVAL; /* no rphy means no smp target support (ie aic94xx host) */ @@ -2179,12 +2181,12 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, ret = smp_execute_task_sg(dev, job->request_payload.sg_list, job->reply_payload.sg_list); - if (ret > 0) { - /* positive number is the untransferred residual */ - reslen = ret; + if (ret >= 0) { + /* bsg_job_done() requires the length received */ + rcvlen = job->reply_payload.payload_len - ret; ret = 0; } out: - bsg_job_done(job, ret, reslen); + bsg_job_done(job, ret, rcvlen); } diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index ea8ad06ff582..0c4b186c852a 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -222,6 +222,7 @@ int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) { struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host); + struct domain_device *dev = cmd_to_domain_dev(cmd); struct sas_task *task = TO_SAS_TASK(cmd); /* At this point, we only get called following an actual abort @@ -230,6 +231,14 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) */ sas_end_task(cmd, task); + if (dev_is_sata(dev)) { + /* defer commands to libata so that libata EH can + * handle ata qcs correctly + */ + list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q); + return; + } + /* now finish the command and move it on to the error * handler done list, this also takes it off the * error handler pending list. 
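
The sas_smp_get_phy_events() hunk above fixes two defects at once: the smp_execute_task() result was tested with inverted polarity (bailing out on success and parsing a dead response on failure), and the request buffer was never freed. A generic sketch of the corrected alloc/execute/parse/cleanup flow; execute_task() is a placeholder, not the libsas API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Placeholder for smp_execute_task(): 0 on success, negative on error. */
static int execute_task(const unsigned char *req, unsigned char *resp,
                        size_t resp_len)
{
    (void)req;                  /* a real SMP request would be sent here */
    memset(resp, 0, resp_len);  /* pretend the expander answered */
    return 0;
}

static int get_phy_events(unsigned int *invalid_dword_count)
{
    unsigned char *req = calloc(1, 16);
    unsigned char *resp = calloc(1, 56);
    int res = -1;

    if (!req || !resp)
        goto out;

    res = execute_task(req, resp, 56);
    if (res)                    /* bail on failure, not on success */
        goto out;

    *invalid_dword_count = resp[12];    /* parse only a valid response */
out:
    free(req);                  /* the kfree(req) the old code was missing */
    free(resp);
    return res;
}

int main(void)
{
    unsigned int count = 0;

    if (!get_phy_events(&count))
        printf("invalid dwords: %u\n", count);
    return 0;
}
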
@@ -237,22 +246,6 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q); } -static void sas_eh_defer_cmd(struct scsi_cmnd *cmd) -{ - struct domain_device *dev = cmd_to_domain_dev(cmd); - struct sas_ha_struct *ha = dev->port->ha; - struct sas_task *task = TO_SAS_TASK(cmd); - - if (!dev_is_sata(dev)) { - sas_eh_finish_cmd(cmd); - return; - } - - /* report the timeout to libata */ - sas_end_task(cmd, task); - list_move_tail(&cmd->eh_entry, &ha->eh_ata_q); -} - static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) { struct scsi_cmnd *cmd, *n; @@ -260,7 +253,7 @@ static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd list_for_each_entry_safe(cmd, n, error_q, eh_entry) { if (cmd->device->sdev_target == my_cmd->device->sdev_target && cmd->device->lun == my_cmd->device->lun) - sas_eh_defer_cmd(cmd); + sas_eh_finish_cmd(cmd); } } @@ -486,15 +479,28 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type, int sas_eh_abort_handler(struct scsi_cmnd *cmd) { - int res; + int res = TMF_RESP_FUNC_FAILED; struct sas_task *task = TO_SAS_TASK(cmd); struct Scsi_Host *host = cmd->device->host; + struct domain_device *dev = cmd_to_domain_dev(cmd); struct sas_internal *i = to_sas_internal(host->transportt); + unsigned long flags; if (!i->dft->lldd_abort_task) return FAILED; - res = i->dft->lldd_abort_task(task); + spin_lock_irqsave(host->host_lock, flags); + /* We cannot do async aborts for SATA devices */ + if (dev_is_sata(dev) && !host->host_eh_scheduled) { + spin_unlock_irqrestore(host->host_lock, flags); + return FAILED; + } + spin_unlock_irqrestore(host->host_lock, flags); + + if (task) + res = i->dft->lldd_abort_task(task); + else + SAS_DPRINTK("no task to abort\n"); if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) return SUCCESS; @@ -617,12 +623,12 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * case TASK_IS_DONE: SAS_DPRINTK("%s: task 0x%p is done\n", __func__, task); - sas_eh_defer_cmd(cmd); + sas_eh_finish_cmd(cmd); continue; case TASK_IS_ABORTED: SAS_DPRINTK("%s: task 0x%p is aborted\n", __func__, task); - sas_eh_defer_cmd(cmd); + sas_eh_finish_cmd(cmd); continue; case TASK_IS_AT_LU: SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); @@ -633,7 +639,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * "recovered\n", SAS_ADDR(task->dev), cmd->device->lun); - sas_eh_defer_cmd(cmd); + sas_eh_finish_cmd(cmd); sas_scsi_clear_queue_lu(work_q, cmd); goto Again; } diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index c17677f494af..3da242201cb4 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -871,7 +871,12 @@ lpfc_issue_lip(struct Scsi_Host *shost) LPFC_MBOXQ_t *pmboxq; int mbxstatus = MBXERR_ERROR; + /* + * If the link is offline, disabled or BLOCK_MGMT_IO + * it doesn't make any sense to allow issue_lip + */ if ((vport->fc_flag & FC_OFFLINE_MODE) || + (phba->hba_flag & LINK_DISABLED) || (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)) return -EPERM; @@ -3134,7 +3139,8 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf) struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); - return snprintf(buf, PAGE_SIZE, "%d\n", pring->txq_max); + return snprintf(buf, PAGE_SIZE, "%d\n", + pring ? 
pring->txq_max : 0); } static DEVICE_ATTR(txq_hw, S_IRUGO, @@ -3147,7 +3153,8 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr, struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba; struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba); - return snprintf(buf, PAGE_SIZE, "%d\n", pring->txcmplq_max); + return snprintf(buf, PAGE_SIZE, "%d\n", + pring ? pring->txcmplq_max : 0); } static DEVICE_ATTR(txcmplq_hw, S_IRUGO, diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index fe9e1c079c20..d89816222b23 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -2911,7 +2911,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, } } - if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) { + if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) { ret_val = -ENOMEM; goto err_post_rxbufs_exit; } @@ -5421,6 +5421,8 @@ lpfc_bsg_timeout(struct bsg_job *job) struct lpfc_iocbq *check_iocb, *next_iocb; pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return -EIO; /* if job's driver data is NULL, the command completed or is in the * the process of completing. In this case, return status to request diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 33417681f5d4..126723a5bc6f 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -471,6 +471,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) "Parse GID_FTrsp: did:x%x flg:x%x x%x", Did, ndlp->nlp_flag, vport->fc_flag); + ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); /* By default, the driver expects to support FCP FC4 */ if (fc4_type == FC_TYPE_FCP) ndlp->nlp_fc4_type |= NLP_FC4_FCP; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 468a66371de9..91783dbdf10c 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -2088,6 +2088,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ndlp = (struct lpfc_nodelist *) cmdiocb->context1; spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_PRLI_SND; + + /* Driver supports multiple FC4 types. Counters matter. */ + vport->fc_prli_sent--; + ndlp->fc4_prli_sent--; spin_unlock_irq(shost->host_lock); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, @@ -2095,9 +2099,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); - /* Ddriver supports multiple FC4 types. Counters matter. */ - vport->fc_prli_sent--; - /* PRLI completes to NPort */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0103 PRLI completes to NPort x%06x " @@ -2111,7 +2112,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (irsp->ulpStatus) { /* Check for retry */ - ndlp->fc4_prli_sent--; if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ goto out; @@ -2190,6 +2190,15 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_fc4_type |= NLP_FC4_NVME; local_nlp_type = ndlp->nlp_fc4_type; + /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp + * fields here before any of them can complete. + */ + ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); + ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); + ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_flag &= ~NLP_FIRSTBURST; + ndlp->nvme_fb_size = 0; + send_next_prli: if (local_nlp_type & NLP_FC4_FCP) { /* Payload is 4 + 16 = 20 x14 bytes. 
*/ @@ -2298,6 +2307,13 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_PRLI_SND; + + /* The vport counters are used for lpfc_scan_finished, but + * the ndlp is used to track outstanding PRLIs for different + * FC4 types. + */ + vport->fc_prli_sent++; + ndlp->fc4_prli_sent++; spin_unlock_irq(shost->host_lock); if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) { @@ -2308,12 +2324,6 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 1; } - /* The vport counters are used for lpfc_scan_finished, but - * the ndlp is used to track outstanding PRLIs for different - * FC4 types. - */ - vport->fc_prli_sent++; - ndlp->fc4_prli_sent++; /* The driver supports 2 FC4 types. Make sure * a PRLI is issued for all types before exiting. @@ -7430,6 +7440,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) timeout = (uint32_t)(phba->fc_ratov << 1); pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return; if ((phba->pport->load_flag & FC_UNLOADING)) return; @@ -9310,6 +9322,9 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return; + spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, list) { @@ -9416,7 +9431,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, rxid, 1); /* Check if TXQ queue needs to be serviced */ - if (!(list_empty(&pring->txq))) + if (pring && !list_empty(&pring->txq)) lpfc_worker_wake_up(phba); return; } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 20808349a80e..4962d665b4d2 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -698,8 +698,9 @@ lpfc_work_done(struct lpfc_hba *phba) phba->hba_flag & HBA_SP_QUEUE_EVT)) { if (pring->flag & LPFC_STOP_IOCB_EVENT) { pring->flag |= LPFC_DEFERRED_RING_EVENT; - /* Set the lpfc data pending flag */ - set_bit(LPFC_DATA_READY, &phba->data_flags); + /* Preserve legacy behavior. */ + if (!(phba->hba_flag & HBA_SP_QUEUE_EVT)) + set_bit(LPFC_DATA_READY, &phba->data_flags); } else { if (phba->link_state >= LPFC_LINK_UP || phba->link_flag & LS_MDS_LOOPBACK) { @@ -3324,7 +3325,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Unblock ELS traffic */ pring = lpfc_phba_elsring(phba); - pring->flag &= ~LPFC_STOP_IOCB_EVENT; + if (pring) + pring->flag &= ~LPFC_STOP_IOCB_EVENT; /* Check for error */ if (mb->mbxStatus) { @@ -4982,7 +4984,8 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_cancel_retry_delay_tmo(vport, ndlp); if ((ndlp->nlp_flag & NLP_DEFER_RM) && !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) && - !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { + !(ndlp->nlp_flag & NLP_RPI_REGISTERED) && + phba->sli_rev != LPFC_SLI_REV4) { /* For this case we need to cleanup the default rpi * allocated by the firmware. */ @@ -5430,6 +5433,8 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) psli = &phba->sli; pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return; /* Error matching iocb on txq or txcmplq * First check the txq. 
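[Editor's note: several hunks above (the ELS timeout handler, fabric abort, XRI-aborted path, lpfc_free_tx) share one defensive pattern: lpfc_phba_elsring() can return NULL before the ELS ring exists or while it is being torn down, so each caller now bails out early. A compilable sketch of the guard clause, with unlikely() expanded to the GCC builtin the kernel uses; the types are invented:

    #include <stdio.h>

    #define unlikely(x) __builtin_expect(!!(x), 0)

    struct ring { int flag; };

    /* Bail out early when an optional resource was never set up;
     * unlikely() is only a branch-prediction hint. */
    static void service_ring(struct ring *pring)
    {
        if (unlikely(!pring))   /* ring may legitimately not exist */
            return;
        pring->flag = 0;        /* safe: pring is known non-NULL here */
    }

    int main(void)
    {
        struct ring r = { .flag = 1 };
        service_ring(NULL);     /* tolerated instead of crashing */
        service_ring(&r);
        printf("flag = %d\n", r.flag);
        return 0;
    }
]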
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 1db0a38683f4..2b145966c73f 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -3636,7 +3636,7 @@ struct lpfc_mbx_get_port_name { #define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4 #define MB_CQE_STATUS_DMA_FAILED 0x5 -#define LPFC_MBX_WR_CONFIG_MAX_BDE 8 +#define LPFC_MBX_WR_CONFIG_MAX_BDE 1 struct lpfc_mbx_wr_object { struct mbox_header header; union { diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 100bc4c8798d..25612ccf6ff2 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -9413,44 +9413,62 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) lpfc_sli4_bar0_register_memmap(phba, if_type); } - if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && - (pci_resource_start(pdev, PCI_64BIT_BAR2))) { - /* - * Map SLI4 if type 0 HBA Control Register base to a kernel - * virtual address and setup the registers. - */ - phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); - bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); - phba->sli4_hba.ctrl_regs_memmap_p = - ioremap(phba->pci_bar1_map, bar1map_len); - if (!phba->sli4_hba.ctrl_regs_memmap_p) { - dev_printk(KERN_ERR, &pdev->dev, - "ioremap failed for SLI4 HBA control registers.\n"); + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { + if (pci_resource_start(pdev, PCI_64BIT_BAR2)) { + /* + * Map SLI4 if type 0 HBA Control Register base to a + * kernel virtual address and setup the registers. + */ + phba->pci_bar1_map = pci_resource_start(pdev, + PCI_64BIT_BAR2); + bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); + phba->sli4_hba.ctrl_regs_memmap_p = + ioremap(phba->pci_bar1_map, + bar1map_len); + if (!phba->sli4_hba.ctrl_regs_memmap_p) { + dev_err(&pdev->dev, + "ioremap failed for SLI4 HBA " + "control registers.\n"); + error = -ENOMEM; + goto out_iounmap_conf; + } + phba->pci_bar2_memmap_p = + phba->sli4_hba.ctrl_regs_memmap_p; + lpfc_sli4_bar1_register_memmap(phba); + } else { + error = -ENOMEM; goto out_iounmap_conf; } - phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; - lpfc_sli4_bar1_register_memmap(phba); } - if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && - (pci_resource_start(pdev, PCI_64BIT_BAR4))) { - /* - * Map SLI4 if type 0 HBA Doorbell Register base to a kernel - * virtual address and setup the registers. - */ - phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); - bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); - phba->sli4_hba.drbl_regs_memmap_p = - ioremap(phba->pci_bar2_map, bar2map_len); - if (!phba->sli4_hba.drbl_regs_memmap_p) { - dev_printk(KERN_ERR, &pdev->dev, - "ioremap failed for SLI4 HBA doorbell registers.\n"); - goto out_iounmap_ctrl; - } - phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; - error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); - if (error) + if (if_type == LPFC_SLI_INTF_IF_TYPE_0) { + if (pci_resource_start(pdev, PCI_64BIT_BAR4)) { + /* + * Map SLI4 if type 0 HBA Doorbell Register base to + * a kernel virtual address and setup the registers. 
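[Editor's note: the restructured lpfc_sli4_pci_mem_setup() above now treats an absent BAR as a hard failure (-ENOMEM) and unwinds any registers already mapped through the existing goto labels. The shape of that error ladder, as a standalone sketch in which malloc() stands in for ioremap() and the names are illustrative:

    #include <stdlib.h>

    /* Each acquisition gets a matching label; a failure at step N
     * releases steps N-1..1 in reverse order. */
    static int setup(void)
    {
        void *bar0, *bar1, *bar2;
        int error = -1;                       /* stand-in for -ENOMEM */

        bar0 = malloc(64);
        if (!bar0)
            goto out;
        bar1 = malloc(64);
        if (!bar1)
            goto out_free_bar0;
        bar2 = malloc(64);
        if (!bar2)
            goto out_free_bar1;
        free(bar2); free(bar1); free(bar0);   /* success path (demo only) */
        return 0;

    out_free_bar1:
        free(bar1);
    out_free_bar0:
        free(bar0);
    out:
        return error;
    }

    int main(void) { return setup() ? 1 : 0; }
]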
+ */ + phba->pci_bar2_map = pci_resource_start(pdev, + PCI_64BIT_BAR4); + bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); + phba->sli4_hba.drbl_regs_memmap_p = + ioremap(phba->pci_bar2_map, + bar2map_len); + if (!phba->sli4_hba.drbl_regs_memmap_p) { + dev_err(&pdev->dev, + "ioremap failed for SLI4 HBA" + " doorbell registers.\n"); + error = -ENOMEM; + goto out_iounmap_ctrl; + } + phba->pci_bar4_memmap_p = + phba->sli4_hba.drbl_regs_memmap_p; + error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); + if (error) + goto out_iounmap_all; + } else { + error = -ENOMEM; goto out_iounmap_all; + } } return 0; @@ -11404,6 +11422,13 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) /* Remove FC host and then SCSI host with the physical port */ fc_remove_host(shost); scsi_remove_host(shost); + /* + * Bring down the SLI Layer. This step disables all interrupts, + * clears the rings, discards all mailbox commands, and resets + * the HBA FCoE function. + */ + lpfc_debugfs_terminate(vport); + lpfc_sli4_hba_unset(phba); /* Perform ndlp cleanup on the physical port. The nvme and nvmet * localports are destroyed after to cleanup all transport memory. @@ -11412,14 +11437,8 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) lpfc_nvmet_destroy_targetport(phba); lpfc_nvme_destroy_localport(vport); - /* - * Bring down the SLI Layer. This step disables all interrupts, - * clears the rings, discards all mailbox commands, and resets - * the HBA FCoE function. - */ - lpfc_debugfs_terminate(vport); - lpfc_sli4_hba_unset(phba); + lpfc_stop_hba_timers(phba); spin_lock_irq(&phba->hbalock); list_del_init(&vport->listentry); spin_unlock_irq(&phba->hbalock); diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 56faeb049b4a..87c08ff37ddd 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -753,12 +753,12 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys); rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); if (rc < 0) { - (rqbp->rqb_free_buffer)(phba, rqb_entry); lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "6409 Cannot post to RQ %d: %x %x\n", rqb_entry->hrq->queue_id, rqb_entry->hrq->host_index, rqb_entry->hrq->hba_index); + (rqbp->rqb_free_buffer)(phba, rqb_entry); } else { list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); rqbp->buffer_count++; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index f3ad7cac355d..d489f6827cc1 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -216,7 +216,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) pring = lpfc_phba_elsring(phba); /* In case of error recovery path, we might have a NULL pring here */ - if (!pring) + if (unlikely(!pring)) return; /* Abort outstanding I/O on NPort */ @@ -390,6 +390,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, break; } + ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); + ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); + ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_flag &= ~NLP_FIRSTBURST; + /* Check for Nport to NPort pt2pt protocol */ if ((vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_PT2PT_PLOGI)) { @@ -742,9 +747,6 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lp = (uint32_t *) pcmd->virt; npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t)); - ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); - ndlp->nlp_fcp_info 
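[Editor's note: the lpfc_rq_buf_free() hunk above is a use-after-free fix by reordering. The error message dereferences rqb_entry->hrq, so the log must run first and rqb_free_buffer() afterwards. The same rule in miniature, with invented types:

    #include <stdio.h>
    #include <stdlib.h>

    struct buf { int queue_id; };

    /* Read every field needed for the error message before the buffer
     * is returned to its pool, never after. */
    static void fail_path(struct buf *b)
    {
        fprintf(stderr, "cannot post buffer for queue %d\n", b->queue_id);
        free(b);    /* only after the last dereference */
    }

    int main(void)
    {
        struct buf *b = malloc(sizeof(*b));
        if (!b)
            return 1;
        b->queue_id = 6409;
        fail_path(b);
        return 0;
    }
]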
&= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag &= ~NLP_FIRSTBURST; if ((npr->prliType == PRLI_FCP_TYPE) || (npr->prliType == PRLI_NVME_TYPE)) { if (npr->initiatorFunc) { @@ -769,8 +771,12 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * type. Target mode does not issue gft_id so doesn't get * the fc4 type set until now. */ - if ((phba->nvmet_support) && (npr->prliType == PRLI_NVME_TYPE)) + if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) { ndlp->nlp_fc4_type |= NLP_FC4_NVME; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + } + if (npr->prliType == PRLI_FCP_TYPE) + ndlp->nlp_fc4_type |= NLP_FC4_FCP; } if (rport) { /* We need to update the rport role values */ @@ -1552,7 +1558,6 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport, if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { lpfc_rcv_prli(vport, ndlp, cmdiocb); lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); - lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } else { /* RPI registration has not completed. Reject the PRLI * to prevent an illegal state transition when the @@ -1564,10 +1569,11 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport, ndlp->nlp_rpi, ndlp->nlp_state, ndlp->nlp_flag); memset(&stat, 0, sizeof(struct ls_rjt)); - stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; - stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; + stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY; + stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return ndlp->nlp_state; } } else { /* Initiator mode. */ @@ -1922,13 +1928,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return ndlp->nlp_state; } - /* Check out PRLI rsp */ - ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); - ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - - /* NVME or FCP first burst must be negotiated for each PRLI. */ - ndlp->nlp_flag &= ~NLP_FIRSTBURST; - ndlp->nvme_fb_size = 0; if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) && (npr->prliType == PRLI_FCP_TYPE)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, @@ -1945,8 +1944,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; - /* PRLI completed. Decrement count. */ - ndlp->fc4_prli_sent--; } else if (nvpr && (bf_get_be32(prli_acc_rsp_code, nvpr) == PRLI_REQ_EXECUTED) && @@ -1991,8 +1988,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, be32_to_cpu(nvpr->word5), ndlp->nlp_flag, ndlp->nlp_fcp_info, ndlp->nlp_type); - /* PRLI completed. Decrement count. 
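[Editor's note: in lpfc_rcv_prli_reglogin_issue() above, the reject path now answers LSRJT_LOGICAL_BSY and returns the current nlp_state immediately, so a PRLI arriving before RPI registration completes can no longer force an illegal state transition. A toy version of hold-in-place event handling; the state names are invented for the example:

    #include <stdio.h>

    enum node_state { STE_REGLOGIN_ISSUE, STE_UNMAPPED, STE_MAPPED };

    /* An event that arrives before its prerequisite completed is
     * answered with "busy, retry later" and the state machine stays
     * exactly where it is. */
    static enum node_state rcv_prli(enum node_state cur, int rpi_registered)
    {
        if (!rpi_registered) {
            printf("reject PRLI: logically busy, try again later\n");
            return cur;             /* no transition yet */
        }
        return STE_UNMAPPED;        /* normal progression */
    }

    int main(void)
    {
        enum node_state s = STE_REGLOGIN_ISSUE;
        s = rcv_prli(s, 0);         /* held in place */
        s = rcv_prli(s, 1);         /* now allowed to advance */
        printf("final state = %d\n", s);
        return 0;
    }
]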
*/ - ndlp->fc4_prli_sent--; } if (!(ndlp->nlp_type & NLP_FCP_TARGET) && (vport->port_type == LPFC_NPIV_PORT) && @@ -2016,7 +2011,8 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); - else + else if (ndlp->nlp_type & + (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } else lpfc_printf_vlog(vport, diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 0b7c1a49e203..7ac1a067d780 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1138,9 +1138,14 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) #endif if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, - "6025 Cannot register NVME targetport " - "x%x\n", error); + "6025 Cannot register NVME targetport x%x: " + "portnm %llx nodenm %llx segs %d qs %d\n", + error, + pinfo.port_name, pinfo.node_name, + lpfc_tgttemplate.max_sgl_segments, + lpfc_tgttemplate.max_hw_queues); phba->targetport = NULL; + phba->nvmet_support = 0; lpfc_nvmet_cleanup_io_context(phba); @@ -1152,9 +1157,11 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, "6026 Registered NVME " "targetport: %p, private %p " - "portnm %llx nodenm %llx\n", + "portnm %llx nodenm %llx segs %d qs %d\n", phba->targetport, tgtp, - pinfo.port_name, pinfo.node_name); + pinfo.port_name, pinfo.node_name, + lpfc_tgttemplate.max_sgl_segments, + lpfc_tgttemplate.max_hw_queues); atomic_set(&tgtp->rcv_ls_req_in, 0); atomic_set(&tgtp->rcv_ls_req_out, 0); @@ -1457,6 +1464,7 @@ static struct lpfc_nvmet_ctxbuf * lpfc_nvmet_replenish_context(struct lpfc_hba *phba, struct lpfc_nvmet_ctx_info *current_infop) { +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_nvmet_ctxbuf *ctx_buf = NULL; struct lpfc_nvmet_ctx_info *get_infop; int i; @@ -1504,6 +1512,7 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba, get_infop = get_infop->nvmet_ctx_next_cpu; } +#endif /* Nothing found, all contexts for the MRQ are in-flight */ return NULL; } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 8b119f87b51d..dc83498024dc 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -129,6 +129,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) /* set consumption flag every once in a while */ if (!((q->host_index + 1) % q->entry_repost)) bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); + else + bf_set(wqe_wqec, &wqe->generic.wqe_com, 0); if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); @@ -9396,10 +9398,13 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) * for abort iocb hba_wqidx should already * be setup based on what work queue we used. 
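[Editor's note: the lpfc_sli4_wq_put() hunk above adds an else branch that clears wqe_wqec. Work-queue entries live in a ring and are recycled, so a flag set on an earlier lap would otherwise leak into an unrelated command. A standalone model of why recycled slots need explicit clears; sizes and flag names are invented:

    #include <stdio.h>

    #define RING_SIZE   4
    #define FLAG_NOTIFY 0x1u

    static unsigned int ring[RING_SIZE];

    /* A per-slot flag must be written on every pass - set when wanted,
     * cleared otherwise - or a stale value from a previous lap survives. */
    static void post(int host_index, int want_notify)
    {
        unsigned int *slot = &ring[host_index % RING_SIZE];

        if (want_notify)
            *slot |= FLAG_NOTIFY;
        else
            *slot &= ~FLAG_NOTIFY;  /* the added else branch above */
    }

    int main(void)
    {
        post(0, 1);                 /* lap 1: notify requested */
        post(4, 0);                 /* lap 2 reuses slot 0 */
        printf("slot0 flag = %u\n", ring[0] & FLAG_NOTIFY); /* 0 */
        return 0;
    }
]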
*/ - if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) + if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { piocb->hba_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb->context1); + piocb->hba_wqidx = piocb->hba_wqidx % + phba->cfg_fcp_io_channel; + } return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring; } else { if (unlikely(!phba->sli4_hba.oas_wq)) @@ -10632,6 +10637,14 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) return 0; + if (!pring) { + if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) + cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; + else + cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; + goto abort_iotag_exit; + } + /* * If we're unloading, don't abort iocb on the ELS ring, but change * the callback so that nothing happens when it finishes. @@ -12500,6 +12513,8 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, unsigned long iflags; pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return NULL; wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; spin_lock_irqsave(&pring->ring_lock, iflags); @@ -12507,19 +12522,21 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, /* Look up the ELS command IOCB and create pseudo response IOCB */ cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, bf_get(lpfc_wcqe_c_request_tag, wcqe)); - /* Put the iocb back on the txcmplq */ - lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); - spin_unlock_irqrestore(&pring->ring_lock, iflags); - if (unlikely(!cmdiocbq)) { + spin_unlock_irqrestore(&pring->ring_lock, iflags); lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0386 ELS complete with no corresponding " - "cmdiocb: iotag (%d)\n", - bf_get(lpfc_wcqe_c_request_tag, wcqe)); + "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", + wcqe->word0, wcqe->total_data_placed, + wcqe->parameter, wcqe->word3); lpfc_sli_release_iocbq(phba, irspiocbq); return NULL; } + /* Put the iocb back on the txcmplq */ + lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + /* Fake the irspiocbq and copy necessary response information */ lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); @@ -17137,7 +17154,8 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, if (pcmd && pcmd->virt) dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); kfree(pcmd); - lpfc_sli_release_iocbq(phba, iocbq); + if (iocbq) + lpfc_sli_release_iocbq(phba, iocbq); lpfc_in_buf_free(phba, &dmabuf->dbuf); } @@ -18691,6 +18709,8 @@ lpfc_drain_txq(struct lpfc_hba *phba) uint32_t txq_cnt = 0; pring = lpfc_phba_elsring(phba); + if (unlikely(!pring)) + return 0; spin_lock_irqsave(&pring->ring_lock, iflags); list_for_each_entry(piocbq, &pring->txq, list) { diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 7195cff51d4c..9b6f5d024dba 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -4199,6 +4199,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) int irq, i, j; int error = -ENODEV; + if (hba_count >= MAX_CONTROLLERS) + goto out; + if (pci_enable_device(pdev)) goto out; pci_set_master(pdev); diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index a6722c93a295..81de4a1fbb9b 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1504,6 +1504,13 @@ enum FW_BOOT_CONTEXT { #define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000 +enum MR_ADAPTER_TYPE { + MFI_SERIES = 1, + THUNDERBOLT_SERIES = 2, + INVADER_SERIES = 3, + VENTURA_SERIES = 4, +}; + /* * 
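[Editor's note: two small range checks appear above: megaraid_probe_one() refuses controllers past MAX_CONTROLLERS, and lpfc_sli4_calc_ring() clamps hba_wqidx with a modulo so the distribution result can never index past cfg_fcp_io_channel. The clamp in isolation, with an invented channel count:

    #include <stdio.h>

    #define MAX_CHANNELS 4

    /* Whatever the distribution function returns, a final modulo
     * guarantees the value is a valid array subscript. */
    static int pick_channel(int raw_index)
    {
        return raw_index % MAX_CHANNELS;
    }

    int main(void)
    {
        int queues[MAX_CHANNELS] = { 0 };

        queues[pick_channel(2)]++;   /* in range already */
        queues[pick_channel(9)]++;   /* would overflow without the clamp */
        printf("queues[2]=%d queues[1]=%d\n", queues[2], queues[1]);
        return 0;
    }
]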
register set for both 1068 and 1078 controllers * structure extended for 1078 registers @@ -2092,6 +2099,7 @@ enum MR_PD_TYPE { struct megasas_instance { + unsigned int *reply_map; __le32 *producer; dma_addr_t producer_h; __le32 *consumer; @@ -2236,12 +2244,12 @@ struct megasas_instance { bool dev_handle; bool fw_sync_cache_support; u32 mfi_frame_size; - bool is_ventura; bool msix_combined; u16 max_raid_mapsize; /* preffered count to send as LDIO irrspective of FP capable.*/ u8 r1_ldio_hint_default; u32 nvme_page_size; + u8 adapter_type; }; struct MR_LD_VF_MAP { u32 size; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index e518dadc8161..985378e4bb6f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2023,7 +2023,7 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance) msleep(1000); if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || - (instance->ctrl_context)) { + (instance->adapter_type != MFI_SERIES)) { writel(MFI_STOP_ADP, &instance->reg_set->doorbell); /* Flush */ readl(&instance->reg_set->doorbell); @@ -2494,7 +2494,8 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance, dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n", instance->host->host_no); - if (instance->ctrl_context && !instance->mask_interrupts) + if ((instance->adapter_type != MFI_SERIES) && + !instance->mask_interrupts) retval = megasas_issue_blocked_cmd(instance, cmd, MEGASAS_ROUTINE_WAIT_TIME_VF); else @@ -2790,7 +2791,9 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd) /* * First wait for all commands to complete */ - if (instance->ctrl_context) { + if (instance->adapter_type == MFI_SERIES) { + ret = megasas_generic_reset(scmd); + } else { struct megasas_cmd_fusion *cmd; cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr; if (cmd) @@ -2798,8 +2801,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd) MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE); ret = megasas_reset_fusion(scmd->device->host, SCSIIO_TIMEOUT_OCR); - } else - ret = megasas_generic_reset(scmd); + } return ret; } @@ -2816,7 +2818,7 @@ static int megasas_task_abort(struct scsi_cmnd *scmd) instance = (struct megasas_instance *)scmd->device->host->hostdata; - if (instance->ctrl_context) + if (instance->adapter_type != MFI_SERIES) ret = megasas_task_abort_fusion(scmd); else { sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n"); @@ -2838,7 +2840,7 @@ static int megasas_reset_target(struct scsi_cmnd *scmd) instance = (struct megasas_instance *)scmd->device->host->hostdata; - if (instance->ctrl_context) + if (instance->adapter_type != MFI_SERIES) ret = megasas_reset_target_fusion(scmd); else { sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n"); @@ -3715,7 +3717,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || - (instance->ctrl_context)) + (instance->adapter_type != MFI_SERIES)) writel( MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, &instance->reg_set->doorbell); @@ -3733,7 +3735,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || - (instance->ctrl_context)) + (instance->adapter_type != MFI_SERIES)) writel(MFI_INIT_HOTPLUG, 
&instance->reg_set->doorbell); else @@ -3753,11 +3755,11 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || - (instance->ctrl_context)) { + (instance->adapter_type != MFI_SERIES)) { writel(MFI_RESET_FLAGS, &instance->reg_set->doorbell); - if (instance->ctrl_context) { + if (instance->adapter_type != MFI_SERIES) { for (i = 0; i < (10 * 1000); i += 20) { if (readl( &instance-> @@ -3924,7 +3926,8 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) * max_sge_sz = 12 byte (sizeof megasas_sge64) * Total 192 byte (3 MFI frame of 64 byte) */ - frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1); + frame_count = (instance->adapter_type == MFI_SERIES) ? + (15 + 1) : (3 + 1); instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; /* * Use DMA pool facility provided by PCI layer @@ -3979,7 +3982,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) memset(cmd->frame, 0, instance->mfi_frame_size); cmd->frame->io.context = cpu_to_le32(cmd->index); cmd->frame->io.pad_0 = 0; - if (!instance->ctrl_context && reset_devices) + if ((instance->adapter_type == MFI_SERIES) && reset_devices) cmd->frame->hdr.cmd = MFI_CMD_INVALID; } @@ -4099,7 +4102,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance) inline int dcmd_timeout_ocr_possible(struct megasas_instance *instance) { - if (!instance->ctrl_context) + if (instance->adapter_type == MFI_SERIES) return KILL_ADAPTER; else if (instance->unload || test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) @@ -4143,7 +4146,8 @@ megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev) dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h); dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO)); - if (instance->ctrl_context && !instance->mask_interrupts) + if ((instance->adapter_type != MFI_SERIES) && + !instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); @@ -4240,7 +4244,8 @@ megasas_get_pd_list(struct megasas_instance *instance) dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); - if (instance->ctrl_context && !instance->mask_interrupts) + if ((instance->adapter_type != MFI_SERIES) && + !instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else @@ -4251,7 +4256,7 @@ megasas_get_pd_list(struct megasas_instance *instance) dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY " "failed/not supported by firmware\n"); - if (instance->ctrl_context) + if (instance->adapter_type != MFI_SERIES) megaraid_sas_kill_hba(instance); else instance->pd_list_not_supported = 1; @@ -4372,7 +4377,8 @@ megasas_get_ld_list(struct megasas_instance *instance) dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST)); dcmd->pad_0 = 0; - if (instance->ctrl_context && !instance->mask_interrupts) + if ((instance->adapter_type != MFI_SERIES) && + !instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else @@ -4491,7 +4497,8 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); dcmd->pad_0 = 0; - if (instance->ctrl_context && !instance->mask_interrupts) + if ((instance->adapter_type != MFI_SERIES) && + 
!instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); @@ -4664,7 +4671,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance) dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info)); dcmd->mbox.b[0] = 1; - if (instance->ctrl_context && !instance->mask_interrupts) + if ((instance->adapter_type != MFI_SERIES) && + !instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); @@ -4783,7 +4791,8 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance, dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h); dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE); - if (instance->ctrl_context && !instance->mask_interrupts) + if ((instance->adapter_type != MFI_SERIES) && + !instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else ret = megasas_issue_polled(instance, cmd); @@ -5129,6 +5138,26 @@ megasas_setup_jbod_map(struct megasas_instance *instance) instance->use_seqnum_jbod_fp = false; } +static void megasas_setup_reply_map(struct megasas_instance *instance) +{ + const struct cpumask *mask; + unsigned int queue, cpu; + + for (queue = 0; queue < instance->msix_vectors; queue++) { + mask = pci_irq_get_affinity(instance->pdev, queue); + if (!mask) + goto fallback; + + for_each_cpu(cpu, mask) + instance->reply_map[cpu] = queue; + } + return; + +fallback: + for_each_possible_cpu(cpu) + instance->reply_map[cpu] = cpu % instance->msix_vectors; +} + /** * megasas_init_fw - Initializes the FW * @instance: Adapter soft state @@ -5170,7 +5199,7 @@ static int megasas_init_fw(struct megasas_instance *instance) reg_set = instance->reg_set; - if (fusion) + if (instance->adapter_type != MFI_SERIES) instance->instancet = &megasas_instance_template_fusion; else { switch (instance->pdev->device) { @@ -5211,7 +5240,7 @@ static int megasas_init_fw(struct megasas_instance *instance) goto fail_ready_state; } - if (instance->is_ventura) { + if (instance->adapter_type == VENTURA_SERIES) { scratch_pad_3 = readl(&instance->reg_set->outbound_scratch_pad_3); instance->max_raid_mapsize = ((scratch_pad_3 >> @@ -5229,7 +5258,8 @@ static int megasas_init_fw(struct megasas_instance *instance) (&instance->reg_set->outbound_scratch_pad_2); /* Check max MSI-X vectors */ if (fusion) { - if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/ + if (instance->adapter_type == THUNDERBOLT_SERIES) { + /* Thunderbolt Series*/ instance->msix_vectors = (scratch_pad_2 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; fw_msix_count = instance->msix_vectors; @@ -5293,6 +5323,8 @@ static int megasas_init_fw(struct megasas_instance *instance) goto fail_setup_irqs; } + megasas_setup_reply_map(instance); + dev_info(&instance->pdev->dev, "firmware supports msix\t: (%d)", fw_msix_count); dev_info(&instance->pdev->dev, @@ -5319,7 +5351,7 @@ static int megasas_init_fw(struct megasas_instance *instance) if (instance->instancet->init_adapter(instance)) goto fail_init_adapter; - if (instance->is_ventura) { + if (instance->adapter_type == VENTURA_SERIES) { scratch_pad_4 = readl(&instance->reg_set->outbound_scratch_pad_4); if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= @@ -5355,7 +5387,7 @@ static int megasas_init_fw(struct megasas_instance *instance) memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); /* stream detection initialization */ - if (instance->is_ventura && fusion) { + 
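[Editor's note: megasas_setup_reply_map(), added above, precomputes a CPU-to-reply-queue table from pci_irq_get_affinity() and falls back to a plain modulo spread as soon as any vector has no affinity mask. A userspace model of the table construction; the queue/CPU counts and the affinity stand-in are invented:

    #include <stdio.h>

    #define NR_CPUS   8
    #define NR_QUEUES 3

    static unsigned int reply_map[NR_CPUS];

    /* Stand-in for pci_irq_get_affinity(): pretend affinity is only
     * known for queue 0 (CPUs 0 and 1), forcing the fallback. */
    static int queue_affinity(unsigned int queue, int cpus[NR_CPUS])
    {
        if (queue != 0)
            return -1;                 /* no mask available */
        for (int c = 0; c < NR_CPUS; c++)
            cpus[c] = (c < 2);
        return 0;
    }

    static void setup_reply_map(void)
    {
        int cpus[NR_CPUS];

        for (unsigned int queue = 0; queue < NR_QUEUES; queue++) {
            if (queue_affinity(queue, cpus) < 0)
                goto fallback;         /* any gap invalidates the map */
            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpus[cpu])
                    reply_map[cpu] = queue;
        }
        return;

    fallback:
        for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
            reply_map[cpu] = cpu % NR_QUEUES;
    }

    int main(void)
    {
        setup_reply_map();
        for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
            printf("cpu %u -> reply queue %u\n", cpu, reply_map[cpu]);
        return 0;
    }

The payoff appears further down, where megasas_build_ldio_fusion() and megasas_build_syspd_fusion() set MSIxIndex from reply_map[raw_smp_processor_id()] instead of the old raw modulo, so the chosen reply queue is always one whose interrupt affinity covers the submitting CPU.]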
if (instance->adapter_type == VENTURA_SERIES) { fusion->stream_detect_by_ld = kzalloc(sizeof(struct LD_STREAM_DETECT *) * MAX_LOGICAL_DRIVES_EXT, @@ -5804,7 +5836,8 @@ megasas_get_target_prop(struct megasas_instance *instance, dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); - if (instance->ctrl_context && !instance->mask_interrupts) + if ((instance->adapter_type != MFI_SERIES) && + !instance->mask_interrupts) ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS); else @@ -5965,6 +5998,125 @@ megasas_set_dma_mask(struct pci_dev *pdev) return 1; } +/* + * megasas_set_adapter_type - Set adapter type. + * Supported controllers can be divided in + * 4 categories- enum MR_ADAPTER_TYPE { + * MFI_SERIES = 1, + * THUNDERBOLT_SERIES = 2, + * INVADER_SERIES = 3, + * VENTURA_SERIES = 4, + * }; + * @instance: Adapter soft state + * return: void + */ +static inline void megasas_set_adapter_type(struct megasas_instance *instance) +{ + switch (instance->pdev->device) { + case PCI_DEVICE_ID_LSI_VENTURA: + case PCI_DEVICE_ID_LSI_HARPOON: + case PCI_DEVICE_ID_LSI_TOMCAT: + case PCI_DEVICE_ID_LSI_VENTURA_4PORT: + case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: + instance->adapter_type = VENTURA_SERIES; + break; + case PCI_DEVICE_ID_LSI_FUSION: + case PCI_DEVICE_ID_LSI_PLASMA: + instance->adapter_type = THUNDERBOLT_SERIES; + break; + case PCI_DEVICE_ID_LSI_INVADER: + case PCI_DEVICE_ID_LSI_INTRUDER: + case PCI_DEVICE_ID_LSI_INTRUDER_24: + case PCI_DEVICE_ID_LSI_CUTLASS_52: + case PCI_DEVICE_ID_LSI_CUTLASS_53: + case PCI_DEVICE_ID_LSI_FURY: + instance->adapter_type = INVADER_SERIES; + break; + default: /* For all other supported controllers */ + instance->adapter_type = MFI_SERIES; + break; + } +} + +static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance) +{ + instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32), + &instance->producer_h); + instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32), + &instance->consumer_h); + + if (!instance->producer || !instance->consumer) { + dev_err(&instance->pdev->dev, + "Failed to allocate memory for producer, consumer\n"); + return -1; + } + + *instance->producer = 0; + *instance->consumer = 0; + return 0; +} + +/** + * megasas_alloc_ctrl_mem - Allocate per controller memory for core data + * structures which are not common across MFI + * adapters and fusion adapters. + * For MFI based adapters, allocate producer and + * consumer buffers. For fusion adapters, allocate + * memory for fusion context. 
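[Editor's note: megasas_set_adapter_type() above folds the old is_ventura flag and fusion->adapter_type into one ordered enum, so comparisons such as instance->adapter_type >= INVADER_SERIES can express "Invader or anything newer". A sketch of that dispatch style; the description strings are invented:

    #include <stdio.h>

    /* Mirror of the enum introduced above; the ordering is meaningful. */
    enum adapter_type {
        MFI_SERIES = 1,
        THUNDERBOLT_SERIES = 2,
        INVADER_SERIES = 3,
        VENTURA_SERIES = 4,
    };

    static const char *describe(enum adapter_type t)
    {
        if (t == MFI_SERIES)
            return "legacy MFI path";
        if (t >= INVADER_SERIES)
            return "fusion path with extended MSI-X";
        return "fusion path";
    }

    int main(void)
    {
        printf("%s\n", describe(VENTURA_SERIES));
        printf("%s\n", describe(MFI_SERIES));
        return 0;
    }
]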
+ * @instance: Adapter soft state + * return: 0 for SUCCESS + */ +static int megasas_alloc_ctrl_mem(struct megasas_instance *instance) +{ + instance->reply_map = kzalloc(sizeof(unsigned int) * nr_cpu_ids, + GFP_KERNEL); + if (!instance->reply_map) + return -ENOMEM; + + switch (instance->adapter_type) { + case MFI_SERIES: + if (megasas_alloc_mfi_ctrl_mem(instance)) + goto fail; + break; + case VENTURA_SERIES: + case THUNDERBOLT_SERIES: + case INVADER_SERIES: + if (megasas_alloc_fusion_context(instance)) + goto fail; + break; + } + + return 0; + fail: + kfree(instance->reply_map); + instance->reply_map = NULL; + return -ENOMEM; +} + +/* + * megasas_free_ctrl_mem - Free fusion context for fusion adapters and + * producer, consumer buffers for MFI adapters + * + * @instance - Adapter soft instance + * + */ +static inline void megasas_free_ctrl_mem(struct megasas_instance *instance) +{ + kfree(instance->reply_map); + if (instance->adapter_type == MFI_SERIES) { + if (instance->producer) + pci_free_consistent(instance->pdev, sizeof(u32), + instance->producer, + instance->producer_h); + if (instance->consumer) + pci_free_consistent(instance->pdev, sizeof(u32), + instance->consumer, + instance->consumer_h); + } else { + megasas_free_fusion_context(instance); + } +} + /** * megasas_probe_one - PCI hotplug entry point * @pdev: PCI device structure @@ -5977,7 +6129,6 @@ static int megasas_probe_one(struct pci_dev *pdev, struct Scsi_Host *host; struct megasas_instance *instance; u16 control = 0; - struct fusion_context *fusion = NULL; /* Reset MSI-X in the kdump kernel */ if (reset_devices) { @@ -6022,56 +6173,10 @@ static int megasas_probe_one(struct pci_dev *pdev, atomic_set(&instance->fw_reset_no_pci_access, 0); instance->pdev = pdev; - switch (instance->pdev->device) { - case PCI_DEVICE_ID_LSI_VENTURA: - case PCI_DEVICE_ID_LSI_HARPOON: - case PCI_DEVICE_ID_LSI_TOMCAT: - case PCI_DEVICE_ID_LSI_VENTURA_4PORT: - case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: - instance->is_ventura = true; - case PCI_DEVICE_ID_LSI_FUSION: - case PCI_DEVICE_ID_LSI_PLASMA: - case PCI_DEVICE_ID_LSI_INVADER: - case PCI_DEVICE_ID_LSI_FURY: - case PCI_DEVICE_ID_LSI_INTRUDER: - case PCI_DEVICE_ID_LSI_INTRUDER_24: - case PCI_DEVICE_ID_LSI_CUTLASS_52: - case PCI_DEVICE_ID_LSI_CUTLASS_53: - { - if (megasas_alloc_fusion_context(instance)) { - megasas_free_fusion_context(instance); - goto fail_alloc_dma_buf; - } - fusion = instance->ctrl_context; - - if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || - (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) - fusion->adapter_type = THUNDERBOLT_SERIES; - else if (instance->is_ventura) - fusion->adapter_type = VENTURA_SERIES; - else - fusion->adapter_type = INVADER_SERIES; - } - break; - default: /* For all other supported controllers */ - - instance->producer = - pci_alloc_consistent(pdev, sizeof(u32), - &instance->producer_h); - instance->consumer = - pci_alloc_consistent(pdev, sizeof(u32), - &instance->consumer_h); - - if (!instance->producer || !instance->consumer) { - dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate " - "memory for producer, consumer\n"); - goto fail_alloc_dma_buf; - } + megasas_set_adapter_type(instance); - *instance->producer = 0; - *instance->consumer = 0; - break; - } + if (megasas_alloc_ctrl_mem(instance)) + goto fail_alloc_dma_buf; /* Crash dump feature related initialisation*/ instance->drv_buf_index = 0; @@ -6166,7 +6271,7 @@ static int megasas_probe_one(struct pci_dev *pdev, instance->disableOnlineCtrlReset = 1; instance->UnevenSpanSupport = 0; - 
if (instance->ctrl_context) { + if (instance->adapter_type != MFI_SERIES) { INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq); } else @@ -6246,7 +6351,7 @@ static int megasas_probe_one(struct pci_dev *pdev, instance->instancet->disable_intr(instance); megasas_destroy_irqs(instance); - if (instance->ctrl_context) + if (instance->adapter_type != MFI_SERIES) megasas_release_fusion(instance); else megasas_release_mfi(instance); @@ -6267,14 +6372,8 @@ static int megasas_probe_one(struct pci_dev *pdev, pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), instance->tgt_prop, instance->tgt_prop_h); - if (instance->producer) - pci_free_consistent(pdev, sizeof(u32), instance->producer, - instance->producer_h); - if (instance->consumer) - pci_free_consistent(pdev, sizeof(u32), instance->consumer, - instance->consumer_h); + megasas_free_ctrl_mem(instance); scsi_host_put(host); - fail_alloc_instance: fail_set_dma_mask: pci_disable_device(pdev); @@ -6480,7 +6579,9 @@ megasas_resume(struct pci_dev *pdev) if (rval < 0) goto fail_reenable_msix; - if (instance->ctrl_context) { + megasas_setup_reply_map(instance); + + if (instance->adapter_type != MFI_SERIES) { megasas_reset_reply_desc(instance); if (megasas_ioc_init_fusion(instance)) { megasas_free_cmds(instance); @@ -6543,12 +6644,8 @@ megasas_resume(struct pci_dev *pdev) pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), instance->tgt_prop, instance->tgt_prop_h); - if (instance->producer) - pci_free_consistent(pdev, sizeof(u32), instance->producer, - instance->producer_h); - if (instance->consumer) - pci_free_consistent(pdev, sizeof(u32), instance->consumer, - instance->consumer_h); + + megasas_free_ctrl_mem(instance); scsi_host_put(host); fail_set_dma_mask: @@ -6605,7 +6702,6 @@ static void megasas_detach_one(struct pci_dev *pdev) u32 pd_seq_map_sz; instance = pci_get_drvdata(pdev); - instance->unload = 1; host = instance->host; fusion = instance->ctrl_context; @@ -6616,6 +6712,7 @@ static void megasas_detach_one(struct pci_dev *pdev) if (instance->fw_crash_state != UNAVAILABLE) megasas_free_host_crash_buffer(instance); scsi_remove_host(instance->host); + instance->unload = 1; if (megasas_wait_for_adapter_operational(instance)) goto skip_firing_dcmds; @@ -6656,7 +6753,7 @@ static void megasas_detach_one(struct pci_dev *pdev) if (instance->msix_vectors) pci_free_irq_vectors(instance->pdev); - if (instance->is_ventura) { + if (instance->adapter_type == VENTURA_SERIES) { for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) kfree(fusion->stream_detect_by_ld[i]); kfree(fusion->stream_detect_by_ld); @@ -6664,7 +6761,7 @@ static void megasas_detach_one(struct pci_dev *pdev) } - if (instance->ctrl_context) { + if (instance->adapter_type != MFI_SERIES) { megasas_release_fusion(instance); pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + (sizeof(struct MR_PD_CFG_SEQ) * @@ -6689,15 +6786,8 @@ static void megasas_detach_one(struct pci_dev *pdev) fusion->pd_seq_sync[i], fusion->pd_seq_phys[i]); } - megasas_free_fusion_context(instance); } else { megasas_release_mfi(instance); - pci_free_consistent(pdev, sizeof(u32), - instance->producer, - instance->producer_h); - pci_free_consistent(pdev, sizeof(u32), - instance->consumer, - instance->consumer_h); } kfree(instance->ctrl_info); @@ -6738,6 +6828,8 @@ static void megasas_detach_one(struct pci_dev *pdev) pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO), instance->system_info_buf, instance->system_info_h); + 
megasas_free_ctrl_mem(instance); + scsi_host_put(host); pci_disable_device(pdev); diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index ecc699a65bac..f2ffde430ec1 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -168,7 +168,7 @@ static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, /* * This function will Populate Driver Map using firmware raid map */ -void MR_PopulateDrvRaidMap(struct megasas_instance *instance) +static int MR_PopulateDrvRaidMap(struct megasas_instance *instance) { struct fusion_context *fusion = instance->ctrl_context; struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL; @@ -259,7 +259,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount); if (ld_count > MAX_LOGICAL_DRIVES_EXT) { dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map is not valid\n"); - return; + return 1; } pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); @@ -285,6 +285,12 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) fusion->ld_map[(instance->map_id & 1)]; pFwRaidMap = &fw_map_old->raidMap; ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount); + if (ld_count > MAX_LOGICAL_DRIVES) { + dev_dbg(&instance->pdev->dev, + "LD count exposed in RAID map is not valid\n"); + return 1; + } + pDrvRaidMap->totalSize = pFwRaidMap->totalSize; pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; @@ -300,6 +306,8 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) sizeof(struct MR_DEV_HANDLE_INFO) * MAX_RAIDMAP_PHYSICAL_DEVICES); } + + return 0; } /* @@ -317,8 +325,8 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) u16 ld; u32 expected_size; - - MR_PopulateDrvRaidMap(instance); + if (MR_PopulateDrvRaidMap(instance)) + return 0; fusion = instance->ctrl_context; drv_map = fusion->ld_drv_map[(instance->map_id & 1)]; @@ -737,7 +745,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, *pDevHandle = MR_PdDevHandleGet(pd, map); *pPdInterface = MR_PdInterfaceTypeGet(pd, map); /* get second pd also for raid 1/10 fast path writes*/ - if (instance->is_ventura && + if ((instance->adapter_type == VENTURA_SERIES) && (raid->level == 1) && !io_info->isRead) { r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); @@ -747,8 +755,8 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, } } else { if ((raid->level >= 5) && - ((fusion->adapter_type == THUNDERBOLT_SERIES) || - ((fusion->adapter_type == INVADER_SERIES) && + ((instance->adapter_type == THUNDERBOLT_SERIES) || + ((instance->adapter_type == INVADER_SERIES) && (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE; else if (raid->level == 1) { @@ -762,7 +770,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, } *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); - if (instance->is_ventura) { + if (instance->adapter_type == VENTURA_SERIES) { ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; io_info->span_arm = @@ -853,7 +861,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, *pDevHandle = MR_PdDevHandleGet(pd, map); *pPdInterface = MR_PdInterfaceTypeGet(pd, map); /* get second pd also for raid 1/10 fast path writes*/ - if (instance->is_ventura && + if
((instance->adapter_type == VENTURA_SERIES) && (raid->level == 1) && !io_info->isRead) { r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); @@ -863,8 +871,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, } } else { if ((raid->level >= 5) && - ((fusion->adapter_type == THUNDERBOLT_SERIES) || - ((fusion->adapter_type == INVADER_SERIES) && + ((instance->adapter_type == THUNDERBOLT_SERIES) || + ((instance->adapter_type == INVADER_SERIES) && (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE; else if (raid->level == 1) { @@ -880,7 +888,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, } *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); - if (instance->is_ventura) { + if (instance->adapter_type == VENTURA_SERIES) { ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; io_info->span_arm = @@ -1088,10 +1096,10 @@ MR_BuildRaidContext(struct megasas_instance *instance, cpu_to_le16(raid->fpIoTimeoutForLd ? raid->fpIoTimeoutForLd : map->raidMap.fpPdIoTimeoutSec); - if (fusion->adapter_type == INVADER_SERIES) + if (instance->adapter_type == INVADER_SERIES) pRAID_Context->reg_lock_flags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite; - else if (!instance->is_ventura) + else if (instance->adapter_type == THUNDERBOLT_SERIES) pRAID_Context->reg_lock_flags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; pRAID_Context->virtual_disk_tgt_id = raid->targetId; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 11bd2e698b84..06a2e3d9fc5b 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -190,36 +190,30 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance, /** * megasas_fire_cmd_fusion - Sends command to the FW * @instance: Adapter soft state - * @req_desc: 32bit or 64bit Request descriptor + * @req_desc: 64bit Request descriptor * - * Perform PCI Write. Ventura supports 32 bit Descriptor. - * Prior to Ventura (12G) MR controller supports 64 bit Descriptor. + * Perform PCI Write. 
*/ static void megasas_fire_cmd_fusion(struct megasas_instance *instance, union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) { - if (instance->is_ventura) - writel(le32_to_cpu(req_desc->u.low), - &instance->reg_set->inbound_single_queue_port); - else { #if defined(writeq) && defined(CONFIG_64BIT) - u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | - le32_to_cpu(req_desc->u.low)); + u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | + le32_to_cpu(req_desc->u.low)); - writeq(req_data, &instance->reg_set->inbound_low_queue_port); + writeq(req_data, &instance->reg_set->inbound_low_queue_port); #else - unsigned long flags; - spin_lock_irqsave(&instance->hba_lock, flags); - writel(le32_to_cpu(req_desc->u.low), - &instance->reg_set->inbound_low_queue_port); - writel(le32_to_cpu(req_desc->u.high), - &instance->reg_set->inbound_high_queue_port); - mmiowb(); - spin_unlock_irqrestore(&instance->hba_lock, flags); + unsigned long flags; + spin_lock_irqsave(&instance->hba_lock, flags); + writel(le32_to_cpu(req_desc->u.low), + &instance->reg_set->inbound_low_queue_port); + writel(le32_to_cpu(req_desc->u.high), + &instance->reg_set->inbound_high_queue_port); + mmiowb(); + spin_unlock_irqrestore(&instance->hba_lock, flags); #endif - } } /** @@ -243,7 +237,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c reg_set = instance->reg_set; /* ventura FW does not fill outbound_scratch_pad_3 with queue depth */ - if (!instance->is_ventura) + if (instance->adapter_type < VENTURA_SERIES) cur_max_fw_cmds = readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF; @@ -291,7 +285,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c instance->host->can_queue = instance->cur_can_queue; } - if (instance->is_ventura) + if (instance->adapter_type == VENTURA_SERIES) instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS; else @@ -772,7 +766,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) const char *sys_info; MFI_CAPABILITIES *drv_ops; u32 scratch_pad_2; - unsigned long flags; fusion = instance->ctrl_context; @@ -845,7 +838,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations); /* driver support Extended MSIX */ - if (fusion->adapter_type >= INVADER_SERIES) + if (instance->adapter_type >= INVADER_SERIES) drv_ops->mfi_capabilities.support_additional_msix = 1; /* driver supports HA / Remote LUN over Fast Path interface */ drv_ops->mfi_capabilities.support_fp_remote_lun = 1; @@ -900,14 +893,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) break; } - /* For Ventura also IOC INIT required 64 bit Descriptor write. */ - spin_lock_irqsave(&instance->hba_lock, flags); - writel(le32_to_cpu(req_desc.u.low), - &instance->reg_set->inbound_low_queue_port); - writel(le32_to_cpu(req_desc.u.high), - &instance->reg_set->inbound_high_queue_port); - mmiowb(); - spin_unlock_irqrestore(&instance->hba_lock, flags); + megasas_fire_cmd_fusion(instance, &req_desc); wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); @@ -917,7 +903,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) goto fail_fw_init; } - ret = 0; + return 0; fail_fw_init: megasas_return_cmd(instance, cmd); @@ -927,8 +913,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) IOCInitMessage, ioc_init_handle); fail_get_cmd: dev_err(&instance->pdev->dev, - "Init cmd return status %s for SCSI host %d\n", - ret ? 
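[Editor's note: two related cleanups sit in this region. megasas_ioc_init_fusion() now calls the unified megasas_fire_cmd_fusion() instead of open-coding the doorbell write, and it returns 0 straight from the success path rather than setting ret and falling through the failure labels, which is why the final log message can say FAILED unconditionally. The control-flow idea in miniature, with invented names:

    #include <stdio.h>

    /* Return directly on success so the shared error labels run only
     * for actual failures and the log needs no ternary. */
    static int init_fw(int ok)
    {
        if (!ok)
            goto fail;
        return 0;                       /* success: skip the error path */

    fail:
        fprintf(stderr, "Init cmd return status FAILED\n");
        return -1;
    }

    int main(void)
    {
        printf("ok run -> %d\n", init_fw(1));
        printf("bad run -> %d\n", init_fw(0));
        return 0;
    }
]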
"FAILED" : "SUCCESS", instance->host->host_no); + "Init cmd return status FAILED for SCSI host %d\n", + instance->host->host_no); return ret; } @@ -1803,7 +1789,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, fusion = instance->ctrl_context; - if (fusion->adapter_type >= INVADER_SERIES) { + if (instance->adapter_type >= INVADER_SERIES) { struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr; sgl_ptr_end += fusion->max_sge_in_main_msg - 1; sgl_ptr_end->Flags = 0; @@ -1813,7 +1799,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl)); sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl)); sgl_ptr->Flags = 0; - if (fusion->adapter_type >= INVADER_SERIES) + if (instance->adapter_type >= INVADER_SERIES) if (i == sge_count - 1) sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST; sgl_ptr++; @@ -1823,7 +1809,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, (sge_count > fusion->max_sge_in_main_msg)) { struct MPI25_IEEE_SGE_CHAIN64 *sg_chain; - if (fusion->adapter_type >= INVADER_SERIES) { + if (instance->adapter_type >= INVADER_SERIES) { if ((le16_to_cpu(cmd->io_request->IoFlags) & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) @@ -1839,7 +1825,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, sg_chain = sgl_ptr; /* Prepare chain element */ sg_chain->NextChainOffset = 0; - if (fusion->adapter_type >= INVADER_SERIES) + if (instance->adapter_type >= INVADER_SERIES) sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT; else sg_chain->Flags = @@ -2355,15 +2341,12 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, fp_possible = (io_info.fpOkForIo > 0) ? true : false; } - /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU - id by default, not CPU group id, otherwise all MSI-X queues won't - be utilized */ - cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ? 
- raw_smp_processor_id() % instance->msix_vectors : 0; + cmd->request_desc->SCSIIO.MSIxIndex = + instance->reply_map[raw_smp_processor_id()]; praid_context = &io_request->RaidContext; - if (instance->is_ventura) { + if (instance->adapter_type == VENTURA_SERIES) { spin_lock_irqsave(&instance->stream_lock, spinlock_flags); megasas_stream_detect(instance, cmd, &io_info); spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags); @@ -2416,7 +2399,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - if (fusion->adapter_type == INVADER_SERIES) { + if (instance->adapter_type == INVADER_SERIES) { if (io_request->RaidContext.raid_context.reg_lock_flags == REGION_TYPE_UNUSED) cmd->request_desc->SCSIIO.RequestFlags = @@ -2429,7 +2412,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_request->RaidContext.raid_context.reg_lock_flags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE); - } else if (instance->is_ventura) { + } else if (instance->adapter_type == VENTURA_SERIES) { io_request->RaidContext.raid_context_g35.nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT); io_request->RaidContext.raid_context_g35.nseg_type |= @@ -2448,7 +2431,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, &io_info, local_map_ptr); scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG; cmd->pd_r1_lb = io_info.pd_after_lb; - if (instance->is_ventura) + if (instance->adapter_type == VENTURA_SERIES) io_request->RaidContext.raid_context_g35.span_arm = io_info.span_arm; else @@ -2458,7 +2441,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, } else scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; - if (instance->is_ventura) + if (instance->adapter_type == VENTURA_SERIES) cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle; else cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; @@ -2481,7 +2464,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, cmd->request_desc->SCSIIO.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - if (fusion->adapter_type == INVADER_SERIES) { + if (instance->adapter_type == INVADER_SERIES) { if (io_info.do_fp_rlbypass || (io_request->RaidContext.raid_context.reg_lock_flags == REGION_TYPE_UNUSED)) @@ -2494,7 +2477,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | MR_RL_FLAGS_SEQ_NUM_ENABLE); io_request->RaidContext.raid_context.nseg = 0x1; - } else if (instance->is_ventura) { + } else if (instance->adapter_type == VENTURA_SERIES) { io_request->RaidContext.raid_context_g35.routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); io_request->RaidContext.raid_context_g35.nseg_type |= @@ -2569,7 +2552,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, /* set RAID context values */ pRAID_Context->config_seq_num = raid->seqNum; - if (!instance->is_ventura) + if (instance->adapter_type != VENTURA_SERIES) pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ; pRAID_Context->timeout_value = cpu_to_le16(raid->fpIoTimeoutForLd); @@ -2654,7 +2637,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1)); pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum; io_request->DevHandle = pd_sync->seq[pd_index].devHandle; - if (instance->is_ventura) { + if (instance->adapter_type == VENTURA_SERIES) { 
io_request->RaidContext.raid_context_g35.routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); io_request->RaidContext.raid_context_g35.nseg_type |= @@ -2681,10 +2664,9 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, } cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; - cmd->request_desc->SCSIIO.MSIxIndex = - instance->msix_vectors ? - (raw_smp_processor_id() % instance->msix_vectors) : 0; + cmd->request_desc->SCSIIO.MSIxIndex = + instance->reply_map[raw_smp_processor_id()]; if (!fp_possible) { /* system pd firmware path */ @@ -2695,6 +2677,9 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value); pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); } else { + if (os_timeout_value) + os_timeout_value++; + /* system pd Fast Path */ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; timeout_limit = (scmd->device->type == TYPE_DISK) ? @@ -2702,7 +2687,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, pRAID_Context->timeout_value = cpu_to_le16((os_timeout_value > timeout_limit) ? timeout_limit : os_timeout_value); - if (fusion->adapter_type >= INVADER_SERIES) + if (instance->adapter_type >= INVADER_SERIES) io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); @@ -2785,7 +2770,7 @@ megasas_build_io_fusion(struct megasas_instance *instance, return 1; } - if (instance->is_ventura) { + if (instance->adapter_type == VENTURA_SERIES) { set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count); cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags); cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type); @@ -3315,7 +3300,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, io_req = cmd->io_request; - if (fusion->adapter_type >= INVADER_SERIES) { + if (instance->adapter_type >= INVADER_SERIES) { struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL; sgl_ptr_end += fusion->max_sge_in_main_msg - 1; @@ -4247,7 +4232,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) for (i = 0 ; i < instance->max_scsi_cmds; i++) { cmd_fusion = fusion->cmd_list[i]; /*check for extra commands issued by driver*/ - if (instance->is_ventura) { + if (instance->adapter_type == VENTURA_SERIES) { r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds]; megasas_return_cmd_fusion(instance, r1_cmd); } @@ -4348,7 +4333,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) megasas_set_dynamic_target_properties(sdev); /* reset stream detection array */ - if (instance->is_ventura) { + if (instance->adapter_type == VENTURA_SERIES) { for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) { memset(fusion->stream_detect_by_ld[j], 0, sizeof(struct LD_STREAM_DETECT)); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index d78d76112501..7c1f7ccf031d 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -104,12 +104,6 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE { #define RAID_1_PEER_CMDS 2 #define JBOD_MAPS_COUNT 2 -enum MR_FUSION_ADAPTER_TYPE { - THUNDERBOLT_SERIES = 0, - INVADER_SERIES = 1, - VENTURA_SERIES = 2, -}; - /* * Raid Context structure which describes MegaRAID specific IO Parameters * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames @@ -1319,7 +1313,6 @@ struct fusion_context { struct LD_LOAD_BALANCE_INFO *load_balance_info; u32 load_balance_info_pages; 
LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT]; - u8 adapter_type; struct LD_STREAM_DETECT **stream_detect_by_ld; }; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 87999905bca3..66a798243e48 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -105,7 +105,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc); * */ static int -_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp) +_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp) { int ret = param_set_int(val, kp); struct MPT3SAS_ADAPTER *ioc; @@ -1921,8 +1921,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) continue; } - for_each_cpu(cpu, mask) + for_each_cpu_and(cpu, mask, cpu_online_mask) { + if (cpu >= ioc->cpu_msix_table_sz) + break; ioc->cpu_msix_table[cpu] = reply_q->msix_index; + } } return; } @@ -5659,14 +5662,14 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) } /** - * _wait_for_commands_to_complete - reset controller + * mpt3sas_wait_for_commands_to_complete - reset controller * @ioc: Pointer to MPT_ADAPTER structure * * This function waiting(3s) for all pending commands to complete * prior to putting controller in reset. */ -static void -_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) +void +mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) { u32 ioc_state; unsigned long flags; @@ -5745,7 +5748,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, is_fault = 1; } _base_reset_handler(ioc, MPT3_IOC_PRE_RESET); - _wait_for_commands_to_complete(ioc); + mpt3sas_wait_for_commands_to_complete(ioc); _base_mask_interrupts(ioc); r = _base_make_ioc_ready(ioc, type); if (r) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index a77bb7dc12b1..2948cb7e9ae6 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -1292,6 +1292,9 @@ void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc); +void +mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc); + /* scsih shared API */ u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 22998cbd538f..decf9d50142e 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -281,7 +281,7 @@ struct _scsi_io_transfer { * Note: The logging levels are defined in mpt3sas_debug.h. */ static int -_scsih_set_debug_level(const char *val, struct kernel_param *kp) +_scsih_set_debug_level(const char *val, const struct kernel_param *kp) { int ret = param_set_int(val, kp); struct MPT3SAS_ADAPTER *ioc; @@ -2471,7 +2471,8 @@ scsih_abort(struct scsi_cmnd *scmd) _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; - if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + ioc->remove_host) { sdev_printk(KERN_INFO, scmd->device, "device been deleted! 
scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; @@ -2533,7 +2534,8 @@ scsih_dev_reset(struct scsi_cmnd *scmd) _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; - if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + ioc->remove_host) { sdev_printk(KERN_INFO, scmd->device, "device been deleted! scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; @@ -2595,7 +2597,8 @@ scsih_target_reset(struct scsi_cmnd *scmd) _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; - if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + ioc->remove_host) { starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; @@ -2652,7 +2655,7 @@ scsih_host_reset(struct scsi_cmnd *scmd) ioc->name, scmd); scsi_print_command(scmd); - if (ioc->is_driver_loading) { + if (ioc->is_driver_loading || ioc->remove_host) { pr_info(MPT3SAS_FMT "Blocking the host reset\n", ioc->name); r = FAILED; @@ -3957,7 +3960,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) _scsih_set_satl_pending(scmd, false); mpt3sas_base_free_smid(ioc, smid); scsi_dma_unmap(scmd); - if (ioc->pci_error_recovery) + if (ioc->pci_error_recovery || ioc->remove_host) scmd->result = DID_NO_CONNECT << 16; else scmd->result = DID_RESET << 16; @@ -4103,19 +4106,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) return 0; } - /* - * Bug work around for firmware SATL handling. The loop - * is based on atomic operations and ensures consistency - * since we're lockless at this point - */ - do { - if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { - scmd->result = SAM_STAT_BUSY; - scmd->scsi_done(scmd); - return 0; - } - } while (_scsih_set_satl_pending(scmd, true)); - sas_target_priv_data = sas_device_priv_data->sas_target; /* invalid device handle */ @@ -4141,6 +4131,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) sas_device_priv_data->block) return SCSI_MLQUEUE_DEVICE_BUSY; + /* + * Bug work around for firmware SATL handling. 
The loop + * is based on atomic operations and ensures consistency + * since we're lockless at this point + */ + do { + if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { + scmd->result = SAM_STAT_BUSY; + scmd->scsi_done(scmd); + return 0; + } + } while (_scsih_set_satl_pending(scmd, true)); + if (scmd->sc_data_direction == DMA_FROM_DEVICE) mpi_control = MPI2_SCSIIO_CONTROL_READ; else if (scmd->sc_data_direction == DMA_TO_DEVICE) @@ -4167,6 +4170,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (!smid) { pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); + _scsih_set_satl_pending(scmd, false); goto out; } mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); @@ -4197,6 +4201,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (mpi_request->DataLength) { if (ioc->build_sg_scmd(ioc, scmd, smid)) { mpt3sas_base_free_smid(ioc, smid); + _scsih_set_satl_pending(scmd, false); goto out; } } else @@ -4804,6 +4809,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) } else if (log_info == VIRTUAL_IO_FAILED_RETRY) { scmd->result = DID_RESET << 16; break; + } else if ((scmd->device->channel == RAID_CHANNEL) && + (scsi_state == (MPI2_SCSI_STATE_TERMINATED | + MPI2_SCSI_STATE_NO_SCSI_STATUS))) { + scmd->result = DID_RESET << 16; + break; } scmd->result = DID_SOFT_ERROR << 16; break; @@ -8235,6 +8245,10 @@ static void scsih_remove(struct pci_dev *pdev) unsigned long flags; ioc->remove_host = 1; + + mpt3sas_wait_for_commands_to_complete(ioc); + _scsih_flush_running_cmds(ioc); + _scsih_fw_event_cleanup_queue(ioc); spin_lock_irqsave(&ioc->fw_event_lock, flags); @@ -8305,6 +8319,10 @@ scsih_shutdown(struct pci_dev *pdev) unsigned long flags; ioc->remove_host = 1; + + mpt3sas_wait_for_commands_to_complete(ioc); + _scsih_flush_running_cmds(ioc); + _scsih_fw_event_cleanup_queue(ioc); spin_lock_irqsave(&ioc->fw_event_lock, flags); @@ -8923,7 +8941,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), "fw_event_%s%d", ioc->driver_name, ioc->id); ioc->firmware_event_thread = alloc_ordered_workqueue( - ioc->firmware_event_name, WQ_MEM_RECLAIM); + ioc->firmware_event_name, 0); if (!ioc->firmware_event_thread) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c index 7de5d8d75480..eb5471bc7263 100644 --- a/drivers/scsi/mvsas/mv_94xx.c +++ b/drivers/scsi/mvsas/mv_94xx.c @@ -1080,16 +1080,16 @@ static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv, void __iomem *regs = mvi->regs_ex - 0x10200; int drive = (i/3) & (4-1); /* drive number on host */ - u32 block = mr32(MVS_SGPIO_DCTRL + + int driveshift = drive * 8; /* bit offset of drive */ + u32 block = ioread32be(regs + MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id); - /* * if bit is set then create a mask with the first * bit of the drive set in the mask ... */ - u32 bit = (write_data[i/8] & (1 << (i&(8-1)))) ? - 1<<(24-drive*8) : 0; + u32 bit = get_unaligned_be32(write_data) & (1 << i) ? + 1 << driveshift : 0; /* * ... 
and then shift it to the right position based @@ -1098,26 +1098,27 @@ static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv, switch (i%3) { case 0: /* activity */ block &= ~((0x7 << MVS_SGPIO_DCTRL_ACT_SHIFT) - << (24-drive*8)); + << driveshift); /* hardwire activity bit to SOF */ block |= LED_BLINKA_SOF << ( MVS_SGPIO_DCTRL_ACT_SHIFT + - (24-drive*8)); + driveshift); break; case 1: /* id */ block &= ~((0x3 << MVS_SGPIO_DCTRL_LOC_SHIFT) - << (24-drive*8)); + << driveshift); block |= bit << MVS_SGPIO_DCTRL_LOC_SHIFT; break; case 2: /* fail */ block &= ~((0x7 << MVS_SGPIO_DCTRL_ERR_SHIFT) - << (24-drive*8)); + << driveshift); block |= bit << MVS_SGPIO_DCTRL_ERR_SHIFT; break; } - mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id, - block); + iowrite32be(block, + regs + MVS_SGPIO_DCTRL + + MVS_SGPIO_HOST_OFFSET * mvi->id); } @@ -1132,7 +1133,7 @@ static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv, void __iomem *regs = mvi->regs_ex - 0x10200; mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id, - be32_to_cpu(((u32 *) write_data)[i])); + ((u32 *) write_data)[i]); } return reg_count; } diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index a4f28b7e4c65..e18877177f1b 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -1576,7 +1576,9 @@ static struct request *_make_request(struct request_queue *q, bool has_write, return req; for_each_bio(bio) { - ret = blk_rq_append_bio(req, bio); + struct bio *bounce_bio = bio; + + ret = blk_rq_append_bio(req, &bounce_bio); if (ret) return ERR_PTR(ret); } diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 7c0064500cc5..382edb79a0de 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -1649,6 +1649,15 @@ static int qedf_vport_destroy(struct fc_vport *vport) struct Scsi_Host *shost = vport_to_shost(vport); struct fc_lport *n_port = shost_priv(shost); struct fc_lport *vn_port = vport->dd_data; + struct qedf_ctx *qedf = lport_priv(vn_port); + + if (!qedf) { + QEDF_ERR(NULL, "qedf is NULL.\n"); + goto out; + } + + /* Set unloading bit on vport qedf_ctx to prevent more I/O */ + set_bit(QEDF_UNLOADING, &qedf->flags); mutex_lock(&n_port->lp_mutex); list_del(&vn_port->list); @@ -1675,6 +1684,7 @@ static int qedf_vport_destroy(struct fc_vport *vport) if (vn_port->host) scsi_host_put(vn_port->host); +out: return 0; } diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 93d54acd4a22..2e5e04a7623f 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -769,6 +769,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, iscsi_cid = cqe->conn_id; qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; + if (!qedi_conn) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "icid not found 0x%x\n", cqe->conn_id); + return; + } /* Based on this itt get the corresponding qedi_cmd */ spin_lock_bh(&qedi_conn->tmf_work_lock); diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index cccc34adc0e0..1573749fe615 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1840,8 +1840,8 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf) switch (type) { case ISCSI_BOOT_INI_INITIATOR_NAME: - rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n", - initiator->initiator_name.byte); + rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, + initiator->initiator_name.byte); break; default: rc = 0; @@ -1908,8 +1908,8 
@@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, switch (type) { case ISCSI_BOOT_TGT_NAME: - rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n", - block->target[idx].target_name.byte); + rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, + block->target[idx].target_name.byte); break; case ISCSI_BOOT_TGT_IP_ADDR: if (ipv6_en) @@ -1930,20 +1930,20 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, block->target[idx].lun.value[0]); break; case ISCSI_BOOT_TGT_CHAP_NAME: - rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n", - chap_name); + rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, + chap_name); break; case ISCSI_BOOT_TGT_CHAP_SECRET: - rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n", - chap_secret); + rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, + chap_secret); break; case ISCSI_BOOT_TGT_REV_CHAP_NAME: - rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n", - mchap_name); + rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, + mchap_name); break; case ISCSI_BOOT_TGT_REV_CHAP_SECRET: - rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n", - mchap_secret); + rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, + mchap_secret); break; case ISCSI_BOOT_TGT_FLAGS: rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT); diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 9ce28c4f9812..b09d29931393 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2142,6 +2142,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) msleep(1000); qla24xx_disable_vp(vha); + qla2x00_wait_for_sess_deletion(vha); vha->flags.delete_progress = 1; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 486c075998f6..67b305531ec3 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -315,6 +315,29 @@ struct srb_cmd { /* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */ #define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID) +/* + * 24 bit port ID type definition. + */ +typedef union { + uint32_t b24 : 24; + + struct { +#ifdef __BIG_ENDIAN + uint8_t domain; + uint8_t area; + uint8_t al_pa; +#elif defined(__LITTLE_ENDIAN) + uint8_t al_pa; + uint8_t area; + uint8_t domain; +#else +#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!" +#endif + uint8_t rsvd_1; + } b; +} port_id_t; +#define INVALID_PORT_ID 0xFFFFFF + struct els_logo_payload { uint8_t opcode; uint8_t rsvd[3]; @@ -332,6 +355,7 @@ struct ct_arg { u32 rsp_size; void *req; void *rsp; + port_id_t id; }; /* @@ -480,6 +504,7 @@ typedef struct srb { const char *name; int iocbs; struct qla_qpair *qpair; + struct list_head elem; u32 gen1; /* scratch */ u32 gen2; /* scratch */ union { @@ -2144,28 +2169,6 @@ struct imm_ntfy_from_isp { #define REQUEST_ENTRY_SIZE (sizeof(request_t)) -/* - * 24 bit port ID type definition. - */ -typedef union { - uint32_t b24 : 24; - - struct { -#ifdef __BIG_ENDIAN - uint8_t domain; - uint8_t area; - uint8_t al_pa; -#elif defined(__LITTLE_ENDIAN) - uint8_t al_pa; - uint8_t area; - uint8_t domain; -#else -#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!" -#endif - uint8_t rsvd_1; - } b; -} port_id_t; -#define INVALID_PORT_ID 0xFFFFFF /* * Switch info gathering structure. @@ -4082,6 +4085,7 @@ typedef struct scsi_qla_host { #define LOOP_READY 5 #define LOOP_DEAD 6 + unsigned long relogin_jif; unsigned long dpc_flags; #define RESET_MARKER_NEEDED 0 /* Send marker to ISP. 
*/ #define RESET_ACTIVE 1 @@ -4223,6 +4227,7 @@ typedef struct scsi_qla_host { wait_queue_head_t fcport_waitQ; wait_queue_head_t vref_waitq; uint8_t min_link_speed_feat; + struct list_head gpnid_list; } scsi_qla_host_t; struct qla27xx_image_status { diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index f852ca60c49f..89706341514e 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -200,6 +200,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, uint16_t *); int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); int qla24xx_async_abort_cmd(srb_t *); +void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *); /* * Global Functions in qla_mid.c source file. diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index bc3db6abc9a0..2a19ec0660cb 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -175,6 +175,9 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); } break; + case CS_TIMEOUT: + rval = QLA_FUNCTION_TIMEOUT; + /* drop through */ default: ql_dbg(ql_dbg_disc, vha, 0x2033, "%s failed, completion status (%x) on port_id: " @@ -2833,7 +2836,7 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea) } } else { /* fcport->d_id.b24 != ea->id.b24 */ fcport->d_id.b24 = ea->id.b24; - if (fcport->deleted == QLA_SESS_DELETED) { + if (fcport->deleted != QLA_SESS_DELETED) { ql_dbg(ql_dbg_disc, vha, 0x2021, "%s %d %8phC post del sess\n", __func__, __LINE__, fcport->port_name); @@ -2889,9 +2892,22 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res) ea.rc = res; ea.event = FCME_GIDPN_DONE; - ql_dbg(ql_dbg_disc, vha, 0x204f, - "Async done-%s res %x, WWPN %8phC ID %3phC \n", - sp->name, res, fcport->port_name, id); + if (res == QLA_FUNCTION_TIMEOUT) { + ql_dbg(ql_dbg_disc, sp->vha, 0xffff, + "Async done-%s WWPN %8phC timed out.\n", + sp->name, fcport->port_name); + qla24xx_post_gidpn_work(sp->vha, fcport); + sp->free(sp); + return; + } else if (res) { + ql_dbg(ql_dbg_disc, sp->vha, 0xffff, + "Async done-%s fail res %x, WWPN %8phC\n", + sp->name, res, fcport->port_name); + } else { + ql_dbg(ql_dbg_disc, vha, 0x204f, + "Async done-%s good WWPN %8phC ID %3phC\n", + sp->name, fcport->port_name, id); + } qla2x00_fcport_event_handler(vha, &ea); @@ -3205,11 +3221,18 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res) (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; struct event_arg ea; struct qla_work_evt *e; + unsigned long flags; - ql_dbg(ql_dbg_disc, vha, 0x2066, - "Async done-%s res %x ID %3phC. %8phC\n", - sp->name, res, ct_req->req.port_id.port_id, - ct_rsp->rsp.gpn_id.port_name); + if (res) + ql_dbg(ql_dbg_disc, vha, 0x2066, + "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n", + sp->name, res, sp->gen1, ct_req->req.port_id.port_id, + ct_rsp->rsp.gpn_id.port_name); + else + ql_dbg(ql_dbg_disc, vha, 0x2066, + "Async done-%s good rscn gen %d ID %3phC. 
%8phC\n", + sp->name, sp->gen1, ct_req->req.port_id.port_id, + ct_rsp->rsp.gpn_id.port_name); memset(&ea, 0, sizeof(ea)); memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); @@ -3220,6 +3243,22 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res) ea.rc = res; ea.event = FCME_GPNID_DONE; + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + list_del(&sp->elem); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + if (res) { + if (res == QLA_FUNCTION_TIMEOUT) + qla24xx_post_gpnid_work(sp->vha, &ea.id); + sp->free(sp); + return; + } else if (sp->gen1) { + /* There was anoter RSNC for this Nport ID */ + qla24xx_post_gpnid_work(sp->vha, &ea.id); + sp->free(sp); + return; + } + qla2x00_fcport_event_handler(vha, &ea); e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE); @@ -3253,8 +3292,9 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) { int rval = QLA_FUNCTION_FAILED; struct ct_sns_req *ct_req; - srb_t *sp; + srb_t *sp, *tsp; struct ct_sns_pkt *ct_sns; + unsigned long flags; if (!vha->flags.online) goto done; @@ -3265,8 +3305,22 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) sp->type = SRB_CT_PTHRU_CMD; sp->name = "gpnid"; + sp->u.iocb_cmd.u.ctarg.id = *id; + sp->gen1 = 0; qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + list_for_each_entry(tsp, &vha->gpnid_list, elem) { + if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) { + tsp->gen1++; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + sp->free(sp); + goto done; + } + } + list_add_tail(&sp->elem, &vha->gpnid_list); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); @@ -3314,6 +3368,10 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) return rval; done_free_sp: + spin_lock_irqsave(&vha->hw->vport_slock, flags); + list_del(&sp->elem); + spin_unlock_irqrestore(&vha->hw->vport_slock, flags); + if (sp->u.iocb_cmd.u.ctarg.req) { dma_free_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index b5b48ddca962..1d42d38f5a45 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -102,14 +102,21 @@ qla2x00_async_iocb_timeout(void *data) struct srb_iocb *lio = &sp->u.iocb_cmd; struct event_arg ea; - ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, - "Async-%s timeout - hdl=%x portid=%06x %8phC.\n", - sp->name, sp->handle, fcport->d_id.b24, fcport->port_name); + if (fcport) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, + "Async-%s timeout - hdl=%x portid=%06x %8phC.\n", + sp->name, sp->handle, fcport->d_id.b24, fcport->port_name); - fcport->flags &= ~FCF_ASYNC_SENT; + fcport->flags &= ~FCF_ASYNC_SENT; + } else { + pr_info("Async-%s timeout - hdl=%x.\n", + sp->name, sp->handle); + } switch (sp->type) { case SRB_LOGIN_CMD: + if (!fcport) + break; /* Retry as needed. */ lio->u.logio.data[0] = MBS_COMMAND_ERROR; lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 
@@ -123,6 +130,8 @@ qla2x00_async_iocb_timeout(void *data) qla24xx_handle_plogi_done_event(fcport->vha, &ea); break; case SRB_LOGOUT_CMD: + if (!fcport) + break; qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT); break; case SRB_CT_PTHRU_CMD: @@ -864,6 +873,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) int rval = ea->rc; fc_port_t *fcport = ea->fcport; unsigned long flags; + u16 opt = ea->sp->u.iocb_cmd.u.mbx.out_mb[10]; fcport->flags &= ~FCF_ASYNC_SENT; @@ -894,7 +904,8 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) } spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); - ea->fcport->login_gen++; + if (opt != PDO_FORCE_ADISC) + ea->fcport->login_gen++; ea->fcport->deleted = 0; ea->fcport->logout_on_delete = 1; @@ -918,6 +929,13 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) qla24xx_post_gpsc_work(vha, fcport); } + } else if (ea->fcport->login_succ) { + /* + * We have an existing session. A late RSCN delivery + * must have triggered the session to be re-validated. + * Session is still valid. + */ + fcport->disc_state = DSC_LOGIN_COMPLETE; } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } /* gpdb event */ @@ -964,7 +982,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) ql_dbg(ql_dbg_disc, vha, 0x20bd, "%s %d %8phC post gnl\n", __func__, __LINE__, fcport->port_name); - qla24xx_async_gnl(vha, fcport); + qla24xx_post_gnl_work(vha, fcport); } else { ql_dbg(ql_dbg_disc, vha, 0x20bf, "%s %d %8phC post login\n", @@ -1133,7 +1151,7 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n", __func__, __LINE__, fcport->port_name); - qla24xx_async_gidpn(vha, fcport); + qla24xx_post_gidpn_work(vha, fcport); return; } @@ -1308,11 +1326,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, wait_for_completion(&tm_iocb->u.tmf.comp); - rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ? - QLA_SUCCESS : QLA_FUNCTION_FAILED; + rval = tm_iocb->u.tmf.data; - if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) { - ql_dbg(ql_dbg_taskm, vha, 0x8030, + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x8030, "TM IOCB failed (%x).\n", rval); } @@ -1348,6 +1365,7 @@ qla24xx_abort_sp_done(void *ptr, int res) srb_t *sp = ptr; struct srb_iocb *abt = &sp->u.iocb_cmd; + del_timer(&sp->u.iocb_cmd.timer); complete(&abt->u.abt.comp); } @@ -1445,6 +1463,8 @@ static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) { port_id_t cid; /* conflict Nport id */ + u16 lid; + struct fc_port *conflict_fcport; switch (ea->data[0]) { case MBS_COMMAND_COMPLETE: @@ -1460,8 +1480,12 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) qla24xx_post_prli_work(vha, ea->fcport); } else { ql_dbg(ql_dbg_disc, vha, 0x20ea, - "%s %d %8phC post gpdb\n", - __func__, __LINE__, ea->fcport->port_name); + "%s %d %8phC LoopID 0x%x in use with %06x.
post gnl\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->loop_id, ea->fcport->d_id.b24); + + set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); + ea->fcport->loop_id = FC_NO_LOOP_ID; ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; ea->fcport->logout_on_delete = 1; ea->fcport->send_els_logo = 0; @@ -1506,8 +1530,38 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area, ea->fcport->d_id.b.al_pa); - qla2x00_clear_loop_id(ea->fcport); - qla24xx_post_gidpn_work(vha, ea->fcport); + lid = ea->iop[1] & 0xffff; + qlt_find_sess_invalidate_other(vha, + wwn_to_u64(ea->fcport->port_name), + ea->fcport->d_id, lid, &conflict_fcport); + + if (conflict_fcport) { + /* + * Another fcport share the same loop_id/nport id. + * Conflict fcport needs to finish cleanup before this + * fcport can proceed to login. + */ + conflict_fcport->conflict = ea->fcport; + ea->fcport->login_pause = 1; + + ql_dbg(ql_dbg_disc, vha, 0x20ed, + "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->d_id.b24, lid); + qla2x00_clear_loop_id(ea->fcport); + qla24xx_post_gidpn_work(vha, ea->fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0x20ed, + "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n", + __func__, __LINE__, ea->fcport->port_name, + ea->fcport->d_id.b24, lid); + + qla2x00_clear_loop_id(ea->fcport); + set_bit(lid, vha->hw->loop_id_map); + ea->fcport->loop_id = lid; + ea->fcport->keep_nport_handle = 0; + qlt_schedule_sess_for_deletion(ea->fcport, false); + } break; } return; @@ -4572,7 +4626,8 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) return; if (fcport->fp_speed == PORT_SPEED_UNKNOWN || - fcport->fp_speed > ha->link_data_rate) + fcport->fp_speed > ha->link_data_rate || + !ha->flags.gpsc_supported) return; rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed, @@ -8047,9 +8102,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) int ret = QLA_FUNCTION_FAILED; struct qla_hw_data *ha = qpair->hw; - if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created) - goto fail; - qpair->delete_in_progress = 1; while (atomic_read(&qpair->ref_count)) msleep(500); @@ -8057,6 +8109,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) ret = qla25xx_delete_req_que(vha, qpair->req); if (ret != QLA_SUCCESS) goto fail; + ret = qla25xx_delete_rsp_que(vha, qpair->rsp); if (ret != QLA_SUCCESS) goto fail; diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 9a2c86eacf44..3f5a0f0f8b62 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -221,6 +221,8 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag) sp->fcport = fcport; sp->iocbs = 1; sp->vha = qpair->vha; + INIT_LIST_HEAD(&sp->elem); + done: if (!sp) QLA_QPAIR_MARK_NOT_BUSY(qpair); diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 2f94159186d7..8d579bf0fc81 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2128,34 +2128,11 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) req_cnt = 1; handle = 0; - if (!sp) - goto skip_cmd_array; - - /* Check for room in outstanding command list. 
*/ - handle = req->current_outstanding_cmd; - for (index = 1; index < req->num_outstanding_cmds; index++) { - handle++; - if (handle == req->num_outstanding_cmds) - handle = 1; - if (!req->outstanding_cmds[handle]) - break; - } - if (index == req->num_outstanding_cmds) { - ql_log(ql_log_warn, vha, 0x700b, - "No room on outstanding cmd array.\n"); - goto queuing_error; - } - - /* Prep command array. */ - req->current_outstanding_cmd = handle; - req->outstanding_cmds[handle] = sp; - sp->handle = handle; - - /* Adjust entry-counts as needed. */ - if (sp->type != SRB_SCSI_CMD) + if (sp && (sp->type != SRB_SCSI_CMD)) { + /* Adjust entry-counts as needed. */ req_cnt = sp->iocbs; + } -skip_cmd_array: /* Check for room on request queue. */ if (req->cnt < req_cnt + 2) { if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) @@ -2179,6 +2156,28 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) if (req->cnt < req_cnt + 2) goto queuing_error; + if (sp) { + /* Check for room in outstanding command list. */ + handle = req->current_outstanding_cmd; + for (index = 1; index < req->num_outstanding_cmds; index++) { + handle++; + if (handle == req->num_outstanding_cmds) + handle = 1; + if (!req->outstanding_cmds[handle]) + break; + } + if (index == req->num_outstanding_cmds) { + ql_log(ql_log_warn, vha, 0x700b, + "No room on outstanding cmd array.\n"); + goto queuing_error; + } + + /* Prep command array. */ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + } + /* Prep packet */ req->cnt -= req_cnt; pkt = req->ring_ptr; @@ -2191,6 +2190,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) pkt->handle = handle; } + return pkt; + queuing_error: qpair->tgt_counters.num_alloc_iocb_failed++; return pkt; @@ -2392,26 +2393,13 @@ qla2x00_els_dcmd_iocb_timeout(void *data) srb_t *sp = data; fc_port_t *fcport = sp->fcport; struct scsi_qla_host *vha = sp->vha; - struct qla_hw_data *ha = vha->hw; struct srb_iocb *lio = &sp->u.iocb_cmd; - unsigned long flags = 0; ql_dbg(ql_dbg_io, vha, 0x3069, "%s Timeout, hdl=%x, portid=%02x%02x%02x\n", sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); - /* Abort the exchange */ - spin_lock_irqsave(&ha->hardware_lock, flags); - if (ha->isp_ops->abort_command(sp)) { - ql_dbg(ql_dbg_io, vha, 0x3070, - "mbx abort_command failed.\n"); - } else { - ql_dbg(ql_dbg_io, vha, 0x3071, - "mbx abort_command success.\n"); - } - spin_unlock_irqrestore(&ha->hardware_lock, flags); - complete(&lio->u.els_logo.comp); } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 9d9668aac6f6..e073eb16f8a4 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -272,7 +272,8 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; /* Read all mbox registers? */ - mboxes = (1 << ha->mbx_count) - 1; + WARN_ON_ONCE(ha->mbx_count > 32); + mboxes = (1ULL << ha->mbx_count) - 1; if (!ha->mcp) ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); else @@ -1569,7 +1570,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, /* borrowing sts_entry_24xx.comp_status. 
same location as ct_entry_24xx.comp_status */ - res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, + res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt, (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, sp->name); sp->done(sp, res); @@ -2341,7 +2342,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) int res = 0; uint16_t state_flags = 0; uint16_t retry_delay = 0; - uint8_t no_logout = 0; sts = (sts_entry_t *) pkt; sts24 = (struct sts_entry_24xx *) pkt; @@ -2454,8 +2454,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) ox_id = le16_to_cpu(sts24->ox_id); par_sense_len = sizeof(sts24->data); /* Valid values of the retry delay timer are 0x1-0xffef */ - if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) - retry_delay = sts24->retry_delay; + if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) { + retry_delay = sts24->retry_delay & 0x3fff; + ql_dbg(ql_dbg_io, sp->vha, 0x3033, + "%s: scope=%#x retry_delay=%#x\n", __func__, + sts24->retry_delay >> 14, retry_delay); + } } else { if (scsi_status & SS_SENSE_LEN_VALID) sense_len = le16_to_cpu(sts->req_sense_length); @@ -2612,7 +2616,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) break; case CS_PORT_LOGGED_OUT: - no_logout = 1; case CS_PORT_CONFIG_CHG: case CS_PORT_BUSY: case CS_INCOMPLETE: @@ -2643,9 +2646,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) port_state_str[atomic_read(&fcport->state)], comp_status); - if (no_logout) - fcport->logout_on_delete = 0; - qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); qlt_schedule_sess_for_deletion_lock(fcport); } @@ -2826,7 +2826,8 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; /* Read all mbox registers? */ - mboxes = (1 << ha->mbx_count) - 1; + WARN_ON_ONCE(ha->mbx_count > 32); + mboxes = (1ULL << ha->mbx_count) - 1; if (!ha->mcp) ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); else diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 99502fa90810..2d909e12e23a 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -6078,8 +6078,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, } /* Check for logged in state. 
*/ - if (current_login_state != PDS_PRLI_COMPLETE && - last_login_state != PDS_PRLI_COMPLETE) { + if (current_login_state != PDS_PRLI_COMPLETE) { ql_dbg(ql_dbg_mbx, vha, 0x119a, "Unable to verify login-state (%x/%x) for loop_id %x.\n", current_login_state, last_login_state, fcport->loop_id); diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index c0f8f6c17b79..375a88e18afe 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -152,10 +152,15 @@ qla24xx_disable_vp(scsi_qla_host_t *vha) { unsigned long flags; int ret; + fc_port_t *fcport; ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + list_for_each_entry(fcport, &vha->vp_fcports, list) + fcport->logout_on_delete = 0; + + qla2x00_mark_all_devices_lost(vha, 0); /* Remove port id from vp target map */ spin_lock_irqsave(&vha->hw->vport_slock, flags); @@ -343,15 +348,21 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha) "FCPort update end.\n"); } - if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) && - !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) && - atomic_read(&vha->loop_state) != LOOP_DOWN) { + if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) && + !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) && + atomic_read(&vha->loop_state) != LOOP_DOWN) { + + if (!vha->relogin_jif || + time_after_eq(jiffies, vha->relogin_jif)) { + vha->relogin_jif = jiffies + HZ; + clear_bit(RELOGIN_NEEDED, &vha->dpc_flags); - ql_dbg(ql_dbg_dpc, vha, 0x4018, - "Relogin needed scheduled.\n"); - qla2x00_relogin(vha); - ql_dbg(ql_dbg_dpc, vha, 0x4019, - "Relogin needed end.\n"); + ql_dbg(ql_dbg_dpc, vha, 0x4018, + "Relogin needed scheduled.\n"); + qla2x00_relogin(vha); + ql_dbg(ql_dbg_dpc, vha, 0x4019, + "Relogin needed end.\n"); + } } if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) && @@ -569,14 +580,16 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) int qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) { - int ret = -1; + int ret = QLA_SUCCESS; - if (req) { + if (req && vha->flags.qpairs_req_created) { req->options |= BIT_0; ret = qla25xx_init_req_que(vha, req); - } - if (ret == QLA_SUCCESS) + if (ret != QLA_SUCCESS) + return QLA_FUNCTION_FAILED; + qla25xx_free_req_que(vha, req); + } return ret; } @@ -584,14 +597,16 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) int qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) { - int ret = -1; + int ret = QLA_SUCCESS; - if (rsp) { + if (rsp && vha->flags.qpairs_rsp_created) { rsp->options |= BIT_0; ret = qla25xx_init_rsp_que(vha, rsp); - } - if (ret == QLA_SUCCESS) + if (ret != QLA_SUCCESS) + return QLA_FUNCTION_FAILED; + qla25xx_free_rsp_que(vha, rsp); + } return ret; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index dce42a416876..7d7fb5bbb600 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -388,7 +388,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req, INIT_LIST_HEAD(&ha->base_qpair->nvme_done_list); ha->base_qpair->enable_class_2 = ql2xenableclass2; /* init qpair to this cpu. Will adjust at run time. 
*/ - qla_cpu_update(rsp->qpair, smp_processor_id()); + qla_cpu_update(rsp->qpair, raw_smp_processor_id()); ha->base_qpair->pdev = ha->pdev; if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) @@ -442,7 +442,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, ha->req_q_map[0] = req; set_bit(0, ha->rsp_qid_map); set_bit(0, ha->req_qid_map); - return 1; + return 0; fail_qpair_map: kfree(ha->base_qpair); @@ -1136,7 +1136,7 @@ static inline int test_fcport_count(scsi_qla_host_t *vha) * qla2x00_wait_for_sess_deletion can only be called from remove_one. * it has dependency on UNLOADING flag to stop device discovery */ -static void +void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) { qla2x00_mark_all_devices_lost(vha, 0); @@ -1710,6 +1710,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) struct qla_tgt_cmd *cmd; uint8_t trace = 0; + if (!ha->req_q_map) + return; spin_lock_irqsave(&ha->hardware_lock, flags); for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; @@ -3003,9 +3005,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) base_vha = qla2x00_create_host(sht, ha); if (!base_vha) { ret = -ENOMEM; - qla2x00_mem_free(ha); - qla2x00_free_req_que(ha, req); - qla2x00_free_rsp_que(ha, rsp); goto probe_hw_failed; } @@ -3066,14 +3065,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) /* Set up the irqs */ ret = qla2x00_request_irqs(ha, rsp); if (ret) - goto probe_init_failed; + goto probe_failed; /* Alloc arrays of request and response ring ptrs */ - if (!qla2x00_alloc_queues(ha, req, rsp)) { + ret = qla2x00_alloc_queues(ha, req, rsp); + if (ret) { ql_log(ql_log_fatal, base_vha, 0x003d, "Failed to allocate memory for queue pointers..." "aborting.\n"); - goto probe_init_failed; + goto probe_failed; } if (ha->mqenable && shost_use_blk_mq(host)) { @@ -3177,10 +3177,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->can_queue, base_vha->req, base_vha->mgmt_svr_loop_id, host->sg_tablesize); + ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0); + if (ha->mqenable) { bool mq = false; bool startit = false; - ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0); if (QLA_TGT_MODE_ENABLED()) { mq = true; @@ -3349,15 +3350,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) return 0; -probe_init_failed: - qla2x00_free_req_que(ha, req); - ha->req_q_map[0] = NULL; - clear_bit(0, ha->req_qid_map); - qla2x00_free_rsp_que(ha, rsp); - ha->rsp_q_map[0] = NULL; - clear_bit(0, ha->rsp_qid_map); - ha->max_req_queues = ha->max_rsp_queues = 0; - probe_failed: if (base_vha->timer_active) qla2x00_stop_timer(base_vha); @@ -3370,10 +3362,20 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) } qla2x00_free_device(base_vha); - scsi_host_put(base_vha->host); + /* + * Need to NULL out local req/rsp after + * qla2x00_free_device => qla2x00_free_queues frees + * what these are pointing to. Or else we'll + * fall over below in qla2x00_free_req/rsp_que. + */ + req = NULL; + rsp = NULL; probe_hw_failed: + qla2x00_mem_free(ha); + qla2x00_free_req_que(ha, req); + qla2x00_free_rsp_que(ha, rsp); qla2x00_clear_drv_active(ha); iospace_config_failed: @@ -3579,6 +3581,8 @@ qla2x00_remove_one(struct pci_dev *pdev) } qla2x00_wait_for_hba_ready(base_vha); + qla2x00_wait_for_sess_deletion(base_vha); + /* * if UNLOAD flag is already set, then continue unload, * where it was set first. 
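[Editor's note: the RELOGIN_NEEDED rework in qla_mid.c above and the matching hunk in qla2x00_do_dpc() just below share one idea: instead of test_and_clear_bit() firing a relogin pass on every DPC wakeup, the bit is consumed at most once per second, tracked in the new vha->relogin_jif field. Distilled from the patch itself:]

	/* Run the relogin machinery at most once per second.  relogin_jif
	 * holds the earliest jiffies value at which the next pass may start. */
	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {
		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			qla2x00_relogin(vha);
		}
	}

Leaving the flag set while the window has not yet elapsed turns an RSCN storm into one relogin pass per second rather than one per DPC iteration.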
@@ -4061,6 +4065,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, (*rsp)->dma = 0; fail_rsp_ring: kfree(*rsp); + *rsp = NULL; fail_rsp: dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) * sizeof(request_t), (*req)->ring, (*req)->dma); @@ -4068,6 +4073,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, (*req)->dma = 0; fail_req_ring: kfree(*req); + *req = NULL; fail_req: dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), ha->ct_sns, ha->ct_sns_dma); @@ -4434,6 +4440,7 @@ qla2x00_mem_free(struct qla_hw_data *ha) if (ha->init_cb) dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, ha->init_cb_dma); + vfree(ha->optrom_buffer); kfree(ha->nvram); kfree(ha->npiv_info); @@ -4454,6 +4461,15 @@ qla2x00_mem_free(struct qla_hw_data *ha) ha->ex_init_cb_dma = 0; ha->async_pd = NULL; ha->async_pd_dma = 0; + ha->loop_id_map = NULL; + ha->npiv_info = NULL; + ha->optrom_buffer = NULL; + ha->swl = NULL; + ha->nvram = NULL; + ha->mctp_dump = NULL; + ha->dcbx_tlv = NULL; + ha->xgmac_data = NULL; + ha->sfp_data = NULL; ha->s_dma_pool = NULL; ha->dl_dma_pool = NULL; @@ -4498,6 +4514,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, INIT_LIST_HEAD(&vha->qp_list); INIT_LIST_HEAD(&vha->gnl.fcports); INIT_LIST_HEAD(&vha->nvme_rport_list); + INIT_LIST_HEAD(&vha->gpnid_list); spin_lock_init(&vha->work_lock); spin_lock_init(&vha->cmd_list_lock); @@ -4732,11 +4749,11 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) } else { list_add_tail(&fcport->list, &vha->vp_fcports); - if (pla) { - qlt_plogi_ack_link(vha, pla, fcport, - QLT_PLOGI_LINK_SAME_WWN); - pla->ref_count--; - } + } + if (pla) { + qlt_plogi_ack_link(vha, pla, fcport, + QLT_PLOGI_LINK_SAME_WWN); + pla->ref_count--; } } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); @@ -4858,7 +4875,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha) */ if (atomic_read(&fcport->state) != FCS_ONLINE && fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) { - fcport->login_retry--; + if (fcport->flags & FCF_FABRIC_DEVICE) { ql_dbg(ql_dbg_disc, fcport->vha, 0x2108, "%s %8phC DS %d LS %d\n", __func__, @@ -4869,6 +4886,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha) ea.fcport = fcport; qla2x00_fcport_event_handler(vha, &ea); } else { + fcport->login_retry--; status = qla2x00_local_device_login(vha, fcport); if (status == QLA_SUCCESS) { @@ -5776,8 +5794,9 @@ qla2x00_do_dpc(void *data) set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); } - if (test_and_clear_bit(ISP_ABORT_NEEDED, - &base_vha->dpc_flags)) { + if (test_and_clear_bit + (ISP_ABORT_NEEDED, &base_vha->dpc_flags) && + !test_bit(UNLOADING, &base_vha->dpc_flags)) { ql_dbg(ql_dbg_dpc, base_vha, 0x4007, "ISP abort scheduled.\n"); @@ -5851,16 +5870,21 @@ qla2x00_do_dpc(void *data) } /* Retry each device up to login retry count */ - if ((test_and_clear_bit(RELOGIN_NEEDED, - &base_vha->dpc_flags)) && + if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) && !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) && atomic_read(&base_vha->loop_state) != LOOP_DOWN) { - ql_dbg(ql_dbg_dpc, base_vha, 0x400d, - "Relogin scheduled.\n"); - qla2x00_relogin(base_vha); - ql_dbg(ql_dbg_dpc, base_vha, 0x400e, - "Relogin end.\n"); + if (!base_vha->relogin_jif || + time_after_eq(jiffies, base_vha->relogin_jif)) { + base_vha->relogin_jif = jiffies + HZ; + clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags); + + ql_dbg(ql_dbg_dpc, base_vha, 0x400d, + "Relogin scheduled.\n"); + 
qla2x00_relogin(base_vha); + ql_dbg(ql_dbg_dpc, base_vha, 0x400e, + "Relogin end.\n"); + } } loop_resync_check: if (test_and_clear_bit(LOOP_RESYNC_NEEDED, @@ -6591,9 +6615,14 @@ qla83xx_disable_laser(scsi_qla_host_t *vha) static int qla2xxx_map_queues(struct Scsi_Host *shost) { + int rc; scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata; - return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev); + if (USER_CTRL_IRQ(vha->hw)) + rc = blk_mq_map_queues(&shost->tag_set); + else + rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev); + return rc; } static const struct pci_error_handlers qla2xxx_err_handler = { diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index f05cfc83c9c8..d6fe08de59a0 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -665,7 +665,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2); sp->u.iocb_cmd.u.nack.ntfy = ntfy; - + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; sp->done = qla2x00_async_nack_sp_done; rval = qla2x00_start_sp(sp); @@ -971,10 +971,11 @@ static void qlt_free_session_done(struct work_struct *work) logo.id = sess->d_id; logo.cmd_count = 0; + sess->send_els_logo = 0; qlt_send_first_logo(vha, &logo); } - if (sess->logout_on_delete) { + if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) { int rc; rc = qla2x00_post_async_logout_work(vha, sess, NULL); @@ -1033,8 +1034,7 @@ static void qlt_free_session_done(struct work_struct *work) sess->login_succ = 0; } - if (sess->chip_reset != ha->base_qpair->chip_reset) - qla2x00_clear_loop_id(sess); + qla2x00_clear_loop_id(sess); if (sess->conflict) { sess->conflict->login_pause = 0; @@ -1205,7 +1205,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess, ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, "Scheduling sess %p for deletion\n", sess); - schedule_work(&sess->del_work); + INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); + queue_work(sess->vha->hw->wq, &sess->del_work); } void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess) @@ -1560,8 +1561,11 @@ static void qlt_release(struct qla_tgt *tgt) btree_destroy64(&tgt->lun_qpair_map); - if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->remove_target) - ha->tgt.tgt_ops->remove_target(vha); + if (vha->vp_idx) + if (ha->tgt.tgt_ops && + ha->tgt.tgt_ops->remove_target && + vha->vha_tgt.target_lport_ptr) + ha->tgt.tgt_ops->remove_target(vha); vha->vha_tgt.qla_tgt = NULL; @@ -3708,7 +3712,7 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio, term = 1; if (term) - qlt_term_ctio_exchange(qpair, ctio, cmd, status); + qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0); return term; } @@ -4584,9 +4588,9 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); - other_sess->keep_nport_handle = 1; - *conflict_sess = other_sess; + if (other_sess->disc_state != DSC_DELETED) + *conflict_sess = other_sess; qlt_schedule_sess_for_deletion(other_sess, true); } @@ -5755,7 +5759,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, unsigned long flags; u8 newfcport = 0; - fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (!fcport) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, "qla_target(%d): Allocation of tmp FC port failed", @@ -5784,6 +5788,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, 
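[Editor's note: two of the qla_target.c hunks above work together: session teardown is now queued with INIT_WORK() plus queue_work() on the driver's own ha->wq (the WQ_MEM_RECLAIM workqueue that qla2x00_probe_one() now allocates unconditionally), and qlt_get_port_database() obtains its temporary port from qla2x00_alloc_fcport() instead of a bare kzalloc(), so every fc_port starts life with sane defaults. A sketch of what such a constructor plausibly provides; apart from names visible in the patch (vha, loop_id, FC_NO_LOOP_ID, QLA_SESS_DELETED, the list member), the chosen defaults are assumptions, and the function name is illustrative:]

	fc_port_t *alloc_fcport_sketch(scsi_qla_host_t *vha, gfp_t flags)
	{
		fc_port_t *fcport = kzalloc(sizeof(*fcport), flags);

		if (!fcport)
			return NULL;

		fcport->vha = vha;			/* back-pointer used throughout */
		fcport->loop_id = FC_NO_LOOP_ID;	/* not logged in yet */
		fcport->deleted = QLA_SESS_DELETED;	/* until discovery succeeds */
		INIT_LIST_HEAD(&fcport->list);		/* safe to list_del() later */

		return fcport;
	}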
tfcp->port_type = fcport->port_type; tfcp->supported_classes = fcport->supported_classes; tfcp->flags |= fcport->flags; + tfcp->scan_state = QLA_FCPORT_FOUND; del = fcport; fcport = tfcp; diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index fc233717355f..817f312023a9 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -168,6 +168,8 @@ #define DEV_DB_NON_PERSISTENT 0 #define DEV_DB_PERSISTENT 1 +#define QL4_ISP_REG_DISCONNECT 0xffffffffU + #define COPY_ISID(dst_isid, src_isid) { \ int i, j; \ for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \ diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 64c6fa563fdb..a6aa08d9a171 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -262,6 +262,24 @@ static struct iscsi_transport qla4xxx_iscsi_transport = { static struct scsi_transport_template *qla4xxx_scsi_transport; +static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha) +{ + u32 reg_val = 0; + int rval = QLA_SUCCESS; + + if (is_qla8022(ha)) + reg_val = readl(&ha->qla4_82xx_reg->host_status); + else if (is_qla8032(ha) || is_qla8042(ha)) + reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); + else + reg_val = readw(&ha->reg->ctrl_status); + + if (reg_val == QL4_ISP_REG_DISCONNECT) + rval = QLA_ERROR; + + return rval; +} + static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, uint32_t iface_type, uint32_t payload_size, uint32_t pid, struct sockaddr *dst_addr) @@ -9188,10 +9206,17 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) struct srb *srb = NULL; int ret = SUCCESS; int wait = 0; + int rval; ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", ha->host_no, id, lun, cmd, cmd->cmnd[0]); + rval = qla4xxx_isp_check_reg(ha); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); + return FAILED; + } + spin_lock_irqsave(&ha->hardware_lock, flags); srb = (struct srb *) CMD_SP(cmd); if (!srb) { @@ -9243,6 +9268,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) struct scsi_qla_host *ha = to_qla_host(cmd->device->host); struct ddb_entry *ddb_entry = cmd->device->hostdata; int ret = FAILED, stat; + int rval; if (!ddb_entry) return ret; @@ -9262,6 +9288,12 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) cmd, jiffies, cmd->request->timeout / HZ, ha->dpc_flags, cmd->result, cmd->allowed)); + rval = qla4xxx_isp_check_reg(ha); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); + return FAILED; + } + /* FIXME: wait for hba to go online */ stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); if (stat != QLA_SUCCESS) { @@ -9305,6 +9337,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) struct scsi_qla_host *ha = to_qla_host(cmd->device->host); struct ddb_entry *ddb_entry = cmd->device->hostdata; int stat, ret; + int rval; if (!ddb_entry) return FAILED; @@ -9322,6 +9355,12 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) ha->host_no, cmd, jiffies, cmd->request->timeout / HZ, ha->dpc_flags, cmd->result, cmd->allowed)); + rval = qla4xxx_isp_check_reg(ha); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); + return FAILED; + } + stat = qla4xxx_reset_target(ha, ddb_entry); if (stat != QLA_SUCCESS) { starget_printk(KERN_INFO, scsi_target(cmd->device), @@ -9376,9 +9415,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) { int return_status 
= FAILED; struct scsi_qla_host *ha; + int rval; ha = to_qla_host(cmd->device->host); + rval = qla4xxx_isp_check_reg(ha); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); + return FAILED; + } + if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) qla4_83xx_set_idc_dontreset(ha); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 09ba494f8896..92bc5b2d24ae 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -3001,11 +3001,11 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, if (-1 == ret) { write_unlock_irqrestore(&atomic_rw, iflags); return DID_ERROR << 16; - } else if (sdebug_verbose && (ret < (num * sdebug_sector_size))) + } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size)) sdev_printk(KERN_INFO, scp->device, - "%s: %s: cdb indicated=%u, IO sent=%d bytes\n", + "%s: %s: lb size=%u, IO sent=%d bytes\n", my_name, "write same", - num * sdebug_sector_size, ret); + sdebug_sector_size, ret); /* Copy first sector to remaining blocks */ for (i = 1 ; i < num ; i++) diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c index 01f08c03f2c1..c3765d29fd3f 100644 --- a/drivers/scsi/scsi_debugfs.c +++ b/drivers/scsi/scsi_debugfs.c @@ -8,9 +8,11 @@ void scsi_show_rq(struct seq_file *m, struct request *rq) { struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req); int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc); - char buf[80]; + const u8 *const cdb = READ_ONCE(cmd->cmnd); + char buf[80] = "(?)"; - __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len); + if (cdb) + __scsi_format_command(buf, sizeof(buf), cdb, cmd->cmd_len); seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf, cmd->retries, msecs / 1000, msecs % 1000); } diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 6bf43d94cdc0..ea947a7c2596 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -109,8 +109,8 @@ static struct { * seagate controller, which causes SCSI code to reset bus. 
*/ {"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */ - {"HP", "C1790A", "", BLIST_NOLUN}, /* scanjet iip */ - {"HP", "C2500A", "", BLIST_NOLUN}, /* scanjet iicx */ + {"HP", "C1790A", NULL, BLIST_NOLUN}, /* scanjet iip */ + {"HP", "C2500A", NULL, BLIST_NOLUN}, /* scanjet iicx */ {"MEDIAVIS", "CDR-H93MV", "1.31", BLIST_NOLUN}, /* locks up */ {"MICROTEK", "ScanMaker II", "5.61", BLIST_NOLUN}, /* responds to all lun */ {"MITSUMI", "CD-R CR-2201CS", "6119", BLIST_NOLUN}, /* locks up */ @@ -120,7 +120,7 @@ static struct { {"QUANTUM", "FIREBALL ST4.3S", "0F0C", BLIST_NOLUN}, /* locks up */ {"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */ {"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */ - {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN}, + {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN | BLIST_BORKEN}, {"transtec", "T5008", "0001", BLIST_NOREPORTLUN }, {"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */ {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */ @@ -161,7 +161,7 @@ static struct { {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */ {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */ {"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, - {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN}, + {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2}, {"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN}, {"easyRAID", "16P", NULL, BLIST_NOREPORTLUN}, {"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN}, @@ -181,7 +181,7 @@ static struct { {"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */ - {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */ + {"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */ {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN}, {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, {"HP", "C1557A", NULL, BLIST_FORCELUN}, @@ -255,7 +255,6 @@ static struct { {"ST650211", "CF", NULL, BLIST_RETRY_HWERROR}, {"SUN", "T300", "*", BLIST_SPARSELUN}, {"SUN", "T4", "*", BLIST_SPARSELUN}, - {"TEXEL", "CD-ROM", "1.06", BLIST_BORKEN}, {"Tornado-", "F4", "*", BLIST_NOREPORTLUN}, {"TOSHIBA", "CDROM", NULL, BLIST_ISROM}, {"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM}, @@ -595,17 +594,12 @@ int scsi_get_device_flags_keyed(struct scsi_device *sdev, int key) { struct scsi_dev_info_list *devinfo; - int err; devinfo = scsi_dev_info_list_find(vendor, model, key); if (!IS_ERR(devinfo)) return devinfo->flags; - err = PTR_ERR(devinfo); - if (err != -ENOENT) - return err; - - /* nothing found, return nothing */ + /* key or device not found: return nothing */ if (key != SCSI_DEVINFO_GLOBAL) return 0; diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 84addee05be6..375cede0c534 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -56,10 +56,16 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { {"IBM", "1815", "rdac", }, {"IBM", "1818", "rdac", }, {"IBM", "3526", "rdac", }, - {"SGI", "TP9", "rdac", }, + {"IBM", "3542", "rdac", }, + {"IBM", "3552", "rdac", }, + {"SGI", "TP9300", "rdac", }, + {"SGI", "TP9400", "rdac", }, + {"SGI", "TP9500", "rdac", }, + {"SGI", "TP9700", "rdac", }, {"SGI", "IS", "rdac", }, - {"STK", "OPENstorage D280", "rdac", }, + {"STK", "OPENstorage", "rdac", }, {"STK", "FLEXLINE 380", "rdac", }, + {"STK", "BladeCtlr", 
"rdac", }, {"SUN", "CSM", "rdac", }, {"SUN", "LCSM100", "rdac", }, {"SUN", "STK6580_6780", "rdac", }, diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index dab876c65473..cf70f0bb8375 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -220,6 +220,18 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd) } } +static void scsi_eh_inc_host_failed(struct rcu_head *head) +{ + struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu); + struct Scsi_Host *shost = scmd->device->host; + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + shost->host_failed++; + scsi_eh_wakeup(shost); + spin_unlock_irqrestore(shost->host_lock, flags); +} + /** * scsi_eh_scmd_add - add scsi cmd to error handling. * @scmd: scmd to run eh on. @@ -242,9 +254,12 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd) scsi_eh_reset(scmd); list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q); - shost->host_failed++; - scsi_eh_wakeup(shost); spin_unlock_irqrestore(shost->host_lock, flags); + /* + * Ensure that all tasks observe the host state change before the + * host_failed change. + */ + call_rcu(&scmd->rcu, scsi_eh_inc_host_failed); } /** diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index bcc1694cebcd..bfd8f12d4e9a 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -318,22 +318,39 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) cmd->cmd_len = scsi_command_size(cmd->cmnd); } -void scsi_device_unbusy(struct scsi_device *sdev) +/* + * Decrement the host_busy counter and wake up the error handler if necessary. + * Avoid as follows that the error handler is not woken up if shost->host_busy + * == shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination + * with an RCU read lock in this function to ensure that this function in its + * entirety either finishes before scsi_eh_scmd_add() increases the + * host_failed counter or that it notices the shost state change made by + * scsi_eh_scmd_add(). + */ +static void scsi_dec_host_busy(struct Scsi_Host *shost) { - struct Scsi_Host *shost = sdev->host; - struct scsi_target *starget = scsi_target(sdev); unsigned long flags; + rcu_read_lock(); atomic_dec(&shost->host_busy); - if (starget->can_queue > 0) - atomic_dec(&starget->target_busy); - - if (unlikely(scsi_host_in_recovery(shost) && - (shost->host_failed || shost->host_eh_scheduled))) { + if (unlikely(scsi_host_in_recovery(shost))) { spin_lock_irqsave(shost->host_lock, flags); - scsi_eh_wakeup(shost); + if (shost->host_failed || shost->host_eh_scheduled) + scsi_eh_wakeup(shost); spin_unlock_irqrestore(shost->host_lock, flags); } + rcu_read_unlock(); +} + +void scsi_device_unbusy(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + struct scsi_target *starget = scsi_target(sdev); + + scsi_dec_host_busy(shost); + + if (starget->can_queue > 0) + atomic_dec(&starget->target_busy); atomic_dec(&sdev->device_busy); } @@ -653,6 +670,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error, if (!blk_rq_is_scsi(req)) { WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); cmd->flags &= ~SCMD_INITIALIZED; + destroy_rcu_head(&cmd->rcu); } if (req->mq_ctx) { @@ -837,6 +855,17 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) /* for passthrough error may be set */ error = BLK_STS_OK; } + /* + * Another corner case: the SCSI status byte is non-zero but 'good'. 
+ * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when + * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD + * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related + * intermediate statuses (both obsolete in SAM-4) as good. + */ + if (status_byte(result) && scsi_status_is_good(result)) { + result = 0; + error = BLK_STS_OK; + } /* * special case: failed zero length commands always need to @@ -1133,6 +1162,7 @@ void scsi_initialize_rq(struct request *rq) struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); scsi_req_init(&cmd->req); + init_rcu_head(&cmd->rcu); cmd->jiffies_at_alloc = jiffies; cmd->retries = 0; } @@ -1532,7 +1562,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q, list_add_tail(&sdev->starved_entry, &shost->starved_list); spin_unlock_irq(shost->host_lock); out_dec: - atomic_dec(&shost->host_busy); + scsi_dec_host_busy(shost); return 0; } @@ -1993,7 +2023,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, return BLK_STS_OK; out_dec_host_busy: - atomic_dec(&shost->host_busy); + scsi_dec_host_busy(shost); out_dec_target_busy: if (scsi_target(sdev)->can_queue > 0) atomic_dec(&scsi_target(sdev)->target_busy); @@ -2126,11 +2156,13 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) q->limits.cluster = 0; /* - * set a reasonable default alignment on word boundaries: the - * host and device may alter it using - * blk_queue_update_dma_alignment() later. + * Set a reasonable default alignment: The larger of 32-byte (dword), + * which is a common minimum for HBAs, and the minimum DMA alignment, + * which is set by the platform. + * + * Devices that require a bigger alignment can increase it later. */ - blk_queue_dma_alignment(q, 0x03); + blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1); } EXPORT_SYMBOL_GPL(__scsi_init_queue); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index f796bd61f3f0..40406c162d0d 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1383,7 +1383,10 @@ static void __scsi_remove_target(struct scsi_target *starget) * check. */ if (sdev->channel != starget->channel || - sdev->id != starget->id || + sdev->id != starget->id) + continue; + if (sdev->sdev_state == SDEV_DEL || + sdev->sdev_state == SDEV_CANCEL || !get_device(&sdev->sdev_gendev)) continue; spin_unlock_irqrestore(shost->host_lock, flags); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 7404d26895f5..f6542c159ed6 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -2322,6 +2322,12 @@ iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp) return nlmsg_multicast(nls, skb, 0, group, gfp); } +static int +iscsi_unicast_skb(struct sk_buff *skb, u32 portid) +{ + return nlmsg_unicast(nls, skb, portid); +} + int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size) { @@ -2524,14 +2530,11 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport, EXPORT_SYMBOL_GPL(iscsi_ping_comp_event); static int -iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi, - void *payload, int size) +iscsi_if_send_reply(u32 portid, int type, void *payload, int size) { struct sk_buff *skb; struct nlmsghdr *nlh; int len = nlmsg_total_size(size); - int flags = multi ? NLM_F_MULTI : 0; - int t = done ? 
NLMSG_DONE : type; skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { @@ -2539,10 +2542,9 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi, return -ENOMEM; } - nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0); - nlh->nlmsg_flags = flags; + nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0); memcpy(nlmsg_data(nlh), payload, size); - return iscsi_multicast_skb(skb, group, GFP_ATOMIC); + return iscsi_unicast_skb(skb, portid); } static int @@ -3470,6 +3472,7 @@ static int iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; + u32 portid; struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_transport *transport = NULL; struct iscsi_internal *priv; @@ -3490,10 +3493,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) if (!try_module_get(transport->owner)) return -EINVAL; + portid = NETLINK_CB(skb).portid; + switch (nlh->nlmsg_type) { case ISCSI_UEVENT_CREATE_SESSION: err = iscsi_if_create_session(priv, ep, ev, - NETLINK_CB(skb).portid, + portid, ev->u.c_session.initial_cmdsn, ev->u.c_session.cmds_max, ev->u.c_session.queue_depth); @@ -3506,7 +3511,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) } err = iscsi_if_create_session(priv, ep, ev, - NETLINK_CB(skb).portid, + portid, ev->u.c_bound_session.initial_cmdsn, ev->u.c_bound_session.cmds_max, ev->u.c_bound_session.queue_depth); @@ -3664,6 +3669,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) static void iscsi_if_rx(struct sk_buff *skb) { + u32 portid = NETLINK_CB(skb).portid; + mutex_lock(&rx_queue_mutex); while (skb->len >= NLMSG_HDRLEN) { int err; @@ -3699,8 +3706,8 @@ iscsi_if_rx(struct sk_buff *skb) break; if (ev->type == ISCSI_UEVENT_GET_CHAP && !err) break; - err = iscsi_if_send_reply(group, nlh->nlmsg_seq, - nlh->nlmsg_type, 0, 0, ev, sizeof(*ev)); + err = iscsi_if_send_reply(portid, nlh->nlmsg_type, + ev, sizeof(*ev)); } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); skb_pull(skb, rlen); } diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index 36f6190931bc..456ce9f19569 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -51,6 +51,8 @@ struct srp_internal { struct transport_container rport_attr_cont; }; +static int scsi_is_srp_rport(const struct device *dev); + #define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t) #define dev_to_rport(d) container_of(d, struct srp_rport, dev) @@ -60,9 +62,24 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r) return dev_to_shost(r->dev.parent); } +static int find_child_rport(struct device *dev, void *data) +{ + struct device **child = data; + + if (scsi_is_srp_rport(dev)) { + WARN_ON_ONCE(*child); + *child = dev; + } + return 0; +} + static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost) { - return transport_class_to_srp_rport(&shost->shost_gendev); + struct device *child = NULL; + + WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child, + find_child_rport) < 0); + return child ? 
dev_to_rport(child) : NULL; } /** @@ -600,7 +617,8 @@ enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) struct srp_rport *rport = shost_to_rport(shost); pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev)); - return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 && + return rport && rport->fast_io_fail_tmo < 0 && + rport->dev_loss_tmo < 0 && i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index d175c5c5ccf8..4a532318b211 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -231,11 +231,15 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr, { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; + bool v; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - sdp->manage_start_stop = simple_strtoul(buf, NULL, 10); + if (kstrtobool(buf, &v)) + return -EINVAL; + + sdp->manage_start_stop = v; return count; } @@ -253,6 +257,7 @@ static ssize_t allow_restart_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + bool v; struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; @@ -262,7 +267,10 @@ allow_restart_store(struct device *dev, struct device_attribute *attr, if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC) return -EINVAL; - sdp->allow_restart = simple_strtoul(buf, NULL, 10); + if (kstrtobool(buf, &v)) + return -EINVAL; + + sdp->allow_restart = v; return count; } @@ -1284,6 +1292,7 @@ static int sd_init_command(struct scsi_cmnd *cmd) static void sd_uninit_command(struct scsi_cmnd *SCpnt) { struct request *rq = SCpnt->request; + u8 *cmnd; if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK) sd_zbc_write_unlock_zone(SCpnt); @@ -1292,9 +1301,10 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt) __free_page(rq->special_vec.bv_page); if (SCpnt->cmnd != scsi_req(rq)->cmd) { - mempool_free(SCpnt->cmnd, sd_cdb_pool); + cmnd = SCpnt->cmnd; SCpnt->cmnd = NULL; SCpnt->cmd_len = 0; + mempool_free(cmnd, sd_cdb_pool); } } @@ -2122,6 +2132,8 @@ sd_spinup_disk(struct scsi_disk *sdkp) break; /* standby */ if (sshdr.asc == 4 && sshdr.ascq == 0xc) break; /* unavailable */ + if (sshdr.asc == 4 && sshdr.ascq == 0x1b) + break; /* sanitize in progress */ /* * Issue command to spin up drive when not ready */ @@ -2596,6 +2608,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) int res; struct scsi_device *sdp = sdkp->device; struct scsi_mode_data data; + int disk_ro = get_disk_ro(sdkp->disk); int old_wp = sdkp->write_prot; set_disk_ro(sdkp->disk, 0); @@ -2636,7 +2649,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) "Test WP failed, assume Write Enabled\n"); } else { sdkp->write_prot = ((data.device_specific & 0x80) != 0); - set_disk_ro(sdkp->disk, sdkp->write_prot); + set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro); if (sdkp->first_scan || old_wp != sdkp->write_prot) { sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", sdkp->write_prot ? 
"on" : "off"); diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 8aa54779aac1..f4944dde6c8e 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -375,15 +375,15 @@ static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp, if (sdkp->device->type != TYPE_ZBC) { /* Host-aware */ sdkp->urswrz = 1; - sdkp->zones_optimal_open = get_unaligned_be64(&buf[8]); - sdkp->zones_optimal_nonseq = get_unaligned_be64(&buf[12]); + sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]); + sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]); sdkp->zones_max_open = 0; } else { /* Host-managed */ sdkp->urswrz = buf[4] & 1; sdkp->zones_optimal_open = 0; sdkp->zones_optimal_nonseq = 0; - sdkp->zones_max_open = get_unaligned_be64(&buf[16]); + sdkp->zones_max_open = get_unaligned_be32(&buf[16]); } return 0; @@ -423,19 +423,27 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, #define SD_ZBC_BUF_SIZE 131072 -static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) +/** + * sd_zbc_check_zone_size - Check the device zone sizes + * @sdkp: Target disk + * + * Check that all zones of the device are equal. The last zone can however + * be smaller. The zone size must also be a power of two number of LBAs. + * + * Returns the zone size in number of blocks upon success or an error code + * upon failure. + */ +static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp) { - u64 zone_blocks; + u64 zone_blocks = 0; sector_t block = 0; unsigned char *buf; unsigned char *rec; unsigned int buf_len; unsigned int list_length; - int ret; + s64 ret; u8 same; - sdkp->zone_blocks = 0; - /* Get a buffer */ buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL); if (!buf) @@ -443,10 +451,8 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) /* Do a report zone to get the same field */ ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0); - if (ret) { - zone_blocks = 0; - goto out; - } + if (ret) + goto out_free; same = buf[4] & 0x0f; if (same > 0) { @@ -472,16 +478,17 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) /* Parse zone descriptors */ while (rec < buf + buf_len) { - zone_blocks = get_unaligned_be64(&rec[8]); - if (sdkp->zone_blocks == 0) { - sdkp->zone_blocks = zone_blocks; - } else if (zone_blocks != sdkp->zone_blocks && - (block + zone_blocks < sdkp->capacity - || zone_blocks > sdkp->zone_blocks)) { + u64 this_zone_blocks = get_unaligned_be64(&rec[8]); + + if (zone_blocks == 0) { + zone_blocks = this_zone_blocks; + } else if (this_zone_blocks != zone_blocks && + (block + this_zone_blocks < sdkp->capacity + || this_zone_blocks > zone_blocks)) { zone_blocks = 0; goto out; } - block += zone_blocks; + block += this_zone_blocks; rec += 64; } @@ -489,61 +496,77 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, block); if (ret) - return ret; + goto out_free; } } while (block < sdkp->capacity); - zone_blocks = sdkp->zone_blocks; - out: - kfree(buf); - if (!zone_blocks) { if (sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Devices with non constant zone " "size are not supported\n"); - return -ENODEV; - } - - if (!is_power_of_2(zone_blocks)) { + ret = -ENODEV; + } else if (!is_power_of_2(zone_blocks)) { if (sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Devices with non power of 2 zone " "size are not supported\n"); - return -ENODEV; - } - - if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) { + ret = -ENODEV; + } else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) { if 
(sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Zone size too large\n"); - return -ENODEV; + ret = -ENODEV; + } else { + ret = zone_blocks; } - sdkp->zone_blocks = zone_blocks; +out_free: + kfree(buf); - return 0; + return ret; } -static int sd_zbc_setup(struct scsi_disk *sdkp) +static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks) { + struct request_queue *q = sdkp->disk->queue; + u32 zone_shift = ilog2(zone_blocks); + u32 nr_zones; /* chunk_sectors indicates the zone size */ - blk_queue_chunk_sectors(sdkp->disk->queue, - logical_to_sectors(sdkp->device, sdkp->zone_blocks)); - sdkp->zone_shift = ilog2(sdkp->zone_blocks); - sdkp->nr_zones = sdkp->capacity >> sdkp->zone_shift; - if (sdkp->capacity & (sdkp->zone_blocks - 1)) - sdkp->nr_zones++; - - if (!sdkp->zones_wlock) { - sdkp->zones_wlock = kcalloc(BITS_TO_LONGS(sdkp->nr_zones), - sizeof(unsigned long), - GFP_KERNEL); - if (!sdkp->zones_wlock) - return -ENOMEM; + blk_queue_chunk_sectors(q, + logical_to_sectors(sdkp->device, zone_blocks)); + nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift; + + /* + * Initialize the disk zone write lock bitmap if the number + * of zones changed. + */ + if (nr_zones != sdkp->nr_zones) { + unsigned long *zones_wlock = NULL; + + if (nr_zones) { + zones_wlock = kcalloc(BITS_TO_LONGS(nr_zones), + sizeof(unsigned long), + GFP_KERNEL); + if (!zones_wlock) + return -ENOMEM; + } + + blk_mq_freeze_queue(q); + sdkp->zone_blocks = zone_blocks; + sdkp->zone_shift = zone_shift; + sdkp->nr_zones = nr_zones; + swap(sdkp->zones_wlock, zones_wlock); + blk_mq_unfreeze_queue(q); + + kfree(zones_wlock); + + /* READ16/WRITE16 is mandatory for ZBC disks */ + sdkp->device->use_16_for_rw = 1; + sdkp->device->use_10_for_rw = 0; } return 0; @@ -552,6 +575,7 @@ static int sd_zbc_setup(struct scsi_disk *sdkp) int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) { + int64_t zone_blocks; int ret; if (!sd_is_zoned(sdkp)) @@ -589,19 +613,19 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, * Check zone size: only devices with a constant zone size (except * an eventual last runt zone) that is a power of 2 are supported. 
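 *
 * (Worked example, assumed rather than taken from this patch: with
 * 512-byte logical blocks and 256 MiB zones, zone_blocks is 524288,
 * a power of two, so the device is accepted even if the final zone
 * is a shorter runt; a device reporting mixed 128 MiB and 256 MiB
 * zones in the middle of the LBA space fails the equality check in
 * sd_zbc_check_zone_size() and is rejected.)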
*/ - ret = sd_zbc_check_zone_size(sdkp); - if (ret) + zone_blocks = sd_zbc_check_zone_size(sdkp); + ret = -EFBIG; + if (zone_blocks != (u32)zone_blocks) + goto err; + ret = zone_blocks; + if (ret < 0) goto err; /* The drive satisfies the kernel restrictions: set it up */ - ret = sd_zbc_setup(sdkp); + ret = sd_zbc_setup(sdkp, zone_blocks); if (ret) goto err; - /* READ16/WRITE16 is mandatory for ZBC disks */ - sdkp->device->use_16_for_rw = 1; - sdkp->device->use_10_for_rw = 0; - return 0; err: @@ -614,6 +638,7 @@ void sd_zbc_remove(struct scsi_disk *sdkp) { kfree(sdkp->zones_wlock); sdkp->zones_wlock = NULL; + sdkp->nr_zones = 0; } void sd_zbc_print_zones(struct scsi_disk *sdkp) diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 11826c5c2dd4..62f04c0511cf 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -615,13 +615,16 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, } static void ses_match_to_enclosure(struct enclosure_device *edev, - struct scsi_device *sdev) + struct scsi_device *sdev, + int refresh) { + struct scsi_device *edev_sdev = to_scsi_device(edev->edev.parent); struct efd efd = { .addr = 0, }; - ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); + if (refresh) + ses_enclosure_data_process(edev, edev_sdev, 0); if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent)) efd.addr = sas_get_address(sdev); @@ -652,7 +655,7 @@ static int ses_intf_add(struct device *cdev, struct enclosure_device *prev = NULL; while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) { - ses_match_to_enclosure(edev, sdev); + ses_match_to_enclosure(edev, sdev, 1); prev = edev; } return -ENODEV; @@ -768,7 +771,7 @@ static int ses_intf_add(struct device *cdev, shost_for_each_device(tmp_sdev, sdev->host) { if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev)) continue; - ses_match_to_enclosure(edev, tmp_sdev); + ses_match_to_enclosure(edev, tmp_sdev, 0); } return 0; diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index aa28874e8fb9..3a406b40f150 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -51,6 +51,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */ #include #include #include +#include /* for sg_check_file_access() */ #include "scsi.h" #include @@ -210,6 +211,33 @@ static void sg_device_destroy(struct kref *kref); sdev_prefix_printk(prefix, (sdp)->device, \ (sdp)->disk->disk_name, fmt, ##a) +/* + * The SCSI interfaces that use read() and write() as an asynchronous variant of + * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways + * to trigger read() and write() calls from various contexts with elevated + * privileges. This can lead to kernel memory corruption (e.g. if these + * interfaces are called through splice()) and privilege escalation inside + * userspace (e.g. if a process with access to such a device passes a file + * descriptor to a SUID binary as stdin/stdout/stderr). + * + * This function provides protection for the legacy API by restricting the + * calling context. 
+ */ +static int sg_check_file_access(struct file *filp, const char *caller) +{ + if (filp->f_cred != current_real_cred()) { + pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", + caller, task_tgid_vnr(current), current->comm); + return -EPERM; + } + if (uaccess_kernel()) { + pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n", + caller, task_tgid_vnr(current), current->comm); + return -EACCES; + } + return 0; +} + static int sg_allow_access(struct file *filp, unsigned char *cmd) { struct sg_fd *sfp = filp->private_data; @@ -394,6 +422,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) struct sg_header *old_hdr = NULL; int retval = 0; + /* + * This could cause a response to be stranded. Close the associated + * file descriptor to free up any resources being held. + */ + retval = sg_check_file_access(filp, __func__); + if (retval) + return retval; + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, @@ -581,9 +617,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) struct sg_header old_hdr; sg_io_hdr_t *hp; unsigned char cmnd[SG_MAX_CDB_SIZE]; + int retval; - if (unlikely(uaccess_kernel())) - return -EINVAL; + retval = sg_check_file_access(filp, __func__); + if (retval) + return retval; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; @@ -1894,7 +1932,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) num = (rem_sz > scatter_elem_sz_prev) ? scatter_elem_sz_prev : rem_sz; - schp->pages[k] = alloc_pages(gfp_mask, order); + schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order); if (!schp->pages[k]) goto out; @@ -2148,6 +2186,7 @@ sg_add_sfp(Sg_device * sdp) write_lock_irqsave(&sdp->sfd_lock, iflags); if (atomic_read(&sdp->detaching)) { write_unlock_irqrestore(&sdp->sfd_lock, iflags); + kfree(sfp); return ERR_PTR(-ENODEV); } list_add_tail(&sfp->sfd_siblings, &sdp->sfds); diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile index 0f42a225a664..e6b779930230 100644 --- a/drivers/scsi/smartpqi/Makefile +++ b/drivers/scsi/smartpqi/Makefile @@ -1,3 +1,3 @@ ccflags-y += -I. -obj-m += smartpqi.o +obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 9be34d37c356..d0389b20574d 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -523,16 +523,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt) static int sr_block_open(struct block_device *bdev, fmode_t mode) { struct scsi_cd *cd; + struct scsi_device *sdev; int ret = -ENXIO; - mutex_lock(&sr_mutex); cd = scsi_cd_get(bdev->bd_disk); - if (cd) { - ret = cdrom_open(&cd->cdi, bdev, mode); - if (ret) - scsi_cd_put(cd); - } + if (!cd) + goto out; + + sdev = cd->device; + scsi_autopm_get_device(sdev); + check_disk_change(bdev); + + mutex_lock(&sr_mutex); + ret = cdrom_open(&cd->cdi, bdev, mode); mutex_unlock(&sr_mutex); + + scsi_autopm_put_device(sdev); + if (ret) + scsi_cd_put(cd); + +out: return ret; } @@ -560,6 +570,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, if (ret) goto out; + scsi_autopm_get_device(sdev); + /* * Send SCSI addressing ioctls directly to mid level, send other * ioctls to cdrom/block level. 
@@ -568,15 +580,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, case SCSI_IOCTL_GET_IDLUN: case SCSI_IOCTL_GET_BUS_NUMBER: ret = scsi_ioctl(sdev, cmd, argp); - goto out; + goto put; } ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg); if (ret != -ENOSYS) - goto out; + goto put; ret = scsi_ioctl(sdev, cmd, argp); +put: + scsi_autopm_put_device(sdev); + out: mutex_unlock(&sr_mutex); return ret; @@ -585,18 +600,28 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, static unsigned int sr_block_check_events(struct gendisk *disk, unsigned int clearing) { - struct scsi_cd *cd = scsi_cd(disk); + unsigned int ret = 0; + struct scsi_cd *cd; - if (atomic_read(&cd->device->disk_events_disable_depth)) + cd = scsi_cd_get(disk); + if (!cd) return 0; - return cdrom_check_events(&cd->cdi, clearing); + if (!atomic_read(&cd->device->disk_events_disable_depth)) + ret = cdrom_check_events(&cd->cdi, clearing); + + scsi_cd_put(cd); + return ret; } static int sr_block_revalidate_disk(struct gendisk *disk) { - struct scsi_cd *cd = scsi_cd(disk); struct scsi_sense_hdr sshdr; + struct scsi_cd *cd; + + cd = scsi_cd_get(disk); + if (!cd) + return -ENXIO; /* if the unit is not ready, nothing more to do */ if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr)) @@ -605,6 +630,7 @@ static int sr_block_revalidate_disk(struct gendisk *disk) sr_cd_check(&cd->cdi); get_sectorsize(cd); out: + scsi_cd_put(cd); return 0; } diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index 2a21f2d48592..35fab1e18adc 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -188,9 +188,13 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) struct scsi_device *SDev; struct scsi_sense_hdr sshdr; int result, err = 0, retries = 0; + unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL; SDev = cd->device; + if (cgc->sense) + senseptr = sense_buffer; + retry: if (!scsi_block_when_processing_errors(SDev)) { err = -ENODEV; @@ -198,10 +202,12 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) } result = scsi_execute(SDev, cgc->cmd, cgc->data_direction, - cgc->buffer, cgc->buflen, - (unsigned char *)cgc->sense, &sshdr, + cgc->buffer, cgc->buflen, senseptr, &sshdr, cgc->timeout, IOCTL_RETRIES, 0, 0, NULL); + if (cgc->sense) + memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense)); + /* Minimal error checking. Ignore cases we know about, and report the rest. 
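 *
 * (Background note, inferred from the change above: scsi_execute()
 * expects a full SCSI_SENSE_BUFFERSIZE-byte sense buffer, while
 * cgc->sense points at a smaller struct request_sense, so sense data
 * is now bounced through a correctly sized local buffer and copied
 * back only when the caller actually supplied cgc->sense.)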
*/ if (driver_byte(result) != 0) { switch (sshdr.sense_key) { diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 5e7200f05873..beb585ddc07d 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -952,10 +952,11 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, case TEST_UNIT_READY: break; default: - set_host_byte(scmnd, DID_TARGET_FAILURE); + set_host_byte(scmnd, DID_ERROR); } break; case SRB_STATUS_INVALID_LUN: + set_host_byte(scmnd, DID_NO_CONNECT); do_work = true; process_err_fn = storvsc_remove_lun; break; @@ -1660,7 +1661,7 @@ static struct scsi_host_template scsi_driver = { .eh_timed_out = storvsc_eh_timed_out, .slave_alloc = storvsc_device_alloc, .slave_configure = storvsc_device_configure, - .cmd_per_lun = 255, + .cmd_per_lun = 2048, .this_id = -1, .use_clustering = ENABLE_CLUSTERING, /* Make sure we dont get a sg segment crosses a page boundary */ @@ -1724,11 +1725,14 @@ static int storvsc_probe(struct hv_device *device, max_targets = STORVSC_MAX_TARGETS; max_channels = STORVSC_MAX_CHANNELS; /* - * On Windows8 and above, we support sub-channels for storage. + * On Windows8 and above, we support sub-channels for storage + * on SCSI and FC controllers. * The number of sub-channels offerred is based on the number of * VCPUs in the guest. */ - max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel); + if (!dev_is_ide) + max_sub_channels = + (num_cpus - 1) / storvsc_vcpus_per_sub_channel; } scsi_driver.can_queue = (max_outstanding_req_per_channel * @@ -1826,8 +1830,10 @@ static int storvsc_probe(struct hv_device *device, fc_host_node_name(host) = stor_device->node_name; fc_host_port_name(host) = stor_device->port_name; stor_device->rport = fc_remote_port_add(host, 0, &ids); - if (!stor_device->rport) + if (!stor_device->rport) { + ret = -ENOMEM; goto err_out3; + } } #endif return 0; diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index ca360daa6a25..378af306fda1 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -536,7 +536,7 @@ sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fa * Look for the greatest clock divisor that allows an * input speed faster than the period. */ - while (div-- > 0) + while (--div > 0) if (kpc >= (div_10M[div] << 2)) break; /* diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index e27b4d4e6ae2..8b545a9c51dd 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -38,6 +38,7 @@ config SCSI_UFSHCD select PM_DEVFREQ select DEVFREQ_GOV_SIMPLE_ONDEMAND select NLS + select RPMB ---help--- This selects the support for UFS devices in Linux, say Y and make sure that you know the name of your UFS host adapter (the card diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index c87d770b519a..edd0554652bb 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -50,19 +50,10 @@ static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, u32 clk_cycles); -static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len, - char *prefix) -{ - print_hex_dump(KERN_ERR, prefix, - len > 4 ? 
DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE, - 16, 4, (void __force *)hba->mmio_base + offset, - len * 4, false); -} - static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len, - char *prefix, void *priv) + const char *prefix, void *priv) { - ufs_qcom_dump_regs(hba, offset, len, prefix); + ufshcd_dump_regs(hba, offset, len * 4, prefix); } static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes) @@ -1436,7 +1427,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv, void (*print_fn)(struct ufs_hba *hba, - int offset, int num_regs, char *str, void *priv)) + int offset, int num_regs, const char *str, void *priv)) { u32 reg; struct ufs_qcom_host *host; @@ -1618,7 +1609,7 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) static void ufs_qcom_testbus_read(struct ufs_hba *hba) { - ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS "); + ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS "); } static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba) @@ -1644,8 +1635,8 @@ static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba) static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) { - ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16, - "HCI Vendor Specific Registers "); + ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4, + "HCI Vendor Specific Registers "); /* sleep a bit intermittently as we are dumping too much data */ ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 54deeb754db5..77ff3c91917c 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -526,7 +526,9 @@ struct ufs_dev_info { */ struct ufs_dev_desc { u16 wmanufacturerid; - char model[MAX_MODEL_LEN + 1]; + char *model; + char *serial_no; + size_t serial_no_len; }; #endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 794a4600e952..3e8e31b59941 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -37,10 +37,14 @@ * license terms, and distributes only under these terms. */ +#include #include #include #include #include +#include +#include + #include "ufshcd.h" #include "ufs_quirks.h" #include "unipro.h" @@ -97,8 +101,29 @@ _ret; \ }) -#define ufshcd_hex_dump(prefix_str, buf, len) \ -print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false) +#define ufshcd_hex_dump(prefix_str, buf, len) do { \ + size_t __len = (len); \ + print_hex_dump(KERN_ERR, prefix_str, \ + __len > 4 ? 
DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\ + 16, 4, buf, __len, false); \ +} while (0) + +int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, + const char *prefix) +{ + u8 *regs; + + regs = kzalloc(len, GFP_KERNEL); + if (!regs) + return -ENOMEM; + + memcpy_fromio(regs, hba->mmio_base + offset, len); + ufshcd_hex_dump(prefix, regs, len); + kfree(regs); + + return 0; +} +EXPORT_SYMBOL_GPL(ufshcd_dump_regs); enum { UFSHCD_MAX_CHANNEL = 0, @@ -264,16 +289,6 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba) } } -/* replace non-printable or non-ASCII characters with spaces */ -static inline void ufshcd_remove_non_printable(char *val) -{ - if (!val) - return; - - if (*val < 0x20 || *val > 0x7e) - *val = ' '; -} - static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, const char *str) { @@ -342,15 +357,7 @@ static void ufshcd_print_uic_err_hist(struct ufs_hba *hba, static void ufshcd_print_host_regs(struct ufs_hba *hba) { - /* - * hex_dump reads its data without the readl macro. This might - * cause inconsistency issues on some platform, as the printed - * values may be from cache and not the most recent value. - * To know whether you are looking at an un-cached version verify - * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked - * during platform/pci probe function. - */ - ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE); + ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n", hba->ufs_version, hba->capabilities); dev_err(hba->dev, @@ -3009,7 +3016,7 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, int desc_index, u8 param_offset, - u8 *param_read_buf, + void *param_read_buf, u8 param_size) { int ret; @@ -3077,7 +3084,7 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba, static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id, int desc_index, - u8 *buf, + void *buf, u32 size) { return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); @@ -3095,6 +3102,25 @@ static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); } +/** + * uc_string_id - unicode string + * + * @len: size of this descriptor inclusive + * @type: descriptor type + * @uc: unicode string character + */ +struct uc_string_id { + u8 len; + u8 type; + wchar_t uc[0]; +} __packed; + +/* replace non-printable or non-ASCII characters with spaces */ +static inline char blank_non_printable(char ch) +{ + /* printable ASCII is 0x20..0x7e inclusive; anything else is blanked */ + return (ch >= 0x20 && ch <= 0x7e) ? ch : ' '; +} + /** * ufshcd_read_string_desc - read string descriptor * @hba: pointer to adapter instance * @desc_index: descriptor index * @buf: pointer to buffer where descriptor would be read * @size: size of buf * @ascii: if true convert from unicode to ascii characters + * null terminated string. * - * Return 0 in case of success, non-zero otherwise + * Return: string size on success.
+ * -ENOMEM: on allocation failure + * -ETOOSMALL: if supplied buffer is too small */ -#define ASCII_STD true -static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, - u8 *buf, u32 size, bool ascii) +#define SD_ASCII_STD true +#define SD_RAW false +static int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, + char **buf, bool ascii) { - int err = 0; + struct uc_string_id *uc_str; + char *str; + int ret; - err = ufshcd_read_desc(hba, - QUERY_DESC_IDN_STRING, desc_index, buf, size); + if (!buf) + return -EINVAL; - if (err) { - dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n", - __func__, QUERY_REQ_RETRIES, err); + uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); + if (!uc_str) + return -ENOMEM; + + ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, + desc_index, uc_str, + QUERY_DESC_MAX_SIZE); + if (ret < 0) { + dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n", + QUERY_REQ_RETRIES, ret); + str = NULL; + goto out; + } + + if (uc_str->len <= QUERY_DESC_HDR_SIZE) { + dev_dbg(hba->dev, "String Desc is of zero length\n"); + str = NULL; + ret = 0; goto out; } if (ascii) { - int desc_len; - int ascii_len; int i; - char *buff_ascii; + ssize_t ascii_len; - desc_len = buf[0]; /* remove header and divide by 2 to move from UTF16 to UTF8 */ - ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; - if (size < ascii_len + QUERY_DESC_HDR_SIZE) { - dev_err(hba->dev, "%s: buffer allocated size is too small\n", - __func__); - err = -ENOMEM; - goto out; - } - - buff_ascii = kmalloc(ascii_len, GFP_KERNEL); - if (!buff_ascii) { - err = -ENOMEM; + ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1; + str = kzalloc(ascii_len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; goto out; } @@ -3146,22 +3183,29 @@ static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, * the descriptor contains string in UTF16 format * we need to convert to utf-8 so it can be displayed */ - utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE], - desc_len - QUERY_DESC_HDR_SIZE, - UTF16_BIG_ENDIAN, buff_ascii, ascii_len); + ret = utf16s_to_utf8s(uc_str->uc, + uc_str->len - QUERY_DESC_HDR_SIZE, + UTF16_BIG_ENDIAN, str, ascii_len); /* replace non-printable or non-ASCII characters with spaces */ - for (i = 0; i < ascii_len; i++) - ufshcd_remove_non_printable(&buff_ascii[i]); + for (i = 0; i < ret; i++) + str[i] = blank_non_printable(str[i]); - memset(buf + QUERY_DESC_HDR_SIZE, 0, - size - QUERY_DESC_HDR_SIZE); - memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); - buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; - kfree(buff_ascii); + str[ret++] = '\0'; + + } else { + str = kzalloc(uc_str->len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; + goto out; + } + memcpy(str, uc_str, uc_str->len); + ret = uc_str->len; } out: - return err; + *buf = str; + kfree(uc_str); + return ret; } /** @@ -4347,6 +4391,8 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) /* REPORT SUPPORTED OPERATION CODES is not supported */ sdev->no_report_opcodes = 1; + /* WRITE_SAME command is not supported */ + sdev->no_write_same = 1; ufshcd_set_queue_depth(sdev); @@ -4945,6 +4991,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work) hba = container_of(work, struct ufs_hba, eeh_work); pm_runtime_get_sync(hba->dev); + scsi_block_requests(hba->host); err = ufshcd_get_ee_status(hba, &status); if (err) { dev_err(hba->dev, "%s: failed to get exception status %d\n", @@ -4958,6 +5005,7 @@ static void 
ufshcd_exception_event_handler(struct work_struct *work) ufshcd_bkops_exception_event_handler(hba); out: + scsi_unblock_requests(hba->host); pm_runtime_put_sync(hba->dev); return; } @@ -5957,6 +6005,228 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba) } +#define SEC_PROTOCOL_UFS 0xEC +#define SEC_SPECIFIC_UFS_RPMB 0x01 + +#define SEC_PROTOCOL_CMD_SIZE 12 +#define SEC_PROTOCOL_RETRIES 3 +#define SEC_PROTOCOL_RETRIES_ON_RESET 10 +#define SEC_PROTOCOL_TIMEOUT msecs_to_jiffies(1000) + +static int +ufshcd_rpmb_security_out(struct scsi_device *sdev, u8 region, + void *frames, u32 trans_len) +{ + struct scsi_sense_hdr sshdr; + int reset_retries = SEC_PROTOCOL_RETRIES_ON_RESET; + int ret; + u8 cmd[SEC_PROTOCOL_CMD_SIZE]; + + memset(cmd, 0, SEC_PROTOCOL_CMD_SIZE); + cmd[0] = SECURITY_PROTOCOL_OUT; + cmd[1] = SEC_PROTOCOL_UFS; + cmd[2] = region; + cmd[3] = SEC_SPECIFIC_UFS_RPMB; + cmd[4] = 0; /* inc_512 bit 7 set to 0 */ + put_unaligned_be32(trans_len, cmd + 6); /* transfer length */ + +retry: + ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, + frames, trans_len, &sshdr, + SEC_PROTOCOL_TIMEOUT, SEC_PROTOCOL_RETRIES, + NULL); + + if (ret && scsi_sense_valid(&sshdr) && + sshdr.sense_key == UNIT_ATTENTION && + sshdr.asc == 0x29 && sshdr.ascq == 0x00) + /* + * Device reset might occur several times, + * give it one more chance + */ + if (--reset_retries > 0) + goto retry; + + if (ret) + dev_err(&sdev->sdev_gendev, "%s: failed with err %0x\n", + __func__, ret); + + if (driver_byte(ret) & DRIVER_SENSE) + scsi_print_sense_hdr(sdev, "rpmb: security out", &sshdr); + + return ret; +} + +static int +ufshcd_rpmb_security_in(struct scsi_device *sdev, u8 region, + void *frames, u32 alloc_len) +{ + struct scsi_sense_hdr sshdr; + int reset_retries = SEC_PROTOCOL_RETRIES_ON_RESET; + int ret; + u8 cmd[SEC_PROTOCOL_CMD_SIZE]; + + memset(cmd, 0, SEC_PROTOCOL_CMD_SIZE); + cmd[0] = SECURITY_PROTOCOL_IN; + cmd[1] = SEC_PROTOCOL_UFS; + cmd[2] = region; + cmd[3] = SEC_SPECIFIC_UFS_RPMB; + cmd[4] = 0; /* inc_512 bit 7 set to 0 */ + put_unaligned_be32(alloc_len, cmd + 6); /* allocation length */ + +retry: + ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, + frames, alloc_len, &sshdr, + SEC_PROTOCOL_TIMEOUT, SEC_PROTOCOL_RETRIES, + NULL); + + if (ret && scsi_sense_valid(&sshdr) && + sshdr.sense_key == UNIT_ATTENTION && + sshdr.asc == 0x29 && sshdr.ascq == 0x00) + /* + * Device reset might occur several times, + * give it one more chance + */ + if (--reset_retries > 0) + goto retry; + + if (ret) + dev_err(&sdev->sdev_gendev, "%s: failed with err %0x\n", + __func__, ret); + + if (driver_byte(ret) & DRIVER_SENSE) + scsi_print_sense_hdr(sdev, "rpmb: security in", &sshdr); + + return ret; +} + +static int ufshcd_rpmb_cmd_seq(struct device *dev, u8 target, + struct rpmb_cmd *cmds, u32 ncmds) +{ + unsigned long flags; + struct ufs_hba *hba = dev_get_drvdata(dev); + struct scsi_device *sdev; + struct rpmb_cmd *cmd; + u32 len; + int i; + int ret; + + spin_lock_irqsave(hba->host->host_lock, flags); + sdev = hba->sdev_ufs_rpmb; + if (sdev) { + ret = scsi_device_get(sdev); + if (!ret && !scsi_device_online(sdev)) { + ret = -ENODEV; + scsi_device_put(sdev); + } + } else { + ret = -ENODEV; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (ret) + return ret; + + for (ret = 0, i = 0; i < ncmds && !ret; i++) { + cmd = &cmds[i]; + len = rpmb_ioc_frames_len_jdec(cmd->nframes); + if (cmd->flags & RPMB_F_WRITE) + ret = ufshcd_rpmb_security_out(sdev, target, + cmd->frames, len); + else + ret = 
ufshcd_rpmb_security_in(sdev, target, + cmd->frames, len); + } + scsi_device_put(sdev); + return ret; +} + +static int ufshcd_rpmb_get_capacity(struct device *dev, u8 target) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + __be64 block_count; + int ret; + + ret = ufshcd_read_unit_desc_param(hba, + UFS_UPIU_RPMB_WLUN, + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT, + (u8 *)&block_count, + sizeof(block_count)); + if (ret) + return ret; + + return be64_to_cpu(block_count) * SZ_512 / SZ_128K; +} + +static struct rpmb_ops ufshcd_rpmb_dev_ops = { + .cmd_seq = ufshcd_rpmb_cmd_seq, + .get_capacity = ufshcd_rpmb_get_capacity, + .type = RPMB_TYPE_UFS, + .auth_method = RPMB_HMAC_ALGO_SHA_256, +}; + +static inline void ufshcd_rpmb_add(struct ufs_hba *hba, + struct ufs_dev_desc *dev_desc) +{ + struct rpmb_dev *rdev; + u8 rpmb_rw_size = 1; + int ret; + + ufshcd_rpmb_dev_ops.dev_id = kmemdup(dev_desc->serial_no, + dev_desc->serial_no_len, + GFP_KERNEL); + if (ufshcd_rpmb_dev_ops.dev_id) + ufshcd_rpmb_dev_ops.dev_id_len = dev_desc->serial_no_len; + + ret = scsi_device_get(hba->sdev_ufs_rpmb); + if (ret) { + /* no sdev reference was taken, so do not drop one in out_put_dev */ + kfree(ufshcd_rpmb_dev_ops.dev_id); + ufshcd_rpmb_dev_ops.dev_id = NULL; + hba->sdev_ufs_rpmb = NULL; + return; + } + +#if 0 + if (hba->ufs_version >= UFSHCI_VERSION_21) { + ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, + GEOMETRY_DESC_PARAM_RPMB_RW_SIZE, + &rpmb_rw_size, + sizeof(rpmb_rw_size)); + if (ret) + goto out_put_dev; + } +#endif /* 0 */ + + ufshcd_rpmb_dev_ops.rd_cnt_max = rpmb_rw_size; + ufshcd_rpmb_dev_ops.wr_cnt_max = rpmb_rw_size; + + rdev = rpmb_dev_register(hba->dev, 0, &ufshcd_rpmb_dev_ops); + if (IS_ERR(rdev)) { + dev_warn(hba->dev, "%s: cannot register to rpmb %ld\n", + dev_name(hba->dev), PTR_ERR(rdev)); + goto out_put_dev; + } + + return; + +out_put_dev: + scsi_device_put(hba->sdev_ufs_rpmb); + hba->sdev_ufs_rpmb = NULL; +} + +static inline void ufshcd_rpmb_remove(struct ufs_hba *hba) +{ + unsigned long flags; + + if (!hba->sdev_ufs_rpmb) + return; + + spin_lock_irqsave(hba->host->host_lock, flags); + + rpmb_dev_unregister_by_device(hba->dev, 0); + scsi_device_put(hba->sdev_ufs_rpmb); + hba->sdev_ufs_rpmb = NULL; + + kfree(ufshcd_rpmb_dev_ops.dev_id); + ufshcd_rpmb_dev_ops.dev_id = NULL; + + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + /** * ufshcd_scsi_add_wlus - Adds required W-LUs * @hba: per-adapter instance @@ -6012,7 +6282,10 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) ret = PTR_ERR(sdev_rpmb); goto remove_sdev_boot; } + hba->sdev_ufs_rpmb = sdev_rpmb; + scsi_device_put(sdev_rpmb); + goto out; remove_sdev_boot: @@ -6026,15 +6299,18 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) static int ufs_get_device_desc(struct ufs_hba *hba, struct ufs_dev_desc *dev_desc) { - int err; - u8 model_index; - u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0}; - u8 desc_buf[hba->desc_size.dev_desc]; + int ret; + u8 *desc_buf; + u8 index; - err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc); - if (err) { + desc_buf = kmalloc(hba->desc_size.dev_desc, GFP_KERNEL); + if (!desc_buf) + return -ENOMEM; + + ret = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc); + if (ret) { dev_err(hba->dev, "%s: Failed reading Device Desc.
err = %d\n", - __func__, err); + __func__, ret); goto out; } @@ -6045,26 +6321,36 @@ static int ufs_get_device_desc(struct ufs_hba *hba, dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; - model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; - - err = ufshcd_read_string_desc(hba, model_index, str_desc_buf, - QUERY_DESC_MAX_SIZE, ASCII_STD); - if (err) { + index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; + ret = ufshcd_read_string_desc(hba, index, + &dev_desc->model, SD_ASCII_STD); + if (ret < 0) { dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", - __func__, err); + __func__, ret); goto out; } - str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0'; - strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE), - min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET], - MAX_MODEL_LEN)); - - /* Null terminate the model string */ - dev_desc->model[MAX_MODEL_LEN] = '\0'; + index = desc_buf[DEVICE_DESC_PARAM_SN]; + ret = ufshcd_read_string_desc(hba, index, + &dev_desc->serial_no, SD_RAW); + if (ret < 0) { + dev_err(hba->dev, "%s: Failed reading Serial No. err = %d\n", + __func__, ret); + goto out; + } out: - return err; + kfree(desc_buf); + return ret; +} + +static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc) +{ + kfree(dev_desc->model); + dev_desc->model = NULL; + + kfree(dev_desc->serial_no); + dev_desc->serial_no = NULL; } static void ufs_fixup_device_setup(struct ufs_hba *hba, @@ -6344,13 +6630,14 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) ufshcd_init_desc_sizes(hba); ret = ufs_get_device_desc(hba, &card); - if (ret) { + if (ret < 0) { dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", __func__, ret); goto out; } ufs_fixup_device_setup(hba, &card); + ufshcd_tune_unipro_params(hba); ret = ufshcd_set_vccq_rail_unused(hba, @@ -6399,6 +6686,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (ufshcd_scsi_add_wlus(hba)) goto out; + ufshcd_rpmb_add(hba, &card); + /* Initialize devfreq after UFS device is detected */ if (ufshcd_is_clkscaling_supported(hba)) { memcpy(&hba->clk_scaling.saved_pwr_info.info, @@ -6428,6 +6717,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) hba->is_init_prefetch = true; out: + + ufs_put_device_desc(&card); /* * If we failed to initialize the device or the device is not * present, turn off the power/clocks etc. @@ -6555,12 +6846,15 @@ static int ufshcd_config_vreg(struct device *dev, struct ufs_vreg *vreg, bool on) { int ret = 0; - struct regulator *reg = vreg->reg; - const char *name = vreg->name; + struct regulator *reg; + const char *name; int min_uV, uA_load; BUG_ON(!vreg); + reg = vreg->reg; + name = vreg->name; + if (regulator_count_voltages(reg) > 0) { min_uV = on ? vreg->min_uV : 0; ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); @@ -6756,9 +7050,16 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, if (list_empty(head)) goto out; - ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); - if (ret) - return ret; + /* + * vendor specific setup_clocks ops may depend on clocks managed by + * this standard driver hence call the vendor specific setup_clocks + * before disabling the clocks managed here. 
+ */ + if (!on) { + ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); + if (ret) + return ret; + } list_for_each_entry(clki, head, list) { if (!IS_ERR_OR_NULL(clki->clk)) { @@ -6782,9 +7083,16 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, } } - ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE); - if (ret) - return ret; + /* + * vendor specific setup_clocks ops may depend on clocks managed by + * this standard driver hence call the vendor specific setup_clocks + * after enabling the clocks managed here. + */ + if (on) { + ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE); + if (ret) + return ret; + } out: if (ret) { @@ -7726,6 +8034,8 @@ int ufshcd_shutdown(struct ufs_hba *hba) goto out; } + ufshcd_rpmb_remove(hba); + ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); out: if (ret) @@ -7742,7 +8052,10 @@ EXPORT_SYMBOL(ufshcd_shutdown); */ void ufshcd_remove(struct ufs_hba *hba) { + ufshcd_rpmb_remove(hba); + ufshcd_remove_sysfs_nodes(hba); + scsi_remove_host(hba->host); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index cdc8bd05f7df..f414297e8869 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -454,6 +454,7 @@ struct ufs_stats { * @utmrdl_dma_addr: UTMRDL DMA address * @host: Scsi_Host instance of the driver * @dev: device handle + * @sdev_ufs_rpmb: reference to RPMB device W-LU * @lrb: local reference block * @lrb_in_use: lrb in use * @outstanding_tasks: Bits representing outstanding task requests @@ -517,6 +518,7 @@ struct ufs_hba { * "UFS device" W-LU. */ struct scsi_device *sdev_ufs_device; + struct scsi_device *sdev_ufs_rpmb; enum ufs_dev_pwr_mode curr_dev_pwr_mode; enum uic_link_state uic_link_state; @@ -983,4 +985,7 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba) hba->vops->dbg_register_dump(hba); } +int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, + const char *prefix); + #endif /* End of Header */ diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 7c28e8d4955a..54e3a0f6844c 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -91,9 +91,6 @@ struct virtio_scsi_vq { struct virtio_scsi_target_state { seqcount_t tgt_seq; - /* Count of outstanding requests. */ - atomic_t reqs; - /* Currently active virtqueue for requests sent to this target. 
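 *
 * (Note, inferred from this series: per-target request counting via
 * tgt->reqs is gone because the driver now sets force_blk_mq and picks
 * the virtqueue per hardware context in virtscsi_pick_vq_mq(), so
 * req_vq only matters for the single-queue template.)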
*/ struct virtio_scsi_vq *req_vq; }; @@ -152,8 +149,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) struct virtio_scsi_cmd *cmd = buf; struct scsi_cmnd *sc = cmd->sc; struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd; - struct virtio_scsi_target_state *tgt = - scsi_target(sc->device)->hostdata; dev_dbg(&sc->device->sdev_gendev, "cmd %p response %u status %#02x sense_len %u\n", @@ -210,8 +205,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) } sc->scsi_done(sc); - - atomic_dec(&tgt->reqs); } static void virtscsi_vq_done(struct virtio_scsi *vscsi, @@ -580,10 +573,7 @@ static int virtscsi_queuecommand_single(struct Scsi_Host *sh, struct scsi_cmnd *sc) { struct virtio_scsi *vscsi = shost_priv(sh); - struct virtio_scsi_target_state *tgt = - scsi_target(sc->device)->hostdata; - atomic_inc(&tgt->reqs); return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc); } @@ -596,55 +586,11 @@ static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi, return &vscsi->req_vqs[hwq]; } -static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi, - struct virtio_scsi_target_state *tgt) -{ - struct virtio_scsi_vq *vq; - unsigned long flags; - u32 queue_num; - - local_irq_save(flags); - if (atomic_inc_return(&tgt->reqs) > 1) { - unsigned long seq; - - do { - seq = read_seqcount_begin(&tgt->tgt_seq); - vq = tgt->req_vq; - } while (read_seqcount_retry(&tgt->tgt_seq, seq)); - } else { - /* no writes can be concurrent because of atomic_t */ - write_seqcount_begin(&tgt->tgt_seq); - - /* keep previous req_vq if a reader just arrived */ - if (unlikely(atomic_read(&tgt->reqs) > 1)) { - vq = tgt->req_vq; - goto unlock; - } - - queue_num = smp_processor_id(); - while (unlikely(queue_num >= vscsi->num_queues)) - queue_num -= vscsi->num_queues; - tgt->req_vq = vq = &vscsi->req_vqs[queue_num]; - unlock: - write_seqcount_end(&tgt->tgt_seq); - } - local_irq_restore(flags); - - return vq; -} - static int virtscsi_queuecommand_multi(struct Scsi_Host *sh, struct scsi_cmnd *sc) { struct virtio_scsi *vscsi = shost_priv(sh); - struct virtio_scsi_target_state *tgt = - scsi_target(sc->device)->hostdata; - struct virtio_scsi_vq *req_vq; - - if (shost_use_blk_mq(sh)) - req_vq = virtscsi_pick_vq_mq(vscsi, sc); - else - req_vq = virtscsi_pick_vq(vscsi, tgt); + struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc); return virtscsi_queuecommand(vscsi, req_vq, sc); } @@ -775,7 +721,6 @@ static int virtscsi_target_alloc(struct scsi_target *starget) return -ENOMEM; seqcount_init(&tgt->tgt_seq); - atomic_set(&tgt->reqs, 0); tgt->req_vq = &vscsi->req_vqs[0]; starget->hostdata = tgt; @@ -823,6 +768,7 @@ static struct scsi_host_template virtscsi_host_template_single = { .target_alloc = virtscsi_target_alloc, .target_destroy = virtscsi_target_destroy, .track_queue_depth = 1, + .force_blk_mq = 1, }; static struct scsi_host_template virtscsi_host_template_multi = { @@ -844,6 +790,7 @@ static struct scsi_host_template virtscsi_host_template_multi = { .target_destroy = virtscsi_target_destroy, .map_queues = virtscsi_map_queues, .track_queue_depth = 1, + .force_blk_mq = 1, }; #define virtscsi_config_get(vdev, fld) \ diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index c374e3b5c678..777e5f1e52d1 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -609,7 +609,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter, break; case BTSTAT_ABORTQUEUE: - cmd->result = (DID_ABORT << 16); + cmd->result = 
(DID_BUS_BUSY << 16); break; case BTSTAT_SCSIPARITY: diff --git a/drivers/sdw/Kconfig b/drivers/sdw/Kconfig new file mode 100644 index 000000000000..660188bd2c02 --- /dev/null +++ b/drivers/sdw/Kconfig @@ -0,0 +1,19 @@ +menuconfig SDW + tristate "SoundWire bus support" + depends on CRC8 + depends on X86 + help + SoundWire interface is typically used for transporting data + related to audio functions. +menuconfig SDW_CNL + tristate "Intel SoundWire master controller support" + depends on SDW && X86 + help + Intel SoundWire master controller driver +menuconfig SDW_MAXIM_SLAVE + bool "SoundWire Slave for the Intel CNL FPGA" + depends on SDW && X86 + help + SoundWire Slave on FPGA platform for Intel CNL IP + Mostly N for all the cases other than CNL Slave FPGA + diff --git a/drivers/sdw/Makefile b/drivers/sdw/Makefile new file mode 100644 index 000000000000..e2ba440f4ef2 --- /dev/null +++ b/drivers/sdw/Makefile @@ -0,0 +1,5 @@ +sdw_bus-objs := sdw.o sdw_bwcalc.o sdw_utils.o + +obj-$(CONFIG_SDW) += sdw_bus.o +obj-$(CONFIG_SDW_CNL) += sdw_cnl.o +obj-$(CONFIG_SDW_MAXIM_SLAVE) += sdw_maxim.o diff --git a/drivers/sdw/sdw.c b/drivers/sdw/sdw.c new file mode 100644 index 000000000000..30e75cc504d2 --- /dev/null +++ b/drivers/sdw/sdw.c @@ -0,0 +1,3458 @@ +/* + * sdw.c - SoundWire Bus driver implementation + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Hardik T Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sdw_priv.h" + +#define sdw_slave_attr_gr NULL +#define sdw_mstr_attr_gr NULL + +#define CREATE_TRACE_POINTS +#include + +/* Global instance handling all the SoundWire buses */ +struct sdw_core sdw_core; + +static void sdw_slave_release(struct device *dev) +{ + kfree(to_sdw_slave(dev)); +} + +static void sdw_mstr_release(struct device *dev) +{ + struct sdw_master *mstr = to_sdw_master(dev); + + complete(&mstr->slv_released); +} + +static struct device_type sdw_slv_type = { + .groups = sdw_slave_attr_gr, + .release = sdw_slave_release, +}; + +static struct device_type sdw_mstr_type = { + .groups = sdw_mstr_attr_gr, + .release = sdw_mstr_release, +}; +/** + * sdw_slave_verify - return parameter as sdw_slave, or NULL + * @dev: device, probably from some driver model iterator + * + * When traversing the driver model tree, perhaps using driver model + * iterators like @device_for_each_child(), you can't assume very much + * about the nodes you find. Use this function to avoid oopses caused + * by wrongly treating some non-SDW device as an sdw_slave. + */ +struct sdw_slave *sdw_slave_verify(struct device *dev) +{ + return (dev->type == &sdw_slv_type) + ? 
to_sdw_slave(dev) + : NULL; +} + +/** + * sdw_mstr_verify - return parameter as sdw_master, or NULL + * @dev: device, probably from some driver model iterator + * + * When traversing the driver model tree, perhaps using driver model + * iterators like @device_for_each_child(), you can't assume very much + * about the nodes you find. Use this function to avoid oopses caused + * by wrongly treating some non-SDW device as an sdw_master. + */ +struct sdw_master *sdw_mstr_verify(struct device *dev) +{ + return (dev->type == &sdw_mstr_type) + ? to_sdw_master(dev) + : NULL; +} + +static const struct sdw_slave_id *sdw_match_slave(const struct sdw_slave_id *id, + const struct sdw_slave *sdw_slv) +{ + while (id->name[0]) { + if (strncmp(sdw_slv->name, id->name, SOUNDWIRE_NAME_SIZE) == 0) + return id; + id++; + } + return NULL; +} + +static const struct sdw_master_id *sdw_match_master( + const struct sdw_master_id *id, + const struct sdw_master *sdw_mstr) +{ + if (!id) + return NULL; + while (id->name[0]) { + if (strncmp(sdw_mstr->name, id->name, SOUNDWIRE_NAME_SIZE) == 0) + return id; + id++; + } + return NULL; +} + +static int sdw_slv_match(struct device *dev, struct device_driver *driver) +{ + struct sdw_slave *sdw_slv; + struct sdw_slave_driver *drv = to_sdw_slave_driver(driver); + int ret = 0; + + /* Check if the driver is of slave type. Both master and slave + * drivers have driver_type as their first field, so if the + * driver is not of slave type, return + */ + if (drv->driver_type != SDW_DRIVER_TYPE_SLAVE) + return ret; + + sdw_slv = to_sdw_slave(dev); + + if (drv->id_table) + ret = (sdw_match_slave(drv->id_table, sdw_slv) != NULL); + + if (driver->name && !ret) + ret = (strncmp(sdw_slv->name, driver->name, SOUNDWIRE_NAME_SIZE) + == 0); + if (ret) + sdw_slv->driver = drv; + return ret; +} +static int sdw_mstr_match(struct device *dev, struct device_driver *driver) +{ + struct sdw_master *sdw_mstr; + struct sdw_mstr_driver *drv = to_sdw_mstr_driver(driver); + int ret = 0; + + /* Check if the driver is of master type. Both master and slave + * drivers have driver_type as their first field, so if the + * driver is not of master type, return + */ + if (drv->driver_type != SDW_DRIVER_TYPE_MASTER) + return ret; + + sdw_mstr = to_sdw_master(dev); + + if (drv->id_table) + ret = (sdw_match_master(drv->id_table, sdw_mstr) != NULL); + + if (driver->name && !ret) + ret = (strncmp(sdw_mstr->name, driver->name, + SOUNDWIRE_NAME_SIZE) == 0); + if (ret) + sdw_mstr->driver = drv; + + return ret; +} + +static int sdw_mstr_probe(struct device *dev) +{ + const struct sdw_mstr_driver *sdrv = to_sdw_mstr_driver(dev->driver); + struct sdw_master *mstr = to_sdw_master(dev); + int ret = 0; + + if (!sdrv->probe) + return -ENODEV; + ret = dev_pm_domain_attach(dev, true); + if (ret != -EPROBE_DEFER) { + ret = sdrv->probe(mstr, sdw_match_master(sdrv->id_table, mstr)); + if (ret) + dev_pm_domain_detach(dev, true); + } + return ret; +} + +static int sdw_slv_probe(struct device *dev) +{ + const struct sdw_slave_driver *sdrv = to_sdw_slave_driver(dev->driver); + struct sdw_slave *sdwslv = to_sdw_slave(dev); + int ret = 0; + + if (!sdrv->probe) + return -ENODEV; + ret = dev_pm_domain_attach(dev, true); + if (ret != -EPROBE_DEFER) { + ret = sdrv->probe(sdwslv, sdw_match_slave(sdrv->id_table, + sdwslv)); + if (ret) + dev_pm_domain_detach(dev, true); + } + return ret; +} + + +int sdw_slave_get_bus_params(struct sdw_slave *sdw_slv, + struct sdw_bus_params *params) +{ + struct sdw_bus *bus; + struct sdw_master *mstr = sdw_slv->mstr; + 
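/* Every registered Master has a matching sdw_bus entry on the global + * bus_list; the current bus parameters (rows, columns, clock and + * active bank) live there rather than on the Master structure. + */ +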
list_for_each_entry(bus, &sdw_core.bus_list, bus_node) { + if (bus->mstr == mstr) + break; + } + if (!bus) + return -EFAULT; + + params->num_rows = bus->row; + params->num_cols = bus->col; + params->bus_clk_freq = bus->clk_freq >> 1; + params->bank = bus->active_bank; + + return 0; +} +EXPORT_SYMBOL(sdw_slave_get_bus_params); + +static int sdw_mstr_remove(struct device *dev) +{ + const struct sdw_mstr_driver *sdrv = to_sdw_mstr_driver(dev->driver); + int ret = 0; + + if (sdrv->remove) + ret = sdrv->remove(to_sdw_master(dev)); + else + return -ENODEV; + + dev_pm_domain_detach(dev, true); + return ret; + +} + +static int sdw_slv_remove(struct device *dev) +{ + const struct sdw_slave_driver *sdrv = to_sdw_slave_driver(dev->driver); + int ret = 0; + + if (sdrv->remove) + ret = sdrv->remove(to_sdw_slave(dev)); + else + return -ENODEV; + + dev_pm_domain_detach(dev, true); + return ret; +} + +static void sdw_slv_shutdown(struct device *dev) +{ + const struct sdw_slave_driver *sdrv = to_sdw_slave_driver(dev->driver); + + if (sdrv->shutdown) + sdrv->shutdown(to_sdw_slave(dev)); +} + +static void sdw_mstr_shutdown(struct device *dev) +{ + const struct sdw_mstr_driver *sdrv = to_sdw_mstr_driver(dev->driver); + struct sdw_master *mstr = to_sdw_master(dev); + + if (sdrv->shutdown) + sdrv->shutdown(mstr); +} + +static void sdw_shutdown(struct device *dev) +{ + struct sdw_slave *sdw_slv; + struct sdw_master *sdw_mstr; + + sdw_slv = sdw_slave_verify(dev); + sdw_mstr = sdw_mstr_verify(dev); + if (sdw_slv) + sdw_slv_shutdown(dev); + else if (sdw_mstr) + sdw_mstr_shutdown(dev); +} + +static int sdw_remove(struct device *dev) +{ + struct sdw_slave *sdw_slv; + struct sdw_master *sdw_mstr; + + sdw_slv = sdw_slave_verify(dev); + sdw_mstr = sdw_mstr_verify(dev); + if (sdw_slv) + return sdw_slv_remove(dev); + else if (sdw_mstr) + return sdw_mstr_remove(dev); + + return 0; +} + +static int sdw_probe(struct device *dev) +{ + + struct sdw_slave *sdw_slv; + struct sdw_master *sdw_mstr; + + sdw_slv = sdw_slave_verify(dev); + sdw_mstr = sdw_mstr_verify(dev); + if (sdw_slv) + return sdw_slv_probe(dev); + else if (sdw_mstr) + return sdw_mstr_probe(dev); + + return -ENODEV; + +} + +static int sdw_match(struct device *dev, struct device_driver *driver) +{ + struct sdw_slave *sdw_slv; + struct sdw_master *sdw_mstr; + + sdw_slv = sdw_slave_verify(dev); + sdw_mstr = sdw_mstr_verify(dev); + if (sdw_slv) + return sdw_slv_match(dev, driver); + else if (sdw_mstr) + return sdw_mstr_match(dev, driver); + return 0; + +} + +#ifdef CONFIG_PM_SLEEP +static int sdw_legacy_suspend(struct device *dev, pm_message_t mesg) +{ + struct sdw_slave *sdw_slv = NULL; + struct sdw_slave_driver *driver; + + if (dev->type == &sdw_slv_type) + sdw_slv = to_sdw_slave(dev); + + if (!sdw_slv || !dev->driver) + return 0; + + driver = to_sdw_slave_driver(dev->driver); + if (!driver->suspend) + return 0; + + return driver->suspend(sdw_slv, mesg); +} + +static int sdw_legacy_resume(struct device *dev) +{ + struct sdw_slave *sdw_slv = NULL; + struct sdw_slave_driver *driver; + + if (dev->type == &sdw_slv_type) + sdw_slv = to_sdw_slave(dev); + + if (!sdw_slv || !dev->driver) + return 0; + + driver = to_sdw_slave_driver(dev->driver); + if (!driver->resume) + return 0; + + return driver->resume(sdw_slv); +} + +static int sdw_pm_suspend(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; + + if (pm) + return pm_generic_suspend(dev); + else + return sdw_legacy_suspend(dev, PMSG_SUSPEND); +} + +static int sdw_pm_resume(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + if (pm) + return pm_generic_resume(dev); + else + return sdw_legacy_resume(dev); +} + +#else +#define sdw_pm_suspend NULL +#define sdw_pm_resume NULL +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops soundwire_pm = { + .suspend = sdw_pm_suspend, + .resume = sdw_pm_resume, +#ifdef CONFIG_PM + .runtime_suspend = pm_generic_runtime_suspend, + .runtime_resume = pm_generic_runtime_resume, +#endif +}; + +struct bus_type sdw_bus_type = { + .name = "soundwire", + .match = sdw_match, + .probe = sdw_probe, + .remove = sdw_remove, + .shutdown = sdw_shutdown, + .pm = &soundwire_pm, +}; +EXPORT_SYMBOL_GPL(sdw_bus_type); + +struct device sdw_slv = { + .init_name = "soundwire", +}; + +static struct static_key sdw_trace_msg = STATIC_KEY_INIT_FALSE; + +int sdw_transfer_trace_reg(void) +{ + static_key_slow_inc(&sdw_trace_msg); + + return 0; +} + +void sdw_transfer_trace_unreg(void) +{ + static_key_slow_dec(&sdw_trace_msg); +} + +/** + * sdw_lock_mstr - Get exclusive access to an SDW bus segment + * @mstr: Target SDW bus segment + */ +void sdw_lock_mstr(struct sdw_master *mstr) +{ + rt_mutex_lock(&mstr->bus_lock); +} + +/** + * sdw_trylock_mstr - Try to get exclusive access to an SDW bus segment + * @mstr: Target SDW bus segment + */ +int sdw_trylock_mstr(struct sdw_master *mstr) +{ + return rt_mutex_trylock(&mstr->bus_lock); +} + + +/** + * sdw_unlock_mstr - Release exclusive access to an SDW bus segment + * @mstr: Target SDW bus segment + */ +void sdw_unlock_mstr(struct sdw_master *mstr) +{ + rt_mutex_unlock(&mstr->bus_lock); +} + + +static int sdw_assign_slv_number(struct sdw_master *mstr, + struct sdw_msg *msg) +{ + int i, j, ret = -1; + + sdw_lock_mstr(mstr); + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (mstr->sdw_addr[i].assigned == true) + continue; + mstr->sdw_addr[i].assigned = true; + for (j = 0; j < 6; j++) + mstr->sdw_addr[i].dev_id[j] = msg->buf[j]; + ret = i; + break; + } + sdw_unlock_mstr(mstr); + return ret; +} + +static int sdw_program_slv_address(struct sdw_master *mstr, + u8 slave_addr) +{ + struct sdw_msg msg; + u8 buf[1] = {0}; + int ret; + + buf[0] = slave_addr; + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_WRITE; + msg.addr = SDW_SCP_DEVNUMBER; + msg.len = 1; + msg.buf = buf; + msg.slave_addr = 0x0; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr, &msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "Program Slave address change\n"); + return ret; + } + return 0; +} + +static int sdw_find_slave(struct sdw_master *mstr, struct sdw_msg + *msg, bool *found) +{ + struct sdw_slv_addr *sdw_addr; + int ret = 0, i, comparison; + *found = false; + + sdw_lock_mstr(mstr); + sdw_addr = mstr->sdw_addr; + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + comparison = memcmp(sdw_addr[i].dev_id, msg->buf, + SDW_NUM_DEV_ID_REGISTERS); + if ((!comparison) && (sdw_addr[i].assigned == true)) { + *found = true; + break; + } + } + sdw_unlock_mstr(mstr); + if (*found == true) + ret = sdw_program_slv_address(mstr, sdw_addr[i].slv_number); + return ret; +} + +static void sdw_free_slv_number(struct sdw_master *mstr, + int slv_number) +{ + int i; + + sdw_lock_mstr(mstr); + for (i = 0; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (slv_number == mstr->sdw_addr[i].slv_number) { + mstr->sdw_addr[slv_number].assigned = 
false; + memset(&mstr->sdw_addr[slv_number].dev_id[0], 0x0, 6); + } + } + sdw_unlock_mstr(mstr); +} + + +int count; +static int sdw_register_slave(struct sdw_master *mstr) +{ + int ret = 0, i, ports; + struct sdw_msg msg; + u8 buf[6] = {0}; + struct sdw_slave *sdw_slave; + int slv_number = -1; + bool found = false; + + + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_READ; + msg.addr = SDW_SCP_DEVID_0; + msg.len = 6; + msg.buf = buf; + msg.slave_addr = 0x0; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + + while ((ret = (sdw_slave_transfer(mstr, &msg, 1)) == 1)) { + ret = sdw_find_slave(mstr, &msg, &found); + if (found && !ret) { + dev_info(&mstr->dev, "Slave already registered\n"); + continue; + /* Even if slave registering fails we continue for other + * slave status, but we flag error + */ + } else if (ret) { + dev_err(&mstr->dev, "Re-registering slave failed"); + continue; + } + slv_number = sdw_assign_slv_number(mstr, &msg); + if (slv_number <= 0) { + dev_err(&mstr->dev, "Failed to assign slv_number\n"); + ret = -EINVAL; + goto slv_number_assign_fail; + } + sdw_slave = kzalloc(sizeof(struct sdw_slave), GFP_KERNEL); + if (!sdw_slave) { + ret = -ENOMEM; + goto mem_alloc_failed; + } + sdw_slave->mstr = mstr; + sdw_slave->dev.parent = &sdw_slave->mstr->dev; + sdw_slave->dev.bus = &sdw_bus_type; + sdw_slave->dev.type = &sdw_slv_type; + sdw_slave->slv_addr = &mstr->sdw_addr[slv_number]; + sdw_slave->slv_addr->slave = sdw_slave; + /* We have assigned new slave number, so its not present + * till it again attaches to bus with this new + * slave address + */ + sdw_slave->slv_addr->status = SDW_SLAVE_STAT_NOT_PRESENT; + for (i = 0; i < 6; i++) + sdw_slave->dev_id[i] = msg.buf[i]; + dev_dbg(&mstr->dev, "SDW slave slave id found with values\n"); + dev_dbg(&mstr->dev, "dev_id0 to dev_id5: %x:%x:%x:%x:%x:%x\n", + msg.buf[0], msg.buf[1], msg.buf[2], + msg.buf[3], msg.buf[4], msg.buf[5]); + dev_dbg(&mstr->dev, "Slave number assigned is %x\n", slv_number); + /* TODO: Fill the sdw_slave structre from ACPI */ + ports = sdw_slave->sdw_slv_cap.num_of_sdw_ports; + /* Add 1 for port 0 for simplicity */ + ports++; + sdw_slave->port_ready = + kzalloc((sizeof(struct completion) * ports), + GFP_KERNEL); + if (!sdw_slave->port_ready) { + ret = -ENOMEM; + goto port_alloc_mem_failed; + } + for (i = 0; i < ports; i++) + init_completion(&sdw_slave->port_ready[i]); + + dev_set_name(&sdw_slave->dev, "sdw-slave%d-%02x:%02x:%02x:%02x:%02x:%02x", + sdw_master_id(mstr), + sdw_slave->dev_id[0], + sdw_slave->dev_id[1], + sdw_slave->dev_id[2], + sdw_slave->dev_id[3], + sdw_slave->dev_id[4], + sdw_slave->dev_id[5] + mstr->nr); + /* Set name based on dev_id. 
This will be + * compared to load driver + */ + sprintf(sdw_slave->name, "%02x:%02x:%02x:%02x:%02x:%02x", + sdw_slave->dev_id[0], + sdw_slave->dev_id[1], + sdw_slave->dev_id[2], + sdw_slave->dev_id[3], + sdw_slave->dev_id[4], + sdw_slave->dev_id[5] + mstr->nr); + ret = device_register(&sdw_slave->dev); + if (ret) { + dev_err(&mstr->dev, "Register slave failed\n"); + goto reg_slv_failed; + } + ret = sdw_program_slv_address(mstr, slv_number); + if (ret) { + dev_err(&mstr->dev, "Programming slave address failed\n"); + goto program_slv_failed; + } + dev_dbg(&mstr->dev, "Slave registered with bus id %s\n", + dev_name(&sdw_slave->dev)); + sdw_slave->slv_number = slv_number; + mstr->num_slv++; + sdw_lock_mstr(mstr); + list_add_tail(&sdw_slave->node, &mstr->slv_list); + sdw_unlock_mstr(mstr); + + } + count++; + return 0; +program_slv_failed: + device_unregister(&sdw_slave->dev); +port_alloc_mem_failed: +reg_slv_failed: + kfree(sdw_slave); +mem_alloc_failed: + sdw_free_slv_number(mstr, slv_number); +slv_number_assign_fail: + return ret; + +} + +/** + * __sdw_transfer - unlocked flavor of sdw_slave_transfer + * @mstr: Handle to SDW bus + * @msg: One or more messages to execute before STOP is issued to + * terminate the operation; each message begins with a START. + * @num: Number of messages to be executed. + * + * Returns negative errno, else the number of messages executed. + * + * Adapter lock must be held when calling this function. No debug logging + * takes place. mstr->algo->master_xfer existence isn't checked. + */ +int __sdw_transfer(struct sdw_master *mstr, struct sdw_msg *msg, int num, + struct sdw_async_xfer_data *async_data) +{ + unsigned long orig_jiffies; + int ret = 0, try, i; + struct sdw_slv_capabilities *slv_cap; + int program_scp_addr_page; + int addr = msg->slave_addr; + + /* sdw_trace_msg gets enabled when tracepoint sdw_slave_transfer gets + * enabled. This is an efficient way of keeping the for-loop from + * being executed when not needed. + */ + if (static_key_false(&sdw_trace_msg)) { + int i; + + for (i = 0; i < num; i++) + if (msg[i].flag & SDW_MSG_FLAG_READ) + trace_sdw_read(mstr, &msg[i], i); + else + trace_sdw_write(mstr, &msg[i], i); + } + orig_jiffies = jiffies; + for (i = 0; i < num; i++) { + for (ret = 0, try = 0; try <= mstr->retries; try++) { + if (msg->slave_addr == 0) + /* If we are enumerating slave address 0, + * we dont program scp, it should be set + * default to 0 + */ + program_scp_addr_page = 0; + else if (msg->slave_addr == 15) + /* If we are broadcasting, we need to program + * the SCP address as some slaves will be + * supporting it while some wont be. 
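+ * Device number 15 is the SoundWire broadcast address, so the + * programmed value must be valid for every attached Slave at once.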
+ * So it should be programmed + */ + program_scp_addr_page = 1; + + else { + slv_cap = + &mstr->sdw_addr[addr].slave->sdw_slv_cap; + program_scp_addr_page = + slv_cap->paging_supported; + } + /* Call async or sync handler based on call */ + if (!async_data) + ret = mstr->driver->mstr_ops->xfer_msg(mstr, + msg, program_scp_addr_page); + /* Async transfer is not mandatory to support + * It requires only if stream is split across the + * masters, where bus driver need to send the commands + * for bank switch individually and wait for them + * to complete out side of the master context + */ + else if (mstr->driver->mstr_ops->xfer_msg_async && + async_data) + ret = mstr->driver->mstr_ops->xfer_msg_async( + mstr, msg, + program_scp_addr_page, + async_data); + else + return -ENOTSUPP; + if (ret != -EAGAIN) + break; + if (time_after(jiffies, + orig_jiffies + mstr->timeout)) + break; + } + } + + if (static_key_false(&sdw_trace_msg)) { + int i; + + for (i = 0; i < msg->len; i++) + if (msg[i].flag & SDW_MSG_FLAG_READ) + trace_sdw_reply(mstr, &msg[i], i); + trace_sdw_result(mstr, i, ret); + } + if (!ret) + return i; + return ret; +} +EXPORT_SYMBOL_GPL(__sdw_transfer); + +/* NO PM version of slave transfer. Called from power management APIs + * to avoid dead locks. + */ +static int sdw_slave_transfer_nopm(struct sdw_master *mstr, struct sdw_msg *msg, + int num) +{ + int ret; + + if (mstr->driver->mstr_ops->xfer_msg) { + ret = __sdw_transfer(mstr, msg, num, NULL); + return ret; + } + dev_dbg(&mstr->dev, "SDW level transfers not supported\n"); + return -EOPNOTSUPP; +} + +int sdw_slave_transfer_async(struct sdw_master *mstr, struct sdw_msg *msg, + int num, + struct sdw_async_xfer_data *async_data) +{ + int ret; + /* Currently we support only message asynchronously, This is mainly + * used to do bank switch for multiple controllers + */ + if (num != 1) + return -EINVAL; + if (!(mstr->driver->mstr_ops->xfer_msg)) { + dev_dbg(&mstr->dev, "SDW level transfers not supported\n"); + return -EOPNOTSUPP; + } + pm_runtime_get_sync(&mstr->dev); + ret = __sdw_transfer(mstr, msg, num, async_data); + pm_runtime_mark_last_busy(&mstr->dev); + pm_runtime_put_sync_autosuspend(&mstr->dev); + return ret; +} + +/** + * sdw_slave_transfer: Transfer message between slave and mstr on the bus. + * @mstr: mstr master which will transfer the message + * @msg: Array of messages to be transferred. + * @num: Number of messages to be transferred, messages include read and write + * messages, but not the ping messages. + */ +int sdw_slave_transfer(struct sdw_master *mstr, struct sdw_msg *msg, int num) +{ + int ret; + + /* REVISIT the fault reporting model here is weak: + * + * - When we get an error after receiving N bytes from a slave, + * there is no way to report "N". + * + * - When we get a NAK after transmitting N bytes to a slave, + * there is no way to report "N" ... or to let the mstr + * continue executing the rest of this combined message, if + * that's the appropriate response. + * + * - When for example "num" is two and we successfully complete + * the first message but get an error part way through the + * second, it's unclear whether that should be reported as + * one (discarding status on the second message) or errno + * (discarding status on the first one). 
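+ * + * Callers therefore treat any return value different from num as a + * failure of the whole transfer.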
+ */ + if (!(mstr->driver->mstr_ops->xfer_msg)) { + dev_dbg(&mstr->dev, "SDW level transfers not supported\n"); + return -EOPNOTSUPP; + } + pm_runtime_get_sync(&mstr->dev); + if (in_atomic() || irqs_disabled()) { + ret = sdw_trylock_mstr(mstr); + if (!ret) { + /* SDW activity is ongoing. */ + ret = -EAGAIN; + goto out; + } + } else { + sdw_lock_mstr(mstr); + } + ret = __sdw_transfer(mstr, msg, num, NULL); + sdw_unlock_mstr(mstr); +out: + pm_runtime_mark_last_busy(&mstr->dev); + pm_runtime_put_sync_autosuspend(&mstr->dev); + return ret; +} +EXPORT_SYMBOL_GPL(sdw_slave_transfer); + +static int sdw_handle_dp0_interrupts(struct sdw_master *mstr, + struct sdw_slave *sdw_slv, u8 *status) +{ + int ret = 0; + struct sdw_msg rd_msg, wr_msg; + int impl_def_mask = 0; + u8 rbuf[1] = {0}, wbuf[1] = {0}; + + /* Create message for clearing the interrupts */ + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.addr = SDW_DP0_INTCLEAR; + wr_msg.len = 1; + wr_msg.buf = wbuf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + /* Create message for reading the interrupts for DP0 interrupts*/ + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.addr = SDW_DP0_INTSTAT; + rd_msg.len = 1; + rd_msg.buf = rbuf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Interrupt status read failed for slave %x\n", sdw_slv->slv_number); + goto out; + } + if (rd_msg.buf[0] & SDW_DP0_INTSTAT_TEST_FAIL_MASK) { + dev_err(&mstr->dev, "Test fail for slave %d port 0\n", + sdw_slv->slv_number); + wr_msg.buf[0] |= SDW_DP0_INTCLEAR_TEST_FAIL_MASK; + } + if (rd_msg.buf[0] & SDW_DP0_INTSTAT_PORT_READY_MASK) { + complete(&sdw_slv->port_ready[0]); + wr_msg.buf[0] |= SDW_DP0_INTCLEAR_PORT_READY_MASK; + } + if (rd_msg.buf[0] & SDW_DP0_INTMASK_BRA_FAILURE_MASK) { + /* TODO: Handle BRA failure */ + dev_err(&mstr->dev, "BRA failed for slave %d\n", + sdw_slv->slv_number); + wr_msg.buf[0] |= SDW_DP0_INTCLEAR_BRA_FAILURE_MASK; + } + impl_def_mask = SDW_DP0_INTSTAT_IMPDEF1_MASK | + SDW_DP0_INTSTAT_IMPDEF2_MASK | + SDW_DP0_INTSTAT_IMPDEF3_MASK; + if (rd_msg.buf[0] & impl_def_mask) { + wr_msg.buf[0] |= impl_def_mask; + *status = wr_msg.buf[0]; + } + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Register transfer failed\n"); + goto out; + } +out: + return ret; + +} + +static int sdw_handle_port_interrupt(struct sdw_master *mstr, + struct sdw_slave *sdw_slv, int port_num, + u8 *status) +{ + int ret = 0; + struct sdw_msg rd_msg, wr_msg; + u8 rbuf[1], wbuf[1]; + int impl_def_mask = 0; + +/* + * Handle the Data port0 interrupt separately since the interrupt + * mask and stat register is different than other DPn registers + */ + if (port_num == 0 && sdw_slv->sdw_slv_cap.sdw_dp0_supported) + return sdw_handle_dp0_interrupts(mstr, sdw_slv, status); + + /* Create message for reading the port interrupts */ + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.addr = SDW_DPN_INTCLEAR + + (SDW_NUM_DATA_PORT_REGISTERS * port_num); + wr_msg.len = 1; + wr_msg.buf = wbuf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.addr = SDW_DPN_INTSTAT + + (SDW_NUM_DATA_PORT_REGISTERS * port_num); + rd_msg.len = 1; + rd_msg.buf = rbuf; + rd_msg.slave_addr = 
sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Port Status read failed for slv %x port %x\n", + sdw_slv->slv_number, port_num); + goto out; + } + if (rd_msg.buf[0] & SDW_DPN_INTSTAT_TEST_FAIL_MASK) { + dev_err(&mstr->dev, "Test fail for slave %x port %x\n", + sdw_slv->slv_number, port_num); + wr_msg.buf[0] |= SDW_DPN_INTCLEAR_TEST_FAIL_MASK; + } + if (rd_msg.buf[0] & SDW_DPN_INTSTAT_PORT_READY_MASK) { + complete(&sdw_slv->port_ready[port_num]); + wr_msg.buf[0] |= SDW_DPN_INTCLEAR_PORT_READY_MASK; + } + impl_def_mask = SDW_DPN_INTSTAT_IMPDEF1_MASK | + SDW_DPN_INTSTAT_IMPDEF2_MASK | + SDW_DPN_INTSTAT_IMPDEF3_MASK; + if (rd_msg.buf[0] & impl_def_mask) { + /* TODO: Handle implementation defined mask ready */ + wr_msg.buf[0] |= impl_def_mask; + *status = wr_msg.buf[0]; + } + /* Clear and Ack the interrupt */ + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Register transfer failed\n"); + goto out; + } +out: + return ret; + +} +static int sdw_handle_slave_alerts(struct sdw_master *mstr, + struct sdw_slave *sdw_slv) +{ + struct sdw_msg rd_msg[3], wr_msg; + u8 rbuf[3], wbuf[1]; + int i, ret = 0; + int cs_port_mask, cs_port_register, cs_port_start, cs_ports; + struct sdw_impl_def_intr_stat *intr_status; + struct sdw_portn_intr_stat *portn_stat; + u8 port_status[15] = {0}; + u8 control_port_stat = 0; + + + /* Read Instat 1, Instat 2 and Instat 3 registers */ + rd_msg[0].ssp_tag = 0x0; + rd_msg[0].flag = SDW_MSG_FLAG_READ; + rd_msg[0].addr = SDW_SCP_INTSTAT_1; + rd_msg[0].len = 1; + rd_msg[0].buf = &rbuf[0]; + rd_msg[0].slave_addr = sdw_slv->slv_number; + rd_msg[0].addr_page1 = 0x0; + rd_msg[0].addr_page2 = 0x0; + + rd_msg[1].ssp_tag = 0x0; + rd_msg[1].flag = SDW_MSG_FLAG_READ; + rd_msg[1].addr = SDW_SCP_INTSTAT2; + rd_msg[1].len = 1; + rd_msg[1].buf = &rbuf[1]; + rd_msg[1].slave_addr = sdw_slv->slv_number; + rd_msg[1].addr_page1 = 0x0; + rd_msg[1].addr_page2 = 0x0; + + rd_msg[2].ssp_tag = 0x0; + rd_msg[2].flag = SDW_MSG_FLAG_READ; + rd_msg[2].addr = SDW_SCP_INTSTAT3; + rd_msg[2].len = 1; + rd_msg[2].buf = &rbuf[2]; + rd_msg[2].slave_addr = sdw_slv->slv_number; + rd_msg[2].addr_page1 = 0x0; + rd_msg[2].addr_page2 = 0x0; + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.addr = SDW_SCP_INTCLEAR1; + wr_msg.len = 1; + wr_msg.buf = &wbuf[0]; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr, rd_msg, 3); + if (ret != 3) { + ret = -EINVAL; + dev_err(&mstr->dev, "Reading of register failed\n"); + goto out; + } + /* First handle parity and bus clash interrupts */ + if (rd_msg[0].buf[0] & SDW_SCP_INTSTAT1_PARITY_MASK) { + dev_err(&mstr->dev, "Parity error detected\n"); + wr_msg.buf[0] |= SDW_SCP_INTCLEAR1_PARITY_MASK; + } + /* Handle bus errors */ + if (rd_msg[0].buf[0] & SDW_SCP_INTSTAT1_BUS_CLASH_MASK) { + dev_err(&mstr->dev, "Bus clash error detected\n"); + wr_msg.buf[0] |= SDW_SCP_INTCLEAR1_BUS_CLASH_MASK; + } + /* Handle implementation defined mask */ + if (rd_msg[0].buf[0] & SDW_SCP_INTSTAT1_IMPL_DEF_MASK) { + wr_msg.buf[0] |= SDW_SCP_INTCLEAR1_IMPL_DEF_MASK; + control_port_stat = (rd_msg[0].buf[0] & + SDW_SCP_INTSTAT1_IMPL_DEF_MASK); + } + + /* Handle Cascaded Port interrupts from Instat_1 registers */ + + /* Number of port status bits in this register */ + cs_ports = 4; + /* Port number starts at in this register */ + 
cs_port_start = 0; + /* Bit mask for the starting port intr status */ + cs_port_mask = 0x08; + /* Index of the Int_stat register holding these port bits */ + cs_port_register = 0; + + /* Look for cascaded port interrupts, if found handle port + * interrupts. Do this for all the Int_stat registers. + */ + for (i = cs_port_start; i < cs_port_start + cs_ports && + i <= sdw_slv->sdw_slv_cap.num_of_sdw_ports; i++) { + if (rd_msg[cs_port_register].buf[0] & cs_port_mask) { + ret += sdw_handle_port_interrupt(mstr, + sdw_slv, i, &port_status[i]); + } + cs_port_mask = cs_port_mask << 1; + } + + /* + * Handle cascaded port interrupts from the Instat_2 register; + * if Instat_1 shows no SCP2 cascade, move straight to the SCP3 + * check + */ + if (!(rd_msg[0].buf[0] & SDW_SCP_INTSTAT1_SCP2_CASCADE_MASK)) + goto handle_instat_3_register; + + + cs_ports = 7; + cs_port_start = 4; + cs_port_mask = 0x1; + cs_port_register = 1; + for (i = cs_port_start; i < cs_port_start + cs_ports && + i <= sdw_slv->sdw_slv_cap.num_of_sdw_ports; i++) { + + if (rd_msg[cs_port_register].buf[0] & cs_port_mask) { + + ret += sdw_handle_port_interrupt(mstr, + sdw_slv, i, &port_status[i]); + } + cs_port_mask = cs_port_mask << 1; + } + + /* + * Handle cascaded port interrupts from the Instat_3 register; + * if Instat_2 shows no SCP3 cascade, move on to the impl_def intrs + */ +handle_instat_3_register: + if (!(rd_msg[1].buf[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE_MASK)) + goto handle_impl_def_interrupts; + + cs_ports = 4; + cs_port_start = 11; + cs_port_mask = 0x1; + cs_port_register = 2; + + for (i = cs_port_start; i < cs_port_start + cs_ports && + i <= sdw_slv->sdw_slv_cap.num_of_sdw_ports; i++) { + + if (rd_msg[cs_port_register].buf[0] & cs_port_mask) { + + ret += sdw_handle_port_interrupt(mstr, + sdw_slv, i, &port_status[i]); + } + cs_port_mask = cs_port_mask << 1; + } + +handle_impl_def_interrupts: + + /* + * If the slave has not registered for implementation defined + * interrupts, don't read them. 
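+ * The interrupt sources gathered in wr_msg are still acknowledged + * at ack_interrupts below, so the Slave can de-assert its alert.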
+ */ + if (!sdw_slv->driver->handle_impl_def_interrupts) + goto ack_interrupts; + + intr_status = kzalloc(sizeof(*intr_status), GFP_KERNEL); + if (!intr_status) + return -ENOMEM; + + portn_stat = kzalloc((sizeof(*portn_stat)) * + sdw_slv->sdw_slv_cap.num_of_sdw_ports, + GFP_KERNEL); + if (!portn_stat) { + kfree(intr_status); + return -ENOMEM; + } + + intr_status->portn_stat = portn_stat; + intr_status->control_port_stat = control_port_stat; + + /* Update the implementation defined status to the Slave */ + for (i = 1; i < sdw_slv->sdw_slv_cap.num_of_sdw_ports; i++) { + + intr_status->portn_stat[i].status = port_status[i]; + intr_status->portn_stat[i].num = i; + } + + intr_status->port0_stat = port_status[0]; + intr_status->control_port_stat = wr_msg.buf[0]; + + ret = sdw_slv->driver->handle_impl_def_interrupts(sdw_slv, + intr_status); + if (ret) + dev_err(&mstr->dev, "Implementation defined interrupt handling failed\n"); + + kfree(portn_stat); + kfree(intr_status); + +ack_interrupts: + /* Ack the interrupts */ + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Register transfer failed\n"); + goto out; + } + ret = 0; +out: + return ret; +} + +int sdw_en_intr(struct sdw_slave *sdw_slv, int port_num, int mask) +{ + + struct sdw_msg rd_msg, wr_msg; + u8 buf; + int ret; + struct sdw_master *mstr = sdw_slv->mstr; + + rd_msg.addr = wr_msg.addr = SDW_DPN_INTMASK + + (SDW_NUM_DATA_PORT_REGISTERS * port_num); + + /* Create message for enabling the interrupts */ + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.buf = &buf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + /* Create message for reading the DPn interrupt mask */ + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.buf = &buf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "DPn Intr mask read failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + + buf |= mask; + + /* Set the port ready and Test fail interrupt mask as well */ + buf |= SDW_DPN_INTSTAT_TEST_FAIL_MASK; + buf |= SDW_DPN_INTSTAT_PORT_READY_MASK; + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "DPn Intr mask write failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + return 0; +} + +static int sdw_en_scp_intr(struct sdw_slave *sdw_slv, int mask) +{ + struct sdw_msg rd_msg, wr_msg; + u8 buf = 0; + int ret; + struct sdw_master *mstr = sdw_slv->mstr; + u16 reg_addr; + + reg_addr = SDW_SCP_INTMASK1; + + rd_msg.addr = wr_msg.addr = reg_addr; + + /* Create message for reading the interrupt mask */ + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.buf = &buf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "SCP Intr mask read failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + + /* Enable the Slave defined interrupts. 
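+ * The mask argument is the implementation-defined interrupt mask + * the Slave reported in its capabilities via scp_impl_def_intr_mask.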
*/ + buf |= mask; + + /* Always enable the bus clash and parity interrupt masks as well */ + buf |= SDW_SCP_INTMASK1_BUS_CLASH_MASK; + buf |= SDW_SCP_INTMASK1_PARITY_MASK; + + /* Create message for enabling the interrupts */ + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.buf = &buf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "SCP Intr mask write failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + + /* Return if DP0 is not present */ + if (!sdw_slv->sdw_slv_cap.sdw_dp0_supported) + return 0; + + + reg_addr = SDW_DP0_INTMASK; + rd_msg.addr = wr_msg.addr = reg_addr; + mask = sdw_slv->sdw_slv_cap.sdw_dp0_cap->imp_def_intr_mask; + buf = 0; + + /* Create message for reading the interrupt mask */ + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.buf = &buf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "DP0 Intr mask read failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + + /* Enable the Slave defined interrupts. */ + buf |= mask; + + /* Set the port ready and Test fail interrupt mask as well */ + buf |= SDW_DP0_INTSTAT_TEST_FAIL_MASK; + buf |= SDW_DP0_INTSTAT_PORT_READY_MASK; + buf |= SDW_DP0_INTSTAT_BRA_FAILURE_MASK; + + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.buf = &buf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "DP0 Intr mask write failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + return 0; +} + +static int sdw_prog_slv(struct sdw_slave *sdw_slv) +{ + + struct sdw_slv_capabilities *cap; + int ret, i; + struct sdw_slv_dpn_capabilities *dpn_cap; + struct sdw_master *mstr = sdw_slv->mstr; + + if (!sdw_slv->slave_cap_updated) + return 0; + cap = &sdw_slv->sdw_slv_cap; + + /* Enable DP0 and SCP interrupts */ + ret = sdw_en_scp_intr(sdw_slv, cap->scp_impl_def_intr_mask); + + /* Failure should never happen, but even if it happens we continue */ + if (ret) + dev_err(&mstr->dev, "SCP program failed\n"); + + for (i = 0; i < cap->num_of_sdw_ports; i++) { + dpn_cap = &cap->sdw_dpn_cap[i]; + ret = sdw_en_intr(sdw_slv, (i + 1), + dpn_cap->imp_def_intr_mask); + + if (ret) + break; + } + return ret; +} + + +static void sdw_send_slave_status(struct sdw_slave *slave, + enum sdw_slave_status *status) +{ + struct sdw_slave_driver *slv_drv = slave->driver; + + if (slv_drv && slv_drv->update_slv_status) + slv_drv->update_slv_status(slave, status); +} + +static int sdw_wait_for_deprepare(struct sdw_slave *slave) +{ + int ret; + struct sdw_msg msg; + u8 buf[1] = {0}; + int timeout = 0; + struct sdw_master *mstr = slave->mstr; + + /* Create message to read the Slave's ClockStopNotFinished status + * from the SCP_Stat register. 
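+ * buf is preset to 0xFF so that a failed read keeps the + * ClockStopNotFinished bit set and is not mistaken for completion.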
*/ + buf[0] = 0xFF; + + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_READ; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = slave->slv_number; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_STAT; + /* + * Read the ClockStopNotFinished bit from the SCP_Stat register + * of the particular Slave to make sure the de-prepare is done + */ + do { + /* + * Ideally this should not fail, but even if it fails + * in an exceptional situation, we go ahead with the + * clock stop + */ + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + + if (ret != 1) { + WARN_ONCE(1, "Clock stop status read failed\n"); + break; + } + + if (!(buf[0] & SDW_SCP_STAT_CLK_STP_NF_MASK)) + break; + + /* + * TODO: Need to find the exact requirement from the + * spec. Since we are in the suspend path we should + * not sleep for long; ideally a Slave should be ready + * to stop the clock within a few ms. + * So sleep briefly and increase the loop count. This + * is not harmful, since the loop terminates as soon + * as the Slave is ready. + * + */ + msleep(2); + timeout++; + + } while (timeout != 500); + + if (!(buf[0] & SDW_SCP_STAT_CLK_STP_NF_MASK)) { + dev_info(&mstr->dev, "Clock stop de-prepare done\n"); + return 0; + } + + WARN_ONCE(1, "Clk stp deprepare failed for slave %d\n", + slave->slv_number); + + return -EINVAL; +} + +static void sdw_prep_slave_for_clk_stp(struct sdw_master *mstr, + struct sdw_slave *slave, + enum sdw_clk_stop_mode clock_stop_mode, + bool prep) +{ + bool wake_en; + struct sdw_slv_capabilities *cap; + u8 buf[1] = {0}; + struct sdw_msg msg; + int ret; + + cap = &slave->sdw_slv_cap; + + /* Set the wakeup enable based on Slave capability */ + wake_en = !cap->wake_up_unavailable; + + if (prep) { + /* Even if it is a simplified clock stop prepare, + * setting the prepare bit won't harm + */ + buf[0] |= (1 << SDW_SCP_SYSTEMCTRL_CLK_STP_PREP_SHIFT); + buf[0] |= clock_stop_mode << + SDW_SCP_SYSTEMCTRL_CLK_STP_MODE_SHIFT; + buf[0] |= wake_en << SDW_SCP_SYSTEMCTRL_WAKE_UP_EN_SHIFT; + } else + buf[0] = 0; + + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_WRITE; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = slave->slv_number; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_SYSTEMCTRL; + + /* + * We are calling the NOPM version of the transfer API, because + * Master controllers call this from their suspend handler; if we + * called the normal transfer API, it would try to resume the + * controller, which would result in a deadlock + */ + + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + /* We should continue even if it fails for some Slave */ + if (ret != 1) + WARN_ONCE(1, "Clock Stop prepare failed for slave %d\n", + slave->slv_number); +} + +static int sdw_check_for_prep_bit(struct sdw_slave *slave) +{ + u8 buf[1] = {0}; + struct sdw_msg msg; + int ret; + struct sdw_master *mstr = slave->mstr; + + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_READ; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = slave->slv_number; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_SYSTEMCTRL; + + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + /* We should continue even if it fails for some Slave */ + if (ret != 1) { + dev_err(&mstr->dev, "SCP_SystemCtrl read failed for Slave %d\n", + slave->slv_number); + return -EINVAL; + + } + return (buf[0] & SDW_SCP_SYSTEMCTRL_CLK_STP_PREP_MASK); + +} + +static int sdw_slv_deprepare_clk_stp1(struct sdw_slave *slave) +{ + struct sdw_slv_capabilities *cap; + int ret; + struct sdw_master *mstr = slave->mstr; + + cap = &slave->sdw_slv_cap; + + /* + * The Slave might have enumerated for the first time or have come + * back from clock stop mode 1; 
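+ * in mode 1 the Slave may have lost power and its prepare state;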
+ * return early if the Slave doesn't require de-prepare + */ + if (!cap->clk_stp1_deprep_required) + return 0; + + /* + * If the Slave requires de-prepare after exiting from Clock Stop + * mode 1, then check for the ClockStopPrepare bit in the SystemCtrl + * register; if it is 1, de-prepare the Slave from clock stop + * prepare, else return + */ + ret = sdw_check_for_prep_bit(slave); + /* If prepare bit is not set, return without error */ + if (!ret) + return 0; + + /* If error in reading register, return with error */ + if (ret < 0) + return ret; + + /* + * Call the pre clock stop prepare, if the Slave requires it. + */ + if (slave->driver && slave->driver->pre_clk_stop_prep) { + ret = slave->driver->pre_clk_stop_prep(slave, + cap->clock_stop1_mode_supported, false); + if (ret) { + dev_warn(&mstr->dev, "Pre de-prepare failed for Slave %d\n", + slave->slv_number); + return ret; + } + } + + sdw_prep_slave_for_clk_stp(slave->mstr, slave, + cap->clock_stop1_mode_supported, false); + + /* Make sure NF = 0 for the de-prepare to complete */ + ret = sdw_wait_for_deprepare(slave); + + /* Return if de-prepare was unsuccessful */ + if (ret) + return ret; + + if (slave->driver && slave->driver->post_clk_stop_prep) { + ret = slave->driver->post_clk_stop_prep(slave, + cap->clock_stop1_mode_supported, false); + + if (ret) + dev_err(&mstr->dev, "Post de-prepare failed for Slave %d\n", + slave->slv_number); + } + + return ret; +} + +static void handle_slave_status(struct kthread_work *work) +{ + int i, ret = 0; + struct sdw_slv_status *status, *__status__; + struct sdw_bus *bus = + container_of(work, struct sdw_bus, kwork); + struct sdw_master *mstr = bus->mstr; + unsigned long flags; + bool slave_present = false; + + /* Handle slaves newly attached to the bus: register each new + * slave with the bus. + */ + list_for_each_entry_safe(status, __status__, &bus->status_list, node) { + if (status->status[0] == SDW_SLAVE_STAT_ATTACHED_OK) { + ret += sdw_register_slave(mstr); + if (ret) + /* Even if adding a new slave fails, we will + * continue. + */ + dev_err(&mstr->dev, "Registering new slave failed\n"); + } + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + slave_present = false; + if (status->status[i] == SDW_SLAVE_STAT_NOT_PRESENT && + mstr->sdw_addr[i].assigned == true) { + /* A logical address was assigned to the slave, + * but now it is down, so mark it as not + * present + */ + mstr->sdw_addr[i].status = + SDW_SLAVE_STAT_NOT_PRESENT; + slave_present = true; + } + + else if (status->status[i] == SDW_SLAVE_STAT_ALERT && + mstr->sdw_addr[i].assigned == true) { + ret = 0; + /* Handle slave alerts */ + mstr->sdw_addr[i].status = SDW_SLAVE_STAT_ALERT; + ret = sdw_handle_slave_alerts(mstr, + mstr->sdw_addr[i].slave); + if (ret) + dev_err(&mstr->dev, "Handle slave alert failed for Slave %d\n", i); + + slave_present = true; + + + } else if (status->status[i] == + SDW_SLAVE_STAT_ATTACHED_OK && + mstr->sdw_addr[i].assigned == true) { + + sdw_prog_slv(mstr->sdw_addr[i].slave); + + mstr->sdw_addr[i].status = + SDW_SLAVE_STAT_ATTACHED_OK; + ret = sdw_slv_deprepare_clk_stp1( + mstr->sdw_addr[i].slave); + + /* + * If depreparing the Slave fails, there is no + * need to reprogram the Slave; this should + * never happen in the ideal case. 
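+ * The Slave is then skipped and no status callback is sent for it.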
+ */ + if (ret) + continue; + slave_present = true; + } + + if (!slave_present) + continue; + + sdw_send_slave_status(mstr->sdw_addr[i].slave, + &mstr->sdw_addr[i].status); + } + spin_lock_irqsave(&bus->spinlock, flags); + list_del(&status->node); + spin_unlock_irqrestore(&bus->spinlock, flags); + kfree(status); + } +} + +static int sdw_register_master(struct sdw_master *mstr) +{ + int ret = 0; + int i; + struct sdw_bus *sdw_bus; + + /* Can't register until after driver model init */ + if (unlikely(WARN_ON(!sdw_bus_type.p))) { + ret = -EAGAIN; + goto bus_init_not_done; + } + /* Sanity checks */ + if (unlikely(mstr->name[0] == '\0')) { + pr_err("sdw-core: Attempt to register a master with no name!\n"); + ret = -EINVAL; + goto mstr_no_name; + } + for (i = 0; i <= SOUNDWIRE_MAX_DEVICES; i++) + mstr->sdw_addr[i].slv_number = i; + + rt_mutex_init(&mstr->bus_lock); + INIT_LIST_HEAD(&mstr->slv_list); + INIT_LIST_HEAD(&mstr->mstr_rt_list); + + sdw_bus = kzalloc(sizeof(struct sdw_bus), GFP_KERNEL); + if (!sdw_bus) { + ret = -ENOMEM; + goto bus_alloc_failed; + } + sdw_bus->mstr = mstr; + init_completion(&sdw_bus->async_data.xfer_complete); + + mutex_lock(&sdw_core.core_lock); + list_add_tail(&sdw_bus->bus_node, &sdw_core.bus_list); + mutex_unlock(&sdw_core.core_lock); + + dev_set_name(&mstr->dev, "sdw-%d", mstr->nr); + mstr->dev.bus = &sdw_bus_type; + mstr->dev.type = &sdw_mstr_type; + + ret = device_register(&mstr->dev); + if (ret) + goto out_list; + kthread_init_worker(&sdw_bus->kworker); + sdw_bus->status_thread = kthread_run(kthread_worker_fn, + &sdw_bus->kworker, "%s", + dev_name(&mstr->dev)); + if (IS_ERR(sdw_bus->status_thread)) { + dev_err(&mstr->dev, "error: failed to create status message task\n"); + ret = PTR_ERR(sdw_bus->status_thread); + goto task_failed; + } + kthread_init_work(&sdw_bus->kwork, handle_slave_status); + INIT_LIST_HEAD(&sdw_bus->status_list); + spin_lock_init(&sdw_bus->spinlock); + ret = sdw_mstr_bw_init(sdw_bus); + if (ret) { + dev_err(&mstr->dev, "error: Failed to init mstr bw\n"); + goto mstr_bw_init_failed; + } + dev_dbg(&mstr->dev, "master [%s] registered\n", mstr->name); + + return 0; + +mstr_bw_init_failed: +task_failed: + device_unregister(&mstr->dev); +out_list: + mutex_lock(&sdw_core.core_lock); + list_del(&sdw_bus->bus_node); + mutex_unlock(&sdw_core.core_lock); + kfree(sdw_bus); +bus_alloc_failed: +mstr_no_name: +bus_init_not_done: + mutex_lock(&sdw_core.core_lock); + idr_remove(&sdw_core.idr, mstr->nr); + mutex_unlock(&sdw_core.core_lock); + return ret; +} + +/** + * sdw_master_update_slv_status: Report the status of slaves to the bus driver. + * The master calls this function, based on the + * interrupt it gets, once a slave changes its + * state. + * @mstr: Master handle for which status is reported. + * @status: Array of status of each slave. 
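+ * + * May be called from the Master's interrupt context: the status is + * copied under GFP_ATOMIC and processed later on the bus kthread, so + * no bus I/O happens in this call.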
+ */ +int sdw_master_update_slv_status(struct sdw_master *mstr, + struct sdw_status *status) +{ + struct sdw_bus *bus = NULL, *b; + struct sdw_slv_status *slv_status; + unsigned long flags; + + list_for_each_entry(b, &sdw_core.bus_list, bus_node) { + if (b->mstr == mstr) { + bus = b; + break; + } + } + /* This master is not registered with the bus driver */ + if (!bus) { + dev_info(&mstr->dev, "Master not registered with bus\n"); + return 0; + } + slv_status = kzalloc(sizeof(struct sdw_slv_status), GFP_ATOMIC); + if (!slv_status) + return -ENOMEM; + memcpy(slv_status->status, status, sizeof(struct sdw_status)); + + spin_lock_irqsave(&bus->spinlock, flags); + list_add_tail(&slv_status->node, &bus->status_list); + spin_unlock_irqrestore(&bus->spinlock, flags); + + kthread_queue_work(&bus->kworker, &bus->kwork); + return 0; +} +EXPORT_SYMBOL_GPL(sdw_master_update_slv_status); + +/** + * sdw_add_master_controller - declare sdw master, use dynamic bus number + * @mstr: the master to add + * Context: can sleep + * + * This routine is used to declare an sdw master when its bus number + * doesn't matter or when its bus number is specified by a DT alias. + * Examples of cases where the bus number doesn't matter: sdw masters + * dynamically added by USB links or PCI plugin cards. + * + * When this returns zero, a new bus number was allocated and stored + * in mstr->nr, and the specified master became available for slaves. + * Otherwise, a negative errno value is returned. + */ +int sdw_add_master_controller(struct sdw_master *mstr) +{ + int id; + + mutex_lock(&sdw_core.core_lock); + + id = idr_alloc(&sdw_core.idr, mstr, + sdw_core.first_dynamic_bus_num, 0, GFP_KERNEL); + mutex_unlock(&sdw_core.core_lock); + if (id < 0) + return id; + + mstr->nr = id; + + return sdw_register_master(mstr); +} +EXPORT_SYMBOL_GPL(sdw_add_master_controller); + +static void sdw_unregister_slave(struct sdw_slave *sdw_slv) +{ + + struct sdw_master *mstr; + + mstr = sdw_slv->mstr; + sdw_lock_mstr(mstr); + list_del(&sdw_slv->node); + sdw_unlock_mstr(mstr); + mstr->sdw_addr[sdw_slv->slv_number].assigned = false; + memset(mstr->sdw_addr[sdw_slv->slv_number].dev_id, 0x0, 6); + /* The slave structure is freed by sdw_slave_release() once the + * last reference to the device is dropped; do not free it here. + */ + device_unregister(&sdw_slv->dev); +} + +static int __unregister_slave(struct device *dev, void *dummy) +{ + struct sdw_slave *slave = sdw_slave_verify(dev); + + if (slave && strcmp(slave->name, "dummy")) + sdw_unregister_slave(slave); + return 0; +} + +/** + * sdw_del_master_controller - unregister SDW master + * @mstr: the master being unregistered + * Context: can sleep + * + * This unregisters an SDW master which was previously registered + * by @sdw_add_master_controller. + */ +void sdw_del_master_controller(struct sdw_master *mstr) +{ + struct sdw_master *found; + + /* First make sure that this master was ever added */ + mutex_lock(&sdw_core.core_lock); + found = idr_find(&sdw_core.idr, mstr->nr); + mutex_unlock(&sdw_core.core_lock); + + if (found != mstr) { + pr_debug("sdw-core: attempting to delete unregistered master [%s]\n", mstr->name); + return; + } + /* Detach any active slaves. This can't fail, thus we do not + * check the returned value. 
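+ * Slaves named "dummy" are deliberately skipped by + * __unregister_slave() above.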
+ */ + device_for_each_child(&mstr->dev, NULL, __unregister_slave); + + /* device name is gone after device_unregister */ + dev_dbg(&mstr->dev, "master [%s] unregistered\n", mstr->name); + + /* wait until all references to the device are gone + * + * FIXME: This is old code and should ideally be replaced by an + * alternative which results in decoupling the lifetime of the struct + * device from the sdw_master, like spi or netdev do. Any solution + * should be thoroughly tested with DEBUG_KOBJECT_RELEASE enabled! + */ + init_completion(&mstr->slv_released); + device_unregister(&mstr->dev); + wait_for_completion(&mstr->slv_released); + + /* free bus id */ + mutex_lock(&sdw_core.core_lock); + idr_remove(&sdw_core.idr, mstr->nr); + mutex_unlock(&sdw_core.core_lock); + + /* Clear the device structure in case this master is ever going to be + added again */ + memset(&mstr->dev, 0, sizeof(mstr->dev)); +} +EXPORT_SYMBOL_GPL(sdw_del_master_controller); + +/** + * sdw_slave_xfer_bra_block: Transfer the data block using the BTP/BRA + * protocol. + * @mstr: SoundWire Master handle + * @block: Data block to be transferred. + */ +int sdw_slave_xfer_bra_block(struct sdw_master *mstr, + struct sdw_bra_block *block) +{ + struct sdw_bus *sdw_mstr_bs = NULL; + struct sdw_mstr_driver *ops = NULL; + int ret; + + /* + * This API will be called by a slave/codec + * when it needs to transfer firmware to + * its memory or perform bulk read/writes of registers. + */ + + /* + * Acquire the core lock + * TODO: Acquire the Master lock inside the core lock, + * similar to the way it is done upstream; currently + * keeping it as the core lock + */ + mutex_lock(&sdw_core.core_lock); + + /* Get the master data structure */ + list_for_each_entry(sdw_mstr_bs, &sdw_core.bus_list, bus_node) { + /* Match the master structure pointer */ + if (sdw_mstr_bs->mstr == mstr) + break; + } + + /* + * Here the assumption is made that the complete SDW bandwidth is + * used by BRA. So the bus will return -EBUSY if any active stream + * is running on the given master. + * TODO: In the final implementation extra bandwidth will always be + * allocated for BRA. In that case all the computation of clock, + * frame shape and transport parameters for DP0 will be done + * considering the BRA feature. + */ + if (!list_empty(&mstr->mstr_rt_list)) { + + /* + * BRA is currently not allowed while there is an + * active stream on the master, so return -EBUSY + */ + + /* Release lock */ + mutex_unlock(&sdw_core.core_lock); + return -EBUSY; + } + + /* Get the master driver ops */ + ops = sdw_mstr_bs->mstr->driver; + + /* + * Check whether the Master supports bulk transfer. If not, the + * bus should fall back to performing the BRA request using the + * normal register read/write API. + * TODO: Currently, if the Master does not support BRA transfers, + * the bus returns an error. The bus driver should be extended to + * support normal register read/write as an alternate method. 
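+ * Until then, callers must be prepared to see -EINVAL from a Master + * that provides no xfer_bulk hook.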
+ */ + if (!ops->mstr_ops->xfer_bulk) { + ret = -EINVAL; + goto error; + } + + /* Data port Programming (ON) */ + ret = sdw_bus_bra_xport_config(sdw_mstr_bs, block, true); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Xport parameter config failed ret=%d\n", ret); + goto error; + } + + /* Bulk Setup */ + ret = ops->mstr_ops->xfer_bulk(mstr, block); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Transfer failed ret=%d\n", ret); + goto error; + } + + /* Data port Programming (OFF) */ + ret = sdw_bus_bra_xport_config(sdw_mstr_bs, block, false); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Xport parameter de-config failed ret=%d\n", ret); + goto error; + } + +error: + /* Release lock */ + mutex_unlock(&sdw_core.core_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(sdw_slave_xfer_bra_block); + +/* + * An sdw_mstr_driver is used with an sdw_master (controller) node that + * hosts the bus instance on which the sdw_slave devices live. + */ +int __sdw_mstr_driver_register(struct module *owner, + struct sdw_mstr_driver *driver) +{ + int res; + + /* Can't register until after driver model init */ + if (unlikely(WARN_ON(!sdw_bus_type.p))) + return -EAGAIN; + + /* add the driver to the list of sdw drivers in the driver core */ + driver->driver.owner = owner; + driver->driver.bus = &sdw_bus_type; + + /* When registration returns, the driver core + * will have called probe() for all matching-but-unbound devices. + */ + res = driver_register(&driver->driver); + if (res) + return res; + + pr_debug("sdw-core: driver [%s] registered\n", driver->driver.name); + + return 0; +} +EXPORT_SYMBOL_GPL(__sdw_mstr_driver_register); + +void sdw_mstr_driver_unregister(struct sdw_mstr_driver *driver) +{ + driver_unregister(&driver->driver); +} +EXPORT_SYMBOL_GPL(sdw_mstr_driver_unregister); + +void sdw_slave_driver_unregister(struct sdw_slave_driver *driver) +{ + driver_unregister(&driver->driver); +} +EXPORT_SYMBOL_GPL(sdw_slave_driver_unregister); + +/* + * An sdw_driver is used with one or more sdw_slave (slave) nodes to access + * sdw slave chips, on a bus instance associated with some sdw_master. + */ +int __sdw_slave_driver_register(struct module *owner, + struct sdw_slave_driver *driver) +{ + int res; + /* Can't register until after driver model init */ + if (unlikely(WARN_ON(!sdw_bus_type.p))) + return -EAGAIN; + + /* add the driver to the list of sdw drivers in the driver core */ + driver->driver.owner = owner; + driver->driver.bus = &sdw_bus_type; + + /* When registration returns, the driver core + * will have called probe() for all matching-but-unbound slaves. 
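+ * driver_register() can sleep, so slave drivers must register from + * process context.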
+ */ + res = driver_register(&driver->driver); + if (res) + return res; + pr_debug("sdw-core: driver [%s] registered\n", driver->driver.name); + + return 0; +} +EXPORT_SYMBOL_GPL(__sdw_slave_driver_register); + +int sdw_register_slave_capabilities(struct sdw_slave *sdw, + struct sdw_slv_capabilities *cap) +{ + struct sdw_slv_capabilities *slv_cap; + struct sdw_slv_dpn_capabilities *slv_dpn_cap, *dpn_cap; + struct port_audio_mode_properties *prop, *slv_prop; + int i, j; + int ret = 0; + + slv_cap = &sdw->sdw_slv_cap; + + slv_cap->wake_up_unavailable = cap->wake_up_unavailable; + slv_cap->test_mode_supported = cap->test_mode_supported; + slv_cap->clock_stop1_mode_supported = cap->clock_stop1_mode_supported; + slv_cap->simplified_clock_stop_prepare = + cap->simplified_clock_stop_prepare; + slv_cap->scp_impl_def_intr_mask = cap->scp_impl_def_intr_mask; + + slv_cap->highphy_capable = cap->highphy_capable; + slv_cap->paging_supported = cap->paging_supported; + slv_cap->bank_delay_support = cap->bank_delay_support; + slv_cap->port_15_read_behavior = cap->port_15_read_behavior; + slv_cap->sdw_dp0_supported = cap->sdw_dp0_supported; + slv_cap->num_of_sdw_ports = cap->num_of_sdw_ports; + slv_cap->sdw_dpn_cap = devm_kzalloc(&sdw->dev, + ((sizeof(struct sdw_slv_dpn_capabilities)) * + cap->num_of_sdw_ports), GFP_KERNEL); + if (!slv_cap->sdw_dpn_cap) + return -ENOMEM; + + for (i = 0; i < cap->num_of_sdw_ports; i++) { + dpn_cap = &cap->sdw_dpn_cap[i]; + slv_dpn_cap = &slv_cap->sdw_dpn_cap[i]; + slv_dpn_cap->port_direction = dpn_cap->port_direction; + slv_dpn_cap->port_number = dpn_cap->port_number; + slv_dpn_cap->max_word_length = dpn_cap->max_word_length; + slv_dpn_cap->min_word_length = dpn_cap->min_word_length; + slv_dpn_cap->num_word_length = dpn_cap->num_word_length; + if (NULL == dpn_cap->word_length_buffer) + slv_dpn_cap->word_length_buffer = + dpn_cap->word_length_buffer; + else { + slv_dpn_cap->word_length_buffer = + devm_kzalloc(&sdw->dev, + dpn_cap->num_word_length * + (sizeof(unsigned int)), GFP_KERNEL); + if (!slv_dpn_cap->word_length_buffer) + return -ENOMEM; + memcpy(slv_dpn_cap->word_length_buffer, + dpn_cap->word_length_buffer, + dpn_cap->num_word_length * + (sizeof(unsigned int))); + } + slv_dpn_cap->dpn_type = dpn_cap->dpn_type; + slv_dpn_cap->dpn_grouping = dpn_cap->dpn_grouping; + slv_dpn_cap->prepare_ch = dpn_cap->prepare_ch; + slv_dpn_cap->imp_def_intr_mask = dpn_cap->imp_def_intr_mask; + slv_dpn_cap->min_ch_num = dpn_cap->min_ch_num; + slv_dpn_cap->max_ch_num = dpn_cap->max_ch_num; + slv_dpn_cap->num_ch_supported = dpn_cap->num_ch_supported; + if (NULL == dpn_cap->ch_supported) + slv_dpn_cap->ch_supported = dpn_cap->ch_supported; + else { + slv_dpn_cap->ch_supported = + devm_kzalloc(&sdw->dev, + dpn_cap->num_ch_supported * + (sizeof(unsigned int)), GFP_KERNEL); + if (!slv_dpn_cap->ch_supported) + return -ENOMEM; + memcpy(slv_dpn_cap->ch_supported, + dpn_cap->ch_supported, + dpn_cap->num_ch_supported * + (sizeof(unsigned int))); + } + slv_dpn_cap->port_flow_mode_mask = + dpn_cap->port_flow_mode_mask; + slv_dpn_cap->block_packing_mode_mask = + dpn_cap->block_packing_mode_mask; + slv_dpn_cap->port_encoding_type_mask = + dpn_cap->port_encoding_type_mask; + slv_dpn_cap->num_audio_modes = dpn_cap->num_audio_modes; + + slv_dpn_cap->mode_properties = devm_kzalloc(&sdw->dev, + ((sizeof(struct port_audio_mode_properties)) * + dpn_cap->num_audio_modes), GFP_KERNEL); + if (!slv_dpn_cap->mode_properties) + return -ENOMEM; + + for (j = 0; j < dpn_cap->num_audio_modes; j++) { + prop = &dpn_cap->mode_properties[j]; + slv_prop = &slv_dpn_cap->mode_properties[j]; + slv_prop->max_frequency = prop->max_frequency; + slv_prop->min_frequency = prop->min_frequency; + slv_prop->num_freq_configs = prop->num_freq_configs; + if (NULL == prop->freq_supported) + slv_prop->freq_supported = + prop->freq_supported; + else { + slv_prop->freq_supported = + devm_kzalloc(&sdw->dev, + prop->num_freq_configs * + (sizeof(unsigned int)), GFP_KERNEL); + if (!slv_prop->freq_supported) + return -ENOMEM; + memcpy(slv_prop->freq_supported, + prop->freq_supported, + prop->num_freq_configs * + (sizeof(unsigned int))); + } + slv_prop->glitchless_transitions_mask + = prop->glitchless_transitions_mask; + slv_prop->max_sampling_frequency = + prop->max_sampling_frequency; + slv_prop->min_sampling_frequency = + prop->min_sampling_frequency; + slv_prop->num_sampling_freq_configs = + prop->num_sampling_freq_configs; + if (NULL == prop->sampling_freq_config) + slv_prop->sampling_freq_config = + prop->sampling_freq_config; + else { + slv_prop->sampling_freq_config = + devm_kzalloc(&sdw->dev, + prop->num_sampling_freq_configs * + (sizeof(unsigned int)), GFP_KERNEL); + if (!slv_prop->sampling_freq_config) + return -ENOMEM; + memcpy(slv_prop->sampling_freq_config, + prop->sampling_freq_config, + prop->num_sampling_freq_configs * + (sizeof(unsigned int))); + } + + slv_prop->ch_prepare_behavior = + prop->ch_prepare_behavior; + } + } + sdw->slave_cap_updated = true; + + ret = sdw_prog_slv(sdw); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(sdw_register_slave_capabilities); + +static int sdw_get_stream_tag(char *key, int *stream_tag) +{ + int i; + int ret = -EINVAL; + struct sdw_runtime *sdw_rt; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + + /* If stream tag is already allocated return that after incrementing + * reference count. This is only possible if a key is provided. 
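+ * Streams sharing a key therefore also share a single stream tag + * and one sdw_runtime instance.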
+	 */
+	mutex_lock(&sdw_core.core_lock);
+	if (!key)
+		goto key_check_not_required;
+	for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) {
+		if (!(strcmp(stream_tags[i].key, key))) {
+			stream_tags[i].ref_count++;
+			*stream_tag = stream_tags[i].stream_tag;
+			mutex_unlock(&sdw_core.core_lock);
+			return 0;
+		}
+	}
+key_check_not_required:
+	for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) {
+		if (!stream_tags[i].ref_count) {
+			*stream_tag = stream_tags[i].stream_tag;
+			mutex_init(&stream_tags[i].stream_lock);
+			sdw_rt = kzalloc(sizeof(struct sdw_runtime),
+					GFP_KERNEL);
+			if (!sdw_rt) {
+				ret = -ENOMEM;
+				mutex_unlock(&sdw_core.core_lock);
+				goto out;
+			}
+			stream_tags[i].ref_count++;
+			INIT_LIST_HEAD(&sdw_rt->slv_rt_list);
+			INIT_LIST_HEAD(&sdw_rt->mstr_rt_list);
+			sdw_rt->stream_state = SDW_STATE_INIT_STREAM_TAG;
+			stream_tags[i].sdw_rt = sdw_rt;
+			if (key)
+				strlcpy(stream_tags[i].key, key,
+					SDW_MAX_STREAM_TAG_KEY_SIZE);
+			mutex_unlock(&sdw_core.core_lock);
+			return 0;
+		}
+	}
+	mutex_unlock(&sdw_core.core_lock);
+out:
+	return ret;
+}
+
+void sdw_release_stream_tag(int stream_tag)
+{
+	int i;
+	struct sdw_stream_tag *stream_tags = sdw_core.stream_tags;
+
+	mutex_lock(&sdw_core.core_lock);
+	for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) {
+		if (stream_tag == stream_tags[i].stream_tag) {
+			stream_tags[i].ref_count--;
+			if (stream_tags[i].ref_count == 0) {
+				kfree(stream_tags[i].sdw_rt);
+				memset(stream_tags[i].key, 0x0,
+					SDW_MAX_STREAM_TAG_KEY_SIZE);
+			}
+		}
+	}
+	mutex_unlock(&sdw_core.core_lock);
+}
+EXPORT_SYMBOL_GPL(sdw_release_stream_tag);
+
+/**
+ * sdw_alloc_stream_tag: Assign a stream tag for a unique stream
+ *			between master and slave devices. Normally the
+ *			master will request the stream tag for the stream
+ *			between master and slave device, and it programs
+ *			the same stream tag to the slave device. A stream
+ *			tag is unique for all the streams between masters
+ *			and slaves across SoCs.
+ * @guid: Group of the device port. All ports of a device that are
+ *	part of the same stream will have the same guid.
+ *
+ * @stream_tag: Stream tag returned by the bus driver.
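+ *
+ * A minimal usage sketch (illustrative):
+ *
+ *	int tag, ret;
+ *
+ *	ret = sdw_alloc_stream_tag(NULL, &tag);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	sdw_release_stream_tag(tag);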
+ */ +int sdw_alloc_stream_tag(char *guid, int *stream_tag) +{ + int ret = 0; + + ret = sdw_get_stream_tag(guid, stream_tag); + if (ret) { + pr_err("Stream tag assignment failed\n"); + goto out; + } + +out: + return ret; +} +EXPORT_SYMBOL_GPL(sdw_alloc_stream_tag); + +static struct sdw_mstr_runtime *sdw_get_mstr_rt(struct sdw_runtime *sdw_rt, + struct sdw_master *mstr) { + + struct sdw_mstr_runtime *mstr_rt; + int ret = 0; + + list_for_each_entry(mstr_rt, &sdw_rt->mstr_rt_list, mstr_sdw_node) { + if (mstr_rt->mstr == mstr) + return mstr_rt; + } + + /* Allocate sdw_mstr_runtime structure */ + mstr_rt = kzalloc(sizeof(struct sdw_mstr_runtime), GFP_KERNEL); + if (!mstr_rt) { + ret = -ENOMEM; + goto out; + } + + /* Initialize sdw_mstr_runtime structure */ + INIT_LIST_HEAD(&mstr_rt->port_rt_list); + INIT_LIST_HEAD(&mstr_rt->slv_rt_list); + list_add_tail(&mstr_rt->mstr_sdw_node, &sdw_rt->mstr_rt_list); + list_add_tail(&mstr_rt->mstr_node, &mstr->mstr_rt_list); + mstr_rt->rt_state = SDW_STATE_INIT_RT; + mstr_rt->mstr = mstr; +out: + return mstr_rt; +} + +static struct sdw_slave_runtime *sdw_config_slave_stream( + struct sdw_slave *slave, + struct sdw_stream_config *stream_config, + struct sdw_runtime *sdw_rt) +{ + struct sdw_slave_runtime *slv_rt; + int ret = 0; + struct sdw_stream_params *str_p; + + slv_rt = kzalloc(sizeof(struct sdw_slave_runtime), GFP_KERNEL); + if (!slv_rt) { + ret = -ENOMEM; + goto out; + } + slv_rt->slave = slave; + str_p = &slv_rt->stream_params; + slv_rt->direction = stream_config->direction; + slv_rt->rt_state = SDW_STATE_CONFIG_RT; + str_p->rate = stream_config->frame_rate; + str_p->channel_count = stream_config->channel_count; + str_p->bps = stream_config->bps; + INIT_LIST_HEAD(&slv_rt->port_rt_list); +out: + return slv_rt; +} + +static void sdw_release_mstr_stream(struct sdw_master *mstr, + struct sdw_runtime *sdw_rt) +{ + struct sdw_mstr_runtime *mstr_rt, *__mstr_rt; + struct sdw_port_runtime *port_rt, *__port_rt, *first_port_rt = NULL; + + list_for_each_entry_safe(mstr_rt, __mstr_rt, &sdw_rt->mstr_rt_list, + mstr_sdw_node) { + if (mstr_rt->mstr == mstr) { + + /* Get first runtime node from port list */ + first_port_rt = list_first_entry(&mstr_rt->port_rt_list, + struct sdw_port_runtime, + port_node); + + /* Release Master port resources */ + list_for_each_entry_safe(port_rt, __port_rt, + &mstr_rt->port_rt_list, port_node) + list_del(&port_rt->port_node); + + kfree(first_port_rt); + list_del(&mstr_rt->mstr_sdw_node); + if (mstr_rt->direction == SDW_DATA_DIR_OUT) + sdw_rt->tx_ref_count--; + else + sdw_rt->rx_ref_count--; + list_del(&mstr_rt->mstr_node); + pm_runtime_mark_last_busy(&mstr->dev); + pm_runtime_put_sync_autosuspend(&mstr->dev); + kfree(mstr_rt); + } + } +} + +static void sdw_release_slave_stream(struct sdw_slave *slave, + struct sdw_runtime *sdw_rt) +{ + struct sdw_slave_runtime *slv_rt, *__slv_rt; + struct sdw_port_runtime *port_rt, *__port_rt, *first_port_rt = NULL; + + list_for_each_entry_safe(slv_rt, __slv_rt, &sdw_rt->slv_rt_list, + slave_sdw_node) { + if (slv_rt->slave == slave) { + + /* Get first runtime node from port list */ + first_port_rt = list_first_entry(&slv_rt->port_rt_list, + struct sdw_port_runtime, + port_node); + + /* Release Slave port resources */ + list_for_each_entry_safe(port_rt, __port_rt, + &slv_rt->port_rt_list, port_node) + list_del(&port_rt->port_node); + + kfree(first_port_rt); + list_del(&slv_rt->slave_sdw_node); + if (slv_rt->direction == SDW_DATA_DIR_OUT) + sdw_rt->tx_ref_count--; + else + sdw_rt->rx_ref_count--; + 
pm_runtime_mark_last_busy(&slave->dev);
+			pm_runtime_put_sync_autosuspend(&slave->dev);
+			kfree(slv_rt);
+		}
+	}
+}
+
+/**
+ * sdw_release_stream: De-allocates the bandwidth allocated to
+ *			the stream. This is reference counted,
+ *			so the BW is de-allocated for the stream only
+ *			when the last reference is dropped. Normally
+ *			this will be called as part of hw_free.
+ *
+ * @mstr: Master handle
+ * @slave: SoundWire slave handle.
+ * @stream_tag: Unique stream tag identifier across SoCs for all soundwire
+ *		busses, for each audio stream between slaves. This stream
+ *		tag is allocated by the master driver for every stream
+ *		being opened.
+ */
+int sdw_release_stream(struct sdw_master *mstr,
+				struct sdw_slave *slave,
+				unsigned int stream_tag)
+{
+	int i;
+	struct sdw_runtime *sdw_rt = NULL;
+	struct sdw_stream_tag *stream_tags = sdw_core.stream_tags;
+
+	for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) {
+		if (stream_tags[i].stream_tag == stream_tag) {
+			sdw_rt = stream_tags[i].sdw_rt;
+			break;
+		}
+	}
+	if (!sdw_rt) {
+		dev_err(&mstr->dev, "Invalid stream tag\n");
+		return -EINVAL;
+	}
+	if (!slave)
+		sdw_release_mstr_stream(mstr, sdw_rt);
+	else
+		sdw_release_slave_stream(slave, sdw_rt);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sdw_release_stream);
+
+/**
+ * sdw_config_stream: Allocates the B/W on the soundwire bus
+ *			for transferring the data between slave and
+ *			master. This configures a single stream of data.
+ *			This will be called by the slave; the slave stream
+ *			configuration should match the master stream
+ *			configuration. Normally a slave would call this
+ *			as part of hw_params.
+ *
+ * @mstr: Master handle
+ * @slave: SoundWire slave handle.
+ * @stream_config: Stream configuration for the soundwire audio stream.
+ * @stream_tag: Unique stream tag identifier across the soundwire bus
+ *		for each audio stream between slaves and master.
+ *		This is something like stream_tag in the HDA protocol, but
+ *		here it is virtual rather than being embedded into the
+ *		protocol. Further, the same stream tag is also valid
+ *		across masters if some ports of the masters are
+ *		participating in stream aggregation. This is an input
+ *		parameter to the function.
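+ *
+ * An illustrative hw_params-time call (all field values hypothetical):
+ *
+ *	struct sdw_stream_config scfg = {
+ *		.frame_rate = 48000,
+ *		.channel_count = 2,
+ *		.bps = 24,
+ *		.direction = SDW_DATA_DIR_OUT,
+ *	};
+ *
+ *	ret = sdw_config_stream(mstr, slave, &scfg, stream_tag);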
+ */
+int sdw_config_stream(struct sdw_master *mstr,
+		struct sdw_slave *slave,
+		struct sdw_stream_config *stream_config,
+		unsigned int stream_tag)
+{
+	int i;
+	int ret = 0;
+	struct sdw_runtime *sdw_rt = NULL;
+	struct sdw_mstr_runtime *mstr_rt = NULL;
+	struct sdw_slave_runtime *slv_rt = NULL;
+	struct sdw_stream_tag *stream_tags = sdw_core.stream_tags;
+	struct sdw_stream_tag *stream = NULL;
+
+	for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) {
+		if (stream_tags[i].stream_tag == stream_tag) {
+			sdw_rt = stream_tags[i].sdw_rt;
+			stream = &stream_tags[i];
+			break;
+		}
+	}
+	if (!sdw_rt) {
+		dev_err(&mstr->dev, "Valid stream tag not found\n");
+		ret = -EINVAL;
+		goto out;
+	}
+	if (static_key_false(&sdw_trace_msg))
+		trace_sdw_config_stream(mstr, slave, stream_config,
+				stream_tag);
+
+	mutex_lock(&stream->stream_lock);
+
+	mstr_rt = sdw_get_mstr_rt(sdw_rt, mstr);
+	if (!mstr_rt) {
+		dev_err(&mstr->dev, "master runtime configuration failed\n");
+		ret = -EINVAL;
+		/* Drop the lock taken above before bailing out */
+		mutex_unlock(&stream->stream_lock);
+		goto out;
+	}
+
+	if (!slave) {
+		mstr_rt->direction = stream_config->direction;
+		mstr_rt->rt_state = SDW_STATE_CONFIG_RT;
+		sdw_rt->xport_state = SDW_STATE_ONLY_XPORT_STREAM;
+
+		mstr_rt->stream_params.rate = stream_config->frame_rate;
+		mstr_rt->stream_params.channel_count =
+				stream_config->channel_count;
+		mstr_rt->stream_params.bps = stream_config->bps;
+
+	} else {
+		slv_rt = sdw_config_slave_stream(slave,
+					stream_config, sdw_rt);
+		/* Bail out consistently with the other error paths */
+		if (!slv_rt) {
+			ret = -ENOMEM;
+			goto free_mem;
+		}
+	}
+	/* Stream params will be stored based on Tx only, since there can
+	 * be only one Tx and multiple Rx. There can be multiple Tx if
+	 * there is aggregation on Tx. That is handled by adding the
+	 * channels to stream_params for each aggregated Tx slave.
+	 */
+	if (!sdw_rt->tx_ref_count && stream_config->direction ==
+			SDW_DATA_DIR_OUT) {
+		sdw_rt->stream_params.rate = stream_config->frame_rate;
+		sdw_rt->stream_params.channel_count =
+				stream_config->channel_count;
+		sdw_rt->stream_params.bps = stream_config->bps;
+		sdw_rt->tx_ref_count++;
+	}
+
+	/* Normally there will be only one Tx in the system; multiple Tx
+	 * can only exist if we support aggregation. In that case
+	 * there may be multiple slaves or masters handling different
+	 * channels of the same Tx stream.
+	 */
+	else if (sdw_rt->tx_ref_count && stream_config->direction ==
+			SDW_DATA_DIR_OUT) {
+		if (sdw_rt->stream_params.rate !=
+				stream_config->frame_rate) {
+			dev_err(&mstr->dev, "Frame rate for aggregated devices not matching\n");
+			ret = -EINVAL;
+			goto free_mem;
+		}
+		if (sdw_rt->stream_params.bps != stream_config->bps) {
+			dev_err(&mstr->dev, "bps for aggregated devices not matching\n");
+			ret = -EINVAL;
+			goto free_mem;
+		}
+		/* The number of channels gets added, since the devices
+		 * will be supporting different channels, e.g. one codec
+		 * supporting the L and the other supporting the R channel.
+		 */
+		sdw_rt->stream_params.channel_count +=
+			stream_config->channel_count;
+		sdw_rt->tx_ref_count++;
+	} else
+		sdw_rt->rx_ref_count++;
+
+	sdw_rt->type = stream_config->type;
+	sdw_rt->stream_state = SDW_STATE_CONFIG_STREAM;
+
+	/* Slaves are added to two lists. This is because BW is calculated
+	 * for each master individually, while ports are enabled for all
+	 * the aggregated masters and slaves that are part of the same
+	 * stream tag simultaneously.
+	 */
+	if (slave) {
+		list_add_tail(&slv_rt->slave_sdw_node, &sdw_rt->slv_rt_list);
+		list_add_tail(&slv_rt->slave_node, &mstr_rt->slv_rt_list);
+	}
+	mutex_unlock(&stream->stream_lock);
+	if (slave)
+		pm_runtime_get_sync(&slave->dev);
+	else
+		pm_runtime_get_sync(&mstr->dev);
+	return ret;
+
+free_mem:
+	mutex_unlock(&stream->stream_lock);
+	kfree(mstr_rt);
+	kfree(slv_rt);
+out:
+	return ret;
+
+}
+EXPORT_SYMBOL_GPL(sdw_config_stream);
+
+/**
+ * sdw_chk_slv_dpn_caps - Returns 0 on success,
+ *			-EINVAL in case of error.
+ *
+ * This function checks all slave port capabilities
+ * against the given stream parameters. If any of the parameters
+ * is not supported per the port capabilities, it returns an
+ * error.
+ */
+int sdw_chk_slv_dpn_caps(struct sdw_slv_dpn_capabilities *dpn_cap,
+		struct sdw_stream_params *strm_prms)
+{
+	struct port_audio_mode_properties *mode_prop =
+			dpn_cap->mode_properties;
+	int ret = 0, i, value;
+
+	/* Check sampling frequency */
+	if (mode_prop->num_sampling_freq_configs) {
+		for (i = 0; i < mode_prop->num_sampling_freq_configs; i++) {
+
+			value = mode_prop->sampling_freq_config[i];
+			if (strm_prms->rate == value)
+				break;
+		}
+
+		if (i == mode_prop->num_sampling_freq_configs)
+			return -EINVAL;
+
+	} else {
+
+		if ((strm_prms->rate < mode_prop->min_sampling_frequency)
+				|| (strm_prms->rate >
+				mode_prop->max_sampling_frequency))
+			return -EINVAL;
+	}
+
+	/* Check word length (bps) */
+	if (dpn_cap->num_word_length) {
+		for (i = 0; i < dpn_cap->num_word_length; i++) {
+
+			value = dpn_cap->word_length_buffer[i];
+			if (strm_prms->bps == value)
+				break;
+		}
+
+		if (i == dpn_cap->num_word_length)
+			return -EINVAL;
+
+	} else {
+
+		if ((strm_prms->bps < dpn_cap->min_word_length)
+				|| (strm_prms->bps > dpn_cap->max_word_length))
+			return -EINVAL;
+	}
+
+	/* Check number of channels */
+	if (dpn_cap->num_ch_supported) {
+		for (i = 0; i < dpn_cap->num_ch_supported; i++) {
+
+			value = dpn_cap->ch_supported[i];
+			/* Compare the channel count, not bps */
+			if (strm_prms->channel_count == value)
+				break;
+		}
+
+		if (i == dpn_cap->num_ch_supported)
+			return -EINVAL;
+
+	} else {
+
+		if ((strm_prms->channel_count < dpn_cap->min_ch_num)
+			|| (strm_prms->channel_count > dpn_cap->max_ch_num))
+			return -EINVAL;
+	}
+
+	return ret;
+}
+
+/**
+ * sdw_chk_mstr_dpn_caps - Returns 0 on success,
+ *			-EINVAL in case of error.
+ *
+ * This function checks all master port capabilities
+ * against the given stream parameters. If any of the parameters
+ * is not supported per the port capabilities, it returns an
+ * error.
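+ *
+ * For example (illustrative): a 24 bit stream passes only if 24
+ * appears in word_length_buffer[] (or lies within [min_word_length,
+ * max_word_length]), and its channel count must satisfy the channel
+ * capabilities in the same way.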
+ */
+int sdw_chk_mstr_dpn_caps(struct sdw_mstr_dpn_capabilities *dpn_cap,
+		struct sdw_stream_params *strm_prms)
+{
+
+	int ret = 0, i, value;
+
+	/* Check word length (bps) */
+	if (dpn_cap->num_word_length) {
+		for (i = 0; i < dpn_cap->num_word_length; i++) {
+
+			value = dpn_cap->word_length_buffer[i];
+			if (strm_prms->bps == value)
+				break;
+		}
+
+		if (i == dpn_cap->num_word_length)
+			return -EINVAL;
+
+	} else {
+
+		if ((strm_prms->bps < dpn_cap->min_word_length)
+				|| (strm_prms->bps > dpn_cap->max_word_length))
+			return -EINVAL;
+	}
+
+	/* Check number of channels */
+	if (dpn_cap->num_ch_supported) {
+		for (i = 0; i < dpn_cap->num_ch_supported; i++) {
+
+			value = dpn_cap->ch_supported[i];
+			/* Compare the channel count, not bps */
+			if (strm_prms->channel_count == value)
+				break;
+		}
+
+		if (i == dpn_cap->num_ch_supported)
+			return -EINVAL;
+
+	} else {
+
+		if ((strm_prms->channel_count < dpn_cap->min_ch_num)
+			|| (strm_prms->channel_count > dpn_cap->max_ch_num))
+			return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int sdw_mstr_port_configuration(struct sdw_master *mstr,
+			struct sdw_runtime *sdw_rt,
+			struct sdw_port_config *port_config)
+{
+	struct sdw_mstr_runtime *mstr_rt;
+	struct sdw_port_runtime *port_rt;
+	int found = 0;
+	int i;
+	int ret = 0, pn = 0;
+	struct sdw_mstr_dpn_capabilities *dpn_cap =
+		mstr->mstr_capabilities.sdw_dpn_cap;
+
+	list_for_each_entry(mstr_rt, &sdw_rt->mstr_rt_list, mstr_sdw_node) {
+		if (mstr_rt->mstr == mstr) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		dev_err(&mstr->dev, "Master not found for this port\n");
+		return -EINVAL;
+	}
+
+	/* Check capabilities before allocating, to avoid a leak */
+	if (!dpn_cap)
+		return -EINVAL;
+
+	port_rt = kzalloc((sizeof(struct sdw_port_runtime)) *
+			port_config->num_ports, GFP_KERNEL);
+	if (!port_rt)
+		return -ENOMEM;
+
+	/*
+	 * Note: the assumption here is that no configuration is
+	 * received for port 0.
+	 */
+	for (i = 0; i < port_config->num_ports; i++) {
+		port_rt[i].channel_mask = port_config->port_cfg[i].ch_mask;
+		port_rt[i].port_num = pn = port_config->port_cfg[i].port_num;
+
+		/* Perform capability check for the master port */
+		ret = sdw_chk_mstr_dpn_caps(&dpn_cap[pn],
+				&mstr_rt->stream_params);
+		if (ret < 0) {
+			dev_err(&mstr->dev,
+				"Master capabilities check failed\n");
+			return -EINVAL;
+		}
+
+		list_add_tail(&port_rt[i].port_node, &mstr_rt->port_rt_list);
+	}
+
+	return ret;
+}
+
+static int sdw_slv_port_configuration(struct sdw_slave *slave,
+			struct sdw_runtime *sdw_rt,
+			struct sdw_port_config *port_config)
+{
+	struct sdw_slave_runtime *slv_rt;
+	struct sdw_port_runtime *port_rt;
+	struct sdw_slv_dpn_capabilities *dpn_cap =
+		slave->sdw_slv_cap.sdw_dpn_cap;
+	int found = 0, ret = 0;
+	int i, pn;
+
+	list_for_each_entry(slv_rt, &sdw_rt->slv_rt_list, slave_sdw_node) {
+		if (slv_rt->slave == slave) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		dev_err(&slave->mstr->dev, "Slave not found for this port\n");
+		return -EINVAL;
+	}
+
+	if (!slave->slave_cap_updated) {
+		dev_err(&slave->mstr->dev, "Slave capabilities not updated\n");
+		return -EINVAL;
+	}
+
+	port_rt = kzalloc((sizeof(struct sdw_port_runtime)) *
+			port_config->num_ports, GFP_KERNEL);
+	if (!port_rt)
+		return -ENOMEM;
+
+	for (i = 0; i < port_config->num_ports; i++) {
+		port_rt[i].channel_mask = port_config->port_cfg[i].ch_mask;
+		port_rt[i].port_num = pn = port_config->port_cfg[i].port_num;
+
+		/* Perform capability check for the slave port */
+		ret = sdw_chk_slv_dpn_caps(&dpn_cap[pn],
+				&slv_rt->stream_params);
+		if (ret < 0) {
+			dev_err(&slave->mstr->dev,
+				"Slave capabilities check failed\n");
+			return -EINVAL;
+		}
+
+		list_add_tail(&port_rt[i].port_node, &slv_rt->port_rt_list);
+	}
+
+	return ret;
+}
+
+/**
+ * sdw_config_port: Port configuration for SoundWire. Multiple
+ *			SoundWire ports may form a single stream, e.g. two
+ *			ports each transferring/receiving mono channels
+ *			form a single stream with stereo channels.
+ *			There will be a single ASoC DAI representing
+ *			both ports, so the stream configuration will be
+ *			stereo, but both ports will be configured
+ *			for mono channels, each with a different channel
+ *			mask. This is used to program the ports w.r.t. the
+ *			stream params. No de-configuration is needed,
+ *			since these are automatically destroyed once the
+ *			stream gets destroyed.
+ * @mstr: Master handle where the slave is connected.
+ * @slave: Slave handle.
+ * @port_config: Port configuration for each port of the soundwire slave.
+ * @stream_tag: Stream tag, where this port is connected.
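+ *
+ * Illustrative sketch (the port_cfg element type name is assumed
+ * here; field names follow the code above): a stereo stream carried
+ * over two mono ports, one per channel:
+ *
+ *	struct sdw_port_cfg cfg[2] = {
+ *		{ .port_num = 1, .ch_mask = 0x1 },	// left
+ *		{ .port_num = 2, .ch_mask = 0x2 },	// right
+ *	};
+ *	struct sdw_port_config pconfig = {
+ *		.num_ports = 2,
+ *		.port_cfg = cfg,
+ *	};
+ *
+ *	ret = sdw_config_port(mstr, slave, &pconfig, stream_tag);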
+ * + */ +int sdw_config_port(struct sdw_master *mstr, + struct sdw_slave *slave, + struct sdw_port_config *port_config, + unsigned int stream_tag) +{ + int ret = 0; + int i; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + struct sdw_runtime *sdw_rt = NULL; + struct sdw_stream_tag *stream = NULL; + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tags[i].stream_tag == stream_tag) { + sdw_rt = stream_tags[i].sdw_rt; + stream = &stream_tags[i]; + break; + } + } + + if (!sdw_rt) { + dev_err(&mstr->dev, "Invalid stream tag\n"); + return -EINVAL; + } + + if (static_key_false(&sdw_trace_msg)) { + int i; + + for (i = 0; i < port_config->num_ports; i++) { + trace_sdw_config_port(mstr, slave, + &port_config->port_cfg[i], stream_tag); + } + } + + mutex_lock(&stream->stream_lock); + + if (!slave) + ret = sdw_mstr_port_configuration(mstr, sdw_rt, port_config); + else + ret = sdw_slv_port_configuration(slave, sdw_rt, port_config); + + mutex_unlock(&stream->stream_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(sdw_config_port); + +int sdw_prepare_and_enable(int stream_tag, bool enable) +{ + + int i, ret = 0; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + struct sdw_stream_tag *stream = NULL; + + mutex_lock(&sdw_core.core_lock); + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tag == stream_tags[i].stream_tag) { + stream = &stream_tags[i]; + break; + } + } + if (stream == NULL) { + mutex_unlock(&sdw_core.core_lock); + WARN_ON(1); /* Return from here after unlocking core*/ + return -EINVAL; + } + mutex_lock(&stream->stream_lock); + ret = sdw_bus_calc_bw(&stream_tags[i], enable); + if (ret) + pr_err("Bandwidth allocation failed\n"); + + mutex_unlock(&stream->stream_lock); + mutex_unlock(&sdw_core.core_lock); + return ret; +} +EXPORT_SYMBOL_GPL(sdw_prepare_and_enable); + +int sdw_disable_and_unprepare(int stream_tag, bool unprepare) +{ + int i, ret = 0; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + struct sdw_stream_tag *stream = NULL; + + mutex_lock(&sdw_core.core_lock); + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tag == stream_tags[i].stream_tag) { + stream = &stream_tags[i]; + break; + } + } + if (stream == NULL) { + mutex_unlock(&sdw_core.core_lock); + WARN_ON(1); /* Return from here after unlocking core*/ + return -EINVAL; + } + mutex_lock(&stream->stream_lock); + ret = sdw_bus_calc_bw_dis(&stream_tags[i], unprepare); + if (ret) + pr_err("Bandwidth de-allocation failed\n"); + + mutex_unlock(&stream->stream_lock); + + mutex_unlock(&sdw_core.core_lock); + return ret; +} +EXPORT_SYMBOL_GPL(sdw_disable_and_unprepare); + +int sdw_stop_clock(struct sdw_master *mstr, enum sdw_clk_stop_mode mode) +{ + int ret = 0, i; + struct sdw_msg msg; + u8 buf[1] = {0}; + int slave_present = 0; + + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (mstr->sdw_addr[i].assigned && + mstr->sdw_addr[i].status != + SDW_SLAVE_STAT_NOT_PRESENT) + slave_present = 1; + } + + /* Send Broadcast message to the SCP_ctrl register with + * clock stop now + */ + msg.ssp_tag = 1; + msg.flag = SDW_MSG_FLAG_WRITE; + msg.addr = SDW_SCP_CTRL; + msg.len = 1; + buf[0] |= 0x1 << SDW_SCP_CTRL_CLK_STP_NOW_SHIFT; + msg.buf = buf; + msg.slave_addr = 15; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + if (ret != 1 && slave_present) { + dev_err(&mstr->dev, "Failed to stop clk\n"); + return -EBUSY; + } + /* If we are entering clock stop mode1, mark all the slaves un-attached. 
+	 */
+	if (mode == SDW_CLOCK_STOP_MODE_1) {
+		for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) {
+			if (mstr->sdw_addr[i].assigned)
+				mstr->sdw_addr[i].status =
+						SDW_SLAVE_STAT_NOT_PRESENT;
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sdw_stop_clock);
+
+int sdw_wait_for_slave_enumeration(struct sdw_master *mstr,
+			struct sdw_slave *slave)
+{
+	int timeout = 0;
+
+	/* Wait till the device gets enumerated. Wait for 2 seconds
+	 * before giving up.
+	 */
+	do {
+		msleep(100);
+		timeout++;
+	} while ((slave->slv_addr->status == SDW_SLAVE_STAT_NOT_PRESENT) &&
+			timeout < 20);
+
+	if (slave->slv_addr->status == SDW_SLAVE_STAT_NOT_PRESENT)
+		return -EBUSY;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sdw_wait_for_slave_enumeration);
+
+static enum sdw_clk_stop_mode sdw_get_clk_stp_mode(struct sdw_slave *slave)
+{
+	enum sdw_clk_stop_mode clock_stop_mode = SDW_CLOCK_STOP_MODE_0;
+	struct sdw_slv_capabilities *cap = &slave->sdw_slv_cap;
+
+	if (!slave->driver)
+		return clock_stop_mode;
+	/*
+	 * Get the dynamic value of clock stop from the Slave driver
+	 * if supported, else use the static value from the
+	 * capabilities register. Also update the capabilities
+	 * if we have a new dynamic value.
+	 */
+	if (slave->driver->get_dyn_clk_stp_mod) {
+		clock_stop_mode = slave->driver->get_dyn_clk_stp_mod(slave);
+
+		if (clock_stop_mode == SDW_CLOCK_STOP_MODE_1)
+			cap->clock_stop1_mode_supported = true;
+		else
+			cap->clock_stop1_mode_supported = false;
+	} else
+		clock_stop_mode = cap->clock_stop1_mode_supported;
+
+	return clock_stop_mode;
+}
+
+/**
+ * sdw_master_stop_clock: Stop the clock. This function broadcasts the
+ *			SCP_CTRL register with the clock_stop_now bit set.
+ *
+ * @mstr: Master handle for which the clock has to be stopped.
+ *
+ * Returns 0 on success, appropriate error code on failure.
+ */
+int sdw_master_stop_clock(struct sdw_master *mstr)
+{
+	int ret = 0, i;
+	struct sdw_msg msg;
+	u8 buf[1] = {0};
+	enum sdw_clk_stop_mode mode;
+
+	/* Send a broadcast message to the SCP_ctrl register with
+	 * clock stop now. If none of the Slaves are attached, there
+	 * may not be an ACK; flag the error about the ACK not being
+	 * received, but the clock will still be stopped.
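+	 *
+	 * (Illustrative suspend-path ordering using the APIs in this
+	 * file: call sdw_master_prep_for_clk_stop() first, then this
+	 * function.)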
+ */ + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_WRITE; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = SDW_SLAVE_BDCAST_ADDR; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_CTRL; + buf[0] |= 0x1 << SDW_SCP_CTRL_CLK_STP_NOW_SHIFT; + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + + /* Even if broadcast fails, we stop the clock and flag error */ + if (ret != 1) + dev_err(&mstr->dev, "ClockStopNow Broadcast message failed\n"); + + /* + * Mark all Slaves as un-attached which are entering clock stop + * mode1 + */ + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + + if (!mstr->sdw_addr[i].assigned) + continue; + + /* Get clock stop mode for all Slaves */ + mode = sdw_get_clk_stp_mode(mstr->sdw_addr[i].slave); + if (mode == SDW_CLOCK_STOP_MODE_0) + continue; + + /* If clock stop mode 1, mark Slave as not present */ + mstr->sdw_addr[i].status = SDW_SLAVE_STAT_NOT_PRESENT; + } + return 0; +} +EXPORT_SYMBOL_GPL(sdw_master_stop_clock); + +static struct sdw_slave *get_slave_for_prep_deprep(struct sdw_master *mstr, + int *slave_index) +{ + int i; + + for (i = *slave_index; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (mstr->sdw_addr[i].assigned != true) + continue; + + if (mstr->sdw_addr[i].status == SDW_SLAVE_STAT_NOT_PRESENT) + continue; + + *slave_index = i + 1; + return mstr->sdw_addr[i].slave; + } + return NULL; +} + +/* + * Wait till clock stop prepare/deprepare is finished. Prepare for all + * mode, De-prepare only for the Slaves resuming from clock stop mode 0 + */ +static void sdw_wait_for_clk_prep(struct sdw_master *mstr) +{ + int ret; + struct sdw_msg msg; + u8 buf[1] = {0}; + int timeout = 0; + + /* Create message to read clock stop status, its broadcast message. */ + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_READ; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = SDW_SLAVE_BDCAST_ADDR; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_STAT; + buf[0] = 0xFF; + /* + * Once all the Slaves are written with prepare bit, + * we go ahead and broadcast the read message for the + * SCP_STAT register to read the ClockStopNotFinished bit + * Read till we get this a 0. Currently we have timeout of 1sec + * before giving up. Even if its not read as 0 after timeout, + * controller can stop the clock after warning. + */ + do { + /* + * Ideally this should not fail, but even if it fails + * in exceptional situation, we go ahead for clock stop + */ + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + + if (ret != 1) { + WARN_ONCE(1, "Clock stop status read failed\n"); + break; + } + + if (!(buf[0] & SDW_SCP_STAT_CLK_STP_NF_MASK)) + break; + + /* + * TODO: Need to find from spec what is requirement. + * Since we are in suspend we should not sleep for more + * Ideally Slave should be ready to stop clock in less than + * few ms. + * So sleep less and increase loop time. This is not + * harmful, since if Slave is ready loop will terminate. + * + */ + msleep(2); + timeout++; + + } while (timeout != 500); + + if (!(buf[0] & SDW_SCP_STAT_CLK_STP_NF_MASK)) + + dev_info(&mstr->dev, "Clock stop prepare done\n"); + else + WARN_ONCE(1, "Some Slaves prepare un-successful\n"); +} + +/** + * sdw_master_prep_for_clk_stop: Prepare all the Slaves for clock stop. + * Iterate through each of the enumerated Slave. + * Prepare each Slave according to the clock stop + * mode supported by Slave. Use dynamic value from + * Slave callback if registered, else use static values + * from Slave capabilities registered. + * 1. Get clock stop mode for each Slave. + * 2. 
Call pre_prepare callback of each Slave if + * registered. + * 3. Prepare each Slave for clock stop + * 4. Broadcast the Read message to make sure + * all Slaves are prepared for clock stop. + * 5. Call post_prepare callback of each Slave if + * registered. + * + * @mstr: Master handle for which clock state has to be changed. + * + * Returns 0 + */ +int sdw_master_prep_for_clk_stop(struct sdw_master *mstr) +{ + struct sdw_slv_capabilities *cap; + enum sdw_clk_stop_mode clock_stop_mode; + int ret = 0; + struct sdw_slave *slave = NULL; + int slv_index = 1; + + /* + * Get all the Slaves registered to the master driver for preparing + * for clock stop. Start from Slave with logical address as 1. + */ + while ((slave = get_slave_for_prep_deprep(mstr, &slv_index)) != NULL) { + + cap = &slave->sdw_slv_cap; + + clock_stop_mode = sdw_get_clk_stp_mode(slave); + + /* + * Call the pre clock stop prepare, if Slave requires. + */ + if (slave->driver && slave->driver->pre_clk_stop_prep) { + ret = slave->driver->pre_clk_stop_prep(slave, + clock_stop_mode, true); + + /* If it fails we still continue */ + if (ret) + dev_warn(&mstr->dev, "Pre prepare failed for Slave %d\n", + slave->slv_number); + } + + sdw_prep_slave_for_clk_stp(mstr, slave, clock_stop_mode, true); + } + + /* Wait till prepare for all Slaves is finished */ + /* + * We should continue even if the prepare fails. Clock stop + * prepare failure on Slaves, should not impact the broadcasting + * of ClockStopNow. + */ + sdw_wait_for_clk_prep(mstr); + + slv_index = 1; + while ((slave = get_slave_for_prep_deprep(mstr, &slv_index)) != NULL) { + + cap = &slave->sdw_slv_cap; + + clock_stop_mode = sdw_get_clk_stp_mode(slave); + + if (slave->driver && slave->driver->post_clk_stop_prep) { + ret = slave->driver->post_clk_stop_prep(slave, + clock_stop_mode, + true); + /* + * Even if Slave fails we continue with other + * Slaves. This should never happen ideally. + */ + if (ret) + dev_err(&mstr->dev, "Post prepare failed for Slave %d\n", + slave->slv_number); + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(sdw_master_prep_for_clk_stop); + +/** + * sdw_mstr_deprep_after_clk_start: De-prepare all the Slaves + * exiting clock stop mode 0 after clock resumes. Clock + * is already resumed before this. De-prepare all the Slaves + * which were earlier in ClockStop mode0. De-prepare for the + * Slaves which were there in ClockStop mode1 is done after + * they enumerated back. Its not done here as part of master + * getting resumed. + * 1. Get clock stop mode for each Slave its exiting from + * 2. Call pre_prepare callback of each Slave exiting from + * clock stop mode 0. + * 3. De-Prepare each Slave exiting from Clock Stop mode0 + * 4. Broadcast the Read message to make sure + * all Slaves are de-prepared for clock stop. + * 5. 
Call post_prepare callback of each Slave exiting from + * clock stop mode0 + * + * + * @mstr: Master handle + * + * Returns 0 + */ +int sdw_mstr_deprep_after_clk_start(struct sdw_master *mstr) +{ + struct sdw_slv_capabilities *cap; + enum sdw_clk_stop_mode clock_stop_mode; + int ret = 0; + struct sdw_slave *slave = NULL; + /* We are preparing for stop */ + bool stop = false; + int slv_index = 1; + + while ((slave = get_slave_for_prep_deprep(mstr, &slv_index)) != NULL) { + + cap = &slave->sdw_slv_cap; + + /* Get the clock stop mode from which Slave is exiting */ + clock_stop_mode = sdw_get_clk_stp_mode(slave); + + /* + * Slave is exiting from Clock stop mode 1, De-prepare + * is optional based on capability, and it has to be done + * after Slave is enumerated. So nothing to be done + * here. + */ + if (clock_stop_mode == SDW_CLOCK_STOP_MODE_1) + continue; + /* + * Call the pre clock stop prepare, if Slave requires. + */ + if (slave->driver && slave->driver->pre_clk_stop_prep) + ret = slave->driver->pre_clk_stop_prep(slave, + clock_stop_mode, false); + + /* If it fails we still continue */ + if (ret) + dev_warn(&mstr->dev, "Pre de-prepare failed for Slave %d\n", + slave->slv_number); + + sdw_prep_slave_for_clk_stp(mstr, slave, clock_stop_mode, false); + } + + /* + * Wait till prepare is finished for all the Slaves. + */ + sdw_wait_for_clk_prep(mstr); + + slv_index = 1; + while ((slave = get_slave_for_prep_deprep(mstr, &slv_index)) != NULL) { + + cap = &slave->sdw_slv_cap; + + clock_stop_mode = sdw_get_clk_stp_mode(slave); + + /* + * Slave is exiting from Clock stop mode 1, De-prepare + * is optional based on capability, and it has to be done + * after Slave is enumerated. + */ + if (clock_stop_mode == SDW_CLOCK_STOP_MODE_1) + continue; + + if (slave->driver && slave->driver->post_clk_stop_prep) + ret = slave->driver->post_clk_stop_prep(slave, + clock_stop_mode, + stop); + /* + * Even if Slave fails we continue with other + * Slaves. This should never happen ideally. 
+ */ + if (ret) + dev_err(&mstr->dev, "Post de-prepare failed for Slave %d\n", + slave->slv_number); + } + return 0; +} +EXPORT_SYMBOL_GPL(sdw_mstr_deprep_after_clk_start); + + +struct sdw_master *sdw_get_master(int nr) +{ + struct sdw_master *master; + + mutex_lock(&sdw_core.core_lock); + master = idr_find(&sdw_core.idr, nr); + if (master && !try_module_get(master->owner)) + master = NULL; + mutex_unlock(&sdw_core.core_lock); + + return master; +} +EXPORT_SYMBOL_GPL(sdw_get_master); + +void sdw_put_master(struct sdw_master *mstr) +{ + if (mstr) + module_put(mstr->owner); +} +EXPORT_SYMBOL_GPL(sdw_put_master); + +static void sdw_exit(void) +{ + device_unregister(&sdw_slv); + bus_unregister(&sdw_bus_type); +} + +static int sdw_init(void) +{ + int retval; + int i; + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) + sdw_core.stream_tags[i].stream_tag = i; + mutex_init(&sdw_core.core_lock); + INIT_LIST_HEAD(&sdw_core.bus_list); + idr_init(&sdw_core.idr); + retval = bus_register(&sdw_bus_type); + + if (!retval) + retval = device_register(&sdw_slv); + + + if (retval) + bus_unregister(&sdw_bus_type); + + retval = sdw_bus_bw_init(); + if (retval) { + device_unregister(&sdw_slv); + bus_unregister(&sdw_bus_type); + } + + return retval; +} +postcore_initcall(sdw_init); +module_exit(sdw_exit); + +MODULE_AUTHOR("Hardik Shah "); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION("0.1"); +MODULE_DESCRIPTION("SoundWire bus driver"); +MODULE_ALIAS("platform:soundwire"); diff --git a/drivers/sdw/sdw_bwcalc.c b/drivers/sdw/sdw_bwcalc.c new file mode 100644 index 000000000000..7ebb26756f59 --- /dev/null +++ b/drivers/sdw/sdw_bwcalc.c @@ -0,0 +1,3097 @@ +/* + * sdw_bwcalc.c - SoundWire Bus BW calculation & CHN Enabling implementation + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Sanyog Kale + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ +#include +#include +#include +#include "sdw_priv.h" +#include +#include + + + +#ifndef CONFIG_SND_SOC_SVFPGA /* Original */ +#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA) +int rows[MAX_NUM_ROWS] = {48, 50, 60, 64, 72, 75, 80, 90, + 96, 125, 144, 147, 100, 120, 128, 150, + 160, 180, 192, 200, 240, 250, 256}; +#define SDW_DEFAULT_SSP 50 +#else +int rows[MAX_NUM_ROWS] = {125, 64, 48, 50, 60, 72, 75, 80, 90, + 96, 144, 147, 100, 120, 128, 150, + 160, 180, 192, 200, 240, 250, 256}; +#define SDW_DEFAULT_SSP 24 +#endif /* IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA) */ + +int cols[MAX_NUM_COLS] = {2, 4, 6, 8, 10, 12, 14, 16}; + +#else +/* For PDM Capture, frameshape used is 50x10 */ +int rows[MAX_NUM_ROWS] = {50, 100, 48, 60, 64, 72, 75, 80, 90, + 96, 125, 144, 147, 120, 128, 150, + 160, 180, 192, 200, 240, 250, 256}; + +int cols[MAX_NUM_COLS] = {10, 2, 4, 6, 8, 12, 14, 16}; +#define SDW_DEFAULT_SSP 50 +#endif + +/* + * TBD: Get supported clock frequency from ACPI and store + * it in master data structure. 
+ */
+#define MAXCLOCKDIVS 1
+int clock_div[MAXCLOCKDIVS] = {1};
+
+struct sdw_num_to_col sdw_num_col_mapping[MAX_NUM_COLS] = {
+	{0, 2}, {1, 4}, {2, 6}, {3, 8}, {4, 10}, {5, 12}, {6, 14}, {7, 16},
+};
+
+struct sdw_num_to_row sdw_num_row_mapping[MAX_NUM_ROWS] = {
+	{0, 48}, {1, 50}, {2, 60}, {3, 64}, {4, 75}, {5, 80}, {6, 125},
+	{7, 147}, {8, 96}, {9, 100}, {10, 120}, {11, 128}, {12, 150},
+	{13, 160}, {14, 250}, {16, 192}, {17, 200}, {18, 240}, {19, 256},
+	{20, 72}, {21, 144}, {22, 90}, {23, 180},
+};
+
+/**
+ * sdw_bus_bw_init - returns Success
+ *
+ *
+ * This function is called from the sdw_init function when the bus
+ * driver gets initialized. It performs all the generic
+ * initializations required for BW control.
+ */
+int sdw_bus_bw_init(void)
+{
+	int r, c, rowcolcount = 0;
+	int control_bits = 48;
+
+	for (c = 0; c < MAX_NUM_COLS; c++) {
+
+		for (r = 0; r < MAX_NUM_ROWS; r++) {
+			sdw_core.rowcolcomb[rowcolcount].col = cols[c];
+			sdw_core.rowcolcomb[rowcolcount].row = rows[r];
+			sdw_core.rowcolcomb[rowcolcount].control_bits =
+					control_bits;
+			sdw_core.rowcolcomb[rowcolcount].data_bits =
+					(cols[c] * rows[r]) - control_bits;
+			rowcolcount++;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sdw_bus_bw_init);
+
+
+/**
+ * sdw_mstr_bw_init - returns Success
+ *
+ *
+ * This function is called from the sdw_register_master function
+ * for each master controller that gets registered. It performs
+ * all the initializations per master controller required for BW
+ * control.
+ */
+int sdw_mstr_bw_init(struct sdw_bus *sdw_bs)
+{
+	struct sdw_master_capabilities *sdw_mstr_cap = NULL;
+
+	/* Initialize required parameters in bus structure */
+	sdw_bs->bandwidth = 0;
+	sdw_bs->system_interval = 0;
+	sdw_bs->frame_freq = 0;
+	sdw_bs->clk_state = SDW_CLK_STATE_ON;
+	sdw_mstr_cap = &sdw_bs->mstr->mstr_capabilities;
+	sdw_bs->clk_freq = (sdw_mstr_cap->base_clk_freq * 2);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sdw_mstr_bw_init);
+
+
+/**
+ * sdw_get_col_to_num
+ *
+ * Returns the column number from the mapping.
+ */
+int sdw_get_col_to_num(int col)
+{
+	int i;
+
+	for (i = 0; i < MAX_NUM_COLS; i++) {
+		if (sdw_num_col_mapping[i].col == col)
+			return sdw_num_col_mapping[i].num;
+	}
+
+	return 0; /* Lowest Column number = 2 */
+}
+
+
+/**
+ * sdw_get_row_to_num
+ *
+ * Returns the row number from the mapping.
+ */
+int sdw_get_row_to_num(int row)
+{
+	int i;
+
+	for (i = 0; i < MAX_NUM_ROWS; i++) {
+		if (sdw_num_row_mapping[i].row == row)
+			return sdw_num_row_mapping[i].num;
+	}
+
+	return 0; /* Lowest Row number = 48 */
+}
+
+/*
+ * sdw_lcm - returns the LCM of two numbers
+ *
+ *
+ * This function is called by the BW calculation code to find the LCM
+ * of two numbers.
+ */
+int sdw_lcm(int num1, int num2)
+{
+	int max;
+
+	/* maximum value is stored in variable max */
+	max = (num1 > num2) ? num1 : num2;
+
+	while (1) {
+		if (max%num1 == 0 && max%num2 == 0)
+			break;
+		++max;
+	}
+
+	return max;
+}
+
+
+/*
+ * sdw_cfg_slv_params - returns Success
+ *			-EINVAL - In case of error.
+ *
+ *
+ * This function configures the slave registers for
+ * transport and port parameters.
+ */ +int sdw_cfg_slv_params(struct sdw_bus *mstr_bs, + struct sdw_transport_params *t_slv_params, + struct sdw_port_params *p_slv_params, int slv_number) +{ + struct sdw_msg wr_msg, wr_msg1, rd_msg; + int ret = 0; + int banktouse; + u8 wbuf[8] = {0, 0, 0, 0, 0, 0, 0, 0}; + u8 wbuf1[2] = {0, 0}; + u8 rbuf[1] = {0}; + + +#ifdef CONFIG_SND_SOC_SVFPGA + /* + * The below hardcoding is required + * for running PDM capture with SV conora card + * because the transport params of card is not + * same as master parameters. Also not all + * standard registers are valid. + */ + t_slv_params->blockgroupcontrol_valid = false; + t_slv_params->sample_interval = 50; + t_slv_params->offset1 = 0; + t_slv_params->offset2 = 0; + t_slv_params->hstart = 1; + t_slv_params->hstop = 6; + p_slv_params->word_length = 30; +#endif + + /* Program slave alternate bank with all transport parameters */ + /* DPN_BlockCtrl2 */ + wbuf[0] = t_slv_params->blockgroupcontrol; + /* DPN_SampleCtrl1 */ + wbuf[1] = (t_slv_params->sample_interval - 1) & + SDW_DPN_SAMPLECTRL1_LOW_MASK; + wbuf[2] = ((t_slv_params->sample_interval - 1) >> 8) & + SDW_DPN_SAMPLECTRL1_LOW_MASK; /* DPN_SampleCtrl2 */ + wbuf[3] = t_slv_params->offset1; /* DPN_OffsetCtrl1 */ + wbuf[4] = t_slv_params->offset2; /* DPN_OffsetCtrl2 */ + /* DPN_HCtrl */ + wbuf[5] = (t_slv_params->hstop | (t_slv_params->hstart << 4)); + wbuf[6] = t_slv_params->blockpackingmode; /* DPN_BlockCtrl3 */ + wbuf[7] = t_slv_params->lanecontrol; /* DPN_LaneCtrl */ + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + /* Program slave alternate bank with all port parameters */ + rd_msg.addr = SDW_DPN_PORTCTRL + + (SDW_NUM_DATA_PORT_REGISTERS * t_slv_params->num); + rd_msg.ssp_tag = 0x0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.slave_addr = slv_number; + + rd_msg.buf = rbuf; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n"); + goto out; + } + + wbuf1[0] = (p_slv_params->port_flow_mode | + (p_slv_params->port_data_mode << + SDW_DPN_PORTCTRL_PORTDATAMODE_SHIFT) | + (rbuf[0])); + + wbuf1[1] = (p_slv_params->word_length - 1); + + /* Check whether address computed is correct for both cases */ + wr_msg.addr = ((SDW_DPN_BLOCKCTRL2 + + (1 * (!t_slv_params->blockgroupcontrol_valid)) + + (SDW_BANK1_REGISTER_OFFSET * banktouse)) + + (SDW_NUM_DATA_PORT_REGISTERS * t_slv_params->num)); + + wr_msg1.addr = SDW_DPN_PORTCTRL + + (SDW_NUM_DATA_PORT_REGISTERS * t_slv_params->num); + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; +#ifdef CONFIG_SND_SOC_SVFPGA + wr_msg.len = (5 + (1 * (t_slv_params->blockgroupcontrol_valid))); +#else + wr_msg.len = (7 + (1 * (t_slv_params->blockgroupcontrol_valid))); +#endif + + wr_msg.slave_addr = slv_number; + wr_msg.buf = &wbuf[0 + (1 * (!t_slv_params->blockgroupcontrol_valid))]; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + wr_msg1.ssp_tag = 0x0; + wr_msg1.flag = SDW_MSG_FLAG_WRITE; + wr_msg1.len = 2; + + wr_msg1.slave_addr = slv_number; + wr_msg1.buf = &wbuf1[0]; + wr_msg1.addr_page1 = 0x0; + wr_msg1.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n"); + goto out; + } + + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg1, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, 
"Register transfer failed\n"); + goto out; + } +out: + + return ret; +} + + +/* + * sdw_cfg_mstr_params - returns Success + * -EINVAL - In case of error. + * + * + * This function configures master registers for + * transport and port parameters. + */ +int sdw_cfg_mstr_params(struct sdw_bus *mstr_bs, + struct sdw_transport_params *t_mstr_params, + struct sdw_port_params *p_mstr_params) +{ + struct sdw_mstr_driver *ops = mstr_bs->mstr->driver; + int banktouse, ret = 0; + + /* 1. Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + /* 2. Set Master Xport Params */ + if (ops->mstr_port_ops->dpn_set_port_transport_params) { + ret = ops->mstr_port_ops->dpn_set_port_transport_params + (mstr_bs->mstr, t_mstr_params, banktouse); + if (ret < 0) + return ret; + } + + /* 3. Set Master Port Params */ + if (ops->mstr_port_ops->dpn_set_port_params) { + ret = ops->mstr_port_ops->dpn_set_port_params + (mstr_bs->mstr, p_mstr_params, banktouse); + if (ret < 0) + return ret; + } + + return 0; +} + +/* + * sdw_cfg_params_mstr_slv - returns Success + * + * This function copies/configure master/slave transport & + * port params. + * + */ +int sdw_cfg_params_mstr_slv(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_bs_rt, + bool state_check) +{ + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_port_runtime *port_rt, *port_slv_rt; + struct sdw_transport_params *t_params, *t_slv_params; + struct sdw_port_params *p_params, *p_slv_params; + int ret = 0; + + list_for_each_entry(slv_rt, + &sdw_mstr_bs_rt->slv_rt_list, slave_node) { + + if (slv_rt->slave == NULL) + break; + + /* configure transport params based on state */ + if ((state_check) && + (slv_rt->rt_state == SDW_STATE_UNPREPARE_RT)) + continue; + + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + /* Fill in port params here */ + port_slv_rt->port_params.num = port_slv_rt->port_num; + port_slv_rt->port_params.word_length = + slv_rt->stream_params.bps; + /* Normal/Isochronous Mode */ + port_slv_rt->port_params.port_flow_mode = 0x0; + /* Normal Mode */ + port_slv_rt->port_params.port_data_mode = 0x0; + t_slv_params = &port_slv_rt->transport_params; + p_slv_params = &port_slv_rt->port_params; + + /* Configure xport & port params for slave */ + ret = sdw_cfg_slv_params(sdw_mstr_bs, t_slv_params, + p_slv_params, slv_rt->slave->slv_number); + if (ret < 0) + return ret; + + } + } + + if ((state_check) && + (sdw_mstr_bs_rt->rt_state == SDW_STATE_UNPREPARE_RT)) + return 0; + + list_for_each_entry(port_rt, + &sdw_mstr_bs_rt->port_rt_list, port_node) { + + /* Transport and port parameters */ + t_params = &port_rt->transport_params; + p_params = &port_rt->port_params; + + + p_params->num = port_rt->port_num; + p_params->word_length = sdw_mstr_bs_rt->stream_params.bps; + p_params->port_flow_mode = 0x0; /* Normal/Isochronous Mode */ + p_params->port_data_mode = 0x0; /* Normal Mode */ + + /* Configure xport params and port params for master */ + ret = sdw_cfg_mstr_params(sdw_mstr_bs, t_params, p_params); + if (ret < 0) + return ret; + + } + + return 0; +} + + +/* + * sdw_cfg_slv_enable_disable - returns Success + * -EINVAL - In case of error. + * + * + * This function enable/disable slave port channels. 
+ */ +int sdw_cfg_slv_enable_disable(struct sdw_bus *mstr_bs, + struct sdw_slave_runtime *slv_rt_strm, + struct sdw_port_runtime *port_slv_strm, + struct port_chn_en_state *chn_en) +{ + struct sdw_msg wr_msg, rd_msg; + int ret = 0; + int banktouse; + u8 wbuf[1] = {0}; + u8 rbuf[1] = {0}; + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + if ((chn_en->is_activate) || (chn_en->is_bank_sw)) + banktouse = !banktouse; + + rd_msg.addr = wr_msg.addr = ((SDW_DPN_CHANNELEN + + (SDW_BANK1_REGISTER_OFFSET * banktouse)) + + (SDW_NUM_DATA_PORT_REGISTERS * + port_slv_strm->port_num)); + + rd_msg.ssp_tag = 0x0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.slave_addr = slv_rt_strm->slave->slv_number; + rd_msg.buf = rbuf; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.slave_addr = slv_rt_strm->slave->slv_number; + wr_msg.buf = wbuf; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + + if (chn_en->is_activate) { + + /* + * 1. slave port enable_ch_pre + * --> callback + * --> no callback available + */ + + /* 2. slave port enable */ + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + wbuf[0] = (rbuf[0] | port_slv_strm->channel_mask); + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + rbuf[0] = 0; + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + /* + * 3. slave port enable post pre + * --> callback + * --> no callback available + */ + slv_rt_strm->rt_state = SDW_STATE_ENABLE_RT; + + } else { + + /* + * 1. slave port enable_ch_unpre + * --> callback + * --> no callback available + */ + + /* 2. slave port disable */ + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + wbuf[0] = (rbuf[0] & ~(port_slv_strm->channel_mask)); + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + rbuf[0] = 0; + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + /* + * 3. slave port enable post unpre + * --> callback + * --> no callback available + */ + if (!chn_en->is_bank_sw) + slv_rt_strm->rt_state = SDW_STATE_DISABLE_RT; + + } +out: + return ret; + +} + + +/* + * sdw_cfg_mstr_activate_disable - returns Success + * -EINVAL - In case of error. + * + * + * This function enable/disable master port channels. 
+ */ +int sdw_cfg_mstr_activate_disable(struct sdw_bus *mstr_bs, + struct sdw_mstr_runtime *mstr_rt_strm, + struct sdw_port_runtime *port_mstr_strm, + struct port_chn_en_state *chn_en) +{ + struct sdw_mstr_driver *ops = mstr_bs->mstr->driver; + struct sdw_activate_ch activate_ch; + int banktouse, ret = 0; + + activate_ch.num = port_mstr_strm->port_num; + activate_ch.ch_mask = port_mstr_strm->channel_mask; + activate_ch.activate = chn_en->is_activate; /* Enable/Disable */ + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + if ((chn_en->is_activate) || (chn_en->is_bank_sw)) + banktouse = !banktouse; + + /* 2. Master port enable */ + if (ops->mstr_port_ops->dpn_port_activate_ch) { + ret = ops->mstr_port_ops->dpn_port_activate_ch(mstr_bs->mstr, + &activate_ch, banktouse); + if (ret < 0) + return ret; + } + + if (chn_en->is_activate) + mstr_rt_strm->rt_state = SDW_STATE_ENABLE_RT; + else if (!chn_en->is_bank_sw) + mstr_rt_strm->rt_state = SDW_STATE_DISABLE_RT; + + return 0; +} + + +/* + * sdw_en_dis_mstr_slv - returns Success + * -EINVAL - In case of error. + * + * + * This function call master/slave enable/disable + * channel API's. + */ +int sdw_en_dis_mstr_slv(struct sdw_bus *sdw_mstr_bs, + struct sdw_runtime *sdw_rt, bool is_act) +{ + struct sdw_slave_runtime *slv_rt_strm = NULL; + struct sdw_port_runtime *port_slv_strm, *port_mstr_strm; + struct sdw_mstr_runtime *mstr_rt_strm = NULL; + struct port_chn_en_state chn_en; + int ret = 0; + + if (is_act) + chn_en.is_bank_sw = true; + else + chn_en.is_bank_sw = false; + + chn_en.is_activate = is_act; + + list_for_each_entry(slv_rt_strm, &sdw_rt->slv_rt_list, slave_sdw_node) { + + if (slv_rt_strm->slave == NULL) + break; + + list_for_each_entry(port_slv_strm, + &slv_rt_strm->port_rt_list, port_node) { + + ret = sdw_cfg_slv_enable_disable + (sdw_mstr_bs, slv_rt_strm, + port_slv_strm, &chn_en); + if (ret < 0) + return ret; + + } + + break; + + } + + list_for_each_entry(mstr_rt_strm, + &sdw_rt->mstr_rt_list, mstr_sdw_node) { + + if (mstr_rt_strm->mstr == NULL) + break; + + list_for_each_entry(port_mstr_strm, + &mstr_rt_strm->port_rt_list, port_node) { + + ret = sdw_cfg_mstr_activate_disable + (sdw_mstr_bs, mstr_rt_strm, + port_mstr_strm, &chn_en); + if (ret < 0) + return ret; + + } + + } + + return 0; +} + + +/* + * sdw_en_dis_mstr_slv_state - returns Success + * -EINVAL - In case of error. + * + * + * This function call master/slave enable/disable + * channel API's based on runtime state. + */ +int sdw_en_dis_mstr_slv_state(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_bs_rt, + struct port_chn_en_state *chn_en) +{ + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_port_runtime *port_slv_rt, *port_rt; + int ret = 0; + + list_for_each_entry(slv_rt, &sdw_mstr_bs_rt->slv_rt_list, slave_node) { + + if (slv_rt->slave == NULL) + break; + + if (slv_rt->rt_state == SDW_STATE_ENABLE_RT) { + + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + ret = sdw_cfg_slv_enable_disable + (sdw_mstr_bs, slv_rt, + port_slv_rt, chn_en); + if (ret < 0) + return ret; + + } + } + } + + if (sdw_mstr_bs_rt->rt_state == SDW_STATE_ENABLE_RT) { + + list_for_each_entry(port_rt, + &sdw_mstr_bs_rt->port_rt_list, port_node) { + + ret = sdw_cfg_mstr_activate_disable + (sdw_mstr_bs, sdw_mstr_bs_rt, port_rt, chn_en); + if (ret < 0) + return ret; + + } + } + + return 0; +} + + +/* + * sdw_get_clock_frmshp - returns Success + * -EINVAL - In case of error. 
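+ *
+ * A worked example of the selection (illustrative numbers): with
+ * clk_freq = 9.6 MHz and a 50 x 8 frame (400 bits), frame_freq =
+ * 9600000 / 400 = 24000 frames/s; the 48 control bits per frame
+ * cost 1.152 Mbps, leaving 8.448 Mbps of data bandwidth, which must
+ * cover the bus bandwidth for the row/column pair to be chosen.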
+ * + * + * This function computes clock and frame shape based on + * clock frequency. + */ +int sdw_get_clock_frmshp(struct sdw_bus *sdw_mstr_bs, int *frame_int, + struct sdw_mstr_runtime *sdw_mstr_rt) +{ + struct sdw_master_capabilities *sdw_mstr_cap = NULL; + struct sdw_slv_dpn_capabilities *sdw_slv_dpn_cap = NULL; + struct port_audio_mode_properties *mode_prop = NULL; + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_port_runtime *port_slv_rt = NULL; + int i, j, rc; + int clock_reqd = 0, frame_interval = 0, frame_frequency = 0; + int sel_row = 0, sel_col = 0, pn = 0; + int value; + bool clock_ok = false; + + sdw_mstr_cap = &sdw_mstr_bs->mstr->mstr_capabilities; + + /* + * Find nearest clock frequency needed by master for + * given bandwidth + */ + for (i = 0; i < MAXCLOCKDIVS; i++) { + + /* TBD: Check why 3000 */ + if ((((sdw_mstr_cap->base_clk_freq * 2) / clock_div[i]) <= + sdw_mstr_bs->bandwidth) || + ((((sdw_mstr_cap->base_clk_freq * 2) / clock_div[i]) + % 3000) != 0)) + continue; + + clock_reqd = ((sdw_mstr_cap->base_clk_freq * 2) / clock_div[i]); + + /* + * Check all the slave device capabilities + * here and find whether given frequency is + * supported by all slaves + */ + list_for_each_entry(slv_rt, &sdw_mstr_rt->slv_rt_list, + slave_node) { + + /* check for valid slave */ + if (slv_rt->slave == NULL) + break; + + /* check clock req for each port */ + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + pn = port_slv_rt->port_num; + + + sdw_slv_dpn_cap = + &slv_rt->slave->sdw_slv_cap.sdw_dpn_cap[pn]; + mode_prop = sdw_slv_dpn_cap->mode_properties; + + /* + * TBD: Indentation to be fixed, + * code refactoring to be considered. + */ + if (mode_prop->num_freq_configs) { + for (j = 0; j < + mode_prop->num_freq_configs; j++) { + value = + mode_prop->freq_supported[j]; + if (clock_reqd == value) { + clock_ok = true; + break; + } + if (j == + mode_prop->num_freq_configs) { + clock_ok = false; + break; + } + + } + + } else { + if ((clock_reqd < + mode_prop->min_frequency) || + (clock_reqd > + mode_prop->max_frequency)) { + clock_ok = false; + } else + clock_ok = true; + } + + /* Go for next clock frequency */ + if (!clock_ok) + break; + } + + /* + * Dont check next slave, go for next clock + * frequency + */ + if (!clock_ok) + break; + } + + /* check for next clock divider */ + if (!clock_ok) + continue; + + /* Find frame shape based on bandwidth per controller */ + for (rc = 0; rc < MAX_NUM_ROW_COLS; rc++) { + frame_interval = + sdw_core.rowcolcomb[rc].row * + sdw_core.rowcolcomb[rc].col; + frame_frequency = clock_reqd/frame_interval; + + if ((clock_reqd - + (frame_frequency * + sdw_core.rowcolcomb[rc]. + control_bits)) < + sdw_mstr_bs->bandwidth) + continue; + + break; + } + + /* Valid frameshape not found, check for next clock freq */ + if (rc == MAX_NUM_ROW_COLS) + continue; + + sel_row = sdw_core.rowcolcomb[rc].row; + sel_col = sdw_core.rowcolcomb[rc].col; + sdw_mstr_bs->frame_freq = frame_frequency; + sdw_mstr_bs->clk_freq = clock_reqd; + sdw_mstr_bs->clk_div = clock_div[i]; + clock_ok = false; + *frame_int = frame_interval; + sdw_mstr_bs->col = sel_col; + sdw_mstr_bs->row = sel_row; + + return 0; + } + + /* None of clock frequency matches, return error */ + if (i == MAXCLOCKDIVS) + return -EINVAL; + + return 0; +} + +/* + * sdw_compute_sys_interval - returns Success + * -EINVAL - In case of error. + * + * + * This function computes system interval. 
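+ *
+ * Worked example (illustrative numbers): with clk_freq = 9.6 MHz and
+ * a 48 kHz stream, sample_interval = 9600000 / 48000 = 200 clocks;
+ * with a 50 x 8 frame (frame_interval = 400) and div = 1, the system
+ * interval is div * lcm(200, 400) = 400.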
+ */ +int sdw_compute_sys_interval(struct sdw_bus *sdw_mstr_bs, + struct sdw_master_capabilities *sdw_mstr_cap, + int frame_interval) +{ + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + struct sdw_mstr_runtime *sdw_mstr_rt = NULL; + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_transport_params *t_params = NULL, *t_slv_params = NULL; + struct sdw_port_runtime *port_rt, *port_slv_rt; + int lcmnum1 = 0, lcmnum2 = 0, div = 0, lcm = 0; + int sample_interval; + + /* + * once you got bandwidth frame shape for bus, + * run a loop for all the active streams running + * on bus and compute stream interval & sample_interval. + */ + list_for_each_entry(sdw_mstr_rt, + &sdw_mstr->mstr_rt_list, mstr_node) { + + if (sdw_mstr_rt->mstr == NULL) + break; + + /* + * Calculate sample interval for stream + * running on given master. + */ + if (sdw_mstr_rt->stream_params.rate) + sample_interval = (sdw_mstr_bs->clk_freq/ + sdw_mstr_rt->stream_params.rate); + else + return -EINVAL; + + /* Run port loop to assign sample interval per port */ + list_for_each_entry(port_rt, + &sdw_mstr_rt->port_rt_list, port_node) { + + t_params = &port_rt->transport_params; + + /* + * Assign sample interval each port transport + * properties. Assumption is that sample interval + * per port for given master will be same. + */ + t_params->sample_interval = sample_interval; + } + + /* Calculate LCM */ + lcmnum2 = sample_interval; + if (!lcmnum1) + lcmnum1 = sdw_lcm(lcmnum2, lcmnum2); + else + lcmnum1 = sdw_lcm(lcmnum1, lcmnum2); + + /* Run loop for slave per master runtime */ + list_for_each_entry(slv_rt, + &sdw_mstr_rt->slv_rt_list, slave_node) { + + if (slv_rt->slave == NULL) + break; + + /* Assign sample interval for each port of slave */ + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + t_slv_params = &port_slv_rt->transport_params; + + /* Assign sample interval each port */ + t_slv_params->sample_interval = sample_interval; + } + } + } + + /* + * If system interval already calculated + * In pause/resume, underrun scenario + */ + if (sdw_mstr_bs->system_interval) + return 0; + + /* Assign frame stream interval */ + sdw_mstr_bs->stream_interval = lcmnum1; + + /* 6. compute system_interval */ + if ((sdw_mstr_cap) && (sdw_mstr_bs->clk_freq)) { + + div = ((sdw_mstr_cap->base_clk_freq * 2) / + sdw_mstr_bs->clk_freq); + + if ((lcmnum1) && (frame_interval)) + lcm = sdw_lcm(lcmnum1, frame_interval); + else + return -EINVAL; + + sdw_mstr_bs->system_interval = (div * lcm); + + } + + /* + * Something went wrong, may be sdw_lcm value may be 0, + * return error accordingly + */ + if (!sdw_mstr_bs->system_interval) + return -EINVAL; + + + return 0; +} + +/** + * sdw_chk_first_node - returns True or false + * + * This function returns true in case of first node + * else returns false. + */ +bool sdw_chk_first_node(struct sdw_mstr_runtime *sdw_mstr_rt, + struct sdw_master *sdw_mstr) +{ + struct sdw_mstr_runtime *first_rt = NULL; + + first_rt = list_first_entry(&sdw_mstr->mstr_rt_list, + struct sdw_mstr_runtime, mstr_node); + if (sdw_mstr_rt == first_rt) + return true; + else + return false; + +} + +/* + * sdw_compute_hstart_hstop - returns Success + * -EINVAL - In case of error. + * + * + * This function computes hstart and hstop for running + * streams per master & slaves. 
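+ *
+ * Worked example (illustrative numbers): full_bw = clk_freq / rate =
+ * 9600000 / 48000 = 200; payload_bw = 24 bps * 2 ch = 48; with
+ * sel_col = 8, hwidth = (8 * 48 + 199) / 200 = 2, so the group is
+ * placed at hstop = 7, hstart = 6.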
+ */
+int sdw_compute_hstart_hstop(struct sdw_bus *sdw_mstr_bs)
+{
+ struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
+ struct sdw_mstr_runtime *sdw_mstr_rt;
+ struct sdw_transport_params *t_params = NULL, *t_slv_params = NULL;
+ struct sdw_slave_runtime *slv_rt = NULL;
+ struct sdw_port_runtime *port_rt, *port_slv_rt;
+ int hstart = 0, hstop = 0;
+ int column_needed = 0;
+ int sel_col = sdw_mstr_bs->col;
+ int group_count = 0, no_of_channels = 0;
+ struct temp_elements *temp, *element;
+ int rates[10]; /* supports up to 10 distinct stream rates */
+ int num, ch_mask, block_offset, i, port_block_offset;
+
+ /* Run loop for all master runtimes for given master */
+ list_for_each_entry(sdw_mstr_rt,
+ &sdw_mstr->mstr_rt_list, mstr_node) {
+
+ if (sdw_mstr_rt->mstr == NULL)
+ break;
+
+ /* should not compute any transport params */
+ if (sdw_mstr_rt->rt_state == SDW_STATE_UNPREPARE_RT)
+ continue;
+
+ /* Perform grouping of streams based on stream rate */
+ if (sdw_mstr_rt == list_first_entry(&sdw_mstr->mstr_rt_list,
+ struct sdw_mstr_runtime, mstr_node))
+ rates[group_count++] = sdw_mstr_rt->stream_params.rate;
+ else {
+ num = group_count;
+ for (i = 0; i < num; i++) {
+ if (sdw_mstr_rt->stream_params.rate == rates[i])
+ break;
+ }
+
+ /* Rate not seen before, start a new group */
+ if (i == num)
+ rates[group_count++] =
+ sdw_mstr_rt->stream_params.rate;
+ }
+ }
+
+ /* check for number of streams and number of group count */
+ if (group_count == 0)
+ return 0;
+
+ /* Allocate temporary memory holding temp variables */
+ temp = kzalloc((sizeof(struct temp_elements) * group_count),
+ GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ /* Calculate full bandwidth per group */
+ for (i = 0; i < group_count; i++) {
+ element = &temp[i];
+ element->rate = rates[i];
+ element->full_bw = sdw_mstr_bs->clk_freq/element->rate;
+ }
+
+ /* Calculate payload bandwidth per group */
+ list_for_each_entry(sdw_mstr_rt,
+ &sdw_mstr->mstr_rt_list, mstr_node) {
+
+ if (sdw_mstr_rt->mstr == NULL)
+ break;
+
+ /* should not compute any transport params */
+ if (sdw_mstr_rt->rt_state == SDW_STATE_UNPREPARE_RT)
+ continue;
+
+ for (i = 0; i < group_count; i++) {
+ element = &temp[i];
+ if (sdw_mstr_rt->stream_params.rate == element->rate) {
+ element->payload_bw +=
+ sdw_mstr_rt->stream_params.bps *
+ sdw_mstr_rt->stream_params.channel_count;
+ break;
+ }
+ }
+
+ /* The stream rate must match one of the groups */
+ if (i == group_count) {
+ kfree(temp);
+ return -EINVAL;
+ }
+ }
+
+ /* Calculate hwidth per group and total columns needed per master */
+ for (i = 0; i < group_count; i++) {
+ element = &temp[i];
+ element->hwidth =
+ (sel_col * element->payload_bw +
+ element->full_bw - 1)/element->full_bw;
+ column_needed += element->hwidth;
+ }
+
+ /* Required columns must not exceed the selected columns */
+ if (column_needed > sel_col - 1) {
+ kfree(temp);
+ return -EINVAL;
+ }
+
+ /* Compute hstop */
+ hstop = sel_col - 1;
+
+ /* Run loop for all groups to compute transport parameters */
+ for (i = 0; i < group_count; i++) {
+ port_block_offset = block_offset = 1;
+ element = &temp[i];
+
+ /* Find streams associated with each group */
+ list_for_each_entry(sdw_mstr_rt,
+ &sdw_mstr->mstr_rt_list, mstr_node) {
+
+ if (sdw_mstr_rt->mstr == NULL)
+ break;
+
+ /* should not compute any transport params */
+ if (sdw_mstr_rt->rt_state == SDW_STATE_UNPREPARE_RT)
+ continue;
+
+ if (sdw_mstr_rt->stream_params.rate != element->rate)
+ continue;
+
+ /* Compute hstart */
+ sdw_mstr_rt->hstart = hstart =
+ hstop - element->hwidth + 1;
+ sdw_mstr_rt->hstop = hstop;
+
+ /* Assign hstart, hstop, block offset for each port */
+ list_for_each_entry(port_rt,
+ 
&sdw_mstr_rt->port_rt_list, port_node) { + + t_params = &port_rt->transport_params; + t_params->num = port_rt->port_num; + t_params->hstart = hstart; + t_params->hstop = hstop; + t_params->offset1 = port_block_offset; + t_params->offset2 = port_block_offset >> 8; + + /* Only BlockPerPort supported */ + t_params->blockgroupcontrol_valid = true; + t_params->blockgroupcontrol = 0x0; + t_params->lanecontrol = 0x0; + /* Copy parameters if first node */ + if (port_rt == list_first_entry + (&sdw_mstr_rt->port_rt_list, + struct sdw_port_runtime, port_node)) { + + sdw_mstr_rt->hstart = hstart; + sdw_mstr_rt->hstop = hstop; + + sdw_mstr_rt->block_offset = + port_block_offset; + + } + + /* Get no. of channels running on curr. port */ + ch_mask = port_rt->channel_mask; + no_of_channels = (((ch_mask >> 3) & 1) + + ((ch_mask >> 2) & 1) + + ((ch_mask >> 1) & 1) + + (ch_mask & 1)); + + + port_block_offset += + sdw_mstr_rt->stream_params.bps * + no_of_channels; + } + + /* Compute block offset */ + block_offset += sdw_mstr_rt->stream_params.bps * + sdw_mstr_rt->stream_params.channel_count; + + /* + * Re-assign port_block_offset for next stream + * under same group + */ + port_block_offset = block_offset; + } + + /* Compute hstop for next group */ + hstop = hstop - element->hwidth; + } + + /* Compute transport params for slave */ + + /* Run loop for master runtime streams running on master */ + list_for_each_entry(sdw_mstr_rt, + &sdw_mstr->mstr_rt_list, mstr_node) { + + /* Get block offset from master runtime */ + port_block_offset = sdw_mstr_rt->block_offset; + + /* Run loop for slave per master runtime */ + list_for_each_entry(slv_rt, + &sdw_mstr_rt->slv_rt_list, slave_node) { + + if (slv_rt->slave == NULL) + break; + + if (slv_rt->rt_state == SDW_STATE_UNPREPARE_RT) + continue; + + /* Run loop for each port of slave */ + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + t_slv_params = &port_slv_rt->transport_params; + t_slv_params->num = port_slv_rt->port_num; + + /* Assign transport parameters */ + t_slv_params->hstart = sdw_mstr_rt->hstart; + t_slv_params->hstop = sdw_mstr_rt->hstop; + t_slv_params->offset1 = port_block_offset; + t_slv_params->offset2 = port_block_offset >> 8; + + /* Only BlockPerPort supported */ + t_slv_params->blockgroupcontrol_valid = true; + t_slv_params->blockgroupcontrol = 0x0; + t_slv_params->lanecontrol = 0x0; + + /* Get no. of channels running on curr. port */ + ch_mask = port_slv_rt->channel_mask; + no_of_channels = (((ch_mask >> 3) & 1) + + ((ch_mask >> 2) & 1) + + ((ch_mask >> 1) & 1) + + (ch_mask & 1)); + + /* Increment block offset for next port/slave */ + port_block_offset += slv_rt->stream_params.bps * + no_of_channels; + } + } + } + + kfree(temp); + + return 0; +} + +/* + * sdw_cfg_frmshp_bnkswtch - returns Success + * -EINVAL - In case of error. + * -ENOMEM - In case of memory alloc failure. + * -EAGAIN - In case of activity ongoing. + * + * + * This function broadcast frameshape on framectrl + * register and performs bank switch. 
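+ *
+ * The new row/column encoding is broadcast (slave address 0xF) to
+ * the SCP_FrameCtrl register of the currently inactive bank, so
+ * the new frame shape only takes effect once the bus switches to
+ * that bank.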
+ */ +int sdw_cfg_frmshp_bnkswtch(struct sdw_bus *mstr_bs, bool is_wait) +{ + struct sdw_msg *wr_msg; + int ret = 0; + int banktouse, numcol, numrow; + u8 *wbuf; + + wr_msg = kzalloc(sizeof(struct sdw_msg), GFP_KERNEL); + if (!wr_msg) + return -ENOMEM; + + mstr_bs->async_data.msg = wr_msg; + + wbuf = kzalloc(sizeof(*wbuf), GFP_KERNEL); + if (!wbuf) + return -ENOMEM; + + numcol = sdw_get_col_to_num(mstr_bs->col); + numrow = sdw_get_row_to_num(mstr_bs->row); + + wbuf[0] = numcol | (numrow << 3); + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + if (banktouse) { + wr_msg->addr = (SDW_SCP_FRAMECTRL + SDW_BANK1_REGISTER_OFFSET) + + (SDW_NUM_DATA_PORT_REGISTERS * 0); /* Data port 0 */ + } else { + + wr_msg->addr = SDW_SCP_FRAMECTRL + + (SDW_NUM_DATA_PORT_REGISTERS * 0); /* Data port 0 */ + } + + wr_msg->ssp_tag = 0x1; + wr_msg->flag = SDW_MSG_FLAG_WRITE; + wr_msg->len = 1; + wr_msg->slave_addr = 0xF; /* Broadcast address*/ + wr_msg->buf = wbuf; + wr_msg->addr_page1 = 0x0; + wr_msg->addr_page2 = 0x0; + + if (is_wait) { + + if (in_atomic() || irqs_disabled()) { + ret = sdw_trylock_mstr(mstr_bs->mstr); + if (!ret) { + /* SDW activity is ongoing. */ + ret = -EAGAIN; + goto out; + } + } else + sdw_lock_mstr(mstr_bs->mstr); + + ret = sdw_slave_transfer_async(mstr_bs->mstr, wr_msg, + 1, &mstr_bs->async_data); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n"); + goto out; + } + + } else { + ret = sdw_slave_transfer(mstr_bs->mstr, wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n"); + goto out; + } + + } + + msleep(100); /* TBD: Remove this */ + + /* + * TBD: check whether we need to poll on + * mcp active bank bit to switch bank + */ + mstr_bs->active_bank = banktouse; + + if (!is_wait) { + kfree(mstr_bs->async_data.msg->buf); + kfree(mstr_bs->async_data.msg); + } + + +out: + + return ret; +} + +/* + * sdw_cfg_frmshp_bnkswtch_wait - returns Success + * -ETIMEDOUT - In case of timeout + * + * This function waits on completion of + * bank switch. + */ +int sdw_cfg_frmshp_bnkswtch_wait(struct sdw_bus *mstr_bs) +{ + unsigned long time_left; + struct sdw_master *mstr = mstr_bs->mstr; + + time_left = wait_for_completion_timeout( + &mstr_bs->async_data.xfer_complete, + 3000); + if (!time_left) { + dev_err(&mstr->dev, "Controller Timed out\n"); + sdw_unlock_mstr(mstr); + return -ETIMEDOUT; + } + kfree(mstr_bs->async_data.msg->buf); + kfree(mstr_bs->async_data.msg); + sdw_unlock_mstr(mstr); + return 0; +} + +/* + * sdw_config_bs_prms - returns Success + * -EINVAL - In case of error. + * + * + * This function performs master/slave transport + * params config, set SSP interval, set Clock + * frequency, enable channel. This API is called + * from sdw_bus_calc_bw & sdw_bus_calc_bw_dis API. + * + */ +int sdw_config_bs_prms(struct sdw_bus *sdw_mstr_bs, bool state_check) +{ + struct port_chn_en_state chn_en; + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + struct sdw_mstr_runtime *sdw_mstr_bs_rt = NULL; + struct sdw_mstr_driver *ops; + int banktouse, ret = 0; + + list_for_each_entry(sdw_mstr_bs_rt, + &sdw_mstr->mstr_rt_list, mstr_node) { + + if (sdw_mstr_bs_rt->mstr == NULL) + continue; + + /* + * Configure transport and port params + * for master and slave ports. 
+ */
+ ret = sdw_cfg_params_mstr_slv(sdw_mstr_bs,
+ sdw_mstr_bs_rt, state_check);
+ if (ret < 0) {
+ /* TBD: Undo all the computation */
+ dev_err(&sdw_mstr_bs->mstr->dev,
+ "slave/master config params failed\n");
+ return ret;
+ }
+
+ /* Get master driver ops */
+ ops = sdw_mstr_bs->mstr->driver;
+
+ /* Configure SSP */
+ banktouse = sdw_mstr_bs->active_bank;
+ banktouse = !banktouse;
+
+ /*
+ * TBD: Currently hardcoded SSP interval,
+ * the computed value should be taken from system_interval
+ * in the bus data structure.
+ * Add error check.
+ */
+ if (ops->mstr_ops->set_ssp_interval)
+ ops->mstr_ops->set_ssp_interval(sdw_mstr_bs->mstr,
+ SDW_DEFAULT_SSP, banktouse);
+
+ /*
+ * Configure Clock
+ * TBD: Add error check
+ */
+ if (ops->mstr_ops->set_clock_freq)
+ ops->mstr_ops->set_clock_freq(sdw_mstr_bs->mstr,
+ sdw_mstr_bs->clk_div, banktouse);
+
+ /* Enable channels on alternate bank for running streams */
+ chn_en.is_activate = true;
+ chn_en.is_bank_sw = true;
+ ret = sdw_en_dis_mstr_slv_state
+ (sdw_mstr_bs, sdw_mstr_bs_rt, &chn_en);
+ if (ret < 0) {
+ /* TBD: Undo all the computation */
+ dev_err(&sdw_mstr_bs->mstr->dev,
+ "Channel enable failed\n");
+ return ret;
+ }
+
+ }
+
+ return 0;
+}
+
+/*
+ * sdw_dis_chan - returns Success
+ * -EINVAL - In case of error.
+ *
+ *
+ * This function disables channels on the alternate
+ * bank. This API is called from sdw_bus_calc_bw
+ * & sdw_bus_calc_bw_dis when channels on the current
+ * bank are enabled.
+ *
+ */
+int sdw_dis_chan(struct sdw_bus *sdw_mstr_bs)
+{
+ struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
+ struct sdw_mstr_runtime *sdw_mstr_bs_rt = NULL;
+ struct port_chn_en_state chn_en;
+ int ret = 0;
+
+ list_for_each_entry(sdw_mstr_bs_rt,
+ &sdw_mstr->mstr_rt_list, mstr_node) {
+
+ if (sdw_mstr_bs_rt->mstr == NULL)
+ continue;
+
+ chn_en.is_activate = false;
+ chn_en.is_bank_sw = true;
+ ret = sdw_en_dis_mstr_slv_state(sdw_mstr_bs,
+ sdw_mstr_bs_rt, &chn_en);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+
+/*
+ * sdw_cfg_slv_prep_unprep - returns Success
+ * -EINVAL - In case of error.
+ *
+ *
+ * This function prepares/unprepares slave ports.
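+ *
+ * The prepare path is a three-step sequence: the slave driver's
+ * pre-prepare callback, a read-modify-write of DPN_PrepareCtrl to
+ * set the channel mask bits, then the post-prepare callback. A
+ * slave using the simplified Channel Prepare mechanism (no
+ * prepare_ch capability) skips the register write.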
+ */ +int sdw_cfg_slv_prep_unprep(struct sdw_bus *mstr_bs, + struct sdw_slave_runtime *slv_rt_strm, + struct sdw_port_runtime *port_slv_strm, + bool prep) +{ + struct sdw_slave_driver *slv_ops = slv_rt_strm->slave->driver; + struct sdw_slv_capabilities *slv_cap = + &slv_rt_strm->slave->sdw_slv_cap; + struct sdw_slv_dpn_capabilities *sdw_slv_dpn_cap = + slv_cap->sdw_dpn_cap; + + struct sdw_msg wr_msg, rd_msg, rd_msg1; + int ret = 0; + int banktouse; + u8 wbuf[1] = {0}; + u8 rbuf[1] = {0}; + u8 rbuf1[1] = {0}; + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + /* Read SDW_DPN_PREPARECTRL register */ + rd_msg.addr = wr_msg.addr = SDW_DPN_PREPARECTRL + + (SDW_NUM_DATA_PORT_REGISTERS * port_slv_strm->port_num); + + rd_msg.ssp_tag = 0x0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.slave_addr = slv_rt_strm->slave->slv_number; + rd_msg.buf = rbuf; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + + rd_msg1.ssp_tag = 0x0; + rd_msg1.flag = SDW_MSG_FLAG_READ; + rd_msg1.len = 1; + rd_msg1.slave_addr = slv_rt_strm->slave->slv_number; + rd_msg1.buf = rbuf1; + rd_msg1.addr_page1 = 0x0; + rd_msg1.addr_page2 = 0x0; + + + rd_msg1.addr = SDW_DPN_PREPARESTATUS + + (SDW_NUM_DATA_PORT_REGISTERS * port_slv_strm->port_num); + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.slave_addr = slv_rt_strm->slave->slv_number; + wr_msg.buf = wbuf; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + if (prep) { /* PREPARE */ + + /* + * 1. slave port prepare_ch_pre + * --> callback + * --> handle_pre_port_prepare + */ + if (slv_ops->handle_pre_port_prepare) { + slv_ops->handle_pre_port_prepare(slv_rt_strm->slave, + port_slv_strm->port_num, + port_slv_strm->channel_mask, + banktouse); + } + + /* 2. slave port prepare --> to write */ + if (sdw_slv_dpn_cap->prepare_ch) { + + /* NON SIMPLIFIED CM, prepare required */ + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg1, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + wbuf[0] = (rbuf[0] | port_slv_strm->channel_mask); + + /* + * TBD: poll for prepare interrupt bit + * before calling post_prepare + * 2. check capabilities if simplified + * CM no need to prepare + */ + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + /* + * TBD: check on port ready, + * ideally we should check on prepare + * status for port_ready + */ + + /* wait for completion on port ready*/ + msleep(100); /* TBD: Remove this */ + + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg1, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + } + + /* + * 3. slave port post pre + * --> callback + * --> handle_post_port_prepare + */ + if (slv_ops->handle_post_port_prepare) { + slv_ops->handle_post_port_prepare + (slv_rt_strm->slave, + port_slv_strm->port_num, + port_slv_strm->channel_mask, banktouse); + } + + slv_rt_strm->rt_state = SDW_STATE_PREPARE_RT; + + } else { + /* UNPREPARE */ + /* + * 1. 
slave port unprepare_ch_pre + * --> callback + * --> handle_pre_port_prepare + */ + if (slv_ops->handle_pre_port_unprepare) { + slv_ops->handle_pre_port_unprepare(slv_rt_strm->slave, + port_slv_strm->port_num, + port_slv_strm->channel_mask, + banktouse); + } + + /* 2. slave port unprepare --> to write */ + if (sdw_slv_dpn_cap->prepare_ch) { + + /* NON SIMPLIFIED CM, unprepare required */ + + /* Read SDW_DPN_PREPARECTRL register */ + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + wbuf[0] = (rbuf[0] & ~(port_slv_strm->channel_mask)); + + /* + * TBD: poll for prepare interrupt bit before + * calling post_prepare + * Does it apply for unprepare aswell? + * 2. check capabilities if simplified CM + * no need to unprepare + */ + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + } + + /* + * 3. slave port post unpre + * --> callback + * --> handle_post_port_unprepare + */ + if (slv_ops->handle_post_port_unprepare) { + slv_ops->handle_post_port_unprepare(slv_rt_strm->slave, + port_slv_strm->port_num, + port_slv_strm->channel_mask, + banktouse); + } + + slv_rt_strm->rt_state = SDW_STATE_UNPREPARE_RT; + } +out: + return ret; + +} + + +/* + * sdw_cfg_mstr_prep_unprep - returns Success + * -EINVAL - In case of error. + * + * + * This function prepare/unprepare master ports. + */ +int sdw_cfg_mstr_prep_unprep(struct sdw_bus *mstr_bs, + struct sdw_mstr_runtime *mstr_rt_strm, + struct sdw_port_runtime *port_mstr_strm, + bool prep) +{ + struct sdw_mstr_driver *ops = mstr_bs->mstr->driver; + struct sdw_prepare_ch prep_ch; + int ret = 0; + + prep_ch.num = port_mstr_strm->port_num; + prep_ch.ch_mask = port_mstr_strm->channel_mask; + prep_ch.prepare = prep; /* Prepare/Unprepare */ + + /* TBD: Bank configuration */ + + /* 1. Master port prepare_ch_pre */ + if (ops->mstr_port_ops->dpn_port_prepare_ch_pre) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch_pre + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + /* 2. Master port prepare */ + if (ops->mstr_port_ops->dpn_port_prepare_ch) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + /* 3. Master port prepare_ch_post */ + if (ops->mstr_port_ops->dpn_port_prepare_ch_post) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch_post + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + if (prep) + mstr_rt_strm->rt_state = SDW_STATE_PREPARE_RT; + else + mstr_rt_strm->rt_state = SDW_STATE_UNPREPARE_RT; + + return 0; +} + + +/* + * sdw_prep_unprep_mstr_slv - returns Success + * -EINVAL - In case of error. + * + * + * This function call master/slave prepare/unprepare + * port configuration API's, called from sdw_bus_calc_bw + * & sdw_bus_calc_bw_dis API's. 
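+ *
+ * Slave ports are prepared/unprepared first, then the
+ * corresponding master ports, walking every port runtime
+ * attached to the stream.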
+ */
+int sdw_prep_unprep_mstr_slv(struct sdw_bus *sdw_mstr_bs,
+ struct sdw_runtime *sdw_rt, bool is_prep)
+{
+ struct sdw_slave_runtime *slv_rt_strm = NULL;
+ struct sdw_port_runtime *port_slv_strm, *port_mstr_strm;
+ struct sdw_mstr_runtime *mstr_rt_strm = NULL;
+ int ret = 0;
+
+ list_for_each_entry(slv_rt_strm,
+ &sdw_rt->slv_rt_list, slave_sdw_node) {
+
+ if (slv_rt_strm->slave == NULL)
+ break;
+
+ list_for_each_entry(port_slv_strm,
+ &slv_rt_strm->port_rt_list, port_node) {
+
+ ret = sdw_cfg_slv_prep_unprep(sdw_mstr_bs,
+ slv_rt_strm, port_slv_strm, is_prep);
+ if (ret < 0)
+ return ret;
+ }
+
+ }
+
+ list_for_each_entry(mstr_rt_strm,
+ &sdw_rt->mstr_rt_list, mstr_sdw_node) {
+
+ if (mstr_rt_strm->mstr == NULL)
+ break;
+
+ list_for_each_entry(port_mstr_strm,
+ &mstr_rt_strm->port_rt_list, port_node) {
+
+ ret = sdw_cfg_mstr_prep_unprep(sdw_mstr_bs,
+ mstr_rt_strm, port_mstr_strm, is_prep);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+struct sdw_bus *master_to_bus(struct sdw_master *mstr)
+{
+ struct sdw_bus *sdw_mstr_bs = NULL;
+
+ list_for_each_entry(sdw_mstr_bs, &sdw_core.bus_list, bus_node) {
+ /* Match master structure pointer */
+ if (sdw_mstr_bs->mstr != mstr)
+ continue;
+ return sdw_mstr_bs;
+ }
+ /* This should never happen, added to suppress warning */
+ WARN_ON(1);
+
+ return NULL;
+}
+
+/*
+ * sdw_chk_strm_prms - returns Success
+ * -EINVAL - In case of error.
+ *
+ *
+ * This function performs all the required checks,
+ * such as asynchronous-mode rejection and stream
+ * rate matching. This API is called
+ * from the sdw_bus_calc_bw API.
+ *
+ */
+int sdw_chk_strm_prms(struct sdw_master_capabilities *sdw_mstr_cap,
+ struct sdw_stream_params *mstr_params,
+ struct sdw_stream_params *stream_params)
+{
+ /* Asynchronous mode not supported, return Error */
+ if (((sdw_mstr_cap->base_clk_freq * 2) % mstr_params->rate) != 0)
+ return -EINVAL;
+
+ /* Check for sampling frequency */
+ if (stream_params->rate != mstr_params->rate)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * sdw_compute_bs_prms - returns Success
+ * -EINVAL - In case of error.
+ *
+ *
+ * This function performs master/slave transport
+ * params computation. This API is called
+ * from the sdw_bus_calc_bw & sdw_bus_calc_bw_dis APIs.
+ *
+ */
+int sdw_compute_bs_prms(struct sdw_bus *sdw_mstr_bs,
+ struct sdw_mstr_runtime *sdw_mstr_rt)
+{
+
+ struct sdw_master_capabilities *sdw_mstr_cap = NULL;
+ struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
+ int ret = 0, frame_interval = 0;
+
+ sdw_mstr_cap = &sdw_mstr->mstr_capabilities;
+
+ ret = sdw_get_clock_frmshp(sdw_mstr_bs, &frame_interval,
+ sdw_mstr_rt);
+ if (ret < 0) {
+ /* TBD: Undo all the computation */
+ dev_err(&sdw_mstr->dev, "clock/frameshape config failed\n");
+ return ret;
+ }
+
+ /*
+ * TBD: find the right place to sort the master
+ * rt_list. Sorting is to be done on bps from low
+ * to high, which places PDM streams before PCM.
+ */
+
+ /*
+ * TBD: Should we also sort based on rate for PCM
+ * streams? If yes, then how -- by creating two
+ * different lists?
+ */
+
+ /* Compute system interval */
+ ret = sdw_compute_sys_interval(sdw_mstr_bs, sdw_mstr_cap,
+ frame_interval);
+ if (ret < 0) {
+ /* TBD: Undo all the computation */
+ dev_err(&sdw_mstr->dev, "compute system interval failed\n");
+ return ret;
+ }
+
+ /* Compute hstart/hstop */
+ ret = sdw_compute_hstart_hstop(sdw_mstr_bs);
+ if (ret < 0) {
+ /* TBD: Undo all the computation */
+ dev_err(&sdw_mstr->dev, "compute hstart/hstop failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * sdw_bs_pre_bnkswtch_post - returns Success
+ * -EINVAL or ret value - In case of error.
+ *
+ * This API performs one of the following operations
+ * based on the bs_state value:
+ * pre-activate port
+ * bank switch operation
+ * post-activate port
+ * bankswitch wait operation
+ * disable channel operation
+ */
+int sdw_bs_pre_bnkswtch_post(struct sdw_runtime *sdw_rt, int bs_state)
+{
+ struct sdw_mstr_runtime *mstr_rt_act = NULL;
+ struct sdw_bus *mstr_bs_act = NULL;
+ struct sdw_master_port_ops *ops;
+ int ret = 0;
+
+ list_for_each_entry(mstr_rt_act, &sdw_rt->mstr_rt_list,
+ mstr_sdw_node) {
+
+ if (mstr_rt_act->mstr == NULL)
+ break;
+
+ /* Get bus structure for master */
+ mstr_bs_act = master_to_bus(mstr_rt_act->mstr);
+ if (!mstr_bs_act)
+ return -EINVAL;
+
+ ops = mstr_bs_act->mstr->driver->mstr_port_ops;
+
+ /*
+ * Note that currently all the operations of
+ * pre->bankswitch->post->wait->disable
+ * are performed sequentially. The switch case
+ * is kept so that the code can scale to cases
+ * where pre->bankswitch->post->wait->disable
+ * are not sequential and are called from
+ * different instances.
+ */
+ switch (bs_state) {
+
+ case SDW_UPDATE_BS_PRE:
+ /* Pre-activate ports */
+ if (ops->dpn_port_activate_ch_pre) {
+ ret = ops->dpn_port_activate_ch_pre
+ (mstr_bs_act->mstr, NULL, 0);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ case SDW_UPDATE_BS_BNKSWTCH:
+ /* Configure Frame Shape/Switch Bank */
+ ret = sdw_cfg_frmshp_bnkswtch(mstr_bs_act, true);
+ if (ret < 0)
+ return ret;
+ break;
+ case SDW_UPDATE_BS_POST:
+ /* Post-activate ports */
+ if (ops->dpn_port_activate_ch_post) {
+ ret = ops->dpn_port_activate_ch_post
+ (mstr_bs_act->mstr, NULL, 0);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ case SDW_UPDATE_BS_BNKSWTCH_WAIT:
+ /* Post bankswitch wait operation */
+ ret = sdw_cfg_frmshp_bnkswtch_wait(mstr_bs_act);
+ if (ret < 0)
+ return ret;
+ break;
+ case SDW_UPDATE_BS_DIS_CHN:
+ /* Disable channels on previous bank */
+ ret = sdw_dis_chan(mstr_bs_act);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return ret;
+
+}
+
+/*
+ * sdw_update_bs_prms - returns Success
+ * -EINVAL - In case of error.
+ *
+ * Once all the parameters are configured
+ * for the ports, this function performs a bankswitch
+ * so that all the newly configured parameters
+ * take effect. This function is called
+ * from the sdw_bus_calc_bw & sdw_bus_calc_bw_dis APIs.
+ * It also disables all the channels that were
+ * enabled on the previous bank after the bankswitch.
+ */
+int sdw_update_bs_prms(struct sdw_bus *sdw_mstr_bs,
+ struct sdw_runtime *sdw_rt,
+ int last_node)
+{
+
+ struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
+ int ret = 0;
+
+ /*
+ * Optimization scope:
+ * when the link sync value is 1 we could assign a
+ * function pointer and call it if it is not NULL.
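+ *
+ * For aggregated (multi-link) masters the sequence is
+ * pre-activate -> bank switch -> post-activate -> wait ->
+ * disable old-bank channels, fanned out across all masters of
+ * the stream; a stand-alone master only needs
+ * bank switch -> disable.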
+ */
+ if ((last_node) && (sdw_mstr->link_sync_mask)) {
+
+ /* Perform pre-activate ports */
+ ret = sdw_bs_pre_bnkswtch_post(sdw_rt, SDW_UPDATE_BS_PRE);
+ if (ret < 0) {
+ dev_err(&sdw_mstr->dev, "Pre-activate port failed\n");
+ return ret;
+ }
+
+ /* Perform bankswitch operation */
+ ret = sdw_bs_pre_bnkswtch_post(sdw_rt, SDW_UPDATE_BS_BNKSWTCH);
+ if (ret < 0) {
+ dev_err(&sdw_mstr->dev, "Bank Switch operation failed\n");
+ return ret;
+ }
+
+ /* Perform post-activate ports */
+ ret = sdw_bs_pre_bnkswtch_post(sdw_rt, SDW_UPDATE_BS_POST);
+ if (ret < 0) {
+ dev_err(&sdw_mstr->dev, "Post-activate port failed\n");
+ return ret;
+ }
+
+ /* Perform bankswitch post wait operation */
+ ret = sdw_bs_pre_bnkswtch_post(sdw_rt,
+ SDW_UPDATE_BS_BNKSWTCH_WAIT);
+ if (ret < 0) {
+ dev_err(&sdw_mstr->dev, "BnkSwtch wait op failed\n");
+ return ret;
+ }
+
+ /* Disable channels on previous bank */
+ ret = sdw_bs_pre_bnkswtch_post(sdw_rt, SDW_UPDATE_BS_DIS_CHN);
+ if (ret < 0) {
+ dev_err(&sdw_mstr->dev, "Channel disable failed\n");
+ return ret;
+ }
+
+ }
+
+ if (!sdw_mstr->link_sync_mask) {
+
+ /* Configure Frame Shape/Switch Bank */
+ ret = sdw_cfg_frmshp_bnkswtch(sdw_mstr_bs, false);
+ if (ret < 0) {
+ /* TBD: Undo all the computation */
+ dev_err(&sdw_mstr->dev, "bank switch failed\n");
+ return ret;
+ }
+
+ /* Disable all channels enabled on previous bank */
+ ret = sdw_dis_chan(sdw_mstr_bs);
+ if (ret < 0) {
+ /* TBD: Undo all the computation */
+ dev_err(&sdw_mstr->dev, "Channel disable failed\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * sdw_chk_last_node - returns True or false
+ *
+ * This function returns true in case of the last node,
+ * else returns false.
+ */
+bool sdw_chk_last_node(struct sdw_mstr_runtime *sdw_mstr_rt,
+ struct sdw_runtime *sdw_rt)
+{
+ struct sdw_mstr_runtime *last_rt = NULL;
+
+ last_rt = list_last_entry(&sdw_rt->mstr_rt_list,
+ struct sdw_mstr_runtime, mstr_sdw_node);
+ if (sdw_mstr_rt == last_rt)
+ return true;
+ else
+ return false;
+
+}
+
+/**
+ * sdw_unprepare_op - returns Success
+ * -EINVAL - In case of error.
+ *
+ * This function performs all operations required
+ * to unprepare ports and recomputes the
+ * bus parameters.
+ */
+int sdw_unprepare_op(struct sdw_bus *sdw_mstr_bs,
+ struct sdw_mstr_runtime *sdw_mstr_rt,
+ struct sdw_runtime *sdw_rt)
+{
+
+ struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
+ struct sdw_stream_params *mstr_params;
+ bool last_node = false;
+ int ret = 0;
+
+ last_node = sdw_chk_last_node(sdw_mstr_rt, sdw_rt);
+ mstr_params = &sdw_mstr_rt->stream_params;
+
+ /* 1. Un-prepare master and slave ports */
+ ret = sdw_prep_unprep_mstr_slv(sdw_mstr_bs,
+ sdw_rt, false);
+ if (ret < 0) {
+ /* TBD: Undo all the computation */
+ dev_err(&sdw_mstr->dev, "Ch unprep failed\n");
+ return ret;
+ }
+
+ /* change stream state to unprepare */
+ if (last_node)
+ sdw_rt->stream_state =
+ SDW_STATE_UNPREPARE_STREAM;
+
+ /*
+ * Calculate new bandwidth, frame size
+ * and total BW required for master controller
+ */
+ sdw_mstr_rt->stream_bw = mstr_params->rate *
+ mstr_params->channel_count * mstr_params->bps;
+ sdw_mstr_bs->bandwidth -= sdw_mstr_rt->stream_bw;
+
+ /* Something went wrong in bandwidth calculation */
+ if (sdw_mstr_bs->bandwidth < 0) {
+ dev_err(&sdw_mstr->dev, "BW calculation failed\n");
+ return -EINVAL;
+ }
+
+ if (!sdw_mstr_bs->bandwidth) {
+ /*
+ * Last stream on master should
+ * return successfully
+ */
+ sdw_mstr_bs->system_interval = 0;
+ sdw_mstr_bs->stream_interval = 0;
+ sdw_mstr_bs->frame_freq = 0;
+ sdw_mstr_bs->row = 0;
+ sdw_mstr_bs->col = 0;
+ return 0;
+ }
+
+ /* Compute transport params */
+ ret = sdw_compute_bs_prms(sdw_mstr_bs, sdw_mstr_rt);
+ if (ret < 0) {
+ /* TBD: Undo all the computation */
+ dev_err(&sdw_mstr->dev, "Params computation failed\n");
+ return -EINVAL;
+ }
+
+ /* Configure bus params */
+ ret = sdw_config_bs_prms(sdw_mstr_bs, true);
+ if (ret < 0) {
+ /* TBD: Undo all the computation */
+ dev_err(&sdw_mstr->dev, "xport params config failed\n");
+ return ret;
+ }
+
+ /*
+ * Perform SDW bus update
+ * For Aggregation flow:
+ * Pre -> Bankswitch -> Post -> Disable channel
+ * For normal flow:
+ * Bankswitch -> Disable channel
+ */
+ ret = sdw_update_bs_prms(sdw_mstr_bs, sdw_rt, last_node);
+
+ return ret;
+}
+
+/**
+ * sdw_disable_op - returns Success
+ * -EINVAL - In case of error.
+ *
+ * This function performs all operations required
+ * to disable ports.
+ */
+int sdw_disable_op(struct sdw_bus *sdw_mstr_bs,
+ struct sdw_mstr_runtime *sdw_mstr_rt,
+ struct sdw_runtime *sdw_rt)
+{
+
+ struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
+ struct sdw_master_capabilities *sdw_mstr_cap = NULL;
+ struct sdw_stream_params *mstr_params;
+ bool last_node = false;
+ int ret = 0;
+
+
+ last_node = sdw_chk_last_node(sdw_mstr_rt, sdw_rt);
+ sdw_mstr_cap = &sdw_mstr_bs->mstr->mstr_capabilities;
+ mstr_params = &sdw_mstr_rt->stream_params;
+
+ /* Disable the ports of the stream being freed */
+ ret = sdw_en_dis_mstr_slv(sdw_mstr_bs, sdw_rt, false);
+ if (ret < 0) {
+ /* TBD: Undo all the computation */
+ dev_err(&sdw_mstr->dev, "Ch dis failed\n");
+ return ret;
+ }
+
+ /* Change stream state to disable */
+ if (last_node)
+ sdw_rt->stream_state = SDW_STATE_DISABLE_STREAM;
+
+ ret = sdw_config_bs_prms(sdw_mstr_bs, false);
+ if (ret < 0) {
+ /* TBD: Undo all the computation */
+ dev_err(&sdw_mstr->dev, "xport params config failed\n");
+ return ret;
+ }
+
+ /*
+ * Perform SDW bus update
+ * For Aggregation flow:
+ * Pre -> Bankswitch -> Post -> Disable channel
+ * For normal flow:
+ * Bankswitch -> Disable channel
+ */
+ ret = sdw_update_bs_prms(sdw_mstr_bs, sdw_rt, last_node);
+
+ return ret;
+}
+
+/**
+ * sdw_enable_op - returns Success
+ * -EINVAL - In case of error.
+ *
+ * This function performs all operations required
+ * to enable ports.
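+ *
+ * Enabling is bank-based: the new channel enables are programmed
+ * on the inactive bank first and only become live at the bank
+ * switch performed by sdw_update_bs_prms().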
+ */ +int sdw_enable_op(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_rt, + struct sdw_runtime *sdw_rt) +{ + + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + bool last_node = false; + int ret = 0; + + last_node = sdw_chk_last_node(sdw_mstr_rt, sdw_rt); + + ret = sdw_config_bs_prms(sdw_mstr_bs, false); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "xport params config failed\n"); + return ret; + } + + /* Enable new port for master and slave */ + ret = sdw_en_dis_mstr_slv(sdw_mstr_bs, sdw_rt, true); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "Channel enable failed\n"); + return ret; + } + + /* change stream state to enable */ + if (last_node) + sdw_rt->stream_state = SDW_STATE_ENABLE_STREAM; + /* + * Perform SDW bus update + * For Aggregation flow: + * Pre-> Bankswitch -> Post -> Disable channel + * For normal flow: + * Bankswitch -> Disable channel + */ + ret = sdw_update_bs_prms(sdw_mstr_bs, sdw_rt, last_node); + + return ret; +} + +/** + * sdw_prepare_op - returns Success + * -EINVAL - In case of error. + * + * This function perform all operations required + * to prepare ports and does computation of + * bus parameters. + */ +int sdw_prepare_op(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_rt, + struct sdw_runtime *sdw_rt) +{ + struct sdw_stream_params *stream_params = &sdw_rt->stream_params; + struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr; + struct sdw_master_capabilities *sdw_mstr_cap = NULL; + struct sdw_stream_params *mstr_params; + + bool last_node = false; + int ret = 0; + + last_node = sdw_chk_last_node(sdw_mstr_rt, sdw_rt); + sdw_mstr_cap = &sdw_mstr_bs->mstr->mstr_capabilities; + mstr_params = &sdw_mstr_rt->stream_params; + + /* + * check all the stream parameters received + * Check for isochronous mode, sample rate etc + */ + ret = sdw_chk_strm_prms(sdw_mstr_cap, mstr_params, + stream_params); + if (ret < 0) { + dev_err(&sdw_mstr->dev, "Stream param check failed\n"); + return -EINVAL; + } + + /* + * Calculate stream bandwidth, frame size and + * total BW required for master controller + */ + sdw_mstr_rt->stream_bw = mstr_params->rate * + mstr_params->channel_count * mstr_params->bps; + sdw_mstr_bs->bandwidth += sdw_mstr_rt->stream_bw; + + /* Compute transport params */ + ret = sdw_compute_bs_prms(sdw_mstr_bs, sdw_mstr_rt); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "Params computation failed\n"); + return -EINVAL; + } + + /* Configure bus parameters */ + ret = sdw_config_bs_prms(sdw_mstr_bs, true); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "xport param config failed\n"); + return ret; + } + + /* + * Perform SDW bus update + * For Aggregation flow: + * Pre-> Bankswitch -> Post -> Disable channel + * For normal flow: + * Bankswitch -> Disable channel + */ + ret = sdw_update_bs_prms(sdw_mstr_bs, sdw_rt, last_node); + + /* Prepare new port for master and slave */ + ret = sdw_prep_unprep_mstr_slv(sdw_mstr_bs, sdw_rt, true); + if (ret < 0) { + /* TBD: Undo all the computation */ + dev_err(&sdw_mstr->dev, "Channel prepare failed\n"); + return ret; + } + + /* change stream state to prepare */ + if (last_node) + sdw_rt->stream_state = SDW_STATE_PREPARE_STREAM; + + + return ret; +} + +/** + * sdw_pre_en_dis_unprep_op - returns Success + * -EINVAL - In case of error. + * + * This function is called by sdw_bus_calc_bw + * and sdw_bus_calc_bw_dis to prepare, enable, + * unprepare and disable ports. 
Based on the state
+ * value, the individual APIs are called.
+ */
+int sdw_pre_en_dis_unprep_op(struct sdw_mstr_runtime *sdw_mstr_rt,
+ struct sdw_runtime *sdw_rt, int state)
+{
+ struct sdw_master *sdw_mstr = NULL;
+ struct sdw_bus *sdw_mstr_bs = NULL;
+ int ret = 0;
+
+ /* Get bus structure for master */
+ sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr);
+ if (!sdw_mstr_bs)
+ return -EINVAL;
+
+ sdw_mstr = sdw_mstr_bs->mstr;
+
+ /*
+ * All required data structures are available,
+ * let's calculate BW for the master controller
+ */
+
+ switch (state) {
+
+ case SDW_STATE_PREPARE_STREAM: /* Prepare */
+ ret = sdw_prepare_op(sdw_mstr_bs, sdw_mstr_rt, sdw_rt);
+ break;
+ case SDW_STATE_ENABLE_STREAM: /* Enable */
+ ret = sdw_enable_op(sdw_mstr_bs, sdw_mstr_rt, sdw_rt);
+ break;
+ case SDW_STATE_DISABLE_STREAM: /* Disable */
+ ret = sdw_disable_op(sdw_mstr_bs, sdw_mstr_rt, sdw_rt);
+ break;
+ case SDW_STATE_UNPREPARE_STREAM: /* UnPrepare */
+ ret = sdw_unprepare_op(sdw_mstr_bs, sdw_mstr_rt, sdw_rt);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+
+ }
+
+ return ret;
+}
+
+/**
+ * sdw_bus_calc_bw - returns Success
+ * -EINVAL - In case of error.
+ *
+ *
+ * This function is called from sdw_prepare_and_enable
+ * whenever a new stream is processed. Based on the
+ * streams associated with the controller, it calculates
+ * the required bandwidth, clock and frameshape, computes
+ * all transport params for the given ports, enables the
+ * channels and performs a bank switch.
+ */
+int sdw_bus_calc_bw(struct sdw_stream_tag *stream_tag, bool enable)
+{
+
+ struct sdw_runtime *sdw_rt = stream_tag->sdw_rt;
+ struct sdw_mstr_runtime *sdw_mstr_rt = NULL;
+ struct sdw_bus *sdw_mstr_bs = NULL;
+ struct sdw_master *sdw_mstr = NULL;
+ int ret = 0;
+
+
+ /*
+ * TBD: check whether mstr_rt is in the configured state or not.
+ * If yes, then configure the masters as well.
+ * If no, then do not configure/enable master related parameters.
+ */
+
+ /* BW calculation for active master controller for given stream tag */
+ list_for_each_entry(sdw_mstr_rt, &sdw_rt->mstr_rt_list,
+ mstr_sdw_node) {
+
+ if (sdw_mstr_rt->mstr == NULL)
+ break;
+
+ if ((sdw_rt->stream_state != SDW_STATE_CONFIG_STREAM) &&
+ (sdw_rt->stream_state != SDW_STATE_UNPREPARE_STREAM))
+ goto enable_stream;
+
+ /* Get bus structure for master */
+ sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr);
+ if (!sdw_mstr_bs)
+ return -EINVAL;
+
+ sdw_mstr = sdw_mstr_bs->mstr;
+ ret = sdw_pre_en_dis_unprep_op(sdw_mstr_rt, sdw_rt,
+ SDW_STATE_PREPARE_STREAM);
+ if (ret < 0) {
+ dev_err(&sdw_mstr->dev, "Prepare Operation failed\n");
+ return -EINVAL;
+ }
+ }
+
+enable_stream:
+
+ list_for_each_entry(sdw_mstr_rt, &sdw_rt->mstr_rt_list, mstr_sdw_node) {
+
+
+ if (sdw_mstr_rt->mstr == NULL)
+ break;
+
+ if ((!enable) ||
+ (sdw_rt->stream_state != SDW_STATE_PREPARE_STREAM))
+ return 0;
+
+ sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr);
+ if (!sdw_mstr_bs)
+ return -EINVAL;
+
+ sdw_mstr = sdw_mstr_bs->mstr;
+
+ ret = sdw_pre_en_dis_unprep_op(sdw_mstr_rt, sdw_rt,
+ SDW_STATE_ENABLE_STREAM);
+ if (ret < 0) {
+ dev_err(&sdw_mstr->dev, "Enable Operation failed\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdw_bus_calc_bw);
+
+/**
+ * sdw_bus_calc_bw_dis - returns Success
+ * -EINVAL - In case of error.
+ *
+ *
+ * This function is called from sdw_disable_and_unprepare
+ * whenever a stream is ended.
The function disables/unprepares
+ * the port/channel of the associated stream and recomputes
+ * the required bandwidth, clock and frameshape, computes
+ * all transport params for the remaining ports, enables the
+ * channels and performs a bank switch for the remaining
+ * streams on the given controller.
+ */
+int sdw_bus_calc_bw_dis(struct sdw_stream_tag *stream_tag, bool unprepare)
+{
+ struct sdw_runtime *sdw_rt = stream_tag->sdw_rt;
+ struct sdw_mstr_runtime *sdw_mstr_rt = NULL;
+ struct sdw_bus *sdw_mstr_bs = NULL;
+ struct sdw_master *sdw_mstr = NULL;
+ int ret = 0;
+
+
+ /* BW calculation for active master controller for given stream tag */
+ list_for_each_entry(sdw_mstr_rt,
+ &sdw_rt->mstr_rt_list, mstr_sdw_node) {
+
+
+ if (sdw_mstr_rt->mstr == NULL)
+ break;
+
+ if (sdw_rt->stream_state != SDW_STATE_ENABLE_STREAM)
+ goto unprepare_stream;
+
+ /* Get bus structure for master */
+ sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr);
+ if (!sdw_mstr_bs)
+ return -EINVAL;
+
+ sdw_mstr = sdw_mstr_bs->mstr;
+ ret = sdw_pre_en_dis_unprep_op(sdw_mstr_rt, sdw_rt,
+ SDW_STATE_DISABLE_STREAM);
+ if (ret < 0) {
+ dev_err(&sdw_mstr->dev, "Disable Operation failed\n");
+ return -EINVAL;
+ }
+ }
+
+unprepare_stream:
+ list_for_each_entry(sdw_mstr_rt,
+ &sdw_rt->mstr_rt_list, mstr_sdw_node) {
+ if (sdw_mstr_rt->mstr == NULL)
+ break;
+
+ if ((!unprepare) ||
+ (sdw_rt->stream_state != SDW_STATE_DISABLE_STREAM))
+ return 0;
+
+ sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr);
+ if (!sdw_mstr_bs)
+ return -EINVAL;
+
+ sdw_mstr = sdw_mstr_bs->mstr;
+ ret = sdw_pre_en_dis_unprep_op(sdw_mstr_rt, sdw_rt,
+ SDW_STATE_UNPREPARE_STREAM);
+ if (ret < 0) {
+ dev_err(&sdw_mstr->dev, "Unprepare Operation failed\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdw_bus_calc_bw_dis);
+
+/*
+ * sdw_slv_dp0_en_dis - returns Success
+ * -EINVAL - In case of error.
+ *
+ *
+ * This function enables/disables the Slave DP0 channels.
+ */
+int sdw_slv_dp0_en_dis(struct sdw_bus *mstr_bs,
+ bool is_enable, u8 slv_number)
+{
+ struct sdw_msg wr_msg, rd_msg;
+ int ret = 0;
+ int banktouse;
+ u8 wbuf[1] = {0};
+ u8 rbuf[1] = {0};
+
+ /* Get current bank in use from bus structure */
+ banktouse = mstr_bs->active_bank;
+ banktouse = !banktouse;
+
+ rd_msg.addr = wr_msg.addr = ((SDW_DPN_CHANNELEN +
+ (SDW_BANK1_REGISTER_OFFSET * banktouse)) +
+ (SDW_NUM_DATA_PORT_REGISTERS *
+ 0x0));
+ rd_msg.ssp_tag = 0x0;
+ rd_msg.flag = SDW_MSG_FLAG_READ;
+ rd_msg.len = 1;
+ rd_msg.slave_addr = slv_number;
+ rd_msg.buf = rbuf;
+ rd_msg.addr_page1 = 0x0;
+ rd_msg.addr_page2 = 0x0;
+
+ wr_msg.ssp_tag = 0x0;
+ wr_msg.flag = SDW_MSG_FLAG_WRITE;
+ wr_msg.len = 1;
+ wr_msg.slave_addr = slv_number;
+ wr_msg.buf = wbuf;
+ wr_msg.addr_page1 = 0x0;
+ wr_msg.addr_page2 = 0x0;
+
+ ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1);
+ if (ret != 1) {
+ ret = -EINVAL;
+ dev_err(&mstr_bs->mstr->dev,
+ "Register transfer failed\n");
+ goto out;
+ }
+
+ if (is_enable)
+ wbuf[0] = (rbuf[0] | 0x1);
+ else
+ wbuf[0] = (rbuf[0] & ~(0x1));
+
+ ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1);
+ if (ret != 1) {
+ ret = -EINVAL;
+ dev_err(&mstr_bs->mstr->dev,
+ "Register transfer failed\n");
+ goto out;
+ }
+
+ rbuf[0] = 0;
+ /* This is just a status read, can be removed later */
+ ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1);
+ if (ret != 1) {
+ ret = -EINVAL;
+ dev_err(&mstr_bs->mstr->dev,
+ "Register transfer failed\n");
+ goto out;
+ }
+
+out:
+ return ret;
+
+}
+
+
+/*
+ * sdw_mstr_dp0_act_dis - returns Success
+ * -EINVAL - In case of error.
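+ *
+ * Unlike the slave side, which is driven through DPN_ChannelEn
+ * register writes over the bus, the master side goes through the
+ * controller driver's dpn_port_activate_ch_pre/ch/ch_post port ops.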
+ * + * + * This function enable/disable Master DP0 channels. + */ +int sdw_mstr_dp0_act_dis(struct sdw_bus *mstr_bs, bool is_enable) +{ + struct sdw_mstr_driver *ops = mstr_bs->mstr->driver; + struct sdw_activate_ch activate_ch; + int banktouse, ret = 0; + + activate_ch.num = 0; + activate_ch.ch_mask = 0x1; + activate_ch.activate = is_enable; /* Enable/Disable */ + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + /* 1. Master port enable_ch_pre */ + if (ops->mstr_port_ops->dpn_port_activate_ch_pre) { + ret = ops->mstr_port_ops->dpn_port_activate_ch_pre + (mstr_bs->mstr, &activate_ch, banktouse); + if (ret < 0) + return ret; + } + + /* 2. Master port enable */ + if (ops->mstr_port_ops->dpn_port_activate_ch) { + ret = ops->mstr_port_ops->dpn_port_activate_ch(mstr_bs->mstr, + &activate_ch, banktouse); + if (ret < 0) + return ret; + } + + /* 3. Master port enable_ch_post */ + if (ops->mstr_port_ops->dpn_port_activate_ch_post) { + ret = ops->mstr_port_ops->dpn_port_activate_ch_post + (mstr_bs->mstr, &activate_ch, banktouse); + if (ret < 0) + return ret; + } + + return 0; +} + +/* + * sdw_slv_dp0_prep_unprep - returns Success + * -EINVAL - In case of error. + * + * + * This function prepare/unprepare Slave DP0. + */ +int sdw_slv_dp0_prep_unprep(struct sdw_bus *mstr_bs, + u8 slv_number, bool prepare) +{ + struct sdw_msg wr_msg, rd_msg; + int ret = 0; + int banktouse; + u8 wbuf[1] = {0}; + u8 rbuf[1] = {0}; + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + /* Read SDW_DPN_PREPARECTRL register */ + rd_msg.addr = wr_msg.addr = SDW_DPN_PREPARECTRL + + (SDW_NUM_DATA_PORT_REGISTERS * 0x0); + rd_msg.ssp_tag = 0x0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.slave_addr = slv_number; + rd_msg.buf = rbuf; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.slave_addr = slv_number; + wr_msg.buf = wbuf; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + if (prepare) + wbuf[0] = (rbuf[0] | 0x1); + else + wbuf[0] = (rbuf[0] & ~(0x1)); + + /* + * TBD: poll for prepare interrupt bit + * before calling post_prepare + * 2. check capabilities if simplified + * CM no need to prepare + */ + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + /* + * Sleep for 100ms. + * TODO: check on check on prepare status for port_ready + */ + msleep(100); + +out: + return ret; + +} + +/* + * sdw_mstr_dp0_prep_unprep - returns Success + * -EINVAL - In case of error. + * + * + * This function prepare/unprepare Master DP0. + */ +int sdw_mstr_dp0_prep_unprep(struct sdw_bus *mstr_bs, + bool prep) +{ + struct sdw_mstr_driver *ops = mstr_bs->mstr->driver; + struct sdw_prepare_ch prep_ch; + int ret = 0; + + prep_ch.num = 0x0; + prep_ch.ch_mask = 0x1; + prep_ch.prepare = prep; /* Prepare/Unprepare */ + + /* 1. Master port prepare_ch_pre */ + if (ops->mstr_port_ops->dpn_port_prepare_ch_pre) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch_pre + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + /* 2. 
Master port prepare */ + if (ops->mstr_port_ops->dpn_port_prepare_ch) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + /* 3. Master port prepare_ch_post */ + if (ops->mstr_port_ops->dpn_port_prepare_ch_post) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch_post + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + return 0; +} + +static int sdw_bra_config_ops(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block, + struct sdw_transport_params *t_params, + struct sdw_port_params *p_params) +{ + struct sdw_mstr_driver *ops; + int ret, banktouse; + + /* configure Master transport params */ + ret = sdw_cfg_mstr_params(sdw_mstr_bs, t_params, p_params); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master xport params config failed\n"); + return ret; + } + + /* configure Slave transport params */ + ret = sdw_cfg_slv_params(sdw_mstr_bs, t_params, + p_params, block->slave_addr); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Slave xport params config failed\n"); + return ret; + } + + /* Get master driver ops */ + ops = sdw_mstr_bs->mstr->driver; + + /* Configure SSP */ + banktouse = sdw_mstr_bs->active_bank; + banktouse = !banktouse; + + if (ops->mstr_ops->set_ssp_interval) { + ret = ops->mstr_ops->set_ssp_interval(sdw_mstr_bs->mstr, + 24, banktouse); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: SSP interval config failed\n"); + return ret; + } + } + + /* Configure Clock */ + if (ops->mstr_ops->set_clock_freq) { + ret = ops->mstr_ops->set_clock_freq(sdw_mstr_bs->mstr, + sdw_mstr_bs->clk_div, banktouse); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Clock config failed\n"); + return ret; + } + } + + return 0; +} + +static int sdw_bra_xport_config_enable(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block, + struct sdw_transport_params *t_params, + struct sdw_port_params *p_params) +{ + int ret; + + /* Prepare sequence */ + ret = sdw_bra_config_ops(sdw_mstr_bs, block, t_params, p_params); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: config operation failed\n"); + return ret; + } + + /* Bank Switch */ + ret = sdw_cfg_frmshp_bnkswtch(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: bank switch failed\n"); + return ret; + } + + /* + * TODO: There may be some slave which doesn't support + * prepare for DP0. We have two options here. + * 1. Just call prepare and ignore error from those + * codec who doesn't support prepare for DP0. + * 2. Get slave capabilities and based on prepare DP0 + * support, Program Slave prepare register. + * Currently going with approach 1, not checking return + * value. + * 3. Try to use existing prep_unprep API both for master + * and slave. 
+ */ + sdw_slv_dp0_prep_unprep(sdw_mstr_bs, block->slave_addr, true); + + /* Prepare Master port */ + ret = sdw_mstr_dp0_prep_unprep(sdw_mstr_bs, true); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master prepare failed\n"); + return ret; + } + + /* Enable sequence */ + ret = sdw_bra_config_ops(sdw_mstr_bs, block, t_params, p_params); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: config operation failed\n"); + return ret; + } + + /* Enable DP0 channel (Slave) */ + ret = sdw_slv_dp0_en_dis(sdw_mstr_bs, true, block->slave_addr); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Slave DP0 enable failed\n"); + return ret; + } + + /* Enable DP0 channel (Master) */ + ret = sdw_mstr_dp0_act_dis(sdw_mstr_bs, true); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master DP0 enable failed\n"); + return ret; + } + + /* Bank Switch */ + ret = sdw_cfg_frmshp_bnkswtch(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: bank switch failed\n"); + return ret; + } + + return 0; +} + +static int sdw_bra_xport_config_disable(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block) +{ + int ret; + + /* Disable DP0 channel (Slave) */ + ret = sdw_slv_dp0_en_dis(sdw_mstr_bs, false, block->slave_addr); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Slave DP0 disable failed\n"); + return ret; + } + + /* Disable DP0 channel (Master) */ + ret = sdw_mstr_dp0_act_dis(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master DP0 disable failed\n"); + return ret; + } + + /* Bank Switch */ + ret = sdw_cfg_frmshp_bnkswtch(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: bank switch failed\n"); + return ret; + } + + /* + * TODO: There may be some slave which doesn't support + * de-prepare for DP0. We have two options here. + * 1. Just call prepare and ignore error from those + * codec who doesn't support de-prepare for DP0. + * 2. Get slave capabilities and based on prepare DP0 + * support, Program Slave prepare register. + * Currently going with approach 1, not checking return + * value. + */ + sdw_slv_dp0_prep_unprep(sdw_mstr_bs, block->slave_addr, false); + + /* De-prepare Master port */ + ret = sdw_mstr_dp0_prep_unprep(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master de-prepare failed\n"); + return ret; + } + + return 0; +} + +int sdw_bus_bra_xport_config(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block, bool enable) +{ + struct sdw_transport_params t_params; + struct sdw_port_params p_params; + int ret; + + /* TODO: + * compute transport parameters based on current clock and + * frameshape. need to check how algorithm should be designed + * for BRA for computing clock, frameshape, SSP and transport params. 
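+ *
+ * Until such an algorithm exists, the values below are fixed
+ * placeholders sized for DP0 BRA traffic on the default 80x10
+ * frame; they are not derived from the current bus allocation.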
+ */ + + /* Transport Parameters */ + t_params.num = 0x0; /* DP 0 */ + t_params.blockpackingmode = 0x0; + t_params.blockgroupcontrol_valid = false; + t_params.blockgroupcontrol = 0x0; + t_params.lanecontrol = 0; + t_params.sample_interval = 10; + + t_params.hstart = 7; + t_params.hstop = 9; + t_params.offset1 = 0; + t_params.offset2 = 0; + + /* Port Parameters */ + p_params.num = 0x0; /* DP 0 */ + + /* Isochronous Mode */ + p_params.port_flow_mode = 0x0; + + /* Normal Mode */ + p_params.port_data_mode = 0x0; + + /* Word length */ + p_params.word_length = 3; + + /* Frameshape and clock params */ + sdw_mstr_bs->clk_div = 1; + sdw_mstr_bs->col = 10; + sdw_mstr_bs->row = 80; + +#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA) + sdw_mstr_bs->bandwidth = 9.6 * 1000 * 1000; +#else + sdw_mstr_bs->bandwidth = 12 * 1000 * 1000; +#endif + + if (enable) { + ret = sdw_bra_xport_config_enable(sdw_mstr_bs, block, + &t_params, &p_params); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Xport params config failed\n"); + return ret; + } + + } else { + ret = sdw_bra_xport_config_disable(sdw_mstr_bs, block); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Xport params de-config failed\n"); + return ret; + } + } + + return 0; +} diff --git a/drivers/sdw/sdw_cnl.c b/drivers/sdw/sdw_cnl.c new file mode 100644 index 000000000000..857530f9e6c4 --- /dev/null +++ b/drivers/sdw/sdw_cnl.c @@ -0,0 +1,2482 @@ +/* + * sdw_cnl.c - Intel SoundWire master controller driver implementation. + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Hardik T Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sdw_cnl_priv.h" + +static inline int cnl_sdw_reg_readl(void __iomem *base, int offset) +{ + int value; + + value = readl(base + offset); + return value; +} + +static inline void cnl_sdw_reg_writel(void __iomem *base, int offset, int value) +{ + writel(value, base + offset); +} + +static inline u16 cnl_sdw_reg_readw(void __iomem *base, int offset) +{ + int value; + + value = readw(base + offset); + return value; +} + +static inline void cnl_sdw_reg_writew(void __iomem *base, int offset, u16 value) +{ + writew(value, base + offset); +} + +static inline int cnl_sdw_port_reg_readl(void __iomem *base, int offset, + int port_num) +{ + return cnl_sdw_reg_readl(base, offset + port_num * 128); +} + +static inline void cnl_sdw_port_reg_writel(u32 __iomem *base, int offset, + int port_num, int value) +{ + return cnl_sdw_reg_writel(base, offset + port_num * 128, value); +} + +struct cnl_sdw_async_msg { + struct completion *async_xfer_complete; + struct sdw_msg *msg; + int length; +}; + +struct cnl_sdw { + struct cnl_sdw_data data; + struct sdw_master *mstr; + irqreturn_t (*thread)(int irq, void *context); + void *thread_context; + struct completion tx_complete; + struct cnl_sdw_port port[CNL_SDW_MAX_PORTS]; + int num_pcm_streams; + struct cnl_sdw_pdi_stream *pcm_streams; + int num_in_pcm_streams; + struct cnl_sdw_pdi_stream *in_pcm_streams; + int num_out_pcm_streams; + struct cnl_sdw_pdi_stream *out_pcm_streams; + int num_pdm_streams; + struct cnl_sdw_pdi_stream *pdm_streams; + int num_in_pdm_streams; + struct cnl_sdw_pdi_stream *in_pdm_streams; + int num_out_pdm_streams; + struct cnl_sdw_pdi_stream *out_pdm_streams; + struct mutex stream_lock; + spinlock_t ctrl_lock; + struct cnl_sdw_async_msg async_msg; + u32 response_buf[0x80]; + bool sdw_link_status; + +}; + +static int sdw_power_up_link(struct cnl_sdw *sdw) +{ + volatile int link_control; + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + /* Try 10 times before timing out */ + int timeout = 10; + int spa_mask, cpa_mask; + + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + spa_mask = (CNL_LCTL_SPA_MASK << (data->inst_id + CNL_LCTL_SPA_SHIFT)); + cpa_mask = (CNL_LCTL_CPA_MASK << (data->inst_id + CNL_LCTL_CPA_SHIFT)); + link_control |= spa_mask; + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_LCTL, link_control); + do { + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + if (link_control & cpa_mask) + break; + timeout--; + /* Wait 20ms before each time */ + msleep(20); + } while (timeout != 0); + /* Read once again to confirm */ + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + if (link_control & cpa_mask) { + dev_info(&mstr->dev, "SoundWire ctrl %d Powered Up\n", + data->inst_id); + sdw->sdw_link_status = 1; + return 0; + } + dev_err(&mstr->dev, "Failed to Power Up the SDW ctrl %d\n", + data->inst_id); + return -EIO; +} + +static void sdw_power_down_link(struct cnl_sdw *sdw) +{ + volatile int link_control; + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + /* Retry 10 times before giving up */ + int timeout = 10; + int spa_mask, cpa_mask; + + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + spa_mask = ~(CNL_LCTL_SPA_MASK 
<< (data->inst_id + CNL_LCTL_SPA_SHIFT)); + cpa_mask = (CNL_LCTL_CPA_MASK << (data->inst_id + CNL_LCTL_CPA_SHIFT)); + link_control &= spa_mask; + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_LCTL, link_control); + do { + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + if (!(link_control & cpa_mask)) + break; + timeout--; + /* Wait for 20ms before each retry */ + msleep(20); + } while (timeout != 0); + /* Read once again to confirm */ + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + if (!(link_control & cpa_mask)) { + dev_info(&mstr->dev, "SoundWire ctrl %d Powered Down\n", + data->inst_id); + sdw->sdw_link_status = 0; + return; + } + dev_err(&mstr->dev, "Failed to Power Down the SDW ctrl %d\n", + data->inst_id); +} + +static void sdw_init_phyctrl(struct cnl_sdw *sdw) +{ + /* TODO: Initialize based on hardware requirement */ + +} + +static void sdw_switch_to_mip(struct cnl_sdw *sdw) +{ + u16 ioctl; + u16 act = 0; + struct cnl_sdw_data *data = &sdw->data; + int ioctl_offset = SDW_CNL_IOCTL + (data->inst_id * + SDW_CNL_IOCTL_REG_OFFSET); + int act_offset = SDW_CNL_CTMCTL + (data->inst_id * + SDW_CNL_CTMCTL_REG_OFFSET); + + ioctl = cnl_sdw_reg_readw(data->sdw_shim, ioctl_offset); + + ioctl &= ~(CNL_IOCTL_DOE_MASK << CNL_IOCTL_DOE_SHIFT); + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl &= ~(CNL_IOCTL_DO_MASK << CNL_IOCTL_DO_SHIFT); + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl |= CNL_IOCTL_MIF_MASK << CNL_IOCTL_MIF_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl &= ~(CNL_IOCTL_BKE_MASK << CNL_IOCTL_BKE_SHIFT); + ioctl &= ~(CNL_IOCTL_COE_MASK << CNL_IOCTL_COE_SHIFT); + + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + act |= 0x1 << CNL_CTMCTL_DOAIS_SHIFT; + act |= CNL_CTMCTL_DACTQE_MASK << CNL_CTMCTL_DACTQE_SHIFT; + act |= CNL_CTMCTL_DODS_MASK << CNL_CTMCTL_DODS_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, act_offset, act); +} + +static void sdw_switch_to_glue(struct cnl_sdw *sdw) +{ + u16 ioctl; + struct cnl_sdw_data *data = &sdw->data; + int ioctl_offset = SDW_CNL_IOCTL + (data->inst_id * + SDW_CNL_IOCTL_REG_OFFSET); + + ioctl = cnl_sdw_reg_readw(data->sdw_shim, ioctl_offset); + ioctl |= CNL_IOCTL_BKE_MASK << CNL_IOCTL_BKE_SHIFT; + ioctl |= CNL_IOCTL_COE_MASK << CNL_IOCTL_COE_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl &= ~(CNL_IOCTL_MIF_MASK << CNL_IOCTL_MIF_SHIFT); + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); +} + +static void sdw_init_shim(struct cnl_sdw *sdw) +{ + u16 ioctl = 0; + struct cnl_sdw_data *data = &sdw->data; + int ioctl_offset = SDW_CNL_IOCTL + (data->inst_id * + SDW_CNL_IOCTL_REG_OFFSET); + + + ioctl |= CNL_IOCTL_BKE_MASK << CNL_IOCTL_BKE_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl |= CNL_IOCTL_WPDD_MASK << CNL_IOCTL_WPDD_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl |= CNL_IOCTL_DO_MASK << CNL_IOCTL_DO_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl |= CNL_IOCTL_DOE_MASK << CNL_IOCTL_DOE_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); +} + +static int sdw_config_update(struct cnl_sdw *sdw) +{ + struct cnl_sdw_data *data = &sdw->data; + struct sdw_master *mstr = sdw->mstr; + int sync_reg, syncgo_mask; + volatile int config_update = 0; + volatile int sync_update = 0; + /* Try 10 times before giving up on configuration update */ + int timeout = 10; + int config_updated = 0; + + config_update |= 
MCP_CONFIGUPDATE_CONFIGUPDATE_MASK << + MCP_CONFIGUPDATE_CONFIGUPDATE_SHIFT; + /* Bit is self-cleared when configuration gets updated. */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONFIGUPDATE, + config_update); + + /* + * Set SYNCGO bit for Master(s) running in aggregated mode + * (MMModeEN = 1). This action causes all gSyncs of all Master IPs + * to be unmasked and asserted at the currently active gSync rate. + * The initialization-pending Master IP SoundWire bus clock will + * start up synchronizing to gSync, leading to bus reset entry, + * subsequent exit, and 1st Frame generation aligning to gSync. + * Note that this is done in order to overcome hardware bug related + * to mis-alignment of gSync and frame. + */ + if (mstr->link_sync_mask) { + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + sync_reg |= (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + syncgo_mask = (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT); + + do { + sync_update = cnl_sdw_reg_readl(data->sdw_shim, + SDW_CNL_SYNC); + if ((sync_update & syncgo_mask) == 0) + break; + + msleep(20); + timeout--; + + } while (timeout); + + if ((sync_update & syncgo_mask) != 0) { + dev_err(&mstr->dev, "Failed to set sync go\n"); + return -EIO; + } + + /* Reset timeout */ + timeout = 10; + } + + /* Wait for config update bit to be self cleared */ + do { + config_update = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CONFIGUPDATE); + if ((config_update & + MCP_CONFIGUPDATE_CONFIGUPDATE_MASK) == 0) { + config_updated = 1; + break; + } + timeout--; + /* Wait for 20ms between each try */ + msleep(20); + + } while (timeout != 0); + if (!config_updated) { + dev_err(&mstr->dev, "SoundWire update failed\n"); + return -EIO; + } + return 0; +} + +static void sdw_enable_interrupt(struct cnl_sdw *sdw) +{ + struct cnl_sdw_data *data = &sdw->data; + int int_mask = 0; + + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SLAVEINTMASK0, + MCP_SLAVEINTMASK0_MASK); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SLAVEINTMASK1, + MCP_SLAVEINTMASK1_MASK); + /* Enable slave interrupt mask */ + int_mask |= MCP_INTMASK_SLAVERESERVED_MASK << + MCP_INTMASK_SLAVERESERVED_SHIFT; + int_mask |= MCP_INTMASK_SLAVEALERT_MASK << + MCP_INTMASK_SLAVEALERT_SHIFT; + int_mask |= MCP_INTMASK_SLAVEATTACHED_MASK << + MCP_INTMASK_SLAVEATTACHED_SHIFT; + int_mask |= MCP_INTMASK_SLAVENOTATTACHED_MASK << + MCP_INTMASK_SLAVENOTATTACHED_SHIFT; + int_mask |= MCP_INTMASK_CONTROLBUSCLASH_MASK << + MCP_INTMASK_CONTROLBUSCLASH_SHIFT; + int_mask |= MCP_INTMASK_DATABUSCLASH_MASK << + MCP_INTMASK_DATABUSCLASH_SHIFT; + int_mask |= MCP_INTMASK_RXWL_MASK << + MCP_INTMASK_RXWL_SHIFT; + int_mask |= MCP_INTMASK_IRQEN_MASK << + MCP_INTMASK_IRQEN_SHIFT; + int_mask |= MCP_INTMASK_DPPDIINT_MASK << + MCP_INTMASK_DPPDIINT_SHIFT; + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_INTMASK, int_mask); +} + +static int sdw_pcm_pdi_init(struct cnl_sdw *sdw) +{ + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + int pcm_cap; + int pcm_cap_offset = SDW_CNL_PCMSCAP + (data->inst_id * + SDW_CNL_PCMSCAP_REG_OFFSET); + int ch_cnt_offset; + int i; + + pcm_cap = cnl_sdw_reg_readw(data->sdw_shim, pcm_cap_offset); + sdw->num_pcm_streams = (pcm_cap >> CNL_PCMSCAP_BSS_SHIFT) & + CNL_PCMSCAP_BSS_MASK; + dev_info(&mstr->dev, "Number of Bidirectional PCM stream = %d\n", + sdw->num_pcm_streams); + sdw->pcm_streams = devm_kzalloc(&mstr->dev, + sdw->num_pcm_streams * sizeof(struct cnl_sdw_pdi_stream), + 
GFP_KERNEL); + if (!sdw->pcm_streams) + return -ENOMEM; + /* Two of the PCM streams are reserved for bulk transfers */ + sdw->pcm_streams -= SDW_CNL_PCM_PDI_NUM_OFFSET; + for (i = SDW_CNL_PCM_PDI_NUM_OFFSET; i < sdw->num_pcm_streams; i++) { + ch_cnt_offset = SDW_CNL_PCMSCHC + + (data->inst_id * SDW_CNL_PCMSCHC_REG_OFFSET) + + ((i + SDW_CNL_PCM_PDI_NUM_OFFSET) * 0x2); + + sdw->pcm_streams[i].ch_cnt = cnl_sdw_reg_readw(data->sdw_shim, + ch_cnt_offset); + /* Zero based value in register */ + sdw->pcm_streams[i].ch_cnt++; + sdw->pcm_streams[i].pdi_num = i; + sdw->pcm_streams[i].allocated = false; + dev_info(&mstr->dev, "CH Count for stream %d is %d\n", + i, sdw->pcm_streams[i].ch_cnt); + } + return 0; +} + +static int sdw_pdm_pdi_init(struct cnl_sdw *sdw) +{ + int i; + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + int pdm_cap, pdm_ch_count, total_pdm_streams; + int pdm_cap_offset = SDW_CNL_PDMSCAP + + (data->inst_id * SDW_CNL_PDMSCAP_REG_OFFSET); + pdm_cap = cnl_sdw_reg_readw(data->sdw_shim, pdm_cap_offset); + sdw->num_pdm_streams = (pdm_cap >> CNL_PDMSCAP_BSS_SHIFT) & + CNL_PDMSCAP_BSS_MASK; + + sdw->pdm_streams = devm_kzalloc(&mstr->dev, + sdw->num_pdm_streams * sizeof(struct cnl_sdw_pdi_stream), + GFP_KERNEL); + if (!sdw->pdm_streams) + return -ENOMEM; + + sdw->num_in_pdm_streams = (pdm_cap >> CNL_PDMSCAP_ISS_SHIFT) & + CNL_PDMSCAP_ISS_MASK; + + sdw->in_pdm_streams = devm_kzalloc(&mstr->dev, + sdw->num_in_pdm_streams * sizeof(struct cnl_sdw_pdi_stream), + GFP_KERNEL); + + if (!sdw->in_pdm_streams) + return -ENOMEM; + + sdw->num_out_pdm_streams = (pdm_cap >> CNL_PDMSCAP_OSS_SHIFT) & + CNL_PDMSCAP_OSS_MASK; + /* Zero based value in register */ + sdw->out_pdm_streams = devm_kzalloc(&mstr->dev, + sdw->num_out_pdm_streams * sizeof(struct cnl_sdw_pdi_stream), + GFP_KERNEL); + if (!sdw->out_pdm_streams) + return -ENOMEM; + + total_pdm_streams = sdw->num_pdm_streams + + sdw->num_in_pdm_streams + + sdw->num_out_pdm_streams; + + pdm_ch_count = (pdm_cap >> CNL_PDMSCAP_CPSS_SHIFT) & + CNL_PDMSCAP_CPSS_MASK; + for (i = 0; i < sdw->num_pdm_streams; i++) { + sdw->pdm_streams[i].ch_cnt = pdm_ch_count; + sdw->pdm_streams[i].pdi_num = i + SDW_CNL_PDM_PDI_NUM_OFFSET; + sdw->pdm_streams[i].allocated = false; + } + for (i = 0; i < sdw->num_in_pdm_streams; i++) { + sdw->in_pdm_streams[i].ch_cnt = pdm_ch_count; + sdw->in_pdm_streams[i].pdi_num = i + SDW_CNL_PDM_PDI_NUM_OFFSET; + sdw->in_pdm_streams[i].allocated = false; + } + for (i = 0; i < sdw->num_out_pdm_streams; i++) { + sdw->out_pdm_streams[i].ch_cnt = pdm_ch_count; + sdw->out_pdm_streams[i].pdi_num = + i + SDW_CNL_PDM_PDI_NUM_OFFSET; + sdw->out_pdm_streams[i].allocated = false; + } + return 0; +} + +static int sdw_port_pdi_init(struct cnl_sdw *sdw) +{ + int i, ret = 0; + + for (i = 0; i < CNL_SDW_MAX_PORTS; i++) { + sdw->port[i].port_num = i; + sdw->port[i].allocated = false; + } + ret = sdw_pcm_pdi_init(sdw); + if (ret) + return ret; + ret = sdw_pdm_pdi_init(sdw); + + return ret; +} + +static int sdw_init(struct cnl_sdw *sdw, bool is_first_init) +{ + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + int mcp_config, mcp_control, sync_reg, mcp_clockctrl; + volatile int sync_update = 0; + int timeout = 10; /* Try 10 times before timing out */ + int ret = 0; + + /* Power up the link controller */ + ret = sdw_power_up_link(sdw); + if (ret) + return ret; + + /* Initialize the IO control registers */ + sdw_init_shim(sdw); + + /* Switch the ownership to Master IP from glue logic */ + 
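/* Out of reset, the SHIM glue logic owns the SoundWire IOs; after + * this call the Master IP drives them directly. Ownership is handed + * back via sdw_switch_to_glue() when the link powers down. */ +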
sdw_switch_to_mip(sdw); + + /* Set SyncPRD period */ + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + sync_reg |= (SDW_CNL_DEFAULT_SYNC_PERIOD << CNL_SYNC_SYNCPRD_SHIFT); + + /* Set SyncPU bit */ + sync_reg |= (0x1 << CNL_SYNC_SYNCCPU_SHIFT); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + + do { + sync_update = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + if ((sync_update & CNL_SYNC_SYNCCPU_MASK) == 0) + break; + timeout--; + /* Wait 20ms before each time */ + msleep(20); + } while (timeout != 0); + if ((sync_update & CNL_SYNC_SYNCCPU_MASK) != 0) { + dev_err(&mstr->dev, "Fail to set sync period\n"); + return -EINVAL; + } + + /* + * Set CMDSYNC bit based on Master ID + * Note that this bit is set only for the Master which will be + * running in aggregated mode (MMModeEN = 1). By doing + * this the gSync to Master IP to be masked inactive. + * Note that this is done in order to overcome hardware bug related + * to mis-alignment of gSync and frame. + */ + if (mstr->link_sync_mask) { + + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + sync_reg |= (1 << (data->inst_id + CNL_SYNC_CMDSYNC_SHIFT)); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + } + + /* Set clock divider to default value in default bank */ + mcp_clockctrl = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CLOCKCTRL0); + mcp_clockctrl |= SDW_CNL_DEFAULT_CLK_DIVIDER; + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CLOCKCTRL0, + mcp_clockctrl); + + /* Set the Frame shape init to default value */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_FRAMESHAPEINIT, + SDW_CNL_DEFAULT_FRAME_SHAPE); + + + /* Set the SSP interval to default value for both banks */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SSPCTRL0, + SDW_CNL_DEFAULT_SSP_INTERVAL); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SSPCTRL1, + SDW_CNL_DEFAULT_SSP_INTERVAL); + + /* Set command acceptance mode. This is required because when + * Master broadcasts the clock_stop command to slaves, slaves + * might be already suspended, so this return NO ACK, in that + * case also master should go to clock stop mode. + */ + mcp_control = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CONTROL); + mcp_control |= (MCP_CONTROL_CMDACCEPTMODE_MASK << + MCP_CONTROL_CMDACCEPTMODE_SHIFT); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONTROL, mcp_control); + + + mcp_config = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_CONFIG); + /* Set Max cmd retry to 15 times */ + mcp_config |= (CNL_SDW_MAX_CMD_RETRIES << + MCP_CONFIG_MAXCMDRETRY_SHIFT); + + /* Set Ping request to ping delay to 15 frames. 
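+ * (i.e. the longest the IP should wait before issuing a PING once a + * Slave has signalled an in-band ping request; the value is in frames).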
+ * Spec supports 32 max frames + */ + mcp_config |= (CNL_SDW_MAX_PREQ_DELAY << + MCP_CONFIG_MAXPREQDELAY_SHIFT); + + /* If master is synchronized to some other master set Multimode */ + if (mstr->link_sync_mask) { + mcp_config |= (MCP_CONFIG_MMMODEEN_MASK << + MCP_CONFIG_MMMODEEN_SHIFT); + mcp_config |= (MCP_CONFIG_SSPMODE_MASK << + MCP_CONFIG_SSPMODE_SHIFT); + } else { + mcp_config &= ~(MCP_CONFIG_MMMODEEN_MASK << + MCP_CONFIG_MMMODEEN_SHIFT); + mcp_config &= ~(MCP_CONFIG_SSPMODE_MASK << + MCP_CONFIG_SSPMODE_SHIFT); + } + + /* Disable automatic bus release */ + mcp_config &= ~(MCP_CONFIG_BRELENABLE_MASK << + MCP_CONFIG_BRELENABLE_SHIFT); + + /* Disable sniffer mode now */ + mcp_config &= ~(MCP_CONFIG_SNIFFEREN_MASK << + MCP_CONFIG_SNIFFEREN_SHIFT); + + /* Set the command mode for Tx and Rx command */ + mcp_config &= ~(MCP_CONFIG_CMDMODE_MASK << + MCP_CONFIG_CMDMODE_SHIFT); + + /* Set operation mode to normal */ + mcp_config &= ~(MCP_CONFIG_OPERATIONMODE_MASK << + MCP_CONFIG_OPERATIONMODE_SHIFT); + mcp_config |= ((MCP_CONFIG_OPERATIONMODE_NORMAL & + MCP_CONFIG_OPERATIONMODE_MASK) << + MCP_CONFIG_OPERATIONMODE_SHIFT); + + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONFIG, mcp_config); + + /* Initialize the phy control registers. */ + sdw_init_phyctrl(sdw); + + if (is_first_init) { + /* Initlaize the ports */ + ret = sdw_port_pdi_init(sdw); + if (ret) { + dev_err(&mstr->dev, "SoundWire controller init failed %d\n", + data->inst_id); + sdw_power_down_link(sdw); + return ret; + } + } + + /* Lastly enable interrupts */ + sdw_enable_interrupt(sdw); + + /* Update soundwire configuration */ + return sdw_config_update(sdw); +} + +static int sdw_alloc_pcm_stream(struct cnl_sdw *sdw, + struct cnl_sdw_port *port, int ch_cnt, + enum sdw_data_direction direction) +{ + int num_pcm_streams, pdi_ch_map = 0, stream_id; + struct cnl_sdw_pdi_stream *stream, *pdi_stream; + unsigned int i; + unsigned int ch_map_offset, port_ctrl_offset, pdi_config_offset; + struct sdw_master *mstr = sdw->mstr; + unsigned int port_ctrl = 0, pdi_config = 0, channel_mask; + unsigned int stream_config; + + /* Currently PCM supports only bi-directional streams only */ + num_pcm_streams = sdw->num_pcm_streams; + stream = sdw->pcm_streams; + + mutex_lock(&sdw->stream_lock); + for (i = SDW_CNL_PCM_PDI_NUM_OFFSET; i < num_pcm_streams; i++) { + if (stream[i].allocated == false) { + stream[i].allocated = true; + stream[i].port_num = port->port_num; + port->pdi_stream = &stream[i]; + break; + } + } + mutex_unlock(&sdw->stream_lock); + if (!port->pdi_stream) { + dev_err(&mstr->dev, "Unable to allocate stream for PCM\n"); + return -EINVAL; + } + pdi_stream = port->pdi_stream; + /* We didnt get enough PDI streams, so free the allocated + * PDI streams. 
Free the port as well and return with error + */ + pdi_stream->l_ch_num = 0; + pdi_stream->h_ch_num = ch_cnt - 1; + ch_map_offset = SDW_CNL_PCMSCHM + + (SDW_CNL_PCMSCHM_REG_OFFSET * mstr->nr) + + (SDW_PCM_STRM_START_INDEX * pdi_stream->pdi_num); + if (port->direction == SDW_DATA_DIR_IN) + pdi_ch_map |= (CNL_PCMSYCM_DIR_MASK << CNL_PCMSYCM_DIR_SHIFT); + else + pdi_ch_map &= ~(CNL_PCMSYCM_DIR_MASK << CNL_PCMSYCM_DIR_SHIFT); + /* TODO: Remove this hardcoding */ + stream_id = mstr->nr * 16 + pdi_stream->pdi_num + 5; + pdi_stream->sdw_pdi_num = stream_id; + pdi_ch_map |= (stream_id & CNL_PCMSYCM_STREAM_MASK) << + CNL_PCMSYCM_STREAM_SHIFT; + pdi_ch_map |= (pdi_stream->l_ch_num & + CNL_PCMSYCM_LCHAN_MASK) << + CNL_PCMSYCM_LCHAN_SHIFT; + pdi_ch_map |= (0xF & CNL_PCMSYCM_HCHAN_MASK) << + CNL_PCMSYCM_HCHAN_SHIFT; + cnl_sdw_reg_writew(sdw->data.sdw_shim, ch_map_offset, + pdi_ch_map); + /* If direction is input, port is sink port*/ + if (direction == SDW_DATA_DIR_IN) + port_ctrl |= (PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + else + port_ctrl &= ~(PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + + port_ctrl_offset = SDW_CNL_PORTCTRL + (port->port_num * + SDW_CNL_PORT_REG_OFFSET); + cnl_sdw_reg_writel(sdw->data.sdw_regs, port_ctrl_offset, port_ctrl); + + pdi_config |= ((port->port_num & PDINCONFIG_PORT_NUMBER_MASK) << + PDINCONFIG_PORT_NUMBER_SHIFT); + + channel_mask = (1 << ch_cnt) - 1; + pdi_config |= (channel_mask << PDINCONFIG_CHANNEL_MASK_SHIFT); + /* TODO: Remove below hardcodings */ + pdi_config_offset = (SDW_CNL_PDINCONFIG0 + + (pdi_stream->pdi_num * 16)); + cnl_sdw_reg_writel(sdw->data.sdw_regs, pdi_config_offset, pdi_config); + + stream_config = cnl_sdw_reg_readl(sdw->data.alh_base, + (pdi_stream->sdw_pdi_num * ALH_CNL_STRMZCFG_OFFSET)); + stream_config |= (CNL_STRMZCFG_DMAT_VAL & CNL_STRMZCFG_DMAT_MASK) << + CNL_STRMZCFG_DMAT_SHIFT; + stream_config |= ((ch_cnt - 1) & CNL_STRMZCFG_CHAN_MASK) << + CNL_STRMZCFG_CHAN_SHIFT; + cnl_sdw_reg_writel(sdw->data.alh_base, + (pdi_stream->sdw_pdi_num * ALH_CNL_STRMZCFG_OFFSET), + stream_config); + return 0; +} + +static int sdw_alloc_pdm_stream(struct cnl_sdw *sdw, + struct cnl_sdw_port *port, int ch_cnt, int direction) +{ + int num_pdm_streams; + struct cnl_sdw_pdi_stream *stream; + int i; + unsigned int port_ctrl_offset, pdi_config_offset; + unsigned int port_ctrl = 0, pdi_config = 0, channel_mask; + + /* Currently PDM supports either Input or Output Streams */ + if (direction == SDW_DATA_DIR_IN) { + num_pdm_streams = sdw->num_in_pdm_streams; + stream = sdw->in_pdm_streams; + } else { + num_pdm_streams = sdw->num_out_pdm_streams; + stream = sdw->out_pdm_streams; + } + mutex_lock(&sdw->stream_lock); + for (i = 0; i < num_pdm_streams; i++) { + if (stream[i].allocated == false) { + stream[i].allocated = true; + stream[i].port_num = port->port_num; + port->pdi_stream = &stream[i]; + break; + } + } + mutex_unlock(&sdw->stream_lock); + if (!port->pdi_stream) + return -EINVAL; + /* If direction is input, port is sink port*/ + if (direction == SDW_DATA_DIR_IN) + port_ctrl |= (PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + else + port_ctrl &= ~(PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + + port_ctrl_offset = SDW_CNL_PORTCTRL + (port->port_num * + SDW_CNL_PORT_REG_OFFSET); + cnl_sdw_reg_writel(sdw->data.sdw_regs, port_ctrl_offset, port_ctrl); + + pdi_config |= ((port->port_num & PDINCONFIG_PORT_NUMBER_MASK) << + PDINCONFIG_PORT_NUMBER_SHIFT); + + channel_mask = (1 << ch_cnt) - 
1; + pdi_config |= (channel_mask << PDINCONFIG_CHANNEL_MASK_SHIFT); + /* TODO: Remove below hardcodings */ + pdi_config_offset = (SDW_CNL_PDINCONFIG0 + (stream[i].pdi_num * 16)); + cnl_sdw_reg_writel(sdw->data.sdw_regs, pdi_config_offset, pdi_config); + + return 0; +} + +struct cnl_sdw_port *cnl_sdw_alloc_port(struct sdw_master *mstr, int ch_count, + enum sdw_data_direction direction, + enum cnl_sdw_pdi_stream_type stream_type) +{ + struct cnl_sdw *sdw; + struct cnl_sdw_port *port = NULL; + int i, ret = 0; + struct num_pdi_streams; + + sdw = sdw_master_get_drvdata(mstr); + + mutex_lock(&sdw->stream_lock); + for (i = 1; i < CNL_SDW_MAX_PORTS; i++) { + if (sdw->port[i].allocated == false) { + port = &sdw->port[i]; + port->allocated = true; + port->direction = direction; + port->ch_cnt = ch_count; + break; + } + } + mutex_unlock(&sdw->stream_lock); + if (!port) { + dev_err(&mstr->dev, "Unable to allocate port\n"); + return NULL; + } + port->pdi_stream = NULL; + if (stream_type == CNL_SDW_PDI_TYPE_PDM) + ret = sdw_alloc_pdm_stream(sdw, port, ch_count, direction); + else + ret = sdw_alloc_pcm_stream(sdw, port, ch_count, direction); + if (!ret) + return port; + + dev_err(&mstr->dev, "Unable to allocate stream\n"); + mutex_lock(&sdw->stream_lock); + port->allocated = false; + mutex_unlock(&sdw->stream_lock); + return NULL; +} +EXPORT_SYMBOL_GPL(cnl_sdw_alloc_port); + +void cnl_sdw_free_port(struct sdw_master *mstr, int port_num) +{ + int i; + struct cnl_sdw *sdw; + struct cnl_sdw_port *port = NULL; + + sdw = sdw_master_get_drvdata(mstr); + for (i = 1; i < CNL_SDW_MAX_PORTS; i++) { + if (sdw->port[i].port_num == port_num) { + port = &sdw->port[i]; + break; + } + } + if (!port) + return; + mutex_lock(&sdw->stream_lock); + port->pdi_stream->allocated = false; + port->pdi_stream = NULL; + port->allocated = false; + mutex_unlock(&sdw->stream_lock); +} +EXPORT_SYMBOL_GPL(cnl_sdw_free_port); + +static int cnl_sdw_update_slave_status(struct cnl_sdw *sdw, int slave_intstat0, + int slave_intstat1) +{ + int i; + struct sdw_status slave_status; + u64 slaves_stat, slave_stat; + int ret = 0; + + memset(&slave_status, 0x0, sizeof(slave_status)); + slaves_stat = (u64) slave_intstat1 << + SDW_CNL_SLAVES_STAT_UPPER_DWORD_SHIFT; + slaves_stat |= slave_intstat0; + for (i = 0; i <= SOUNDWIRE_MAX_DEVICES; i++) { + slave_stat = slaves_stat >> (i * SDW_CNL_SLAVE_STATUS_BITS); + if (slave_stat & MCP_SLAVEINTSTAT_NOT_PRESENT_MASK) + slave_status.status[i] = SDW_SLAVE_STAT_NOT_PRESENT; + else if (slave_stat & MCP_SLAVEINTSTAT_ATTACHED_MASK) + slave_status.status[i] = SDW_SLAVE_STAT_ATTACHED_OK; + else if (slave_stat & MCP_SLAVEINTSTAT_ALERT_MASK) + slave_status.status[i] = SDW_SLAVE_STAT_ALERT; + else if (slave_stat & MCP_SLAVEINTSTAT_RESERVED_MASK) + slave_status.status[i] = SDW_SLAVE_STAT_RESERVED; + } + ret = sdw_master_update_slv_status(sdw->mstr, &slave_status); + return ret; +} + +static void cnl_sdw_read_response(struct cnl_sdw *sdw) +{ + struct cnl_sdw_data *data = &sdw->data; + int num_res = 0, i; + u32 cmd_base = SDW_CNL_MCP_COMMAND_BASE; + + num_res = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_FIFOSTAT); + num_res &= MCP_RX_FIFO_AVAIL_MASK; + for (i = 0; i < num_res; i++) { + sdw->response_buf[i] = cnl_sdw_reg_readl(data->sdw_regs, + cmd_base); + cmd_base += SDW_CNL_CMD_WORD_LEN; + } +} + +static enum sdw_command_response sdw_fill_message_response( + struct sdw_master *mstr, + struct sdw_msg *msg, + int count, int offset) +{ + int i, j; + int no_ack = 0, nack = 0; + struct cnl_sdw *sdw = 
sdw_master_get_drvdata(mstr); + + /* Scan every queued response for a missing ACK or a NACK */ + for (i = 0; i < count; i++) { + if (!(MCP_RESPONSE_ACK_MASK & sdw->response_buf[i])) { + no_ack = 1; + dev_err(&mstr->dev, "Ack not received\n"); + if ((MCP_RESPONSE_NACK_MASK & + sdw->response_buf[i])) { + nack = 1; + dev_err(&mstr->dev, "NACK received\n"); + } + } + } + if (nack) { + dev_err(&mstr->dev, "NACK detected for slave %d\n", msg->slave_addr); + msg->len = 0; + return -EREMOTEIO; + } else if (no_ack) { + dev_err(&mstr->dev, "Command ignored for slave %d\n", msg->slave_addr); + msg->len = 0; + return -EREMOTEIO; + } + if (msg->flag == SDW_MSG_FLAG_WRITE) + return 0; + /* Responses and commands share the same base address */ + for (j = 0; j < count; j++) + msg->buf[j + offset] = + (sdw->response_buf[j] >> MCP_RESPONSE_RDATA_SHIFT); + return 0; +} + + +irqreturn_t cnl_sdw_irq_handler(int irq, void *context) +{ + struct cnl_sdw *sdw = context; + volatile int int_status, status, wake_sts; + + struct cnl_sdw_data *data = &sdw->data; + volatile int slave_intstat0 = 0, slave_intstat1 = 0; + struct sdw_master *mstr = sdw->mstr; + + /* + * Return if the IP is in the power-down state. The interrupt can + * still fire since this is a shared irq. + */ + if (!sdw->sdw_link_status) + return IRQ_NONE; + + int_status = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_INTSTAT); + status = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_STAT); + slave_intstat0 = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_SLAVEINTSTAT0); + slave_intstat1 = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_SLAVEINTSTAT1); + wake_sts = cnl_sdw_reg_readw(data->sdw_shim, + SDW_CNL_SNDWWAKESTS_REG_OFFSET); + cnl_sdw_reg_writew(data->sdw_shim, SDW_CNL_SNDWWAKESTS_REG_OFFSET, + wake_sts); + + if (!(int_status & (MCP_INTSTAT_IRQ_MASK << MCP_INTSTAT_IRQ_SHIFT))) + return IRQ_NONE; + + if (int_status & (MCP_INTSTAT_RXWL_MASK << MCP_INTSTAT_RXWL_SHIFT)) { + cnl_sdw_read_response(sdw); + if (sdw->async_msg.async_xfer_complete) { + sdw_fill_message_response(mstr, sdw->async_msg.msg, + sdw->async_msg.length, 0); + complete(sdw->async_msg.async_xfer_complete); + sdw->async_msg.async_xfer_complete = NULL; + sdw->async_msg.msg = NULL; + } else + complete(&sdw->tx_complete); + } + if (int_status & (MCP_INTSTAT_CONTROLBUSCLASH_MASK << + MCP_INTSTAT_CONTROLBUSCLASH_SHIFT)) { + /* Some Slave is behaving badly: it is driving the + * data line during the control word bits. + */ + dev_err_ratelimited(&mstr->dev, "Bus clash detected for control word\n"); + WARN_ONCE(1, "Bus clash detected for control word\n"); + } + if (int_status & (MCP_INTSTAT_DATABUSCLASH_MASK << + MCP_INTSTAT_DATABUSCLASH_SHIFT)) { + /* More than one Slave is trying to drive the bus: the + * ownership of the bus data bits is misconfigured, + * or one of the + * Slaves is behaving badly.
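+ * A clash can also follow a transport programming error that maps + * two Ports onto overlapping bit slots in the frame.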
+ */ + dev_err_ratelimited(&mstr->dev, "Bus clash detected for data word\n"); + WARN_ONCE(1, "Bus clash detected for data word\n"); + } + + if (int_status & (MCP_INTSTAT_SLAVE_STATUS_CHANGED_MASK << + MCP_INTSTAT_SLAVE_STATUS_CHANGED_SHIFT)) { + dev_info(&mstr->dev, "Slave status change\n"); + cnl_sdw_update_slave_status(sdw, slave_intstat0, + slave_intstat1); + } + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SLAVEINTSTAT0, + slave_intstat0); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SLAVEINTSTAT1, + slave_intstat1); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_INTSTAT, int_status); + return IRQ_HANDLED; +} + +static enum sdw_command_response cnl_program_scp_addr(struct sdw_master *mstr, + struct sdw_msg *msg) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + u32 cmd_base = SDW_CNL_MCP_COMMAND_BASE; + u32 cmd_data[2] = {0, 0}; + unsigned long time_left; + int no_ack = 0, nack = 0; + int i; + + /* Since we are programming 2 commands, program the + * RX watermark level at 2 + */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_FIFOLEVEL, 2); + /* Program device address */ + cmd_data[0] |= (msg->slave_addr & MCP_COMMAND_DEV_ADDR_MASK) << + MCP_COMMAND_DEV_ADDR_SHIFT; + /* Write command to program the scp_addr1 register */ + cmd_data[0] |= (0x3 << MCP_COMMAND_COMMAND_SHIFT); + cmd_data[1] = cmd_data[0]; + /* scp_addr1 register address */ + cmd_data[0] |= (SDW_SCP_ADDRPAGE1 << MCP_COMMAND_REG_ADDR_L_SHIFT); + cmd_data[1] |= (SDW_SCP_ADDRPAGE2 << MCP_COMMAND_REG_ADDR_L_SHIFT); + cmd_data[0] |= msg->addr_page1; + cmd_data[1] |= msg->addr_page2; + + cnl_sdw_reg_writel(data->sdw_regs, cmd_base, cmd_data[0]); + cmd_base += SDW_CNL_CMD_WORD_LEN; + cnl_sdw_reg_writel(data->sdw_regs, cmd_base, cmd_data[1]); + + /* Wait up to 3 seconds; the timeout is in jiffies */ + time_left = wait_for_completion_timeout(&sdw->tx_complete, + 3 * HZ); + if (!time_left) { + dev_err(&mstr->dev, "Controller timed out\n"); + msg->len = 0; + return -ETIMEDOUT; + } + + for (i = 0; i < CNL_SDW_SCP_ADDR_REGS; i++) { + if (!(MCP_RESPONSE_ACK_MASK & sdw->response_buf[i])) { + no_ack = 1; + dev_err(&mstr->dev, "Ack not received\n"); + if ((MCP_RESPONSE_NACK_MASK & sdw->response_buf[i])) { + nack = 1; + dev_err(&mstr->dev, "NACK received\n"); + } + } + } + /* We don't return an error if NACK or no ACK is detected for the + * broadcast address, because some Slaves may support the SCP address + * page while others may not. This is not ideal, since we cannot tell + * whether a NACK means the Slave does not support SCP_addrpage or is + * a genuine NACK caused by bus errors. We are also not sure whether + * Slaves report NACK or no ACK for scp_addrpage programming that they + * do not support. The spec is not clear about this.
* This needs to be thought through. + */ + /* Device number 15 is the broadcast address */ + if (nack && (msg->slave_addr != 15)) { + dev_err(&mstr->dev, "SCP_addrpage write NACKed for slave %d\n", msg->slave_addr); + return -EREMOTEIO; + } else if (no_ack && (msg->slave_addr != 15)) { + dev_err(&mstr->dev, "SCP_addrpage write ignored for slave %d\n", msg->slave_addr); + return -EREMOTEIO; + } else + return 0; + +} + +static enum sdw_command_response sdw_xfer_msg(struct sdw_master *mstr, + struct sdw_msg *msg, int cmd, int offset, int count, bool async) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + int j; + u32 cmd_base = SDW_CNL_MCP_COMMAND_BASE; + u32 cmd_data = 0; + unsigned long time_left; + u16 addr = msg->addr; + + /* Program the watermark level up to the command count */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_FIFOLEVEL, count); + + cmd_base = SDW_CNL_MCP_COMMAND_BASE; + for (j = 0; j < count; j++) { + /* Program device address */ + cmd_data = 0; + cmd_data |= (msg->slave_addr & + MCP_COMMAND_DEV_ADDR_MASK) << + MCP_COMMAND_DEV_ADDR_SHIFT; + /* Program read/write command */ + cmd_data |= (cmd << MCP_COMMAND_COMMAND_SHIFT); + /* Program the incrementing register address */ + cmd_data |= (addr++ << MCP_COMMAND_REG_ADDR_L_SHIFT); + /* Program the data if it is a write command */ + if (msg->flag == SDW_MSG_FLAG_WRITE) + cmd_data |= + msg->buf[j + offset]; + + cmd_data |= ((msg->ssp_tag & + MCP_COMMAND_SSP_TAG_MASK) << + MCP_COMMAND_SSP_TAG_SHIFT); + cnl_sdw_reg_writel(data->sdw_regs, + cmd_base, cmd_data); + cmd_base += SDW_CNL_CMD_WORD_LEN; + } + + /* If async, don't wait for completion */ + if (async) + return 0; + /* Wait up to 3 seconds for the response */ + time_left = wait_for_completion_timeout(&sdw->tx_complete, 3 * HZ); + if (!time_left) { + dev_err(&mstr->dev, "Controller timed out\n"); + msg->len = 0; + return -ETIMEDOUT; + } + return sdw_fill_message_response(mstr, msg, count, offset); +} + +static enum sdw_command_response cnl_sdw_xfer_msg_async(struct sdw_master *mstr, + struct sdw_msg *msg, bool program_scp_addr_page, + struct sdw_async_xfer_data *data) +{ + int ret = 0, cmd; + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + + /* Only 1 message can be handled in async fashion. This is used + * only for bank switching, where aggregation requires the bank to + * be switched synchronously on more than 1 controller. + */ + if (msg->len > 1) { + ret = -EINVAL; + goto error; + } + /* If scp addr programming fails goto error */ + if (program_scp_addr_page) + ret = cnl_program_scp_addr(mstr, msg); + if (ret) + goto error; + + switch (msg->flag) { + case SDW_MSG_FLAG_READ: + cmd = 0x2; + break; + case SDW_MSG_FLAG_WRITE: + cmd = 0x3; + break; + default: + dev_err(&mstr->dev, "Command not supported\n"); + return -EINVAL; + } + sdw->async_msg.async_xfer_complete = &data->xfer_complete; + sdw->async_msg.msg = msg; + sdw->async_msg.length = msg->len; + /* Don't wait for the reply here; the calling function waits for it.
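+ * This async path exists for synchronized bank switches across + * aggregated Masters: each controller queues its message here and the + * caller waits on all the completions together.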
*/ + ret = sdw_xfer_msg(mstr, msg, cmd, 0, msg->len, true); + return ret; +error: + msg->len = 0; + complete(&data->xfer_complete); + return -EINVAL; + +} + +static enum sdw_command_response cnl_sdw_xfer_msg(struct sdw_master *mstr, + struct sdw_msg *msg, bool program_scp_addr_page) +{ + int i, ret = 0, cmd; + + if (program_scp_addr_page) + ret = cnl_program_scp_addr(mstr, msg); + + if (ret) { + msg->len = 0; + return ret; + } + + switch (msg->flag) { + case SDW_MSG_FLAG_READ: + cmd = 0x2; + break; + case SDW_MSG_FLAG_WRITE: + cmd = 0x3; + break; + default: + dev_err(&mstr->dev, "Command not supported\n"); + return -EINVAL; + } + for (i = 0; i < msg->len / SDW_CNL_MCP_COMMAND_LENGTH; i++) { + ret = sdw_xfer_msg(mstr, msg, + cmd, i * SDW_CNL_MCP_COMMAND_LENGTH, + SDW_CNL_MCP_COMMAND_LENGTH, false); + if (ret < 0) + break; + } + if (!(msg->len % SDW_CNL_MCP_COMMAND_LENGTH)) + return ret; + ret = sdw_xfer_msg(mstr, msg, cmd, i * SDW_CNL_MCP_COMMAND_LENGTH, + msg->len % SDW_CNL_MCP_COMMAND_LENGTH, false); + if (ret < 0) + return -EINVAL; + return ret; +} + +static void cnl_sdw_bra_prep_crc(u8 *txdata_buf, + struct sdw_bra_block *block, int data_offset, int addr_offset) +{ + + int addr = addr_offset; + + txdata_buf[addr++] = sdw_bus_compute_crc8((block->values + data_offset), + block->num_bytes); + txdata_buf[addr++] = 0x0; + txdata_buf[addr++] = 0x0; + txdata_buf[addr] |= ((0x2 & SDW_BRA_SOP_EOP_PDI_MASK) + << SDW_BRA_SOP_EOP_PDI_SHIFT); +} + +static void cnl_sdw_bra_prep_data(u8 *txdata_buf, + struct sdw_bra_block *block, int data_offset, int addr_offset) +{ + + int i; + int addr = addr_offset; + + for (i = 0; i < block->num_bytes; i += 2) { + + txdata_buf[addr++] = block->values[i + data_offset]; + if ((block->num_bytes - 1) - i) + txdata_buf[addr++] = block->values[i + data_offset + 1]; + else + txdata_buf[addr++] = 0; + + txdata_buf[addr++] = 0; + txdata_buf[addr++] = 0; + } +} + +static void cnl_sdw_bra_prep_hdr(u8 *txdata_buf, + struct sdw_bra_block *block, int rolling_id, int offset) +{ + + u8 tmp_hdr[6] = {0, 0, 0, 0, 0, 0}; + u8 temp = 0x0; + + /* + * 6 bytes header + * 1st byte: b11001010 + * b11: Header is active + * b0010: Device number 2 is selected + * b1: Write operation + * b0: MSB of BRA_NumBytes is 0 + * 2nd byte: LSB of number of bytes + * 3rd byte to 6th byte: Slave register offset + */ + temp |= (SDW_BRA_HDR_ACTIVE & SDW_BRA_HDR_ACTIVE_MASK) << + SDW_BRA_HDR_ACTIVE_SHIFT; + temp |= (block->slave_addr & SDW_BRA_HDR_SLV_ADDR_MASK) << + SDW_BRA_HDR_SLV_ADDR_SHIFT; + temp |= (block->cmd & SDW_BRA_HDR_RD_WR_MASK) << + SDW_BRA_HDR_RD_WR_SHIFT; + + if (block->num_bytes > SDW_BRA_HDR_MSB_BYTE_CHK) + temp |= (SDW_BRA_HDR_MSB_BYTE_SET & SDW_BRA_HDR_MSB_BYTE_MASK); + else + temp |= (SDW_BRA_HDR_MSB_BYTE_UNSET & + SDW_BRA_HDR_MSB_BYTE_MASK); + + txdata_buf[offset + 0] = tmp_hdr[0] = temp; + txdata_buf[offset + 1] = tmp_hdr[1] = block->num_bytes; + txdata_buf[offset + 3] |= ((SDW_BRA_SOP_EOP_PDI_STRT_VALUE & + SDW_BRA_SOP_EOP_PDI_MASK) << + SDW_BRA_SOP_EOP_PDI_SHIFT); + + txdata_buf[offset + 3] |= ((rolling_id & SDW_BRA_ROLLINGID_PDI_MASK) + << SDW_BRA_ROLLINGID_PDI_SHIFT); + + txdata_buf[offset + 4] = tmp_hdr[2] = ((block->reg_offset & + SDW_BRA_HDR_SLV_REG_OFF_MASK24) + >> SDW_BRA_HDR_SLV_REG_OFF_SHIFT24); + + txdata_buf[offset + 5] = tmp_hdr[3] = ((block->reg_offset & + SDW_BRA_HDR_SLV_REG_OFF_MASK16) + >> SDW_BRA_HDR_SLV_REG_OFF_SHIFT16); + + txdata_buf[offset + 8] = tmp_hdr[4] = ((block->reg_offset & + SDW_BRA_HDR_SLV_REG_OFF_MASK8) + >> SDW_BRA_HDR_SLV_REG_OFF_SHIFT8); + + 
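/* Only the low two bytes of each 32-bit PDI word carry BRA payload; + * byte 3 of each word holds the SOP/EOP/RollingID tags, which is why + * the register offset bytes land at byte offsets 4, 5, 8 and 9. */ +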
txdata_buf[offset + 9] = tmp_hdr[5] = (block->reg_offset & + SDW_BRA_HDR_SLV_REG_OFF_MASK0); + + /* CRC check */ + txdata_buf[offset + 0xc] = sdw_bus_compute_crc8(tmp_hdr, + SDW_BRA_HEADER_SIZE); + + if (!block->cmd) + txdata_buf[offset + 0xf] = ((SDW_BRA_SOP_EOP_PDI_END_VALUE & + SDW_BRA_SOP_EOP_PDI_MASK) << + SDW_BRA_SOP_EOP_PDI_SHIFT); +} + +static void cnl_sdw_bra_pdi_tx_config(struct sdw_master *mstr, + struct cnl_sdw *sdw, bool enable) +{ + struct cnl_sdw_pdi_stream tx_pdi_stream; + unsigned int tx_ch_map_offset, port_ctrl_offset, tx_pdi_config_offset; + unsigned int port_ctrl = 0, tx_pdi_config = 0, tx_stream_config; + int tx_pdi_ch_map = 0; + + if (enable) { + /* DP0 PORT CTRL REG */ + port_ctrl_offset = SDW_CNL_PORTCTRL + (SDW_BRA_PORT_ID * + SDW_CNL_PORT_REG_OFFSET); + + port_ctrl &= ~(PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + + port_ctrl |= ((SDW_BRA_BULK_ENABLE & SDW_BRA_BLK_EN_MASK) << + SDW_BRA_BLK_EN_SHIFT); + + port_ctrl |= ((SDW_BRA_BPT_PAYLOAD_TYPE & + SDW_BRA_BPT_PYLD_TY_MASK) << + SDW_BRA_BPT_PYLD_TY_SHIFT); + + cnl_sdw_reg_writel(sdw->data.sdw_regs, port_ctrl_offset, + port_ctrl); + + /* PDI0 Programming */ + tx_pdi_stream.l_ch_num = 0; + tx_pdi_stream.h_ch_num = 0xF; + tx_pdi_stream.pdi_num = SDW_BRA_PDI_TX_ID; + /* TODO: Remove hardcoding */ + tx_pdi_stream.sdw_pdi_num = mstr->nr * 16 + + tx_pdi_stream.pdi_num + 3; + + /* SNDWxPCMS2CM SHIM REG */ + tx_ch_map_offset = SDW_CNL_CTLS2CM + + (SDW_CNL_PCMSCHM_REG_OFFSET * mstr->nr); + + tx_pdi_ch_map |= (tx_pdi_stream.sdw_pdi_num & + CNL_PCMSYCM_STREAM_MASK) << + CNL_PCMSYCM_STREAM_SHIFT; + + tx_pdi_ch_map |= (tx_pdi_stream.l_ch_num & + CNL_PCMSYCM_LCHAN_MASK) << + CNL_PCMSYCM_LCHAN_SHIFT; + + tx_pdi_ch_map |= (tx_pdi_stream.h_ch_num & + CNL_PCMSYCM_HCHAN_MASK) << + CNL_PCMSYCM_HCHAN_SHIFT; + + cnl_sdw_reg_writew(sdw->data.sdw_shim, tx_ch_map_offset, + tx_pdi_ch_map); + + /* TX PDI0 CONFIG REG BANK 0 */ + tx_pdi_config_offset = (SDW_CNL_PDINCONFIG0 + + (tx_pdi_stream.pdi_num * 16)); + + tx_pdi_config |= ((SDW_BRA_PORT_ID & + PDINCONFIG_PORT_NUMBER_MASK) << + PDINCONFIG_PORT_NUMBER_SHIFT); + + tx_pdi_config |= (SDW_BRA_CHN_MASK << + PDINCONFIG_CHANNEL_MASK_SHIFT); + + tx_pdi_config |= (SDW_BRA_SOFT_RESET << + PDINCONFIG_PORT_SOFT_RESET_SHIFT); + + cnl_sdw_reg_writel(sdw->data.sdw_regs, + tx_pdi_config_offset, tx_pdi_config); + + /* ALH STRMzCFG REG */ + tx_stream_config = cnl_sdw_reg_readl(sdw->data.alh_base, + (tx_pdi_stream.sdw_pdi_num * + ALH_CNL_STRMZCFG_OFFSET)); + + tx_stream_config |= (CNL_STRMZCFG_DMAT_VAL & + CNL_STRMZCFG_DMAT_MASK) << + CNL_STRMZCFG_DMAT_SHIFT; + + tx_stream_config |= (0x0 & CNL_STRMZCFG_CHAN_MASK) << + CNL_STRMZCFG_CHAN_SHIFT; + + cnl_sdw_reg_writel(sdw->data.alh_base, + (tx_pdi_stream.sdw_pdi_num * + ALH_CNL_STRMZCFG_OFFSET), + tx_stream_config); + + + } else { + + /* + * TODO: There is official workaround which needs to be + * performed for PDI config register. The workaround + * is to perform SoftRst twice in order to clear + * PDI fifo contents. 
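+ * Until that workaround is in place, data left in the FIFO by a + * previous BRA transfer could conceivably be replayed on the next one.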
+ */ + + } +} + +static void cnl_sdw_bra_pdi_rx_config(struct sdw_master *mstr, + struct cnl_sdw *sdw, bool enable) +{ + + struct cnl_sdw_pdi_stream rx_pdi_stream; + unsigned int rx_ch_map_offset, rx_pdi_config_offset, rx_stream_config; + unsigned int rx_pdi_config = 0; + int rx_pdi_ch_map = 0; + + if (enable) { + + /* RX PDI1 Configuration */ + rx_pdi_stream.l_ch_num = 0; + rx_pdi_stream.h_ch_num = 0xF; + rx_pdi_stream.pdi_num = SDW_BRA_PDI_RX_ID; + rx_pdi_stream.sdw_pdi_num = mstr->nr * 16 + + rx_pdi_stream.pdi_num + 3; + + /* SNDWxPCMS3CM SHIM REG */ + rx_ch_map_offset = SDW_CNL_CTLS3CM + + (SDW_CNL_PCMSCHM_REG_OFFSET * mstr->nr); + + rx_pdi_ch_map |= (rx_pdi_stream.sdw_pdi_num & + CNL_PCMSYCM_STREAM_MASK) << + CNL_PCMSYCM_STREAM_SHIFT; + + rx_pdi_ch_map |= (rx_pdi_stream.l_ch_num & + CNL_PCMSYCM_LCHAN_MASK) << + CNL_PCMSYCM_LCHAN_SHIFT; + + rx_pdi_ch_map |= (rx_pdi_stream.h_ch_num & + CNL_PCMSYCM_HCHAN_MASK) << + CNL_PCMSYCM_HCHAN_SHIFT; + + cnl_sdw_reg_writew(sdw->data.sdw_shim, rx_ch_map_offset, + rx_pdi_ch_map); + + /* RX PDI1 CONFIG REG */ + rx_pdi_config_offset = (SDW_CNL_PDINCONFIG0 + + (rx_pdi_stream.pdi_num * 16)); + + rx_pdi_config |= ((SDW_BRA_PORT_ID & + PDINCONFIG_PORT_NUMBER_MASK) << + PDINCONFIG_PORT_NUMBER_SHIFT); + + rx_pdi_config |= (SDW_BRA_CHN_MASK << + PDINCONFIG_CHANNEL_MASK_SHIFT); + + rx_pdi_config |= (SDW_BRA_SOFT_RESET << + PDINCONFIG_PORT_SOFT_RESET_SHIFT); + + cnl_sdw_reg_writel(sdw->data.sdw_regs, + rx_pdi_config_offset, rx_pdi_config); + + + /* ALH STRMzCFG REG */ + rx_stream_config = cnl_sdw_reg_readl(sdw->data.alh_base, + (rx_pdi_stream.sdw_pdi_num * + ALH_CNL_STRMZCFG_OFFSET)); + + rx_stream_config |= (CNL_STRMZCFG_DMAT_VAL & + CNL_STRMZCFG_DMAT_MASK) << + CNL_STRMZCFG_DMAT_SHIFT; + + rx_stream_config |= (0 & CNL_STRMZCFG_CHAN_MASK) << + CNL_STRMZCFG_CHAN_SHIFT; + + cnl_sdw_reg_writel(sdw->data.alh_base, + (rx_pdi_stream.sdw_pdi_num * + ALH_CNL_STRMZCFG_OFFSET), + rx_stream_config); + + } else { + + /* + * TODO: There is official workaround which needs to be + * performed for PDI config register. The workaround + * is to perform SoftRst twice in order to clear + * PDI fifo contents. 
+ */ + + } +} + +static void cnl_sdw_bra_pdi_config(struct sdw_master *mstr, bool enable) +{ + struct cnl_sdw *sdw; + + /* Get driver data for master */ + sdw = sdw_master_get_drvdata(mstr); + + /* PDI0 configuration */ + cnl_sdw_bra_pdi_tx_config(mstr, sdw, enable); + + /* PDI1 configuration */ + cnl_sdw_bra_pdi_rx_config(mstr, sdw, enable); +} + +static int cnl_sdw_bra_verify_footer(u8 *rx_buf, int offset) +{ + int ret = 0; + u8 ftr_response; + u8 ack_nack = 0; + u8 ftr_result = 0; + + ftr_response = rx_buf[offset]; + + /* + * ACK/NACK check + * NACK+ACK value from target: + * 00 -> Ignored + * 01 -> OK + * 10 -> Failed (Header CRC check failed) + * 11 -> Reserved + * NACK+ACK values at Target or initiator + * 00 -> Ignored + * 01 -> OK + * 10 -> Abort (Header cannot be trusted) + * 11 -> Abort (Header cannot be trusted) + */ + ack_nack = ((ftr_response >> SDW_BRA_FTR_RESP_ACK_SHIFT) & + SDW_BRA_FTR_RESP_ACK_MASK); + if (ack_nack == SDW_BRA_ACK_NAK_IGNORED) { + pr_info("BRA: Packet Ignored\n"); + ret = -EINVAL; + } else if (ack_nack == SDW_BRA_ACK_NAK_OK) + pr_info("BRA: Packet OK\n"); + else if (ack_nack == SDW_BRA_ACK_NAK_FAILED_ABORT) { + pr_info("BRA: Packet Failed/Abort\n"); + return -EINVAL; + } else if (ack_nack == SDW_BRA_ACK_NAK_RSVD_ABORT) { + pr_info("BRA: Packet Reserved/Abort\n"); + return -EINVAL; + } + + /* + * BRA footer result check + * Writes: + * 0 -> Good. Target accepted write payload + * 1 -> Bad. Target did not accept write payload + * Reads: + * 0 -> Good. Target completed read operation successfully + * 1 -> Bad. Target failed to complete read operation successfully + */ + ftr_result = (ftr_response >> SDW_BRA_FTR_RESP_RES_SHIFT) & + SDW_BRA_FTR_RESP_RES_MASK; + if (ftr_result == SDW_BRA_FTR_RESULT_BAD) { + pr_info("BRA: Read/Write operation failed on target side\n"); + /* Error scenario */ + return -EINVAL; + } + + pr_info("BRA: Read/Write operation complete on target side\n"); + + return ret; +} + +static int cnl_sdw_bra_verify_hdr(u8 *rx_buf, int offset, bool *chk_footer, + int roll_id) +{ + int ret = 0; + u8 hdr_response, rolling_id; + u8 ack_nack = 0; + u8 not_ready = 0; + + /* Match rolling ID */ + hdr_response = rx_buf[offset]; + rolling_id = rx_buf[offset + SDW_BRA_ROLLINGID_PDI_INDX]; + + rolling_id = (rolling_id & SDW_BRA_ROLLINGID_PDI_MASK); + if (roll_id != rolling_id) { + pr_info("BRA: Rolling ID doesn't match, returning error\n"); + return -EINVAL; + } + + /* + * ACK/NACK check + * NACK+ACK value from target: + * 00 -> Ignored + * 01 -> OK + * 10 -> Failed (Header CRC check failed) + * 11 -> Reserved + * NACK+ACK values at Target or initiator + * 00 -> Ignored + * 01 -> OK + * 10 -> Abort (Header cannot be trusted) + * 11 -> Abort (Header cannot be trusted) + */ + ack_nack = ((hdr_response >> SDW_BRA_HDR_RESP_ACK_SHIFT) & + SDW_BRA_HDR_RESP_ACK_MASK); + if (ack_nack == SDW_BRA_ACK_NAK_IGNORED) { + pr_info("BRA: Packet Ignored rolling_id:%d\n", rolling_id); + ret = -EINVAL; + } else if (ack_nack == SDW_BRA_ACK_NAK_OK) + pr_info("BRA: Packet OK rolling_id:%d\n", rolling_id); + else if (ack_nack == SDW_BRA_ACK_NAK_FAILED_ABORT) { + pr_info("BRA: Packet Failed/Abort rolling_id:%d\n", rolling_id); + return -EINVAL; + } else if (ack_nack == SDW_BRA_ACK_NAK_RSVD_ABORT) { + pr_info("BRA: Packet Reserved/Abort rolling_id:%d\n", rolling_id); + return -EINVAL; + } + + /* BRA not ready check */ + not_ready = (hdr_response >> SDW_BRA_HDR_RESP_NRDY_SHIFT) & + SDW_BRA_HDR_RESP_NRDY_MASK; + if (not_ready == SDW_BRA_TARGET_NOT_READY) { + pr_info("BRA: Target not ready for read/write operation rolling_id:%d\n", + rolling_id);
*chk_footer = false; + return -EBUSY; + } + + pr_info("BRA: Target ready for read/write operation rolling_id:%d\n", rolling_id); + return ret; +} + +static void cnl_sdw_bra_remove_data_padding(u8 *src_buf, u8 *dst_buf, + u8 size) { + + int i; + + for (i = 0; i < size / 2; i++) { + + *dst_buf++ = *src_buf++; + *dst_buf++ = *src_buf++; + /* Skip the two pad bytes in each 4-byte PDI word */ + src_buf++; + src_buf++; + } +} + + +static int cnl_sdw_bra_check_data(struct sdw_master *mstr, + struct sdw_bra_block *block, struct bra_info *info) { + + int offset = 0, rolling_id = 0, tmp_offset = 0; + int rx_crc_comp = 0, rx_crc_rvd = 0; + int i, ret; + bool chk_footer = true; + int rx_buf_size = info->rx_block_size; + u8 *rx_buf = info->rx_ptr; + u8 *tmp_buf = NULL; + + /* TODO: Remove below hex dump print */ + print_hex_dump(KERN_DEBUG, "BRA RX DATA:", DUMP_PREFIX_OFFSET, 8, 4, + rx_buf, rx_buf_size, false); + + /* Allocate temporary buffer in case of read request */ + if (!block->cmd) { + tmp_buf = kzalloc(block->num_bytes, GFP_KERNEL); + if (!tmp_buf) { + ret = -ENOMEM; + goto error; + } + } + + /* + * TODO: The response header and footer do not say whether a packet + * was a read or a write, so the controller needs to keep the + * transmitted packet information in order to verify the RX packet. + * Also, the current error handling is coarse: if any packet response + * is not a success, the whole transfer is reported to the Slave as + * failed. + */ + + /* + * Verification of the response packet for one known + * hardcoded configuration. This needs to be extended + * once the dynamic algorithm is integrated. + */ + + /* 2 valid read responses */ + for (i = 0; i < info->valid_packets; i++) { + + pr_info("BRA: Verifying packet number:%d with rolling id:%d\n", + info->packet_info[i].packet_num, + rolling_id); + chk_footer = true; + ret = cnl_sdw_bra_verify_hdr(rx_buf, offset, &chk_footer, + rolling_id); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Header verification failed for packet number:%d\n", + info->packet_info[i].packet_num); + goto error; + } + + /* Increment offset for header response */ + offset = offset + SDW_BRA_HEADER_RESP_SIZE_PDI; + + if (!block->cmd) { + + /* Remove PDI padding for data */ + cnl_sdw_bra_remove_data_padding(&rx_buf[offset], + &tmp_buf[tmp_offset], + info->packet_info[i].num_data_bytes); + + /* Increment offset for consumed data */ + offset = offset + + (info->packet_info[i].num_data_bytes * 2); + + rx_crc_comp = sdw_bus_compute_crc8(&tmp_buf[tmp_offset], + info->packet_info[i].num_data_bytes); + + /* Match Data CRC */ + rx_crc_rvd = rx_buf[offset]; + if (rx_crc_comp != rx_crc_rvd) { + ret = -EINVAL; + dev_err(&mstr->dev, "BRA: Data CRC doesn't match for packet number:%d\n", + info->packet_info[i].packet_num); + goto error; + } + + /* Increment destination buffer with copied data */ + tmp_offset = tmp_offset + + info->packet_info[i].num_data_bytes; + + /* Increment offset for CRC */ + offset = offset + SDW_BRA_DATA_CRC_SIZE_PDI; + } + + if (chk_footer) { + ret = cnl_sdw_bra_verify_footer(rx_buf, offset); + if (ret < 0) { + ret = -EINVAL; + dev_err(&mstr->dev, "BRA: Footer verification failed for packet number:%d\n", + info->packet_info[i].packet_num); + goto error; + } + + } + + /* Increment offset for footer response */ + offset = offset + SDW_BRA_HEADER_RESP_SIZE_PDI; + + /* Increment rolling id for next packet */ + rolling_id++; + if (rolling_id > 0xF) + rolling_id = 0; + } + + /* + * No need to check for dummy responses from the codec. + * The assumption made here is that dummy packets
are + * added in 1ms buffer only after valid packets. + */ + + /* Copy data to codec buffer in case of read request */ + if (!block->cmd) + memcpy(block->values, tmp_buf, block->num_bytes); + +error: + /* Free up temp buffer allocated in case of read request */ + if (!block->cmd) + kfree(tmp_buf); + + /* Free up buffer allocated in cnl_sdw_bra_data_ops */ + kfree(info->tx_ptr); + kfree(info->rx_ptr); + kfree(info->packet_info); + + return ret; +} + +static int cnl_sdw_bra_data_ops(struct sdw_master *mstr, + struct sdw_bra_block *block, struct bra_info *info) +{ + + struct sdw_bra_block tmp_block; + int i; + int tx_buf_size = 384, rx_buf_size = 1152; + u8 *tx_buf = NULL, *rx_buf = NULL; + int rolling_id = 0, total_bytes = 0, offset = 0, reg_offset = 0; + int dummy_read = 0x0000; + int ret; + + /* + * TODO: Run an algorithm here to identify the buffer size + * for TX and RX buffers + number of dummy packets (read + * or write) to be added for to align buffers. + */ + + info->tx_block_size = tx_buf_size; + info->tx_ptr = tx_buf = kzalloc(tx_buf_size, GFP_KERNEL); + if (!tx_buf) { + ret = -ENOMEM; + goto error; + } + + info->rx_block_size = rx_buf_size; + info->rx_ptr = rx_buf = kzalloc(rx_buf_size, GFP_KERNEL); + if (!rx_buf) { + ret = -ENOMEM; + goto error; + } + + /* Fill valid packets transferred per millisecond buffer */ + info->valid_packets = 2; + info->packet_info = kcalloc(info->valid_packets, + sizeof(*info->packet_info), + GFP_KERNEL); + if (!info->packet_info) { + ret = -ENOMEM; + goto error; + } + + /* + * Below code performs packet preparation for one known + * configuration. + * 1. 2 Valid Read request with 18 bytes each. + * 2. 22 dummy read packets with 18 bytes each. + */ + for (i = 0; i < info->valid_packets; i++) { + tmp_block.slave_addr = block->slave_addr; + tmp_block.cmd = block->cmd; /* Read Request */ + tmp_block.num_bytes = 18; + tmp_block.reg_offset = block->reg_offset + reg_offset; + tmp_block.values = NULL; + reg_offset += tmp_block.num_bytes; + + cnl_sdw_bra_prep_hdr(tx_buf, &tmp_block, rolling_id, offset); + /* Total Header size: Header + Header CRC size on PDI */ + offset += SDW_BRA_HEADER_TOTAL_SZ_PDI; + + if (block->cmd) { + /* + * PDI data preparation in case of write request + * Assumption made here is data size from codec will + * be always an even number. 
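+ * (cnl_sdw_bra_prep_data() does zero-pad a trailing odd byte, but the + * offset arithmetic below, num_bytes * 2, only holds for even sizes.)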
+ */ + cnl_sdw_bra_prep_data(tx_buf, &tmp_block, + total_bytes, offset); + offset += tmp_block.num_bytes * 2; + + /* Data CRC */ + cnl_sdw_bra_prep_crc(tx_buf, &tmp_block, + total_bytes, offset); + offset += SDW_BRA_DATA_CRC_SIZE_PDI; + } + + total_bytes += tmp_block.num_bytes; + rolling_id++; + + /* Fill packet info data structure */ + info->packet_info[i].packet_num = i + 1; + info->packet_info[i].num_data_bytes = tmp_block.num_bytes; + } + + /* Prepare dummy packets */ + for (i = 0; i < 22; i++) { + tmp_block.slave_addr = block->slave_addr; + tmp_block.cmd = 0; /* Read request */ + tmp_block.num_bytes = 18; + tmp_block.reg_offset = dummy_read++; + tmp_block.values = NULL; + + cnl_sdw_bra_prep_hdr(tx_buf, &tmp_block, rolling_id, offset); + + /* Total Header size: RD header + RD header CRC size on PDI */ + offset += SDW_BRA_HEADER_TOTAL_SZ_PDI; + + total_bytes += tmp_block.num_bytes; + rolling_id++; + } + + /* TODO: Remove below hex dump print */ + print_hex_dump(KERN_DEBUG, "BRA PDI VALID TX DATA:", + DUMP_PREFIX_OFFSET, 8, 4, tx_buf, tx_buf_size, false); + + return 0; + +error: + kfree(info->tx_ptr); + kfree(info->rx_ptr); + kfree(info->packet_info); + + return ret; +} + +static int cnl_sdw_xfer_bulk(struct sdw_master *mstr, + struct sdw_bra_block *block) +{ + struct cnl_sdw *sdw = sdw_master_get_platdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + struct cnl_bra_operation *ops = data->bra_data->bra_ops; + struct bra_info info; + int ret; + + /* + * 1. PDI Configuration + * 2. Prepare BRA packets including CRC calculation. + * 3. Configure TX and RX DMA in one shot mode. + * 4. Configure TX and RX Pipeline. + * 5. Run TX and RX DMA. + * 6. Run TX and RX pipelines. + * 7. Wait on completion for RX buffer. + * 8. Match TX and RX buffer packets and check for errors. 
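+ * Steps 1, 2 and 8 are handled here; steps 3 to 7 are delegated to + * the platform via bra_platform_setup() and bra_platform_xfer(), which + * are presumed to block until the one-shot DMA completes.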
+ */ + + /* Memset bra_info data structure */ + memset(&info, 0x0, sizeof(info)); + + /* Fill master number in bra info data structure */ + info.mstr_num = mstr->nr; + + /* PDI Configuration (ON) */ + cnl_sdw_bra_pdi_config(mstr, true); + + /* Prepare TX buffer */ + ret = cnl_sdw_bra_data_ops(mstr, block, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Request packet(s) creation failed\n"); + goto out; + } + + /* Pipeline Setup (ON) */ + ret = ops->bra_platform_setup(data->bra_data->drv_data, true, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Pipeline setup failed\n"); + goto out; + } + + /* Trigger START host DMA and pipeline */ + ret = ops->bra_platform_xfer(data->bra_data->drv_data, true, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Pipeline start failed\n"); + goto out; + } + + /* Trigger STOP host DMA and pipeline */ + ret = ops->bra_platform_xfer(data->bra_data->drv_data, false, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Pipeline stop failed\n"); + goto out; + } + + /* Pipeline Setup (OFF) */ + ret = ops->bra_platform_setup(data->bra_data->drv_data, false, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Pipeline de-setup failed\n"); + goto out; + } + + /* Verify RX buffer */ + ret = cnl_sdw_bra_check_data(mstr, block, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Response packet(s) incorrect\n"); + goto out; + } + + /* PDI Configuration (OFF) */ + cnl_sdw_bra_pdi_config(mstr, false); + +out: + return ret; +} + +static int cnl_sdw_mon_handover(struct sdw_master *mstr, + bool enable) +{ + int mcp_config; + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + + mcp_config = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_CONFIG); + if (enable) + mcp_config |= MCP_CONFIG_BRELENABLE_MASK << + MCP_CONFIG_BRELENABLE_SHIFT; + else + mcp_config &= ~(MCP_CONFIG_BRELENABLE_MASK << + MCP_CONFIG_BRELENABLE_SHIFT); + + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONFIG, mcp_config); + return 0; +} + +static int cnl_sdw_set_ssp_interval(struct sdw_master *mstr, + int ssp_interval, int bank) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + int sspctrl_offset, check; + + if (bank) + sspctrl_offset = SDW_CNL_MCP_SSPCTRL1; + else + sspctrl_offset = SDW_CNL_MCP_SSPCTRL0; + + cnl_sdw_reg_writel(data->sdw_regs, sspctrl_offset, ssp_interval); + + check = cnl_sdw_reg_readl(data->sdw_regs, sspctrl_offset); + + return 0; +} + +static int cnl_sdw_set_clock_freq(struct sdw_master *mstr, + int cur_clk_div, int bank) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + int mcp_clockctrl_offset, mcp_clockctrl; + + + /* TODO: Retrieve divider value or get value directly from calling + * function + */ + int divider = (cur_clk_div - 1); + + if (bank) { + mcp_clockctrl_offset = SDW_CNL_MCP_CLOCKCTRL1; + mcp_clockctrl = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CLOCKCTRL1); + + } else { + mcp_clockctrl_offset = SDW_CNL_MCP_CLOCKCTRL0; + mcp_clockctrl = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CLOCKCTRL0); + } + + mcp_clockctrl |= divider; + + /* Write value here */ + cnl_sdw_reg_writel(data->sdw_regs, mcp_clockctrl_offset, + mcp_clockctrl); + + mcp_clockctrl = cnl_sdw_reg_readl(data->sdw_regs, + mcp_clockctrl_offset); + return 0; +} + +static int cnl_sdw_set_port_params(struct sdw_master *mstr, + struct sdw_port_params *params, int bank) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = 
&sdw->data; + int dpn_config = 0, dpn_config_offset; + + if (bank) + dpn_config_offset = SDW_CNL_DPN_CONFIG1; + else + dpn_config_offset = SDW_CNL_DPN_CONFIG0; + + dpn_config = cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_config_offset, params->num); + + dpn_config |= (((params->word_length - 1) & DPN_CONFIG_WL_MASK) << + DPN_CONFIG_WL_SHIFT); + dpn_config |= ((params->port_flow_mode & DPN_CONFIG_PF_MODE_MASK) << + DPN_CONFIG_PF_MODE_SHIFT); + dpn_config |= ((params->port_data_mode & DPN_CONFIG_PD_MODE_MASK) << + DPN_CONFIG_PD_MODE_SHIFT); + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_config_offset, params->num, dpn_config); + + cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_config_offset, params->num); + return 0; +} + +static int cnl_sdw_set_port_transport_params(struct sdw_master *mstr, + struct sdw_transport_params *params, int bank) +{ +struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + + int dpn_config = 0, dpn_config_offset; + int dpn_samplectrl_offset; + int dpn_offsetctrl = 0, dpn_offsetctrl_offset; + int dpn_hctrl = 0, dpn_hctrl_offset; + + if (bank) { + dpn_config_offset = SDW_CNL_DPN_CONFIG1; + dpn_samplectrl_offset = SDW_CNL_DPN_SAMPLECTRL1; + dpn_hctrl_offset = SDW_CNL_DPN_HCTRL1; + dpn_offsetctrl_offset = SDW_CNL_DPN_OFFSETCTRL1; + } else { + dpn_config_offset = SDW_CNL_DPN_CONFIG0; + dpn_samplectrl_offset = SDW_CNL_DPN_SAMPLECTRL0; + dpn_hctrl_offset = SDW_CNL_DPN_HCTRL0; + dpn_offsetctrl_offset = SDW_CNL_DPN_OFFSETCTRL0; + } + dpn_config = cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_config_offset, params->num); + dpn_config |= ((params->blockgroupcontrol & DPN_CONFIG_BGC_MASK) << + DPN_CONFIG_BGC_SHIFT); + dpn_config |= ((params->blockpackingmode & DPN_CONFIG_BPM_MASK) << + DPN_CONFIG_BPM_SHIFT); + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_config_offset, params->num, dpn_config); + + cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_config_offset, params->num); + + dpn_offsetctrl |= ((params->offset1 & DPN_OFFSETCTRL0_OF1_MASK) << + DPN_OFFSETCTRL0_OF1_SHIFT); + + dpn_offsetctrl |= ((params->offset2 & DPN_OFFSETCTRL0_OF2_MASK) << + DPN_OFFSETCTRL0_OF2_SHIFT); + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_offsetctrl_offset, params->num, dpn_offsetctrl); + + + dpn_hctrl |= ((params->hstart & DPN_HCTRL_HSTART_MASK) << + DPN_HCTRL_HSTART_SHIFT); + dpn_hctrl |= ((params->hstop & DPN_HCTRL_HSTOP_MASK) << + DPN_HCTRL_HSTOP_SHIFT); + dpn_hctrl |= ((params->lanecontrol & DPN_HCTRL_LCONTROL_MASK) << + DPN_HCTRL_LCONTROL_SHIFT); + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_hctrl_offset, params->num, dpn_hctrl); + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_samplectrl_offset, params->num, + (params->sample_interval - 1)); + + cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_hctrl_offset, params->num); + + cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_samplectrl_offset, params->num); + + return 0; +} + +static int cnl_sdw_port_activate_ch(struct sdw_master *mstr, + struct sdw_activate_ch *activate_ch, int bank) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + int dpn_channelen_offset; + int ch_mask; + + if (bank) + dpn_channelen_offset = SDW_CNL_DPN_CHANNELEN1; + else + dpn_channelen_offset = SDW_CNL_DPN_CHANNELEN0; + + if (activate_ch->activate) + ch_mask = activate_ch->ch_mask; + else + ch_mask = 0; + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_channelen_offset, activate_ch->num, + ch_mask); + + return 0; +} + +static int cnl_sdw_port_activate_ch_pre(struct sdw_master *mstr, 
+ struct sdw_activate_ch *activate_ch, int bank) +{ + int sync_reg; + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + + if (mstr->link_sync_mask) { + /* Check if this link is synchronized with some other link */ + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + /* If link is synchronized with other link than + * Need to make sure that command doesnt go till + * ssync is applied + */ + sync_reg |= (1 << (data->inst_id + CNL_SYNC_CMDSYNC_SHIFT)); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + } + + return 0; +} +static int cnl_sdw_port_activate_ch_post(struct sdw_master *mstr, + struct sdw_activate_ch *activate_ch, int bank) +{ + int sync_reg; + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + volatile int sync_update = 0; + int timeout = 10; + + + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + /* If waiting for synchronization set the go bit, else return */ + if (!(sync_reg & SDW_CMDSYNC_SET_MASK)) + return 0; + sync_reg |= (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + + do { + sync_update = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + if ((sync_update & + (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT)) == 0) + break; + msleep(20); + timeout--; + + } while (timeout); + + if ((sync_update & + (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT)) != 0) { + dev_err(&mstr->dev, "Failed to set sync go\n"); + return -EIO; + } + return 0; +} + +static int cnl_sdw_probe(struct sdw_master *mstr, + const struct sdw_master_id *sdw_id) +{ + struct cnl_sdw *sdw; + int ret = 0; + struct cnl_sdw_data *data = mstr->dev.platform_data; + + sdw = devm_kzalloc(&mstr->dev, sizeof(*sdw), GFP_KERNEL); + if (!sdw) { + ret = -ENOMEM; + return ret; + } + dev_info(&mstr->dev, + "Controller Resources ctrl_base = %p shim=%p irq=%d inst_id=%d\n", + data->sdw_regs, data->sdw_shim, data->irq, data->inst_id); + sdw->data.sdw_regs = data->sdw_regs; + sdw->data.sdw_shim = data->sdw_shim; + sdw->data.irq = data->irq; + sdw->data.inst_id = data->inst_id; + sdw->data.alh_base = data->alh_base; + sdw->mstr = mstr; + spin_lock_init(&sdw->ctrl_lock); + sdw_master_set_drvdata(mstr, sdw); + init_completion(&sdw->tx_complete); + mutex_init(&sdw->stream_lock); + ret = sdw_init(sdw, true); + if (ret) { + dev_err(&mstr->dev, "SoundWire controller init failed %d\n", + data->inst_id); + return ret; + } + ret = devm_request_irq(&mstr->dev, + sdw->data.irq, cnl_sdw_irq_handler, IRQF_SHARED, "SDW", sdw); + if (ret) { + dev_err(&mstr->dev, "unable to grab IRQ %d, disabling device\n", + sdw->data.irq); + sdw_power_down_link(sdw); + return ret; + } + pm_runtime_set_autosuspend_delay(&mstr->dev, 3000); + pm_runtime_use_autosuspend(&mstr->dev); + pm_runtime_enable(&mstr->dev); + pm_runtime_get_sync(&mstr->dev); + /* Resuming the device, since its already ON, function will simply + * return doing nothing + */ + pm_runtime_mark_last_busy(&mstr->dev); + /* Suspending the device after 3 secs, by the time + * all the slave would have enumerated. Initial + * clock freq is 9.6MHz and frame shape is 48X2, so + * there are 200000 frames in second, total there are + * minimum 600000 frames before device suspends. Soundwire + * spec says slave should get attached to bus in 4096 + * error free frames after reset. So this should be + * enough to make sure device gets attached to bus. 
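+ * Arithmetic: SoundWire is double data rate, so a 9.6 MHz clock moves + * 19.2 Mbit/s; one 48 x 2 frame is 96 bits, giving 19.2e6 / 96 = + * 200000 frames per second, i.e. roughly 600000 frames in 3 seconds.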
+static int cnl_sdw_remove(struct sdw_master *mstr)
+{
+	struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr);
+
+	sdw_power_down_link(sdw);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int cnl_sdw_runtime_suspend(struct device *dev)
+{
+	int volatile mcp_stat;
+	int mcp_control;
+	int timeout = 0;
+	int ret = 0;
+
+	struct cnl_sdw *sdw = dev_get_drvdata(dev);
+	struct cnl_sdw_data *data = &sdw->data;
+
+	/* If it is already suspended, return */
+	mcp_stat = cnl_sdw_reg_readl(data->sdw_regs,
+			SDW_CNL_MCP_STAT);
+	if (mcp_stat & (MCP_STAT_CLOCKSTOPPED_MASK <<
+			MCP_STAT_CLOCKSTOPPED_SHIFT)) {
+		dev_info(dev, "Clock is already stopped\n");
+		return 0;
+	}
+
+	/* Write the MCP Control register to block wakeups */
+	mcp_control = cnl_sdw_reg_readl(data->sdw_regs,
+			SDW_CNL_MCP_CONTROL);
+	mcp_control |= (MCP_CONTROL_BLOCKWAKEUP_MASK <<
+			MCP_CONTROL_BLOCKWAKEUP_SHIFT);
+	cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONTROL, mcp_control);
+
+	/* Prepare all the slaves for clock stop */
+	ret = sdw_master_prep_for_clk_stop(sdw->mstr);
+	if (ret)
+		return ret;
+
+	/* Call the bus function to broadcast the clock stop now */
+	ret = sdw_master_stop_clock(sdw->mstr);
+	if (ret)
+		return ret;
+	/* Wait for the clock to stop; wait at most 1 s */
+	while (timeout != 10) {
+		mcp_stat = cnl_sdw_reg_readl(data->sdw_regs,
+				SDW_CNL_MCP_STAT);
+		if (mcp_stat & (MCP_STAT_CLOCKSTOPPED_MASK <<
+				MCP_STAT_CLOCKSTOPPED_SHIFT))
+			break;
+		msleep(100);
+		timeout++;
+	}
+	mcp_stat = cnl_sdw_reg_readl(data->sdw_regs,
+			SDW_CNL_MCP_STAT);
+	if (!(mcp_stat & (MCP_STAT_CLOCKSTOPPED_MASK <<
+			MCP_STAT_CLOCKSTOPPED_SHIFT))) {
+		dev_err(dev, "Clock Stop failed\n");
+		ret = -EBUSY;
+		goto out;
+	}
+	/* Switch control from the master IP to the glue */
+	sdw_switch_to_glue(sdw);
+
+	sdw_power_down_link(sdw);
+
+	/* Enable the wakeup */
+	cnl_sdw_reg_writew(data->sdw_shim,
+			SDW_CNL_SNDWWAKEEN_REG_OFFSET,
+			(0x1 << data->inst_id));
+out:
+	return ret;
+}
+
+static int cnl_sdw_clock_stop_exit(struct cnl_sdw *sdw)
+{
+	u16 wake_en, wake_sts;
+	int ret;
+	struct cnl_sdw_data *data = &sdw->data;
+
+	/* Disable the wakeup interrupt */
+	wake_en = cnl_sdw_reg_readw(data->sdw_shim,
+			SDW_CNL_SNDWWAKEEN_REG_OFFSET);
+	wake_en &= ~(0x1 << data->inst_id);
+	cnl_sdw_reg_writew(data->sdw_shim, SDW_CNL_SNDWWAKEEN_REG_OFFSET,
+			wake_en);
+
+	/* Clear the wake status. It may be set if a Slave-requested wakeup
+	 * happened, or clear if the Master requested the resume. Clearing
+	 * it is harmless in either case.
+	 */
+	wake_sts = cnl_sdw_reg_readw(data->sdw_shim,
+			SDW_CNL_SNDWWAKESTS_REG_OFFSET);
+	wake_sts |= (0x1 << data->inst_id);
+	cnl_sdw_reg_writew(data->sdw_shim, SDW_CNL_SNDWWAKESTS_REG_OFFSET,
+			wake_sts);
+	ret = sdw_init(sdw, false);
+	if (ret < 0) {
+		pr_err("sdw_init fail: %d\n", ret);
+		return ret;
+	}
+
+	dev_info(&sdw->mstr->dev, "Exit from clock stop successful\n");
+	return 0;
+}
+
+static int cnl_sdw_runtime_resume(struct device *dev)
+{
+	struct cnl_sdw *sdw = dev_get_drvdata(dev);
+	struct cnl_sdw_data *data = &sdw->data;
+	int volatile mcp_stat;
+	struct sdw_master *mstr;
+	int ret = 0;
+
+	mstr = sdw->mstr;
+	/*
+	 * If already resumed, do nothing. This can happen because of
+	 * wakeup enable.
+	 */
+	mcp_stat = cnl_sdw_reg_readl(data->sdw_regs,
+			SDW_CNL_MCP_STAT);
+	if (!(mcp_stat & (MCP_STAT_CLOCKSTOPPED_MASK <<
+			MCP_STAT_CLOCKSTOPPED_SHIFT))) {
+		dev_info(dev, "Clock is already running\n");
+		return 0;
+	}
+	dev_info(dev, "%s %d Clock is stopped\n", __func__, __LINE__);
+
+	ret = cnl_sdw_clock_stop_exit(sdw);
+	if (ret)
+		return ret;
+
+	/* Prepare all the slaves to come out of clock stop */
+	ret = sdw_mstr_deprep_after_clk_start(sdw->mstr);
+	if (ret)
+		return ret;
+
+	return 0;
+}
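The suspend path above finishes by setting this instance's bit in the 16-bit SNDWWAKEEN shim register, and cnl_sdw_clock_stop_exit() clears it again and acknowledges SNDWWAKESTS. A sketch of that read-modify-write, assuming plain readw()/writew() semantics for the cnl_sdw_reg_readw/writew accessors (the helper itself is not part of this patch):

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/types.h>

/*
 * Sketch: set or clear this link instance's bit in the 16-bit SNDWWAKEEN
 * shim register. SDW_CNL_SNDWWAKEEN_REG_OFFSET is defined in
 * sdw_cnl_priv.h below.
 */
static void cnl_sdw_wake_enable(void __iomem *shim, int inst_id, bool on)
{
	u16 val = readw(shim + SDW_CNL_SNDWWAKEEN_REG_OFFSET);

	if (on)
		val |= BIT(inst_id);
	else
		val &= ~BIT(inst_id);
	writew(val, shim + SDW_CNL_SNDWWAKEEN_REG_OFFSET);
}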
+#ifdef CONFIG_PM_SLEEP
+static int cnl_sdw_sleep_resume(struct device *dev)
+{
+	return cnl_sdw_runtime_resume(dev);
+}
+
+static int cnl_sdw_sleep_suspend(struct device *dev)
+{
+	return cnl_sdw_runtime_suspend(dev);
+}
+#else
+#define cnl_sdw_sleep_suspend NULL
+#define cnl_sdw_sleep_resume NULL
+#endif /* CONFIG_PM_SLEEP */
+#else
+#define cnl_sdw_runtime_suspend NULL
+#define cnl_sdw_runtime_resume NULL
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops cnl_sdw_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(cnl_sdw_sleep_suspend, cnl_sdw_sleep_resume)
+	SET_RUNTIME_PM_OPS(cnl_sdw_runtime_suspend,
+			cnl_sdw_runtime_resume, NULL)
+};
+
+static struct sdw_master_ops cnl_sdw_master_ops = {
+	.xfer_msg_async = cnl_sdw_xfer_msg_async,
+	.xfer_msg = cnl_sdw_xfer_msg,
+	.xfer_bulk = cnl_sdw_xfer_bulk,
+	.monitor_handover = cnl_sdw_mon_handover,
+	.set_ssp_interval = cnl_sdw_set_ssp_interval,
+	.set_clock_freq = cnl_sdw_set_clock_freq,
+	.set_frame_shape = NULL,
+};
+
+static struct sdw_master_port_ops cnl_sdw_master_port_ops = {
+	.dpn_set_port_params = cnl_sdw_set_port_params,
+	.dpn_set_port_transport_params = cnl_sdw_set_port_transport_params,
+	.dpn_port_activate_ch = cnl_sdw_port_activate_ch,
+	.dpn_port_activate_ch_pre = cnl_sdw_port_activate_ch_pre,
+	.dpn_port_activate_ch_post = cnl_sdw_port_activate_ch_post,
+	.dpn_port_prepare_ch = NULL,
+	.dpn_port_prepare_ch_pre = NULL,
+	.dpn_port_prepare_ch_post = NULL,
+};
+
+static struct sdw_mstr_driver cnl_sdw_mstr_driver = {
+	.driver_type = SDW_DRIVER_TYPE_MASTER,
+	.driver = {
+		.name = "cnl_sdw_mstr",
+		.pm = &cnl_sdw_pm_ops,
+	},
+	.probe = cnl_sdw_probe,
+	.remove = cnl_sdw_remove,
+	.mstr_ops = &cnl_sdw_master_ops,
+	.mstr_port_ops = &cnl_sdw_master_port_ops,
+};
+
+static int __init cnl_sdw_init(void)
+{
+	return sdw_mstr_driver_register(&cnl_sdw_mstr_driver);
+}
+module_init(cnl_sdw_init);
+
+static void cnl_sdw_exit(void)
+{
+	sdw_mstr_driver_unregister(&cnl_sdw_mstr_driver);
+}
+module_exit(cnl_sdw_exit);
+
+MODULE_DESCRIPTION("Intel SoundWire Master Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Hardik Shah ");
diff --git a/drivers/sdw/sdw_cnl_priv.h b/drivers/sdw/sdw_cnl_priv.h
new file mode 100644
index 000000000000..b7f44e1f9d6f
--- /dev/null
+++ b/drivers/sdw/sdw_cnl_priv.h
@@ -0,0 +1,385 @@
+/*
+ * sdw_cnl_priv.h - Private definitions for the Intel master controller
+ * driver.
+ *
+ * Copyright (C) 2014-2015 Intel Corp
+ * Author: Hardik Shah
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#ifndef _LINUX_SDW_CNL_PRIV_H +#define _LINUX_SDW_CNL_PRIV_H + +#define SDW_CNL_PM_TIMEOUT 3000 /* ms */ +#define SDW_CNL_SLAVES_STAT_UPPER_DWORD_SHIFT 32 +#define SDW_CNL_SLAVE_STATUS_BITS 4 +#define SDW_CNL_CMD_WORD_LEN 4 +#define SDW_CNL_DEFAULT_SSP_INTERVAL 0x18 +#define SDW_CNL_DEFAULT_CLK_DIVIDER 0 + +#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA) +#define SDW_CNL_DEFAULT_SYNC_PERIOD 0x257F +#define SDW_CNL_DEFAULT_FRAME_SHAPE 0x48 +#else +#define SDW_CNL_DEFAULT_SYNC_PERIOD 0x176F +#define SDW_CNL_DEFAULT_FRAME_SHAPE 0x30 +#endif + +#define SDW_CNL_PORT_REG_OFFSET 0x80 +#define CNL_SDW_SCP_ADDR_REGS 0x2 +#define SDW_CNL_PCM_PDI_NUM_OFFSET 0x2 +#define SDW_CNL_PDM_PDI_NUM_OFFSET 0x6 + +#define SDW_CNL_CTMCTL_REG_OFFSET 0x60 +#define SDW_CNL_IOCTL_REG_OFFSET 0x60 +#define SDW_CNL_PCMSCAP_REG_OFFSET 0x60 +#define SDW_CNL_PCMSCHC_REG_OFFSET 0x60 +#define SDW_CNL_PDMSCAP_REG_OFFSET 0x60 +#define SDW_CNL_PCMSCHM_REG_OFFSET 0x60 +#define SDW_CNL_SNDWWAKEEN_REG_OFFSET 0x190 +#define SDW_CNL_SNDWWAKESTS_REG_OFFSET 0x192 + + +#define SDW_CNL_MCP_CONFIG 0x0 +#define MCP_CONFIG_BRELENABLE_MASK 0x1 +#define MCP_CONFIG_BRELENABLE_SHIFT 0x6 +#define MCP_CONFIG_MAXCMDRETRY_SHIFT 24 +#define MCP_CONFIG_MAXCMDRETRY_MASK 0xF +#define MCP_CONFIG_MAXPREQDELAY_SHIFT 16 +#define MCP_CONFIG_MAXPREQDELAY_MASK 0x1F +#define MCP_CONFIG_MMMODEEN_SHIFT 0x7 +#define MCP_CONFIG_MMMODEEN_MASK 0x1 +#define MCP_CONFIG_SNIFFEREN_SHIFT 0x5 +#define MCP_CONFIG_SNIFFEREN_MASK 0x1 +#define MCP_CONFIG_SSPMODE_SHIFT 0x4 +#define MCP_CONFIG_SSPMODE_MASK 0x1 +#define MCP_CONFIG_CMDMODE_SHIFT 0x3 +#define MCP_CONFIG_CMDMODE_MASK 0x1 + +#define MCP_CONFIG_OPERATIONMODE_MASK 0x7 +#define MCP_CONFIG_OPERATIONMODE_SHIFT 0x0 +#define MCP_CONFIG_OPERATIONMODE_NORMAL 0x0 + +#define SDW_CNL_MCP_CONTROL 0x4 +#define MCP_CONTROL_RESETDELAY_SHIFT 0x8 +#define MCP_CONTROL_CMDRST_SHIFT 0x7 +#define MCP_CONTROL_CMDRST_MASK 0x1 +#define MCP_CONTROL_SOFTRST_SHIFT 0x6 +#define MCP_CONTROL_SOFTCTRLBUSRST_SHIFT 0x5 +#define MCP_CONTROL_HARDCTRLBUSRST_MASK 0x1 +#define MCP_CONTROL_HARDCTRLBUSRST_SHIFT 0x4 +#define MCP_CONTROL_CLOCKPAUSEREQ_SHIFT 0x3 +#define MCP_CONTROL_CLOCKSTOPCLEAR_SHIFT 0x2 +#define MCP_CONTROL_CLOCKSTOPCLEAR_MASK 0x1 +#define MCP_CONTROL_CMDACCEPTMODE_MASK 0x1 +#define MCP_CONTROL_CMDACCEPTMODE_SHIFT 0x1 +#define MCP_CONTROL_BLOCKWAKEUP_SHIFT 0x0 +#define MCP_CONTROL_BLOCKWAKEUP_MASK 0x1 + + +#define MCP_SLAVEINTMASK0_MASK 0xFFFFFFFF +#define MCP_SLAVEINTMASK1_MASK 0x0000FFFF + +#define SDW_CNL_MCP_CMDCTRL 0x8 +#define SDW_CNL_MCP_SSPSTAT 0xC +#define SDW_CNL_MCP_FRAMESHAPE 0x10 +#define SDW_CNL_MCP_FRAMESHAPEINIT 0x14 +#define SDW_CNL_MCP_CONFIGUPDATE 0x18 +#define MCP_CONFIGUPDATE_CONFIGUPDATE_SHIFT 0x0 +#define MCP_CONFIGUPDATE_CONFIGUPDATE_MASK 0x1 + +#define SDW_CNL_MCP_PHYCTRL 0x1C +#define SDW_CNL_MCP_SSPCTRL0 0x20 +#define SDW_CNL_MCP_SSPCTRL1 0x28 +#define SDW_CNL_MCP_CLOCKCTRL0 0x30 +#define SDW_CNL_MCP_CLOCKCTRL1 0x38 +#define SDW_CNL_MCP_STAT 0x40 +#define SDW_CNL_MCP_INTSTAT 0x44 +#define MCP_INTSTAT_IRQ_SHIFT 31 +#define MCP_INTSTAT_IRQ_MASK 1 +#define MCP_INTSTAT_WAKEUP_SHIFT 16 +#define MCP_INTSTAT_SLAVE_STATUS_CHANGED_SHIFT 12 +#define MCP_INTSTAT_SLAVE_STATUS_CHANGED_MASK 0xF +#define MCP_INTSTAT_SLAVENOTATTACHED_SHIFT 12 +#define MCP_INTSTAT_SLAVEATTACHED_SHIFT 13 +#define MCP_INTSTAT_SLAVEALERT_SHIFT 14 +#define MCP_INTSTAT_SLAVERESERVED_SHIFT 15 + 
+#define MCP_INTSTAT_DPPDIINT_SHIFT	11
+#define MCP_INTSTAT_DPPDIINT_MASK	0x1
+#define MCP_INTSTAT_CONTROLBUSCLASH_SHIFT	10
+#define MCP_INTSTAT_CONTROLBUSCLASH_MASK	0x1
+#define MCP_INTSTAT_DATABUSCLASH_SHIFT	9
+#define MCP_INTSTAT_DATABUSCLASH_MASK	0x1
+#define MCP_INTSTAT_CMDERR_SHIFT	7
+#define MCP_INTSTAT_CMDERR_MASK		0x1
+#define MCP_INTSTAT_TXE_SHIFT		1
+#define MCP_INTSTAT_TXE_MASK		0x1
+#define MCP_INTSTAT_RXWL_SHIFT		2
+#define MCP_INTSTAT_RXWL_MASK		1
+
+#define SDW_CNL_MCP_INTMASK		0x48
+#define MCP_INTMASK_IRQEN_SHIFT		31
+#define MCP_INTMASK_IRQEN_MASK		0x1
+#define MCP_INTMASK_WAKEUP_SHIFT	16
+#define MCP_INTMASK_WAKEUP_MASK		0x1
+#define MCP_INTMASK_SLAVERESERVED_SHIFT	15
+#define MCP_INTMASK_SLAVERESERVED_MASK	0x1
+#define MCP_INTMASK_SLAVEALERT_SHIFT	14
+#define MCP_INTMASK_SLAVEALERT_MASK	0x1
+#define MCP_INTMASK_SLAVEATTACHED_SHIFT	13
+#define MCP_INTMASK_SLAVEATTACHED_MASK	0x1
+#define MCP_INTMASK_SLAVENOTATTACHED_SHIFT	12
+#define MCP_INTMASK_SLAVENOTATTACHED_MASK	0x1
+#define MCP_INTMASK_DPPDIINT_SHIFT	11
+#define MCP_INTMASK_DPPDIINT_MASK	0x1
+#define MCP_INTMASK_CONTROLBUSCLASH_SHIFT	10
+#define MCP_INTMASK_CONTROLBUSCLASH_MASK	1
+#define MCP_INTMASK_DATABUSCLASH_SHIFT	9
+#define MCP_INTMASK_DATABUSCLASH_MASK	1
+#define MCP_INTMASK_CMDERR_SHIFT	7
+#define MCP_INTMASK_CMDERR_MASK		0x1
+#define MCP_INTMASK_TXE_SHIFT		1
+#define MCP_INTMASK_TXE_MASK		0x1
+#define MCP_INTMASK_RXWL_SHIFT		2
+#define MCP_INTMASK_RXWL_MASK		0x1
+
+#define SDW_CNL_MCP_INTSET		0x4C
+#define MCP_STAT_ACTIVE_BANK_MASK	0x1
+#define MCP_STAT_ACTIVE_BANK_SHIFT	20
+#define MCP_STAT_CLOCKSTOPPED_MASK	0x1
+#define MCP_STAT_CLOCKSTOPPED_SHIFT	16
+
+#define SDW_CNL_MCP_SLAVESTAT		0x50
+#define MCP_SLAVESTAT_MASK		0x3
+
+#define SDW_CNL_MCP_SLAVEINTSTAT0	0x54
+#define MCP_SLAVEINTSTAT_NOT_PRESENT_MASK	0x1
+#define MCP_SLAVEINTSTAT_ATTACHED_MASK	0x2
+#define MCP_SLAVEINTSTAT_ALERT_MASK	0x4
+#define MCP_SLAVEINTSTAT_RESERVED_MASK	0x8
+
+#define SDW_CNL_MCP_SLAVEINTSTAT1	0x58
+#define SDW_CNL_MCP_SLAVEINTMASK0	0x5C
+#define SDW_CNL_MCP_SLAVEINTMASK1	0x60
+#define SDW_CNL_MCP_PORTINTSTAT		0x64
+#define SDW_CNL_MCP_PDISTAT		0x6C
+
+#define SDW_CNL_MCP_FIFOLEVEL		0x78
+#define SDW_CNL_MCP_FIFOSTAT		0x7C
+#define MCP_RX_FIFO_AVAIL_MASK		0x3F
+#define SDW_CNL_MCP_COMMAND_BASE	0x80
+#define SDW_CNL_MCP_RESPONSE_BASE	0x80
+#define SDW_CNL_MCP_COMMAND_LENGTH	0x20
+
+#define MCP_COMMAND_SSP_TAG_MASK	0x1
+#define MCP_COMMAND_SSP_TAG_SHIFT	31
+#define MCP_COMMAND_COMMAND_MASK	0x7
+#define MCP_COMMAND_COMMAND_SHIFT	28
+#define MCP_COMMAND_DEV_ADDR_MASK	0xF
+#define MCP_COMMAND_DEV_ADDR_SHIFT	24
+#define MCP_COMMAND_REG_ADDR_H_MASK	0x7
+#define MCP_COMMAND_REG_ADDR_H_SHIFT	16
+#define MCP_COMMAND_REG_ADDR_L_MASK	0xFF
+#define MCP_COMMAND_REG_ADDR_L_SHIFT	8
+#define MCP_COMMAND_REG_DATA_MASK	0xFF
+#define MCP_COMMAND_REG_DATA_SHIFT	0x0
+
+#define MCP_RESPONSE_RDATA_MASK		0xFF
+#define MCP_RESPONSE_RDATA_SHIFT	8
+#define MCP_RESPONSE_ACK_MASK		0x1
+#define MCP_RESPONSE_ACK_SHIFT		0
+#define MCP_RESPONSE_NACK_MASK		0x2
+
+#define SDW_CNL_DPN_CONFIG0		0x100
+#define SDW_CNL_DPN_CHANNELEN0		0x104
+#define SDW_CNL_DPN_SAMPLECTRL0		0x108
+#define SDW_CNL_DPN_OFFSETCTRL0		0x10C
+#define SDW_CNL_DPN_HCTRL0		0x110
+#define SDW_CNL_DPN_ASYNCCTRL0		0x114
+
+#define SDW_CNL_DPN_CONFIG1		0x118
+#define SDW_CNL_DPN_CHANNELEN1		0x11C
+#define SDW_CNL_DPN_SAMPLECTRL1		0x120
+#define SDW_CNL_DPN_OFFSETCTRL1		0x124
+#define SDW_CNL_DPN_HCTRL1		0x128
+
+#define SDW_CNL_PORTCTRL		0x130
+#define PORTCTRL_PORT_DIRECTION_SHIFT	0x7
+#define
PORTCTRL_PORT_DIRECTION_MASK 0x1 +#define PORTCTRL_BANK_INVERT_SHIFT 0x8 +#define PORTCTRL_BANK_INVERT_MASK 0x1 + +#define SDW_CNL_PDINCONFIG0 0x1100 +#define SDW_CNL_PDINCONFIG1 0x1108 +#define PDINCONFIG_CHANNEL_MASK_SHIFT 0x8 +#define PDINCONFIG_CHANNEL_MASK_MASK 0xFF +#define PDINCONFIG_PORT_NUMBER_SHIFT 0x0 +#define PDINCONFIG_PORT_NUMBER_MASK 0x1F +#define PDINCONFIG_PORT_SOFT_RESET_SHIFT 0x18 +#define PDINCONFIG_PORT_SOFT_RESET 0x1F + +#define DPN_CONFIG_WL_SHIFT 0x8 +#define DPN_CONFIG_WL_MASK 0x1F +#define DPN_CONFIG_PF_MODE_SHIFT 0x0 +#define DPN_CONFIG_PF_MODE_MASK 0x3 +#define DPN_CONFIG_PD_MODE_SHIFT 0x2 +#define DPN_CONFIG_PD_MODE_MASK 0x3 +#define DPN_CONFIG_BPM_MASK 0x1 +#define DPN_CONFIG_BPM_SHIFT 0x12 +#define DPN_CONFIG_BGC_MASK 0x3 +#define DPN_CONFIG_BGC_SHIFT 0x10 + +#define DPN_SAMPLECTRL_SI_MASK 0xFFFF +#define DPN_SAMPLECTRL_SI_SHIFT 0x0 + +#define DPN_OFFSETCTRL0_OF1_MASK 0xFF +#define DPN_OFFSETCTRL0_OF1_SHIFT 0x0 +#define DPN_OFFSETCTRL0_OF2_MASK 0xFF +#define DPN_OFFSETCTRL0_OF2_SHIFT 0x8 + +#define DPN_HCTRL_HSTOP_MASK 0xF +#define DPN_HCTRL_HSTOP_SHIFT 0x0 +#define DPN_HCTRL_HSTART_MASK 0xF +#define DPN_HCTRL_HSTART_SHIFT 0x4 +#define DPN_HCTRL_LCONTROL_MASK 0x7 +#define DPN_HCTRL_LCONTROL_SHIFT 0x8 + +/* SoundWire Shim registers */ +#define SDW_CNL_LCAP 0x0 +#define SDW_CNL_LCTL 0x4 +#define CNL_LCTL_CPA_SHIFT 8 +#define CNL_LCTL_SPA_SHIFT 0 +#define CNL_LCTL_CPA_MASK 0x1 +#define CNL_LCTL_SPA_MASK 0x1 + +#define SDW_CMDSYNC_SET_MASK 0xF0000 +#define SDW_CNL_IPPTR 0x8 +#define SDW_CNL_SYNC 0xC +#define CNL_SYNC_CMDSYNC_MASK 0x1 +#define CNL_SYNC_CMDSYNC_SHIFT 16 +#define CNL_SYNC_SYNCGO_MASK 0x1 +#define CNL_SYNC_SYNCGO_SHIFT 0x18 +#define CNL_SYNC_SYNCPRD_MASK 0x7FFF +#define CNL_SYNC_SYNCPRD_SHIFT 0x0 +#define CNL_SYNC_SYNCCPU_MASK 0x8000 +#define CNL_SYNC_SYNCCPU_SHIFT 0xF + +#define SDW_CNL_CTLSCAP 0x10 +#define SDW_CNL_CTLS0CM 0x12 +#define SDW_CNL_CTLS1CM 0x14 +#define SDW_CNL_CTLS2CM 0x16 +#define SDW_CNL_CTLS3CM 0x18 + +#define SDW_CNL_PCMSCAP 0x20 +#define CNL_PCMSCAP_BSS_SHIFT 8 +#define CNL_PCMSCAP_BSS_MASK 0x1F +#define CNL_PCMSCAP_OSS_SHIFT 4 +#define CNL_PCMSCAP_OSS_MASK 0xF +#define CNL_PCMSCAP_ISS_SHIFT 0 +#define CNL_PCMSCAP_ISS_MASK 0xF + +#define SDW_CNL_PCMSCHM 0x22 +#define CNL_PCMSYCM_DIR_SHIFT 15 +#define CNL_PCMSYCM_DIR_MASK 0x1 +#define CNL_PCMSYCM_STREAM_SHIFT 8 +#define CNL_PCMSYCM_STREAM_MASK 0x3F +#define CNL_PCMSYCM_HCHAN_SHIFT 4 +#define CNL_PCMSYCM_HCHAN_MASK 0xF +#define CNL_PCMSYCM_LCHAN_SHIFT 0 +#define CNL_PCMSYCM_LCHAN_MASK 0xF + +#define SDW_CNL_PCMSCHC 0x42 + +#define SDW_CNL_PDMSCAP 0x62 +#define CNL_PDMSCAP_BSS_SHIFT 8 +#define CNL_PDMSCAP_BSS_MASK 0x1F +#define CNL_PDMSCAP_OSS_SHIFT 4 +#define CNL_PDMSCAP_OSS_MASK 0xF +#define CNL_PDMSCAP_ISS_SHIFT 0 +#define CNL_PDMSCAP_ISS_MASK 0xF +#define CNL_PDMSCAP_CPSS_SHIFT 13 +#define CNL_PDMSCAP_CPSS_MASK 0x7 +#define SDW_CNL_PDMSCM + +#define SDW_CNL_IOCTL 0x6C +#define CNL_IOCTL_MIF_SHIFT 0x0 +#define CNL_IOCTL_MIF_MASK 0x1 +#define CNL_IOCTL_CO_SHIFT 0x1 +#define CNL_IOCTL_CO_MASK 0x1 +#define CNL_IOCTL_COE_SHIFT 0x2 +#define CNL_IOCTL_COE_MASK 0x1 +#define CNL_IOCTL_DO_SHIFT 0x3 +#define CNL_IOCTL_DO_MASK 0x1 +#define CNL_IOCTL_DOE_SHIFT 0x4 +#define CNL_IOCTL_DOE_MASK 0x1 +#define CNL_IOCTL_BKE_SHIFT 0x5 +#define CNL_IOCTL_BKE_MASK 0x1 +#define CNL_IOCTL_WPDD_SHIFT 0x6 +#define CNL_IOCTL_WPDD_MASK 0x1 +#define CNL_IOCTL_CIBD_SHIFT 0x8 +#define CNL_IOCTL_CIBD_MASK 0x1 +#define CNL_IOCTL_DIBD_SHIFT 0x9 +#define CNL_IOCTL_DIBD_MASK 0x1 + +#define SDW_CNL_CTMCTL_OFFSET 
0x60
+#define SDW_CNL_CTMCTL			0x6E
+#define CNL_CTMCTL_DACTQE_SHIFT		0x0
+#define CNL_CTMCTL_DACTQE_MASK		0x1
+#define CNL_CTMCTL_DODS_SHIFT		0x1
+#define CNL_CTMCTL_DODS_MASK		0x1
+#define CNL_CTMCTL_DOAIS_SHIFT		0x3
+#define CNL_CTMCTL_DOAIS_MASK		0x3
+
+#define ALH_CNL_STRMZCFG_BASE		0x4
+#define ALH_CNL_STRMZCFG_OFFSET		0x4
+#define CNL_STRMZCFG_DMAT_SHIFT		0x0
+#define CNL_STRMZCFG_DMAT_MASK		0xFF
+#define CNL_STRMZCFG_DMAT_VAL		0x3
+#define CNL_STRMZCFG_CHAN_SHIFT		16
+#define CNL_STRMZCFG_CHAN_MASK		0xF
+
+#define SDW_BRA_HEADER_SIZE_PDI		12 /* In bytes */
+#define SDW_BRA_HEADER_CRC_SIZE_PDI	4 /* In bytes */
+#define SDW_BRA_DATA_CRC_SIZE_PDI	4 /* In bytes */
+#define SDW_BRA_HEADER_RESP_SIZE_PDI	4 /* In bytes */
+#define SDW_BRA_FOOTER_RESP_SIZE_PDI	4 /* In bytes */
+#define SDW_BRA_PADDING_SZ_PDI		4 /* In bytes */
+#define SDW_BRA_HEADER_TOTAL_SZ_PDI	16 /* In bytes */
+
+#define SDW_BRA_SOP_EOP_PDI_STRT_VALUE	0x4
+#define SDW_BRA_SOP_EOP_PDI_END_VALUE	0x2
+#define SDW_BRA_SOP_EOP_PDI_MASK	0x1F
+#define SDW_BRA_SOP_EOP_PDI_SHIFT	5
+
+#define SDW_BRA_STRM_ID_BLK_OUT		3
+#define SDW_BRA_STRM_ID_BLK_IN		4
+
+#define SDW_BRA_PDI_TX_ID		0
+#define SDW_BRA_PDI_RX_ID		1
+
+#define SDW_BRA_SOFT_RESET		0x1
+#define SDW_BRA_BULK_ENABLE		1
+#define SDW_BRA_BLK_EN_MASK		0xFFFEFFFF
+#define SDW_BRA_BLK_EN_SHIFT		16
+
+#define SDW_BRA_ROLLINGID_PDI_INDX	3
+#define SDW_BRA_ROLLINGID_PDI_MASK	0xF
+#define SDW_BRA_ROLLINGID_PDI_SHIFT	0
+
+#define SDW_PCM_STRM_START_INDEX	0x2
+
+#endif /* _LINUX_SDW_CNL_PRIV_H */
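Two conventions in this header are worth spelling out: per-port registers repeat at SDW_CNL_PORT_REG_OFFSET (0x80) strides, which is what the cnl_sdw_port_reg_readl/writel accessors used by the driver imply, and every field is described by an unshifted _MASK plus a _SHIFT. A sketch under those assumptions follows; the real accessor body is not shown in this patch, so this is an inference, not its definition.

#include <linux/io.h>
#include <linux/types.h>

/* Assumed addressing: register "reg" of data port "port_num". */
static inline u32 port_reg_read(void __iomem *base, u32 reg, int port_num)
{
	return readl(base + reg + port_num * SDW_CNL_PORT_REG_OFFSET);
}

/* Field decode with the unshifted-mask convention, e.g. word length: */
static inline u32 dpn_config_word_length(u32 dpn_config)
{
	return (dpn_config >> DPN_CONFIG_WL_SHIFT) & DPN_CONFIG_WL_MASK;
}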
diff --git a/drivers/sdw/sdw_maxim.c b/drivers/sdw/sdw_maxim.c
new file mode 100644
index 000000000000..7f2844a7c9c1
--- /dev/null
+++ b/drivers/sdw/sdw_maxim.c
@@ -0,0 +1,146 @@
+/*
+ * sdw_maxim.c -- Maxim SoundWire slave device driver. Dummy driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include
+#include
+#include
+
+static int maxim_register_sdw_capabilities(struct sdw_slave *sdw,
+		const struct sdw_slave_id *sdw_id)
+{
+	struct sdw_slv_capabilities cap;
+	struct sdw_slv_dpn_capabilities *dpn_cap = NULL;
+	struct port_audio_mode_properties *prop = NULL;
+	int i, j;
+
+	cap.wake_up_unavailable = true;
+	cap.test_mode_supported = false;
+	cap.clock_stop1_mode_supported = false;
+	cap.simplified_clock_stop_prepare = false;
+	cap.highphy_capable = true;
+	cap.paging_supported = false;
+	cap.bank_delay_support = false;
+	cap.port_15_read_behavior = 0;
+	cap.sdw_dp0_supported = false;
+	cap.num_of_sdw_ports = 3;
+	cap.sdw_dpn_cap = devm_kzalloc(&sdw->dev,
+			((sizeof(struct sdw_slv_dpn_capabilities)) *
+			cap.num_of_sdw_ports), GFP_KERNEL);
+	if (!cap.sdw_dpn_cap)
+		return -ENOMEM;
+	for (i = 0; i < cap.num_of_sdw_ports; i++) {
+		dpn_cap = &cap.sdw_dpn_cap[i];
+		if (i == 0 || i == 2)
+			dpn_cap->port_direction = SDW_PORT_SOURCE;
+		else
+			dpn_cap->port_direction = SDW_PORT_SINK;
+
+		dpn_cap->port_number = i + 1;
+		dpn_cap->max_word_length = 24;
+		dpn_cap->min_word_length = 16;
+		dpn_cap->num_word_length = 0;
+		dpn_cap->word_length_buffer = NULL;
+		dpn_cap->dpn_type = SDW_FULL_DP;
+		dpn_cap->dpn_grouping = SDW_BLOCKGROUPCOUNT_1;
+		dpn_cap->prepare_ch = SDW_CP_SM;
+		dpn_cap->imp_def_intr_mask = 0x0;
+		dpn_cap->min_ch_num = 1;
+		dpn_cap->max_ch_num = 2;
+		dpn_cap->num_ch_supported = 0;
+		dpn_cap->ch_supported = NULL;
+		dpn_cap->port_flow_mode_mask = SDW_PORT_FLOW_MODE_ISOCHRONOUS;
+		dpn_cap->block_packing_mode_mask =
+			SDW_PORT_BLK_PKG_MODE_BLK_PER_PORT_MASK |
+			SDW_PORT_BLK_PKG_MODE_BLK_PER_CH_MASK;
+		dpn_cap->port_encoding_type_mask =
+			SDW_PORT_ENCODING_TYPE_TWOS_CMPLMNT |
+			SDW_PORT_ENCODING_TYPE_SIGN_MAGNITUDE |
+			SDW_PORT_ENCODING_TYPE_IEEE_32_FLOAT;
+		dpn_cap->num_audio_modes = 1;
+
+		dpn_cap->mode_properties = devm_kzalloc(&sdw->dev,
+			((sizeof(struct port_audio_mode_properties)) *
+			dpn_cap->num_audio_modes), GFP_KERNEL);
+		if (!dpn_cap->mode_properties)
+			return -ENOMEM;
+		for (j = 0; j < dpn_cap->num_audio_modes; j++) {
+			prop = &dpn_cap->mode_properties[j];
+			prop->max_frequency = 16000000;
+			prop->min_frequency = 1000000;
+			prop->num_freq_configs = 0;
+			prop->freq_supported = NULL;
+			prop->glitchless_transitions_mask = 0x1;
+			prop->max_sampling_frequency = 192000;
+			prop->min_sampling_frequency = 8000;
+			prop->num_sampling_freq_configs = 0;
+			prop->sampling_freq_config = NULL;
+			prop->ch_prepare_behavior = SDW_CH_PREP_ANY_TIME;
+		}
+	}
+	return sdw_register_slave_capabilities(sdw, &cap);
+}
+
+static int maxim_sdw_probe(struct sdw_slave *sdw,
+		const struct sdw_slave_id *sdw_id)
+{
+	dev_info(&sdw->dev, "Maxim SoundWire Slave Registered %lx\n",
+		sdw_id->driver_data);
+	return maxim_register_sdw_capabilities(sdw, sdw_id);
+}
+
+static int maxim_sdw_remove(struct sdw_slave *sdw)
+{
+	dev_info(&sdw->dev, "Maxim SoundWire Slave un-Registered\n");
+	return 0;
+}
+
+static const struct sdw_slave_id maxim_id[] = {
+	{"03:01:9f:79:00:00", 0},
+	{"09:01:9f:79:00:00", 1},
+	{"04:01:9f:79:00:00", 2},
+	{"0a:01:9f:79:00:00", 3},
+	{"04:01:9f:79:00:00", 4},
+	{"0a:01:9f:79:00:00", 5},
+	{"05:01:9f:79:00:00", 6},
+	{"06:01:9f:79:00:00", 7},
+	{"05:01:9f:79:00:00", 8},
+	{"00:01:9f:79:00:00", 9},
+	{"06:01:9f:79:00:00", 10},
+	{"07:01:9f:79:00:00", 11},
+	{"00:01:9f:79:00:00", 12},
+	{"06:01:9f:79:00:00", 13},
+	{"01:01:9f:79:00:00", 14},
+	{"07:01:9f:79:00:00", 15},
+	{"08:01:9f:79:00:00", 16},
+	{"01:01:9f:79:00:00", 17},
+	{"07:01:9f:79:00:00", 18},
+	{"02:01:9f:79:00:00", 19},
+	{"08:01:9f:79:00:00", 20},
+	{"09:01:9f:79:00:00", 21},
+	{"02:01:9f:79:00:00", 22},
+ {"08:01:9f:79:00:00", 23}, + {"03:01:9f:79:00:00", 24}, + {"09:01:9f:79:00:00", 25}, + {"0a:01:9f:79:00:00", 26}, + {}, +}; + +MODULE_DEVICE_TABLE(sdw, maxim_id); + +static struct sdw_slave_driver maxim_sdw_driver = { + .driver_type = SDW_DRIVER_TYPE_SLAVE, + .driver = { + .name = "maxim", + }, + .probe = maxim_sdw_probe, + .remove = maxim_sdw_remove, + .id_table = maxim_id, +}; + +module_sdw_slave_driver(maxim_sdw_driver); + +MODULE_DESCRIPTION("SoundWire Maxim Slave Driver"); +MODULE_AUTHOR("Hardik Shah, "); +MODULE_LICENSE("GPL"); diff --git a/drivers/sdw/sdw_priv.h b/drivers/sdw/sdw_priv.h new file mode 100644 index 000000000000..0a6d57d7a075 --- /dev/null +++ b/drivers/sdw/sdw_priv.h @@ -0,0 +1,280 @@ +/* + * sdw_priv.h - Private definition for sdw bus interface. + * + * Copyright (C) 2014-2015 Intel Corp + * Author: Hardik Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#ifndef _LINUX_SDW_PRIV_H +#define _LINUX_SDW_PRIV_H + +#include /* For kthread */ +#include + +#define SDW_MAX_STREAM_TAG_KEY_SIZE 80 +#define SDW_NUM_STREAM_TAGS 100 +#define MAX_NUM_ROWS 23 /* As per SDW Spec */ +#define MAX_NUM_COLS 8/* As per SDW Spec */ +#define MAX_NUM_ROW_COLS (MAX_NUM_ROWS * MAX_NUM_COLS) + +#define SDW_STATE_INIT_STREAM_TAG 0x1 +#define SDW_STATE_ALLOC_STREAM 0x2 +#define SDW_STATE_CONFIG_STREAM 0x3 +#define SDW_STATE_PREPARE_STREAM 0x4 +#define SDW_STATE_ENABLE_STREAM 0x5 +#define SDW_STATE_DISABLE_STREAM 0x6 +#define SDW_STATE_UNPREPARE_STREAM 0x7 +#define SDW_STATE_RELEASE_STREAM 0x8 +#define SDW_STATE_FREE_STREAM 0x9 +#define SDW_STATE_FREE_STREAM_TAG 0xA +#define SDW_STATE_ONLY_XPORT_STREAM 0xB + +#define SDW_STATE_INIT_RT 0x1 +#define SDW_STATE_CONFIG_RT 0x2 +#define SDW_STATE_PREPARE_RT 0x3 +#define SDW_STATE_ENABLE_RT 0x4 +#define SDW_STATE_DISABLE_RT 0x5 +#define SDW_STATE_UNPREPARE_RT 0x6 +#define SDW_STATE_RELEASE_RT 0x7 + +#define SDW_SLAVE_BDCAST_ADDR 15 + +struct sdw_runtime; +/* Defined in sdw.c, used by multiple files of module */ +extern struct sdw_core sdw_core; + +enum sdw_port_state { + SDW_PORT_STATE_CH_READY, + SDW_PORT_STATE_CH_STOPPED, + SDW_PORT_STATE_CH_PREPARING, + SDW_PORT_STATE_CH_DEPREPARING, +}; + +enum sdw_stream_state { + SDW_STREAM_ALLOCATED, + SDW_STREAM_FREE, + SDW_STREAM_ACTIVE, + SDW_STREAM_INACTIVE, +}; + +enum sdw_clk_state { + SDW_CLK_STATE_OFF = 0, + SDW_CLK_STATE_ON = 1, +}; + +enum sdw_update_bs_state { + SDW_UPDATE_BS_PRE, + SDW_UPDATE_BS_BNKSWTCH, + SDW_UPDATE_BS_POST, + SDW_UPDATE_BS_BNKSWTCH_WAIT, + SDW_UPDATE_BS_DIS_CHN, +}; + +enum sdw_port_en_state { + SDW_PORT_STATE_PREPARE, + SDW_PORT_STATE_ENABLE, + SDW_PORT_STATE_DISABLE, + SDW_PORT_STATE_UNPREPARE, +}; + +struct port_chn_en_state { + bool is_activate; + bool is_bank_sw; +}; + +struct temp_elements { + int rate; + int full_bw; + int payload_bw; + int hwidth; +}; + +struct sdw_stream_tag { + int stream_tag; + struct mutex stream_lock; + int ref_count; + enum sdw_stream_state stream_state; + char 
key[SDW_MAX_STREAM_TAG_KEY_SIZE];
+	struct sdw_runtime *sdw_rt;
+};
+
+struct sdw_stream_params {
+	unsigned int rate;
+	unsigned int channel_count;
+	unsigned int bps;
+};
+
+struct sdw_port_runtime {
+	int port_num;
+	enum sdw_port_state port_state;
+	int channel_mask;
+	/* Frame params and stream params are per port. A single audio
+	 * stream may be split across multiple ports, each handling a
+	 * subset of the channels; the channels in a subset must be
+	 * contiguous.
+	 */
+	struct sdw_transport_params transport_params;
+	struct sdw_port_params port_params;
+	struct list_head port_node;
+};
+
+struct sdw_slave_runtime {
+	/* Simplified port or full port; a single stream cannot use both
+	 * types of data port, so this is kept per slave runtime, not
+	 * per port.
+	 */
+	enum sdw_dpn_type type;
+	struct sdw_slave *slave;
+	int direction;
+	/* A stream may be split across multiple slaves, so this holds
+	 * the parameters for this particular slave.
+	 */
+	struct sdw_stream_params stream_params;
+	struct list_head port_rt_list;
+	struct list_head slave_sdw_node;
+	struct list_head slave_node;
+	int rt_state; /* State of the runtime structure */
+};
+
+struct sdw_mstr_runtime {
+	struct sdw_master *mstr;
+	int direction;
+	/* A stream may be split between multiple masters, so this is
+	 * for an individual master. The bandwidth on a particular bus
+	 * is calculated from that master's stream params.
+	 */
+	struct sdw_stream_params stream_params;
+	struct list_head port_rt_list;
+	/* Two nodes are required because BW calculation is per master,
+	 * while stream enabling is per stream_tag, where multiple
+	 * masters may be involved.
+	 */
+	struct list_head mstr_sdw_node; /* This is to add mstr_rt in sdw_rt */
+	struct list_head mstr_node; /* This is to add mstr_rt in mstr */
+
+	struct list_head slv_rt_list;
+	/* Individual stream bandwidth on a given master */
+	unsigned int stream_bw;
+	/* State of the runtime structure */
+	int rt_state;
+	int hstart;
+	int hstop;
+	int block_offset;
+	int sub_block_offset;
+};
+
+struct sdw_runtime {
+	int tx_ref_count;
+	int rx_ref_count;
+	/* Stream params for the whole stream; the stream may be split
+	 * between two masters or two slaves.
+	 */
+	struct sdw_stream_params stream_params;
+	struct list_head slv_rt_list;
+	struct list_head mstr_rt_list;
+	enum sdw_stream_type type;
+	int stream_state;
+	int xport_state;
+};
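The runtime objects above form a three-level hierarchy: one sdw_runtime fans out to per-master runtimes, each of which tracks its per-slave runtimes, which in turn track per-port runtimes. A sketch of walking that hierarchy with the list nodes declared above; note that the pairing of slave_sdw_node with the master's slv_rt_list is inferred from the comments, not stated explicitly in this patch.

#include <linux/kernel.h>
#include <linux/list.h>

/* Sketch: dump every port of every slave on every master of a stream. */
static void sdw_rt_dump(struct sdw_runtime *sdw_rt)
{
	struct sdw_mstr_runtime *m_rt;
	struct sdw_slave_runtime *s_rt;
	struct sdw_port_runtime *p_rt;

	list_for_each_entry(m_rt, &sdw_rt->mstr_rt_list, mstr_sdw_node)
		list_for_each_entry(s_rt, &m_rt->slv_rt_list, slave_sdw_node)
			list_for_each_entry(p_rt, &s_rt->port_rt_list,
					    port_node)
				pr_debug("port %d, channel mask %#x\n",
					 p_rt->port_num, p_rt->channel_mask);
}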
+struct sdw_slv_status {
+	struct list_head node;
+	enum sdw_slave_status status[SOUNDWIRE_MAX_DEVICES + 1];
+};
+
+/** Bus structure which handles bus-related information */
+struct sdw_bus {
+	struct list_head bus_node;
+	struct sdw_master *mstr;
+	unsigned int port_grp_mask[2];
+	unsigned int slave_grp_mask[2];
+	unsigned int clk_state;
+	unsigned int active_bank;
+	unsigned int clk_freq;
+	unsigned int clk_div;
+	/* Total bus bandwidth; initialized and reset to zero */
+	unsigned int bandwidth;
+	unsigned int stream_interval; /* Stream Interval */
+	unsigned int system_interval; /* Bus System Interval */
+	unsigned int frame_freq;
+	unsigned int col;
+	unsigned int row;
+	struct task_struct *status_thread;
+	struct kthread_worker kworker;
+	struct kthread_work kwork;
+	struct list_head status_list;
+	spinlock_t spinlock;
+	struct sdw_async_xfer_data async_data;
+};
+
+/** Holds supported Row-Column combination related information */
+struct sdw_rowcol {
+	int row;
+	int col;
+	int control_bits;
+	int data_bits;
+};
+
+/**
+ * Global SoundWire structure. It handles all the streams spawned
+ * across masters and keeps a list of bus structures, one per
+ * registered master.
+ */
+struct sdw_core {
+	struct sdw_stream_tag stream_tags[SDW_NUM_STREAM_TAGS];
+	struct sdw_rowcol rowcolcomb[MAX_NUM_ROW_COLS];
+	struct list_head bus_list;
+	struct mutex core_lock;
+	struct idr idr;
+	int first_dynamic_bus_num;
+};
+
+/* Structure holding mapping of numbers to cols */
+struct sdw_num_to_col {
+	int num;
+	int col;
+};
+
+/* Structure holding mapping of numbers to rows */
+struct sdw_num_to_row {
+	int num;
+	int row;
+};
+
+int sdw_slave_port_config_port_params(struct sdw_slave_runtime *slv_rt);
+int sdw_slave_port_prepare(struct sdw_slave_runtime *slv_rt, bool prepare);
+int sdw_bus_bw_init(void);
+int sdw_mstr_bw_init(struct sdw_bus *sdw_bs);
+int sdw_bus_calc_bw(struct sdw_stream_tag *stream_tag, bool enable);
+int sdw_bus_calc_bw_dis(struct sdw_stream_tag *stream_tag, bool unprepare);
+int sdw_bus_bra_xport_config(struct sdw_bus *sdw_mstr_bs,
+			struct sdw_bra_block *block, bool enable);
+int sdw_chn_enable(void);
+void sdw_unlock_mstr(struct sdw_master *mstr);
+int sdw_trylock_mstr(struct sdw_master *mstr);
+void sdw_lock_mstr(struct sdw_master *mstr);
+int sdw_slave_transfer_async(struct sdw_master *mstr, struct sdw_msg *msg,
+			int num,
+			struct sdw_async_xfer_data *async_data);
+
+#endif /* _LINUX_SDW_PRIV_H */
diff --git a/drivers/sdw/sdw_utils.c b/drivers/sdw/sdw_utils.c
new file mode 100644
index 000000000000..724323d01993
--- /dev/null
+++ b/drivers/sdw/sdw_utils.c
@@ -0,0 +1,49 @@
+/*
+ * sdw_utils.c - SoundWire bus utility functions (CRC8 helper)
+ *
+ * Copyright (C) 2015-2016 Intel Corp
+ * Author: Sanyog Kale
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include
+#include
+#include
+#include
+
+/**
+ * sdw_bus_compute_crc8: SoundWire bus helper function to compute CRC8.
+ * This API uses the crc8 helper functions internally.
+ *
+ * @values: Data buffer.
+ * @num_bytes: Number of bytes.
+ */
+u8 sdw_bus_compute_crc8(u8 *values, u8 num_bytes)
+{
+	u8 table[256];
+	u8 poly = 0x4D;	/* polynomial = x^8 + x^6 + x^3 + x^2 + 1 */
+	u8 crc = CRC8_INIT_VALUE;	/* initial value: all ones (0xFF) */
+
+	/* Populate the MSB-first lookup table */
+	crc8_populate_msb(table, poly);
+
+	/* CRC computation */
+	crc = crc8(table, values, num_bytes, crc);
+
+	return crc;
+}
+EXPORT_SYMBOL(sdw_bus_compute_crc8);
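A usage sketch for the helper: the BRA framing constants in sdw_cnl_priv.h reserve 12 header bytes (SDW_BRA_HEADER_SIZE_PDI) followed by a CRC field, so a caller would compute the CRC over the header roughly as below; the exact buffer layout is assumed for illustration. As a design note, the helper rebuilds the 256-entry lookup table on every call, so if it ever sits on a hot path, crc8_populate_msb() could be run once at module init instead.

#include <linux/types.h>

/*
 * Assumed layout: 12 header bytes followed by the CRC byte; the
 * remaining bytes of the 4-byte CRC field are protocol padding.
 */
static void bra_header_set_crc(u8 *buf)
{
	buf[12] = sdw_bus_compute_crc8(buf, 12);
}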
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
index 89f4cf507be6..f2d8c3c53ea4 100644
--- a/drivers/soc/amlogic/meson-gx-socinfo.c
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -20,8 +20,8 @@
 #define AO_SEC_SOCINFO_OFFSET	AO_SEC_SD_CFG8
 
 #define SOCINFO_MAJOR	GENMASK(31, 24)
-#define SOCINFO_MINOR	GENMASK(23, 16)
-#define SOCINFO_PACK	GENMASK(15, 8)
+#define SOCINFO_PACK	GENMASK(23, 16)
+#define SOCINFO_MINOR	GENMASK(15, 8)
 #define SOCINFO_MISC	GENMASK(7, 0)
 
 static const struct meson_gx_soc_id {
diff --git a/drivers/soc/bcm/raspberrypi-power.c b/drivers/soc/bcm/raspberrypi-power.c
index fe96a8b956fb..f7ed1187518b 100644
--- a/drivers/soc/bcm/raspberrypi-power.c
+++ b/drivers/soc/bcm/raspberrypi-power.c
@@ -45,7 +45,7 @@ struct rpi_power_domains {
 struct rpi_power_domain_packet {
 	u32 domain;
 	u32 on;
-} __packet;
+};
 
 /*
  * Asks the firmware to enable or disable power on a specific power
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 18eefc3f1abe..0c6065dba48a 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2414,39 +2414,21 @@ struct cgr_comp {
 	struct completion completion;
 };
 
-static int qman_delete_cgr_thread(void *p)
+static void qman_delete_cgr_smp_call(void *p)
 {
-	struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
-	int ret;
-
-	ret = qman_delete_cgr(cgr_comp->cgr);
-	complete(&cgr_comp->completion);
-
-	return ret;
+	qman_delete_cgr((struct qman_cgr *)p);
 }
 
 void qman_delete_cgr_safe(struct qman_cgr *cgr)
 {
-	struct task_struct *thread;
-	struct cgr_comp cgr_comp;
-
 	preempt_disable();
 	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
-		init_completion(&cgr_comp.completion);
-		cgr_comp.cgr = cgr;
-		thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
-					"cgr_del");
-
-		if (IS_ERR(thread))
-			goto out;
-
-		kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
-		wake_up_process(thread);
-		wait_for_completion(&cgr_comp.completion);
+		smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
+					 qman_delete_cgr_smp_call, cgr, true);
 		preempt_enable();
 		return;
 	}
-out:
+
 	qman_delete_cgr(cgr);
 	preempt_enable();
 }
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index 47e7aa963dbb..1613ccf0c059 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -456,13 +456,21 @@ static int imx_gpc_probe(struct platform_device *pdev)
 
 static int imx_gpc_remove(struct platform_device *pdev)
 {
+	struct device_node *pgc_node;
 	int ret;
 
+	pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc");
+
+	/* bail out if DT too old and doesn't provide the necessary info */
+	if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") &&
+	    !pgc_node)
+		return 0;
+
 	/*
 	 * If the old DT binding is used the toplevel driver needs to
 	 * de-register the power domains
 	 */
-	if (!of_get_child_by_name(pdev->dev.of_node, "pgc")) {
+	if (!pgc_node) {
 		of_genpd_del_provider(pdev->dev.of_node);
 
 		ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base);
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index afc7ecc3c187..f4e3bd40c72e 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@
-155,7 +155,7 @@ static int imx7_gpc_pu_pgc_sw_pdn_req(struct generic_pm_domain *genpd) return imx7_gpc_pu_pgc_sw_pxx_req(genpd, false); } -static struct imx7_pgc_domain imx7_pgc_domains[] = { +static const struct imx7_pgc_domain imx7_pgc_domains[] = { [IMX7_POWER_DOMAIN_MIPI_PHY] = { .genpd = { .name = "mipi-phy", @@ -321,11 +321,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev) continue; } - domain = &imx7_pgc_domains[domain_index]; - domain->regmap = regmap; - domain->genpd.power_on = imx7_gpc_pu_pgc_sw_pup_req; - domain->genpd.power_off = imx7_gpc_pu_pgc_sw_pdn_req; - pd_pdev = platform_device_alloc("imx7-pgc-domain", domain_index); if (!pd_pdev) { @@ -334,7 +329,20 @@ static int imx_gpcv2_probe(struct platform_device *pdev) return -ENOMEM; } - pd_pdev->dev.platform_data = domain; + ret = platform_device_add_data(pd_pdev, + &imx7_pgc_domains[domain_index], + sizeof(imx7_pgc_domains[domain_index])); + if (ret) { + platform_device_put(pd_pdev); + of_node_put(np); + return ret; + } + + domain = pd_pdev->dev.platform_data; + domain->regmap = regmap; + domain->genpd.power_on = imx7_gpc_pu_pgc_sw_pup_req; + domain->genpd.power_off = imx7_gpc_pu_pgc_sw_pdn_req; + pd_pdev->dev.parent = dev; pd_pdev->dev.of_node = np; diff --git a/drivers/soc/lantiq/gphy.c b/drivers/soc/lantiq/gphy.c index 8d8659463b3e..feeb17cebc25 100644 --- a/drivers/soc/lantiq/gphy.c +++ b/drivers/soc/lantiq/gphy.c @@ -30,7 +30,6 @@ struct xway_gphy_priv { struct clk *gphy_clk_gate; struct reset_control *gphy_reset; struct reset_control *gphy_reset2; - struct notifier_block gphy_reboot_nb; void __iomem *membase; char *fw_name; }; @@ -64,24 +63,6 @@ static const struct of_device_id xway_gphy_match[] = { }; MODULE_DEVICE_TABLE(of, xway_gphy_match); -static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb) -{ - return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb); -} - -static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb, - unsigned long code, void *unused) -{ - struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb); - - if (priv) { - reset_control_assert(priv->gphy_reset); - reset_control_assert(priv->gphy_reset2); - } - - return NOTIFY_DONE; -} - static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv, dma_addr_t *dev_addr) { @@ -205,14 +186,6 @@ static int xway_gphy_probe(struct platform_device *pdev) reset_control_deassert(priv->gphy_reset); reset_control_deassert(priv->gphy_reset2); - /* assert the gphy reset because it can hang after a reboot: */ - priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify; - priv->gphy_reboot_nb.priority = -1; - - ret = register_reboot_notifier(&priv->gphy_reboot_nb); - if (ret) - dev_warn(dev, "Failed to register reboot notifier\n"); - platform_set_drvdata(pdev, priv); return ret; @@ -220,21 +193,12 @@ static int xway_gphy_probe(struct platform_device *pdev) static int xway_gphy_remove(struct platform_device *pdev) { - struct device *dev = &pdev->dev; struct xway_gphy_priv *priv = platform_get_drvdata(pdev); - int ret; - - reset_control_assert(priv->gphy_reset); - reset_control_assert(priv->gphy_reset2); iowrite32be(0, priv->membase); clk_disable_unprepare(priv->gphy_clk_gate); - ret = unregister_reboot_notifier(&priv->gphy_reboot_nb); - if (ret) - dev_warn(dev, "Failed to unregister reboot notifier\n"); - return 0; } diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index c2048382830f..e3df1e96b141 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ 
b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -522,7 +522,7 @@ struct pmic_wrapper_type { u32 int_en_all; u32 spi_w; u32 wdt_src; - int has_bridge:1; + unsigned int has_bridge:1; int (*init_reg_clock)(struct pmic_wrapper *wrp); int (*init_soc_specific)(struct pmic_wrapper *wrp); }; diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c index e1ce8b1b5090..fb2a8b1e7979 100644 --- a/drivers/soc/mediatek/mtk-scpsys.c +++ b/drivers/soc/mediatek/mtk-scpsys.c @@ -892,7 +892,7 @@ static int scpsys_probe(struct platform_device *pdev) pd_data = &scp->pd_data; - for (i = 0, sd = soc->subdomains ; i < soc->num_subdomains ; i++) { + for (i = 0, sd = soc->subdomains; i < soc->num_subdomains; i++, sd++) { ret = pm_genpd_add_subdomain(pd_data->domains[sd->origin], pd_data->domains[sd->subdomain]); if (ret && IS_ENABLED(CONFIG_PM)) diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c index 403bea9d546b..50214b620865 100644 --- a/drivers/soc/qcom/smsm.c +++ b/drivers/soc/qcom/smsm.c @@ -496,8 +496,10 @@ static int qcom_smsm_probe(struct platform_device *pdev) if (!smsm->hosts) return -ENOMEM; - local_node = of_find_node_with_property(of_node_get(pdev->dev.of_node), - "#qcom,smem-state-cells"); + for_each_child_of_node(pdev->dev.of_node, local_node) { + if (of_find_property(local_node, "#qcom,smem-state-cells", NULL)) + break; + } if (!local_node) { dev_err(&pdev->dev, "no state entry\n"); return -EINVAL; diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c index d008e5b82db4..df3ccb30bc2d 100644 --- a/drivers/soc/qcom/wcnss_ctrl.c +++ b/drivers/soc/qcom/wcnss_ctrl.c @@ -249,7 +249,7 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc) /* Increment for next fragment */ req->seq++; - data += req->hdr.len; + data += NV_FRAGMENT_SIZE; left -= NV_FRAGMENT_SIZE; } while (left > 0); diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c index 40b75748835f..ba009bb9d82b 100644 --- a/drivers/soc/rockchip/pm_domains.c +++ b/drivers/soc/rockchip/pm_domains.c @@ -255,7 +255,7 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd, return; else if (pd->info->pwr_w_mask) regmap_write(pmu->regmap, pmu->info->pwr_offset, - on ? pd->info->pwr_mask : + on ? pd->info->pwr_w_mask : (pd->info->pwr_mask | pd->info->pwr_w_mask)); else regmap_update_bits(pmu->regmap, pmu->info->pwr_offset, diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c index 568e1c65aa82..4903f15177cf 100644 --- a/drivers/spi/spi-armada-3700.c +++ b/drivers/spi/spi-armada-3700.c @@ -79,6 +79,7 @@ #define A3700_SPI_BYTE_LEN BIT(5) #define A3700_SPI_CLK_PRESCALE BIT(0) #define A3700_SPI_CLK_PRESCALE_MASK (0x1f) +#define A3700_SPI_CLK_EVEN_OFFS (0x10) #define A3700_SPI_WFIFO_THRS_BIT 28 #define A3700_SPI_RFIFO_THRS_BIT 24 @@ -220,6 +221,13 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi, prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz); + /* For prescaler values over 15, we can only set it by steps of 2. + * Starting from A3700_SPI_CLK_EVEN_OFFS, we set values from 0 up to + * 30. We only use this range from 16 to 30. 
+	 */
+	if (prescale > 15)
+		prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);
+
 	val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
 	val = val & ~A3700_SPI_CLK_PRESCALE_MASK;
 
@@ -616,6 +624,11 @@ static int a3700_spi_transfer_one(struct spi_master *master,
 	a3700_spi_header_set(a3700_spi);
 
 	if (xfer->rx_buf) {
+		/* Clear the WFIFO, since its last 2 bytes are shifted out
+		 * during a read operation
+		 */
+		spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, 0);
+
 		/* Set read data length */
 		spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG,
 			     a3700_spi->buf_len);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index f95da364c283..047875861df1 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1489,6 +1489,11 @@ static void atmel_spi_init(struct atmel_spi *as)
 {
 	spi_writel(as, CR, SPI_BIT(SWRST));
 	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
+
+	/* It is recommended to enable FIFOs first thing after reset */
+	if (as->fifo_size)
+		spi_writel(as, CR, SPI_BIT(FIFOEN));
+
 	if (as->caps.has_wdrbt) {
 		spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS)
 				| SPI_BIT(MSTR));
@@ -1499,9 +1504,6 @@ static void atmel_spi_init(struct atmel_spi *as)
 	if (as->use_pdc)
 		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
 	spi_writel(as, CR, SPI_BIT(SPIEN));
-
-	if (as->fifo_size)
-		spi_writel(as, CR, SPI_BIT(FIFOEN));
 }
 
 static int atmel_spi_probe(struct platform_device *pdev)
@@ -1661,12 +1663,12 @@ static int atmel_spi_remove(struct platform_device *pdev)
 	pm_runtime_get_sync(&pdev->dev);
 
 	/* reset the hardware and block queue progress */
-	spin_lock_irq(&as->lock);
 	if (as->use_dma) {
 		atmel_spi_stop_dma(master);
 		atmel_spi_release_dma(master);
 	}
 
+	spin_lock_irq(&as->lock);
 	spi_writel(as, CR, SPI_BIT(SWRST));
 	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
 	spi_readl(as, SR);
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 6ab4c7700228..68cfc351b47f 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -553,7 +553,7 @@ static int spi_engine_probe(struct platform_device *pdev)
 
 static int spi_engine_remove(struct platform_device *pdev)
 {
-	struct spi_master *master = platform_get_drvdata(pdev);
+	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
 	struct spi_engine *spi_engine = spi_master_get_devdata(master);
 	int irq = platform_get_irq(pdev, 0);
 
@@ -561,6 +561,8 @@ static int spi_engine_remove(struct platform_device *pdev)
 
 	free_irq(irq, master);
 
+	spi_master_put(master);
+
 	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
 	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
 	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
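The spi-axi-spi-engine change above is a reference-counting fix worth generalizing: platform_get_drvdata() returns the master without taking a reference, so a remove path that keeps using the master should pin it with spi_master_get() and balance that with spi_master_put(). In sketch form (driver names here are illustrative):

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

/* Sketch of the remove-path pattern the fix above adopts. */
static int example_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master =
		spi_master_get(platform_get_drvdata(pdev));

	/* ...teardown that still dereferences "master" goes here... */

	spi_master_put(master);		/* balances spi_master_get() */
	return 0;
}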
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index a172ab299e80..6573152ce893 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -490,7 +490,7 @@ static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
 
 static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
 {
-	if (!has_bspi(qspi) || (qspi->bspi_enabled))
+	if (!has_bspi(qspi))
 		return;
 
 	qspi->bspi_enabled = 1;
@@ -505,7 +505,7 @@ static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
 
 static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
 {
-	if (!has_bspi(qspi) || (!qspi->bspi_enabled))
+	if (!has_bspi(qspi))
 		return;
 
 	qspi->bspi_enabled = 0;
@@ -519,16 +519,19 @@ static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
 
 static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
 {
-	u32 data = 0;
+	u32 rd = 0;
+	u32 wr = 0;
 
-	if (qspi->curr_cs == cs)
-		return;
 	if (qspi->base[CHIP_SELECT]) {
-		data = bcm_qspi_read(qspi, CHIP_SELECT, 0);
-		data = (data & ~0xff) | (1 << cs);
-		bcm_qspi_write(qspi, CHIP_SELECT, 0, data);
+		rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
+		wr = (rd & ~0xff) | (1 << cs);
+		if (rd == wr)
+			return;
+		bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
 		usleep_range(10, 20);
 	}
+
+	dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
 	qspi->curr_cs = cs;
 }
@@ -755,8 +758,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
 			dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
 	}
 	mspi_cdram = MSPI_CDRAM_CONT_BIT;
-	mspi_cdram |= (~(1 << spi->chip_select) &
-		       MSPI_CDRAM_PCS);
+
+	if (has_bspi(qspi))
+		mspi_cdram &= ~1;
+	else
+		mspi_cdram |= (~(1 << spi->chip_select) &
+			       MSPI_CDRAM_PCS);
+
 	mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
 		       MSPI_CDRAM_BITSE_BIT);
 
@@ -1247,7 +1255,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
 		qspi->base[MSPI]  = devm_ioremap_resource(dev, res);
 		if (IS_ERR(qspi->base[MSPI])) {
 			ret = PTR_ERR(qspi->base[MSPI]);
-			goto qspi_probe_err;
+			goto qspi_resource_err;
 		}
 	} else {
 		goto qspi_resource_err;
@@ -1258,7 +1266,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
 		qspi->base[BSPI]  = devm_ioremap_resource(dev, res);
 		if (IS_ERR(qspi->base[BSPI])) {
 			ret = PTR_ERR(qspi->base[BSPI]);
-			goto qspi_probe_err;
+			goto qspi_resource_err;
 		}
 		qspi->bspi_mode = true;
 	} else {
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 7428091d3f5b..bd00b7cc8b78 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -184,6 +184,11 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
 	struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
 	irqreturn_t ret = IRQ_NONE;
 
+	/* IRQ may be shared, so return if our interrupts are disabled */
+	if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
+	      (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
+		return ret;
+
 	/* check if we have data to read */
 	while (bs->rx_len &&
 	       (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 5c9516ae4942..4a001634023e 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -313,6 +313,14 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
 
 	while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
 	       (xspi->tx_bytes > 0)) {
+
+		/* When the controller is busy, writing a byte may fail and
+		 * leave the SPI controller in a bad state; wait briefly
+		 * before writing the next byte.
+		 */
+		if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
+		    CDNS_SPI_IXR_TXFULL)
+			usleep_range(10, 20);
+
 		if (xspi->txbuf)
 			cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
 		else
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index babb15f07995..d51ca243a028 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1496,12 +1496,23 @@ static int spi_imx_remove(struct platform_device *pdev)
 {
 	struct spi_master *master = platform_get_drvdata(pdev);
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+	int ret;
 
 	spi_bitbang_stop(&spi_imx->bitbang);
 
+	ret = clk_enable(spi_imx->clk_per);
+	if (ret)
+		return ret;
+
+	ret = clk_enable(spi_imx->clk_ipg);
+	if (ret) {
+		clk_disable(spi_imx->clk_per);
+		return ret;
+	}
+
 	writel(0, spi_imx->base + MXC_CSPICTRL);
-	clk_unprepare(spi_imx->clk_ipg);
-	clk_unprepare(spi_imx->clk_per);
+	clk_disable_unprepare(spi_imx->clk_ipg);
+	clk_disable_unprepare(spi_imx->clk_per);
 	spi_imx_sdma_exit(spi_imx);
spi_master_put(master); diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index 7f8429635502..a5b0df7e6131 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -574,10 +574,15 @@ static int meson_spicc_probe(struct platform_device *pdev) master->max_speed_hz = rate >> 2; ret = devm_spi_register_master(&pdev->dev, master); - if (!ret) - return 0; + if (ret) { + dev_err(&pdev->dev, "spi master registration failed\n"); + goto out_clk; + } - dev_err(&pdev->dev, "spi master registration failed\n"); + return 0; + +out_clk: + clk_disable_unprepare(spicc->core); out_master: spi_master_put(master); diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h index 94f7b0713281..02a8012a318a 100644 --- a/drivers/spi/spi-pxa2xx.h +++ b/drivers/spi/spi-pxa2xx.h @@ -38,7 +38,7 @@ struct driver_data { /* SSP register addresses */ void __iomem *ioaddr; - u32 ssdr_physical; + phys_addr_t ssdr_physical; /* SSP masks*/ u32 dma_cr1; diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index b392cca8fa4f..1a6ec226d6e4 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -1273,8 +1273,6 @@ static int s3c64xx_spi_resume(struct device *dev) if (ret < 0) return ret; - s3c64xx_spi_hwinit(sdd, sdd->port_id); - return spi_master_resume(master); } #endif /* CONFIG_PM_SLEEP */ @@ -1312,6 +1310,8 @@ static int s3c64xx_spi_runtime_resume(struct device *dev) if (ret != 0) goto err_disable_src_clk; + s3c64xx_spi_hwinit(sdd, sdd->port_id); + return 0; err_disable_src_clk: diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 0eb1e9583485..0fea18ab970e 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -55,6 +55,8 @@ struct sh_msiof_spi_priv { void *rx_dma_page; dma_addr_t tx_dma_addr; dma_addr_t rx_dma_addr; + bool native_cs_inited; + bool native_cs_high; bool slave_aborted; }; @@ -275,6 +277,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, } k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1); + brps = min_t(int, brps, 32); scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps); sh_msiof_write(p, TSCR, scr); @@ -528,8 +531,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi) { struct device_node *np = spi->master->dev.of_node; struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); - - pm_runtime_get_sync(&p->pdev->dev); + u32 clr, set, tmp; if (!np) { /* @@ -539,19 +541,33 @@ static int sh_msiof_spi_setup(struct spi_device *spi) spi->cs_gpio = (uintptr_t)spi->controller_data; } - /* Configure pins before deasserting CS */ - sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL), - !!(spi->mode & SPI_CPHA), - !!(spi->mode & SPI_3WIRE), - !!(spi->mode & SPI_LSB_FIRST), - !!(spi->mode & SPI_CS_HIGH)); - - if (spi->cs_gpio >= 0) + if (spi->cs_gpio >= 0) { gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); + return 0; + } + if (spi_controller_is_slave(p->master)) + return 0; - pm_runtime_put(&p->pdev->dev); + if (p->native_cs_inited && + (p->native_cs_high == !!(spi->mode & SPI_CS_HIGH))) + return 0; + /* Configure native chip select mode/polarity early */ + clr = MDR1_SYNCMD_MASK; + set = MDR1_SYNCMD_SPI; + if (spi->mode & SPI_CS_HIGH) + clr |= BIT(MDR1_SYNCAC_SHIFT); + else + set |= BIT(MDR1_SYNCAC_SHIFT); + pm_runtime_get_sync(&p->pdev->dev); + tmp = sh_msiof_read(p, TMDR1) & ~clr; + sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON); + tmp = sh_msiof_read(p, RMDR1) & ~clr; + sh_msiof_write(p, RMDR1, tmp | set); + 
pm_runtime_put(&p->pdev->dev); + p->native_cs_high = spi->mode & SPI_CS_HIGH; + p->native_cs_inited = true; return 0; } @@ -784,11 +800,21 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, goto stop_dma; } - /* wait for tx fifo to be emptied / rx fifo to be filled */ + /* wait for tx/rx DMA completion */ ret = sh_msiof_wait_for_completion(p); if (ret) goto stop_reset; + if (!rx) { + reinit_completion(&p->done); + sh_msiof_write(p, IER, IER_TEOFE); + + /* wait for tx fifo to be emptied */ + ret = sh_msiof_wait_for_completion(p); + if (ret) + goto stop_reset; + } + /* clear status bits */ sh_msiof_reset_str(p); @@ -900,7 +926,7 @@ static int sh_msiof_transfer_one(struct spi_master *master, break; copy32 = copy_bswap32; } else if (bits <= 16) { - if (l & 1) + if (l & 3) break; copy32 = copy_wswap32; } else { diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c index c5cd635c28f3..41410031f8e9 100644 --- a/drivers/spi/spi-sun4i.c +++ b/drivers/spi/spi-sun4i.c @@ -525,7 +525,7 @@ static int sun4i_spi_probe(struct platform_device *pdev) static int sun4i_spi_remove(struct platform_device *pdev) { - pm_runtime_disable(&pdev->dev); + pm_runtime_force_suspend(&pdev->dev); return 0; } diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index fb38234249a8..8533f4edd00a 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c @@ -541,7 +541,7 @@ static int sun6i_spi_probe(struct platform_device *pdev) static int sun6i_spi_remove(struct platform_device *pdev) { - pm_runtime_disable(&pdev->dev); + pm_runtime_force_suspend(&pdev->dev); return 0; } diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c index bc7100b93dfc..e0b9fe1d0e37 100644 --- a/drivers/spi/spi-xilinx.c +++ b/drivers/spi/spi-xilinx.c @@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) while (remaining_words) { int n_words, tx_words, rx_words; u32 sr; + int stalled; n_words = min(remaining_words, xspi->buffer_size); @@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) /* Read out all the data from the Rx FIFO */ rx_words = n_words; + stalled = 10; while (rx_words) { + if (rx_words == n_words && !(stalled--) && + !(sr & XSPI_SR_TX_EMPTY_MASK) && + (sr & XSPI_SR_RX_EMPTY_MASK)) { + dev_err(&spi->dev, + "Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n"); + xspi_init_hw(xspi); + return -EIO; + } + if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) { xilinx_spi_rx(xspi); rx_words--; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index e8b5a5e21b2e..f85d30dc9187 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -779,8 +779,14 @@ static int spi_map_buf(struct spi_controller *ctlr, struct device *dev, for (i = 0; i < sgs; i++) { if (vmalloced_buf || kmap_buf) { - min = min_t(size_t, - len, desc_len - offset_in_page(buf)); + /* + * Next scatterlist entry size is the minimum between + * the desc_len and the remaining buffer length that + * fits in a page. 
+			 */
+			min = min_t(size_t, desc_len,
+				    min_t(size_t, len,
+					  PAGE_SIZE - offset_in_page(buf)));
 			if (vmalloced_buf)
 				vm_page = vmalloc_to_page(buf);
 			else
@@ -1216,6 +1222,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	if (!was_busy && ctlr->auto_runtime_pm) {
 		ret = pm_runtime_get_sync(ctlr->dev.parent);
 		if (ret < 0) {
+			pm_runtime_put_noidle(ctlr->dev.parent);
 			dev_err(&ctlr->dev, "Failed to power device: %d\n",
 				ret);
 			mutex_unlock(&ctlr->io_mutex);
@@ -2245,18 +2252,13 @@ static int __unregister(struct device *dev, void *null)
 void spi_unregister_controller(struct spi_controller *ctlr)
 {
 	struct spi_controller *found;
+	int id = ctlr->bus_num;
 	int dummy;
 
 	/* First make sure that this controller was ever added */
 	mutex_lock(&board_lock);
-	found = idr_find(&spi_master_idr, ctlr->bus_num);
+	found = idr_find(&spi_master_idr, id);
 	mutex_unlock(&board_lock);
-	if (found != ctlr) {
-		dev_dbg(&ctlr->dev,
-			"attempting to delete unregistered controller [%s]\n",
-			dev_name(&ctlr->dev));
-		return;
-	}
 	if (ctlr->queued) {
 		if (spi_destroy_queue(ctlr))
 			dev_err(&ctlr->dev, "queue remove failed\n");
@@ -2269,7 +2271,8 @@ void spi_unregister_controller(struct spi_controller *ctlr)
 	device_unregister(&ctlr->dev);
 	/* free bus id */
 	mutex_lock(&board_lock);
-	idr_remove(&spi_master_idr, ctlr->bus_num);
+	if (found == ctlr)
+		idr_remove(&spi_master_idr, id);
 	mutex_unlock(&board_lock);
 }
 EXPORT_SYMBOL_GPL(spi_unregister_controller);
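The spi_map_buf() fix above clamps each scatterlist entry to the smaller of the descriptor length and the bytes remaining in the current page, since consecutive pages of a vmalloc'ed or kmapped buffer need not be physically contiguous. The computation in isolation, as a standalone sketch with PAGE_SIZE and offset_in_page() stubbed out:

#include <stddef.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096u		/* stand-in for the kernel's PAGE_SIZE */

static size_t ex_offset_in_page(uintptr_t buf)
{
	return buf & (EX_PAGE_SIZE - 1);	/* as offset_in_page() computes */
}

/* One sg entry: at most desc_len bytes, never crossing a page boundary. */
static size_t sg_chunk_len(uintptr_t buf, size_t len, size_t desc_len)
{
	size_t in_page = EX_PAGE_SIZE - ex_offset_in_page(buf);
	size_t n = len < in_page ? len : in_page;

	return desc_len < n ? desc_len : n;
}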
+ +config ANDROID_VSOC + tristate "Android Virtual SoC support" + default n + depends on PCI_MSI + ---help--- + This option adds support for the Virtual SoC driver needed to boot + a 'cuttlefish' Android image inside QEMU. The driver interacts with + a QEMU ivshmem device. If built as a module, it will be called vsoc. + source "drivers/staging/android/ion/Kconfig" +source "drivers/staging/android/abl/Kconfig" +source "drivers/staging/android/sbl/Kconfig" +source "drivers/staging/android/vsbl/Kconfig" + +source "drivers/staging/android/fiq_debugger/Kconfig" endif # if ANDROID diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile index 7cf1564a49a5..d1f80c6afed4 100644 --- a/drivers/staging/android/Makefile +++ b/drivers/staging/android/Makefile @@ -1,5 +1,12 @@ ccflags-y += -I$(src) # needed for trace events obj-y += ion/ +obj-$(CONFIG_ABL_BOOTLOADER_CONTROL) += abl/ +obj-$(CONFIG_SBL_BOOTLOADER_CONTROL) += sbl/ +obj-$(CONFIG_VSBL_BOOTLOADER_CONTROL) += vsbl/ +obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger/ obj-$(CONFIG_ASHMEM) += ashmem.o +obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o +obj-$(CONFIG_ANDROID_FWDATA) += fwdata.o +obj-$(CONFIG_ANDROID_VSOC) += vsoc.o diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO index 5f14247392bf..ebd6ba3ae02e 100644 --- a/drivers/staging/android/TODO +++ b/drivers/staging/android/TODO @@ -12,5 +12,14 @@ ion/ - Split /dev/ion up into multiple nodes (e.g. /dev/ion/heap0) - Better test framework (integration with VGEM was suggested) +vsoc.c, uapi/vsoc_shm.h + - The current driver uses the same wait queue for all of the futexes in a + region. This will cause false wakeups in regions with a large number of + waiting threads. We should eventually use multiple queues and select the + queue based on the region. + - Add debugfs support for examining the permissions of regions. + - Remove VSOC_WAIT_FOR_INCOMING_INTERRUPT ioctl. This functionality has been + superseded by the futex and is there for legacy reasons. + Please send patches to Greg Kroah-Hartman and Cc: Arve Hjønnevåg and Riley Andrews diff --git a/drivers/staging/android/abl/Kconfig b/drivers/staging/android/abl/Kconfig new file mode 100644 index 000000000000..92299abcd1c4 --- /dev/null +++ b/drivers/staging/android/abl/Kconfig @@ -0,0 +1,18 @@ +config ABL_BOOTLOADER_CONTROL + tristate "ABL Bootloader Control module" + depends on X86 + default n + help + This driver installs a reboot hook, such that if reboot() is + invoked with a string argument, the corresponding ABL action + is written to CMOS data, to be processed by ABL on + reboot. + +config SEND_SLCAN_ENABLE + bool "control slcan protocol" + default n + help + This option controls enabling/disabling the slcan protocol in the + ablbc driver. The IOC component on the Broxton IVI platform uses + the slcan protocol to communicate before calling the powerctl + program. If the IOC is not used, this option can be disabled. diff --git a/drivers/staging/android/abl/Makefile b/drivers/staging/android/abl/Makefile new file mode 100644 index 000000000000..b70a05a2af6d --- /dev/null +++ b/drivers/staging/android/abl/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_ABL_BOOTLOADER_CONTROL) += ablbc.o diff --git a/drivers/staging/android/abl/ablbc.c b/drivers/staging/android/abl/ablbc.c new file mode 100644 index 000000000000..59154f5e10ef --- /dev/null +++ b/drivers/staging/android/abl/ablbc.c @@ -0,0 +1,415 @@ +/* + * ablbc: control ABL bootloaders + * Copyright (c) 2013-2016, Intel Corporation.
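The mechanism the driver below builds on: a string passed from userspace via reboot(LINUX_REBOOT_CMD_RESTART2, ...) reaches the reboot notifier chain as the data argument, with SYS_RESTART as the event. A minimal hedged sketch of that hook (module and log names are hypothetical):

// Minimal reboot-notifier sketch; demo_* names are illustrative only.
#include <linux/module.h>
#include <linux/reboot.h>

static int demo_reboot_call(struct notifier_block *nb,
			    unsigned long what, void *data)
{
	const char *target = data;   // reboot command string, may be NULL

	if (what == SYS_RESTART && target && target[0])
		pr_info("demo: reboot target is '%s'\n", target);
	return NOTIFY_DONE;
}

static struct notifier_block demo_reboot_nb = {
	.notifier_call = demo_reboot_call,
};

static int __init demo_init(void)
{
	return register_reboot_notifier(&demo_reboot_nb);
}

static void __exit demo_exit(void)
{
	unregister_reboot_notifier(&demo_reboot_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");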
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include + +#define MODULE_NAME "ablbc" + +/* RTC read and write */ +static inline unsigned char cmos_read_ext_bank(u8 addr) +{ + outb(addr, RTC_PORT(4)); + return inb(RTC_PORT(5)); +} +#define CMOS_READ_EXT(a) cmos_read_ext_bank(a) + +static inline void cmos_write_ext_bank(u8 val, u8 addr) +{ + outb(addr, RTC_PORT(4)); + outb(val, RTC_PORT(5)); +} +#define CMOS_WRITE_EXT(v, a) cmos_write_ext_bank(v, a) + +/* ABL Conventions */ +#define NVRAM_START_ADDRESS 0x10 + +#define _USERCMD_(cmd, len) (((cmd) << 5) | ((len) & 0x1f)) +#define USERCMD_END _USERCMD_(0, 0) +#define USERCMD_ACTION _USERCMD_(7, 1) +#define USERCMD_UPDATE_IFWI(len) _USERCMD_(2, len) + +#define CDATA_TAG_USER_CMD 0x4d +#define NVRAM_VALID_FLAG 0x12 + +#define CRC32C_POLYNOMIAL 0x82F63B78 /* CRC32C Castagnoli */ + +static bool capsule_request; + +union _cdata_header { + uint32_t data; + struct { + unsigned ncond : 2; + unsigned length : 10; + unsigned flags : 4; + unsigned version: 4; + unsigned tag : 12; + }; +}; + +struct nvram_capsule_cmd { + char action; + char device; + char partition; + char file_name[1]; +} __packed; + +struct nvram_reboot_cmd { + char action; + char target; + char end; + char padding; +} __packed; + +struct name2id { + const char *name; + int id; +}; + +struct nvram_msg { + char magic; + char size; + union _cdata_header cdata_header; + char *cdata_payload; + size_t cdata_payload_size; + uint32_t crc; +} __packed; + +static const struct name2id NAME2ID[] = { + { "main", 0x00 }, + { "android", 0x00 }, + { "bootloader", 0x01 }, + { "fastboot", 0x01 }, + { "elk", 0x02 }, + { "recovery", 0x03 }, + { "crashmode", 0x04 }, + { "dnx", 0x05 }, + { "cli", 0x10 }, +}; + +static size_t offset; /* memorize offset between each call */ + +static size_t write_data_to_nvram(char *data, size_t size) +{ + int i; + unsigned long flags; + + spin_lock_irqsave(&rtc_lock, flags); + for (i = 0; i < size; i++) + CMOS_WRITE_EXT(*(data + i), NVRAM_START_ADDRESS + offset + i); + + offset += size; + spin_unlock_irqrestore(&rtc_lock, flags); + + return i; +} + +static void write_msg_to_nvram(struct nvram_msg *nvram_msg) +{ + /* Ensure to start from top : only one command expected */ + offset = 0; + write_data_to_nvram((void*)nvram_msg, + offsetof(struct nvram_msg, cdata_payload)); + write_data_to_nvram((void*)(nvram_msg->cdata_payload), + nvram_msg->cdata_payload_size); + write_data_to_nvram((void*)&(nvram_msg->crc), sizeof(nvram_msg->crc)); +} + +/* Compute CRC for one byte (shift register-based: one bit at a time). */ +static uint32_t crc32c_byte(uint32_t crc, unsigned byte) +{ + int i; + uint32_t c; + + for (i = 0 ; i < 8 ; i += 1) { + c = (crc ^ byte) & 1; + if (c) + crc = (crc >> 1) ^ CRC32C_POLYNOMIAL; + else + crc = (crc >> 1); + byte >>= 1; + } + + return crc; +} + +/* Compute CRC for a given buffer. 
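As a sanity check, the crc32c_byte() routine above and the crc32c_buf() helper that follows can be exercised against the standard Castagnoli test vector. A self-contained userspace sketch, assuming the well-known CRC-32C check value 0xE3069283 for "123456789" (note that the driver stores the un-inverted CRC; the standard check value applies after final bit-inversion):

// Check the bitwise CRC32C against the standard Castagnoli test vector.
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CRC32C_POLYNOMIAL 0x82F63B78u   // reflected Castagnoli polynomial

static uint32_t crc32c_byte(uint32_t crc, unsigned byte)
{
	for (int i = 0; i < 8; i++) {
		uint32_t c = (crc ^ byte) & 1;

		crc = (crc >> 1) ^ (c ? CRC32C_POLYNOMIAL : 0);
		byte >>= 1;
	}
	return crc;
}

static uint32_t crc32c_buf(uint32_t crc, const void *addr, unsigned len)
{
	const uint8_t *p = addr;

	while (len--)
		crc = crc32c_byte(crc, *p++);
	return crc;
}

int main(void)
{
	uint32_t crc = crc32c_buf(~0u, "123456789", 9);

	printf("crc32c = 0x%08X\n", ~crc);
	assert(~crc == 0xE3069283u);    // well-known CRC-32C check value
	return 0;
}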
*/ +static uint32_t crc32c_buf(uint32_t crc, const void *addr, unsigned len) +{ + unsigned i; + + for (i = 0 ; i < len ; i += 1) + crc = crc32c_byte(crc, *(uint8_t *)(addr + i)); + + return crc; +} + +static uint32_t crc32c_msg(struct nvram_msg *nvram_msg) +{ + uint32_t crc; + + crc = crc32c_buf(~0, nvram_msg, + offsetof(struct nvram_msg, cdata_payload)); + crc = crc32c_buf(crc, nvram_msg->cdata_payload, + nvram_msg->cdata_payload_size); + return crc; +} + +static struct kobject *capsule_kobject; + +static ssize_t is_capsule_requested(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", capsule_request); +} + +enum capsule_device_type { + EMMC = 2, + SDCARD = 4 +}; + +static ssize_t capsule_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nvram_msg msg; + struct nvram_capsule_cmd *capsule_cmd; + char name[32], partition; + enum capsule_device_type device; + int ret, padding; + unsigned char size; + union _cdata_header cdh; + + device = (buf[0] == 'm' ? EMMC : SDCARD); + partition = buf[1] - '0'; + ret = sscanf(buf+3, "%s", name); + pr_info(MODULE_NAME " capsule parameters (%d): DEVICE=%d PARTITION=%d NAME=%s\n", + ret, device, partition, name); + + cdh.data = 0; + cdh.tag = CDATA_TAG_USER_CMD; + + /* padding of filename on next dword */ + padding = (4 - (3 + strlen(name))%4)%4; + size = 2 + sizeof(cdh) + 3 + strlen(name) + padding + 4; + cdh.length = 1 + (3 + strlen(name) + padding) / 4; + + msg.magic = NVRAM_VALID_FLAG; + msg.size = size; + msg.cdata_header.data = cdh.data; + + capsule_cmd = kmalloc(size, GFP_KERNEL); + if (!capsule_cmd) + return -ENOMEM; + + capsule_cmd->action = USERCMD_UPDATE_IFWI(strlen(name) + 2); + capsule_cmd->device = device; + capsule_cmd->partition = partition; + strncpy(capsule_cmd->file_name, name, strlen(name)); + msg.cdata_payload = (char *)capsule_cmd; + msg.cdata_payload_size = 3 + strlen(name) + padding; + msg.crc = crc32c_msg(&msg); + write_msg_to_nvram(&msg); + capsule_request = true; + + kfree(capsule_cmd); + + return count; +} + +static struct kobj_attribute capsule_name_attribute = + __ATTR(capsule_name, 0600, NULL, capsule_store); + +static struct kobj_attribute capsule_requested_attribute = + __ATTR(capsule_requested, 0400, is_capsule_requested, NULL); + +static int reboot_target_name2id(const char *name) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(NAME2ID); i++) + if (!strcmp(NAME2ID[i].name, name)) + return NAME2ID[i].id; + + return -EINVAL; +} + +static int set_reboot_target(const char *name) +{ + int id; + struct nvram_msg msg; + struct nvram_reboot_cmd reboot_cmd; + union _cdata_header cdh; + + if (name == NULL) { + pr_err("Error in %s: NULL target\n", __func__); + return -EINVAL; + } + + id = reboot_target_name2id(name); + if (id < 0) { + pr_err("Error in %s: '%s' is not a valid target\n", + __func__, name); + return -EINVAL; + } + + cdh.data = 0; + cdh.length = 2; /* 2*32 bits, from header to padding */ + cdh.tag = CDATA_TAG_USER_CMD; + + memset(&reboot_cmd, 0, sizeof(reboot_cmd)); + memset(&msg, 0, sizeof(msg)); + msg.magic = NVRAM_VALID_FLAG; + msg.cdata_header.data = cdh.data; + reboot_cmd.action = USERCMD_ACTION; + + reboot_cmd.target = id; + msg.cdata_payload = (void*)&reboot_cmd; + msg.cdata_payload_size = sizeof(reboot_cmd); + msg.size = offsetof(struct nvram_msg, cdata_payload) + + sizeof(reboot_cmd) + sizeof(msg.crc); + msg.crc = crc32c_msg(&msg); + + write_msg_to_nvram(&msg); + + return 0; +} + +static const unsigned int 
DEFAULT_TARGET_INDEX; + +static const char * const cold_reset[] = { + "/vendor/bin/cansend", + "slcan0", + "0000FFFF#05015555555555", + NULL}; +static const char * const suppress_heartbeat[] = { + "/vendor/bin/cansend", + "slcan0", + "0000FFFF#01035555555555", + NULL}; +static const char * const reboot_request[] = { + "/vendor/bin/cansend", + "slcan0", + "0000FFFF#03015555555555", + NULL}; + +static int execute_slcan_command(const char *cmd[]) +{ +#ifdef CONFIG_SEND_SLCAN_ENABLE + struct subprocess_info *sub_info; + int ret = -1; + + sub_info = call_usermodehelper_setup((char *)cmd[0], + (char **)cmd, (char **)NULL, GFP_KERNEL, + (void *)NULL, (void *)NULL, (void *)NULL); + + if (sub_info) { + ret = call_usermodehelper_exec(sub_info, + UMH_WAIT_PROC); + pr_info("Exec cmd=%s ret=%d\n", cmd[0], ret); + } + + if (ret) + pr_err("Failure on cmd=%s ret=%d\n", cmd[0], ret); + + return ret; +#else + return 0; +#endif +} + +static int ablbc_reboot_notifier_call(struct notifier_block *notifier, + unsigned long what, void *data) +{ + const char *target = (const char *)data; + int ret; + + if (what != SYS_RESTART) + return NOTIFY_DONE; + if (target && target[0] != '\0') { + ret = set_reboot_target(target); + if (ret) + pr_err("%s: Failed to set reboot target, ret=%d\n", + __func__, ret); + } + + ret = execute_slcan_command((const char **)suppress_heartbeat); + if (ret) + goto done; + + ret = execute_slcan_command((const char **)reboot_request); + if (ret) + goto done; + + ret = execute_slcan_command((const char **)cold_reset); + +done: + return NOTIFY_DONE; +} + +static struct notifier_block ablbc_reboot_notifier = { + .notifier_call = ablbc_reboot_notifier_call, +}; + +static int __init ablbc_init(void) +{ + int ret; + + ret = register_reboot_notifier(&ablbc_reboot_notifier); + if (ret) { + pr_err(MODULE_NAME ": unable to register reboot notifier\n"); + return ret; + } + + capsule_kobject = kobject_create_and_add("capsule", kernel_kobj); + if (!capsule_kobject) + return -ENOMEM; + + ret = sysfs_create_file(capsule_kobject, + &capsule_name_attribute.attr); + if (ret) { + pr_err("failed to create capsule_name in /sys/kernel/capsule\n"); + goto err; + } + + ret = sysfs_create_file(capsule_kobject, + &capsule_requested_attribute.attr); + if (ret) { + pr_err("failed to create capsule_requested in /sys/kernel/capsule\n"); + goto err; + } + + return 0; + +err: + kobject_put(capsule_kobject); + return ret; +} + +module_init(ablbc_init); + +static void __exit ablbc_exit(void) +{ + unregister_reboot_notifier(&ablbc_reboot_notifier); + kobject_put(capsule_kobject); +} +module_exit(ablbc_exit); + +MODULE_AUTHOR("Guillaume Betous "); +MODULE_DESCRIPTION("Automotive Bootloader boot control driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 0f695df14c9d..69df278e9aa4 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -334,24 +334,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) mutex_lock(&ashmem_mutex); if (asma->size == 0) { - ret = -EINVAL; - goto out; + mutex_unlock(&ashmem_mutex); + return -EINVAL; } if (!asma->file) { - ret = -EBADF; - goto out; + mutex_unlock(&ashmem_mutex); + return -EBADF; } + mutex_unlock(&ashmem_mutex); + ret = vfs_llseek(asma->file, offset, origin); if (ret < 0) - goto out; + return ret; /** Copy f_pos from backing file, since f_ops->llseek() sets it */ file->f_pos = asma->file->f_pos; - -out: - mutex_unlock(&ashmem_mutex); return
ret; } @@ -401,22 +400,14 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) } get_file(asma->file); - /* - * XXX - Reworked to use shmem_zero_setup() instead of - * shmem_set_file while we're in staging. -jstultz - */ - if (vma->vm_flags & VM_SHARED) { - ret = shmem_zero_setup(vma); - if (ret) { - fput(asma->file); - goto out; - } + if (vma->vm_flags & VM_SHARED) + shmem_set_file(vma, asma->file); + else { + if (vma->vm_file) + fput(vma->vm_file); + vma->vm_file = asma->file; } - if (vma->vm_file) - fput(vma->vm_file); - vma->vm_file = asma->file; - out: mutex_unlock(&ashmem_mutex); return ret; @@ -453,9 +444,9 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) loff_t start = range->pgstart * PAGE_SIZE; loff_t end = (range->pgend + 1) * PAGE_SIZE; - vfs_fallocate(range->asma->file, - FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, - start, end - start); + range->asma->file->f_op->fallocate(range->asma->file, + FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, + start, end - start); range->purged = ASHMEM_WAS_PURGED; lru_del(range); @@ -710,30 +701,30 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, size_t pgstart, pgend; int ret = -EINVAL; - if (unlikely(!asma->file)) - return -EINVAL; - if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) return -EFAULT; + mutex_lock(&ashmem_mutex); + + if (unlikely(!asma->file)) + goto out_unlock; + /* per custom, you can pass zero for len to mean "everything onward" */ if (!pin.len) pin.len = PAGE_ALIGN(asma->size) - pin.offset; if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) - return -EINVAL; + goto out_unlock; if (unlikely(((__u32)-1) - pin.offset < pin.len)) - return -EINVAL; + goto out_unlock; if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) - return -EINVAL; + goto out_unlock; pgstart = pin.offset / PAGE_SIZE; pgend = pgstart + (pin.len / PAGE_SIZE) - 1; - mutex_lock(&ashmem_mutex); - switch (cmd) { case ASHMEM_PIN: ret = ashmem_pin(asma, pgstart, pgend); @@ -746,6 +737,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, break; } +out_unlock: mutex_unlock(&ashmem_mutex); return ret; @@ -765,10 +757,12 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; case ASHMEM_SET_SIZE: ret = -EINVAL; + mutex_lock(&ashmem_mutex); if (!asma->file) { ret = 0; asma->size = (size_t)arg; } + mutex_unlock(&ashmem_mutex); break; case ASHMEM_GET_SIZE: ret = asma->size; diff --git a/drivers/staging/android/fiq_debugger/Kconfig b/drivers/staging/android/fiq_debugger/Kconfig new file mode 100644 index 000000000000..60fc224d4efc --- /dev/null +++ b/drivers/staging/android/fiq_debugger/Kconfig @@ -0,0 +1,58 @@ +config FIQ_DEBUGGER + bool "FIQ Mode Serial Debugger" + default n + depends on ARM || ARM64 + help + The FIQ serial debugger can accept commands even when the + kernel is unresponsive due to being stuck with interrupts + disabled. + +config FIQ_DEBUGGER_NO_SLEEP + bool "Keep serial debugger active" + depends on FIQ_DEBUGGER + default n + help + Enables the serial debugger at boot. Passing + fiq_debugger.no_sleep on the kernel commandline will + override this config option. + +config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON + bool "Don't disable wakeup IRQ when debugger is active" + depends on FIQ_DEBUGGER + default n + help + Don't disable the wakeup irq when enabling the uart clock. 
This will + cause extra interrupts, but it makes the serial debugger usable + on some MSM radio builds that ignore the uart clock request in power + collapse. + +config FIQ_DEBUGGER_CONSOLE + bool "Console on FIQ Serial Debugger port" + depends on FIQ_DEBUGGER + default n + help + Enables a console so that printk messages are displayed on + the debugger serial port as they occur. + +config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE + bool "Put the FIQ debugger into console mode by default" + depends on FIQ_DEBUGGER_CONSOLE + default n + help + If enabled, this puts the fiq debugger into console mode by default. + Otherwise, the fiq debugger will start out in debug mode. + +config FIQ_DEBUGGER_UART_OVERLAY + bool "Install uart DT overlay" + depends on FIQ_DEBUGGER + select OF_OVERLAY + default n + help + If enabled, the fiq debugger calls fiq_debugger_uart_overlay(), + which applies the uart_overlay@0 overlay to disable the normal + uart driver. + +config FIQ_WATCHDOG + bool + select FIQ_DEBUGGER + select PSTORE_RAM + default n diff --git a/drivers/staging/android/fiq_debugger/Makefile b/drivers/staging/android/fiq_debugger/Makefile new file mode 100644 index 000000000000..a7ca4871cad3 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/Makefile @@ -0,0 +1,4 @@ +obj-y += fiq_debugger.o +obj-$(CONFIG_ARM) += fiq_debugger_arm.o +obj-$(CONFIG_ARM64) += fiq_debugger_arm64.o +obj-$(CONFIG_FIQ_WATCHDOG) += fiq_watchdog.o diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.c b/drivers/staging/android/fiq_debugger/fiq_debugger.c new file mode 100644 index 000000000000..f6a806219f84 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_debugger.c @@ -0,0 +1,1246 @@ +/* + * drivers/staging/android/fiq_debugger.c + * + * Serial Debugger Interface accessed through an FIQ interrupt. + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_FIQ_GLUE +#include +#endif + +#ifdef CONFIG_FIQ_DEBUGGER_UART_OVERLAY +#include +#endif + +#include + +#include "fiq_debugger.h" +#include "fiq_debugger_priv.h" +#include "fiq_debugger_ringbuf.h" + +#define DEBUG_MAX 64 +#define MAX_UNHANDLED_FIQ_COUNT 1000000 + +#define MAX_FIQ_DEBUGGER_PORTS 4 + +struct fiq_debugger_state { +#ifdef CONFIG_FIQ_GLUE + struct fiq_glue_handler handler; +#endif + struct fiq_debugger_output output; + + int fiq; + int uart_irq; + int signal_irq; + int wakeup_irq; + bool wakeup_irq_no_set_wake; + struct clk *clk; + struct fiq_debugger_pdata *pdata; + struct platform_device *pdev; + + char debug_cmd[DEBUG_MAX]; + int debug_busy; + int debug_abort; + + char debug_buf[DEBUG_MAX]; + int debug_count; + + bool no_sleep; + bool debug_enable; + bool ignore_next_wakeup_irq; + struct timer_list sleep_timer; + spinlock_t sleep_timer_lock; + bool uart_enabled; + struct wakeup_source debugger_wake_src; + bool console_enable; + int current_cpu; + atomic_t unhandled_fiq_count; + bool in_fiq; + + struct work_struct work; + spinlock_t work_lock; + char work_cmd[DEBUG_MAX]; + +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE + spinlock_t console_lock; + struct console console; + struct tty_port tty_port; + struct fiq_debugger_ringbuf *tty_rbuf; + bool syslog_dumping; +#endif + + unsigned int last_irqs[NR_IRQS]; + unsigned int last_local_timer_irqs[NR_CPUS]; +}; + +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE +struct tty_driver *fiq_tty_driver; +#endif + +#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP +static bool initial_no_sleep = true; +#else +static bool initial_no_sleep; +#endif + +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE +static bool initial_debug_enable = true; +static bool initial_console_enable = true; +#else +static bool initial_debug_enable; +static bool initial_console_enable; +#endif + +static bool fiq_kgdb_enable; +static bool fiq_debugger_disable; + +module_param_named(no_sleep, initial_no_sleep, bool, 0644); +module_param_named(debug_enable, initial_debug_enable, bool, 0644); +module_param_named(console_enable, initial_console_enable, bool, 0644); +module_param_named(kgdb_enable, fiq_kgdb_enable, bool, 0644); +module_param_named(disable, fiq_debugger_disable, bool, 0644); + +#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON +static inline +void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state) {} +static inline +void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state) {} +#else +static inline +void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state) +{ + if (state->wakeup_irq < 0) + return; + enable_irq(state->wakeup_irq); + if (!state->wakeup_irq_no_set_wake) + enable_irq_wake(state->wakeup_irq); +} +static inline +void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state) +{ + if (state->wakeup_irq < 0) + return; + disable_irq_nosync(state->wakeup_irq); + if (!state->wakeup_irq_no_set_wake) + disable_irq_wake(state->wakeup_irq); +} +#endif + +static inline bool fiq_debugger_have_fiq(struct fiq_debugger_state *state) +{ + return (state->fiq >= 0); +} + +#ifdef CONFIG_FIQ_GLUE +static void fiq_debugger_force_irq(struct fiq_debugger_state *state) +{ + unsigned int irq = state->signal_irq; + + if (WARN_ON(!fiq_debugger_have_fiq(state))) + return; + if (state->pdata->force_irq) { + state->pdata->force_irq(state->pdev, irq); + } else { + 
struct irq_chip *chip = irq_get_chip(irq); + if (chip && chip->irq_retrigger) + chip->irq_retrigger(irq_get_irq_data(irq)); + } +} +#endif + +static void fiq_debugger_uart_enable(struct fiq_debugger_state *state) +{ + if (state->clk) + clk_enable(state->clk); + if (state->pdata->uart_enable) + state->pdata->uart_enable(state->pdev); +} + +static void fiq_debugger_uart_disable(struct fiq_debugger_state *state) +{ + if (state->pdata->uart_disable) + state->pdata->uart_disable(state->pdev); + if (state->clk) + clk_disable(state->clk); +} + +static void fiq_debugger_uart_flush(struct fiq_debugger_state *state) +{ + if (state->pdata->uart_flush) + state->pdata->uart_flush(state->pdev); +} + +static void fiq_debugger_putc(struct fiq_debugger_state *state, char c) +{ + state->pdata->uart_putc(state->pdev, c); +} + +static void fiq_debugger_puts(struct fiq_debugger_state *state, char *s) +{ + unsigned c; + while ((c = *s++)) { + if (c == '\n') + fiq_debugger_putc(state, '\r'); + fiq_debugger_putc(state, c); + } +} + +static void fiq_debugger_prompt(struct fiq_debugger_state *state) +{ + fiq_debugger_puts(state, "debug> "); +} + +static void fiq_debugger_dump_kernel_log(struct fiq_debugger_state *state) +{ + char buf[512]; + size_t len; + struct kmsg_dumper dumper = { .active = true }; + + + kmsg_dump_rewind_nolock(&dumper); + while (kmsg_dump_get_line_nolock(&dumper, true, buf, + sizeof(buf) - 1, &len)) { + buf[len] = 0; + fiq_debugger_puts(state, buf); + } +} + +static void fiq_debugger_printf(struct fiq_debugger_output *output, + const char *fmt, ...) +{ + struct fiq_debugger_state *state; + char buf[256]; + va_list ap; + + state = container_of(output, struct fiq_debugger_state, output); + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + fiq_debugger_puts(state, buf); +} + +/* Safe outside fiq context */ +static int fiq_debugger_printf_nfiq(void *cookie, const char *fmt, ...) +{ + struct fiq_debugger_state *state = cookie; + char buf[256]; + va_list ap; + unsigned long irq_flags; + + va_start(ap, fmt); + vsnprintf(buf, 128, fmt, ap); + va_end(ap); + + local_irq_save(irq_flags); + fiq_debugger_puts(state, buf); + fiq_debugger_uart_flush(state); + local_irq_restore(irq_flags); + return state->debug_abort; +} + +static void fiq_debugger_dump_irqs(struct fiq_debugger_state *state) +{ + int n; + struct irq_desc *desc; + + fiq_debugger_printf(&state->output, + "irqnr total since-last status name\n"); + for_each_irq_desc(n, desc) { + struct irqaction *act = desc->action; + if (!act && !kstat_irqs(n)) + continue; + fiq_debugger_printf(&state->output, "%5d: %10u %11u %8x %s\n", n, + kstat_irqs(n), + kstat_irqs(n) - state->last_irqs[n], + desc->status_use_accessors, + (act && act->name) ? act->name : "???"); + state->last_irqs[n] = kstat_irqs(n); + } +} + +static void fiq_debugger_do_ps(struct fiq_debugger_state *state) +{ + struct task_struct *g; + struct task_struct *p; + unsigned task_state; + static const char stat_nam[] = "RSDTtZX"; + + fiq_debugger_printf(&state->output, "pid ppid prio task pc\n"); + read_lock(&tasklist_lock); + do_each_thread(g, p) { + task_state = p->state ? __ffs(p->state) + 1 : 0; + fiq_debugger_printf(&state->output, + "%5d %5d %4d ", p->pid, p->parent->pid, p->prio); + fiq_debugger_printf(&state->output, "%-13.13s %c", p->comm, + task_state >= sizeof(stat_nam) ? '?' 
: stat_nam[task_state]); + if (task_state == TASK_RUNNING) + fiq_debugger_printf(&state->output, " running\n"); + else + fiq_debugger_printf(&state->output, " %08lx\n", + thread_saved_pc(p)); + } while_each_thread(g, p); + read_unlock(&tasklist_lock); +} + +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE +static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state) +{ + state->syslog_dumping = true; +} + +static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state) +{ + state->syslog_dumping = false; +} +#else +extern int do_syslog(int type, char __user *buf, int count); +static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state) +{ + do_syslog(5 /* clear */, NULL, 0); +} + +static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state) +{ + fiq_debugger_dump_kernel_log(state); +} +#endif + +static void fiq_debugger_do_sysrq(struct fiq_debugger_state *state, char rq) +{ + if ((rq == 'g' || rq == 'G') && !fiq_kgdb_enable) { + fiq_debugger_printf(&state->output, "sysrq-g blocked\n"); + return; + } + fiq_debugger_begin_syslog_dump(state); + handle_sysrq(rq); + fiq_debugger_end_syslog_dump(state); +} + +#ifdef CONFIG_KGDB +static void fiq_debugger_do_kgdb(struct fiq_debugger_state *state) +{ + if (!fiq_kgdb_enable) { + fiq_debugger_printf(&state->output, "kgdb through fiq debugger not enabled\n"); + return; + } + + fiq_debugger_printf(&state->output, "enabling console and triggering kgdb\n"); + state->console_enable = true; + handle_sysrq('g'); +} +#endif + +static void fiq_debugger_schedule_work(struct fiq_debugger_state *state, + char *cmd) +{ + unsigned long flags; + + spin_lock_irqsave(&state->work_lock, flags); + if (state->work_cmd[0] != '\0') { + fiq_debugger_printf(&state->output, "work command processor busy\n"); + spin_unlock_irqrestore(&state->work_lock, flags); + return; + } + + strlcpy(state->work_cmd, cmd, sizeof(state->work_cmd)); + spin_unlock_irqrestore(&state->work_lock, flags); + + schedule_work(&state->work); +} + +static void fiq_debugger_work(struct work_struct *work) +{ + struct fiq_debugger_state *state; + char work_cmd[DEBUG_MAX]; + char *cmd; + unsigned long flags; + + state = container_of(work, struct fiq_debugger_state, work); + + spin_lock_irqsave(&state->work_lock, flags); + + strlcpy(work_cmd, state->work_cmd, sizeof(work_cmd)); + state->work_cmd[0] = '\0'; + + spin_unlock_irqrestore(&state->work_lock, flags); + + cmd = work_cmd; + if (!strncmp(cmd, "reboot", 6)) { + cmd += 6; + while (*cmd == ' ') + cmd++; + if (*cmd != '\0') + kernel_restart(cmd); + else + kernel_restart(NULL); + } else { + fiq_debugger_printf(&state->output, "unknown work command '%s'\n", + work_cmd); + } +} + +/* This function CANNOT be called in FIQ context */ +static void fiq_debugger_irq_exec(struct fiq_debugger_state *state, char *cmd) +{ + if (!strcmp(cmd, "ps")) + fiq_debugger_do_ps(state); + if (!strcmp(cmd, "sysrq")) + fiq_debugger_do_sysrq(state, 'h'); + if (!strncmp(cmd, "sysrq ", 6)) + fiq_debugger_do_sysrq(state, cmd[6]); +#ifdef CONFIG_KGDB + if (!strcmp(cmd, "kgdb")) + fiq_debugger_do_kgdb(state); +#endif + if (!strncmp(cmd, "reboot", 6)) + fiq_debugger_schedule_work(state, cmd); +} + +static void fiq_debugger_help(struct fiq_debugger_state *state) +{ + fiq_debugger_printf(&state->output, + "FIQ Debugger commands:\n" + " pc PC status\n" + " regs Register dump\n" + " allregs Extended Register dump\n" + " bt Stack trace\n" + " reboot [] Reboot with command \n" + " reset [] Hard reset with command \n" + " irqs Interrupt status\n" + "
kmsg Kernel log\n" + " version Kernel version\n"); + fiq_debugger_printf(&state->output, + " sleep Allow sleep while in FIQ\n" + " nosleep Disable sleep while in FIQ\n" + " console Switch terminal to console\n" + " cpu Current CPU\n" + " cpu Switch to CPU\n"); + fiq_debugger_printf(&state->output, + " ps Process list\n" + " sysrq sysrq options\n" + " sysrq Execute sysrq with \n"); +#ifdef CONFIG_KGDB + fiq_debugger_printf(&state->output, + " kgdb Enter kernel debugger\n"); +#endif +} + +static void fiq_debugger_take_affinity(void *info) +{ + struct fiq_debugger_state *state = info; + struct cpumask cpumask; + + cpumask_clear(&cpumask); + cpumask_set_cpu(get_cpu(), &cpumask); + + irq_set_affinity(state->uart_irq, &cpumask); +} + +static void fiq_debugger_switch_cpu(struct fiq_debugger_state *state, int cpu) +{ + if (!fiq_debugger_have_fiq(state)) + smp_call_function_single(cpu, fiq_debugger_take_affinity, state, + false); + state->current_cpu = cpu; +} + +static bool fiq_debugger_fiq_exec(struct fiq_debugger_state *state, + const char *cmd, const struct pt_regs *regs, + void *svc_sp) +{ + bool signal_helper = false; + + if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) { + fiq_debugger_help(state); + } else if (!strcmp(cmd, "pc")) { + fiq_debugger_dump_pc(&state->output, regs); + } else if (!strcmp(cmd, "regs")) { + fiq_debugger_dump_regs(&state->output, regs); + } else if (!strcmp(cmd, "allregs")) { + fiq_debugger_dump_allregs(&state->output, regs); + } else if (!strcmp(cmd, "bt")) { + fiq_debugger_dump_stacktrace(&state->output, regs, 100, svc_sp); + } else if (!strncmp(cmd, "reset", 5)) { + cmd += 5; + while (*cmd == ' ') + cmd++; + if (*cmd) { + char tmp_cmd[32]; + strlcpy(tmp_cmd, cmd, sizeof(tmp_cmd)); + machine_restart(tmp_cmd); + } else { + machine_restart(NULL); + } + } else if (!strcmp(cmd, "irqs")) { + fiq_debugger_dump_irqs(state); + } else if (!strcmp(cmd, "kmsg")) { + fiq_debugger_dump_kernel_log(state); + } else if (!strcmp(cmd, "version")) { + fiq_debugger_printf(&state->output, "%s\n", linux_banner); + } else if (!strcmp(cmd, "sleep")) { + state->no_sleep = false; + fiq_debugger_printf(&state->output, "enabling sleep\n"); + } else if (!strcmp(cmd, "nosleep")) { + state->no_sleep = true; + fiq_debugger_printf(&state->output, "disabling sleep\n"); + } else if (!strcmp(cmd, "console")) { + fiq_debugger_printf(&state->output, "console mode\n"); + fiq_debugger_uart_flush(state); + state->console_enable = true; + } else if (!strcmp(cmd, "cpu")) { + fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu); + } else if (!strncmp(cmd, "cpu ", 4)) { + unsigned long cpu = 0; + if (kstrtoul(cmd + 4, 10, &cpu) == 0) + fiq_debugger_switch_cpu(state, cpu); + else + fiq_debugger_printf(&state->output, "invalid cpu\n"); + fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu); + } else { + if (state->debug_busy) { + fiq_debugger_printf(&state->output, + "command processor busy. 
trying to abort.\n"); + state->debug_abort = -1; + } else { + strcpy(state->debug_cmd, cmd); + state->debug_busy = 1; + } + + return true; + } + if (!state->console_enable) + fiq_debugger_prompt(state); + + return signal_helper; +} + +static void fiq_debugger_sleep_timer_expired(unsigned long data) +{ + struct fiq_debugger_state *state = (struct fiq_debugger_state *)data; + unsigned long flags; + + spin_lock_irqsave(&state->sleep_timer_lock, flags); + if (state->uart_enabled && !state->no_sleep) { + if (state->debug_enable && !state->console_enable) { + state->debug_enable = false; + fiq_debugger_printf_nfiq(state, + "suspending fiq debugger\n"); + } + state->ignore_next_wakeup_irq = true; + fiq_debugger_uart_disable(state); + state->uart_enabled = false; + fiq_debugger_enable_wakeup_irq(state); + } + __pm_relax(&state->debugger_wake_src); + spin_unlock_irqrestore(&state->sleep_timer_lock, flags); +} + +static void fiq_debugger_handle_wakeup(struct fiq_debugger_state *state) +{ + unsigned long flags; + + spin_lock_irqsave(&state->sleep_timer_lock, flags); + if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) { + state->ignore_next_wakeup_irq = false; + } else if (!state->uart_enabled) { + __pm_stay_awake(&state->debugger_wake_src); + fiq_debugger_uart_enable(state); + state->uart_enabled = true; + fiq_debugger_disable_wakeup_irq(state); + mod_timer(&state->sleep_timer, jiffies + HZ / 2); + } + spin_unlock_irqrestore(&state->sleep_timer_lock, flags); +} + +static irqreturn_t fiq_debugger_wakeup_irq_handler(int irq, void *dev) +{ + struct fiq_debugger_state *state = dev; + + if (!state->no_sleep) + fiq_debugger_puts(state, "WAKEUP\n"); + fiq_debugger_handle_wakeup(state); + + return IRQ_HANDLED; +} + +static +void fiq_debugger_handle_console_irq_context(struct fiq_debugger_state *state) +{ +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) + if (state->tty_port.ops) { + int i; + int count = fiq_debugger_ringbuf_level(state->tty_rbuf); + for (i = 0; i < count; i++) { + int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0); + tty_insert_flip_char(&state->tty_port, c, TTY_NORMAL); + if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1)) + pr_warn("fiq tty failed to consume byte\n"); + } + tty_flip_buffer_push(&state->tty_port); + } +#endif +} + +static void fiq_debugger_handle_irq_context(struct fiq_debugger_state *state) +{ + if (!state->no_sleep) { + unsigned long flags; + + spin_lock_irqsave(&state->sleep_timer_lock, flags); + __pm_stay_awake(&state->debugger_wake_src); + mod_timer(&state->sleep_timer, jiffies + HZ * 5); + spin_unlock_irqrestore(&state->sleep_timer_lock, flags); + } + fiq_debugger_handle_console_irq_context(state); + if (state->debug_busy) { + fiq_debugger_irq_exec(state, state->debug_cmd); + if (!state->console_enable) + fiq_debugger_prompt(state); + state->debug_busy = 0; + } +} + +static int fiq_debugger_getc(struct fiq_debugger_state *state) +{ + return state->pdata->uart_getc(state->pdev); +} + +static bool fiq_debugger_handle_uart_interrupt(struct fiq_debugger_state *state, + int this_cpu, const struct pt_regs *regs, void *svc_sp) +{ + int c; + static int last_c; + int count = 0; + bool signal_helper = false; + + if (this_cpu != state->current_cpu) { + if (state->in_fiq) + return false; + + if (atomic_inc_return(&state->unhandled_fiq_count) != + MAX_UNHANDLED_FIQ_COUNT) + return false; + + fiq_debugger_printf(&state->output, + "fiq_debugger: cpu %d not responding, " + "reverting to cpu %d\n", state->current_cpu, + this_cpu); + + atomic_set(&state->unhandled_fiq_count, 
0); + fiq_debugger_switch_cpu(state, this_cpu); + return false; + } + + state->in_fiq = true; + + while ((c = fiq_debugger_getc(state)) != FIQ_DEBUGGER_NO_CHAR) { + count++; + if (!state->debug_enable) { + if ((c == 13) || (c == 10)) { + state->debug_enable = true; + state->debug_count = 0; + fiq_debugger_prompt(state); + } + } else if (c == FIQ_DEBUGGER_BREAK) { + state->console_enable = false; + fiq_debugger_puts(state, "fiq debugger mode\n"); + state->debug_count = 0; + fiq_debugger_prompt(state); +#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE + } else if (state->console_enable && state->tty_rbuf) { + fiq_debugger_ringbuf_push(state->tty_rbuf, c); + signal_helper = true; +#endif + } else if ((c >= ' ') && (c < 127)) { + if (state->debug_count < (DEBUG_MAX - 1)) { + state->debug_buf[state->debug_count++] = c; + fiq_debugger_putc(state, c); + } + } else if ((c == 8) || (c == 127)) { + if (state->debug_count > 0) { + state->debug_count--; + fiq_debugger_putc(state, 8); + fiq_debugger_putc(state, ' '); + fiq_debugger_putc(state, 8); + } + } else if ((c == 13) || (c == 10)) { + if (c == '\r' || (c == '\n' && last_c != '\r')) { + fiq_debugger_putc(state, '\r'); + fiq_debugger_putc(state, '\n'); + } + if (state->debug_count) { + state->debug_buf[state->debug_count] = 0; + state->debug_count = 0; + signal_helper |= + fiq_debugger_fiq_exec(state, + state->debug_buf, + regs, svc_sp); + } else { + fiq_debugger_prompt(state); + } + } + last_c = c; + } + if (!state->console_enable) + fiq_debugger_uart_flush(state); + if (state->pdata->fiq_ack) + state->pdata->fiq_ack(state->pdev, state->fiq); + + /* poke sleep timer if necessary */ + if (state->debug_enable && !state->no_sleep) + signal_helper = true; + + atomic_set(&state->unhandled_fiq_count, 0); + state->in_fiq = false; + + return signal_helper; +} + +#ifdef CONFIG_FIQ_GLUE +static void fiq_debugger_fiq(struct fiq_glue_handler *h, + const struct pt_regs *regs, void *svc_sp) +{ + struct fiq_debugger_state *state = + container_of(h, struct fiq_debugger_state, handler); + unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu; + bool need_irq; + + need_irq = fiq_debugger_handle_uart_interrupt(state, this_cpu, regs, + svc_sp); + if (need_irq) + fiq_debugger_force_irq(state); +} +#endif + +/* + * When not using FIQs, we only use this single interrupt as an entry point. + * This just effectively takes over the UART interrupt and does all the work + * in this context. + */ +static irqreturn_t fiq_debugger_uart_irq(int irq, void *dev) +{ + struct fiq_debugger_state *state = dev; + bool not_done; + + fiq_debugger_handle_wakeup(state); + + /* handle the debugger irq in regular context */ + not_done = fiq_debugger_handle_uart_interrupt(state, smp_processor_id(), + get_irq_regs(), + current_thread_info()); + if (not_done) + fiq_debugger_handle_irq_context(state); + + return IRQ_HANDLED; +} + +/* + * If FIQs are used, not everything can happen in fiq context. + * FIQ handler does what it can and then signals this interrupt to finish the + * job in irq context. 
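In outline, this is a two-stage handler: the FIQ half only records input and retriggers the signal IRQ, and the IRQ half then runs with normal kernel services available. A condensed sketch with hypothetical demo_* names (not the driver's code):

// FIQ context: no sleeping, no normal spinlocks -- record and retrigger.
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/string.h>
#include <linux/printk.h>

struct demo_state {
	int signal_irq;
	char pending[64];
};

static void demo_fiq_half(struct demo_state *s, char c)
{
	size_t n = strnlen(s->pending, sizeof(s->pending) - 2);
	struct irq_chip *chip = irq_get_chip(s->signal_irq);

	s->pending[n] = c;
	s->pending[n + 1] = '\0';
	if (chip && chip->irq_retrigger)        // hand off to irq context
		chip->irq_retrigger(irq_get_irq_data(s->signal_irq));
}

// IRQ context: safe to do the heavier work here.
static irqreturn_t demo_signal_irq(int irq, void *dev)
{
	struct demo_state *s = dev;

	pr_info("demo: processing '%s' in irq context\n", s->pending);
	s->pending[0] = '\0';
	return IRQ_HANDLED;
}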
+ */ +static irqreturn_t fiq_debugger_signal_irq(int irq, void *dev) +{ + struct fiq_debugger_state *state = dev; + + if (state->pdata->force_irq_ack) + state->pdata->force_irq_ack(state->pdev, state->signal_irq); + + fiq_debugger_handle_irq_context(state); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_FIQ_GLUE +static void fiq_debugger_resume(struct fiq_glue_handler *h) +{ + struct fiq_debugger_state *state = + container_of(h, struct fiq_debugger_state, handler); + if (state->pdata->uart_resume) + state->pdata->uart_resume(state->pdev); +} +#endif + +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) +struct tty_driver *fiq_debugger_console_device(struct console *co, int *index) +{ + *index = co->index; + return fiq_tty_driver; +} + +static void fiq_debugger_console_write(struct console *co, + const char *s, unsigned int count) +{ + struct fiq_debugger_state *state; + unsigned long flags; + + state = container_of(co, struct fiq_debugger_state, console); + + if (!state->console_enable && !state->syslog_dumping) + return; + + fiq_debugger_uart_enable(state); + spin_lock_irqsave(&state->console_lock, flags); + while (count--) { + if (*s == '\n') + fiq_debugger_putc(state, '\r'); + fiq_debugger_putc(state, *s++); + } + fiq_debugger_uart_flush(state); + spin_unlock_irqrestore(&state->console_lock, flags); + fiq_debugger_uart_disable(state); +} + +static struct console fiq_debugger_console = { + .name = "ttyFIQ", + .device = fiq_debugger_console_device, + .write = fiq_debugger_console_write, + .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED, +}; + +int fiq_tty_open(struct tty_struct *tty, struct file *filp) +{ + int line = tty->index; + struct fiq_debugger_state **states = tty->driver->driver_state; + struct fiq_debugger_state *state = states[line]; + + return tty_port_open(&state->tty_port, tty, filp); +} + +void fiq_tty_close(struct tty_struct *tty, struct file *filp) +{ + tty_port_close(tty->port, tty, filp); +} + +int fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) +{ + int i; + int line = tty->index; + struct fiq_debugger_state **states = tty->driver->driver_state; + struct fiq_debugger_state *state = states[line]; + + if (!state->console_enable) + return count; + + fiq_debugger_uart_enable(state); + spin_lock_irq(&state->console_lock); + for (i = 0; i < count; i++) + fiq_debugger_putc(state, *buf++); + spin_unlock_irq(&state->console_lock); + fiq_debugger_uart_disable(state); + + return count; +} + +int fiq_tty_write_room(struct tty_struct *tty) +{ + return 16; +} + +#ifdef CONFIG_CONSOLE_POLL +static int fiq_tty_poll_init(struct tty_driver *driver, int line, char *options) +{ + return 0; +} + +static int fiq_tty_poll_get_char(struct tty_driver *driver, int line) +{ + struct fiq_debugger_state **states = driver->driver_state; + struct fiq_debugger_state *state = states[line]; + int c = NO_POLL_CHAR; + + fiq_debugger_uart_enable(state); + if (fiq_debugger_have_fiq(state)) { + int count = fiq_debugger_ringbuf_level(state->tty_rbuf); + if (count > 0) { + c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0); + fiq_debugger_ringbuf_consume(state->tty_rbuf, 1); + } + } else { + c = fiq_debugger_getc(state); + if (c == FIQ_DEBUGGER_NO_CHAR) + c = NO_POLL_CHAR; + } + fiq_debugger_uart_disable(state); + + return c; +} + +static void fiq_tty_poll_put_char(struct tty_driver *driver, int line, char ch) +{ + struct fiq_debugger_state **states = driver->driver_state; + struct fiq_debugger_state *state = states[line]; + fiq_debugger_uart_enable(state); + fiq_debugger_putc(state, ch); + 
fiq_debugger_uart_disable(state); +} +#endif + +static const struct tty_port_operations fiq_tty_port_ops; + +static const struct tty_operations fiq_tty_driver_ops = { + .write = fiq_tty_write, + .write_room = fiq_tty_write_room, + .open = fiq_tty_open, + .close = fiq_tty_close, +#ifdef CONFIG_CONSOLE_POLL + .poll_init = fiq_tty_poll_init, + .poll_get_char = fiq_tty_poll_get_char, + .poll_put_char = fiq_tty_poll_put_char, +#endif +}; + +static int fiq_debugger_tty_init(void) +{ + int ret; + struct fiq_debugger_state **states = NULL; + + states = kzalloc(sizeof(*states) * MAX_FIQ_DEBUGGER_PORTS, GFP_KERNEL); + if (!states) { + pr_err("Failed to allocate fiq debugger state structres\n"); + return -ENOMEM; + } + + fiq_tty_driver = alloc_tty_driver(MAX_FIQ_DEBUGGER_PORTS); + if (!fiq_tty_driver) { + pr_err("Failed to allocate fiq debugger tty\n"); + ret = -ENOMEM; + goto err_free_state; + } + + fiq_tty_driver->owner = THIS_MODULE; + fiq_tty_driver->driver_name = "fiq-debugger"; + fiq_tty_driver->name = "ttyFIQ"; + fiq_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + fiq_tty_driver->subtype = SERIAL_TYPE_NORMAL; + fiq_tty_driver->init_termios = tty_std_termios; + fiq_tty_driver->flags = TTY_DRIVER_REAL_RAW | + TTY_DRIVER_DYNAMIC_DEV; + fiq_tty_driver->driver_state = states; + + fiq_tty_driver->init_termios.c_cflag = + B115200 | CS8 | CREAD | HUPCL | CLOCAL; + fiq_tty_driver->init_termios.c_ispeed = 115200; + fiq_tty_driver->init_termios.c_ospeed = 115200; + + tty_set_operations(fiq_tty_driver, &fiq_tty_driver_ops); + + ret = tty_register_driver(fiq_tty_driver); + if (ret) { + pr_err("Failed to register fiq tty: %d\n", ret); + goto err_free_tty; + } + + pr_info("Registered FIQ tty driver\n"); + return 0; + +err_free_tty: + put_tty_driver(fiq_tty_driver); + fiq_tty_driver = NULL; +err_free_state: + kfree(states); + return ret; +} + +static int fiq_debugger_tty_init_one(struct fiq_debugger_state *state) +{ + int ret; + struct device *tty_dev; + struct fiq_debugger_state **states = fiq_tty_driver->driver_state; + + states[state->pdev->id] = state; + + state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024); + if (!state->tty_rbuf) { + pr_err("Failed to allocate fiq debugger ringbuf\n"); + ret = -ENOMEM; + goto err; + } + + tty_port_init(&state->tty_port); + state->tty_port.ops = &fiq_tty_port_ops; + + tty_dev = tty_port_register_device(&state->tty_port, fiq_tty_driver, + state->pdev->id, &state->pdev->dev); + if (IS_ERR(tty_dev)) { + pr_err("Failed to register fiq debugger tty device\n"); + ret = PTR_ERR(tty_dev); + goto err; + } + + device_set_wakeup_capable(tty_dev, 1); + + pr_info("Registered fiq debugger ttyFIQ%d\n", state->pdev->id); + + return 0; + +err: + fiq_debugger_ringbuf_free(state->tty_rbuf); + state->tty_rbuf = NULL; + return ret; +} +#endif + +static int fiq_debugger_dev_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fiq_debugger_state *state = platform_get_drvdata(pdev); + + if (state->pdata->uart_dev_suspend) + return state->pdata->uart_dev_suspend(pdev); + return 0; +} + +static int fiq_debugger_dev_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fiq_debugger_state *state = platform_get_drvdata(pdev); + + if (state->pdata->uart_dev_resume) + return state->pdata->uart_dev_resume(pdev); + return 0; +} + +static int fiq_debugger_probe(struct platform_device *pdev) +{ + int ret; + struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev); + struct fiq_debugger_state *state; + int fiq; + int 
uart_irq; + + if (pdev->id >= MAX_FIQ_DEBUGGER_PORTS) + return -EINVAL; + + if (!pdata->uart_getc || !pdata->uart_putc) + return -EINVAL; + if ((pdata->uart_enable && !pdata->uart_disable) || + (!pdata->uart_enable && pdata->uart_disable)) + return -EINVAL; + + fiq = platform_get_irq_byname(pdev, "fiq"); + uart_irq = platform_get_irq_byname(pdev, "uart_irq"); + + /* uart_irq mode and fiq mode are mutually exclusive, but one of them + * is required */ + if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0)) + return -EINVAL; + if (fiq >= 0 && !pdata->fiq_enable) + return -EINVAL; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + state->output.printf = fiq_debugger_printf; + setup_timer(&state->sleep_timer, fiq_debugger_sleep_timer_expired, + (unsigned long)state); + state->pdata = pdata; + state->pdev = pdev; + state->no_sleep = initial_no_sleep; + state->debug_enable = initial_debug_enable; + state->console_enable = initial_console_enable; + + state->fiq = fiq; + state->uart_irq = uart_irq; + state->signal_irq = platform_get_irq_byname(pdev, "signal"); + state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup"); + + INIT_WORK(&state->work, fiq_debugger_work); + spin_lock_init(&state->work_lock); + + platform_set_drvdata(pdev, state); + + spin_lock_init(&state->sleep_timer_lock); + + if (state->wakeup_irq < 0 && fiq_debugger_have_fiq(state)) + state->no_sleep = true; + state->ignore_next_wakeup_irq = !state->no_sleep; + + wakeup_source_init(&state->debugger_wake_src, "serial-debug"); + + state->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(state->clk)) + state->clk = NULL; + + /* do not call pdata->uart_enable here since uart_init may still + * need to do some initialization before uart_enable can work. + * So, only try to manage the clock during init. + */ + if (state->clk) + clk_enable(state->clk); + + if (pdata->uart_init) { + ret = pdata->uart_init(pdev); + if (ret) + goto err_uart_init; + } + + fiq_debugger_printf_nfiq(state, + "<hit enter %sto activate fiq debugger>\n", + state->no_sleep ? "" : "twice "); + +#ifdef CONFIG_FIQ_GLUE + if (fiq_debugger_have_fiq(state)) { + state->handler.fiq = fiq_debugger_fiq; + state->handler.resume = fiq_debugger_resume; + ret = fiq_glue_register_handler(&state->handler); + if (ret) { + pr_err("%s: could not install fiq handler\n", __func__); + goto err_register_irq; + } + + pdata->fiq_enable(pdev, state->fiq, 1); + } else +#endif + { + ret = request_irq(state->uart_irq, fiq_debugger_uart_irq, + IRQF_NO_SUSPEND, "debug", state); + if (ret) { + pr_err("%s: could not install irq handler\n", __func__); + goto err_register_irq; + } + + /* for irq-only mode, we want this irq to wake us up, if it + * can.
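The surrounding sleep/wake handling follows a simple keep-awake pattern: activity holds a wakeup source and re-arms an idle timer; when the timer fires, the uart is turned off and the wakeup irq re-enabled. A reduced sketch with hypothetical names (the real code also manages the uart clock and the wakeup irq):

// Keep-awake sketch; demo_* names are illustrative, not the driver's code.
#include <linux/timer.h>
#include <linux/pm_wakeup.h>
#include <linux/jiffies.h>

struct demo_uart {
	struct timer_list idle_timer;     // armed via setup_timer() at probe
	struct wakeup_source ws;
	bool uart_enabled;
};

static void demo_activity(struct demo_uart *u)
{
	__pm_stay_awake(&u->ws);          // block suspend while traffic flows
	u->uart_enabled = true;           // clk_enable() etc. would go here
	mod_timer(&u->idle_timer, jiffies + HZ / 2);
}

static void demo_idle_expired(unsigned long data)
{
	struct demo_uart *u = (struct demo_uart *)data;

	u->uart_enabled = false;          // clk_disable(), re-enable wakeup irq
	__pm_relax(&u->ws);               // allow suspend again
}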
+ */ + enable_irq_wake(state->uart_irq); + } + + if (state->clk) + clk_disable(state->clk); + + if (state->signal_irq >= 0) { + ret = request_irq(state->signal_irq, fiq_debugger_signal_irq, + IRQF_TRIGGER_RISING, "debug-signal", state); + if (ret) + pr_err("serial_debugger: could not install signal_irq"); + } + + if (state->wakeup_irq >= 0) { + ret = request_irq(state->wakeup_irq, + fiq_debugger_wakeup_irq_handler, + IRQF_TRIGGER_FALLING, + "debug-wakeup", state); + if (ret) { + pr_err("serial_debugger: " + "could not install wakeup irq\n"); + state->wakeup_irq = -1; + } else { + ret = enable_irq_wake(state->wakeup_irq); + if (ret) { + pr_err("serial_debugger: " + "could not enable wakeup\n"); + state->wakeup_irq_no_set_wake = true; + } + } + } + if (state->no_sleep) + fiq_debugger_handle_wakeup(state); + +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) + spin_lock_init(&state->console_lock); + state->console = fiq_debugger_console; + state->console.index = pdev->id; + if (!console_set_on_cmdline) + add_preferred_console(state->console.name, + state->console.index, NULL); + register_console(&state->console); + fiq_debugger_tty_init_one(state); +#endif + return 0; + +err_register_irq: + if (pdata->uart_free) + pdata->uart_free(pdev); +err_uart_init: + if (state->clk) + clk_disable(state->clk); + if (state->clk) + clk_put(state->clk); + wakeup_source_trash(&state->debugger_wake_src); + platform_set_drvdata(pdev, NULL); + kfree(state); + return ret; +} + +static const struct dev_pm_ops fiq_debugger_dev_pm_ops = { + .suspend = fiq_debugger_dev_suspend, + .resume = fiq_debugger_dev_resume, +}; + +static struct platform_driver fiq_debugger_driver = { + .probe = fiq_debugger_probe, + .driver = { + .name = "fiq_debugger", + .pm = &fiq_debugger_dev_pm_ops, + }, +}; + +#if defined(CONFIG_FIQ_DEBUGGER_UART_OVERLAY) +int fiq_debugger_uart_overlay(void) +{ + struct device_node *onp = of_find_node_by_path("/uart_overlay@0"); + int ret; + + if (!onp) { + pr_err("serial_debugger: uart overlay not found\n"); + return -ENODEV; + } + + ret = of_overlay_create(onp); + if (ret < 0) { + pr_err("serial_debugger: fail to create overlay: %d\n", ret); + of_node_put(onp); + return ret; + } + + pr_info("serial_debugger: uart overlay applied\n"); + return 0; +} +#endif + +static int __init fiq_debugger_init(void) +{ + if (fiq_debugger_disable) { + pr_err("serial_debugger: disabled\n"); + return -ENODEV; + } +#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) + fiq_debugger_tty_init(); +#endif +#if defined(CONFIG_FIQ_DEBUGGER_UART_OVERLAY) + fiq_debugger_uart_overlay(); +#endif + return platform_driver_register(&fiq_debugger_driver); +} + +postcore_initcall(fiq_debugger_init); diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.h b/drivers/staging/android/fiq_debugger/fiq_debugger.h new file mode 100644 index 000000000000..c9ec4f8db086 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_debugger.h @@ -0,0 +1,64 @@ +/* + * drivers/staging/android/fiq_debugger/fiq_debugger.h + * + * Copyright (C) 2010 Google, Inc. + * Author: Colin Cross + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_ +#define _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_ + +#include + +#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR +#define FIQ_DEBUGGER_BREAK 0x00ff0100 + +#define FIQ_DEBUGGER_FIQ_IRQ_NAME "fiq" +#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME "signal" +#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME "wakeup" + +/** + * struct fiq_debugger_pdata - fiq debugger platform data + * @uart_resume: used to restore uart state right before enabling + * the fiq. + * @uart_enable: Do the work necessary to communicate with the uart + * hw (enable clocks, etc.). This must be ref-counted. + * @uart_disable: Do the work necessary to disable the uart hw + * (disable clocks, etc.). This must be ref-counted. + * @uart_dev_suspend: called during PM suspend, generally not needed + * for real fiq mode debugger. + * @uart_dev_resume: called during PM resume, generally not needed + * for real fiq mode debugger. + */ +struct fiq_debugger_pdata { + int (*uart_init)(struct platform_device *pdev); + void (*uart_free)(struct platform_device *pdev); + int (*uart_resume)(struct platform_device *pdev); + int (*uart_getc)(struct platform_device *pdev); + void (*uart_putc)(struct platform_device *pdev, unsigned int c); + void (*uart_flush)(struct platform_device *pdev); + void (*uart_enable)(struct platform_device *pdev); + void (*uart_disable)(struct platform_device *pdev); + + int (*uart_dev_suspend)(struct platform_device *pdev); + int (*uart_dev_resume)(struct platform_device *pdev); + + void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq, + bool enable); + void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq); + + void (*force_irq)(struct platform_device *pdev, unsigned int irq); + void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq); +}; + +#endif diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c new file mode 100644 index 000000000000..8b3e0137be1a --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c @@ -0,0 +1,240 @@ +/* + * Copyright (C) 2014 Google, Inc. + * Author: Colin Cross + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
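For orientation, a hypothetical minimal board wiring for the fiq_debugger_pdata interface defined above. The IRQ number, device id, and the polled-uart stubs are invented for illustration; per the probe code, only uart_getc/uart_putc are mandatory, plus exactly one of the "uart_irq" or "fiq" IRQ resources:

// Hypothetical board wiring sketch for the fiq_debugger platform driver.
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

#include "fiq_debugger.h"

static int demo_uart_getc(struct platform_device *pdev)
{
	return FIQ_DEBUGGER_NO_CHAR;      // a real board polls the RX FIFO here
}

static void demo_uart_putc(struct platform_device *pdev, unsigned int c)
{
	// a real board writes 'c' to the TX FIFO here
}

static struct fiq_debugger_pdata demo_pdata = {
	.uart_getc = demo_uart_getc,
	.uart_putc = demo_uart_putc,
};

static struct resource demo_res[] = {
	{ .name = "uart_irq", .start = 42, .end = 42, .flags = IORESOURCE_IRQ },
};

static struct platform_device demo_fiq_debugger = {
	.name          = "fiq_debugger",
	.id            = 0,
	.resource      = demo_res,
	.num_resources = ARRAY_SIZE(demo_res),
	.dev           = { .platform_data = &demo_pdata },
};
// registered from board init with platform_device_register(&demo_fiq_debugger);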
+ * + */ + +#include +#include + +#include + +#include "fiq_debugger_priv.h" + +static char *mode_name(unsigned cpsr) +{ + switch (cpsr & MODE_MASK) { + case USR_MODE: return "USR"; + case FIQ_MODE: return "FIQ"; + case IRQ_MODE: return "IRQ"; + case SVC_MODE: return "SVC"; + case ABT_MODE: return "ABT"; + case UND_MODE: return "UND"; + case SYSTEM_MODE: return "SYS"; + default: return "???"; + } +} + +void fiq_debugger_dump_pc(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + output->printf(output, " pc %08x cpsr %08x mode %s\n", + regs->ARM_pc, regs->ARM_cpsr, mode_name(regs->ARM_cpsr)); +} + +void fiq_debugger_dump_regs(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + output->printf(output, + " r0 %08x r1 %08x r2 %08x r3 %08x\n", + regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3); + output->printf(output, + " r4 %08x r5 %08x r6 %08x r7 %08x\n", + regs->ARM_r4, regs->ARM_r5, regs->ARM_r6, regs->ARM_r7); + output->printf(output, + " r8 %08x r9 %08x r10 %08x r11 %08x mode %s\n", + regs->ARM_r8, regs->ARM_r9, regs->ARM_r10, regs->ARM_fp, + mode_name(regs->ARM_cpsr)); + output->printf(output, + " ip %08x sp %08x lr %08x pc %08x cpsr %08x\n", + regs->ARM_ip, regs->ARM_sp, regs->ARM_lr, regs->ARM_pc, + regs->ARM_cpsr); +} + +struct mode_regs { + unsigned long sp_svc; + unsigned long lr_svc; + unsigned long spsr_svc; + + unsigned long sp_abt; + unsigned long lr_abt; + unsigned long spsr_abt; + + unsigned long sp_und; + unsigned long lr_und; + unsigned long spsr_und; + + unsigned long sp_irq; + unsigned long lr_irq; + unsigned long spsr_irq; + + unsigned long r8_fiq; + unsigned long r9_fiq; + unsigned long r10_fiq; + unsigned long r11_fiq; + unsigned long r12_fiq; + unsigned long sp_fiq; + unsigned long lr_fiq; + unsigned long spsr_fiq; +}; + +static void __naked get_mode_regs(struct mode_regs *regs) +{ + asm volatile ( + "mrs r1, cpsr\n" + "msr cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r13 - r14}\n" + "mrs r2, spsr\n" + "msr cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n" + "stmia r0!, {r2, r8 - r14}\n" + "mrs r2, spsr\n" + "stmia r0!, {r2}\n" + "msr cpsr_c, r1\n" + "bx lr\n"); +} + + +void fiq_debugger_dump_allregs(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + struct mode_regs mode_regs; + unsigned long mode = regs->ARM_cpsr & MODE_MASK; + + fiq_debugger_dump_regs(output, regs); + get_mode_regs(&mode_regs); + + output->printf(output, + "%csvc: sp %08x lr %08x spsr %08x\n", + mode == SVC_MODE ? '*' : ' ', + mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc); + output->printf(output, + "%cabt: sp %08x lr %08x spsr %08x\n", + mode == ABT_MODE ? '*' : ' ', + mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt); + output->printf(output, + "%cund: sp %08x lr %08x spsr %08x\n", + mode == UND_MODE ? '*' : ' ', + mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und); + output->printf(output, + "%cirq: sp %08x lr %08x spsr %08x\n", + mode == IRQ_MODE ? '*' : ' ', + mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq); + output->printf(output, + "%cfiq: r8 %08x r9 %08x r10 %08x r11 %08x r12 %08x\n", + mode == FIQ_MODE ? 
'*' : ' ', + mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq, + mode_regs.r11_fiq, mode_regs.r12_fiq); + output->printf(output, + " fiq: sp %08x lr %08x spsr %08x\n", + mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq); +} + +struct stacktrace_state { + struct fiq_debugger_output *output; + unsigned int depth; +}; + +static int report_trace(struct stackframe *frame, void *d) +{ + struct stacktrace_state *sts = d; + + if (sts->depth) { + sts->output->printf(sts->output, + " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n", + frame->pc, frame->pc, frame->lr, frame->lr, + frame->sp, frame->fp); + sts->depth--; + return 0; + } + sts->output->printf(sts->output, " ...\n"); + + return sts->depth == 0; +} + +struct frame_tail { + struct frame_tail *fp; + unsigned long sp; + unsigned long lr; +} __attribute__((packed)); + +static struct frame_tail *user_backtrace(struct fiq_debugger_output *output, + struct frame_tail *tail) +{ + struct frame_tail buftail[2]; + + /* Also check accessibility of one struct frame_tail beyond */ + if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) { + output->printf(output, " invalid frame pointer %p\n", + tail); + return NULL; + } + if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) { + output->printf(output, + " failed to copy frame pointer %p\n", tail); + return NULL; + } + + output->printf(output, " %p\n", buftail[0].lr); + + /* frame pointers should strictly progress back up the stack + * (towards higher addresses) */ + if (tail >= buftail[0].fp) + return NULL; + + return buftail[0].fp-1; +} + +void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output, + const struct pt_regs *regs, unsigned int depth, void *ssp) +{ + struct frame_tail *tail; + struct thread_info *real_thread_info = THREAD_INFO(ssp); + struct stacktrace_state sts; + + sts.depth = depth; + sts.output = output; + *current_thread_info() = *real_thread_info; + + if (!current) + output->printf(output, "current NULL\n"); + else + output->printf(output, "pid: %d comm: %s\n", + current->pid, current->comm); + fiq_debugger_dump_regs(output, regs); + + if (!user_mode(regs)) { + struct stackframe frame; + frame.fp = regs->ARM_fp; + frame.sp = regs->ARM_sp; + frame.lr = regs->ARM_lr; + frame.pc = regs->ARM_pc; + output->printf(output, + " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n", + regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr, + regs->ARM_sp, regs->ARM_fp); + walk_stackframe(&frame, report_trace, &sts); + return; + } + + tail = ((struct frame_tail *) regs->ARM_fp) - 1; + while (depth-- && tail && !((unsigned long) tail & 3)) + tail = user_backtrace(output, tail); +} diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c new file mode 100644 index 000000000000..c53f4980bab9 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c @@ -0,0 +1,201 @@ +/* + * Copyright (C) 2014 Google, Inc. + * Author: Colin Cross + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
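+ *
+ * AArch64 counterpart of the ARM dump helpers: it prints the exception
+ * level and pstate, dumps the AArch32 or AArch64 register set as
+ * appropriate, and walks kernel-mode stack frames.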
+ * + */ + +#include +#include + +#include "fiq_debugger_priv.h" + +static char *mode_name(const struct pt_regs *regs) +{ + if (compat_user_mode(regs)) { + return "USR"; + } else { + switch (processor_mode(regs)) { + case PSR_MODE_EL0t: return "EL0t"; + case PSR_MODE_EL1t: return "EL1t"; + case PSR_MODE_EL1h: return "EL1h"; + case PSR_MODE_EL2t: return "EL2t"; + case PSR_MODE_EL2h: return "EL2h"; + default: return "???"; + } + } +} + +void fiq_debugger_dump_pc(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + output->printf(output, " pc %016lx cpsr %08lx mode %s\n", + regs->pc, regs->pstate, mode_name(regs)); +} + +void fiq_debugger_dump_regs_aarch32(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + output->printf(output, " r0 %08x r1 %08x r2 %08x r3 %08x\n", + regs->compat_usr(0), regs->compat_usr(1), + regs->compat_usr(2), regs->compat_usr(3)); + output->printf(output, " r4 %08x r5 %08x r6 %08x r7 %08x\n", + regs->compat_usr(4), regs->compat_usr(5), + regs->compat_usr(6), regs->compat_usr(7)); + output->printf(output, " r8 %08x r9 %08x r10 %08x r11 %08x\n", + regs->compat_usr(8), regs->compat_usr(9), + regs->compat_usr(10), regs->compat_usr(11)); + output->printf(output, " ip %08x sp %08x lr %08x pc %08x\n", + regs->compat_usr(12), regs->compat_sp, + regs->compat_lr, regs->pc); + output->printf(output, " cpsr %08x (%s)\n", + regs->pstate, mode_name(regs)); +} + +void fiq_debugger_dump_regs_aarch64(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + + output->printf(output, " x0 %016lx x1 %016lx\n", + regs->regs[0], regs->regs[1]); + output->printf(output, " x2 %016lx x3 %016lx\n", + regs->regs[2], regs->regs[3]); + output->printf(output, " x4 %016lx x5 %016lx\n", + regs->regs[4], regs->regs[5]); + output->printf(output, " x6 %016lx x7 %016lx\n", + regs->regs[6], regs->regs[7]); + output->printf(output, " x8 %016lx x9 %016lx\n", + regs->regs[8], regs->regs[9]); + output->printf(output, " x10 %016lx x11 %016lx\n", + regs->regs[10], regs->regs[11]); + output->printf(output, " x12 %016lx x13 %016lx\n", + regs->regs[12], regs->regs[13]); + output->printf(output, " x14 %016lx x15 %016lx\n", + regs->regs[14], regs->regs[15]); + output->printf(output, " x16 %016lx x17 %016lx\n", + regs->regs[16], regs->regs[17]); + output->printf(output, " x18 %016lx x19 %016lx\n", + regs->regs[18], regs->regs[19]); + output->printf(output, " x20 %016lx x21 %016lx\n", + regs->regs[20], regs->regs[21]); + output->printf(output, " x22 %016lx x23 %016lx\n", + regs->regs[22], regs->regs[23]); + output->printf(output, " x24 %016lx x25 %016lx\n", + regs->regs[24], regs->regs[25]); + output->printf(output, " x26 %016lx x27 %016lx\n", + regs->regs[26], regs->regs[27]); + output->printf(output, " x28 %016lx x29 %016lx\n", + regs->regs[28], regs->regs[29]); + output->printf(output, " x30 %016lx sp %016lx\n", + regs->regs[30], regs->sp); + output->printf(output, " pc %016lx cpsr %08x (%s)\n", + regs->pc, regs->pstate, mode_name(regs)); +} + +void fiq_debugger_dump_regs(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + if (compat_user_mode(regs)) + fiq_debugger_dump_regs_aarch32(output, regs); + else + fiq_debugger_dump_regs_aarch64(output, regs); +} + +#define READ_SPECIAL_REG(x) ({ \ + u64 val; \ + asm volatile ("mrs %0, " # x : "=r"(val)); \ + val; \ +}) + +void fiq_debugger_dump_allregs(struct fiq_debugger_output *output, + const struct pt_regs *regs) +{ + u32 pstate = READ_SPECIAL_REG(CurrentEl); + bool in_el2 = (pstate & PSR_MODE_MASK) 
>= PSR_MODE_EL2t;
+
+	fiq_debugger_dump_regs(output, regs);
+
+	output->printf(output, " sp_el0 %016lx\n",
+			READ_SPECIAL_REG(sp_el0));
+
+	if (in_el2)
+		output->printf(output, " sp_el1 %016lx\n",
+				READ_SPECIAL_REG(sp_el1));
+
+	output->printf(output, " elr_el1 %016lx\n",
+			READ_SPECIAL_REG(elr_el1));
+
+	output->printf(output, " spsr_el1 %08lx\n",
+			READ_SPECIAL_REG(spsr_el1));
+
+	if (in_el2) {
+		output->printf(output, " spsr_irq %08lx\n",
+				READ_SPECIAL_REG(spsr_irq));
+		output->printf(output, " spsr_abt %08lx\n",
+				READ_SPECIAL_REG(spsr_abt));
+		output->printf(output, " spsr_und %08lx\n",
+				READ_SPECIAL_REG(spsr_und));
+		output->printf(output, " spsr_fiq %08lx\n",
+				READ_SPECIAL_REG(spsr_fiq));
+		output->printf(output, " elr_el2 %016lx\n",
+				READ_SPECIAL_REG(elr_el2));
+		output->printf(output, " spsr_el2 %08lx\n",
+				READ_SPECIAL_REG(spsr_el2));
+	}
+}
+
+struct stacktrace_state {
+	struct fiq_debugger_output *output;
+	unsigned int depth;
+};
+
+static int report_trace(struct stackframe *frame, void *d)
+{
+	struct stacktrace_state *sts = d;
+
+	if (sts->depth) {
+		sts->output->printf(sts->output, "%pF:\n", frame->pc);
+		sts->output->printf(sts->output,
+				" pc %016lx fp %016lx\n",
+				frame->pc, frame->fp);
+		sts->depth--;
+		return 0;
+	}
+	sts->output->printf(sts->output, " ...\n");
+
+	return sts->depth == 0;
+}
+
+void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
+		const struct pt_regs *regs, unsigned int depth, void *ssp)
+{
+	struct thread_info *real_thread_info = THREAD_INFO(ssp);
+	struct stacktrace_state sts;
+
+	sts.depth = depth;
+	sts.output = output;
+	*current_thread_info() = *real_thread_info;
+
+	if (!current)
+		output->printf(output, "current NULL\n");
+	else
+		output->printf(output, "pid: %d comm: %s\n",
+			current->pid, current->comm);
+	fiq_debugger_dump_regs(output, regs);
+
+	if (!user_mode(regs)) {
+		struct stackframe frame;
+		frame.fp = regs->regs[29];
+		frame.pc = regs->pc;
+		output->printf(output, "\n");
+		walk_stackframe(current, &frame, report_trace, &sts);
+	}
+}
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h b/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h
new file mode 100644
index 000000000000..d5d051f727a8
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Colin Cross
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
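+ *
+ * Arch-independent glue shared by the AArch32 and AArch64 backends:
+ * the fiq_debugger_output callback type and the dump entry points each
+ * architecture implements.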
+ * + */ + +#ifndef _FIQ_DEBUGGER_PRIV_H_ +#define _FIQ_DEBUGGER_PRIV_H_ + +#define THREAD_INFO(sp) ((struct thread_info *) \ + ((unsigned long)(sp) & ~(THREAD_SIZE - 1))) + +struct fiq_debugger_output { + void (*printf)(struct fiq_debugger_output *output, const char *fmt, ...); +}; + +struct pt_regs; + +void fiq_debugger_dump_pc(struct fiq_debugger_output *output, + const struct pt_regs *regs); +void fiq_debugger_dump_regs(struct fiq_debugger_output *output, + const struct pt_regs *regs); +void fiq_debugger_dump_allregs(struct fiq_debugger_output *output, + const struct pt_regs *regs); +void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output, + const struct pt_regs *regs, unsigned int depth, void *ssp); + +#endif diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h b/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h new file mode 100644 index 000000000000..10c3c5d09098 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h @@ -0,0 +1,94 @@ +/* + * drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h + * + * simple lockless ringbuffer + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +struct fiq_debugger_ringbuf { + int len; + int head; + int tail; + u8 buf[]; +}; + + +static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len) +{ + struct fiq_debugger_ringbuf *rbuf; + + rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL); + if (rbuf == NULL) + return NULL; + + rbuf->len = len; + rbuf->head = 0; + rbuf->tail = 0; + smp_mb(); + + return rbuf; +} + +static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf) +{ + kfree(rbuf); +} + +static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf) +{ + int level = rbuf->head - rbuf->tail; + + if (level < 0) + level = rbuf->len + level; + + return level; +} + +static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf) +{ + return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1; +} + +static inline u8 +fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i) +{ + return rbuf->buf[(rbuf->tail + i) % rbuf->len]; +} + +static inline int +fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count) +{ + count = min(count, fiq_debugger_ringbuf_level(rbuf)); + + rbuf->tail = (rbuf->tail + count) % rbuf->len; + smp_mb(); + + return count; +} + +static inline int +fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum) +{ + if (fiq_debugger_ringbuf_room(rbuf) == 0) + return 0; + + rbuf->buf[rbuf->head] = datum; + smp_mb(); + rbuf->head = (rbuf->head + 1) % rbuf->len; + smp_mb(); + + return 1; +} diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.c b/drivers/staging/android/fiq_debugger/fiq_watchdog.c new file mode 100644 index 000000000000..194b54138417 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_watchdog.c @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2014 Google, Inc. 
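+ *
+ * When the watchdog FIQ fires, fiq_watchdog_triggered() writes a short
+ * banner and a stack trace for the interrupted CPU into the ramoops
+ * console buffer, where it can be recovered after the reset.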
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include + +#include "fiq_watchdog.h" +#include "fiq_debugger_priv.h" + +static DEFINE_RAW_SPINLOCK(fiq_watchdog_lock); + +static void fiq_watchdog_printf(struct fiq_debugger_output *output, + const char *fmt, ...) +{ + char buf[256]; + va_list ap; + int len; + + va_start(ap, fmt); + len = vscnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + ramoops_console_write_buf(buf, len); +} + +struct fiq_debugger_output fiq_watchdog_output = { + .printf = fiq_watchdog_printf, +}; + +void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp) +{ + char msg[24]; + int len; + + raw_spin_lock(&fiq_watchdog_lock); + + len = scnprintf(msg, sizeof(msg), "watchdog fiq cpu %d\n", + THREAD_INFO(svc_sp)->cpu); + ramoops_console_write_buf(msg, len); + + fiq_debugger_dump_stacktrace(&fiq_watchdog_output, regs, 100, svc_sp); + + raw_spin_unlock(&fiq_watchdog_lock); +} diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.h b/drivers/staging/android/fiq_debugger/fiq_watchdog.h new file mode 100644 index 000000000000..c6b507f8d976 --- /dev/null +++ b/drivers/staging/android/fiq_debugger/fiq_watchdog.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2014 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _FIQ_WATCHDOG_H_ +#define _FIQ_WATCHDOG_H_ + +void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp); + +#endif diff --git a/drivers/staging/android/fwdata.c b/drivers/staging/android/fwdata.c new file mode 100644 index 000000000000..525f7e92ec84 --- /dev/null +++ b/drivers/staging/android/fwdata.c @@ -0,0 +1,402 @@ +/* + * Copyright (C) 2017 Intel, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include + +struct android_fwdata_state { + struct device *dev; + struct kobject *properties_kobj; + struct kobject *android_kobj; + struct kobject *vbmeta_kobj; + struct kobject *fstab_kobj; + struct kobject *system_kobj; + struct kobject *vendor_kobj; + struct kobject *product_kobj; + struct kobject *odm_kobj; +}; + +static struct android_fwdata_state state; + +/* Called when /properties// is read. 
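+ * The kobject identifies the folder and attr->attr.name the file, and
+ * the two are joined into a device-property key; e.g. a read of
+ * properties/android/fstab/system/dev is answered from the
+ * "android.fstab.system.dev" property.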
*/ +static ssize_t property_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + const char *prefix = NULL; + char key[128]; + const char *value = NULL; + int ret; + + /* It would be much more convenient if show() gave us the relative path + * to the file being read, e.g. properties/android/fstab/system/dev, + * which could be easily converted to a property key. + * TODO: Infer the relative path from kobj and remove all hard-coded + * property keys. + */ + if (kobj == state.android_kobj) { + prefix = "android"; + } else if (kobj == state.vbmeta_kobj) { + prefix = "android.vbmeta"; + } else if (kobj == state.fstab_kobj) { + prefix = "android.fstab"; + } else if (kobj == state.system_kobj) { + prefix = "android.fstab.system"; + } else if (kobj == state.vendor_kobj) { + prefix = "android.fstab.vendor"; + } else if (kobj == state.product_kobj) { + prefix = "android.fstab.product"; + } else if (kobj == state.odm_kobj) { + prefix = "android.fstab.odm"; + } else { + pr_err("%s: Unexpected folder\n", __func__); + return -EINVAL; + } + /* We don't put any file in properties/ directly, so prefix can't be + * empty. + */ + snprintf(key, sizeof(key), "%s.%s", prefix, attr->attr.name); + + ret = device_property_read_string(state.dev, key, &value); + if (ret) { + pr_err("%s: Failed to read property '%s', ret=%d\n", __func__, + key, ret); + return ret; + } + return scnprintf(buf, PAGE_SIZE, "%s\n", value); +} + +#define DT_SIMPLE_ATTR(_prefix, _name) \ + struct kobj_attribute _prefix##_##_name##_attr = { \ + .attr = { \ + .name = __stringify(_name), \ + .mode = 0444, \ + }, \ + .show = property_show, \ + } + +static DT_SIMPLE_ATTR(android, compatible); +static DT_SIMPLE_ATTR(vbmeta, compatible); +static DT_SIMPLE_ATTR(fstab, compatible); +static DT_SIMPLE_ATTR(system, compatible); +static DT_SIMPLE_ATTR(vendor, compatible); +static DT_SIMPLE_ATTR(product, compatible); +static DT_SIMPLE_ATTR(odm, compatible); + +static DT_SIMPLE_ATTR(vbmeta, parts); + +static struct attribute *vbmeta_attrs[] = { + &vbmeta_compatible_attr.attr, + &vbmeta_parts_attr.attr, + NULL, +}; + +static struct attribute_group vbmeta_group = { + .attrs = vbmeta_attrs, +}; + +static DT_SIMPLE_ATTR(system, dev); +static DT_SIMPLE_ATTR(system, type); +static DT_SIMPLE_ATTR(system, mnt_flags); +static DT_SIMPLE_ATTR(system, fsmgr_flags); + +static DT_SIMPLE_ATTR(vendor, dev); +static DT_SIMPLE_ATTR(vendor, type); +static DT_SIMPLE_ATTR(vendor, mnt_flags); +static DT_SIMPLE_ATTR(vendor, fsmgr_flags); + +static DT_SIMPLE_ATTR(product, dev); +static DT_SIMPLE_ATTR(product, type); +static DT_SIMPLE_ATTR(product, mnt_flags); +static DT_SIMPLE_ATTR(product, fsmgr_flags); + +static DT_SIMPLE_ATTR(odm, dev); +static DT_SIMPLE_ATTR(odm, type); +static DT_SIMPLE_ATTR(odm, mnt_flags); +static DT_SIMPLE_ATTR(odm, fsmgr_flags); + +static struct attribute *system_attrs[] = { + &system_compatible_attr.attr, + &system_dev_attr.attr, + &system_type_attr.attr, + &system_mnt_flags_attr.attr, + &system_fsmgr_flags_attr.attr, + NULL, +}; + +static struct attribute_group system_group = { + .attrs = system_attrs, +}; + +static struct attribute *vendor_attrs[] = { + &vendor_compatible_attr.attr, + &vendor_dev_attr.attr, + &vendor_type_attr.attr, + &vendor_mnt_flags_attr.attr, + &vendor_fsmgr_flags_attr.attr, + NULL, +}; + +static struct attribute_group vendor_group = { + .attrs = vendor_attrs, +}; + +static struct attribute *product_attrs[] = { + &product_compatible_attr.attr, + &product_dev_attr.attr, + &product_type_attr.attr, + 
&product_mnt_flags_attr.attr, + &product_fsmgr_flags_attr.attr, + NULL, +}; + +static struct attribute_group product_group = { + .attrs = product_attrs, +}; + +static struct attribute *odm_attrs[] = { + &odm_compatible_attr.attr, + &odm_dev_attr.attr, + &odm_type_attr.attr, + &odm_mnt_flags_attr.attr, + &odm_fsmgr_flags_attr.attr, + NULL, +}; + +static struct attribute_group odm_group = { + .attrs = odm_attrs, +}; + +static struct kobject *create_folder(struct kobject *parent, const char *name) +{ + struct kobject *kobj; + + kobj = kobject_create_and_add(name, parent); + if (!kobj) { + pr_err("%s: Failed to create %s/\n", __func__, name); + return NULL; + } + return kobj; +} + +static struct kobject *create_folder_with_file(struct kobject *parent, + const char *name, + struct kobj_attribute *attr) +{ + struct kobject *kobj; + + kobj = create_folder(parent, name); + if (kobj) { + /* Note: Usually drivers should use device_create_file() rather + * than sysfs_create_file(), but the former does not support + * creating the file in a subfolder. + */ + int ret; + + ret = sysfs_create_file(kobj, &attr->attr); + if (ret) { + pr_err("%s: Failed to create %s/%s: ret=%d\n", __func__, + name, attr->attr.name, ret); + kobject_put(kobj); + return NULL; + } + } + return kobj; +} + +static void remove_folder_with_file(struct kobject *kobj, + struct kobj_attribute *attr) +{ + sysfs_remove_file(kobj, &attr->attr); + kobject_put(kobj); +} + +static struct kobject *create_folder_with_files(struct kobject *parent, + const char *name, + struct attribute_group *group) +{ + struct kobject *kobj; + + kobj = create_folder(parent, name); + if (kobj) { + /* Note: Usually drivers should use device_add_groups() rather + * than sysfs_create_group(), but the former does not support + * creating the folder in a subfolder. 
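+		 * The group created here must be removed again with
+		 * sysfs_remove_group() before the kobject is released; see
+		 * remove_folder_with_files() below.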
+ */ + int ret; + + ret = sysfs_create_group(kobj, group); + if (ret) { + pr_err("%s: Failed to create %s/*: ret=%d\n", __func__, + name, ret); + kobject_put(kobj); + return NULL; + } + } + return kobj; +} + +static void remove_folder_with_files(struct kobject *kobj, + struct attribute_group *group) +{ + sysfs_remove_group(kobj, group); + kobject_put(kobj); +} + +static void clean_up(void) +{ + if (state.vendor_kobj) { + /* Delete /properties/android/fstab/vendor/ */ + remove_folder_with_files(state.vendor_kobj, &vendor_group); + state.vendor_kobj = NULL; + } + if (state.product_kobj) { + /* Delete /properties/android/fstab/product/ */ + remove_folder_with_files(state.product_kobj, &product_group); + state.product_kobj = NULL; + } + if (state.odm_kobj) { + /* Delete /properties/android/fstab/odm/ */ + remove_folder_with_files(state.odm_kobj, &odm_group); + state.odm_kobj = NULL; + } + if (state.system_kobj) { + /* Delete /properties/android/fstab/system/ */ + remove_folder_with_files(state.system_kobj, &system_group); + state.system_kobj = NULL; + } + if (state.fstab_kobj) { + /* Delete /properties/android/fstab/ */ + remove_folder_with_file(state.fstab_kobj, + &fstab_compatible_attr); + state.fstab_kobj = NULL; + } + if (state.vbmeta_kobj) { + /* Delete /properties/android/vbmeta/ */ + remove_folder_with_files(state.vbmeta_kobj, &vbmeta_group); + state.vbmeta_kobj = NULL; + } + if (state.android_kobj) { + /* Delete /properties/android/ */ + remove_folder_with_file(state.android_kobj, + &android_compatible_attr); + state.android_kobj = NULL; + } + if (state.properties_kobj) { + /* Delete /properties/ */ + kobject_put(state.properties_kobj); + state.properties_kobj = NULL; + } +} + +static int android_fwdata_probe(struct platform_device *pdev) +{ + int ret = -EIO; + + state.dev = &pdev->dev; + /* Create /properties/ */ + state.properties_kobj = create_folder(&state.dev->kobj, "properties"); + if (!state.properties_kobj) + goto out; + + /* TODO: Iterate over all device properties in firmware, and dynamically + * create sysfs nodes under /properties/ + */ + + /* Create /properties/android/compatible */ + state.android_kobj = create_folder_with_file(state.properties_kobj, + "android", + &android_compatible_attr); + if (!state.android_kobj) + goto out; + + if (device_property_present(state.dev, "android.vbmeta.compatible")) { + /* Firmware contains vbmeta config for AVB 2.0 */ + state.vbmeta_kobj = create_folder_with_files(state.android_kobj, + "vbmeta", + &vbmeta_group); + if (!state.vbmeta_kobj) + goto out; + } + + /* Create /properties/android/fstab/compatible */ + state.fstab_kobj = create_folder_with_file(state.android_kobj, "fstab", + &fstab_compatible_attr); + if (!state.fstab_kobj) + goto out; + + if (device_property_present(state.dev, "android.fstab.system.dev")) { + /* Firmware contains fstab config for early mount of /system */ + state.system_kobj = create_folder_with_files(state.fstab_kobj, + "system", + &system_group); + if (!state.system_kobj) + goto out; + } + if (device_property_present(state.dev, "android.fstab.vendor.dev")) { + /* Firmware contains fstab config for early mount of /vendor */ + state.vendor_kobj = create_folder_with_files(state.fstab_kobj, + "vendor", + &vendor_group); + if (!state.vendor_kobj) + goto out; + } + if (device_property_present(state.dev, "android.fstab.product.dev")) { + /* Firmware contains fstab config for early mount of /product */ + state.product_kobj = create_folder_with_files(state.fstab_kobj, + "product", + &product_group); + if 
(!state.product_kobj) + goto out; + } + if (device_property_present(state.dev, "android.fstab.odm.dev")) { + /* Firmware contains fstab config for early mount of /odm */ + state.odm_kobj = create_folder_with_files(state.fstab_kobj, + "odm", + &odm_group); + if (!state.odm_kobj) + goto out; + } + return 0; + +out: + clean_up(); + return ret; +} + +static int android_fwdata_remove(struct platform_device *pdev) +{ + clean_up(); + return 0; +} + +static const struct acpi_device_id android_fwdata_acpi_match[] = { + { "ANDR0001", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, android_fwdata_acpi_match); + +static struct platform_driver android_fwdata_driver = { + .probe = android_fwdata_probe, + .remove = android_fwdata_remove, + .driver = { + .name = "android_fwdata", + .owner = THIS_MODULE, + .acpi_match_table = ACPI_PTR(android_fwdata_acpi_match), + } +}; + +module_platform_driver(android_fwdata_driver); diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig index a517b2d29f1b..8f6494158d3d 100644 --- a/drivers/staging/android/ion/Kconfig +++ b/drivers/staging/android/ion/Kconfig @@ -37,7 +37,7 @@ config ION_CHUNK_HEAP config ION_CMA_HEAP bool "Ion CMA heap support" - depends on ION && CMA + depends on ION && DMA_CMA help Choose this option to enable CMA heaps with Ion. This heap is backed by the Contiguous Memory Allocator (CMA). If your system has these diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c index d9f8b1424da1..021a956db1a8 100644 --- a/drivers/staging/android/ion/ion-ioctl.c +++ b/drivers/staging/android/ion/ion-ioctl.c @@ -71,8 +71,10 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -EFAULT; ret = validate_ioctl_arg(cmd, &data); - if (WARN_ON_ONCE(ret)) + if (ret) { + pr_warn_once("%s: ioctl validate failed\n", __func__); return ret; + } if (!(dir & _IOC_WRITE)) memset(&data, 0, sizeof(data)); diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 93e2c90fa77d..24cb666c9224 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -131,8 +131,11 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, void ion_buffer_destroy(struct ion_buffer *buffer) { - if (WARN_ON(buffer->kmap_cnt > 0)) + if (buffer->kmap_cnt > 0) { + pr_warn_once("%s: buffer still mapped in the kernel\n", + __func__); buffer->heap->ops->unmap_kernel(buffer->heap, buffer); + } buffer->heap->ops->free(buffer); kfree(buffer); } @@ -348,7 +351,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, mutex_lock(&buffer->lock); list_for_each_entry(a, &buffer->attachments, list) { dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents, - DMA_BIDIRECTIONAL); + direction); } mutex_unlock(&buffer->lock); @@ -370,7 +373,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, mutex_lock(&buffer->lock); list_for_each_entry(a, &buffer->attachments, list) { dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents, - DMA_BIDIRECTIONAL); + direction); } mutex_unlock(&buffer->lock); diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index dd5545d9990a..fa3e4b7e0c9f 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "ion.h" @@ -39,12 +40,34 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, struct ion_cma_heap *cma_heap 
= to_cma_heap(heap); struct sg_table *table; struct page *pages; + unsigned long size = PAGE_ALIGN(len); + unsigned long nr_pages = size >> PAGE_SHIFT; + unsigned long align = get_order(size); int ret; - pages = cma_alloc(cma_heap->cma, len, 0, GFP_KERNEL); + if (align > CONFIG_CMA_ALIGNMENT) + align = CONFIG_CMA_ALIGNMENT; + + pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL); if (!pages) return -ENOMEM; + if (PageHighMem(pages)) { + unsigned long nr_clear_pages = nr_pages; + struct page *page = pages; + + while (nr_clear_pages > 0) { + void *vaddr = kmap_atomic(page); + + memset(vaddr, 0, PAGE_SIZE); + kunmap_atomic(vaddr); + page++; + nr_clear_pages--; + } + } else { + memset(page_address(pages), 0, size); + } + table = kmalloc(sizeof(*table), GFP_KERNEL); if (!table) goto err; @@ -53,7 +76,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, if (ret) goto free_mem; - sg_set_page(table->sgl, pages, len, 0); + sg_set_page(table->sgl, pages, size, 0); buffer->priv_virt = pages; buffer->sg_table = table; @@ -62,7 +85,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, free_mem: kfree(table); err: - cma_release(cma_heap->cma, pages, buffer->size); + cma_release(cma_heap->cma, pages, nr_pages); return -ENOMEM; } @@ -70,9 +93,10 @@ static void ion_cma_free(struct ion_buffer *buffer) { struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); struct page *pages = buffer->priv_virt; + unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; /* release memory */ - cma_release(cma_heap->cma, pages, buffer->size); + cma_release(cma_heap->cma, pages, nr_pages); /* release sg table */ sg_free_table(buffer->sg_table); kfree(buffer->sg_table); diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index 91faa7f035b9..babbd94c32d9 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -38,7 +38,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap, struct page **tmp = pages; if (!pages) - return NULL; + return ERR_PTR(-ENOMEM); if (buffer->flags & ION_FLAG_CACHED) pgprot = PAGE_KERNEL; diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 4dc5d7a589c2..b6ece18e6a88 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -371,7 +371,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap, unsigned long i; int ret; - page = alloc_pages(low_order_gfp_flags, order); + page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order); if (!page) return -ENOMEM; diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c new file mode 100644 index 000000000000..999653dc2356 --- /dev/null +++ b/drivers/staging/android/lowmemorykiller.c @@ -0,0 +1,304 @@ +/* drivers/misc/lowmemorykiller.c + * + * The lowmemorykiller driver lets user-space specify a set of memory thresholds + * where processes with a range of oom_score_adj values will get killed. Specify + * the minimum oom_score_adj values in + * /sys/module/lowmemorykiller/parameters/adj and the number of free pages in + * /sys/module/lowmemorykiller/parameters/minfree. Both files take a comma + * separated list of numbers in ascending order. 
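+ * From a shell the thresholds can be retuned at runtime; rewriting the
+ * built-in defaults, for instance, would look like:
+ *   echo "0,1,6,12" > /sys/module/lowmemorykiller/parameters/adj
+ *   echo "1536,2048,4096,16384" > /sys/module/lowmemorykiller/parameters/minfree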
+ *
+ * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
+ * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill
+ * processes with an oom_score_adj value of 8 or higher when the free memory
+ * drops below 4096 pages and kill processes with an oom_score_adj value of 0
+ * or higher when the free memory drops below 1024 pages.
+ *
+ * The driver considers memory used for caches to be free, but if a large
+ * percentage of the cached memory is locked this can be very inaccurate
+ * and processes may not get killed until the normal oom killer is triggered.
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
+#include <linux/swap.h>
+#include <linux/rcupdate.h>
+#include <linux/profile.h>
+#include <linux/notifier.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace/lowmemorykiller.h"
+
+static u32 lowmem_debug_level = 1;
+static short lowmem_adj[6] = {
+	0,
+	1,
+	6,
+	12,
+};
+
+static int lowmem_adj_size = 4;
+static int lowmem_minfree[6] = {
+	3 * 512,	/* 6MB */
+	2 * 1024,	/* 8MB */
+	4 * 1024,	/* 16MB */
+	16 * 1024,	/* 64MB */
+};
+
+static int lowmem_minfree_size = 4;
+
+static unsigned long lowmem_deathpending_timeout;
+
+#define lowmem_print(level, x...)			\
+	do {						\
+		if (lowmem_debug_level >= (level))	\
+			pr_info(x);			\
+	} while (0)
+
+static unsigned long lowmem_count(struct shrinker *s,
+				  struct shrink_control *sc)
+{
+	return global_node_page_state(NR_ACTIVE_ANON) +
+		global_node_page_state(NR_ACTIVE_FILE) +
+		global_node_page_state(NR_INACTIVE_ANON) +
+		global_node_page_state(NR_INACTIVE_FILE);
+}
+
+static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
+{
+	struct task_struct *tsk;
+	struct task_struct *selected = NULL;
+	unsigned long rem = 0;
+	int tasksize;
+	int i;
+	short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
+	int minfree = 0;
+	int selected_tasksize = 0;
+	short selected_oom_score_adj;
+	int array_size = ARRAY_SIZE(lowmem_adj);
+	int other_free = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
+	int other_file = global_node_page_state(NR_FILE_PAGES) -
+				global_node_page_state(NR_SHMEM) -
+				global_node_page_state(NR_UNEVICTABLE) -
+				total_swapcache_pages();
+
+	if (lowmem_adj_size < array_size)
+		array_size = lowmem_adj_size;
+	if (lowmem_minfree_size < array_size)
+		array_size = lowmem_minfree_size;
+	for (i = 0; i < array_size; i++) {
+		minfree = lowmem_minfree[i];
+		if (other_free < minfree && other_file < minfree) {
+			min_score_adj = lowmem_adj[i];
+			break;
+		}
+	}
+
+	lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n",
+		     sc->nr_to_scan, sc->gfp_mask, other_free,
+		     other_file, min_score_adj);
+
+	if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
+		lowmem_print(5, "lowmem_scan %lu, %x, return 0\n",
+			     sc->nr_to_scan, sc->gfp_mask);
+		return 0;
+	}
+
+	selected_oom_score_adj = min_score_adj;
+
+	rcu_read_lock();
+	for_each_process(tsk) {
+		struct task_struct *p;
+		short oom_score_adj;
+
+		if (tsk->flags & PF_KTHREAD)
+			continue;
+
+		p = find_lock_task_mm(tsk);
+		if (!p)
+			continue;
+
+		if
(task_lmk_waiting(p) && + time_before_eq(jiffies, lowmem_deathpending_timeout)) { + task_unlock(p); + rcu_read_unlock(); + return 0; + } + oom_score_adj = p->signal->oom_score_adj; + if (oom_score_adj < min_score_adj) { + task_unlock(p); + continue; + } + tasksize = get_mm_rss(p->mm); + task_unlock(p); + if (tasksize <= 0) + continue; + if (selected) { + if (oom_score_adj < selected_oom_score_adj) + continue; + if (oom_score_adj == selected_oom_score_adj && + tasksize <= selected_tasksize) + continue; + } + selected = p; + selected_tasksize = tasksize; + selected_oom_score_adj = oom_score_adj; + lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n", + p->comm, p->pid, oom_score_adj, tasksize); + } + if (selected) { + long cache_size = other_file * (long)(PAGE_SIZE / 1024); + long cache_limit = minfree * (long)(PAGE_SIZE / 1024); + long free = other_free * (long)(PAGE_SIZE / 1024); + + task_lock(selected); + send_sig(SIGKILL, selected, 0); + if (selected->mm) + task_set_lmk_waiting(selected); + task_unlock(selected); + trace_lowmemory_kill(selected, cache_size, cache_limit, free); + lowmem_print(1, "Killing '%s' (%d) (tgid %d), adj %hd,\n" + " to free %ldkB on behalf of '%s' (%d) because\n" + " cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" + " Free memory is %ldkB above reserved\n", + selected->comm, selected->pid, selected->tgid, + selected_oom_score_adj, + selected_tasksize * (long)(PAGE_SIZE / 1024), + current->comm, current->pid, + cache_size, cache_limit, + min_score_adj, + free); + lowmem_deathpending_timeout = jiffies + HZ; + rem += selected_tasksize; + } + + lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n", + sc->nr_to_scan, sc->gfp_mask, rem); + rcu_read_unlock(); + return rem; +} + +static struct shrinker lowmem_shrinker = { + .scan_objects = lowmem_scan, + .count_objects = lowmem_count, + .seeks = DEFAULT_SEEKS * 16 +}; + +static int __init lowmem_init(void) +{ + register_shrinker(&lowmem_shrinker); + return 0; +} +device_initcall(lowmem_init); + +#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES +static short lowmem_oom_adj_to_oom_score_adj(short oom_adj) +{ + if (oom_adj == OOM_ADJUST_MAX) + return OOM_SCORE_ADJ_MAX; + else + return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE; +} + +static void lowmem_autodetect_oom_adj_values(void) +{ + int i; + short oom_adj; + short oom_score_adj; + int array_size = ARRAY_SIZE(lowmem_adj); + + if (lowmem_adj_size < array_size) + array_size = lowmem_adj_size; + + if (array_size <= 0) + return; + + oom_adj = lowmem_adj[array_size - 1]; + if (oom_adj > OOM_ADJUST_MAX) + return; + + oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj); + if (oom_score_adj <= OOM_ADJUST_MAX) + return; + + lowmem_print(1, "lowmem_shrink: convert oom_adj to oom_score_adj:\n"); + for (i = 0; i < array_size; i++) { + oom_adj = lowmem_adj[i]; + oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj); + lowmem_adj[i] = oom_score_adj; + lowmem_print(1, "oom_adj %d => oom_score_adj %d\n", + oom_adj, oom_score_adj); + } +} + +static int lowmem_adj_array_set(const char *val, const struct kernel_param *kp) +{ + int ret; + + ret = param_array_ops.set(val, kp); + + /* HACK: Autodetect oom_adj values in lowmem_adj array */ + lowmem_autodetect_oom_adj_values(); + + return ret; +} + +static int lowmem_adj_array_get(char *buffer, const struct kernel_param *kp) +{ + return param_array_ops.get(buffer, kp); +} + +static void lowmem_adj_array_free(void *arg) +{ + param_array_ops.free(arg); +} + +static struct kernel_param_ops 
lowmem_adj_array_ops = {
+	.set = lowmem_adj_array_set,
+	.get = lowmem_adj_array_get,
+	.free = lowmem_adj_array_free,
+};
+
+static const struct kparam_array __param_arr_adj = {
+	.max = ARRAY_SIZE(lowmem_adj),
+	.num = &lowmem_adj_size,
+	.ops = &param_ops_short,
+	.elemsize = sizeof(lowmem_adj[0]),
+	.elem = lowmem_adj,
+};
+#endif
+
+/*
+ * not really modular, but the easiest way to keep compat with existing
+ * bootargs behaviour is to continue using module_param here.
+ */
+module_param_named(cost, lowmem_shrinker.seeks, int, 0644);
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+module_param_cb(adj, &lowmem_adj_array_ops,
+		.arr = &__param_arr_adj,
+		0644);
+__MODULE_PARM_TYPE(adj, "array of short");
+#else
+module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size, 0644);
+#endif
+module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
+			 0644);
+module_param_named(debug_level, lowmem_debug_level, uint, 0644);
+
diff --git a/drivers/staging/android/sbl/Kconfig b/drivers/staging/android/sbl/Kconfig
new file mode 100644
index 000000000000..4b550cadcb40
--- /dev/null
+++ b/drivers/staging/android/sbl/Kconfig
@@ -0,0 +1,9 @@
+config SBL_BOOTLOADER_CONTROL
+	tristate "SBL Bootloader Control module"
+	depends on X86
+	default n
+	help
+	  This driver installs a reboot hook, such that if reboot() is
+	  invoked with a string argument, the corresponding ABL Action
+	  is written in CMOS data, in order to be processed by ABL on
+	  reboot.
diff --git a/drivers/staging/android/sbl/Makefile b/drivers/staging/android/sbl/Makefile
new file mode 100644
index 000000000000..6d1258d7bfc1
--- /dev/null
+++ b/drivers/staging/android/sbl/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SBL_BOOTLOADER_CONTROL) += sblbc.o
diff --git a/drivers/staging/android/sbl/sblbc.c b/drivers/staging/android/sbl/sblbc.c
new file mode 100644
index 000000000000..3d354fd31e23
--- /dev/null
+++ b/drivers/staging/android/sbl/sblbc.c
@@ -0,0 +1,372 @@
+/*
+ * sblbc: control SBL bootloaders
+ * Copyright (c) 2013-2017, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
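+ *
+ * A command for the bootloader is staged in the extended CMOS NVRAM bank
+ * as a small message: a validity magic (NVRAM_VALID_FLAG), the message
+ * size, a _cdata_header word, the command payload and a trailing CRC32C
+ * over the whole message; the bootloader consumes it on the next reboot.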
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/kobject.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/mc146818rtc.h>
+
+#define MODULE_NAME "sblbc"
+
+/* RTC read and write */
+static inline unsigned char cmos_read_ext_bank(u8 addr)
+{
+	outb(addr, RTC_PORT(4));
+	return inb(RTC_PORT(5));
+}
+#define CMOS_READ_EXT(a) cmos_read_ext_bank(a)
+
+static inline void cmos_write_ext_bank(u8 val, u8 addr)
+{
+	outb(addr, RTC_PORT(4));
+	outb(val, RTC_PORT(5));
+}
+#define CMOS_WRITE_EXT(v, a) cmos_write_ext_bank(v, a)
+
+/* ABL Conventions */
+#define NVRAM_START_ADDRESS 0x10
+
+#define _USERCMD_(cmd, len) (((cmd) << 5) | ((len) & 0x1f))
+#define USERCMD_END _USERCMD_(0, 0)
+#define USERCMD_ACTION _USERCMD_(7, 1)
+#define USERCMD_UPDATE_IFWI(len) _USERCMD_(2, len)
+
+#define CDATA_TAG_USER_CMD 0x4d
+#define NVRAM_VALID_FLAG 0x12
+
+#define CRC32C_POLYNOMIAL 0x82F63B78 /* CRC32C Castagnoli */
+
+static bool capsule_request;
+
+union _cdata_header {
+	uint32_t data;
+	struct {
+		unsigned ncond   : 2;
+		unsigned length  : 10;
+		unsigned flags   : 4;
+		unsigned version : 4;
+		unsigned tag     : 12;
+	};
+};
+
+struct nvram_capsule_cmd {
+	char action;
+	char device;
+	char partition;
+	char file_name[1];
+} __packed;
+
+struct nvram_reboot_cmd {
+	char action;
+	char target;
+	char end;
+	char padding;
+} __packed;
+
+struct name2id {
+	const char *name;
+	int id;
+};
+
+struct nvram_msg {
+	char magic;
+	char size;
+	union _cdata_header cdata_header;
+	char *cdata_payload;
+	size_t cdata_payload_size;
+	uint32_t crc;
+} __packed;
+
+static const struct name2id NAME2ID[] = {
+	{ "main",       0x00 },
+	{ "android",    0x00 },
+	{ "bootloader", 0x01 },
+	{ "fastboot",   0x01 },
+	{ "elk",        0x02 },
+	{ "recovery",   0x03 },
+	{ "crashmode",  0x04 },
+	{ "dnx",        0x05 },
+	{ "cli",        0x10 },
+};
+
+static size_t offset; /* memorize offset between each call */
+
+static size_t write_data_to_nvram(char *data, size_t size)
+{
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+	for (i = 0; i < size; i++)
+		CMOS_WRITE_EXT(*(data + i), NVRAM_START_ADDRESS + offset + i);
+
+	for (i = 0; i < size; i++)
+		pr_debug("Kernel Addr=0x%X, data=0x%X\n",
+			 (unsigned int)(NVRAM_START_ADDRESS + offset + i),
+			 (unsigned int)(*(unsigned char *)(data + i)));
+
+	offset += size;
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
+	return i;
+}
+
+static void write_msg_to_nvram(struct nvram_msg *nvram_msg)
+{
+	/* Always start from the top: only one command is expected. */
+	offset = 0;
+	write_data_to_nvram((void *)nvram_msg,
+			    offsetof(struct nvram_msg, cdata_payload));
+	write_data_to_nvram((void *)(nvram_msg->cdata_payload),
+			    nvram_msg->cdata_payload_size);
+	write_data_to_nvram((void *)&(nvram_msg->crc), sizeof(nvram_msg->crc));
+}
+
+/* Compute CRC for one byte (shift register-based: one bit at a time). */
+static uint32_t crc32c_byte(uint32_t crc, unsigned byte)
+{
+	int i;
+	uint32_t c;
+
+	for (i = 0; i < 8; i += 1) {
+		c = (crc ^ byte) & 1;
+		if (c)
+			crc = (crc >> 1) ^ CRC32C_POLYNOMIAL;
+		else
+			crc = (crc >> 1);
+		byte >>= 1;
+	}
+
+	return crc;
+}
+
+/*
+ * Compute CRC for a given buffer.
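+ * The caller provides the seed (crc32c_msg() starts from ~0) and the
+ * result is used without a final inversion.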
+ */
+static uint32_t crc32c_buf(uint32_t crc, const void *addr, unsigned len)
+{
+	unsigned i;
+
+	for (i = 0; i < len; i += 1)
+		crc = crc32c_byte(crc, *(uint8_t *)(addr + i));
+
+	return crc;
+}
+
+static uint32_t crc32c_msg(struct nvram_msg *nvram_msg)
+{
+	uint32_t crc;
+
+	crc = crc32c_buf(~0, nvram_msg,
+			 offsetof(struct nvram_msg, cdata_payload));
+	crc = crc32c_buf(crc, nvram_msg->cdata_payload,
+			 nvram_msg->cdata_payload_size);
+	return crc;
+}
+
+static struct kobject *capsule_kobject;
+
+static ssize_t is_capsule_requested(struct kobject *kobj,
+				    struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", capsule_request);
+}
+
+enum capsule_device_type {
+	EMMC = 2,
+	SDCARD = 4
+};
+
+static ssize_t capsule_store(struct kobject *kobj, struct kobj_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct nvram_msg msg;
+	struct nvram_capsule_cmd *capsule_cmd;
+	char name[32], partition;
+	enum capsule_device_type device;
+	int ret, padding;
+	unsigned char size;
+	union _cdata_header cdh;
+
+	device = (buf[0] == 'm' ? EMMC : SDCARD);
+	partition = buf[1] - '0';
+	if (strlen(buf + 3) >= sizeof(name)) {
+		pr_err(MODULE_NAME " buf+3: %zu is too long\n",
+		       strlen(buf + 3));
+		return -EINVAL;
+	}
+
+	ret = sscanf(buf + 3, "%s", name);
+	pr_info(MODULE_NAME " capsule parameters (%d): DEVICE=%d PARTITION=%d NAME=%s\n",
+		ret, device, partition, name);
+
+	cdh.data = 0;
+	cdh.tag = CDATA_TAG_USER_CMD;
+
+	/* padding of filename on next dword */
+	padding = (4 - (3 + strlen(name)) % 4) % 4;
+	size = 2 + sizeof(cdh) + 3 + strlen(name) + padding + 4;
+	cdh.length = 1 + (3 + strlen(name) + padding) / 4;
+
+	msg.magic = NVRAM_VALID_FLAG;
+	msg.size = size;
+	msg.cdata_header.data = cdh.data;
+
+	capsule_cmd = kzalloc(size, GFP_KERNEL);
+	if (!capsule_cmd)
+		return -ENOMEM;
+
+	capsule_cmd->action = USERCMD_UPDATE_IFWI(strlen(name) + 2);
+	capsule_cmd->device = device;
+	capsule_cmd->partition = partition;
+	strncpy(capsule_cmd->file_name, name, strlen(name));
+	msg.cdata_payload = (char *)capsule_cmd;
+	msg.cdata_payload_size = 3 + strlen(name) + padding;
+	msg.crc = crc32c_msg(&msg);
+	write_msg_to_nvram(&msg);
+	capsule_request = true;
+
+	kfree(capsule_cmd);
+
+	return count;
+}
+
+static struct kobj_attribute capsule_name_attribute =
+	__ATTR(capsule_name, 0600, NULL, capsule_store);
+
+static struct kobj_attribute capsule_requested_attribute =
+	__ATTR(capsule_requested, 0400, is_capsule_requested, NULL);
+
+static int reboot_target_name2id(const char *name)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(NAME2ID); i++)
+		if (!strcmp(NAME2ID[i].name, name))
+			return NAME2ID[i].id;
+
+	return -EINVAL;
+}
+
+static int set_reboot_target(const char *name)
+{
+	int id;
+	struct nvram_msg msg;
+	struct nvram_reboot_cmd reboot_cmd;
+	union _cdata_header cdh;
+
+	if (name == NULL) {
+		pr_err("Error in %s: NULL target\n", __func__);
+		return -EINVAL;
+	}
+
+	id = reboot_target_name2id(name);
+	if (id < 0) {
+		pr_err("Error in %s: '%s' is not a valid target\n",
+		       __func__, name);
+		return -EINVAL;
+	}
+
+	cdh.data = 0;
+	cdh.length = 2; /* 2*32 bits, from header to padding */
+	cdh.tag = CDATA_TAG_USER_CMD;
+
+	memset(&reboot_cmd, 0, sizeof(reboot_cmd));
+	memset(&msg, 0, sizeof(msg));
+	msg.magic = NVRAM_VALID_FLAG;
+	msg.cdata_header.data = cdh.data;
+	reboot_cmd.action = USERCMD_ACTION;
+
+	reboot_cmd.target = id;
+	msg.cdata_payload = (void *)&reboot_cmd;
+	msg.cdata_payload_size = sizeof(reboot_cmd);
+	msg.size = offsetof(struct nvram_msg, cdata_payload) +
+		   sizeof(reboot_cmd) +
sizeof(msg.crc); + msg.crc = crc32c_msg(&msg); + + write_msg_to_nvram(&msg); + + return 0; +} + +static int sblbc_reboot_notifier_call(struct notifier_block *notifier, + unsigned long what, void *data) +{ + const char *target = (const char *)data; + int ret; + + if (what != SYS_RESTART) + return NOTIFY_DONE; + + if (target[0] != '\0') { + ret = set_reboot_target(target); + if (ret) + pr_err("%s: Failed to set reboot target, ret=%d\n", + __func__, ret); + } + + return NOTIFY_DONE; +} + +static struct notifier_block sblbc_reboot_notifier = { + .notifier_call = sblbc_reboot_notifier_call, +}; + +static int __init sblbc_init(void) +{ + int ret; + + ret = register_reboot_notifier(&sblbc_reboot_notifier); + if (ret) { + pr_err(MODULE_NAME ": unable to register reboot notifier\n"); + return ret; + } + + capsule_kobject = kobject_create_and_add("capsule", kernel_kobj); + if (!capsule_kobject) + return -ENOMEM; + + ret = sysfs_create_file(capsule_kobject, + &capsule_name_attribute.attr); + if (ret) { + pr_err("failed to create the foo file in /sys/kernel/capsule/capsule_name\n"); + goto err; + } + + ret = sysfs_create_file(capsule_kobject, + &capsule_requested_attribute.attr); + if (ret) { + pr_err("failed to create the foo file in /sys/kernel/capsule/capsule_requested\n"); + goto err; + } + + return 0; + +err: + kobject_put(capsule_kobject); + return ret; +} + +module_init(sblbc_init); + +static void __exit sblbc_exit(void) +{ + unregister_reboot_notifier(&sblbc_reboot_notifier); + kobject_put(capsule_kobject); +} +module_exit(sblbc_exit); + +MODULE_AUTHOR("Guillaume Betous "); +MODULE_DESCRIPTION("Slimboot boot control driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/android/trace/lowmemorykiller.h b/drivers/staging/android/trace/lowmemorykiller.h new file mode 100644 index 000000000000..f43d3fae75ee --- /dev/null +++ b/drivers/staging/android/trace/lowmemorykiller.h @@ -0,0 +1,41 @@ +#undef TRACE_SYSTEM +#define TRACE_INCLUDE_PATH ../../drivers/staging/android/trace +#define TRACE_SYSTEM lowmemorykiller + +#if !defined(_TRACE_LOWMEMORYKILLER_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_LOWMEMORYKILLER_H + +#include + +TRACE_EVENT(lowmemory_kill, + TP_PROTO(struct task_struct *killed_task, long cache_size, \ + long cache_limit, long free), + + TP_ARGS(killed_task, cache_size, cache_limit, free), + + TP_STRUCT__entry( + __array(char, comm, TASK_COMM_LEN) + __field(pid_t, pid) + __field(long, pagecache_size) + __field(long, pagecache_limit) + __field(long, free) + ), + + TP_fast_assign( + memcpy(__entry->comm, killed_task->comm, TASK_COMM_LEN); + __entry->pid = killed_task->pid; + __entry->pagecache_size = cache_size; + __entry->pagecache_limit = cache_limit; + __entry->free = free; + ), + + TP_printk("%s (%d), page cache %ldkB (limit %ldkB), free %ldKb", + __entry->comm, __entry->pid, __entry->pagecache_size, + __entry->pagecache_limit, __entry->free) +); + + +#endif /* if !defined(_TRACE_LOWMEMORYKILLER_H) || defined(TRACE_HEADER_MULTI_READ) */ + +/* This part must be outside protection */ +#include diff --git a/drivers/staging/android/uapi/vsoc_shm.h b/drivers/staging/android/uapi/vsoc_shm.h new file mode 100644 index 000000000000..741b1387c25b --- /dev/null +++ b/drivers/staging/android/uapi/vsoc_shm.h @@ -0,0 +1,303 @@ +/* + * Copyright (C) 2017 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_VSOC_SHM_H
+#define _UAPI_LINUX_VSOC_SHM_H
+
+#include <linux/types.h>
+
+/**
+ * A permission is a token that permits a receiver to read and/or write an area
+ * of memory within a Vsoc region.
+ *
+ * An fd_scoped permission grants both read and write access, and can be
+ * attached to a file description (see open(2)).
+ * Ownership of the area can then be shared by passing a file descriptor
+ * among processes.
+ *
+ * begin_offset and end_offset define the area of memory that is controlled by
+ * the permission. owner_offset points to a word, also in shared memory, that
+ * controls ownership of the area.
+ *
+ * Ownership of the area expires when the associated file description is
+ * released.
+ *
+ * At most one permission can be attached to each file description.
+ *
+ * This is useful when implementing HALs like gralloc that scope and pass
+ * ownership of shared resources via file descriptors.
+ *
+ * The caller is responsible for doing any fencing.
+ *
+ * The calling process will normally identify a currently free area of
+ * memory. It will construct a proposed fd_scoped_permission_arg structure:
+ *
+ *   begin_offset and end_offset describe the area being claimed
+ *
+ *   owner_offset points to the location in shared memory that indicates the
+ *   owner of the area.
+ *
+ *   owned_value is the value that will be stored in owner_offset iff the
+ *   permission can be granted. It must be different than VSOC_REGION_FREE.
+ *
+ * Two fd_scoped_permission structures are compatible if they vary only by
+ * their owned_value fields.
+ *
+ * The driver ensures that, for any group of simultaneous callers proposing
+ * compatible fd_scoped_permissions, it will accept exactly one of the
+ * proposals. The other callers will get a failure with errno of EAGAIN.
+ *
+ * A process receiving a file descriptor can identify the region being
+ * granted using the VSOC_GET_FD_SCOPED_PERMISSION ioctl.
+ */
+struct fd_scoped_permission {
+	__u32 begin_offset;
+	__u32 end_offset;
+	__u32 owner_offset;
+	__u32 owned_value;
+};
+
+/*
+ * This value represents a free area of memory. The driver expects to see this
+ * value at owner_offset when creating a permission; otherwise the permission
+ * will not be created. The driver writes this value back once the permission
+ * is no longer needed.
+ */
+#define VSOC_REGION_FREE ((__u32)0)
+
+/**
+ * ioctl argument for VSOC_CREATE_FD_SCOPE_PERMISSION
+ */
+struct fd_scoped_permission_arg {
+	struct fd_scoped_permission perm;
+	__s32 managed_region_fd;
+};
+
+#define VSOC_NODE_FREE ((__u32)0)
+
+/*
+ * Describes a signal table in shared memory. Each non-zero entry in the
+ * table indicates that the receiver should signal the futex at the given
+ * offset. Offsets are relative to the region, not the shared memory window.
+ *
+ * interrupt_signalled_offset is used to reliably signal interrupts across the
+ * vmm boundary. There are two roles: transmitter and receiver. For example,
+ * in the host_to_guest_signal_table the host is the transmitter and the
+ * guest is the receiver. The protocol is as follows:
+ *
+ * 1. The transmitter should convert the offset of the futex to an offset
+ *    in the signal table [0, (1 << num_nodes_lg2))
+ *    The transmitter can choose any appropriate hashing algorithm, including
+ *    hash = futex_offset & ((1 << num_nodes_lg2) - 1)
+ *
+ * 2. The transmitter should atomically compare and swap futex_offset with 0
+ *    at hash. There are 3 possible outcomes
+ *      a. The swap fails because the futex_offset is already in the table.
+ *         The transmitter should stop.
+ *      b. Some other offset is in the table. This is a hash collision. The
+ *         transmitter should move to another table slot and try again. One
+ *         possible algorithm:
+ *         hash = (hash + 1) & ((1 << num_nodes_lg2) - 1)
+ *      c. The swap worked. Continue below.
+ *
+ * 3. The transmitter atomically swaps 1 with the value at the
+ *    interrupt_signalled_offset. There are two outcomes:
+ *      a. The prior value was 1. In this case an interrupt has already been
+ *         posted. The transmitter is done.
+ *      b. The prior value was 0, indicating that the receiver may be
+ *         sleeping. The transmitter will issue an interrupt.
+ *
+ * 4. On waking the receiver immediately exchanges a 0 with the
+ *    interrupt_signalled_offset. If it receives a 0 then this is a spurious
+ *    interrupt. That may occasionally happen in the current protocol, but
+ *    should be rare.
+ *
+ * 5. The receiver scans the signal table by atomically exchanging 0 at each
+ *    location. If a non-zero offset is returned from the exchange the
+ *    receiver wakes all sleepers at the given offset:
+ *      futex((int*)(region_base + old_value), FUTEX_WAKE, MAX_INT);
+ *
+ * 6. The receiver thread then does a conditional wait, waking immediately
+ *    if the value at interrupt_signalled_offset is non-zero. This catches
+ *    cases where additional signals were posted while the table was being
+ *    scanned. On the guest the wait is handled via the
+ *    VSOC_WAIT_FOR_INCOMING_INTERRUPT ioctl.
+ */
+struct vsoc_signal_table_layout {
+	/* log_2(Number of signal table entries) */
+	__u32 num_nodes_lg2;
+	/*
+	 * Offset to the first signal table entry relative to the start of the
+	 * region
+	 */
+	__u32 futex_uaddr_table_offset;
+	/*
+	 * Offset to an atomic_t / atomic uint32_t. A non-zero value indicates
+	 * that one or more offsets are currently posted in the table.
+	 * semi-unique access to an entry in the table
+	 */
+	__u32 interrupt_signalled_offset;
+};
+
+#define VSOC_REGION_WHOLE ((__s32)0)
+#define VSOC_DEVICE_NAME_SZ 16
+
+/**
+ * Each HAL would (usually) talk to a single device region.
+ * Multiple entities care about these regions:
+ * - The ivshmem_server will populate the regions in shared memory
+ * - The guest kernel will read the region, create minor device nodes, and
+ *   allow interested parties to register for FUTEX_WAKE events in the region
+ * - HALs will access via the minor device nodes published by the guest kernel
+ * - Host side processes will access the region via the ivshmem_server:
+ *   1. Pass name to ivshmem_server at a UNIX socket
+ *   2. ivshmem_server will reply with:
+ *      - host->guest doorbell fd
+ *      - guest->host doorbell fd
+ *      - fd for the shared memory region
+ *      - region offset
+ *   3.
+
+/**
+ * Each HAL would (usually) talk to a single device region.
+ * Multiple entities care about these regions:
+ * - The ivshmem_server will populate the regions in shared memory
+ * - The guest kernel will read the region, create minor device nodes, and
+ *   allow interested parties to register for FUTEX_WAKE events in the
+ *   region
+ * - HALs will access via the minor device nodes published by the guest
+ *   kernel
+ * - Host side processes will access the region via the ivshmem_server:
+ *   1. Pass the region name to the ivshmem_server at a UNIX socket
+ *   2. The ivshmem_server will reply with 3 fds and a region offset:
+ *      - host->guest doorbell fd
+ *      - guest->host doorbell fd
+ *      - fd for the shared memory region
+ *      - region offset
+ *   3. Start a futex receiver thread on the doorbell fd pointed at the
+ *      signal_nodes
+ */
+struct vsoc_device_region {
+	__u16 current_version;
+	__u16 min_compatible_version;
+	__u32 region_begin_offset;
+	__u32 region_end_offset;
+	__u32 offset_of_region_data;
+	struct vsoc_signal_table_layout guest_to_host_signal_table;
+	struct vsoc_signal_table_layout host_to_guest_signal_table;
+	/* Name of the device. Must always be terminated with a '\0', so
+	 * the longest supported device name is 15 characters.
+	 */
+	char device_name[VSOC_DEVICE_NAME_SZ];
+	/* There are two ways that permissions to access regions are handled:
+	 * - When managed_by is VSOC_REGION_WHOLE, any process that can
+	 *   open the device node for the region gains complete access to it.
+	 * - When managed_by names another region, processes that open this
+	 *   region cannot access it directly. Access to a sub-region must be
+	 *   established by invoking the VSOC_CREATE_FD_SCOPED_PERMISSION
+	 *   ioctl on the managing region, providing a file instance
+	 *   (represented by a fd) opened on this region.
+	 */
+	__u32 managed_by;
+};
+
+/*
+ * The vsoc layout descriptor.
+ * The first 4K should be reserved for the shm header and region
+ * descriptors. The regions should be page aligned.
+ */
+struct vsoc_shm_layout_descriptor {
+	__u16 major_version;
+	__u16 minor_version;
+
+	/* size of the shm. This may be redundant but nice to have */
+	__u32 size;
+
+	/* number of shared memory regions */
+	__u32 region_count;
+
+	/* The offset to the start of region descriptors */
+	__u32 vsoc_region_desc_offset;
+};
+
+/*
+ * This specifies the current version that should be stored in
+ * vsoc_shm_layout_descriptor.major_version and
+ * vsoc_shm_layout_descriptor.minor_version.
+ * It should be updated only if the vsoc_device_region and
+ * vsoc_shm_layout_descriptor structures have changed.
+ * Versioning within each region is transferred
+ * via the min_compatible_version and current_version fields in
+ * vsoc_device_region. The driver does not consult these fields: they are
+ * left for the HALs and host processes and will change independently of
+ * the layout version.
+ */
+#define CURRENT_VSOC_LAYOUT_MAJOR_VERSION 2
+#define CURRENT_VSOC_LAYOUT_MINOR_VERSION 0
+
+#define VSOC_CREATE_FD_SCOPED_PERMISSION \
+	_IOW(0xF5, 0, struct fd_scoped_permission)
+#define VSOC_GET_FD_SCOPED_PERMISSION _IOR(0xF5, 1, struct fd_scoped_permission)
+
+/*
+ * This is used to signal the host to scan the guest_to_host_signal_table
+ * for new futexes to wake. This sends an interrupt if one is not already
+ * in flight.
+ */
+#define VSOC_MAYBE_SEND_INTERRUPT_TO_HOST _IO(0xF5, 2)
+
+/*
+ * When this returns the guest will scan host_to_guest_signal_table to
+ * check for new futexes to wake.
+ */
+/* TODO(ghartman): Consider moving this to the bottom half */
+#define VSOC_WAIT_FOR_INCOMING_INTERRUPT _IO(0xF5, 3)
+
+/*
+ * Guest HALs will use this to retrieve the region description after
+ * opening their device node.
+ */
+#define VSOC_DESCRIBE_REGION _IOR(0xF5, 4, struct vsoc_device_region)
+
+/*
+ * Wake any threads that may be waiting for a host interrupt on this
+ * region. This is mostly used during shutdown.
+ */
+#define VSOC_SELF_INTERRUPT _IO(0xF5, 5)
+
+/*
+ * This is used to signal the host to scan the guest_to_host_signal_table
+ * for new futexes to wake. This sends an interrupt unconditionally.
+ */
+#define VSOC_SEND_INTERRUPT_TO_HOST _IO(0xF5, 6)
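To make the permission flow above concrete, here is a hedged userspace sketch of claiming a sub-region; it is illustrative only, not part of the patch. claim_area() is a hypothetical helper, and the offsets and owned value are invented; the ioctl is issued on an fd opened on the managing region, with the fd of the managed region passed in the argument.

	#include <string.h>
	#include <sys/ioctl.h>

	#include "vsoc_shm.h"

	/* Illustrative only: offsets and the owned value are invented. */
	static int claim_area(int manager_fd, int managed_fd)
	{
		struct fd_scoped_permission_arg arg;

		memset(&arg, 0, sizeof(arg));
		arg.perm.begin_offset = 0x1000;	/* page aligned, in the data area */
		arg.perm.end_offset = 0x2000;	/* page aligned, inside the region */
		arg.perm.owner_offset = 0x10;	/* word in the manager's data area */
		arg.perm.owned_value = 42;	/* anything but VSOC_REGION_FREE */
		arg.managed_region_fd = managed_fd;

		/* On success the permission stays attached to managed_fd until
		 * that file description is released; of a group of racing
		 * compatible proposals, exactly one caller wins.
		 */
		return ioctl(manager_fd, VSOC_CREATE_FD_SCOPED_PERMISSION, &arg);
	}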
+
+enum wait_types {
+	VSOC_WAIT_UNDEFINED = 0,
+	VSOC_WAIT_IF_EQUAL = 1,
+	VSOC_WAIT_IF_EQUAL_TIMEOUT = 2
+};
+
+/*
+ * Wait for a condition to be true
+ *
+ * Note, this is sized and aligned so the 32 bit and 64 bit layouts are
+ * identical.
+ */
+struct vsoc_cond_wait {
+	/* Input: Offset of the 32 bit word to check */
+	__u32 offset;
+	/* Input: Value that will be compared with the word at offset */
+	__u32 value;
+	/* Input: Monotonic time to wake at, seconds part */
+	__u64 wake_time_sec;
+	/* Input: Monotonic time to wake at, nanoseconds part */
+	__u32 wake_time_nsec;
+	/* Input: Type of wait */
+	__u32 wait_type;
+	/* Output: Number of times the thread woke before returning. */
+	__u32 wakes;
+	/* Pad so the structure is 8-byte aligned and a multiple of 8 bytes
+	 * long for 32/64 bit compatibility.
+	 */
+	__u32 reserved_1;
+};
+
+#define VSOC_COND_WAIT _IOWR(0xF5, 7, struct vsoc_cond_wait)
+
+/* Wake any local threads waiting at the offset given in arg */
+#define VSOC_COND_WAKE _IO(0xF5, 8)
+
+#endif /* _UAPI_LINUX_VSOC_SHM_H */
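For illustration, a timed conditional wait from userspace might look like the sketch below; it is not part of the patch, and wait_for_change() is a hypothetical helper. The deadline is an absolute CLOCK_MONOTONIC time, matching the HRTIMER_MODE_ABS wait implemented later in vsoc.c.

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <time.h>

	#include "vsoc_shm.h"

	/* Wait up to one second for the word at word_offset to change. */
	static int wait_for_change(int region_fd, uint32_t word_offset,
				   uint32_t old_value)
	{
		struct vsoc_cond_wait w;
		struct timespec now;

		memset(&w, 0, sizeof(w));
		clock_gettime(CLOCK_MONOTONIC, &now);
		w.offset = word_offset;
		w.value = old_value;
		w.wake_time_sec = now.tv_sec + 1;	/* absolute deadline */
		w.wake_time_nsec = now.tv_nsec;
		w.wait_type = VSOC_WAIT_IF_EQUAL_TIMEOUT;

		/* 0 on wake-up; -1 with errno == ETIMEDOUT on deadline.
		 * w.wakes reports how often the thread woke meanwhile.
		 */
		return ioctl(region_fd, VSOC_COND_WAIT, &w);
	}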
diff --git a/drivers/staging/android/vsbl/Kconfig b/drivers/staging/android/vsbl/Kconfig
new file mode 100644
index 000000000000..465fed22ca6b
--- /dev/null
+++ b/drivers/staging/android/vsbl/Kconfig
@@ -0,0 +1,9 @@
+config VSBL_BOOTLOADER_CONTROL
+	tristate "vSBL Bootloader Control module"
+	depends on X86
+	default n
+	help
+	  This driver installs a reboot hook so that, when reboot() is
+	  invoked with a string argument, the corresponding ABL action
+	  is written to CMOS data, to be processed by ABL on the next
+	  boot.
diff --git a/drivers/staging/android/vsbl/Makefile b/drivers/staging/android/vsbl/Makefile
new file mode 100644
index 000000000000..8ce038941fc6
--- /dev/null
+++ b/drivers/staging/android/vsbl/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VSBL_BOOTLOADER_CONTROL) += vsblbc.o
diff --git a/drivers/staging/android/vsbl/vsblbc.c b/drivers/staging/android/vsbl/vsblbc.c
new file mode 100644
index 000000000000..527a174f0130
--- /dev/null
+++ b/drivers/staging/android/vsbl/vsblbc.c
@@ -0,0 +1,262 @@
+/*
+ * vsblbc: control vSBL bootloaders
+ * Copyright (c) 2013-2017, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mc146818rtc.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/string.h>
+
+#define MODULE_NAME "vsblbc"
+
+/* RTC read and write */
+static inline unsigned char cmos_read_ext_bank(u8 addr)
+{
+	outb(addr, RTC_PORT(4));
+	return inb(RTC_PORT(5));
+}
+#define CMOS_READ_EXT(a) cmos_read_ext_bank(a)
+
+static inline void cmos_write_ext_bank(u8 val, u8 addr)
+{
+	outb(addr, RTC_PORT(4));
+	outb(val, RTC_PORT(5));
+}
+#define CMOS_WRITE_EXT(v, a) cmos_write_ext_bank(v, a)
+
+/* vSBL Conventions */
+#define NVRAM_START_ADDRESS 0x10
+
+#define _USERCMD_(cmd, len) (((cmd) << 5) | ((len) & 0x1f))
+#define USERCMD_END _USERCMD_(0, 0)
+#define USERCMD_ACTION _USERCMD_(7, 1)
+
+#define CDATA_TAG_USER_CMD 0x4d
+#define NVRAM_VALID_FLAG 0x12
+
+#define CRC32C_POLYNOMIAL 0x82F63B78 /* CRC32C Castagnoli */
+
+union _cdata_header {
+	uint32_t data;
+	struct {
+		unsigned ncond  : 2;
+		unsigned length : 10;
+		unsigned flags  : 4;
+		unsigned version: 4;
+		unsigned tag    : 12;
+	};
+};
+
+struct nvram_reboot_cmd {
+	char action;
+	char target;
+	char end;
+	char padding;
+} __packed;
+
+struct name2id {
+	const char *name;
+	int id;
+};
+
+struct nvram_msg {
+	char magic;
+	char size;
+	union _cdata_header cdata_header;
+	char *cdata_payload;
+	size_t cdata_payload_size;
+	uint32_t crc;
+} __packed;
+
+static const struct name2id NAME2ID[] = {
+	{ "main",       0x00 },
+	{ "android",    0x00 },
+	{ "bootloader", 0x01 },
+	{ "fastboot",   0x01 },
+	{ "elk",        0x02 },
+	{ "recovery",   0x03 },
+	{ "crashmode",  0x04 },
+	{ "dnx",        0x05 },
+	{ "cli",        0x10 },
+};
+
+static size_t offset; /* remembers the NVRAM offset between calls */
+
+static size_t write_data_to_nvram(char *data, size_t size)
+{
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+	for (i = 0; i < size; i++)
+		CMOS_WRITE_EXT(*(data + i), NVRAM_START_ADDRESS + offset + i);
+
+	offset += size;
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
+	return i;
+}
+
+static void write_msg_to_nvram(struct nvram_msg *nvram_msg)
+{
+	/* Always start from the top: only one command is expected */
+	offset = 0;
+	write_data_to_nvram((void *)nvram_msg,
+			    offsetof(struct nvram_msg, cdata_payload));
+	write_data_to_nvram((void *)(nvram_msg->cdata_payload),
+			    nvram_msg->cdata_payload_size);
+	write_data_to_nvram((void *)&(nvram_msg->crc), sizeof(nvram_msg->crc));
+}
+
+/* Compute CRC for one byte (shift register-based: one bit at a time). */
+static uint32_t crc32c_byte(uint32_t crc, unsigned byte)
+{
+	int i;
+	uint32_t c;
+
+	for (i = 0; i < 8; i++) {
+		c = (crc ^ byte) & 1;
+		if (c)
+			crc = (crc >> 1) ^ CRC32C_POLYNOMIAL;
+		else
+			crc = (crc >> 1);
+		byte >>= 1;
+	}
+
+	return crc;
+}
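The routine above is the standard reflected (LSB-first) CRC-32C. A quick userspace self-check, assuming only the widely published CRC-32C check value 0xE3069283 for the string "123456789" (initial value all-ones, final complement), could look like this; it is illustrative and not part of the patch:

	#include <assert.h>
	#include <stdint.h>

	/* Same shift-register CRC-32C as in vsblbc.c, in userspace. */
	static uint32_t crc32c_byte(uint32_t crc, unsigned int byte)
	{
		int i;

		for (i = 0; i < 8; i++) {
			if ((crc ^ byte) & 1)
				crc = (crc >> 1) ^ 0x82F63B78;
			else
				crc >>= 1;
			byte >>= 1;
		}
		return crc;
	}

	int main(void)
	{
		const char *s = "123456789";
		uint32_t crc = ~0u;
		int i;

		for (i = 0; s[i]; i++)
			crc = crc32c_byte(crc, (uint8_t)s[i]);
		/* Published CRC-32C check value, final inversion applied. */
		assert(~crc == 0xE3069283);
		return 0;
	}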
+
+/* Compute CRC for a given buffer. */
+static uint32_t crc32c_buf(uint32_t crc, const void *addr, unsigned len)
+{
+	unsigned i;
+
+	for (i = 0; i < len; i++)
+		crc = crc32c_byte(crc, *(const uint8_t *)(addr + i));
+
+	return crc;
+}
+
+static uint32_t crc32c_msg(struct nvram_msg *nvram_msg)
+{
+	uint32_t crc;
+
+	crc = crc32c_buf(~0, nvram_msg,
+			 offsetof(struct nvram_msg, cdata_payload));
+	crc = crc32c_buf(crc, nvram_msg->cdata_payload,
+			 nvram_msg->cdata_payload_size);
+	return crc;
+}
+
+static int reboot_target_name2id(const char *name)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(NAME2ID); i++)
+		if (!strcmp(NAME2ID[i].name, name))
+			return NAME2ID[i].id;
+
+	return -EINVAL;
+}
+
+static int set_reboot_target(const char *name)
+{
+	int id;
+	struct nvram_msg msg;
+	struct nvram_reboot_cmd reboot_cmd;
+	union _cdata_header cdh;
+
+	if (name == NULL) {
+		pr_err("Error in %s: NULL target\n", __func__);
+		return -EINVAL;
+	}
+
+	id = reboot_target_name2id(name);
+	if (id < 0) {
+		pr_err("Error in %s: '%s' is not a valid target\n",
+		       __func__, name);
+		return -EINVAL;
+	}
+
+	cdh.data = 0;
+	cdh.length = 2; /* 2*32 bits, from header to padding */
+	cdh.tag = CDATA_TAG_USER_CMD;
+
+	memset(&reboot_cmd, 0, sizeof(reboot_cmd));
+	memset(&msg, 0, sizeof(msg));
+	msg.magic = NVRAM_VALID_FLAG;
+	msg.cdata_header.data = cdh.data;
+	reboot_cmd.action = USERCMD_ACTION;
+
+	reboot_cmd.target = id;
+	msg.cdata_payload = (void *)&reboot_cmd;
+	msg.cdata_payload_size = sizeof(reboot_cmd);
+	msg.size = offsetof(struct nvram_msg, cdata_payload) +
+		   sizeof(reboot_cmd) + sizeof(msg.crc);
+	msg.crc = crc32c_msg(&msg);
+
+	write_msg_to_nvram(&msg);
+
+	return 0;
+}
+
+static int vsblbc_reboot_notifier_call(struct notifier_block *notifier,
+				       unsigned long what, void *data)
+{
+	const char *target = (const char *)data;
+	int ret;
+
+	if (what != SYS_RESTART)
+		return NOTIFY_DONE;
+
+	/* The restart command may legitimately be NULL. */
+	if (target && target[0] != '\0') {
+		ret = set_reboot_target(target);
+		if (ret)
+			pr_err("%s: Failed to set reboot target, ret=%d\n",
+			       __func__, ret);
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block vsblbc_reboot_notifier = {
+	.notifier_call = vsblbc_reboot_notifier_call,
+};
+
+static int __init vsblbc_init(void)
+{
+	int ret;
+
+	ret = register_reboot_notifier(&vsblbc_reboot_notifier);
+	if (ret) {
+		pr_err(MODULE_NAME ": unable to register reboot notifier\n");
+		return ret;
+	}
+
+	return 0;
+}
+module_init(vsblbc_init);
+
+static void __exit vsblbc_exit(void)
+{
+	unregister_reboot_notifier(&vsblbc_reboot_notifier);
+}
+module_exit(vsblbc_exit);
+
+MODULE_AUTHOR("Guillaume Betous ");
+MODULE_DESCRIPTION("Virtual Slimboot boot control driver");
+MODULE_LICENSE("GPL v2");
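The string the notifier receives is the argument of reboot(2) with LINUX_REBOOT_CMD_RESTART2. As a concrete illustration (not part of the patch), a userspace request to restart into recovery would look like the sketch below; reboot_into() is a hypothetical helper name.

	#include <linux/reboot.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Ask the kernel to restart with a target string; that string is
	 * what vsblbc_reboot_notifier_call() receives as *data.
	 */
	static int reboot_into(const char *target)
	{
		sync();	/* flush filesystems first */
		return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1,
			       LINUX_REBOOT_MAGIC2,
			       LINUX_REBOOT_CMD_RESTART2, target);
	}

	/* Usage: reboot_into("recovery"); */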
diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c
new file mode 100644
index 000000000000..954ed2c5d807
--- /dev/null
+++ b/drivers/staging/android/vsoc.c
@@ -0,0 +1,1165 @@
+/*
+ * drivers/staging/android/vsoc.c
+ *
+ * Android Virtual System on a Chip (VSoC) driver
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * Author: ghartman@google.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Based on drivers/char/kvm_ivshmem.c - driver for KVM Inter-VM shared
+ * memory
+ *	Copyright 2009 Cam Macdonell
+ *
+ * Based on cirrusfb.c and 8139cp.c:
+ *	Copyright 1999-2001 Jeff Garzik
+ *	Copyright 2001-2004 Jeff Garzik
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/freezer.h>
+#include <linux/futex.h>
+#include <linux/hrtimer.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/file.h>
+#include "uapi/vsoc_shm.h"
+
+#define VSOC_DEV_NAME "vsoc"
+
+/*
+ * Description of the ivshmem-doorbell PCI device used by QEmu. These
+ * constants follow docs/specs/ivshmem-spec.txt, which can be found in
+ * the QEmu repository. This was last reconciled with the version that
+ * came out with QEmu 2.8.
+ */
+
+/*
+ * These constants are the KVM Inter-VM shared memory device
+ * register offsets.
+ */
+enum {
+	INTR_MASK = 0x00,	/* Interrupt Mask */
+	INTR_STATUS = 0x04,	/* Interrupt Status */
+	IV_POSITION = 0x08,	/* VM ID */
+	DOORBELL = 0x0c,	/* Doorbell */
+};
+
+static const int REGISTER_BAR;  /* Equal to 0 */
+static const int MAX_REGISTER_BAR_LEN = 0x100;
+/*
+ * The MSI-x BAR is not used directly.
+ *
+ * static const int MSI_X_BAR = 1;
+ */
+static const int SHARED_MEMORY_BAR = 2;
+
+struct vsoc_region_data {
+	char name[VSOC_DEVICE_NAME_SZ + 1];
+	wait_queue_head_t interrupt_wait_queue;
+	/* TODO(b/73664181): Use multiple futex wait queues */
+	wait_queue_head_t futex_wait_queue;
+	/* Flag indicating that an interrupt has been signalled by the host. */
+	atomic_t *incoming_signalled;
+	/* Flag indicating the guest has signalled the host. */
+	atomic_t *outgoing_signalled;
+	bool irq_requested;
+	bool device_created;
+};
+
+struct vsoc_device {
+	/* Kernel virtual address of REGISTER_BAR. */
+	void __iomem *regs;
+	/* Physical address of SHARED_MEMORY_BAR. */
+	phys_addr_t shm_phys_start;
+	/* Kernel virtual address of SHARED_MEMORY_BAR. */
+	void __iomem *kernel_mapped_shm;
+	/* Size of the entire shared memory window in bytes. */
+	size_t shm_size;
+	/*
+	 * Pointer to the virtual address of the shared memory layout
+	 * structure. This is probably identical to kernel_mapped_shm, but
+	 * saving this here saves a lot of annoying casts.
+	 */
+	struct vsoc_shm_layout_descriptor *layout;
+	/*
+	 * Points to a table of region descriptors in the kernel's virtual
+	 * address space. Calculated from
+	 * vsoc_shm_layout_descriptor.vsoc_region_desc_offset
+	 */
+	struct vsoc_device_region *regions;
+	/* Head of a list of permissions that have been granted. */
+	struct list_head permissions;
+	struct pci_dev *dev;
+	/* Per-region (and therefore per-interrupt) information. */
+	struct vsoc_region_data *regions_data;
+	/*
+	 * Table of msi-x entries. This has to be separated from struct
+	 * vsoc_region_data because the kernel deals with them as an array.
+	 */
+	struct msix_entry *msix_entries;
+	/* Mutex that protects the permission list */
+	struct mutex mtx;
+	/* Major number assigned by the kernel */
+	int major;
+	/* Character device assigned by the kernel */
+	struct cdev cdev;
+	/* Device class assigned by the kernel */
+	struct class *class;
+	/*
+	 * Flags that indicate what we've initialized. These are used to do
+	 * an orderly cleanup of the device.
+	 */
+	bool enabled_device;
+	bool requested_regions;
+	bool cdev_added;
+	bool class_added;
+	bool msix_enabled;
+};
+
+static struct vsoc_device vsoc_dev;
+
+/*
+ * TODO(ghartman): Add a /sys filesystem entry that summarizes the permissions.
+ */ + +struct fd_scoped_permission_node { + struct fd_scoped_permission permission; + struct list_head list; +}; + +struct vsoc_private_data { + struct fd_scoped_permission_node *fd_scoped_permission_node; +}; + +static long vsoc_ioctl(struct file *, unsigned int, unsigned long); +static int vsoc_mmap(struct file *, struct vm_area_struct *); +static int vsoc_open(struct inode *, struct file *); +static int vsoc_release(struct inode *, struct file *); +static ssize_t vsoc_read(struct file *, char __user *, size_t, loff_t *); +static ssize_t vsoc_write(struct file *, const char __user *, size_t, loff_t *); +static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin); +static int do_create_fd_scoped_permission( + struct vsoc_device_region *region_p, + struct fd_scoped_permission_node *np, + struct fd_scoped_permission_arg __user *arg); +static void do_destroy_fd_scoped_permission( + struct vsoc_device_region *owner_region_p, + struct fd_scoped_permission *perm); +static long do_vsoc_describe_region(struct file *, + struct vsoc_device_region __user *); +static ssize_t vsoc_get_area(struct file *filp, __u32 *perm_off); + +/** + * Validate arguments on entry points to the driver. + */ +inline int vsoc_validate_inode(struct inode *inode) +{ + if (iminor(inode) >= vsoc_dev.layout->region_count) { + dev_err(&vsoc_dev.dev->dev, + "describe_region: invalid region %d\n", iminor(inode)); + return -ENODEV; + } + return 0; +} + +inline int vsoc_validate_filep(struct file *filp) +{ + int ret = vsoc_validate_inode(file_inode(filp)); + + if (ret) + return ret; + if (!filp->private_data) { + dev_err(&vsoc_dev.dev->dev, + "No private data on fd, region %d\n", + iminor(file_inode(filp))); + return -EBADFD; + } + return 0; +} + +/* Converts from shared memory offset to virtual address */ +static inline void *shm_off_to_virtual_addr(__u32 offset) +{ + return (void __force *)vsoc_dev.kernel_mapped_shm + offset; +} + +/* Converts from shared memory offset to physical address */ +static inline phys_addr_t shm_off_to_phys_addr(__u32 offset) +{ + return vsoc_dev.shm_phys_start + offset; +} + +/** + * Convenience functions to obtain the region from the inode or file. + * Dangerous to call before validating the inode/file. 
+ */
+static inline struct vsoc_device_region *vsoc_region_from_inode(
+	struct inode *inode)
+{
+	return &vsoc_dev.regions[iminor(inode)];
+}
+
+static inline struct vsoc_device_region *vsoc_region_from_filep(
+	struct file *filp)
+{
+	return vsoc_region_from_inode(file_inode(filp));
+}
+
+static inline uint32_t vsoc_device_region_size(struct vsoc_device_region *r)
+{
+	return r->region_end_offset - r->region_begin_offset;
+}
+
+static const struct file_operations vsoc_ops = {
+	.owner = THIS_MODULE,
+	.open = vsoc_open,
+	.mmap = vsoc_mmap,
+	.read = vsoc_read,
+	.unlocked_ioctl = vsoc_ioctl,
+	.compat_ioctl = vsoc_ioctl,
+	.write = vsoc_write,
+	.llseek = vsoc_lseek,
+	.release = vsoc_release,
+};
+
+static struct pci_device_id vsoc_id_table[] = {
+	{0x1af4, 0x1110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{0},
+};
+
+MODULE_DEVICE_TABLE(pci, vsoc_id_table);
+
+static void vsoc_remove_device(struct pci_dev *pdev);
+static int vsoc_probe_device(struct pci_dev *pdev,
+			     const struct pci_device_id *ent);
+
+static struct pci_driver vsoc_pci_driver = {
+	.name = "vsoc",
+	.id_table = vsoc_id_table,
+	.probe = vsoc_probe_device,
+	.remove = vsoc_remove_device,
+};
+
+static int do_create_fd_scoped_permission(
+	struct vsoc_device_region *region_p,
+	struct fd_scoped_permission_node *np,
+	struct fd_scoped_permission_arg __user *arg)
+{
+	struct file *managed_filp;
+	s32 managed_fd;
+	atomic_t *owner_ptr = NULL;
+	struct vsoc_device_region *managed_region_p;
+
+	/* Copy only the permission, not the whole node. */
+	if (copy_from_user(&np->permission, &arg->perm,
+			   sizeof(np->permission)) ||
+	    copy_from_user(&managed_fd,
+			   &arg->managed_region_fd, sizeof(managed_fd))) {
+		return -EFAULT;
+	}
+	managed_filp = fdget(managed_fd).file;
+	/* Check that it's a valid fd. */
+	if (!managed_filp || vsoc_validate_filep(managed_filp))
+		return -EPERM;
+	/* EEXIST if the given fd already has a permission.
*/ + if (((struct vsoc_private_data *)managed_filp->private_data)-> + fd_scoped_permission_node) + return -EEXIST; + managed_region_p = vsoc_region_from_filep(managed_filp); + /* Check that the provided region is managed by this one */ + if (&vsoc_dev.regions[managed_region_p->managed_by] != region_p) + return -EPERM; + /* The area must be well formed and have non-zero size */ + if (np->permission.begin_offset >= np->permission.end_offset) + return -EINVAL; + /* The area must fit in the memory window */ + if (np->permission.end_offset > + vsoc_device_region_size(managed_region_p)) + return -ERANGE; + /* The area must be in the region data section */ + if (np->permission.begin_offset < + managed_region_p->offset_of_region_data) + return -ERANGE; + /* The area must be page aligned */ + if (!PAGE_ALIGNED(np->permission.begin_offset) || + !PAGE_ALIGNED(np->permission.end_offset)) + return -EINVAL; + /* Owner offset must be naturally aligned in the window */ + if (np->permission.owner_offset & + (sizeof(np->permission.owner_offset) - 1)) + return -EINVAL; + /* The owner flag must reside in the owner memory */ + if (np->permission.owner_offset + sizeof(np->permission.owner_offset) > + vsoc_device_region_size(region_p)) + return -ERANGE; + /* The owner flag must reside in the data section */ + if (np->permission.owner_offset < region_p->offset_of_region_data) + return -EINVAL; + /* The owner value must change to claim the memory */ + if (np->permission.owned_value == VSOC_REGION_FREE) + return -EINVAL; + owner_ptr = + (atomic_t *)shm_off_to_virtual_addr(region_p->region_begin_offset + + np->permission.owner_offset); + /* We've already verified that this is in the shared memory window, so + * it should be safe to write to this address. + */ + if (atomic_cmpxchg(owner_ptr, + VSOC_REGION_FREE, + np->permission.owned_value) != VSOC_REGION_FREE) { + return -EBUSY; + } + ((struct vsoc_private_data *)managed_filp->private_data)-> + fd_scoped_permission_node = np; + /* The file offset needs to be adjusted if the calling + * process did any read/write operations on the fd + * before creating the permission. + */ + if (managed_filp->f_pos) { + if (managed_filp->f_pos > np->permission.end_offset) { + /* If the offset is beyond the permission end, set it + * to the end. + */ + managed_filp->f_pos = np->permission.end_offset; + } else { + /* If the offset is within the permission interval + * keep it there otherwise reset it to zero. 
+ */ + if (managed_filp->f_pos < np->permission.begin_offset) { + managed_filp->f_pos = 0; + } else { + managed_filp->f_pos -= + np->permission.begin_offset; + } + } + } + return 0; +} + +static void do_destroy_fd_scoped_permission_node( + struct vsoc_device_region *owner_region_p, + struct fd_scoped_permission_node *node) +{ + if (node) { + do_destroy_fd_scoped_permission(owner_region_p, + &node->permission); + mutex_lock(&vsoc_dev.mtx); + list_del(&node->list); + mutex_unlock(&vsoc_dev.mtx); + kfree(node); + } +} + +static void do_destroy_fd_scoped_permission( + struct vsoc_device_region *owner_region_p, + struct fd_scoped_permission *perm) +{ + atomic_t *owner_ptr = NULL; + int prev = 0; + + if (!perm) + return; + owner_ptr = (atomic_t *)shm_off_to_virtual_addr( + owner_region_p->region_begin_offset + perm->owner_offset); + prev = atomic_xchg(owner_ptr, VSOC_REGION_FREE); + if (prev != perm->owned_value) + dev_err(&vsoc_dev.dev->dev, + "%x-%x: owner (%s) %x: expected to be %x was %x", + perm->begin_offset, perm->end_offset, + owner_region_p->device_name, perm->owner_offset, + perm->owned_value, prev); +} + +static long do_vsoc_describe_region(struct file *filp, + struct vsoc_device_region __user *dest) +{ + struct vsoc_device_region *region_p; + int retval = vsoc_validate_filep(filp); + + if (retval) + return retval; + region_p = vsoc_region_from_filep(filp); + if (copy_to_user(dest, region_p, sizeof(*region_p))) + return -EFAULT; + return 0; +} + +/** + * Implements the inner logic of cond_wait. Copies to and from userspace are + * done in the helper function below. + */ +static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg) +{ + DEFINE_WAIT(wait); + u32 region_number = iminor(file_inode(filp)); + struct vsoc_region_data *data = vsoc_dev.regions_data + region_number; + struct hrtimer_sleeper timeout, *to = NULL; + int ret = 0; + struct vsoc_device_region *region_p = vsoc_region_from_filep(filp); + atomic_t *address = NULL; + struct timespec ts; + + /* Ensure that the offset is aligned */ + if (arg->offset & (sizeof(uint32_t) - 1)) + return -EADDRNOTAVAIL; + /* Ensure that the offset is within shared memory */ + if (((uint64_t)arg->offset) + region_p->region_begin_offset + + sizeof(uint32_t) > region_p->region_end_offset) + return -E2BIG; + address = shm_off_to_virtual_addr(region_p->region_begin_offset + + arg->offset); + + /* Ensure that the type of wait is valid */ + switch (arg->wait_type) { + case VSOC_WAIT_IF_EQUAL: + break; + case VSOC_WAIT_IF_EQUAL_TIMEOUT: + to = &timeout; + break; + default: + return -EINVAL; + } + + if (to) { + /* Copy the user-supplied timesec into the kernel structure. + * We do things this way to flatten differences between 32 bit + * and 64 bit timespecs. + */ + ts.tv_sec = arg->wake_time_sec; + ts.tv_nsec = arg->wake_time_nsec; + + if (!timespec_valid(&ts)) + return -EINVAL; + hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS); + hrtimer_set_expires_range_ns(&to->timer, timespec_to_ktime(ts), + current->timer_slack_ns); + + hrtimer_init_sleeper(to, current); + } + + while (1) { + prepare_to_wait(&data->futex_wait_queue, &wait, + TASK_INTERRUPTIBLE); + /* + * Check the sentinel value after prepare_to_wait. If the value + * changes after this check the writer will call signal, + * changing the task state from INTERRUPTIBLE to RUNNING. That + * will ensure that schedule() will eventually schedule this + * task. 
+ */ + if (atomic_read(address) != arg->value) { + ret = 0; + break; + } + if (to) { + hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); + if (likely(to->task)) + freezable_schedule(); + hrtimer_cancel(&to->timer); + if (!to->task) { + ret = -ETIMEDOUT; + break; + } + } else { + freezable_schedule(); + } + /* Count the number of times that we woke up. This is useful + * for unit testing. + */ + ++arg->wakes; + if (signal_pending(current)) { + ret = -EINTR; + break; + } + } + finish_wait(&data->futex_wait_queue, &wait); + if (to) + destroy_hrtimer_on_stack(&to->timer); + return ret; +} + +/** + * Handles the details of copying from/to userspace to ensure that the copies + * happen on all of the return paths of cond_wait. + */ +static int do_vsoc_cond_wait(struct file *filp, + struct vsoc_cond_wait __user *untrusted_in) +{ + struct vsoc_cond_wait arg; + int rval = 0; + + if (copy_from_user(&arg, untrusted_in, sizeof(arg))) + return -EFAULT; + /* wakes is an out parameter. Initialize it to something sensible. */ + arg.wakes = 0; + rval = handle_vsoc_cond_wait(filp, &arg); + if (copy_to_user(untrusted_in, &arg, sizeof(arg))) + return -EFAULT; + return rval; +} + +static int do_vsoc_cond_wake(struct file *filp, uint32_t offset) +{ + struct vsoc_device_region *region_p = vsoc_region_from_filep(filp); + u32 region_number = iminor(file_inode(filp)); + struct vsoc_region_data *data = vsoc_dev.regions_data + region_number; + /* Ensure that the offset is aligned */ + if (offset & (sizeof(uint32_t) - 1)) + return -EADDRNOTAVAIL; + /* Ensure that the offset is within shared memory */ + if (((uint64_t)offset) + region_p->region_begin_offset + + sizeof(uint32_t) > region_p->region_end_offset) + return -E2BIG; + /* + * TODO(b/73664181): Use multiple futex wait queues. + * We need to wake every sleeper when the condition changes. Typically + * only a single thread will be waiting on the condition, but there + * are exceptions. The worst case is about 10 threads. 
+	 */
+	wake_up_interruptible_all(&data->futex_wait_queue);
+	return 0;
+}
+
+static long vsoc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int rv = 0;
+	struct vsoc_device_region *region_p;
+	u32 reg_num;
+	struct vsoc_region_data *reg_data;
+	int retval = vsoc_validate_filep(filp);
+
+	if (retval)
+		return retval;
+	region_p = vsoc_region_from_filep(filp);
+	reg_num = iminor(file_inode(filp));
+	reg_data = vsoc_dev.regions_data + reg_num;
+	switch (cmd) {
+	case VSOC_CREATE_FD_SCOPED_PERMISSION:
+	{
+		struct fd_scoped_permission_node *node = NULL;
+
+		node = kzalloc(sizeof(*node), GFP_KERNEL);
+		/* We can't allocate memory for the permission */
+		if (!node)
+			return -ENOMEM;
+		INIT_LIST_HEAD(&node->list);
+		rv = do_create_fd_scoped_permission(
+			region_p,
+			node,
+			(struct fd_scoped_permission_arg __user *)arg);
+		if (!rv) {
+			mutex_lock(&vsoc_dev.mtx);
+			list_add(&node->list, &vsoc_dev.permissions);
+			mutex_unlock(&vsoc_dev.mtx);
+		} else {
+			kfree(node);
+			return rv;
+		}
+	}
+	break;
+
+	case VSOC_GET_FD_SCOPED_PERMISSION:
+	{
+		struct fd_scoped_permission_node *node =
+			((struct vsoc_private_data *)filp->private_data)->
+			fd_scoped_permission_node;
+		if (!node)
+			return -ENOENT;
+		if (copy_to_user
+		    ((struct fd_scoped_permission __user *)arg,
+		     &node->permission, sizeof(node->permission)))
+			return -EFAULT;
+	}
+	break;
+
+	case VSOC_MAYBE_SEND_INTERRUPT_TO_HOST:
+		if (!atomic_xchg(reg_data->outgoing_signalled, 1)) {
+			writel(reg_num, vsoc_dev.regs + DOORBELL);
+			return 0;
+		} else {
+			return -EBUSY;
+		}
+		break;
+
+	case VSOC_SEND_INTERRUPT_TO_HOST:
+		writel(reg_num, vsoc_dev.regs + DOORBELL);
+		return 0;
+
+	case VSOC_WAIT_FOR_INCOMING_INTERRUPT:
+		wait_event_interruptible(
+			reg_data->interrupt_wait_queue,
+			(atomic_read(reg_data->incoming_signalled) != 0));
+		break;
+
+	case VSOC_DESCRIBE_REGION:
+		return do_vsoc_describe_region(
+			filp,
+			(struct vsoc_device_region __user *)arg);
+
+	case VSOC_SELF_INTERRUPT:
+		atomic_set(reg_data->incoming_signalled, 1);
+		wake_up_interruptible(&reg_data->interrupt_wait_queue);
+		break;
+
+	case VSOC_COND_WAIT:
+		return do_vsoc_cond_wait(filp,
+					 (struct vsoc_cond_wait __user *)arg);
+	case VSOC_COND_WAKE:
+		return do_vsoc_cond_wake(filp, arg);
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
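Taken together, the doorbell and wait ioctls above give the guest side of the signal protocol. A hedged userspace sketch of the receiver loop (steps 4-6 of the protocol in vsoc_shm.h) might look as follows; it is illustrative only, and receiver_loop() and scan_table() are hypothetical names.

	#include <stdatomic.h>
	#include <stdint.h>
	#include <sys/ioctl.h>

	#include "vsoc_shm.h"

	extern void scan_table(void);	/* hypothetical: does the futex wakes */

	/* signalled points at the mapped interrupt_signalled word. */
	static void receiver_loop(int region_fd, _Atomic uint32_t *signalled)
	{
		for (;;) {
			/* Sleep until the host rings the doorbell. */
			if (ioctl(region_fd, VSOC_WAIT_FOR_INCOMING_INTERRUPT) < 0)
				break;	/* interrupted by a signal or shutdown */
			/* Step 4: consume the flag; 0 means a spurious wake-up. */
			if (atomic_exchange(signalled, 0) == 0)
				continue;
			/* Step 5: wake the sleepers posted in the signal table. */
			scan_table();
		}
	}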
+
+static ssize_t vsoc_read(struct file *filp, char __user *buffer, size_t len,
+			 loff_t *poffset)
+{
+	__u32 area_off;
+	const void *area_p;
+	ssize_t area_len;
+	int retval = vsoc_validate_filep(filp);
+
+	if (retval)
+		return retval;
+	area_len = vsoc_get_area(filp, &area_off);
+	area_p = shm_off_to_virtual_addr(area_off);
+	area_p += *poffset;
+	area_len -= *poffset;
+	if (area_len <= 0)
+		return 0;
+	if (area_len < len)
+		len = area_len;
+	if (copy_to_user(buffer, area_p, len))
+		return -EFAULT;
+	*poffset += len;
+	return len;
+}
+
+static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin)
+{
+	ssize_t area_len = 0;
+	int retval = vsoc_validate_filep(filp);
+
+	if (retval)
+		return retval;
+	area_len = vsoc_get_area(filp, NULL);
+	switch (origin) {
+	case SEEK_SET:
+		break;
+
+	case SEEK_CUR:
+		if (offset > 0 && offset + filp->f_pos < 0)
+			return -EOVERFLOW;
+		offset += filp->f_pos;
+		break;
+
+	case SEEK_END:
+		if (offset > 0 && offset + area_len < 0)
+			return -EOVERFLOW;
+		offset += area_len;
+		break;
+
+	case SEEK_DATA:
+		if (offset >= area_len)
+			return -EINVAL;
+		if (offset < 0)
+			offset = 0;
+		break;
+
+	case SEEK_HOLE:
+		/* Next hole is always the end of the region, unless offset
+		 * is beyond that
+		 */
+		if (offset < area_len)
+			offset = area_len;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (offset < 0 || offset > area_len)
+		return -EINVAL;
+	filp->f_pos = offset;
+
+	return offset;
+}
+
+static ssize_t vsoc_write(struct file *filp, const char __user *buffer,
+			  size_t len, loff_t *poffset)
+{
+	__u32 area_off;
+	void *area_p;
+	ssize_t area_len;
+	int retval = vsoc_validate_filep(filp);
+
+	if (retval)
+		return retval;
+	area_len = vsoc_get_area(filp, &area_off);
+	area_p = shm_off_to_virtual_addr(area_off);
+	area_p += *poffset;
+	area_len -= *poffset;
+	if (area_len <= 0)
+		return 0;
+	if (area_len < len)
+		len = area_len;
+	if (copy_from_user(area_p, buffer, len))
+		return -EFAULT;
+	*poffset += len;
+	return len;
+}
+
+static irqreturn_t vsoc_interrupt(int irq, void *region_data_v)
+{
+	struct vsoc_region_data *region_data =
+		(struct vsoc_region_data *)region_data_v;
+	int reg_num = region_data - vsoc_dev.regions_data;
+
+	if (unlikely(!region_data))
+		return IRQ_NONE;
+
+	if (unlikely(reg_num < 0 ||
+		     reg_num >= vsoc_dev.layout->region_count)) {
+		dev_err(&vsoc_dev.dev->dev,
+			"invalid irq @%p reg_num=0x%04x\n",
+			region_data, reg_num);
+		return IRQ_NONE;
+	}
+	if (unlikely(vsoc_dev.regions_data + reg_num != region_data)) {
+		dev_err(&vsoc_dev.dev->dev,
+			"irq not aligned @%p reg_num=0x%04x\n",
+			region_data, reg_num);
+		return IRQ_NONE;
+	}
+	wake_up_interruptible(&region_data->interrupt_wait_queue);
+	return IRQ_HANDLED;
+}
+
+static int vsoc_probe_device(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
+{
+	int result;
+	int i;
+	resource_size_t reg_size;
+	dev_t devt;
+
+	vsoc_dev.dev = pdev;
+	result = pci_enable_device(pdev);
+	if (result) {
+		dev_err(&pdev->dev,
+			"pci_enable_device failed %s: error %d\n",
+			pci_name(pdev), result);
+		return result;
+	}
+	vsoc_dev.enabled_device = true;
+	result = pci_request_regions(pdev, "vsoc");
+	if (result < 0) {
+		dev_err(&pdev->dev, "pci_request_regions failed\n");
+		vsoc_remove_device(pdev);
+		return -EBUSY;
+	}
+	vsoc_dev.requested_regions = true;
+	/* Set up the control registers in BAR 0 */
+	reg_size = pci_resource_len(pdev, REGISTER_BAR);
+	if (reg_size > MAX_REGISTER_BAR_LEN)
+		vsoc_dev.regs =
+		    pci_iomap(pdev, REGISTER_BAR, MAX_REGISTER_BAR_LEN);
+	else
+		vsoc_dev.regs = pci_iomap(pdev, REGISTER_BAR, reg_size);
+
+	if (!vsoc_dev.regs) {
+		dev_err(&pdev->dev,
+			"cannot map registers of size %zu\n",
+			(size_t)reg_size);
+		vsoc_remove_device(pdev);
+		return -EBUSY;
+	}
+
+	/* Map the shared memory in BAR 2 */
+	vsoc_dev.shm_phys_start = pci_resource_start(pdev, SHARED_MEMORY_BAR);
+	vsoc_dev.shm_size = pci_resource_len(pdev, SHARED_MEMORY_BAR);
+
+	dev_info(&pdev->dev, "shared memory @ DMA %pa size=0x%zx\n",
+		 &vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
+	vsoc_dev.kernel_mapped_shm = pci_iomap_wc(pdev, SHARED_MEMORY_BAR, 0);
+	if (!vsoc_dev.kernel_mapped_shm) {
+		dev_err(&vsoc_dev.dev->dev, "cannot iomap region\n");
+		vsoc_remove_device(pdev);
+		return -EBUSY;
+	}
+
+	vsoc_dev.layout = (struct vsoc_shm_layout_descriptor __force *)
+		vsoc_dev.kernel_mapped_shm;
+	dev_info(&pdev->dev, "major_version: %d\n",
+		 vsoc_dev.layout->major_version);
+	dev_info(&pdev->dev, "minor_version: %d\n",
+		 vsoc_dev.layout->minor_version);
+	dev_info(&pdev->dev, "size: 0x%x\n", vsoc_dev.layout->size);
+	dev_info(&pdev->dev, "regions: %d\n", vsoc_dev.layout->region_count);
+	if (vsoc_dev.layout->major_version !=
+	    CURRENT_VSOC_LAYOUT_MAJOR_VERSION) {
+		dev_err(&vsoc_dev.dev->dev,
+			"driver supports only major_version %d\n",
+			CURRENT_VSOC_LAYOUT_MAJOR_VERSION);
+		vsoc_remove_device(pdev);
+		return -EBUSY;
+	}
+	result = alloc_chrdev_region(&devt, 0, vsoc_dev.layout->region_count,
+				     VSOC_DEV_NAME);
+	if (result) {
+		dev_err(&vsoc_dev.dev->dev, "alloc_chrdev_region failed\n");
+		vsoc_remove_device(pdev);
+		return -EBUSY;
+	}
+	vsoc_dev.major = MAJOR(devt);
+	cdev_init(&vsoc_dev.cdev, &vsoc_ops);
+	vsoc_dev.cdev.owner = THIS_MODULE;
+	result = cdev_add(&vsoc_dev.cdev, devt, vsoc_dev.layout->region_count);
+	if (result) {
+		dev_err(&vsoc_dev.dev->dev, "cdev_add error\n");
+		vsoc_remove_device(pdev);
+		return -EBUSY;
+	}
+	vsoc_dev.cdev_added = true;
+	vsoc_dev.class = class_create(THIS_MODULE, VSOC_DEV_NAME);
+	if (IS_ERR(vsoc_dev.class)) {
+		dev_err(&vsoc_dev.dev->dev, "class_create failed\n");
+		vsoc_remove_device(pdev);
+		return PTR_ERR(vsoc_dev.class);
+	}
+	vsoc_dev.class_added = true;
+	vsoc_dev.regions = (struct vsoc_device_region __force *)
+		((void *)vsoc_dev.layout +
+		 vsoc_dev.layout->vsoc_region_desc_offset);
+	vsoc_dev.msix_entries = kcalloc(
+			vsoc_dev.layout->region_count,
+			sizeof(vsoc_dev.msix_entries[0]), GFP_KERNEL);
+	if (!vsoc_dev.msix_entries) {
+		dev_err(&vsoc_dev.dev->dev,
+			"unable to allocate msix_entries\n");
+		vsoc_remove_device(pdev);
+		return -ENOSPC;
+	}
+	vsoc_dev.regions_data = kcalloc(
+			vsoc_dev.layout->region_count,
+			sizeof(vsoc_dev.regions_data[0]), GFP_KERNEL);
+	if (!vsoc_dev.regions_data) {
+		dev_err(&vsoc_dev.dev->dev,
+			"unable to allocate regions' data\n");
+		vsoc_remove_device(pdev);
+		return -ENOSPC;
+	}
+	for (i = 0; i < vsoc_dev.layout->region_count; ++i)
+		vsoc_dev.msix_entries[i].entry = i;
+
+	result = pci_enable_msix_exact(vsoc_dev.dev, vsoc_dev.msix_entries,
+				       vsoc_dev.layout->region_count);
+	if (result) {
+		dev_info(&pdev->dev, "pci_enable_msix failed: %d\n", result);
+		vsoc_remove_device(pdev);
+		return -ENOSPC;
+	}
+	/* Check that all regions are well formed */
+	for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
+		const struct vsoc_device_region *region = vsoc_dev.regions + i;
+
+		if (!PAGE_ALIGNED(region->region_begin_offset) ||
+		    !PAGE_ALIGNED(region->region_end_offset)) {
+			dev_err(&vsoc_dev.dev->dev,
+				"region %d not aligned (%x:%x)", i,
+				region->region_begin_offset,
+				region->region_end_offset);
+			vsoc_remove_device(pdev);
+			return -EFAULT;
+		}
+		if (region->region_begin_offset >= region->region_end_offset ||
+		    region->region_end_offset > vsoc_dev.shm_size) {
+			dev_err(&vsoc_dev.dev->dev,
+				"region %d offsets are wrong: %x %x %zx",
+				i, region->region_begin_offset,
+				region->region_end_offset, vsoc_dev.shm_size);
+			vsoc_remove_device(pdev);
+			return -EFAULT;
+		}
+		if (region->managed_by >= vsoc_dev.layout->region_count) {
+			dev_err(&vsoc_dev.dev->dev,
+				"region %d has invalid owner: %u",
+				i, region->managed_by);
+			vsoc_remove_device(pdev);
+			return -EFAULT;
+		}
+	}
+	vsoc_dev.msix_enabled = true;
+	for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
+		const struct vsoc_device_region *region = vsoc_dev.regions + i;
+		size_t name_sz = sizeof(vsoc_dev.regions_data[i].name) - 1;
+		const struct vsoc_signal_table_layout *h_to_g_signal_table =
+			&region->host_to_guest_signal_table;
+		const struct vsoc_signal_table_layout *g_to_h_signal_table =
+			&region->guest_to_host_signal_table;
+
+		vsoc_dev.regions_data[i].name[name_sz] = '\0';
+		memcpy(vsoc_dev.regions_data[i].name, region->device_name,
+		       name_sz);
+		dev_info(&pdev->dev, "region %d name=%s\n",
+			 i, vsoc_dev.regions_data[i].name);
+		init_waitqueue_head(
+			&vsoc_dev.regions_data[i].interrupt_wait_queue);
+		init_waitqueue_head(&vsoc_dev.regions_data[i].futex_wait_queue);
+		vsoc_dev.regions_data[i].incoming_signalled =
+			shm_off_to_virtual_addr(region->region_begin_offset) +
+			h_to_g_signal_table->interrupt_signalled_offset;
+		vsoc_dev.regions_data[i].outgoing_signalled =
+			shm_off_to_virtual_addr(region->region_begin_offset) +
+			g_to_h_signal_table->interrupt_signalled_offset;
+		result = request_irq(
+				vsoc_dev.msix_entries[i].vector,
+				vsoc_interrupt, 0,
+				vsoc_dev.regions_data[i].name,
+				vsoc_dev.regions_data + i);
+		if (result) {
+			dev_info(&pdev->dev,
+				 "request_irq failed irq=%d vector=%d\n",
+				 i, vsoc_dev.msix_entries[i].vector);
+			vsoc_remove_device(pdev);
+			return -ENOSPC;
+		}
+		vsoc_dev.regions_data[i].irq_requested = true;
+		/* device_create() returns an ERR_PTR on failure, never NULL */
+		if (IS_ERR(device_create(vsoc_dev.class, NULL,
+					 MKDEV(vsoc_dev.major, i),
+					 NULL,
+					 vsoc_dev.regions_data[i].name))) {
+			dev_err(&vsoc_dev.dev->dev, "device_create failed\n");
+			vsoc_remove_device(pdev);
+			return -EBUSY;
+		}
+		vsoc_dev.regions_data[i].device_created = true;
+	}
+	return 0;
+}
+
+/*
+ * This should undo all of the allocations in the probe function in reverse
+ * order.
+ *
+ * Notes:
+ *
+ * The device may have been partially initialized, so double check
+ * that the allocations happened.
+ *
+ * This function may be called multiple times, so mark resources as freed
+ * as they are deallocated.
+ */
+static void vsoc_remove_device(struct pci_dev *pdev)
+{
+	int i;
+	/*
+	 * pdev is the first thing to be set on probe and the last thing
+	 * to be cleared here. If it's NULL then there is no cleanup.
+	 */
+	if (!pdev || !vsoc_dev.dev)
+		return;
+	dev_info(&pdev->dev, "remove_device\n");
+	if (vsoc_dev.regions_data) {
+		for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
+			if (vsoc_dev.regions_data[i].device_created) {
+				device_destroy(vsoc_dev.class,
+					       MKDEV(vsoc_dev.major, i));
+				vsoc_dev.regions_data[i].device_created = false;
+			}
+			/* Free with the dev_id used at request_irq() time */
+			if (vsoc_dev.regions_data[i].irq_requested)
+				free_irq(vsoc_dev.msix_entries[i].vector,
+					 vsoc_dev.regions_data + i);
+			vsoc_dev.regions_data[i].irq_requested = false;
+		}
+		kfree(vsoc_dev.regions_data);
+		vsoc_dev.regions_data = NULL;
+	}
+	if (vsoc_dev.msix_enabled) {
+		pci_disable_msix(pdev);
+		vsoc_dev.msix_enabled = false;
+	}
+	kfree(vsoc_dev.msix_entries);
+	vsoc_dev.msix_entries = NULL;
+	vsoc_dev.regions = NULL;
+	if (vsoc_dev.class_added) {
+		class_destroy(vsoc_dev.class);
+		vsoc_dev.class_added = false;
+	}
+	if (vsoc_dev.cdev_added) {
+		cdev_del(&vsoc_dev.cdev);
+		vsoc_dev.cdev_added = false;
+	}
+	if (vsoc_dev.major && vsoc_dev.layout) {
+		unregister_chrdev_region(MKDEV(vsoc_dev.major, 0),
+					 vsoc_dev.layout->region_count);
+		vsoc_dev.major = 0;
+	}
+	vsoc_dev.layout = NULL;
+	if (vsoc_dev.kernel_mapped_shm) {
+		pci_iounmap(pdev, vsoc_dev.kernel_mapped_shm);
+		vsoc_dev.kernel_mapped_shm = NULL;
+	}
+	if (vsoc_dev.regs) {
+		pci_iounmap(pdev, vsoc_dev.regs);
+		vsoc_dev.regs = NULL;
+	}
+	if (vsoc_dev.requested_regions) {
+		pci_release_regions(pdev);
+		vsoc_dev.requested_regions = false;
+	}
+	if (vsoc_dev.enabled_device) {
+		pci_disable_device(pdev);
+		vsoc_dev.enabled_device = false;
+	}
+	/* Do this last: it indicates that the device is not initialized. */
+	vsoc_dev.dev = NULL;
+}
+
+static void __exit vsoc_cleanup_module(void)
+{
+	vsoc_remove_device(vsoc_dev.dev);
+	pci_unregister_driver(&vsoc_pci_driver);
+}
+
+static int __init vsoc_init_module(void)
+{
+	int err = -ENOMEM;
+
+	INIT_LIST_HEAD(&vsoc_dev.permissions);
+	mutex_init(&vsoc_dev.mtx);
+
+	err = pci_register_driver(&vsoc_pci_driver);
+	if (err < 0)
+		return err;
+	return 0;
+}
+
+static int vsoc_open(struct inode *inode, struct file *filp)
+{
+	/* Can't use vsoc_validate_filep because filp is still incomplete */
+	int ret = vsoc_validate_inode(inode);
+
+	if (ret)
+		return ret;
+	filp->private_data =
+		kzalloc(sizeof(struct vsoc_private_data), GFP_KERNEL);
+	if (!filp->private_data)
+		return -ENOMEM;
+	return 0;
+}
+
+static int vsoc_release(struct inode *inode, struct file *filp)
+{
+	struct vsoc_private_data *private_data = NULL;
+	struct fd_scoped_permission_node *node = NULL;
+	struct vsoc_device_region *owner_region_p = NULL;
+	int retval = vsoc_validate_filep(filp);
+
+	if (retval)
+		return retval;
+	private_data = (struct vsoc_private_data *)filp->private_data;
+	if (!private_data)
+		return 0;
+
+	node = private_data->fd_scoped_permission_node;
+	if (node) {
+		owner_region_p = vsoc_region_from_inode(inode);
+		if (owner_region_p->managed_by != VSOC_REGION_WHOLE) {
+			owner_region_p =
+			    &vsoc_dev.regions[owner_region_p->managed_by];
+		}
+		do_destroy_fd_scoped_permission_node(owner_region_p, node);
+		private_data->fd_scoped_permission_node = NULL;
+	}
+	kfree(private_data);
+	filp->private_data = NULL;
+
+	return 0;
+}
+
+/*
+ * Returns the device relative offset and length of the area specified by
+ * the fd scoped permission. If there is no fd scoped permission set, a
+ * default permission covering the entire region is assumed, unless the
+ * region is owned by another one, in which case the default is a permission
+ * with zero size.
+ */
+static ssize_t vsoc_get_area(struct file *filp, __u32 *area_offset)
+{
+	__u32 off = 0;
+	ssize_t length = 0;
+	struct vsoc_device_region *region_p;
+	struct fd_scoped_permission_node *node;
+	struct fd_scoped_permission *perm;
+
+	region_p = vsoc_region_from_filep(filp);
+	off = region_p->region_begin_offset;
+	/* Test the node itself: testing &node->permission would not
+	 * reliably detect a missing node.
+	 */
+	node = ((struct vsoc_private_data *)filp->private_data)->
+		fd_scoped_permission_node;
+	if (node) {
+		perm = &node->permission;
+		off += perm->begin_offset;
+		length = perm->end_offset - perm->begin_offset;
+	} else if (region_p->managed_by == VSOC_REGION_WHOLE) {
+		/* No permission set and the region is not owned by another;
+		 * default to full region access.
+		 */
+		length = vsoc_device_region_size(region_p);
+	} else {
+		/* return zero length, access is denied.
*/ + length = 0; + } + if (area_offset) + *area_offset = off; + return length; +} + +static int vsoc_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long len = vma->vm_end - vma->vm_start; + __u32 area_off; + phys_addr_t mem_off; + ssize_t area_len; + int retval = vsoc_validate_filep(filp); + + if (retval) + return retval; + area_len = vsoc_get_area(filp, &area_off); + /* Add the requested offset */ + area_off += (vma->vm_pgoff << PAGE_SHIFT); + area_len -= (vma->vm_pgoff << PAGE_SHIFT); + if (area_len < len) + return -EINVAL; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + mem_off = shm_off_to_phys_addr(area_off); + if (io_remap_pfn_range(vma, vma->vm_start, mem_off >> PAGE_SHIFT, + len, vma->vm_page_prot)) + return -EAGAIN; + return 0; +} + +module_init(vsoc_init_module); +module_exit(vsoc_cleanup_module); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Greg Hartman "); +MODULE_DESCRIPTION("VSoC interpretation of QEmu's ivshmem device"); +MODULE_VERSION("1.0"); diff --git a/drivers/staging/ccree/cc_lli_defs.h b/drivers/staging/ccree/cc_lli_defs.h index 851d3907167e..a9c417b07b04 100644 --- a/drivers/staging/ccree/cc_lli_defs.h +++ b/drivers/staging/ccree/cc_lli_defs.h @@ -59,7 +59,7 @@ static inline void cc_lli_set_addr(u32 *lli_p, dma_addr_t addr) lli_p[LLI_WORD0_OFFSET] = (addr & U32_MAX); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT lli_p[LLI_WORD1_OFFSET] &= ~LLI_HADDR_MASK; - lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 16)); + lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 32)); #endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ } diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c index 8d31a93fd8b7..087a622f20b2 100644 --- a/drivers/staging/ccree/ssi_cipher.c +++ b/drivers/staging/ccree/ssi_cipher.c @@ -904,6 +904,7 @@ static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req) scatterwalk_map_and_copy(req_ctx->backup_info, req->src, (req->nbytes - ivsize), ivsize, 0); req_ctx->is_giv = false; + req_ctx->backup_info = NULL; return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT); } diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c index 9c6f1200c130..eeb995307951 100644 --- a/drivers/staging/ccree/ssi_driver.c +++ b/drivers/staging/ccree/ssi_driver.c @@ -141,7 +141,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id) irr &= ~SSI_COMP_IRQ_MASK; complete_request(drvdata); } -#ifdef CC_SUPPORT_FIPS +#ifdef CONFIG_CRYPTO_FIPS /* TEE FIPS interrupt */ if (likely((irr & SSI_GPR0_IRQ_MASK) != 0)) { /* Mask interrupt - will be unmasked in Deferred service handler */ diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c index 13291aeaf350..e266a70a1b32 100644 --- a/drivers/staging/ccree/ssi_hash.c +++ b/drivers/staging/ccree/ssi_hash.c @@ -1781,7 +1781,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in) struct device *dev = &ctx->drvdata->plat_dev->dev; struct ahash_req_ctx *state = ahash_request_ctx(req); u32 tmp; - int rc; + int rc = 0; memcpy(&tmp, in, sizeof(u32)); if (tmp != CC_EXPORT_MAGIC) { @@ -1790,9 +1790,12 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in) } in += sizeof(u32); - rc = ssi_hash_init(state, ctx); - if (rc) - goto out; + /* call init() to allocate bufs if the user hasn't */ + if (!state->digest_buff) { + rc = ssi_hash_init(state, ctx); + if (rc) + goto out; + } dma_sync_single_for_cpu(dev, 
state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL); diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index 0b43db6371c6..c11c22bd6d13 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -484,8 +484,7 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s, struct comedi_cmd *cmd = &async->cmd; if (cmd->stop_src == TRIG_COUNT) { - unsigned int nscans = nsamples / cmd->scan_end_arg; - unsigned int scans_left = __comedi_nscans_left(s, nscans); + unsigned int scans_left = __comedi_nscans_left(s, cmd->stop_arg); unsigned int scan_pos = comedi_bytes_to_samples(s, async->scan_progress); unsigned long long samples_left = 0; diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index 398347fedc47..2cac160993bb 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -1284,6 +1284,8 @@ static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status) ack |= NISTC_INTA_ACK_AI_START; if (a_status & NISTC_AI_STATUS1_STOP) ack |= NISTC_INTA_ACK_AI_STOP; + if (a_status & NISTC_AI_STATUS1_OVER) + ack |= NISTC_INTA_ACK_AI_ERR; if (ack) ni_stc_writew(dev, ack, NISTC_INTA_ACK_REG); } diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c index 802f51e46405..171960568356 100644 --- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c +++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c @@ -642,7 +642,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev, /* Make sure D/A update mode is direct update */ outb(0, dev->iobase + DAQP_AUX_REG); - for (i = 0; i > insn->n; i++) { + for (i = 0; i < insn->n; i++) { unsigned int val = data[i]; int ret; diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c index 26017fe9df93..b83d17db06bd 100644 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c @@ -131,6 +131,8 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv, u16 fd_offset = dpaa2_fd_get_offset(fd); u32 fd_length = dpaa2_fd_get_len(fd); + ch->buf_count--; + skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); if (unlikely(!skb)) @@ -139,8 +141,6 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv, skb_reserve(skb, fd_offset); skb_put(skb, fd_length); - ch->buf_count--; - return skb; } @@ -178,8 +178,15 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, /* We build the skb around the first data buffer */ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); - if (unlikely(!skb)) - return NULL; + if (unlikely(!skb)) { + /* We still need to subtract the buffers used + * by this FD from our software counter + */ + while (!dpaa2_sg_is_final(&sgt[i]) && + i < DPAA2_ETH_MAX_SG_ENTRIES) + i++; + break; + } sg_offset = dpaa2_sg_get_offset(sge); skb_reserve(skb, sg_offset); @@ -308,7 +315,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch) } fd = dpaa2_dq_fd(dq); - fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq); + fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); fq->stats.frames++; fq->consume(priv, ch, fd, &ch->napi); @@ -1881,7 +1888,7 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv, queue.destination.id = fq->channel->dpcon_id; queue.destination.type = 
DPNI_DEST_DPCON; queue.destination.priority = 1; - queue.user_context = (u64)fq; + queue.user_context = (u64)(uintptr_t)fq; err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_RX, 0, fq->flowid, DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, @@ -1933,7 +1940,7 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv, queue.destination.id = fq->channel->dpcon_id; queue.destination.type = DPNI_DEST_DPCON; queue.destination.priority = 0; - queue.user_context = (u64)fq; + queue.user_context = (u64)(uintptr_t)fq; err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig index 504c987447f2..eee1c1b277fa 100644 --- a/drivers/staging/fsl-mc/bus/Kconfig +++ b/drivers/staging/fsl-mc/bus/Kconfig @@ -8,7 +8,7 @@ config FSL_MC_BUS bool "QorIQ DPAA2 fsl-mc bus driver" - depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86 || PPC))) + depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86_LOCAL_APIC || PPC))) select GENERIC_MSI_IRQ_DOMAIN help Driver to enable the bus infrastructure for the QorIQ DPAA2 diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c index f8096828f5b7..a609ec82daf3 100644 --- a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c +++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c @@ -76,7 +76,7 @@ static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d, if (d) return d; - if (unlikely(cpu >= num_possible_cpus())) + if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus()) return NULL; /* @@ -121,7 +121,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc) return NULL; /* check if CPU is out of range (-1 means any cpu) */ - if (desc->cpu >= num_possible_cpus()) { + if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) { kfree(obj); return NULL; } diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c index 123e4af58408..50260cb5056d 100644 --- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c +++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c @@ -75,6 +75,8 @@ int __init its_fsl_mc_msi_init(void) for (np = of_find_matching_node(NULL, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np)) + continue; if (!of_property_read_bool(np, "msi-controller")) continue; diff --git a/drivers/staging/fsl-mc/include/dpaa2-io.h b/drivers/staging/fsl-mc/include/dpaa2-io.h index c5646096c5d4..afc2d060d077 100644 --- a/drivers/staging/fsl-mc/include/dpaa2-io.h +++ b/drivers/staging/fsl-mc/include/dpaa2-io.h @@ -54,6 +54,8 @@ struct device; * for dequeue. */ +#define DPAA2_IO_ANY_CPU -1 + /** * struct dpaa2_io_desc - The DPIO descriptor * @receives_notifications: Use notificaton mode. Non-zero if the DPIO @@ -91,8 +93,8 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj); * @cb: The callback to be invoked when the notification arrives * @is_cdan: Zero for FQDAN, non-zero for CDAN * @id: FQID or channel ID, needed for rearm - * @desired_cpu: The cpu on which the notifications will show up. -1 means - * any CPU. + * @desired_cpu: The cpu on which the notifications will show up. Use + * DPAA2_IO_ANY_CPU if don't care * @dpio_id: The dpio index * @qman64: The 64-bit context value shows up in the FQDAN/CDAN. 
* @node: The list node diff --git a/drivers/staging/goldfish/Kconfig b/drivers/staging/goldfish/Kconfig index 4e094602437c..d293bbc22c79 100644 --- a/drivers/staging/goldfish/Kconfig +++ b/drivers/staging/goldfish/Kconfig @@ -4,6 +4,14 @@ config GOLDFISH_AUDIO ---help--- Emulated audio channel for the Goldfish Android Virtual Device +config GOLDFISH_SYNC + tristate "Goldfish AVD Sync Driver" + depends on GOLDFISH + depends on SW_SYNC + depends on SYNC_FILE + ---help--- + Emulated sync fences for the Goldfish Android Virtual Device + config MTD_GOLDFISH_NAND tristate "Goldfish NAND device" depends on GOLDFISH diff --git a/drivers/staging/goldfish/Makefile b/drivers/staging/goldfish/Makefile index dec34ad58162..3313fce4e940 100644 --- a/drivers/staging/goldfish/Makefile +++ b/drivers/staging/goldfish/Makefile @@ -4,3 +4,9 @@ obj-$(CONFIG_GOLDFISH_AUDIO) += goldfish_audio.o obj-$(CONFIG_MTD_GOLDFISH_NAND) += goldfish_nand.o + +# and sync + +ccflags-y := -Idrivers/staging/android +goldfish_sync-objs := goldfish_sync_timeline_fence.o goldfish_sync_timeline.o +obj-$(CONFIG_GOLDFISH_SYNC) += goldfish_sync.o diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c index bd559956f199..0bb0ee2e691f 100644 --- a/drivers/staging/goldfish/goldfish_audio.c +++ b/drivers/staging/goldfish/goldfish_audio.c @@ -28,6 +28,7 @@ #include #include #include +#include MODULE_AUTHOR("Google, Inc."); MODULE_DESCRIPTION("Android QEMU Audio Driver"); @@ -116,6 +117,7 @@ static ssize_t goldfish_audio_read(struct file *fp, char __user *buf, size_t count, loff_t *pos) { struct goldfish_audio *data = fp->private_data; + unsigned long irq_flags; int length; int result = 0; @@ -129,6 +131,10 @@ static ssize_t goldfish_audio_read(struct file *fp, char __user *buf, wait_event_interruptible(data->wait, data->buffer_status & AUDIO_INT_READ_BUFFER_FULL); + spin_lock_irqsave(&data->lock, irq_flags); + data->buffer_status &= ~AUDIO_INT_READ_BUFFER_FULL; + spin_unlock_irqrestore(&data->lock, irq_flags); + length = AUDIO_READ(data, AUDIO_READ_BUFFER_AVAILABLE); /* copy data to user space */ @@ -351,12 +357,19 @@ static const struct of_device_id goldfish_audio_of_match[] = { }; MODULE_DEVICE_TABLE(of, goldfish_audio_of_match); +static const struct acpi_device_id goldfish_audio_acpi_match[] = { + { "GFSH0005", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, goldfish_audio_acpi_match); + static struct platform_driver goldfish_audio_driver = { .probe = goldfish_audio_probe, .remove = goldfish_audio_remove, .driver = { .name = "goldfish_audio", .of_match_table = goldfish_audio_of_match, + .acpi_match_table = ACPI_PTR(goldfish_audio_acpi_match), } }; diff --git a/drivers/staging/goldfish/goldfish_sync_timeline.c b/drivers/staging/goldfish/goldfish_sync_timeline.c new file mode 100644 index 000000000000..880d6e2e5b34 --- /dev/null +++ b/drivers/staging/goldfish/goldfish_sync_timeline.c @@ -0,0 +1,962 @@ +/* + * Copyright (C) 2016 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include "goldfish_sync_timeline_fence.h" + +#define ERR(...) printk(KERN_ERR __VA_ARGS__); + +#define INFO(...) printk(KERN_INFO __VA_ARGS__); + +#define DPRINT(...) pr_debug(__VA_ARGS__); + +#define DTRACE() DPRINT("%s: enter", __func__) + +/* The Goldfish sync driver is designed to provide an interface + * between the underlying host's sync device and the kernel's + * fence sync framework. + * The purpose of the device/driver is to enable lightweight + * creation and signaling of timelines and fences + * in order to synchronize the guest with host-side graphics events. + * + * Each time the interrupt trips, the driver + * may perform a sync operation. + */ + +/* The operations are: */ + +/* Ready signal - used to mark when irq should lower */ +#define CMD_SYNC_READY 0 + +/* Create a new timeline. writes timeline handle */ +#define CMD_CREATE_SYNC_TIMELINE 1 + +/* Create a fence object. reads timeline handle and time argument. + * Writes fence fd to the SYNC_REG_HANDLE register. */ +#define CMD_CREATE_SYNC_FENCE 2 + +/* Increments timeline. reads timeline handle and time argument */ +#define CMD_SYNC_TIMELINE_INC 3 + +/* Destroys a timeline. reads timeline handle */ +#define CMD_DESTROY_SYNC_TIMELINE 4 + +/* Starts a wait on the host with + * the given glsync object and sync thread handle. */ +#define CMD_TRIGGER_HOST_WAIT 5 + +/* The register layout is: */ + +#define SYNC_REG_BATCH_COMMAND 0x00 /* host->guest batch commands */ +#define SYNC_REG_BATCH_GUESTCOMMAND 0x04 /* guest->host batch commands */ +#define SYNC_REG_BATCH_COMMAND_ADDR 0x08 /* communicate physical address of host->guest batch commands */ +#define SYNC_REG_BATCH_COMMAND_ADDR_HIGH 0x0c /* 64-bit part */ +#define SYNC_REG_BATCH_GUESTCOMMAND_ADDR 0x10 /* communicate physical address of guest->host commands */ +#define SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH 0x14 /* 64-bit part */ +#define SYNC_REG_INIT 0x18 /* signals that the device has been probed */ + +/* There is an ioctl associated with goldfish sync driver. + * Make it conflict with ioctls that are not likely to be used + * in the emulator. + * + * '@' 00-0F linux/radeonfb.h conflict! + * '@' 00-0F drivers/video/aty/aty128fb.c conflict! + */ +#define GOLDFISH_SYNC_IOC_MAGIC '@' + +#define GOLDFISH_SYNC_IOC_QUEUE_WORK _IOWR(GOLDFISH_SYNC_IOC_MAGIC, 0, struct goldfish_sync_ioctl_info) + +/* The above definitions (command codes, register layout, ioctl definitions) + * need to be in sync with the following files: + * + * Host-side (emulator): + * external/qemu/android/emulation/goldfish_sync.h + * external/qemu-android/hw/misc/goldfish_sync.c + * + * Guest-side (system image): + * device/generic/goldfish-opengl/system/egl/goldfish_sync.h + * device/generic/goldfish/ueventd.ranchu.rc + * platform/build/target/board/generic/sepolicy/file_contexts + */ +struct goldfish_sync_hostcmd { + /* sorted for alignment */ + uint64_t handle; + uint64_t hostcmd_handle; + uint32_t cmd; + uint32_t time_arg; +}; + +struct goldfish_sync_guestcmd { + uint64_t host_command; /* uint64_t for alignment */ + uint64_t glsync_handle; + uint64_t thread_handle; + uint64_t guest_timeline_handle; +}; + +#define GOLDFISH_SYNC_MAX_CMDS 32 + +struct goldfish_sync_state { + char __iomem *reg_base; + int irq; + + /* Spinlock protects |to_do| / |to_do_end|.
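To put the ioctl definition above in context: a guest-side user of this driver opens the device node (expected to be /dev/goldfish_sync, since the driver registers a miscdevice of that name later in this file) and issues GOLDFISH_SYNC_IOC_QUEUE_WORK with the struct goldfish_sync_ioctl_info defined further down. A hypothetical userspace caller, with made-up handle values that on a real system would come from the EGL stack:

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

/* userspace mirror of the definitions in this file */
struct goldfish_sync_ioctl_info {
	uint64_t host_glsync_handle_in;
	uint64_t host_syncthread_handle_in;
	int fence_fd_out;
};

#define GOLDFISH_SYNC_IOC_MAGIC '@'
#define GOLDFISH_SYNC_IOC_QUEUE_WORK \
	_IOWR(GOLDFISH_SYNC_IOC_MAGIC, 0, struct goldfish_sync_ioctl_info)

int main(void)
{
	struct goldfish_sync_ioctl_info info = {
		.host_glsync_handle_in = 0x1234,	/* made-up handles; real */
		.host_syncthread_handle_in = 0x5678,	/* ones come from EGL    */
		.fence_fd_out = -1,
	};
	int fd = open("/dev/goldfish_sync", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, GOLDFISH_SYNC_IOC_QUEUE_WORK, &info) == 0)
		printf("got fence fd %d\n", info.fence_fd_out);
	close(fd);
	return 0;
}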
*/ + spinlock_t lock; + /* |mutex_lock| protects all concurrent access + * to timelines for both kernel and user space. */ + struct mutex mutex_lock; + + /* Buffer holding commands issued from host. */ + struct goldfish_sync_hostcmd to_do[GOLDFISH_SYNC_MAX_CMDS]; + uint32_t to_do_end; + + /* Addresses for the reading or writing + * of individual commands. The host can directly write + * to |batch_hostcmd| (and then this driver immediately + * copies contents to |to_do|). This driver either replies + * through |batch_hostcmd| or simply issues a + * guest->host command through |batch_guestcmd|. + */ + struct goldfish_sync_hostcmd *batch_hostcmd; + struct goldfish_sync_guestcmd *batch_guestcmd; + + /* Used to give this struct itself to a work queue + * function for executing actual sync commands. */ + struct work_struct work_item; +}; + +static struct goldfish_sync_state global_sync_state[1]; + +struct goldfish_sync_timeline_obj { + struct goldfish_sync_timeline *sync_tl; + uint32_t current_time; + /* We need to be careful about when we deallocate + * this |goldfish_sync_timeline_obj| struct. + * In order to ensure proper cleanup, we need to + * consider the triggered host-side wait that may + * still be in flight when the guest close()'s a + * goldfish_sync device's sync context fd (and + * destroys the |sync_tl| field above). + * The host-side wait may raise IRQ + * and tell the kernel to increment the timeline _after_ + * the |sync_tl| has already been set to null. + * + * From observations on OpenGL apps and CTS tests, this + * happens at some very low probability upon context + * destruction or process close, but it does happen + * and it needs to be handled properly. Otherwise, + * if we clean up the surrounding |goldfish_sync_timeline_obj| + * too early, any |handle| field of any host->guest command + * might not even point to a null |sync_tl| field, + * but to garbage memory or even a reclaimed |sync_tl|. + * If we do not count such "pending waits" and kfree the object + * immediately upon |goldfish_sync_timeline_destroy|, + * we might get mysterious RCU stalls after running a long + * time because the garbage memory that is being read + * happens to be interpretable as a |spinlock_t| struct + * that is currently in the locked state. + * + * To track when to free the |goldfish_sync_timeline_obj| + * itself, we maintain a kref. + * The kref essentially counts the timeline itself plus + * the number of waits in flight. kref_init/kref_put + * are issued on + * |goldfish_sync_timeline_create|/|goldfish_sync_timeline_destroy| + * and kref_get/kref_put are issued on + * |goldfish_sync_fence_create|/|goldfish_sync_timeline_inc|. + * + * The timeline is destroyed after reference count + * reaches zero, which would happen after + * |goldfish_sync_timeline_destroy| and all pending + * |goldfish_sync_timeline_inc|'s are fulfilled. + * + * NOTE (1): We assume that |fence_create| and + * |timeline_inc| calls are 1:1, otherwise the kref scheme + * will not work. This is a valid assumption as long + * as the host-side virtual device implementation + * does not insert any timeline increments + * that we did not trigger from here. + * + * NOTE (2): The use of kref by itself requires no locks, + * but this does not mean everything works without locks. + * Related timeline operations do require a lock of some sort, + * or at least are not proven to work without it.
+ * In particular, we assume that all the operations + * done on the |kref| field above are done in contexts where + * |global_sync_state->mutex_lock| is held. Do not + * remove that lock until everything is proven to work + * without it!!! */ + struct kref kref; +}; + +/* We will call |delete_timeline_obj| when the last reference count + * of the kref is decremented. This deletes the sync + * timeline object along with the wrapper itself. */ +static void delete_timeline_obj(struct kref* kref) { + struct goldfish_sync_timeline_obj* obj = + container_of(kref, struct goldfish_sync_timeline_obj, kref); + + goldfish_sync_timeline_put_internal(obj->sync_tl); + obj->sync_tl = NULL; + kfree(obj); +} + +static uint64_t gensym_ctr; +static void gensym(char *dst) +{ + sprintf(dst, "goldfish_sync:gensym:%llu", gensym_ctr); + gensym_ctr++; +} + +/* |goldfish_sync_timeline_create| assumes that |global_sync_state->mutex_lock| + * is held. */ +static struct goldfish_sync_timeline_obj* +goldfish_sync_timeline_create(void) +{ + + char timeline_name[256]; + struct goldfish_sync_timeline *res_sync_tl = NULL; + struct goldfish_sync_timeline_obj *res; + + DTRACE(); + + gensym(timeline_name); + + res_sync_tl = goldfish_sync_timeline_create_internal(timeline_name); + if (!res_sync_tl) { + ERR("Failed to create goldfish_sw_sync timeline."); + return NULL; + } + + res = kzalloc(sizeof(struct goldfish_sync_timeline_obj), GFP_KERNEL); + res->sync_tl = res_sync_tl; + res->current_time = 0; + kref_init(&res->kref); + + DPRINT("new timeline_obj=0x%p", res); + return res; +} + +/* |goldfish_sync_fence_create| assumes that |global_sync_state->mutex_lock| + * is held. */ +static int +goldfish_sync_fence_create(struct goldfish_sync_timeline_obj *obj, + uint32_t val) +{ + + int fd; + char fence_name[256]; + struct sync_pt *syncpt = NULL; + struct sync_file *sync_file_obj = NULL; + struct goldfish_sync_timeline *tl; + + DTRACE(); + + if (!obj) return -1; + + tl = obj->sync_tl; + + syncpt = goldfish_sync_pt_create_internal( + tl, sizeof(struct sync_pt) + 4, val); + if (!syncpt) { + ERR("could not create sync point! " + "goldfish_sync_timeline=0x%p val=%d", + tl, val); + return -1; + } + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + ERR("could not get unused fd for sync fence. " + "errno=%d", fd); + goto err_cleanup_pt; + } + + gensym(fence_name); + + sync_file_obj = sync_file_create(&syncpt->base); + if (!sync_file_obj) { + ERR("could not create sync fence! " + "goldfish_sync_timeline=0x%p val=%d sync_pt=0x%p", + tl, val, syncpt); + goto err_cleanup_fd_pt; + } + + DPRINT("installing sync fence into fd %d sync_file_obj=0x%p", + fd, sync_file_obj); + fd_install(fd, sync_file_obj->file); + kref_get(&obj->kref); + + return fd; + +err_cleanup_fd_pt: + put_unused_fd(fd); +err_cleanup_pt: + dma_fence_put(&syncpt->base); + return -1; +} + +/* |goldfish_sync_timeline_inc| assumes that |global_sync_state->mutex_lock| + * is held. */ +static void +goldfish_sync_timeline_inc(struct goldfish_sync_timeline_obj *obj, uint32_t inc) +{ + DTRACE(); + /* Just give up if someone else nuked the timeline. + * Whoever it was won't care that it doesn't get signaled. */ + if (!obj) return; + + DPRINT("timeline_obj=0x%p", obj); + goldfish_sync_timeline_signal_internal(obj->sync_tl, inc); + DPRINT("incremented timeline. increment max_time"); + obj->current_time += inc; + + /* Here, we will end up deleting the timeline object if it + * turns out that this call was a pending increment after + * |goldfish_sync_timeline_destroy| was called.
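The invariant described in the long comment above (refcount = one for the timeline itself plus one per in-flight host wait) can be modelled compactly in userspace. The following stand-alone C11 sketch mirrors the kref_init/kref_get/kref_put choreography; it is an illustration of the counting scheme only, not driver code:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct timeline_obj {
	atomic_int refs;	/* 1 for the timeline + 1 per pending host wait */
};

static struct timeline_obj *tl_create(void)	/* ~ kref_init */
{
	struct timeline_obj *o = calloc(1, sizeof(*o));

	if (o)
		atomic_init(&o->refs, 1);
	return o;
}

static void tl_get(struct timeline_obj *o)	/* ~ kref_get in fence_create */
{
	atomic_fetch_add(&o->refs, 1);
}

static void tl_put(struct timeline_obj *o)	/* ~ kref_put(.., delete_timeline_obj) */
{
	if (atomic_fetch_sub(&o->refs, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(o);
	}
}

int main(void)
{
	struct timeline_obj *tl = tl_create();

	tl_get(tl);	/* fence created: a host wait is now in flight */
	tl_put(tl);	/* guest closes its context: timeline_destroy   */
	tl_put(tl);	/* late timeline_inc arrives: object freed here */
	return 0;
}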
*/ + kref_put(&obj->kref, delete_timeline_obj); + DPRINT("done"); +} + +/* |goldfish_sync_timeline_destroy| assumes + * that |global_sync_state->mutex_lock| is held. */ +static void +goldfish_sync_timeline_destroy(struct goldfish_sync_timeline_obj *obj) +{ + DTRACE(); + /* See description of |goldfish_sync_timeline_obj| for why we + * should not immediately destroy |obj| */ + kref_put(&obj->kref, delete_timeline_obj); +} + +static inline void +goldfish_sync_cmd_queue(struct goldfish_sync_state *sync_state, + uint32_t cmd, + uint64_t handle, + uint32_t time_arg, + uint64_t hostcmd_handle) +{ + struct goldfish_sync_hostcmd *to_add; + + DTRACE(); + + BUG_ON(sync_state->to_do_end == GOLDFISH_SYNC_MAX_CMDS); + + to_add = &sync_state->to_do[sync_state->to_do_end]; + + to_add->cmd = cmd; + to_add->handle = handle; + to_add->time_arg = time_arg; + to_add->hostcmd_handle = hostcmd_handle; + + sync_state->to_do_end += 1; +} + +static inline void +goldfish_sync_hostcmd_reply(struct goldfish_sync_state *sync_state, + uint32_t cmd, + uint64_t handle, + uint32_t time_arg, + uint64_t hostcmd_handle) +{ + unsigned long irq_flags; + struct goldfish_sync_hostcmd *batch_hostcmd = + sync_state->batch_hostcmd; + + DTRACE(); + + spin_lock_irqsave(&sync_state->lock, irq_flags); + + batch_hostcmd->cmd = cmd; + batch_hostcmd->handle = handle; + batch_hostcmd->time_arg = time_arg; + batch_hostcmd->hostcmd_handle = hostcmd_handle; + writel(0, sync_state->reg_base + SYNC_REG_BATCH_COMMAND); + + spin_unlock_irqrestore(&sync_state->lock, irq_flags); +} + +static inline void +goldfish_sync_send_guestcmd(struct goldfish_sync_state *sync_state, + uint32_t cmd, + uint64_t glsync_handle, + uint64_t thread_handle, + uint64_t timeline_handle) +{ + unsigned long irq_flags; + struct goldfish_sync_guestcmd *batch_guestcmd = + sync_state->batch_guestcmd; + + DTRACE(); + + spin_lock_irqsave(&sync_state->lock, irq_flags); + + batch_guestcmd->host_command = (uint64_t)cmd; + batch_guestcmd->glsync_handle = (uint64_t)glsync_handle; + batch_guestcmd->thread_handle = (uint64_t)thread_handle; + batch_guestcmd->guest_timeline_handle = (uint64_t)timeline_handle; + writel(0, sync_state->reg_base + SYNC_REG_BATCH_GUESTCOMMAND); + + spin_unlock_irqrestore(&sync_state->lock, irq_flags); +} + +/* |goldfish_sync_interrupt| handles IRQs raised by the virtual device. + * In the context of OpenGL, this interrupt will fire whenever we need + * to signal a fence fd in the guest, with the command + * |CMD_SYNC_TIMELINE_INC|. + * However, because this function will be called in an interrupt context, + * it is necessary to do the actual work of signaling outside of interrupt context. + * The shared work queue is used for this purpose. At the end, when + * all pending commands have been intercepted by the interrupt handler, + * we call |schedule_work|, which will later run the actual + * desired sync command in |goldfish_sync_work_item_fn|.
+ */ +static irqreturn_t goldfish_sync_interrupt(int irq, void *dev_id) +{ + + struct goldfish_sync_state *sync_state = dev_id; + + uint32_t nextcmd; + uint32_t command_r; + uint64_t handle_rw; + uint32_t time_r; + uint64_t hostcmd_handle_rw; + + int count = 0; + + DTRACE(); + + sync_state = dev_id; + + spin_lock(&sync_state->lock); + + for (;;) { + + readl(sync_state->reg_base + SYNC_REG_BATCH_COMMAND); + nextcmd = sync_state->batch_hostcmd->cmd; + + if (nextcmd == 0) + break; + + command_r = nextcmd; + handle_rw = sync_state->batch_hostcmd->handle; + time_r = sync_state->batch_hostcmd->time_arg; + hostcmd_handle_rw = sync_state->batch_hostcmd->hostcmd_handle; + + goldfish_sync_cmd_queue( + sync_state, + command_r, + handle_rw, + time_r, + hostcmd_handle_rw); + + count++; + } + + spin_unlock(&sync_state->lock); + + schedule_work(&sync_state->work_item); + + return (count == 0) ? IRQ_NONE : IRQ_HANDLED; +} + +/* |goldfish_sync_work_item_fn| does the actual work of servicing + * host->guest sync commands. This function is triggered whenever + * the IRQ for the goldfish sync device is raised. Once it starts + * running, it grabs the contents of the buffer containing the + * commands it needs to execute (there may be multiple, because + * our IRQ is active high and not edge triggered), and then + * runs all of them one after the other. + */ +static void goldfish_sync_work_item_fn(struct work_struct *input) +{ + + struct goldfish_sync_state *sync_state; + int sync_fence_fd; + + struct goldfish_sync_timeline_obj *timeline; + uint64_t timeline_ptr; + + uint64_t hostcmd_handle; + + uint32_t cmd; + uint64_t handle; + uint32_t time_arg; + + struct goldfish_sync_hostcmd *todo; + uint32_t todo_end; + + unsigned long irq_flags; + + struct goldfish_sync_hostcmd to_run[GOLDFISH_SYNC_MAX_CMDS]; + uint32_t i = 0; + + sync_state = container_of(input, struct goldfish_sync_state, work_item); + + mutex_lock(&sync_state->mutex_lock); + + spin_lock_irqsave(&sync_state->lock, irq_flags); { + + todo_end = sync_state->to_do_end; + + DPRINT("num sync todos: %u", sync_state->to_do_end); + + for (i = 0; i < todo_end; i++) + to_run[i] = sync_state->to_do[i]; + + /* We expect that commands will come in at a slow enough rate + * so that incoming items will not be more than + * GOLDFISH_SYNC_MAX_CMDS. + * + * This is because the way the sync device is used, + * it's only for managing buffer data transfers per frame, + * with a sequential dependency between putting things in + * to_do and taking them out. Once a set of commands is + * queued up in to_do, the user of the device waits for + * them to be processed before queuing additional commands, + * which limits the rate at which commands come in + * to the rate at which we take them out here. + * + * We also don't expect more than MAX_CMDS to be issued + * at once; there is a correspondence between + * which buffers need swapping to the (display / buffer queue) + * to particular commands, and we don't expect there to be + * enough display or buffer queues in operation at once + * to overrun GOLDFISH_SYNC_MAX_CMDS. 
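The interrupt handler above is the canonical top-half/bottom-half split: the hard-irq half only copies device data into a queue under a spinlock and defers everything that may sleep to a workqueue. A stripped-down skeleton of that shape follows; the demo_* names and stub device accessors are placeholders for the real MMIO reads, and the work struct is assumed to have been set up with INIT_WORK() at probe time. It shows structure only and is not tied to this device:

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define NCMDS 16

struct demo_state {
	spinlock_t lock;		/* protects pending[] and n */
	int pending[NCMDS];
	int n;
	struct work_struct work;	/* INIT_WORK(&s->work, demo_work) at probe */
};

static bool demo_has_data(void) { return false; }	/* stub for an MMIO poll */
static int demo_read_cmd(void) { return 0; }		/* stub for an MMIO read */

static irqreturn_t demo_irq(int irq, void *cookie)
{
	struct demo_state *s = cookie;
	int grabbed = 0;

	spin_lock(&s->lock);		/* hard-irq context: must not sleep */
	while (s->n < NCMDS && demo_has_data()) {
		s->pending[s->n++] = demo_read_cmd();
		grabbed++;
	}
	spin_unlock(&s->lock);

	if (grabbed)
		schedule_work(&s->work);	/* defer the sleepable part */
	return grabbed ? IRQ_HANDLED : IRQ_NONE;
}

static void demo_work(struct work_struct *w)
{
	struct demo_state *s = container_of(w, struct demo_state, work);
	int copy[NCMDS], i, n;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);	/* snapshot, then drop the lock */
	n = s->n;
	for (i = 0; i < n; i++)
		copy[i] = s->pending[i];
	s->n = 0;
	spin_unlock_irqrestore(&s->lock, flags);

	for (i = 0; i < n; i++)
		(void)copy[i];	/* act on each command; sleeping is allowed here */
}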
+ */ + sync_state->to_do_end = 0; + + } spin_unlock_irqrestore(&sync_state->lock, irq_flags); + + for (i = 0; i < todo_end; i++) { + DPRINT("todo index: %u", i); + + todo = &to_run[i]; + + cmd = todo->cmd; + + handle = (uint64_t)todo->handle; + time_arg = todo->time_arg; + hostcmd_handle = (uint64_t)todo->hostcmd_handle; + + DTRACE(); + + timeline = (struct goldfish_sync_timeline_obj *)(uintptr_t)handle; + + switch (cmd) { + case CMD_SYNC_READY: + break; + case CMD_CREATE_SYNC_TIMELINE: + DPRINT("exec CMD_CREATE_SYNC_TIMELINE: " + "handle=0x%llx time_arg=%d", + handle, time_arg); + timeline = goldfish_sync_timeline_create(); + timeline_ptr = (uintptr_t)timeline; + goldfish_sync_hostcmd_reply(sync_state, CMD_CREATE_SYNC_TIMELINE, + timeline_ptr, + 0, + hostcmd_handle); + DPRINT("sync timeline created: %p", timeline); + break; + case CMD_CREATE_SYNC_FENCE: + DPRINT("exec CMD_CREATE_SYNC_FENCE: " + "handle=0x%llx time_arg=%d", + handle, time_arg); + sync_fence_fd = goldfish_sync_fence_create(timeline, time_arg); + goldfish_sync_hostcmd_reply(sync_state, CMD_CREATE_SYNC_FENCE, + sync_fence_fd, + 0, + hostcmd_handle); + break; + case CMD_SYNC_TIMELINE_INC: + DPRINT("exec CMD_SYNC_TIMELINE_INC: " + "handle=0x%llx time_arg=%d", + handle, time_arg); + goldfish_sync_timeline_inc(timeline, time_arg); + break; + case CMD_DESTROY_SYNC_TIMELINE: + DPRINT("exec CMD_DESTROY_SYNC_TIMELINE: " + "handle=0x%llx time_arg=%d", + handle, time_arg); + goldfish_sync_timeline_destroy(timeline); + break; + } + DPRINT("Done executing sync command"); + } + mutex_unlock(&sync_state->mutex_lock); +} + +/* Guest-side interface: file operations */ + +/* Goldfish sync context and ioctl info. + * + * When a sync context is created by open()-ing the goldfish sync device, we + * create a sync context (|goldfish_sync_context|). + * + * Currently, the only data required to track is the sync timeline itself + * along with the current time, which are all packed up in the + * |goldfish_sync_timeline_obj| field. We use a |goldfish_sync_context| + * as the filp->private_data. + * + * Next, when a sync context user requests that work be queued and a fence + * fd provided, we use the |goldfish_sync_ioctl_info| struct, which holds + * information about which host handles to touch for this particular + * queue-work operation. We need to know about the host-side sync thread + * and the particular host-side GLsync object. We also possibly write out + * a file descriptor. 
+ */ +struct goldfish_sync_context { + struct goldfish_sync_timeline_obj *timeline; +}; + +struct goldfish_sync_ioctl_info { + uint64_t host_glsync_handle_in; + uint64_t host_syncthread_handle_in; + int fence_fd_out; +}; + +static int goldfish_sync_open(struct inode *inode, struct file *file) +{ + + struct goldfish_sync_context *sync_context; + + DTRACE(); + + mutex_lock(&global_sync_state->mutex_lock); + + sync_context = kzalloc(sizeof(struct goldfish_sync_context), GFP_KERNEL); + + if (sync_context == NULL) { + ERR("Creation of goldfish sync context failed!"); + mutex_unlock(&global_sync_state->mutex_lock); + return -ENOMEM; + } + + sync_context->timeline = NULL; + + file->private_data = sync_context; + + DPRINT("successfully created a sync context @0x%p", sync_context); + + mutex_unlock(&global_sync_state->mutex_lock); + + return 0; +} + +static int goldfish_sync_release(struct inode *inode, struct file *file) +{ + + struct goldfish_sync_context *sync_context; + + DTRACE(); + + mutex_lock(&global_sync_state->mutex_lock); + + sync_context = file->private_data; + + if (sync_context->timeline) + goldfish_sync_timeline_destroy(sync_context->timeline); + + sync_context->timeline = NULL; + + kfree(sync_context); + + mutex_unlock(&global_sync_state->mutex_lock); + + return 0; +} + +/* |goldfish_sync_ioctl| is the guest-facing interface of goldfish sync + * and is used in conjunction with eglCreateSyncKHR to queue up the + * actual work of waiting for the EGL sync command to complete, + * possibly returning a fence fd to the guest. + */ +static long goldfish_sync_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + struct goldfish_sync_context *sync_context_data; + struct goldfish_sync_timeline_obj *timeline; + int fd_out; + struct goldfish_sync_ioctl_info ioctl_data; + + DTRACE(); + + sync_context_data = file->private_data; + fd_out = -1; + + switch (cmd) { + case GOLDFISH_SYNC_IOC_QUEUE_WORK: + + DPRINT("exec GOLDFISH_SYNC_IOC_QUEUE_WORK"); + + mutex_lock(&global_sync_state->mutex_lock); + + if (copy_from_user(&ioctl_data, + (void __user *)arg, + sizeof(ioctl_data))) { + ERR("Failed to copy memory for ioctl_data from user."); + mutex_unlock(&global_sync_state->mutex_lock); + return -EFAULT; + } + + if (ioctl_data.host_syncthread_handle_in == 0) { + DPRINT("Error: zero host syncthread handle!!!"); + mutex_unlock(&global_sync_state->mutex_lock); + return -EFAULT; + } + + if (!sync_context_data->timeline) { + DPRINT("no timeline yet, create one."); + sync_context_data->timeline = goldfish_sync_timeline_create(); + DPRINT("timeline: 0x%p", &sync_context_data->timeline); + } + + timeline = sync_context_data->timeline; + fd_out = goldfish_sync_fence_create(timeline, + timeline->current_time + 1); + DPRINT("Created fence with fd %d and current time %u (timeline: 0x%p)", + fd_out, + sync_context_data->timeline->current_time + 1, + sync_context_data->timeline); + + ioctl_data.fence_fd_out = fd_out; + + if (copy_to_user((void __user *)arg, + &ioctl_data, + sizeof(ioctl_data))) { + DPRINT("Error, could not copy to user!!!"); + + sys_close(fd_out); + /* We won't be doing an increment, kref_put immediately. */ + kref_put(&timeline->kref, delete_timeline_obj); + mutex_unlock(&global_sync_state->mutex_lock); + return -EFAULT; + } + + /* We are now about to trigger a host-side wait; + * accumulate on |pending_waits|.
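On the consumer side, the fence_fd_out handed back by the ioctl above is a sync_file descriptor, so userspace can wait for the host-side GL work to finish with an ordinary poll(): a sync_file fd reports POLLIN once its fence signals. A hedged sketch of that consumer, where fence_fd is assumed to be the value from goldfish_sync_ioctl_info.fence_fd_out:

#include <poll.h>

static int wait_on_fence(int fence_fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fence_fd, .events = POLLIN };

	/* POLLIN is raised once the fence behind the sync_file signals */
	if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN))
		return 0;	/* host-side GL work finished */
	return -1;		/* timeout or error */
}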
*/ + goldfish_sync_send_guestcmd(global_sync_state, + CMD_TRIGGER_HOST_WAIT, + ioctl_data.host_glsync_handle_in, + ioctl_data.host_syncthread_handle_in, + (uint64_t)(uintptr_t)(sync_context_data->timeline)); + + mutex_unlock(&global_sync_state->mutex_lock); + return 0; + default: + return -ENOTTY; + } +} + +static const struct file_operations goldfish_sync_fops = { + .owner = THIS_MODULE, + .open = goldfish_sync_open, + .release = goldfish_sync_release, + .unlocked_ioctl = goldfish_sync_ioctl, + .compat_ioctl = goldfish_sync_ioctl, +}; + +static struct miscdevice goldfish_sync_device = { + .name = "goldfish_sync", + .fops = &goldfish_sync_fops, +}; + + +static bool setup_verify_batch_cmd_addr(struct goldfish_sync_state *sync_state, + void *batch_addr, + uint32_t addr_offset, + uint32_t addr_offset_high) +{ + uint64_t batch_addr_phys; + uint32_t batch_addr_phys_test_lo; + uint32_t batch_addr_phys_test_hi; + + if (!batch_addr) { + ERR("Could not use batch command address!"); + return false; + } + + batch_addr_phys = virt_to_phys(batch_addr); + writel((uint32_t)(batch_addr_phys), + sync_state->reg_base + addr_offset); + writel((uint32_t)(batch_addr_phys >> 32), + sync_state->reg_base + addr_offset_high); + + batch_addr_phys_test_lo = + readl(sync_state->reg_base + addr_offset); + batch_addr_phys_test_hi = + readl(sync_state->reg_base + addr_offset_high); + + if (virt_to_phys(batch_addr) != + (((uint64_t)batch_addr_phys_test_hi << 32) | + batch_addr_phys_test_lo)) { + ERR("Invalid batch command address!"); + return false; + } + + return true; +} + +int goldfish_sync_probe(struct platform_device *pdev) +{ + struct resource *ioresource; + struct goldfish_sync_state *sync_state = global_sync_state; + int status; + + DTRACE(); + + sync_state->to_do_end = 0; + + spin_lock_init(&sync_state->lock); + mutex_init(&sync_state->mutex_lock); + + platform_set_drvdata(pdev, sync_state); + + ioresource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (ioresource == NULL) { + ERR("platform_get_resource failed"); + return -ENODEV; + } + + sync_state->reg_base = + devm_ioremap(&pdev->dev, ioresource->start, PAGE_SIZE); + if (sync_state->reg_base == NULL) { + ERR("Could not ioremap"); + return -ENOMEM; + } + + sync_state->irq = platform_get_irq(pdev, 0); + if (sync_state->irq < 0) { + ERR("Could not platform_get_irq"); + return -ENODEV; + } + + status = devm_request_irq(&pdev->dev, + sync_state->irq, + goldfish_sync_interrupt, + IRQF_SHARED, + pdev->name, + sync_state); + if (status) { + ERR("request_irq failed"); + return -ENODEV; + } + + INIT_WORK(&sync_state->work_item, + goldfish_sync_work_item_fn); + + misc_register(&goldfish_sync_device); + + /* Obtain addresses for batch send/recv of commands. 
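setup_verify_batch_cmd_addr() above publishes a 64-bit physical address through two 32-bit registers and then reads both halves back to confirm the device latched them. The split/reassemble arithmetic in isolation, as a small stand-alone check with an arbitrary example address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t phys = 0x1234ABCD5678ULL;	/* arbitrary example address */

	/* what the two writel() calls publish */
	uint32_t lo = (uint32_t)phys;		/* ..._ADDR register      */
	uint32_t hi = (uint32_t)(phys >> 32);	/* ..._ADDR_HIGH register */

	/* what the two readl() calls must reassemble to */
	uint64_t readback = ((uint64_t)hi << 32) | lo;

	printf("%s\n", readback == phys ? "address latched" : "mismatch");
	return 0;
}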
*/ + { + struct goldfish_sync_hostcmd *batch_addr_hostcmd; + struct goldfish_sync_guestcmd *batch_addr_guestcmd; + + batch_addr_hostcmd = + devm_kzalloc(&pdev->dev, sizeof(struct goldfish_sync_hostcmd), + GFP_KERNEL); + batch_addr_guestcmd = + devm_kzalloc(&pdev->dev, sizeof(struct goldfish_sync_guestcmd), + GFP_KERNEL); + + if (!setup_verify_batch_cmd_addr(sync_state, + batch_addr_hostcmd, + SYNC_REG_BATCH_COMMAND_ADDR, + SYNC_REG_BATCH_COMMAND_ADDR_HIGH)) { + ERR("goldfish_sync: Could not setup batch command address"); + return -ENODEV; + } + + if (!setup_verify_batch_cmd_addr(sync_state, + batch_addr_guestcmd, + SYNC_REG_BATCH_GUESTCOMMAND_ADDR, + SYNC_REG_BATCH_GUESTCOMMAND_ADDR_HIGH)) { + ERR("goldfish_sync: Could not setup batch guest command address"); + return -ENODEV; + } + + sync_state->batch_hostcmd = batch_addr_hostcmd; + sync_state->batch_guestcmd = batch_addr_guestcmd; + } + + INFO("goldfish_sync: Initialized goldfish sync device"); + + writel(0, sync_state->reg_base + SYNC_REG_INIT); + + return 0; +} + +static int goldfish_sync_remove(struct platform_device *pdev) +{ + struct goldfish_sync_state *sync_state = global_sync_state; + + DTRACE(); + + misc_deregister(&goldfish_sync_device); + memset(sync_state, 0, sizeof(struct goldfish_sync_state)); + return 0; +} + +static const struct of_device_id goldfish_sync_of_match[] = { + { .compatible = "google,goldfish-sync", }, + {}, +}; +MODULE_DEVICE_TABLE(of, goldfish_sync_of_match); + +static const struct acpi_device_id goldfish_sync_acpi_match[] = { + { "GFSH0006", 0 }, + { }, +}; + +MODULE_DEVICE_TABLE(acpi, goldfish_sync_acpi_match); + +static struct platform_driver goldfish_sync = { + .probe = goldfish_sync_probe, + .remove = goldfish_sync_remove, + .driver = { + .name = "goldfish_sync", + .of_match_table = goldfish_sync_of_match, + .acpi_match_table = ACPI_PTR(goldfish_sync_acpi_match), + } +}; + +module_platform_driver(goldfish_sync); + +MODULE_AUTHOR("Google, Inc."); +MODULE_DESCRIPTION("Android QEMU Sync Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0"); diff --git a/drivers/staging/goldfish/goldfish_sync_timeline_fence.c b/drivers/staging/goldfish/goldfish_sync_timeline_fence.c new file mode 100644 index 000000000000..5c0c029fadda --- /dev/null +++ b/drivers/staging/goldfish/goldfish_sync_timeline_fence.c @@ -0,0 +1,246 @@ +#include +#include +#include +#include +#include + +#include "goldfish_sync_timeline_fence.h" + +/* + * Timeline-based sync for Goldfish Sync + * Based on "Sync File validation framework" + * (drivers/dma-buf/sw_sync.c) + * + * Copyright (C) 2017 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/** + * struct goldfish_sync_timeline - sync object + * @kref: reference count on fence. + * @name: name of the goldfish_sync_timeline. 
Useful for debugging + * @child_list_head: list of children sync_pts for this goldfish_sync_timeline + * @child_list_lock: lock protecting @child_list_head and fence.status + * @active_list_head: list of active (unsignaled/errored) sync_pts + */ +struct goldfish_sync_timeline { + struct kref kref; + char name[32]; + + /* protected by child_list_lock */ + u64 context; + int value; + + struct list_head child_list_head; + spinlock_t child_list_lock; + + struct list_head active_list_head; +}; + +static inline struct goldfish_sync_timeline *goldfish_dma_fence_parent(struct dma_fence *fence) +{ + return container_of(fence->lock, struct goldfish_sync_timeline, + child_list_lock); +} + +static const struct dma_fence_ops goldfish_sync_timeline_fence_ops; + +static inline struct sync_pt *goldfish_sync_fence_to_sync_pt(struct dma_fence *fence) +{ + if (fence->ops != &goldfish_sync_timeline_fence_ops) + return NULL; + return container_of(fence, struct sync_pt, base); +} + +/** + * goldfish_sync_timeline_create_internal() - creates a sync object + * @name: sync_timeline name + * + * Creates a new sync_timeline. Returns the sync_timeline object or NULL in + * case of error. + */ +struct goldfish_sync_timeline +*goldfish_sync_timeline_create_internal(const char *name) +{ + struct goldfish_sync_timeline *obj; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return NULL; + + kref_init(&obj->kref); + obj->context = dma_fence_context_alloc(1); + strlcpy(obj->name, name, sizeof(obj->name)); + + INIT_LIST_HEAD(&obj->child_list_head); + INIT_LIST_HEAD(&obj->active_list_head); + spin_lock_init(&obj->child_list_lock); + + return obj; +} + +static void goldfish_sync_timeline_free_internal(struct kref *kref) +{ + struct goldfish_sync_timeline *obj = + container_of(kref, struct goldfish_sync_timeline, kref); + + kfree(obj); +} + +static void goldfish_sync_timeline_get_internal( + struct goldfish_sync_timeline *obj) +{ + kref_get(&obj->kref); +} + +void goldfish_sync_timeline_put_internal(struct goldfish_sync_timeline *obj) +{ + kref_put(&obj->kref, goldfish_sync_timeline_free_internal); +} + +/** + * goldfish_sync_timeline_signal_internal() - + * signal a status change on a goldfish_sync_timeline + * @obj: sync_timeline to signal + * @inc: num to increment on timeline->value + * + * A sync implementation should call this any time one of its fences + * has signaled or has an error condition. + */ +void goldfish_sync_timeline_signal_internal(struct goldfish_sync_timeline *obj, + unsigned int inc) +{ + unsigned long flags; + struct sync_pt *pt, *next; + + spin_lock_irqsave(&obj->child_list_lock, flags); + + obj->value += inc; + + list_for_each_entry_safe(pt, next, &obj->active_list_head, + active_list) { + if (dma_fence_is_signaled_locked(&pt->base)) + list_del_init(&pt->active_list); + } + + spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +/** + * goldfish_sync_pt_create_internal() - creates a sync pt + * @parent: fence's parent sync_timeline + * @size: size to allocate for this pt + * @inc: value of the fence + * + * Creates a new sync_pt as a child of @parent. @size bytes will be + * allocated allowing for implementation specific data to be kept after + * the generic sync_timeline struct. Returns the sync_pt object or + * NULL in case of error.
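The signaling rule implemented by goldfish_sync_timeline_signal_internal() above is the sw_sync one: a fence created at sequence number S becomes signaled exactly when the timeline's value has advanced to S or beyond. A tiny stand-alone model of that rule, with illustrative types rather than the kernel structs:

#include <stdbool.h>
#include <stdio.h>

struct model_timeline { unsigned int value; };
struct model_fence { unsigned int seqno; };

/* mirrors goldfish_sync_timeline_fence_signaled() further down */
static bool fence_signaled(const struct model_timeline *t,
			   const struct model_fence *f)
{
	return f->seqno <= t->value;
}

int main(void)
{
	struct model_timeline t = { .value = 0 };
	struct model_fence f = { .seqno = 1 };	/* created at current_time + 1 */

	printf("before inc: %d\n", fence_signaled(&t, &f));	/* 0 */
	t.value += 1;	/* ~ goldfish_sync_timeline_signal_internal(obj, 1) */
	printf("after inc:  %d\n", fence_signaled(&t, &f));	/* 1 */
	return 0;
}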
+ */ +struct sync_pt *goldfish_sync_pt_create_internal( + struct goldfish_sync_timeline *obj, int size, + unsigned int value) +{ + unsigned long flags; + struct sync_pt *pt; + + if (size < sizeof(*pt)) + return NULL; + + pt = kzalloc(size, GFP_KERNEL); + if (!pt) + return NULL; + + spin_lock_irqsave(&obj->child_list_lock, flags); + goldfish_sync_timeline_get_internal(obj); + dma_fence_init(&pt->base, &goldfish_sync_timeline_fence_ops, &obj->child_list_lock, + obj->context, value); + list_add_tail(&pt->child_list, &obj->child_list_head); + INIT_LIST_HEAD(&pt->active_list); + spin_unlock_irqrestore(&obj->child_list_lock, flags); + return pt; +} + +static const char *goldfish_sync_timeline_fence_get_driver_name( + struct dma_fence *fence) +{ + return "sw_sync"; +} + +static const char *goldfish_sync_timeline_fence_get_timeline_name( + struct dma_fence *fence) +{ + struct goldfish_sync_timeline *parent = goldfish_dma_fence_parent(fence); + + return parent->name; +} + +static void goldfish_sync_timeline_fence_release(struct dma_fence *fence) +{ + struct sync_pt *pt = goldfish_sync_fence_to_sync_pt(fence); + struct goldfish_sync_timeline *parent = goldfish_dma_fence_parent(fence); + unsigned long flags; + + spin_lock_irqsave(fence->lock, flags); + list_del(&pt->child_list); + if (!list_empty(&pt->active_list)) + list_del(&pt->active_list); + spin_unlock_irqrestore(fence->lock, flags); + + goldfish_sync_timeline_put_internal(parent); + dma_fence_free(fence); +} + +static bool goldfish_sync_timeline_fence_signaled(struct dma_fence *fence) +{ + struct goldfish_sync_timeline *parent = goldfish_dma_fence_parent(fence); + + return (fence->seqno > parent->value) ? false : true; +} + +static bool goldfish_sync_timeline_fence_enable_signaling(struct dma_fence *fence) +{ + struct sync_pt *pt = goldfish_sync_fence_to_sync_pt(fence); + struct goldfish_sync_timeline *parent = goldfish_dma_fence_parent(fence); + + if (goldfish_sync_timeline_fence_signaled(fence)) + return false; + + list_add_tail(&pt->active_list, &parent->active_list_head); + return true; +} + +static void goldfish_sync_timeline_fence_value_str(struct dma_fence *fence, + char *str, int size) +{ + snprintf(str, size, "%d", fence->seqno); +} + +static void goldfish_sync_timeline_fence_timeline_value_str( + struct dma_fence *fence, + char *str, int size) +{ + struct goldfish_sync_timeline *parent = goldfish_dma_fence_parent(fence); + + snprintf(str, size, "%d", parent->value); +} + +static const struct dma_fence_ops goldfish_sync_timeline_fence_ops = { + .get_driver_name = goldfish_sync_timeline_fence_get_driver_name, + .get_timeline_name = goldfish_sync_timeline_fence_get_timeline_name, + .enable_signaling = goldfish_sync_timeline_fence_enable_signaling, + .signaled = goldfish_sync_timeline_fence_signaled, + .wait = dma_fence_default_wait, + .release = goldfish_sync_timeline_fence_release, + .fence_value_str = goldfish_sync_timeline_fence_value_str, + .timeline_value_str = goldfish_sync_timeline_fence_timeline_value_str, +}; diff --git a/drivers/staging/goldfish/goldfish_sync_timeline_fence.h b/drivers/staging/goldfish/goldfish_sync_timeline_fence.h new file mode 100644 index 000000000000..638c6fb68f1e --- /dev/null +++ b/drivers/staging/goldfish/goldfish_sync_timeline_fence.h @@ -0,0 +1,58 @@ +#include +#include + +/** + * struct sync_pt - sync_pt object + * @base: base dma_fence object + * @child_list: sync timeline child's list + * @active_list: sync timeline active child's list + */ +struct sync_pt { + struct dma_fence base; + struct 
list_head child_list; + struct list_head active_list; +}; + +/** + * goldfish_sync_timeline_create_internal() - creates a sync object + * @name: goldfish_sync_timeline name + * + * Creates a new goldfish_sync_timeline. + * Returns the goldfish_sync_timeline object or NULL in case of error. + */ +struct goldfish_sync_timeline +*goldfish_sync_timeline_create_internal(const char *name); + +/** + * goldfish_sync_pt_create_internal() - creates a sync pt + * @parent: fence's parent goldfish_sync_timeline + * @size: size to allocate for this pt + * @inc: value of the fence + * + * Creates a new sync_pt as a child of @parent. @size bytes will be + * allocated allowing for implementation specific data to be kept after + * the generic sync_timeline struct. Returns the sync_pt object or + * NULL in case of error. + */ +struct sync_pt +*goldfish_sync_pt_create_internal(struct goldfish_sync_timeline *obj, + int size, unsigned int value); + +/** + * goldfish_sync_timeline_signal_internal() - + * signal a status change on a sync_timeline + * @obj: goldfish_sync_timeline to signal + * @inc: num to increment on timeline->value + * + * A sync implementation should call this any time one of its fences + * has signaled or has an error condition. + */ +void goldfish_sync_timeline_signal_internal(struct goldfish_sync_timeline *obj, + unsigned int inc); + +/** + * goldfish_sync_timeline_put_internal() - dec refcount of a sync_timeline + * and clean up memory if it was the last ref. + * @obj: goldfish_sync_timeline to decref + */ +void goldfish_sync_timeline_put_internal(struct goldfish_sync_timeline *obj); diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c index 3f4148c92308..0f538b8c3a07 100644 --- a/drivers/staging/greybus/light.c +++ b/drivers/staging/greybus/light.c @@ -925,6 +925,8 @@ static void __gb_lights_led_unregister(struct gb_channel *channel) return; led_classdev_unregister(cdev); + kfree(cdev->name); + cdev->name = NULL; channel->led = NULL; } diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c index 08e255884206..93e86798ec1c 100644 --- a/drivers/staging/greybus/loopback.c +++ b/drivers/staging/greybus/loopback.c @@ -1042,8 +1042,10 @@ static int gb_loopback_fn(void *data) else if (type == GB_LOOPBACK_TYPE_SINK) error = gb_loopback_async_sink(gb, size); - if (error) + if (error) { gb->error++; + gb->iteration_count++; + } } else { /* We are effectively single threaded here */ if (type == GB_LOOPBACK_TYPE_PING) diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c index e97b19148497..1e7321a1404c 100644 --- a/drivers/staging/greybus/spilib.c +++ b/drivers/staging/greybus/spilib.c @@ -544,11 +544,14 @@ int gb_spilib_master_init(struct gb_connection *connection, struct device *dev, return 0; -exit_spi_unregister: - spi_unregister_master(master); exit_spi_put: spi_master_put(master); + return ret; + +exit_spi_unregister: + spi_unregister_master(master); + return ret; } EXPORT_SYMBOL_GPL(gb_spilib_master_init); @@ -558,7 +561,6 @@ void gb_spilib_master_exit(struct gb_connection *connection) struct spi_master *master = gb_connection_get_data(connection); spi_unregister_master(master); - spi_master_put(master); } EXPORT_SYMBOL_GPL(gb_spilib_master_exit); diff --git a/drivers/staging/greybus/tools/Android.mk b/drivers/staging/greybus/tools/Android.mk deleted file mode 100644 index fdadbf611757..000000000000 --- a/drivers/staging/greybus/tools/Android.mk +++ /dev/null @@ -1,10 +0,0 @@ -LOCAL_PATH:= $(call my-dir) -
-include $(CLEAR_VARS) - -LOCAL_SRC_FILES:= loopback_test.c -LOCAL_MODULE_TAGS := optional -LOCAL_MODULE := gb_loopback_test - -include $(BUILD_EXECUTABLE) - diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c index 6150d2780e22..31a195d1bf05 100644 --- a/drivers/staging/iio/adc/ad7192.c +++ b/drivers/staging/iio/adc/ad7192.c @@ -141,6 +141,8 @@ #define AD7192_GPOCON_P1DAT BIT(1) /* P1 state */ #define AD7192_GPOCON_P0DAT BIT(0) /* P0 state */ +#define AD7192_EXT_FREQ_MHZ_MIN 2457600 +#define AD7192_EXT_FREQ_MHZ_MAX 5120000 #define AD7192_INT_FREQ_MHZ 4915200 /* NOTE: @@ -217,6 +219,12 @@ static int ad7192_calibrate_all(struct ad7192_state *st) ARRAY_SIZE(ad7192_calib_arr)); } +static inline bool ad7192_valid_external_frequency(u32 freq) +{ + return (freq >= AD7192_EXT_FREQ_MHZ_MIN && + freq <= AD7192_EXT_FREQ_MHZ_MAX); +} + static int ad7192_setup(struct ad7192_state *st, const struct ad7192_platform_data *pdata) { @@ -242,17 +250,20 @@ static int ad7192_setup(struct ad7192_state *st, id); switch (pdata->clock_source_sel) { - case AD7192_CLK_EXT_MCLK1_2: - case AD7192_CLK_EXT_MCLK2: - st->mclk = AD7192_INT_FREQ_MHZ; - break; case AD7192_CLK_INT: case AD7192_CLK_INT_CO: - if (pdata->ext_clk_hz) - st->mclk = pdata->ext_clk_hz; - else - st->mclk = AD7192_INT_FREQ_MHZ; + st->mclk = AD7192_INT_FREQ_MHZ; break; + case AD7192_CLK_EXT_MCLK1_2: + case AD7192_CLK_EXT_MCLK2: + if (ad7192_valid_external_frequency(pdata->ext_clk_hz)) { + st->mclk = pdata->ext_clk_hz; + break; + } + dev_err(&st->sd.spi->dev, "Invalid frequency setting %u\n", + pdata->ext_clk_hz); + ret = -EINVAL; + goto out; default: ret = -EINVAL; goto out; diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 3d539eeb0e26..6d31001d1825 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -649,8 +649,6 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev) /* Ring buffer functions - here trigger setup related */ indio_dev->setup_ops = &ad5933_ring_setup_ops; - indio_dev->modes |= INDIO_BUFFER_HARDWARE; - return 0; } @@ -763,7 +761,7 @@ static int ad5933_probe(struct i2c_client *client, indio_dev->dev.parent = &client->dev; indio_dev->info = &ad5933_info; indio_dev->name = id->name; - indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->modes = (INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE); indio_dev->channels = ad5933_channels; indio_dev->num_channels = ARRAY_SIZE(ad5933_channels); diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c index 975dbbb3abd0..7da3eb4ca4be 100644 --- a/drivers/staging/ks7010/ks_hostif.c +++ b/drivers/staging/ks7010/ks_hostif.c @@ -242,9 +242,8 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info, offset = 0; while (bsize > offset) { - /* DPRINTK(4, "Element ID=%d\n",*bp); */ - switch (*bp) { - case 0: /* ssid */ + switch (*bp) { /* Information Element ID */ + case WLAN_EID_SSID: if (*(bp + 1) <= SSID_MAX_SIZE) { ap->ssid.size = *(bp + 1); } else { @@ -254,8 +253,8 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info, } memcpy(ap->ssid.body, bp + 2, ap->ssid.size); break; - case 1: /* rate */ - case 50: /* ext rate */ + case WLAN_EID_SUPP_RATES: + case WLAN_EID_EXT_SUPP_RATES: if ((*(bp + 1) + ap->rate_set.size) <= RATE_SET_MAX_SIZE) { memcpy(&ap->rate_set.body[ap->rate_set.size], @@ -271,9 +270,9 @@ int get_ap_information(struct ks_wlan_private 
*priv, struct ap_info_t *ap_info, (RATE_SET_MAX_SIZE - ap->rate_set.size); } break; - case 3: /* DS parameter */ + case WLAN_EID_DS_PARAMS: break; - case 48: /* RSN(WPA2) */ + case WLAN_EID_RSN: ap->rsn_ie.id = *bp; if (*(bp + 1) <= RSN_IE_BODY_MAX) { ap->rsn_ie.size = *(bp + 1); @@ -284,8 +283,8 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info, } memcpy(ap->rsn_ie.body, bp + 2, ap->rsn_ie.size); break; - case 221: /* WPA */ - if (memcmp(bp + 2, "\x00\x50\xf2\x01", 4) == 0) { /* WPA OUI check */ + case WLAN_EID_VENDOR_SPECIFIC: /* WPA */ + if (memcmp(bp + 2, "\x00\x50\xf2\x01", 4) == 0) { /* WPA OUI check */ ap->wpa_ie.id = *bp; if (*(bp + 1) <= RSN_IE_BODY_MAX) { ap->wpa_ie.size = *(bp + 1); @@ -300,18 +299,18 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info, } break; - case 2: /* FH parameter */ - case 4: /* CF parameter */ - case 5: /* TIM */ - case 6: /* IBSS parameter */ - case 7: /* Country */ - case 42: /* ERP information */ - case 47: /* Reserve ID 47 Broadcom AP */ + case WLAN_EID_FH_PARAMS: + case WLAN_EID_CF_PARAMS: + case WLAN_EID_TIM: + case WLAN_EID_IBSS_PARAMS: + case WLAN_EID_COUNTRY: + case WLAN_EID_ERP_INFO: break; default: DPRINTK(4, "unknown Element ID=%d\n", *bp); break; } + offset += 2; /* id & size field */ offset += *(bp + 1); /* +size offset */ bp += (*(bp + 1) + 2); /* pointer update */ diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h index 5bae8d468e23..9ac317e4b507 100644 --- a/drivers/staging/ks7010/ks_hostif.h +++ b/drivers/staging/ks7010/ks_hostif.h @@ -13,6 +13,7 @@ #define _KS_HOSTIF_H_ #include +#include /* * HOST-MAC I/F events diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c index 64763aacda57..8b92cf06d063 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c @@ -825,14 +825,15 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm return conn; failed_2: - kiblnd_destroy_conn(conn, true); + kiblnd_destroy_conn(conn); + LIBCFS_FREE(conn, sizeof(*conn)); failed_1: LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); failed_0: return NULL; } -void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn) +void kiblnd_destroy_conn(struct kib_conn *conn) { struct rdma_cm_id *cmid = conn->ibc_cmid; struct kib_peer *peer = conn->ibc_peer; @@ -895,8 +896,6 @@ void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn) rdma_destroy_id(cmid); atomic_dec(&net->ibn_nconns); } - - LIBCFS_FREE(conn, sizeof(*conn)); } int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why) @@ -1711,7 +1710,7 @@ int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx, return 0; } spin_unlock(&fps->fps_lock); - rc = -EBUSY; + rc = -EAGAIN; } spin_lock(&fps->fps_lock); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h index a1e994a1cc84..98a5e2c21a83 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h @@ -1015,7 +1015,7 @@ int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why); struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cmid, int state, int version); -void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn); +void kiblnd_destroy_conn(struct kib_conn *conn); void kiblnd_close_conn(struct kib_conn *conn, 
int error); void kiblnd_close_conn_locked(struct kib_conn *conn, int error); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index 8fc191d99927..4b4a20149894 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -47,7 +47,7 @@ static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type, __u64 dstcookie); static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn); static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn); -static void kiblnd_unmap_tx(struct lnet_ni *ni, struct kib_tx *tx); +static void kiblnd_unmap_tx(struct kib_tx *tx); static void kiblnd_check_sends_locked(struct kib_conn *conn); static void @@ -65,7 +65,7 @@ kiblnd_tx_done(struct lnet_ni *ni, struct kib_tx *tx) LASSERT(!tx->tx_waiting); /* mustn't be awaiting peer response */ LASSERT(tx->tx_pool); - kiblnd_unmap_tx(ni, tx); + kiblnd_unmap_tx(tx); /* tx may have up to 2 lnet msgs to finalise */ lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL; @@ -590,13 +590,9 @@ kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx, struct kib_rdma_desc * return 0; } -static void kiblnd_unmap_tx(struct lnet_ni *ni, struct kib_tx *tx) +static void kiblnd_unmap_tx(struct kib_tx *tx) { - struct kib_net *net = ni->ni_data; - - LASSERT(net); - - if (net->ibn_fmr_ps) + if (tx->fmr.fmr_pfmr || tx->fmr.fmr_frd) kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status); if (tx->tx_nfrags) { @@ -1289,11 +1285,6 @@ kiblnd_connect_peer(struct kib_peer *peer) goto failed2; } - LASSERT(cmid->device); - CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n", - libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname, - &dev->ibd_ifip, cmid->device->name); - return; failed2: @@ -2995,8 +2986,19 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) } else { rc = rdma_resolve_route( cmid, *kiblnd_tunables.kib_timeout * 1000); - if (!rc) + if (!rc) { + struct kib_net *net = peer->ibp_ni->ni_data; + struct kib_dev *dev = net->ibn_dev; + + CDEBUG(D_NET, "%s: connection bound to "\ + "%s:%pI4h:%s\n", + libcfs_nid2str(peer->ibp_nid), + dev->ibd_ifname, + &dev->ibd_ifip, cmid->device->name); + return 0; + } + /* Can't initiate route resolution */ CERROR("Can't resolve route for %s: %d\n", libcfs_nid2str(peer->ibp_nid), rc); @@ -3313,11 +3315,13 @@ kiblnd_connd(void *arg) spin_unlock_irqrestore(lock, flags); dropped_lock = 1; - kiblnd_destroy_conn(conn, !peer); + kiblnd_destroy_conn(conn); spin_lock_irqsave(lock, flags); - if (!peer) + if (!peer) { + kfree(conn); continue; + } conn->ibc_peer = peer; if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE) diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c index 2da051c0d251..a4bb93b440a5 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c @@ -528,19 +528,20 @@ EXPORT_SYMBOL(cfs_cpt_spread_node); int cfs_cpt_current(struct cfs_cpt_table *cptab, int remap) { - int cpu = smp_processor_id(); - int cpt = cptab->ctb_cpu2cpt[cpu]; + int cpu; + int cpt; - if (cpt < 0) { - if (!remap) - return cpt; + preempt_disable(); + cpu = smp_processor_id(); + cpt = cptab->ctb_cpu2cpt[cpu]; + if (cpt < 0 && remap) { /* don't return negative value for safety of upper layer, * instead we shadow the unknown cpu to a valid partition ID */ cpt = cpu % cptab->ctb_nparts; } - + preempt_enable(); 
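/* Note: smp_processor_id() is only meaningful while preemption is
 * disabled; without the preempt_disable()/preempt_enable() pair above,
 * the task could migrate between reading the cpu id and indexing
 * ctb_cpu2cpt (and CONFIG_DEBUG_PREEMPT would warn). The returned cpt
 * may be stale by the time the caller uses it, which is fine here:
 * any valid partition id is acceptable to the upper layer. */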
return cpt; } EXPORT_SYMBOL(cfs_cpt_current); diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c index db0572733712..ab30a0f5129c 100644 --- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c @@ -119,6 +119,7 @@ static struct shash_alg alg = { .cra_name = "adler32", .cra_driver_name = "adler32-zlib", .cra_priority = 100, + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h index a986737ec010..82a499fb23bb 100644 --- a/drivers/staging/lustre/lustre/include/obd.h +++ b/drivers/staging/lustre/lustre/include/obd.h @@ -190,7 +190,7 @@ struct client_obd { struct sptlrpc_flavor cl_flvr_mgc; /* fixed flavor of mgc->mgs */ /* the grant values are protected by loi_list_lock below */ - unsigned long cl_dirty_pages; /* all _dirty_ in pahges */ + unsigned long cl_dirty_pages; /* all _dirty_ in pages */ unsigned long cl_dirty_max_pages; /* allowed w/o rpc */ unsigned long cl_dirty_transit; /* dirty synchronous */ unsigned long cl_avail_grant; /* bytes of credit for ost */ diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c index b5d84f3f6071..11e01c48f51a 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c @@ -1571,8 +1571,10 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, return ERR_CAST(res); lock = ldlm_lock_new(res); - if (!lock) + if (!lock) { + ldlm_resource_putref(res); return ERR_PTR(-ENOMEM); + } lock->l_req_mode = mode; lock->l_ast_data = data; @@ -1615,6 +1617,8 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, return ERR_PTR(rc); } + + /** * Enqueue (request) a lock. * On the client this is called from ldlm_cli_enqueue_fini diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c index 0be55623bac4..364d697b2690 100644 --- a/drivers/staging/lustre/lustre/llite/xattr.c +++ b/drivers/staging/lustre/lustre/llite/xattr.c @@ -93,7 +93,11 @@ ll_xattr_set_common(const struct xattr_handler *handler, __u64 valid; int rc; - if (flags == XATTR_REPLACE) { + /* When setxattr() is called with a size of 0 the value is + * unconditionally replaced by "". When removexattr() is + * called we get a NULL value and XATTR_REPLACE for flags. 
+ */ + if (!value && flags == XATTR_REPLACE) { ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1); valid = OBD_MD_FLXATTRRM; } else { diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c index 6e16c930a021..c2aadb2d1fea 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c +++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c @@ -2694,7 +2694,7 @@ static int lmv_unpackmd(struct obd_export *exp, struct lmv_stripe_md **lsmp, if (lsm && !lmm) { int i; - for (i = 1; i < lsm->lsm_md_stripe_count; i++) { + for (i = 0; i < lsm->lsm_md_stripe_count; i++) { /* * For migrating inode, the master stripe and master * object will be the same, so do not need iput, see diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c index 6ef8ddec4ab6..8c97acd79211 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_request.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c @@ -1121,9 +1121,9 @@ struct readpage_param { * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the * lu_dirpage for this integrated page will be adjusted. **/ -static int mdc_read_page_remote(void *data, struct page *page0) +static int mdc_read_page_remote(struct file *data, struct page *page0) { - struct readpage_param *rp = data; + struct readpage_param *rp = (struct readpage_param *)data; struct page **page_pool; struct page *page; struct lu_dirpage *dp; diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c index e1207c227b79..c356d00d87a5 100644 --- a/drivers/staging/lustre/lustre/osc/osc_cache.c +++ b/drivers/staging/lustre/lustre/osc/osc_cache.c @@ -1528,7 +1528,7 @@ static int osc_enter_cache_try(struct client_obd *cli, if (rc < 0) return 0; - if (cli->cl_dirty_pages <= cli->cl_dirty_max_pages && + if (cli->cl_dirty_pages < cli->cl_dirty_max_pages && atomic_long_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) { osc_consume_write_grant(cli, &oap->oap_brw_page); if (transient) { diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c index cd7a5391a574..0a3f832095ea 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c @@ -847,7 +847,7 @@ void sptlrpc_request_out_callback(struct ptlrpc_request *req) if (req->rq_pool || !req->rq_reqbuf) return; - kfree(req->rq_reqbuf); + kvfree(req->rq_reqbuf); req->rq_reqbuf = NULL; req->rq_reqbuf_len = 0; } diff --git a/drivers/staging/media/atomisp/i2c/ov2680.c b/drivers/staging/media/atomisp/i2c/ov2680.c index 51b7d61df0f5..179576224319 100644 --- a/drivers/staging/media/atomisp/i2c/ov2680.c +++ b/drivers/staging/media/atomisp/i2c/ov2680.c @@ -396,12 +396,11 @@ static long __ov2680_set_exposure(struct v4l2_subdev *sd, int coarse_itg, { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov2680_device *dev = to_ov2680_sensor(sd); - u16 vts,hts; + u16 vts; int ret,exp_val; dev_dbg(&client->dev, "+++++++__ov2680_set_exposure coarse_itg %d, gain %d, digitgain %d++\n",coarse_itg, gain, digitgain); - hts = ov2680_res[dev->fmt_idx].pixels_per_line; vts = ov2680_res[dev->fmt_idx].lines_per_frame; /* group hold */ @@ -1190,7 +1189,8 @@ static int ov2680_detect(struct i2c_client *client) OV2680_SC_CMMN_SUB_ID, &high); revision = (u8) high & 0x0f; - dev_info(&client->dev, "sensor_revision id = 0x%x\n", id); + dev_info(&client->dev, "sensor_revision id = 0x%x, rev= %d\n", + id, revision); return 0; } diff --git 
a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c index 0592ac1f2832..cfe6bb610014 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c @@ -81,7 +81,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, get_user(kp->flags, &up->flags)) return -EFAULT; - kp->base = compat_ptr(tmp); + kp->base = (void __force *)compat_ptr(tmp); get_v4l2_pix_format((struct v4l2_pix_format *)&kp->fmt, &up->fmt); return 0; } @@ -232,10 +232,10 @@ static int get_atomisp_dvs_6axis_config32(struct atomisp_dvs_6axis_config *kp, get_user(ycoords_uv, &up->ycoords_uv)) return -EFAULT; - kp->xcoords_y = compat_ptr(xcoords_y); - kp->ycoords_y = compat_ptr(ycoords_y); - kp->xcoords_uv = compat_ptr(xcoords_uv); - kp->ycoords_uv = compat_ptr(ycoords_uv); + kp->xcoords_y = (void __force *)compat_ptr(xcoords_y); + kp->ycoords_y = (void __force *)compat_ptr(ycoords_y); + kp->xcoords_uv = (void __force *)compat_ptr(xcoords_uv); + kp->ycoords_uv = (void __force *)compat_ptr(ycoords_uv); return 0; } @@ -296,7 +296,7 @@ static int get_atomisp_metadata_stat32(struct atomisp_metadata *kp, return -EFAULT; kp->data = compat_ptr(data); - kp->effective_width = compat_ptr(effective_width); + kp->effective_width = (void __force *)compat_ptr(effective_width); return 0; } @@ -360,7 +360,7 @@ static int get_atomisp_metadata_by_type_stat32( return -EFAULT; kp->data = compat_ptr(data); - kp->effective_width = compat_ptr(effective_width); + kp->effective_width = (void __force *)compat_ptr(effective_width); return 0; } @@ -437,7 +437,7 @@ static int get_atomisp_overlay32(struct atomisp_overlay *kp, get_user(kp->overlay_start_x, &up->overlay_start_y)) return -EFAULT; - kp->frame = compat_ptr(frame); + kp->frame = (void __force *)compat_ptr(frame); return 0; } @@ -481,7 +481,7 @@ static int get_atomisp_calibration_group32( get_user(calb_grp_values, &up->calb_grp_values)) return -EFAULT; - kp->calb_grp_values = compat_ptr(calb_grp_values); + kp->calb_grp_values = (void __force *)compat_ptr(calb_grp_values); return 0; } @@ -703,8 +703,8 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp, return -EFAULT; while (n >= 0) { - compat_uptr_t *src = (compat_uptr_t *)up + n; - uintptr_t *dst = (uintptr_t *)kp + n; + compat_uptr_t __user *src = ((compat_uptr_t __user *)up) + n; + uintptr_t *dst = ((uintptr_t *)kp) + n; if (get_user((*dst), src)) return -EFAULT; @@ -751,12 +751,12 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp, #endif return -EFAULT; - kp->shading_table = user_ptr + offset; + kp->shading_table = (void __force *)user_ptr + offset; offset = sizeof(struct atomisp_shading_table); if (!kp->shading_table) return -EFAULT; - if (copy_to_user(kp->shading_table, + if (copy_to_user((void __user *)kp->shading_table, &karg.shading_table, sizeof(struct atomisp_shading_table))) return -EFAULT; @@ -777,13 +777,14 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp, #endif return -EFAULT; - kp->morph_table = user_ptr + offset; + kp->morph_table = (void __force *)user_ptr + offset; offset += sizeof(struct atomisp_morph_table); if (!kp->morph_table) return -EFAULT; - if (copy_to_user(kp->morph_table, &karg.morph_table, - sizeof(struct atomisp_morph_table))) + if (copy_to_user((void __user *)kp->morph_table, + &karg.morph_table, + sizeof(struct atomisp_morph_table))) return -EFAULT; } @@ -802,13 +803,14 
@@ static int get_atomisp_parameters32(struct atomisp_parameters *kp, #endif return -EFAULT; - kp->dvs2_coefs = user_ptr + offset; + kp->dvs2_coefs = (void __force *)user_ptr + offset; offset += sizeof(struct atomisp_dis_coefficients); if (!kp->dvs2_coefs) return -EFAULT; - if (copy_to_user(kp->dvs2_coefs, &karg.dvs2_coefs, - sizeof(struct atomisp_dis_coefficients))) + if (copy_to_user((void __user *)kp->dvs2_coefs, + &karg.dvs2_coefs, + sizeof(struct atomisp_dis_coefficients))) return -EFAULT; } /* handle dvs 6axis configuration */ @@ -826,13 +828,14 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp, #endif return -EFAULT; - kp->dvs_6axis_config = user_ptr + offset; + kp->dvs_6axis_config = (void __force *)user_ptr + offset; offset += sizeof(struct atomisp_dvs_6axis_config); if (!kp->dvs_6axis_config) return -EFAULT; - if (copy_to_user(kp->dvs_6axis_config, &karg.dvs_6axis_config, - sizeof(struct atomisp_dvs_6axis_config))) + if (copy_to_user((void __user *)kp->dvs_6axis_config, + &karg.dvs_6axis_config, + sizeof(struct atomisp_dvs_6axis_config))) return -EFAULT; } } @@ -891,7 +894,7 @@ static int get_atomisp_sensor_ae_bracketing_lut( get_user(lut, &up->lut)) return -EFAULT; - kp->lut = compat_ptr(lut); + kp->lut = (void __force *)compat_ptr(lut); return 0; } diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c index d8cfed358d55..f1d8cc5a2730 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c @@ -1285,7 +1285,10 @@ const struct v4l2_file_operations atomisp_fops = { .mmap = atomisp_mmap, .unlocked_ioctl = video_ioctl2, #ifdef CONFIG_COMPAT + /* + * There are problems with this code. Disable this for now. .compat_ioctl32 = atomisp_compat_ioctl32, + */ #endif .poll = atomisp_poll, }; @@ -1297,7 +1300,10 @@ const struct v4l2_file_operations atomisp_file_fops = { .mmap = atomisp_file_mmap, .unlocked_ioctl = video_ioctl2, #ifdef CONFIG_COMPAT + /* + * There are problems with this code. Disable this for now. 
.compat_ioctl32 = atomisp_compat_ioctl32, + */ #endif .poll = atomisp_poll, }; diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c index 717647951fb6..a5b7c2ba6c44 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c @@ -1291,9 +1291,9 @@ static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) wbinvd(); if (!atomisp_is_vf_pipe(pipe) && - (buf->reserved2 & ATOMISP_BUFFER_HAS_PER_FRAME_SETTING)) { + (buf->request & ATOMISP_BUFFER_HAS_PER_FRAME_SETTING)) { /* this buffer will have a per-frame parameter */ - pipe->frame_request_config_id[buf->index] = buf->reserved2 & + pipe->frame_request_config_id[buf->index] = buf->request & ~ATOMISP_BUFFER_HAS_PER_FRAME_SETTING; dev_dbg(isp->dev, "This buffer requires per_frame setting which has isp_config_id %d\n", pipe->frame_request_config_id[buf->index]); @@ -1477,12 +1477,12 @@ static int atomisp_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) buf->reserved &= 0x0000ffff; if (!(buf->flags & V4L2_BUF_FLAG_ERROR)) buf->reserved |= __get_frame_exp_id(pipe, buf) << 16; - buf->reserved2 = pipe->frame_config_id[buf->index]; + buf->request = pipe->frame_config_id[buf->index]; rt_mutex_unlock(&isp->mutex); dev_dbg(isp->dev, "dqbuf buffer %d (%s) for asd%d with exp_id %d, isp_config_id %d\n", buf->index, vdev->name, asd->index, buf->reserved >> 16, - buf->reserved2); + buf->request); return 0; } diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c index 71af13bd0ebd..e35e1b2160e3 100644 --- a/drivers/staging/media/lirc/lirc_zilog.c +++ b/drivers/staging/media/lirc/lirc_zilog.c @@ -288,7 +288,7 @@ static void release_ir_tx(struct kref *ref) struct IR_tx *tx = container_of(ref, struct IR_tx, ref); struct IR *ir = tx->ir; - ir->l.features &= ~LIRC_CAN_SEND_LIRCCODE; + ir->l.features &= ~LIRC_CAN_SEND_PULSE; /* Don't put_ir_device(tx->ir) here, so our lock doesn't get freed */ ir->tx = NULL; kfree(tx); @@ -1228,6 +1228,7 @@ static unsigned int poll(struct file *filep, poll_table *wait) dev_dbg(ir->l.dev, "%s result = %s\n", __func__, ret ? 
"POLLIN|POLLRDNORM" : "none"); + put_ir_rx(rx, false); return ret; } @@ -1267,14 +1268,14 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg) if (!(features & LIRC_CAN_SEND_MASK)) return -ENOTTY; - result = put_user(LIRC_MODE_LIRCCODE, uptr); + result = put_user(LIRC_MODE_PULSE, uptr); break; case LIRC_SET_SEND_MODE: if (!(features & LIRC_CAN_SEND_MASK)) return -ENOTTY; result = get_user(mode, uptr); - if (!result && mode != LIRC_MODE_LIRCCODE) + if (!result && mode != LIRC_MODE_PULSE) return -EINVAL; break; default: @@ -1512,7 +1513,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) kref_init(&tx->ref); ir->tx = tx; - ir->l.features |= LIRC_CAN_SEND_LIRCCODE; + ir->l.features |= LIRC_CAN_SEND_PULSE; mutex_init(&tx->client_lock); tx->c = client; tx->need_boot = 1; diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c index 32a483769975..fa611455109a 100644 --- a/drivers/staging/rtl8188eu/core/rtw_ap.c +++ b/drivers/staging/rtl8188eu/core/rtw_ap.c @@ -754,7 +754,7 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf) } /* setting only at first time */ - if (!(pmlmepriv->cur_network.join_res)) { + if (pmlmepriv->cur_network.join_res != true) { /* WEP Key will be set before this function, do not * clear CAM. */ diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c index 9461bce883ea..be8542676adf 100644 --- a/drivers/staging/rtl8188eu/core/rtw_cmd.c +++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c @@ -333,7 +333,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter) else RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid)); - pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); + pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); if (!pcmd) { res = _FAIL; goto exit; @@ -508,7 +508,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu if (enqueue) { /* need enqueue, prepare cmd_obj and enqueue */ - cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL); + cmdobj = kzalloc(sizeof(*cmdobj), GFP_ATOMIC); if (!cmdobj) { res = _FAIL; kfree(param); diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c index f663e6c41f8a..f6d71587b803 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mlme.c +++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c @@ -106,10 +106,10 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv) void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv) { - rtw_free_mlme_priv_ie_data(pmlmepriv); - - if (pmlmepriv) + if (pmlmepriv) { + rtw_free_mlme_priv_ie_data(pmlmepriv); vfree(pmlmepriv->free_bss_buf); + } } struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv) diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c index 3fd5f4102b36..afb9dadc1cfe 100644 --- a/drivers/staging/rtl8188eu/core/rtw_recv.c +++ b/drivers/staging/rtl8188eu/core/rtw_recv.c @@ -259,10 +259,12 @@ static int recvframe_chkmic(struct adapter *adapter, } /* icv_len included the mic code */ - datalen = precvframe->pkt->len-prxattrib->hdrlen - 8; + datalen = precvframe->pkt->len-prxattrib->hdrlen - + prxattrib->iv_len-prxattrib->icv_len-8; pframe = precvframe->pkt->data; - payload = pframe+prxattrib->hdrlen; + payload = pframe+prxattrib->hdrlen+prxattrib->iv_len; + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, 
prxattrib->icv_len)); rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0], (unsigned char)prxattrib->priority); /* care the length of the data */ @@ -407,15 +409,9 @@ static struct recv_frame *decryptor(struct adapter *padapter, default: break; } - if (res != _FAIL) { - memmove(precv_frame->pkt->data + precv_frame->attrib.iv_len, precv_frame->pkt->data, precv_frame->attrib.hdrlen); - skb_pull(precv_frame->pkt, precv_frame->attrib.iv_len); - skb_trim(precv_frame->pkt, precv_frame->pkt->len - precv_frame->attrib.icv_len); - } } else if (prxattrib->bdecrypted == 1 && prxattrib->encrypt > 0 && - (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_)) { - psecuritypriv->hw_decrypted = true; - } + (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_)) + psecuritypriv->hw_decrypted = true; if (res == _FAIL) { rtw_free_recvframe(return_packet, &padapter->recvpriv.free_recv_queue); @@ -456,7 +452,7 @@ static struct recv_frame *portctrl(struct adapter *adapter, if (auth_alg == 2) { /* get ether_type */ - ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE; + ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE + pfhdr->attrib.iv_len; memcpy(&be_tmp, ptr, 2); ether_type = ntohs(be_tmp); @@ -1138,8 +1134,6 @@ static int validate_recv_data_frame(struct adapter *adapter, } if (pattrib->privacy) { - struct sk_buff *skb = precv_frame->pkt; - RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("validate_recv_data_frame:pattrib->privacy=%x\n", pattrib->privacy)); RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n ^^^^^^^^^^^IS_MCAST(pattrib->ra(0x%02x))=%d^^^^^^^^^^^^^^^6\n", pattrib->ra[0], IS_MCAST(pattrib->ra))); @@ -1148,13 +1142,6 @@ static int validate_recv_data_frame(struct adapter *adapter, RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n pattrib->encrypt=%d\n", pattrib->encrypt)); SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt); - - if (pattrib->bdecrypted == 1 && pattrib->encrypt > 0) { - memmove(skb->data + pattrib->iv_len, - skb->data, pattrib->hdrlen); - skb_pull(skb, pattrib->iv_len); - skb_trim(skb, skb->len - pattrib->icv_len); - } } else { pattrib->encrypt = 0; pattrib->iv_len = 0; @@ -1274,7 +1261,6 @@ static int validate_recv_frame(struct adapter *adapter, * Hence forward the frame to the monitor anyway to preserve the order * in which frames were received. */ - rtl88eu_mon_recv_hook(adapter->pmondev, precv_frame); exit: @@ -1296,8 +1282,11 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe) u8 *ptr = precvframe->pkt->data; struct rx_pkt_attrib *pattrib = &precvframe->attrib; - psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen); - psnap_type = ptr+pattrib->hdrlen + SNAP_SIZE; + if (pattrib->encrypt) + skb_trim(precvframe->pkt, precvframe->pkt->len - pattrib->icv_len); + + psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen + pattrib->iv_len); + psnap_type = ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE; /* convert hdr + possible LLC headers into Ethernet header */ if ((!memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) && (!memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2) == false) && @@ -1310,9 +1299,12 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe) bsnaphdr = false; } - rmv_len = pattrib->hdrlen + (bsnaphdr ? SNAP_SIZE : 0); + rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr ? 
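The iv_len/icv_len accounting added to recvframe_chkmic() and portctrl() above shifts the MIC computation past the IV and excludes the ICV from the covered length. A worked example of the offset arithmetic as a standalone program, using representative TKIP lengths (hdrlen 24, iv_len 8, icv_len 4, with the 8-byte MIC at the tail of the data) chosen purely for illustration:

#include <stdio.h>

int main(void)
{
        int pkt_len = 200, hdrlen = 24, iv_len = 8, icv_len = 4;

        /* bytes covered by the MIC check: everything between the IV
         * and the ICV, minus the 8-byte MIC itself */
        int datalen = pkt_len - hdrlen - iv_len - icv_len - 8;
        int payload_off = hdrlen + iv_len;

        printf("payload at +%d, %d bytes checked\n", payload_off, datalen);
        return 0;
}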
SNAP_SIZE : 0); len = precvframe->pkt->len - rmv_len; + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, + ("\n===pattrib->hdrlen: %x, pattrib->iv_len:%x===\n\n", pattrib->hdrlen, pattrib->iv_len)); + memcpy(&be_tmp, ptr+rmv_len, 2); eth_type = ntohs(be_tmp); /* pattrib->ether_type */ pattrib->eth_type = eth_type; @@ -1337,6 +1329,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter, struct __queue *defrag_q) { struct list_head *plist, *phead; + u8 wlanhdr_offset; u8 curfragnum; struct recv_frame *pfhdr, *pnfhdr; struct recv_frame *prframe, *pnextrframe; @@ -1385,7 +1378,12 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter, /* copy the 2nd~n fragment frame's payload to the first fragment */ /* get the 2nd~last fragment frame's payload */ - skb_pull(pnextrframe->pkt, pnfhdr->attrib.hdrlen); + wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len; + + skb_pull(pnextrframe->pkt, wlanhdr_offset); + + /* append to first fragment frame's tail (if privacy frame, pull the ICV) */ + skb_trim(prframe->pkt, prframe->pkt->len - pfhdr->attrib.icv_len); /* memcpy */ memcpy(skb_tail_pointer(pfhdr->pkt), pnfhdr->pkt->data, @@ -1393,7 +1391,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter, skb_put(prframe->pkt, pnfhdr->pkt->len); - pfhdr->attrib.icv_len = 0; + pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len; plist = plist->next; } @@ -1519,6 +1517,11 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe) nr_subframes = 0; pattrib = &prframe->attrib; + skb_pull(prframe->pkt, prframe->attrib.hdrlen); + + if (prframe->attrib.iv_len > 0) + skb_pull(prframe->pkt, prframe->attrib.iv_len); + a_len = prframe->pkt->len; pdata = prframe->pkt->data; @@ -1887,6 +1890,24 @@ static int process_recv_indicatepkts(struct adapter *padapter, return retval; } +static int recv_func_prehandle(struct adapter *padapter, + struct recv_frame *rframe) +{ + int ret = _SUCCESS; + struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue; + + /* check the frame crtl field and decache */ + ret = validate_recv_frame(padapter, rframe); + if (ret != _SUCCESS) { + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! drop pkt\n")); + rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */ + goto exit; + } + +exit: + return ret; +} + static int recv_func_posthandle(struct adapter *padapter, struct recv_frame *prframe) { @@ -1939,7 +1960,6 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe) struct rx_pkt_attrib *prxattrib = &rframe->attrib; struct security_priv *psecuritypriv = &padapter->securitypriv; struct mlme_priv *mlmepriv = &padapter->mlmepriv; - struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue; /* check if need to handle uc_swdec_pending_queue*/ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) { @@ -1951,12 +1971,9 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe) } } - /* check the frame crtl field and decache */ - ret = validate_recv_frame(padapter, rframe); - if (ret != _SUCCESS) { - RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! 
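The recvframe_defrag() hunk above can be read as a four-step splice per follow-on fragment. A sketch of those steps, assuming the driver's recv_frame type with its pkt (sk_buff) and attrib members ('first' is the leading fragment, 'next' the fragment being merged):

/* 1. strip the 802.11 header and IV from the follow-on fragment */
skb_pull(next->pkt, next->attrib.hdrlen + next->attrib.iv_len);

/* 2. drop the ICV still sitting at the tail of the first fragment */
skb_trim(first->pkt, first->pkt->len - first->attrib.icv_len);

/* 3. append the remaining payload */
memcpy(skb_tail_pointer(first->pkt), next->pkt->data, next->pkt->len);
skb_put(first->pkt, next->pkt->len);

/* 4. the merged frame now ends with the last fragment's ICV */
first->attrib.icv_len = next->attrib.icv_len;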
drop pkt\n")); - rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */ - } else { + ret = recv_func_prehandle(padapter, rframe); + + if (ret == _SUCCESS) { /* check if need to enqueue into uc_swdec_pending_queue*/ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && !IS_MCAST(prxattrib->ra) && prxattrib->encrypt > 0 && diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c index c0664dc80bf2..446310775e90 100644 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c @@ -1395,19 +1395,13 @@ static int rtw_wx_get_essid(struct net_device *dev, if ((check_fwstate(pmlmepriv, _FW_LINKED)) || (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) { len = pcur_bss->Ssid.SsidLength; - - wrqu->essid.length = len; - memcpy(extra, pcur_bss->Ssid.Ssid, len); - - wrqu->essid.flags = 1; } else { - ret = -1; - goto exit; + len = 0; + *extra = 0; } - -exit: - + wrqu->essid.length = len; + wrqu->essid.flags = 1; return ret; } diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c index 37fd52d7364f..225c23fc69dc 100644 --- a/drivers/staging/rtl8188eu/os_dep/mon.c +++ b/drivers/staging/rtl8188eu/os_dep/mon.c @@ -66,34 +66,6 @@ static void mon_recv_decrypted(struct net_device *dev, const u8 *data, netif_rx(skb); } -static void mon_recv_decrypted_recv(struct net_device *dev, const u8 *data, - int data_len) -{ - struct sk_buff *skb; - struct ieee80211_hdr *hdr; - int hdr_len; - - skb = netdev_alloc_skb(dev, data_len); - if (!skb) - return; - memcpy(skb_put(skb, data_len), data, data_len); - - /* - * Frame data is not encrypted. Strip off protection so - * userspace doesn't think that it is. - */ - - hdr = (struct ieee80211_hdr *)skb->data; - hdr_len = ieee80211_hdrlen(hdr->frame_control); - - if (ieee80211_has_protected(hdr->frame_control)) - hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_PROTECTED); - - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->protocol = eth_type_trans(skb, dev); - netif_rx(skb); -} - static void mon_recv_encrypted(struct net_device *dev, const u8 *data, int data_len) { @@ -110,6 +82,7 @@ static void mon_recv_encrypted(struct net_device *dev, const u8 *data, void rtl88eu_mon_recv_hook(struct net_device *dev, struct recv_frame *frame) { struct rx_pkt_attrib *attr; + int iv_len, icv_len; int data_len; u8 *data; @@ -122,8 +95,11 @@ void rtl88eu_mon_recv_hook(struct net_device *dev, struct recv_frame *frame) data = frame->pkt->data; data_len = frame->pkt->len; + /* Broadcast and multicast frames don't have attr->{iv,icv}_len set */ + SET_ICE_IV_LEN(iv_len, icv_len, attr->encrypt); + if (attr->bdecrypted) - mon_recv_decrypted_recv(dev, data, data_len); + mon_recv_decrypted(dev, data, data_len, iv_len, icv_len); else mon_recv_encrypted(dev, data, data_len); } diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index 46b3f19e0878..db3eb7ec5809 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -1702,6 +1702,8 @@ static short rtl8192_usb_initendpoints(struct net_device *dev) priv->rx_urb[16] = usb_alloc_urb(0, GFP_KERNEL); priv->oldaddr = kmalloc(16, GFP_KERNEL); + if (!priv->oldaddr) + return -ENOMEM; oldaddr = priv->oldaddr; align = ((long)oldaddr) & 3; if (align) { diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c index d3007c1c45e3..a84400f07a38 100644 --- a/drivers/staging/rtl8723bs/core/rtw_ap.c +++ 
b/drivers/staging/rtl8723bs/core/rtw_ap.c @@ -1059,7 +1059,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) return _FAIL; - if (len > MAX_IE_SZ) + if (len < 0 || len > MAX_IE_SZ) return _FAIL; pbss_network->IELength = len; diff --git a/drivers/staging/rtlwifi/phydm/phydm_dig.c b/drivers/staging/rtlwifi/phydm/phydm_dig.c index 31a4f3fcad19..c88b9788363a 100644 --- a/drivers/staging/rtlwifi/phydm/phydm_dig.c +++ b/drivers/staging/rtlwifi/phydm/phydm_dig.c @@ -490,6 +490,8 @@ void odm_pause_dig(void *dm_void, enum phydm_pause_type pause_type, break; } + /* pin max_level to be >= 0 */ + max_level = max_t(s8, 0, max_level); /* write IGI of lower level */ odm_write_dig(dm, dig_tab->pause_dig_value[max_level]); ODM_RT_TRACE(dm, ODM_COMP_DIG, diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c index 8e24da16752c..acabb2470d55 100644 --- a/drivers/staging/rtlwifi/rtl8822be/fw.c +++ b/drivers/staging/rtlwifi/rtl8822be/fw.c @@ -419,7 +419,7 @@ static bool _rtl8822be_send_bcn_or_cmd_packet(struct ieee80211_hw *hw, dma_addr = rtlpriv->cfg->ops->get_desc( hw, (u8 *)pbd_desc, true, HW_DESC_TXBUFF_ADDR); - pci_unmap_single(rtlpci->pdev, dma_addr, skb->len, + pci_unmap_single(rtlpci->pdev, dma_addr, pskb->len, PCI_DMA_TODEVICE); kfree_skb(pskb); @@ -464,6 +464,8 @@ bool rtl8822b_halmac_cb_write_data_rsvd_page(struct rtl_priv *rtlpriv, u8 *buf, int count; skb = dev_alloc_skb(size); + if (!skb) + return false; memcpy((u8 *)skb_put(skb, size), buf, size); if (!_rtl8822be_send_bcn_or_cmd_packet(rtlpriv->hw, skb, BEACON_QUEUE)) diff --git a/drivers/staging/rtlwifi/rtl8822be/hw.c b/drivers/staging/rtlwifi/rtl8822be/hw.c index 74386003044f..c6db2bd20594 100644 --- a/drivers/staging/rtlwifi/rtl8822be/hw.c +++ b/drivers/staging/rtlwifi/rtl8822be/hw.c @@ -814,7 +814,7 @@ static void _rtl8822be_enable_aspm_back_door(struct ieee80211_hw *hw) return; pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp); - pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | BIT(7)); + pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | ASPM_L1_LATENCY << 3); pci_read_config_byte(rtlpci->pdev, 0x719, &tmp); pci_write_config_byte(rtlpci->pdev, 0x719, tmp | BIT(3) | BIT(4)); diff --git a/drivers/staging/rtlwifi/wifi.h b/drivers/staging/rtlwifi/wifi.h index eb91c130b245..5f0bc363ad41 100644 --- a/drivers/staging/rtlwifi/wifi.h +++ b/drivers/staging/rtlwifi/wifi.h @@ -99,6 +99,7 @@ #define RTL_USB_MAX_RX_COUNT 100 #define QBSS_LOAD_SIZE 5 #define MAX_WMMELE_LENGTH 64 +#define ASPM_L1_LATENCY 7 #define TOTAL_CAM_ENTRY 32 diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h index 09c223f815de..aee82fcaf669 100644 --- a/drivers/staging/sm750fb/ddk750_chip.h +++ b/drivers/staging/sm750fb/ddk750_chip.h @@ -18,7 +18,7 @@ static inline u32 peek32(u32 addr) return readl(addr + mmio750); } -static inline void poke32(u32 data, u32 addr) +static inline void poke32(u32 addr, u32 data) { writel(data, addr + mmio750); } diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c index d99daf69e501..fe229d63deec 100644 --- a/drivers/staging/speakup/speakup_soft.c +++ b/drivers/staging/speakup/speakup_soft.c @@ -207,11 +207,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count, int chars_sent = 0; char __user *cp; char *init; + size_t bytes_per_ch = unicode ? 
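The rtw_check_beacon_data() guard above matters because len is signed: 'len > MAX_IE_SZ' alone accepts negative lengths, which later become huge values once used as an unsigned size. A self-contained illustration, assuming MAX_IE_SZ of 768 as defined in this driver's headers:

#include <stdio.h>

#define MAX_IE_SZ 768

static int check_len(int len)
{
        if (len < 0 || len > MAX_IE_SZ)
                return -1;              /* _FAIL */
        return 0;                       /* _SUCCESS */
}

int main(void)
{
        /* prints: -1 0 -1 */
        printf("%d %d %d\n", check_len(-1), check_len(100), check_len(4096));
        return 0;
}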
3 : 1; u16 ch; int empty; unsigned long flags; DEFINE_WAIT(wait); + if (count < bytes_per_ch) + return -EINVAL; + spin_lock_irqsave(&speakup_info.spinlock, flags); while (1) { prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE); @@ -237,7 +241,7 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count, init = get_initstring(); /* Keep 3 bytes available for a 16bit UTF-8-encoded character */ - while (chars_sent <= count - 3) { + while (chars_sent <= count - bytes_per_ch) { if (speakup_info.flushing) { speakup_info.flushing = 0; ch = '\x18'; diff --git a/drivers/staging/typec/fusb302/fusb302.c b/drivers/staging/typec/fusb302/fusb302.c index fc6a3cf74eb3..7d9f25db1add 100644 --- a/drivers/staging/typec/fusb302/fusb302.c +++ b/drivers/staging/typec/fusb302/fusb302.c @@ -1552,6 +1552,21 @@ static int fusb302_pd_read_message(struct fusb302_chip *chip, fusb302_log(chip, "PD message header: %x", msg->header); fusb302_log(chip, "PD message len: %d", len); + /* + * Check if we've read off a GoodCRC message. If so then indicate to + * TCPM that the previous transmission has completed. Otherwise we pass + * the received message over to TCPM for processing. + * + * We make this check here instead of basing the reporting decision on + * the IRQ event type, as it's possible for the chip to report the + * TX_SUCCESS and GCRCSENT events out of order on occasion, so we need + * to check the message type to ensure correct reporting to TCPM. + */ + if ((!len) && (pd_header_type_le(msg->header) == PD_CTRL_GOOD_CRC)) + tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_SUCCESS); + else + tcpm_pd_receive(chip->tcpm_port, msg); + return ret; } @@ -1659,13 +1674,12 @@ static irqreturn_t fusb302_irq_intn(int irq, void *dev_id) if (interrupta & FUSB_REG_INTERRUPTA_TX_SUCCESS) { fusb302_log(chip, "IRQ: PD tx success"); - /* read out the received good CRC */ ret = fusb302_pd_read_message(chip, &pd_msg); if (ret < 0) { - fusb302_log(chip, "cannot read in GCRC, ret=%d", ret); + fusb302_log(chip, + "cannot read in PD message, ret=%d", ret); goto done; } - tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_SUCCESS); } if (interrupta & FUSB_REG_INTERRUPTA_HARDRESET) { @@ -1686,7 +1700,6 @@ static irqreturn_t fusb302_irq_intn(int irq, void *dev_id) "cannot read in PD message, ret=%d", ret); goto done; } - tcpm_pd_receive(chip->tcpm_port, &pd_msg); } done: mutex_unlock(&chip->lock); diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h index 4b9302703b36..eeac4f0cb2c6 100644 --- a/drivers/staging/vboxvideo/vbox_drv.h +++ b/drivers/staging/vboxvideo/vbox_drv.h @@ -137,8 +137,8 @@ struct vbox_connector { char name[32]; struct vbox_crtc *vbox_crtc; struct { - u16 width; - u16 height; + u32 width; + u32 height; bool disconnected; } mode_hint; }; @@ -150,8 +150,8 @@ struct vbox_crtc { unsigned int crtc_id; u32 fb_offset; bool cursor_enabled; - u16 x_hint; - u16 y_hint; + u32 x_hint; + u32 y_hint; }; struct vbox_encoder { diff --git a/drivers/staging/vboxvideo/vbox_irq.c b/drivers/staging/vboxvideo/vbox_irq.c index 3ca8bec62ac4..74abdf02d9fd 100644 --- a/drivers/staging/vboxvideo/vbox_irq.c +++ b/drivers/staging/vboxvideo/vbox_irq.c @@ -150,8 +150,8 @@ static void vbox_update_mode_hints(struct vbox_private *vbox) disconnected = !(hints->enabled); crtc_id = vbox_conn->vbox_crtc->crtc_id; - vbox_conn->mode_hint.width = hints->cx & 0x8fff; - vbox_conn->mode_hint.height = hints->cy & 0x8fff; + vbox_conn->mode_hint.width = hints->cx; + vbox_conn->mode_hint.height = 
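The 'count < bytes_per_ch' guard added to softsynthx_read() above protects the loop bound 'count - bytes_per_ch': count is a size_t, so for count < 3 the old 'count - 3' expression wrapped around to an enormous value and the copy loop could run far past the buffer. A two-line demonstration of the wrap-around:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
        size_t count = 2;

        /* size_t arithmetic never goes negative; 2 - 3 wraps around */
        printf("count - 3 = %zu\n", count - 3); /* enormous, not -1 */
        return 0;
}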
hints->cy; vbox_conn->vbox_crtc->x_hint = hints->dx; vbox_conn->vbox_crtc->y_hint = hints->dy; vbox_conn->mode_hint.disconnected = disconnected; diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c index 257a77830410..6f08dc966719 100644 --- a/drivers/staging/vboxvideo/vbox_mode.c +++ b/drivers/staging/vboxvideo/vbox_mode.c @@ -553,12 +553,22 @@ static int vbox_get_modes(struct drm_connector *connector) ++num_modes; } vbox_set_edid(connector, preferred_width, preferred_height); - drm_object_property_set_value( - &connector->base, vbox->dev->mode_config.suggested_x_property, - vbox_connector->vbox_crtc->x_hint); - drm_object_property_set_value( - &connector->base, vbox->dev->mode_config.suggested_y_property, - vbox_connector->vbox_crtc->y_hint); + + if (vbox_connector->vbox_crtc->x_hint != -1) + drm_object_property_set_value(&connector->base, + vbox->dev->mode_config.suggested_x_property, + vbox_connector->vbox_crtc->x_hint); + else + drm_object_property_set_value(&connector->base, + vbox->dev->mode_config.suggested_x_property, 0); + + if (vbox_connector->vbox_crtc->y_hint != -1) + drm_object_property_set_value(&connector->base, + vbox->dev->mode_config.suggested_y_property, + vbox_connector->vbox_crtc->y_hint); + else + drm_object_property_set_value(&connector->base, + vbox->dev->mode_config.suggested_y_property, 0); return num_modes; } @@ -640,9 +650,9 @@ static int vbox_connector_init(struct drm_device *dev, drm_mode_create_suggested_offset_properties(dev); drm_object_attach_property(&connector->base, - dev->mode_config.suggested_x_property, -1); + dev->mode_config.suggested_x_property, 0); drm_object_attach_property(&connector->base, - dev->mode_config.suggested_y_property, -1); + dev->mode_config.suggested_y_property, 0); drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c index 8f2d508183b2..9030d71a3d0b 100644 --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c @@ -36,6 +36,10 @@ MODULE_PARM_DESC(enable_compat_alsa, static void snd_devm_unregister_child(struct device *dev, void *res) { struct device *childdev = *(struct device **)res; + struct bcm2835_chip *chip = dev_get_drvdata(childdev); + struct snd_card *card = chip->card; + + snd_card_free(card); device_unregister(childdev); } @@ -61,6 +65,13 @@ static int snd_devm_add_child(struct device *dev, struct device *child) return 0; } +static void snd_bcm2835_release(struct device *dev) +{ + struct bcm2835_chip *chip = dev_get_drvdata(dev); + + kfree(chip); +} + static struct device * snd_create_device(struct device *parent, struct device_driver *driver, @@ -76,6 +87,7 @@ snd_create_device(struct device *parent, device_initialize(device); device->parent = parent; device->driver = driver; + device->release = snd_bcm2835_release; dev_set_name(device, "%s", name); @@ -86,18 +98,19 @@ snd_create_device(struct device *parent, return device; } -static int snd_bcm2835_free(struct bcm2835_chip *chip) -{ - kfree(chip); - return 0; -} - /* component-destructor * (see "Management of Cards and Components") */ static int snd_bcm2835_dev_free(struct snd_device *device) { - return snd_bcm2835_free(device->device_data); + struct bcm2835_chip *chip = device->device_data; + struct snd_card *card = chip->card; + + /* TODO: free pcm, ctl */ + + snd_device_free(card, chip); + + return 0; } /* 
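Dropping the '& 0x8fff' mask in vbox_update_mode_hints() (together with widening the mode_hint fields to u32) fixes hints above 4095 pixels: 0x8fff clears bits 12-14, so, for example, a 5120-pixel width was silently reported as 1024. A standalone check of that corruption:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t cx = 5120;                     /* 0x1400 */
        uint16_t masked = cx & 0x8fff;          /* bits 12-14 cleared */

        printf("%u -> %u\n", cx, masked);       /* prints: 5120 -> 1024 */
        return 0;
}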
chip-specific constructor @@ -122,7 +135,7 @@ static int snd_bcm2835_create(struct snd_card *card, err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err) { - snd_bcm2835_free(chip); + kfree(chip); return err; } @@ -130,31 +143,14 @@ static int snd_bcm2835_create(struct snd_card *card, return 0; } -static void snd_devm_card_free(struct device *dev, void *res) +static struct snd_card *snd_bcm2835_card_new(struct device *dev) { - struct snd_card *snd_card = *(struct snd_card **)res; - - snd_card_free(snd_card); -} - -static struct snd_card *snd_devm_card_new(struct device *dev) -{ - struct snd_card **dr; struct snd_card *card; int ret; - dr = devres_alloc(snd_devm_card_free, sizeof(*dr), GFP_KERNEL); - if (!dr) - return ERR_PTR(-ENOMEM); - ret = snd_card_new(dev, -1, NULL, THIS_MODULE, 0, &card); - if (ret) { - devres_free(dr); + if (ret) return ERR_PTR(ret); - } - - *dr = card; - devres_add(dev, dr); return card; } @@ -271,7 +267,7 @@ static int snd_add_child_device(struct device *device, return PTR_ERR(child); } - card = snd_devm_card_new(child); + card = snd_bcm2835_card_new(child); if (IS_ERR(card)) { dev_err(child, "Failed to create card"); return PTR_ERR(card); @@ -313,7 +309,7 @@ static int snd_add_child_device(struct device *device, return err; } - dev_set_drvdata(child, card); + dev_set_drvdata(child, chip); dev_info(child, "card created with %d channels\n", numchans); return 0; diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c index 486be990d7fc..a457034818c3 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c @@ -601,6 +601,7 @@ reserve_space(VCHIQ_STATE_T *state, size_t space, int is_blocking) } if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) { + up(&state->slot_available_event); pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos); return NULL; } diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 9fcf2e223f71..1123b4f1e1d6 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -1693,10 +1693,11 @@ static int vt6655_suspend(struct pci_dev *pcid, pm_message_t state) MACbShutdown(priv); pci_disable_device(pcid); - pci_set_power_state(pcid, pci_choose_state(pcid, state)); spin_unlock_irqrestore(&priv->lock, flags); + pci_set_power_state(pcid, pci_choose_state(pcid, state)); + return 0; } diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c index 9addef1f1e12..f49dfa82f1b8 100644 --- a/drivers/staging/wilc1000/wilc_wlan.c +++ b/drivers/staging/wilc1000/wilc_wlan.c @@ -714,7 +714,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) char *bssid = ((struct tx_complete_data *)(tqe->priv))->bssid; buffer_offset = ETH_ETHERNET_HDR_OFFSET; - memcpy(&txb[offset + 4], bssid, 6); + memcpy(&txb[offset + 8], bssid, 6); } else { buffer_offset = HOST_HDR_OFFSET; } diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 5001261f5d69..52fa52c20be0 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -834,6 +834,7 @@ static int iscsit_add_reject_from_cmd( unsigned char *buf) { struct iscsi_conn *conn; + const bool do_put = cmd->se_cmd.se_tfo != NULL; if (!cmd->conn) { pr_err("cmd->conn is NULL for ITT: 0x%08x\n", @@ -864,7 +865,7 @@ static int iscsit_add_reject_from_cmd( * 
Perform the kref_put now if se_cmd has already been setup by * scsit_setup_scsi_cmd() */ - if (cmd->se_cmd.se_tfo != NULL) { + if (do_put) { pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n"); target_put_sess_cmd(&cmd->se_cmd); } @@ -1960,7 +1961,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct iscsi_tmr_req *tmr_req; struct iscsi_tm *hdr; int out_of_order_cmdsn = 0, ret; - bool sess_ref = false; u8 function, tcm_function = TMR_UNKNOWN; hdr = (struct iscsi_tm *) buf; @@ -1993,22 +1993,23 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, cmd->data_direction = DMA_NONE; cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL); - if (!cmd->tmr_req) + if (!cmd->tmr_req) { return iscsit_add_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); + } + + transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, + conn->sess->se_sess, 0, DMA_NONE, + TCM_SIMPLE_TAG, cmd->sense_buffer + 2); + + target_get_sess_cmd(&cmd->se_cmd, true); /* * TASK_REASSIGN for ERL=2 / connection stays inside of * LIO-Target $FABRIC_MOD */ if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { - transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, - conn->sess->se_sess, 0, DMA_NONE, - TCM_SIMPLE_TAG, cmd->sense_buffer + 2); - - target_get_sess_cmd(&cmd->se_cmd, true); - sess_ref = true; tcm_function = iscsit_convert_tmf(function); if (tcm_function == TMR_UNKNOWN) { pr_err("Unknown iSCSI TMR Function:" @@ -2099,12 +2100,14 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); - if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) + if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) { out_of_order_cmdsn = 1; - else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) + } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { + target_put_sess_cmd(&cmd->se_cmd); return 0; - else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) { return -1; + } } iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); @@ -2124,12 +2127,8 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, * For connection recovery, this is also the default action for * TMR TASK_REASSIGN. 
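The do_put change in iscsit_add_reject_from_cmd() above is a use-after-free avoidance pattern: sample the condition into a local before the call that can drop the last reference to the command, then act only on the cached value. In sketch form, with the names taken from the hunk:

const bool do_put = cmd->se_cmd.se_tfo != NULL;

/* ... queueing the reject may release cmd via its response path ... */

if (do_put)
        target_put_sess_cmd(&cmd->se_cmd);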
*/ - if (sess_ref) { - pr_debug("Handle TMR, using sess_ref=true check\n"); - target_put_sess_cmd(&cmd->se_cmd); - } - iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); + target_put_sess_cmd(&cmd->se_cmd); return 0; } EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd); diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index f9bc8ec6fb6b..9518ffd8b8ba 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -421,7 +421,8 @@ static int chap_server_compute_md5( auth_ret = 0; out: kzfree(desc); - crypto_free_shash(tfm); + if (tfm) + crypto_free_shash(tfm); kfree(challenge); kfree(challenge_binhex); return auth_ret; diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 0dd4c45f7575..0ebc4818e132 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -1123,7 +1123,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg( ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI); if (ret < 0) - return NULL; + goto free_out; ret = iscsit_tpg_add_portal_group(tiqn, tpg); if (ret != 0) @@ -1135,6 +1135,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg( return &tpg->tpg_se_tpg; out: core_tpg_deregister(&tpg->tpg_se_tpg); +free_out: kfree(tpg); return NULL; } diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 7a6751fecd32..87248a2512e5 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -432,6 +432,9 @@ static void iscsi_target_sk_data_ready(struct sock *sk) if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { write_unlock_bh(&sk->sk_callback_lock); pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn); + if (iscsi_target_sk_data_ready == conn->orig_data_ready) + return; + conn->orig_data_ready(sk); return; } diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 1e36f83b5961..70c6b9bfc04e 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -694,6 +694,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd) struct iscsi_session *sess; struct se_cmd *se_cmd = &cmd->se_cmd; + WARN_ON(!list_empty(&cmd->i_conn_node)); + if (cmd->conn) sess = cmd->conn->sess; else @@ -716,6 +718,8 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues) { struct iscsi_conn *conn = cmd->conn; + WARN_ON(!list_empty(&cmd->i_conn_node)); + if (cmd->data_direction == DMA_TO_DEVICE) { iscsit_stop_dataout_timer(cmd); iscsit_free_r2ts_from_list(cmd); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index c629817a8854..9b2c0c773022 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -482,6 +482,10 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) struct inode *inode = file->f_mapping->host; int ret; + if (!nolb) { + return 0; + } + if (cmd->se_dev->dev_attrib.pi_prot_type) { ret = fd_do_prot_unmap(cmd, lba, nolb); if (ret) diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 07c814c42648..60429011292a 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -427,8 +427,8 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; struct scatterlist *sg = 
&cmd->t_data_sg[0]; - unsigned char *buf, zero = 0x00, *p = &zero; - int rc, ret; + unsigned char *buf, *not_zero; + int ret; buf = kmap(sg_page(sg)) + sg->offset; if (!buf) @@ -437,10 +437,10 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd) * Fall back to block_execute_write_same() slow-path if * incoming WRITE_SAME payload does not contain zeros. */ - rc = memcmp(buf, p, cmd->data_length); + not_zero = memchr_inv(buf, 0x00, cmd->data_length); kunmap(sg_page(sg)); - if (rc) + if (not_zero) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; ret = blkdev_issue_zeroout(bdev, diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index dd2cd8048582..fd6ce9996488 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -58,8 +58,10 @@ void core_pr_dump_initiator_port( char *buf, u32 size) { - if (!pr_reg->isid_present_at_reg) + if (!pr_reg->isid_present_at_reg) { buf[0] = '\0'; + return; + } snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid); } @@ -3727,11 +3729,16 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd) * Check for overflow of 8byte PRI READ_KEYS payload and * next reservation key list descriptor. */ - if ((add_len + 8) > (cmd->data_length - 8)) - break; - - put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); - off += 8; + if (off + 8 <= cmd->data_length) { + put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); + off += 8; + } + /* + * SPC5r17: 6.16.2 READ KEYS service action + * The ADDITIONAL LENGTH field indicates the number of bytes in + * the Reservation key list. The contents of the ADDITIONAL + * LENGTH field are not altered based on the allocation length + */ add_len += 8; } spin_unlock(&dev->t10_pr.registration_lock); @@ -4011,6 +4018,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) * Set the ADDITIONAL DESCRIPTOR LENGTH */ put_unaligned_be32(desc_len, &buf[off]); + off += 4; /* * Size of full desctipor header minus TransportID * containing $FABRIC_MOD specific) initiator device/port diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 7c69b4a9694d..6cb933ecc084 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -890,6 +890,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, bytes = min(bytes, data_len); if (!bio) { +new_bio: nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); nr_pages -= nr_vecs; /* @@ -920,7 +921,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, " %d i: %d bio: %p, allocating another" " bio\n", bio->bi_vcnt, i, bio); - rc = blk_rq_append_bio(req, bio); + rc = blk_rq_append_bio(req, &bio); if (rc) { pr_err("pSCSI: failed to append bio\n"); goto fail; @@ -931,6 +932,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, * be allocated with pscsi_get_bio() above. 
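The iblock_execute_zero_out() change above replaces 'memcmp(buf, &zero, cmd->data_length)' — which read data_length bytes starting at a single one-byte stack variable, i.e. out of bounds — with memchr_inv(), which scans one buffer for any byte differing from a constant. A userspace equivalent of what memchr_inv() computes:

#include <stdio.h>
#include <string.h>

static const void *memchr_inv_demo(const void *p, int c, size_t n)
{
        const unsigned char *s = p;
        size_t i;

        for (i = 0; i < n; i++)
                if (s[i] != (unsigned char)c)
                        return s + i;
        return NULL;    /* every byte equals c */
}

int main(void)
{
        char buf[16] = { 0 };

        buf[7] = 1;
        printf("first non-zero at offset %td\n",
               (const char *)memchr_inv_demo(buf, 0, sizeof(buf)) -
               (const char *)buf);
        return 0;
}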
*/ bio = NULL; + goto new_bio; } data_len -= bytes; @@ -938,7 +940,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, } if (bio) { - rc = blk_rq_append_bio(req, bio); + rc = blk_rq_append_bio(req, &bio); if (rc) { pr_err("pSCSI: failed to append bio\n"); goto fail; diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index e22847bd79b9..9c7bc1ca341a 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -133,6 +133,15 @@ static bool __target_check_io_state(struct se_cmd *se_cmd, spin_unlock(&se_cmd->t_state_lock); return false; } + if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) { + if (se_cmd->scsi_status) { + pr_debug("Attempted to abort io tag: %llu early failure" + " status: 0x%02x\n", se_cmd->tag, + se_cmd->scsi_status); + spin_unlock(&se_cmd->t_state_lock); + return false; + } + } if (sess->sess_tearing_down || se_cmd->cmd_wait_set) { pr_debug("Attempted to abort io tag: %llu already shutdown," " skipping\n", se_cmd->tag); @@ -217,7 +226,8 @@ static void core_tmr_drain_tmr_list( * LUN_RESET tmr.. */ spin_lock_irqsave(&dev->se_tmr_lock, flags); - list_del_init(&tmr->tmr_list); + if (tmr) + list_del_init(&tmr->tmr_list); list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { cmd = tmr_p->task_cmd; if (!cmd) { diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 836d552b0385..e6d51135d105 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1730,9 +1730,6 @@ void transport_generic_request_failure(struct se_cmd *cmd, { int ret = 0, post_ret = 0; - if (transport_check_aborted_status(cmd, 1)) - return; - pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", sense_reason); target_show_cmd("-----[ ", cmd); @@ -1741,6 +1738,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, * For SAM Task Attribute emulation for failed struct se_cmd */ transport_complete_task_attr(cmd); + /* * Handle special case for COMPARE_AND_WRITE failure, where the * callback is expected to drop the per device ->caw_sem. 
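The pscsi hunk above tracks an API change: blk_rq_append_bio() now takes 'struct bio **' because the block layer may bounce the bio and hand back a replacement, so the caller must continue with the possibly updated pointer. The calling convention in sketch form (my_alloc_bio is a hypothetical stand-in for the driver's bio setup):

struct bio *bio = my_alloc_bio();       /* hypothetical helper */
int rc;

rc = blk_rq_append_bio(req, &bio);      /* may rewrite 'bio' */
if (rc)
        goto fail;
/* from here on, only the (possibly new) 'bio' value may be used */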
@@ -1749,6 +1747,9 @@ void transport_generic_request_failure(struct se_cmd *cmd, cmd->transport_complete_callback) cmd->transport_complete_callback(cmd, false, &post_ret); + if (transport_check_aborted_status(cmd, 1)) + return; + switch (sense_reason) { case TCM_NON_EXISTENT_LUN: case TCM_UNSUPPORTED_SCSI_OPCODE: @@ -1973,6 +1974,7 @@ void target_execute_cmd(struct se_cmd *cmd) } cmd->t_state = TRANSPORT_PROCESSING; + cmd->transport_state &= ~CMD_T_PRE_EXECUTE; cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); @@ -2010,6 +2012,8 @@ static void target_restart_delayed_cmds(struct se_device *dev) list_del(&cmd->se_delayed_node); spin_unlock(&dev->delayed_cmd_lock); + cmd->transport_state |= CMD_T_SENT; + __target_execute_cmd(cmd, true); if (cmd->sam_task_attr == TCM_ORDERED_TAG) @@ -2045,6 +2049,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd) pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", dev->dev_cur_ordered_id); } + cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; + restart: target_restart_delayed_cmds(dev); } @@ -2570,7 +2576,20 @@ EXPORT_SYMBOL(transport_generic_new_cmd); static void transport_write_pending_qf(struct se_cmd *cmd) { + unsigned long flags; int ret; + bool stop; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED)); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + if (stop) { + pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n", + __func__, __LINE__, cmd->tag); + complete_all(&cmd->t_transport_stop_comp); + return; + } ret = cmd->se_tfo->write_pending(cmd); if (ret) { @@ -2664,6 +2683,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) ret = -ESHUTDOWN; goto out; } + se_cmd->transport_state |= CMD_T_PRE_EXECUTE; list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); out: spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 942d094269fb..c4a5fb6f038f 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -796,6 +796,13 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) int ret; DEFINE_WAIT(__wait); + /* + * Don't leave commands partially setup because the unmap + * thread might need the blocks to make forward progress. + */ + tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur); + tcmu_cmd_reset_dbi_cur(tcmu_cmd); + prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE); pr_debug("sleeping for ring space\n"); diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index 7952357df9c8..edb6e4e9ef3a 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -590,7 +590,6 @@ static int __init optee_driver_init(void) return -ENODEV; np = of_find_matching_node(fw_np, optee_match); - of_node_put(fw_np); if (!np) return -ENODEV; diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 58a5009eacc3..a548c3695797 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -181,6 +181,17 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params, if (IS_ERR(shm)) return PTR_ERR(shm); + /* + * Ensure offset + size does not overflow offset + * and does not overflow the size of the referred + * shared memory object. 
+ */ + if ((ip.a + ip.b) < ip.a || + (ip.a + ip.b) > shm->size) { + tee_shm_put(shm); + return -EINVAL; + } + params[n].u.memref.shm_offs = ip.a; params[n].u.memref.size = ip.b; params[n].u.memref.shm = shm; diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 4bc7956cefc4..ea3ce4e17b85 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -203,9 +203,10 @@ int tee_shm_get_fd(struct tee_shm *shm) if ((shm->flags & req_flags) != req_flags) return -EINVAL; + get_dma_buf(shm->dmabuf); fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC); - if (fd >= 0) - get_dma_buf(shm->dmabuf); + if (fd < 0) + dma_buf_put(shm->dmabuf); return fd; } diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c index a4d6a0e2e993..23ad4f9f2143 100644 --- a/drivers/thermal/broadcom/bcm2835_thermal.c +++ b/drivers/thermal/broadcom/bcm2835_thermal.c @@ -213,8 +213,8 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) rate = clk_get_rate(data->clk); if ((rate < 1920000) || (rate > 5000000)) dev_warn(&pdev->dev, - "Clock %pCn running at %pCr Hz is outside of the recommended range: 1.92 to 5MHz\n", - data->clk, data->clk); + "Clock %pCn running at %lu Hz is outside of the recommended range: 1.92 to 5MHz\n", + data->clk, rate); /* register of thermal sensor and get info from DT */ tz = thermal_zone_of_sensor_register(&pdev->dev, 0, data, diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index bd3572c41585..2d855a96cdd9 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -23,222 +23,450 @@ #include #include #include +#include #include "thermal_core.h" -#define TEMP0_TH (0x4) -#define TEMP0_RST_TH (0x8) -#define TEMP0_CFG (0xC) -#define TEMP0_EN (0x10) -#define TEMP0_INT_EN (0x14) -#define TEMP0_INT_CLR (0x18) -#define TEMP0_RST_MSK (0x1C) -#define TEMP0_VALUE (0x28) - -#define HISI_TEMP_BASE (-60) -#define HISI_TEMP_RESET (100000) - -#define HISI_MAX_SENSORS 4 +#define HI6220_TEMP0_LAG (0x0) +#define HI6220_TEMP0_TH (0x4) +#define HI6220_TEMP0_RST_TH (0x8) +#define HI6220_TEMP0_CFG (0xC) +#define HI6220_TEMP0_CFG_SS_MSK (0xF000) +#define HI6220_TEMP0_CFG_HDAK_MSK (0x30) +#define HI6220_TEMP0_EN (0x10) +#define HI6220_TEMP0_INT_EN (0x14) +#define HI6220_TEMP0_INT_CLR (0x18) +#define HI6220_TEMP0_RST_MSK (0x1C) +#define HI6220_TEMP0_VALUE (0x28) + +#define HI3660_OFFSET(chan) ((chan) * 0x40) +#define HI3660_TEMP(chan) (HI3660_OFFSET(chan) + 0x1C) +#define HI3660_TH(chan) (HI3660_OFFSET(chan) + 0x20) +#define HI3660_LAG(chan) (HI3660_OFFSET(chan) + 0x28) +#define HI3660_INT_EN(chan) (HI3660_OFFSET(chan) + 0x2C) +#define HI3660_INT_CLR(chan) (HI3660_OFFSET(chan) + 0x30) + +#define HI6220_TEMP_BASE (-60000) +#define HI6220_TEMP_RESET (100000) +#define HI6220_TEMP_STEP (785) +#define HI6220_TEMP_LAG (3500) + +#define HI3660_TEMP_BASE (-63780) +#define HI3660_TEMP_STEP (205) +#define HI3660_TEMP_LAG (4000) + +#define HI6220_DEFAULT_SENSOR 2 +#define HI3660_DEFAULT_SENSOR 1 struct hisi_thermal_sensor { - struct hisi_thermal_data *thermal; struct thermal_zone_device *tzd; - - long sensor_temp; uint32_t id; uint32_t thres_temp; }; struct hisi_thermal_data { - struct mutex thermal_lock; /* protects register data */ + int (*get_temp)(struct hisi_thermal_data *data); + int (*enable_sensor)(struct hisi_thermal_data *data); + int (*disable_sensor)(struct hisi_thermal_data *data); + int (*irq_handler)(struct hisi_thermal_data *data); struct platform_device *pdev; struct clk *clk; - struct hisi_thermal_sensor 
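The params_from_user() check above is the canonical unsigned-overflow guard: if 'a + b' wraps, the sum compares less than 'a'. Standalone form of the same test (offs/size model the u64 ip.a/ip.b fields):

#include <stdio.h>
#include <stdint.h>

static int memref_ok(uint64_t offs, uint64_t size, uint64_t shm_size)
{
        if (offs + size < offs)         /* wrapped around */
                return 0;
        if (offs + size > shm_size)     /* past the end of the object */
                return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", memref_ok(UINT64_MAX - 4, 8, 4096));     /* 0 */
        printf("%d\n", memref_ok(16, 32, 4096));                /* 1 */
        return 0;
}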
sensors[HISI_MAX_SENSORS]; - - int irq, irq_bind_sensor; - bool irq_enabled; - + struct hisi_thermal_sensor sensor; void __iomem *regs; + int irq; }; -/* in millicelsius */ -static inline int _step_to_temp(int step) +/* + * The temperature computation on the tsensor is as follow: + * Unit: millidegree Celsius + * Step: 200/255 (0.7843) + * Temperature base: -60°C + * + * The register is programmed in temperature steps, every step is 785 + * millidegree and begins at -60 000 m°C + * + * The temperature from the steps: + * + * Temp = TempBase + (steps x 785) + * + * and the steps from the temperature: + * + * steps = (Temp - TempBase) / 785 + * + */ +static inline int hi6220_thermal_step_to_temp(int step) { - /* - * Every step equals (1 * 200) / 255 celsius, and finally - * need convert to millicelsius. - */ - return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255)); + return HI6220_TEMP_BASE + (step * HI6220_TEMP_STEP); } -static inline long _temp_to_step(long temp) +static inline int hi6220_thermal_temp_to_step(int temp) { - return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000; + return DIV_ROUND_UP(temp - HI6220_TEMP_BASE, HI6220_TEMP_STEP); } -static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data, - struct hisi_thermal_sensor *sensor) +/* + * for Hi3660, + * Step: 189/922 (0.205) + * Temperature base: -63.780°C + * + * The register is programmed in temperature steps, every step is 205 + * millidegree and begins at -63 780 m°C + */ +static inline int hi3660_thermal_step_to_temp(int step) { - long val; + return HI3660_TEMP_BASE + step * HI3660_TEMP_STEP; +} - mutex_lock(&data->thermal_lock); +static inline int hi3660_thermal_temp_to_step(int temp) +{ + return DIV_ROUND_UP(temp - HI3660_TEMP_BASE, HI3660_TEMP_STEP); +} - /* disable interrupt */ - writel(0x0, data->regs + TEMP0_INT_EN); - writel(0x1, data->regs + TEMP0_INT_CLR); +/* + * The lag register contains 5 bits encoding the temperature in steps. + * + * Each time the temperature crosses the threshold boundary, an + * interrupt is raised. It could be when the temperature is going + * above the threshold or below. However, if the temperature is + * fluctuating around this value due to the load, we can receive + * several interrupts which may not desired. + * + * We can setup a temperature representing the delta between the + * threshold and the current temperature when the temperature is + * decreasing. + * + * For instance: the lag register is 5°C, the threshold is 65°C, when + * the temperature reaches 65°C an interrupt is raised and when the + * temperature decrease to 65°C - 5°C another interrupt is raised. + * + * A very short lag can lead to an interrupt storm, a long lag + * increase the latency to react to the temperature changes. In our + * case, that is not really a problem as we are polling the + * temperature. + * + * [0:4] : lag register + * + * The temperature is coded in steps, cf. HI6220_TEMP_STEP. + * + * Min : 0x00 : 0.0 °C + * Max : 0x1F : 24.3 °C + * + * The 'value' parameter is in milliCelsius. 
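The hi6220 conversion helpers above reduce to one line of arithmetic each. A worked example using the constants introduced by this patch (base -60000 m°C, step 785 m°C), with DIV_ROUND_UP open-coded so the program is self-contained:

#include <stdio.h>

#define HI6220_TEMP_BASE        (-60000)
#define HI6220_TEMP_STEP        (785)

static int step_to_temp(int step)
{
        return HI6220_TEMP_BASE + step * HI6220_TEMP_STEP;
}

static int temp_to_step(int temp)
{
        /* open-coded DIV_ROUND_UP */
        return (temp - HI6220_TEMP_BASE + HI6220_TEMP_STEP - 1) /
               HI6220_TEMP_STEP;
}

int main(void)
{
        printf("step 165 -> %d m°C\n", step_to_temp(165));      /* 69525 */
        printf("65000 m°C -> step %d\n", temp_to_step(65000));  /* 160 */
        return 0;
}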
+ */ +static inline void hi6220_thermal_set_lag(void __iomem *addr, int value) +{ + writel(DIV_ROUND_UP(value, HI6220_TEMP_STEP) & 0x1F, + addr + HI6220_TEMP0_LAG); +} - /* disable module firstly */ - writel(0x0, data->regs + TEMP0_EN); +static inline void hi6220_thermal_alarm_clear(void __iomem *addr, int value) +{ + writel(value, addr + HI6220_TEMP0_INT_CLR); +} - /* select sensor id */ - writel((sensor->id << 12), data->regs + TEMP0_CFG); +static inline void hi6220_thermal_alarm_enable(void __iomem *addr, int value) +{ + writel(value, addr + HI6220_TEMP0_INT_EN); +} - /* enable module */ - writel(0x1, data->regs + TEMP0_EN); +static inline void hi6220_thermal_alarm_set(void __iomem *addr, int temp) +{ + writel(hi6220_thermal_temp_to_step(temp) | 0x0FFFFFF00, + addr + HI6220_TEMP0_TH); +} - usleep_range(3000, 5000); +static inline void hi6220_thermal_reset_set(void __iomem *addr, int temp) +{ + writel(hi6220_thermal_temp_to_step(temp), addr + HI6220_TEMP0_RST_TH); +} - val = readl(data->regs + TEMP0_VALUE); - val = _step_to_temp(val); +static inline void hi6220_thermal_reset_enable(void __iomem *addr, int value) +{ + writel(value, addr + HI6220_TEMP0_RST_MSK); +} - mutex_unlock(&data->thermal_lock); +static inline void hi6220_thermal_enable(void __iomem *addr, int value) +{ + writel(value, addr + HI6220_TEMP0_EN); +} - return val; +static inline int hi6220_thermal_get_temperature(void __iomem *addr) +{ + return hi6220_thermal_step_to_temp(readl(addr + HI6220_TEMP0_VALUE)); } -static void hisi_thermal_enable_bind_irq_sensor - (struct hisi_thermal_data *data) +/* + * [0:6] lag register + * + * The temperature is coded in steps, cf. HI3660_TEMP_STEP. + * + * Min : 0x00 : 0.0 °C + * Max : 0x7F : 26.0 °C + * + */ +static inline void hi3660_thermal_set_lag(void __iomem *addr, + int id, int value) { - struct hisi_thermal_sensor *sensor; + writel(DIV_ROUND_UP(value, HI3660_TEMP_STEP) & 0x7F, + addr + HI3660_LAG(id)); +} - mutex_lock(&data->thermal_lock); +static inline void hi3660_thermal_alarm_clear(void __iomem *addr, + int id, int value) +{ + writel(value, addr + HI3660_INT_CLR(id)); +} - sensor = &data->sensors[data->irq_bind_sensor]; +static inline void hi3660_thermal_alarm_enable(void __iomem *addr, + int id, int value) +{ + writel(value, addr + HI3660_INT_EN(id)); +} - /* setting the hdak time */ - writel(0x0, data->regs + TEMP0_CFG); +static inline void hi3660_thermal_alarm_set(void __iomem *addr, + int id, int value) +{ + writel(value, addr + HI3660_TH(id)); +} + +static inline int hi3660_thermal_get_temperature(void __iomem *addr, int id) +{ + return hi3660_thermal_step_to_temp(readl(addr + HI3660_TEMP(id))); +} + +/* + * Temperature configuration register - Sensor selection + * + * Bits [19:12] + * + * 0x0: local sensor (default) + * 0x1: remote sensor 1 (ACPU cluster 1) + * 0x2: remote sensor 2 (ACPU cluster 0) + * 0x3: remote sensor 3 (G3D) + */ +static inline void hi6220_thermal_sensor_select(void __iomem *addr, int sensor) +{ + writel((readl(addr + HI6220_TEMP0_CFG) & ~HI6220_TEMP0_CFG_SS_MSK) | + (sensor << 12), addr + HI6220_TEMP0_CFG); +} + +/* + * Temperature configuration register - Hdak conversion polling interval + * + * Bits [5:4] + * + * 0x0 : 0.768 ms + * 0x1 : 6.144 ms + * 0x2 : 49.152 ms + * 0x3 : 393.216 ms + */ +static inline void hi6220_thermal_hdak_set(void __iomem *addr, int value) +{ + writel((readl(addr + HI6220_TEMP0_CFG) & ~HI6220_TEMP0_CFG_HDAK_MSK) | + (value << 4), addr + HI6220_TEMP0_CFG); +} + +static int hi6220_thermal_irq_handler(struct 
hisi_thermal_data *data) +{ + hi6220_thermal_alarm_clear(data->regs, 1); + return 0; +} + +static int hi3660_thermal_irq_handler(struct hisi_thermal_data *data) +{ + hi3660_thermal_alarm_clear(data->regs, data->sensor.id, 1); + return 0; +} + +static int hi6220_thermal_get_temp(struct hisi_thermal_data *data) +{ + return hi6220_thermal_get_temperature(data->regs); +} + +static int hi3660_thermal_get_temp(struct hisi_thermal_data *data) +{ + return hi3660_thermal_get_temperature(data->regs, data->sensor.id); +} + +static int hi6220_thermal_disable_sensor(struct hisi_thermal_data *data) +{ + /* disable sensor module */ + hi6220_thermal_enable(data->regs, 0); + hi6220_thermal_alarm_enable(data->regs, 0); + hi6220_thermal_reset_enable(data->regs, 0); + + clk_disable_unprepare(data->clk); + + return 0; +} + +static int hi3660_thermal_disable_sensor(struct hisi_thermal_data *data) +{ + /* disable sensor module */ + hi3660_thermal_alarm_enable(data->regs, data->sensor.id, 0); + return 0; +} + +static int hi6220_thermal_enable_sensor(struct hisi_thermal_data *data) +{ + struct hisi_thermal_sensor *sensor = &data->sensor; + int ret; + + /* enable clock for tsensor */ + ret = clk_prepare_enable(data->clk); + if (ret) + return ret; /* disable module firstly */ - writel(0x0, data->regs + TEMP0_RST_MSK); - writel(0x0, data->regs + TEMP0_EN); + hi6220_thermal_reset_enable(data->regs, 0); + hi6220_thermal_enable(data->regs, 0); /* select sensor id */ - writel((sensor->id << 12), data->regs + TEMP0_CFG); + hi6220_thermal_sensor_select(data->regs, sensor->id); + + /* setting the hdak time */ + hi6220_thermal_hdak_set(data->regs, 0); + + /* setting lag value between current temp and the threshold */ + hi6220_thermal_set_lag(data->regs, HI6220_TEMP_LAG); /* enable for interrupt */ - writel(_temp_to_step(sensor->thres_temp) | 0x0FFFFFF00, - data->regs + TEMP0_TH); + hi6220_thermal_alarm_set(data->regs, sensor->thres_temp); - writel(_temp_to_step(HISI_TEMP_RESET), data->regs + TEMP0_RST_TH); + hi6220_thermal_reset_set(data->regs, HI6220_TEMP_RESET); /* enable module */ - writel(0x1, data->regs + TEMP0_RST_MSK); - writel(0x1, data->regs + TEMP0_EN); - - writel(0x0, data->regs + TEMP0_INT_CLR); - writel(0x1, data->regs + TEMP0_INT_EN); + hi6220_thermal_reset_enable(data->regs, 1); + hi6220_thermal_enable(data->regs, 1); - usleep_range(3000, 5000); + hi6220_thermal_alarm_clear(data->regs, 0); + hi6220_thermal_alarm_enable(data->regs, 1); - mutex_unlock(&data->thermal_lock); + return 0; } -static void hisi_thermal_disable_sensor(struct hisi_thermal_data *data) +static int hi3660_thermal_enable_sensor(struct hisi_thermal_data *data) { - mutex_lock(&data->thermal_lock); + unsigned int value; + struct hisi_thermal_sensor *sensor = &data->sensor; - /* disable sensor module */ - writel(0x0, data->regs + TEMP0_INT_EN); - writel(0x0, data->regs + TEMP0_RST_MSK); - writel(0x0, data->regs + TEMP0_EN); + /* disable interrupt */ + hi3660_thermal_alarm_enable(data->regs, sensor->id, 0); - mutex_unlock(&data->thermal_lock); -} + /* setting lag value between current temp and the threshold */ + hi3660_thermal_set_lag(data->regs, sensor->id, HI3660_TEMP_LAG); -static int hisi_thermal_get_temp(void *_sensor, int *temp) -{ - struct hisi_thermal_sensor *sensor = _sensor; - struct hisi_thermal_data *data = sensor->thermal; + /* set interrupt threshold */ + value = hi3660_thermal_temp_to_step(sensor->thres_temp); + hi3660_thermal_alarm_set(data->regs, sensor->id, value); - int sensor_id = -1, i; - long max_temp = 0; + /* enable 
interrupt */ + hi3660_thermal_alarm_clear(data->regs, sensor->id, 1); + hi3660_thermal_alarm_enable(data->regs, sensor->id, 1); - *temp = hisi_thermal_get_sensor_temp(data, sensor); + return 0; +} - sensor->sensor_temp = *temp; +static int hi6220_thermal_probe(struct hisi_thermal_data *data) +{ + struct platform_device *pdev = data->pdev; + struct device *dev = &pdev->dev; + struct resource *res; + int ret; - for (i = 0; i < HISI_MAX_SENSORS; i++) { - if (!data->sensors[i].tzd) - continue; + data->get_temp = hi6220_thermal_get_temp; + data->enable_sensor = hi6220_thermal_enable_sensor; + data->disable_sensor = hi6220_thermal_disable_sensor; + data->irq_handler = hi6220_thermal_irq_handler; - if (data->sensors[i].sensor_temp >= max_temp) { - max_temp = data->sensors[i].sensor_temp; - sensor_id = i; - } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(data->regs)) { + dev_err(dev, "failed to get io address\n"); + return PTR_ERR(data->regs); } - /* If no sensor has been enabled, then skip to enable irq */ - if (sensor_id == -1) - return 0; - - mutex_lock(&data->thermal_lock); - data->irq_bind_sensor = sensor_id; - mutex_unlock(&data->thermal_lock); - - dev_dbg(&data->pdev->dev, "id=%d, irq=%d, temp=%d, thres=%d\n", - sensor->id, data->irq_enabled, *temp, sensor->thres_temp); - /* - * Bind irq to sensor for two cases: - * Reenable alarm IRQ if temperature below threshold; - * if irq has been enabled, always set it; - */ - if (data->irq_enabled) { - hisi_thermal_enable_bind_irq_sensor(data); - return 0; + data->clk = devm_clk_get(dev, "thermal_clk"); + if (IS_ERR(data->clk)) { + ret = PTR_ERR(data->clk); + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to get thermal clk: %d\n", ret); + return ret; } - if (max_temp < sensor->thres_temp) { - data->irq_enabled = true; - hisi_thermal_enable_bind_irq_sensor(data); - enable_irq(data->irq); - } + data->irq = platform_get_irq(pdev, 0); + if (data->irq < 0) + return data->irq; + + data->sensor.id = HI6220_DEFAULT_SENSOR; return 0; } -static const struct thermal_zone_of_device_ops hisi_of_thermal_ops = { - .get_temp = hisi_thermal_get_temp, -}; +static int hi3660_thermal_probe(struct hisi_thermal_data *data) +{ + struct platform_device *pdev = data->pdev; + struct device *dev = &pdev->dev; + struct resource *res; + + data->get_temp = hi3660_thermal_get_temp; + data->enable_sensor = hi3660_thermal_enable_sensor; + data->disable_sensor = hi3660_thermal_disable_sensor; + data->irq_handler = hi3660_thermal_irq_handler; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(data->regs)) { + dev_err(dev, "failed to get io address\n"); + return PTR_ERR(data->regs); + } -static irqreturn_t hisi_thermal_alarm_irq(int irq, void *dev) + data->irq = platform_get_irq(pdev, 0); + if (data->irq < 0) + return data->irq; + + data->sensor.id = HI3660_DEFAULT_SENSOR; + + return 0; +} + +static int hisi_thermal_get_temp(void *__data, int *temp) { - struct hisi_thermal_data *data = dev; + struct hisi_thermal_data *data = __data; + struct hisi_thermal_sensor *sensor = &data->sensor; - disable_irq_nosync(irq); - data->irq_enabled = false; + *temp = data->get_temp(data); - return IRQ_WAKE_THREAD; + dev_dbg(&data->pdev->dev, "id=%d, temp=%d, thres=%d\n", + sensor->id, *temp, sensor->thres_temp); + + return 0; } +static const struct thermal_zone_of_device_ops hisi_of_thermal_ops = { + .get_temp = hisi_thermal_get_temp, +}; + static irqreturn_t 
hisi_thermal_alarm_irq_thread(int irq, void *dev) { struct hisi_thermal_data *data = dev; - struct hisi_thermal_sensor *sensor; - int i; + struct hisi_thermal_sensor *sensor = &data->sensor; + int temp = 0; - mutex_lock(&data->thermal_lock); - sensor = &data->sensors[data->irq_bind_sensor]; + data->irq_handler(data); - dev_crit(&data->pdev->dev, "THERMAL ALARM: T > %d\n", - sensor->thres_temp / 1000); - mutex_unlock(&data->thermal_lock); + hisi_thermal_get_temp(data, &temp); - for (i = 0; i < HISI_MAX_SENSORS; i++) { - if (!data->sensors[i].tzd) - continue; + if (temp >= sensor->thres_temp) { + dev_crit(&data->pdev->dev, "THERMAL ALARM: %d > %d\n", + temp, sensor->thres_temp); - thermal_zone_device_update(data->sensors[i].tzd, + thermal_zone_device_update(data->sensor.tzd, THERMAL_EVENT_UNSPECIFIED); + + } else { + dev_crit(&data->pdev->dev, "THERMAL ALARM stopped: %d < %d\n", + temp, sensor->thres_temp); } return IRQ_HANDLED; @@ -246,17 +474,14 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev) static int hisi_thermal_register_sensor(struct platform_device *pdev, struct hisi_thermal_data *data, - struct hisi_thermal_sensor *sensor, - int index) + struct hisi_thermal_sensor *sensor) { int ret, i; const struct thermal_trip *trip; - sensor->id = index; - sensor->thermal = data; - sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, - sensor->id, sensor, &hisi_of_thermal_ops); + sensor->id, data, + &hisi_of_thermal_ops); if (IS_ERR(sensor->tzd)) { ret = PTR_ERR(sensor->tzd); sensor->tzd = NULL; @@ -278,7 +503,14 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev, } static const struct of_device_id of_hisi_thermal_match[] = { - { .compatible = "hisilicon,tsensor" }, + { + .compatible = "hisilicon,tsensor", + .data = hi6220_thermal_probe + }, + { + .compatible = "hisilicon,hi3660-tsensor", + .data = hi3660_thermal_probe + }, { /* end */ } }; MODULE_DEVICE_TABLE(of, of_hisi_thermal_match); @@ -295,88 +527,63 @@ static void hisi_thermal_toggle_sensor(struct hisi_thermal_sensor *sensor, static int hisi_thermal_probe(struct platform_device *pdev) { struct hisi_thermal_data *data; - struct resource *res; - int i; + int (*platform_probe)(struct hisi_thermal_data *); + struct device *dev = &pdev->dev; int ret; - data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; - mutex_init(&data->thermal_lock); data->pdev = pdev; + platform_set_drvdata(pdev, data); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(data->regs)) { - dev_err(&pdev->dev, "failed to get io address\n"); - return PTR_ERR(data->regs); + platform_probe = of_device_get_match_data(dev); + if (!platform_probe) { + dev_err(dev, "failed to get probe func\n"); + return -EINVAL; } - data->irq = platform_get_irq(pdev, 0); - if (data->irq < 0) - return data->irq; - - ret = devm_request_threaded_irq(&pdev->dev, data->irq, - hisi_thermal_alarm_irq, - hisi_thermal_alarm_irq_thread, - 0, "hisi_thermal", data); - if (ret < 0) { - dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret); + ret = platform_probe(data); + if (ret) return ret; - } - platform_set_drvdata(pdev, data); - - data->clk = devm_clk_get(&pdev->dev, "thermal_clk"); - if (IS_ERR(data->clk)) { - ret = PTR_ERR(data->clk); - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, - "failed to get thermal clk: %d\n", ret); + ret = hisi_thermal_register_sensor(pdev, data, +
&data->sensor); + if (ret) { + dev_err(dev, "failed to register thermal sensor: %d\n", ret); return ret; } - /* enable clock for thermal */ - ret = clk_prepare_enable(data->clk); + ret = data->enable_sensor(data); if (ret) { - dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret); + dev_err(dev, "Failed to setup the sensor: %d\n", ret); return ret; } - hisi_thermal_enable_bind_irq_sensor(data); - irq_get_irqchip_state(data->irq, IRQCHIP_STATE_MASKED, - &data->irq_enabled); - - for (i = 0; i < HISI_MAX_SENSORS; ++i) { - ret = hisi_thermal_register_sensor(pdev, data, - &data->sensors[i], i); - if (ret) - dev_err(&pdev->dev, - "failed to register thermal sensor: %d\n", ret); - else - hisi_thermal_toggle_sensor(&data->sensors[i], true); + if (data->irq) { + ret = devm_request_threaded_irq(dev, data->irq, NULL, + hisi_thermal_alarm_irq_thread, + IRQF_ONESHOT, "hisi_thermal", data); + if (ret < 0) { + dev_err(dev, "failed to request alarm irq: %d\n", ret); + return ret; + } } + hisi_thermal_toggle_sensor(&data->sensor, true); + return 0; } static int hisi_thermal_remove(struct platform_device *pdev) { struct hisi_thermal_data *data = platform_get_drvdata(pdev); - int i; + struct hisi_thermal_sensor *sensor = &data->sensor; - for (i = 0; i < HISI_MAX_SENSORS; i++) { - struct hisi_thermal_sensor *sensor = &data->sensors[i]; + hisi_thermal_toggle_sensor(sensor, false); - if (!sensor->tzd) - continue; - - hisi_thermal_toggle_sensor(sensor, false); - } - - hisi_thermal_disable_sensor(data); - clk_disable_unprepare(data->clk); + data->disable_sensor(data); return 0; } @@ -386,10 +593,7 @@ static int hisi_thermal_suspend(struct device *dev) { struct hisi_thermal_data *data = dev_get_drvdata(dev); - hisi_thermal_disable_sensor(data); - data->irq_enabled = false; - - clk_disable_unprepare(data->clk); + data->disable_sensor(data); return 0; } @@ -397,16 +601,8 @@ static int hisi_thermal_suspend(struct device *dev) static int hisi_thermal_resume(struct device *dev) { struct hisi_thermal_data *data = dev_get_drvdata(dev); - int ret; - ret = clk_prepare_enable(data->clk); - if (ret) - return ret; - - data->irq_enabled = true; - hisi_thermal_enable_bind_irq_sensor(data); - - return 0; + return data->enable_sensor(data); } #endif diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index 4798b4b1fd77..41c6154ae856 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -601,6 +601,9 @@ static int imx_thermal_probe(struct platform_device *pdev) regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN); regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP); + data->irq_enabled = true; + data->mode = THERMAL_DEVICE_ENABLED; + ret = devm_request_threaded_irq(&pdev->dev, data->irq, imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread, 0, "imx_thermal", data); @@ -613,9 +616,6 @@ static int imx_thermal_probe(struct platform_device *pdev) return ret; } - data->irq_enabled = true; - data->mode = THERMAL_DEVICE_ENABLED; - return 0; } diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c index 8ee38f55c7f3..43b90fd577e4 100644 --- a/drivers/thermal/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/int340x_thermal/int3400_thermal.c @@ -319,17 +319,21 @@ static int int3400_thermal_probe(struct platform_device *pdev) result = sysfs_create_group(&pdev->dev.kobj, &uuid_attribute_group); if (result) - goto free_zone; + goto free_rel_misc; result = acpi_install_notify_handler( 
priv->adev->handle, ACPI_DEVICE_NOTIFY, int3400_notify, (void *)priv); if (result) - goto free_zone; + goto free_sysfs; return 0; -free_zone: +free_sysfs: + sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group); +free_rel_misc: + if (!priv->rel_misc_dev_res) + acpi_thermal_rel_misc_device_remove(priv->adev->handle); thermal_zone_device_unregister(priv->thermal); free_art_trt: kfree(priv->trts); diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c index 8a7f24dd9315..0c19fcd56a0d 100644 --- a/drivers/thermal/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/int340x_thermal/int3403_thermal.c @@ -194,6 +194,7 @@ static int int3403_cdev_add(struct int3403_priv *priv) return -EFAULT; } + priv->priv = obj; obj->max_state = p->package.count - 1; obj->cdev = thermal_cooling_device_register(acpi_device_bid(priv->adev), @@ -201,8 +202,6 @@ static int int3403_cdev_add(struct int3403_priv *priv) if (IS_ERR(obj->cdev)) result = PTR_ERR(obj->cdev); - priv->priv = obj; - kfree(buf.pointer); /* TODO: add ACPI notification support */ diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c index f02341f7134d..1d9f524cb5b3 100644 --- a/drivers/thermal/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c @@ -30,6 +30,10 @@ /* Skylake thermal reporting device */ #define PCI_DEVICE_ID_PROC_SKL_THERMAL 0x1903 +/* CannonLake thermal reporting device */ +#define PCI_DEVICE_ID_PROC_CNL_THERMAL 0x5a03 +#define PCI_DEVICE_ID_PROC_CFL_THERMAL 0x3E83 + /* Braswell thermal reporting device */ #define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC @@ -461,6 +465,7 @@ static const struct pci_device_id proc_thermal_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXT1_THERMAL)}, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXTX_THERMAL)}, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXTP_THERMAL)}, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_CNL_THERMAL)}, { 0, }, }; diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c index c60b1cfcc64e..ddd777cf5309 100644 --- a/drivers/thermal/intel_pch_thermal.c +++ b/drivers/thermal/intel_pch_thermal.c @@ -30,6 +30,7 @@ #define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */ #define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */ #define PCH_THERMAL_DID_SKL_H 0xA131 /* Skylake PCH 100 series */ +#define PCH_THERMAL_DID_CNL 0x9Df9 /* CNL PCH */ /* Wildcat Point-LP PCH Thermal registers */ #define WPT_TEMP 0x0000 /* Temperature */ @@ -278,6 +279,7 @@ enum board_ids { board_hsw, board_wpt, board_skl, + board_cnl, }; static const struct board_info { @@ -296,6 +298,10 @@ static const struct board_info { .name = "pch_skylake", .ops = &pch_dev_ops_wpt, }, + [board_cnl] = { + .name = "pch_cannonlake", + .ops = &pch_dev_ops_wpt, + }, }; static int intel_pch_thermal_probe(struct pci_dev *pdev, @@ -398,6 +404,8 @@ static const struct pci_device_id intel_pch_thermal_id[] = { .driver_data = board_skl, }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL_H), .driver_data = board_skl, }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL), + .driver_data = board_cnl, }, { 0, }, }; MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id); diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index d718cd179ddb..e83d505716bb 100644 --- a/drivers/thermal/intel_powerclamp.c +++ 
b/drivers/thermal/intel_powerclamp.c @@ -99,6 +99,7 @@ struct powerclamp_worker_data { unsigned int target_ratio; unsigned int duration_jiffies; bool clamping; + bool setscheduler_done; }; static struct powerclamp_worker_data * __percpu worker_data; @@ -388,6 +389,10 @@ static void clamp_balancing_func(struct kthread_work *work) w_data = container_of(work, struct powerclamp_worker_data, balancing_work); + if (unlikely(w_data->setscheduler_done == false)) { + sched_setscheduler(current, SCHED_FIFO, &sparam); + w_data->setscheduler_done = true; + } /* * make sure user selected ratio does not take effect until * the next round. adjust target_ratio if user has changed @@ -503,7 +508,6 @@ static void start_power_clamp_worker(unsigned long cpu) w_data->cpu = cpu; w_data->clamping = true; set_bit(cpu, cpu_clamping_mask); - sched_setscheduler(worker->task, SCHED_FIFO, &sparam); kthread_init_work(&w_data->balancing_work, clamp_balancing_func); kthread_init_delayed_work(&w_data->idle_injection_work, clamp_idle_injection_func); diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index b4d3116cfdaf..3055f9a12a17 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c @@ -523,6 +523,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz) struct thermal_instance *instance; struct power_allocator_params *params = tz->governor_data; + mutex_lock(&tz->lock); list_for_each_entry(instance, &tz->thermal_instances, tz_node) { if ((instance->trip != params->trip_max_desired_temperature) || (!cdev_is_power_actor(instance->cdev))) @@ -534,6 +535,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz) mutex_unlock(&instance->cdev->lock); thermal_cdev_update(instance->cdev); } + mutex_unlock(&tz->lock); } /** diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index ed805c7c5ace..d60069b5dc98 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -185,6 +185,7 @@ * @regulator: pointer to the TMU regulator structure. * @reg_conf: pointer to structure to register with core thermal. * @ntrip: number of supported trip points. 
+ * @enabled: current status of TMU device * @tmu_initialize: SoC specific TMU initialization method * @tmu_control: SoC specific TMU control method * @tmu_read: SoC specific TMU temperature read method @@ -205,6 +206,7 @@ struct exynos_tmu_data { struct regulator *regulator; struct thermal_zone_device *tzd; unsigned int ntrip; + bool enabled; int (*tmu_initialize)(struct platform_device *pdev); void (*tmu_control)(struct platform_device *pdev, bool on); @@ -398,6 +400,7 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on) mutex_lock(&data->lock); clk_enable(data->clk); data->tmu_control(pdev, on); + data->enabled = on; clk_disable(data->clk); mutex_unlock(&data->lock); } @@ -595,6 +598,7 @@ static int exynos5433_tmu_initialize(struct platform_device *pdev) threshold_code = temp_to_code(data, temp); rising_threshold = readl(data->base + rising_reg_offset); + rising_threshold &= ~(0xff << j * 8); rising_threshold |= (threshold_code << j * 8); writel(rising_threshold, data->base + rising_reg_offset); @@ -889,19 +893,24 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on) static int exynos_get_temp(void *p, int *temp) { struct exynos_tmu_data *data = p; + int value, ret = 0; - if (!data || !data->tmu_read) + if (!data || !data->tmu_read || !data->enabled) return -EINVAL; mutex_lock(&data->lock); clk_enable(data->clk); - *temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS; + value = data->tmu_read(data); + if (value < 0) + ret = value; + else + *temp = code_to_temp(data, value) * MCELSIUS; clk_disable(data->clk); mutex_unlock(&data->lock); - return 0; + return ret; } #ifdef CONFIG_THERMAL_EMULATION diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c index be95826631b7..ee047ca43084 100644 --- a/drivers/thermal/step_wise.c +++ b/drivers/thermal/step_wise.c @@ -31,8 +31,7 @@ * If the temperature is higher than a trip point, * a. if the trend is THERMAL_TREND_RAISING, use higher cooling * state for this trip point - * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling - * state for this trip point + * b. if the trend is THERMAL_TREND_DROPPING, do nothing * c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit * for this trip point * d. 
if the trend is THERMAL_TREND_DROP_FULL, use lower limit @@ -94,9 +93,11 @@ static unsigned long get_target_state(struct thermal_instance *instance, if (!throttle) next_target = THERMAL_NO_TARGET; } else { - next_target = cur_state - 1; - if (next_target > instance->upper) - next_target = instance->upper; + if (!throttle) { + next_target = cur_state - 1; + if (next_target > instance->upper) + next_target = instance->upper; + } } break; case THERMAL_TREND_DROP_FULL: diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 53250fc057e1..91830b1bdcaf 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -539,14 +539,14 @@ static bool icm_ar_is_supported(struct tb *tb) static int icm_ar_get_mode(struct tb *tb) { struct tb_nhi *nhi = tb->nhi; - int retries = 5; + int retries = 60; u32 val; do { val = ioread32(nhi->iobase + REG_FW_STS); if (val & REG_FW_STS_NVM_AUTH_DONE) break; - msleep(30); + msleep(50); } while (--retries); if (!retries) { @@ -720,6 +720,9 @@ static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi) struct icm *icm = tb_priv(tb); u32 val; + if (!icm->upstream_port) + return -ENODEV; + /* Put ARC to wait for CIO reset event to happen */ val = ioread32(nhi->iobase + REG_FW_STS); val |= REG_FW_STS_CIO_RESET_REQ; @@ -859,6 +862,9 @@ static int icm_firmware_init(struct tb *tb) break; default: + if (ret < 0) + return ret; + tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret); return -ENODEV; } diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 05af126a2435..16c607075ede 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -846,6 +846,7 @@ static const struct dev_pm_ops nhi_pm_ops = { * we just disable hotplug, the * pci-tunnels stay alive. */ + .thaw_noirq = nhi_resume_noirq, .restore_noirq = nhi_resume_noirq, .suspend = nhi_suspend, .freeze = nhi_suspend, diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 3953d17202a8..8bd137109980 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -716,6 +716,13 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) if (sw->authorized) goto unlock; + /* + * Make sure there is no PCIe rescan ongoing when a new PCIe + * tunnel is created. Otherwise the PCIe rescan code might find + * the new tunnel too early. 
+ */ + pci_lock_rescan_remove(); + switch (val) { /* Approve switch */ case 1: @@ -735,6 +742,8 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) break; } + pci_unlock_rescan_remove(); + if (!ret) { sw->authorized = val; /* Notify status change to the userspace */ diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index d674e06767a5..1424581fd9af 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -225,6 +225,7 @@ static void tb_activate_pcie_devices(struct tb *tb) tb_port_info(up_port, "PCIe tunnel activation failed, aborting\n"); tb_pci_free(tunnel); + continue; } list_add(&tunnel->list, &tcm->tunnel_list); diff --git a/drivers/trusty/Kconfig b/drivers/trusty/Kconfig new file mode 100644 index 000000000000..a230dad0434d --- /dev/null +++ b/drivers/trusty/Kconfig @@ -0,0 +1,66 @@ +# +# Trusty +# + +menu "Trusty" + +config TRUSTY + tristate "Trusty" + depends on X86_64 + default n + +config TRUSTY_FIQ + tristate + depends on TRUSTY + +config TRUSTY_FIQ_ARM + tristate + depends on TRUSTY + depends on ARM + select FIQ_GLUE + select TRUSTY_FIQ + default y + +config TRUSTY_FIQ_ARM64 + tristate + depends on TRUSTY + depends on ARM64 + select FIQ_GLUE + select TRUSTY_FIQ + default y + +config TRUSTY_LOG + tristate "Trusty Log support" + depends on TRUSTY + default y + +config TRUSTY_VIRTIO + tristate "Trusty virtio support" + depends on TRUSTY + select VIRTIO + default y + +config TRUSTY_VIRTIO_IPC + tristate "Trusty Virtio IPC driver" + depends on TRUSTY_VIRTIO + default y + help + This module adds support for communication with Trusty services. + + If you choose to build a module, it'll be called trusty-ipc. + Say N if unsure. + +config TRUSTY_BACKUP_TIMER + tristate "Trusty backup timer" + depends on TRUSTY + default y + help + This module adds support for the Trusty backup timer. A backup + timer may be required on platforms that can lose the secure + timer state in deep idle states. + + If you choose to build a module, it'll be called trusty-timer. + Say N if unsure. + + +endmenu diff --git a/drivers/trusty/Makefile b/drivers/trusty/Makefile new file mode 100644 index 000000000000..69a78688f1b0 --- /dev/null +++ b/drivers/trusty/Makefile @@ -0,0 +1,15 @@ +# +# Makefile for trusty components +# + +obj-$(CONFIG_TRUSTY) += trusty.o +obj-$(CONFIG_TRUSTY) += trusty-irq.o +obj-$(CONFIG_TRUSTY_FIQ) += trusty-fiq.o +obj-$(CONFIG_TRUSTY_FIQ_ARM) += trusty-fiq-arm.o +obj-$(CONFIG_TRUSTY_FIQ_ARM64) += trusty-fiq-arm64.o trusty-fiq-arm64-glue.o +obj-$(CONFIG_TRUSTY_LOG) += trusty-log.o +obj-$(CONFIG_TRUSTY) += trusty-mem.o +obj-$(CONFIG_TRUSTY_VIRTIO) += trusty-virtio.o +obj-$(CONFIG_TRUSTY_VIRTIO_IPC) += trusty-ipc.o +obj-$(CONFIG_TRUSTY) += trusty-wall.o +obj-$(CONFIG_TRUSTY_BACKUP_TIMER) += trusty-timer.o diff --git a/drivers/trusty/trusty-fiq-arm.c b/drivers/trusty/trusty-fiq-arm.c new file mode 100644 index 000000000000..8c62a00bbc44 --- /dev/null +++ b/drivers/trusty/trusty-fiq-arm.c @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include + +#include "trusty-fiq.h" + +#define _STRINGIFY(x) #x +#define STRINGIFY(x) _STRINGIFY(x) + +static void __naked trusty_fiq_return(void) +{ + asm volatile( + ".arch_extension sec\n" + "mov r12, r0\n" + "ldr r0, =" STRINGIFY(SMC_FC_FIQ_EXIT) "\n" + "smc #0"); +} + +int trusty_fiq_arch_probe(struct platform_device *pdev) +{ + return fiq_glue_set_return_handler(trusty_fiq_return); +} + +void trusty_fiq_arch_remove(struct platform_device *pdev) +{ + fiq_glue_clear_return_handler(trusty_fiq_return); +} diff --git a/drivers/trusty/trusty-fiq-arm64-glue.S b/drivers/trusty/trusty-fiq-arm64-glue.S new file mode 100644 index 000000000000..6994b3a94fc3 --- /dev/null +++ b/drivers/trusty/trusty-fiq-arm64-glue.S @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include + +.macro push reg1,reg2,remregs:vararg + .ifnb \remregs + push \remregs + .endif + stp \reg1, \reg2, [sp, #-16]! +.endm + +.macro pop reg1,reg2,remregs:vararg + ldp \reg1, \reg2, [sp], #16 + .ifnb \remregs + pop \remregs + .endif +.endm + +ENTRY(trusty_fiq_glue_arm64) + sub sp, sp, #S_FRAME_SIZE - S_LR + push x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, \ + x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, \ + x26, x27, x28, x29 + ldr x0, =SMC_FC64_GET_FIQ_REGS + smc #0 + stp x0, x1, [sp, #S_PC] /* original pc, cpsr */ + tst x1, PSR_MODE_MASK + csel x2, x2, x3, eq /* sp el0, sp el1 */ + stp x30, x2, [sp, #S_LR] /* lr, original sp */ + mov x0, sp + mov x1, x3 + bl trusty_fiq_handler + pop x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, \ + x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, \ + x26, x27, x28, x29 + ldr x30, [sp], #S_FRAME_SIZE - S_LR /* load LR and restore SP */ + ldr x0, =SMC_FC_FIQ_EXIT + smc #0 + b . /* should not get here */ diff --git a/drivers/trusty/trusty-fiq-arm64.c b/drivers/trusty/trusty-fiq-arm64.c new file mode 100644 index 000000000000..8b9a40887587 --- /dev/null +++ b/drivers/trusty/trusty-fiq-arm64.c @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include + +#include + +#include "trusty-fiq.h" + +extern void trusty_fiq_glue_arm64(void); + +static struct device *trusty_dev; +static DEFINE_PER_CPU(void *, fiq_stack); +static struct fiq_glue_handler *fiq_handlers; +static DEFINE_MUTEX(fiq_glue_lock); + +void trusty_fiq_handler(struct pt_regs *regs, void *svc_sp) +{ + struct fiq_glue_handler *handler; + + for (handler = ACCESS_ONCE(fiq_handlers); handler; + handler = ACCESS_ONCE(handler->next)) { + /* Barrier paired with smp_wmb in fiq_glue_register_handler */ + smp_read_barrier_depends(); + handler->fiq(handler, regs, svc_sp); + } +} + +static void smp_nop_call(void *info) +{ + /* If this call is reached, the fiq handler is not currently running */ +} + +static void fiq_glue_clear_handler(void) +{ + int cpu; + int ret; + void *stack; + + for_each_possible_cpu(cpu) { + stack = per_cpu(fiq_stack, cpu); + if (!stack) + continue; + + ret = trusty_fast_call64(trusty_dev, SMC_FC64_SET_FIQ_HANDLER, + cpu, 0, 0); + if (ret) { + pr_err("%s: SMC_FC_SET_FIQ_HANDLER(%d, 0, 0) failed 0x%x, skip free stack\n", + __func__, cpu, ret); + continue; + } + + per_cpu(fiq_stack, cpu) = NULL; + smp_call_function_single(cpu, smp_nop_call, NULL, true); + free_pages((unsigned long)stack, THREAD_SIZE_ORDER); + } +} + +static int fiq_glue_set_handler(void) +{ + int ret; + int cpu; + void *stack; + unsigned long irqflags; + + for_each_possible_cpu(cpu) { + stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); + if (WARN_ON(!stack)) { + ret = -ENOMEM; + goto err_alloc_fiq_stack; + } + per_cpu(fiq_stack, cpu) = stack; + stack += THREAD_START_SP; + + local_irq_save(irqflags); + ret = trusty_fast_call64(trusty_dev, SMC_FC64_SET_FIQ_HANDLER, + cpu, (uintptr_t)trusty_fiq_glue_arm64, + (uintptr_t)stack); + local_irq_restore(irqflags); + if (ret) { + pr_err("%s: SMC_FC_SET_FIQ_HANDLER(%d, %p, %p) failed 0x%x\n", + __func__, cpu, trusty_fiq_glue_arm64, + stack, ret); + ret = -EINVAL; + goto err_set_fiq_handler; + } + } + return 0; + +err_alloc_fiq_stack: +err_set_fiq_handler: + fiq_glue_clear_handler(); + return ret; +} + +int fiq_glue_register_handler(struct fiq_glue_handler *handler) +{ + int ret; + + if (!handler || !handler->fiq) { + ret = -EINVAL; + goto err_bad_arg; + } + + mutex_lock(&fiq_glue_lock); + + if (!trusty_dev) { + ret = -ENODEV; + goto err_no_trusty; + } + + handler->next = fiq_handlers; + /* + * Write barrier paired with smp_read_barrier_depends in + * trusty_fiq_handler. Make sure next pointer is updated before + * fiq_handlers so trusty_fiq_handler does not see an uninitialized + * value and terminate early or crash. 
+ */ + smp_wmb(); + fiq_handlers = handler; + + smp_call_function(smp_nop_call, NULL, true); + + if (!handler->next) { + ret = fiq_glue_set_handler(); + if (ret) + goto err_set_fiq_handler; + } + + mutex_unlock(&fiq_glue_lock); + return 0; + +err_set_fiq_handler: + fiq_handlers = handler->next; +err_no_trusty: + mutex_unlock(&fiq_glue_lock); +err_bad_arg: + pr_err("%s: failed, %d\n", __func__, ret); + return ret; +} + +int trusty_fiq_arch_probe(struct platform_device *pdev) +{ + mutex_lock(&fiq_glue_lock); + trusty_dev = pdev->dev.parent; + mutex_unlock(&fiq_glue_lock); + + return 0; +} + +void trusty_fiq_arch_remove(struct platform_device *pdev) +{ + mutex_lock(&fiq_glue_lock); + fiq_glue_clear_handler(); + trusty_dev = NULL; + mutex_unlock(&fiq_glue_lock); +} diff --git a/drivers/trusty/trusty-fiq.c b/drivers/trusty/trusty-fiq.c new file mode 100644 index 000000000000..1a031c67ea72 --- /dev/null +++ b/drivers/trusty/trusty-fiq.c @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include + +#include "trusty-fiq.h" + +static int trusty_fiq_remove_child(struct device *dev, void *data) +{ + platform_device_unregister(to_platform_device(dev)); + return 0; +} + +static int trusty_fiq_probe(struct platform_device *pdev) +{ + int ret; + + ret = trusty_fiq_arch_probe(pdev); + if (ret) + goto err_set_fiq_return; + + ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to add children: %d\n", ret); + goto err_add_children; + } + + return 0; + +err_add_children: + device_for_each_child(&pdev->dev, NULL, trusty_fiq_remove_child); + trusty_fiq_arch_remove(pdev); +err_set_fiq_return: + return ret; +} + +static int trusty_fiq_remove(struct platform_device *pdev) +{ + device_for_each_child(&pdev->dev, NULL, trusty_fiq_remove_child); + trusty_fiq_arch_remove(pdev); + return 0; +} + +static const struct of_device_id trusty_fiq_of_match[] = { + { .compatible = "android,trusty-fiq-v1", }, + {}, +}; + +static struct platform_driver trusty_fiq_driver = { + .probe = trusty_fiq_probe, + .remove = trusty_fiq_remove, + .driver = { + .name = "trusty-fiq", + .owner = THIS_MODULE, + .of_match_table = trusty_fiq_of_match, + }, +}; + +static int __init trusty_fiq_driver_init(void) +{ + return platform_driver_register(&trusty_fiq_driver); +} + +static void __exit trusty_fiq_driver_exit(void) +{ + platform_driver_unregister(&trusty_fiq_driver); +} + +subsys_initcall(trusty_fiq_driver_init); +module_exit(trusty_fiq_driver_exit); diff --git a/drivers/trusty/trusty-fiq.h b/drivers/trusty/trusty-fiq.h new file mode 100644 index 000000000000..d4ae9a9635f3 --- /dev/null +++ b/drivers/trusty/trusty-fiq.h @@ -0,0 +1,16 @@ +/* + * Copyright (C) 2014 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +int trusty_fiq_arch_probe(struct platform_device *pdev); +void trusty_fiq_arch_remove(struct platform_device *pdev); diff --git a/drivers/trusty/trusty-ipc.c b/drivers/trusty/trusty-ipc.c new file mode 100644 index 000000000000..f0b6b1bb444a --- /dev/null +++ b/drivers/trusty/trusty-ipc.c @@ -0,0 +1,1699 @@ +/* + * Copyright (C) 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) +#include +#endif +#include +#include +#include + +#include +#include +#include + +#include +#include + +#define MAX_DEVICES 4 + +#define VIRTIO_ID_TRUSTY_IPC 13 /* virtio trusty ipc */ + +#define REPLY_TIMEOUT 5000 +#define TXBUF_TIMEOUT 15000 + +#define PULSE_ACTIVE 1 +#define PULSE_DEACTIVE 0 + +#define MAX_SRV_NAME_LEN 256 +#define MAX_DEV_NAME_LEN 32 + +#define DEFAULT_MSG_BUF_SIZE PAGE_SIZE +#define DEFAULT_MSG_BUF_ALIGN PAGE_SIZE + +#define TIPC_CTRL_ADDR 53 +#define TIPC_ANY_ADDR 0xFFFFFFFF + +#define TIPC_MIN_LOCAL_ADDR 1024 + +#define TIPC_IOC_MAGIC 'r' +#define TIPC_IOC_CONNECT _IOW(TIPC_IOC_MAGIC, 0x80, char *) +#if defined(CONFIG_COMPAT) +#define TIPC_IOC_CONNECT_COMPAT _IOW(TIPC_IOC_MAGIC, 0x80, \ + compat_uptr_t) +#endif + +struct tipc_virtio_dev; + +struct tipc_dev_config { + u32 msg_buf_max_size; + u32 msg_buf_alignment; + char dev_name[MAX_DEV_NAME_LEN]; +} __packed; + +struct tipc_msg_hdr { + u32 src; + u32 dst; + u32 len; + u16 flags; + u16 reserved; + u8 data[0]; +} __packed; + +enum tipc_ctrl_msg_types { + TIPC_CTRL_MSGTYPE_GO_ONLINE = 1, + TIPC_CTRL_MSGTYPE_GO_OFFLINE, + TIPC_CTRL_MSGTYPE_CONN_REQ, + TIPC_CTRL_MSGTYPE_CONN_RSP, + TIPC_CTRL_MSGTYPE_DISC_REQ, +}; + +struct tipc_ctrl_msg { + u32 type; + u32 body_len; + u8 body[0]; +} __packed; + +struct tipc_conn_req_body { + char name[MAX_SRV_NAME_LEN]; +} __packed; + +struct tipc_conn_rsp_body { + u32 target; + u32 status; + u32 remote; + u32 max_msg_size; + u32 max_msg_cnt; +} __packed; + +struct tipc_disc_req_body { + u32 target; +} __packed; + +struct tipc_cdev_node { + struct cdev cdev; + struct device *dev; + unsigned int minor; +}; + +enum tipc_device_state { + VDS_OFFLINE = 0, + VDS_ONLINE, + VDS_DEAD, +}; + +struct tipc_virtio_dev { + struct kref refcount; + struct mutex lock; /* protects access to this device */ + struct virtio_device *vdev; + struct virtqueue *rxvq; + struct virtqueue *txvq; + uint msg_buf_cnt; + uint msg_buf_max_cnt; + size_t msg_buf_max_sz; + uint free_msg_buf_cnt; + struct list_head free_buf_list; + wait_queue_head_t sendq; + struct idr addr_idr; + enum tipc_device_state state; + struct tipc_cdev_node cdev_node; + char cdev_name[MAX_DEV_NAME_LEN]; +}; + +enum tipc_chan_state { + TIPC_DISCONNECTED = 0, + TIPC_CONNECTING, + TIPC_CONNECTED, + TIPC_STALE, +}; + +struct 
tipc_chan { + struct mutex lock; /* protects channel state */ + struct kref refcount; + enum tipc_chan_state state; + struct tipc_virtio_dev *vds; + const struct tipc_chan_ops *ops; + void *ops_arg; + u32 remote; + u32 local; + u32 max_msg_size; + u32 max_msg_cnt; + char srv_name[MAX_SRV_NAME_LEN]; +}; + +static struct class *tipc_class; +static unsigned int tipc_major; + +struct virtio_device *default_vdev; + +static DEFINE_IDR(tipc_devices); +static DEFINE_MUTEX(tipc_devices_lock); + +static int _match_any(int id, void *p, void *data) +{ + return id; +} + +static int _match_data(int id, void *p, void *data) +{ + return (p == data); +} + +static void *_alloc_shareable_mem(size_t sz, phys_addr_t *ppa, gfp_t gfp) +{ + void *buf_va; + buf_va = alloc_pages_exact(sz, gfp); + *ppa = virt_to_phys(buf_va); + return buf_va; +} + +static void _free_shareable_mem(size_t sz, void *va, phys_addr_t pa) +{ + free_pages_exact(va, sz); +} + +static struct tipc_msg_buf *_alloc_msg_buf(size_t sz) +{ + struct tipc_msg_buf *mb; + + /* allocate tracking structure */ + mb = kzalloc(sizeof(struct tipc_msg_buf), GFP_KERNEL); + if (!mb) + return NULL; + + /* allocate buffer that can be shared with secure world */ + mb->buf_va = _alloc_shareable_mem(sz, &mb->buf_pa, GFP_KERNEL); + if (!mb->buf_va) + goto err_alloc; + + mb->buf_sz = sz; + + return mb; + +err_alloc: + kfree(mb); + return NULL; +} + +static void _free_msg_buf(struct tipc_msg_buf *mb) +{ + _free_shareable_mem(mb->buf_sz, mb->buf_va, mb->buf_pa); + kfree(mb); +} + +static void _free_msg_buf_list(struct list_head *list) +{ + struct tipc_msg_buf *mb = NULL; + + mb = list_first_entry_or_null(list, struct tipc_msg_buf, node); + while (mb) { + list_del(&mb->node); + _free_msg_buf(mb); + mb = list_first_entry_or_null(list, struct tipc_msg_buf, node); + } +} + +static inline void mb_reset(struct tipc_msg_buf *mb) +{ + mb->wpos = 0; + mb->rpos = 0; +} + +static void _free_chan(struct kref *kref) +{ + struct tipc_chan *ch = container_of(kref, struct tipc_chan, refcount); + kfree(ch); +} + +static void _free_vds(struct kref *kref) +{ + struct tipc_virtio_dev *vds = + container_of(kref, struct tipc_virtio_dev, refcount); + kfree(vds); +} + +static struct tipc_msg_buf *vds_alloc_msg_buf(struct tipc_virtio_dev *vds) +{ + return _alloc_msg_buf(vds->msg_buf_max_sz); +} + +static void vds_free_msg_buf(struct tipc_virtio_dev *vds, + struct tipc_msg_buf *mb) +{ + _free_msg_buf(mb); +} + +static bool _put_txbuf_locked(struct tipc_virtio_dev *vds, + struct tipc_msg_buf *mb) +{ + list_add_tail(&mb->node, &vds->free_buf_list); + return vds->free_msg_buf_cnt++ == 0; +} + +static struct tipc_msg_buf *_get_txbuf_locked(struct tipc_virtio_dev *vds) +{ + struct tipc_msg_buf *mb; + + if (vds->state != VDS_ONLINE) + return ERR_PTR(-ENODEV); + + if (vds->free_msg_buf_cnt) { + /* take it out of free list */ + mb = list_first_entry(&vds->free_buf_list, + struct tipc_msg_buf, node); + list_del(&mb->node); + vds->free_msg_buf_cnt--; + } else { + if (vds->msg_buf_cnt >= vds->msg_buf_max_cnt) + return ERR_PTR(-EAGAIN); + + /* try to allocate it */ + mb = _alloc_msg_buf(vds->msg_buf_max_sz); + if (!mb) + return ERR_PTR(-ENOMEM); + + vds->msg_buf_cnt++; + } + return mb; +} + +static struct tipc_msg_buf *_vds_get_txbuf(struct tipc_virtio_dev *vds) +{ + struct tipc_msg_buf *mb; + + mutex_lock(&vds->lock); + mb = _get_txbuf_locked(vds); + mutex_unlock(&vds->lock); + + return mb; +} + +static void vds_put_txbuf(struct tipc_virtio_dev *vds, struct tipc_msg_buf *mb) +{ + if (!vds) + return; + + 
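+	/*
+	 * Note on the free-list protocol (a reading of this code, not
+	 * documentation shipped with the patch): the buffer goes back on
+	 * free_buf_list and any writer parked in vds_get_txbuf() on
+	 * -EAGAIN is woken. The wakeup is unconditional and waiters
+	 * re-check the list under vds->lock, so no state is lost.
+	 */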
mutex_lock(&vds->lock); + _put_txbuf_locked(vds, mb); + wake_up_interruptible(&vds->sendq); + mutex_unlock(&vds->lock); +} + +static struct tipc_msg_buf *vds_get_txbuf(struct tipc_virtio_dev *vds, + long timeout) +{ + struct tipc_msg_buf *mb; + + if (!vds) + return ERR_PTR(-EINVAL); + + mb = _vds_get_txbuf(vds); + + if ((PTR_ERR(mb) == -EAGAIN) && timeout) { + DEFINE_WAIT_FUNC(wait, woken_wake_function); + + timeout = msecs_to_jiffies(timeout); + add_wait_queue(&vds->sendq, &wait); + for (;;) { + timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, + timeout); + if (!timeout) { + mb = ERR_PTR(-ETIMEDOUT); + break; + } + + if (signal_pending(current)) { + mb = ERR_PTR(-ERESTARTSYS); + break; + } + + mb = _vds_get_txbuf(vds); + if (PTR_ERR(mb) != -EAGAIN) + break; + } + remove_wait_queue(&vds->sendq, &wait); + } + + if (IS_ERR(mb)) + return mb; + + BUG_ON(!mb); + + /* reset and reserve space for message header */ + mb_reset(mb); + mb_put_data(mb, sizeof(struct tipc_msg_hdr)); + + return mb; +} + +static int vds_queue_txbuf(struct tipc_virtio_dev *vds, + struct tipc_msg_buf *mb) +{ + int err; + struct scatterlist sg; + bool need_notify = false; + + if (!vds) + return -EINVAL; + + mutex_lock(&vds->lock); + if (vds->state == VDS_ONLINE) { + sg_init_one(&sg, mb->buf_va, mb->wpos); + err = virtqueue_add_outbuf(vds->txvq, &sg, 1, mb, GFP_KERNEL); + need_notify = virtqueue_kick_prepare(vds->txvq); + } else { + err = -ENODEV; + } + mutex_unlock(&vds->lock); + + if (need_notify) + virtqueue_notify(vds->txvq); + + return err; +} + +static int vds_add_channel(struct tipc_virtio_dev *vds, + struct tipc_chan *chan) +{ + int ret; + + mutex_lock(&vds->lock); + if (vds->state == VDS_ONLINE) { + ret = idr_alloc(&vds->addr_idr, chan, + TIPC_MIN_LOCAL_ADDR, TIPC_ANY_ADDR - 1, + GFP_KERNEL); + if (ret > 0) { + chan->local = ret; + kref_get(&chan->refcount); + ret = 0; + } + } else { + ret = -EINVAL; + } + mutex_unlock(&vds->lock); + + return ret; +} + +static void vds_del_channel(struct tipc_virtio_dev *vds, + struct tipc_chan *chan) +{ + mutex_lock(&vds->lock); + if (chan->local) { + idr_remove(&vds->addr_idr, chan->local); + chan->local = 0; + chan->remote = 0; + kref_put(&chan->refcount, _free_chan); + } + mutex_unlock(&vds->lock); +} + +static struct tipc_chan *vds_lookup_channel(struct tipc_virtio_dev *vds, + u32 addr) +{ + int id; + struct tipc_chan *chan = NULL; + + mutex_lock(&vds->lock); + if (addr == TIPC_ANY_ADDR) { + id = idr_for_each(&vds->addr_idr, _match_any, NULL); + if (id > 0) + chan = idr_find(&vds->addr_idr, id); + } else { + chan = idr_find(&vds->addr_idr, addr); + } + if (chan) + kref_get(&chan->refcount); + mutex_unlock(&vds->lock); + + return chan; +} + +static struct tipc_chan *vds_create_channel(struct tipc_virtio_dev *vds, + const struct tipc_chan_ops *ops, + void *ops_arg) +{ + int ret; + struct tipc_chan *chan = NULL; + + if (!vds) + return ERR_PTR(-ENOENT); + + if (!ops) + return ERR_PTR(-EINVAL); + + chan = kzalloc(sizeof(*chan), GFP_KERNEL); + if (!chan) + return ERR_PTR(-ENOMEM); + + kref_get(&vds->refcount); + chan->vds = vds; + chan->ops = ops; + chan->ops_arg = ops_arg; + mutex_init(&chan->lock); + kref_init(&chan->refcount); + chan->state = TIPC_DISCONNECTED; + + ret = vds_add_channel(vds, chan); + if (ret) { + kfree(chan); + kref_put(&vds->refcount, _free_vds); + return ERR_PTR(ret); + } + + return chan; +} + +static void fill_msg_hdr(struct tipc_msg_buf *mb, u32 src, u32 dst) +{ + struct tipc_msg_hdr *hdr = mb_get_data(mb, sizeof(*hdr)); + + hdr->src = src; + hdr->dst = dst; + 
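+	/*
+	 * By this point mb_get_data() has consumed the header area that
+	 * vds_get_txbuf() reserved, so mb_avail_data() is exactly the
+	 * payload length written by the caller. This assumes the usual
+	 * rpos/wpos semantics of the tipc_msg_buf helpers, which are
+	 * defined in a header outside this patch.
+	 */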
hdr->len = mb_avail_data(mb); + hdr->flags = 0; + hdr->reserved = 0; +} + +/*****************************************************************************/ + +struct tipc_chan *tipc_create_channel(struct device *dev, + const struct tipc_chan_ops *ops, + void *ops_arg) +{ + struct virtio_device *vd; + struct tipc_chan *chan; + struct tipc_virtio_dev *vds; + + mutex_lock(&tipc_devices_lock); + if (dev) { + vd = container_of(dev, struct virtio_device, dev); + } else { + vd = default_vdev; + if (!vd) { + mutex_unlock(&tipc_devices_lock); + return ERR_PTR(-ENOENT); + } + } + vds = vd->priv; + kref_get(&vds->refcount); + mutex_unlock(&tipc_devices_lock); + + chan = vds_create_channel(vds, ops, ops_arg); + kref_put(&vds->refcount, _free_vds); + return chan; +} +EXPORT_SYMBOL(tipc_create_channel); + +struct tipc_msg_buf *tipc_chan_get_rxbuf(struct tipc_chan *chan) +{ + return vds_alloc_msg_buf(chan->vds); +} +EXPORT_SYMBOL(tipc_chan_get_rxbuf); + +void tipc_chan_put_rxbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb) +{ + vds_free_msg_buf(chan->vds, mb); +} +EXPORT_SYMBOL(tipc_chan_put_rxbuf); + +struct tipc_msg_buf *tipc_chan_get_txbuf_timeout(struct tipc_chan *chan, + long timeout) +{ + return vds_get_txbuf(chan->vds, timeout); +} +EXPORT_SYMBOL(tipc_chan_get_txbuf_timeout); + +void tipc_chan_put_txbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb) +{ + vds_put_txbuf(chan->vds, mb); +} +EXPORT_SYMBOL(tipc_chan_put_txbuf); + +int tipc_chan_queue_msg(struct tipc_chan *chan, struct tipc_msg_buf *mb) +{ + int err; + + mutex_lock(&chan->lock); + switch (chan->state) { + case TIPC_CONNECTED: + fill_msg_hdr(mb, chan->local, chan->remote); + err = vds_queue_txbuf(chan->vds, mb); + if (err) { + /* this should never happen */ + pr_err("%s: failed to queue tx buffer (%d)\n", + __func__, err); + } + break; + case TIPC_DISCONNECTED: + case TIPC_CONNECTING: + err = -ENOTCONN; + break; + case TIPC_STALE: + err = -ESHUTDOWN; + break; + default: + err = -EBADFD; + pr_err("%s: unexpected channel state %d\n", + __func__, chan->state); + } + mutex_unlock(&chan->lock); + return err; +} +EXPORT_SYMBOL(tipc_chan_queue_msg); + + +int tipc_chan_connect(struct tipc_chan *chan, const char *name) +{ + int err; + struct tipc_ctrl_msg *msg; + struct tipc_conn_req_body *body; + struct tipc_msg_buf *txbuf; + + txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT); + if (IS_ERR(txbuf)) + return PTR_ERR(txbuf); + + /* reserve space for connection request control message */ + msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body)); + body = (struct tipc_conn_req_body *)msg->body; + + /* fill message */ + msg->type = TIPC_CTRL_MSGTYPE_CONN_REQ; + msg->body_len = sizeof(*body); + + strncpy(body->name, name, sizeof(body->name)); + body->name[sizeof(body->name)-1] = '\0'; + + mutex_lock(&chan->lock); + switch (chan->state) { + case TIPC_DISCONNECTED: + /* save service name we are connecting to */ + strcpy(chan->srv_name, body->name); + + fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR); + err = vds_queue_txbuf(chan->vds, txbuf); + if (err) { + /* this should never happen */ + pr_err("%s: failed to queue tx buffer (%d)\n", + __func__, err); + } else { + chan->state = TIPC_CONNECTING; + txbuf = NULL; /* prevents discarding buffer */ + } + break; + case TIPC_CONNECTED: + case TIPC_CONNECTING: + /* check if we are trying to connect to the same service */ + if (strcmp(chan->srv_name, body->name) == 0) + err = 0; + else + if (chan->state == TIPC_CONNECTING) + err = -EALREADY; /* in progress */ + else + err = -EISCONN; /* already connected */ 
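+		/*
+		 * Connect state machine as implemented by this function
+		 * together with _handle_conn_rsp() and _go_offline()
+		 * below (a summary of this code, not a spec):
+		 *
+		 *   DISCONNECTED --CONN_REQ queued-----> CONNECTING
+		 *   CONNECTING   --CONN_RSP status==0--> CONNECTED
+		 *   CONNECTING   --CONN_RSP status!=0--> DISCONNECTED
+		 *   any state    --device offline------> STALE
+		 *
+		 * A minimal in-kernel client sketch built on the exported
+		 * API (the service name and ops are illustrative; the
+		 * caller must wait for TIPC_CHANNEL_CONNECTED via its
+		 * handle_event callback before queueing, since sends on
+		 * a CONNECTING channel fail with -ENOTCONN):
+		 *
+		 *   chan = tipc_create_channel(NULL, &my_ops, my_arg);
+		 *   tipc_chan_connect(chan, "com.example.srv");
+		 *   ... wait for TIPC_CHANNEL_CONNECTED ...
+		 *   mb = tipc_chan_get_txbuf_timeout(chan, TXBUF_TIMEOUT);
+		 *   memcpy(mb_put_data(mb, len), req, len);
+		 *   tipc_chan_queue_msg(chan, mb);
+		 */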
+ break; + + case TIPC_STALE: + err = -ESHUTDOWN; + break; + default: + err = -EBADFD; + pr_err("%s: unexpected channel state %d\n", + __func__, chan->state); + break; + } + mutex_unlock(&chan->lock); + + if (txbuf) + tipc_chan_put_txbuf(chan, txbuf); /* discard it */ + + return err; +} +EXPORT_SYMBOL(tipc_chan_connect); + +int tipc_chan_shutdown(struct tipc_chan *chan) +{ + int err; + struct tipc_ctrl_msg *msg; + struct tipc_disc_req_body *body; + struct tipc_msg_buf *txbuf = NULL; + + /* get tx buffer */ + txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT); + if (IS_ERR(txbuf)) + return PTR_ERR(txbuf); + + mutex_lock(&chan->lock); + if (chan->state == TIPC_CONNECTED || chan->state == TIPC_CONNECTING) { + /* reserve space for disconnect request control message */ + msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body)); + body = (struct tipc_disc_req_body *)msg->body; + + msg->type = TIPC_CTRL_MSGTYPE_DISC_REQ; + msg->body_len = sizeof(*body); + body->target = chan->remote; + + fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR); + err = vds_queue_txbuf(chan->vds, txbuf); + if (err) { + /* this should never happen */ + pr_err("%s: failed to queue tx buffer (%d)\n", + __func__, err); + } + } else { + err = -ENOTCONN; + } + chan->state = TIPC_STALE; + mutex_unlock(&chan->lock); + + if (err) { + /* release buffer */ + tipc_chan_put_txbuf(chan, txbuf); + } + + return err; +} +EXPORT_SYMBOL(tipc_chan_shutdown); + +void tipc_chan_destroy(struct tipc_chan *chan) +{ + mutex_lock(&chan->lock); + if (chan->vds) { + vds_del_channel(chan->vds, chan); + kref_put(&chan->vds->refcount, _free_vds); + chan->vds = NULL; + } + mutex_unlock(&chan->lock); + kref_put(&chan->refcount, _free_chan); +} +EXPORT_SYMBOL(tipc_chan_destroy); + +/***************************************************************************/ + +struct tipc_dn_chan { + int pulse; + int state; + struct mutex lock; /* protects rx_msg_queue list and channel state */ + struct tipc_chan *chan; + wait_queue_head_t readq; + struct completion reply_comp; + struct list_head rx_msg_queue; +}; + +static int dn_wait_for_reply(struct tipc_dn_chan *dn, int timeout) +{ + int ret; + + ret = wait_for_completion_interruptible_timeout(&dn->reply_comp, + msecs_to_jiffies(timeout)); + if (ret < 0) + return ret; + + mutex_lock(&dn->lock); + if (!ret) { + /* no reply from remote */ + dn->state = TIPC_STALE; + ret = -ETIMEDOUT; + } else { + /* got reply */ + if (dn->pulse == PULSE_ACTIVE) { + dn->pulse = PULSE_DEACTIVE; + ret = 0; + } else if (dn->state == TIPC_DISCONNECTED) + if (!list_empty(&dn->rx_msg_queue)) + ret = 0; + else + ret = -ENOTCONN; + else + ret = -EIO; + } + mutex_unlock(&dn->lock); + + return ret; +} + +struct tipc_msg_buf *dn_handle_msg(void *data, struct tipc_msg_buf *rxbuf) +{ + struct tipc_dn_chan *dn = data; + struct tipc_msg_buf *newbuf = rxbuf; + + mutex_lock(&dn->lock); + if (dn->state == TIPC_CONNECTED) { + /* get new buffer */ + newbuf = tipc_chan_get_rxbuf(dn->chan); + if (newbuf) { + /* queue an old buffer and return a new one */ + list_add_tail(&rxbuf->node, &dn->rx_msg_queue); + wake_up_interruptible(&dn->readq); + } else { + /* + * return an old buffer effectively discarding + * incoming message + */ + pr_err("%s: discard incoming message\n", __func__); + newbuf = rxbuf; + } + } + mutex_unlock(&dn->lock); + + return newbuf; +} + +static void dn_connected(struct tipc_dn_chan *dn) +{ + mutex_lock(&dn->lock); + dn->state = TIPC_CONNECTED; + dn->pulse = PULSE_ACTIVE; + + /* complete all pending */ + complete(&dn->reply_comp); + + 
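+	/*
+	 * complete() pairs with dn_wait_for_reply(): PULSE_ACTIVE set
+	 * above marks this wakeup as a fresh connect reply, and the
+	 * waiter consumes it (resetting it to PULSE_DEACTIVE) before
+	 * reporting the connect as successful.
+	 */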
mutex_unlock(&dn->lock); +} + +static void dn_disconnected(struct tipc_dn_chan *dn) +{ + mutex_lock(&dn->lock); + dn->state = TIPC_DISCONNECTED; + + /* complete all pending */ + complete(&dn->reply_comp); + + /* wakeup all readers */ + wake_up_interruptible_all(&dn->readq); + + mutex_unlock(&dn->lock); +} + +static void dn_shutdown(struct tipc_dn_chan *dn) +{ + mutex_lock(&dn->lock); + + /* set state to STALE */ + dn->state = TIPC_STALE; + + /* complete all pending */ + complete(&dn->reply_comp); + + /* wakeup all readers */ + wake_up_interruptible_all(&dn->readq); + + mutex_unlock(&dn->lock); +} + +static void dn_handle_event(void *data, int event) +{ + struct tipc_dn_chan *dn = data; + + switch (event) { + case TIPC_CHANNEL_SHUTDOWN: + dn_shutdown(dn); + break; + + case TIPC_CHANNEL_DISCONNECTED: + dn_disconnected(dn); + break; + + case TIPC_CHANNEL_CONNECTED: + dn_connected(dn); + break; + + default: + pr_err("%s: unhandled event %d\n", __func__, event); + break; + } +} + +static struct tipc_chan_ops _dn_ops = { + .handle_msg = dn_handle_msg, + .handle_event = dn_handle_event, +}; + +#define cdev_to_cdn(c) container_of((c), struct tipc_cdev_node, cdev) +#define cdn_to_vds(cdn) container_of((cdn), struct tipc_virtio_dev, cdev_node) + +static struct tipc_virtio_dev *_dn_lookup_vds(struct tipc_cdev_node *cdn) +{ + int ret; + struct tipc_virtio_dev *vds = NULL; + + mutex_lock(&tipc_devices_lock); + ret = idr_for_each(&tipc_devices, _match_data, cdn); + if (ret) { + vds = cdn_to_vds(cdn); + kref_get(&vds->refcount); + } + mutex_unlock(&tipc_devices_lock); + return vds; +} + +static int tipc_open(struct inode *inode, struct file *filp) +{ + int ret; + struct tipc_virtio_dev *vds; + struct tipc_dn_chan *dn; + struct tipc_cdev_node *cdn = cdev_to_cdn(inode->i_cdev); + + vds = _dn_lookup_vds(cdn); + if (!vds) { + ret = -ENOENT; + goto err_vds_lookup; + } + + dn = kzalloc(sizeof(*dn), GFP_KERNEL); + if (!dn) { + ret = -ENOMEM; + goto err_alloc_chan; + } + + mutex_init(&dn->lock); + init_waitqueue_head(&dn->readq); + init_completion(&dn->reply_comp); + INIT_LIST_HEAD(&dn->rx_msg_queue); + + dn->state = TIPC_DISCONNECTED; + dn->pulse = PULSE_DEACTIVE; + + dn->chan = vds_create_channel(vds, &_dn_ops, dn); + if (IS_ERR(dn->chan)) { + ret = PTR_ERR(dn->chan); + goto err_create_chan; + } + + filp->private_data = dn; + kref_put(&vds->refcount, _free_vds); + return 0; + +err_create_chan: + kfree(dn); +err_alloc_chan: + kref_put(&vds->refcount, _free_vds); +err_vds_lookup: + return ret; +} + + +static int dn_connect_ioctl(struct tipc_dn_chan *dn, char __user *usr_name) +{ + int err; + char name[MAX_SRV_NAME_LEN]; + + /* copy in service name from user space */ + err = strncpy_from_user(name, usr_name, sizeof(name)); + if (err < 0) { + pr_err("%s: copy_from_user (%p) failed (%d)\n", + __func__, usr_name, err); + return err; + } + name[sizeof(name)-1] = '\0'; + + /* send connect request */ + err = tipc_chan_connect(dn->chan, name); + if (err) + return err; + + /* and wait for reply */ + return dn_wait_for_reply(dn, REPLY_TIMEOUT); +} + +static long tipc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int ret; + struct tipc_dn_chan *dn = filp->private_data; + + if (_IOC_TYPE(cmd) != TIPC_IOC_MAGIC) + return -EINVAL; + + switch (cmd) { + case TIPC_IOC_CONNECT: + ret = dn_connect_ioctl(dn, (char __user *)arg); + break; + default: + pr_warn("%s: Unhandled ioctl cmd: 0x%x\n", + __func__, cmd); + ret = -EINVAL; + } + return ret; +} + +#if defined(CONFIG_COMPAT) +static long 
tipc_compat_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg) +{ + int ret; + struct tipc_dn_chan *dn = filp->private_data; + void __user *user_req = compat_ptr(arg); + + if (_IOC_TYPE(cmd) != TIPC_IOC_MAGIC) + return -EINVAL; + + switch (cmd) { + case TIPC_IOC_CONNECT_COMPAT: + ret = dn_connect_ioctl(dn, user_req); + break; + default: + pr_warn("%s: Unhandled ioctl cmd: 0x%x\n", + __func__, cmd); + ret = -EINVAL; + } + return ret; +} +#endif + +static inline bool _got_rx(struct tipc_dn_chan *dn) +{ + if (dn->state != TIPC_CONNECTED) + return true; + + if (!list_empty(&dn->rx_msg_queue)) + return true; + + return false; +} + +static ssize_t tipc_read_iter(struct kiocb *iocb, struct iov_iter *iter) +{ + ssize_t ret; + size_t len; + struct tipc_msg_buf *mb; + struct file *filp = iocb->ki_filp; + struct tipc_dn_chan *dn = filp->private_data; + + mutex_lock(&dn->lock); + + while (list_empty(&dn->rx_msg_queue)) { + if (dn->state != TIPC_CONNECTED) { + if (dn->state == TIPC_CONNECTING) + ret = -ENOTCONN; + else if (dn->state == TIPC_DISCONNECTED) + ret = -ENOTCONN; + else if (dn->state == TIPC_STALE) + ret = -ESHUTDOWN; + else + ret = -EBADFD; + goto out; + } + + mutex_unlock(&dn->lock); + + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + + if (wait_event_interruptible(dn->readq, _got_rx(dn))) + return -ERESTARTSYS; + + mutex_lock(&dn->lock); + } + + mb = list_first_entry(&dn->rx_msg_queue, struct tipc_msg_buf, node); + + len = mb_avail_data(mb); + if (len > iov_iter_count(iter)) { + ret = -EMSGSIZE; + goto out; + } + + if (copy_to_iter(mb_get_data(mb, len), len, iter) != len) { + ret = -EFAULT; + goto out; + } + + ret = len; + list_del(&mb->node); + tipc_chan_put_rxbuf(dn->chan, mb); + +out: + mutex_unlock(&dn->lock); + return ret; +} + +static ssize_t tipc_write_iter(struct kiocb *iocb, struct iov_iter *iter) +{ + ssize_t ret; + size_t len; + long timeout = TXBUF_TIMEOUT; + struct tipc_msg_buf *txbuf = NULL; + struct file *filp = iocb->ki_filp; + struct tipc_dn_chan *dn = filp->private_data; + + if (filp->f_flags & O_NONBLOCK) + timeout = 0; + + txbuf = tipc_chan_get_txbuf_timeout(dn->chan, timeout); + if (IS_ERR(txbuf)) + return PTR_ERR(txbuf); + + /* message length */ + len = iov_iter_count(iter); + + /* check available space */ + if (len > mb_avail_space(txbuf)) { + ret = -EMSGSIZE; + goto err_out; + } + + /* copy in message data */ + if (copy_from_iter(mb_put_data(txbuf, len), len, iter) != len) { + ret = -EFAULT; + goto err_out; + } + + /* queue message */ + ret = tipc_chan_queue_msg(dn->chan, txbuf); + if (ret) + goto err_out; + + return len; + +err_out: + tipc_chan_put_txbuf(dn->chan, txbuf); + return ret; +} + +static unsigned int tipc_poll(struct file *filp, poll_table *wait) +{ + unsigned int mask = 0; + struct tipc_dn_chan *dn = filp->private_data; + + mutex_lock(&dn->lock); + + poll_wait(filp, &dn->readq, wait); + + /* Writes always succeed for now */ + mask |= POLLOUT | POLLWRNORM; + + if (!list_empty(&dn->rx_msg_queue)) + mask |= POLLIN | POLLRDNORM; + + if (dn->state != TIPC_CONNECTED) + mask |= POLLERR; + + mutex_unlock(&dn->lock); + return mask; +} + + +static int tipc_release(struct inode *inode, struct file *filp) +{ + struct tipc_dn_chan *dn = filp->private_data; + + dn_shutdown(dn); + + /* free all pending buffers */ + _free_msg_buf_list(&dn->rx_msg_queue); + + /* shutdown channel */ + tipc_chan_shutdown(dn->chan); + + /* and destroy it */ + tipc_chan_destroy(dn->chan); + + kfree(dn); + + return 0; +} + +static const struct file_operations tipc_fops = { + 
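+	/*
+	 * Hypothetical user-space usage sketch for this char device.
+	 * The node name suffix comes from the config-provided
+	 * cdev_name, so "dev0" below is only an example:
+	 *
+	 *   int fd = open("/dev/trusty-ipc-dev0", O_RDWR);
+	 *   ioctl(fd, TIPC_IOC_CONNECT, "com.example.srv");
+	 *   write(fd, req, req_len);        (one message per write)
+	 *   n = read(fd, rsp, sizeof(rsp)); (one message per read;
+	 *                                    -EMSGSIZE if rsp is small)
+	 */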
.open = tipc_open, + .release = tipc_release, + .unlocked_ioctl = tipc_ioctl, +#if defined(CONFIG_COMPAT) + .compat_ioctl = tipc_compat_ioctl, +#endif + .read_iter = tipc_read_iter, + .write_iter = tipc_write_iter, + .poll = tipc_poll, + .owner = THIS_MODULE, +}; + +/*****************************************************************************/ + +static void chan_trigger_event(struct tipc_chan *chan, int event) +{ + if (!event) + return; + + chan->ops->handle_event(chan->ops_arg, event); +} + +static void _cleanup_vq(struct virtqueue *vq) +{ + struct tipc_msg_buf *mb; + + while ((mb = virtqueue_detach_unused_buf(vq)) != NULL) + _free_msg_buf(mb); +} + +static int _create_cdev_node(struct device *parent, + struct tipc_cdev_node *cdn, + const char *name) +{ + int ret; + dev_t devt; + + if (!name) { + dev_dbg(parent, "%s: cdev name has to be provided\n", + __func__); + return -EINVAL; + } + + /* allocate minor */ + ret = idr_alloc(&tipc_devices, cdn, 0, MAX_DEVICES-1, GFP_KERNEL); + if (ret < 0) { + dev_dbg(parent, "%s: failed (%d) to get id\n", + __func__, ret); + return ret; + } + + cdn->minor = ret; + cdev_init(&cdn->cdev, &tipc_fops); + cdn->cdev.owner = THIS_MODULE; + + /* Add character device */ + devt = MKDEV(tipc_major, cdn->minor); + ret = cdev_add(&cdn->cdev, devt, 1); + if (ret) { + dev_dbg(parent, "%s: cdev_add failed (%d)\n", + __func__, ret); + goto err_add_cdev; + } + + /* Create a device node */ + cdn->dev = device_create(tipc_class, parent, + devt, NULL, "trusty-ipc-%s", name); + if (IS_ERR(cdn->dev)) { + ret = PTR_ERR(cdn->dev); + dev_dbg(parent, "%s: device_create failed: %d\n", + __func__, ret); + goto err_device_create; + } + + return 0; + +err_device_create: + cdn->dev = NULL; + cdev_del(&cdn->cdev); +err_add_cdev: + idr_remove(&tipc_devices, cdn->minor); + return ret; +} + +static void create_cdev_node(struct tipc_virtio_dev *vds, + struct tipc_cdev_node *cdn) +{ + int err; + + mutex_lock(&tipc_devices_lock); + + if (!default_vdev) { + kref_get(&vds->refcount); + default_vdev = vds->vdev; + } + + if (vds->cdev_name[0] && !cdn->dev) { + kref_get(&vds->refcount); + err = _create_cdev_node(&vds->vdev->dev, cdn, vds->cdev_name); + if (err) { + dev_err(&vds->vdev->dev, + "failed (%d) to create cdev node\n", err); + kref_put(&vds->refcount, _free_vds); + } + } + mutex_unlock(&tipc_devices_lock); +} + +static void destroy_cdev_node(struct tipc_virtio_dev *vds, + struct tipc_cdev_node *cdn) +{ + mutex_lock(&tipc_devices_lock); + if (cdn->dev) { + device_destroy(tipc_class, MKDEV(tipc_major, cdn->minor)); + cdev_del(&cdn->cdev); + idr_remove(&tipc_devices, cdn->minor); + cdn->dev = NULL; + kref_put(&vds->refcount, _free_vds); + } + + if (default_vdev == vds->vdev) { + default_vdev = NULL; + kref_put(&vds->refcount, _free_vds); + } + + mutex_unlock(&tipc_devices_lock); +} + +static void _go_online(struct tipc_virtio_dev *vds) +{ + mutex_lock(&vds->lock); + if (vds->state == VDS_OFFLINE) + vds->state = VDS_ONLINE; + mutex_unlock(&vds->lock); + + create_cdev_node(vds, &vds->cdev_node); + + dev_info(&vds->vdev->dev, "is online\n"); +} + +static void _go_offline(struct tipc_virtio_dev *vds) +{ + struct tipc_chan *chan; + + /* change state to OFFLINE */ + mutex_lock(&vds->lock); + if (vds->state != VDS_ONLINE) { + mutex_unlock(&vds->lock); + return; + } + vds->state = VDS_OFFLINE; + mutex_unlock(&vds->lock); + + /* wakeup all waiters */ + wake_up_interruptible_all(&vds->sendq); + + /* shutdown all channels */ + while ((chan = vds_lookup_channel(vds, TIPC_ANY_ADDR))) { + 
mutex_lock(&chan->lock); + chan->state = TIPC_STALE; + chan->remote = 0; + chan_trigger_event(chan, TIPC_CHANNEL_SHUTDOWN); + mutex_unlock(&chan->lock); + kref_put(&chan->refcount, _free_chan); + } + + /* shutdown device node */ + destroy_cdev_node(vds, &vds->cdev_node); + + dev_info(&vds->vdev->dev, "is offline\n"); +} + +static void _handle_conn_rsp(struct tipc_virtio_dev *vds, + struct tipc_conn_rsp_body *rsp, size_t len) +{ + struct tipc_chan *chan; + + if (sizeof(*rsp) != len) { + dev_err(&vds->vdev->dev, "%s: Invalid response length %zd\n", + __func__, len); + return; + } + + dev_dbg(&vds->vdev->dev, + "%s: connection response: for addr 0x%x: " + "status %d remote addr 0x%x\n", + __func__, rsp->target, rsp->status, rsp->remote); + + /* Lookup channel */ + chan = vds_lookup_channel(vds, rsp->target); + if (chan) { + mutex_lock(&chan->lock); + if (chan->state == TIPC_CONNECTING) { + if (!rsp->status) { + chan->state = TIPC_CONNECTED; + chan->remote = rsp->remote; + chan->max_msg_cnt = rsp->max_msg_cnt; + chan->max_msg_size = rsp->max_msg_size; + chan_trigger_event(chan, + TIPC_CHANNEL_CONNECTED); + } else { + chan->state = TIPC_DISCONNECTED; + chan->remote = 0; + chan_trigger_event(chan, + TIPC_CHANNEL_DISCONNECTED); + } + } + mutex_unlock(&chan->lock); + kref_put(&chan->refcount, _free_chan); + } +} + +static void _handle_disc_req(struct tipc_virtio_dev *vds, + struct tipc_disc_req_body *req, size_t len) +{ + struct tipc_chan *chan; + + if (sizeof(*req) != len) { + dev_err(&vds->vdev->dev, "%s: Invalid request length %zd\n", + __func__, len); + return; + } + + dev_dbg(&vds->vdev->dev, "%s: disconnect request: for addr 0x%x\n", + __func__, req->target); + + chan = vds_lookup_channel(vds, req->target); + if (chan) { + mutex_lock(&chan->lock); + if (chan->state == TIPC_CONNECTED || + chan->state == TIPC_CONNECTING) { + chan->state = TIPC_DISCONNECTED; + chan->remote = 0; + chan_trigger_event(chan, TIPC_CHANNEL_DISCONNECTED); + } + mutex_unlock(&chan->lock); + kref_put(&chan->refcount, _free_chan); + } +} + +static void _handle_ctrl_msg(struct tipc_virtio_dev *vds, + void *data, int len, u32 src) +{ + struct tipc_ctrl_msg *msg = data; + + if ((len < sizeof(*msg)) || (sizeof(*msg) + msg->body_len != len)) { + dev_err(&vds->vdev->dev, + "%s: Invalid message length ( %d vs. 
%d)\n", + __func__, (int)(sizeof(*msg) + msg->body_len), len); + return; + } + + dev_dbg(&vds->vdev->dev, + "%s: Incoming ctrl message: src 0x%x type %d len %d\n", + __func__, src, msg->type, msg->body_len); + + switch (msg->type) { + case TIPC_CTRL_MSGTYPE_GO_ONLINE: + _go_online(vds); + break; + + case TIPC_CTRL_MSGTYPE_GO_OFFLINE: + _go_offline(vds); + break; + + case TIPC_CTRL_MSGTYPE_CONN_RSP: + _handle_conn_rsp(vds, (struct tipc_conn_rsp_body *)msg->body, + msg->body_len); + break; + + case TIPC_CTRL_MSGTYPE_DISC_REQ: + _handle_disc_req(vds, (struct tipc_disc_req_body *)msg->body, + msg->body_len); + break; + + default: + dev_warn(&vds->vdev->dev, + "%s: Unexpected message type: %d\n", + __func__, msg->type); + } +} + +static int _handle_rxbuf(struct tipc_virtio_dev *vds, + struct tipc_msg_buf *rxbuf, size_t rxlen) +{ + int err; + struct scatterlist sg; + struct tipc_msg_hdr *msg; + struct device *dev = &vds->vdev->dev; + + /* message sanity check */ + if (rxlen > rxbuf->buf_sz) { + dev_warn(dev, "inbound msg is too big: %zd\n", rxlen); + goto drop_it; + } + + if (rxlen < sizeof(*msg)) { + dev_warn(dev, "inbound msg is too short: %zd\n", rxlen); + goto drop_it; + } + + /* reset buffer and put data */ + mb_reset(rxbuf); + mb_put_data(rxbuf, rxlen); + + /* get message header */ + msg = mb_get_data(rxbuf, sizeof(*msg)); + if (mb_avail_data(rxbuf) != msg->len) { + dev_warn(dev, "inbound msg length mismatch: (%d vs. %d)\n", + (uint) mb_avail_data(rxbuf), (uint)msg->len); + goto drop_it; + } + + dev_dbg(dev, "From: %d, To: %d, Len: %d, Flags: 0x%x, Reserved: %d\n", + msg->src, msg->dst, msg->len, msg->flags, msg->reserved); + + /* message directed to control endpoint is a special case */ + if (msg->dst == TIPC_CTRL_ADDR) { + _handle_ctrl_msg(vds, msg->data, msg->len, msg->src); + } else { + struct tipc_chan *chan = NULL; + /* Lookup channel */ + chan = vds_lookup_channel(vds, msg->dst); + if (chan) { + /* handle it */ + rxbuf = chan->ops->handle_msg(chan->ops_arg, rxbuf); + BUG_ON(!rxbuf); + kref_put(&chan->refcount, _free_chan); + } + } + +drop_it: + /* add the buffer back to the virtqueue */ + sg_init_one(&sg, rxbuf->buf_va, rxbuf->buf_sz); + err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL); + if (err < 0) { + dev_err(dev, "failed to add a virtqueue buffer: %d\n", err); + return err; + } + + return 0; +} + +static void _rxvq_cb(struct virtqueue *rxvq) +{ + unsigned int len; + struct tipc_msg_buf *mb; + unsigned int msg_cnt = 0; + struct tipc_virtio_dev *vds = rxvq->vdev->priv; + + while ((mb = virtqueue_get_buf(rxvq, &len)) != NULL) { + if (_handle_rxbuf(vds, mb, len)) + break; + msg_cnt++; + } + + /* tell the other size that we added rx buffers */ + if (msg_cnt) + virtqueue_kick(rxvq); +} + +static void _txvq_cb(struct virtqueue *txvq) +{ + unsigned int len; + struct tipc_msg_buf *mb; + bool need_wakeup = false; + struct tipc_virtio_dev *vds = txvq->vdev->priv; + + dev_dbg(&txvq->vdev->dev, "%s\n", __func__); + + /* detach all buffers */ + mutex_lock(&vds->lock); + while ((mb = virtqueue_get_buf(txvq, &len)) != NULL) + need_wakeup |= _put_txbuf_locked(vds, mb); + mutex_unlock(&vds->lock); + + if (need_wakeup) { + /* wake up potential senders waiting for a tx buffer */ + wake_up_interruptible_all(&vds->sendq); + } +} + +static int tipc_virtio_probe(struct virtio_device *vdev) +{ + int err, i; + struct tipc_virtio_dev *vds; + struct tipc_dev_config config; + struct virtqueue *vqs[2]; + vq_callback_t *vq_cbs[] = {_rxvq_cb, _txvq_cb}; + const char *vq_names[] = { "rx", "tx" 
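	/*
	 * Illustrative sketch of the wire format handled by _handle_rxbuf()
	 * and _handle_ctrl_msg() above. Field names are taken from the code;
	 * the exact layouts live in the shared trusty-ipc headers, so the
	 * widths and ordering shown here are assumptions:
	 *
	 *	struct tipc_msg_hdr {          // every inbound buffer
	 *		u32 src, dst;          // dst == TIPC_CTRL_ADDR for
	 *		u32 reserved;          //   control traffic
	 *		u16 len, flags;        // len must equal the bytes
	 *		u8  data[];            //   left in the buffer
	 *	};
	 *
	 *	struct tipc_ctrl_msg {         // payload of a control message
	 *		u32 type;              // e.g. TIPC_CTRL_MSGTYPE_CONN_RSP
	 *		u32 body_len;          // sizeof(*msg) + body_len == len
	 *		u8  body[];
	 *	};
	 */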
}; + + err = trusty_detect_vmm(); + if (err < 0) { + dev_err(&vdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + + dev_dbg(&vdev->dev, "%s:\n", __func__); + + vds = kzalloc(sizeof(*vds), GFP_KERNEL); + if (!vds) + return -ENOMEM; + + vds->vdev = vdev; + + mutex_init(&vds->lock); + kref_init(&vds->refcount); + init_waitqueue_head(&vds->sendq); + INIT_LIST_HEAD(&vds->free_buf_list); + idr_init(&vds->addr_idr); + + /* set default max message size and alignment */ + memset(&config, 0, sizeof(config)); + config.msg_buf_max_size = DEFAULT_MSG_BUF_SIZE; + config.msg_buf_alignment = DEFAULT_MSG_BUF_ALIGN; + + /* get configuration if present */ + vdev->config->get(vdev, 0, &config, sizeof(config)); + + /* copy dev name */ + strncpy(vds->cdev_name, config.dev_name, sizeof(vds->cdev_name)); + vds->cdev_name[sizeof(vds->cdev_name)-1] = '\0'; + + /* find tx virtqueues (rx and tx and in this order) */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names, NULL, NULL); +#else + err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names); +#endif + if (err) + goto err_find_vqs; + + vds->rxvq = vqs[0]; + vds->txvq = vqs[1]; + + /* save max buffer size and count */ + vds->msg_buf_max_sz = config.msg_buf_max_size; + vds->msg_buf_max_cnt = virtqueue_get_vring_size(vds->txvq); + + /* set up the receive buffers */ + for (i = 0; i < virtqueue_get_vring_size(vds->rxvq); i++) { + struct scatterlist sg; + struct tipc_msg_buf *rxbuf; + + rxbuf = _alloc_msg_buf(vds->msg_buf_max_sz); + if (!rxbuf) { + dev_err(&vdev->dev, "failed to allocate rx buffer\n"); + err = -ENOMEM; + goto err_free_rx_buffers; + } + + sg_init_one(&sg, rxbuf->buf_va, rxbuf->buf_sz); + err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL); + WARN_ON(err); /* sanity check; this can't really happen */ + } + + vdev->priv = vds; + vds->state = VDS_OFFLINE; + + dev_dbg(&vdev->dev, "%s: done\n", __func__); + return 0; + +err_free_rx_buffers: + _cleanup_vq(vds->rxvq); +err_find_vqs: + kref_put(&vds->refcount, _free_vds); + return err; +} + +static void tipc_virtio_remove(struct virtio_device *vdev) +{ + struct tipc_virtio_dev *vds = vdev->priv; + + _go_offline(vds); + + mutex_lock(&vds->lock); + vds->state = VDS_DEAD; + vds->vdev = NULL; + mutex_unlock(&vds->lock); + + vdev->config->reset(vdev); + + idr_destroy(&vds->addr_idr); + + _cleanup_vq(vds->rxvq); + _cleanup_vq(vds->txvq); + _free_msg_buf_list(&vds->free_buf_list); + + vdev->config->del_vqs(vdev); + + kref_put(&vds->refcount, _free_vds); +} + +static struct virtio_device_id tipc_virtio_id_table[] = { + { VIRTIO_ID_TRUSTY_IPC, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static unsigned int features[] = { + 0, +}; + +static struct virtio_driver virtio_tipc_driver = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = tipc_virtio_id_table, + .probe = tipc_virtio_probe, + .remove = tipc_virtio_remove, +}; + +static int __init tipc_init(void) +{ + int ret; + dev_t dev; + + ret = alloc_chrdev_region(&dev, 0, MAX_DEVICES, KBUILD_MODNAME); + if (ret) { + pr_err("%s: alloc_chrdev_region failed: %d\n", __func__, ret); + return ret; + } + + tipc_major = MAJOR(dev); + tipc_class = class_create(THIS_MODULE, KBUILD_MODNAME); + if (IS_ERR(tipc_class)) { + ret = PTR_ERR(tipc_class); + pr_err("%s: class_create failed: %d\n", __func__, ret); + goto err_class_create; + } + + ret = 
register_virtio_driver(&virtio_tipc_driver); + if (ret) { + pr_err("failed to register virtio driver: %d\n", ret); + goto err_register_virtio_drv; + } + + return 0; + +err_register_virtio_drv: + class_destroy(tipc_class); + +err_class_create: + unregister_chrdev_region(dev, MAX_DEVICES); + return ret; +} + +static void __exit tipc_exit(void) +{ + unregister_virtio_driver(&virtio_tipc_driver); + class_destroy(tipc_class); + unregister_chrdev_region(MKDEV(tipc_major, 0), MAX_DEVICES); +} + +/* We need to init this early */ +subsys_initcall(tipc_init); +module_exit(tipc_exit); + +MODULE_DEVICE_TABLE(tipc, tipc_virtio_id_table); +MODULE_DESCRIPTION("Trusty IPC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/trusty/trusty-irq.c b/drivers/trusty/trusty-irq.c new file mode 100644 index 000000000000..af2af6ee37ba --- /dev/null +++ b/drivers/trusty/trusty-irq.c @@ -0,0 +1,670 @@ +/* + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IRQ_VECTOR_OFFSET 0x30 +#define IRQ_FOR_LK_TIMER 1 + +struct trusty_irq { + struct trusty_irq_state *is; + struct hlist_node node; + unsigned int irq; + bool percpu; + bool enable; + struct trusty_irq __percpu *percpu_ptr; +}; + +struct trusty_irq_irqset { + struct hlist_head pending; + struct hlist_head inactive; +}; + +struct trusty_irq_state { + struct device *dev; + struct device *trusty_dev; + struct trusty_irq_irqset normal_irqs; + spinlock_t normal_irqs_lock; + struct trusty_irq_irqset __percpu *percpu_irqs; + struct notifier_block trusty_call_notifier; + /* CPU hotplug instances for online */ + struct hlist_node node; +}; + +static enum cpuhp_state trusty_irq_online; + +#define TRUSTY_VMCALL_PENDING_INTR 0x74727505 +static inline void set_pending_intr_to_lk(uint8_t vector) +{ + __asm__ __volatile__( + "vmcall" + ::"a"(TRUSTY_VMCALL_PENDING_INTR), "b"(vector) + ); +} + +static void trusty_irq_enable_pending_irqs(struct trusty_irq_state *is, + struct trusty_irq_irqset *irqset, + bool percpu) +{ + struct hlist_node *n; + struct trusty_irq *trusty_irq; + + hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) { + dev_dbg(is->dev, + "%s: enable pending irq %d, percpu %d, cpu %d\n", + __func__, trusty_irq->irq, percpu, smp_processor_id()); + if (percpu) + enable_percpu_irq(trusty_irq->irq, 0); + else + enable_irq(trusty_irq->irq); + hlist_del(&trusty_irq->node); + hlist_add_head(&trusty_irq->node, &irqset->inactive); + } +} + +static void trusty_irq_enable_irqset(struct trusty_irq_state *is, + struct trusty_irq_irqset *irqset) +{ + struct trusty_irq *trusty_irq; + + hlist_for_each_entry(trusty_irq, &irqset->inactive, node) { + if (trusty_irq->enable) { + dev_warn(is->dev, + "%s: percpu irq %d already enabled, cpu %d\n", + __func__, trusty_irq->irq, smp_processor_id()); + continue; + } + dev_dbg(is->dev, "%s: enable percpu irq %d, cpu %d\n", + __func__, trusty_irq->irq, smp_processor_id()); + enable_percpu_irq(trusty_irq->irq, 
0); + trusty_irq->enable = true; + } +} + +static void trusty_irq_disable_irqset(struct trusty_irq_state *is, + struct trusty_irq_irqset *irqset) +{ + struct hlist_node *n; + struct trusty_irq *trusty_irq; + + hlist_for_each_entry(trusty_irq, &irqset->inactive, node) { + if (!trusty_irq->enable) { + dev_warn(is->dev, + "irq %d already disabled, percpu %d, cpu %d\n", + trusty_irq->irq, trusty_irq->percpu, + smp_processor_id()); + continue; + } + dev_dbg(is->dev, "%s: disable irq %d, percpu %d, cpu %d\n", + __func__, trusty_irq->irq, trusty_irq->percpu, + smp_processor_id()); + trusty_irq->enable = false; + if (trusty_irq->percpu) + disable_percpu_irq(trusty_irq->irq); + else + disable_irq_nosync(trusty_irq->irq); + } + hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) { + if (!trusty_irq->enable) { + dev_warn(is->dev, + "pending irq %d already disabled, percpu %d, cpu %d\n", + trusty_irq->irq, trusty_irq->percpu, + smp_processor_id()); + } + dev_dbg(is->dev, + "%s: disable pending irq %d, percpu %d, cpu %d\n", + __func__, trusty_irq->irq, trusty_irq->percpu, + smp_processor_id()); + trusty_irq->enable = false; + hlist_del(&trusty_irq->node); + hlist_add_head(&trusty_irq->node, &irqset->inactive); + } +} + +static int trusty_irq_call_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct trusty_irq_state *is; + + BUG_ON(!irqs_disabled()); + + if (action != TRUSTY_CALL_PREPARE) + return NOTIFY_DONE; + + is = container_of(nb, struct trusty_irq_state, trusty_call_notifier); + + spin_lock(&is->normal_irqs_lock); + trusty_irq_enable_pending_irqs(is, &is->normal_irqs, false); + spin_unlock(&is->normal_irqs_lock); + trusty_irq_enable_pending_irqs(is, this_cpu_ptr(is->percpu_irqs), true); + + return NOTIFY_OK; +} + +irqreturn_t trusty_irq_handler(int irq, void *data) +{ + struct trusty_irq *trusty_irq = data; + struct trusty_irq_state *is = trusty_irq->is; + struct trusty_irq_irqset *irqset; + + dev_dbg(is->dev, "%s: irq %d, percpu %d, cpu %d, enable %d\n", + __func__, irq, trusty_irq->irq, smp_processor_id(), + trusty_irq->enable); + + WARN_ON(irq != IRQ_FOR_LK_TIMER); + + set_pending_intr_to_lk(irq+IRQ_VECTOR_OFFSET); + + if (trusty_irq->percpu) { + disable_percpu_irq(irq); + irqset = this_cpu_ptr(is->percpu_irqs); + } else { + disable_irq_nosync(irq); + irqset = &is->normal_irqs; + } + + spin_lock(&is->normal_irqs_lock); + if (trusty_irq->enable) { + hlist_del(&trusty_irq->node); + hlist_add_head(&trusty_irq->node, &irqset->pending); + } + spin_unlock(&is->normal_irqs_lock); + + trusty_enqueue_nop(is->trusty_dev, NULL); + + dev_dbg(is->dev, "%s: irq %d done\n", __func__, irq); + + return IRQ_HANDLED; +} + +static int trusty_irq_cpu_up(unsigned int cpu, struct hlist_node *node) +{ + unsigned long irq_flags; + struct trusty_irq_state *is = hlist_entry_safe(node, struct trusty_irq_state, node); + + if(is == NULL) + return 0; + + dev_dbg(is->dev, "%s: cpu %d\n", __func__, smp_processor_id()); + + local_irq_save(irq_flags); + trusty_irq_enable_irqset(is, this_cpu_ptr(is->percpu_irqs)); + local_irq_restore(irq_flags); + return 0; +} + +static int trusty_irq_cpu_down(unsigned int cpu, struct hlist_node *node) +{ + unsigned long irq_flags; + struct trusty_irq_state *is = hlist_entry_safe(node, struct trusty_irq_state, node); + + if(is == NULL) + return 0; + + dev_dbg(is->dev, "%s: cpu %d\n", __func__, smp_processor_id()); + + local_irq_save(irq_flags); + trusty_irq_disable_irqset(is, this_cpu_ptr(is->percpu_irqs)); + local_irq_restore(irq_flags); + return 0; +} + 
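/*
 * Hypothetical devicetree fragment for the properties parsed by
 * trusty_irq_create_irq_mapping() below (all names and numbers are
 * illustrative, not taken from a real board file):
 *
 *	interrupt-templates = <&gic>;    // template 0, resolved through
 *	                                 //   #interrupt-cells
 *	interrupt-ranges = <0 15 0>;     // <base end template>: trusty
 *	                                 //   irqs 0..15 use template 0
 *
 * If template 0 expands to the cells <1 GIC_SPI IRQ_TYPE_LEVEL_HIGH>
 * (cell 0 says the irq id belongs at position 1), trusty irq 5 becomes
 * the specifier <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH> before it is handed to
 * irq_create_of_mapping().
 */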
+static int trusty_irq_create_irq_mapping(struct trusty_irq_state *is, int irq) +{ + int ret; + int index; + u32 irq_pos; + u32 templ_idx; + u32 range_base; + u32 range_end; + struct of_phandle_args oirq; + + /* check if "interrupt-ranges" property is present */ + if (!of_find_property(is->dev->of_node, "interrupt-ranges", NULL)) { + /* fallback to old behavior to be backward compatible with + * systems that do not need IRQ domains. + */ + return irq; + } + + /* find irq range */ + for (index = 0;; index += 3) { + ret = of_property_read_u32_index(is->dev->of_node, + "interrupt-ranges", + index, &range_base); + if (ret) + return ret; + + ret = of_property_read_u32_index(is->dev->of_node, + "interrupt-ranges", + index + 1, &range_end); + if (ret) + return ret; + + if (irq >= range_base && irq <= range_end) + break; + } + + /* read the rest of range entry: template index and irq_pos */ + ret = of_property_read_u32_index(is->dev->of_node, + "interrupt-ranges", + index + 2, &templ_idx); + if (ret) + return ret; + + /* read irq template */ + ret = of_parse_phandle_with_args(is->dev->of_node, + "interrupt-templates", + "#interrupt-cells", + templ_idx, &oirq); + if (ret) + return ret; + + WARN_ON(!oirq.np); + WARN_ON(!oirq.args_count); + + /* + * An IRQ template is a non empty array of u32 values describing group + * of interrupts having common properties. The u32 entry with index + * zero contains the position of irq_id in interrupt specifier array + * followed by data representing interrupt specifier array with irq id + * field omitted, so to convert irq template to interrupt specifier + * array we have to move down one slot the first irq_pos entries and + * replace the resulting gap with real irq id. + */ + irq_pos = oirq.args[0]; + + if (irq_pos >= oirq.args_count) { + dev_err(is->dev, "irq pos is out of range: %d\n", irq_pos); + return -EINVAL; + } + + for (index = 1; index <= irq_pos; index++) + oirq.args[index - 1] = oirq.args[index]; + + oirq.args[irq_pos] = irq - range_base; + + ret = irq_create_of_mapping(&oirq); + + return (!ret) ? 
-EINVAL : ret;
+}
+
+static inline void trusty_irq_unmask(struct irq_data *data)
+{
+	return;
+}
+
+static inline void trusty_irq_mask(struct irq_data *data)
+{
+	return;
+}
+
+static void trusty_irq_enable(struct irq_data *data)
+{
+	return;
+}
+
+static void trusty_irq_disable(struct irq_data *data)
+{
+	return;
+}
+
+void trusty_irq_eoi(struct irq_data *data)
+{
+	return;
+}
+
+static struct irq_chip trusty_irq_chip = {
+	.name = "TRUSTY-IRQ",
+	.irq_mask = trusty_irq_mask,
+	.irq_unmask = trusty_irq_unmask,
+	.irq_enable = trusty_irq_enable,
+	.irq_disable = trusty_irq_disable,
+	.irq_eoi = trusty_irq_eoi,
+};
+
+static int trusty_irq_init_normal_irq(struct trusty_irq_state *is, int tirq)
+{
+	int ret;
+	int irq;
+	unsigned long irq_flags;
+	struct trusty_irq *trusty_irq;
+
+	dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq);
+
+	irq = tirq;
+
+	trusty_irq = kzalloc(sizeof(*trusty_irq), GFP_KERNEL);
+	if (!trusty_irq)
+		return -ENOMEM;
+
+	trusty_irq->is = is;
+	trusty_irq->irq = irq;
+	trusty_irq->enable = true;
+
+	spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
+	hlist_add_head(&trusty_irq->node, &is->normal_irqs.inactive);
+	spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
+
+	ret = irq_alloc_desc_at(irq, 0);
+	if (ret >= 0)
+		irq_set_chip_and_handler_name(irq, &trusty_irq_chip, handle_edge_irq, "trusty-irq");
+	else if (ret != -EEXIST) {
+		dev_err(is->dev, "can't allocate irq desc %d\n", ret);
+		goto err_request_irq;
+	}
+
+	ret = request_irq(irq, trusty_irq_handler, IRQF_NO_THREAD,
+			  "trusty-irq", trusty_irq);
+
+	if (ret) {
+		dev_err(is->dev, "request_irq failed %d\n", ret);
+		goto err_request_irq;
+	}
+	return 0;
+
+err_request_irq:
+	spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
+	hlist_del(&trusty_irq->node);
+	spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
+	kfree(trusty_irq);
+	return ret;
+}
+
+static int trusty_irq_init_per_cpu_irq(struct trusty_irq_state *is, int tirq)
+{
+	int ret;
+	int irq;
+	unsigned int cpu;
+	struct trusty_irq __percpu *trusty_irq_handler_data;
+
+	dev_dbg(is->dev, "%s: irq %d\n", __func__, tirq);
+
+	irq = trusty_irq_create_irq_mapping(is, tirq);
+	if (irq <= 0) {
+		dev_err(is->dev,
+			"trusty_irq_create_irq_mapping failed (%d)\n", irq);
+		return irq;
+	}
+
+	trusty_irq_handler_data = alloc_percpu(struct trusty_irq);
+	if (!trusty_irq_handler_data)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu) {
+		struct trusty_irq *trusty_irq;
+		struct trusty_irq_irqset *irqset;
+
+		if (cpu >= NR_CPUS)
+			return -EINVAL;
+		trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu);
+		irqset = per_cpu_ptr(is->percpu_irqs, cpu);
+
+		trusty_irq->is = is;
+		hlist_add_head(&trusty_irq->node, &irqset->inactive);
+		trusty_irq->irq = irq;
+		trusty_irq->percpu = true;
+		trusty_irq->percpu_ptr = trusty_irq_handler_data;
+	}
+
+	ret = request_percpu_irq(irq, trusty_irq_handler, "trusty",
+				 trusty_irq_handler_data);
+	if (ret) {
+		dev_err(is->dev, "request_percpu_irq failed %d\n", ret);
+		goto err_request_percpu_irq;
+	}
+
+	return 0;
+
+err_request_percpu_irq:
+	for_each_possible_cpu(cpu) {
+		struct trusty_irq *trusty_irq;
+
+		if (cpu >= NR_CPUS)
+			return -EINVAL;
+		trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu);
+		hlist_del(&trusty_irq->node);
+	}
+
+	free_percpu(trusty_irq_handler_data);
+	return ret;
+}
+
+static int trusty_smc_get_next_irq(struct trusty_irq_state *is,
+				   unsigned long min_irq, bool per_cpu)
+{
+	return trusty_fast_call32(is->trusty_dev, SMC_FC_GET_NEXT_IRQ,
+				  min_irq, per_cpu, 0);
+}
+
+static int
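/*
 * trusty_irq_init_one() below drives the SMC_FC_GET_NEXT_IRQ enumeration:
 * each fast call returns the lowest secure-side irq >= min_irq, or a
 * negative value once the list is exhausted, and the function returns
 * irq + 1 so that probe can keep calling it until the result goes
 * negative. For example (vector numbers illustrative): successive calls
 * returning 0x31 and then a negative value register one irq and stop
 * the loop.
 */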
trusty_irq_init_one(struct trusty_irq_state *is, + int irq, bool per_cpu) +{ + int ret; + + irq = trusty_smc_get_next_irq(is, irq, per_cpu); + if (irq < 0) + return irq; + dev_info(is->dev, "irq from lk = %d\n", irq); + + WARN_ON(irq-IRQ_VECTOR_OFFSET != IRQ_FOR_LK_TIMER); + + if (per_cpu) + ret = trusty_irq_init_per_cpu_irq(is, irq-IRQ_VECTOR_OFFSET); + else + ret = trusty_irq_init_normal_irq(is, irq-IRQ_VECTOR_OFFSET); + + if (ret) { + dev_warn(is->dev, + "failed to initialize irq %d, irq will be ignored\n", + irq); + } + + return irq + 1; +} + +static void trusty_irq_free_irqs(struct trusty_irq_state *is) +{ + struct trusty_irq *irq; + struct hlist_node *n; + + hlist_for_each_entry_safe(irq, n, &is->normal_irqs.inactive, node) { + dev_dbg(is->dev, "%s: irq %d\n", __func__, irq->irq); + free_irq(irq->irq, irq); + hlist_del(&irq->node); + kfree(irq); + } +/* + hlist_for_each_entry_safe(irq, n, + &this_cpu_ptr(is->percpu_irqs)->inactive, + node) { + struct trusty_irq __percpu *trusty_irq_handler_data; + + dev_dbg(is->dev, "%s: percpu irq %d\n", __func__, irq->irq); + trusty_irq_handler_data = irq->percpu_ptr; + free_percpu_irq(irq->irq, trusty_irq_handler_data); + for_each_possible_cpu(cpu) { + struct trusty_irq *irq_tmp; + + irq_tmp = per_cpu_ptr(trusty_irq_handler_data, cpu); + hlist_del(&irq_tmp->node); + } + free_percpu(trusty_irq_handler_data); + } */ +} + +static int trusty_irq_cpu_notif_add(struct trusty_irq_state *is) +{ + int ret; + + ret = cpuhp_state_add_instance(trusty_irq_online, &is->node); + + return ret; +} + +static void trusty_irq_cpu_notif_remove(struct trusty_irq_state *is) +{ + cpuhp_state_remove_instance(trusty_irq_online, &is->node); +} + +static int trusty_irq_probe(struct platform_device *pdev) +{ + int ret; + int irq; + unsigned long irq_flags; + struct trusty_irq_state *is; + + ret = trusty_detect_vmm(); + if (ret < 0) { + dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + + dev_dbg(&pdev->dev, "%s\n", __func__); + + is = kzalloc(sizeof(*is), GFP_KERNEL); + if (!is) { + ret = -ENOMEM; + goto err_alloc_is; + } + + is->dev = &pdev->dev; + is->trusty_dev = is->dev->parent; + spin_lock_init(&is->normal_irqs_lock); + is->percpu_irqs = alloc_percpu(struct trusty_irq_irqset); + if (!is->percpu_irqs) { + ret = -ENOMEM; + goto err_alloc_pending_percpu_irqs; + } + + platform_set_drvdata(pdev, is); + + is->trusty_call_notifier.notifier_call = trusty_irq_call_notify; + ret = trusty_call_notifier_register(is->trusty_dev, + &is->trusty_call_notifier); + if (ret) { + dev_err(&pdev->dev, + "failed to register trusty call notifier\n"); + goto err_trusty_call_notifier_register; + } + + for (irq = 0; irq >= 0;) + irq = trusty_irq_init_one(is, irq, false); + + ret = trusty_irq_cpu_notif_add(is); + if (ret) { + dev_err(&pdev->dev, "register_cpu_notifier failed %d\n", ret); + goto err_register_hotcpu_notifier; + } + + return 0; + +err_register_hotcpu_notifier: + spin_lock_irqsave(&is->normal_irqs_lock, irq_flags); + trusty_irq_disable_irqset(is, &is->normal_irqs); + spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags); + trusty_irq_free_irqs(is); + trusty_call_notifier_unregister(is->trusty_dev, + &is->trusty_call_notifier); +err_trusty_call_notifier_register: + free_percpu(is->percpu_irqs); +err_alloc_pending_percpu_irqs: + kfree(is); +err_alloc_is: + return ret; +} + +static int trusty_irq_remove(struct platform_device *pdev) +{ + unsigned long irq_flags; + struct trusty_irq_state *is = platform_get_drvdata(pdev); + + dev_dbg(&pdev->dev, 
"%s\n", __func__); + + trusty_irq_cpu_notif_remove(is); + spin_lock_irqsave(&is->normal_irqs_lock, irq_flags); + trusty_irq_disable_irqset(is, &is->normal_irqs); + spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags); + + trusty_irq_free_irqs(is); + + trusty_call_notifier_unregister(is->trusty_dev, + &is->trusty_call_notifier); + free_percpu(is->percpu_irqs); + kfree(is); + + return 0; +} + +static const struct of_device_id trusty_test_of_match[] = { + { .compatible = "android,trusty-irq-v1", }, + {}, +}; + +static struct platform_driver trusty_irq_driver = { + .probe = trusty_irq_probe, + .remove = trusty_irq_remove, + .driver = { + .name = "trusty-irq", + .owner = THIS_MODULE, + .of_match_table = trusty_test_of_match, + }, +}; + +static int __init trusty_irq_driver_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "x86/trustyirq:online", + trusty_irq_cpu_up, trusty_irq_cpu_down); + if (ret < 0) + goto out; + trusty_irq_online = ret; + + ret = platform_driver_register(&trusty_irq_driver); + if (ret) + goto err_dead; + + return 0; +err_dead: + cpuhp_remove_multi_state(trusty_irq_online); +out: + return ret; +} + +static void __exit trusty_irq_driver_exit(void) +{ + cpuhp_remove_multi_state(trusty_irq_online); + platform_driver_unregister(&trusty_irq_driver); +} + +module_init(trusty_irq_driver_init); +module_exit(trusty_irq_driver_exit); + +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/trusty/trusty-log.c b/drivers/trusty/trusty-log.c new file mode 100755 index 000000000000..d2446a1f34c9 --- /dev/null +++ b/drivers/trusty/trusty-log.c @@ -0,0 +1,420 @@ +/* + * Copyright (C) 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "trusty-log.h" + +#define TRUSTY_LOG_SIZE (PAGE_SIZE * 2) +#define TRUSTY_LINE_BUFFER_SIZE 256 + +#ifdef CONFIG_64BIT +static uint64_t g_vmm_debug_buf; +#else +static uint32_t g_vmm_debug_buf; +#endif + +struct trusty_log_state { + struct device *dev; + struct device *trusty_dev; + + /* + * This lock is here to ensure only one consumer will read + * from the log ring buffer at a time. + */ + spinlock_t lock; + struct log_rb *log; + uint32_t get; + + struct page *log_pages; + + struct notifier_block call_notifier; + struct notifier_block panic_notifier; + char line_buffer[TRUSTY_LINE_BUFFER_SIZE]; +}; + +static int log_read_line(struct trusty_log_state *s, int put, int get) +{ + struct log_rb *log = s->log; + int i; + char c = '\0'; + size_t max_to_read = min((size_t)(put - get), + sizeof(s->line_buffer) - 1); + size_t mask = log->sz - 1; + + for (i = 0; i < max_to_read && c != '\n';) + s->line_buffer[i++] = c = log->data[get++ & mask]; + s->line_buffer[i] = '\0'; + + return i; +} + +static void trusty_dump_logs(struct trusty_log_state *s, bool dump_panic_log) +{ + struct log_rb *log = s->log; + uint32_t get, put, alloc; + int read_chars; + + BUG_ON(!is_power_of_2(log->sz)); + + /* + * For this ring buffer, at any given point, alloc >= put >= get. 
+ * The producer side of the buffer is not locked, so the put and alloc + * pointers must be read in a defined order (put before alloc) so + * that the above condition is maintained. A read barrier is needed + * to make sure the hardware and compiler keep the reads ordered. + */ + get = s->get; + while ((put = log->put) != get) { + /* Make sure that the read of put occurs before the read of log data */ + rmb(); + + /* Read a line from the log */ + read_chars = log_read_line(s, put, get); + + /* Force the loads from log_read_line to complete. */ + rmb(); + alloc = log->alloc; + + /* + * Discard the line that was just read if the data could + * have been corrupted by the producer. + */ + if (alloc - get > log->sz) { + pr_err("trusty: log overflow."); + get = alloc - log->sz; + continue; + } + + if (dump_panic_log) + pr_info("trusty: %s", s->line_buffer); + + get += read_chars; + } + s->get = get; +} + +static int trusty_log_call_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct trusty_log_state *s; + unsigned long flags; + + if (action != TRUSTY_CALL_RETURNED) + return NOTIFY_DONE; + + s = container_of(nb, struct trusty_log_state, call_notifier); + spin_lock_irqsave(&s->lock, flags); +#ifdef CONFIG_DEBUG_INFO + trusty_dump_logs(s, true); +#else + trusty_dump_logs(s, false); +#endif + spin_unlock_irqrestore(&s->lock, flags); + return NOTIFY_OK; +} + +static int trusty_log_panic_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct trusty_log_state *s; + + /* + * Don't grab the spin lock to hold up the panic notifier, even + * though this is racy. + */ + s = container_of(nb, struct trusty_log_state, panic_notifier); + pr_info("trusty-log panic notifier - trusty version %s", + trusty_version_str_get(s->trusty_dev)); + trusty_dump_logs(s, true); + return NOTIFY_OK; +} + +static void trusty_vmm_dump_header(struct deadloop_dump *dump) +{ + struct dump_header *header; + + if (!dump) + return; + + header = &(dump->header); + pr_info("-----------VMM PANIC HEADER-----------\n"); + pr_info("VMM version = %s\n", header->vmm_version); + pr_info("Signature = %s\n", header->signature); + pr_info("Error_info = %s\n", header->error_info); + pr_info("Cpuid = %d\n", header->cpuid); + pr_info("-----------END OF VMM PANIC HEADER-----------\n"); +} + +static void trusty_vmm_dump_data(struct deadloop_dump *dump) +{ + struct dump_data *dump_data; + char *p, *pstr; + + if (!dump) + return; + + dump_data = &(dump->data); + + pr_info("-----------VMM PANIC DATA INFO-----------\n"); + pstr = (char *)dump_data->data; + for (p = pstr; p < ((char *)dump_data->data + dump_data->length); p++) { + if (*p == '\r') { + *p = 0x00; + } else if (*p == '\n') { + *p = 0x00; + pr_info("%s\n", pstr); + pstr = (char *)(p + 1); + } + } + /* dump the characters in the last line */ + if ((pstr - (char *)(dump_data->data)) < dump_data->length) { + *p = 0x00; + pr_info("%s\n", pstr); + } + pr_info("-----------END OF VMM PANIC DATA INFO-----------\n"); +} + +static int trusty_vmm_panic_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct deadloop_dump *dump_info = NULL; + + if (g_vmm_debug_buf) { + dump_info = (struct deadloop_dump *)g_vmm_debug_buf; + + if (dump_info->is_valid) { + pr_info("trusty-vmm panic start!\n"); + trusty_vmm_dump_header(dump_info); + trusty_vmm_dump_data(dump_info); + pr_info("trusty-vmm panic dump end!\n"); + } + } + + return NOTIFY_OK; +} + +static struct notifier_block trusty_vmm_panic_nb = { + .notifier_call = 
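/*
 * Worked example of the overflow check in trusty_dump_logs() above
 * (numbers are illustrative): with log->sz == 8192, get == 100 and a
 * re-read alloc == 9000, alloc - get == 8900 > 8192, so the producer may
 * have wrapped and overwritten the bytes that were just copied; the line
 * is discarded and the consumer resynchronizes at
 * get = alloc - log->sz == 808.
 */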
trusty_vmm_panic_notify, + .priority = 0, +}; + +#define TRUSTY_VMCALL_DUMP_INIT 0x74727507 +static int trusty_vmm_dump_init(void *gva) +{ + int ret = -1; + + __asm__ __volatile__( + "vmcall" + : "=a"(ret) + : "a"(TRUSTY_VMCALL_DUMP_INIT), "D"(gva) + ); + + return ret; +} + +static bool trusty_supports_logging(struct device *device) +{ + int result; + + result = trusty_std_call32(device, SMC_SC_SHARED_LOG_VERSION, + TRUSTY_LOG_API_VERSION, 0, 0); + if (result == SM_ERR_UNDEFINED_SMC) { + pr_info("trusty-log not supported on secure side.\n"); + return false; + } else if (result < 0) { + pr_err("trusty std call (SMC_SC_SHARED_LOG_VERSION) failed: %d\n", + result); + return false; + } + + if (result == TRUSTY_LOG_API_VERSION) { + return true; + } else { + pr_info("trusty-log unsupported api version: %d, supported: %d\n", + result, TRUSTY_LOG_API_VERSION); + return false; + } +} + +static int trusty_log_probe(struct platform_device *pdev) +{ + struct trusty_log_state *s; + int result; + int vmm_id; + phys_addr_t pa; + struct deadloop_dump *dump; + + vmm_id = trusty_detect_vmm(); + if (vmm_id < 0) { + dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + + dev_dbg(&pdev->dev, "%s\n", __func__); + if (!trusty_supports_logging(pdev->dev.parent)) { + return -ENXIO; + } + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) { + result = -ENOMEM; + goto error_alloc_state; + } + + spin_lock_init(&s->lock); + s->dev = &pdev->dev; + s->trusty_dev = s->dev->parent; + s->get = 0; + s->log_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, + get_order(TRUSTY_LOG_SIZE)); + if (!s->log_pages) { + result = -ENOMEM; + goto error_alloc_log; + } + s->log = page_address(s->log_pages); + + pa = page_to_phys(s->log_pages); + result = trusty_std_call32(s->trusty_dev, + SMC_SC_SHARED_LOG_ADD, + (u32)(pa), (u32)HIULINT(pa), + TRUSTY_LOG_SIZE); + if (result < 0) { + pr_err("trusty std call (SMC_SC_SHARED_LOG_ADD) failed: %d %pa\n", + result, &pa); + goto error_std_call; + } + + s->call_notifier.notifier_call = trusty_log_call_notify; + result = trusty_call_notifier_register(s->trusty_dev, + &s->call_notifier); + if (result < 0) { + dev_err(&pdev->dev, + "failed to register trusty call notifier\n"); + goto error_call_notifier; + } + + s->panic_notifier.notifier_call = trusty_log_panic_notify; + result = atomic_notifier_chain_register(&panic_notifier_list, + &s->panic_notifier); + if (result < 0) { + dev_err(&pdev->dev, + "failed to register panic notifier\n"); + goto error_panic_notifier; + } + + if(vmm_id == VMM_ID_EVMM) { + /* allocate debug buffer for vmm panic dump */ + g_vmm_debug_buf = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); + if (!g_vmm_debug_buf) { + result = -ENOMEM; + goto error_alloc_vmm; + } + + dump = (struct deadloop_dump *)g_vmm_debug_buf; + dump->version_of_this_struct = VMM_DUMP_VERSION; + dump->size_of_this_struct = sizeof(struct deadloop_dump); + dump->is_valid = false; + + /* shared the buffer to vmm by VMCALL */ + result = trusty_vmm_dump_init(dump); + if (result < 0) { + dev_err(&pdev->dev, + "failed to share the dump buffer to VMM\n"); + goto error_vmm_panic_notifier; + } + + /* register the panic notifier for vmm */ + result = atomic_notifier_chain_register(&panic_notifier_list, + &trusty_vmm_panic_nb); + if (result < 0) { + dev_err(&pdev->dev, + "failed to register vmm panic notifier\n"); + goto error_vmm_panic_notifier; + } + } + + platform_set_drvdata(pdev, s); + + return 0; + +error_vmm_panic_notifier: + free_page(g_vmm_debug_buf); +error_alloc_vmm: + 
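	/*
	 * Note on the std calls above and in the error path below: their
	 * arguments are 32 bits wide, so the 64-bit physical address of the
	 * log buffer is split as (u32)pa and (u32)HIULINT(pa); the secure
	 * side is assumed to reassemble it as pa = lo | ((u64)hi << 32).
	 */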
atomic_notifier_chain_unregister(&panic_notifier_list, + &s->panic_notifier); +error_panic_notifier: + trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier); +error_call_notifier: + trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM, + (u32)pa, (u32)HIULINT(pa), 0); +error_std_call: + __free_pages(s->log_pages, get_order(TRUSTY_LOG_SIZE)); +error_alloc_log: + kfree(s); +error_alloc_state: + return result; +} + +static int trusty_log_remove(struct platform_device *pdev) +{ + int result; + struct trusty_log_state *s = platform_get_drvdata(pdev); + phys_addr_t pa = page_to_phys(s->log_pages); + + dev_dbg(&pdev->dev, "%s\n", __func__); + + atomic_notifier_chain_unregister(&panic_notifier_list, + &trusty_vmm_panic_nb); + atomic_notifier_chain_unregister(&panic_notifier_list, + &s->panic_notifier); + trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier); + + result = trusty_std_call32(s->trusty_dev, SMC_SC_SHARED_LOG_RM, + (u32)pa, (u32)HIULINT(pa), 0); + if (result) { + pr_err("trusty std call (SMC_SC_SHARED_LOG_RM) failed: %d\n", + result); + } + __free_pages(s->log_pages, get_order(TRUSTY_LOG_SIZE)); + kfree(s); + free_page(g_vmm_debug_buf); + + return 0; +} + +static const struct of_device_id trusty_test_of_match[] = { + { .compatible = "android,trusty-log-v1", }, + {}, +}; + +static struct platform_driver trusty_log_driver = { + .probe = trusty_log_probe, + .remove = trusty_log_remove, + .driver = { + .name = "trusty-log", + .owner = THIS_MODULE, + .of_match_table = trusty_test_of_match, + }, +}; + +module_platform_driver(trusty_log_driver); +MODULE_LICENSE("GPL"); diff --git a/drivers/trusty/trusty-log.h b/drivers/trusty/trusty-log.h new file mode 100644 index 000000000000..587bc7aaa145 --- /dev/null +++ b/drivers/trusty/trusty-log.h @@ -0,0 +1,44 @@ +#ifndef _TRUSTY_LOG_H_ +#define _TRUSTY_LOG_H_ + +/* + * Ring buffer that supports one secure producer thread and one + * linux side consumer thread. + */ +struct log_rb { + volatile uint32_t alloc; + volatile uint32_t put; + uint32_t sz; + volatile char data[0]; +} __packed; + +#define SMC_SC_SHARED_LOG_VERSION SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 0) +#define SMC_SC_SHARED_LOG_ADD SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 1) +#define SMC_SC_SHARED_LOG_RM SMC_STDCALL_NR(SMC_ENTITY_LOGGING, 2) + +#define TRUSTY_LOG_API_VERSION 1 + +#define VMM_DUMP_VERSION 1 + +struct dump_data { + uint32_t length; + uint8_t data[0]; +} __packed; + +struct dump_header { + uint8_t vmm_version[64]; /* version of the vmm */ + uint8_t signature[16]; /* signature for the dump structure */ + uint8_t error_info[32]; /* filename:linenum */ + uint16_t cpuid; +} __packed; + +struct deadloop_dump { + uint16_t size_of_this_struct; + uint16_t version_of_this_struct; + uint32_t is_valid; + struct dump_header header; + struct dump_data data; +} __packed; + +#endif + diff --git a/drivers/trusty/trusty-mem.c b/drivers/trusty/trusty-mem.c new file mode 100644 index 000000000000..470df8823d3a --- /dev/null +++ b/drivers/trusty/trusty-mem.c @@ -0,0 +1,196 @@ +/* + * Copyright (C) 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ */
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+/* Normal memory */
+#define NS_MAIR_NORMAL_CACHED_WB_RWA 0xFF /* inner and outer write back read/write allocate */
+#define NS_MAIR_NORMAL_CACHED_WT_RA 0xAA /* inner and outer write through read allocate */
+#define NS_MAIR_NORMAL_CACHED_WB_RA 0xEE /* inner and outer write back, read allocate */
+#define NS_MAIR_NORMAL_UNCACHED 0x44 /* uncached */
+
+static int get_mem_attr(struct page *page, pgprot_t pgprot)
+{
+#if defined(CONFIG_ARM64)
+	uint64_t mair;
+	uint attr_index = (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) >> 2;
+
+	asm ("mrs %0, mair_el1\n" : "=&r" (mair));
+	return (mair >> (attr_index * 8)) & 0xff;
+
+#elif defined(CONFIG_ARM_LPAE)
+	uint32_t mair;
+	uint attr_index = ((pgprot_val(pgprot) & L_PTE_MT_MASK) >> 2);
+
+	if (attr_index >= 4) {
+		attr_index -= 4;
+		asm volatile("mrc p15, 0, %0, c10, c2, 1\n" : "=&r" (mair));
+	} else {
+		asm volatile("mrc p15, 0, %0, c10, c2, 0\n" : "=&r" (mair));
+	}
+	return (mair >> (attr_index * 8)) & 0xff;
+
+#elif defined(CONFIG_ARM)
+	/* check memory type */
+	switch (pgprot_val(pgprot) & L_PTE_MT_MASK) {
+	case L_PTE_MT_WRITEALLOC:
+		/* Normal: write back, write allocate */
+		return 0xFF;
+
+	case L_PTE_MT_BUFFERABLE:
+		/* Normal: non-cacheable */
+		return 0x44;
+
+	case L_PTE_MT_WRITEBACK:
+		/* Normal: write back, read allocate */
+		return 0xEE;
+
+	case L_PTE_MT_WRITETHROUGH:
+		/* Normal: write through */
+		return 0xAA;
+
+	case L_PTE_MT_UNCACHED:
+		/* strongly ordered */
+		return 0x00;
+
+	case L_PTE_MT_DEV_SHARED:
+	case L_PTE_MT_DEV_NONSHARED:
+		/* device */
+		return 0x04;
+
+	default:
+		return -EINVAL;
+	}
+#elif defined(CONFIG_X86)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
+	/* The porting to the CHT kernel (3.14.55) is in the #else clause.
+	** For the BXT kernel (4.1.0), the function get_page_memtype() is static.
+	**
+	** The original Google code (for ARM) gets the cache state and page
+	** flags from the input parameter "pgprot", which is not preferred on
+	** x86. On x86, both the cache state and the page flags should be
+	** taken from the input parameter "page". But since the current
+	** callers of trusty_call32_mem_buf() always allocate memory from the
+	** kernel heap, it is also acceptable to hardcode the attribute here.
+	**
+	** Memory allocated from the kernel heap should be CACHED. The reason
+	** to return UNCACHED here is to pass the check in LK
+	** sm_decode_ns_memory_attr() with SMP, which only allows UNCACHED.
+ */ + return NS_MAIR_NORMAL_UNCACHED; +#else + unsigned long type; + int ret_mem_attr = 0; + + type = get_page_memtype(page); + /* + * -1 from get_page_memtype() implies RAM page is in its + * default state and not reserved, and hence of type WB + */ + if (type == -1) { + type = _PAGE_CACHE_MODE_WB; + } + switch (type) { + case _PAGE_CACHE_MODE_UC_MINUS: + /* uncacheable */ + ret_mem_attr = NS_MAIR_NORMAL_UNCACHED; + break; + case _PAGE_CACHE_MODE_WB: + /* writeback */ + ret_mem_attr = NS_MAIR_NORMAL_CACHED_WB_RWA; + break; + case _PAGE_CACHE_MODE_WC: + /* write combined */ + ret_mem_attr = NS_MAIR_NORMAL_UNCACHED; + break; + + default: + printk(KERN_ERR "%s(): invalid type: 0x%x\n", __func__, type); + ret_mem_attr = -EINVAL; + } + return ret_mem_attr; +#endif +#else + return 0; +#endif +} + +int trusty_encode_page_info(struct ns_mem_page_info *inf, + struct page *page, pgprot_t pgprot) +{ + int mem_attr; + uint64_t pte; + + if (!inf || !page) + return -EINVAL; + + /* get physical address */ + pte = (uint64_t) page_to_phys(page); + + /* get memory attributes */ + mem_attr = get_mem_attr(page, pgprot); + if (mem_attr < 0) + return mem_attr; + + /* add other attributes */ +#if defined(CONFIG_ARM64) || defined(CONFIG_ARM_LPAE) + pte |= pgprot_val(pgprot); +#elif defined(CONFIG_ARM) + if (pgprot_val(pgprot) & L_PTE_USER) + pte |= (1 << 6); + if (pgprot_val(pgprot) & L_PTE_RDONLY) + pte |= (1 << 7); + if (pgprot_val(pgprot) & L_PTE_SHARED) + pte |= (3 << 8); /* inner sharable */ +#elif defined(CONFIG_X86) + if (pgprot_val(pgprot) & _PAGE_USER) + pte |= (1 << 6); + if (!(pgprot_val(pgprot) & _PAGE_RW)) + pte |= (1 << 7); +#endif + + inf->attr = (pte & 0x0000FFFFFFFFFFFFull) | ((uint64_t)mem_attr << 48); + return 0; +} + +int trusty_call32_mem_buf(struct device *dev, u32 smcnr, + struct page *page, u32 size, + pgprot_t pgprot) +{ + int ret; + struct ns_mem_page_info pg_inf; + + if (!dev || !page) + return -EINVAL; + + ret = trusty_encode_page_info(&pg_inf, page, pgprot); + if (ret) + return ret; + + if (SMC_IS_FASTCALL(smcnr)) { + return trusty_fast_call32(dev, smcnr, + (u32)pg_inf.attr, + (u32)(pg_inf.attr >> 32), size); + } else { + return trusty_std_call32(dev, smcnr, + (u32)pg_inf.attr, + (u32)(pg_inf.attr >> 32), size); + } +} +EXPORT_SYMBOL(trusty_call32_mem_buf); +MODULE_LICENSE("GPL"); diff --git a/drivers/trusty/trusty-timer.c b/drivers/trusty/trusty-timer.c new file mode 100644 index 000000000000..6783a30b4a11 --- /dev/null +++ b/drivers/trusty/trusty-timer.c @@ -0,0 +1,198 @@ +/* + * Copyright (C) 2017 Intel, Inc. + * Copyright (C) 2016 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ */
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+struct trusty_timer {
+	struct sec_timer_state *sts;
+	struct hrtimer tm;
+	struct work_struct work;
+};
+
+struct trusty_timer_dev_state {
+	struct device *dev;
+	struct device *smwall_dev;
+	struct device *trusty_dev;
+	struct notifier_block call_notifier;
+	struct trusty_timer timer;
+	struct workqueue_struct *workqueue;
+};
+
+static void timer_work_func(struct work_struct *work)
+{
+	int ret;
+	struct trusty_timer_dev_state *s;
+
+	s = container_of(work, struct trusty_timer_dev_state, timer.work);
+
+	ret = trusty_std_call32(s->trusty_dev, SMC_SC_LK_TIMER, 0, 0, 0);
+	if (ret != 0)
+		dev_err(s->dev, "%s failed %d\n", __func__, ret);
+}
+
+static enum hrtimer_restart trusty_timer_cb(struct hrtimer *tm)
+{
+	struct trusty_timer_dev_state *s;
+
+	s = container_of(tm, struct trusty_timer_dev_state, timer.tm);
+
+	queue_work_on(0, s->workqueue, &s->timer.work);
+
+	return HRTIMER_NORESTART;
+}
+
+static int trusty_timer_call_notify(struct notifier_block *nb,
+				    unsigned long action, void *data)
+{
+	struct trusty_timer *tt;
+	struct sec_timer_state *sts;
+	struct trusty_timer_dev_state *s;
+
+	if (action != TRUSTY_CALL_RETURNED)
+		return NOTIFY_DONE;
+
+	s = container_of(nb, struct trusty_timer_dev_state, call_notifier);
+
+	/* this notifier is executed in non-preemptible context */
+	tt = &s->timer;
+	sts = tt->sts;
+
+	/*
+	 * tv_ns tracks the current secure-side time, cv_ns the requested
+	 * deadline; a deadline at or before the current time just cancels
+	 * the host timer.
+	 */
+	if (sts->tv_ns > sts->cv_ns) {
+		hrtimer_cancel(&tt->tm);
+	} else if (sts->cv_ns > sts->tv_ns) {
+		/* (re)arm the timer for the remaining cv_ns - tv_ns ns */
+		hrtimer_start(&tt->tm, ns_to_ktime(sts->cv_ns - sts->tv_ns),
+			      HRTIMER_MODE_REL_PINNED);
+	}
+
+	sts->cv_ns = 0ULL;
+	sts->tv_ns = 0ULL;
+
+	return NOTIFY_OK;
+}
+
+static int trusty_timer_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct trusty_timer_dev_state *s;
+	struct trusty_timer *tt;
+
+	ret = trusty_detect_vmm();
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!");
+		return -EINVAL;
+	}
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	if (!trusty_wall_base(pdev->dev.parent)) {
+		dev_notice(&pdev->dev, "smwall is not set up by parent\n");
+		return -ENODEV;
+	}
+
+	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	s->dev = &pdev->dev;
+	s->smwall_dev = s->dev->parent;
+	s->trusty_dev = s->smwall_dev->parent;
+	platform_set_drvdata(pdev, s);
+
+	tt = &s->timer;
+
+	hrtimer_init(&tt->tm, CLOCK_BOOTTIME, HRTIMER_MODE_REL_PINNED);
+	tt->tm.function = trusty_timer_cb;
+	tt->sts =
+	    trusty_wall_per_cpu_item_ptr(s->smwall_dev, 0,
+					 SM_WALL_PER_CPU_SEC_TIMER_ID,
+					 sizeof(*tt->sts));
+	WARN_ON(!tt->sts);
+
+	s->workqueue = alloc_workqueue("trusty-timer-wq", WQ_CPU_INTENSIVE, 0);
+	if (!s->workqueue) {
+		ret = -ENODEV;
+		dev_err(&pdev->dev, "Failed to allocate work queue\n");
+		goto err_allocate_work_queue;
+	}
+
+	/* the work must be ready before the notifier can arm the timer */
+	INIT_WORK(&s->timer.work, timer_work_func);
+
+	/* register notifier */
+	s->call_notifier.notifier_call = trusty_timer_call_notify;
+	ret = trusty_call_notifier_register(s->trusty_dev, &s->call_notifier);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to register call notifier\n");
+		goto err_register_notifier;
+	}
+
+	dev_info(s->dev, "initialized\n");
+
+	return 0;
+
+err_register_notifier:
+	destroy_workqueue(s->workqueue);
+err_allocate_work_queue:
+	kfree(s);
+	return ret;
+}
+
+static int trusty_timer_remove(struct platform_device *pdev)
+{
+	struct trusty_timer_dev_state *s = platform_get_drvdata(pdev);
+	struct trusty_timer *tt;
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	/* unregister notifier */
trusty_call_notifier_unregister(s->trusty_dev, &s->call_notifier); + + tt = &s->timer; + hrtimer_cancel(&tt->tm); + + flush_work(&tt->work); + destroy_workqueue(s->workqueue); + /* free state */ + kfree(s); + return 0; +} + +static const struct of_device_id trusty_test_of_match[] = { + { .compatible = "android,trusty-timer-v1", }, + {}, +}; + +static struct platform_driver trusty_timer_driver = { + .probe = trusty_timer_probe, + .remove = trusty_timer_remove, + .driver = { + .name = "trusty-timer", + .owner = THIS_MODULE, + .of_match_table = trusty_test_of_match, + }, +}; + +module_platform_driver(trusty_timer_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Trusty timer driver"); diff --git a/drivers/trusty/trusty-virtio.c b/drivers/trusty/trusty-virtio.c new file mode 100644 index 000000000000..df066dda80d3 --- /dev/null +++ b/drivers/trusty/trusty-virtio.c @@ -0,0 +1,765 @@ +/* + * Trusty Virtio driver + * + * Copyright (C) 2015 Google, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include + +#define RSC_DESCR_VER 1 + +struct trusty_vdev; + +struct trusty_ctx { + struct device *dev; + void *shared_va; + size_t shared_sz; + struct work_struct check_vqs; + struct work_struct kick_vqs; + struct notifier_block call_notifier; + struct list_head vdev_list; + struct mutex mlock; /* protects vdev_list */ + struct workqueue_struct *kick_wq; + struct workqueue_struct *check_wq; +}; + +struct trusty_vring { + void *vaddr; + phys_addr_t paddr; + size_t size; + uint align; + uint elem_num; + u32 notifyid; + atomic_t needs_kick; + struct fw_rsc_vdev_vring *vr_descr; + struct virtqueue *vq; + struct trusty_vdev *tvdev; + struct trusty_nop kick_nop; +}; + +struct trusty_vdev { + struct list_head node; + struct virtio_device vdev; + struct trusty_ctx *tctx; + u32 notifyid; + uint config_len; + void *config; + struct fw_rsc_vdev *vdev_descr; + uint vring_num; + struct trusty_vring vrings[0]; +}; + +#define vdev_to_tvdev(vd) container_of((vd), struct trusty_vdev, vdev) + +static void check_all_vqs(struct work_struct *work) +{ + uint i; + struct trusty_ctx *tctx = container_of(work, struct trusty_ctx, + check_vqs); + struct trusty_vdev *tvdev; + + list_for_each_entry(tvdev, &tctx->vdev_list, node) { + for (i = 0; i < tvdev->vring_num; i++) + vring_interrupt(0, tvdev->vrings[i].vq); + } +} + +static int trusty_call_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct trusty_ctx *tctx; + + if (action != TRUSTY_CALL_RETURNED) + return NOTIFY_DONE; + + tctx = container_of(nb, struct trusty_ctx, call_notifier); + queue_work(tctx->check_wq, &tctx->check_vqs); + + return NOTIFY_OK; +} + +static void kick_vq(struct trusty_ctx *tctx, + struct trusty_vdev *tvdev, + struct trusty_vring *tvr) +{ + int ret; + + dev_dbg(tctx->dev, "%s: vdev_id=%d: vq_id=%d\n", + __func__, tvdev->notifyid, tvr->notifyid); + + ret = trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_KICK_VQ, + tvdev->notifyid, tvr->notifyid, 0); + 
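	/*
	 * The result of the std call is checked below. This blocking call is
	 * only reached on API versions older than TRUSTY_API_VERSION_SMP_NOP,
	 * where trusty_virtio_notify() sets needs_kick and defers the kick to
	 * kick_wq on CPU 0; newer versions enqueue a lightweight nop via
	 * trusty_enqueue_nop() instead and never take this path.
	 */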
if (ret) { + dev_err(tctx->dev, "vq notify (%d, %d) returned %d\n", + tvdev->notifyid, tvr->notifyid, ret); + } +} + +static void kick_vqs(struct work_struct *work) +{ + uint i; + struct trusty_vdev *tvdev; + struct trusty_ctx *tctx = container_of(work, struct trusty_ctx, + kick_vqs); + mutex_lock(&tctx->mlock); + list_for_each_entry(tvdev, &tctx->vdev_list, node) { + for (i = 0; i < tvdev->vring_num; i++) { + struct trusty_vring *tvr = &tvdev->vrings[i]; + if (atomic_xchg(&tvr->needs_kick, 0)) + kick_vq(tctx, tvdev, tvr); + } + } + mutex_unlock(&tctx->mlock); +} + +static bool trusty_virtio_notify(struct virtqueue *vq) +{ + struct trusty_vring *tvr = vq->priv; + struct trusty_vdev *tvdev = tvr->tvdev; + struct trusty_ctx *tctx = tvdev->tctx; + + u32 api_ver = trusty_get_api_version(tctx->dev->parent); + + if (api_ver < TRUSTY_API_VERSION_SMP_NOP) { + atomic_set(&tvr->needs_kick, 1); + queue_work_on(0, tctx->kick_wq, &tctx->kick_vqs); + } else { + trusty_enqueue_nop(tctx->dev->parent, &tvr->kick_nop); + } + + return true; +} + +static int trusty_load_device_descr(struct trusty_ctx *tctx, + void *va, size_t sz) +{ + int ret; + + dev_dbg(tctx->dev, "%s: %zu bytes @ %p\n", __func__, sz, va); + + ret = trusty_call32_mem_buf(tctx->dev->parent, + SMC_SC_VIRTIO_GET_DESCR, + virt_to_page(va), sz, PAGE_KERNEL); + if (ret < 0) { + dev_err(tctx->dev, "%s: virtio get descr returned (%d)\n", + __func__, ret); + return -ENODEV; + } + return ret; +} + +static void trusty_virtio_stop(struct trusty_ctx *tctx, void *va, size_t sz) +{ + int ret; + + dev_dbg(tctx->dev, "%s: %zu bytes @ %p\n", __func__, sz, va); + + ret = trusty_call32_mem_buf(tctx->dev->parent, SMC_SC_VIRTIO_STOP, + virt_to_page(va), sz, PAGE_KERNEL); + if (ret) { + dev_err(tctx->dev, "%s: virtio done returned (%d)\n", + __func__, ret); + return; + } +} + +static int trusty_virtio_start(struct trusty_ctx *tctx, + void *va, size_t sz) +{ + int ret; + + dev_dbg(tctx->dev, "%s: %zu bytes @ %p\n", __func__, sz, va); + + ret = trusty_call32_mem_buf(tctx->dev->parent, SMC_SC_VIRTIO_START, + virt_to_page(va), sz, PAGE_KERNEL); + if (ret) { + dev_err(tctx->dev, "%s: virtio start returned (%d)\n", + __func__, ret); + return -ENODEV; + } + return 0; +} + +static void trusty_virtio_reset(struct virtio_device *vdev) +{ + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); + struct trusty_ctx *tctx = tvdev->tctx; + + dev_dbg(&vdev->dev, "reset vdev_id=%d\n", tvdev->notifyid); + trusty_std_call32(tctx->dev->parent, SMC_SC_VDEV_RESET, + tvdev->notifyid, 0, 0); + vdev->config->set_status(vdev, 0); +} + +static u64 trusty_virtio_get_features(struct virtio_device *vdev) +{ + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); + return ((u64)tvdev->vdev_descr->dfeatures) & 0x00000000FFFFFFFFULL; +} + +static int trusty_virtio_finalize_features(struct virtio_device *vdev) +{ + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); + + /* Make sure we don't have any features > 32 bits! 
*/ + BUG_ON((u32)vdev->features != vdev->features); + + tvdev->vdev_descr->gfeatures = (u32)(vdev->features); + return 0; +} + +static void trusty_virtio_get_config(struct virtio_device *vdev, + unsigned offset, void *buf, + unsigned len) +{ + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); + + dev_dbg(&vdev->dev, "%s: %d bytes @ offset %d\n", + __func__, len, offset); + + if (tvdev->config) { + if (offset + len <= tvdev->config_len) + memcpy(buf, tvdev->config + offset, len); + } +} + +static void trusty_virtio_set_config(struct virtio_device *vdev, + unsigned offset, const void *buf, + unsigned len) +{ + dev_dbg(&vdev->dev, "%s\n", __func__); +} + +static u8 trusty_virtio_get_status(struct virtio_device *vdev) +{ + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); + return tvdev->vdev_descr->status; +} + +static void trusty_virtio_set_status(struct virtio_device *vdev, u8 status) +{ + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); + tvdev->vdev_descr->status = status; +} + +static void _del_vqs(struct virtio_device *vdev) +{ + uint i; + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); + struct trusty_vring *tvr = &tvdev->vrings[0]; + + for (i = 0; i < tvdev->vring_num; i++, tvr++) { + /* dequeue kick_nop */ + trusty_dequeue_nop(tvdev->tctx->dev->parent, &tvr->kick_nop); + + /* delete vq */ + if (tvr->vq) { + vring_del_virtqueue(tvr->vq); + tvr->vq = NULL; + } + /* delete vring */ + if (tvr->vaddr) { + free_pages_exact(tvr->vaddr, tvr->size); + tvr->vaddr = NULL; + } + } +} + +static void trusty_virtio_del_vqs(struct virtio_device *vdev) +{ + dev_dbg(&vdev->dev, "%s\n", __func__); + _del_vqs(vdev); +} + + +static struct virtqueue *_find_vq(struct virtio_device *vdev, + unsigned id, + void (*callback)(struct virtqueue *vq), + const char *name) +{ + struct trusty_vring *tvr; + struct trusty_vdev *tvdev = vdev_to_tvdev(vdev); + phys_addr_t pa; + + if (!name) + return ERR_PTR(-EINVAL); + + if (id >= tvdev->vring_num) + return ERR_PTR(-EINVAL); + + tvr = &tvdev->vrings[id]; + + /* actual size of vring (in bytes) */ + tvr->size = PAGE_ALIGN(vring_size(tvr->elem_num, tvr->align)); + + /* allocate memory for the vring. */ + tvr->vaddr = alloc_pages_exact(tvr->size, GFP_KERNEL | __GFP_ZERO); + if (!tvr->vaddr) { + dev_err(&vdev->dev, "vring alloc failed\n"); + return ERR_PTR(-ENOMEM); + } + + pa = virt_to_phys(tvr->vaddr); + /* save vring address to shared structure */ + tvr->vr_descr->da = (u32)pa; + /* da field is only 32 bit wide. 
Use previously unused 'reserved' field + * to store top 32 bits of 64-bit address + */ + tvr->vr_descr->pa = (u32)HIULINT(pa); + + dev_info(&vdev->dev, "vring%d: va(pa) %p(%llx) qsz %d notifyid %d\n", + id, tvr->vaddr, (u64)tvr->paddr, tvr->elem_num, tvr->notifyid); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) + tvr->vq = vring_new_virtqueue(id, tvr->elem_num, tvr->align, + vdev, true, true, tvr->vaddr, + trusty_virtio_notify, callback, name); +#else + tvr->vq = vring_new_virtqueue(id, tvr->elem_num, tvr->align, + vdev, true, tvr->vaddr, + trusty_virtio_notify, callback, name); +#endif + if (!tvr->vq) { + dev_err(&vdev->dev, "vring_new_virtqueue %s failed\n", + name); + goto err_new_virtqueue; + } + + tvr->vq->priv = tvr; + + return tvr->vq; + +err_new_virtqueue: + free_pages_exact(tvr->vaddr, tvr->size); + tvr->vaddr = NULL; + return ERR_PTR(-ENOMEM); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) +static int trusty_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char * const names[], + const bool *ctx, + struct irq_affinity *desc) +#else +static int trusty_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char * const names[]) +#endif +{ + uint i; + int ret; + + for (i = 0; i < nvqs; i++) { + vqs[i] = _find_vq(vdev, i, callbacks[i], names[i]); + if (IS_ERR(vqs[i])) { + ret = PTR_ERR(vqs[i]); + _del_vqs(vdev); + return ret; + } + } + return 0; +} + +static const char *trusty_virtio_bus_name(struct virtio_device *vdev) +{ + return "trusty-virtio"; +} + +/* The ops structure which hooks everything together. */ +static const struct virtio_config_ops trusty_virtio_config_ops = { + .get_features = trusty_virtio_get_features, + .finalize_features = trusty_virtio_finalize_features, + .get = trusty_virtio_get_config, + .set = trusty_virtio_set_config, + .get_status = trusty_virtio_get_status, + .set_status = trusty_virtio_set_status, + .reset = trusty_virtio_reset, + .find_vqs = trusty_virtio_find_vqs, + .del_vqs = trusty_virtio_del_vqs, + .bus_name = trusty_virtio_bus_name, +}; + +void virtio_vdev_release(struct device *dev) +{ + dev_dbg(dev, "%s() is called\n", __func__); + return; +} + +static int trusty_virtio_add_device(struct trusty_ctx *tctx, + struct fw_rsc_vdev *vdev_descr, + struct fw_rsc_vdev_vring *vr_descr, + void *config) +{ + int i, ret; + struct trusty_vdev *tvdev; + + tvdev = kzalloc(sizeof(struct trusty_vdev) + + vdev_descr->num_of_vrings * sizeof(struct trusty_vring), + GFP_KERNEL); + if (!tvdev) { + dev_err(tctx->dev, "Failed to allocate VDEV\n"); + return -ENOMEM; + } + + /* setup vdev */ + tvdev->tctx = tctx; + tvdev->vdev.dev.parent = tctx->dev; + tvdev->vdev.dev.release = virtio_vdev_release; + tvdev->vdev.id.device = vdev_descr->id; + tvdev->vdev.config = &trusty_virtio_config_ops; + tvdev->vdev_descr = vdev_descr; + tvdev->notifyid = vdev_descr->notifyid; + + /* setup config */ + tvdev->config = config; + tvdev->config_len = vdev_descr->config_len; + + /* setup vrings and vdev resource */ + tvdev->vring_num = vdev_descr->num_of_vrings; + + for (i = 0; i < tvdev->vring_num; i++, vr_descr++) { + struct trusty_vring *tvr = &tvdev->vrings[i]; + tvr->tvdev = tvdev; + tvr->vr_descr = vr_descr; + tvr->align = vr_descr->align; + tvr->elem_num = vr_descr->num; + tvr->notifyid = vr_descr->notifyid; + trusty_nop_init(&tvr->kick_nop, SMC_NC_VDEV_KICK_VQ, + tvdev->notifyid, tvr->notifyid); + } + + /* register device 
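with the virtio core; a matching virtio driver can then bind to it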
*/
+	ret = register_virtio_device(&tvdev->vdev);
+	if (ret) {
+		dev_err(tctx->dev,
+			"Failed (%d) to register device dev type %u\n",
+			ret, vdev_descr->id);
+		goto err_register;
+	}
+
+	/* add it to tracking list */
+	list_add_tail(&tvdev->node, &tctx->vdev_list);
+
+	return 0;
+
+err_register:
+	kfree(tvdev);
+	return ret;
+}
+
+static int trusty_parse_device_descr(struct trusty_ctx *tctx,
+				     void *descr_va, size_t descr_sz)
+{
+	u32 i;
+	struct resource_table *descr = descr_va;
+
+	if (descr_sz < sizeof(*descr)) {
+		dev_err(tctx->dev, "descr table is too small (0x%x)\n",
+			(int)descr_sz);
+		return -ENODEV;
+	}
+
+	if (descr->ver != RSC_DESCR_VER) {
+		dev_err(tctx->dev, "unexpected descr ver (0x%x)\n",
+			(int)descr->ver);
+		return -ENODEV;
+	}
+
+	if (descr_sz < (sizeof(*descr) + descr->num * sizeof(u32))) {
+		dev_err(tctx->dev, "descr table is too small (0x%x)\n",
+			(int)descr_sz);
+		return -ENODEV;
+	}
+
+	for (i = 0; i < descr->num; i++) {
+		struct fw_rsc_hdr *hdr;
+		struct fw_rsc_vdev *vd;
+		struct fw_rsc_vdev_vring *vr;
+		void *cfg;
+		size_t vd_sz;
+
+		u32 offset = descr->offset[i];
+
+		if (offset >= descr_sz) {
+			dev_err(tctx->dev, "offset is out of bounds (%u)\n",
+				(uint)offset);
+			return -ENODEV;
+		}
+
+		/* check space for rsc header */
+		if ((descr_sz - offset) < sizeof(struct fw_rsc_hdr)) {
+			dev_err(tctx->dev, "no space for rsc header (%u)\n",
+				(uint)offset);
+			return -ENODEV;
+		}
+		hdr = (struct fw_rsc_hdr *)((u8 *)descr + offset);
+		offset += sizeof(struct fw_rsc_hdr);
+
+		/* check type */
+		if (hdr->type != RSC_VDEV) {
+			dev_err(tctx->dev, "unsupported rsc type (%u)\n",
+				(uint)hdr->type);
+			continue;
+		}
+
+		/* got vdev: check space for vdev */
+		if ((descr_sz - offset) < sizeof(struct fw_rsc_vdev)) {
+			dev_err(tctx->dev, "no space for vdev descr (%u)\n",
+				(uint)offset);
+			return -ENODEV;
+		}
+		vd = (struct fw_rsc_vdev *)((u8 *)descr + offset);
+
+		/* check space for vrings and config area */
+		vd_sz = sizeof(struct fw_rsc_vdev) +
+			vd->num_of_vrings * sizeof(struct fw_rsc_vdev_vring) +
+			vd->config_len;
+
+		if ((descr_sz - offset) < vd_sz) {
+			dev_err(tctx->dev, "no space for vdev (%u)\n",
+				(uint)offset);
+			return -ENODEV;
+		}
+		vr = (struct fw_rsc_vdev_vring *)vd->vring;
+		cfg = (void *)(vr + vd->num_of_vrings);
+
+		trusty_virtio_add_device(tctx, vd, vr, cfg);
+	}
+
+	return 0;
+}
+
+static void _remove_devices_locked(struct trusty_ctx *tctx)
+{
+	struct trusty_vdev *tvdev, *next;
+
+	list_for_each_entry_safe(tvdev, next, &tctx->vdev_list, node) {
+		list_del(&tvdev->node);
+		unregister_virtio_device(&tvdev->vdev);
+		kfree(tvdev);
+	}
+}
+
+static void trusty_virtio_remove_devices(struct trusty_ctx *tctx)
+{
+	mutex_lock(&tctx->mlock);
+	_remove_devices_locked(tctx);
+	mutex_unlock(&tctx->mlock);
+}
+
+static int trusty_virtio_add_devices(struct trusty_ctx *tctx)
+{
+	int ret;
+	void *descr_va;
+	size_t descr_sz;
+	size_t descr_buf_sz;
+
+	/* allocate buffer to load device descriptor into */
+	descr_buf_sz = PAGE_SIZE;
+	descr_va = alloc_pages_exact(descr_buf_sz, GFP_KERNEL | __GFP_ZERO);
+	if (!descr_va) {
+		dev_err(tctx->dev, "Failed to allocate shared area\n");
+		return -ENOMEM;
+	}
+
+	/* load device descriptors */
+	ret = trusty_load_device_descr(tctx, descr_va, descr_buf_sz);
+	if (ret < 0) {
+		dev_err(tctx->dev, "failed (%d) to load device descr\n", ret);
+		goto err_load_descr;
+	}
+
+	descr_sz = (size_t)ret;
+
+	mutex_lock(&tctx->mlock);
+
+	/* parse device descriptor and add virtio devices */
+	ret = trusty_parse_device_descr(tctx, descr_va, descr_sz);
+	if
(ret) { + dev_err(tctx->dev, "failed (%d) to parse device descr\n", ret); + goto err_parse_descr; + } + + /* register call notifier */ + ret = trusty_call_notifier_register(tctx->dev->parent, + &tctx->call_notifier); + if (ret) { + dev_err(tctx->dev, "%s: failed (%d) to register notifier\n", + __func__, ret); + goto err_register_notifier; + } + + /* start virtio */ + ret = trusty_virtio_start(tctx, descr_va, descr_sz); + if (ret) { + dev_err(tctx->dev, "failed (%d) to start virtio\n", ret); + goto err_start_virtio; + } + + /* attach shared area */ + tctx->shared_va = descr_va; + tctx->shared_sz = descr_buf_sz; + + mutex_unlock(&tctx->mlock); + + return 0; + +err_start_virtio: + trusty_call_notifier_unregister(tctx->dev->parent, + &tctx->call_notifier); + cancel_work_sync(&tctx->check_vqs); +err_register_notifier: +err_parse_descr: + _remove_devices_locked(tctx); + mutex_unlock(&tctx->mlock); + cancel_work_sync(&tctx->kick_vqs); + trusty_virtio_stop(tctx, descr_va, descr_sz); +err_load_descr: + free_pages_exact(descr_va, descr_buf_sz); + return ret; +} + +static int trusty_virtio_probe(struct platform_device *pdev) +{ + int ret; + struct trusty_ctx *tctx; + + ret = trusty_detect_vmm(); + if (ret < 0) { + dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + + dev_info(&pdev->dev, "initializing\n"); + + tctx = kzalloc(sizeof(*tctx), GFP_KERNEL); + if (!tctx) { + dev_err(&pdev->dev, "Failed to allocate context\n"); + return -ENOMEM; + } + + tctx->dev = &pdev->dev; + tctx->call_notifier.notifier_call = trusty_call_notify; + mutex_init(&tctx->mlock); + INIT_LIST_HEAD(&tctx->vdev_list); + INIT_WORK(&tctx->check_vqs, check_all_vqs); + INIT_WORK(&tctx->kick_vqs, kick_vqs); + platform_set_drvdata(pdev, tctx); + + tctx->check_wq = alloc_workqueue("trusty-check-wq", WQ_UNBOUND, 0); + if (!tctx->check_wq) { + ret = -ENODEV; + dev_err(&pdev->dev, "Failed create trusty-check-wq\n"); + goto err_create_check_wq; + } + + tctx->kick_wq = alloc_workqueue("trusty-kick-wq", + WQ_CPU_INTENSIVE, 0); + if (!tctx->kick_wq) { + ret = -ENODEV; + dev_err(&pdev->dev, "Failed create trusty-kick-wq\n"); + goto err_create_kick_wq; + } + + ret = trusty_virtio_add_devices(tctx); + if (ret) { + dev_err(&pdev->dev, "Failed to add virtio devices\n"); + goto err_add_devices; + } + + dev_info(&pdev->dev, "initializing done\n"); + return 0; + +err_add_devices: + destroy_workqueue(tctx->kick_wq); +err_create_kick_wq: + destroy_workqueue(tctx->check_wq); +err_create_check_wq: + kfree(tctx); + return ret; +} + +static int trusty_virtio_remove(struct platform_device *pdev) +{ + struct trusty_ctx *tctx = platform_get_drvdata(pdev); + + dev_err(&pdev->dev, "removing\n"); + + /* unregister call notifier and wait until workqueue is done */ + trusty_call_notifier_unregister(tctx->dev->parent, + &tctx->call_notifier); + cancel_work_sync(&tctx->check_vqs); + + /* remove virtio devices */ + trusty_virtio_remove_devices(tctx); + cancel_work_sync(&tctx->kick_vqs); + + /* destroy workqueues */ + destroy_workqueue(tctx->kick_wq); + destroy_workqueue(tctx->check_wq); + + /* notify remote that shared area goes away */ + trusty_virtio_stop(tctx, tctx->shared_va, tctx->shared_sz); + + /* free shared area */ + free_pages_exact(tctx->shared_va, tctx->shared_sz); + + /* free context */ + kfree(tctx); + return 0; +} + +static const struct of_device_id trusty_of_match[] = { + { + .compatible = "android,trusty-virtio-v1", + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, trusty_of_match); + +static struct platform_driver 
trusty_virtio_driver = { + .probe = trusty_virtio_probe, + .remove = trusty_virtio_remove, + .driver = { + .name = "trusty-virtio", + .owner = THIS_MODULE, + .of_match_table = trusty_of_match, + }, +}; + +module_platform_driver(trusty_virtio_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Trusty virtio driver"); diff --git a/drivers/trusty/trusty-wall.c b/drivers/trusty/trusty-wall.c new file mode 100644 index 000000000000..2345f56a6405 --- /dev/null +++ b/drivers/trusty/trusty-wall.c @@ -0,0 +1,205 @@ +/* + * Copyright (C) 2017 Intel, Inc. + * Copyright (C) 2016 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include + + +void *trusty_wall_base(struct device *dev) +{ + struct trusty_wall_dev_state *s; + + s = platform_get_drvdata(to_platform_device(dev)); + + if (NULL == s) + return NULL; + + return s->va; +} +EXPORT_SYMBOL(trusty_wall_base); + +void *trusty_wall_per_cpu_item_ptr(struct device *dev, unsigned int cpu, + u32 item_id, size_t exp_sz) +{ + uint i; + struct sm_wall_toc *toc; + struct sm_wall_toc_item *item; + struct trusty_wall_dev_state *s; + + s = platform_get_drvdata(to_platform_device(dev)); + + if (!s->va) { + dev_dbg(s->dev, "No smwall buffer is set\n"); + return NULL; + } + + toc = (struct sm_wall_toc *)s->va; + if (toc->version != SM_WALL_TOC_VER) { + dev_err(s->dev, "Unexpected toc version: %d\n", toc->version); + return NULL; + } + + if (cpu >= toc->cpu_num) { + dev_err(s->dev, "Unsupported cpu (%d) requested\n", cpu); + return NULL; + } + + item = (struct sm_wall_toc_item *)((uintptr_t)toc + + toc->per_cpu_toc_offset); + for (i = 0; i < toc->per_cpu_num_items; i++, item++) { + if (item->id != item_id) + continue; + + if (item->size != exp_sz) { + dev_err(s->dev, + "Size mismatch (%zd vs. 
%zd) for item_id %d\n",
+				(size_t)item->size, exp_sz, item_id);
+			return NULL;
+		}
+
+		return s->va + toc->per_cpu_base_offset +
+		       cpu * toc->per_cpu_region_size + item->offset;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(trusty_wall_per_cpu_item_ptr);
+
+static int trusty_wall_setup(struct trusty_wall_dev_state *s)
+{
+	int ret;
+	void *va;
+	size_t sz;
+
+	/* check if wall feature is supported by Trusted OS */
+	ret = trusty_fast_call32(s->trusty_dev, SMC_FC_GET_WALL_SIZE, 0, 0, 0);
+	if (ret == SM_ERR_UNDEFINED_SMC || ret == SM_ERR_NOT_SUPPORTED) {
+		/* wall is not supported */
+		dev_notice(s->dev, "smwall: not supported by Trusted OS\n");
+		return 0;
+	} else if (ret < 0) {
+		dev_err(s->dev, "smwall: failed (%d) to query buffer size\n",
+			ret);
+		return ret;
+	} else if (ret == 0) {
+		dev_notice(s->dev, "smwall: zero-sized buffer requested\n");
+		return 0;
+	}
+	sz = (size_t)ret;
+
+	/* allocate memory for shared buffer */
+	va = alloc_pages_exact(sz, GFP_KERNEL | __GFP_ZERO);
+	if (!va) {
+		dev_err(s->dev, "smwall: failed to allocate buffer\n");
+		return -ENOMEM;
+	}
+
+	/* call into Trusted OS to setup wall */
+	ret = trusty_call32_mem_buf(s->trusty_dev, SMC_SC_SETUP_WALL,
+				    virt_to_page(va), sz, PAGE_KERNEL);
+	if (ret < 0) {
+		dev_err(s->dev, "smwall: TEE returned (%d)\n", ret);
+		free_pages_exact(va, sz);
+		return -ENODEV;
+	}
+
+	dev_info(s->dev, "smwall: initialized %zu bytes\n", sz);
+
+	s->va = va;
+	s->sz = sz;
+
+	return 0;
+}
+
+static void trusty_wall_destroy(struct trusty_wall_dev_state *s)
+{
+	int ret;
+
+	ret = trusty_std_call32(s->trusty_dev, SMC_SC_DESTROY_WALL, 0, 0, 0);
+	if (ret) {
+		/*
+		 * This should never happen, but if it does, it is
+		 * unsafe to free the buffer, so we have to leak the memory
+		 */
+		dev_err(s->dev, "Failed (%d) to destroy the wall buffer\n",
+			ret);
+	} else {
+		free_pages_exact(s->va, s->sz);
+	}
+}
+
+static int trusty_wall_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct trusty_wall_dev_state *s;
+
+	ret = trusty_detect_vmm();
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!");
+		return -EINVAL;
+	}
+
+	dev_dbg(&pdev->dev, "%s\n", __func__);
+
+	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	s->dev = &pdev->dev;
+	s->trusty_dev = s->dev->parent;
+	platform_set_drvdata(pdev, s);
+
+	ret = trusty_wall_setup(s);
+	if (ret < 0) {
+		dev_warn(s->dev, "Failed (%d) to setup the wall\n", ret);
+		kfree(s);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int trusty_wall_remove(struct platform_device *pdev)
+{
+	struct trusty_wall_dev_state *s = platform_get_drvdata(pdev);
+
+	trusty_wall_destroy(s);
+
+	return 0;
+}
+
+static const struct of_device_id trusty_wall_of_match[] = {
+	{ .compatible = "android,trusty-wall-v1", },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, trusty_wall_of_match);
+
+static struct platform_driver trusty_wall_driver = {
+	.probe = trusty_wall_probe,
+	.remove = trusty_wall_remove,
+	.driver = {
+		.name = "trusty-wall",
+		.owner = THIS_MODULE,
+		.of_match_table = trusty_wall_of_match,
+	},
+};
+
+module_platform_driver(trusty_wall_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Trusty smwall driver");
diff --git a/drivers/trusty/trusty.c b/drivers/trusty/trusty.c
new file mode 100755
index 000000000000..881924f88e4f
--- /dev/null
+++ b/drivers/trusty/trusty.c
@@ -0,0 +1,824 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define EVMM_SMC_HC_ID 0x74727500 +#define ACRN_HC_SWITCH_WORLD 0x80000071 +#define ACRN_HC_SAVE_SWORLD_CONTEXT 0x80000072 + +struct trusty_state; + +struct trusty_work { + struct trusty_state *ts; + struct work_struct work; +}; + +struct trusty_state { + struct mutex smc_lock; + struct atomic_notifier_head notifier; + struct completion cpu_idle_completion; + char *version_str; + u32 api_version; + struct device *dev; + struct workqueue_struct *nop_wq; + struct trusty_work __percpu *nop_works; + struct list_head nop_queue; + spinlock_t nop_lock; /* protects nop_queue */ +}; + +struct trusty_smc_interface { + struct device *dev; + ulong args[5]; +}; + +static ulong (*smc)(ulong, ulong, ulong, ulong); + +static inline ulong smc_evmm(ulong r0, ulong r1, ulong r2, ulong r3) +{ + register unsigned long smc_id asm("rax") = EVMM_SMC_HC_ID; + __asm__ __volatile__( + "vmcall; \n" + : "=D"(r0) + : "r"(smc_id), "D"(r0), "S"(r1), "d"(r2), "b"(r3) + ); + + return r0; +} + +static inline ulong smc_acrn(ulong r0, ulong r1, ulong r2, ulong r3) +{ + register unsigned long smc_id asm("r8") = ACRN_HC_SWITCH_WORLD; + register signed long ret asm("rax"); + __asm__ __volatile__( + "vmcall; \n" + : "=D"(r0), "=r"(ret) + : "r"(smc_id), "D"(r0), "S"(r1), "d"(r2), "b"(r3) + ); + + if(ret < 0) { + pr_err("trusty: %s: hypercall failed: %ld\n", __func__, ret); + r0 = (ulong)SM_ERR_NOT_SUPPORTED; + } + + return r0; +} + +static void acrn_save_sworld_context(void *arg) +{ + long *save_ret = arg; + register signed long result asm("rax"); + register unsigned long hc_id asm("r8") = ACRN_HC_SAVE_SWORLD_CONTEXT; + __asm__ __volatile__( + "vmcall; \n" + : "=r"(result) + : "r"(hc_id) + ); + + *save_ret = result; +} + +static void trusty_fast_call32_remote(void *args) +{ + struct trusty_smc_interface *p_args = args; + struct device *dev = p_args->dev; + ulong smcnr = p_args->args[0]; + ulong a0 = p_args->args[1]; + ulong a1 = p_args->args[2]; + ulong a2 = p_args->args[3]; + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + BUG_ON(!s); + BUG_ON(!SMC_IS_FASTCALL(smcnr)); + BUG_ON(SMC_IS_SMC64(smcnr)); + + p_args->args[4] = smc(smcnr, a0, a1, a2); +} + +s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) +{ + int cpu = 0; + int ret = 0; + struct trusty_smc_interface s; + s.dev = dev; + s.args[0] = smcnr; + s.args[1] = a0; + s.args[2] = a1; + s.args[3] = a2; + s.args[4] = 0; + + ret = smp_call_function_single(cpu, trusty_fast_call32_remote, (void *)&s, 1); + + if (ret) { + pr_err("%s: smp_call_function_single failed: %d\n", __func__, ret); + } + + return s.args[4]; +} +EXPORT_SYMBOL(trusty_fast_call32); + +#ifdef CONFIG_64BIT +s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2) +{ + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + BUG_ON(!s); + BUG_ON(!SMC_IS_FASTCALL(smcnr)); + BUG_ON(!SMC_IS_SMC64(smcnr)); + + return smc(smcnr, 
a0, a1, a2); +} +#endif + +static ulong trusty_std_call_inner(struct device *dev, ulong smcnr, + ulong a0, ulong a1, ulong a2) +{ + ulong ret; + int retry = 5; + + dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx)\n", + __func__, smcnr, a0, a1, a2); + while (true) { + ret = smc(smcnr, a0, a1, a2); + while ((s32)ret == SM_ERR_FIQ_INTERRUPTED) + ret = smc(SMC_SC_RESTART_FIQ, 0, 0, 0); + if ((int)ret != SM_ERR_BUSY || !retry) + break; + + dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, retry\n", + __func__, smcnr, a0, a1, a2); + retry--; + } + + return ret; +} + +static ulong trusty_std_call_helper(struct device *dev, ulong smcnr, + ulong a0, ulong a1, ulong a2) +{ + ulong ret; + int sleep_time = 1; + unsigned long flags; + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + while (true) { + local_irq_save(flags); + atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_PREPARE, + NULL); + ret = trusty_std_call_inner(dev, smcnr, a0, a1, a2); + atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_RETURNED, + NULL); + local_irq_restore(flags); + + if ((int)ret != SM_ERR_BUSY) + break; + + if (sleep_time == 256) + dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy\n", + __func__, smcnr, a0, a1, a2); + dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, wait %d ms\n", + __func__, smcnr, a0, a1, a2, sleep_time); + + msleep(sleep_time); + if (sleep_time < 1000) + sleep_time <<= 1; + + dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) retry\n", + __func__, smcnr, a0, a1, a2); + } + + if (sleep_time > 256) + dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) busy cleared\n", + __func__, smcnr, a0, a1, a2); + + return ret; +} + +static void trusty_std_call_cpu_idle(struct trusty_state *s) +{ + int ret; + + ret = wait_for_completion_timeout(&s->cpu_idle_completion, HZ * 10); + if (!ret) { + pr_warn("%s: timed out waiting for cpu idle to clear, retry anyway\n", + __func__); + } +} + + +struct trusty_std_call32_args { + struct device *dev; + u32 smcnr; + u32 a0; + u32 a1; + u32 a2; +}; + +static long trusty_std_call32_work(void *args) +{ + int ret; + struct device *dev; + u32 smcnr, a0, a1, a2; + struct trusty_state *s; + struct trusty_std_call32_args *work_args; + + BUG_ON(!args); + + work_args = (struct trusty_std_call32_args *)args; + dev = work_args->dev; + s = platform_get_drvdata(to_platform_device(dev)); + + smcnr = work_args->smcnr; + a0 = work_args->a0; + a1 = work_args->a1; + a2 = work_args->a2; + + BUG_ON(SMC_IS_FASTCALL(smcnr)); + BUG_ON(SMC_IS_SMC64(smcnr)); + + if (smcnr != SMC_SC_NOP) { + mutex_lock(&s->smc_lock); + reinit_completion(&s->cpu_idle_completion); + } + + dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) started\n", + __func__, smcnr, a0, a1, a2); + + ret = trusty_std_call_helper(dev, smcnr, a0, a1, a2); + while (ret == SM_ERR_INTERRUPTED || ret == SM_ERR_CPU_IDLE) { + dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) interrupted\n", + __func__, smcnr, a0, a1, a2); + if (ret == SM_ERR_CPU_IDLE) + trusty_std_call_cpu_idle(s); + ret = trusty_std_call_helper(dev, SMC_SC_RESTART_LAST, 0, 0, 0); + } + dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) returned 0x%x\n", + __func__, smcnr, a0, a1, a2, ret); + + WARN_ONCE(ret == SM_ERR_PANIC, "trusty crashed"); + + if (smcnr == SMC_SC_NOP) + complete(&s->cpu_idle_completion); + else + mutex_unlock(&s->smc_lock); + + return ret; +} + +s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2) +{ + struct trusty_std_call32_args args = { + .dev = dev, + .smcnr = smcnr, + .a0 = a0, + .a1 = a1, + .a2 = a2, + }; + + /* bind cpu 0 for now 
since trusty OS is running on physical cpu #0*/ + if((smcnr == SMC_SC_VDEV_KICK_VQ) || (smcnr == SMC_SC_LK_TIMER) + || (smcnr == SMC_SC_LOCKED_NOP) || (smcnr == SMC_SC_NOP)) + return trusty_std_call32_work((void *) &args); + else + return work_on_cpu(0, trusty_std_call32_work, (void *) &args); +} + +EXPORT_SYMBOL(trusty_std_call32); + +int trusty_call_notifier_register(struct device *dev, struct notifier_block *n) +{ + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + return atomic_notifier_chain_register(&s->notifier, n); +} +EXPORT_SYMBOL(trusty_call_notifier_register); + +int trusty_call_notifier_unregister(struct device *dev, + struct notifier_block *n) +{ + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + return atomic_notifier_chain_unregister(&s->notifier, n); +} +EXPORT_SYMBOL(trusty_call_notifier_unregister); + +static int trusty_remove_child(struct device *dev, void *data) +{ + dev_dbg(dev, "%s() is called()\n", __func__); + platform_device_unregister(to_platform_device(dev)); + return 0; +} + +ssize_t trusty_version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + return scnprintf(buf, PAGE_SIZE, "%s\n", s->version_str); +} + +DEVICE_ATTR(trusty_version, S_IRUSR, trusty_version_show, NULL); + +const char *trusty_version_str_get(struct device *dev) +{ + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + return s->version_str; +} +EXPORT_SYMBOL(trusty_version_str_get); + +static void trusty_init_version(struct trusty_state *s, struct device *dev) +{ + int ret; + int i; + int version_str_len; + + ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, -1, 0, 0); + if (ret <= 0) + goto err_get_size; + + version_str_len = ret; + + s->version_str = kmalloc(version_str_len + 1, GFP_KERNEL); + if (!s->version_str) + goto err_get_size; + for (i = 0; i < version_str_len; i++) { + ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, i, 0, 0); + if (ret < 0) + goto err_get_char; + s->version_str[i] = ret; + } + s->version_str[i] = '\0'; + + dev_info(dev, "trusty version: %s\n", s->version_str); + + ret = device_create_file(dev, &dev_attr_trusty_version); + if (ret) + goto err_create_file; + return; + +err_create_file: +err_get_char: + kfree(s->version_str); + s->version_str = NULL; +err_get_size: + dev_err(dev, "failed to get version: %d\n", ret); +} + +u32 trusty_get_api_version(struct device *dev) +{ + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + return s->api_version; +} +EXPORT_SYMBOL(trusty_get_api_version); + +static int trusty_init_api_version(struct trusty_state *s, struct device *dev) +{ + u32 api_version; + api_version = trusty_fast_call32(dev, SMC_FC_API_VERSION, + TRUSTY_API_VERSION_CURRENT, 0, 0); + if (api_version == SM_ERR_UNDEFINED_SMC) + api_version = 0; + + if (api_version > TRUSTY_API_VERSION_CURRENT) { + dev_err(dev, "unsupported api version %u > %u\n", + api_version, TRUSTY_API_VERSION_CURRENT); + return -EINVAL; + } + + dev_info(dev, "selected api version: %u (requested %u)\n", + api_version, TRUSTY_API_VERSION_CURRENT); + s->api_version = api_version; + + return 0; +} + +static bool dequeue_nop(struct trusty_state *s, u32 *args) +{ + unsigned long flags; + struct trusty_nop *nop = NULL; + + spin_lock_irqsave(&s->nop_lock, flags); + if (!list_empty(&s->nop_queue)) { + nop = list_first_entry(&s->nop_queue, + struct trusty_nop, node); + list_del_init(&nop->node); + 
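		/* hand the dequeued nop's SMC arguments back to the caller */
+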
args[0] = nop->args[0]; + args[1] = nop->args[1]; + args[2] = nop->args[2]; + } else { + args[0] = 0; + args[1] = 0; + args[2] = 0; + } + spin_unlock_irqrestore(&s->nop_lock, flags); + return nop; +} + +static void locked_nop_work_func(struct work_struct *work) +{ + int ret; + struct trusty_work *tw = container_of(work, struct trusty_work, work); + struct trusty_state *s = tw->ts; + + dev_dbg(s->dev, "%s\n", __func__); + + ret = trusty_std_call32(s->dev, SMC_SC_LOCKED_NOP, 0, 0, 0); + if (ret != 0) + dev_err(s->dev, "%s: SMC_SC_LOCKED_NOP failed %d", + __func__, ret); + dev_dbg(s->dev, "%s: done\n", __func__); +} + +static void nop_work_func(struct work_struct *work) +{ + int ret; + bool next; + u32 args[3]; + struct trusty_work *tw = container_of(work, struct trusty_work, work); + struct trusty_state *s = tw->ts; + + dev_dbg(s->dev, "%s:\n", __func__); + + dequeue_nop(s, args); + do { + dev_dbg(s->dev, "%s: %x %x %x\n", + __func__, args[0], args[1], args[2]); + + ret = trusty_std_call32(s->dev, SMC_SC_NOP, + args[0], args[1], args[2]); + + next = dequeue_nop(s, args); + + if (ret == SM_ERR_NOP_INTERRUPTED) + next = true; + else if (ret != SM_ERR_NOP_DONE) + dev_err(s->dev, "%s: SMC_SC_NOP failed %d", + __func__, ret); + } while (next); + + dev_dbg(s->dev, "%s: done\n", __func__); +} + +static void trusty_init_smc(int vmm_id) +{ + if (vmm_id == VMM_ID_EVMM) { + smc = smc_evmm; + } else if (vmm_id == VMM_ID_ACRN) { + smc = smc_acrn; + } else { + pr_err("%s: No smc supports VMM[%d](sig:%s)!", + __func__, vmm_id, vmm_signature[vmm_id]); + BUG(); + } +} + +void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop) +{ + unsigned long flags; + struct trusty_work *tw; + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + preempt_disable(); + tw = this_cpu_ptr(s->nop_works); + if (nop) { + WARN_ON(s->api_version < TRUSTY_API_VERSION_SMP_NOP); + + spin_lock_irqsave(&s->nop_lock, flags); + if (list_empty(&nop->node)) + list_add_tail(&nop->node, &s->nop_queue); + spin_unlock_irqrestore(&s->nop_lock, flags); + } + queue_work_on(0, s->nop_wq, &tw->work); + preempt_enable(); +} +EXPORT_SYMBOL(trusty_enqueue_nop); + +void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop) +{ + unsigned long flags; + struct trusty_state *s = platform_get_drvdata(to_platform_device(dev)); + + if (WARN_ON(!nop)) + return; + + spin_lock_irqsave(&s->nop_lock, flags); + if (!list_empty(&nop->node)) + list_del_init(&nop->node); + spin_unlock_irqrestore(&s->nop_lock, flags); +} +EXPORT_SYMBOL(trusty_dequeue_nop); + +static int trusty_probe(struct platform_device *pdev) +{ + int ret; + unsigned int cpu; + work_func_t work_func; + struct trusty_state *s; + struct device_node *node = pdev->dev.of_node; + + ret = trusty_detect_vmm(); + if (ret < 0) { + dev_err(&pdev->dev, "Cannot detect VMM which supports trusty!"); + return -EINVAL; + } + dev_dbg(&pdev->dev, "Detected VMM: sig=%s\n", vmm_signature[ret]); + + trusty_init_smc(ret); + + if (!node) { + dev_err(&pdev->dev, "of_node required\n"); + return -EINVAL; + } + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) { + ret = -ENOMEM; + goto err_allocate_state; + } + + s->dev = &pdev->dev; + spin_lock_init(&s->nop_lock); + INIT_LIST_HEAD(&s->nop_queue); + + mutex_init(&s->smc_lock); + ATOMIC_INIT_NOTIFIER_HEAD(&s->notifier); + init_completion(&s->cpu_idle_completion); + platform_set_drvdata(pdev, s); + + trusty_init_version(s, &pdev->dev); + + ret = trusty_init_api_version(s, &pdev->dev); + if (ret < 0) + goto err_api_version; + + s->nop_wq 
= alloc_workqueue("trusty-nop-wq", WQ_CPU_INTENSIVE, 0); + if (!s->nop_wq) { + ret = -ENODEV; + dev_err(&pdev->dev, "Failed create trusty-nop-wq\n"); + goto err_create_nop_wq; + } + + s->nop_works = alloc_percpu(struct trusty_work); + if (!s->nop_works) { + ret = -ENOMEM; + dev_err(&pdev->dev, "Failed to allocate works\n"); + goto err_alloc_works; + } + + if (s->api_version < TRUSTY_API_VERSION_SMP) + work_func = locked_nop_work_func; + else + work_func = nop_work_func; + + for_each_possible_cpu(cpu) { + struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu); + + tw->ts = s; + INIT_WORK(&tw->work, work_func); + } + + return 0; + +err_alloc_works: + for_each_possible_cpu(cpu) { + struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu); + + flush_work(&tw->work); + } + free_percpu(s->nop_works); + destroy_workqueue(s->nop_wq); +err_create_nop_wq: +err_api_version: + if (s->version_str) { + device_remove_file(&pdev->dev, &dev_attr_trusty_version); + kfree(s->version_str); + } + device_for_each_child(&pdev->dev, NULL, trusty_remove_child); + mutex_destroy(&s->smc_lock); + kfree(s); +err_allocate_state: + return ret; +} + +static int trusty_remove(struct platform_device *pdev) +{ + unsigned int cpu; + struct trusty_state *s = platform_get_drvdata(pdev); + + dev_dbg(&(pdev->dev), "%s() is called\n", __func__); + + device_for_each_child(&pdev->dev, NULL, trusty_remove_child); + + for_each_possible_cpu(cpu) { + struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu); + + flush_work(&tw->work); + } + free_percpu(s->nop_works); + destroy_workqueue(s->nop_wq); + + mutex_destroy(&s->smc_lock); + if (s->version_str) { + device_remove_file(&pdev->dev, &dev_attr_trusty_version); + kfree(s->version_str); + } + kfree(s); + return 0; +} + +static int trusty_suspend(struct platform_device *pdev, pm_message_t state) +{ + long ret = 0, save_ret = 0; + int cpu = 0; + + dev_info(&pdev->dev, "%s() is called\n", __func__); + + ret = smp_call_function_single(cpu, acrn_save_sworld_context, (void *)&save_ret, 1); + if (ret) { + pr_err("%s: smp_call_function_single failed: %ld\n", __func__, ret); + } + if(save_ret < 0) { + dev_err(&pdev->dev, "%s(): failed to save world context!\n", __func__); + return -EPERM; + } + + return 0; +} + +static const struct of_device_id trusty_of_match[] = { + { .compatible = "android,trusty-smc-v1", }, + {}, +}; + +static struct platform_driver trusty_driver = { + .probe = trusty_probe, + .remove = trusty_remove, + .driver = { + .name = "trusty", + .owner = THIS_MODULE, + .of_match_table = trusty_of_match, + }, +}; + +void trusty_dev_release(struct device *dev) +{ + dev_dbg(dev, "%s() is called()\n", __func__); + return; +} + +static struct device_node trusty_timer_node = { + .name = "trusty-timer", + .sibling = NULL, +}; + +static struct device_node trusty_wall_node = { + .name = "trusty-wall", + .sibling = NULL, +}; + +static struct device_node trusty_irq_node = { + .name = "trusty-irq", + .sibling = NULL, +}; + +static struct device_node trusty_virtio_node = { + .name = "trusty-virtio", + .sibling = &trusty_irq_node, +}; + +static struct device_node trusty_log_node = { + .name = "trusty-log", + .sibling = &trusty_virtio_node, +}; + + +static struct device_node trusty_node = { + .name = "trusty", + .child = &trusty_log_node, +}; + +static struct platform_device trusty_platform_dev = { + .name = "trusty", + .id = -1, + .num_resources = 0, + .dev = { + .release = trusty_dev_release, + .of_node = &trusty_node, + }, +}; +static struct platform_device trusty_platform_dev_log = { + .name = 
"trusty-log", + .id = -1, + .num_resources = 0, + .dev = { + .release = trusty_dev_release, + .parent = &trusty_platform_dev.dev, + .of_node = &trusty_log_node, + }, +}; + +static struct platform_device trusty_platform_dev_virtio = { + .name = "trusty-virtio", + .id = -1, + .num_resources = 0, + .dev = { + .release = trusty_dev_release, + .parent = &trusty_platform_dev.dev, + .of_node = &trusty_virtio_node, + }, +}; + +static struct platform_device trusty_platform_dev_irq = { + .name = "trusty-irq", + .id = -1, + .num_resources = 0, + .dev = { + .release = trusty_dev_release, + .parent = &trusty_platform_dev.dev, + .of_node = &trusty_irq_node, + }, +}; + +static struct platform_device trusty_platform_dev_wall = { + .name = "trusty-wall", + .id = -1, + .num_resources = 0, + .dev = { + .release = trusty_dev_release, + .parent = &trusty_platform_dev.dev, + .of_node = &trusty_wall_node, + }, +}; + +static struct platform_device trusty_platform_dev_timer = { + .name = "trusty-timer", + .id = -1, + .num_resources = 0, + .dev = { + .release = trusty_dev_release, + .parent = &trusty_platform_dev_wall.dev, + .of_node = &trusty_timer_node, + }, +}; + +static struct platform_device *trusty_devices[] __initdata = { + &trusty_platform_dev, + &trusty_platform_dev_log, + &trusty_platform_dev_virtio, + &trusty_platform_dev_irq, + &trusty_platform_dev_wall, + &trusty_platform_dev_timer +}; +static int __init trusty_driver_init(void) +{ + int ret = 0; + + ret = platform_add_devices(trusty_devices, ARRAY_SIZE(trusty_devices)); + if (ret) { + printk(KERN_ERR "%s(): platform_add_devices() failed, ret %d\n", __func__, ret); + return ret; + } + + if(trusty_detect_vmm() == VMM_ID_ACRN) { + trusty_driver.suspend = trusty_suspend; + } + + return platform_driver_register(&trusty_driver); +} + +static void __exit trusty_driver_exit(void) +{ + platform_driver_unregister(&trusty_driver); + platform_device_unregister(&trusty_platform_dev); +} + +subsys_initcall(trusty_driver_init); +module_exit(trusty_driver_exit); + +MODULE_LICENSE("GPL"); + diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig index cc2b4d9433ed..0efe3f566477 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig @@ -394,10 +394,14 @@ config GOLDFISH_TTY depends on GOLDFISH select SERIAL_CORE select SERIAL_CORE_CONSOLE - select SERIAL_EARLYCON help Console and system TTY driver for the Goldfish virtual platform. +config GOLDFISH_TTY_EARLY_CONSOLE + bool + default y if GOLDFISH_TTY=y + select SERIAL_EARLYCON + config DA_TTY bool "DA TTY" depends on METAG_DA @@ -463,4 +467,7 @@ config VCC depends on SUN_LDOMS help Support for Sun logical domain consoles. 
+ +source "drivers/tty/cbc/Kconfig" + endif # TTY diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile index 8ce3a8661b31..68cb4581428b 100644 --- a/drivers/tty/Makefile +++ b/drivers/tty/Makefile @@ -35,5 +35,6 @@ obj-$(CONFIG_GOLDFISH_TTY) += goldfish.o obj-$(CONFIG_DA_TTY) += metag_da.o obj-$(CONFIG_MIPS_EJTAG_FDC_TTY) += mips_ejtag_fdc.o obj-$(CONFIG_VCC) += vcc.o +obj-$(CONFIG_CBC_LDISC) += cbc/ obj-y += ipwireless/ diff --git a/drivers/tty/cbc/Kconfig b/drivers/tty/cbc/Kconfig new file mode 100644 index 000000000000..c9cde7fa43ca --- /dev/null +++ b/drivers/tty/cbc/Kconfig @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# CBC (CarrierBoardCommunication) subsystem configuration +# + +config CBC_LDISC + tristate "CBC (Carrier Board Communication) line discipline" + depends on TTY + help + The CBC driver implements a line discipline supporting + the proprietary CBC (Carrier Board Communication) protocol. + + The CBC protocol is a serial line protocol with multiplexing + intended to be used in automotive IVI platforms for multi- + plexed communication between a vehicle IOC and CPU. It is + designed to transport small data packets (up to 64 bytes) + and features a transport protocol to transport larger data + chunks over a point to point connection. + + When initialised the driver presents a number of channels as + character devices. + diff --git a/drivers/tty/cbc/Makefile b/drivers/tty/cbc/Makefile new file mode 100644 index 000000000000..c0517817f605 --- /dev/null +++ b/drivers/tty/cbc/Makefile @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 + + +cbc-ldisc-y := cbc_core.o +cbc-ldisc-y += cbc_device.o +cbc-ldisc-y += cbc_device_manager.o +cbc-ldisc-y += cbc_link_checksum.o +cbc-ldisc-y += cbc_link_layer.o +cbc-ldisc-y += cbc_memory.o +cbc-ldisc-y += cbc_mux_multiplexer.o + +obj-$(CONFIG_CBC_LDISC) += cbc-ldisc.o + +ccflags-y := -O2 -D_FORTIFY_SOURCE=2 -Wformat -Wformat-security -fstack-protector \ No newline at end of file diff --git a/drivers/tty/cbc/cbc_core.c b/drivers/tty/cbc/cbc_core.c new file mode 100644 index 000000000000..d9d15fa224ae --- /dev/null +++ b/drivers/tty/cbc/cbc_core.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CBC line discipline kernel module. + * Handles Carrier Board Communications (CBC) protocol. + * + * Copyright (C) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cbc_core.h" +#include "cbc_types.h" +#include "cbc_device_manager.h" +#include "cbc_link_layer.h" +#include "cbc_mux_multiplexer.h" + +MODULE_AUTHOR("Gerhard Bocksch. gerhard.bocksch@intel.com"); +MODULE_DESCRIPTION("CBC serial protocol demultiplexer"); +MODULE_LICENSE("GPL"); + +static int granularity = 4; +module_param(granularity, int, 0644); +MODULE_PARM_DESC(granularity, + "The granularity of the CBC messages. default: 4"); + +MODULE_ALIAS_LDISC(N_CBCCORE); + +/* Marker to mark the cbc_core as valid. */ +#define CBC_MAGIC 0xAFFEAFFE + +static struct cbc_struct cbc_core = { .magic = CBC_MAGIC }; + +/* + * cbc_core_configure_channel - Set the priority and receive + * callback for a CBC channel. + * @channel_idx: Channel identifier (see cbc_channel_enumeration) + * @priority: Priority for channel. + * @data: Data associated with channel. 
+ * @receive: Receive callback associated with channel.
+ */
+void cbc_core_configure_channel(u32 const channel_idx, const u8 priority,
+				void *data,
+				void (*receive)(void *data, const u16 length,
+						const u8 * const buffer))
+{
+	cbc_mux_configure_data_channel(channel_idx, priority, data, receive);
+}
+EXPORT_SYMBOL_GPL(cbc_core_configure_channel);
+
+/*
+ * cbc_core_send_data - Send to a CBC channel.
+ * @channel_idx: Channel identifier (see cbc_channel_enumeration)
+ * @length: Length of the data in bytes.
+ * @buffer: Data to send on this channel.
+ */
+void cbc_core_send_data(const u32 channel_idx, const u16 length,
+			const u8 * const buffer)
+{
+	cbc_manager_transmit_data(channel_idx, length, buffer);
+}
+EXPORT_SYMBOL_GPL(cbc_core_send_data);
+
+/*
+ * cbc_ldisc_open - Open the CBC connection to the IOC.
+ * @tty: Handle to tty.
+ *
+ * This function is called by the tty module when the
+ * line discipline is requested. It allocates the memory pool and creates the
+ * CBC devices.
+ * Called in process context serialized from other ldisc calls.
+ *
+ * Return: 0 on success, error otherwise.
+ */
+static int cbc_ldisc_open(struct tty_struct *tty)
+{
+	struct cbc_struct *cbc;
+	int err;
+
+	pr_debug("cbc-ldisc open.\n");
+	mutex_lock(&cbc_core.ldisc_mutex);
+
+	if (WARN_ON(tty->ops->write == NULL)) {
+		pr_err("cbc-ldisc open write not supported.\n");
+		err = -EOPNOTSUPP;
+		goto err_exit;
+	}
+
+	cbc = tty->disc_data;
+
+	/* First make sure we're not already connected. */
+	if (WARN_ON(cbc && cbc->magic == CBC_MAGIC)) {
+		pr_err(
+		"cbc-ldisc CBC line discipline already open or CBC magic wrong.\n");
+		err = -EEXIST;
+		goto err_exit;
+	}
+
+	cbc = &cbc_core;
+
+	cbc->tty = tty;
+	tty->disc_data = cbc;
+	tty->receive_room = 65536;
+
+	/* Create class cbc-core. This will appear as /sys/class/cbc* */
+	cbc->cbc_class = class_create(THIS_MODULE, "cbc");
+
+	if (WARN_ON(IS_ERR(cbc->cbc_class))) {
+		pr_err("cbc-ldisc open could not create cbc class.\n");
+		err = PTR_ERR(cbc->cbc_class);
+		goto err_exit;
+	}
+
+	/* Create memory pool */
+	cbc->memory_pool = cbc_memory_pool_create(
+			CBC_QUEUE_LENGTH * CBC_CHANNEL_MAX_NUMBER);
+	if (WARN_ON(!cbc->memory_pool)) {
+		pr_err("failed to create memory pool.\n");
+		err = -ENOMEM;
+		goto err_exit;
+	}
+
+	/* Initialise on every open, so we start with sequence-counters at 0. */
+	cbc_link_layer_init(cbc->memory_pool);
+
+	/* Register devices here. */
+	err = cbc_register_devices(cbc->cbc_class, cbc->memory_pool);
+	if (err != 0) {
+		pr_err("register devices failed\n");
+		goto err_exit;
+	}
+
+	cbc_link_layer_set_frame_granularity(granularity);
+
+	/* tty layer expects 0 on success */
+	mutex_unlock(&cbc_core.ldisc_mutex);
+	return 0;
+
+err_exit:
+	mutex_unlock(&cbc_core.ldisc_mutex);
+	return err;
+}
+
+/*
+ * cbc_ldisc_close - Close down the CBC communication.
+ * @tty: Handle to tty
+ *
+ * Unregister CBC devices and destroy CBC class.
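+ * The buffer memory pool is also freed here, provided no CBC buffers are
+ * still in use.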
+ */ +static void cbc_ldisc_close(struct tty_struct *tty) +{ + struct cbc_struct *cbc; + + mutex_lock(&cbc_core.ldisc_mutex); + + cbc = (struct cbc_struct *) tty->disc_data; + pr_debug("cbc-ldisc close\n"); + + if (cbc && cbc->magic == CBC_MAGIC && cbc->tty == tty) { + cbc_unregister_devices(cbc->cbc_class); + class_destroy(cbc->cbc_class); + if (WARN_ON(!cbc_memory_pool_try_free(cbc->memory_pool))) + pr_err("could not free memory pool.\n"); + tty->disc_data = NULL; + cbc->tty = NULL; + } else { + WARN_ON(1); + pr_err("ldisc close with wrong CBC magic.\n"); + } + + mutex_unlock(&cbc_core.ldisc_mutex); +} + +/* + * Close line discipline on ldisc hangup. + * @tty: Handle to tty + */ +static int cbc_ldisc_hangup(struct tty_struct *tty) +{ + cbc_ldisc_close(tty); + return 0; +} + +/* + * cbc_ldisc_receive_buf - .receive call for a line discipline. + * @tty: Handle to tty + * @cp: Received data buffer. + * @fp: Not used + * @count: Amount of data received. + * + * Attempts to read a single CBC frame at a time. Cycles round + * until all data has been processed. + */ +static void cbc_ldisc_receive_buf(struct tty_struct *tty, + const unsigned char *cp, char *fp, int count) +{ + struct cbc_struct *cbc; + u8 accepted_bytes; /* per cbc_serial_receive call */ + unsigned int accepted_bytes_sum = 0; + + mutex_lock(&cbc_core.ldisc_mutex); + cbc = (struct cbc_struct *) tty->disc_data; + + if (cbc && (cbc->magic == CBC_MAGIC) && (cbc->tty == tty)) { + do { + u8 chunksize = 254; + + if ((count - accepted_bytes_sum) < 255) + chunksize = count - accepted_bytes_sum; + accepted_bytes = cbc_core_on_receive_cbc_serial_data( + chunksize, cp + accepted_bytes_sum); + accepted_bytes_sum += accepted_bytes; + cbc_link_layer_rx_handler(); + } while (accepted_bytes_sum != count); + } + mutex_unlock(&cbc_core.ldisc_mutex); +} + +/* + * Called from ldisc when write is possible. Not used. + */ +static void cbc_ldisc_write_wakeup(struct tty_struct *tty) +{ + (void) tty; +} + +/* Called to send messages from channel specific device to the IOC */ +enum cbc_error target_specific_send_cbc_uart_data(u16 length, + const u8 *raw_buffer) +{ + mutex_lock(&cbc_core.ldisc_mutex); + if (cbc_core.tty) { + set_bit(TTY_DO_WRITE_WAKEUP, &cbc_core.tty->flags); + + cbc_core.tty->ops->write(cbc_core.tty, raw_buffer, length); + } + mutex_unlock(&cbc_core.ldisc_mutex); + + return CBC_OK; +} + +static struct tty_ldisc_ops cbc_ldisc = { + .owner = THIS_MODULE, + .name = "cbc-ldisc", + .magic = TTY_LDISC_MAGIC, + .open = cbc_ldisc_open, + .close = cbc_ldisc_close, + .hangup = cbc_ldisc_hangup, + .receive_buf = cbc_ldisc_receive_buf, + .write_wakeup = cbc_ldisc_write_wakeup +}; + +/* + * cbc_init - Module init call. + * Registers this module as TTY line discipline. + */ +static int __init cbc_init(void) +{ + int status; + + pr_debug("cbc-ldisc init\n"); + mutex_init(&cbc_core.ldisc_mutex); + mutex_lock(&cbc_core.ldisc_mutex); + + cbc_kmod_devices_init(); + + /* Fill in line discipline, and register it */ + status = tty_register_ldisc(N_CBCCORE, &cbc_ldisc); + if (status) + pr_err("cbc-ldisc: can't register line discipline\n"); + + mutex_unlock(&cbc_core.ldisc_mutex); + return 0; +} + +/* + * cbc_exit - Module exit call. + * + * De-registers itself as line discipline. 
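+ * A failure to unregister is only logged; unloading proceeds regardless.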
+ */ +static void __exit cbc_exit(void) +{ + int i; + + mutex_lock(&cbc_core.ldisc_mutex); + + pr_debug("cbc-ldisc Exit\n"); + + i = tty_unregister_ldisc(N_CBCCORE); + if (i) + pr_err("cbc-ldisc: can't unregister ldisc (err %d).\n", i); + + mutex_unlock(&cbc_core.ldisc_mutex); +} + +module_init(cbc_init); +module_exit(cbc_exit); diff --git a/drivers/tty/cbc/cbc_core.h b/drivers/tty/cbc/cbc_core.h new file mode 100644 index 000000000000..71e07a3f7765 --- /dev/null +++ b/drivers/tty/cbc/cbc_core.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CBC line discipline kernel module. + * Handles Carrier Board Communications (CBC) protocol. + * + * Copyright (C) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef CBC_CORE_MOD_H_ +#define CBC_CORE_MOD_H_ + +#include +#include +#include +#include +#include + +#include "cbc_memory.h" + +#define CBC_IOCTL_MAGIC 0xf4 + +/* + * struct cbc_struct - + * + * @magic: Marker to mark the cbc_core as valid. + * @tty: tty associated with the CBC driver. + * @class: CBC device class + * @memory_pool: Memory pool of CBC buffer allocated for used by CBC driver. + * @ldisc_mutex: Mutex to avoid unloading while accessing the driver + */ +struct cbc_struct { + int magic; + struct tty_struct *tty; + struct class *cbc_class; + struct cbc_memory_pool *memory_pool; + struct mutex ldisc_mutex; +}; + +#endif /* CBC_CORE_MOD_H_ */ diff --git a/drivers/tty/cbc/cbc_core_public.h b/drivers/tty/cbc/cbc_core_public.h new file mode 100644 index 000000000000..85928f6efc68 --- /dev/null +++ b/drivers/tty/cbc/cbc_core_public.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CBC line discipline kernel module. + * Handles Carrier Board Communications (CBC) protocol. + * + * Copyright (C) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef CBC_CORE_PUBLIC_H +#define CBC_CORE_PUBLIC_H + +#include "cbc_types.h" + +/* CBC version. */ +#define CBC_VERSION_ID 1 + +/* The following function needs to be implemented on CM/IOC. */ +enum cbc_error target_specific_send_cbc_uart_data(u16 length, + const u8 *raw_buffer); + + +#endif /* CBC_CORE_PUBLIC_H */ + diff --git a/drivers/tty/cbc/cbc_device.c b/drivers/tty/cbc/cbc_device.c new file mode 100644 index 000000000000..1933a8527015 --- /dev/null +++ b/drivers/tty/cbc/cbc_device.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CBC line discipline kernel module. + * Handles Carrier Board Communications (CBC) protocol. + * + * Copyright (C) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ *
+ */
+
+#include
+
+#include "cbc_device.h"
+
+
+void cbc_device_init(struct cbc_device_data *cd)
+{
+	if (cd)
+		INIT_LIST_HEAD(&cd->open_files_head);
+}
+
+void cbc_file_init(struct cbc_file_data *file)
+{
+	if (file) {
+		cbc_buffer_queue_init(&file->queue);
+		init_waitqueue_head(&file->wq_read);
+		INIT_LIST_HEAD(&file->list);
+	}
+}
+
+void cbc_file_enqueue(struct cbc_file_data *fd, struct cbc_buffer *buffer)
+{
+	if (fd) {
+		if (cbc_buffer_queue_enqueue(&fd->queue, buffer)) {
+			cbc_buffer_increment_ref(buffer);
+			wake_up_interruptible(&fd->wq_read);
+		}
+	}
+}
+
+struct cbc_buffer *cbc_file_dequeue(struct cbc_file_data *fd)
+{
+	struct cbc_buffer *buffer = NULL;
+
+	if (fd)
+		buffer = cbc_buffer_queue_dequeue(&fd->queue);
+
+	if (buffer && atomic_read(&buffer->refcount) == 0) {
+		buffer = NULL;
+		pr_err("cbc-core: De-queueing an already freed buffer\n");
+	}
+
+	return buffer;
+}
+
+int cbc_file_queue_empty(struct cbc_file_data *fd)
+{
+	if (fd)
+		return (fd->queue.write == fd->queue.read);
+
+	return 1;
+}
+
diff --git a/drivers/tty/cbc/cbc_device.h b/drivers/tty/cbc/cbc_device.h
new file mode 100644
index 000000000000..deb0cd922316
--- /dev/null
+++ b/drivers/tty/cbc/cbc_device.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CBC line discipline kernel module.
+ * Handles Carrier Board Communications (CBC) protocol.
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef CBC_DEVICE_H_
+#define CBC_DEVICE_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+#include "cbc_types.h"
+#include "cbc_memory.h"
+
+enum cbc_device_type {
+	CBC_DEVICE_TYPE_DEFAULT,
+	CBC_DEVICE_TYPE_RAW,
+	CBC_DEVICE_TYPE_HIDDEN,
+	CBC_DEVICE_TYPE_DEBUG
+};
+
+/*
+ * struct cbc_device_data - Data for a single channel, e.g. /dev/cbc-pmt.
+ * @device_name: Device name.
+ * @device_type: CBC device type.
+ *               See c:type 'enum cbc_device_type'.
+ * @device: Pointer to device struct.
+ * @open_files_head: Linked list of open files for this device.
+ *
+ * Configuration for a given CBC device. It is stored in the device private
+ * data.
+ */
+struct cbc_device_data {
+	char *device_name;
+	enum cbc_device_type device_type;
+	struct device *device;
+	struct list_head open_files_head;
+};
+
+/*
+ * struct cbc_file_data - Data for a CBC device file.
+ * @queue: CBC buffer queue for this device.
+ * @wq_read: wait_queue_head_t used for waking device on events.
+ * @cbc_device: Device data for this device.
+ * @list: list_head.
+ *
+ */
+struct cbc_file_data {
+	struct cbc_buffer_queue queue;
+	wait_queue_head_t wq_read;
+	struct cbc_device_data *cbc_device;
+	struct list_head list;
+};
+
+/*
+ * cbc_device_init - Initialise CBC device data
+ * @cd: pointer to CBC device data.
+ *
+ * Initialises device's list_head.
+ */
+void cbc_device_init(struct cbc_device_data *cd);
+
+/*
+ * cbc_file_init - Initialise CBC file data
+ * @file: pointer to CBC file data.
+ *
+ * Initialises device file's CBC queue, wait_queue_head_t and list_head.
+ */
+void cbc_file_init(struct cbc_file_data *file);
+
+/*
+ * cbc_file_enqueue - Add CBC buffer to queue.
+ * @fd: CBC device file data.
+ * @buffer: Pointer to CBC buffer.
+ *
+ * Increases reference count on buffer.
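+ * If the queue is already full, the buffer is dropped: no reference is
+ * taken and no reader is woken.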
+ */ +void cbc_file_enqueue(struct cbc_file_data *fd, struct cbc_buffer *buffer); + +/* + * cbc_file_dequeue - Remove buffer from head of queue. + * @fd: CBC device file data. + * + * Does not decrease reference count. + */ +struct cbc_buffer *cbc_file_dequeue(struct cbc_file_data *fd); + +/* + * cbc_file_queue_empty - Is CBC queue empty? + */ +int cbc_file_queue_empty(struct cbc_file_data *fd); + +#endif /* CBC_DEVICE_H_ */ diff --git a/drivers/tty/cbc/cbc_device_manager.c b/drivers/tty/cbc/cbc_device_manager.c new file mode 100644 index 000000000000..0e74183d9828 --- /dev/null +++ b/drivers/tty/cbc/cbc_device_manager.c @@ -0,0 +1,879 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CBC line discipline kernel module. + * Handles Carrier Board Communications (CBC) protocol. + * + * Copyright (C) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include "cbc_core.h" +#include "cbc_device.h" +#include "cbc_device_manager.h" +#include "cbc_memory.h" +#include "cbc_mux_multiplexer.h" +#include "cbc_types.h" + +static int major; /* Default to dynamic major */ +module_param(major, int, 0); +MODULE_PARM_DESC(major, "Major device number"); + +/* + * Minor start number. + * This has to be 0 to allow a lookup of the device structs in an array. + */ +#define CBC_MINOR 0 +/* Device-name/driver-name when registering the devices in the linux kernel.*/ +#define DEVICE_NAME "cbc-core" + +/* Max number of open files per cbc channel */ +#define MAX_OPEN_FILES 6 + +static void demuxed_receive(void *void_data, struct cbc_buffer *cbc_buffer); + +static int cbc_device_open(struct inode *, struct file *); +static int cbc_device_release(struct inode *, struct file *); +static ssize_t cbc_device_read(struct file *, char __user *, size_t, + loff_t *); +static ssize_t cbc_device_write(struct file *, const char __user *, size_t, + loff_t *); +static unsigned int cbc_device_poll(struct file *file, poll_table *wait); +static long cbc_device_ioctl(struct file *, unsigned int, unsigned long); + +static const struct file_operations cbc_dev_file_operations = { + .owner = THIS_MODULE, + .open = cbc_device_open, + .release = cbc_device_release, + .read = cbc_device_read, + .write = cbc_device_write, + .poll = cbc_device_poll, + .unlocked_ioctl = cbc_device_ioctl +}; + +struct cbc_device_manager { + struct cdev cdev; + struct cbc_device_data channels[CBC_CHANNEL_MAX_NUMBER]; + struct mutex send_lock; + struct cbc_memory_pool *cbc_memory; +}; + +/* Currently, only one CBC per kernel supported.*/ +static struct cbc_device_manager cbc_device_mgr_configuration = { + .channels[CBC_CHANNEL_PMT].device_type = + CBC_DEVICE_TYPE_HIDDEN, + .channels[CBC_CHANNEL_PMT].device_name = + "cbc-pmt", + + .channels[CBC_CHANNEL_LIFECYCLE].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_LIFECYCLE].device_name = + "cbc-lifecycle", + + .channels[CBC_CHANNEL_SIGNALS].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_SIGNALS].device_name = + "cbc-signals", + + .channels[CBC_CHANNEL_EARLY_SIGNALS].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_EARLY_SIGNALS].device_name = + "cbc-early-signals", + + .channels[CBC_CHANNEL_DIAGNOSIS].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_DIAGNOSIS].device_name = + "cbc-diagnosis", + + .channels[CBC_CHANNEL_DLT].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_DLT].device_name = + "cbc-dlt", + + .channels[CBC_CHANNEL_LINDA].device_type = + CBC_DEVICE_TYPE_HIDDEN, + 
.channels[CBC_CHANNEL_LINDA].device_name = + "cbc-linda", + + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_0].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_0].device_name = + "cbc-raw0", + + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_1].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_1].device_name = + "cbc-raw1", + + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_2].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_2].device_name = + "cbc-raw2", + + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_3].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_3].device_name = + "cbc-raw3", + + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_4].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_4].device_name = + "cbc-raw4", + + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_5].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_5].device_name = + "cbc-raw5", + + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_6].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_6].device_name = + "cbc-raw6", + + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_7].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_7].device_name = + "cbc-raw7", + + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_8].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_8].device_name = + "cbc-raw8", + + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_9].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_9].device_name = + "cbc-raw9", + + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_10].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_10].device_name = + "cbc-raw10", + + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_11].device_type = + CBC_DEVICE_TYPE_DEFAULT, + .channels[CBC_CHANNEL_OEM_RAW_CHANNEL_11].device_name = + "cbc-raw11", + + .channels[CBC_CHANNEL_DEBUG_OUT].device_type = + CBC_DEVICE_TYPE_DEBUG, + .channels[CBC_CHANNEL_DEBUG_OUT].device_name = + "cbc-debug-out", + + .channels[CBC_CHANNEL_DEBUG_IN].device_type = + CBC_DEVICE_TYPE_DEBUG, + .channels[CBC_CHANNEL_DEBUG_IN].device_name = + "cbc-debug-in", +}; + +static struct cbc_mux_channel_configuration cbc_mux_config; + +/* + * priority_show - Retrieve device attribute priority. + * @dev: device (i.e /dev/cbc*) + * @attr: Priority attribute + * @buf: Buffer to write to. + * + * Every channel (entry in /dev) has a priority. + * This can be set/read by a ioctl or in the sysfs. + */ +static ssize_t priority_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cbc_device_data *chn_data = + (struct cbc_device_data *) dev_get_drvdata(dev); + int idx = chn_data - &cbc_device_mgr_configuration.channels[0]; + int prio = cbc_mux_multiplexer_get_priority(idx); + + pr_debug("cbc-core: read priority %i for channel: %i\n", prio, idx); + return scnprintf(buf, PAGE_SIZE, "%i\n", prio); +} + +/* + * priority_store - Store device attribute priority. + * @dev: device (i.e /dev/cbc*) + * @attr: Priority attribute + * @buf: Buffer to write to. + * + * Every channel (entry in /dev) has a priority. + * This can be set/read by a ioctl or in the sysfs. 
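+ *
+ * For example, from user space (the sysfs path follows the "cbc" class
+ * and the device names registered below; a sketch, not a tested command):
+ *
+ *   cat /sys/class/cbc/cbc-signals/priority
+ *   echo 5 > /sys/class/cbc/cbc-signals/priority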
+ */ +static ssize_t priority_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct cbc_device_data *chn_data = + (struct cbc_device_data *) dev_get_drvdata(dev); + int idx = chn_data - &cbc_device_mgr_configuration.channels[0]; + u8 tmp = 0; + int res = kstrtou8(buf, 0, &tmp); + + if ((res == 0) && (tmp < 8)) { + pr_debug("cbc-core: write priority %i to channel %i\n", tmp, + idx); + cbc_mux_multiplexer_set_priority(idx, tmp); + } + return count; +} +static DEVICE_ATTR_RW(priority); + +/* + * cbc_device_open - Open CBC char device. Implementation of .open. + * @inode:Pointer to inode object for this device. + * @file: Pointer to file object for this device. + * + * Return 0 if device opened successfully, Linux error code otherwise. + */ +static int cbc_device_open(struct inode *inode, struct file *file) +{ + struct cbc_device_data *device_data = + &cbc_device_mgr_configuration.channels[MINOR( + inode->i_rdev)]; + int ret = 0; + u32 num_open_files = 0; + struct cbc_file_data *file_data = kmalloc(sizeof(struct cbc_file_data), + GFP_KERNEL); + + if (!device_data) + ret = -EIO; + + if (!file_data) + ret = -ENOMEM; + + if (ret == 0) { + pr_debug("cbc_core: device_open: %d.%d %s\n", + MAJOR(inode->i_rdev), MINOR(inode->i_rdev), + device_data->device_name); + + if (MINOR(inode->i_rdev) >= CBC_CHANNEL_MAX_NUMBER) { + pr_err("cbc-core: invalid cbc channel number.\n"); + ret = -ENODEV; + } + } + + if (ret == 0) { + struct list_head *tmp; + + list_for_each(tmp, &device_data->open_files_head) + num_open_files++; + + if (num_open_files > MAX_OPEN_FILES) + ret = -EBUSY; + } + + if (ret == 0) { + cbc_file_init(file_data); + file_data->cbc_device = device_data; + list_add(&file_data->list, &device_data->open_files_head); + file->private_data = file_data; + } else { + kfree(file_data); + } + + return ret; +} + +/* + * cbc_device_release - Release char device. Implementation of .release + * @inode:Pointer to inode object for this device. + * @file: Pointer to file object for this device. + */ +static int cbc_device_release(struct inode *inode, struct file *file) +{ + u32 dev_idx = MINOR(inode->i_rdev); + struct cbc_file_data *file_data = file->private_data; + + if (file_data) { + list_del(&file_data->list); + + pr_debug("cbc-core: device_release: %d.%d %s\n", + MAJOR(inode->i_rdev), dev_idx, + file_data->cbc_device->device_name); + + while (!cbc_file_queue_empty(file_data)) + cbc_buffer_release(cbc_file_dequeue(file_data)); + + kfree(file_data); + file->private_data = NULL; + } + return 0; +} + +/* + * cbc_device_read - CBC device read. Implementation of .read. + * @file: Pointer to file object for this device. + * @user_buffer: Pointer to buffer containing data to be read. + * @length:Length of buffer. + * @offset: Offset into buffer. + */ +static ssize_t cbc_device_read(struct file *file, char __user *user_buffer, + size_t length, loff_t *offset) +{ + struct cbc_file_data *f = (struct cbc_file_data *) file->private_data; + s32 ret = 0; + + if (!f) + ret = -EIO; + + if (ret == 0) { + while (cbc_file_queue_empty(f) && (ret == 0)) { + if (file->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + return ret; + } + ret = wait_event_interruptible(f->wq_read, + !(cbc_file_queue_empty(f))); + if ((ret != 0) && (ret != -ERESTARTSYS)) { + /* + * ERESTARTSYS happens when a file is polled + * while shutting down the ldisc. + * This is not an error. 
+				 */
+				pr_err("cbc-core: fifo_read: woke up with error %d.\n",
+					ret);
+				ret = -EIO;
+			}
+		}
+	}
+
+	if (ret == 0) {
+		if (!cbc_file_queue_empty(f)) {
+			struct cbc_buffer *cbc_buffer;
+
+			cbc_buffer = cbc_file_dequeue(f);
+
+			if (cbc_buffer) {
+				u32 offset = CBC_HEADER_SIZE;
+				u16 data_length = cbc_buffer->payload_length;
+
+				if (f->cbc_device->device_type ==
+						CBC_DEVICE_TYPE_RAW) {
+					offset = CBC_HEADER_SIZE +
+							CBC_RAWHEADER_SIZE;
+					data_length = data_length -
+							CBC_RAWHEADER_SIZE;
+				} else if (f->cbc_device->device_type ==
+						CBC_DEVICE_TYPE_DEBUG) {
+					offset = 0;
+					data_length = cbc_buffer->frame_length;
+				}
+
+				if (data_length <= length) {
+					if (copy_to_user(
+						(void __user *) user_buffer,
+						&cbc_buffer->data[offset],
+						data_length) == 0) {
+						ret = data_length;
+					} else {
+						pr_err(
+							"cbc-core: device_read %u bytes copy to user failed.\n",
+							data_length);
+						ret = -EFAULT;
+					}
+				} else {
+					pr_err(
+						"cbc-core: device_read, buffer too small for %u bytes.\n",
+						data_length);
+					ret = -EINVAL;
+				}
+				cbc_buffer_release(cbc_buffer);
+			} else {
+				pr_err("cbc-core: dequeued a null-buffer.\n");
+			}
+
+		} else {
+			pr_err("cbc-core: queue empty after response to wait.\n");
+		}
+
+	}
+	return ret;
+}
+
+/*
+ * cbc_device_write - Write data to char device. Implementation of .write.
+ * @file: Pointer to file object for this device.
+ * @user_buffer: Pointer to buffer containing data to be written.
+ * @length: Length of buffer.
+ * @offset: Offset into buffer.
+ */
+static ssize_t cbc_device_write(struct file *file,
+		const char __user *user_buffer, size_t length, loff_t *offset)
+{
+	int n = 0;
+	struct cbc_file_data *file_data =
+			(struct cbc_file_data *) file->private_data;
+	struct cbc_device_data *chn_data = file_data->cbc_device;
+
+	struct cbc_buffer *cbc_buffer = cbc_memory_pool_get_buffer(
+			cbc_device_mgr_configuration.cbc_memory);
+	int ret = 0;
+	u32 payload_offset = CBC_HEADER_SIZE;
+	u32 additional_header_size = 0;
+
+	u32 tmp = (u32) length;
+
+	if (!cbc_buffer) {
+		pr_err("cbc-core: Out of memory.\n");
+		ret = -ENOMEM;
+	}
+
+	if (ret == 0) {
+		if (chn_data == NULL) {
+			pr_err("cbc-core: Channel data is NULL.\n");
+			ret = -EINVAL;
+		}
+	}
+
+	if (ret == 0) {
+		if (chn_data->device_type == CBC_DEVICE_TYPE_RAW) {
+			payload_offset = CBC_HEADER_SIZE + CBC_RAWHEADER_SIZE;
+			additional_header_size = CBC_RAWHEADER_SIZE;
+		} else if (chn_data->device_type == CBC_DEVICE_TYPE_DEBUG) {
+			ret = -EINVAL; /* debug channels do not support write */
+		}
+	}
+
+	if ((ret == 0) && (length + payload_offset > CBC_BUFFER_SIZE)) {
+		pr_err(
+			"cbc-core: Device_write %u bytes not possible, maximum buffer size exceeded.\n",
+			tmp);
+		ret = -EINVAL;
+	}
+
+	if (ret == 0) {
+		if (user_buffer == NULL) {
+			pr_err("cbc-core: Device_write buffer is NULL.\n");
+			ret = -EINVAL;
+		}
+	}
+
+	if (ret == 0) {
+		if (copy_from_user(&cbc_buffer->data[payload_offset],
+				(void __user *) user_buffer, length) == 0) {
+			int idx = chn_data -
+				&cbc_device_mgr_configuration.channels[0];
+
+			n = length;
+			cbc_buffer->payload_length = length +
+					additional_header_size;
+			cbc_manager_transmit_buffer(idx, cbc_buffer);
+		} else {
+			ret = -EFAULT;
+		}
+	}
+
+	cbc_buffer_release(cbc_buffer);
+
+	if (ret == 0)
+		ret = n;
+	return ret;
+}
+
+/*
+ * cbc_mux_configure_data_channel - Configure the specified channel.
+ * @channel_idx: Channel identifier.
+ * @priority: Priority for this channel.
+ * @data: Data associated with this channel.
+ * @receive: Data receive function for this channel.
+ *
+ * Other kernel modules may wish to use the CBC line discipline.
+ * They can potentially define their own configurations for the CBC channels. + */ +void cbc_mux_configure_data_channel(u32 const channel_idx, const u8 priority, + void *data, + void (*receive)(void *data, const u16 length, + const u8 * const buffer)) +{ + if (channel_idx < CBC_CHANNEL_MAX_NUMBER) { + struct cbc_mux_channel *list = + &cbc_mux_config.cbc_mux_channel_list[channel_idx]; + list->data = data; + list->priority = priority; + list->buffer_receive = NULL; + list->data_receive = receive; + } +} + + +/* + * cbc_register_devices - Register the CBC channels as Linux character devices. + * @cbc_class: CBC device class. + * @memory: CBC memory pool allocated for CBC buffers. + * + * Return: 0 on success or Linux error code. + */ +int cbc_register_devices(struct class *cbc_class, + struct cbc_memory_pool *memory) +{ + int ret = 0; + int i; + dev_t devid; + + struct cbc_device_manager *cbc = &cbc_device_mgr_configuration; + + cbc->cbc_memory = memory; + + /* Set up the devices after the line discipline is opened. */ + if (major) { + devid = MKDEV(major, 0); + ret = register_chrdev_region(devid, CBC_CHANNEL_MAX_NUMBER, + DEVICE_NAME); + } else { + ret = alloc_chrdev_region(&devid, 0, CBC_CHANNEL_MAX_NUMBER, + DEVICE_NAME); + major = MAJOR(devid); + } + + if (ret < 0) + pr_err("cbc-core: ldisc open register chrdev region failed.\n"); + + if (ret == 0) { + cdev_init(&cbc->cdev, &cbc_dev_file_operations); + cbc->cdev.owner = THIS_MODULE; + cbc->cdev.ops = &cbc_dev_file_operations; + ret = cdev_add(&cbc->cdev, MKDEV(major, CBC_MINOR), + CBC_CHANNEL_MAX_NUMBER); + if (ret < 0) { + unregister_chrdev_region(MKDEV(major, CBC_MINOR), + CBC_CHANNEL_MAX_NUMBER); + pr_err("cbc-core: ldisc open add cdev failed\n"); + } + } + + if (ret == 0) { + for (i = 0; i < CBC_CHANNEL_MAX_NUMBER; i++) { + cbc_device_init(&cbc->channels[i]); + + if (cbc->channels[i].device_type != + CBC_DEVICE_TYPE_HIDDEN) { + /* + * Create the devices. + * These will appear in /sys/class/cbc and + * if udev is running, /dev + */ + cbc->channels[i].device = device_create( + cbc_class, NULL, + MKDEV(major, i), NULL, + cbc->channels[i].device_name, + i); + + /* Add the attribute */ + ret = device_create_file( + cbc->channels[i].device, + &dev_attr_priority); + + /* Set private data to point to the fifo */ + dev_set_drvdata(cbc->channels[i].device, + &cbc->channels[i]); + } else { + cbc->channels[i].device = NULL; + } + } + } + + if (ret != 0) + cbc_unregister_devices(cbc_class); + + return ret; +} + +/* + * cbc_unregister_devices - Remove CBC devices. + * @cbc_class: CBC Device class. + * + * Remove CBC device files and unregisters chrdev region. + */ +void cbc_unregister_devices(struct class *cbc_class) +{ + int i; + struct cbc_device_manager *cbc = &cbc_device_mgr_configuration; + + /* Remove the /dev/cbc* devices */ + cdev_del(&cbc->cdev); + + for (i = 0; i < CBC_CHANNEL_MAX_NUMBER; i++) { + if (cbc->channels[i].device != NULL) { + device_remove_file(cbc->channels[i].device, + &dev_attr_priority); + device_destroy(cbc_class, MKDEV(major, i)); + } + } + + /* + * Also destroys all class attribute files, + * because they are ref. counted. + */ + unregister_chrdev_region(MKDEV(major, CBC_MINOR), + CBC_CHANNEL_MAX_NUMBER); + + cbc->cbc_memory = NULL; +} + +/* + * cbc_manager_transmit_data - Transmit data on specified channel. + * @channel_idx: Channel identifier. + * @length: Length of data. + * @buffer: Pointer to data buffer. 
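+ *
+ * A minimal in-kernel call sketch (channel and payload purely
+ * illustrative):
+ *
+ *   static const u8 msg[] = { 0x01, 0x02 };
+ *   cbc_manager_transmit_data(CBC_CHANNEL_OEM_RAW_CHANNEL_0,
+ *                             sizeof(msg), msg);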
+ * + * If a CBC buffer is available from the memory pool, and the channel + * is valid, the supplied data is copied into a CBC buffer and transmitted. + */ +void cbc_manager_transmit_data(const u32 channel_idx, const u16 length, + const u8 * const buffer) +{ + struct cbc_buffer *cbc_buffer = cbc_memory_pool_get_buffer( + cbc_device_mgr_configuration.cbc_memory); + struct cbc_device_data *chn_data; + u32 offset = CBC_HEADER_SIZE; + u32 copy_length = length; + + if (channel_idx >= CBC_CHANNEL_MAX_NUMBER) { + pr_err("cbc_mux_transmit_data(): Invalid cbc channel idx.\n"); + return; + } + + chn_data = &cbc_device_mgr_configuration.channels[channel_idx]; + + if (!cbc_buffer) + return; + + if (chn_data->device_type == CBC_DEVICE_TYPE_RAW) + offset = CBC_HEADER_SIZE + CBC_RAWHEADER_SIZE; + + if (length + offset > CBC_MAX_TOTAL_FRAME_SIZE) + copy_length = CBC_MAX_TOTAL_FRAME_SIZE - offset; + + memcpy(&cbc_buffer->data[offset], buffer, copy_length); + + cbc_manager_transmit_buffer(channel_idx, cbc_buffer); + cbc_buffer_release(cbc_buffer); +} + +/* + * cbc_manager_transmit_buffer - Transmits CBC buffer on specified channel. + * @channel_idx: Channel identifier. + * @buffer: CBC buffer to transmit. + */ +void cbc_manager_transmit_buffer(const u32 channel_idx, + struct cbc_buffer *buffer) +{ + if (channel_idx >= CBC_CHANNEL_MAX_NUMBER) { + pr_err("cbc_mux_transmit_data(): Invalid cbc channel idx.\n"); + } else { + enum cbc_error res = CBC_OK; + struct cbc_device_data *chn_data = + &cbc_device_mgr_configuration.channels[channel_idx]; + + mutex_lock(&cbc_device_mgr_configuration.send_lock); + + if (chn_data->device_type == CBC_DEVICE_TYPE_RAW) { + /* + * Room for raw header is already reserved in buffer. + * Calculate raw header data length without raw + * header. + */ + u32 real_payload_size = buffer->payload_length - + CBC_RAWHEADER_SIZE; + + buffer->data[CBC_HEADER_SIZE] = + CBC_RAW_CHANNEL_DIRECT_TRANSPORT; + buffer->data[CBC_HEADER_SIZE + 1] = + (u8) (real_payload_size & 0xFF); + buffer->data[CBC_HEADER_SIZE + 2] = + (u8) ((real_payload_size >> 8) & 0xFFU); + } + + res = cbc_mux_multiplexer_transmit_buffer(channel_idx, buffer); + mutex_unlock(&cbc_device_mgr_configuration.send_lock); + if (res != CBC_OK) + pr_err("Error transmitting frame %u.\n", res); + } + /* Buffer is released in the calling cbc_device_write() */ +} + +/* + * cbc_device_poll - Set up polling based on current status of queue. + * @file: Handle to cbc_device_data. + * @wait: Pointer to poll_table. + * + * Return: Updated poll mask. + */ +static unsigned int cbc_device_poll(struct file *file, poll_table *wait) +{ + struct cbc_file_data *f = (struct cbc_file_data *) file->private_data; + unsigned int mask = 0; + + poll_wait(file, &f->wq_read, wait); + + if (!cbc_file_queue_empty(f)) + mask |= (POLLIN | POLLRDNORM); + + if (!(f->queue.read + CBC_QUEUE_LENGTH == f->queue.write)) + mask |= (POLLOUT | POLLWRNORM); + + return mask; +} + +/* + * cbc_device_ioctl - Handle CBC channel ioctl call. + * @file: Handle to cbc_device_data. + * @cmd: ioctl command. + * @arg: argument associated with the command. + * + * The flag field and priority can get get/set for a CBC device (channel). + * + * Return: 0 if command successfully handled, Linux error otherwise. 
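+ *
+ * For example, from user space (assuming the CBC_PRIORITY_* request codes
+ * are exported to the caller; the argument is a plain int):
+ *
+ *   int prio = 5;
+ *   ioctl(fd, CBC_PRIORITY_SET, &prio);
+ *   ioctl(fd, CBC_PRIORITY_GET, &prio);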
+ */ +static long cbc_device_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int tmp; + struct cbc_file_data *file_data = + (struct cbc_file_data *) file->private_data; + struct cbc_device_data *chn_data = file_data->cbc_device; + + int idx = chn_data - &cbc_device_mgr_configuration.channels[0]; + + switch (cmd) { + case CBC_PRIORITY_GET: + tmp = cbc_mux_multiplexer_get_priority(idx); + if (copy_to_user((void __user *) arg, &tmp, sizeof(tmp))) + return -EFAULT; + return 0; + + case CBC_PRIORITY_SET: + if (copy_from_user(&tmp, (void __user *) arg, sizeof(tmp))) + return -EFAULT; + cbc_mux_multiplexer_set_priority(idx, tmp); + return 0; + + default: + return -EINVAL; + } + +} + +/* + * get_default_priority - Get default priority for specified channel. + * @channel_id: channel identifier. + * + * Return: Priority for specified channel. + */ +static u8 get_default_priority(enum cbc_channel_enumeration channel_id) +{ + u8 result = 1; + + switch (channel_id) { + case CBC_CHANNEL_PMT: + case CBC_CHANNEL_LIFECYCLE: + case CBC_CHANNEL_DLT: + case CBC_CHANNEL_LINDA: + result = 6; + break; + + case CBC_CHANNEL_DIAGNOSIS: + result = 2; + break; + + default: + result = 3; + break; + } + return result; +} + +/* + * demuxed_receive - Handle a CBC buffer received over UART. + * @void_data: CBC device data. + * @cbc_buffer: CBC buffer received over UART. + * + * Checks if there is valid data. Determines the frame type and + * adds to buffer queue. + */ +static void demuxed_receive(void *void_data, struct cbc_buffer *cbc_buffer) +{ + struct cbc_device_data *device_data = + (struct cbc_device_data *) void_data; + struct list_head *current_item; + struct cbc_file_data *current_file_data; + + if (device_data && cbc_buffer + && cbc_buffer->frame_length > + CBC_HEADER_SIZE + CBC_CHECKSUM_SIZE) { + /* Payload_length includes raw_header */ + u16 payload_length = cbc_buffer->frame_length - + (CBC_HEADER_SIZE + CBC_CHECKSUM_SIZE); + + if (device_data->device_type == CBC_DEVICE_TYPE_RAW) { + if (cbc_buffer->frame_length > + (CBC_HEADER_SIZE + CBC_RAWHEADER_SIZE + + CBC_CHECKSUM_SIZE)) { + u16 raw_length; + + raw_length = cbc_buffer->data[4]; + raw_length |= (cbc_buffer->data[5] << 8); + + if (raw_length + CBC_RAWHEADER_SIZE > + payload_length) { + pr_err( + "raw length (%i) is longer than payload length (%i)\n", + raw_length, payload_length); + /* Payload_length already set + * to max value + */ + } else { + payload_length = raw_length + + CBC_RAWHEADER_SIZE; + } + } else { + pr_err("cbc-core: Frame to short for a raw frame\n"); + } + cbc_buffer->payload_length = payload_length; + } else if (device_data->device_type == + CBC_DEVICE_TYPE_DEFAULT) { + cbc_buffer->payload_length = payload_length; + } + /* else, do not touch payload_length in a debug-channel */ + + /* Enqueue */ + for (current_item = device_data->open_files_head.next + ; current_item != &device_data->open_files_head; current_item = + current_item->next) { + + current_file_data = list_entry(current_item, + struct cbc_file_data, list); + /* File_enqueue increases ref. count. */ + cbc_file_enqueue(current_file_data, cbc_buffer); + } + } else { + pr_err("cbc-core: (<- IOC) dev_receive data is null\n"); + } +} + +/* + * cbc_kmod_devices_init - Configure CBC multiplexer. + * + * Configures multiplexer channel list and configures the multiplexer using + * this list. Initialises mutex lock for multiplexer. + */ +void cbc_kmod_devices_init(void) +{ + /* + * Set up the multiplexer channel list and use it to configure the + * multiplexer. 
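+	 *
+	 * With get_default_priority() above, this yields priority 6 for the
+	 * PMT, lifecycle, DLT and LINDA channels, 2 for diagnosis and 3 for
+	 * all remaining channels.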
+	 */
+	u32 i;
+
+	for (i = 0; i < CBC_CHANNEL_MAX_NUMBER; i++) {
+		cbc_mux_config.cbc_mux_channel_list[i].buffer_receive =
+				demuxed_receive;
+		cbc_mux_config.cbc_mux_channel_list[i].data_receive = NULL;
+		cbc_mux_config.cbc_mux_channel_list[i].data =
+				&cbc_device_mgr_configuration.channels[i];
+		cbc_mux_config.cbc_mux_channel_list[i].priority =
+				get_default_priority(i);
+	}
+
+	cbc_mux_multiplexer_setup(&cbc_mux_config);
+	mutex_init(&cbc_device_mgr_configuration.send_lock);
+}
diff --git a/drivers/tty/cbc/cbc_device_manager.h b/drivers/tty/cbc/cbc_device_manager.h
new file mode 100644
index 000000000000..a6f9b45da1aa
--- /dev/null
+++ b/drivers/tty/cbc/cbc_device_manager.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CBC line discipline kernel module.
+ * Handles Carrier Board Communications (CBC) protocol.
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef CBC_DEVICE_MANAGER_H
+#define CBC_DEVICE_MANAGER_H
+
+#include "cbc_memory.h"
+
+#include
+#include
+
+void cbc_kmod_devices_init(void);
+
+int cbc_register_devices(struct class *cbc_class,
+		struct cbc_memory_pool *memory);
+
+void cbc_unregister_devices(struct class *cbc_class);
+
+/*
+ * cbc_mux_configure_data_channel - Configure channels
+ * @channel_idx: Channel identifier (see cbc_channel_enumeration)
+ * @priority: Priority for this channel
+ * @data: Channel data
+ * @receive: Receive data function associated with this channel.
+ *
+ * Channels can only be configured after cbc_kmod_devices_init().
+ * This will overwrite the settings for the devices.
+ * The device will be created anyway, to allow the cbc_socket_server to
+ * work without a requirement for handling missing devices.
+ */
+void cbc_mux_configure_data_channel(u32 const channel_idx, const u8 priority,
+		void *data,
+		void (*receive)(void *data, const u16 length,
+				const u8 * const buffer));
+
+/*
+ * cbc_manager_transmit_data - Transmit data to IOC.
+ * @channel_idx: Channel identifier (see cbc_channel_enumeration)
+ * @length: Length of data
+ * @buffer: The data
+ *
+ * This is the version provided as a kernel symbol.
+ */
+void cbc_manager_transmit_data(const u32 channel_idx, const u16 length,
+		const u8 * const buffer);
+
+/*
+ * cbc_manager_transmit_buffer - Transmit a CBC buffer to the IOC.
+ * @channel_idx: Channel identifier (see cbc_channel_enumeration)
+ * @buffer: The data
+ *
+ * This is the internal version, without memcpy.
+ */
+void cbc_manager_transmit_buffer(const u32 channel_idx,
+		struct cbc_buffer *buffer);
+
+#endif /* CBC_DEVICE_MANAGER_H */
diff --git a/drivers/tty/cbc/cbc_link_checksum.c b/drivers/tty/cbc/cbc_link_checksum.c
new file mode 100644
index 000000000000..69f0ca74c3c8
--- /dev/null
+++ b/drivers/tty/cbc/cbc_link_checksum.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CBC line discipline kernel module.
+ * Handles Carrier Board Communications (CBC) protocol.
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ * + */ + +#include "cbc_types.h" +#include "cbc_link_checksum.h" + +enum cbc_error cbc_checksum_calculate(u8 length, + u8 const * const payload_data, u8 *checksum) +{ + u8 result = 0; /* Holds result of calculation */ + u8 counter = 0; + + /* Parameter validation */ + if (length == 0U) + return CBC_ERROR_PARAMETER_INCORRECT; + + if ((payload_data == NULL) || (checksum == NULL)) + return CBC_ERROR_NULL_POINTER_SUPPLIED; + + /* Perform calculation */ + do { + result += (u8) ((0x100 - *(payload_data + counter++)) & 0xFFU); + } while (counter != length); + + *checksum = result; + + return CBC_OK; +} + +enum cbc_error cbc_checksum_check(u8 length, u8 const * const payload_data, + u8 checksum, u8 *expected_checksum) +{ + u8 calculated_checksum = 0U; + enum cbc_error result = CBC_OK; + + enum cbc_error calc_result = cbc_checksum_calculate(length, + payload_data, &calculated_checksum); + + if ((calc_result == CBC_OK) && + (checksum == calculated_checksum)) + result = CBC_OK; + else + result = CBC_ERROR_CHECKSUM_MISMATCH; + + if (expected_checksum != NULL) + *expected_checksum = calculated_checksum; + + return result; +} diff --git a/drivers/tty/cbc/cbc_link_checksum.h b/drivers/tty/cbc/cbc_link_checksum.h new file mode 100644 index 000000000000..9f8f50f2cbd7 --- /dev/null +++ b/drivers/tty/cbc/cbc_link_checksum.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CBC line discipline kernel module. + * Handles Carrier Board Communications (CBC) protocol. + * + * Copyright (C) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _CBC_LINK_CHECKSUM_H_ +#define _CBC_LINK_CHECKSUM_H_ + +#include "cbc_types.h" + +/* + * cbc_checksum_calculate - Calculate checksum. + * @length: data length + * @payload_data:The data buffer. + * @checksum: Pointer to checksum. + * + * Based on summation of inverted individual byte values. + * + * Return: cbc_error if checksum cannot be generated. + */ + +enum cbc_error cbc_checksum_calculate(u8 length, + u8 const * const payload_data, u8 *checksum); + +/* + * Check checksum is valid for current data. + * @length: data length + * @payload_data: The data buffer. + * @checksum: Checksum value to check + * @expected_checksum: Expected checksum. + * + * Return: cbc_error if checksum is invalid. + */ +enum cbc_error cbc_checksum_check(u8 length, u8 const * const payload_data, + u8 checksum, u8 *expected_checksum); + +#endif /*_CBC_LINK_CHECKSUM_H_ */ diff --git a/drivers/tty/cbc/cbc_link_layer.c b/drivers/tty/cbc/cbc_link_layer.c new file mode 100644 index 000000000000..bd747aac250f --- /dev/null +++ b/drivers/tty/cbc/cbc_link_layer.c @@ -0,0 +1,482 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CBC line discipline kernel module. + * Handles Carrier Board Communications (CBC) protocol. + * + * Copyright (C) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include + +#include "cbc_core_public.h" +#include "cbc_link_checksum.h" +#include "cbc_link_layer.h" +#include "cbc_memory.h" +#include "cbc_mux_multiplexer.h" + +#define CBC_MAX_RING_BUFFER_SIZE 256 + +/* + * struct cbc_queue_control - Structure holding the queue control data. + * @next_element: Next element in received data buffer. 
+ * @current_element: Current element in received data buffer. + * + * Handles the current (and next) position in the received data circular buffer. + */ +struct cbc_queue_control { + u8 next_element; + u8 current_element; +}; + +static struct cbc_memory_pool *memory_pool; + +static u8 rx_cvh_ring_message[CBC_MAX_RING_BUFFER_SIZE]; +static struct cbc_queue_control cvh_rx_queue_control; +static u8 number_of_bytes_expected; +static u8 number_of_bytes_skipped; +static u8 ignore_all_skipped_bytes = 1; +static u8 last_rx_frame_valid = 1; +static u8 rx_sequence_counter; + +/* Transmitted data queue. */ +static struct cbc_buffer_queue tx_queue; +static u8 tx_sequence_counter; /* Sequence counter value for next tx frame */ +static u8 cbc_frame_granularity; +static struct mutex transmit_frame_mutex; + +static void cbc_link_layer_transmit_frame(void); +static void cbc_link_release_rx_data(u8 bytes_to_free); + +/* + * calculate_total_frame_length - Calculates the total length of a frame. + * @buffer: Pointer to CBC buffer. + * + * Used for outgoing frames. Calculates the total length of a frame + * depending on its payload_length. Total length is stored in + * cbc_buffer->frame_length. + */ +static void calculate_total_frame_length(struct cbc_buffer *buffer); + +/* + * cbc_link_release_rx_data - Release the specified number of bytes from the + * internal rx buffer. + * @bytes_to_free: Number of bytes to be released. + */ +static void cbc_link_release_rx_data(u8 bytes_to_free) +{ + cvh_rx_queue_control.current_element += bytes_to_free; +} + +static void calculate_total_frame_length(struct cbc_buffer *buffer) +{ + u8 frame_length_in_bytes; + + if (!buffer) + return; + + frame_length_in_bytes = buffer->payload_length + CBC_HEADER_SIZE + + CBC_CHECKSUM_SIZE; + + /* Adjust frame_length to granularity */ + if ((frame_length_in_bytes % cbc_frame_granularity) != 0) + frame_length_in_bytes += cbc_frame_granularity - + (frame_length_in_bytes % + cbc_frame_granularity); + + buffer->frame_length = frame_length_in_bytes; +} + +/* + * cbc_link_layer_transmit_frame - Transmits a frame over a UART. + */ +static void cbc_link_layer_transmit_frame(void) +{ + u8 total_len; + u8 checksum = 0U; + s32 mutex_lock_result = -1; + u32 frame_transmission_counter = CBC_MAX_FRAME_TRANSMISSION_NUMBER; + struct cbc_buffer *buffer = NULL; + + mutex_lock_result = mutex_lock_interruptible(&transmit_frame_mutex); + if (mutex_lock_result != 0) { + pr_err("cbc-core: Could not lock the transmit_frame_mutex\n"); + return; + } + + /* If queue is not empty. 
 */
+	if (tx_queue.read != tx_queue.write)
+		buffer = cbc_buffer_queue_dequeue(&tx_queue);
+
+	while (buffer && frame_transmission_counter) {
+		total_len = buffer->frame_length;
+		frame_transmission_counter--;
+
+		/* Reset sequence counter bits first */
+		buffer->data[1U] &= ~CBC_SEQUENCE_COUNTER_WIDTH_MASK;
+
+		/* Add sequence counter */
+		buffer->data[1U] |= tx_sequence_counter;
+
+		/*
+		 * Add checksum; subtract 1, as the checksum field
+		 * itself cannot be included in the calculation
+		 */
+		cbc_checksum_calculate(total_len - 1U, buffer->data,
+				&checksum);
+		buffer->data[total_len - 1U] = checksum;
+
+		/* Try to send the frame */
+		if (target_specific_send_cbc_uart_data(total_len,
+				buffer->data) != CBC_OK) {
+			/* Not sent, release anyway */
+			pr_debug("cbc-core: Could not send packet.\n");
+
+		} else {
+			/*
+			 * Data was transmitted, so increase the
+			 * sequence counter for the next frame
+			 */
+			tx_sequence_counter = (tx_sequence_counter + 1U)
+					& CBC_SEQUENCE_COUNTER_WIDTH_MASK;
+		}
+		cbc_buffer_release(buffer);
+		buffer = NULL;
+
+		/* If queue is not empty. */
+		if (tx_queue.read != tx_queue.write)
+			buffer = cbc_buffer_queue_dequeue(&tx_queue);
+	}
+
+	mutex_unlock(&transmit_frame_mutex);
+}
+
+/*
+ * cbc_link_layer_get_stored_serial_data - Get stored serial data.
+ * @out_buf: Pointer to buffer populated with serial data.
+ * @max_length: Maximum amount of data to be retrieved.
+ *
+ * Populates out_buf and returns the number of bytes read. Reading stops
+ * when the maximum length is reached.
+ *
+ * Return: The amount of data read.
+ */
+static u8 cbc_link_layer_get_stored_serial_data(u8 *out_buf,
+		u8 const max_length)
+{
+	u8 index8 = 0;
+	u8 curr = cvh_rx_queue_control.current_element;
+	u8 next = cvh_rx_queue_control.next_element;
+
+	while (((curr + index8) & 0xFFU) != next) {
+		out_buf[index8] = rx_cvh_ring_message[(curr + index8) & 0xFFU];
+		index8++;
+		/* Avoid memory overflow of target array */
+		if (index8 >= max_length)
+			break;
+	}
+	return index8;
+}
+
+/*
+ * cbc_link_layer_set_frame_granularity - Set the CBC frame granularity.
+ * @granularity: Supported values are 4, 8, 16 and 32 bytes.
+ *
+ * Return: CBC_OK, or CBC_ERROR_PARAMETER_INCORRECT if an invalid
+ * granularity is supplied.
+ */
+enum cbc_error cbc_link_layer_set_frame_granularity(u8 granularity)
+{
+	if ((granularity == 4) || (granularity == 8) || (granularity == 16) ||
+			(granularity == 32)) {
+		cbc_frame_granularity = granularity;
+		return CBC_OK;
+	} else {
+		return CBC_ERROR_PARAMETER_INCORRECT;
+	}
+}
+
+/*
+ * cbc_link_layer_init - Initialize link layer.
+ *
+ * This function shall be called once during startup, before any other
+ * function in this file.
+ */
+void cbc_link_layer_init(struct cbc_memory_pool *memory)
+{
+	cvh_rx_queue_control.next_element = 0;
+	cvh_rx_queue_control.current_element = 0;
+
+	memory_pool = memory;
+
+	number_of_bytes_expected = 0;
+	number_of_bytes_skipped = 0;
+	ignore_all_skipped_bytes = 1;
+	last_rx_frame_valid = 1;
+	rx_sequence_counter = 0;
+
+	cbc_buffer_queue_init(&tx_queue);
+	tx_sequence_counter = 0;
+
+	cbc_frame_granularity = 4;
+
+	mutex_init(&transmit_frame_mutex);
+}
+
+/*
+ * cbc_core_on_receive_cbc_serial_data - Called on reception of data on UART.
+ * @length: Number of bytes available in @rx_buf.
+ * @rx_buf: Pointer to the received data.
+ *
+ * This function is called on reception of serial data. It extracts single CBC
+ * frames from the received data. If incomplete frames are received, it waits
+ * for more data.
Buffers are added to a circular buffer rx_cvh_ring_message. + * + * Return number of bytes retrieved. + */ +u8 cbc_core_on_receive_cbc_serial_data(u8 length, const u8 *rx_buf) +{ + u8 number_of_bytes_accepted = 0; + u8 next_try_element = 0; + + while (length != 0) { + next_try_element = + (u8) ((cvh_rx_queue_control.next_element + 1U) + % CBC_MAX_RING_BUFFER_SIZE); + if (next_try_element != cvh_rx_queue_control.current_element) { + rx_cvh_ring_message[cvh_rx_queue_control.next_element] = + *rx_buf; + + cvh_rx_queue_control.next_element = next_try_element; + rx_buf++; /* next byte */ + length--; + number_of_bytes_accepted++; + } else { + /* Buffer is full, do not store additional bytes */ + return number_of_bytes_accepted; + } /* else */ + } /* while */ + return number_of_bytes_accepted; +} + +static void _cbc_link_layer_checksum(u8 *rx_cvh_frame, u8 frame_length, + struct cbc_buffer *buffer) +{ + u8 expected_checksum = 0U; + u8 checksum = 0U; + + checksum = rx_cvh_frame[frame_length - 1U]; + /* Check checksum is valid. */ + if (cbc_checksum_check(frame_length - 1U, + rx_cvh_frame, checksum, + &expected_checksum) + != CBC_OK) { + pr_err("cbc-core: Received CBC frame contains an invalid checksum\n"); + pr_err("cbc-core: found 0x%x expected 0x%x. frame discarded (length: %i), try to realign.\n", + checksum, + expected_checksum, + frame_length); + cbc_link_release_rx_data(1U); + number_of_bytes_skipped = 1; + last_rx_frame_valid = 0; + } else { + /* check the sequence counter */ + if ((rx_cvh_frame[1] + & CBC_SEQUENCE_COUNTER_WIDTH_MASK) + != rx_sequence_counter) { + pr_err("cbc-core: Found unexpected Rx sequence counter %i, expected %i\n", + rx_cvh_frame[1] + & 0x3, + rx_sequence_counter); + + /* + * Reset the sequence counter + * to the received value. + * + */ + rx_sequence_counter = + rx_cvh_frame[1] + & CBC_SEQUENCE_COUNTER_WIDTH_MASK; + } + + /* Increment seq. counter. */ + rx_sequence_counter++; + rx_sequence_counter &= + CBC_SEQUENCE_COUNTER_WIDTH_MASK; + + /* Forward frame to Mux. layer. */ + buffer->frame_length = frame_length; + cbc_mux_multiplexer_process_rx_buffer(buffer); + cbc_link_release_rx_data(frame_length); + last_rx_frame_valid = 1; + } + +} + +/* + * cbc_link_layer_rx_handler - Process data received on UART. 
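+ *
+ * On-wire frame layout, as parsed below (bit positions follow the CBC_*
+ * masks in cbc_types.h):
+ *
+ *   byte 0     CBC_SOF (0x05)
+ *   byte 1     frame length in bits 6..2 (total bytes = (value + 2) * 4),
+ *              sequence counter in bits 1..0
+ *   byte 2     multiplexer channel in bits 7..3, priority in bits 2..0
+ *   ...        payload, then fill bytes up to the frame granularity
+ *   last byte  checksum over all preceding bytes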
+ * + * Processes received serial data and parses for complete frames + */ +void cbc_link_layer_rx_handler(void) +{ + u8 bytes_avail = 0U; + u8 service_layer_frame_length = 0U; + u8 frame_length = 0U; + + struct cbc_buffer *buffer; + u8 *rx_cvh_frame; + + buffer = cbc_memory_pool_get_buffer(memory_pool); + + if (!buffer) { + pr_err("cbc-core: Out of memory.\n"); + return; + } + + bytes_avail = cbc_link_layer_get_stored_serial_data( + buffer->data, CBC_BUFFER_SIZE); + rx_cvh_frame = buffer->data; + + /* Wait for at least one frame (minimum size) */ + while ((bytes_avail >= 8U) && + (bytes_avail >= number_of_bytes_expected)) { + /* Check for start of frame */ + if (rx_cvh_frame[0] == CBC_SOF) { + /* Log skipped bytes if necessary */ + if (number_of_bytes_skipped > 0U) { + if (ignore_all_skipped_bytes == 0U) + pr_err("Skipped %d bytes.\n", + number_of_bytes_skipped); + + number_of_bytes_skipped = 0U; + ignore_all_skipped_bytes = 1U; + } + + service_layer_frame_length = (rx_cvh_frame[1] >> + CBC_FRAME_LENGTH_SHIFT) & + CBC_FRAME_LENGTH_WIDTH_MASK; + + frame_length = (service_layer_frame_length + 2U) * 4U; + + if (frame_length > CBC_MAX_TOTAL_FRAME_SIZE) { + pr_err("cbc: Received frame has illegal length (%u bytes).Frame discarded, try to realign.\n", + frame_length); + cbc_link_release_rx_data(1U); + number_of_bytes_skipped = 1U; + last_rx_frame_valid = 0U; + } else if (bytes_avail >= frame_length) { + /* ok */ + _cbc_link_layer_checksum(rx_cvh_frame, + frame_length, buffer); + number_of_bytes_expected = 0; + } else { + /* + * Wait for missing bytes to arrive, + * leave and try again. + */ + number_of_bytes_expected = frame_length; + } /* else */ + } else { + if (!(last_rx_frame_valid && (rx_cvh_frame[0] == + CBC_INTER_FRAME_FILL_BYTE))) + ignore_all_skipped_bytes = 0; + + /* + * No alignment found, + * skip current byte and try next one. + */ + cbc_link_release_rx_data(1U); + number_of_bytes_expected = 0; + ++number_of_bytes_skipped; + } /* else */ + + /* Process rx_buffer increases ref count, + * so always release here. + */ + cbc_buffer_release(buffer); + buffer = cbc_memory_pool_get_buffer(memory_pool); + + if (!buffer) { + pr_err("cbc-core: Out of memory.\n"); + rx_cvh_frame = NULL; + return; + } + bytes_avail = + cbc_link_layer_get_stored_serial_data( + buffer->data, CBC_BUFFER_SIZE); + rx_cvh_frame = buffer->data; + } /* while */ + + cbc_buffer_release(buffer); + +} + +/* + * cbc_link_layer_tx_handler - Triggers pending data transmission. + */ +enum cbc_error cbc_link_layer_tx_handler(void) +{ + cbc_link_layer_transmit_frame(); + + return CBC_OK; +} + +/* + * cbc_link_layer_assemble_buffer_for_transmission - Add frame to queue for + * transmission. + * @mux: CBC channel frame is associated with. + * @priority: Priority for frame. + * @buffer: Frame data. + * + * Generate a CBC frame from supplied data. + * Fills in CBC header details (adds start of frame identifier, frame length + * and channel. Also adds padding. Buffer is added to queue and transmission + * is triggered. + * + * Return CBC error code (CBC_OK if frame assembled successfully). 
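+ *
+ * For example, with the default 4-byte granularity a 5-byte payload gives
+ * 3 (header) + 5 (payload) + 1 (checksum) = 9 bytes, padded to a
+ * frame_length of 12; byte 1 then carries (12 / 4) - 2 = 1 in bits 6..2.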
+ */ +enum cbc_error cbc_link_layer_assemble_buffer_for_transmission(u8 mux, + u8 priority, struct cbc_buffer *buffer) +{ + u8 frame_length_in_bytes; + enum cbc_error result = CBC_OK; + u32 i; + + if (!buffer) + return CBC_ERROR_NULL_POINTER_SUPPLIED; + + calculate_total_frame_length(buffer); + frame_length_in_bytes = buffer->frame_length; + + /* Fill in padding */ + for (i = buffer->payload_length + CBC_HEADER_SIZE; + i < frame_length_in_bytes; i++) + buffer->data[i] = 0xFF; + + /* Fill in cbc header. */ + buffer->data[0] = CBC_SOF; /* set start of frame byte */ + buffer->data[1] = ((((frame_length_in_bytes - 4U - 1U) / 4U) & + CBC_FRAME_LENGTH_WIDTH_MASK) << + CBC_FRAME_LENGTH_SHIFT); + buffer->data[2] = ((mux & CBC_MULTIPLEXER_WIDTH_MASK) << + CBC_MULTIPLEXER_SHIFT) | + (priority & CBC_PRIORITY_WIDTH_MASK); + + /* + * If transmission is done in a different thread, + * check for queue full first. + */ + cbc_buffer_queue_enqueue(&tx_queue, buffer); + cbc_buffer_increment_ref(buffer); + + /* Trigger transmission */ + cbc_link_layer_transmit_frame(); + + return result; +} diff --git a/drivers/tty/cbc/cbc_link_layer.h b/drivers/tty/cbc/cbc_link_layer.h new file mode 100644 index 000000000000..ba70a99b04e6 --- /dev/null +++ b/drivers/tty/cbc/cbc_link_layer.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CBC line discipline kernel module. + * Handles Carrier Board Communications (CBC) protocol. + * + * Copyright (C) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef CBC_LINK_LAYER_H_ +#define CBC_LINK_LAYER_H_ + +#include "cbc_types.h" +#include "cbc_memory.h" + +void cbc_link_layer_init(struct cbc_memory_pool *memory); + +enum cbc_error cbc_link_layer_set_frame_granularity(u8 granularity); + +enum cbc_error cbc_link_layer_tx_handler(void); + +void cbc_link_layer_rx_handler(void); + +u8 cbc_core_on_receive_cbc_serial_data(u8 length, const u8 *rx_buf); + +enum cbc_error cbc_link_layer_assemble_frame_for_transmission(u8 mux, + u8 priority, u8 service_frame_length, + u8 const * const raw_buffer); + +enum cbc_error cbc_link_layer_assemble_buffer_for_transmission(u8 mux, + u8 priority, struct cbc_buffer *buffer); + +#endif /*CBC_LINK_LAYER_H_ */ + diff --git a/drivers/tty/cbc/cbc_memory.c b/drivers/tty/cbc/cbc_memory.c new file mode 100644 index 000000000000..69cf65e8901a --- /dev/null +++ b/drivers/tty/cbc/cbc_memory.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CBC line discipline kernel module. + * Handles Carrier Board Communications (CBC) protocol. + * + * Copyright (C) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include + +#include "cbc_memory.h" + +bool cbc_memory_pool_try_free(struct cbc_memory_pool *pool) +{ + u32 i = 0; + bool allfree = true; + int tmp; + + if (!pool) + return 0; + + mutex_lock(&pool->lock); + + for (i = 0; i < (pool->num_blocks); i++) { + tmp = atomic_read(&pool->pool[i].refcount); + if (tmp > 0) { + pr_err("Buffer %i was not freed. 
(%i refs)\n",
+				i, tmp);
+			allfree = false;
+		}
+	}
+
+	mutex_unlock(&pool->lock);
+
+	if (allfree)
+		kfree(pool);
+
+	return allfree;
+}
+
+struct cbc_memory_pool *cbc_memory_pool_create(const u16 num_blocks)
+{
+	size_t size;
+	struct cbc_memory_pool *new_pool;
+	u32 i = 0;
+
+	/* Check we have a valid queue length before we go any further. */
+	BUILD_BUG_ON(CBC_QUEUE_LENGTH & (CBC_QUEUE_LENGTH - 1));
+
+	size = sizeof(struct cbc_memory_pool) +
+			(sizeof(struct cbc_buffer) * num_blocks);
+
+	new_pool = kmalloc(size, GFP_KERNEL);
+	if (!new_pool)
+		return NULL;
+
+	new_pool->num_blocks = num_blocks;
+	mutex_init(&new_pool->lock);
+
+	for (i = 0; i < num_blocks; i++)
+		atomic_set(&new_pool->pool[i].refcount, 0);
+
+	return new_pool;
+}
+
+struct cbc_buffer *cbc_memory_pool_get_buffer(struct cbc_memory_pool *pool)
+{
+	u32 i = 0;
+	struct cbc_buffer *buffer = NULL;
+	int tmp;
+
+	if (pool) {
+		mutex_lock(&pool->lock);
+
+		for (; i < pool->num_blocks; i++) {
+			tmp = atomic_read(&pool->pool[i].refcount);
+			if (tmp == 0) {
+				/* Take the first reference on this buffer. */
+				atomic_inc(&pool->pool[i].refcount);
+				buffer = &pool->pool[i];
+				buffer->payload_length = 0;
+				buffer->frame_length = 0;
+				break;
+			}
+		}
+		mutex_unlock(&pool->lock);
+	}
+	return buffer;
+}
+
+void cbc_buffer_release(struct cbc_buffer *buffer)
+{
+	int tmp;
+
+	if (!buffer)
+		return;
+
+	tmp = atomic_dec_return(&buffer->refcount);
+	if (tmp == 0)
+		memset(buffer->data, 0xCD, CBC_BUFFER_SIZE);
+}
+
+void cbc_buffer_increment_ref(struct cbc_buffer *buffer)
+{
+	if (buffer)
+		atomic_inc(&buffer->refcount);
+}
+
+void cbc_buffer_queue_init(struct cbc_buffer_queue *queue)
+{
+	queue->write = 0;
+	queue->read = 0;
+}
+
+int cbc_buffer_queue_enqueue(struct cbc_buffer_queue *queue,
+		struct cbc_buffer *buffer)
+{
+	if (!queue || !buffer)
+		return 0;
+
+	/* Compare modulo 256 so the u8 indices wrap correctly. */
+	if ((u8) (queue->read + CBC_QUEUE_LENGTH) == queue->write) {
+		pr_err("cbc buffer queue full\n");
+		return 0;
+	}
+
+	queue->queue[queue->write & CBC_QUEUE_BM] = buffer;
+	queue->write++;
+	return 1;
+}
+
+struct cbc_buffer *cbc_buffer_queue_dequeue(struct cbc_buffer_queue *queue)
+{
+	struct cbc_buffer *buffer = NULL;
+
+	if (!queue)
+		return buffer;
+
+	if (queue->read == queue->write) {
+		pr_err("cbc buffer queue: dequeue while empty.\n");
+		return buffer;
+	}
+
+	buffer = queue->queue[queue->read & CBC_QUEUE_BM];
+	queue->queue[queue->read & CBC_QUEUE_BM] = NULL;
+	queue->read++;
+
+	return buffer;
+}
diff --git a/drivers/tty/cbc/cbc_memory.h b/drivers/tty/cbc/cbc_memory.h
new file mode 100644
index 000000000000..d6e34fcd48ca
--- /dev/null
+++ b/drivers/tty/cbc/cbc_memory.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CBC line discipline kernel module.
+ * Handles Carrier Board Communications (CBC) protocol.
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef CBC_MEMORY_H_
+#define CBC_MEMORY_H_
+
+#include
+#include
+#include
+
+#include "cbc_types.h"
+
+#define CBC_BUFFER_SIZE CBC_MAX_TOTAL_FRAME_SIZE
+
+/*
+ * struct cbc_buffer - Represents a single CBC frame buffer.
+ * @frame_length: Total length including headers and checksum.
+ * @payload_length: Length of payload without fill-bytes, including the raw
+ *		header if present.
+ * @refcount: Reference count, incremented/decremented when queueing and
+ *		de-queueing the buffer.
+ * @data: Contents of buffer.
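+ *
+ * Typical lifecycle (a sketch of the reference counting implemented in
+ * cbc_memory.c):
+ *
+ *   buf = cbc_memory_pool_get_buffer(pool);  refcount 0 -> 1
+ *   cbc_buffer_increment_ref(buf);           one extra ref per queue holding it
+ *   cbc_buffer_release(buf);                 drop a ref; 0 marks the buffer free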
+ */
+struct cbc_buffer {
+	u16 frame_length;
+	u16 payload_length;
+	atomic_t refcount;
+	u8 data[CBC_BUFFER_SIZE];
+};
+
+/*
+ * struct cbc_memory_pool - Memory pool for cbc_buffer.
+ * @num_blocks: Number of blocks allocated (CBC queue length * maximum
+ *		number of channels).
+ * @lock: Mutex to lock memory operations.
+ * @pool: The actual pool of CBC buffers.
+ *
+ * The cbc_memory_pool holds a number of cbc_buffers with reference counting.
+ */
+struct cbc_memory_pool {
+	u16 num_blocks;
+	struct mutex lock;
+	struct cbc_buffer pool[0];
+};
+
+/* CBC queue length has to be a power of 2 */
+#define CBC_QUEUE_LENGTH 16
+#define CBC_QUEUE_BM (CBC_QUEUE_LENGTH - 1)
+
+/*
+ * struct cbc_buffer_queue - Circular buffer for cbc_buffer pointers.
+ * @queue: The queue of CBC buffer pointers.
+ * @write: Head of queue.
+ * @read: Tail of queue.
+ *
+ * Reference count handling is not done by this queue.
+ */
+struct cbc_buffer_queue {
+	struct cbc_buffer *queue[CBC_QUEUE_LENGTH];
+	u8 write;
+	u8 read;
+};
+
+/*
+ * cbc_memory_pool_create - Create memory pool of CBC buffers.
+ * @num_blocks: Size of the memory pool to create (based on the queue size
+ *		and the maximum number of channels).
+ *
+ * Uses kmalloc to create a new cbc_memory_pool with the given number of
+ * cbc_buffers.
+ */
+struct cbc_memory_pool *cbc_memory_pool_create(const u16 num_blocks);
+
+/*
+ * cbc_memory_pool_try_free - Free the pool if no buffer is in use.
+ * @pool: Pointer to memory pool.
+ *
+ * Ensure no new buffers are requested while calling this.
+ *
+ * Return: True if the pool has been freed, false if not.
+ */
+bool cbc_memory_pool_try_free(struct cbc_memory_pool *pool);
+
+/*
+ * cbc_memory_pool_get_buffer - Return a free CBC buffer if available.
+ * @pool: Pointer to memory pool.
+ *
+ * Return: Pointer to a buffer if one is available, NULL otherwise.
+ */
+struct cbc_buffer *cbc_memory_pool_get_buffer(struct cbc_memory_pool *pool);
+
+/*
+ * cbc_buffer_release - Release CBC buffer (if not in use elsewhere).
+ * @buffer: Buffer to release.
+ *
+ * Decreases the reference count. A reference count of 0 marks this buffer as
+ * free.
+ */
+void cbc_buffer_release(struct cbc_buffer *buffer);
+
+/*
+ * cbc_buffer_increment_ref - Increase the reference count for a CBC buffer.
+ * @buffer: Buffer to increment the ref count for.
+ */
+void cbc_buffer_increment_ref(struct cbc_buffer *buffer);
+
+/*
+ * cbc_buffer_queue_init - Initialize a cbc_buffer_queue.
+ * @queue: CBC buffer queue to initialise.
+ *
+ * Initialises head and tail.
+ */
+void cbc_buffer_queue_init(struct cbc_buffer_queue *queue);
+
+/*
+ * cbc_buffer_queue_enqueue - Add CBC buffer to a queue.
+ * @queue: CBC buffer queue.
+ * @buffer: Buffer to add.
+ *
+ * Enqueues a buffer into the queue. If the queue is full, the buffer is not
+ * enqueued and 0 is returned.
+ * Does not do reference count handling.
+ */
+int cbc_buffer_queue_enqueue(struct cbc_buffer_queue *queue,
+		struct cbc_buffer *buffer);
+
+/*
+ * cbc_buffer_queue_dequeue - Remove buffer from the head of the queue.
+ * @queue: CBC buffer queue.
+ *
+ * Dequeues a buffer. If the queue is empty, NULL is returned.
+ * Does not do reference count handling.
+ */
+struct cbc_buffer *cbc_buffer_queue_dequeue(struct cbc_buffer_queue *queue);
+
+#endif /* CBC_MEMORY_H_ */
diff --git a/drivers/tty/cbc/cbc_mux_multiplexer.c b/drivers/tty/cbc/cbc_mux_multiplexer.c
new file mode 100644
index 000000000000..4439e34f5142
--- /dev/null
+++ b/drivers/tty/cbc/cbc_mux_multiplexer.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CBC line discipline kernel module.
+ * Handles Carrier Board Communications (CBC) protocol.
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "cbc_link_layer.h"
+#include "cbc_mux_multiplexer.h"
+
+static struct cbc_mux_channel_configuration *cbc_mux_configuration;
+
+void cbc_mux_multiplexer_setup(struct cbc_mux_channel_configuration *config)
+{
+	cbc_mux_configuration = config;
+}
+
+/*
+ * cbc_mux_multiplexer_process_rx_buffer - Process a received buffer.
+ * @cbc_buffer: CBC buffer to process.
+ *
+ * If the frame is valid, and the channel is valid, the contents of the CBC
+ * buffer are passed to the channel's receive function.
+ */
+void cbc_mux_multiplexer_process_rx_buffer(struct cbc_buffer *cbc_buffer)
+{
+	u8 mux_idx = 0U;
+	struct cbc_mux_channel_configuration *config;
+
+	config = cbc_mux_configuration;
+	if (!cbc_buffer || cbc_buffer->frame_length < CBC_HEADER_SIZE)
+		return;
+
+	mux_idx = (u8) ((cbc_buffer->data[2] >> CBC_MULTIPLEXER_SHIFT)
+			& CBC_MULTIPLEXER_WIDTH_MASK) & 0xFFU;
+
+	if (config) {
+		struct cbc_mux_channel *channel;
+
+		channel = &config->cbc_mux_channel_list[mux_idx];
+
+		if (channel) {
+			if (channel->buffer_receive) {
+				channel->buffer_receive(channel->data,
+						cbc_buffer);
+			} else if (channel->data_receive) {
+				channel->data_receive(channel->data,
+					cbc_buffer->payload_length,
+					&cbc_buffer->data[CBC_HEADER_SIZE]);
+				cbc_buffer_release(cbc_buffer);
+			}
+		}
+
+		/* Send to debug device */
+		channel = &config->cbc_mux_channel_list[CBC_CHANNEL_DEBUG_IN];
+		if (channel && channel->buffer_receive)
+			channel->buffer_receive(channel->data, cbc_buffer);
+	}
+}
+
+/*
+ * cbc_mux_multiplexer_transmit_buffer - Send a buffer.
+ * @channel_idx: Channel identifier.
+ * @cbc_buffer: CBC buffer to transmit.
+ *
+ * Assembles CBC buffer for transmission.
+ *
+ * Return: CBC error.
+ */
+enum cbc_error cbc_mux_multiplexer_transmit_buffer(
+		enum cbc_channel_enumeration channel_idx,
+		struct cbc_buffer *cbc_buffer)
+{
+	enum cbc_error result = CBC_OK;
+	struct cbc_mux_channel *channel;
+	struct cbc_mux_channel_configuration *config;
+
+	config = cbc_mux_configuration;
+
+	/*
+	 * Transmit will release the buffer, so make sure a reference is held
+	 * until it is enqueued in the debug device.
+	 */
+	cbc_buffer_increment_ref(cbc_buffer);
+
+	if (config) {
+		cbc_link_layer_assemble_buffer_for_transmission(
+			(u8) channel_idx,
+			config->cbc_mux_channel_list[channel_idx].priority,
+			cbc_buffer);
+
+		/* Send to debug device */
+		channel = &config->cbc_mux_channel_list[CBC_CHANNEL_DEBUG_OUT];
+		if (channel && channel->buffer_receive)
+			channel->buffer_receive(channel->data, cbc_buffer);
+	}
+
+	cbc_buffer_release(cbc_buffer);
+
+	return result;
+}
+
+/*
+ * cbc_mux_multiplexer_set_priority - Set priority for the specified channel.
+ * @channel_num: Channel ID.
+ * @new_priority: New priority value.
+ */
+void cbc_mux_multiplexer_set_priority(u32 channel_num, u8 new_priority)
+{
+	struct cbc_mux_channel_configuration *config;
+
+	config = cbc_mux_configuration;
+	if (config && (channel_num < CBC_CHANNEL_MAX_NUMBER))
+		config->cbc_mux_channel_list[channel_num].priority =
+				new_priority;
+}
+
+/*
+ * cbc_mux_multiplexer_get_priority - Get priority for the specified channel.
+ * @channel_num: Channel ID.
+ *
+ * Return: Priority for this channel.
+ */
+u8 cbc_mux_multiplexer_get_priority(u32 channel_num)
+{
+	struct cbc_mux_channel_configuration *config;
+
+	config = cbc_mux_configuration;
+	if (config && (channel_num < CBC_CHANNEL_MAX_NUMBER))
+		return config->cbc_mux_channel_list[channel_num].priority;
+	return 0;
+}
diff --git a/drivers/tty/cbc/cbc_mux_multiplexer.h b/drivers/tty/cbc/cbc_mux_multiplexer.h
new file mode 100644
index 000000000000..aa24777d6a13
--- /dev/null
+++ b/drivers/tty/cbc/cbc_mux_multiplexer.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CBC line discipline kernel module.
+ * Handles Carrier Board Communications (CBC) protocol.
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef CBC_MUX_MULTIPLEXER_H_
+#define CBC_MUX_MULTIPLEXER_H_
+
+#include "cbc_types.h"
+#include "cbc_memory.h"
+
+struct cbc_mux_channel {
+	void (*buffer_receive)(void *data, struct cbc_buffer *cbc_buffer);
+	void (*data_receive)(void *data, const u16 length,
+			const u8 * const buffer);
+	void *data;
+	u8 priority;
+};
+
+/*
+ * Channel configuration struct.
+ *
+ * Priorities can be set via ioctl or in sysfs.
+ * Recommended values for the priorities:
+ * - CBC_CHANNEL_PMT: 6
+ * - CBC_CHANNEL_SYSTEM_CONTROL: 6
+ * - CBC_CHANNEL_PROCESS_DATA: 3
+ * - CBC_CHANNEL_DIAGNOSTICS: 3
+ * - CBC_CHANNEL_SW_TRANSFER: 2
+ * - CBC_CHANNEL_DEBUG: 6
+ * - CBC_CHANNEL_LINDA: 6
+ * - default: 3
+ *
+ */
+struct cbc_mux_channel_configuration {
+	struct cbc_mux_channel cbc_mux_channel_list[CBC_CHANNEL_MAX_NUMBER];
+};
+
+/* Pass configuration to Multiplexer. */
+void cbc_mux_multiplexer_setup(struct cbc_mux_channel_configuration *config);
+
+/* Process buffer received over UART via link layer.
*/ +void cbc_mux_multiplexer_process_rx_buffer(struct cbc_buffer *cbc_buffer); + +enum cbc_error cbc_mux_multiplexer_transmit_buffer( + enum cbc_channel_enumeration channel, + struct cbc_buffer *cbc_buffer); + +void cbc_mux_multiplexer_set_priority(u32 channel_num, u8 new_priority); + +u8 cbc_mux_multiplexer_get_priority(u32 channel_num); + +#endif /*CBC_MUX_MULTIPLEXER_H_ */ + diff --git a/drivers/tty/cbc/cbc_types.h b/drivers/tty/cbc/cbc_types.h new file mode 100644 index 000000000000..28d6877ff36e --- /dev/null +++ b/drivers/tty/cbc/cbc_types.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CBC line discipline kernel module. + * Handles Carrier Board Communications (CBC) protocol. + * + * Copyright (C) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef CBC_TYPES_H_ +#define CBC_TYPES_H_ + +#include + + +/* Start of frame indicator. */ +#define CBC_SOF 0x05 +/* Fill byte allowed between CBC frames */ +#define CBC_INTER_FRAME_FILL_BYTE 0xFF + +/*Width bit mask for priority in Mux. layer */ +#define CBC_PRIORITY_WIDTH_MASK GENMASK(2, 0) +/* Width bit mask for multiplexer in Mux layer */ +#define CBC_MULTIPLEXER_WIDTH_MASK GENMASK(4, 0) +/* Shift multiplexer in Mux layer */ +#define CBC_MULTIPLEXER_SHIFT 0x03 + +/* Width bit mask for sequence counter in link layer */ +#define CBC_SEQUENCE_COUNTER_WIDTH_MASK GENMASK(1, 0) +/* Width bit mask for frame length link layer */ +#define CBC_FRAME_LENGTH_WIDTH_MASK GENMASK(4, 0) +/* Maximum possible frame size that can be specified + * with the IAS_CBC_FRAME_LENGTH_WIDTH_MASK + */ +#define CBC_MAX_POSSIBLE_FRAME_SIZE ((CBC_FRAME_LENGTH_WIDTH_MASK + 2) * 4) +/* Frame shift length in link layer */ +#define CBC_FRAME_LENGTH_SHIFT 0x02 + +#define CBC_HEADER_SIZE 0x03 +#define CBC_RAWHEADER_SIZE 0x03 +#define CBC_CHECKSUM_SIZE 1 + +/* + * Maximum size of a CBC frame. This includes the + * IAS_CBC_MAX_SERVICE_FRAME_SIZE, 4 bytes of CBC protocol + * overhead and up to 28 additional bytes for padding to + * 32 byte granularity. + */ +#define CBC_MAX_TOTAL_FRAME_SIZE 96 + +/* Enumeration of supported CBC channels */ +enum cbc_channel_enumeration { + CBC_CHANNEL_PMT = 0, + CBC_CHANNEL_LIFECYCLE = 1, + CBC_CHANNEL_SIGNALS = 2, + CBC_CHANNEL_EARLY_SIGNALS = 3, + CBC_CHANNEL_DIAGNOSIS = 4, + CBC_CHANNEL_DLT = 5, + CBC_CHANNEL_LINDA = 6, + CBC_CHANNEL_OEM_RAW_CHANNEL_0 = 7, + CBC_CHANNEL_OEM_RAW_CHANNEL_1 = 8, + CBC_CHANNEL_OEM_RAW_CHANNEL_2 = 9, + CBC_CHANNEL_OEM_RAW_CHANNEL_3 = 10, + CBC_CHANNEL_OEM_RAW_CHANNEL_4 = 11, + CBC_CHANNEL_OEM_RAW_CHANNEL_5 = 12, + CBC_CHANNEL_OEM_RAW_CHANNEL_6 = 13, + CBC_CHANNEL_OEM_RAW_CHANNEL_7 = 14, + CBC_CHANNEL_OEM_RAW_CHANNEL_8 = 15, + CBC_CHANNEL_OEM_RAW_CHANNEL_9 = 16, + CBC_CHANNEL_OEM_RAW_CHANNEL_10 = 17, + CBC_CHANNEL_OEM_RAW_CHANNEL_11 = 18, + CBC_CHANNEL_DEBUG_OUT = 19, + CBC_CHANNEL_DEBUG_IN = 20, + CBC_CHANNEL_MAX_NUMBER = 21 +}; + +/* + * CBC load monitoring (transmit/receive throughput, errors etc.) can + * be compiled in using the following define. 
diff --git a/drivers/tty/cbc/cbc_types.h b/drivers/tty/cbc/cbc_types.h
new file mode 100644
index 000000000000..28d6877ff36e
--- /dev/null
+++ b/drivers/tty/cbc/cbc_types.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CBC line discipline kernel module.
+ * Handles Carrier Board Communications (CBC) protocol.
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef CBC_TYPES_H_
+#define CBC_TYPES_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+/* Start of frame indicator. */
+#define CBC_SOF 0x05
+/* Fill byte allowed between CBC frames */
+#define CBC_INTER_FRAME_FILL_BYTE 0xFF
+
+/* Width bit mask for priority in mux layer */
+#define CBC_PRIORITY_WIDTH_MASK GENMASK(2, 0)
+/* Width bit mask for multiplexer in mux layer */
+#define CBC_MULTIPLEXER_WIDTH_MASK GENMASK(4, 0)
+/* Shift for multiplexer in mux layer */
+#define CBC_MULTIPLEXER_SHIFT 0x03
+
+/* Width bit mask for sequence counter in link layer */
+#define CBC_SEQUENCE_COUNTER_WIDTH_MASK GENMASK(1, 0)
+/* Width bit mask for frame length in link layer */
+#define CBC_FRAME_LENGTH_WIDTH_MASK GENMASK(4, 0)
+/* Maximum possible frame size that can be specified
+ * with CBC_FRAME_LENGTH_WIDTH_MASK
+ */
+#define CBC_MAX_POSSIBLE_FRAME_SIZE ((CBC_FRAME_LENGTH_WIDTH_MASK + 2) * 4)
+/* Frame length shift in link layer */
+#define CBC_FRAME_LENGTH_SHIFT 0x02
+
+#define CBC_HEADER_SIZE 0x03
+#define CBC_RAWHEADER_SIZE 0x03
+#define CBC_CHECKSUM_SIZE 1
+
+/*
+ * Maximum size of a CBC frame. This includes the
+ * CBC_MAX_SERVICE_FRAME_SIZE, 4 bytes of CBC protocol
+ * overhead and up to 28 additional bytes for padding to
+ * 32 byte granularity.
+ */
+#define CBC_MAX_TOTAL_FRAME_SIZE 96
+
+/* Enumeration of supported CBC channels */
+enum cbc_channel_enumeration {
+	CBC_CHANNEL_PMT = 0,
+	CBC_CHANNEL_LIFECYCLE = 1,
+	CBC_CHANNEL_SIGNALS = 2,
+	CBC_CHANNEL_EARLY_SIGNALS = 3,
+	CBC_CHANNEL_DIAGNOSIS = 4,
+	CBC_CHANNEL_DLT = 5,
+	CBC_CHANNEL_LINDA = 6,
+	CBC_CHANNEL_OEM_RAW_CHANNEL_0 = 7,
+	CBC_CHANNEL_OEM_RAW_CHANNEL_1 = 8,
+	CBC_CHANNEL_OEM_RAW_CHANNEL_2 = 9,
+	CBC_CHANNEL_OEM_RAW_CHANNEL_3 = 10,
+	CBC_CHANNEL_OEM_RAW_CHANNEL_4 = 11,
+	CBC_CHANNEL_OEM_RAW_CHANNEL_5 = 12,
+	CBC_CHANNEL_OEM_RAW_CHANNEL_6 = 13,
+	CBC_CHANNEL_OEM_RAW_CHANNEL_7 = 14,
+	CBC_CHANNEL_OEM_RAW_CHANNEL_8 = 15,
+	CBC_CHANNEL_OEM_RAW_CHANNEL_9 = 16,
+	CBC_CHANNEL_OEM_RAW_CHANNEL_10 = 17,
+	CBC_CHANNEL_OEM_RAW_CHANNEL_11 = 18,
+	CBC_CHANNEL_DEBUG_OUT = 19,
+	CBC_CHANNEL_DEBUG_IN = 20,
+	CBC_CHANNEL_MAX_NUMBER = 21
+};
+
+/* Enumeration containing available errors */
+enum cbc_error {
+	CBC_OK = 0,
+	CBC_ERROR_QUEUE_UNINITIALIZED = 1,
+	CBC_ERROR_QUEUE_FULL = 2,
+	CBC_ERROR_QUEUE_EMPTY = 3,
+	CBC_ERROR_PARAMETER_INCORRECT = 4,
+	CBC_ERROR_NULL_POINTER_SUPPLIED = 5,
+	CBC_ERROR_CHECKSUM_MISMATCH = 6,
+	CBC_ERROR_UNKNOWN_CHANNEL = 7,
+	CBC_ERROR_OUT_OF_QUEUE_MEMORY = 8,
+	CBC_ERROR_NO_DATA_IN_QUEUE_MEMORY = 9,
+	CBC_ERROR_NOT_PROCESSED = 10,
+	CBC_ERROR_TP_FRAME_NOT_SUPPORTED = 11,
+	CBC_ERROR_TP_FRAME_NOT_EXPECTED = 12,
+	CBC_ERROR_BUSY_TRY_AGAIN = 13,
+	CBC_ERROR_TEC = 14,
+	CBC_ERROR_UNKNOWN_PERIPHERAL_ID = 15,
+	CBC_ERROR_HW_NO_WRITE_ACCESS = 16,
+	CBC_ERROR_HW_NO_READ_ACCESS = 17,
+	CBC_ERROR_NOT_IMPLEMENTED = 18,
+	CBC_ERROR_GENERAL_ERROR = 19,
+	CBC_ERROR_UDP_GET_ADR = 20,
+	CBC_ERROR_UDP_OPEN_SOCKET = 21,
+	CBC_ERROR_UDP_CONNECTION_REFUSED = 22,
+	CBC_ERROR_UDP_CLOSE_INVALID_ID = 23,
+	CBC_ERROR_UDP_CLOSE_ERR = 24,
+	CBC_ERROR_DTC_LIST_EMPTY = 25,
+	CBC_ERROR_INCORRECT_VERSION = 26,
+	CBC_ERROR_POWER_SUPPLY_ERROR = 27,
+	CBC_ERROR_PARAMETER_INVALID = 28,
+	E_CBC_ERROR_NOT_INITIALIZED = 29,
+	CBC_ERROR_NUMBER_OF_ERRORS = 30,
+	CBC_ERROR_CUSTOMER_IMPLEMENTATION_MISSING = 31
+};
+
+/*
+ * Maximum number of CBC frames transmitted in each cyclic call of the CBC
+ * core.
+ */
+#define CBC_MAX_FRAME_TRANSMISSION_NUMBER 50
+
+/*
+ * Enumeration indicating whether raw channel uses protocol or handles raw
+ * data.
+ */
+enum cbc_service_raw_channel_svc {
+	CBC_RAW_CHANNEL_USE_TRANSPORT_PROTOCOL = 1,
+	CBC_RAW_CHANNEL_DIRECT_TRANSPORT = 2
+};
+#endif /* CBC_TYPES_H_ */
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index 381e981dee06..85a500ddbcaa 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -442,6 +442,7 @@ static int goldfish_tty_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_GOLDFISH_TTY_EARLY_CONSOLE
 static void gf_early_console_putchar(struct uart_port *port, int ch)
 {
 	__raw_writel(ch, port->membase);
@@ -465,6 +466,7 @@ static int __init gf_earlycon_setup(struct earlycon_device *device,
 }
 
 OF_EARLYCON_DECLARE(early_gf_tty, "google,goldfish-tty", gf_earlycon_setup);
+#endif
 
 static const struct of_device_id goldfish_tty_of_match[] = {
 	{ .compatible = "google,goldfish-tty", },
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 16331a90c1e8..9da8474fe50a 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -332,7 +332,6 @@ static void udbg_init_opal_common(void)
 	udbg_putc = udbg_opal_putc;
 	udbg_getc = udbg_opal_getc;
 	udbg_getc_poll = udbg_opal_getc_poll;
-	tb_ticks_per_usec = 0x200; /* Make udelay not suck */
 }
 
 void __init hvc_opal_init_early(void)
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 0a3c9665e015..f46bd1af7a10 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -133,6 +133,9 @@ struct gsm_dlci {
 	struct mutex mutex;
 
 	/* Link layer */
+	int mode;
+#define DLCI_MODE_ABM	0	/* Normal Asynchronous Balanced Mode */
+#define DLCI_MODE_ADM	1	/* Asynchronous Disconnected Mode */
 	spinlock_t lock;	/* Protects the internal state */
 	struct timer_list t1;	/* Retransmit timer for SABM and UA */
 	int retries;
@@ -1376,7 +1379,13 @@ static struct gsm_control *gsm_control_send(struct gsm_mux *gsm,
 	ctrl->data = data;
 	ctrl->len = clen;
 	gsm->pending_cmd = ctrl;
-	gsm->cretries = gsm->n2;
+
+	/* If DLCI0 is in ADM mode skip retries, it won't respond */
+	if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
+		gsm->cretries = 1;
+	else
+		gsm->cretries = gsm->n2;
+
 	mod_timer(&gsm->t2_timer, jiffies +
gsm->t2 * HZ / 100); gsm_control_transmit(gsm, ctrl); spin_unlock_irqrestore(&gsm->control_lock, flags); @@ -1463,6 +1472,10 @@ static void gsm_dlci_open(struct gsm_dlci *dlci) * in which case an opening port goes back to closed and a closing port * is simply put into closed state (any further frames from the other * end will get a DM response) + * + * Some control dlci can stay in ADM mode with other dlci working just + * fine. In that case we can just keep the control dlci open after the + * DLCI_OPENING retries time out. */ static void gsm_dlci_t1(unsigned long data) @@ -1476,8 +1489,16 @@ static void gsm_dlci_t1(unsigned long data) if (dlci->retries) { gsm_command(dlci->gsm, dlci->addr, SABM|PF); mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); - } else + } else if (!dlci->addr && gsm->control == (DM | PF)) { + if (debug & 8) + pr_info("DLCI %d opening in ADM mode.\n", + dlci->addr); + dlci->mode = DLCI_MODE_ADM; + gsm_dlci_open(dlci); + } else { gsm_dlci_close(dlci); + } + break; case DLCI_CLOSING: dlci->retries--; @@ -1495,8 +1516,8 @@ static void gsm_dlci_t1(unsigned long data) * @dlci: DLCI to open * * Commence opening a DLCI from the Linux side. We issue SABM messages - * to the modem which should then reply with a UA, at which point we - * will move into open state. Opening is done asynchronously with retry + * to the modem which should then reply with a UA or ADM, at which point + * we will move into open state. Opening is done asynchronously with retry * running off timers and the responses. */ @@ -2864,11 +2885,22 @@ static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk) static int gsm_carrier_raised(struct tty_port *port) { struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); + struct gsm_mux *gsm = dlci->gsm; + /* Not yet open so no carrier info */ if (dlci->state != DLCI_OPEN) return 0; if (debug & 2) return 1; + + /* + * Basic mode with control channel in ADM mode may not respond + * to CMD_MSC at all and modem_rx is empty. + */ + if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM && + !dlci->modem_rx) + return 1; + return dlci->modem_rx & TIOCM_CD; } diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index bdf0e6e89991..0475f9685a41 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -126,6 +126,8 @@ struct n_tty_data { struct mutex output_lock; }; +#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1)) + static inline size_t read_cnt(struct n_tty_data *ldata) { return ldata->read_head - ldata->read_tail; @@ -143,6 +145,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i) static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i) { + smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). 
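+	 * Pairs with it so that once the caller has observed the updated
+	 * echo_head/echo_commit index, the byte stored before that update
+	 * is guaranteed to be visible here as well.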
*/ return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)]; } @@ -318,9 +321,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata) static void reset_buffer_flags(struct n_tty_data *ldata) { ldata->read_head = ldata->canon_head = ldata->read_tail = 0; - ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0; ldata->commit_head = 0; - ldata->echo_mark = 0; ldata->line_start = 0; ldata->erasing = 0; @@ -619,12 +620,19 @@ static size_t __process_echoes(struct tty_struct *tty) old_space = space = tty_write_room(tty); tail = ldata->echo_tail; - while (ldata->echo_commit != tail) { + while (MASK(ldata->echo_commit) != MASK(tail)) { c = echo_buf(ldata, tail); if (c == ECHO_OP_START) { unsigned char op; int no_space_left = 0; + /* + * Since add_echo_byte() is called without holding + * output_lock, we might see only portion of multi-byte + * operation. + */ + if (MASK(ldata->echo_commit) == MASK(tail + 1)) + goto not_yet_stored; /* * If the buffer byte is the start of a multi-byte * operation, get the next byte, which is either the @@ -636,6 +644,8 @@ static size_t __process_echoes(struct tty_struct *tty) unsigned int num_chars, num_bs; case ECHO_OP_ERASE_TAB: + if (MASK(ldata->echo_commit) == MASK(tail + 2)) + goto not_yet_stored; num_chars = echo_buf(ldata, tail + 2); /* @@ -730,7 +740,8 @@ static size_t __process_echoes(struct tty_struct *tty) /* If the echo buffer is nearly full (so that the possibility exists * of echo overrun before the next commit), then discard enough * data at the tail to prevent a subsequent overrun */ - while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { + while (ldata->echo_commit > tail && + ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { if (echo_buf(ldata, tail) == ECHO_OP_START) { if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB) tail += 3; @@ -740,6 +751,7 @@ static size_t __process_echoes(struct tty_struct *tty) tail++; } + not_yet_stored: ldata->echo_tail = tail; return old_space - space; } @@ -750,6 +762,7 @@ static void commit_echoes(struct tty_struct *tty) size_t nr, old, echoed; size_t head; + mutex_lock(&ldata->output_lock); head = ldata->echo_head; ldata->echo_mark = head; old = ldata->echo_commit - ldata->echo_tail; @@ -758,10 +771,12 @@ static void commit_echoes(struct tty_struct *tty) * is over the threshold (and try again each time another * block is accumulated) */ nr = head - ldata->echo_tail; - if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK)) + if (nr < ECHO_COMMIT_WATERMARK || + (nr % ECHO_BLOCK > old % ECHO_BLOCK)) { + mutex_unlock(&ldata->output_lock); return; + } - mutex_lock(&ldata->output_lock); ldata->echo_commit = head; echoed = __process_echoes(tty); mutex_unlock(&ldata->output_lock); @@ -812,7 +827,9 @@ static void flush_echoes(struct tty_struct *tty) static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata) { - *echo_buf_addr(ldata, ldata->echo_head++) = c; + *echo_buf_addr(ldata, ldata->echo_head) = c; + smp_wmb(); /* Matches smp_rmb() in echo_buf(). 
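+	 * The byte is published before the echo_head increment below, so
+	 * readers can never see an index that covers an unwritten byte.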
*/ + ldata->echo_head++; } /** @@ -980,14 +997,15 @@ static void eraser(unsigned char c, struct tty_struct *tty) } seen_alnums = 0; - while (ldata->read_head != ldata->canon_head) { + while (MASK(ldata->read_head) != MASK(ldata->canon_head)) { head = ldata->read_head; /* erase a single possibly multibyte character */ do { head--; c = read_buf(ldata, head); - } while (is_continuation(c, tty) && head != ldata->canon_head); + } while (is_continuation(c, tty) && + MASK(head) != MASK(ldata->canon_head)); /* do not partially erase */ if (is_continuation(c, tty)) @@ -1029,7 +1047,7 @@ static void eraser(unsigned char c, struct tty_struct *tty) * This info is used to go back the correct * number of columns. */ - while (tail != ldata->canon_head) { + while (MASK(tail) != MASK(ldata->canon_head)) { tail--; c = read_buf(ldata, tail); if (c == '\t') { @@ -1304,7 +1322,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c) finish_erasing(ldata); echo_char(c, tty); echo_char_raw('\n', ldata); - while (tail != ldata->read_head) { + while (MASK(tail) != MASK(ldata->read_head)) { echo_char(read_buf(ldata, tail), tty); tail++; } @@ -1764,7 +1782,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) { struct n_tty_data *ldata = tty->disc_data; - if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) { + if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) { bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); ldata->line_start = ldata->read_tail; if (!L_ICANON(tty) || !read_cnt(ldata)) { @@ -1880,30 +1898,21 @@ static int n_tty_open(struct tty_struct *tty) struct n_tty_data *ldata; /* Currently a malloc failure here can panic */ - ldata = vmalloc(sizeof(*ldata)); + ldata = vzalloc(sizeof(*ldata)); if (!ldata) - goto err; + return -ENOMEM; ldata->overrun_time = jiffies; mutex_init(&ldata->atomic_read_lock); mutex_init(&ldata->output_lock); tty->disc_data = ldata; - reset_buffer_flags(tty->disc_data); - ldata->column = 0; - ldata->canon_column = 0; - ldata->num_overrun = 0; - ldata->no_room = 0; - ldata->lnext = 0; tty->closing = 0; /* indicate buffer work may resume */ clear_bit(TTY_LDISC_HALTED, &tty->flags); n_tty_set_termios(tty, NULL); tty_unthrottle(tty); - return 0; -err: - return -ENOMEM; } static inline int input_available_p(struct tty_struct *tty, int poll) @@ -2182,6 +2191,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, } if (tty_hung_up_p(file)) break; + /* + * Abort readers for ttys which never actually + * get hung up. See __tty_hangup(). + */ + if (test_bit(TTY_HUPPING, &tty->flags)) + break; if (!timeout) break; if (file->f_flags & O_NONBLOCK) { @@ -2407,7 +2422,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata) tail = ldata->read_tail; nr = head - tail; /* Skip EOF-chars.. 
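 * Characters flagged in read_flags and stored as __DISABLED_CHAR are
 * EOF markers; they are never returned to userspace, so exclude them
 * from the TIOCINQ count.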
*/ - while (head != tail) { + while (MASK(head) != MASK(tail)) { if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) && read_buf(ldata, tail) == __DISABLED_CHAR) nr--; @@ -2427,7 +2442,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file, return put_user(tty_chars_in_buffer(tty), (int __user *) arg); case TIOCINQ: down_write(&tty->termios_rwsem); - if (L_ICANON(tty)) + if (L_ICANON(tty) && !L_EXTPROC(tty)) retval = inq_canon(ldata); else retval = read_cnt(ldata); diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 64338442050e..899e8fe5e00f 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -110,16 +110,19 @@ static void pty_unthrottle(struct tty_struct *tty) static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c) { struct tty_struct *to = tty->link; + unsigned long flags; if (tty->stopped) return 0; if (c > 0) { + spin_lock_irqsave(&to->port->lock, flags); /* Stuff the data into the input queue of the other end */ c = tty_insert_flip_string(to->port, buf, c); /* And shovel */ if (c) tty_flip_buffer_push(to->port); + spin_unlock_irqrestore(&to->port->lock, flags); } return c; } diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index c68fb3a8ea1c..ae2564ecddcd 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -65,21 +65,32 @@ static int serdev_uevent(struct device *dev, struct kobj_uevent_env *env) */ int serdev_device_add(struct serdev_device *serdev) { + struct serdev_controller *ctrl = serdev->ctrl; struct device *parent = serdev->dev.parent; int err; dev_set_name(&serdev->dev, "%s-%d", dev_name(parent), serdev->nr); + /* Only a single slave device is currently supported. */ + if (ctrl->serdev) { + dev_err(&serdev->dev, "controller busy\n"); + return -EBUSY; + } + ctrl->serdev = serdev; + err = device_add(&serdev->dev); if (err < 0) { dev_err(&serdev->dev, "Can't add %s, status %d\n", dev_name(&serdev->dev), err); - goto err_device_add; + goto err_clear_serdev; } dev_dbg(&serdev->dev, "device %s registered\n", dev_name(&serdev->dev)); -err_device_add: + return 0; + +err_clear_serdev: + ctrl->serdev = NULL; return err; } EXPORT_SYMBOL_GPL(serdev_device_add); @@ -90,7 +101,10 @@ EXPORT_SYMBOL_GPL(serdev_device_add); */ void serdev_device_remove(struct serdev_device *serdev) { + struct serdev_controller *ctrl = serdev->ctrl; + device_unregister(&serdev->dev); + ctrl->serdev = NULL; } EXPORT_SYMBOL_GPL(serdev_device_remove); @@ -295,7 +309,6 @@ struct serdev_device *serdev_device_alloc(struct serdev_controller *ctrl) return NULL; serdev->ctrl = ctrl; - ctrl->serdev = serdev; device_initialize(&serdev->dev); serdev->dev.parent = &ctrl->dev; serdev->dev.bus = &serdev_bus_type; @@ -469,6 +482,7 @@ EXPORT_SYMBOL_GPL(__serdev_device_driver_register); static void __exit serdev_exit(void) { bus_unregister(&serdev_bus_type); + ida_destroy(&ctrl_ida); } module_exit(serdev_exit); diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c index 302018d67efa..69fc6d9ab490 100644 --- a/drivers/tty/serdev/serdev-ttyport.c +++ b/drivers/tty/serdev/serdev-ttyport.c @@ -35,23 +35,41 @@ static int ttyport_receive_buf(struct tty_port *port, const unsigned char *cp, { struct serdev_controller *ctrl = port->client_data; struct serport *serport = serdev_controller_get_drvdata(ctrl); + int ret; if (!test_bit(SERPORT_ACTIVE, &serport->flags)) return 0; - return serdev_controller_receive_buf(ctrl, cp, count); + ret = serdev_controller_receive_buf(ctrl, cp, count); + + 
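+	/*
+	 * Clamp whatever the client driver returned: values outside
+	 * [0, count] would corrupt the tty buffer accounting upstream.
+	 */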
dev_WARN_ONCE(&ctrl->dev, ret < 0 || ret > count, + "receive_buf returns %d (count = %zu)\n", + ret, count); + if (ret < 0) + return 0; + else if (ret > count) + return count; + + return ret; } static void ttyport_write_wakeup(struct tty_port *port) { struct serdev_controller *ctrl = port->client_data; struct serport *serport = serdev_controller_get_drvdata(ctrl); + struct tty_struct *tty; + + tty = tty_port_tty_get(port); + if (!tty) + return; - if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &port->tty->flags) && + if (test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) && test_bit(SERPORT_ACTIVE, &serport->flags)) serdev_controller_write_wakeup(ctrl); - wake_up_interruptible_poll(&port->tty->write_wait, POLLOUT); + wake_up_interruptible_poll(&tty->write_wait, POLLOUT); + + tty_kref_put(tty); } static const struct tty_port_client_operations client_ops = { @@ -102,10 +120,10 @@ static int ttyport_open(struct serdev_controller *ctrl) return PTR_ERR(tty); serport->tty = tty; - if (tty->ops->open) - tty->ops->open(serport->tty, NULL); - else - tty_port_open(serport->port, tty, NULL); + if (!tty->ops->open) + goto err_unlock; + + tty->ops->open(serport->tty, NULL); /* Bring the UART into a known 8 bits no parity hw fc state */ ktermios = tty->termios; @@ -122,6 +140,12 @@ static int ttyport_open(struct serdev_controller *ctrl) tty_unlock(serport->tty); return 0; + +err_unlock: + tty_unlock(tty); + tty_release_struct(tty, serport->tty_idx); + + return -ENODEV; } static void ttyport_close(struct serdev_controller *ctrl) @@ -131,8 +155,10 @@ static void ttyport_close(struct serdev_controller *ctrl) clear_bit(SERPORT_ACTIVE, &serport->flags); + tty_lock(tty); if (tty->ops->close) tty->ops->close(tty, NULL); + tty_unlock(tty); tty_release_struct(tty, serport->tty_idx); } diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index d29b512a7d9f..1dfe18928b07 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -619,6 +619,14 @@ static int univ8250_console_setup(struct console *co, char *options) return retval; } +static void univ8250_console_exit(struct console *co) +{ + struct uart_port *port; + + port = &serial8250_ports[co->index].port; + serial8250_console_exit(port); +} + /** * univ8250_console_match - non-standard console matching * @co: registering console @@ -677,6 +685,7 @@ static struct console univ8250_console = { .write = univ8250_console_write, .device = uart_console_device, .setup = univ8250_console_setup, + .exit = univ8250_console_exit, .match = univ8250_console_match, .flags = CON_PRINTBUFFER | CON_ANYTIME, .index = -1, @@ -835,7 +844,6 @@ static int serial8250_probe(struct platform_device *dev) uart.port.set_termios = p->set_termios; uart.port.set_ldisc = p->set_ldisc; uart.port.get_mctrl = p->get_mctrl; - uart.port.pm = p->pm; uart.port.dev = &dev->dev; uart.port.irqflags |= irqflag; ret = serial8250_register_8250_port(&uart); diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 7e638997bfc2..4a7b36ec8e53 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -24,9 +24,11 @@ #include #include #include +#include #include #include #include +#include #include @@ -240,18 +242,6 @@ static int dw8250_handle_irq(struct uart_port *p) return 0; } -static void -dw8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old) -{ - if (!state) - pm_runtime_get_sync(port->dev); - - serial8250_do_pm(port, state, old); - - if (state) - 
pm_runtime_put_sync_suspend(port->dev); -} - static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios, struct ktermios *old) { @@ -260,7 +250,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios, long rate; int ret; - if (IS_ERR(d->clk) || !old) + if (IS_ERR(d->clk)) goto out; clk_disable_unprepare(d->clk); @@ -408,10 +398,10 @@ static void dw8250_setup_port(struct uart_port *p) static int dw8250_probe(struct platform_device *pdev) { - struct uart_8250_port uart = {}; + struct uart_8250_port uart = {}, *up = &uart; struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); int irq = platform_get_irq(pdev, 0); - struct uart_port *p = &uart.port; + struct uart_port *p = &up->port; struct device *dev = &pdev->dev; struct dw8250_data *data; int err; @@ -432,7 +422,6 @@ static int dw8250_probe(struct platform_device *pdev) p->mapbase = regs->start; p->irq = irq; p->handle_irq = dw8250_handle_irq; - p->pm = dw8250_do_pm; p->type = PORT_8250; p->flags = UPF_SHARE_IRQ | UPF_FIXED_PORT; p->dev = dev; @@ -513,7 +502,8 @@ static int dw8250_probe(struct platform_device *pdev) /* If no clock rate is defined, fail. */ if (!p->uartclk) { dev_err(dev, "clock rate not defined\n"); - return -EINVAL; + err = -EINVAL; + goto err_clk; } data->pclk = devm_clk_get(dev, "apb_pclk"); @@ -549,10 +539,10 @@ static int dw8250_probe(struct platform_device *pdev) if (p->fifosize) { data->dma.rxconf.src_maxburst = p->fifosize / 4; data->dma.txconf.dst_maxburst = p->fifosize / 4; - uart.dma = &data->dma; + up->dma = &data->dma; } - data->line = serial8250_register_8250_port(&uart); + data->line = serial8250_register_8250_port(up); if (data->line < 0) { err = data->line; goto err_reset; @@ -560,6 +550,9 @@ static int dw8250_probe(struct platform_device *pdev) platform_set_drvdata(pdev, data); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, -1); + pm_runtime_set_active(dev); pm_runtime_enable(dev); @@ -582,8 +575,9 @@ static int dw8250_probe(struct platform_device *pdev) static int dw8250_remove(struct platform_device *pdev) { struct dw8250_data *data = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; - pm_runtime_get_sync(&pdev->dev); + pm_runtime_get_sync(dev); serial8250_unregister_port(data->line); @@ -595,8 +589,8 @@ static int dw8250_remove(struct platform_device *pdev) if (!IS_ERR(data->clk)) clk_disable_unprepare(data->clk); - pm_runtime_disable(&pdev->dev); - pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); return 0; } @@ -606,6 +600,20 @@ static int dw8250_suspend(struct device *dev) { struct dw8250_data *data = dev_get_drvdata(dev); + /* + * FIXME: For Platforms with LPSS PCI UARTs, the parent device should + * be prevented from going into D3 for the no_console_suspend flag to + * work as expected. 
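+	 * Until that is resolved, flag the parent PCI device with
+	 * PCI_DEV_FLAGS_NO_D3 below whenever this port is an active console.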
+ */ + if (platform_get_resource_byname(to_platform_device(dev), + IORESOURCE_MEM, "lpss_dev")) { + struct uart_8250_port *up = serial8250_get_port(data->line); + struct pci_dev *pdev = to_pci_dev(dev->parent); + + if (pdev && !console_suspend_enabled && uart_console(&up->port)) + pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; + } + serial8250_suspend_port(data->line); return 0; @@ -671,6 +679,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = { { "APMC0D08", 0}, { "AMD0020", 0 }, { "AMDI0020", 0 }, + { "BRCM2032", 0 }, { "HISI0031", 0 }, { }, }; diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c index af72ec32e404..f135c1846477 100644 --- a/drivers/tty/serial/8250/8250_early.c +++ b/drivers/tty/serial/8250/8250_early.c @@ -125,12 +125,14 @@ static void __init init_port(struct earlycon_device *device) serial8250_early_out(port, UART_FCR, 0); /* no fifo */ serial8250_early_out(port, UART_MCR, 0x3); /* DTR + RTS */ - divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud); - c = serial8250_early_in(port, UART_LCR); - serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB); - serial8250_early_out(port, UART_DLL, divisor & 0xff); - serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff); - serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB); + if (port->uartclk && device->baud) { + divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud); + c = serial8250_early_in(port, UART_LCR); + serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB); + serial8250_early_out(port, UART_DLL, divisor & 0xff); + serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff); + serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB); + } } int __init early_serial8250_setup(struct earlycon_device *device, diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index c55624703fdf..411b4b03457b 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -37,6 +37,7 @@ #define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358 #define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358 +#define UART_EXAR_INT0 0x80 #define UART_EXAR_8XMODE 0x88 /* 8X sampling rate select */ #define UART_EXAR_FCTR 0x08 /* Feature Control Register */ @@ -124,6 +125,7 @@ struct exar8250_board { struct exar8250 { unsigned int nr; struct exar8250_board *board; + void __iomem *virt; int line[0]; }; @@ -134,12 +136,9 @@ static int default_setup(struct exar8250 *priv, struct pci_dev *pcidev, const struct exar8250_board *board = priv->board; unsigned int bar = 0; - if (!pcim_iomap_table(pcidev)[bar] && !pcim_iomap(pcidev, bar, 0)) - return -ENOMEM; - port->port.iotype = UPIO_MEM; port->port.mapbase = pci_resource_start(pcidev, bar) + offset; - port->port.membase = pcim_iomap_table(pcidev)[bar] + offset; + port->port.membase = priv->virt + offset; port->port.regshift = board->reg_shift; return 0; @@ -423,6 +422,29 @@ static void pci_xr17v35x_exit(struct pci_dev *pcidev) port->port.private_data = NULL; } +/* + * These Exar UARTs have an extra interrupt indicator that could fire for a + * few interrupts that are not presented/cleared through IIR. One of which is + * a wakeup interrupt when coming out of sleep. These interrupts are only + * cleared by reading global INT0 or INT1 registers as interrupts are + * associated with channel 0. The INT[3:0] registers _are_ accessible from each + * channel's address space, but for the sake of bus efficiency we register a + * dedicated handler at the PCI device level to handle them. 
+ */ +static irqreturn_t exar_misc_handler(int irq, void *data) +{ + struct exar8250 *priv = data; + + /* Clear all PCI interrupts by reading INT0. No effect on IIR */ + readb(priv->virt + UART_EXAR_INT0); + + /* Clear INT0 for Expansion Interface slave ports, too */ + if (priv->board->num_ports > 8) + readb(priv->virt + 0x2000 + UART_EXAR_INT0); + + return IRQ_HANDLED; +} + static int exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) { @@ -451,6 +473,9 @@ exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) return -ENOMEM; priv->board = board; + priv->virt = pcim_iomap(pcidev, bar, 0); + if (!priv->virt) + return -ENOMEM; pci_set_master(pcidev); @@ -464,6 +489,11 @@ exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) uart.port.irq = pci_irq_vector(pcidev, 0); uart.port.dev = &pcidev->dev; + rc = devm_request_irq(&pcidev->dev, uart.port.irq, exar_misc_handler, + IRQF_SHARED, "exar_uart", priv); + if (rc) + return rc; + for (i = 0; i < nr_ports && i < maxnr; i++) { rc = board->setup(priv, pcidev, &uart, i); if (rc) { diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c index e500f7dd2470..ba4af5434b91 100644 --- a/drivers/tty/serial/8250/8250_fintek.c +++ b/drivers/tty/serial/8250/8250_fintek.c @@ -118,6 +118,9 @@ static int fintek_8250_enter_key(u16 base_port, u8 key) if (!request_muxed_region(base_port, 2, "8250_fintek")) return -EBUSY; + /* Force to deactive all SuperIO in this base_port */ + outb(EXIT_KEY, base_port + ADDR_PORT); + outb(key, base_port + ADDR_PORT); outb(key, base_port + ADDR_PORT); return 0; @@ -208,7 +211,7 @@ static int fintek_8250_rs485_config(struct uart_port *port, if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) == (!!(rs485->flags & SER_RS485_RTS_AFTER_SEND))) - rs485->flags &= SER_RS485_ENABLED; + rs485->flags &= ~SER_RS485_ENABLED; else config |= RS485_URA; diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index fb45770d47aa..38d0596f70d5 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -140,18 +140,6 @@ static int __maybe_unused mtk8250_runtime_resume(struct device *dev) return 0; } -static void -mtk8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old) -{ - if (!state) - pm_runtime_get_sync(port->dev); - - serial8250_do_pm(port, state, old); - - if (state) - pm_runtime_put_sync_suspend(port->dev); -} - static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p, struct mtk8250_data *data) { @@ -206,7 +194,6 @@ static int mtk8250_probe(struct platform_device *pdev) spin_lock_init(&uart.port.lock); uart.port.mapbase = regs->start; uart.port.irq = irq->start; - uart.port.pm = mtk8250_do_pm; uart.port.type = PORT_16550; uart.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT; uart.port.dev = &pdev->dev; diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c index 1222c005fb98..3613a6aabfb3 100644 --- a/drivers/tty/serial/8250/8250_of.c +++ b/drivers/tty/serial/8250/8250_of.c @@ -141,8 +141,11 @@ static int of_platform_serial_setup(struct platform_device *ofdev, } info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL); - if (IS_ERR(info->rst)) + if (IS_ERR(info->rst)) { + ret = PTR_ERR(info->rst); goto err_dispose; + } + ret = reset_control_deassert(info->rst); if (ret) goto err_dispose; @@ -318,6 +321,7 @@ static const struct of_device_id of_platform_serial_table[] = { { .compatible = "mrvl,mmp-uart", .data = (void 
*)PORT_XSCALE, }, { .compatible = "ti,da830-uart", .data = (void *)PORT_DA830, }, + { .compatible = "nuvoton,npcm750-uart", .data = (void *)PORT_NPCM, }, { /* end of list */ }, }; MODULE_DEVICE_TABLE(of, of_platform_serial_table); diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 833771bca0a5..3ef6a3a0c74a 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -483,28 +483,6 @@ static void omap_8250_set_termios(struct uart_port *port, tty_termios_encode_baud_rate(termios, baud, baud); } -/* same as 8250 except that we may have extra flow bits set in EFR */ -static void omap_8250_pm(struct uart_port *port, unsigned int state, - unsigned int oldstate) -{ - struct uart_8250_port *up = up_to_u8250p(port); - u8 efr; - - pm_runtime_get_sync(port->dev); - serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); - efr = serial_in(up, UART_EFR); - serial_out(up, UART_EFR, efr | UART_EFR_ECB); - serial_out(up, UART_LCR, 0); - - serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0); - serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); - serial_out(up, UART_EFR, efr); - serial_out(up, UART_LCR, 0); - - pm_runtime_mark_last_busy(port->dev); - pm_runtime_put_autosuspend(port->dev); -} - static void omap_serial_fill_features_erratas(struct uart_8250_port *up, struct omap8250_priv *priv) { @@ -1100,13 +1078,14 @@ static int omap8250_no_handle_irq(struct uart_port *port) return 0; } +static const u8 omap4_habit = UART_ERRATA_CLOCK_DISABLE; static const u8 am3352_habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE; static const u8 dra742_habit = UART_ERRATA_CLOCK_DISABLE; static const struct of_device_id omap8250_dt_ids[] = { { .compatible = "ti,omap2-uart" }, { .compatible = "ti,omap3-uart" }, - { .compatible = "ti,omap4-uart" }, + { .compatible = "ti,omap4-uart", .data = &omap4_habit, }, { .compatible = "ti,am3352-uart", .data = &am3352_habit, }, { .compatible = "ti,am4372-uart", .data = &am3352_habit, }, { .compatible = "ti,dra742-uart", .data = &dra742_habit, }, @@ -1171,7 +1150,6 @@ static int omap8250_probe(struct platform_device *pdev) #endif up.port.set_termios = omap_8250_set_termios; up.port.set_mctrl = omap8250_set_mctrl; - up.port.pm = omap_8250_pm; up.port.startup = omap_8250_startup; up.port.shutdown = omap_8250_shutdown; up.port.throttle = omap_8250_throttle; @@ -1343,6 +1321,19 @@ static int omap8250_soft_reset(struct device *dev) int sysc; int syss; + /* + * At least on omap4, unused uarts may not idle after reset without + * a basic scr dma configuration even with no dma in use. The + * module clkctrl status bits will be 1 instead of 3 blocking idle + * for the whole clockdomain. The softreset below will clear scr, + * and we restore it on resume so this is safe to do on all SoCs + * needing omap8250_soft_reset() quirk. Do it in two writes as + * recommended in the comment for omap8250_update_scr(). 
+ */ + serial_out(up, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1); + serial_out(up, UART_OMAP_SCR, + OMAP_UART_SCR_DMAMODE_1 | OMAP_UART_SCR_DMAMODE_CTL); + sysc = serial_in(up, UART_OMAP_SYSC); /* softreset the UART */ diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 0c101a7470b0..4986b4aebe80 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -3345,9 +3345,7 @@ static const struct pci_device_id blacklist[] = { /* multi-io cards handled by parport_serial */ { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */ { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */ - { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */ { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */ - { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */ /* Moxa Smartio MUE boards handled by 8250_moxa */ { PCI_VDEVICE(MOXA, 0x1024), }, @@ -3389,11 +3387,9 @@ static int serial_pci_is_class_communication(struct pci_dev *dev) /* * If it is not a communications device or the programming * interface is greater than 6, give up. - * - * (Should we try to make guesses for multiport serial devices - * later?) */ if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) && + ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MULTISERIAL) && ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) || (dev->class & 0xff) > 6) return -ENODEV; @@ -3430,6 +3426,12 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) { int num_iomem, num_port, first_port = -1, i; + /* + * Should we try to make guesses for multiport serial devices later? + */ + if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_MULTISERIAL) + return -ENODEV; + num_iomem = num_port = 0; for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) { if (pci_resource_flags(dev, i) & IORESOURCE_IO) { @@ -4700,6 +4702,17 @@ static const struct pci_device_id serial_pci_tbl[] = { { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ pbn_b2_4_115200 }, + /* + * BrainBoxes UC-260 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0D21, + PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, + pbn_b2_4_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0E34, + PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, + pbn_b2_4_115200 }, /* * Perle PCI-RAS cards */ @@ -5137,6 +5150,9 @@ static const struct pci_device_id serial_pci_tbl[] = { { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 }, { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 }, + /* Amazon PCI serial device */ + { PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 }, + /* * These entries match devices with class COMMUNICATION_SERIAL, * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index f0cc04f62b67..75b53427f087 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -51,6 +51,10 @@ #define UART_EXAR_SLEEP 0x8b /* Sleep mode */ #define UART_EXAR_DVID 0x8d /* Device identification */ +/* Nuvoton NPCM timeout register */ +#define UART_NPCM_TOR 7 +#define UART_NPCM_TOIE BIT(7) /* Timeout Interrupt Enable */ + /* * Debugging. 
*/ @@ -90,8 +94,7 @@ static const struct serial8250_config uart_config[] = { .name = "16550A", .fifo_size = 16, .tx_loadsz = 16, - .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 | - UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, .rxtrig_bytes = {1, 4, 8, 14}, .flags = UART_CAP_FIFO, }, @@ -297,6 +300,15 @@ static const struct serial8250_config uart_config[] = { UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT, .flags = UART_CAP_FIFO, }, + [PORT_NPCM] = { + .name = "Nuvoton 16550", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 | + UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT, + .rxtrig_bytes = {1, 4, 8, 14}, + .flags = UART_CAP_FIFO, + }, }; /* Uart divisor latch read */ @@ -445,7 +457,6 @@ static void io_serial_out(struct uart_port *p, int offset, int value) } static int serial8250_default_handle_irq(struct uart_port *port); -static int exar_handle_irq(struct uart_port *port); static void set_io_from_upio(struct uart_port *p) { @@ -573,16 +584,12 @@ EXPORT_SYMBOL_GPL(serial8250_clear_and_reinit_fifos); void serial8250_rpm_get(struct uart_8250_port *p) { - if (!(p->capabilities & UART_CAP_RPM)) - return; pm_runtime_get_sync(p->port.dev); } EXPORT_SYMBOL_GPL(serial8250_rpm_get); void serial8250_rpm_put(struct uart_8250_port *p) { - if (!(p->capabilities & UART_CAP_RPM)) - return; pm_runtime_mark_last_busy(p->port.dev); pm_runtime_put_autosuspend(p->port.dev); } @@ -666,13 +673,15 @@ void serial8250_rpm_get_tx(struct uart_8250_port *p) { unsigned char rpm_active; - if (!(p->capabilities & UART_CAP_RPM)) - return; - rpm_active = xchg(&p->rpm_tx_active, 1); if (rpm_active) return; - pm_runtime_get_sync(p->port.dev); + /* + * Device has to be powered on at this point. Here we just increase + * reference count to prevent autosuspend until the TX FIFO becomes + * empty. See also a comment in serial8250_tx_chars(). + */ + pm_runtime_get_noresume(p->port.dev); } EXPORT_SYMBOL_GPL(serial8250_rpm_get_tx); @@ -680,9 +689,6 @@ void serial8250_rpm_put_tx(struct uart_8250_port *p) { unsigned char rpm_active; - if (!(p->capabilities & UART_CAP_RPM)) - return; - rpm_active = xchg(&p->rpm_tx_active, 0); if (!rpm_active) return; @@ -706,11 +712,10 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) * offset but the UART channel may only write to the corresponding * bit. */ - serial8250_rpm_get(p); if ((p->port.type == PORT_XR17V35X) || (p->port.type == PORT_XR17D15X)) { serial_out(p, UART_EXAR_SLEEP, sleep ? 
0xff : 0); - goto out; + return; } if (p->capabilities & UART_CAP_SLEEP) { @@ -728,8 +733,6 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) serial_out(p, UART_LCR, lcr); } } -out: - serial8250_rpm_put(p); } #ifdef CONFIG_SERIAL_8250_RSA @@ -1420,13 +1423,9 @@ static void serial8250_stop_rx(struct uart_port *port) { struct uart_8250_port *up = up_to_u8250p(port); - serial8250_rpm_get(up); - up->ier &= ~(UART_IER_RLSI | UART_IER_RDI); up->port.read_status_mask &= ~UART_LSR_DR; serial_port_out(port, UART_IER, up->ier); - - serial8250_rpm_put(up); } static void __do_stop_tx_rs485(struct uart_8250_port *p) @@ -1527,7 +1526,6 @@ static void serial8250_stop_tx(struct uart_port *port) { struct uart_8250_port *up = up_to_u8250p(port); - serial8250_rpm_get(up); __stop_tx(up); /* @@ -1537,7 +1535,6 @@ static void serial8250_stop_tx(struct uart_port *port) up->acr |= UART_ACR_TXDIS; serial_icr_write(up, UART_ACR, up->acr); } - serial8250_rpm_put(up); } static inline void __start_tx(struct uart_port *port) @@ -1670,9 +1667,7 @@ static void serial8250_enable_ms(struct uart_port *port) up->ier |= UART_IER_MSI; - serial8250_rpm_get(up); serial_port_out(port, UART_IER, up->ier); - serial8250_rpm_put(up); } static void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr) @@ -1803,8 +1798,6 @@ void serial8250_tx_chars(struct uart_8250_port *up) * HW can go idle. So we get here once again with empty FIFO and disable * the interrupt and RPM in __stop_tx() */ - if (uart_circ_empty(xmit) && !(up->capabilities & UART_CAP_RPM)) - __stop_tx(up); } EXPORT_SYMBOL_GPL(serial8250_tx_chars); @@ -1862,7 +1855,8 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) status = serial_port_in(port, UART_LSR); - if (status & (UART_LSR_DR | UART_LSR_BI)) { + if (status & (UART_LSR_DR | UART_LSR_BI) && + iir & UART_IIR_RDI) { if (!up->dma || handle_rx_dma(up, iir)) status = serial8250_rx_chars(up, status); } @@ -1877,37 +1871,19 @@ EXPORT_SYMBOL_GPL(serial8250_handle_irq); static int serial8250_default_handle_irq(struct uart_port *port) { - struct uart_8250_port *up = up_to_u8250p(port); unsigned int iir; - int ret; - serial8250_rpm_get(up); + /* + * The IRQ might be shared with other peripherals so we must first + * check that are we RPM suspended or not. If we are we assume that + * the IRQ was not for us (we shouldn't be RPM suspended when the + * interrupt is enabled). + */ + if (pm_runtime_suspended(port->dev)) + return 0; iir = serial_port_in(port, UART_IIR); - ret = serial8250_handle_irq(port, iir); - - serial8250_rpm_put(up); - return ret; -} - -/* - * These Exar UARTs have an extra interrupt indicator that could - * fire for a few unimplemented interrupts. One of which is a - * wakeup event when coming out of sleep. Put this here just - * to be on the safe side that these interrupts don't go unhandled. 
- */ -static int exar_handle_irq(struct uart_port *port) -{ - unsigned int iir = serial_port_in(port, UART_IIR); - int ret = 0; - - if (((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) && - serial_port_in(port, UART_EXAR_INT0) != 0) - ret = 1; - - ret |= serial8250_handle_irq(port, iir); - - return ret; + return serial8250_handle_irq(port, iir); } /* @@ -1940,15 +1916,11 @@ static unsigned int serial8250_tx_empty(struct uart_port *port) unsigned long flags; unsigned int lsr; - serial8250_rpm_get(up); - spin_lock_irqsave(&port->lock, flags); lsr = serial_port_in(port, UART_LSR); up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; spin_unlock_irqrestore(&port->lock, flags); - serial8250_rpm_put(up); - return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0; } @@ -1958,9 +1930,7 @@ unsigned int serial8250_do_get_mctrl(struct uart_port *port) unsigned int status; unsigned int ret; - serial8250_rpm_get(up); status = serial8250_modem_status(up); - serial8250_rpm_put(up); ret = 0; if (status & UART_MSR_DCD) @@ -2017,7 +1987,6 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state) struct uart_8250_port *up = up_to_u8250p(port); unsigned long flags; - serial8250_rpm_get(up); spin_lock_irqsave(&port->lock, flags); if (break_state == -1) up->lcr |= UART_LCR_SBC; @@ -2025,7 +1994,6 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state) up->lcr &= ~UART_LCR_SBC; serial_port_out(port, UART_LCR, up->lcr); spin_unlock_irqrestore(&port->lock, flags); - serial8250_rpm_put(up); } /* @@ -2072,21 +2040,12 @@ static int serial8250_get_poll_char(struct uart_port *port) { struct uart_8250_port *up = up_to_u8250p(port); unsigned char lsr; - int status; - - serial8250_rpm_get(up); lsr = serial_port_in(port, UART_LSR); + if (!(lsr & UART_LSR_DR)) + return NO_POLL_CHAR; - if (!(lsr & UART_LSR_DR)) { - status = NO_POLL_CHAR; - goto out; - } - - status = serial_port_in(port, UART_RX); -out: - serial8250_rpm_put(up); - return status; + return serial_port_in(port, UART_RX); } @@ -2096,7 +2055,6 @@ static void serial8250_put_poll_char(struct uart_port *port, unsigned int ier; struct uart_8250_port *up = up_to_u8250p(port); - serial8250_rpm_get(up); /* * First save the IER then disable the interrupts */ @@ -2118,7 +2076,6 @@ static void serial8250_put_poll_char(struct uart_port *port, */ wait_for_xmitr(up, BOTH_EMPTY); serial_port_out(port, UART_IER, ier); - serial8250_rpm_put(up); } #endif /* CONFIG_CONSOLE_POLL */ @@ -2141,7 +2098,6 @@ int serial8250_do_startup(struct uart_port *port) if (port->iotype != up->cur_iotype) set_io_from_upio(port); - serial8250_rpm_get(up); if (port->type == PORT_16C950) { /* Wake up and initialize UART */ up->acr = 0; @@ -2168,6 +2124,15 @@ int serial8250_do_startup(struct uart_port *port) UART_DA830_PWREMU_MGMT_FREE); } + if (port->type == PORT_NPCM) { + /* + * Nuvoton calls the scratch register 'UART_TOR' (timeout + * register). Enable it, and set TIOC (timeout interrupt + * comparator) to be 0x20 for correct operation. 
+ */ + serial_port_out(port, UART_NPCM_TOR, UART_NPCM_TOIE | 0x20); + } + #ifdef CONFIG_SERIAL_8250_RSA /* * If this is an RSA port, see if we can kick it up to the @@ -2215,8 +2180,7 @@ int serial8250_do_startup(struct uart_port *port) (serial_port_in(port, UART_LSR) == 0xff)) { printk_ratelimited(KERN_INFO "ttyS%d: LSR safety check engaged!\n", serial_index(port)); - retval = -ENODEV; - goto out; + return -ENODEV; } /* @@ -2298,7 +2262,7 @@ int serial8250_do_startup(struct uart_port *port) retval = up->ops->setup_irq(up); if (retval) - goto out; + return retval; /* * Now, initialize the UART @@ -2372,7 +2336,7 @@ int serial8250_do_startup(struct uart_port *port) * Request DMA channels for both RX and TX. */ if (up->dma) { - retval = serial8250_request_dma(up); + retval = uart_console(port) ? -ENXIO : serial8250_request_dma(up); if (retval) { pr_warn_ratelimited("ttyS%d - failed to request DMA\n", serial_index(port)); @@ -2396,10 +2360,7 @@ int serial8250_do_startup(struct uart_port *port) outb_p(0x80, icp); inb_p(icp); } - retval = 0; -out: - serial8250_rpm_put(up); - return retval; + return 0; } EXPORT_SYMBOL_GPL(serial8250_do_startup); @@ -2415,7 +2376,6 @@ void serial8250_do_shutdown(struct uart_port *port) struct uart_8250_port *up = up_to_u8250p(port); unsigned long flags; - serial8250_rpm_get(up); /* * Disable interrupts from this port */ @@ -2459,7 +2419,6 @@ void serial8250_do_shutdown(struct uart_port *port) * the IRQ chain. */ serial_port_in(port, UART_RX); - serial8250_rpm_put(up); up->ops->release_irq(up); } @@ -2490,6 +2449,15 @@ static unsigned int xr17v35x_get_divisor(struct uart_8250_port *up, return quot_16 >> 4; } +/* Nuvoton NPCM UARTs have a custom divisor calculation */ +static unsigned int npcm_get_divisor(struct uart_8250_port *up, + unsigned int baud) +{ + struct uart_port *port = &up->port; + + return DIV_ROUND_CLOSEST(port->uartclk, 16 * baud + 2) - 2; +} + static unsigned int serial8250_get_divisor(struct uart_8250_port *up, unsigned int baud, unsigned int *frac) @@ -2510,6 +2478,8 @@ static unsigned int serial8250_get_divisor(struct uart_8250_port *up, quot = 0x8002; else if (up->port.type == PORT_XR17V35X) quot = xr17v35x_get_divisor(up, baud, frac); + else if (up->port.type == PORT_NPCM) + quot = npcm_get_divisor(up, baud); else quot = uart_get_divisor(port, baud); @@ -2586,8 +2556,11 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud, serial_dl_write(up, quot); /* XR17V35x UARTs have an extra fractional divisor register (DLD) */ - if (up->port.type == PORT_XR17V35X) + if (up->port.type == PORT_XR17V35X) { + /* Preserve bits not related to baudrate; DLD[7:4]. */ + quot_frac |= serial_port_in(port, 0x2) & 0xf0; serial_port_out(port, 0x2, quot_frac); + } } static unsigned int serial8250_get_baud_rate(struct uart_port *port, @@ -2605,6 +2578,42 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port, port->uartclk); } +void serial8250_do_restore_context(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + + /* Write extended features at first */ + if (up->capabilities & UART_CAP_EFR) { + serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B); + if (port->flags & UPF_EXAR_EFR) + serial_port_out(port, UART_XR_EFR, up->efr); + else + serial_port_out(port, UART_EFR, up->efr); + } + + serial8250_set_divisor(port, up->baud, up->quot, up->frac); + + /* + * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR + * is written without DLAB set, this mode will be disabled. 
+ */ + if (port->type == PORT_16750) + serial_port_out(port, UART_FCR, up->fcr); + + serial_port_out(port, UART_LCR, up->lcr); /* reset DLAB */ + if (port->type != PORT_16750) { + /* emulated UARTs (Lucent Venus 167x) need two steps */ + if (up->fcr & UART_FCR_ENABLE_FIFO) + serial_port_out(port, UART_FCR, UART_FCR_ENABLE_FIFO); + serial_port_out(port, UART_FCR, up->fcr); /* set fcr */ + } + serial8250_set_mctrl(port, port->mctrl); + + /* Enable interrupts at last */ + serial_port_out(port, UART_IER, up->ier); +} +EXPORT_SYMBOL_GPL(serial8250_do_restore_context); + void serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) @@ -2629,10 +2638,12 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, * Ok, we're now changing the port state. Do it with * interrupts disabled. */ - serial8250_rpm_get(up); spin_lock_irqsave(&port->lock, flags); up->lcr = cval; /* Save computed LCR */ + up->baud = baud; /* Save baud rate */ + up->quot = quot; /* Save quot */ + up->frac = frac; /* Save fraction */ if (up->capabilities & UART_CAP_FIFO && port->fifosize > 1) { /* NOTE: If fifo_bug is not set, a user can set RX_trigger. */ @@ -2698,8 +2709,6 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, if (up->capabilities & UART_CAP_RTOIE) up->ier |= UART_IER_RTOIE; - serial_port_out(port, UART_IER, up->ier); - if (up->capabilities & UART_CAP_EFR) { unsigned char efr = 0; /* @@ -2710,32 +2719,13 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, if (termios->c_cflag & CRTSCTS) efr |= UART_EFR_CTS; - serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B); - if (port->flags & UPF_EXAR_EFR) - serial_port_out(port, UART_XR_EFR, efr); - else - serial_port_out(port, UART_EFR, efr); + up->efr = efr; } - serial8250_set_divisor(port, baud, quot, frac); - - /* - * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR - * is written without DLAB set, this mode will be disabled. 
- */ - if (port->type == PORT_16750) - serial_port_out(port, UART_FCR, up->fcr); + /* Write saved values to the registers */ + serial8250_do_restore_context(port); - serial_port_out(port, UART_LCR, up->lcr); /* reset DLAB */ - if (port->type != PORT_16750) { - /* emulated UARTs (Lucent Venus 167x) need two steps */ - if (up->fcr & UART_FCR_ENABLE_FIFO) - serial_port_out(port, UART_FCR, UART_FCR_ENABLE_FIFO); - serial_port_out(port, UART_FCR, up->fcr); /* set fcr */ - } - serial8250_set_mctrl(port, port->mctrl); spin_unlock_irqrestore(&port->lock, flags); - serial8250_rpm_put(up); /* Don't rewrite B0 */ if (tty_termios_baud_rate(termios)) @@ -2789,16 +2779,6 @@ void serial8250_do_pm(struct uart_port *port, unsigned int state, } EXPORT_SYMBOL(serial8250_do_pm); -static void -serial8250_pm(struct uart_port *port, unsigned int state, - unsigned int oldstate) -{ - if (port->pm) - port->pm(port, state, oldstate); - else - serial8250_do_pm(port, state, oldstate); -} - static unsigned int serial8250_port_size(struct uart_8250_port *pt) { if (pt->port.mapsize) @@ -3071,11 +3051,6 @@ static void serial8250_config_port(struct uart_port *port, int flags) if (port->type == PORT_UNKNOWN) serial8250_release_std_resource(up); - /* Fixme: probably not the best place for this */ - if ((port->type == PORT_XR17V35X) || - (port->type == PORT_XR17D15X)) - port->handle_irq = exar_handle_irq; - register_dev_spec_attr_grp(up); up->fcr = uart_config[up->port.type].fcr; } @@ -3115,7 +3090,6 @@ static const struct uart_ops serial8250_pops = { .shutdown = serial8250_shutdown, .set_termios = serial8250_set_termios, .set_ldisc = serial8250_set_ldisc, - .pm = serial8250_pm, .type = serial8250_type, .release_port = serial8250_release_port, .request_port = serial8250_request_port, @@ -3201,6 +3175,9 @@ static void serial8250_console_restore(struct uart_8250_port *up) * any possible real use of the port... * * The console_lock must be held when we get here. + * + * Doing runtime PM is a really bad idea for the kernel console. + * Thus we assume that the function called when device is powered on. 
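+ * serial8250_console_setup() enforces that by taking a runtime PM
+ * reference with pm_runtime_get_noresume(), released again in
+ * serial8250_console_exit().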
*/ void serial8250_console_write(struct uart_8250_port *up, const char *s, unsigned int count) @@ -3212,8 +3189,6 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, touch_nmi_watchdog(); - serial8250_rpm_get(up); - if (port->sysrq) locked = 0; else if (oops_in_progress) @@ -3258,7 +3233,6 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, if (locked) spin_unlock_irqrestore(&port->lock, flags); - serial8250_rpm_put(up); } static unsigned int probe_baud(struct uart_port *port) @@ -3282,6 +3256,7 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe) int bits = 8; int parity = 'n'; int flow = 'n'; + int ret; if (!port->iobase && !port->membase) return -ENODEV; @@ -3291,7 +3266,18 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe) else if (probe) baud = probe_baud(port); - return uart_set_options(port, port->cons, baud, parity, bits, flow); + ret = uart_set_options(port, port->cons, baud, parity, bits, flow); + + if (port->dev) + pm_runtime_get_noresume(port->dev); + + return ret; +} + +void serial8250_console_exit(struct uart_port *port) +{ + if (port->dev) + pm_runtime_put_noidle(port->dev); } #endif /* CONFIG_SERIAL_8250_CONSOLE */ diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c index 8a10b10e27aa..c206f173f912 100644 --- a/drivers/tty/serial/8250/8250_uniphier.c +++ b/drivers/tty/serial/8250/8250_uniphier.c @@ -259,12 +259,13 @@ static int uniphier_uart_probe(struct platform_device *pdev) up.dl_read = uniphier_serial_dl_read; up.dl_write = uniphier_serial_dl_write; - priv->line = serial8250_register_8250_port(&up); - if (priv->line < 0) { + ret = serial8250_register_8250_port(&up); + if (ret < 0) { dev_err(dev, "failed to register 8250 port\n"); clk_disable_unprepare(priv->clk); return ret; } + priv->line = ret; platform_set_drvdata(pdev, priv); diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c index 3e4b717670d7..59cb62de236b 100644 --- a/drivers/tty/serial/altera_uart.c +++ b/drivers/tty/serial/altera_uart.c @@ -331,7 +331,7 @@ static int altera_uart_startup(struct uart_port *port) /* Enable RX interrupts now */ pp->imr = ALTERA_UART_CONTROL_RRDY_MSK; - writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG); + altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG); spin_unlock_irqrestore(&port->lock, flags); @@ -347,7 +347,7 @@ static void altera_uart_shutdown(struct uart_port *port) /* Disable all interrupts now */ pp->imr = 0; - writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG); + altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG); spin_unlock_irqrestore(&port->lock, flags); @@ -436,7 +436,7 @@ static void altera_uart_console_putc(struct uart_port *port, int c) ALTERA_UART_STATUS_TRDY_MSK)) cpu_relax(); - writel(c, port->membase + ALTERA_UART_TXDATA_REG); + altera_uart_writel(port, c, ALTERA_UART_TXDATA_REG); } static void altera_uart_console_write(struct console *co, const char *s, @@ -506,13 +506,13 @@ static int __init altera_uart_earlycon_setup(struct earlycon_device *dev, return -ENODEV; /* Enable RX interrupts now */ - writel(ALTERA_UART_CONTROL_RRDY_MSK, - port->membase + ALTERA_UART_CONTROL_REG); + altera_uart_writel(port, ALTERA_UART_CONTROL_RRDY_MSK, + ALTERA_UART_CONTROL_REG); if (dev->baud) { unsigned int baudclk = port->uartclk / dev->baud; - writel(baudclk, port->membase + ALTERA_UART_DIVISOR_REG); + altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG); } 
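	/* Port setup is done; install the earlycon write hook last. */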
dev->con->write = altera_uart_earlycon_write; diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 111e6a950779..c9f701aca677 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1747,10 +1747,26 @@ static int pl011_allocate_irq(struct uart_amba_port *uap) */ static void pl011_enable_interrupts(struct uart_amba_port *uap) { + unsigned int i; + spin_lock_irq(&uap->port.lock); /* Clear out any spuriously appearing RX interrupts */ pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR); + + /* + * RXIS is asserted only when the RX FIFO transitions from below + * to above the trigger threshold. If the RX FIFO is already + * full to the threshold this can't happen and RXIS will now be + * stuck off. Drain the RX FIFO explicitly to fix this: + */ + for (i = 0; i < uap->fifosize * 2; ++i) { + if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE) + break; + + pl011_read(uap, REG_DR); + } + uap->im = UART011_RTIM; if (!pl011_dma_rx_running(uap)) uap->im |= UART011_RXIM; diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c index 77fe306690c4..71e37abb6bcb 100644 --- a/drivers/tty/serial/arc_uart.c +++ b/drivers/tty/serial/arc_uart.c @@ -596,6 +596,11 @@ static int arc_serial_probe(struct platform_device *pdev) if (dev_id < 0) dev_id = 0; + if (dev_id >= ARRAY_SIZE(arc_uart_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", dev_id); + return -EINVAL; + } + uart = &arc_uart_ports[dev_id]; port = &uart->port; diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 7551cab438ff..77b0e6e73e06 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -1763,6 +1763,7 @@ static void atmel_get_ip_name(struct uart_port *port) switch (version) { case 0x302: case 0x10213: + case 0x10302: dev_dbg(port->dev, "This version is usart\n"); atmel_port->has_frac_baudrate = true; atmel_port->has_hw_timer = true; @@ -1785,7 +1786,6 @@ static int atmel_startup(struct uart_port *port) { struct platform_device *pdev = to_platform_device(port->dev); struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); - struct tty_struct *tty = port->state->port.tty; int retval; /* @@ -1800,8 +1800,8 @@ static int atmel_startup(struct uart_port *port) * Allocate the IRQ */ retval = request_irq(port->irq, atmel_interrupt, - IRQF_SHARED | IRQF_COND_SUSPEND, - tty ? tty->name : "atmel_serial", port); + IRQF_SHARED | IRQF_COND_SUSPEND, + dev_name(&pdev->dev), port); if (retval) { dev_err(port->dev, "atmel_startup - Can't get irq\n"); return retval; @@ -1991,41 +1991,6 @@ static void atmel_shutdown(struct uart_port *port) atmel_flush_buffer(port); } -/* - * Power / Clock management. - */ -static void atmel_serial_pm(struct uart_port *port, unsigned int state, - unsigned int oldstate) -{ - struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); - - switch (state) { - case 0: - /* - * Enable the peripheral clock for this serial port. - * This is called on uart_open() or a resume event. - */ - clk_prepare_enable(atmel_port->clk); - - /* re-enable interrupts if we disabled some on suspend */ - atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr); - break; - case 3: - /* Back up the interrupt mask and disable all interrupts */ - atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR); - atmel_uart_writel(port, ATMEL_US_IDR, -1); - - /* - * Disable the peripheral clock for this serial port. - * This is called on uart_close() or a suspend event. 
- */ - clk_disable_unprepare(atmel_port->clk); - break; - default: - dev_err(port->dev, "atmel_serial: unknown pm %d\n", state); - } -} - /* * Change the port parameters */ @@ -2354,7 +2319,6 @@ static const struct uart_ops atmel_pops = { .request_port = atmel_request_port, .config_port = atmel_config_port, .verify_port = atmel_verify_port, - .pm = atmel_serial_pm, #ifdef CONFIG_CONSOLE_POLL .poll_get_char = atmel_poll_get_char, .poll_put_char = atmel_poll_put_char, diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c index ff465ff43577..2e64c0e24df8 100644 --- a/drivers/tty/serial/dz.c +++ b/drivers/tty/serial/dz.c @@ -635,26 +635,6 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios, spin_unlock_irqrestore(&dport->port.lock, flags); } -/* - * Hack alert! - * Required solely so that the initial PROM-based console - * works undisturbed in parallel with this one. - */ -static void dz_pm(struct uart_port *uport, unsigned int state, - unsigned int oldstate) -{ - struct dz_port *dport = to_dport(uport); - unsigned long flags; - - spin_lock_irqsave(&dport->port.lock, flags); - if (state < 3) - dz_start_tx(&dport->port); - else - dz_stop_tx(&dport->port); - spin_unlock_irqrestore(&dport->port.lock, flags); -} - - static const char *dz_type(struct uart_port *uport) { return "DZ"; @@ -750,7 +730,6 @@ static const struct uart_ops dz_ops = { .startup = dz_startup, .shutdown = dz_shutdown, .set_termios = dz_set_termios, - .pm = dz_pm, .type = dz_type, .release_port = dz_release_port, .request_port = dz_request_port, @@ -778,6 +757,7 @@ static void __init dz_init_ports(void) struct uart_port *uport = &dport->port; dport->mux = &dz_mux; + spin_lock_init(&uport->lock); uport->irq = dec_interrupt[DEC_IRQ_DZ11]; uport->fifosize = 1; @@ -874,10 +854,7 @@ static int __init dz_console_setup(struct console *co, char *options) if (ret) return ret; - spin_lock_init(&dport->port.lock); /* For dz_pm(). 
*/ - dz_reset(dport); - dz_pm(uport, 0, -1); if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c index 98928f082d87..ac667b47f199 100644 --- a/drivers/tty/serial/earlycon.c +++ b/drivers/tty/serial/earlycon.c @@ -172,7 +172,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match) */ int __init setup_earlycon(char *buf) { - const struct earlycon_id *match; + const struct earlycon_id **p_match; if (!buf || !buf[0]) return -EINVAL; @@ -180,7 +180,9 @@ int __init setup_earlycon(char *buf) if (early_con.flags & CON_ENABLED) return -EALREADY; - for (match = __earlycon_table; match < __earlycon_table_end; match++) { + for (p_match = __earlycon_table; p_match < __earlycon_table_end; + p_match++) { + const struct earlycon_id *match = *p_match; size_t len = strlen(match->name); if (strncmp(buf, match->name, len)) @@ -253,11 +255,12 @@ int __init of_setup_earlycon(const struct earlycon_id *match, } port->mapbase = addr; port->uartclk = BASE_BAUD * 16; - port->membase = earlycon_map(port->mapbase, SZ_4K); val = of_get_flat_dt_prop(node, "reg-offset", NULL); if (val) port->mapbase += be32_to_cpu(*val); + port->membase = earlycon_map(port->mapbase, SZ_4K); + val = of_get_flat_dt_prop(node, "reg-shift", NULL); if (val) port->regshift = be32_to_cpu(*val); diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index f0252184291e..7a3db9378fa3 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -2151,6 +2151,10 @@ static int lpuart_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); return ret; } + if (ret >= ARRAY_SIZE(lpuart_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", ret); + return -EINVAL; + } sport->port.line = ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); sport->port.membase = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index dfeff3951f93..8deaf2ad8b34 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -334,7 +334,8 @@ static void imx_port_rts_active(struct imx_port *sport, unsigned long *ucr2) { *ucr2 &= ~(UCR2_CTSC | UCR2_CTS); - mctrl_gpio_set(sport->gpios, sport->port.mctrl | TIOCM_RTS); + sport->port.mctrl |= TIOCM_RTS; + mctrl_gpio_set(sport->gpios, sport->port.mctrl); } static void imx_port_rts_inactive(struct imx_port *sport, unsigned long *ucr2) @@ -342,7 +343,8 @@ static void imx_port_rts_inactive(struct imx_port *sport, unsigned long *ucr2) *ucr2 &= ~UCR2_CTSC; *ucr2 |= UCR2_CTS; - mctrl_gpio_set(sport->gpios, sport->port.mctrl & ~TIOCM_RTS); + sport->port.mctrl &= ~TIOCM_RTS; + mctrl_gpio_set(sport->gpios, sport->port.mctrl); } static void imx_port_rts_auto(struct imx_port *sport, unsigned long *ucr2) @@ -2094,6 +2096,12 @@ static int serial_imx_probe(struct platform_device *pdev) else if (ret < 0) return ret; + if (sport->port.line >= ARRAY_SIZE(imx_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", + sport->port.line); + return -EINVAL; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) @@ -2273,12 +2281,14 @@ static void serial_imx_enable_wakeup(struct imx_port *sport, bool on) val &= ~UCR3_AWAKEN; writel(val, sport->port.membase + UCR3); - val = readl(sport->port.membase + UCR1); - if (on) - val |= UCR1_RTSDEN; - else - val &= ~UCR1_RTSDEN; - writel(val, 
sport->port.membase + UCR1); + if (sport->have_rtscts) { + val = readl(sport->port.membase + UCR1); + if (on) + val |= UCR1_RTSDEN; + else + val &= ~UCR1_RTSDEN; + writel(val, sport->port.membase + UCR1); + } } static int imx_serial_port_suspend_noirq(struct device *dev) diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index a260cde743e2..5532c440bf61 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c @@ -245,7 +245,8 @@ static void kgdboc_put_char(u8 chr) kgdb_tty_line, chr); } -static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp) +static int param_set_kgdboc_var(const char *kmessage, + const struct kernel_param *kp) { int len = strlen(kmessage); diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c index 791c4c74f6d6..a7caa65e7da9 100644 --- a/drivers/tty/serial/mpc52xx_uart.c +++ b/drivers/tty/serial/mpc52xx_uart.c @@ -1359,7 +1359,6 @@ static const struct uart_ops mpc52xx_uart_ops = { .startup = mpc52xx_uart_startup, .shutdown = mpc52xx_uart_shutdown, .set_termios = mpc52xx_uart_set_termios, -/* .pm = mpc52xx_uart_pm, Not supported yet */ .type = mpc52xx_uart_type, .release_port = mpc52xx_uart_release_port, .request_port = mpc52xx_uart_request_port, diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 1db79ee8a886..23fdc324337c 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -1387,25 +1387,6 @@ static int msm_verify_port(struct uart_port *port, struct serial_struct *ser) return 0; } -static void msm_power(struct uart_port *port, unsigned int state, - unsigned int oldstate) -{ - struct msm_port *msm_port = UART_TO_MSM(port); - - switch (state) { - case 0: - clk_prepare_enable(msm_port->clk); - clk_prepare_enable(msm_port->pclk); - break; - case 3: - clk_disable_unprepare(msm_port->clk); - clk_disable_unprepare(msm_port->pclk); - break; - default: - pr_err("msm_serial: Unknown PM state %d\n", state); - } -} - #ifdef CONFIG_CONSOLE_POLL static int msm_poll_get_char_single(struct uart_port *port) { @@ -1525,7 +1506,6 @@ static struct uart_ops msm_uart_pops = { .request_port = msm_request_port, .config_port = msm_config_port, .verify_port = msm_verify_port, - .pm = msm_power, #ifdef CONFIG_CONSOLE_POLL .poll_get_char = msm_poll_get_char, .poll_put_char = msm_poll_put_char, diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index be94246b6fcc..673c8fd7e34f 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -1667,6 +1667,10 @@ static int mxs_auart_probe(struct platform_device *pdev) s->port.line = pdev->id < 0 ? 
0 : pdev->id; else if (ret < 0) return ret; + if (s->port.line >= ARRAY_SIZE(auart_port)) { + dev_err(&pdev->dev, "serial%d out of range\n", s->port.line); + return -EINVAL; + } if (of_id) { pdev->id_entry = of_id->data; diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 7754053deeda..377635e670f1 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -693,7 +693,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl) if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS)) up->efr |= UART_EFR_RTS; else - up->efr &= UART_EFR_RTS; + up->efr &= ~UART_EFR_RTS; serial_out(up, UART_EFR, up->efr); serial_out(up, UART_LCR, lcr); @@ -1101,30 +1101,6 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios, dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line); } -static void -serial_omap_pm(struct uart_port *port, unsigned int state, - unsigned int oldstate) -{ - struct uart_omap_port *up = to_uart_omap_port(port); - unsigned char efr; - - dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->port.line); - - pm_runtime_get_sync(up->dev); - serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); - efr = serial_in(up, UART_EFR); - serial_out(up, UART_EFR, efr | UART_EFR_ECB); - serial_out(up, UART_LCR, 0); - - serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0); - serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); - serial_out(up, UART_EFR, efr); - serial_out(up, UART_LCR, 0); - - pm_runtime_mark_last_busy(up->dev); - pm_runtime_put_autosuspend(up->dev); -} - static void serial_omap_release_port(struct uart_port *port) { dev_dbg(port->dev, "serial_omap_release_port+\n"); @@ -1463,7 +1439,6 @@ static const struct uart_ops serial_omap_pops = { .startup = serial_omap_startup, .shutdown = serial_omap_shutdown, .set_termios = serial_omap_set_termios, - .pm = serial_omap_pm, .type = serial_omap_type, .release_port = serial_omap_release_port, .request_port = serial_omap_request_port, diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index d9123f995705..36740ad170f9 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -1611,7 +1611,6 @@ static const struct uart_ops pch_uart_ops = { .startup = pch_uart_startup, .shutdown = pch_uart_shutdown, .set_termios = pch_uart_set_termios, -/* .pm = pch_uart_pm, Not supported yet */ .type = pch_uart_type, .release_port = pch_uart_release_port, .request_port = pch_uart_request_port, diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c index 905631df1f8b..fe4cec427233 100644 --- a/drivers/tty/serial/pxa.c +++ b/drivers/tty/serial/pxa.c @@ -551,18 +551,6 @@ serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios, spin_unlock_irqrestore(&up->port.lock, flags); } -static void -serial_pxa_pm(struct uart_port *port, unsigned int state, - unsigned int oldstate) -{ - struct uart_pxa_port *up = (struct uart_pxa_port *)port; - - if (!state) - clk_prepare_enable(up->clk); - else - clk_disable_unprepare(up->clk); -} - static void serial_pxa_release_port(struct uart_port *port) { } @@ -774,7 +762,6 @@ static const struct uart_ops serial_pxa_pops = { .startup = serial_pxa_startup, .shutdown = serial_pxa_shutdown, .set_termios = serial_pxa_set_termios, - .pm = serial_pxa_pm, .type = serial_pxa_type, .release_port = serial_pxa_release_port, .request_port = serial_pxa_request_port, diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c index fd3d1329d48c..a22cea27a989 
100644 --- a/drivers/tty/serial/sa1100.c +++ b/drivers/tty/serial/sa1100.c @@ -660,7 +660,6 @@ void sa1100_register_uart_fns(struct sa1100_port_fns *fns) if (fns->set_mctrl) sa1100_pops.set_mctrl = fns->set_mctrl; - sa1100_pops.pm = fns->pm; /* * FIXME: fns->set_wake is unused - this should be called from * the suspend() callback if device_may_wakeup(dev)) is set. diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 8aca18c4cdea..1ce4ebb063cc 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -865,15 +865,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p) dma->rx_conf.direction = DMA_DEV_TO_MEM; dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH; - dma->rx_conf.src_maxburst = 16; + dma->rx_conf.src_maxburst = 1; dma->tx_conf.direction = DMA_MEM_TO_DEV; dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH; - if (dma_get_cache_alignment() >= 16) - dma->tx_conf.dst_maxburst = 16; - else - dma->tx_conf.dst_maxburst = 1; + dma->tx_conf.dst_maxburst = 1; dma->rx_chan = dma_request_chan(p->port.dev, "rx"); @@ -1082,39 +1079,6 @@ static int s3c64xx_serial_startup(struct uart_port *port) return ret; } -/* power power management control */ - -static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level, - unsigned int old) -{ - struct s3c24xx_uart_port *ourport = to_ourport(port); - int timeout = 10000; - - ourport->pm_level = level; - - switch (level) { - case 3: - while (--timeout && !s3c24xx_serial_txempty_nofifo(port)) - udelay(100); - - if (!IS_ERR(ourport->baudclk)) - clk_disable_unprepare(ourport->baudclk); - - clk_disable_unprepare(ourport->clk); - break; - - case 0: - clk_prepare_enable(ourport->clk); - - if (!IS_ERR(ourport->baudclk)) - clk_prepare_enable(ourport->baudclk); - - break; - default: - dev_err(port->dev, "s3c24xx_serial: unknown pm %d\n", level); - } -} - /* baud rate calculation * * The UARTs on the S3C2410/S3C2440 can take their clocks from a number @@ -1470,7 +1434,6 @@ static void s3c24xx_serial_put_poll_char(struct uart_port *port, #endif static struct uart_ops s3c24xx_serial_ops = { - .pm = s3c24xx_serial_pm, .tx_empty = s3c24xx_serial_tx_empty, .get_mctrl = s3c24xx_serial_get_mctrl, .set_mctrl = s3c24xx_serial_set_mctrl, @@ -1821,6 +1784,10 @@ static int s3c24xx_serial_probe(struct platform_device *pdev) dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index); + if (index >= ARRAY_SIZE(s3c24xx_serial_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", index); + return -EINVAL; + } ourport = &s3c24xx_serial_ports[index]; ourport->drv_data = s3c24xx_get_driver_data(pdev); diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index ca54ce074a5f..3dbe98cc7701 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -1064,12 +1064,6 @@ static int sc16is7xx_verify_port(struct uart_port *port, return 0; } -static void sc16is7xx_pm(struct uart_port *port, unsigned int state, - unsigned int oldstate) -{ - sc16is7xx_power(port, (state == UART_PM_STATE_ON) ? 
1 : 0); -} - static void sc16is7xx_null_void(struct uart_port *port) { /* Do nothing */ @@ -1091,7 +1085,6 @@ static const struct uart_ops sc16is7xx_ops = { .release_port = sc16is7xx_null_void, .config_port = sc16is7xx_config_port, .verify_port = sc16is7xx_verify_port, - .pm = sc16is7xx_pm, }; #ifdef CONFIG_GPIOLIB diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 3a14cccbd7ff..f02965936f2e 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -55,8 +56,6 @@ static struct lock_class_key port_lock_key; static void uart_change_speed(struct tty_struct *tty, struct uart_state *state, struct ktermios *old_termios); static void uart_wait_until_sent(struct tty_struct *tty, int timeout); -static void uart_change_pm(struct uart_state *state, - enum uart_pm_state pm_state); static void uart_port_shutdown(struct tty_port *port); @@ -65,15 +64,32 @@ static int uart_dcd_enabled(struct uart_port *uport) return !!(uport->status & UPSTAT_DCD_ENABLE); } -static inline struct uart_port *uart_port_ref(struct uart_state *state) +static inline struct uart_port *uart_port_ref_no_rpm(struct uart_state *state) { if (atomic_add_unless(&state->refcount, 1, 0)) return state->uart_port; return NULL; } +static inline void uart_port_deref_no_rpm(struct uart_port *uport) +{ + if (atomic_dec_and_test(&uport->state->refcount)) + wake_up(&uport->state->remove_wait); +} + +static inline struct uart_port *uart_port_ref(struct uart_state *state) +{ + if (atomic_add_unless(&state->refcount, 1, 0)) { + pm_runtime_get_sync(state->uart_port->dev); + return state->uart_port; + } + return NULL; +} + static inline void uart_port_deref(struct uart_port *uport) { + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); if (atomic_dec_and_test(&uport->state->refcount)) wake_up(&uport->state->remove_wait); } @@ -154,12 +170,15 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear) unsigned long flags; unsigned int old; + pm_runtime_get_sync(port->dev); spin_lock_irqsave(&port->lock, flags); old = port->mctrl; port->mctrl = (old & ~clear) | set; if (old != port->mctrl) port->ops->set_mctrl(port, port->mctrl); spin_unlock_irqrestore(&port->lock, flags); + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); } #define uart_set_mctrl(port, set) uart_update_mctrl(port, set, 0) @@ -200,11 +219,6 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state, if (uport->type == PORT_UNKNOWN) return 1; - /* - * Make sure the device is in D0 state. - */ - uart_change_pm(state, UART_PM_STATE_ON); - /* * Initialise and allocate the transmit and temporary * buffer. 
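The serial_core.c hunks above split port references into two flavours: uart_port_ref_no_rpm() for paths that only touch the software xmit buffer, and uart_port_ref(), which additionally resumes the device because the caller may touch hardware. A condensed sketch of the RPM-taking pair, assuming the refcount and remove_wait fields that struct uart_state carries in this kernel; the my_* names are illustrative:

#include <linux/pm_runtime.h>
#include <linux/serial_core.h>

static struct uart_port *my_port_ref(struct uart_state *state)
{
	/* Fail if the port is being removed (refcount already 0). */
	if (!atomic_add_unless(&state->refcount, 1, 0))
		return NULL;
	pm_runtime_get_sync(state->uart_port->dev);	/* wake hardware */
	return state->uart_port;
}

static void my_port_deref(struct uart_port *uport)
{
	/* Arm autosuspend instead of powering down synchronously. */
	pm_runtime_mark_last_busy(uport->dev);
	pm_runtime_put_autosuspend(uport->dev);
	if (atomic_dec_and_test(&uport->state->refcount))
		wake_up(&uport->state->remove_wait);
}

Keeping the no-RPM variant for the pure-buffer paths (uart_put_char(), uart_write(), uart_write_room(), uart_chars_in_buffer()) avoids runtime-PM churn on hot paths where no hardware access happens.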
@@ -219,7 +233,11 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state, uart_circ_clear(&state->xmit); } + pm_runtime_get_sync(uport->dev); retval = uport->ops->startup(uport); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); + if (retval == 0) { if (uart_console(uport) && uport->cons->cflag) { tty->termios.c_cflag = uport->cons->cflag; @@ -507,6 +525,8 @@ static void uart_change_speed(struct tty_struct *tty, struct uart_state *state, return; termios = &tty->termios; + + pm_runtime_get_sync(uport->dev); uport->ops->set_termios(uport, termios, old_termios); /* @@ -535,6 +555,8 @@ static void uart_change_speed(struct tty_struct *tty, struct uart_state *state, __uart_start(tty); } spin_unlock_irq(&uport->lock); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); } static int uart_put_char(struct tty_struct *tty, unsigned char c) @@ -549,13 +571,18 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c) if (!circ->buf) return 0; - port = uart_port_lock(state, flags); - if (port && uart_circ_chars_free(circ) != 0) { + port = uart_port_ref_no_rpm(state); + if (!port) + return 0; + + spin_lock_irqsave(&port->lock, flags); + if (uart_circ_chars_free(circ) != 0) { circ->buf[circ->head] = c; circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); ret = 1; } - uart_port_unlock(port, flags); + spin_unlock_irqrestore(&port->lock, flags); + uart_port_deref_no_rpm(port); return ret; } @@ -586,7 +613,11 @@ static int uart_write(struct tty_struct *tty, if (!circ->buf) return 0; - port = uart_port_lock(state, flags); + port = uart_port_ref_no_rpm(state); + if (!port) + return 0; + + spin_lock_irqsave(&port->lock, flags); while (port) { c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); if (count < c) @@ -601,7 +632,8 @@ static int uart_write(struct tty_struct *tty, } __uart_start(tty); - uart_port_unlock(port, flags); + spin_unlock_irqrestore(&port->lock, flags); + uart_port_deref_no_rpm(port); return ret; } @@ -612,9 +644,17 @@ static int uart_write_room(struct tty_struct *tty) unsigned long flags; int ret; - port = uart_port_lock(state, flags); + if (!state->xmit.buf) + return 0; + + port = uart_port_ref_no_rpm(state); + if (!port) + return 0; + + spin_lock_irqsave(&port->lock, flags); ret = uart_circ_chars_free(&state->xmit); - uart_port_unlock(port, flags); + spin_unlock_irqrestore(&port->lock, flags); + uart_port_deref_no_rpm(port); return ret; } @@ -625,9 +665,17 @@ static int uart_chars_in_buffer(struct tty_struct *tty) unsigned long flags; int ret; - port = uart_port_lock(state, flags); + if (!state->xmit.buf) + return 0; + + port = uart_port_ref_no_rpm(state); + if (!port) + return 0; + + spin_lock_irqsave(&port->lock, flags); ret = uart_circ_chars_pending(&state->xmit); - uart_port_unlock(port, flags); + spin_unlock_irqrestore(&port->lock, flags); + uart_port_deref_no_rpm(port); return ret; } @@ -987,6 +1035,8 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port, } } else { retval = uart_startup(tty, state, 1); + if (retval == 0) + tty_port_set_initialized(port, true); if (retval > 0) retval = 0; } @@ -1029,7 +1079,10 @@ static int uart_get_lsr_info(struct tty_struct *tty, struct uart_port *uport = uart_port_check(state); unsigned int result; + pm_runtime_get_sync(uport->dev); result = uport->ops->tx_empty(uport); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); /* * If we're about to load something into the transmit @@ -1059,9 
+1112,13 @@ static int uart_tiocmget(struct tty_struct *tty) if (!tty_io_error(tty)) { result = uport->mctrl; + + pm_runtime_get_sync(uport->dev); spin_lock_irq(&uport->lock); result |= uport->ops->get_mctrl(uport); spin_unlock_irq(&uport->lock); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); } out: mutex_unlock(&port->mutex); @@ -1102,8 +1159,11 @@ static int uart_break_ctl(struct tty_struct *tty, int break_state) if (!uport) goto out; + pm_runtime_get_sync(uport->dev); if (uport->type != PORT_UNKNOWN) uport->ops->break_ctl(uport, break_state); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); ret = 0; out: mutex_unlock(&port->mutex); @@ -1152,9 +1212,14 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state) * This will claim the ports resources if * a port is found. */ + pm_runtime_get_sync(uport->dev); uport->ops->config_port(uport, flags); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); ret = uart_startup(tty, state, 1); + if (ret == 0) + tty_port_set_initialized(port, true); if (ret > 0) ret = 0; } @@ -1246,13 +1311,13 @@ static int uart_get_icount(struct tty_struct *tty, struct uart_icount cnow; struct uart_port *uport; - uport = uart_port_ref(state); + uport = uart_port_ref_no_rpm(state); if (!uport) return -EIO; spin_lock_irq(&uport->lock); memcpy(&cnow, &uport->icount, sizeof(struct uart_icount)); spin_unlock_irq(&uport->lock); - uart_port_deref(uport); + uart_port_deref_no_rpm(uport); icount->cts = cnow.cts; icount->dsr = cnow.dsr; @@ -1412,8 +1477,12 @@ static void uart_set_ldisc(struct tty_struct *tty) mutex_lock(&state->port.mutex); uport = uart_port_check(state); - if (uport && uport->ops->set_ldisc) + if (uport && uport->ops->set_ldisc) { + pm_runtime_get_sync(uport->dev); uport->ops->set_ldisc(uport, &tty->termios); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); + } mutex_unlock(&state->port.mutex); } @@ -1513,9 +1582,12 @@ static void uart_tty_port_shutdown(struct tty_port *port) if (WARN(!uport, "detached port still initialized!\n")) return; + pm_runtime_get_sync(uport->dev); spin_lock_irq(&uport->lock); uport->ops->stop_rx(uport); spin_unlock_irq(&uport->lock); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); uart_port_shutdown(port); @@ -1525,9 +1597,6 @@ static void uart_tty_port_shutdown(struct tty_port *port) * we don't try to resume a port that has been shutdown. */ tty_port_set_suspended(port, 0); - - uart_change_pm(state, UART_PM_STATE_OFF); - } static void uart_wait_until_sent(struct tty_struct *tty, int timeout) @@ -1618,8 +1687,6 @@ static void uart_hangup(struct tty_struct *tty) spin_unlock_irqrestore(&port->lock, flags); tty_port_set_active(port, 0); tty_port_tty_set(port, NULL); - if (uport && !uart_console(uport)) - uart_change_pm(state, UART_PM_STATE_OFF); wake_up_interruptible(&port->open_wait); wake_up_interruptible(&port->delta_msr_wait); } @@ -1644,8 +1711,12 @@ static void uart_port_shutdown(struct tty_port *port) /* * Free the IRQ and disable the port. */ - if (uport) + if (uport) { + pm_runtime_get_sync(uport->dev); uport->ops->shutdown(uport); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); + } /* * Ensure that the IRQ handler isn't running on another CPU. 
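Every hardware-touching uart_ops call in the hunks above receives the same three-step bracket: pm_runtime_get_sync() before the op, then pm_runtime_mark_last_busy() plus pm_runtime_put_autosuspend() after it. A sketch of the idiom, using break_ctl as the example op; the wrapper name is illustrative:

#include <linux/pm_runtime.h>
#include <linux/serial_core.h>

static void my_break_ctl(struct uart_port *uport, int break_state)
{
	pm_runtime_get_sync(uport->dev);	/* ensure device is powered */
	if (uport->type != PORT_UNKNOWN)
		uport->ops->break_ctl(uport, break_state);
	/* Restart the autosuspend timer rather than suspending now. */
	pm_runtime_mark_last_busy(uport->dev);
	pm_runtime_put_autosuspend(uport->dev);
}

The mark_last_busy()/put_autosuspend() pair is what lets an idle port power down only after its autosuspend delay expires, instead of bouncing the device on every ioctl.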
@@ -1752,7 +1823,6 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i) { struct uart_state *state = drv->state + i; struct tty_port *port = &state->port; - enum uart_pm_state pm_state; struct uart_port *uport; char stat_buf[32]; unsigned int status; @@ -1777,14 +1847,12 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i) } if (capable(CAP_SYS_ADMIN)) { - pm_state = state->pm_state; - if (pm_state != UART_PM_STATE_ON) - uart_change_pm(state, UART_PM_STATE_ON); + pm_runtime_get_sync(uport->dev); spin_lock_irq(&uport->lock); status = uport->ops->get_mctrl(uport); spin_unlock_irq(&uport->lock); - if (pm_state != UART_PM_STATE_ON) - uart_change_pm(state, pm_state); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); seq_printf(m, " tx:%d rx:%d", uport->icount.tx, uport->icount.rx); @@ -2040,7 +2108,15 @@ uart_set_options(struct uart_port *port, struct console *co, */ port->mctrl |= TIOCM_DTR; - port->ops->set_termios(port, &termios, &dummy); + /* At early stage device is not created yet, we can't do PM */ + if (port->dev) { + pm_runtime_get_sync(port->dev); + port->ops->set_termios(port, &termios, &dummy); + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); + } else + port->ops->set_termios(port, &termios, &dummy); + /* * Allow the setting of the UART parameters with a NULL console * too: @@ -2053,26 +2129,6 @@ uart_set_options(struct uart_port *port, struct console *co, EXPORT_SYMBOL_GPL(uart_set_options); #endif /* CONFIG_SERIAL_CORE_CONSOLE */ -/** - * uart_change_pm - set power state of the port - * - * @state: port descriptor - * @pm_state: new state - * - * Locking: port->mutex has to be held - */ -static void uart_change_pm(struct uart_state *state, - enum uart_pm_state pm_state) -{ - struct uart_port *port = uart_port_check(state); - - if (state->pm_state != pm_state) { - if (port && port->ops->pm) - port->ops->pm(port, pm_state, state->pm_state); - state->pm_state = pm_state; - } -} - struct uart_match { struct uart_port *port; struct uart_driver *driver; @@ -2119,11 +2175,14 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) tty_port_set_suspended(port, 1); tty_port_set_initialized(port, 0); + pm_runtime_get_sync(uport->dev); spin_lock_irq(&uport->lock); ops->stop_tx(uport); ops->set_mctrl(uport, 0); ops->stop_rx(uport); spin_unlock_irq(&uport->lock); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); /* * Wait for the transmitter to empty. 
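uart_set_options() above has to special-case the early console: at that stage the uart_port may not have a struct device yet, so the runtime-PM calls would dereference a NULL pointer. A hedged sketch of guard helpers expressing the same rule; the my_* names are illustrative:

#include <linux/pm_runtime.h>
#include <linux/serial_core.h>

/* Runtime-PM brackets that tolerate a not-yet-created device. */
static void my_port_pm_get(struct uart_port *port)
{
	if (port->dev)		/* NULL on the early-console path */
		pm_runtime_get_sync(port->dev);
}

static void my_port_pm_put(struct uart_port *port)
{
	if (port->dev) {
		pm_runtime_mark_last_busy(port->dev);
		pm_runtime_put_autosuspend(port->dev);
	}
}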
@@ -2134,7 +2193,10 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) dev_err(uport->dev, "%s: Unable to drain transmitter\n", uport->name); + pm_runtime_get_sync(uport->dev); ops->shutdown(uport); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); } /* @@ -2142,8 +2204,6 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) */ if (uart_console(uport)) console_stop(uport->cons); - - uart_change_pm(state, UART_PM_STATE_OFF); unlock: mutex_unlock(&port->mutex); @@ -2187,9 +2247,11 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) if (port->tty && termios.c_cflag == 0) termios = port->tty->termios; - if (console_suspend_enabled) - uart_change_pm(state, UART_PM_STATE_ON); + pm_runtime_get_sync(uport->dev); uport->ops->set_termios(uport, &termios, NULL); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); + if (console_suspend_enabled) console_start(uport->cons); } @@ -2198,21 +2260,31 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) const struct uart_ops *ops = uport->ops; int ret; - uart_change_pm(state, UART_PM_STATE_ON); + pm_runtime_get_sync(uport->dev); spin_lock_irq(&uport->lock); ops->set_mctrl(uport, 0); spin_unlock_irq(&uport->lock); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); + if (console_suspend_enabled || !uart_console(uport)) { /* Protected by port mutex for now */ struct tty_struct *tty = port->tty; + + pm_runtime_get_sync(uport->dev); ret = ops->startup(uport); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); if (ret == 0) { if (tty) uart_change_speed(tty, state, NULL); + pm_runtime_get_sync(uport->dev); spin_lock_irq(&uport->lock); ops->set_mctrl(uport, uport->mctrl); ops->start_tx(uport); spin_unlock_irq(&uport->lock); + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); tty_port_set_initialized(port, 1); } else { /* @@ -2298,17 +2370,17 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, uart_report_port(drv, port); - /* Power up port for set_mctrl() */ - uart_change_pm(state, UART_PM_STATE_ON); - /* * Ensure that the modem control lines are de-activated. * keep the DTR setting that is set in uart_set_options() * We probably don't need a spinlock around this, but */ + pm_runtime_get_sync(port->dev); spin_lock_irqsave(&port->lock, flags); port->ops->set_mctrl(port, port->mctrl & TIOCM_DTR); spin_unlock_irqrestore(&port->lock, flags); + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); /* * If this driver supports console, and it hasn't been @@ -2317,13 +2389,6 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, */ if (port->cons && !(port->cons->flags & CON_ENABLED)) register_console(port->cons); - - /* - * Power down all ports by default, except the - * console if we have one. 
- */ - if (!uart_console(port)) - uart_change_pm(state, UART_PM_STATE_OFF); } } @@ -2751,7 +2816,6 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport) state->uart_port = uport; uport->state = state; - state->pm_state = UART_PM_STATE_UNDEFINED; uport->cons = drv->cons; uport->minor = drv->tty_driver->minor_start + uport->line; uport->name = kasprintf(GFP_KERNEL, "%s%d", drv->dev_name, @@ -2970,6 +3034,7 @@ void uart_handle_cts_change(struct uart_port *uport, unsigned int status) uport->icount.cts++; if (uart_softcts_mode(uport)) { + pm_runtime_get_sync(uport->dev); if (uport->hw_stopped) { if (status) { uport->hw_stopped = 0; @@ -2982,6 +3047,8 @@ void uart_handle_cts_change(struct uart_port *uport, unsigned int status) uport->ops->stop_tx(uport); } } + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); } } diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c index f80fead6c5fc..c81ecc546a2e 100644 --- a/drivers/tty/serial/serial_txx9.c +++ b/drivers/tty/serial/serial_txx9.c @@ -734,22 +734,6 @@ serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios, spin_unlock_irqrestore(&up->port.lock, flags); } -static void -serial_txx9_pm(struct uart_port *port, unsigned int state, - unsigned int oldstate) -{ - /* - * If oldstate was -1 this is called from - * uart_configure_port(). In this case do not initialize the - * port now, because the port was already initialized (for - * non-console port) or should not be initialized here (for - * console port). If we initialized the port here we lose - * serial console settings. - */ - if (state == 0 && oldstate != -1) - serial_txx9_initialize(port); -} - static int serial_txx9_request_resource(struct uart_txx9_port *up) { unsigned int size = TXX9_REGION_SIZE; @@ -856,7 +840,6 @@ static const struct uart_ops serial_txx9_pops = { .startup = serial_txx9_startup, .shutdown = serial_txx9_shutdown, .set_termios = serial_txx9_set_termios, - .pm = serial_txx9_pm, .type = serial_txx9_type, .release_port = serial_txx9_release_port, .request_port = serial_txx9_request_port, diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 784dd42002ea..ac3b582b394b 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -886,6 +886,8 @@ static void sci_receive_chars(struct uart_port *port) /* Tell the rest of the system the news. New characters! */ tty_flip_buffer_push(tport); } else { + /* TTY buffers full; read from RX reg to prevent lockup */ + serial_port_in(port, SCxRDR); serial_port_in(port, SCxSR); /* dummy read */ sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port)); } @@ -1491,6 +1493,14 @@ static void sci_request_dma(struct uart_port *port) return; s->cookie_tx = -EINVAL; + + /* + * Don't request a dma channel if no channel was specified + * in the device tree. 
+ */ + if (!of_find_property(port->dev->of_node, "dmas", NULL)) + return; + chan = sci_request_dma_chan(port, DMA_MEM_TO_DEV); dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan); if (chan) { @@ -2468,21 +2478,6 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, sci_enable_ms(port); } -static void sci_pm(struct uart_port *port, unsigned int state, - unsigned int oldstate) -{ - struct sci_port *sci_port = to_sci_port(port); - - switch (state) { - case UART_PM_STATE_OFF: - sci_port_disable(sci_port); - break; - default: - sci_port_enable(sci_port); - break; - } -} - static const char *sci_type(struct uart_port *port) { switch (port->type) { @@ -2597,7 +2592,6 @@ static const struct uart_ops sci_uart_ops = { .shutdown = sci_shutdown, .flush_buffer = sci_flush_buffer, .set_termios = sci_set_termios, - .pm = sci_pm, .type = sci_type, .release_port = sci_release_port, .request_port = sci_request_port, @@ -2659,8 +2653,8 @@ static int sci_init_clocks(struct sci_port *sci_port, struct device *dev) dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i], PTR_ERR(clk)); else - dev_dbg(dev, "clk %s is %pC rate %pCr\n", clk_names[i], - clk, clk); + dev_dbg(dev, "clk %s is %pC rate %lu\n", clk_names[i], + clk, clk_get_rate(clk)); sci_port->clks[i] = IS_ERR(clk) ? NULL : clk; } return 0; @@ -2844,16 +2838,15 @@ static void serial_console_write(struct console *co, const char *s, unsigned long flags; int locked = 1; - local_irq_save(flags); #if defined(SUPPORT_SYSRQ) if (port->sysrq) locked = 0; else #endif if (oops_in_progress) - locked = spin_trylock(&port->lock); + locked = spin_trylock_irqsave(&port->lock, flags); else - spin_lock(&port->lock); + spin_lock_irqsave(&port->lock, flags); /* first save SCSCR then disable interrupts, keep clock source */ ctrl = serial_port_in(port, SCSCR); @@ -2873,8 +2866,7 @@ static void serial_console_write(struct console *co, const char *s, serial_port_out(port, SCSCR, ctrl); if (locked) - spin_unlock(&port->lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&port->lock, flags); } static int serial_console_setup(struct console *co, char *options) @@ -3066,6 +3058,10 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev, dev_err(&pdev->dev, "failed to get alias id (%d)\n", id); return NULL; } + if (id >= ARRAY_SIZE(sci_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", id); + return NULL; + } sp = &sci_ports[id]; *dev_id = id; diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c index 684cb8dd8050..2f79ab4dec8c 100644 --- a/drivers/tty/serial/sirfsoc_uart.c +++ b/drivers/tty/serial/sirfsoc_uart.c @@ -882,16 +882,6 @@ static void sirfsoc_uart_set_termios(struct uart_port *port, spin_unlock_irqrestore(&port->lock, flags); } -static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state, - unsigned int oldstate) -{ - struct sirfsoc_uart_port *sirfport = to_sirfport(port); - if (!state) - clk_prepare_enable(sirfport->clk); - else - clk_disable_unprepare(sirfport->clk); -} - static int sirfsoc_uart_startup(struct uart_port *port) { struct sirfsoc_uart_port *sirfport = to_sirfport(port); @@ -1073,7 +1063,6 @@ static const struct uart_ops sirfsoc_uart_ops = { .startup = sirfsoc_uart_startup, .shutdown = sirfsoc_uart_shutdown, .set_termios = sirfsoc_uart_set_termios, - .pm = sirfsoc_uart_pm, .type = sirfsoc_uart_type, .release_port = sirfsoc_uart_release_port, .request_port = sirfsoc_uart_request_port, diff --git a/drivers/tty/serial/sn_console.c 
b/drivers/tty/serial/sn_console.c index 9e0e6586c698..e076d1abc0bc 100644 --- a/drivers/tty/serial/sn_console.c +++ b/drivers/tty/serial/sn_console.c @@ -391,7 +391,6 @@ static const struct uart_ops sn_console_ops = { .startup = snp_startup, .shutdown = snp_shutdown, .set_termios = snp_set_termios, - .pm = NULL, .type = snp_type, .release_port = snp_release_port, .request_port = snp_request_port, diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c index b313a792b149..93f40cb5bb88 100644 --- a/drivers/tty/serial/st-asc.c +++ b/drivers/tty/serial/st-asc.c @@ -483,32 +483,6 @@ static void asc_shutdown(struct uart_port *port) free_irq(port->irq, port); } -static void asc_pm(struct uart_port *port, unsigned int state, - unsigned int oldstate) -{ - struct asc_port *ascport = to_asc_port(port); - unsigned long flags = 0; - u32 ctl; - - switch (state) { - case UART_PM_STATE_ON: - clk_prepare_enable(ascport->clk); - break; - case UART_PM_STATE_OFF: - /* - * Disable the ASC baud rate generator, which is as close as - * we can come to turning it off. Note this is not called with - * the port spinlock held. - */ - spin_lock_irqsave(&port->lock, flags); - ctl = asc_in(port, ASC_CTL) & ~ASC_CTL_RUN; - asc_out(port, ASC_CTL, ctl); - spin_unlock_irqrestore(&port->lock, flags); - clk_disable_unprepare(ascport->clk); - break; - } -} - static void asc_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { @@ -715,7 +689,6 @@ static const struct uart_ops asc_uart_ops = { .request_port = asc_request_port, .config_port = asc_config_port, .verify_port = asc_verify_port, - .pm = asc_pm, #ifdef CONFIG_CONSOLE_POLL .poll_get_char = asc_get_poll_char, .poll_put_char = asc_put_poll_char, diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 03a583264d9e..8800a8a21da6 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -626,28 +626,6 @@ stm32_verify_port(struct uart_port *port, struct serial_struct *ser) return -EINVAL; } -static void stm32_pm(struct uart_port *port, unsigned int state, - unsigned int oldstate) -{ - struct stm32_port *stm32port = container_of(port, - struct stm32_port, port); - struct stm32_usart_offsets *ofs = &stm32port->info->ofs; - struct stm32_usart_config *cfg = &stm32port->info->cfg; - unsigned long flags = 0; - - switch (state) { - case UART_PM_STATE_ON: - clk_prepare_enable(stm32port->clk); - break; - case UART_PM_STATE_OFF: - spin_lock_irqsave(&port->lock, flags); - stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); - spin_unlock_irqrestore(&port->lock, flags); - clk_disable_unprepare(stm32port->clk); - break; - } -} - static const struct uart_ops stm32_uart_ops = { .tx_empty = stm32_tx_empty, .set_mctrl = stm32_set_mctrl, @@ -661,7 +639,6 @@ static const struct uart_ops stm32_uart_ops = { .startup = stm32_startup, .shutdown = stm32_shutdown, .set_termios = stm32_set_termios, - .pm = stm32_pm, .type = stm32_type, .release_port = stm32_release_port, .request_port = stm32_request_port, diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c index 439057e8107a..b538a61cd252 100644 --- a/drivers/tty/serial/vr41xx_siu.c +++ b/drivers/tty/serial/vr41xx_siu.c @@ -594,32 +594,6 @@ static void siu_set_termios(struct uart_port *port, struct ktermios *new, spin_unlock_irqrestore(&port->lock, flags); } -static void siu_pm(struct uart_port *port, unsigned int state, unsigned int oldstate) -{ - switch (state) { - case 0: - switch (port->type) { - case 
PORT_VR41XX_SIU: - vr41xx_supply_clock(SIU_CLOCK); - break; - case PORT_VR41XX_DSIU: - vr41xx_supply_clock(DSIU_CLOCK); - break; - } - break; - case 3: - switch (port->type) { - case PORT_VR41XX_SIU: - vr41xx_mask_clock(SIU_CLOCK); - break; - case PORT_VR41XX_DSIU: - vr41xx_mask_clock(DSIU_CLOCK); - break; - } - break; - } -} - static const char *siu_type(struct uart_port *port) { return siu_type_name(port); @@ -693,7 +667,6 @@ static const struct uart_ops siu_uart_ops = { .startup = siu_startup, .shutdown = siu_shutdown, .set_termios = siu_set_termios, - .pm = siu_pm, .type = siu_type, .release_port = siu_release_port, .request_port = siu_request_port, diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 31a630ae0870..86be1151c1b5 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1065,20 +1065,6 @@ static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c) } #endif -static void cdns_uart_pm(struct uart_port *port, unsigned int state, - unsigned int oldstate) -{ - switch (state) { - case UART_PM_STATE_OFF: - pm_runtime_mark_last_busy(port->dev); - pm_runtime_put_autosuspend(port->dev); - break; - default: - pm_runtime_get_sync(port->dev); - break; - } -} - static const struct uart_ops cdns_uart_ops = { .set_mctrl = cdns_uart_set_mctrl, .get_mctrl = cdns_uart_get_mctrl, @@ -1090,7 +1076,6 @@ static const struct uart_ops cdns_uart_ops = { .set_termios = cdns_uart_set_termios, .startup = cdns_uart_startup, .shutdown = cdns_uart_shutdown, - .pm = cdns_uart_pm, .type = cdns_uart_type, .verify_port = cdns_uart_verify_port, .request_port = cdns_uart_request_port, @@ -1115,7 +1100,7 @@ static struct uart_port *cdns_uart_get_port(int id) struct uart_port *port; /* Try the given port id if failed use default method */ - if (cdns_uart_port[id].mapbase != 0) { + if (id < CDNS_UART_NR_PORTS && cdns_uart_port[id].mapbase != 0) { /* Find the next unused port */ for (id = 0; id < CDNS_UART_NR_PORTS; id++) if (cdns_uart_port[id].mapbase == 0) diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c index d32bd499d684..cce33ad53c73 100644 --- a/drivers/tty/serial/zs.c +++ b/drivers/tty/serial/zs.c @@ -958,24 +958,6 @@ static void zs_set_termios(struct uart_port *uport, struct ktermios *termios, spin_unlock_irqrestore(&scc->zlock, flags); } -/* - * Hack alert! - * Required solely so that the initial PROM-based console - * works undisturbed in parallel with this one. 
- */ -static void zs_pm(struct uart_port *uport, unsigned int state, - unsigned int oldstate) -{ - struct zs_port *zport = to_zport(uport); - - if (state < 3) - zport->regs[5] |= TxENAB; - else - zport->regs[5] &= ~TxENAB; - write_zsreg(zport, R5, zport->regs[5]); -} - - static const char *zs_type(struct uart_port *uport) { return "Z85C30 SCC"; @@ -1057,7 +1039,6 @@ static const struct uart_ops zs_ops = { .startup = zs_startup, .shutdown = zs_shutdown, .set_termios = zs_set_termios, - .pm = zs_pm, .type = zs_type, .release_port = zs_release_port, .request_port = zs_request_port, @@ -1211,7 +1192,6 @@ static int __init zs_console_setup(struct console *co, char *options) return ret; zs_reset(zport); - zs_pm(uport, 0, -1); if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index d008f5a75197..377b3592384e 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -246,8 +246,10 @@ static void sysrq_handle_showallcpus(int key) * architecture has no support for it: */ if (!trigger_all_cpu_backtrace()) { - struct pt_regs *regs = get_irq_regs(); + struct pt_regs *regs = NULL; + if (in_irq()) + regs = get_irq_regs(); if (regs) { pr_info("CPU%d:\n", smp_processor_id()); show_regs(regs); @@ -266,7 +268,10 @@ static struct sysrq_key_op sysrq_showallcpus_op = { static void sysrq_handle_showregs(int key) { - struct pt_regs *regs = get_irq_regs(); + struct pt_regs *regs = NULL; + + if (in_irq()) + regs = get_irq_regs(); if (regs) show_regs(regs); perf_event_print_debug(); diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index f8eba1c5412f..677fa99b7747 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -446,7 +446,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string); * Callers other than flush_to_ldisc() need to exclude the kworker * from concurrent use of the line discipline, see paste_selection(). * - * Returns the number of bytes not processed + * Returns the number of bytes processed */ int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, char *f, int count) diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 94cccb6efa32..562d31073f9a 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -585,6 +585,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session) return; } + /* + * Some console devices aren't actually hung up for technical and + * historical reasons, which can lead to indefinite interruptible + * sleep in n_tty_read(). The following explicitly tells + * n_tty_read() to abort readers. + */ + set_bit(TTY_HUPPING, &tty->flags); + /* inuse_filps is protected by the single tty lock, this really needs to change if we want to flush the workqueue with the lock held */ @@ -639,6 +647,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session) * from the ldisc side, which is now guaranteed. */ set_bit(TTY_HUPPED, &tty->flags); + clear_bit(TTY_HUPPING, &tty->flags); tty_unlock(tty); if (f) @@ -1322,6 +1331,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) "%s: %s driver does not set tty->port. This will crash the kernel later. 
Fix the driver!\n", __func__, tty->driver->name); + retval = tty_ldisc_lock(tty, 5 * HZ); + if (retval) + goto err_release_lock; tty->port->itty = tty; /* @@ -1332,6 +1344,7 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) retval = tty_ldisc_setup(tty, tty->link); if (retval) goto err_release_tty; + tty_ldisc_unlock(tty); /* Return the tty locked so that it cannot vanish under the caller */ return tty; @@ -1344,9 +1357,11 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx) /* call the tty release_tty routine to clean out this slot */ err_release_tty: - tty_unlock(tty); + tty_ldisc_unlock(tty); tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n", retval, idx); +err_release_lock: + tty_unlock(tty); release_tty(tty, idx); return ERR_PTR(retval); } @@ -1475,6 +1490,8 @@ static void release_tty(struct tty_struct *tty, int idx) if (tty->link) tty->link->port->itty = NULL; tty_buffer_cancel_work(tty->port); + if (tty->link) + tty_buffer_cancel_work(tty->link->port); tty_kref_put(tty->link); tty_kref_put(tty); @@ -2798,7 +2815,10 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx) kref_init(&tty->kref); tty->magic = TTY_MAGIC; - tty_ldisc_init(tty); + if (tty_ldisc_init(tty)) { + kfree(tty); + return NULL; + } tty->session = NULL; tty->pgrp = NULL; mutex_init(&tty->legacy_mutex); diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 84a8ac2a779f..ca656ef8de64 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -175,12 +175,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc) return ERR_CAST(ldops); } - ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL); - if (ld == NULL) { - put_ldops(ldops); - return ERR_PTR(-ENOMEM); - } - + /* + * There is no way to handle allocation failure of only 16 bytes. + * Let's simplify error handling and save more memory. 
+ */ + ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL); ld->ops = ldops; ld->tty = tty; @@ -336,7 +335,7 @@ static inline void __tty_ldisc_unlock(struct tty_struct *tty) ldsem_up_write(&tty->ldisc_sem); } -static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout) +int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout) { int ret; @@ -347,7 +346,7 @@ static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout) return 0; } -static void tty_ldisc_unlock(struct tty_struct *tty) +void tty_ldisc_unlock(struct tty_struct *tty) { clear_bit(TTY_LDISC_HALTED, &tty->flags); __tty_ldisc_unlock(tty); @@ -526,19 +525,16 @@ static int tty_ldisc_failto(struct tty_struct *tty, int ld) static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) { /* There is an outstanding reference here so this is safe */ - old = tty_ldisc_get(tty, old->ops->num); - WARN_ON(IS_ERR(old)); - tty->ldisc = old; - tty_set_termios_ldisc(tty, old->ops->num); - if (tty_ldisc_open(tty, old) < 0) { - tty_ldisc_put(old); + if (tty_ldisc_failto(tty, old->ops->num) < 0) { + const char *name = tty_name(tty); + + pr_warn("Falling back ldisc for %s.\n", name); /* The traditional behaviour is to fall back to N_TTY, we want to avoid falling back to N_NULL unless we have no choice to avoid the risk of breaking anything */ if (tty_ldisc_failto(tty, N_TTY) < 0 && tty_ldisc_failto(tty, N_NULL) < 0) - panic("Couldn't open N_NULL ldisc for %s.", - tty_name(tty)); + panic("Couldn't open N_NULL ldisc for %s.", name); } } @@ -823,12 +819,13 @@ EXPORT_SYMBOL_GPL(tty_ldisc_release); * the tty structure is not completely set up when this call is made. */ -void tty_ldisc_init(struct tty_struct *tty) +int tty_ldisc_init(struct tty_struct *tty) { struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY); if (IS_ERR(ld)) - panic("n_tty: init_tty"); + return PTR_ERR(ld); tty->ldisc = ld; + return 0; } /** diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 2ebaba16f785..e77421e7bf46 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -782,7 +782,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ if (!*vc->vc_uni_pagedir_loc) con_set_default_unimap(vc); - vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL); + vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL); if (!vc->vc_screenbuf) goto err_free; @@ -869,7 +869,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_screen_size > (4 << 20)) return -EINVAL; - newscreen = kmalloc(new_screen_size, GFP_USER); + newscreen = kzalloc(new_screen_size, GFP_USER); if (!newscreen) return -ENOMEM; @@ -1352,6 +1352,11 @@ static void csi_m(struct vc_data *vc) case 3: vc->vc_italic = 1; break; + case 21: + /* + * No console drivers support double underline, so + * convert it to a single underline. 
+ */ case 4: vc->vc_underline = 1; break; @@ -1387,7 +1392,6 @@ static void csi_m(struct vc_data *vc) vc->vc_disp_ctrl = 1; vc->vc_toggle_meta = 1; break; - case 21: case 22: vc->vc_intensity = 1; break; @@ -1725,7 +1729,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear) default_attr(vc); update_attr(vc); - vc->vc_tab_stop[0] = 0x01010100; + vc->vc_tab_stop[0] = vc->vc_tab_stop[1] = vc->vc_tab_stop[2] = vc->vc_tab_stop[3] = @@ -1769,7 +1773,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) vc->vc_pos -= (vc->vc_x << 1); while (vc->vc_x < vc->vc_cols - 1) { vc->vc_x++; - if (vc->vc_tab_stop[vc->vc_x >> 5] & (1 << (vc->vc_x & 31))) + if (vc->vc_tab_stop[7 & (vc->vc_x >> 5)] & (1 << (vc->vc_x & 31))) break; } vc->vc_pos += (vc->vc_x << 1); @@ -1829,7 +1833,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) lf(vc); return; case 'H': - vc->vc_tab_stop[vc->vc_x >> 5] |= (1 << (vc->vc_x & 31)); + vc->vc_tab_stop[7 & (vc->vc_x >> 5)] |= (1 << (vc->vc_x & 31)); return; case 'Z': respond_ID(tty); @@ -2022,7 +2026,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) return; case 'g': if (!vc->vc_par[0]) - vc->vc_tab_stop[vc->vc_x >> 5] &= ~(1 << (vc->vc_x & 31)); + vc->vc_tab_stop[7 & (vc->vc_x >> 5)] &= ~(1 << (vc->vc_x & 31)); else if (vc->vc_par[0] == 3) { vc->vc_tab_stop[0] = vc->vc_tab_stop[1] = diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c index 48d5327d38d4..fe5cdda80b2c 100644 --- a/drivers/uio/uio_hv_generic.c +++ b/drivers/uio/uio_hv_generic.c @@ -124,6 +124,13 @@ hv_uio_probe(struct hv_device *dev, if (ret) goto fail; + /* Communicating with host has to be via shared memory not hypercall */ + if (!dev->channel->offermsg.monitor_allocated) { + dev_err(&dev->device, "vmbus channel requires hypercall\n"); + ret = -ENOTSUPP; + goto fail_close; + } + dev->channel->inbound.ring_buffer->interrupt_mask = 1; set_channel_read_mode(dev->channel, HV_CALL_DIRECT); diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 939a63bca82f..72eb3e41e3b6 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -19,6 +19,14 @@ config USB_EHCI_BIG_ENDIAN_MMIO config USB_EHCI_BIG_ENDIAN_DESC bool +config USB_UHCI_BIG_ENDIAN_MMIO + bool + default y if SPARC_LEON + +config USB_UHCI_BIG_ENDIAN_DESC + bool + default y if SPARC_LEON + menuconfig USB_SUPPORT bool "USB support" depends on HAS_IOMEM diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c index bb626120296f..53f3bf459dd1 100644 --- a/drivers/usb/chipidea/ci_hdrc_msm.c +++ b/drivers/usb/chipidea/ci_hdrc_msm.c @@ -251,7 +251,7 @@ static int ci_hdrc_msm_probe(struct platform_device *pdev) if (ret) goto err_mux; - ulpi_node = of_find_node_by_name(of_node_get(pdev->dev.of_node), "ulpi"); + ulpi_node = of_get_child_by_name(pdev->dev.of_node, "ulpi"); if (ulpi_node) { phy_node = of_get_next_available_child(ulpi_node, NULL); ci->hsic = of_device_is_compatible(phy_node, "qcom,usb-hsic-phy"); diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 18c923a4c16e..f2f31fc16f29 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -187,6 +187,7 @@ static int acm_wb_alloc(struct acm *acm) wb = &acm->wb[wbn]; if (!wb->use) { wb->use = 1; + wb->len = 0; return wbn; } wbn = (wbn + 1) % ACM_NW; @@ -438,7 +439,7 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) res = usb_submit_urb(acm->read_urbs[index], mem_flags); if (res) { - if (res != 
-EPERM) { + if (res != -EPERM && res != -ENODEV) { dev_err(&acm->data->dev, "urb %d failed submission with %d\n", index, res); @@ -818,16 +819,18 @@ static int acm_tty_write(struct tty_struct *tty, static void acm_tty_flush_chars(struct tty_struct *tty) { struct acm *acm = tty->driver_data; - struct acm_wb *cur = acm->putbuffer; + struct acm_wb *cur; int err; unsigned long flags; + spin_lock_irqsave(&acm->write_lock, flags); + + cur = acm->putbuffer; if (!cur) /* nothing to do */ - return; + goto out; acm->putbuffer = NULL; err = usb_autopm_get_interface_async(acm->control); - spin_lock_irqsave(&acm->write_lock, flags); if (err < 0) { cur->use = 0; acm->putbuffer = cur; @@ -1765,6 +1768,12 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */ .driver_info = SINGLE_RX_URB, /* firmware bug */ }, + { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */ + .driver_info = SINGLE_RX_URB, + }, + { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */ + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ + }, { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ }, @@ -1835,6 +1844,9 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */ .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */ }, + { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */ + .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ + }, { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */ .driver_info = CLEAR_HALT_CONDITIONS, diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c index 4aa5195db8ea..e02acfb1ca95 100644 --- a/drivers/usb/common/ulpi.c +++ b/drivers/usb/common/ulpi.c @@ -183,9 +183,9 @@ static int ulpi_of_register(struct ulpi *ulpi) /* Find a ulpi bus underneath the parent or the grandparent */ parent = ulpi->dev.parent; if (parent->of_node) - np = of_find_node_by_name(parent->of_node, "ulpi"); + np = of_get_child_by_name(parent->of_node, "ulpi"); else if (parent->parent && parent->parent->of_node) - np = of_find_node_by_name(parent->parent->of_node, "ulpi"); + np = of_get_child_by_name(parent->parent->of_node, "ulpi"); if (!np) return 0; diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 883549ee946c..1a6ccdd5a5fc 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -191,7 +191,9 @@ static const unsigned short full_speed_maxpacket_maxes[4] = { static const unsigned short high_speed_maxpacket_maxes[4] = { [USB_ENDPOINT_XFER_CONTROL] = 64, [USB_ENDPOINT_XFER_ISOC] = 1024, - [USB_ENDPOINT_XFER_BULK] = 512, + + /* Bulk should be 512, but some devices use 1024: we will warn below */ + [USB_ENDPOINT_XFER_BULK] = 1024, [USB_ENDPOINT_XFER_INT] = 1024, }; static const unsigned short super_speed_maxpacket_maxes[4] = { @@ -555,6 +557,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, unsigned iad_num = 0; memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE); + nintf = nintf_orig = config->desc.bNumInterfaces; + config->desc.bNumInterfaces = 0; // Adjusted later + if (config->desc.bDescriptorType != USB_DT_CONFIG || config->desc.bLength < USB_DT_CONFIG_SIZE || config->desc.bLength > size) { @@ -568,7 +573,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, buffer += config->desc.bLength; size -= config->desc.bLength; - nintf = nintf_orig = config->desc.bNumInterfaces; if (nintf > USB_MAXINTERFACES) { 
dev_warn(ddev, "config %d has too many interfaces: %d, " "using maximum allowed: %d\n", @@ -764,18 +768,21 @@ void usb_destroy_configuration(struct usb_device *dev) return; if (dev->rawdescriptors) { - for (i = 0; i < dev->descriptor.bNumConfigurations; i++) + for (i = 0; i < dev->descriptor.bNumConfigurations && + i < USB_MAXCONFIG; i++) kfree(dev->rawdescriptors[i]); kfree(dev->rawdescriptors); dev->rawdescriptors = NULL; } - for (c = 0; c < dev->descriptor.bNumConfigurations; c++) { + for (c = 0; c < dev->descriptor.bNumConfigurations && + c < USB_MAXCONFIG; c++) { struct usb_host_config *cf = &dev->config[c]; kfree(cf->string); - for (i = 0; i < cf->desc.bNumInterfaces; i++) { + for (i = 0; i < cf->desc.bNumInterfaces && + i < USB_MAXINTERFACES; i++) { if (cf->intf_cache[i]) kref_put(&cf->intf_cache[i]->ref, usb_release_interface_cache); @@ -905,14 +912,25 @@ void usb_release_bos_descriptor(struct usb_device *dev) } } +static const __u8 bos_desc_len[256] = { + [USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE, + [USB_CAP_TYPE_EXT] = USB_DT_USB_EXT_CAP_SIZE, + [USB_SS_CAP_TYPE] = USB_DT_USB_SS_CAP_SIZE, + [USB_SSP_CAP_TYPE] = USB_DT_USB_SSP_CAP_SIZE(1), + [CONTAINER_ID_TYPE] = USB_DT_USB_SS_CONTN_ID_SIZE, + [USB_PTM_CAP_TYPE] = USB_DT_USB_PTM_ID_SIZE, +}; + /* Get BOS descriptor set */ int usb_get_bos_descriptor(struct usb_device *dev) { struct device *ddev = &dev->dev; struct usb_bos_descriptor *bos; struct usb_dev_cap_header *cap; + struct usb_ssp_cap_descriptor *ssp_cap; unsigned char *buffer; - int length, total_len, num, i; + int length, total_len, num, i, ssac; + __u8 cap_type; int ret; bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL); @@ -965,7 +983,13 @@ int usb_get_bos_descriptor(struct usb_device *dev) dev->bos->desc->bNumDeviceCaps = i; break; } + cap_type = cap->bDevCapabilityType; length = cap->bLength; + if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) { + dev->bos->desc->bNumDeviceCaps = i; + break; + } + total_len -= length; if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { @@ -973,7 +997,7 @@ int usb_get_bos_descriptor(struct usb_device *dev) continue; } - switch (cap->bDevCapabilityType) { + switch (cap_type) { case USB_CAP_TYPE_WIRELESS_USB: /* Wireless USB cap descriptor is handled by wusb */ break; @@ -986,8 +1010,11 @@ int usb_get_bos_descriptor(struct usb_device *dev) (struct usb_ss_cap_descriptor *)buffer; break; case USB_SSP_CAP_TYPE: - dev->bos->ssp_cap = - (struct usb_ssp_cap_descriptor *)buffer; + ssp_cap = (struct usb_ssp_cap_descriptor *)buffer; + ssac = (le32_to_cpu(ssp_cap->bmAttributes) & + USB_SSP_SUBLINK_SPEED_ATTRIBS); + if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac)) + dev->bos->ssp_cap = ssp_cap; break; case CONTAINER_ID_TYPE: dev->bos->ss_id = diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index e9326f31db8d..ab245352f102 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -1455,14 +1455,18 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb int number_of_packets = 0; unsigned int stream_id = 0; void *buf; - - if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP | - USBDEVFS_URB_SHORT_NOT_OK | + unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | USBDEVFS_URB_BULK_CONTINUATION | USBDEVFS_URB_NO_FSBR | USBDEVFS_URB_ZERO_PACKET | - USBDEVFS_URB_NO_INTERRUPT)) - return -EINVAL; + USBDEVFS_URB_NO_INTERRUPT; + /* USBDEVFS_URB_ISO_ASAP is a special case */ + if (uurb->type == USBDEVFS_URB_TYPE_ISO) + mask |= USBDEVFS_URB_ISO_ASAP; + + if (uurb->flags & ~mask) + 
return -EINVAL; + if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX) return -EINVAL; if (uurb->buffer_length > 0 && !uurb->buffer) @@ -1833,6 +1837,18 @@ static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg) return 0; } +static void compute_isochronous_actual_length(struct urb *urb) +{ + unsigned int i; + + if (urb->number_of_packets > 0) { + urb->actual_length = 0; + for (i = 0; i < urb->number_of_packets; i++) + urb->actual_length += + urb->iso_frame_desc[i].actual_length; + } +} + static int processcompl(struct async *as, void __user * __user *arg) { struct urb *urb = as->urb; @@ -1840,6 +1856,7 @@ static int processcompl(struct async *as, void __user * __user *arg) void __user *addr = as->userurb; unsigned int i; + compute_isochronous_actual_length(urb); if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) goto err_out; @@ -2008,6 +2025,7 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) void __user *addr = as->userurb; unsigned int i; + compute_isochronous_actual_length(urb); if (as->userbuffer && urb->actual_length) { if (copy_urb_data_to_user(as->userbuffer, urb)) return -EFAULT; diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index eb87a259d55c..1c40a037ba8e 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -1914,4 +1914,5 @@ struct bus_type usb_bus_type = { .name = "usb", .match = usb_device_match, .uevent = usb_uevent, + .need_parent_lock = true, }; diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c index bd3e0c5a6db2..212289c55b6f 100644 --- a/drivers/usb/core/generic.c +++ b/drivers/usb/core/generic.c @@ -210,8 +210,13 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg) if (!udev->parent) rc = hcd_bus_suspend(udev, msg); - /* Non-root devices don't need to do anything for FREEZE or PRETHAW */ + /* + * Non-root USB2 devices don't need to do anything for FREEZE + * or PRETHAW. USB3 devices don't support global suspend and + * need to be selectively suspended. 
+ */ + else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW) + && (udev->speed < USB_SPEED_SUPER)) rc = 0; else rc = usb_port_suspend(udev, msg); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 75ad6718858c..d0b2e0ed9bab 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2376,6 +2376,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd) spin_lock_irqsave (&hcd_root_hub_lock, flags); if (hcd->rh_registered) { + pm_wakeup_event(&hcd->self.root_hub->dev, 0); set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags); queue_work(pm_wq, &hcd->wakeup_work); } diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index e9ce6bb0b22d..a9db0887edca 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -650,12 +650,17 @@ void usb_wakeup_notification(struct usb_device *hdev, unsigned int portnum) { struct usb_hub *hub; + struct usb_port *port_dev; if (!hdev) return; hub = usb_hub_to_struct_hub(hdev); if (hub) { + port_dev = hub->ports[portnum - 1]; + if (port_dev && port_dev->child) + pm_wakeup_event(&port_dev->child->dev, 0); + set_bit(portnum, hub->wakeup_bits); kick_hub_wq(hub); } @@ -1136,10 +1141,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) if (!udev || udev->state == USB_STATE_NOTATTACHED) { /* Tell hub_wq to disconnect the device or - * check for a new connection + * check for a new connection or over current condition. + * Based on USB2.0 Spec Section 11.12.5, + * C_PORT_OVER_CURRENT could be set while + * PORT_OVER_CURRENT is not. So check for any of them. */ if (udev || (portstatus & USB_PORT_STAT_CONNECTION) || - (portstatus & USB_PORT_STAT_OVERCURRENT)) + (portstatus & USB_PORT_STAT_OVERCURRENT) || + (portchange & USB_PORT_STAT_C_OVERCURRENT)) set_bit(port1, hub->change_bits); } else if (portstatus & USB_PORT_STAT_ENABLE) { @@ -3352,6 +3361,10 @@ static int wait_for_connected(struct usb_device *udev, while (delay_ms < 2000) { if (status || *portstatus & USB_PORT_STAT_CONNECTION) break; + if (!port_is_power_on(hub, *portstatus)) { + status = -ENODEV; + break; + } msleep(20); delay_ms += 20; status = hub_port_status(hub, *port1, portstatus, portchange); @@ -3415,8 +3428,11 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) /* Skip the initial Clear-Suspend step for a remote wakeup */ status = hub_port_status(hub, port1, &portstatus, &portchange); - if (status == 0 && !port_is_suspended(hub, portstatus)) + if (status == 0 && !port_is_suspended(hub, portstatus)) { + if (portchange & USB_PORT_STAT_C_SUSPEND) + pm_wakeup_event(&udev->dev, 0); goto SuspendCleared; + } /* see 7.1.7.7; affects power usage, but not budgeting */ if (hub_is_superspeed(hub->hdev)) @@ -4511,7 +4527,9 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, * reset. 
But only on the first attempt, * lest we get into a time out/reset loop */ - if (r == 0 || (r == -ETIMEDOUT && retries == 0)) + if (r == 0 || (r == -ETIMEDOUT && + retries == 0 && + udev->speed > USB_SPEED_FULL)) break; } udev->descriptor.bMaxPacketSize0 = @@ -4935,6 +4953,15 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, usb_put_dev(udev); if ((status == -ENOTCONN) || (status == -ENOTSUPP)) break; + + /* When halfway through our retry count, power-cycle the port */ + if (i == (SET_CONFIG_TRIES / 2) - 1) { + dev_info(&port_dev->dev, "attempt power cycle\n"); + usb_hub_set_port_power(hdev, hub, port1, false); + msleep(2 * hub_power_on_good_delay(hub)); + usb_hub_set_port_power(hdev, hub, port1, true); + msleep(hub_power_on_good_delay(hub)); + } } if (hub->hdev->parent || !hcd->driver->port_handed_over || diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c index 1af877942110..2de0444c95a8 100644 --- a/drivers/usb/core/ledtrig-usbport.c +++ b/drivers/usb/core/ledtrig-usbport.c @@ -140,11 +140,17 @@ static bool usbport_trig_port_observed(struct usbport_trig_data *usbport_data, if (!led_np) return false; - /* Get node of port being added */ + /* + * Get node of port being added + * + * FIXME: This is really the device node of the connected device + */ port_np = usb_of_get_child_node(usb_dev->dev.of_node, port1); if (!port_np) return false; + of_node_put(port_np); + /* Amount of trigger sources for this LED */ count = of_count_phandle_with_args(led_np, "trigger-sources", "#trigger-source-cells"); diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 371a07d874a3..dd29e6ec1c43 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -150,6 +150,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); + /* Linger a bit, prior to the next control message. 
*/ + if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) + msleep(200); + kfree(dr); return ret; diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index a6aaf2f193a4..99f67764765f 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -45,6 +45,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 }, + /* HP v222w 16GB Mini USB Drive */ + { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Creative SB Audigy 2 NX */ { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -57,10 +60,11 @@ static const struct usb_device_id usb_quirk_list[] = { /* Microsoft LifeCam-VX700 v2.0 */ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, - /* Logitech HD Pro Webcams C920, C920-C and C930e */ + /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT }, /* Logitech ConferenceCam CC3000e */ { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, @@ -151,6 +155,12 @@ static const struct usb_device_id usb_quirk_list[] = { /* appletouch */ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */ + { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM }, + + /* ELSA MicroLink 56K */ + { USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, @@ -218,8 +228,19 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1a0a, 0x0200), .driver_info = USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, + /* Corsair K70 RGB */ + { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, + + /* Corsair Strafe */ + { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_DELAY_CTRL_MSG }, + /* Corsair Strafe RGB */ - { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_DELAY_CTRL_MSG }, + + /* Corsair K70 LUX */ + { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, /* MIDI keyboard WORLDE MINI */ { USB_DEVICE(0x1c75, 0x0204), .driver_info = diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 47903d510955..8b800e34407b 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -187,6 +187,31 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb); /*-------------------------------------------------------------------*/ +static const int pipetypes[4] = { + PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT +}; + +/** + * usb_urb_ep_type_check - sanity check of endpoint in the given urb + * @urb: urb to be checked + * + * This performs a light-weight sanity check for the endpoint in the + * given urb. It returns 0 if the urb contains a valid endpoint, otherwise + * a negative error code. 
+ */ +int usb_urb_ep_type_check(const struct urb *urb) +{ + const struct usb_host_endpoint *ep; + + ep = usb_pipe_endpoint(urb->dev, urb->pipe); + if (!ep) + return -EINVAL; + if (usb_pipetype(urb->pipe) != pipetypes[usb_endpoint_type(&ep->desc)]) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL_GPL(usb_urb_ep_type_check); + /** * usb_submit_urb - issue an asynchronous transfer request for an endpoint * @urb: pointer to the urb describing the request @@ -326,9 +351,6 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb); */ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) { - static int pipetypes[4] = { - PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT - }; int xfertype, max; struct usb_device *dev; struct usb_host_endpoint *ep; @@ -444,7 +466,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) */ /* Check that the pipe's type matches the endpoint's type */ - if (usb_pipetype(urb->pipe) != pipetypes[xfertype]) + if (usb_urb_ep_type_check(urb)) dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n", usb_pipetype(urb->pipe), pipetypes[xfertype]); diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 8367d4f985c1..ec965ac5f1f5 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -216,7 +216,7 @@ struct dwc2_hsotg_ep { unsigned char dir_in; unsigned char index; unsigned char mc; - unsigned char interval; + u16 interval; unsigned int halted:1; unsigned int periodic:1; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 0d8e09ccb59c..6ef001a83fe2 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -3414,12 +3414,6 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) | DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0); - dwc2_hsotg_enqueue_setup(hsotg); - - dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", - dwc2_readl(hsotg->regs + DIEPCTL0), - dwc2_readl(hsotg->regs + DOEPCTL0)); - /* clear global NAKs */ val = DCTL_CGOUTNAK | DCTL_CGNPINNAK; if (!is_usb_reset) @@ -3430,6 +3424,12 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, mdelay(3); hsotg->lx_state = DWC2_L0; + + dwc2_hsotg_enqueue_setup(hsotg); + + dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", + dwc2_readl(hsotg->regs + DIEPCTL0), + dwc2_readl(hsotg->regs + DOEPCTL0)); } static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index c2631145f404..46d3b0fc00c5 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -979,6 +979,24 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "%s()\n", __func__); + + /* + * In buffer DMA or external DMA mode channel can't be halted + * for non-split periodic channels. At the end of the next + * uframe/frame (in the worst case), the core generates a channel + * halted and disables the channel automatically. + */ + if ((hsotg->params.g_dma && !hsotg->params.g_dma_desc) || + hsotg->hw_params.arch == GHWCFG2_EXT_DMA_ARCH) { + if (!chan->do_split && + (chan->ep_type == USB_ENDPOINT_XFER_ISOC || + chan->ep_type == USB_ENDPOINT_XFER_INT)) { + dev_err(hsotg->dev, "%s() Channel can't be halted\n", + __func__); + return; + } + } + if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS) dev_err(hsotg->dev, "!!! 
halt_status = %d !!!\n", halt_status); @@ -2311,10 +2329,22 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup) */ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg) { - u32 hcfg, hfir, otgctl; + u32 hcfg, hfir, otgctl, usbcfg; dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); + /* Set HS/FS Timeout Calibration to 7 (max available value). + * The number of PHY clocks that the application programs in + * this field is added to the high/full speed interpacket timeout + * duration in the core to account for any additional delays + * introduced by the PHY. This can be required, because the delay + * introduced by the PHY in generating the linestate condition + * can vary from one PHY to another. + */ + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + usbcfg |= GUSBCFG_TOUTCAL(7); + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); + /* Restart the Phy Clock */ dwc2_writel(0, hsotg->regs + PCGCTL); @@ -2576,34 +2606,29 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, #define DWC2_USB_DMA_ALIGN 4 -struct dma_aligned_buffer { - void *kmalloc_ptr; - void *old_xfer_buffer; - u8 data[0]; -}; - static void dwc2_free_dma_aligned_buffer(struct urb *urb) { - struct dma_aligned_buffer *temp; + void *stored_xfer_buffer; if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) return; - temp = container_of(urb->transfer_buffer, - struct dma_aligned_buffer, data); + /* Restore urb->transfer_buffer from the end of the allocated area */ + memcpy(&stored_xfer_buffer, urb->transfer_buffer + + urb->transfer_buffer_length, sizeof(urb->transfer_buffer)); if (usb_urb_dir_in(urb)) - memcpy(temp->old_xfer_buffer, temp->data, + memcpy(stored_xfer_buffer, urb->transfer_buffer, urb->transfer_buffer_length); - urb->transfer_buffer = temp->old_xfer_buffer; - kfree(temp->kmalloc_ptr); + kfree(urb->transfer_buffer); + urb->transfer_buffer = stored_xfer_buffer; urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; } static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) { - struct dma_aligned_buffer *temp, *kmalloc_ptr; + void *kmalloc_ptr; size_t kmalloc_size; if (urb->num_sgs || urb->sg || @@ -2611,22 +2636,29 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1))) return 0; - /* Allocate a buffer with enough padding for alignment */ + /* + * Allocate a buffer with enough padding for original transfer_buffer + * pointer. 
This allocation is guaranteed to be aligned properly for + * DMA + */ kmalloc_size = urb->transfer_buffer_length + - sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1; + sizeof(urb->transfer_buffer); kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); if (!kmalloc_ptr) return -ENOMEM; - /* Position our struct dma_aligned_buffer such that data is aligned */ - temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1; - temp->kmalloc_ptr = kmalloc_ptr; - temp->old_xfer_buffer = urb->transfer_buffer; + /* + * Position value of original urb->transfer_buffer pointer to the end + * of allocation for later referencing + */ + memcpy(kmalloc_ptr + urb->transfer_buffer_length, + &urb->transfer_buffer, sizeof(urb->transfer_buffer)); + if (usb_urb_dir_out(urb)) - memcpy(temp->data, urb->transfer_buffer, + memcpy(kmalloc_ptr, urb->transfer_buffer, urb->transfer_buffer_length); - urb->transfer_buffer = temp->data; + urb->transfer_buffer = kmalloc_ptr; urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; @@ -3277,7 +3309,6 @@ static void dwc2_conn_id_status_change(struct work_struct *work) dwc2_core_init(hsotg, false); dwc2_enable_global_interrupts(hsotg); spin_lock_irqsave(&hsotg->lock, flags); - dwc2_hsotg_disconnect(hsotg); dwc2_hsotg_core_init_disconnected(hsotg, false); spin_unlock_irqrestore(&hsotg->lock, flags); dwc2_hsotg_core_connect(hsotg); @@ -3296,8 +3327,12 @@ static void dwc2_conn_id_status_change(struct work_struct *work) if (count > 250) dev_err(hsotg->dev, "Connection id status change timed out\n"); - hsotg->op_state = OTG_STATE_A_HOST; + spin_lock_irqsave(&hsotg->lock, flags); + dwc2_hsotg_disconnect(hsotg); + spin_unlock_irqrestore(&hsotg->lock, flags); + + hsotg->op_state = OTG_STATE_A_HOST; /* Initialize the Core for Host mode */ dwc2_core_init(hsotg, false); dwc2_enable_global_interrupts(hsotg); diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index 3ae8b1bbaa55..7f51a77bc5cc 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -379,7 +379,7 @@ static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg, /* Get the map and adjust if this is a multi_tt hub */ map = qh->dwc_tt->periodic_bitmaps; if (qh->dwc_tt->usb_tt->multi) - map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport; + map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1); return map; } diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile index 7ac725038f8d..025bc68094fc 100644 --- a/drivers/usb/dwc3/Makefile +++ b/drivers/usb/dwc3/Makefile @@ -6,7 +6,7 @@ obj-$(CONFIG_USB_DWC3) += dwc3.o dwc3-y := core.o -ifneq ($(CONFIG_FTRACE),) +ifneq ($(CONFIG_TRACING),) dwc3-y += trace.o endif diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 03474d3575ab..c33d973bbfca 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -128,6 +128,9 @@ static void __dwc3_set_mode(struct work_struct *work) if (dwc->dr_mode != USB_DR_MODE_OTG) return; + if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG) + return; + switch (dwc->current_dr_role) { case DWC3_GCTL_PRTCAP_HOST: dwc3_host_exit(dwc); @@ -186,7 +189,7 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode) dwc->desired_dr_role = mode; spin_unlock_irqrestore(&dwc->lock, flags); - queue_work(system_power_efficient_wq, &dwc->drd_work); + queue_work(system_freezable_wq, &dwc->drd_work); } u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type) @@ -240,12 +243,26 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc) do { reg = dwc3_readl(dwc->regs, DWC3_DCTL); if (!(reg & DWC3_DCTL_CSFTRST)) - return 0; + 
goto done; udelay(1); } while (--retries); + phy_exit(dwc->usb3_generic_phy); + phy_exit(dwc->usb2_generic_phy); + return -ETIMEDOUT; + +done: + /* + * For DWC_usb31 controller, once DWC3_DCTL_CSFTRST bit is cleared, + * we must wait at least 50ms before accessing the PHY domain + * (synchronization delay). DWC_usb31 programming guide section 1.3.2. + */ + if (dwc3_is_usb31(dwc)) + msleep(50); + + return 0; } /* @@ -1011,6 +1028,9 @@ static void dwc3_get_properties(struct dwc3 *dwc) hird_threshold = 12; dwc->maximum_speed = usb_get_maximum_speed(dev); + if (dwc->maximum_speed > USB_SPEED_HIGH) + dwc->maximum_speed = USB_SPEED_HIGH; + dwc->dr_mode = usb_get_dr_mode(dev); dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node); @@ -1125,7 +1145,7 @@ static void dwc3_check_params(struct dwc3 *dwc) /* fall through */ case USB_SPEED_UNKNOWN: /* default to superspeed */ - dwc->maximum_speed = USB_SPEED_SUPER; + dwc->maximum_speed = USB_SPEED_HIGH; /* * default to superspeed plus if we are capable. diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index ea910acb4bb0..b782ba58a7fc 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -166,13 +166,15 @@ #define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0) #define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff) -#define DWC3_TXFIFOQ 1 -#define DWC3_RXFIFOQ 3 -#define DWC3_TXREQQ 5 -#define DWC3_RXREQQ 7 -#define DWC3_RXINFOQ 9 -#define DWC3_DESCFETCHQ 13 -#define DWC3_EVENTQ 15 +#define DWC3_TXFIFOQ 0 +#define DWC3_RXFIFOQ 1 +#define DWC3_TXREQQ 2 +#define DWC3_RXREQQ 3 +#define DWC3_RXINFOQ 4 +#define DWC3_PSTATQ 5 +#define DWC3_DESCFETCHQ 6 +#define DWC3_EVENTQ 7 +#define DWC3_AUXEVENTQ 8 /* Global RX Threshold Configuration Register */ #define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19) @@ -247,6 +249,8 @@ #define DWC3_GUSB3PIPECTL_TX_DEEPH(n) ((n) << 1) /* Global TX Fifo Size Register */ +#define DWC31_GTXFIFOSIZ_TXFRAMNUM BIT(15) /* DWC_usb31 only */ +#define DWC31_GTXFIFOSIZ_TXFDEF(n) ((n) & 0x7fff) /* DWC_usb31 only */ #define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff) #define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000) diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index a26d1fde0f5e..fbfc09ebd2ec 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c @@ -57,8 +57,10 @@ static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count) clk = of_clk_get(np, i); if (IS_ERR(clk)) { - while (--i >= 0) + while (--i >= 0) { + clk_disable_unprepare(simple->clks[i]); clk_put(simple->clks[i]); + } return PTR_ERR(clk); } diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index 3530795bbb8f..fdd0d5aa1f5e 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c @@ -590,9 +590,25 @@ static int dwc3_omap_resume(struct device *dev) return 0; } +static void dwc3_omap_complete(struct device *dev) +{ + struct dwc3_omap *omap = dev_get_drvdata(dev); + + if (extcon_get_state(omap->edev, EXTCON_USB)) + dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID); + else + dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF); + + if (extcon_get_state(omap->edev, EXTCON_USB_HOST)) + dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND); + else + dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT); +} + static const struct dev_pm_ops dwc3_omap_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume) + .complete = dwc3_omap_complete, }; #define DEV_PM_OPS (&dwc3_omap_dev_pm_ops) diff --git 
a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 54343fbd85ee..e11e39bd6757 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -25,6 +25,7 @@ #include #include #include +#include #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce @@ -124,6 +125,14 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc) dwc->has_dsm_for_pm = true; } + if (pdev->device == PCI_DEVICE_ID_INTEL_BXT || + pdev->device == PCI_DEVICE_ID_INTEL_BXT_M || + pdev->device == PCI_DEVICE_ID_INTEL_APL ) { + if (IS_ERR_OR_NULL(usb_get_phy(USB_PHY_TYPE_USB2))) + return -ENOMEM; + } + + if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) { struct gpio_desc *gpio; @@ -212,7 +221,7 @@ static int dwc3_pci_probe(struct pci_dev *pci, ret = platform_device_add_resources(dwc->dwc3, res, ARRAY_SIZE(res)); if (ret) { dev_err(dev, "couldn't add resources to dwc3 device\n"); - return ret; + goto err; } dwc->pci = pci; @@ -245,6 +254,7 @@ static void dwc3_pci_remove(struct pci_dev *pci) device_init_wakeup(&pci->dev, false); pm_runtime_get(&pci->dev); + platform_device_unregister(dwc->dwc3); } diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 75e6cb044eb2..89fe53c846ef 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -884,7 +884,12 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, trb++; trb->ctrl &= ~DWC3_TRB_CTRL_HWO; trace_dwc3_complete_trb(ep0, trb); - ep0->trb_enqueue = 0; + + if (r->direction) + dwc->eps[1]->trb_enqueue = 0; + else + dwc->eps[0]->trb_enqueue = 0; + dwc->ep0_bounced = false; } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index f064f1549333..3e24733eebb4 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -174,18 +175,8 @@ static void dwc3_ep_inc_deq(struct dwc3_ep *dep) dwc3_ep_inc_trb(&dep->trb_dequeue); } -/** - * dwc3_gadget_giveback - call struct usb_request's ->complete callback - * @dep: The endpoint to whom the request belongs to - * @req: The request we're giving back - * @status: completion code for the request - * - * Must be called with controller's lock held and interrupts disabled. This - * function will unmap @req and call its ->complete() callback to notify upper - * layers that it has completed. - */ -void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, - int status) +void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep, + struct dwc3_request *req, int status) { struct dwc3 *dwc = dep->dwc; @@ -198,18 +189,35 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, if (req->trb) usb_gadget_unmap_request_by_dev(dwc->sysdev, - &req->request, req->direction); + &req->request, req->direction); req->trb = NULL; - trace_dwc3_gadget_giveback(req); + if (dep->number > 1) + pm_runtime_put(dwc->dev); +} + +/** + * dwc3_gadget_giveback - call struct usb_request's ->complete callback + * @dep: The endpoint to whom the request belongs to + * @req: The request we're giving back + * @status: completion code for the request + * + * Must be called with controller's lock held and interrupts disabled. This + * function will unmap @req and call its ->complete() callback to notify upper + * layers that it has completed. 
+ */ +void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, + int status) +{ + struct dwc3 *dwc = dep->dwc; + + dwc3_gadget_del_and_unmap_request(dep, req, status); + + spin_unlock(&dwc->lock); usb_gadget_giveback_request(&dep->endpoint, &req->request); spin_lock(&dwc->lock); - - if (dep->number > 1) - pm_runtime_put(dwc->dev); } /** @@ -267,7 +275,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, { const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; struct dwc3 *dwc = dep->dwc; - u32 timeout = 500; + u32 timeout = 1000; u32 reg; int cmd_status = 0; @@ -1233,7 +1241,7 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param) if (req->trb) memset(req->trb, 0, sizeof(struct dwc3_trb)); dep->queued_requests--; - dwc3_gadget_giveback(dep, req, ret); + dwc3_gadget_del_and_unmap_request(dep, req, ret); return ret; } @@ -1437,7 +1445,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, dwc->lock); if (!r->trb) - goto out1; + goto out0; if (r->num_pending_sgs) { struct dwc3_trb *trb; @@ -1853,12 +1861,66 @@ static void dwc3_gadget_setup_nump(struct dwc3 *dwc) dwc3_writel(dwc->regs, DWC3_DCFG, reg); } +static inline bool platform_is_bxtp(void) +{ +#ifdef CONFIG_X86_64 + if ((boot_cpu_data.x86_model == 0x5c) + && (boot_cpu_data.x86_stepping >= 0x8) + && (boot_cpu_data.x86_stepping <= 0xf)) + return true; +#endif + return false; +} + static int __dwc3_gadget_start(struct dwc3 *dwc) { struct dwc3_ep *dep; int ret = 0; u32 reg; + + reg = dwc3_readl(dwc->regs, DWC3_DCFG); + reg &= ~(DWC3_DCFG_SPEED_MASK); + + /** + * WORKAROUND: DWC3 revisions < 2.20a have an issue + * which would cause metastability state on Run/Stop + * bit if we try to force the IP to USB2-only mode. + * + * Because of that, we cannot configure the IP to any + * speed other than the SuperSpeed + * + * Refers to: + * + * STAR#9000525659: Clock Domain Crossing on DCTL in + * USB 2.0 Mode + */ + if (dwc->revision < DWC3_REVISION_220A) { + reg |= DWC3_DCFG_SUPERSPEED; + } else { + switch (dwc->maximum_speed) { + case USB_SPEED_LOW: + reg |= DWC3_DCFG_LOWSPEED; + break; + case USB_SPEED_FULL: + reg |= DWC3_DCFG_FULLSPEED; + break; + case USB_SPEED_HIGH: + reg |= DWC3_DCFG_HIGHSPEED; + break; + case USB_SPEED_SUPER_PLUS: + reg |= DWC3_DCFG_SUPERSPEED_PLUS; + break; + default: + dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n", + dwc->maximum_speed); + /* fall through */ + case USB_SPEED_SUPER: + reg |= DWC3_DCFG_SUPERSPEED; + break; + } + } + dwc3_writel(dwc->regs, DWC3_DCFG, reg); + /* * Use IMOD if enabled via dwc->imod_interval. Otherwise, if * the core supports IMOD, disable it. */
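The DCFG speed programming added to __dwc3_gadget_start() above reads most clearly as a single mapping from (core revision, configured maximum speed) to DCFG speed bits. The helper below is a hypothetical condensation of that switch for illustration only, not code from the patch; it assumes the usual dwc3 core header definitions and drops the dev_err() on invalid speeds:

static u32 dcfg_speed_bits(u32 revision, enum usb_device_speed max_speed)
{
	/*
	 * STAR#9000525659: cores older than 2.20a must stay at SuperSpeed;
	 * forcing a USB2-only mode risks Run/Stop metastability.
	 */
	if (revision < DWC3_REVISION_220A)
		return DWC3_DCFG_SUPERSPEED;

	switch (max_speed) {
	case USB_SPEED_LOW:
		return DWC3_DCFG_LOWSPEED;
	case USB_SPEED_FULL:
		return DWC3_DCFG_FULLSPEED;
	case USB_SPEED_HIGH:
		return DWC3_DCFG_HIGHSPEED;
	case USB_SPEED_SUPER_PLUS:
		return DWC3_DCFG_SUPERSPEED_PLUS;
	default:
		/* USB_SPEED_SUPER and invalid values fall back to SS */
		return DWC3_DCFG_SUPERSPEED;
	}
}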
@@ -1923,6 +1985,15 @@ static int dwc3_gadget_start(struct usb_gadget *g, int ret = 0; int irq; + if (dwc->usb2_phy) { + ret = otg_set_peripheral(dwc->usb2_phy->otg, &dwc->gadget); + if (ret == -ENOTSUPP) + dev_info(dwc->dev, "no OTG driver registered\n"); + else if (ret) + return ret; + } + + irq = dwc->irq_gadget; ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt, IRQF_SHARED, "dwc3", dwc->ev_buf); @@ -1971,6 +2042,10 @@ static int dwc3_gadget_stop(struct usb_gadget *g) unsigned long flags; int epnum; + + if (dwc->usb2_phy) + otg_set_peripheral(dwc->usb2_phy->otg, NULL); + spin_lock_irqsave(&dwc->lock, flags); if (pm_runtime_suspended(dwc->dev)) @@ -2039,10 +2114,24 @@ static void dwc3_gadget_set_speed(struct usb_gadget *g, reg |= DWC3_DCFG_HIGHSPEED; break; case USB_SPEED_SUPER: - reg |= DWC3_DCFG_SUPERSPEED; + /* + * WORKAROUND: the BXTP platform's USB3.0 port fails in SS mode, + * so we switch SS to HS to keep the port usable. + */ + if (platform_is_bxtp()) + reg |= DWC3_DCFG_HIGHSPEED; + else + reg |= DWC3_DCFG_SUPERSPEED; break; case USB_SPEED_SUPER_PLUS: - reg |= DWC3_DCFG_SUPERSPEED_PLUS; + /* + * WORKAROUND: the BXTP platform's USB3.0 port fails in SS mode, + * so we switch SS to HS to keep the port usable. + */ + if (platform_is_bxtp()) + reg |= DWC3_DCFG_HIGHSPEED; + else + reg |= DWC3_DCFG_SUPERSPEED_PLUS; break; default: dev_err(dwc->dev, "invalid speed (%d)\n", speed); @@ -2774,6 +2863,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) break; } + dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket; + /* Enable USB2 LPM Capability */ if ((dwc->revision > DWC3_REVISION_194A) && diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h index 2df0f6e613fe..a516cab0bf4a 100644 --- a/drivers/usb/early/xhci-dbc.h +++ b/drivers/usb/early/xhci-dbc.h @@ -90,8 +90,8 @@ struct xdbc_context { #define XDBC_INFO_CONTEXT_SIZE 48 #define XDBC_MAX_STRING_LENGTH 64 -#define XDBC_STRING_MANUFACTURER "Linux" -#define XDBC_STRING_PRODUCT "Remote GDB" +#define XDBC_STRING_MANUFACTURER "Linux Foundation" +#define XDBC_STRING_PRODUCT "Linux USB GDB Target" #define XDBC_STRING_SERIAL "0001" struct xdbc_strings { @@ -103,7 +103,7 @@ struct xdbc_strings { #define XDBC_PROTOCOL 1 /* GNU Remote Debug Command Set */ #define XDBC_VENDOR_ID 0x1d6b /* Linux Foundation 0x1d6b */ -#define XDBC_PRODUCT_ID 0x0004 /* __le16 idProduct; device 0004 */ +#define XDBC_PRODUCT_ID 0x0011 /* __le16 idProduct; device 0011 */ #define XDBC_DEVICE_REV 0x0010 /* 0.10 */ /* diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 31cce7805eb2..cc0dceea40f5 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -215,6 +215,22 @@ config USB_F_PRINTER config USB_F_TCM tristate +config USB_F_DVCTRACE + tristate + select DVC_TRACE_BUS + +config USB_F_MTP + tristate + +config USB_F_PTP + tristate + +config USB_F_AUDIO_SRC + tristate + +config USB_F_ACC + tristate + # this first set of drivers all depend on bulk-capable hardware. config USB_CONFIGFS @@ -368,6 +384,44 @@ config USB_CONFIGFS_F_FS implemented in kernel space (for instance Ethernet, serial or mass storage) and others are implemented in user space. 
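The Android function drivers declared below (MTP, PTP, accessory, audio source) are instantiated from user space through the configfs gadget interface rather than via a legacy gadget driver. As a rough bring-up sketch — the gadget name g1, the instance name mtp.gs0, the VID/PID values and the UDC name are illustrative assumptions, not taken from the patch — a user-space tool might do:

#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

static void write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(1);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	const char *g = "/sys/kernel/config/usb_gadget/g1";
	char path[256], link[256];

	/* Create the gadget and give it an identity (values illustrative). */
	mkdir(g, 0755);
	snprintf(path, sizeof(path), "%s/idVendor", g);
	write_attr(path, "0x18d1");
	snprintf(path, sizeof(path), "%s/idProduct", g);
	write_attr(path, "0x4ee1");

	/* The directory name "mtp.gs0" selects the usb_f_mtp function driver. */
	snprintf(path, sizeof(path), "%s/functions/mtp.gs0", g);
	mkdir(path, 0755);

	/* Create configuration c.1 and link the function into it. */
	snprintf(link, sizeof(link), "%s/configs/c.1", g);
	mkdir(link, 0755);
	snprintf(link, sizeof(link), "%s/configs/c.1/mtp.gs0", g);
	symlink(path, link);

	/* Binding a UDC starts enumeration; the controller name is a guess. */
	snprintf(path, sizeof(path), "%s/UDC", g);
	write_attr(path, "dwc3.0.auto");
	return 0;
}

Unbinding works by writing an empty string back to the UDC attribute; with the configfs.c change later in this patch, writing the literal "none" is accepted as well and likewise unregisters the gadget.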
+config USB_CONFIGFS_F_MTP + boolean "MTP gadget" + depends on USB_CONFIGFS + select USB_F_MTP + help + USB gadget MTP support + +config USB_CONFIGFS_F_PTP + boolean "PTP gadget" + depends on USB_CONFIGFS && USB_CONFIGFS_F_MTP + select USB_F_PTP + help + USB gadget PTP support + +config USB_CONFIGFS_F_ACC + boolean "Accessory gadget" + depends on USB_CONFIGFS + select USB_F_ACC + help + USB gadget Accessory support + +config USB_CONFIGFS_F_AUDIO_SRC + boolean "Audio Source gadget" + depends on USB_CONFIGFS && USB_CONFIGFS_F_ACC + depends on SND + select SND_PCM + select USB_F_AUDIO_SRC + help + USB gadget Audio Source support + +config USB_CONFIGFS_UEVENT + boolean "Uevent notification of Gadget state" + depends on USB_CONFIGFS + help + Enable uevent notifications to userspace when the gadget + state changes. The gadget can be in any of the following + three states: "CONNECTED/DISCONNECTED/CONFIGURED" + config USB_CONFIGFS_F_UAC1 bool "Audio Class 1.0" depends on USB_CONFIGFS @@ -508,6 +562,13 @@ choice controller, and the relevant drivers for each function declared by the device. +config USB_CONFIGFS_F_DVCTRACE + bool "DvC Trace gadget" + depends on USB_CONFIGFS + select USB_F_DVCTRACE + help + USB gadget DvC Trace support + source "drivers/usb/gadget/legacy/Kconfig" endchoice diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 5d061b3d8224..fa34388a2d11 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -150,7 +150,6 @@ int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, struct usb_ep *_ep) { - struct usb_composite_dev *cdev = get_gadget_data(g); struct usb_endpoint_descriptor *chosen_desc = NULL; struct usb_descriptor_header **speed_desc = NULL; @@ -229,8 +228,12 @@ int config_ep_by_speed(struct usb_gadget *g, _ep->maxburst = comp_desc->bMaxBurst + 1; break; default: - if (comp_desc->bMaxBurst != 0) + if (comp_desc->bMaxBurst != 0) { + struct usb_composite_dev *cdev; + + cdev = get_gadget_data(g); ERROR(cdev, "ep0 bMaxBurst must be 0\n"); + } _ep->maxburst = 1; break; } @@ -1422,7 +1425,7 @@ static int count_ext_compat(struct usb_configuration *c) return res; } -static void fill_ext_compat(struct usb_configuration *c, u8 *buf) +static int fill_ext_compat(struct usb_configuration *c, u8 *buf) { int i, count; @@ -1449,10 +1452,12 @@ static void fill_ext_compat(struct usb_configuration *c, u8 *buf) buf += 23; } count += 24; - if (count >= 4096) - return; + if (count + 24 >= USB_COMP_EP0_OS_DESC_BUFSIZ) + return count; } } + + return count; } static int count_ext_prop(struct usb_configuration *c, int interface) @@ -1497,25 +1502,20 @@ static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf) struct usb_os_desc *d; struct usb_os_desc_ext_prop *ext_prop; int j, count, n, ret; - u8 *start = buf; f = c->interface[interface]; + count = 10; /* header length */ for (j = 0; j < f->os_desc_n; ++j) { if (interface != f->os_desc_table[j].if_id) continue; d = f->os_desc_table[j].os_desc; if (d) list_for_each_entry(ext_prop, &d->ext_prop, entry) { - /* 4kB minus header length */ - n = buf - start; - if (n >= 4086) - return 0; - - count = ext_prop->data_len + + n = ext_prop->data_len + ext_prop->name_len + 14; - if (count > 4086 - n) - return -EINVAL; - usb_ext_prop_put_size(buf, count); + if (count + n >= USB_COMP_EP0_OS_DESC_BUFSIZ) + return count; + usb_ext_prop_put_size(buf, n); usb_ext_prop_put_type(buf, ext_prop->type); ret = usb_ext_prop_put_name(buf, ext_prop->name, ext_prop->name_len); @@ 
-1541,11 +1541,12 @@ static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf) default: return -EINVAL; } - buf += count; + buf += n; + count += n; } } - return 0; + return count; } /* @@ -1827,6 +1828,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) req->complete = composite_setup_complete; buf = req->buf; os_desc_cfg = cdev->os_desc_config; + w_length = min_t(u16, w_length, USB_COMP_EP0_OS_DESC_BUFSIZ); memset(buf, 0, w_length); buf[5] = 0x01; switch (ctrl->bRequestType & USB_RECIP_MASK) { @@ -1850,8 +1852,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) count += 16; /* header */ put_unaligned_le32(count, buf); buf += 16; - fill_ext_compat(os_desc_cfg, buf); - value = w_length; + value = fill_ext_compat(os_desc_cfg, buf); + value = min_t(u16, w_length, value); } break; case USB_RECIP_INTERFACE: @@ -1880,8 +1882,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) interface, buf); if (value < 0) return value; - - value = w_length; + value = min_t(u16, w_length, value); } break; } @@ -2000,6 +2001,12 @@ void composite_disconnect(struct usb_gadget *gadget) struct usb_composite_dev *cdev = get_gadget_data(gadget); unsigned long flags; + if (cdev == NULL) { + WARN(1, "%s: Calling disconnect on a Gadget that is \ + not connected\n", __func__); + return; + } + /* REVISIT: should we have config and device level * disconnect callbacks? */ @@ -2156,8 +2163,8 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev, goto end; } - /* OS feature descriptor length <= 4kB */ - cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); + cdev->os_desc_req->buf = kmalloc(USB_COMP_EP0_OS_DESC_BUFSIZ, + GFP_KERNEL); if (!cdev->os_desc_req->buf) { ret = -ENOMEM; usb_ep_free_request(ep0, cdev->os_desc_req); diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index aeb9f3c40521..bf2d0ce80c99 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -9,6 +9,31 @@ #include "u_f.h" #include "u_os_desc.h" +#ifdef CONFIG_USB_CONFIGFS_UEVENT +#include +#include +#include + +#ifdef CONFIG_USB_CONFIGFS_F_ACC +extern int acc_ctrlrequest(struct usb_composite_dev *cdev, + const struct usb_ctrlrequest *ctrl); +void acc_disconnect(void); +#endif +static struct class *android_class; +static struct device *android_device; +static int index; + +struct device *create_function_device(char *name) +{ + if (android_device && !IS_ERR(android_device)) + return device_create(android_class, android_device, + MKDEV(0, index++), NULL, name); + else + return ERR_PTR(-EINVAL); +} +EXPORT_SYMBOL_GPL(create_function_device); +#endif + int check_user_usb_string(const char *name, struct usb_gadget_strings *stringtab_dev) { @@ -60,6 +85,12 @@ struct gadget_info { bool use_os_desc; char b_vendor_code; char qw_sign[OS_STRING_QW_SIGN_LEN]; +#ifdef CONFIG_USB_CONFIGFS_UEVENT + bool connected; + bool sw_connected; + struct work_struct work; + struct device *dev; +#endif }; static inline struct gadget_info *to_gadget_info(struct config_item *item) @@ -265,7 +296,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item, mutex_lock(&gi->lock); - if (!strlen(name)) { + if (!strlen(name) || strcmp(name, "none") == 0) { ret = unregister_gadget(gi); if (ret) goto err; @@ -1371,6 +1402,60 @@ static int configfs_composite_bind(struct usb_gadget *gadget, return ret; } +#ifdef CONFIG_USB_CONFIGFS_UEVENT +static void android_work(struct work_struct *data) +{ + struct 
gadget_info *gi = container_of(data, struct gadget_info, work); + struct usb_composite_dev *cdev = &gi->cdev; + char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL }; + char *connected[2] = { "USB_STATE=CONNECTED", NULL }; + char *configured[2] = { "USB_STATE=CONFIGURED", NULL }; + /* 0-connected 1-configured 2-disconnected */ + bool status[3] = { false, false, false }; + unsigned long flags; + bool uevent_sent = false; + + spin_lock_irqsave(&cdev->lock, flags); + if (cdev->config) + status[1] = true; + + if (gi->connected != gi->sw_connected) { + if (gi->connected) + status[0] = true; + else + status[2] = true; + gi->sw_connected = gi->connected; + } + spin_unlock_irqrestore(&cdev->lock, flags); + + if (status[0]) { + kobject_uevent_env(&android_device->kobj, + KOBJ_CHANGE, connected); + pr_info("%s: sent uevent %s\n", __func__, connected[0]); + uevent_sent = true; + } + + if (status[1]) { + kobject_uevent_env(&android_device->kobj, + KOBJ_CHANGE, configured); + pr_info("%s: sent uevent %s\n", __func__, configured[0]); + uevent_sent = true; + } + + if (status[2]) { + kobject_uevent_env(&android_device->kobj, + KOBJ_CHANGE, disconnected); + pr_info("%s: sent uevent %s\n", __func__, disconnected[0]); + uevent_sent = true; + } + + if (!uevent_sent) { + pr_info("%s: did not send uevent (%d %d %p)\n", __func__, + gi->connected, gi->sw_connected, cdev->config); + } +} +#endif + static void configfs_composite_unbind(struct usb_gadget *gadget) { struct usb_composite_dev *cdev; @@ -1390,14 +1475,91 @@ static void configfs_composite_unbind(struct usb_gadget *gadget) set_gadget_data(gadget, NULL); } +#ifdef CONFIG_USB_CONFIGFS_UEVENT +static int android_setup(struct usb_gadget *gadget, + const struct usb_ctrlrequest *c) +{ + struct usb_composite_dev *cdev = get_gadget_data(gadget); + unsigned long flags; + struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev); + int value = -EOPNOTSUPP; + struct usb_function_instance *fi; + + spin_lock_irqsave(&cdev->lock, flags); + if (!gi->connected) { + gi->connected = 1; + schedule_work(&gi->work); + } + spin_unlock_irqrestore(&cdev->lock, flags); + list_for_each_entry(fi, &gi->available_func, cfs_list) { + if (fi != NULL && fi->f != NULL && fi->f->setup != NULL) { + value = fi->f->setup(fi->f, c); + if (value >= 0) + break; + } + } + +#ifdef CONFIG_USB_CONFIGFS_F_ACC + if (value < 0) + value = acc_ctrlrequest(cdev, c); +#endif + + if (value < 0) + value = composite_setup(gadget, c); + + spin_lock_irqsave(&cdev->lock, flags); + if (c->bRequest == USB_REQ_SET_CONFIGURATION && + cdev->config) { + schedule_work(&gi->work); + } + spin_unlock_irqrestore(&cdev->lock, flags); + + return value; +} + +static void android_disconnect(struct usb_gadget *gadget) +{ + struct usb_composite_dev *cdev = get_gadget_data(gadget); + struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev); + + /* FIXME: There's a race between usb_gadget_udc_stop() which is likely + * to set the gadget driver to NULL in the udc driver and this driver's + * gadget disconnect fn which likely checks for the gadget driver to + * be a null ptr. It happens that unbind (doing set_gadget_data(NULL)) + * is called before the gadget driver is set to NULL and the udc driver + * calls disconnect fn which results in cdev being a null ptr. 
+ */ + if (cdev == NULL) { + WARN(1, "%s: gadget driver already disconnected\n", __func__); + return; + } + + /* accessory HID support can be active while the + accessory function is not actually enabled, + so we need to inform it when we are disconnected. + */ + +#ifdef CONFIG_USB_CONFIGFS_F_ACC + acc_disconnect(); +#endif + gi->connected = 0; + schedule_work(&gi->work); + composite_disconnect(gadget); +} +#endif + static const struct usb_gadget_driver configfs_driver_template = { .bind = configfs_composite_bind, .unbind = configfs_composite_unbind, - +#ifdef CONFIG_USB_CONFIGFS_UEVENT + .setup = android_setup, + .reset = android_disconnect, + .disconnect = android_disconnect, +#else .setup = composite_setup, .reset = composite_disconnect, .disconnect = composite_disconnect, - +#endif .suspend = composite_suspend, .resume = composite_resume, @@ -1409,6 +1571,89 @@ static const struct usb_gadget_driver configfs_driver_template = { .match_existing_only = 1, }; +#ifdef CONFIG_USB_CONFIGFS_UEVENT +static ssize_t state_show(struct device *pdev, struct device_attribute *attr, + char *buf) +{ + struct gadget_info *dev = dev_get_drvdata(pdev); + struct usb_composite_dev *cdev; + char *state = "DISCONNECTED"; + unsigned long flags; + + if (!dev) + goto out; + + cdev = &dev->cdev; + + if (!cdev) + goto out; + + spin_lock_irqsave(&cdev->lock, flags); + if (cdev->config) + state = "CONFIGURED"; + else if (dev->connected) + state = "CONNECTED"; + spin_unlock_irqrestore(&cdev->lock, flags); +out: + return sprintf(buf, "%s\n", state); +} + +static DEVICE_ATTR(state, S_IRUGO, state_show, NULL); + +static struct device_attribute *android_usb_attributes[] = { + &dev_attr_state, + NULL +}; + +static int android_device_create(struct gadget_info *gi) +{ + struct device_attribute **attrs; + struct device_attribute *attr; + + INIT_WORK(&gi->work, android_work); + android_device = device_create(android_class, NULL, + MKDEV(0, 0), NULL, "android0"); + if (IS_ERR(android_device)) + return PTR_ERR(android_device); + + dev_set_drvdata(android_device, gi); + + attrs = android_usb_attributes; + while ((attr = *attrs++)) { + int err; + + err = device_create_file(android_device, attr); + if (err) { + device_destroy(android_device->class, + android_device->devt); + return err; + } + } + + return 0; +} + +static void android_device_destroy(void) +{ + struct device_attribute **attrs; + struct device_attribute *attr; + + attrs = android_usb_attributes; + while ((attr = *attrs++)) + device_remove_file(android_device, attr); + device_destroy(android_device->class, android_device->devt); +} +#else +static inline int android_device_create(struct gadget_info *gi) +{ + return 0; +} + +static inline void android_device_destroy(void) +{ +} +#endif + static struct config_group *gadgets_make( struct config_group *group, const char *name) @@ -1460,7 +1705,11 @@ static struct config_group *gadgets_make( if (!gi->composite.gadget_driver.function) goto err; + if (android_device_create(gi) < 0) + goto err; + return &gi->group; + err: kfree(gi); return ERR_PTR(-ENOMEM); @@ -1469,6 +1718,7 @@ static struct config_group *gadgets_make( static void gadgets_drop(struct config_group *group, struct config_item *item) { config_item_put(item); + android_device_destroy(); } static struct configfs_group_operations gadgets_ops = { @@ -1508,6 +1758,13 @@ static int __init gadget_cfs_init(void) config_group_init(&gadget_subsys.su_group); ret = configfs_register_subsystem(&gadget_subsys); + +#ifdef CONFIG_USB_CONFIGFS_UEVENT + android_class = 
class_create(THIS_MODULE, "android_usb"); + if (IS_ERR(android_class)) + return PTR_ERR(android_class); +#endif + return ret; } module_init(gadget_cfs_init); @@ -1515,5 +1772,10 @@ module_init(gadget_cfs_init); static void __exit gadget_cfs_exit(void) { configfs_unregister_subsystem(&gadget_subsys); +#ifdef CONFIG_USB_CONFIGFS_UEVENT + if (!IS_ERR(android_class)) + class_destroy(android_class); +#endif + } module_exit(gadget_cfs_exit); diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile index 5d3a6cf02218..c91db3d592f5 100644 --- a/drivers/usb/gadget/function/Makefile +++ b/drivers/usb/gadget/function/Makefile @@ -50,3 +50,12 @@ usb_f_printer-y := f_printer.o obj-$(CONFIG_USB_F_PRINTER) += usb_f_printer.o usb_f_tcm-y := f_tcm.o obj-$(CONFIG_USB_F_TCM) += usb_f_tcm.o +obj-$(CONFIG_USB_F_DVCTRACE) += f_dvctrace.o +usb_f_mtp-y := f_mtp.o +obj-$(CONFIG_USB_F_MTP) += usb_f_mtp.o +usb_f_ptp-y := f_ptp.o +obj-$(CONFIG_USB_F_PTP) += usb_f_ptp.o +usb_f_audio_source-y := f_audio_source.o +obj-$(CONFIG_USB_F_AUDIO_SRC) += usb_f_audio_source.o +usb_f_accessory-y := f_accessory.o +obj-$(CONFIG_USB_F_ACC) += usb_f_accessory.o diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c new file mode 100644 index 000000000000..7aa2656a2328 --- /dev/null +++ b/drivers/usb/gadget/function/f_accessory.c @@ -0,0 +1,1352 @@ +/* + * Gadget Function Driver for Android USB accessories + * + * Copyright (C) 2011 Google, Inc. + * Author: Mike Lockwood + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* #define DEBUG */ +/* #define VERBOSE_DEBUG */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#define MAX_INST_NAME_LEN 40 +#define BULK_BUFFER_SIZE 16384 +#define ACC_STRING_SIZE 256 + +#define PROTOCOL_VERSION 2 + +/* String IDs */ +#define INTERFACE_STRING_INDEX 0 + +/* number of tx and rx requests to allocate */ +#define TX_REQ_MAX 4 +#define RX_REQ_MAX 2 + +struct acc_hid_dev { + struct list_head list; + struct hid_device *hid; + struct acc_dev *dev; + /* accessory defined ID */ + int id; + /* HID report descriptor */ + u8 *report_desc; + /* length of HID report descriptor */ + int report_desc_len; + /* number of bytes of report_desc we have received so far */ + int report_desc_offset; +}; + +struct acc_dev { + struct usb_function function; + struct usb_composite_dev *cdev; + spinlock_t lock; + + struct usb_ep *ep_in; + struct usb_ep *ep_out; + + /* online indicates state of function_set_alt & function_unbind + * set to 1 when we connect + */ + int online:1; + + /* disconnected indicates state of open & release + * Set to 1 when we disconnect. + * Not cleared until our file is closed. 
+ */ + int disconnected:1; + + /* strings sent by the host */ + char manufacturer[ACC_STRING_SIZE]; + char model[ACC_STRING_SIZE]; + char description[ACC_STRING_SIZE]; + char version[ACC_STRING_SIZE]; + char uri[ACC_STRING_SIZE]; + char serial[ACC_STRING_SIZE]; + + /* for acc_complete_set_string */ + int string_index; + + /* set to 1 if we have a pending start request */ + int start_requested; + + int audio_mode; + + /* synchronize access to our device file */ + atomic_t open_excl; + + struct list_head tx_idle; + + wait_queue_head_t read_wq; + wait_queue_head_t write_wq; + struct usb_request *rx_req[RX_REQ_MAX]; + int rx_done; + + /* delayed work for handling ACCESSORY_START */ + struct delayed_work start_work; + + /* worker for registering and unregistering hid devices */ + struct work_struct hid_work; + + /* list of active HID devices */ + struct list_head hid_list; + + /* list of new HID devices to register */ + struct list_head new_hid_list; + + /* list of dead HID devices to unregister */ + struct list_head dead_hid_list; +}; + +static struct usb_interface_descriptor acc_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 0, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC, + .bInterfaceProtocol = 0, +}; + +static struct usb_endpoint_descriptor acc_highspeed_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor acc_highspeed_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor acc_fullspeed_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor acc_fullspeed_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_descriptor_header *fs_acc_descs[] = { + (struct usb_descriptor_header *) &acc_interface_desc, + (struct usb_descriptor_header *) &acc_fullspeed_in_desc, + (struct usb_descriptor_header *) &acc_fullspeed_out_desc, + NULL, +}; + +static struct usb_descriptor_header *hs_acc_descs[] = { + (struct usb_descriptor_header *) &acc_interface_desc, + (struct usb_descriptor_header *) &acc_highspeed_in_desc, + (struct usb_descriptor_header *) &acc_highspeed_out_desc, + NULL, +}; + +static struct usb_string acc_string_defs[] = { + [INTERFACE_STRING_INDEX].s = "Android Accessory Interface", + { }, /* end of list */ +}; + +static struct usb_gadget_strings acc_string_table = { + .language = 0x0409, /* en-US */ + .strings = acc_string_defs, +}; + +static struct usb_gadget_strings *acc_strings[] = { + &acc_string_table, + NULL, +}; + +/* temporary variable used between acc_open() and acc_gadget_bind() */ +static struct acc_dev *_acc_dev; + +struct acc_instance { + struct usb_function_instance func_inst; + const char *name; +}; + +static inline struct acc_dev *func_to_dev(struct usb_function *f) +{ + return container_of(f, struct acc_dev, function); +} + +static struct usb_request 
*acc_request_new(struct usb_ep *ep, int buffer_size) +{ + struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); + + if (!req) + return NULL; + + /* now allocate buffers for the requests */ + req->buf = kmalloc(buffer_size, GFP_KERNEL); + if (!req->buf) { + usb_ep_free_request(ep, req); + return NULL; + } + + return req; +} + +static void acc_request_free(struct usb_request *req, struct usb_ep *ep) +{ + if (req) { + kfree(req->buf); + usb_ep_free_request(ep, req); + } +} + +/* add a request to the tail of a list */ +static void req_put(struct acc_dev *dev, struct list_head *head, + struct usb_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + list_add_tail(&req->list, head); + spin_unlock_irqrestore(&dev->lock, flags); +} + +/* remove a request from the head of a list */ +static struct usb_request *req_get(struct acc_dev *dev, struct list_head *head) +{ + unsigned long flags; + struct usb_request *req; + + spin_lock_irqsave(&dev->lock, flags); + if (list_empty(head)) { + req = 0; + } else { + req = list_first_entry(head, struct usb_request, list); + list_del(&req->list); + } + spin_unlock_irqrestore(&dev->lock, flags); + return req; +} + +static void acc_set_disconnected(struct acc_dev *dev) +{ + dev->disconnected = 1; +} + +static void acc_complete_in(struct usb_ep *ep, struct usb_request *req) +{ + struct acc_dev *dev = _acc_dev; + + if (req->status == -ESHUTDOWN) { + pr_debug("acc_complete_in set disconnected"); + acc_set_disconnected(dev); + } + + req_put(dev, &dev->tx_idle, req); + + wake_up(&dev->write_wq); +} + +static void acc_complete_out(struct usb_ep *ep, struct usb_request *req) +{ + struct acc_dev *dev = _acc_dev; + + dev->rx_done = 1; + if (req->status == -ESHUTDOWN) { + pr_debug("acc_complete_out set disconnected"); + acc_set_disconnected(dev); + } + + wake_up(&dev->read_wq); +} + +static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req) +{ + struct acc_dev *dev = ep->driver_data; + char *string_dest = NULL; + int length = req->actual; + + if (req->status != 0) { + pr_err("acc_complete_set_string, err %d\n", req->status); + return; + } + + switch (dev->string_index) { + case ACCESSORY_STRING_MANUFACTURER: + string_dest = dev->manufacturer; + break; + case ACCESSORY_STRING_MODEL: + string_dest = dev->model; + break; + case ACCESSORY_STRING_DESCRIPTION: + string_dest = dev->description; + break; + case ACCESSORY_STRING_VERSION: + string_dest = dev->version; + break; + case ACCESSORY_STRING_URI: + string_dest = dev->uri; + break; + case ACCESSORY_STRING_SERIAL: + string_dest = dev->serial; + break; + } + if (string_dest) { + unsigned long flags; + + if (length >= ACC_STRING_SIZE) + length = ACC_STRING_SIZE - 1; + + spin_lock_irqsave(&dev->lock, flags); + memcpy(string_dest, req->buf, length); + /* ensure zero termination */ + string_dest[length] = 0; + spin_unlock_irqrestore(&dev->lock, flags); + } else { + pr_err("unknown accessory string index %d\n", + dev->string_index); + } +} + +static void acc_complete_set_hid_report_desc(struct usb_ep *ep, + struct usb_request *req) +{ + struct acc_hid_dev *hid = req->context; + struct acc_dev *dev = hid->dev; + int length = req->actual; + + if (req->status != 0) { + pr_err("acc_complete_set_hid_report_desc, err %d\n", + req->status); + return; + } + + memcpy(hid->report_desc + hid->report_desc_offset, req->buf, length); + hid->report_desc_offset += length; + if (hid->report_desc_offset == hid->report_desc_len) { + /* After we have received the entire report descriptor + * 
we schedule work to initialize the HID device + */ + schedule_work(&dev->hid_work); + } +} + +static void acc_complete_send_hid_event(struct usb_ep *ep, + struct usb_request *req) +{ + struct acc_hid_dev *hid = req->context; + int length = req->actual; + + if (req->status != 0) { + pr_err("acc_complete_send_hid_event, err %d\n", req->status); + return; + } + + hid_report_raw_event(hid->hid, HID_INPUT_REPORT, req->buf, length, 1); +} + +static int acc_hid_parse(struct hid_device *hid) +{ + struct acc_hid_dev *hdev = hid->driver_data; + + hid_parse_report(hid, hdev->report_desc, hdev->report_desc_len); + return 0; +} + +static int acc_hid_start(struct hid_device *hid) +{ + return 0; +} + +static void acc_hid_stop(struct hid_device *hid) +{ +} + +static int acc_hid_open(struct hid_device *hid) +{ + return 0; +} + +static void acc_hid_close(struct hid_device *hid) +{ +} + +static int acc_hid_raw_request(struct hid_device *hid, unsigned char reportnum, + __u8 *buf, size_t len, unsigned char rtype, int reqtype) +{ + return 0; +} + +static struct hid_ll_driver acc_hid_ll_driver = { + .parse = acc_hid_parse, + .start = acc_hid_start, + .stop = acc_hid_stop, + .open = acc_hid_open, + .close = acc_hid_close, + .raw_request = acc_hid_raw_request, +}; + +static struct acc_hid_dev *acc_hid_new(struct acc_dev *dev, + int id, int desc_len) +{ + struct acc_hid_dev *hdev; + + hdev = kzalloc(sizeof(*hdev), GFP_ATOMIC); + if (!hdev) + return NULL; + hdev->report_desc = kzalloc(desc_len, GFP_ATOMIC); + if (!hdev->report_desc) { + kfree(hdev); + return NULL; + } + hdev->dev = dev; + hdev->id = id; + hdev->report_desc_len = desc_len; + + return hdev; +} + +static struct acc_hid_dev *acc_hid_get(struct list_head *list, int id) +{ + struct acc_hid_dev *hid; + + list_for_each_entry(hid, list, list) { + if (hid->id == id) + return hid; + } + return NULL; +} + +static int acc_register_hid(struct acc_dev *dev, int id, int desc_length) +{ + struct acc_hid_dev *hid; + unsigned long flags; + + /* report descriptor length must be > 0 */ + if (desc_length <= 0) + return -EINVAL; + + spin_lock_irqsave(&dev->lock, flags); + /* replace HID if one already exists with this ID */ + hid = acc_hid_get(&dev->hid_list, id); + if (!hid) + hid = acc_hid_get(&dev->new_hid_list, id); + if (hid) + list_move(&hid->list, &dev->dead_hid_list); + + hid = acc_hid_new(dev, id, desc_length); + if (!hid) { + spin_unlock_irqrestore(&dev->lock, flags); + return -ENOMEM; + } + + list_add(&hid->list, &dev->new_hid_list); + spin_unlock_irqrestore(&dev->lock, flags); + + /* schedule work to register the HID device */ + schedule_work(&dev->hid_work); + return 0; +} + +static int acc_unregister_hid(struct acc_dev *dev, int id) +{ + struct acc_hid_dev *hid; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + hid = acc_hid_get(&dev->hid_list, id); + if (!hid) + hid = acc_hid_get(&dev->new_hid_list, id); + if (!hid) { + spin_unlock_irqrestore(&dev->lock, flags); + return -EINVAL; + } + + list_move(&hid->list, &dev->dead_hid_list); + spin_unlock_irqrestore(&dev->lock, flags); + + schedule_work(&dev->hid_work); + return 0; +} + +static int create_bulk_endpoints(struct acc_dev *dev, + struct usb_endpoint_descriptor *in_desc, + struct usb_endpoint_descriptor *out_desc) +{ + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req; + struct usb_ep *ep; + int i; + + DBG(cdev, "create_bulk_endpoints dev: %p\n", dev); + + ep = usb_ep_autoconfig(cdev->gadget, in_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_in failed\n"); + 
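/* no suitable IN endpoint was found on this controller */
+		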
return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	/* now allocate requests for our endpoints */
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = acc_complete_in;
+		req_put(dev, &dev->tx_idle, req);
+	}
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = acc_complete_out;
+		dev->rx_req[i] = req;
+	}
+
+	return 0;
+
+fail:
+	pr_err("create_bulk_endpoints() could not allocate requests\n");
+	while ((req = req_get(dev, &dev->tx_idle)))
+		acc_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		acc_request_free(dev->rx_req[i], dev->ep_out);
+	return -1;
+}
+
+static ssize_t acc_read(struct file *fp, char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct acc_dev *dev = fp->private_data;
+	struct usb_request *req;
+	ssize_t r = count;
+	unsigned xfer;
+	int ret = 0;
+
+	pr_debug("acc_read(%zu)\n", count);
+
+	if (dev->disconnected) {
+		pr_debug("acc_read disconnected\n");
+		return -ENODEV;
+	}
+
+	if (count > BULK_BUFFER_SIZE)
+		count = BULK_BUFFER_SIZE;
+
+	/* we will block until we're online */
+	pr_debug("acc_read: waiting for online\n");
+	ret = wait_event_interruptible(dev->read_wq, dev->online);
+	if (ret < 0) {
+		r = ret;
+		goto done;
+	}
+
+	if (dev->rx_done) {
+		/* the last read was cancelled; its data may already be here */
+		req = dev->rx_req[0];
+		goto copy_data;
+	}
+
+requeue_req:
+	/* queue a request */
+	req = dev->rx_req[0];
+	req->length = count;
+	dev->rx_done = 0;
+	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+	if (ret < 0) {
+		r = -EIO;
+		goto done;
+	} else {
+		pr_debug("rx %p queue\n", req);
+	}
+
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+	if (ret < 0) {
+		r = ret;
+		ret = usb_ep_dequeue(dev->ep_out, req);
+		if (ret != 0) {
+			/*
+			 * Cancelling failed; data may already have been
+			 * received and will be picked up by the next read.
+			 */
+			pr_debug("acc_read: cancelling failed %d\n", ret);
+		}
+		goto done;
+	}
+
+copy_data:
+	dev->rx_done = 0;
+	if (dev->online) {
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0)
+			goto requeue_req;
+
+		pr_debug("rx %p %u\n", req, req->actual);
+		xfer = (req->actual < count) ?
req->actual : count;
+		r = xfer;
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+	} else
+		r = -EIO;
+
+done:
+	pr_debug("acc_read returning %zd\n", r);
+	return r;
+}
+
+static ssize_t acc_write(struct file *fp, const char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct acc_dev *dev = fp->private_data;
+	struct usb_request *req = NULL;
+	ssize_t r = count;
+	unsigned xfer;
+	int ret;
+
+	pr_debug("acc_write(%zu)\n", count);
+
+	if (!dev->online || dev->disconnected) {
+		pr_debug("acc_write disconnected or not online\n");
+		return -ENODEV;
+	}
+
+	while (count > 0) {
+		if (!dev->online) {
+			pr_debug("acc_write: not online\n");
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = NULL;
+		ret = wait_event_interruptible(dev->write_wq,
+			((req = req_get(dev, &dev->tx_idle)) || !dev->online));
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > BULK_BUFFER_SIZE) {
+			xfer = BULK_BUFFER_SIZE;
+			/* There will be more TX requests, so no ZLP yet. */
+			req->zero = 0;
+		} else {
+			xfer = count;
+			/* If the data length is a multiple of the
+			 * maxpacket size then send a zero length packet (ZLP).
+			 */
+			req->zero = ((xfer % dev->ep_in->maxpacket) == 0);
+		}
+		if (copy_from_user(req->buf, buf, xfer)) {
+			r = -EFAULT;
+			break;
+		}
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			pr_debug("acc_write: xfer error %d\n", ret);
+			r = -EIO;
+			break;
+		}
+
+		buf += xfer;
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = NULL;
+	}
+
+	if (req)
+		req_put(dev, &dev->tx_idle, req);
+
+	pr_debug("acc_write returning %zd\n", r);
+	return r;
+}
+
+static long acc_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+	struct acc_dev *dev = fp->private_data;
+	char *src = NULL;
+	int ret;
+
+	switch (code) {
+	case ACCESSORY_GET_STRING_MANUFACTURER:
+		src = dev->manufacturer;
+		break;
+	case ACCESSORY_GET_STRING_MODEL:
+		src = dev->model;
+		break;
+	case ACCESSORY_GET_STRING_DESCRIPTION:
+		src = dev->description;
+		break;
+	case ACCESSORY_GET_STRING_VERSION:
+		src = dev->version;
+		break;
+	case ACCESSORY_GET_STRING_URI:
+		src = dev->uri;
+		break;
+	case ACCESSORY_GET_STRING_SERIAL:
+		src = dev->serial;
+		break;
+	case ACCESSORY_IS_START_REQUESTED:
+		return dev->start_requested;
+	case ACCESSORY_GET_AUDIO_MODE:
+		return dev->audio_mode;
+	}
+	if (!src)
+		return -EINVAL;
+
+	ret = strlen(src) + 1;
+	if (copy_to_user((void __user *)value, src, ret))
+		ret = -EFAULT;
+	return ret;
+}
+
+static int acc_open(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "acc_open\n");
+	if (atomic_xchg(&_acc_dev->open_excl, 1))
+		return -EBUSY;
+
+	_acc_dev->disconnected = 0;
+	fp->private_data = _acc_dev;
+	return 0;
+}
+
+static int acc_release(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "acc_release\n");
+
+	WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0));
+	/* indicate that we are disconnected;
+	 * we may still be online, so don't touch the online flag
+	 */
+	_acc_dev->disconnected = 1;
+	return 0;
+}
+
+/* file operations for /dev/usb_accessory */
+static const struct file_operations acc_fops = {
+	.owner = THIS_MODULE,
+	.read = acc_read,
+	.write = acc_write,
+	.unlocked_ioctl = acc_ioctl,
+	.open = acc_open,
+	.release = acc_release,
+};
+
+static int acc_hid_probe(struct hid_device *hdev,
+		const struct hid_device_id *id)
+{
+	int ret;
+
+	ret = hid_parse(hdev);
+	if (ret)
+		return ret;
+	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
+static struct miscdevice acc_device = {
+	.minor =
MISC_DYNAMIC_MINOR, + .name = "usb_accessory", + .fops = &acc_fops, +}; + +static const struct hid_device_id acc_hid_table[] = { + { HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) }, + { } +}; + +static struct hid_driver acc_hid_driver = { + .name = "USB accessory", + .id_table = acc_hid_table, + .probe = acc_hid_probe, +}; + +static void acc_complete_setup_noop(struct usb_ep *ep, struct usb_request *req) +{ + /* + * Default no-op function when nothing needs to be done for the + * setup request + */ +} + +int acc_ctrlrequest(struct usb_composite_dev *cdev, + const struct usb_ctrlrequest *ctrl) +{ + struct acc_dev *dev = _acc_dev; + int value = -EOPNOTSUPP; + struct acc_hid_dev *hid; + int offset; + u8 b_requestType = ctrl->bRequestType; + u8 b_request = ctrl->bRequest; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + unsigned long flags; + +/* + printk(KERN_INFO "acc_ctrlrequest " + "%02x.%02x v%04x i%04x l%u\n", + b_requestType, b_request, + w_value, w_index, w_length); +*/ + + if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) { + if (b_request == ACCESSORY_START) { + dev->start_requested = 1; + schedule_delayed_work( + &dev->start_work, msecs_to_jiffies(10)); + value = 0; + cdev->req->complete = acc_complete_setup_noop; + } else if (b_request == ACCESSORY_SEND_STRING) { + dev->string_index = w_index; + cdev->gadget->ep0->driver_data = dev; + cdev->req->complete = acc_complete_set_string; + value = w_length; + } else if (b_request == ACCESSORY_SET_AUDIO_MODE && + w_index == 0 && w_length == 0) { + dev->audio_mode = w_value; + cdev->req->complete = acc_complete_setup_noop; + value = 0; + } else if (b_request == ACCESSORY_REGISTER_HID) { + cdev->req->complete = acc_complete_setup_noop; + value = acc_register_hid(dev, w_value, w_index); + } else if (b_request == ACCESSORY_UNREGISTER_HID) { + cdev->req->complete = acc_complete_setup_noop; + value = acc_unregister_hid(dev, w_value); + } else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) { + spin_lock_irqsave(&dev->lock, flags); + hid = acc_hid_get(&dev->new_hid_list, w_value); + spin_unlock_irqrestore(&dev->lock, flags); + if (!hid) { + value = -EINVAL; + goto err; + } + offset = w_index; + if (offset != hid->report_desc_offset + || offset + w_length > hid->report_desc_len) { + value = -EINVAL; + goto err; + } + cdev->req->context = hid; + cdev->req->complete = acc_complete_set_hid_report_desc; + value = w_length; + } else if (b_request == ACCESSORY_SEND_HID_EVENT) { + spin_lock_irqsave(&dev->lock, flags); + hid = acc_hid_get(&dev->hid_list, w_value); + spin_unlock_irqrestore(&dev->lock, flags); + if (!hid) { + value = -EINVAL; + goto err; + } + cdev->req->context = hid; + cdev->req->complete = acc_complete_send_hid_event; + value = w_length; + } + } else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) { + if (b_request == ACCESSORY_GET_PROTOCOL) { + *((u16 *)cdev->req->buf) = PROTOCOL_VERSION; + value = sizeof(u16); + cdev->req->complete = acc_complete_setup_noop; + /* clear any string left over from a previous session */ + memset(dev->manufacturer, 0, sizeof(dev->manufacturer)); + memset(dev->model, 0, sizeof(dev->model)); + memset(dev->description, 0, sizeof(dev->description)); + memset(dev->version, 0, sizeof(dev->version)); + memset(dev->uri, 0, sizeof(dev->uri)); + memset(dev->serial, 0, sizeof(dev->serial)); + dev->start_requested = 0; + dev->audio_mode = 0; + } + } + + if (value >= 0) { + cdev->req->zero = 0; + cdev->req->length = value; + value = 
usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC); + if (value < 0) + ERROR(cdev, "%s setup response queue error\n", + __func__); + } + +err: + if (value == -EOPNOTSUPP) + VDBG(cdev, + "unknown class-specific control req " + "%02x.%02x v%04x i%04x l%u\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + return value; +} +EXPORT_SYMBOL_GPL(acc_ctrlrequest); + +static int +__acc_function_bind(struct usb_configuration *c, + struct usb_function *f, bool configfs) +{ + struct usb_composite_dev *cdev = c->cdev; + struct acc_dev *dev = func_to_dev(f); + int id; + int ret; + + DBG(cdev, "acc_function_bind dev: %p\n", dev); + + if (configfs) { + if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) { + ret = usb_string_id(c->cdev); + if (ret < 0) + return ret; + acc_string_defs[INTERFACE_STRING_INDEX].id = ret; + acc_interface_desc.iInterface = ret; + } + dev->cdev = c->cdev; + } + ret = hid_register_driver(&acc_hid_driver); + if (ret) + return ret; + + dev->start_requested = 0; + + /* allocate interface ID(s) */ + id = usb_interface_id(c, f); + if (id < 0) + return id; + acc_interface_desc.bInterfaceNumber = id; + + /* allocate endpoints */ + ret = create_bulk_endpoints(dev, &acc_fullspeed_in_desc, + &acc_fullspeed_out_desc); + if (ret) + return ret; + + /* support high speed hardware */ + if (gadget_is_dualspeed(c->cdev->gadget)) { + acc_highspeed_in_desc.bEndpointAddress = + acc_fullspeed_in_desc.bEndpointAddress; + acc_highspeed_out_desc.bEndpointAddress = + acc_fullspeed_out_desc.bEndpointAddress; + } + + DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n", + gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", + f->name, dev->ep_in->name, dev->ep_out->name); + return 0; +} + +static int +acc_function_bind_configfs(struct usb_configuration *c, + struct usb_function *f) { + return __acc_function_bind(c, f, true); +} + +static void +kill_all_hid_devices(struct acc_dev *dev) +{ + struct acc_hid_dev *hid; + struct list_head *entry, *temp; + unsigned long flags; + + /* do nothing if usb accessory device doesn't exist */ + if (!dev) + return; + + spin_lock_irqsave(&dev->lock, flags); + list_for_each_safe(entry, temp, &dev->hid_list) { + hid = list_entry(entry, struct acc_hid_dev, list); + list_del(&hid->list); + list_add(&hid->list, &dev->dead_hid_list); + } + list_for_each_safe(entry, temp, &dev->new_hid_list) { + hid = list_entry(entry, struct acc_hid_dev, list); + list_del(&hid->list); + list_add(&hid->list, &dev->dead_hid_list); + } + spin_unlock_irqrestore(&dev->lock, flags); + + schedule_work(&dev->hid_work); +} + +static void +acc_hid_unbind(struct acc_dev *dev) +{ + hid_unregister_driver(&acc_hid_driver); + kill_all_hid_devices(dev); +} + +static void +acc_function_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct acc_dev *dev = func_to_dev(f); + struct usb_request *req; + int i; + + dev->online = 0; /* clear online flag */ + wake_up(&dev->read_wq); /* unblock reads on closure */ + wake_up(&dev->write_wq); /* likewise for writes */ + + while ((req = req_get(dev, &dev->tx_idle))) + acc_request_free(req, dev->ep_in); + for (i = 0; i < RX_REQ_MAX; i++) + acc_request_free(dev->rx_req[i], dev->ep_out); + + acc_hid_unbind(dev); +} + +static void acc_start_work(struct work_struct *data) +{ + char *envp[2] = { "ACCESSORY=START", NULL }; + + kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp); +} + +static int acc_hid_init(struct acc_hid_dev *hdev) +{ + struct hid_device *hid; + int ret; + + hid = hid_allocate_device(); + if (IS_ERR(hid)) + 
return PTR_ERR(hid); + + hid->ll_driver = &acc_hid_ll_driver; + hid->dev.parent = acc_device.this_device; + + hid->bus = BUS_USB; + hid->vendor = HID_ANY_ID; + hid->product = HID_ANY_ID; + hid->driver_data = hdev; + ret = hid_add_device(hid); + if (ret) { + pr_err("can't add hid device: %d\n", ret); + hid_destroy_device(hid); + return ret; + } + + hdev->hid = hid; + return 0; +} + +static void acc_hid_delete(struct acc_hid_dev *hid) +{ + kfree(hid->report_desc); + kfree(hid); +} + +static void acc_hid_work(struct work_struct *data) +{ + struct acc_dev *dev = _acc_dev; + struct list_head *entry, *temp; + struct acc_hid_dev *hid; + struct list_head new_list, dead_list; + unsigned long flags; + + INIT_LIST_HEAD(&new_list); + + spin_lock_irqsave(&dev->lock, flags); + + /* copy hids that are ready for initialization to new_list */ + list_for_each_safe(entry, temp, &dev->new_hid_list) { + hid = list_entry(entry, struct acc_hid_dev, list); + if (hid->report_desc_offset == hid->report_desc_len) + list_move(&hid->list, &new_list); + } + + if (list_empty(&dev->dead_hid_list)) { + INIT_LIST_HEAD(&dead_list); + } else { + /* move all of dev->dead_hid_list to dead_list */ + dead_list.prev = dev->dead_hid_list.prev; + dead_list.next = dev->dead_hid_list.next; + dead_list.next->prev = &dead_list; + dead_list.prev->next = &dead_list; + INIT_LIST_HEAD(&dev->dead_hid_list); + } + + spin_unlock_irqrestore(&dev->lock, flags); + + /* register new HID devices */ + list_for_each_safe(entry, temp, &new_list) { + hid = list_entry(entry, struct acc_hid_dev, list); + if (acc_hid_init(hid)) { + pr_err("can't add HID device %p\n", hid); + acc_hid_delete(hid); + } else { + spin_lock_irqsave(&dev->lock, flags); + list_move(&hid->list, &dev->hid_list); + spin_unlock_irqrestore(&dev->lock, flags); + } + } + + /* remove dead HID devices */ + list_for_each_safe(entry, temp, &dead_list) { + hid = list_entry(entry, struct acc_hid_dev, list); + list_del(&hid->list); + if (hid->hid) + hid_destroy_device(hid->hid); + acc_hid_delete(hid); + } +} + +static int acc_function_set_alt(struct usb_function *f, + unsigned intf, unsigned alt) +{ + struct acc_dev *dev = func_to_dev(f); + struct usb_composite_dev *cdev = f->config->cdev; + int ret; + + DBG(cdev, "acc_function_set_alt intf: %d alt: %d\n", intf, alt); + + ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in); + if (ret) + return ret; + + ret = usb_ep_enable(dev->ep_in); + if (ret) + return ret; + + ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out); + if (ret) + return ret; + + ret = usb_ep_enable(dev->ep_out); + if (ret) { + usb_ep_disable(dev->ep_in); + return ret; + } + + dev->online = 1; + dev->disconnected = 0; /* if online then not disconnected */ + + /* readers may be blocked waiting for us to go online */ + wake_up(&dev->read_wq); + return 0; +} + +static void acc_function_disable(struct usb_function *f) +{ + struct acc_dev *dev = func_to_dev(f); + struct usb_composite_dev *cdev = dev->cdev; + + DBG(cdev, "acc_function_disable\n"); + acc_set_disconnected(dev); /* this now only sets disconnected */ + dev->online = 0; /* so now need to clear online flag here too */ + usb_ep_disable(dev->ep_in); + usb_ep_disable(dev->ep_out); + + /* readers may be blocked waiting for us to go online */ + wake_up(&dev->read_wq); + + VDBG(cdev, "%s disabled\n", dev->function.name); +} + +static int acc_setup(void) +{ + struct acc_dev *dev; + int ret; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + spin_lock_init(&dev->lock); + 
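/* read_wq/write_wq are signalled from the acc_complete_* callbacks;
+	 * the three hid lists feed the deferred registration done in
+	 * acc_hid_work()
+	 */
+	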
init_waitqueue_head(&dev->read_wq); + init_waitqueue_head(&dev->write_wq); + atomic_set(&dev->open_excl, 0); + INIT_LIST_HEAD(&dev->tx_idle); + INIT_LIST_HEAD(&dev->hid_list); + INIT_LIST_HEAD(&dev->new_hid_list); + INIT_LIST_HEAD(&dev->dead_hid_list); + INIT_DELAYED_WORK(&dev->start_work, acc_start_work); + INIT_WORK(&dev->hid_work, acc_hid_work); + + /* _acc_dev must be set before calling usb_gadget_register_driver */ + _acc_dev = dev; + + ret = misc_register(&acc_device); + if (ret) + goto err; + + return 0; + +err: + kfree(dev); + pr_err("USB accessory gadget driver failed to initialize\n"); + return ret; +} + +void acc_disconnect(void) +{ + /* unregister all HID devices if USB is disconnected */ + kill_all_hid_devices(_acc_dev); +} +EXPORT_SYMBOL_GPL(acc_disconnect); + +static void acc_cleanup(void) +{ + misc_deregister(&acc_device); + kfree(_acc_dev); + _acc_dev = NULL; +} +static struct acc_instance *to_acc_instance(struct config_item *item) +{ + return container_of(to_config_group(item), struct acc_instance, + func_inst.group); +} + +static void acc_attr_release(struct config_item *item) +{ + struct acc_instance *fi_acc = to_acc_instance(item); + + usb_put_function_instance(&fi_acc->func_inst); +} + +static struct configfs_item_operations acc_item_ops = { + .release = acc_attr_release, +}; + +static struct config_item_type acc_func_type = { + .ct_item_ops = &acc_item_ops, + .ct_owner = THIS_MODULE, +}; + +static struct acc_instance *to_fi_acc(struct usb_function_instance *fi) +{ + return container_of(fi, struct acc_instance, func_inst); +} + +static int acc_set_inst_name(struct usb_function_instance *fi, const char *name) +{ + struct acc_instance *fi_acc; + char *ptr; + int name_len; + + name_len = strlen(name) + 1; + if (name_len > MAX_INST_NAME_LEN) + return -ENAMETOOLONG; + + ptr = kstrndup(name, name_len, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + fi_acc = to_fi_acc(fi); + fi_acc->name = ptr; + return 0; +} + +static void acc_free_inst(struct usb_function_instance *fi) +{ + struct acc_instance *fi_acc; + + fi_acc = to_fi_acc(fi); + kfree(fi_acc->name); + acc_cleanup(); +} + +static struct usb_function_instance *acc_alloc_inst(void) +{ + struct acc_instance *fi_acc; + struct acc_dev *dev; + int err; + + fi_acc = kzalloc(sizeof(*fi_acc), GFP_KERNEL); + if (!fi_acc) + return ERR_PTR(-ENOMEM); + fi_acc->func_inst.set_inst_name = acc_set_inst_name; + fi_acc->func_inst.free_func_inst = acc_free_inst; + + err = acc_setup(); + if (err) { + kfree(fi_acc); + pr_err("Error setting ACCESSORY\n"); + return ERR_PTR(err); + } + + config_group_init_type_name(&fi_acc->func_inst.group, + "", &acc_func_type); + dev = _acc_dev; + return &fi_acc->func_inst; +} + +static void acc_free(struct usb_function *f) +{ +/*NO-OP: no function specific resource allocation in mtp_alloc*/ +} + +int acc_ctrlrequest_configfs(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) { + if (f->config != NULL && f->config->cdev != NULL) + return acc_ctrlrequest(f->config->cdev, ctrl); + else + return -1; +} + +static struct usb_function *acc_alloc(struct usb_function_instance *fi) +{ + struct acc_dev *dev = _acc_dev; + + pr_info("acc_alloc\n"); + + dev->function.name = "accessory"; + dev->function.strings = acc_strings, + dev->function.fs_descriptors = fs_acc_descs; + dev->function.hs_descriptors = hs_acc_descs; + dev->function.bind = acc_function_bind_configfs; + dev->function.unbind = acc_function_unbind; + dev->function.set_alt = acc_function_set_alt; + dev->function.disable = acc_function_disable; + 
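/* ep0 vendor requests are routed in via acc_ctrlrequest_configfs() */
+	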
dev->function.free_func = acc_free; + dev->function.setup = acc_ctrlrequest_configfs; + + return &dev->function; +} +DECLARE_USB_FUNCTION_INIT(accessory, acc_alloc_inst, acc_alloc); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c new file mode 100644 index 000000000000..8124af33b738 --- /dev/null +++ b/drivers/usb/gadget/function/f_audio_source.c @@ -0,0 +1,1071 @@ +/* + * Gadget Function Driver for USB audio source device + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#define SAMPLE_RATE 44100 +#define FRAMES_PER_MSEC (SAMPLE_RATE / 1000) + +#define IN_EP_MAX_PACKET_SIZE 256 + +/* Number of requests to allocate */ +#define IN_EP_REQ_COUNT 4 + +#define AUDIO_AC_INTERFACE 0 +#define AUDIO_AS_INTERFACE 1 +#define AUDIO_NUM_INTERFACES 2 +#define MAX_INST_NAME_LEN 40 + +/* B.3.1 Standard AC Interface Descriptor */ +static struct usb_interface_descriptor ac_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, +}; + +DECLARE_UAC_AC_HEADER_DESCRIPTOR(2); + +#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(AUDIO_NUM_INTERFACES) +/* 1 input terminal, 1 output terminal and 1 feature unit */ +#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \ + + UAC_DT_INPUT_TERMINAL_SIZE + UAC_DT_OUTPUT_TERMINAL_SIZE \ + + UAC_DT_FEATURE_UNIT_SIZE(0)) +/* B.3.2 Class-Specific AC Interface Descriptor */ +static struct uac1_ac_header_descriptor_2 ac_header_desc = { + .bLength = UAC_DT_AC_HEADER_LENGTH, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_HEADER, + .bcdADC = __constant_cpu_to_le16(0x0100), + .wTotalLength = __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH), + .bInCollection = AUDIO_NUM_INTERFACES, + .baInterfaceNr = { + [0] = AUDIO_AC_INTERFACE, + [1] = AUDIO_AS_INTERFACE, + } +}; + +#define INPUT_TERMINAL_ID 1 +static struct uac_input_terminal_descriptor input_terminal_desc = { + .bLength = UAC_DT_INPUT_TERMINAL_SIZE, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_INPUT_TERMINAL, + .bTerminalID = INPUT_TERMINAL_ID, + .wTerminalType = UAC_INPUT_TERMINAL_MICROPHONE, + .bAssocTerminal = 0, + .wChannelConfig = 0x3, +}; + +DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0); + +#define FEATURE_UNIT_ID 2 +static struct uac_feature_unit_descriptor_0 feature_unit_desc = { + .bLength = UAC_DT_FEATURE_UNIT_SIZE(0), + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_FEATURE_UNIT, + .bUnitID = FEATURE_UNIT_ID, + .bSourceID = INPUT_TERMINAL_ID, + .bControlSize = 2, +}; + +#define OUTPUT_TERMINAL_ID 3 +static struct uac1_output_terminal_descriptor output_terminal_desc = { + .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, + .bTerminalID = OUTPUT_TERMINAL_ID, + 
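/* USB streaming terminal, fed from the feature unit */
+	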
.wTerminalType = UAC_TERMINAL_STREAMING,
+	.bAssocTerminal = FEATURE_UNIT_ID,
+	.bSourceID = FEATURE_UNIT_ID,
+};
+
+/* B.4.1 Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+	.bLength = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType = USB_DT_INTERFACE,
+	.bAlternateSetting = 0,
+	.bNumEndpoints = 0,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+	.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+	.bLength = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType = USB_DT_INTERFACE,
+	.bAlternateSetting = 1,
+	.bNumEndpoints = 1,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+	.bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2 Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_header_desc = {
+	.bLength = UAC_DT_AS_HEADER_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_AS_GENERAL,
+	.bTerminalLink = INPUT_TERMINAL_ID,
+	.bDelay = 1,
+	.wFormatTag = UAC_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+	.bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_FORMAT_TYPE,
+	.bFormatType = UAC_FORMAT_TYPE_I,
+	.bSubframeSize = 2,
+	.bBitResolution = 16,
+	.bSamFreqType = 1,
+};
+
+/* Standard ISO IN Endpoint Descriptor for highspeed */
+static struct usb_endpoint_descriptor hs_as_in_ep_desc = {
+	.bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes = USB_ENDPOINT_SYNC_SYNC
+				| USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize = __constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+	.bInterval = 4, /* poll 1 per millisecond */
+};
+
+/* Standard ISO IN Endpoint Descriptor for fullspeed */
+static struct usb_endpoint_descriptor fs_as_in_ep_desc = {
+	.bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+	.bEndpointAddress = USB_DIR_IN,
+	.bmAttributes = USB_ENDPOINT_SYNC_SYNC
+				| USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize = __constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+	.bInterval = 1, /* poll 1 per millisecond */
+};
+
+/* Class-specific AS ISO IN Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
+	.bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
+	.bDescriptorType = USB_DT_CS_ENDPOINT,
+	.bDescriptorSubtype = UAC_EP_GENERAL,
+	.bmAttributes = 1,
+	.bLockDelayUnits = 1,
+	.wLockDelay = __constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *hs_audio_desc[] = {
+	(struct usb_descriptor_header *)&ac_interface_desc,
+	(struct usb_descriptor_header *)&ac_header_desc,
+
+	(struct usb_descriptor_header *)&input_terminal_desc,
+	(struct usb_descriptor_header *)&output_terminal_desc,
+	(struct usb_descriptor_header *)&feature_unit_desc,
+
+	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&as_header_desc,
+
+	(struct usb_descriptor_header *)&as_type_i_desc,
+
+	(struct usb_descriptor_header *)&hs_as_in_ep_desc,
+	(struct usb_descriptor_header *)&as_iso_in_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *fs_audio_desc[] = {
+	(struct usb_descriptor_header *)&ac_interface_desc,
+	(struct usb_descriptor_header *)&ac_header_desc,
+
+	(struct usb_descriptor_header *)&input_terminal_desc,
+	(struct usb_descriptor_header
*)&output_terminal_desc, + (struct usb_descriptor_header *)&feature_unit_desc, + + (struct usb_descriptor_header *)&as_interface_alt_0_desc, + (struct usb_descriptor_header *)&as_interface_alt_1_desc, + (struct usb_descriptor_header *)&as_header_desc, + + (struct usb_descriptor_header *)&as_type_i_desc, + + (struct usb_descriptor_header *)&fs_as_in_ep_desc, + (struct usb_descriptor_header *)&as_iso_in_desc, + NULL, +}; + +static struct snd_pcm_hardware audio_hw_info = { + .info = SNDRV_PCM_INFO_MMAP | + SNDRV_PCM_INFO_MMAP_VALID | + SNDRV_PCM_INFO_BATCH | + SNDRV_PCM_INFO_INTERLEAVED | + SNDRV_PCM_INFO_BLOCK_TRANSFER, + + .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels_min = 2, + .channels_max = 2, + .rate_min = SAMPLE_RATE, + .rate_max = SAMPLE_RATE, + + .buffer_bytes_max = 1024 * 1024, + .period_bytes_min = 64, + .period_bytes_max = 512 * 1024, + .periods_min = 2, + .periods_max = 1024, +}; + +/*-------------------------------------------------------------------------*/ + +struct audio_source_config { + int card; + int device; +}; + +struct audio_dev { + struct usb_function func; + struct snd_card *card; + struct snd_pcm *pcm; + struct snd_pcm_substream *substream; + + struct list_head idle_reqs; + struct usb_ep *in_ep; + + spinlock_t lock; + + /* beginning, end and current position in our buffer */ + void *buffer_start; + void *buffer_end; + void *buffer_pos; + + /* byte size of a "period" */ + unsigned int period; + /* bytes sent since last call to snd_pcm_period_elapsed */ + unsigned int period_offset; + /* time we started playing */ + ktime_t start_time; + /* number of frames sent since start_time */ + s64 frames_sent; + struct audio_source_config *config; + /* for creating and issuing QoS requests */ + struct pm_qos_request pm_qos; +}; + +static inline struct audio_dev *func_to_audio(struct usb_function *f) +{ + return container_of(f, struct audio_dev, func); +} + +/*-------------------------------------------------------------------------*/ + +struct audio_source_instance { + struct usb_function_instance func_inst; + const char *name; + struct audio_source_config *config; + struct device *audio_device; +}; + +static void audio_source_attr_release(struct config_item *item); + +static struct configfs_item_operations audio_source_item_ops = { + .release = audio_source_attr_release, +}; + +static struct config_item_type audio_source_func_type = { + .ct_item_ops = &audio_source_item_ops, + .ct_owner = THIS_MODULE, +}; + +static ssize_t audio_source_pcm_show(struct device *dev, + struct device_attribute *attr, char *buf); + +static DEVICE_ATTR(pcm, S_IRUGO, audio_source_pcm_show, NULL); + +static struct device_attribute *audio_source_function_attributes[] = { + &dev_attr_pcm, + NULL +}; + +/*--------------------------------------------------------------------------*/ + +static struct usb_request *audio_request_new(struct usb_ep *ep, int buffer_size) +{ + struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); + + if (!req) + return NULL; + + req->buf = kmalloc(buffer_size, GFP_KERNEL); + if (!req->buf) { + usb_ep_free_request(ep, req); + return NULL; + } + req->length = buffer_size; + return req; +} + +static void audio_request_free(struct usb_request *req, struct usb_ep *ep) +{ + if (req) { + kfree(req->buf); + usb_ep_free_request(ep, req); + } +} + +static void audio_req_put(struct audio_dev *audio, struct usb_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&audio->lock, flags); + list_add_tail(&req->list, &audio->idle_reqs); + spin_unlock_irqrestore(&audio->lock, 
flags); +} + +static struct usb_request *audio_req_get(struct audio_dev *audio) +{ + unsigned long flags; + struct usb_request *req; + + spin_lock_irqsave(&audio->lock, flags); + if (list_empty(&audio->idle_reqs)) { + req = 0; + } else { + req = list_first_entry(&audio->idle_reqs, struct usb_request, + list); + list_del(&req->list); + } + spin_unlock_irqrestore(&audio->lock, flags); + return req; +} + +/* send the appropriate number of packets to match our bitrate */ +static void audio_send(struct audio_dev *audio) +{ + struct snd_pcm_runtime *runtime; + struct usb_request *req; + int length, length1, length2, ret; + s64 msecs; + s64 frames; + ktime_t now; + + /* audio->substream will be null if we have been closed */ + if (!audio->substream) + return; + /* audio->buffer_pos will be null if we have been stopped */ + if (!audio->buffer_pos) + return; + + runtime = audio->substream->runtime; + + /* compute number of frames to send */ + now = ktime_get(); + msecs = div_s64((ktime_to_ns(now) - ktime_to_ns(audio->start_time)), + 1000000); + frames = div_s64((msecs * SAMPLE_RATE), 1000); + + /* Readjust our frames_sent if we fall too far behind. + * If we get too far behind it is better to drop some frames than + * to keep sending data too fast in an attempt to catch up. + */ + if (frames - audio->frames_sent > 10 * FRAMES_PER_MSEC) + audio->frames_sent = frames - FRAMES_PER_MSEC; + + frames -= audio->frames_sent; + + /* We need to send something to keep the pipeline going */ + if (frames <= 0) + frames = FRAMES_PER_MSEC; + + while (frames > 0) { + req = audio_req_get(audio); + if (!req) + break; + + length = frames_to_bytes(runtime, frames); + if (length > IN_EP_MAX_PACKET_SIZE) + length = IN_EP_MAX_PACKET_SIZE; + + if (audio->buffer_pos + length > audio->buffer_end) + length1 = audio->buffer_end - audio->buffer_pos; + else + length1 = length; + memcpy(req->buf, audio->buffer_pos, length1); + if (length1 < length) { + /* Wrap around and copy remaining length + * at beginning of buffer. 
+ */ + length2 = length - length1; + memcpy(req->buf + length1, audio->buffer_start, + length2); + audio->buffer_pos = audio->buffer_start + length2; + } else { + audio->buffer_pos += length1; + if (audio->buffer_pos >= audio->buffer_end) + audio->buffer_pos = audio->buffer_start; + } + + req->length = length; + ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC); + if (ret < 0) { + pr_err("usb_ep_queue failed ret: %d\n", ret); + audio_req_put(audio, req); + break; + } + + frames -= bytes_to_frames(runtime, length); + audio->frames_sent += bytes_to_frames(runtime, length); + } +} + +static void audio_control_complete(struct usb_ep *ep, struct usb_request *req) +{ + /* nothing to do here */ +} + +static void audio_data_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct audio_dev *audio = req->context; + + pr_debug("audio_data_complete req->status %d req->actual %d\n", + req->status, req->actual); + + audio_req_put(audio, req); + + if (!audio->buffer_start || req->status) + return; + + audio->period_offset += req->actual; + if (audio->period_offset >= audio->period) { + snd_pcm_period_elapsed(audio->substream); + audio->period_offset = 0; + } + audio_send(audio); +} + +static int audio_set_endpoint_req(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + int value = -EOPNOTSUPP; + u16 ep = le16_to_cpu(ctrl->wIndex); + u16 len = le16_to_cpu(ctrl->wLength); + u16 w_value = le16_to_cpu(ctrl->wValue); + + pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n", + ctrl->bRequest, w_value, len, ep); + + switch (ctrl->bRequest) { + case UAC_SET_CUR: + case UAC_SET_MIN: + case UAC_SET_MAX: + case UAC_SET_RES: + value = len; + break; + default: + break; + } + + return value; +} + +static int audio_get_endpoint_req(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + struct usb_composite_dev *cdev = f->config->cdev; + int value = -EOPNOTSUPP; + u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF); + u16 len = le16_to_cpu(ctrl->wLength); + u16 w_value = le16_to_cpu(ctrl->wValue); + u8 *buf = cdev->req->buf; + + pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n", + ctrl->bRequest, w_value, len, ep); + + if (w_value == UAC_EP_CS_ATTR_SAMPLE_RATE << 8) { + switch (ctrl->bRequest) { + case UAC_GET_CUR: + case UAC_GET_MIN: + case UAC_GET_MAX: + case UAC_GET_RES: + /* return our sample rate */ + buf[0] = (u8)SAMPLE_RATE; + buf[1] = (u8)(SAMPLE_RATE >> 8); + buf[2] = (u8)(SAMPLE_RATE >> 16); + value = 3; + break; + default: + break; + } + } + + return value; +} + +static int +audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int value = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + + /* composite driver infrastructure handles everything; interface + * activation uses set_alt(). + */ + switch (ctrl->bRequestType) { + case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT: + value = audio_set_endpoint_req(f, ctrl); + break; + + case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT: + value = audio_get_endpoint_req(f, ctrl); + break; + } + + /* respond with data transfer or status phase? 
*/ + if (value >= 0) { + pr_debug("audio req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->zero = 0; + req->length = value; + req->complete = audio_control_complete; + value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (value < 0) + pr_err("audio response on err %d\n", value); + } + + /* device either stalls (value < 0) or reports success */ + return value; +} + +static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct audio_dev *audio = func_to_audio(f); + struct usb_composite_dev *cdev = f->config->cdev; + int ret; + + pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt); + + ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep); + if (ret) + return ret; + + usb_ep_enable(audio->in_ep); + return 0; +} + +static void audio_disable(struct usb_function *f) +{ + struct audio_dev *audio = func_to_audio(f); + + pr_debug("audio_disable\n"); + usb_ep_disable(audio->in_ep); +} + +static void audio_free_func(struct usb_function *f) +{ + /* no-op */ +} + +/*-------------------------------------------------------------------------*/ + +static void audio_build_desc(struct audio_dev *audio) +{ + u8 *sam_freq; + int rate; + + /* Set channel numbers */ + input_terminal_desc.bNrChannels = 2; + as_type_i_desc.bNrChannels = 2; + + /* Set sample rates */ + rate = SAMPLE_RATE; + sam_freq = as_type_i_desc.tSamFreq[0]; + memcpy(sam_freq, &rate, 3); +} + + +static int snd_card_setup(struct usb_configuration *c, + struct audio_source_config *config); +static struct audio_source_instance *to_fi_audio_source( + const struct usb_function_instance *fi); + + +/* audio function driver setup/binding */ +static int +audio_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct audio_dev *audio = func_to_audio(f); + int status; + struct usb_ep *ep; + struct usb_request *req; + int i; + int err; + + if (IS_ENABLED(CONFIG_USB_CONFIGFS)) { + struct audio_source_instance *fi_audio = + to_fi_audio_source(f->fi); + struct audio_source_config *config = + fi_audio->config; + + err = snd_card_setup(c, config); + if (err) + return err; + } + + audio_build_desc(audio); + + /* allocate instance-specific interface IDs, and patch descriptors */ + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + ac_interface_desc.bInterfaceNumber = status; + + /* AUDIO_AC_INTERFACE */ + ac_header_desc.baInterfaceNr[0] = status; + + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + as_interface_alt_0_desc.bInterfaceNumber = status; + as_interface_alt_1_desc.bInterfaceNumber = status; + + /* AUDIO_AS_INTERFACE */ + ac_header_desc.baInterfaceNr[1] = status; + + status = -ENODEV; + + /* allocate our endpoint */ + ep = usb_ep_autoconfig(cdev->gadget, &fs_as_in_ep_desc); + if (!ep) + goto fail; + audio->in_ep = ep; + ep->driver_data = audio; /* claim */ + + if (gadget_is_dualspeed(c->cdev->gadget)) + hs_as_in_ep_desc.bEndpointAddress = + fs_as_in_ep_desc.bEndpointAddress; + + f->fs_descriptors = fs_audio_desc; + f->hs_descriptors = hs_audio_desc; + + for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) { + req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE); + if (req) { + req->context = audio; + req->complete = audio_data_complete; + audio_req_put(audio, req); + } else + status = -ENOMEM; + } + +fail: + return status; +} + +static void +audio_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct audio_dev *audio = func_to_audio(f); + 
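/* free every queued request before the sound card is released */
+	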
struct usb_request *req; + + while ((req = audio_req_get(audio))) + audio_request_free(req, audio->in_ep); + + snd_card_free_when_closed(audio->card); + audio->card = NULL; + audio->pcm = NULL; + audio->substream = NULL; + audio->in_ep = NULL; + + if (IS_ENABLED(CONFIG_USB_CONFIGFS)) { + struct audio_source_instance *fi_audio = + to_fi_audio_source(f->fi); + struct audio_source_config *config = + fi_audio->config; + + config->card = -1; + config->device = -1; + } +} + +static void audio_pcm_playback_start(struct audio_dev *audio) +{ + audio->start_time = ktime_get(); + audio->frames_sent = 0; + audio_send(audio); +} + +static void audio_pcm_playback_stop(struct audio_dev *audio) +{ + unsigned long flags; + + spin_lock_irqsave(&audio->lock, flags); + audio->buffer_start = 0; + audio->buffer_end = 0; + audio->buffer_pos = 0; + spin_unlock_irqrestore(&audio->lock, flags); +} + +static int audio_pcm_open(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct audio_dev *audio = substream->private_data; + + runtime->private_data = audio; + runtime->hw = audio_hw_info; + snd_pcm_limit_hw_rates(runtime); + runtime->hw.channels_max = 2; + + audio->substream = substream; + + /* Add the QoS request and set the latency to 0 */ + pm_qos_add_request(&audio->pm_qos, PM_QOS_CPU_DMA_LATENCY, 0); + + return 0; +} + +static int audio_pcm_close(struct snd_pcm_substream *substream) +{ + struct audio_dev *audio = substream->private_data; + unsigned long flags; + + spin_lock_irqsave(&audio->lock, flags); + + /* Remove the QoS request */ + pm_qos_remove_request(&audio->pm_qos); + + audio->substream = NULL; + spin_unlock_irqrestore(&audio->lock, flags); + + return 0; +} + +static int audio_pcm_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +{ + unsigned int channels = params_channels(params); + unsigned int rate = params_rate(params); + + if (rate != SAMPLE_RATE) + return -EINVAL; + if (channels != 2) + return -EINVAL; + + return snd_pcm_lib_alloc_vmalloc_buffer(substream, + params_buffer_bytes(params)); +} + +static int audio_pcm_hw_free(struct snd_pcm_substream *substream) +{ + return snd_pcm_lib_free_vmalloc_buffer(substream); +} + +static int audio_pcm_prepare(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct audio_dev *audio = runtime->private_data; + + audio->period = snd_pcm_lib_period_bytes(substream); + audio->period_offset = 0; + audio->buffer_start = runtime->dma_area; + audio->buffer_end = audio->buffer_start + + snd_pcm_lib_buffer_bytes(substream); + audio->buffer_pos = audio->buffer_start; + + return 0; +} + +static snd_pcm_uframes_t audio_pcm_pointer(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct audio_dev *audio = runtime->private_data; + ssize_t bytes = audio->buffer_pos - audio->buffer_start; + + /* return offset of next frame to fill in our buffer */ + return bytes_to_frames(runtime, bytes); +} + +static int audio_pcm_playback_trigger(struct snd_pcm_substream *substream, + int cmd) +{ + struct audio_dev *audio = substream->runtime->private_data; + int ret = 0; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + audio_pcm_playback_start(audio); + break; + + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: + audio_pcm_playback_stop(audio); + break; + + default: + ret = -EINVAL; + } + + return ret; +} + +static struct audio_dev _audio_dev = { + .func = { + 
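/* single static instance; configfs allocation hands out this object */
+		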
.name = "audio_source", + .bind = audio_bind, + .unbind = audio_unbind, + .set_alt = audio_set_alt, + .setup = audio_setup, + .disable = audio_disable, + .free_func = audio_free_func, + }, + .lock = __SPIN_LOCK_UNLOCKED(_audio_dev.lock), + .idle_reqs = LIST_HEAD_INIT(_audio_dev.idle_reqs), +}; + +static struct snd_pcm_ops audio_playback_ops = { + .open = audio_pcm_open, + .close = audio_pcm_close, + .ioctl = snd_pcm_lib_ioctl, + .hw_params = audio_pcm_hw_params, + .hw_free = audio_pcm_hw_free, + .prepare = audio_pcm_prepare, + .trigger = audio_pcm_playback_trigger, + .pointer = audio_pcm_pointer, +}; + +int audio_source_bind_config(struct usb_configuration *c, + struct audio_source_config *config) +{ + struct audio_dev *audio; + int err; + + config->card = -1; + config->device = -1; + + audio = &_audio_dev; + + err = snd_card_setup(c, config); + if (err) + return err; + + err = usb_add_function(c, &audio->func); + if (err) + goto add_fail; + + return 0; + +add_fail: + snd_card_free(audio->card); + return err; +} + +static int snd_card_setup(struct usb_configuration *c, + struct audio_source_config *config) +{ + struct audio_dev *audio; + struct snd_card *card; + struct snd_pcm *pcm; + int err; + + audio = &_audio_dev; + + err = snd_card_new(&c->cdev->gadget->dev, + SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1, + THIS_MODULE, 0, &card); + if (err) + return err; + + err = snd_pcm_new(card, "USB audio source", 0, 1, 0, &pcm); + if (err) + goto pcm_fail; + + pcm->private_data = audio; + pcm->info_flags = 0; + audio->pcm = pcm; + + strlcpy(pcm->name, "USB gadget audio", sizeof(pcm->name)); + + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &audio_playback_ops); + snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, + NULL, 0, 64 * 1024); + + strlcpy(card->driver, "audio_source", sizeof(card->driver)); + strlcpy(card->shortname, card->driver, sizeof(card->shortname)); + strlcpy(card->longname, "USB accessory audio source", + sizeof(card->longname)); + + err = snd_card_register(card); + if (err) + goto register_fail; + + config->card = pcm->card->number; + config->device = pcm->device; + audio->card = card; + return 0; + +register_fail: +pcm_fail: + snd_card_free(audio->card); + return err; +} + +static struct audio_source_instance *to_audio_source_instance( + struct config_item *item) +{ + return container_of(to_config_group(item), struct audio_source_instance, + func_inst.group); +} + +static struct audio_source_instance *to_fi_audio_source( + const struct usb_function_instance *fi) +{ + return container_of(fi, struct audio_source_instance, func_inst); +} + +static void audio_source_attr_release(struct config_item *item) +{ + struct audio_source_instance *fi_audio = to_audio_source_instance(item); + + usb_put_function_instance(&fi_audio->func_inst); +} + +static int audio_source_set_inst_name(struct usb_function_instance *fi, + const char *name) +{ + struct audio_source_instance *fi_audio; + char *ptr; + int name_len; + + name_len = strlen(name) + 1; + if (name_len > MAX_INST_NAME_LEN) + return -ENAMETOOLONG; + + ptr = kstrndup(name, name_len, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + fi_audio = to_fi_audio_source(fi); + fi_audio->name = ptr; + + return 0; +} + +static void audio_source_free_inst(struct usb_function_instance *fi) +{ + struct audio_source_instance *fi_audio; + + fi_audio = to_fi_audio_source(fi); + device_destroy(fi_audio->audio_device->class, + fi_audio->audio_device->devt); + kfree(fi_audio->name); + kfree(fi_audio->config); +} + +static ssize_t 
audio_source_pcm_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct audio_source_instance *fi_audio = dev_get_drvdata(dev); + struct audio_source_config *config = fi_audio->config; + + /* print PCM card and device numbers */ + return sprintf(buf, "%d %d\n", config->card, config->device); +} + +struct device *create_function_device(char *name); + +static struct usb_function_instance *audio_source_alloc_inst(void) +{ + struct audio_source_instance *fi_audio; + struct device_attribute **attrs; + struct device_attribute *attr; + struct device *dev; + void *err_ptr; + int err = 0; + + fi_audio = kzalloc(sizeof(*fi_audio), GFP_KERNEL); + if (!fi_audio) + return ERR_PTR(-ENOMEM); + + fi_audio->func_inst.set_inst_name = audio_source_set_inst_name; + fi_audio->func_inst.free_func_inst = audio_source_free_inst; + + fi_audio->config = kzalloc(sizeof(struct audio_source_config), + GFP_KERNEL); + if (!fi_audio->config) { + err_ptr = ERR_PTR(-ENOMEM); + goto fail_audio; + } + + config_group_init_type_name(&fi_audio->func_inst.group, "", + &audio_source_func_type); + dev = create_function_device("f_audio_source"); + + if (IS_ERR(dev)) { + err_ptr = dev; + goto fail_audio_config; + } + + fi_audio->config->card = -1; + fi_audio->config->device = -1; + fi_audio->audio_device = dev; + + attrs = audio_source_function_attributes; + if (attrs) { + while ((attr = *attrs++) && !err) + err = device_create_file(dev, attr); + if (err) { + err_ptr = ERR_PTR(-EINVAL); + goto fail_device; + } + } + + dev_set_drvdata(dev, fi_audio); + _audio_dev.config = fi_audio->config; + + return &fi_audio->func_inst; + +fail_device: + device_destroy(dev->class, dev->devt); +fail_audio_config: + kfree(fi_audio->config); +fail_audio: + kfree(fi_audio); + return err_ptr; + +} + +static struct usb_function *audio_source_alloc(struct usb_function_instance *fi) +{ + return &_audio_dev.func; +} + +DECLARE_USB_FUNCTION_INIT(audio_source, audio_source_alloc_inst, + audio_source_alloc); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/function/f_dvctrace.c b/drivers/usb/gadget/function/f_dvctrace.c new file mode 100644 index 000000000000..2589632a0e90 --- /dev/null +++ b/drivers/usb/gadget/function/f_dvctrace.c @@ -0,0 +1,825 @@ +/* + * Gadget Driver for DvC.Trace Function + * + * Copyright (C) 2015, Intel Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
+#ifdef VERBOSE_DEBUG
+#define DVCT_IN() pr_debug("in\n")
+#else
+#define DVCT_IN() do {} while (0)
+#endif
+
+#include
+#include
+#include
+#include
+#include "u_dvctrace.h"
+
+enum {
+	DVCT_IAD_DESC_POS,
+	DVCT_CITF_DESC_POS,
+	DVCT_DITF_DESC_POS,
+	DVCT_EP_DESC_POS,
+	DVCT_LS_NULL_DESC_POS,	/* full/high speed lists end with this NULL */
+	DVCT_EP_COMP_DESC_POS = DVCT_LS_NULL_DESC_POS,
+	DVCT_LS_DESC_COUNT,	/* count of full/high speed descriptors */
+	DVCT_NULL_DESC_POS = DVCT_LS_DESC_COUNT,
+	DVCT_HS_DESC_COUNT,	/* count of super speed descriptors */
+};
+
+/*
+ * The full list of descriptors will look like:
+ *	IAD_DESCRIPTOR				-----|=> USB function specific
+ *	CONTROL_ITF_DESCRIPTOR			-----|
+ *	SOURCE_SPECIFIC_DESCRIPTOR_0		----|
+ *	....					    |=> s_cnt descriptors
+ *	SOURCE_SPECIFIC_DESCRIPTOR_s_cnt	----|	provided by the
+ *						        source device.
+ *	DATA_ITF_DESCRIPTOR			-----|
+ *	ENDPOINT_DESCRIPTOR			     |=> USB function specific
+ *	....					-----|
+ * This makes a good part of the descriptors shift position;
+ * the following macros compute the adjusted offsets.
+ */
+#define DVCT_IAD_DESC_DYN_POS(s_cnt)	(DVCT_IAD_DESC_POS)
+#define DVCT_CITF_DESC_DYN_POS(s_cnt)	(DVCT_CITF_DESC_POS)
+#define DVCT_SOURCE_DESC_FIRST(s_cnt)	(DVCT_DITF_DESC_POS)
+#define DVCT_DITF_DESC_DYN_POS(s_cnt)	((s_cnt)+DVCT_DITF_DESC_POS)
+#define DVCT_EP_DESC_DYN_POS(s_cnt)	((s_cnt)+DVCT_EP_DESC_POS)
+#define DVCT_EP_COMP_DESC_DYN_POS(s_cnt)	((s_cnt)+DVCT_EP_COMP_DESC_POS)
+#define DVCT_LS_DESC_DYN_COUNT(s_cnt)	((s_cnt)+DVCT_LS_DESC_COUNT)
+#define DVCT_HS_DESC_DYN_COUNT(s_cnt)	((s_cnt)+DVCT_HS_DESC_COUNT)
+
+enum {
+	DVCT_STR_IAD_IDX,
+	DVCT_STR_C_ITF_IDX,
+	DVCT_STR_D_ITF_IDX,
+	DVCT_STR_NULL_IDX,	/* always last */
+	DVCT_STR_COUNT,
+};
+
+static int dvct_alloc_desc(struct dvct_function *d_fun)
+{
+	int i;
+	unsigned int s_desc_count = 0;
+	struct usb_descriptor_header **s_desc;
+	struct dvct_function_desc *desc = &d_fun->desc;
+
+	DVCT_IN();
+
+	if (d_fun->source_dev->desc) {
+		for (s_desc = d_fun->source_dev->desc->dvc_spec;
+		     s_desc && (*s_desc); s_desc++)
+			s_desc_count++;
+	}
+
+	/* alloc the descriptor arrays */
+	desc->fs =
+	    kzalloc(DVCT_LS_DESC_DYN_COUNT(s_desc_count) *
+		    sizeof(struct usb_descriptor_header *), GFP_KERNEL);
+	if (!desc->fs)
+		goto err_fs;
+
+	desc->hs =
+	    kzalloc(DVCT_LS_DESC_DYN_COUNT(s_desc_count) *
+		    sizeof(struct usb_descriptor_header *), GFP_KERNEL);
+	if (!desc->hs)
+		goto err_hs;
+
+	desc->ss =
+	    kzalloc(DVCT_HS_DESC_DYN_COUNT(s_desc_count) *
+		    sizeof(struct usb_descriptor_header *), GFP_KERNEL);
+	if (!desc->ss)
+		goto err_ss;
+
+	/* IAD */
+	desc->iad = kzalloc(sizeof(*desc->iad), GFP_KERNEL);
+	if (!desc->iad)
+		goto err_iad;
+
+	desc->iad->bLength = sizeof(*desc->iad);
+	desc->iad->bDescriptorType = USB_DT_INTERFACE_ASSOCIATION;
+	desc->iad->bInterfaceCount = 2;
+	desc->iad->bFunctionClass = USB_CLASS_DEBUG;
+	desc->iad->bFunctionSubClass = USB_SUBCLASS_DVC_TRACE;
+	desc->iad->bFunctionProtocol = d_fun->source_dev->protocol;
+	/* bFirstInterface - updated on bind */
+
+	desc->fs[DVCT_IAD_DESC_DYN_POS(s_desc_count)] =
+	    (struct usb_descriptor_header *)desc->iad;
+	desc->hs[DVCT_IAD_DESC_DYN_POS(s_desc_count)] =
+	    (struct usb_descriptor_header *)desc->iad;
+	desc->ss[DVCT_IAD_DESC_DYN_POS(s_desc_count)] =
+	    (struct usb_descriptor_header *)desc->iad;
+
+	/* Control interface */
+	desc->c_itf = kzalloc(sizeof(*desc->c_itf), GFP_KERNEL);
+	if (!desc->c_itf)
+		goto err_c_itf;
+
+	desc->c_itf->bLength = USB_DT_INTERFACE_SIZE;
+	desc->c_itf->bDescriptorType = USB_DT_INTERFACE;
+	
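/* bInterfaceNumber is assigned later, at function bind time */
+	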
desc->c_itf->bInterfaceClass = USB_CLASS_DEBUG; + desc->c_itf->bInterfaceSubClass = USB_SUBCLASS_DEBUG_CONTROL; + desc->c_itf->bInterfaceProtocol = d_fun->source_dev->protocol; + + desc->fs[DVCT_CITF_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->c_itf; + desc->hs[DVCT_CITF_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->c_itf; + desc->ss[DVCT_CITF_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->c_itf; + + if (d_fun->source_dev->desc) { + /*Copy whatever the source device has provided */ + s_desc = d_fun->source_dev->desc->dvc_spec; + for (i = 0; i < s_desc_count; i++) { + desc->fs[DVCT_SOURCE_DESC_FIRST(s_desc_count) + i] + = s_desc[i]; + desc->hs[DVCT_SOURCE_DESC_FIRST(s_desc_count) + i] + = s_desc[i]; + desc->ss[DVCT_SOURCE_DESC_FIRST(s_desc_count) + i] + = s_desc[i]; + } + } + /*Data interface */ + desc->d_itf = kzalloc(sizeof(*desc->d_itf), GFP_KERNEL); + if (!desc->d_itf) + goto err_d_itf; + + desc->d_itf->bLength = USB_DT_INTERFACE_SIZE; + desc->d_itf->bDescriptorType = USB_DT_INTERFACE; + desc->d_itf->bNumEndpoints = 1; + desc->d_itf->bInterfaceClass = USB_CLASS_DEBUG; + desc->d_itf->bInterfaceSubClass = USB_SUBCLASS_DVC_TRACE; + desc->d_itf->bInterfaceProtocol = d_fun->source_dev->protocol; + + desc->fs[DVCT_DITF_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->d_itf; + desc->hs[DVCT_DITF_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->d_itf; + desc->ss[DVCT_DITF_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->d_itf; + + /*Full Speed ep */ + desc->fs_ep = kzalloc(sizeof(*desc->fs_ep), GFP_KERNEL); + if (!desc->fs_ep) + goto err_fs_ep; + + desc->fs_ep->bLength = USB_DT_ENDPOINT_SIZE; + desc->fs_ep->bDescriptorType = USB_DT_ENDPOINT; + desc->fs_ep->bEndpointAddress = USB_DIR_IN; + desc->fs_ep->bmAttributes = USB_ENDPOINT_XFER_BULK; + desc->fs_ep->wMaxPacketSize = cpu_to_le16(64); + + desc->fs[DVCT_EP_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->fs_ep; + + /*High Speed ep */ + desc->hs_ep = kzalloc(sizeof(*desc->hs_ep), GFP_KERNEL); + if (!desc->hs_ep) + goto err_hs_ep; + + desc->hs_ep->bLength = USB_DT_ENDPOINT_SIZE; + desc->hs_ep->bDescriptorType = USB_DT_ENDPOINT; + desc->hs_ep->bEndpointAddress = USB_DIR_IN; + desc->hs_ep->bmAttributes = USB_ENDPOINT_XFER_BULK; + desc->hs_ep->wMaxPacketSize = cpu_to_le16(512); + + desc->hs[DVCT_EP_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->hs_ep; + + /*Super Speed ep */ + desc->ss_ep = kzalloc(sizeof(*desc->ss_ep), GFP_KERNEL); + if (!desc->ss_ep) + goto err_ss_ep; + + desc->ss_ep->bLength = USB_DT_ENDPOINT_SIZE; + desc->ss_ep->bDescriptorType = USB_DT_ENDPOINT; + desc->ss_ep->bEndpointAddress = USB_DIR_IN; + desc->ss_ep->bmAttributes = USB_ENDPOINT_XFER_BULK; + desc->ss_ep->wMaxPacketSize = cpu_to_le16(1024); + + desc->ss[DVCT_EP_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->ss_ep; + + /*Super Speed ep comp */ + desc->ss_ep_comp = kzalloc(sizeof(*desc->ss_ep_comp), GFP_KERNEL); + if (!desc->ss_ep_comp) + goto err_ss_ep_comp; + + desc->ss_ep_comp->bLength = USB_DT_SS_EP_COMP_SIZE; + desc->ss_ep_comp->bDescriptorType = USB_DT_SS_ENDPOINT_COMP; + + desc->ss[DVCT_EP_COMP_DESC_DYN_POS(s_desc_count)] = + (struct usb_descriptor_header *)desc->ss_ep_comp; + + /* strings */ + /*the table */ + desc->str.language = 0x0409; /*en-us */ + desc->str.strings = + kzalloc(DVCT_STR_COUNT * sizeof(struct usb_string), GFP_KERNEL); + if (!desc->str.strings) + 
goto err_str;
+
+ /*lookup table */
+ desc->lk_tbl =
+ kzalloc(DVCT_STR_COUNT * sizeof(struct dvct_string_lookup),
+ GFP_KERNEL);
+ if (!desc->lk_tbl)
+ goto err_str_lk;
+
+ /*actual strings */
+ /*IAD*/
+ desc->str.strings[DVCT_STR_IAD_IDX].s =
+ kasprintf(GFP_KERNEL, "DvC Trace (%s)",
+ dev_name(&d_fun->source_dev->device));
+ if (!desc->str.strings[DVCT_STR_IAD_IDX].s)
+ goto err_str_iad;
+
+ desc->lk_tbl[DVCT_STR_IAD_IDX].str =
+ &desc->str.strings[DVCT_STR_IAD_IDX];
+ desc->lk_tbl[DVCT_STR_IAD_IDX].id = &desc->iad->iFunction;
+
+ /*control */
+ desc->str.strings[DVCT_STR_C_ITF_IDX].s =
+ kasprintf(GFP_KERNEL, "DvC Trace Control (%s)",
+ dev_name(&d_fun->source_dev->device));
+ if (!desc->str.strings[DVCT_STR_C_ITF_IDX].s)
+ goto err_str_ctrl;
+
+ desc->lk_tbl[DVCT_STR_C_ITF_IDX].str =
+ &desc->str.strings[DVCT_STR_C_ITF_IDX];
+ desc->lk_tbl[DVCT_STR_C_ITF_IDX].id = &desc->c_itf->iInterface;
+
+ /*data */
+ desc->str.strings[DVCT_STR_D_ITF_IDX].s =
+ kasprintf(GFP_KERNEL, "DvC Trace Data (%s)",
+ dev_name(&d_fun->source_dev->device));
+ if (!desc->str.strings[DVCT_STR_D_ITF_IDX].s)
+ goto err_str_data;
+
+ desc->lk_tbl[DVCT_STR_D_ITF_IDX].str =
+ &desc->str.strings[DVCT_STR_D_ITF_IDX];
+ desc->lk_tbl[DVCT_STR_D_ITF_IDX].id = &desc->d_itf->iInterface;
+
+ return 0;
+/*cleanup*/
+err_str_data:
+ kfree(desc->str.strings[DVCT_STR_C_ITF_IDX].s);
+err_str_ctrl:
+ kfree(desc->str.strings[DVCT_STR_IAD_IDX].s);
+err_str_iad:
+ kfree(desc->lk_tbl);
+err_str_lk:
+ kfree(desc->str.strings);
+err_str:
+ kfree(desc->ss_ep_comp);
+err_ss_ep_comp:
+ kfree(desc->ss_ep);
+err_ss_ep:
+ kfree(desc->hs_ep);
+err_hs_ep:
+ kfree(desc->fs_ep);
+err_fs_ep:
+ kfree(desc->d_itf);
+err_d_itf:
+ kfree(desc->c_itf);
+err_c_itf:
+ kfree(desc->iad);
+err_iad:
+ kfree(desc->ss);
+err_ss:
+ kfree(desc->hs);
+err_hs:
+ kfree(desc->fs);
+err_fs:
+ pr_err("Out of memory\n");
+ return -ENOMEM;
+}
+
+static void dvct_free_desc(struct dvct_function *d_fun)
+{
+ struct dvct_function_desc *desc = &d_fun->desc;
+
+ DVCT_IN();
+
+ kfree(desc->str.strings[DVCT_STR_D_ITF_IDX].s);
+ kfree(desc->str.strings[DVCT_STR_C_ITF_IDX].s);
+ kfree(desc->str.strings[DVCT_STR_IAD_IDX].s);
+ kfree(desc->lk_tbl);
+ kfree(desc->str.strings);
+ kfree(desc->ss_ep_comp);
+ kfree(desc->ss_ep);
+ kfree(desc->hs_ep);
+ kfree(desc->fs_ep);
+ kfree(desc->d_itf);
+ kfree(desc->c_itf);
+ kfree(desc->iad);
+ kfree(desc->ss);
+ kfree(desc->hs);
+ kfree(desc->fs);
+}
+
+ssize_t dvct_start_transfer(struct dvct_function *d_fun, u8 config)
+{
+ DVCT_IN();
+ if (!dvct_get_status(&d_fun->status, DVCT_MASK_ONLINE))
+ return -EIO;
+
+ d_fun->trace_config = config;
+ return d_fun->source_drv->start_transfer(d_fun->source_dev, config);
+}
+EXPORT_SYMBOL(dvct_start_transfer);
+
+int dvct_stop_transfer(struct dvct_function *d_fun)
+{
+ DVCT_IN();
+ if (!dvct_get_status(&d_fun->status, DVCT_MASK_ONLINE))
+ return -EIO;
+
+ if (dvct_get_status(&d_fun->status, DVCT_MASK_TRANS)) {
+ d_fun->trace_config = 0;
+ return d_fun->source_drv->stop_transfer(d_fun->source_dev);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dvct_stop_transfer);
+
+static int dvct_strings_setup(struct usb_composite_dev *cdev,
+ struct usb_string *strings,
+ struct dvct_string_lookup *lk_tbl)
+{
+ int status;
+ struct dvct_string_lookup *str_lk;
+
+ DVCT_IN();
+ if (!strings || !lk_tbl)
+ return -EINVAL;
+
+ status = usb_string_ids_tab(cdev, strings);
+ if (status < 0)
+ return status;
+
+ for (str_lk = lk_tbl; str_lk->str; str_lk++) {
+ *str_lk->id = str_lk->str->id;
+ pr_info("Setting id %d for str \"%s\"\n",
+ str_lk->str->id,
+ str_lk->str->s);
+ }
+ return 0;
+}
+
+static int dvct_setup(struct usb_function *func,
+ const struct usb_ctrlrequest *ctrl)
+{
+ int status = -EOPNOTSUPP;
+ u16 w_index;
+ u16 w_value;
+ u16 w_length;
+ u8 b_index_value;
+ struct dvct_function *d_fun = to_dvct_function(func);
+
+ DVCT_IN();
+
+ w_index = le16_to_cpu(ctrl->wIndex);
+ w_value = le16_to_cpu(ctrl->wValue);
+ w_length = le16_to_cpu(ctrl->wLength);
+ b_index_value = (u8) (w_index >> 8);
+
+ if (ctrl->bRequestType !=
+ (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+ goto done;
+
+ switch (ctrl->bRequest) {
+ case DC_REQUEST_SET_RESET:
+
+ pr_info("SET_RESET v%04x i%04x l%u\n",
+ w_value, w_index, w_length);
+
+ dvct_stop_transfer(d_fun);
+ status = 0;
+ break;
+
+ case DC_REQUEST_SET_TRACE:
+ /* There are some inconsistencies in the spec regarding some of the
+ * control requests, like SET/GET _TRACE, where even if the message
+ * is defined as interface specific the wIndex field is used for
+ * something else, making these requests unusable in a "standard"
+ * composite device.
+ * To get around this we expect the interface to be specified in
+ * wIndex 7:0 and any other values in wIndex 15:8.
+ * A "special" composite implementation is free to treat these setup
+ * requests "on spec" and call dvct_start_transfer and/or
+ * dvct_stop_transfer (exported in u_dvctrace.h) directly.
+ */
+ pr_info("SET_TRACE v%04x i%04x l%u\n",
+ w_value, w_index, w_length);
+
+ if (!b_index_value) {
+ dvct_stop_transfer(d_fun);
+ status = 0;
+ } else {
+ status = dvct_start_transfer(d_fun, b_index_value);
+ }
+ break;
+ }
+
+done:
+ if (status >= 0) {
+ d_fun->cdev->req->zero = 0;
+ d_fun->cdev->req->length = 0;
+ status =
+ usb_ep_queue(d_fun->cdev->gadget->ep0, d_fun->cdev->req,
+ GFP_ATOMIC);
+ if (status)
+ pr_err("Setup response queue error\n");
+ } else {
+ pr_debug("Unexpected request %02x.%02x v%04x i%04x l%u\n",
+ ctrl->bRequestType, ctrl->bRequest, w_value, w_index,
+ w_length);
+ }
+
+ return status;
+}
+
+static int dvct_function_bind(struct usb_configuration *cconfig,
+ struct usb_function *func)
+{
+ int id, ret;
+ struct usb_ep *ep;
+ struct dvct_function *d_fun = to_dvct_function(func);
+
+ DVCT_IN();
+ d_fun->cdev = cconfig->cdev;
+
+ spin_lock(&d_fun->source_dev->lock);
+ d_fun->source_dev->function_taken = 1;
+ spin_unlock(&d_fun->source_dev->lock);
+
+ /*allocate IDs */
+ /*strings:
not crucial just print on failure */ + if (d_fun->source_dev->desc && d_fun->source_dev->desc->str.strings) { + ret = dvct_strings_setup(d_fun->cdev, + d_fun->source_dev->desc->str.strings, + d_fun->source_dev->desc->lk_tbl); + if (ret) + pr_warn("Cannot allocate source device string id's\n"); + } + ret = dvct_strings_setup(d_fun->cdev, d_fun->desc.str.strings, + d_fun->desc.lk_tbl); + if (ret) + pr_warn("Cannot allocate function string id's\n"); + + /* allocate interface ID(s) */ + id = usb_interface_id(cconfig, func); + if (id < 0) + return id; + + d_fun->desc.c_itf->bInterfaceNumber = id; + d_fun->desc.iad->bFirstInterface = id; + + pr_debug("Setting id %d for dvc-control interface\n", id); + + id = usb_interface_id(cconfig, func); + if (id < 0) + return id; + + d_fun->desc.d_itf->bInterfaceNumber = id; + + pr_debug("Setting id %d for dvc-trace-data interface\n", id); + + /* allocate endpoints */ + d_fun->desc.ss_ep->wMaxPacketSize = 0; /*get the real max */ + ep = usb_ep_autoconfig_ss(d_fun->cdev->gadget, + d_fun->desc.ss_ep, d_fun->desc.ss_ep_comp); + + if (!ep) { + pr_err("usb_ep_autoconfig for ep_in failed\n"); + return -ENODEV; + } + + /*copy over the endpoint parameters */ + d_fun->desc.hs_ep->bEndpointAddress = + d_fun->desc.ss_ep->bEndpointAddress; + d_fun->desc.fs_ep->bEndpointAddress = + d_fun->desc.ss_ep->bEndpointAddress; + + if (le16_to_cpu(d_fun->desc.hs_ep->wMaxPacketSize) > + le16_to_cpu(d_fun->desc.ss_ep->wMaxPacketSize)) + d_fun->desc.hs_ep->wMaxPacketSize = + d_fun->desc.ss_ep->wMaxPacketSize; + + if (le16_to_cpu(d_fun->desc.fs_ep->wMaxPacketSize) > + le16_to_cpu(d_fun->desc.ss_ep->wMaxPacketSize)) + d_fun->desc.fs_ep->wMaxPacketSize = + d_fun->desc.ss_ep->wMaxPacketSize; + + pr_info("usb_ep_autoconfig %s, addr 0x%hhx, size ss=%hu hs=%hu fs=%hu\n", + ep->name, + d_fun->desc.ss_ep->bEndpointAddress, + d_fun->desc.ss_ep->wMaxPacketSize, + d_fun->desc.hs_ep->wMaxPacketSize, + d_fun->desc.fs_ep->wMaxPacketSize); + + ep->driver_data = d_fun; /* claim the endpoint */ + d_fun->ep_in = ep; + + ret = d_fun->source_drv->binded(d_fun->source_dev, ep, + &d_fun->function); + + return ret; +} + +static void dvct_function_unbind(struct usb_configuration *c, + struct usb_function *func) +{ + struct dvct_function *d_fun = to_dvct_function(func); + + DVCT_IN(); + dvct_clr_status(&d_fun->status, DVCT_MASK_ONLINE); + d_fun->online_data = 0; + d_fun->online_ctrl = 0; + + d_fun->source_drv->unbinded(d_fun->source_dev); + + spin_lock(&d_fun->source_dev->lock); + d_fun->source_dev->function_taken = 0; + spin_unlock(&d_fun->source_dev->lock); +} + +static int dvct_function_set_alt(struct usb_function *func, + unsigned intf, unsigned alt) +{ + struct dvct_function *d_fun = to_dvct_function(func); + struct usb_composite_dev *cdev = func->config->cdev; + int ret; + + DVCT_IN(); + + if (intf == d_fun->desc.c_itf->bInterfaceNumber) { + d_fun->online_ctrl = 1; + pr_debug("dvc-control interface %u set alt %u\n", intf, alt); + } + + if (intf == d_fun->desc.d_itf->bInterfaceNumber) { + ret = config_ep_by_speed(cdev->gadget, func, d_fun->ep_in); + if (ret) { + pr_debug("intf: %d alt: %d ep_by_speed in err %d\n", + intf, alt, ret); + return ret; + } + + ret = usb_ep_enable(d_fun->ep_in); + if (ret) { + pr_debug("intf: %d alt: %d ep_enable in err %d\n", + intf, alt, ret); + return ret; + } + d_fun->online_data = 1; + } + + pr_info("dvc-trace interface %u set alt %u\n", intf, alt); + + if (unlikely(dvct_get_status(&d_fun->status, DVCT_MASK_TRANS))) + dvct_stop_transfer(d_fun); + + if (d_fun->online_data 
&& d_fun->online_ctrl) { + dvct_set_status(&d_fun->status, DVCT_MASK_ONLINE); + if (d_fun->source_drv->connected) + d_fun->source_drv->connected(d_fun->source_dev, + cdev->gadget->speed); + } + return 0; +} + +static void dvct_function_disable(struct usb_function *func) +{ + struct dvct_function *d_fun = to_dvct_function(func); + struct usb_composite_dev *cdev; + + DVCT_IN(); + + cdev = d_fun->cdev; + + if (dvct_get_status(&d_fun->status, DVCT_MASK_TRANS)) + dvct_stop_transfer(d_fun); + + usb_ep_disable(d_fun->ep_in); + + d_fun->online_ctrl = 0; + d_fun->online_data = 0; + + if (d_fun->source_drv->disconnected) + d_fun->source_drv->disconnected(d_fun->source_dev); + + pr_debug("%s disabled\n", d_fun->function.name); +} + +static void dvct_attr_release(struct config_item *item) +{ + struct dvct_function_inst *d_inst; + + DVCT_IN(); + d_inst = container_of(to_config_group(item), struct dvct_function_inst, + instance.group); + usb_put_function_instance(&d_inst->instance); +} + +static struct configfs_item_operations dvctrace_item_ops = { + .release = dvct_attr_release, +}; + +static ssize_t f_dvctrace_device_show(struct config_item *item, char *page) +{ + struct dvct_function_inst *d_inst; + + DVCT_IN(); + d_inst = container_of(to_config_group(item), struct dvct_function_inst, + instance.group); + + return sprintf(page, "%s\n", dev_name(&d_inst->source_dev->device)); +} + +CONFIGFS_ATTR_RO(f_dvctrace_, device); + +static struct configfs_attribute *dvct_attrs[] = { + &f_dvctrace_attr_device, + NULL, +}; + +static struct config_item_type dvct_func_type = { + .ct_item_ops = &dvctrace_item_ops, + .ct_attrs = dvct_attrs, + .ct_owner = THIS_MODULE, +}; + +static void dvct_free_func_inst(struct usb_function_instance *inst) +{ + struct dvct_function_inst *d_inst; + + DVCT_IN(); + d_inst = to_dvct_function_inst(inst); + + spin_lock(&d_inst->source_dev->lock); + d_inst->source_dev->instance_taken = 0; + spin_unlock(&d_inst->source_dev->lock); + + kfree(d_inst); +} + +static int dvct_set_inst_name(struct usb_function_instance *inst, + const char *name) +{ + struct dvct_function_inst *d_inst; + struct dvct_source_device *new_src; + struct dvct_source_device *old_src; + + DVCT_IN(); + d_inst = to_dvct_function_inst(inst); + old_src = d_inst->source_dev; + + new_src = dvct_source_find_by_name(name); + + if (IS_ERR_OR_NULL(new_src)) + return -ENODEV; + + if (new_src != old_src) { + if (new_src->instance_taken) + return -EBUSY; + + spin_lock(&new_src->lock); + spin_lock(&old_src->lock); + + d_inst->source_dev = new_src; + new_src->instance_taken = 1; + old_src->instance_taken = 0; + + spin_unlock(&old_src->lock); + spin_unlock(&new_src->lock); + } + return 0; +} + +static struct usb_function_instance *dvct_alloc_inst(void) +{ + struct dvct_function_inst *d_inst; + struct dvct_source_device *src_dev = NULL; + + DVCT_IN(); + /*get the first free source, this will change via set name + * if available */ + src_dev = dvct_source_find_free(); + + if (IS_ERR_OR_NULL(src_dev)) + return ERR_PTR(-ENODEV); + + d_inst = kzalloc(sizeof(*d_inst), GFP_KERNEL); + + if (!d_inst) + return ERR_PTR(-ENOMEM); + + d_inst->instance.free_func_inst = dvct_free_func_inst; + d_inst->instance.set_inst_name = dvct_set_inst_name; + + spin_lock(&src_dev->lock); + d_inst->source_dev = src_dev; + src_dev->instance_taken = 1; + spin_unlock(&src_dev->lock); + + config_group_init_type_name(&d_inst->instance.group, + "", &dvct_func_type); + return &d_inst->instance; +} + +static void dvct_free_func(struct usb_function *func) +{ + struct 
dvct_function *d_fun = to_dvct_function(func); + + DVCT_IN(); + d_fun->source_drv->deactivate(d_fun->source_dev); + + dvct_free_desc(d_fun); + + kfree(d_fun); +} + +static struct usb_function *dvct_alloc_func(struct usb_function_instance *inst) +{ + int ret; + struct dvct_function *d_fun; + struct dvct_function_inst *d_inst = to_dvct_function_inst(inst); + + DVCT_IN(); + d_fun = kzalloc(sizeof(struct dvct_function), GFP_KERNEL); + if (!d_fun) + return ERR_PTR(-ENOMEM); + + d_fun->source_dev = d_inst->source_dev; + d_fun->source_drv = dvct_source_get_drv(d_fun->source_dev); + d_fun->trace_config = 0; + + ret = d_fun->source_drv->activate(d_fun->source_dev, &d_fun->status); + if (ret) { + pr_err("Cannot activate source device %d\n", ret); + goto err; + } + + ret = dvct_alloc_desc(d_fun); + if (ret) + goto err_des; + + /*String table*/ + /*1 - source dev (if present) , 1 - function, 1 - NULL */ + if (d_fun->source_dev->desc && d_fun->source_dev->desc->str.strings) + d_fun->function.strings = + kzalloc(3 * sizeof(struct usb_gadget_strings), GFP_KERNEL); + else + d_fun->function.strings = + kzalloc(2 * sizeof(struct usb_gadget_strings), GFP_KERNEL); + + if (!d_fun->function.strings) { + ret = -ENOMEM; + goto err_string_table; + } + + d_fun->function.strings[0] = &d_fun->desc.str; + if (d_fun->source_dev->desc && d_fun->source_dev->desc->str.strings) + d_fun->function.strings[1] = &d_fun->source_dev->desc->str; + + d_fun->function.name = "dvctrace"; + d_fun->function.fs_descriptors = d_fun->desc.fs; + d_fun->function.hs_descriptors = d_fun->desc.hs; + d_fun->function.ss_descriptors = d_fun->desc.ss; + d_fun->function.bind = dvct_function_bind; + d_fun->function.unbind = dvct_function_unbind; + d_fun->function.set_alt = dvct_function_set_alt; + d_fun->function.disable = dvct_function_disable; + d_fun->function.free_func = dvct_free_func; + d_fun->function.setup = dvct_setup; + + return &d_fun->function; + +err_string_table: + dvct_free_desc(d_fun); +err_des: + d_fun->source_drv->deactivate(d_fun->source_dev); +err: + kfree(d_fun); + return ERR_PTR(ret); +} + +DECLARE_USB_FUNCTION_INIT(dvctrace, dvct_alloc_inst, dvct_alloc_func); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DvC-Trace function driver"); +MODULE_AUTHOR("Traian Schiau "); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 8b342587f8ad..054e3308b28a 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -219,6 +219,7 @@ struct ffs_io_data { struct mm_struct *mm; struct work_struct work; + struct work_struct cancellation_work; struct usb_ep *ep; struct usb_request *req; @@ -759,9 +760,13 @@ static void ffs_user_copy_worker(struct work_struct *work) bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD; if (io_data->read && ret > 0) { + mm_segment_t oldfs = get_fs(); + + set_fs(USER_DS); use_mm(io_data->mm); ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data); unuse_mm(io_data->mm); + set_fs(oldfs); } io_data->kiocb->ki_complete(io_data->kiocb, ret, ret); @@ -1010,13 +1015,17 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) if (interrupted) ret = -EINTR; + else if (epfile->ep != ep) { + /* In the meantime, endpoint got disabled or changed. 
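+ * Most likely ffs_func_eps_disable() ran while we slept, possibly
+ * followed by a re-enable with different descriptors, so the epfile
+ * no longer refers to the endpoint this request was queued on.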
*/ + ret = -ESHUTDOWN; + } else if (io_data->read && ep->status > 0) ret = __ffs_epfile_read_data(epfile, data, ep->status, &io_data->data); else ret = ep->status; goto error_mutex; - } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) { + } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) { ret = -ENOMEM; } else { req->buf = data; @@ -1069,22 +1078,31 @@ ffs_epfile_open(struct inode *inode, struct file *file) return 0; } +static void ffs_aio_cancel_worker(struct work_struct *work) +{ + struct ffs_io_data *io_data = container_of(work, struct ffs_io_data, + cancellation_work); + + ENTER(); + + usb_ep_dequeue(io_data->ep, io_data->req); +} + static int ffs_aio_cancel(struct kiocb *kiocb) { struct ffs_io_data *io_data = kiocb->private; - struct ffs_epfile *epfile = kiocb->ki_filp->private_data; + struct ffs_data *ffs = io_data->ffs; int value; ENTER(); - spin_lock_irq(&epfile->ffs->eps_lock); - - if (likely(io_data && io_data->ep && io_data->req)) - value = usb_ep_dequeue(io_data->ep, io_data->req); - else + if (likely(io_data && io_data->ep && io_data->req)) { + INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker); + queue_work(ffs->io_completion_wq, &io_data->cancellation_work); + value = -EINPROGRESS; + } else { value = -EINVAL; - - spin_unlock_irq(&epfile->ffs->eps_lock); + } return value; } @@ -1539,7 +1557,6 @@ ffs_fs_kill_sb(struct super_block *sb) if (sb->s_fs_info) { ffs_release_dev(sb->s_fs_info); ffs_data_closed(sb->s_fs_info); - ffs_data_put(sb->s_fs_info); } } @@ -1856,44 +1873,20 @@ static int ffs_func_eps_enable(struct ffs_function *func) spin_lock_irqsave(&func->ffs->eps_lock, flags); while(count--) { - struct usb_endpoint_descriptor *ds; - struct usb_ss_ep_comp_descriptor *comp_desc = NULL; - int needs_comp_desc = false; - int desc_idx; - - if (ffs->gadget->speed == USB_SPEED_SUPER) { - desc_idx = 2; - needs_comp_desc = true; - } else if (ffs->gadget->speed == USB_SPEED_HIGH) - desc_idx = 1; - else - desc_idx = 0; - - /* fall-back to lower speed if desc missing for current speed */ - do { - ds = ep->descs[desc_idx]; - } while (!ds && --desc_idx >= 0); - - if (!ds) { - ret = -EINVAL; - break; - } - ep->ep->driver_data = ep; - ep->ep->desc = ds; - if (needs_comp_desc) { - comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + - USB_DT_ENDPOINT_SIZE); - ep->ep->maxburst = comp_desc->bMaxBurst + 1; - ep->ep->comp_desc = comp_desc; + ret = config_ep_by_speed(func->gadget, &func->function, ep->ep); + if (ret) { + pr_err("%s: config_ep_by_speed(%s) returned %d\n", + __func__, ep->ep->name, ret); + break; } ret = usb_ep_enable(ep->ep); if (likely(!ret)) { epfile->ep = ep; - epfile->in = usb_endpoint_dir_in(ds); - epfile->isoc = usb_endpoint_xfer_isoc(ds); + epfile->in = usb_endpoint_dir_in(ep->ep->desc); + epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc); } else { break; } @@ -2286,9 +2279,18 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, int i; if (len < sizeof(*d) || - d->bFirstInterfaceNumber >= ffs->interfaces_count || - !d->Reserved1) + d->bFirstInterfaceNumber >= ffs->interfaces_count) return -EINVAL; + if (d->Reserved1 != 1) { + /* + * According to the spec, Reserved1 must be set to 1 + * but older kernels incorrectly rejected non-zero + * values. We fix it here to avoid returning EINVAL + * in response to values we used to accept. 
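+ * E.g. a host tool that has always written Reserved1 == 0 keeps
+ * working; the field is quietly normalized instead of the whole
+ * OS-descriptor write failing with -EINVAL.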
+ */ + pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n"); + d->Reserved1 = 1; + } for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) if (d->Reserved2[i]) return -EINVAL; @@ -2971,10 +2973,8 @@ static int _ffs_func_bind(struct usb_configuration *c, struct ffs_data *ffs = func->ffs; const int full = !!func->ffs->fs_descs_count; - const int high = gadget_is_dualspeed(func->gadget) && - func->ffs->hs_descs_count; - const int super = gadget_is_superspeed(func->gadget) && - func->ffs->ss_descs_count; + const int high = !!func->ffs->hs_descs_count; + const int super = !!func->ffs->ss_descs_count; int fs_len, hs_len, ss_len, ret, i; struct ffs_ep *eps_ptr; @@ -3257,7 +3257,7 @@ static int ffs_func_setup(struct usb_function *f, __ffs_event_add(ffs, FUNCTIONFS_SETUP); spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); - return 0; + return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0; } static bool ffs_func_req_match(struct usb_function *f, @@ -3677,6 +3677,7 @@ static void ffs_closed(struct ffs_data *ffs) goto done; ffs_obj->desc_ready = false; + ffs_obj->ffs_data = NULL; if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) && ffs_obj->ffs_closed_callback) @@ -3694,7 +3695,8 @@ static void ffs_closed(struct ffs_data *ffs) ci = opts->func_inst.group.cg_item.ci_parent->ci_parent; ffs_dev_unlock(); - unregister_gadget_item(ci); + if (test_bit(FFS_FL_BOUND, &ffs->flags)) + unregister_gadget_item(ci); return; done: ffs_dev_unlock(); diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 5d3d7941d2c2..f0a8e66e7a9c 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -405,7 +405,8 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt) if (err) { ERROR(midi, "%s: couldn't enqueue request: %d\n", midi->out_ep->name, err); - free_ep_req(midi->out_ep, req); + if (req->buf != NULL) + free_ep_req(midi->out_ep, req); return err; } } @@ -1207,6 +1208,65 @@ static void f_midi_free_inst(struct usb_function_instance *f) kfree(opts); } +#ifdef CONFIG_USB_CONFIGFS_UEVENT +extern struct device *create_function_device(char *name); +static ssize_t alsa_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_function_instance *fi_midi = dev_get_drvdata(dev); + struct f_midi *midi; + + if (!fi_midi->f) + dev_warn(dev, "f_midi: function not set\n"); + + if (fi_midi && fi_midi->f) { + midi = func_to_midi(fi_midi->f); + if (midi->rmidi && midi->rmidi->card) + return sprintf(buf, "%d %d\n", + midi->rmidi->card->number, midi->rmidi->device); + } + + /* print PCM card and device numbers */ + return sprintf(buf, "%d %d\n", -1, -1); +} + +static DEVICE_ATTR(alsa, S_IRUGO, alsa_show, NULL); + +static struct device_attribute *alsa_function_attributes[] = { + &dev_attr_alsa, + NULL +}; + +static int create_alsa_device(struct usb_function_instance *fi) +{ + struct device *dev; + struct device_attribute **attrs; + struct device_attribute *attr; + int err = 0; + + dev = create_function_device("f_midi"); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + attrs = alsa_function_attributes; + if (attrs) { + while ((attr = *attrs++) && !err) + err = device_create_file(dev, attr); + if (err) { + device_destroy(dev->class, dev->devt); + return -EINVAL; + } + } + dev_set_drvdata(dev, fi); + return 0; +} +#else +static int create_alsa_device(struct usb_function_instance *fi) +{ + return 0; +} +#endif + static struct usb_function_instance *f_midi_alloc_inst(void) { struct f_midi_opts *opts; @@ 
-1224,6 +1284,11 @@ static struct usb_function_instance *f_midi_alloc_inst(void) opts->in_ports = 1; opts->out_ports = 1; + if (create_alsa_device(&opts->func_inst)) { + kfree(opts); + return ERR_PTR(-ENODEV); + } + config_group_init_type_name(&opts->func_inst.group, "", &midi_func_type); @@ -1242,6 +1307,7 @@ static void f_midi_free(struct usb_function *f) kfree(midi->id); kfifo_free(&midi->in_req_fifo); kfree(midi); + opts->func_inst.f = NULL; --opts->refcnt; } mutex_unlock(&opts->lock); @@ -1328,6 +1394,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) midi->func.disable = f_midi_disable; midi->func.free_func = f_midi_free; + fi->f = &midi->func; return &midi->func; setup_fail: diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c new file mode 100644 index 000000000000..54f7ebbf858e --- /dev/null +++ b/drivers/usb/gadget/function/f_mtp.c @@ -0,0 +1,1554 @@ +/* + * Gadget Function Driver for MTP + * + * Copyright (C) 2010 Google, Inc. + * Author: Mike Lockwood + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* #define DEBUG */ +/* #define VERBOSE_DEBUG */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "configfs.h" + +#define MTP_BULK_BUFFER_SIZE 16384 +#define INTR_BUFFER_SIZE 28 +#define MAX_INST_NAME_LEN 40 +#define MTP_MAX_FILE_SIZE 0xFFFFFFFFL + +/* String IDs */ +#define INTERFACE_STRING_INDEX 0 + +/* values for mtp_dev.state */ +#define STATE_OFFLINE 0 /* initial state, disconnected */ +#define STATE_READY 1 /* ready for userspace calls */ +#define STATE_BUSY 2 /* processing userspace calls */ +#define STATE_CANCELED 3 /* transaction canceled by host */ +#define STATE_ERROR 4 /* error from completion routine */ + +/* number of tx and rx requests to allocate */ +#define TX_REQ_MAX 4 +#define RX_REQ_MAX 2 +#define INTR_REQ_MAX 5 + +/* ID for Microsoft MTP OS String */ +#define MTP_OS_STRING_ID 0xEE + +/* MTP class reqeusts */ +#define MTP_REQ_CANCEL 0x64 +#define MTP_REQ_GET_EXT_EVENT_DATA 0x65 +#define MTP_REQ_RESET 0x66 +#define MTP_REQ_GET_DEVICE_STATUS 0x67 + +/* constants for device status */ +#define MTP_RESPONSE_OK 0x2001 +#define MTP_RESPONSE_DEVICE_BUSY 0x2019 +#define DRIVER_NAME "mtp" + +static const char mtp_shortname[] = DRIVER_NAME "_usb"; + +struct mtp_dev { + struct usb_function function; + struct usb_composite_dev *cdev; + spinlock_t lock; + + struct usb_ep *ep_in; + struct usb_ep *ep_out; + struct usb_ep *ep_intr; + + int state; + + /* synchronize access to our device file */ + atomic_t open_excl; + /* to enforce only one ioctl at a time */ + atomic_t ioctl_excl; + + struct list_head tx_idle; + struct list_head intr_idle; + + wait_queue_head_t read_wq; + wait_queue_head_t write_wq; + wait_queue_head_t intr_wq; + struct usb_request *rx_req[RX_REQ_MAX]; + int rx_done; + + /* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and + * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue + */ + struct workqueue_struct *wq; + struct work_struct 
send_file_work; + struct work_struct receive_file_work; + struct file *xfer_file; + loff_t xfer_file_offset; + int64_t xfer_file_length; + unsigned xfer_send_header; + uint16_t xfer_command; + uint32_t xfer_transaction_id; + int xfer_result; +}; + +static struct usb_interface_descriptor mtp_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 0, + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC, + .bInterfaceProtocol = 0, +}; + +static struct usb_interface_descriptor ptp_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 0, + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_STILL_IMAGE, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 1, +}; + +static struct usb_endpoint_descriptor mtp_ss_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor mtp_ss_in_comp_desc = { + .bLength = sizeof(mtp_ss_in_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + /* .bMaxBurst = DYNAMIC, */ +}; + +static struct usb_endpoint_descriptor mtp_ss_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor mtp_ss_out_comp_desc = { + .bLength = sizeof(mtp_ss_out_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + /* .bMaxBurst = DYNAMIC, */ +}; + +static struct usb_endpoint_descriptor mtp_highspeed_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor mtp_highspeed_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor mtp_intr_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE), + .bInterval = 6, +}; + +static struct usb_ss_ep_comp_descriptor mtp_intr_ss_comp_desc = { + .bLength = sizeof(mtp_intr_ss_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + .wBytesPerInterval = cpu_to_le16(INTR_BUFFER_SIZE), +}; + +static struct usb_descriptor_header *fs_mtp_descs[] = { + (struct usb_descriptor_header *) &mtp_interface_desc, + (struct usb_descriptor_header *) &mtp_fullspeed_in_desc, + (struct usb_descriptor_header *) &mtp_fullspeed_out_desc, + (struct usb_descriptor_header *) &mtp_intr_desc, + NULL, +}; + +static 
struct usb_descriptor_header *hs_mtp_descs[] = { + (struct usb_descriptor_header *) &mtp_interface_desc, + (struct usb_descriptor_header *) &mtp_highspeed_in_desc, + (struct usb_descriptor_header *) &mtp_highspeed_out_desc, + (struct usb_descriptor_header *) &mtp_intr_desc, + NULL, +}; + +static struct usb_descriptor_header *ss_mtp_descs[] = { + (struct usb_descriptor_header *) &mtp_interface_desc, + (struct usb_descriptor_header *) &mtp_ss_in_desc, + (struct usb_descriptor_header *) &mtp_ss_in_comp_desc, + (struct usb_descriptor_header *) &mtp_ss_out_desc, + (struct usb_descriptor_header *) &mtp_ss_out_comp_desc, + (struct usb_descriptor_header *) &mtp_intr_desc, + (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc, + NULL, +}; + +static struct usb_descriptor_header *fs_ptp_descs[] = { + (struct usb_descriptor_header *) &ptp_interface_desc, + (struct usb_descriptor_header *) &mtp_fullspeed_in_desc, + (struct usb_descriptor_header *) &mtp_fullspeed_out_desc, + (struct usb_descriptor_header *) &mtp_intr_desc, + NULL, +}; + +static struct usb_descriptor_header *hs_ptp_descs[] = { + (struct usb_descriptor_header *) &ptp_interface_desc, + (struct usb_descriptor_header *) &mtp_highspeed_in_desc, + (struct usb_descriptor_header *) &mtp_highspeed_out_desc, + (struct usb_descriptor_header *) &mtp_intr_desc, + NULL, +}; + +static struct usb_descriptor_header *ss_ptp_descs[] = { + (struct usb_descriptor_header *) &ptp_interface_desc, + (struct usb_descriptor_header *) &mtp_ss_in_desc, + (struct usb_descriptor_header *) &mtp_ss_in_comp_desc, + (struct usb_descriptor_header *) &mtp_ss_out_desc, + (struct usb_descriptor_header *) &mtp_ss_out_comp_desc, + (struct usb_descriptor_header *) &mtp_intr_desc, + (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc, + NULL, +}; + +static struct usb_string mtp_string_defs[] = { + /* Naming interface "MTP" so libmtp will recognize us */ + [INTERFACE_STRING_INDEX].s = "MTP", + { }, /* end of list */ +}; + +static struct usb_gadget_strings mtp_string_table = { + .language = 0x0409, /* en-US */ + .strings = mtp_string_defs, +}; + +static struct usb_gadget_strings *mtp_strings[] = { + &mtp_string_table, + NULL, +}; + +/* Microsoft MTP OS String */ +static u8 mtp_os_string[] = { + 18, /* sizeof(mtp_os_string) */ + USB_DT_STRING, + /* Signature field: "MSFT100" */ + 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0, + /* vendor code */ + 1, + /* padding */ + 0 +}; + +/* Microsoft Extended Configuration Descriptor Header Section */ +struct mtp_ext_config_desc_header { + __le32 dwLength; + __u16 bcdVersion; + __le16 wIndex; + __u8 bCount; + __u8 reserved[7]; +}; + +/* Microsoft Extended Configuration Descriptor Function Section */ +struct mtp_ext_config_desc_function { + __u8 bFirstInterfaceNumber; + __u8 bInterfaceCount; + __u8 compatibleID[8]; + __u8 subCompatibleID[8]; + __u8 reserved[6]; +}; + +/* MTP Extended Configuration Descriptor */ +struct { + struct mtp_ext_config_desc_header header; + struct mtp_ext_config_desc_function function; +} mtp_ext_config_desc = { + .header = { + .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)), + .bcdVersion = __constant_cpu_to_le16(0x0100), + .wIndex = __constant_cpu_to_le16(4), + .bCount = 1, + }, + .function = { + .bFirstInterfaceNumber = 0, + .bInterfaceCount = 1, + .compatibleID = { 'M', 'T', 'P' }, + }, +}; + +struct mtp_device_status { + __le16 wLength; + __le16 wCode; +}; + +struct mtp_data_header { + /* length of packet, including this header */ + __le32 length; + /* container type (2 for data 
packet) */ + __le16 type; + /* MTP command code */ + __le16 command; + /* MTP transaction ID */ + __le32 transaction_id; +}; + +struct mtp_instance { + struct usb_function_instance func_inst; + const char *name; + struct mtp_dev *dev; + char mtp_ext_compat_id[16]; + struct usb_os_desc mtp_os_desc; +}; + +/* temporary variable used between mtp_open() and mtp_gadget_bind() */ +static struct mtp_dev *_mtp_dev; + +static inline struct mtp_dev *func_to_mtp(struct usb_function *f) +{ + return container_of(f, struct mtp_dev, function); +} + +static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size) +{ + struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); + + if (!req) + return NULL; + + /* now allocate buffers for the requests */ + req->buf = kmalloc(buffer_size, GFP_KERNEL); + if (!req->buf) { + usb_ep_free_request(ep, req); + return NULL; + } + + return req; +} + +static void mtp_request_free(struct usb_request *req, struct usb_ep *ep) +{ + if (req) { + kfree(req->buf); + usb_ep_free_request(ep, req); + } +} + +static inline int mtp_lock(atomic_t *excl) +{ + if (atomic_inc_return(excl) == 1) { + return 0; + } else { + atomic_dec(excl); + return -1; + } +} + +static inline void mtp_unlock(atomic_t *excl) +{ + atomic_dec(excl); +} + +/* add a request to the tail of a list */ +static void mtp_req_put(struct mtp_dev *dev, struct list_head *head, + struct usb_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + list_add_tail(&req->list, head); + spin_unlock_irqrestore(&dev->lock, flags); +} + +/* remove a request from the head of a list */ +static struct usb_request +*mtp_req_get(struct mtp_dev *dev, struct list_head *head) +{ + unsigned long flags; + struct usb_request *req; + + spin_lock_irqsave(&dev->lock, flags); + if (list_empty(head)) { + req = 0; + } else { + req = list_first_entry(head, struct usb_request, list); + list_del(&req->list); + } + spin_unlock_irqrestore(&dev->lock, flags); + return req; +} + +static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req) +{ + struct mtp_dev *dev = _mtp_dev; + + if (req->status != 0) + dev->state = STATE_ERROR; + + mtp_req_put(dev, &dev->tx_idle, req); + + wake_up(&dev->write_wq); +} + +static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req) +{ + struct mtp_dev *dev = _mtp_dev; + + dev->rx_done = 1; + if (req->status != 0) + dev->state = STATE_ERROR; + + wake_up(&dev->read_wq); +} + +static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req) +{ + struct mtp_dev *dev = _mtp_dev; + + if (req->status != 0) + dev->state = STATE_ERROR; + + mtp_req_put(dev, &dev->intr_idle, req); + + wake_up(&dev->intr_wq); +} + +static int mtp_create_bulk_endpoints(struct mtp_dev *dev, + struct usb_endpoint_descriptor *in_desc, + struct usb_endpoint_descriptor *out_desc, + struct usb_endpoint_descriptor *intr_desc) +{ + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req; + struct usb_ep *ep; + int i; + + DBG(cdev, "create_bulk_endpoints dev: %p\n", dev); + + ep = usb_ep_autoconfig(cdev->gadget, in_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_in failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name); + ep->driver_data = dev; /* claim the endpoint */ + dev->ep_in = ep; + + ep = usb_ep_autoconfig(cdev->gadget, out_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_out failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name); + ep->driver_data = 
dev; /* claim the endpoint */ + dev->ep_out = ep; + + ep = usb_ep_autoconfig(cdev->gadget, intr_desc); + if (!ep) { + DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n"); + return -ENODEV; + } + DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name); + ep->driver_data = dev; /* claim the endpoint */ + dev->ep_intr = ep; + + /* now allocate requests for our endpoints */ + for (i = 0; i < TX_REQ_MAX; i++) { + req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE); + if (!req) + goto fail; + req->complete = mtp_complete_in; + mtp_req_put(dev, &dev->tx_idle, req); + } + for (i = 0; i < RX_REQ_MAX; i++) { + req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE); + if (!req) + goto fail; + req->complete = mtp_complete_out; + dev->rx_req[i] = req; + } + for (i = 0; i < INTR_REQ_MAX; i++) { + req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE); + if (!req) + goto fail; + req->complete = mtp_complete_intr; + mtp_req_put(dev, &dev->intr_idle, req); + } + + return 0; + +fail: + pr_err("mtp_bind() could not allocate requests\n"); + return -1; +} + +static ssize_t mtp_read(struct file *fp, char __user *buf, + size_t count, loff_t *pos) +{ + struct mtp_dev *dev = fp->private_data; + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req; + ssize_t r = count; + unsigned xfer; + int ret = 0; + size_t len = 0; + + DBG(cdev, "mtp_read(%zu)\n", count); + + /* we will block until we're online */ + DBG(cdev, "mtp_read: waiting for online state\n"); + ret = wait_event_interruptible(dev->read_wq, + dev->state != STATE_OFFLINE); + if (ret < 0) { + r = ret; + goto done; + } + spin_lock_irq(&dev->lock); + if (dev->ep_out->desc) { + len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count); + if (len > MTP_BULK_BUFFER_SIZE) { + spin_unlock_irq(&dev->lock); + return -EINVAL; + } + } + + if (dev->state == STATE_CANCELED) { + /* report cancelation to userspace */ + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + return -ECANCELED; + } + dev->state = STATE_BUSY; + spin_unlock_irq(&dev->lock); + +requeue_req: + /* queue a request */ + req = dev->rx_req[0]; + req->length = len; + dev->rx_done = 0; + ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL); + if (ret < 0) { + r = -EIO; + goto done; + } else { + DBG(cdev, "rx %p queue\n", req); + } + + /* wait for a request to complete */ + ret = wait_event_interruptible(dev->read_wq, dev->rx_done); + if (ret < 0) { + r = ret; + usb_ep_dequeue(dev->ep_out, req); + goto done; + } + if (dev->state == STATE_BUSY) { + /* If we got a 0-len packet, throw it back and try again. */ + if (req->actual == 0) + goto requeue_req; + + DBG(cdev, "rx %p %d\n", req, req->actual); + xfer = (req->actual < count) ? 
req->actual : count; + r = xfer; + if (copy_to_user(buf, req->buf, xfer)) + r = -EFAULT; + } else + r = -EIO; + +done: + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) + r = -ECANCELED; + else if (dev->state != STATE_OFFLINE) + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + + DBG(cdev, "mtp_read returning %zd\n", r); + return r; +} + +static ssize_t mtp_write(struct file *fp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mtp_dev *dev = fp->private_data; + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req = 0; + ssize_t r = count; + unsigned xfer; + int sendZLP = 0; + int ret; + + DBG(cdev, "mtp_write(%zu)\n", count); + + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) { + /* report cancelation to userspace */ + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + return -ECANCELED; + } + if (dev->state == STATE_OFFLINE) { + spin_unlock_irq(&dev->lock); + return -ENODEV; + } + dev->state = STATE_BUSY; + spin_unlock_irq(&dev->lock); + + /* we need to send a zero length packet to signal the end of transfer + * if the transfer size is aligned to a packet boundary. + */ + if ((count & (dev->ep_in->maxpacket - 1)) == 0) + sendZLP = 1; + + while (count > 0 || sendZLP) { + /* so we exit after sending ZLP */ + if (count == 0) + sendZLP = 0; + + if (dev->state != STATE_BUSY) { + DBG(cdev, "mtp_write dev->error\n"); + r = -EIO; + break; + } + + /* get an idle tx request to use */ + req = 0; + ret = wait_event_interruptible(dev->write_wq, + ((req = mtp_req_get(dev, &dev->tx_idle)) + || dev->state != STATE_BUSY)); + if (!req) { + r = ret; + break; + } + + if (count > MTP_BULK_BUFFER_SIZE) + xfer = MTP_BULK_BUFFER_SIZE; + else + xfer = count; + if (xfer && copy_from_user(req->buf, buf, xfer)) { + r = -EFAULT; + break; + } + + req->length = xfer; + ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); + if (ret < 0) { + DBG(cdev, "mtp_write: xfer error %d\n", ret); + r = -EIO; + break; + } + + buf += xfer; + count -= xfer; + + /* zero this so we don't try to free it on error exit */ + req = 0; + } + + if (req) + mtp_req_put(dev, &dev->tx_idle, req); + + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) + r = -ECANCELED; + else if (dev->state != STATE_OFFLINE) + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + + DBG(cdev, "mtp_write returning %zd\n", r); + return r; +} + +/* read from a local file and write to USB */ +static void send_file_work(struct work_struct *data) +{ + struct mtp_dev *dev = container_of(data, struct mtp_dev, + send_file_work); + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req = 0; + struct mtp_data_header *header; + struct file *filp; + loff_t offset; + int64_t count; + int xfer, ret, hdr_size; + int r = 0; + int sendZLP = 0; + + /* read our parameters */ + smp_rmb(); + filp = dev->xfer_file; + offset = dev->xfer_file_offset; + count = dev->xfer_file_length; + + DBG(cdev, "send_file_work(%lld %lld)\n", offset, count); + + if (dev->xfer_send_header) { + hdr_size = sizeof(struct mtp_data_header); + count += hdr_size; + } else { + hdr_size = 0; + } + + /* we need to send a zero length packet to signal the end of transfer + * if the transfer size is aligned to a packet boundary. 
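+ * For example, with a 512-byte bulk maxpacket a 1024-byte transfer
+ * satisfies (1024 & 511) == 0 and needs one trailing ZLP, while a
+ * 1025-byte transfer already ends in a short packet and needs none;
+ * mtp_write() above applies the same test.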
+ */ + if ((count & (dev->ep_in->maxpacket - 1)) == 0) + sendZLP = 1; + + while (count > 0 || sendZLP) { + /* so we exit after sending ZLP */ + if (count == 0) + sendZLP = 0; + + /* get an idle tx request to use */ + req = 0; + ret = wait_event_interruptible(dev->write_wq, + (req = mtp_req_get(dev, &dev->tx_idle)) + || dev->state != STATE_BUSY); + if (dev->state == STATE_CANCELED) { + r = -ECANCELED; + break; + } + if (!req) { + r = ret; + break; + } + + if (count > MTP_BULK_BUFFER_SIZE) + xfer = MTP_BULK_BUFFER_SIZE; + else + xfer = count; + + if (hdr_size) { + /* prepend MTP data header */ + header = (struct mtp_data_header *)req->buf; + /* + * set file size with header according to + * MTP Specification v1.0 + */ + header->length = (count > MTP_MAX_FILE_SIZE) ? + MTP_MAX_FILE_SIZE : __cpu_to_le32(count); + header->type = __cpu_to_le16(2); /* data packet */ + header->command = __cpu_to_le16(dev->xfer_command); + header->transaction_id = + __cpu_to_le32(dev->xfer_transaction_id); + } + + ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size, + &offset); + if (ret < 0) { + r = ret; + break; + } + xfer = ret + hdr_size; + hdr_size = 0; + + req->length = xfer; + ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); + if (ret < 0) { + DBG(cdev, "send_file_work: xfer error %d\n", ret); + dev->state = STATE_ERROR; + r = -EIO; + break; + } + + count -= xfer; + + /* zero this so we don't try to free it on error exit */ + req = 0; + } + + if (req) + mtp_req_put(dev, &dev->tx_idle, req); + + DBG(cdev, "send_file_work returning %d\n", r); + /* write the result */ + dev->xfer_result = r; + smp_wmb(); +} + +/* read from USB and write to a local file */ +static void receive_file_work(struct work_struct *data) +{ + struct mtp_dev *dev = container_of(data, struct mtp_dev, + receive_file_work); + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *read_req = NULL, *write_req = NULL; + struct file *filp; + loff_t offset; + int64_t count, len; + int ret, cur_buf = 0; + int r = 0; + + /* read our parameters */ + smp_rmb(); + filp = dev->xfer_file; + offset = dev->xfer_file_offset; + count = dev->xfer_file_length; + + DBG(cdev, "receive_file_work(%lld)\n", count); + + while (count > 0 || write_req) { + if (count > 0) { + /* queue a request */ + read_req = dev->rx_req[cur_buf]; + cur_buf = (cur_buf + 1) % RX_REQ_MAX; + + len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count); + if (len > MTP_BULK_BUFFER_SIZE) + len = MTP_BULK_BUFFER_SIZE; + read_req->length = len; + dev->rx_done = 0; + ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL); + if (ret < 0) { + r = -EIO; + dev->state = STATE_ERROR; + break; + } + } + + if (write_req) { + DBG(cdev, "rx %p %d\n", write_req, write_req->actual); + ret = vfs_write(filp, write_req->buf, write_req->actual, + &offset); + DBG(cdev, "vfs_write %d\n", ret); + if (ret != write_req->actual) { + r = -EIO; + dev->state = STATE_ERROR; + break; + } + write_req = NULL; + } + + if (read_req) { + /* wait for our last read to complete */ + ret = wait_event_interruptible(dev->read_wq, + dev->rx_done || dev->state != STATE_BUSY); + if (dev->state == STATE_CANCELED) { + r = -ECANCELED; + if (!dev->rx_done) + usb_ep_dequeue(dev->ep_out, read_req); + break; + } + if (read_req->status) { + r = read_req->status; + break; + } + /* if xfer_file_length is 0xFFFFFFFF, then we read until + * we get a zero length packet + */ + if (count != 0xFFFFFFFF) + count -= read_req->actual; + if (read_req->actual < read_req->length) { + /* + * short packet is used to signal EOF for + * 
sizes > 4 gig + */ + DBG(cdev, "got short packet\n"); + count = 0; + } + + write_req = read_req; + read_req = NULL; + } + } + + DBG(cdev, "receive_file_work returning %d\n", r); + /* write the result */ + dev->xfer_result = r; + smp_wmb(); +} + +static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event) +{ + struct usb_request *req = NULL; + int ret; + int length = event->length; + + DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length); + + if (length < 0 || length > INTR_BUFFER_SIZE) + return -EINVAL; + if (dev->state == STATE_OFFLINE) + return -ENODEV; + + ret = wait_event_interruptible_timeout(dev->intr_wq, + (req = mtp_req_get(dev, &dev->intr_idle)), + msecs_to_jiffies(1000)); + if (!req) + return -ETIME; + + if (copy_from_user(req->buf, (void __user *)event->data, length)) { + mtp_req_put(dev, &dev->intr_idle, req); + return -EFAULT; + } + req->length = length; + ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL); + if (ret) + mtp_req_put(dev, &dev->intr_idle, req); + + return ret; +} + +static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value) +{ + struct mtp_dev *dev = fp->private_data; + struct file *filp = NULL; + int ret = -EINVAL; + + if (mtp_lock(&dev->ioctl_excl)) + return -EBUSY; + + switch (code) { + case MTP_SEND_FILE: + case MTP_RECEIVE_FILE: + case MTP_SEND_FILE_WITH_HEADER: + { + struct mtp_file_range mfr; + struct work_struct *work; + + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) { + /* report cancelation to userspace */ + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); + ret = -ECANCELED; + goto out; + } + if (dev->state == STATE_OFFLINE) { + spin_unlock_irq(&dev->lock); + ret = -ENODEV; + goto out; + } + dev->state = STATE_BUSY; + spin_unlock_irq(&dev->lock); + + if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) { + ret = -EFAULT; + goto fail; + } + /* hold a reference to the file while we are working with it */ + filp = fget(mfr.fd); + if (!filp) { + ret = -EBADF; + goto fail; + } + + /* write the parameters */ + dev->xfer_file = filp; + dev->xfer_file_offset = mfr.offset; + dev->xfer_file_length = mfr.length; + smp_wmb(); + + if (code == MTP_SEND_FILE_WITH_HEADER) { + work = &dev->send_file_work; + dev->xfer_send_header = 1; + dev->xfer_command = mfr.command; + dev->xfer_transaction_id = mfr.transaction_id; + } else if (code == MTP_SEND_FILE) { + work = &dev->send_file_work; + dev->xfer_send_header = 0; + } else { + work = &dev->receive_file_work; + } + + /* We do the file transfer on a work queue so it will run + * in kernel context, which is necessary for vfs_read and + * vfs_write to use our buffers in the kernel address space. + */ + queue_work(dev->wq, work); + /* wait for operation to complete */ + flush_workqueue(dev->wq); + fput(filp); + + /* read the result */ + smp_rmb(); + ret = dev->xfer_result; + break; + } + case MTP_SEND_EVENT: + { + struct mtp_event event; + /* return here so we don't change dev->state below, + * which would interfere with bulk transfer state. 
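+ * (Events travel over ep_intr and may be sent while a file
+ * transfer is in flight; dropping to STATE_READY in the common
+ * exit path would defeat host-initiated cancellation.)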
+ */ + if (copy_from_user(&event, (void __user *)value, sizeof(event))) + ret = -EFAULT; + else + ret = mtp_send_event(dev, &event); + goto out; + } + } + +fail: + spin_lock_irq(&dev->lock); + if (dev->state == STATE_CANCELED) + ret = -ECANCELED; + else if (dev->state != STATE_OFFLINE) + dev->state = STATE_READY; + spin_unlock_irq(&dev->lock); +out: + mtp_unlock(&dev->ioctl_excl); + DBG(dev->cdev, "ioctl returning %d\n", ret); + return ret; +} + +static int mtp_open(struct inode *ip, struct file *fp) +{ + printk(KERN_INFO "mtp_open\n"); + if (mtp_lock(&_mtp_dev->open_excl)) + return -EBUSY; + + /* clear any error condition */ + if (_mtp_dev->state != STATE_OFFLINE) + _mtp_dev->state = STATE_READY; + + fp->private_data = _mtp_dev; + return 0; +} + +static int mtp_release(struct inode *ip, struct file *fp) +{ + printk(KERN_INFO "mtp_release\n"); + + mtp_unlock(&_mtp_dev->open_excl); + return 0; +} + +/* file operations for /dev/mtp_usb */ +static const struct file_operations mtp_fops = { + .owner = THIS_MODULE, + .read = mtp_read, + .write = mtp_write, + .unlocked_ioctl = mtp_ioctl, + .open = mtp_open, + .release = mtp_release, +}; + +static struct miscdevice mtp_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = mtp_shortname, + .fops = &mtp_fops, +}; + +static int mtp_ctrlrequest(struct usb_composite_dev *cdev, + const struct usb_ctrlrequest *ctrl) +{ + struct mtp_dev *dev = _mtp_dev; + int value = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + unsigned long flags; + + VDBG(cdev, "mtp_ctrlrequest " + "%02x.%02x v%04x i%04x l%u\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + + /* Handle MTP OS string */ + if (ctrl->bRequestType == + (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE) + && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR + && (w_value >> 8) == USB_DT_STRING + && (w_value & 0xFF) == MTP_OS_STRING_ID) { + value = (w_length < sizeof(mtp_os_string) + ? w_length : sizeof(mtp_os_string)); + memcpy(cdev->req->buf, mtp_os_string, value); + } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) { + /* Handle MTP OS descriptor */ + DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n", + ctrl->bRequest, w_index, w_value, w_length); + + if (ctrl->bRequest == 1 + && (ctrl->bRequestType & USB_DIR_IN) + && (w_index == 4 || w_index == 5)) { + value = (w_length < sizeof(mtp_ext_config_desc) ? + w_length : sizeof(mtp_ext_config_desc)); + memcpy(cdev->req->buf, &mtp_ext_config_desc, value); + } + } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) { + DBG(cdev, "class request: %d index: %d value: %d length: %d\n", + ctrl->bRequest, w_index, w_value, w_length); + + if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0 + && w_value == 0) { + DBG(cdev, "MTP_REQ_CANCEL\n"); + + spin_lock_irqsave(&dev->lock, flags); + if (dev->state == STATE_BUSY) { + dev->state = STATE_CANCELED; + wake_up(&dev->read_wq); + wake_up(&dev->write_wq); + } + spin_unlock_irqrestore(&dev->lock, flags); + + /* We need to queue a request to read the remaining + * bytes, but we don't actually need to look at + * the contents. 
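+ * Returning value = w_length makes the code below queue an ep0
+ * request for exactly the remaining bytes, letting the control
+ * transfer complete cleanly.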
+ */ + value = w_length; + } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS + && w_index == 0 && w_value == 0) { + struct mtp_device_status *status = cdev->req->buf; + + status->wLength = + __constant_cpu_to_le16(sizeof(*status)); + + DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n"); + spin_lock_irqsave(&dev->lock, flags); + /* device status is "busy" until we report + * the cancelation to userspace + */ + if (dev->state == STATE_CANCELED) + status->wCode = + __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY); + else + status->wCode = + __cpu_to_le16(MTP_RESPONSE_OK); + spin_unlock_irqrestore(&dev->lock, flags); + value = sizeof(*status); + } + } + + /* respond with data transfer or status phase? */ + if (value >= 0) { + int rc; + + cdev->req->zero = value < w_length; + cdev->req->length = value; + rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC); + if (rc < 0) + ERROR(cdev, "%s: response queue error\n", __func__); + } + return value; +} + +static int +mtp_function_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct mtp_dev *dev = func_to_mtp(f); + int id; + int ret; + struct mtp_instance *fi_mtp; + + dev->cdev = cdev; + DBG(cdev, "mtp_function_bind dev: %p\n", dev); + + /* allocate interface ID(s) */ + id = usb_interface_id(c, f); + if (id < 0) + return id; + mtp_interface_desc.bInterfaceNumber = id; + + if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) { + ret = usb_string_id(c->cdev); + if (ret < 0) + return ret; + mtp_string_defs[INTERFACE_STRING_INDEX].id = ret; + mtp_interface_desc.iInterface = ret; + } + + fi_mtp = container_of(f->fi, struct mtp_instance, func_inst); + + if (cdev->use_os_string) { + f->os_desc_table = kzalloc(sizeof(*f->os_desc_table), + GFP_KERNEL); + if (!f->os_desc_table) + return -ENOMEM; + f->os_desc_n = 1; + f->os_desc_table[0].os_desc = &fi_mtp->mtp_os_desc; + } + + /* allocate endpoints */ + ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc, + &mtp_fullspeed_out_desc, &mtp_intr_desc); + if (ret) + return ret; + + /* support high speed hardware */ + if (gadget_is_dualspeed(c->cdev->gadget)) { + mtp_highspeed_in_desc.bEndpointAddress = + mtp_fullspeed_in_desc.bEndpointAddress; + mtp_highspeed_out_desc.bEndpointAddress = + mtp_fullspeed_out_desc.bEndpointAddress; + } + /* support super speed hardware */ + if (gadget_is_superspeed(c->cdev->gadget)) { + unsigned max_burst; + + /* Calculate bMaxBurst, we know packet size is 1024 */ + max_burst = min_t(unsigned, MTP_BULK_BUFFER_SIZE / 1024, 15); + mtp_ss_in_desc.bEndpointAddress = + mtp_fullspeed_in_desc.bEndpointAddress; + mtp_ss_in_comp_desc.bMaxBurst = max_burst; + mtp_ss_out_desc.bEndpointAddress = + mtp_fullspeed_out_desc.bEndpointAddress; + mtp_ss_out_comp_desc.bMaxBurst = max_burst; + } + + DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n", + gadget_is_superspeed(c->cdev->gadget) ? "super" : + (gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full"), + f->name, dev->ep_in->name, dev->ep_out->name); + return 0; +} + +static void +mtp_function_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct mtp_dev *dev = func_to_mtp(f); + struct usb_request *req; + int i; + + mtp_string_defs[INTERFACE_STRING_INDEX].id = 0; + while ((req = mtp_req_get(dev, &dev->tx_idle))) + mtp_request_free(req, dev->ep_in); + for (i = 0; i < RX_REQ_MAX; i++) + mtp_request_free(dev->rx_req[i], dev->ep_out); + while ((req = mtp_req_get(dev, &dev->intr_idle))) + mtp_request_free(req, dev->ep_intr); + dev->state = STATE_OFFLINE; + kfree(f->os_desc_table); + f->os_desc_n = 0; +} + +static int mtp_function_set_alt(struct usb_function *f, + unsigned intf, unsigned alt) +{ + struct mtp_dev *dev = func_to_mtp(f); + struct usb_composite_dev *cdev = f->config->cdev; + int ret; + + DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt); + + ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in); + if (ret) + return ret; + + ret = usb_ep_enable(dev->ep_in); + if (ret) + return ret; + + ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out); + if (ret) + return ret; + + ret = usb_ep_enable(dev->ep_out); + if (ret) { + usb_ep_disable(dev->ep_in); + return ret; + } + + ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr); + if (ret) + return ret; + + ret = usb_ep_enable(dev->ep_intr); + if (ret) { + usb_ep_disable(dev->ep_out); + usb_ep_disable(dev->ep_in); + return ret; + } + dev->state = STATE_READY; + + /* readers may be blocked waiting for us to go online */ + wake_up(&dev->read_wq); + return 0; +} + +static void mtp_function_disable(struct usb_function *f) +{ + struct mtp_dev *dev = func_to_mtp(f); + struct usb_composite_dev *cdev = dev->cdev; + + DBG(cdev, "mtp_function_disable\n"); + dev->state = STATE_OFFLINE; + usb_ep_disable(dev->ep_in); + usb_ep_disable(dev->ep_out); + usb_ep_disable(dev->ep_intr); + + /* readers may be blocked waiting for us to go online */ + wake_up(&dev->read_wq); + + VDBG(cdev, "%s disabled\n", dev->function.name); +} + +static int __mtp_setup(struct mtp_instance *fi_mtp) +{ + struct mtp_dev *dev; + int ret; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + + if (fi_mtp != NULL) + fi_mtp->dev = dev; + + if (!dev) + return -ENOMEM; + + spin_lock_init(&dev->lock); + init_waitqueue_head(&dev->read_wq); + init_waitqueue_head(&dev->write_wq); + init_waitqueue_head(&dev->intr_wq); + atomic_set(&dev->open_excl, 0); + atomic_set(&dev->ioctl_excl, 0); + INIT_LIST_HEAD(&dev->tx_idle); + INIT_LIST_HEAD(&dev->intr_idle); + + dev->wq = create_singlethread_workqueue("f_mtp"); + if (!dev->wq) { + ret = -ENOMEM; + goto err1; + } + INIT_WORK(&dev->send_file_work, send_file_work); + INIT_WORK(&dev->receive_file_work, receive_file_work); + + _mtp_dev = dev; + + ret = misc_register(&mtp_device); + if (ret) + goto err2; + + return 0; + +err2: + destroy_workqueue(dev->wq); +err1: + _mtp_dev = NULL; + kfree(dev); + printk(KERN_ERR "mtp gadget driver failed to initialize\n"); + return ret; +} + +static int mtp_setup_configfs(struct mtp_instance *fi_mtp) +{ + return __mtp_setup(fi_mtp); +} + + +static void mtp_cleanup(void) +{ + struct mtp_dev *dev = _mtp_dev; + + if (!dev) + return; + + misc_deregister(&mtp_device); + destroy_workqueue(dev->wq); + _mtp_dev = NULL; + kfree(dev); +} + +static struct mtp_instance *to_mtp_instance(struct config_item *item) +{ + return container_of(to_config_group(item), struct mtp_instance, + func_inst.group); +} + +static void mtp_attr_release(struct config_item *item) +{ + struct mtp_instance 
*fi_mtp = to_mtp_instance(item); + + usb_put_function_instance(&fi_mtp->func_inst); +} + +static struct configfs_item_operations mtp_item_ops = { + .release = mtp_attr_release, +}; + +static struct config_item_type mtp_func_type = { + .ct_item_ops = &mtp_item_ops, + .ct_owner = THIS_MODULE, +}; + + +static struct mtp_instance *to_fi_mtp(struct usb_function_instance *fi) +{ + return container_of(fi, struct mtp_instance, func_inst); +} + +static int mtp_set_inst_name(struct usb_function_instance *fi, const char *name) +{ + struct mtp_instance *fi_mtp; + char *ptr; + int name_len; + + name_len = strlen(name) + 1; + if (name_len > MAX_INST_NAME_LEN) + return -ENAMETOOLONG; + + ptr = kstrndup(name, name_len, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + fi_mtp = to_fi_mtp(fi); + fi_mtp->name = ptr; + + return 0; +} + +static void mtp_free_inst(struct usb_function_instance *fi) +{ + struct mtp_instance *fi_mtp; + + fi_mtp = to_fi_mtp(fi); + kfree(fi_mtp->name); + mtp_cleanup(); + kfree(fi_mtp); +} + +struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config) +{ + struct mtp_instance *fi_mtp; + int ret = 0; + struct usb_os_desc *descs[1]; + char *names[1]; + + fi_mtp = kzalloc(sizeof(*fi_mtp), GFP_KERNEL); + if (!fi_mtp) + return ERR_PTR(-ENOMEM); + fi_mtp->func_inst.set_inst_name = mtp_set_inst_name; + fi_mtp->func_inst.free_func_inst = mtp_free_inst; + + fi_mtp->mtp_os_desc.ext_compat_id = fi_mtp->mtp_ext_compat_id; + INIT_LIST_HEAD(&fi_mtp->mtp_os_desc.ext_prop); + descs[0] = &fi_mtp->mtp_os_desc; + names[0] = "MTP"; + + if (mtp_config) { + ret = mtp_setup_configfs(fi_mtp); + if (ret) { + kfree(fi_mtp); + pr_err("Error setting up MTP\n"); + return ERR_PTR(ret); + } + } else + fi_mtp->dev = _mtp_dev; + + config_group_init_type_name(&fi_mtp->func_inst.group, + "", &mtp_func_type); + usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1, + descs, names, THIS_MODULE); + + return &fi_mtp->func_inst; +} +EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp); + +static struct usb_function_instance *mtp_alloc_inst(void) +{ + return alloc_inst_mtp_ptp(true); +} + +static int mtp_ctrlreq_configfs(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + return mtp_ctrlrequest(f->config->cdev, ctrl); +} + +static void mtp_free(struct usb_function *f) +{ + /* NO-OP: no function-specific resource allocation in mtp_alloc */ +} + +struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi, + bool mtp_config) +{ + struct mtp_instance *fi_mtp = to_fi_mtp(fi); + struct mtp_dev *dev; + + /* + * PTP piggybacks on the MTP function, so make sure we have + * created the MTP function before we associate this PTP + * function with a gadget configuration.
+ */ + if (fi_mtp->dev == NULL) { + pr_err("Error: Create MTP function before linking" + " PTP function with a gadget configuration\n"); + pr_err("\t1: Delete existing PTP function if any\n"); + pr_err("\t2: Create MTP function\n"); + pr_err("\t3: Create and symlink PTP function" + " with a gadget configuration\n"); + return ERR_PTR(-EINVAL); /* Invalid Configuration */ + } + + dev = fi_mtp->dev; + dev->function.name = DRIVER_NAME; + dev->function.strings = mtp_strings; + if (mtp_config) { + dev->function.fs_descriptors = fs_mtp_descs; + dev->function.hs_descriptors = hs_mtp_descs; + dev->function.ss_descriptors = ss_mtp_descs; + } else { + dev->function.fs_descriptors = fs_ptp_descs; + dev->function.hs_descriptors = hs_ptp_descs; + dev->function.ss_descriptors = ss_ptp_descs; + } + dev->function.bind = mtp_function_bind; + dev->function.unbind = mtp_function_unbind; + dev->function.set_alt = mtp_function_set_alt; + dev->function.disable = mtp_function_disable; + dev->function.setup = mtp_ctrlreq_configfs; + dev->function.free_func = mtp_free; + + return &dev->function; +} +EXPORT_SYMBOL_GPL(function_alloc_mtp_ptp); + +static struct usb_function *mtp_alloc(struct usb_function_instance *fi) +{ + return function_alloc_mtp_ptp(fi, true); +} + +DECLARE_USB_FUNCTION_INIT(mtp, mtp_alloc_inst, mtp_alloc); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/function/f_mtp.h b/drivers/usb/gadget/function/f_mtp.h new file mode 100644 index 000000000000..7adb1ff08eff --- /dev/null +++ b/drivers/usb/gadget/function/f_mtp.h @@ -0,0 +1,18 @@ +/* + * Copyright (C) 2014 Google, Inc. + * Author: Badhri Jagan Sridharan + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +extern struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config); +extern struct usb_function *function_alloc_mtp_ptp( + struct usb_function_instance *fi, bool mtp_config); diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index ea0da35a44e2..e6d4fa5eeff1 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -635,19 +635,19 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) return -EAGAIN; } + list_add(&req->list, &dev->tx_reqs_active); + /* here, we unlock, and only unlock, to avoid deadlock. */ spin_unlock(&dev->lock); value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC); spin_lock(&dev->lock); if (value) { + list_del(&req->list); list_add(&req->list, &dev->tx_reqs); spin_unlock_irqrestore(&dev->lock, flags); mutex_unlock(&dev->lock_printer_io); return -EAGAIN; } - - list_add(&req->list, &dev->tx_reqs_active); - } spin_unlock_irqrestore(&dev->lock, flags); diff --git a/drivers/usb/gadget/function/f_ptp.c b/drivers/usb/gadget/function/f_ptp.c new file mode 100644 index 000000000000..da3e4d53e085 --- /dev/null +++ b/drivers/usb/gadget/function/f_ptp.c @@ -0,0 +1,38 @@ +/* + * Gadget Function Driver for PTP + * + * Copyright (C) 2014 Google, Inc. 
+ * Author: Badhri Jagan Sridharan + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/module.h> +#include <linux/types.h> + +#include <linux/configfs.h> +#include <linux/usb/composite.h> + +#include "f_mtp.h" + +static struct usb_function_instance *ptp_alloc_inst(void) +{ + return alloc_inst_mtp_ptp(false); +} + +static struct usb_function *ptp_alloc(struct usb_function_instance *fi) +{ + return function_alloc_mtp_ptp(fi, false); +} + +DECLARE_USB_FUNCTION_INIT(ptp, ptp_alloc_inst, ptp_alloc); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Badhri Jagan Sridharan"); diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index f05c3f3e6103..97cb2dfd6369 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -528,6 +528,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); return ret; } + iad_desc.bFirstInterface = ret; + std_ac_if_desc.bInterfaceNumber = ret; uac2->ac_intf = ret; uac2->ac_alt = 0; diff --git a/drivers/usb/gadget/function/u_dvctrace.h b/drivers/usb/gadget/function/u_dvctrace.h new file mode 100644 index 000000000000..56a5d0cd577a --- /dev/null +++ b/drivers/usb/gadget/function/u_dvctrace.h @@ -0,0 +1,73 @@ + +/* + * Gadget Driver for DvC.Trace Function + * + * Copyright (C) 2015, Intel Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + */ + +#ifndef __U_DVCTRACE_H +#define __U_DVCTRACE_H + +#include +#include + +struct dvct_function_desc { + struct usb_descriptor_header **fs; + struct usb_descriptor_header **hs; + struct usb_descriptor_header **ss; + + /* special descriptors, updated on bind */ + struct usb_interface_assoc_descriptor *iad; + struct usb_interface_descriptor *d_itf; + struct usb_interface_descriptor *c_itf; + struct usb_endpoint_descriptor *fs_ep; + struct usb_endpoint_descriptor *hs_ep; + struct usb_endpoint_descriptor *ss_ep; + struct usb_ss_ep_comp_descriptor *ss_ep_comp; + + /* strings */ + struct usb_gadget_strings str; + struct dvct_string_lookup *lk_tbl; +}; + +struct dvct_function { + struct usb_function function; + struct usb_composite_dev *cdev; + struct usb_ep *ep_in; + + u32 online_data:1; /* set to one when the data itf is set */ + u32 online_ctrl:1; /* set to one when the control itf is set */ + atomic_t status; + + struct dvct_source_device *source_dev; + struct dvct_source_driver *source_drv; + + u8 trace_config; + struct dvct_function_desc desc; +}; + +struct dvct_function_inst { + struct usb_function_instance instance; + struct dvct_source_device *source_dev; +}; + +#define to_dvct_function_inst(inst) \ + container_of(inst, struct dvct_function_inst, instance) + +#define to_dvct_function(func) \ + container_of(func, struct dvct_function, function) + +ssize_t dvct_start_transfer(struct dvct_function *dev, u8 config); +int dvct_stop_transfer(struct dvct_function *dev); + +#endif /* __U_DVCTRACE_H */ diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h index 7d53a4773d1a..2f03334c6874 100644 --- a/drivers/usb/gadget/u_f.h +++ b/drivers/usb/gadget/u_f.h @@ -64,7 +64,9 @@ struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len); /* Frees a usb_request previously allocated by alloc_ep_req() */ static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req) { + WARN_ON(req->buf == NULL); kfree(req->buf); + req->buf = NULL; usb_ep_free_request(ep, req); } diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c index 02968842b359..708e36f530d8 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_pci.c +++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c @@ -82,6 +82,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) if (ret) { dev_err(&pci->dev, "couldn't add resources to bdc device\n"); + platform_device_put(bdc); return ret; } diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index d41d07aae0ce..ad315c4c6f35 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -191,8 +191,8 @@ EXPORT_SYMBOL_GPL(usb_ep_alloc_request); void usb_ep_free_request(struct usb_ep *ep, struct usb_request *req) { - ep->ops->free_request(ep, req); trace_usb_ep_free_request(ep, req, 0); + ep->ops->free_request(ep, req); } EXPORT_SYMBOL_GPL(usb_ep_free_request); @@ -249,6 +249,9 @@ EXPORT_SYMBOL_GPL(usb_ep_free_request); * arranges to poll once per interval, and the gadget driver usually will * have queued some data to transfer at that time. * + * Note that @req's ->complete() callback must never be called from + * within usb_ep_queue() as that can create deadlock situations. + * * Returns zero, or a negative error code. Endpoints that are not enabled * report errors; errors will also be * reported when the usb peripheral is disconnected.
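The kernel-doc note added to usb_ep_queue() above pins down a contract that gadget function drivers, including the MTP/PTP code earlier in this series, already rely on: a request's ->complete() callback fires asynchronously, typically from the UDC's interrupt path, and is never invoked synchronously from inside usb_ep_queue(). A minimal sketch of the usage pattern this guarantees is shown below; my_complete() and my_queue_in() are hypothetical names, while usb_ep_alloc_request(), usb_ep_queue(), usb_ep_free_request() and the struct usb_request fields are the real gadget API.

#include <linux/usb/gadget.h>

static void my_complete(struct usb_ep *ep, struct usb_request *req)
{
        /*
         * Runs later, usually from the UDC's interrupt handler. Because
         * usb_ep_queue() never calls this synchronously, the handler may
         * take locks that the queuing path also holds without deadlocking.
         */
        if (req->status)
                pr_debug("request ended early: %d\n", req->status);
        usb_ep_free_request(ep, req);
}

static int my_queue_in(struct usb_ep *ep, void *buf, unsigned int len)
{
        struct usb_request *req;
        int ret;

        req = usb_ep_alloc_request(ep, GFP_ATOMIC);
        if (!req)
                return -ENOMEM;
        req->buf = buf;         /* caller-owned buffer, not freed here */
        req->length = len;
        req->complete = my_complete;
        ret = usb_ep_queue(ep, req, GFP_ATOMIC);
        if (ret)                /* never queued, so complete() will not run */
                usb_ep_free_request(ep, req);
        return ret;
}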
@@ -923,7 +926,7 @@ int usb_gadget_ep_match_desc(struct usb_gadget *gadget, return 0; /* "high bandwidth" works only at high speed */ - if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & (3<<11)) + if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp_mult(desc) > 1) return 0; switch (type) { @@ -1080,8 +1083,12 @@ static inline void usb_gadget_udc_stop(struct usb_udc *udc) static inline void usb_gadget_udc_set_speed(struct usb_udc *udc, enum usb_device_speed speed) { - if (udc->gadget->ops->udc_set_speed) - udc->gadget->ops->udc_set_speed(udc->gadget, speed); + if (udc->gadget->ops->udc_set_speed) { + enum usb_device_speed s; + + s = min(speed, udc->gadget->max_speed); + udc->gadget->ops->udc_set_speed(udc->gadget, s); + } } /** @@ -1154,11 +1161,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, udc = kzalloc(sizeof(*udc), GFP_KERNEL); if (!udc) - goto err1; - - ret = device_add(&gadget->dev); - if (ret) - goto err2; + goto err_put_gadget; device_initialize(&udc->dev); udc->dev.release = usb_udc_release; @@ -1167,7 +1170,11 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, udc->dev.parent = parent; ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj)); if (ret) - goto err3; + goto err_put_udc; + + ret = device_add(&gadget->dev); + if (ret) + goto err_put_udc; udc->gadget = gadget; gadget->udc = udc; @@ -1177,7 +1184,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, ret = device_add(&udc->dev); if (ret) - goto err4; + goto err_unlist_udc; usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED); udc->vbus = true; @@ -1185,27 +1192,25 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, /* pick up one of pending gadget drivers */ ret = check_pending_gadget_drivers(udc); if (ret) - goto err5; + goto err_del_udc; mutex_unlock(&udc_lock); return 0; -err5: + err_del_udc: device_del(&udc->dev); -err4: + err_unlist_udc: list_del(&udc->list); mutex_unlock(&udc_lock); -err3: - put_device(&udc->dev); device_del(&gadget->dev); -err2: - kfree(udc); + err_put_udc: + put_device(&udc->dev); -err1: + err_put_gadget: put_device(&gadget->dev); return ret; } diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index 6f2f71c054be..7874c112f3fd 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -1309,7 +1309,7 @@ static void udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe) { struct fsl_ep *ep = get_ep_by_pipe(udc, pipe); - if (ep->name) + if (ep->ep.name) nuke(ep, -ESHUTDOWN); } @@ -1697,7 +1697,7 @@ static void dtd_complete_irq(struct fsl_udc *udc) curr_ep = get_ep_by_pipe(udc, i); /* If the ep is configured */ - if (curr_ep->name == NULL) { + if (!curr_ep->ep.name) { WARNING("Invalid EP?"); continue; } diff --git a/drivers/usb/gadget/udc/goku_udc.h b/drivers/usb/gadget/udc/goku_udc.h index 86d2adafe149..64eb0f2b5ea0 100644 --- a/drivers/usb/gadget/udc/goku_udc.h +++ b/drivers/usb/gadget/udc/goku_udc.h @@ -28,7 +28,7 @@ struct goku_udc_regs { # define INT_EP1DATASET 0x00040 # define INT_EP2DATASET 0x00080 # define INT_EP3DATASET 0x00100 -#define INT_EPnNAK(n) (0x00100 < (n)) /* 0 < n < 4 */ +#define INT_EPnNAK(n) (0x00100 << (n)) /* 0 < n < 4 */ # define INT_EP1NAK 0x00200 # define INT_EP2NAK 0x00400 # define INT_EP3NAK 0x00800 diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 63a206122058..c12a1a6554ba 100644 --- 
a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -254,7 +254,7 @@ #define USB3_EP0_SS_MAX_PACKET_SIZE 512 #define USB3_EP0_HSFS_MAX_PACKET_SIZE 64 #define USB3_EP0_BUF_SIZE 8 -#define USB3_MAX_NUM_PIPES 30 +#define USB3_MAX_NUM_PIPES 6 /* This includes PIPE 0 */ #define USB3_WAIT_US 3 #define USB3_DMA_NUM_SETTING_AREA 4 /* @@ -334,6 +334,7 @@ struct renesas_usb3 { struct usb_gadget_driver *driver; struct extcon_dev *extcon; struct work_struct extcon_work; + struct dentry *dentry; struct renesas_usb3_ep *usb3_ep; int num_usb3_eps; @@ -623,6 +624,13 @@ static void usb3_disconnect(struct renesas_usb3 *usb3) usb3_usb2_pullup(usb3, 0); usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON); usb3_reset_epc(usb3); + usb3_disable_irq_1(usb3, USB_INT_1_B2_RSUM | USB_INT_1_B3_PLLWKUP | + USB_INT_1_B3_LUPSUCS | USB_INT_1_B3_DISABLE | + USB_INT_1_SPEED | USB_INT_1_B3_WRMRST | + USB_INT_1_B3_HOTRST | USB_INT_1_B2_SPND | + USB_INT_1_B2_L1SPND | USB_INT_1_B2_USBRST); + usb3_clear_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON); + usb3_init_epc_registers(usb3); if (usb3->driver) usb3->driver->disconnect(&usb3->gadget); @@ -2390,8 +2398,12 @@ static void renesas_usb3_debugfs_init(struct renesas_usb3 *usb3, file = debugfs_create_file("b_device", 0644, root, usb3, &renesas_usb3_b_device_fops); - if (!file) + if (!file) { dev_info(dev, "%s: Can't create debugfs mode\n", __func__); + debugfs_remove_recursive(root); + } else { + usb3->dentry = root; + } } /*------- platform_driver ------------------------------------------------*/ @@ -2399,6 +2411,7 @@ static int renesas_usb3_remove(struct platform_device *pdev) { struct renesas_usb3 *usb3 = platform_get_drvdata(pdev); + debugfs_remove_recursive(usb3->dentry); device_remove_file(&pdev->dev, &dev_attr_role); usb_del_gadget_udc(&usb3->gadget); diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index fa5692dec832..92b19721b595 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -637,14 +637,6 @@ config USB_UHCI_ASPEED bool default y if ARCH_ASPEED -config USB_UHCI_BIG_ENDIAN_MMIO - bool - default y if SPARC_LEON - -config USB_UHCI_BIG_ENDIAN_DESC - bool - default y if SPARC_LEON - config USB_FHCI_HCD tristate "Freescale QE USB Host Controller support" depends on OF_GPIO && QE_GPIO && QUICC_ENGINE diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c index cbb9b8e12c3c..8c5a6fee4dfd 100644 --- a/drivers/usb/host/ehci-dbg.c +++ b/drivers/usb/host/ehci-dbg.c @@ -837,7 +837,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf) default: /* unknown */ break; } - temp = (cap >> 8) & 0xff; + offset = (cap >> 8) & 0xff; } } #endif diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index df169c8e7225..37ef2ac9cdae 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -787,12 +787,12 @@ static struct urb *request_single_step_set_feature_urb( atomic_inc(&urb->use_count); atomic_inc(&urb->dev->urbnum); urb->setup_dma = dma_map_single( - hcd->self.controller, + hcd->self.sysdev, urb->setup_packet, sizeof(struct usb_ctrlrequest), DMA_TO_DEVICE); urb->transfer_dma = dma_map_single( - hcd->self.controller, + hcd->self.sysdev, urb->transfer_buffer, urb->transfer_buffer_length, DMA_FROM_DEVICE); diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 44924824fa41..b4599aa428f3 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -73,6 +73,7 @@ static const char hcd_name [] = "ohci_hcd"; 
#define STATECHANGE_DELAY msecs_to_jiffies(300) #define IO_WATCHDOG_DELAY msecs_to_jiffies(275) +#define IO_WATCHDOG_OFF 0xffffff00 #include "ohci.h" #include "pci-quirks.h" @@ -230,7 +231,7 @@ static int ohci_urb_enqueue ( } /* Start up the I/O watchdog timer, if it's not running */ - if (!timer_pending(&ohci->io_watchdog) && + if (ohci->prev_frame_no == IO_WATCHDOG_OFF && list_empty(&ohci->eds_in_use) && !(ohci->flags & OHCI_QUIRK_QEMU)) { ohci->prev_frame_no = ohci_frame_no(ohci); @@ -445,7 +446,8 @@ static int ohci_init (struct ohci_hcd *ohci) struct usb_hcd *hcd = ohci_to_hcd(ohci); /* Accept arbitrarily long scatter-gather lists */ - hcd->self.sg_tablesize = ~0; + if (!(hcd->driver->flags & HCD_LOCAL_MEM)) + hcd->self.sg_tablesize = ~0; if (distrust_firmware) ohci->flags |= OHCI_QUIRK_HUB_POWER; @@ -501,6 +503,7 @@ static int ohci_init (struct ohci_hcd *ohci) setup_timer(&ohci->io_watchdog, io_watchdog_func, (unsigned long) ohci); + ohci->prev_frame_no = IO_WATCHDOG_OFF; ohci->hcca = dma_alloc_coherent (hcd->self.controller, sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL); @@ -730,7 +733,7 @@ static void io_watchdog_func(unsigned long _ohci) u32 head; struct ed *ed; struct td *td, *td_start, *td_next; - unsigned frame_no; + unsigned frame_no, prev_frame_no = IO_WATCHDOG_OFF; unsigned long flags; spin_lock_irqsave(&ohci->lock, flags); @@ -835,7 +838,7 @@ static void io_watchdog_func(unsigned long _ohci) } } if (!list_empty(&ohci->eds_in_use)) { - ohci->prev_frame_no = frame_no; + prev_frame_no = frame_no; ohci->prev_wdh_cnt = ohci->wdh_cnt; ohci->prev_donehead = ohci_readl(ohci, &ohci->regs->donehead); @@ -845,6 +848,7 @@ static void io_watchdog_func(unsigned long _ohci) } done: + ohci->prev_frame_no = prev_frame_no; spin_unlock_irqrestore(&ohci->lock, flags); } @@ -973,6 +977,7 @@ static void ohci_stop (struct usb_hcd *hcd) if (quirk_nec(ohci)) flush_work(&ohci->nec_work); del_timer_sync(&ohci->io_watchdog); + ohci->prev_frame_no = IO_WATCHDOG_OFF; ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); ohci_usb_reset(ohci); diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index 248eb7702463..aca57bcb9afe 100644 --- a/drivers/usb/host/ohci-hub.c +++ b/drivers/usb/host/ohci-hub.c @@ -310,8 +310,10 @@ static int ohci_bus_suspend (struct usb_hcd *hcd) rc = ohci_rh_suspend (ohci, 0); spin_unlock_irq (&ohci->lock); - if (rc == 0) + if (rc == 0) { del_timer_sync(&ohci->io_watchdog); + ohci->prev_frame_no = IO_WATCHDOG_OFF; + } return rc; } diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index 641fed609911..24edb7674710 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c @@ -1018,6 +1018,8 @@ static void finish_unlinks(struct ohci_hcd *ohci) * have modified this list. normally it's just prepending * entries (which we'd ignore), but paranoia won't hurt. */ + *last = ed->ed_next; + ed->ed_next = NULL; modified = 0; /* unlink urbs as requested, but rescan the list after @@ -1076,21 +1078,22 @@ static void finish_unlinks(struct ohci_hcd *ohci) goto rescan_this; /* - * If no TDs are queued, take ED off the ed_rm_list. + * If no TDs are queued, ED is now idle. * Otherwise, if the HC is running, reschedule. - * If not, leave it on the list for further dequeues. + * If the HC isn't running, add ED back to the + * start of the list for later processing. 
*/ if (list_empty(&ed->td_list)) { - *last = ed->ed_next; - ed->ed_next = NULL; ed->state = ED_IDLE; list_del(&ed->in_use_list); } else if (ohci->rh_state == OHCI_RH_RUNNING) { - *last = ed->ed_next; - ed->ed_next = NULL; ed_schedule(ohci, ed); } else { - last = &ed->ed_next; + ed->ed_next = ohci->ed_rm_list; + ohci->ed_rm_list = ed; + /* Don't loop on the same ED */ + if (last == &ohci->ed_rm_list) + last = &ed->ed_next; } if (modified) diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 6dda3623a276..1d30f8826c54 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "pci-quirks.h" #include "xhci-ext-caps.h" @@ -65,6 +66,23 @@ #define AX_INDXC 0x30 #define AX_DATAC 0x34 +#define PT_ADDR_INDX 0xE8 +#define PT_READ_INDX 0xE4 +#define PT_SIG_1_ADDR 0xA520 +#define PT_SIG_2_ADDR 0xA521 +#define PT_SIG_3_ADDR 0xA522 +#define PT_SIG_4_ADDR 0xA523 +#define PT_SIG_1_DATA 0x78 +#define PT_SIG_2_DATA 0x56 +#define PT_SIG_3_DATA 0x34 +#define PT_SIG_4_DATA 0x12 +#define PT4_P1_REG 0xB521 +#define PT4_P2_REG 0xB522 +#define PT2_P1_REG 0xD520 +#define PT2_P2_REG 0xD521 +#define PT1_P1_REG 0xD522 +#define PT1_P2_REG 0xD523 + #define NB_PCIE_INDX_ADDR 0xe0 #define NB_PCIE_INDX_DATA 0xe4 #define PCIE_P_CNTL 0x10040 @@ -511,6 +529,98 @@ void usb_amd_dev_put(void) } EXPORT_SYMBOL_GPL(usb_amd_dev_put); +/* + * Check if a port is disabled in the BIOS on an AMD Promontory host. + * BIOS-disabled ports may wake on connect/disconnect and need a + * driver workaround to keep them disabled. + * Returns true if the port is marked disabled. + */ +bool usb_amd_pt_check_port(struct device *device, int port) +{ + unsigned char value, port_shift; + struct pci_dev *pdev; + u16 reg; + + pdev = to_pci_dev(device); + pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR); + + pci_read_config_byte(pdev, PT_READ_INDX, &value); + if (value != PT_SIG_1_DATA) + return false; + + pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR); + + pci_read_config_byte(pdev, PT_READ_INDX, &value); + if (value != PT_SIG_2_DATA) + return false; + + pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR); + + pci_read_config_byte(pdev, PT_READ_INDX, &value); + if (value != PT_SIG_3_DATA) + return false; + + pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR); + + pci_read_config_byte(pdev, PT_READ_INDX, &value); + if (value != PT_SIG_4_DATA) + return false; + + /* Check the disabled-port setting; if the bit is set, the port is enabled */ + switch (pdev->device) { + case 0x43b9: + case 0x43ba: + /* + * device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba) + * PT4_P1_REG bits[7..1] represent USB2.0 ports 6 to 0 + * PT4_P2_REG bits[6..0] represent ports 13 to 7 + */ + if (port > 6) { + reg = PT4_P2_REG; + port_shift = port - 7; + } else { + reg = PT4_P1_REG; + port_shift = port + 1; + } + break; + case 0x43bb: + /* + * device is AMD_PROMONTORYA_2(0x43bb) + * PT2_P1_REG bits[7..5] represent USB2.0 ports 2 to 0 + * PT2_P2_REG bits[5..0] represent ports 9 to 3 + */ + if (port > 2) { + reg = PT2_P2_REG; + port_shift = port - 3; + } else { + reg = PT2_P1_REG; + port_shift = port + 5; + } + break; + case 0x43bc: + /* + * device is AMD_PROMONTORYA_1(0x43bc) + * PT1_P1_REG[7..4] represent USB2.0 ports 3 to 0 + * PT1_P2_REG[5..0] represent ports 9 to 4 + */ + if (port > 3) { + reg = PT1_P2_REG; + port_shift = port - 4; + } else { + reg = PT1_P1_REG; + port_shift = port + 4; + } + break; + default: + return false; + } + 
pci_write_config_word(pdev, PT_ADDR_INDX, reg); + pci_read_config_byte(pdev, PT_READ_INDX, &value); + + return !(value & BIT(port_shift)); +} +EXPORT_SYMBOL_GPL(usb_amd_pt_check_port); + /* * Make sure the controller is completely inactive, unable to * generate interrupts or do DMA. @@ -1091,9 +1201,31 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev) writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); hc_init: - if (pdev->vendor == PCI_VENDOR_ID_INTEL) + if (pdev->vendor == PCI_VENDOR_ID_INTEL) { usb_enable_intel_xhci_ports(pdev); + /* + * Initialize the internal mux that shares a port between USB + * Device Controller and xHCI on platforms that have it. + */ +#define XHCI_INTEL_VENDOR_CAPS 192 + ext_cap_offset = xhci_find_next_ext_cap(base, + XHCI_HCC_PARAMS_OFFSET, + XHCI_INTEL_VENDOR_CAPS); + if (ext_cap_offset) { + struct intel_usb_mux *mux; + struct resource r; + + r.start = pci_resource_start(pdev, 0) + 0x80d8; + r.end = r.start + 8; + r.flags = IORESOURCE_MEM; + + mux = intel_usb_mux_register(&pdev->dev, &r); + if (IS_ERR(mux)) + dev_err(&pdev->dev, "failed to register mux\n"); + } + } + op_reg_base = base + XHCI_HC_LENGTH(readl(base)); /* Wait for the host controller to be ready before writing any diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index b68dcb5dd0fd..4ca0d9b7e463 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h @@ -17,6 +17,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); void sb800_prefetch(struct device *dev, int on); bool usb_xhci_needs_pci_reset(struct pci_dev *pdev); +bool usb_amd_pt_check_port(struct device *device, int port); #else struct pci_dev; static inline void usb_amd_quirk_pll_disable(void) {} @@ -25,6 +26,10 @@ static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {} static inline void usb_amd_dev_put(void) {} static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} static inline void sb800_prefetch(struct device *dev, int on) {} +static inline bool usb_amd_pt_check_port(struct device *device, int port) +{ + return false; +} #endif /* CONFIG_USB_PCI */ #endif /* __LINUX_USB_PCI_QUIRKS_H */ diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index a2336deb5e36..c01d1f3a1c7d 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -366,7 +366,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci, slot_id = 0; for (i = 0; i < MAX_HC_SLOTS; i++) { - if (!xhci->devs[i]) + if (!xhci->devs[i] || !xhci->devs[i]->udev) continue; speed = xhci->devs[i]->udev->speed; if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3)) @@ -634,7 +634,10 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci, xhci_dbg(xhci, "Disable all slots\n"); spin_unlock_irqrestore(&xhci->lock, *flags); for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) { - retval = xhci_disable_slot(xhci, NULL, i); + if (!xhci->devs[i]) + continue; + + retval = xhci_disable_slot(xhci, i); if (retval) xhci_err(xhci, "Failed to disable slot %d, %d. 
Enter test mode anyway\n", i, retval); @@ -1528,6 +1531,13 @@ int xhci_bus_suspend(struct usb_hcd *hcd) t2 |= PORT_WKOC_E | PORT_WKCONN_E; t2 &= ~PORT_WKDISC_E; } + + if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) && + (hcd->speed < HCD_USB3)) { + if (usb_amd_pt_check_port(hcd->self.controller, + port_index)) + t2 &= ~PORT_WAKE_BITS; + } } else t2 &= ~PORT_WAKE_BITS; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 2a82c927ded2..b7b55eb82714 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -604,7 +604,7 @@ struct xhci_ring *xhci_stream_id_to_ring( if (!ep->stream_info) return NULL; - if (stream_id > ep->stream_info->num_streams) + if (stream_id >= ep->stream_info->num_streams) return NULL; return ep->stream_info->stream_rings[stream_id]; } @@ -891,12 +891,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) dev = xhci->devs[slot_id]; - trace_xhci_free_virt_device(dev); - xhci->dcbaa->dev_context_ptrs[slot_id] = 0; if (!dev) return; + trace_xhci_free_virt_device(dev); + if (dev->tt_info) old_active_eps = dev->tt_info->active_eps; @@ -926,6 +926,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) if (dev->out_ctx) xhci_free_container_ctx(xhci, dev->out_ctx); + if (dev->udev && dev->udev->slot_id) + dev->udev->slot_id = 0; kfree(xhci->devs[slot_id]); xhci->devs[slot_id] = NULL; } @@ -947,6 +949,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) if (!vdev) return; + if (vdev->real_port == 0 || + vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) { + xhci_dbg(xhci, "Bad vdev->real_port.\n"); + goto out; + } + tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts); list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { /* is this a hub device that added a tt_info to the tts list */ @@ -960,6 +968,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) } } } +out: /* we are now at a leaf device */ xhci_free_virt_device(xhci, slot_id); } @@ -976,10 +985,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, return 0; } - xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); - if (!xhci->devs[slot_id]) + dev = kzalloc(sizeof(*dev), flags); + if (!dev) return 0; - dev = xhci->devs[slot_id]; /* Allocate the (output) device context that will be used in the HC. 
*/ dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); @@ -1020,9 +1028,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, trace_xhci_alloc_virt_device(dev); + xhci->devs[slot_id] = dev; + return 1; fail: - xhci_free_virt_device(xhci, slot_id); + + if (dev->in_ctx) + xhci_free_container_ctx(xhci, dev->in_ctx); + if (dev->out_ctx) + xhci_free_container_ctx(xhci, dev->out_ctx); + kfree(dev); + return 0; } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 76f392954733..01a8c7e6960f 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -24,6 +24,9 @@ #include #include #include +#include +#include +#include #include "xhci.h" #include "xhci-trace.h" @@ -54,6 +57,12 @@ #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 +#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 +#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba +#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb +#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc +#define XHCI_INTEL_VENDOR_CAPS 192 + #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 static const char hcd_name[] = "xhci_hcd"; @@ -69,11 +78,33 @@ static const struct xhci_driver_overrides xhci_pci_overrides __initconst = { /* called after powerup, by probe or system-pm "wakeup" */ static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev) { + struct usb_hcd *hcd; + int retval; + /* * TODO: Implement finding debug ports later. * TODO: see if there are any quirks that need to be added to handle * new extended capabilities. */ + retval = XHCI_HCC_EXT_CAPS(readl(&xhci->cap_regs->hcc_params)); + retval = xhci_find_next_ext_cap(&xhci->cap_regs->hc_capbase, + retval << 2, + XHCI_INTEL_VENDOR_CAPS); + /* If this capability is found, register the host on the PHY for OTG purposes */ + if (pdev->vendor == PCI_VENDOR_ID_INTEL && retval) { + hcd = xhci_to_hcd(xhci); + + hcd->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2); + if (!IS_ERR_OR_NULL(hcd->usb_phy)) { + retval = otg_set_host(hcd->usb_phy->otg, &hcd->self); + if (retval) + usb_put_phy(hcd->usb_phy); + } else { + xhci_dbg(xhci, "No USB2 PHY transceiver found\n"); + hcd->usb_phy = NULL; + } + } + /* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */ if (!pci_set_mwi(pdev)) @@ -134,9 +165,22 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) xhci->quirks |= XHCI_AMD_PLL_FIX; + if (pdev->vendor == PCI_VENDOR_ID_AMD && + (pdev->device == 0x15e0 || + pdev->device == 0x15e1 || + pdev->device == 0x43bb)) + xhci->quirks |= XHCI_SUSPEND_DELAY; + if (pdev->vendor == PCI_VENDOR_ID_AMD) xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if ((pdev->vendor == PCI_VENDOR_ID_AMD) && + ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) || + (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) || + (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) || + (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1))) + xhci->quirks |= XHCI_U2_DISABLE_WAKE; + if (pdev->vendor == PCI_VENDOR_ID_INTEL) { xhci->quirks |= XHCI_LPM_SUPPORT; xhci->quirks |= XHCI_INTEL_HOST; @@ -189,6 +233,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_TRUST_TX_LENGTH; xhci->quirks |= XHCI_BROKEN_STREAMS; } + if (pdev->vendor == PCI_VENDOR_ID_RENESAS && + pdev->device == 0x0014) + xhci->quirks |= XHCI_TRUST_TX_LENGTH; if (pdev->vendor == PCI_VENDOR_ID_RENESAS && pdev->device == 0x0015) xhci->quirks |= XHCI_RESET_ON_RESUME; diff --git
a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 1cb6eaef4ae1..830dd0dbbce0 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -355,7 +355,6 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); struct xhci_hcd *xhci = hcd_to_xhci(hcd); - int ret; /* * xhci_suspend() needs `do_wakeup` to know whether host is allowed @@ -365,12 +364,7 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev) * reconsider this when xhci_plat_suspend enlarges its scope, e.g., * also applies to runtime suspend. */ - ret = xhci_suspend(xhci, device_may_wakeup(dev)); - - if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk)) - clk_disable_unprepare(xhci->clk); - - return ret; + return xhci_suspend(xhci, device_may_wakeup(dev)); } static int __maybe_unused xhci_plat_resume(struct device *dev) @@ -379,9 +373,6 @@ static int __maybe_unused xhci_plat_resume(struct device *dev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); int ret; - if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk)) - clk_prepare_enable(xhci->clk); - ret = xhci_priv_resume_quirk(hcd); if (ret) return ret; @@ -423,7 +414,6 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match); static struct platform_driver usb_xhci_driver = { .probe = xhci_plat_probe, .remove = xhci_plat_remove, - .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "xhci-hcd", .pm = &xhci_plat_pm_ops, diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index 198bc188ab25..97f23cc31f4c 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c @@ -86,6 +86,10 @@ static const struct soc_device_attribute rcar_quirks_match[] = { .soc_id = "r8a7796", .data = (void *)RCAR_XHCI_FIRMWARE_V3, }, + { + .soc_id = "r8a77965", + .data = (void *)RCAR_XHCI_FIRMWARE_V3, + }, { /* sentinel */ }, }; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 82c746e2d85c..6996235e34a9 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2486,12 +2486,16 @@ static int handle_tx_event(struct xhci_hcd *xhci, */ if (list_empty(&ep_ring->td_list)) { /* - * A stopped endpoint may generate an extra completion - * event if the device was suspended. Don't print - * warnings. + * Don't print warnings if the event is due to a stopped + * endpoint generating an extra completion event when the + * device was suspended, or an event for the last TRB of a + * short TD for which we already received a short event. + * The short TD has already been removed from the TD list.
*/ + if (!(trb_comp_code == COMP_STOPPED || - trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) { + trb_comp_code == COMP_STOPPED_LENGTH_INVALID || + ep_ring->last_td_was_short)) { xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index); @@ -3117,7 +3121,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, { u32 maxp, total_packet_count; - /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */ + /* MTK xHCI 0.96 contains some features from 1.0 */ if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) return ((td_total_len - transferred) >> 10); @@ -3126,8 +3130,8 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, trb_buff_len == td_total_len) return 0; - /* for MTK xHCI, TD size doesn't include this TRB */ - if (xhci->quirks & XHCI_MTK_HOST) + /* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */ + if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100)) trb_buff_len = 0; maxp = usb_endpoint_maxp(&urb->ep->desc); diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h index f20753b99624..02a1164ca599 100644 --- a/drivers/usb/host/xhci-trace.h +++ b/drivers/usb/host/xhci-trace.h @@ -158,6 +158,37 @@ DEFINE_EVENT(xhci_log_trb, xhci_queue_trb, TP_ARGS(ring, trb) ); +DECLARE_EVENT_CLASS(xhci_log_free_virt_dev, + TP_PROTO(struct xhci_virt_device *vdev), + TP_ARGS(vdev), + TP_STRUCT__entry( + __field(void *, vdev) + __field(unsigned long long, out_ctx) + __field(unsigned long long, in_ctx) + __field(u8, fake_port) + __field(u8, real_port) + __field(u16, current_mel) + + ), + TP_fast_assign( + __entry->vdev = vdev; + __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma; + __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma; + __entry->fake_port = (u8) vdev->fake_port; + __entry->real_port = (u8) vdev->real_port; + __entry->current_mel = (u16) vdev->current_mel; + ), + TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d", + __entry->vdev, __entry->in_ctx, __entry->out_ctx, + __entry->fake_port, __entry->real_port, __entry->current_mel + ) +); + +DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device, + TP_PROTO(struct xhci_virt_device *vdev), + TP_ARGS(vdev) +); + DECLARE_EVENT_CLASS(xhci_log_virt_dev, TP_PROTO(struct xhci_virt_device *vdev), TP_ARGS(vdev), @@ -195,11 +226,6 @@ DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device, TP_ARGS(vdev) ); -DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device, - TP_PROTO(struct xhci_virt_device *vdev), - TP_ARGS(vdev) -); - DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device, TP_PROTO(struct xhci_virt_device *vdev), TP_ARGS(vdev) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 51535ba2bcd4..fe84b36627ec 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -856,6 +856,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) spin_unlock_irqrestore(&xhci->lock, flags); } +static bool xhci_pending_portevent(struct xhci_hcd *xhci) +{ + __le32 __iomem **port_array; + int port_index; + u32 status; + u32 portsc; + + status = readl(&xhci->op_regs->status); + if (status & STS_EINT) + return true; + /* + * Checking STS_EINT is not enough as there is a lag between a change + * bit being set and the Port Status Change Event that it generated + * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
+ */ + + port_index = xhci->num_usb2_ports; + port_array = xhci->usb2_ports; + while (port_index--) { + portsc = readl(port_array[port_index]); + if (portsc & PORT_CHANGE_MASK || + (portsc & PORT_PLS_MASK) == XDEV_RESUME) + return true; + } + port_index = xhci->num_usb3_ports; + port_array = xhci->usb3_ports; + while (port_index--) { + portsc = readl(port_array[port_index]); + if (portsc & PORT_CHANGE_MASK || + (portsc & PORT_PLS_MASK) == XDEV_RESUME) + return true; + } + return false; +} + /* * Stop HC (not bus-specific) * @@ -887,6 +922,9 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); del_timer_sync(&xhci->shared_hcd->rh_timer); + if (xhci->quirks & XHCI_SUSPEND_DELAY) + usleep_range(1000, 1500); + spin_lock_irq(&xhci->lock); clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); @@ -952,7 +990,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend); */ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) { - u32 command, temp = 0, status; + u32 command, temp = 0; struct usb_hcd *hcd = xhci_to_hcd(xhci); struct usb_hcd *secondary_hcd; int retval = 0; @@ -1074,8 +1112,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) done: if (retval == 0) { /* Resume root hubs only when have pending events. */ - status = readl(&xhci->op_regs->status); - if (status & STS_EINT) { + if (xhci_pending_portevent(xhci)) { usb_hcd_resume_root_hub(xhci->shared_hcd); usb_hcd_resume_root_hub(hcd); } @@ -3520,11 +3557,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) struct xhci_virt_device *virt_dev; struct xhci_slot_ctx *slot_ctx; int i, ret; - struct xhci_command *command; - - command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); - if (!command) - return; #ifndef CONFIG_USB_DEFAULT_PERSIST /* @@ -3540,10 +3572,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) /* If the host is halted due to driver unload, we still need to free the * device. */ - if (ret <= 0 && ret != -ENODEV) { - kfree(command); + if (ret <= 0 && ret != -ENODEV) return; - } virt_dev = xhci->devs[udev->slot_id]; slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); @@ -3555,26 +3585,22 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); } - xhci_disable_slot(xhci, command, udev->slot_id); + virt_dev->udev = NULL; + xhci_disable_slot(xhci, udev->slot_id); /* * Event command completion handler will free any data structures * associated with the slot. XXX Can free sleep? 
*/ } -int xhci_disable_slot(struct xhci_hcd *xhci, struct xhci_command *command, - u32 slot_id) +int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) { + struct xhci_command *command; unsigned long flags; u32 state; int ret = 0; - struct xhci_virt_device *virt_dev; - virt_dev = xhci->devs[slot_id]; - if (!virt_dev) - return -EINVAL; - if (!command) - command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); + command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); if (!command) return -ENOMEM; @@ -3583,17 +3609,16 @@ int xhci_disable_slot(struct xhci_hcd *xhci, struct xhci_command *command, state = readl(&xhci->op_regs->status); if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || (xhci->xhc_state & XHCI_STATE_HALTED)) { - xhci_free_virt_device(xhci, slot_id); spin_unlock_irqrestore(&xhci->lock, flags); kfree(command); - return ret; + return -ENODEV; } ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, slot_id); if (ret) { spin_unlock_irqrestore(&xhci->lock, flags); - xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); + kfree(command); return ret; } xhci_ring_cmd_db(xhci); @@ -3668,6 +3693,8 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) return 0; } + xhci_free_command(xhci, command); + if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { spin_lock_irqsave(&xhci->lock, flags); ret = xhci_reserve_host_control_ep_resources(xhci); @@ -3703,18 +3730,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) pm_runtime_get_noresume(hcd->self.controller); #endif - - xhci_free_command(xhci, command); /* Is this a LS or FS device under a HS hub? */ /* Hub or peripherial? */ return 1; disable_slot: - /* Disable slot, if we can do it without mem alloc */ - kfree(command->completion); - command->completion = NULL; - command->status = 0; - return xhci_disable_slot(xhci, command, udev->slot_id); + return xhci_disable_slot(xhci, udev->slot_id); } /* @@ -4778,6 +4799,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) * quirks */ struct device *dev = hcd->self.sysdev; + unsigned int minor_rev; int retval; /* Accept arbitrarily long scatter-gather lists */ @@ -4805,12 +4827,19 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) */ hcd->has_tt = 1; } else { - /* Some 3.1 hosts return sbrn 0x30, can't rely on sbrn alone */ - if (xhci->sbrn == 0x31 || xhci->usb3_rhub.min_rev >= 1) { - xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n"); + /* + * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol + * minor revision instead of sbrn + */ + minor_rev = xhci->usb3_rhub.min_rev; + if (minor_rev) { hcd->speed = HCD_USB31; hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; } + xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n", + minor_rev, + minor_rev ? "Enhanced" : ""); + /* xHCI private pointer was set in xhci_pci_probe for the second * registered roothub. */ diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 2b48aa4f6b76..a9e1fa5d443b 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -392,6 +392,10 @@ struct xhci_op_regs { #define PORT_PLC (1 << 22) /* port configure error change - port failed to configure its link partner */ #define PORT_CEC (1 << 23) +#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \ + PORT_RC | PORT_PLC | PORT_CEC) + + /* Cold Attach Status - xHC can set this bit to report device attached during * Sx state. 
Warm port reset should be perfomed to clear this bit and move port * to connected state. @@ -728,11 +732,12 @@ struct xhci_ep_ctx { /* bits 10:14 are Max Primary Streams */ /* bit 15 is Linear Stream Array */ /* Interval - period between requests to an endpoint - 125u increments. */ -#define EP_INTERVAL(p) (((p) & 0xff) << 16) -#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) -#define CTX_TO_EP_INTERVAL(p) (((p) >> 16) & 0xff) -#define EP_MAXPSTREAMS_MASK (0x1f << 10) -#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) +#define EP_INTERVAL(p) (((p) & 0xff) << 16) +#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) +#define CTX_TO_EP_INTERVAL(p) (((p) >> 16) & 0xff) +#define EP_MAXPSTREAMS_MASK (0x1f << 10) +#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) +#define CTX_TO_EP_MAXPSTREAMS(p) (((p) & EP_MAXPSTREAMS_MASK) >> 10) /* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */ #define EP_HAS_LSA (1 << 15) /* hosts with LEC=1 use bits 31:24 as ESIT high bits. */ @@ -1828,8 +1833,10 @@ struct xhci_hcd { /* For controller with a broken Port Disable implementation */ #define XHCI_BROKEN_PORT_PED (1 << 25) #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) -/* Reserved. It was XHCI_U2_DISABLE_WAKE */ +#define XHCI_U2_DISABLE_WAKE (1 << 27) #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) +#define XHCI_SUSPEND_DELAY (1 << 30) +#define XHCI_INTEL_USB_ROLE_SW (1 << 31) unsigned int num_active_eps; unsigned int limit_active_eps; @@ -2012,8 +2019,8 @@ int xhci_run(struct usb_hcd *hcd); int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); void xhci_init_driver(struct hc_driver *drv, const struct xhci_driver_overrides *over); -int xhci_disable_slot(struct xhci_hcd *xhci, - struct xhci_command *command, u32 slot_id); +int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); +int xhci_ext_cap_init(struct xhci_hcd *xhci); int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup); int xhci_resume(struct xhci_hcd *xhci, bool hibernated); @@ -2539,21 +2546,22 @@ static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq, u8 burst; u8 cerr; u8 mult; - u8 lsa; - u8 hid; + + bool lsa; + bool hid; esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 | CTX_TO_MAX_ESIT_PAYLOAD(tx_info); ep_state = info & EP_STATE_MASK; - max_pstr = info & EP_MAXPSTREAMS_MASK; + max_pstr = CTX_TO_EP_MAXPSTREAMS(info); interval = CTX_TO_EP_INTERVAL(info); mult = CTX_TO_EP_MULT(info) + 1; - lsa = info & EP_HAS_LSA; + lsa = !!(info & EP_HAS_LSA); cerr = (info2 & (3 << 1)) >> 1; ep_type = CTX_TO_EP_TYPE(info2); - hid = info2 & (1 << 7); + hid = !!(info2 & (1 << 7)); burst = CTX_TO_MAX_BURST(info2); maxp = MAX_PACKET_DECODED(info2); diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 680bddb3ce05..6635a3c990f6 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -46,6 +46,9 @@ #define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 /* USB Product ID of Micro-CASSY Time (reserved) */ #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 /* USB Product ID of Micro-CASSY Temperature */ #define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 /* USB Product ID of Micro-CASSY pH */ +#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040 /* USB Product ID of Power Analyser CASSY */ +#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042 /* USB Product ID of Converter Controller CASSY */ +#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043 /* USB Product ID of Machine Test CASSY */ #define USB_DEVICE_ID_LD_JWM 0x1080 /* USB 
Product ID of Joule and Wattmeter */ #define USB_DEVICE_ID_LD_DMMP 0x1081 /* USB Product ID of Digital Multimeter P (reserved) */ #define USB_DEVICE_ID_LD_UMIP 0x1090 /* USB Product ID of UMI P */ @@ -88,6 +91,9 @@ static const struct usb_device_id ld_usb_table[] = { { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) }, + { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) }, { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c index f019d80ca9e4..dc45cfc8eb10 100644 --- a/drivers/usb/misc/sisusbvga/sisusb_con.c +++ b/drivers/usb/misc/sisusbvga/sisusb_con.c @@ -1216,7 +1216,7 @@ sisusbcon_do_font_op(struct sisusb_usb_data *sisusb, int set, int slot, /* Interface routine */ static int sisusbcon_font_set(struct vc_data *c, struct console_font *font, - unsigned flags) + unsigned int flags) { struct sisusb_usb_data *sisusb; unsigned charcount = font->charcount; @@ -1337,29 +1337,65 @@ static void sisusbdummycon_init(struct vc_data *vc, int init) vc_resize(vc, 80, 25); } -static int sisusbdummycon_dummy(void) +static void sisusbdummycon_deinit(struct vc_data *vc) { } +static void sisusbdummycon_clear(struct vc_data *vc, int sy, int sx, + int height, int width) { } +static void sisusbdummycon_putc(struct vc_data *vc, int c, int ypos, + int xpos) { } +static void sisusbdummycon_putcs(struct vc_data *vc, const unsigned short *s, + int count, int ypos, int xpos) { } +static void sisusbdummycon_cursor(struct vc_data *vc, int mode) { } + +static bool sisusbdummycon_scroll(struct vc_data *vc, unsigned int top, + unsigned int bottom, enum con_scroll dir, + unsigned int lines) { - return 0; + return false; } -#define SISUSBCONDUMMY (void *)sisusbdummycon_dummy +static int sisusbdummycon_switch(struct vc_data *vc) +{ + return 0; +} + +static int sisusbdummycon_blank(struct vc_data *vc, int blank, int mode_switch) +{ + return 0; +} + +static int sisusbdummycon_font_set(struct vc_data *vc, + struct console_font *font, + unsigned int flags) +{ + return 0; +} + +static int sisusbdummycon_font_default(struct vc_data *vc, + struct console_font *font, char *name) +{ + return 0; +} + +static int sisusbdummycon_font_copy(struct vc_data *vc, int con) +{ + return 0; +} static const struct consw sisusb_dummy_con = { .owner = THIS_MODULE, .con_startup = sisusbdummycon_startup, .con_init = sisusbdummycon_init, - .con_deinit = SISUSBCONDUMMY, - .con_clear = SISUSBCONDUMMY, - .con_putc = SISUSBCONDUMMY, - .con_putcs = SISUSBCONDUMMY, - .con_cursor = SISUSBCONDUMMY, - .con_scroll = SISUSBCONDUMMY, - .con_switch = SISUSBCONDUMMY, - .con_blank = SISUSBCONDUMMY, - .con_font_set = SISUSBCONDUMMY, - .con_font_get = SISUSBCONDUMMY, - .con_font_default = SISUSBCONDUMMY, - .con_font_copy = SISUSBCONDUMMY, + .con_deinit = sisusbdummycon_deinit, + .con_clear = sisusbdummycon_clear, + .con_putc = sisusbdummycon_putc, + .con_putcs = sisusbdummycon_putcs, + .con_cursor = sisusbdummycon_cursor, + .con_scroll = sisusbdummycon_scroll, + .con_switch = sisusbdummycon_switch, + .con_blank = sisusbdummycon_blank, + 
.con_font_set = sisusbdummycon_font_set, + .con_font_default = sisusbdummycon_font_default, + .con_font_copy = sisusbdummycon_font_copy, }; int diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c index 8e7737d7ac0a..03be5d574f23 100644 --- a/drivers/usb/misc/usb3503.c +++ b/drivers/usb/misc/usb3503.c @@ -292,6 +292,8 @@ static int usb3503_probe(struct usb3503 *hub) if (gpio_is_valid(hub->gpio_reset)) { err = devm_gpio_request_one(dev, hub->gpio_reset, GPIOF_OUT_INIT_LOW, "usb3503 reset"); + /* Datasheet defines a hardware reset to be at least 100us */ + usleep_range(100, 10000); if (err) { dev_err(dev, "unable to request GPIO %d as reset pin (%d)\n", diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 58abdf28620a..47763311a42e 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -400,8 +400,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct usb_yurex *dev; - int retval = 0; - int bytes_read = 0; + int len = 0; char in_buffer[20]; unsigned long flags; @@ -409,26 +408,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, mutex_lock(&dev->io_mutex); if (!dev->interface) { /* already disconnected */ - retval = -ENODEV; - goto exit; + mutex_unlock(&dev->io_mutex); + return -ENODEV; } spin_lock_irqsave(&dev->lock, flags); - bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu); + len = snprintf(in_buffer, 20, "%lld\n", dev->bbu); spin_unlock_irqrestore(&dev->lock, flags); - - if (*ppos < bytes_read) { - if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos)) - retval = -EFAULT; - else { - retval = bytes_read - *ppos; - *ppos += bytes_read; - } - } - -exit: mutex_unlock(&dev->io_mutex); - return retval; + + return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); } static ssize_t yurex_write(struct file *file, const char __user *user_buffer, diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index f6ae753ab99b..f932f40302df 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -1004,7 +1004,9 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg break; case MON_IOCQ_RING_SIZE: + mutex_lock(&rp->fetch_lock); ret = rp->b_size; + mutex_unlock(&rp->fetch_lock); break; case MON_IOCT_RING_SIZE: @@ -1231,12 +1233,16 @@ static int mon_bin_vma_fault(struct vm_fault *vmf) unsigned long offset, chunk_idx; struct page *pageptr; + mutex_lock(&rp->fetch_lock); offset = vmf->pgoff << PAGE_SHIFT; - if (offset >= rp->b_size) + if (offset >= rp->b_size) { + mutex_unlock(&rp->fetch_lock); return VM_FAULT_SIGBUS; + } chunk_idx = offset / CHUNK_SIZE; pageptr = rp->b_vec[chunk_idx].pg; get_page(pageptr); + mutex_unlock(&rp->fetch_lock); vmf->page = pageptr; return 0; } diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index f5e1bb5e5217..984f7e12a6a5 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c @@ -85,6 +85,8 @@ struct mon_reader_text { wait_queue_head_t wait; int printf_size; + size_t printf_offset; + size_t printf_togo; char *printf_buf; struct mutex printf_lock; @@ -376,75 +378,103 @@ static int mon_text_open(struct inode *inode, struct file *file) return rc; } -/* - * For simplicity, we read one record in one system call and throw out - * what does not fit. This means that the following does not work: - * dd if=/dbg/usbmon/0t bs=10 - * Also, we do not allow seeks and do not bother advancing the offset. 
- */ +static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp, + char __user * const buf, const size_t nbytes) +{ + const size_t togo = min(nbytes, rp->printf_togo); + + if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo)) + return -EFAULT; + rp->printf_togo -= togo; + rp->printf_offset += togo; + return togo; +} + +/* ppos is not advanced since the llseek operation is not permitted. */ static ssize_t mon_text_read_t(struct file *file, char __user *buf, - size_t nbytes, loff_t *ppos) + size_t nbytes, loff_t *ppos) { struct mon_reader_text *rp = file->private_data; struct mon_event_text *ep; struct mon_text_ptr ptr; + ssize_t ret; - ep = mon_text_read_wait(rp, file); - if (IS_ERR(ep)) - return PTR_ERR(ep); mutex_lock(&rp->printf_lock); - ptr.cnt = 0; - ptr.pbuf = rp->printf_buf; - ptr.limit = rp->printf_size; - - mon_text_read_head_t(rp, &ptr, ep); - mon_text_read_statset(rp, &ptr, ep); - ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, - " %d", ep->length); - mon_text_read_data(rp, &ptr, ep); - - if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) - ptr.cnt = -EFAULT; + + if (rp->printf_togo == 0) { + + ep = mon_text_read_wait(rp, file); + if (IS_ERR(ep)) { + mutex_unlock(&rp->printf_lock); + return PTR_ERR(ep); + } + ptr.cnt = 0; + ptr.pbuf = rp->printf_buf; + ptr.limit = rp->printf_size; + + mon_text_read_head_t(rp, &ptr, ep); + mon_text_read_statset(rp, &ptr, ep); + ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, + " %d", ep->length); + mon_text_read_data(rp, &ptr, ep); + + rp->printf_togo = ptr.cnt; + rp->printf_offset = 0; + + kmem_cache_free(rp->e_slab, ep); + } + + ret = mon_text_copy_to_user(rp, buf, nbytes); mutex_unlock(&rp->printf_lock); - kmem_cache_free(rp->e_slab, ep); - return ptr.cnt; + return ret; } +/* ppos is not advanced since the llseek operation is not permitted. 
*/ static ssize_t mon_text_read_u(struct file *file, char __user *buf, - size_t nbytes, loff_t *ppos) + size_t nbytes, loff_t *ppos) { struct mon_reader_text *rp = file->private_data; struct mon_event_text *ep; struct mon_text_ptr ptr; + ssize_t ret; - ep = mon_text_read_wait(rp, file); - if (IS_ERR(ep)) - return PTR_ERR(ep); mutex_lock(&rp->printf_lock); - ptr.cnt = 0; - ptr.pbuf = rp->printf_buf; - ptr.limit = rp->printf_size; - mon_text_read_head_u(rp, &ptr, ep); - if (ep->type == 'E') { - mon_text_read_statset(rp, &ptr, ep); - } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { - mon_text_read_isostat(rp, &ptr, ep); - mon_text_read_isodesc(rp, &ptr, ep); - } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { - mon_text_read_intstat(rp, &ptr, ep); - } else { - mon_text_read_statset(rp, &ptr, ep); + if (rp->printf_togo == 0) { + + ep = mon_text_read_wait(rp, file); + if (IS_ERR(ep)) { + mutex_unlock(&rp->printf_lock); + return PTR_ERR(ep); + } + ptr.cnt = 0; + ptr.pbuf = rp->printf_buf; + ptr.limit = rp->printf_size; + + mon_text_read_head_u(rp, &ptr, ep); + if (ep->type == 'E') { + mon_text_read_statset(rp, &ptr, ep); + } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { + mon_text_read_isostat(rp, &ptr, ep); + mon_text_read_isodesc(rp, &ptr, ep); + } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { + mon_text_read_intstat(rp, &ptr, ep); + } else { + mon_text_read_statset(rp, &ptr, ep); + } + ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, + " %d", ep->length); + mon_text_read_data(rp, &ptr, ep); + + rp->printf_togo = ptr.cnt; + rp->printf_offset = 0; + + kmem_cache_free(rp->e_slab, ep); } - ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, - " %d", ep->length); - mon_text_read_data(rp, &ptr, ep); - if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) - ptr.cnt = -EFAULT; + ret = mon_text_copy_to_user(rp, buf, nbytes); mutex_unlock(&rp->printf_lock); - kmem_cache_free(rp->e_slab, ep); - return ptr.cnt; + return ret; } static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c index 99c65b0788ff..947579842ad7 100644 --- a/drivers/usb/mtu3/mtu3_core.c +++ b/drivers/usb/mtu3/mtu3_core.c @@ -774,9 +774,9 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb) return -ENOMEM; mtu->irq = platform_get_irq(pdev, 0); - if (mtu->irq <= 0) { + if (mtu->irq < 0) { dev_err(dev, "fail to get irq number\n"); - return -ENODEV; + return mtu->irq; } dev_info(dev, "irq %d\n", mtu->irq); diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index df88123274ca..972bf4210189 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c @@ -305,7 +305,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci) musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; portstate(musb->port1_status |= USB_PORT_STAT_POWER); del_timer(&otg_workaround); - } else { + } else if (!(musb->int_usb & MUSB_INTR_BABBLE)) { + /* + * When a babble condition happens, a drvvbus interrupt + * is also generated. Ignore this drvvbus interrupt + * and let the babble interrupt handler recover the + * controller; otherwise, the host-mode flag is lost + * due to the MUSB_DEV_MODE() call below and the babble + * recovery logic will not be called.
+ */ musb->is_active = 0; MUSB_DEV_MODE(musb); otg->default_a = 0; diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index ff5a1a8989d5..ff17e94ef465 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1780,6 +1780,7 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf) int vbus; u8 devctl; + pm_runtime_get_sync(dev); spin_lock_irqsave(&musb->lock, flags); val = musb->a_wait_bcon; vbus = musb_platform_get_vbus_status(musb); @@ -1793,6 +1794,7 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf) vbus = 0; } spin_unlock_irqrestore(&musb->lock, flags); + pm_runtime_put_sync(dev); return sprintf(buf, "Vbus %s, timeout %lu msec\n", vbus ? "on" : "off", val); @@ -2496,11 +2498,11 @@ static int musb_remove(struct platform_device *pdev) musb_disable_interrupts(musb); musb_writeb(musb->mregs, MUSB_DEVCTL, 0); spin_unlock_irqrestore(&musb->lock, flags); + musb_platform_exit(musb); pm_runtime_dont_use_autosuspend(musb->controller); pm_runtime_put_sync(musb->controller); pm_runtime_disable(musb->controller); - musb_platform_exit(musb); musb_phy_callback = NULL; if (musb->dma_controller) musb_dma_controller_destroy(musb->dma_controller); @@ -2733,7 +2735,8 @@ static int musb_resume(struct device *dev) if ((devctl & mask) != (musb->context.devctl & mask)) musb->port1_status = 0; - musb_start(musb); + musb_enable_interrupts(musb); + musb_platform_enable(musb); spin_lock_irqsave(&musb->lock, flags); error = musb_run_resume_work(musb); diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index bc6d1717c9ec..87f932d4b72c 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -442,7 +442,6 @@ void musb_g_tx(struct musb *musb, u8 epnum) req = next_request(musb_ep); request = &req->request; - trace_musb_req_tx(req); csr = musb_readw(epio, MUSB_TXCSR); musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr); @@ -481,6 +480,8 @@ void musb_g_tx(struct musb *musb, u8 epnum) u8 is_dma = 0; bool short_packet = false; + trace_musb_req_tx(req); + if (dma && (csr & MUSB_TXCSR_DMAENAB)) { is_dma = 1; csr |= MUSB_TXCSR_P_WZC_BITS; diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index 844a309fe895..e85b9c2a4910 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c @@ -114,15 +114,19 @@ static int service_tx_status_request( } is_in = epnum & USB_DIR_IN; - if (is_in) { - epnum &= 0x0f; + epnum &= 0x0f; + if (epnum >= MUSB_C_NUM_EPS) { + handled = -EINVAL; + break; + } + + if (is_in) ep = &musb->endpoints[epnum].ep_in; - } else { + else ep = &musb->endpoints[epnum].ep_out; - } regs = musb->endpoints[epnum].regs; - if (epnum >= MUSB_C_NUM_EPS || !ep->desc) { + if (!ep->desc) { handled = -EINVAL; break; } diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index b17450a59882..802388bb42ba 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -418,13 +418,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb, } } - /* - * The pipe must be broken if current urb->status is set, so don't - * start next urb. - * TODO: to minimize the risk of regression, only check urb->status - * for RX, until we have a test case to understand the behavior of TX. - */ - if ((!status || !is_in) && qh && qh->is_ready) { + if (qh != NULL && qh->is_ready) { musb_dbg(musb, "... next ep%d %cX urb %p", hw_ep->epnum, is_in ? 
'R' : 'T', next_urb(qh)); musb_start_urb(musb, is_in, qh); @@ -1029,7 +1023,9 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep, /* set tx_reinit and schedule the next qh */ ep->tx_reinit = 1; } - musb_start_urb(musb, is_in, next_qh); + + if (next_qh) + musb_start_urb(musb, is_in, next_qh); } } @@ -2564,8 +2560,11 @@ static int musb_bus_suspend(struct usb_hcd *hcd) { struct musb *musb = hcd_to_musb(hcd); u8 devctl; + int ret; - musb_port_suspend(musb, true); + ret = musb_port_suspend(musb, true); + if (ret) + return ret; if (!is_host_active(musb)) return 0; diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h index 7bbf01bf4bb0..54d02ed032df 100644 --- a/drivers/usb/musb/musb_host.h +++ b/drivers/usb/musb/musb_host.h @@ -92,7 +92,7 @@ extern void musb_host_rx(struct musb *, u8); extern void musb_root_disconnect(struct musb *musb); extern void musb_host_resume_root_hub(struct musb *musb); extern void musb_host_poke_root_hub(struct musb *musb); -extern void musb_port_suspend(struct musb *musb, bool do_suspend); +extern int musb_port_suspend(struct musb *musb, bool do_suspend); extern void musb_port_reset(struct musb *musb, bool do_reset); extern void musb_host_finish_resume(struct work_struct *work); #else @@ -124,7 +124,10 @@ static inline void musb_root_disconnect(struct musb *musb) {} static inline void musb_host_resume_root_hub(struct musb *musb) {} static inline void musb_host_poll_rh_status(struct musb *musb) {} static inline void musb_host_poke_root_hub(struct musb *musb) {} -static inline void musb_port_suspend(struct musb *musb, bool do_suspend) {} +static inline int musb_port_suspend(struct musb *musb, bool do_suspend) +{ + return 0; +} static inline void musb_port_reset(struct musb *musb, bool do_reset) {} static inline void musb_host_finish_resume(struct work_struct *work) {} #endif diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index 0b4595439d51..5eca5d2d5e00 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c @@ -73,14 +73,14 @@ void musb_host_finish_resume(struct work_struct *work) spin_unlock_irqrestore(&musb->lock, flags); } -void musb_port_suspend(struct musb *musb, bool do_suspend) +int musb_port_suspend(struct musb *musb, bool do_suspend) { struct usb_otg *otg = musb->xceiv->otg; u8 power; void __iomem *mbase = musb->mregs; if (!is_host_active(musb)) - return; + return 0; /* NOTE: this doesn't necessarily put PHY into low power mode, * turning off its clock; that's a function of PHY integration and @@ -91,16 +91,20 @@ void musb_port_suspend(struct musb *musb, bool do_suspend) if (do_suspend) { int retries = 10000; - power &= ~MUSB_POWER_RESUME; - power |= MUSB_POWER_SUSPENDM; - musb_writeb(mbase, MUSB_POWER, power); + if (power & MUSB_POWER_RESUME) + return -EBUSY; - /* Needed for OPT A tests */ - power = musb_readb(mbase, MUSB_POWER); - while (power & MUSB_POWER_SUSPENDM) { + if (!(power & MUSB_POWER_SUSPENDM)) { + power |= MUSB_POWER_SUSPENDM; + musb_writeb(mbase, MUSB_POWER, power); + + /* Needed for OPT A tests */ power = musb_readb(mbase, MUSB_POWER); - if (retries-- < 1) - break; + while (power & MUSB_POWER_SUSPENDM) { + power = musb_readb(mbase, MUSB_POWER); + if (retries-- < 1) + break; + } } musb_dbg(musb, "Root port suspended, power %02x", power); @@ -136,6 +140,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend) schedule_delayed_work(&musb->finish_resume_work, msecs_to_jiffies(USB_RESUME_TIMEOUT)); } + return 0; } void 
musb_port_reset(struct musb *musb, bool do_reset) diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index aff702c0eb9f..8d3cf2c6dd72 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -7,6 +7,14 @@ config USB_PHY select EXTCON def_bool n +config USB_OTG_WAKELOCK + bool "Hold a wakelock when USB connected" + depends on PM_WAKELOCKS + select USB_OTG_UTILS + help + Select this to automatically hold a wakelock when USB is + connected, preventing suspend. + # # USB Transceiver Drivers # @@ -202,4 +210,21 @@ config USB_ULPI_VIEWPORT Provides read/write operations to the ULPI phy register set for controllers with a viewport register (e.g. Chipidea/ARC controllers). +config USB_INTEL_DUAL_ROLE_PHY + tristate "Intel Dual Role Transceiver Driver" + depends on USB_OTG_FSM && PM + depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y' + help + Enable this to support the USB Dual Role transceiver on Broxton/Cherrytrail + platforms. + +config DUAL_ROLE_USB_INTF + bool "Generic DUAL ROLE sysfs interface" + depends on SYSFS && USB_PHY + help + A generic sysfs interface to track and change the state of + dual role usb phys. The usb phy drivers can register with + this interface to expose their capabilities to userspace, + thereby allowing userspace to change the port mode. + endmenu diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile index 0c40ccc90631..07bf3e6eae52 100644 --- a/drivers/usb/phy/Makefile +++ b/drivers/usb/phy/Makefile @@ -4,6 +4,8 @@ # obj-$(CONFIG_USB_PHY) += phy.o obj-$(CONFIG_OF) += of.o +obj-$(CONFIG_USB_OTG_WAKELOCK) += otg-wakelock.o +obj-$(CONFIG_DUAL_ROLE_USB_INTF) += class-dual-role.o # transceiver drivers, keep the list sorted @@ -26,3 +28,4 @@ obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-usb.o obj-$(CONFIG_USB_ULPI) += phy-ulpi.o obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o obj-$(CONFIG_KEYSTONE_USB_PHY) += phy-keystone.o +obj-$(CONFIG_USB_INTEL_DUAL_ROLE_PHY) += phy-intel-dualrole.o diff --git a/drivers/usb/phy/class-dual-role.c b/drivers/usb/phy/class-dual-role.c new file mode 100644 index 000000000000..51fcb545a9d5 --- /dev/null +++ b/drivers/usb/phy/class-dual-role.c @@ -0,0 +1,529 @@ +/* + * class-dual-role.c + * + * Copyright (C) 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DUAL_ROLE_NOTIFICATION_TIMEOUT 2000 + +static ssize_t dual_role_store_property(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +static ssize_t dual_role_show_property(struct device *dev, + struct device_attribute *attr, + char *buf); + +#define DUAL_ROLE_ATTR(_name) \ +{ \ + .attr = { .name = #_name }, \ + .show = dual_role_show_property, \ + .store = dual_role_store_property, \ +} + +static struct device_attribute dual_role_attrs[] = { + DUAL_ROLE_ATTR(supported_modes), + DUAL_ROLE_ATTR(mode), + DUAL_ROLE_ATTR(power_role), + DUAL_ROLE_ATTR(data_role), + DUAL_ROLE_ATTR(powers_vconn), +}; + +struct class *dual_role_class; +EXPORT_SYMBOL_GPL(dual_role_class); + +static struct device_type dual_role_dev_type; + +static char *kstrdupcase(const char *str, gfp_t gfp, bool to_upper) +{ + char *ret, *ustr; + + ustr = ret = kmalloc(strlen(str) + 1, gfp); + + if (!ret) + return NULL; + + while (*str) + *ustr++ = to_upper ? toupper(*str++) : tolower(*str++); + + *ustr = 0; + + return ret; +} + +static void dual_role_changed_work(struct work_struct *work) +{ + struct dual_role_phy_instance *dual_role = + container_of(work, struct dual_role_phy_instance, + changed_work); + + dev_dbg(&dual_role->dev, "%s\n", __func__); + kobject_uevent(&dual_role->dev.kobj, KOBJ_CHANGE); +} + +void dual_role_instance_changed(struct dual_role_phy_instance *dual_role) +{ + dev_dbg(&dual_role->dev, "%s\n", __func__); + pm_wakeup_event(&dual_role->dev, DUAL_ROLE_NOTIFICATION_TIMEOUT); + schedule_work(&dual_role->changed_work); +} +EXPORT_SYMBOL_GPL(dual_role_instance_changed); + +int dual_role_get_property(struct dual_role_phy_instance *dual_role, + enum dual_role_property prop, + unsigned int *val) +{ + return dual_role->desc->get_property(dual_role, prop, val); +} +EXPORT_SYMBOL_GPL(dual_role_get_property); + +int dual_role_set_property(struct dual_role_phy_instance *dual_role, + enum dual_role_property prop, + const unsigned int *val) +{ + if (!dual_role->desc->set_property) + return -ENODEV; + + return dual_role->desc->set_property(dual_role, prop, val); +} +EXPORT_SYMBOL_GPL(dual_role_set_property); + +int dual_role_property_is_writeable(struct dual_role_phy_instance *dual_role, + enum dual_role_property prop) +{ + if (!dual_role->desc->property_is_writeable) + return -ENODEV; + + return dual_role->desc->property_is_writeable(dual_role, prop); +} +EXPORT_SYMBOL_GPL(dual_role_property_is_writeable); + +static void dual_role_dev_release(struct device *dev) +{ + struct dual_role_phy_instance *dual_role = + container_of(dev, struct dual_role_phy_instance, dev); + pr_debug("device: '%s': %s\n", dev_name(dev), __func__); + kfree(dual_role); +} + +static struct dual_role_phy_instance *__must_check +__dual_role_register(struct device *parent, + const struct dual_role_phy_desc *desc) +{ + struct device *dev; + struct dual_role_phy_instance *dual_role; + int rc; + + dual_role = kzalloc(sizeof(*dual_role), GFP_KERNEL); + if (!dual_role) + return ERR_PTR(-ENOMEM); + + dev = &dual_role->dev; + + device_initialize(dev); + + dev->class = dual_role_class; + dev->type = &dual_role_dev_type; + dev->parent = parent; + dev->release = dual_role_dev_release; + dev_set_drvdata(dev, dual_role); + dual_role->desc = desc; + + rc = dev_set_name(dev, "%s", desc->name); + if (rc) + goto dev_set_name_failed; + + INIT_WORK(&dual_role->changed_work, dual_role_changed_work); + + rc = 
device_init_wakeup(dev, true); + if (rc) + goto wakeup_init_failed; + + rc = device_add(dev); + if (rc) + goto device_add_failed; + + dual_role_instance_changed(dual_role); + + return dual_role; + +device_add_failed: + device_init_wakeup(dev, false); +wakeup_init_failed: +dev_set_name_failed: + put_device(dev); + kfree(dual_role); + + return ERR_PTR(rc); +} + +static void dual_role_instance_unregister(struct dual_role_phy_instance + *dual_role) +{ + cancel_work_sync(&dual_role->changed_work); + device_init_wakeup(&dual_role->dev, false); + device_unregister(&dual_role->dev); +} + +static void devm_dual_role_release(struct device *dev, void *res) +{ + struct dual_role_phy_instance **dual_role = res; + + dual_role_instance_unregister(*dual_role); +} + +struct dual_role_phy_instance *__must_check +devm_dual_role_instance_register(struct device *parent, + const struct dual_role_phy_desc *desc) +{ + struct dual_role_phy_instance **ptr, *dual_role; + + ptr = devres_alloc(devm_dual_role_release, sizeof(*ptr), GFP_KERNEL); + + if (!ptr) + return ERR_PTR(-ENOMEM); + dual_role = __dual_role_register(parent, desc); + if (IS_ERR(dual_role)) { + devres_free(ptr); + } else { + *ptr = dual_role; + devres_add(parent, ptr); + } + return dual_role; +} +EXPORT_SYMBOL_GPL(devm_dual_role_instance_register); + +static int devm_dual_role_match(struct device *dev, void *res, void *data) +{ + struct dual_role_phy_instance **r = res; + + if (WARN_ON(!r || !*r)) + return 0; + + return *r == data; +} + +void devm_dual_role_instance_unregister(struct device *dev, + struct dual_role_phy_instance + *dual_role) +{ + int rc; + + rc = devres_release(dev, devm_dual_role_release, + devm_dual_role_match, dual_role); + WARN_ON(rc); +} +EXPORT_SYMBOL_GPL(devm_dual_role_instance_unregister); + +void *dual_role_get_drvdata(struct dual_role_phy_instance *dual_role) +{ + return dual_role->drv_data; +} +EXPORT_SYMBOL_GPL(dual_role_get_drvdata); + +/***************** Device attribute functions **************************/ + +/* port type */ +static char *supported_modes_text[] = { + "ufp dfp", "dfp", "ufp" +}; + +/* current mode */ +static char *mode_text[] = { + "ufp", "dfp", "none" +}; + +/* Power role */ +static char *pr_text[] = { + "source", "sink", "none" +}; + +/* Data role */ +static char *dr_text[] = { + "host", "device", "none" +}; + +/* Vconn supply */ +static char *vconn_supply_text[] = { + "n", "y" +}; + +static ssize_t dual_role_show_property(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t ret = 0; + struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev); + const ptrdiff_t off = attr - dual_role_attrs; + unsigned int value; + + if (off == DUAL_ROLE_PROP_SUPPORTED_MODES) { + value = dual_role->desc->supported_modes; + } else { + ret = dual_role_get_property(dual_role, off, &value); + + if (ret < 0) { + if (ret == -ENODATA) + dev_dbg(dev, + "driver has no data for `%s' property\n", + attr->attr.name); + else if (ret != -ENODEV) + dev_err(dev, + "driver failed to report `%s' property: %zd\n", + attr->attr.name, ret); + return ret; + } + } + + if (off == DUAL_ROLE_PROP_SUPPORTED_MODES) { + BUILD_BUG_ON(DUAL_ROLE_PROP_SUPPORTED_MODES_TOTAL != + ARRAY_SIZE(supported_modes_text)); + if (value < DUAL_ROLE_PROP_SUPPORTED_MODES_TOTAL) + return snprintf(buf, PAGE_SIZE, "%s\n", + supported_modes_text[value]); + else + return -EIO; + } else if (off == DUAL_ROLE_PROP_MODE) { + BUILD_BUG_ON(DUAL_ROLE_PROP_MODE_TOTAL != + ARRAY_SIZE(mode_text)); + if (value < DUAL_ROLE_PROP_MODE_TOTAL) + return 
snprintf(buf, PAGE_SIZE, "%s\n", + mode_text[value]); + else + return -EIO; + } else if (off == DUAL_ROLE_PROP_PR) { + BUILD_BUG_ON(DUAL_ROLE_PROP_PR_TOTAL != ARRAY_SIZE(pr_text)); + if (value < DUAL_ROLE_PROP_PR_TOTAL) + return snprintf(buf, PAGE_SIZE, "%s\n", + pr_text[value]); + else + return -EIO; + } else if (off == DUAL_ROLE_PROP_DR) { + BUILD_BUG_ON(DUAL_ROLE_PROP_DR_TOTAL != ARRAY_SIZE(dr_text)); + if (value < DUAL_ROLE_PROP_DR_TOTAL) + return snprintf(buf, PAGE_SIZE, "%s\n", + dr_text[value]); + else + return -EIO; + } else if (off == DUAL_ROLE_PROP_VCONN_SUPPLY) { + BUILD_BUG_ON(DUAL_ROLE_PROP_VCONN_SUPPLY_TOTAL != + ARRAY_SIZE(vconn_supply_text)); + if (value < DUAL_ROLE_PROP_VCONN_SUPPLY_TOTAL) + return snprintf(buf, PAGE_SIZE, "%s\n", + vconn_supply_text[value]); + else + return -EIO; + } else + return -EIO; +} + +static ssize_t dual_role_store_property(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + ssize_t ret; + struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev); + const ptrdiff_t off = attr - dual_role_attrs; + unsigned int value; + int total, i; + char *dup_buf, **text_array; + bool result = false; + + dup_buf = kstrdupcase(buf, GFP_KERNEL, false); + switch (off) { + case DUAL_ROLE_PROP_MODE: + total = DUAL_ROLE_PROP_MODE_TOTAL; + text_array = mode_text; + break; + case DUAL_ROLE_PROP_PR: + total = DUAL_ROLE_PROP_PR_TOTAL; + text_array = pr_text; + break; + case DUAL_ROLE_PROP_DR: + total = DUAL_ROLE_PROP_DR_TOTAL; + text_array = dr_text; + break; + case DUAL_ROLE_PROP_VCONN_SUPPLY: + ret = strtobool(dup_buf, &result); + value = result; + if (!ret) + goto setprop; + default: + ret = -EINVAL; + goto error; + } + + for (i = 0; i <= total; i++) { + if (i == total) { + ret = -ENOTSUPP; + goto error; + } + if (!strncmp(*(text_array + i), dup_buf, + strlen(*(text_array + i)))) { + value = i; + break; + } + } + +setprop: + ret = dual_role->desc->set_property(dual_role, off, &value); + +error: + kfree(dup_buf); + + if (ret < 0) + return ret; + + return count; +} + +static umode_t dual_role_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int attrno) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev); + umode_t mode = S_IRUSR | S_IRGRP | S_IROTH; + int i; + + if (attrno == DUAL_ROLE_PROP_SUPPORTED_MODES) + return mode; + + for (i = 0; i < dual_role->desc->num_properties; i++) { + int property = dual_role->desc->properties[i]; + + if (property == attrno) { + if (dual_role->desc->property_is_writeable && + dual_role_property_is_writeable(dual_role, property) + > 0) + mode |= S_IWUSR; + + return mode; + } + } + + return 0; +} + +static struct attribute *__dual_role_attrs[ARRAY_SIZE(dual_role_attrs) + 1]; + +static struct attribute_group dual_role_attr_group = { + .attrs = __dual_role_attrs, + .is_visible = dual_role_attr_is_visible, +}; + +static const struct attribute_group *dual_role_attr_groups[] = { + &dual_role_attr_group, + NULL, +}; + +void dual_role_init_attrs(struct device_type *dev_type) +{ + int i; + + dev_type->groups = dual_role_attr_groups; + + for (i = 0; i < ARRAY_SIZE(dual_role_attrs); i++) + __dual_role_attrs[i] = &dual_role_attrs[i].attr; +} + +int dual_role_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev); + int ret = 0, j; + char *prop_buf; + char *attrname; + + dev_dbg(dev, "uevent\n"); + + if (!dual_role || !dual_role->desc) { + 
dev_dbg(dev, "No dual_role phy yet\n"); + return ret; + } + + dev_dbg(dev, "DUAL_ROLE_NAME=%s\n", dual_role->desc->name); + + ret = add_uevent_var(env, "DUAL_ROLE_NAME=%s", dual_role->desc->name); + if (ret) + return ret; + + prop_buf = (char *)get_zeroed_page(GFP_KERNEL); + if (!prop_buf) + return -ENOMEM; + + for (j = 0; j < dual_role->desc->num_properties; j++) { + struct device_attribute *attr; + char *line; + + attr = &dual_role_attrs[dual_role->desc->properties[j]]; + + ret = dual_role_show_property(dev, attr, prop_buf); + if (ret == -ENODEV || ret == -ENODATA) { + ret = 0; + continue; + } + + if (ret < 0) + goto out; + line = strnchr(prop_buf, PAGE_SIZE, '\n'); + if (line) + *line = 0; + + attrname = kstrdupcase(attr->attr.name, GFP_KERNEL, true); + if (!attrname) + ret = -ENOMEM; + + dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf); + + ret = add_uevent_var(env, "DUAL_ROLE_%s=%s", attrname, + prop_buf); + kfree(attrname); + if (ret) + goto out; + } + +out: + free_page((unsigned long)prop_buf); + + return ret; +} + +/******************* Module Init ***********************************/ + +static int __init dual_role_class_init(void) +{ + dual_role_class = class_create(THIS_MODULE, "dual_role_usb"); + + if (IS_ERR(dual_role_class)) + return PTR_ERR(dual_role_class); + + dual_role_class->dev_uevent = dual_role_uevent; + dual_role_init_attrs(&dual_role_dev_type); + + return 0; +} + +static void __exit dual_role_class_exit(void) +{ + class_destroy(dual_role_class); +} + +subsys_initcall(dual_role_class_init); +module_exit(dual_role_class_exit); diff --git a/drivers/usb/phy/otg-wakelock.c b/drivers/usb/phy/otg-wakelock.c new file mode 100644 index 000000000000..ecd741027f53 --- /dev/null +++ b/drivers/usb/phy/otg-wakelock.c @@ -0,0 +1,170 @@ +/* + * otg-wakelock.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#define TEMPORARY_HOLD_TIME 2000 + +static bool enabled = true; +static struct usb_phy *otgwl_xceiv; +static struct notifier_block otgwl_nb; + +/* + * otgwl_spinlock is held while the VBUS lock is grabbed or dropped and the + * held field is updated to match. + */ + +static DEFINE_SPINLOCK(otgwl_spinlock); + +/* + * Only one lock, but since these 3 fields are associated with each other... + */ + +struct otgwl_lock { + char name[40]; + struct wakeup_source wakesrc; + bool held; +}; + +/* + * VBUS present lock. Also used as a timed lock on charger + * connect/disconnect and USB host disconnect, to allow the system + * to react to the change in power. 
+ */ + +static struct otgwl_lock vbus_lock; + +static void otgwl_hold(struct otgwl_lock *lock) +{ + if (!lock->held) { + __pm_stay_awake(&lock->wakesrc); + lock->held = true; + } +} + +static void otgwl_temporary_hold(struct otgwl_lock *lock) +{ + __pm_wakeup_event(&lock->wakesrc, TEMPORARY_HOLD_TIME); + lock->held = false; +} + +static void otgwl_drop(struct otgwl_lock *lock) +{ + if (lock->held) { + __pm_relax(&lock->wakesrc); + lock->held = false; + } +} + +static void otgwl_handle_event(unsigned long event) +{ + unsigned long irqflags; + + spin_lock_irqsave(&otgwl_spinlock, irqflags); + + if (!enabled) { + otgwl_drop(&vbus_lock); + spin_unlock_irqrestore(&otgwl_spinlock, irqflags); + return; + } + + switch (event) { + case USB_EVENT_VBUS: + case USB_EVENT_ENUMERATED: + otgwl_hold(&vbus_lock); + break; + + case USB_EVENT_NONE: + case USB_EVENT_ID: + case USB_EVENT_CHARGER: + otgwl_temporary_hold(&vbus_lock); + break; + + default: + break; + } + + spin_unlock_irqrestore(&otgwl_spinlock, irqflags); +} + +static int otgwl_otg_notifications(struct notifier_block *nb, + unsigned long event, void *unused) +{ + otgwl_handle_event(event); + return NOTIFY_OK; +} + +static int set_enabled(const char *val, const struct kernel_param *kp) +{ + int rv = param_set_bool(val, kp); + + if (rv) + return rv; + + if (otgwl_xceiv) + otgwl_handle_event(otgwl_xceiv->last_event); + + return 0; +} + +static struct kernel_param_ops enabled_param_ops = { + .set = set_enabled, + .get = param_get_bool, +}; + +module_param_cb(enabled, &enabled_param_ops, &enabled, 0644); +MODULE_PARM_DESC(enabled, "enable wakelock when VBUS present"); + +static int __init otg_wakelock_init(void) +{ + int ret; + struct usb_phy *phy; + + phy = usb_get_phy(USB_PHY_TYPE_USB2); + + if (IS_ERR(phy)) { + pr_err("%s: No USB transceiver found\n", __func__); + return PTR_ERR(phy); + } + otgwl_xceiv = phy; + + snprintf(vbus_lock.name, sizeof(vbus_lock.name), "vbus-%s", + dev_name(otgwl_xceiv->dev)); + wakeup_source_init(&vbus_lock.wakesrc, vbus_lock.name); + + otgwl_nb.notifier_call = otgwl_otg_notifications; + ret = usb_register_notifier(otgwl_xceiv, &otgwl_nb); + + if (ret) { + pr_err("%s: usb_register_notifier on transceiver %s" + " failed\n", __func__, + dev_name(otgwl_xceiv->dev)); + otgwl_xceiv = NULL; + wakeup_source_trash(&vbus_lock.wakesrc); + return ret; + } + + otgwl_handle_event(otgwl_xceiv->last_event); + return ret; +} + +late_initcall(otg_wakelock_init); diff --git a/drivers/usb/phy/phy-intel-dualrole.c b/drivers/usb/phy/phy-intel-dualrole.c new file mode 100644 index 000000000000..eae21d7254e7 --- /dev/null +++ b/drivers/usb/phy/phy-intel-dualrole.c @@ -0,0 +1,422 @@ +/* + * Intel USB Dual Role transceiver driver + * + * This driver is based on Cherrytrail OTG driver written by + * Wu, Hao + * + * Copyright (C) 2017, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include "../host/xhci.h" + +struct intel_dr_phy { + struct usb_phy phy; + struct otg_fsm fsm; + struct notifier_block nb; + struct extcon_dev *edev; + struct delayed_work mux_work; +}; + +static struct intel_dr_phy *intel_phy_dev; + +static int intel_usb_mux_update(struct intel_dr_phy *intel_phy, + enum phy_mode mode) +{ + struct usb_bus *host = intel_phy->phy.otg->host; + struct usb_gadget *gadget = intel_phy->phy.otg->gadget; + + if (!host || !gadget || !gadget->dev.parent || !intel_phy->edev) + return -ENODEV; + + /* The PHY is shared between the host and device controllers, so both need + * to be in D0 before making any PHY state transition. */ + pm_runtime_get_sync(host->controller); + pm_runtime_get_sync(gadget->dev.parent); + + if (mode == PHY_MODE_USB_HOST) + extcon_set_state_sync(intel_phy->edev, EXTCON_USB_HOST, 1); + else if (mode == PHY_MODE_USB_DEVICE) + extcon_set_state_sync(intel_phy->edev, EXTCON_USB_HOST, 0); + + pm_runtime_put(gadget->dev.parent); + pm_runtime_put(host->controller); + + return 0; +} + +static int intel_dr_phy_start_host(struct otg_fsm *fsm, int on) +{ + struct intel_dr_phy *intel_phy; + + if (!fsm || !fsm->otg) + return -ENODEV; + + intel_phy = container_of(fsm->otg->usb_phy, struct intel_dr_phy, phy); + + if (!fsm->otg->host) + return -ENODEV; + + /* Just switch the mux to the host path */ + return intel_usb_mux_update(intel_phy, PHY_MODE_USB_HOST); +} + +static int intel_dr_phy_start_gadget(struct otg_fsm *fsm, int on) +{ + struct intel_dr_phy *intel_phy; + + if (!fsm || !fsm->otg) + return -ENODEV; + + intel_phy = container_of(fsm->otg->usb_phy, struct intel_dr_phy, phy); + + if (!fsm->otg->gadget) + return -ENODEV; + + /* Just switch the mux to device mode */ + return intel_usb_mux_update(intel_phy, PHY_MODE_USB_DEVICE); +} + +static int intel_dr_phy_set_host(struct usb_otg *otg, struct usb_bus *host) +{ + struct intel_dr_phy *intel_phy; + + if (!otg || !host) + return -ENODEV; + + intel_phy = container_of(otg->usb_phy, struct intel_dr_phy, phy); + + otg->host = host; + + intel_phy->fsm.a_bus_drop = 0; + intel_phy->fsm.a_bus_req = 0; + + if (intel_phy->phy.otg->gadget) + otg_statemachine(&intel_phy->fsm); + + return 0; +} + +static int intel_dr_phy_set_peripheral(struct usb_otg *otg, + struct usb_gadget *gadget) +{ + struct intel_dr_phy *intel_phy; + + if (!otg || !gadget) + return -ENODEV; + + intel_phy = container_of(otg->usb_phy, struct intel_dr_phy, phy); + + otg->gadget = gadget; + + intel_phy->fsm.b_bus_req = 1; + + /* If the host is already registered, kick the state machine.
+ * Only trigger the mode switch once both host and device are + * registered */ + if (intel_phy->phy.otg->host) + otg_statemachine(&intel_phy->fsm); + + return 0; +} + +static int intel_dr_phy_handle_notification(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct usb_bus *host; + struct usb_gadget *gadget; + int state; + + if (!intel_phy_dev) + return NOTIFY_BAD; + + host = intel_phy_dev->phy.otg->host; + gadget = intel_phy_dev->phy.otg->gadget; + + switch (event) { + case USB_EVENT_VBUS: + if (intel_phy_dev->fsm.id) + intel_phy_dev->fsm.b_sess_vld = 1; + state = NOTIFY_OK; + break; + case USB_EVENT_ID: + intel_phy_dev->fsm.id = 0; + state = NOTIFY_OK; + break; + case USB_EVENT_NONE: + if (intel_phy_dev->fsm.id == 0) + intel_phy_dev->fsm.id = 1; + else if (intel_phy_dev->fsm.b_sess_vld) + intel_phy_dev->fsm.b_sess_vld = 0; + else + dev_err(intel_phy_dev->phy.dev, "USB_EVENT_NONE?\n"); + state = NOTIFY_OK; + break; + default: + dev_info(intel_phy_dev->phy.dev, "unknown notification\n"); + state = NOTIFY_DONE; + break; + } + + /* + * Don't kick the state machine if the host or device controller + * was never registered. Just wait to kick it when + * set_host or set_peripheral is called. + */ + + if (host && gadget) + otg_statemachine(&intel_phy_dev->fsm); + + return state; +} + + +static struct otg_fsm_ops intel_dr_phy_fsm_ops = { + .start_host = intel_dr_phy_start_host, + .start_gadget = intel_dr_phy_start_gadget, +}; + +static ssize_t show_mux_state(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct otg_fsm *fsm = &intel_phy_dev->fsm; + + if (fsm->id && fsm->a_vbus_vld) + return sprintf(buf, "peripheral state\n"); + else if (!fsm->id && !fsm->a_vbus_vld) + return sprintf(buf, "host state\n"); + else + return sprintf(buf, "unknown state\n"); + +} + +static ssize_t store_mux_state(struct device *_dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct otg_fsm *fsm; + + if (!intel_phy_dev) + return -EINVAL; + + fsm = &intel_phy_dev->fsm; + + if (count != 2) + return -EINVAL; + + switch (buf[0]) { + case 'D': + case 'd': + case 'P': + case 'p': + case 'a': + case 'A': + case '1': + dev_info(intel_phy_dev->phy.dev, "p: set PERIPHERAL mode\n"); + /* disable host mode */ + dev_info(intel_phy_dev->phy.dev, "ID = 1\n"); + atomic_notifier_call_chain(&intel_phy_dev->phy.notifier, + USB_EVENT_NONE, NULL); + /* enable device mode */ + dev_info(intel_phy_dev->phy.dev, "VBUS = 1\n"); + atomic_notifier_call_chain(&intel_phy_dev->phy.notifier, + USB_EVENT_VBUS, NULL); + return count; + case 'H': + case 'h': + case 'b': + case 'B': + case '0': + dev_info(intel_phy_dev->phy.dev, "h: set HOST mode\n"); + /* disable device mode */ + dev_info(intel_phy_dev->phy.dev, "VBUS = 0\n"); + atomic_notifier_call_chain(&intel_phy_dev->phy.notifier, + USB_EVENT_NONE, NULL); + /* enable host mode */ + dev_info(intel_phy_dev->phy.dev, "ID = 0\n"); + atomic_notifier_call_chain(&intel_phy_dev->phy.notifier, + USB_EVENT_ID, NULL); + return count; + default: + return -EINVAL; + } + + return count; +} + +static DEVICE_ATTR(mux_state, S_IRWXU|S_IRWXG, show_mux_state, store_mux_state); + +static int intel_usb_mux_init(struct intel_dr_phy *intel_phy) +{ + if (!intel_phy) + return -ENODEV; + + intel_phy->edev = extcon_get_extcon_dev("intel_usb_mux"); + if (!intel_phy->edev) { + dev_err(intel_phy->phy.dev, "intel mux device not ready\n"); + return -ENODEV; + } + + /* first switch the mux to host mode to force the host to + * enumerate the port */ +
extcon_set_state_sync(intel_phy->edev, EXTCON_USB_HOST, 1); + msleep(10); + extcon_set_state_sync(intel_phy->edev, EXTCON_USB_HOST, 0); + + return 0; +} + +static void intel_usb_mux_delayed_init(struct work_struct *work) +{ + struct intel_dr_phy *intel_phy = container_of(work, + struct intel_dr_phy, + mux_work.work); + static int retry_count = 0; + + if (intel_usb_mux_init(intel_phy) && retry_count < 10) { + retry_count++; + schedule_delayed_work(&intel_phy->mux_work, HZ); + } +} + +static int intel_dr_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct intel_dr_phy *intel_phy; + int err; + + intel_phy = devm_kzalloc(dev, sizeof(*intel_phy), GFP_KERNEL); + if (!intel_phy) + return -ENOMEM; + + intel_phy->phy.otg = devm_kzalloc(dev, sizeof(*intel_phy->phy.otg), + GFP_KERNEL); + if (!intel_phy->phy.otg) + return -ENOMEM; + + INIT_DELAYED_WORK(&intel_phy->mux_work, intel_usb_mux_delayed_init); + + if (intel_usb_mux_init(intel_phy)) { + dev_warn(dev, "mux is not available, so mux init is delayed"); + schedule_delayed_work(&intel_phy->mux_work, HZ); + } + + /* initialize fsm ops */ + intel_phy->fsm.ops = &intel_dr_phy_fsm_ops; + intel_phy->fsm.otg = intel_phy->phy.otg; + /* default device mode */ + intel_phy->fsm.id = 1; + intel_phy->fsm.b_sess_vld = 1; + mutex_init(&intel_phy->fsm.lock); + + /* initialize the phy structure */ + intel_phy->phy.label = "intel_usb_dr_phy"; + intel_phy->phy.dev = &pdev->dev; + intel_phy->phy.type = USB_PHY_TYPE_USB2; + /* initialize the otg structure */ + intel_phy->phy.otg->state = OTG_STATE_UNDEFINED; + intel_phy->phy.otg->usb_phy = &intel_phy->phy; + intel_phy->phy.otg->set_host = intel_dr_phy_set_host; + intel_phy->phy.otg->set_peripheral = intel_dr_phy_set_peripheral; + /* No support for ADP, HNP and SRP */ + intel_phy->phy.otg->start_hnp = NULL; + intel_phy->phy.otg->start_srp = NULL; + + intel_phy_dev = intel_phy; + + err = usb_add_phy_dev(&intel_phy->phy); + if (err) { + dev_err(&pdev->dev, "can't register intel_dr_phy, err: %d\n", + err); + return err; + } + + intel_phy->nb.notifier_call = intel_dr_phy_handle_notification; + usb_register_notifier(&intel_phy->phy, &intel_phy->nb); + + platform_set_drvdata(pdev, intel_phy); + + err = device_create_file(&pdev->dev, &dev_attr_mux_state); + if (err) { + dev_err(&pdev->dev, "failed to create mux_state sysfs attribute\n"); + usb_remove_phy(&intel_phy->phy); + return err; + } + + otg_statemachine(&intel_phy->fsm); + + return 0; +} + +static int intel_dr_phy_remove(struct platform_device *pdev) +{ + struct intel_dr_phy *intel_phy = platform_get_drvdata(pdev); + + usb_remove_phy(&intel_phy->phy); + + return 0; +} + +static struct platform_driver intel_dr_phy_driver = { + .probe = intel_dr_phy_probe, + .remove = intel_dr_phy_remove, + .driver = { + .name = "intel_usb_dr_phy", + }, +}; + +struct platform_device *intel_dr_phy_device; + +static int __init intel_dr_phy_init(void) +{ + int ret; + ret = platform_driver_register(&intel_dr_phy_driver); + if (ret) + return ret; + + intel_dr_phy_device = platform_device_register_simple("intel_usb_dr_phy", 0, NULL, 0); + if (IS_ERR(intel_dr_phy_device) || !platform_get_drvdata(intel_dr_phy_device)) { + platform_driver_unregister(&intel_dr_phy_driver); + return -ENODEV; + } + return 0; +} +rootfs_initcall(intel_dr_phy_init); + +static void __exit intel_dr_phy_exit(void) +{ + platform_device_unregister(intel_dr_phy_device); + platform_driver_unregister(&intel_dr_phy_driver); +} +module_exit(intel_dr_phy_exit); +
+MODULE_ALIAS("platform:intel_usb_dr_phy"); +MODULE_DESCRIPTION("Intel USB Dual Role Transceiver driver"); +MODULE_AUTHOR("Hao Wu"); +MODULE_AUTHOR("Sathya Kuppuswamy "); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c index 8babd318c0ed..1ec00eae339a 100644 --- a/drivers/usb/phy/phy-tahvo.c +++ b/drivers/usb/phy/phy-tahvo.c @@ -368,7 +368,8 @@ static int tahvo_usb_probe(struct platform_device *pdev) tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable); if (IS_ERR(tu->extcon)) { dev_err(&pdev->dev, "failed to allocate memory for extcon\n"); - return -ENOMEM; + ret = PTR_ERR(tu->extcon); + goto err_disable_clk; } ret = devm_extcon_dev_register(&pdev->dev, tu->extcon); diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 50285b01da92..5d369b38868a 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -998,6 +998,10 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt, if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1)) goto usbhsf_pio_prepare_pop; + /* return at this time if the pipe is running */ + if (usbhs_pipe_is_running(pipe)) + return 0; + usbhs_pipe_config_change_bfre(pipe, 1); ret = usbhsf_fifo_select(pipe, fifo, 0); @@ -1188,6 +1192,7 @@ static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt, usbhsf_fifo_clear(pipe, fifo); pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len); + usbhs_pipe_running(pipe, 0); usbhsf_dma_stop(pipe, fifo); usbhsf_dma_unmap(pkt); usbhsf_fifo_unselect(pipe, pipe->fifo); diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index a8d5f2e4878d..c508e2d7104b 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig @@ -62,7 +62,9 @@ config USB_SERIAL_SIMPLE - Fundamental Software dongle. - Google USB serial devices - HP4x calculators + - Libtransistor USB console - a number of Motorola phones + - Motorola Tetra devices - Novatel Wireless GPS receivers - Siemens USB/MPI adapter. - ViVOtech ViVOpay USB device. 
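Editorial aside, a minimal sketch rather than part of any hunk above: the DUAL_ROLE_USB_INTF class added in class-dual-role.c is consumed by a PHY driver through a dual_role_phy_desc passed to devm_dual_role_instance_register(). The desc field names, the DUAL_ROLE_PROP_* property names, and dual_role_instance_changed() are taken from that file as shown; the header path and the "demo_otg" driver are assumptions, and property values are written as raw indices into the class's text tables because the named enum values do not appear in the hunk.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/usb/class-dual-role.h>	/* assumed header location */

static enum dual_role_property demo_properties[] = {
	DUAL_ROLE_PROP_MODE,
	DUAL_ROLE_PROP_PR,
	DUAL_ROLE_PROP_DR,
};

/* Index 0 maps to "ufp" / "source" / "host" in the class's text tables. */
static int demo_get_property(struct dual_role_phy_instance *inst,
			     enum dual_role_property prop, unsigned int *val)
{
	*val = 0;
	return 0;
}

static int demo_register(struct device *parent)
{
	static struct dual_role_phy_desc desc = {
		.name		 = "demo_otg",		/* hypothetical name */
		.supported_modes = 0,			/* index 0: "ufp dfp" */
		.properties	 = demo_properties,
		.num_properties	 = ARRAY_SIZE(demo_properties),
		.get_property	 = demo_get_property,
		/* no .set_property: the class then rejects writes with -ENODEV */
	};
	struct dual_role_phy_instance *inst;

	inst = devm_dual_role_instance_register(parent, &desc);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	dual_role_instance_changed(inst);	/* schedules a KOBJ_CHANGE uevent */
	return 0;
}

The instance would then appear as /sys/class/dual_role_usb/demo_otg/ with mode, power_role and data_role attributes, per the class_create() call and attribute table in the hunk above.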
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 351745aec0e1..578596d301b8 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -131,7 +131,7 @@ static int ch341_control_in(struct usb_device *dev, r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, value, index, buf, bufsize, DEFAULT_TIMEOUT); - if (r < bufsize) { + if (r < (int)bufsize) { if (r >= 0) { dev_err(&dev->dev, "short control message received (%d < %u)\n", diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 412f812522ee..c931ae689a91 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -98,6 +98,9 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */ + { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */ + { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */ + { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */ { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */ @@ -115,6 +118,9 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */ { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */ + { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */ + { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */ + { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */ { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */ { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ @@ -127,6 +133,9 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ + { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */ + { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */ + { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ @@ -136,17 +145,24 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */ + { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */ { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ + { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */ + { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB 
Radio Stick */ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ + { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */ + { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */ { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */ { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */ @@ -157,6 +173,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */ { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ + { USB_DEVICE(0x155A, 0x1006) }, /* ELDAT Easywave RX09 */ { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */ { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */ { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ @@ -177,6 +194,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ + { USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */ { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ @@ -214,6 +232,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ + { USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */ { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ { } /* Terminating Entry */ }; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 49d1b2d4606d..385f2ae3be24 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -773,6 +773,7 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) }, + { USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) }, @@ -935,6 +936,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_FHE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, @@ -1017,6 
+1019,7 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) }, { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) }, + { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) }, { } /* Terminating entry */ }; @@ -1899,7 +1902,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial) return ftdi_jtag_probe(serial); if (udev->product && - (!strcmp(udev->product, "BeagleBone/XDS100V2") || + (!strcmp(udev->product, "Arrow USB Blaster") || + !strcmp(udev->product, "BeagleBone/XDS100V2") || !strcmp(udev->product, "SNAP Connect E10"))) return ftdi_jtag_probe(serial); diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 4faa09fe308c..975d02666c5a 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -914,9 +914,18 @@ #define ICPDAS_I7561U_PID 0x0104 #define ICPDAS_I7563U_PID 0x0105 +/* + * Airbus Defence and Space + */ +#define AIRBUS_DS_VID 0x1e8e /* Vendor ID */ +#define AIRBUS_DS_P8GR 0x6001 /* Tetra P8GR */ + /* * RT Systems programming cables for various ham radios */ +/* This device uses the VID of FTDI */ +#define RTSYSTEMS_USB_VX8_PID 0x9e50 /* USB-VX8 USB to 7 pin modular plug for Yaesu VX-8 radio */ + #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ #define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */ #define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */ @@ -1435,6 +1444,12 @@ */ #define FTDI_CINTERION_MC55I_PID 0xA951 +/* + * Product: FirmwareHubEmulator + * Manufacturer: Harman Becker Automotive Systems + */ +#define FTDI_FHE_PID 0xA9A0 + /* * Product: Comet Caller ID decoder * Manufacturer: Crucible Technologies diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index b2f2e87aed94..91e7e3a166a5 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -138,6 +138,7 @@ struct garmin_data { __u8 privpkt[4*6]; spinlock_t lock; struct list_head pktlist; + struct usb_anchor write_urbs; }; @@ -905,13 +906,19 @@ static int garmin_init_session(struct usb_serial_port *port) sizeof(GARMIN_START_SESSION_REQ), 0); if (status < 0) - break; + goto err_kill_urbs; } if (status > 0) status = 0; } + return status; + +err_kill_urbs: + usb_kill_anchored_urbs(&garmin_data_p->write_urbs); + usb_kill_urb(port->interrupt_in_urb); + return status; } @@ -930,7 +937,6 @@ static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port) spin_unlock_irqrestore(&garmin_data_p->lock, flags); /* shutdown any bulk reads that might be going on */ - usb_kill_urb(port->write_urb); usb_kill_urb(port->read_urb); if (garmin_data_p->state == STATE_RESET) @@ -953,7 +959,7 @@ static void garmin_close(struct usb_serial_port *port) /* shutdown our urbs */ usb_kill_urb(port->read_urb); - usb_kill_urb(port->write_urb); + usb_kill_anchored_urbs(&garmin_data_p->write_urbs); /* keep reset state so we know that we must start a new session */ if (garmin_data_p->state != STATE_RESET) @@ -1037,12 +1043,14 @@ static int garmin_write_bulk(struct usb_serial_port *port, } /* send it down the pipe */ + usb_anchor_urb(urb, &garmin_data_p->write_urbs); status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed with status = %d\n", __func__, status); count = status; + usb_unanchor_urb(urb); kfree(buffer); } @@ -1401,9 +1409,16 @@ static int garmin_port_probe(struct usb_serial_port *port) garmin_data_p->state = 0; 
garmin_data_p->flags = 0; garmin_data_p->count = 0; + init_usb_anchor(&garmin_data_p->write_urbs); usb_set_serial_port_data(port, garmin_data_p); status = garmin_init_session(port); + if (status) + goto err_free; + + return 0; +err_free: + kfree(garmin_data_p); return status; } @@ -1413,6 +1428,7 @@ static int garmin_port_remove(struct usb_serial_port *port) { struct garmin_data *garmin_data_p = usb_get_serial_port_data(port); + usb_kill_anchored_urbs(&garmin_data_p->write_urbs); usb_kill_urb(port->interrupt_in_urb); del_timer_sync(&garmin_data_p->timer); kfree(garmin_data_p); diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index bdf8bd814a9a..01f3ac7769f3 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -2286,7 +2286,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port, /* something went wrong */ dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n", __func__, status); - usb_kill_urb(urb); usb_free_urb(urb); atomic_dec(&CmdUrbs); return status; diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index 196908dd25a1..f8e8285663a6 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c @@ -373,8 +373,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial, 3, /* get pins */ USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN, 0, 0, data, 1, 2000); - if (rc >= 0) + if (rc == 1) *value = *data; + else if (rc >= 0) + rc = -EIO; kfree(data); return rc; diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c index 14511d6a7d44..3950d44b80f1 100644 --- a/drivers/usb/serial/metro-usb.c +++ b/drivers/usb/serial/metro-usb.c @@ -189,7 +189,7 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port) dev_err(&port->dev, "%s - failed submitting interrupt in urb, error code=%d\n", __func__, result); - goto exit; + return result; } /* Send activate cmd to device */ @@ -198,9 +198,14 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port) dev_err(&port->dev, "%s - failed to configure device, error code=%d\n", __func__, result); - goto exit; + goto err_kill_urb; } -exit: + + return 0; + +err_kill_urb: + usb_kill_urb(port->interrupt_in_urb); + return result; } diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index e8669aae14b3..5e490177cf75 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -481,6 +481,9 @@ static void mos7840_control_callback(struct urb *urb) } dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length); + if (urb->actual_length < 1) + goto out; + dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__, mos7840_port->MsrLsr, mos7840_port->port_num); data = urb->transfer_buffer; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index ba672cf4e888..0600dadd6a0c 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -199,6 +199,8 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */ #define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */ +#define DELL_PRODUCT_5821E 0x81d7 + #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da #define KYOCERA_PRODUCT_KPC680 0x180a @@ -236,11 +238,17 @@ static void option_instat_callback(struct urb *urb); /* These Quectel products use Qualcomm's vendor ID */ #define QUECTEL_PRODUCT_UC20 0x9003 #define 
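
The keyspan_pda and mos7840 hunks above share one theme: a control transfer or completion that delivers fewer bytes than expected used to be treated as success, letting the driver read stale or uninitialized buffer contents. A sketch of the hardened read path, assuming a hypothetical my_read_reg() helper; usb_control_msg() is the real API and the request shape mirrors the keyspan_pda call above.

  static int my_read_reg(struct usb_device *udev, u8 request, u8 *value)
  {
          u8 *buf;
          int rc;

          buf = kmalloc(1, GFP_KERNEL);   /* DMA-able; never on-stack */
          if (!buf)
                  return -ENOMEM;

          rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
                               USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
                               USB_DIR_IN, 0, 0, buf, 1, 2000);
          if (rc == 1)
                  *value = *buf;  /* got exactly the byte we asked for */
          else if (rc >= 0)
                  rc = -EIO;      /* short read: surface it as an error */

          kfree(buf);
          return rc;
  }
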
QUECTEL_PRODUCT_UC15 0x9090 +/* These u-blox products use Qualcomm's vendor ID */ +#define UBLOX_PRODUCT_R410M 0x90b2 +/* These Yuga products use Qualcomm's vendor ID */ +#define YUGA_PRODUCT_CLM920_NC5 0x9625 #define QUECTEL_VENDOR_ID 0x2c7c /* These Quectel products use Quectel's vendor ID */ #define QUECTEL_PRODUCT_EC21 0x0121 #define QUECTEL_PRODUCT_EC25 0x0125 +#define QUECTEL_PRODUCT_BG96 0x0296 +#define QUECTEL_PRODUCT_EP06 0x0306 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -282,6 +290,7 @@ static void option_instat_callback(struct urb *urb); #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 #define TELIT_PRODUCT_ME910 0x1100 +#define TELIT_PRODUCT_ME910_DUAL_MODEM 0x1101 #define TELIT_PRODUCT_LE920 0x1200 #define TELIT_PRODUCT_LE910 0x1201 #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 @@ -379,6 +388,9 @@ static void option_instat_callback(struct urb *urb); #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 #define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01 +/* Fujisoft products */ +#define FUJISOFT_PRODUCT_FS040U 0x9b02 + /* iBall 3.5G connect wireless modem */ #define IBALL_3_5G_CONNECT 0x9605 @@ -543,138 +555,15 @@ static void option_instat_callback(struct urb *urb); #define WETELECOM_PRODUCT_6802 0x6802 #define WETELECOM_PRODUCT_WMD300 0x6803 -struct option_blacklist_info { - /* bitmask of interface numbers blacklisted for send_setup */ - const unsigned long sendsetup; - /* bitmask of interface numbers that are reserved */ - const unsigned long reserved; -}; - -static const struct option_blacklist_info four_g_w14_blacklist = { - .sendsetup = BIT(0) | BIT(1), -}; - -static const struct option_blacklist_info four_g_w100_blacklist = { - .sendsetup = BIT(1) | BIT(2), - .reserved = BIT(3), -}; - -static const struct option_blacklist_info alcatel_x200_blacklist = { - .sendsetup = BIT(0) | BIT(1), - .reserved = BIT(4), -}; - -static const struct option_blacklist_info zte_0037_blacklist = { - .sendsetup = BIT(0) | BIT(1), -}; - -static const struct option_blacklist_info zte_k3765_z_blacklist = { - .sendsetup = BIT(0) | BIT(1) | BIT(2), - .reserved = BIT(4), -}; -static const struct option_blacklist_info zte_ad3812_z_blacklist = { - .sendsetup = BIT(0) | BIT(1) | BIT(2), -}; +/* Device flags */ -static const struct option_blacklist_info zte_mc2718_z_blacklist = { - .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4), -}; +/* Interface does not support modem-control requests */ +#define NCTRL(ifnum) ((BIT(ifnum) & 0xff) << 8) -static const struct option_blacklist_info zte_mc2716_z_blacklist = { - .sendsetup = BIT(1) | BIT(2) | BIT(3), -}; +/* Interface is reserved */ +#define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0) -static const struct option_blacklist_info zte_me3620_mbim_blacklist = { - .reserved = BIT(2) | BIT(3) | BIT(4), -}; - -static const struct option_blacklist_info zte_me3620_xl_blacklist = { - .reserved = BIT(3) | BIT(4) | BIT(5), -}; - -static const struct option_blacklist_info zte_zm8620_x_blacklist = { - .reserved = BIT(3) | BIT(4) | BIT(5), -}; - -static const struct option_blacklist_info huawei_cdc12_blacklist = { - .reserved = BIT(1) | BIT(2), -}; - -static const struct option_blacklist_info net_intf0_blacklist = { - .reserved = BIT(0), -}; - -static const struct option_blacklist_info net_intf1_blacklist = { - .reserved = BIT(1), -}; - -static const struct option_blacklist_info net_intf2_blacklist = { - .reserved = BIT(2), -}; - -static const struct option_blacklist_info net_intf3_blacklist = { - .reserved = BIT(3), -}; - -static const 
struct option_blacklist_info net_intf4_blacklist = { - .reserved = BIT(4), -}; - -static const struct option_blacklist_info net_intf5_blacklist = { - .reserved = BIT(5), -}; - -static const struct option_blacklist_info net_intf6_blacklist = { - .reserved = BIT(6), -}; - -static const struct option_blacklist_info zte_mf626_blacklist = { - .sendsetup = BIT(0) | BIT(1), - .reserved = BIT(4), -}; - -static const struct option_blacklist_info zte_1255_blacklist = { - .reserved = BIT(3) | BIT(4), -}; - -static const struct option_blacklist_info simcom_sim7100e_blacklist = { - .reserved = BIT(5) | BIT(6), -}; - -static const struct option_blacklist_info telit_me910_blacklist = { - .sendsetup = BIT(0), - .reserved = BIT(1) | BIT(3), -}; - -static const struct option_blacklist_info telit_le910_blacklist = { - .sendsetup = BIT(0), - .reserved = BIT(1) | BIT(2), -}; - -static const struct option_blacklist_info telit_le920_blacklist = { - .sendsetup = BIT(0), - .reserved = BIT(1) | BIT(5), -}; - -static const struct option_blacklist_info telit_le920a4_blacklist_1 = { - .sendsetup = BIT(0), - .reserved = BIT(1), -}; - -static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = { - .sendsetup = BIT(2), - .reserved = BIT(0) | BIT(1) | BIT(3), -}; - -static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = { - .sendsetup = BIT(0), - .reserved = BIT(1) | BIT(2) | BIT(3), -}; - -static const struct option_blacklist_info cinterion_rmnet2_blacklist = { - .reserved = BIT(4) | BIT(5), -}; static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, @@ -708,26 +597,26 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) }, { USB_DEVICE(QUANTA_VENDOR_ID, 0xea42), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, + .driver_info = RSVD(1) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, + .driver_info = RSVD(1) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t) &net_intf2_blacklist }, + .driver_info = RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, + .driver_info = RSVD(1) | RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, + .driver_info = RSVD(1) | RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff), /* Huawei E1820 */ - .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, + .driver_info = RSVD(1) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 
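
The long run of option_blacklist_info structs deleted above collapses into the two bit-field macros, NCTRL() and RSVD(), with the per-interface bitmaps packed directly into the pointer-sized driver_info word (RSVD in bits 0-7, NCTRL in bits 8-15). Here is a standalone demo of the encoding, runnable as ordinary userspace C; the macro bodies are copied from the patch, everything else is illustrative.

  #include <stdio.h>

  #define BIT(n)          (1UL << (n))
  #define NCTRL(ifnum)    ((BIT(ifnum) & 0xff) << 8) /* no modem control */
  #define RSVD(ifnum)     ((BIT(ifnum) & 0xff) << 0) /* do not bind */

  int main(void)
  {
          /* same bitmap as the TELIT_PRODUCT_ME910 entry in this table */
          unsigned long flags = NCTRL(0) | RSVD(1) | RSVD(3);
          int ifnum;

          for (ifnum = 0; ifnum < 8; ifnum++)
                  printf("if%d:%s%s\n", ifnum,
                         flags & RSVD(ifnum) ? " reserved" : "",
                         flags & NCTRL(ifnum) ? " no-sendsetup" : "");
          return 0;
  }

The "& 0xff" mask is what limits the scheme to interfaces 0 through 7, which covers every entry in this table while keeping both bitmaps inside one unsigned long.
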
0xff, 0xff), - .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, + .driver_info = RSVD(1) | RSVD(2) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) }, @@ -1146,6 +1035,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E), + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -1172,60 +1063,70 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */ - .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + .driver_info = RSVD(3) }, { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ /* Quectel products using Qualcomm vendor ID */ { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, + /* Yuga products use Qualcomm vendor ID */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5), + .driver_info = RSVD(1) | RSVD(4) }, + /* u-blox products using Qualcomm vendor ID */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), + .driver_info = RSVD(1) | RSVD(3) }, /* Quectel products using Quectel vendor ID */ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), + .driver_info = RSVD(4) }, + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06), + .driver_info = RSVD(4) | RSVD(5) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), - .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + .driver_info = RSVD(0) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S), - .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + .driver_info = RSVD(0) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301), - .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + .driver_info = RSVD(0) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628), - .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + .driver_info = RSVD(0) }, { USB_DEVICE(CMOTECH_VENDOR_ID, 
CMOTECH_PRODUCT_CHU_628S) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S), - .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + .driver_info = RSVD(0) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002), - .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + .driver_info = RSVD(0) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004), - .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + .driver_info = RSVD(3) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629), - .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + .driver_info = RSVD(5) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I), - .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + .driver_info = RSVD(0) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212), - .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + .driver_info = RSVD(0) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213), - .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + .driver_info = RSVD(0) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251), - .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + .driver_info = RSVD(1) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252), - .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + .driver_info = RSVD(1) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253), - .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + .driver_info = RSVD(1) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) }, @@ -1233,36 +1134,38 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0), - .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1), - .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2), - .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3), - .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), - .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, + .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), - .driver_info = (kernel_ulong_t)&telit_me910_blacklist }, + .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), + .driver_info = NCTRL(0) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), - .driver_info = 
(kernel_ulong_t)&telit_le910_blacklist }, + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), - .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), - .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, + .driver_info = NCTRL(0) | RSVD(1) | RSVD(5) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208), - .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, + .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211), - .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212), - .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, + .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), - .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + .driver_info = RSVD(1) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) }, @@ -1278,58 +1181,58 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + .driver_info = RSVD(1) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + .driver_info = RSVD(3) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + .driver_info = RSVD(3) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + .driver_info = RSVD(1) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 
0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, - 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mf626_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff), + .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&zte_0037_blacklist }, + .driver_info = NCTRL(0) | NCTRL(1) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + .driver_info = RSVD(1) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) }, @@ -1354,26 +1257,26 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) }, 
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + .driver_info = RSVD(6) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) }, @@ -1389,50 +1292,50 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + .driver_info = RSVD(3) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */ - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */ - .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + .driver_info = RSVD(1) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */ - .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + .driver_info = 
RSVD(3) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */ - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */ - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */ - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + .driver_info = RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) }, @@ -1549,23 +1452,23 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&zte_1255_blacklist }, + .driver_info = RSVD(3) | RSVD(4) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) }, @@ -1580,7 +1483,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, @@ -1616,17 +1519,17 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + .driver_info = RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + .driver_info = RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + .driver_info = RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + .driver_info = RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */ - .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + .driver_info = RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */ - .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + .driver_info = RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, @@ -1644,8 +1547,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, - 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff), + .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ @@ -1656,20 +1559,20 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf1_blacklist 
}, + .driver_info = RSVD(1) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + .driver_info = RSVD(3) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + .driver_info = RSVD(3) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + .driver_info = RSVD(3) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) }, @@ -1821,19 +1724,19 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist }, + .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) | NCTRL(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, + .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, + .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) }, { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L), - .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist }, + .driver_info = RSVD(3) | RSVD(4) | RSVD(5) }, { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM), - .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist }, + .driver_info = RSVD(2) | RSVD(3) | RSVD(4) }, { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X), - .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist }, + .driver_info = RSVD(3) | RSVD(4) | RSVD(5) }, { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X), - .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist }, + .driver_info = RSVD(3) | RSVD(4) | RSVD(5) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, @@ -1853,35 +1756,34 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) }, { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E), - .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist }, + .driver_info = RSVD(5) | RSVD(6) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), - .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist - }, + .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) }, { 
USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), - .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + .driver_info = RSVD(6) }, { USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052), - .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + .driver_info = RSVD(6) }, { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6), - .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + .driver_info = RSVD(3) }, { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7), - .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + .driver_info = RSVD(5) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA), - .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + .driver_info = RSVD(2) }, { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), - .driver_info = (kernel_ulong_t)&four_g_w14_blacklist - }, + .driver_info = NCTRL(0) | NCTRL(1) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100), - .driver_info = (kernel_ulong_t)&four_g_w100_blacklist - }, + .driver_info = NCTRL(1) | NCTRL(2) | RSVD(3) }, + {USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U), + .driver_info = RSVD(3)}, { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff), - .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + .driver_info = RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, @@ -1907,14 +1809,14 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff), - .driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist }, + .driver_info = RSVD(4) | RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, @@ -1924,20 +1826,20 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, 
OLIVETTI_PRODUCT_OLICARD120), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155), - .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + .driver_info = RSVD(6) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200), - .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + .driver_info = RSVD(6) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160), - .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + .driver_info = RSVD(6) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, @@ -2014,9 +1916,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */ { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */ - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) }, { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d01, 0xff) }, /* D-Link DWM-156 (variant) */ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d02, 0xff) }, @@ -2024,9 +1926,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) }, /* D-Link DWM-157 C1 */ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ - .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ @@ -2086,7 +1988,7 @@ static int option_probe(struct usb_serial *serial, struct usb_interface_descriptor *iface_desc = &serial->interface->cur_altsetting->desc; struct usb_device_descriptor *dev_desc = &serial->dev->descriptor; - const struct option_blacklist_info *blacklist; + unsigned long device_flags = id->driver_info; /* Never bind to the CD-Rom emulation interface */ if (iface_desc->bInterfaceClass == 0x08) @@ -2097,9 +1999,7 @@ static int option_probe(struct usb_serial *serial, * the same class/subclass/protocol as the serial interfaces. Look at * the Windows driver .INF files for reserved interface numbers. 
*/ - blacklist = (void *)id->driver_info; - if (blacklist && test_bit(iface_desc->bInterfaceNumber, - &blacklist->reserved)) + if (device_flags & RSVD(iface_desc->bInterfaceNumber)) return -ENODEV; /* * Don't bind network interface on Samsung GT-B3730, it is handled by @@ -2110,8 +2010,8 @@ static int option_probe(struct usb_serial *serial, iface_desc->bInterfaceClass != USB_CLASS_CDC_DATA) return -ENODEV; - /* Store the blacklist info so we can use it during attach. */ - usb_set_serial_data(serial, (void *)blacklist); + /* Store the device flags so we can use them during attach. */ + usb_set_serial_data(serial, (void *)device_flags); return 0; } @@ -2119,22 +2019,21 @@ static int option_probe(struct usb_serial *serial, static int option_attach(struct usb_serial *serial) { struct usb_interface_descriptor *iface_desc; - const struct option_blacklist_info *blacklist; struct usb_wwan_intf_private *data; + unsigned long device_flags; data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL); if (!data) return -ENOMEM; - /* Retrieve blacklist info stored at probe. */ - blacklist = usb_get_serial_data(serial); + /* Retrieve device flags stored at probe. */ + device_flags = (unsigned long)usb_get_serial_data(serial); iface_desc = &serial->interface->cur_altsetting->desc; - if (!blacklist || !test_bit(iface_desc->bInterfaceNumber, - &blacklist->sendsetup)) { + if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber))) data->use_send_setup = 1; - } + spin_lock_init(&data->susp_lock); usb_set_serial_data(serial, data); diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index a585b477415d..2153e67eeeee 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -41,6 +41,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_CHILITAG) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) }, @@ -54,6 +55,8 @@ static const struct usb_device_id id_table[] = { .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485), .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC232B), + .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) }, { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 3b5a15d1dc0d..cec7141245ef 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -17,6 +17,7 @@ #define PL2303_PRODUCT_ID_DCU11 0x1234 #define PL2303_PRODUCT_ID_PHAROS 0xaaa0 #define PL2303_PRODUCT_ID_RSAQ3 0xaaa2 +#define PL2303_PRODUCT_ID_CHILITAG 0xaaa8 #define PL2303_PRODUCT_ID_ALDIGA 0x0611 #define PL2303_PRODUCT_ID_MMX 0x0612 #define PL2303_PRODUCT_ID_GPRS 0x0609 @@ -28,6 +29,7 @@ #define ATEN_VENDOR_ID2 0x0547 #define ATEN_PRODUCT_ID 0x2008 #define ATEN_PRODUCT_UC485 0x2021 +#define ATEN_PRODUCT_UC232B 0x2022 #define ATEN_PRODUCT_ID2 0x2118 #define IODATA_VENDOR_ID 0x04bb diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index eb9928963a53..55a8fb25ce2b 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -148,6 +148,7 
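
The option_probe()/option_attach() rework just above hands the flags word from the match table to attach time by casting it into the per-device data slot, instead of pointing at a static struct. A sketch of that round trip, assuming the NCTRL()/RSVD() macros this patch adds; my_probe() and my_use_send_setup() are illustrative names, while usb_set_serial_data() and usb_get_serial_data() are the real helpers.

  static int my_probe(struct usb_serial *serial,
                      const struct usb_device_id *id)
  {
          unsigned long device_flags = id->driver_info;
          u8 ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;

          if (device_flags & RSVD(ifnum))
                  return -ENODEV; /* leave reserved interfaces unbound */

          /* stash the word itself; an unsigned long fits in a void * */
          usb_set_serial_data(serial, (void *)device_flags);
          return 0;
  }

  static bool my_use_send_setup(struct usb_serial *serial)
  {
          unsigned long device_flags =
                  (unsigned long)usb_get_serial_data(serial);
          u8 ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;

          return !(device_flags & NCTRL(ifnum));
  }
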
@@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */ {DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC7304/MC7354 */ {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */ + {DEVICE_SWI(0x1199, 0x901e)}, /* Sierra Wireless EM7355 QDL */ {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */ {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */ @@ -165,6 +166,8 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */ {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */ + {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */ + {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */ {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ @@ -345,6 +348,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) break; case 2: dev_dbg(dev, "NMEA GPS interface found\n"); + sendsetup = true; break; case 3: dev_dbg(dev, "Modem port found\n"); diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 4c4ac4705ac0..a9c5564b6b65 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -773,9 +773,9 @@ static void sierra_close(struct usb_serial_port *port) kfree(urb->transfer_buffer); usb_free_urb(urb); usb_autopm_put_interface_async(serial->interface); - spin_lock(&portdata->lock); + spin_lock_irq(&portdata->lock); portdata->outstanding_urbs--; - spin_unlock(&portdata->lock); + spin_unlock_irq(&portdata->lock); } sierra_stop_rx_urbs(port); diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index e98b6e57b703..2674da40d9cd 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -66,6 +66,11 @@ DEVICE(flashloader, FLASHLOADER_IDS); 0x01) } DEVICE(google, GOOGLE_IDS); +/* Libtransistor USB console */ +#define LIBTRANSISTOR_IDS() \ + { USB_DEVICE(0x1209, 0x8b00) } +DEVICE(libtransistor, LIBTRANSISTOR_IDS); + /* ViVOpay USB Serial Driver */ #define VIVOPAY_IDS() \ { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */ @@ -80,6 +85,11 @@ DEVICE(vivopay, VIVOPAY_IDS); { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */ DEVICE(moto_modem, MOTO_IDS); +/* Motorola Tetra driver */ +#define MOTOROLA_TETRA_IDS() \ + { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */ +DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); + /* Novatel Wireless GPS driver */ #define NOVATEL_IDS() \ { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */ @@ -108,8 +118,10 @@ static struct usb_serial_driver * const serial_drivers[] = { &funsoft_device, &flashloader_device, &google_device, + &libtransistor_device, &vivopay_device, &moto_modem_device, + &motorola_tetra_device, &novatel_gps_device, &hp4x_device, &suunto_device, @@ -123,8 +135,10 @@ static const struct usb_device_id id_table[] = { FUNSOFT_IDS(), FLASHLOADER_IDS(), GOOGLE_IDS(), + LIBTRANSISTOR_IDS(), VIVOPAY_IDS(), MOTO_IDS(), + MOTOROLA_TETRA_IDS(), NOVATEL_IDS(), HP4X_IDS(), SUUNTO_IDS(), diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c index 12f4c5a91e62..c593ca8800e5 100644 --- 
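
The one-line sierra_close() change above (spin_lock to spin_lock_irq) is about lock context: portdata->lock is also taken from URB completion handlers, which may run in interrupt context, so a process-context holder must keep local interrupts off or a completion arriving on the same CPU could spin forever on a lock that CPU already holds. A sketch of the two call sites; my_portdata is an illustrative stand-in for the driver's per-port struct.

  struct my_portdata {
          spinlock_t lock;
          unsigned int outstanding_urbs;
  };

  static void my_close(struct my_portdata *portdata)
  {
          spin_lock_irq(&portdata->lock);   /* process context: IRQs on */
          portdata->outstanding_urbs--;
          spin_unlock_irq(&portdata->lock);
  }

  static void my_write_callback(struct urb *urb) /* may run in IRQ context */
  {
          struct my_portdata *portdata = urb->context;
          unsigned long flags;

          spin_lock_irqsave(&portdata->lock, flags); /* state unknown: save */
          portdata->outstanding_urbs--;
          spin_unlock_irqrestore(&portdata->lock, flags);
  }
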
a/drivers/usb/serial/usb_debug.c +++ b/drivers/usb/serial/usb_debug.c @@ -34,13 +34,15 @@ static const struct usb_device_id id_table[] = { }; static const struct usb_device_id dbc_id_table[] = { - { USB_DEVICE(0x1d6b, 0x0004) }, + { USB_DEVICE(0x1d6b, 0x0010) }, + { USB_DEVICE(0x1d6b, 0x0011) }, { }, }; static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(0x0525, 0x127a) }, - { USB_DEVICE(0x1d6b, 0x0004) }, + { USB_DEVICE(0x1d6b, 0x0010) }, + { USB_DEVICE(0x1d6b, 0x0011) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table_combined); diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c index 9f3317a940ef..879840ec0658 100644 --- a/drivers/usb/serial/visor.c +++ b/drivers/usb/serial/visor.c @@ -338,47 +338,48 @@ static int palm_os_3_probe(struct usb_serial *serial, goto exit; } - if (retval == sizeof(*connection_info)) { - connection_info = (struct visor_connection_info *) - transfer_buffer; - - num_ports = le16_to_cpu(connection_info->num_ports); - for (i = 0; i < num_ports; ++i) { - switch ( - connection_info->connections[i].port_function_id) { - case VISOR_FUNCTION_GENERIC: - string = "Generic"; - break; - case VISOR_FUNCTION_DEBUGGER: - string = "Debugger"; - break; - case VISOR_FUNCTION_HOTSYNC: - string = "HotSync"; - break; - case VISOR_FUNCTION_CONSOLE: - string = "Console"; - break; - case VISOR_FUNCTION_REMOTE_FILE_SYS: - string = "Remote File System"; - break; - default: - string = "unknown"; - break; - } - dev_info(dev, "%s: port %d, is for %s use\n", - serial->type->description, - connection_info->connections[i].port, string); - } + if (retval != sizeof(*connection_info)) { + dev_err(dev, "Invalid connection information received from device\n"); + retval = -ENODEV; + goto exit; } - /* - * Handle devices that report invalid stuff here. - */ + + connection_info = (struct visor_connection_info *)transfer_buffer; + + num_ports = le16_to_cpu(connection_info->num_ports); + + /* Handle devices that report invalid stuff here. 
*/ if (num_ports == 0 || num_ports > 2) { dev_warn(dev, "%s: No valid connect info available\n", serial->type->description); num_ports = 2; } + for (i = 0; i < num_ports; ++i) { + switch (connection_info->connections[i].port_function_id) { + case VISOR_FUNCTION_GENERIC: + string = "Generic"; + break; + case VISOR_FUNCTION_DEBUGGER: + string = "Debugger"; + break; + case VISOR_FUNCTION_HOTSYNC: + string = "HotSync"; + break; + case VISOR_FUNCTION_CONSOLE: + string = "Console"; + break; + case VISOR_FUNCTION_REMOTE_FILE_SYS: + string = "Remote File System"; + break; + default: + string = "unknown"; + break; + } + dev_info(dev, "%s: port %d, is for %s use\n", + serial->type->description, + connection_info->connections[i].port, string); + } dev_info(dev, "%s: Number of ports: %d\n", serial->type->description, num_ports); diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h index 1fcd758a961f..3734a25e09e5 100644 --- a/drivers/usb/storage/uas-detect.h +++ b/drivers/usb/storage/uas-detect.h @@ -112,6 +112,10 @@ static int uas_use_uas_driver(struct usb_interface *intf, } } + /* All Seagate disk enclosures have broken ATA pass-through support */ + if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2) + flags |= US_FL_NO_ATA_1X; + usb_stor_adjust_quirks(udev, &flags); if (flags & US_FL_IGNORE_UAS) { diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 63cf981ed81c..33a6d624c843 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -836,6 +836,12 @@ static int uas_slave_configure(struct scsi_device *sdev) if (devinfo->flags & US_FL_BROKEN_FUA) sdev->broken_fua = 1; + /* UAS also needs to support FL_ALWAYS_SYNC */ + if (devinfo->flags & US_FL_ALWAYS_SYNC) { + sdev->skip_ms_page_3f = 1; + sdev->skip_ms_page_8 = 1; + sdev->wce_default_on = 1; + } scsi_change_queue_depth(sdev, devinfo->qdepth - 2); return 0; } @@ -1076,20 +1082,19 @@ static int uas_post_reset(struct usb_interface *intf) return 0; err = uas_configure_endpoints(devinfo); - if (err) { + if (err && err != -ENODEV) shost_printk(KERN_ERR, shost, "%s: alloc streams error %d after reset", __func__, err); - return 1; - } + /* we must unblock the host in every case lest we deadlock */ spin_lock_irqsave(shost->host_lock, flags); scsi_report_bus_reset(shost, 0); spin_unlock_irqrestore(shost->host_lock, flags); scsi_unblock_requests(shost); - return 0; + return err ? 
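
The visor.c rework above tightens the handling of device-reported data: a short transfer is now an error instead of silently skipping the parse, and only then is the reported port count clamped before it is used as a loop bound. A standalone demo of that validation order; the 0/2 bounds match the visor code, while the function name and sizes are illustrative.

  #include <stdio.h>

  #define MAX_PORTS 2

  static int parse_port_count(int transferred, int expected,
                              unsigned int num_ports)
  {
          if (transferred != expected)
                  return -1;              /* short read: refuse to parse */
          if (num_ports == 0 || num_ports > MAX_PORTS)
                  num_ports = MAX_PORTS;  /* bogus count: use the default */
          return (int)num_ports;          /* now safe as a loop bound */
  }

  int main(void)
  {
          printf("%d\n", parse_port_count(18, 18, 2)); /* 2 */
          printf("%d\n", parse_port_count(18, 18, 9)); /* clamped to 2 */
          printf("%d\n", parse_port_count(4, 18, 2));  /* -1: short read */
          return 0;
  }
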
1 : 0; } static int uas_suspend(struct usb_interface *intf, pm_message_t message) diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index eb06d88b41d6..d100290628bd 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2113,6 +2113,13 @@ UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA ), +/* Reported by David Kozub */ +UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, + "JMicron", + "JMS567", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA), + /* * Reported by Alexandre Oliva * JMicron responds to USN and several other SCSI ioctls with a @@ -2130,6 +2137,13 @@ UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA ), +/* Reported by Teijo Kinnunen */ +UNUSUAL_DEV( 0x152d, 0x2567, 0x0117, 0x0117, + "JMicron", + "USB to ATA/ATAPI Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA ), + /* Reported-by George Cherian */ UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999, "JMicron", @@ -2326,6 +2340,15 @@ UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x0100, "Micro Mini 1GB", USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ), +/* "G-DRIVE" external HDD hangs on write without these. + * Patch submitted by Alexander Kappner + */ +UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999, + "SimpleTech", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_ALWAYS_SYNC), + /* * Nick Bowler * SCSI stack spams (otherwise harmless) error messages. diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index cde115359793..f15aa47c54a9 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -142,6 +142,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES), +/* Reported-by: David Kozub */ +UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, + "JMicron", + "JMS567", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA), + /* Reported-by: Hans de Goede */ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, "VIA", @@ -149,6 +156,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_ATA_1X), +/* Reported-by: Icenowy Zheng */ +UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999, + "Norelsys", + "NS1068X", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* Reported-by: Takeo Nakayama */ UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999, "JMicron", @@ -169,3 +183,12 @@ UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999, "External HDD", USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_REPORT_OPCODES), + +/* "G-DRIVE" external HDD hangs on write without these. 
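Each UNUSUAL_DEV() entry being added above ties a VID:PID pair and a bcdDevice range to a bitmask of US_FL_* quirks that usb-storage ORs into the device's flags at probe time. A minimal userspace sketch of that lookup; the flag bit values here are invented for the demo (the real ones live in include/linux/usb_usual.h):

#include <stdio.h>
#include <stdint.h>

#define FL_BROKEN_FUA	(1u << 0)	/* invented bit values; the kernel */
#define FL_ALWAYS_SYNC	(1u << 1)	/* uses US_FL_* from usb_usual.h   */

struct quirk {
	uint16_t vid, pid, bcd_lo, bcd_hi;
	uint32_t flags;
};

static const struct quirk quirks[] = {
	{ 0x152d, 0x0578, 0x0000, 0x9999, FL_BROKEN_FUA },	/* JMS567 */
	{ 0x4971, 0x8024, 0x0000, 0x9999, FL_ALWAYS_SYNC },	/* "G-DRIVE" */
};

static uint32_t lookup_quirks(uint16_t vid, uint16_t pid, uint16_t bcd)
{
	uint32_t flags = 0;
	unsigned int i;

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirks[i].vid == vid && quirks[i].pid == pid &&
		    bcd >= quirks[i].bcd_lo && bcd <= quirks[i].bcd_hi)
			flags |= quirks[i].flags;	/* quirks accumulate */
	return flags;
}

int main(void)
{
	printf("flags for 152d:0578 = %#x\n",
	       (unsigned int)lookup_quirks(0x152d, 0x0578, 0x0100));
	return 0;
}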
+ * Patch submitted by Alexander Kappner + */ +UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999, + "SimpleTech", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_ALWAYS_SYNC), diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/typec_wcove.c index e9c4e784a9cb..74607bb6cfc4 100644 --- a/drivers/usb/typec/typec_wcove.c +++ b/drivers/usb/typec/typec_wcove.c @@ -15,6 +15,8 @@ #include #include #include +#include +#include /* Register offsets */ #define WCOVE_CHGRIRQ0 0x4e09 @@ -86,6 +88,7 @@ struct wcove_typec { struct typec_port *port; struct typec_capability cap; struct typec_partner *partner; + struct pci_dev *xhci_dev; }; enum wcove_typec_func { @@ -108,6 +111,18 @@ enum wcove_typec_role { static guid_t guid = GUID_INIT(0x482383f0, 0x2876, 0x4e49, 0x86, 0x85, 0xdb, 0x66, 0x21, 0x1a, 0xf0, 0x37); +static int wcove_access_xhci(struct wcove_typec *wcove, bool enable) +{ + if (wcove->xhci_dev) { + if (enable) { + pm_runtime_get_sync(&wcove->xhci_dev->dev); + } else { + pm_runtime_put(&wcove->xhci_dev->dev); + } + } + return 0; +} + static int wcove_typec_func(struct wcove_typec *wcove, enum wcove_typec_func func, int param) { @@ -205,7 +220,9 @@ static irqreturn_t wcove_typec_irq(int irq, void *data) WCOVE_ORIENTATION_NORMAL); /* This makes sure the device controller is disconnected */ + wcove_access_xhci(wcove, true); wcove_typec_func(wcove, WCOVE_FUNC_ROLE, WCOVE_ROLE_HOST); + wcove_access_xhci(wcove, false); /* Port to default role */ typec_set_data_role(wcove->port, TYPEC_DEVICE); @@ -257,11 +274,15 @@ static irqreturn_t wcove_typec_irq(int irq, void *data) } if (role == TYPEC_SINK) { + wcove_access_xhci(wcove, true); wcove_typec_func(wcove, WCOVE_FUNC_ROLE, WCOVE_ROLE_DEVICE); + wcove_access_xhci(wcove, false); typec_set_data_role(wcove->port, TYPEC_DEVICE); typec_set_pwr_role(wcove->port, TYPEC_SINK); } else { + wcove_access_xhci(wcove, true); wcove_typec_func(wcove, WCOVE_FUNC_ROLE, WCOVE_ROLE_HOST); + wcove_access_xhci(wcove, false); typec_set_pwr_role(wcove->port, TYPEC_SOURCE); typec_set_data_role(wcove->port, TYPEC_HOST); } @@ -342,6 +363,8 @@ static int wcove_typec_probe(struct platform_device *pdev) regmap_write(wcove->regmap, USBC_IRQMASK2, val & ~USBC_IRQMASK2_ALL); platform_set_drvdata(pdev, wcove); + + wcove->xhci_dev = pci_get_class(PCI_CLASS_SERIAL_USB_XHCI, NULL); return 0; } diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile index b57891c1fd31..7afbea512207 100644 --- a/drivers/usb/typec/ucsi/Makefile +++ b/drivers/usb/typec/ucsi/Makefile @@ -5,6 +5,6 @@ obj-$(CONFIG_TYPEC_UCSI) += typec_ucsi.o typec_ucsi-y := ucsi.o -typec_ucsi-$(CONFIG_FTRACE) += trace.o +typec_ucsi-$(CONFIG_TRACING) += trace.o obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 714c5bcedf2b..251f5d66651e 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -31,7 +31,7 @@ * difficult to estimate the time it takes for the system to process the command * before it is actually passed to the PPM. 
*/ -#define UCSI_TIMEOUT_MS 1000 +#define UCSI_TIMEOUT_MS 5000 /* * UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests @@ -346,6 +346,19 @@ static void ucsi_connector_change(struct work_struct *work) } if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) { + typec_set_pwr_role(con->port, con->status.pwr_dir); + + switch (con->status.partner_type) { + case UCSI_CONSTAT_PARTNER_TYPE_UFP: + typec_set_data_role(con->port, TYPEC_HOST); + break; + case UCSI_CONSTAT_PARTNER_TYPE_DFP: + typec_set_data_role(con->port, TYPEC_DEVICE); + break; + default: + break; + } + if (con->status.connected) ucsi_register_partner(con); else diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index cabd47612b0a..494d2a49203a 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -82,6 +82,11 @@ static int ucsi_acpi_probe(struct platform_device *pdev) return -ENODEV; } + /* This will make sure we can use ioremap_nocache() */ + status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1); + if (ACPI_FAILURE(status)) + return -ENOMEM; + /* * NOTE: The memory region for the data structures is used also in an * operation region, which means ACPI has already reserved it. Therefore diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig index eeefa29f8aa2..a20b65cb6678 100644 --- a/drivers/usb/usbip/Kconfig +++ b/drivers/usb/usbip/Kconfig @@ -27,7 +27,7 @@ config USBIP_VHCI_HCD config USBIP_VHCI_HC_PORTS int "Number of ports per USB/IP virtual host controller" - range 1 31 + range 1 15 default 8 depends on USBIP_VHCI_HCD ---help--- diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h index 910f027773aa..84c0599b45b7 100644 --- a/drivers/usb/usbip/stub.h +++ b/drivers/usb/usbip/stub.h @@ -87,6 +87,7 @@ struct bus_id_priv { struct stub_device *sdev; struct usb_device *udev; char shutdown_busid; + spinlock_t busid_lock; }; /* stub_priv is allocated from stub_priv_cache */ @@ -97,6 +98,7 @@ extern struct usb_device_driver stub_driver; /* stub_main.c */ struct bus_id_priv *get_busid_priv(const char *busid); +void put_busid_priv(struct bus_id_priv *bid); int del_match_busid(char *busid); void stub_device_cleanup_urbs(struct stub_device *sdev); diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index c653ce533430..cc847f2edf38 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -87,6 +87,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, goto err; sdev->ud.tcp_socket = socket; + sdev->ud.sockfd = sockfd; spin_unlock_irq(&sdev->ud.lock); @@ -163,8 +164,7 @@ static void stub_shutdown_connection(struct usbip_device *ud) * step 1? */ if (ud->tcp_socket) { - dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n", - ud->tcp_socket); + dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd); kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); } @@ -187,6 +187,7 @@ static void stub_shutdown_connection(struct usbip_device *ud) if (ud->tcp_socket) { sockfd_put(ud->tcp_socket); ud->tcp_socket = NULL; + ud->sockfd = -1; } /* 3. 
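The sockfd bookkeeping added above (ud->sockfd set in store_sockfd() and cleared on shutdown) lets debug output identify a connection by its file descriptor instead of printing a kernel pointer. A kernel-style sketch of the pattern; the demo_* names are hypothetical:

#include <linux/net.h>

struct demo_conn {
	struct socket *tcp_socket;
	int sockfd;			/* -1 while unconnected */
};

static void demo_attach(struct demo_conn *c, struct socket *s, int fd)
{
	c->tcp_socket = s;
	c->sockfd = fd;			/* safe to show in logs/sysfs */
}

static void demo_detach(struct demo_conn *c)
{
	if (c->tcp_socket) {
		sockfd_put(c->tcp_socket);	/* drop the lookup reference */
		c->tcp_socket = NULL;
		c->sockfd = -1;
	}
}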
free used data */ @@ -281,6 +282,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev) sdev->ud.status = SDEV_ST_AVAILABLE; spin_lock_init(&sdev->ud.lock); sdev->ud.tcp_socket = NULL; + sdev->ud.sockfd = -1; INIT_LIST_HEAD(&sdev->priv_init); INIT_LIST_HEAD(&sdev->priv_tx); @@ -312,9 +314,9 @@ static int stub_probe(struct usb_device *udev) struct stub_device *sdev = NULL; const char *udev_busid = dev_name(&udev->dev); struct bus_id_priv *busid_priv; - int rc; + int rc = 0; - dev_dbg(&udev->dev, "Enter\n"); + dev_dbg(&udev->dev, "Enter probe\n"); /* check we should claim or not by busid_table */ busid_priv = get_busid_priv(udev_busid); @@ -329,13 +331,15 @@ static int stub_probe(struct usb_device *udev) * other matched drivers by the driver core. * See driver_probe_device() in driver/base/dd.c */ - return -ENODEV; + rc = -ENODEV; + goto call_put_busid_priv; } if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) { dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n", udev_busid); - return -ENODEV; + rc = -ENODEV; + goto call_put_busid_priv; } if (!strcmp(udev->bus->bus_name, "vhci_hcd")) { @@ -343,13 +347,16 @@ static int stub_probe(struct usb_device *udev) "%s is attached on vhci_hcd... skip!\n", udev_busid); - return -ENODEV; + rc = -ENODEV; + goto call_put_busid_priv; } /* ok, this is my device */ sdev = stub_device_alloc(udev); - if (!sdev) - return -ENOMEM; + if (!sdev) { + rc = -ENOMEM; + goto call_put_busid_priv; + } dev_info(&udev->dev, "usbip-host: register new device (bus %u dev %u)\n", @@ -381,7 +388,9 @@ static int stub_probe(struct usb_device *udev) } busid_priv->status = STUB_BUSID_ALLOC; - return 0; + rc = 0; + goto call_put_busid_priv; + err_files: usb_hub_release_port(udev->parent, udev->portnum, (struct usb_dev_state *) udev); @@ -391,6 +400,9 @@ static int stub_probe(struct usb_device *udev) busid_priv->sdev = NULL; stub_device_free(sdev); + +call_put_busid_priv: + put_busid_priv(busid_priv); return rc; } @@ -416,7 +428,7 @@ static void stub_disconnect(struct usb_device *udev) struct bus_id_priv *busid_priv; int rc; - dev_dbg(&udev->dev, "Enter\n"); + dev_dbg(&udev->dev, "Enter disconnect\n"); busid_priv = get_busid_priv(udev_busid); if (!busid_priv) { @@ -429,7 +441,7 @@ static void stub_disconnect(struct usb_device *udev) /* get stub_device */ if (!sdev) { dev_err(&udev->dev, "could not get device"); - return; + goto call_put_busid_priv; } dev_set_drvdata(&udev->dev, NULL); @@ -444,12 +456,12 @@ static void stub_disconnect(struct usb_device *udev) (struct usb_dev_state *) udev); if (rc) { dev_dbg(&udev->dev, "unable to release port\n"); - return; + goto call_put_busid_priv; } /* If usb reset is called from event handler */ if (usbip_in_eh(current)) - return; + goto call_put_busid_priv; /* shutdown the current connection */ shutdown_busid(busid_priv); @@ -460,12 +472,11 @@ static void stub_disconnect(struct usb_device *udev) busid_priv->sdev = NULL; stub_device_free(sdev); - if (busid_priv->status == STUB_BUSID_ALLOC) { + if (busid_priv->status == STUB_BUSID_ALLOC) busid_priv->status = STUB_BUSID_ADDED; - } else { - busid_priv->status = STUB_BUSID_OTHER; - del_match_busid((char *)udev_busid); - } + +call_put_busid_priv: + put_busid_priv(busid_priv); } #ifdef CONFIG_PM diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c index 7170404e8979..108dd65fbfbc 100644 --- a/drivers/usb/usbip/stub_main.c +++ b/drivers/usb/usbip/stub_main.c @@ -28,6 +28,7 @@ #define DRIVER_DESC "USB/IP Host Driver" struct kmem_cache *stub_priv_cache; + /* 
* busid_tables defines matching busids that usbip can grab. A user can change * dynamically what device is locally used and what device is exported to a @@ -39,6 +40,8 @@ static spinlock_t busid_table_lock; static void init_busid_table(void) { + int i; + /* * This also sets the bus_table[i].status to * STUB_BUSID_OTHER, which is 0. @@ -46,6 +49,9 @@ static void init_busid_table(void) memset(busid_table, 0, sizeof(busid_table)); spin_lock_init(&busid_table_lock); + + for (i = 0; i < MAX_BUSID; i++) + spin_lock_init(&busid_table[i].busid_lock); } /* @@ -57,15 +63,20 @@ static int get_busid_idx(const char *busid) int i; int idx = -1; - for (i = 0; i < MAX_BUSID; i++) + for (i = 0; i < MAX_BUSID; i++) { + spin_lock(&busid_table[i].busid_lock); if (busid_table[i].name[0]) if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) { idx = i; + spin_unlock(&busid_table[i].busid_lock); break; } + spin_unlock(&busid_table[i].busid_lock); + } return idx; } +/* Returns holding busid_lock. Should call put_busid_priv() to unlock */ struct bus_id_priv *get_busid_priv(const char *busid) { int idx; @@ -73,13 +84,22 @@ struct bus_id_priv *get_busid_priv(const char *busid) spin_lock(&busid_table_lock); idx = get_busid_idx(busid); - if (idx >= 0) + if (idx >= 0) { bid = &(busid_table[idx]); + /* get busid_lock before returning */ + spin_lock(&bid->busid_lock); + } spin_unlock(&busid_table_lock); return bid; } +void put_busid_priv(struct bus_id_priv *bid) +{ + if (bid) + spin_unlock(&bid->busid_lock); +} + static int add_match_busid(char *busid) { int i; @@ -92,15 +112,19 @@ static int add_match_busid(char *busid) goto out; } - for (i = 0; i < MAX_BUSID; i++) + for (i = 0; i < MAX_BUSID; i++) { + spin_lock(&busid_table[i].busid_lock); if (!busid_table[i].name[0]) { strlcpy(busid_table[i].name, busid, BUSID_SIZE); if ((busid_table[i].status != STUB_BUSID_ALLOC) && (busid_table[i].status != STUB_BUSID_REMOV)) busid_table[i].status = STUB_BUSID_ADDED; ret = 0; + spin_unlock(&busid_table[i].busid_lock); break; } + spin_unlock(&busid_table[i].busid_lock); + } out: spin_unlock(&busid_table_lock); @@ -121,6 +145,8 @@ int del_match_busid(char *busid) /* found */ ret = 0; + spin_lock(&busid_table[idx].busid_lock); + if (busid_table[idx].status == STUB_BUSID_OTHER) memset(busid_table[idx].name, 0, BUSID_SIZE); @@ -128,6 +154,7 @@ int del_match_busid(char *busid) (busid_table[idx].status != STUB_BUSID_ADDED)) busid_table[idx].status = STUB_BUSID_REMOV; + spin_unlock(&busid_table[idx].busid_lock); out: spin_unlock(&busid_table_lock); @@ -140,9 +167,12 @@ static ssize_t match_busid_show(struct device_driver *drv, char *buf) char *out = buf; spin_lock(&busid_table_lock); - for (i = 0; i < MAX_BUSID; i++) + for (i = 0; i < MAX_BUSID; i++) { + spin_lock(&busid_table[i].busid_lock); if (busid_table[i].name[0]) out += sprintf(out, "%s ", busid_table[i].name); + spin_unlock(&busid_table[i].busid_lock); + } spin_unlock(&busid_table_lock); out += sprintf(out, "\n"); @@ -183,6 +213,51 @@ static ssize_t match_busid_store(struct device_driver *dev, const char *buf, } static DRIVER_ATTR_RW(match_busid); +static int do_rebind(char *busid, struct bus_id_priv *busid_priv) +{ + int ret; + + /* device_attach() callers should hold parent lock for USB */ + if (busid_priv->udev->dev.parent) + device_lock(busid_priv->udev->dev.parent); + ret = device_attach(&busid_priv->udev->dev); + if (busid_priv->udev->dev.parent) + device_unlock(busid_priv->udev->dev.parent); + if (ret < 0) { + dev_err(&busid_priv->udev->dev, "rebind failed\n"); + return ret; 
+ } + return 0; +} + +static void stub_device_rebind(void) +{ +#if IS_MODULE(CONFIG_USBIP_HOST) + struct bus_id_priv *busid_priv; + int i; + + /* update status to STUB_BUSID_OTHER so probe ignores the device */ + spin_lock(&busid_table_lock); + for (i = 0; i < MAX_BUSID; i++) { + if (busid_table[i].name[0] && + busid_table[i].shutdown_busid) { + busid_priv = &(busid_table[i]); + busid_priv->status = STUB_BUSID_OTHER; + } + } + spin_unlock(&busid_table_lock); + + /* now run rebind - no need to hold locks. driver files are removed */ + for (i = 0; i < MAX_BUSID; i++) { + if (busid_table[i].name[0] && + busid_table[i].shutdown_busid) { + busid_priv = &(busid_table[i]); + do_rebind(busid_table[i].name, busid_priv); + } + } +#endif +} + static ssize_t rebind_store(struct device_driver *dev, const char *buf, size_t count) { @@ -200,11 +275,17 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf, if (!bid) return -ENODEV; - ret = device_attach(&bid->udev->dev); - if (ret < 0) { - dev_err(&bid->udev->dev, "rebind failed\n"); + /* mark the device for deletion so probe ignores it during rescan */ + bid->status = STUB_BUSID_OTHER; + /* release the busid lock */ + put_busid_priv(bid); + + ret = do_rebind((char *) buf, bid); + if (ret < 0) return ret; - } + + /* delete device from busid_table */ + del_match_busid((char *) buf); return count; } @@ -251,11 +332,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev) struct stub_priv *priv; struct urb *urb; - dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev); + dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n"); while ((priv = stub_priv_pop(sdev))) { urb = priv->urb; - dev_dbg(&sdev->udev->dev, "free urb %p\n", urb); + dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n", + priv->seqnum); usb_kill_urb(urb); kmem_cache_free(stub_priv_cache, priv); @@ -325,6 +407,9 @@ static void __exit usbip_host_exit(void) */ usb_deregister_device_driver(&stub_driver); + /* initiate scan to attach devices */ + stub_device_rebind(); + kmem_cache_destroy(stub_priv_cache); } diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c index 191b176ffedf..5b807185f79e 100644 --- a/drivers/usb/usbip/stub_rx.c +++ b/drivers/usb/usbip/stub_rx.c @@ -225,9 +225,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev, if (priv->seqnum != pdu->u.cmd_unlink.seqnum) continue; - dev_info(&priv->urb->dev->dev, "unlink urb %p\n", - priv->urb); - /* * This matched urb is not completed yet (i.e., be in * flight in usb hcd hardware/driver). 
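get_busid_priv() above now returns with the entry's busid_lock held, and every caller unwinds through put_busid_priv(); that pairing is what the goto call_put_busid_priv rewrites in stub_probe() and stub_disconnect() enforce. A runnable pthreads analogue of the return-locked lookup (a userspace stand-in, not the kernel code):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAX_BUSID 16
#define BUSID_SIZE 32

struct bus_id_priv {
	char name[BUSID_SIZE];
	pthread_mutex_t lock;
};

static struct bus_id_priv busid_table[MAX_BUSID];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns with entry->lock held; caller must call put_busid_priv(). */
static struct bus_id_priv *get_busid_priv(const char *busid)
{
	struct bus_id_priv *bid = NULL;
	int i;

	pthread_mutex_lock(&table_lock);
	for (i = 0; i < MAX_BUSID; i++) {
		if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
			bid = &busid_table[i];
			/* take the entry lock before dropping the table
			 * lock so the entry cannot vanish in between */
			pthread_mutex_lock(&bid->lock);
			break;
		}
	}
	pthread_mutex_unlock(&table_lock);
	return bid;
}

static void put_busid_priv(struct bus_id_priv *bid)
{
	if (bid)
		pthread_mutex_unlock(&bid->lock);
}

int main(void)
{
	struct bus_id_priv *bid;
	int i;

	for (i = 0; i < MAX_BUSID; i++)
		pthread_mutex_init(&busid_table[i].lock, NULL);
	strncpy(busid_table[3].name, "1-2.3", BUSID_SIZE - 1);

	bid = get_busid_priv("1-2.3");
	if (bid) {
		printf("found %s (entry lock held)\n", bid->name);
		put_busid_priv(bid);
	}
	return 0;
}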
Now we are @@ -266,8 +263,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev, ret = usb_unlink_urb(priv->urb); if (ret != -EINPROGRESS) dev_err(&priv->urb->dev->dev, - "failed to unlink a urb %p, ret %d\n", - priv->urb, ret); + "failed to unlink a urb # %lu, ret %d\n", + priv->seqnum, ret); return 0; } @@ -336,23 +333,34 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev, return priv; } -static int get_pipe(struct stub_device *sdev, int epnum, int dir) +static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu) { struct usb_device *udev = sdev->udev; struct usb_host_endpoint *ep; struct usb_endpoint_descriptor *epd = NULL; + int epnum = pdu->base.ep; + int dir = pdu->base.direction; + + if (epnum < 0 || epnum > 15) + goto err_ret; if (dir == USBIP_DIR_IN) ep = udev->ep_in[epnum & 0x7f]; else ep = udev->ep_out[epnum & 0x7f]; - if (!ep) { - dev_err(&sdev->udev->dev, "no such endpoint?, %d\n", - epnum); - BUG(); - } + if (!ep) + goto err_ret; epd = &ep->desc; + + /* validate transfer_buffer_length */ + if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) { + dev_err(&sdev->udev->dev, + "CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n", + pdu->u.cmd_submit.transfer_buffer_length); + return -1; + } + if (usb_endpoint_xfer_control(epd)) { if (dir == USBIP_DIR_OUT) return usb_sndctrlpipe(udev, epnum); @@ -375,15 +383,31 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir) } if (usb_endpoint_xfer_isoc(epd)) { + /* validate packet size and number of packets */ + unsigned int maxp, packets, bytes; + + maxp = usb_endpoint_maxp(epd); + maxp *= usb_endpoint_maxp_mult(epd); + bytes = pdu->u.cmd_submit.transfer_buffer_length; + packets = DIV_ROUND_UP(bytes, maxp); + + if (pdu->u.cmd_submit.number_of_packets < 0 || + pdu->u.cmd_submit.number_of_packets > packets) { + dev_err(&sdev->udev->dev, + "CMD_SUBMIT: isoc invalid num packets %d\n", + pdu->u.cmd_submit.number_of_packets); + return -1; + } if (dir == USBIP_DIR_OUT) return usb_sndisocpipe(udev, epnum); else return usb_rcvisocpipe(udev, epnum); } +err_ret: /* NOT REACHED */ - dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum); - return 0; + dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum); + return -1; } static void masking_bogus_flags(struct urb *urb) @@ -447,7 +471,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, struct stub_priv *priv; struct usbip_device *ud = &sdev->ud; struct usb_device *udev = sdev->udev; - int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction); + int pipe = get_pipe(sdev, pdu); + + if (pipe == -1) + return; priv = stub_priv_alloc(sdev, pdu); if (!priv) @@ -466,7 +493,8 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, } /* allocate urb transfer buffer, if needed */ - if (pdu->u.cmd_submit.transfer_buffer_length > 0) { + if (pdu->u.cmd_submit.transfer_buffer_length > 0 && + pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) { priv->urb->transfer_buffer = kzalloc(pdu->u.cmd_submit.transfer_buffer_length, GFP_KERNEL); diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c index be50cef645d8..96aa375b80d9 100644 --- a/drivers/usb/usbip/stub_tx.c +++ b/drivers/usb/usbip/stub_tx.c @@ -102,7 +102,7 @@ void stub_complete(struct urb *urb) /* link a urb to the queue of tx. 
*/ spin_lock_irqsave(&sdev->priv_lock, flags); if (sdev->ud.tcp_socket == NULL) { - usbip_dbg_stub_tx("ignore urb for closed connection %p", urb); + usbip_dbg_stub_tx("ignore urb for closed connection\n"); /* It will be freed in stub_device_cleanup_urbs(). */ } else if (priv->unlinking) { stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status); @@ -181,6 +181,13 @@ static int stub_send_ret_submit(struct stub_device *sdev) memset(&pdu_header, 0, sizeof(pdu_header)); memset(&msg, 0, sizeof(msg)); + if (urb->actual_length > 0 && !urb->transfer_buffer) { + dev_err(&sdev->udev->dev, + "urb: actual_length %d transfer_buffer null\n", + urb->actual_length); + return -1; + } + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) iovnum = 2 + urb->number_of_packets; else @@ -197,8 +204,8 @@ static int stub_send_ret_submit(struct stub_device *sdev) /* 1. setup usbip_header */ setup_ret_submit_pdu(&pdu_header, urb); - usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", - pdu_header.base.seqnum, urb); + usbip_dbg_stub_tx("setup txdata seqnum: %d\n", + pdu_header.base.seqnum); usbip_header_correct_endian(&pdu_header, 1); iov[iovnum].iov_base = &pdu_header; diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index 2281f3562870..7f0d22131121 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -105,7 +105,7 @@ static void usbip_dump_usb_device(struct usb_device *udev) dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)", udev->devnum, udev->devpath, usb_speed_string(udev->speed)); - pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport); + pr_debug("tt hub ttport %d\n", udev->ttport); dev_dbg(dev, " "); for (i = 0; i < 16; i++) @@ -138,12 +138,8 @@ static void usbip_dump_usb_device(struct usb_device *udev) } pr_debug("\n"); - dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus); - - dev_dbg(dev, - "descriptor %p, config %p, actconfig %p, rawdescriptors %p\n", - &udev->descriptor, udev->config, - udev->actconfig, udev->rawdescriptors); + dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev), + udev->bus->bus_name); dev_dbg(dev, "have_langid %d, string_langid %d\n", udev->have_langid, udev->string_langid); @@ -251,9 +247,6 @@ void usbip_dump_urb(struct urb *urb) dev = &urb->dev->dev; - dev_dbg(dev, " urb :%p\n", urb); - dev_dbg(dev, " dev :%p\n", urb->dev); - usbip_dump_usb_device(urb->dev); dev_dbg(dev, " pipe :%08x ", urb->pipe); @@ -262,11 +255,9 @@ void usbip_dump_urb(struct urb *urb) dev_dbg(dev, " status :%d\n", urb->status); dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags); - dev_dbg(dev, " transfer_buffer :%p\n", urb->transfer_buffer); dev_dbg(dev, " transfer_buffer_length:%d\n", urb->transfer_buffer_length); dev_dbg(dev, " actual_length :%d\n", urb->actual_length); - dev_dbg(dev, " setup_packet :%p\n", urb->setup_packet); if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL) usbip_dump_usb_ctrlrequest( @@ -276,8 +267,6 @@ void usbip_dump_urb(struct urb *urb) dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets); dev_dbg(dev, " interval :%d\n", urb->interval); dev_dbg(dev, " error_count :%d\n", urb->error_count); - dev_dbg(dev, " context :%p\n", urb->context); - dev_dbg(dev, " complete :%p\n", urb->complete); } EXPORT_SYMBOL_GPL(usbip_dump_urb); @@ -331,26 +320,20 @@ int usbip_recv(struct socket *sock, void *buf, int size) struct msghdr msg = {.msg_flags = MSG_NOSIGNAL}; int total = 0; + if (!sock || !buf || !size) + return -EINVAL; + iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, 
&iov, 1, size); usbip_dbg_xmit("enter\n"); - if (!sock || !buf || !size) { - pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf, - size); - return -EINVAL; - } - do { - int sz = msg_data_left(&msg); + msg_data_left(&msg); sock->sk->sk_allocation = GFP_NOIO; result = sock_recvmsg(sock, &msg, MSG_WAITALL); - if (result <= 0) { - pr_debug("receive sock %p buf %p size %u ret %d total %d\n", - sock, buf + total, sz, result, total); + if (result <= 0) goto err; - } total += result; } while (msg_data_left(&msg)); diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index 3050fc99a417..c81c44c13a56 100644 --- a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h @@ -257,7 +257,7 @@ enum usbip_side { #define VUDC_EVENT_ERROR_USB (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) #define VUDC_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) -#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE) +#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE) #define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET) #define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET) #define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) @@ -270,6 +270,7 @@ struct usbip_device { /* lock for status */ spinlock_t lock; + int sockfd; struct socket *tcp_socket; struct task_struct *tcp_rx; diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c index f1635662c299..f8f7f3803a99 100644 --- a/drivers/usb/usbip/usbip_event.c +++ b/drivers/usb/usbip/usbip_event.c @@ -105,10 +105,6 @@ static void event_handler(struct work_struct *work) unset_event(ud, USBIP_EH_UNUSABLE); } - /* Stop the error handler. */ - if (ud->event & USBIP_EH_BYE) - usbip_dbg_eh("removed %p\n", ud); - wake_up(&ud->eh_waitq); } } diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 11b9a22799cc..05aa1ba351b6 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -368,6 +368,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, usbip_dbg_vhci_rh(" ClearHubFeature\n"); break; case ClearPortFeature: + if (rhport < 0) + goto error; switch (wValue) { case USB_PORT_FEAT_SUSPEND: if (hcd->speed == HCD_USB3) { @@ -525,11 +527,16 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, goto error; } + if (rhport < 0) + goto error; + vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND; break; case USB_PORT_FEAT_POWER: usbip_dbg_vhci_rh( " SetPortFeature: USB_PORT_FEAT_POWER\n"); + if (rhport < 0) + goto error; if (hcd->speed == HCD_USB3) vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER; else @@ -538,6 +545,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, case USB_PORT_FEAT_BH_PORT_RESET: usbip_dbg_vhci_rh( " SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n"); + if (rhport < 0) + goto error; /* Applicable only for USB3.0 hub */ if (hcd->speed != HCD_USB3) { pr_err("USB_PORT_FEAT_BH_PORT_RESET req not " @@ -548,6 +557,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, case USB_PORT_FEAT_RESET: usbip_dbg_vhci_rh( " SetPortFeature: USB_PORT_FEAT_RESET\n"); + if (rhport < 0) + goto error; /* if it's already enabled, disable */ if (hcd->speed == HCD_USB3) { vhci_hcd->port_status[rhport] = 0; @@ -568,6 +579,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, default: usbip_dbg_vhci_rh(" SetPortFeature: default %d\n", wValue); + if (rhport < 0) + goto error; 
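The isochronous sanity check added to get_pipe() above (and mirrored in vudc_rx.c further down) bounds the peer-supplied number_of_packets by what the endpoint can actually carry: the transfer length divided by wMaxPacketSize times the high-bandwidth multiplier, rounded up. The arithmetic in isolation, with invented example values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int maxp = 1024;	/* usb_endpoint_maxp() */
	unsigned int mult = 3;		/* usb_endpoint_maxp_mult(), 1..3 */
	unsigned int bytes = 24576;	/* transfer_buffer_length from the PDU */
	int number_of_packets = 9;	/* claimed by the remote peer */

	unsigned int packets = DIV_ROUND_UP(bytes, maxp * mult);	/* = 8 */

	if (number_of_packets < 0 ||
	    (unsigned int)number_of_packets > packets)
		printf("reject: isoc invalid num packets %d (max %u)\n",
		       number_of_packets, packets);
	else
		printf("accept: %d packets (max %u)\n",
		       number_of_packets, packets);
	return 0;
}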
if (hcd->speed == HCD_USB3) { if ((vhci_hcd->port_status[rhport] & USB_SS_PORT_STAT_POWER) != 0) { @@ -670,9 +683,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag struct vhci_device *vdev; unsigned long flags; - usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n", - hcd, urb, mem_flags); - if (portnum > VHCI_HC_PORTS) { pr_err("invalid port number %d\n", portnum); return -ENODEV; @@ -836,8 +846,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) struct vhci_device *vdev; unsigned long flags; - pr_info("dequeue a urb %p\n", urb); - spin_lock_irqsave(&vhci->lock, flags); priv = urb->hcpriv; @@ -865,7 +873,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) /* tcp connection is closed */ spin_lock(&vdev->priv_lock); - pr_info("device %p seems to be disconnected\n", vdev); list_del(&priv->list); kfree(priv); urb->hcpriv = NULL; @@ -877,8 +884,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) * vhci_rx will receive RET_UNLINK and give back the URB. * Otherwise, we give back it here. */ - pr_info("gives back urb %p\n", urb); - usb_hcd_unlink_urb_from_ep(hcd, urb); spin_unlock_irqrestore(&vhci->lock, flags); @@ -906,8 +911,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) unlink->unlink_seqnum = priv->seqnum; - pr_info("device %p seems to be still connected\n", vdev); - /* send cmd_unlink and try to cancel the pending URB in the * peer */ list_add_tail(&unlink->list, &vdev->unlink_tx); @@ -989,7 +992,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud) /* need this? see stub_dev.c */ if (ud->tcp_socket) { - pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket); + pr_debug("shutdown tcp_socket %d\n", ud->sockfd); kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); } @@ -1008,6 +1011,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud) if (vdev->ud.tcp_socket) { sockfd_put(vdev->ud.tcp_socket); vdev->ud.tcp_socket = NULL; + vdev->ud.sockfd = -1; } pr_info("release socket\n"); @@ -1054,6 +1058,7 @@ static void vhci_device_reset(struct usbip_device *ud) if (ud->tcp_socket) { sockfd_put(ud->tcp_socket); ud->tcp_socket = NULL; + ud->sockfd = -1; } ud->status = VDEV_ST_NULL; @@ -1112,7 +1117,6 @@ static int hcd_name_to_id(const char *name) static int vhci_setup(struct usb_hcd *hcd) { struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller)); - hcd->self.sg_tablesize = ~0; if (usb_hcd_is_primary_hcd(hcd)) { vhci->vhci_hcd_hs = hcd_to_vhci_hcd(hcd); vhci->vhci_hcd_hs->vhci = vhci; diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c index ef2f2d5ca6b2..1343037d00f9 100644 --- a/drivers/usb/usbip/vhci_rx.c +++ b/drivers/usb/usbip/vhci_rx.c @@ -37,24 +37,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum) urb = priv->urb; status = urb->status; - usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n", - urb, priv, seqnum); + usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum); switch (status) { case -ENOENT: /* fall through */ case -ECONNRESET: - dev_info(&urb->dev->dev, - "urb %p was unlinked %ssynchronuously.\n", urb, - status == -ENOENT ? "" : "a"); + dev_dbg(&urb->dev->dev, + "urb seq# %u was unlinked %ssynchronuously\n", + seqnum, status == -ENOENT ? 
"" : "a"); break; case -EINPROGRESS: /* no info output */ break; default: - dev_info(&urb->dev->dev, - "urb %p may be in a error, status %d\n", urb, - status); + dev_dbg(&urb->dev->dev, + "urb seq# %u may be in a error, status %d\n", + seqnum, status); } list_del(&priv->list); @@ -81,8 +80,8 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, spin_unlock_irqrestore(&vdev->priv_lock, flags); if (!urb) { - pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum); - pr_info("max seqnum %d\n", + pr_err("cannot find a urb of seqnum %u max seqnum %d\n", + pdu->base.seqnum, atomic_read(&vhci_hcd->seqnum)); usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return; @@ -105,7 +104,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, if (usbip_dbg_flag_vhci_rx) usbip_dump_urb(urb); - usbip_dbg_vhci_rx("now giveback urb %p\n", urb); + usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum); spin_lock_irqsave(&vhci->lock, flags); usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb); @@ -172,7 +171,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev, pr_info("the urb (seqnum %d) was already given back\n", pdu->base.seqnum); } else { - usbip_dbg_vhci_rx("now giveback urb %p\n", urb); + usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum); /* If unlink is successful, status is -ECONNRESET */ urb->status = pdu->u.ret_unlink.status; diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index 1b9f60a22e0b..4a22a9f06d96 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c @@ -24,6 +24,9 @@ #include #include +/* Hardening for Spectre-v1 */ +#include + #include "usbip_common.h" #include "vhci.h" @@ -31,15 +34,20 @@ /* * output example: - * hub port sta spd dev socket local_busid - * hs 0000 004 000 00000000 c5a7bb80 1-2.3 + * hub port sta spd dev sockfd local_busid + * hs 0000 004 000 00000000 3 1-2.3 * ................................................ - * ss 0008 004 000 00000000 d8cee980 2-3.4 + * ss 0008 004 000 00000000 4 2-3.4 * ................................................ * - * IP address can be retrieved from a socket pointer address by looking - * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a - * port number and its peer IP address. + * Output includes socket fd instead of socket pointer address to avoid + * leaking kernel memory address in: + * /sys/devices/platform/vhci_hcd.0/status and in debug output. + * The socket pointer address is not used at the moment and it was made + * visible as a convenient way to find IP address from socket pointer + * address by looking up /proc/net/{tcp,tcp6}. As this opens a security + * hole, the change is made to use sockfd instead. + * */ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev) { @@ -53,8 +61,8 @@ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vd if (vdev->ud.status == VDEV_ST_USED) { *out += sprintf(*out, "%03u %08x ", vdev->speed, vdev->devid); - *out += sprintf(*out, "%16p %s", - vdev->ud.tcp_socket, + *out += sprintf(*out, "%u %s", + vdev->ud.sockfd, dev_name(&vdev->udev->dev)); } else { @@ -174,7 +182,8 @@ static ssize_t nports_show(struct device *dev, struct device_attribute *attr, char *s = out; /* - * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, thus the * 2. + * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, + * thus the * 2. 
*/ out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers); return out - s; @@ -213,16 +222,20 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport) return 0; } -static int valid_port(__u32 pdev_nr, __u32 rhport) +static int valid_port(__u32 *pdev_nr, __u32 *rhport) { - if (pdev_nr >= vhci_num_controllers) { - pr_err("pdev %u\n", pdev_nr); + if (*pdev_nr >= vhci_num_controllers) { + pr_err("pdev %u\n", *pdev_nr); return 0; } - if (rhport >= VHCI_HC_PORTS) { - pr_err("rhport %u\n", rhport); + *pdev_nr = array_index_nospec(*pdev_nr, vhci_num_controllers); + + if (*rhport >= VHCI_HC_PORTS) { + pr_err("rhport %u\n", *rhport); return 0; } + *rhport = array_index_nospec(*rhport, VHCI_HC_PORTS); + return 1; } @@ -240,7 +253,7 @@ static ssize_t store_detach(struct device *dev, struct device_attribute *attr, pdev_nr = port_to_pdev_nr(port); rhport = port_to_rhport(port); - if (!valid_port(pdev_nr, rhport)) + if (!valid_port(&pdev_nr, &rhport)) return -EINVAL; hcd = platform_get_drvdata(vhcis[pdev_nr].pdev); @@ -266,7 +279,8 @@ static ssize_t store_detach(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR(detach, S_IWUSR, NULL, store_detach); -static int valid_args(__u32 pdev_nr, __u32 rhport, enum usb_device_speed speed) +static int valid_args(__u32 *pdev_nr, __u32 *rhport, + enum usb_device_speed speed) { if (!valid_port(pdev_nr, rhport)) { return 0; @@ -330,7 +344,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr, sockfd, devid, speed); /* check received parameters */ - if (!valid_args(pdev_nr, rhport, speed)) + if (!valid_args(&pdev_nr, &rhport, speed)) return -EINVAL; hcd = platform_get_drvdata(vhcis[pdev_nr].pdev); @@ -380,6 +394,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr, vdev->devid = devid; vdev->speed = speed; + vdev->ud.sockfd = sockfd; vdev->ud.tcp_socket = socket; vdev->ud.status = VDEV_ST_NOTASSIGNED; diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c index 3e7878fe2fd4..a9a663a578b6 100644 --- a/drivers/usb/usbip/vhci_tx.c +++ b/drivers/usb/usbip/vhci_tx.c @@ -83,7 +83,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev) memset(&msg, 0, sizeof(msg)); memset(&iov, 0, sizeof(iov)); - usbip_dbg_vhci_tx("setup txdata urb %p\n", urb); + usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n", + priv->seqnum); /* 1. 
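With the status file now exposing a small integer sockfd in place of a socket pointer, userspace can still correlate ports with connections. A hedged sketch of parsing the column layout documented above; a real tool would also need to handle the dotted placeholder rows for unused ports, where this demo simply stops:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/platform/vhci_hcd.0/status", "r");
	char header[256], hub[8], busid[32];
	unsigned int port, sta, spd, dev, sockfd;

	if (!f)
		return 1;
	if (!fgets(header, sizeof(header), f)) {	/* skip column header */
		fclose(f);
		return 1;
	}
	while (fscanf(f, "%7s %x %x %x %x %u %31s",
		      hub, &port, &sta, &spd, &dev, &sockfd, busid) == 7)
		printf("port %04x: sockfd %u busid %s\n", port, sockfd, busid);
	fclose(f);
	return 0;
}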
setup usbip_header */ setup_cmd_submit_pdu(&pdu_header, urb); diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c index e429b59f6f8a..d020e72b3122 100644 --- a/drivers/usb/usbip/vudc_rx.c +++ b/drivers/usb/usbip/vudc_rx.c @@ -132,6 +132,25 @@ static int v_recv_cmd_submit(struct vudc *udc, urb_p->new = 1; urb_p->seqnum = pdu->base.seqnum; + if (urb_p->ep->type == USB_ENDPOINT_XFER_ISOC) { + /* validate packet size and number of packets */ + unsigned int maxp, packets, bytes; + + maxp = usb_endpoint_maxp(urb_p->ep->desc); + maxp *= usb_endpoint_maxp_mult(urb_p->ep->desc); + bytes = pdu->u.cmd_submit.transfer_buffer_length; + packets = DIV_ROUND_UP(bytes, maxp); + + if (pdu->u.cmd_submit.number_of_packets < 0 || + pdu->u.cmd_submit.number_of_packets > packets) { + dev_err(&udc->gadget.dev, + "CMD_SUBMIT: isoc invalid num packets %d\n", + pdu->u.cmd_submit.number_of_packets); + ret = -EMSGSIZE; + goto free_urbp; + } + } + ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type); if (ret) { usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC); diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c index 0f98f2c7475f..7efa374a4970 100644 --- a/drivers/usb/usbip/vudc_sysfs.c +++ b/drivers/usb/usbip/vudc_sysfs.c @@ -117,10 +117,14 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, if (rv != 0) return -EINVAL; + if (!udc) { + dev_err(dev, "no device"); + return -ENODEV; + } spin_lock_irqsave(&udc->lock, flags); /* Don't export what we don't have */ - if (!udc || !udc->driver || !udc->pullup) { - dev_err(dev, "no device or gadget not bound"); + if (!udc->driver || !udc->pullup) { + dev_err(dev, "gadget not bound"); ret = -ENODEV; goto unlock; } diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c index 234661782fa0..3ab4c86486a7 100644 --- a/drivers/usb/usbip/vudc_tx.c +++ b/drivers/usb/usbip/vudc_tx.c @@ -97,6 +97,13 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p) memset(&pdu_header, 0, sizeof(pdu_header)); memset(&msg, 0, sizeof(msg)); + if (urb->actual_length > 0 && !urb->transfer_buffer) { + dev_err(&udc->gadget.dev, + "urb: actual_length %d transfer_buffer null\n", + urb->actual_length); + return -1; + } + if (urb_p->type == USB_ENDPOINT_XFER_ISOC) iovnum = 2 + urb->number_of_packets; else @@ -112,8 +119,8 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p) /* 1. setup usbip_header */ setup_ret_submit_pdu(&pdu_header, urb_p); - usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", - pdu_header.base.seqnum, urb); + usbip_dbg_stub_tx("setup txdata seqnum: %d\n", + pdu_header.base.seqnum); usbip_header_correct_endian(&pdu_header, 1); iov[iovnum].iov_base = &pdu_header; diff --git a/drivers/vbs/Kconfig b/drivers/vbs/Kconfig new file mode 100644 index 000000000000..c7a5c6ca25b8 --- /dev/null +++ b/drivers/vbs/Kconfig @@ -0,0 +1,30 @@ +# +# This Kconfig describes VBS for ACRN hypervisor +# +config VBS + bool "Enable VBS framework for ACRN hypervisor" + depends on ACRN + default n + ---help--- + This option is selected by any driver which needs to use + the Virtio Backend Service (VBS) framework on ACRN + hypervisor. + +config VBS_DEBUG + bool "ACRN VBS debugging" + depends on VBS != n + default n + ---help--- + This is an option for use by developers; most people should + say N here. This enables ACRN VBS debugging. 
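Returning to the vhci_sysfs.c hunks above: valid_port() and valid_args() now take pointers so they can rewrite the caller's indices through array_index_nospec(), clamping each user-controlled value after its bounds check so a mispredicted branch cannot use it to address past the table during speculative execution. The pattern in isolation (a kernel-style sketch, not the exact driver code):

#include <linux/nospec.h>
#include <linux/types.h>
#include <linux/errno.h>

static int demo_valid_index(u32 *idx, u32 nr_entries)
{
	if (*idx >= nr_entries)
		return -EINVAL;		/* architectural bounds check */
	/* clamp the value itself so it is safe to use as an array
	 * index even while the branch above is still speculative */
	*idx = array_index_nospec(*idx, nr_entries);
	return 0;
}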
+ +config VBS_RNG + bool "ACRN VBS reference driver: virtio RNG" + depends on VBS != n + default n + ---help--- + Say M or * here to enable a VBS-K reference driver for ACRN + hypervisor, virtio RNG driver, to work with virtio-rng + frontend driver in guest. + The reference driver shows an example on how to use VBS-K + APIs. diff --git a/drivers/vbs/Makefile b/drivers/vbs/Makefile new file mode 100644 index 000000000000..85e1cc252197 --- /dev/null +++ b/drivers/vbs/Makefile @@ -0,0 +1,6 @@ +ccflags-$(CONFIG_VBS_DEBUG) := -DDEBUG + +obj-$(CONFIG_VBS) += vbs.o +obj-$(CONFIG_VBS) += vq.o + +obj-$(CONFIG_VBS_RNG) += vbs_rng.o diff --git a/drivers/vbs/vbs.c b/drivers/vbs/vbs.c new file mode 100644 index 000000000000..65b8a0bbe3d3 --- /dev/null +++ b/drivers/vbs/vbs.c @@ -0,0 +1,346 @@ +/* + * ACRN Project + * Virtio Backend Service (VBS) for ACRN hypervisor + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Contact Information: Hao Li + * + * BSD LICENSE + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Hao Li + * Created Virtio Backend Service (VBS) framework: + * - VBS-K is a kernel-level virtio framework that can be used for + * virtio backend driver development for ACRN hypervisor. + * - VBS-K should be working with VBS-U (Virtio Backend Service in + * User) together, in order to connect with virtio frontend driver. 
+ * - VBS-K mainly handles data plane part of a virtio backend driver, + * such as virtqueue parsing and processing, while VBS-U mainly + * hanldes control plane part. + */ + +#include +#include +#include +#include + +long virtio_dev_register(struct virtio_dev_info *dev) +{ + struct vm_info info; + int ret; + + pr_debug("vmid is %d\n", dev->_ctx.vmid); + + if (dev->dev_notify == NULL) { + pr_err("%s dev_notify empty!\n", dev->name); + goto err; + } + + /* + * dev->name is 32 chars while vhm only accepts 16 chars + * at most, so we make sure there will be a NULL + * terminator for the chars. + */ + dev->name[15] = '\0'; + dev->_ctx.vhm_client_id = + acrn_ioreq_create_client(dev->_ctx.vmid, + dev->dev_notify, + dev->name); + if (dev->_ctx.vhm_client_id < 0) { + pr_err("failed to create client of ioreq!\n"); + goto err; + } + + ret = acrn_ioreq_add_iorange(dev->_ctx.vhm_client_id, + dev->io_range_type ? REQ_MMIO : REQ_PORTIO, + dev->io_range_start, + dev->io_range_start + dev->io_range_len - 1); + if (ret < 0) { + pr_err("failed to add iorange to ioreq!\n"); + goto err; + } + + /* feed up max_cpu and req_buf */ + ret = vhm_get_vm_info(dev->_ctx.vmid, &info); + if (ret < 0) { + pr_err("failed in vhm_get_vm_info!\n"); + goto range_err; + } + dev->_ctx.max_vcpu = info.max_vcpu; + + dev->_ctx.req_buf = acrn_ioreq_get_reqbuf(dev->_ctx.vhm_client_id); + if (dev->_ctx.req_buf == NULL) { + pr_err("failed in ioreq_get_reqbuf!\n"); + goto range_err; + } + + acrn_ioreq_attach_client(dev->_ctx.vhm_client_id, 0); + + return 0; + +range_err: + acrn_ioreq_del_iorange(dev->_ctx.vhm_client_id, + dev->io_range_type ? REQ_MMIO : REQ_PORTIO, + dev->io_range_start, + dev->io_range_start + dev->io_range_len); + +err: + acrn_ioreq_destroy_client(dev->_ctx.vhm_client_id); + + return -EINVAL; +} + +long virtio_dev_deregister(struct virtio_dev_info *dev) +{ + if (dev->_ctx.vhm_client_id < 0) + return 0; + + acrn_ioreq_del_iorange(dev->_ctx.vhm_client_id, + dev->io_range_type ? REQ_MMIO : REQ_PORTIO, + dev->io_range_start, + dev->io_range_start + dev->io_range_len); + acrn_ioreq_destroy_client(dev->_ctx.vhm_client_id); + dev->_ctx.vhm_client_id = -1; + + return 0; +} + +int virtio_vq_index_get(struct virtio_dev_info *dev, unsigned long *ioreqs_map) +{ + int val = -1; + struct vhm_request *req; + int vcpu; + + if (dev == NULL) { + pr_err("%s: dev is NULL!\n", __func__); + return -EINVAL; + } + + while (1) { + vcpu = find_first_bit(ioreqs_map, dev->_ctx.max_vcpu); + if (vcpu == dev->_ctx.max_vcpu) + break; + req = &dev->_ctx.req_buf[vcpu]; + if (atomic_read(&req->processed) == REQ_STATE_PROCESSING && + req->client == dev->_ctx.vhm_client_id) { + if (req->reqs.pio_request.direction == REQUEST_READ) { + /* currently we handle kick only, + * so read will return 0 + */ + pr_debug("%s: read request!\n", __func__); + if (dev->io_range_type == PIO_RANGE) + req->reqs.pio_request.value = 0; + else + req->reqs.mmio_request.value = 0; + } else { + pr_debug("%s: write request! 
type %d\n", + __func__, req->type); + if (dev->io_range_type == PIO_RANGE) + val = req->reqs.pio_request.value; + else + val = req->reqs.mmio_request.value; + } + smp_mb(); + atomic_set(&req->processed, REQ_STATE_COMPLETE); + acrn_ioreq_complete_request(req->client, vcpu); + } + } + + return val; +} + +static long virtio_vqs_info_set(struct virtio_dev_info *dev, + struct vbs_vqs_info __user *i) +{ + struct vbs_vqs_info info; + struct virtio_vq_info *vq; + int j; + + vq = dev->vqs; + + if (copy_from_user(&info, i, sizeof(struct vbs_vqs_info))) + return -EFAULT; + + /* setup struct virtio_vq_info based on info in struct vbs_vq_info */ + if (dev->nvq && dev->nvq != info.nvq) { + pr_err("Oops! dev's nvq != vqs's nvq. Not the same device?\n"); + return -EFAULT; + } + + for (j = 0; j < info.nvq; j++) { + vq->qsize = info.vqs[j].qsize; + vq->pfn = info.vqs[j].pfn; + vq->msix_idx = info.vqs[j].msix_idx; + vq->msix_addr = info.vqs[j].msix_addr; + vq->msix_data = info.vqs[j].msix_data; + + pr_debug("msix id %x, addr %llx, data %x\n", vq->msix_idx, + vq->msix_addr, vq->msix_data); + + virtio_vq_init(vq, vq->pfn); + + vq++; + } + + return 0; +} + +/* invoked by VBS-K device's ioctl routine */ +long virtio_vqs_ioctl(struct virtio_dev_info *dev, unsigned int ioctl, + void __user *argp) +{ + long ret; + + /* + * Currently we don't conduct ownership checking, + * but assuming caller would have device mutex. + */ + + switch (ioctl) { + case VBS_SET_VQ: + ret = virtio_vqs_info_set(dev, argp); + break; + default: + ret = -ENOIOCTLCMD; + break; + } + return ret; +} +EXPORT_SYMBOL_GPL(virtio_vqs_ioctl); + +static long virtio_dev_info_set(struct virtio_dev_info *dev, + struct vbs_dev_info __user *i) +{ + struct vbs_dev_info info; + + if (copy_from_user(&info, i, sizeof(struct vbs_dev_info))) + return -EFAULT; + + /* setup struct virtio_dev_info based on info in vbs_dev_info */ + strncpy(dev->name, info.name, VBS_NAME_LEN); + dev->_ctx.vmid = info.vmid; + dev->nvq = info.nvq; + dev->negotiated_features = info.negotiated_features; + dev->io_range_start = info.pio_range_start; + dev->io_range_len = info.pio_range_len; + dev->io_range_type = PIO_RANGE; + + return 0; +} + +/* invoked by VBS-K device's ioctl routine */ +long virtio_dev_ioctl(struct virtio_dev_info *dev, unsigned int ioctl, + void __user *argp) +{ + long ret; + + /* + * Currently we don't conduct ownership checking, + * but assuming caller would have device mutex. 
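virtio_dev_ioctl() and virtio_vqs_ioctl() above share one shape: snapshot the argument with copy_from_user(), validate it, and only then touch driver state, returning -ENOIOCTLCMD for unknown commands so a caller such as vbs_rng_ioctl() can chain handlers. A stripped-down kernel-style sketch; vbs_demo_info and VBS_DEMO_SET are hypothetical names:

#include <linux/uaccess.h>
#include <linux/ioctl.h>
#include <linux/errno.h>

struct vbs_demo_info {
	int vmid;
	int nvq;
};

#define VBS_DEMO_SET _IOW('v', 0x40, struct vbs_demo_info)

static long vbs_demo_ioctl(struct vbs_demo_info *state, unsigned int cmd,
			   void __user *argp)
{
	struct vbs_demo_info info;

	switch (cmd) {
	case VBS_DEMO_SET:
		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;	/* bad user pointer */
		*state = info;		/* commit only validated input */
		return 0;
	default:
		return -ENOIOCTLCMD;	/* let the caller try the next handler */
	}
}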
+ */ + + switch (ioctl) { + case VBS_SET_DEV: + ret = virtio_dev_info_set(dev, argp); + break; + default: + ret = -ENOIOCTLCMD; + break; + } + return ret; +} +EXPORT_SYMBOL_GPL(virtio_dev_ioctl); + +/* called in VBS-K device's .open() */ +long virtio_dev_init(struct virtio_dev_info *dev, + struct virtio_vq_info *vqs, int nvq) +{ + int i; + + for (i = 0; i < nvq; i++) + virtio_vq_reset(&vqs[i]); + + dev->_ctx.vhm_client_id = -1; + + return 0; +} +EXPORT_SYMBOL_GPL(virtio_dev_init); + +long virtio_dev_reset(struct virtio_dev_info *dev) +{ + int i; + + for (i = 0; i < dev->nvq; i++) + virtio_vq_reset(&dev->vqs[i]); + + memset(dev->name, 0, sizeof(dev->name)); + dev->_ctx.vmid = 0; + dev->nvq = 0; + dev->negotiated_features = 0; + dev->io_range_start = 0; + dev->io_range_len = 0; + dev->io_range_type = PIO_RANGE; + + return 0; +} +EXPORT_SYMBOL_GPL(virtio_dev_reset); + +static int __init vbs_init(void) +{ + return 0; +} + +static void __exit vbs_exit(void) +{ +} + +module_init(vbs_init); +module_exit(vbs_exit); + +MODULE_VERSION("0.1"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL and additional rights"); +MODULE_DESCRIPTION("Virtio Backend Service framework for ACRN hypervisor"); diff --git a/drivers/vbs/vbs_rng.c b/drivers/vbs/vbs_rng.c new file mode 100644 index 000000000000..fd2bb27af66e --- /dev/null +++ b/drivers/vbs/vbs_rng.c @@ -0,0 +1,498 @@ +/* + * ACRN Project + * Virtio Backend Service (VBS) for ACRN hypervisor + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Contact Information: Hao Li + * + * BSD LICENSE + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Hao Li + * VBS-K Reference Driver: virtio-rng + * - Each VBS-K driver exports a char device to /dev/, e.g. /dev/vbs_rng; + * - Each VBS-K driver uses Virtqueue APIs to interact with the virtio + * frontend driver in guest; + * - Each VBS-K driver registers itelf as VHM (Virtio and Hypervisor + * service Module) client, which enables in-kernel handling of register + * access of virtio device; + * - Each VBS-K driver could maintain the connections, from VBS-U, in a + * list/table, so that it could serve multiple guests. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +enum { + VBS_K_RNG_VQ = 0, + VBS_K_RNG_VQ_MAX = 1, +}; + +#define VTRND_RINGSZ 64 + +/* VBS-K features if any */ +/* + *enum { + * VBS_K_RNG_FEATURES = VBS_K_FEATURES | + * (1ULL << VIRTIO_F_VERSION_1), + *}; + */ + +/** + * struct vbs_rng - Backend of virtio-rng based on VBS-K + * + * @dev : instance of struct virtio_dev_info + * @vqs : instances of struct virtio_vq_info + * @hwrng : device specific member + * @node : hashtable maintaining multiple connections + * from multiple guests/devices + */ +struct vbs_rng { + struct virtio_dev_info dev; + struct virtio_vq_info vqs[VBS_K_RNG_VQ_MAX]; + /* Below could be device specific members */ + struct hwrng hwrng; + /* + * Each VBS-K module might serve multiple connections + * from multiple guests/device models/VBS-Us, so better + * to maintain the connections in a list, and here we + * use hashtable as an example. 
+ */ + struct hlist_node node; +}; + +#define RNG_MAX_HASH_BITS 4 /* MAX is 2^4 */ +#define HASH_NAME vbs_rng_hash + +DECLARE_HASHTABLE(HASH_NAME, RNG_MAX_HASH_BITS); +static int vbs_rng_hash_initialized = 0; +static int vbs_rng_connection_cnt = 0; + +/* function declarations */ +static int handle_kick(int client_id, unsigned long *ioreqs_map); +static long vbs_rng_reset(struct vbs_rng *rng); +static void vbs_rng_stop(struct vbs_rng *rng); +static void vbs_rng_flush(struct vbs_rng *rng); +#ifdef RUNTIME_CTRL +static int vbs_rng_enable_vq(struct vbs_rng *rng, + struct virtio_vq_info *vq); +static void vbs_rng_disable_vq(struct vbs_rng *rng, + struct virtio_vq_info *vq); +static void vbs_rng_stop_vq(struct vbs_rng *rng, + struct virtio_vq_info *vq); +static void vbs_rng_flush_vq(struct vbs_rng *rng, int index); +#endif + +/* hash table related functions */ +static void vbs_rng_hash_init(void) +{ + if (vbs_rng_hash_initialized) + return; + + hash_init(HASH_NAME); + vbs_rng_hash_initialized = 1; +} + +static int vbs_rng_hash_add(struct vbs_rng *entry) +{ + if (!vbs_rng_hash_initialized) { + pr_err("RNG hash table not initialized!\n"); + return -1; + } + + hash_add(HASH_NAME, &entry->node, virtio_dev_client_id(&entry->dev)); + return 0; +} + +static struct vbs_rng *vbs_rng_hash_find(int client_id) +{ + struct vbs_rng *entry; + int bkt; + + if (!vbs_rng_hash_initialized) { + pr_err("RNG hash table not initialized!\n"); + return NULL; + } + + hash_for_each(HASH_NAME, bkt, entry, node) + if (virtio_dev_client_id(&entry->dev) == client_id) + return entry; + + pr_err("Not found item matching client_id!\n"); + return NULL; +} + +static int vbs_rng_hash_del(int client_id) +{ + struct vbs_rng *entry; + int bkt; + + if (!vbs_rng_hash_initialized) { + pr_err("RNG hash table not initialized!\n"); + return -1; + } + + hash_for_each(HASH_NAME, bkt, entry, node) + if (virtio_dev_client_id(&entry->dev) == client_id) { + hash_del(&entry->node); + return 0; + } + + pr_err("%s failed, not found matching client_id!\n", + __func__); + return -1; +} + +static int vbs_rng_hash_del_all(void) +{ + struct vbs_rng *entry; + int bkt; + + if (!vbs_rng_hash_initialized) { + pr_err("RNG hash table not initialized!\n"); + return -1; + } + + hash_for_each(HASH_NAME, bkt, entry, node) + hash_del(&entry->node); + + return 0; +} + +static void handle_vq_kick(struct vbs_rng *rng, int vq_idx) +{ + struct iovec iov; + struct vbs_rng *sc; + struct virtio_vq_info *vq; + int len; + uint16_t idx; + + pr_debug("%s: vq_idx %d\n", __func__, vq_idx); + + sc = rng; + + if (!sc) { + pr_err("rng is NULL! Cannot proceed!\n"); + return; + } + + vq = &(sc->vqs[vq_idx]); + + while (virtio_vq_has_descs(vq)) { + virtio_vq_getchain(vq, &idx, &iov, 1, NULL); + + /* device specific operations, for example: */ + /* len = read(sc->vrsc_fd, iov.iov_base, iov.iov_len); */ + pr_debug("iov base %p len %lx\n", iov.iov_base, iov.iov_len); + + /* let's generate some cool data... :-) */ + len = iov.iov_len; + + pr_debug("vtrnd: vtrnd_notify(): %d\r\n", len); + + /* + * Release this chain and handle more + */ + virtio_vq_relchain(vq, idx, len); + } + virtio_vq_endchains(vq, 1); /* Generate interrupt if appropriate. 
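handle_vq_kick() above is the canonical VBS-K virtqueue service loop: drain every available descriptor chain, do the device-specific work, release each chain back to the guest, then raise at most one interrupt for the whole batch. Condensed to its skeleton, reusing the VBS calls exactly as they appear above (signatures inferred from that usage):

static void demo_service_vq(struct virtio_vq_info *vq)
{
	struct iovec iov;
	uint16_t idx;
	int len;

	while (virtio_vq_has_descs(vq)) {
		/* pull one descriptor chain (a single buffer here) */
		virtio_vq_getchain(vq, &idx, &iov, 1, NULL);

		/* device-specific work would consume or fill
		 * iov.iov_base / iov.iov_len at this point */
		len = iov.iov_len;

		/* hand the chain back, reporting bytes written */
		virtio_vq_relchain(vq, idx, len);
	}
	virtio_vq_endchains(vq, 1);	/* one interrupt for the batch */
}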
+}
+
+static int handle_kick(int client_id, unsigned long *ioreqs_map)
+{
+	int val = -1;
+	struct vbs_rng *rng;
+
+	if (unlikely(bitmap_empty(ioreqs_map, VHM_REQUEST_MAX)))
+		return 0;
+
+	pr_debug("%s: handle kick!\n", __func__);
+
+	rng = vbs_rng_hash_find(client_id);
+	if (rng == NULL) {
+		pr_err("%s: client %d not found!\n",
+		       __func__, client_id);
+		return -EINVAL;
+	}
+
+	val = virtio_vq_index_get(&rng->dev, ioreqs_map);
+
+	if (val >= 0)
+		handle_vq_kick(rng, val);
+
+	return 0;
+}
+
+static int vbs_rng_open(struct inode *inode, struct file *f)
+{
+	struct vbs_rng *rng;
+	struct virtio_dev_info *dev;
+	struct virtio_vq_info *vqs;
+	int i;
+
+	rng = kmalloc(sizeof(*rng), GFP_KERNEL);
+	if (rng == NULL) {
+		pr_err("Failed to allocate memory for vbs_rng!\n");
+		return -ENOMEM;
+	}
+
+	dev = &rng->dev;
+	strncpy(dev->name, "vbs_rng", VBS_NAME_LEN);
+	dev->dev_notify = handle_kick;
+	vqs = (struct virtio_vq_info *)&rng->vqs;
+
+	for (i = 0; i < VBS_K_RNG_VQ_MAX; i++) {
+		vqs[i].dev = dev;
+		/*
+		 * Currently relies on VHM to kick us,
+		 * thus vq_notify not used
+		 */
+		vqs[i].vq_notify = NULL;
+	}
+
+	/* link dev and vqs */
+	dev->vqs = vqs;
+
+	virtio_dev_init(dev, vqs, VBS_K_RNG_VQ_MAX);
+
+	f->private_data = rng;
+
+	/* init a hash table to maintain multi-connections */
+	vbs_rng_hash_init();
+
+	return 0;
+}
+
+static int vbs_rng_release(struct inode *inode, struct file *f)
+{
+	struct vbs_rng *rng = f->private_data;
+
+	if (!rng) {
+		/* nothing to tear down without a device instance */
+		pr_err("%s: UNLIKELY rng NULL!\n", __func__);
+		return -EINVAL;
+	}
+
+	vbs_rng_stop(rng);
+	vbs_rng_flush(rng);
+
+	/* device specific release */
+	vbs_rng_reset(rng);
+
+	pr_debug("vbs_rng_connection cnt is %d\n",
+		 vbs_rng_connection_cnt);
+
+	if (vbs_rng_connection_cnt--)
+		vbs_rng_hash_del(virtio_dev_client_id(&rng->dev));
+	if (!vbs_rng_connection_cnt) {
+		pr_debug("vbs_rng remove all hash entries\n");
+		vbs_rng_hash_del_all();
+	}
+
+	kfree(rng);
+
+	pr_debug("%s done\n", __func__);
+	return 0;
+}
+
+static long vbs_rng_ioctl(struct file *f, unsigned int ioctl,
+			  unsigned long arg)
+{
+	struct vbs_rng *rng = f->private_data;
+	void __user *argp = (void __user *)arg;
+	/*u64 __user *featurep = argp;*/
+	/*u64 features;*/
+	int r;
+
+	switch (ioctl) {
+/*
+ * case VHOST_GET_FEATURES:
+ *	features = VHOST_NET_FEATURES;
+ *	if (copy_to_user(featurep, &features, sizeof features))
+ *		return -EFAULT;
+ *	return 0;
+ * case VHOST_SET_FEATURES:
+ *	if (copy_from_user(&features, featurep, sizeof features))
+ *		return -EFAULT;
+ *	if (features & ~VHOST_NET_FEATURES)
+ *		return -EOPNOTSUPP;
+ *	return vhost_net_set_features(n, features);
+ */
+	case VBS_SET_VQ:
+		/*
+		 * we handle this here because we want to register the VHM
+		 * client after handling the VBS_K_SET_VQ request
+		 */
+		pr_debug("VBS_K_SET_VQ ioctl:\n");
+		r = virtio_vqs_ioctl(&rng->dev, ioctl, argp);
+		if (r == -ENOIOCTLCMD) {
+			pr_err("VBS_K_SET_VQ: virtio_vqs_ioctl failed!\n");
+			return -EFAULT;
+		}
+		/* Register VHM client */
+		if (virtio_dev_register(&rng->dev) < 0) {
+			pr_err("failed to register VHM client!\n");
+			return -EFAULT;
+		}
+		/* Add to local hash table */
+		if (vbs_rng_hash_add(rng) < 0) {
+			pr_err("failed to add to hashtable!\n");
+			return -EFAULT;
+		}
+		/* Increment counter */
+		vbs_rng_connection_cnt++;
+		return r;
+	case VBS_RESET_DEV:
+		pr_debug("VBS_RESET_DEV ioctl:\n");
+		vbs_rng_stop(rng);
+		vbs_rng_flush(rng);
+		r = vbs_rng_reset(rng);
+		return r;
+	default:
+		/*mutex_lock(&n->dev.mutex);*/
+		pr_debug("VBS_K generic ioctls!\n");
+		r = virtio_dev_ioctl(&rng->dev, ioctl, argp);
+		if (r ==
-ENOIOCTLCMD) + r = virtio_vqs_ioctl(&rng->dev, ioctl, argp); + else + vbs_rng_flush(rng); + /*mutex_unlock(&n->dev.mutex);*/ + return r; + } +} + +/* device specific function to cleanup itself */ +static long vbs_rng_reset(struct vbs_rng *rng) +{ + return virtio_dev_reset(&rng->dev); +} + +/* device specific function */ +static void vbs_rng_stop(struct vbs_rng *rng) +{ + virtio_dev_deregister(&rng->dev); +} + +/* device specific function */ +static void vbs_rng_flush(struct vbs_rng *rng) +{ +} + +#ifdef RUNTIME_CTRL +/* device specific function */ +static int vbs_rng_enable_vq(struct vbs_rng *rng, + struct virtio_vq_info *vq) +{ + return 0; +} + +/* device specific function */ +static void vbs_rng_disable_vq(struct vbs_rng *rng, + struct virtio_vq_info *vq) +{ +} + +/* device specific function */ +static void vbs_rng_stop_vq(struct vbs_rng *rng, + struct virtio_vq_info *vq) +{ +} + +/* device specific function */ +static void vbs_rng_flush_vq(struct vbs_rng *rng, int index) +{ +} + +static struct hwrng get_hwrng(struct vbs_rng *rng) +{ + return rng->hwrng; +} + +/* Set feature bits in kernel side device */ +static int vbs_rng_set_features(struct vbs_rng *rng, u64 features) +{ + return 0; +} +#endif + +static const struct file_operations vbs_rng_fops = { + .owner = THIS_MODULE, + .release = vbs_rng_release, + .unlocked_ioctl = vbs_rng_ioctl, + .open = vbs_rng_open, + .llseek = noop_llseek, +}; + +static struct miscdevice vbs_rng_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "vbs_rng", + .fops = &vbs_rng_fops, +}; + +static int vbs_rng_init(void) +{ + return misc_register(&vbs_rng_misc); +} +module_init(vbs_rng_init); + +static void vbs_rng_exit(void) +{ + misc_deregister(&vbs_rng_misc); +} +module_exit(vbs_rng_exit); + +MODULE_VERSION("0.1"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL and additional rights"); +MODULE_DESCRIPTION("Virtio Backend Service reference driver on ACRN hypervisor"); diff --git a/drivers/vbs/vq.c b/drivers/vbs/vq.c new file mode 100644 index 000000000000..9f7a829c7a67 --- /dev/null +++ b/drivers/vbs/vq.c @@ -0,0 +1,395 @@ +/* + * ACRN Project + * Virtio Backend Service (VBS) for ACRN hypervisor + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Contact Information: Hao Li + * + * BSD LICENSE + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Chris Torek
+ * Hao Li
+ * Created Virtqueue APIs for ACRN VBS framework:
+ * - VBS-K is a kernel-level virtio framework that can be used for
+ *   virtio backend driver development for ACRN hypervisor.
+ * - Virtqueue APIs abstract away the details of the internal data
+ *   structures of virtqueue, so that callers could easily access
+ *   the data from guest through virtqueues.
+ */
+
+#include
+#include
+#include
+#include
+
+/* helper function for remote memory map */
+void *paddr_guest2host(struct ctx *ctx, uintptr_t gaddr, size_t len)
+{
+	return map_guest_phys(ctx->vmid, gaddr, len);
+}
+
+/*
+ * helper function for vq_getchain():
+ * record the i'th "real" descriptor.
+ */
+static inline void _vq_record(int i, volatile struct virtio_desc *vd,
+			      struct ctx *ctx, struct iovec *iov,
+			      int n_iov, uint16_t *flags)
+{
+	if (i >= n_iov)
+		return;
+
+	iov[i].iov_base = paddr_guest2host(ctx, vd->addr, vd->len);
+	iov[i].iov_len = vd->len;
+
+	if (flags != NULL)
+		flags[i] = vd->flags;
+}
+
+/*
+ * Walk descriptor table and put requests into iovec.
+ *
+ * Examine the chain of descriptors starting at the "next one" to
+ * make sure that they describe a sensible request. If so, return
+ * the number of "real" descriptors that would be needed/used in
+ * acting on this request. This may be smaller than the number of
+ * available descriptors, e.g., if there are two available but
+ * they are two separate requests, this just returns 1. Or, it
+ * may be larger: if there are indirect descriptors involved,
+ * there may only be one descriptor available but it may be an
+ * indirect pointing to eight more. We return 8 in this case,
+ * i.e., we do not count the indirect descriptors, only the "real"
+ * ones.
+ *
+ * Basically, this vets the vd_flags and vd_next fields of each
+ * descriptor and tells you how many are involved. Since some may
+ * be indirect, this also needs the vmctx (in the pci_vdev
+ * at vc->vc_pi) so that it can find indirect descriptors.
+ *
+ * As we process each descriptor, we copy and adjust it (guest to
+ * host address wise, also using the vmctx) into the given iov[]
+ * array (of the given size). If the array overflows, we stop
+ * placing values into the array but keep processing descriptors,
+ * up to VQ_MAX_DESCRIPTORS, before giving up and returning -1.
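+ *
+ * (Editorial sketch, not part of the original comment: a typical
+ * caller, with illustrative names only, consumes one chain at a
+ * time along these lines:
+ *
+ *	struct iovec iov[8];
+ *	uint16_t head, flags[8];
+ *	int n = virtio_vq_getchain(vq, &head, iov, 8, flags);
+ *
+ *	if (n > 0) {
+ *		... process iov[0..n-1], producing bytes_written ...
+ *		virtio_vq_relchain(vq, head, bytes_written);
+ *	}
+ *	virtio_vq_endchains(vq, 1);
+ *
+ * which mirrors the loop in vbs_rng's handle_vq_kick() earlier in
+ * this patch.)
+ *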
+ * So you, the caller, must not assume that iov[] is as big as the
+ * return value (you can process the same thing twice to allocate
+ * a larger iov array if needed, or supply a zero length to find
+ * out how much space is needed).
+ *
+ * If you want to verify the WRITE flag on each descriptor, pass a
+ * non-NULL "flags" pointer to an array of "uint16_t" of the same size
+ * as n_iov and we'll copy each vd_flags field after unwinding any
+ * indirects.
+ *
+ * If some descriptor(s) are invalid, this prints a diagnostic message
+ * and returns -1. If no descriptors are ready now it simply returns 0.
+ *
+ * You are assumed to have done a vq_ring_ready() if needed (note
+ * that vq_has_descs() does one).
+ */
+int virtio_vq_getchain(struct virtio_vq_info *vq, uint16_t *pidx,
+		       struct iovec *iov, int n_iov, uint16_t *flags)
+{
+	int i;
+	unsigned int ndesc, n_indir;
+	unsigned int idx, next;
+	struct ctx *ctx;
+	struct virtio_dev_info *dev;
+	const char *name;
+
+	volatile struct virtio_desc *vdir, *vindir, *vp;
+
+	dev = vq->dev;
+	name = dev->name;
+
+	/*
+	 * Note: it's the responsibility of the guest not to
+	 * update vq->avail->idx until all of the descriptors
+	 * the guest has written are valid (including all their
+	 * vd_next fields and vd_flags).
+	 *
+	 * Compute (avail->idx - last_avail) in integers mod 2**16. This is
+	 * the number of descriptors the guest has made available
+	 * since the last time we updated vq->last_avail.
+	 *
+	 * We just need to do the subtraction as an unsigned int,
+	 * then trim off excess bits.
+	 */
+	idx = vq->last_avail;
+	ndesc = (uint16_t)((unsigned int)vq->avail->idx - idx);
+
+	if (ndesc == 0)
+		return 0;
+
+	if (ndesc > vq->qsize) {
+		/* XXX need better way to diagnose issues */
+		pr_err("%s: ndesc (%u) out of range, driver confused?\r\n",
+		       name, (unsigned int)ndesc);
+		return -1;
+	}
+
+	/*
+	 * Now count/parse "involved" descriptors starting from
+	 * the head of the chain.
+	 *
+	 * To prevent loops, we could be more complicated and
+	 * check whether we're re-visiting a previously visited
+	 * index, but we just abort if the count gets excessive.
+	 */
+	ctx = &dev->_ctx;
+	*pidx = next = vq->avail->ring[idx & (vq->qsize - 1)];
+	vq->last_avail++;
+	for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->next) {
+		if (next >= vq->qsize) {
+			pr_err("%s: descriptor index %u out of range, "
+			       "driver confused?\r\n", name, next);
+			return -1;
+		}
+		vdir = &vq->desc[next];
+		if ((vdir->flags & VRING_DESC_F_INDIRECT) == 0) {
+			_vq_record(i, vdir, ctx, iov, n_iov, flags);
+			i++;
+		} else if ((dev->negotiated_features &
+			    VIRTIO_RING_F_INDIRECT_DESC) == 0) {
+			pr_err("%s: descriptor has forbidden INDIRECT flag, "
+			       "driver confused?\r\n", name);
+			return -1;
+		} else {
+			n_indir = vdir->len / 16;
+			if ((vdir->len & 0xf) || n_indir == 0) {
+				pr_err("%s: invalid indir len 0x%x, "
+				       "driver confused?\r\n", name,
+				       (unsigned int)vdir->len);
+				return -1;
+			}
+			vindir = paddr_guest2host(ctx, vdir->addr, vdir->len);
+			/*
+			 * Indirects start at the 0th, then follow
+			 * their own embedded "next"s until those run
+			 * out. Each one's indirect flag must be off
+			 * (we don't really have to check, could just
+			 * ignore errors...).
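+			 *
+			 * (Editorial illustration: each descriptor is 16
+			 * bytes, so an indirect table with vdir->len == 48
+			 * holds n_indir == 3 entries; a length that is not
+			 * a multiple of 16 was rejected just above.)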
+ */ + next = 0; + for (;;) { + vp = &vindir[next]; + if (vp->flags & VRING_DESC_F_INDIRECT) { + pr_err("%s: indirect desc has INDIR flag," + " driver confused?\r\n", name); + return -1; + } + _vq_record(i, vp, ctx, iov, n_iov, flags); + if (++i > VQ_MAX_DESCRIPTORS) + goto loopy; + if ((vp->flags & VRING_DESC_F_NEXT) == 0) + break; + next = vp->next; + if (next >= n_indir) { + pr_err("%s: invalid next %u > %u, " + "driver confused?\r\n", + name, (unsigned int)next, n_indir); + return -1; + } + } + } + if ((vdir->flags & VRING_DESC_F_NEXT) == 0) + return i; + } +loopy: + pr_err("%s: descriptor loop? count > %d - driver confused?\r\n", + name, i); + return -1; +} + +/* + * Return the currently-first request chain back to the available queue. + * + * (This chain is the one you handled when you called vq_getchain() + * and used its positive return value.) + */ +void virtio_vq_retchain(struct virtio_vq_info *vq) +{ + vq->last_avail--; +} + +/* + * Return specified request chain to the guest, setting its I/O length + * to the provided value. + * + * (This chain is the one you handled when you called vq_getchain() + * and used its positive return value.) + */ +void virtio_vq_relchain(struct virtio_vq_info *vq, uint16_t idx, + uint32_t iolen) +{ + uint16_t uidx, mask; + volatile struct vring_used *vuh; + volatile struct virtio_used *vue; + + /* + * Notes: + * - mask is N-1 where N is a power of 2 so computes x % N + * - vuh points to the "used" data shared with guest + * - vue points to the "used" ring entry we want to update + * - head is the same value we compute in vq_iovecs(). + * + * (I apologize for the two fields named vu_idx; the + * virtio spec calls the one that vue points to, "id"...) + */ + mask = vq->qsize - 1; + vuh = vq->used; + + uidx = vuh->idx; + vue = &vuh->ring[uidx++ & mask]; + vue->idx = idx; + vue->len = iolen; + vuh->idx = uidx; +} + +/* + * Driver has finished processing "available" chains and calling + * vq_relchain on each one. If driver used all the available + * chains, used_all should be set. + * + * If the "used" index moved we may need to inform the guest, i.e., + * deliver an interrupt. Even if the used index did NOT move we + * may need to deliver an interrupt, if the avail ring is empty and + * we are supposed to interrupt on empty. + * + * Note that used_all_avail is provided by the caller because it's + * a snapshot of the ring state when he decided to finish interrupt + * processing -- it's possible that descriptors became available after + * that point. (It's also typically a constant 1/True as well.) + */ +void virtio_vq_endchains(struct virtio_vq_info *vq, int used_all_avail) +{ + struct virtio_dev_info *dev; + uint16_t event_idx, new_idx, old_idx; + int intr; + + /* + * Interrupt generation: if we're using EVENT_IDX, + * interrupt if we've crossed the event threshold. + * Otherwise interrupt is generated if we added "used" entries, + * but suppressed by VRING_AVAIL_F_NO_INTERRUPT. + * + * In any case, though, if NOTIFY_ON_EMPTY is set and the + * entire avail was processed, we need to interrupt always. + */ + dev = vq->dev; + old_idx = vq->save_used; + vq->save_used = new_idx = vq->used->idx; + if (used_all_avail && + (dev->negotiated_features & VIRTIO_F_NOTIFY_ON_EMPTY)) + intr = 1; + else if (dev->negotiated_features & VIRTIO_RING_F_EVENT_IDX) { + event_idx = VQ_USED_EVENT_IDX(vq); + /* + * This calculation is per docs and the kernel + * (see src/sys/dev/virtio/virtio_ring.h). 
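+		 *
+		 * (Worked example, editorial addition: with old_idx == 10,
+		 * new_idx == 13 and event_idx == 11, the left side is
+		 * (uint16_t)(13 - 11 - 1) == 1 and the right side is
+		 * (uint16_t)(13 - 10) == 3, so 1 < 3 and we interrupt; with
+		 * event_idx == 14 the left side wraps to 65534 and the
+		 * interrupt is suppressed.)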
+ */ + intr = (uint16_t)(new_idx - event_idx - 1) < + (uint16_t)(new_idx - old_idx); + } else { + intr = new_idx != old_idx && + !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT); + } + if (intr) + virtio_vq_interrupt(dev, vq); +} + +/* + * Initialize the currently-selected virtqueue. + * The guest just gave us a page frame number, from which we can + * calculate the addresses of the queue. + */ +void virtio_vq_init(struct virtio_vq_info *vq, uint32_t pfn) +{ + uint64_t phys; + size_t size; + char *base; + struct ctx *ctx; + + ctx = &vq->dev->_ctx; + + phys = (uint64_t)pfn << VRING_PAGE_BITS; + size = virtio_vq_ring_size(vq->qsize); + base = paddr_guest2host(ctx, phys, size); + + /* First page(s) are descriptors... */ + vq->desc = (struct virtio_desc *)base; + base += vq->qsize * sizeof(struct virtio_desc); + + /* ... immediately followed by "avail" ring (entirely uint16_t's) */ + vq->avail = (struct vring_avail *)base; + base += (2 + vq->qsize + 1) * sizeof(uint16_t); + + /* Then it's rounded up to the next page... */ + base = (char *)roundup2((uintptr_t)base, VRING_ALIGN); + + /* ... and the last page(s) are the used ring. */ + vq->used = (struct vring_used *)base; + + /* Mark queue as allocated, and start at 0 when we use it. */ + vq->flags = VQ_ALLOC; + vq->last_avail = 0; + vq->save_used = 0; +} + +/* reset one virtqueue, make it invalid */ +void virtio_vq_reset(struct virtio_vq_info *vq) +{ + if (!vq) { + pr_info("%s: vq is NULL!\n", __func__); + return; + } + + vq->pfn = 0; + vq->msix_idx = VIRTIO_MSI_NO_VECTOR; + vq->flags = 0; + vq->last_avail = 0; + vq->save_used = 0; +} diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c index 126991046eb7..0212f0ee8aea 100644 --- a/drivers/vfio/mdev/mdev_core.c +++ b/drivers/vfio/mdev/mdev_core.c @@ -66,34 +66,6 @@ uuid_le mdev_uuid(struct mdev_device *mdev) } EXPORT_SYMBOL(mdev_uuid); -static int _find_mdev_device(struct device *dev, void *data) -{ - struct mdev_device *mdev; - - if (!dev_is_mdev(dev)) - return 0; - - mdev = to_mdev_device(dev); - - if (uuid_le_cmp(mdev->uuid, *(uuid_le *)data) == 0) - return 1; - - return 0; -} - -static bool mdev_device_exist(struct mdev_parent *parent, uuid_le uuid) -{ - struct device *dev; - - dev = device_find_child(parent->dev, &uuid, _find_mdev_device); - if (dev) { - put_device(dev); - return true; - } - - return false; -} - /* Should be called holding parent_list_lock */ static struct mdev_parent *__find_parent_device(struct device *dev) { @@ -221,7 +193,6 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) } kref_init(&parent->ref); - mutex_init(&parent->lock); parent->dev = dev; parent->ops = ops; @@ -297,6 +268,10 @@ static void mdev_device_release(struct device *dev) { struct mdev_device *mdev = to_mdev_device(dev); + mutex_lock(&mdev_list_lock); + list_del(&mdev->next); + mutex_unlock(&mdev_list_lock); + dev_dbg(&mdev->dev, "MDEV: destroying\n"); kfree(mdev); } @@ -304,7 +279,7 @@ static void mdev_device_release(struct device *dev) int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid) { int ret; - struct mdev_device *mdev; + struct mdev_device *mdev, *tmp; struct mdev_parent *parent; struct mdev_type *type = to_mdev_type(kobj); @@ -312,21 +287,28 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid) if (!parent) return -EINVAL; - mutex_lock(&parent->lock); + mutex_lock(&mdev_list_lock); /* Check for duplicate */ - if (mdev_device_exist(parent, uuid)) { - ret = -EEXIST; - goto create_err; + 
list_for_each_entry(tmp, &mdev_list, next) { + if (!uuid_le_cmp(tmp->uuid, uuid)) { + mutex_unlock(&mdev_list_lock); + ret = -EEXIST; + goto mdev_fail; + } } mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) { + mutex_unlock(&mdev_list_lock); ret = -ENOMEM; - goto create_err; + goto mdev_fail; } memcpy(&mdev->uuid, &uuid, sizeof(uuid_le)); + list_add(&mdev->next, &mdev_list); + mutex_unlock(&mdev_list_lock); + mdev->parent = parent; kref_init(&mdev->ref); @@ -338,35 +320,28 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid) ret = device_register(&mdev->dev); if (ret) { put_device(&mdev->dev); - goto create_err; + goto mdev_fail; } ret = mdev_device_create_ops(kobj, mdev); if (ret) - goto create_failed; + goto create_fail; ret = mdev_create_sysfs_files(&mdev->dev, type); if (ret) { mdev_device_remove_ops(mdev, true); - goto create_failed; + goto create_fail; } mdev->type_kobj = kobj; + mdev->active = true; dev_dbg(&mdev->dev, "MDEV: created\n"); - mutex_unlock(&parent->lock); - - mutex_lock(&mdev_list_lock); - list_add(&mdev->next, &mdev_list); - mutex_unlock(&mdev_list_lock); - - return ret; + return 0; -create_failed: +create_fail: device_unregister(&mdev->dev); - -create_err: - mutex_unlock(&parent->lock); +mdev_fail: mdev_put_parent(parent); return ret; } @@ -377,44 +352,39 @@ int mdev_device_remove(struct device *dev, bool force_remove) struct mdev_parent *parent; struct mdev_type *type; int ret; - bool found = false; mdev = to_mdev_device(dev); mutex_lock(&mdev_list_lock); list_for_each_entry(tmp, &mdev_list, next) { - if (tmp == mdev) { - found = true; + if (tmp == mdev) break; - } } - if (found) - list_del(&mdev->next); + if (tmp != mdev) { + mutex_unlock(&mdev_list_lock); + return -ENODEV; + } - mutex_unlock(&mdev_list_lock); + if (!mdev->active) { + mutex_unlock(&mdev_list_lock); + return -EAGAIN; + } - if (!found) - return -ENODEV; + mdev->active = false; + mutex_unlock(&mdev_list_lock); type = to_mdev_type(mdev->type_kobj); parent = mdev->parent; - mutex_lock(&parent->lock); ret = mdev_device_remove_ops(mdev, force_remove); if (ret) { - mutex_unlock(&parent->lock); - - mutex_lock(&mdev_list_lock); - list_add(&mdev->next, &mdev_list); - mutex_unlock(&mdev_list_lock); - + mdev->active = true; return ret; } mdev_remove_sysfs_files(dev, type); device_unregister(dev); - mutex_unlock(&parent->lock); mdev_put_parent(parent); return 0; diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h index a9cefd70a705..b5819b7d7ef7 100644 --- a/drivers/vfio/mdev/mdev_private.h +++ b/drivers/vfio/mdev/mdev_private.h @@ -20,7 +20,6 @@ struct mdev_parent { struct device *dev; const struct mdev_parent_ops *ops; struct kref ref; - struct mutex lock; struct list_head next; struct kset *mdev_types_kset; struct list_head type_list; @@ -34,6 +33,7 @@ struct mdev_device { struct kref ref; struct list_head next; struct kobject *type_kobj; + bool active; }; #define to_mdev_device(dev) container_of(dev, struct mdev_device, dev) diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index f041b1a6cf66..695b9d1a1aae 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "vfio_pci_private.h" @@ -746,6 +747,9 @@ static long vfio_pci_ioctl(void *device_data, if (info.index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) return -EINVAL; + info.index = array_index_nospec(info.index, + VFIO_PCI_NUM_REGIONS + + vdev->num_regions); i = info.index - 
VFIO_PCI_NUM_REGIONS;
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 5628fe114347..115a36f6f403 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -808,6 +808,7 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
 {
 	__le16 *ctrl = (__le16 *)(vdev->vconfig + pos - offset + PCI_EXP_DEVCTL);
+	int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;
 
 	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
 	if (count < 0)
@@ -833,6 +834,27 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
 			pci_try_reset_function(vdev->pdev);
 	}
 
+	/*
+	 * MPS is virtualized to the user, writes do not change the physical
+	 * register since determining a proper MPS value requires a system wide
+	 * device view. The MRRS is largely independent of MPS, but since the
+	 * user does not have that system-wide view, they might set a safe, but
+	 * inefficiently low value. Here we allow writes through to hardware,
+	 * but we set the floor to the physical device MPS setting, so that
+	 * we can at least use full TLPs, as defined by the MPS value.
+	 *
+	 * NB, if any devices actually depend on an artificially low MRRS
+	 * setting, this will need to be revisited, perhaps with a quirk
+	 * through pcie_set_readrq().
+	 */
+	if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
+		readrq = 128 <<
+			((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
+		readrq = max(readrq, pcie_get_mps(vdev->pdev));
+
+		pcie_set_readrq(vdev->pdev, readrq);
+	}
+
 	return count;
 }
 
@@ -849,11 +871,14 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
 
 	/*
 	 * Allow writes to device control fields, except devctl_phantom,
-	 * which could confuse IOMMU, and the ARI bit in devctl2, which
-	 * is set at probe time. FLR gets virtualized via our writefn.
+	 * which could confuse IOMMU, MPS, which can break communication
+	 * with other physical devices, and the ARI bit in devctl2, which
+	 * is set at probe time. FLR and MRRS get virtualized via our
+	 * writefn.
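+	 *
+	 * (Editorial illustration: a user write selecting an MRRS of
+	 * 128 bytes, field encoding 0, on a device whose MPS is 256
+	 * bytes ends up as pcie_set_readrq(pdev, max(128, 256)), i.e.
+	 * 256, in the writefn above.)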
*/ p_setw(perm, PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_BCR_FLR, ~PCI_EXP_DEVCTL_PHANTOM); + PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD | + PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM); p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI); return 0; } diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index 4c27f4be3c3d..aa9e792110e3 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -681,18 +681,23 @@ int vfio_platform_probe_common(struct vfio_platform_device *vdev, group = vfio_iommu_group_get(dev); if (!group) { pr_err("VFIO: No IOMMU group for device %s\n", vdev->name); - return -EINVAL; + ret = -EINVAL; + goto put_reset; } ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev); - if (ret) { - vfio_iommu_group_put(group, dev); - return ret; - } + if (ret) + goto put_iommu; mutex_init(&vdev->igate); return 0; + +put_iommu: + vfio_iommu_group_put(group, dev); +put_reset: + vfio_platform_put_reset(vdev); + return ret; } EXPORT_SYMBOL_GPL(vfio_platform_probe_common); diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 63112c36ab2d..b4c68f3b82be 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -457,17 +457,17 @@ static void tce_iommu_unuse_page(struct tce_container *container, } static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, - unsigned long tce, unsigned long size, + unsigned long tce, unsigned long shift, unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem) { long ret = 0; struct mm_iommu_table_group_mem_t *mem; - mem = mm_iommu_lookup(container->mm, tce, size); + mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift); if (!mem) return -EINVAL; - ret = mm_iommu_ua_to_hpa(mem, tce, phpa); + ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa); if (ret) return -EINVAL; @@ -487,7 +487,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container, if (!pua) return; - ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl), + ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift, &hpa, &mem); if (ret) pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n", @@ -609,7 +609,7 @@ static long tce_iommu_build_v2(struct tce_container *container, entry + i); ret = tce_iommu_prereg_ua_to_hpa(container, - tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem); + tce, tbl->it_page_shift, &hpa, &mem); if (ret) break; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 92155cce926d..50eeb74ddc0a 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -83,6 +83,7 @@ struct vfio_dma { size_t size; /* Map size (bytes) */ int prot; /* IOMMU_READ/WRITE */ bool iommu_mapped; + bool lock_cap; /* capable(CAP_IPC_LOCK) */ struct task_struct *task; struct rb_root pfn_list; /* Ex-user pinned pfn list */ }; @@ -246,29 +247,25 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn) return ret; } -static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap) +static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async) { struct mm_struct *mm; - bool is_current; int ret; if (!npage) return 0; - is_current = (task->mm == current->mm); - - mm = is_current ? task->mm : get_task_mm(task); + mm = async ? 
get_task_mm(dma->task) : dma->task->mm; if (!mm) return -ESRCH; /* process exited */ ret = down_write_killable(&mm->mmap_sem); if (!ret) { if (npage > 0) { - if (lock_cap ? !*lock_cap : - !has_capability(task, CAP_IPC_LOCK)) { + if (!dma->lock_cap) { unsigned long limit; - limit = task_rlimit(task, + limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) >> PAGE_SHIFT; if (mm->locked_vm + npage > limit) @@ -282,7 +279,7 @@ static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap) up_write(&mm->mmap_sem); } - if (!is_current) + if (async) mmput(mm); return ret; @@ -338,22 +335,32 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, { struct page *page[1]; struct vm_area_struct *vma; + struct vm_area_struct *vmas[1]; + unsigned int flags = 0; int ret; + if (prot & IOMMU_WRITE) + flags |= FOLL_WRITE; + + down_read(&mm->mmap_sem); if (mm == current->mm) { - ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), - page); + ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas); } else { - unsigned int flags = 0; - - if (prot & IOMMU_WRITE) - flags |= FOLL_WRITE; - - down_read(&mm->mmap_sem); ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, - NULL, NULL); - up_read(&mm->mmap_sem); + vmas, NULL); + /* + * The lifetime of a vaddr_get_pfn() page pin is + * userspace-controlled. In the fs-dax case this could + * lead to indefinite stalls in filesystem operations. + * Disallow attempts to pin fs-dax pages via this + * interface. + */ + if (ret > 0 && vma_is_fsdax(vmas[0])) { + ret = -EOPNOTSUPP; + put_page(page[0]); + } } + up_read(&mm->mmap_sem); if (ret == 1) { *pfn = page_to_pfn(page[0]); @@ -381,7 +388,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, */ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, long npage, unsigned long *pfn_base, - bool lock_cap, unsigned long limit) + unsigned long limit) { unsigned long pfn = 0; long ret, pinned = 0, lock_acct = 0; @@ -404,7 +411,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, * pages are already counted against the user. 
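+	 *
+	 * (Editorial note: concretely, with dma->lock_cap clear and an
+	 * RLIMIT_MEMLOCK of 64 MiB, limit works out to 16384 pages on a
+	 * 4 KiB-page system, and any pin that would push
+	 * current->mm->locked_vm past that is backed out below.)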
*/ if (!rsvd && !vfio_find_vpfn(dma, iova)) { - if (!lock_cap && current->mm->locked_vm + 1 > limit) { + if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) { put_pfn(*pfn_base, dma->prot); pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, limit << PAGE_SHIFT); @@ -430,7 +437,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, } if (!rsvd && !vfio_find_vpfn(dma, iova)) { - if (!lock_cap && + if (!dma->lock_cap && current->mm->locked_vm + lock_acct + 1 > limit) { put_pfn(pfn, dma->prot); pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", @@ -443,7 +450,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, } out: - ret = vfio_lock_acct(current, lock_acct, &lock_cap); + ret = vfio_lock_acct(dma, lock_acct, false); unpin_out: if (ret) { @@ -474,7 +481,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, } if (do_accounting) - vfio_lock_acct(dma->task, locked - unlocked, NULL); + vfio_lock_acct(dma, locked - unlocked, true); return unlocked; } @@ -491,7 +498,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base); if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { - ret = vfio_lock_acct(dma->task, 1, NULL); + ret = vfio_lock_acct(dma, 1, true); if (ret) { put_pfn(*pfn_base, dma->prot); if (ret == -ENOMEM) @@ -518,7 +525,7 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova, unlocked = vfio_iova_put_vfio_pfn(dma, vpfn); if (do_accounting) - vfio_lock_acct(dma->task, -unlocked, NULL); + vfio_lock_acct(dma, -unlocked, true); return unlocked; } @@ -713,7 +720,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, dma->iommu_mapped = false; if (do_accounting) { - vfio_lock_acct(dma->task, -unlocked, NULL); + vfio_lock_acct(dma, -unlocked, true); return 0; } return unlocked; @@ -925,14 +932,12 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, size_t size = map_size; long npage; unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; - bool lock_cap = capable(CAP_IPC_LOCK); int ret = 0; while (size) { /* Pin a contiguous chunk of memory */ npage = vfio_pin_pages_remote(dma, vaddr + dma->size, - size >> PAGE_SHIFT, &pfn, - lock_cap, limit); + size >> PAGE_SHIFT, &pfn, limit); if (npage <= 0) { WARN_ON(!npage); ret = (int)npage; @@ -1007,8 +1012,36 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, dma->iova = iova; dma->vaddr = vaddr; dma->prot = prot; - get_task_struct(current); - dma->task = current; + + /* + * We need to be able to both add to a task's locked memory and test + * against the locked memory limit and we need to be able to do both + * outside of this call path as pinning can be asynchronous via the + * external interfaces for mdev devices. RLIMIT_MEMLOCK requires a + * task_struct and VM locked pages requires an mm_struct, however + * holding an indefinite mm reference is not recommended, therefore we + * only hold a reference to a task. We could hold a reference to + * current, however QEMU uses this call path through vCPU threads, + * which can be killed resulting in a NULL mm and failure in the unmap + * path when called via a different thread. Avoid this problem by + * using the group_leader as threads within the same group require + * both CLONE_THREAD and CLONE_VM and will therefore use the same + * mm_struct. 
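+	 *
+	 * (Editorial note: this is also why vfio_lock_acct() above takes
+	 * the mm via get_task_mm(dma->task) in the async case - an
+	 * external pin may arrive from a context with no usable mm of
+	 * its own, and the -ESRCH path there covers the task having
+	 * already exited.)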
+	 *
+	 * Previously we also used the task for testing CAP_IPC_LOCK at the
+	 * time of pinning and accounting, however has_capability() makes use
+	 * of real_cred, a copy-on-write field, so we can't guarantee that it
+	 * matches group_leader, or in fact that it might not change by the
+	 * time it's evaluated. If a process were to call MAP_DMA with
+	 * CAP_IPC_LOCK but later drop it, it doesn't make sense that they
+	 * possibly see different results for an iommu_mapped vfio_dma vs
+	 * externally mapped. Therefore track CAP_IPC_LOCK in vfio_dma at the
+	 * time of calling MAP_DMA.
+	 */
+	get_task_struct(current->group_leader);
+	dma->task = current->group_leader;
+	dma->lock_cap = capable(CAP_IPC_LOCK);
+
 	dma->pfn_list = RB_ROOT;
 
 	/* Insert zero-sized and grow as we map chunks of it */
@@ -1043,7 +1076,6 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 	struct vfio_domain *d;
 	struct rb_node *n;
 	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-	bool lock_cap = capable(CAP_IPC_LOCK);
 	int ret;
 
 	/* Arbitrarily pick the first domain in the list for lookups */
@@ -1090,8 +1122,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 
 			npage = vfio_pin_pages_remote(dma, vaddr,
 						      n >> PAGE_SHIFT,
-						      &pfn, lock_cap,
-						      limit);
+						      &pfn, limit);
 			if (npage <= 0) {
 				WARN_ON(!npage);
 				ret = (int)npage;
@@ -1368,7 +1399,7 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
 			if (!is_invalid_reserved_pfn(vpfn->pfn))
 				locked++;
 		}
-		vfio_lock_acct(dma->task, locked - unlocked, NULL);
+		vfio_lock_acct(dma, locked - unlocked, true);
 	}
 }
diff --git a/drivers/vhm/Kconfig b/drivers/vhm/Kconfig
new file mode 100644
index 000000000000..4ddb1314709a
--- /dev/null
+++ b/drivers/vhm/Kconfig
@@ -0,0 +1,18 @@
+config ACRN_VHM
+	bool "Intel ACRN Hypervisor Virtio and Hypervisor service Module (VHM)"
+	depends on ACRN
+	depends on DMA_CMA
+	depends on PCI_MSI
+	depends on HUGETLBFS
+	depends on !VMAP_STACK
+	default n
+	---help---
+	  This is the Virtio and Hypervisor service Module (VHM) for
+	  Intel ACRN hypervisor.
+
+	  It is required for the Service OS.
+	  A User OS doesn't need to have this config.
+
+	  Say Y for SOS and say N for UOS.
+
+	  If unsure, say N.
diff --git a/drivers/vhm/Makefile b/drivers/vhm/Makefile
new file mode 100644
index 000000000000..23f17ae24f78
--- /dev/null
+++ b/drivers/vhm/Makefile
@@ -0,0 +1 @@
+obj-y += vhm_mm.o vhm_hugetlb.o vhm_ioreq.o vhm_vm_mngt.o vhm_msi.o vhm_hypercall.o
diff --git a/drivers/vhm/vhm_hugetlb.c b/drivers/vhm/vhm_hugetlb.c
new file mode 100644
index 000000000000..34ebbd90acea
--- /dev/null
+++ b/drivers/vhm/vhm_hugetlb.c
@@ -0,0 +1,303 @@
+/*
+ * virtio and hypervisor service module (VHM): hugetlb
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2018 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (C) 2018 Intel Corporation. All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Jason Chen CJ + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#define HUGEPAGE_2M_SHIFT 21 +#define HUGEPAGE_1G_SHIFT 30 + +#define HUGEPAGE_1G_HLIST_IDX (HUGEPAGE_HLIST_ARRAY_SIZE - 1) + +struct hugepage_map { + struct hlist_node hlist; + u64 vm0_gpa; + size_t size; + u64 guest_gpa; +}; + +static inline struct hlist_head *hlist_2m_hash(struct vhm_vm *vm, + unsigned long guest_gpa) +{ + return &vm->hugepage_hlist[guest_gpa >> HUGEPAGE_2M_SHIFT & + (HUGEPAGE_2M_HLIST_ARRAY_SIZE - 1)]; +} + +static int add_guest_map(struct vhm_vm *vm, unsigned long vm0_gpa, + unsigned long guest_gpa, unsigned long size) +{ + struct hugepage_map *map; + int max_gfn; + + map = kzalloc(sizeof(struct hugepage_map), GFP_KERNEL); + if (map == NULL) + return -ENOMEM; + + map->vm0_gpa = vm0_gpa; + map->guest_gpa = guest_gpa; + map->size = size; + + INIT_HLIST_NODE(&map->hlist); + + max_gfn = (map->guest_gpa + map->size) >> PAGE_SHIFT; + if (vm->max_gfn < max_gfn) + vm->max_gfn = max_gfn; + + pr_info("VHM: add hugepage with size=0x%lx, vm0_gpa=0x%llx," + " and its guest gpa = 0x%llx, vm max_gfn 0x%x\n", + map->size, map->vm0_gpa, map->guest_gpa, vm->max_gfn); + + mutex_lock(&vm->hugepage_lock); + /* 1G hugepage? 
*/ + if (map->size == (1UL << HUGEPAGE_1G_SHIFT)) + hlist_add_head(&map->hlist, + &vm->hugepage_hlist[HUGEPAGE_1G_HLIST_IDX]); + else + hlist_add_head(&map->hlist, + hlist_2m_hash(vm, map->guest_gpa)); + mutex_unlock(&vm->hugepage_lock); + + return 0; +} + +int hugepage_map_guest(struct vhm_vm *vm, struct vm_memmap *memmap) +{ + struct page *page = NULL, *regions_buf_pg = NULL; + unsigned long len, guest_gpa, vma; + struct vm_memory_region *region_array; + struct set_regions regions; + int max_size = PAGE_SIZE/sizeof(struct vm_memory_region); + int ret; + + if (vm == NULL || memmap == NULL) + return -EINVAL; + + len = memmap->len; + vma = memmap->vma_base; + guest_gpa = memmap->gpa; + + /* prepare set_memory_regions info */ + regions_buf_pg = alloc_page(GFP_KERNEL); + if (regions_buf_pg == NULL) + return -ENOMEM; + regions.mr_num = 0; + regions.vmid = vm->vmid; + regions.regions_gpa = page_to_phys(regions_buf_pg); + region_array = page_to_virt(regions_buf_pg); + + while (len > 0) { + unsigned long vm0_gpa, pagesize; + + ret = get_user_pages_fast(vma, 1, 1, &page); + if (unlikely(ret != 1) || (page == NULL)) { + pr_err("failed to pin huge page!\n"); + ret = -ENOMEM; + goto err; + } + + vm0_gpa = page_to_phys(page); + pagesize = PAGE_SIZE << compound_order(page); + + ret = add_guest_map(vm, vm0_gpa, guest_gpa, pagesize); + if (ret < 0) { + pr_err("failed to add memseg for huge page!\n"); + goto err; + } + + /* fill each memory region into region_array */ + region_array[regions.mr_num].type = MR_ADD; + region_array[regions.mr_num].gpa = guest_gpa; + region_array[regions.mr_num].vm0_gpa = vm0_gpa; + region_array[regions.mr_num].size = pagesize; + region_array[regions.mr_num].prot = + (MEM_TYPE_WB & MEM_TYPE_MASK) | + (memmap->prot & MEM_ACCESS_RIGHT_MASK); + regions.mr_num++; + if (regions.mr_num == max_size) { + pr_info("region buffer full, set & renew regions!\n"); + ret = set_memory_regions(®ions); + if (ret < 0) { + pr_err("failed to set regions,ret=%d!\n", ret); + goto err; + } + regions.mr_num = 0; + } + + len -= pagesize; + vma += pagesize; + guest_gpa += pagesize; + } + + ret = set_memory_regions(®ions); + if (ret < 0) { + pr_err("failed to set regions, ret=%d!\n", ret); + goto err; + } + + __free_page(regions_buf_pg); + + return 0; +err: + if (regions_buf_pg) + __free_page(regions_buf_pg); + if (page) + put_page(page); + return ret; +} + +void hugepage_free_guest(struct vhm_vm *vm) +{ + struct hlist_node *htmp; + struct hugepage_map *map; + int i; + + mutex_lock(&vm->hugepage_lock); + for (i = 0; i < HUGEPAGE_HLIST_ARRAY_SIZE; i++) { + if (!hlist_empty(&vm->hugepage_hlist[i])) { + hlist_for_each_entry_safe(map, htmp, + &vm->hugepage_hlist[i], hlist) { + hlist_del(&map->hlist); + /* put_page to unpin huge page */ + put_page(pfn_to_page( + map->vm0_gpa >> PAGE_SHIFT)); + kfree(map); + } + } + } + mutex_unlock(&vm->hugepage_lock); +} + +void *hugepage_map_guest_phys(struct vhm_vm *vm, u64 guest_phys, size_t size) +{ + struct hlist_node *htmp; + struct hugepage_map *map; + + mutex_lock(&vm->hugepage_lock); + /* check 1G hlist first */ + if (!hlist_empty(&vm->hugepage_hlist[HUGEPAGE_1G_HLIST_IDX])) { + hlist_for_each_entry_safe(map, htmp, + &vm->hugepage_hlist[HUGEPAGE_1G_HLIST_IDX], hlist) { + if (map->guest_gpa >= guest_phys + size || + guest_phys >= map->guest_gpa + map->size) + continue; + + if (guest_phys + size > map->guest_gpa + map->size || + guest_phys < map->guest_gpa) + goto err; + + mutex_unlock(&vm->hugepage_lock); + return phys_to_virt(map->vm0_gpa + + guest_phys - 
map->guest_gpa);
+		}
+	}
+
+	/* check 2m hlist */
+	hlist_for_each_entry_safe(map, htmp,
+		hlist_2m_hash(vm, guest_phys), hlist) {
+		if (map->guest_gpa >= guest_phys + size ||
+			guest_phys >= map->guest_gpa + map->size)
+			continue;
+
+		if (guest_phys + size > map->guest_gpa + map->size ||
+			guest_phys < map->guest_gpa)
+			goto err;
+
+		mutex_unlock(&vm->hugepage_lock);
+		return phys_to_virt(map->vm0_gpa +
+			guest_phys - map->guest_gpa);
+	}
+
+err:
+	mutex_unlock(&vm->hugepage_lock);
+	printk(KERN_WARNING "cannot find a matching mem map, please check "
+		"the input range and alignment\n");
+	return NULL;
+}
+
+int hugepage_unmap_guest_phys(struct vhm_vm *vm, u64 guest_phys)
+{
+	struct hlist_node *htmp;
+	struct hugepage_map *map;
+
+	mutex_lock(&vm->hugepage_lock);
+	/* check 1G hlist first */
+	if (!hlist_empty(&vm->hugepage_hlist[HUGEPAGE_1G_HLIST_IDX])) {
+		hlist_for_each_entry_safe(map, htmp,
+			&vm->hugepage_hlist[HUGEPAGE_1G_HLIST_IDX], hlist) {
+			if (map->guest_gpa <= guest_phys &&
+				guest_phys < map->guest_gpa + map->size) {
+				mutex_unlock(&vm->hugepage_lock);
+				return 0;
+			}
+		}
+	}
+	/* check 2m hlist */
+	hlist_for_each_entry_safe(map, htmp,
+		hlist_2m_hash(vm, guest_phys), hlist) {
+		if (map->guest_gpa <= guest_phys &&
+			guest_phys < map->guest_gpa + map->size) {
+			mutex_unlock(&vm->hugepage_lock);
+			return 0;
+		}
+	}
+	mutex_unlock(&vm->hugepage_lock);
+	return -ESRCH;
+}
diff --git a/drivers/vhm/vhm_hypercall.c b/drivers/vhm/vhm_hypercall.c
new file mode 100644
index 000000000000..9a92d6888b90
--- /dev/null
+++ b/drivers/vhm/vhm_hypercall.c
@@ -0,0 +1,174 @@
+/*
+ * virtio and hypervisor service module (VHM): hypercall wrap
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2017 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (C) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#include +#include +#include + +inline long hcall_sos_offline_cpu(unsigned long cpu) +{ + return acrn_hypercall1(HC_SOS_OFFLINE_CPU, cpu); +} + +inline long hcall_get_api_version(unsigned long api_version) +{ + return acrn_hypercall1(HC_GET_API_VERSION, api_version); +} + +inline long hcall_create_vm(unsigned long vminfo) +{ + return acrn_hypercall1(HC_CREATE_VM, vminfo); +} + +inline long hcall_start_vm(unsigned long vmid) +{ + return acrn_hypercall1(HC_START_VM, vmid); +} + +inline long hcall_pause_vm(unsigned long vmid) +{ + return acrn_hypercall1(HC_PAUSE_VM, vmid); +} + +inline long hcall_reset_vm(unsigned long vmid) +{ + return acrn_hypercall1(HC_RESET_VM, vmid); +} + +inline long hcall_destroy_vm(unsigned long vmid) +{ + return acrn_hypercall1(HC_DESTROY_VM, vmid); +} + +inline long hcall_setup_sbuf(unsigned long sbuf_head) +{ + return acrn_hypercall1(HC_SETUP_SBUF, sbuf_head); +} + +inline long hcall_set_sstate_data(unsigned long sx_data_addr) +{ + return acrn_hypercall1(HC_PM_SET_SSTATE_DATA, sx_data_addr); +} + +inline long hcall_get_cpu_state(unsigned long cmd, unsigned long state_pa) +{ + return acrn_hypercall2(HC_PM_GET_CPU_STATE, cmd, state_pa); +} + +inline long hcall_set_memory_regions(unsigned long pa_regions) +{ + return acrn_hypercall1(HC_VM_SET_MEMORY_REGIONS, pa_regions); +} + +inline long hcall_write_protect_page(unsigned long vmid, unsigned long wp) +{ + return acrn_hypercall2(HC_VM_WRITE_PROTECT_PAGE, vmid, wp); +} + +inline long hcall_set_ioreq_buffer(unsigned long vmid, unsigned long buffer) +{ + return acrn_hypercall2(HC_SET_IOREQ_BUFFER, vmid, buffer); +} + +inline long hcall_notify_req_finish(unsigned long vmid, unsigned long vcpu) +{ + return acrn_hypercall2(HC_NOTIFY_REQUEST_FINISH, vmid, vcpu); +} + +inline long hcall_assert_irqline(unsigned long vmid, unsigned long irq) +{ + return acrn_hypercall2(HC_ASSERT_IRQLINE, vmid, irq); +} + +inline long hcall_deassert_irqline(unsigned long vmid, unsigned long irq) +{ + return acrn_hypercall2(HC_DEASSERT_IRQLINE, vmid, irq); +} + +inline long hcall_pulse_irqline(unsigned long vmid, unsigned long irq) +{ + return acrn_hypercall2(HC_PULSE_IRQLINE, vmid, irq); +} + +inline long hcall_inject_msi(unsigned long vmid, unsigned long msi) +{ + return acrn_hypercall2(HC_INJECT_MSI, vmid, msi); +} + +inline long hcall_assign_ptdev(unsigned long vmid, unsigned long bdf) +{ + return acrn_hypercall2(HC_ASSIGN_PTDEV, vmid, bdf); +} + +inline long hcall_deassign_ptdev(unsigned long vmid, unsigned long bdf) +{ + return acrn_hypercall2(HC_DEASSIGN_PTDEV, vmid, bdf); +} + +inline long hcall_set_ptdev_intr_info(unsigned long vmid, unsigned long pt_irq) +{ + return acrn_hypercall2(HC_SET_PTDEV_INTR_INFO, vmid, pt_irq); +} + +inline long hcall_reset_ptdev_intr_info(unsigned long vmid, + unsigned long pt_irq) +{ + return acrn_hypercall2(HC_RESET_PTDEV_INTR_INFO, vmid, pt_irq); +} + +inline long hcall_remap_pci_msix(unsigned long vmid, unsigned long msi) +{ + return 
acrn_hypercall2(HC_VM_PCI_MSIX_REMAP, vmid, msi);
+}
+
+inline long hcall_vm_gpa2hpa(unsigned long vmid, unsigned long addr)
+{
+	return acrn_hypercall2(HC_VM_GPA2HPA, vmid, addr);
+}
diff --git a/drivers/vhm/vhm_ioreq.c b/drivers/vhm/vhm_ioreq.c
new file mode 100644
index 000000000000..960723b1778d
--- /dev/null
+++ b/drivers/vhm/vhm_ioreq.c
@@ -0,0 +1,927 @@
+/*
+ * virtio and hypervisor service module (VHM): ioreq multi client feature
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2017 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (C) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Jason Chen CJ
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+struct ioreq_range {
+	struct list_head list;
+	uint32_t type;
+	long start;
+	long end;
+};
+
+struct ioreq_client {
+	/* client name */
+	char name[16];
+	/* client id */
+	int id;
+	/* vm this client belongs to */
+	unsigned long vmid;
+	/* list node for this ioreq_client */
+	struct list_head list;
+	/*
+	 * is this client a fallback?
+ * there is only one fallback client in a vm - dm + * a fallback client shares IOReq buffer pages + * a fallback client handles all left IOReq not handled by other clients + * a fallback client does not need add io ranges + * a fallback client handles ioreq in its own context + */ + bool fallback; + + volatile bool destroying; + volatile bool kthread_exit; + + /* client covered io ranges - N/A for fallback client */ + struct list_head range_list; + spinlock_t range_lock; + + /* + * this req records the req number this client need handle + */ + DECLARE_BITMAP(ioreqs_map, VHM_REQUEST_MAX); + + /* + * client ioreq handler: + * if client provides a handler, it means vhm need create a kthread + * to call the handler while there is ioreq. + * if client doesn't provide a handler, client should handle ioreq + * in its own context when calls acrn_ioreq_attach_client. + * + * NOTE: for fallback client, there is no ioreq handler. + */ + ioreq_handler_t handler; + bool vhm_create_kthread; + struct task_struct *thread; + wait_queue_head_t wq; + + /* pci bdf trap */ + bool trap_bdf; + int pci_bus; + int pci_dev; + int pci_func; +}; + +#define MAX_CLIENT 64 +static struct ioreq_client *clients[MAX_CLIENT]; +static DECLARE_BITMAP(client_bitmap, MAX_CLIENT); + +static void acrn_ioreq_notify_client(struct ioreq_client *client); + +static inline bool is_range_type(uint32_t type) +{ + return (type == REQ_MMIO || type == REQ_PORTIO || type == REQ_WP); +} + +static int alloc_client(void) +{ + struct ioreq_client *client; + int i; + + i = find_first_zero_bit(client_bitmap, MAX_CLIENT); + if (i >= MAX_CLIENT) + return -ENOMEM; + set_bit(i, client_bitmap); + + client = kzalloc(sizeof(struct ioreq_client), GFP_KERNEL); + if (!client) + return -ENOMEM; + client->id = i; + clients[i] = client; + + return i; +} + +static void free_client(int i) +{ + if (i < MAX_CLIENT && i >= 0) { + if (test_and_clear_bit(i, client_bitmap)) { + kfree(clients[i]); + clients[i] = NULL; + } + } +} + +int acrn_ioreq_create_client(unsigned long vmid, ioreq_handler_t handler, + char *name) +{ + struct vhm_vm *vm; + struct ioreq_client *client; + unsigned long flags; + int client_id; + + might_sleep(); + + vm = find_get_vm(vmid); + if (unlikely(vm == NULL)) { + pr_err("vhm-ioreq: failed to find vm from vmid %ld\n", + vmid); + return -EINVAL; + } + if (unlikely(vm->req_buf == NULL)) { + pr_err("vhm-ioreq: vm[%ld]'s reqbuf is not ready\n", + vmid); + put_vm(vm); + return -EINVAL; + } + + client_id = alloc_client(); + if (unlikely(client_id < 0)) { + pr_err("vhm-ioreq: vm[%ld] failed to alloc ioreq " + "client id\n", vmid); + put_vm(vm); + return -EINVAL; + } + + client = clients[client_id]; + + if (handler) { + client->handler = handler; + client->vhm_create_kthread = true; + } + + client->vmid = vmid; + if (name) + strncpy(client->name, name, sizeof(client->name) - 1); + spin_lock_init(&client->range_lock); + INIT_LIST_HEAD(&client->range_list); + init_waitqueue_head(&client->wq); + + spin_lock_irqsave(&vm->ioreq_client_lock, flags); + list_add(&client->list, &vm->ioreq_client_list); + spin_unlock_irqrestore(&vm->ioreq_client_lock, flags); + + put_vm(vm); + + pr_info("vhm-ioreq: created ioreq client %d\n", client_id); + + return client_id; +} + +int acrn_ioreq_create_fallback_client(unsigned long vmid, char *name) +{ + struct vhm_vm *vm; + int client_id; + + vm = find_get_vm(vmid); + if (unlikely(vm == NULL)) { + pr_err("vhm-ioreq: failed to find vm from vmid %ld\n", + vmid); + return -EINVAL; + } + + if (unlikely(vm->ioreq_fallback_client > 
+int acrn_ioreq_create_fallback_client(unsigned long vmid, char *name)
+{
+	struct vhm_vm *vm;
+	int client_id;
+
+	vm = find_get_vm(vmid);
+	if (unlikely(vm == NULL)) {
+		pr_err("vhm-ioreq: failed to find vm from vmid %ld\n", vmid);
+		return -EINVAL;
+	}
+
+	if (unlikely(vm->ioreq_fallback_client > 0)) {
+		pr_err("vhm-ioreq: a fallback client already exists for vm %ld\n",
+			vmid);
+		put_vm(vm);
+		return -EINVAL;
+	}
+
+	client_id = acrn_ioreq_create_client(vmid, NULL, name);
+	if (unlikely(client_id < 0)) {
+		put_vm(vm);
+		return -EINVAL;
+	}
+
+	clients[client_id]->fallback = true;
+	vm->ioreq_fallback_client = client_id;
+
+	put_vm(vm);
+
+	return client_id;
+}
+
+static void acrn_ioreq_destroy_client_pervm(struct ioreq_client *client,
+	struct vhm_vm *vm)
+{
+	struct list_head *pos, *tmp;
+	unsigned long flags;
+
+	client->destroying = true;
+	acrn_ioreq_notify_client(client);
+
+	/* the client thread sets the kthread_exit flag right before it
+	 * exits, so wait here until it has exited.
+	 */
+	while (!client->kthread_exit)
+		msleep(10);
+
+	spin_lock_irqsave(&client->range_lock, flags);
+	list_for_each_safe(pos, tmp, &client->range_list) {
+		struct ioreq_range *range =
+			container_of(pos, struct ioreq_range, list);
+		list_del(&range->list);
+		kfree(range);
+	}
+	spin_unlock_irqrestore(&client->range_lock, flags);
+
+	spin_lock_irqsave(&vm->ioreq_client_lock, flags);
+	list_del(&client->list);
+	spin_unlock_irqrestore(&vm->ioreq_client_lock, flags);
+
+	/* clear the fallback id before free_client() frees the client */
+	if (client->id == vm->ioreq_fallback_client)
+		vm->ioreq_fallback_client = -1;
+
+	free_client(client->id);
+}
+
+void acrn_ioreq_destroy_client(int client_id)
+{
+	struct vhm_vm *vm;
+	struct ioreq_client *client;
+
+	if (client_id < 0 || client_id >= MAX_CLIENT) {
+		pr_err("vhm-ioreq: no client for id %d\n", client_id);
+		return;
+	}
+	client = clients[client_id];
+	if (!client) {
+		pr_err("vhm-ioreq: no client for id %d\n", client_id);
+		return;
+	}
+
+	might_sleep();
+
+	vm = find_get_vm(client->vmid);
+	if (unlikely(vm == NULL)) {
+		pr_err("vhm-ioreq: failed to find vm from vmid %ld\n",
+			client->vmid);
+		return;
+	}
+
+	acrn_ioreq_destroy_client_pervm(client, vm);
+
+	put_vm(vm);
+}
+
+static void __attribute__((unused)) dump_iorange(struct ioreq_client *client)
+{
+	struct list_head *pos;
+	unsigned long flags;
+
+	spin_lock_irqsave(&client->range_lock, flags);
+	list_for_each(pos, &client->range_list) {
+		struct ioreq_range *range =
+			container_of(pos, struct ioreq_range, list);
+		pr_debug("\tio range: type %d, start 0x%lx, end 0x%lx\n",
+			range->type, range->start, range->end);
+	}
+	spin_unlock_irqrestore(&client->range_lock, flags);
+}
+
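/*
 * Editor's sketch, not part of the patch: teardown mirrors the
 * registration sketch above; the range values are the same hypothetical
 * ones.  acrn_ioreq_destroy_client() also frees any leftover ranges, so
 * the explicit del is only for symmetry.
 */
static void sample_unregister(int client_id)
{
	acrn_ioreq_del_iorange(client_id, REQ_PORTIO, 0x3f8, 0x3ff);
	acrn_ioreq_destroy_client(client_id);
}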
+/*
+ * NOTE: this adds the iorange entry directly, without checking for
+ * overlaps; clients must take care of that themselves.
+ */
+int acrn_ioreq_add_iorange(int client_id, uint32_t type,
+	long start, long end)
+{
+	struct ioreq_client *client;
+	struct ioreq_range *range;
+	unsigned long flags;
+
+	if (client_id < 0 || client_id >= MAX_CLIENT) {
+		pr_err("vhm-ioreq: no client for id %d\n", client_id);
+		return -EFAULT;
+	}
+	client = clients[client_id];
+	if (!client) {
+		pr_err("vhm-ioreq: no client for id %d\n", client_id);
+		return -EFAULT;
+	}
+
+	if (end < start) {
+		pr_err("vhm-ioreq: end < start\n");
+		return -EFAULT;
+	}
+
+	might_sleep();
+
+	range = kzalloc(sizeof(struct ioreq_range), GFP_KERNEL);
+	if (!range) {
+		pr_err("vhm-ioreq: failed to alloc ioreq range\n");
+		return -ENOMEM;
+	}
+	range->type = type;
+	range->start = start;
+	range->end = end;
+
+	spin_lock_irqsave(&client->range_lock, flags);
+	list_add(&range->list, &client->range_list);
+	spin_unlock_irqrestore(&client->range_lock, flags);
+
+	return 0;
+}
+
+int acrn_ioreq_del_iorange(int client_id, uint32_t type,
+	long start, long end)
+{
+	struct ioreq_client *client;
+	struct ioreq_range *range;
+	struct list_head *pos, *tmp;
+	unsigned long flags;
+
+	if (client_id < 0 || client_id >= MAX_CLIENT) {
+		pr_err("vhm-ioreq: no client for id %d\n", client_id);
+		return -EFAULT;
+	}
+	client = clients[client_id];
+	if (!client) {
+		pr_err("vhm-ioreq: no client for id %d\n", client_id);
+		return -EFAULT;
+	}
+
+	if (end < start) {
+		pr_err("vhm-ioreq: end < start\n");
+		return -EFAULT;
+	}
+
+	might_sleep();
+
+	spin_lock_irqsave(&client->range_lock, flags);
+	list_for_each_safe(pos, tmp, &client->range_list) {
+		range = container_of(pos, struct ioreq_range, list);
+		if (range->type == type) {
+			if (is_range_type(type)) {
+				if (start == range->start &&
+					end == range->end) {
+					list_del(&range->list);
+					kfree(range);
+					break;
+				}
+			} else {
+				list_del(&range->list);
+				kfree(range);
+				break;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&client->range_lock, flags);
+
+	return 0;
+}
+
+static inline bool is_destroying(struct ioreq_client *client)
+{
+	if (client)
+		return client->destroying;
+	else
+		return true;
+}
+
+static inline bool has_pending_request(struct ioreq_client *client)
+{
+	if (client)
+		return !bitmap_empty(client->ioreqs_map, VHM_REQUEST_MAX);
+	else
+		return false;
+}
+
+struct vhm_request *acrn_ioreq_get_reqbuf(int client_id)
+{
+	struct ioreq_client *client;
+	struct vhm_request *req_buf;
+	struct vhm_vm *vm;
+
+	if (client_id < 0 || client_id >= MAX_CLIENT) {
+		pr_err("vhm-ioreq: no client for id %d\n", client_id);
+		return NULL;
+	}
+	client = clients[client_id];
+	if (!client) {
+		pr_err("vhm-ioreq: no client for id %d\n", client_id);
+		return NULL;
+	}
+	vm = find_get_vm(client->vmid);
+	if (unlikely(vm == NULL)) {
+		pr_err("vhm-ioreq: failed to find vm from vmid %ld\n",
+			client->vmid);
+		return NULL;
+	}
+
+	if (vm->req_buf == NULL) {
+		pr_warn("vhm-ioreq: request buffer page not ready yet for vmid %ld\n",
+			client->vmid);
+	}
+	/* read the pointer before put_vm() can drop the last reference */
+	req_buf = (struct vhm_request *)vm->req_buf;
+	put_vm(vm);
+	return req_buf;
+}
+
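/*
 * Editor's sketch, not part of the patch: how a handler can pair the
 * per-vcpu bits in ioreqs_map with the shared request page.  Treating
 * the buffer as a flat vhm_request array follows the cast in
 * acrn_ioreq_get_reqbuf() above; request state handling is simplified.
 */
static int sample_emulate(int client_id, unsigned long *ioreqs_map)
{
	struct vhm_request *req_buf = acrn_ioreq_get_reqbuf(client_id);
	int vcpu;

	if (!req_buf)
		return -EINVAL;

	for_each_set_bit(vcpu, ioreqs_map, VHM_REQUEST_MAX) {
		struct vhm_request *req = &req_buf[vcpu];

		/* fabricate read data for trapped port reads */
		if (req->type == REQ_PORTIO &&
		    req->reqs.pio_request.direction == REQUEST_READ)
			req->reqs.pio_request.value = 0;
		acrn_ioreq_complete_request(client_id, vcpu);
	}
	return 0;
}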
+static int ioreq_client_thread(void *data)
+{
+	struct ioreq_client *client;
+	int ret, client_id = (unsigned long)data;
+
+	while (1) {
+		client = clients[client_id];
+		if (is_destroying(client)) {
+			pr_info("vhm-ioreq: client is being destroyed -> stop thread\n");
+			break;
+		}
+		if (has_pending_request(client)) {
+			if (client->handler) {
+				ret = client->handler(client->id,
+					client->ioreqs_map);
+				if (ret < 0)
+					BUG();
+			} else {
+				pr_err("vhm-ioreq: no ioreq handler\n");
+				break;
+			}
+		} else
+			wait_event_freezable(client->wq,
+				(has_pending_request(client) ||
+				 is_destroying(client)));
+	}
+
+	/* a client thread (e.g. the one for hyper-dma) exits from here,
+	 * so set kthread_exit before exiting */
+	client->kthread_exit = true;
+
+	return 0;
+}
+
+int acrn_ioreq_attach_client(int client_id, bool check_kthread_stop)
+{
+	struct ioreq_client *client;
+
+	if (client_id < 0 || client_id >= MAX_CLIENT) {
+		pr_err("vhm-ioreq: no client for id %d\n", client_id);
+		return -EFAULT;
+	}
+	client = clients[client_id];
+	if (!client) {
+		pr_err("vhm-ioreq: no client for id %d\n", client_id);
+		return -EFAULT;
+	}
+
+	if (client->vhm_create_kthread) {
+		if (client->thread) {
+			pr_warn("vhm-ioreq: kthread already exists for client %s\n",
+				client->name);
+			return 0;
+		}
+		client->thread = kthread_run(ioreq_client_thread,
+			(void *)(unsigned long)client_id,
+			"ioreq_client[%ld]:%s",
+			client->vmid, client->name);
+		if (IS_ERR(client->thread)) {
+			pr_err("vhm-ioreq: failed to run kthread for client %s\n",
+				client->name);
+			return -ENOMEM;
+		}
+	} else {
+		might_sleep();
+
+		if (check_kthread_stop) {
+			wait_event_freezable(client->wq,
+				(kthread_should_stop() ||
+				 has_pending_request(client) ||
+				 is_destroying(client)));
+			if (kthread_should_stop())
+				client->kthread_exit = true;
+		} else {
+			wait_event_freezable(client->wq,
+				(has_pending_request(client) ||
+				 is_destroying(client)));
+		}
+
+		if (is_destroying(client)) {
+			/* a client thread for a vcpu exits from here,
+			 * so set kthread_exit before exiting */
+			client->kthread_exit = true;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+void acrn_ioreq_intercept_bdf(int client_id, int bus, int dev, int func)
+{
+	struct ioreq_client *client;
+
+	if (client_id < 0 || client_id >= MAX_CLIENT) {
+		pr_err("vhm-ioreq: no client for id %d\n", client_id);
+		return;
+	}
+	client = clients[client_id];
+	if (!client) {
+		pr_err("vhm-ioreq: no client for id %d\n", client_id);
+		return;
+	}
+	client->trap_bdf = true;
+	client->pci_bus = bus;
+	client->pci_dev = dev;
+	client->pci_func = func;
+}
+
+void acrn_ioreq_unintercept_bdf(int client_id)
+{
+	struct ioreq_client *client;
+
+	if (client_id < 0 || client_id >= MAX_CLIENT) {
+		pr_err("vhm-ioreq: no client for id %d\n", client_id);
+		return;
+	}
+	client = clients[client_id];
+	if (!client) {
+		pr_err("vhm-ioreq: no client for id %d\n", client_id);
+		return;
+	}
+	client->trap_bdf = false;
+	client->pci_bus = -1;
+	client->pci_dev = -1;
+	client->pci_func = -1;
+}
+
+static void acrn_ioreq_notify_client(struct ioreq_client *client)
+{
+	/* if the client thread is waiting on the queue, wake it up */
+	if (waitqueue_active(&client->wq))
+		wake_up_interruptible(&client->wq);
+}
+
+static bool req_in_range(struct ioreq_range *range, struct vhm_request *req)
+{
+	bool ret = false;
+
+	if (range->type == req->type) {
+		switch (req->type) {
+		case REQ_MMIO:
+		case REQ_WP:
+		{
+			if (req->reqs.mmio_request.address >= range->start &&
+				(req->reqs.mmio_request.address +
+				req->reqs.mmio_request.size - 1) <= range->end)
+				ret = true;
+			break;
+		}
+		case REQ_PORTIO: {
+			if (req->reqs.pio_request.address >= range->start &&
+				(req->reqs.pio_request.address +
+				req->reqs.pio_request.size - 1) <= range->end)
+				ret = true;
+			break;
+		}
+
+		default:
+			ret = false;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static bool is_cfg_addr(struct vhm_request *req)
+{
+	return (req->type == REQ_PORTIO &&
+		(req->reqs.pio_request.address >= 0xcf8 &&
+		 req->reqs.pio_request.address < 0xcf8+4));
+}
+
+static bool is_cfg_data(struct vhm_request *req)
+{
+	return (req->type 
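/*
 * Editor's note: 0xcf8/0xcfc is legacy PCI configuration mechanism #1.
 * A dword written to 0xcf8 latches enable(bit 31) | bus(23:16) |
 * dev(15:11) | func(10:8) | reg(7:0), and subsequent accesses to
 * 0xcfc-0xcff read or write the selected register; e.g. writing
 * 0x80001808 to 0xcf8 selects bus 0, device 3, function 0, register 8.
 * handle_cf8cfc() below emulates the 0xcf8 latch and rewrites 0xcfc
 * data accesses as REQ_PCICFG requests.
 */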
== REQ_PORTIO && + (req->reqs.pio_request.address >= 0xcfc && + req->reqs.pio_request.address < 0xcfc+4)); +} + +static int cached_bus; +static int cached_dev; +static int cached_func; +static int cached_reg; +static int cached_enable; +#define PCI_REGMAX 255 /* highest supported config register addr.*/ +#define PCI_FUNCMAX 7 /* highest supported function number */ +#define PCI_SLOTMAX 31 /* highest supported slot number */ +#define PCI_BUSMAX 255 /* highest supported bus number */ +#define CONF1_ENABLE 0x80000000ul +static int handle_cf8cfc(struct vhm_vm *vm, struct vhm_request *req, int vcpu) +{ + int req_handled = 0; + + /*XXX: like DM, assume cfg address write is size 4 */ + if (is_cfg_addr(req)) { + if (req->reqs.pio_request.direction == REQUEST_WRITE) { + if (req->reqs.pio_request.size == 4) { + int value = req->reqs.pio_request.value; + + cached_bus = (value >> 16) & PCI_BUSMAX; + cached_dev = (value >> 11) & PCI_SLOTMAX; + cached_func = (value >> 8) & PCI_FUNCMAX; + cached_reg = value & PCI_REGMAX; + cached_enable = + (value & CONF1_ENABLE) == CONF1_ENABLE; + req_handled = 1; + } + } else { + if (req->reqs.pio_request.size == 4) { + req->reqs.pio_request.value = + (cached_bus << 16) | + (cached_dev << 11) | (cached_func << 8) + | cached_reg; + if (cached_enable) + req->reqs.pio_request.value |= + CONF1_ENABLE; + req_handled = 1; + } + } + } else if (is_cfg_data(req)) { + if (!cached_enable) { + if (req->reqs.pio_request.direction == REQUEST_READ) + req->reqs.pio_request.value = 0xffffffff; + req_handled = 1; + } else { + /* pci request is same as io request at top */ + int offset = req->reqs.pio_request.address - 0xcfc; + + req->type = REQ_PCICFG; + req->reqs.pci_request.bus = cached_bus; + req->reqs.pci_request.dev = cached_dev; + req->reqs.pci_request.func = cached_func; + req->reqs.pci_request.reg = cached_reg + offset; + } + } + + if (req_handled) { + smp_mb(); + atomic_set(&req->processed, REQ_STATE_COMPLETE); + if (hcall_notify_req_finish(vm->vmid, vcpu) < 0) { + pr_err("vhm-ioreq: failed to " + "notify request finished !\n"); + return -EFAULT; + } + } + + return req_handled; +} + +static bool bdf_match(struct ioreq_client *client) +{ + return (client->trap_bdf && + client->pci_bus == cached_bus && + client->pci_dev == cached_dev && + client->pci_func == cached_func); +} + +static struct ioreq_client *acrn_ioreq_find_client_by_request(struct vhm_vm *vm, + struct vhm_request *req) +{ + struct list_head *pos, *range_pos; + struct ioreq_client *client; + struct ioreq_client *target_client = NULL, *fallback_client = NULL; + struct ioreq_range *range; + bool found = false; + + spin_lock(&vm->ioreq_client_lock); + list_for_each(pos, &vm->ioreq_client_list) { + client = container_of(pos, struct ioreq_client, list); + + if (client->fallback) { + fallback_client = client; + continue; + } + + if (req->type == REQ_PCICFG) { + if (bdf_match(client)) { /* bdf match client */ + target_client = client; + break; + } else /* other or fallback client */ + continue; + } + + spin_lock(&client->range_lock); + list_for_each(range_pos, &client->range_list) { + range = + container_of(range_pos, struct ioreq_range, list); + if (req_in_range(range, req)) { + found = true; + target_client = client; + break; + } + } + spin_unlock(&client->range_lock); + + if (found) + break; + } + spin_unlock(&vm->ioreq_client_lock); + + if (target_client) + return target_client; + + if (fallback_client) + return fallback_client; + + return NULL; +} + +int acrn_ioreq_distribute_request(struct vhm_vm *vm) +{ + struct 
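/*
 * Editor's note: this is the dispatch path.  For each vcpu whose shared
 * request slot is PENDING, the request is either consumed by the
 * cf8/cfc emulation above or routed to the first client whose iorange
 * (or trapped BDF) matches, with the fallback client catching the
 * rest; all clients that received work are then woken through their
 * waitqueues.
 */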
vhm_request *req;
+	struct list_head *pos;
+	struct ioreq_client *client;
+	int i, vcpu_num;
+
+	vcpu_num = atomic_read(&vm->vcpu_num);
+	for (i = 0; i < vcpu_num; i++) {
+		req = vm->req_buf->req_queue + i;
+
+		/* This function is called in tasklet only on SOS CPU0. Thus it
+		 * is safe to read the state first and update it later as long
+		 * as the update is atomic. */
+		if (atomic_read(&req->processed) == REQ_STATE_PENDING) {
+			if (handle_cf8cfc(vm, req, i))
+				continue;
+			client = acrn_ioreq_find_client_by_request(vm, req);
+			if (client == NULL) {
+				pr_err("vhm-ioreq: failed to find ioreq client -> BUG\n");
+				BUG();
+			} else {
+				req->client = client->id;
+				atomic_set(&req->processed, REQ_STATE_PROCESSING);
+				set_bit(i, client->ioreqs_map);
+			}
+		}
+	}
+
+	spin_lock(&vm->ioreq_client_lock);
+	list_for_each(pos, &vm->ioreq_client_list) {
+		client = container_of(pos, struct ioreq_client, list);
+		if (has_pending_request(client))
+			acrn_ioreq_notify_client(client);
+	}
+	spin_unlock(&vm->ioreq_client_lock);
+
+	return 0;
+}
+
+int acrn_ioreq_complete_request(int client_id, uint64_t vcpu)
+{
+	struct ioreq_client *client;
+	int ret;
+
+	if (client_id < 0 || client_id >= MAX_CLIENT) {
+		pr_err("vhm-ioreq: no client for id %d\n", client_id);
+		return -EINVAL;
+	}
+	client = clients[client_id];
+	if (!client) {
+		pr_err("vhm-ioreq: no client for id %d\n", client_id);
+		return -EINVAL;
+	}
+
+	clear_bit(vcpu, client->ioreqs_map);
+	ret = hcall_notify_req_finish(client->vmid, vcpu);
+	if (ret < 0) {
+		pr_err("vhm-ioreq: failed to notify request finished!\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+unsigned int vhm_dev_poll(struct file *filep, poll_table *wait)
+{
+	struct vhm_vm *vm = filep->private_data;
+	struct ioreq_client *fallback_client;
+	unsigned int ret = 0;
+
+	if (vm == NULL || vm->req_buf == NULL ||
+		vm->ioreq_fallback_client <= 0) {
+		pr_err("vhm: invalid VM!\n");
+		ret = POLLERR;
+		return ret;
+	}
+
+	fallback_client = clients[vm->ioreq_fallback_client];
+	if (!fallback_client) {
+		pr_err("vhm-ioreq: no client for id %d\n",
+			vm->ioreq_fallback_client);
+		/* poll returns an event mask, so report this via POLLERR */
+		return POLLERR;
+	}
+
+	poll_wait(filep, &fallback_client->wq, wait);
+	if (has_pending_request(fallback_client) ||
+		is_destroying(fallback_client))
+		ret = POLLIN | POLLRDNORM;
+
+	return ret;
+}
+
+int acrn_ioreq_init(struct vhm_vm *vm, unsigned long vma)
+{
+	struct acrn_set_ioreq_buffer set_buffer;
+	struct page *page;
+	int ret;
+
+	if (vm->req_buf)
+		return -EEXIST;
+
+	ret = get_user_pages_fast(vma, 1, 1, &page);
+	if (unlikely(ret != 1) || (page == NULL)) {
+		pr_err("vhm-ioreq: failed to pin request buffer!\n");
+		return -ENOMEM;
+	}
+
+	vm->req_buf = page_address(page);
+	vm->pg = page;
+
+	set_buffer.req_buf = page_to_phys(page);
+
+	ret = hcall_set_ioreq_buffer(vm->vmid, virt_to_phys(&set_buffer));
+	if (ret < 0) {
+		pr_err("vhm-ioreq: failed to set request buffer!\n");
+		/* unpin the page again so it is not leaked */
+		put_page(page);
+		vm->req_buf = NULL;
+		vm->pg = NULL;
+		return -EFAULT;
+	}
+
+	/* reserve 0, let client_id start from 1 */
+	set_bit(0, client_bitmap);
+
+	pr_info("vhm-ioreq: init request buffer @ %p!\n",
+		vm->req_buf);
+
+	return 0;
+}
+
+void acrn_ioreq_free(struct vhm_vm *vm)
+{
+	struct list_head *pos, *tmp;
+
+	list_for_each_safe(pos, tmp, &vm->ioreq_client_list) {
+		struct ioreq_client *client =
+			container_of(pos, struct ioreq_client, list);
+		acrn_ioreq_destroy_client_pervm(client, vm);
+	}
+
+	if (vm->req_buf && vm->pg) {
+		put_page(vm->pg);
+		vm->pg = NULL;
+		vm->req_buf = NULL;
+	}
+}
diff --git a/drivers/vhm/vhm_mm.c b/drivers/vhm/vhm_mm.c
new file mode 100644
index 
000000000000..4d5854d0c139 --- /dev/null +++ b/drivers/vhm/vhm_mm.c @@ -0,0 +1,274 @@ +/* + * virtio and hyperviosr service module (VHM): memory map + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * + * Jason Zeng + * Jason Chen CJ + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +static u64 _alloc_memblk(struct device *dev, size_t len) +{ + unsigned int count; + struct page *page; + + if (!PAGE_ALIGNED(len)) { + pr_warn("alloc size of memblk must be page aligned\n"); + return 0ULL; + } + + count = PAGE_ALIGN(len) >> PAGE_SHIFT; + page = dma_alloc_from_contiguous(dev, count, 1, GFP_KERNEL); + if (page) + return page_to_phys(page); + else + return 0ULL; +} + +static bool _free_memblk(struct device *dev, u64 vm0_gpa, size_t len) +{ + unsigned int count = PAGE_ALIGN(len) >> PAGE_SHIFT; + struct page *page = pfn_to_page(vm0_gpa >> PAGE_SHIFT); + + return dma_release_from_contiguous(dev, page, count); +} + +static int set_memory_region(unsigned long vmid, + struct vm_memory_region *region) +{ + struct set_regions regions; + + regions.vmid = vmid; + regions.mr_num = 1; + regions.regions_gpa = virt_to_phys(region); + + if (set_memory_regions(®ions) < 0) { + pr_err("vhm: failed to set memory region for vm[%ld]!\n", vmid); + return -EFAULT; + } + + return 0; +} + +int add_memory_region(unsigned long vmid, unsigned long gpa, + unsigned long host_gpa, unsigned long size, + unsigned int mem_type, unsigned mem_access_right) +{ + struct vm_memory_region region; + + region.type = MR_ADD; + region.gpa = gpa; + region.vm0_gpa = host_gpa; + region.size = size; + region.prot = ((mem_type & MEM_TYPE_MASK) | + (mem_access_right & MEM_ACCESS_RIGHT_MASK)); + return set_memory_region(vmid, ®ion); +} + +int del_memory_region(unsigned long vmid, unsigned long gpa, + unsigned long size) +{ + struct vm_memory_region region; + + region.type = MR_DEL; + region.gpa = gpa; + region.vm0_gpa = 0; + region.size = size; + region.prot = 0; + + return set_memory_region(vmid, ®ion); +} + +int set_memory_regions(struct set_regions *regions) +{ + if (regions == NULL) + return -EINVAL; + if (regions->mr_num > 0) { + if (hcall_set_memory_regions(virt_to_phys(regions)) < 0) { + pr_err("vhm: failed to set memory regions!\n"); + return -EFAULT; + } + } + + return 0; +} + +/* + * when set is true, set page write protection, + * else clear page write protection. 
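 *
 * Editor's note: for example, a memory-tracking client would call
 * write_protect_page(vmid, gpa, 1) to arm the trap and would then see a
 * REQ_WP ioreq when the guest writes that page; hcall_write_protect_page()
 * is the hypercall that performs the actual EPT update.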
+ */ +int write_protect_page(unsigned long vmid, + unsigned long gpa, unsigned char set) +{ + struct wp_data wp; + + wp.set = set; + wp.gpa = gpa; + + if (hcall_write_protect_page(vmid, + virt_to_phys(&wp)) < 0) { + pr_err("vhm: vm[%ld] %s failed !\n", vmid, __func__); + return -EFAULT; + } + + pr_debug("VHM: %s, gpa: 0x%lx, set: %d\n", __func__, gpa, set); + + return 0; +} + +int map_guest_memseg(struct vhm_vm *vm, struct vm_memmap *memmap) +{ + /* hugetlb use vma to do the mapping */ + if (memmap->type == VM_MEMMAP_SYSMEM && memmap->using_vma) + return hugepage_map_guest(vm, memmap); + + /* mmio */ + if (memmap->type != VM_MEMMAP_MMIO) { + pr_err("vhm: %s invalid memmap type: %d\n", + __func__, memmap->type); + return -EINVAL; + } + + if (add_memory_region(vm->vmid, memmap->gpa, + acrn_hpa2gpa(memmap->hpa), memmap->len, + MEM_TYPE_UC, memmap->prot) < 0){ + pr_err("vhm: failed to set memory region %ld!\n", vm->vmid); + return -EFAULT; + } + + return 0; +} + +void free_guest_mem(struct vhm_vm *vm) +{ + return hugepage_free_guest(vm); +} + +#define TRUSTY_MEM_GPA_BASE (511UL * 1024UL * 1024UL * 1024UL) +#define TRUSTY_MEM_SIZE (0x01000000) +int init_trusty(struct vhm_vm *vm) +{ + unsigned long host_gpa, guest_gpa = TRUSTY_MEM_GPA_BASE; + unsigned long len = TRUSTY_MEM_SIZE; + + host_gpa = _alloc_memblk(vm->dev, TRUSTY_MEM_SIZE); + if (host_gpa == 0ULL) + return -ENOMEM; + + vm->trusty_host_gpa = host_gpa; + + pr_info("VHM: set ept for trusty memory [host_gpa=0x%lx, " + "guest_gpa=0x%lx, len=0x%lx]", host_gpa, guest_gpa, len); + return add_memory_region(vm->vmid, guest_gpa, host_gpa, len, + MEM_TYPE_WB, MEM_ACCESS_RWX); +} + +void deinit_trusty(struct vhm_vm *vm) +{ + _free_memblk(vm->dev, vm->trusty_host_gpa, TRUSTY_MEM_SIZE); + vm->trusty_host_gpa = 0; +} + +void *map_guest_phys(unsigned long vmid, u64 guest_phys, size_t size) +{ + struct vhm_vm *vm; + void *ret; + + vm = find_get_vm(vmid); + if (vm == NULL) + return NULL; + + ret = hugepage_map_guest_phys(vm, guest_phys, size); + + put_vm(vm); + + return ret; +} +EXPORT_SYMBOL(map_guest_phys); + +int unmap_guest_phys(unsigned long vmid, u64 guest_phys) +{ + struct vhm_vm *vm; + int ret; + + vm = find_get_vm(vmid); + if (vm == NULL) { + pr_warn("vm_list corrupted\n"); + return -ESRCH; + } + + ret = hugepage_unmap_guest_phys(vm, guest_phys); + + put_vm(vm); + return ret; +} +EXPORT_SYMBOL(unmap_guest_phys); diff --git a/drivers/vhm/vhm_msi.c b/drivers/vhm/vhm_msi.c new file mode 100644 index 000000000000..73affd60fc46 --- /dev/null +++ b/drivers/vhm/vhm_msi.c @@ -0,0 +1,135 @@ +/* + * virtio and hyperviosr service module (VHM): msi paravirt + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Jason Chen CJ + * + */ + +#include +#include +#include +#include + +#include "../pci/pci.h" + +static struct msi_msg acrn_notify_msix_remap(struct msi_desc *entry, + struct msi_msg *msg) +{ + volatile struct acrn_vm_pci_msix_remap notify; + struct pci_dev *dev = msi_desc_to_pci_dev(entry); + struct msi_msg remapped_msg = *msg; + u16 msgctl; + int ret; + + pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl); + + notify.msi_ctl = msgctl; + notify.virt_bdf = (dev->bus->number << 8) | dev->devfn; + notify.msi_addr = msg->address_hi; + notify.msi_addr <<= 32; + notify.msi_addr |= msg->address_lo; + notify.msi_data = msg->data; + notify.msix = !!entry->msi_attrib.is_msix; + + if (notify.msix) + notify.msix_entry_index = entry->msi_attrib.entry_nr; + else + notify.msix_entry_index = 0; + + ret = hcall_remap_pci_msix(0, virt_to_phys(¬ify)); + if (ret < 0) + dev_err(&dev->dev, "Failed to notify MSI/x change to HV\n"); + else { + remapped_msg.address_hi = (unsigned int)(notify.msi_addr >> 32); + remapped_msg.address_lo = (unsigned int)notify.msi_addr; + remapped_msg.data = notify.msi_data; + } + return remapped_msg; +} + +void acrn_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) +{ + struct pci_dev *dev = msi_desc_to_pci_dev(entry); + struct msi_msg fmsg; + + if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) { + /* Don't touch the hardware now */ + } else if (entry->msi_attrib.is_msix) { + void __iomem *base = pci_msix_desc_addr(entry); + + fmsg = acrn_notify_msix_remap(entry, msg); + + writel(fmsg.address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); + writel(fmsg.address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); + writel(fmsg.data, base + PCI_MSIX_ENTRY_DATA); + } else { + int pos = dev->msi_cap; + u16 msgctl; + + fmsg = acrn_notify_msix_remap(entry, msg); + + pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl); + msgctl &= ~PCI_MSI_FLAGS_QSIZE; + msgctl |= entry->msi_attrib.multiple << 4; + pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl); + + 
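	/*
	 * Editor's note: what reaches the hardware below is the remapped
	 * message returned by the hypervisor, not the guest-visible one:
	 * the address is written as low/high dwords and the 16-bit data
	 * word goes to PCI_MSI_DATA_64 or PCI_MSI_DATA_32 depending on
	 * whether the device advertises a 64-bit MSI capability.
	 */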
pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, + fmsg.address_lo); + if (entry->msi_attrib.is_64) { + pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, + fmsg.address_hi); + pci_write_config_word(dev, pos + PCI_MSI_DATA_64, + fmsg.data); + } else { + pci_write_config_word(dev, pos + PCI_MSI_DATA_32, + fmsg.data); + } + } + entry->msg = *msg; +} diff --git a/drivers/vhm/vhm_vm_mngt.c b/drivers/vhm/vhm_vm_mngt.c new file mode 100644 index 000000000000..8f1a00777dd4 --- /dev/null +++ b/drivers/vhm/vhm_vm_mngt.c @@ -0,0 +1,162 @@ +/* + * virtio and hyperviosr service module (VHM): vm management + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Liang Ding + * Jason Zeng + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +LIST_HEAD(vhm_vm_list); +DEFINE_MUTEX(vhm_vm_list_lock); + +struct vhm_vm *find_get_vm(unsigned long vmid) +{ + struct vhm_vm *vm; + + mutex_lock(&vhm_vm_list_lock); + list_for_each_entry(vm, &vhm_vm_list, list) { + if (vm->vmid == vmid) { + vm->refcnt++; + mutex_unlock(&vhm_vm_list_lock); + return vm; + } + } + mutex_unlock(&vhm_vm_list_lock); + return NULL; +} + +void put_vm(struct vhm_vm *vm) +{ + mutex_lock(&vhm_vm_list_lock); + vm->refcnt--; + if (vm->refcnt == 0) { + list_del(&vm->list); + free_guest_mem(vm); + acrn_ioreq_free(vm); + kfree(vm); + pr_info("vhm: freed vm\n"); + } + mutex_unlock(&vhm_vm_list_lock); +} + +int vhm_get_vm_info(unsigned long vmid, struct vm_info *info) +{ + struct vhm_vm *vm; + + vm = find_get_vm(vmid); + if (unlikely(vm == NULL)) { + pr_err("vhm: failed to find vm from vmid %ld\n", + vmid); + return -EINVAL; + } + /*TODO: hardcode max_vcpu here, should be fixed by getting at runtime */ + info->max_vcpu = 4; + info->max_gfn = vm->max_gfn; + put_vm(vm); + return 0; +} + +int vhm_inject_msi(unsigned long vmid, unsigned long msi_addr, + unsigned long msi_data) +{ + struct acrn_msi_entry msi; + int ret; + + /* msi_addr: addr[19:12] with dest vcpu id */ + /* msi_data: data[7:0] with vector */ + msi.msi_addr = msi_addr; + msi.msi_data = msi_data; + ret = hcall_inject_msi(vmid, virt_to_phys(&msi)); + if (ret < 0) { + pr_err("vhm: failed to inject!\n"); + return -EFAULT; + } + return 0; +} + +unsigned long vhm_vm_gpa2hpa(unsigned long vmid, unsigned long gpa) +{ + struct vm_gpa2hpa gpa2hpa; + int ret; + + gpa2hpa.gpa = gpa; + gpa2hpa.hpa = -1UL; /* Init value as invalid gpa */ + ret = hcall_vm_gpa2hpa(vmid, virt_to_phys(&gpa2hpa)); + if (ret < 0) { + pr_err("vhm: failed to inject!\n"); + return -EFAULT; + } + mb(); + return gpa2hpa.hpa; +} + +void vm_list_add(struct list_head *list) +{ + list_add(list, &vhm_vm_list); +} + +void vm_mutex_lock(struct mutex *mlock) +{ + mutex_lock(mlock); +} + +void vm_mutex_unlock(struct mutex *mlock) +{ + mutex_unlock(mlock); +} diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 58585ec8699e..6123b4dd8638 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -622,7 +622,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) if (!len && vq->busyloop_timeout) { /* Both tx vq and rx socket were polled here */ - mutex_lock(&vq->mutex); + mutex_lock_nested(&vq->mutex, 1); vhost_disable_notify(&net->dev, vq); preempt_disable(); @@ -755,7 +755,7 @@ static void handle_rx(struct vhost_net *net) struct iov_iter fixup; __virtio16 num_buffers; - mutex_lock(&vq->mutex); + mutex_lock_nested(&vq->mutex, 0); sock = vq->private_data; if (!sock) goto out; @@ -782,16 +782,6 @@ static void handle_rx(struct vhost_net *net) /* On error, stop handling until the next kick. */ if (unlikely(headcount < 0)) goto out; - if (nvq->rx_array) - msg.msg_control = vhost_net_buf_consume(&nvq->rxq); - /* On overrun, truncate and discard */ - if (unlikely(headcount > UIO_MAXIOV)) { - iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); - err = sock->ops->recvmsg(sock, &msg, - 1, MSG_DONTWAIT | MSG_TRUNC); - pr_debug("Discarded rx packet: len %zd\n", sock_len); - continue; - } /* OK, now we need to know about added descriptors. */ if (!headcount) { if (unlikely(vhost_enable_notify(&net->dev, vq))) { @@ -804,6 +794,16 @@ static void handle_rx(struct vhost_net *net) * they refilled. 
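 *
 * Editor's note: the reordering in this hunk means the batched rx
 * buffer is only consumed from nvq->rxq once descriptors are known to
 * be available, so an skb can no longer be leaked when handle_rx()
 * exits early on an empty ring.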
*/ goto out; } + if (nvq->rx_array) + msg.msg_control = vhost_net_buf_consume(&nvq->rxq); + /* On overrun, truncate and discard */ + if (unlikely(headcount > UIO_MAXIOV)) { + iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); + err = sock->ops->recvmsg(sock, &msg, + 1, MSG_DONTWAIT | MSG_TRUNC); + pr_debug("Discarded rx packet: len %zd\n", sock_len); + continue; + } /* We don't need to be notified again. */ iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len); fixup = msg.msg_iter; @@ -1186,7 +1186,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) if (ubufs) vhost_net_ubuf_put_wait_and_free(ubufs); err_ubufs: - sockfd_put(sock); + if (sock) + sockfd_put(sock); err_vq: mutex_unlock(&vq->mutex); err: @@ -1212,6 +1213,7 @@ static long vhost_net_reset_owner(struct vhost_net *n) } vhost_net_stop(n, &tx_sock, &rx_sock); vhost_net_flush(n); + vhost_dev_stop(&n->dev); vhost_dev_reset_owner(&n->dev, umem); vhost_net_vq_reset(n); done: diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 046f6d280af5..e47c5bc3ddca 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -688,6 +688,7 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write, struct scatterlist *sg, int sg_count) { size_t off = iter->iov_offset; + struct scatterlist *p = sg; int i, ret; for (i = 0; i < iter->nr_segs; i++) { @@ -696,8 +697,8 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write, ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write); if (ret < 0) { - for (i = 0; i < sg_count; i++) { - struct page *page = sg_page(&sg[i]); + while (p < sg) { + struct page *page = sg_page(p++); if (page) put_page(page); } diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index d6dbb28245e6..3cf74f54c7a1 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -213,8 +213,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file) if (mask) vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); if (mask & POLLERR) { - if (poll->wqh) - remove_wait_queue(poll->wqh, &poll->wait); + vhost_poll_stop(poll); ret = -EINVAL; } @@ -757,7 +756,7 @@ static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to, struct iov_iter t; void __user *uaddr = vhost_vq_meta_fetch(vq, (u64)(uintptr_t)to, size, - VHOST_ADDR_DESC); + VHOST_ADDR_USED); if (uaddr) return __copy_to_user(uaddr, from, size); @@ -904,7 +903,7 @@ static void vhost_dev_lock_vqs(struct vhost_dev *d) { int i = 0; for (i = 0; i < d->nvqs; ++i) - mutex_lock(&d->vqs[i]->mutex); + mutex_lock_nested(&d->vqs[i]->mutex, i); } static void vhost_dev_unlock_vqs(struct vhost_dev *d) @@ -994,6 +993,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, { int ret = 0; + mutex_lock(&dev->mutex); vhost_dev_lock_vqs(dev); switch (msg->type) { case VHOST_IOTLB_UPDATE: @@ -1025,6 +1025,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, } vhost_dev_unlock_vqs(dev); + mutex_unlock(&dev->mutex); + return ret; } ssize_t vhost_chr_write_iter(struct vhost_dev *dev, @@ -1253,14 +1255,14 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq, /* Caller should have vq mutex and device mutex */ int vhost_vq_access_ok(struct vhost_virtqueue *vq) { - if (vq->iotlb) { - /* When device IOTLB was used, the access validation - * will be validated during prefetching. 
- */ + if (!vq_log_access_ok(vq, vq->log_base)) + return 0; + + /* Access validation occurs at prefetch time with IOTLB */ + if (vq->iotlb) return 1; - } - return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) && - vq_log_access_ok(vq, vq->log_base); + + return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used); } EXPORT_SYMBOL_GPL(vhost_vq_access_ok); @@ -1576,9 +1578,12 @@ int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled) d->iotlb = niotlb; for (i = 0; i < d->nvqs; ++i) { - mutex_lock(&d->vqs[i]->mutex); - d->vqs[i]->iotlb = niotlb; - mutex_unlock(&d->vqs[i]->mutex); + struct vhost_virtqueue *vq = d->vqs[i]; + + mutex_lock(&vq->mutex); + vq->iotlb = niotlb; + __vhost_vq_meta_reset(vq); + mutex_unlock(&vq->mutex); } vhost_umem_clean(oiotlb); @@ -2380,6 +2385,9 @@ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type) struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL); if (!node) return NULL; + + /* Make sure all padding within the structure is initialized. */ + memset(&node->msg, 0, sizeof node->msg); node->vq = vq; node->msg.type = type; return node; diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c index 734a9158946b..e55304d5cf07 100644 --- a/drivers/video/backlight/as3711_bl.c +++ b/drivers/video/backlight/as3711_bl.c @@ -262,10 +262,10 @@ static int as3711_bl_register(struct platform_device *pdev, static int as3711_backlight_parse_dt(struct device *dev) { struct as3711_bl_pdata *pdata = dev_get_platdata(dev); - struct device_node *bl = - of_find_node_by_name(dev->parent->of_node, "backlight"), *fb; + struct device_node *bl, *fb; int ret; + bl = of_get_child_by_name(dev->parent->of_node, "backlight"); if (!bl) { dev_dbg(dev, "backlight node not found\n"); return -ENODEV; @@ -279,7 +279,7 @@ static int as3711_backlight_parse_dt(struct device *dev) if (pdata->su1_max_uA <= 0) ret = -EINVAL; if (ret < 0) - return ret; + goto err_put_bl; } fb = of_parse_phandle(bl, "su2-dev", 0); @@ -292,7 +292,7 @@ static int as3711_backlight_parse_dt(struct device *dev) if (pdata->su2_max_uA <= 0) ret = -EINVAL; if (ret < 0) - return ret; + goto err_put_bl; if (of_find_property(bl, "su2-feedback-voltage", NULL)) { pdata->su2_feedback = AS3711_SU2_VOLTAGE; @@ -314,8 +314,10 @@ static int as3711_backlight_parse_dt(struct device *dev) pdata->su2_feedback = AS3711_SU2_CURR_AUTO; count++; } - if (count != 1) - return -EINVAL; + if (count != 1) { + ret = -EINVAL; + goto err_put_bl; + } count = 0; if (of_find_property(bl, "su2-fbprot-lx-sd4", NULL)) { @@ -334,8 +336,10 @@ static int as3711_backlight_parse_dt(struct device *dev) pdata->su2_fbprot = AS3711_SU2_GPIO4; count++; } - if (count != 1) - return -EINVAL; + if (count != 1) { + ret = -EINVAL; + goto err_put_bl; + } count = 0; if (of_find_property(bl, "su2-auto-curr1", NULL)) { @@ -355,11 +359,20 @@ static int as3711_backlight_parse_dt(struct device *dev) * At least one su2-auto-curr* must be specified iff * AS3711_SU2_CURR_AUTO is used */ - if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO)) - return -EINVAL; + if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO)) { + ret = -EINVAL; + goto err_put_bl; + } } + of_node_put(bl); + return 0; + +err_put_bl: + of_node_put(bl); + + return ret; } static int as3711_backlight_probe(struct platform_device *pdev) diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c index d7c239ea3d09..f5574060f9c8 100644 --- a/drivers/video/backlight/corgi_lcd.c +++ 
b/drivers/video/backlight/corgi_lcd.c @@ -177,7 +177,7 @@ static int corgi_ssp_lcdtg_send(struct corgi_lcd *lcd, int adrs, uint8_t data) struct spi_message msg; struct spi_transfer xfer = { .len = 1, - .cs_change = 1, + .cs_change = 0, .tx_buf = lcd->buf, }; diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c index 7b738d60ecc2..f3aa6088f1d9 100644 --- a/drivers/video/backlight/max8925_bl.c +++ b/drivers/video/backlight/max8925_bl.c @@ -116,7 +116,7 @@ static void max8925_backlight_dt_init(struct platform_device *pdev) if (!pdata) return; - np = of_find_node_by_name(nproot, "backlight"); + np = of_get_child_by_name(nproot, "backlight"); if (!np) { dev_err(&pdev->dev, "failed to find backlight node\n"); return; @@ -125,6 +125,8 @@ static void max8925_backlight_dt_init(struct platform_device *pdev) if (!of_property_read_u32(np, "maxim,max8925-dual-string", &val)) pdata->dual_string = val; + of_node_put(np); + pdev->dev.platform_data = pdata; } diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 9bd17682655a..0fa7d2bd0e48 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -79,14 +79,17 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb) static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness) { unsigned int lth = pb->lth_brightness; - int duty_cycle; + u64 duty_cycle; if (pb->levels) duty_cycle = pb->levels[brightness]; else duty_cycle = brightness; - return (duty_cycle * (pb->period - lth) / pb->scale) + lth; + duty_cycle *= pb->period - lth; + do_div(duty_cycle, pb->scale); + + return duty_cycle + lth; } static int pwm_backlight_update_status(struct backlight_device *bl) @@ -298,14 +301,14 @@ static int pwm_backlight_probe(struct platform_device *pdev) /* * If the GPIO is not known to be already configured as output, that - * is, if gpiod_get_direction returns either GPIOF_DIR_IN or -EINVAL, - * change the direction to output and set the GPIO as active. + * is, if gpiod_get_direction returns either 1 or -EINVAL, change the + * direction to output and set the GPIO as active. * Do not force the GPIO to active when it was already output as it * could cause backlight flickering or we would enable the backlight too * early. Leave the decision of the initial backlight state for later. 
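 *
 * Editor's note: gpiod_get_direction() returns 0 for an output line, 1
 * for an input line, or a negative errno, which is why the comparison
 * below can use the literal 0 instead of GPIOF_DIR_OUT.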
*/ if (pb->enable_gpio && - gpiod_get_direction(pb->enable_gpio) != GPIOF_DIR_OUT) + gpiod_get_direction(pb->enable_gpio) != 0) gpiod_direction_output(pb->enable_gpio, 1); pb->power_supply = devm_regulator_get(&pdev->dev, "power"); diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c index eab1f842f9c0..e4bd63e9db6b 100644 --- a/drivers/video/backlight/tdo24m.c +++ b/drivers/video/backlight/tdo24m.c @@ -369,7 +369,7 @@ static int tdo24m_probe(struct spi_device *spi) spi_message_init(m); - x->cs_change = 1; + x->cs_change = 0; x->tx_buf = &lcd->buf[0]; spi_message_add_tail(x, m); diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c index 6a41ea92737a..4dc5ee8debeb 100644 --- a/drivers/video/backlight/tosa_lcd.c +++ b/drivers/video/backlight/tosa_lcd.c @@ -49,7 +49,7 @@ static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data) struct spi_message msg; struct spi_transfer xfer = { .len = 1, - .cs_change = 1, + .cs_change = 0, .tx_buf = buf, }; diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c index fd524ad860a5..f45d0c9467db 100644 --- a/drivers/video/backlight/tps65217_bl.c +++ b/drivers/video/backlight/tps65217_bl.c @@ -184,11 +184,11 @@ static struct tps65217_bl_pdata * tps65217_bl_parse_dt(struct platform_device *pdev) { struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent); - struct device_node *node = of_node_get(tps->dev->of_node); + struct device_node *node; struct tps65217_bl_pdata *pdata, *err; u32 val; - node = of_find_node_by_name(node, "backlight"); + node = of_get_child_by_name(tps->dev->of_node, "backlight"); if (!node) return ERR_PTR(-ENODEV); diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c index 9269d5685239..f2eafe2ed980 100644 --- a/drivers/video/console/dummycon.c +++ b/drivers/video/console/dummycon.c @@ -41,12 +41,47 @@ static void dummycon_init(struct vc_data *vc, int init) vc_resize(vc, DUMMY_COLUMNS, DUMMY_ROWS); } -static int dummycon_dummy(void) +static void dummycon_deinit(struct vc_data *vc) { } +static void dummycon_clear(struct vc_data *vc, int sy, int sx, int height, + int width) { } +static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos) { } +static void dummycon_putcs(struct vc_data *vc, const unsigned short *s, + int count, int ypos, int xpos) { } +static void dummycon_cursor(struct vc_data *vc, int mode) { } + +static bool dummycon_scroll(struct vc_data *vc, unsigned int top, + unsigned int bottom, enum con_scroll dir, + unsigned int lines) +{ + return false; +} + +static int dummycon_switch(struct vc_data *vc) { - return 0; + return 0; } -#define DUMMY (void *)dummycon_dummy +static int dummycon_blank(struct vc_data *vc, int blank, int mode_switch) +{ + return 0; +} + +static int dummycon_font_set(struct vc_data *vc, struct console_font *font, + unsigned int flags) +{ + return 0; +} + +static int dummycon_font_default(struct vc_data *vc, + struct console_font *font, char *name) +{ + return 0; +} + +static int dummycon_font_copy(struct vc_data *vc, int con) +{ + return 0; +} /* * The console `switch' structure for the dummy console @@ -55,20 +90,19 @@ static int dummycon_dummy(void) */ const struct consw dummy_con = { - .owner = THIS_MODULE, - .con_startup = dummycon_startup, - .con_init = dummycon_init, - .con_deinit = DUMMY, - .con_clear = DUMMY, - .con_putc = DUMMY, - .con_putcs = DUMMY, - .con_cursor = DUMMY, - .con_scroll = DUMMY, - .con_switch = DUMMY, - .con_blank = DUMMY, - 
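	/*
	 * Editor's note: each remaining hook now points at a typed no-op
	 * stub instead of the old DUMMY cast of dummycon_dummy(); calling
	 * a function through a mismatching pointer type is undefined
	 * behaviour and breaks indirect-call checking such as CFI.
	 */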
.con_font_set = DUMMY, - .con_font_get = DUMMY, - .con_font_default = DUMMY, - .con_font_copy = DUMMY, + .owner = THIS_MODULE, + .con_startup = dummycon_startup, + .con_init = dummycon_init, + .con_deinit = dummycon_deinit, + .con_clear = dummycon_clear, + .con_putc = dummycon_putc, + .con_putcs = dummycon_putcs, + .con_cursor = dummycon_cursor, + .con_scroll = dummycon_scroll, + .con_switch = dummycon_switch, + .con_blank = dummycon_blank, + .con_font_set = dummycon_font_set, + .con_font_default = dummycon_font_default, + .con_font_copy = dummycon_font_copy, }; EXPORT_SYMBOL_GPL(dummy_con); diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c index 42d02a206059..7f2526b43b33 100644 --- a/drivers/video/console/newport_con.c +++ b/drivers/video/console/newport_con.c @@ -673,12 +673,12 @@ static bool newport_scroll(struct vc_data *vc, unsigned int t, unsigned int b, return true; } -static int newport_dummy(struct vc_data *c) +static int newport_set_origin(struct vc_data *vc) { return 0; } -#define DUMMY (void *) newport_dummy +static void newport_save_screen(struct vc_data *vc) { } const struct consw newport_con = { .owner = THIS_MODULE, @@ -694,8 +694,8 @@ const struct consw newport_con = { .con_blank = newport_blank, .con_font_set = newport_font_set, .con_font_default = newport_font_default, - .con_set_origin = DUMMY, - .con_save_screen = DUMMY + .con_set_origin = newport_set_origin, + .con_save_screen = newport_save_screen }; static int newport_probe(struct gio_device *dev, diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 445b1dc5d441..f09e17b60e45 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -422,7 +422,10 @@ static const char *vgacon_startup(void) vga_video_port_val = VGA_CRT_DM; if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) { static struct resource ega_console_resource = - { .name = "ega", .start = 0x3B0, .end = 0x3BF }; + { .name = "ega", + .flags = IORESOURCE_IO, + .start = 0x3B0, + .end = 0x3BF }; vga_video_type = VIDEO_TYPE_EGAM; vga_vram_size = 0x8000; display_desc = "EGA+"; @@ -430,9 +433,15 @@ static const char *vgacon_startup(void) &ega_console_resource); } else { static struct resource mda1_console_resource = - { .name = "mda", .start = 0x3B0, .end = 0x3BB }; + { .name = "mda", + .flags = IORESOURCE_IO, + .start = 0x3B0, + .end = 0x3BB }; static struct resource mda2_console_resource = - { .name = "mda", .start = 0x3BF, .end = 0x3BF }; + { .name = "mda", + .flags = IORESOURCE_IO, + .start = 0x3BF, + .end = 0x3BF }; vga_video_type = VIDEO_TYPE_MDA; vga_vram_size = 0x2000; display_desc = "*MDA"; @@ -454,15 +463,21 @@ static const char *vgacon_startup(void) vga_vram_size = 0x8000; if (!screen_info.orig_video_isVGA) { - static struct resource ega_console_resource - = { .name = "ega", .start = 0x3C0, .end = 0x3DF }; + static struct resource ega_console_resource = + { .name = "ega", + .flags = IORESOURCE_IO, + .start = 0x3C0, + .end = 0x3DF }; vga_video_type = VIDEO_TYPE_EGAC; display_desc = "EGA"; request_resource(&ioport_resource, &ega_console_resource); } else { - static struct resource vga_console_resource - = { .name = "vga+", .start = 0x3C0, .end = 0x3DF }; + static struct resource vga_console_resource = + { .name = "vga+", + .flags = IORESOURCE_IO, + .start = 0x3C0, + .end = 0x3DF }; vga_video_type = VIDEO_TYPE_VGAC; display_desc = "VGA+"; request_resource(&ioport_resource, @@ -494,7 +509,10 @@ static const char *vgacon_startup(void) } } else { static struct 
resource cga_console_resource = - { .name = "cga", .start = 0x3D4, .end = 0x3D5 }; + { .name = "cga", + .flags = IORESOURCE_IO, + .start = 0x3D4, + .end = 0x3D5 }; vga_video_type = VIDEO_TYPE_CGA; vga_vram_size = 0x2000; display_desc = "*CGA"; @@ -1254,7 +1272,8 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight) return 0; } -static int vgacon_font_set(struct vc_data *c, struct console_font *font, unsigned flags) +static int vgacon_font_set(struct vc_data *c, struct console_font *font, + unsigned int flags) { unsigned charcount = font->charcount; int rc; @@ -1389,21 +1408,20 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, * The console `switch' structure for the VGA based console */ -static int vgacon_dummy(struct vc_data *c) -{ - return 0; -} - -#define DUMMY (void *) vgacon_dummy +static void vgacon_clear(struct vc_data *vc, int sy, int sx, int height, + int width) { } +static void vgacon_putc(struct vc_data *vc, int c, int ypos, int xpos) { } +static void vgacon_putcs(struct vc_data *vc, const unsigned short *s, + int count, int ypos, int xpos) { } const struct consw vga_con = { .owner = THIS_MODULE, .con_startup = vgacon_startup, .con_init = vgacon_init, .con_deinit = vgacon_deinit, - .con_clear = DUMMY, - .con_putc = DUMMY, - .con_putcs = DUMMY, + .con_clear = vgacon_clear, + .con_putc = vgacon_putc, + .con_putcs = vgacon_putcs, .con_cursor = vgacon_cursor, .con_scroll = vgacon_scroll, .con_switch = vgacon_switch, diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index e06358da4b99..3dee267d7c75 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -1119,7 +1119,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) goto put_display_node; } - timings_np = of_find_node_by_name(display_np, "display-timings"); + timings_np = of_get_child_by_name(display_np, "display-timings"); if (!timings_np) { dev_err(dev, "failed to find display-timings node\n"); ret = -ENODEV; @@ -1140,6 +1140,12 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) fb_add_videomode(&fb_vm, &info->modelist); } + /* + * FIXME: Make sure we are not referencing any fields in display_np + * and timings_np and drop our references to them before returning to + * avoid leaking the nodes on probe deferral and driver unbind. 
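 *
 * Editor's note: the references in question are the of_parse_phandle()
 * result (display_np) and the of_get_child_by_name() result
 * (timings_np); the eventual fix would be of_node_put() on both once
 * the video modes have been copied into the modelist.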
+ */ + return 0; put_timings_node: diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c index 5f04b4096c42..6c542d0ca076 100644 --- a/drivers/video/fbdev/au1200fb.c +++ b/drivers/video/fbdev/au1200fb.c @@ -1681,8 +1681,10 @@ static int au1200fb_drv_probe(struct platform_device *dev) fbi = framebuffer_alloc(sizeof(struct au1200fb_device), &dev->dev); - if (!fbi) + if (!fbi) { + ret = -ENOMEM; goto failed; + } _au1200fb_infos[plane] = fbi; fbdev = fbi->par; @@ -1701,7 +1703,8 @@ static int au1200fb_drv_probe(struct platform_device *dev) if (!fbdev->fb_mem) { print_err("fail to allocate frambuffer (size: %dK))", fbdev->fb_len / 1024); - return -ENOMEM; + ret = -ENOMEM; + goto failed; } /* diff --git a/drivers/video/fbdev/controlfb.h b/drivers/video/fbdev/controlfb.h index 6026c60fc100..261522fabdac 100644 --- a/drivers/video/fbdev/controlfb.h +++ b/drivers/video/fbdev/controlfb.h @@ -141,5 +141,7 @@ static struct max_cmodes control_mac_modes[] = { {{ 1, 2}}, /* 1152x870, 75Hz */ {{ 0, 1}}, /* 1280x960, 75Hz */ {{ 0, 1}}, /* 1280x1024, 75Hz */ + {{ 1, 2}}, /* 1152x768, 60Hz */ + {{ 0, 1}}, /* 1600x1024, 60Hz */ }; diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 04612f938bab..235d549d5958 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -2588,7 +2588,8 @@ static int fbcon_copy_font(struct vc_data *vc, int con) * is ever implemented. */ -static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigned flags) +static int fbcon_set_font(struct vc_data *vc, struct console_font *font, + unsigned int flags) { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; unsigned charcount = font->charcount; diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c index 6082f653c68a..67773e8bbb95 100644 --- a/drivers/video/fbdev/geode/video_gx.c +++ b/drivers/video/fbdev/geode/video_gx.c @@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info) int timeout = 1000; /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. 
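 *
 * Editor's note: the change below is purely mechanical; the per-cpu
 * field x86_mask was renamed to x86_stepping upstream, and the Rev. 1
 * stepping test itself is unchanged.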
*/ - if (cpu_data(0).x86_mask == 1) { + if (cpu_data(0).x86_stepping == 1) { pll_table = gx_pll_table_14MHz; pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz); } else { diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c index 7f6c9e6cfc6c..1e56b50e4082 100644 --- a/drivers/video/fbdev/goldfishfb.c +++ b/drivers/video/fbdev/goldfishfb.c @@ -26,6 +26,7 @@ #include #include #include +#include enum { FB_GET_WIDTH = 0x00, @@ -234,7 +235,7 @@ static int goldfish_fb_probe(struct platform_device *pdev) fb->fb.var.activate = FB_ACTIVATE_NOW; fb->fb.var.height = readl(fb->reg_base + FB_GET_PHYS_HEIGHT); fb->fb.var.width = readl(fb->reg_base + FB_GET_PHYS_WIDTH); - fb->fb.var.pixclock = 10000; + fb->fb.var.pixclock = 0; fb->fb.var.red.offset = 11; fb->fb.var.red.length = 5; @@ -304,12 +305,25 @@ static int goldfish_fb_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id goldfish_fb_of_match[] = { + { .compatible = "google,goldfish-fb", }, + {}, +}; +MODULE_DEVICE_TABLE(of, goldfish_fb_of_match); + +static const struct acpi_device_id goldfish_fb_acpi_match[] = { + { "GFSH0004", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, goldfish_fb_acpi_match); static struct platform_driver goldfish_fb_driver = { .probe = goldfish_fb_probe, .remove = goldfish_fb_remove, .driver = { - .name = "goldfish_fb" + .name = "goldfish_fb", + .of_match_table = goldfish_fb_of_match, + .acpi_match_table = ACPI_PTR(goldfish_fb_acpi_match), } }; diff --git a/drivers/video/fbdev/mmp/core.c b/drivers/video/fbdev/mmp/core.c index a0f496049db7..3a6bb6561ba0 100644 --- a/drivers/video/fbdev/mmp/core.c +++ b/drivers/video/fbdev/mmp/core.c @@ -23,6 +23,7 @@ #include #include #include +#include #include